merge in nyc-release history after reset to nyc-dev
diff --git a/UPSTREAM_REVISION b/UPSTREAM_REVISION
index 85f78e9..fe7d28d 100644
--- a/UPSTREAM_REVISION
+++ b/UPSTREAM_REVISION
@@ -1 +1 @@
-e44f0a94c015a6d5d91fd8b8172ad78911a296e5
+e5abb7bded38f0a9d4c267adf5e8abb115cafca7
diff --git a/catapult/.gitignore b/catapult/.gitignore
index e298a45..d0b582d 100644
--- a/catapult/.gitignore
+++ b/catapult/.gitignore
@@ -10,3 +10,6 @@
 # directory but should not be committed for privacy reasons. This prevents
 # an accidental add.
 perf_insights/perf_insights/download_traces.py
+
+# devil's binary dependency download folder.
+/devil/bin/deps/
diff --git a/catapult/AUTHORS b/catapult/AUTHORS
index 5e48583..d62680b 100644
--- a/catapult/AUTHORS
+++ b/catapult/AUTHORS
@@ -8,7 +8,8 @@
 #
 # See python fnmatch module documentation for more information.
 
-Sylvester Willis <sylvester.lee.willis@gmail.com
+Sylvester Willis <sylvester.lee.willis@gmail.com>
+Mathieu Laprise <mathlaprise@gmail.com>
 
 The Chromium Authors <*@chromium.org>
 Google Inc. <*@google.com>
diff --git a/catapult/CONTRIBUTING.md b/catapult/CONTRIBUTING.md
index 4b9a570..d6aeb98 100644
--- a/catapult/CONTRIBUTING.md
+++ b/catapult/CONTRIBUTING.md
@@ -2,6 +2,12 @@
      Use of this source code is governed by a BSD-style license that can be
      found in the LICENSE file.
 -->
+# Code of Conduct
+
+We follow the [Chromium code of conduct](
+https://chromium.googlesource.com/chromium/src/+/master/CODE_OF_CONDUCT.md) in
+our repos and organizations, mailing lists, and other communications.
+
 # Workflow
 
 Install [depot_tools]
@@ -41,7 +47,7 @@
 git branch -D foo
 ```
 
-# Legal
+# Becoming a committer
 
 If you're new to the chromium-family of projects, you will also need to sign the
 chrome contributors license agreement. You can sign the
@@ -56,6 +62,9 @@
 If you've never submitted code before, you must add your (or your
 organization's) name and contact info to the Chromium AUTHORS file.
 
+Next, ask an admin to add you (see
+[adding committers](/docs/adding-committers.md)).
+
 # Contributing from a Chromium checkout
 
 If you already have catapult checked out as part of a Chromium checkout and want
@@ -79,10 +88,15 @@
 
 # Code style
 
-We follow the [Chromium style]
-(https://www.chromium.org/developers/coding-style). 
+See the [style guide](/docs/style-guide.md).
 
-If you're contributing to Trace Viewer, refer to the [Trace Viewer style guide](https://docs.google.com/document/d/1MMOfywou2Oaho4jOttUk-ZSJcHVd5G5BTsD48rPrBtQ/edit).
+# Individual project documentation
+
+Look to individual project documentation for more info on getting started:
+   * [perf dashboard](/dashboard/README.md)
+   * [systrace](/systrace/README.md)
+   * [telemetry](/telemetry/README.md)
+   * [trace-viewer](/tracing/README.md)
 
 # Tests
 
@@ -95,22 +109,12 @@
 
 # Updating Chromium's about:tracing (rolling DEPS)
 
-To get your change to appear in Chrome's about:tracing or other
-third_party/catapult files, commit to catapult. Then check the [mirror]
-(https://chromium.googlesource.com/external/github.com/catapult-project/catapult.git)
-to find the git hash of your commit. (Note: it may take a few minutes to be
-mirrored).
+Chromium's DEPS file needs to be rolled to the catapult revision containing your
+change in order for it to appear in Chrome's about:tracing or other
+third_party/catapult files. Follow the [directions for rolling DEPS]
+(/docs/rolling-deps.md) to do this.
 
-Then edit Chrome's [src/DEPS]
-(https://code.google.com/p/chromium/codesearch#chromium/src/DEPS) file. Look for
-a line like:
+# Adding a new project
 
-```
-  'src/third_party/catapult':
-    Var('chromium_git') + '/external/github.com/catapult-project/catapult.git' + '@' +
-    '2da8924915bd6fb7609c518f5b1f63cb606248eb',
-```
-
-Update the number to the git hash you want to roll to, and [contribute a
-codereview to chrome](http://www.chromium.org/developers/contributing-code)
-for your edit. If you are a Chromium committer, feel free to TBR this.
+Please read the [directory structure guide](/docs/directory-structure.md)
+to learn the conventions for new directories.
diff --git a/catapult/OWNERS b/catapult/OWNERS
index ed4a63e..37ea155 100644
--- a/catapult/OWNERS
+++ b/catapult/OWNERS
@@ -1,3 +1,4 @@
 aiolos@chromium.org
-sullivan@chromium.org
 nduca@chromium.org
+nednguyen@google.com
+sullivan@chromium.org
diff --git a/catapult/PRESUBMIT.py b/catapult/PRESUBMIT.py
index 2bcec0f..26b6694 100644
--- a/catapult/PRESUBMIT.py
+++ b/catapult/PRESUBMIT.py
@@ -36,10 +36,11 @@
     r'^tracing[\\/]tracing_examples[\\/]string_convert\.js$',
     r'^tracing[\\/]test_data[\\/].*',
     r'^tracing[\\/]third_party[\\/].*',
+    r'^telemetry[\\/]support[\\/]html_output[\\/]results-template.html',
 )
 
 
-def GetPreferredTryMasters(project, change):  # pylint: disable=unused-argument
+def GetPreferredTryMasters(project, change):
   return {
       'tryserver.client.catapult': {
           'Catapult Linux Tryserver': {'defaulttests'},
@@ -51,11 +52,13 @@
 
 def CheckChangeLogBug(input_api, output_api):
   if input_api.change.BUG is None or re.match(
-      '(catapult\:\#\d+)(,\s*\#\d+)*$', input_api.change.BUG):
+      r'((chromium\:|catapult\:\#)\d+)(,\s*(chromium\:|catapult\:\#)\d+)*$',
+      input_api.change.BUG):
     return []
   return [output_api.PresubmitError(
-      ('Invalid bug "%s". BUG= should either not be present or start with '
-       '"catapult:#"" for a github issue.' % input_api.change.BUG))]
+      ('Invalid bug "%s". Chromium issues should be prefixed with "chromium:" '
+       'and Catapult issues should be prefixed with "catapult:#".' %
+       input_api.change.BUG))]
 
 
 def CheckChange(input_api, output_api):
@@ -64,15 +67,15 @@
     sys.path += [input_api.PresubmitLocalPath()]
     from catapult_build import js_checks
     from catapult_build import html_checks
+    from catapult_build import repo_checks
     results += input_api.canned_checks.PanProjectChecks(
         input_api, output_api, excluded_paths=_EXCLUDED_PATHS)
-    results += input_api.canned_checks.RunPylint(
-        input_api, output_api, black_list=_EXCLUDED_PATHS)
     results += CheckChangeLogBug(input_api, output_api)
     results += js_checks.RunChecks(
         input_api, output_api, excluded_paths=_EXCLUDED_PATHS)
     results += html_checks.RunChecks(
         input_api, output_api, excluded_paths=_EXCLUDED_PATHS)
+    results += repo_checks.RunChecks(input_api, output_api)
   finally:
     sys.path.remove(input_api.PresubmitLocalPath())
   return results
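A minimal sketch of how the new BUG= pattern in CheckChangeLogBug behaves, using only the regex introduced above (the presubmit plumbing is omitted; the sample bug strings are hypothetical):

```python
import re

# The pattern added to CheckChangeLogBug above.
BUG_RE = (r'((chromium\:|catapult\:\#)\d+)'
          r'(,\s*(chromium\:|catapult\:\#)\d+)*$')

for bug in ('chromium:123',
            'catapult:#456',
            'chromium:123, catapult:#789',
            'catapult:456'):  # missing '#', should be rejected
  print(bug, bool(re.match(BUG_RE, bug)))
```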
diff --git a/catapult/README.md b/catapult/README.md
index 1f0fa83..15a95d4 100644
--- a/catapult/README.md
+++ b/catapult/README.md
@@ -20,6 +20,6 @@
 
 Contributing
 ============
-Please see [our contributor's guide](CONTRIBUTING.md)
+Please see [our contributor's guide](/CONTRIBUTING.md)
 
 **[Current build status](https://build.chromium.org/p/client.catapult/waterfall)**
diff --git a/catapult/base/util/perfbot_stats/__init__.py b/catapult/base/util/perfbot_stats/__init__.py
deleted file mode 100644
index 1aaf0e1..0000000
--- a/catapult/base/util/perfbot_stats/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# Copyright 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
diff --git a/catapult/base/util/perfbot_stats/chrome_perf_stats.py b/catapult/base/util/perfbot_stats/chrome_perf_stats.py
deleted file mode 100755
index aee0b1b..0000000
--- a/catapult/base/util/perfbot_stats/chrome_perf_stats.py
+++ /dev/null
@@ -1,153 +0,0 @@
-#!/usr/bin/env python2.7
-# Copyright 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Script to pull chromium.perf stats from chrome-infra-stats API.
-
-Currently this just pulls success rates from the API, averages daily per
-builder, and uploads to perf dashboard. It could be improved to provide more
-detailed success rates.
-
-The API documentation for chrome-infra-stats is at:
-https://apis-explorer.appspot.com/apis-explorer/?
-   base=https://chrome-infra-stats.appspot.com/_ah/api#p/
-"""
-
-import calendar
-import datetime
-import json
-import sys
-import urllib
-import urllib2
-
-BUILDER_LIST_URL = ('https://chrome-infra-stats.appspot.com/'
-                    '_ah/api/stats/v1/masters/chromium.perf')
-
-BUILDER_STATS_URL = ('https://chrome-infra-stats.appspot.com/_ah/api/stats/v1/'
-                     'stats/chromium.perf/%s/overall__build__result__/%s')
-
-USAGE = ('Usage: chrome_perf_stats.py <year> <month> <day>. If date is not '
-         'specified, yesterday will be used.')
-
-
-def main():
-  if len(sys.argv) == 2 and sys.argv[0] == '--help':
-    print USAGE
-    sys.exit(0)
-  year = None
-  month = None
-  days = None
-  if len(sys.argv) == 4 or len(sys.argv) == 3:
-    year = int(sys.argv[1])
-    if year > 2016 or year < 2014:
-      print USAGE
-      sys.exit(0)
-    month = int(sys.argv[2])
-    if month > 12 or month <= 0:
-      print USAGE
-      sys.exit(0)
-    if len(sys.argv) == 3:
-      days = range(1, calendar.monthrange(year, month)[1] + 1)
-    else:
-      day = int(sys.argv[3])
-      if day > 31 or day <=0:
-        print USAGE
-        sys.exit(0)
-      days = [day]
-  elif len(sys.argv) != 1:
-    print USAGE
-    sys.exit(0)
-  else:
-    yesterday = datetime.date.today() - datetime.timedelta(days=1)
-    year = yesterday.year
-    month = yesterday.month
-    days = [yesterday.day]
-
-  response = urllib2.urlopen(BUILDER_LIST_URL)
-  builders = [builder['name'] for builder in json.load(response)['builders']]
-  success_rates = CalculateSuccessRates(year, month, days, builders)
-  UploadToPerfDashboard(success_rates)
-
-
-def _UpdateSuccessRatesWithResult(
-    success_rates, results, date_dict_str, builder):
-  count = int(results['count'])
-  if count == 0:
-    return
-  success_count = count - int(results['failure_count'])
-  success_rates.setdefault(date_dict_str, {})
-  success_rates[date_dict_str].setdefault(builder, {
-      'count': 0,
-      'success_count': 0
-  })
-  success_rates[date_dict_str][builder]['count'] += count
-  success_rates[date_dict_str][builder]['success_count'] += success_count
-
-def _SummarizeSuccessRates(success_rates):
-  overall_success_rates = []
-  for day, results in success_rates.iteritems():
-    success_rate_sum = 0
-    success_rate_count = 0
-    for rates in results.values():
-      if rates['count'] == 0:
-        continue
-      success_rate_sum += (
-          float(rates['success_count']) / float(rates['count']))
-      success_rate_count += 1
-    overall_success_rates.append(
-        [day, float(success_rate_sum) / float(success_rate_count)])
-  return overall_success_rates
-
-
-def UploadToPerfDashboard(success_rates):
-  for success_rate in success_rates:
-    date_str = ('%s-%s-%s' %
-        (success_rate[0][0:4], success_rate[0][4:6], success_rate[0][6:8]))
-    dashboard_data = {
-        'master': 'WaterfallStats',
-        'bot': 'ChromiumPerf',
-        'point_id': int(success_rate[0]),
-        'supplemental': {},
-        'versions': {
-            'date': date_str,
-        },
-        'chart_data': {
-            'benchmark_name': 'success_rate',
-            'benchmark_description': 'Success rates averaged per-builder',
-            'format_version': 1.0,
-            'charts': {
-                'overall_success_rate': {
-                    'summary': {
-                        'name': 'overall_success_rate',
-                        'type': 'scalar',
-                        'units': '%',
-                        'value': success_rate[1]
-                    }
-                }
-            }
-        }
-    }
-    url = 'https://chromeperf.appspot.com/add_point'
-    data = urllib.urlencode({'data': json.dumps(dashboard_data)})
-    urllib2.urlopen(url=url, data=data).read()
-
-
-def CalculateSuccessRates(year, month, days, builders):
-  success_rates = {}
-  for day in days:
-    for hour in range(24):
-      date_str = '%d-%02d-%02dT%02d:00Z' % (year, month, day, hour)
-      date_dict_str = '%d%02d%02d' % (year, month, day)
-      for builder in builders:
-        url = BUILDER_STATS_URL % (
-            urllib.quote(builder), urllib.quote(date_str))
-        response = urllib2.urlopen(url)
-        results = json.load(response)
-        _UpdateSuccessRatesWithResult(
-            success_rates, results, date_dict_str, builder)
-  return _SummarizeSuccessRates(success_rates)
-
-
-if __name__ == "__main__":
-  main()
diff --git a/catapult/base/util/perfbot_stats/chrome_perf_stats_unittest.py b/catapult/base/util/perfbot_stats/chrome_perf_stats_unittest.py
deleted file mode 100644
index 7bb8ed6..0000000
--- a/catapult/base/util/perfbot_stats/chrome_perf_stats_unittest.py
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/usr/bin/env python2.7
-# Copyright 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import unittest
-
-from perfbot_stats import chrome_perf_stats
-
-
-class TestChromePerfStats(unittest.TestCase):
-
-  def testUpdateSuccessRatesWithResult(self):
-    success_rates = {}
-    chrome_perf_stats._UpdateSuccessRatesWithResult(
-        success_rates,
-        {'count': 0},
-        'invalid_date_str',
-        'invalid_builder')
-    self.assertDictEqual({}, success_rates)
-    chrome_perf_stats._UpdateSuccessRatesWithResult(
-      success_rates,
-      {'count': 5, 'failure_count': 3},
-      '20151010',
-      'android_nexus_10')
-    self.assertDictEqual(
-        {'20151010':
-            {'android_nexus_10':
-                {'count': 5, 'success_count': 2}
-            }
-        },
-        success_rates)
-    chrome_perf_stats._UpdateSuccessRatesWithResult(
-      success_rates,
-      {'count': 5, 'failure_count': 4},
-      '20151010',
-      'android_nexus_4')
-    self.assertDictEqual(
-        {'20151010':
-            {'android_nexus_10':
-                {'count': 5, 'success_count': 2},
-             'android_nexus_4':
-                {'count': 5, 'success_count': 1}
-            }
-        },
-        success_rates)
-    chrome_perf_stats._UpdateSuccessRatesWithResult(
-      success_rates,
-      {'count': 5, 'failure_count': 0},
-      '20151009',
-      'win_xp')
-    self.assertDictEqual(
-        {'20151010':
-            {'android_nexus_10':
-                {'count': 5, 'success_count': 2},
-             'android_nexus_4':
-                {'count': 5, 'success_count': 1}
-            },
-        '20151009':
-            {'win_xp':
-                {'count': 5, 'success_count': 5}
-            }
-        },
-        success_rates)
-  def testSummarizeSuccessRates(self):
-    rates = chrome_perf_stats._SummarizeSuccessRates(
-        {'20151010':
-            {'android_nexus_10':
-                {'count': 5, 'success_count': 2},
-             'android_nexus_4':
-                {'count': 5, 'success_count': 3}
-            },
-        '20151009':
-            {'win_xp':
-                {'count': 5, 'success_count': 5}
-            }
-        })
-    self.assertListEqual([['20151010', 0.5], ['20151009', 1.0]], rates)
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/catapult/base/util/perfbot_stats/chrome_perf_step_timings.py b/catapult/base/util/perfbot_stats/chrome_perf_step_timings.py
deleted file mode 100755
index 2b017c4..0000000
--- a/catapult/base/util/perfbot_stats/chrome_perf_step_timings.py
+++ /dev/null
@@ -1,155 +0,0 @@
-#!/usr/bin/env python2.7
-# Copyright 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Script to pull chromium.perf step timings from chrome-infra-stats API.
-
-Currently this pulls the list of steps per builder. For each step, if it is not
-a setup step, we get the step stats for the last 20 runs for that builder.
-
-The API documentation for chrome-infra-stats is at:
-https://apis-explorer.appspot.com/apis-explorer/?
-   base=https://chrome-infra-stats.appspot.com/_ah/api#p/
-"""
-
-import calendar
-import csv
-import json
-import sys
-import urllib
-import urllib2
-from dateutil import parser as dateparser
-from datetime import datetime, timedelta
-
-BUILDER_STEPS_URL = ('https://chrome-infra-stats.appspot.com/_ah/api/stats/v1/'
-                     'masters/chromium.perf/%s')
-
-STEP_ACTIVE_URL = ('https://chrome-infra-stats.appspot.com/_ah/api/stats/v1/'
-                   'steps/last/chromium.perf/%s/%s/1')
-
-STEP_STATS_URL = ('https://chrome-infra-stats.appspot.com/_ah/api/stats/v1/'
-                  'stats/last/chromium.perf/%s/%s/20')
-
-IGNORED_STEPS = [
-  'List Perf Tests',
-  'Sharded Perf Tests',
-  'authorize_adb_devices',
-  'bot_update',
-  'build__schedule__time__',
-  'clean local files',
-  'cleanup_temp',
-  'device_status_check',
-  'extract build',
-  'gclient runhooks',
-  'get compile targets for scripts',
-  'get perf test list',
-  'gsutil download_build_product',
-  'host_info',
-  'install ChromeShell.apk',
-  'json.output cache',
-  'json.output cache',
-  'overall__build__result__',
-  'overall__queued__time__',
-  'provision_devices',
-  'read test spec',
-  'rmtree build directory',
-  'setup_build',
-  'spawn_logcat_monitor',
-  'stack_tool_for_tombstones',
-  'stack_tool_with_logcat_dump',
-  'steps',
-  'test_report',
-  'unzip_build_product',
-  'update_scripts'
-]
-
-KNOWN_TESTERS_LIST = [
-    'Android Nexus4 Perf',
-    'Android Nexus5 Perf',
-    'Android Nexus6 Perf',
-    'Android Nexus10 Perf',
-    'Android Nexus7v2 Perf',
-    'Android One Perf',
-    'Linux Perf (1)',
-    'Linux Perf (2)',
-    'Linux Perf (3)',
-    'Linux Perf (4)',
-    'Linux Perf (5)',
-    'Mac 10.8 Perf (1)',
-    'Mac 10.8 Perf (2)',
-    'Mac 10.8 Perf (3)',
-    'Mac 10.8 Perf (4)',
-    'Mac 10.8 Perf (5)',
-    'Mac 10.9 Perf (1)',
-    'Mac 10.9 Perf (2)',
-    'Mac 10.9 Perf (3)',
-    'Mac 10.9 Perf (4)',
-    'Mac 10.9 Perf (5)',
-    'Win 7 ATI GPU Perf',
-    'Win 7 Intel GPU Perf',
-    'Win 7 Low-End Perf (1)',
-    'Win 7 Low-End Perf (2)',
-    'Win 7 Nvidia GPU Perf',
-    'Win 7 Perf (1)',
-    'Win 7 Perf (2)',
-    'Win 7 Perf (3)',
-    'Win 7 Perf (4)',
-    'Win 7 Perf (5)',
-    'Win 7 x64 Perf (1)',
-    'Win 7 x64 Perf (2)',
-    'Win 8 Perf (1)',
-    'Win 8 Perf (2)',
-    'Win XP Perf (1)',
-    'Win XP Perf (2)',
-    'Win XP Perf (3)',
-    'Win XP Perf (4)',
-    'Win XP Perf (5)'
-]
-
-USAGE = 'Usage: chrome-perf-step-timings.py <outfilename>'
-
-if len(sys.argv) != 2:
-  print USAGE
-  sys.exit(0)
-outfilename = sys.argv[1]
-
-threshold_time = datetime.now() - timedelta(days=2)
-
-col_names = [('builder', 'step', 'run_count', 'stddev', 'mean', 'maximum',
-             'median', 'seventyfive', 'ninety', 'ninetynine')]
-with open(outfilename, 'wb') as f:
-  writer = csv.writer(f)
-  writer.writerows(col_names)
-
-for builder in KNOWN_TESTERS_LIST:
-  step_timings = []
-  url = BUILDER_STEPS_URL % urllib.quote(builder)
-  response = urllib2.urlopen(url)
-  results = json.load(response)
-  steps = results['steps']
-  steps.sort() # to group tests and their references together.
-  for step in steps:
-    if step in IGNORED_STEPS:
-      continue
-    url = STEP_ACTIVE_URL % (urllib.quote(builder), urllib.quote(step))
-    response = urllib2.urlopen(url)
-    results = json.load(response)
-    if ('step_records' not in results.keys() or
-        len(results['step_records']) == 0):
-      continue
-    first_record = results['step_records'][0]
-    last_step_time = dateparser.parse(first_record['step_start'])
-    # ignore steps that did not run for more than 2 days
-    if last_step_time < threshold_time:
-      continue
-    url = STEP_STATS_URL % (urllib.quote(builder), urllib.quote(step))
-    response = urllib2.urlopen(url)
-    results = json.load(response)
-    step_timings.append([builder, step, results['count'], results['stddev'],
-                         results['mean'], results['maximum'], results['median'],
-                         results['seventyfive'], results['ninety'],
-                         results['ninetynine']])
-  with open(outfilename, 'ab') as f:
-    writer = csv.writer(f)
-    writer.writerows(step_timings)
diff --git a/catapult/base/util/run_tests.py b/catapult/base/util/run_tests.py
deleted file mode 100755
index 7394001..0000000
--- a/catapult/base/util/run_tests.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python2.7
-# Copyright 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import glob
-import os
-import subprocess
-import sys
-import unittest
-
-
-def main():
-
-  sys.path.append(os.path.dirname(__file__))
-  suite = unittest.TestLoader().discover(
-      os.path.dirname(__file__), pattern = '*_unittest.py')
-  result = unittest.TextTestRunner(verbosity=2).run(suite)
-  if result.wasSuccessful():
-    sys.exit(0)
-  else:
-    sys.exit(1)
-
-
-if __name__ == '__main__':
-  main()
diff --git a/catapult/bin/run_tests b/catapult/bin/run_tests
index 22e3a6b..407d321 100755
--- a/catapult/bin/run_tests
+++ b/catapult/bin/run_tests
@@ -18,7 +18,9 @@
     {'path': os.path.join(_CATAPULT_PATH, 'perf_insights', 'bin', 'run_tests')},
     {'path': os.path.join(
         _CATAPULT_PATH, 'catapult_build', 'bin', 'run_py_tests')},
-    {'path': os.path.join(_CATAPULT_PATH, 'base', 'util', 'run_tests.py')},
+    {'path': os.path.join(_CATAPULT_PATH, 'catapult_base', 'bin', 'run_tests')},
+    {'path': os.path.join(_CATAPULT_PATH, 'dependency_manager', 'bin', 'run_tests')},
+    {'path': os.path.join(_CATAPULT_PATH, 'telemetry', 'bin', 'run_tests')},
     {'path': os.path.join(_CATAPULT_PATH, 'third_party', 'vinn', 'run_test')},
     # TODO(anniesullie): Add dashboard tests when SDK issues are sorted out.
 ]
diff --git a/catapult/base/util/OWNERS b/catapult/catapult_base/OWNERS
similarity index 100%
rename from catapult/base/util/OWNERS
rename to catapult/catapult_base/OWNERS
diff --git a/catapult/catapult_base/PRESUBMIT.py b/catapult/catapult_base/PRESUBMIT.py
new file mode 100644
index 0000000..5a48a87
--- /dev/null
+++ b/catapult/catapult_base/PRESUBMIT.py
@@ -0,0 +1,31 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+def CheckChangeOnUpload(input_api, output_api):
+  return _CommonChecks(input_api, output_api)
+
+
+def CheckChangeOnCommit(input_api, output_api):
+  return _CommonChecks(input_api, output_api)
+
+
+def _CommonChecks(input_api, output_api):
+  results = []
+  results += input_api.RunTests(input_api.canned_checks.GetPylint(
+      input_api, output_api, extra_paths_list=_GetPathsToPrepend(input_api),
+      pylintrc='../pylintrc'))
+  return results
+
+
+def _GetPathsToPrepend(input_api):
+  project_dir = input_api.PresubmitLocalPath()
+  catapult_dir = input_api.os_path.join(project_dir, '..')
+  return [
+      project_dir,
+
+      input_api.os_path.join(catapult_dir, 'dependency_manager'),
+      input_api.os_path.join(catapult_dir, 'third_party', 'mock'),
+      input_api.os_path.join(catapult_dir, 'third_party', 'pyfakefs'),
+  ]
diff --git a/catapult/catapult_base/bin/run_tests b/catapult/catapult_base/bin/run_tests
new file mode 100755
index 0000000..7e4e880
--- /dev/null
+++ b/catapult/catapult_base/bin/run_tests
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+# Copyright (c) 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+
+_CATAPULT_PATH = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '..', '..'))
+
+_CATAPULT_BASE_PATH = os.path.abspath(
+    os.path.join(_CATAPULT_PATH, 'catapult_base'))
+
+
+def _RunTestsOrDie(top_level_dir):
+  exit_code = run_with_typ.Run(top_level_dir, path=[_CATAPULT_BASE_PATH])
+  if exit_code:
+    sys.exit(exit_code)
+
+
+def _AddToPathIfNeeded(path):
+  if path not in sys.path:
+    sys.path.insert(0, path)
+
+
+if __name__ == '__main__':
+  _AddToPathIfNeeded(_CATAPULT_PATH)
+
+  from hooks import install
+  if '--no-install-hooks' in sys.argv:
+    sys.argv.remove('--no-install-hooks')
+  else:
+    install.InstallHooks()
+
+  from catapult_build import run_with_typ
+  _RunTestsOrDie(_CATAPULT_BASE_PATH)
+  sys.exit(0)
diff --git a/catapult/catapult_base/catapult_base/__init__.py b/catapult/catapult_base/catapult_base/__init__.py
new file mode 100644
index 0000000..25e0fda
--- /dev/null
+++ b/catapult/catapult_base/catapult_base/__init__.py
@@ -0,0 +1,26 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# All files in this directory should be moved to catapult/base/ after moving
+# to the new repo.
+
+import os
+import sys
+
+from catapult_base import util
+
+
+def _AddDirToPythonPath(*path_parts):
+  path = os.path.abspath(os.path.join(*path_parts))
+  if os.path.isdir(path) and path not in sys.path:
+    # Some call sites that use telemetry assume that sys.path[0] is the
+    # script's directory, so we add these extra paths right after it.
+    sys.path.insert(1, path)
+
+_AddDirToPythonPath(os.path.join(util.GetCatapultDir(), 'dependency_manager'))
+_AddDirToPythonPath(os.path.join(util.GetCatapultDir(), 'third_party', 'mock'))
+# mox3 is needed for pyfakefs usage, but not for pylint.
+_AddDirToPythonPath(os.path.join(util.GetCatapultDir(), 'third_party', 'mox3'))
+_AddDirToPythonPath(
+    os.path.join(util.GetCatapultDir(), 'third_party', 'pyfakefs'))
diff --git a/catapult/catapult_base/catapult_base/binary_manager.py b/catapult/catapult_base/catapult_base/binary_manager.py
new file mode 100644
index 0000000..ab0048e
--- /dev/null
+++ b/catapult/catapult_base/catapult_base/binary_manager.py
@@ -0,0 +1,51 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+
+import dependency_manager
+
+
+class BinaryManager(object):
+  """ This class is effectively a subclass of dependency_manager, but uses a
+      different number of arguments for FetchPath and LocalPath.
+  """
+
+  def __init__(self, configs):
+    self._dependency_manager = dependency_manager.DependencyManager(configs)
+
+  def FetchPath(self, binary_name, arch, os_name, os_version=None):
+    """ Return a path to the executable for <binary_name>, or None if not found.
+
+    Will attempt to download from cloud storage if needed.
+    """
+    platform = '%s_%s' % (os_name, arch)
+    if os_version:
+      try:
+        versioned_platform = '%s_%s_%s' % (os_name, os_version, arch)
+        return self._dependency_manager.FetchPath(
+            binary_name, versioned_platform)
+      except dependency_manager.NoPathFoundError:
+        logging.warning(
+            'Cannot find path for %s on platform %s. Falling back to %s.',
+            binary_name, versioned_platform, platform)
+    return self._dependency_manager.FetchPath(binary_name, platform)
+
+
+  def LocalPath(self, binary_name, arch, os_name, os_version=None):
+    """ Return a local path to the given binary name, or None if not found.
+
+    Will not download from cloud_storage.
+    """
+    platform = '%s_%s' % (os_name, arch)
+    if os_version:
+      try:
+        versioned_platform = '%s_%s_%s' % (os_name, os_version, arch)
+        return self._dependency_manager.LocalPath(
+            binary_name, versioned_platform)
+      except dependency_manager.NoPathFoundError:
+        logging.warning(
+            'Cannot find local path for %s on platform %s. Falling back to %s.',
+            binary_name, versioned_platform, platform)
+    return self._dependency_manager.LocalPath(binary_name, platform)
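A minimal usage sketch of the BinaryManager added above; the config file name and the 'dep_2' dependency are hypothetical (they mirror the unit test that follows):

```python
from dependency_manager import base_config
from catapult_base import binary_manager

# 'binary_dependencies.json' is a hypothetical dependency_manager config file;
# chrome_binaries.json in this directory has the same shape.
config = base_config.BaseConfig('binary_dependencies.json')
manager = binary_manager.BinaryManager([config])

# Tries the versioned platform 'android_l_x86' first and falls back to
# 'android_x86' if no versioned entry exists.
path = manager.FetchPath('dep_2', 'x86', 'android', os_version='l')

# LocalPath never downloads; it only checks local_paths entries.
local_path = manager.LocalPath('dep_2', 'x86_64', 'linux')
```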
diff --git a/catapult/catapult_base/catapult_base/binary_manager_unittest.py b/catapult/catapult_base/catapult_base/binary_manager_unittest.py
new file mode 100644
index 0000000..ae0cf48
--- /dev/null
+++ b/catapult/catapult_base/catapult_base/binary_manager_unittest.py
@@ -0,0 +1,216 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import os
+
+from pyfakefs import fake_filesystem_unittest
+from dependency_manager import base_config
+from dependency_manager import exceptions
+
+from catapult_base import binary_manager
+
+class BinaryManagerTest(fake_filesystem_unittest.TestCase):
+  # TODO(aiolos): disable cloud storage use during this test.
+
+  def setUp(self):
+    self.setUpPyfakefs()
+    # pylint: disable=bad-continuation
+    self.expected_dependencies = {
+        'dep_1': {
+          'cloud_storage_base_folder': 'dependencies/fake_config',
+          'cloud_storage_bucket': 'chrome-tel',
+          'file_info': {
+            'linux_x86_64': {
+              'cloud_storage_hash': '661ce936b3276f7ec3d687ab62be05b96d796f21',
+              'download_path': 'bin/linux/x86_64/dep_1'
+            },
+            'mac_x86_64': {
+              'cloud_storage_hash': 'c7b1bfc6399dc683058e88dac1ef0f877edea74b',
+              'download_path': 'bin/mac/x86_64/dep_1'
+            },
+            'win_AMD64': {
+              'cloud_storage_hash': 'ac4fee89a51662b9d920bce443c19b9b2929b198',
+              'download_path': 'bin/win/AMD64/dep_1.exe'
+            },
+            'win_x86': {
+              'cloud_storage_hash': 'e246e183553ea26967d7b323ea269e3357b9c837',
+              'download_path': 'bin/win/x86/dep_1.exe'
+            }
+          }
+        },
+        'dep_2': {
+          'cloud_storage_base_folder': 'dependencies/fake_config',
+          'cloud_storage_bucket': 'chrome-tel',
+          'file_info': {
+            'linux_x86_64': {
+              'cloud_storage_hash': '13a57efae9a680ac0f160b3567e02e81f4ac493c',
+              'download_path': 'bin/linux/x86_64/dep_2',
+              'local_paths': [
+                  '../../example/location/linux/dep_2',
+                  '../../example/location2/linux/dep_2'
+              ]
+            },
+            'mac_x86_64': {
+              'cloud_storage_hash': 'd10c0ddaa8586b20449e951216bee852fa0f8850',
+              'download_path': 'bin/mac/x86_64/dep_2',
+              'local_paths': [
+                  '../../example/location/mac/dep_2',
+                  '../../example/location2/mac/dep_2'
+              ]
+            },
+            'win_AMD64': {
+              'cloud_storage_hash': 'fd5b417f78c7f7d9192a98967058709ded1d399d',
+              'download_path': 'bin/win/AMD64/dep_2.exe',
+              'local_paths': [
+                  '../../example/location/win64/dep_2',
+                  '../../example/location2/win64/dep_2'
+              ]
+            },
+            'win_x86': {
+              'cloud_storage_hash': 'cf5c8fe920378ce30d057e76591d57f63fd31c1a',
+              'download_path': 'bin/win/x86/dep_2.exe',
+              'local_paths': [
+                  '../../example/location/win32/dep_2',
+                  '../../example/location2/win32/dep_2'
+              ]
+            },
+            'android_k_x64': {
+              'cloud_storage_hash': '09177be2fed00b44df0e777932828425440b23b3',
+              'download_path': 'bin/android/x64/k/dep_2.apk',
+              'local_paths': [
+                  '../../example/location/android_x64/k/dep_2',
+                  '../../example/location2/android_x64/k/dep_2'
+              ]
+            },
+            'android_l_x64': {
+              'cloud_storage_hash': '09177be2fed00b44df0e777932828425440b23b3',
+              'download_path': 'bin/android/x64/l/dep_2.apk',
+              'local_paths': [
+                  '../../example/location/android_x64/l/dep_2',
+                  '../../example/location2/android_x64/l/dep_2'
+              ]
+            },
+            'android_k_x86': {
+              'cloud_storage_hash': 'bcf02af039713a48b69b89bd7f0f9c81ed8183a4',
+              'download_path': 'bin/android/x86/k/dep_2.apk',
+              'local_paths': [
+                  '../../example/location/android_x86/k/dep_2',
+                  '../../example/location2/android_x86/k/dep_2'
+              ]
+            },
+            'android_l_x86': {
+              'cloud_storage_hash': '12a74cec071017ba11655b5740b8a58e2f52a219',
+              'download_path': 'bin/android/x86/l/dep_2.apk',
+              'local_paths': [
+                  '../../example/location/android_x86/l/dep_2',
+                  '../../example/location2/android_x86/l/dep_2'
+              ]
+            }
+          }
+        },
+        'dep_3': {
+          'file_info': {
+            'linux_x86_64': {
+              'local_paths': [
+                  '../../example/location/linux/dep_3',
+                  '../../example/location2/linux/dep_3'
+              ]
+            },
+            'mac_x86_64': {
+              'local_paths': [
+                  '../../example/location/mac/dep_3',
+                  '../../example/location2/mac/dep_3'
+              ]
+            },
+            'win_AMD64': {
+              'local_paths': [
+                  '../../example/location/win64/dep_3',
+                  '../../example/location2/win64/dep_3'
+              ]
+            },
+            'win_x86': {
+              'local_paths': [
+                  '../../example/location/win32/dep_3',
+                  '../../example/location2/win32/dep_3'
+              ]
+            }
+          }
+        }
+    }
+    # pylint: enable=bad-continuation
+    fake_config = {
+        'config_type': 'BaseConfig',
+        'dependencies': self.expected_dependencies
+    }
+
+    base_config_path = os.path.join(os.path.dirname(__file__),
+                                    'example_config.json')
+    self.fs.CreateFile(base_config_path, contents=json.dumps(fake_config))
+    self.base_config = base_config.BaseConfig(base_config_path)
+    linux_file = os.path.join(
+        os.path.dirname(base_config_path),
+        os.path.join('..', '..', 'example', 'location2', 'linux', 'dep_2'))
+    android_file = os.path.join(
+        os.path.dirname(base_config_path),
+        '..', '..', 'example', 'location', 'android_x86', 'l', 'dep_2')
+    self.expected_dep2_linux_file = os.path.abspath(linux_file)
+    self.expected_dep2_android_file = os.path.abspath(android_file)
+    self.fs.CreateFile(self.expected_dep2_linux_file)
+    self.fs.CreateFile(self.expected_dep2_android_file)
+
+  def tearDown(self):
+    self.tearDownPyfakefs()
+
+  def testInitializationNoConfig(self):
+    with self.assertRaises(ValueError):
+      binary_manager.BinaryManager(None)
+
+  def testInitializationMissingConfig(self):
+    with self.assertRaises(ValueError):
+      binary_manager.BinaryManager(os.path.join('missing', 'path'))
+
+  def testInitializationWithConfig(self):
+    with self.assertRaises(ValueError):
+      manager = binary_manager.BinaryManager(self.base_config)
+    manager = binary_manager.BinaryManager([self.base_config])
+    self.assertItemsEqual(self.expected_dependencies,
+                          manager._dependency_manager._lookup_dict)
+
+  def testSuccessfulFetchPathNoOsVersion(self):
+    manager = binary_manager.BinaryManager([self.base_config])
+    found_path = manager.FetchPath('dep_2', 'x86_64', 'linux')
+    self.assertEqual(self.expected_dep2_linux_file, found_path)
+
+  def testSuccessfulFetchPathOsVersion(self):
+    manager = binary_manager.BinaryManager([self.base_config])
+    found_path = manager.FetchPath('dep_2', 'x86', 'android', 'l')
+    self.assertEqual(self.expected_dep2_android_file, found_path)
+
+  def testSuccessfulFetchPathFallbackToNoOsVersion(self):
+    manager = binary_manager.BinaryManager([self.base_config])
+    found_path = manager.FetchPath('dep_2', 'x86_64', 'linux', 'fake_version')
+    self.assertEqual(self.expected_dep2_linux_file, found_path)
+
+  def testFailedFetchPathMissingDep(self):
+    manager = binary_manager.BinaryManager([self.base_config])
+    with self.assertRaises(exceptions.NoPathFoundError):
+      manager.FetchPath('missing_dep', 'x86_64', 'linux')
+    with self.assertRaises(exceptions.NoPathFoundError):
+      manager.FetchPath('missing_dep', 'x86', 'android', 'l')
+    with self.assertRaises(exceptions.NoPathFoundError):
+      manager.FetchPath('dep_1', 'bad_arch', 'linux')
+    with self.assertRaises(exceptions.NoPathFoundError):
+      manager.FetchPath('dep_1', 'x86', 'bad_os')
+
+  def testSuccessfulLocalPathNoOsVersion(self):
+    manager = binary_manager.BinaryManager([self.base_config])
+    found_path = manager.LocalPath('dep_2', 'x86_64', 'linux')
+    self.assertEqual(self.expected_dep2_linux_file, found_path)
+
+  def testSuccessfulLocalPathOsVersion(self):
+    manager = binary_manager.BinaryManager([self.base_config])
+    found_path = manager.LocalPath('dep_2', 'x86', 'android', 'l')
+    self.assertEqual(self.expected_dep2_android_file, found_path)
+
diff --git a/catapult/catapult_base/catapult_base/chrome_binaries.json b/catapult/catapult_base/catapult_base/chrome_binaries.json
new file mode 100644
index 0000000..e5f5c95
--- /dev/null
+++ b/catapult/catapult_base/catapult_base/chrome_binaries.json
@@ -0,0 +1,61 @@
+{
+  "config_type": "BaseConfig",
+  "dependencies": {
+    "chrome_stable": {
+      "cloud_storage_base_folder": "binary_dependencies",
+      "cloud_storage_bucket": "chrome-telemetry",
+      "file_info": {
+        "android_k_arm64-v8a": {
+          "cloud_storage_hash": "08f7e4dcdeba512043bfe7fe714c571bccc45954",
+          "download_path": "bin/reference_build/android_k_arm64-v8a/ChromeStable.apk",
+          "version_in_cs": "48.0.2564.95"
+        },
+        "android_k_armeabi-v7a": {
+          "cloud_storage_hash": "798efed0a7ae1460fe297ba1bd16308585c9e6ca",
+          "download_path": "bin/reference_build/android_k_armeabi-v7a/ChromeStable.apk",
+          "version_in_cs": "48.0.2564.95"
+        },
+        "android_l_arm64-v8a": {
+          "cloud_storage_hash": "e054040d12a20d6c7c4b847d46e254821b4d4532",
+          "download_path": "bin/reference_build/android_l_arm64-v8a/ChromeStable.apk",
+          "version_in_cs": "48.0.2564.95"
+        },
+        "android_l_armeabi-v7a": {
+          "cloud_storage_hash": "1438bb54959d9fbf891616392959b778df9e62f9",
+          "download_path": "bin/reference_build/android_l_armeabi-v7a/ChromeStable.apk",
+          "version_in_cs": "48.0.2564.95"
+        }
+      }
+    },
+    "reference_build": {
+      "cloud_storage_base_folder": "binary_dependencies",
+      "cloud_storage_bucket": "chrome-telemetry",
+      "file_info": {
+        "linux_x86_64": {
+          "cloud_storage_hash": "60e5d69c84c2701c4fdb4384fa7085858cf752c7",
+          "download_path": "bin/reference_build/chrome-linux64.zip",
+          "path_within_archive": "chrome-precise64/chrome",
+          "version_in_cs": "48.0.2564.109"
+        },
+        "mac_x86_64": {
+          "cloud_storage_hash": "6518400501b59456058d479a57ead0186599b693",
+          "download_path": "bin/reference_builds/chrome-mac64.zip",
+          "path_within_archive": "chrome-mac/Google Chrome.app/Contents/MacOS/Google Chrome",
+          "version_in_cs": "48.0.2564.109"
+        },
+        "win_AMD64": {
+          "cloud_storage_hash": "c8813938a530d3df5e22107538ce6ec374c8ba88",
+          "download_path": "bin\\reference_build\\chrome-win64.zip",
+          "path_within_archive": "chrome-win64\\chrome.exe",
+          "version_in_cs": "48.0.2564.109"
+        },
+        "win_x86": {
+          "cloud_storage_hash": "9c3f8d9ef4165503fe89fa01b0dfd0ae629d41b3",
+          "download_path": "bin\\reference_build\\chrome-win32.zip",
+          "path_within_archive": "chrome-win32\\chrome.exe",
+          "version_in_cs": "48.0.2564.109"
+        }
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/catapult/catapult_base/catapult_base/cloud_storage.py b/catapult/catapult_base/catapult_base/cloud_storage.py
new file mode 100644
index 0000000..c7a6a0d
--- /dev/null
+++ b/catapult/catapult_base/catapult_base/cloud_storage.py
@@ -0,0 +1,370 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Wrappers for gsutil, for basic interaction with Google Cloud Storage."""
+
+import collections
+import contextlib
+import hashlib
+import logging
+import os
+import shutil
+import stat
+import subprocess
+import sys
+import tempfile
+import time
+
+try:
+  import fcntl
+except ImportError:
+  fcntl = None
+
+from catapult_base import util
+
+
+PUBLIC_BUCKET = 'chromium-telemetry'
+PARTNER_BUCKET = 'chrome-partner-telemetry'
+INTERNAL_BUCKET = 'chrome-telemetry'
+TELEMETRY_OUTPUT = 'chrome-telemetry-output'
+
+# Use an ordered dict to make sure that the bucket aliases are ordered from
+# the most open to the most restrictive.
+BUCKET_ALIASES = collections.OrderedDict((
+    ('public', PUBLIC_BUCKET),
+    ('partner', PARTNER_BUCKET),
+    ('internal', INTERNAL_BUCKET),
+    ('output', TELEMETRY_OUTPUT),
+))
+
+BUCKET_ALIAS_NAMES = BUCKET_ALIASES.keys()
+
+
+_GSUTIL_PATH = os.path.join(util.GetCatapultDir(), 'third_party', 'gsutil',
+                            'gsutil')
+
+# TODO(tbarzic): A workaround for http://crbug.com/386416 and
+#     http://crbug.com/359293. See |_RunCommand|.
+_CROS_GSUTIL_HOME_WAR = '/home/chromeos-test/'
+
+
+# If the environment variable DISABLE_CLOUD_STORAGE_IO is set to '1', any
+# method calls that invoke cloud storage network IO will throw exceptions.
+DISABLE_CLOUD_STORAGE_IO = 'DISABLE_CLOUD_STORAGE_IO'
+
+
+
+class CloudStorageError(Exception):
+
+  @staticmethod
+  def _GetConfigInstructions():
+    command = _GSUTIL_PATH
+    if util.IsRunningOnCrosDevice():
+      command = 'HOME=%s %s' % (_CROS_GSUTIL_HOME_WAR, _GSUTIL_PATH)
+    return ('To configure your credentials:\n'
+            '  1. Run "%s config" and follow its instructions.\n'
+            '  2. If you have a @google.com account, use that account.\n'
+            '  3. For the project-id, just enter 0.' % command)
+
+
+class PermissionError(CloudStorageError):
+
+  def __init__(self):
+    super(PermissionError, self).__init__(
+        'Attempted to access a file from Cloud Storage but you don\'t '
+        'have permission. ' + self._GetConfigInstructions())
+
+
+class CredentialsError(CloudStorageError):
+
+  def __init__(self):
+    super(CredentialsError, self).__init__(
+        'Attempted to access a file from Cloud Storage but you have no '
+        'configured credentials. ' + self._GetConfigInstructions())
+
+
+class CloudStorageIODisabled(CloudStorageError):
+  pass
+
+
+class NotFoundError(CloudStorageError):
+  pass
+
+
+class ServerError(CloudStorageError):
+  pass
+
+
+# TODO(tonyg/dtu): Can this be replaced with distutils.spawn.find_executable()?
+def _FindExecutableInPath(relative_executable_path, *extra_search_paths):
+  search_paths = list(extra_search_paths) + os.environ['PATH'].split(os.pathsep)
+  for search_path in search_paths:
+    executable_path = os.path.join(search_path, relative_executable_path)
+    if util.IsExecutable(executable_path):
+      return executable_path
+  return None
+
+
+def _EnsureExecutable(gsutil):
+  """chmod +x if gsutil is not executable."""
+  st = os.stat(gsutil)
+  if not st.st_mode & stat.S_IEXEC:
+    os.chmod(gsutil, st.st_mode | stat.S_IEXEC)
+
+
+def _RunCommand(args):
+  # On cros device, as telemetry is running as root, home will be set to /root/,
+  # which is not writable. gsutil will attempt to create a download tracker dir
+  # in home dir and fail. To avoid this, override HOME dir to something writable
+  # when running on cros device.
+  #
+  # TODO(tbarzic): Figure out a better way to handle gsutil on cros.
+  #     http://crbug.com/386416, http://crbug.com/359293.
+  gsutil_env = None
+  if util.IsRunningOnCrosDevice():
+    gsutil_env = os.environ.copy()
+    gsutil_env['HOME'] = _CROS_GSUTIL_HOME_WAR
+
+  if os.name == 'nt':
+    # If Windows, prepend python. Python scripts aren't directly executable.
+    args = [sys.executable, _GSUTIL_PATH] + args
+  else:
+    # Don't do it on POSIX, in case someone is using a shell script to redirect.
+    args = [_GSUTIL_PATH] + args
+    _EnsureExecutable(_GSUTIL_PATH)
+
+  if (os.getenv(DISABLE_CLOUD_STORAGE_IO) == '1' and
+      args[0] not in ('help', 'hash', 'version')):
+    raise CloudStorageIODisabled(
+        "Environment variable DISABLE_CLOUD_STORAGE_IO is set to 1. "
+        'Command %s is not allowed to run' % args)
+
+  gsutil = subprocess.Popen(args, stdout=subprocess.PIPE,
+                            stderr=subprocess.PIPE, env=gsutil_env)
+  stdout, stderr = gsutil.communicate()
+
+  if gsutil.returncode:
+    if stderr.startswith((
+        'You are attempting to access protected data with no configured',
+        'Failure: No handler was ready to authenticate.')):
+      raise CredentialsError()
+    if ('status=403' in stderr or 'status 403' in stderr or
+        '403 Forbidden' in stderr):
+      raise PermissionError()
+    if (stderr.startswith('InvalidUriError') or 'No such object' in stderr or
+        'No URLs matched' in stderr or 'One or more URLs matched no' in stderr):
+      raise NotFoundError(stderr)
+    if '500 Internal Server Error' in stderr:
+      raise ServerError(stderr)
+    raise CloudStorageError(stderr)
+
+  return stdout
+
+
+def List(bucket):
+  query = 'gs://%s/' % bucket
+  stdout = _RunCommand(['ls', query])
+  return [url[len(query):] for url in stdout.splitlines()]
+
+
+def Exists(bucket, remote_path):
+  try:
+    _RunCommand(['ls', 'gs://%s/%s' % (bucket, remote_path)])
+    return True
+  except NotFoundError:
+    return False
+
+
+def Move(bucket1, bucket2, remote_path):
+  url1 = 'gs://%s/%s' % (bucket1, remote_path)
+  url2 = 'gs://%s/%s' % (bucket2, remote_path)
+  logging.info('Moving %s to %s', url1, url2)
+  _RunCommand(['mv', url1, url2])
+
+
+def Copy(bucket_from, bucket_to, remote_path_from, remote_path_to):
+  """Copy a file from one location in CloudStorage to another.
+
+  Args:
+      bucket_from: The cloud storage bucket where the file is currently located.
+      bucket_to: The cloud storage bucket it is being copied to.
+      remote_path_from: The file path where the file is located in bucket_from.
+      remote_path_to: The file path it is being copied to in bucket_to.
+
+  It should: cause no changes locally or to the starting file, and will
+  overwrite any existing files in the destination location.
+  """
+  url1 = 'gs://%s/%s' % (bucket_from, remote_path_from)
+  url2 = 'gs://%s/%s' % (bucket_to, remote_path_to)
+  logging.info('Copying %s to %s', url1, url2)
+  _RunCommand(['cp', url1, url2])
+
+
+def Delete(bucket, remote_path):
+  url = 'gs://%s/%s' % (bucket, remote_path)
+  logging.info('Deleting %s', url)
+  _RunCommand(['rm', url])
+
+
+def Get(bucket, remote_path, local_path):
+  with _PseudoFileLock(local_path):
+    _GetLocked(bucket, remote_path, local_path)
+
+
+@contextlib.contextmanager
+def _PseudoFileLock(base_path):
+  pseudo_lock_path = '%s.pseudo_lock' % base_path
+  _CreateDirectoryIfNecessary(os.path.dirname(pseudo_lock_path))
+  # This is somewhat of a racy hack because we don't have a good
+  # cross-platform file lock. If we get one, this should be refactored
+  # to use it.
+  while os.path.exists(pseudo_lock_path):
+    time.sleep(0.1)
+  fd = os.open(pseudo_lock_path, os.O_RDONLY | os.O_CREAT)
+  if fcntl:
+    fcntl.flock(fd, fcntl.LOCK_EX)
+  try:
+    yield
+  finally:
+    if fcntl:
+      fcntl.flock(fd, fcntl.LOCK_UN)
+    try:
+      os.close(fd)
+      os.remove(pseudo_lock_path)
+    except OSError:
+      # We don't care if the pseudo-lock gets removed elsewhere before we have
+      # a chance to do so.
+      pass
+
+
+def _CreateDirectoryIfNecessary(directory):
+  if not os.path.exists(directory):
+    os.makedirs(directory)
+
+
+def _GetLocked(bucket, remote_path, local_path):
+  url = 'gs://%s/%s' % (bucket, remote_path)
+  logging.info('Downloading %s to %s', url, local_path)
+  _CreateDirectoryIfNecessary(os.path.dirname(local_path))
+  with tempfile.NamedTemporaryFile(
+      dir=os.path.dirname(local_path),
+      delete=False) as partial_download_path:
+    try:
+      # Windows won't download to an open file.
+      partial_download_path.close()
+      try:
+        _RunCommand(['cp', url, partial_download_path.name])
+      except ServerError:
+        logging.info('Cloud Storage server error, retrying download')
+        _RunCommand(['cp', url, partial_download_path.name])
+      shutil.move(partial_download_path.name, local_path)
+    finally:
+      if os.path.exists(partial_download_path.name):
+        os.remove(partial_download_path.name)
+
+
+def Insert(bucket, remote_path, local_path, publicly_readable=False):
+  """ Upload file in |local_path| to cloud storage.
+  Args:
+    bucket: the google cloud storage bucket name.
+    remote_path: the remote file path in |bucket|.
+    local_path: path of the local file to be uploaded.
+    publicly_readable: whether the uploaded file has publicly readable
+    permission.
+
+  Returns:
+    The url where the file is uploaded to.
+  """
+  url = 'gs://%s/%s' % (bucket, remote_path)
+  command_and_args = ['cp']
+  extra_info = ''
+  if publicly_readable:
+    command_and_args += ['-a', 'public-read']
+    extra_info = ' (publicly readable)'
+  command_and_args += [local_path, url]
+  logging.info('Uploading %s to %s%s', local_path, url, extra_info)
+  _RunCommand(command_and_args)
+  return 'https://console.developers.google.com/m/cloudstorage/b/%s/o/%s' % (
+      bucket, remote_path)
+
+
+def GetIfHashChanged(cs_path, download_path, bucket, file_hash):
+  """Downloads |download_path| to |file_path| if |file_path| doesn't exist or
+     it's hash doesn't match |file_hash|.
+
+  Returns:
+    True if the binary was changed.
+  Raises:
+    CredentialsError if the user has no configured credentials.
+    PermissionError if the user does not have permission to access the bucket.
+    NotFoundError if the file is not in the given bucket in cloud_storage.
+  """
+  with _PseudoFileLock(download_path):
+    if (os.path.exists(download_path) and
+        CalculateHash(download_path) == file_hash):
+      return False
+    _GetLocked(bucket, cs_path, download_path)
+    return True
+
+
+def GetIfChanged(file_path, bucket):
+  """Gets the file at file_path if it has a hash file that doesn't match or
+  if there is no local copy of file_path, but there is a hash file for it.
+
+  Returns:
+    True if the binary was changed.
+  Raises:
+    CredentialsError if the user has no configured credentials.
+    PermissionError if the user does not have permission to access the bucket.
+    NotFoundError if the file is not in the given bucket in cloud_storage.
+  """
+  with _PseudoFileLock(file_path):
+    hash_path = file_path + '.sha1'
+    if not os.path.exists(hash_path):
+      logging.warning('Hash file not found: %s', hash_path)
+      return False
+
+    expected_hash = ReadHash(hash_path)
+    if os.path.exists(file_path) and CalculateHash(file_path) == expected_hash:
+      return False
+    _GetLocked(bucket, expected_hash, file_path)
+    return True
+
+
+def GetFilesInDirectoryIfChanged(directory, bucket):
+  """ Scan the directory for .sha1 files, and download them from the given
+  bucket in cloud storage if the local and remote hash don't match or
+  there is no local copy.
+  """
+  if not os.path.isdir(directory):
+    raise ValueError(
+        '%s does not exist. Must provide a valid directory path.' % directory)
+  # Don't allow the root directory to be a serving_dir.
+  if directory == os.path.abspath(os.sep):
+    raise ValueError('Trying to serve root directory from HTTP server.')
+  for dirpath, _, filenames in os.walk(directory):
+    for filename in filenames:
+      path_name, extension = os.path.splitext(
+          os.path.join(dirpath, filename))
+      if extension != '.sha1':
+        continue
+      GetIfChanged(path_name, bucket)
+
+
+def CalculateHash(file_path):
+  """Calculates and returns the hash of the file at file_path."""
+  sha1 = hashlib.sha1()
+  with open(file_path, 'rb') as f:
+    while True:
+      # Read in 1mb chunks, so it doesn't all have to be loaded into memory.
+      chunk = f.read(1024 * 1024)
+      if not chunk:
+        break
+      sha1.update(chunk)
+  return sha1.hexdigest()
+
+
+def ReadHash(hash_path):
+  with open(hash_path, 'rb') as f:
+    return f.read(1024).rstrip()
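A minimal sketch of the hash-based download flow above; the local path and the choice of PUBLIC_BUCKET are hypothetical:

```python
from catapult_base import cloud_storage

# Hypothetical local path; 'bin/some_tool.sha1' must contain the SHA-1 of the
# object to fetch (the remote object is addressed by its hash).
binary_path = 'bin/some_tool'
changed = cloud_storage.GetIfChanged(binary_path, cloud_storage.PUBLIC_BUCKET)
if changed:
  print('Fetched new copy, hash:', cloud_storage.CalculateHash(binary_path))
```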
diff --git a/catapult/catapult_base/catapult_base/cloud_storage_unittest.py b/catapult/catapult_base/catapult_base/cloud_storage_unittest.py
new file mode 100644
index 0000000..fe2d24d
--- /dev/null
+++ b/catapult/catapult_base/catapult_base/cloud_storage_unittest.py
@@ -0,0 +1,238 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+import unittest
+
+import mock
+from pyfakefs import fake_filesystem_unittest
+
+
+from catapult_base import cloud_storage
+from catapult_base import util
+
+
+def _FakeReadHash(_):
+  return 'hashthis!'
+
+
+def _FakeCalulateHashMatchesRead(_):
+  return 'hashthis!'
+
+
+def _FakeCalulateHashNewHash(_):
+  return 'omgnewhash'
+
+
+class CloudStorageUnitTest(fake_filesystem_unittest.TestCase):
+
+  def setUp(self):
+    self.original_environ = os.environ.copy()
+    os.environ['DISABLE_CLOUD_STORAGE_IO'] = ''
+    self.setUpPyfakefs()
+    self.fs.CreateFile(
+        os.path.join(util.GetCatapultDir(), 'third_party', 'gsutil', 'gsutil'))
+
+  def CreateFiles(self, file_paths):
+    for f in file_paths:
+      self.fs.CreateFile(f)
+
+  def tearDown(self):
+    self.tearDownPyfakefs()
+    os.environ = self.original_environ
+
+  def _FakeRunCommand(self, cmd):
+    pass
+
+  def _FakeGet(self, bucket, remote_path, local_path):
+    pass
+
+  def _AssertRunCommandRaisesError(self, communicate_strs, error):
+    with mock.patch('catapult_base.cloud_storage.subprocess.Popen') as popen:
+      p_mock = mock.Mock()
+      popen.return_value = p_mock
+      p_mock.returncode = 1
+      for stderr in communicate_strs:
+        p_mock.communicate.return_value = ('', stderr)
+        self.assertRaises(error, cloud_storage._RunCommand, [])
+
+  def testRunCommandCredentialsError(self):
+    strs = ['You are attempting to access protected data with no configured',
+            'Failure: No handler was ready to authenticate.']
+    self._AssertRunCommandRaisesError(strs, cloud_storage.CredentialsError)
+
+  def testRunCommandPermissionError(self):
+    strs = ['status=403', 'status 403', '403 Forbidden']
+    self._AssertRunCommandRaisesError(strs, cloud_storage.PermissionError)
+
+  def testRunCommandNotFoundError(self):
+    strs = ['InvalidUriError', 'No such object', 'No URLs matched',
+            'One or more URLs matched no', 'InvalidUriError']
+    self._AssertRunCommandRaisesError(strs, cloud_storage.NotFoundError)
+
+  def testRunCommandServerError(self):
+    strs = ['500 Internal Server Error']
+    self._AssertRunCommandRaisesError(strs, cloud_storage.ServerError)
+
+  def testRunCommandGenericError(self):
+    strs = ['Random string']
+    self._AssertRunCommandRaisesError(strs, cloud_storage.CloudStorageError)
+
+  def testInsertCreatesValidCloudUrl(self):
+    orig_run_command = cloud_storage._RunCommand
+    try:
+      cloud_storage._RunCommand = self._FakeRunCommand
+      remote_path = 'test-remote-path.html'
+      local_path = 'test-local-path.html'
+      cloud_url = cloud_storage.Insert(cloud_storage.PUBLIC_BUCKET,
+                                       remote_path, local_path)
+      self.assertEqual('https://console.developers.google.com/m/cloudstorage'
+                       '/b/chromium-telemetry/o/test-remote-path.html',
+                       cloud_url)
+    finally:
+      cloud_storage._RunCommand = orig_run_command
+
+  @mock.patch('catapult_base.cloud_storage.subprocess')
+  def testExistsReturnsFalse(self, subprocess_mock):
+    p_mock = mock.Mock()
+    subprocess_mock.Popen.return_value = p_mock
+    p_mock.communicate.return_value = (
+        '',
+        'CommandException: One or more URLs matched no objects.\n')
+    p_mock.returncode_result = 1
+    self.assertFalse(cloud_storage.Exists('fake bucket',
+                                          'fake remote path'))
+
+  @mock.patch('catapult_base.cloud_storage.CalculateHash')
+  @mock.patch('catapult_base.cloud_storage._GetLocked')
+  @mock.patch('catapult_base.cloud_storage._PseudoFileLock')
+  @mock.patch('catapult_base.cloud_storage.os.path')
+  def testGetIfHashChanged(self, path_mock, unused_lock_mock, get_mock,
+                           calc_hash_mock):
+    path_mock.exists.side_effect = [False, True, True]
+    calc_hash_mock.return_value = 'hash'
+
+    # The file at |local_path| doesn't exist. We should download file from cs.
+    ret = cloud_storage.GetIfHashChanged(
+        'remote_path', 'local_path', 'cs_bucket', 'hash')
+    self.assertTrue(ret)
+    get_mock.assert_called_once_with('cs_bucket', 'remote_path', 'local_path')
+    get_mock.reset_mock()
+    self.assertFalse(calc_hash_mock.call_args)
+    calc_hash_mock.reset_mock()
+
+    # A local file exists at |local_path| but has the wrong hash.
+    # We should download file from cs.
+    ret = cloud_storage.GetIfHashChanged(
+        'remote_path', 'local_path', 'cs_bucket', 'new_hash')
+    self.assertTrue(ret)
+    get_mock.assert_called_once_with('cs_bucket', 'remote_path', 'local_path')
+    get_mock.reset_mock()
+    calc_hash_mock.assert_called_once_with('local_path')
+    calc_hash_mock.reset_mock()
+
+    # Downloaded file exists locally and has the right hash. Don't download.
+    ret = cloud_storage.GetIfHashChanged(
+        'remote_path', 'local_path', 'cs_bucket', 'hash')
+    self.assertFalse(get_mock.call_args)
+    self.assertFalse(ret)
+    calc_hash_mock.reset_mock()
+    get_mock.reset_mock()
+
+  @mock.patch('catapult_base.cloud_storage._PseudoFileLock')
+  def testGetIfChanged(self, unused_lock_mock):
+    orig_get = cloud_storage._GetLocked
+    orig_read_hash = cloud_storage.ReadHash
+    orig_calculate_hash = cloud_storage.CalculateHash
+    cloud_storage.ReadHash = _FakeReadHash
+    cloud_storage.CalculateHash = _FakeCalculateHashMatchesRead
+    file_path = 'test-file-path.wpr'
+    hash_path = file_path + '.sha1'
+    try:
+      cloud_storage._GetLocked = self._FakeGet
+      # hash_path doesn't exist.
+      self.assertFalse(cloud_storage.GetIfChanged(file_path,
+                                                  cloud_storage.PUBLIC_BUCKET))
+      # hash_path exists, but file_path doesn't.
+      self.CreateFiles([hash_path])
+      self.assertTrue(cloud_storage.GetIfChanged(file_path,
+                                                 cloud_storage.PUBLIC_BUCKET))
+      # hash_path and file_path exist, and have same hash.
+      self.CreateFiles([file_path])
+      self.assertFalse(cloud_storage.GetIfChanged(file_path,
+                                                  cloud_storage.PUBLIC_BUCKET))
+      # hash_path and file_path exist, and have different hashes.
+      cloud_storage.CalculateHash = _FakeCalculateHashNewHash
+      self.assertTrue(cloud_storage.GetIfChanged(file_path,
+                                                 cloud_storage.PUBLIC_BUCKET))
+    finally:
+      cloud_storage._GetLocked = orig_get
+      cloud_storage.CalculateHash = orig_calculate_hash
+      cloud_storage.ReadHash = orig_read_hash
+
+  @unittest.skipIf(sys.platform.startswith('win'),
+                   'https://github.com/catapult-project/catapult/issues/1861')
+  def testGetFilesInDirectoryIfChanged(self):
+    self.CreateFiles([
+        'real_dir_path/dir1/1file1.sha1',
+        'real_dir_path/dir1/1file2.txt',
+        'real_dir_path/dir1/1file3.sha1',
+        'real_dir_path/dir2/2file.txt',
+        'real_dir_path/dir3/3file1.sha1'])
+
+    def IncrementFilesUpdated(*_):
+      IncrementFilesUpdated.files_updated += 1
+    IncrementFilesUpdated.files_updated = 0
+    orig_get_if_changed = cloud_storage.GetIfChanged
+    cloud_storage.GetIfChanged = IncrementFilesUpdated
+    try:
+      self.assertRaises(ValueError, cloud_storage.GetFilesInDirectoryIfChanged,
+                        os.path.abspath(os.sep), cloud_storage.PUBLIC_BUCKET)
+      self.assertEqual(0, IncrementFilesUpdated.files_updated)
+      self.assertRaises(ValueError, cloud_storage.GetFilesInDirectoryIfChanged,
+                        'fake_dir_path', cloud_storage.PUBLIC_BUCKET)
+      self.assertEqual(0, IncrementFilesUpdated.files_updated)
+      cloud_storage.GetFilesInDirectoryIfChanged('real_dir_path',
+                                                 cloud_storage.PUBLIC_BUCKET)
+      self.assertEqual(3, IncrementFilesUpdated.files_updated)
+    finally:
+      cloud_storage.GetIfChanged = orig_get_if_changed
+
+  def testCopy(self):
+    orig_run_command = cloud_storage._RunCommand
+
+    def AssertCorrectRunCommandArgs(args):
+      self.assertEqual(expected_args, args)
+    cloud_storage._RunCommand = AssertCorrectRunCommandArgs
+    expected_args = ['cp', 'gs://bucket1/remote_path1',
+                     'gs://bucket2/remote_path2']
+    try:
+      cloud_storage.Copy('bucket1', 'bucket2', 'remote_path1', 'remote_path2')
+    finally:
+      cloud_storage._RunCommand = orig_run_command
+
+
+  @mock.patch('catapult_base.cloud_storage._PseudoFileLock')
+  def testDisableCloudStorageIo(self, unused_lock_mock):
+    os.environ['DISABLE_CLOUD_STORAGE_IO'] = '1'
+    dir_path = 'real_dir_path'
+    self.fs.CreateDirectory(dir_path)
+    file_path = os.path.join(dir_path, 'file1')
+    file_path_sha = file_path + '.sha1'
+    self.CreateFiles([file_path, file_path_sha])
+    with open(file_path_sha, 'w') as f:
+      f.write('hash1234')
+    with self.assertRaises(cloud_storage.CloudStorageIODisabled):
+      cloud_storage.Copy('bucket1', 'bucket2', 'remote_path1', 'remote_path2')
+    with self.assertRaises(cloud_storage.CloudStorageIODisabled):
+      cloud_storage.Get('bucket', 'foo', file_path)
+    with self.assertRaises(cloud_storage.CloudStorageIODisabled):
+      cloud_storage.GetIfChanged(file_path, 'foo')
+    with self.assertRaises(cloud_storage.CloudStorageIODisabled):
+      cloud_storage.GetIfHashChanged('bar', file_path, 'bucket', 'hash1234')
+    with self.assertRaises(cloud_storage.CloudStorageIODisabled):
+      cloud_storage.Insert('bucket', 'foo', file_path)
+    with self.assertRaises(cloud_storage.CloudStorageIODisabled):
+      cloud_storage.GetFilesInDirectoryIfChanged(dir_path, 'bucket')
diff --git a/catapult/catapult_base/catapult_base/dependency_util.py b/catapult/catapult_base/catapult_base/dependency_util.py
new file mode 100644
index 0000000..440173f
--- /dev/null
+++ b/catapult/catapult_base/catapult_base/dependency_util.py
@@ -0,0 +1,13 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+def GetChromeApkOsVersion(version_name):
+  version = version_name[0]
+  assert version.isupper(), (
+      'Version name %s must start with an uppercase letter.' % version_name)
+  if version < 'L':
+    return 'k'
+  elif version > 'M':
+    return 'n'
+  return 'l'
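GetChromeApkOsVersion above maps the first letter of a Chrome APK version name to an Android dependency tag: letters before 'L' give 'k', 'L' through 'M' give 'l', and letters after 'M' give 'n'. An illustrative sketch of calling it (the version names below are made-up examples, not values from this patch):

    from catapult_base import dependency_util

    # 'K...' sorts before 'L', so KitKat-era builds map to 'k'.
    print(dependency_util.GetChromeApkOsVersion('KitKatBuild'))       # 'k'
    # 'M...' falls in the L..M range, so it maps to 'l'.
    print(dependency_util.GetChromeApkOsVersion('MarshmallowBuild'))  # 'l'
    # 'N...' sorts after 'M', so it maps to 'n'.
    print(dependency_util.GetChromeApkOsVersion('NougatBuild'))       # 'n'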
diff --git a/catapult/catapult_base/catapult_base/lock.py b/catapult/catapult_base/catapult_base/lock.py
new file mode 100644
index 0000000..aa9a095
--- /dev/null
+++ b/catapult/catapult_base/catapult_base/lock.py
@@ -0,0 +1,117 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import contextlib
+import os
+
+LOCK_EX = None  # Exclusive lock
+LOCK_SH = None  # Shared lock
+LOCK_NB = None  # Non-blocking (LockException is raised if resource is locked)
+
+
+class LockException(Exception):
+  pass
+
+
+if os.name == 'nt':
+  import win32con    # pylint: disable=import-error
+  import win32file   # pylint: disable=import-error
+  import pywintypes  # pylint: disable=import-error
+  LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK
+  LOCK_SH = 0  # the default
+  LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY
+  _OVERLAPPED = pywintypes.OVERLAPPED()
+elif os.name == 'posix':
+  import fcntl       # pylint: disable=import-error
+  LOCK_EX = fcntl.LOCK_EX
+  LOCK_SH = fcntl.LOCK_SH
+  LOCK_NB = fcntl.LOCK_NB
+
+
+@contextlib.contextmanager
+def FileLock(target_file, flags):
+  """ Lock the target file. Similar to AcquireFileLock but allow user to write:
+        with FileLock(f, LOCK_EX):
+           ...do stuff on file f without worrying about race condition
+    Args: see AcquireFileLock's documentation.
+  """
+  AcquireFileLock(target_file, flags)
+  try:
+    yield
+  finally:
+    ReleaseFileLock(target_file)
+
+
+def AcquireFileLock(target_file, flags):
+  """ Lock the target file. Note that if |target_file| is closed, the lock is
+    automatically released.
+  Args:
+    target_file: file handle of the file to acquire the lock on.
+    flags: any of LOCK_EX, LOCK_SH, LOCK_NB, or a bitwise OR combination of
+      these flags.
+  """
+  assert flags in (
+      LOCK_EX, LOCK_SH, LOCK_NB, LOCK_EX | LOCK_NB, LOCK_SH | LOCK_NB)
+  if os.name == 'nt':
+    _LockImplWin(target_file, flags)
+  elif os.name == 'posix':
+    _LockImplPosix(target_file, flags)
+  else:
+    raise NotImplementedError('%s is not supported' % os.name)
+
+
+def ReleaseFileLock(target_file):
+  """ Unlock the target file.
+  Args:
+    target_file: file handle of the file to release the lock on.
+  """
+  if os.name == 'nt':
+    _UnlockImplWin(target_file)
+  elif os.name == 'posix':
+    _UnlockImplPosix(target_file)
+  else:
+    raise NotImplementedError('%s is not supported' % os.name)
+
+# These implementations are based on
+# http://code.activestate.com/recipes/65203/
+
+def _LockImplWin(target_file, flags):
+  hfile = win32file._get_osfhandle(target_file.fileno())
+  try:
+    win32file.LockFileEx(hfile, flags, 0, -0x10000, _OVERLAPPED)
+  except pywintypes.error, exc_value:
+    if exc_value[0] == 33:
+      raise LockException('Error trying to acquire lock of %s: %s' %
+                          (target_file.name, exc_value[2]))
+    else:
+      raise
+
+
+def _UnlockImplWin(target_file):
+  hfile = win32file._get_osfhandle(target_file.fileno())
+  try:
+    win32file.UnlockFileEx(hfile, 0, -0x10000, _OVERLAPPED)
+  except pywintypes.error, exc_value:
+    if exc_value[0] == 158:
+      # error: (158, 'UnlockFileEx', 'The segment is already unlocked.')
+      # To match the 'posix' implementation, silently ignore this error
+      pass
+    else:
+      # Q:  Are there exceptions/codes we should be dealing with here?
+      raise
+
+
+def _LockImplPosix(target_file, flags):
+  try:
+    fcntl.flock(target_file.fileno(), flags)
+  except IOError, exc_value:
+    if exc_value[0] == 11 or exc_value[0] == 35:
+      raise LockException('Error trying to acquire lock of %s: %s' %
+                          (target_file.name, exc_value[1]))
+    else:
+      raise
+
+
+def _UnlockImplPosix(target_file):
+  fcntl.flock(target_file.fileno(), fcntl.LOCK_UN)
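The module above exposes one cross-platform API: AcquireFileLock/ReleaseFileLock plus the FileLock context manager, with LOCK_EX, LOCK_SH, and LOCK_NB mapped to the platform primitives. A minimal usage sketch (the lock-file path is a hypothetical example):

    from catapult_base import lock

    with open('/tmp/example.lock', 'w') as f:
      # Block until an exclusive lock is available; it is released when the
      # with-block exits (or when the file is closed).
      with lock.FileLock(f, lock.LOCK_EX):
        f.write('only one process writes this at a time\n')

      # Non-blocking variant: raises LockException if another process holds it.
      try:
        lock.AcquireFileLock(f, lock.LOCK_EX | lock.LOCK_NB)
        lock.ReleaseFileLock(f)
      except lock.LockException:
        pass  # Lock is busy; skip instead of waiting.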
diff --git a/catapult/catapult_base/catapult_base/lock_unittest.py b/catapult/catapult_base/catapult_base/lock_unittest.py
new file mode 100644
index 0000000..e59422d
--- /dev/null
+++ b/catapult/catapult_base/catapult_base/lock_unittest.py
@@ -0,0 +1,165 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import multiprocessing
+import os
+import time
+import unittest
+import tempfile
+
+
+from catapult_base import lock
+
+
+def _AppendTextToFile(file_name):
+  with open(file_name, 'a') as f:
+    lock.AcquireFileLock(f, lock.LOCK_EX)
+    # Sleep 100 ms to increase the chance of another process trying to acquire
+    # the lock of the file at the same time.
+    time.sleep(0.1)
+    f.write('Start')
+    for _ in range(10000):
+      f.write('*')
+    f.write('End')
+
+
+def _ReadFileWithSharedLockBlockingThenWrite(read_file, write_file):
+  with open(read_file, 'r') as f:
+    lock.AcquireFileLock(f, lock.LOCK_SH)
+    content = f.read()
+    with open(write_file, 'a') as f2:
+      lock.AcquireFileLock(f2, lock.LOCK_EX)
+      f2.write(content)
+
+
+def _ReadFileWithExclusiveLockNonBlocking(target_file, status_file):
+  with open(target_file, 'r') as f:
+    try:
+      lock.AcquireFileLock(f, lock.LOCK_EX | lock.LOCK_NB)
+      with open(status_file, 'w') as f2:
+        f2.write('LockException was not raised')
+    except lock.LockException:
+      with open(status_file, 'w') as f2:
+        f2.write('LockException raised')
+
+
+class FileLockTest(unittest.TestCase):
+  def setUp(self):
+    tf = tempfile.NamedTemporaryFile(delete=False)
+    tf.close()
+    self.temp_file_path = tf.name
+
+  def tearDown(self):
+    os.remove(self.temp_file_path)
+
+  def testExclusiveLock(self):
+    processes = []
+    for _ in range(10):
+      p = multiprocessing.Process(
+          target=_AppendTextToFile, args=(self.temp_file_path,))
+      p.start()
+      processes.append(p)
+    for p in processes:
+      p.join()
+
+    # If the file lock works as expected, there should be 10 atomic writes of
+    # 'Start***...***End' to the file in some order, which lead to the final
+    # file content as below.
+    expected_file_content = ''.join((['Start'] + ['*']*10000 + ['End']) * 10)
+    with open(self.temp_file_path, 'r') as f:
+      # Use assertTrue instead of assertEquals since the strings are big;
+      # otherwise assertEquals's failure message would contain huge strings.
+      self.assertTrue(expected_file_content == f.read())
+
+  def testSharedLock(self):
+    tf = tempfile.NamedTemporaryFile(delete=False)
+    tf.close()
+    temp_write_file = tf.name
+    try:
+      with open(self.temp_file_path, 'w') as f:
+        f.write('0123456789')
+      with open(self.temp_file_path, 'r') as f:
+        # First, acquire a shared lock on temp_file_path
+        lock.AcquireFileLock(f, lock.LOCK_SH)
+
+        processes = []
+        # Create 10 processes that also try to acquire a shared lock on
+        # temp_file_path, then append its content to temp_write_file.
+        for _ in range(10):
+          p = multiprocessing.Process(
+              target=_ReadFileWithSharedLockBlockingThenWrite,
+              args=(self.temp_file_path, temp_write_file))
+          p.start()
+          processes.append(p)
+        for p in processes:
+          p.join()
+
+      # temp_write_file should contain 10 copies of temp_file_path's content.
+      with open(temp_write_file, 'r') as f:
+        self.assertEquals('0123456789'*10, f.read())
+    finally:
+      os.remove(temp_write_file)
+
+  def testNonBlockingLockAcquiring(self):
+    tf = tempfile.NamedTemporaryFile(delete=False)
+    tf.close()
+    temp_status_file = tf.name
+    try:
+      with open(self.temp_file_path, 'w') as f:
+        lock.AcquireFileLock(f, lock.LOCK_EX)
+        p = multiprocessing.Process(
+            target=_ReadFileWithExclusiveLockNonBlocking,
+            args=(self.temp_file_path, temp_status_file))
+        p.start()
+        p.join()
+      with open(temp_status_file, 'r') as f:
+        self.assertEquals('LockException raised', f.read())
+    finally:
+      os.remove(temp_status_file)
+
+  def testUnlockBeforeClosingFile(self):
+    tf = tempfile.NamedTemporaryFile(delete=False)
+    tf.close()
+    temp_status_file = tf.name
+    try:
+      with open(self.temp_file_path, 'r') as f:
+        lock.AcquireFileLock(f, lock.LOCK_SH)
+        lock.ReleaseFileLock(f)
+        p = multiprocessing.Process(
+            target=_ReadFileWithExclusiveLockNonBlocking,
+            args=(self.temp_file_path, temp_status_file))
+        p.start()
+        p.join()
+      with open(temp_status_file, 'r') as f:
+        self.assertEquals('LockException was not raised', f.read())
+    finally:
+      os.remove(temp_status_file)
+
+  def testContextualLock(self):
+    tf = tempfile.NamedTemporaryFile(delete=False)
+    tf.close()
+    temp_status_file = tf.name
+    try:
+      with open(self.temp_file_path, 'r') as f:
+        with lock.FileLock(f, lock.LOCK_EX):
+          # Within this block, accessing self.temp_file_path from another
+          # process should raise exception.
+          p = multiprocessing.Process(
+              target=_ReadFileWithExclusiveLockNonBlocking,
+              args=(self.temp_file_path, temp_status_file))
+          p.start()
+          p.join()
+          with open(temp_status_file, 'r') as f:
+            self.assertEquals('LockException raised', f.read())
+
+        # Accessing self.temp_file_path here should not raise exception.
+        p = multiprocessing.Process(
+            target=_ReadFileWithExclusiveLockNonBlocking,
+            args=(self.temp_file_path, temp_status_file))
+        p.start()
+        p.join()
+      with open(temp_status_file, 'r') as f:
+        self.assertEquals('LockException was not raised', f.read())
+    finally:
+      os.remove(temp_status_file)
diff --git a/catapult/catapult_base/catapult_base/refactor/__init__.py b/catapult/catapult_base/catapult_base/refactor/__init__.py
new file mode 100644
index 0000000..406dc6f
--- /dev/null
+++ b/catapult/catapult_base/catapult_base/refactor/__init__.py
@@ -0,0 +1,28 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Style-preserving Python code transforms.
+
+This module provides components for modifying and querying Python code. They can
+be used to build custom refactorings and linters.
+"""
+
+import functools
+import multiprocessing
+
+# pylint: disable=wildcard-import
+from catapult_base.refactor.annotated_symbol import *
+from catapult_base.refactor.module import Module
+
+
+def _TransformFile(transform, file_path):
+  module = Module(file_path)
+  result = transform(module)
+  module.Write()
+  return result
+
+
+def Transform(transform, file_paths):
+  transform = functools.partial(_TransformFile, transform)
+  return multiprocessing.Pool().map(transform, file_paths)
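Transform above fans a user-supplied function out over a list of files with a multiprocessing pool, handing each one a Module and writing it back only if the transform modified it. A sketch of a read-only transform under those assumptions (the file name is hypothetical; the transform must be a picklable top-level function):

    from catapult_base import refactor

    def ListImportPaths(module):
      # Collect the dotted path(s) of every import statement in the module.
      paths = []
      for statement in module.FindAll(refactor.Import):
        paths.extend(statement.paths)
      return paths

    # Returns one result per input file, in order.
    results = refactor.Transform(ListImportPaths, ['some_module.py'])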
diff --git a/catapult/catapult_base/catapult_base/refactor/annotated_symbol/__init__.py b/catapult/catapult_base/catapult_base/refactor/annotated_symbol/__init__.py
new file mode 100644
index 0000000..32858b5
--- /dev/null
+++ b/catapult/catapult_base/catapult_base/refactor/annotated_symbol/__init__.py
@@ -0,0 +1,71 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=wildcard-import
+from catapult_base.refactor.annotated_symbol.class_definition import *
+from catapult_base.refactor.annotated_symbol.function_definition import *
+from catapult_base.refactor.annotated_symbol.import_statement import *
+from catapult_base.refactor.annotated_symbol.reference import *
+from catapult_base.refactor import snippet
+
+
+__all__ = [
+    'Annotate',
+
+    'Class',
+    'Function',
+    'Import',
+    'Reference',
+]
+
+
+# Specific symbol types with extra methods for manipulating them.
+# Python's full grammar is here:
+# https://docs.python.org/2/reference/grammar.html
+
+# Annotated Symbols have an Annotate classmethod that takes a symbol type and
+# list of children, and returns an instance of that annotated Symbol.
+
+ANNOTATED_SYMBOLS = (
+    AsName,
+    Class,
+    DottedName,
+    ImportFrom,
+    ImportName,
+    Function,
+)
+
+
+# Unfortunately, some logical groupings are not represented by a node in the
+# parse tree. To work around this, some annotated Symbols have an Annotate
+# classmethod that takes and returns a list of Snippets instead.
+
+ANNOTATED_GROUPINGS = (
+    Reference,
+)
+
+
+def Annotate(f):
+  """Return the syntax tree of the given file."""
+  return _AnnotateNode(snippet.Snippetize(f))
+
+
+def _AnnotateNode(node):
+  if not isinstance(node, snippet.Symbol):
+    return node
+
+  children = map(_AnnotateNode, node.children)
+
+  for symbol_type in ANNOTATED_GROUPINGS:
+    annotated_grouping = symbol_type.Annotate(children)
+    if annotated_grouping:
+      children = annotated_grouping
+      break
+
+  for symbol_type in ANNOTATED_SYMBOLS:
+    annotated_symbol = symbol_type.Annotate(node.type, children)
+    if annotated_symbol:
+      return annotated_symbol
+
+  return snippet.Symbol(node.type, children)
diff --git a/catapult/catapult_base/catapult_base/refactor/annotated_symbol/base_symbol.py b/catapult/catapult_base/catapult_base/refactor/annotated_symbol/base_symbol.py
new file mode 100644
index 0000000..80fbc0b
--- /dev/null
+++ b/catapult/catapult_base/catapult_base/refactor/annotated_symbol/base_symbol.py
@@ -0,0 +1,36 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from catapult_base.refactor import snippet
+
+
+class AnnotatedSymbol(snippet.Symbol):
+  def __init__(self, symbol_type, children):
+    super(AnnotatedSymbol, self).__init__(symbol_type, children)
+    self._modified = False
+
+  @property
+  def modified(self):
+    if self._modified:
+      return True
+    return super(AnnotatedSymbol, self).modified
+
+  def __setattr__(self, name, value):
+    if (hasattr(self.__class__, name) and
+        isinstance(getattr(self.__class__, name), property)):
+      self._modified = True
+    return super(AnnotatedSymbol, self).__setattr__(name, value)
+
+  def Cut(self, child):
+    for i in xrange(len(self._children)):
+      if self._children[i] == child:
+        self._modified = True
+        del self._children[i]
+        break
+    else:
+      raise ValueError('%s is not in %s.' % (child, self))
+
+  def Paste(self, child):
+    self._modified = True
+    self._children.append(child)
diff --git a/catapult/catapult_base/catapult_base/refactor/annotated_symbol/class_definition.py b/catapult/catapult_base/catapult_base/refactor/annotated_symbol/class_definition.py
new file mode 100644
index 0000000..f164b4c
--- /dev/null
+++ b/catapult/catapult_base/catapult_base/refactor/annotated_symbol/class_definition.py
@@ -0,0 +1,51 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import symbol
+
+from catapult_base.refactor.annotated_symbol import base_symbol
+
+
+__all__ = [
+    'Class',
+]
+
+
+class Class(base_symbol.AnnotatedSymbol):
+  # pylint: disable=abstract-class-not-used
+
+  @classmethod
+  def Annotate(cls, symbol_type, children):
+    if symbol_type != symbol.stmt:
+      return None
+
+    compound_statement = children[0]
+    if compound_statement.type != symbol.compound_stmt:
+      return None
+
+    statement = compound_statement.children[0]
+    if statement.type == symbol.classdef:
+      return cls(statement.type, statement.children)
+    elif (statement.type == symbol.decorated and
+          statement.children[-1].type == symbol.classdef):
+      return cls(statement.type, statement.children)
+    else:
+      return None
+
+  @property
+  def suite(self):
+    # TODO: Complete.
+    raise NotImplementedError()
+
+  def FindChild(self, snippet_type, **kwargs):
+    return self.suite.FindChild(snippet_type, **kwargs)
+
+  def FindChildren(self, snippet_type):
+    return self.suite.FindChildren(snippet_type)
+
+  def Cut(self, child):
+    self.suite.Cut(child)
+
+  def Paste(self, child):
+    self.suite.Paste(child)
diff --git a/catapult/catapult_base/catapult_base/refactor/annotated_symbol/function_definition.py b/catapult/catapult_base/catapult_base/refactor/annotated_symbol/function_definition.py
new file mode 100644
index 0000000..acdc21e
--- /dev/null
+++ b/catapult/catapult_base/catapult_base/refactor/annotated_symbol/function_definition.py
@@ -0,0 +1,51 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import symbol
+
+from catapult_base.refactor.annotated_symbol import base_symbol
+
+
+__all__ = [
+    'Function',
+]
+
+
+class Function(base_symbol.AnnotatedSymbol):
+  # pylint: disable=abstract-class-not-used
+
+  @classmethod
+  def Annotate(cls, symbol_type, children):
+    if symbol_type != symbol.stmt:
+      return None
+
+    compound_statement = children[0]
+    if compound_statement.type != symbol.compound_stmt:
+      return None
+
+    statement = compound_statement.children[0]
+    if statement.type == symbol.funcdef:
+      return cls(statement.type, statement.children)
+    elif (statement.type == symbol.decorated and
+          statement.children[-1].type == symbol.funcdef):
+      return cls(statement.type, statement.children)
+    else:
+      return None
+
+  @property
+  def suite(self):
+    # TODO: Complete.
+    raise NotImplementedError()
+
+  def FindChild(self, snippet_type, **kwargs):
+    return self.suite.FindChild(snippet_type, **kwargs)
+
+  def FindChildren(self, snippet_type):
+    return self.suite.FindChildren(snippet_type)
+
+  def Cut(self, child):
+    self.suite.Cut(child)
+
+  def Paste(self, child):
+    self.suite.Paste(child)
diff --git a/catapult/catapult_base/catapult_base/refactor/annotated_symbol/import_statement.py b/catapult/catapult_base/catapult_base/refactor/annotated_symbol/import_statement.py
new file mode 100644
index 0000000..e598f5a
--- /dev/null
+++ b/catapult/catapult_base/catapult_base/refactor/annotated_symbol/import_statement.py
@@ -0,0 +1,322 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import itertools
+import keyword
+import symbol
+import token
+
+from catapult_base.refactor.annotated_symbol import base_symbol
+from catapult_base.refactor import snippet
+
+
+__all__ = [
+    'AsName',
+    'DottedName',
+    'Import',
+    'ImportFrom',
+    'ImportName',
+]
+
+
+class DottedName(base_symbol.AnnotatedSymbol):
+  @classmethod
+  def Annotate(cls, symbol_type, children):
+    if symbol_type != symbol.dotted_name:
+      return None
+    return cls(symbol_type, children)
+
+  @property
+  def value(self):
+    return ''.join(token_snippet.value for token_snippet in self._children)
+
+  @value.setter
+  def value(self, value):
+    value_parts = value.split('.')
+    for value_part in value_parts:
+      if keyword.iskeyword(value_part):
+        raise ValueError('%s is a reserved keyword.' % value_part)
+
+    # If we have too many children, cut the list down to size.
+    self._children = self._children[:len(value_parts)*2-1]
+
+    # Update child nodes.
+    for child, value_part in itertools.izip_longest(
+        self._children[::2], value_parts):
+      if child:
+        # Modify existing children. This helps preserve comments and spaces.
+        child.value = value_part
+      else:
+        # Add children as needed.
+        self._children.append(snippet.TokenSnippet.Create(token.DOT, '.'))
+        self._children.append(
+            snippet.TokenSnippet.Create(token.NAME, value_part))
+
+
+class AsName(base_symbol.AnnotatedSymbol):
+  @classmethod
+  def Annotate(cls, symbol_type, children):
+    if (symbol_type != symbol.dotted_as_name and
+        symbol_type != symbol.import_as_name):
+      return None
+    return cls(symbol_type, children)
+
+  @property
+  def name(self):
+    return self.children[0].value
+
+  @name.setter
+  def name(self, value):
+    self.children[0].value = value
+
+  @property
+  def alias(self):
+    if len(self.children) < 3:
+      return None
+    return self.children[2].value
+
+  @alias.setter
+  def alias(self, value):
+    if keyword.iskeyword(value):
+      raise ValueError('%s is a reserved keyword.' % value)
+
+    if value:
+      if len(self.children) < 3:
+        # If we currently have no alias, add one.
+        self.children.append(
+            snippet.TokenSnippet.Create(token.NAME, 'as', (0, 1)))
+        self.children.append(
+            snippet.TokenSnippet.Create(token.NAME, value, (0, 1)))
+      else:
+        # We already have an alias. Just update the value.
+        self.children[2].value = value
+    else:
+      # Removing the alias. Strip the "as foo".
+      self.children = [self.children[0]]
+
+
+class Import(base_symbol.AnnotatedSymbol):
+  """An import statement.
+
+  Example:
+    import a.b.c as d
+    from a.b import c as d
+
+  In these examples,
+    path == 'a.b.c'
+    alias == 'd'
+    root == 'a.b' (only for "from" imports)
+    module == 'c' (only for "from" imports)
+    name (read-only) == the name used by references to the module, which is the
+    alias if there is one, the full module path in "full" imports, and the
+    module name in "from" imports.
+  """
+  @property
+  def has_from(self):
+    """Returns True iff the import statment is of the form "from x import y"."""
+    raise NotImplementedError()
+
+  @property
+  def values(self):
+    raise NotImplementedError()
+
+  @property
+  def paths(self):
+    raise NotImplementedError()
+
+  @property
+  def aliases(self):
+    raise NotImplementedError()
+
+  @property
+  def path(self):
+    """The full dotted path of the module."""
+    raise NotImplementedError()
+
+  @path.setter
+  def path(self, value):
+    raise NotImplementedError()
+
+  @property
+  def alias(self):
+    """The alias, if the module is renamed with "as". None otherwise."""
+    raise NotImplementedError()
+
+  @alias.setter
+  def alias(self, value):
+    raise NotImplementedError()
+
+  @property
+  def name(self):
+    """The name used to reference this import's module."""
+    raise NotImplementedError()
+
+
+class ImportName(Import):
+  @classmethod
+  def Annotate(cls, symbol_type, children):
+    if symbol_type != symbol.import_stmt:
+      return None
+    if children[0].type != symbol.import_name:
+      return None
+    assert len(children) == 1
+    return cls(symbol_type, children[0].children)
+
+  @property
+  def has_from(self):
+    return False
+
+  @property
+  def values(self):
+    dotted_as_names = self.children[1]
+    return tuple((dotted_as_name.name, dotted_as_name.alias)
+                 for dotted_as_name in dotted_as_names.children[::2])
+
+  @property
+  def paths(self):
+    return tuple(path for path, _ in self.values)
+
+  @property
+  def aliases(self):
+    return tuple(alias for _, alias in self.values)
+
+  @property
+  def _dotted_as_name(self):
+    dotted_as_names = self.children[1]
+    if len(dotted_as_names.children) != 1:
+      raise NotImplementedError(
+          'This method only works if the statement has one import.')
+    return dotted_as_names.children[0]
+
+  @property
+  def path(self):
+    return self._dotted_as_name.name
+
+  @path.setter
+  def path(self, value):  # pylint: disable=arguments-differ
+    self._dotted_as_name.name = value
+
+  @property
+  def alias(self):
+    return self._dotted_as_name.alias
+
+  @alias.setter
+  def alias(self, value):  # pylint: disable=arguments-differ
+    self._dotted_as_name.alias = value
+
+  @property
+  def name(self):
+    if self.alias:
+      return self.alias
+    else:
+      return self.path
+
+
+class ImportFrom(Import):
+  @classmethod
+  def Annotate(cls, symbol_type, children):
+    if symbol_type != symbol.import_stmt:
+      return None
+    if children[0].type != symbol.import_from:
+      return None
+    assert len(children) == 1
+    return cls(symbol_type, children[0].children)
+
+  @property
+  def has_from(self):
+    return True
+
+  @property
+  def values(self):
+    try:
+      import_as_names = self.FindChild(symbol.import_as_names)
+    except ValueError:
+      return (('*', None),)
+
+    return tuple((import_as_name.name, import_as_name.alias)
+                 for import_as_name in import_as_names.children[::2])
+
+  @property
+  def paths(self):
+    module = self.module
+    return tuple('.'.join((module, name)) for name, _ in self.values)
+
+  @property
+  def aliases(self):
+    return tuple(alias for _, alias in self.values)
+
+  @property
+  def root(self):
+    return self.FindChild(symbol.dotted_name).value
+
+  @root.setter
+  def root(self, value):
+    self.FindChild(symbol.dotted_name).value = value
+
+  @property
+  def _import_as_name(self):
+    try:
+      import_as_names = self.FindChild(symbol.import_as_names)
+    except ValueError:
+      return None
+
+    if len(import_as_names.children) != 1:
+      raise NotImplementedError(
+          'This method only works if the statement has one import.')
+
+    return import_as_names.children[0]
+
+  @property
+  def module(self):
+    import_as_name = self._import_as_name
+    if import_as_name:
+      return import_as_name.name
+    else:
+      return '*'
+
+  @module.setter
+  def module(self, value):
+    if keyword.iskeyword(value):
+      raise ValueError('%s is a reserved keyword.' % value)
+
+    import_as_name = self._import_as_name
+    if value == '*':
+      # TODO: Implement this.
+      raise NotImplementedError()
+    else:
+      if import_as_name:
+        import_as_name.name = value
+      else:
+        # TODO: Implement this.
+        raise NotImplementedError()
+
+  @property
+  def path(self):
+    return '.'.join((self.root, self.module))
+
+  @path.setter
+  def path(self, value):  # pylint: disable=arguments-differ
+    self.root, _, self.module = value.rpartition('.')
+
+  @property
+  def alias(self):
+    import_as_name = self._import_as_name
+    if import_as_name:
+      return import_as_name.alias
+    else:
+      return None
+
+  @alias.setter
+  def alias(self, value):  # pylint: disable=arguments-differ
+    import_as_name = self._import_as_name
+    if not import_as_name:
+      raise NotImplementedError('Cannot change alias for "import *".')
+    import_as_name.alias = value
+
+  @property
+  def name(self):
+    if self.alias:
+      return self.alias
+    else:
+      return self.module
diff --git a/catapult/catapult_base/catapult_base/refactor/annotated_symbol/reference.py b/catapult/catapult_base/catapult_base/refactor/annotated_symbol/reference.py
new file mode 100644
index 0000000..b57c4f5
--- /dev/null
+++ b/catapult/catapult_base/catapult_base/refactor/annotated_symbol/reference.py
@@ -0,0 +1,75 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import itertools
+import symbol
+import token
+
+from catapult_base.refactor.annotated_symbol import base_symbol
+from catapult_base.refactor import snippet
+
+
+__all__ = [
+    'Reference',
+]
+
+
+class Reference(base_symbol.AnnotatedSymbol):
+  @classmethod
+  def Annotate(cls, nodes):
+    if not nodes:
+      return None
+    if nodes[0].type != symbol.atom:
+      return None
+    if not nodes[0].children or nodes[0].children[0].type != token.NAME:
+      return None
+
+    for i in xrange(1, len(nodes)):
+      if not nodes:
+        break
+      if nodes[i].type != symbol.trailer:
+        break
+      if len(nodes[i].children) != 2:
+        break
+      if (nodes[i].children[0].type != token.DOT or
+          nodes[i].children[1].type != token.NAME):
+        break
+    else:
+      i = len(nodes)
+
+    return [cls(nodes[:i])] + nodes[i:]
+
+  def __init__(self, children):
+    super(Reference, self).__init__(-1, children)
+
+  @property
+  def type_name(self):
+    return 'attribute_reference'
+
+  @property
+  def value(self):
+    return ''.join(token_snippet.value
+                   for child in self.children
+                   for token_snippet in child.children)
+
+  @value.setter
+  def value(self, value):
+    value_parts = value.split('.')
+
+    # If we have too many children, cut the list down to size.
+    self._children = self._children[:len(value_parts)]
+
+    # Update child nodes.
+    for child, value_part in itertools.izip_longest(
+        self._children, value_parts):
+      if child:
+        # Modify existing children. This helps preserve comments and spaces.
+        child.children[-1].value = value_part
+      else:
+        # Add children as needed.
+        token_snippets = [
+            snippet.TokenSnippet.Create(token.DOT, '.'),
+            snippet.TokenSnippet.Create(token.NAME, value_part),
+        ]
+        self._children.append(snippet.Symbol(symbol.trailer, token_snippets))
diff --git a/catapult/catapult_base/catapult_base/refactor/module.py b/catapult/catapult_base/catapult_base/refactor/module.py
new file mode 100644
index 0000000..00d7466
--- /dev/null
+++ b/catapult/catapult_base/catapult_base/refactor/module.py
@@ -0,0 +1,39 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from catapult_base.refactor import annotated_symbol
+
+
+class Module(object):
+
+  def __init__(self, file_path):
+    self._file_path = file_path
+
+    with open(self._file_path, 'r') as f:
+      self._snippet = annotated_symbol.Annotate(f)
+
+  @property
+  def file_path(self):
+    return self._file_path
+
+  @property
+  def modified(self):
+    return self._snippet.modified
+
+  def FindAll(self, snippet_type):
+    return self._snippet.FindAll(snippet_type)
+
+  def FindChildren(self, snippet_type):
+    return self._snippet.FindChildren(snippet_type)
+
+  def Write(self):
+    """Write modifications to the file."""
+    if not self.modified:
+      return
+
+    # Stringify before opening the file for writing.
+    # If we fail, we won't truncate the file.
+    string = str(self._snippet)
+    with open(self._file_path, 'w') as f:
+      f.write(string)
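Module above is the entry point into the annotated tree: it parses a file on construction, and Write() only rewrites the file when some snippet was modified. Combined with the Import properties defined earlier, a path rewrite could look roughly like this (the file name and package paths are hypothetical; statements importing several modules at once raise NotImplementedError):

    from catapult_base import refactor

    mod = refactor.Module('example.py')
    for statement in mod.FindAll(refactor.Import):
      if statement.path == 'old.package.widget':
        statement.path = 'new.package.widget'  # rewrites the dotted path
    mod.Write()  # no-op unless something above marked the tree as modified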
diff --git a/catapult/catapult_base/catapult_base/refactor/offset_token.py b/catapult/catapult_base/catapult_base/refactor/offset_token.py
new file mode 100644
index 0000000..5fa953e
--- /dev/null
+++ b/catapult/catapult_base/catapult_base/refactor/offset_token.py
@@ -0,0 +1,115 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+import itertools
+import token
+import tokenize
+
+
+def _Pairwise(iterable):
+  """s -> (None, s0), (s0, s1), (s1, s2), (s2, s3), ..."""
+  a, b = itertools.tee(iterable)
+  a = itertools.chain((None,), a)
+  return itertools.izip(a, b)
+
+
+class OffsetToken(object):
+  """A Python token with a relative position.
+
+  A token is represented by a type defined in Python's token module, a string
+  representing the content, and an offset. Using relative positions makes it
+  easy to insert and remove tokens.
+  """
+
+  def __init__(self, token_type, string, offset):
+    self._type = token_type
+    self._string = string
+    self._offset = offset
+
+  @property
+  def type(self):
+    return self._type
+
+  @property
+  def type_name(self):
+    return token.tok_name[self._type]
+
+  @property
+  def string(self):
+    return self._string
+
+  @string.setter
+  def string(self, value):
+    self._string = value
+
+  @property
+  def offset(self):
+    return self._offset
+
+  def __str__(self):
+    return str((self.type_name, self.string, self.offset))
+
+
+def Tokenize(f):
+  """Read tokens from a file-like object.
+
+  Args:
+    f: Any object that has a readline method.
+
+  Returns:
+    A collections.deque containing OffsetTokens. Deques are cheaper and easier
+    to manipulate sequentially than lists.
+  """
+  f.seek(0)
+  tokenize_tokens = tokenize.generate_tokens(f.readline)
+
+  offset_tokens = collections.deque()
+  for prev_token, next_token in _Pairwise(tokenize_tokens):
+    token_type, string, (srow, scol), _, _ = next_token
+    if not prev_token:
+      offset_tokens.append(OffsetToken(token_type, string, (0, 0)))
+    else:
+      erow, ecol = prev_token[3]
+      if erow == srow:
+        offset_tokens.append(OffsetToken(token_type, string, (0, scol - ecol)))
+      else:
+        offset_tokens.append(OffsetToken(
+            token_type, string, (srow - erow, scol)))
+
+  return offset_tokens
+
+
+def Untokenize(offset_tokens):
+  """Return the string representation of an iterable of OffsetTokens."""
+  # Make a copy. Don't modify the original.
+  offset_tokens = collections.deque(offset_tokens)
+
+  # Strip leading NL tokens.
+  while offset_tokens[0].type == tokenize.NL:
+    offset_tokens.popleft()
+
+  # Strip leading vertical whitespace.
+  first_token = offset_tokens.popleft()
+  # Take care not to modify the existing token. Create a new one in its place.
+  first_token = OffsetToken(first_token.type, first_token.string,
+                            (0, first_token.offset[1]))
+  offset_tokens.appendleft(first_token)
+
+  # Convert OffsetTokens to tokenize tokens.
+  tokenize_tokens = []
+  row = 1
+  col = 0
+  for t in offset_tokens:
+    offset_row, offset_col = t.offset
+    if offset_row == 0:
+      col += offset_col
+    else:
+      row += offset_row
+      col = offset_col
+    tokenize_tokens.append((t.type, t.string, (row, col), (row, col), None))
+
+  # tokenize can't handle whitespace before line continuations.
+  # So add a space.
+  return tokenize.untokenize(tokenize_tokens).replace('\\\n', ' \\\n')
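Because OffsetToken stores positions relative to the previous token, Tokenize followed by Untokenize should reproduce the original text, including comments and irregular spacing (apart from the leading-blank-line and line-continuation cases handled explicitly above). A small round-trip sketch, assuming a Python 2 interpreter to match the rest of this code:

    import StringIO

    from catapult_base.refactor import offset_token

    source = 'x = 1  # keep this comment\ny   =    2\n'
    tokens = offset_token.Tokenize(StringIO.StringIO(source))
    assert offset_token.Untokenize(tokens) == source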
diff --git a/catapult/catapult_base/catapult_base/refactor/snippet.py b/catapult/catapult_base/catapult_base/refactor/snippet.py
new file mode 100644
index 0000000..2277261
--- /dev/null
+++ b/catapult/catapult_base/catapult_base/refactor/snippet.py
@@ -0,0 +1,244 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import parser
+import symbol
+import sys
+import token
+import tokenize
+
+from catapult_base.refactor import offset_token
+
+
+class Snippet(object):
+  """A node in the Python parse tree.
+
+  The Python grammar is defined at:
+  https://docs.python.org/2/reference/grammar.html
+
+  There are two types of Snippets:
+    TokenSnippets are leaf nodes containing actual text.
+    Symbols are internal nodes representing higher-level groupings, and are
+        defined by the left-hand sides of the BNFs in the above link.
+  """
+  @property
+  def type(self):
+    raise NotImplementedError()
+
+  @property
+  def type_name(self):
+    raise NotImplementedError()
+
+  @property
+  def children(self):
+    """Return a list of this node's children."""
+    raise NotImplementedError()
+
+  @property
+  def tokens(self):
+    """Return a tuple of the tokens this Snippet contains."""
+    raise NotImplementedError()
+
+  def PrintTree(self, indent=0, stream=sys.stdout):
+    """Spew a pretty-printed parse tree. Mostly useful for debugging."""
+    raise NotImplementedError()
+
+  def __str__(self):
+    return offset_token.Untokenize(self.tokens)
+
+  def FindAll(self, snippet_type):
+    if isinstance(snippet_type, int):
+      if self.type == snippet_type:
+        yield self
+    else:
+      if isinstance(self, snippet_type):
+        yield self
+
+    for child in self.children:
+      for snippet in child.FindAll(snippet_type):
+        yield snippet
+
+  def FindChild(self, snippet_type, **kwargs):
+    for child in self.children:
+      if isinstance(snippet_type, int):
+        if child.type != snippet_type:
+          continue
+      else:
+        if not isinstance(child, snippet_type):
+          continue
+
+      for attribute, value in kwargs:
+        if getattr(child, attribute) != value:
+          break
+      else:
+        return child
+    raise ValueError('%s is not in %s. Children are: %s' %
+                     (snippet_type, self, self.children))
+
+  def FindChildren(self, snippet_type):
+    if isinstance(snippet_type, int):
+      for child in self.children:
+        if child.type == snippet_type:
+          yield child
+    else:
+      for child in self.children:
+        if isinstance(child, snippet_type):
+          yield child
+
+
+class TokenSnippet(Snippet):
+  """A Snippet containing a list of tokens.
+
+  A list of tokens may start with any number of comments and non-terminating
+  newlines, but must end with a syntactically meaningful token.
+  """
+
+  def __init__(self, token_type, tokens):
+    # For operators and delimiters, the TokenSnippet's type may be more specific
+    # than the type of the constituent token. E.g. the TokenSnippet type is
+    # token.DOT, but the token type is token.OP. This is because the parser
+    # has more context than the tokenizer.
+    self._type = token_type
+    self._tokens = tokens
+    self._modified = False
+
+  @classmethod
+  def Create(cls, token_type, string, offset=(0, 0)):
+    return cls(token_type,
+               [offset_token.OffsetToken(token_type, string, offset)])
+
+  @property
+  def type(self):
+    return self._type
+
+  @property
+  def type_name(self):
+    return token.tok_name[self.type]
+
+  @property
+  def value(self):
+    return self._tokens[-1].string
+
+  @value.setter
+  def value(self, value):
+    self._tokens[-1].string = value
+    self._modified = True
+
+  @property
+  def children(self):
+    return []
+
+  @property
+  def tokens(self):
+    return tuple(self._tokens)
+
+  @property
+  def modified(self):
+    return self._modified
+
+  def PrintTree(self, indent=0, stream=sys.stdout):
+    stream.write(' ' * indent)
+    if not self.tokens:
+      print >> stream, self.type_name
+      return
+
+    print >> stream, '%-4s' % self.type_name, repr(self.tokens[0].string)
+    for tok in self.tokens[1:]:
+      stream.write(' ' * indent)
+      print >> stream, ' ' * max(len(self.type_name), 4), repr(tok.string)
+
+
+class Symbol(Snippet):
+  """A Snippet containing sub-Snippets.
+
+  The possible types and type_names are defined in Python's symbol module."""
+
+  def __init__(self, symbol_type, children):
+    self._type = symbol_type
+    self._children = children
+
+  @property
+  def type(self):
+    return self._type
+
+  @property
+  def type_name(self):
+    return symbol.sym_name[self.type]
+
+  @property
+  def children(self):
+    return self._children
+
+  @children.setter
+  def children(self, value):  # pylint: disable=arguments-differ
+    self._children = value
+
+  @property
+  def tokens(self):
+    tokens = []
+    for child in self.children:
+      tokens += child.tokens
+    return tuple(tokens)
+
+  @property
+  def modified(self):
+    return any(child.modified for child in self.children)
+
+  def PrintTree(self, indent=0, stream=sys.stdout):
+    stream.write(' ' * indent)
+
+    # If there's only one child, collapse it onto the same line.
+    node = self
+    while len(node.children) == 1 and len(node.children[0].children) == 1:
+      print >> stream, node.type_name,
+      node = node.children[0]
+
+    print >> stream, node.type_name
+    for child in node.children:
+      child.PrintTree(indent + 2, stream)
+
+
+def Snippetize(f):
+  """Return the syntax tree of the given file."""
+  f.seek(0)
+  syntax_tree = parser.st2list(parser.suite(f.read()))
+  tokens = offset_token.Tokenize(f)
+
+  snippet = _SnippetizeNode(syntax_tree, tokens)
+  assert not tokens
+  return snippet
+
+
+def _SnippetizeNode(node, tokens):
+  # The parser module gives a syntax tree that discards comments,
+  # non-terminating newlines, and whitespace information. Use the tokens given
+  # by the tokenize module to annotate the syntax tree with the information
+  # needed to exactly reproduce the original source code.
+  node_type = node[0]
+
+  if node_type >= token.NT_OFFSET:
+    # Symbol.
+    children = tuple(_SnippetizeNode(child, tokens) for child in node[1:])
+    return Symbol(node_type, children)
+  else:
+    # Token.
+    grabbed_tokens = []
+    while tokens and (
+        tokens[0].type == tokenize.COMMENT or tokens[0].type == tokenize.NL):
+      grabbed_tokens.append(tokens.popleft())
+
+    # parser has 2 NEWLINEs right before the end.
+    # tokenize has 0 or 1, depending on whether the file has one.
+    # Create extra nodes without consuming tokens to account for this.
+    if node_type == token.NEWLINE:
+      for tok in tokens:
+        if tok.type == token.ENDMARKER:
+          return TokenSnippet(node_type, grabbed_tokens)
+        if tok.type != token.DEDENT:
+          break
+
+    assert tokens[0].type == token.OP or node_type == tokens[0].type
+
+    grabbed_tokens.append(tokens.popleft())
+    return TokenSnippet(node_type, grabbed_tokens)
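Snippetize pairs the parser module's concrete syntax tree with the token stream from offset_token, so str() on the resulting Snippet reproduces the source while PrintTree() shows its structure. A small sketch along the same lines as above (Python 2, arbitrary source string):

    import StringIO

    from catapult_base.refactor import snippet

    source = 'import os  # comment preserved\n'
    tree = snippet.Snippetize(StringIO.StringIO(source))
    tree.PrintTree()            # dump the annotated parse tree for debugging
    assert str(tree) == source  # comments and spacing survive the round trip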
diff --git a/catapult/perf_insights/third_party/__init__.py b/catapult/catapult_base/catapult_base/refactor_util/__init__.py
similarity index 100%
copy from catapult/perf_insights/third_party/__init__.py
copy to catapult/catapult_base/catapult_base/refactor_util/__init__.py
diff --git a/catapult/catapult_base/catapult_base/refactor_util/move.py b/catapult/catapult_base/catapult_base/refactor_util/move.py
new file mode 100644
index 0000000..9493e3b
--- /dev/null
+++ b/catapult/catapult_base/catapult_base/refactor_util/move.py
@@ -0,0 +1,116 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import functools
+import os
+import sys
+
+from catapult_base import refactor
+
+
+def Run(sources, target, files_to_update):
+  """Move modules and update imports.
+
+  Args:
+    sources: List of source module or package paths.
+    target: Destination module or package path.
+    files_to_update: Modules whose imports we should check for changes.
+  """
+  # TODO(dtu): Support moving classes and functions.
+  moves = tuple(_Move(source, target) for source in sources)
+
+  # Update imports and references.
+  refactor.Transform(functools.partial(_Update, moves), files_to_update)
+
+  # Move files.
+  for move in moves:
+    os.rename(move.source_path, move.target_path)
+
+
+def _Update(moves, module):
+  for import_statement in module.FindAll(refactor.Import):
+    for move in moves:
+      try:
+        if move.UpdateImportAndReferences(module, import_statement):
+          break
+      except NotImplementedError as e:
+        print >> sys.stderr, 'Error updating %s: %s' % (module.file_path, e)
+
+
+class _Move(object):
+
+  def __init__(self, source, target):
+    self._source_path = os.path.realpath(source)
+    self._target_path = os.path.realpath(target)
+
+    if os.path.isdir(self._target_path):
+      self._target_path = os.path.join(
+          self._target_path, os.path.basename(self._source_path))
+
+  @property
+  def source_path(self):
+    return self._source_path
+
+  @property
+  def target_path(self):
+    return self._target_path
+
+  @property
+  def source_module_path(self):
+    return _ModulePath(self._source_path)
+
+  @property
+  def target_module_path(self):
+    return _ModulePath(self._target_path)
+
+  def UpdateImportAndReferences(self, module, import_statement):
+    """Update an import statement in a module and all its references..
+
+    Args:
+      module: The refactor.Module to update.
+      import_statement:  The refactor.Import to update.
+
+    Returns:
+      True if the import statement was updated, or False if the import statement
+      needed no updating.
+    """
+    statement_path_parts = import_statement.path.split('.')
+    source_path_parts = self.source_module_path.split('.')
+    if source_path_parts != statement_path_parts[:len(source_path_parts)]:
+      return False
+
+    # Update import statement.
+    old_name_parts = import_statement.name.split('.')
+    new_name_parts = ([self.target_module_path] +
+                      statement_path_parts[len(source_path_parts):])
+    import_statement.path = '.'.join(new_name_parts)
+    new_name = import_statement.name
+
+    # Update references.
+    for reference in module.FindAll(refactor.Reference):
+      reference_parts = reference.value.split('.')
+      if old_name_parts != reference_parts[:len(old_name_parts)]:
+        continue
+
+      new_reference_parts = [new_name] + reference_parts[len(old_name_parts):]
+      reference.value = '.'.join(new_reference_parts)
+
+    return True
+
+
+def _BaseDir(module_path):
+  if not os.path.isdir(module_path):
+    module_path = os.path.dirname(module_path)
+
+  while '__init__.py' in os.listdir(module_path):
+    module_path = os.path.dirname(module_path)
+
+  return module_path
+
+
+def _ModulePath(module_path):
+  if os.path.split(module_path)[1] == '__init__.py':
+    module_path = os.path.dirname(module_path)
+  rel_path = os.path.relpath(module_path, _BaseDir(module_path))
+  return os.path.splitext(rel_path)[0].replace(os.sep, '.')
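Run above ties the refactor pieces together: it updates matching import statements and dotted references in files_to_update, then renames the moved modules on disk. A hedged sketch of a call (every path below is hypothetical):

    from catapult_base.refactor_util import move

    # Move old_util.py to new_util.py and fix up imports/references in the
    # listed file. The os.rename happens last, after the rewrites.
    move.Run(sources=['catapult_base/catapult_base/old_util.py'],
             target='catapult_base/catapult_base/new_util.py',
             files_to_update=['catapult_base/catapult_base/some_caller.py'])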
diff --git a/catapult/catapult_base/catapult_base/test_data/foo.txt b/catapult/catapult_base/catapult_base/test_data/foo.txt
new file mode 100644
index 0000000..a9cac3e
--- /dev/null
+++ b/catapult/catapult_base/catapult_base/test_data/foo.txt
@@ -0,0 +1 @@
+This file is not executable.
diff --git a/catapult/catapult_base/catapult_base/util.py b/catapult/catapult_base/catapult_base/util.py
new file mode 100644
index 0000000..de525d5
--- /dev/null
+++ b/catapult/catapult_base/catapult_base/util.py
@@ -0,0 +1,36 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import os
+import sys
+
+
+def GetCatapultDir():
+  return os.path.normpath(os.path.join(os.path.dirname(__file__), '..', '..'))
+
+
+def IsRunningOnCrosDevice():
+  """Returns True if we're on a ChromeOS device."""
+  lsb_release = '/etc/lsb-release'
+  if sys.platform.startswith('linux') and os.path.exists(lsb_release):
+    with open(lsb_release, 'r') as f:
+      res = f.read()
+      if res.count('CHROMEOS_RELEASE_NAME'):
+        return True
+  return False
+
+
+def _ExecutableExtensions():
+  # pathext is, e.g. '.com;.exe;.bat;.cmd'
+  exts = os.getenv('PATHEXT').split(';')  # e.g. ['.com','.exe','.bat','.cmd']
+  return [x[1:].upper() for x in exts]  # e.g. ['COM','EXE','BAT','CMD']
+
+
+def IsExecutable(path):
+  if os.path.isfile(path):
+    if hasattr(os, 'name') and os.name == 'nt':
+      return path.split('.')[-1].upper() in _ExecutableExtensions()
+    else:
+      return os.access(path, os.X_OK)
+  else:
+    return False
diff --git a/catapult/catapult_base/catapult_base/util_unittest.py b/catapult/catapult_base/catapult_base/util_unittest.py
new file mode 100644
index 0000000..72f1fe6
--- /dev/null
+++ b/catapult/catapult_base/catapult_base/util_unittest.py
@@ -0,0 +1,23 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import os
+import sys
+import unittest
+
+from catapult_base import util
+
+
+class PathTest(unittest.TestCase):
+
+  def testIsExecutable(self):
+    self.assertFalse(util.IsExecutable('nonexistent_file'))
+    # We use actual files on disk instead of pyfakefs because the executable
+    # bit is set differently on Windows than on posix platforms, and pyfakefs
+    # doesn't support the Windows platform well.
+    self.assertFalse(util.IsExecutable(_GetFileInTestDir('foo.txt')))
+    self.assertTrue(util.IsExecutable(sys.executable))
+
+
+def _GetFileInTestDir(file_name):
+  return os.path.join(os.path.dirname(__file__), 'test_data', file_name)
diff --git a/catapult/catapult_base/catapult_base/xvfb.py b/catapult/catapult_base/catapult_base/xvfb.py
new file mode 100644
index 0000000..c09f3e3
--- /dev/null
+++ b/catapult/catapult_base/catapult_base/xvfb.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import os
+import logging
+import subprocess
+import platform
+import time
+
+
+def ShouldStartXvfb():
+  return platform.system() == 'Linux'
+
+
+def StartXvfb():
+  display = ':99'
+  xvfb_command = ['Xvfb', display, '-screen', '0', '1024x769x24', '-ac']
+  xvfb_process = subprocess.Popen(
+      xvfb_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+  time.sleep(0.2)
+  returncode = xvfb_process.poll()
+  if returncode is None:
+    os.environ['DISPLAY'] = display
+  else:
+    logging.error('Xvfb did not start, returncode: %s, stdout:\n%s',
+                  returncode, xvfb_process.stdout.read())
+    xvfb_process = None
+  return xvfb_process
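StartXvfb launches Xvfb on display :99, waits briefly, and exports DISPLAY only if the process is still alive; it returns the Popen object (or None) so the caller owns cleanup. A sketch of the intended calling pattern (the explicit kill() in the finally block is an assumption, not something this patch does):

    from catapult_base import xvfb

    xvfb_process = None
    if xvfb.ShouldStartXvfb():
      xvfb_process = xvfb.StartXvfb()  # sets DISPLAY on success, else None
    try:
      pass  # run GUI-dependent tests here
    finally:
      if xvfb_process:
        xvfb_process.kill()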
diff --git a/catapult/catapult_build/PRESUBMIT.py b/catapult/catapult_build/PRESUBMIT.py
new file mode 100644
index 0000000..2a62658
--- /dev/null
+++ b/catapult/catapult_build/PRESUBMIT.py
@@ -0,0 +1,31 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+def CheckChangeOnUpload(input_api, output_api):
+  return _CommonChecks(input_api, output_api)
+
+
+def CheckChangeOnCommit(input_api, output_api):
+  return _CommonChecks(input_api, output_api)
+
+
+def _CommonChecks(input_api, output_api):
+  results = []
+  results += input_api.RunTests(input_api.canned_checks.GetPylint(
+      input_api, output_api, extra_paths_list=_GetPathsToPrepend(input_api),
+      pylintrc='../pylintrc'))
+  return results
+
+
+def _GetPathsToPrepend(input_api):
+  project_dir = input_api.PresubmitLocalPath()
+  catapult_dir = input_api.os_path.join(project_dir, '..')
+  return [
+      project_dir,
+
+      input_api.os_path.join(catapult_dir, 'third_party', 'mock'),
+      input_api.os_path.join(catapult_dir, 'third_party', 'Paste'),
+      input_api.os_path.join(catapult_dir, 'third_party', 'typ'),
+  ]
diff --git a/catapult/catapult_build/__init__.py b/catapult/catapult_build/__init__.py
index eff4130..1b08fe2 100644
--- a/catapult/catapult_build/__init__.py
+++ b/catapult/catapult_build/__init__.py
@@ -15,6 +15,7 @@
   catapult_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
   catapult_third_party_path = os.path.abspath(os.path.join(
       catapult_path, 'third_party'))
+  _AddToPathIfNeeded(os.path.join(catapult_path, 'catapult_base'))
   _AddToPathIfNeeded(os.path.join(catapult_third_party_path, 'beautifulsoup4'))
   _AddToPathIfNeeded(os.path.join(catapult_third_party_path, 'html5lib-python'))
   _AddToPathIfNeeded(os.path.join(catapult_third_party_path, 'six'))
diff --git a/catapult/catapult_build/appengine_deploy.py b/catapult/catapult_build/appengine_deploy.py
index 4cb24ae..279a77a 100644
--- a/catapult/catapult_build/appengine_deploy.py
+++ b/catapult/catapult_build/appengine_deploy.py
@@ -8,7 +8,6 @@
 
 import subprocess
 import sys
-import tempfile
 
 from catapult_build import module_finder
 from catapult_build import temp_deployment_dir
@@ -27,6 +26,7 @@
   except ImportError:
     # TODO(qyearsley): Put the App Engine SDK in the path with the
     # binary dependency manager.
+    # See: https://github.com/catapult-project/catapult/issues/2135
     print 'This script requires the App Engine SDK to be in PYTHONPATH.'
     sys.exit(1)
   with temp_deployment_dir.TempDeploymentDir(
diff --git a/catapult/catapult_build/appengine_dev_server.py b/catapult/catapult_build/appengine_dev_server.py
index 27dda86..f1f2abc 100644
--- a/catapult/catapult_build/appengine_dev_server.py
+++ b/catapult/catapult_build/appengine_dev_server.py
@@ -3,7 +3,6 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-import os
 import subprocess
 import sys
 
@@ -24,6 +23,7 @@
   except ImportError:
     # TODO(qyearsley): Put the App Engine SDK in the path with the
     # binary dependency manager.
+    # See https://github.com/catapult-project/catapult/issues/2135
     print 'This script requires the App Engine SDK to be in PYTHONPATH.'
     sys.exit(1)
   with temp_deployment_dir.TempDeploymentDir(paths) as temp_dir:
diff --git a/catapult/catapult_build/build_steps.py b/catapult/catapult_build/build_steps.py
new file mode 100644
index 0000000..b40d2b2
--- /dev/null
+++ b/catapult/catapult_build/build_steps.py
@@ -0,0 +1,200 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import json
+import os
+import sys
+
+# This is the list of tests to run. Each entry is a dictionary with the
+# following fields:
+#
+# name (required): The name of the step, to show on the buildbot status page.
+# path (required): The path to the executable which runs the tests.
+# additional_args (optional): An array of optional arguments.
+# uses_app_engine_sdk (optional): True if app engine SDK must be in PYTHONPATH.
+# uses_sandbox_env (optional): True if CHROME_DEVEL_SANDBOX must be in
+#   environment.
+# disabled (optional): List of platforms the test is disabled on. May contain
+#   'win', 'mac', or 'linux'.
+# outputs_presentation_json (optional): If True, pass in --presentation-json
+#   argument to the test executable to allow it to update the buildbot status
+#   page. More details here:
+# github.com/luci/recipes-py/blob/master/recipe_modules/generator_script/api.py
+_CATAPULT_TESTS = [
+    {
+        'name': 'Build Python Tests',
+        'path': 'catapult_build/bin/run_py_tests',
+    },
+    {
+        'name': 'Catapult Base Tests',
+        'path': 'catapult_base/bin/run_tests',
+    },
+    {
+        'name': 'Dashboard Dev Server Tests Canary',
+        'path': 'dashboard/bin/run_dev_server_tests',
+        'additional_args': [
+            '--no-install-hooks',
+            '--no-use-local-chrome',
+            '--channel=canary'
+        ],
+        # https://github.com/catapult-project/catapult/issues/2138
+        'disabled': ['linux', 'mac', 'win'],
+        'outputs_presentation_json': True,
+    },
+    {
+        'name': 'Dashboard Dev Server Tests Stable',
+        'path': 'dashboard/bin/run_dev_server_tests',
+        'additional_args': [
+            '--no-install-hooks',
+            '--no-use-local-chrome',
+            '--channel=stable',
+        ],
+        'outputs_presentation_json': True,
+    },
+    {
+        'name': 'Dashboard Python Tests',
+        'path': 'dashboard/bin/run_py_tests',
+        'additional_args': ['--no-install-hooks'],
+        'uses_app_engine_sdk': True,
+    },
+    {
+        'name': 'Dependency Manager Tests',
+        'path': 'dependency_manager/bin/run_tests',
+    },
+    {
+        'name': 'Devil Python Tests',
+        'path': 'devil/bin/run_py_tests',
+        'disabled': ['mac', 'win'],
+    },
+    {
+        'name': 'Perf Insights Dev Server Tests Canary',
+        'path': 'perf_insights/bin/run_dev_server_tests',
+        'additional_args': [
+            '--no-install-hooks',
+            '--no-use-local-chrome',
+            '--channel=canary'
+        ],
+        # https://github.com/catapult-project/catapult/issues/2138
+        'disabled': ['linux', 'mac', 'win'],
+        'outputs_presentation_json': True,
+    },
+    {
+        'name': 'Perf Insights Dev Server Tests Stable',
+        'path': 'perf_insights/bin/run_dev_server_tests',
+        'additional_args': [
+            '--no-install-hooks',
+            '--no-use-local-chrome',
+            '--channel=stable',
+        ],
+        'uses_sandbox_env': True,
+        'outputs_presentation_json': True,
+    },
+    {
+        'name': 'Perf Insights Python Tests',
+        'path': 'perf_insights/bin/run_py_tests',
+        'additional_args': ['--no-install-hooks'],
+    },
+    {
+        'name': 'Perf VINN Insights Tests',
+        'path': 'perf_insights/bin/run_vinn_tests',
+    },
+    {
+        'name': 'Py-vulcanize Tests',
+        'path': 'third_party/py_vulcanize/bin/run_py_tests',
+        'additional_args': ['--no-install-hooks'],
+    },
+    {
+        'name': 'Systrace Tests',
+        'path': 'systrace/bin/run_tests',
+    },
+    {
+        'name': 'Telemetry Tests with Stable Browser',
+        'path': 'telemetry/bin/run_tests',
+        'additional_args': [
+            '--browser=reference',
+            '--start-xvfb'
+        ],
+        'uses_sandbox_env': True,
+    },
+    {
+        'name': 'Tracing Dev Server Tests Canary',
+        'path': 'tracing/bin/run_dev_server_tests',
+        'additional_args': [
+            '--no-install-hooks',
+            '--no-use-local-chrome',
+            '--channel=canary'
+        ],
+        # Test failing on Windows:
+        # https://github.com/catapult-project/catapult/issues/1816
+        # Tests failing on all platforms:
+        # https://github.com/catapult-project/catapult/issues/2138
+        'disabled': ['win', 'linux', 'mac'],
+        'outputs_presentation_json': True,
+    },
+    {
+        'name': 'Tracing Dev Server Tests Stable',
+        'path': 'tracing/bin/run_dev_server_tests',
+        'additional_args': [
+            '--no-install-hooks',
+            '--no-use-local-chrome',
+            '--channel=stable',
+        ],
+        'outputs_presentation_json': True,
+    },
+    {
+        'name': 'Tracing D8 Tests',
+        'path': 'tracing/bin/run_vinn_tests',
+    },
+    {
+        'name': 'Tracing Python Tests',
+        'path': 'tracing/bin/run_py_tests',
+        'additional_args': ['--no-install-hooks'],
+    },
+    {
+        'name': 'Vinn Tests',
+        'path': 'third_party/vinn/run_test',
+    },
+]
+
+
+def main(args=None):
+  """Send list of test to run to recipes generator_script.
+
+  See documentation at:
+  github.com/luci/recipes-py/blob/master/recipe_modules/generator_script/api.py
+  """
+  parser = argparse.ArgumentParser(description='Run catapult tests.')
+  parser.add_argument('--api-path-checkout', help='Path to catapult checkout')
+  parser.add_argument('--app-engine-sdk-pythonpath',
+                      help='PYTHONPATH to include app engine SDK path')
+  parser.add_argument('--platform',
+                      help='Platform name (linux, mac, or win)')
+  parser.add_argument('--output-json', help='Output for buildbot status page')
+  args = parser.parse_args(args)
+
+  steps = []
+  for test in _CATAPULT_TESTS:
+    if args.platform in test.get('disabled', []):
+      continue
+    step = {
+        'name': test['name'],
+        'env': {}
+    }
+    step['cmd'] = ['python', os.path.join(args.api_path_checkout, test['path'])]
+    if test.get('additional_args'):
+      step['cmd'] += test['additional_args']
+    if test.get('uses_app_engine_sdk'):
+      step['env']['PYTHONPATH'] = args.app_engine_sdk_pythonpath
+    if test.get('uses_sandbox_env'):
+      step['env']['CHROME_DEVEL_SANDBOX'] = '/opt/chromium/chrome_sandbox'
+    if test.get('outputs_presentation_json'):
+      step['outputs_presentation_json'] = True
+    steps.append(step)
+  with open(args.output_json, 'w') as outfile:
+    json.dump(steps, outfile)
+
+
+if __name__ == '__main__':
+  main(sys.argv[1:])
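For reference, the generator_script input that build_steps.py writes is a JSON list with one object per enabled test. A minimal sketch of that shape follows; the checkout and App Engine SDK paths are illustrative placeholders rather than real bot configuration, and tests that set outputs_presentation_json would additionally carry that key.

    # Sketch of the JSON written by build_steps.main(); values are placeholders.
    import json

    example_steps = [
        {
            'name': 'Dashboard Python Tests',
            'env': {'PYTHONPATH': '/placeholder/appengine_sdk'},
            'cmd': ['python',
                    '/placeholder/checkout/dashboard/bin/run_py_tests',
                    '--no-install-hooks'],
        },
        {
            'name': 'Telemetry Tests with Stable Browser',
            'env': {'CHROME_DEVEL_SANDBOX': '/opt/chromium/chrome_sandbox'},
            'cmd': ['python',
                    '/placeholder/checkout/telemetry/bin/run_tests',
                    '--browser=reference', '--start-xvfb'],
        },
    ]
    print json.dumps(example_steps, indent=2)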
diff --git a/catapult/catapult_build/build_steps_unittest.py b/catapult/catapult_build/build_steps_unittest.py
new file mode 100644
index 0000000..697ada0
--- /dev/null
+++ b/catapult/catapult_build/build_steps_unittest.py
@@ -0,0 +1,44 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import unittest
+
+from catapult_build import build_steps
+
+
+class BuildStepsTest(unittest.TestCase):
+
+  def testCatapultTestList(self):
+    catapult_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+    for test in build_steps._CATAPULT_TESTS:
+      self.assertIn('name', test, msg=(
+          'All tests in build_steps._CATAPULT_TESTS must have a name;'
+          ' error in:\n %s' % test))
+      self.assertIsInstance(test['name'], str, msg=(
+          'Test name %s in build_steps._CATAPULT_TESTS must be a string.'
+          % test['name']))
+      self.assertIn('path', test, msg=(
+          'All tests in build_steps._CATAPULT_TESTS must have a path '
+          'relative to catapult/; error in:\n %s' % test))
+      abs_path = os.path.join(catapult_dir, test['path'])
+      self.assertTrue(os.path.exists(abs_path), msg=(
+          'Bad path %s in build_steps._CATAPULT_TESTS; '
+          'should be relative to catapult/' % test['path']))
+      if test.get('additional_args'):
+        self.assertIsInstance(test['additional_args'], list, msg=(
+            'additional_args %s in build_steps._CATAPULT_TESTS for %s is '
+            'not a list' % (test['additional_args'], test['name'])
+        ))
+      if test.get('disabled'):
+        self.assertIsInstance(test['disabled'], list, msg=(
+            'disabled %s in build_steps._CATAPULT_TESTS for %s is not a list'
+            % (test['disabled'], test['name'])
+        ))
+        for platform in test['disabled']:
+          self.assertIn(platform, ['win', 'mac', 'linux'], msg=(
+              'Bad platform %s in build_steps._CATAPULT_TESTS for %s; '
+              'should be one of "linux", "win", "mac"' % (
+                  platform, test['name'])
+          ))
diff --git a/catapult/catapult_build/dev_server.py b/catapult/catapult_build/dev_server.py
index a273827..13018d1 100644
--- a/catapult/catapult_build/dev_server.py
+++ b/catapult/catapult_build/dev_server.py
@@ -33,8 +33,10 @@
 """
 
 _QUICK_LINKS = [
-  ('Trace File Viewer', '/tracing_examples/trace_viewer.html'),
-  ('Perf Insights Viewer', '/perf_insights_examples/perf_insights_viewer.html')
+    ('Trace File Viewer',
+     '/tracing_examples/trace_viewer.html'),
+    ('Perf Insights Viewer',
+     '/perf_insights_examples/perf_insights_viewer.html')
 ]
 
 _LINK_ITEM = '<li><a href="%s">%s</a></li>'
@@ -75,7 +77,7 @@
   def post(self, *args, **kwargs):  # pylint: disable=unused-argument
     msg = self.request.body
     sys.stdout.write(msg + '\n')
-    exit_code=(0 if 'ALL_PASSED' in msg else 1)
+    exit_code = 0 if 'ALL_PASSED' in msg else 1
     if hasattr(self.app.server, 'please_exit'):
       self.app.server.please_exit(exit_code)
     return self.response.write('')
@@ -162,7 +164,7 @@
     for name, path in _QUICK_LINKS:
       quick_links.append(_LINK_ITEM % (path, name))
     self.response.out.write(_MAIN_HTML % ('\n'.join(test_links),
-        '\n'.join(quick_links)))
+                                          '\n'.join(quick_links)))
 
 class DevServerApp(webapp2.WSGIApplication):
   def __init__(self, pds, args):
@@ -185,18 +187,18 @@
     default_tests = dict((pd.GetName(), pd.GetRunUnitTestsUrl())
                          for pd in self.pds)
     routes = [
-      Route('/tests.html', TestOverviewHandler,
-            defaults={'pds': default_tests}),
-      Route('', RedirectHandler, defaults={'_uri': '/tests.html'}),
-      Route('/', RedirectHandler, defaults={'_uri': '/tests.html'}),
+        Route('/tests.html', TestOverviewHandler,
+              defaults={'pds': default_tests}),
+        Route('', RedirectHandler, defaults={'_uri': '/tests.html'}),
+        Route('/', RedirectHandler, defaults={'_uri': '/tests.html'}),
     ]
     for pd in self.pds:
       routes += pd.GetRoutes(args)
       routes += [
-        Route('/%s/notify_test_result' % pd.GetName(),
-              TestResultHandler),
-        Route('/%s/notify_tests_completed' % pd.GetName(),
-              TestsCompletedHandler)
+          Route('/%s/notify_test_result' % pd.GetName(),
+                TestResultHandler),
+          Route('/%s/notify_tests_completed' % pd.GetName(),
+                TestsCompletedHandler)
       ]
 
     for pd in self.pds:
@@ -220,15 +222,14 @@
     for pd in self.pds:
       self._all_source_paths += pd.GetSourcePaths(args)
     routes.append(
-      Route('/<:.+>', SourcePathsHandler,
-            defaults={'_source_paths': self._all_source_paths}))
+        Route('/<:.+>', SourcePathsHandler,
+              defaults={'_source_paths': self._all_source_paths}))
 
     for route in routes:
       self.router.add(route)
 
   def GetAbsFilenameForHref(self, href):
     for source_path in self._all_source_paths:
-      print source_path
       full_source_path = os.path.abspath(source_path)
       expanded_href_path = os.path.abspath(os.path.join(full_source_path,
                                                         href.lstrip('/')))
@@ -259,36 +260,36 @@
   # Shutting down httpserver gracefully and yielding a return code requires
   # a bit of mixin code.
 
-  exitCodeAttempt = []
-  def please_exit(exitCode):
-    if len(exitCodeAttempt) > 0:
+  exit_code_attempt = []
+  def PleaseExit(exit_code):
+    if len(exit_code_attempt) > 0:
       return
-    exitCodeAttempt.append(exitCode)
+    exit_code_attempt.append(exit_code)
     server.running = False
 
   real_serve_forever = server.serve_forever
 
-  def serve_forever():
+  def ServeForever():
     try:
       real_serve_forever()
     except KeyboardInterrupt:
-        # allow CTRL+C to shutdown
-        return 255
+      # allow CTRL+C to shutdown
+      return 255
 
-    if len(exitCodeAttempt) == 1:
-      return exitCodeAttempt[0]
+    if len(exit_code_attempt) == 1:
+      return exit_code_attempt[0]
     # The serve_forever returned for some reason separate from
     # exit_please.
     return 0
 
-  server.please_exit = please_exit
-  server.serve_forever = serve_forever
+  server.please_exit = PleaseExit
+  server.serve_forever = ServeForever
 
 
 def _AddCommandLineArguments(pds, argv):
   parser = argparse.ArgumentParser(description='Run development server')
   parser.add_argument(
-    '--no-install-hooks', dest='install_hooks', action='store_false')
+      '--no-install-hooks', dest='install_hooks', action='store_false')
   parser.add_argument('-p', '--port', default=8003, type=int)
   for pd in pds:
     g = parser.add_argument_group(pd.GetName())
@@ -314,6 +315,7 @@
   server = httpserver.serve(app, host='127.0.0.1', port=args.port,
                             start_loop=False)
   _AddPleaseExitMixinToServer(server)
+  # pylint: disable=no-member
   server.urlbase = 'http://127.0.0.1:%i' % server.server_port
   app.server = server
 
diff --git a/catapult/catapult_build/dev_server_unittest.py b/catapult/catapult_build/dev_server_unittest.py
index 5a0bd9b..718b2d6 100644
--- a/catapult/catapult_build/dev_server_unittest.py
+++ b/catapult/catapult_build/dev_server_unittest.py
@@ -1,7 +1,6 @@
 # Copyright (c) 2015 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
-import argparse
 import json
 import os
 import unittest
@@ -16,8 +15,8 @@
 
   def setUp(self):
     self.pds = [
-      perf_insights_dev_server_config.PerfInsightsDevServerConfig(),
-      tracing_dev_server_config.TracingDevServerConfig(),
+        perf_insights_dev_server_config.PerfInsightsDevServerConfig(),
+        tracing_dev_server_config.TracingDevServerConfig(),
     ]
 
     self.args = dev_server._AddCommandLineArguments(self.pds, [])
diff --git a/catapult/catapult_build/fixjsstyle b/catapult/catapult_build/fixjsstyle
deleted file mode 100755
index 36bc61b..0000000
--- a/catapult/catapult_build/fixjsstyle
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2014 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import os
-import sys
-
-if __name__ == '__main__':
-  top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
-  sys.path.append(top_dir)
-  from tracing.build import fixjsstyle
-  sys.exit(fixjsstyle.main())
diff --git a/catapult/catapult_build/fixjsstyle.py b/catapult/catapult_build/fixjsstyle.py
deleted file mode 100644
index a9d1ebd..0000000
--- a/catapult/catapult_build/fixjsstyle.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright (c) 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import os
-import sys
-
-tracing_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
-                                            '..', '..'))
-if tracing_path not in sys.path:
-  sys.path.append(tracing_path)
-
-from tracing import tracing_project
-
-
-def main():
-  project = tracing_project.TracingProject()
-
-  sys.path.append(os.path.join(
-      project.tracing_third_party_path, 'python_gflags'))
-  sys.path.append(os.path.join(
-      project.tracing_third_party_path, 'closure_linter'))
-
-  from closure_linter import fixjsstyle
-
-  os.chdir(project.tracing_src_path)
-
-  fixjsstyle.main()
diff --git a/catapult/catapult_build/gjslint b/catapult/catapult_build/gjslint
deleted file mode 100755
index 617b475..0000000
--- a/catapult/catapult_build/gjslint
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import os
-import sys
-
-src_dir = os.path.join(os.path.dirname(__file__), '..')
-
-if __name__ == '__main__':
-  top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
-  sys.path.append(top_dir)
-  import tracing
-  from hooks import gjslint
-  sys.exit(gjslint.Main([
-    os.path.join(top_dir, 'tracing', 'tracing'),
-  ]))
diff --git a/catapult/catapult_build/gjslint.py b/catapult/catapult_build/gjslint.py
deleted file mode 100644
index 6f9eff9..0000000
--- a/catapult/catapult_build/gjslint.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright (c) 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import os
-import sys
-
-from tracing import tracing_project
-
-
-def Main(paths_to_lint):
-  project = tracing_project.TracingProject()
-  new_paths = [
-      os.path.abspath(os.path.join(
-          project.tracing_third_party_path, 'python_gflags')),
-      os.path.abspath(os.path.join(
-          project.tracing_third_party_path, 'closure_linter'))
-  ]
-  sys.path += new_paths
-  try:
-    _MainImpl(paths_to_lint)
-  finally:
-    for p in new_paths:
-      sys.path.remove(p)
-
-
-def _MainImpl(paths_to_lint):
-  from closure_linter import gjslint
-
-  if sys.argv[1:] == ['--help']:
-    sys.exit(gjslint.main())
-
-  if len(sys.argv) > 1:
-    sys.stderr.write('No arguments allowed')
-    sys.exit(1)
-
-  sys.argv.append('--strict')
-  sys.argv.append('--unix_mode')
-  sys.argv.append('--check_html')
-  for p in paths_to_lint:
-    sys.argv.extend(['-r', os.path.relpath(p)])
-
-  gjslint.main()
diff --git a/catapult/catapult_build/html_checks.py b/catapult/catapult_build/html_checks.py
index 41b473d..caf4dea 100644
--- a/catapult/catapult_build/html_checks.py
+++ b/catapult/catapult_build/html_checks.py
@@ -4,7 +4,8 @@
 
 """Checks to use in PRESUBMIT.py for HTML style violations."""
 
-import os
+import collections
+import difflib
 import re
 
 import bs4
@@ -26,22 +27,40 @@
       file_filter=ShouldCheck, include_deletes=False)
   results = []
   for f in affected_files:
-    results.extend(CheckDoctype(f, output_api))
+    CheckAffectedFile(f, results, output_api)
   return results
 
 
-def CheckDoctype(affected_file, output_api):
-  contents = '\n'.join(affected_file.NewContents())
-  if _HasHtml5Declaration(contents):
-    return []
-  error_text = ('In %s:\n' % affected_file.LocalPath() +
-                'could not find "<!DOCTYPE html>."')
-  return [output_api.PresubmitError(error_text)]
+def CheckAffectedFile(affected_file, results, output_api):
+  path = affected_file.LocalPath()
+  soup = parse_html.BeautifulSoup('\n'.join(affected_file.NewContents()))
+  for check in [CheckDoctype, CheckImportOrder]:
+    check(path, soup, results, output_api)
 
 
-def _HasHtml5Declaration(contents):
-  soup = parse_html.BeautifulSoup(contents)
+def CheckDoctype(path, soup, results, output_api):
+  if _HasHtml5Declaration(soup):
+    return
+  error_text = 'Could not find "<!DOCTYPE html>" in %s.' % path
+  results.append(output_api.PresubmitError(error_text))
+
+
+def _HasHtml5Declaration(soup):
   for item in soup.contents:
     if isinstance(item, bs4.Doctype) and item.lower() == 'html':
       return True
   return False
+
+
+def CheckImportOrder(path, soup, results, output_api):
+  grouped_hrefs = collections.defaultdict(list)  # Link rel -> [link hrefs].
+  for link in soup.find_all('link'):
+    grouped_hrefs[','.join(link.get('rel'))].append(link.get('href'))
+
+  for rel, actual_hrefs in grouped_hrefs.iteritems():
+    expected_hrefs = list(sorted(set(actual_hrefs)))
+    if actual_hrefs != expected_hrefs:
+      error_text = (
+          'Invalid "%s" link sort order in %s:\n' % (rel, path) +
+          '  ' + '\n  '.join(difflib.ndiff(actual_hrefs, expected_hrefs)))
+      results.append(output_api.PresubmitError(error_text))
diff --git a/catapult/catapult_build/js_checks.py b/catapult/catapult_build/js_checks.py
index 88447dd..fb4eaea 100644
--- a/catapult/catapult_build/js_checks.py
+++ b/catapult/catapult_build/js_checks.py
@@ -43,7 +43,7 @@
           line_number,
           message,
           line,
-          self._ErrorHighlight(start, length))
+          _ErrorHighlight(start, length))
     return ''
 
   def ConstCheck(self, i, line):
@@ -55,13 +55,6 @@
     return self.RegexCheck(
         i, line, r'(?:^|\s|\()(const)\s', 'Use var instead of const.')
 
-  def _ErrorHighlight(self, start, length):
-    """Produces a row of '^'s to underline part of a string."""
-    return start * ' ' + length * '^'
-
-  def _MakeErrorOrWarning(self, output_api, error_text):
-    return output_api.PresubmitError(error_text)
-
   def RunChecks(self):
     """Checks for violations of the Chromium JavaScript style guide.
 
@@ -73,17 +66,17 @@
 
     try:
       base_path = os.path.abspath(os.path.join(
-        os.path.dirname(__file__), '..'))
+          os.path.dirname(__file__), '..'))
       closure_linter_path = os.path.join(
-        base_path, 'third_party', 'closure_linter')
+          base_path, 'third_party', 'closure_linter')
       gflags_path = os.path.join(
-        base_path, 'third_party', 'python_gflags')
+          base_path, 'third_party', 'python_gflags')
       sys.path.insert(0, closure_linter_path)
       sys.path.insert(0, gflags_path)
 
       warnings.filterwarnings('ignore', category=DeprecationWarning)
 
-      from closure_linter import checker, errors
+      from closure_linter import runner, errors
       from closure_linter.common import errorhandler
 
     finally:
@@ -94,6 +87,7 @@
       """Filters out errors that don't apply to Chromium JavaScript code."""
 
       def __init__(self):
+        super(ErrorHandlerImpl, self).__init__()
         self._errors = []
         self._filename = None
 
@@ -116,23 +110,24 @@
 
         Most errors are valid, with a few exceptions which are listed here.
         """
-        is_grit_statement = bool(
-          re.search('</?(include|if)', error.token.line))
+        if re.search('</?(include|if)', error.token.line):
+          return False  # GRIT statement.
 
-        return not is_grit_statement and error.code not in [
+        if (error.code == errors.MISSING_SEMICOLON and
+            error.token.string == 'of'):
+          return False  # ES6 for...of statement.
+
+        return error.code not in [
             errors.JSDOC_ILLEGAL_QUESTION_WITH_PIPE,
-            errors.JSDOC_TAG_DESCRIPTION_ENDS_WITH_INVALID_CHARACTER,
             errors.MISSING_JSDOC_TAG_THIS,
+            errors.MISSING_MEMBER_DOCUMENTATION,
         ]
 
     results = []
 
-    try:
-      affected_files = self.input_api.AffectedFiles(
-          file_filter=self.file_filter,
-          include_deletes=False)
-    except Exception:
-      affected_files = []
+    affected_files = self.input_api.AffectedFiles(
+        file_filter=self.file_filter,
+        include_deletes=False)
 
     def ShouldCheck(f):
       if f.LocalPath().endswith('.js'):
@@ -157,11 +152,10 @@
       import gflags as flags
       flags.FLAGS.strict = True
       error_handler = ErrorHandlerImpl()
-      js_checker = checker.JavaScriptStyleChecker(error_handler)
-      js_checker.Check(f.AbsoluteLocalPath())
+      runner.Run(f.AbsoluteLocalPath(), error_handler)
 
       for error in error_handler.GetErrors():
-        highlight = self._ErrorHighlight(
+        highlight = _ErrorHighlight(
             error.token.start_index, error.token.length)
         error_msg = '  line %d: E%04d: %s\n%s\n%s' % (
             error.token.line_number,
@@ -176,11 +170,20 @@
             'Found JavaScript style violations in %s:' %
             f.LocalPath()] + error_lines
         results.append(
-            self._MakeErrorOrWarning(self.output_api, '\n'.join(error_lines)))
+            _MakeErrorOrWarning(self.output_api, '\n'.join(error_lines)))
 
     return results
 
 
+def _ErrorHighlight(start, length):
+  """Produces a row of '^'s to underline part of a string."""
+  return start * ' ' + length * '^'
+
+
+def _MakeErrorOrWarning(output_api, error_text):
+  return output_api.PresubmitError(error_text)
+
+
 def CheckStrictMode(contents, is_html_file=False):
   statements_to_check = []
   if is_html_file:
@@ -189,7 +192,7 @@
     statements_to_check.append(_FirstStatement(contents))
   error_lines = []
   for s in statements_to_check:
-    if s !=  "'use strict'":
+    if s != "'use strict'":
       error_lines.append('Expected "\'use strict\'" as first statement, '
                          'but found "%s" instead.' % s)
   return error_lines
diff --git a/catapult/catapult_build/node_bootstrap.js b/catapult/catapult_build/node_bootstrap.js
new file mode 100644
index 0000000..d36fde3
--- /dev/null
+++ b/catapult/catapult_build/node_bootstrap.js
@@ -0,0 +1,83 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+/**
+ * @fileoverview Lets node import catapult HTML-imports-authored modules.
+ *
+ */
+var isNode = global.process && global.process.versions.node;
+if (!isNode)
+  throw new Error('Only works inside node');
+
+var fs = require('fs');
+var path = require('path');
+var process = require('process');
+var child_process = require('child_process');
+
+var catapultPath = fs.realpathSync(path.join(__dirname, '..'));
+var catapultBuildPath = path.join(catapultPath, 'catapult_build');
+
+var vinnPath = path.join(catapultPath, 'third_party', 'vinn');
+
+function loadAndEval(fileName) {
+  var contents = fs.readFileSync(fileName, 'utf8');
+  (function() {
+    eval(contents);
+  }).call(global);
+}
+
+function initialize() {
+  loadAndEval(path.join(vinnPath, 'vinn', 'base64_compat.js'));
+
+  // First, we need to hand-load the HTML imports loader from Vinn,
+  // plus a few of its supporting files. These all assume that 'this' is the
+  // global object, so eval them with 'this' redirected.
+  loadAndEval(path.join(vinnPath, 'third_party', 'parse5', 'parse5.js'));
+  loadAndEval(path.join(vinnPath, 'vinn', 'html_to_js_generator.js'));
+  loadAndEval(path.join(vinnPath, 'vinn', 'html_imports_loader.js'));
+  loadAndEval(path.join(vinnPath, 'vinn', 'path_utils.js'));
+
+  // Now that everything is loaded, we need to set up the loader.
+  var pathUtils = new global.PathUtils(
+      {
+        currentWorkingDirectory: process.cwd(),
+        exists: function(fileName) {
+          return fs.existsSync(fileName);
+        }
+      });
+  global.HTMLImportsLoader.setPathUtils(pathUtils);
+}
+
+
+/**
+ * Gets the source search paths for a catapult project module.
+ *
+ * @param {String} projectName The project in question.
+ * @return {Array} A list of search paths.
+ */
+module.exports.getSourcePathsForProject = function(projectName) {
+  var sourcePathsString = child_process.execFileSync(
+      path.join(catapultBuildPath, 'print_project_info'),
+      ['--source-paths', projectName]);
+  return JSON.parse(sourcePathsString);
+};
+
+
+/**
+ * Gets the headless test module filenames for a catapult project module.
+ *
+ * @param {String} projectName The project in question.
+ * @return {Array} A list of module filenames.
+ */
+module.exports.getHeadlessTestModuleFilenamesForProject =
+    function(projectName) {
+  var sourcePathsString = child_process.execFileSync(
+      path.join(catapultBuildPath, 'print_project_info'),
+      ['--headless-test-module-filenames', projectName]);
+  return JSON.parse(sourcePathsString);
+};
+
+initialize();
diff --git a/catapult/catapult_build/perfbot_stats/__init__.py b/catapult/catapult_build/perfbot_stats/__init__.py
new file mode 100644
index 0000000..50b23df
--- /dev/null
+++ b/catapult/catapult_build/perfbot_stats/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/catapult/catapult_build/perfbot_stats/chrome_perf_stats.py b/catapult/catapult_build/perfbot_stats/chrome_perf_stats.py
new file mode 100755
index 0000000..e98fddf
--- /dev/null
+++ b/catapult/catapult_build/perfbot_stats/chrome_perf_stats.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python2.7
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Script to pull chromium.perf stats from chrome-infra-stats API.
+
+Currently this just pulls success rates from the API, averages daily per
+builder, and uploads to perf dashboard. It could be improved to provide more
+detailed success rates.
+
+The API documentation for chrome-infra-stats is at:
+https://apis-explorer.appspot.com/apis-explorer/?
+   base=https://chrome-infra-stats.appspot.com/_ah/api#p/
+"""
+
+import calendar
+import datetime
+import json
+import sys
+import urllib
+import urllib2
+
+BUILDER_LIST_URL = ('https://chrome-infra-stats.appspot.com/'
+                    '_ah/api/stats/v1/masters/chromium.perf')
+
+BUILDER_STATS_URL = ('https://chrome-infra-stats.appspot.com/_ah/api/stats/v1/'
+                     'stats/chromium.perf/%s/overall__build__result__/%s')
+
+USAGE = ('Usage: chrome_perf_stats.py <year> <month> [<day>]. If the day is '
+         'omitted, the whole month is used; if no date is specified, '
+         'yesterday will be used.')
+
+
+def main():
+  if len(sys.argv) == 2 and sys.argv[1] == '--help':
+    print USAGE
+    sys.exit(0)
+  year = None
+  month = None
+  days = None
+  if len(sys.argv) == 4 or len(sys.argv) == 3:
+    year = int(sys.argv[1])
+    if year > 2016 or year < 2014:
+      print USAGE
+      sys.exit(0)
+    month = int(sys.argv[2])
+    if month > 12 or month <= 0:
+      print USAGE
+      sys.exit(0)
+    if len(sys.argv) == 3:
+      days = range(1, calendar.monthrange(year, month)[1] + 1)
+    else:
+      day = int(sys.argv[3])
+      if day > 31 or day <= 0:
+        print USAGE
+        sys.exit(0)
+      days = [day]
+  elif len(sys.argv) != 1:
+    print USAGE
+    sys.exit(0)
+  else:
+    yesterday = datetime.date.today() - datetime.timedelta(days=1)
+    year = yesterday.year
+    month = yesterday.month
+    days = [yesterday.day]
+
+  response = urllib2.urlopen(BUILDER_LIST_URL)
+  builders = [builder['name'] for builder in json.load(response)['builders']]
+  success_rates = CalculateSuccessRates(year, month, days, builders)
+  UploadToPerfDashboard(success_rates)
+
+
+def _UpdateSuccessRatesWithResult(
+    success_rates, results, date_dict_str, builder):
+  count = int(results['count'])
+  if count == 0:
+    return
+  success_count = count - int(results['failure_count'])
+  success_rates.setdefault(date_dict_str, {})
+  success_rates[date_dict_str].setdefault(builder, {
+      'count': 0,
+      'success_count': 0
+  })
+  success_rates[date_dict_str][builder]['count'] += count
+  success_rates[date_dict_str][builder]['success_count'] += success_count
+
+
+def _SummarizeSuccessRates(success_rates):
+  overall_success_rates = []
+  for day, results in success_rates.iteritems():
+    success_rate_sum = 0
+    success_rate_count = 0
+    for rates in results.values():
+      if rates['count'] == 0:
+        continue
+      success_rate_sum += (
+          float(rates['success_count']) / float(rates['count']))
+      success_rate_count += 1
+    overall_success_rates.append(
+        [day, float(success_rate_sum) / float(success_rate_count)])
+  return overall_success_rates
+
+
+def UploadToPerfDashboard(success_rates):
+  for success_rate in success_rates:
+    date_str = '%s-%s-%s' % (success_rate[0][0:4],
+                             success_rate[0][4:6],
+                             success_rate[0][6:8])
+    dashboard_data = {
+        'master': 'WaterfallStats',
+        'bot': 'ChromiumPerf',
+        'point_id': int(success_rate[0]),
+        'supplemental': {},
+        'versions': {
+            'date': date_str,
+        },
+        'chart_data': {
+            'benchmark_name': 'success_rate',
+            'benchmark_description': 'Success rates averaged per-builder',
+            'format_version': 1.0,
+            'charts': {
+                'overall_success_rate': {
+                    'summary': {
+                        'name': 'overall_success_rate',
+                        'type': 'scalar',
+                        'units': '%',
+                        'value': success_rate[1]
+                    }
+                }
+            }
+        }
+    }
+    url = 'https://chromeperf.appspot.com/add_point'
+    data = urllib.urlencode({'data': json.dumps(dashboard_data)})
+    urllib2.urlopen(url=url, data=data).read()
+
+
+def CalculateSuccessRates(year, month, days, builders):
+  success_rates = {}
+  for day in days:
+    for hour in range(24):
+      date_str = '%d-%02d-%02dT%02d:00Z' % (year, month, day, hour)
+      date_dict_str = '%d%02d%02d' % (year, month, day)
+      for builder in builders:
+        url = BUILDER_STATS_URL % (
+            urllib.quote(builder), urllib.quote(date_str))
+        response = urllib2.urlopen(url)
+        results = json.load(response)
+        _UpdateSuccessRatesWithResult(
+            success_rates, results, date_dict_str, builder)
+  return _SummarizeSuccessRates(success_rates)
+
+
+if __name__ == "__main__":
+  main()
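To make the per-day averaging concrete, here is a small worked example mirroring _SummarizeSuccessRates above; the builder names and counts are illustrative (they match the values used in the unit test below).

    # Per-builder success rates are computed first, then averaged per day.
    success_rates = {
        '20151010': {
            'android_nexus_10': {'count': 5, 'success_count': 2},  # 0.4
            'android_nexus_4': {'count': 5, 'success_count': 3},   # 0.6
        },
        '20151009': {
            'win_xp': {'count': 5, 'success_count': 5},            # 1.0
        },
    }
    for day, results in success_rates.iteritems():
      rates = [float(r['success_count']) / r['count']
               for r in results.values()]
      print day, sum(rates) / len(rates)  # 20151010 -> 0.5, 20151009 -> 1.0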
diff --git a/catapult/catapult_build/perfbot_stats/chrome_perf_stats_unittest.py b/catapult/catapult_build/perfbot_stats/chrome_perf_stats_unittest.py
new file mode 100644
index 0000000..bb72bf1
--- /dev/null
+++ b/catapult/catapult_build/perfbot_stats/chrome_perf_stats_unittest.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python2.7
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from perfbot_stats import chrome_perf_stats
+
+
+class TestChromePerfStats(unittest.TestCase):
+
+  def testUpdateSuccessRatesWithResult(self):
+    success_rates = {}
+    chrome_perf_stats._UpdateSuccessRatesWithResult(
+        success_rates,
+        {'count': 0},
+        'invalid_date_str',
+        'invalid_builder')
+    self.assertDictEqual({}, success_rates)
+    chrome_perf_stats._UpdateSuccessRatesWithResult(
+        success_rates,
+        {'count': 5, 'failure_count': 3},
+        '20151010',
+        'android_nexus_10')
+    self.assertDictEqual(
+        {'20151010': {'android_nexus_10': {'count': 5, 'success_count': 2}}},
+        success_rates)
+    chrome_perf_stats._UpdateSuccessRatesWithResult(
+        success_rates,
+        {'count': 5, 'failure_count': 4},
+        '20151010',
+        'android_nexus_4')
+    self.assertDictEqual(
+        {
+            '20151010': {
+                'android_nexus_10': {'count': 5, 'success_count': 2},
+                'android_nexus_4': {'count': 5, 'success_count': 1},
+            }
+        },
+        success_rates)
+    chrome_perf_stats._UpdateSuccessRatesWithResult(
+        success_rates,
+        {'count': 5, 'failure_count': 0},
+        '20151009',
+        'win_xp')
+    self.assertDictEqual(
+        {
+            '20151010': {
+                'android_nexus_10': {'count': 5, 'success_count': 2},
+                'android_nexus_4': {'count': 5, 'success_count': 1},
+            },
+            '20151009': {
+                'win_xp': {'count': 5, 'success_count': 5},
+            },
+        },
+        success_rates)
+
+  def testSummarizeSuccessRates(self):
+    rates = chrome_perf_stats._SummarizeSuccessRates(
+        {
+            '20151010': {
+                'android_nexus_10': {'count': 5, 'success_count': 2},
+                'android_nexus_4': {'count': 5, 'success_count': 3},
+            },
+            '20151009': {
+                'win_xp': {'count': 5, 'success_count': 5},
+            },
+        })
+    self.assertListEqual([['20151010', 0.5], ['20151009', 1.0]], rates)
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/catapult/catapult_build/perfbot_stats/chrome_perf_step_timings.py b/catapult/catapult_build/perfbot_stats/chrome_perf_step_timings.py
new file mode 100755
index 0000000..30558ab
--- /dev/null
+++ b/catapult/catapult_build/perfbot_stats/chrome_perf_step_timings.py
@@ -0,0 +1,166 @@
+#!/usr/bin/env python2.7
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Script to pull chromium.perf step timings from chrome-infra-stats API.
+
+Currently this pulls the list of steps per builder. For each step, if it is not
+a setup step, we get the step stats for the last 20 runs for that builder.
+
+The API documentation for chrome-infra-stats is at:
+https://apis-explorer.appspot.com/apis-explorer/?
+   base=https://chrome-infra-stats.appspot.com/_ah/api#p/
+"""
+
+import csv
+import datetime
+import json
+import sys
+import urllib
+import urllib2
+
+
+BUILDER_STEPS_URL = ('https://chrome-infra-stats.appspot.com/_ah/api/stats/v1/'
+                     'masters/chromium.perf/%s')
+
+
+STEP_ACTIVE_URL = ('https://chrome-infra-stats.appspot.com/_ah/api/stats/v1/'
+                   'steps/last/chromium.perf/%s/%s/1')
+
+
+STEP_STATS_URL = ('https://chrome-infra-stats.appspot.com/_ah/api/stats/v1/'
+                  'stats/last/chromium.perf/%s/%s/20')
+
+
+IGNORED_STEPS = [
+    'List Perf Tests',
+    'Sharded Perf Tests',
+    'authorize_adb_devices',
+    'bot_update',
+    'build__schedule__time__',
+    'clean local files',
+    'cleanup_temp',
+    'device_status_check',
+    'extract build',
+    'gclient runhooks',
+    'get compile targets for scripts',
+    'get perf test list',
+    'gsutil download_build_product',
+    'host_info',
+    'install ChromeShell.apk',
+    'json.output cache',
+    'json.output cache',
+    'overall__build__result__',
+    'overall__queued__time__',
+    'provision_devices',
+    'read test spec',
+    'rmtree build directory',
+    'setup_build',
+    'spawn_logcat_monitor',
+    'stack_tool_for_tombstones',
+    'stack_tool_with_logcat_dump',
+    'steps',
+    'test_report',
+    'unzip_build_product',
+    'update_scripts'
+]
+
+KNOWN_TESTERS_LIST = [
+    'Android Nexus4 Perf',
+    'Android Nexus5 Perf',
+    'Android Nexus6 Perf',
+    'Android Nexus10 Perf',
+    'Android Nexus7v2 Perf',
+    'Android One Perf',
+    'Linux Perf (1)',
+    'Linux Perf (2)',
+    'Linux Perf (3)',
+    'Linux Perf (4)',
+    'Linux Perf (5)',
+    'Mac 10.8 Perf (1)',
+    'Mac 10.8 Perf (2)',
+    'Mac 10.8 Perf (3)',
+    'Mac 10.8 Perf (4)',
+    'Mac 10.8 Perf (5)',
+    'Mac 10.9 Perf (1)',
+    'Mac 10.9 Perf (2)',
+    'Mac 10.9 Perf (3)',
+    'Mac 10.9 Perf (4)',
+    'Mac 10.9 Perf (5)',
+    'Win 7 ATI GPU Perf',
+    'Win 7 Intel GPU Perf',
+    'Win 7 Low-End Perf (1)',
+    'Win 7 Low-End Perf (2)',
+    'Win 7 Nvidia GPU Perf',
+    'Win 7 Perf (1)',
+    'Win 7 Perf (2)',
+    'Win 7 Perf (3)',
+    'Win 7 Perf (4)',
+    'Win 7 Perf (5)',
+    'Win 7 x64 Perf (1)',
+    'Win 7 x64 Perf (2)',
+    'Win 8 Perf (1)',
+    'Win 8 Perf (2)',
+    'Win XP Perf (1)',
+    'Win XP Perf (2)',
+    'Win XP Perf (3)',
+    'Win XP Perf (4)',
+    'Win XP Perf (5)'
+]
+
+
+USAGE = 'Usage: chrome_perf_step_timings.py <outfilename>'
+
+
+def main():
+  if len(sys.argv) != 2:
+    print USAGE
+    sys.exit(0)
+  outfilename = sys.argv[1]
+
+  threshold_time = datetime.datetime.now() - datetime.timedelta(days=2)
+
+  col_names = [('builder', 'step', 'run_count', 'stddev', 'mean', 'maximum',
+                'median', 'seventyfive', 'ninety', 'ninetynine')]
+  with open(outfilename, 'wb') as f:
+    writer = csv.writer(f)
+    writer.writerows(col_names)
+
+  for builder in KNOWN_TESTERS_LIST:
+    step_timings = []
+    url = BUILDER_STEPS_URL % urllib.quote(builder)
+    response = urllib2.urlopen(url)
+    results = json.load(response)
+    steps = results['steps']
+    steps.sort()  # to group tests and their references together.
+    for step in steps:
+      if step in IGNORED_STEPS:
+        continue
+      url = STEP_ACTIVE_URL % (urllib.quote(builder), urllib.quote(step))
+      response = urllib2.urlopen(url)
+      results = json.load(response)
+      if ('step_records' not in results.keys() or
+          len(results['step_records']) == 0):
+        continue
+      first_record = results['step_records'][0]
+      last_step_time = datetime.datetime.strptime(
+          first_record['step_start'], "%Y-%m-%dT%H:%M:%S.%f")
+      # Ignore steps that have not run in the last 2 days.
+      if last_step_time < threshold_time:
+        continue
+      url = STEP_STATS_URL % (urllib.quote(builder), urllib.quote(step))
+      response = urllib2.urlopen(url)
+      results = json.load(response)
+      step_timings.append(
+          [builder, step, results['count'], results['stddev'],
+           results['mean'], results['maximum'], results['median'],
+           results['seventyfive'], results['ninety'],
+           results['ninetynine']])
+    with open(outfilename, 'ab') as f:
+      writer = csv.writer(f)
+      writer.writerows(step_timings)
+
+
+if __name__ == '__main__':
+  main()
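The resulting CSV has one header row (col_names above) followed by one row per builder/step pair. A minimal sketch of consuming it follows; 'step_timings.csv' is simply an illustrative name for the <outfilename> argument.

    import csv

    # Column names come straight from col_names above: run_count, stddev,
    # mean, maximum, median, seventyfive, ninety, ninetynine.
    with open('step_timings.csv', 'rb') as f:
      for row in csv.DictReader(f):
        print row['builder'], row['step'], row['mean'], row['ninetynine']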
diff --git a/catapult/catapult_build/print_project_info b/catapult/catapult_build/print_project_info
new file mode 100755
index 0000000..79be6eb
--- /dev/null
+++ b/catapult/catapult_build/print_project_info
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+# Copyright (c) 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import sys
+import os
+import json
+
+"""
+Prints the source paths of the provided catapult project.
+
+This utility loads the specified x_project.py from one of our standard
+module folders, constructs its project module, and prints the source paths that
+it uses.
+
+This is used by node_bootstrap.js to load the tracing code into node.
+"""
+
+def _ToUpperCamelCase(name):
+  in_parts = name.split('_')
+  out_parts = []
+  for part in in_parts:
+    out_part = part[0].upper() + part[1:]
+    out_parts.append(out_part)
+  return ''.join(out_parts)
+
+def _RelPathToUnixPath(p):
+  return p.replace(os.sep, '/')
+
+def Main(args):
+  parser = argparse.ArgumentParser(
+      usage='%(prog)s project_name',
+      epilog='Prints the source paths for the provided catapult project\n')
+  parser.add_argument('--source-paths', action='store_true')
+  parser.add_argument('--headless-test-module-filenames', action='store_true')
+  parser.add_argument('project_name', nargs=1)
+  args = parser.parse_args(args)
+
+  catapult_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                 '..'))
+
+  project_name = args.project_name[0]
+  project_path = os.path.join(catapult_path, project_name)
+  sys.path.append(project_path)
+
+  project_module_name = project_name + '_project'
+  try:
+    project_module = __import__(project_module_name, fromlist=[True])
+  except:
+    sys.stderr.write('Could not import %s from %s' % (project_module_name,
+                                                      project_path))
+    return 1
+
+  project_module.UpdateSysPathIfNeeded()
+
+  class_name = _ToUpperCamelCase(project_name) + 'Project'
+
+  try:
+    project_class = project_module.__dict__[class_name]
+  except:
+    sys.stderr.write('Could not find %s in %s' % (class_name,
+                                                  project_module_name))
+    return 1
+
+  project = project_class()
+
+  if args.source_paths:
+    print json.dumps(project.source_paths)
+
+  if args.headless_test_module_filenames:
+    headless_test_module_filenames = ['/' + _RelPathToUnixPath(x)
+                              for x in project.FindAllD8TestModuleRelPaths()]
+    headless_test_module_filenames.sort()
+    print json.dumps(headless_test_module_filenames)
+
+
+if __name__ == '__main__':
+  sys.exit(Main(sys.argv[1:]))
\ No newline at end of file
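A quick way to exercise this helper from Python mirrors what node_bootstrap.js does with execFileSync. 'tracing' is used here only as an illustrative project name (its tracing_project.py module is referenced elsewhere in this change), and the command assumes the working directory is the catapult checkout root.

    import json
    import subprocess

    # Prints the source search paths that the tracing project exposes.
    out = subprocess.check_output(
        ['catapult_build/print_project_info', '--source-paths', 'tracing'])
    print '\n'.join(json.loads(out))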
diff --git a/catapult/catapult_build/repo_checks.py b/catapult/catapult_build/repo_checks.py
new file mode 100644
index 0000000..8fc765a
--- /dev/null
+++ b/catapult/catapult_build/repo_checks.py
@@ -0,0 +1,17 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Checks to use in PRESUBMIT.py for general repository violations."""
+
+
+def RunChecks(input_api, output_api):
+  orig_files = [f.LocalPath()
+                for f in input_api.AffectedFiles(include_deletes=False)
+                if f.LocalPath().endswith('.orig')]
+  if orig_files:
+    return [output_api.PresubmitError(
+        'Files with ".orig" suffix must not be checked into the '
+        'repository:\n  ' + '\n  '.join(orig_files))]
+  else:
+    return []
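repo_checks.RunChecks is intended to be invoked from a PRESUBMIT.py alongside the other checks. A hypothetical sketch of that wiring, following the _CommonChecks pattern used in catapult_build/PRESUBMIT.py above (the import assumes catapult_build is importable from the presubmit script; this is not a file in this change):

    # Hypothetical PRESUBMIT.py fragment; input_api and output_api are
    # supplied by depot_tools when the presubmit runs.
    from catapult_build import repo_checks


    def _CommonChecks(input_api, output_api):
      return repo_checks.RunChecks(input_api, output_api)


    def CheckChangeOnUpload(input_api, output_api):
      return _CommonChecks(input_api, output_api)


    def CheckChangeOnCommit(input_api, output_api):
      return _CommonChecks(input_api, output_api)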
diff --git a/catapult/catapult_build/run_dev_server_tests.py b/catapult/catapult_build/run_dev_server_tests.py
index 0b67bd1..ff7f6c4 100644
--- a/catapult/catapult_build/run_dev_server_tests.py
+++ b/catapult/catapult_build/run_dev_server_tests.py
@@ -19,6 +19,8 @@
 
 from hooks import install
 
+from catapult_base import xvfb
+
 # URL on omahaproxy.appspot.com which lists the current version for the os
 # and channel.
 VERSION_LOOKUP_URL = 'https://omahaproxy.appspot.com/all?os=%s&channel=%s'
@@ -50,8 +52,7 @@
         'omaha': 'linux',
         'prefix': 'Linux_x64',
         'zip_prefix': 'linux',
-        'chromepath': 'chrome-linux/chrome',
-        'use_xfvb': True,
+        'chromepath': 'chrome-linux/chrome'
     },
     'win32': {
         'omaha': 'win',
@@ -73,27 +74,6 @@
 }
 
 
-def StartXvfb():
-  display = ':99'
-  xvfb_command = [
-    'Xvfb',
-    display,
-    '-screen',
-    '0',
-    '1024x769x24',
-    '-ac'
-  ]
-  xvfb_process = subprocess.Popen(
-      xvfb_command, stdout=open(os.devnull), stderr=open(os.devnull))
-  time.sleep(0.2)
-  returncode = xvfb_process.poll()
-  if returncode is None:
-    os.environ['DISPLAY'] = display
-  else:
-    logging.error('Xvfb did not start, returncode: %s', returncode)
-  return xvfb_process
-
-
 def IsDepotToolsPath(path):
   return os.path.isfile(os.path.join(path, 'gclient'))
 
@@ -107,7 +87,7 @@
   # Check if depot_tools is in the path
   for path in os.environ['PATH'].split(os.pathsep):
     if IsDepotToolsPath(path):
-        return path.rstrip(os.sep)
+      return path.rstrip(os.sep)
 
   return None
 
@@ -126,12 +106,14 @@
   platform_data = PLATFORM_MAPPING[sys.platform]
   omaha_platform = platform_data['omaha']
   version_lookup_url = VERSION_LOOKUP_URL % (omaha_platform, channel)
-  response = urllib2.urlopen(version_lookup_url)
+  print 'Getting version from %s' % version_lookup_url
+  response = urllib2.urlopen(version_lookup_url, timeout=120)
   version = response.readlines()[1].split(',')[2]
 
   # Get the base position for that version from omahaproxy
   base_pos_lookup_url = BASE_POS_LOOKUP_URL % version
-  response = urllib2.urlopen(base_pos_lookup_url)
+  print 'Getting base_pos from %s' % base_pos_lookup_url
+  response = urllib2.urlopen(base_pos_lookup_url, timeout=120)
   base_pos = json.load(response)['chromium_base_position']
 
   # Find the build from that base position in cloud storage. If it's not found,
@@ -140,7 +122,8 @@
       platform_data['prefix'], base_pos)
   download_url = None
   while not download_url:
-    response = urllib2.urlopen(cloud_storage_lookup_url)
+    print 'Getting download url from %s' % cloud_storage_lookup_url
+    response = urllib2.urlopen(cloud_storage_lookup_url, timeout=120)
     prefixes = json.load(response).get('prefixes')
     if prefixes:
       download_url = CLOUD_STORAGE_DOWNLOAD_URL % (
@@ -157,10 +140,10 @@
   tmpdir = tempfile.mkdtemp()
   zip_path = os.path.join(tmpdir, 'chrome.zip')
   with open(zip_path, 'wb') as local_file:
-    local_file.write(urllib2.urlopen(download_url).read())
+    local_file.write(urllib2.urlopen(download_url, timeout=600).read())
   zf = zipfile.ZipFile(zip_path)
   zf.extractall(path=tmpdir)
-  return tmpdir, version
+  return tmpdir, version, download_url
 
 
 def GetLocalChromePath(path_from_command_line):
@@ -169,7 +152,7 @@
 
   if sys.platform == 'darwin':  # Mac
     chrome_path = (
-      '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome')
+        '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome')
     if os.path.isfile(chrome_path):
       return chrome_path
   elif sys.platform.startswith('linux'):
@@ -208,6 +191,8 @@
                         help='Set of tests to run (tracing or perf_insights)')
     parser.add_argument('--channel', type=str, default='stable',
                         help='Chrome channel to run (stable or canary)')
+    parser.add_argument('--presentation-json', type=str,
+                        help='Recipe presentation-json output file path')
     parser.set_defaults(install_hooks=True)
     parser.set_defaults(use_local_chrome=True)
     args = parser.parse_args(argv[1:])
@@ -218,6 +203,8 @@
     platform_data = PLATFORM_MAPPING[sys.platform]
     user_data_dir = tempfile.mkdtemp()
     tmpdir = None
+    xvfb_process = None
+
     server_path = os.path.join(os.path.dirname(
         os.path.abspath(__file__)), os.pardir, 'bin', 'run_dev_server')
     # TODO(anniesullie): Make OS selection of port work on Windows. See #1235.
@@ -227,7 +214,7 @@
       port = '0'
     server_command = [server_path, '--no-install-hooks', '--port', port]
     if sys.platform.startswith('win'):
-        server_command = ['python.exe'] + server_command
+      server_command = ['python.exe'] + server_command
     print "Starting dev_server..."
     server_process = subprocess.Popen(
         server_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
@@ -236,9 +223,8 @@
     if sys.platform != 'win32':
       output = server_process.stderr.readline()
       port = re.search(
-          'Now running on http://127.0.0.1:([\d]+)', output).group(1)
+          r'Now running on http://127.0.0.1:([\d]+)', output).group(1)
 
-    xvfb_process = None
     chrome_info = None
     if args.use_local_chrome:
       chrome_path = GetLocalChromePath(args.chrome_path)
@@ -251,9 +237,11 @@
       if sys.platform == 'linux2' and channel == 'canary':
         channel = 'dev'
       assert channel in ['stable', 'beta', 'dev', 'canary']
-      tmpdir, version = DownloadChromium(channel)
-      if platform_data.get('use_xfvb'):
-        xvfb_process = StartXvfb()
+
+
+      tmpdir, version, download_url = DownloadChromium(channel)
+      if xvfb.ShouldStartXvfb():
+        xvfb_process = xvfb.StartXvfb()
       chrome_path = os.path.join(
           tmpdir, platform_data['chromepath'])
       os.chmod(chrome_path, os.stat(chrome_path).st_mode | stat.S_IEXEC)
@@ -264,7 +252,7 @@
         contents = os.listdir(
             os.path.join(tmpdir, platform_data['version_path']))
         for path in contents:
-          if re.match('\d+\.\d+\.\d+\.\d+', path):
+          if re.match(r'\d+\.\d+\.\d+\.\d+', path):
             version = path
       if platform_data.get('additional_paths'):
         for path in platform_data.get('additional_paths'):
@@ -282,7 +270,7 @@
         '--noerrdialogs',
         '--window-size=1280,1024',
         ('http://localhost:%s/%s/tests.html?' % (port, args.tests)) +
-            'headless=true&testTypeToRun=all',
+        'headless=true&testTypeToRun=all',
     ]
     print "Starting Chrome %s..." % chrome_info
     chrome_process = subprocess.Popen(
@@ -304,6 +292,14 @@
       logging.error(server_out)
     else:
       print server_out
+    if args.presentation_json:
+      with open(args.presentation_json, 'w') as recipe_out:
+        # Add a link to the buildbot status for the step saying which version
+        # of Chrome the test ran on. The actual linking feature is not used,
+        # but there isn't a way to just add text.
+        link_name = 'Chrome Version %s' % version
+        presentation_info = {'links': {link_name: download_url}}
+        json.dump(presentation_info, recipe_out)
   finally:
     # Wait for Chrome to be killed before deleting temp Chrome dir. Only have
     # this timing issue on Windows.
@@ -314,8 +310,8 @@
         shutil.rmtree(tmpdir)
         shutil.rmtree(user_data_dir)
       except OSError as e:
-        logging.error('Error cleaning up temp dirs %s and %s: %s' % (
-            tmpdir, user_data_dir, e))
+        logging.error('Error cleaning up temp dirs %s and %s: %s',
+                      tmpdir, user_data_dir, e)
     if xvfb_process:
       xvfb_process.kill()
 
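
A minimal sketch of the presentation-json file that the new `--presentation-json` flag above produces; the version string, download URL, and output path here are hypothetical placeholders, not values taken from this change.

```
import json

# Hypothetical values standing in for the real Chrome version and download URL.
version = '55.0.2883.75'
download_url = 'https://example.com/chrome-linux.zip'

# Same shape as the presentation_info dict built in the script above.
presentation_info = {'links': {'Chrome Version %s' % version: download_url}}
with open('presentation.json', 'w') as recipe_out:
  json.dump(presentation_info, recipe_out)
# File contents (roughly):
# {"links": {"Chrome Version 55.0.2883.75": "https://example.com/chrome-linux.zip"}}
```
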
diff --git a/catapult/common/py_trace_event/README.txt b/catapult/common/py_trace_event/README.txt
new file mode 100644
index 0000000..2f0d33d
--- /dev/null
+++ b/catapult/common/py_trace_event/README.txt
@@ -0,0 +1,7 @@
+py_trace_event allows low-overhead instrumentation of a multi-threaded,
+multi-process application in order to study its global performance
+characteristics. It uses the trace event format used in Chromium/Chrome's
+about:tracing system.
+
+Trace files generated by py_trace_event can be viewed and manipulated by
+trace_event_viewer.
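
A minimal usage sketch of the API this change adds, based on the module docstring in trace_event.py below. It assumes py_trace_event and its catapult_base dependency are importable, and the trace file name is hypothetical.

```
# Assumes py_trace_event and catapult_base are on sys.path; 'example.trace' is
# a hypothetical output file name.
from py_trace_event import trace_event

if trace_event.trace_can_enable():
  trace_event.trace_enable('example.trace')

@trace_event.traced
def do_work():
  # Nested region recorded with the trace() context manager.
  with trace_event.trace('inner_step', detail='example'):
    pass

do_work()
trace_event.trace_flush()   # write buffered events to disk
trace_event.trace_disable()
```
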
diff --git a/catapult/common/py_trace_event/py_trace_event/__init__.py b/catapult/common/py_trace_event/py_trace_event/__init__.py
new file mode 100644
index 0000000..1ddf31d
--- /dev/null
+++ b/catapult/common/py_trace_event/py_trace_event/__init__.py
@@ -0,0 +1,7 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import os
+import sys
+
+
+sys.path.append(
+    os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.pardir,
+                 os.path.pardir, os.path.pardir, 'catapult_base'))
diff --git a/catapult/common/py_trace_event/py_trace_event/run_tests b/catapult/common/py_trace_event/py_trace_event/run_tests
new file mode 100755
index 0000000..7f9673d
--- /dev/null
+++ b/catapult/common/py_trace_event/py_trace_event/run_tests
@@ -0,0 +1,163 @@
+#!/usr/bin/env python
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import logging
+import optparse
+import os
+import platform
+import re
+import sys
+import types
+import traceback
+import unittest
+
+
+def discover(dir, filters):
+  if hasattr(unittest.TestLoader, 'discover'):
+    return unittest.TestLoader().discover(dir, '*')
+
+  # poor man's unittest.discover
+  loader = unittest.TestLoader()
+  subsuites = []
+
+  for (dirpath, dirnames, filenames) in os.walk(dir):
+    for filename in [x for x in filenames if re.match(r'.*_test\.py$', x)]:
+      if filename.startswith('.') or filename.startswith('_'):
+        continue
+      fqn = dirpath.replace(
+          '/', '.') + '.' + re.match(r'(.+)\.py$', filename).group(1)
+
+      # load the test
+      try:
+        module = __import__(fqn, fromlist=[True])
+      except:
+        print "While importing [%s]\n" % fqn
+        traceback.print_exc()
+        continue
+
+      def test_is_selected(name):
+        for f in filters:
+          if re.search(f, name):
+            return True
+        return False
+
+      if hasattr(module, 'suite'):
+        base_suite = module.suite()
+      else:
+        base_suite = loader.loadTestsFromModule(module)
+      new_suite = unittest.TestSuite()
+      for t in base_suite:
+        if isinstance(t, unittest.TestSuite):
+          for i in t:
+            if test_is_selected(i.id()):
+              new_suite.addTest(i)
+        elif isinstance(t, unittest.TestCase):
+          if test_is_selected(t.id()):
+            new_suite.addTest(t)
+        else:
+          raise Exception("Wtf, expected TestSuite or TestCase, got %s" % t)
+
+      if new_suite.countTestCases():
+        subsuites.append(new_suite)
+
+  return unittest.TestSuite(subsuites)
+
+
+def main():
+  parser = optparse.OptionParser()
+  parser.add_option(
+      '-v', '--verbose', action='count', default=0,
+      help='Increase verbosity level (repeat as needed)')
+  parser.add_option('--debug', dest='debug', action='store_true', default=False,
+                    help='Break into pdb when an assertion fails')
+  parser.add_option('--incremental', dest='incremental', action='store_true',
+                    default=False, help='Run tests one at a time.')
+  parser.add_option('--stop', dest='stop_on_error', action='store_true',
+                    default=False, help='Stop running tests on error.')
+  (options, args) = parser.parse_args()
+
+  if options.verbose >= 2:
+    logging.basicConfig(level=logging.DEBUG)
+  elif options.verbose:
+    logging.basicConfig(level=logging.INFO)
+  else:
+    logging.basicConfig(level=logging.WARNING)
+
+  # install hook on set_trace if --debug
+  if options.debug:
+    import exceptions
+    class DebuggingAssertionError(exceptions.AssertionError):
+      def __init__(self, *args):
+        exceptions.AssertionError.__init__(self, *args)
+        print "Assertion failed, entering PDB..."
+        import pdb
+        if hasattr(sys, '_getframe'):
+          pdb.Pdb().set_trace(sys._getframe().f_back.f_back)
+        else:
+          pdb.set_trace()
+    unittest.TestCase.failureException = DebuggingAssertionError
+
+    def hook(*args):
+      import traceback, pdb
+      traceback.print_exception(*args)
+      pdb.pm()
+    sys.excepthook = hook
+
+    import browser
+    browser.debug_mode = True
+
+  else:
+    def hook(exc, value, tb):
+      import traceback
+      if not str(value).startswith("_noprint"):
+        traceback.print_exception(exc, value, tb)
+      import src.message_loop
+      if src.message_loop.is_main_loop_running():
+        if not str(value).startswith("_noprint"):
+          print "Untrapped exception! Exiting message loop with exception."
+        src.message_loop.quit_main_loop(quit_with_exception=True)
+
+    sys.excepthook = hook
+
+  # make sure cwd is the base directory!
+  os.chdir(os.path.dirname(__file__))
+
+  if len(args) > 0:
+    suites = discover('trace_event_impl', args)
+  else:
+    suites = discover('trace_event_impl', ['.*'])
+
+  r = unittest.TextTestRunner()
+  if not options.incremental:
+    res = r.run(suites)
+    if res.wasSuccessful():
+      return 0
+    return 255
+  else:
+    ok = True
+    for s in suites:
+      if isinstance(s, unittest.TestSuite):
+        for t in s:
+          print '--------------------------------------------------------------'
+          print 'Running %s' % str(t)
+          res = r.run(t)
+          if not res.wasSuccessful():
+            ok = False
+            if options.stop_on_error:
+              break
+        if not ok and options.stop_on_error:
+          break
+      else:
+        res = r.run(s)
+        if not res.wasSuccessful():
+          ok = False
+          if options.stop_on_error:
+            break
+    if ok:
+      return 0
+    return 255
+
+
+if __name__ == "__main__":
+  sys.exit(main())
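
A small sketch of the filter matching that the fallback discover() above applies to test ids; the filter patterns and test ids shown are hypothetical.

```
import re

# Hypothetical filters and test ids; a test is kept if any filter regex
# matches its id, mirroring test_is_selected() above.
filters = ['log_io', 'decorators']
test_ids = [
    'trace_event_impl.log_io_test.LogIOTest.test_enable_with_file',
    'trace_event_impl.trace_test.TraceTest.tearDown',
]

def test_is_selected(name):
  return any(re.search(f, name) for f in filters)

selected = [t for t in test_ids if test_is_selected(t)]
# selected == ['trace_event_impl.log_io_test.LogIOTest.test_enable_with_file']
```
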
diff --git a/catapult/common/py_trace_event/py_trace_event/setup.py b/catapult/common/py_trace_event/py_trace_event/setup.py
new file mode 100644
index 0000000..0b0070a
--- /dev/null
+++ b/catapult/common/py_trace_event/py_trace_event/setup.py
@@ -0,0 +1,12 @@
+#!/usr/bin/env python
+# Copyright 2011 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+from distutils.core import setup
+setup(
+    name='py_trace_event',
+    packages=['trace_event_impl'],
+    version='0.1.0',
+    description='Performance tracing for python',
+    author='Nat Duca'
+)
diff --git a/catapult/common/py_trace_event/py_trace_event/trace_event.py b/catapult/common/py_trace_event/py_trace_event/trace_event.py
new file mode 100644
index 0000000..dc8501d
--- /dev/null
+++ b/catapult/common/py_trace_event/py_trace_event/trace_event.py
@@ -0,0 +1,258 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+r"""Instrumentation-based profiling for Python.
+
+trace_event allows you to hand-instrument your code with areas of interest.
+When enabled, trace_event logs the start and stop times of these events to a
+logfile. These resulting logfiles can be viewed with either Chrome's
+about:tracing UI or with the standalone trace_event_viewer available at
+  http://www.github.com/natduca/trace_event_viewer/
+
+To use trace_event, call trace_enable and start instrumenting your code:
+   from trace_event import *
+
+   if "--trace" in sys.argv:
+     trace_enable("myfile.trace")
+
+   @traced
+   def foo():
+     ...
+
+   class MyFoo(object):
+     @traced
+     def bar(self):
+       ...
+
+trace_event records trace events to an in-memory buffer. If your application is
+long running and you want to see the results of a trace before it exits, you can
+call trace_flush to write any in-memory events to disk.
+
+To help integrate trace_event into existing codebases that don't want to add
+trace_event as a dependency, trace_event is split into an import shim
+(trace_event.py) and an implementation (trace_event_impl/*). You can copy the
+shim, trace_event.py, directly into your codebase. If trace_event_impl is not
+found, the shim simply no-ops.
+
+trace_event is safe with regard to Python threads. Simply trace as you normally
+would and each thread's timing will show up in the trace file.
+
+Multiple processes can safely output into a single trace_event logfile. If you
+fork after enabling tracing, the child process will continue outputting to the
+logfile. Use of the multiprocessing module will work as well. In both cases,
+however, note that disabling tracing in the parent process will not stop tracing
+in the child processes.
+"""
+
+from py_trace_event import trace_time
+
+try:
+  import trace_event_impl
+except ImportError:
+  trace_event_impl = None
+
+
+def trace_can_enable():
+  """
+  Returns True if a trace_event_impl was found. If false,
+  trace_enable will fail. Regular tracing methods, including
+  trace_begin and trace_end, will simply be no-ops.
+  """
+  return trace_event_impl is not None
+
+if trace_event_impl:
+  import time
+
+
+  def trace_is_enabled():
+    return trace_event_impl.trace_is_enabled()
+
+  def trace_enable(logfile):
+    return trace_event_impl.trace_enable(logfile)
+
+  def trace_disable():
+    return trace_event_impl.trace_disable()
+
+  def trace_flush():
+    trace_event_impl.trace_flush()
+
+  def trace_begin(name, **kwargs):
+    args_to_log = {key: repr(value) for key, value in kwargs.iteritems()}
+    trace_event_impl.add_trace_event("B", trace_time.Now(), "python", name,
+                                     args_to_log)
+
+  def trace_end(name):
+    trace_event_impl.add_trace_event("E", trace_time.Now(), "python", name)
+
+  def trace(name, **kwargs):
+    return trace_event_impl.trace(name, **kwargs)
+
+  def traced(fn):
+    return trace_event_impl.traced(fn)
+
+  def clock_sync(sync_id, issue_ts=None):
+    time_stamp = trace_time.Now()
+    args_to_log = {'sync_id': sync_id}
+    if issue_ts:  # Issuer if issue_ts is set, else receiver.
+      assert issue_ts <= time_stamp
+      # Convert to right units for ts.
+      args_to_log['issue_ts'] = issue_ts
+    trace_event_impl.add_trace_event(
+        "c", time_stamp, "python", "clock_sync", args_to_log)
+
+  def is_tracing_controllable():
+    return trace_event_impl.is_tracing_controllable()
+
+else:
+  import contextlib
+
+  class TraceException(Exception):
+    # Normally provided by trace_event_impl; defined here so that the shim's
+    # trace_enable can still raise it when no implementation is available.
+    pass
+
+  def trace_enable(log_file=None):
+    del log_file # unused.
+    raise TraceException(
+        "Cannot enable trace_event. No trace_event_impl module found.")
+
+  def trace_disable():
+    pass
+
+  def trace_is_enabled():
+    return False
+
+  def trace_flush():
+    pass
+
+  def trace_begin(name, **kwargs):
+    del name # unused.
+    del kwargs # unused.
+    pass
+
+  def trace_end(name):
+    del name # unused.
+    pass
+
+  @contextlib.contextmanager
+  def trace(name, **kwargs):
+    del name # unused
+    del kwargs # unused
+    yield
+
+  def traced(fn):
+    return fn
+
+  def clock_sync(sync_id, issue_ts=None):
+    del sync_id # unused.
+    pass
+
+  def is_tracing_controllable():
+    return False
+
+trace_enable.__doc__ = """Enables tracing.
+
+  Once enabled, the enabled bit propagates to forked processes and
+  multiprocessing subprocesses. Regular child processes, e.g. those created via
+  os.system/popen, or subprocess.Popen instances, will not get traced. You can,
+  however, enable tracing on those subprocesses manually.
+
+  Trace files are multiprocess safe, so you can have multiple processes
+  outputting to the same tracelog at once.
+
+  log_file can be one of three things:
+
+    None: a logfile is opened based on sys.argv[0], namely
+          "./" + sys.argv[0] + ".json"
+
+    string: a logfile of the given name is opened.
+
+    file-like object: its fileno() is used. The underlying file descriptor
+                      must support fcntl.lockf() operations.
+  """
+
+trace_disable.__doc__ = """Disables tracing, if enabled.
+
+  Will not disable tracing on any existing child processes that were forked
+  from this process. You must disable them yourself.
+  """
+
+trace_flush.__doc__ = """Flushes any currently-recorded trace data to disk.
+
+  trace_event records traces into an in-memory buffer for efficiency. Flushing
+  is only done at process exit or when this method is called.
+  """
+
+trace_is_enabled.__doc__ = """Returns whether tracing is enabled.
+  """
+
+trace_begin.__doc__ = """Records the beginning of an event of the given name.
+
+  The building block for performance tracing. A typical example is:
+     from trace_event import *
+     def something_heavy():
+        trace_begin("something_heavy")
+
+        trace_begin("read")
+        try:
+          lines = open().readlines()
+        finally:
+          trace_end("read")
+
+        trace_begin("parse")
+        try:
+          parse(lines)
+        finally:
+          trace_end("parse")
+
+        trace_end("something_heavy")
+
+  Note that a trace_end call must be issued for every trace_begin call. When
+  tracing around blocks that might throw exceptions, you should use the trace
+  function, or a try-finally pattern to ensure that the trace_end method is
+  called.
+
+  See the documentation for the @traced decorator for a simpler way to
+  instrument functions and methods.
+  """
+
+trace_end.__doc__ = """Records the end of an event of the given name.
+
+  See the documentation for trace_begin for more information.
+
+  Make sure to issue a trace_end for every trace_begin issued. Failure to pair
+  these calls will lead to bizarrely tall looking traces in the
+  trace_event_viewer UI.
+  """
+
+trace.__doc__ = """Traces a block of code using a with statement.
+
+  Example usage:
+    from trace_event import *
+    def something_heavy(lines):
+      with trace("parse_lines", lines=lines):
+        parse(lines)
+
+  If tracing an entire function call, prefer the @traced decorator.
+  """
+
+traced.__doc__ = """
+  Traces the provided function, using the function name for the actual generated
+  event.
+
+  Prefer this decorator over the explicit trace_begin and trace_end functions
+  whenever you are tracing the start and stop of a function. It automatically
+  issues trace_begin/end events, even when the wrapped function throws.
+
+  You can also pass the function's argument names to traced, and the argument
+  values will be added to the trace. Example usage:
+    from trace_event import *
+    @traced("url")
+    def send_request(url):
+      urllib2.urlopen(url).read()
+  """
+
+clock_sync.__doc__ = """
+  Issues a clock sync marker event.
+
+  Clock sync markers are used to synchronize the clock domains of different
+  traces so that they can be used together. It takes a sync_id and, if the
+  caller is the issuer of the clock sync event, an issue_ts. The issue_ts is
+  the timestamp from when the clock sync was first issued; it is used to
+  calculate the time difference between clock domains.
+  """
diff --git a/catapult/common/py_trace_event/py_trace_event/trace_event_impl/__init__.py b/catapult/common/py_trace_event/py_trace_event/trace_event_impl/__init__.py
new file mode 100644
index 0000000..21b0217
--- /dev/null
+++ b/catapult/common/py_trace_event/py_trace_event/trace_event_impl/__init__.py
@@ -0,0 +1,6 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+from log import *
+from decorators import *
+import multiprocessing_shim
diff --git a/catapult/common/py_trace_event/py_trace_event/trace_event_impl/decorators.py b/catapult/common/py_trace_event/py_trace_event/trace_event_impl/decorators.py
new file mode 100644
index 0000000..ab1463d
--- /dev/null
+++ b/catapult/common/py_trace_event/py_trace_event/trace_event_impl/decorators.py
@@ -0,0 +1,86 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import contextlib
+import inspect
+import time
+import functools
+
+import log
+
+
+@contextlib.contextmanager
+def trace(name, **kwargs):
+  category = "python"
+  start = time.time()
+  args_to_log = {key: repr(value) for key, value in kwargs.iteritems()}
+  log.add_trace_event("B", start, category, name, args_to_log)
+  try:
+    yield
+  finally:
+    end = time.time()
+    log.add_trace_event("E", end, category, name)
+
+def traced(*args):
+  def get_wrapper(func):
+    if inspect.isgeneratorfunction(func):
+      raise Exception("Can not trace generators.")
+
+    category = "python"
+
+    arg_spec = inspect.getargspec(func)
+    is_method = arg_spec.args and arg_spec.args[0] == "self"
+
+    def arg_spec_tuple(name):
+      arg_index = arg_spec.args.index(name)
+      defaults_length = len(arg_spec.defaults) if arg_spec.defaults else 0
+      default_index = arg_index + defaults_length - len(arg_spec.args)
+      if default_index >= 0:
+        default = arg_spec.defaults[default_index]
+      else:
+        default = None
+      return (name, arg_index, default)
+
+    args_to_log = map(arg_spec_tuple, arg_names)
+
+    @functools.wraps(func)
+    def traced_function(*args, **kwargs):
+      # Everything outside traced_function is done at decoration-time.
+      # Everything inside traced_function is done at run-time and must be fast.
+      if not log._enabled:  # This check must be at run-time.
+        return func(*args, **kwargs)
+
+      def get_arg_value(name, index, default):
+        if name in kwargs:
+          return kwargs[name]
+        elif index < len(args):
+          return args[index]
+        else:
+          return default
+
+      if is_method:
+        name = "%s.%s" % (args[0].__class__.__name__, func.__name__)
+      else:
+        name = "%s.%s" % (func.__module__, func.__name__)
+
+      # Be sure to repr before calling func. Argument values may change.
+      arg_values = {
+          name: repr(get_arg_value(name, index, default))
+          for name, index, default in args_to_log}
+
+      start = time.time()
+      log.add_trace_event("B", start, category, name, arg_values)
+      try:
+        return func(*args, **kwargs)
+      finally:
+        end = time.time()
+        log.add_trace_event("E", end, category, name)
+    return traced_function
+
+  no_decorator_arguments = len(args) == 1 and callable(args[0])
+  if no_decorator_arguments:
+    arg_names = ()
+    return get_wrapper(args[0])
+  else:
+    arg_names = args
+    return get_wrapper
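
A sketch of the decorator defined above, used both bare and with argument names; the functions are hypothetical, and it assumes tracing has been enabled and that catapult_base is importable.

```
from py_trace_event.trace_event_impl import decorators

# Hypothetical functions. With tracing enabled, each call emits a "B"/"E"
# event pair named "<module>.<function>" (or "<Class>.<method>" for methods).
@decorators.traced
def simple():
  return 1

# Argument names passed to traced() are repr()'d into the "B" event's args.
@decorators.traced('url', 'retries')
def fetch(url, retries=3):
  return url, retries

simple()
fetch('http://example.com')  # args logged: url="'http://example.com'", retries='3'
```
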
diff --git a/catapult/common/py_trace_event/py_trace_event/trace_event_impl/decorators_test.py b/catapult/common/py_trace_event/py_trace_event/trace_event_impl/decorators_test.py
new file mode 100644
index 0000000..5bb13ad
--- /dev/null
+++ b/catapult/common/py_trace_event/py_trace_event/trace_event_impl/decorators_test.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import decorators
+import logging
+import unittest
+
+from trace_test import TraceTest
+#from .trace_test import TraceTest
+
+def generator():
+  yield 1
+  yield 2
+
+class DecoratorTests(unittest.TestCase):
+  def test_tracing_object_fails(self):
+    self.assertRaises(Exception, lambda: decorators.trace(1))
+    self.assertRaises(Exception, lambda: decorators.trace(""))
+    self.assertRaises(Exception, lambda: decorators.trace([]))
+
+  def test_tracing_generators_fail(self):
+    self.assertRaises(Exception, lambda: decorators.trace(generator))
+
+class ClassToTest(object):
+  @decorators.traced
+  def method1(self):
+    return 1
+
+  @decorators.traced
+  def method2(self):
+    return 1
+
+@decorators.traced
+def traced_func():
+  return 1
+
+class DecoratorTests(TraceTest):
+  def _get_decorated_method_name(self, f):
+    res = self.go(f)
+    events = res.findEventsOnThread(res.findThreadIds()[0])
+
+    # Sanity checks.
+    self.assertEquals(2, len(events))
+    self.assertEquals(events[0]["name"], events[1]["name"])
+    return events[1]["name"]
+
+
+  def test_func_names_work(self):
+    self.assertEquals('__main__.traced_func',
+                      self._get_decorated_method_name(traced_func))
+
+  def test_method_names_work(self):
+    ctt = ClassToTest()
+    self.assertEquals('ClassToTest.method1',
+                      self._get_decorated_method_name(ctt.method1))
+    self.assertEquals('ClassToTest.method2',
+                      self._get_decorated_method_name(ctt.method2))
+
+if __name__ == '__main__':
+  logging.getLogger().setLevel(logging.DEBUG)
+  unittest.main(verbosity=2)
diff --git a/catapult/common/py_trace_event/py_trace_event/trace_event_impl/log.py b/catapult/common/py_trace_event/py_trace_event/trace_event_impl/log.py
new file mode 100644
index 0000000..7cb7222
--- /dev/null
+++ b/catapult/common/py_trace_event/py_trace_event/trace_event_impl/log.py
@@ -0,0 +1,177 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import atexit
+import json
+import os
+import sys
+import time
+import threading
+
+from catapult_base import lock
+
+
+_lock = threading.Lock()
+
+_enabled = False
+_log_file = None
+
+_cur_events = [] # events that have yet to be buffered
+
+_tls = threading.local() # tls used to detect forking/etc
+_atexit_registered_for_pid = None
+
+_control_allowed = True
+
+
+class TraceException(Exception):
+  pass
+
+def _note(msg, *args):
+  pass
+#  print "%i: %s" % (os.getpid(), msg)
+
+
+def _locked(fn):
+  def locked_fn(*args,**kwargs):
+    _lock.acquire()
+    try:
+      ret = fn(*args,**kwargs)
+    finally:
+      _lock.release()
+    return ret
+  return locked_fn
+
+def _disallow_tracing_control():
+  global _control_allowed
+  _control_allowed = False
+
+def trace_enable(log_file=None):
+  _trace_enable(log_file)
+
+@_locked
+def _trace_enable(log_file=None):
+  global _enabled
+  if _enabled:
+    raise TraceException("Already enabled")
+  if not _control_allowed:
+    raise TraceException("Tracing control not allowed in child processes.")
+  _enabled = True
+  global _log_file
+  if log_file is None:
+    if sys.argv[0] == '':
+      n = 'trace_event'
+    else:
+      n = sys.argv[0]
+    log_file = open("%s.json" % n, "ab", False)
+    _note("trace_event: tracelog name is %s.json" % n)
+  elif isinstance(log_file, basestring):
+    _note("trace_event: tracelog name is %s" % log_file)
+    log_file = open("%s" % log_file, "ab", False)
+  elif not hasattr(log_file, 'fileno'):
+    raise TraceException(
+        "Log file must be None, a string, or file-like object with a fileno()")
+
+  _log_file = log_file
+  with lock.FileLock(_log_file, lock.LOCK_EX):
+    _log_file.seek(0, os.SEEK_END)
+
+    lastpos = _log_file.tell()
+    creator = lastpos == 0
+    if creator:
+      _note("trace_event: Opened new tracelog, lastpos=%i", lastpos)
+      _log_file.write('[')
+
+      tid = threading.current_thread().ident
+      if not tid:
+        tid = os.getpid()
+      x = {"ph": "M", "category": "process_argv",
+           "pid": os.getpid(), "tid": threading.current_thread().ident,
+           "ts": time.time(),
+           "name": "process_argv", "args": {"argv": sys.argv}}
+      _log_file.write("%s\n" % json.dumps(x))
+    else:
+      _note("trace_event: Opened existing tracelog")
+    _log_file.flush()
+
+@_locked
+def trace_flush():
+  if _enabled:
+    _flush()
+
+@_locked
+def trace_disable():
+  global _enabled
+  if not _control_allowed:
+    raise TraceException("Tracing control not allowed in child processes.")
+  if not _enabled:
+    return
+  _enabled = False
+  _flush(close=True)
+
+def _flush(close=False):
+  global _log_file
+  with lock.FileLock(_log_file, lock.LOCK_EX):
+    _log_file.seek(0, os.SEEK_END)
+    if len(_cur_events):
+      _log_file.write(",\n")
+      _log_file.write(",\n".join([json.dumps(e) for e in _cur_events]))
+      del _cur_events[:]
+
+    if close:
+      # We might not be the only process writing to this logfile. So,
+      # we will simply close the file rather than writing the trailing ] that
+      # it technically requires. The trace viewer understands that this may
+      # happen and will insert a trailing ] during loading.
+      pass
+    _log_file.flush()
+
+  if close:
+    _note("trace_event: Closed")
+    _log_file.close()
+    _log_file = None
+  else:
+    _note("trace_event: Flushed")
+
+@_locked
+def trace_is_enabled():
+  return _enabled
+
+@_locked
+def add_trace_event(ph, ts, category, name, args=None):
+  global _enabled
+  if not _enabled:
+    return
+  if not hasattr(_tls, 'pid') or _tls.pid != os.getpid():
+    _tls.pid = os.getpid()
+    global _atexit_registered_for_pid
+    if _tls.pid != _atexit_registered_for_pid:
+      _atexit_registered_for_pid = _tls.pid
+      atexit.register(_trace_disable_atexit)
+      _tls.pid = os.getpid()
+      del _cur_events[:] # we forked, clear the event buffer!
+    tid = threading.current_thread().ident
+    if not tid:
+      tid = os.getpid()
+    _tls.tid = tid
+
+  _cur_events.append({"ph": ph,
+                      "category": category,
+                      "pid": _tls.pid,
+                      "tid": _tls.tid,
+                      "ts": ts,
+                      "name": name,
+                      "args": args or {}});
+
+def trace_begin(name, args=None):
+  add_trace_event("B", time.time(), "python", name, args)
+
+def trace_end(name, args=None):
+  add_trace_event("E", time.time(), "python", name, args)
+
+def _trace_disable_atexit():
+  trace_disable()
+
+def is_tracing_controllable():
+  global _control_allowed
+  return _control_allowed
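
A sketch of reading back a tracelog written by the module above; the log path is hypothetical. The writer deliberately omits the trailing ']', so a forgiving parse appends one, as the unit tests in this change do.

```
import json

# 'example.trace.json' is a hypothetical path to a log written by this module.
with open('example.trace.json', 'r') as f:
  events = json.loads(f.read() + ']')

# Each record has the fields added by add_trace_event():
#   ph ("B"/"E"/"M"/...), category, pid, tid, ts, name, args.
begin_events = [e for e in events if e['ph'] == 'B']
```
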
diff --git a/catapult/common/py_trace_event/py_trace_event/trace_event_impl/log_io_test.py b/catapult/common/py_trace_event/py_trace_event/trace_event_impl/log_io_test.py
new file mode 100644
index 0000000..99a0621
--- /dev/null
+++ b/catapult/common/py_trace_event/py_trace_event/trace_event_impl/log_io_test.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import logging
+import os
+import sys
+import tempfile
+import unittest
+
+from log import *
+from parsed_trace_events import *
+
+
+class LogIOTest(unittest.TestCase):
+  def test_enable_with_file(self):
+    file = tempfile.NamedTemporaryFile()
+    trace_enable(open(file.name, 'w+'))
+    trace_disable()
+    e = ParsedTraceEvents(trace_filename = file.name)
+    file.close()
+    self.assertTrue(len(e) > 0)
+
+  def test_enable_with_filename(self):
+    file = tempfile.NamedTemporaryFile()
+    trace_enable(file.name)
+    trace_disable()
+    e = ParsedTraceEvents(trace_filename = file.name)
+    file.close()
+    self.assertTrue(len(e) > 0)
+
+  def test_enable_with_implicit_filename(self):
+    expected_filename = "%s.json" % sys.argv[0]
+    def do_work():
+      file = tempfile.NamedTemporaryFile()
+      trace_enable()
+      trace_disable()
+      e = ParsedTraceEvents(trace_filename = expected_filename)
+      file.close()
+      self.assertTrue(len(e) > 0)
+    try:
+      do_work()
+    finally:
+      if os.path.exists(expected_filename):
+        os.unlink(expected_filename)
+
+if __name__ == '__main__':
+  logging.getLogger().setLevel(logging.DEBUG)
+  unittest.main(verbosity=2)
+
diff --git a/catapult/common/py_trace_event/py_trace_event/trace_event_impl/multiprocessing_shim.py b/catapult/common/py_trace_event/py_trace_event/trace_event_impl/multiprocessing_shim.py
new file mode 100644
index 0000000..9796bdf
--- /dev/null
+++ b/catapult/common/py_trace_event/py_trace_event/trace_event_impl/multiprocessing_shim.py
@@ -0,0 +1,92 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import multiprocessing
+import log
+import time
+
+
+_RealProcess = multiprocessing.Process
+__all__ = []
+
+
+class ProcessSubclass(_RealProcess):
+  def __init__(self, shim, *args, **kwargs):
+    _RealProcess.__init__(self, *args, **kwargs)
+    self._shim = shim
+
+  def run(self, *args, **kwargs):
+    log._disallow_tracing_control()
+    try:
+      r = _RealProcess.run(self, *args, **kwargs)
+    finally:
+      if log.trace_is_enabled():
+        log.trace_flush() # todo, reduce need for this...
+    return r
+
+class ProcessShim(object):
+  def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
+    self._proc = ProcessSubclass(self, group, target, name, args, kwargs)
+    # hint to testing code that the shimming worked
+    self._shimmed_by_trace_event = True
+
+  def run(self):
+    self._proc.run()
+
+  def start(self):
+    self._proc.start()
+
+  def terminate(self):
+    if log.trace_is_enabled():
+      # give the flush a chance to finish --> TODO: find some other way.
+      time.sleep(0.25)
+    self._proc.terminate()
+
+  def join(self, timeout=None):
+    self._proc.join(timeout)
+
+  def is_alive(self):
+    return self._proc.is_alive()
+
+  @property
+  def name(self):
+    return self._proc.name
+
+  @name.setter
+  def name(self, name):
+    self._proc.name = name
+
+  @property
+  def daemon(self):
+    return self._proc.daemon
+
+  @daemon.setter
+  def daemon(self, daemonic):
+    self._proc.daemon = daemonic
+
+  @property
+  def authkey(self):
+    return self._proc._authkey
+
+  @authkey.setter
+  def authkey(self, authkey):
+    self._proc.authkey = multiprocessing.process.AuthenticationString(authkey)
+
+  @property
+  def exitcode(self):
+    return self._proc.exitcode
+
+  @property
+  def ident(self):
+    return self._proc.ident
+
+  @property
+  def pid(self):
+    return self._proc.pid
+
+  def __repr__(self):
+    return self._proc.__repr__()
+
+# Monkeypatch in our process replacement.
+if multiprocessing.Process != ProcessShim:
+  multiprocessing.Process = ProcessShim
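
A sketch of the shim's effect once trace_event (and therefore this module) has been imported; the log path is hypothetical.

```
import multiprocessing

from py_trace_event import trace_event  # importing the impl installs the shim


def child():
  # Tracing control is disallowed here (trace_disable() would raise), but
  # regular events from the child still land in the shared logfile.
  with trace_event.trace('child_event'):
    pass


if __name__ == '__main__':
  trace_event.trace_enable('multi.trace')    # hypothetical log path
  p = multiprocessing.Process(target=child)  # actually a ProcessShim
  assert hasattr(p, '_shimmed_by_trace_event')
  p.start()
  p.join()
  trace_event.trace_disable()
```
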
diff --git a/catapult/common/py_trace_event/py_trace_event/trace_event_impl/parsed_trace_events.py b/catapult/common/py_trace_event/py_trace_event/trace_event_impl/parsed_trace_events.py
new file mode 100644
index 0000000..fdc7514
--- /dev/null
+++ b/catapult/common/py_trace_event/py_trace_event/trace_event_impl/parsed_trace_events.py
@@ -0,0 +1,98 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import math
+import json
+
+
+class ParsedTraceEvents(object):
+  def __init__(self, events = None, trace_filename = None):
+    """
+    Utility class for filtering and manipulating trace data.
+
+    events -- An iterable object containing trace events
+    trace_filename -- Path to a file that contains a complete trace.
+
+    """
+    if trace_filename and events:
+      raise Exception("Provide either a trace file or event list")
+    if not trace_filename and events is None:
+      raise Exception("Provide either a trace file or event list")
+
+    if trace_filename:
+      f = open(trace_filename, 'r')
+      t = f.read()
+      f.close()
+
+      # If the event data begins with a [, then we know it should end with a ].
+      # The reason we check for this is because some tracing implementations
+      # cannot guarantee that a ']' gets written to the trace file. So, we are
+      # forgiving and if this is obviously the case, we fix it up before
+      # throwing the string at JSON.parse.
+      if t[0] == '[':
+        n = len(t)
+        if t[n - 1] != ']' and t[n - 1] != '\n':
+          t = t + ']'
+        elif t[n - 2] != ']' and t[n - 1] == '\n':
+          t = t + ']'
+        elif t[n - 3] != ']' and t[n - 2] == '\r' and t[n - 1] == '\n':
+          t = t + ']'
+
+      try:
+        events = json.loads(t)
+      except ValueError:
+        raise Exception("Corrupt trace, did not parse. Value: %s" % t)
+
+      if 'traceEvents' in events:
+        events = events['traceEvents']
+
+    if not hasattr(events, '__iter__'):
+      raise Exception('events must be iterable.')
+    self.events = events
+    self.pids = None
+    self.tids = None
+
+  def __len__(self):
+    return len(self.events)
+
+  def __getitem__(self, i):
+    return self.events[i]
+
+  def __setitem__(self, i, v):
+    self.events[i] = v
+
+  def __repr__(self):
+    return "[%s]" % ",\n ".join([repr(e) for e in self.events])
+
+  def findProcessIds(self):
+    if self.pids:
+      return self.pids
+    pids = set()
+    for e in self.events:
+      if "pid" in e and e["pid"]:
+        pids.add(e["pid"])
+    self.pids = list(pids)
+    return self.pids
+
+  def findThreadIds(self):
+    if self.tids:
+      return self.tids
+    tids = set()
+    for e in self.events:
+      if "tid" in e and e["tid"]:
+        tids.add(e["tid"])
+    self.tids = list(tids)
+    return self.tids
+
+  def findEventsOnProcess(self, pid):
+    return ParsedTraceEvents([e for e in self.events if e["pid"] == pid])
+
+  def findEventsOnThread(self, tid):
+    return ParsedTraceEvents(
+        [e for e in self.events if e["ph"] != "M" and e["tid"] == tid])
+
+  def findByPhase(self, ph):
+    return ParsedTraceEvents([e for e in self.events if e["ph"] == ph])
+
+  def findByName(self, n):
+    return ParsedTraceEvents([e for e in self.events if e["name"] == n])
diff --git a/catapult/common/py_trace_event/py_trace_event/trace_event_impl/trace_test.py b/catapult/common/py_trace_event/py_trace_event/trace_event_impl/trace_test.py
new file mode 100644
index 0000000..7047e0e
--- /dev/null
+++ b/catapult/common/py_trace_event/py_trace_event/trace_event_impl/trace_test.py
@@ -0,0 +1,48 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import tempfile
+import unittest
+
+#from .log import *
+#from .parsed_trace_events import *
+
+from log import *
+from parsed_trace_events import *
+
+class TraceTest(unittest.TestCase):
+  def __init__(self, *args):
+    """
+    Infrastructure for running tests of the tracing system.
+
+    Does not actually run any tests. Look at subclasses for those.
+    """
+    unittest.TestCase.__init__(self, *args)
+    self._file = None
+
+  def go(self, cb):
+    """
+    Enables tracing, runs the provided callback, and if successful, returns a
+    TraceEvents object with the results.
+    """
+    self._file = tempfile.NamedTemporaryFile()
+    trace_enable(open(self._file.name, 'a+'))
+
+    try:
+      cb()
+    finally:
+      trace_disable()
+    e = ParsedTraceEvents(trace_filename = self._file.name)
+    self._file.close()
+    self._file = None
+    return e
+
+  @property
+  def trace_filename(self):
+    return self._file.name
+
+  def tearDown(self):
+    if trace_is_enabled():
+      trace_disable()
+    if self._file:
+      self._file.close()
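
A sketch of a test built on the TraceTest helper above, following the same implicit-relative import style as decorators_test.py and intended to be run from the trace_event_impl directory; the test case itself is hypothetical.

```
import unittest

import log
from trace_test import TraceTest


class ExampleTraceTest(TraceTest):  # hypothetical test case
  def test_records_begin_and_end(self):
    def body():
      log.trace_begin('example')
      log.trace_end('example')
    res = self.go(body)  # ParsedTraceEvents for the events recorded by body()
    self.assertEqual(2, len(res.findByName('example')))


if __name__ == '__main__':
  unittest.main()
```
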
diff --git a/catapult/common/py_trace_event/py_trace_event/trace_event_unittest.py b/catapult/common/py_trace_event/py_trace_event/trace_event_unittest.py
new file mode 100644
index 0000000..f88ef95
--- /dev/null
+++ b/catapult/common/py_trace_event/py_trace_event/trace_event_unittest.py
@@ -0,0 +1,423 @@
+#!/usr/bin/env python
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import contextlib
+import json
+import logging
+import math
+import multiprocessing
+import os
+import tempfile
+import time
+import unittest
+
+from py_trace_event import trace_event
+from py_trace_event import trace_time
+from py_trace_event.trace_event_impl import log
+
+
+class TraceEventTests(unittest.TestCase):
+
+  def setUp(self):
+    tf = tempfile.NamedTemporaryFile(delete=False)
+    self._log_path = tf.name
+    tf.close()
+
+  def tearDown(self):
+    if os.path.exists(self._log_path):
+      os.remove(self._log_path)
+
+  @contextlib.contextmanager
+  def _test_trace(self, disable=True):
+    try:
+      trace_event.trace_enable(self._log_path)
+      yield
+    finally:
+      if disable:
+        trace_event.trace_disable()
+
+  def testNoImpl(self):
+    orig_impl = trace_event.trace_event_impl
+    try:
+      trace_event.trace_event_impl = None
+      self.assertFalse(trace_event.trace_can_enable())
+    finally:
+      trace_event.trace_event_impl = orig_impl
+
+  def testImpl(self):
+    self.assertTrue(trace_event.trace_can_enable())
+
+  def testIsEnabledFalse(self):
+    self.assertFalse(trace_event.trace_is_enabled())
+
+  def testIsEnabledTrue(self):
+    with self._test_trace():
+      self.assertTrue(trace_event.trace_is_enabled())
+
+  def testEnable(self):
+    with self._test_trace():
+      with open(self._log_path, 'r') as f:
+        log_output = json.loads(f.read() + ']')
+        self.assertEquals(len(log_output), 1)
+        self.assertTrue(trace_event.trace_is_enabled())
+        log_output = log_output.pop()
+        self.assertEquals(log_output['category'], 'process_argv')
+        self.assertEquals(log_output['name'], 'process_argv')
+        self.assertTrue(log_output['args']['argv'])
+        self.assertEquals(log_output['ph'], 'M')
+
+  def testDoubleEnable(self):
+    try:
+      with self._test_trace():
+        with self._test_trace():
+          pass
+    except log.TraceException:
+      return
+    assert False
+
+  def testDisable(self):
+    with self._test_trace(disable=False):
+      with open(self._log_path, 'r') as f:
+        self.assertTrue(trace_event.trace_is_enabled())
+        trace_event.trace_disable()
+        self.assertEquals(len(json.loads(f.read() + ']')), 1)
+        self.assertFalse(trace_event.trace_is_enabled())
+
+  def testDoubleDisable(self):
+    with self._test_trace():
+      pass
+    trace_event.trace_disable()
+
+  def testFlushChanges(self):
+    with self._test_trace():
+      with open(self._log_path, 'r') as f:
+        trace_event.clock_sync('1')
+        self.assertEquals(len(json.loads(f.read() + ']')), 1)
+        f.seek(0)
+        trace_event.trace_flush()
+        self.assertEquals(len(json.loads(f.read() + ']')), 2)
+
+  def testFlushNoChanges(self):
+    with self._test_trace():
+      with open(self._log_path, 'r') as f:
+        self.assertEquals(len(json.loads(f.read() + ']')),1)
+        f.seek(0)
+        trace_event.trace_flush()
+        self.assertEquals(len(json.loads(f.read() + ']')), 1)
+
+  def testDoubleFlush(self):
+    with self._test_trace():
+      with open(self._log_path, 'r') as f:
+        trace_event.clock_sync('1')
+        self.assertEquals(len(json.loads(f.read() + ']')), 1)
+        f.seek(0)
+        trace_event.trace_flush()
+        trace_event.trace_flush()
+        self.assertEquals(len(json.loads(f.read() + ']')), 2)
+
+  def testTraceBegin(self):
+    with self._test_trace():
+      with open(self._log_path, 'r') as f:
+        trace_event.trace_begin('test_event', this='that')
+        trace_event.trace_flush()
+        log_output = json.loads(f.read() + ']')
+        self.assertEquals(len(log_output), 2)
+        current_entry = log_output.pop(0)
+        self.assertEquals(current_entry['category'], 'process_argv')
+        self.assertEquals(current_entry['name'], 'process_argv')
+        self.assertTrue(current_entry['args']['argv'])
+        self.assertEquals(current_entry['ph'], 'M')
+        current_entry = log_output.pop(0)
+        self.assertEquals(current_entry['category'], 'python')
+        self.assertEquals(current_entry['name'], 'test_event')
+        self.assertEquals(current_entry['args']['this'], '\'that\'')
+        self.assertEquals(current_entry['ph'], 'B')
+
+  def testTraceEnd(self):
+    with self._test_trace():
+      with open(self._log_path, 'r') as f:
+        trace_event.trace_end('test_event')
+        trace_event.trace_flush()
+        log_output = json.loads(f.read() + ']')
+        self.assertEquals(len(log_output), 2)
+        current_entry = log_output.pop(0)
+        self.assertEquals(current_entry['category'], 'process_argv')
+        self.assertEquals(current_entry['name'], 'process_argv')
+        self.assertTrue(current_entry['args']['argv'])
+        self.assertEquals(current_entry['ph'], 'M')
+        current_entry = log_output.pop(0)
+        self.assertEquals(current_entry['category'], 'python')
+        self.assertEquals(current_entry['name'], 'test_event')
+        self.assertEquals(current_entry['args'], {})
+        self.assertEquals(current_entry['ph'], 'E')
+
+  def testTrace(self):
+    with self._test_trace():
+      with trace_event.trace('test_event', this='that'):
+        pass
+      trace_event.trace_flush()
+      with open(self._log_path, 'r') as f:
+        log_output = json.loads(f.read() + ']')
+        self.assertEquals(len(log_output), 3)
+        current_entry = log_output.pop(0)
+        self.assertEquals(current_entry['category'], 'process_argv')
+        self.assertEquals(current_entry['name'], 'process_argv')
+        self.assertTrue(current_entry['args']['argv'])
+        self.assertEquals(current_entry['ph'], 'M')
+        current_entry = log_output.pop(0)
+        self.assertEquals(current_entry['category'], 'python')
+        self.assertEquals(current_entry['name'], 'test_event')
+        self.assertEquals(current_entry['args']['this'], '\'that\'')
+        self.assertEquals(current_entry['ph'], 'B')
+        current_entry = log_output.pop(0)
+        self.assertEquals(current_entry['category'], 'python')
+        self.assertEquals(current_entry['name'], 'test_event')
+        self.assertEquals(current_entry['args'], {})
+        self.assertEquals(current_entry['ph'], 'E')
+
+  def testTracedDecorator(self):
+    @trace_event.traced("this")
+    def test_decorator(this="that"):
+      pass
+
+    with self._test_trace():
+      test_decorator()
+      trace_event.trace_flush()
+      with open(self._log_path, 'r') as f:
+        log_output = json.loads(f.read() + ']')
+        self.assertEquals(len(log_output), 3)
+        current_entry = log_output.pop(0)
+        self.assertEquals(current_entry['category'], 'process_argv')
+        self.assertEquals(current_entry['name'], 'process_argv')
+        self.assertTrue(current_entry['args']['argv'])
+        self.assertEquals(current_entry['ph'], 'M')
+        current_entry = log_output.pop(0)
+        self.assertEquals(current_entry['category'], 'python')
+        self.assertEquals(current_entry['name'], '__main__.test_decorator')
+        self.assertEquals(current_entry['args']['this'], '\'that\'')
+        self.assertEquals(current_entry['ph'], 'B')
+        current_entry = log_output.pop(0)
+        self.assertEquals(current_entry['category'], 'python')
+        self.assertEquals(current_entry['name'], '__main__.test_decorator')
+        self.assertEquals(current_entry['args'], {})
+        self.assertEquals(current_entry['ph'], 'E')
+
+  def testClockSyncWithTs(self):
+    with self._test_trace():
+      with open(self._log_path, 'r') as f:
+        trace_event.clock_sync('id', issue_ts=trace_time.Now())
+        trace_event.trace_flush()
+        log_output = json.loads(f.read() + ']')
+        self.assertEquals(len(log_output), 2)
+        current_entry = log_output.pop(0)
+        self.assertEquals(current_entry['category'], 'process_argv')
+        self.assertEquals(current_entry['name'], 'process_argv')
+        self.assertTrue(current_entry['args']['argv'])
+        self.assertEquals(current_entry['ph'], 'M')
+        current_entry = log_output.pop(0)
+        self.assertEquals(current_entry['category'], 'python')
+        self.assertEquals(current_entry['name'], 'clock_sync')
+        self.assertTrue(current_entry['args']['issue_ts'])
+        self.assertEquals(current_entry['ph'], 'c')
+
+  def testClockSyncWithoutTs(self):
+    with self._test_trace():
+      with open(self._log_path, 'r') as f:
+        trace_event.clock_sync('id')
+        trace_event.trace_flush()
+        log_output = json.loads(f.read() + ']')
+        self.assertEquals(len(log_output), 2)
+        current_entry = log_output.pop(0)
+        self.assertEquals(current_entry['category'], 'process_argv')
+        self.assertEquals(current_entry['name'], 'process_argv')
+        self.assertTrue(current_entry['args']['argv'])
+        self.assertEquals(current_entry['ph'], 'M')
+        current_entry = log_output.pop(0)
+        self.assertEquals(current_entry['category'], 'python')
+        self.assertEquals(current_entry['name'], 'clock_sync')
+        self.assertFalse(current_entry['args'].get('issue_ts'))
+        self.assertEquals(current_entry['ph'], 'c')
+
+  def testTime(self):
+    actual_diff = []
+    def func1():
+      trace_begin("func1")
+      start = time.time()
+      time.sleep(0.25)
+      end = time.time()
+      actual_diff.append(end-start) # Pass via array because of Python scoping
+      trace_end("func1")
+
+    with self._test_trace():
+      start_ts = time.time()
+      trace_event.trace_begin('test')
+      end_ts = time.time()
+      trace_event.trace_end('test')
+      trace_event.trace_flush()
+      with open(self._log_path, 'r') as f:
+        log_output = json.loads(f.read() + ']')
+        self.assertEquals(len(log_output), 3)
+        meta_data = log_output[0]
+        open_data = log_output[1]
+        close_data = log_output[2]
+        self.assertEquals(meta_data['category'], 'process_argv')
+        self.assertEquals(meta_data['name'], 'process_argv')
+        self.assertTrue(meta_data['args']['argv'])
+        self.assertEquals(meta_data['ph'], 'M')
+        self.assertEquals(open_data['category'], 'python')
+        self.assertEquals(open_data['name'], 'test')
+        self.assertEquals(open_data['ph'], 'B')
+        self.assertEquals(close_data['category'], 'python')
+        self.assertEquals(close_data['name'], 'test')
+        self.assertEquals(close_data['ph'], 'E')
+        event_time_diff = close_data['ts'] - open_data['ts']
+        recorded_time_diff = (end_ts - start_ts) * 1000000
+        self.assertLess(math.fabs(event_time_diff - recorded_time_diff), 1000)
+
+  def testNestedCalls(self):
+    with self._test_trace():
+      trace_event.trace_begin('one')
+      trace_event.trace_begin('two')
+      trace_event.trace_end('two')
+      trace_event.trace_end('one')
+      trace_event.trace_flush()
+      with open(self._log_path, 'r') as f:
+        log_output = json.loads(f.read() + ']')
+        self.assertEquals(len(log_output), 5)
+        meta_data = log_output[0]
+        one_open = log_output[1]
+        two_open = log_output[2]
+        two_close = log_output[3]
+        one_close = log_output[4]
+        self.assertEquals(meta_data['category'], 'process_argv')
+        self.assertEquals(meta_data['name'], 'process_argv')
+        self.assertTrue(meta_data['args']['argv'])
+        self.assertEquals(meta_data['ph'], 'M')
+
+        self.assertEquals(one_open['category'], 'python')
+        self.assertEquals(one_open['name'], 'one')
+        self.assertEquals(one_open['ph'], 'B')
+        self.assertEquals(one_close['category'], 'python')
+        self.assertEquals(one_close['name'], 'one')
+        self.assertEquals(one_close['ph'], 'E')
+
+        self.assertEquals(two_open['category'], 'python')
+        self.assertEquals(two_open['name'], 'two')
+        self.assertEquals(two_open['ph'], 'B')
+        self.assertEquals(two_close['category'], 'python')
+        self.assertEquals(two_close['name'], 'two')
+        self.assertEquals(two_close['ph'], 'E')
+
+        self.assertLessEqual(one_open['ts'], two_open['ts'])
+        self.assertGreaterEqual(one_close['ts'], two_close['ts'])
+
+  def testInterleavedCalls(self):
+    with self._test_trace():
+      trace_event.trace_begin('one')
+      trace_event.trace_begin('two')
+      trace_event.trace_end('one')
+      trace_event.trace_end('two')
+      trace_event.trace_flush()
+      with open(self._log_path, 'r') as f:
+        log_output = json.loads(f.read() + ']')
+        self.assertEquals(len(log_output), 5)
+        meta_data = log_output[0]
+        one_open = log_output[1]
+        two_open = log_output[2]
+        two_close = log_output[4]
+        one_close = log_output[3]
+        self.assertEquals(meta_data['category'], 'process_argv')
+        self.assertEquals(meta_data['name'], 'process_argv')
+        self.assertTrue(meta_data['args']['argv'])
+        self.assertEquals(meta_data['ph'], 'M')
+
+        self.assertEquals(one_open['category'], 'python')
+        self.assertEquals(one_open['name'], 'one')
+        self.assertEquals(one_open['ph'], 'B')
+        self.assertEquals(one_close['category'], 'python')
+        self.assertEquals(one_close['name'], 'one')
+        self.assertEquals(one_close['ph'], 'E')
+
+        self.assertEquals(two_open['category'], 'python')
+        self.assertEquals(two_open['name'], 'two')
+        self.assertEquals(two_open['ph'], 'B')
+        self.assertEquals(two_close['category'], 'python')
+        self.assertEquals(two_close['name'], 'two')
+        self.assertEquals(two_close['ph'], 'E')
+
+        self.assertLessEqual(one_open['ts'], two_open['ts'])
+        self.assertLessEqual(one_close['ts'], two_close['ts'])
+
+  def testMultiprocess(self):
+    def child_function():
+      with trace_event.trace('child_event'):
+        pass
+
+    with self._test_trace():
+      trace_event.trace_begin('parent_event')
+      trace_event.trace_flush()
+      p = multiprocessing.Process(target=child_function)
+      p.start()
+      self.assertTrue(hasattr(p, "_shimmed_by_trace_event"))
+      p.join()
+      trace_event.trace_end('parent_event')
+      trace_event.trace_flush()
+      with open(self._log_path, 'r') as f:
+        log_output = json.loads(f.read() + ']')
+        self.assertEquals(len(log_output), 5)
+        meta_data = log_output[0]
+        parent_open = log_output[1]
+        child_open = log_output[2]
+        child_close = log_output[3]
+        parent_close = log_output[4]
+        self.assertEquals(meta_data['category'], 'process_argv')
+        self.assertEquals(meta_data['name'], 'process_argv')
+        self.assertTrue(meta_data['args']['argv'])
+        self.assertEquals(meta_data['ph'], 'M')
+
+        self.assertEquals(parent_open['category'], 'python')
+        self.assertEquals(parent_open['name'], 'parent_event')
+        self.assertEquals(parent_open['ph'], 'B')
+
+        self.assertEquals(child_open['category'], 'python')
+        self.assertEquals(child_open['name'], 'child_event')
+        self.assertEquals(child_open['ph'], 'B')
+
+        self.assertEquals(child_close['category'], 'python')
+        self.assertEquals(child_close['name'], 'child_event')
+        self.assertEquals(child_close['ph'], 'E')
+
+        self.assertEquals(parent_close['category'], 'python')
+        self.assertEquals(parent_close['name'], 'parent_event')
+        self.assertEquals(parent_close['ph'], 'E')
+
+  def testMultiprocessExceptionInChild(self):
+    def bad_child():
+      trace_event.trace_disable()
+
+    with self._test_trace():
+      p = multiprocessing.Pool(1)
+      trace_event.trace_begin('parent')
+      self.assertRaises(Exception, lambda: p.apply(bad_child, ()))
+      p.close()
+      p.terminate()
+      p.join()
+      trace_event.trace_end('parent')
+      trace_event.trace_flush()
+      with open(self._log_path, 'r') as f:
+        log_output = json.loads(f.read() + ']')
+        self.assertEquals(len(log_output), 3)
+        meta_data = log_output[0]
+        parent_open = log_output[1]
+        parent_close = log_output[2]
+        self.assertEquals(parent_open['category'], 'python')
+        self.assertEquals(parent_open['name'], 'parent')
+        self.assertEquals(parent_open['ph'], 'B')
+        self.assertEquals(parent_close['category'], 'python')
+        self.assertEquals(parent_close['name'], 'parent')
+        self.assertEquals(parent_close['ph'], 'E')
+
+if __name__ == '__main__':
+  logging.getLogger().setLevel(logging.DEBUG)
+  unittest.main(verbosity=2)
diff --git a/catapult/common/py_trace_event/py_trace_event/trace_time.py b/catapult/common/py_trace_event/py_trace_event/trace_time.py
new file mode 100644
index 0000000..ccba57f
--- /dev/null
+++ b/catapult/common/py_trace_event/py_trace_event/trace_time.py
@@ -0,0 +1,215 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import ctypes
+import ctypes.util
+import logging
+import os
+import platform
+import sys
+import time
+import threading
+
+
+GET_TICK_COUNT_LAST_NOW = 0
+# If the current GetTickCount() sample is less than GET_TICK_COUNT_LAST_NOW,
+# the tick counter has wrapped around, and this needs to be accounted for.
+GET_TICK_COUNT_WRAPAROUNDS = 0
+# The current detected platform
+DETECTED_PLATFORM = None
+# Mapping of supported platforms and what is returned by sys.platform.
+_PLATFORMS = {
+    'mac': 'darwin',
+    'linux': 'linux',
+    'windows': 'win32',
+    'cygwin': 'cygwin',
+    'freebsd': 'freebsd',
+    'sunos': 'sunos5',
+    'bsd': 'bsd'
+}
+# Mapping of what to pass get_clocktime based on platform.
+_CLOCK_MONOTONIC = {
+    'linux': 1,
+    'freebsd': 4,
+    'bsd': 3,
+    'sunos5': 4
+}
+
+
+def GetMacNowFunction(plat):
+  """ Get a monotonic clock for the Mac platform.
+
+    Args:
+      plat: Platform that is being run on. Unused in GetMacNowFunction. Passed
+        for consistency between initializers.
+    Returns:
+      Function pointer to monotonic clock for mac platform.
+  """
+  del plat # Unused
+  global DETECTED_PLATFORM # pylint: disable=global-statement
+  DETECTED_PLATFORM = 'mac'
+  libc = ctypes.CDLL('/usr/lib/libc.dylib', use_errno=True)
+  class MachTimebaseInfoData(ctypes.Structure):
+    """System timebase info. Defined in <mach/mach_time.h>."""
+    _fields_ = (('numer', ctypes.c_uint32),
+                ('denom', ctypes.c_uint32))
+
+  mach_absolute_time = libc.mach_absolute_time
+  mach_absolute_time.restype = ctypes.c_uint64
+
+  timebase = MachTimebaseInfoData()
+  libc.mach_timebase_info(ctypes.byref(timebase))
+  ticks_per_second = timebase.numer / timebase.denom * 1.0e9
+
+  def GetMacNowFunctionImpl():
+    return mach_absolute_time() / ticks_per_second
+  return GetMacNowFunctionImpl
+
+
+def GetClockGetTimeClockNumber(plat):
+  for key in _CLOCK_MONOTONIC:
+    if plat.startswith(key):
+      return _CLOCK_MONOTONIC[key]
+  raise LookupError('Platform not in clock dictionary')
+
+def GetLinuxNowFunction(plat):
+  """ Get a monotonic clock for linux platforms.
+
+    Args:
+      plat: Platform that is being run on.
+    Returns:
+      Function pointer to monotonic clock for linux platform.
+  """
+  global DETECTED_PLATFORM # pylint: disable=global-statement
+  DETECTED_PLATFORM = 'linux'
+  clock_monotonic = GetClockGetTimeClockNumber(plat)
+  try:
+    # Attempt to find clock_gettime in the C library.
+    clock_gettime = ctypes.CDLL(ctypes.util.find_library('c'),
+                                use_errno=True).clock_gettime
+  except AttributeError:
+    # If not able to find it in the C library, look in the rt library.
+    clock_gettime = ctypes.CDLL(ctypes.util.find_library('rt'),
+                                use_errno=True).clock_gettime
+
+  class Timespec(ctypes.Structure):
+    """Time specification, as described in clock_gettime(3)."""
+    _fields_ = (('tv_sec', ctypes.c_long),
+                ('tv_nsec', ctypes.c_long))
+
+  def GetLinuxNowFunctionImpl():
+    ts = Timespec()
+    if clock_gettime(clock_monotonic, ctypes.pointer(ts)):
+      errno = ctypes.get_errno()
+      raise OSError(errno, os.strerror(errno))
+    return ts.tv_sec + ts.tv_nsec / 1.0e9
+
+  return GetLinuxNowFunctionImpl
+
+
+def IsQPCUsable():
+  """ Determines if system can query the performance counter.
+    The performance counter is a high resolution timer on windows systems.
+    Some chipsets have unreliable performance counters, so this checks that one
+    of those chipsets is not present.
+
+    Returns:
+      True if QPC is usable, False otherwise.
+  """
+
+  # Sample output: 'Intel64 Family 6 Model 23 Stepping 6, GenuineIntel'
+  info = platform.processor()
+  if 'AuthenticAMD' in info and 'Family 15' in info:
+    return False
+  try: # If anything goes wrong during this, assume QPC isn't available.
+    frequency = ctypes.c_int64()
+    ctypes.windll.Kernel32.QueryPerformanceFrequency(
+        ctypes.byref(frequency))
+    if float(frequency.value) <= 0:
+      return False
+  except Exception: # pylint: disable=broad-except
+    logging.exception('Error when determining if QPC is usable.')
+    return False
+  return True
+
+
+def GetWinNowFunction(plat):
+  """ Get a monotonic clock for windows platforms.
+
+    Args:
+      plat: Platform that is being run on.
+    Returns:
+      Function pointer to monotonic clock for windows platform.
+  """
+  global DETECTED_PLATFORM # pylint: disable=global-statement
+  DETECTED_PLATFORM = 'windows'
+  if IsQPCUsable():
+    qpc_return = ctypes.c_int64()
+    qpc_frequency = ctypes.c_int64()
+    ctypes.windll.Kernel32.QueryPerformanceFrequency(
+        ctypes.byref(qpc_frequency))
+    qpc_frequency = float(qpc_frequency.value)
+    qpc = ctypes.windll.Kernel32.QueryPerformanceCounter
+    def GetWinNowFunctionImpl():
+      qpc(ctypes.byref(qpc_return))
+      return qpc_return.value / qpc_frequency
+
+  else:
+    kernel32 = (ctypes.cdll.kernel32
+                if plat.startswith(_PLATFORMS['cygwin'])
+                else ctypes.windll.kernel32)
+    get_tick_count_64 = getattr(kernel32, 'GetTickCount64', None)
+
+    # Windows Vista or newer
+    if get_tick_count_64:
+      get_tick_count_64.restype = ctypes.c_ulonglong
+      def GetWinNowFunctionImpl():
+        return get_tick_count_64() / 1000.0
+
+    else: # Pre-Vista.
+      get_tick_count = kernel32.GetTickCount
+      get_tick_count.restype = ctypes.c_uint32
+      get_tick_count_lock = threading.Lock()
+      def GetWinNowFunctionImpl():
+        global GET_TICK_COUNT_LAST_NOW # pylint: disable=global-statement
+        global GET_TICK_COUNT_WRAPAROUNDS # pylint: disable=global-statement
+        with get_tick_count_lock:
+          current_sample = get_tick_count()
+          if current_sample < GET_TICK_COUNT_LAST_NOW:
+            GET_TICK_COUNT_WRAPAROUNDS += 1
+          GET_TICK_COUNT_LAST_NOW = current_sample
+          final_ms = GET_TICK_COUNT_WRAPAROUNDS << 32
+          final_ms += GET_TICK_COUNT_LAST_NOW
+          return final_ms / 1000.0
+  return GetWinNowFunctionImpl
+
+
+def InitializeNowFunction(plat):
+  """ Get a monotonic clock for the current platform.
+
+    Args:
+      plat: Platform that is being run on.
+    Returns:
+      Function pointer to monotonic clock function for current platform.
+  """
+  if plat.startswith(_PLATFORMS['mac']):
+    return GetMacNowFunction(plat)
+
+  elif (plat.startswith(_PLATFORMS['linux'])
+        or plat.startswith(_PLATFORMS['freebsd'])
+        or plat.startswith(_PLATFORMS['bsd'])
+        or plat.startswith(_PLATFORMS['sunos'])):
+    return GetLinuxNowFunction(plat)
+
+  elif (plat.startswith(_PLATFORMS['windows'])
+        or plat.startswith(_PLATFORMS['cygwin'])):
+    return GetWinNowFunction(plat)
+
+  else:
+    raise RuntimeError('%s is not a supported platform.' % plat)
+
+def Now():
+  return monotonic()
+
+monotonic = InitializeNowFunction(sys.platform)
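The new module above is consumed as `trace_time.Now()` (see the unit tests below); the trickiest piece is the pre-Vista Windows path, which promotes the 32-bit `GetTickCount()` value to a monotonic count by tracking rollovers, as the comments near the top of the file describe. A minimal, standalone sketch of that accounting, with made-up tick samples:

```python
# Standalone sketch of the GetTickCount wraparound accounting used above;
# the tick samples are made up for illustration.
def CombinedSeconds(wraparounds, last_sample_ms):
  # Each rollover of the 32-bit counter contributes 2**32 milliseconds.
  return ((wraparounds << 32) + last_sample_ms) / 1000.0

samples_ms = [4294967200, 150]  # second sample is smaller: the counter wrapped
wraparounds, last_now = 0, samples_ms[0]
for sample in samples_ms[1:]:
  if sample < last_now:
    wraparounds += 1
  last_now = sample
print CombinedSeconds(wraparounds, last_now)  # ~4294967.45, still increasing
```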
diff --git a/catapult/common/py_trace_event/py_trace_event/trace_time_unittest.py b/catapult/common/py_trace_event/py_trace_event/trace_time_unittest.py
new file mode 100644
index 0000000..4e712fa
--- /dev/null
+++ b/catapult/common/py_trace_event/py_trace_event/trace_time_unittest.py
@@ -0,0 +1,122 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import contextlib
+import logging
+import platform
+import sys
+import unittest
+
+from py_trace_event import trace_time
+
+
+class TimerTest(unittest.TestCase):
+  # Helper methods.
+  @contextlib.contextmanager
+  def ReplacePlatformProcessorCall(self, f):
+    try:
+      old_proc = platform.processor
+      platform.processor = f
+      yield
+    finally:
+      platform.processor = old_proc
+
+  @contextlib.contextmanager
+  def ReplaceQPCCheck(self, f):
+    try:
+      old_qpc = trace_time.IsQPCUsable
+      trace_time.IsQPCUsable = f
+      yield
+    finally:
+      trace_time.IsQPCUsable = old_qpc
+
+  # Platform detection tests.
+  def testInitializeNowFunction_platformNotSupported(self):
+    with self.assertRaises(RuntimeError):
+      trace_time.InitializeNowFunction('invalid_platform')
+
+  def testInitializeNowFunction_windows(self):
+    if not (sys.platform.startswith(trace_time._PLATFORMS['windows'])
+            or sys.platform.startswith(trace_time._PLATFORMS['cygwin'])):
+      return True
+    trace_time.InitializeNowFunction(sys.platform)
+    self.assertEqual(trace_time.DETECTED_PLATFORM, 'windows')
+
+  def testInitializeNowFunction_linux(self):
+    if not sys.platform.startswith(trace_time._PLATFORMS['linux']):
+      return True
+    trace_time.InitializeNowFunction(sys.platform)
+    self.assertEqual(trace_time.DETECTED_PLATFORM, 'linux')
+
+  def testInitializeNowFunction_mac(self):
+    if not sys.platform.startswith(trace_time._PLATFORMS['mac']):
+      return True
+    trace_time.InitializeNowFunction(sys.platform)
+    self.assertEqual(trace_time.DETECTED_PLATFORM, 'mac')
+
+  # Windows Tests
+  def testIsQPCUsable_buggyAthlonProcReturnsFalse(self):
+    if not (sys.platform.startswith(trace_time._PLATFORMS['windows'])
+            or sys.platform.startswith(trace_time._PLATFORMS['cygwin'])):
+      return True
+
+    def BuggyAthlonProc():
+      return 'AMD64 Family 15 Model 23 Stepping 6, AuthenticAMD'
+
+    with self.ReplacePlatformProcessorCall(BuggyAthlonProc):
+      self.assertFalse(trace_time.IsQPCUsable())
+
+  def testIsQPCUsable_returnsTrueOnWindows(self):
+    if not (sys.platform.startswith(trace_time._PLATFORMS['windows'])
+            or sys.platform.startswith(trace_time._PLATFORMS['cygwin'])):
+      return True
+
+    def Proc():
+      return 'Intel64 Family 15 Model 23 Stepping 6, GenuineIntel'
+
+    with self.ReplacePlatformProcessorCall(Proc):
+      self.assertTrue(trace_time.IsQPCUsable())
+
+  def testGetWinNowFunction_QPC(self):
+    if not (sys.platform.startswith(trace_time._PLATFORMS['windows'])
+            or sys.platform.startswith(trace_time._PLATFORMS['cygwin'])):
+      return True
+    # Test requires QPC to be available on platform.
+    if not trace_time.IsQPCUsable():
+      return True
+    self.assertGreater(trace_time.monotonic(), 0)
+
+  # Works even if QPC would work.
+  def testGetWinNowFunction_GetTickCount(self):
+    if not (sys.platform.startswith(trace_time._PLATFORMS['windows'])
+            or sys.platform.startswith(trace_time._PLATFORMS['cygwin'])):
+      return True
+    with self.ReplaceQPCCheck(lambda: False):
+      self.assertGreater(trace_time.monotonic(), 0)
+
+  # Linux tests.
+  def testGetClockGetTimeClockNumber_linux(self):
+    self.assertEquals(trace_time.GetClockGetTimeClockNumber('linux'), 1)
+
+  def testGetClockGetTimeClockNumber_freebsd(self):
+    self.assertEquals(trace_time.GetClockGetTimeClockNumber('freebsd'), 4)
+
+  def testGetClockGetTimeClockNumber_bsd(self):
+    self.assertEquals(trace_time.GetClockGetTimeClockNumber('bsd'), 3)
+
+  def testGetClockGetTimeClockNumber_sunos(self):
+    self.assertEquals(trace_time.GetClockGetTimeClockNumber('sunos5'), 4)
+
+  # Smoke Test.
+  def testMonotonic(self):
+    time_one = trace_time.Now()
+    for _ in xrange(1000):
+      time_two = trace_time.Now()
+      self.assertLessEqual(time_one, time_two)
+      time_one = time_two
+
+
+if __name__ == '__main__':
+  logging.getLogger().setLevel(logging.DEBUG)
+  unittest.main(verbosity=2)
diff --git a/catapult/dashboard/PRESUBMIT.py b/catapult/dashboard/PRESUBMIT.py
new file mode 100644
index 0000000..0edc9f0
--- /dev/null
+++ b/catapult/dashboard/PRESUBMIT.py
@@ -0,0 +1,29 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+def CheckChangeOnUpload(input_api, output_api):
+  return _CommonChecks(input_api, output_api)
+
+
+def CheckChangeOnCommit(input_api, output_api):
+  return _CommonChecks(input_api, output_api)
+
+
+def _CommonChecks(input_api, output_api):
+  results = []
+  results += input_api.RunTests(input_api.canned_checks.GetPylint(
+      input_api, output_api, extra_paths_list=_GetPathsToPrepend(input_api),
+      pylintrc='pylintrc'))
+  return results
+
+
+def _GetPathsToPrepend(input_api):
+  project_dir = input_api.PresubmitLocalPath()
+  catapult_dir = input_api.os_path.join(project_dir, '..')
+  return [
+      project_dir,
+
+      input_api.os_path.join(catapult_dir, 'third_party', 'mock'),
+  ]
diff --git a/catapult/dashboard/README.md b/catapult/dashboard/README.md
index 93212f5..8c012e7 100644
--- a/catapult/dashboard/README.md
+++ b/catapult/dashboard/README.md
@@ -1,47 +1,20 @@
 # Performance Dashboard
 
-This is a App Engine web app for displaying and monitoring performance
-test results.
+The Chrome Performance Dashboard is an App Engine web app for displaying
+and monitoring performance test results.
 
-See
-[online documentation](http://www.chromium.org/developers/speed-infra/performance-dashboard).
+ - [Getting set up to contribute](/dashboard/docs/getting-set-up.md)
+ - [Dashboard admin tasks](/dashboard/docs/admin-tasks.md)
+ - [Debugging in production](/dashboard/docs/cloud-debugger.md)
+ - [Example code snippets](/dashboard/docs/code-snippets.md)
+ - [Data format for new graph data](/dashboard/docs/data-format.md)
+ - [Checklist for deployment](/dashboard/docs/deploy-checklist.md)
+ - [Project glossary](/dashboard/docs/glossary.md)
+ - [Pages and endpoints](/dashboard/docs/pages-and-endpoints.md)
 
-## Prerequisites
+## Contact
 
-Running the tests, running the local server and
-deploying all depends on having the [the App Engine
-SDK](https://cloud.google.com/appengine/downloads).
-
-After downloading and unpacking, you should add the path to the SDK to
-both `PATH` and `PYTHONPATH`.
-
-## Running the tests
-
-To run all of the unit tests, you can run `bin/run_tests`. You can also
-pass the import path to the test module to run a particular test, for
-example `bin/run_tests dashboard.utils_test`.
-
-## To run locally
-
-Run `bin/dev_server`; this sets up a temporary directory, adds links to
-required libraries, and calls `dev_appserver.py` on that directory.  By
-default, this starts a server on [localhost:8080](http://localhost:8080/).
-
-To load sample graph or alert data from production, navigate to
-[/load_from_prod](http://localhost:8080/load_from_prod).
-
-## Deploying to production
-
-To deploy, you can run `bin/deploy`, which prepares the
-code to be deployed and runs `appcfg.py`. Note that this
-doesn't set the new version as the default version; to do
-this, you can use the versions page on the [Google Developers
-Console](https://console.developers.google.com/) if you have edit or
-owner permissions for the App Engine project; otherwise if you want to
-request to set a new default version for chromeperf.appspot.com you can
-contact chrome-perf-dashboard-team@google.com.
-
-WARNING: Some changes to production may not be easily reversible; for
-example `appcfg.py ... vacuum_indexes` will remove datastore indexes that
-are not in your local index.yaml file, which may take more than 24 hours,
-and will disable any queries that depend on those indexes.
+Bugs can be reported on the
+[github issue tracker](https://github.com/catapult-project/catapult/issues);
+for questions and feedback,
+send an email to chrome-perf-dashboard-team@google.com.
diff --git a/catapult/dashboard/app.yaml b/catapult/dashboard/app.yaml
index 17df653..223e58c 100644
--- a/catapult/dashboard/app.yaml
+++ b/catapult/dashboard/app.yaml
@@ -47,6 +47,7 @@
 - url: /dashboard/static/
   static_dir: dashboard/static/
   secure: always
+  application_readable: true
 
 - url: /dashboard/elements/(.*\.html)$
   static_files: dashboard/elements/\1
@@ -69,125 +70,130 @@
   secure: always
 
 - url: /add_point_queue
-  script: dashboard.dispatcher.app
+  script: dashboard.dispatcher.APP
   login: admin
   secure: always
 
 - url: /associate_alerts
-  script: dashboard.dispatcher.app
+  script: dashboard.dispatcher.APP
   login: required
   secure: always
 
 - url: /auto_bisect
-  script: dashboard.dispatcher.app
+  script: dashboard.dispatcher.APP
   login: admin
   secure: always
 
 - url: /auto_triage
-  script: dashboard.dispatcher.app
+  script: dashboard.dispatcher.APP
   login: admin
   secure: always
 
-- url: /bisect
-  script: dashboard.dispatcher.app
-  login: required
-  secure: always
-
 - url: /bot_whitelist
-  script: dashboard.dispatcher.app
+  script: dashboard.dispatcher.APP
   login: admin
   secure: always
 
 - url: /change_internal_only
-  script: dashboard.dispatcher.app
+  script: dashboard.dispatcher.APP
   login: admin
   secure: always
 
 - url: /delete_expired_entities
-  script: dashboard.dispatcher.app
+  script: dashboard.dispatcher.APP
   login: admin
   secure: always
 
 - url: /edit_anomaly_configs
-  script: dashboard.dispatcher.app
+  script: dashboard.dispatcher.APP
   login: admin
   secure: always
 
 - url: /edit_bug_labels
-  script: dashboard.dispatcher.app
+  script: dashboard.dispatcher.APP
   login: admin
   secure: always
 
 - url: /edit_sheriffs
-  script: dashboard.dispatcher.app
+  script: dashboard.dispatcher.APP
   login: admin
   secure: always
 
 - url: /edit_anomalies
-  script: dashboard.dispatcher.app
+  script: dashboard.dispatcher.APP
   login: required
   secure: always
 
+- url: /edit_site_config
+  script: dashboard.dispatcher.APP
+  login: admin
+  secure: always
+
 - url: /email_summary
-  script: dashboard.dispatcher.app
+  script: dashboard.dispatcher.APP
   login: admin
   secure: always
 
 - url: /file_bug
-  script: dashboard.dispatcher.app
+  script: dashboard.dispatcher.APP
   login: required
   secure: always
 
-- url: /ip_whitelist
-  script: dashboard.dispatcher.app
-  login: admin
-  secure: always
-
 - url: /load_from_prod
-  script: dashboard.dispatcher.app
+  script: dashboard.dispatcher.APP
   login: admin
   secure: always
 
 - url: /migrate_test_names
-  script: dashboard.dispatcher.app
+  script: dashboard.dispatcher.APP
   login: admin
   secure: always
 
 - url: /mr_deprecate_tests
-  script: dashboard.dispatcher.app
+  script: dashboard.dispatcher.APP
   login: admin
   secure: always
 
+- url: /new_points
+  script: dashboard.dispatcher.APP
+  login: required
+  secure: always
+
 - url: /put_entities_task
-  script: dashboard.dispatcher.app
+  script: dashboard.dispatcher.APP
   login: admin
   secure: always
 
 - url: /send_stoppage_alert_emails
-  script: dashboard.dispatcher.app
+  script: dashboard.dispatcher.APP
   login: admin
   secure: always
 
+- url: /start_try_job
+  script: dashboard.dispatcher.APP
+  login: required
+  secure: always
+
 - url: /stats_around_revision
-  script: dashboard.dispatcher.app
+  script: dashboard.dispatcher.APP
   login: admin
   secure: always
 
 - url: /update_bug_with_results
-  script: dashboard.dispatcher.app
+  script: dashboard.dispatcher.APP
   login: admin
   secure: always
 
 - url: /update_test_metadata
-  script: dashboard.dispatcher.app
+  script: dashboard.dispatcher.APP
   login: admin
   secure: always
 
 - url: /update_test_suites
-  script: dashboard.dispatcher.app
+  script: dashboard.dispatcher.APP
   login: admin
   secure: always
 
 - url: /.*
-  script: dashboard.dispatcher.app
+  script: dashboard.dispatcher.APP
   secure: always
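All of the handlers above now point at `dashboard.dispatcher.APP` instead of `dashboard.dispatcher.app`; in the App Engine Python 2.7 runtime, `script:` names a module-level WSGI application object, so this is purely a rename to the constant-style spelling (see the pylint `invalid-name` note in appengine_config.py below). A hypothetical sketch of such a module, assuming webapp2; the real dispatcher.py is not part of this diff:

```python
# Hypothetical sketch only; the actual dashboard/dispatcher.py is not shown in
# this change. Assumes webapp2, the framework bundled with the GAE 2.7 runtime.
import webapp2


class _ExampleHandler(webapp2.RequestHandler):
  def get(self):
    self.response.write('ok')


# "script: dashboard.dispatcher.APP" in app.yaml resolves to a module-level
# WSGI application object like this one.
APP = webapp2.WSGIApplication([('/.*', _ExampleHandler)])
```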
diff --git a/catapult/dashboard/appengine_config.py b/catapult/dashboard/appengine_config.py
index 227f9b9..c76de7a 100644
--- a/catapult/dashboard/appengine_config.py
+++ b/catapult/dashboard/appengine_config.py
@@ -9,20 +9,22 @@
   https://cloud.google.com/appengine/docs/python/tools/appengineconfig
 """
 
-import logging
 import os
-import sys
 
 from google.appengine.ext import vendor
 
 import dashboard
 
+# The names used below are special constant names which other code depends on.
+# pylint: disable=invalid-name
+
 appstats_SHELL_OK = True
 
 # Allows remote_api from the peng team to support the crosbolt dashboard.
 remoteapi_CUSTOM_ENVIRONMENT_AUTHENTICATION = (
     'LOAS_PEER_USERNAME', ['chromeos-peng-performance'])
 
+# pylint: enable=invalid-name
 
 def _AddThirdPartyLibraries():
   """Registers the third party libraries with App Engine.
diff --git a/catapult/dashboard/bin/deploy b/catapult/dashboard/bin/deploy
index a495c83..82c1eb8 100755
--- a/catapult/dashboard/bin/deploy
+++ b/catapult/dashboard/bin/deploy
@@ -15,7 +15,7 @@
 
 def Main():
   catapult_path = os.path.abspath(os.path.join(
-      os.path.dirname(__file__), os.path.pardir, os.path.pardir))
+      os.path.dirname(__file__), '..', '..'))
   parser = argparse.ArgumentParser()
   parser.add_argument('--appid', default='chromeperf')
   args = parser.parse_args()
diff --git a/catapult/dashboard/bin/dev_server b/catapult/dashboard/bin/dev_server
index 5275540..11994cc 100755
--- a/catapult/dashboard/bin/dev_server
+++ b/catapult/dashboard/bin/dev_server
@@ -14,7 +14,7 @@
 
 def Main():
   catapult_path = os.path.abspath(os.path.join(
-      os.path.dirname(__file__), os.path.pardir, os.path.pardir))
+      os.path.dirname(__file__), '..', '..'))
 
   _AddToPathIfNeeded(os.path.join(catapult_path, 'dashboard'))
   import dashboard
diff --git a/catapult/dashboard/bin/run_py_tests b/catapult/dashboard/bin/run_py_tests
index 721b484..b516d50 100755
--- a/catapult/dashboard/bin/run_py_tests
+++ b/catapult/dashboard/bin/run_py_tests
@@ -8,27 +8,9 @@
 import os
 import sys
 
-_CATAPULT = os.path.abspath(
-    os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir))
-
-
-def _ExtraPaths():
-  """Returns a list of paths to add to sys.path when running dashboard tests."""
-  try:
-    import dev_appserver
-  except ImportError:
-    # TODO(qyearsley): Put the App Engine SDK in the path with the
-    # binary dependency manager.
-    print 'This script requires the App Engine SDK to be in PYTHONPATH.'
-    sys.exit(1)
-  extra_paths = dev_appserver.EXTRA_PATHS
-  dashboard_path = os.path.join(_CATAPULT, 'dashboard')
-  extra_paths.append(dashboard_path)
-  _AddToPathIfNeeded(dashboard_path)
-  import dashboard
-  for library in dashboard.THIRD_PARTY_LIBRARIES:
-    extra_paths.append(os.path.join(_CATAPULT, 'third_party', library))
-  return extra_paths
+_CATAPULT_PATH = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '..', '..'))
+_DASHBOARD_PATH = os.path.join(_CATAPULT_PATH, 'dashboard')
 
 
 def _AddToPathIfNeeded(path):
@@ -37,7 +19,8 @@
 
 
 if __name__ == '__main__':
-  _AddToPathIfNeeded(_CATAPULT)
+  _AddToPathIfNeeded(_CATAPULT_PATH)
+  _AddToPathIfNeeded(_DASHBOARD_PATH)
 
   from hooks import install
   if '--no-install-hooks' in sys.argv:
@@ -46,6 +29,8 @@
     install.InstallHooks()
 
   from catapult_build import run_with_typ
-  sys.exit(run_with_typ.Run(
-      os.path.join(_CATAPULT, 'dashboard', 'dashboard'),
-      path=_ExtraPaths()))
+  import dashboard
+  return_code = run_with_typ.Run(
+      os.path.join(_DASHBOARD_PATH, 'dashboard'),
+      path=dashboard.ExtraPythonLibraryPaths())
+  sys.exit(return_code)
diff --git a/catapult/dashboard/cron.yaml b/catapult/dashboard/cron.yaml
index 897c209..465cdfa 100644
--- a/catapult/dashboard/cron.yaml
+++ b/catapult/dashboard/cron.yaml
@@ -32,30 +32,43 @@
   url: /delete_expired_entities
   schedule: every 24 hours
 
-- description: Update the test suite data used on the /report page.
+- description: Update the internal-only test suite data used on the /report page.
+  url: /update_test_suites?internal_only=true
+  schedule: every 20 minutes
+
+- description: Update the externally-visible test suite data used on the /report page.
   url: /update_test_suites
   schedule: every 20 minutes
 
+- description: Triggers bisect FYI jobs.
+  url: /bisect_fyi
+  schedule: every 24 hours
+
+# The backup cron job is disabled temporarily in order to check whether
+# it is responsible for increased cost on the weekends.
+# See: https://github.com/catapult-project/catapult/issues/1944
+#
 # Scheduled backup.
 # If you add new datastore kinds and want them to be backed up,
 # you must add kind=argument to the URL below. Backups are available at:
-# https://appengine.google.com/datastore/admin?&app_id=s~chromeperf
+# https://console.developers.google.com/datastore/settings?project=chromeperf
 # See: https://cloud.google.com/appengine/articles/scheduled_backups
-- description: Back up all entities in the datastore.
-  url: "/_ah/datastore_admin/backup.create?name=ScheduledBackup\
-&kind=Master\
-&kind=Bot\
-&kind=Test\
-&kind=Row\
-&kind=Sheriff\
-&kind=AnomalyConfig\
-&kind=Anomaly\
-&kind=StoppageAlert\
-&kind=IpWhitelist\
-&kind=BotWhitelist\
-&kind=BugLabelPatterns\
-&kind=MultipartEntity\
-&kind=PartEntity"
-  schedule: every saturday 05:00
-  target: ah-builtin-python-bundle
-
+#- description: Back up all entities in the datastore.
+#  url: "/_ah/datastore_admin/backup.create?name=ScheduledBackup\
+#&kind=Master\
+#&kind=Bot\
+#&kind=Test\
+#&kind=Row\
+#&kind=Sheriff\
+#&kind=AnomalyConfig\
+#&kind=Anomaly\
+#&kind=StoppageAlert\
+#&kind=IpWhitelist\
+#&kind=BotWhitelist\
+#&kind=BugLabelPatterns\
+#&kind=MultipartEntity\
+#&kind=PartEntity\
+#&filesystem=gs\
+#&gs_bucket_name=chromeperf.appspot.com"
+#  schedule: every saturday 05:00
+#  target: ah-builtin-python-bundle
diff --git a/catapult/dashboard/dashboard/__init__.py b/catapult/dashboard/dashboard/__init__.py
index 1071abd..40b1582 100644
--- a/catapult/dashboard/dashboard/__init__.py
+++ b/catapult/dashboard/dashboard/__init__.py
@@ -5,6 +5,8 @@
 import os
 import sys
 
+_CATAPULT_PATH = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '..', '..'))
 
 # Directories in catapult/third_party required by dashboard.
 THIRD_PARTY_LIBRARIES = [
@@ -42,33 +44,48 @@
 def PathsForDeployment():
   """Returns a list of paths to things required for deployment.
 
+  This includes both Python libraries that are required, and also
+  other files, such as config files.
+
   This list is used when building a temporary deployment directory;
   each of the items in this list will have a corresponding file or
   directory with the same basename in the deployment directory.
   """
   paths = []
-
-  catapult_path = os.path.abspath(os.path.join(
-      os.path.dirname(__file__), os.path.pardir, os.path.pardir))
-  dashboard_dir = os.path.join(catapult_path, 'dashboard')
+  paths.extend(_CatapultThirdPartyLibraryPaths())
+  for p in _AllSdkThirdPartyLibraryPaths():
+    if os.path.basename(p) in THIRD_PARTY_LIBRARIES_IN_SDK:
+      paths.append(p)
   for name in DASHBOARD_FILES:
-    paths.append(os.path.join(dashboard_dir, name))
+    paths.append(os.path.join(_CATAPULT_PATH, 'dashboard', name))
+  return paths
 
+
+def ExtraPythonLibraryPaths():
+  """Returns a list of Python library paths required for dashboard tests."""
+  paths = []
+  paths.append(os.path.join(_CATAPULT_PATH, 'dashboard'))
+  paths.extend(_AllSdkThirdPartyLibraryPaths())
+  paths.extend(_CatapultThirdPartyLibraryPaths())
+  return paths
+
+
+def _AllSdkThirdPartyLibraryPaths():
+  """Returns a list of all third party library paths from the SDK."""
   try:
     import dev_appserver
   except ImportError:
-    # The App Engine SDK is assumed to be in PYTHONPATH when setting
-    # up the deployment directory, but isn't available in production.
-    # (But this function shouldn't be called in production anyway.)
-    sys.stderr.write('Error importing dev_appserver; please install app engine'
-                     ' SDK. See https://cloud.google.com/appengine/downloads\n')
+    # TODO(qyearsley): Put the App Engine SDK in the path with the
+    # binary dependency manager.
+    # https://github.com/catapult-project/catapult/issues/2135
+    print 'This script requires the App Engine SDK to be in PYTHONPATH.'
     sys.exit(1)
-  for path in dev_appserver.EXTRA_PATHS:
-    if os.path.basename(path) in THIRD_PARTY_LIBRARIES_IN_SDK:
-      paths.append(path)
+  return dev_appserver.EXTRA_PATHS
 
-  third_party_dir = os.path.join(catapult_path, 'third_party')
-  for library_dir in THIRD_PARTY_LIBRARIES:
-    paths.append(os.path.join(third_party_dir, library_dir))
 
+def _CatapultThirdPartyLibraryPaths():
+  """Returns a list of required third-party libraries in catapult."""
+  paths = []
+  for library in THIRD_PARTY_LIBRARIES:
+    paths.append(os.path.join(_CATAPULT_PATH, 'third_party', library))
   return paths
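`ExtraPythonLibraryPaths()` above is consumed by `bin/run_py_tests` (see its diff earlier in this change), which forwards the list to `run_with_typ.Run(..., path=...)`. A hedged sketch of how such a path list is typically applied before importing project code; the checkout location and library list here are illustrative, not the dashboard's actual configuration:

```python
# Hedged sketch; the checkout path and library list are illustrative.
import os
import sys

_CATAPULT_PATH = '/path/to/catapult'  # hypothetical checkout location
_THIRD_PARTY_LIBRARIES = ['mock']     # illustrative subset


def _AddToPathIfNeeded(path):
  if path not in sys.path:
    sys.path.insert(0, path)


for library in _THIRD_PARTY_LIBRARIES:
  _AddToPathIfNeeded(os.path.join(_CATAPULT_PATH, 'third_party', library))
```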
diff --git a/catapult/dashboard/dashboard/add_point.py b/catapult/dashboard/dashboard/add_point.py
index 9d15441..7e1d620 100644
--- a/catapult/dashboard/dashboard/add_point.py
+++ b/catapult/dashboard/dashboard/add_point.py
@@ -17,14 +17,13 @@
 from dashboard import datastore_hooks
 from dashboard import math_utils
 from dashboard import post_data_handler
-from dashboard.models import anomaly
 from dashboard.models import graph_data
 
 _TASK_QUEUE_NAME = 'add-point-queue'
 
 # Number of rows to process per task queue task. This limits the task size
 # and execution time (Limits: 100KB object size and 10 minutes execution time).
-_TASK_QUEUE_SIZE = 64
+_TASK_QUEUE_SIZE = 32
 
 # Max length for a Row property name.
 _MAX_COLUMN_NAME_LENGTH = 25
@@ -38,7 +37,7 @@
 # Maximum length for a test path. This limit is required because the test path
 # used as the string ID for TestContainer (the parent in the datastore for Row
 # entities), and datastore imposes a maximum string ID length.
-_MAX_TESTPATH_LENGTH = 500
+_MAX_TEST_PATH_LENGTH = 500
 
 
 class BadRequestError(Exception):
@@ -122,12 +121,12 @@
     """
     datastore_hooks.SetPrivilegedRequest()
     if not self._CheckIpAgainstWhitelist():
-      # TODO(qyearsley): Add test coverage. See http://crbug.com/447432
+      # TODO(qyearsley): Add test coverage. See catapult:#1346.
       return
 
     data = self.request.get('data')
     if not data:
-      # TODO(qyearsley): Add test coverage. See http://crbug.com/447432
+      # TODO(qyearsley): Add test coverage. See catapult:#1346.
       self.ReportError('Missing "data" parameter.', status=400)
       return
 
@@ -142,7 +141,9 @@
     try:
       if type(data) is dict:
         if data.get('chart_data'):
-          data = self._DashboardJsonToRawRows(data)
+          data = _DashboardJsonToRawRows(data)
+          if not data:
+            return  # No data to add, bail out.
         else:
           self.ReportError(
               'Data should be a list of rows or a Dashboard JSON v1.0 dict.',
@@ -151,98 +152,124 @@
       test_map = _ConstructTestPathMap(data)
       for row_dict in data:
         _ValidateRowDict(row_dict, test_map)
-      _AddTasksAsync(data)
+      _AddTasks(data)
     except BadRequestError as error:
       # If any of the data was invalid, abort immediately and return an error.
       self.ReportError(error.message, status=400)
 
-  def _DashboardJsonToRawRows(self, dash_json_dict):
-    """Formats a Dashboard JSON dict as a list of row dicts.
 
-    For the dashboard to begin accepting the Telemetry Dashboard JSON format
-    as per go/telemetry-json, this function chunks a Dashboard JSON literal
-    into rows and passes the resulting list to _AddTasksAsync.
+def _DashboardJsonToRawRows(dash_json_dict):
+  """Formats a Dashboard JSON dict as a list of row dicts.
 
-    Args:
-      dash_json_dict: A dashboard JSON v1.0 dict.
-
-    Returns:
-      A list of dicts, each of which represents a point.
-
-    Raises:
-      AssertionError: The given argument wasn't a dict.
-      BadRequestError: The content of the input wasn't valid.
-    """
-    assert type(dash_json_dict) is dict
-    # A Dashboard JSON dict should at least have all charts coming from the
-    # same master, bot and rev. It can contain multiple charts, however.
-    if not dash_json_dict.get('master'):
-      raise BadRequestError('No master name given.')
-    if not dash_json_dict.get('bot'):
-      raise BadRequestError('No bot name given.')
-    if not dash_json_dict.get('point_id'):
-      raise BadRequestError('No point_id number given.')
-    if not dash_json_dict.get('chart_data'):
-      self.ReportError('No chart data given.', status=400)
-      return None
-
-    charts = dash_json_dict['chart_data']['charts']
-    # Links to about:tracing traces are listed under 'trace'; if they
-    # exist copy them to a separate dictionary and delete from the chartjson
-    # so that we don't try to process them as data points.
-    tracing_links = None
-    if 'trace' in charts:
-      tracing_links = charts['trace'].copy()
-      del charts['trace']
-    row_template = _MakeRowTemplate(dash_json_dict)
-
-    benchmark_name = dash_json_dict['chart_data']['benchmark_name']
-    benchmark_description = dash_json_dict['chart_data'].get(
-        'benchmark_description', '')
-    trace_rerun_options = dash_json_dict['chart_data'].get(
-        'trace_rerun_options', [])
-    trace_rerun_options = dict((k, v) for (k, v) in trace_rerun_options)
-    is_ref = bool(dash_json_dict.get('is_ref'))
-    rows = []
-
-    for chart in charts:
-      for trace in charts[chart]:
-        # Need to do a deep copy here so we don't copy a_tracing_uri data.
-        row = copy.deepcopy(row_template)
-        specific_vals = _FlattenTrace(
-            benchmark_name, chart, trace, charts[chart][trace], is_ref,
-            tracing_links, benchmark_description)
-        # Telemetry may validly produce rows that represent a value of NaN. To
-        # avoid getting into messy situations with alerts, we do not add such
-        # rows to be processed.
-        if not (math.isnan(specific_vals['value']) or
-                math.isnan(specific_vals['error'])):
-          if specific_vals['tracing_uri']:
-            row['supplemental_columns']['a_tracing_uri'] = specific_vals[
-                'tracing_uri']
-          if trace_rerun_options:
-            row['supplemental_columns']['a_trace_rerun_options'] = (
-                trace_rerun_options)
-          row.update(specific_vals)
-          rows.append(row)
-
-    return rows
-
-
-def _AddTasksAsync(data):
-  """Puts tasks on queue for adding row and analyzing for anomalies.
+  For the dashboard to begin accepting the Telemetry Dashboard JSON format
+  as per go/telemetry-json, this function chunks a Dashboard JSON literal
+  into rows and passes the resulting list to _AddTasks.
 
   Args:
-    data: A list of dictionary each of which represents one point.
+    dash_json_dict: A dashboard JSON v1.0 dict.
+
+  Returns:
+    A list of dicts, each of which represents a point.
+
+  Raises:
+    AssertionError: The given argument wasn't a dict.
+    BadRequestError: The content of the input wasn't valid.
   """
-  queue = taskqueue.Queue(_TASK_QUEUE_NAME)
+  assert type(dash_json_dict) is dict
+  # A Dashboard JSON dict should at least have all charts coming from the
+  # same master, bot and rev. It can contain multiple charts, however.
+  if not dash_json_dict.get('master'):
+    raise BadRequestError('No master name given.')
+  if not dash_json_dict.get('bot'):
+    raise BadRequestError('No bot name given.')
+  if not dash_json_dict.get('point_id'):
+    raise BadRequestError('No point_id number given.')
+  if not dash_json_dict.get('chart_data'):
+    raise BadRequestError('No chart data given.')
+  test_suite_name = _TestSuiteName(dash_json_dict)
+
+  chart_data = dash_json_dict.get('chart_data', {})
+  charts = chart_data.get('charts', {})
+  if not charts:
+    return []  # No charts implies no data to add.
+
+  # Links to about:tracing traces are listed under 'trace'; if they
+  # exist copy them to a separate dictionary and delete from the chartjson
+  # so that we don't try to process them as data points.
+  tracing_links = None
+  if 'trace' in charts:
+    tracing_links = charts['trace'].copy()
+    del charts['trace']
+  row_template = _MakeRowTemplate(dash_json_dict)
+
+  benchmark_description = chart_data.get('benchmark_description', '')
+  trace_rerun_options = dict(chart_data.get('trace_rerun_options', []))
+  is_ref = bool(dash_json_dict.get('is_ref'))
+  rows = []
+
+  for chart in charts:
+    for trace in charts[chart]:
+      # Need to do a deep copy here so we don't copy a_tracing_uri data.
+      row = copy.deepcopy(row_template)
+      specific_vals = _FlattenTrace(
+          test_suite_name, chart, trace, charts[chart][trace], is_ref,
+          tracing_links, benchmark_description)
+      # Telemetry may validly produce rows that represent a value of NaN. To
+      # avoid getting into messy situations with alerts, we do not add such
+      # rows to be processed.
+      if not (math.isnan(specific_vals['value']) or
+              math.isnan(specific_vals['error'])):
+        if specific_vals['tracing_uri']:
+          row['supplemental_columns']['a_tracing_uri'] = specific_vals[
+              'tracing_uri']
+        if trace_rerun_options:
+          row['supplemental_columns']['a_trace_rerun_options'] = (
+              trace_rerun_options)
+        row.update(specific_vals)
+        rows.append(row)
+
+  return rows
+
+
+def _TestSuiteName(dash_json_dict):
+  """Extracts a test suite name from Dashboard JSON.
+
+  The dashboard JSON may contain a field "test_suite_name". If this is not
+  present or it is None, the dashboard will fall back to using "benchmark_name"
+  in the "chart_data" dict.
+  """
+  if dash_json_dict.get('test_suite_name'):
+    return dash_json_dict['test_suite_name']
+  try:
+    return dash_json_dict['chart_data']['benchmark_name']
+  except KeyError as e:
+    raise BadRequestError('Could not find test suite name. ' + e.message)
+
+
+def _AddTasks(data):
+  """Puts tasks on queue for adding data.
+
+  Args:
+    data: A list of dictionaries, each of which represents one point.
+  """
   task_list = []
-  for i in range(0, len(data), _TASK_QUEUE_SIZE):
-    data_chunk = data[i:i + _TASK_QUEUE_SIZE]
-    task = taskqueue.Task(url='/add_point_queue',
-                          params={'data': json.dumps(data_chunk)})
-    task_list.append(task)
-  queue.add_async(task_list).get_result()
+  for data_sublist in _Chunk(data, _TASK_QUEUE_SIZE):
+    task_list.append(taskqueue.Task(
+        url='/add_point_queue',
+        params={'data': json.dumps(data_sublist)}))
+  queue = taskqueue.Queue(_TASK_QUEUE_NAME)
+  for task_sublist in _Chunk(task_list, taskqueue.MAX_TASKS_PER_ADD):
+    # Calling get_result() waits for all tasks to be added. This may behave
+    # differently from, and perhaps faster than, calling queue.add directly.
+    queue.add_async(task_sublist).get_result()
+
+
+def _Chunk(items, chunk_size):
+  """Breaks a long list into sub-lists of a particular size."""
+  chunks = []
+  for i in range(0, len(items), chunk_size):
+    chunks.append(items[i:i + chunk_size])
+  return chunks
 
 
 def _MakeRowTemplate(dash_json_dict):
@@ -321,37 +348,7 @@
     tir_label, chart_name = chart_name.split('@@')
     chart_name = chart_name + '/' + tir_label
 
-  trace_type = trace.get('type')
-  if trace_type == 'scalar':
-    value = trace.get('value')
-    if value is None:
-      if trace.get('none_value_reason'):
-        value = float('nan')
-      else:
-        # TODO(qyearsley): Add test coverage. See http://crbug.com/447432
-        raise BadRequestError('Expected scalar value, got: ' + value)
-    error = 0
-  elif trace_type == 'list_of_scalar_values':
-    values = trace.get('values')
-    if not values or None in values:
-      if trace.get('none_value_reason'):
-        value = float('nan')
-        error = float('nan')
-      else:
-        raise BadRequestError('Expected list of scalar values, got: ' + values)
-    else:
-      value = math_utils.Mean(values)
-      std = trace.get('std')
-      if std is not None:
-        error = std
-      else:
-        error = math_utils.StandardDeviation(values)
-  elif trace_type == 'histogram':
-    value, error = _GeomMeanAndStdDevFromHistogram(trace)
-  elif trace_type is not None:
-    raise BadRequestError('Invalid value type in chart object: ' + trace_type)
-  else:
-    raise BadRequestError('No trace type provided.')
+  value, error = _ExtractValueAndError(trace)
 
   # If there is a link to an about:tracing trace in cloud storage for this
   # test trace_name, cache it.
@@ -362,7 +359,6 @@
     tracing_uri = tracing_links[trace_name]['cloud_url'].replace('\\/', '/')
 
   trace_name = _EscapeName(trace_name)
-
   if trace_name == 'summary':
     subtest_name = chart_name
   else:
@@ -393,6 +389,58 @@
   return row_dict
 
 
+def _ExtractValueAndError(trace):
+  """Returns the value and measure of error from a chartjson trace dict.
+
+  Args:
+    trace: A dict that has one "result" from a performance test, e.g. one
+        "value" in a Telemetry test, with the keys "trace_type", "value", etc.
+
+  Returns:
+    A pair (value, error) where |value| is a float and |error| is some measure
+    of variance used to show error bars; |error| could be None.
+
+  Raises:
+    BadRequestError: Data format was invalid.
+  """
+  trace_type = trace.get('type')
+
+  if trace_type == 'scalar':
+    value = trace.get('value')
+    if value is None and trace.get('none_value_reason'):
+      return float('nan'), 0
+    try:
+      return float(value), 0
+    except:
+      raise BadRequestError('Expected scalar value, got: %r' % value)
+
+  if trace_type == 'list_of_scalar_values':
+    values = trace.get('values')
+    if not isinstance(values, list) and values is not None:
+      # Something else (such as a single scalar, or string) was given.
+      raise BadRequestError('Expected list of scalar values, got: %r' % values)
+    if not values or None in values:
+      # None was included or values is None; this is not an error if there
+      # is a reason.
+      if trace.get('none_value_reason'):
+        return float('nan'), float('nan')
+      raise BadRequestError('Expected list of scalar values, got: %r' % values)
+    if not all(isinstance(v, float) or isinstance(v, int) for v in values):
+      raise BadRequestError('Non-number found in values list: %r' % values)
+    value = math_utils.Mean(values)
+    std = trace.get('std')
+    if std is not None:
+      error = std
+    else:
+      error = math_utils.StandardDeviation(values)
+    return value, error
+
+  if trace_type == 'histogram':
+    return _GeomMeanAndStdDevFromHistogram(trace)
+
+  raise BadRequestError('Invalid value type in chart object: %r' % trace_type)
+
+
 def _EscapeName(name):
   """Escapes a trace name so it can be stored in a row.
 
@@ -425,7 +473,7 @@
   # build/scripts/common/chromium_utils.py and was used initially for
   # processing histogram results on the buildbot side previously.
   if 'buckets' not in histogram:
-    # TODO(qyearsley): Add test coverage. See http://crbug.com/447432
+    # TODO(qyearsley): Add test coverage. See catapult:#1346.
     return 0.0, 0.0
   count = 0
   sum_of_logs = 0
@@ -433,7 +481,7 @@
     if 'high' in bucket:
       bucket['mean'] = (bucket['low'] + bucket['high']) / 2.0
     else:
-      # TODO(qyearsley): Add test coverage. See http://crbug.com/447432
+      # TODO(qyearsley): Add test coverage. See catapult:#1346.
       bucket['mean'] = bucket['low']
     if bucket['mean'] > 0:
       sum_of_logs += math.log(bucket['mean']) * bucket['count']
@@ -481,14 +529,14 @@
     if not ('master' in row and 'bot' in row and 'test' in row):
       continue
     path = '%s/%s/%s' % (row['master'], row['bot'], row['test'].strip('/'))
-    if len(path) > _MAX_TESTPATH_LENGTH:
+    if len(path) > _MAX_TEST_PATH_LENGTH:
       continue
     last_added_revision_keys.append(ndb.Key('LastAddedRevision', path))
 
   try:
     last_added_revision_entities = ndb.get_multi(last_added_revision_keys)
   except datastore_errors.BadRequestError:
-    # TODO(qyearsley): Add test coverage. See http://crbug.com/447432
+    # TODO(qyearsley): Add test coverage. See catapult:#1346.
     logging.warn('Datastore BadRequestError when getting %s',
                  repr(last_added_revision_keys))
     return {}
@@ -538,7 +586,7 @@
   """Checks whether all the parts of the test path are valid."""
   # A test with a test path length over the max key length shouldn't be
   # created, since the test path is used in TestContainer keys.
-  if len(test_path) > _MAX_TESTPATH_LENGTH:
+  if len(test_path) > _MAX_TEST_PATH_LENGTH:
     raise BadRequestError('Test path too long: %s' % test_path)
 
   # Stars are reserved for test path patterns, so they can't be used in names.
@@ -610,10 +658,10 @@
     True if acceptable, False otherwise.
   """
   if last_row_id is None:
-    # TODO(qyearsley): Add test coverage. See http://crbug.com/447432
+    # TODO(qyearsley): Add test coverage. See catapult:#1346.
     return True
   if row_id <= 0:
-    # TODO(qyearsley): Add test coverage. See http://crbug.com/447432
+    # TODO(qyearsley): Add test coverage. See catapult:#1346.
     return False
   # Too big of a decrease.
   if row_id < 0.5 * last_row_id:
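`_AddTasks()` above first chunks the incoming rows into tasks of `_TASK_QUEUE_SIZE` rows each, then adds those tasks to the queue in batches of `taskqueue.MAX_TASKS_PER_ADD`. A standalone sketch of that two-level chunking with illustrative sizes (32 rows per task as above, and a made-up batch size of 2; App Engine's real per-add limit is larger):

```python
# Standalone sketch of the two-level chunking in _AddTasks; the batch size of
# 2 is made up for illustration.
def Chunk(items, chunk_size):
  """Breaks a long list into sub-lists of a particular size."""
  return [items[i:i + chunk_size] for i in range(0, len(items), chunk_size)]

rows = ['row%d' % i for i in range(70)]
task_payloads = Chunk(rows, 32)        # rows per task, cf. _TASK_QUEUE_SIZE
add_batches = Chunk(task_payloads, 2)  # tasks per add_async() call
print [len(chunk) for chunk in task_payloads]  # [32, 32, 6]
print [len(batch) for batch in add_batches]    # [2, 1]
```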
diff --git a/catapult/dashboard/dashboard/add_point_queue.py b/catapult/dashboard/dashboard/add_point_queue.py
index 2c8f283..8cf9d2f 100644
--- a/catapult/dashboard/dashboard/add_point_queue.py
+++ b/catapult/dashboard/dashboard/add_point_queue.py
@@ -11,16 +11,18 @@
 from google.appengine.ext import ndb
 
 from dashboard import add_point
-from dashboard import bot_whitelist
 from dashboard import datastore_hooks
 from dashboard import find_anomalies
 from dashboard import graph_revisions
 from dashboard import request_handler
+from dashboard import stored_object
 from dashboard import units_to_direction
 from dashboard import utils
 from dashboard.models import anomaly
 from dashboard.models import graph_data
 
+BOT_WHITELIST_KEY = 'bot_whitelist'
+
 
 class AddPointQueueHandler(request_handler.RequestHandler):
   """Request handler to process points and add them to the datastore.
@@ -46,14 +48,14 @@
     data = json.loads(self.request.get('data'))
     _PrewarmGets(data)
 
-    whitelist = ndb.Key('BotWhitelist', bot_whitelist.WHITELIST_KEY).get()
+    bot_whitelist = stored_object.Get(BOT_WHITELIST_KEY)
 
     all_put_futures = []
     added_rows = []
     monitored_test_keys = []
     for row_dict in data:
       try:
-        new_row, parent_test, put_futures = _AddRow(row_dict, whitelist)
+        new_row, parent_test, put_futures = _AddRow(row_dict, bot_whitelist)
         added_rows.append(new_row)
         is_monitored = parent_test.sheriff and parent_test.has_rows
         if is_monitored:
@@ -110,8 +112,8 @@
   ndb.get_multi_async(list(master_keys) + list(bot_keys) + list(test_keys))
 
 
-def _AddRow(row_dict, whitelist):
-  """Add a Row entity to the datastore.
+def _AddRow(row_dict, bot_whitelist):
+  """Adds a Row entity to the datastore.
 
   There are three main things that are needed in order to make a new entity;
   the ID, the parent key, and all of the properties. Making these three
@@ -120,8 +122,7 @@
 
   Args:
     row_dict: A dictionary obtained from the JSON that was received.
-    whitelist: A BotWhitelist entity, which determines what new tests
-        are marked as internal-only.
+    bot_whitelist: A list of whitelisted bots names.
 
   Returns:
     A triple: The new row, the parent test, and a list of entity put futures.
@@ -130,7 +131,7 @@
     add_point.BadRequestError: The input dict was invalid.
     RuntimeError: The required parent entities couldn't be created.
   """
-  parent_test = _GetParentTest(row_dict, whitelist)
+  parent_test = _GetParentTest(row_dict, bot_whitelist)
   test_container_key = utils.GetTestContainerKey(parent_test.key)
 
   columns = add_point.GetAndValidateRowProperties(row_dict)
@@ -158,13 +159,12 @@
   return new_row, parent_test, entity_put_futures
 
 
-def _GetParentTest(row_dict, whitelist):
+def _GetParentTest(row_dict, bot_whitelist):
   """Gets the parent test for a Row based on an input dictionary.
 
   Args:
     row_dict: A dictionary from the data parameter.
-    whitelist: A BotWhitelist entity, which determines what new tests
-        are marked as internal-only.
+    bot_whitelist: A list of whitelisted bot names.
 
   Returns:
     A Test entity.
@@ -178,7 +178,7 @@
   units = row_dict.get('units')
   higher_is_better = row_dict.get('higher_is_better')
   improvement_direction = _ImprovementDirection(higher_is_better)
-  internal_only = _BotInternalOnly(bot_name, whitelist)
+  internal_only = _BotInternalOnly(bot_name, bot_whitelist)
   benchmark_description = row_dict.get('benchmark_description')
 
   parent_test = _GetOrCreateAncestors(
@@ -197,15 +197,18 @@
   return anomaly.UP if higher_is_better else anomaly.DOWN
 
 
-def _BotInternalOnly(bot_name, whitelist=None):
-  """Check whether the bot with a given name is internal-only."""
-  if not whitelist:
+def _BotInternalOnly(bot_name, bot_whitelist):
+  """Checks whether a given bot name is internal-only.
+
+  If a bot name is internal only, then new data for that bot should be marked
+  as internal-only.
+  """
+  if not bot_whitelist:
     logging.warning(
         'No bot whitelist available. All data will be internal-only. If this '
-        'is not intended, go to /bot_whitelist to add a list of externally '
-        'visible bots.')
+        'is not intended, please add a bot whitelist using /edit_site_config.')
     return True
-  return bot_name not in whitelist.bots
+  return bot_name not in bot_whitelist
 
 
 def _GetOrCreateAncestors(
@@ -306,8 +309,11 @@
 
   if existing.stoppage_alert:
     alert = existing.stoppage_alert.get()
-    alert.recovered = True
-    alert.put()
+    if alert:
+      alert.recovered = True
+      alert.put()
+    else:
+      logging.warning('Stoppage alert %s not found.', existing.stoppage_alert)
     existing.stoppage_alert = None
     properties_changed = True
 
@@ -334,6 +340,6 @@
 
 
 def _IsRefBuild(test_key):
-  """Returns True if test_key is a reference build."""
+  """Checks whether a Test is for a reference build test run."""
   key_path = test_key.flat()
   return key_path[-1] == 'ref' or key_path[-1].endswith('_ref')
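Two small predicates above are worth a usage sketch: `_BotInternalOnly()` now takes a plain list of whitelisted bot names (from `stored_object`) and treats everything as internal-only when no whitelist is configured, and `_IsRefBuild()` flags Tests whose last path component is `ref` or ends in `_ref`. The bot names below come from the accompanying tests; the test path is illustrative:

```python
# Sketch mirroring the two predicates above; the inputs are illustrative.
def BotInternalOnly(bot_name, bot_whitelist):
  if not bot_whitelist:
    return True  # No whitelist configured: treat all data as internal-only.
  return bot_name not in bot_whitelist


def IsRefBuild(test_path_parts):
  last = test_path_parts[-1]
  return last == 'ref' or last.endswith('_ref')


whitelist = ['linux-release', 'win7']
print BotInternalOnly('win7', whitelist)             # False
print BotInternalOnly('suddenly_secret', whitelist)  # True
print BotInternalOnly('win7', None)                  # True
print IsRefBuild(['ChromiumPerf', 'win7', 'dromaeo', 'jslib_ref'])  # True
```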
diff --git a/catapult/dashboard/dashboard/add_point_test.py b/catapult/dashboard/dashboard/add_point_test.py
index 6e1b424..a5c1630 100644
--- a/catapult/dashboard/dashboard/add_point_test.py
+++ b/catapult/dashboard/dashboard/add_point_test.py
@@ -2,6 +2,7 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+import copy
 import json
 import math
 import unittest
@@ -15,15 +16,20 @@
 
 from dashboard import add_point
 from dashboard import add_point_queue
-from dashboard import bot_whitelist
 from dashboard import layered_cache
+from dashboard import stored_object
 from dashboard import testing_common
 from dashboard import units_to_direction
+from dashboard import utils
 from dashboard.models import anomaly
 from dashboard.models import anomaly_config
 from dashboard.models import graph_data
 from dashboard.models import sheriff
 
+# TODO(qyearsley): Shorten this module.
+# See https://github.com/catapult-project/catapult/issues/1917
+# pylint: disable=too-many-lines
+
 # A limit to the number of entities that can be fetched. This is just an
 # safe-guard to prevent possibly fetching too many entities.
 _FETCH_LIMIT = 100
@@ -42,6 +48,7 @@
     'master': 'ChromiumPerf',
     'bot': 'win7',
     'point_id': '12345',
+    'test_suite_name': 'my_test_suite',
     'supplemental': {
         'os': 'mavericks',
         'gpu_oem': 'intel'
@@ -51,7 +58,7 @@
         'blink': '234567'
     },
     'chart_data': {
-        'benchmark_name': 'my_test_suite',
+        'benchmark_name': 'my_benchmark',
         'benchmark_description': 'foo',
         'format_version': '1.0',
         'charts': {
@@ -72,6 +79,7 @@
     'master': 'ChromiumPerf',
     'bot': 'win7',
     'point_id': '12345',
+    'test_suite_name': 'my_test_suite',
     'supplemental': {
         'os': 'mavericks',
         'gpu_oem': 'intel'
@@ -81,7 +89,7 @@
         'blink': '234567'
     },
     'chart_data': {
-        'benchmark_name': 'my_test_suite',
+        'benchmark_name': 'my_benchmark',
         'benchmark_description': 'foo',
         'format_version': '1.0',
         'charts': {
@@ -250,10 +258,10 @@
   @mock.patch.object(add_point_queue.find_anomalies, 'ProcessTest')
   def testPost_TestNameEndsWithUnderscoreRef_ProcessTestIsNotCalled(
       self, mock_process_test):
-    """Tests that tests ending with _ref aren't analyze for anomalies."""
+    """Tests that Tests ending with "_ref" aren't analyzed for Anomalies."""
     sheriff.Sheriff(
         id='ref_sheriff', email='a@chromium.org', patterns=['*/*/*/*']).put()
-    point = _SAMPLE_POINT.copy()
+    point = copy.deepcopy(_SAMPLE_POINT)
     point['test'] = '1234/abcd_ref'
     self.testapp.post(
         '/add_point', {'data': json.dumps([point])},
@@ -267,7 +275,7 @@
     """Tests that leaf tests named ref aren't added to the task queue."""
     sheriff.Sheriff(
         id='ref_sheriff', email='a@chromium.org', patterns=['*/*/*/*']).put()
-    point = _SAMPLE_POINT.copy()
+    point = copy.deepcopy(_SAMPLE_POINT)
     point['test'] = '1234/ref'
     self.testapp.post(
         '/add_point', {'data': json.dumps([point])},
@@ -280,7 +288,7 @@
       self, mock_process_test):
     sheriff.Sheriff(
         id='ref_sheriff', email='a@chromium.org', patterns=['*/*/*/*']).put()
-    point = _SAMPLE_POINT.copy()
+    point = copy.deepcopy(_SAMPLE_POINT)
     point['test'] = '_ref/abcd'
     self.testapp.post(
         '/add_point', {'data': json.dumps([point])},
@@ -290,7 +298,7 @@
 
   def testPost_TestPathTooLong_PointRejected(self):
     """Tests that an error is returned when the test path would be too long."""
-    point = _SAMPLE_POINT.copy()
+    point = copy.deepcopy(_SAMPLE_POINT)
     point['test'] = 'long_test/%s' % ('x' * 490)
     self.testapp.post(
         '/add_point', {'data': json.dumps([point])}, status=400,
@@ -300,7 +308,7 @@
     self.assertEqual(0, len(tests))
 
   def testPost_TrailingSlash_Ignored(self):
-    point = _SAMPLE_POINT.copy()
+    point = copy.deepcopy(_SAMPLE_POINT)
     point['test'] = 'mach_ports_parent/mach_ports/'
     self.testapp.post(
         '/add_point', {'data': json.dumps([point])},
@@ -312,8 +320,8 @@
     self.assertEqual('mach_ports', tests[1].key.id())
     self.assertEqual('mach_ports_parent', tests[1].parent_test.id())
 
-  def test_LeadingSlash_Ignored(self):
-    point = _SAMPLE_POINT.copy()
+  def testPost_LeadingSlash_Ignored(self):
+    point = copy.deepcopy(_SAMPLE_POINT)
     point['test'] = '/boot_time/pre_plugin_time'
     self.testapp.post(
         '/add_point', {'data': json.dumps([point])},
@@ -333,14 +341,14 @@
 
   def testPost_BadGraphName_DataRejected(self):
     """Tests that an error is returned when the test name has too many parts."""
-    point = _SAMPLE_POINT.copy()
+    point = copy.deepcopy(_SAMPLE_POINT)
     point['test'] = 'a/b/c/d/e/f/g/h/i/j/k'
     self.testapp.post(
         '/add_point', {'data': json.dumps([point])}, status=400,
         extra_environ={'REMOTE_ADDR': _WHITELISTED_IP})
 
   def testPost_TestNameHasDoubleUnderscores_Rejected(self):
-    point = _SAMPLE_POINT.copy()
+    point = copy.deepcopy(_SAMPLE_POINT)
     point['test'] = 'my_test_suite/__my_test__'
     self.testapp.post(
         '/add_point', {'data': json.dumps([point])}, status=400,
@@ -387,16 +395,16 @@
         extra_environ={'REMOTE_ADDR': _WHITELISTED_IP})
 
   def testPost_InvalidRevision_Rejected(self):
-    point = _SAMPLE_POINT.copy()
+    point = copy.deepcopy(_SAMPLE_POINT)
     point['revision'] = 'I am not a valid revision number!'
     response = self.testapp.post(
         '/add_point', {'data': json.dumps([point])}, status=400,
         extra_environ={'REMOTE_ADDR': _WHITELISTED_IP})
-    self.assertEqual(
+    self.assertIn(
         'Bad value for "revision", should be numerical.\n', response.body)
 
   def testPost_InvalidSupplementalRevision_DropsRevision(self):
-    point = _SAMPLE_POINT.copy()
+    point = copy.deepcopy(_SAMPLE_POINT)
     point['supplemental_columns'] = {
         'r_one': '1234',
         'r_two': 'I am not a valid revision or version.',
@@ -411,8 +419,8 @@
     self.assertFalse(hasattr(row, 'r_two'))
 
   def testPost_UnWhitelistedBots_MarkedInternalOnly(self):
-    bot_whitelist.BotWhitelist(
-        id=bot_whitelist.WHITELIST_KEY, bots=['linux-release', 'win7']).put()
+    stored_object.Set(
+        add_point_queue.BOT_WHITELIST_KEY, ['linux-release', 'win7'])
     parent = graph_data.Master(id='ChromiumPerf').put()
     parent = graph_data.Bot(
         id='suddenly_secret', parent=parent, internal_only=False).put()
@@ -549,10 +557,10 @@
     if the Test matches the pattern of the AnomalyConfig.
     """
     anomaly_config1 = anomaly_config.AnomalyConfig(
-        id='modelset1', config='',
+        id='anomaly_config1', config='',
         patterns=['ChromiumPerf/*/dromaeo/jslib']).put()
     anomaly_config2 = anomaly_config.AnomalyConfig(
-        id='modelset2', config='',
+        id='anomaly_config2', config='',
         patterns=['*/*image_benchmark/*', '*/*/scrolling_benchmark/*']).put()
 
     data_param = json.dumps([
@@ -757,7 +765,7 @@
 
   def testPost_GitHashSupplementalRevision_Accepted(self):
     """Tests that git hashes can be added as supplemental revision columns."""
-    point = _SAMPLE_POINT.copy()
+    point = copy.deepcopy(_SAMPLE_POINT)
     point['revision'] = 123
     point['supplemental_columns'] = {
         'r_chromium_rev': '2eca27b067e3e57c70e40b8b95d0030c5d7c1a7f',
@@ -839,28 +847,28 @@
 
   def testPost_NoValue_Rejected(self):
     """Tests the error returned when no "value" is given."""
-    point = _SAMPLE_POINT.copy()
+    point = copy.deepcopy(_SAMPLE_POINT)
     del point['value']
     response = self.testapp.post(
         '/add_point', {'data': json.dumps([point])}, status=400,
         extra_environ={'REMOTE_ADDR': _WHITELISTED_IP})
-    self.assertEqual('No "value" given.\n', response.body)
+    self.assertIn('No "value" given.\n', response.body)
     self.assertIsNone(graph_data.Row.query().get())
 
   def testPost_WithBadValue_Rejected(self):
     """Tests the error returned when an invalid "value" is given."""
-    point = _SAMPLE_POINT.copy()
+    point = copy.deepcopy(_SAMPLE_POINT)
     point['value'] = 'hello'
     response = self.testapp.post(
         '/add_point', {'data': json.dumps([point])}, status=400,
         extra_environ={'REMOTE_ADDR': _WHITELISTED_IP})
     self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME)
-    self.assertEqual(
+    self.assertIn(
         'Bad value for "value", should be numerical.\n', response.body)
     self.assertIsNone(graph_data.Row.query().get())
 
   def testPost_WithBadPointErrorValue_ErrorValueDropped(self):
-    point = _SAMPLE_POINT.copy()
+    point = copy.deepcopy(_SAMPLE_POINT)
     point['error'] = 'not a number'
     self.testapp.post(
         '/add_point', {'data': json.dumps([point])},
@@ -871,7 +879,7 @@
 
   def testPost_TooManyColumns_SomeColumnsDropped(self):
     """Tests that some columns are dropped if there are too many."""
-    point = _SAMPLE_POINT.copy()
+    point = copy.deepcopy(_SAMPLE_POINT)
     supplemental_columns = {}
     for i in range(1, add_point._MAX_NUM_COLUMNS * 2):
       supplemental_columns['d_run_%d' % i] = i
@@ -887,7 +895,7 @@
     self.assertLessEqual(len(data_columns), add_point._MAX_NUM_COLUMNS)
 
   def testPost_BadSupplementalColumnName_ColumnDropped(self):
-    point = _SAMPLE_POINT.copy()
+    point = copy.deepcopy(_SAMPLE_POINT)
     point['supplemental_columns'] = {'q_foo': 'bar'}
 
     self.testapp.post(
@@ -899,7 +907,7 @@
     self.assertFalse(hasattr(row, 'q_foo'))
 
   def testPost_LongSupplementalColumnName_ColumnDropped(self):
-    point = _SAMPLE_POINT.copy()
+    point = copy.deepcopy(_SAMPLE_POINT)
     key = 'a_' + ('a' * add_point._MAX_COLUMN_NAME_LENGTH)
     point['supplemental_columns'] = {
         key: '1234',
@@ -915,7 +923,7 @@
     self.assertFalse(hasattr(row, key))
 
   def testPost_LongSupplementalAnnotation_ColumnDropped(self):
-    point = _SAMPLE_POINT.copy()
+    point = copy.deepcopy(_SAMPLE_POINT)
     point['supplemental_columns'] = {
         'a_one': 'z' * (add_point._STRING_COLUMN_MAX_LENGTH + 1),
         'a_two': 'hello',
@@ -931,7 +939,7 @@
 
   def testPost_BadSupplementalDataColumn_ColumnDropped(self):
     """Tests that bad supplemental data columns are dropped."""
-    point = _SAMPLE_POINT.copy()
+    point = copy.deepcopy(_SAMPLE_POINT)
     point['supplemental_columns'] = {
         'd_run_1': 'hello',
         'd_run_2': 42.5,
@@ -949,7 +957,7 @@
     # If a point's ID is much lower than the last one, it should be rejected
     # because this indicates that the revision type was accidentally changed.
     # First add one point; it's accepted because it's the first in the series.
-    point = _SAMPLE_POINT.copy()
+    point = copy.deepcopy(_SAMPLE_POINT)
     point['revision'] = 1408479179
     self.testapp.post(
         '/add_point', {'data': json.dumps([point])},
@@ -959,7 +967,7 @@
     last_added_revision = ndb.Key('LastAddedRevision', test_path).get()
     self.assertEqual(1408479179, last_added_revision.revision)
 
-    point = _SAMPLE_POINT.copy()
+    point = copy.deepcopy(_SAMPLE_POINT)
     point['revision'] = 285000
     self.testapp.post(
         '/add_point', {'data': json.dumps([point])}, status=400,
@@ -969,14 +977,14 @@
 
   def testPost_RevisionTooHigh_Rejected(self):
     # First add one point; it's accepted because it's the first in the series.
-    point = _SAMPLE_POINT.copy()
+    point = copy.deepcopy(_SAMPLE_POINT)
     point['revision'] = 285000
     self.testapp.post(
         '/add_point', {'data': json.dumps([point])},
         extra_environ={'REMOTE_ADDR': _WHITELISTED_IP})
     self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME)
 
-    point = _SAMPLE_POINT.copy()
+    point = copy.deepcopy(_SAMPLE_POINT)
     point['revision'] = 1408479179
     self.testapp.post(
         '/add_point', {'data': json.dumps([point])}, status=400,
@@ -985,17 +993,17 @@
     self.assertEqual(1, len(rows))
 
   def testPost_MultiplePointsWithCloseRevisions_Accepted(self):
-    point = _SAMPLE_POINT.copy()
+    point = copy.deepcopy(_SAMPLE_POINT)
     point['revision'] = 285000
     self.testapp.post(
         '/add_point', {'data': json.dumps([point])},
         extra_environ={'REMOTE_ADDR': _WHITELISTED_IP})
-    point = _SAMPLE_POINT.copy()
+    point = copy.deepcopy(_SAMPLE_POINT)
     point['revision'] = 285200
     self.testapp.post(
         '/add_point', {'data': json.dumps([point])},
         extra_environ={'REMOTE_ADDR': _WHITELISTED_IP})
-    point = _SAMPLE_POINT.copy()
+    point = copy.deepcopy(_SAMPLE_POINT)
     point['revision'] = 285100
     self.testapp.post(
         '/add_point', {'data': json.dumps([point])},
@@ -1024,8 +1032,30 @@
         'Master', 'ChromiumPerf', 'Bot', 'win7', 'Test', 'my_test_suite').get()
     self.assertEqual('foo', test_suite.description)
 
+  def testPost_NoTestSuiteName_BenchmarkNameUsed(self):
+    sample = copy.deepcopy(_SAMPLE_DASHBOARD_JSON)
+    del sample['test_suite_name']
+    data_param = json.dumps(sample)
+    self.testapp.post(
+        '/add_point', {'data': data_param},
+        extra_environ={'REMOTE_ADDR': _WHITELISTED_IP})
+    self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME)
+    self.assertIsNone(utils.TestKey('ChromiumPerf/win7/my_test_suite').get())
+    self.assertIsNotNone(utils.TestKey('ChromiumPerf/win7/my_benchmark').get())
+
+  def testPost_TestSuiteNameIsNone_BenchmarkNameUsed(self):
+    sample = copy.deepcopy(_SAMPLE_DASHBOARD_JSON)
+    sample['test_suite_name'] = None
+    data_param = json.dumps(sample)
+    self.testapp.post(
+        '/add_point', {'data': data_param},
+        extra_environ={'REMOTE_ADDR': _WHITELISTED_IP})
+    self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME)
+    self.assertIsNone(utils.TestKey('ChromiumPerf/win7/my_test_suite').get())
+    self.assertIsNotNone(utils.TestKey('ChromiumPerf/win7/my_benchmark').get())
+
   def testPost_WithBenchmarkRerunOptions_AddsTraceRerunOptions(self):
-    sample_json = _SAMPLE_DASHBOARD_JSON.copy()
+    sample_json = copy.deepcopy(_SAMPLE_DASHBOARD_JSON)
     sample_json['chart_data']['trace_rerun_options'] = [['foo', '--foo']]
     data_param = json.dumps(sample_json)
     self.testapp.post(
@@ -1057,7 +1087,7 @@
 
   def testPost_FormatV1_BadMaster_Rejected(self):
     """Tests that attempting to post with no master name will error."""
-    chart = _SAMPLE_DASHBOARD_JSON.copy()
+    chart = copy.deepcopy(_SAMPLE_DASHBOARD_JSON)
     del chart['master']
     self.testapp.post(
         '/add_point', {'data': json.dumps(chart)}, status=400,
@@ -1065,7 +1095,7 @@
 
   def testPost_FormatV1_BadBot_Rejected(self):
     """Tests that attempting to post with no bot name will error."""
-    chart = _SAMPLE_DASHBOARD_JSON.copy()
+    chart = copy.deepcopy(_SAMPLE_DASHBOARD_JSON)
     del chart['bot']
     self.testapp.post(
         '/add_point', {'data': json.dumps(chart)}, status=400,
@@ -1073,7 +1103,7 @@
 
   def testPost_FormatV1_BadPointId_Rejected(self):
     """Tests that attempting to post a chart no point id will error."""
-    chart = _SAMPLE_DASHBOARD_JSON.copy()
+    chart = copy.deepcopy(_SAMPLE_DASHBOARD_JSON)
     del chart['point_id']
     self.testapp.post(
         '/add_point', {'data': json.dumps(chart)}, status=400,
@@ -1086,223 +1116,211 @@
         '/add_point', {'data': json.dumps(chart)}, status=400,
         extra_environ={'REMOTE_ADDR': _WHITELISTED_IP})
 
+  def testPost_FormatV1_EmptyCharts_NothingAdded(self):
+    chart = copy.deepcopy(_SAMPLE_DASHBOARD_JSON)
+    chart['chart_data']['charts'] = {}
+    self.testapp.post(
+        '/add_point', {'data': json.dumps(chart)},
+        extra_environ={'REMOTE_ADDR': _WHITELISTED_IP})
+    # Status is OK, but no rows are added.
+    self.assertIsNone(graph_data.Row.query().get())
+
 
 class FlattenTraceTest(testing_common.TestCase):
 
   def testDashboardJsonToRawRows_WithIsRef(self):
     """Tests that rows from a chart from a ref build have the correct name."""
-    chart = _SAMPLE_DASHBOARD_JSON.copy()
+    chart = copy.deepcopy(_SAMPLE_DASHBOARD_JSON)
     chart['is_ref'] = True
-    rows = add_point.AddPointHandler()._DashboardJsonToRawRows(chart)
+    rows = add_point._DashboardJsonToRawRows(chart)
     self.assertEqual('my_test_suite/my_test/ref', rows[0]['test'])
 
+  @staticmethod
+  def _SampleTrace():
+    return {
+        'name': 'bar.baz',
+        'units': 'meters',
+        'type': 'scalar',
+        'value': 42,
+    }
+
   def testFlattenTrace_PreservesUnits(self):
     """Tests that _FlattenTrace preserves the units property."""
-    trace = {
-        'type': 'scalar',
-        'name': 'overall',
-        'units': 'ms',
-        'value': 42
-    }
+    trace = self._SampleTrace()
+    trace.update({'units': 'ms'})
     row = add_point._FlattenTrace('foo', 'bar', 'bar', trace)
     self.assertEqual(row['units'], 'ms')
 
   def testFlattenTrace_CoreTraceName(self):
     """Tests that chartname.summary will be flattened to chartname."""
-    trace = {
-        'type': 'scalar',
-        'name': 'bar',
-        'units': 'ms',
-        'value': 42
-    }
+    trace = self._SampleTrace()
+    trace.update({'name': 'summary'})
     row = add_point._FlattenTrace('foo', 'bar', 'summary', trace)
     self.assertEqual(row['test'], 'foo/bar')
 
   def testFlattenTrace_NonSummaryTraceName_SetCorrectly(self):
     """Tests that chart.trace will be flattened to chart/trace."""
-    trace = {
-        'type': 'scalar',
-        'name': 'bar.baz',
-        'units': 'ms',
-        'value': 42
-    }
+    trace = self._SampleTrace()
+    trace.update({'name': 'bar.baz'})
     row = add_point._FlattenTrace('foo', 'bar', 'baz', trace)
     self.assertEqual(row['test'], 'foo/bar/baz')
 
   def testFlattenTrace_ImprovementDirectionCannotBeNone(self):
     """Tests that an improvement_direction must not be None if passed."""
-    trace = {
-        'type': 'scalar',
-        'name': 'bar',
-        'units': 'ms',
-        'value': 42,
-        'improvement_direction': None
-    }
+    trace = self._SampleTrace()
+    trace.update({'improvement_direction': None})
     with self.assertRaises(add_point.BadRequestError):
       add_point._FlattenTrace('foo', 'bar', 'summary', trace)
 
-  def testFlattenTraceAddsImprovementDirectionIfPresent(self):
+  def testFlattenTrace_AddsImprovementDirectionIfPresent(self):
     """Tests that improvement_direction will be respected if present."""
-    trace = {
-        'type': 'scalar',
-        'name': 'bar',
-        'units': 'ms',
-        'value': 42,
-        'improvement_direction': 'up'
-    }
-
+    trace = self._SampleTrace()
+    trace.update({'improvement_direction': 'up'})
     row = add_point._FlattenTrace('foo', 'bar', 'summary', trace)
-    self.assertIn('higher_is_better', row)
-    self.assertEqual(row['higher_is_better'], True)
+    self.assertTrue(row['higher_is_better'])
 
-  def testFlattenTraceDoesNotAddImprovementDirectionIfAbsent(self):
+  def testFlattenTrace_DoesNotAddImprovementDirectionIfAbsent(self):
     """Tests that no higher_is_better is added if no improvement_direction."""
-    trace = {
-        'type': 'scalar',
-        'name': 'bar',
-        'units': 'ms',
-        'value': 42
-    }
-
-    row = add_point._FlattenTrace('foo', 'bar', 'summary', trace)
+    row = add_point._FlattenTrace('foo', 'bar', 'summary', self._SampleTrace())
     self.assertNotIn('higher_is_better', row)
 
-  def testFlattenTraceRejectsBadImprovementDirection(self):
+  def testFlattenTrace_RejectsBadImprovementDirection(self):
     """Tests that passing a bad improvement_direction will cause an error."""
-    trace = {
-        'type': 'scalar',
-        'name': 'bar',
-        'units': 'ms',
-        'value': 42,
-        'improvement_direction': 'foo'
-    }
-
+    trace = self._SampleTrace()
+    trace.update({'improvement_direction': 'foo'})
     with self.assertRaises(add_point.BadRequestError):
       add_point._FlattenTrace('foo', 'bar', 'summary', trace)
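Taken together, the four improvement_direction tests pin down the mapping: `'up'` yields `higher_is_better=True`, an absent key adds nothing, and `None` or an unrecognized string raises `BadRequestError`. A sketch of that mapping as implied by the tests (the `'down'` branch is an assumption; it is not exercised here):

```python
def _HigherIsBetter(trace):
    """Sketch of the improvement_direction handling implied by these tests."""
    if 'improvement_direction' not in trace:
        return None  # _FlattenTrace omits 'higher_is_better' entirely.
    direction = trace['improvement_direction']
    if direction == 'up':
        return True
    if direction == 'down':
        return False  # Assumed; the 'down' case is not tested above.
    # The real handler raises add_point.BadRequestError here.
    raise ValueError('Bad improvement_direction: %r' % direction)

print(_HigherIsBetter({'improvement_direction': 'up'}))  # True
```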
 
   def testFlattenTrace_ScalarValue(self):
     """Tests that scalars are flattened to 0-error values."""
-    trace = {
-        'type': 'scalar',
-        'name': 'overall',
-        'units': 'ms',
-        'value': 42
-    }
-    row = add_point._FlattenTrace('foo', 'bar', 'baz', trace)
+    row = add_point._FlattenTrace('foo', 'bar', 'baz', self._SampleTrace())
     self.assertEqual(row['value'], 42)
     self.assertEqual(row['error'], 0)
 
-  def testFlattenTraceScalarNoneValue(self):
+  def testFlattenTrace_ScalarNoneValue(self):
     """Tests that scalar NoneValue is flattened to NaN."""
-    trace = {
-        'type': 'scalar',
-        'name': 'overall',
-        'units': 'ms',
-        'value': None,
-        'none_value_reason': 'Reason for test'
-    }
+    trace = self._SampleTrace()
+    trace.update({'value': None, 'none_value_reason': 'reason'})
     row = add_point._FlattenTrace('foo', 'bar', 'baz', trace)
     self.assertTrue(math.isnan(row['value']))
     self.assertEqual(row['error'], 0)
 
-  def testFlattenTraceListValue(self):
+  def testFlattenTrace_InvalidScalarValue_RaisesError(self):
+    """Tests that scalar NoneValue is flattened to NaN."""
+    trace = self._SampleTrace()
+    trace.update({'value': [42, 43, 44]})
+    with self.assertRaises(add_point.BadRequestError):
+      add_point._FlattenTrace('foo', 'bar', 'baz', trace)
+
+  def testFlattenTrace_ListValue(self):
     """Tests that lists are properly flattened to avg/stddev."""
-    trace = {
+    trace = self._SampleTrace()
+    trace.update({
         'type': 'list_of_scalar_values',
-        'name': 'bar.baz',
-        'units': 'ms',
         'values': [5, 10, 25, 10, 15],
-    }
+    })
     row = add_point._FlattenTrace('foo', 'bar', 'baz', trace)
     self.assertAlmostEqual(row['value'], 13)
     self.assertAlmostEqual(row['error'], 6.78232998)
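The expected numbers above follow from a plain mean and population standard deviation of the sample values; a quick standalone check:

```python
import math

values = [5, 10, 25, 10, 15]
mean = sum(values) / float(len(values))                        # 13.0
variance = sum((v - mean) ** 2 for v in values) / len(values)
print(mean, math.sqrt(variance))                               # 13.0 ~6.78232998
```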
 
-  def testFlattenTraceListValueWithStd(self):
+  def testFlattenTrace_ListValueWithStd(self):
     """Tests that lists with reported std use std as error."""
-    trace = {
+    trace = self._SampleTrace()
+    trace.update({
         'type': 'list_of_scalar_values',
-        'name': 'bar.baz',
-        'units': 'ms',
         'values': [5, 10, 25, 10, 15],
         'std': 100,
-    }
+    })
     row = add_point._FlattenTrace('foo', 'bar', 'baz', trace)
     self.assertNotAlmostEqual(row['error'], 6.78232998)
     self.assertEqual(row['error'], 100)
 
   def testFlattenTrace_ListNoneValue(self):
     """Tests that LoS NoneValue is flattened to NaN."""
-    trace = {
+    trace = self._SampleTrace()
+    trace.update({
         'type': 'list_of_scalar_values',
-        'name': 'overall',
-        'units': 'ms',
-        'value': None,
-        'none_value_reason': 'Reason for test'
-    }
+        'value': [None],
+        'none_value_reason': 'Reason for null value'
+    })
     row = add_point._FlattenTrace('foo', 'bar', 'baz', trace)
     self.assertTrue(math.isnan(row['value']))
     self.assertTrue(math.isnan(row['error']))
 
+  def testFlattenTrace_ListNoneValueNoReason_RaisesError(self):
+    trace = self._SampleTrace()
+    trace.update({
+        'type': 'list_of_scalar_values',
+        'value': [None],
+    })
+    with self.assertRaises(add_point.BadRequestError):
+      add_point._FlattenTrace('foo', 'bar', 'baz', trace)
+
+  def testFlattenTrace_ListValueNotAList_RaisesError(self):
+    trace = self._SampleTrace()
+    trace.update({
+        'type': 'list_of_scalar_values',
+        'values': 42,
+    })
+    with self.assertRaises(add_point.BadRequestError):
+      add_point._FlattenTrace('foo', 'bar', 'baz', trace)
+
+  def testFlattenTrace_ListContainsString_RaisesError(self):
+    trace = self._SampleTrace()
+    trace.update({
+        'type': 'list_of_scalar_values',
+        'values': ['-343', 123],
+    })
+    with self.assertRaises(add_point.BadRequestError):
+      add_point._FlattenTrace('foo', 'bar', 'baz', trace)
+
   def testFlattenTrace_HistogramValue(self):
     """Tests that histograms are yield geommean/stddev as value/error."""
-    trace = {
+    trace = self._SampleTrace()
+    trace.update({
         'type': 'histogram',
-        'name': 'bar.baz',
-        'units': 'ms',
         'buckets': [{'low': 1, 'high': 5, 'count': 3},
                     {'low': 4, 'high': 6, 'count': 4}]
-    }
+    })
     row = add_point._FlattenTrace('foo', 'bar', 'baz', trace)
     self.assertAlmostEqual(row['value'], 4.01690877)
     self.assertAlmostEqual(row['error'], 0.99772482)
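The histogram expectations can be reproduced with a count-weighted geometric mean of the bucket midpoints and the count-weighted spread of those midpoints around it. This back-of-envelope check matches the asserted values, but the exact formula lives in the dashboard's histogram helpers, so treat it as a reconstruction rather than the implementation:

```python
import math

buckets = [{'low': 1, 'high': 5, 'count': 3},
           {'low': 4, 'high': 6, 'count': 4}]
total = sum(b['count'] for b in buckets)
midpoints = [(b['low'] + b['high']) / 2.0 for b in buckets]     # [3.0, 5.0]

log_sum = sum(b['count'] * math.log(m) for b, m in zip(buckets, midpoints))
geom_mean = math.exp(log_sum / total)                           # ~4.01690877
variance = sum(b['count'] * (m - geom_mean) ** 2
               for b, m in zip(buckets, midpoints)) / total
print(geom_mean, math.sqrt(variance))                           # ~0.99772482
```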
 
   def testFlattenTrace_RespectsIsRefForSameTraceName(self):
     """Tests whether a ref trace that is a chart has the /ref suffix."""
-    trace = {
-        'type': 'scalar',
-        'name': 'bar',
-        'units': 'ms',
-        'value': 42
-    }
     row = add_point._FlattenTrace(
-        'foo', 'bar', 'summary', trace, is_ref=True)
+        'foo', 'bar', 'summary', self._SampleTrace(), is_ref=True)
     self.assertEqual(row['test'], 'foo/bar/ref')
 
   def testFlattenTrace_RespectsIsRefForDifferentTraceName(self):
     """Tests whether a ref trace that is not a chart has the _ref suffix."""
-    trace = {
-        'type': 'scalar',
-        'name': 'bar.baz',
-        'units': 'ms',
-        'value': 42
-    }
     row = add_point._FlattenTrace(
-        'foo', 'bar', 'baz', trace, is_ref=True)
+        'foo', 'bar', 'baz', self._SampleTrace(), is_ref=True)
     self.assertEqual(row['test'], 'foo/bar/baz_ref')
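The two ref tests above encode the naming rule: when the trace represents the whole chart, the ref row becomes a `/ref` child of the chart; otherwise the trace name itself gets a `_ref` suffix. A sketch of the path construction implied by the expected values (inferred from the tests, not copied from `add_point`):

```python
def _TestPath(suite, chart, trace_name, is_ref=False):
    # The 'summary'/chart-name condition is inferred from the tests above.
    if trace_name in ('summary', chart):
        path = '%s/%s' % (suite, chart)
        return path + '/ref' if is_ref else path
    path = '%s/%s/%s' % (suite, chart, trace_name)
    return path + '_ref' if is_ref else path

print(_TestPath('foo', 'bar', 'summary', is_ref=True))  # foo/bar/ref
print(_TestPath('foo', 'bar', 'baz', is_ref=True))      # foo/bar/baz_ref
```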
 
+  def testFlattenTrace_InvalidTraceType(self):
+    """Tests whether a ref trace that is not a chart has the _ref suffix."""
+    trace = self._SampleTrace()
+    trace.update({'type': 'foo'})
+    with self.assertRaises(add_point.BadRequestError):
+      add_point._FlattenTrace('foo', 'bar', 'baz', trace)
+
   def testFlattenTrace_SanitizesTraceName(self):
     """Tests whether a trace name with special characters is sanitized."""
-    trace = {
-        'type': 'scalar',
-        'name': 'bar.baz',
-        'page': 'http://example.com',
-        'units': 'ms',
-        'value': 42
-    }
+    trace = self._SampleTrace()
+    trace.update({'page': 'http://example.com'})
     row = add_point._FlattenTrace(
         'foo', 'bar', 'http://example.com', trace)
     self.assertEqual(row['test'], 'foo/bar/http___example.com')
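The sanitized names here and in the TIR-label test below ('http://example.com' becomes 'http___example.com', 'https://abc.xyz/' becomes 'https___abc.xyz_') are consistent with replacing every character outside a small safe set with an underscore. The exact character class is an assumption; only the two expected strings are guaranteed by the tests:

```python
import re

def _Sanitize(name):
    # Assumed safe set: letters, digits, dot, underscore and dash.
    return re.sub(r'[^A-Za-z0-9._-]', '_', name)

print(_Sanitize('http://example.com'))  # http___example.com
print(_Sanitize('https://abc.xyz/'))    # https___abc.xyz_
```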
 
   def testFlattenTrace_FlattensInteractionRecordLabelToFivePartName(self):
     """Tests whether a TIR label will appear between chart and trace name."""
-    trace = {
-        'type': 'scalar',
+    trace = self._SampleTrace()
+    trace.update({
         'name': 'bar',
         'page': 'https://abc.xyz/',
-        'units': 'ms',
-        'value': 42,
         'tir_label': 'baz'
-    }
+    })
     row = add_point._FlattenTrace('foo', 'baz@@bar', 'https://abc.xyz/', trace)
     self.assertEqual(row['test'], 'foo/bar/baz/https___abc.xyz_')
 
diff --git a/catapult/dashboard/dashboard/alerts.py b/catapult/dashboard/dashboard/alerts.py
index 2e0272f..e345664 100644
--- a/catapult/dashboard/dashboard/alerts.py
+++ b/catapult/dashboard/dashboard/alerts.py
@@ -25,7 +25,11 @@
   """Shows an overview of recent anomalies for perf sheriffing."""
 
   def get(self):
-    """Renders the UI for listing alerts.
+    """Renders the UI for listing alerts."""
+    self.RenderStaticHtml('alerts.html')
+
+  def post(self):
+    """Returns dynamic data for listing alerts in response to XHR.
 
     Request parameters:
       sheriff: The name of a sheriff (optional).
@@ -33,10 +37,16 @@
       improvements: Whether to include improvement anomalies.
 
     Outputs:
-      A page displaying an overview table of all alerts.
+      JSON data for an XHR request to show a table of alerts.
     """
     sheriff_name = self.request.get('sheriff', 'Chromium Perf Sheriff')
     sheriff_key = ndb.Key('Sheriff', sheriff_name)
+    if not _SheriffIsFound(sheriff_key):
+      self.response.out.write(json.dumps({
+          'error': 'Sheriff "%s" not found.' % sheriff_name
+      }))
+      return
+
     include_improvements = bool(self.request.get('improvements'))
     include_triaged = bool(self.request.get('triaged'))
 
@@ -45,14 +55,24 @@
     anomalies = ndb.get_multi(anomaly_keys[:_MAX_ANOMALIES_TO_SHOW])
     stoppage_alerts = _FetchStoppageAlerts(sheriff_key, include_triaged)
 
-    self.RenderHtml('alerts.html', {
-        'anomaly_list': json.dumps(AnomalyDicts(anomalies)),
-        'stoppage_alert_list': json.dumps(StoppageAlertDicts(stoppage_alerts)),
-        'have_anomalies': bool(anomalies),
-        'have_stoppage_alerts': bool(stoppage_alerts),
-        'sheriff_list': json.dumps(_GetSheriffList()),
-        'num_anomalies': len(anomaly_keys),
-    })
+    values = {
+        'anomaly_list': AnomalyDicts(anomalies),
+        'stoppage_alert_list': StoppageAlertDicts(stoppage_alerts),
+        'sheriff_list': _GetSheriffList(),
+    }
+    self.GetDynamicVariables(values)
+    self.response.out.write(json.dumps(values))
+
+
+def _SheriffIsFound(sheriff_key):
+  """Checks whether the sheriff can be found for the current user."""
+  try:
+    sheriff_entity = sheriff_key.get()
+  except AssertionError:
+    # This assertion is raised in InternalOnlyModel._post_get_hook,
+    # and indicates an internal-only Sheriff but an external user.
+    return False
+  return sheriff_entity is not None
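With this change, GET /alerts serves static HTML and the alert data comes from a POST that returns JSON. A minimal sketch of the two response shapes produced by `post()` above (placeholder values, not real entity dicts):

```python
import json

def _AlertsXhrResponse(sheriff_found, anomalies, stoppage_alerts, sheriffs):
    # Mirrors the two branches of AlertsHandler.post() above.
    if not sheriff_found:
        return json.dumps({'error': 'Sheriff "Foo" not found.'})
    return json.dumps({
        'anomaly_list': anomalies,
        'stoppage_alert_list': stoppage_alerts,
        'sheriff_list': sheriffs,
    })

print(_AlertsXhrResponse(True, [], [], ['Chromium Perf Sheriff']))
print(_AlertsXhrResponse(False, [], [], []))
```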
 
 
 def _FetchAnomalyKeys(sheriff_key, include_improvements, include_triaged):
diff --git a/catapult/dashboard/dashboard/alerts_test.py b/catapult/dashboard/dashboard/alerts_test.py
index 5cdd1d3..c89469a 100644
--- a/catapult/dashboard/dashboard/alerts_test.py
+++ b/catapult/dashboard/dashboard/alerts_test.py
@@ -92,10 +92,15 @@
 
     return key_map
 
-  def testGet_NoParametersSet_UntriagedAlertsListed(self):
-    key_map = self._AddAlertsToDataStore()
+  def testGet(self):
     response = self.testapp.get('/alerts')
-    anomaly_list = self.GetEmbeddedVariable(response, 'ANOMALY_LIST')
+    self.assertEqual('text/html', response.content_type)
+    self.assertIn('Chrome Performance Alerts', response.body)
+
+  def testPost_NoParametersSet_UntriagedAlertsListed(self):
+    key_map = self._AddAlertsToDataStore()
+    response = self.testapp.post('/alerts')
+    anomaly_list = self.GetJsonValue(response, 'anomaly_list')
     self.assertEqual(12, len(anomaly_list))
     # The test below depends on the order of the items, but the order is not
     # guaranteed; it depends on the timestamps, which depend on put order.
@@ -117,10 +122,10 @@
       expected_end_rev -= 10
     self.assertEqual(expected_end_rev, 9990)
 
-  def testGet_TriagedParameterSet_TriagedListed(self):
+  def testPost_TriagedParameterSet_TriagedListed(self):
     self._AddAlertsToDataStore()
-    response = self.testapp.get('/alerts', {'triaged': 'true'})
-    anomaly_list = self.GetEmbeddedVariable(response, 'ANOMALY_LIST')
+    response = self.testapp.post('/alerts', {'triaged': 'true'})
+    anomaly_list = self.GetJsonValue(response, 'anomaly_list')
     # The alerts listed should contain those added above, including alerts
     # that have a bug ID that is not None.
     self.assertEqual(14, len(anomaly_list))
@@ -138,13 +143,13 @@
       expected_end_rev -= 10
     self.assertEqual(expected_end_rev, 9990)
 
-  def testGet_ImprovementsParameterSet_ListsImprovements(self):
+  def testPost_ImprovementsParameterSet_ListsImprovements(self):
     self._AddAlertsToDataStore()
-    response = self.testapp.get('/alerts', {'improvements': 'true'})
-    anomaly_list = self.GetEmbeddedVariable(response, 'ANOMALY_LIST')
+    response = self.testapp.post('/alerts', {'improvements': 'true'})
+    anomaly_list = self.GetJsonValue(response, 'anomaly_list')
     self.assertEqual(18, len(anomaly_list))
 
-  def testGet_SheriffParameterSet_OtherSheriffAlertsListed(self):
+  def testPost_SheriffParameterSet_OtherSheriffAlertsListed(self):
     self._AddAlertsToDataStore()
     # Add another sheriff to the mock datastore, and set the sheriff of some
     # anomalies to be this new sheriff.
@@ -158,30 +163,28 @@
       anomaly_entity.sheriff = sheriff2_key
       anomaly_entity.put()
 
-    response = self.testapp.get('/alerts', {'sheriff': 'Sheriff2'})
-    anomaly_list = self.GetEmbeddedVariable(response, 'ANOMALY_LIST')
-    sheriff_list = self.GetEmbeddedVariable(response, 'SHERIFF_LIST')
+    response = self.testapp.post('/alerts', {'sheriff': 'Sheriff2'})
+    anomaly_list = self.GetJsonValue(response, 'anomaly_list')
+    sheriff_list = self.GetJsonValue(response, 'sheriff_list')
     for alert in anomaly_list:
       self.assertEqual('mean_frame_time', alert['test'])
     self.assertEqual(2, len(sheriff_list))
     self.assertEqual('Chromium Perf Sheriff', sheriff_list[0])
     self.assertEqual('Sheriff2', sheriff_list[1])
 
-  def testGet_StoppageAlerts_EmbedsStoppageAlertListAndOneTable(self):
+  def testPost_StoppageAlerts_StoppageAlertListReturned(self):
     sheriff.Sheriff(id='Sheriff', patterns=['M/b/*/*']).put()
     testing_common.AddTests(['M'], ['b'], {'foo': {'bar': {}}})
     test_key = utils.TestKey('M/b/foo/bar')
     rows = testing_common.AddRows('M/b/foo/bar', {9800, 9802})
     for row in rows:
       stoppage_alert.CreateStoppageAlert(test_key.get(), row).put()
-    response = self.testapp.get('/alerts?sheriff=Sheriff')
-    stoppage_alert_list = self.GetEmbeddedVariable(
-        response, 'STOPPAGE_ALERT_LIST')
+    response = self.testapp.post('/alerts?sheriff=Sheriff')
+    stoppage_alert_list = self.GetJsonValue(response, 'stoppage_alert_list')
     self.assertEqual(2, len(stoppage_alert_list))
-    self.assertEqual(1, len(response.html('alerts-table')))
 
   @mock.patch('logging.error')
-  def testGet_StoppageAlertWithBogusRow_LogsErrorAndShowsTable(
+  def testPost_StoppageAlertWithBogusRow_LogsError(
       self, mock_logging_error):
     sheriff.Sheriff(id='Sheriff', patterns=['M/b/*/*']).put()
     testing_common.AddTests(['M'], ['b'], {'foo': {'bar': {}}})
@@ -189,17 +192,22 @@
     row_parent = utils.GetTestContainerKey(test_key)
     row = graph_data.Row(parent=row_parent, id=1234)
     stoppage_alert.CreateStoppageAlert(test_key.get(), row).put()
-    response = self.testapp.get('/alerts?sheriff=Sheriff')
-    stoppage_alert_list = self.GetEmbeddedVariable(
-        response, 'STOPPAGE_ALERT_LIST')
+    response = self.testapp.post('/alerts?sheriff=Sheriff')
+    stoppage_alert_list = self.GetJsonValue(response, 'stoppage_alert_list')
     self.assertEqual(1, len(stoppage_alert_list))
-    self.assertEqual(1, len(response.html('alerts-table')))
     self.assertEqual(1, mock_logging_error.call_count)
 
-  def testGet_WithNoAlerts_HasImageAndNoAlertsTable(self):
-    response = self.testapp.get('/alerts')
-    self.assertEqual(1, len(response.html('img')))
-    self.assertEqual(0, len(response.html('alerts-table')))
+  def testPost_WithBogusSheriff_HasErrorMessage(self):
+    response = self.testapp.post('/alerts?sheriff=Foo')
+    error = self.GetJsonValue(response, 'error')
+    self.assertIsNotNone(error)
+
+  def testPost_ExternalUserRequestsInternalOnlySheriff_ErrorMessage(self):
+    sheriff.Sheriff(id='Foo', internal_only=True).put()
+    self.assertFalse(utils.IsInternalUser())
+    response = self.testapp.post('/alerts?sheriff=Foo')
+    error = self.GetJsonValue(response, 'error')
+    self.assertIsNotNone(error)
 
 
 if __name__ == '__main__':
diff --git a/catapult/dashboard/dashboard/associate_alerts.py b/catapult/dashboard/dashboard/associate_alerts.py
index 910a9c7..e89ffa0 100644
--- a/catapult/dashboard/dashboard/associate_alerts.py
+++ b/catapult/dashboard/dashboard/associate_alerts.py
@@ -4,31 +4,18 @@
 
 """Provides an endpoint and web interface for associating alerts with bug."""
 
-import json
-import logging
 import re
-import urllib
 
-from google.appengine.api import urlfetch
 from google.appengine.api import users
 from google.appengine.ext import ndb
 
+from dashboard import issue_tracker_service
+from dashboard import oauth2_decorator
 from dashboard import request_handler
 from dashboard import utils
 from dashboard.models import anomaly
 from dashboard.models import stoppage_alert
 
-# The API for fetching info from the issue tracker appears to be similar to
-# that described at <https://code.google.com/p/support/wiki/IssueTrackerAPI>.
-_RECENT_BUGS_QUERY = (
-    'https://www.googleapis.com/projecthosting/v2/projects/chromium/issues'
-    '?' + urllib.urlencode({
-        'q': 'label:Type-Bug-Regression label:Performance opened-after:today-5',
-        'fields': 'items(id,state,status,summary,author)',
-        'sort': '-id',
-        'can': 'all',
-        'key': 'AIzaSyDrEBALf59D7TkOuz-bBuOnN2OqzD70NCQ'}))
-
 
 class AssociateAlertsHandler(request_handler.RequestHandler):
   """Associates alerts with a bug."""
@@ -37,6 +24,7 @@
     """POST is the same as GET for this endpoint."""
     self.get()
 
+  @oauth2_decorator.DECORATOR.oauth_required
   def get(self):
     """Response handler for the page used to group an alert with a bug.
 
@@ -74,15 +62,6 @@
     Args:
       urlsafe_keys: Comma-separated Alert keys in urlsafe format.
     """
-    # Fetch metadata about recent bugs.
-    response = urlfetch.fetch(_RECENT_BUGS_QUERY)
-    if response.status_code == 200:
-      bugs = json.loads(response.content)
-      bugs = bugs.get('items', []) if bugs else []
-    else:
-      logging.error('Couldn\'t fetch recent bugs from www.googleapis.com.')
-      bugs = []
-
     # Get information about Alert entities and related Test entities,
     # so that they can be compared with recent bugs.
     alert_keys = [ndb.Key(urlsafe=k) for k in urlsafe_keys.split(',')]
@@ -93,6 +72,7 @@
     # On the alerts page, alerts are only highlighted if the revision range
     # overlaps with the revision ranges for all of the selected alerts; the
     # same thing is done here.
+    bugs = self._FetchBugs()
     for bug in bugs:
       this_range = _RevisionRangeFromSummary(bug['summary'])
       bug['relevant'] = all(_RangesOverlap(this_range, r) for r in ranges)
@@ -103,6 +83,14 @@
         'bugs': bugs
     })
 
+  def _FetchBugs(self):
+    http = oauth2_decorator.DECORATOR.http()
+    issue_tracker = issue_tracker_service.IssueTrackerService(http=http)
+    response = issue_tracker.List(
+        q='opened-after:today-5', label='Type-Bug-Regression,Performance',
+        sort='-id')
+    return response.get('items', []) if response else []
+
   def _AssociateAlertsWithBug(self, bug_id, urlsafe_keys, is_confirmed):
     """Sets the bug ID for a set of alerts.
 
diff --git a/catapult/dashboard/dashboard/associate_alerts_test.py b/catapult/dashboard/dashboard/associate_alerts_test.py
index f1e621a..c4c6e0a 100644
--- a/catapult/dashboard/dashboard/associate_alerts_test.py
+++ b/catapult/dashboard/dashboard/associate_alerts_test.py
@@ -2,14 +2,18 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-import json
 import unittest
 
 import mock
 import webapp2
 import webtest
 
+# pylint: disable=unused-import
+from dashboard import mock_oauth2_decorator
+# pylint: enable=unused-import
+
 from dashboard import associate_alerts
+from dashboard import issue_tracker_service
 from dashboard import testing_common
 from dashboard import utils
 from dashboard.models import anomaly
@@ -91,30 +95,27 @@
     self.assertIn('<div class="error">', response.body)
     self.assertIn('Invalid bug ID', response.body)
 
-  # In this test method, the request handler is expected to use urlfetch.fetch
-  # to request recent bug information. Below is some sample data in the format
-  # returned by this request.
-  @mock.patch(
-      'google.appengine.api.urlfetch.fetch',
-      mock.MagicMock(return_value=testing_common.FakeResponseObject(
-          200, json.dumps({
-              'items': [
-                  {
-                      'id': 12345,
-                      'summary': '5% regression in bot/suite/x at 10000:20000',
-                      'state': 'open',
-                      'status': 'New',
-                      'author': {'name': 'exam...@google.com'},
-                  },
-                  {
-                      'id': 13579,
-                      'summary': '1% regression in bot/suite/y at 10000:20000',
-                      'state': 'closed',
-                      'status': 'WontFix',
-                      'author': {'name': 'exam...@google.com'},
-                  },
-              ]})
-      )))
+  # Mocks fetching bugs from issue tracker.
+  @mock.patch('issue_tracker_service.discovery.build', mock.MagicMock())
+  @mock.patch.object(
+      issue_tracker_service.IssueTrackerService, 'List',
+      mock.MagicMock(return_value={
+          'items': [
+              {
+                  'id': 12345,
+                  'summary': '5% regression in bot/suite/x at 10000:20000',
+                  'state': 'open',
+                  'status': 'New',
+                  'author': {'name': 'exam...@google.com'},
+              },
+              {
+                  'id': 13579,
+                  'summary': '1% regression in bot/suite/y at 10000:20000',
+                  'state': 'closed',
+                  'status': 'WontFix',
+                  'author': {'name': 'exam...@google.com'},
+              },
+          ]}))
   def testGet_NoBugId_ShowsDialog(self):
     # When a GET request is made with some anomaly keys but no bug ID,
     # an HTML form is shown for the user to input a bug number.
@@ -127,7 +128,7 @@
 
   def testGet_WithBugId_AlertIsAssociatedWithBugId(self):
     # When the bug ID is given and the alerts overlap, then the Anomaly
-    # entities are updated and there is a resopnse indicating success.
+    # entities are updated and there is a response indicating success.
     key_map = self._AddAnomalies()
     response = self.testapp.get(
         '/associate_alerts?keys=%s,%s&bug_id=12345' % (
diff --git a/catapult/dashboard/dashboard/auto_bisect.py b/catapult/dashboard/dashboard/auto_bisect.py
index 7df7878..86f6901 100644
--- a/catapult/dashboard/dashboard/auto_bisect.py
+++ b/catapult/dashboard/dashboard/auto_bisect.py
@@ -5,9 +5,9 @@
 """URL endpoint for a cron job to automatically run bisects."""
 
 import datetime
-import json
 import logging
 
+from dashboard import can_bisect
 from dashboard import datastore_hooks
 from dashboard import request_handler
 from dashboard import start_try_job
@@ -19,15 +19,6 @@
 # Days between successive bisect restarts.
 _BISECT_RESTART_PERIOD_DAYS = [0, 1, 7, 14]
 
-# A set of suites for which we can't do performance bisects.
-# This list currently also exists in the front-end code.
-_UNBISECTABLE_SUITES = [
-    'arc-perf-test',
-    'browser_tests',
-    'content_browsertests',
-    'sizes',
-    'v8',
-]
 
 class AutoBisectHandler(request_handler.RequestHandler):
   """URL endpoint for a cron job to automatically run bisects."""
@@ -71,7 +62,7 @@
           if job.run_count == 1:
             try:
               start_try_job.PerformBisect(job)
-            except Exception as e:
+            except request_handler.InvalidInputError as e:
               logging.error(e.message)
               all_successful = False
           elif job.bug_id:
@@ -104,10 +95,11 @@
     return False
   bisect_job.config = new_bisect_job.config
   bisect_job.bot = new_bisect_job.bot
+  bisect_job.use_buildbucket = new_bisect_job.use_buildbucket
   bisect_job.put()
   try:
     start_try_job.PerformBisect(bisect_job)
-  except Exception as e:
+  except request_handler.InvalidInputError as e:
     logging.error(e.message)
     return False
   return True
@@ -158,15 +150,14 @@
   if not anomalies:
     raise NotBisectableError('No Anomaly alerts found for this bug.')
 
-  # Note: This check for bisectability is parallel to that in bisect_utils.js.
   good_revision, bad_revision = _ChooseRevisionRange(anomalies)
-  if not start_try_job.IsValidRevisionForBisect(good_revision):
+  if not can_bisect.IsValidRevisionForBisect(good_revision):
     raise NotBisectableError('Invalid "good" revision: %s.' % good_revision)
-  if not start_try_job.IsValidRevisionForBisect(bad_revision):
+  if not can_bisect.IsValidRevisionForBisect(bad_revision):
     raise NotBisectableError('Invalid "bad" revision: %s.' % bad_revision)
 
   test = _ChooseTest(anomalies, run_count)
-  if not test or not _IsValidTestForBisect(test.test_path):
+  if not test or not can_bisect.IsValidTestForBisect(test.test_path):
     raise NotBisectableError('Could not select a test.')
 
   metric = start_try_job.GuessMetric(test.test_path)
@@ -245,7 +236,7 @@
   index %= len(anomalies)
   anomalies.sort(cmp=_CompareAnomalyBisectability)
   for anomaly_entity in anomalies[index:]:
-    if _IsValidTestForBisect(utils.TestPath(anomaly_entity.test)):
+    if can_bisect.IsValidTestForBisect(utils.TestPath(anomaly_entity.test)):
       return anomaly_entity.test.get()
   return None
 
@@ -280,20 +271,6 @@
   return 0
 
 
-def _IsValidTestForBisect(test_path):
-  """Checks whether a test is valid for bisect."""
-  if not test_path:
-    return False
-  path_parts = test_path.split('/')
-  if len(path_parts) < 3:
-    return False
-  if path_parts[2] in _UNBISECTABLE_SUITES:
-    return False
-  if test_path.endswith('/ref') or test_path.endswith('_ref'):
-    return False
-  return True
-
-
 def _ChooseRevisionRange(anomalies):
   """Chooses a revision range to use for a bisect job.
 
@@ -345,7 +322,7 @@
 
 
 def _PrintStartedAndFailedBisectJobs():
-  """Print started and failed bisect jobs in datastore."""
+  """Prints started and failed bisect jobs in datastore."""
   failed_jobs = try_job.TryJob.query(
       try_job.TryJob.status == 'failed').fetch()
   started_jobs = try_job.TryJob.query(
diff --git a/catapult/dashboard/dashboard/auto_bisect_test.py b/catapult/dashboard/dashboard/auto_bisect_test.py
index 2c5d7e4..3299e39 100644
--- a/catapult/dashboard/dashboard/auto_bisect_test.py
+++ b/catapult/dashboard/dashboard/auto_bisect_test.py
@@ -3,7 +3,6 @@
 # found in the LICENSE file.
 
 import datetime
-import sys
 import unittest
 
 import mock
@@ -67,7 +66,7 @@
     job_key = try_job.TryJob(
         bug_id=333, status='failed',
         last_ran_timestamp=datetime.datetime.now(),
-        run_count=len(auto_bisect._BISECT_RESTART_PERIOD_DAYS)+1).put()
+        run_count=len(auto_bisect._BISECT_RESTART_PERIOD_DAYS) + 1).put()
     self.testapp.post('/auto_bisect')
     self.assertIsNone(job_key.get())
     mock_log_result.assert_called_once_with(333, mock.ANY)
@@ -92,6 +91,7 @@
 
 
 class StartNewBisectForBugTest(testing_common.TestCase):
+
   def setUp(self):
     super(StartNewBisectForBugTest, self).setUp()
     stored_object.Set(
@@ -194,7 +194,7 @@
   @mock.patch.object(utils, 'TickMonitoringCustomMetric')
   def testPost_RunCount1_ExceptionInPerformBisect_CustomMetricNotTicked(
       self, mock_tick, mock_perform_bisect):
-    mock_perform_bisect.side_effect = Exception('Error')
+    mock_perform_bisect.side_effect = request_handler.InvalidInputError()
     try_job.TryJob(
         bug_id=222, status='failed',
         last_ran_timestamp=datetime.datetime.now(),
@@ -206,7 +206,7 @@
   @mock.patch.object(utils, 'TickMonitoringCustomMetric')
   def testPost_RunCount2_ExceptionInPerformBisect_CustomMetricNotTicked(
       self, mock_tick, mock_perform_bisect):
-    mock_perform_bisect.side_effect = Exception('Error')
+    mock_perform_bisect.side_effect = request_handler.InvalidInputError()
     try_job.TryJob(
         bug_id=111, status='failed',
         last_ran_timestamp=datetime.datetime.now() - datetime.timedelta(days=8),
diff --git a/catapult/dashboard/dashboard/auto_triage.py b/catapult/dashboard/dashboard/auto_triage.py
index 8e8b82b..6514de3 100644
--- a/catapult/dashboard/dashboard/auto_triage.py
+++ b/catapult/dashboard/dashboard/auto_triage.py
@@ -20,7 +20,6 @@
 from dashboard import math_utils
 from dashboard import quick_logger
 from dashboard import request_handler
-from dashboard import rietveld_service
 from dashboard import utils
 from dashboard.models import anomaly
 from dashboard.models import anomaly_config
@@ -36,16 +35,11 @@
 # Number of days to query for bugs.
 _OLDEST_BUG_DELTA = datetime.timedelta(days=30)
 
-# Default parameters used when deciding whether or not an alert should
-# be considered recovered, if not overridden by the anomaly threshold
-# config of a test. These may be, but are not necessarily, the same as
-# the related constants in the find_change_points module.
-# TODO(qyearsley): If possible, simplify _IsAnomalyRecovered so that
-# these values are no longer needed, or change it to use a method in
-# find_change_points.
-_DEFAULT_MULTIPLE_OF_STD_DEV = 3.5
-_DEFAULT_MIN_RELATIVE_CHANGE = 0.01
-_DEFAULT_MIN_ABSOLUTE_CHANGE = 0.0
+# Maximum relative difference between two steps for them to be considered
+# similar enough for the second to be a "recovery" of the first.
+# For example, if there's an increase of 5 units followed by a decrease of 6
+# units, the relative difference of the deltas is 0.2.
+_MAX_DELTA_DIFFERENCE = 0.25
 
 
 class AutoTriageHandler(request_handler.RequestHandler):
@@ -126,7 +120,7 @@
 
   @classmethod
   def UpdateRecoveredBugs(cls, bug_id):
-    """Checks whether anomalies with bug_id have recovered."""
+    """Checks whether Anomalies with a given bug ID have recovered."""
     anomalies = anomaly.Anomaly.query(
         anomaly.Anomaly.bug_id == bug_id).fetch()
     # If no anomalies found, mark this Bug entity as closed.
@@ -155,11 +149,8 @@
     bug.put()
     comment = cls._RecoveredBugComment(bug_id)
 
-    credentials = rietveld_service.Credentials(
-        rietveld_service.GetDefaultRietveldConfig(),
-        rietveld_service.PROJECTHOSTING_SCOPE)
     issue_tracker = issue_tracker_service.IssueTrackerService(
-        additional_credentials=credentials)
+        additional_credentials=utils.ServiceAccountCredentials())
     issue_tracker.AddBugComment(bug_id, comment)
 
   @classmethod
@@ -180,107 +171,59 @@
   """Finds and updates anomalies that recovered."""
   recovered_anomalies = []
   for anomaly_entity in anomalies:
-    is_recovered, measurements = _IsAnomalyRecovered(anomaly_entity)
-    if is_recovered:
+    if _IsAnomalyRecovered(anomaly_entity):
       anomaly_entity.recovered = True
       recovered_anomalies.append(anomaly_entity)
-      logging.debug('Anomaly %s recovered with measurements %s.',
-                    anomaly_entity.key, measurements)
   ndb.put_multi(recovered_anomalies)
   return recovered_anomalies
 
 
 def _IsAnomalyRecovered(anomaly_entity):
-  """Checks whether anomaly has recovered.
+  """Checks whether an Anomaly has recovered.
 
-  We have the measurements for the segment before the anomaly.  If we take
-  the measurements for the latest segment after the anomaly, we can find if
-  the anomaly recovered.
+  An Anomaly will be considered "recovered" if there's a change point in
+  the series after the Anomaly with roughly equal magnitude and opposite
+  direction.
 
   Args:
-    anomaly_entity: The original regression anomaly.
+    anomaly_entity: The original regression Anomaly.
 
   Returns:
-    A tuple (is_anomaly_recovered, measurements), where is_anomaly_recovered
-    is True if anomaly has recovered, and measurements is dictionary
-    of name to value of measurements used to evaluate if anomaly recovered.
-    measurements is None if anomaly has not recovered.
+    True if the Anomaly should be marked as recovered, False otherwise.
   """
-  # 1. Check if the Anomaly entity has std_dev_before_anomaly and
-  #    window_end_revision properties which we're using to decide whether or
-  #    not it is recovered.
-  if (anomaly_entity.std_dev_before_anomaly is None or
-      anomaly_entity.window_end_revision is None):
-    return False, None
-
   test = anomaly_entity.test.get()
+  if not test:
+    logging.error('Test %s not found for Anomaly %s; deleting the Anomaly.',
+                  utils.TestPath(anomaly_entity.test),
+                  anomaly_entity)
+    anomaly_entity.key.delete()
+    return False
   config = anomaly_config.GetAnomalyConfigDict(test)
-  latest_rows = find_anomalies.GetRowsToAnalyze(
-      test, anomaly_entity.segment_size_after)
-  latest_values = [row.value for row in latest_rows
-                   if row.revision > anomaly_entity.window_end_revision]
+  max_num_rows = config.get(
+      'max_window_size', find_anomalies.DEFAULT_NUM_POINTS)
+  rows = [r for r in find_anomalies.GetRowsToAnalyze(test, max_num_rows)
+          if r.revision > anomaly_entity.end_revision]
+  change_points = find_anomalies.FindChangePointsForTest(rows, config)
+  delta_anomaly = (anomaly_entity.median_after_anomaly -
+                   anomaly_entity.median_before_anomaly)
+  for change in change_points:
+    delta_change = change.median_after - change.median_before
+    if (_IsOppositeDirection(delta_anomaly, delta_change) and
+        _IsApproximatelyEqual(delta_anomaly, -delta_change)):
+      logging.debug('Anomaly %s recovered; recovery change point %s.',
+                    anomaly_entity.key, change.AsDict())
+      return True
+  return False
 
-  # 2. Segment size filter.
-  if len(latest_values) < anomaly_entity.segment_size_after:
-    return False, None
 
-  median_before = anomaly_entity.median_before_anomaly
-  median_after = math_utils.Median(latest_values)
-  std_dev_before = anomaly_entity.std_dev_before_anomaly
-  std_dev_after = math_utils.StandardDeviation(latest_values)
-  multiple_of_std_dev = config.get('multiple_of_std_dev',
-                                   _DEFAULT_MULTIPLE_OF_STD_DEV)
-  min_relative_change = config.get('min_relative_change',
-                                   _DEFAULT_MIN_RELATIVE_CHANGE)
-  min_absolute_change = config.get('min_absolute_change',
-                                   _DEFAULT_MIN_ABSOLUTE_CHANGE)
+def _IsOppositeDirection(delta1, delta2):
+  return delta1 * delta2 < 0
 
-  # If no improvement direction is provided, use absolute changes.
-  if test.improvement_direction == anomaly.UNKNOWN:
-    absolute_change = abs(median_after - median_before)
-    relative_change = abs(
-        math_utils.RelativeChange(median_before, median_after))
-  else:
-    if test.improvement_direction == anomaly.UP:
-      direction = -1
-    else:
-      direction = 1
-    absolute_change = direction * (median_after - median_before)
-    relative_change = direction * math_utils.RelativeChange(
-        median_before, median_after)
 
-  measurements = {
-      'segment_size_after': anomaly_entity.segment_size_after,
-      'window_end_revision': anomaly_entity.window_end_revision,
-      'median_before': median_before,
-      'median_after': median_after,
-      'std_dev_before': std_dev_before,
-      'std_dev_after': std_dev_after,
-      'multiple_of_std_dev': multiple_of_std_dev,
-      'min_relative_change': min_relative_change,
-      'min_absolute_change': min_absolute_change,
-      'absolute_change': absolute_change,
-      'relative_change': relative_change,
-  }
-
-  # 3. If it's an improvement, return.
-  if absolute_change <= 0:
-    return True, measurements
-
-  # 4. Absolute change filter.
-  if min_absolute_change > 0 and absolute_change >= min_absolute_change:
-    return False, None
-
-  # 5. Relative change filter.
-  if relative_change >= min_relative_change:
-    return False, None
-
-  # 6. Standard deviation filter.
-  min_std_dev = min(std_dev_before, std_dev_after)
-  if absolute_change > min_std_dev:
-    return False, None
-
-  return True, measurements
+def _IsApproximatelyEqual(delta1, delta2):
+  smaller = min(delta1, delta2)
+  larger = max(delta1, delta2)
+  return math_utils.RelativeChange(smaller, larger) <= _MAX_DELTA_DIFFERENCE
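To make the recovery criterion concrete: if an Anomaly stepped up by 5 units and a later change point steps down by 6, the deltas have opposite signs and the relative difference of their magnitudes is (6 - 5) / 5 = 0.2, which is within `_MAX_DELTA_DIFFERENCE`, so the Anomaly is marked recovered. A simplified standalone rendering of that check (the behaviour of `math_utils.RelativeChange` is assumed for positive inputs):

```python
MAX_DELTA_DIFFERENCE = 0.25

def _relative_change(smaller, larger):
    # Assumed behaviour of math_utils.RelativeChange for positive inputs.
    return (larger - smaller) / float(smaller)

def _is_recovered(delta_anomaly, delta_change):
    opposite = delta_anomaly * delta_change < 0
    a, b = sorted([abs(delta_anomaly), abs(delta_change)])
    return opposite and _relative_change(a, b) <= MAX_DELTA_DIFFERENCE

print(_is_recovered(5.0, -6.0))   # True: 0.2 <= 0.25
print(_is_recovered(5.0, -25.0))  # False: the drop overshoots too much
print(_is_recovered(5.0, 6.0))    # False: same direction, not a recovery
```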
 
 
 def _AddLogForRecoveredAnomaly(anomaly_entity):
@@ -291,12 +234,12 @@
     return
   sheriff_name = sheriff_key.string_id()
   logger = quick_logger.QuickLogger('auto_triage', sheriff_name, formatter)
-  logger.Log(
-      'Alert on %s has recovered. See <a href="%s">graph</a>.%s',
-      utils.TestPath(anomaly_entity.test),
-      ('https://chromeperf.appspot.com/group_report?keys=' +
-       anomaly_entity.key.urlsafe()),
-      _BugLink(anomaly_entity))
+  message = ('Alert on %s has recovered. See <a href="%s">graph</a>.%s' %
+             (utils.TestPath(anomaly_entity.test),
+              ('https://chromeperf.appspot.com/group_report?keys=' +
+               anomaly_entity.key.urlsafe()),
+              _BugLink(anomaly_entity)))
+  logger.Log(message)
   logger.Save()
 
 
diff --git a/catapult/dashboard/dashboard/auto_triage_test.py b/catapult/dashboard/dashboard/auto_triage_test.py
index 41e0ac9..253d1e3 100644
--- a/catapult/dashboard/dashboard/auto_triage_test.py
+++ b/catapult/dashboard/dashboard/auto_triage_test.py
@@ -14,9 +14,7 @@
 from dashboard import testing_common
 from dashboard import utils
 from dashboard.models import anomaly
-from dashboard.models import anomaly_config
 from dashboard.models import bug_data
-from dashboard.models import graph_data
 from dashboard.models import sheriff
 
 
@@ -29,156 +27,135 @@
         [('/auto_triage', auto_triage.AutoTriageHandler)])
     self.testapp = webtest.TestApp(app)
 
-  def _AddTestData(self, test_name, rows, sheriff_key,
-                   improvement_direction=anomaly.UNKNOWN):
-    """Adds a sample Test and associated data and returns the Test."""
-    testing_common.AddTests(
-        ['ChromiumGPU'],
-        ['linux-release'], {
-            'scrolling_benchmark': {
-                test_name: {},
-            },
-        })
-    test = utils.TestKey(
-        'ChromiumGPU/linux-release/scrolling_benchmark/' + test_name).get()
-    test.improvement_direction = improvement_direction
-    test_container_key = utils.GetTestContainerKey(test.key)
-
-    sheriff_key = sheriff_key.get()
-    if sheriff_key.patterns:
-      sheriff_key.patterns.append(test.test_path)
-    else:
-      sheriff_key.patterns = [test.test_path]
-    sheriff_key.put()
-
-    for i, val in enumerate(rows):
-      graph_data.Row(id=(i+1), value=val, parent=test_container_key).put()
-
-    # Add test config.
-    overridden_config = {
-        'min_relative_change': 0.1,
-        'min_absolute_change': 10.0
-    }
-    anomaly_config.AnomalyConfig(
-        id='config_' + test_name, config=overridden_config,
-        patterns=[test.test_path]).put()
-    test.put()
-    return test
-
-  def _AddAnomalyForTest(
-      self, median_before_anomaly, std_dev_before_anomaly, sheriff_key,
-      bug_id, test_key):
-    """Adds an Anomaly to the given Test with the given properties.
+  def _AddTestData(self, series, sheriff_key,
+                   improvement_direction=anomaly.UP):
+    """Adds one sample Test and associated data.
 
     Args:
-      median_before_anomaly: Median value of segment before alert.
-      std_dev_before_anomaly: Std. dev. for segment before alert.
-      sheriff_key: Sheriff associated with the Anomaly.
-      bug_id: Bug ID associated with the Anomaly.
-      test_key: Test to associate the Anomaly with.
+      series: Either a list of values, or a list of (x, y) pairs.
+      sheriff_key: A Sheriff entity key.
+      improvement_direction: One of {anomaly.UP, anomaly.DOWN, anomaly.UNKNOWN}.
 
     Returns:
-      The ndb.Key for the Anomaly that was put.
+      The Test entity key of the Test that was added.
     """
+    testing_common.AddTests(['M'], ['b'], {'benchmark': {'t': {}}})
+    test_path = 'M/b/benchmark/t'
+    test = utils.TestKey(test_path).get()
+    test.improvement_direction = improvement_direction
+    test.sheriff = sheriff_key
+    sheriff_entity = sheriff_key.get()
+    sheriff_entity.patterns.append(test.test_path)
+    sheriff_entity.put()
+    if series and isinstance(series[0], (int, float)):
+      series = enumerate(series, start=1)
+    testing_common.AddRows(test_path, {x: {'value': y} for x, y in series})
+    return test.put()
+
+  def _AddAnomalyForTest(self, sheriff_key, test_key, revision,
+                         median_before, median_after, bug_id=None):
+    """Adds a sample Anomaly and returns the key."""
     if bug_id > 0:
-      bug = ndb.Key('Bug', int(bug_id)).get()
+      bug = ndb.Key('Bug', bug_id).get()
       if not bug:
         bug_data.Bug(id=bug_id).put()
     return anomaly.Anomaly(
-        start_revision=4,
-        end_revision=4,
+        start_revision=revision,
+        end_revision=revision,
         test=test_key,
-        median_before_anomaly=median_before_anomaly,
-        segment_size_after=3,
-        window_end_revision=6,
-        std_dev_before_anomaly=std_dev_before_anomaly,
+        median_before_anomaly=median_before,
+        median_after_anomaly=median_after,
         bug_id=bug_id,
         sheriff=sheriff_key).put()
 
-  def testAnomalyRecovery_AbsoluteCheck(self):
+  def testPost_Recovered_MarkedAsRecovered(self):
     sheriff_key = sheriff.Sheriff(email='a@google.com', id='sheriff_key').put()
-    abs_not_recovered = [990, 1000, 1010, 1010, 1010, 1010, 1000, 1010, 1020]
-    t1 = self._AddTestData('t1', abs_not_recovered, sheriff_key)
-    self._AddAnomalyForTest(1000, 10, sheriff_key, None, t1.key)
-    abs_recovered = [990, 1000, 1010, 1010, 1010, 1010, 995, 1005, 1015]
-    t2 = self._AddTestData('t2', abs_recovered, sheriff_key)
-    self._AddAnomalyForTest(1000, 10, sheriff_key, None, t2.key)
+    values = [
+        49, 50, 51, 50, 51, 49, 51, 50, 50, 49,
+        55, 54, 55, 56, 54, 56, 57, 56, 55, 56,
+        49, 50, 51, 50, 51, 49, 51, 50, 50, 49,
+    ]
+    test_key = self._AddTestData(values, sheriff_key)
+    anomaly_key = self._AddAnomalyForTest(
+        sheriff_key, test_key, revision=11, median_before=50, median_after=55)
     self.testapp.post('/auto_triage')
-    anomalies = anomaly.Anomaly.query().fetch()
-    self.assertEqual(2, len(anomalies))
-    self.assertEqual(t1.key, anomalies[0].test)
-    self.assertEqual(t2.key, anomalies[1].test)
-    self.assertFalse(anomalies[0].recovered)
-    self.assertTrue(anomalies[1].recovered)
+    self.assertTrue(anomaly_key.get().recovered)
 
-  def testAnomalyRecovery_RelativeCheck(self):
+  def testPost_NotRecovered_NotMarkedAsRecovered(self):
     sheriff_key = sheriff.Sheriff(email='a@google.com', id='sheriff_key').put()
-    rel_not_recovered = [49, 50, 51, 55, 55, 55, 44, 55, 56]
-    t1 = self._AddTestData('t1', rel_not_recovered, sheriff_key)
-    self._AddAnomalyForTest(50, 10, sheriff_key, None, t1.key)
-    rel_recovered = [40, 50, 60, 60, 60, 60, 44, 54, 64]
-    t2 = self._AddTestData('t2', rel_recovered, sheriff_key)
-    self._AddAnomalyForTest(50, 10, sheriff_key, None, t2.key)
+    values = [
+        49, 50, 51, 50, 51, 49, 51, 50, 50, 49,
+        55, 54, 55, 56, 54, 56, 57, 56, 55, 56,
+        55, 54, 55, 56, 54, 56, 57, 56, 55, 56,
+    ]
+    test_key = self._AddTestData(values, sheriff_key)
+    anomaly_key = self._AddAnomalyForTest(
+        sheriff_key, test_key, revision=11, median_before=50, median_after=55)
     self.testapp.post('/auto_triage')
-    anomalies = anomaly.Anomaly.query().fetch()
-    self.assertEqual(2, len(anomalies))
-    self.assertEqual(t1.key, anomalies[0].test)
-    self.assertEqual(t2.key, anomalies[1].test)
-    self.assertFalse(anomalies[0].recovered)
-    self.assertTrue(anomalies[1].recovered)
+    self.assertFalse(anomaly_key.get().recovered)
 
-  def testAnomalyRecovery_StdDevCheck(self):
+  def testPost_ChangeTooLarge_NotMarkedAsRecovered(self):
     sheriff_key = sheriff.Sheriff(email='a@google.com', id='sheriff_key').put()
-    std_not_recovered = [990, 1000, 1010, 1010, 1010, 1010, 1010, 1020, 1030]
-    test = self._AddTestData('t1', std_not_recovered, sheriff_key)
-    self._AddAnomalyForTest(1000, 10, sheriff_key, None, test.key)
+    values = [
+        49, 50, 51, 50, 51, 49, 51, 50, 50, 49,
+        55, 54, 55, 56, 54, 56, 57, 56, 55, 56,
+        30, 29, 32, 34, 30, 31, 31, 32, 33, 30,
+    ]
+    test_key = self._AddTestData(values, sheriff_key)
+    anomaly_key = self._AddAnomalyForTest(
+        sheriff_key, test_key, revision=11, median_before=50, median_after=55)
     self.testapp.post('/auto_triage')
-    anomalies = anomaly.Anomaly.query().fetch()
-    self.assertEqual(1, len(anomalies))
-    self.assertFalse(anomalies[0].recovered)
+    self.assertFalse(anomaly_key.get().recovered)
 
-  def testAnomalyRecovery_ImprovementCheck(self):
+  def testPost_ChangeWrongDirection_NotMarkedAsRecovered(self):
     sheriff_key = sheriff.Sheriff(email='a@google.com', id='sheriff_key').put()
-    improvements = [990, 1000, 1010, 1010, 1010, 1010, 890, 900, 910]
-    test = self._AddTestData('t1', improvements, sheriff_key, anomaly.DOWN)
-    self._AddAnomalyForTest(1000, 10, sheriff_key, None, test.key)
+    values = [
+        49, 50, 51, 50, 51, 49, 51, 50, 50, 49,
+        55, 54, 55, 56, 54, 56, 57, 56, 55, 56,
+        59, 60, 61, 60, 61, 59, 61, 60, 60, 59,
+    ]
+    test_key = self._AddTestData(values, sheriff_key)
+    anomaly_key = self._AddAnomalyForTest(
+        sheriff_key, test_key, revision=11, median_before=50, median_after=55)
     self.testapp.post('/auto_triage')
-    anomalies = anomaly.Anomaly.query().fetch()
-    self.assertEqual(1, len(anomalies))
-    self.assertTrue(anomalies[0].recovered)
+    self.assertFalse(anomaly_key.get().recovered)
 
-  def testAnomalyRecover_IgnoredCheck(self):
+  def testPost_AlertInvalid_NotMarkedAsRecovered(self):
     sheriff_key = sheriff.Sheriff(email='a@google.com', id='sheriff_key').put()
-    recovered = [990, 1000, 1010, 1010, 1010, 1010, 990, 1000, 1010]
-    test = self._AddTestData('t1', recovered, sheriff_key)
-    self._AddAnomalyForTest(1000, 10, sheriff_key, -1, test.key)
+    values = [
+        49, 50, 51, 50, 51, 49, 51, 50, 50, 49,
+        55, 54, 55, 56, 54, 56, 57, 56, 55, 56,
+        49, 50, 51, 50, 51, 49, 51, 50, 50, 49,
+    ]
+    test_key = self._AddTestData(values, sheriff_key)
+    anomaly_key = self._AddAnomalyForTest(
+        sheriff_key, test_key, revision=11, median_before=50, median_after=55,
+        bug_id=-1)
     self.testapp.post('/auto_triage')
-    anomalies = anomaly.Anomaly.query().fetch()
-    self.assertEqual(1, len(anomalies))
-    self.assertFalse(anomalies[0].recovered)
+    self.assertFalse(anomaly_key.get().recovered)
 
   @mock.patch.object(
-      auto_triage.rietveld_service, 'Credentials', mock.MagicMock())
+      utils, 'ServiceAccountCredentials', mock.MagicMock())
   @mock.patch.object(
       auto_triage.issue_tracker_service.IssueTrackerService, 'AddBugComment')
   def testPost_AllAnomaliesRecovered_AddsComment(self, add_bug_comment_mock):
     sheriff_key = sheriff.Sheriff(email='a@google.com', id='sheriff_key').put()
-    recovered = [990, 1000, 1010, 1010, 1010, 1010, 990, 1000, 1010]
-    t1 = self._AddTestData('t1', recovered, sheriff_key)
-    self._AddAnomalyForTest(1000, 10, sheriff_key, 1234, t1.key)
-    abs_recovered = [990, 1000, 1010, 1010, 1010, 1010, 995, 1005, 1015]
-    t2 = self._AddTestData('t2', abs_recovered, sheriff_key)
-    self._AddAnomalyForTest(1000, 10, sheriff_key, 1234, t2.key)
+    values = [
+        49, 50, 51, 50, 51, 49, 51, 50, 50, 49,
+        55, 54, 55, 56, 54, 56, 57, 56, 55, 56,
+        49, 50, 51, 50, 51, 49, 51, 50, 50, 49,
+    ]
+    test_key = self._AddTestData(values, sheriff_key)
+    anomaly_key = self._AddAnomalyForTest(
+        sheriff_key, test_key, revision=11, median_before=50, median_after=55,
+        bug_id=1234)
     self.testapp.post('/auto_triage')
     self.ExecuteTaskQueueTasks('/auto_triage', auto_triage._TASK_QUEUE_NAME)
-    anomalies = anomaly.Anomaly.query().fetch()
-    self.assertEqual(2, len(anomalies))
-    self.assertTrue(anomalies[0].recovered)
-    self.assertTrue(anomalies[1].recovered)
+    self.assertTrue(anomaly_key.get().recovered)
     add_bug_comment_mock.assert_called_once_with(mock.ANY, mock.ANY)
 
   @mock.patch.object(auto_triage.TriageBugs, '_CommentOnRecoveredBug')
-  def testPost_BugHasNoAlerts_NotMarkRecovered(self, close_recovered_bug_mock):
+  def testPost_BugHasNoAlerts_NoCommentPosted(self, close_recovered_bug_mock):
     bug_id = 1234
     bug_data.Bug(id=bug_id).put()
     self.testapp.post('/auto_triage')
@@ -187,6 +164,58 @@
     self.assertEqual(bug_data.BUG_STATUS_CLOSED, bug.status)
     self.assertFalse(close_recovered_bug_mock.called)
 
+  def testPost_RealWorldExample_NoClearRecovery(self):
+    # This test is based on a real-world case on a relatively noisy graph where
+    # after the step up at r362262 the results meandered down again with no
+    # clear step. Alert key agxzfmNocm9tZXBlcmZyFAsSB0Fub21hbHkYgIDAnYnIqAoM.
+    sheriff_key = sheriff.Sheriff(email='a@google.com', id='sheriff_key').put()
+    series = [
+        (362080, 1562.6), (362086, 1641.4), (362095, 1572.4), (362102, 1552.9),
+        (362104, 1579.9), (362114, 1564.6), (362118, 1570.5), (362122, 1555.7),
+        (362129, 1550.1), (362134, 1547.5), (362149, 1536.2), (362186, 1533.1),
+        (362224, 1542.0), (362262, 1658.9), (362276, 1675.6), (362305, 1630.8),
+        (362321, 1664.7), (362345, 1659.6), (362361, 1669.4), (362366, 1681.6),
+        (362367, 1601.3), (362369, 1664.7), (362401, 1648.4), (362402, 1595.2),
+        (362417, 1676.9), (362445, 1532.0), (362470, 1631.9), (362490, 1585.1),
+        (362500, 1674.3), (362543, 1639.7), (362565, 1670.7), (362611, 1594.7),
+        (362635, 1677.5), (362638, 1687.5), (362650, 1702.2), (362663, 1614.9),
+        (362676, 1650.1), (362686, 1724.0), (362687, 1594.8), (362700, 1633.7),
+        (362721, 1684.1), (362744, 1678.7), (362776, 1642.4), (362899, 1591.1),
+        (362915, 1639.1), (362925, 1633.7), (362935, 1539.6), (362937, 1572.0),
+        (362950, 1567.4), (362963, 1608.3)
+    ]
+    test_key = self._AddTestData(
+        series, sheriff_key, improvement_direction=anomaly.DOWN)
+    anomaly_key = self._AddAnomalyForTest(
+        sheriff_key, test_key, revision=362262,
+        median_before=1579.2, median_after=1680.7)
+    self.testapp.post('/auto_triage')
+    self.assertFalse(anomaly_key.get().recovered)
+
+  def testPost_RealisticExample_Recovered(self):
+    # This test is based on a real-world case where there was a step up at
+    # r362399, and shortly thereafter a step down at r362680 of roughly similar
+    # magnitude. Alert key agxzfmNocm9tZXBlcmZyFAsSB0Fub21hbHkYgIDAnbimogoM
+    sheriff_key = sheriff.Sheriff(email='a@google.com', id='sheriff_key').put()
+    series = [
+        (361776, 78260720), (361807, 78760907), (361837, 77723737),
+        (361864, 77984606), (361869, 78660955), (361879, 78276998),
+        (361903, 77420262), (362399, 79629598), (362416, 79631028),
+        (362428, 79074016), (362445, 79348860), (362483, 79724728),
+        (362532, 79673772), (362623, 79120915), (362641, 79384809),
+        (362666, 79885480), (362680, 78308585), (362701, 78063846),
+        (362730, 78244836), (362759, 77375408), (362799, 77836310),
+        (362908, 78069878), (362936, 77191699), (362958, 77951200),
+        (362975, 77906097)
+    ]
+    test_key = self._AddTestData(
+        series, sheriff_key, improvement_direction=anomaly.DOWN)
+    anomaly_key = self._AddAnomalyForTest(
+        sheriff_key, test_key, revision=362399,
+        median_before=78275468.8, median_after=79630313.6)
+    self.testapp.post('/auto_triage')
+    self.assertTrue(anomaly_key.get().recovered)
+
 
 if __name__ == '__main__':
   unittest.main()
diff --git a/catapult/dashboard/dashboard/bad_bisect.py b/catapult/dashboard/dashboard/bad_bisect.py
new file mode 100644
index 0000000..19c0ed4
--- /dev/null
+++ b/catapult/dashboard/dashboard/bad_bisect.py
@@ -0,0 +1,69 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""URL endpoint to record bad bisect."""
+
+from google.appengine.api import users
+
+from dashboard import oauth2_decorator
+from dashboard import quick_logger
+from dashboard import request_handler
+from dashboard import utils
+from dashboard import xsrf
+from dashboard.models import try_job
+
+
+class BadBisectHandler(request_handler.RequestHandler):
+
+  @oauth2_decorator.DECORATOR.oauth_required
+  def get(self):
+    """Renders bad_bisect.html."""
+    if not utils.IsValidSheriffUser():
+      self._RenderError('No permission.')
+      return
+    if not self.request.get('try_job_id'):
+      self._RenderError('Missing try_job_id.')
+      return
+
+    self.RenderHtml('bad_bisect.html',
+                    {'try_job_id': self.request.get('try_job_id')})
+
+  @xsrf.TokenRequired
+  def post(self):
+    """Handles post requests from bad_bisect.html."""
+    if not utils.IsValidSheriffUser():
+      self._RenderError('No permission.')
+      return
+    if not self.request.get('try_job_id'):
+      self._RenderError('Missing try_job_id.')
+      return
+
+    try_job_id = int(self.request.get('try_job_id'))
+    job = try_job.TryJob.get_by_id(try_job_id)
+    if not job:
+      self._RenderError('TryJob doesn\'t exist.')
+      return
+
+    user = users.get_current_user()
+    email = user.email()
+    if not job.bad_result_emails:
+      job.bad_result_emails = set()
+    if email not in job.bad_result_emails:
+      job.bad_result_emails.add(email)
+      job.put()
+      _LogFeedback(try_job_id, email)
+
+    self.RenderHtml('result.html', {
+        'headline': 'Confirmed bad bisect.  Thank you for reporting.'})
+
+  def _RenderError(self, error):
+    self.RenderHtml('result.html', {'errors': [error]})
+
+
+def _LogFeedback(try_job_id, email):
+  formatter = quick_logger.Formatter()
+  logger = quick_logger.QuickLogger('bad_bisect', 'report', formatter)
+  message = '%s marked try job %d.' % (email, try_job_id)
+  logger.Log(message)
+  logger.Save()
diff --git a/catapult/dashboard/dashboard/bad_bisect_test.py b/catapult/dashboard/dashboard/bad_bisect_test.py
new file mode 100644
index 0000000..b59b047
--- /dev/null
+++ b/catapult/dashboard/dashboard/bad_bisect_test.py
@@ -0,0 +1,64 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import webapp2
+import webtest
+
+# pylint: disable=unused-import
+from dashboard import mock_oauth2_decorator
+# pylint: enable=unused-import
+
+from google.appengine.api import users
+
+from dashboard import bad_bisect
+from dashboard import quick_logger
+from dashboard import testing_common
+from dashboard import xsrf
+from dashboard.models import try_job
+
+
+class BadBisectHandlerTest(testing_common.TestCase):
+
+  def setUp(self):
+    super(BadBisectHandlerTest, self).setUp()
+    app = webapp2.WSGIApplication([
+        ('/bad_bisect',
+         bad_bisect.BadBisectHandler)])
+    self.testapp = webtest.TestApp(app)
+    testing_common.SetSheriffDomains(['chromium.org'])
+    testing_common.SetIsInternalUser('test@chromium.com', True)
+    self.SetCurrentUser('test@chromium.org')
+    try_job.TryJob(id=1234).put()
+
+  def testGet_WithNoTryJobId_ShowsError(self):
+    response = self.testapp.get('/bad_bisect')
+    self.assertIn('<h1 class="error">', response.body)
+
+  def testGet_NotLoggedIn_ShowsError(self):
+    self.UnsetCurrentUser()
+    self.testapp.get('/bad_bisect?', {'try_job_id': '1234'})
+    response = self.testapp.get('/bad_bisect')
+    self.assertIn('<h1 class="error">', response.body)
+
+  def testGet_RenderForm(self):
+    response = self.testapp.get('/bad_bisect?', {'try_job_id': '1234'})
+    self.assertEqual(1, len(response.html('form')))
+    self.assertIn('1234', response.body)
+
+  def testPost_FeedbackRecorded(self):
+    self.testapp.post('/bad_bisect?', {
+        'try_job_id': '1234',
+        'xsrf_token': xsrf.GenerateToken(users.get_current_user()),
+    })
+    jobs = try_job.TryJob.query().fetch()
+    self.assertEqual(1, len(jobs))
+    self.assertEqual({'test@chromium.org'}, jobs[0].bad_result_emails)
+
+  def testPost_LogAdded(self):
+    self.testapp.post('/bad_bisect?', {
+        'try_job_id': '1234',
+        'xsrf_token': xsrf.GenerateToken(users.get_current_user()),
+    })
+    logs = quick_logger.Get('bad_bisect', 'report')
+    self.assertEqual(1, len(logs))
diff --git a/catapult/dashboard/dashboard/bench_find_anomalies.py b/catapult/dashboard/dashboard/bench_find_anomalies.py
index fdac29d..3a23cfa 100644
--- a/catapult/dashboard/dashboard/bench_find_anomalies.py
+++ b/catapult/dashboard/dashboard/bench_find_anomalies.py
@@ -64,26 +64,18 @@
 
 _EXPERIMENTAL_FUNCTIONS = {
     'find_change_points_default': find_change_points_exp.RunFindChangePoints,
-    'find_change_points_absolute_change_threshold':
-        find_change_points_exp.FindChangePointsWithAbsoluteChangeThreshold,
-    'segment_size_4': lambda(test, series):
+    'steppiness_0_3': lambda test, series:
                       find_change_points_exp.RunFindChangePoints(
-                          test, series, min_segment_size=4),
-    'segment_size_8': lambda(test, series):
+                          test, series, min_steppiness=0.3),
+    'steppiness_0_4': lambda test, series:
                       find_change_points_exp.RunFindChangePoints(
-                          test, series, min_segment_size=8),
-    'steppiness_0_1': lambda(test, series):
-                      find_change_points_exp.RunFindChangePoints(
-                          test, series, min_steppiness=0.1),
-    'steppiness_0_5': lambda(test, series):
+                          test, series, min_steppiness=0.4),
+    'steppiness_0_5': lambda test, series:
                       find_change_points_exp.RunFindChangePoints(
                           test, series, min_steppiness=0.5),
-    'std_dev_2': lambda(test, series):
-                 find_change_points_exp.RunFindChangePoints(
-                     test, series, multiple_of_std_dev=2.0),
-    'std_dev_3': lambda(test, series):
-                 find_change_points_exp.RunFindChangePoints(
-                     test, series, multiple_of_std_dev=3.0),
+    'steppiness_0_6': lambda test, series:
+                      find_change_points_exp.RunFindChangePoints(
+                          test, series, min_steppiness=0.6),
 }
 
 
@@ -138,7 +130,7 @@
 
 class SimulateAlertProcessingPipeline(pipeline.Pipeline):
 
-  def run(self, bench_name, test_bench_id):
+  def run(self, bench_name, test_bench_id):  # pylint: disable=invalid-name
     """Runs one experimental alerting function for one TestBench entity.
 
     Args:
@@ -173,7 +165,8 @@
 
 class GenerateComparisonReportPipeline(pipeline.Pipeline):
 
-  def run(self, bench_name, description, simulation_results):
+  def run(  # pylint: disable=invalid-name
+      self, bench_name, description, simulation_results):
     """"Generates a comparison report between experimental and base results.
 
     Args:
@@ -319,11 +312,11 @@
 
 class RunExperimentalChunkPipeline(pipeline.Pipeline):
 
-  def run(self, bench_name, test_bench_ids):
+  def run(self, bench_name, test_bench_ids):  # pylint: disable=invalid-name
     """Runs the experimental find_change_points on each TestBench entity.
 
     This runs SimulateAlertProcessing in parallel and returns a list of
-    the combinded results.
+    the combined results.
 
     Args:
       bench_name: A string bench name.
@@ -342,7 +335,7 @@
 
 class RunExperimentalPipeline(pipeline.Pipeline):
 
-  def run(self, bench_name, description):
+  def run(self, bench_name, description):  # pylint: disable=invalid-name
     """The root pipeline that start simulation tasks and generating report.
 
     This spawns tasks to spawn more tasks that run simulation and executes the
@@ -395,7 +388,7 @@
 
   Raises:
     ValueError: The input was not valid.
-    Exception: Not enough data valable.
+    Exception: Not enough data available.
   """
   if bench_name not in _EXPERIMENTAL_FUNCTIONS:
     raise ValueError('%s is not a valid find anomalies bench function.' %
@@ -481,7 +474,8 @@
   # Start rev for getting Anomalies should be at min_segment_size.
   test = test_bench.test.get()
   config_dict = anomaly_config.GetAnomalyConfigDict(test)
-  min_segment_size = config_dict.get('min_segment_size')
+  min_segment_size = config_dict.get(
+      'min_segment_size', find_change_points.MIN_SEGMENT_SIZE)
   start_index = min(min_segment_size, len(test_bench.data_series)) - 1
   start_rev = test_bench.data_series[start_index][0]
 
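
The rewritten _EXPERIMENTAL_FUNCTIONS entries above also drop Python 2's parenthesized tuple-parameter lambdas (lambda(test, series): ...), which are a syntax error under Python 3; the plain two-argument form works in both. A minimal sketch of the entry pattern, using a hypothetical stand-in for find_change_points_exp.RunFindChangePoints:

    # Hypothetical stand-in for find_change_points_exp.RunFindChangePoints, kept
    # only to show the call shape used by the dict entries above.
    def run_find_change_points(test, series, min_steppiness=0.5):
        return (test, len(series), min_steppiness)

    EXPERIMENTAL_FUNCTIONS = {
        # Plain two-argument lambda, valid in both Python 2 and Python 3.
        'steppiness_0_4': lambda test, series: run_find_change_points(
            test, series, min_steppiness=0.4),
    }

    print(EXPERIMENTAL_FUNCTIONS['steppiness_0_4']('my_test', [1, 2, 3]))
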
diff --git a/catapult/dashboard/dashboard/bench_find_anomalies_test.py b/catapult/dashboard/dashboard/bench_find_anomalies_test.py
index 4bd6b23..94566b3 100644
--- a/catapult/dashboard/dashboard/bench_find_anomalies_test.py
+++ b/catapult/dashboard/dashboard/bench_find_anomalies_test.py
@@ -128,8 +128,8 @@
     self.assertEqual({bench_key: True}, layered_cache.Get(
         bench_find_anomalies._FIND_ANOMALIES_BENCH_CACHE_KEY))
 
-    taskq = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
-    test_support.execute_until_empty(taskq,
+    task_queue = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
+    test_support.execute_until_empty(task_queue,
                                      bench_find_anomalies._TASK_QUEUE_NAME)
 
     expected_result_dict = {
@@ -161,8 +161,8 @@
     bench_find_anomalies.BenchFindChangePoints(bench_name, bench_description)
 
     # A task should be added.
-    taskq = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
-    tasks = taskq.GetTasks(bench_find_anomalies._TASK_QUEUE_NAME)
+    task_queue = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
+    tasks = task_queue.GetTasks(bench_find_anomalies._TASK_QUEUE_NAME)
     self.assertEqual(1, len(tasks))
 
     with self.assertRaises(ValueError):
diff --git a/catapult/dashboard/dashboard/bisect_fyi.py b/catapult/dashboard/dashboard/bisect_fyi.py
index 8167590..db696eb 100644
--- a/catapult/dashboard/dashboard/bisect_fyi.py
+++ b/catapult/dashboard/dashboard/bisect_fyi.py
@@ -4,6 +4,9 @@
 
 """URL endpoint for a cron job to run bisects integration tests."""
 
+import datetime
+import time
+
 from google.appengine.api import mail
 
 from dashboard import auto_bisect
@@ -32,10 +35,10 @@
   def post(self):
     """Runs auto bisects."""
     datastore_hooks.SetPrivilegedRequest()
-    _RunBisectIngrationTests()
+    _RunBisectIntegrationTests()
 
 
-def _RunBisectIngrationTests():
+def _RunBisectIntegrationTests():
   """Runs bisect jobs with pre determined configs."""
   errors_list = {}
   bisect_fyi_configs = stored_object.Get(_BISECT_FYI_CONFIGS_KEY)
@@ -45,7 +48,7 @@
       if 'error' in results:
         errors_list[test_name] = {
             'error': results['error'],
-            'info':config.get('bisect_config')}
+            'info': config.get('bisect_config')}
     else:
       errors_list[test_name] = {'error': 'Missing bisect config.'}
   if errors_list:
@@ -63,19 +66,18 @@
     If successful, a dict containing "issue_id" and "issue_url" for the
     bisect job. Otherwise, a dict containing "error", with some description
     of the reason why a job wasn't started.
-
   """
   try:
     bisect_job = _MakeBisectFYITryJob(test_name, bisect_config)
   except auto_bisect.NotBisectableError as e:
     return {'error': e.message}
-  bisect_job_key = bisect_job.put()
   try:
     bisect_result = start_try_job.PerformBisect(bisect_job)
   except request_handler.InvalidInputError as e:
     bisect_result = {'error': e.message}
   if 'error' in bisect_result:
-    bisect_job_key.delete()
+    if bisect_job.key:
+      bisect_job.key.delete()
   return bisect_result
 
 
@@ -103,7 +105,6 @@
       config=config_python_string,
       bug_id=bisect_config.get('bug_id', -1),
       master_name='ChromiumPerf',
-      internal_only=True,
       job_type='bisect-fyi',
       use_buildbucket=use_recipe,
       job_name=test_name)
@@ -111,30 +112,37 @@
   return bisect_job
 
 
-def VerifyBisectFYIResults(job, bisect_results):
-  """Verifies the bisect results against expected results in test config."""
-  bisect_fyi_configs = stored_object.Get(_BISECT_FYI_CONFIGS_KEY)
-  for test_name, config in bisect_fyi_configs.iteritems():
-    if job.job_name == test_name:
-      errors = _VerifyExpectedResults(
-          bisect_results.get('results'), config.get('expected_results'))
-      if errors:
-        bisect_results['status'] = 'Failure'
-        bisect_results['errors'] = errors
+def VerifyBisectFYIResults(job):
+  """Verifies the bisect results against expected results in test config.
 
-  return bisect_results
+  Args:
+    job: TryJob entity.
+
+  Returns:
+    An error message if the results don't match expectations; otherwise ''.
+  """
+  expected_results = _GetBisectConfig(job).get('expected_results')
+  try:
+    utils.Validate(expected_results, job.results_data)
+  except ValueError as e:
+    return 'Bisect result is not as expected: %s.' % e
+  return ''
 
 
-def _VerifyExpectedResults(bisect_results, expected_results):
-  if not expected_results:
-    return 'No expected results found in test config.'
-  error_list = []
-  for key, value in expected_results.iteritems():
-    if value not in bisect_results:
-      error_list.append('Expected results %s = "%s" not found in bisect '
-                        'results.\n' % (key, value))
+def IsBugUpdated(job, issue_tracker):
+  """Verifies whether bug is updated with the bisect results."""
+  comment_info = issue_tracker.GetLastBugCommentsAndTimestamp(job.bug_id)
+  if not comment_info:
+    return False
 
-  return ''.join(error_list)
+  last_comment_timestamp = datetime.datetime.strptime(
+      comment_info['timestamp'], '%Y-%m-%dT%H:%M:%S')
+  bug_update_timestamp = time.mktime(last_comment_timestamp.timetuple())
+  try_job_timestamp = time.mktime(job.last_ran_timestamp.timetuple())
+
+  if bug_update_timestamp <= try_job_timestamp:
+    return False
+  return True
 
 
 def _TextBody(errors_list):
@@ -143,7 +151,7 @@
   for test_name, data in errors_list.iteritems():
     test_alerts.append(
         _TEST_FAILURE_TEMPLATE % {
-            'test_name':test_name,
+            'test_name': test_name,
             'error': data.get('error'),
             'info': data.get('info', '')
         }
@@ -154,7 +162,15 @@
 def _SendEmailAlert(errors_list):
   """Sends email alert about bisect integration tests failures."""
   mail.send_mail(
-      sender='auto-bisect-team@google.com',
-      to='prasadv@google.com',
+      sender='gasper-alerts@google.com',
+      to='auto-bisect-team@google.com',
       subject='[Bisect FYI Alert]Failed to run bisect integration tests.',
       body=_TextBody(errors_list))
+
+
+def _GetBisectConfig(job):
+  bisect_fyi_configs = stored_object.Get(_BISECT_FYI_CONFIGS_KEY)
+  for test_name, config in bisect_fyi_configs.iteritems():
+    if job.job_name == test_name:
+      return config
+  return {}
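
IsBugUpdated above treats the bug as updated only if the issue tracker's last comment is newer than the try job's last run, comparing both as epoch seconds. A minimal standalone sketch of that comparison (the timestamp format string is the one used above; the function name here is only for illustration):

    import datetime
    import time

    def bug_updated_after_job(comment_timestamp_str, job_last_ran):
        # Parse the issue tracker timestamp, then compare both times in epoch
        # seconds, mirroring IsBugUpdated.
        last_comment = datetime.datetime.strptime(
            comment_timestamp_str, '%Y-%m-%dT%H:%M:%S')
        return time.mktime(last_comment.timetuple()) > time.mktime(
            job_last_ran.timetuple())

    # A comment posted an hour after the job ran counts as a bug update.
    print(bug_updated_after_job(
        '2016-02-10T13:00:00', datetime.datetime(2016, 2, 10, 12, 0)))  # True
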
diff --git a/catapult/dashboard/dashboard/bisect_fyi_test.py b/catapult/dashboard/dashboard/bisect_fyi_test.py
index 8fb3679..6187f18 100644
--- a/catapult/dashboard/dashboard/bisect_fyi_test.py
+++ b/catapult/dashboard/dashboard/bisect_fyi_test.py
@@ -2,8 +2,6 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-import datetime
-import sys
 import unittest
 
 import mock
@@ -11,57 +9,50 @@
 import webtest
 
 from dashboard import bisect_fyi
-from dashboard import request_handler
 from dashboard import start_try_job
 from dashboard import stored_object
 from dashboard import testing_common
-from dashboard import utils
-from dashboard.models import anomaly
-from dashboard.models import try_job
 
 TEST_FYI_CONFIGS = {
-  'positive_culprit': {
-      'bisect_config': {
-          'bad_revision': '357672',
-          'bug_id': 111,
-          'command': ('python src/tools/perf/run_benchmark -v '
-                      '--browser=release_x64 --output-format=chartjson '
-                      '--also-run-disabled-tests blink_perf.bindings'),
-          'good_revision': '357643',
-          'gs_bucket': 'chrome-perf',
-          'max_time_minutes': '20',
-          'metric': 'create-element/create-element',
-          'recipe_tester_name': 'win_x64_perf_bisect',
-          'repeat_count': '10',
-          'test_type': 'perf'
-      },
-      'expected_results': {
-          'status': 'Status: Positive',
-          'culprit': 'Commit  : 2a1781d64d',
-          'job_banner': '= BISECT JOB RESULTS =',
-      }
-  },
-  'early_abort': {
-      'bisect_config': {
-          'bad_revision': '257672',
-          'bug_id': 222,
-          'command': ('python src/tools/perf/run_benchmark -v '
-                      '--browser=release_x64 --output-format=chartjson '
-                      '--also-run-disabled-tests blink_perf.bindings'),
-          'good_revision': '257643',
-          'gs_bucket': 'chrome-perf',
-          'max_time_minutes': '20',
-          'metric': 'create-element/create-element',
-          'recipe_tester_name': 'win_x64_perf_bisect',
-          'repeat_count': '10',
-          'test_type': 'perf'
-      },
-      'expected_results': {
-          'status': '',
-          'culprit': '',
-          'job_banner': '= BISECTION ABORTED =',
-      }
-  },
+    'positive_culprit': {
+        'bisect_config': {
+            'bad_revision': '357672',
+            'bug_id': 111,
+            'command': ('python src/tools/perf/run_benchmark -v '
+                        '--browser=release_x64 --output-format=chartjson '
+                        '--also-run-disabled-tests blink_perf.bindings'),
+            'good_revision': '357643',
+            'gs_bucket': 'chrome-perf',
+            'max_time_minutes': '20',
+            'metric': 'create-element/create-element',
+            'recipe_tester_name': 'win_x64_perf_bisect',
+            'repeat_count': '10',
+            'test_type': 'perf'
+        },
+        'expected_results': {
+            'status': ['completed'],
+            'culprit_data': {'cl': ['2a1781d64d']},
+        }
+    },
+    'early_abort': {
+        'bisect_config': {
+            'bad_revision': '257672',
+            'bug_id': 222,
+            'command': ('python src/tools/perf/run_benchmark -v '
+                        '--browser=release_x64 --output-format=chartjson '
+                        '--also-run-disabled-tests blink_perf.bindings'),
+            'good_revision': '257643',
+            'gs_bucket': 'chrome-perf',
+            'max_time_minutes': '20',
+            'metric': 'create-element/create-element',
+            'recipe_tester_name': 'win_x64_perf_bisect',
+            'repeat_count': '10',
+            'test_type': 'perf'
+        },
+        'expected_results': {
+            'status': ['aborted'],
+        }
+    },
 }
 
 
@@ -82,15 +73,15 @@
     self.testapp = webtest.TestApp(app)
 
   @mock.patch.object(bisect_fyi.start_try_job, '_PerformBuildbucketBisect')
-  def testPost_FailedJobs_BisectFYI(self,  mock_perform_bisect):
-    mock_perform_bisect.return_value = {'error':'PerformBisect Failed'}
+  def testPost_FailedJobs_BisectFYI(self, mock_perform_bisect):
+    mock_perform_bisect.return_value = {'error': 'PerformBisect Failed'}
     self.testapp.post('/bisect_fyi')
     messages = self.mail_stub.get_sent_messages()
     self.assertEqual(1, len(messages))
 
   @mock.patch.object(bisect_fyi.start_try_job, '_PerformBuildbucketBisect')
-  def testPost_SuccessJobs_BisectFYI(self,  mock_perform_bisect):
-    mock_perform_bisect.return_value = {'issue_id':'http://fake'}
+  def testPost_SuccessJobs_BisectFYI(self, mock_perform_bisect):
+    mock_perform_bisect.return_value = {'issue_id': 'http://fake'}
     self.testapp.post('/bisect_fyi')
     messages = self.mail_stub.get_sent_messages()
     self.assertEqual(0, len(messages))
diff --git a/catapult/dashboard/dashboard/bisect_report.py b/catapult/dashboard/dashboard/bisect_report.py
new file mode 100644
index 0000000..5a48c77
--- /dev/null
+++ b/catapult/dashboard/dashboard/bisect_report.py
@@ -0,0 +1,155 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Generates reports base on bisect result data."""
+
+import copy
+
+_CONFIDENCE_THRESHOLD = 99.5
+
+_BISECT_REPORT_TEMPLATE = """
+===== BISECT JOB RESULTS =====
+Status: %(status)s
+
+%(result)s
+
+Bisect job ran on: %(bisect_bot)s
+Bug ID: %(bug_id)s
+
+Test Command: %(command)s
+Test Metric: %(metric)s
+Relative Change: %(change)s
+Score: %(score)s
+
+Buildbot stdio: %(buildbot_log_url)s
+Job details: %(issue_url)s
+
+"""
+
+_RESULTS_REVISION_INFO = """
+===== SUSPECTED CL(s) =====
+Subject : %(subject)s
+Author  : %(author)s
+Commit description:
+  %(commit_info)s
+Commit  : %(cl)s
+Date    : %(cl_date)s
+
+"""
+
+_ABORTED_REASON_TEMPLATE = """
+=== Bisection aborted ===
+The bisect was aborted because %s
+Please contact the team (see below) if you believe this is in error.
+"""
+
+_WARNINGS_TEMPLATE = """
+=== Warnings ===
+The following warnings were raised by the bisect job:
+
+ * %s
+"""
+
+_REVISION_TABLE_TEMPLATE = """
+===== TESTED REVISIONS =====
+%(table)s"""
+
+_RESULTS_THANK_YOU = """
+| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
+|  X  | for more information addressing perf regression bugs. For feedback,
+| / \\ | file a bug with component Tests>AutoBisect.  Thank you!"""
+
+_REPORT_BAD_BISECT_TEMPLATE = """
+Not what you expected? We'll investigate and get back to you!
+  https://chromeperf.appspot.com/bad_bisect?try_job_id=%s
+"""
+
+
+def GetReport(try_job_entity):
+  """Generates a report for bisect results.
+
+  This was ported from recipe_modules/auto_bisect/bisect_results.py.
+
+  Args:
+    try_job_entity: A TryJob entity.
+
+  Returns:
+    Bisect report string.
+  """
+  results_data = copy.deepcopy(try_job_entity.results_data)
+  if not results_data:
+    return ''
+  result = ''
+  if results_data.get('aborted_reason'):
+    result += _ABORTED_REASON_TEMPLATE % results_data['aborted_reason']
+
+  if results_data.get('warnings'):
+    warnings = '\n'.join(results_data['warnings'])
+    result += _WARNINGS_TEMPLATE % warnings
+
+  if results_data.get('culprit_data'):
+    result += _RESULTS_REVISION_INFO % results_data['culprit_data']
+
+  if results_data.get('revision_data'):
+    result += _RevisionTable(results_data)
+
+  results_data['result'] = result
+  report = _BISECT_REPORT_TEMPLATE % results_data
+  if try_job_entity.bug_id > 0:
+    report += _REPORT_BAD_BISECT_TEMPLATE % try_job_entity.bug_id
+  report += _RESULTS_THANK_YOU
+  return report
+
+
+def _MakeLegacyRevisionString(r):
+  result = 'chromium@' + str(r.get('commit_pos', 'unknown'))
+  if r.get('depot_name', 'chromium') != 'chromium':
+    result += ',%s@%s' % (r['depot_name'], r.get('deps_revision', 'unknown'))
+  return result
+
+
+def _RevisionTable(results_data):
+  is_return_code = results_data.get('test_type') == 'return_code'
+  culprit_commit_hash = None
+  if 'culprit_data' in results_data and results_data['culprit_data']:
+    culprit_commit_hash = results_data['culprit_data']['cl']
+
+  def RevisionRow(r):
+    result = [
+        r.get('revision_string', _MakeLegacyRevisionString(r)),
+        _FormatNumber(r['mean_value']),
+        _FormatNumber(r['std_dev']),
+        len(r['values']),
+        r['result'],
+        '<-' if r['commit_hash'] == culprit_commit_hash else '',
+    ]
+    return map(str, result)
+  revision_rows = [RevisionRow(r) for r in results_data['revision_data']]
+
+  headers_row = [[
+      'Revision',
+      'Mean Value' if not is_return_code else 'Exit Code',
+      'Std. Dev.',
+      'Num Values',
+      'Good?',
+      '',
+  ]]
+  all_rows = headers_row + revision_rows
+  return _REVISION_TABLE_TEMPLATE % {'table': _PrettyTable(all_rows)}
+
+
+def _FormatNumber(x):
+  if x is None:
+    return 'N/A'
+  if isinstance(x, int):
+    return str(x)
+  return str(round(x, 6))
+
+
+def _PrettyTable(data):
+  results = []
+  for row in data:
+    results.append(
+        (('%-24s' + '%-12s' * (len(row) - 1)) % tuple(row)).rstrip())
+  return '\n'.join(results)
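
The tested-revisions table in the report comes from _PrettyTable above: the first column is left-justified in a 24-character field, every other column in a 12-character field, and trailing whitespace is stripped. A standalone sketch reproducing that layout on sample rows:

    def pretty_table(rows):
        # Mirrors _PrettyTable: '%-24s' for the first column, '%-12s' for the rest.
        lines = []
        for row in rows:
            lines.append(
                (('%-24s' + '%-12s' * (len(row) - 1)) % tuple(row)).rstrip())
        return '\n'.join(lines)

    print(pretty_table([
        ['Revision', 'Mean Value', 'Std. Dev.', 'Num Values', 'Good?', ''],
        ['chromium@306475', '70', '0', '3', 'good', ''],
        ['chromium@306476', '80', '0', '3', 'bad', '<-'],
    ]))
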
diff --git a/catapult/dashboard/dashboard/bisect_report_test.py b/catapult/dashboard/dashboard/bisect_report_test.py
new file mode 100644
index 0000000..a50cadc
--- /dev/null
+++ b/catapult/dashboard/dashboard/bisect_report_test.py
@@ -0,0 +1,302 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import copy
+import unittest
+
+from dashboard import bisect_report
+from dashboard import testing_common
+from dashboard.models import try_job
+
+_SAMPLE_BISECT_RESULTS_JSON = {
+    'try_job_id': None,
+    'bug_id': None,
+    'status': None,
+    'bisect_bot': 'linux',
+    'buildbot_log_url': 'http://build.chromium.org/513',
+    'command': ('tools/perf/run_benchmark -v '
+                '--browser=release page_cycler'),
+    'metric': 'page_load_time',
+    'test_type': 'perf',
+    'issue_url': 'https://test-rietveld.appspot.com/200039',
+    'change': 10,
+    'score': 99.9,
+    'good_revision': '306475',
+    'bad_revision': '306477',
+    'warnings': None,
+    'aborted_reason': None,
+    'culprit_data': {
+        'subject': 'subject',
+        'author': 'author',
+        'email': 'author@email.com',
+        'cl_date': '1/2/2015',
+        'commit_info': 'commit info',
+        'revisions_links': ['http://src.chromium.org/viewvc/chrome?view='
+                            'revision&revision=306476'],
+        'cl': '306476abcdabcdfabcdfabcdfabcdfabcdfabcdf'
+    },
+    'revision_data': [
+        {
+            'depot_name': 'chromium',
+            'commit_hash': '306475abcdabcdfabcdfabcdfabcdfabcdfabcdf',
+            'revision_string': 'chromium@306475',
+            'mean_value': 70,
+            'std_dev': 0,
+            'values': [70, 70, 70],
+            'result': 'good'
+        },
+        {
+            'revision_string': 'chromium@306476',
+            'commit_hash': '306476abcdabcdfabcdfabcdfabcdfabcdfabcdf',
+            'depot_name': 'chromium',
+            'mean_value': 80,
+            'std_dev': 0,
+            'values': [80, 80, 80],
+            'result': 'bad'
+        },
+        {
+            'revision_string': 'chromium@306477',
+            'depot_name': 'chromium',
+            'commit_hash': '306477abcdabcdfabcdfabcdfabcdfabcdfabcdf',
+            'mean_value': 80,
+            'std_dev': 0,
+            'values': [80, 80, 80],
+            'result': 'bad'
+        }
+    ]
+}
+
+
+class BisectReportTest(testing_common.TestCase):
+
+  def setUp(self):
+    super(BisectReportTest, self).setUp()
+
+  def _AddTryJob(self, results_data, **kwargs):
+    job = try_job.TryJob(results_data=results_data, **kwargs)
+    job.put()
+    return job
+
+  def _BisectResults(self, try_job_id, bug_id, status, **kwargs):
+    results = copy.deepcopy(_SAMPLE_BISECT_RESULTS_JSON)
+    results['try_job_id'] = try_job_id
+    results['bug_id'] = bug_id
+    results['status'] = status
+    results.update(kwargs)
+    return results
+
+  def testGetReport_CompletedWithCulprit(self):
+    results_data = self._BisectResults(6789, 12345, 'completed')
+    job = self._AddTryJob(results_data)
+
+    log_with_culprit = r"""
+===== BISECT JOB RESULTS =====
+Status: completed
+
+
+===== SUSPECTED CL(s) =====
+Subject : subject
+Author  : author
+Commit description:
+  commit info
+Commit  : 306476abcdabcdfabcdfabcdfabcdfabcdfabcdf
+Date    : 1/2/2015
+
+
+===== TESTED REVISIONS =====
+Revision                Mean Value  Std. Dev.   Num Values  Good?
+chromium@306475         70          0           3           good
+chromium@306476         80          0           3           bad         <-
+chromium@306477         80          0           3           bad
+
+Bisect job ran on: linux
+Bug ID: 12345
+
+Test Command: tools/perf/run_benchmark -v --browser=release page_cycler
+Test Metric: page_load_time
+Relative Change: 10
+Score: 99.9
+
+Buildbot stdio: http://build.chromium.org/513
+Job details: https://test-rietveld.appspot.com/200039
+
+
+| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
+|  X  | for more information addressing perf regression bugs. For feedback,
+| / \ | file a bug with component Tests>AutoBisect.  Thank you!"""
+    self.assertEqual(log_with_culprit, bisect_report.GetReport(job))
+
+  def testGetReport_CompletedWithoutCulprit(self):
+    results_data = self._BisectResults(6789, 12345, 'completed',
+                                       culprit_data=None, score=0)
+    job = self._AddTryJob(results_data)
+
+    log_without_culprit = r"""
+===== BISECT JOB RESULTS =====
+Status: completed
+
+
+===== TESTED REVISIONS =====
+Revision                Mean Value  Std. Dev.   Num Values  Good?
+chromium@306475         70          0           3           good
+chromium@306476         80          0           3           bad
+chromium@306477         80          0           3           bad
+
+Bisect job ran on: linux
+Bug ID: 12345
+
+Test Command: tools/perf/run_benchmark -v --browser=release page_cycler
+Test Metric: page_load_time
+Relative Change: 10
+Score: 0
+
+Buildbot stdio: http://build.chromium.org/513
+Job details: https://test-rietveld.appspot.com/200039
+
+
+| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
+|  X  | for more information addressing perf regression bugs. For feedback,
+| / \ | file a bug with component Tests>AutoBisect.  Thank you!"""
+
+    self.assertEqual(log_without_culprit, bisect_report.GetReport(job))
+
+  def testGetReport_FailedBisect(self):
+    results_data = self._BisectResults(6789, 12345, 'failed',
+                                       culprit_data=None, score=0,
+                                       revision_data=None)
+    job = self._AddTryJob(results_data)
+
+    log_failed_bisect = r"""
+===== BISECT JOB RESULTS =====
+Status: failed
+
+
+
+Bisect job ran on: linux
+Bug ID: 12345
+
+Test Command: tools/perf/run_benchmark -v --browser=release page_cycler
+Test Metric: page_load_time
+Relative Change: 10
+Score: 0
+
+Buildbot stdio: http://build.chromium.org/513
+Job details: https://test-rietveld.appspot.com/200039
+
+
+| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
+|  X  | for more information addressing perf regression bugs. For feedback,
+| / \ | file a bug with component Tests>AutoBisect.  Thank you!"""
+
+    self.assertEqual(log_failed_bisect, bisect_report.GetReport(job))
+
+  def testGetReport_BisectWithWarnings(self):
+    results_data = self._BisectResults(6789, 12345, 'failed',
+                                       culprit_data=None, score=0,
+                                       revision_data=None,
+                                       warnings=['A warning.'])
+    job = self._AddTryJob(results_data)
+
+    log_failed_bisect = r"""
+===== BISECT JOB RESULTS =====
+Status: failed
+
+
+=== Warnings ===
+The following warnings were raised by the bisect job:
+
+ * A warning.
+
+
+Bisect job ran on: linux
+Bug ID: 12345
+
+Test Command: tools/perf/run_benchmark -v --browser=release page_cycler
+Test Metric: page_load_time
+Relative Change: 10
+Score: 0
+
+Buildbot stdio: http://build.chromium.org/513
+Job details: https://test-rietveld.appspot.com/200039
+
+
+| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
+|  X  | for more information addressing perf regression bugs. For feedback,
+| / \ | file a bug with component Tests>AutoBisect.  Thank you!"""
+
+    self.assertEqual(log_failed_bisect, bisect_report.GetReport(job))
+
+  def testGetReport_BisectWithAbortedReason(self):
+    results_data = self._BisectResults(6789, 12345, 'aborted',
+                                       culprit_data=None, score=0,
+                                       revision_data=None,
+                                       aborted_reason='invalid revisions.')
+    job = self._AddTryJob(results_data)
+
+    log_failed_bisect = r"""
+===== BISECT JOB RESULTS =====
+Status: aborted
+
+
+=== Bisection aborted ===
+The bisect was aborted because invalid revisions.
+Please contact the team (see below) if you believe this is in error.
+
+
+Bisect job ran on: linux
+Bug ID: 12345
+
+Test Command: tools/perf/run_benchmark -v --browser=release page_cycler
+Test Metric: page_load_time
+Relative Change: 10
+Score: 0
+
+Buildbot stdio: http://build.chromium.org/513
+Job details: https://test-rietveld.appspot.com/200039
+
+
+| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
+|  X  | for more information addressing perf regression bugs. For feedback,
+| / \ | file a bug with component Tests>AutoBisect.  Thank you!"""
+
+    self.assertEqual(log_failed_bisect, bisect_report.GetReport(job))
+
+  def testGetReport_WithBugIdBadBisectFeedback(self):
+    results_data = self._BisectResults(6789, 12345, 'completed',
+                                       culprit_data=None, score=0)
+    job = self._AddTryJob(results_data, bug_id=6789)
+
+    log_without_culprit = r"""
+===== BISECT JOB RESULTS =====
+Status: completed
+
+
+===== TESTED REVISIONS =====
+Revision                Mean Value  Std. Dev.   Num Values  Good?
+chromium@306475         70          0           3           good
+chromium@306476         80          0           3           bad
+chromium@306477         80          0           3           bad
+
+Bisect job ran on: linux
+Bug ID: 12345
+
+Test Command: tools/perf/run_benchmark -v --browser=release page_cycler
+Test Metric: page_load_time
+Relative Change: 10
+Score: 0
+
+Buildbot stdio: http://build.chromium.org/513
+Job details: https://test-rietveld.appspot.com/200039
+
+
+Not what you expected? We'll investigate and get back to you!
+  https://chromeperf.appspot.com/bad_bisect?try_job_id=6789
+
+| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
+|  X  | for more information addressing perf regression bugs. For feedback,
+| / \ | file a bug with component Tests>AutoBisect.  Thank you!"""
+    self.assertEqual(log_without_culprit, bisect_report.GetReport(job))
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/catapult/dashboard/dashboard/bisect_stats.py b/catapult/dashboard/dashboard/bisect_stats.py
index 7d94a01..1c64f20 100644
--- a/catapult/dashboard/dashboard/bisect_stats.py
+++ b/catapult/dashboard/dashboard/bisect_stats.py
@@ -91,7 +91,9 @@
     bot_name: Name of the bisect bot.
     status: Bisect status.  Either 'failed' or 'completed'.
   """
-  assert status in ['failed', 'completed']
+  # TODO(chrisphan): Add stats for staled bisect.
+  if status not in ['failed', 'completed']:
+    return
   series_name = _GetSeriesNameFromBotName(bot_name)
   week_timestamp = _GetLastMondayTimestamp()
 
@@ -116,7 +118,7 @@
 
 
 def _GetLastMondayTimestamp():
-  """Get timestamp of 00:00 last Monday in milliseconds as an integer."""
+  """Gets timestamp of 00:00 last Monday in milliseconds as an integer."""
   today = datetime.date.today()
   monday = today - datetime.timedelta(days=today.weekday())
   return utils.TimestampMilliseconds(monday)
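
_GetLastMondayTimestamp above anchors each weekly bisect-stats data point to 00:00 of the most recent Monday. A standalone sketch of the same date arithmetic, assuming utils.TimestampMilliseconds converts a date to epoch milliseconds (an assumption about that helper):

    import calendar
    import datetime

    def last_monday_timestamp_ms(today=None):
        # date.weekday() is 0 for Monday, so subtracting it lands on this week's
        # Monday; timegm treats the resulting date as 00:00 UTC.
        today = today or datetime.date.today()
        monday = today - datetime.timedelta(days=today.weekday())
        return calendar.timegm(monday.timetuple()) * 1000

    print(last_monday_timestamp_ms(datetime.date(2016, 2, 10)))  # Monday 2016-02-08
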
diff --git a/catapult/dashboard/dashboard/bot_whitelist.py b/catapult/dashboard/dashboard/bot_whitelist.py
deleted file mode 100644
index ffd81e0..0000000
--- a/catapult/dashboard/dashboard/bot_whitelist.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""URL endpoint to list externally-visible Bots."""
-
-from google.appengine.ext import ndb
-
-from dashboard import request_handler
-from dashboard import xsrf
-
-WHITELIST_KEY = 'bot_whitelist'
-
-
-class BotWhitelist(ndb.Model):
-  bots = ndb.StringProperty(repeated=True)
-
-
-class BotWhitelistHandler(request_handler.RequestHandler):
-  """URL endpoint to view/edit the external Bot whitelist for /add_point."""
-
-  def get(self):
-    """Lists the Bots in the whitelist."""
-    bots = []
-    whitelist = ndb.Key('BotWhitelist', WHITELIST_KEY).get()
-    if whitelist:
-      bots = whitelist.bots
-    self.RenderHtml('bot_whitelist.html', {'bot_whitelist': '\n'.join(bots)})
-
-  @xsrf.TokenRequired
-  def post(self):
-    """Updates the Bot names in the whitelist."""
-    bots = []
-    whitelist_text = self.request.get('bot_whitelist', '')
-    if whitelist_text:
-      bots = whitelist_text.strip().split()
-    whitelist = BotWhitelist.get_or_insert(WHITELIST_KEY)
-    whitelist.bots = bots
-    whitelist.put()
-    self.RenderHtml('result.html', {
-        'headline': 'Updated Bot Whitelist',
-        'results': [{
-            'name': 'New Bot Whitelist',
-            'class': 'results-pre',
-            'value': '\n'.join(bots)}]})
diff --git a/catapult/dashboard/dashboard/bot_whitelist_test.py b/catapult/dashboard/dashboard/bot_whitelist_test.py
deleted file mode 100644
index 9d30580..0000000
--- a/catapult/dashboard/dashboard/bot_whitelist_test.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import unittest
-
-import webapp2
-import webtest
-
-from google.appengine.api import users
-from google.appengine.ext import ndb
-
-from dashboard import bot_whitelist
-from dashboard import testing_common
-from dashboard import xsrf
-
-
-class BotWhitelistTest(testing_common.TestCase):
-
-  def setUp(self):
-    super(BotWhitelistTest, self).setUp()
-    app = webapp2.WSGIApplication(
-        [('/bot_whitelist', bot_whitelist.BotWhitelistHandler)])
-    self.testapp = webtest.TestApp(app)
-
-  def testGet(self):
-    whitelist = bot_whitelist.BotWhitelist(
-        id=bot_whitelist.WHITELIST_KEY,
-        bots=['linux-release', 'linux-release-lowmem'])
-    whitelist.put()
-    response = self.testapp.get('/bot_whitelist')
-    textarea_value = response.html('textarea')[0].renderContents()
-    self.assertEqual('linux-release\nlinux-release-lowmem', textarea_value)
-
-  def testGet_EmptyWhitelist(self):
-    response = self.testapp.get('/bot_whitelist')
-    textarea_value = response.html('textarea')[0].renderContents()
-    self.assertEqual('', textarea_value)
-
-  def testPost_InitializeWhitelist(self):
-    self.SetCurrentUser('sullivan@google.com', is_admin=True)
-    self.testapp.post('/bot_whitelist', {
-        'bot_whitelist': 'linux-release\nandroid-gn\nchromium-rel-mac6',
-        'xsrf_token': xsrf.GenerateToken(users.get_current_user()),
-    })
-    whitelist = ndb.Key('BotWhitelist', bot_whitelist.WHITELIST_KEY).get()
-    self.assertEqual(
-        ['linux-release', 'android-gn', 'chromium-rel-mac6'], whitelist.bots)
-
-  def testPost_UpdateWhitelist(self):
-    self.SetCurrentUser('sullivan@google.com', is_admin=True)
-    whitelist = bot_whitelist.BotWhitelist(
-        id=bot_whitelist.WHITELIST_KEY,
-        bots=['linux-release', 'chromium-rel-mac6'])
-    self.testapp.post('/bot_whitelist', {
-        'bot_whitelist': 'linux-release\nchromium-rel-win7\nandroid-gn',
-        'xsrf_token': xsrf.GenerateToken(users.get_current_user()),
-    })
-    whitelist = ndb.Key('BotWhitelist', bot_whitelist.WHITELIST_KEY).get()
-    self.assertEqual(
-        ['linux-release', 'chromium-rel-win7', 'android-gn'], whitelist.bots)
-
-  def testPost_ClearWhitelist(self):
-    self.SetCurrentUser('sullivan@google.com', is_admin=True)
-    whitelist = bot_whitelist.BotWhitelist(id=bot_whitelist.WHITELIST_KEY,
-                                           bots=['linux-release', 'android-gn'])
-    self.testapp.post('/bot_whitelist', {
-        'bot_whitelist': '',
-        'xsrf_token': xsrf.GenerateToken(users.get_current_user()),
-    })
-    whitelist = ndb.Key('BotWhitelist', bot_whitelist.WHITELIST_KEY).get()
-    self.assertEqual([], whitelist.bots)
-
-
-if __name__ == '__main__':
-  unittest.main()
diff --git a/catapult/dashboard/dashboard/buildbucket_job.py b/catapult/dashboard/dashboard/buildbucket_job.py
index fa2dba7..76178ec 100644
--- a/catapult/dashboard/dashboard/buildbucket_job.py
+++ b/catapult/dashboard/dashboard/buildbucket_job.py
@@ -10,15 +10,17 @@
 class BisectJob(object):
   """A buildbot bisect job started and monitored through buildbucket."""
 
-  def __init__(self, bisect_director, good_revision, bad_revision, test_command,
-               metric, repeats, timeout_minutes, bug_id, gs_bucket,
-               recipe_tester_name, builder_host=None, builder_port=None,
-               test_type='perf', required_initial_confidence=None):
+  def __init__(self, try_job_id, bisect_director, good_revision, bad_revision,
+               test_command, metric, repeats, timeout_minutes, bug_id,
+               gs_bucket, recipe_tester_name, builder_host=None,
+               builder_port=None, test_type='perf',
+               required_initial_confidence=None):
     if not all([good_revision, bad_revision, test_command, metric,
                 repeats, timeout_minutes, recipe_tester_name]):
       raise ValueError('At least one of the values required for BisectJob '
                        'construction was not given or was given with a None '
                        'value.')
+    self.try_job_id = try_job_id
     self.bisect_director = bisect_director
     self.good_revision = good_revision
     self.bad_revision = bad_revision
@@ -48,6 +50,7 @@
     """Prepares a nested dict containing the bisect config."""
     # TODO(robertocn): Some of these should be optional.
     bisect_config = {
+        'try_job_id': self.try_job_id,
         'test_type': self.test_type,
         'command': self.command,
         'good_revision': self.good_revision,
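
With the change above, the dashboard's TryJob id is threaded into BisectJob and emitted as try_job_id in the bisect config. A construction sketch using the keyword names from the new signature (values are illustrative only, and running it requires a catapult checkout on sys.path):

    from dashboard import buildbucket_job

    job = buildbucket_job.BisectJob(
        try_job_id=12345,
        bisect_director='linux_perf_bisector',
        good_revision='306475',
        bad_revision='306477',
        test_command='tools/perf/run_benchmark -v --browser=release page_cycler',
        metric='page_load_time',
        repeats=10,
        timeout_minutes=60,
        bug_id=654321,
        gs_bucket='chrome-perf',
        recipe_tester_name='linux_perf_bisect')
    # The id is kept on the job and included in the config dict built above.
    print(job.try_job_id)
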
diff --git a/catapult/dashboard/dashboard/buildbucket_job_status.py b/catapult/dashboard/dashboard/buildbucket_job_status.py
index a5ec80f..3dd8aa7 100644
--- a/catapult/dashboard/dashboard/buildbucket_job_status.py
+++ b/catapult/dashboard/dashboard/buildbucket_job_status.py
@@ -89,4 +89,3 @@
           result[key])
       result.pop(key)
   return result
-
diff --git a/catapult/dashboard/dashboard/buildbucket_job_status_test.py b/catapult/dashboard/dashboard/buildbucket_job_status_test.py
index d366772..556fda3 100644
--- a/catapult/dashboard/dashboard/buildbucket_job_status_test.py
+++ b/catapult/dashboard/dashboard/buildbucket_job_status_test.py
@@ -98,7 +98,6 @@
 }"""
 
 
-
 class BuildbucketJobStatusTest(testing_common.TestCase):
 
   def setUp(self):
diff --git a/catapult/dashboard/dashboard/buildbucket_job_test.py b/catapult/dashboard/dashboard/buildbucket_job_test.py
index 160d9ac..7730015 100644
--- a/catapult/dashboard/dashboard/buildbucket_job_test.py
+++ b/catapult/dashboard/dashboard/buildbucket_job_test.py
@@ -13,6 +13,7 @@
   def setUp(self):
     super(BuildbucketJobTest, self).setUp()
     self._args_base = {
+        'try_job_id': 1,
         'bisect_director': 'linux_perf_bisector',
         'recipe_tester_name': 'linux_perf_bisect',
         'good_revision': '1',
diff --git a/catapult/dashboard/dashboard/buildbucket_service.py b/catapult/dashboard/dashboard/buildbucket_service.py
index 6cd16fb..1d68c80 100644
--- a/catapult/dashboard/dashboard/buildbucket_service.py
+++ b/catapult/dashboard/dashboard/buildbucket_service.py
@@ -8,9 +8,8 @@
 
 from apiclient import discovery
 import httplib2
-import oauth2client
 
-from google.appengine.ext import ndb
+from dashboard import utils
 
 _DISCOVERY_URL = (
     'https://cr-buildbucket.appspot.com'
@@ -19,9 +18,6 @@
 # Default Buildbucket bucket name.
 _BUCKET_NAME = 'master.tryserver.chromium.perf'
 
-# Scope required by the Build Bucket Service.
-_OAUTH2_SCOPE = 'https://www.googleapis.com/auth/userinfo.email'
-
 
 def _DiscoverService():
   return discovery.build('buildbucket', 'v1',
@@ -30,6 +26,7 @@
 
 class _AuthenticatedHttp(object):
   """Provides access via its constructor to a singleton authenticated http."""
+
   _singleton = None
   _singleton_credentials = None
 
@@ -49,19 +46,14 @@
     # account is authorized for buildbucket.
     cls._singleton = httplib2.Http()
     if override_credentials:
-      rietveld_credentials = override_credentials
+      credentials = override_credentials
     else:
-      rietveld_credentials = ndb.Key('RietveldConfig',
-                                     'default_rietveld_config').get()
+      credentials = utils.ServiceAccountCredentials()
 
     # If we cannot pull the credentials from ndb we simply use the unauthorized
     # client. This is useful when running a local dev server.
-    if rietveld_credentials:
-      creds = oauth2client.client.SignedJwtAssertionCredentials(
-          rietveld_credentials.client_email,
-          rietveld_credentials.service_account_key,
-          _OAUTH2_SCOPE)
-      creds.authorize(cls._singleton)
+    if credentials:
+      credentials.authorize(cls._singleton)
 
 
 def PutJob(job, bucket=_BUCKET_NAME, credentials=None):
diff --git a/catapult/dashboard/dashboard/buildbucket_service_test.py b/catapult/dashboard/dashboard/buildbucket_service_test.py
index 867938b..5cb1609 100644
--- a/catapult/dashboard/dashboard/buildbucket_service_test.py
+++ b/catapult/dashboard/dashboard/buildbucket_service_test.py
@@ -5,26 +5,23 @@
 import json
 import unittest
 
+from oauth2client import client
 import mock
 
 from dashboard import buildbucket_service
 from dashboard import testing_common
 
 
+
+
 class BuildbucketServiceTest(testing_common.TestCase):
 
-  class FakeCredentials(object):
-
-    def __init__(self):
-      self.client_email = 'dummy@tempuri.org'
-      self.service_account_key = 'Some random string'
-
   class FakeJob(object):
 
     def __init__(self):
       pass
 
-    def GetBuildParameters(self):  # pylint: disable=invalid-name
+    def GetBuildParameters(self):
       return {
           'builder_name': 'dummy_builder',
           'properties': {'bisect_config': {}}
@@ -56,6 +53,11 @@
     super(BuildbucketServiceTest, self).setUp()
     self.fake_service = BuildbucketServiceTest.FakeService()
 
+  @staticmethod
+  def FakeCredentials():
+    return client.SignedJwtAssertionCredentials(
+        'service_account@foo.org', 'private key', 'bogus scope')
+
   @mock.patch('oauth2client.client.SignedJwtAssertionCredentials',
               mock.MagicMock())
   @mock.patch('httplib2.Http', mock.MagicMock())
diff --git a/catapult/dashboard/dashboard/can_bisect.py b/catapult/dashboard/dashboard/can_bisect.py
new file mode 100644
index 0000000..b01a078
--- /dev/null
+++ b/catapult/dashboard/dashboard/can_bisect.py
@@ -0,0 +1,80 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A handler and functions to check whether bisect is supported."""
+
+import re
+
+from dashboard import request_handler
+from dashboard import namespaced_stored_object
+
+# A set of suites for which we can't do performance bisects.
+# This list currently also exists in the front-end code.
+_UNBISECTABLE_SUITES = [
+    'arc-perf-test',
+    'browser_tests',
+    'content_browsertests',
+    'resource_sizes',
+    'sizes',
+    'v8',
+]
+
+# The bisect bot map stored in datastore is expected to be
+# a dict mapping master names to [perf bot, bisect bot] pairs.
+# If a master name is not in the dict, bisect isn't supported.
+BISECT_BOT_MAP_KEY = 'bisect_bot_map'
+
+
+class CanBisectHandler(request_handler.RequestHandler):
+
+  def post(self):
+    """Checks whether bisect is supported for a test.
+
+    Request parameters:
+      test_path: A full test path (Master/bot/benchmark/...)
+      start_revision: The start of the bisect revision range.
+      end_revision: The end of the bisect revision range.
+
+    Outputs: The string "true" or the string "false".
+    """
+    can_bisect = (
+        IsValidTestForBisect(self.request.get('test_path')) and
+        IsValidRevisionForBisect(self.request.get('start_revision')) and
+        IsValidRevisionForBisect(self.request.get('end_revision')))
+    self.response.write('true' if can_bisect else 'false')
+
+
+def IsValidTestForBisect(test_path):
+  """Checks whether a test is valid for bisect."""
+  if not test_path:
+    return False
+  path_parts = test_path.split('/')
+  if len(path_parts) < 3:
+    return False
+  if not _MasterNameIsWhitelisted(path_parts[0]):
+    return False
+  if path_parts[2] in _UNBISECTABLE_SUITES:
+    return False
+  if test_path.endswith('/ref') or test_path.endswith('_ref'):
+    return False
+  return True
+
+
+def _MasterNameIsWhitelisted(master_name):
+  """Checks whether a master name is acceptable by checking a whitelist."""
+  bisect_bot_map = namespaced_stored_object.Get(BISECT_BOT_MAP_KEY)
+  if not bisect_bot_map:
+    return True  # If there's no list available, all names are OK.
+  whitelisted_masters = list(bisect_bot_map)
+  return master_name in whitelisted_masters
+
+
+def IsValidRevisionForBisect(revision):
+  """Checks whether a revision looks like a valid revision for bisect."""
+  return _IsGitHash(revision) or re.match(r'^[0-9]{5,7}$', str(revision))
+
+
+def _IsGitHash(revision):
+  """Checks whether the input looks like a SHA1 hash."""
+  return re.match(r'[a-fA-F0-9]{40}$', str(revision))
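
IsValidRevisionForBisect above accepts either a full 40-character SHA1 hash or a 5-7 digit commit position. A standalone sketch of the same checks on illustrative inputs:

    import re

    def is_valid_revision_for_bisect(revision):
        # Mirrors IsValidRevisionForBisect/_IsGitHash: a git hash or a short
        # all-digit commit position passes, anything else fails.
        rev = str(revision)
        return bool(re.match(r'[a-fA-F0-9]{40}$', rev) or
                    re.match(r'^[0-9]{5,7}$', rev))

    print(is_valid_revision_for_bisect(362262))        # True: commit position
    print(is_valid_revision_for_bisect('a' * 40))      # True: SHA1-shaped hash
    print(is_valid_revision_for_bisect('2a1781d64d'))  # False: too short for a hash
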
diff --git a/catapult/dashboard/dashboard/can_bisect_test.py b/catapult/dashboard/dashboard/can_bisect_test.py
new file mode 100644
index 0000000..6bd5f6f
--- /dev/null
+++ b/catapult/dashboard/dashboard/can_bisect_test.py
@@ -0,0 +1,62 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+import webapp2
+import webtest
+
+from dashboard import can_bisect
+from dashboard import testing_common
+from dashboard import namespaced_stored_object
+
+
+class CanBisectTest(testing_common.TestCase):
+
+  def setUp(self):
+    super(CanBisectTest, self).setUp()
+    app = webapp2.WSGIApplication(
+        [('/can_bisect', can_bisect.CanBisectHandler)])
+    self.testapp = webtest.TestApp(app)
+    testing_common.SetIsInternalUser('internal@chromium.org', True)
+    self.SetCurrentUser('internal@chromium.org')
+    namespaced_stored_object.Set(
+        can_bisect.BISECT_BOT_MAP_KEY,
+        {'SupportedMaster': ['perf_bot', 'bisect_bot']})
+
+  def testPost_BisectableTests_ReturnsTrue(self):
+    response = self.testapp.post('/can_bisect', {
+        'test_path': 'SupportedMaster/mac/blink_perf.parser/simple-url',
+        'start_revision': 123400,
+        'end_revision': 123500,
+    })
+    self.assertEqual('true', response.body)
+
+  def testPost_UnsupportedMaster_ReturnsFalse(self):
+    response = self.testapp.post('/can_bisect', {
+        'test_path': 'UnsupportedMaster/mac/blink_perf.parser/simple-url',
+        'start_revision': 123400,
+        'end_revision': 123500,
+    })
+    self.assertEqual('false', response.body)
+
+  def testPost_EmptyRequest_ReturnsFalse(self):
+    response = self.testapp.post('/can_bisect', {})
+    self.assertEqual('false', response.body)
+
+  def testIsValidTestForBisect_Supported_ReturnsTrue(self):
+    self.assertTrue(
+        can_bisect.IsValidTestForBisect('SupportedMaster/b/t/foo'))
+
+  def testIsValidTestForBisect_RefTest_ReturnsFalse(self):
+    self.assertFalse(
+        can_bisect.IsValidTestForBisect('SupportedMaster/b/t/ref'))
+
+  def testIsValidTestForBisect_UnsupportedMaster_ReturnsFalse(self):
+    self.assertFalse(
+        can_bisect.IsValidTestForBisect('X/b/t/foo'))
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/catapult/dashboard/dashboard/change_internal_only.py b/catapult/dashboard/dashboard/change_internal_only.py
index f141099..8da986d 100644
--- a/catapult/dashboard/dashboard/change_internal_only.py
+++ b/catapult/dashboard/dashboard/change_internal_only.py
@@ -47,7 +47,7 @@
     })
 
   def post(self):
-    """Updates the selected bots internal_only_property.
+    """Updates the selected bots internal_only property.
 
     POST requests will be made by the task queue; tasks are added to the task
     queue either by a kick-off POST from the front-end form, or by this handler
@@ -106,7 +106,7 @@
           queue_name=_QUEUE_NAME)
 
   def _UpdateBot(self, bot_name, internal_only, cursor=None):
-    """Start updating internal_only for the given bot and associated data."""
+    """Starts updating internal_only for the given bot and associated data."""
     master, bot = bot_name.split('/')
     bot_key = ndb.Key('Master', master, 'Bot', bot)
 
diff --git a/catapult/dashboard/dashboard/chart_handler.py b/catapult/dashboard/dashboard/chart_handler.py
index 546944e..718d55e 100644
--- a/catapult/dashboard/dashboard/chart_handler.py
+++ b/catapult/dashboard/dashboard/chart_handler.py
@@ -21,11 +21,20 @@
 
   def RenderHtml(self, template_file, template_values, status=200):
     """Fills in template values for pages that show charts."""
-    revision_info = namespaced_stored_object.Get(_REVISION_INFO_KEY) or {}
-    template_values.update({
-        'revision_info': json.dumps(revision_info),
-        'warning_message': layered_cache.Get('warning_message'),
-        'warning_bug': layered_cache.Get('warning_bug'),
-    })
+    template_values.update(self._GetChartValues())
+    template_values['revision_info'] = json.dumps(
+        template_values['revision_info'])
     return super(ChartHandler, self).RenderHtml(
         template_file, template_values, status)
+
+  def GetDynamicVariables(self, template_values, request_path=None):
+    template_values.update(self._GetChartValues())
+    super(ChartHandler, self).GetDynamicVariables(
+        template_values, request_path)
+
+  def _GetChartValues(self):
+    return {
+        'revision_info': namespaced_stored_object.Get(_REVISION_INFO_KEY) or {},
+        'warning_message': layered_cache.Get('warning_message'),
+        'warning_bug': layered_cache.Get('warning_bug'),
+    }
diff --git a/catapult/dashboard/dashboard/datastore_hooks.py b/catapult/dashboard/dashboard/datastore_hooks.py
index 9e15b65..7b67be1 100644
--- a/catapult/dashboard/dashboard/datastore_hooks.py
+++ b/catapult/dashboard/dashboard/datastore_hooks.py
@@ -56,20 +56,42 @@
   request.registry['privileged'] = True
 
 
+def SetSinglePrivilegedRequest():
+  """Allows the current request to act as a privileged user only ONCE.
+
+  This should be called ONLY by handlers that have checked privilege immediately
+  before making a query. It will be automatically unset when the next query is
+  made.
+  """
+  request = webapp2.get_request()
+  request.registry['single_privileged'] = True
+
+
+def CancelSinglePrivilegedRequest():
+  """Disallows the current request to act as a privileged user only."""
+  request = webapp2.get_request()
+  request.registry['single_privileged'] = False
+
+
 def _IsServicingPrivilegedRequest():
   """Checks whether the request is considered privileged."""
   try:
     request = webapp2.get_request()
   except AssertionError:
-    # This only happens in unit tests, when the code gets called outside of
-    # a request.
+    # This happens in unit tests, when code gets called outside of a request.
     return False
-  if (not request or
-      hasattr(request, 'path') and request.path.startswith('/mapreduce')):
-    # Running a mapreduce.
+  path = getattr(request, 'path', '')
+  if path.startswith('/mapreduce'):
+    return True
+  if path.startswith('/_ah/queue/deferred'):
+    return True
+  if path.startswith('/_ah/pipeline/'):
     return True
   if request.registry.get('privileged', False):
     return True
+  if request.registry.get('single_privileged', False):
+    request.registry['single_privileged'] = False
+    return True
   whitelist = utils.GetIpWhitelist()
   if whitelist and hasattr(request, 'remote_addr'):
     return request.remote_addr in whitelist
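A minimal sketch of how a handler is expected to use the one-shot privilege added above (the handler and the graph_data import path are assumptions for illustration, not part of the patch):

    from dashboard import datastore_hooks
    from dashboard import graph_data

    def _FetchRowsForTest(test_key, limit=50):
        # The caller must already have verified that the current user may see
        # this test; only then is the one-query escalation requested.
        datastore_hooks.SetSinglePrivilegedRequest()
        # This query sees internal_only entities; the flag is cleared by the
        # query hook, so later queries in the same request are unprivileged.
        q = graph_data.Row.query(graph_data.Row.parent_test == test_key)
        return q.fetch(limit=limit)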
diff --git a/catapult/dashboard/dashboard/datastore_hooks_test.py b/catapult/dashboard/dashboard/datastore_hooks_test.py
index 503938b..61f8577 100644
--- a/catapult/dashboard/dashboard/datastore_hooks_test.py
+++ b/catapult/dashboard/dashboard/datastore_hooks_test.py
@@ -4,8 +4,6 @@
 
 import unittest
 
-import mock
-
 from google.appengine.ext import ndb
 
 from dashboard import datastore_hooks
@@ -25,14 +23,11 @@
 
   def setUp(self):
     super(DatastoreHooksTest, self).setUp()
-    testing_common.SetInternalDomain('google.com')
+    testing_common.SetIsInternalUser('internal@chromium.org', True)
+    testing_common.SetIsInternalUser('foo@chromium.org', False)
     self._AddDataToDatastore()
     datastore_hooks.InstallHooks()
-    get_request_patcher = mock.patch(
-        'webapp2.get_request',
-        mock.MagicMock(return_value=FakeRequest()))
-    self.mock_get_request = get_request_patcher.start()
-    self.addCleanup(get_request_patcher.stop)
+    self.PatchDatastoreHooksRequest()
 
   def tearDown(self):
     super(DatastoreHooksTest, self).tearDown()
@@ -44,7 +39,7 @@
     # there is a get() for the parent_test in the pre_put_hook. This should work
     # correctly in production because Rows and Tests should only be added by
     # /add_point, which is privileged.
-    self.SetCurrentUser('foo@google.com')
+    self.SetCurrentUser('internal@chromium.org')
     testing_common.AddTests(
         ['ChromiumPerf'],
         ['Win7External', 'FooInternal'], {
@@ -87,7 +82,7 @@
           parent=external_test_container_key, id=i, value=float(i * 2)).put()
     self.UnsetCurrentUser()
     sheriff.Sheriff(
-        id='external', email='external@chromium.org', internal_only=False).put()
+        id='external', email='foo@chromium.org', internal_only=False).put()
     sheriff.Sheriff(
         id='internal', email='internal@google.com', internal_only=True).put()
 
@@ -152,24 +147,24 @@
     if include_internal:
       self.assertEqual(2, len(sheriffs))
       self.assertEqual('external', sheriffs[0].key.string_id())
-      self.assertEqual('external@chromium.org', sheriffs[0].email)
+      self.assertEqual('foo@chromium.org', sheriffs[0].email)
       self.assertEqual('internal', sheriffs[1].key.string_id())
       self.assertEqual('internal@google.com', sheriffs[1].email)
     else:
       self.assertEqual(1, len(sheriffs))
       self.assertEqual('external', sheriffs[0].key.string_id())
-      self.assertEqual('external@chromium.org', sheriffs[0].email)
+      self.assertEqual('foo@chromium.org', sheriffs[0].email)
 
   def testQuery_NoUser_InternalOnlyNotFetched(self):
     self.UnsetCurrentUser()
     self._CheckQueryResults(include_internal=False)
 
   def testQuery_ExternalUser_InternalOnlyNotFetched(self):
-    self.SetCurrentUser('foo@yahoo.com')
+    self.SetCurrentUser('foo@chromium.org')
     self._CheckQueryResults(include_internal=False)
 
   def testQuery_InternalUser_InternalOnlyFetched(self):
-    self.SetCurrentUser('foo@google.com')
+    self.SetCurrentUser('internal@chromium.org')
     self._CheckQueryResults(True)
 
   def testQuery_PrivilegedRequest_InternalOnlyFetched(self):
@@ -177,6 +172,17 @@
     datastore_hooks.SetPrivilegedRequest()
     self._CheckQueryResults(True)
 
+  def testQuery_SinglePrivilegedRequest_InternalOnlyFetched(self):
+    self.UnsetCurrentUser()
+    datastore_hooks.SetSinglePrivilegedRequest()
+    # Not using _CheckQueryResults because this only affects a single query.
+    # First query has internal results.
+    rows = graph_data.Row.query().filter(graph_data.Row.value == 20).fetch()
+    self.assertEqual(2, len(rows))
+    # Second query does not.
+    rows = graph_data.Row.query().filter(graph_data.Row.value == 20).fetch()
+    self.assertEqual(1, len(rows))
+
   def _CheckGet(self, include_internal):
     m = ndb.Key('Master', 'ChromiumPerf').get()
     self.assertEqual(m.key.string_id(), 'ChromiumPerf')
@@ -201,7 +207,7 @@
       self.assertRaises(AssertionError, graph_data.Bot.get_by_id,
                         'FooInternal', parent=m.key)
     sheriff_entity = ndb.Key('Sheriff', 'external').get()
-    self.assertEqual(sheriff_entity.email, 'external@chromium.org')
+    self.assertEqual(sheriff_entity.email, 'foo@chromium.org')
     if include_internal:
       internal_sheriff_entity = ndb.Key('Sheriff', 'internal').get()
       self.assertEqual('internal@google.com', internal_sheriff_entity.email)
@@ -216,15 +222,15 @@
     self._CheckGet(include_internal=False)
 
   def testGet_ExternalUser(self):
-    self.SetCurrentUser('foo@yahoo.com')
+    self.SetCurrentUser('foo@chromium.org')
     self._CheckGet(include_internal=False)
 
   def testGet_InternalUser(self):
-    self.SetCurrentUser('foo@google.com')
+    self.SetCurrentUser('internal@chromium.org')
     self._CheckGet(include_internal=True)
 
   def testGet_AdminUser(self):
-    self.SetCurrentUser('test@example.com', is_admin=True)
+    self.SetCurrentUser('foo@chromium.org', is_admin=True)
     self._CheckGet(include_internal=True)
 
   def testGet_PrivilegedRequest(self):
diff --git a/catapult/dashboard/dashboard/debug_alert.py b/catapult/dashboard/dashboard/debug_alert.py
index 4a10fc7..32a938a 100644
--- a/catapult/dashboard/dashboard/debug_alert.py
+++ b/catapult/dashboard/dashboard/debug_alert.py
@@ -7,6 +7,7 @@
 import json
 import urllib
 
+from dashboard import datastore_hooks
 from dashboard import find_anomalies
 from dashboard import find_change_points
 from dashboard import request_handler
@@ -43,16 +44,17 @@
     try:
       test = self._GetTest()
       num_before, num_after = self._GetNumBeforeAfter()
-      config_name, config_dict = self._GetAnomalyConfigNameAndDict(test)
+      config_name = self._GetConfigName(test)
+      config_dict = anomaly_config.CleanConfigDict(self._GetConfigDict(test))
     except QueryParameterError as e:
       self.RenderHtml('debug_alert.html', {'error': e.message})
       return
 
     revision = self.request.get('rev')
     if revision:
-      rows = _FetchRowsAroundRev(test.key, int(revision), num_before, num_after)
+      rows = _FetchRowsAroundRev(test, int(revision), num_before, num_after)
     else:
-      rows = _FetchLatestRows(test.key, num_before)
+      rows = _FetchLatestRows(test, num_before)
 
     chart_series = _ChartSeries(rows)
     lookup = _RevisionList(rows)
@@ -104,34 +106,23 @@
       raise QueryParameterError('Invalid "num_before" or "num_after".')
     return num_before, num_after
 
-  def _GetAnomalyConfigNameAndDict(self, test):
-    """Gets the anomaly threshold dict to use and its name.
-
-    Args:
-      test: A Test entity.
-
-    Returns:
-      A (name, config dict) pair.
-
-    Raises:
-      ValueError: The user-specified dict couldn't be parsed.
-    """
-    # Get the anomaly config name and config dict based on the test.
-    config_name = 'Default config'
+  def _GetConfigName(self, test):
+    """Gets the name of the custom anomaly threshold, just for display."""
     if test.overridden_anomaly_config:
-      config_name = test.overridden_anomaly_config.string_id()
-    config_dict = anomaly_config.GetAnomalyConfigDict(test)
+      return test.overridden_anomaly_config.string_id()
+    if self.request.get('config'):
+      return 'Custom config'
+    return 'Default config'
 
-    # If the user specified a config, then use that.
+  def _GetConfigDict(self, test):
+    """Gets the name of the anomaly threshold dict to use."""
     input_config_json = self.request.get('config')
-    if input_config_json:
-      try:
-        config_dict = json.loads(input_config_json)
-      except ValueError:
-        raise QueryParameterError('Invalid JSON.')
-      config_name = 'Custom config'
-
-    return config_name, config_dict
+    if not input_config_json:
+      return anomaly_config.GetAnomalyConfigDict(test)
+    try:
+      return json.loads(input_config_json)
+    except ValueError:
+      raise QueryParameterError('Invalid JSON.')
 
 
 def SimulateAlertProcessing(chart_series, **config_dict):
@@ -246,29 +237,31 @@
   return [r.revision for r in rows]
 
 
-def _FetchLatestRows(test_key, num_points):
+def _FetchLatestRows(test, num_points):
   """Does a query for the latest Row entities in the given test.
 
   Args:
-    test_key: A Test entity key to fetch Row entities for.
+    test: A Test entity to fetch Row entities for.
     num_points: Number of points to fetch.
 
   Returns:
     A list of Row entities, ordered by revision. The number to fetch is limited
     to the number that is expected to be processed at once by GASP.
   """
+  assert utils.IsInternalUser() or not test.internal_only
+  datastore_hooks.SetSinglePrivilegedRequest()
   q = graph_data.Row.query(projection=['revision', 'value'])
-  q = q.filter(graph_data.Row.parent_test == test_key)
+  q = q.filter(graph_data.Row.parent_test == test.key)
   q = q.order(-graph_data.Row.revision)
   rows = list(reversed(q.fetch(limit=num_points)))
   return rows
 
 
-def _FetchRowsAroundRev(test_key, revision, num_before, num_after):
+def _FetchRowsAroundRev(test, revision, num_before, num_after):
   """Fetches Row entities before and after a given revision.
 
   Args:
-    test_key: A Test entity key.
+    test: A Test entity.
     revision: A Row ID.
     num_before: Maximum number of Rows before |revision| to fetch.
     num_after: Max number of Rows starting from |revision| to fetch.
@@ -278,15 +271,18 @@
     the "revision" and "value" properties, which are the only ones relevant
     to their use in this module.
   """
+  assert utils.IsInternalUser() or not test.internal_only
   query = graph_data.Row.query(projection=['revision', 'value'])
-  query = query.filter(graph_data.Row.parent_test == test_key)
+  query = query.filter(graph_data.Row.parent_test == test.key)
 
   before_query = query.filter(graph_data.Row.revision < revision)
   before_query = before_query.order(-graph_data.Row.revision)
+  datastore_hooks.SetSinglePrivilegedRequest()
   rows_before = list(reversed(before_query.fetch(limit=num_before)))
 
   after_query = query.filter(graph_data.Row.revision >= revision)
   after_query = after_query.order(graph_data.Row.revision)
+  datastore_hooks.SetSinglePrivilegedRequest()
   rows_at_and_after = after_query.fetch(num_after)
 
   return rows_before + rows_at_and_after
@@ -339,4 +335,3 @@
   """Returns a display string for the given bug ID property of an anomaly."""
   special_ids = {-1: 'INVALID', -2: 'IGNORE', None: 'NONE'}
   return special_ids.get(bug_id, str(bug_id))
-
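Because the single-privileged flag is consumed by the first query, _FetchRowsAroundRev above has to set it once per fetch. A compressed sketch of that pattern (the function name is illustrative and the imports are assumed):

    from dashboard import datastore_hooks
    from dashboard import graph_data
    from dashboard import utils

    def _FetchWindow(test, revision, num_before, num_after):
        # Never serve internal-only data to external users.
        assert utils.IsInternalUser() or not test.internal_only
        q = graph_data.Row.query(projection=['revision', 'value'])
        q = q.filter(graph_data.Row.parent_test == test.key)

        datastore_hooks.SetSinglePrivilegedRequest()  # covers the next query only
        before = list(reversed(q.filter(graph_data.Row.revision < revision)
                               .order(-graph_data.Row.revision)
                               .fetch(limit=num_before)))

        datastore_hooks.SetSinglePrivilegedRequest()  # re-set for the second fetch
        after = (q.filter(graph_data.Row.revision >= revision)
                 .order(graph_data.Row.revision)
                 .fetch(num_after))
        return before + after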
diff --git a/catapult/dashboard/dashboard/debug_alert_test.py b/catapult/dashboard/dashboard/debug_alert_test.py
index 62ee346..cc753b7 100644
--- a/catapult/dashboard/dashboard/debug_alert_test.py
+++ b/catapult/dashboard/dashboard/debug_alert_test.py
@@ -30,6 +30,7 @@
     app = webapp2.WSGIApplication(
         [('/debug_alert', debug_alert.DebugAlertHandler)])
     self.testapp = webtest.TestApp(app)
+    self.PatchDatastoreHooksRequest()
 
   def _AddSampleData(self):
     """Adds a Test and Row entities, and returns the Test key."""
@@ -116,6 +117,15 @@
     # The config JSON should also be put into the form on the page.
     self.assertIn('"min_relative_change": 0.75', response.body)
 
+  @mock.patch.object(debug_alert, 'SimulateAlertProcessing')
+  def testGet_WithBogusParameterNames_ParameterIgnored(self, simulate_mock):
+    test_key = self._AddSampleData()
+    response = self.testapp.get(
+        '/debug_alert?test_path=%s&config=%s' %
+        (utils.TestPath(test_key), '{"foo":0.75}'))
+    simulate_mock.assert_called_once_with(mock.ANY)
+    self.assertNotIn('"foo"', response.body)
+
   def testGet_WithInvalidCustomConfig_ErrorShown(self):
     test_key = self._AddSampleData()
     response = self.testapp.get(
@@ -138,13 +148,13 @@
 
   def testFetchLatestRows(self):
     test_key = self._AddSampleData()
-    rows = debug_alert._FetchLatestRows(test_key, 4)
+    rows = debug_alert._FetchLatestRows(test_key.get(), 4)
     revisions = [r.revision for r in rows]
     self.assertEqual([316, 317, 318, 319], revisions)
 
   def testFetchAroundRev(self):
     test_key = self._AddSampleData()
-    rows = debug_alert._FetchRowsAroundRev(test_key, 310, 5, 8)
+    rows = debug_alert._FetchRowsAroundRev(test_key.get(), 310, 5, 8)
     revisions = [r.revision for r in rows]
     self.assertEqual(
         [305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317],
@@ -152,13 +162,13 @@
 
   def testFetchRowsAroundRev_NotAllRowsAvailable(self):
     test_key = self._AddSampleData()
-    rows = debug_alert._FetchRowsAroundRev(test_key, 310, 100, 100)
+    rows = debug_alert._FetchRowsAroundRev(test_key.get(), 310, 100, 100)
     # There are only 20 rows in the sample data, so only 20 can be fetched.
     self.assertEqual(20, len(rows))
 
   def testChartSeries(self):
     test_key = self._AddSampleData()
-    rows = debug_alert._FetchRowsAroundRev(test_key, 310, 5, 5)
+    rows = debug_alert._FetchRowsAroundRev(test_key.get(), 310, 5, 5)
     # The indexes used in the chart series should match those in the lookup.
     self.assertEqual(
         [(0, 60.65), (1, 55.61), (2, 61.88), (3, 61.51), (4, 59.58),
@@ -167,7 +177,7 @@
 
   def testRevisionList(self):
     test_key = self._AddSampleData()
-    rows = debug_alert._FetchRowsAroundRev(test_key, 310, 5, 5)
+    rows = debug_alert._FetchRowsAroundRev(test_key.get(), 310, 5, 5)
     # The lookup dict maps indexes to x-values in the input series.
     self.assertEqual(
         [305, 306, 307, 308, 309, 310, 311, 312, 313, 314],
diff --git a/catapult/dashboard/dashboard/dispatcher.py b/catapult/dashboard/dashboard/dispatcher.py
index f38d362..6df1322 100644
--- a/catapult/dashboard/dashboard/dispatcher.py
+++ b/catapult/dashboard/dashboard/dispatcher.py
@@ -12,10 +12,11 @@
 from dashboard import associate_alerts
 from dashboard import auto_bisect
 from dashboard import auto_triage
+from dashboard import bad_bisect
 from dashboard import bisect_stats
 from dashboard import bisect_fyi
-from dashboard import bot_whitelist
 from dashboard import buildbucket_job_status
+from dashboard import can_bisect
 from dashboard import change_internal_only
 from dashboard import debug_alert
 from dashboard import dump_graph_json
@@ -26,7 +27,6 @@
 from dashboard import edit_site_config
 from dashboard import edit_test_owners
 from dashboard import email_summary
-from dashboard import embed
 from dashboard import file_bug
 from dashboard import get_logs
 from dashboard import graph_csv
@@ -40,14 +40,15 @@
 from dashboard import main
 from dashboard import migrate_test_names
 from dashboard import mr
+from dashboard import navbar
 from dashboard import new_points
 from dashboard import oauth2_decorator
+from dashboard import post_bisect_results
 from dashboard import put_entities_task
 from dashboard import report
 from dashboard import send_stoppage_alert_emails
 from dashboard import set_warning_message
 from dashboard import short_uri
-from dashboard import shrink_timestamp_revisions
 from dashboard import start_try_job
 from dashboard import stats
 from dashboard import test_buildbucket
@@ -62,11 +63,12 @@
     ('/associate_alerts', associate_alerts.AssociateAlertsHandler),
     ('/auto_bisect', auto_bisect.AutoBisectHandler),
     ('/auto_triage', auto_triage.AutoTriageHandler),
+    ('/bad_bisect', bad_bisect.BadBisectHandler),
     ('/bisect_fyi', bisect_fyi.BisectFYIHandler),
     ('/bisect_stats', bisect_stats.BisectStatsHandler),
-    ('/bot_whitelist', bot_whitelist.BotWhitelistHandler),
     (r'/buildbucket_job_status/(\d+)',
      buildbucket_job_status.BuildbucketJobStatusHandler),
+    ('/can_bisect', can_bisect.CanBisectHandler),
     ('/change_internal_only', change_internal_only.ChangeInternalOnlyHandler),
     ('/debug_alert', debug_alert.DebugAlertHandler),
     ('/delete_expired_entities', layered_cache.DeleteExpiredEntitiesHandler),
@@ -78,7 +80,6 @@
     ('/edit_site_config', edit_site_config.EditSiteConfigHandler),
     ('/edit_test_owners', edit_test_owners.EditTestOwnersHandler),
     ('/email_summary', email_summary.EmailSummaryHandler),
-    ('/embed', embed.EmbedHandler),
     ('/file_bug', file_bug.FileBugHandler),
     ('/get_logs', get_logs.GetLogsHandler),
     ('/graph_csv', graph_csv.GraphCsvHandler),
@@ -91,15 +92,15 @@
     ('/', main.MainHandler),
     ('/migrate_test_names', migrate_test_names.MigrateTestNamesHandler),
     ('/mr_deprecate_tests', mr.MRDeprecateTestsHandler),
+    ('/navbar', navbar.NavbarHandler),
     ('/new_points', new_points.NewPointsHandler),
+    ('/post_bisect_results', post_bisect_results.PostBisectResultsHandler),
     ('/put_entities_task', put_entities_task.PutEntitiesTaskHandler),
     ('/report', report.ReportHandler),
     ('/send_stoppage_alert_emails',
      send_stoppage_alert_emails.SendStoppageAlertEmailsHandler),
     ('/set_warning_message', set_warning_message.SetWarningMessageHandler),
     ('/short_uri', short_uri.ShortUriHandler),
-    ('/shrink_timestamp_revisions',
-     shrink_timestamp_revisions.ShrinkTimestampRevisionsHandler),
     ('/start_try_job', start_try_job.StartBisectHandler),
     ('/stats_around_revision', stats.StatsAroundRevisionHandler),
     ('/stats_for_alerts', stats.StatsForAlertsHandler),
@@ -108,8 +109,8 @@
     ('/update_bug_with_results',
      update_bug_with_results.UpdateBugWithResultsHandler),
     ('/update_test_suites', update_test_suites.UpdateTestSuitesHandler),
-    (oauth2_decorator.decorator.callback_path,
-     oauth2_decorator.decorator.callback_handler())
+    (oauth2_decorator.DECORATOR.callback_path,
+     oauth2_decorator.DECORATOR.callback_handler())
 ]
 
-app = webapp2.WSGIApplication(_URL_MAPPING, debug=False)
+APP = webapp2.WSGIApplication(_URL_MAPPING, debug=False)
diff --git a/catapult/dashboard/dashboard/dump_graph_json.py b/catapult/dashboard/dashboard/dump_graph_json.py
index 3e56954..02f44cc 100644
--- a/catapult/dashboard/dashboard/dump_graph_json.py
+++ b/catapult/dashboard/dashboard/dump_graph_json.py
@@ -121,7 +121,7 @@
     self.response.out.write(json.dumps(protobuf_strings))
 
   def _GetTestAncestors(self, test_keys):
-    """Get the Test, Bot, and Master entities that are ancestors of test."""
+    """Gets the Test, Bot, and Master entities that are ancestors of test."""
     entities = []
     added_parents = set()
     for test_key in test_keys:
@@ -136,7 +136,7 @@
     return entities
 
   def _FetchRowsAsync(self, test_keys, num_points):
-    """Fetches recent Row asychronously across all 'test_keys'."""
+    """Fetches recent Row asynchronously across all 'test_keys'."""
     rows = []
     futures = []
     for test_key in test_keys:
diff --git a/catapult/dashboard/dashboard/edit_site_config.py b/catapult/dashboard/dashboard/edit_site_config.py
index 941f9bb..3e2f53a 100644
--- a/catapult/dashboard/dashboard/edit_site_config.py
+++ b/catapult/dashboard/dashboard/edit_site_config.py
@@ -2,8 +2,13 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-"""Provides the web interface for adding and editing sheriff rotations."""
+"""Provides the web interface for adding and editing stored configs."""
 
+# TODO(qyearsley): If a namespaced config is set, don't show/edit
+# the non-namespaced configs. If a non-namespaced config is set,
+# don't show or edit the namespaced configs.
+
+import difflib
 import json
 
 from google.appengine.api import app_identity
@@ -18,18 +23,17 @@
 
 _NOTIFICATION_EMAIL_BODY = """
 The configuration of %(hostname)s was changed by %(user)s.
-Here are the new values:
 
 Key: %(key)s
 
-Non-namespaced value:
-%(value)s
+Non-namespaced value diff:
+%(value_diff)s
 
-Externally-visible value:
-%(external_value)s
+Externally-visible value diff:
+%(external_value_diff)s
 
-Internal-only value:
-%(internal_value)s
+Internal-only value diff:
+%(internal_value_diff)s
 """
 
 # TODO(qyearsley): Make this customizable by storing the value in datastore.
@@ -74,45 +78,68 @@
       self.RenderHtml('edit_site_config.html', {})
       return
 
-    value = self.request.get('value').strip()
-    external_value = self.request.get('external_value').strip()
-    internal_value = self.request.get('internal_value').strip()
+    new_value_json = self.request.get('value').strip()
+    new_external_value_json = self.request.get('external_value').strip()
+    new_internal_value_json = self.request.get('internal_value').strip()
+
     template_params = {
         'key': key,
-        'value': value,
-        'external_value': external_value,
-        'internal_value': internal_value,
+        'value': new_value_json,
+        'external_value': new_external_value_json,
+        'internal_value': new_internal_value_json,
     }
 
     try:
-      if value:
-        stored_object.Set(key, json.loads(value))
-      if external_value:
-        namespaced_stored_object.SetExternal(key, json.loads(external_value))
-      if internal_value:
-        namespaced_stored_object.Set(key, json.loads(internal_value))
+      new_value = json.loads(new_value_json or 'null')
+      new_external_value = json.loads(new_external_value_json or 'null')
+      new_internal_value = json.loads(new_internal_value_json or 'null')
     except ValueError:
       template_params['error'] = 'Invalid JSON in at least one field.'
+      self.RenderHtml('edit_site_config.html', template_params)
+      return
 
-    _SendNotificationEmail(key, template_params)
+    old_value = stored_object.Get(key)
+    old_external_value = namespaced_stored_object.GetExternal(key)
+    old_internal_value = namespaced_stored_object.Get(key)
+
+    stored_object.Set(key, new_value)
+    namespaced_stored_object.SetExternal(key, new_external_value)
+    namespaced_stored_object.Set(key, new_internal_value)
+
+    _SendNotificationEmail(
+        key, old_value, old_external_value, old_internal_value,
+        new_value, new_external_value, new_internal_value)
+
     self.RenderHtml('edit_site_config.html', template_params)
 
 
-def _SendNotificationEmail(key, email_body_params):
+def _SendNotificationEmail(
+    key, old_value, old_external_value, old_internal_value,
+    new_value, new_external_value, new_internal_value):
   user_email = users.get_current_user().email()
   subject = 'Config "%s" changed by %s' % (key, user_email)
-  email_body_params.update({
+  email_body = _NOTIFICATION_EMAIL_BODY % {
+      'key': key,
+      'value_diff': _DiffJson(old_value, new_value),
+      'external_value_diff': _DiffJson(old_external_value, new_external_value),
+      'internal_value_diff': _DiffJson(old_internal_value, new_internal_value),
       'hostname': app_identity.get_default_version_hostname(),
-      'user': user_email,
-  })
-  body = _NOTIFICATION_EMAIL_BODY % email_body_params
+      'user': user_email,
+  }
   mail.send_mail(
-      sender=_SENDER_ADDRESS, to=_NOTIFICATION_ADDRESS,
-      subject=subject, body=body)
+      sender=_SENDER_ADDRESS,
+      to=_NOTIFICATION_ADDRESS,
+      subject=subject,
+      body=email_body)
+
+
+def _DiffJson(obj1, obj2):
+  """Returns a string diff of two JSON-serializable objects."""
+  differ = difflib.Differ()
+  return '\n'.join(differ.compare(
+      _FormatJson(obj1).splitlines(),
+      _FormatJson(obj2).splitlines()))
 
 
 def _FormatJson(obj):
-  if not obj:
-    return ''
   return json.dumps(obj, indent=2, sort_keys=True)
-
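For reference, _DiffJson is plain difflib over pretty-printed JSON; a standalone sketch of what it produces (exact whitespace may vary slightly between Python versions):

    import difflib
    import json

    def _FormatJson(obj):
        return json.dumps(obj, indent=2, sort_keys=True)

    def _DiffJson(obj1, obj2):
        # difflib.Differ prefixes unchanged lines with '  ', removals with
        # '- ', additions with '+ ', and hint lines with '? '.
        differ = difflib.Differ()
        return '\n'.join(differ.compare(
            _FormatJson(obj1).splitlines(), _FormatJson(obj2).splitlines()))

    print(_DiffJson([1, 3], [1, 2, 3]))
    #   [
    #     1,
    # +   2,
    #     3
    #   ]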
diff --git a/catapult/dashboard/dashboard/edit_site_config_test.py b/catapult/dashboard/dashboard/edit_site_config_test.py
index fc13a77..71bbd6f 100644
--- a/catapult/dashboard/dashboard/edit_site_config_test.py
+++ b/catapult/dashboard/dashboard/edit_site_config_test.py
@@ -23,8 +23,9 @@
     app = webapp2.WSGIApplication(
         [('/edit_site_config', edit_site_config.EditSiteConfigHandler)])
     self.testapp = webtest.TestApp(app)
-    testing_common.SetInternalDomain('internal.org')
-    self.SetCurrentUser('foo@internal.org', is_admin=True)
+    testing_common.SetIsInternalUser('internal@chromium.org', True)
+    testing_common.SetIsInternalUser('foo@chromium.org', False)
+    self.SetCurrentUser('internal@chromium.org', is_admin=True)
 
   def testGet_NoKey_ShowsPageWithNoTextArea(self):
     response = self.testapp.get('/edit_site_config')
@@ -51,7 +52,7 @@
     }, status=403)
 
   def testPost_ExternalUser_ShowsErrorMessage(self):
-    self.SetCurrentUser('foo@external.org')
+    self.SetCurrentUser('foo@chromium.org')
     response = self.testapp.post('/edit_site_config', {
         'key': 'foo',
         'value': '[1, 2, 3]',
@@ -90,10 +91,12 @@
     self.assertEqual({'x': 'y'}, namespaced_stored_object.GetExternal('foo'))
 
   def testPost_SendsNotificationEmail(self):
+    namespaced_stored_object.SetExternal('foo', {'x': 10, 'y': 2})
+    namespaced_stored_object.Set('foo', {'z': 3, 'x': 1})
     self.testapp.post('/edit_site_config', {
         'key': 'foo',
-        'external_value': '{"x": "y"}',
-        'internal_value': '{"x": "yz"}',
+        'external_value': '{"x": 1, "y": 2}',
+        'internal_value': '{"x": 1, "z": 3, "y": 2}',
         'xsrf_token': xsrf.GenerateToken(users.get_current_user()),
     })
     messages = self.mail_stub.get_sent_messages()
@@ -101,9 +104,38 @@
     self.assertEqual('gasper-alerts@google.com', messages[0].sender)
     self.assertEqual('chrome-perf-dashboard-alerts@google.com', messages[0].to)
     self.assertEqual(
-        'Config "foo" changed by foo@internal.org', messages[0].subject)
-    self.assertIn('{"x": "y"}', str(messages[0].body))
-    self.assertIn('{"x": "yz"}', str(messages[0].body))
+        'Config "foo" changed by internal@chromium.org', messages[0].subject)
+    self.assertIn(
+        'Non-namespaced value diff:\n'
+        '  null\n'
+        '\n'
+        'Externally-visible value diff:\n'
+        '  {\n'
+        '-   "x": 10, \n'
+        '?         -\n'
+        '\n'
+        '+   "x": 1, \n'
+        '    "y": 2\n'
+        '  }\n'
+        '\n'
+        'Internal-only value diff:\n'
+        '  {\n'
+        '    "x": 1, \n'
+        '+   "y": 2, \n'
+        '    "z": 3\n'
+        '  }\n',
+        str(messages[0].body))
+
+
+class HelperFunctionTests(unittest.TestCase):
+
+  def testDiffJson_NoneToEmptyString(self):
+    self.assertEqual('- null\n+ ""', edit_site_config._DiffJson(None, ''))
+
+  def testDiffJson_AddListItem(self):
+    self.assertEqual(
+        '  [\n    1, \n+   2, \n    3\n  ]',
+        edit_site_config._DiffJson([1, 3], [1, 2, 3]))
 
 
 if __name__ == '__main__':
diff --git a/catapult/dashboard/dashboard/edit_test_owners_test.py b/catapult/dashboard/dashboard/edit_test_owners_test.py
index 2906493..17f6e85 100644
--- a/catapult/dashboard/dashboard/edit_test_owners_test.py
+++ b/catapult/dashboard/dashboard/edit_test_owners_test.py
@@ -76,7 +76,7 @@
     ]
     self.assertEqual(expected_owner_info, owner_info)
 
-  def test_NonAdminAddsAndRemovesSelf_Succeeds(self):
+  def testPost_NonAdminAddsAndRemovesSelf_Succeeds(self):
     self.SetCurrentUser('chris@chromium.org', is_admin=False)
     self._SetOwnersDict(_SAMPLE_OWNER_DICT)
 
@@ -97,7 +97,7 @@
     owner_dict = layered_cache.GetExternal(test_owner._MASTER_OWNER_CACHE_KEY)
     self.assertNotIn('ChromiumPerf/spaceport', owner_dict)
 
-  def test_AdminAddsAndRemovesOther_Succeeds(self):
+  def testPost_AdminAddsAndRemovesOther_Succeeds(self):
     self.SetCurrentUser('chris@chromium.org', is_admin=True)
     self._SetOwnersDict(_SAMPLE_OWNER_DICT)
 
diff --git a/catapult/dashboard/dashboard/elements/alert-icon.html b/catapult/dashboard/dashboard/elements/alert-icon.html
index 479b5a4..de6129f 100644
--- a/catapult/dashboard/dashboard/elements/alert-icon.html
+++ b/catapult/dashboard/dashboard/elements/alert-icon.html
@@ -1,3 +1,10 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
 <link rel="import" href="/components/core-icon/core-icon.html">
 
 <polymer-element name="alert-icon">
diff --git a/catapult/dashboard/dashboard/elements/alert-remove-box-test.html b/catapult/dashboard/dashboard/elements/alert-remove-box-test.html
index fc4f53a..d9eba1f 100644
--- a/catapult/dashboard/dashboard/elements/alert-remove-box-test.html
+++ b/catapult/dashboard/dashboard/elements/alert-remove-box-test.html
@@ -1,12 +1,12 @@
-<!doctype html>
+<!DOCTYPE html>
 <!--
-Copyright 2015 The Chromium Authors. All rights reserved.
+Copyright 2016 The Chromium Authors. All rights reserved.
 Use of this source code is governed by a BSD-style license that can be
 found in the LICENSE file.
 -->
-<link rel="import" href="/tracing/core/test_utils.html">
 
 <link rel="import" href="/dashboard/elements/alert-remove-box.html">
+<link rel="import" href="/tracing/core/test_utils.html">
 
 <script>
 'use strict';
@@ -14,6 +14,9 @@
 tr.b.unittest.testSuite(function() {
   /**
    * Mock XMLHttpRequest which returns a canned response.
+   *
+   * TODO(qyearsley): Replace this with the XHR mock in testing_common.html.
+   *
    * @param {string} responseText Text to put in mock XHR response.
    * @param {Object} expectedData Parameter data expected by the test.
    * @return {Function} A fake constructor for XMLHttpRequest.
@@ -49,7 +52,7 @@
     }
   };
 
-  test('onRemoveBug, request parameters are used.', function() {
+  test('onRemoveBug sends request with parameters', function() {
     var box = document.createElement('alert-remove-box');
     box['key'] = 'alert-key';
     box['xsrfToken'] = 'xsrf-token';
@@ -65,8 +68,7 @@
     });
   }, testOptions);
 
-  test('onRemoveBug, an "untriaged" event is fired when the XHR completes.',
-       function() {
+  test('onRemoveBug causes an "untriaged" event to be fired', function() {
     var box = document.createElement('alert-remove-box');
     window.XMLHttpRequest = new XMLHttpRequestMock('{"bug_id": "REMOVE"}');
     box.addEventListener('untriaged', function() {
diff --git a/catapult/dashboard/dashboard/elements/alert-remove-box.html b/catapult/dashboard/dashboard/elements/alert-remove-box.html
index 1de53ce..3b16704 100644
--- a/catapult/dashboard/dashboard/elements/alert-remove-box.html
+++ b/catapult/dashboard/dashboard/elements/alert-remove-box.html
@@ -1,9 +1,15 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
 <!--
 The alert-remove-box element provides the functionality of dis-associating
 an alert from an issue on the issue tracker.
 -->
-<link rel="import" href="/components/paper-spinner/paper-spinner.html">
 
+<link rel="import" href="/components/paper-spinner/paper-spinner.html">
 <link rel="import" href="/dashboard/static/simple_xhr.html">
 
 <polymer-element name="alert-remove-box" attributes="xsrfToken key">
diff --git a/catapult/dashboard/dashboard/elements/alerts-page-test.html b/catapult/dashboard/dashboard/elements/alerts-page-test.html
new file mode 100644
index 0000000..f44fb1a
--- /dev/null
+++ b/catapult/dashboard/dashboard/elements/alerts-page-test.html
@@ -0,0 +1,93 @@
+<!DOCTYPE html>
+<!--
+Copyright 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/dashboard/elements/alerts-page.html">
+<link rel="import" href="/dashboard/static/testing_common.html">
+
+<link rel="import" href="/tracing/core/test_utils.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+
+  var testOptions = {
+    tearDown: function() {
+      testing_common.clearXhrMock();
+    }
+  };
+
+  test('instantiation', function() {
+    var mockResponse = {
+      'anomaly_list': [
+        {
+          'key': '12345',
+          'group': null,
+          'start_revision': 55555,
+          'end_revision': 55558,
+          'date': '2016-02-27',
+          'master': 'Chromium',
+          'bot': 'win',
+          'testsuite': 'sunspider',
+          'test': 'Total',
+          'bug_id': null,
+          'dashboard_link': 'http://do/not/click/me',
+          'median_after_anomaly': 40,
+          'median_before_anomaly': 20,
+          'percent_changed': '50%',
+          'improvement': true,
+          'bisect_status': null,
+          'recovered': false,
+        },
+        {
+          'key': '54321',
+          'group': null,
+          'start_revision': 55555,
+          'end_revision': 55558,
+          'date': '2016-02-27',
+          'master': 'Chromium',
+          'bot': 'win',
+          'testsuite': 'sunspider',
+          'test': 'Total',
+          'bug_id': null,
+          'dashboard_link': 'http://do/not/click/me',
+          'median_after_anomaly': 10,
+          'median_before_anomaly': 20,
+          'percent_changed': '100%',
+          'improvement': false,
+          'bisect_status': null,
+          'recovered': false,
+        },
+      ],
+      'stoppage_alert_list': [
+        {
+          'key': '12345',
+          'group': null,
+          'start_revision': 55555,
+          'end_revision': 55558,
+          'date': '2016-02-27',
+          'master': 'Chromium',
+          'bot': 'win',
+          'testsuite': 'sunspider',
+          'test': 'Total',
+          'bug_id': null,
+          'dashboard_link': 'http://do/not/click/me',
+          'mail_sent': false,
+          'last_row_date': 'N/A',
+          'recovered': false,
+        }
+      ],
+      'sheriff_list': ['Chromium Perf Sheriff', 'V8 Sheriff', 'CrOS Sheriff'],
+      'xsrf_token': '12345'
+    };
+    testing_common.addXhrMock('*', JSON.stringify(mockResponse));
+    var page = document.createElement('alerts-page');
+    this.addHTMLOutput(page);
+  }, testOptions);
+
+});
+</script>
diff --git a/catapult/dashboard/dashboard/elements/alerts-page.html b/catapult/dashboard/dashboard/elements/alerts-page.html
new file mode 100644
index 0000000..d50c4f2
--- /dev/null
+++ b/catapult/dashboard/dashboard/elements/alerts-page.html
@@ -0,0 +1,245 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link type="text/css" rel="stylesheet" href="/dashboard/static/base.css">
+<link rel="import" href="/components/paper-button/paper-button.html">
+<link rel="import" href="/components/polymer/polymer.html">
+<link rel="import" href="/dashboard/elements/alerts-table.html">
+<link rel="import" href="/dashboard/elements/overlay-message.html">
+<link rel="import" href="/dashboard/elements/quick-log.html">
+<link rel="import" href="/dashboard/elements/select-menu.html">
+<link rel="import" href="/dashboard/static/simple_xhr.html">
+<link rel="import" href="/dashboard/static/uri.html">
+
+<polymer-element name="alerts-page">
+  <template>
+    <style>
+    .error {
+      color: #dd4b39;
+      font-weight: bold;
+    }
+
+    .center {
+      margin: auto;
+      padding: 10px;
+    }
+
+    /* The action bar contains the sheriff menu and the toggle buttons. */
+    #action-bar {
+      margin-top: 20px;
+      width: 100%;
+    }
+
+    /* The content container contains everything below the sheriff select menu.
+     */
+    #content {
+      display: inline-flex;
+      display: -webkit-inline-flex;
+      flex-direction: column;
+      -webkit-flex-direction: column;
+      align-items: flex-start;
+      -webkit-align-items: flex-start;
+    }
+
+    /* This class indicates a button toggled on (e.g. show improvements). */
+    .alert-togglebutton {
+      float: right;
+      margin-left: 4px;
+      margin-right: 4px;
+    }
+
+    #auto-triage-log {
+      padding: 15px 0 15px 0;
+    }
+
+    #anomaly-table, #stoppage-alert-table {
+      width: 100%;
+    }
+
+    #loading-spinner {
+      width: 100%;
+      display: flex;
+      justify-content: center;
+    }
+    </style>
+    <template if="{{loading}}">
+      <div id="loading-spinner"><img src="//www.google.com/images/loading.gif"></div>
+    </template>
+    <template if="{{error}}">
+      <div class="error">{{error}}</div>
+    </template>
+    <template if="{{!(loading || error)}}">
+      <div id="content">
+        <div id="action-bar">
+          <select-menu id="sheriff-select"
+                       menuItems="{{sheriffList}}"
+                       on-core-activate="{{onSheriffChange}}"></select-menu>
+          <paper-button raised noink id="improvements-toggle"
+                        class="alert-togglebutton"
+                        on-click="{{onToggleImprovements}}">Show improvements</paper-button>
+          <paper-button raised noink id="triaged-toggle"
+                        class="alert-togglebutton"
+                        on-click="{{onToggleTriaged}}">Show triaged</paper-button>
+        </div>
+        <template if="{{anomalies.length > 0}}">
+          <h2>Performance alerts</h2>
+          <p id='num-alerts'>
+            <template if="{{anomalies.length == 1}}">1 alert.</template>
+            <template if="{{anomalies.length != 1}}">{{anomalies.length}} alerts.</template>
+          </p>
+          <alerts-table id="anomaly-table"
+                        xsrfToken="{{xsrfToken}}"
+                        alertList="{{anomalies}}"
+                        extraColumns="{{extraAnomaliesColumns}}"
+                        on-sortby="{{onPushHistoryState}}"
+                        on-sortdirection="{{onPushHistoryState}}">
+          </alerts-table>
+        </template>
+        <template if="{{stoppageAlerts.length > 0}}">
+          <h2>Data stoppage alerts</h2>
+          <alerts-table id="stoppage-alert-table"
+                        xsrfToken="{{xsrfToken}}"
+                        alertList="{{stoppageAlerts}}"
+                        extraColumns="{{extraStoppageAlertsColumns}}">
+          </alerts-table>
+        </template>
+        <template if="{{anomalies.length == 0 && stoppageAlerts.length == 0 && !error}}">
+          <h2 class="center">All alerts triaged!</h2>
+          <img class="center" src="http://thecatapi.com/api/images/get?api_key=MjUzMDQ&amp;category=space&amp;size=small">
+        </template>
+        <quick-log id="auto-triage-log" xsrfToken="{{xsrfToken}}"
+                   style="width:100%; display:block;"></quick-log>
+      </div>
+    </template>
+  </template>
+  <script>
+    'use strict';
+    Polymer('alerts-page', {
+      loading: true,
+
+      get anomaliesTable() {
+        return this.$['anomaly-table'];
+      },
+
+      get stoppageAlertsTable() {
+        return this.$['stoppage-alert-table'];
+      },
+
+      extraAnomaliesColumns: [{
+        'key': 'percent_changed',
+        'label': 'Delta %'
+      }],
+
+      extraStoppageAlertsColumns: [{
+        'key': 'last_row_date',
+        'label': 'Date'
+      }],
+
+      onSheriffChange: function(e) {
+        var sheriff = e.detail.item.getAttribute('label');
+        if (!sheriff) {
+          return;
+        }
+        var params = uri.getAllParameters();
+        params['sheriff'] = sheriff;
+        // TODO(sullivan): changing the param should automatically update
+        // everything without needing to reload.
+        window.location.href = uri.getCurrentPathWithParams(params);
+      },
+
+      onToggleTriaged: function(e) {
+        var params = uri.getAllParameters();
+        if (params['triaged']) {
+          delete params['triaged'];
+        } else {
+          params['triaged'] = 'true';
+        }
+        // TODO(sullivan): changing the param should automatically update
+        // everything without needing to reload.
+        window.location.href = uri.getCurrentPathWithParams(params);
+      },
+
+      onToggleImprovements: function(e) {
+        var params = uri.getAllParameters();
+        if (params['improvements']) {
+          delete params['improvements'];
+        } else {
+          params['improvements'] = 'true';
+        }
+        // TODO(sullivan): changing the param should automatically update
+        // everything without needing to reload.
+        window.location.href = uri.getCurrentPathWithParams(params);
+      },
+
+      onPopState: function(e) {
+        // Pop State event will have a non-null state if this came from an
+        // actual pop instead of the load event.
+        if (e['state']) {
+          this.updateFromURIParameters();
+        }
+      },
+
+      onPushHistoryState: function(event, detail, sender) {
+        if (!sender) {
+          return;
+        }
+        var params = uri.getAllParameters();
+        params['sortby'] = sender['sortBy'];
+        params['sortdirection'] = sender['sortDirection'];
+        var newUri = uri.getCurrentPathWithParams(params);
+        history.pushState(params, '', newUri);
+      },
+
+      updateFromURIParameters: function() {
+        this.anomaliesTable.sortBy = uri.getParameter('sortby', 'end_revision');
+        this.anomaliesTable.sortDirection = uri.getParameter(
+            'sortdirection', 'down');
+        var sheriff = uri.getParameter('sheriff', 'Chromium Perf Sheriff');
+        this.$['sheriff-select'].select(sheriff);
+        // The show improvements and show triaged toggles are initially "off";
+        // set them to on if the corresponding query parameter is set.
+        // The buttons are displayed differently if they have the "active"
+        // attribute.
+        if (uri.getParameter('improvements')) {
+          this.$['improvements-toggle'].setAttribute('active', '');
+        }
+        if (uri.getParameter('triaged')) {
+          this.$['triaged-toggle'].setAttribute('active', '');
+        }
+      },
+
+      ready: function() {
+        this.sheriff = uri.getParameter('sheriff', 'Chromium Perf Sheriff');
+        this.sortBy = uri.getParameter('sortby', 'end_revision');
+        this.sortDirection = uri.getParameter('sortdirection', 'down');
+        this.showImprovements = uri.getParameter('improvements', false);
+        this.showTriaged = uri.getParameter('triaged', false);
+        var params = {
+          'sheriff': this.sheriff
+        };
+        if (this.showImprovements) {
+          params['improvements'] = true;
+        }
+        if (this.showTriaged) {
+          params['triaged'] = true;
+        }
+        simple_xhr.send('/alerts', params,
+          function(response) {
+            this.anomalies = response['anomaly_list'];
+            this.stoppageAlerts = response['stoppage_alert_list'];
+            this.sheriffList = response['sheriff_list'];
+            this.xsrfToken = response['xsrf_token'];
+            this.loading = false;
+          }.bind(this),
+          function(msg) {
+            this.error = msg;
+            this.loading = false;
+          }.bind(this));
+      }
+    });
+  </script>
+</polymer-element>
diff --git a/catapult/dashboard/dashboard/elements/alerts-table-test.html b/catapult/dashboard/dashboard/elements/alerts-table-test.html
new file mode 100644
index 0000000..1ffb6fa
--- /dev/null
+++ b/catapult/dashboard/dashboard/elements/alerts-table-test.html
@@ -0,0 +1,98 @@
+<!DOCTYPE html>
+<!--
+Copyright 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/dashboard/elements/alerts-page.html">
+<link rel="import" href="/dashboard/static/testing_common.html">
+
+<link rel="import" href="/tracing/core/test_utils.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+
+  var testOptions = {};
+
+  test('instantiate', function() {
+    var table = document.createElement('alerts-table');
+    table.alertList = [
+      {
+        'key': '12345',
+        'group': null,
+        'start_revision': 55555,
+        'end_revision': 55558,
+        'date': '2016-02-27',
+        'master': 'Chromium',
+        'bot': 'win',
+        'testsuite': 'sunspider',
+        'test': 'Total',
+        'bug_id': null,
+        'dashboard_link': 'http://do/not/click/me',
+        'median_after_anomaly': 40,
+        'median_before_anomaly': 20,
+        'percent_changed': '50%',
+        'improvement': true,
+        'bisect_status': null,
+        'recovered': false,
+      },
+      {
+        'key': '54321',
+        'group': null,
+        'start_revision': 55555,
+        'end_revision': 55558,
+        'date': '2016-02-27',
+        'master': 'Chromium',
+        'bot': 'win',
+        'testsuite': 'sunspider',
+        'test': 'Total',
+        'bug_id': null,
+        'dashboard_link': 'http://do/not/click/me',
+        'median_after_anomaly': 10,
+        'median_before_anomaly': 20,
+        'percent_changed': '100%',
+        'improvement': false,
+        'bisect_status': null,
+        'recovered': false,
+      }
+    ];
+    this.addHTMLOutput(table);
+  }, testOptions);
+
+  test('test xsrf token set in alert list', function() {
+    var table = document.createElement('alerts-table');
+    table.xsrfToken = 'abcdef012345';
+    table.alertList = [
+      {
+        'key': '54321',
+        'group': null,
+        'start_revision': 55555,
+        'end_revision': 55558,
+        'date': '2016-02-27',
+        'master': 'Chromium',
+        'bot': 'win',
+        'testsuite': 'sunspider',
+        'test': 'Total',
+        'bug_id': null,
+        'dashboard_link': 'http://do/not/click/me',
+        'median_after_anomaly': 10,
+        'median_before_anomaly': 20,
+        'percent_changed': '100%',
+        'improvement': false,
+        'bisect_status': null,
+        'recovered': false,
+      }
+    ];
+    return new Promise(function(resolve) {
+      function check() {
+        assert.equal(table.alertList[0].xsrfToken, 'abcdef012345');
+        resolve();
+      }
+      setTimeout(check, 10);
+    });
+  }, testOptions);
+});
+</script>
diff --git a/catapult/dashboard/dashboard/elements/alerts-table.html b/catapult/dashboard/dashboard/elements/alerts-table.html
index 9c82022..f1e92be 100644
--- a/catapult/dashboard/dashboard/elements/alerts-table.html
+++ b/catapult/dashboard/dashboard/elements/alerts-table.html
@@ -1,3 +1,10 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
 <link rel="import" href="/components/paper-button/paper-button.html">
 
 <link rel="import" href="/dashboard/elements/bisect-status.html">
@@ -7,7 +14,8 @@
 <link rel="import" href="/dashboard/static/uri.html">
 
 <polymer-element name="alerts-table"
-                 attributes="sortBy sortDirection xsrfToken">
+                 attributes="sortBy sortDirection
+                             xsrfToken alertList extraColumns">
   <template>
     <style>
       #alerts {
@@ -273,7 +281,7 @@
           <input type="checkbox" id="header-checkbox" on-change="{{onHeaderCheckboxChange}}">
         </th>
         <th id="graphheader"></th>
-        <th id="bug_id" on-click="{{columnHeaderClicked}}">Bug ID</valth>
+        <th id="bug_id" on-click="{{columnHeaderClicked}}">Bug ID</th>
         <th id="end_revision" on-click="{{columnHeaderClicked}}">Revisions</th>
         <th id="master" on-click="{{columnHeaderClicked}}">Master</th>
         <th id="bot" on-click="{{columnHeaderClicked}}">Bot</th>
@@ -388,21 +396,29 @@
        */
       ready: function() {
         this.checkedAlerts = [];
-        if (this.alertList) {
-          this.initialize();
+      },
+
+      isRecursiveChange: function() {
+        if (this.isRecursingIntoChange) {
+          return true;
         }
+        this.isRecursingIntoChange = true;
+        setTimeout(function() {
+          this.isRecursingIntoChange = false;
+        }.bind(this), 10);
+        return false;
       },
 
       /**
        * Initializes the table.
        * This should be called after this.alertList has been set.
        */
-      initialize: function() {
-        this.alertList.forEach(function(alert) {
-          // The XSRF token is set for each row of the table here so that the
-          // alert-remove-box in each row can also have the XSRF token.
-          alert.xsrfToken = this.xsrfToken;
-        }, this);
+      alertListChanged: function() {
+        // Some calls to alertListChanged can change the alert list.
+        // If that happens, don't do anything.
+        if (this.isRecursiveChange()) {
+          return;
+        }
         this.initRowsBasedOnQueryParameters();
         this.showAlertsGrouped();
         this.updateBugColumn();
@@ -583,6 +599,13 @@
        * triaged alerts listed in the table.
        */
       updateBugColumn: function() {
+        // We need the xsrf token to be set in the individual bug-info-span
+        // element, and this can be done by setting it in the alert objects,
+        // and binding xsrfToken in the template above.
+        var alertsTable = this;
+        this.alertList.forEach(function(alertRow) {
+          alertRow.xsrfToken = alertsTable.xsrfToken;
+        });
         // Make a list of all bug IDs that indicate an alert is triaged.
         // This includes the pseudo-bug-ids indicating invalid or ignored.
         // Note: The 'hideRow' parameter is set in static/alerts.js, and it
diff --git a/catapult/dashboard/dashboard/elements/autocomplete-box.html b/catapult/dashboard/dashboard/elements/autocomplete-box.html
index 361ed2c..47bccf5 100644
--- a/catapult/dashboard/dashboard/elements/autocomplete-box.html
+++ b/catapult/dashboard/dashboard/elements/autocomplete-box.html
@@ -1,3 +1,9 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
 <!--
 The 'autocomplete-box' is a input box with autocomplete drop-down menu.
 The drop-down menu has support for multi-select, grouping, and tag name.
@@ -36,6 +42,7 @@
         ...
     ]
 -->
+
 <link rel="import" href="/components/core-icon-button/core-icon-button.html">
 <link rel="import" href="/components/core-item/core-item.html">
 <link rel="import" href="/components/core-menu/core-menu.html">
diff --git a/catapult/dashboard/dashboard/elements/base-chart.html b/catapult/dashboard/dashboard/elements/base-chart.html
index 14675f1..50281e2 100644
--- a/catapult/dashboard/dashboard/elements/base-chart.html
+++ b/catapult/dashboard/dashboard/elements/base-chart.html
@@ -1,3 +1,10 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
 <polymer-element name="base-chart" attributes="chartTitle units flotData">
   <script>
     'use strict';
diff --git a/catapult/dashboard/dashboard/elements/base-form.html b/catapult/dashboard/dashboard/elements/base-form.html
index 71260c1..03e0403 100644
--- a/catapult/dashboard/dashboard/elements/base-form.html
+++ b/catapult/dashboard/dashboard/elements/base-form.html
@@ -1,9 +1,16 @@
+<!DOCTYPE html>
 <!--
-Base element for form.
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<!--
+Base element for forms.
 
 This element uses overlay-message element on the page with the ID
 "message-bar".
 -->
+
 <polymer-element name="base-form">
   <script>
     'use strict';
diff --git a/catapult/dashboard/dashboard/elements/bisect-button-test.html b/catapult/dashboard/dashboard/elements/bisect-button-test.html
new file mode 100644
index 0000000..672ecdc
--- /dev/null
+++ b/catapult/dashboard/dashboard/elements/bisect-button-test.html
@@ -0,0 +1,87 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/dashboard/elements/bisect-button.html">
+<link rel="import" href="/dashboard/static/testing_common.html">
+
+<link rel="import" href="/tracing/core/test_utils.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+
+  var testOptions = {
+    tearDown: function() {
+      testing_common.clearXhrMock();
+      testing_common.clearFixture();
+    }
+  };
+
+  test('instantiate_basic', function() {
+    var button = document.createElement('bisect-button');
+    this.addHTMLOutput(button);
+  }, testOptions);
+
+  test('canBisect false if there is no bisectInfo', function() {
+    var button = document.createElement('bisect-button');
+    testing_common.addToFixture(button);
+    button.bugId = 500500;
+    assert.isFalse(button.canBisect);
+  }, testOptions);
+
+  test('empty', function() {
+  }, testOptions);
+
+  test('canBisect set to true for valid tests after check', function() {
+    testing_common.addXhrMock(
+        '/can_bisect?test_path=ChromiumPerf%2Flinux%2Fsunspider%2FTotal' +
+        '&start_revision=323400&end_revision=323500',
+        'true');
+
+    var button = document.createElement('bisect-button');
+    testing_common.addToFixture(button);
+    button.bisectInfo = {
+      'testPath': 'ChromiumPerf/linux/sunspider/Total',
+      'goodRev': 323400,
+      'badRev': 323500
+    };
+
+    return new Promise(function(resolve) {
+      function check() {
+        assert.isTrue(button.canBisect);
+        resolve();
+      }
+      setTimeout(check, 10);
+    });
+  }, testOptions);
+
+  test('canBisect set to false for invalid tests after check', function() {
+    testing_common.addXhrMock(
+        '/can_bisect?test_path=Chromium%2Flinux%2Fsizes' +
+        '&start_revision=323400&end_revision=323500',
+        'false');
+
+    var button = document.createElement('bisect-button');
+    testing_common.addToFixture(button);
+    button.bisectInfo = {
+      'testPath': 'Chromium/linux/sizes',
+      'goodRev': 323400,
+      'badRev': 323500
+    };
+
+    return new Promise(function(resolve) {
+      function check() {
+        assert.isFalse(button.canBisect);
+        resolve();
+      }
+      setTimeout(check, 100);
+    });
+  }, testOptions);
+
+});
+</script>
diff --git a/catapult/dashboard/dashboard/elements/bisect-button.html b/catapult/dashboard/dashboard/elements/bisect-button.html
index 7a94331..db61e16 100644
--- a/catapult/dashboard/dashboard/elements/bisect-button.html
+++ b/catapult/dashboard/dashboard/elements/bisect-button.html
@@ -1,7 +1,14 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
 <link rel="import" href="/components/paper-button/paper-button.html">
 
 <link rel="import" href="/dashboard/elements/bisect-form.html">
-<link rel="import" href="/dashboard/static/bisect_utils.html">
+<link rel="import" href="/dashboard/static/simple_xhr.html">
 
 <polymer-element name="bisect-button" attributes="bisectInfo bugId xsrfToken">
   <template>
@@ -65,13 +72,25 @@
          * Updates the canBisect state based on the bisectInfo state.
          */
         update: function() {
+          this.canBisect = false;
           if (!this.bisectInfo) {
-            this.canBisect = false;
             return;
           }
-          var testPath = this.bisectInfo.testPath;
-          var rev = this.bisectInfo.badRev;
-          this.canBisect = bisect_utils.canBisect(testPath, rev);
+          var that = this;
+          simple_xhr.send(
+              '/can_bisect',
+              {
+                'test_path': this.bisectInfo.testPath,
+                'start_revision': this.bisectInfo.goodRev,
+                'end_revision': this.bisectInfo.badRev,
+              },
+              function loadCallback(responseBool) {
+                that.canBisect = responseBool;
+              },
+              function errorCallback(message) {
+                console.warn('Request to /can_bisect failed.', message);
+                that.canBisect = true;
+              });
         },
 
         /**
diff --git a/catapult/dashboard/dashboard/elements/bisect-form.html b/catapult/dashboard/dashboard/elements/bisect-form.html
index e0da090..397bcb7 100644
--- a/catapult/dashboard/dashboard/elements/bisect-form.html
+++ b/catapult/dashboard/dashboard/elements/bisect-form.html
@@ -1,7 +1,14 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
 <!--
 The bisect-form element includes the form for all of the different stages of
 the bisect process after the user clicks on the bisect button.
 -->
+
 <link rel="import" href="/components/paper-button/paper-button.html">
 <link rel="import" href="/components/paper-dialog/paper-action-dialog.html">
 <link rel="import" href="/components/paper-spinner/paper-spinner.html">
diff --git a/catapult/dashboard/dashboard/elements/bisect-status.html b/catapult/dashboard/dashboard/elements/bisect-status.html
index d627221..30ec415 100644
--- a/catapult/dashboard/dashboard/elements/bisect-status.html
+++ b/catapult/dashboard/dashboard/elements/bisect-status.html
@@ -1,7 +1,14 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
 <!--
 The bisect-status element indicates whether the last bisect job
 for a bug succeeded or failed, or is currently in progress.
 -->
+
 <polymer-element name="bisect-status">
   <template>
     <style>
diff --git a/catapult/dashboard/dashboard/elements/bug-info-span.html b/catapult/dashboard/dashboard/elements/bug-info-span.html
index 31cfe6e..9bedae3 100644
--- a/catapult/dashboard/dashboard/elements/bug-info-span.html
+++ b/catapult/dashboard/dashboard/elements/bug-info-span.html
@@ -1,8 +1,15 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
 <!--
 The bug-info-span is an in-line element showing a bug ID with a link, etc.
 TODO(qyearsley): Expand this element and use it in chart-tooltip.
 -->
-<link rel="import" href="alert-remove-box.html">
+
+<link rel="import" href="/dashboard/elements/alert-remove-box.html">
 
 <polymer-element name="bug-info-span">
   <template>
diff --git a/catapult/dashboard/dashboard/elements/bug-info.html b/catapult/dashboard/dashboard/elements/bug-info.html
index 2affb23..42076fb 100644
--- a/catapult/dashboard/dashboard/elements/bug-info.html
+++ b/catapult/dashboard/dashboard/elements/bug-info.html
@@ -1,3 +1,10 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
 <link rel="import" href="/dashboard/elements/bisect-button.html">
 
 <polymer-element name="bug-info">
@@ -122,7 +129,7 @@
         this.$.bisect.bisectInfo = {
           badRev: minRevRange['end'],
           goodRev: minRevRange['start'] - 1,
-          testPath: this.getTestpathFromStartRev(alerts, minRevRange['start'])
+          testPath: this.getTestPathFromStartRev(alerts, minRevRange['start'])
         };
       },
 
@@ -133,7 +140,7 @@
        *
        * @return {?string} The test path.
        */
-      getTestpathFromStartRev: function(alerts, startRevision) {
+      getTestPathFromStartRev: function(alerts, startRevision) {
         for (var i = 0; i < alerts.length; i++) {
           if (alerts[i]['start_revision'] == startRevision) {
             return alerts[i]['master'] + '/' +
diff --git a/catapult/dashboard/dashboard/elements/chart-container-test.html b/catapult/dashboard/dashboard/elements/chart-container-test.html
index 090d8e0..fab411d 100644
--- a/catapult/dashboard/dashboard/elements/chart-container-test.html
+++ b/catapult/dashboard/dashboard/elements/chart-container-test.html
@@ -1,4 +1,4 @@
-<!doctype html>
+<!DOCTYPE html>
 <!--
 Copyright 2015 The Chromium Authors. All rights reserved.
 Use of this source code is governed by a BSD-style license that can be
@@ -11,13 +11,13 @@
 <script src="/flot/jquery.flot.fillbetween.min.js"></script>
 <script src="/flot/jquery.flot.selection.min.js"></script>
 
-<link rel="import" href="/tracing/core/test_utils.html">
-
 <link rel="import" href="/dashboard/elements/chart-container.html">
 <link rel="import" href="/dashboard/static/graph.html">
 <link rel="import" href="/dashboard/static/testing_common.html">
 <link rel="import" href="/dashboard/static/uri.html">
 
+<link rel="import" href="/tracing/core/test_utils.html">
+
 <script>
 'use strict';
 
@@ -121,7 +121,7 @@
     return chart;
   };
 
-  test('instantiate_ basic', function() {
+  test('instantiate basic', function() {
     var chart = createBasicChart();
     chart.addSeriesGroup([['ChromiumPerf/win/dromaeo', ['important']]]);
     this.addHTMLOutput(chart);
diff --git a/catapult/dashboard/dashboard/elements/chart-container.html b/catapult/dashboard/dashboard/elements/chart-container.html
index 8b97cb7..8f00f3e 100644
--- a/catapult/dashboard/dashboard/elements/chart-container.html
+++ b/catapult/dashboard/dashboard/elements/chart-container.html
@@ -1,10 +1,16 @@
 <!DOCTYPE html>
 <!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<!--
 The chart-container element represents one chart and all related functionality,
 including a legend listing different traces that can be plotted on the same
 chart, a revision range selecting mini-chart at the bottom, and all of the alert
 triaging functionality in the chart.
 -->
+
 <link rel="import" href="/components/core-collapse/core-collapse.html">
 <link rel="import" href="/components/core-icon/core-icon.html">
 <link rel="import" href="/components/paper-button/paper-button.html">
@@ -14,14 +20,15 @@
 <link rel="import" href="/dashboard/elements/chart-slider.html">
 <link rel="import" href="/dashboard/elements/chart-title.html">
 <link rel="import" href="/dashboard/elements/chart-tooltip.html">
-<link rel="import" href="/dashboard/static/simple_xhr.html">
 <link rel="import" href="/dashboard/static/graph.html">
+<link rel="import" href="/dashboard/static/simple_xhr.html">
 <link rel="import" href="/dashboard/static/testselection.html">
 <link rel="import" href="/dashboard/static/uri.html">
 
 <polymer-element name="chart-container"
                  attributes="graphParams revisionInfo xsrfToken alertKey
-                             showCompact collapseLegend"
+                             showCompact collapseLegend testSuites
+                             isInternalUser"
                  on-drop="{{onDrop}}"
                  on-dragover="{{allowDrop}}">
 
@@ -143,7 +150,8 @@
     <div id="container" compact?="{{showCompact}}">
       <div id="top-bar" horizontal layout>
         <div flex horizontal center-justified layout>
-          <chart-title id="title" seriesGroupList={{seriesGroupList}}></chart-title>
+          <chart-title id="title" seriesGroupList="{{seriesGroupList}}"
+                       testSuites="{{testSuites}}"></chart-title>
         </div>
         <div horizontal layout center>
           <paper-button id="close-chart" on-click="{{closeChartClicked}}">
@@ -582,7 +590,7 @@
                 selectedTestPathDict[testPath] = [];
               } else {
                 // Create test data here for selected traces so they can
-                // be initialy shown in chart legend.
+                // be initially shown in chart legend.
                 for (var j = 0; j < selectedTraces.length; j++) {
                   var testData = {
                     name: selectedTraces[j],
@@ -669,7 +677,7 @@
          * Update loading counter for all series groups in a testPaths.
          *
          * @param {Array} testPaths List of test paths.
-         * @param {boolean} increment True to add one, False to subract one.
+         * @param {boolean} increment True to add one, False to subtract one.
          */
         updateSeriesGroupLoadingCounter: function(testPaths, increment) {
           var testPathToGroup = {};
@@ -684,7 +692,7 @@
         },
 
         checkForInternalUser: function() {
-          if (!window['IS_INTERNAL_USER']) {
+          if (!this.isInternalUser) {
              this.addWarnings(
                'Note that some data is only available when logged in.');
           }
@@ -703,7 +711,7 @@
         },
 
         /**
-         * Adds warnings for selected series that are staed or have no data.
+         * Adds warnings for selected series that are stale or have no data.
          */
         updateWarningsForSelectedSeries: function() {
           this.warnings = this.warnings.filter(function(value) {
@@ -834,7 +842,8 @@
               color: this.json.data[i].color,
               index: i,
               important: testselection.isImportant(
-                  series[i].path.split('/').slice(2).join('/'))
+                  series[i].path.split('/').slice(2).join('/'),
+                  this.testSuites)
             };
 
             testList.push(test);
@@ -851,7 +860,7 @@
         },
 
         /**
-         * Updates this.seriesGrouplist for a list of series object.
+         * Updates this.seriesGroupList for a list of series object.
          *
          * @param {Array} testList List of test object.
          */
@@ -1811,7 +1820,7 @@
         /**
          * Displays a tooltip for the given point on the graph.
          *
-         * TODO(qyearsley): Refactor. http://crbug.com/508991.
+         * TODO(qyearsley): Refactor. See catapult:#1348.
          *
          * NOTE: Instead of taking lists of alerts and triaged alerts, it
          * would be possible for this function to just take one alert, since
@@ -2085,10 +2094,6 @@
          * Returns an object with revision-related information for one type
          * of revision, for two revision numbers.
          *
-         * The property this.revisionInfo is set in graph.js from the global
-         * variable REVISION_INFO, which is embedded based on the content of
-         * chart_handler.py.
-         *
          * @param {string} revisionTypeKey A key in this.revisionInfo.
          *     This will be a string starting with "r_".
          * @param {(number|string|boolean)} start Optional start revision.
@@ -2119,7 +2124,7 @@
           }
 
           // If the substring R1_trim is found in the URL template, we assume
-          // that it's the URL template for Crome OS versions.
+          // that it's the URL template for Chrome OS versions.
           var url = '';
           if (urlTemplate.indexOf('{{R1_trim}}') != -1) {
             url = this.fillInChromeOSChangeLogURL(urlTemplate, start, end);
@@ -2381,7 +2386,7 @@
 
         /**
          * Gets the current state of the chart.
-         * This is called by 'report-container.html' to create page state.
+         * This is called by 'report-page.html' to create page state.
          *
          * @return {Array} List of pair of test path and selected series.
          */
diff --git a/catapult/dashboard/dashboard/elements/chart-legend-test.html b/catapult/dashboard/dashboard/elements/chart-legend-test.html
index 7028e92..1c9fb2b 100644
--- a/catapult/dashboard/dashboard/elements/chart-legend-test.html
+++ b/catapult/dashboard/dashboard/elements/chart-legend-test.html
@@ -1,15 +1,15 @@
-<!doctype html>
+<!DOCTYPE html>
 <!--
 Copyright 2015 The Chromium Authors. All rights reserved.
 Use of this source code is governed by a BSD-style license that can be
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/tracing/core/test_utils.html">
-
 <link rel="import" href="/dashboard/elements/chart-legend.html">
 <link rel="import" href="/dashboard/static/testing_common.html">
 
+<link rel="import" href="/tracing/core/test_utils.html">
+
 <script>
 'use strict';
 
@@ -56,7 +56,7 @@
     return legend;
   }
 
-  test('instantiate_basic', function() {
+  test('instantiate basic', function() {
     var legend = createBasicLegend();
     this.addHTMLOutput(legend);
   }, testOptions);
diff --git a/catapult/dashboard/dashboard/elements/chart-legend.html b/catapult/dashboard/dashboard/elements/chart-legend.html
index 699c231..2d3311f 100644
--- a/catapult/dashboard/dashboard/elements/chart-legend.html
+++ b/catapult/dashboard/dashboard/elements/chart-legend.html
@@ -1,10 +1,12 @@
+<!DOCTYPE html>
 <!--
-Attributes:
-  seriesGroupList: A list of trace names.
-  indicesToGraph: List of indices of traces that are selected.
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
 -->
-<link rel="import" href="/components/core-selector/core-selector.html">
+
 <link rel="import" href="/components/core-icon-button/core-icon-button.html">
+<link rel="import" href="/components/core-selector/core-selector.html">
 <link rel="import" href="/components/paper-icon-button/paper-icon-button.html">
 <link rel="import" href="/components/paper-shadow/paper-shadow.html">
 <link rel="import" href="/components/paper-spinner/paper-spinner.html">
@@ -291,7 +293,9 @@
 
         var description = document.createElement('tooltip-test-description');
         description.test = test;
-        document.getElementById('tooltip').set(
+        // This assumes that the tooltip element is present at the top level.
+        // See https://github.com/catapult-project/catapult/issues/2172.
+        document.getElementById('legend-details-tooltip').set(
             description, event.pageX, event.pageY);
       },
 
diff --git a/catapult/dashboard/dashboard/elements/chart-slider.html b/catapult/dashboard/dashboard/elements/chart-slider.html
index 2566853..aed0a1b 100644
--- a/catapult/dashboard/dashboard/elements/chart-slider.html
+++ b/catapult/dashboard/dashboard/elements/chart-slider.html
@@ -1,3 +1,10 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
 <polymer-element name="chart-slider" attributes="testpath startrev endrev">
   <template>
     <style>
diff --git a/catapult/dashboard/dashboard/elements/chart-title-test.html b/catapult/dashboard/dashboard/elements/chart-title-test.html
index 47a9c36..4309a3c 100644
--- a/catapult/dashboard/dashboard/elements/chart-title-test.html
+++ b/catapult/dashboard/dashboard/elements/chart-title-test.html
@@ -1,13 +1,13 @@
-<!doctype html>
+<!DOCTYPE html>
 <!--
 Copyright 2015 The Chromium Authors. All rights reserved.
 Use of this source code is governed by a BSD-style license that can be
 found in the LICENSE file.
 -->
-<link rel="import" href="/tracing/core/test_utils.html">
 
 <link rel="import" href="/dashboard/elements/chart-title.html">
 <link rel="import" href="/dashboard/static/testing_common.html">
+<link rel="import" href="/tracing/core/test_utils.html">
 
 <script>
 'use strict';
@@ -16,17 +16,17 @@
 
   var testOptions = {
     setUp: function() {
-      window.TEST_SUITES = {
-          'dromaeo': {'des': 'Description of dromaeo.'},
-          'sunspider': {'des': 'Sunspider test.'}
-      };
     },
     tearDown: function() {
     }
   };
 
-  test('instantiate_ basic', function() {
+  test('instantiate basic', function() {
     var title = document.createElement('chart-title');
+    title.testSuites = {
+        'dromaeo': {'des': 'Description of dromaeo.'},
+        'sunspider': {'des': 'Sunspider test.'}
+    };
     title.seriesGroupList = [{
         'path': 'ChromiumPerf/win/dromaeo',
         'tests': [{'name': 'Total'}]
@@ -35,8 +35,12 @@
     this.addHTMLOutput(title);
   }, testOptions);
 
-  test('check_title', function() {
+  test('after update is called, suite descriptions are set', function() {
     var title = document.createElement('chart-title');
+    title.testSuites = {
+        'dromaeo': {'des': 'Description of dromaeo.'},
+        'sunspider': {'des': 'Sunspider test.'}
+    };
     title.seriesGroupList = [
     {
         'path': 'ChromiumPerf/win/dromaeo',
diff --git a/catapult/dashboard/dashboard/elements/chart-title.html b/catapult/dashboard/dashboard/elements/chart-title.html
index a822c4d..06690aa 100644
--- a/catapult/dashboard/dashboard/elements/chart-title.html
+++ b/catapult/dashboard/dashboard/elements/chart-title.html
@@ -1,4 +1,10 @@
 <!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
 <polymer-element name="chart-title" attributes="seriesGroupList">
   <template>
     <style>
@@ -128,10 +134,10 @@
         var suiteDescriptions = {};
         for (var i = 0; i < this.seriesGroupList.length; i++) {
           var suite = this.seriesGroupList[i].path.split('/')[2];
-          if (window['TEST_SUITES'] &&
-              window['TEST_SUITES'][suite] &&
-              window['TEST_SUITES'][suite]['des']) {
-            suiteDescriptions[suite] = window['TEST_SUITES'][suite]['des'];
+          if (this.testSuites &&
+              this.testSuites[suite] &&
+              this.testSuites[suite]['des']) {
+            suiteDescriptions[suite] = this.testSuites[suite]['des'];
           }
         }
         var suiteNames = Object.keys(suiteDescriptions);
diff --git a/catapult/dashboard/dashboard/elements/chart-tooltip.html b/catapult/dashboard/dashboard/elements/chart-tooltip.html
index 6548d87..96d5fac 100644
--- a/catapult/dashboard/dashboard/elements/chart-tooltip.html
+++ b/catapult/dashboard/dashboard/elements/chart-tooltip.html
@@ -1,9 +1,15 @@
 <!DOCTYPE html>
 <!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<!--
 The chart-tooltip element is the box that is shown when you hover over or click
 on a point on a graph. It shows more detailed information about the point that
 was just clicked.
 -->
+
 <link rel="import" href="/components/paper-dialog/paper-action-dialog.html">
 
 <link rel="import" href="/dashboard/elements/alert-remove-box.html">
@@ -74,7 +80,6 @@
           <div>
             <span hidden?={{alertInvalidOrIgnored}}>Bug ID:
               <b><a target="_blank" href="http://crbug.com/{{bugId}}">{{bugId}}</a></b>
-              <span class="{{bugState}} "hidden?="{{!bugStatus}}">({{bugStatus}})</span>
             </span>
 
             <span hidden?="{{bugId != -1}}">Invalid alert</span>
@@ -124,11 +129,30 @@
       bugId: null,
       stdioUri: null,
       hideStdioUri: true,
-      bugStatusLink: ('https://www.googleapis.com' +
-                      '/projecthosting/v2/projects/chromium/issues/BUGID' +
-                      '?key=AIzaSyDrEBALf59D7TkOuz-bBuOnN2OqzD70NCQ'),
       revisions: [],
 
+      ready: function() {
+        // This allows the tooltip to extend beyond the current window size.
+        // The tooltip size is determined by 'sizingTarget', which defaults to
+        // the window size.  Here we set it to '#scroller', which is the
+        // content's container.
+        // 'core-overlay' API:
+        //   https://github.com/Polymer/core-overlay/blob/master/core-overlay.html
+        //
+        // Note: Although this deep shadow DOM selector is deprecated, it is
+        // a workable way to customize elements in Polymer 0.5.  This can be
+        // done with custom CSS properties after migrating to Polymer 1.0.
+        this.$.tooltip.sizingTarget = document.querySelector(
+            'html /deep/ paper-action-dialog::shadow #scroller');
+
+        // We open the tooltip and hide its container on 'ready' to avoid
+        // 'core-overlay' grabbing initial element focus, which causes the
+        // page to jump.
+        // TODO(chrisphan): Figure out a better way to do this.
+        this.$.container.hidden = true;
+        this.$.tooltip.open();
+      },
+
       testPathChanged: function() {
         if (this.testPath) {
           var parts = this.testPath.split('/');
@@ -161,28 +185,6 @@
       },
 
       /**
-       * Updates the interface when the bug ID changes.
-       */
-      bugIdChanged: function() {
-        this.bugStatus = this.bugState = null;
-        // If there is a bug ID, request its state and status. Note that these
-        // are two different things -- state is closed/open, whereas status
-        // includes things like available, started, duplicate, etc.
-        if (this.bugId && this.bugId > 0) {
-          var url = this.bugStatusLink.replace('BUGID', this.bugId);
-          var req = new XMLHttpRequest();
-          var self = this;
-          req.onload = function() {
-            var json = JSON.parse(req.responseText);
-            self.bugStatus = json.status;
-            self.bugState = json.state;
-          };
-          req.open('get', url, true);
-          req.send();
-        }
-      },
-
-      /**
        * Fires a 'triaged' event, which should be caught in chart-container.
        */
       onUntriaged: function(event, detail, sender) {
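The ready() comment above notes that the /deep/ selector workaround could be replaced with custom CSS properties after a Polymer 1.0 migration. A rough sketch of that alternative (the --chart-tooltip-scroller mixin name is hypothetical; a migrated dialog element would need to expose such a hook):

    <style is="custom-style">
      chart-tooltip {
        /* Hypothetical mixin applied by a migrated dialog to its scrolling
           container, replacing the 'html /deep/ ... #scroller' query. */
        --chart-tooltip-scroller: {
          max-height: none;
        };
      }
    </style>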
diff --git a/catapult/dashboard/dashboard/elements/custom-tooltip.html b/catapult/dashboard/dashboard/elements/custom-tooltip.html
index c8d1a8a..2107b3d 100644
--- a/catapult/dashboard/dashboard/elements/custom-tooltip.html
+++ b/catapult/dashboard/dashboard/elements/custom-tooltip.html
@@ -1,3 +1,9 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
 <!--
 The 'custom-tooltip' displays an overlaid pop-up above a target.
 
@@ -10,8 +16,10 @@
   var tooltip = document.getElementById("tooltip");
   tooltip.set('A tooltip message', bound.left, bound.top);
 -->
-<link rel="import" href="/components/paper-shadow/paper-shadow.html">
+
 <link rel="import" href="/components/core-overlay/core-overlay.html">
+<link rel="import" href="/components/paper-shadow/paper-shadow.html">
+
 <polymer-element
     name="custom-tooltip"
     attributes="autoCloseDisabled transition duration maxWidth">
diff --git a/catapult/dashboard/dashboard/elements/editable-list.html b/catapult/dashboard/dashboard/elements/editable-list.html
index f13e940..ec1531c 100644
--- a/catapult/dashboard/dashboard/elements/editable-list.html
+++ b/catapult/dashboard/dashboard/elements/editable-list.html
@@ -1,3 +1,10 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
 <link rel="import" href="/dashboard/static/simple_xhr.html">
 
 <polymer-element name="editable-list" attributes="xsrfToken">
diff --git a/catapult/dashboard/dashboard/elements/group-report-page-test.html b/catapult/dashboard/dashboard/elements/group-report-page-test.html
new file mode 100644
index 0000000..f914856
--- /dev/null
+++ b/catapult/dashboard/dashboard/elements/group-report-page-test.html
@@ -0,0 +1,68 @@
+<!DOCTYPE html>
+<!--
+Copyright 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/dashboard/elements/group-report-page.html">
+<link rel="import" href="/dashboard/static/testing_common.html">
+
+<link rel="import" href="/tracing/core/test_utils.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+
+  var testOptions = {
+    tearDown: function() {
+      testing_common.clearXhrMock();
+    }
+  };
+
+  test('instantiation', function() {
+    var mockResponse = {
+      'alert_list': [
+        {
+          'key': '12345',
+          'group': null,
+          'start_revision': 55555,
+          'end_revision': 55558,
+          'date': '2016-02-27',
+          'master': 'Chromium',
+          'bot': 'win',
+          'testsuite': 'sunspider',
+          'test': 'Total',
+          'bug_id': null,
+          'dashboard_link': 'http://do/not/click/me',
+          'median_after_anomaly': 40,
+          'median_before_anomaly': 20,
+          'percent_changed': '50%',
+          'improvement': true,
+          'bisect_status': null,
+          'recovered': false
+        }
+      ],
+      'owner_info': [{'email': 'foo@bar.org'}],
+      'subtests': {
+        'Chromium/win': {'sunspider': ['Total', '3d-cube']}
+      },
+      'revision_info': {
+        'r_chromium': {
+          'name': 'Chromium Git Hash',
+          'url': 'https://chromium.googlesource.com/+log/{{R1}}..{{R2}}'
+        }
+      },
+      'test_suites': {},
+      'login_url': 'http://do/not/click/me',
+      'is_internal_user': false,
+      'xsrf_token': '12345'
+    };
+    testing_common.addXhrMock('*', JSON.stringify(mockResponse));
+    var page = document.createElement('group-report-page');
+    this.addHTMLOutput(page);
+  }, testOptions);
+
+});
+</script>
diff --git a/catapult/dashboard/dashboard/elements/group-report-page.html b/catapult/dashboard/dashboard/elements/group-report-page.html
new file mode 100644
index 0000000..781daa7
--- /dev/null
+++ b/catapult/dashboard/dashboard/elements/group-report-page.html
@@ -0,0 +1,384 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<script src="/jquery/jquery-2.1.4.min.js"></script>
+<script src="/flot/jquery.flot.min.js"></script>
+<script src="/flot/jquery.flot.crosshair.min.js"></script>
+<script src="/flot/jquery.flot.fillbetween.min.js"></script>
+<script src="/flot/jquery.flot.selection.min.js"></script>
+
+<link rel="import" href="/components/paper-button/paper-button.html">
+<link rel="import" href="/components/polymer/polymer.html">
+
+<link rel="import" href="/dashboard/elements/alerts-table.html">
+<link rel="import" href="/dashboard/elements/bug-info.html">
+<link rel="import" href="/dashboard/elements/chart-container.html">
+<link rel="import" href="/dashboard/elements/login-warning.html">
+<link rel="import" href="/dashboard/elements/quick-log.html">
+<link rel="import" href="/dashboard/elements/triage-dialog.html">
+<link rel="import" href="/dashboard/static/simple_xhr.html">
+<link rel="import" href="/dashboard/static/uri.html">
+
+<polymer-element name="group-report-page">
+  <template>
+    <style>
+      .error {
+        color: #dd4b39;
+        font-weight: bold;
+      }
+
+      /* The action bar contains the graph button and triage button. */
+      #action-bar {
+        margin-top: 20px;
+        width: 100%;
+      }
+
+      /* The top container contains the action bar and alerts list. */
+      #top {
+        display: inline-flex;
+        display: -webkit-inline-flex;
+        flex-direction: column;
+        -webkit-flex-direction: column;
+        align-items: flex-start;
+        -webkit-align-items: flex-start;
+        margin-bottom: 15px;
+        width: 100%
+      }
+
+      /* The bottom container contains the charts. */
+      #bottom {
+        display: flex;
+        display: -webkit-flex;
+        flex-direction: column;
+        -webkit-flex-direction: column;
+        min-width: 100%;
+        min-height: 100%;
+      }
+
+      /* Triage dialog at the top level when the user clicks the triage button. */
+      triage-dialog {
+        position: absolute;
+        margin-top: 30px;
+        z-index: 1000;
+      }
+
+      /* This class indicates a button toggled on (e.g. show improvements). */
+      .alert-togglebutton {
+        float: right;
+        margin-left: 4px;
+        margin-right: 4px;
+      }
+
+      #bisect-result-log {
+        width: 100%;
+        display: block;
+      }
+
+      #loading-spinner {
+        width: 100%;
+        display: flex;
+        justify-content: center;
+      }
+    </style>
+    <template if="{{loading}}">
+      <div id="loading-spinner"><img src="//www.google.com/images/loading.gif"></div>
+    </template>
+    <template if="{{error}}">
+      <div class="error">{{error}}</div>
+    </template>
+    <!-- TODO(sullivan): The content below should be in a template if="{{!loading}}".
+         This doesn't work correctly due to a Polymer 0.5 bug; let's try again
+         in Polymer 1.0. -->
+    <template if="{{warningMessage}}">
+      <overlay-message id="warning-message" opened="true" autoCloseDisabled duration="-1">
+      {{warningMessage}}
+      <template if="{{warningBug}}">
+        <a href="https://github.com/catapult-project/catapult/issues/{{warningBug}}">See bug #{{warningBug}}.</a>
+      </template>
+      </overlay-message>
+    </template>
+    <login-warning id="login-warning" loginLink="{{loginUrl}}"
+                   hidden?="{{isInternalUser}}">
+    </login-warning>
+    <div id="top">
+      <div id="action-bar" hidden?="{{loading || error}}">
+        <paper-button toggle raised
+                      id="improvements-toggle"
+                      class="alert-togglebutton"
+                      on-click="{{onToggleImprovements}}">
+          Show all improvements
+        </paper-button>
+      </div>
+      <bug-info id="bug-info" xsrfToken="{{xsrfToken}}"></bug-info>
+      <template if="{{bugId}}">
+      <quick-log id="bisect-result-log"
+                 xsrfToken="{{xsrfToken}}"
+                 logNamespace="bisect_result"
+                 logName="{{bugId}}"
+                 logLabel="Bisect results"
+                 loadOnReady="true"
+                 expandOnReady="true"></quick-log>
+      </template>
+      <alerts-table id="alerts-table"
+                    hidden?="{{loading || error}}"
+                    xsrfToken="{{xsrfToken}}"
+                    alertList="{{alertList}}"
+                    extraColumns="{{extraColumns}}"
+                    on-changeselection="{{onAlertSelectionChange}}"></alerts-table>
+    </div>
+
+    <div id="bottom">
+      <section id="charts-container"></section>
+    </div>
+
+  </template>
+  <script>
+    'use strict';
+    Polymer('group-report-page', {
+      loading: true,
+
+      get alertsTable() {
+        return this.$['alerts-table'];
+      },
+
+      getCharts() {
+        // Note: This cannot be a property getter for a 'charts' attribute
+        // because Polymer caches the getter result.
+        var charts = [];
+        var children = this.$['charts-container'].children;
+        for (var i = 0; i < children.length; i++) {
+          charts.push(children[i]);
+        }
+        return charts;
+      },
+
+      extraColumns: [{
+        'key': 'percent_changed',
+        'label': 'Delta %'
+      }],
+
+      onToggleImprovements: function(event, detail, sender) {
+        var improvementsToggle = sender;
+        if (improvementsToggle.hasAttribute('active')) {
+          this.alertsTable['alertList'].forEach(function(alert) {
+            if (alert['improvement']) {
+              alert['hideRow'] = false;
+            }
+          });
+        } else {
+          this.alertsTable['alertList'].forEach(function(alert) {
+            if (alert['improvement'] && !alert['selected']) {
+              alert['hideRow'] = true;
+              alert['selected'] = false;
+            }
+          });
+          // Make the table update its list of checked alerts.
+          this.alertsTable.onCheckboxChange();
+        }
+      },
+
+      alertChangedRevisions: function(event) {
+        var alertList = this.alertsTable['alertList'];
+        var nudgedAlert = event.detail['alerts'][0];
+        for (var i = 0; i < alertList.length; i++) {
+          if (alertList[i]['key'] == nudgedAlert['key']) {
+            alertList[i].start_revision = event.detail['startRev'];
+            alertList[i].end_revision = event.detail['endRev'];
+            // Make the table update its list of checked alerts.
+            this.alertsTable.onCheckboxChange();
+            return;
+          }
+        }
+      },
+
+      onGraphClose: function(event) {
+        // Un-check the alert in the table.
+        var key = event.target['alertKey'];
+        var alertList = this.alertsTable['alertList'];
+        for (var i = 0; i < alertList.length; i++) {
+          if (alertList[i].key == key) {
+            alertList[i].selected = false;
+            break;
+          }
+        }
+
+        // Make the table update its list of checked alerts.
+        // This is necessary so that the triage dialog will get a correct list
+        // of alerts that should be affected by a triage action.
+        this.alertsTable.onCheckboxChange();
+
+        // Remove the graph from the set of currently-displayed graph elements.
+        delete this.graphElements_[key];
+      },
+
+      getSubtestsEntry: function(testPath) {
+        var testPathParts = testPath.split('/');
+        var botName = testPathParts[0] + '/' + testPathParts[1];
+        var subtestParts = testPathParts.splice(3);
+        var subtestDict = this.subtests[botName][testPathParts[2]];
+        if (!subtestDict) {
+          return null;
+        }
+        for (var level = 0; level < subtestParts.length - 1; level++) {
+          var name = subtestParts[level];
+          if (!(name in subtestDict)) {
+            return null;
+          }
+          subtestDict = subtestDict[name]['sub_tests'];
+        }
+        return subtestDict[subtestParts[subtestParts.length - 1]];
+      },
+
+      getTestPath: function(alert) {
+        return [
+          alert['master'],
+          alert['bot'],
+          alert['testsuite'],
+          alert['test']
+        ].join('/');
+      },
+
+      getTestPathAndSelectedSeries: function(alert) {
+        var testPath = this.getTestPath(alert);
+        var subtestsEntry = this.getSubtestsEntry(testPath);
+        var traceName = testPath.split('/').pop();
+
+        // If the "subtests" property of |subtestsEntry| is an empty object,
+        // that implies that this test has no subtests. In this case, show a
+        // chart for the parent test, with this particular child selected.
+        if (subtestsEntry && subtestsEntry['sub_tests'] &&
+            Object.keys(subtestsEntry['sub_tests']).length == 0) {
+          testPath = testPath.split('/').slice(0, -1).join('/');
+          subtestsEntry = this.getSubtestsEntry(testPath);
+        }
+
+        // Get a list of selected traces. This should include the series that
+        // the alert was on, as well as any related reference build result
+        // series.
+        var selectedTraces = [traceName];
+        if (subtestsEntry && subtestsEntry['sub_tests']) {
+          if ('ref' in subtestsEntry['sub_tests']) {
+            selectedTraces.push('ref');
+          }
+          if (traceName + '_ref' in subtestsEntry['sub_tests']) {
+            selectedTraces.push(traceName + '_ref');
+          }
+        }
+
+        // Otherwise, the test is either not found in the subtests dict, or it
+        // is a test with children (e.g. a summary metric). In either of these
+        // cases, we want to return the test path and trace found on the alert.
+        return [testPath, selectedTraces];
+      },
+
+      setChartData: function(chart) {
+        chart.revisionInfo = this.revisionInfo;
+        chart.xsrfToken = this.xsrfToken;
+        chart.isInternalUser = this.isInternalUser;
+        chart.testSuites = this.testSuites;
+      },
+
+      addGraph: function(alerts, insertBefore) {
+        if (!alerts) {
+          return;
+        }
+
+        var containerElement = this.$['charts-container'];
+        for (var i = 0; i < alerts.length; i++) {
+          var alert = alerts[i];
+          var chart = document.createElement('chart-container');
+          this.graphElements_[alert['key']] = chart;
+          if (insertBefore) {
+            containerElement.insertBefore(chart, containerElement.firstChild);
+          } else {
+            containerElement.appendChild(chart);
+          }
+
+          // Set graph params.
+          var graphParams = {
+            'rev': alert['end_revision']
+          };
+          chart.graphParams = graphParams;
+          chart.alertKey = alert['key'];
+          chart.addSeriesGroup([this.getTestPathAndSelectedSeries(alert)]);
+          chart.addEventListener('chartclosed', this.onGraphClose, false);
+          chart.addEventListener('alertChangedRevisions',
+                                 this.alertChangedRevisions, true);
+          this.setChartData(chart);
+        }
+      },
+
+      onAlertSelectionChange: function() {
+        // Make a set of all alerts that are checked in the table.
+        var alerts = {};
+        this.alertsTable.checkedAlerts.forEach(function(a) {
+          alerts[a.key] = a;
+        });
+        // Add graphs that are checked in the table but not added yet.
+        for (var key in alerts) {
+          if (!(key in this.graphElements_)) {
+            this.addGraph([alerts[key]], true);
+          }
+        }
+
+        // Remove graphs that are no longer checked in the table.
+        var chartsContainer = this.$['charts-container'];
+        for (var key in this.graphElements_) {
+          if (!(key in alerts) && key in this.graphElements_) {
+            if (this.graphElements_[key].parentNode == chartsContainer) {
+              chartsContainer.removeChild(this.graphElements_[key]);
+              delete this.graphElements_[key];
+            }
+          }
+        }
+      },
+
+      ready: function() {
+        this.graphElements_ = {};
+        var params = {};
+        var keys = uri.getParameter('keys');
+        if (keys) {
+          params['keys'] = keys;
+        }
+        var bug_id = uri.getParameter('bug_id');
+        if (bug_id) {
+          params['bug_id'] = bug_id;
+        }
+        var rev = uri.getParameter('rev');
+        if (rev) {
+          params['rev'] = rev;
+        }
+        simple_xhr.send('/group_report', params,
+          function(response) {
+            this.alertList = response['alert_list'];
+            this.ownerInfo = response['owner_info'];
+            this.subtests = response['subtests'];
+            this.revisionInfo = response['revision_info'];
+            this.loginLink = response['login_url'];
+            this.isInternalUser = response['is_internal_user'];
+            this.testSuites = response['test_suites'];
+            this.xsrfToken = response['xsrf_token'];
+            this.bugId = uri.getParameter('bug_id');
+            if (this.bugId) {
+              this.$['bug-info'].initialize(
+                  this.bugId, this.alertsTable, this.ownerInfo);
+            }
+            this.addGraph(this.alertsTable.checkedAlerts, false);
+            var charts = this.getCharts();
+            for (var i = 0; i < charts.length; i++) {
+              this.setChartData(charts[i]);
+            }
+            this.loading = false;
+          }.bind(this),
+          function(msg) {
+            this.error = msg;
+            this.loading = false;
+          }.bind(this));
+      }
+    });
+  </script>
+</polymer-element>
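The getSubtestsEntry() helper in group-report-page.html above walks this.subtests from master/bot and suite down one level per remaining test path component. A small sketch of a shape that traversal accepts (the structure is inferred from the traversal code; names and values are made up):

    var subtests = {
      'ChromiumPerf/linux': {
        'sunspider': {
          'Total': {'sub_tests': {}},
          '3d-cube': {'sub_tests': {'ref': {'sub_tests': {}}}}
        }
      }
    };
    // For the test path 'ChromiumPerf/linux/sunspider/3d-cube/ref',
    // getSubtestsEntry() looks up subtests['ChromiumPerf/linux']['sunspider'],
    // then descends through '3d-cube'.sub_tests and returns the 'ref' entry.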
diff --git a/catapult/dashboard/dashboard/elements/load-analytics.html b/catapult/dashboard/dashboard/elements/load-analytics.html
new file mode 100644
index 0000000..3a82c45
--- /dev/null
+++ b/catapult/dashboard/dashboard/elements/load-analytics.html
@@ -0,0 +1,32 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<polymer-element name="load-analytics">
+  <script>
+    'use strict';
+    Polymer('load-analytics', {
+      ready: function() {
+        // This code comes from Google Analytics.
+        (function(i, s, o, g, r, a, m) {
+          i['GoogleAnalyticsObject'] = r;
+          i[r] = i[r] || function() {
+            (i[r].q = i[r].q || []).push(arguments);
+          };
+          i[r].l = 1 * new Date();
+          a = s.createElement(o), m = s.getElementsByTagName(o)[0];
+          a.async = 1;
+          a.src = g;
+          m.parentNode.insertBefore(a, m);
+        })(window, document,
+           'script', '//www.google-analytics.com/analytics.js', 'ga');
+
+        ga('create', 'UA-56758330-1', 'auto');
+        ga('send', 'pageview');
+      }
+    });
+  </script>
+</polymer-element>
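Since the analytics loader runs unconditionally in ready(), using the element is just a matter of importing it and placing it once on a page; a minimal usage sketch (the host page shown here is illustrative):

    <link rel="import" href="/dashboard/elements/load-analytics.html">
    <!-- Records a pageview for this page as soon as the element is ready. -->
    <load-analytics></load-analytics>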
diff --git a/catapult/dashboard/dashboard/elements/login-warning-test.html b/catapult/dashboard/dashboard/elements/login-warning-test.html
index d881876..78c6d7c 100644
--- a/catapult/dashboard/dashboard/elements/login-warning-test.html
+++ b/catapult/dashboard/dashboard/elements/login-warning-test.html
@@ -1,13 +1,14 @@
-<!doctype html>
+<!DOCTYPE html>
 <!--
 Copyright 2015 The Chromium Authors. All rights reserved.
 Use of this source code is governed by a BSD-style license that can be
 found in the LICENSE file.
 -->
-<link rel="import" href="/tracing/core/test_utils.html">
 
 <link rel="import" href="/dashboard/elements/login-warning.html">
 
+<link rel="import" href="/tracing/core/test_utils.html">
+
 <script>
 'use strict';
 
diff --git a/catapult/dashboard/dashboard/elements/login-warning.html b/catapult/dashboard/dashboard/elements/login-warning.html
index 4127d58..511134e 100644
--- a/catapult/dashboard/dashboard/elements/login-warning.html
+++ b/catapult/dashboard/dashboard/elements/login-warning.html
@@ -1,3 +1,10 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
 <polymer-element name="login-warning" attributes="missing loginLink">
   <template>
     <style>
diff --git a/catapult/dashboard/dashboard/elements/nav-bar-test.html b/catapult/dashboard/dashboard/elements/nav-bar-test.html
new file mode 100644
index 0000000..1ca66e9
--- /dev/null
+++ b/catapult/dashboard/dashboard/elements/nav-bar-test.html
@@ -0,0 +1,37 @@
+<!DOCTYPE html>
+<!--
+Copyright 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/dashboard/elements/nav-bar.html">
+<link rel="import" href="/dashboard/static/testing_common.html">
+
+<link rel="import" href="/tracing/core/test_utils.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+
+  var testOptions = {
+    tearDown: function() {
+      testing_common.clearXhrMock();
+    }
+  };
+
+  test('instantiation', function() {
+    var mockResponse = {
+      'login_url': 'FAKE_LOGIN_URL',
+      'is_admin': true,
+      'display_username': 'foo@bar.com'
+    };
+    console.log('Adding XHR mock');
+    testing_common.addXhrMock('*', JSON.stringify(mockResponse));
+    var bar = document.createElement('nav-bar');
+    this.addHTMLOutput(bar);
+  }, testOptions);
+
+});
+</script>
diff --git a/catapult/dashboard/dashboard/elements/nav-bar.html b/catapult/dashboard/dashboard/elements/nav-bar.html
new file mode 100644
index 0000000..d3ed693
--- /dev/null
+++ b/catapult/dashboard/dashboard/elements/nav-bar.html
@@ -0,0 +1,323 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/components/core-icons/core-icons.html">
+
+<link rel="import" href="/dashboard/static/simple_xhr.html">
+
+<polymer-element name="nav-bar">
+  <template>
+    <style>
+
+      #navbar {
+        width: 100%;
+      }
+
+      #navbar ul {
+        list-style: none;
+        padding: 0;
+        margin: 0;
+        border: 0;
+        font-size: 100%;
+        font: inherit;
+        vertical-align: baseline;
+        z-index: 1000;
+        margin-left: -10px; /* Ignore body's padding left. */
+        padding-right: 20px; /* Ignore body's padding right. */
+      }
+
+      #navbar > ul {
+        display: flex;
+        display: -webkit-flex;
+        width: 100%;
+        background-color: #2D2D2D;
+        border-bottom: black;
+        margin-bottom: 6px;
+      }
+
+      #navbar li {
+        padding: 6px 10px;
+      }
+
+      #navbar li > a {
+        cursor: pointer;
+        text-decoration: none;
+      }
+
+      #navbar > ul > li, #navbar > ul > li > a {
+        color: #ddd;
+        font-weight: bold;
+      }
+
+      /* The addition of the <core-icon> elements to the submenus makes the
+       * submenu title text sit lower; the style rule below is intended to
+       * align the other menu items. */
+      #navbar .menu > li a {
+        display: inline-block;
+        padding-top: 3px;
+      }
+
+      #navbar .submenu li, #navbar .submenu a {
+        color: #ddd;
+        font-weight: normal;
+      }
+
+      /* This is a spacer in the navbar list that pushes the items after it
+       * all the way to the right side. */
+      .spacer {
+        flex: 100;
+      }
+
+      #navbar .menu li:hover > ul {
+        margin-top: 6px;
+        background-color: #2D2D2D;
+        border: 1px solid rgba(0, 0, 0, .2);
+      }
+
+      #navbar li:hover, #navbar a:hover {
+        color: white;
+      }
+
+      #navbar .menu-drop-arrow {
+        border-top-color: #aaa;
+        position: relative;
+        top: -1px;
+        border-style: solid dashed dashed;
+        border-color: transparent;
+        border-top-color: #c0c0c0;
+        display: -moz-inline-box;
+        display: inline-block;
+        font-size: 0;
+        height: 0;
+        line-height: 0;
+        width: 0;
+        border-width: 3px 3px 0;
+        padding-top: 1px;
+        left: 4px;
+      }
+
+      /* Basic select menus. */
+      .menu ul {
+        display: none;
+      }
+
+      .menu li:hover > ul {
+        display: block;
+        position: absolute;
+      }
+
+      .report-issue {
+        color: #dd4b39 !important;
+      }
+    </style>
+
+    <nav id="navbar">
+      <ul class="menu">
+        <li><a href="/">Home</a></li>
+        <li><a href="/alerts">Alerts</a></li>
+        <li><a href="/report">Browse Graphs</a></li>
+        <li><a href="https://code.google.com/p/chromium/issues/list?q=label%3AType-Bug-Regression+label%3APerformance&amp;sort=-id"
+               target="_blank">Perf Bugs</a></li>
+        <li>Other Pages<core-icon icon="arrow-drop-down"></core-icon>
+          <ul class="submenu">
+            <li><a href="/edit_test_owners">Edit Test Owners</a></li>
+            <li><a href="/bisect_stats">Bisect Stat Graphs</a></li>
+            <li><a href="/new_points">Recently Added Points</a></li>
+            <li><a href="/debug_alert">Debug Alert</a></li>
+          </ul>
+        </li>
+        <li>Waterfalls<core-icon icon="arrow-drop-down"></core-icon>
+          <ul class="submenu">
+            <li><a href="http://build.chromium.org/p/chromium.perf/waterfall?show_events=true&amp;failures_only=true&amp;reload=120"
+                   target="_blank">chromium.perf</a></li>
+            <li><a href="http://build.chromium.org/p/chromium.webkit/waterfall?builder=Win7%20Perf&amp;builder=Mac10.6%20Perf&amp;builder=Linux%20Perf"
+                   target="_blank">chromium.webkit</a></li>
+            <li><a href="http://build.chromium.org/p/chromium.gpu/waterfall?show_events=true&amp;failures_only=true&amp;reload=120"
+                   target="_blank">chromium.gpu</a></li>
+            <li><a href="http://build.chromium.org/p/tryserver.chromium.perf/builders"
+                   target="_blank">Bisect bots</a></li>
+          </ul>
+        </li>
+        <li>Help<core-icon icon="arrow-drop-down"></core-icon>
+          <ul class="submenu">
+            <li><a href="http://www.chromium.org/developers/speed-infra/performance-dashboard"
+                   target="_blank">Perf Dashboard Public Documentation</a></li>
+            <li><a href="http://www.chromium.org/developers/speed-infra/performance-dashboard/endpoints"
+                   target="_blank">Documented Endpoints</a></li>
+            <li><a href="http://www.chromium.org/developers/tree-sheriffs/perf-sheriffs"
+                   target="_blank">About Chromium Perf Sheriffing</a></li>
+            <li><a href="https://docs.google.com/a/chromium.org/document/d/1kIMZ8jNA2--4JsCtUJ_OprnlfT6aM3BfHrQ8o4s3bDI/edit"
+                   target="_blank">Chromium Perf Sheriff Status</a></li>
+            <li><a href="http://www.chromium.org/developers/tree-sheriffs/perf-sheriffs/bisecting-performance-regressions"
+                   target="_blank">Bisecting Performance Regressions</a></li>
+            <li><a href="http://www.chromium.org/developers/how-tos/gpu-wrangling"
+                   target="_blank">GPU Bots &amp; Pixel Wrangling</a></li>
+            <li><a href="https://docs.google.com/a/google.com/spreadsheets/d/1R_1BAOd3xeVtR0jn6wB5HHJ2K25mIbKp3iIRQKkX38o/view"
+                   target="_blank">Benchmark Owners Spreadsheet</a></li>
+            <li><a href="https://docs.google.com/document/d/1cF2Ny3UYbXq2y3fZaygUSz_3lVc0SOrSRZwXqGjjKgo/view"
+                   target="_blank">Triaging Stoppage Alerts</a></li>
+          </ul>
+        </li>
+
+        <template if="{{isAdmin}}">
+          <li>Admin<core-icon icon="arrow-drop-down"></core-icon>
+            <ul class="submenu">
+              <li><a href="/edit_site_config" target="_blank">Edit Site Config</a></li>
+              <li><a href="/edit_site_config?key=ip_whitelist" target="_blank">Edit IP Whitelist</a></li>
+              <li><a href="/edit_sheriffs" target="_blank">Edit Sheriff Rotations</a></li>
+              <li><a href="/edit_anomaly_configs" target="_blank">Edit Anomaly Configs</a></li>
+              <li><a href="/edit_bug_labels" target="_blank">Edit Bug Labels</a></li>
+              <li><a href="/stats" target="_blank">View Statistics</a></li>
+              <li><a href="/migrate_test_names" target="_blank">Migrate Test Names</a></li>
+              <li><a href="/edit_site_config?key=bot_whitelist" target="_blank">Bot Whitelist</a></li>
+              <li><a href="/change_internal_only" target="_blank">Change internal_only</a></li>
+            </ul>
+          </li>
+        </template>
+
+        <li class="report-issue">
+          <core-icon icon="bug-report"></core-icon> Report Issue
+          <core-icon icon="arrow-drop-down"></core-icon>
+          <ul class="submenu">
+            <li><a on-click="{{reportBug}}">Report a Perf Dashboard Bug</a></li>
+            <li><a on-click="{{fileIpWhitelistRequest}}">Request Buildbot IP Whitelisting</a></li>
+            <li><a on-click="{{fileBotWhitelistRequest}}">Request to Make Buildbots Publicly Visible</a></li>
+            <li><a on-click="{{fileMonitoringRequest}}">Request Monitoring for Tests</a></li>
+          </ul>
+        </li>
+        <li class="spacer"></li>
+        <li><a href="{{loginUrl}}" title="switch user">{{displayUsername}}</a></li>
+      </ul>
+    </nav>
+  </template>
+  <script>
+    'use strict';
+    Polymer('nav-bar', {
+      ready: function() {
+        simple_xhr.send('/navbar',
+                        {'path': location.pathname + location.search},
+                        function(response) {
+                          this.loginUrl = response.login_url;
+                          this.displayUsername = response.display_username;
+                          this.isAdmin = response.is_admin;
+                        }.bind(this));
+      },
+      /**
+       * Opens a window with new Chromium bug, pre-filled with some info.
+       * @param {string} summary The title of the bug.
+       * @param {string} comment The description of the bug.
+       * @param {Array} labels A list of labels to apply to the bug.
+       * @param {Array} cc A list of email addresses to cc on the bug.
+       */
+      openCrBugWindow: function(summary, comment, labels, cc) {
+        var url = 'https://code.google.com/p/chromium/issues/entry?';
+        url += [
+          'summary=' + encodeURIComponent(summary),
+          'comment=' + encodeURIComponent(comment),
+          'labels=' + encodeURIComponent(labels.join(',')),
+          'cc=' + encodeURIComponent(cc.join(','))
+        ].join('&');
+        window.open(url, '_blank');
+      },
+
+      /**
+       * Opens a window with new GitHub issue, pre-filled with some info.
+       * @param {string} summary The title of the bug.
+       * @param {string} comment The description of the bug.
+       * @param {string} label Label to apply to the bug.
+       */
+      openGitHubIssueWindow: function(summary, comment, label) {
+        var url = 'https://github.com/catapult-project/catapult/issues/new?';
+        url += [
+          'title=' + encodeURIComponent(summary),
+          'body=' + encodeURIComponent(comment),
+          'labels=' + encodeURIComponent(label),
+        ].join('&');
+        window.open(url, '_blank');
+      },
+
+      /**
+       * Opens a window to report a general dashboard bug.
+       */
+      reportBug: function() {
+        var os = this.guessOS();
+        var chromeVersion = 'unknown';
+        var chromeVersionMatch = navigator.userAgent.match(/Chrome\/(\S*)/);
+        if (chromeVersionMatch) {
+          chromeVersion = chromeVersionMatch[1];
+        }
+        var description = 'Chrome version: ' + chromeVersion;
+        description += ' (' + os + ')\n';
+        description += 'URL: ' + document.location.href + '\n\n';
+        description += 'Please copy and paste any errors from the JavaScript';
+        description += ' console (';
+        description += (os == 'Mac' ? 'Command+Option+J' : 'Ctrl+Shift+J');
+        description += ' to open):\n\n';
+        description += 'Please describe the problem:\n';
+        this.openGitHubIssueWindow(
+            'Perf Dashboard: ', description, 'Perf Dashboard');
+      },
+
+      /**
+       * Guesses the user's OS from the user agent string (for pre-filling bug labels).
+       * @return {string} The name of an OS.
+       */
+      guessOS: function() {
+        var userAgentContains = function(s) {
+          return navigator.userAgent.indexOf(s) != -1;
+        };
+        if (userAgentContains('CrOS')) {
+          return 'Chrome OS';
+        } else if (userAgentContains('Windows')) {
+          return 'Windows';
+        } else if (userAgentContains('Macintosh')) {
+          return 'Mac';
+        } else if (userAgentContains('Android')) {
+          // Checked before Linux because Android user agents also contain
+          // the string 'Linux'.
+          return 'Android';
+        } else if (userAgentContains('Linux')) {
+          return 'Linux';
+        }
+        return 'unknown';
+      },
+
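+      /**
+       * Opens a pre-filled bug to request whitelisting of buildbot IP
+       * addresses so they can send data to the dashboard.
+       */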
+      fileIpWhitelistRequest: function() {
+        var description = 'Please whitelist the following IP addresses ' +
+            'to send data to the Chrome Perf Dashboard:\n' +
+            '<IP ADDRESSES HERE>\n\n' +
+            'These buildbots are for:\n';
+        var labels = ['Performance-Dashboard-IPWhitelist',
+                      'Restrict-View-Google'];
+        this.openCrBugWindow('IP Whitelist Request', description, labels, []);
+      },
+
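+      /**
+       * Opens a pre-filled bug to request that bots and their data be made
+       * publicly visible.
+       */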
+      fileBotWhitelistRequest: function() {
+        var description = 'Please make the following bots and all their data ' +
+            'publicly available, with no google.com login required: \n' +
+            '<BOT NAMES HERE>\n\n';
+        var labels = ['Performance-Dashboard-BotWhitelist',
+                      'Restrict-View-Google'];
+        this.openCrBugWindow('Bot Whitelist Request', description, labels, []);
+      },
+
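+      /**
+       * Opens a pre-filled bug to request alert monitoring for tests.
+       */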
+      fileMonitoringRequest: function() {
+        var description = 'Please add monitoring for the following tests:\n\n' +
+            'Test owner (see http://go/perf-test-owners):\n' +
+            'Buildbot master name:\n' +
+            'Test suite names:\n' +
+            'Restrict to these specific traces (if any):\n' +
+            'Email address and/or URL of sheriff rotation: \n' +
+            'Receive individual email alerts immediately or as a daily' +
+            ' summary?\nShould these alerts be Google-internal?\n';
+        var labels = [
+          'Performance-Dashboard-MonitoringRequest',
+          'Restrict-View-Google'
+        ];
+        this.openCrBugWindow('Monitoring Request', description, labels, []);
+      }
+    });
+  </script>
+</polymer-element>
diff --git a/catapult/dashboard/dashboard/elements/overlay-message.html b/catapult/dashboard/dashboard/elements/overlay-message.html
index f25007f..445e9cb 100644
--- a/catapult/dashboard/dashboard/elements/overlay-message.html
+++ b/catapult/dashboard/dashboard/elements/overlay-message.html
@@ -1,3 +1,9 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
 <!--
 The 'overlay-message' displays an overlaid pop-up on top of the content.
 
@@ -20,8 +26,8 @@
     top: 26px;
     left: 10px;
   }
-
 -->
+
 <link rel="import" href="/components/core-overlay/core-overlay.html">
 <link rel="import" href="/components/paper-shadow/paper-shadow.html">
 
diff --git a/catapult/dashboard/dashboard/elements/primary-button.html b/catapult/dashboard/dashboard/elements/primary-button.html
new file mode 100644
index 0000000..5755e60
--- /dev/null
+++ b/catapult/dashboard/dashboard/elements/primary-button.html
@@ -0,0 +1,38 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/components/paper-button/paper-button.html">
+
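+<!-- A blue-styled wrapper around paper-button for primary actions; declared
+     noscript because it needs no element-specific script. -->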
+<polymer-element name="primary-button" extends="button" noscript>
+  <template>
+    <style>
+      :host {
+        border: 0;
+        padding: 0;
+        background: transparent;
+        font-size: inherit;
+      }
+
+      :host(.mini) {
+        height: 22px;
+        line-height: 0.5em;
+        margin-left: 5px;
+        padding-top: 0;
+      }
+
+      paper-button {
+        background-color: #4285f4;
+        color: white;
+      }
+    </style>
+
+    <paper-button raised disabled?="{{disabled}}">
+      <content></content>
+    </paper-button>
+
+  </template>
+</polymer-element>
diff --git a/catapult/dashboard/dashboard/elements/quick-log-test.html b/catapult/dashboard/dashboard/elements/quick-log-test.html
new file mode 100644
index 0000000..6674da5
--- /dev/null
+++ b/catapult/dashboard/dashboard/elements/quick-log-test.html
@@ -0,0 +1,139 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/dashboard/elements/quick-log.html">
+<link rel="import" href="/dashboard/static/testing_common.html">
+
+<link rel="import" href="/tracing/core/test_utils.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+
+  var testOptions = {
+    tearDown: function() {
+      testing_common.clearXhrMock();
+    }
+  };
+
+  test('instantiate basic', function() {
+    var quickLog = document.createElement('quick-log');
+    this.addHTMLOutput(quickLog);
+
+    var params = {
+      'namespace': 'test_namespace',
+      'name': 'test_name',
+      'size': 100,
+      'xsrf_token': 'undefined'
+    };
+    var mockResponse = [
+      {
+        'message': 'Second sample message.',
+        'id': 'abc-124',
+        'timestamp': 1454030606,
+      },
+      {
+        'message': 'First sample message.',
+        'id': 'abc-123',
+        'timestamp': 1454030605,
+      }
+    ];
+    var query = ('/get_logs?' + testing_common.paramString(params));
+    testing_common.addXhrMock(query, JSON.stringify(mockResponse));
+    quickLog.initialize('A label', 'test_namespace', 'test_name');
+  }, testOptions);
+
+  test('logs added on initializing.', function() {
+    var quickLog = document.createElement('quick-log');
+    testing_common.addToFixture(quickLog);
+    var params = {
+      'namespace': 'test_namespace',
+      'name': 'test_name',
+      'size': 100,
+      'xsrf_token': 'undefined'
+    };
+    var mockResponse = [
+      {
+        'message': 'Second sample message.',
+        'id': 'abc-124',
+        'timestamp': 1454030606,
+      },
+      {
+        'message': 'First sample message.',
+        'id': 'abc-123',
+        'timestamp': 1454030605,
+      }
+    ];
+    var query = ('/get_logs?' + testing_common.paramString(params));
+    testing_common.addXhrMock(query, JSON.stringify(mockResponse));
+    quickLog.initialize('A label', 'test_namespace', 'test_name');
+
+    return new Promise(function(resolve) {
+      function check() {
+        assert.equal(2, quickLog.logList.length);
+        resolve();
+      }
+      setTimeout(check, 10);
+    });
+  }, testOptions);
+
+  test('new logs are added and duplicates are removed.', function() {
+    var quickLog = document.createElement('quick-log');
+    testing_common.addToFixture(quickLog);
+    quickLog.initialize('A label', 'test_namespace', 'test_name');
+
+    var params = {
+      'namespace': 'test_namespace',
+      'name': 'test_name',
+      'size': 100,
+      'xsrf_token': 'undefined',
+      'after_timestamp': '1454030606'
+    };
+    var sampleLogs = [
+      {
+        'message': 'Second message.',
+        'id': 'abc-124',
+        'timestamp': 1454030606,
+      },
+      {
+        'message': 'First message.',
+        'id': 'abc-123',
+        'timestamp': 1454030605,
+      }
+    ];
+    quickLog.updateLogs(sampleLogs);
+
+    var newSampleLogs = [
+      {
+        'message': 'Updated first message.',
+        'id': 'abc-123',
+        'timestamp': 1454030608,
+      },
+      {
+        'message': 'Third message.',
+        'id': 'abc-125',
+        'timestamp': 1454030607,
+      },
+    ];
+    var query = ('/get_logs?' + testing_common.paramString(params));
+    testing_common.addXhrMock(query, JSON.stringify(newSampleLogs));
+    quickLog.getLogs();
+
+    return new Promise(function(resolve) {
+      function check() {
+        assert.equal(3, quickLog.logList.length);
+        assert.equal('Updated first message.', quickLog.logList[0].message);
+        assert.equal('Third message.', quickLog.logList[1].message);
+        assert.equal('Second message.', quickLog.logList[2].message);
+        resolve();
+      }
+      setTimeout(check, 10);
+    });
+  }, testOptions);
+});
+</script>
diff --git a/catapult/dashboard/dashboard/elements/quick-log.html b/catapult/dashboard/dashboard/elements/quick-log.html
index fcf7236..3b42e67 100644
--- a/catapult/dashboard/dashboard/elements/quick-log.html
+++ b/catapult/dashboard/dashboard/elements/quick-log.html
@@ -1,3 +1,10 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
 <link rel="import" href="/components/core-icon-button/core-icon-button.html">
 
 <link rel="import" href="/dashboard/static/simple_xhr.html">
@@ -126,7 +133,7 @@
         text-overflow: ellipsis;
       }
 
-      /* Wraps text and also preverses line break.*/
+      /* Wraps text and also preserves line break.*/
       #logs td .message.expand pre {
         white-space: pre-line;
         position: static;
@@ -173,7 +180,7 @@
     'use strict';
     Polymer('quick-log', {
 
-      MAX_LOG_SIZE: 100,
+      MAX_LOG_REQUEST_SIZE: 100,
 
       /**
        * Custom element lifecycle callback, called once this element is ready.
@@ -210,11 +217,9 @@
        * Sends XMLHttpRequest to get logs.
        * @param {boolean} latest True to get the latest logs,
                           False to get older logs.
-       * @param {Number} size Number of logs to retrieve.
        */
-      getLogs: function(latest, size) {
+      getLogs: function(latest) {
         latest = ((latest == undefined) ? true : latest);
-        size = size || this.MAX_LOG_SIZE;
         if (this.xhr) {
           this.xhr.abort();
           this.xhr = null;
@@ -223,7 +228,7 @@
         var params = {
            namespace: this.logNamespace,
            name: this.logName,
-           size: size,
+           size: this.MAX_LOG_REQUEST_SIZE,
            xsrf_token: this.xsrfToken
         };
         if (this.logFilter) {
@@ -231,10 +236,10 @@
         }
         if (this.logList.length > 0) {
           if (latest) {
-            params['after_index'] = this.logList[0].index;
+            params['after_timestamp'] = this.logList[0].timestamp;
           } else {
-            var lastIndex = this.logList[this.logList.length - 1].index;
-            params['before_index'] = lastIndex;
+            var lastLog = this.logList[this.logList.length - 1];
+            params['before_timestamp'] = lastLog.timestamp;
           }
         }
         this.xhr = simple_xhr.send('/get_logs', params,
@@ -257,61 +262,64 @@
        * @param {Array.<Object>} newLogs Array of log objects.
        */
       updateLogs: function(newLogs) {
-        if (this.logList.length == 0) {
-          this.logList = newLogs.slice(0, this.MAX_LOG_SIZE);
-          this.insertLogRows(this.logList);
-          return;
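+        // New logs are prepended when even the oldest incoming log is at
+        // least as new as the newest log already shown; otherwise they are
+        // appended.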
+        var insertBefore = true;
+        if (this.logList.length) {
+          var lastTimestamp = newLogs[newLogs.length - 1].timestamp;
+          insertBefore = lastTimestamp >= this.logList[0].timestamp;
         }
-        var lastIndex = this.logList[this.logList.length - 1].index;
-        var newIndex = newLogs[0].index;
-        if (lastIndex >= newIndex) {
-          // Prepend new logs.
-          this.logList.push.apply(this.logList, newLogs);
-          this.insertLogRows(newLogs);
-        } else {
-          // Append new logs.
-          this.logList.unshift.apply(this.logList, newLogs);
-          this.insertLogRows(newLogs, true);
+
+        var table = this.$.logs;
+        if (insertBefore) {
+          newLogs.reverse();
         }
+        for (var i = 0; i < newLogs.length; i++) {
+          this.removeLog(table, newLogs[i]);
+          this.insertLog(table, newLogs[i], insertBefore);
+        }
+        this.updateHeight();
       },
 
       /**
-       * Inserts list of logs into HTML table.
-       * @param {Array.<Object>} logs Array of log objects.
+       * Inserts a log into HTML table.
+       * @param {Object} table Table HTML element.
+       * @param {Object} log A log object.
        * @param {boolean} insertBefore true to prepend, false to append.
        */
-      insertLogRows: function(logs, insertBefore) {
-        var table = this.$.logs;
-        for (var i = 0; i < logs.length; i++) {
-          var row = document.createElement('tr');
+      insertLog: function(table, log, insertBefore) {
+        if (insertBefore) {
+          this.logList.unshift(log);
+        } else {
+          this.logList.push(log);
+        }
+        var row = document.createElement('tr');
+        var expandTd = document.createElement('td');
+        row.appendChild(expandTd);
+        var span = document.createElement('span');
+        span.className = 'toggle-arrow arrow-right';
+        expandTd.appendChild(span);
 
-          var expandTd = document.createElement('td');
-          row.appendChild(expandTd);
-          var span = document.createElement('span');
-          span.className = 'toggle-arrow arrow-right';
-          expandTd.appendChild(span);
+        var td = document.createElement('td');
+        var messageDiv = document.createElement('div');
+        messageDiv.className = 'message';
+        row.appendChild(td);
+        td.appendChild(messageDiv);
+        messageDiv.innerHTML = '<pre>' + log.message + '</pre>';
+        span.onclick = this.onLogToggleClick.bind(this, messageDiv);
+        table.insertBefore(row, table.childNodes[0]);
+      },
 
-          var td = document.createElement('td');
-          var messageDiv = document.createElement('div');
-          messageDiv.className = 'message';
-          row.appendChild(td);
-          td.appendChild(messageDiv);
-          messageDiv.innerHTML = '<pre>' + logs[i].message + '</pre>';
-          span.onclick = this.onLogToggleClick.bind(this, messageDiv);
-
-          if (insertBefore) {
-            if (table.rows.length >= this.MAX_LOG_SIZE) {
-              table.deleteRow(table.rows.length - 1);
-            }
-            table.insertBefore(row, table.childNodes[0]);
-          } else {
-            if (table.rows.length >= this.MAX_LOG_SIZE) {
-              table.deleteRow(0);
-            }
-            table.appendChild(row);
+      /**
+       * Removes a log.
+       * @param {Object} table Table HTML element.
+       * @param {Object} log A log object.
+       */
+      removeLog: function(table, log) {
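+        // Remove any existing entry (and its table row) with the same id so
+        // that an updated message replaces the stale one.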
+        for (var i = 0; i < this.logList.length; i++) {
+          if (log.id == this.logList[i].id) {
+            this.logList.splice(i, 1);
+            table.deleteRow(i);
           }
         }
-        this.updateHeight();
       },
 
       /**
diff --git a/catapult/dashboard/dashboard/elements/report-container.html b/catapult/dashboard/dashboard/elements/report-container.html
deleted file mode 100644
index 86a6500..0000000
--- a/catapult/dashboard/dashboard/elements/report-container.html
+++ /dev/null
@@ -1,197 +0,0 @@
-<link rel="import" href="/dashboard/elements/test-picker.html">
-<link rel="import" href="/dashboard/static/uri.html">
-
-<polymer-element name="report-container" attributes="hasChart xsrfToken">
-  <template>
-    <style>
-      #nav-container {
-        display: flex;
-        margin: 5px;
-      }
-    </style>
-
-    <div id="nav-container">
-      <test-picker id="test-picker" xsrfToken="{{xsrfToken}}"></test-picker>
-    </div>
-
-  </template>
-
-  <script>
-    'use strict';
-    Polymer('report-container', {
-
-      ready: function() {
-        this.charts = [];
-        this.graphParams = {};
-
-        window.addEventListener('uriload', this.onUriLoad.bind(this));
-        this.uriController = new uri.Controller(this.getPageState.bind(this));
-        this.uriController.load();
-
-        window.addEventListener('pagestaterequest', this.onPageStateRequest);
-
-        this.testPicker = this.$['test-picker'];
-        this.testPicker.addEventListener(
-            'add', this.onAddChartButtonClicked.bind(this));
-      },
-
-      /**
-       * On 'uriload' event, adds charts from the current query parameters.
-       * @param {Object} event Event object.
-       */
-      onUriLoad: function(event) {
-        var params = event.detail.params;
-        var pageState = event.detail.state;
-        if (!pageState) {
-          return;
-        }
-        // Set page level parameters.
-        this.graphParams = {};
-        for (var key in params) {
-          this.graphParams[key] = params[key];
-        }
-
-        // Add charts.
-        var chartStates = pageState['charts'];
-        for (var i = 0; i < chartStates.length; i++) {
-          this.addChart(chartStates[i], false);
-        }
-      },
-
-      /**
-       * Adds a chart.
-       * @param {Array.<Array>} testPathAndSelected A list of two-element
-       *     Arrays, each containing a test path and selected series to plot.
-       * @param {boolean} isPrepend True for prepend, false for append.
-       */
-      addChart: function(testPathAndSelected, isPrepend) {
-        var container = document.getElementById('charts-container');
-        var chart = document.createElement('chart-container');
-        if (isPrepend) {
-          this.charts.unshift(chart);
-          container.insertBefore(chart, container.firstChild);
-        } else {
-          this.charts.push(chart);
-          container.appendChild(chart);
-        }
-
-        chart.addEventListener(
-            'chartclosed', this.onChartClosed.bind(this), true);
-        chart.addEventListener(
-            'chartstatechanged',
-            this.uriController.onPageStateChanged.bind(this.uriController));
-        chart.addEventListener(
-            'revisionrange', this.onRevisionRangeChanged.bind(this));
-
-        chart.revisionInfo = window['REVISION_INFO'];
-        chart.xsrfToken = this.xsrfToken;
-        chart.graphParams = this.graphParams;
-        chart.addSeriesGroup(testPathAndSelected, true);
-        this.testPicker.hasChart = true;
-      },
-
-      /**
-       * On chart closed, update URI.
-       */
-      onChartClosed: function(event) {
-        var chart = event.target;
-        var index = this.charts.indexOf(chart);
-        if (index > -1) {
-          this.charts.splice(index, 1);
-        }
-
-        this.firenNumChartChangedEvent();
-      },
-
-      /**
-       * Triggers page state change handler with 'numchartchanged' event.
-       */
-      firenNumChartChangedEvent: function() {
-        // Send page state change event.
-        var event = document.createEvent('Event');
-        event.initEvent('numchartchanged', true, true);
-        event.detail = {
-          'stateName': 'numchartchanged',
-          'params': this.graphParams,
-          'state': {}
-        };
-
-        if (this.charts.length == 0) {
-          event.detail['params'] = null;
-          this.graphParams = {};
-          this.testPicker.hasChart = false;
-        }
-
-        this.uriController.onPageStateChanged(event);
-      },
-
-      /**
-       * When the revision range changes for one graph, update the rest of
-       * the graphs and the URI.
-       */
-      onRevisionRangeChanged: function(event) {
-        for (var i = 0; i < this.charts.length; i++) {
-          var chart = this.charts[i];
-          if (chart == event.target) {
-            continue;
-          }
-          chart.onRevisionRange(event, event['detail'], null);
-        }
-      },
-
-      /**
-       * On 'Add' button clicked, add a chart for the current selection.
-       */
-      onAddChartButtonClicked: function(event) {
-        var selection = this.testPicker.getCurrentSelection();
-        if (selection && selection.isValid()) {
-          this.addChart(selection.getTestPathAndSelectedSeries(), true);
-        }
-        this.firenNumChartChangedEvent();
-      },
-
-      /**
-       * Gets report page state.
-       *
-       * @return {Object} Dictionary of page state data.
-       */
-      getPageState: function() {
-        var chartStates = [];
-        for (var i = 0; i < this.charts.length; i++) {
-          var chart = this.charts[i];
-          chartStates.push(chart.getState());
-        }
-
-        if (chartStates.length === 0) {
-          return null;
-        }
-
-        return {
-          'charts': chartStates
-        };
-      },
-
-      /**
-       * Handles displaying loading messages on 'pagestaterequest' event.
-       */
-      onPageStateRequest: function(event) {
-        var status = event.detail.status;
-        var messageBar = document.getElementById('message-bar');
-        var messageConfig = {
-          'autoCloseDisabled': true,
-          'duration': 0,
-          'delay': 200
-        };
-        if (status == 'loading') {
-          messageBar.updateContent('Saving report...', messageConfig);
-        } else if (status == 'complete') {
-          messageBar['hide']();
-        } else if (status == 'error') {
-          messageBar.updateContent(
-              '<span style="color: red;">Failed to save report</span>',
-              messageConfig);
-        }
-      }
-    });
-  </script>
-</polymer-element>
diff --git a/catapult/dashboard/dashboard/elements/report-page.html b/catapult/dashboard/dashboard/elements/report-page.html
new file mode 100644
index 0000000..45e9e03
--- /dev/null
+++ b/catapult/dashboard/dashboard/elements/report-page.html
@@ -0,0 +1,257 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<link rel="import" href="/dashboard/elements/chart-container.html">
+<link rel="import" href="/dashboard/elements/login-warning.html">
+<link rel="import" href="/dashboard/elements/overlay-message.html">
+<link rel="import" href="/dashboard/elements/test-picker.html">
+<link rel="import" href="/dashboard/static/simple_xhr.html">
+<link rel="import" href="/dashboard/static/uri.html">
+
+<polymer-element name="report-page" attributes="hasChart charts">
+  <template>
+    <style>
+      #nav-container {
+        display: flex;
+        margin: 5px;
+      }
+      overlay-message {
+        top: 0px;
+      }
+    </style>
+    <template if="{{warningMessage}}">
+      <overlay-message id="warning-message"
+                       opened="true"
+                       autoCloseDisabled
+                       duration="-1">
+      {{warningMessage}}
+      <template if="{{warningBug}}">
+        <a href="https://github.com/catapult-project/catapult/issues/{{warningBug}}">See
+          bug #{{warningBug}}.</a>
+      </template>
+      </overlay-message>
+    </template>
+
+    <login-warning id="login-warning" loginLink="{{loginUrl}}"
+                   hidden?="{{isInternalUser}}">
+    </login-warning>
+
+    <div id="nav-container">
+      <test-picker id="test-picker" xsrfToken="{{xsrfToken}}"
+                   testSuites="{{testSuites}}"></test-picker>
+    </div>
+    <section id="charts-container"></section>
+
+  </template>
+
+  <script>
+    'use strict';
+    Polymer('report-page', {
+
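+      /**
+       * Custom element lifecycle callback; requests page data from /report
+       * and sets up URI state handling and the test picker.
+       */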
+      ready: function() {
+        simple_xhr.send('/report', uri.getAllParameters(),
+          function(response) {
+            this.isInternalUser = response['is_internal_user'];
+            this.loginUrl = response['login_url'];
+            this.revisionInfo = response['revision_info'];
+            this.warningBug = response['warning_bug'];
+            this.warningMessage = response['warning_message'];
+            this.xsrfToken = response['xsrf_token'];
+            this.testSuites = response['test_suites'];
+            for (var i = 0; i < this.charts.length; i++) {
+              this.setChartData(this.charts[i]);
+            }
+          }.bind(this),
+          function(error) {
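+            // Errors from /report are ignored here; the page renders with
+            // whatever data is available.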
+
+          }.bind(this));
+        this.charts = [];
+        this.graphParams = {};
+
+        window.addEventListener('uriload', this.onUriLoad.bind(this));
+        this.uriController = new uri.Controller(this.getPageState.bind(this));
+        this.uriController.load();
+
+        window.addEventListener('pagestaterequest',
+                                this.onPageStateRequest.bind(this));
+
+        this.testPicker = this.$['test-picker'];
+        this.testPicker.addEventListener(
+            'add', this.onAddChartButtonClicked.bind(this));
+      },
+
+      /**
+       * On 'uriload' event, adds charts from the current query parameters.
+       * @param {Object} event Event object.
+       */
+      onUriLoad: function(event) {
+        var params = event.detail.params;
+        var pageState = event.detail.state;
+        if (!pageState) {
+          return;
+        }
+        // Set page level parameters.
+        this.graphParams = {};
+        for (var key in params) {
+          this.graphParams[key] = params[key];
+        }
+
+        // Add charts.
+        var chartStates = pageState['charts'];
+        for (var i = 0; i < chartStates.length; i++) {
+          this.addChart(chartStates[i], false);
+        }
+      },
+
+      /**
+       * Updates chart data with member variables.
+       * TODO(sullivan): this should be done with polymer templates, not
+       * JS code.
+       */
+      setChartData: function(chart) {
+        chart.isInternalUser = this.isInternalUser;
+        chart.testSuites = this.testSuites;
+        chart.revisionInfo = this.revisionInfo;
+        chart.xsrfToken = this.xsrfToken;
+        chart.graphParams = this.graphParams;
+      },
+
+      /**
+       * Adds a chart.
+       * @param {Array.<Array>} testPathAndSelected A list of two-element
+       *     Arrays, each containing a test path and selected series to plot.
+       * @param {boolean} isPrepend True for prepend, false for append.
+       */
+      addChart: function(testPathAndSelected, isPrepend) {
+        // TODO(sullivan): This should be done with a polymer template, not
+        // JavaScript-built DOM!!
+        var container = this.$['charts-container'];
+        var chart = document.createElement('chart-container');
+        if (isPrepend) {
+          this.charts.unshift(chart);
+          container.insertBefore(chart, container.firstChild);
+        } else {
+          this.charts.push(chart);
+          container.appendChild(chart);
+        }
+
+        chart.addEventListener(
+            'chartclosed', this.onChartClosed.bind(this), true);
+        chart.addEventListener(
+            'chartstatechanged',
+            this.uriController.onPageStateChanged.bind(this.uriController));
+        chart.addEventListener(
+            'revisionrange', this.onRevisionRangeChanged.bind(this));
+        this.setChartData(chart);
+        chart.addSeriesGroup(testPathAndSelected, true);
+        this.testPicker.hasChart = true;
+      },
+
+      /**
+       * On chart closed, update URI.
+       */
+      onChartClosed: function(event) {
+        var chart = event.target;
+        var index = this.charts.indexOf(chart);
+        if (index > -1) {
+          this.charts.splice(index, 1);
+        }
+
+        this.fireNumChartChangedEvent();
+      },
+
+      /**
+       * Triggers page state change handler with 'numchartchanged' event.
+       */
+      fireNumChartChangedEvent: function() {
+        // Send page state change event.
+        var event = document.createEvent('Event');
+        event.initEvent('numchartchanged', true, true);
+        event.detail = {
+          'stateName': 'numchartchanged',
+          'params': this.graphParams,
+          'state': {}
+        };
+
+        if (this.charts.length == 0) {
+          event.detail['params'] = null;
+          this.graphParams = {};
+          this.testPicker.hasChart = false;
+        }
+
+        this.uriController.onPageStateChanged(event);
+      },
+
+      /**
+       * When the revision range changes for one graph, update the rest of
+       * the graphs and the URI.
+       */
+      onRevisionRangeChanged: function(event) {
+        for (var i = 0; i < this.charts.length; i++) {
+          var chart = this.charts[i];
+          if (chart == event.target) {
+            continue;
+          }
+          chart.onRevisionRange(event, event['detail'], null);
+        }
+      },
+
+      /**
+       * On 'Add' button clicked, add a chart for the current selection.
+       */
+      onAddChartButtonClicked: function(event) {
+        var selection = this.testPicker.getCurrentSelection();
+        if (selection && selection.isValid()) {
+          this.addChart(selection.getTestPathAndSelectedSeries(), true);
+        }
+        this.fireNumChartChangedEvent();
+      },
+
+      /**
+       * Gets report page state.
+       *
+       * @return {Object} Dictionary of page state data.
+       */
+      getPageState: function() {
+        var chartStates = [];
+        for (var i = 0; i < this.charts.length; i++) {
+          var chart = this.charts[i];
+          chartStates.push(chart.getState());
+        }
+
+        if (chartStates.length === 0) {
+          return null;
+        }
+
+        return {
+          'charts': chartStates
+        };
+      },
+
+      /**
+       * Handles displaying loading messages on 'pagestaterequest' event.
+       */
+      onPageStateRequest: function(event) {
+        var status = event.detail.status;
+        var messageBar = document.getElementById('message-bar');
+        var messageConfig = {
+          'autoCloseDisabled': true,
+          'duration': 0,
+          'delay': 200
+        };
+        if (status == 'loading') {
+          messageBar.updateContent('Saving report...', messageConfig);
+        } else if (status == 'complete') {
+          messageBar['hide']();
+        } else if (status == 'error') {
+          messageBar.updateContent(
+              '<span style="color: red;">Failed to save report</span>',
+              messageConfig);
+        }
+      }
+    });
+  </script>
+</polymer-element>
diff --git a/catapult/dashboard/dashboard/elements/revision-range-test.html b/catapult/dashboard/dashboard/elements/revision-range-test.html
index 36a2123..f93e6b4 100644
--- a/catapult/dashboard/dashboard/elements/revision-range-test.html
+++ b/catapult/dashboard/dashboard/elements/revision-range-test.html
@@ -1,13 +1,14 @@
-<!doctype html>
+<!DOCTYPE html>
 <!--
 Copyright 2015 The Chromium Authors. All rights reserved.
 Use of this source code is governed by a BSD-style license that can be
 found in the LICENSE file.
 -->
-<link rel="import" href="/tracing/core/test_utils.html">
 
 <link rel="import" href="/dashboard/elements/revision-range.html">
 
+<link rel="import" href="/tracing/core/test_utils.html">
+
 <script>
 'use strict';
 
diff --git a/catapult/dashboard/dashboard/elements/revision-range.html b/catapult/dashboard/dashboard/elements/revision-range.html
index 34a86d6..3d5c6f6 100644
--- a/catapult/dashboard/dashboard/elements/revision-range.html
+++ b/catapult/dashboard/dashboard/elements/revision-range.html
@@ -1,9 +1,16 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
 <!--
 The revision-range element is an in-line element that shows a revision range.
 TODO(qyearsley) Expand this element to take information about revision types
 (including changelog viewer links, revision type names, etc.) in order
 to allow it to show links).
 -->
+
 <polymer-element name="revision-range" attributes="start end">
   <template>
     <span>{{displayRevisionRange}}</span>
diff --git a/catapult/dashboard/dashboard/elements/select-menu-test.html b/catapult/dashboard/dashboard/elements/select-menu-test.html
index 9d71c8d..959fad4 100644
--- a/catapult/dashboard/dashboard/elements/select-menu-test.html
+++ b/catapult/dashboard/dashboard/elements/select-menu-test.html
@@ -1,13 +1,14 @@
-<!doctype html>
+<!DOCTYPE html>
 <!--
 Copyright 2015 The Chromium Authors. All rights reserved.
 Use of this source code is governed by a BSD-style license that can be
 found in the LICENSE file.
 -->
-<link rel="import" href="/tracing/core/test_utils.html">
 
 <link rel="import" href="/dashboard/elements/select-menu.html">
 
+<link rel="import" href="/tracing/core/test_utils.html">
+
 <script>
 'use strict';
 
diff --git a/catapult/dashboard/dashboard/elements/select-menu.html b/catapult/dashboard/dashboard/elements/select-menu.html
index 505cd5e..eeb2b45 100644
--- a/catapult/dashboard/dashboard/elements/select-menu.html
+++ b/catapult/dashboard/dashboard/elements/select-menu.html
@@ -1,6 +1,13 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/components/core-menu/core-menu.html">
 <link rel="import" href="/components/paper-dropdown-menu/paper-dropdown-menu.html">
 <link rel="import" href="/components/paper-dropdown/paper-dropdown.html">
-<link rel="import" href="/components/core-menu/core-menu.html">
 <link rel="import" href="/components/paper-item/paper-item.html">
 
 <polymer-element name="select-menu" attributes="menuItems">
diff --git a/catapult/dashboard/dashboard/elements/test-picker-test.html b/catapult/dashboard/dashboard/elements/test-picker-test.html
index cdea191..1630436 100644
--- a/catapult/dashboard/dashboard/elements/test-picker-test.html
+++ b/catapult/dashboard/dashboard/elements/test-picker-test.html
@@ -1,13 +1,14 @@
-<!doctype html>
+<!DOCTYPE html>
 <!--
 Copyright 2015 The Chromium Authors. All rights reserved.
 Use of this source code is governed by a BSD-style license that can be
 found in the LICENSE file.
 -->
-<link rel="import" href="/tracing/core/test_utils.html">
 
 <link rel="import" href="/dashboard/elements/test-picker.html">
 
+<link rel="import" href="/tracing/core/test_utils.html">
+
 <script>
 'use strict';
 
@@ -15,31 +16,27 @@
 
   var testOptions = {
     setUp: function() {
-      // Test suites dict keys 'mas', 'mon', 'dep', and 'des' are
-      // abbreviations for 'masters', 'monitored', 'deprecated', and
-      // 'description', respectively.
-      window.TEST_SUITES = {
-        'endure': {
-          'mas': {'Chromium': {'mac': false, 'win7': true}},
-          'dep': true
-        },
-        'dromaeo': {
-          'mas': {'Chromium': {'mac': false, 'win7': false}}
-        },
-        'scrolling': {
-          'mas': {'Chromium': {'mac': false, 'win7': false}},
-          'mon': ['average/www.yahoo.com']
-        }
-      };
     },
 
     tearDown: function() {
-      delete window.TEST_SUITES;
     }
   };
 
   test('getSuiteItems', function() {
     var testPicker = document.createElement('test-picker');
+    testPicker.testSuites = {
+      'endure': {
+        'mas': {'Chromium': {'mac': false, 'win7': true}},
+        'dep': true
+      },
+      'dromaeo': {
+        'mas': {'Chromium': {'mac': false, 'win7': false}}
+      },
+      'scrolling': {
+        'mas': {'Chromium': {'mac': false, 'win7': false}},
+        'mon': ['average/www.yahoo.com']
+      }
+    };
     var suiteItems = testPicker.getSuiteItems();
     // Test suites should be in the order of monitored, unmonitored,
     // and deprecated.
diff --git a/catapult/dashboard/dashboard/elements/test-picker.html b/catapult/dashboard/dashboard/elements/test-picker.html
index 5efeb05..fc9771d 100644
--- a/catapult/dashboard/dashboard/elements/test-picker.html
+++ b/catapult/dashboard/dashboard/elements/test-picker.html
@@ -1,3 +1,10 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
 <link rel="import" href="/components/core-icon-button/core-icon-button.html">
 <link rel="import" href="/components/paper-button/paper-button.html">
 <link rel="import" href="/components/paper-progress/paper-progress.html">
@@ -6,7 +13,7 @@
 <link rel="import" href="/dashboard/static/simple_xhr.html">
 <link rel="import" href="/dashboard/static/testselection.html">
 
-<polymer-element name="test-picker" attributes="hasChart xsrfToken">
+<polymer-element name="test-picker" attributes="hasChart xsrfToken testSuites">
   <template>
     <style>
       #container * {
@@ -104,7 +111,6 @@
       UNMONITORED_TAG: 'unmonitored',
 
       ready: function() {
-        this.TEST_SUITES = window['TEST_SUITES'];
         this.pageStateLoading = true;
         this.hasChart = false;
         this.enableAddSeries = false;
@@ -123,6 +129,11 @@
         ];
       },
 
+      testSuitesChanged: function() {
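+        // Observer for the published testSuites attribute; rebuilds the
+        // test suite menu whenever new suite data is set.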
+        this.selectionModels[0].datalist = this.getSuiteItems();
+        this.getSelectionMenu(0).setDataList(this.selectionModels[0].datalist);
+      },
+
       /**
        * Gets a list of menu items for test suites.
        */
@@ -132,17 +143,17 @@
         var isUnmonitored = function(suite) {
           // A suite is considered unmonitored if there are no monitored
           // subtests.
-          var suiteInfo = self.TEST_SUITES[suite];
+          var suiteInfo = self.testSuites[suite];
           return !suiteInfo['mon'] || suiteInfo['mon'].length == 0;
         };
 
-        for (var suite in this.TEST_SUITES) {
+        for (var suite in this.testSuites) {
           var suiteItem = {
             name: suite
           };
 
           var tags = [];
-          if (this.TEST_SUITES[suite]['dep']) {
+          if (this.testSuites[suite]['dep']) {
             suiteItem.deprecated = true;
             tags.push(this.DEPRECATED_TAG);
           }
@@ -163,10 +174,10 @@
        */
       getBotItems: function() {
         var suite = this.getSelectionMenu(0).value;
-        if (!this.TEST_SUITES[suite]) {
+        if (!this.testSuites[suite]) {
           return [];
         }
-        var dict = this.TEST_SUITES[suite]['mas'];
+        var dict = this.testSuites[suite]['mas'];
         var botMenuItems = [];
         var masters = Object.keys(dict).sort();
         for (var i = 0; i < masters.length; i++) {
@@ -212,12 +223,12 @@
         // Display the test suite description if there is one.
         var descriptionElement = this.$['suite-description'];
         var suite = this.getSelectionMenu(0).value;
-        if (!suite) {
+        if (!suite || !this.testSuites[suite]) {
           descriptionElement.innerHTML = '';
           return;
         }
 
-        var description = this.TEST_SUITES[suite]['des'];
+        var description = this.testSuites[suite]['des'];
         if (description) {
           var descriptionHTML = '<b>' + suite + '</b>: ';
           descriptionHTML += this.convertMarkdownLinks(description);
@@ -411,7 +422,7 @@
         }
 
         var suite = this.getSelectionMenu(0).getSelectedItems()[0];
-        var selection = new testselection.TestSelection();
+        var selection = new testselection.TestSelection(this.testSuites);
         var testPathAndSelected = this.addTestPathFromSubtestDict(
             this.subtestDict, suite.name, 2);
         var bots = this.getCheckedBots();
diff --git a/catapult/dashboard/dashboard/elements/tooltip-test-description.html b/catapult/dashboard/dashboard/elements/tooltip-test-description.html
index 50ee563..32fe01f 100644
--- a/catapult/dashboard/dashboard/elements/tooltip-test-description.html
+++ b/catapult/dashboard/dashboard/elements/tooltip-test-description.html
@@ -1,3 +1,9 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
 <!--
 Test description shown in chart-legend's tooltip.
 -->
diff --git a/catapult/dashboard/dashboard/elements/trace-button.html b/catapult/dashboard/dashboard/elements/trace-button.html
index 5882074..51f9685 100644
--- a/catapult/dashboard/dashboard/elements/trace-button.html
+++ b/catapult/dashboard/dashboard/elements/trace-button.html
@@ -1,7 +1,19 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<!--
+Most of this code is duplicated in bisect-form.
+TODO(qyearsley): This should be resolved for better code health.
+See https://github.com/catapult-project/catapult/issues/1905
+-->
+
 <link rel="import" href="/components/paper-button/paper-button.html">
 
 <link rel="import" href="/dashboard/elements/trace-form.html">
-<link rel="import" href="/dashboard/static/bisect_utils.html">
+<link rel="import" href="/dashboard/static/simple_xhr.html">
 
 <polymer-element name="trace-button" attributes="traceInfo bugId xsrfToken">
   <template>
@@ -58,13 +70,25 @@
          * Updates the canTrace state based on the traceInfo state.
          */
         update: function() {
+          this.canTrace = false;
           if (!this.traceInfo) {
-            this.canTrace = false;
             return;
           }
-          var testPath = this.traceInfo.testPath;
-          var rev = this.traceInfo.badRev;
-          this.canTrace = bisect_utils.canBisect(testPath, rev);
+          var that = this;
+          simple_xhr.send(
+              '/can_bisect',
+              {
+                'test_path': this.traceInfo.testPath,
+                'start_revision': this.traceInfo.goodRev,
+                'end_revision': this.traceInfo.badRev,
+              },
+              function loadCallback(responseBool) {
+                that.canTrace = responseBool;
+              },
+              function errorCallback(message) {
+                console.warn('Request to /can_bisect failed.', message);
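+                // Fall back to allowing tracing when the /can_bisect check
+                // cannot be completed.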
+                that.canTrace = true;
+              });
         },
 
         /**
diff --git a/catapult/dashboard/dashboard/elements/trace-form.html b/catapult/dashboard/dashboard/elements/trace-form.html
index 7b39bc5..f93defa 100644
--- a/catapult/dashboard/dashboard/elements/trace-form.html
+++ b/catapult/dashboard/dashboard/elements/trace-form.html
@@ -1,7 +1,14 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
 <!--
 The trace-form element includes the form for all of the different stages of
 the trace process after the user clicks on the trace button.
 -->
+
 <link rel="import" href="/components/paper-button/paper-button.html">
 <link rel="import" href="/components/paper-dialog/paper-action-dialog.html">
 <link rel="import" href="/components/paper-spinner/paper-spinner.html">
diff --git a/catapult/dashboard/dashboard/elements/triage-dialog.html b/catapult/dashboard/dashboard/elements/triage-dialog.html
index e7d9c01..d14bc20 100644
--- a/catapult/dashboard/dashboard/elements/triage-dialog.html
+++ b/catapult/dashboard/dashboard/elements/triage-dialog.html
@@ -1,7 +1,14 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
 <!--
 The triage-dialog element is the dialog box that is shown when a user clicks
 on an alert, or clicks on a "triage" button on the alerts page. It allows the
 -->
+
 <link rel="import" href="/components/paper-button/paper-button.html">
 <link rel="import" href="/components/paper-dialog/paper-action-dialog.html">
 
@@ -99,6 +106,11 @@
        * Called when the triage-dialog element is ready.
        */
       ready: function() {
+        // This allows tooltip to show beyond the current window size.
+        // See chart-tooltip.html for more details.
+        this.$.container.sizingTarget = document.querySelector(
+            'html /deep/ paper-action-dialog::shadow #scroller');
+
         this.bugWindow = null;
         this.close();
         // Event listener for the message event.
diff --git a/catapult/dashboard/dashboard/email_summary.py b/catapult/dashboard/dashboard/email_summary.py
index c9dc889..6faab51 100644
--- a/catapult/dashboard/dashboard/email_summary.py
+++ b/catapult/dashboard/dashboard/email_summary.py
@@ -79,7 +79,7 @@
 
 
 def _MaximalRevisionRange(anomalies):
-  """Get the lowest start and highest end revision for |anomalies|."""
+  """Gets the lowest start and highest end revision for |anomalies|."""
   lowest_revision = sys.maxint
   highest_revision = 1
   for anomaly_entity in anomalies:
diff --git a/catapult/dashboard/dashboard/email_template.py b/catapult/dashboard/dashboard/email_template.py
index bec6390..fa7bd8a 100644
--- a/catapult/dashboard/dashboard/email_template.py
+++ b/catapult/dashboard/dashboard/email_template.py
@@ -62,6 +62,7 @@
 _PERF_TRY_EMAIL_HTML_BODY = """
 Perf Try Job %(status)s
 <br><br>
+%(warnings)s
 A Perf Try Job was submitted on %(bot)s at
 <a href="%(perf_url)s">%(perf_url)s</a>.<br>
 <table cellpadding='4'>
@@ -75,7 +76,7 @@
 
 _PERF_TRY_EMAIL_TEXT_BODY = """
 Perf Try Job %(status)s
-
+%(warnings)s
 Bot: %(bot)s
 Test: %(command)s
 Revision Range:%(start)s - %(end)s
@@ -92,16 +93,18 @@
     'Bisect FYI Try Job Failed on %(bot)s for %(test_name)s.')
 
 _BISECT_FYI_EMAIL_HTML_BODY = """
-Bisect FYI Try Job Failed
+<font color="red"><b>Bisect FYI Try Job Failed</b></font>
+<br><br>
+%(message)s
 <br><br>
 A Bisect FYI Try Job for %(test_name)s was submitted on %(bot)s at
 <a href="%(job_url)s">%(job_url)s</a>.<br>
 <table cellpadding='4'>
   <tr><td>Bot:</td><td><b>%(bot)s</b></td>
   <tr><td>Test Case:</td><td><b>%(test_name)s</b></td>
-  <tr><td>Bisect Config:</td><td><b>%(config)s</b></td>
-  <tr><td>Error Details:</td><td><b>%(errors)s</b></td>
-  <tr><td>Bisect Results:</td><td><b>%(results)s</b></td>
+  <tr><td>Bisect Config:</td><td><b><pre>%(config)s</pre></b></td>
+  <tr><td>Error Details:</td><td><b><pre>%(errors)s</pre></b></td>
+  <tr><td>Bisect Results:</td><td><b><pre>%(results)s</pre></b></td>
 </table>
 """
 
@@ -119,35 +122,36 @@
 """
 
 
-def GetPerfTryJobEmail(perf_results):
+def GetPerfTryJobEmailReport(try_job_entity):
   """Gets the contents of the email to send once a perf try job completes."""
-  if perf_results['status'] == 'Completed':
+  results_data = try_job_entity.results_data
+  config = try_job_entity.GetConfigDict()
+  if results_data['status'] == 'completed':
     profiler_html_links = ''
     profiler_text_links = ''
-    for title, link in perf_results['profiler_results']:
-      profiler_html_links += _PERF_PROFILER_HTML_ROW % {'title': title,
-                                                        'link': link}
-      profiler_text_links += _PERF_PROFILER_TEXT_ROW % {'title': title,
-                                                        'link': link}
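+    # Each dict in profiler_links is used directly to fill in the profiler
+    # row templates.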
+    for link_dict in results_data['profiler_links']:
+      profiler_html_links += _PERF_PROFILER_HTML_ROW % link_dict
+      profiler_text_links += _PERF_PROFILER_TEXT_ROW % link_dict
     subject_dict = {
-        'status': 'Success', 'bot': perf_results['bisect_bot'],
-        'start': perf_results['config']['good_revision'],
-        'end': perf_results['config']['bad_revision']
+        'status': 'Success', 'bot': results_data['bisect_bot'],
+        'start': config['good_revision'],
+        'end': config['bad_revision']
     }
     html_dict = {
         'status': 'SUCCESS',
-        'bot': perf_results['bisect_bot'],
-        'perf_url': perf_results['buildbot_log_url'],
-        'command': perf_results['config']['command'],
-        'start': perf_results['config']['good_revision'],
-        'end': perf_results['config']['bad_revision'],
-        'html_results': perf_results['html_results'],
-        'profiler_results': profiler_html_links
+        'bot': results_data['bisect_bot'],
+        'perf_url': results_data['buildbot_log_url'],
+        'command': config['command'],
+        'start': config['good_revision'],
+        'end': config['bad_revision'],
+        'html_results': results_data['cloud_link'],
+        'profiler_results': profiler_html_links,
     }
+    # Default 'warnings' to an empty string so the email templates, which
+    # reference %(warnings)s, always have the key available.
+    html_dict['warnings'] = ''
+    if results_data.get('warnings'):
+      html_dict['warnings'] = ','.join(results_data['warnings'])
     text_dict = html_dict.copy()
     text_dict['profiler_results'] = profiler_text_links
-  elif perf_results['status'] == 'Failure':
-    config = perf_results.get('config')
+  elif results_data['status'] == 'failed':
     if not config:
       config = {
           'good_revision': '?',
@@ -155,18 +159,18 @@
           'command': '?',
       }
     subject_dict = {
-        'status': 'Failure', 'bot': perf_results['bisect_bot'],
+        'status': 'Failure', 'bot': results_data['bisect_bot'],
         'start': config['good_revision'],
         'end': config['bad_revision']
     }
     html_dict = {
         'status': 'FAILURE',
-        'bot': perf_results['bisect_bot'],
-        'perf_url': perf_results['buildbot_log_url'],
+        'bot': results_data['bisect_bot'],
+        'perf_url': results_data['buildbot_log_url'],
         'command': config['command'],
         'start': config['good_revision'],
         'end': config['bad_revision'],
-        'html_results': '', 'profiler_results': ''
+        'html_results': '', 'profiler_results': '', 'warnings': '',
     }
     text_dict = html_dict
   else:
@@ -312,26 +316,24 @@
   return results
 
 
-def GetBisectFYITryJobEmail(job, test_results):
+def GetBisectFYITryJobEmailReport(job, message):
   """Gets the contents of the email to send once a bisect FYI job completes."""
-  if test_results['status'] != 'Completed':
-    subject_dict = {
-        'bot': test_results['bisect_bot'],
-        'test_name': job.job_name
-    }
-    html_dict = {
-        'bot': test_results['bisect_bot'],
-        'job_url': test_results['buildbot_log_url'],
-        'test_name': job.job_name,
-        'config': job.config if job.config else 'Undefined',
-        'errors': test_results.get('errors'),
-        'results': test_results.get('results'),
-    }
-    text_dict = html_dict
-  else:
-    return None
+  results_data = job.results_data
+  subject_dict = {
+      'bot': results_data['bisect_bot'],
+      'test_name': job.job_name,
+  }
+  report_dict = {
+      'message': message,
+      'bot': results_data['bisect_bot'],
+      'job_url': results_data['buildbot_log_url'],
+      'test_name': job.job_name,
+      'config': job.config if job.config else 'Undefined',
+      'errors': results_data.get('errors'),
+      'results': results_data.get('results'),
+  }
 
-  html = _BISECT_FYI_EMAIL_HTML_BODY % html_dict
-  text = _BISECT_FYI_EMAIL_TEXT_BODY % text_dict
+  html = _BISECT_FYI_EMAIL_HTML_BODY % report_dict
+  text = _BISECT_FYI_EMAIL_TEXT_BODY % report_dict
   subject = _BISECT_FYI_EMAIL_SUBJECT % subject_dict
   return {'subject': subject, 'html': html, 'body': text}
diff --git a/catapult/dashboard/dashboard/embed.py b/catapult/dashboard/dashboard/embed.py
deleted file mode 100644
index 3b0b6dc..0000000
--- a/catapult/dashboard/dashboard/embed.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""URL endpoint to show a chart with minimal UI."""
-
-from google.appengine.api import users
-
-from dashboard import chart_handler
-
-
-class EmbedHandler(chart_handler.ChartHandler):
-
-  def get(self):
-    """Renders the UI for a simple, embeddable chart.
-
-    Request parameters:
-      masters: Comma-separated list of master names.
-      bots: Comma-separated list of bot names.
-      tests: Comma-separated list of of slash-separated test paths
-          (without master/bot).
-      rev: Revision number (optional).
-      num_points: Number of points to plot (optional).
-      start_rev: Starting evision number (optional).
-      end_rev: Ending revision number (optional).
-
-    Outputs:
-      An HTML page with a chart.
-    """
-    # TODO(qyearsley): Re-enable embed page. http://crbug.com/521756.
-    self.response.out.write(
-        'The embed page is temporarily disabled, see http://crbug.com/521756.')
diff --git a/catapult/dashboard/dashboard/file_bug.py b/catapult/dashboard/dashboard/file_bug.py
index c1fb202..86cf431 100644
--- a/catapult/dashboard/dashboard/file_bug.py
+++ b/catapult/dashboard/dashboard/file_bug.py
@@ -34,10 +34,10 @@
   """Uses oauth2 to file a new bug with a set of alerts."""
 
   def post(self):
-    """Make all the functions available via POST as well as GET."""
+    """A POST request for this endpoint is the same as a GET request."""
     self.get()
 
-  @oauth2_decorator.decorator.oauth_required
+  @oauth2_decorator.DECORATOR.oauth_required
   def get(self):
     """Either shows the form to file a bug, or if filled in, files the bug.
 
@@ -54,13 +54,21 @@
       HTML, using the template 'bug_result.html'.
     """
     if not utils.IsValidSheriffUser():
-      user = users.get_current_user()
-      self.ReportError('User "%s" not authorized.' % user, status=403)
+      # TODO(qyearsley): Simplify this message (after a couple months).
+      self.RenderHtml('bug_result.html', {
+          'error': ('You must be logged in with a chromium.org account '
+                    'in order to file bugs here; this has been the case '
+                    'since the switch to the Monorail issue tracker. '
+                    'Note that viewing internal data should work for '
+                    'Googlers who are logged in with their Chromium '
+                    'accounts. See '
+                    'https://github.com/catapult-project/catapult/issues/2042')
+      })
       return
 
     summary = self.request.get('summary')
     description = self.request.get('description')
     labels = self.request.get_all('label')
+    components = self.request.get_all('component')
     keys = self.request.get('keys')
     if not keys:
       self.RenderHtml('bug_result.html', {
@@ -69,7 +77,7 @@
       return
 
     if self.request.get('finish'):
-      self._CreateBug(summary, description, labels, keys)
+      self._CreateBug(summary, description, labels, components, keys)
     else:
       self._ShowBugDialog(summary, description, keys)
 
@@ -81,29 +89,26 @@
       description: The default bug description string.
       urlsafe_keys: Comma-separated Alert keys in urlsafe format.
     """
-    # Fill in the owner field with the logged in user's email. For convenience,
-    # if it's a @google.com account, swap @google.com with @chromium.org.
-    user_email = users.get_current_user().email()
-    if user_email.endswith('@google.com'):
-      user_email = user_email.replace('@google.com', '@chromium.org')
     alert_keys = [ndb.Key(urlsafe=k) for k in urlsafe_keys.split(',')]
-    labels = _FetchLabels(alert_keys)
+    labels, components = _FetchLabelsAndComponents(alert_keys)
     self.RenderHtml('bug_result.html', {
         'bug_create_form': True,
         'keys': urlsafe_keys,
         'summary': summary,
         'description': description,
         'labels': labels,
-        'owner': user_email,
+        'components': components,
+        'owner': users.get_current_user(),
     })
 
-  def _CreateBug(self, summary, description, labels, urlsafe_keys):
+  def _CreateBug(self, summary, description, labels, components, urlsafe_keys):
     """Creates a bug, associates it with the alerts, sends a HTML response.
 
     Args:
       summary: The new bug summary string.
       description: The new bug description string.
       labels: List of label strings for the new bug.
+      components: List of component strings for the new bug.
       urlsafe_keys: Comma-separated alert keys in urlsafe format.
     """
     alert_keys = [ndb.Key(urlsafe=k) for k in urlsafe_keys.split(',')]
@@ -124,9 +129,11 @@
       })
       return
 
-    http = oauth2_decorator.decorator.http()
+    http = oauth2_decorator.DECORATOR.http()
     service = issue_tracker_service.IssueTrackerService(http=http)
-    bug_id = service.NewBug(summary, description, labels=labels, owner=owner)
+
+    bug_id = service.NewBug(
+        summary, description, labels=labels, components=components, owner=owner)
     if not bug_id:
       self.RenderHtml('bug_result.html', {'error': 'Error creating bug!'})
       return
@@ -136,7 +143,8 @@
       alert_entity.bug_id = bug_id
     ndb.put_multi(alerts)
 
-    self._AddAdditionalDetailsToBug(bug_id, alerts)
+    comment_body = _AdditionalDetails(bug_id, alerts)
+    service.AddBugComment(bug_id, comment_body)
 
     template_params = {'bug_id': bug_id}
     if all(k.kind() == 'Anomaly' for k in alert_keys):
@@ -147,32 +155,21 @@
         template_params.update(bisect_result)
     self.RenderHtml('bug_result.html', template_params)
 
-  def _AddAdditionalDetailsToBug(self, bug_id, alerts):
-    """Adds additional data to the bug as a comment.
 
-    Adds the link to /group_report and bug_id as well as the names of the bots
-    that triggered the alerts, and a milestone label.
-
-    Args:
-      bug_id: Bug ID number.
-      alerts: The Alert entities being associated with this bug.
-    """
-    base_url = '%s/group_report' % _GetServerURL()
-    bug_page_url = '%s?bug_id=%s' % (base_url, bug_id)
-    alerts_url = '%s?keys=%s' % (base_url, _UrlsafeKeys(alerts))
-    comment = 'All graphs for this bug:\n  %s\n\n' % bug_page_url
-    comment += 'Original alerts at time of bug-filing:\n  %s\n' % alerts_url
-
-    bot_names = alert.GetBotNamesFromAlerts(alerts)
-    if bot_names:
-      comment += '\n\nBot(s) for this bug\'s original alert(s):\n\n'
-      comment += '\n'.join(sorted(bot_names))
-    else:
-      comment += '\nCould not extract bot names from the list of alerts.'
-
-    http = oauth2_decorator.decorator.http()
-    service = issue_tracker_service.IssueTrackerService(http=http)
-    service.AddBugComment(bug_id, comment)
+def _AdditionalDetails(bug_id, alerts):
+  """Returns a message with additional information to add to a bug."""
+  base_url = '%s/group_report' % _GetServerURL()
+  bug_page_url = '%s?bug_id=%s' % (base_url, bug_id)
+  alerts_url = '%s?keys=%s' % (base_url, _UrlsafeKeys(alerts))
+  comment = 'All graphs for this bug:\n  %s\n\n' % bug_page_url
+  comment += 'Original alerts at time of bug-filing:\n  %s\n' % alerts_url
+  bot_names = alert.GetBotNamesFromAlerts(alerts)
+  if bot_names:
+    comment += '\n\nBot(s) for this bug\'s original alert(s):\n\n'
+    comment += '\n'.join(sorted(bot_names))
+  else:
+    comment += '\nCould not extract bot names from the list of alerts.'
+  return comment
 
 
 def _GetServerURL():
@@ -183,9 +180,10 @@
   return ','.join(a.key.urlsafe() for a in alerts)
 
 
-def _FetchLabels(alert_keys):
-  """Fetches a list of bug labels for the given list of Alert keys."""
+def _FetchLabelsAndComponents(alert_keys):
+  """Fetches a list of bug labels and components for the given Alert keys."""
   labels = set(_DEFAULT_LABELS)
+  components = set()
   alerts = ndb.get_multi(alert_keys)
   if any(a.internal_only for a in alerts):
     # This is a Chrome-specific behavior, and should ideally be made
@@ -193,8 +191,13 @@
     # labels to add for internal bugs).
     labels.add('Restrict-View-Google')
   for test in {a.test for a in alerts}:
-    labels.update(bug_label_patterns.GetBugLabelsForTest(test))
-  return labels
+    labels_components = bug_label_patterns.GetBugLabelsForTest(test)
+    for item in labels_components:
+      if item.startswith('Cr-'):
+        components.add(item.replace('Cr-', '').replace('-', '>'))
+      else:
+        labels.add(item)
+  return labels, components
 
 
 def _MilestoneLabel(alerts):
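
For reference, the label/component split introduced by _FetchLabelsAndComponents above follows a simple convention: any bug-label pattern that starts with 'Cr-' is treated as a Monorail component (dashes become the '>' separator), and everything else stays a plain label. A minimal standalone sketch of that rule (the helper name and sample values are illustrative, not part of the patch):

def split_labels_and_components(patterns):
  # Mirrors the 'Cr-' convention used by _FetchLabelsAndComponents:
  # 'Cr-Performance-Blink' -> component 'Performance>Blink'.
  labels, components = set(), set()
  for item in patterns:
    if item.startswith('Cr-'):
      components.add(item.replace('Cr-', '').replace('-', '>'))
    else:
      labels.add(item)
  return labels, components


# Illustrative values taken from the new test below:
#   split_labels_and_components(['label1-foo', 'Cr-Performance-Blink'])
#   returns ({'label1-foo'}, {'Performance>Blink'})
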
diff --git a/catapult/dashboard/dashboard/file_bug_test.py b/catapult/dashboard/dashboard/file_bug_test.py
index f335cde..cd3e520 100644
--- a/catapult/dashboard/dashboard/file_bug_test.py
+++ b/catapult/dashboard/dashboard/file_bug_test.py
@@ -19,26 +19,54 @@
 from dashboard import testing_common
 from dashboard import utils
 from dashboard.models import anomaly
+from dashboard.models import bug_label_patterns
 from dashboard.models import sheriff
 
 
+class MockIssueTrackerService(object):
+  """A fake version of IssueTrackerService that saves call values."""
+
+  bug_id = 12345
+  new_bug_args = None
+  new_bug_kwargs = None
+  add_comment_args = None
+  add_comment_kwargs = None
+
+  def __init__(self, http=None):
+    pass
+
+  @classmethod
+  def NewBug(cls, *args, **kwargs):
+    cls.new_bug_args = args
+    cls.new_bug_kwargs = kwargs
+    return cls.bug_id
+
+  @classmethod
+  def AddBugComment(cls, *args, **kwargs):
+    cls.add_comment_args = args
+    cls.add_comment_kwargs = kwargs
+
+
 class FileBugTest(testing_common.TestCase):
 
   def setUp(self):
     super(FileBugTest, self).setUp()
     app = webapp2.WSGIApplication([('/file_bug', file_bug.FileBugHandler)])
     self.testapp = webtest.TestApp(app)
-    testing_common.SetSheriffDomains(['chromium.org', 'google.com'])
-    testing_common.SetInternalDomain('google.com')
+    testing_common.SetSheriffDomains(['chromium.org'])
+    testing_common.SetIsInternalUser('internal@chromium.org', True)
+    testing_common.SetIsInternalUser('foo@chromium.org', False)
     self.SetCurrentUser('foo@chromium.org')
-    # When requests are made to the issue tracker service (using the mock
-    # HTTP object in mock_oauth2_decorator), some data is expected,
-    # but not necessarily read.
-    mock_oauth2_decorator.HTTP_MOCK.data = '{"id": 123}'
+
+    # Add a fake issue tracker service that we can get call values from.
+    file_bug.issue_tracker_service = mock.MagicMock()
+    self.original_service = file_bug.issue_tracker_service.IssueTrackerService
+    self.service = MockIssueTrackerService
+    file_bug.issue_tracker_service.IssueTrackerService = self.service
 
   def tearDown(self):
     super(FileBugTest, self).tearDown()
-    mock_oauth2_decorator.MockOAuth2Decorator.past_bodies = []
+    file_bug.issue_tracker_service.IssueTrackerService = self.original_service
     self.UnsetCurrentUser()
 
   def _AddSampleAlerts(self):
@@ -84,7 +112,7 @@
     # If any of the alerts are marked as internal-only, which should happen
     # when the corresponding test is internal-only, then the create bug dialog
     # should suggest adding a Restrict-View-Google label.
-    self.SetCurrentUser('foo@google.com')
+    self.SetCurrentUser('internal@chromium.org')
     alert_keys = self._AddSampleAlerts()
     anomaly_entity = alert_keys[0].get()
     anomaly_entity.internal_only = True
@@ -93,6 +121,18 @@
         '/file_bug?summary=s&description=d&keys=%s' % alert_keys[0].urlsafe())
     self.assertIn('Restrict-View-Google', response.body)
 
+  def testGet_SetsBugLabelsComponents(self):
+    self.SetCurrentUser('internal@chromium.org')
+    alert_keys = self._AddSampleAlerts()
+    bug_label_patterns.AddBugLabelPattern('label1-foo', '*/*/*/first_paint')
+    bug_label_patterns.AddBugLabelPattern('Cr-Performance-Blink',
+                                          '*/*/*/mean_frame_time')
+    response = self.testapp.get(
+        '/file_bug?summary=s&description=d&keys=%s,%s' % (
+            alert_keys[0].urlsafe(), alert_keys[1].urlsafe()))
+    self.assertIn('label1-foo', response.body)
+    self.assertIn('Performance&gt;Blink', response.body)
+
   @mock.patch(
       'google.appengine.api.app_identity.get_default_version_hostname',
       mock.MagicMock(return_value='chromeperf.appspot.com'))
@@ -111,6 +151,7 @@
             ('finish', 'true'),
             ('label', 'one'),
             ('label', 'two'),
+            ('component', 'Foo>Bar'),
         ])
     return response
 
@@ -125,7 +166,7 @@
     # parameter given, an issue will be created using the issue tracker
     # API, and the anomalies will be updated, and a response page will
     # be sent which indicates success.
-    mock_oauth2_decorator.HTTP_MOCK.data = '{"id": 277761}'
+    self.service.bug_id = 277761
     response = self._PostSampleBug()
 
     # The response page should have a bug number.
@@ -139,7 +180,7 @@
         self.assertIsNone(anomaly_entity.bug_id)
 
     # Two HTTP requests are made when filing a bug; only test 2nd request.
-    comment = json.loads(mock_oauth2_decorator.HTTP_MOCK.body)['content']
+    comment = self.service.add_comment_args[1]
     self.assertIn(
         'https://chromeperf.appspot.com/group_report?bug_id=277761', comment)
     self.assertIn('https://chromeperf.appspot.com/group_report?keys=', comment)
@@ -166,8 +207,7 @@
     # M-2 since 111995 (lowest possible revision introducing regression)
     # is less than 112000 (revision for M-2).
     self._PostSampleBug()
-    self.assertIn(u'M-2', json.loads(
-        mock_oauth2_decorator.MockOAuth2Decorator.past_bodies[-1])['labels'])
+    self.assertIn('M-2', self.service.new_bug_kwargs['labels'])
 
   @unittest.skip('Flaky; see #1555.')
   @mock.patch(
@@ -192,8 +232,7 @@
     # label the bug M-2 since 111995 is less than 112000 (M-2) and 111999
     # (M-3) AND M-2 is lower than M-3.
     self._PostSampleBug()
-    self.assertIn(u'M-2', json.loads(
-        mock_oauth2_decorator.MockOAuth2Decorator.past_bodies[-1])['labels'])
+    self.assertIn('M-2', self.service.new_bug_kwargs['labels'])
 
   @mock.patch(
       'google.appengine.api.urlfetch.fetch',
@@ -203,13 +242,21 @@
     # Here, we test that we don't label the bug with an unexpected value when
     # there is no version information from omahaproxy (for whatever reason)
     self._PostSampleBug()
-    labels = json.loads(
-        mock_oauth2_decorator.MockOAuth2Decorator.past_bodies[-1])['labels']
+    labels = self.service.new_bug_kwargs['labels']
     self.assertEqual(0, len([x for x in labels if x.startswith(u'M-')]))
 
   @mock.patch(
       'google.appengine.api.urlfetch.fetch',
       mock.MagicMock(return_value=testing_common.FakeResponseObject(
+          200, '[]')))
+  def testGet_WithFinish_SucceedsWithComponents(self):
+    # Here, we test that components are posted separately from labels.
+    self._PostSampleBug()
+    self.assertIn('Foo>Bar', self.service.new_bug_kwargs['components'])
+
+  @mock.patch(
+      'google.appengine.api.urlfetch.fetch',
+      mock.MagicMock(return_value=testing_common.FakeResponseObject(
           200, json.dumps([
               {
                   'versions': [
@@ -221,8 +268,7 @@
     # Here, we test that we label the bug with the highest milestone when the
     # revision introducing regression is beyond all milestones in the list.
     self._PostSampleBug()
-    self.assertIn(u'M-1', json.loads(
-        mock_oauth2_decorator.MockOAuth2Decorator.past_bodies[-1])['labels'])
+    self.assertIn('M-1', self.service.new_bug_kwargs['labels'])
 
   @mock.patch(
       'google.appengine.api.urlfetch.fetch',
@@ -237,8 +283,7 @@
   @mock.patch('logging.warn')
   def testGet_WithFinish_SucceedsWithNAAndLogsWarning(self, mock_warn):
     self._PostSampleBug()
-    labels = json.loads(
-        mock_oauth2_decorator.MockOAuth2Decorator.past_bodies[-1])['labels']
+    labels = self.service.new_bug_kwargs['labels']
     self.assertEqual(0, len([x for x in labels if x.startswith(u'M-')]))
     self.assertEqual(1, mock_warn.call_count)
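
The test above drops the oauth2 HTTP mock in favor of a fake issue tracker that records its call arguments; the fake is swapped in by overwriting a module attribute in setUp and restored in tearDown. A stripped-down sketch of that injection pattern (the stand-in module and class names below are hypothetical, not the real dashboard modules):

import unittest


class _FakeService(object):
  """Records call arguments, like MockIssueTrackerService above."""
  new_bug_kwargs = None

  @classmethod
  def NewBug(cls, **kwargs):
    cls.new_bug_kwargs = kwargs
    return 12345


class fake_issue_tracker_module(object):
  """Hypothetical stand-in for the imported issue_tracker_service module."""
  IssueTrackerService = object  # Pretend this is the real service class.


class InjectionPatternTest(unittest.TestCase):

  def setUp(self):
    # Swap in the fake by overwriting the module-level attribute.
    self._original = fake_issue_tracker_module.IssueTrackerService
    fake_issue_tracker_module.IssueTrackerService = _FakeService

  def tearDown(self):
    # Restore the original binding so other tests are unaffected.
    fake_issue_tracker_module.IssueTrackerService = self._original

  def testRecordsCallArguments(self):
    fake_issue_tracker_module.IssueTrackerService.NewBug(summary='s')
    self.assertEqual({'summary': 's'}, _FakeService.new_bug_kwargs)


if __name__ == '__main__':
  unittest.main()
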
 
diff --git a/catapult/dashboard/dashboard/find_anomalies.py b/catapult/dashboard/dashboard/find_anomalies.py
index ff69f21..37a1543 100644
--- a/catapult/dashboard/dashboard/find_anomalies.py
+++ b/catapult/dashboard/dashboard/find_anomalies.py
@@ -57,7 +57,7 @@
     return
 
   # Get anomalies and check if they happen in ref build also.
-  change_points = _FindChangePointsForTest(rows, config)
+  change_points = FindChangePointsForTest(rows, config)
   change_points = _FilterAnomaliesFoundInRef(change_points, test_key, len(rows))
 
   anomalies = [_MakeAnomalyEntity(c, test, rows) for c in change_points]
@@ -148,7 +148,7 @@
 
   ref_config = anomaly_config.GetAnomalyConfigDict(ref_test)
   ref_rows = GetRowsToAnalyze(ref_test, num_rows)
-  ref_change_points = _FindChangePointsForTest(ref_rows, ref_config)
+  ref_change_points = FindChangePointsForTest(ref_rows, ref_config)
   if not ref_change_points:
     return change_points[:]
 
@@ -157,7 +157,7 @@
   for c in change_points:
     # Log information about what anomaly got filtered and what did not.
     if not _IsAnomalyInRef(c, ref_change_points):
-      # TODO(qyearsley): Add test coverage. See http://crbug.com/447432
+      # TODO(qyearsley): Add test coverage. See catapult:#1346.
       logging.info('Nothing was filtered out for test %s, and revision %s',
                    test_path, c.x_value)
       change_points_filtered.append(c)
@@ -192,7 +192,7 @@
   for ref_change_point in ref_change_points:
     if change_point.x_value == ref_change_point.x_value:
       return True
-  # TODO(qyearsley): Add test coverage. See http://crbug.com/447432
+  # TODO(qyearsley): Add test coverage. See catapult:#1346.
   return False
 
 
@@ -216,7 +216,7 @@
   for row in reversed(rows):
     if row.revision < later_revision:
       return row.revision
-  # TODO(qyearsley): Add test coverage. See http://crbug.com/447432
+  # TODO(qyearsley): Add test coverage. See catapult:#1346.
   assert False, 'No matching revision found in |rows|.'
 
 
@@ -253,7 +253,7 @@
       internal_only=test.internal_only)
 
 
-def _FindChangePointsForTest(rows, config_dict):
+def FindChangePointsForTest(rows, config_dict):
   """Gets the anomaly data from the anomaly detection module.
 
   Args:
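
A quick reminder of what the ref-build comparison above does, now that FindChangePointsForTest is public: a change point found in the monitored series is discarded whenever the reference build has a change point at the same x_value, on the assumption that the step came from the environment rather than from the change under test. A small sketch of that filter, with plain named tuples standing in for the real change-point objects:

import collections

# Illustrative stand-in for the change-point objects compared by x_value.
ChangePoint = collections.namedtuple('ChangePoint', ['x_value'])


def FilterOutPointsFoundInRef(change_points, ref_change_points):
  """Keeps only change points whose x_value is absent from the ref series."""
  ref_x_values = set(c.x_value for c in ref_change_points)
  return [c for c in change_points if c.x_value not in ref_x_values]


points = [ChangePoint(241537), ChangePoint(241582)]
ref_points = [ChangePoint(241582)]
# Only the 241537 point survives; 241582 also appears in the ref build.
print(FilterOutPointsFoundInRef(points, ref_points))
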
diff --git a/catapult/dashboard/dashboard/find_anomalies_test.py b/catapult/dashboard/dashboard/find_anomalies_test.py
index 35873d7..2c0012a 100644
--- a/catapult/dashboard/dashboard/find_anomalies_test.py
+++ b/catapult/dashboard/dashboard/find_anomalies_test.py
@@ -17,27 +17,26 @@
 
 # Sample time series.
 _TEST_ROW_DATA = [
-    (241105, 2126.75), (241116, 2140.375), (241151, 2149.125),
-    (241154, 2147.25), (241156, 2130.625), (241160, 2136.25),
-    (241188, 2146.75), (241201, 2141.875), (241226, 2160.625),
-    (241247, 2108.125), (241249, 2134.25), (241254, 2130.0),
-    (241262, 2126.0), (241268, 2142.625), (241271, 2129.125),
-    (241282, 2166.625), (241294, 2125.375), (241298, 2155.5),
-    (241303, 2158.5), (241317, 2146.25), (241323, 2123.375),
-    (241330, 2121.5), (241342, 2151.25), (241355, 2155.25),
-    (241371, 2136.375), (241386, 2154.0), (241405, 2118.125),
-    (241420, 2157.625), (241432, 2140.75), (241441, 2132.25),
-    (241452, 2138.25), (241455, 2119.375), (241471, 2134.0),
-    (241488, 2127.25), (241503, 2162.5), (241520, 2116.375),
-    (241524, 2139.375), (241529, 2143.5), (241532, 2141.5),
-    (241535, 2147.0), (241537, 2184.125), (241546, 2180.875),
-    (241553, 2181.5), (241559, 2176.875), (241566, 2164.0),
-    (241577, 2182.875), (241579, 2194.875), (241582, 2200.5),
-    (241584, 2163.125), (241609, 2178.375), (241620, 2178.125),
-    (241645, 2190.875), (241653, 2147.75), (241666, 2185.375),
-    (241697, 2173.875), (241716, 2172.125), (241735, 2172.5),
-    (241757, 2154.75), (241766, 2196.75), (241782, 2184.125),
-    (241795, 2191.5)
+    (241105, 2136.7), (241116, 2140.3), (241151, 2149.1),
+    (241154, 2147.2), (241156, 2130.6), (241160, 2136.2),
+    (241188, 2146.7), (241201, 2141.8), (241226, 2140.6),
+    (241247, 2128.1), (241249, 2134.2), (241254, 2130.0),
+    (241262, 2136.0), (241268, 2142.6), (241271, 2149.1),
+    (241282, 2156.6), (241294, 2125.3), (241298, 2155.5),
+    (241303, 2148.5), (241317, 2146.2), (241323, 2123.3),
+    (241330, 2121.5), (241342, 2141.2), (241355, 2145.2),
+    (241371, 2136.3), (241386, 2144.0), (241405, 2138.1),
+    (241420, 2147.6), (241432, 2140.7), (241441, 2132.2),
+    (241452, 2138.2), (241455, 2139.3), (241471, 2134.0),
+    (241488, 2137.2), (241503, 2152.5), (241520, 2136.3),
+    (241524, 2139.3), (241529, 2143.5), (241532, 2145.5),
+    (241535, 2147.0), (241537, 2184.1), (241546, 2180.8),
+    (241553, 2181.5), (241559, 2176.8), (241566, 2174.0),
+    (241577, 2182.8), (241579, 2184.8), (241582, 2190.5),
+    (241584, 2183.1), (241609, 2178.3), (241620, 2178.1),
+    (241645, 2190.8), (241653, 2177.7), (241666, 2185.3),
+    (241697, 2173.8), (241716, 2172.1), (241735, 2172.5),
+    (241757, 2174.7), (241766, 2196.7), (241782, 2184.1),
 ]
 
 
@@ -220,7 +219,7 @@
           _MakeSampleChangePoint(10041, 45.2, 37.8),
       ]))
   @mock.patch.object(find_anomalies.email_sheriff, 'EmailSheriff')
-  def testProcessTest_FiltersOutImpovements(self, mock_email_sheriff):
+  def testProcessTest_FiltersOutImprovements(self, mock_email_sheriff):
     self._AddDataForTests()
     test = utils.TestKey(
         'ChromiumGPU/linux-release/scrolling_benchmark/ref').get()
diff --git a/catapult/dashboard/dashboard/find_change_points.py b/catapult/dashboard/dashboard/find_change_points.py
index 516b82e..782f199 100644
--- a/catapult/dashboard/dashboard/find_change_points.py
+++ b/catapult/dashboard/dashboard/find_change_points.py
@@ -25,7 +25,7 @@
 
 # Minimum number of points in a segment. This can help filter out erroneous
 # results by ignoring results that were found from looking at too few points.
-_MIN_SEGMENT_SIZE = 6
+MIN_SEGMENT_SIZE = 6
 
 # Minimum absolute difference between medians before and after.
 _MIN_ABSOLUTE_CHANGE = 0
@@ -35,7 +35,7 @@
 
 # "Steppiness" is a number between 0 and 1 that indicates how similar the
 # shape is to a perfect step function, where 1 represents a step function.
-_MIN_STEPPINESS = 0.3
+_MIN_STEPPINESS = 0.5
 
 # The "standard deviation" is based on a subset of points in the series.
 # This parameter is the minimum acceptable ratio of the relative change
@@ -70,7 +70,7 @@
 def FindChangePoints(
     series,
     max_window_size=_MAX_WINDOW_SIZE,
-    min_segment_size=_MIN_SEGMENT_SIZE,
+    min_segment_size=MIN_SEGMENT_SIZE,
     min_absolute_change=_MIN_ABSOLUTE_CHANGE,
     min_relative_change=_MIN_RELATIVE_CHANGE,
     min_steppiness=_MIN_STEPPINESS,
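
Two of the detection knobs change above: MIN_SEGMENT_SIZE is made public (presumably so other modules can reference it), and the steppiness threshold is raised from 0.3 to 0.5, so a candidate step must look more like an ideal step function before it is reported. A hedged sketch of a caller passing these knobs explicitly; only the keyword names come from the FindChangePoints signature above, and the (revision, value) series format is an assumption:

from dashboard import find_change_points

# Assumed series format: (revision, value) pairs with a clear step at 1020.
series = [(rev, 100.0) for rev in range(1000, 1020)]
series += [(rev, 150.0) for rev in range(1020, 1040)]

change_points = find_change_points.FindChangePoints(
    series,
    min_segment_size=find_change_points.MIN_SEGMENT_SIZE,  # Now public (6).
    min_steppiness=0.5)  # The new, stricter threshold.
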
diff --git a/catapult/dashboard/dashboard/get_logs.py b/catapult/dashboard/dashboard/get_logs.py
index 942129b..2a0286f 100644
--- a/catapult/dashboard/dashboard/get_logs.py
+++ b/catapult/dashboard/dashboard/get_logs.py
@@ -27,18 +27,16 @@
       log_name: Name of log to retrieve.
       log_filter: Regex string to filter logs.
       log_size: Number of logs to get.
-      after_index: Get the logs after this index.
-      before_index: Get the logs before this index.
+      after_timestamp: Get the logs after this timestamp.
 
     Outputs:
-      JSON which contains a list of quick_loger.Log.
+      JSON which contains a list of quick_logger.Log.
     """
     log_namespace = self.request.get('namespace')
     log_name = self.request.get('name')
     log_filter = self.request.get('filter')
     log_size = self.request.get('size')
-    after_index = self.request.get('after_index')
-    before_index = self.request.get('before_index')
+    after_timestamp = self.request.get('after_timestamp')
 
     logs = quick_logger.Get(log_namespace, log_name)
     if logs is None:
@@ -49,18 +47,17 @@
 
     if log_filter:
       logs = [l for l in logs if re.match(log_filter, l.message)]
-    if after_index:
-      after_index = float(after_index)
-      logs = [l for l in logs if l.index > after_index]
-    if before_index:
-      before_index = float(before_index)
-      logs = [l for l in logs if l.index < before_index]
+    if after_timestamp:
+      after_timestamp = float(after_timestamp)
+      logs = [l for l in logs if
+              getattr(l, 'timestamp', l.index) > after_timestamp]
     if log_size:
       logs = logs[0:int(log_size)]
 
     serializable_logs = []
     for log in logs:
       serializable_logs.append({
-          'index': log.index,
+          'id': getattr(log, 'id', log.index),
+          'timestamp': getattr(log, 'timestamp', log.index),
           'message': log.message})
     self.response.out.write(json.dumps(serializable_logs))
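
The get_logs change above moves from index-based to timestamp-based paging, using getattr so that records written before the timestamp field existed fall back to their index. A standalone sketch of that fallback filter (the record class is a stand-in, not the real quick_logger.Log):

class _FakeLogRecord(object):
  """Stand-in for quick_logger.Log; older records have no timestamp."""

  def __init__(self, index, message, timestamp=None):
    self.index = index
    self.message = message
    if timestamp is not None:
      self.timestamp = timestamp


def LogsAfter(logs, after_timestamp):
  """Returns logs newer than after_timestamp, falling back to index."""
  return [l for l in logs
          if getattr(l, 'timestamp', l.index) > after_timestamp]


logs = [_FakeLogRecord(0, 'old-style record'),  # No timestamp; index is used.
        _FakeLogRecord(1, 'new-style record', timestamp=1456000000.5)]
print([l.message for l in LogsAfter(logs, 100.0)])  # ['new-style record']
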
diff --git a/catapult/dashboard/dashboard/get_logs_test.py b/catapult/dashboard/dashboard/get_logs_test.py
new file mode 100644
index 0000000..4552869
--- /dev/null
+++ b/catapult/dashboard/dashboard/get_logs_test.py
@@ -0,0 +1,51 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+
+import webapp2
+import webtest
+
+from dashboard import get_logs
+from dashboard import quick_logger
+from dashboard import testing_common
+
+
+class GetLogsTest(testing_common.TestCase):
+
+  def setUp(self):
+    super(GetLogsTest, self).setUp()
+    app = webapp2.WSGIApplication([
+        ('/get_logs', get_logs.GetLogsHandler)])
+    self.testapp = webtest.TestApp(app)
+
+  def testPost_WithTimestamp_RespondsWithNewLogs(self):
+    logger = quick_logger.QuickLogger('test_namespace', 'test_name')
+    logger.Log('First message.')
+    logger.Log('Second message.')
+    # Set back the time on these records.
+    logger._records[0].timestamp -= 1
+    logger._records[1].timestamp -= 2
+    logger.Save()
+    response = self.testapp.post('/get_logs', {
+        'namespace': 'test_namespace',
+        'name': 'test_name',
+        'size': 100
+    })
+    response_logs = json.loads(response.body)
+    self.assertEqual(2, len(response_logs))
+
+    logger.Log('Third message.')
+    logger.Save()
+
+    response = self.testapp.post('/get_logs', {
+        'namespace': 'test_namespace',
+        'name': 'test_name',
+        'size': 100,
+        'after_timestamp': repr(response_logs[0]['timestamp'])
+    })
+
+    response_logs = json.loads(response.body)
+    self.assertEqual(1, len(response_logs))
+    self.assertEqual('Third message.', response_logs[0]['message'])
diff --git a/catapult/dashboard/dashboard/graph_csv.py b/catapult/dashboard/dashboard/graph_csv.py
index 005753c..ee3dc68 100644
--- a/catapult/dashboard/dashboard/graph_csv.py
+++ b/catapult/dashboard/dashboard/graph_csv.py
@@ -8,13 +8,14 @@
 import logging
 import StringIO
 
+from dashboard import datastore_hooks
 from dashboard import request_handler
 from dashboard import utils
 from dashboard.models import graph_data
 
 
 class GraphCsvHandler(request_handler.RequestHandler):
-  """Get data from data store and outputs it in CSV format."""
+  """Request handler for getting data from one series as CSV."""
 
   def get(self):
     """Gets CSV from data store and outputs it.
@@ -40,6 +41,10 @@
     logging.info('Got request to /graph_csv for test: "%s".', test_path)
 
     test_key = utils.TestKey(test_path)
+    test = test_key.get()
+    assert(datastore_hooks.IsUnalteredQueryPermitted() or
+           not test.internal_only)
+    datastore_hooks.SetSinglePrivilegedRequest()
     q = graph_data.Row.query()
     q = q.filter(graph_data.Row.parent_test == test_key)
     if rev:
@@ -61,7 +66,7 @@
     self.get()
 
   def _GenerateRows(self, points, attributes):
-    """Generate all the rows based on the attributes given.
+    """Generates CSV rows based on the attributes given.
 
     Args:
       points: A list of Row entities.
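
The new guard in graph_csv above rejects internal-only series for callers that are not permitted unaltered query results, and then marks the row query as a single privileged request so the datastore hooks do not silently drop internal rows. Expressed as a hypothetical reusable helper (only the datastore_hooks calls come from the hunk above):

from dashboard import datastore_hooks


def CheckAccessAndPrivilegeQuery(test):
  """Hypothetical helper mirroring the guard added to graph_csv above."""
  # Refuse to serve internal-only data to unprivileged requests.
  assert (datastore_hooks.IsUnalteredQueryPermitted() or
          not test.internal_only)
  # Let the next datastore query through the hooks unfiltered.
  datastore_hooks.SetSinglePrivilegedRequest()
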
diff --git a/catapult/dashboard/dashboard/graph_csv_test.py b/catapult/dashboard/dashboard/graph_csv_test.py
index e323695..9e6193d 100644
--- a/catapult/dashboard/dashboard/graph_csv_test.py
+++ b/catapult/dashboard/dashboard/graph_csv_test.py
@@ -44,14 +44,16 @@
       bot = graph_data.Bot(id=name, parent=master, internal_only=True).put()
       bots.append(bot)
       test = graph_data.Test(id='dromaeo', parent=bot, internal_only=True).put()
-      dom_test = graph_data.Test(id='dom', parent=test, has_rows=True).put()
+      dom_test = graph_data.Test(
+          id='dom', parent=test, has_rows=True, internal_only=True).put()
       test_container_key = utils.GetTestContainerKey(dom_test)
       for i in range(1, 50):
         graph_data.Row(
             parent=test_container_key, id=i, value=float(i * 2), error=(i + 10),
             internal_only=True).put()
 
-  def _CheckGet(self, result_query, expected_result, whitelisted_ip=''):
+  def _CheckGet(
+      self, result_query, expected_result, whitelisted_ip='', status=200):
     """Asserts that the given query has the given CSV result.
 
     Args:
@@ -61,7 +63,11 @@
     """
     response_rows = []
     response = self.testapp.get(
-        result_query, extra_environ={'REMOTE_ADDR': whitelisted_ip})
+        result_query,
+        extra_environ={'REMOTE_ADDR': whitelisted_ip},
+        status=status)
+    if status != 200:
+      return
     for row in csv.reader(StringIO.StringIO(response.body)):
       response_rows.append(row)
     self.assertEqual(expected_result, response_rows)
@@ -145,9 +151,10 @@
     testing_common.SetIpWhitelist(['123.45.67.89'])
     query = '/graph_csv?test_path=ChromiumPerf/win7/dromaeo/dom&num_points=3'
     expected = [['revision', 'value']]
-    self._CheckGet(query, expected)
+    self._CheckGet(query, expected, status=500)
 
   def testGet_WhitelistedIPOnly(self):
+    self.PatchDatastoreHooksRequest('123.45.67.89')
     self._AddMockInternalData()
     self.UnsetCurrentUser()
     datastore_hooks.InstallHooks()
diff --git a/catapult/dashboard/dashboard/graph_json.py b/catapult/dashboard/dashboard/graph_json.py
index bafb8b3..169c1e9 100644
--- a/catapult/dashboard/dashboard/graph_json.py
+++ b/catapult/dashboard/dashboard/graph_json.py
@@ -13,11 +13,11 @@
 import json
 import logging
 import re
-import urllib
 
 from google.appengine.ext import ndb
 
 from dashboard import alerts
+from dashboard import datastore_hooks
 from dashboard import list_tests
 from dashboard import request_handler
 from dashboard import utils
@@ -28,6 +28,10 @@
 # This can be overridden by specifying num_points or start_rev and end_rev.
 _DEFAULT_NUM_POINTS = 150
 
+# If data for more than this many tests is requested for unselected tests,
+# an empty response will be returned.
+_MAX_UNSELECTED_TESTS = 50
+
 # Dictionary mapping improvement directions constants to strings.
 _BETTER_DICT = {
     anomaly.UP: 'Higher',
@@ -82,17 +86,71 @@
 
     arguments = {
         'test_path_dict': graphs['test_path_dict'],
-        'rev': PositiveIntOrNone(graphs.get('rev')),
-        'num_points': (PositiveIntOrNone(graphs.get('num_points'))
+        'rev': _PositiveIntOrNone(graphs.get('rev')),
+        'num_points': (_PositiveIntOrNone(graphs.get('num_points'))
                        or _DEFAULT_NUM_POINTS),
         'is_selected': graphs.get('is_selected'),
-        'start_rev': PositiveIntOrNone(graphs.get('start_rev')),
-        'end_rev': PositiveIntOrNone(graphs.get('end_rev')),
+        'start_rev': _PositiveIntOrNone(graphs.get('start_rev')),
+        'end_rev': _PositiveIntOrNone(graphs.get('end_rev')),
     }
     return arguments
 
 
-def PositiveIntOrNone(input_str):
+def GetGraphJson(
+    test_path_dict, rev=None, num_points=None,
+    is_selected=True, start_rev=None, end_rev=None):
+  """Makes a JSON serialization of data for one chart with multiple series.
+
+  This function can return data for one chart (with multiple data series
+  plotted on it) with revisions on the x-axis, for a certain range of
+  revisions. The particular set of revisions to get data for can be specified
+  with the arguments rev, num_points, start_rev, and end_rev.
+
+  Args:
+    test_path_dict: Dictionary of test path to list of selected series.
+    rev: A revision number that the chart may be clamped relative to.
+    num_points: Number of points to plot.
+    is_selected: Whether this request is for selected or un-selected series.
+    start_rev: The lowest revision to get trace data for.
+    end_rev: The highest revision to get trace data for.
+
+  Returns:
+    JSON serialization of a dict with info that will be used to plot a chart.
+  """
+  # TODO(qyearsley): Parallelize queries if possible.
+
+  if is_selected:
+    test_paths = _GetTestPathFromDict(test_path_dict)
+  else:
+    test_paths = _GetUnselectedTestPathFromDict(test_path_dict)
+
+  # If a particular test has a lot of children, then a request will be made
+  # for data for a lot of unselected series, which may be very slow and may
+  # time out. In this case, return nothing.
+  # TODO(qyearsley): Stop doing this when there's a better solution (#1876).
+  if not is_selected and len(test_paths) > _MAX_UNSELECTED_TESTS:
+    return json.dumps({'data': {}, 'annotations': {}, 'error_bars': {}})
+
+  test_keys = map(utils.TestKey, test_paths)
+  test_entities = ndb.get_multi(test_keys)
+  test_entities = [t for t in test_entities if t is not None]
+
+  # Filter out deprecated tests, but only if not all the tests are deprecated.
+  all_deprecated = all(t.deprecated for t in test_entities)
+  if not all_deprecated:
+    test_entities = [t for t in test_entities if not t.deprecated]
+  test_entities = [t for t in test_entities if t.has_rows]
+
+  revision_map = {}
+  num_points = num_points or _DEFAULT_NUM_POINTS
+  for test in test_entities:
+    _UpdateRevisionMap(revision_map, test, rev, num_points, start_rev, end_rev)
+  if not (start_rev and end_rev):
+    _ClampRevisionMap(revision_map, rev, num_points)
+  return _GetFlotJson(revision_map, test_entities)
+
+
+def _PositiveIntOrNone(input_str):
   """Parses a string as a positive int if possible, otherwise returns None."""
   if not input_str:
     return None
@@ -134,25 +192,27 @@
     end_rev: End revision number (optional).
   """
   anomaly_annotation_map = _GetAnomalyAnnotationMap(parent_test.key)
+  assert(datastore_hooks.IsUnalteredQueryPermitted() or
+         not parent_test.internal_only)
 
   if start_rev and end_rev:
-    rows = _GetRowsForTestInRange(parent_test.key, start_rev, end_rev)
+    rows = _GetRowsForTestInRange(parent_test.key, start_rev, end_rev, True)
   elif rev:
     assert num_points
-    rows = _GetRowsForTestAroundRev(parent_test.key, rev, num_points)
+    rows = _GetRowsForTestAroundRev(parent_test.key, rev, num_points, True)
   else:
     assert num_points
-    rows = _GetLatestRowsForTest(parent_test.key, num_points)
+    rows = _GetLatestRowsForTest(parent_test.key, num_points, True)
 
   parent_test_key = parent_test.key.urlsafe()
   for row in rows:
     if row.revision not in revision_map:
       revision_map[row.revision] = {}
     revision_map[row.revision][parent_test_key] = _PointInfoDict(
-        row, parent_test, anomaly_annotation_map)
+        row, anomaly_annotation_map)
 
 
-def _PointInfoDict(row, parent_test, anomaly_annotation_map):
+def _PointInfoDict(row, anomaly_annotation_map):
   """Makes a dict of properties of one Row."""
   point_info = {
       'value': row.value,
@@ -163,11 +223,6 @@
   if tracing_uri:
     point_info['a_tracing_uri'] = tracing_uri
 
-  old_stdio_uri = _GetOldStdioUri(row, parent_test)
-  if old_stdio_uri:
-    point_info.update(
-        _CreateLinkProperty('stdio_uri', 'Buildbot stdio', old_stdio_uri))
-
   if row.error is not None:
     point_info['error'] = row.error
   if anomaly_annotation_map.get(row.revision):
@@ -197,61 +252,10 @@
   return {'a_' + name: '[%s](%s)' % (label, url)}
 
 
-def _GetOldStdioUri(row, test):
-  """Gets or makes the URI string for the buildbot stdio link.
-
-  This is here to support the deprecated method way of creating
-  Buildbot stdio URI.
-
-  TODO(chrisphan): Remove this after sometime.
-
-  Args:
-    row: A Row entity.
-    test: The Test entity for the given Row.
-
-  Returns:
-    An URI string, or None if none can be made.
-  """
-  # A masterid and buildname are required to construct a valid URI.
-  if (not hasattr(test, 'masterid') or not hasattr(test, 'buildername')
-      or not hasattr(row, 'buildnumber')):
-    return None
-
-  buildbot_uri_prefix = _GetBuildbotUriPrefix(test, row=row)
-  if not buildbot_uri_prefix:
-    return None
-  return '%s/%s/builders/%s/builds/%s/steps/%s/logs/stdio' % (
-      buildbot_uri_prefix,
-      urllib.quote(test.masterid),
-      urllib.quote(test.buildername),
-      urllib.quote(str(getattr(row, 'buildnumber'))),
-      urllib.quote(test.suite_name))
-
-
-def _GetBuildbotUriPrefix(test, row=None):
-  """Gets the start of the buildbot stdio or builder status URI.
-
-  Gets the uri prefix from 'a_stdio_uri_prefix' property if exist or
-  the public uri prefix if test is not internal.
-
-  Args:
-    test: A Test entity.
-    row: A Row entity, optional.
-
-  Returns:
-    The protocol, hostname and start of the pathname for Buildbot builder
-    status or stdio links.
-  """
-  if row and hasattr(row, 'a_stdio_uri_prefix'):
-    return row.a_stdio_uri_prefix
-
-  if test.internal_only:
-    return None
-  return 'http://build.chromium.org/p'
-
-
-def _GetRowsForTestInRange(test_key, start_rev, end_rev):
+def _GetRowsForTestInRange(test_key, start_rev, end_rev, privileged=False):
   """Gets all the Row entities for a Test between a given start and end."""
+  if privileged:
+    datastore_hooks.SetSinglePrivilegedRequest()
   query = graph_data.Row.query(
       graph_data.Row.parent_test == test_key,
       graph_data.Row.revision >= start_rev,
@@ -259,17 +263,21 @@
   return query.fetch(batch_size=100)
 
 
-def _GetRowsForTestAroundRev(test_key, rev, num_points):
-  """Gets up to num_points Row entities for a Test centered on a revision."""
+def _GetRowsForTestAroundRev(test_key, rev, num_points, privileged=False):
+  """Gets up to |num_points| Row entities for a Test centered on a revision."""
   num_rows_before = int(num_points / 2) + 1
   num_rows_after = int(num_points / 2)
 
+  if privileged:
+    datastore_hooks.SetSinglePrivilegedRequest()
   query_up_to_rev = graph_data.Row.query(
       graph_data.Row.parent_test == test_key,
       graph_data.Row.revision <= rev)
   query_up_to_rev = query_up_to_rev.order(-graph_data.Row.revision)
   rows_up_to_rev = query_up_to_rev.fetch(limit=num_rows_before, batch_size=100)
 
+  if privileged:
+    datastore_hooks.SetSinglePrivilegedRequest()
   query_after_rev = graph_data.Row.query(
       graph_data.Row.parent_test == test_key,
       graph_data.Row.revision > rev)
@@ -279,8 +287,10 @@
   return rows_up_to_rev + rows_after_rev
 
 
-def _GetLatestRowsForTest(test_key, num_points):
+def _GetLatestRowsForTest(test_key, num_points, privileged=False):
   """Gets the latest num_points Row entities for a Test."""
+  if privileged:
+    datastore_hooks.SetSinglePrivilegedRequest()
   query = graph_data.Row.query(graph_data.Row.parent_test == test_key)
   query = query.order(-graph_data.Row.revision)
   return query.fetch(limit=num_points, batch_size=100)
@@ -308,7 +318,7 @@
 
 
 def _ClampRevisionMap(revision_map, rev, num_points):
-  """Clamp the results down to the requested number of points before/after rev.
+  """Clamps the results down to the requested number of points before/after rev.
 
   Not all of the Tests have Rows for the exact same revisions. If one test has
   gaps in the requested range, the query for points before/after rev will
@@ -346,7 +356,7 @@
   """Gets the URI string for tracing in cloud storage, if available.
 
   Args:
-    point: A Row entitiy.
+    point: A Row entity.
 
   Returns:
     An URI string, or None if there is no trace available.
@@ -360,7 +370,7 @@
   """Gets the trace rerun options, if available.
 
   Args:
-    point: A Row entitiy.
+    point: A Row entity.
 
   Returns:
     A dict of {description: params} strings, or None.
@@ -379,14 +389,14 @@
 
   Returns:
     JSON serialization of a dict with line data, annotations, error range data,
-    (This data may not be passed exactly as-is to the Flot plot funciton, but
+    (This data may not be passed exactly as-is to the Flot plot function, but
     it will all be used when plotting.)
   """
   # TODO(qyearsley): Break this function into smaller functions.
 
   # Each entry in the following dict is one Flot series object. The actual
   # x-y values will be put into the 'data' properties for each object.
-  cols = {i: _FlotSeries(i) for i in  range(len(tests))}
+  cols = {i: _FlotSeries(i) for i in range(len(tests))}
 
   flot_annotations = {}
   flot_annotations['series'] = _GetSeriesAnnotations(tests)
@@ -600,51 +610,3 @@
     if value['has_rows']:
       traces.append(key)
   return traces
-
-
-def GetGraphJson(
-    test_path_dict, rev=None, num_points=None,
-    is_selected=True, start_rev=None, end_rev=None):
-  """Makes a JSON serialization of data for one chart with multiple series.
-
-  This function can return data for one chart (with multiple data series
-  plotted on it) with revisions on the x-axis, for a certain range of
-  revisions. The particular set of revisions to get data for can be specified
-  with the arguments rev, num_points, start_rev, and end_rev.
-
-  Args:
-    test_path_dict: Dictionary of test path to list of selected series.
-    rev: A revision number that the chart may be clamped relative to.
-    num_points: Number of points to plot.
-    is_selected: Whether this request is for selected or un-selected series.
-    start_rev: The lowest revision to get trace data for.
-    end_rev: The highest revision to get trace data for.
-
-  Returns:
-    JSON serialization of a dict with info that will be used to plot a chart.
-  """
-  # TODO(qyearsley): Parallelize queries if possible.
-
-  # Get a list of Test entities.
-  if is_selected:
-    test_paths = _GetTestPathFromDict(test_path_dict)
-  else:
-    test_paths = _GetUnselectedTestPathFromDict(test_path_dict)
-
-  test_keys = map(utils.TestKey, test_paths)
-  test_entities = ndb.get_multi(test_keys)
-  test_entities = [t for t in test_entities if t is not None]
-
-  # Filter out deprecated tests, but only if not all the tests are deprecated.
-  all_deprecated = all(t.deprecated for t in test_entities)
-  if not all_deprecated:
-    test_entities = [t for t in test_entities if not t.deprecated]
-
-  test_entities = [t for t in test_entities if t.has_rows]
-  revision_map = {}
-  num_points = num_points or _DEFAULT_NUM_POINTS
-  for test in test_entities:
-    _UpdateRevisionMap(revision_map, test, rev, num_points, start_rev, end_rev)
-  if not (start_rev and end_rev):
-    _ClampRevisionMap(revision_map, rev, num_points)
-  return _GetFlotJson(revision_map, test_entities)
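
Only the start of the renamed _PositiveIntOrNone is visible in the hunk above. A plausible completion that matches its docstring is sketched below; the body after the early return is an assumption, not necessarily the patched source:

def _PositiveIntOrNone(input_str):
  """Parses a string as a positive int if possible, otherwise returns None."""
  if not input_str:
    return None
  try:
    parsed = int(input_str)
  except ValueError:
    return None
  return parsed if parsed > 0 else None


# e.g. _PositiveIntOrNone('150') -> 150; _PositiveIntOrNone('abc') -> None
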
diff --git a/catapult/dashboard/dashboard/graph_json_test.py b/catapult/dashboard/dashboard/graph_json_test.py
index 9391492..8d80afc 100644
--- a/catapult/dashboard/dashboard/graph_json_test.py
+++ b/catapult/dashboard/dashboard/graph_json_test.py
@@ -2,7 +2,6 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-import datetime
 import json
 import unittest
 
@@ -26,7 +25,7 @@
     app = webapp2.WSGIApplication(
         [('/graph_json', graph_json.GraphJsonHandler)])
     self.testapp = webtest.TestApp(app)
-    testing_common.SetInternalDomain('google.com')
+    self.PatchDatastoreHooksRequest()
 
   # TODO(qyearsley): graph_json_test is very slow (it takes 60+ seconds
   # to run sometimes), and I have a hypothesis that most of the time is
@@ -662,6 +661,18 @@
     self.assertEqual(5, len(flot['annotations'].get(sub_test_a_index).keys()))
     self.assertEqual(5, len(flot['annotations'].get(sub_test_b_index).keys()))
 
+  def testGetGraphJson_ManyUnselected_ReturnsNothing(self):
+    testing_common.AddTests(
+        ['M'], ['b'], {'suite': {str(i): {} for i in range(100)}})
+    test_paths = ['M/b/suite/%s' % i for i in range(100)]
+    for p in test_paths:
+      testing_common.AddRows(p, [1])
+    response = graph_json.GetGraphJson(
+        test_path_dict={p: [] for p in test_paths}, is_selected=False)
+    self.assertEqual(
+        {'data': {}, 'annotations': {}, 'error_bars': {}},
+        json.loads(response))
+
 
 class GraphJsonParseRequestArgumentsTest(testing_common.TestCase):
 
@@ -736,48 +747,6 @@
 
 class GraphJsonHelperFunctionTest(testing_common.TestCase):
 
-  def testGetOldStdioUri_NoMasterId_NoURIReturned(self):
-    testing_common.AddTests(['Master'], ['b'], {'my_suite': {}})
-    test = utils.TestKey('Master/b/my_suite').get()
-    test.buildername = 'MyBuilder'
-    row = graph_data.Row(id=345, buildnumber=456)
-    self.assertIsNone(graph_json._GetOldStdioUri(row, test))
-
-  def testGetOldStdioUri_WithMasterId_URIReturned(self):
-    testing_common.AddTests(['Master'], ['b'], {'my_suite': {}})
-    test = utils.TestKey('Master/b/my_suite').get()
-    test.buildername = 'MyBuilder'
-    row = graph_data.Row(id=345, buildnumber=456)
-    test.masterid = 'my.master.id'
-    self.assertEqual(
-        ('http://build.chromium.org/p/my.master.id/builders/MyBuilder'
-         '/builds/456/steps/my_suite/logs/stdio'),
-        graph_json._GetOldStdioUri(row, test))
-
-  def testGetOldStdioUri_InternalOnly_NoURIReturned(self):
-    testing_common.AddTests(['Master'], ['b'], {'my_suite': {}})
-    test = utils.TestKey('Master/b/my_suite').get()
-    test.buildername = 'MyBuilder'
-    row = graph_data.Row(id=345, buildnumber=456)
-    test.masterid = 'my.master.id'
-    test.internal_only = True
-    self.assertIsNone(graph_json._GetOldStdioUri(row, test))
-
-  def testGetOldStdioUri_CustomPrefix_CustomPrefixUsed(self):
-    testing_common.AddTests(['Master'], ['b'], {'my_suite': {}})
-    test = utils.TestKey('Master/b/my_suite').get()
-    test.buildername = 'MyBuilder'
-    row = graph_data.Row(id=345, buildnumber=456)
-    test.masterid = 'my.master.id'
-    test.internal_only = True
-    # If the row has a custom prefix, that will be used, even if the test is
-    # internal-only.
-    row.a_stdio_uri_prefix = 'http://special-logs.chromium.org/x'
-    self.assertEqual(
-        ('http://special-logs.chromium.org/x/my.master.id/builders/MyBuilder'
-         '/builds/456/steps/my_suite/logs/stdio'),
-        graph_json._GetOldStdioUri(row, test))
-
   def testPointInfoDict_StdioUriMarkdown(self):
     testing_common.AddTests(['Master'], ['b'], {'my_suite': {}})
     test = utils.TestKey('Master/b/my_suite').get()
@@ -787,16 +756,15 @@
     row.a_stdio_uri = ('[Build stdio](http://build.chromium.org/p/my.master.id/'
                        'builders/MyBuilder/builds/456/steps/my_suite/logs/'
                        'stdio)')
-    point_info = graph_json._PointInfoDict(row, test, {})
+    point_info = graph_json._PointInfoDict(row, {})
     self.assertEqual(row.a_stdio_uri, point_info['a_stdio_uri'])
 
   def testPointInfoDict_RowHasNoTracingUri_ResultHasNoTracingUri(self):
     testing_common.AddTests(['Master'], ['b'], {'my_suite': {}})
-    test = utils.TestKey('Master/b/my_suite').get()
     rows = testing_common.AddRows('Master/b/my_suite', [345])
     # This row has no a_tracing_uri property, so there should be no
     # trace annotation returned by _PointInfoDict.
-    point_info = graph_json._PointInfoDict(rows[0], test, {})
+    point_info = graph_json._PointInfoDict(rows[0], {})
     self.assertFalse(hasattr(rows[0], 'a_tracing_uri'))
     self.assertNotIn('a_tracing_uri', point_info)
 
diff --git a/catapult/dashboard/dashboard/graph_revisions.py b/catapult/dashboard/dashboard/graph_revisions.py
index 0aee43e..2996704 100644
--- a/catapult/dashboard/dashboard/graph_revisions.py
+++ b/catapult/dashboard/dashboard/graph_revisions.py
@@ -16,7 +16,6 @@
 
 import bisect
 import json
-import logging
 
 from dashboard import datastore_hooks
 from dashboard import namespaced_stored_object
@@ -78,10 +77,11 @@
   Returns:
     The list of triplets that was just fetched and set in the cache.
   """
-  if not test_key.get():
-    # The caching can actually proceed even if the Test entity doesn't exist,
-    # but a non-existent Test entity definitely indicates something is wrong.
-    logging.warn('Test not found: %s', utils.TestPath(test_key))
+  test = test_key.get()
+  if not test:
+    return []
+  assert utils.IsInternalUser() or not test.internal_only
+  datastore_hooks.SetSinglePrivilegedRequest()
 
   # A projection query queries just for the values of particular properties;
   # this is faster than querying for whole entities.
@@ -90,6 +90,9 @@
 
   # Using a large batch_size speeds up queries with > 1000 Rows.
   rows = map(_MakeTriplet, query.iter(batch_size=1000))
+  # Note: Unit tests do not call datastore_hooks with the above query, but
+  # it is called in production and with a more recent SDK.
+  datastore_hooks.CancelSinglePrivilegedRequest()
   SetCache(utils.TestPath(test_key), rows)
   return rows
 
diff --git a/catapult/dashboard/dashboard/graph_revisions_test.py b/catapult/dashboard/dashboard/graph_revisions_test.py
index 093c9ed..e8ceef6 100644
--- a/catapult/dashboard/dashboard/graph_revisions_test.py
+++ b/catapult/dashboard/dashboard/graph_revisions_test.py
@@ -23,6 +23,7 @@
     app = webapp2.WSGIApplication(
         [('/graph_revisions', graph_revisions.GraphRevisionsHandler)])
     self.testapp = webtest.TestApp(app)
+    self.PatchDatastoreHooksRequest()
 
   def _AddMockData(self):
     """Adds mock data to the datastore, not updating stored_object."""
diff --git a/catapult/dashboard/dashboard/group_report.py b/catapult/dashboard/dashboard/group_report.py
index b9106c5..7fd2855 100644
--- a/catapult/dashboard/dashboard/group_report.py
+++ b/catapult/dashboard/dashboard/group_report.py
@@ -30,7 +30,11 @@
   """Request handler for requests for group report page."""
 
   def get(self):
-    """Renders the UI for /group_report with some set of alerts.
+    """Renders the UI for the group report page."""
+    self.RenderStaticHtml('group_report.html')
+
+  def post(self):
+    """Returns dynamic data for /group_report with some set of alerts.
 
     The set of alerts is determined by the keys, bug ID or revision given.
 
@@ -40,7 +44,7 @@
       rev: A revision number (optional).
 
     Outputs:
-      HTML for the /group_report page.
+      JSON for the /group_report page XHR request.
     """
     keys = self.request.get('keys')
     bug_id = self.request.get('bug_id')
@@ -58,7 +62,7 @@
         # where the user can input a bug ID or revision.
         raise request_handler.InvalidInputError('No anomalies specified.')
     except request_handler.InvalidInputError as error:
-      self.RenderHtml('bug_result.html', {'error': str(error)})
+      self.response.out.write(json.dumps({'error': str(error)}))
 
   def _ShowAlertsWithBugId(self, bug_id):
     """Show alerts for |bug_id|.
@@ -153,7 +157,7 @@
     self._ShowAlerts(anomalies)
 
   def _ShowAlerts(self, alert_list, bug_id=None):
-    """Renders the template group_report.html with a list of alerts.
+    """Responds to an XHR from /group_report page with a JSON list of alerts.
 
     Args:
       alert_list: A list of Anomaly and/or StoppageAlert entities.
@@ -168,13 +172,16 @@
     if bug_id and ndb.Key('Bug', bug_id).get():
       owner_info = _GetOwnerInfo(alert_dicts)
 
-    self.RenderHtml('group_report.html', {
-        'alert_list': json.dumps(alert_dicts[:_DISPLAY_LIMIT]),
-        'subtests': json.dumps(_GetSubTestsForAlerts(alert_dicts)),
+    values = {
+        'alert_list': alert_dicts[:_DISPLAY_LIMIT],
+        'subtests': _GetSubTestsForAlerts(alert_dicts),
         'bug_id': bug_id,
-        'owner_info': json.dumps(owner_info),
-        'test_suites': json.dumps(update_test_suites.FetchCachedTestSuites()),
-    })
+        'owner_info': owner_info,
+        'test_suites': update_test_suites.FetchCachedTestSuites(),
+    }
+    self.GetDynamicVariables(values)
+
+    self.response.out.write(json.dumps(values))
 
 
 def _IsInt(x):
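
After this change /group_report serves its static HTML on GET, and the alert data moves to a JSON payload returned from POST. A hedged sketch of fetching that payload outside the page (Python 2 to match the dashboard; the parameters are placeholders, and only the field names come from _ShowAlerts above):

import json
import urllib
import urllib2

# Placeholder parameter; callers may instead pass urlsafe Anomaly keys or rev.
params = urllib.urlencode({'bug_id': 123})
response = urllib2.urlopen(
    'https://chromeperf.appspot.com/group_report', params)
values = json.loads(response.read())

alert_list = values['alert_list']  # List of alert dicts, capped at _DISPLAY_LIMIT.
subtests = values['subtests']
owner_info = values['owner_info']  # Only populated for bug_id requests.
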
diff --git a/catapult/dashboard/dashboard/group_report_test.py b/catapult/dashboard/dashboard/group_report_test.py
index 5bf8cfb..daa8794 100644
--- a/catapult/dashboard/dashboard/group_report_test.py
+++ b/catapult/dashboard/dashboard/group_report_test.py
@@ -68,7 +68,12 @@
     return sheriff.Sheriff(
         id='Chromium Perf Sheriff', email='sullivan@google.com').put()
 
-  def testGet_WithAnomalyKeys_ShowsSelectedAndOverlapping(self):
+  def testGet(self):
+    response = self.testapp.get('/group_report')
+    self.assertEqual('text/html', response.content_type)
+    self.assertIn('Chrome Performance Dashboard', response.body)
+
+  def testPost_WithAnomalyKeys_ShowsSelectedAndOverlapping(self):
     sheriff_key = self._AddSheriff()
     test_keys = self._AddTests()
     selected_ranges = [(400, 900), (200, 700)]
@@ -81,26 +86,26 @@
     self._AddAnomalyEntities(
         non_overlapping_ranges, test_keys[0], sheriff_key)
 
-    response = self.testapp.get(
+    response = self.testapp.post(
         '/group_report?keys=%s' % ','.join(selected_keys))
-    alert_list = self.GetEmbeddedVariable(response, 'ALERT_LIST')
+    alert_list = self.GetJsonValue(response, 'alert_list')
 
     # Expect selected alerts + overlapping alerts,
     # but not the non-overlapping alert.
     self.assertEqual(5, len(alert_list))
 
-  def testGet_WithKeyOfNonExistentAlert_ShowsError(self):
+  def testPost_WithKeyOfNonExistentAlert_ShowsError(self):
     key = ndb.Key('Anomaly', 123)
-    response = self.testapp.get('/group_report?keys=%s' % key.urlsafe())
-    self.assertIn('error', response.body)
-    self.assertIn('No Anomaly found for key', response.body)
+    response = self.testapp.post('/group_report?keys=%s' % key.urlsafe())
+    error = self.GetJsonValue(response, 'error')
+    self.assertEqual('No Anomaly found for key %s.' % key.urlsafe(), error)
 
-  def testGet_WithInvalidKeyParameter_ShowsError(self):
-    response = self.testapp.get('/group_report?keys=foobar')
-    self.assertIn('error', response.body)
-    self.assertIn('Invalid Anomaly key', response.body)
+  def testPost_WithInvalidKeyParameter_ShowsError(self):
+    response = self.testapp.post('/group_report?keys=foobar')
+    error = self.GetJsonValue(response, 'error')
+    self.assertIn('Invalid Anomaly key', error)
 
-  def testGet_WithRevParameter(self):
+  def testPost_WithRevParameter(self):
     # If the rev parameter is given, then all alerts whose revision range
     # includes the given revision should be included.
     sheriff_key = self._AddSheriff()
@@ -108,16 +113,16 @@
     self._AddAnomalyEntities(
         [(190, 210), (200, 300), (100, 200), (400, 500)],
         test_keys[0], sheriff_key)
-    response = self.testapp.get('/group_report?rev=200')
-    alert_list = self.GetEmbeddedVariable(response, 'ALERT_LIST')
+    response = self.testapp.post('/group_report?rev=200')
+    alert_list = self.GetJsonValue(response, 'alert_list')
     self.assertEqual(3, len(alert_list))
 
-  def testGet_WithInvalidRevParameter_ShowsError(self):
-    response = self.testapp.get('/group_report?rev=foo')
-    self.assertIn('error', response.body)
-    self.assertIn('Invalid rev', response.body)
+  def testPost_WithInvalidRevParameter_ShowsError(self):
+    response = self.testapp.post('/group_report?rev=foo')
+    error = self.GetJsonValue(response, 'error')
+    self.assertEqual('Invalid rev "foo".', error)
 
-  def testGet_WithBugIdParameter(self):
+  def testPost_WithBugIdParameter(self):
     sheriff_key = self._AddSheriff()
     test_keys = self._AddTests()
     bug_data.Bug(id=123).put()
@@ -126,22 +131,22 @@
         test_keys[0], sheriff_key, bug_id=123)
     self._AddAnomalyEntities(
         [(150, 250)], test_keys[0], sheriff_key)
-    response = self.testapp.get('/group_report?bug_id=123')
-    alert_list = self.GetEmbeddedVariable(response, 'ALERT_LIST')
+    response = self.testapp.post('/group_report?bug_id=123')
+    alert_list = self.GetJsonValue(response, 'alert_list')
     self.assertEqual(3, len(alert_list))
 
-  def testGet_WithBugIdParameter_ListsStoppageAlerts(self):
+  def testPost_WithBugIdParameter_ListsStoppageAlerts(self):
     test_keys = self._AddTests()
     bug_data.Bug(id=123).put()
     row = testing_common.AddRows(utils.TestPath(test_keys[0]), {100})[0]
     alert = stoppage_alert.CreateStoppageAlert(test_keys[0].get(), row)
     alert.bug_id = 123
     alert.put()
-    response = self.testapp.get('/group_report?bug_id=123')
-    alert_list = self.GetEmbeddedVariable(response, 'ALERT_LIST')
+    response = self.testapp.post('/group_report?bug_id=123')
+    alert_list = self.GetJsonValue(response, 'alert_list')
     self.assertEqual(1, len(alert_list))
 
-  def testGet_WithBugIdForBugThatHasOwner_ShowsOwnerInfo(self):
+  def testPost_WithBugIdForBugThatHasOwner_ShowsOwnerInfo(self):
     sheriff_key = self._AddSheriff()
     test_keys = self._AddTests()
     bug_data.Bug(id=123).put()
@@ -150,14 +155,16 @@
     test_suite_path = '%s/%s' % (test_path_parts[0], test_path_parts[2])
     test_owner.AddOwnerFromDict({test_suite_path: ['foo@bar.com']})
     self._AddAnomalyEntities([(150, 250)], test_key, sheriff_key, bug_id=123)
-    response = self.testapp.get('/group_report?bug_id=123')
-    owner_info = self.GetEmbeddedVariable(response, 'OWNER_INFO')
+    response = self.testapp.post('/group_report?bug_id=123')
+    owner_info = self.GetJsonValue(response, 'owner_info')
     self.assertEqual('foo@bar.com', owner_info[0]['email'])
 
-  def testGet_WithInvalidBugIdParameter_ShowsError(self):
-    response = self.testapp.get('/group_report?bug_id=foo')
-    self.assertNotIn('ALERT_LIST', response.body)
-    self.assertIn('Invalid bug ID', response.body)
+  def testPost_WithInvalidBugIdParameter_ShowsError(self):
+    response = self.testapp.post('/group_report?bug_id=foo')
+    alert_list = self.GetJsonValue(response, 'alert_list')
+    self.assertIsNone(alert_list)
+    error = self.GetJsonValue(response, 'error')
+    self.assertEqual('Invalid bug ID "foo".', error)
 
 
 if __name__ == '__main__':
diff --git a/catapult/dashboard/dashboard/issue_tracker_service.py b/catapult/dashboard/dashboard/issue_tracker_service.py
index 5787eb2..4c31d02 100644
--- a/catapult/dashboard/dashboard/issue_tracker_service.py
+++ b/catapult/dashboard/dashboard/issue_tracker_service.py
@@ -4,12 +4,16 @@
 
 """Provides a layer of abstraction for the issue tracker API."""
 
+import json
 import logging
 
 from apiclient import discovery
 from apiclient import errors
 import httplib2
 
+_DISCOVERY_URI = ('https://monorail-prod.appspot.com'
+                  '/_ah/api/discovery/v1/apis/{api}/{apiVersion}/rest')
+
 
 class IssueTrackerService(object):
   """Class for updating bug issues."""
@@ -20,20 +24,22 @@
     This object can be re-used to make multiple requests without calling
     apliclient.discovery.build multiple times.
 
-    This class makes requests to the Project Hosting API. Project hosting is
-    another name for Google Code, which includes the issue tracker used by
-    Chromium. API explorer:
-    http://developers.google.com/apis-explorer/#s/projecthosting/v2/
+    This class makes requests to the Monorail API.
+    API explorer: https://goo.gl/xWd0dX
 
     Args:
-      http: A Http object to pass to request.execute.
+      http: A Http object to pass to request.execute; this should be an
+          Http object that's already authenticated via OAuth2.
       additional_credentials: A credentials object, e.g. an instance of
-          oauth2client.client.SignedJwtAssertionCredentials.
+          oauth2client.client.SignedJwtAssertionCredentials. This includes
+          the email and secret key of a service account.
     """
     self._http = http or httplib2.Http()
     if additional_credentials:
       additional_credentials.authorize(self._http)
-    self._service = discovery.build('projecthosting', 'v2')
+    self._service = discovery.build(
+        'monorail', 'v1', discoveryServiceUrl=_DISCOVERY_URI,
+        http=self._http)
 
   def AddBugComment(self, bug_id, comment, status=None, cc_list=None,
                     merge_issue=None, labels=None, owner=None):
@@ -75,25 +81,53 @@
 
     return self._MakeCommentRequest(bug_id, body)
 
-  def _MakeCommentRequest(self, bug_id, body):
-    """Make a request to the issue tracker to update a bug."""
+  def List(self, **kwargs):
+    """Makes a request to the issue tracker to list bugs."""
+    request = self._service.issues().list(projectId='chromium', **kwargs)
+    return self._ExecuteRequest(request)
+
+  def _MakeCommentRequest(self, bug_id, body, retry=True):
+    """Makes a request to the issue tracker to update a bug.
+
+    Args:
+      bug_id: Bug ID of the issue.
+      body: Dict of comment parameters.
+      retry: True to retry on failure, False otherwise.
+
+    Returns:
+      True if a comment was successfully posted or the issue was deleted.
+      False if making a comment failed unexpectedly.
+    """
     request = self._service.issues().comments().insert(
         projectId='chromium',
         issueId=bug_id,
+        sendEmail=True,
         body=body)
-    response = self._ExecuteRequest(request)
-    if not response:
-      logging.error('Error updating bug %s with body %s', bug_id, body)
-      return False
-    return True
+    try:
+      if self._ExecuteRequest(request, ignore_error=False):
+        return True
+    except errors.HttpError as e:
+      reason = _GetErrorReason(e)
+      # Retry without the owner if we cannot set the owner on this issue.
+      if retry and 'The user does not exist' in reason:
+        _RemoveOwnerAndCC(body)
+        return self._MakeCommentRequest(bug_id, body, retry=False)
+      # This error reason is received when the issue has been deleted.
+      elif 'User is not allowed to view this issue' in reason:
+        logging.warning('Unable to update bug %s with body %s', bug_id, body)
+        return True
+    logging.error('Error updating bug %s with body %s', bug_id, body)
+    return False
 
-  def NewBug(self, title, description, labels=None, owner=None):
+  def NewBug(self, title, description, labels=None, components=None,
+             owner=None):
     """Creates a new bug.
 
     Args:
       title: The short title text of the bug.
       description: The body text for the bug.
       labels: Starting labels for the bug.
+      components: Starting components for the bug.
       owner: Starting owner account name.
 
     Returns:
@@ -104,6 +138,7 @@
         'summary': title,
         'description': description,
         'labels': labels or [],
+        'components': components or [],
         'status': 'Assigned',
     }
     if owner:
@@ -119,14 +154,52 @@
     Returns:
       A bug ID if successful, or None otherwise.
     """
-    request = self._service.issues().insert(projectId='chromium', body=body)
+    request = self._service.issues().insert(
+        projectId='chromium',
+        sendEmail=True,
+        body=body)
     response = self._ExecuteRequest(request)
     if response and 'id' in response:
       return response['id']
     return None
 
-  def _ExecuteRequest(self, request):
-    """Make a request to the issue tracker.
+  def GetLastBugCommentsAndTimestamp(self, bug_id):
+    """Gets last updated comments and timestamp in the given bug.
+
+    Args:
+      bug_id: Bug ID of the issue to fetch comments for.
+
+    Returns:
+      A dictionary with the last comment and timestamp, or None on failure.
+    """
+    if not bug_id or bug_id < 0:
+      return None
+    response = self._MakeGetCommentsRequest(bug_id)
+    if response and all(v in response.keys()
+                        for v in ['totalResults', 'items']):
+      bug_comments = response.get('items')[response.get('totalResults') - 1]
+      if bug_comments.get('content') and bug_comments.get('published'):
+        return {
+            'comment': bug_comments.get('content'),
+            'timestamp': bug_comments.get('published')
+        }
+    return None
+
+  def _MakeGetCommentsRequest(self, bug_id):
+    """Makes a request to the issue tracker to get comments in the bug."""
+    # TODO (prasadv): By default the max number of comments retrieved in
+    # one request is 100. Since bisect-fyi jobs may have more than 100
+    # comments, we set the maxResults count to 10000 for now.
+    # Remove this max count once we find a way to clear old comments
+    # on FYI issues.
+    request = self._service.issues().comments().list(
+        projectId='chromium',
+        issueId=bug_id,
+        maxResults=10000)
+    return self._ExecuteRequest(request)
+
+  def _ExecuteRequest(self, request, ignore_error=True):
+    """Makes a request to the issue tracker.
 
     Args:
       request: The request object, which has a execute method.
@@ -139,4 +212,23 @@
       return response
     except errors.HttpError as e:
       logging.error(e)
-      return None
+      if ignore_error:
+        return None
+      raise e
+
+
+def _RemoveOwnerAndCC(request_body):
+  if 'updates' not in request_body:
+    return
+  if 'owner' in request_body['updates']:
+    del request_body['updates']['owner']
+  if 'cc' in request_body['updates']:
+    del request_body['updates']['cc']
+
+
+def _GetErrorReason(request_error):
+  if request_error.resp.get('content-type', '').startswith('application/json'):
+    error_json = json.loads(request_error.content).get('error')
+    if error_json:
+      return error_json.get('message')
+  return None
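
The constructor above now builds a Monorail client through apiclient discovery with a custom discovery URL. A minimal sketch of the same construction in isolation, assuming an already OAuth2-authorized httplib2.Http object and a 'content' field for the comment body (the body schema is not shown in this patch):

from apiclient import discovery

_DISCOVERY_URI = ('https://monorail-prod.appspot.com'
                  '/_ah/api/discovery/v1/apis/{api}/{apiVersion}/rest')


def BuildMonorailService(authorized_http):
  """Builds a Monorail API client from an OAuth2-authorized Http object."""
  return discovery.build(
      'monorail', 'v1', discoveryServiceUrl=_DISCOVERY_URI,
      http=authorized_http)


def CommentOnIssue(service, bug_id, text):
  """Posts a plain comment on a chromium issue (body schema is assumed)."""
  request = service.issues().comments().insert(
      projectId='chromium', issueId=bug_id, sendEmail=True,
      body={'content': text})
  return request.execute()
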
diff --git a/catapult/dashboard/dashboard/issue_tracker_service_test.py b/catapult/dashboard/dashboard/issue_tracker_service_test.py
index 42218ba..0861e33 100644
--- a/catapult/dashboard/dashboard/issue_tracker_service_test.py
+++ b/catapult/dashboard/dashboard/issue_tracker_service_test.py
@@ -2,14 +2,17 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+import json
+import mock
 import unittest
 
-import mock
+from apiclient import errors
 
 from dashboard import issue_tracker_service
 from dashboard import testing_common
 
 
+@mock.patch('issue_tracker_service.discovery.build', mock.MagicMock())
 class IssueTrackerServiceTest(testing_common.TestCase):
 
   def testAddBugComment_Basic(self):
@@ -91,10 +94,36 @@
             'summary': 'Bug title',
             'description': 'body',
             'labels': [],
+            'components': [],
             'status': 'Assigned',
             'owner': {'name': 'someone@chromium.org'},
         })
 
+  def testMakeCommentRequest_UserDoesNotExist_RetryMakeCommentRequest(self):
+    service = issue_tracker_service.IssueTrackerService()
+    error_content = {
+        'error': {'message': 'The user does not exist: test@chromium.org',
+                  'code': 404}
+    }
+    service._ExecuteRequest = mock.Mock(side_effect=errors.HttpError(
+        mock.Mock(return_value={'status': 404}), json.dumps(error_content)))
+    service.AddBugComment(12345, 'The comment', cc_list='test@chromium.org',
+                          owner=['test@chromium.org'])
+    self.assertEqual(2, service._ExecuteRequest.call_count)
+
+  def testMakeCommentRequest_IssueDeleted_ReturnsTrue(self):
+    service = issue_tracker_service.IssueTrackerService()
+    error_content = {
+        'error': {'message': 'User is not allowed to view this issue 12345',
+                  'code': 403}
+    }
+    service._ExecuteRequest = mock.Mock(side_effect=errors.HttpError(
+        mock.Mock(return_value={'status': 403}), json.dumps(error_content)))
+    comment_posted = service.AddBugComment(12345, 'The comment',
+                                           owner='test@chromium.org')
+    self.assertEqual(1, service._ExecuteRequest.call_count)
+    self.assertEqual(True, comment_posted)
+
 
 if __name__ == '__main__':
   unittest.main()
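
The two new tests fake Monorail failures by making a mocked _ExecuteRequest raise apiclient's errors.HttpError. The same construction in isolation, as a sketch that assumes only mock and apiclient are importable:

import json

import mock
from apiclient import errors


def MakeFakeHttpError(status, message):
  """Builds an HttpError like the ones the retry logic inspects."""
  content = json.dumps({'error': {'message': message, 'code': status}})
  # The mocked response only needs to satisfy whatever the code under test
  # reads from it; the real tests pass a Mock wrapping a status dict.
  resp = mock.Mock(return_value={'status': status})
  return errors.HttpError(resp, content)


error = MakeFakeHttpError(404, 'The user does not exist: test@chromium.org')
failing_execute = mock.Mock(side_effect=error)
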
diff --git a/catapult/dashboard/dashboard/layered_cache.py b/catapult/dashboard/dashboard/layered_cache.py
index 070839d..2c51264 100644
--- a/catapult/dashboard/dashboard/layered_cache.py
+++ b/catapult/dashboard/dashboard/layered_cache.py
@@ -76,7 +76,7 @@
 
 
 def Prewarm(keys):
-  """Prewarm the NDB in-context cache by doing async_get for the keys.
+  """Prewarms the NDB in-context cache by doing async_get for the keys.
 
   For requests like /add_point which can get/set dozens of keys, contention
   occasionally causes the gets to take several seconds. But they will be
@@ -145,7 +145,7 @@
 def SetExternal(key, value, days_to_keep=None):
   """Sets the value in the datastore for the externally namespaced key.
 
-  Needed for things like /add_point that update internal/exteral data at the
+  Needed for things like /add_point that update internal/external data at the
   same time.
 
   Args:
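
layered_cache stores values under internal_only__/externally_visible__ key prefixes depending on the current user, and SetExternal writes the external copy explicitly. A usage sketch mirroring the tests in layered_cache_test.py below; 'build_info' is a hypothetical key and an App Engine (testbed) context is assumed:

from dashboard import layered_cache

# Written under the current user's namespace (internal or external).
layered_cache.Set('build_info', {'rev': 12345}, days_to_keep=10)
# Explicitly written to the externally visible namespace.
layered_cache.SetExternal('build_info', {'rev': 12345})
# Read back from whichever namespace the current user can see.
value = layered_cache.Get('build_info')
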
diff --git a/catapult/dashboard/dashboard/layered_cache_test.py b/catapult/dashboard/dashboard/layered_cache_test.py
index c1d5d0f..833e7d5 100644
--- a/catapult/dashboard/dashboard/layered_cache_test.py
+++ b/catapult/dashboard/dashboard/layered_cache_test.py
@@ -24,10 +24,11 @@
         layered_cache.DeleteExpiredEntitiesHandler)])
     self.testapp = webtest.TestApp(app)
     self.UnsetCurrentUser()
-    testing_common.SetInternalDomain('google.com')
+    testing_common.SetIsInternalUser('internal@chromium.org', True)
+    testing_common.SetIsInternalUser('foo@chromium.org', False)
 
   def testSetAndGet(self):
-    self.SetCurrentUser('foo@google.com')
+    self.SetCurrentUser('internal@chromium.org')
     layered_cache.Set('str', 'Hello, World!')
     layered_cache.Set('dict', {'hello': [1, 2, 3]})
     self.assertEqual(
@@ -37,9 +38,9 @@
     self.assertIsNone(
         ndb.Key('CachedPickledString', 'externally_visible__str').get())
     self.assertEqual('Hello, World!', layered_cache.Get('str'))
-    self.SetCurrentUser('foo@yahoo.com')
+    self.SetCurrentUser('foo@chromium.org')
     self.assertIsNone(layered_cache.Get('str'))
-    self.SetCurrentUser('foo@google.com')
+    self.SetCurrentUser('internal@chromium.org')
     self.assertEqual(
         {'hello': [1, 2, 3]},
         cPickle.loads(
@@ -47,7 +48,7 @@
     self.assertIsNone(
         ndb.Key('CachedPickledString', 'externally_visible__dict').get())
     self.assertEqual({'hello': [1, 2, 3]}, layered_cache.Get('dict'))
-    self.SetCurrentUser('foo@yahoo.com')
+    self.SetCurrentUser('foo@chromium.org')
     self.assertIsNone(layered_cache.Get('dict'))
 
   def testGetAndSet_External(self):
@@ -73,18 +74,18 @@
     self.assertEqual({'hello': [1, 2, 3]}, layered_cache.GetExternal('dict'))
 
   def testDelete(self):
-    self.SetCurrentUser('foo@google.com')
+    self.SetCurrentUser('internal@chromium.org')
     layered_cache.Set('hello', 'secret')
-    self.SetCurrentUser('foo@yahoo.com')
+    self.SetCurrentUser('foo@chromium.org')
     layered_cache.Set('hello', 'not secret')
     layered_cache.Delete('hello')
-    self.SetCurrentUser('foo@google.com')
+    self.SetCurrentUser('internal@chromium.org')
     self.assertIsNone(layered_cache.Get('hello'))
-    self.SetCurrentUser('foo@yahoo.com')
+    self.SetCurrentUser('foo@chromium.org')
     self.assertIsNone(layered_cache.Get('hello'))
 
   def testExpireTime(self):
-    self.SetCurrentUser('foo@google.com')
+    self.SetCurrentUser('internal@chromium.org')
     layered_cache.Set('str1', 'Hello, World!', days_to_keep=10)
     key_internal = ndb.Key('CachedPickledString', 'internal_only__str1')
     key_external = ndb.Key('CachedPickledString', 'externally_visible__str1')
@@ -98,11 +99,11 @@
     self.assertEqual(actual_date.date(), expected_date.date())
 
     # When current user is external, the external version is returned by Get.
-    self.SetCurrentUser('foo@yahoo.com')
+    self.SetCurrentUser('foo@chromium.org')
     self.assertIsNone(layered_cache.Get('str1'))
 
   def testDeleteAllExpiredEntities(self):
-    self.SetCurrentUser('foo@google.com')
+    self.SetCurrentUser('internal@chromium.org')
     layered_cache.Set('expired_str1', 'apple', days_to_keep=-10)
     layered_cache.Set('expired_str2', 'bat', days_to_keep=-1)
     layered_cache.Set('expired_str3', 'cat', days_to_keep=10)
@@ -121,7 +122,7 @@
     self.assertEqual('egg', layered_cache.Get('expired_str5'))
 
   def testGet_DeleteExpiredEntities(self):
-    self.SetCurrentUser('foo@google.com')
+    self.SetCurrentUser('internal@chromium.org')
     layered_cache.Set('expired_str1', 'apple', days_to_keep=-10)
     layered_cache.Set('expired_str2', 'bat', days_to_keep=-1)
     layered_cache.Set('expired_str3', 'cat', days_to_keep=10)
diff --git a/catapult/dashboard/dashboard/list_tests.py b/catapult/dashboard/dashboard/list_tests.py
index 091ffa9..cf9dc48 100644
--- a/catapult/dashboard/dashboard/list_tests.py
+++ b/catapult/dashboard/dashboard/list_tests.py
@@ -190,8 +190,9 @@
     entry['deprecated'] = True
   return entry
 
+
 def GetTestsMatchingPattern(pattern, only_with_rows=False, list_entities=False):
-  """Given a pattern, get the Test entities or keys which match.
+  """Gets the Test entities or keys which match |pattern|.
 
   For this function, it's assumed that a test path should only have up to seven
   parts. In theory, tests can be arbitrarily nested, but in practice, tests
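
GetTestsMatchingPattern resolves patterns such as '*/mac/*/*/www*' against slash-separated test paths. The real implementation builds datastore queries, but the pattern semantics can be pictured as part-by-part wildcard matching; the sketch below is only an illustration of that reading, and the test path in it is hypothetical:

import fnmatch


def PathMatchesPattern(test_path, pattern):
  """Illustrative only: part-by-part wildcard matching of a test path."""
  path_parts = test_path.split('/')
  pattern_parts = pattern.split('/')
  if len(path_parts) != len(pattern_parts):
    return False
  return all(fnmatch.fnmatch(part, part_pattern)
             for part, part_pattern in zip(path_parts, pattern_parts))


# Hypothetical path; matches the '*/mac/*/*/www*' pattern used in the tests.
print PathMatchesPattern(
    'ChromiumPerf/mac/dromaeo/jslib/www.example.com', '*/mac/*/*/www*')
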
diff --git a/catapult/dashboard/dashboard/list_tests_test.py b/catapult/dashboard/dashboard/list_tests_test.py
index a7be330..433f062 100644
--- a/catapult/dashboard/dashboard/list_tests_test.py
+++ b/catapult/dashboard/dashboard/list_tests_test.py
@@ -27,7 +27,8 @@
     self.testapp = webtest.TestApp(app)
     datastore_hooks.InstallHooks()
     self.UnsetCurrentUser()
-    testing_common.SetInternalDomain('google.com')
+    testing_common.SetIsInternalUser('internal@chromium.org', True)
+    testing_common.SetIsInternalUser('foo@chromium.org', False)
 
   def _AddSampleData(self):
     testing_common.AddTests(
@@ -175,10 +176,9 @@
     }
     self.assertEqual(expected, json.loads(response.body))
 
-
   def testGetSubTests_InternalData_OnlyReturnedForAuthorizedUsers(self):
     # When the user has an internal account, internal-only data is given.
-    self.SetCurrentUser('foo@google.com')
+    self.SetCurrentUser('internal@chromium.org')
     self._AddSampleData()
 
     # Set internal_only on a bot and top-level test.
@@ -212,12 +212,12 @@
     self.assertEqual(expected, json.loads(response.body))
 
     # After setting the user to another domain, an empty dict is returned.
-    self.SetCurrentUser('foo@yahoo.com')
+    self.SetCurrentUser('foo@chromium.org')
     response = self.testapp.post('/list_tests', {
         'type': 'sub_tests', 'suite': 'dromaeo', 'bots': 'Chromium/win7'})
     self.assertEqual({}, json.loads(response.body))
 
-  def test_MergeSubTestsDict(self):
+  def testMergeSubTestsDict(self):
     a = {
         'foo': {
             'has_rows': True,
@@ -300,7 +300,7 @@
     self.assertEqual(
         expected, list_tests._SubTestsDict(paths, False))
 
-  def test_GetTestsMatchingPattern(self):
+  def testPost_GetTestsMatchingPattern(self):
     """Tests the basic functionality of the GetTestsMatchingPattern function."""
     self._AddSampleData()
 
@@ -329,7 +329,7 @@
         'p': '*/mac/*/*/www*'})
     self.assertEqual(expected, json.loads(response.body))
 
-  def test_GetTestsMatchingPattern_OnlyWithRows(self):
+  def testPost_GetTestsMatchingPattern_OnlyWithRows(self):
     """Tests GetTestsMatchingPattern with the parameter only_with_rows set."""
     self._AddSampleData()
 
diff --git a/catapult/dashboard/dashboard/main.py b/catapult/dashboard/dashboard/main.py
index 39dbd27..459bf92 100644
--- a/catapult/dashboard/dashboard/main.py
+++ b/catapult/dashboard/dashboard/main.py
@@ -2,15 +2,11 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-"""URL endpoint for the main page which lists recent anomalies and bugs."""
+"""URL endpoint for the main page which lists recent anomalies."""
 
 import datetime
-import json
 import logging
-import urllib
 
-from google.appengine.api import urlfetch
-from google.appengine.api import urlfetch_errors
 from google.appengine.ext import ndb
 
 from dashboard import email_template
@@ -30,25 +26,20 @@
   def get(self):
     """Renders the UI for the main overview page.
 
-    The purpose of this page is to show recent regressions and improvements,
-    as well as recently-filed bugs.
-
     Request parameters:
-      days: Number of days to show anomalies and bugs for (optional).
+      days: Number of days to show anomalies for (optional).
       sheriff: Sheriff to show anomalies for (optional)
       num_changes: The number of improvements/regressions to list.
 
     Outputs:
-      A HTML page that shows recent regressions, improvements and bugs.
+      An HTML page that shows recent regressions and improvements.
     """
     days = int(self.request.get('days', _DEFAULT_DAYS_TO_SHOW))
     num_changes = int(self.request.get('num_changes', _DEFAULT_CHANGES_TO_SHOW))
     sheriff_name = self.request.get('sheriff', _DEFAULT_SHERIFF_NAME)
     sheriff = ndb.Key('Sheriff', sheriff_name)
 
-    top_bugs_rpc = _TopBugsUrlFetch(days)
     anomalies = _GetRecentAnomalies(days, sheriff)
-    top_bugs = _GetTopBugsResult(top_bugs_rpc)
 
     top_improvements = _TopImprovements(anomalies, num_changes)
     top_regressions = _TopRegressions(anomalies, num_changes)
@@ -60,7 +51,6 @@
         'sheriff_name': sheriff_name,
         'improvements': _AnomalyInfoDicts(top_improvements, tests),
         'regressions': _AnomalyInfoDicts(top_regressions, tests),
-        'bugs': top_bugs,
     }
     self.RenderHtml('main.html', template_dict)
 
@@ -120,7 +110,7 @@
   """
   anomaly_list = []
   for anomaly_entity in anomalies:
-    # TODO(qyearsley): Add test coverage. See http://crbug.com/447432
+    # TODO(qyearsley): Add test coverage. See catapult:#1346.
     test = tests.get(anomaly_entity.test)
     if not test:
       logging.warning('No Test entity for key: %s.', anomaly_entity.test)
@@ -170,64 +160,3 @@
   """
   regressions = [a for a in recent_anomalies if not a.is_improvement]
   return regressions[:num_to_show]
-
-
-def _TopBugsUrlFetch(days):
-  """Makes asychronous fetch for top bugs.
-
-  Args:
-    days: Number of days, as an integer.
-
-  Returns:
-    An RPC object of asynchronous request.
-  """
-  query_url = _GetQueryUrl(days)
-  rpc = urlfetch.create_rpc(deadline=5)
-  urlfetch.make_fetch_call(rpc, query_url)
-  return rpc
-
-
-def _GetTopBugsResult(rpc):
-  """Gets a dictionary with recent bug information.
-
-  Args:
-    rpc: RPC object of asynchronous request.
-
-  Returns:
-    A list of dictionaries with information about bugs, or [] if no list
-    could be fetched.
-  """
-  try:
-    response = rpc.get_result()
-    if response.status_code == 200:
-      bugs = json.loads(response.content)
-      if bugs and bugs.get('items'):
-        return bugs['items']
-  except urlfetch_errors.DeadlineExceededError:
-    pass
-  except urlfetch.DownloadError:
-    pass
-  return []
-
-
-def _GetQueryUrl(days):
-  """Returns the URL to query for bugs.
-
-  Args:
-    days: Number of days as an integer.
-
-  Returns:
-    A URL which can be used to request information about recent bugs.
-  """
-  base_url = ('https://www.googleapis.com'
-              '/projecthosting/v2/projects/chromium/issues?')
-  query_string = urllib.urlencode({
-      'q': ('label:Type-Bug-Regression label:Performance '
-            'opened-after:today-%d' % days),
-      'fields': 'items(id,state,status,summary,author)',
-      'maxResults': '1000',
-      'sort': '-id',
-      'can': 'all',
-      'key': 'AIzaSyDrEBALf59D7TkOuz-bBuOnN2OqzD70NCQ',
-  })
-  return base_url + query_string
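
With the top-bugs fetch removed, the handler only splits recent anomalies into improvements and regressions. A short sketch of that split, mirroring the _TopImprovements/_TopRegressions helpers kept above:

def SplitAnomalies(recent_anomalies, num_to_show):
  """Returns (top improvements, top regressions) from recent anomalies."""
  improvements = [a for a in recent_anomalies if a.is_improvement]
  regressions = [a for a in recent_anomalies if not a.is_improvement]
  return improvements[:num_to_show], regressions[:num_to_show]
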
diff --git a/catapult/dashboard/dashboard/main_test.py b/catapult/dashboard/dashboard/main_test.py
index 948b2fd..6985769 100644
--- a/catapult/dashboard/dashboard/main_test.py
+++ b/catapult/dashboard/dashboard/main_test.py
@@ -4,13 +4,9 @@
 
 import unittest
 
-import mock
 import webapp2
 import webtest
 
-from google.appengine.api import urlfetch
-from google.appengine.api import urlfetch_errors
-
 from dashboard import main
 from dashboard import testing_common
 from dashboard import utils
@@ -24,11 +20,7 @@
     app = webapp2.WSGIApplication([('/', main.MainHandler)])
     self.testapp = webtest.TestApp(app)
 
-  @mock.patch(
-      'google.appengine.api.urlfetch.fetch',
-      mock.MagicMock(return_value=testing_common.FakeResponseObject(500, '')))
-  def testGet_BugRequestFails_PageIsStillShown(self):
-    # Even if the recent bugs list can't be fetched, the page should load.
+  def testGet_PageIsShown(self):
     response = self.testapp.get('/')
     self.assertIn('<html>', response.body)
 
@@ -40,17 +32,6 @@
     self.assertEqual('over-10', main._GetColorClass(12.0))
     self.assertEqual('under-10', main._GetColorClass(0.1))
 
-  def testGetTopBugsResult_DeadlineExceededError_ReturnsEmptyList(self):
-    mock_rpc = mock.MagicMock()
-    mock_rpc.get_result = mock.MagicMock(
-        side_effect=urlfetch_errors.DeadlineExceededError)
-    self.assertEqual([], main._GetTopBugsResult(mock_rpc))
-
-  def testGetTopBugsResult_DownloadError_ReturnsEmptyList(self):
-    mock_rpc = mock.MagicMock()
-    mock_rpc.get_result = mock.MagicMock(side_effect=urlfetch.DownloadError)
-    self.assertEqual([], main._GetTopBugsResult(mock_rpc))
-
   def testAnomalyInfoDicts(self):
     testing_common.AddTests(['M'], ['b'], {'t': {'foo': {}}})
     foo_key = utils.TestKey('M/b/t/foo')
diff --git a/catapult/dashboard/dashboard/migrate_test_names.py b/catapult/dashboard/dashboard/migrate_test_names.py
index 3266f5c..d8b2e63 100644
--- a/catapult/dashboard/dashboard/migrate_test_names.py
+++ b/catapult/dashboard/dashboard/migrate_test_names.py
@@ -16,7 +16,6 @@
 is split up using the task queue.
 """
 
-import logging
 import re
 
 from google.appengine.api import mail
diff --git a/catapult/dashboard/dashboard/mock_oauth2_decorator.py b/catapult/dashboard/dashboard/mock_oauth2_decorator.py
index b3e1308..32d3ff6 100644
--- a/catapult/dashboard/dashboard/mock_oauth2_decorator.py
+++ b/catapult/dashboard/dashboard/mock_oauth2_decorator.py
@@ -7,17 +7,10 @@
 from apiclient import http
 from dashboard import oauth2_decorator
 
-HTTP_MOCK = http.HttpMock(headers={'status': '200'})
-
 
 class MockOAuth2Decorator(object):
   """Mocks OAuth2Decorator for testing."""
 
-  # This list will be used to keep a copy of the mocked http requests' bodies.
-  # Note that this is a class variable because it may not be easy to get the
-  # particular instance of the decorator used.
-  past_bodies = []
-
   def __init__(self, client_id, client_secret, scope, message, callback_path):
     self.client_id = client_id
     self.client_secret = client_secret
@@ -25,11 +18,12 @@
     self.message = message
     self.callback_path = callback_path
 
+  # Lowercase method names are used in this class to match those
+  # in oauth2client.appengine.Oauth2Decorator.
+  # pylint: disable=invalid-name
+
   def http(self):
-    # The body attribute is set after this is returned, so all we can do here
-    # is to save the previous one before it's overriden.
-    MockOAuth2Decorator.past_bodies.append(HTTP_MOCK.body)
-    return HTTP_MOCK
+    return http.HttpMock(headers={'status': '200'})
 
   def oauth_required(self, method):
     def check_oauth(request_handler, *args, **kwargs):
@@ -38,7 +32,7 @@
     return check_oauth
 
 
-oauth2_decorator.decorator = MockOAuth2Decorator(
+oauth2_decorator.DECORATOR = MockOAuth2Decorator(
     client_id='client_id',
     client_secret='client_secret',
     scope='scope',
diff --git a/catapult/dashboard/dashboard/models/alert.py b/catapult/dashboard/dashboard/models/alert.py
index 128c1e9..66701d5 100644
--- a/catapult/dashboard/dashboard/models/alert.py
+++ b/catapult/dashboard/dashboard/models/alert.py
@@ -6,7 +6,6 @@
 
 from google.appengine.ext import ndb
 
-from dashboard.models import alert_group
 from dashboard.models import internal_only_model
 from dashboard.models import sheriff as sheriff_module
 
@@ -82,10 +81,11 @@
           alert_class.group == group.key).fetch()
       grouped_alerts.append(self)
 
-      # The alert has been assigned a real bug ID. Possibly move it to
-      # another group or update the bug ID of its current group.
-      if self.bug_id > 0:
-        self._UpdateGroupOnBugIdChanged(grouped_alerts)
+      # The alert has been assigned a real bug ID.
+      # Update the group bug ID if necessary.
+      if self.bug_id > 0 and group.bug_id != self.bug_id:
+        group.bug_id = self.bug_id
+        group.put()
 
       # The bug has been marked invalid/ignored. Kick it out of the group.
       elif self.bug_id < 0 and self.bug_id is not None:
@@ -123,40 +123,6 @@
     # Update minimum revision range for group.
     group.UpdateRevisionRange(grouped_alerts)
 
-  def _UpdateGroupOnBugIdChanged(self, grouped_alerts):
-    """Updates group and alert state when an alert's bug ID changes.
-
-    If an AnomalyGroup with the same bug_id is found, move this alert to
-    the new group by updating the properties of the new group and old group.
-
-    Otherwise update this group's bug_id. This requires self.bug_id to be a
-    positive integer.
-
-    Args:
-      grouped_alerts: The list of alerts in |group| used to calculate new
-          revision range; none are modified.
-    """
-    new_group = alert_group.AlertGroup.query(
-        alert_group.AlertGroup.bug_id == self.bug_id).get()
-
-    if new_group:
-      self._RemoveFromGroup(grouped_alerts)
-      if self.start_revision > new_group.start_revision:
-        # TODO(qyearsley): Add test to cover this branch.
-        new_group.start_revision = self.start_revision
-      if self.end_revision < new_group.end_revision:
-        new_group.end_revision = self.end_revision
-      test_suite = _GetTestSuiteFromKey(self.test)
-      if test_suite not in new_group.test_suites:
-        new_group.test_suites.append(test_suite)
-      new_group.put()
-      self.group = new_group.key
-    else:
-      group = self.group.get()
-      if group.bug_id is None or len(grouped_alerts) == 1:
-        group.bug_id = self.bug_id
-        group.put()
-
 
 def _GetTestSuiteFromKey(test_key):
   """Gets test suite from |test_key|, None if not found."""
diff --git a/catapult/dashboard/dashboard/models/alert_group.py b/catapult/dashboard/dashboard/models/alert_group.py
index 0059bf0..88c0197 100644
--- a/catapult/dashboard/dashboard/models/alert_group.py
+++ b/catapult/dashboard/dashboard/models/alert_group.py
@@ -122,7 +122,7 @@
   """Adds an anomaly to group and updates the group's properties."""
   update_group = False
   if alert_entity.start_revision > group.start_revision:
-    # TODO(qyearsley): Add test coverage. See http://crbug.com/447432
+    # TODO(qyearsley): Add test coverage. See catapult:#1346.
     group.start_revision = alert_entity.start_revision
     update_group = True
   if alert_entity.end_revision < group.end_revision:
@@ -151,13 +151,14 @@
   sheriff = anomaly_entity.test.get().sheriff
   if not sheriff:
     return
-  # TODO(qyearsley): Add test coverage. See http://crbug.com/447432
+  # TODO(qyearsley): Add test coverage. See catapult:#1346.
   sheriff = sheriff.string_id()
-  html_str = 'Associated alert on %s with bug <a href="%s">%s</a>.'
   bug_url = ('https://chromeperf.appspot.com/group_report?bug_id=' +
              str(bug_id))
   test_path = utils.TestPath(anomaly_entity.test)
+  html_str = ('Associated alert on %s with bug <a href="%s">%s</a>.' %
+              (test_path, bug_url, bug_id))
   formatter = quick_logger.Formatter()
   logger = quick_logger.QuickLogger('auto_triage', sheriff, formatter)
-  logger.Log(html_str, test_path, bug_url, bug_id)
+  logger.Log(html_str)
   logger.Save()
diff --git a/catapult/dashboard/dashboard/models/anomaly.py b/catapult/dashboard/dashboard/models/anomaly.py
index 65850a6..947bd23 100644
--- a/catapult/dashboard/dashboard/models/anomaly.py
+++ b/catapult/dashboard/dashboard/models/anomaly.py
@@ -85,4 +85,3 @@
     # so when the test improvement direction is |UNKNOWN|, |self.is_improvement|
     # will be False.
     self.is_improvement = (self.direction == test.improvement_direction)
-
diff --git a/catapult/dashboard/dashboard/models/anomaly_config.py b/catapult/dashboard/dashboard/models/anomaly_config.py
index 40f2f89..750550c 100644
--- a/catapult/dashboard/dashboard/models/anomaly_config.py
+++ b/catapult/dashboard/dashboard/models/anomaly_config.py
@@ -36,6 +36,17 @@
   patterns = ndb.StringProperty(repeated=True, indexed=False)
 
 
+def CleanConfigDict(config_dict):
+  """Removes invalid parameters from a config dictionary.
+
+  In the config dict there may be extra "comment" parameters which
+  should be ignored. These are removed so that the parameters can
+  be passed to FindChangePoints using ** notation.
+  """
+  return {key: value for key, value in config_dict.iteritems()
+          if key in _VALID_ANOMALY_CONFIG_PARAMETERS}
+
+
 def GetAnomalyConfigDict(test):
   """Gets the anomaly threshold config for the given test.
 
@@ -55,8 +66,4 @@
     # in the pre-put hook of the Test entity.
     test.put()
     return {}
-  config_dict = anomaly_config.config
-  # In the config dict there may be extra "comment" parameters which
-  # should be ignored.
-  return {key: value for key, value in config_dict.iteritems()
-          if key in _VALID_ANOMALY_CONFIG_PARAMETERS}
+  return CleanConfigDict(anomaly_config.config)
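
CleanConfigDict exists so that a stored anomaly config, which may carry free-form 'comment' keys, can be expanded with ** into the change-point finder. A hedged sketch of that idea; the whitelist, parameter names, and stub function below are illustrative stand-ins rather than the real _VALID_ANOMALY_CONFIG_PARAMETERS or FindChangePoints:

_ILLUSTRATIVE_VALID_PARAMETERS = {'max_window_size', 'min_segment_size'}


def CleanConfigDictSketch(config_dict):
  """Drops keys that the change-point finder would not accept."""
  return {key: value for key, value in config_dict.iteritems()
          if key in _ILLUSTRATIVE_VALID_PARAMETERS}


def FindChangePointsStub(series, max_window_size=50, min_segment_size=6):
  """Stand-in for the real change-point finder."""
  return []


config = {'max_window_size': 80, 'comment': 'tuned for a noisy benchmark'}
# Without cleaning, the stray 'comment' key would raise a TypeError here.
FindChangePointsStub([1, 2, 3], **CleanConfigDictSketch(config))
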
diff --git a/catapult/dashboard/dashboard/models/bug_data.py b/catapult/dashboard/dashboard/models/bug_data.py
index b2b6e9f..6d87fff 100644
--- a/catapult/dashboard/dashboard/models/bug_data.py
+++ b/catapult/dashboard/dashboard/models/bug_data.py
@@ -48,7 +48,7 @@
 
 
 def SetBisectStatus(bug_id, status):
-  """Sets bisect status for bug with bug_id."""
+  """Sets the bisect status for a Bug entity."""
   if bug_id is None or bug_id < 0:
     return
   bug = ndb.Key('Bug', int(bug_id)).get()
diff --git a/catapult/dashboard/dashboard/models/graph_data.py b/catapult/dashboard/dashboard/models/graph_data.py
index e2bf51e..019c3e1 100644
--- a/catapult/dashboard/dashboard/models/graph_data.py
+++ b/catapult/dashboard/dashboard/models/graph_data.py
@@ -168,8 +168,12 @@
   # Command to run the test. Optional.
   command_line = ndb.StringProperty(indexed=False)
 
+  # Computed properties are treated like member variables, so they have
+  # lowercase names, even though they look like methods to pylint.
+  # pylint: disable=invalid-name
+
   @ndb.ComputedProperty
-  def bot(self):
+  def bot(self):  # pylint: disable=invalid-name
     """Immediate parent Bot entity, or None if this is not a test suite."""
     parent = self.key.parent()
     if parent.kind() == 'Bot':
@@ -177,7 +181,7 @@
     return None
 
   @ndb.ComputedProperty
-  def parent_test(self):
+  def parent_test(self):  # pylint: disable=invalid-name
     """Immediate parent Test entity, or None if this is a test suite."""
     parent = self.key.parent()
     if parent.kind() == 'Test':
@@ -336,7 +340,7 @@
 
   # The parent_test is the key of the Test entity that this Row belongs to.
   @ndb.ComputedProperty
-  def parent_test(self):
+  def parent_test(self):  # pylint: disable=invalid-name
     # The Test entity that a Row belongs to isn't actually its parent in the
     # datastore. Rather, the parent key of each Row contains a test path, which
     # contains the information necessary to get the actual Test key.
@@ -346,7 +350,7 @@
   # SVN version number, but it might also be any other integer, as long as
   # newer points have higher numbers.
   @ndb.ComputedProperty
-  def revision(self):
+  def revision(self):  # pylint: disable=invalid-name
     return self.key.integer_id()
 
   # The time the revision was added to the dashboard is tracked in order
diff --git a/catapult/dashboard/dashboard/models/internal_only_model_test.py b/catapult/dashboard/dashboard/models/internal_only_model_test.py
index 21a2343..c7e106c 100644
--- a/catapult/dashboard/dashboard/models/internal_only_model_test.py
+++ b/catapult/dashboard/dashboard/models/internal_only_model_test.py
@@ -31,7 +31,8 @@
 
   def setUp(self):
     super(InternalOnlyModelTest, self).setUp()
-    testing_common.SetInternalDomain('google.com')
+    testing_common.SetIsInternalUser('x@google.com', True)
+    testing_common.SetIsInternalUser('x@foo.com', False)
 
   def testInternalOnlyModel_InternalUser_EntityFetched(self):
     key = InternalOnlyModelExample(internal_only=True).put()
diff --git a/catapult/dashboard/dashboard/models/stoppage_alert.py b/catapult/dashboard/dashboard/models/stoppage_alert.py
index 68476ef..66f8fee 100644
--- a/catapult/dashboard/dashboard/models/stoppage_alert.py
+++ b/catapult/dashboard/dashboard/models/stoppage_alert.py
@@ -33,6 +33,10 @@
   # Whether new points have been received for the test after this alert.
   recovered = ndb.BooleanProperty(indexed=True, default=False)
 
+  # Computed properties are treated like member variables, so they have
+  # lowercase names, even though they look like methods to pylint.
+  # pylint: disable=invalid-name
+
   @ndb.ComputedProperty
   def revision(self):
     return self.key.id()
@@ -102,4 +106,3 @@
   test.stoppage_alert = new_alert.key
   test.put()
   return new_alert
-
diff --git a/catapult/dashboard/dashboard/models/try_job.py b/catapult/dashboard/dashboard/models/try_job.py
index eb832c5..496666f 100644
--- a/catapult/dashboard/dashboard/models/try_job.py
+++ b/catapult/dashboard/dashboard/models/try_job.py
@@ -35,8 +35,15 @@
 
   # Bisect run status (e.g., started, failed).
   status = ndb.StringProperty(
-      default=None,
-      choices=['started', 'failed'],
+      default='pending',
+      choices=[
+          'pending',  # Created, but job start has not been confirmed.
+          'started',  # Job is confirmed started.
+          'failed',   # Job terminated, red build.
+          'staled',   # No updates from bots.
+          'completed',  # Job terminated, green build.
+          'aborted',  # Job terminated with abort (purple, early abort).
+      ],
       indexed=True)
 
   # Number of times this job has been tried.
@@ -52,6 +59,14 @@
   # job_name attribute is used by try jobs of bisect FYI.
   job_name = ndb.StringProperty(default=None)
 
+  # Results data coming from bisect bots.
+  results_data = ndb.JsonProperty(indexed=False)
+
+  log_record_id = ndb.StringProperty(indexed=False)
+
+  # Set of emails of users who have confirmed this TryJob result is bad.
+  bad_result_emails = ndb.PickleProperty()
+
   def SetStarted(self):
     self.status = 'started'
     self.run_count += 1
@@ -67,8 +82,17 @@
       bug_data.SetBisectStatus(self.bug_id, 'failed')
     bisect_stats.UpdateBisectStats(self.bot, 'failed')
 
+  def SetStaled(self):
+    self.status = 'staled'
+    self.put()
+    # TODO(chrisphan): Add 'staled' state to bug_data and bisect_stats.
+    if self.bug_id:
+      bug_data.SetBisectStatus(self.bug_id, 'failed')
+    bisect_stats.UpdateBisectStats(self.bot, 'failed')
+
   def SetCompleted(self):
-    self.key.delete()
+    self.status = 'completed'
+    self.put()
     if self.bug_id:
       bug_data.SetBisectStatus(self.bug_id, 'completed')
     bisect_stats.UpdateBisectStats(self.bot, 'completed')
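
TryJob entities now start as 'pending' and are kept (with results_data) rather than deleted on completion. A lifecycle sketch under the assumption of an active ndb/testbed context; the bot name is a placeholder:

from dashboard.models import try_job

job = try_job.TryJob(bot='linux_perf_bisect')
job.put()            # status defaults to 'pending'
job.SetStarted()     # 'started'; increments run_count
job.results_data = {'status': 'completed', 'score': 99.9}
job.SetCompleted()   # 'completed'; the entity is kept rather than deleted
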
diff --git a/catapult/dashboard/dashboard/mr.py b/catapult/dashboard/dashboard/mr.py
index 4650b42..9d38362 100644
--- a/catapult/dashboard/dashboard/mr.py
+++ b/catapult/dashboard/dashboard/mr.py
@@ -25,6 +25,7 @@
 
 from google.appengine.ext import ndb
 
+from dashboard import datastore_hooks
 from dashboard import layered_cache
 from dashboard import request_handler
 from dashboard.models import graph_data
@@ -57,7 +58,7 @@
   """Handler to run a deprecate tests mapper job."""
 
   def get(self):
-    # TODO(qyearsley): Add test coverage. See http://crbug.com/447432
+    # TODO(qyearsley): Add test coverage. See catapult:#1346.
     name = 'Update test deprecation status.'
     handler = ('dashboard.mr.DeprecateTestsMapper')
     reader = 'mapreduce.input_readers.DatastoreInputReader'
@@ -87,18 +88,19 @@
   """
   # Make sure that we have a non-deprecated Test with Rows.
   if entity.key.kind() != 'Test' or not entity.has_rows or entity.deprecated:
-    # TODO(qyearsley): Add test coverage. See http://crbug.com/447432
+    # TODO(qyearsley): Add test coverage. See catapult:#1346.
     logging.error(
         'Got bad entity in mapreduce! Kind: %s, has_rows: %s, deprecated: %s',
         entity.key.kind(), entity.has_rows, entity.deprecated)
     return
 
   # Fetch the last row.
+  datastore_hooks.SetPrivilegedRequest()
   query = graph_data.Row.query(graph_data.Row.parent_test == entity.key)
   query = query.order(-graph_data.Row.timestamp)
   last_row = query.get()
   if not last_row:
-    # TODO(qyearsley): Add test coverage. See http://crbug.com/
+    # TODO(qyearsley): Add test coverage. See catapult:#1346.
     logging.error('No rows for %s (but has_rows=True)', entity.key)
     return
 
diff --git a/catapult/dashboard/dashboard/mr_test.py b/catapult/dashboard/dashboard/mr_test.py
index cf04cc4..ababfda 100644
--- a/catapult/dashboard/dashboard/mr_test.py
+++ b/catapult/dashboard/dashboard/mr_test.py
@@ -4,6 +4,8 @@
 
 import datetime
 import unittest
+import webapp2
+import webtest
 
 from mapreduce import operation as op
 
@@ -29,6 +31,13 @@
 
 class MrTest(testing_common.TestCase):
 
+  def setUp(self):
+    super(MrTest, self).setUp()
+    app = webapp2.WSGIApplication([])
+    self.testapp = webtest.TestApp(app)
+    self.SetCurrentUser('foo@bar.com', is_admin=True)
+    self.PatchDatastoreHooksRequest()
+
   def _ExecOperation(self, operation):
     """Helper method to run a datastore mutation operation.
 
diff --git a/catapult/dashboard/dashboard/namespaced_stored_object_test.py b/catapult/dashboard/dashboard/namespaced_stored_object_test.py
index 601fa28..eb0d6c5 100644
--- a/catapult/dashboard/dashboard/namespaced_stored_object_test.py
+++ b/catapult/dashboard/dashboard/namespaced_stored_object_test.py
@@ -13,26 +13,27 @@
 
   def setUp(self):
     super(NamespacedStoredObjectTest, self).setUp()
-    testing_common.SetInternalDomain('google.com')
+    testing_common.SetIsInternalUser('internal@chromium.org', True)
+    testing_common.SetIsInternalUser('foo@chromium.org', False)
 
   def tearDown(self):
     super(NamespacedStoredObjectTest, self).tearDown()
     self.UnsetCurrentUser()
 
   def testSet_InternalUser_InternalVersionSet(self):
-    self.SetCurrentUser('x@google.com')
+    self.SetCurrentUser('internal@chromium.org')
     namespaced_stored_object.Set('foo', 12345)
     self.assertEqual(12345, stored_object.Get('internal_only__foo'))
     self.assertIsNone(stored_object.Get('externally_visible__foo'))
 
   def testSet_ExternalUser_ExternalVersionSet(self):
-    self.SetCurrentUser('x@external.com')
+    self.SetCurrentUser('foo@chromium.org')
     namespaced_stored_object.Set('foo', 12345)
     self.assertIsNone(stored_object.Get('internal_only__foo'))
     self.assertEqual(12345, stored_object.Get('externally_visible__foo'))
 
   def testSetExternal_InternalUser_ExternalVersionSet(self):
-    self.SetCurrentUser('x@google.com')
+    self.SetCurrentUser('internal@chromium.org')
     namespaced_stored_object.SetExternal('foo', 12345)
     self.assertIsNone(stored_object.Get('internal_only__foo'))
     self.assertEqual(12345, stored_object.Get('externally_visible__foo'))
@@ -41,19 +42,19 @@
     self.assertIsNone(namespaced_stored_object.Get('foo'))
 
   def testGet_InternalUser_InternalVersionReturned(self):
-    self.SetCurrentUser('x@google.com')
+    self.SetCurrentUser('internal@chromium.org')
     stored_object.Set('internal_only__foo', [1, 2, 3])
     stored_object.Set('externally_visible__foo', [4, 5, 6])
     self.assertEqual([1, 2, 3], namespaced_stored_object.Get('foo'))
 
   def testGet_ExternalUser_ExternalVersionReturned(self):
-    self.SetCurrentUser('x@external.com')
+    self.SetCurrentUser('foo@chromium.org')
     stored_object.Set('internal_only__foo', [1, 2, 3])
     stored_object.Set('externally_visible__foo', [4, 5, 6])
     self.assertEqual([4, 5, 6], namespaced_stored_object.Get('foo'))
 
   def testGetExternal_InternalUser_ExternalVersionReturned(self):
-    self.SetCurrentUser('x@google.com')
+    self.SetCurrentUser('internal@chromium.org')
     stored_object.Set('internal_only__foo', [1, 2, 3])
     stored_object.Set('externally_visible__foo', [4, 5, 6])
     self.assertEqual([4, 5, 6], namespaced_stored_object.GetExternal('foo'))
diff --git a/catapult/dashboard/dashboard/navbar.py b/catapult/dashboard/dashboard/navbar.py
new file mode 100644
index 0000000..d1a9e79
--- /dev/null
+++ b/catapult/dashboard/dashboard/navbar.py
@@ -0,0 +1,22 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""XHR endpoint to fill in navbar fields."""
+
+import json
+
+from dashboard import request_handler
+
+
+class NavbarHandler(request_handler.RequestHandler):
+  """XHR endpoint to fill in navbar fields."""
+
+  def post(self):
+    template_values = {}
+    self.GetDynamicVariables(template_values, self.request.get('path'))
+    self.response.out.write(json.dumps({
+        'login_url': template_values['login_url'],
+        'is_admin': template_values['is_admin'],
+        'display_username': template_values['display_username'],
+    }))
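
The new handler is an XHR endpoint returning navbar state as JSON. A sketch of exercising it with webtest in the style of the other dashboard tests in this patch; the user/service stubs provided by testing_common.TestCase are assumed to be active, and '/report' is just an example path:

import json

import webapp2
import webtest

from dashboard import navbar

app = webapp2.WSGIApplication([('/navbar', navbar.NavbarHandler)])
test_app = webtest.TestApp(app)
response = test_app.post('/navbar', {'path': '/report'})
fields = json.loads(response.body)
# fields contains 'login_url', 'is_admin' and 'display_username'.
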
diff --git a/catapult/dashboard/dashboard/new_points_test.py b/catapult/dashboard/dashboard/new_points_test.py
index 0d8dee8..752da76 100644
--- a/catapult/dashboard/dashboard/new_points_test.py
+++ b/catapult/dashboard/dashboard/new_points_test.py
@@ -23,8 +23,9 @@
     app = webapp2.WSGIApplication(
         [('/new_points', new_points.NewPointsHandler)])
     self.testapp = webtest.TestApp(app)
-    self.SetCurrentUser('foo@bar.com', is_admin=True)
-    testing_common.SetInternalDomain('google.com')
+    self.SetCurrentUser('foo@chromium.org', is_admin=True)
+    testing_common.SetIsInternalUser('internal@chromium.org', True)
+    testing_common.SetIsInternalUser('foo@chromium.org', False)
 
   def _AddSampleData(self):
     """Adds some normal test data from two different tests."""
@@ -94,7 +95,7 @@
     self._AddInternalSampleData()
     # The user doesn't need to be authorized as admin to view internal data,
     # they only need to have an internal email address.
-    self.SetCurrentUser('x@google.com')
+    self.SetCurrentUser('internal@chromium.org')
     datastore_hooks.InstallHooks()
     response = self.testapp.get('/new_points')
     # 50 rows for xtest, 1 for the header.
diff --git a/catapult/dashboard/dashboard/oauth2_decorator.py b/catapult/dashboard/dashboard/oauth2_decorator.py
index 81bffbd..d53cf88 100644
--- a/catapult/dashboard/dashboard/oauth2_decorator.py
+++ b/catapult/dashboard/dashboard/oauth2_decorator.py
@@ -6,10 +6,11 @@
 
 from oauth2client.appengine import OAuth2Decorator
 
-decorator = OAuth2Decorator(
+from dashboard import utils
+
+DECORATOR = OAuth2Decorator(
     client_id='425761728072.apps.googleusercontent.com',
     client_secret='9g-XlmEFW8ROI01YY6nrQVKq',
-    scope=['https://www.googleapis.com/auth/projecthosting',
-           'https://www.googleapis.com/auth/userinfo.email'],
+    scope=utils.EMAIL_SCOPE,
     message='Oauth error occurred!',
     callback_path='/oauth2callback')
diff --git a/catapult/dashboard/dashboard/post_bisect_results.py b/catapult/dashboard/dashboard/post_bisect_results.py
new file mode 100644
index 0000000..81bad97
--- /dev/null
+++ b/catapult/dashboard/dashboard/post_bisect_results.py
@@ -0,0 +1,111 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""URL endpoint to allow bisect bots to post results to the dashboard."""
+
+import json
+import logging
+
+from google.appengine.api import app_identity
+from google.appengine.ext import ndb
+
+from dashboard import datastore_hooks
+from dashboard import post_data_handler
+from dashboard import rietveld_service
+from dashboard import update_bug_with_results
+from dashboard import utils
+from dashboard.models import try_job
+
+_EXPECTED_RESULT_PROPERTIES = {
+    'status': ['pending', 'started', 'completed', 'failed', 'aborted'],
+}
+
+
+class BadRequestError(Exception):
+  """An error indicating that a 400 response status should be returned."""
+  pass
+
+
+class PostBisectResultsHandler(post_data_handler.PostDataHandler):
+
+  def post(self):
+    """Validates data parameter and saves to TryJob entity.
+
+    Bisect results come from a "data" parameter, which is a JSON encoding of a
+    dictionary.
+
+    The required fields are "master", "bot", "test".
+
+    Request parameters:
+      data: JSON encoding of a dictionary.
+
+    Outputs:
+      Empty 200 response if successful,
+      200 response with warning message if optional data is invalid,
+      403 response with error message if sender IP is not white-listed,
+      400 response with error message if required data is invalid,
+      500 with error message otherwise.
+    """
+    datastore_hooks.SetPrivilegedRequest()
+    if not self._CheckIpAgainstWhitelist():
+      return
+
+    data = self.request.get('data')
+    if not data:
+      self.ReportError('Missing "data" parameter.', status=400)
+      return
+
+    try:
+      data = json.loads(self.request.get('data'))
+    except ValueError:
+      self.ReportError('Invalid JSON string.', status=400)
+      return
+
+    logging.info('Received data: %s', data)
+
+    try:
+      _ValidateResultsData(data)
+      job = _GetTryJob(data)
+      if not job:
+        self.ReportWarning('No try job found.')
+        return
+      _UpdateTryJob(job, data)
+      update_bug_with_results.UpdateQuickLog(job)
+    except BadRequestError as error:
+      self.ReportError(error.message, status=400)
+
+
+def _ValidateResultsData(results_data):
+  utils.Validate(_EXPECTED_RESULT_PROPERTIES, results_data)
+  # TODO(chrisphan): Validate other values.
+
+
+def _UpdateTryJob(job, results_data):
+  if not job.results_data:
+    job.results_data = {}
+  job.results_data.update(results_data)
+  job.results_data['issue_url'] = (job.results_data.get('issue_url') or
+                                   _IssueURL(job))
+  job.put()
+
+
+def _GetTryJob(results_data):
+  try_job_id = results_data.get('try_job_id')
+  if not try_job_id:
+    return None
+  job = ndb.Key(try_job.TryJob, try_job_id).get()
+  return job
+
+
+def _IssueURL(job):
+  """Returns a URL for information about a bisect try job."""
+  if job.use_buildbucket:
+    hostname = app_identity.get_default_version_hostname()
+    job_id = job.buildbucket_job_id
+    return 'https://%s/buildbucket_job_status/%s' % (hostname, job_id)
+  else:
+    config = rietveld_service.GetDefaultRietveldConfig()
+    host = (config.internal_server_url if job.internal_only else
+            config.server_url)
+    return '%s/%d' % (host, job.rietveld_issue_id)
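
The handler defers field validation to utils.Validate, which is not shown in this patch. The sketch below only illustrates the kind of check implied by _EXPECTED_RESULT_PROPERTIES: an allowed-status list raising BadRequestError, which the handler converts to a 400 response.

_ALLOWED_STATUSES = ('pending', 'started', 'completed', 'failed', 'aborted')


class BadRequestError(Exception):
  """Signals that a 400 response should be returned."""


def ValidateStatusSketch(results_data):
  """Illustration only; the real check is done by utils.Validate."""
  status = results_data.get('status')
  if status not in _ALLOWED_STATUSES:
    raise BadRequestError('Invalid status: %r' % status)


ValidateStatusSketch({'status': 'completed', 'try_job_id': 6789})
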
diff --git a/catapult/dashboard/dashboard/post_bisect_results_test.py b/catapult/dashboard/dashboard/post_bisect_results_test.py
new file mode 100644
index 0000000..606d836
--- /dev/null
+++ b/catapult/dashboard/dashboard/post_bisect_results_test.py
@@ -0,0 +1,98 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+
+import webapp2
+import webtest
+
+from dashboard import post_bisect_results
+from dashboard import rietveld_service
+from dashboard import testing_common
+from dashboard.models import try_job
+
+_SAMPLE_BISECT_RESULTS_JSON = {
+    'try_job_id': 6789,
+    'bug_id': 4567,
+    'status': 'completed',
+    'bisect_bot': 'linux',
+    'buildbot_log_url': '',
+    'command': ('tools/perf/run_benchmark -v '
+                '--browser=release page_cycler.intl_ar_fa_he'),
+    'metric': 'warm_times/page_load_time',
+    'change': '',
+    'score': 99.9,
+    'good_revision': '306475',
+    'bad_revision': '306478',
+    'warnings': None,
+    'aborted_reason': None,
+    'culprit_data': {
+        'subject': 'subject',
+        'author': 'author',
+        'email': 'author@email.com',
+        'cl_date': '1/2/2015',
+        'commit_info': 'commit_info',
+        'revisions_links': [],
+        'cl': '1235'
+    },
+    'revision_data': [
+        {
+            'revision_string': 'chromium@1234',
+            'commit_hash': '1234123412341234123412341234123412341234',
+            'depot_name': 'chromium',
+            'mean_value': 70,
+            'std_dev': 0,
+            'values': [70, 70, 70],
+            'result': 'good'
+        }, {
+            'revision_string': 'chromium@1235',
+            'depot_name': 'chromium',
+            'commit_hash': '1235123512351235123512351235123512351235',
+            'mean_value': 80,
+            'std_dev': 0,
+            'values': [80, 80, 80],
+            'result': 'bad'
+        }
+    ]
+}
+
+# Sample IP addresses to use in the tests below.
+_WHITELISTED_IP = '123.45.67.89'
+
+
+class PostBisectResultsTest(testing_common.TestCase):
+
+  def setUp(self):
+    super(PostBisectResultsTest, self).setUp()
+    app = webapp2.WSGIApplication([
+        ('/post_bisect_results',
+         post_bisect_results.PostBisectResultsHandler)])
+    self.testapp = webtest.TestApp(app)
+    testing_common.SetIpWhitelist([_WHITELISTED_IP])
+    self._AddRietveldConfig()
+
+  def _AddRietveldConfig(self):
+    """Adds a RietveldConfig entity to the datastore.
+
+    This is used in order to get the Rietveld URL when requests are made to the
+    handler in the tests below. In the real datastore, the RietveldConfig
+    entity would contain credentials.
+    """
+    rietveld_service.RietveldConfig(
+        id='default_rietveld_config',
+        client_email='sullivan@email.com',
+        service_account_key='Fake Account Key',
+        server_url='https://test-rietveld.appspot.com',
+        internal_server_url='https://test-rietveld.appspot.com').put()
+
+  def testPost(self):
+    job_key = try_job.TryJob(id=6789, rietveld_issue_id=200034).put()
+    data_param = json.dumps(_SAMPLE_BISECT_RESULTS_JSON)
+    self.testapp.post(
+        '/post_bisect_results', {'data': data_param},
+        extra_environ={'REMOTE_ADDR': _WHITELISTED_IP})
+
+    job = job_key.get()
+    self.assertEqual(6789, job.results_data['try_job_id'])
+    self.assertEqual('completed', job.results_data['status'])
diff --git a/catapult/dashboard/dashboard/quick_logger.py b/catapult/dashboard/dashboard/quick_logger.py
index ebe4e3e..d30eda2 100644
--- a/catapult/dashboard/dashboard/quick_logger.py
+++ b/catapult/dashboard/dashboard/quick_logger.py
@@ -17,12 +17,13 @@
   logger.Save()
 """
 
+import uuid
+
 import collections
 import cPickle as pickle
 import logging
 import time
 
-from google.appengine.api import datastore_errors
 from google.appengine.ext import ndb
 
 # Maximum number of QuickLogPart entities to hold a log.
@@ -34,24 +35,17 @@
 _MAX_MSG_SIZE = 12288  # 12KB
 
 
-def Get(namespace, key, no_wait=True):
+def Get(namespace, key):
   """Gets list of Record from the datastore.
 
   Args:
     namespace: The namespace for key.
     key: The key name.
-    no_wait: True to get results without waiting for datastore to apply
-             pending changes, False otherwise.
 
   Returns:
     List of Record, None if key does not exist in datastore.
   """
-  namespaced_key = '%s__%s' % (namespace, key)
-  key = ndb.Key('QuickLog', namespaced_key)
-  if no_wait:
-    quick_log = key.get(read_policy=ndb.EVENTUAL_CONSISTENCY)
-  else:
-    quick_log = key.get()
+  quick_log = _GetQuickLog(namespace, key)
   if quick_log:
     return quick_log.GetRecords()
   return None
@@ -68,32 +62,25 @@
   ndb.Key('QuickLog', namespaced_key).delete()
 
 
-def _Set(namespace, key, records):
-  """Sets list of Record in the datastore.
-
-  Args:
-    namespace: A string namespace for the key.
-    key: A string key name which will be namespaced for QuickLog entity key.
-    records: List of Record entities.
-  """
+def _GetQuickLog(namespace, key):
+  """Gets QuickLog entity from a namespace and a key."""
   namespaced_key = '%s__%s' % (namespace, key)
-  try:
-    log = QuickLog(id=namespaced_key, namespace=namespace)
-    log.SetRecords(namespaced_key, records)
-  except datastore_errors.BadRequestError as e:
-    logging.warning('BadRequestError for namespaced key %s: %s',
-                    namespaced_key, e)
+  key = ndb.Key('QuickLog', namespaced_key)
+  return key.get()
+
+
+def _CreateQuickLog(namespace, key):
+  """Creates an empty QuickLog entity."""
+  namespaced_key = '%s__%s' % (namespace, key)
+  log = QuickLog(id=namespaced_key)
+  log.put()
+  return log
 
 
 class QuickLog(ndb.Model):
   """Represents a log entity."""
 
-  # A pickled list of Record. (Deprecated)
-  # TODO(chrisphan): Remove this in the future.  Old version of quick_logger
-  # storing logs in this property and we don't want to delete them yet.
-  records = ndb.PickleProperty()
-
-  # Namespace for identifying logs.
+  # Used for querying existing logs.
   namespace = ndb.StringProperty(indexed=True)
 
   # The time log was first created.
@@ -103,15 +90,6 @@
   size = ndb.IntegerProperty(default=0)
 
   def GetRecords(self):
-    """Gets records for this log."""
-    # Move old data from old version to use multi-entities storage.
-    if self.records:
-      records_copy = self.records
-      self.records = None
-      self.SetRecords(self.key.string_id(), records_copy)
-    return self.GetMultiEntityRecords()
-
-  def GetMultiEntityRecords(self):
     """Gets records store in multiple entities.
 
     Combines and deserializes the data stored in QuickLogPart for this log.
@@ -126,7 +104,7 @@
       return None
 
     string_id = self.key.string_id()
-    log_part_keys = [ndb.Key('QuickLog', string_id, 'QuickLogPart', i+1)
+    log_part_keys = [ndb.Key('QuickLog', string_id, 'QuickLogPart', i + 1)
                      for i in xrange(self.size)]
     log_parts = ndb.get_multi(log_part_keys)
     serialized = ''.join(l.value for l in log_parts if l is not None)
@@ -136,13 +114,12 @@
       logging.error('Failed to load QuickLog "%s".', string_id)
     return None
 
-  def SetRecords(self, key, records):
+  def SetRecords(self, records):
     """Sets records for this log and put into datastore.
 
     Serializes records and save over multiple entities if necessary.
 
     Args:
-      key: String key name of a QuickLog entity.
       records: List of Record object.
     """
     # Number of bytes less than 1MB for ndb.BlobProperty.
@@ -155,11 +132,10 @@
 
     log_parts = []
     for i in xrange(0, length, chunk_size):
-      # +1 to start entitiy key at 1.
+      # +1 to start entity key at 1.
       part_id = i // chunk_size + 1
-      part_value = serialized[i:i+chunk_size]
-      parent_key = ndb.Key('QuickLog', key)
-      log_part = QuickLogPart(id=part_id, parent=parent_key, value=part_value)
+      part_value = serialized[i:i + chunk_size]
+      log_part = QuickLogPart(id=part_id, parent=self.key, value=part_value)
       log_parts.append(log_part)
 
     self.size = len(log_parts)
@@ -197,22 +173,32 @@
       self._template = '{asctime} {message}'
 
   def Format(self, record):
-    """Format the record."""
+    """Formats a record."""
     self._kwargs['message'] = record.message
     if '{asctime}' in self._template:
-      lt = time.localtime(record.index)
+      # Support backward compatibility.
+      timestamp = getattr(record, 'timestamp', record.index)
+      lt = time.localtime(timestamp)
       self._kwargs['asctime'] = time.strftime(self._datefmt, lt)
     record.message = self._template.format(*self._args, **self._kwargs)
 
 
 # Not subclassing object (aka old-style class) reduces the serialization size.
-class Record:
-  """Class to hold a log."""
+class Record:  # pylint: disable=old-style-class, invalid-name
+  """Class to hold a log.
 
-  def __init__(self, message):
+  Properties:
+    message: A string.
+    id: A string ID.
+    timestamp: Seconds since the epoch, representing the time when the
+        record was created.
+  """
+
+  def __init__(self, message, record_id):
     self.message = message
-    self.index = time.time()
-
+    self.id = record_id
+    self.timestamp = time.time()
+    self.index = None  # Deprecated.  Remove this when we migrate old Records.
 
 class QuickLogger(object):
   """Logger class."""
@@ -225,30 +211,36 @@
       name: Name of logger.
       formatter: Formatter object to format logs.
     """
-    self._namespace = namespace
-    self._name = name
     self._formatter = formatter
     self._records = collections.deque(maxlen=_MAX_NUM_RECORD)
+    self._record_count = 0
+    self._log = _GetQuickLog(namespace, name)
+    if not self._log:
+      self._log = _CreateQuickLog(namespace, name)
+    self._unique_id = uuid.uuid1().hex
 
-  def Log(self, message, *args):
-    """Add a message with 'message % args'.
 
-    Must call Save() to save to datastore.
+  def Log(self, message, record_id=None):
+    """Adds or updates a log record.
+
+    After this is called, Save() must be called to save to datastore.
 
     Args:
       message: String message.
-      *args: Replacement field for positional argument.
+      record_id: ID of the record to update; if None, add a new record.
+
+    Returns:
+      The ID of the updated or created Record.
     """
     message = str(message)
-    if args:
-      message %= args
-    record = Record(message)
+    record = self._CreateRecord(message, record_id)
     if self._formatter:
       self._formatter.Format(record)
     if len(record.message) > _MAX_MSG_SIZE:
       logging.error('Message must be less than (%s)', _MAX_MSG_SIZE)
       return
     self._records.appendleft(record)
+    return record.id
 
   @ndb.transactional
   def Save(self):
@@ -260,8 +252,34 @@
     if not self._records:
       return
     records = list(self._records)
-    stored_records = Get(self._namespace, self._name, no_wait=False)
-    if stored_records:
-      records.extend(stored_records)
-    _Set(self._namespace, self._name, records[0:_MAX_NUM_RECORD])
+    stored_records = self._log.GetRecords()
+    self._MergeRecords(records, stored_records)
+    self._log.SetRecords(records[0:_MAX_NUM_RECORD])
     self._records.clear()
+
+  def _CreateRecord(self, message, record_id=None):
+    if not record_id:
+      return Record(message, self._CreateRecordId())
+
+    for record in list(self._records):
+      if getattr(record, 'id', None) == record_id:
+        self._records.remove(record)
+        return Record(message, record_id)
+    # If the given record_id doesn't exist, create a new record with this ID.
+    return Record(message, record_id)
+
+  def _CreateRecordId(self):
+    """Creates an ID for a Record.
+
+    A record's ID is the current record count namespaced by self._unique_id.
+    """
+    self._record_count += 1
+    return '%s_%s' % (self._unique_id, self._record_count)
+
+  def _MergeRecords(self, records, stored_records):
+    """Updates |records| with stored records if id does not already exist."""
+    if not stored_records:
+      return
+    new_ids = {r.id for r in records}
+    records.extend(r for r in stored_records
+                   if getattr(r, 'id', None) not in new_ids)
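
The QuickLogger changes above give every record an ID of the form
'<uuid>_<count>', where the UUID is generated once per logger instance; this is
what lets Log() later be called with record_id to overwrite an entry instead of
appending a new one. A minimal sketch of the ID scheme, using only the uuid
module (the _IdAllocator name is illustrative, not part of the patch):

import uuid

class _IdAllocator(object):
  """Illustrative stand-in for QuickLogger's record-ID scheme."""

  def __init__(self):
    self._unique_id = uuid.uuid1().hex  # Per-instance namespace.
    self._record_count = 0

  def NextId(self):
    # Mirrors QuickLogger._CreateRecordId: '<uuid>_<count>'.
    self._record_count += 1
    return '%s_%s' % (self._unique_id, self._record_count)

first, second = _IdAllocator(), _IdAllocator()
assert first.NextId() != second.NextId()  # No collisions across loggers.
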
diff --git a/catapult/dashboard/dashboard/quick_logger_test.py b/catapult/dashboard/dashboard/quick_logger_test.py
index 5d5f44f..d1d2834 100644
--- a/catapult/dashboard/dashboard/quick_logger_test.py
+++ b/catapult/dashboard/dashboard/quick_logger_test.py
@@ -14,7 +14,7 @@
     template = '{message}{extra}'
     formatter = quick_logger.Formatter(template, extra='!')
     logger = quick_logger.QuickLogger('a_namespace', 'a_log_name', formatter)
-    logger.Log('Hello %s', 'world')
+    logger.Log('Hello world')
     logger.Save()
     logs = quick_logger.Get('a_namespace', 'a_log_name')
     self.assertEqual(len(logs), 1)
@@ -23,7 +23,7 @@
   def testQuickLogger_LogSizeAndNumberAtSizeLimit(self):
     logger = quick_logger.QuickLogger('a_namespace', 'a_log_name')
     for i in xrange(quick_logger._MAX_NUM_RECORD):
-      logger.Log(str(i%2) * quick_logger._MAX_MSG_SIZE)
+      logger.Log(str(i % 2) * quick_logger._MAX_MSG_SIZE)
     logger.Save()
     logs = quick_logger.Get('a_namespace', 'a_log_name')
     self.assertEqual(len(logs), quick_logger._MAX_NUM_RECORD)
@@ -38,6 +38,30 @@
     # First record is the last log added.
     self.assertEqual(logs[0].message, str(quick_logger._MAX_NUM_RECORD + 9))
 
+  def testQuickLogger_LoggingWithId_UpdatesExistingLog(self):
+    logger = quick_logger.QuickLogger('a_namespace', 'a_log_name')
+    first_id = logger.Log('First message.')
+    logger.Log('Second message.')
+    logger.Log('Third message.')
+    logger.Save()
+
+    logger = quick_logger.QuickLogger('a_namespace', 'a_log_name')
+    logger.Log('Updated first message.', first_id)
+    logger.Save()
+
+    logs = quick_logger.Get('a_namespace', 'a_log_name')
+    self.assertEqual(3, len(logs))
+    self.assertEqual('Updated first message.', logs[0].message)
+    self.assertEqual('Third message.', logs[1].message)
+    self.assertEqual('Second message.', logs[2].message)
+
+  def testQuickLogger_MultipleLogs_RecordIsUnique(self):
+    first_logger = quick_logger.QuickLogger('a_namespace', 'a_log_name')
+    second_logger = quick_logger.QuickLogger('a_namespace', 'a_log_name')
+    first_id = first_logger.Log('First message.')
+    second_id = second_logger.Log('Second message.')
+    self.assertNotEqual(first_id, second_id)
+
 
 if __name__ == '__main__':
   unittest.main()
diff --git a/catapult/dashboard/dashboard/report.py b/catapult/dashboard/dashboard/report.py
index b80dd91..820df3c 100644
--- a/catapult/dashboard/dashboard/report.py
+++ b/catapult/dashboard/dashboard/report.py
@@ -5,7 +5,6 @@
 """Provides the web interface for reporting a graph of traces."""
 
 import json
-import os
 
 from google.appengine.ext import ndb
 
@@ -20,20 +19,26 @@
   """URL endpoint for /report page."""
 
   def get(self):
-    """Renders the UI for selecting graphs."""
-
+    """Renders the static UI for selecting graphs."""
     query_string = self._GetQueryStringForOldUri()
     if query_string:
       self.redirect('/report?' + query_string)
       return
+    self.RenderStaticHtml('report.html')
 
-    dev_version = ('Development' in os.environ['SERVER_SOFTWARE'] or
-                   self.request.host == 'chrome-perf.googleplex.com')
-
-    self.RenderHtml('report.html', {
-        'dev_version': dev_version,
-        'test_suites': json.dumps(update_test_suites.FetchCachedTestSuites()),
-    })
+  def post(self):
+    """Gets dynamic data for selecting graphs"""
+    values = {}
+    self.GetDynamicVariables(values)
+    self.response.out.write(json.dumps({
+        'is_internal_user': values['is_internal_user'],
+        'login_url': values['login_url'],
+        'revision_info': values['revision_info'],
+        'warning_bug': values['warning_bug'],
+        'warning_message': values['warning_message'],
+        'xsrf_token': values['xsrf_token'],
+        'test_suites': update_test_suites.FetchCachedTestSuites(),
+    }))
 
   def _GetQueryStringForOldUri(self):
     """Gets a new query string if old URI parameters are present.
@@ -107,11 +112,11 @@
       for test in tests:
         test_parts = test.split('/')
         if len(test_parts) == 1:
-          first_test = _GetFirstTest(test, master + '/' + bot)
-          if first_test:
-            test += '/' + first_test
+          first_test_parts = _GetFirstTest(test, master + '/' + bot)
+          if first_test_parts:
+            test += '/' + '/'.join(first_test_parts)
             if not selected_series:
-              selected_series.append(first_test)
+              selected_series.append(first_test_parts[-1])
         test_paths.append(master + '/' + bot + '/' + test)
 
   chart_states = []
@@ -131,7 +136,8 @@
     bot_path: Master and bot name separated by a slash.
 
   Returns:
-    The first test that has rows, otherwise returns None.
+    A list of test path parts for the first test that has rows, or None if
+    no such test is found.
   """
   sub_test_tree = list_tests.GetSubTests(test_suite, [bot_path])
   test_parts = []
@@ -139,6 +145,6 @@
     first_test = sorted(sub_test_tree.keys())[0]
     test_parts.append(first_test)
     if sub_test_tree[first_test]['has_rows']:
-      return '/'.join(test_parts)
+      return test_parts
     sub_test_tree = sub_test_tree[first_test]['sub_tests']
   return None
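
With the report handler split above, GET /report serves static HTML and
POST /report returns the dynamic page state as JSON. A hedged sketch of
fetching that state from a client script (the host is an assumption; the field
names come from ReportHandler.post() above):

import json
import urllib2

# POST with an empty body; the handler ignores the request body.
response = urllib2.urlopen('https://chromeperf.appspot.com/report', data='')
values = json.loads(response.read())
test_suites = values['test_suites']  # Same data FetchCachedTestSuites() caches.
xsrf_token = values['xsrf_token']    # Token the page can use for later POSTs.
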
diff --git a/catapult/dashboard/dashboard/report_test.py b/catapult/dashboard/dashboard/report_test.py
index 1ae856e..db74861 100644
--- a/catapult/dashboard/dashboard/report_test.py
+++ b/catapult/dashboard/dashboard/report_test.py
@@ -59,7 +59,7 @@
     # data must be updated.
     self.testapp.post('/update_test_suites')
 
-  def testGet_EmbedsTestSuites(self):
+  def testPost_ContainsTestSuites(self):
     self._AddTestSuites()
 
     # We expect this data to be present in the JSON response.
@@ -91,10 +91,15 @@
             'des': 'This should show up',
         },
     }
-    response = self.testapp.get('/report')
-    actual_suites = self.GetEmbeddedVariable(response, 'TEST_SUITES')
+    response = self.testapp.post('/report')
+    actual_suites = self.GetJsonValue(response, 'test_suites')
     self.assertEqual(expected_suites, actual_suites)
 
+  def testGet(self):
+    response = self.testapp.get('/report')
+    self.assertEqual('text/html', response.content_type)
+    self.assertIn('Chrome Performance Dashboard', response.body)
+
   def testGet_OldUri(self):
     expected_state = {
         'charts': [
@@ -173,6 +178,35 @@
     self.assertIn('start_rev=1234', location)
     self.assertIn('end_rev=5678', location)
 
+  def testGet_OldUriWithNestedSubtestAndMissingSubTestParam(self):
+    self._AddTestSuites()
+    testing_common.AddRows(
+        ('ChromiumGPU/linux-release/scrolling_benchmark/average_commit_time/'
+         'answers.yahoo.com'),
+        {200})
+
+    expected_state = {
+        'charts': [
+            [[('ChromiumGPU/linux-release/scrolling_benchmark/'
+               'average_commit_time/answers.yahoo.com'),
+              ['answers.yahoo.com']]],
+        ]
+    }
+
+    response = self.testapp.get(
+        '/report'
+        '?masters=ChromiumGPU&bots=linux-release'
+        '&tests=scrolling_benchmark')
+
+    # We expect to get a URL redirect with an sid.
+    location = response.headers.get('location')
+    self.assertIn('sid=', location)
+
+    state_id = location.split('sid=')[1]
+    state = ndb.Key(page_state.PageState, state_id).get()
+    self.assertEqual(json.dumps(expected_state, separators=(',', ':')),
+                     state.value)
+
 
 if __name__ == '__main__':
   unittest.main()
diff --git a/catapult/dashboard/dashboard/request_handler.py b/catapult/dashboard/dashboard/request_handler.py
index ff31828..f6d970f 100644
--- a/catapult/dashboard/dashboard/request_handler.py
+++ b/catapult/dashboard/dashboard/request_handler.py
@@ -31,12 +31,28 @@
 
     Args:
       template_file: string. File name under templates directory.
-      template_values: dict. Mapping of template variables to corresponding
+      template_values: dict. Mapping of template variable names to their
           values.
       status: int. HTTP status code.
     """
     self.response.set_status(status)
     template = JINJA2_ENVIRONMENT.get_template(template_file)
+    self.GetDynamicVariables(template_values)
+    self.response.out.write(template.render(template_values))
+
+  def RenderStaticHtml(self, filename):
+    filename = os.path.join(os.path.dirname(__file__), 'static', filename)
+    contents = open(filename, 'r')
+    self.response.out.write(contents.read())
+    contents.close()
+
+  def GetDynamicVariables(self, template_values, request_path=None):
+    """Gets the values that vary for every page.
+
+    Args:
+      template_values: dict of name/value pairs.
+      request_path: Path for login URLs; if None, the current path is used.
+    """
     user_info = ''
     xsrf_token = ''
     user = users.get_current_user()
@@ -49,7 +65,7 @@
       xsrf_token = xsrf.GenerateToken(user)
       is_admin = users.is_current_user_admin()
     try:
-      login_url = users.create_login_url(self.request.path_qs)
+      login_url = users.create_login_url(request_path or self.request.path_qs)
     except users.RedirectTooLongError:
       # On the bug filing pages, the full login URL can be too long. Drop
       # the correct redirect URL, since the user should already be logged in at
@@ -57,6 +73,8 @@
       login_url = users.create_login_url('/')
     user_info = '<a href="%s" title="%s">%s</a>' % (
         login_url, title, display_username)
+    template_values['login_url'] = login_url
+    template_values['display_username'] = display_username
     template_values['user_info'] = user_info
     template_values['is_admin'] = is_admin
     template_values['is_internal_user'] = utils.IsInternalUser()
@@ -64,7 +82,7 @@
     template_values['xsrf_input'] = (
         '<input type="hidden" name="xsrf_token" value="%s">' % xsrf_token)
     template_values['login_url'] = login_url
-    self.response.out.write(template.render(template_values))
+    return template_values
 
   def ReportError(self, error_message, status=500):
     """Reports the given error to the client and logs the error.
@@ -75,7 +93,8 @@
     """
     logging.error(error_message)
     self.response.set_status(status)
-    self.response.out.write('%s\n' % error_message)
+    self.response.out.write('%s\nrequest_id:%s\n' %
+                            (error_message, utils.GetRequestId()))
 
   def ReportWarning(self, warning_message, status=200):
     """Reports a warning to the client and logs the warning.
@@ -86,7 +105,8 @@
     """
     logging.warning(warning_message)
     self.response.set_status(status)
-    self.response.out.write('%s\n' % warning_message)
+    self.response.out.write('%s\nrequest_id:%s\n' %
+                            (warning_message, utils.GetRequestId()))
 
 
 class InvalidInputError(Exception):
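
The new GetDynamicVariables() collects the per-request values (login_url,
xsrf_token, user_info, and so on) that RenderHtml() previously computed inline,
so JSON endpoints and template rendering can share one code path. A sketch of a
JSON handler built on it (ExampleJsonHandler is a hypothetical name, not part
of the patch):

import json

from dashboard import request_handler


class ExampleJsonHandler(request_handler.RequestHandler):
  """Hypothetical endpoint that exposes only the dynamic page values."""

  def post(self):
    values = {}
    self.GetDynamicVariables(values)  # Fills login_url, xsrf_token, etc.
    self.response.out.write(json.dumps({
        'login_url': values['login_url'],
        'xsrf_token': values['xsrf_token'],
    }))
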
diff --git a/catapult/dashboard/dashboard/rietveld_service.py b/catapult/dashboard/dashboard/rietveld_service.py
index 664bdea..b998db8 100644
--- a/catapult/dashboard/dashboard/rietveld_service.py
+++ b/catapult/dashboard/dashboard/rietveld_service.py
@@ -10,24 +10,37 @@
 import urllib
 
 import httplib2
-from oauth2client import client
 
 from google.appengine.ext import ndb
 
-_EMAIL_SCOPE = 'https://www.googleapis.com/auth/userinfo.email'
-PROJECTHOSTING_SCOPE = 'https://www.googleapis.com/auth/projecthosting'
+from dashboard import utils
 
 _DESCRIPTION = """This patch was automatically uploaded by the Chrome Perf
 Dashboard (https://chromeperf.appspot.com). It is being used to run a perf
 bisect try job. It should not be submitted."""
 
 
+class ResponseObject(object):
+  """Class for Response Object.
+
+  This class holds attributes similar to response object returned by
+  google.appengine.api.urlfetch. This is used to convert response object
+  returned by httplib2.Http.request.
+  """
+
+  def __init__(self, status_code, content):
+    self.status_code = int(status_code)
+    self.content = content
+
+
 class RietveldConfig(ndb.Model):
   """Configuration info for a Rietveld service account.
 
   The data is stored only in the App Engine datastore (and the cloud console)
   and not the code because it contains sensitive information like private keys.
   """
+  # TODO(qyearsley): Remove RietveldConfig and store the server URL in
+  # datastore.
   client_email = ndb.TextProperty()
   service_account_key = ndb.TextProperty()
 
@@ -39,12 +52,6 @@
   internal_server_url = ndb.TextProperty()
 
 
-def Credentials(config, scope):
-  """Returns a credentials object used to authenticate a Http object."""
-  return client.SignedJwtAssertionCredentials(
-      config.client_email, config.service_account_key, scope)
-
-
 def GetDefaultRietveldConfig():
   """Returns the default rietveld config entity from the datastore."""
   return ndb.Key(RietveldConfig, 'default_rietveld_config').get()
@@ -71,27 +78,27 @@
       self._config = GetDefaultRietveldConfig()
     return self._config
 
-  def MakeRequest(self, path, *args, **kwwargs):
+  def MakeRequest(self, path, *args, **kwargs):
     """Makes a request to the Rietveld server."""
     if self.internal_only:
       server_url = self.Config().internal_server_url
     else:
       server_url = self.Config().server_url
     url = '%s/%s' % (server_url, path)
-    response, content = self._Http().request(url, *args, **kwwargs)
-    return (response, content)
+    response, content = self._Http().request(url, *args, **kwargs)
+    return ResponseObject(response.get('status'), content)
 
   def _Http(self):
     if not self._http:
       self._http = httplib2.Http()
-      creds = Credentials(self.Config(), _EMAIL_SCOPE)
-      creds.authorize(self._http)
+      credentials = utils.ServiceAccountCredentials()
+      credentials.authorize(self._http)
     return self._http
 
   def _XsrfToken(self):
     """Requests a XSRF token from Rietveld."""
     return self.MakeRequest(
-        'xsrf_token', headers={'X-Requesting-XSRF-Token': 1})[1]
+        'xsrf_token', headers={'X-Requesting-XSRF-Token': 1}).content
 
   def _EncodeMultipartFormData(self, fields, files):
     """Encode form fields for multipart/form-data.
@@ -169,26 +176,26 @@
     uploaded_diff_file = [('data', 'data.diff', patch)]
     ctype, body = self._EncodeMultipartFormData(
         form_fields, uploaded_diff_file)
-    response, content = self.MakeRequest(
+    response = self.MakeRequest(
         'upload', method='POST', body=body, headers={'content-type': ctype})
-    if response.get('status') != '200':
-      logging.error('Error %s uploading to /upload', response.get('status'))
-      logging.error(content)
+    if response.status_code != 200:
+      logging.error('Error %s uploading to /upload', response.status_code)
+      logging.error(response.content)
       return (None, None)
 
     # There should always be 3 lines in the request, but sometimes Rietveld
     # returns 2 lines. Log the content so we can debug further.
-    logging.info('Response from Rietveld /upload:\n%s', content)
-    if not content.startswith('Issue created.'):
-      logging.error('Unexpected response: %s', content)
+    logging.info('Response from Rietveld /upload:\n%s', response.content)
+    if not response.content.startswith('Issue created.'):
+      logging.error('Unexpected response: %s', response.content)
       return (None, None)
-    lines = content.splitlines()
+    lines = response.content.splitlines()
     if len(lines) < 2:
-      logging.error('Unexpected response %s', content)
+      logging.error('Unexpected response %s', response.content)
       return (None, None)
 
     msg = lines[0]
-    issue_id = msg[msg.rfind('/')+1:]
+    issue_id = msg[msg.rfind('/') + 1:]
     patchset_id = lines[1].strip()
     patches = [x.split(' ', 1) for x in lines[2:]]
     request_path = '%d/upload_content/%d/%d' % (
@@ -202,20 +209,20 @@
     ]
     uploaded_diff_file = [('data', config_path, base_content)]
     ctype, body = self._EncodeMultipartFormData(form_fields, uploaded_diff_file)
-    response, content = self.MakeRequest(
+    response = self.MakeRequest(
         request_path, method='POST', body=body, headers={'content-type': ctype})
-    if response.get('status') != '200':
+    if response.status_code != 200:
       logging.error(
-          'Error %s uploading to %s', response.get('status'), request_path)
-      logging.error(content)
+          'Error %s uploading to %s', response.status_code, request_path)
+      logging.error(response.content)
       return (None, None)
 
     request_path = '%s/upload_complete/%s' % (issue_id, patchset_id)
-    response, content = self.MakeRequest(request_path, method='POST')
-    if response.get('status') != '200':
+    response = self.MakeRequest(request_path, method='POST')
+    if response.status_code != 200:
       logging.error(
-          'Error %s uploading to %s', response.get('status'), request_path)
-      logging.error(content)
+          'Error %s uploading to %s', response.status_code, request_path)
+      logging.error(response.content)
       return (None, None)
     return issue_id, patchset_id
 
@@ -242,12 +249,12 @@
         'clobber': 'False',
     }
     request_path = '%s/try/%s' % (issue_id, patchset_id)
-    response, content = self.MakeRequest(
+    response = self.MakeRequest(
         request_path, method='POST', body=urllib.urlencode(args))
-    if response.get('status') != '200':
-      status = response.get('status')
+    if response.status_code != 200:
       logging.error(
-          'Error %s POSTing to /%s/try/%s', status, issue_id, patchset_id)
-      logging.error(content)
+          'Error %s POSTing to /%s/try/%s', response.status_code, issue_id,
+          patchset_id)
+      logging.error(response.content)
       return False
     return True
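
MakeRequest() now wraps the httplib2 response in a ResponseObject, so callers
compare an integer status_code and read content directly instead of unpacking a
(response, content) tuple and comparing against the string '200'. A small
sketch of the calling pattern (the request shown mirrors _XsrfToken above):

from dashboard import rietveld_service

service = rietveld_service.RietveldService()
response = service.MakeRequest(
    'xsrf_token', headers={'X-Requesting-XSRF-Token': 1})
if response.status_code == 200:  # int, not the '200' string from httplib2.
  token = response.content
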
diff --git a/catapult/dashboard/dashboard/set_warning_message_test.py b/catapult/dashboard/dashboard/set_warning_message_test.py
index 3b5e3e2..cccace3 100644
--- a/catapult/dashboard/dashboard/set_warning_message_test.py
+++ b/catapult/dashboard/dashboard/set_warning_message_test.py
@@ -20,10 +20,11 @@
         [('/set_warning_message',
           set_warning_message.SetWarningMessageHandler)])
     self.testapp = webtest.TestApp(app)
-    testing_common.SetInternalDomain('google.com')
+    testing_common.SetIsInternalUser('internal@chromium.org', True)
+    testing_common.SetIsInternalUser('foo@chromium.org', False)
 
   def testGet_VariablesSet(self):
-    self.SetCurrentUser('bar@google.com')
+    self.SetCurrentUser('internal@chromium.org')
     layered_cache.Set('warning_message', 'The Message')
     layered_cache.Set('warning_bug', '12345')
     response = self.testapp.get('/set_warning_message')
@@ -36,7 +37,7 @@
     self.assertIn('Only logged-in internal users', response)
 
   def testPost_NotLoggedIn(self):
-    self.SetCurrentUser('foo@yahoo.com')
+    self.SetCurrentUser('foo@chromium.org')
     response = self.testapp.post(
         '/set_warning_message',
         {'warning_bug': '54321', 'warning_message': 'Stern warning'})
@@ -45,7 +46,7 @@
     self.assertIn('Only logged-in internal users', response)
 
   def testPost_CacheSet(self):
-    self.SetCurrentUser('foo@google.com')
+    self.SetCurrentUser('internal@chromium.org')
     self.testapp.post(
         '/set_warning_message',
         {'warning_bug': '54321', 'warning_message': 'Stern warning'})
@@ -53,7 +54,7 @@
     self.assertEqual('54321', layered_cache.Get('warning_bug'))
 
   def testPost_CacheSetOnlyMessage(self):
-    self.SetCurrentUser('foo@google.com')
+    self.SetCurrentUser('internal@chromium.org')
     self.testapp.post(
         '/set_warning_message',
         {'warning_bug': '', 'warning_message': 'Random warning'})
@@ -61,7 +62,7 @@
     self.assertIsNone(layered_cache.Get('warning_bug'))
 
   def testPost_CacheCleared(self):
-    self.SetCurrentUser('foo@google.com')
+    self.SetCurrentUser('internal@chromium.org')
     self.testapp.post('/set_warning_message', {'warning_message': ''})
     self.assertEqual(None, layered_cache.Get('warning_message'))
     self.assertIsNone(layered_cache.Get('warning_bug'))
diff --git a/catapult/dashboard/dashboard/shrink_timestamp_revisions.py b/catapult/dashboard/dashboard/shrink_timestamp_revisions.py
deleted file mode 100644
index 5787d63..0000000
--- a/catapult/dashboard/dashboard/shrink_timestamp_revisions.py
+++ /dev/null
@@ -1,184 +0,0 @@
-# Copyright 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Change some Row entities with timestamp IDs to have IDs < 300000.
-
-This is based on cl/71187034 and is intended to be temporary.
-
-Background:
-Historically, chromium.webrtc and chromium.webrtc.fyi had sent data with
-timestamps for x-values. In the future, they plan to switch to using
-commit positions as x-values, but want to keep all of the existing data
-there, in order.
-
-Therefore, we want to change all revision numbers of points under the
-ChromiumWebRTC and ChromiumWebRTCFYI masters to use modified x-values.
-
-TODO(qyearsley): Remove this handler (and its related entries in BUILD and
-dispatcher.py) when http://crbug.com/496048 (and http://crbug.com/469523)
-are fixed.
-"""
-
-import logging
-
-from google.appengine.api import taskqueue
-from google.appengine.datastore import datastore_query
-from google.appengine.ext import ndb
-
-from dashboard import graph_revisions
-from dashboard import request_handler
-from dashboard import utils
-from dashboard.models import anomaly
-from dashboard.models import graph_data
-
-# Properties that shouldn't be copied when copying rows.
-_ROW_EXCLUDE_PROPERTIES = ['parent_test', 'revision', 'id']
-
-# Number of tests and rows to process at once.
-_NUM_TESTS = 5
-_NUM_ROWS = 600
-_NUM_ALERTS = 10
-
-# Minimum number that's considered a timestamp.
-_MIN_TIMESTAMP = 100000000
-
-# Task queue that tasks will be pushed onto.
-_QUEUE_NAME = 'migrate-queue'
-
-
-def _ConvertTimestamp(timestamp_seconds):
-  """Converts from a timestamp to some new x-value.
-
-  Requirements:
-   - Order doesn't change.
-   - All resulting x-values are below 300000.
-   - It's OK if some timestamps map to the same output values.
-
-  Note: 1356998400 is 2013-01-01 00:00 GMT.
-  Generally the points are 1-2 hours apart.
-  June 4 2015 is 1433378000, and 1433378000 / (2 * 60 * 60) = 199080.
-
-  Args:
-    timestamp_seconds: A Unix timestamp (seconds since 1970).
-
-  Returns:
-    A number that can be used as the new point ID for a point.
-  """
-  return (timestamp_seconds - 1356998400) / (2 * 60 * 60)
-
-
-class ShrinkTimestampRevisionsHandler(request_handler.RequestHandler):
-
-  def post(self):
-    self.get()
-
-  def get(self):
-    """Fixes rows for one or more tests and queues the next task to fix more.
-
-    Request parameters:
-      ancestor: A slash-separated path to the ancestor to start from.
-      cursor: An urlsafe string for a datastore_query.Cursor object.
-
-    Outputs:
-      Some indication of the results.
-    """
-    # Get the ancestor of the tests to change, and abort if not given.
-    ancestor = self.request.get('ancestor')
-    if not ancestor:
-      self.ReportError('Missing ancestor parameter.')
-      return
-    ancestor_key = utils.TestKey(ancestor)
-
-    # Get the query cursor if given.
-    urlsafe_cursor = self.request.get('cursor')
-    cursor = None
-    if urlsafe_cursor:
-      cursor = datastore_query.Cursor(urlsafe=urlsafe_cursor)
-    more = False
-
-    test_query = graph_data.Test.query(ancestor=ancestor_key)
-    test_query = test_query.filter(
-        graph_data.Test.has_rows == True)
-    keys, next_cursor, more = test_query.fetch_page(
-        _NUM_TESTS, keys_only=True, start_cursor=cursor)
-
-    futures = []
-    for key in keys:
-      futures.extend(_FixTest(key))
-    ndb.Future.wait_all(futures)
-
-    if not futures:
-      cursor = next_cursor
-
-    urlsafe_cursor = cursor.urlsafe() if cursor else ''
-    if more or futures:
-      taskqueue.add(
-          queue_name=_QUEUE_NAME,
-          url='/shrink_timestamp_revisions',
-          params={'cursor': urlsafe_cursor or '', 'ancestor': ancestor})
-      logging.info('Task added, cursor: %s', urlsafe_cursor)
-
-    # Display some information, to verify that something is happening.
-    self.RenderHtml('result.html', {
-        'results': [{'name': 'cursor', 'value': urlsafe_cursor}]
-    })
-
-
-def _FixTest(test_key):
-  """Changes Row and Anomaly entities from using timestamps to SVN revisions."""
-  futures = _MoveRowsForTest(test_key)
-  futures.extend(_UpdateAlertsForTest(test_key))
-
-  # Clear graph revisions cache. This is done so that the cached data
-  # will not be inconsistent with the actual data.
-  graph_revisions.DeleteCache(utils.TestPath(test_key))
-  return futures
-
-
-def _MoveRowsForTest(test_key):
-  """Moves rows for the given test."""
-  row_query = graph_data.Row.query(
-      graph_data.Row.parent_test == test_key,
-      graph_data.Row.revision > _MIN_TIMESTAMP)
-  rows = row_query.fetch(limit=_NUM_ROWS)
-  test_path = utils.TestPath(test_key)
-  logging.info('Moving %d rows for test "%s".', len(rows), test_path)
-  to_put = []
-  to_delete = []
-  for row in rows:
-    new_row = _CopyRow(row, _ConvertTimestamp(row.revision))
-    to_put.append(new_row)
-    to_delete.append(row.key)
-  put_futures = ndb.put_multi_async(to_put)
-  delete_futures = ndb.delete_multi_async(to_delete)
-  return put_futures + delete_futures
-
-
-def _CopyRow(row, new_revision):
-  """Make a copy of the given Row but with a new ID."""
-  new_row = graph_data.Row(id=new_revision, parent=row.key.parent())
-  create_args = create_args = {
-      'id': new_revision,
-      'parent': row.key.parent(),
-  }
-  for prop, val in row.to_dict(exclude=_ROW_EXCLUDE_PROPERTIES).iteritems():
-    create_args[prop] = val
-  new_row = graph_data.Row(**create_args)
-  return new_row
-
-
-def _UpdateAlertsForTest(test_key):
-  """Changes revision properties of alerts."""
-  alert_query = anomaly.Anomaly.query(
-      anomaly.Anomaly.test == test_key,
-      anomaly.Anomaly.end_revision > _MIN_TIMESTAMP)
-  alerts = alert_query.fetch(limit=_NUM_ALERTS)
-  test_path = utils.TestPath(test_key)
-  logging.info('Moving %d alerts in %s', len(alerts), test_path)
-  to_put = []
-  for a in alerts:
-    a.start_revision = _ConvertTimestamp(a.start_revision)
-    a.end_revision = _ConvertTimestamp(a.end_revision)
-    to_put.append(a)
-  return ndb.put_multi_async(to_put)
diff --git a/catapult/dashboard/dashboard/shrink_timestamp_revisions_test.py b/catapult/dashboard/dashboard/shrink_timestamp_revisions_test.py
deleted file mode 100644
index 0ef0a0d..0000000
--- a/catapult/dashboard/dashboard/shrink_timestamp_revisions_test.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# Copyright 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import unittest
-
-import webapp2
-import webtest
-
-from dashboard import shrink_timestamp_revisions
-from dashboard import testing_common
-from dashboard import utils
-from dashboard.models import anomaly
-from dashboard.models import graph_data
-
-
-class ShrinkTimestampRevisionsTest(testing_common.TestCase):
-
-  def setUp(self):
-    super(ShrinkTimestampRevisionsTest, self).setUp()
-    app = webapp2.WSGIApplication(
-        [('/shrink_timestamp_revisions',
-          shrink_timestamp_revisions.ShrinkTimestampRevisionsHandler)])
-    self.testapp = webtest.TestApp(app)
-
-  def testConvertTimestamp(self):
-    convert = shrink_timestamp_revisions._ConvertTimestamp
-    self.assertEqual(-5139, convert(1320001000))  # Before 2013
-    self.assertEqual(417, convert(1360001000))  # In Feb 2013
-    self.assertEqual(5972, convert(1400001000))  # In 2014
-    self.assertEqual(5972, convert(1400001010))  # 10 seconds later
-    self.assertEqual(12917, convert(1450001010))  # In 2015
-    self.assertEqual(19861, convert(1500001010))  # In 2017
-
-  def testPost_NoParameters_ReportsError(self):
-    response = self.testapp.post('/shrink_timestamp_revisions', status=500)
-    self.assertEqual('Missing ancestor parameter.\n', response.body)
-
-  def testGet_SameAsPost(self):
-    response1 = self.testapp.get('/shrink_timestamp_revisions', status=500)
-    response2 = self.testapp.post('/shrink_timestamp_revisions', status=500)
-    self.assertEqual(response1.body, response2.body)
-
-  def testPost_WithAncestor_AllRowsMoved(self):
-    testing_common.AddTests(
-        ['M'], ['b1', 'b2'], {'foo': {'bar': {}, 'baz': {}}})
-    for test_path in ('M/b1/foo/bar', 'M/b1/foo/baz', 'M/b2/foo/bar'):
-      # range(1425001000, 1430001000, 6000) includes 834 numbers.
-      testing_common.AddRows(
-          test_path,
-          {i for i in range(1425001000, 1430001000, 6000)})
-
-    self.testapp.post(
-        '/shrink_timestamp_revisions', {'ancestor': 'M/b1'})
-    self.ExecuteTaskQueueTasks(
-        '/shrink_timestamp_revisions', shrink_timestamp_revisions._QUEUE_NAME)
-
-    b1_bar_rows = graph_data.Row.query(
-        graph_data.Row.parent_test == utils.TestKey('M/b1/foo/bar')).fetch()
-    b1_baz_rows = graph_data.Row.query(
-        graph_data.Row.parent_test == utils.TestKey('M/b1/foo/baz')).fetch()
-    b2_bar_rows = graph_data.Row.query(
-        graph_data.Row.parent_test == utils.TestKey('M/b2/foo/bar')).fetch()
-    self.assertGreater(len(b1_bar_rows), 600)
-    self.assertGreater(len(b1_baz_rows), 600)
-    self.assertEqual(834, len(b2_bar_rows))
-    for r in b1_bar_rows:
-      self.assertLess(r.revision, 300000)
-    for r in b1_baz_rows:
-      self.assertLess(r.revision, 300000)
-    for r in b2_bar_rows:
-      self.assertGreater(r.revision, 300000)
-
-  def testGet_WithAncestor_AllAlertsUpdated(self):
-    testing_common.AddTests(
-        ['M'], ['b1', 'b2'], {'foo': {'bar': {}, 'baz': {}}})
-    testing_common.AddRows(
-        'M/b1/foo/bar',
-        {i for i in range(1431001000, 1432001000, 6000)})
-    test_key = utils.TestKey('M/b1/foo/bar')
-    # range(1431001000, 1431081000, 6000) includes 14 numbers.
-    for i in range(1431001000, 1431081000, 6000):
-      anomaly.Anomaly(
-          start_revision=i, end_revision=i+12000, test=test_key,
-          median_before_anomaly=100, median_after_anomaly=200).put()
-
-    self.testapp.post(
-        '/shrink_timestamp_revisions', {'ancestor': 'M'})
-    self.ExecuteTaskQueueTasks(
-        '/shrink_timestamp_revisions', shrink_timestamp_revisions._QUEUE_NAME)
-
-    anomalies = anomaly.Anomaly.query().fetch()
-    self.assertEqual(14, len(anomalies))
-    for a in anomalies:
-      self.assertLess(a.start_revision, 300000)
-      self.assertLess(a.end_revision, 300000)
-
-
-if __name__ == '__main__':
-  unittest.main()
diff --git a/catapult/dashboard/dashboard/start_try_job.py b/catapult/dashboard/dashboard/start_try_job.py
index e23f007..77f41e9 100644
--- a/catapult/dashboard/dashboard/start_try_job.py
+++ b/catapult/dashboard/dashboard/start_try_job.py
@@ -8,7 +8,6 @@
 import hashlib
 import json
 import logging
-import re
 
 import httplib2
 
@@ -17,6 +16,7 @@
 
 from dashboard import buildbucket_job
 from dashboard import buildbucket_service
+from dashboard import can_bisect
 from dashboard import namespaced_stored_object
 from dashboard import quick_logger
 from dashboard import request_handler
@@ -38,7 +38,6 @@
 index %(hash_a)s..%(hash_b)s 100644
 """
 
-_BISECT_BOT_MAP_KEY = 'bisect_bot_map'
 _BOT_BROWSER_MAP_KEY = 'bot_browser_map'
 _INTERNAL_MASTERS_KEY = 'internal_masters'
 _BUILDER_TYPES_KEY = 'bisect_builder_types'
@@ -277,20 +276,15 @@
     return {'error': 'Could not guess command for %r.' % suite}
 
   try:
-    if not _IsGitHash(good_revision):
-      good_revision = int(good_revision)
-    if not _IsGitHash(bad_revision):
-      bad_revision = int(bad_revision)
     repeat_count = int(repeat_count)
     max_time_minutes = int(max_time_minutes)
     bug_id = int(bug_id)
   except ValueError:
-    return {'error': ('repeat count and max time must be integers '
-                      'and revision as git hash or int.')}
+    return {'error': 'repeat count, max time and bug_id must be integers.'}
 
-  if not IsValidRevisionForBisect(good_revision):
+  if not can_bisect.IsValidRevisionForBisect(good_revision):
     return {'error': 'Invalid "good" revision "%s".' % good_revision}
-  if not IsValidRevisionForBisect(bad_revision):
+  if not can_bisect.IsValidRevisionForBisect(bad_revision):
     return {'error': 'Invalid "bad" revision "%s".' % bad_revision}
 
   config_dict = {
@@ -331,7 +325,7 @@
 
 
 def GuessTargetArch(bisect_bot):
-  """Return target architecture for the bisect job."""
+  """Returns target architecture for the bisect job."""
   if 'x64' in bisect_bot or 'win64' in bisect_bot:
     return 'x64'
   elif bisect_bot in ['android_nexus9_perf_bisect']:
@@ -359,9 +353,9 @@
   if not command:
     return {'error': 'Only Telemetry is supported at the moment.'}
 
-  if not IsValidRevisionForBisect(good_revision):
+  if not can_bisect.IsValidRevisionForBisect(good_revision):
     return {'error': 'Invalid "good" revision "%s".' % good_revision}
-  if not IsValidRevisionForBisect(bad_revision):
+  if not can_bisect.IsValidRevisionForBisect(bad_revision):
     return {'error': 'Invalid "bad" revision "%s".' % bad_revision}
 
   config_dict = {
@@ -374,19 +368,9 @@
   return config_dict
 
 
-def IsValidRevisionForBisect(revision):
-  """Checks whether a revision looks like a valid revision for bisect."""
-  return _IsGitHash(revision) or re.match(r'^[0-9]{5,7}$', str(revision))
-
-
-def _IsGitHash(revision):
-  """Checks whether the input looks like a SHA1 hash."""
-  return re.match(r'[a-fA-F0-9]{40}$', str(revision))
-
-
 def _GetAvailableBisectBots(master_name):
-  """Get all available bisect bots corresponding to a master name."""
-  bisect_bot_map = namespaced_stored_object.Get(_BISECT_BOT_MAP_KEY)
+  """Gets all available bisect bots corresponding to a master name."""
+  bisect_bot_map = namespaced_stored_object.Get(can_bisect.BISECT_BOT_MAP_KEY)
   for master, platform_bot_pairs in bisect_bot_map.iteritems():
     if master_name.startswith(master):
       return sorted({bot for _, bot in platform_bot_pairs})
@@ -394,14 +378,14 @@
 
 
 def _CanDownloadBuilds(master_name):
-  """Check whether bisecting using archives is supported."""
+  """Checks whether bisecting using archives is supported."""
   return master_name.startswith('ChromiumPerf')
 
 
 def GuessBisectBot(master_name, bot_name):
   """Returns a bisect bot name based on |bot_name| (perf_id) string."""
   fallback = 'linux_perf_bisect'
-  bisect_bot_map = namespaced_stored_object.Get(_BISECT_BOT_MAP_KEY)
+  bisect_bot_map = namespaced_stored_object.Get(can_bisect.BISECT_BOT_MAP_KEY)
   if not bisect_bot_map:
     return fallback
   bot_name = bot_name.lower()
@@ -409,7 +393,7 @@
     # Treat ChromiumPerfFyi (etc.) the same as ChromiumPerf.
     if master_name.startswith(master):
       for platform, bisect_bot in platform_bot_pairs:
-        if platform in bot_name:
+        if platform.lower() in bot_name:
           return bisect_bot
   # Nothing was found; log a warning and return a fall-back name.
   logging.warning('No bisect bot for %s/%s.', master_name, bot_name)
@@ -554,33 +538,6 @@
   return bool(child)
 
 
-def _RewriteMetricName(metric):
-  """Rewrites a metric name for legacy bisect.
-
-  With the introduction of test names with interaction record labels coming
-  from Telemetry, it is necessary to rewrite names to the format described in
-  goo.gl/CXGyxT so that they can be interpreted by legacy bisect. Recipe bisect
-  does the rewriting itself.
-
-  For instance, foo/bar/baz would be rewritten as bar-foo/baz.
-
-  Args:
-    metric: The slash-separated metric name, generally from GuessMetric.
-
-  Returns:
-    The Buildbot output format-compatible metric name.
-  """
-  test_parts = metric.split('/')
-
-  if len(test_parts) == 3:
-    chart_name, interaction_record_name, trace_name = test_parts
-    return '%s-%s/%s' % (interaction_record_name,
-                         chart_name,
-                         trace_name)
-  else:
-    return metric
-
-
 def _CreatePatch(base_config, config_changes, config_path):
   """Takes the base config file and the changes and generates a patch.
 
@@ -644,8 +601,16 @@
   Returns:
     A dictionary containing the result; if successful, this dictionary contains
     the field "issue_id" and "issue_url", otherwise it contains "error".
+
+  Raises:
+    AssertionError: Bot or config not set as expected.
+    request_handler.InvalidInputError: Some property of the bisect job
+        is invalid.
   """
   assert bisect_job.bot and bisect_job.config
+  if not bisect_job.key:
+    bisect_job.put()
+
   if bisect_job.use_buildbucket:
     result = _PerformBuildbucketBisect(bisect_job)
   else:
@@ -657,14 +622,12 @@
 
 
 def _PerformLegacyBisect(bisect_job):
-  config_dict = bisect_job.GetConfigDict()
-  config = bisect_job.config
   bot = bisect_job.bot
   email = bisect_job.email
   bug_id = bisect_job.bug_id
 
-  # We need to rewrite the metric name for legacy bisect.
-  config_dict['metric'] = _RewriteMetricName(config_dict['metric'])
+  config_dict = bisect_job.GetConfigDict()
+  config_dict['try_job_id'] = bisect_job.key.id()
   bisect_job.config = utils.BisectConfigPythonString(config_dict)
 
   # Get the base config file contents and make a patch.
@@ -672,7 +635,7 @@
   if not base_config:
     return {'error': 'Error downloading base config'}
   patch, base_checksum, base_hashes = _CreatePatch(
-      base_config, config, _BISECT_CONFIG_PATH)
+      base_config, bisect_job.config, _BISECT_CONFIG_PATH)
 
   # Check if bisect is for internal only tests.
   bisect_internal = _IsBisectInternalOnly(bisect_job)
@@ -711,7 +674,7 @@
       bisect_job.SetStarted()
       bug_comment = ('Bisect started; track progress at '
                      '<a href="%s">%s</a>' % (issue_url, issue_url))
-      LogBisectResult(bug_id, bug_comment)
+      LogBisectResult(bisect_job, bug_comment)
     return {'issue_id': issue_id, 'issue_url': issue_url}
 
   return {'error': 'Error starting try job. Try to fix at %s' % issue_url}
@@ -746,16 +709,23 @@
     the field "issue_id", otherwise it contains "error".
   """
   assert perf_job.bot and perf_job.config
-  config = perf_job.config
+
+  if not perf_job.key:
+    perf_job.put()
+
   bot = perf_job.bot
   email = perf_job.email
 
+  config_dict = perf_job.GetConfigDict()
+  config_dict['try_job_id'] = perf_job.key.id()
+  perf_job.config = utils.BisectConfigPythonString(config_dict)
+
   # Get the base config file contents and make a patch.
   base_config = utils.DownloadChromiumFile(_PERF_CONFIG_PATH)
   if not base_config:
     return {'error': 'Error downloading base config'}
   patch, base_checksum, base_hashes = _CreatePatch(
-      base_config, config, _PERF_CONFIG_PATH)
+      base_config, perf_job.config, _PERF_CONFIG_PATH)
 
   # Upload the patch to Rietveld.
   server = rietveld_service.RietveldService()
@@ -784,14 +754,19 @@
   return {'error': 'Error starting try job. Try to fix at %s' % url}
 
 
-def LogBisectResult(bug_id, comment):
+def LogBisectResult(job, comment):
   """Adds an entry to the bisect result log for a particular bug."""
-  if not bug_id or bug_id < 0:
+  if not job.bug_id or job.bug_id < 0:
     return
   formatter = quick_logger.Formatter()
-  logger = quick_logger.QuickLogger('bisect_result', bug_id, formatter)
-  logger.Log(comment)
-  logger.Save()
+  logger = quick_logger.QuickLogger('bisect_result', job.bug_id, formatter)
+  if job.log_record_id:
+    logger.Log(comment, record_id=job.log_record_id)
+    logger.Save()
+  else:
+    job.log_record_id = logger.Log(comment)
+    logger.Save()
+    job.put()
 
 
 def _MakeBuildbucketBisectJob(bisect_job):
@@ -824,6 +799,7 @@
   tester_name = config['recipe_tester_name']
 
   return buildbucket_job.BisectJob(
+      try_job_id=bisect_job.key.id(),
       bisect_director=GetBisectDirectorForTester(tester_name),
       good_revision=config['good_revision'],
       bad_revision=config['bad_revision'],
@@ -855,7 +831,7 @@
     issue_url = 'https://%s/buildbucket_job_status/%s' % (hostname, job_id)
     bug_comment = ('Bisect started; track progress at '
                    '<a href="%s">%s</a>' % (issue_url, issue_url))
-    LogBisectResult(bisect_job.bug_id, bug_comment)
+    LogBisectResult(bisect_job, bug_comment)
     return {
         'issue_id': job_id,
         'issue_url': issue_url,
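
LogBisectResult() now reuses a single log record per try job: the first call
stores the record ID returned by Log() on the job, and later calls pass it back
so the existing entry is updated rather than a new one appended. A condensed
sketch of that pattern using QuickLogger directly (the namespace and log name
mirror the code above; the bug ID and messages are illustrative):

from dashboard import quick_logger

logger = quick_logger.QuickLogger(
    'bisect_result', '12345', quick_logger.Formatter())
record_id = logger.Log('Bisect started; track progress at <issue url>')
logger.Save()

# Later, overwrite the same entry instead of adding a new record.
logger = quick_logger.QuickLogger(
    'bisect_result', '12345', quick_logger.Formatter())
logger.Log('Bisect finished; see results at <issue url>', record_id=record_id)
logger.Save()
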
diff --git a/catapult/dashboard/dashboard/start_try_job_test.py b/catapult/dashboard/dashboard/start_try_job_test.py
index a682b7e..b01236f 100644
--- a/catapult/dashboard/dashboard/start_try_job_test.py
+++ b/catapult/dashboard/dashboard/start_try_job_test.py
@@ -6,13 +6,13 @@
 import json
 import unittest
 
-import httplib2
 import mock
 import webapp2
 import webtest
 
 from google.appengine.ext import ndb
 
+from dashboard import can_bisect
 from dashboard import namespaced_stored_object
 from dashboard import rietveld_service
 from dashboard import start_try_job
@@ -22,6 +22,9 @@
 from dashboard.models import graph_data
 from dashboard.models import try_job
 
+# TODO(qyearsley): Shorten this module.
+# See https://github.com/catapult-project/catapult/issues/1917
+# pylint: disable=too-many-lines
 
 # Below is a series of test strings which may contain long lines.
 # pylint: disable=line-too-long
@@ -41,7 +44,8 @@
 +  "max_time_minutes": "20",
 +  "metric": "jslib/jslib",
 +  "repeat_count": "20",
-+  "target_arch": "ia32"
++  "target_arch": "ia32",
++  "try_job_id": 1
  }
 """
 
@@ -61,7 +65,8 @@
 +  "max_time_minutes": "20",
 +  "metric": "foreground_tab_request_start/foreground_tab_request_start",
 +  "repeat_count": "20",
-+  "target_arch": "ia32"
++  "target_arch": "ia32",
++  "try_job_id": 1
  }
 """
 
@@ -81,7 +86,8 @@
 +  "max_time_minutes": "20",
 +  "metric": "jslib/jslib",
 +  "repeat_count": "20",
-+  "target_arch": "ia32"
++  "target_arch": "ia32",
++  "try_job_id": 1
  }
 """
 
@@ -94,7 +100,8 @@
 +  "command": "tools/perf/run_benchmark -v --browser=release --output-format=buildbot --also-run-disabled-tests dromaeo.jslibstylejquery",
 +  "good_revision": "215806",
 +  "max_time_minutes": "60",
-+  "repeat_count": "1"
++  "repeat_count": "1",
++  "try_job_id": 1
  }
 """
 
@@ -280,29 +287,29 @@
 
 
 def _MockFailedFetch(url=None):  # pylint: disable=unused-argument
-    return testing_common.FakeResponseObject(404, {})
+  return testing_common.FakeResponseObject(404, {})
 
 
 def _MockMakeRequest(path, *args, **kwargs):  # pylint: disable=unused-argument
   """Mocks out a request, returning a canned response."""
   if path.endswith('xsrf_token'):
     assert kwargs['headers']['X-Requesting-XSRF-Token'] == 1
-    return (httplib2.Response({'status': '200'}), _FAKE_XSRF_TOKEN)
+    return testing_common.FakeResponseObject(200, _FAKE_XSRF_TOKEN)
   if path == 'upload':
     assert kwargs['method'] == 'POST'
     assert _EXPECTED_CONFIG_DIFF in kwargs['body'], (
         '%s\nnot in\n%s\n' % (_EXPECTED_CONFIG_DIFF, kwargs['body']))
-    return (httplib2.Response({'status': '200'}), _ISSUE_CREATED_RESPONSE)
+    return testing_common.FakeResponseObject(200, _ISSUE_CREATED_RESPONSE)
   if path == '33001/upload_content/1/1001':
     assert kwargs['method'] == 'POST'
     assert _TEST_EXPECTED_CONFIG_CONTENTS in kwargs['body']
-    return (httplib2.Response({'status': '200'}), 'Dummy content')
+    return testing_common.FakeResponseObject(200, 'Dummy content')
   if path == '33001/upload_complete/1':
     assert kwargs['method'] == 'POST'
-    return (httplib2.Response({'status': '200'}), 'Dummy content')
+    return testing_common.FakeResponseObject(200, 'Dummy content')
   if path == '33001/try/1':
     assert _TEST_EXPECTED_BOT in kwargs['body']
-    return (httplib2.Response({'status': '200'}), 'Dummy content')
+    return testing_common.FakeResponseObject(200, 'Dummy content')
   assert False, 'Invalid url %s requested!' % path
 
 
@@ -320,7 +327,7 @@
         [('/start_try_job', start_try_job.StartBisectHandler)])
     self.testapp = webtest.TestApp(app)
     namespaced_stored_object.Set(
-        start_try_job._BISECT_BOT_MAP_KEY,
+        can_bisect.BISECT_BOT_MAP_KEY,
         {
             'ChromiumPerf': [
                 ('nexus4', 'android_nexus4_perf_bisect'),
@@ -368,29 +375,31 @@
     self.SetCurrentUser('foo@chromium.org')
     testing_common.AddTests(
         ['ChromiumPerf'],
-        ['win7',
-         'android-nexus7',
-         'chromium-rel-win8-dual',
-         'chromium-rel-xp-single'], {
-             'page_cycler.morejs': {
-                 'times': {
-                     'page_load_time': {},
-                     'page_load_time_ref': {},
-                     'blog.chromium.org': {},
-                     'dev.chromium.org': {},
-                     'test.blogspot.com': {},
-                     'http___test.com_': {}
-                 },
-                 'vm_final_size_renderer': {
-                     'ref': {},
-                     'vm_final_size_renderer_extcs1': {}
-                 },
-             },
-             'blink_perf': {
-                 'Animation_balls': {}
-             }
-         }
-    )
+        [
+            'win7',
+            'android-nexus7',
+            'chromium-rel-win8-dual',
+            'chromium-rel-xp-single'
+        ],
+        {
+            'page_cycler.morejs': {
+                'times': {
+                    'page_load_time': {},
+                    'page_load_time_ref': {},
+                    'blog.chromium.org': {},
+                    'dev.chromium.org': {},
+                    'test.blogspot.com': {},
+                    'http___test.com_': {}
+                },
+                'vm_final_size_renderer': {
+                    'ref': {},
+                    'vm_final_size_renderer_extcs1': {}
+                },
+            },
+            'blink_perf': {
+                'Animation_balls': {}
+            }
+        })
     tests = graph_data.Test.query().fetch()
     for test in tests:
       name = test.key.string_id()
@@ -690,7 +699,7 @@
 
   def testGuessBisectBot_FetchesNameFromBisectBotMap(self):
     namespaced_stored_object.Set(
-        start_try_job._BISECT_BOT_MAP_KEY,
+        can_bisect.BISECT_BOT_MAP_KEY,
         {'OtherMaster': [('foo', 'super_foo_bisect_bot')]})
     self.assertEqual(
         'super_foo_bisect_bot',
@@ -698,7 +707,7 @@
 
   def testGuessBisectBot_PlatformNotFound_UsesFallback(self):
     namespaced_stored_object.Set(
-        start_try_job._BISECT_BOT_MAP_KEY,
+        can_bisect.BISECT_BOT_MAP_KEY,
         {'OtherMaster': [('foo', 'super_foo_bisect_bot')]})
     self.assertEqual(
         'linux_perf_bisect',
@@ -706,7 +715,7 @@
 
   def testGuessBisectBot_TreatsMasterNameAsPrefix(self):
     namespaced_stored_object.Set(
-        start_try_job._BISECT_BOT_MAP_KEY,
+        can_bisect.BISECT_BOT_MAP_KEY,
         {'OtherMaster': [('foo', 'super_foo_bisect_bot')]})
     self.assertEqual(
         'super_foo_bisect_bot',
@@ -822,14 +831,6 @@
       mock.MagicMock(side_effect=_MockMakeRequest))
   def testPerformBisectStep_DeleteJobOnFailedBisect(self):
     self.SetCurrentUser('foo@chromium.org')
-    # Fake Rietveld auth info
-    cfg = rietveld_service.RietveldConfig(
-        id='default_rietveld_config',
-        client_email='sullivan@chromium.org',
-        service_account_key='Fake Account Key',
-        server_url='https://test-rietveld.appspot.com/')
-    cfg.put()
-
     query_parameters = {
         'bisect_bot': 'linux_perf_bisect',
         'suite': 'dromaeo.jslibstylejquery',
@@ -857,14 +858,6 @@
       mock.MagicMock(side_effect=_MockMakeRequest))
   def testPerformPerfTryStep_DeleteJobOnFailedBisect(self):
     self.SetCurrentUser('foo@chromium.org')
-    # Fake Rietveld auth info
-    cfg = rietveld_service.RietveldConfig(
-        id='default_rietveld_config',
-        client_email='sullivan@chromium.org',
-        service_account_key='Fake Account Key',
-        server_url='https://test-rietveld.appspot.com/')
-    cfg.put()
-
     query_parameters = {
         'bisect_bot': 'linux_perf_bisect',
         'suite': 'dromaeo.jslibstylejquery',
@@ -918,7 +911,7 @@
                                   '/buildbucket_job_status/1234567')}),
         response.body)
 
-  def testGetBisectconfig_UseArchive(self):
+  def testGetBisectConfig_UseArchive(self):
     self._TestGetBisectConfig(
         {
             'bisect_bot': 'win_perf_bisect',
@@ -1005,7 +998,7 @@
 
   def testGetConfig_UseBuildbucket_IdbPerf(self):
     self._TestGetConfigCommand(
-        ('.\src\out\Release\performance_ui_tests.exe '
+        ('.\\src\\out\\Release\\performance_ui_tests.exe '
          '--gtest_filter=IndexedDBTest.Perf'),
         bisect_bot='win_perf_bisect',
         suite='idb_perf',
@@ -1046,18 +1039,5 @@
         start_try_job.GuessMetric('M/b/benchmark/chart/tir_label'))
 
 
-class RewriteMetricNameTests(testing_common.TestCase):
-
-  def testRewriteMetricWithoutInteractionRecord(self):
-    self.assertEqual(
-        'old/skool',
-        start_try_job._RewriteMetricName('old/skool'))
-
-  def testRewriteMetricWithInteractionRecord(self):
-    self.assertEqual(
-        'interaction-chart/trace',
-        start_try_job._RewriteMetricName('chart/interaction/trace'))
-
-
 if __name__ == '__main__':
   unittest.main()
diff --git a/catapult/dashboard/dashboard/static/alerts.html b/catapult/dashboard/dashboard/static/alerts.html
index 689656b..8459cde 100644
--- a/catapult/dashboard/dashboard/static/alerts.html
+++ b/catapult/dashboard/dashboard/static/alerts.html
@@ -4,183 +4,35 @@
 Use of this source code is governed by a BSD-style license that can be
 found in the LICENSE file.
 -->
+<html>
+  <head>
+    <link type="text/css" rel="stylesheet" href="/dashboard/static/base.css">
 
-<link rel="import" href="/dashboard/static/uri.html">
+    <link rel="import" href="/components/polymer/polymer.html">
 
-<script>
-'use strict';
+    <link rel="import" href="/dashboard/elements/alerts-page.html">
+    <link rel="import" href="/dashboard/elements/load-analytics.html">
+    <link rel="import" href="/dashboard/elements/nav-bar.html">
+    <link rel="import" href="/dashboard/static/uri.html">
 
-/**
- * Module for alerts page.
- * TODO(chrisphan): Convert this to Polymer element.
- */
-var alerts = (function() {
+    <style>
+      #message-bar {
+        top: 26px;
+      }
+    </style>
+  </head>
 
-  var sheriffSelect_ = null;
-  var anomalyTable_ = null;
-  var stoppageAlertTable_ = null;
-  var triagedToggle_ = null;
-  var improvementsToggle_ = null;
+  <body>
+    <nav-bar></nav-bar>
+    <h1>Chrome Performance Alerts</h1>
+    <overlay-message id="message-bar"></overlay-message>
+    <alerts-page></alerts-page>
+    <load-analytics></load-analytics>
 
-  /**
-   * Redirects to the page for the new sheriff when a new sheriff is selected.
-   * @param {Event} e The event for the select.
-   */
-  var onSheriffChange_ = function(e) {
-    var sheriff = e['detail']['item'].getAttribute('label');
-    if (!sheriff) {
-      return;
-    }
-    var params = uri.getAllParameters();
-    params['sheriff'] = sheriff;
-    window.location.href = uri.getCurrentPathWithParams(params);
-  };
-
-  /**
-   * Updates an alerts-table elemenet from the URI parameters.
-   * @param {Element} alertsTable The alerts-table element to update.
-   */
-  var updateFromURIParameters_ = function(alertsTable) {
-    if (!alertsTable) {
-      return;
-    }
-    alertsTable['sortBy'] = uri.getParameter('sortby', 'end_revision');
-    alertsTable['sortDirection'] = uri.getParameter('sortdirection', 'down');
-    var sheriff = uri.getParameter('sheriff', 'Chromium Perf Sheriff');
-    sheriffSelect_.select(sheriff);
-
-    // The show improvements and show triaged toggles are initially "off"; set
-    // them to on if the corresponding query parameter is set.
-    // The buttons are displayed differently if they have the "active"
-    // attribute.
-    if (uri.getParameter('improvements')) {
-      improvementsToggle_.setAttribute('active', '');
-    }
-    if (uri.getParameter('triaged')) {
-      triagedToggle_.setAttribute('active', '');
-    }
-  };
-
-  /**
-   * Pushes a new state into the history when an alerts-table is updated.
-   * @param {Element} alertsTable The alerts-table element that's being
-   *     updated.
-   */
-  var pushHistoryState_ = function(alertsTable) {
-    if (!alertsTable) {
-      return;
-    }
-    var params = uri.getAllParameters();
-    params['sortby'] = alertsTable['sortBy'];
-    params['sortdirection'] = alertsTable['sortDirection'];
-    var newUri = uri.getCurrentPathWithParams(params);
-    history.pushState(params, '', newUri);
-  };
-
-  /**
-   * Refreshes the UI from URI parameters when the history state is popped.
-   * @param {Event} e PopStateEvent.
-   * @private
-   */
-  var onPopState_ = function(e) {
-    // Pop State event will have a non-null state if this came from an actual
-    // pop instead of the load event.
-    if (e['state']) {
-      updateFromURIParameters_(anomalyTable_);
-      updateFromURIParameters_(stoppageAlertTable_);
-    }
-  };
-
-  /**
-   * Updates the table content to include or exclude triaged alerts.
-   * @param {Event} e The event object.
-   */
-  var onToggleTriaged_ = function(e) {
-    var params = uri.getAllParameters();
-    if (params['triaged']) {
-      delete params['triaged'];
-    } else {
-      params['triaged'] = 'true';
-    }
-    window.location.href = uri.getCurrentPathWithParams(params);
-  };
-
-  /**
-   * Updates the table to include or exclude improvement anomalies.
-   * @param {Event} e The event object.
-   */
-  var onToggleImprovements_ = function(e) {
-    var params = uri.getAllParameters();
-    if (params['improvements']) {
-      delete params['improvements'];
-    } else {
-      params['improvements'] = 'true';
-    }
-    window.location.href = uri.getCurrentPathWithParams(params);
-  };
-
-  /**
-   * Finds an element in the DOM and initializes it with some properties.
-   * The element with the given ID is assumed to be an alerts-table element.
-   * @param {string} id alerts-table ID.
-   * @param {Array.<Object>} alertList List of alerts.
-   * @param {Array.<Object>} extraColumns List of extra columns.
-   * @return {Element} The alerts-table element.
-   */
-  var initializeAlertsTable_ = function(id, alertList, extraColumns) {
-    var table = document.getElementById(id);
-    if (!table) {
-      return null;
-    }
-    var pushHistory = function() {
-      pushHistoryState_(table);
-    };
-    table.addEventListener('sortby', pushHistory, false);
-    table.addEventListener('sortdirection', pushHistory, false);
-    table['alertList'] = alertList;
-    table['extraColumns'] = extraColumns;
-    table['initialize']();
-    return table;
-  };
-
-  /**
-   * Initializes the page on the window load event.
-   * @param {Event} e The load event.
-   */
-  var initialize = function(e) {
-    anomalyTable_ = initializeAlertsTable_(
-        'anomaly-table',
-        window['ANOMALY_LIST'],
-        [{'key': 'percent_changed', 'label': 'Delta %'}]);
-    stoppageAlertTable_ = initializeAlertsTable_(
-        'stoppage-alert-table',
-        window['STOPPAGE_ALERT_LIST'],
-        [{'key': 'last_row_date', 'label': 'Date'}]);
-
-    triagedToggle_ = document.getElementById('triaged-toggle');
-    triagedToggle_.addEventListener('click', onToggleTriaged_);
-
-    improvementsToggle_ = document.getElementById('improvements-toggle');
-    improvementsToggle_.addEventListener('click', onToggleImprovements_);
-
-    sheriffSelect_ = document.getElementById('sheriff-select');
-    sheriffSelect_['menuItems'] = window['SHERIFF_LIST'];
-    sheriffSelect_.addEventListener('core-activate', onSheriffChange_);
-
-    updateFromURIParameters_(anomalyTable_);
-    updateFromURIParameters_(stoppageAlertTable_);
-    window.addEventListener('popstate', onPopState_, true);
-
-    var autoTriageLog = document.getElementById('auto-triage-log');
-    var sheriff = sheriffSelect_['selected'];
-    autoTriageLog['initialize']('Auto triage', 'auto_triage', sheriff);
-  };
-
-  return {
-    initialize: initialize
-  };
-})();
-
-document.addEventListener('polymer-ready', alerts.initialize, false);
-
-</script>
+    <script>
+      'use strict';
+      // The Google analytics code assumes there will be a script element in
+      // the page to inject into. This empty element is used for that.
+    </script>
+  </body>
+</html>
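
The removed alerts.html script above kept the tables' sort state in the URL, pushing a history entry on every sort change and restoring the state on popstate. A minimal standalone sketch of that round trip, using the uri helpers that remain in /dashboard/static/uri.html (applySortState is a hypothetical callback, not part of the dashboard):

```js
// Push the current sort state into the URL without reloading the page.
function pushSortState(sortBy, sortDirection) {
  var params = uri.getAllParameters();
  params['sortby'] = sortBy;
  params['sortdirection'] = sortDirection;
  history.pushState(params, '', uri.getCurrentPathWithParams(params));
}

// Restore the state when the user navigates back or forward. The state is
// null when the event comes from the initial page load rather than a pop.
window.addEventListener('popstate', function(e) {
  if (e.state) {
    applySortState(uri.getParameter('sortby', 'end_revision'),
                   uri.getParameter('sortdirection', 'down'));
  }
});
```
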
diff --git a/catapult/dashboard/dashboard/static/autocomplete_test.html b/catapult/dashboard/dashboard/static/autocomplete_test.html
index 8097803..a8b58c4 100644
--- a/catapult/dashboard/dashboard/static/autocomplete_test.html
+++ b/catapult/dashboard/dashboard/static/autocomplete_test.html
@@ -5,21 +5,21 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/tracing/core/test_utils.html">
-
 <link rel="import" href="/dashboard/static/autocomplete.html">
 
+<link rel="import" href="/tracing/core/test_utils.html">
+
 <script>
 'use strict';
 
 tr.b.unittest.testSuite(function() {
 
   test('basic search', function() {
-    var dataList = [{name: 'apple'}, {name: 'bannana'}, {name: 'blueberry'},
+    var dataList = [{name: 'apple'}, {name: 'banana'}, {name: 'blueberry'},
                     {name: 'blackberry'}, {name: 'Grape'}];
 
     var a = new autocomplete.Trie(dataList);
-    assert.deepEqual([{name: 'bannana'},
+    assert.deepEqual([{name: 'banana'},
                       {name: 'blueberry'},
                       {name: 'blackberry'}],
                      a.search('b'));
diff --git a/catapult/dashboard/dashboard/static/base.css b/catapult/dashboard/dashboard/static/base.css
index 375089e..ca137ee 100644
--- a/catapult/dashboard/dashboard/static/base.css
+++ b/catapult/dashboard/dashboard/static/base.css
@@ -22,6 +22,10 @@
   min-height: 100%;
 }
 
+nav-bar {
+  width: 100%;
+}
+
 h1 {
   align-self: center;
   -webkit-align-self: center;
diff --git a/catapult/dashboard/dashboard/static/bisect_utils.html b/catapult/dashboard/dashboard/static/bisect_utils.html
deleted file mode 100644
index 4e31291..0000000
--- a/catapult/dashboard/dashboard/static/bisect_utils.html
+++ /dev/null
@@ -1,73 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<script>
-'use strict';
-
-/**
- * Module for bisect/trace buttons.
- */
-var bisect_utils = (function() {
-
-  var unBisectableSuites = {
-      'arc-perf-test': true,
-      'browser_tests': true,
-      'content_browsertests': true,
-      'sizes': true,
-      'v8': true
-  };
-
-  /**
-   * @param {string} testPath A test's full test path.
-   * @param {string} rev A sample revision from the test.
-   * @return {boolean} Whether a bisect can be done for the given test.
-   */
-  var canBisect = function(testPath, rev) {
-    if (!testPath || !rev) {
-      return false;
-    }
-    var testPathParts = testPath.split('/');
-    if (testPathParts.length < 3) {
-      return false;
-    }
-    if (unBisectableSuites[testPathParts[2]]) {
-      return false;
-    }
-    if (!looksLikeSupportedRevision_(rev)) {
-      return false;
-    }
-    if (isRefTest_(testPath)) {
-      return false;
-    }
-    return true;
-  };
-
-  /**
-   * Checks whether the input could be a Chromium revision.
-   * @param {string} rev A sample revision from the test.
-   * @return {boolean} Whether the given revision looks valid.
-   */
-  var looksLikeSupportedRevision_ = function(rev) {
-    return (/^[a-fA-F0-9]{40}$/.test(rev) || (/^[\d]{5,7}$/.test(rev)));
-  };
-
-  /**
-   * Checks whether the given test path is for a reference build.
-   * @param {string} testPath A test's full test path.
-   * @return {boolean} Whether the given test is a reference test.
-   */
-  var isRefTest_ = function(testPath) {
-    return (testPath.lastIndexOf('/ref') + 4 == testPath.length ||
-            testPath.lastIndexOf('_ref') + 4 == testPath.length);
-  };
-
-  return {
-    canBisect: canBisect
-  };
-})();
-
-</script>
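
For reference, the removed bisect_utils.canBisect accepted either a 40-character git hash or a 5-7 digit commit position, and rejected reference-build series and a small blocklist of suites. A usage sketch of that contract (test paths are illustrative):

```js
// A git hash or a numeric commit position on an ordinary series is bisectable.
bisect_utils.canBisect(
    'ChromiumPerf/mac/blink_perf.parser/simple-url', 123456);           // true
bisect_utils.canBisect(
    'ChromiumPerf/mac/blink_perf.parser/simple-url',
    '0123456789abcdef0123456789abcdef01234567');                        // true

// Reference-build series, blocklisted suites, and unsupported revision
// formats are rejected.
bisect_utils.canBisect(
    'ChromiumPerf/mac/blink_perf.parser/simple-url_ref', 123456);       // false
bisect_utils.canBisect('ChromiumPerf/mac/sizes/chrome', 123456);        // false
bisect_utils.canBisect(
    'ChromiumPerf/mac/blink_perf.parser/simple-url', 'r1234');          // false
```
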
diff --git a/catapult/dashboard/dashboard/static/bisect_utils_test.html b/catapult/dashboard/dashboard/static/bisect_utils_test.html
deleted file mode 100644
index 1ff672e..0000000
--- a/catapult/dashboard/dashboard/static/bisect_utils_test.html
+++ /dev/null
@@ -1,23 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/core/test_utils.html">
-
-<link rel="import" href="/dashboard/static/bisect_utils.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-
-  test('canBisect', function() {
-    var testPath = 'ChromiumPerf/mac/blink_perf.parser/simple-url';
-    assert.isTrue(bisect_utils.canBisect(testPath, 123456));
-  });
-
-});
-</script>
diff --git a/catapult/dashboard/dashboard/static/debug_alert_test.html b/catapult/dashboard/dashboard/static/debug_alert_test.html
index d23809a..dbd3074 100644
--- a/catapult/dashboard/dashboard/static/debug_alert_test.html
+++ b/catapult/dashboard/dashboard/static/debug_alert_test.html
@@ -5,10 +5,10 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/tracing/core/test_utils.html">
-
 <link rel="import" href="/dashboard/static/debug_alert.html">
 
+<link rel="import" href="/tracing/core/test_utils.html">
+
 <script>
 'use strict';
 
diff --git a/catapult/dashboard/dashboard/static/embed.html b/catapult/dashboard/dashboard/static/embed.html
deleted file mode 100644
index 9984ebe..0000000
--- a/catapult/dashboard/dashboard/static/embed.html
+++ /dev/null
@@ -1,412 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<script>
-'use strict';
-
-/**
- * Functions for the embed page.
- *
- * The overall purpose of these functions is to process the output of the
- * graph_json handler so that it can be used by Flot.
- *
- * Some aspects of the embed page are similar to the chart-container; both
- * involve preparing chart data, options, and callback functions for Flot.
- * The embed page, however, is intended to be simple, lightweight, and
- * compatible with older browsers that don't fully support polymer.
- */
-var embed = (function() {
-
-  /**
-   * Initializes the chart and legend elements for displaying graph data.
-   * This method should be called when the embed page is loaded.
-   *
-   * Preconditions:
-   *   The jQuery and Flot libraries are loaded.
-   *   The chart and legend elements exist on the page.
-   *   The globals GRAPH_DATA and REVISION_INFO are present.
-   */
-  var initialize = function() {
-    var data = window['GRAPH_DATA']['data'];
-    var annotations = window['GRAPH_DATA']['annotations'];
-    var seriesAnnotations = annotations['series'];
-    var revisionInfo = window['REVISION_INFO'];
-
-    // First, compile data that can be used to look up revision numbers
-    // from data series indexes.
-    var revisionDetails = getRevisionDetails(data, annotations, revisionInfo);
-    var revisionLookup = getRevisionLookup(revisionDetails);
-
-    // Then, convert the x-values in the data from revision numbers to indexes.
-    // This is done so that values are more evenly-spaced on the x-axis.
-    // Note that revision numbers aren't necessarily very meaningful, and so
-    // spacing values on the x-axis by revision number can be misleading.
-    changeXValuesToIndexes(data, revisionLookup);
-
-    // Chart options are made, including the x-axis tick formatter function,
-    // which depends on the revision details compiled above.
-    var chartOptions = getChartOptions(revisionDetails, revisionLookup);
-
-    // Finally, plot the chart and listen for hover events.
-    // The plot method is defined externally when the Flot library is loaded.
-    $['plot']($('#chart'), data, chartOptions);
-    $('#chart').bind('plothover', function(hoverEvent, pos, item) {
-      plotHoverCallback(pos, item, revisionDetails, revisionLookup, data,
-                        seriesAnnotations);
-    });
-  };
-
-  /**
-   * Makes a map of revision numbers to Arrays of Objects with revision info.
-   *
-   * This revision info consists of revision numbers and names for different
-   * revision types (e.g. chromium, blink, v8 revisions). Generally, this
-   * information comes from the annotations passed in. However, even if some
-   * entries in this data are missing, a revision details object will still be
-   * returned, containing the x-values in the data.
-   *
-   * @param {Array.<Object>} data Array of series objects (in Flot format).
-   * @param {?(Object)} annotations A mapping of series indexes to data
-   *     point indexes to Objects with revision info.
-   * @param {?(Object)} revisionInfo A mapping of keys to revision info.
-   * @return {!Object} An Object mapping revisions to Arrays of Objects,
-   *     each of which has the properties 'name' (name of a revision type)
-   *     and 'value' (value for this revision type for this point).
-   */
-  var getRevisionDetails = function getRevisionDetails(data, annotations,
-      revisionInfo) {
-    var revisionDetails = {};
-
-    for (var seriesIndex = 0; seriesIndex < data.length; seriesIndex++) {
-      var seriesData = data[seriesIndex]['data'];
-      for (var dataIndex = 0; dataIndex < seriesData.length; dataIndex++) {
-
-        // Save the original x-value in the data series, and use it as the
-        // default in the revision details to return. Normally, this is expected
-        // to be a Chromium revision, but in the case of single-revision charts,
-        // it may be something else.
-        var xValue = seriesData[dataIndex][0];
-
-        // If this x-value already has an entry, no need to re-add it.
-        if (revisionDetails[xValue])
-          continue;
-
-        var defaultRevDetails = {
-          'name': getRevisionName(xValue, null, revisionInfo),
-          'value': getDisplayRevision(xValue)
-        };
-        var thisPointRevDetails = [];
-
-        if (annotations && annotations[seriesIndex] &&
-            annotations[seriesIndex][dataIndex]) {
-          var pointAnnotations = annotations[seriesIndex][dataIndex];
-          var defaultRevKey = pointAnnotations['a_default_rev'];
-
-          // If there's another "default revision" specified, use that.
-          if (defaultRevKey && pointAnnotations[defaultRevKey]) {
-            var defaultRev = pointAnnotations[defaultRevKey];
-            defaultRevDetails = {
-              'name': getRevisionName(defaultRev, defaultRevKey, revisionInfo),
-              'value': getDisplayRevision(defaultRev)
-            };
-          }
-
-          // Also collect details about all other kinds of revisions in the
-          // annotations for this point.
-          for (var key in pointAnnotations) {
-            if (key.indexOf('r_') == 0 && key != defaultRevKey) {
-              thisPointRevDetails.push({
-                'name': getRevisionName(
-                    pointAnnotations[key], key, revisionInfo),
-                'value': getDisplayRevision(pointAnnotations[key])
-              });
-            }
-          }
-        }
-
-        thisPointRevDetails.unshift(defaultRevDetails);
-        revisionDetails[xValue] = thisPointRevDetails;
-      }
-    }
-
-    return revisionDetails;
-  };
-
-  /**
-   * Gets the value that a revision will be displayed as; in the case of a hash,
-   * this will be a truncation of the hash.
-   * @param {(string|number)} rev The revision value.
-   * @return {(string|number)} The value to display for revision number.
-   */
-  var getDisplayRevision = function(rev) {
-    // SHA1 hashes have 40 hex digits. Truncate it to 7 characters.
-    var gitRegex = /^[a-f0-9]{40}$/;
-    if (gitRegex.test(rev)) {
-      return rev.substring(0, 7);
-    }
-    return rev;
-  };
-
-  /**
-   * Gets the name of a type of revision.
-   * @param {(string|number)} rev A revision number.
-   * @param {?(string)} key A key in the revisionInfo Object (optional).
-   * @param {?(Object)} revisionInfo A mapping of keys to revision info.
-   * @return {string} The name of the revision type.
-   */
-  var getRevisionName = function(rev, key, revisionInfo) {
-    if (revisionInfo && key && revisionInfo[key]) {
-      return revisionInfo[key]['name'];
-    }
-    return 'X-Value';
-  };
-
-  /**
-   * Returns an Array of revision numbers (as strings) in numerical order.
-   *
-   * @param {!Object} revisionDetails Mapping of rev numbers to detail info.
-   * @return {Array.<string>} Revision numbers, in order.
-   */
-  var getRevisionLookup = function(revisionDetails) {
-    var revisionNumbers = Object.keys(revisionDetails);
-    var numericCompare = function(a, b) {
-      return a - b;
-    };
-    return revisionNumbers.sort(numericCompare);
-  };
-
-  /**
-   * Constructs an Object mapping Array values to indexes.
-   * (If a value occurs multiple times, the higher index will be used.)
-   *
-   * @param {Array.<(string|number)>} array Array of values.
-   * @return {Object.<string, number>} Map of values to indexes.
-   */
-  var makeReverseLookup = function(array) {
-    var reverseLookup = {};
-    for (var i = 0; i < array.length; i++) {
-      reverseLookup[array[i]] = i;
-    }
-    return reverseLookup;
-  };
-
-  /**
-   * Changes all x-values in each data series to integers starting from zero.
-   *
-   * @param {Array.<Object>} data Flot series data. This will be modified.
-   * @param {Array.<string>} revisionLookup Array of revision numbers.
-   */
-  var changeXValuesToIndexes = function(data, revisionLookup) {
-    var reverseRevisionLookup = makeReverseLookup(revisionLookup);
-    for (var seriesIndex = 0; seriesIndex < data.length; seriesIndex++) {
-      var seriesData = data[seriesIndex]['data'];
-      for (var dataIndex = 0; dataIndex < seriesData.length; dataIndex++) {
-        seriesData[dataIndex][0] =
-            reverseRevisionLookup[seriesData[dataIndex][0]];
-      }
-    }
-  };
-
-  /**
-   * Determines how labels on the x-axis are displayed.
-   * @param {number} val An x-value (which is assumed to be an index).
-   * @param {Object} revisionDetails An Object mapping rev numbers to detail
-   *     info.
-   * @param {Array.<string>} revisionLookup An Array of rev numbers.
-   * @return {string} The value to display at each tick on the axis.
-   */
-  var xAxisTickFormatter = function(val, revisionDetails, revisionLookup) {
-    // The value must be a nonnegative integer.
-    var xIndex = Math.max(0, Math.round(val));
-    var info = revisionDetails[revisionLookup[xIndex]];
-    if (info && info.length) {
-      // The first item in the list of revision details is the default.
-      return String(info[0].value);
-    }
-    // This point shouldn't be reached. If 'undefined' is displayed on the
-    // x-axis, then some data is missing from revisionDetails.
-    return 'undefined';
-  };
-
-  /**
-   * Determines how labels along the y-axis are displayed.
-   * @param {number} val A y-value.
-   * @return {string} The string to display at each tick on the axis.
-   */
-  var yAxisTickFormatter = function(val) {
-    // Truncate to at most 3 decimal points. Don't use toFixed() because
-    // we don't want to add precision if it's not there.
-    val = Math.round(val * 1000) / 1000;
-    // Add commas for thousands marker.
-    var parts = val.toString().split('.');
-    return parts[0].replace(/\B(?=(\d{3})+(?!\d))/g, ',') +
-         (parts[1] ? ('.' + parts[1]) : '');
-  };
-
-  /**
-   * Returns the chart options object for Flot.
-   *
-   * The x-axis tickFormatter function depends on having an Array which
-   * maps indexes to revision numbers, because it is assumed that all x-values
-   * for all the data series have been converted to integers starting from 0.
-   * (This is done by changeXValuesToIndexes.)
-   *
-   * For more information about the flot chart options object:
-   * https://github.com/flot/flot/blob/master/API.md#plot-options
-   *
-   * @param {Object} revisionDetails An Object mapping revision numbers to
-   *     detail info objects.
-   * @param {Array.<string>} revisionLookup An Array of rev numbers.
-   * @return {!Object} A flot chart options object.
-   */
-  var getChartOptions = function(revisionDetails, revisionLookup) {
-    return {
-      'grid': {
-        'hoverable': true,
-        'borderWidth': 1,
-        'borderColor': 'rgba(0, 0, 0, 128)'
-      },
-      'crosshair': {
-        'mode': 'xy',
-        'color': 'rgba(34, 34, 34, 80)',
-        'lineWidth': 0.3
-      },
-      'xaxis': {
-        'tickFormatter': function(val) {
-          return xAxisTickFormatter(val, revisionDetails, revisionLookup);
-        }
-      },
-      'yaxis': {
-        'tickFormatter': yAxisTickFormatter
-      },
-      'colors': [
-        '#4D90FE',
-        '#FFE83B',
-        '#8E4EFE',
-        '#FFB83B',
-        '#194FA5',
-        '#A69413',
-        '#4C19A5',
-        '#A67113'
-      ]
-    };
-  };
-
-  /**
-   * A callback function called when hovering over a new position on the chart.
-   *
-   * Here, it displays information about the point that's being hovered over
-   * in a DOM element with id 'legend'. It assumes that elements with id 'chart'
-   * and 'legend' exist. For more details about hover events and callbacks in
-   * Flot, see: https://github.com/flot/flot/blob/master/API.md
-   *
-   * @param {Object} pos Position information.
-   * @param {Object} item Nearest item to the mouse.
-   * @param {Object} revisionDetails An Object mapping revision numbers to
-   *     detail info objects.
-   * @param {Array.<string>} revisionLookup An Array of rev numbers.
-   * @param {Array.<Object>} data Array of series objects (in Flot format).
-   * @param {Array.<Object>} seriesAnnotations Metadata about each series.
-   */
-  var plotHoverCallback = function(pos, item, revisionDetails, revisionLookup,
-                                   data, seriesAnnotations) {
-     var xIndex = Math.max(0, Math.round(pos['x1']));
-     var xValue = revisionLookup[xIndex];
-
-     var legendContents = document.createElement('dl');
-
-     var xUnits = getXUnits(seriesAnnotations);
-     if (xUnits) {
-       addNameValuePair(legendContents, xUnits, xValue);
-     }
-
-     if (!xUnits && revisionDetails[xValue]) {
-       for (var i = 0; i < revisionDetails[xValue].length; i++) {
-         var revisionTypeName = revisionDetails[xValue][i]['name'];
-         var revisionValue = revisionDetails[xValue][i]['value'];
-         addNameValuePair(legendContents, revisionTypeName, revisionValue);
-       }
-     }
-
-     for (var seriesIndex = 0; seriesIndex < data.length; seriesIndex++) {
-       var seriesData = data[seriesIndex]['data'];
-       for (var dataIndex = 0; dataIndex < seriesData.length; dataIndex++) {
-         if (seriesData[dataIndex][0] == xIndex) {
-           var yUnits = seriesAnnotations[seriesIndex]['units'];
-           var seriesName = seriesAnnotations[seriesIndex]['name'];
-           var yValue = seriesData[dataIndex][1];
-
-           // If the user is hovering over one of the series lines, mark it so
-           // that the user knows which line they're hovering over.
-           if (item && item['seriesIndex'] == seriesIndex) {
-             seriesName = '* ' + seriesName;
-           }
-
-           addNameValuePair(legendContents, seriesName, yValue + ' ' + yUnits);
-         }
-       }
-     }
-
-     // Populate the legend element and position it in the right corner.
-     var legend = document.getElementById('legend');
-     var chart = document.getElementById('chart');
-     if (legend.hasChildNodes()) {
-        legend.removeChild(legend.firstChild);
-     }
-     legend.appendChild(legendContents);
-     legend.style.left = ((chart.offsetWidth - legend.offsetWidth) - 20) + 'px';
-     legend.style.display = 'block';
-  };
-
-  /**
-   * Gets the units for the X-value for this chart if there is one.
-   * It is assumed that if there are multiple series plotted on one chart, they
-   * should all have the same X units. If there are no X units specified for any
-   * of the series, then this function will return null, indicating that the
-   * units are supposed to be revisions.
-   */
-  var getXUnits = function(seriesAnnotations) {
-    for (var i = 0; i < seriesAnnotations.length; i++) {
-      if (seriesAnnotations[i]['units_x']) {
-        return seriesAnnotations[i]['units_x'];
-      }
-    }
-    return null;
-  };
-
-  /**
-   * Adds a name-value pair to a container element.
-   */
-  var addNameValuePair = function(containerElement, name, value) {
-    var dt = document.createElement('dt');
-    dt.appendChild(document.createTextNode(name));
-    containerElement.appendChild(dt);
-    var dd = document.createElement('dd');
-    dd.appendChild(document.createTextNode(value));
-    containerElement.appendChild(dd);
-  };
-
-  return {
-    initialize: initialize,
-    getRevisionDetails: getRevisionDetails,
-    getRevisionName: getRevisionName,
-    getDisplayRevision: getDisplayRevision,
-    getRevisionLookup: getRevisionLookup,
-    makeReverseLookup: makeReverseLookup,
-    changeXValuesToIndexes: changeXValuesToIndexes,
-    xAxisTickFormatter: xAxisTickFormatter,
-    yAxisTickFormatter: yAxisTickFormatter,
-    getChartOptions: getChartOptions,
-    plotHoverCallback: plotHoverCallback,
-    getXUnits: getXUnits,
-    addNameValuePair: addNameValuePair
-  };
-
-})();
-
-document.addEventListener('DOMContentLoaded', embed.initialize);
-
-</script>
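
The removed embed module plotted points at evenly spaced indexes rather than at raw revision numbers, keeping a lookup array to translate indexes back to revisions for axis labels and hover text. A small worked example using the module's own exported helpers (sample data is illustrative):

```js
var data = [{'data': [[10000, 345], [12500, 345], [14000, 345]]}];

// Build the revision lookup: with no annotations, each x-value gets a
// default 'X-Value' entry keyed by the revision number.
var revisionDetails = embed.getRevisionDetails(data, null, null);
var revisionLookup = embed.getRevisionLookup(revisionDetails);
// revisionLookup == ['10000', '12500', '14000']

// Replace revision x-values with their indexes so Flot spaces them evenly.
embed.changeXValuesToIndexes(data, revisionLookup);
// data[0]['data'] == [[0, 345], [1, 345], [2, 345]]; the x-axis tick
// formatter then maps each index back to its revision label for display.
```
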
diff --git a/catapult/dashboard/dashboard/static/embed_test.html b/catapult/dashboard/dashboard/static/embed_test.html
deleted file mode 100644
index 4fcb0e1..0000000
--- a/catapult/dashboard/dashboard/static/embed_test.html
+++ /dev/null
@@ -1,210 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/core/test_utils.html">
-
-<link rel="import" href="/dashboard/static/embed.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-
-  var testOptions = {
-    setUp: function() {
-      // Reset the sample data for each test, in case they're
-      // modified by some test case.
-      window.GRAPH_DATA = {
-        'data': [
-          {'data': [[10000, 345], [12500, 345], [14000, 345]]},
-          {'data': [[10000, 101], [12500, 102], [14000, 103]]}
-        ],
-        'annotations': {
-          'series': [
-            {'name': 'foo', 'units': 'ms'},
-            {'name': 'bar', 'units': 'MB'}
-          ],
-          '0': {
-            '0': {'r_foo': 'A', 'a_default_rev': 'r_foo', 'r_fizz': 123},
-            '1': {'r_foo': 'B', 'a_default_rev': 'r_foo', 'r_fizz': 124},
-            '2': {'r_foo': 'C', 'a_default_rev': 'r_foo', 'r_fizz': 125}
-          },
-          '1': {
-            '0': {'r_foo': 'A', 'a_default_rev': 'r_foo', 'r_fizz': 123},
-            '1': {'r_foo': 'B', 'a_default_rev': 'r_foo', 'r_fizz': 124},
-            '2': {'r_foo': 'C', 'a_default_rev': 'r_foo', 'r_fizz': 125}
-          }
-        }
-      };
-
-      window.REVISION_INFO = {
-        'r_fizz': {
-           'name': 'Fizz Revision',
-           'url': 'http://example.com/fizz'
-        },
-        'r_foo': {
-           'name': 'Foo Revision',
-           'url': 'http://example.com/foo'
-        }
-      };
-    }
-  };
-
-  test('getRevisionDetails', function() {
-    var data = GRAPH_DATA.data;
-    var annotations = GRAPH_DATA.annotations;
-
-    // If the different series in the graph data have different annotations,
-    // but the same revision numbers, then only the annotations in the last
-    // series are used. Note, the default revision is first in each list.
-    assert.deepEqual(
-       {
-         10000: [
-           {'name': 'Foo Revision', 'value': 'A'},
-           {'name': 'Fizz Revision', 'value': 123}
-         ],
-         12500: [
-           {'name': 'Foo Revision', 'value': 'B'},
-           {'name': 'Fizz Revision', 'value': 124}
-         ],
-         14000: [
-           {'name': 'Foo Revision', 'value': 'C'},
-           {'name': 'Fizz Revision', 'value': 125}
-         ]
-       },
-       embed.getRevisionDetails(data, annotations, REVISION_INFO));
-
-    // If annotations are not provided, then default revision details are
-    // still returned.
-    assert.deepEqual(
-       {
-         10000: [{'name': 'X-Value', 'value': 10000}],
-         12500: [{'name': 'X-Value', 'value': 12500}],
-         14000: [{'name': 'X-Value', 'value': 14000}]
-       },
-       embed.getRevisionDetails(data, null, REVISION_INFO));
-  }, testOptions);
-
-  test('getDisplayRevision', function() {
-    // The display revision of a revision that is a sha1 hash is truncated.
-    assert.equal(
-        '01234ab',
-        embed.getDisplayRevision('01234abcde0123456789abcdefabcd0123456789'));
-    // The display revision of other values is whatever was passed in.
-    assert.equal('abcdefgh', embed.getDisplayRevision('abcdefgh'));
-    assert.equal(1234567890, embed.getDisplayRevision(1234567890));
-  }, testOptions);
-
-  test('getRevisionName', function() {
-    // If there's an entry in the revision info object, use the name there.
-    assert.equal('Fizz Revision', embed.getRevisionName(123, 'r_fizz',
-                                                        REVISION_INFO));
-    assert.equal('Foo Revision', embed.getRevisionName(12345, 'r_foo',
-                                                       REVISION_INFO));
-
-    // Otherwise, just use a generic name for the revision type.
-    assert.equal('X-Value', embed.getRevisionName(123, 'r_absent',
-                                                  REVISION_INFO));
-    assert.equal('X-Value', embed.getRevisionName(1234.5, 'r_absent',
-                                                  REVISION_INFO));
-    assert.equal('X-Value', embed.getRevisionName(123456, 'r_absent', null));
-  }, testOptions);
-
-  test('getRevisionLookup', function() {
-    var data = GRAPH_DATA.data;
-    var annotations = GRAPH_DATA.annotations;
-    var revisionDetails = embed.getRevisionDetails(data, annotations,
-                                                   REVISION_INFO);
-
-    // The revision lookup array contains the x-values in order.
-    assert.deepEqual(
-        ['10000', '12500', '14000'],
-        embed.getRevisionLookup(revisionDetails));
-    assert.deepEqual([], embed.getRevisionLookup({}));
-    assert.deepEqual(['100', '200'],
-                     embed.getRevisionLookup({200: [], 100: []}));
-  }, testOptions);
-
-  test('makeReverseLookup', function() {
-    // The makeReverseLookup function makes an object mapping items to indexes.
-    assert.deepEqual({'x': 0, 'y': 1}, embed.makeReverseLookup(['x', 'y']));
-    assert.deepEqual({'x': 1, 'z': 2},
-                     embed.makeReverseLookup(['x', 'x', 'z']));
-    assert.deepEqual({}, embed.makeReverseLookup([]));
-  }, testOptions);
-
-  test('changeXValuesToIndexes', function() {
-    var data = GRAPH_DATA.data;
-    var revisionLookup = ['10000', '12500', '14000'];
-
-    // The data series start out like this:
-    assert.deepEqual(
-       [
-         {'data': [[10000, 345], [12500, 345], [14000, 345]]},
-         {'data': [[10000, 101], [12500, 102], [14000, 103]]}
-       ],
-       GRAPH_DATA.data);
-    // After calling this function, the x values in all the data series have
-    // been changed to indexes.
-    embed.changeXValuesToIndexes(data, revisionLookup);
-    assert.deepEqual(
-       [
-         {'data': [[0, 345], [1, 345], [2, 345]]},
-         {'data': [[0, 101], [1, 102], [2, 103]]}
-       ],
-       GRAPH_DATA.data);
-  }, testOptions);
-
-  test('xAxisTickFormatter', function() {
-    var revisionDetails = {
-      100: [
-        {'name': 'X Revision', 'value': 111},
-        {'name': 'Y Revision', 'value': 'A'}
-      ],
-      200: [
-        {'name': 'Y Revision', 'value': 'B'},
-        {'name': 'Z Revision', 'value': '34.43'}
-      ]
-    };
-    var revisionLookup = ['100', '200'];
-
-    // The tick formatter gives the 'value' for the first revision
-    // details entry. Note that the number given to the tick formatter
-    // is an index in the revision lookup Array.
-    assert.equal(
-        '111', embed.xAxisTickFormatter(0, revisionDetails, revisionLookup));
-    assert.equal(
-        'B', embed.xAxisTickFormatter(1, revisionDetails, revisionLookup));
-
-    // The tick formatter still gives values even when the given index isn't
-    // an integer.
-    assert.equal(
-        '111', embed.xAxisTickFormatter(-1, revisionDetails, revisionLookup));
-    assert.equal(
-        '111', embed.xAxisTickFormatter(0.2, revisionDetails, revisionLookup));
-    assert.equal(
-        'B', embed.xAxisTickFormatter(0.9, revisionDetails, revisionLookup));
-    assert.equal(
-        'B', embed.xAxisTickFormatter(1.4, revisionDetails, revisionLookup));
-
-    // If the index isn't found, another string is returned to indicate this.
-    assert.equal(
-        'undefined',
-        embed.xAxisTickFormatter(2, revisionDetails, revisionLookup));
-  }, testOptions);
-
-  test('yAxisTickFormatter', function() {
-    // Numbers are formatted to have no more than 3 decimal points,
-    // and to have commas for the thousands (millions, billions) marker.
-    assert.equal('3.142', embed.yAxisTickFormatter(3.14159));
-    assert.equal('1,111.1', embed.yAxisTickFormatter(1111.1));
-    assert.equal('1,111.1', embed.yAxisTickFormatter(1111.1));
-    assert.equal('10,000', embed.yAxisTickFormatter(10000.00));
-    assert.equal('1,000,000', embed.yAxisTickFormatter(1000000));
-  }, testOptions);
-
-});
-</script>
diff --git a/catapult/dashboard/dashboard/static/group_report.html b/catapult/dashboard/dashboard/static/group_report.html
index dea5e88..9ba00e8 100644
--- a/catapult/dashboard/dashboard/static/group_report.html
+++ b/catapult/dashboard/dashboard/static/group_report.html
@@ -1,263 +1,56 @@
 <!DOCTYPE html>
 <!--
-Copyright 2015 The Chromium Authors. All rights reserved.
+Copyright 2016 The Chromium Authors. All rights reserved.
 Use of this source code is governed by a BSD-style license that can be
 found in the LICENSE file.
 -->
+<html>
+<head>
+  <link type="text/css" rel="stylesheet" href="/dashboard/static/base.css">
+  <title>Chrome Performance Dashboard</title>
 
-<link rel="import" href="/dashboard/static/testselection.html">
+  <link rel="import" href="/components/polymer/polymer.html">
 
-<script>
-'use strict';
+  <link rel="import" href="/dashboard/elements/custom-tooltip.html">
+  <link rel="import" href="/dashboard/elements/group-report-page.html">
+  <link rel="import" href="/dashboard/elements/load-analytics.html">
+  <link rel="import" href="/dashboard/elements/nav-bar.html">
+  <link rel="import" href="/dashboard/elements/overlay-message.html">
 
-/**
- * Main JavaScript file for group reporting page.
- */
-var group_report = (function() {
-
-  // Dictionary of alert key to 'chart-element' element.
-  var graphElements_ = {};
-
-  // 'alerts-table' element.
-  var table_ = null;
-
-  /**
-   * Unchecks the checkbox in the alerts-table when a graph is closed.
-   * This is the handler for the 'closeGraph' event, fired by chart-container.
-   * @param {Event} event The event object.
-   */
-  var onGraphClose_ = function(event) {
-    // Un-check the alert in the table.
-    var key = event.target['alertKey'];
-    var alertList = table_['alertList'];
-    for (var i = 0; i < alertList.length; i++) {
-      if (alertList[i].key == key) {
-        alertList[i].selected = false;
-        break;
-      }
-    }
-
-    // Make the table update its list of checked alerts.
-    // This is necessary so that the triage dialog will get a correct list of
-    // alerts that should be affected by a triage action.
-    table_['onCheckboxChange']();
-
-    // Remove the graph from the set of currently-displayed graph elements.
-    delete graphElements_[key];
-  };
-
-  /**
-   * Selects the series to graph for an alert. The series is specified as a
-   * two-item Array: a test path and a list of selected series under that
-   * test path. This will be used in the JSON request parameter when
-   * requesting data to graph.
-   * @param {Object} alert The alert object.
-   * @return {Array} A two-item Array: test path and Array of selected series.
-   */
-  var getTestPathAndSelectedSeries = function(alert) {
-    var testPath = getTestPath(alert);
-    var subtestsEntry = getSubtestsEntry(testPath);
-    var traceName = testPath.split('/').pop();
-
-    // If the "subtests" property of |subtestsEntry| is an empty object, that
-    // implies that this test has no subtests. In this case, show a chart for
-    // the parent test, with this particular child selected.
-    if (subtestsEntry && subtestsEntry['sub_tests'] &&
-        Object.keys(subtestsEntry['sub_tests']).length == 0) {
-      testPath = testPath.split('/').slice(0, -1).join('/');
-      subtestsEntry = getSubtestsEntry(testPath);
-    }
-
-    // Get a list of selected traces. This should include the series that the
-    // alert was on, as well as any related reference build result series.
-    var selectedTraces = [traceName];
-    if (subtestsEntry && subtestsEntry['sub_tests']) {
-      if ('ref' in subtestsEntry['sub_tests']) {
-        selectedTraces.push('ref');
-      }
-      if (traceName + '_ref' in subtestsEntry['sub_tests']) {
-        selectedTraces.push(traceName + '_ref');
-      }
-    }
-
-    // Otherwise, the test is either not found in the SUBTESTS dict, or it is
-    // a test with children (e.g. a summary metric). In either of these cases,
-    // we want to return the test path and trace found on the alert.
-    return [testPath, selectedTraces];
-  };
-
-  var getTestPath = function(alert) {
-    return [
-      alert['master'],
-      alert['bot'],
-      alert['testsuite'],
-      alert['test']
-    ].join('/');
-  };
-
-  /**
-   * Gets the object in the global SUBTESTS that corresponds to the test
-   * that an alert is on, or null if nothing was found.
-   */
-  var getSubtestsEntry = function(testPath) {
-    var testPathParts = testPath.split('/');
-    var botName = testPathParts[0] + '/' + testPathParts[1];
-    var subtestParts = testPathParts.splice(3);
-    var subtestDict = window['SUBTESTS'][botName][testPathParts[2]];
-    if (!subtestDict) {
-      return null;
-    }
-    for (var level = 0; level < subtestParts.length - 1; level++) {
-      var name = subtestParts[level];
-      if (!(name in subtestDict)) {
-        return null;
-      }
-      subtestDict = subtestDict[name]['sub_tests'];
-    }
-    return subtestDict[subtestParts[subtestParts.length - 1]];
-  };
-
-  /**
-   * Toggles whether improvement alerts are displayed.
-   * Regardless of whether this is toggled on or off, any improvement
-   * alerts specified in the keys parameter are always displayed.
-   * @param {Event} event The event for the button click.
-   */
-  var onToggleImprovements_ = function(event) {
-    var improvementsToggle = document.getElementById('improvements-toggle');
-    if (improvementsToggle.hasAttribute('active')) {
-      table_['alertList'].forEach(function(alert) {
-        if (alert['improvement']) {
-          alert['hideRow'] = false;
-        }
-      });
-    } else {
-      table_['alertList'].forEach(function(alert) {
-        if (alert['improvement'] && !alert['selected']) {
-          alert['hideRow'] = true;
-          alert['selected'] = false;
-        }
-      });
-      // Make the table update its list of checked alerts.
-      table_['onCheckboxChange']();
-    }
-  };
-
-  /**
-   * Updates alerts table when user nudges an anomaly on the graph.
-   * @param {Event} event The event of nudge.
-   */
-  var alertChangedRevisions_ = function(event) {
-    var alertList = table_['alertList'];
-    var nudgedAlert = event.detail['alerts'][0];
-    for (var i = 0; i < alertList.length; i++) {
-      if (alertList[i]['key'] == nudgedAlert['key']) {
-        alertList[i]['start_revision'] = event.detail['startRev'];
-        alertList[i]['end_revision'] = event.detail['endRev'];
-        // Make the table update its list of checked alerts.
-        table_['onCheckboxChange']();
-        return;
-      }
-    }
-  };
-
-  /**
-   * Add graphs for tests in 'alerts' to the given element.
-   * @param {Element} containerElement The element that contains all the chart
-   *     UI elements.
-   * @param {Array.<Object>} alerts List of alert objects.
-   * @param {boolean} insertBefore True for adding graphs in prepending order.
-   */
-  var addGraph_ = function(containerElement, alerts, insertBefore) {
-    if (!alerts) {
-      return;
-    }
-
-    for (var i = 0; i < alerts.length; i++) {
-      var alert = alerts[i];
-      var chart = document.createElement('chart-container');
-      graphElements_[alert['key']] = chart;
-      chart['revisionInfo'] = window['REVISION_INFO'];
-      chart['xsrfToken'] = window['XSRF_TOKEN'];
-      if (insertBefore != 'undefined') {
-        containerElement.insertBefore(chart, containerElement.firstChild);
-      } else {
-        containerElement.appendChild(chart);
-      }
-
-      // Set graph params.
-      var graphParams = {
-        'rev': alert['end_revision']
-      };
-      chart['graphParams'] = graphParams;
-      chart['alertKey'] = alert['key'];
-      chart['addSeriesGroup']([getTestPathAndSelectedSeries(alert)]);
-      chart.addEventListener('chartclosed', onGraphClose_, false);
-      chart.addEventListener('alertChangedRevisions',
-                             alertChangedRevisions_, true);
-    }
-  };
-
-  /**
-   * On alert checkbox change, remove or add graphs.
-   * @param {Event} e The event object.
-   */
-  var onAlertSelectionChange_ = function(e) {
-    // Make a set of all alerts that are checked in the table.
-    var alerts = {};
-    table_['checkedAlerts'].forEach(function(a) {
-      alerts[a.key] = a;
-    });
-    // Add graphs that are checked in the table but not added yet.
-    for (var key in alerts) {
-      if (!(key in graphElements_)) {
-        addGraph_(document.getElementById('charts-container'),
-                  [alerts[key]], true);
-      }
-    }
-    // Remove graphs that are no longer checked in the table.
-    for (var key in graphElements_) {
-      if (!(key in alerts) && key in graphElements_) {
-        var chartContainer = document.getElementById('charts-container');
-        if (graphElements_[key].parentNode == chartContainer) {
-          chartContainer.removeChild(graphElements_[key]);
-          delete graphElements_[key];
+  <script>
+    'use strict';
+    // Workaround for document.contains returning false for elements in the
+    // shadow DOM. jQuery mouse events need it to return true for scrolling
+    // to be properly accounted for. For background, see
+    // https://github.com/Polymer/polymer/issues/162 and
+    // https://www.w3.org/Bugs/Public/show_bug.cgi?id=22141
+    jQuery.contains = function(doc, elem) {
+      var charts = document.getElementsByTagName(
+          'group-report-page')[0].getCharts();
+      for (var i = 0; i < charts.length; i++) {
+        if (charts[i].shadowRoot.contains(elem)) {
+          return true;
         }
       }
+      return doc.contains(elem);
+    };
+  </script>
+
+  <style>
+    body {
+      align-items: center;
+      -webkit-align-items: center;
     }
-  };
-
-  /**
-   * Sets up event listeners.
-   */
-  var initialize = function() {
-    table_ = document.getElementById('alerts-table');
-    table_['alertList'] = window['ALERT_LIST'];
-    table_['extraColumns'] = [
-      {'key': 'percent_changed', 'label': 'Delta %'}
-    ];
-    table_['initialize']();
-    table_.addEventListener('changeselection', onAlertSelectionChange_, false);
-
-    var improvementsToggle = document.getElementById('improvements-toggle');
-    improvementsToggle.addEventListener('click', onToggleImprovements_);
-
-    var bugId = uri.getParameter('bug_id');
-    if (bugId) {
-      var bugInfo = document.getElementById('bug-info');
-      bugInfo['initialize'](bugId, table_, window['OWNER_INFO']);
-    }
-
-    // Load graphs for checked alerts.
-    addGraph_(document.getElementById('charts-container'),
-              table_['checkedAlerts'], false);
-  };
-
-  return {
-    initialize: initialize,
-    getTestPathAndSelectedSeries: getTestPathAndSelectedSeries
-  };
-})();
-
-document.addEventListener('polymer-ready', group_report.initialize);
-</script>
+  </style>
+</head>
+<body>
+  <nav-bar></nav-bar>
+  <h1>Chrome Performance Dashboard</h1>
+  <overlay-message id="message-bar"></overlay-message>
+  <!-- This custom-tooltip is used by chart-legend.
+       See https://github.com/catapult-project/catapult/issues/2172. -->
+  <custom-tooltip id="legend-details-tooltip"></custom-tooltip>
+  <group-report-page></group-report-page>
+  <load-analytics></load-analytics>
+</body>
+</html>
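
The jQuery.contains override above exists because document.contains returns false for nodes inside a shadow root, which breaks jQuery's mouse-event offset handling for the shadow-DOM charts. A minimal standalone sketch of the same containment check, with illustrative names (not part of the page):

```js
// Returns true if elem lives inside doc, also looking through the shadow
// roots of the given host elements before falling back to the light DOM.
function containsAcrossShadowRoots(doc, hosts, elem) {
  for (var i = 0; i < hosts.length; i++) {
    if (hosts[i].shadowRoot && hosts[i].shadowRoot.contains(elem)) {
      return true;
    }
  }
  return doc.contains(elem);
}
```
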
diff --git a/catapult/dashboard/dashboard/static/group_report_test.html b/catapult/dashboard/dashboard/static/group_report_test.html
deleted file mode 100644
index ce5b2e1..0000000
--- a/catapult/dashboard/dashboard/static/group_report_test.html
+++ /dev/null
@@ -1,97 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/core/test_utils.html">
-
-<link rel="import" href="group_report.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-
-  var testOptions = {
-    setUp: function() {
-      window.SUBTESTS = {
-        'ChromiumPerf/chromium-rel-mac': {
-          'page_cycler.morejs': {
-            'cold_times': {
-              'has_rows': false,
-              'sub_tests': {
-                'test.blogspot.com': {
-                  'has_rows': true,
-                  'sub_tests': {}
-                },
-                'www.yahoo.com': {
-                  'has_rows': true,
-                  'sub_tests': {}
-                },
-                'page_load_time': {
-                  'has_rows': true,
-                  'sub_tests': {}
-                },
-                'page_load_time_ref': {
-                  'has_rows': true,
-                  'sub_tests': {}
-                }
-              }
-            },
-            'idle_wakeups_gpu': {
-              'has_rows': true,
-              'sub_tests': {
-                'test.blogspot.com': {
-                  'has_rows': true,
-                  'sub_tests': {}
-                },
-                'www.yahoo.com': {
-                  'has_rows': true,
-                  'sub_tests': {}
-                },
-                'ref': {
-                  'has_rows': true,
-                  'sub_tests': {}
-                }
-              }
-            }
-          }
-        }
-      };
-    }
-  };
-
-  test('getTestPathAndSelectedSeries for alert on summary series', function() {
-    var alert = {
-      'master': 'ChromiumPerf',
-      'bot': 'chromium-rel-mac',
-      'testsuite': 'page_cycler.morejs',
-      'test': 'idle_wakeups_gpu'
-    };
-    var result = group_report.getTestPathAndSelectedSeries(alert);
-    assert.equal(result.length, 2);
-    assert.equal(
-        result[0],
-        'ChromiumPerf/chromium-rel-mac/page_cycler.morejs/idle_wakeups_gpu');
-    assert.deepEqual(result[1], ['idle_wakeups_gpu', 'ref']);
-  }, testOptions);
-
-  test('getTestPathAndSelectedSeries for alert on leaf series', function() {
-    var alert = {
-      'master': 'ChromiumPerf',
-      'bot': 'chromium-rel-mac',
-      'testsuite': 'page_cycler.morejs',
-      'test': 'cold_times/page_load_time'
-    };
-    var result = group_report.getTestPathAndSelectedSeries(alert);
-    assert.equal(result.length, 2);
-    assert.equal(
-        result[0],
-        'ChromiumPerf/chromium-rel-mac/page_cycler.morejs/cold_times');
-    assert.deepEqual(result[1], ['page_load_time', 'page_load_time_ref']);
-  }, testOptions);
-
-});
-</script>
diff --git a/catapult/dashboard/dashboard/static/report.html b/catapult/dashboard/dashboard/static/report.html
new file mode 100644
index 0000000..7e501cb
--- /dev/null
+++ b/catapult/dashboard/dashboard/static/report.html
@@ -0,0 +1,61 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<html>
+<head>
+  <link type="text/css" rel="stylesheet" href="/dashboard/static/base.css">
+
+  <script src="/jquery/jquery-2.1.4.min.js"></script>
+  <script src="/flot/jquery.flot.min.js"></script>
+  <script src="/flot/jquery.flot.crosshair.min.js"></script>
+  <script src="/flot/jquery.flot.fillbetween.min.js"></script>
+  <script src="/flot/jquery.flot.selection.min.js"></script>
+
+  <link rel="import" href="/components/polymer/polymer.html">
+
+  <link rel="import" href="/dashboard/elements/custom-tooltip.html">
+  <link rel="import" href="/dashboard/elements/load-analytics.html">
+  <link rel="import" href="/dashboard/elements/nav-bar.html">
+  <link rel="import" href="/dashboard/elements/overlay-message.html">
+  <link rel="import" href="/dashboard/elements/report-page.html">
+
+  <title>Chrome Performance Dashboard</title>
+
+  <style>
+    #message-bar {
+      top: 26px;
+    }
+  </style>
+
+  <script>
+    'use strict';
+    // Workaround for document.contains returning false for elements in the
+    // shadow DOM. jQuery mouse events need it to return true for scrolling
+    // to be properly accounted for. For background, see
+    // https://github.com/Polymer/polymer/issues/162 and
+    // https://www.w3.org/Bugs/Public/show_bug.cgi?id=22141
+    jQuery.contains = function(doc, elem) {
+      var charts = document.getElementsByTagName('report-page')[0].charts;
+      for (var i = 0; i < charts.length; i++) {
+        if (charts[i].shadowRoot.contains(elem)) {
+          return true;
+        }
+      }
+      return doc.contains(elem);
+    };
+  </script>
+</head>
+<body>
+  <nav-bar></nav-bar>
+  <h1>Chrome Performance Dashboard</h1>
+  <overlay-message id="message-bar"></overlay-message>
+  <!-- This custom-tooltip is used by chart-legend.
+       See https://github.com/catapult-project/catapult/issues/2172. -->
+  <custom-tooltip id="legend-details-tooltip"></custom-tooltip>
+  <report-page></report-page>
+  <load-analytics></load-analytics>
+</body>
+</html>
diff --git a/catapult/dashboard/dashboard/static/testing_common.html b/catapult/dashboard/dashboard/static/testing_common.html
index df4284a..f0c168f 100644
--- a/catapult/dashboard/dashboard/static/testing_common.html
+++ b/catapult/dashboard/dashboard/static/testing_common.html
@@ -10,6 +10,7 @@
 
 var testing_common = (function() {
 
+  var originalXMLHttpRequest = window.XMLHttpRequest;
   var fixture = null;
   var xhrMock = null;
 
@@ -31,10 +32,13 @@
       this.setRequestHeader = function() {};
       this.onload = function() {};
       this.send = function(param) {
-        var requestStr = this.url + '?' + param;
+        var requestStr = this.url + '?' + sortQueryPart(param);
         if (requestStr in self.responseMap_) {
           this.responseText = self.responseMap_[requestStr];
           this.onload();
+        } else if ('*' in self.responseMap_) {
+          this.responseText = self.responseMap_['*'];
+          this.onload();
         } else {
           console.warn('XMLHttpRequest sent without a handler: ' + requestStr);
         }
@@ -43,6 +47,9 @@
   };
 
   XMLHttpRequestMock.prototype.add = function(param, response) {
+    if (param != '*') {
+      param = sortQueryPart(param);
+    }
     this.responseMap_[param] = response;
   };
 
@@ -54,8 +61,7 @@
   };
 
   function clearXhrMock() {
-    // Reverts back to native XMLHttpRequest.
-    delete window.XMLHttpRequest;
+    window.XMLHttpRequest = originalXMLHttpRequest;
     xhrMock = null;
   };
 
@@ -82,12 +88,37 @@
     }
   };
 
+  var paramString = function(params) {
+    var keys = Object.keys(params).sort();
+    return keys.map(function(key) {
+      return key + '=' + encodeURIComponent(params[key]);
+    }).join('&');
+  };
+
+  var sortQueryPart = function(paramStr) {
+    var params = {};
+    var path = paramStr.substring(0, paramStr.indexOf('?') + 1);
+    var query = paramStr.substring(paramStr.indexOf('?') + 1);
+    var queryParts = query.split('&');
+    for (var i = 0; i < queryParts.length; i++) {
+      var pair = queryParts[i].split('=');
+      params[pair[0]] = pair.length == 1 ? '' : pair[1];
+    }
+    var keys = Object.keys(params).sort();
+    query = keys.map(function(key) {
+      return key + '=' + params[key];
+    }).join('&');
+    return path + query;
+  };
+
   return {
     addXhrMock: addXhrMock,
     clearXhrMock: clearXhrMock,
     deepCopy: deepCopy,
     addToFixture: addToFixture,
-    clearFixture: clearFixture
+    clearFixture: clearFixture,
+    paramString: paramString,
+    sortQueryPart: sortQueryPart
   };
 })();
 
diff --git a/catapult/dashboard/dashboard/static/testing_common_test.html b/catapult/dashboard/dashboard/static/testing_common_test.html
new file mode 100644
index 0000000..a108a55
--- /dev/null
+++ b/catapult/dashboard/dashboard/static/testing_common_test.html
@@ -0,0 +1,36 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/core/test_utils.html">
+
+<link rel="import" href="testing_common.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+
+  test('sortQueryPart sorts query string', function() {
+    var query = 'c=1&b=2&a=3';
+    assert.equal('a=3&b=2&c=1',
+                 testing_common.sortQueryPart(query));
+  });
+
+  test('sortQueryPart sorts query string with path', function() {
+    var query = 'path?c=1&b=2&a=3';
+    assert.equal('path?a=3&b=2&c=1',
+                 testing_common.sortQueryPart(query));
+  });
+
+  test('sortQueryPart does not support repeated keys', function() {
+    var query = 'path?a=1&a=3&a=2';
+    assert.equal('path?a=2',
+                 testing_common.sortQueryPart(query));
+  });
+
+});
+</script>
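
As the tests above show, sortQueryPart canonicalizes query strings so the XHR mock's response lookup is independent of parameter order (repeated keys collapse to the last value), and paramString builds a canonical query from a params object. A brief usage sketch (the handler path is illustrative):

```js
// Order-independent: both spellings map to the same canonical key.
testing_common.sortQueryPart('graph_json?c=1&b=2&a=3');
// => 'graph_json?a=3&b=2&c=1'

// Handy for registering mock responses keyed by a params object.
testing_common.paramString({'sheriff': 'Chromium Perf Sheriff',
                            'triaged': 'true'});
// => 'sheriff=Chromium%20Perf%20Sheriff&triaged=true'
```
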
diff --git a/catapult/dashboard/dashboard/static/testselection.html b/catapult/dashboard/dashboard/static/testselection.html
index 199f6e1..fb8c7e3 100644
--- a/catapult/dashboard/dashboard/static/testselection.html
+++ b/catapult/dashboard/dashboard/static/testselection.html
@@ -19,8 +19,8 @@
    * @return {boolean} True if the test is "important". Note that this is
    *     different from "important" flag for data that's sent to /add_point.
    */
-  var isImportant = function(test) {
-    return isChartLevel_(test) || isMonitored_(test);
+  var isImportant = function(test, testSuites) {
+    return isChartLevel_(test) || isMonitored_(test, testSuites);
   };
 
   /**
@@ -43,12 +43,12 @@
    * @param {string} test Test path, not including master/bot.
    * @return {boolean} True if the test name only has two parts.
    */
-  var isMonitored_ = function(test) {
+  var isMonitored_ = function(test, testSuites) {
     var parts = test.split('/');
     var testSuiteName = parts[0];
     var subTestPath = parts.slice(1).join('/');
-    if (window['TEST_SUITES'] && window['TEST_SUITES'][testSuiteName]) {
-      var important = window['TEST_SUITES'][testSuiteName]['mon'];
+    if (testSuites && testSuites[testSuiteName]) {
+      var important = testSuites[testSuiteName]['mon'];
       if (important && important.indexOf(subTestPath) != -1) {
         return true;
       }
@@ -62,7 +62,9 @@
    * as well as (optionally) the revision range to plot.
    * @constructor
    */
-  var TestSelection = function() {
+  var TestSelection = function(testSuites) {
+
+    this.testSuites = testSuites;
 
     /**
      * A mapping of test paths to lists of selected series.
@@ -149,7 +151,7 @@
     var core = {};
     for (var testPath in this.testPathDict_) {
       var test = testFromTestPath_(testPath);
-      if (isImportant(test)) {
+      if (isImportant(test, this.testSuites)) {
         core[test] = true;
         var ref = this.getCorrespondingRef_(testPath);
         if (ref) {
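
The testselection change threads testSuites through as an argument instead of reading the window['TEST_SUITES'] global, so the importance check can be exercised with a fixture. A minimal sketch, assuming TestSelection stays exported from the testselection module (the suite data below is illustrative):

```js
// Monitored sub-tests are listed under each suite's 'mon' key.
var testSuites = {
  'page_cycler.morejs': {'mon': ['cold_times/page_load_time']}
};

// The selection now consults the injected object; no global is required.
var selection = new testselection.TestSelection(testSuites);
```
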
diff --git a/catapult/dashboard/dashboard/static/uri.html b/catapult/dashboard/dashboard/static/uri.html
index eab252e..5fb0ce9 100644
--- a/catapult/dashboard/dashboard/static/uri.html
+++ b/catapult/dashboard/dashboard/static/uri.html
@@ -45,8 +45,8 @@
    * the event detail object.
    */
   Controller.prototype.load = function() {
-    var params = getAllParameters();
-    var sid = getParameter('sid');
+    var params = uri.getAllParameters();
+    var sid = uri.getParameter('sid');
     if (!params || !sid) {
       return;
     }
@@ -54,7 +54,7 @@
     var request = new XMLHttpRequest();
     request.onload = function() {
       var pageState = JSON.parse(request.responseText);
-      var params = getAllParameters();
+      var params = uri.getAllParameters();
       var uriLoadEvent = new CustomEvent('uriload', {
         'detail': {
           'params': params,
@@ -110,7 +110,7 @@
    * @param {Object} state Dictionary containing state data.
    */
   Controller.prototype.updateUri_ = function(state) {
-    var params = getAllParameters();
+    var params = uri.getAllParameters();
     if (!state || !state['params']) {
       params = {};
     } else {
@@ -124,7 +124,7 @@
       }
     }
     var uri_str = getCurrentPathWithParams(params);
-    if (uri_str == window.location.pathname + window.location.search) {
+    if (uri_str == window.location.pathname + uri.getQueryString()) {
       return;
     }
     window.history.pushState(state, '', uri_str);
@@ -204,7 +204,7 @@
   };
 
   /**
-   * Gets the named parameter from the URI querystring.
+   * Gets the named parameter from the URI query string.
    * @param {string} name The name of the parameter to get.
    * @param {?string=} opt_default The default to return.
    * @return {?string} The value of the parameter.
@@ -212,7 +212,7 @@
   var getParameter = function(name, opt_default) {
     name = name.replace(/[\[]/, '\\\[').replace(/[\]]/, '\\\]');
     var regex = new RegExp('[\\?&]' + name + '=([^&#]*)');
-    var results = regex.exec(window.location.search);
+    var results = regex.exec(uri.getQueryString());
     if (results != null) {
       return decodeURIComponent(results[1].replace(/\+/g, ' '));
     }
@@ -226,7 +226,7 @@
    */
   var getAllParameters = function() {
     var params = {};
-    var queryString = window.location.search.replace(/^\?/, '');
+    var queryString = uri.getQueryString().replace(/^\?/, '');
     if (!queryString) {
       return {};
     }
@@ -236,12 +236,17 @@
       if (pair.length == 1) {
         params[pair[0]] = '';
       } else {
-        params[pair[0]] = decodeURIComponent(pair[1]);
+        params[pair[0]] = decodeURIComponent(pair[1].replace(/\+/g, ' '));
       }
     }
     return params;
   };
 
+  /** Gets the query string, including "?", in a mockable way. */
+  var getQueryString = function() {
+    return window.location.search;
+  };
+
   /**
    * Returns a string which is the current path of the URI with the query
    * parameters from the given Object. This is suitable for usage with
@@ -260,7 +265,8 @@
     Controller: Controller,
     getParameter: getParameter,
     getAllParameters: getAllParameters,
-    getCurrentPathWithParams: getCurrentPathWithParams
+    getCurrentPathWithParams: getCurrentPathWithParams,
+    getQueryString: getQueryString
   };
 })();
 
diff --git a/catapult/dashboard/dashboard/static/uri_test.html b/catapult/dashboard/dashboard/static/uri_test.html
new file mode 100644
index 0000000..836bdef
--- /dev/null
+++ b/catapult/dashboard/dashboard/static/uri_test.html
@@ -0,0 +1,69 @@
+<!DOCTYPE html>
+<!--
+Copyright 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/core/test_utils.html">
+
+<link rel="import" href="/dashboard/static/uri.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+
+  var testOptions = {
+    setUp: function() {
+      window.originalGetQueryString_ = uri.getQueryString;
+    },
+
+    tearDown: function() {
+      uri.getQueryString = window.originalGetQueryString_;
+    }
+  };
+
+  test('getCurrentPathWithParams encodes value', function() {
+    assert.equal(
+        window.location.pathname + '?key=some%20value',
+        uri.getCurrentPathWithParams({'key': 'some value'}));
+  }, testOptions);
+
+  test('getParameter at start', function() {
+    uri.getQueryString = function() {
+      return '?foo=bar&x=y';
+    };
+    assert.equal('bar', uri.getParameter('foo'));
+  }, testOptions);
+
+  test('getParameter not at start', function() {
+    uri.getQueryString = function() {
+      return '?foo=bar&x=y';
+    };
+    assert.equal('y', uri.getParameter('x'));
+  }, testOptions);
+
+  test('getParameter decodes hex sequences and plus signs', function() {
+    uri.getQueryString = function() {
+      return '?foo=bar%2C+baz';
+    };
+    assert.equal('bar, baz', uri.getParameter('foo'));
+  }, testOptions);
+
+  test('getAllParameters basic', function() {
+    uri.getQueryString = function() {
+      return '?a=1&b=2';
+    };
+    assert.deepEqual({'a': '1', 'b': '2'}, uri.getAllParameters());
+  }, testOptions);
+
+  test('getAllParameters decodes hex sequences and plus signs', function() {
+    uri.getQueryString = function() {
+      return '?a=%3A%3A&b=x+y';
+    };
+    assert.deepEqual({'a': '::', 'b': 'x y'}, uri.getAllParameters());
+  }, testOptions);
+
+});
+</script>
diff --git a/catapult/dashboard/dashboard/stats_test.py b/catapult/dashboard/dashboard/stats_test.py
index ba28998..392ec06 100644
--- a/catapult/dashboard/dashboard/stats_test.py
+++ b/catapult/dashboard/dashboard/stats_test.py
@@ -53,7 +53,8 @@
         ('/stats_around_revision', stats.StatsAroundRevisionHandler),
         ('/stats_for_alerts', stats.StatsForAlertsHandler)])
     self.testapp = webtest.TestApp(app)
-    testing_common.SetInternalDomain('google.com')
+    testing_common.SetIsInternalUser('internal@chromium.org', True)
+    testing_common.SetIsInternalUser('foo@chromium.org', False)
 
   def _AddMockData(self):
     """Adds data which will be used in the around-revision stats tests below."""
@@ -85,9 +86,9 @@
   def _AddMockAlertSummaryData(self):
     """Adds data to be used in the alert-summary stats tests below."""
     correct_sheriff = sheriff.Sheriff(
-        id='Chromium Perf Sheriff', email='a@google.com', patterns=[]).put()
+        id='Chromium Perf Sheriff', patterns=[]).put()
     wrong_sheriff = sheriff.Sheriff(
-        id='Some other sheriff', email='b@google.com', patterns=[]).put()
+        id='Some other sheriff', patterns=[]).put()
 
     linux_sunspider = 'ChromiumPerf/linux-release/sunspider/Total'
     linux_octane = 'ChromiumPerf/linux-release/octane/Total'
@@ -176,7 +177,7 @@
 
   def testPost_NonInternalUser_ShowsErrorMessage(self):
     """Tests that the stats page is only shown when logged in."""
-    self.SetCurrentUser('foo@yahoo.com')
+    self.SetCurrentUser('foo@chromium.org')
     response = self.testapp.get('/stats')
     self.assertIn('Only logged-in internal users', response.body)
 
@@ -191,7 +192,7 @@
     self._AddMockAlertSummaryData()
 
     # The user must be an internal user.
-    self.SetCurrentUser('sullivan@google.com')
+    self.SetCurrentUser('internal@chromium.org')
 
     # Make the initial request to generate statistics.
     response = self.testapp.post(
@@ -263,7 +264,7 @@
   def testPost_AroundRevision(self):
     """Tests generation of around_revision statistics."""
     self._AddMockData()
-    self.SetCurrentUser('sullivan@google.com')
+    self.SetCurrentUser('internal@chromium.org')
 
     response = self.testapp.post('/stats', [
         ('type', 'around_revision'),
@@ -363,7 +364,7 @@
   def testPost_AroundRevisionWithOneBot(self):
     """Tests generation of around_revision stats for only one bot."""
     self._AddMockData()
-    self.SetCurrentUser('sullivan@google.com')
+    self.SetCurrentUser('internal@chromium.org')
 
     # Post a request to get around_revision stats.
     self.testapp.post('/stats', {
diff --git a/catapult/dashboard/dashboard/stored_object.py b/catapult/dashboard/dashboard/stored_object.py
index ef928d1..d0f8b96 100644
--- a/catapult/dashboard/dashboard/stored_object.py
+++ b/catapult/dashboard/dashboard/stored_object.py
@@ -113,7 +113,7 @@
     num_parts = len(serialized_parts)
     for i in xrange(num_parts):
       if serialized_parts[i] is not None:
-        part = PartEntity(id=i+1, parent=self.key, value=serialized_parts[i])
+        part = PartEntity(id=i + 1, parent=self.key, value=serialized_parts[i])
         part_list.append(part)
     self.size = num_parts
     ndb.put_multi(part_list + [self])
@@ -216,7 +216,7 @@
   length = len(serialized)
   values = []
   for i in xrange(0, length, _CHUNK_SIZE):
-    values.append(serialized[i:i+_CHUNK_SIZE])
+    values.append(serialized[i:i + _CHUNK_SIZE])
   for i in xrange(len(values), _MAX_NUM_PARTS):
     values.append(None)
   return values
diff --git a/catapult/dashboard/dashboard/task_runner.py b/catapult/dashboard/dashboard/task_runner.py
new file mode 100644
index 0000000..f924abb
--- /dev/null
+++ b/catapult/dashboard/dashboard/task_runner.py
@@ -0,0 +1,106 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A tool to run long running tasks.
+
+This allows a task to run on the App Engine Task Queue, which gives about 10
+minutes of execution time.
+
+Usage:
+
+In https://chromeperf.appspot.com/_ah/stats/shell, pass a function to
+task_runner.Run. The task function must be self-contained and include any
+required imports within the function's body.
+
+Example:
+
+  from dashboard import task_runner
+
+  def unique_test_suite_names():
+    from dashboard.models import graph_data
+    query = graph_data.Test.query(graph_data.Test.parent_test == None)
+    test_keys = query.fetch(limit=50000, keys_only=True)
+    return sorted(set(k.string_id() for k in test_keys))
+
+  task_runner.Run(unique_test_suite_names)
+
+The task function's return value and stdout will be displayed at:
+    https://chromeperf.appspot.com/get_logs?namespace=task_runner&name=report
+
+WARNING:
+Running code in Appstats affects the live dashboard, so watch out for any
+datastore writes that may corrupt or unintentionally delete data.
+"""
+
+import datetime
+import marshal
+import cStringIO
+import sys
+import time
+import types
+
+from google.appengine.ext import deferred
+
+from dashboard import quick_logger
+
+_TASK_QUEUE_NAME = 'task-runner-queue'
+
+_REPORT_TEMPLATE = """%(function_name)s: %(start_time)s
+ Stdout:
+ %(stdout)s
+
+ Elapsed: %(elapsed_time)f seconds.
+ Returned results:
+ %(returned_results)s
+"""
+
+
+def Run(task_function):
+  """Runs task in task queue."""
+  # Since defer uses pickle and pickle can't serialize non-global functions,
+  # we'll use marshal to serialize and deserialize the function code object
+  # before and after defer.
+  code_string = marshal.dumps(task_function.func_code)
+  deferred.defer(_TaskWrapper, code_string, task_function.__name__,
+                 _queue=_TASK_QUEUE_NAME)
+
+
+def _TaskWrapper(code_string, function_name):
+  """Runs the task and captures the stdout and the returned results."""
+  formatted_start_time = datetime.datetime.now().strftime(
+      '%Y-%m-%d %H:%M:%S %Z')
+  _AddReportToLog('Starting task "%s" at %s.' %
+                  (function_name, formatted_start_time))
+
+  code = marshal.loads(code_string)
+  task_function = types.FunctionType(code, globals(), 'TaskFunction')
+
+  stdout_original = sys.stdout
+  sys.stdout = stream = cStringIO.StringIO()
+  start_time = time.time()
+  try:
+    returned_results = task_function()
+  except Exception as e:  # Intentionally broad -- pylint: disable=broad-except
+    print str(e)
+    returned_results = ''
+  elapsed_time = time.time() - start_time
+  stdout = stream.getvalue()
+  sys.stdout = stdout_original
+
+  results = {
+      'function_name': function_name,
+      'start_time': formatted_start_time,
+      'stdout': stdout,
+      'returned_results': returned_results,
+      'elapsed_time': elapsed_time
+  }
+  _AddReportToLog(_REPORT_TEMPLATE % results)
+
+
+def _AddReportToLog(report):
+  """Adds a log for bench results."""
+  formatter = quick_logger.Formatter()
+  logger = quick_logger.QuickLogger('task_runner', 'report', formatter)
+  logger.Log(report)
+  logger.Save()
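As the comment in Run notes, deferred.defer pickles its arguments, so the task's
body is shipped as a marshaled code object and rebuilt with types.FunctionType
inside _TaskWrapper. The sketch below (plain Python 2 outside App Engine; the
sample_task and RebuiltTask names are illustrative) shows that round trip: only
the code object survives, which is why defaults, closures, and module-level
imports are lost and why imports must live inside the function body.

  import marshal
  import types

  def sample_task():
    # Imports must live inside the body; module-level state is not
    # carried along with the code object.
    import math
    return math.sqrt(16)

  # Serialize just the code object, as task_runner.Run does.
  code_string = marshal.dumps(sample_task.func_code)

  # Rebuild a callable from the code object alone, as _TaskWrapper does.
  rebuilt = types.FunctionType(marshal.loads(code_string), globals(),
                               'RebuiltTask')
  print rebuilt()  # prints 4.0
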
diff --git a/catapult/dashboard/dashboard/task_runner_test.py b/catapult/dashboard/dashboard/task_runner_test.py
new file mode 100644
index 0000000..21ac572
--- /dev/null
+++ b/catapult/dashboard/dashboard/task_runner_test.py
@@ -0,0 +1,46 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import mock
+
+from dashboard import task_runner
+from dashboard import testing_common
+
+
+class TaskRunnerTest(testing_common.TestCase):
+
+  def setUp(self):
+    super(TaskRunnerTest, self).setUp()
+
+  def _GetMockCallArg(self, function_mock, call_index):
+    """Gets the first argument value for the call at |call_index|.
+
+    Args:
+      function_mock: A Mock object.
+      call_index: The index at which the mocked function was called.
+
+    Returns:
+      The first argument value.
+    """
+    # See http://www.voidspace.org.uk/python/mock/helpers.html#call and
+    # http://www.voidspace.org.uk/python/mock/mock.html#mock.Mock.call_args_list
+    call_args_list = function_mock.call_args_list
+    if not call_args_list or len(call_args_list) <= call_index:
+      return None
+    args, _ = call_args_list[call_index]
+    return args[0]
+
+  @mock.patch.object(task_runner, '_AddReportToLog')
+  def testRun(self, add_report_to_log_mock):
+    def SampleTask():
+      print 'square root of 16'
+      return 16 ** (1 / 2.0)
+
+    task_runner.Run(SampleTask)
+
+    self.ExecuteDeferredTasks(task_runner._TASK_QUEUE_NAME)
+
+    call_arg = self._GetMockCallArg(add_report_to_log_mock, 1)
+    self.assertIn('4.0', call_arg)
+    self.assertIn('square root of 16', call_arg)
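_GetMockCallArg above leans on mock's call_args_list, which records every
invocation of the patched function in order and lets each entry be unpacked
into positional and keyword arguments. A minimal sketch of that behaviour
(the report_mock name and messages are illustrative):

  import mock

  report_mock = mock.Mock()
  report_mock('Starting task...')
  report_mock('full report', level='info')

  # Each recorded call unpacks into (positional_args, keyword_args).
  args, kwargs = report_mock.call_args_list[1]
  print args[0]   # 'full report'
  print kwargs    # {'level': 'info'}
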
diff --git a/catapult/dashboard/dashboard/templates/alerts.html b/catapult/dashboard/dashboard/templates/alerts.html
deleted file mode 100644
index e9289d6..0000000
--- a/catapult/dashboard/dashboard/templates/alerts.html
+++ /dev/null
@@ -1,110 +0,0 @@
-<!DOCTYPE html>
-<html>
-<head>
-  <link type="text/css" rel="stylesheet" href="/dashboard/static/base.css">
-  <link rel="import" href="/components/polymer/polymer.html">
-  <link rel="import" href="/components/paper-button/paper-button.html">
-
-  <link rel="import" href="/dashboard/elements/select-menu.html">
-  <link rel="import" href="/dashboard/elements/alerts-table.html">
-  <link rel="import" href="/dashboard/elements/overlay-message.html">
-  <link rel="import" href="/dashboard/elements/quick-log.html">
-  <link rel="import" href="/dashboard/static/alerts.html">
-
-  <title>Chrome Performance Alerts</title>
-  <script>
-    'use strict';
-    var ANOMALY_LIST = {{anomaly_list | safe}};
-    var STOPPAGE_ALERT_LIST = {{stoppage_alert_list | safe}};
-    var SHERIFF_LIST = {{sheriff_list | safe}};
-    var XSRF_TOKEN = '{{xsrf_token}}';
-    var NUM_ANOMALIES = {{num_anomalies | safe}};
-  </script>
-  <style>
-    body {
-      align-items: center;
-      -webkit-align-items: center;
-    }
-
-    .center {
-      margin: auto;
-      padding: 10px;
-    }
-
-    /* The action bar contains the graph button and triage button. */
-    #action-bar {
-      margin-top: 20px;
-      width: 100%;
-    }
-
-    /* The content container contains everything below the sheriff select menu. */
-    #content {
-      display: inline-flex;
-      display: -webkit-inline-flex;
-      flex-direction: column;
-      -webkit-flex-direction: column;
-      align-items: flex-start;
-      -webkit-align-items: flex-start;
-    }
-
-    /* This class indicates a button toggled on (e.g. show improvements). */
-    .alert-togglebutton {
-      float: right;
-      margin-left: 4px;
-      margin-right: 4px;
-    }
-
-    #auto-triage-log {
-      padding: 15px 0 15px 0;
-    }
-
-    #message-bar {
-      top: 26px;
-    }
-
-    #anomaly-table, #stoppage-alert-table {
-      width: 100%;
-    }
-  </style>
-</head>
-<body>
-  {% include 'nav.html' %}
-  <h1>Chrome Performance Alerts</h1>
-  <overlay-message id="message-bar"></overlay-message>
-  <div id="content">
-    <div id="action-bar">
-      <select-menu id="sheriff-select"></select-menu>
-      <paper-button raised noink id="improvements-toggle"
-                    class="alert-togglebutton">Show improvements</paper-button>
-      <paper-button raised noink id="triaged-toggle"
-                    class="alert-togglebutton">Show triaged</paper-button>
-    </div>
-    {% if have_anomalies %}
-      <h2>Performance alerts</h2>
-      <p id='num-alerts'></p>
-      <script>
-        'use strict';
-        var numAlertsElement = document.getElementById('num-alerts');
-        var numAlerts = NUM_ANOMALIES;
-        numAlertsElement.innerText = numAlerts + (
-            numAlerts == 1 ? ' alert.' : ' alerts.');
-        document.title += ' (' + numAlerts + ')';
-      </script>
-      <alerts-table id="anomaly-table" xsrfToken="{{xsrf_token | safe}}">
-      </alerts-table>
-    {% endif %}
-    {% if have_stoppage_alerts %}
-      <h2>Data stoppage alerts</h2>
-      <alerts-table id="stoppage-alert-table" xsrfToken="{{xsrf_token | safe}}">
-      </alerts-table>
-    {% endif %}
-    {% if not have_anomalies and not have_stoppage_alerts %}
-      <h2 class="center">All alerts triaged!</h2>
-      <img class="center" src="http://thecatapi.com/api/images/get?api_key=MjUzMDQ&category=space&size=small">
-    {% endif %}
-    <quick-log id="auto-triage-log" xsrfToken="{{xsrf_token | safe}}"
-               style="width:100%; display:block;"></quick-log>
-  </div>
-  {% include 'analytics.html' %}
-</body>
-</html>
diff --git a/catapult/dashboard/dashboard/templates/analytics.html b/catapult/dashboard/dashboard/templates/analytics.html
deleted file mode 100644
index d2874dd..0000000
--- a/catapult/dashboard/dashboard/templates/analytics.html
+++ /dev/null
@@ -1,10 +0,0 @@
-<script>
-  (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
-  (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
-  m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
-  })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
-
-  ga('create', 'UA-56758330-1', 'auto');
-  ga('send', 'pageview');
-
-</script>
diff --git a/catapult/dashboard/dashboard/templates/bad_bisect.html b/catapult/dashboard/dashboard/templates/bad_bisect.html
new file mode 100644
index 0000000..3c52ea3
--- /dev/null
+++ b/catapult/dashboard/dashboard/templates/bad_bisect.html
@@ -0,0 +1,27 @@
+<!DOCTYPE html>
+<html>
+<head>
+  <link type="text/css" rel="stylesheet" href="/dashboard/static/base.css">
+  <link rel="import" href="/dashboard/elements/nav-bar.html">
+  <link rel="import" href="/dashboard/elements/primary-button.html">
+  <style>
+    a {
+      text-decoration: none;
+    }
+  </style>
+</head>
+<body>
+  <nav-bar></nav-bar>
+
+  <form action="/bad_bisect" method="POST">
+    {{xsrf_input|safe}}
+    <input type="hidden" name="try_job_id" value="{{try_job_id}}">
+    <p>
+      <button class="mini" type="submit" is="primary-button">
+      Confirm</button>
+      that bisect job <a href="#">{{try_job_id}}</a> is incorrect.
+    </p>
+  </form>
+
+</body>
+</html>
diff --git a/catapult/dashboard/dashboard/templates/bisect_stats.html b/catapult/dashboard/dashboard/templates/bisect_stats.html
index 1826eed..b262fa0 100644
--- a/catapult/dashboard/dashboard/templates/bisect_stats.html
+++ b/catapult/dashboard/dashboard/templates/bisect_stats.html
@@ -1,4 +1,9 @@
 <!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
 <html>
 <head>
   <link type="text/css" rel="stylesheet" href="/dashboard/static/base.css">
@@ -21,10 +26,11 @@
   </style>
 
   <link rel="import" href="/components/polymer/polymer.html">
+  <link rel="import" href="/dashboard/elements/nav-bar.html">
   <link rel="import" href="/dashboard/elements/quick-log.html">
 </head>
 <body>
-  {% include 'nav.html' %}
+  <nav-bar></nav-bar>
   <h1>Bisect Statistics</h1>
 
   <div id="content">
diff --git a/catapult/dashboard/dashboard/templates/bot_whitelist.html b/catapult/dashboard/dashboard/templates/bot_whitelist.html
deleted file mode 100644
index 35217c0..0000000
--- a/catapult/dashboard/dashboard/templates/bot_whitelist.html
+++ /dev/null
@@ -1,25 +0,0 @@
-<!DOCTYPE html>
-<html>
-<head>
-  <link type="text/css" rel="stylesheet" href="/dashboard/static/base.css">
-</head>
-<body>
-  {% include 'nav.html' %}
-  <p>The bot perf_ids listed below are externally visible. All the tests and
-  data for these bots will be shown to external users. If a perf_id is not on
-  this list, all the tests and data for the bot will be visible only to users
-  logged in with google.com accounts.</p>
-
-  <p>Note: If there is already existing data for this bot, it will not be shown
-  until you <a href="/change_internal_only">update the internal_only
-  property</a>.</p>
-
-  <form action="/bot_whitelist" method="POST">
-    {{xsrf_input|safe}}
-    <textarea rows=50 cols=100
-              name="bot_whitelist">{{bot_whitelist}}</textarea>
-    <br>
-    <input type="submit" value="Update Bot Whitelist">
-  </form>
-</body>
-</html>
diff --git a/catapult/dashboard/dashboard/templates/bug_result.html b/catapult/dashboard/dashboard/templates/bug_result.html
index 4bb873a..e7f82a1 100644
--- a/catapult/dashboard/dashboard/templates/bug_result.html
+++ b/catapult/dashboard/dashboard/templates/bug_result.html
@@ -1,5 +1,10 @@
 <!DOCTYPE html>
 <!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<!--
 This template contains HTML for the following things:
  - A form for creating a new bug for an alert.
  - A form for choosing an existing bug number for an alert.
@@ -222,6 +227,13 @@
       </label><br>
       {% endfor %}
       <br>
+      <b>Components:</b><br>
+      {% for component in components %}
+      <label>
+        <input type="checkbox" checked name="component" value="{{component}}">
+        {{component}}
+      </label><br>
+      {% endfor %}
       <b>Owner:</b><br>
       <input type="text" name="owner" value="{{owner}}"><br><br>
       <input type="submit">
diff --git a/catapult/dashboard/dashboard/templates/buildbucket_job_status.html b/catapult/dashboard/dashboard/templates/buildbucket_job_status.html
index 9dd7518..005e8a2 100644
--- a/catapult/dashboard/dashboard/templates/buildbucket_job_status.html
+++ b/catapult/dashboard/dashboard/templates/buildbucket_job_status.html
@@ -1,10 +1,16 @@
 <!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
 <html>
 <head>
   <link type="text/css" rel="stylesheet" href="/dashboard/static/base.css">
+  <link rel="import" href="/dashboard/elements/nav-bar.html">
 </head>
 <body>
-{% include 'nav.html' %}
+  <nav-bar></nav-bar>
 {% if error %}
   <pre>
   Status of the bisect job {{job_id}}: ERROR, {{error}}
diff --git a/catapult/dashboard/dashboard/templates/change_internal_only.html b/catapult/dashboard/dashboard/templates/change_internal_only.html
index e0d701c..a39555e 100644
--- a/catapult/dashboard/dashboard/templates/change_internal_only.html
+++ b/catapult/dashboard/dashboard/templates/change_internal_only.html
@@ -1,7 +1,13 @@
 <!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
 <html>
 <head>
   <link type="text/css" rel="stylesheet" href="/dashboard/static/base.css">
+  <link rel="import" href="/dashboard/elements/nav-bar.html">
   <style>
     /* The content container contains everything after the title. */
     #content {
@@ -27,7 +33,7 @@
   </style>
 </head>
 <body>
-  {% include 'nav.html' %}
+  <nav-bar></nav-bar>
   <h1>Change internal_only Property</h1>
   <div id="content">
 
diff --git a/catapult/dashboard/dashboard/templates/debug_alert.html b/catapult/dashboard/dashboard/templates/debug_alert.html
index fa55b58..9ac7067 100644
--- a/catapult/dashboard/dashboard/templates/debug_alert.html
+++ b/catapult/dashboard/dashboard/templates/debug_alert.html
@@ -1,7 +1,13 @@
 <!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
 <html>
 <head>
   <link type="text/css" rel="stylesheet" href="/dashboard/static/base.css">
+  <link rel="import" href="/dashboard/elements/nav-bar.html">
   <title>Anomaly Detection Function Debugger</title>
 
   <script src="/jquery/jquery-2.1.4.min.js"></script>
@@ -28,7 +34,7 @@
   </style>
 </head>
 <body>
-  {% include 'nav.html' %}
+  <nav-bar></nav-bar>
   <h1>Anomaly Detection Function Debugger</h1>
   <div id="content">
     <p>This page shows the results of running the anomaly detection function
diff --git a/catapult/dashboard/dashboard/templates/edit_anomaly_configs.html b/catapult/dashboard/dashboard/templates/edit_anomaly_configs.html
index feca4af..74ce0d0 100644
--- a/catapult/dashboard/dashboard/templates/edit_anomaly_configs.html
+++ b/catapult/dashboard/dashboard/templates/edit_anomaly_configs.html
@@ -1,7 +1,13 @@
 <!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
 <html>
 <head>
   <link type="text/css" rel="stylesheet" href="/dashboard/static/base.css">
+  <link rel="import" href="/dashboard/elements/nav-bar.html">
   <title>Edit Anomaly Configs</title>
   <script>
     'use strict';
@@ -53,7 +59,7 @@
   </script>
 </head>
 <body>
-  {% include 'nav.html' %}
+  <nav-bar></nav-bar>
   <h1>Edit Anomaly Configs</h1>
 
   <p><b>Note</b>: If the test path patterns of two different anomaly
diff --git a/catapult/dashboard/dashboard/templates/edit_bug_labels.html b/catapult/dashboard/dashboard/templates/edit_bug_labels.html
index eedd0c3..5e431c3 100644
--- a/catapult/dashboard/dashboard/templates/edit_bug_labels.html
+++ b/catapult/dashboard/dashboard/templates/edit_bug_labels.html
@@ -1,7 +1,13 @@
 <!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
 <html>
 <head>
   <link type="text/css" rel="stylesheet" href="/dashboard/static/base.css">
+  <link rel="import" href="/dashboard/elements/nav-bar.html">
   <title>Edit Bug Labels</title>
   <script>
   'use strict';
@@ -43,7 +49,7 @@
   </script>
 </head>
 <body>
-  {% include 'nav.html' %}
+  <nav-bar></nav-bar>
   <h1>Edit Bug Labels</h1>
 
   <form method="POST">
diff --git a/catapult/dashboard/dashboard/templates/edit_sheriffs.html b/catapult/dashboard/dashboard/templates/edit_sheriffs.html
index 61334dc..ba9d933 100644
--- a/catapult/dashboard/dashboard/templates/edit_sheriffs.html
+++ b/catapult/dashboard/dashboard/templates/edit_sheriffs.html
@@ -1,7 +1,13 @@
 <!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
 <html>
 <head>
   <link type="text/css" rel="stylesheet" href="/dashboard/static/base.css">
+  <link rel="import" href="/dashboard/elements/nav-bar.html">
   <title>Edit Sheriff Rotations</title>
   <script>
   'use strict';
@@ -95,7 +101,7 @@
   </script>
 </head>
 <body>
-  {% include 'nav.html' %}
+  <nav-bar></nav-bar>
   <h1>Edit Sheriffs</h1>
 
   <p><b>Note</b>: If the test path patterns of two different sheriffs
diff --git a/catapult/dashboard/dashboard/templates/edit_site_config.html b/catapult/dashboard/dashboard/templates/edit_site_config.html
index 612ecb0..cbe8253 100644
--- a/catapult/dashboard/dashboard/templates/edit_site_config.html
+++ b/catapult/dashboard/dashboard/templates/edit_site_config.html
@@ -1,7 +1,13 @@
 <!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
 <html>
 <head>
   <link type="text/css" rel="stylesheet" href="/dashboard/static/base.css">
+  <link rel="import" href="/dashboard/elements/nav-bar.html">
   <style>
     textarea {
       height: 20em;
@@ -10,7 +16,7 @@
   </style>
 </head>
 <body>
-  {% include 'nav.html' %}
+  <nav-bar></nav-bar>
   <p class="error">{{error}}</p>
   {% if key %}
     <h2>Key: {{key}}</h2>
@@ -38,14 +44,33 @@
     </form>
   {% else %}
     <h2>Keys</h2>
+    <h3>Non-namespaced</h3>
     <ul>
-      <li><a href="?key=bisect_bot_map">bisect_bot_map</a>: map of bot names to bisect tester bot names.
-      <li><a href="?key=bisect_builder_types">bisect_builder_types</a>: map of master names to bisect test types.
-      <li><a href="?key=internal_domain_key">internal_domain_key</a>: domain of "internal" users.
-      <li><a href="?key=ip_whitelist">ip_whitelist</a>: list of whitelisted sender IP addresses.
-      <li><a href="?key=recipe_tester_director_map">recipe_tester_director_map</a>: map of tester names to bisect director name.
-      <li><a href="?key=revision_info">revision_info</a>: repository names and change log URLs.
-      <li><a href="?key=sheriff_domains_key">sheriff_domains_key</a>: (domains of users who can triage alerts.
+      <li><a href="?key=bisect_fyi_config_map">bisect_fyi_config_map</a>:
+        Bisect FYI test cases.</li>
+      <li><a href="?key=internal_domain_key">internal_domain_key</a>:
+        The domain users must belong to in order to view internal data.</li>
+      <li><a href="?key=ip_whitelist">ip_whitelist</a>:
+        List of whitelisted sender IP addresses.</li>
+      <li><a href="?key=project_id">project_id</a>:
+        Project ID used for ticking custom metrics.</li>
+      <li><a href="?key=recipe_tester_director_map">recipe_tester_director_map</a>:
+        Map of tester names to bisector bot for recipe bisect.</li>
+      <li><a href="?key=sheriff_domains_key">sheriff_domains_key</a>:
+        The domains of users who can triage alerts.</li>
+    </ul>
+    <h3>Namespaced</h3>
+    <ul>
+      <li><a href="?key=bisect_bot_map">bisect_bot_map</a>:
+        Association between bot names and bisect bot names.</li>
+      <li><a href="?key=bisect_builder_types">bisect_builder_types</a>:
+        Map of master names to bisect "test types".</li>
+      <li><a href="?key=bot_browser_map">bot_browser_map</a>:
+        Browsers to use for Telemetry.</li>
+      <li><a href="?key=master_try_server_map">master_try_server_map</a>:
+        Try servers to use for bisect jobs.</li>
+      <li><a href="?key=revision_info">revision_info</a>:
+        Repository names and change log viewers.</li>
     </ul>
   {% endif %}
 </body>
diff --git a/catapult/dashboard/dashboard/templates/edit_test_owners.html b/catapult/dashboard/dashboard/templates/edit_test_owners.html
index a233bc7..591a238 100644
--- a/catapult/dashboard/dashboard/templates/edit_test_owners.html
+++ b/catapult/dashboard/dashboard/templates/edit_test_owners.html
@@ -1,4 +1,9 @@
 <!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
 <html>
 <head>
   <link type="text/css" rel="stylesheet" href="/dashboard/static/base.css">
@@ -11,6 +16,8 @@
   </script>
 
   <link rel="import" href="/components/polymer/polymer.html">
+  <link rel="import" href="/dashboard/elements/load-analytics.html">
+  <link rel="import" href="/dashboard/elements/nav-bar.html">
   <link rel="import" href="/dashboard/elements/editable-list.html">
 
   <style>
@@ -20,7 +27,7 @@
   </style>
 </head>
 <body>
-  {% include 'nav.html' %}
+  <nav-bar></nav-bar>
   <h1>Chrome Performance Test Owners</h1>
   <div id="content">
     <editable-list id="edit-owner-list"
@@ -46,6 +53,6 @@
 
     document.addEventListener('polymer-ready', init, false);
   </script>
-  {% include 'analytics.html' %}
+  <load-analytics></load-analytics>
 </body>
 </html>
diff --git a/catapult/dashboard/dashboard/templates/embed.html b/catapult/dashboard/dashboard/templates/embed.html
deleted file mode 100644
index 2271a53..0000000
--- a/catapult/dashboard/dashboard/templates/embed.html
+++ /dev/null
@@ -1,57 +0,0 @@
-<!doctype html>
-<html>
-<head>
-  <title>Embedded Graph Page</title>
-  <script>
-    var GRAPH_DATA = {{graph_data|safe}};
-    var REVISION_INFO = {{revision_info|safe}};
-    var XSRF_TOKEN = '{{xsrf_token}}';
-  </script>
-
-  <script src="/jquery/jquery-2.1.4.min.js"></script>
-  <script src="/flot/jquery.flot.min.js"></script>
-  <script src="/flot/jquery.flot.crosshair.min.js"></script>
-
-  <style>
-    html {
-      height: 100%;
-    }
-
-    body {
-      color: #222;
-      background-color: #fff;
-      font-family: Arial;
-      font-size: 13px;
-      height: 100%;
-    }
-
-    #chart {
-      height: 100%;
-      width: 100%;
-    }
-
-    #legend {
-      background-color: rgba(240, 240, 240, 0.5);
-      border: 1px solid #2d2d2d;
-      padding: 5px;
-      position: absolute;
-      top: 20px;
-    }
-
-    #legend dt::after {
-      content: ":";
-    }
-
-    #legend dd {
-      font-weight: bold;
-    }
-  </style>
-</head>
-<body>
-  <div id="chart"></div>
-  <div id="legend" style="display: none;"></div>
-
-  <link rel="import" href="/dashboard/static/embed.html">
-
-</body>
-</html>
diff --git a/catapult/dashboard/dashboard/templates/group_report.html b/catapult/dashboard/dashboard/templates/group_report.html
deleted file mode 100644
index c17fb84..0000000
--- a/catapult/dashboard/dashboard/templates/group_report.html
+++ /dev/null
@@ -1,149 +0,0 @@
-<!DOCTYPE html>
-<html>
-<head>
-  <link type="text/css" rel="stylesheet" href="/dashboard/static/base.css">
-  <title>Chrome Performance Dashboard</title>
-
-  <script>
-    'use strict';
-    var ALERT_LIST = {{alert_list | safe}};
-    var OWNER_INFO = {{owner_info | safe}};
-    var SUBTESTS = {{subtests | safe}};
-    var TEST_SUITES = {{test_suites | safe}};
-    var REVISION_INFO = {{revision_info | safe}};
-    var IS_INTERNAL_USER = ('{{is_internal_user}}' == 'True');
-    var XSRF_TOKEN = '{{xsrf_token}}';
-  </script>
-
-  <script src="/jquery/jquery-2.1.4.min.js"></script>
-  <script src="/flot/jquery.flot.min.js"></script>
-  <script src="/flot/jquery.flot.crosshair.min.js"></script>
-  <script src="/flot/jquery.flot.fillbetween.min.js"></script>
-  <script src="/flot/jquery.flot.selection.min.js"></script>
-
-  <link rel="import" href="/components/polymer/polymer.html">
-  <link rel="import" href="/components/paper-button/paper-button.html">
-
-  <link rel="import" href="/dashboard/elements/alerts-table.html">
-  <link rel="import" href="/dashboard/elements/bug-info.html">
-  <link rel="import" href="/dashboard/elements/chart-container.html">
-  <link rel="import" href="/dashboard/elements/custom-tooltip.html">
-  <link rel="import" href="/dashboard/elements/login-warning.html">
-  <link rel="import" href="/dashboard/elements/overlay-message.html">
-  <link rel="import" href="/dashboard/elements/quick-log.html">
-  <link rel="import" href="/dashboard/elements/triage-dialog.html">
-  <link rel="import" href="/dashboard/static/group_report.html">
-
-  <script>
-    // Workaround for document.contains returning false for elements in the
-    // shadow DOM. jQuery mouse events need it to return true for scrolling
-    // to be properly accounted for. For background, see
-    // https://github.com/Polymer/polymer/issues/162 and
-    // https://www.w3.org/Bugs/Public/show_bug.cgi?id=22141
-    jQuery.contains = function(doc, elem) {
-      var shadowElems = document.getElementsByTagName('chart-container');
-      for (var i = 0; i < shadowElems.length; i++) {
-        if (shadowElems[i].shadowRoot.contains(elem)) {
-          return true;
-        }
-      }
-      return doc.contains(elem);
-    };
-  </script>
-
-  <style>
-    body {
-      align-items: center;
-      -webkit-align-items: center;
-    }
-
-    /* The action bar contains the graph button and triage button. */
-    #action-bar {
-      margin-top: 20px;
-      width: 100%;
-    }
-
-    /* The top container contains the action bar and alerts list. */
-    #top {
-      display: inline-flex;
-      display: -webkit-inline-flex;
-      flex-direction: column;
-      -webkit-flex-direction: column;
-      align-items: flex-start;
-      -webkit-align-items: flex-start;
-      margin-bottom: 15px;
-      width: 100%
-    }
-
-    /* The bottom container contains the charts. */
-    #bottom {
-      display: flex;
-      display: -webkit-flex;
-      flex-direction: column;
-      -webkit-flex-direction: column;
-      min-width: 100%;
-      min-height: 100%;
-    }
-
-    /* Triage dialog at the top level when the user clicks the triage button. */
-    triage-dialog {
-      position: absolute;
-      margin-top: 30px;
-      z-index: 1000;
-    }
-
-    /* This class indicates a button toggled on (e.g. show improvements). */
-    .alert-togglebutton {
-      float: right;
-      margin-left: 4px;
-      margin-right: 4px;
-    }
-
-    #bisect-result-log {
-      width: 100%;
-      display: block;
-    }
-  </style>
-</head>
-<body>
-  {% include 'nav.html' %}
-  <h1>Chrome Performance Dashboard</h1>
-  <overlay-message id="message-bar"></overlay-message>
-  {% if warning_message %}
-    <overlay-message id="warning-message" opened="true" autoCloseDisabled duration="-1">
-    {{warning_message}}
-    {% if warning_bug %}
-      <a href="https://github.com/catapult-project/catapult/issues/{{warning_bug}}">See bug #{{warning_bug}}.</a>
-    {% endif %}
-    </overlay-message>
-  {% endif %}
-  <login-warning id="login-warning" loginLink="{{login_url}}"
-                 {% if is_internal_user %}hidden="true"{% endif %}>
-  </login-warning>
-  <div id="top">
-    <div id="action-bar">
-      <paper-button toggle raised id="improvements-toggle" class="alert-togglebutton">
-        Show all improvements
-      </paper-button>
-    </div>
-    <bug-info id="bug-info" xsrfToken="{{xsrf_token | safe}}"></bug-info>
-    {% if bug_id %}
-    <quick-log id="bisect-result-log"
-               xsrfToken="{{xsrf_token | safe}}"
-               logNamespace="bisect_result"
-               logName="{{bug_id}}"
-               logLabel="Bisect results"
-               loadOnReady="true"
-               expandOnReady="true"></quick-log>
-    {% endif %}
-    <alerts-table id="alerts-table" xsrfToken="{{xsrf_token | safe}}"></alerts-table>
-  </div>
-
-  <div id="bottom">
-    <section id="charts-container"></section>
-  </div>
-
-  <custom-tooltip id="tooltip"></custom-tooltip>
-  {% include 'analytics.html' %}
-</body>
-</html>
diff --git a/catapult/dashboard/dashboard/templates/load_from_prod.html b/catapult/dashboard/dashboard/templates/load_from_prod.html
index abdbf00..7039a75 100644
--- a/catapult/dashboard/dashboard/templates/load_from_prod.html
+++ b/catapult/dashboard/dashboard/templates/load_from_prod.html
@@ -1,11 +1,17 @@
 <!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
 <html>
 <head>
   <link type="text/css" rel="stylesheet" href="/dashboard/static/base.css">
+  <link rel="import" href="/dashboard/elements/nav-bar.html">
   <title>Load Data From Production</title>
 </head>
 <body>
-  {% include 'nav.html' %}
+  <nav-bar></nav-bar>
   <h1>Load Data From Production</h1>
   <p>Use this form to load a given master/system/test/graph/trace and all
   associated alerts from prod onto a dev server.</p>
diff --git a/catapult/dashboard/dashboard/templates/main.html b/catapult/dashboard/dashboard/templates/main.html
index 9eb223b..4901124 100644
--- a/catapult/dashboard/dashboard/templates/main.html
+++ b/catapult/dashboard/dashboard/templates/main.html
@@ -1,4 +1,9 @@
 <!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
 
 {% macro link(anomaly) -%}
 <a href="{{anomaly.dashboard_link}}"
@@ -50,7 +55,11 @@
 <html>
 <head>
   <link type="text/css" rel="stylesheet" href="/dashboard/static/base.css">
+  <link rel="import" href="/components/polymer/polymer.html">
+  <link rel="import" href="/dashboard/elements/load-analytics.html">
+  <link rel="import" href="/dashboard/elements/nav-bar.html">
   <title>Chrome Performance Dashboard</title>
+  <script>// Placeholder for analytics</script>
 
   <style>
     /* The #content element contains everything after the title. */
@@ -123,7 +132,7 @@
   </style>
 </head>
 <body>
-  {% include 'nav.html' %}
+  <nav-bar></nav-bar>
   <h1>Chrome Performance Dashboard</h1>
   <div id="content">
     <h2>Top {{num_changes}} improvements in the last {{num_days}} days
@@ -133,37 +142,7 @@
     <h2>Top {{num_changes}} regressions in the last {{num_days}} days
     for {{sheriff_name}}</h2>
     {{anomaly_table(regressions)}}
-
-    {% if bugs %}
-      <h2>Bugs in the last {{num_days}} days</h2>
-      <table>
-        <thead>
-          <tr class="header">
-            <th>Bug</th>
-            <th>State</th>
-            <th>Status</th>
-            <th>Author</th>
-            <th>Summary</th>
-          </tr>
-        </thead>
-        <tbody>
-        {% for bug in bugs %}
-          {% if bug.status != 'WontFix' and bug.status != 'Duplicate' %}
-            <tr class="row-{{bug.state}}">
-              <td><a href="http://crbug.com/{{bug.id}}"
-                     target="_blank"
-                     title="View bug">{{bug.id}}</a></td>
-              <td>{{bug.state}}</td>
-              <td>{{bug.status}}</td>
-              <td>{{bug.author.name}}</td>
-              <td>{{bug.summary}}</td>
-            </tr>
-          {% endif %}
-        {% endfor %}
-        </tbody>
-      </table>
-    {% endif %}
   </div>
-  {% include 'analytics.html' %}
+  <load-analytics></load-analytics>
   </body>
 </html>
diff --git a/catapult/dashboard/dashboard/templates/migrate_test_names.html b/catapult/dashboard/dashboard/templates/migrate_test_names.html
index 5cf2d16..a3decf7 100644
--- a/catapult/dashboard/dashboard/templates/migrate_test_names.html
+++ b/catapult/dashboard/dashboard/templates/migrate_test_names.html
@@ -1,7 +1,13 @@
 <!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
 <html>
 <head>
   <link type="text/css" rel="stylesheet" href="/dashboard/static/base.css">
+  <link rel="import" href="/dashboard/elements/nav-bar.html">
   <title>Migrate Test Names</title>
   <script>
     'use strict';
@@ -34,7 +40,7 @@
   </script>
 </head>
 <body>
-  {% include 'nav.html' %}
+  <nav-bar></nav-bar>
   <h1>Migrate Test Names</h1>
 
   <h2 style="color:red">If a test name has changed,
diff --git a/catapult/dashboard/dashboard/templates/nav.html b/catapult/dashboard/dashboard/templates/nav.html
deleted file mode 100644
index b6a1606..0000000
--- a/catapult/dashboard/dashboard/templates/nav.html
+++ /dev/null
@@ -1,294 +0,0 @@
-<link rel="import" href="/components/polymer/polymer.html">
-<link rel="import" href="/components/core-icons/core-icons.html">
-
-<script async>
-  'use strict';
-  /**
-   * Opens a window with new Chromium bug, pre-filled with some info.
-   * @param {string} summary The title of the bug.
-   * @param {string} comment The description of the bug.
-   * @param {Array} labels A list of labels to apply to the bug.
-   * @param {Array} cc A list of email addresses to cc on the bug.
-   */
-  function openCrBugWindow(summary, comment, labels, cc) {
-    var url = 'https://code.google.com/p/chromium/issues/entry?';
-    url += [
-      'summary=' + encodeURIComponent(summary),
-      'comment=' + encodeURIComponent(comment),
-      'labels=' + encodeURIComponent(labels.join(',')),
-      'cc=' + encodeURIComponent(cc.join(','))
-    ].join('&');
-    window.open(url, '_blank');
-  }
-
-  /**
-   * Opens a window with new GitHub issue, pre-filled with some info.
-   * @param {string} summary The title of the bug.
-   * @param {string} comment The description of the bug.
-   * @param {string} label Label to apply to the bug.
-   */
-  function openGitHubIssueWindow(summary, comment, label) {
-    var url = 'https://github.com/catapult-project/catapult/issues/new?';
-    url += [
-      'title=' + encodeURIComponent(summary),
-      'body=' + encodeURIComponent(comment),
-      'labels=' + encodeURIComponent(label),
-    ].join('&');
-    window.open(url, '_blank');
-  }
-
-  /**
-   * Opens a window to report a general dashboard bug.
-   */
-  function reportBug() {
-    var os = guessOS();
-    var chromeVersion = 'unknown';
-    var chromeVersionMatch = navigator.userAgent.match(/Chrome\/(\S*)/);
-    if (chromeVersionMatch) {
-      chromeVersion = chromeVersionMatch[1];
-    }
-    var description = 'Chrome version: ' + chromeVersion + ' (' + os + ')\n';
-    description += 'URL: ' + document.location.href + '\n\n';
-    description += 'Please copy and paste any errors from JavaScript console (';
-    description += (os == 'Mac' ? 'Command+Option+J' : 'Ctrl+Shift+J');
-    description += ' to open):\n\n';
-    description += 'Please describe the problem:\n';
-    openGitHubIssueWindow('Perf Dashboard: ', description, 'Perf Dashboard');
-  }
-
-  /**
-   * Guesses user's OS from user agent string (for pre-filling bug labels).
-   * @return {string} The name of an OS.
-   */
-  function guessOS() {
-    var userAgentContains = function(s) {
-      return navigator.userAgent.indexOf(s) != -1;
-    };
-    if (userAgentContains('CrOS')) {
-      return 'Chrome OS';
-    } else if (userAgentContains('Windows')) {
-      return 'Windows';
-    } else if (userAgentContains('Macintosh')) {
-      return 'Mac';
-    } else if (userAgentContains('Linux')) {
-      return 'Linux';
-    } else if (userAgentContains('Android')) {
-      return 'Android';
-    }
-    return 'unknown';
-  }
-
-  function fileIpWhitelistRequest() {
-    var description = 'Please whitelist the following IP addresses ' +
-        'to send data to the Chrome Perf Dashboard:\n' +
-        '<IP ADDRESSES HERE>\n\n' +
-        'These buildbots are for:\n';
-    var labels = ['Performance-Dashboard-IPWhitelist', 'Restrict-View-Google'];
-    openCrBugWindow('IP Whitelist Request', description, labels, []);
-  }
-
-  function fileBotWhitelistRequest() {
-    var description = 'Please make the following bots and all their data ' +
-        'publicly available, with no google.com login required: \n' +
-        '<BOT NAMES HERE>\n\n';
-    var labels = ['Performance-Dashboard-BotWhitelist', 'Restrict-View-Google'];
-    openCrBugWindow('Bot Whitelist Request', description, labels, []);
-  }
-
-  function fileMonitoringRequest() {
-    var description = 'Please add monitoring for the following tests:\n\n' +
-        'Test owner (see http://go/perf-test-owners):\n' +
-        'Buildbot master name:\n' +
-        'Test suite names:\n' +
-        'Restrict to these specific traces (if any):\n' +
-        'Email address and/or URL of sheriff rotation: \n' +
-        'Receive individual email alerts immediately or as a daily summary?\n' +
-        'Should these alerts be Google-internal?\n';
-    var labels = [
-      'Performance-Dashboard-MonitoringRequest',
-      'Restrict-View-Google'
-    ];
-    openCrBugWindow('Monitoring Request', description, labels, []);
-  }
-</script>
-
-<style>
-  #navbar {
-    width: 100%;
-  }
-
-  #navbar ul {
-    list-style: none;
-    padding: 0;
-    margin: 0;
-    border: 0;
-    font-size: 100%;
-    font: inherit;
-    vertical-align: baseline;
-    z-index: 1000;
-    margin-left: -10px; /* Ignore body's padding left. */
-    padding-right: 20px; /* Ignore body's padding right. */
-  }
-
-  #navbar > ul {
-    display: flex;
-    display: -webkit-flex;
-    width: 100%;
-    background-color: #2D2D2D;
-    border-bottom: black;
-    margin-bottom: 6px;
-  }
-
-  #navbar li {
-    padding: 6px 10px;
-  }
-
-  #navbar li > a {
-    cursor: pointer;
-    text-decoration: none;
-  }
-
-  #navbar > ul > li, #navbar > ul > li > a {
-    color: #ddd;
-    font-weight: bold;
-  }
-
-  /* The addition of the icons to the <core-icon> elements to the submenus
-   * makes the submenu title text lower; the below style rule is intended
-   * to align the other menu items. */
-  #navbar .menu > li a {
-    display: inline-block;
-    padding-top: 3px;
-  }
-
-  #navbar .submenu li, #navbar .submenu a {
-    color: #ddd;
-    font-weight: normal;
-  }
-
-  /* This is a spacer in the navbar list that pushes the items after it
-   * all the way to the right side. */
-  .spacer {
-    flex: 100;
-  }
-
-  #navbar .menu li:hover > ul {
-    margin-top: 6px;
-    background-color: #2D2D2D;
-    border: 1px solid rgba(0, 0, 0, .2);
-  }
-
-  #navbar li:hover, #navbar a:hover {
-    color: white;
-  }
-
-  #navbar .menu-drop-arrow {
-    border-top-color: #aaa;
-    position: relative;
-    top: -1px;
-    border-style: solid dashed dashed;
-    border-color: transparent;
-    border-top-color: #c0c0c0;
-    display: -moz-inline-box;
-    display: inline-block;
-    font-size: 0;
-    height: 0;
-    line-height: 0;
-    width: 0;
-    border-width: 3px 3px 0;
-    padding-top: 1px;
-    left: 4px;
-  }
-
-  /* Basic select menus. */
-  .menu ul {
-    display: none;
-  }
-
-  .menu li:hover > ul {
-    display: block;
-    position: absolute;
-  }
-
-  .report-issue {
-    color: #dd4b39 !important;
-  }
-</style>
-
-<nav id="navbar">
-  <ul class="menu">
-    <li><a href="/">Home</a></li>
-    <li><a href="/alerts">Alerts</a></li>
-    <li><a href="/report">Browse Graphs</a></li>
-    <li><a href="https://code.google.com/p/chromium/issues/list?q=label%3AType-Bug-Regression+label%3APerformance&amp;sort=-id"
-           target="_blank">Perf Bugs</a></li>
-    <li>Other Pages<core-icon icon="arrow-drop-down"></core-icon>
-      <ul class="submenu">
-        <li><a href="/edit_test_owners">Edit Test Owners</a></li>
-        <li><a href="/bisect_stats">Bisect Stat Graphs</a></li>
-        <li><a href="/new_points">Recently Added Points</a></li>
-        <li><a href="/debug_alert">Debug Alert</a></li>
-      </ul>
-    </li>
-    <li>Waterfalls<core-icon icon="arrow-drop-down"></core-icon>
-      <ul class="submenu">
-        <li><a href="http://build.chromium.org/p/chromium.perf/waterfall?show_events=true&amp;failures_only=true&amp;reload=120"
-               target="_blank">chromium.perf</a></li>
-        <li><a href="http://build.chromium.org/p/chromium.webkit/waterfall?builder=Win7%20Perf&amp;builder=Mac10.6%20Perf&amp;builder=Linux%20Perf"
-               target="_blank">chromium.webkit</a></li>
-        <li><a href="http://build.chromium.org/p/chromium.gpu/waterfall?show_events=true&amp;failures_only=true&amp;reload=120"
-               target="_blank">chromium.gpu</a></li>
-        <li><a href="http://build.chromium.org/p/tryserver.chromium.perf/builders"
-               target="_blank">Bisect bots</a></li>
-      </ul>
-    </li>
-    <li>Help<core-icon icon="arrow-drop-down"></core-icon>
-      <ul class="submenu">
-        <li><a href="http://www.chromium.org/developers/speed-infra/performance-dashboard"
-               target="_blank">Perf Dashboard Public Documentation</a></li>
-        <li><a href="http://www.chromium.org/developers/speed-infra/performance-dashboard/endpoints"
-               target="_blank">Documented Endpoints</a></li>
-        <li><a href="http://www.chromium.org/developers/tree-sheriffs/perf-sheriffs"
-               target="_blank">About Chromium Perf Sheriffing</a></li>
-        <li><a href="https://docs.google.com/a/chromium.org/document/d/1kIMZ8jNA2--4JsCtUJ_OprnlfT6aM3BfHrQ8o4s3bDI/edit"
-               target="_blank">Chromium Perf Sheriff Status</a></li>
-        <li><a href="http://www.chromium.org/developers/tree-sheriffs/perf-sheriffs/bisecting-performance-regressions"
-               target="_blank">Bisecting Performance Regressions</a></li>
-        <li><a href="http://www.chromium.org/developers/how-tos/gpu-wrangling"
-               target="_blank">GPU Bots &amp; Pixel Wrangling</a></li>
-        <li><a href="https://docs.google.com/a/google.com/spreadsheets/d/1R_1BAOd3xeVtR0jn6wB5HHJ2K25mIbKp3iIRQKkX38o/view"
-               target="_blank">Benchmark Owners Spreadsheet</a></li>
-        <li><a href="https://docs.google.com/document/d/1cF2Ny3UYbXq2y3fZaygUSz_3lVc0SOrSRZwXqGjjKgo/view"
-               target="_blank">Triaging Stoppage Alerts</a></li>
-      </ul>
-    </li>
-
-    {% if is_admin %}
-    <li>Admin<core-icon icon="arrow-drop-down"></core-icon>
-      <ul class="submenu">
-        <li><a href="/edit_site_config" target="_blank">Edit Site Config</a></li>
-        <li><a href="/edit_site_config?ip_whitelist" target="_blank">Edit IP Whitelist</a></li>
-        <li><a href="/edit_sheriffs" target="_blank">Edit Sheriff Rotations</a></li>
-        <li><a href="/edit_anomaly_configs" target="_blank">Edit Anomaly Configs</a></li>
-        <li><a href="/edit_bug_labels" target="_blank">Edit Bug Labels</a></li>
-        <li><a href="/stats" target="_blank">View Statistics</a></li>
-        <li><a href="/migrate_test_names" target="_blank">Migrate Test Names</a></li>
-        <li><a href="/bot_whitelist" target="_blank">Bot Whitelist</a></li>
-        <li><a href="/change_internal_only" target="_blank">Change internal_only</a></li>
-      </ul>
-    </li>
-    {% endif %}
-    <li class="report-issue">
-      <core-icon icon="bug-report"></core-icon> Report Issue
-      <core-icon icon="arrow-drop-down"></core-icon>
-      <ul class="submenu">
-        <li><a href="javascript:reportBug()">Report a Perf Dashboard Bug</a></li>
-        <li><a href="javascript:fileIpWhitelistRequest()">Request Buildbot IP Whitelisting</a></li>
-        <li><a href="javascript:fileBotWhitelistRequest()">Request to Make Buildbots Publicly Visible</a></li>
-        <li><a href="javascript:fileMonitoringRequest()">Request Monitoring for Tests</a></li>
-      </ul>
-    </li>
-    <li class="spacer"></li>
-    <li>{{user_info | safe}}</li>
-  </ul>
-</nav>
diff --git a/catapult/dashboard/dashboard/templates/new_points.html b/catapult/dashboard/dashboard/templates/new_points.html
index 5a85f83..8922463 100644
--- a/catapult/dashboard/dashboard/templates/new_points.html
+++ b/catapult/dashboard/dashboard/templates/new_points.html
@@ -1,7 +1,13 @@
 <!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
 <html>
 <head>
   <link type="text/css" rel="stylesheet" href="/dashboard/static/base.css">
+  <link rel="import" href="/dashboard/elements/nav-bar.html">
   <title>New Points</title>
   <style>
   /*
@@ -30,7 +36,7 @@
   </style>
 </head>
 </body>
-  {% include 'nav.html' %}
+  <nav-bar></nav-bar>
   <h1>New Points</h1>
   <form method="GET">
     <label>Test path pattern:
diff --git a/catapult/dashboard/dashboard/templates/put_buildbucket_job.html b/catapult/dashboard/dashboard/templates/put_buildbucket_job.html
index 6ecdda4..8f15c44 100644
--- a/catapult/dashboard/dashboard/templates/put_buildbucket_job.html
+++ b/catapult/dashboard/dashboard/templates/put_buildbucket_job.html
@@ -1,7 +1,13 @@
 <!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
 <html>
 <head>
   <link type="text/css" rel="stylesheet" href="/dashboard/static/base.css">
+  <link rel="import" href="/dashboard/elements/nav-bar.html">
   <title>Buildbucket PUT Test</title>
   <style>
     table {
@@ -26,7 +32,7 @@
   </style>
 </head>
 <body>
-  {% include 'nav.html' %}
+  <nav-bar></nav-bar>
   <h1>Buildbucket PUT Test</h1>
   <form method="POST">
     <label>Test type
diff --git a/catapult/dashboard/dashboard/templates/quick_log_viewer.html b/catapult/dashboard/dashboard/templates/quick_log_viewer.html
index 2a4fc42..24d18cc 100644
--- a/catapult/dashboard/dashboard/templates/quick_log_viewer.html
+++ b/catapult/dashboard/dashboard/templates/quick_log_viewer.html
@@ -1,7 +1,13 @@
 <!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
 <html>
 <head>
   <link type="text/css" rel="stylesheet" href="/dashboard/static/base.css">
+  <link rel="import" href="/dashboard/elements/nav-bar.html">
   <title>Quick Log Viewer</title>
 
   <link rel="import" href="/components/polymer/polymer.html">
@@ -24,7 +30,7 @@
   </style>
 </head>
 <body>
-  {% include 'nav.html' %}
+  <nav-bar></nav-bar>
   <h1>Quick Log Viewer</h1>
 
   <div id="content">
diff --git a/catapult/dashboard/dashboard/templates/report.html b/catapult/dashboard/dashboard/templates/report.html
deleted file mode 100644
index 63288a7..0000000
--- a/catapult/dashboard/dashboard/templates/report.html
+++ /dev/null
@@ -1,74 +0,0 @@
-<!DOCTYPE html>
-<html>
-<head>
-  <link type="text/css" rel="stylesheet" href="/dashboard/static/base.css">
-  <script>
-    'use strict';
-    var TEST_SUITES = {{test_suites | safe}};
-    var REVISION_INFO = {{revision_info | safe}};
-    var XSRF_TOKEN = '{{xsrf_token}}';
-    var IS_INTERNAL_USER = ('{{is_internal_user}}' == 'True');
-    {% if dev_version %}var DEV_SERVER = true;{% endif %}
-  </script>
-
-  <script src="/jquery/jquery-2.1.4.min.js"></script>
-  <script src="/flot/jquery.flot.min.js"></script>
-  <script src="/flot/jquery.flot.crosshair.min.js"></script>
-  <script src="/flot/jquery.flot.fillbetween.min.js"></script>
-  <script src="/flot/jquery.flot.selection.min.js"></script>
-
-  <link rel="import" href="/components/polymer/polymer.html">
-
-  <link rel="import" href="/dashboard/elements/chart-container.html">
-  <link rel="import" href="/dashboard/elements/custom-tooltip.html">
-  <link rel="import" href="/dashboard/elements/login-warning.html">
-  <link rel="import" href="/dashboard/elements/overlay-message.html">
-  <link rel="import" href="/dashboard/elements/report-container.html">
-
-  <title>Chrome Performance Dashboard</title>
-
-  <script>
-    'use strict';
-    // Workaround for document.contains returning false for elements in the
-    // shadow DOM. jQuery mouse events need it to return true for scrolling
-    // to be properly accounted for. For background, see
-    // https://github.com/Polymer/polymer/issues/162 and
-    // https://www.w3.org/Bugs/Public/show_bug.cgi?id=22141
-    jQuery.contains = function(doc, elem) {
-      var shadowElems = document.getElementsByTagName('chart-container');
-      for (var i = 0; i < shadowElems.length; i++) {
-        if (shadowElems[i].shadowRoot.contains(elem)) {
-          return true;
-        }
-      }
-      return doc.contains(elem);
-    };
-  </script>
-</head>
-<body>
-  {% include 'nav.html' %}
-  <h1>Chrome Performance Dashboard</h1>
-
-  <overlay-message id="message-bar"></overlay-message>
-
-  {% if warning_message %}
-    <overlay-message id="warning-message" opened="true" autoCloseDisabled duration="-1">
-    {{warning_message}}
-    {% if warning_bug %}
-      <a href="https://github.com/catapult-project/catapult/issues/{{warning_bug}}">See bug #{{warning_bug}}.</a>
-    {% endif %}
-    </overlay-message>
-  {% endif %}
-
-  <login-warning id="login-warning" loginLink="{{login_url}}"
-                 {% if is_internal_user %}hidden="true"{% endif %}>
-  </login-warning>
-
-  <custom-tooltip id="tooltip"></custom-tooltip>
-
-  <report-container xsrfToken="{{xsrf_token | safe}}"></report-container>
-  <section id="charts-container"></section>
-
-  {% include 'analytics.html' %}
-</body>
-</html>
diff --git a/catapult/dashboard/dashboard/templates/result.html b/catapult/dashboard/dashboard/templates/result.html
index e70b1bb..27ad2a1 100644
--- a/catapult/dashboard/dashboard/templates/result.html
+++ b/catapult/dashboard/dashboard/templates/result.html
@@ -1,11 +1,17 @@
 <!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
 <html>
 <head>
   <link type="text/css" rel="stylesheet" href="/dashboard/static/base.css">
+  <link rel="import" href="/dashboard/elements/nav-bar.html">
   <title>Result</title>
 </head>
 <body>
-  {% include 'nav.html' %}
+  <nav-bar></nav-bar>
   {% if errors %}
     {% for error in errors %}
       <h1 class="error">{{error}}</h1>
diff --git a/catapult/dashboard/dashboard/templates/set_warning_message.html b/catapult/dashboard/dashboard/templates/set_warning_message.html
index 5d32ee6..3d3700c 100644
--- a/catapult/dashboard/dashboard/templates/set_warning_message.html
+++ b/catapult/dashboard/dashboard/templates/set_warning_message.html
@@ -1,4 +1,9 @@
 <!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
 <html>
 <head>
   <title>Set a warning message</title>
diff --git a/catapult/dashboard/dashboard/templates/stats.html b/catapult/dashboard/dashboard/templates/stats.html
index 82938d6..159d68f 100644
--- a/catapult/dashboard/dashboard/templates/stats.html
+++ b/catapult/dashboard/dashboard/templates/stats.html
@@ -1,7 +1,13 @@
 <!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
 <html>
 <head>
   <link type="text/css" rel="stylesheet" href="/dashboard/static/base.css">
+  <link rel="import" href="/dashboard/elements/nav-bar.html">
   <link rel="import" href="/components/polymer/polymer.html">
 
   <script src="/jquery/jquery-2.1.4.min.js"></script>
@@ -53,7 +59,7 @@
   </style>
 </head>
 <body>
-  {% include 'nav.html' %}
+  <nav-bar></nav-bar>
   <h1>Chrome Performance Statistics</h1>
   {% if waiting %}
     Waiting ({{processed}} of {{total}})...
diff --git a/catapult/dashboard/dashboard/testing_common.py b/catapult/dashboard/dashboard/testing_common.py
index 2ab3543..7334d8d 100644
--- a/catapult/dashboard/dashboard/testing_common.py
+++ b/catapult/dashboard/dashboard/testing_common.py
@@ -6,20 +6,39 @@
 
 import base64
 import json
+import mock
 import os
 import re
 import unittest
 import urllib
 
+from google.appengine.api import users
 from google.appengine.ext import deferred
 from google.appengine.ext import ndb
 from google.appengine.ext import testbed
 
+from dashboard import rietveld_service
 from dashboard import stored_object
 from dashboard import utils
 from dashboard.models import graph_data
 
-_QUEUE_YAML_DIR = os.path.join(os.path.dirname(__file__), os.path.pardir)
+_QUEUE_YAML_DIR = os.path.join(os.path.dirname(__file__), '..')
+
+
+class FakeRequestObject(object):
+  """Fake Request object which can be used by datastore_hooks mocks."""
+
+  def __init__(self, remote_addr=None):
+    self.registry = {}
+    self.remote_addr = remote_addr
+
+
+class FakeResponseObject(object):
+  """Fake Response Object which can be returned by urlfetch mocks."""
+
+  def __init__(self, status_code, content):
+    self.status_code = status_code
+    self.content = content
 
 
 class TestCase(unittest.TestCase):
@@ -35,10 +54,21 @@
     self.testbed.init_taskqueue_stub(root_path=_QUEUE_YAML_DIR)
     self.testbed.init_user_stub()
     self.testbed.init_urlfetch_stub()
+    self.mock_get_request = None
+    self._PatchIsInternalUser()
 
   def tearDown(self):
     self.testbed.deactivate()
 
+  def _AddFakeRietveldConfig(self):
+    """Sets up fake service account credentials for tests."""
+    rietveld_service.RietveldConfig(
+        id='default_rietveld_config',
+        client_email='foo@bar.com',
+        service_account_key='Fake Account Key',
+        server_url='https://test-rietveld.appspot.com',
+        internal_server_url='https://test-rietveld.appspot.com').put()
+
   def ExecuteTaskQueueTasks(self, handler_name, task_queue_name):
     """Executes all of the tasks on the queue until there are none left."""
     task_queue = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
@@ -100,6 +130,37 @@
           return None
     return None
 
+  def GetJsonValue(self, response, key):
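+    """Returns the value for |key| in the JSON dict parsed from |response|."""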
+    return json.loads(response.body).get(key)
+
+  def PatchDatastoreHooksRequest(self, remote_addr=None):
+    """This patches the request object to allow IP address to be set.
+
+    It should be used by tests which check code that does IP address checking
+    through datastore_hooks.
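+
+    For example, a test could call
+    self.PatchDatastoreHooksRequest(remote_addr='123.45.67.89') along with
+    SetIpWhitelist(['123.45.67.89']) so that the IP whitelist check in
+    datastore_hooks passes (the address here is only illustrative).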
+    """
+    get_request_patcher = mock.patch(
+        'webapp2.get_request',
+        mock.MagicMock(return_value=FakeRequestObject(remote_addr)))
+    self.mock_get_request = get_request_patcher.start()
+    self.addCleanup(get_request_patcher.stop)
+
+  def _PatchIsInternalUser(self):
+    """Sets up a fake version of utils.IsInternalUser to use in tests.
+
+    This version doesn't try to make any requests to check whether the
+    user is internal; it just checks for cached values and returns False
+    if nothing is found.
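+
+    Tests can therefore mark a user as internal by calling
+    SetIsInternalUser(user, True), which populates the cache that this
+    fake reads.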
+    """
+    def IsInternalUser():
+      username = users.get_current_user()
+      return bool(utils.GetCachedIsInternalUser(username))
+
+    is_internal_user_patcher = mock.patch.object(
+        utils, 'IsInternalUser', IsInternalUser)
+    is_internal_user_patcher.start()
+    self.addCleanup(is_internal_user_patcher.stop)
+
 
 def AddTests(masters, bots, tests_dict):
   """Adds data to the mock datastore.
@@ -167,9 +228,9 @@
   return rows
 
 
-def SetInternalDomain(domain):
+def SetIsInternalUser(user, is_internal_user):
   """Sets the domain that users who can access internal data belong to."""
-  stored_object.Set(utils.INTERNAL_DOMAIN_KEY, domain)
+  utils.SetCachedIsInternalUser(user, is_internal_user)
 
 
 def SetSheriffDomains(domains):
@@ -177,14 +238,6 @@
   stored_object.Set(utils.SHERIFF_DOMAINS_KEY, domains)
 
 
-class FakeResponseObject(object):
-  """Fake Response Object which can be returned by urlfetch mocks."""
-
-  def __init__(self, status_code, content):
-    self.status_code = status_code
-    self.content = content
-
-
 def SetIpWhitelist(ip_addresses):
   """Sets the list of whitelisted IP addresses."""
   stored_object.Set(utils.IP_WHITELIST_KEY, ip_addresses)
diff --git a/catapult/dashboard/dashboard/ttest.py b/catapult/dashboard/dashboard/ttest.py
index 4a249b5..e287154 100644
--- a/catapult/dashboard/dashboard/ttest.py
+++ b/catapult/dashboard/dashboard/ttest.py
@@ -130,9 +130,9 @@
   df = math_utils.Divide(
       (stats1.var / stats1.size + stats2.var / stats2.size) ** 2,
       math_utils.Divide(stats1.var ** 2,
-                       (stats1.size ** 2) * (stats1.size - 1)) +
+                        (stats1.size ** 2) * (stats1.size - 1)) +
       math_utils.Divide(stats2.var ** 2,
-                       (stats2.size ** 2) * (stats2.size - 1)))
+                        (stats2.size ** 2) * (stats2.size - 1)))
   return max(1.0, df)
 
 
diff --git a/catapult/dashboard/dashboard/update_bug_with_results.py b/catapult/dashboard/dashboard/update_bug_with_results.py
index 5395745..0fa8dc0 100644
--- a/catapult/dashboard/dashboard/update_bug_with_results.py
+++ b/catapult/dashboard/dashboard/update_bug_with_results.py
@@ -1,4 +1,4 @@
-# Copyright 2015 The Chromium Authors. All rights reserved.
+# Copyright 2016 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
@@ -8,63 +8,34 @@
 import json
 import logging
 import re
-import sys
 import traceback
-import urllib
 
-from google.appengine.api import app_identity
 from google.appengine.api import mail
-from google.appengine.api import urlfetch
-from google.appengine.api import urlfetch_errors
 from google.appengine.ext import ndb
 
 from dashboard import bisect_fyi
-from dashboard import buildbucket_service
+from dashboard import bisect_report
 from dashboard import datastore_hooks
 from dashboard import email_template
 from dashboard import issue_tracker_service
 from dashboard import layered_cache
 from dashboard import quick_logger
 from dashboard import request_handler
-from dashboard import rietveld_service
-from dashboard import start_try_job
 from dashboard import utils
 from dashboard.models import anomaly
 from dashboard.models import bug_data
 from dashboard.models import try_job
 
-# Try job status codes from rietveld (see TryJobResult in codereview/models.py)
-SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION, RETRY, TRYPENDING = range(7)
-# Not a status code from rietveld, added for completeness of the possible
-# statuses a job can be in.
-STARTED = -1
-OK = (SUCCESS, WARNINGS, SKIPPED)
-FAIL = (FAILURE, EXCEPTION)
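+# Status values for a bisect try job's results_data.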
+COMPLETED, FAILED, PENDING, ABORTED = ('completed', 'failed', 'pending',
+                                       'aborted')
 
 _COMMIT_HASH_CACHE_KEY = 'commit_hash_%s'
 
-_CONFIDENCE_THRESHOLD = 99.5
-
-# Timeout in minutes set by buildbot for trybots.
-_BISECT_BOT_TIMEOUT = 12 * 60
-
 # Amount of time to pass before deleting a try job.
 _STALE_TRYJOB_DELTA = datetime.timedelta(days=7)
 
-# Amount of time pass before deleteing try jobs that use Buildbucket.
-_STALE_TRYJOB_DELTA_BUILDBUCKET = datetime.timedelta(days=21)
-
-_BUG_COMMENT_TEMPLATE = """Bisect job status: %(status)s
-Bisect job ran on: %(bisect_bot)s
-
-%(results)s
-
-Buildbot stdio: %(buildbot_log_url)s
-Job details: %(issue_url)s
-"""
-
 _AUTO_ASSIGN_MSG = """
-==== Auto-CCing suspected CL author %(author)s ====
+=== Auto-CCing suspected CL author %(author)s ===
 
 Hi %(author)s, the bisect results pointed to your CL below as possibly
 causing a regression. Please have a look at this info and see whether
@@ -72,9 +43,7 @@
 
 """
 
-
-class UnexpectedJsonError(Exception):
-  pass
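+# Bisect results with a confidence score below this value will not CC the
+# suspected CL author on the bug.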
+_CONFIDENCE_LEVEL_TO_CC_AUTHOR = 95
 
 
 class BugUpdateFailure(Exception):
@@ -91,9 +60,7 @@
     jobs and send comments to an issue on the issue tracker if a bisect job has
     completed.
     """
-    credentials = rietveld_service.Credentials(
-        rietveld_service.GetDefaultRietveldConfig(),
-        rietveld_service.PROJECTHOSTING_SCOPE)
+    credentials = utils.ServiceAccountCredentials()
     issue_tracker = issue_tracker_service.IssueTrackerService(
         additional_credentials=credentials)
 
@@ -101,21 +68,17 @@
     datastore_hooks.SetPrivilegedRequest()
 
     jobs_to_check = try_job.TryJob.query(
-        try_job.TryJob.status == 'started').fetch()
+        try_job.TryJob.status.IN(['started', 'pending'])).fetch()
     all_successful = True
+
     for job in jobs_to_check:
       try:
-        if job.use_buildbucket:
-          logging.info('Checking job %s with Buildbucket job ID %s.',
-                       job.key.id(), getattr(job, 'buildbucket_job_id', None))
-        else:
-          logging.info('Checking job %s with Rietveld issue ID %s.',
-                       job.key.id(), getattr(job, 'rietveld_issue_id', None))
         _CheckJob(job, issue_tracker)
       except Exception as e:  # pylint: disable=broad-except
         logging.error('Caught Exception %s: %s\n%s',
                       type(e).__name__, e, traceback.format_exc())
         all_successful = False
+
     if all_successful:
       utils.TickMonitoringCustomMetric('UpdateBugWithResults')
 
@@ -129,367 +92,81 @@
     job: A TryJob entity, which represents one bisect try job.
     issue_tracker: An issue_tracker_service.IssueTrackerService instance.
   """
-  # Give up on stale try job.
-  if job.use_buildbucket:
-    stale_delta = _STALE_TRYJOB_DELTA_BUILDBUCKET
-  else:
-    stale_delta = _STALE_TRYJOB_DELTA
-  if (job.last_ran_timestamp and
-      job.last_ran_timestamp < datetime.datetime.now() - stale_delta):
-    comment = 'Stale bisect job, will stop waiting for results.'
-    comment += 'Rietveld issue: %s' % job.rietveld_issue_id
-    start_try_job.LogBisectResult(job.bug_id, comment)
-    job.SetFailed()
+  if _IsStale(job):
+    job.SetStaled()
+    # TODO(chrisphan): Add a log entry for stale TryJobs.
+    # TODO(chrisphan): Do we want to send a FYI Bisect email here?
+    return
+
+  results_data = job.results_data
+  if not results_data or results_data['status'] not in [COMPLETED, FAILED]:
     return
 
   if job.job_type == 'perf-try':
-    _CheckPerfTryJob(job)
+    _SendPerfTryJobEmail(job)
   elif job.job_type == 'bisect-fyi':
     _CheckFYIBisectJob(job, issue_tracker)
   else:
-    # Delete bisect jobs that aren't associated with any bug id.
-    if job.bug_id is None or job.bug_id < 0:
-      job.key.delete()
-      return
     _CheckBisectJob(job, issue_tracker)
 
-
-def _CheckPerfTryJob(job):
-  perf_results = _GetPerfTryResults(job)
-  if not perf_results:
-    return
-  _SendPerfTryJobEmail(job, perf_results)
-  job.SetCompleted()
-
-
-def _SendPerfTryJobEmail(job, perf_results):
-  """Sends an email to the user who started the perf try job."""
-  to = [job.email] if job.email else []
-  if not to:
-    logging.error('No "email" in job data. %s.', job.rietveld_issue_id)
-    return
-
-  perf_email = email_template.GetPerfTryJobEmail(perf_results)
-  if not perf_email:
-    logging.error('Failed to create "perf_email" from result data. %s.'
-                  ' Results data: %s', job.rietveld_issue_id, perf_results)
-    return
-
-  mail.send_mail(sender='gasper-alerts@google.com',
-                 to=','.join(to),
-                 subject=perf_email['subject'],
-                 body=perf_email['body'],
-                 html=perf_email['html'])
-
-
-def _ParseCloudLinksFromOutput(output):
-  """Extracts cloud storage URLs from text."""
-  html_results_pattern = re.compile(
-      r'@@@STEP_LINK@HTML Results@(?P<link>http://storage.googleapis.com/'
-      'chromium-telemetry/html-results/results-[a-z0-9-_]+)@@@',
-      re.MULTILINE)
-  profiler_pattern = re.compile(
-      r'@@@STEP_LINK@(?P<title>[^@]+)@(?P<link>https://console.developers.'
-      'google.com/m/cloudstorage/b/[a-z-]+/o/profiler-[a-z0-9-_.]+)@@@',
-      re.MULTILINE)
-
-  links = {
-      'html-results': html_results_pattern.findall(output),
-      'profiler': profiler_pattern.findall(output),
-  }
-
-  return links
-
-
-def _LoadConfigFromString(contents):
-  try:
-    # The config should be in the following format:
-    # config = {'foo': 'foo'}
-    # So we really just need to strip off the "config" part.
-    json_contents = str(contents).split('{')[1].split('}')[0]
-    json_contents = json_contents.replace("'", '\"')
-    json_contents = '{%s}' % json_contents
-    return json.loads(json_contents)
-  except (IndexError, ValueError, AttributeError):
-    logging.error('Could not parse config contents: %s', contents)
-    return None
-
-
-def _GetPerfTryResults(job):
-  """Gets perf results for a perf try job.
-
-  Args:
-    job: TryJob entity.
-
-  Returns:
-    A dictionary containing status, results, buildbot_log_url, and
-    issue_url for this bisect job, None if perf try job is pending or
-    there's an error fetching run data.
-  """
-  results = {}
-  # Fetch bisect bot results from Rietveld server.
-  response = _FetchRietveldIssueJSON(job)
-  issue_url = _RietveldIssueURL(job)
-  try_job_info = _ValidateRietveldResponse(response)
-
-  results['buildbot_log_url'] = str(try_job_info['url'])
-  results['issue_url'] = str(issue_url)
-
-  # Check whether the bisect job is finished or not and fetch the output.
-  result = int(try_job_info['result'])
-  if result not in OK + FAIL:
-    return None
-
-  results_url = ('%s/steps/Running%%20Bisection/logs/stdio/text' %
-                 try_job_info['url'])
-  response = _FetchURL(results_url, skip_status_code=True)
-  results['bisect_bot'] = try_job_info['builder']
-  results['config'] = _LoadConfigFromString(job.config)
-
-  if not results['config']:
-    results['status'] = 'Failure'
-    return results
-
-  # We don't see content for "Result" step.  Bot probably did not get there.
-  if not response or response.status_code != 200:
-    results['status'] = 'Failure'
-    return results
-
-  links = _ParseCloudLinksFromOutput(response.content)
-
-  results['html_results'] = (links['html-results'][0]
-                             if links['html-results'] else '')
-  results['profiler_results'] = links['profiler']
-  results['status'] = 'Completed'
-
-  return results
+  if results_data['status'] == COMPLETED:
+    job.SetCompleted()
+  else:
+    job.SetFailed()
 
 
 def _CheckBisectJob(job, issue_tracker):
-  bisect_results = _GetBisectResults(job)
-  if not bisect_results:
-    logging.info('No bisect results, job may be pending.')
+  results_data = job.results_data
+  has_partial_result = ('revision_data' in results_data and
+                        results_data['revision_data'])
+  if results_data['status'] == FAILED and not has_partial_result:
     return
-  logging.info('Bisect job status: %s.', bisect_results['status'])
-  if bisect_results['status'] == 'Completed':
-    _PostSucessfulResult(job, bisect_results, issue_tracker)
-    job.SetCompleted()
-  elif bisect_results['status'] == 'Failure with partial results':
-    _PostFailedResult(
-        job, bisect_results, issue_tracker, add_bug_comment=True)
-    job.SetFailed()
-  elif bisect_results['status'] == 'Failure':
-    _PostFailedResult(job, bisect_results, issue_tracker)
-    job.SetFailed()
+  _PostResult(job, issue_tracker)
 
 
-def _GetBisectResults(job):
-  """Gets bisect results for a bisect job.
-
-  Args:
-    job: TryJob entity.
-
-  Returns:
-    A dictionary containing status, results, buildbot_log_url, and
-    issue_url for this bisect job. The issue_url may be a link to a Rietveld
-    issue or to Buildbucket job info.
-  """
-  results = {}
-  # Fetch bisect bot results from Rietveld server.
-  if job.use_buildbucket:
-    try_job_info = _ValidateAndConvertBuildbucketResponse(
-        buildbucket_service.GetJobStatus(job.buildbucket_job_id))
-    hostname = app_identity.get_default_version_hostname()
-    job_id = job.buildbucket_job_id
-    issue_url = 'https://%s/buildbucket_job_status/%s' % (hostname, job_id)
-  else:
-    response = _FetchRietveldIssueJSON(job)
-    issue_url = _RietveldIssueURL(job)
-    try_job_info = _ValidateRietveldResponse(response)
-
-  results['buildbot_log_url'] = str(try_job_info['url'])
-  results['issue_url'] = str(issue_url)
-
-  # Check whether the bisect job is finished or not and fetch the output.
-  result = int(try_job_info['result'])
-  if result not in OK + FAIL:
-    return None
-
-  results_url = '%s/steps/Results/logs/stdio/text' % try_job_info['url']
-  response = _FetchURL(results_url, skip_status_code=True)
-  results['bisect_bot'] = try_job_info['builder']
-  # We don't see content for "Result" step.  Bot probably did not get there.
-  if not response or response.status_code != 200:
-    results['status'] = 'Failure'
-    results['results'] = ''
-    build_data = _FetchBuildData(try_job_info['url'])
-    if build_data:
-      _CheckBisectBotForInfraFailure(job.bug_id, build_data,
-                                     try_job_info['url'])
-      results['results'] = _GetBotFailureInfo(build_data)
-      partial_result = _GetPartialBisectResult(build_data, try_job_info['url'])
-      if partial_result:
-        results['status'] = 'Failure with partial results'
-        results['results'] += partial_result
-    return results
-
-  # Clean result.
-  # If the bisect_results string contains any non-ASCII characters,
-  # converting to string should prevent an error from being raised.
-  bisect_result = _BeautifyContent(str(response.content))
-
-  # Bisect is considered success if result is provided.
-  # "BISECTION ABORTED" is added when a job is ealy aborted because the
-  # associated issue was closed.
-  # TODO(robertocn): Make sure we are outputting this string
-  if ('BISECT JOB RESULTS' in bisect_result or
-      'BISECTION ABORTED' in bisect_result):
-    results['status'] = 'Completed'
-  else:
-    results['status'] = 'Failure'
-
-  results['results'] = bisect_result
-  return results
-
-
-def _FetchBuildData(build_url):
-  """Fetches build data from buildbot json api.
-
-  For json api examples see:
-  http://build.chromium.org/p/tryserver.chromium.perf/json/help
-
-  Args:
-    build_url: URL to a Buildbot bisect tryjob.
-
-  Returns:
-    A dictionary of build data for a bisect tryjob. None if there's an
-    error fetching build data.
-  """
-  index = build_url.find('/builders/')
-  if index == -1:
-    logging.error('Build url does not contain expected "/builders/" to '
-                  'fetch json data. URL: %s.', build_url)
-    return None
-
-  # Fetch and verify json data.
-  json_build_url = build_url[:index] + '/json' + build_url[index:]
-  response = _FetchURL(json_build_url)
-  if not response:
-    logging.error('Could not fetch json data from %s.', json_build_url)
-    return None
+def _CheckFYIBisectJob(job, issue_tracker):
   try:
-    build_data = json.loads(response.content)
-    if (not build_data or
-        not build_data.get('steps') or
-        not build_data.get('times') or
-        not build_data.get('text')):
-      raise ValueError('Expected properties not found in build data: %s.' %
-                       build_data)
-  except ValueError, e:
-    logging.error('Response from builder could not be parsed as JSON. '
-                  'URL: %s. Error: %s.', json_build_url, e)
-    return None
-  return build_data
+    _PostResult(job, issue_tracker)
+    error_message = bisect_fyi.VerifyBisectFYIResults(job)
+    if not bisect_fyi.IsBugUpdated(job, issue_tracker):
+      error_message += '\nFailed to update bug with bisect results.'
+  except BugUpdateFailure as e:
+    error_message = 'Failed to update bug with bisect results: %s' % e
+  if job.results_data['status'] == FAILED or error_message:
+    _SendFYIBisectEmail(job, error_message)
 
 
-def _GetBotFailureInfo(build_data):
-  """Returns helpful message about failed bisect runs."""
-  message = ''
-
-  # Add success rate message.
-  build_steps = build_data['steps']
-  num_success_build = 0
-  total_build = 0
-  for step in build_steps:
-    # 'Working on' is the step name for bisect run for a build.
-    if 'Working on' in step['name']:
-      if step['results'][0] in (SUCCESS, WARNINGS):
-        num_success_build += 1
-      total_build += 1
-  message += 'Completed %s/%s builds.\n' % (num_success_build, total_build)
-
-  # Add run time messsage.
-  run_time = build_data['times'][1] - build_data['times'][0]
-  run_time = int(run_time / 60)  # Minutes.
-  message += 'Run time: %s/%s minutes.\n' % (run_time, _BISECT_BOT_TIMEOUT)
-  if run_time >= _BISECT_BOT_TIMEOUT:
-    message += 'Bisect timed out! Try again with a smaller revision range.\n'
-
-  # Add failed steps message.
-  # 'text' field has the following properties:
-  #   text":["failed","slave_steps","failed","Working on [b92af3931458f2]"]
-  status_list = build_data['text']
-  if status_list[0] == 'failed':
-    message += 'Failed steps: %s\n\n' % ', '.join(status_list[1::2])
-
-  return message
+def _SendPerfTryJobEmail(job):
+  """Sends an email to the user who started the perf try job."""
+  if not job.email:
+    return
+  email_report = email_template.GetPerfTryJobEmailReport(job)
+  if not email_report:
+    return
+  mail.send_mail(sender='gasper-alerts@google.com',
+                 to=job.email,
+                 subject=email_report['subject'],
+                 body=email_report['body'],
+                 html=email_report['html'])
 
 
-def _GetPartialBisectResult(build_data, build_url):
-  """Gets partial bisect result if there's any.
-
-  For bisect result output format see:
-  https://chromium.googlesource.com/chromium/src/+/master/tools/
-  auto_bisect/bisect_perf_regression.py
-
-  Args:
-    build_data: A dictionary of build data for a bisect tryjob.
-    build_url: URL to a Buildbot bisect tryjob.
-
-  Returns:
-    String result of bisect job.
-  """
-  build_steps = build_data['steps']
-  # Search for the last successful bisect step.
-  pattern = re.compile(r'===== PARTIAL RESULTS =====(.*)\n\n', re.DOTALL)
-  for step in reversed(build_steps):
-    # 'Working on' is the step name for bisect run for a build.
-    if ('Working on' in step['name'] and
-        step['results'][0] in (SUCCESS, WARNINGS)):
-      stdio_url = ('%s/steps/%s/logs/stdio/text' %
-                   (build_url, urllib.quote(step['name'])))
-      response = _FetchURL(stdio_url)
-      if response:
-        match = pattern.search(response.content)
-        if match:
-          return _BeautifyContent(match.group())
-  return None
-
-
-def _PostFailedResult(
-    job, bisect_results, issue_tracker, add_bug_comment=False):
-  """Posts failed bisect results on logger and optional issue tracker."""
-  comment = _BUG_COMMENT_TEMPLATE % bisect_results
-  if add_bug_comment:
-    # Set restrict view label if the bisect results are internal only.
-    labels = ['Restrict-View-Google'] if job.internal_only else None
-    added_comment = issue_tracker.AddBugComment(
-        job.bug_id, comment, labels=labels)
-    if not added_comment:
-      raise BugUpdateFailure('Failed to update bug %s with comment %s'
-                             % (job.bug_id, comment))
-  start_try_job.LogBisectResult(job.bug_id, comment)
-  logging.info('Updated bug %s with results from %s',
-               job.bug_id, job.rietveld_issue_id)
-
-
-def _PostSucessfulResult(job, bisect_results, issue_tracker):
-  """Posts successful bisect results on logger and issue tracker."""
+def _PostResult(job, issue_tracker):
+  """Posts bisect results on issue tracker."""
   # From the results, get the list of people to CC (if applicable), the bug
   # to merge into (if applicable) and the commit hash cache key, which
   # will be used below.
+  if job.bug_id < 0:
+    return
+
+  results_data = job.results_data
   authors_to_cc = []
-  merge_issue = None
-  bug = ndb.Key('Bug', job.bug_id).get()
+  commit_cache_key = _GetCommitHashCacheKey(results_data)
 
-  commit_cache_key = _GetCommitHashCacheKey(bisect_results['results'])
-  result_is_positive = _BisectResultIsPositive(bisect_results['results'])
-  if bug and result_is_positive:
-    merge_issue = layered_cache.GetExternal(commit_cache_key)
-    if not merge_issue:
-      authors_to_cc = _GetAuthorsToCC(bisect_results['results'])
+  merge_issue = layered_cache.GetExternal(commit_cache_key)
+  if not merge_issue:
+    authors_to_cc = _GetAuthorsToCC(results_data)
 
-  comment = _BUG_COMMENT_TEMPLATE % bisect_results
+  comment = bisect_report.GetReport(job)
 
   # Add a friendly message to author of culprit CL.
   owner = None
@@ -499,14 +176,13 @@
     owner = authors_to_cc[0]
   # Set restrict view label if the bisect results are internal only.
   labels = ['Restrict-View-Google'] if job.internal_only else None
-  added_comment = issue_tracker.AddBugComment(
+  comment_added = issue_tracker.AddBugComment(
       job.bug_id, comment, cc_list=authors_to_cc, merge_issue=merge_issue,
       labels=labels, owner=owner)
-  if not added_comment:
+  if not comment_added:
     raise BugUpdateFailure('Failed to update bug %s with comment %s'
                            % (job.bug_id, comment))
 
-  start_try_job.LogBisectResult(job.bug_id, comment)
   logging.info('Updated bug %s with results from %s',
                job.bug_id, job.rietveld_issue_id)
 
@@ -514,94 +190,26 @@
     _MapAnomaliesToMergeIntoBug(merge_issue, job.bug_id)
     # Mark the duplicate bug's Bug entity status as closed so that
     # it doesn't get auto triaged.
-    bug.status = bug_data.BUG_STATUS_CLOSED
-    bug.put()
+    bug = ndb.Key('Bug', job.bug_id).get()
+    if bug:
+      bug.status = bug_data.BUG_STATUS_CLOSED
+      bug.put()
 
   # Cache the commit info and bug ID to datastore when there is no duplicate
   # issue that this issue is getting merged into. This has to be done only
   # after the issue is updated successfully with bisect information.
-  if commit_cache_key and not merge_issue and result_is_positive:
+  if commit_cache_key and not merge_issue:
     layered_cache.SetExternal(commit_cache_key, str(job.bug_id),
                               days_to_keep=30)
     logging.info('Cached bug id %s and commit info %s in the datastore.',
                  job.bug_id, commit_cache_key)
 
 
-def _ValidateAndConvertBuildbucketResponse(job_info):
-  """Checks the response from the buildbucket service and converts it.
-
-  The response is converted to a similar format to that used by Rietveld for
-  backwards compatibility.
-
-  Args:
-    job_info: A dictionary containing the response from the buildbucket service.
-
-  Returns:
-    Try job info dict in the same format as _ValidateRietveldResponse; will
-    have the keys "url", "results", and "bisect_bot".
-
-  Raises:
-    UnexpectedJsonError: The format was not as expected.
-  """
-  job_info = job_info['build']
-  json_response = json.dumps(job_info)
-  if not job_info:
-    raise UnexpectedJsonError('No response from Buildbucket.')
-  if job_info.get('result') is None:
-    raise UnexpectedJsonError('No "result" in try job results. '
-                              'Buildbucket response: %s' % json_response)
-  if job_info.get('url') is None:
-    raise UnexpectedJsonError('No "url" in try job results. This could mean '
-                              'that the job has not started. '
-                              'Buildbucket response: %s' % json_response)
-  try:
-    result_details = json.loads(job_info['result_details_json'])
-    bisect_config = result_details['properties']['bisect_config']
-    job_info['builder'] = bisect_config['recipe_tester_name']
-  except (KeyError, ValueError, TypeError):
-    # If the tester name isn't found here, this is unexpected but non-fatal.
-    job_info['builder'] = 'Unknown'
-    logging.error('Failed to extract tester name from JSON: %s', json_response)
-  job_info['result'] = _BuildbucketStatusToStatusConstant(
-      job_info['status'], job_info['result'])
-  return job_info
-
-
-def _ValidateRietveldResponse(response):
-  """Checks the response from Rietveld to see if the JSON format is right.
-
-  Args:
-    response: A Response object, should have a string content attribute.
-
-  Returns:
-    Try job info dict, guaranteed to have the keys "url" and "result".
-
-  Raises:
-    UnexpectedJsonError: The format was not as expected.
-  """
-  if not response:
-    raise UnexpectedJsonError('No response from Rietveld.')
-  try:
-    issue_data = json.loads(response.content)
-  except ValueError:
-    raise UnexpectedJsonError('Response from Rietveld could not be parsed '
-                              'as JSON: %s' % response.content)
-  # Check whether we can get the results from the issue data response.
-  if not issue_data.get('try_job_results'):
-    raise UnexpectedJsonError('Empty "try_job_results" in Rietveld response. '
-                              'Response: %s.' % response.content)
-  try_job_info = issue_data['try_job_results'][0]
-  if not try_job_info:
-    raise UnexpectedJsonError('Empty item in try job results. '
-                              'Rietveld response: %s' % response.content)
-  if try_job_info.get('result') is None:
-    raise UnexpectedJsonError('No "result" in try job results. '
-                              'Rietveld response: %s' % response.content)
-  if try_job_info.get('url') is None:
-    raise UnexpectedJsonError('No "url" in try job results. This could mean '
-                              'that the job has not started. '
-                              'Rietveld response: %s' % response.content)
-  return try_job_info
+def _IsStale(job):
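+  """Returns True if the job last ran more than _STALE_TRYJOB_DELTA ago."""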
+  if not job.last_ran_timestamp:
+    return False
+  time_since_last_ran = datetime.datetime.now() - job.last_ran_timestamp
+  return time_since_last_ran > _STALE_TRYJOB_DELTA
 
 
 def _MapAnomaliesToMergeIntoBug(dest_bug_id, source_bug_id):
@@ -619,104 +227,22 @@
   ndb.put_multi(anomalies)
 
 
-def _CheckBisectBotForInfraFailure(bug_id, build_data, build_url):
-  """Logs bisect failures related to infrastructure.
+def _GetCommitHashCacheKey(results_data):
+  """Gets a commit hash cache key for the given bisect results output.
 
   Args:
-    bug_id: Bug number.
-    build_data: A dictionary of build data for a bisect tryjob.
-    build_url: URL to a Buildbot bisect tryjob.
+    results_data: Bisect results data.
 
-  TODO(chrisphan): Remove this once we get an idea of the rate of infra related
-                   failures.
+  Returns:
+    A string to use as a layered_cache key, or None if we don't want
+    to merge any bugs based on this bisect result.
   """
-  build_steps = build_data['steps']
-
-  # If there's no bisect scripts step then it is considered infra issue.
-  slave_step_index = _GetBisectScriptStepIndex(build_steps)
-  if not slave_step_index:
-    _LogBisectInfraFailure(bug_id, 'Bot failure.', build_url)
-    return
-
-  # Timeout failure is our problem.
-  run_time = build_data['times'][1] - build_data['times'][0]
-  run_time = int(run_time / 60)  # Minutes.
-  if run_time >= _BISECT_BOT_TIMEOUT:
-    return
-
-  # Any build failure is an infra issue.
-  # These flags are output by bisect_perf_regression.py.
-  build_failure_flags = [
-      'Failed to build revision',
-      'Failed to produce build',
-      'Failed to perform pre-sync cleanup',
-      'Failed to sync',
-      'Failed to run [gclient runhooks]',
-  ]
-  slave_step = build_steps[slave_step_index]
-  stdio_url = ('%s/steps/%s/logs/stdio/text' %
-               (build_url, urllib.quote(slave_step['name'])))
-  response = _FetchURL(stdio_url)
-  if response:
-    for flag in build_failure_flags:
-      if flag in response.content:
-        _LogBisectInfraFailure(bug_id, 'Build failure.', build_url)
-        return
-
-
-def _GetBisectScriptStepIndex(build_steps):
-  """Gets the index of step that run bisect script in build step data."""
-  index = 0
-  for step in build_steps:
-    if step['name'] in ['slave_steps', 'Running Bisection']:
-      return index
-    index += 1
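+  # For example, if culprit_data['cl'] is '2a1781d64d', the resulting cache
+  # key is 'commit_hash_2a1781d64d'.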
+  if results_data.get('culprit_data'):
+    return _COMMIT_HASH_CACHE_KEY % results_data['culprit_data']['cl']
   return None
 
 
-def _LogBisectInfraFailure(bug_id, failure_message, stdio_url):
-  """Adds infrastructure related bisect failures to log."""
-  comment = failure_message + '\n'
-  comment += ('<a href="https://chromeperf.appspot.com/group_report?'
-              'bug_id=%s">%s</a>\n' % (bug_id, bug_id))
-  comment += 'Buildbot stdio: <a href="%s">%s</a>\n' % (stdio_url, stdio_url)
-  formatter = quick_logger.Formatter()
-  logger = quick_logger.QuickLogger('bisect_failures', 'infra', formatter)
-  logger.Log(comment)
-  logger.Save()
-
-
-def _BisectResultIsPositive(results_output):
-  """Returns True if the bisect found a culprit with high confidence."""
-  return 'Status: Positive' in results_output
-
-
-def _GetCommitHashCacheKey(results_output):
-  """Gets a commit hash cache key for the given bisect results output.
-
-  One commit hash key represents a set of culprit CLs. This information is
-  stored so in case one issue has the same set of culprit CLs as another,
-  in which case one can be marked as duplicate of the other.
-
-  Args:
-    results_output: The bisect results output.
-
-  Returns:
-    A cache key, less than 500 characters long.
-  """
-  commits_list = re.findall(r'Commit  : (.*)', results_output)
-  commit_hashes = sorted({commit.strip() for commit in commits_list})
-  # Generate a cache key by concatenating commit hashes found in bisect
-  # results and prepend it with commit_hash.
-  commit_cache_key = _COMMIT_HASH_CACHE_KEY % ''.join(commit_hashes)
-  # Datastore key name strings must be non-empty strings up to
-  # 500 bytes.
-  if sys.getsizeof(commit_cache_key) >= 500:
-    commit_cache_key = commit_cache_key[:400] + '...'
-  return commit_cache_key
-
-
-def _GetAuthorsToCC(results_output):
+def _GetAuthorsToCC(results_data):
   """Makes a list of email addresses that we want to CC on the bug.
 
   TODO(qyearsley): Make sure that the bisect result bot doesn't cc
@@ -725,30 +251,22 @@
   the datastore for the bug id and checking the internal-only property).
 
   Args:
-    results_output: The bisect results output.
+    results_data: Bisect results data.
 
   Returns:
     A list of email addresses, possibly empty.
   """
-  author_lines = re.findall(r'Author  : (.*)', results_output)
-  unique_emails = set()
-  for line in author_lines:
-    parts = line.split(',')
-    unique_emails.update(p.strip() for p in parts if '@' in p)
-  emails = sorted(unique_emails)
-
-  # Avoid CCing issue to multiple authors when bisect finds multiple
-  # different authors for culprits CLs.
-  if len(emails) > 1:
-    emails = []
-  if len(emails) == 1:
-    # In addition to the culprit CL author, we also want to add reviewers
-    # of the culprit CL to the cc list.
-    emails.extend(_GetReviewersFromBisectLog(results_output))
+  if results_data.get('score') < _CONFIDENCE_LEVEL_TO_CC_AUTHOR:
+    return []
+  culprit_data = results_data.get('culprit_data')
+  if not culprit_data:
+    return []
+  emails = [culprit_data['email']] if culprit_data['email'] else []
+  emails.extend(_GetReviewersFromCulpritData(culprit_data))
   return emails
 
 
-def _GetReviewersFromBisectLog(results_output):
+def _GetReviewersFromCulpritData(culprit_data):
   """Parse bisect log and gets reviewers email addresses from Rietveld issue.
 
   Note: This method doesn't get called when bisect reports multiple CLs by
@@ -756,29 +274,29 @@
   same owner.
 
   Args:
-    results_output: Bisect results output.
+    culprit_data: Bisect results culprit data.
 
   Returns:
     List of email addresses from the committed CL.
   """
+
   reviewer_list = []
-  revisions_list = re.findall(r'Link    : (.*)', results_output)
-  revisions_links = {rev.strip() for rev in revisions_list}
+  revisions_links = culprit_data['revisions_links']
  # Sometimes the revision page content consists of multiple "Review URL"
  # strings due to reverted CLs; such CLs are prefixed with ">" (&gt;) symbols.
-  # Should only parse CL link correspoinding the revision found by the bisect.
+  # Only parse the CL link corresponding to the revision found by the bisect.
   link_pattern = (r'(?<!&gt;\s)Review URL: <a href=[\'"]'
                   r'https://codereview.chromium.org/(\d+)[\'"].*>')
   for link in revisions_links:
-    # Fetch the commit links in order to get codereview link
-    response = _FetchURL(link)
+    # Fetch the commit links in order to get the codereview link.
+    response = utils.FetchURL(link)
     if not response:
       continue
     rietveld_issue_ids = re.findall(link_pattern, response.content)
     for issue_id in rietveld_issue_ids:
       # Fetch codereview link, and get reviewer email addresses from the
       # response JSON.
-      issue_response = _FetchURL(
+      issue_response = utils.FetchURL(
           'https://codereview.chromium.org/api/%s' % issue_id)
       if not issue_response:
         continue
@@ -787,116 +305,30 @@
   return reviewer_list
 
 
-def _BeautifyContent(response_data):
-  """Strip lines begins with @@@ and strip leading and trailing whitespace."""
-  pattern = re.compile(r'@@@.*@@@.*\n')
-  response_str = re.sub(pattern, '', response_data)
-  new_response = [line.strip() for line in response_str.split('\n')]
-  response_str = '\n'.join(new_response)
-
-  delimiter = '---bisect results start here---'
-  if delimiter in response_str:
-    response_str = response_str.split(delimiter)[1]
-
-  return response_str.rstrip()
-
-
-def _FetchURL(request_url, skip_status_code=False):
-  """Wrapper around URL fetch service to make request.
-
-  Args:
-    request_url: URL of request.
-    skip_status_code: Skips return code check when True, default is False.
-
-  Returns:
-    Response object return by URL fetch, otherwise None when there's an error.
-  """
-  logging.info('URL being fetched: ' + request_url)
-  try:
-    response = urlfetch.fetch(request_url)
-  except urlfetch_errors.DeadlineExceededError:
-    logging.error('Deadline exceeded error checking %s', request_url)
-    return None
-  except urlfetch_errors.DownloadError as err:
-    # DownloadError is raised to indicate a non-specific failure when there
-    # was not a 4xx or 5xx status code.
-    logging.error(err)
-    return None
-  if skip_status_code:
-    return response
-  elif response.status_code != 200:
-    logging.error(
-        'ERROR %s checking %s', response.status_code, request_url)
-    return None
-  return response
-
-
-def _FetchRietveldIssueJSON(job):
-    server = rietveld_service.RietveldService(internal_only=job.internal_only)
-    path = 'api/%d/%d' % (job.rietveld_issue_id, job.rietveld_patchset_id)
-    response, _ = server.MakeRequest(path, method='GET')
-    return response
-
-
-def _RietveldIssueURL(job):
-  config = rietveld_service.GetDefaultRietveldConfig()
-  host = config.internal_server_url if job.internal_only else config.server_url
-  return '%s/%d' % (host, job.rietveld_issue_id)
-
-
-def _BuildbucketStatusToStatusConstant(status, result):
-  """Converts the string status from buildbucket to a numeric constant."""
-  # TODO(robertocn): We might need to make a difference between
-  # - Scheduled and Started
-  # - Failure and Cancelled.
-  if status == 'COMPLETED':
-    if result == 'SUCCESS':
-      return SUCCESS
-    return FAILURE
-  return STARTED
-
-
-def _CheckFYIBisectJob(job, issue_tracker):
-  bisect_results = _GetBisectResults(job)
-  if not bisect_results:
-    logging.info('Bisect FYI: [%s] No bisect results, job might be pending.',
-                 job.job_name)
-    return
-  logging.info('Bisect FYI: [%s] Bisect job status: %s.',
-               job.job_name, bisect_results['status'])
-  try:
-    if bisect_results['status'] == 'Completed':
-      _PostSucessfulResult(job, bisect_results, issue_tracker)
-      # Below in VerifyBisectFYIResults we verify whether the actual
-      # results matches with the expectations; if they don't match then
-      # bisect_results['status'] gets set to 'Failure'.
-      bisect_fyi.VerifyBisectFYIResults(job, bisect_results)
-    elif 'Failure' in bisect_results['status']:
-      _PostFailedResult(
-          job, bisect_results, issue_tracker, add_bug_comment=True)
-      bisect_results['errors'] = 'Bisect FYI job failed:\n%s' % bisect_results
-  except BugUpdateFailure as e:
-    bisect_results['status'] = 'Failure'
-    bisect_results['error'] = 'Bug update Failed: %s' % e
-  finally:
-    _SendFYIBisectEmail(job, bisect_results)
-    job.key.delete()
-
-
-def _SendFYIBisectEmail(job, results):
+def _SendFYIBisectEmail(job, message):
   """Sends an email to auto-bisect-team about FYI bisect results."""
-  # Don't send email when test case pass.
-  if results.get('status') == 'Completed':
-    logging.info('Test Passed: %s.\n Results: %s', job.job_name, results)
-    return
-
-  email_data = email_template.GetBisectFYITryJobEmail(job, results)
-  if not email_data:
-    logging.error('Failed to create "email_data" from results for %s.\n'
-                  ' Results: %s', job.job_name, results)
-    return
-  mail.send_mail(sender='auto-bisect-team@google.com',
-                 to='prasadv@google.com',
+  email_data = email_template.GetBisectFYITryJobEmailReport(job, message)
+  mail.send_mail(sender='gasper-alerts@google.com',
+                 to='auto-bisect-team@google.com',
                  subject=email_data['subject'],
                  body=email_data['body'],
                  html=email_data['html'])
+
+
+def UpdateQuickLog(job):
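+  """Adds or updates a quick log entry with the bisect report for |job|."""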
+  if not job.bug_id or job.bug_id < 0:
+    return
+  report = bisect_report.GetReport(job)
+  if not report:
+    logging.error('Bisect report returns empty for job id %s, bug_id %s.',
+                  job.key.id(), job.bug_id)
+    return
+  formatter = quick_logger.Formatter()
+  logger = quick_logger.QuickLogger('bisect_result', job.bug_id, formatter)
+  if job.log_record_id:
+    logger.Log(report, record_id=job.log_record_id)
+    logger.Save()
+  else:
+    job.log_record_id = logger.Log(report)
+    logger.Save()
+    job.put()
diff --git a/catapult/dashboard/dashboard/update_bug_with_results_test.py b/catapult/dashboard/dashboard/update_bug_with_results_test.py
index 411c525..16506da 100644
--- a/catapult/dashboard/dashboard/update_bug_with_results_test.py
+++ b/catapult/dashboard/dashboard/update_bug_with_results_test.py
@@ -1,7 +1,9 @@
-# Copyright 2015 The Chromium Authors. All rights reserved.
+# Copyright 2016 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+import copy
+import datetime
 import json
 import unittest
 
@@ -9,7 +11,6 @@
 import webapp2
 import webtest
 
-from google.appengine.ext import ndb
 from dashboard import bisect_fyi
 from dashboard import bisect_fyi_test
 from dashboard import layered_cache
@@ -22,179 +23,52 @@
 from dashboard.models import bug_data
 from dashboard.models import try_job
 
-# Bisect log with multiple potential culprits with different authors.
-_BISECT_LOG_MULTI_OWNER = """
-@@@STEP_CURSOR Results@@@
-@@@STEP_STARTED@@@
-
-===== BISECT JOB RESULTS =====
-Status: Positive
-
-Test Command: python tools/perf/run_benchmark -v --browser=release sunspider
-Test Metric: Total/Total
-Relative Change: 1.23% (+/-1.26%)
-Estimated Confidence: 99.9%
-
-===== SUSPECTED CL(s) =====
-Subject : Subject 1
-Author  : sullivan@google.com
-Link    : http://src.chromium.org/viewvc/chrome?view=revision&revision=20798
-Date    : Sat, 22 Jun 2013 00:59:35 +0000
-
-Subject : Subject 2
-Author  : prasadv, prasadv@google.com
-Link    : http://src.chromium.org/viewvc/chrome?view=revision&revision=20798
-Date    : Sat, 22 Jun 2013 00:57:48 +0000
-
-Subject : Subject 3
-Author  :   qyearsley@google.com
-Link    : http://src.chromium.org/viewvc/chrome?view=revision&revision=20798
-Date    : Sat, 22 Jun 2013 00:55:52 +0000
-"""
-
-# Bisect log with multiple potential culprits but same Author.
-_BISECT_LOG_MULTI_SAME_OWNER = """
-@@@STEP_CURSOR Results@@@
-@@@STEP_STARTED@@@
-
-===== BISECT JOB RESULTS =====
-Status: Positive
-
-Test Command: python tools/perf/run_benchmark -v --browser=release sunspider
-Test Metric: Total/Total
-Relative Change: 1.23% (+/-1.26%)
-Estimated Confidence: 99.9%
-
-===== SUSPECTED CL(s) =====
-Subject : Subject 1
-Author  :   sullivan@google.com
-Link    : http://src.chromium.org/viewvc/chrome?view=revision&revision=20798
-Date    : Sat, 22 Jun 2013 00:59:35 +0000
-
-Subject : Subject 2
-Author  : sullivan@google.com
-Link    : http://src.chromium.org/viewvc/chrome?view=revision&revision=20798
-Date    : Sat, 22 Jun 2013 00:57:48 +0000:55:52 +0000
-"""
-
-# Bisect log with single potential culprits.
-_BISECT_LOG_SINGLE_OWNER = """
-@@@STEP_CURSOR Results@@@
-@@@STEP_STARTED@@@
-
-===== BISECT JOB RESULTS =====
-Status: Positive
-
-Test Command: python tools/perf/run_benchmark -v --browser=release sunspider
-Test Metric: Total/Total
-Relative Change: 1.23% (+/-1.26%)
-Estimated Confidence: 100%
-
-===== SUSPECTED CL(s) =====
-Subject : Subject 1
-Author  :   sullivan@google.com
-Link    : http://src.chromium.org/viewvc/chrome?view=revision&revision=20798
-Commit  : d6432657771a9fd720179d8c3dd64c8daee025c7
-Date    : Sat, 22 Jun 2013 00:59:35 +0000
-"""
-
-_EXPECTED_BISECT_LOG_SINGLE_OWNER = """
-
-===== BISECT JOB RESULTS =====
-Status: Positive
-
-Test Command: python tools/perf/run_benchmark -v --browser=release sunspider
-Test Metric: Total/Total
-Relative Change: 1.23% (+/-1.26%)
-Estimated Confidence: 100%
-
-===== SUSPECTED CL(s) =====
-Subject : Subject 1
-Author  :   sullivan@google.com
-Link    : http://src.chromium.org/viewvc/chrome?view=revision&revision=20798
-Commit  : d6432657771a9fd720179d8c3dd64c8daee025c7
-Date    : Sat, 22 Jun 2013 00:59:35 +0000"""
-
-_EXPECTED_BISECT_RESULTS_ON_BUG = """
-==== Auto-CCing suspected CL author sullivan@google.com ====
-
-Hi sullivan@google.com, the bisect results pointed to your CL below as possibly
-causing a regression. Please have a look at this info and see whether
-your CL be related.
-
-Bisect job status: Completed
-Bisect job ran on: win_perf_bisect
-
-
-
-===== BISECT JOB RESULTS =====
-Status: Positive
-
-Test Command: python tools/perf/run_benchmark -v --browser=release sunspider
-Test Metric: Total/Total\nRelative Change: 1.23% (+/-1.26%)
-Estimated Confidence: 100%
-
-===== SUSPECTED CL(s) =====
-Subject : Subject 1
-Author  :   sullivan@google.com
-Link    : http://src.chromium.org/viewvc/chrome?view=revision&revision=20798
-Commit  : d6432657771a9fd720179d8c3dd64c8daee025c7
-Date    : Sat, 22 Jun 2013 00:59:35 +0000
-
-Buildbot stdio: http://build.chromium.org/513
-Job details: https://test-rietveld.appspot.com/200037
-"""
-
-_BISECT_LOG_FAILED_REVISION = """
-@@@STEP_CURSOR Results@@@
-@@@STEP_STARTED@@@
-
-===== BISECT JOB RESULTS =====
-Status: Positive
-
-Test Command: python tools/perf/run_benchmark -v --browser=release sunspider
-Test Metric: Total/Total
-Relative Change: 1.23% (+/-1.26%)
-Estimated Confidence: 99.9%
-
-===== SUSPECTED CL(s) =====
-Subject : Subject 1
-Author  :   sullivan@google.com
-Link    : http://src.chromium.org/viewvc/chrome?view=revision&revision=20799
-Commit  : a80773bb263a9706cc8ee4e3f336d2d3d28fadd8
-Date    : Sat, 22 Jun 2013 00:59:35 +0000
-"""
-
-_BISECT_LOG_PARTIAL_RESULT = """
-===== PARTIAL RESULTS =====
-Depot Commit SHA Mean Std. Error State
-chromium 282472 91730.00 +-0.00 Bad
-
-chromium 282469 92973.00 +-0.00 Good
-chromium 282460 93468.00 +-0.00 Good
-
-
-"""
-
-_EXPECTED_BISECT_LOG_PARTIAL_RESULT = u"""Bisect job status: Failure with \
-partial results
-Bisect job ran on: win_perf_bisect
-
-Completed 1/2 builds.
-Run time: 724/720 minutes.
-Bisect timed out! Try again with a smaller revision range.
-Failed steps: slave_steps, Working on def
-
-===== PARTIAL RESULTS =====
-Depot Commit SHA Mean Std. Error State
-chromium 282472 91730.00 +-0.00 Bad
-
-chromium 282469 92973.00 +-0.00 Good
-chromium 282460 93468.00 +-0.00 Good
-
-Buildbot stdio: http://build.chromium.org/builders/515
-Job details: https://test-rietveld.appspot.com/200039
-"""
+_SAMPLE_BISECT_RESULTS_JSON = {
+    'try_job_id': 6789,
+    'bug_id': 4567,
+    'status': 'completed',
+    'bisect_bot': 'linux',
+    'buildbot_log_url': '',
+    'command': ('tools/perf/run_benchmark -v '
+                '--browser=release page_cycler.intl_ar_fa_he'),
+    'metric': 'warm_times/page_load_time',
+    'change': '',
+    'score': 99.9,
+    'good_revision': '306475',
+    'bad_revision': '306478',
+    'warnings': None,
+    'abort_reason': None,
+    'issue_url': 'https://issue_url/123456',
+    'culprit_data': {
+        'subject': 'subject',
+        'author': 'author',
+        'email': 'author@email.com',
+        'cl_date': '1/2/2015',
+        'commit_info': 'commit_info',
+        'revisions_links': ['http://src.chromium.org/viewvc/chrome?view='
+                            'revision&revision=20798'],
+        'cl': '2a1781d64d'  # Should match config in bisect_fyi_test.py.
+    },
+    'revision_data': [
+        {
+            'depot_name': 'chromium',
+            'deps_revision': 1234,
+            'commit_hash': '1234abcdf',
+            'mean_value': 70,
+            'std_dev': 0,
+            'values': [70, 70, 70],
+            'result': 'good'
+        }, {
+            'depot_name': 'chromium',
+            'deps_revision': 1235,
+            'commit_hash': '1235abdcf',
+            'mean_value': 80,
+            'std_dev': 0,
+            'values': [80, 80, 80],
+            'result': 'bad'
+        }
+    ]
+}
 
 _REVISION_RESPONSE = """
 <html xmlns=....>
@@ -222,32 +96,6 @@
   'max_time_minutes': '120'
 }"""
 
-_PERF_LOG_EXPECTED_TITLE_1 = 'With Patch - Profiler Data[0]'
-_PERF_LOG_EXPECTED_TITLE_2 = 'Without Patch - Profiler Data[0]'
-_PERF_LOG_EXPECTED_PROFILER_LINK1 = (
-    'https://console.developers.google.com/m/cloudstorage/b/chrome-telemetry/o/'
-    'profiler-file-id_0-2014-11-27_14-08-5560487.json')
-_PERF_LOG_EXPECTED_PROFILER_LINK2 = (
-    'https://console.developers.google.com/m/cloudstorage/b/chrome-telemetry/o/'
-    'profiler-file-id_0-2014-11-27_14-10-1644780.json')
-_PERF_LOG_EXPECTED_HTML_LINK = (
-    'http://storage.googleapis.com/chromium-telemetry/html-results/'
-    'results-2014-11-27_14-10-21')
-_PERF_LOG_WITH_RESULTS = """
-@@@STEP_CLOSED@@@
-
-
-@@@STEP_LINK@HTML Results@%s@@@
-
-
-@@@STEP_LINK@%s@%s@@@
-
-
-@@@STEP_LINK@%s@%s@@@
-""" % (_PERF_LOG_EXPECTED_HTML_LINK, _PERF_LOG_EXPECTED_TITLE_1,
-       _PERF_LOG_EXPECTED_PROFILER_LINK1, _PERF_LOG_EXPECTED_TITLE_2,
-       _PERF_LOG_EXPECTED_PROFILER_LINK2)
-
 _ISSUE_RESPONSE = """
     {
       "description": "Issue Description.",
@@ -273,130 +121,9 @@
     }
 """
 
-_BISECT_LOG_INFRA_FAILURE = 'Failed to produce build'
-
-# Globals that are set in mock functions and then checked in tests.
-_TEST_RECEIEVED_EMAIL_RESULTS = None
-_TEST_RECEIVED_EMAIL = None
-
-
-def _MockGetJobStatus(job):
-  id_to_response_map = {
-      # Complete
-      '1234567': {
-          'result': 'SUCCESS',
-          'result_details': {
-              'buildername': 'Fake_Bot',
-          },
-          'url': 'http://build.chromium.org/bb1234567',
-          'status': 'COMPLETED',
-      },
-      # In progress
-      '11111': {
-          'result_details': {
-              'buildername': 'Fake_Bot',
-          },
-          'url': 'http://build.chromium.org/bb11111',
-          'status': 'STARTED',
-      },
-      # Failed
-      '66666': {
-          'result': 'FAILURE',
-          'result_details': {
-              'buildername': 'Fake_Bot',
-          },
-          'url': 'http://build.chromium.org/bb66666',
-          'status': 'COMPLETED',
-      },
-  }
-  return id_to_response_map.get(str(job.buildbucket_job_id))
-
 
 def _MockFetch(url=None):
   url_to_response_map = {
-      'https://test-rietveld.appspot.com/api/200034/1': [
-          200,
-          json.dumps({'try_job_results': [{
-              'result': '0',
-              'builder': 'win_perf_bisect',
-              'url': 'http://build.chromium.org/508'}]})
-      ],
-      'https://test-rietveld.appspot.com/api/302304/1': [
-          200,
-          json.dumps({'try_job_results': [{
-              'result': '2',
-              'builder': 'win_perf_bisect',
-              'url': 'http://build.chromium.org/509'}]})
-      ],
-      'https://test-rietveld.appspot.com/api/100001/1': [
-          200,
-          json.dumps({'try_job_results': [{
-              'result': '6',
-              'builder': 'win_perf_bisect',
-              'url': 'http://build.chromium.org/510'}]})
-      ],
-      'https://test-rietveld.appspot.com/api/200035/1': [
-          200,
-          json.dumps({'try_job_results': [{
-              'result': '0',
-              'builder': 'win_perf_bisect',
-              'url': 'http://build.chromium.org/511'}]})
-      ],
-      'https://test-rietveld.appspot.com/api/200036/1': [
-          200,
-          json.dumps({'try_job_results': [{
-              'result': '0',
-              'builder': 'win_perf_bisect',
-              'url': 'http://build.chromium.org/512'}]})
-      ],
-      'https://test-rietveld.appspot.com/api/200037/1': [
-          200,
-          json.dumps({'try_job_results': [{
-              'result': '0',
-              'builder': 'win_perf_bisect',
-              'url': 'http://build.chromium.org/513'}]})
-      ],
-      'https://test-rietveld.appspot.com/api/200038/1': [
-          200,
-          json.dumps({'try_job_results': [{
-              'result': '0',
-              'builder': 'win_perf_bisect',
-              'url': 'http://build.chromium.org/514'}]})
-      ],
-      'https://test-rietveld.appspot.com/api/200039/1': [
-          200,
-          json.dumps({'try_job_results': [{
-              'result': '0',
-              'builder': 'win_perf_bisect',
-              'url': 'http://build.chromium.org/builders/515'}]})
-      ],
-      'http://build.chromium.org/json/builders/515': [
-          200,
-          json.dumps({
-              'steps': [{'name': 'Working on abc', 'results': [0]},
-                        {'name': 'Working on def', 'results': [2]}],
-              'times': [1411501756.293642, 1411545237.89049],
-              'text': ['failed', 'slave_steps', 'failed', 'Working on def']})
-      ],
-      'http://build.chromium.org/bb1234567/steps/Results/logs/stdio/text': [
-          200, _BISECT_LOG_SINGLE_OWNER
-      ],
-      'http://build.chromium.org/bb66666': [
-          200,
-          json.dumps({
-              'steps': [{'name': 'Working on abc', 'results': [0]},
-                        {'name': 'Working on def', 'results': [2]}],
-              'times': [1411501756.293642, 1411545237.89049],
-              'text': ['failed', 'slave_steps', 'failed', 'Working on def']})
-      ],
-      ('http://build.chromium.org/builders/bb66666'
-       '/steps/Results/logs/stdio/text'): [
-           404, ''
-       ],
-      'http://build.chromium.org/json/builders/516': [
-          200,
-          json.dumps({'steps': [{'name': 'gclient', 'results': [2]}]})
-      ],
       'http://src.chromium.org/viewvc/chrome?view=revision&revision=20798': [
           200, _REVISION_RESPONSE
       ],
@@ -406,43 +133,6 @@
       'https://codereview.chromium.org/api/17504006': [
           200, json.dumps(json.loads(_ISSUE_RESPONSE))
       ],
-      'http://build.chromium.org/508/steps/Results/logs/stdio/text': [
-          200, '===== BISECT JOB RESULTS ====='
-      ],
-      'http://build.chromium.org/509/steps/Results/logs/stdio/text': [
-          200, 'BISECT FAILURE! '
-      ],
-      'http://build.chromium.org/511/steps/Results/logs/stdio/text': [
-          200, _BISECT_LOG_MULTI_OWNER
-      ],
-      'http://build.chromium.org/512/steps/Results/logs/stdio/text': [
-          200, _BISECT_LOG_MULTI_SAME_OWNER
-      ],
-      'http://build.chromium.org/513/steps/Results/logs/stdio/text': [
-          200, _BISECT_LOG_SINGLE_OWNER
-      ],
-      'http://build.chromium.org/514/steps/Results/logs/stdio/text': [
-          200, _BISECT_LOG_FAILED_REVISION
-      ],
-      'http://build.chromium.org/builders/515/steps/Results/logs/stdio/text': [
-          404, ''
-      ],
-      'http://build.chromium.org/builders/515/steps/Working%20on%20abc/logs/'
-      'stdio/text': [
-          200, _BISECT_LOG_PARTIAL_RESULT
-      ],
-      'http://build.chromium.org/builders/516/steps/slave_steps/logs/stdio/'
-      'text': [
-          200, _BISECT_LOG_INFRA_FAILURE
-      ],
-      'http://build.chromium.org/508/steps/Running%20Bisection/logs/stdio/'
-      'text': [
-          200, _PERF_LOG_WITH_RESULTS
-      ],
-      'http://build.chromium.org/511/steps/Running%20Bisection/logs/stdio/'
-      'text': [
-          200, ''
-      ],
   }
 
   if url not in url_to_response_map:
@@ -453,22 +143,6 @@
   return testing_common.FakeResponseObject(response_code, response)
 
 
-def _MockMakeRequest(path, method):  # pylint: disable=unused-argument
-  url = 'https://test-rietveld.appspot.com/' + path
-  response = _MockFetch(url=url)
-  return response, response.content
-
-
-def _MockSendPerfTryJobEmail(_, results):
-  global _TEST_RECEIEVED_EMAIL_RESULTS
-  _TEST_RECEIEVED_EMAIL_RESULTS = results
-
-
-def _MockSendMail(**kwargs):
-  global _TEST_RECEIVED_EMAIL
-  _TEST_RECEIVED_EMAIL = kwargs
-
-
 # In this class, we patch apiclient.discovery.build so as to not make network
 # requests, which are normally made when the IssueTrackerService is initialized.
 @mock.patch('apiclient.discovery.build', mock.MagicMock())
@@ -482,10 +156,6 @@
         update_bug_with_results.UpdateBugWithResultsHandler)])
     self.testapp = webtest.TestApp(app)
     self._AddRietveldConfig()
-    # Calling the real Credentials function doesn't work in the test
-    # environment; using no credentials in the tests works because the requests
-    # to the issue tracker are mocked out as well.
-    rietveld_service.Credentials = mock.MagicMock(return_value=None)
 
   def _AddRietveldConfig(self):
     """Adds a RietveldConfig entity to the datastore.
@@ -501,80 +171,68 @@
         server_url='https://test-rietveld.appspot.com',
         internal_server_url='https://test-rietveld.appspot.com').put()
 
+  def _AddTryJob(self, bug_id, status, bot, **kwargs):
+    job = try_job.TryJob(bug_id=bug_id, status=status, bot=bot, **kwargs)
+    job.put()
+    bug_data.Bug(id=bug_id).put()
+    return job
+
   @mock.patch(
       'google.appengine.api.urlfetch.fetch',
       mock.MagicMock(side_effect=_MockFetch))
   @mock.patch.object(
-      update_bug_with_results.rietveld_service.RietveldService, 'MakeRequest',
-      mock.MagicMock(side_effect=_MockMakeRequest))
-  @mock.patch.object(
       update_bug_with_results.issue_tracker_service, 'IssueTrackerService',
       mock.MagicMock())
-  @mock.patch.object(
-      update_bug_with_results.buildbucket_service, 'GetJobStatus',
-      _MockGetJobStatus)
   def testGet(self):
-    # Put succeeded, failed, and not yet finished jobs in the datastore.
-    try_job.TryJob(
-        bug_id=12345, rietveld_issue_id=200034, rietveld_patchset_id=1,
-        status='started', bot='win_perf').put()
-    try_job.TryJob(
-        bug_id=54321, rietveld_issue_id=302304, rietveld_patchset_id=1,
-        status='started', bot='win_perf').put()
-    try_job.TryJob(
-        bug_id=99999, rietveld_issue_id=100001, rietveld_patchset_id=1,
-        status='started', bot='win_perf').put()
-    try_job.TryJob(
-        bug_id=77777, buildbucket_job_id='1234567', use_buildbucket=True,
-        status='started', bot='win_perf').put()
-    # Create bug.
-    bug_data.Bug(id=12345).put()
-    bug_data.Bug(id=54321).put()
-    bug_data.Bug(id=99999).put()
-    bug_data.Bug(id=77777).put()
+    # Put succeeded, failed, staled, and not yet finished jobs in the
+    # datastore.
+    self._AddTryJob(11111, 'started', 'win_perf',
+                    results_data=_SAMPLE_BISECT_RESULTS_JSON)
+    staled_timestamp = (datetime.datetime.now() -
+                        update_bug_with_results._STALE_TRYJOB_DELTA)
+    self._AddTryJob(22222, 'started', 'win_perf',
+                    last_ran_timestamp=staled_timestamp)
+    self._AddTryJob(33333, 'failed', 'win_perf')
+    self._AddTryJob(44444, 'started', 'win_perf')
 
     self.testapp.get('/update_bug_with_results')
     pending_jobs = try_job.TryJob.query().fetch()
-    # Expects a failed and not yet finished bisect job to be in datastore.
-    self.assertEqual(3, len(pending_jobs))
-    self.assertEqual(54321, pending_jobs[0].bug_id)
-    self.assertEqual('failed', pending_jobs[0].status)
-    self.assertEqual(99999, pending_jobs[1].bug_id)
-    self.assertEqual(77777, pending_jobs[2].bug_id)
-    self.assertEqual('started', pending_jobs[1].status)
-    self.assertEqual('started', pending_jobs[2].status)
-    self.assertEqual('bisect', pending_jobs[0].job_type)
-    self.assertEqual('bisect', pending_jobs[1].job_type)
-    self.assertEqual('bisect', pending_jobs[2].job_type)
+    # Expects no jobs to be deleted.
+    self.assertEqual(4, len(pending_jobs))
+    self.assertEqual(11111, pending_jobs[0].bug_id)
+    self.assertEqual('completed', pending_jobs[0].status)
+    self.assertEqual(22222, pending_jobs[1].bug_id)
+    self.assertEqual('staled', pending_jobs[1].status)
+    self.assertEqual(33333, pending_jobs[2].bug_id)
+    self.assertEqual('failed', pending_jobs[2].status)
+    self.assertEqual(44444, pending_jobs[3].bug_id)
+    self.assertEqual('started', pending_jobs[3].status)
 
   @mock.patch(
       'google.appengine.api.urlfetch.fetch',
       mock.MagicMock(side_effect=_MockFetch))
   @mock.patch.object(
-      update_bug_with_results.rietveld_service.RietveldService, 'MakeRequest',
-      mock.MagicMock(side_effect=_MockMakeRequest))
-  @mock.patch.object(
       update_bug_with_results.issue_tracker_service, 'IssueTrackerService',
       mock.MagicMock())
   def testCreateTryJob_WithoutExistingBug(self):
     # Put succeeded job in the datastore.
     try_job.TryJob(
-        bug_id=12345, rietveld_issue_id=200034, rietveld_patchset_id=1,
-        status='started', bot='win_perf').put()
+        bug_id=12345, status='started', bot='win_perf',
+        results_data=_SAMPLE_BISECT_RESULTS_JSON).put()
 
     self.testapp.get('/update_bug_with_results')
     pending_jobs = try_job.TryJob.query().fetch()
 
     # Expects job to finish.
-    self.assertEqual(0, len(pending_jobs))
+    self.assertEqual(1, len(pending_jobs))
+    self.assertEqual(12345, pending_jobs[0].bug_id)
+    self.assertEqual('completed', pending_jobs[0].status)
 
+  @mock.patch.object(utils, 'ServiceAccountCredentials', mock.MagicMock())
   @mock.patch(
       'google.appengine.api.urlfetch.fetch',
       mock.MagicMock(side_effect=_MockFetch))
   @mock.patch.object(
-      update_bug_with_results.rietveld_service.RietveldService, 'MakeRequest',
-      mock.MagicMock(side_effect=_MockMakeRequest))
-  @mock.patch.object(
       update_bug_with_results.issue_tracker_service.IssueTrackerService,
       'AddBugComment', mock.MagicMock(return_value=False))
   @mock.patch('logging.error')
@@ -582,15 +240,10 @@
     # Put a successful job and a failed job with partial results.
     # Note that AddBugComment is mocked to always return False, which
     # simulates failing to post results to the issue tracker for all bugs.
-    try_job.TryJob(
-        bug_id=12345, rietveld_issue_id=200034, rietveld_patchset_id=1,
-        status='started', bot='win_perf').put()
-    try_job.TryJob(
-        bug_id=54321, rietveld_issue_id=200039, rietveld_patchset_id=1,
-        status='started', bot='win_perf').put()
-    bug_data.Bug(id=12345).put()
-    bug_data.Bug(id=54321).put()
-
+    self._AddTryJob(12345, 'started', 'win_perf',
+                    results_data=_SAMPLE_BISECT_RESULTS_JSON)
+    self._AddTryJob(54321, 'started', 'win_perf',
+                    results_data=_SAMPLE_BISECT_RESULTS_JSON)
     self.testapp.get('/update_bug_with_results')
 
     # Two errors should be logged.
@@ -606,231 +259,74 @@
       'google.appengine.api.urlfetch.fetch',
       mock.MagicMock(side_effect=_MockFetch))
   @mock.patch.object(
-      update_bug_with_results.rietveld_service.RietveldService, 'MakeRequest',
-      mock.MagicMock(side_effect=_MockMakeRequest))
-  @mock.patch.object(
       update_bug_with_results.issue_tracker_service.IssueTrackerService,
       'AddBugComment')
-  def testGet_BisectJobWithPartialResults(self, mock_update_bug):
-    # Put failed job in the datastore.
-    try_job.TryJob(
-        bug_id=54321, rietveld_issue_id=200039, rietveld_patchset_id=1,
-        status='started', bot='win_perf').put()
-    # Create bug.
-    bug_data.Bug(id=54321).put()
-
-    self.testapp.get('/update_bug_with_results')
-
-    pending_jobs = try_job.TryJob.query().fetch()
-    self.assertEqual(1, len(pending_jobs))
-    self.assertEqual('failed', pending_jobs[0].status)
-    mock_update_bug.assert_called_once_with(
-        54321, _EXPECTED_BISECT_LOG_PARTIAL_RESULT, labels=None)
-
-  @mock.patch(
-      'google.appengine.api.urlfetch.fetch',
-      mock.MagicMock(side_effect=_MockFetch))
-  @mock.patch.object(
-      update_bug_with_results.rietveld_service.RietveldService, 'MakeRequest',
-      mock.MagicMock(side_effect=_MockMakeRequest))
-  @mock.patch.object(
-      update_bug_with_results.issue_tracker_service.IssueTrackerService,
-      'AddBugComment')
-  def testGet_BisectCulpritHasMultipleAuthors_NoneCCd(self, mock_update_bug):
-    # When a bisect finds multiple culprits for a perf regression,
-    # owners of CLs shouldn't be cc'ed on issue update.
-    try_job.TryJob(
-        bug_id=12345, rietveld_issue_id=200035, rietveld_patchset_id=1,
-        status='started', bot='win_perf').put()
-    bug_data.Bug(id=12345).put()
-
-    self.testapp.get('/update_bug_with_results')
-
-    mock_update_bug.assert_called_once_with(
-        mock.ANY, mock.ANY, cc_list=[], merge_issue=None, labels=None,
-        owner=None)
-    pending_jobs = try_job.TryJob.query().fetch()
-    self.assertEqual(0, len(pending_jobs))
-
-  @mock.patch(
-      'google.appengine.api.urlfetch.fetch',
-      mock.MagicMock(side_effect=_MockFetch))
-  @mock.patch.object(
-      update_bug_with_results.rietveld_service.RietveldService, 'MakeRequest',
-      mock.MagicMock(side_effect=_MockMakeRequest))
-  @mock.patch.object(
-      update_bug_with_results.issue_tracker_service.IssueTrackerService,
-      'AddBugComment')
-  def testGet_MultipleCulpritsSameAuthor_AssignsAuthor(self, mock_update_bug):
-    # When a bisect finds multiple culprits by same Author for a perf
-    # regression, owner of CLs should be cc'ed.
-    try_job.TryJob(
-        bug_id=12345, rietveld_issue_id=200036, rietveld_patchset_id=1,
-        status='started', bot='win_perf').put()
-    bug_data.Bug(id=12345).put()
-
-    self.testapp.get('/update_bug_with_results')
-
-    mock_update_bug.assert_called_once_with(
-        mock.ANY, mock.ANY,
-        cc_list=['sullivan@google.com', 'prasadv@google.com'],
-        merge_issue=None, labels=None, owner='sullivan@google.com')
-    pending_jobs = try_job.TryJob.query().fetch()
-    self.assertEqual(0, len(pending_jobs))
-
-  @mock.patch(
-      'google.appengine.api.urlfetch.fetch',
-      mock.MagicMock(side_effect=_MockFetch))
-  @mock.patch.object(
-      update_bug_with_results.rietveld_service.RietveldService, 'MakeRequest',
-      mock.MagicMock(side_effect=_MockMakeRequest))
-  @mock.patch.object(
-      update_bug_with_results.issue_tracker_service.IssueTrackerService,
-      'AddBugComment')
-  def testGet_BisectCulpritHasSingleAuthor_AssignsAuthor(self, mock_update_bug):
-    # When a bisect finds a single culprit for a perf regression,
+  def testGet_BisectCulpritHasAuthor_AssignsAuthor(self, mock_update_bug):
+    # When a bisect has a culprit for a perf regression,
     # author and reviewer of the CL should be cc'ed on issue update.
-    try_job.TryJob(
-        bug_id=12345, rietveld_issue_id=200037, rietveld_patchset_id=1,
-        status='started', bot='win_perf').put()
+    self._AddTryJob(12345, 'started', 'win_perf',
+                    results_data=_SAMPLE_BISECT_RESULTS_JSON)
 
-    # Create bug.
-    bug_data.Bug(id=12345).put()
     self.testapp.get('/update_bug_with_results')
     mock_update_bug.assert_called_once_with(
         mock.ANY, mock.ANY,
-        cc_list=['sullivan@google.com', 'prasadv@google.com'],
-        merge_issue=None, labels=None, owner='sullivan@google.com')
-    pending_jobs = try_job.TryJob.query().fetch()
-    self.assertEqual(0, len(pending_jobs))
-
-  def testBeautifyContent(self):
-    # Remove buildbot annotations (@@@), leading and trailing spaces from bisect
-    # results log.
-    actual_output = update_bug_with_results._BeautifyContent(
-        _BISECT_LOG_SINGLE_OWNER)
-    self.assertNotIn('@@@', actual_output)
-    for line in actual_output.split('\n'):
-      self.assertFalse(line.startswith(' '))
-      self.assertFalse(line.endswith(' '))
-    self.assertEqual(_EXPECTED_BISECT_LOG_SINGLE_OWNER, actual_output)
+        cc_list=['author@email.com', 'prasadv@google.com'],
+        merge_issue=None, labels=None, owner='author@email.com')
 
   @mock.patch(
       'google.appengine.api.urlfetch.fetch',
       mock.MagicMock(side_effect=_MockFetch))
   @mock.patch.object(
-      update_bug_with_results.rietveld_service.RietveldService, 'MakeRequest',
-      mock.MagicMock(side_effect=_MockMakeRequest))
-  @mock.patch.object(
       update_bug_with_results.issue_tracker_service.IssueTrackerService,
       'AddBugComment')
   def testGet_FailedRevisionResponse(self, mock_add_bug):
-    # When a Rietveld CL link fails to respond, only update CL owner in CC list.
-    try_job.TryJob(
-        bug_id=12345, rietveld_issue_id=200038, rietveld_patchset_id=1,
-        status='started', bot='win_perf').put()
+    # When a Rietveld CL link fails to respond, only update CL owner in CC
+    # list.
+    sample_bisect_results = copy.deepcopy(_SAMPLE_BISECT_RESULTS_JSON)
+    sample_bisect_results['revisions_links'] = [
+        'http://src.chromium.org/viewvc/chrome?view=revision&revision=20799']
+    self._AddTryJob(12345, 'started', 'win_perf',
+                    results_data=sample_bisect_results)
 
-    # Create bug.
-    bug_data.Bug(id=12345).put()
     self.testapp.get('/update_bug_with_results')
     mock_add_bug.assert_called_once_with(mock.ANY,
                                          mock.ANY,
-                                         cc_list=['sullivan@google.com'],
+                                         cc_list=['author@email.com',
+                                                  'prasadv@google.com'],
                                          merge_issue=None,
                                          labels=None,
-                                         owner='sullivan@google.com')
-    pending_jobs = try_job.TryJob.query().fetch()
-    self.assertEqual(0, len(pending_jobs))
+                                         owner='author@email.com')
 
   @mock.patch(
       'google.appengine.api.urlfetch.fetch',
       mock.MagicMock(side_effect=_MockFetch))
   @mock.patch.object(
-      update_bug_with_results.rietveld_service.RietveldService, 'MakeRequest',
-      mock.MagicMock(side_effect=_MockMakeRequest))
-  @mock.patch.object(
-      update_bug_with_results.issue_tracker_service.IssueTrackerService,
-      'AddBugComment')
-  def testGet_MergesBugIntoExistingBug(self, mock_update_bug):
-    # When there exists a bug with the same revision (commit hash),
-    # mark bug as duplicate and merge current issue into that.
-    try_job.TryJob(
-        bug_id=12345, rietveld_issue_id=200037, rietveld_patchset_id=1,
-        status='started', bot='win_perf').put()
-    try_job.TryJob(
-        bug_id=54321, rietveld_issue_id=200037, rietveld_patchset_id=1,
-        status='started', bot='win_perf').put()
-
-    # Create bug.
-    bug_data.Bug(id=12345).put()
-    bug_data.Bug(id=54321).put()
-    self.testapp.get('/update_bug_with_results')
-    # Owners of CLs are not cc'ed for duplicate bugs and the issue should be
-    # marked as duplicate.
-    mock_update_bug.assert_called_with(mock.ANY,
-                                       mock.ANY,
-                                       cc_list=[],
-                                       merge_issue='12345',
-                                       labels=None,
-                                       owner=None)
-    pending_jobs = try_job.TryJob.query().fetch()
-    self.assertEqual(0, len(pending_jobs))
-    # Add anomalies.
-    test_keys = map(utils.TestKey, [
-        'ChromiumGPU/linux-release/scrolling-benchmark/first_paint',
-        'ChromiumGPU/linux-release/scrolling-benchmark/mean_frame_time'])
-    anomaly.Anomaly(
-        start_revision=9990, end_revision=9997, test=test_keys[0],
-        median_before_anomaly=100, median_after_anomaly=200,
-        sheriff=None, bug_id=12345).put()
-    anomaly.Anomaly(
-        start_revision=9990, end_revision=9996, test=test_keys[0],
-        median_before_anomaly=100, median_after_anomaly=200,
-        sheriff=None, bug_id=54321).put()
-    # Map anomalies to base(dest_bug_id) bug.
-    update_bug_with_results._MapAnomaliesToMergeIntoBug(
-        dest_bug_id=12345, source_bug_id=54321)
-    anomalies = anomaly.Anomaly.query(
-        anomaly.Anomaly.bug_id == int(54321)).fetch()
-    self.assertEqual(0, len(anomalies))
-
-  @mock.patch.object(
       update_bug_with_results.issue_tracker_service.IssueTrackerService,
       'AddBugComment', mock.MagicMock())
-  @mock.patch.object(
-      update_bug_with_results, '_GetBisectResults',
-      mock.MagicMock(return_value={
-          'results': 'Status: Positive\nCommit  : abcd123',
-          'status': 'Completed',
-          'bisect_bot': 'bar',
-          'issue_url': 'bar',
-          'buildbot_log_url': 'bar',
-      }))
   def testGet_PositiveResult_StoresCommitHash(self):
-    try_job.TryJob(
-        bug_id=12345, rietveld_issue_id=200034, rietveld_patchset_id=1,
-        status='started', bot='win_perf').put()
-    self.testapp.get('/update_bug_with_results')
-    self.assertEqual('12345', layered_cache.GetExternal('commit_hash_abcd123'))
+    self._AddTryJob(12345, 'started', 'win_perf',
+                    results_data=_SAMPLE_BISECT_RESULTS_JSON)
 
+    self.testapp.get('/update_bug_with_results')
+    self.assertEqual('12345',
+                     layered_cache.GetExternal('commit_hash_2a1781d64d'))
+
+  @mock.patch(
+      'google.appengine.api.urlfetch.fetch',
+      mock.MagicMock(side_effect=_MockFetch))
   @mock.patch.object(
       update_bug_with_results.issue_tracker_service.IssueTrackerService,
       'AddBugComment', mock.MagicMock())
-  @mock.patch.object(
-      update_bug_with_results, '_GetBisectResults',
-      mock.MagicMock(return_value={
-          'results': 'Status: Negative\nCommit  : a121212',
-          'status': 'Completed',
-          'bisect_bot': 'bar',
-          'issue_url': 'bar',
-          'buildbot_log_url': 'bar',
-      }))
-  def testGet_NegativeResult_StoresCommitHash(self):
-    try_job.TryJob(
-        bug_id=12345, rietveld_issue_id=200034, rietveld_patchset_id=1,
-        status='started', bot='win_perf').put()
+  def testGet_NegativeResult_DoesNotStoreCommitHash(self):
+    sample_bisect_results = copy.deepcopy(_SAMPLE_BISECT_RESULTS_JSON)
+    sample_bisect_results['culprit_data'] = None
+    self._AddTryJob(12345, 'started', 'win_perf',
+                    results_data=sample_bisect_results)
     self.testapp.get('/update_bug_with_results')
-    self.assertIsNone(layered_cache.GetExternal('commit_hash_a121212'))
+
+    caches = layered_cache.CachedPickledString.query().fetch()
+    # Only one cached entry is expected: the bisect stats entry.
+    self.assertEqual(1, len(caches))
 
   def testMapAnomaliesToMergeIntoBug(self):
     # Add anomalies.
@@ -856,327 +352,52 @@
       'google.appengine.api.urlfetch.fetch',
       mock.MagicMock(side_effect=_MockFetch))
   @mock.patch.object(
-      update_bug_with_results.rietveld_service.RietveldService, 'MakeRequest',
-      mock.MagicMock(side_effect=_MockMakeRequest))
-  @mock.patch.object(update_bug_with_results, '_LogBisectInfraFailure')
-  def testCheckBisectBotForInfraFailure_BotFailure(
-      self, log_bisect_failure_mock):
-    bug_id = 516
-    build_data = {
-        'steps': [{'name': 'A', 'results': [0]},
-                  {'name': 'B', 'results': [2]}],
-        'times': [1411501756, 1411545237],
-    }
-    build_url = 'http://build.chromium.org/builders/516'
-    update_bug_with_results._CheckBisectBotForInfraFailure(
-        bug_id, build_data, build_url)
-    log_bisect_failure_mock.assert_called_with(
-        bug_id, 'Bot failure.', mock.ANY)
-
-  @mock.patch(
-      'google.appengine.api.urlfetch.fetch',
-      mock.MagicMock(side_effect=_MockFetch))
-  @mock.patch.object(
-      update_bug_with_results.rietveld_service.RietveldService, 'MakeRequest',
-      mock.MagicMock(side_effect=_MockMakeRequest))
-  @mock.patch.object(update_bug_with_results, '_LogBisectInfraFailure')
-  def testCheckBisectBotForInfraFailure_BuildFailure(
-      self, log_bisect_failure_mock):
-    bug_id = 516
-    build_data = {
-        'steps': [{'name': 'A', 'results': [0]},
-                  {'name': 'slave_steps', 'results': [2]}],
-        'times': [1411500000, 1411501000],
-    }
-    build_url = 'http://build.chromium.org/builders/516'
-    update_bug_with_results._CheckBisectBotForInfraFailure(
-        bug_id, build_data, build_url)
-    log_bisect_failure_mock.assert_called_with(
-        bug_id, 'Build failure.', mock.ANY)
-
-  @mock.patch(
-      'google.appengine.api.urlfetch.fetch',
-      mock.MagicMock(side_effect=_MockFetch))
-  @mock.patch.object(
-      update_bug_with_results.rietveld_service.RietveldService, 'MakeRequest',
-      mock.MagicMock(side_effect=_MockMakeRequest))
-  @mock.patch.object(
-      update_bug_with_results.issue_tracker_service.IssueTrackerService,
-      'AddBugComment')
-  def testGet_BotInfoInBisectResults(self, mock_update_bug):
-    # When a bisect finds multiple culprits by same Author for a perf
-    # regression, owner of CLs should be cc'ed.
-    try_job.TryJob(
-        bug_id=12345, rietveld_issue_id=200037, rietveld_patchset_id=1,
-        status='started', bot='win_perf').put()
-
-    # Create bug.
-    bug_data.Bug(id=12345).put()
+      update_bug_with_results.email_template,
+      'GetPerfTryJobEmailReport', mock.MagicMock(return_value=None))
+  def testSendPerfTryJobEmail_EmptyEmailReport_DontSendEmail(self):
+    self._AddTryJob(12345, 'started', 'win_perf', job_type='perf-try',
+                    results_data=_SAMPLE_BISECT_RESULTS_JSON)
     self.testapp.get('/update_bug_with_results')
-    mock_update_bug.assert_called_once_with(
-        12345,
-        _EXPECTED_BISECT_RESULTS_ON_BUG,
-        cc_list=['sullivan@google.com', 'prasadv@google.com'],
-        merge_issue=None,
-        labels=None,
-        owner='sullivan@google.com')
+    messages = self.mail_stub.get_sent_messages()
+    self.assertEqual(0, len(messages))
 
   @mock.patch(
       'google.appengine.api.urlfetch.fetch',
       mock.MagicMock(side_effect=_MockFetch))
   @mock.patch.object(
-      update_bug_with_results.rietveld_service.RietveldService, 'MakeRequest',
-      mock.MagicMock(side_effect=_MockMakeRequest))
-  @mock.patch.object(
-      update_bug_with_results, '_SendPerfTryJobEmail',
-      mock.MagicMock(side_effect=_MockSendPerfTryJobEmail))
-  @mock.patch.object(
-      update_bug_with_results.issue_tracker_service, 'IssueTrackerService',
-      mock.MagicMock())
-  def testGet_PerfTryJob(self):
-    try_job.TryJob(
-        rietveld_issue_id=200034, rietveld_patchset_id=1,
-        status='started', bot='win_perf', email='just@atestemail.com',
-        job_type='perf-try', config=_PERF_TEST_CONFIG).put()
-    global _TEST_RECEIEVED_EMAIL_RESULTS
-    _TEST_RECEIEVED_EMAIL_RESULTS = None
-
-    self.testapp.get('/update_bug_with_results')
-
-    results = _TEST_RECEIEVED_EMAIL_RESULTS
-    self.assertEqual('Completed', results['status'])
-    self.assertEqual(2, len(results['profiler_results']))
-    self.assertEqual(_PERF_LOG_EXPECTED_HTML_LINK,
-                     results['html_results'])
-    self.assertEqual(_PERF_LOG_EXPECTED_TITLE_1,
-                     results['profiler_results'][0][0])
-    self.assertEqual(_PERF_LOG_EXPECTED_PROFILER_LINK1,
-                     results['profiler_results'][0][1])
-    self.assertEqual(_PERF_LOG_EXPECTED_TITLE_2,
-                     results['profiler_results'][1][0])
-    self.assertEqual(_PERF_LOG_EXPECTED_PROFILER_LINK2,
-                     results['profiler_results'][1][1])
-    self.assertEqual('win_perf_bisect', results['bisect_bot'])
-
-  @mock.patch(
-      'google.appengine.api.urlfetch.fetch',
-      mock.MagicMock(side_effect=_MockFetch))
-  @mock.patch.object(
-      update_bug_with_results.rietveld_service.RietveldService, 'MakeRequest',
-      mock.MagicMock(side_effect=_MockMakeRequest))
-  @mock.patch.object(
-      update_bug_with_results, '_SendPerfTryJobEmail',
-      mock.MagicMock(side_effect=_MockSendPerfTryJobEmail))
-  @mock.patch.object(
-      update_bug_with_results.issue_tracker_service, 'IssueTrackerService',
-      mock.MagicMock())
-  def testGet_PerfTryJobWithInvalidOutput_EmailResultsAreEmpty(self):
-    try_job.TryJob(
-        rietveld_issue_id=200035, rietveld_patchset_id=1,
-        status='started', bot='win_perf', email='just@atestemail.com',
-        job_type='perf-try', config=_PERF_TEST_CONFIG).put()
-    global _TEST_RECEIEVED_EMAIL_RESULTS
-    _TEST_RECEIEVED_EMAIL_RESULTS = None
-
-    self.testapp.get('/update_bug_with_results')
-
-    results = _TEST_RECEIEVED_EMAIL_RESULTS
-    self.assertEqual('Completed', results['status'])
-    self.assertEqual(0, len(results['profiler_results']))
-    self.assertEqual('', results['html_results'])
-    self.assertEqual('win_perf_bisect', results['bisect_bot'])
-
-  @mock.patch(
-      'google.appengine.api.urlfetch.fetch',
-      mock.MagicMock(side_effect=_MockFetch))
-  @mock.patch.object(
-      update_bug_with_results.rietveld_service.RietveldService, 'MakeRequest',
-      mock.MagicMock(side_effect=_MockMakeRequest))
-  @mock.patch(
-      'google.appengine.api.mail.send_mail',
-      mock.MagicMock(side_effect=_MockSendMail))
-  @mock.patch.object(
-      update_bug_with_results.issue_tracker_service, 'IssueTrackerService',
-      mock.MagicMock())
-  def testGet_CreatePerfSuccessEmail(self):
-    try_job.TryJob(
-        rietveld_issue_id=200034, rietveld_patchset_id=1,
-        status='started', bot='win_perf', email='just@atestemail.com',
-        job_type='perf-try', config=_PERF_TEST_CONFIG).put()
-    global _TEST_RECEIVED_EMAIL
-    _TEST_RECEIVED_EMAIL = {}
-
-    self.testapp.get('/update_bug_with_results')
-
-    self.assertIn('<a href="http://build.chromium.org/508">'
-                  'http://build.chromium.org/508</a>.',
-                  _TEST_RECEIVED_EMAIL.get('html'))
-    self.assertIn('With Patch', _TEST_RECEIVED_EMAIL.get('body'))
-    self.assertIn('Without Patch', _TEST_RECEIVED_EMAIL.get('body'))
-    self.assertIn('just@atestemail.com',
-                  _TEST_RECEIVED_EMAIL.get('to'))
-
-  @mock.patch(
-      'google.appengine.api.urlfetch.fetch',
-      mock.MagicMock(side_effect=_MockFetch))
-  @mock.patch.object(
-      update_bug_with_results.rietveld_service.RietveldService, 'MakeRequest',
-      mock.MagicMock(side_effect=_MockMakeRequest))
-  @mock.patch(
-      'google.appengine.api.mail.send_mail',
-      mock.MagicMock(side_effect=_MockSendMail))
-  @mock.patch.object(
-      update_bug_with_results.issue_tracker_service, 'IssueTrackerService',
-      mock.MagicMock())
-  def testGet_CreatePerfFailureEmail(self):
-    try_job.TryJob(
-        rietveld_issue_id=200034, rietveld_patchset_id=1,
-        status='started', bot='win_perf', email='just@atestemail.com',
-        job_type='perf-try').put()
-
-    global _TEST_RECEIVED_EMAIL
-    _TEST_RECEIVED_EMAIL = {}
-
-    self.testapp.get('/update_bug_with_results')
-
-    self.assertIn('Perf Try Job FAILURE\n<br>',
-                  _TEST_RECEIVED_EMAIL.get('html'))
-    self.assertIn('Perf Try Job FAILURE\n\n',
-                  _TEST_RECEIVED_EMAIL.get('body'))
-    self.assertIn('just@atestemail.com',
-                  _TEST_RECEIVED_EMAIL.get('to'))
-
-  @mock.patch(
-      'google.appengine.api.urlfetch.fetch',
-      mock.MagicMock(side_effect=_MockFetch))
-  @mock.patch.object(
-      update_bug_with_results.rietveld_service.RietveldService, 'MakeRequest',
-      mock.MagicMock(side_effect=_MockMakeRequest))
-  @mock.patch.object(
       update_bug_with_results.issue_tracker_service.IssueTrackerService,
       'AddBugComment')
   def testGet_InternalOnlyTryJob_AddsInternalOnlyBugLabel(
       self, mock_update_bug):
-    try_job.TryJob(
-        bug_id=12345, rietveld_issue_id=200037, rietveld_patchset_id=1,
-        status='started', bot='win_perf', internal_only=True).put()
+    self._AddTryJob(12345, 'started', 'win_perf',
+                    results_data=_SAMPLE_BISECT_RESULTS_JSON,
+                    internal_only=True)
 
-    # Create bug.
-    bug_data.Bug(id=12345).put()
     self.testapp.get('/update_bug_with_results')
     mock_update_bug.assert_called_once_with(
         mock.ANY, mock.ANY,
         cc_list=mock.ANY,
         merge_issue=None, labels=['Restrict-View-Google'], owner=mock.ANY)
 
-  def testValidateAndConvertBuildbucketResponse_NoResults(self):
-    buildbucket_response_scheduled = r"""{
-      "build": {
-        "status": "SCHEDULED",
-        "id": "9043191319901995952"
-      }
-    }"""
-    with self.assertRaises(update_bug_with_results.UnexpectedJsonError):
-      update_bug_with_results._ValidateAndConvertBuildbucketResponse(
-          json.loads(buildbucket_response_scheduled))
-
-  def testValidateAndConvertBuildbucketResponse_Failed(self):
-    buildbucket_response_failed = r"""{
-      "build": {
-        "status": "COMPLETED",
-        "url": "http://build.chromium.org/linux_perf_bisector/builds/41",
-        "failure_reason": "BUILD_FAILURE",
-        "result": "FAILURE",
-        "id": "9043547105089652704"
-      }
-    }"""
-    converted_response = (
-        update_bug_with_results._ValidateAndConvertBuildbucketResponse(
-            json.loads(buildbucket_response_failed)))
-    self.assertIn('http', converted_response['url'])
-    self.assertEqual(converted_response['result'],
-                     update_bug_with_results.FAILURE)
-
-  def testValidateAndConvertBuildbucketResponse_Success(self):
-    buildbucket_response_success = r"""{
-      "build": {
-        "status": "COMPLETED",
-        "url": "http://build.chromium.org/linux_perf_bisector/builds/47",
-        "id": "9043278384371361584",
-        "result": "SUCCESS"
-      }
-    }"""
-    converted_response = (
-        update_bug_with_results._ValidateAndConvertBuildbucketResponse(
-            json.loads(buildbucket_response_success)))
-    self.assertIn('http', converted_response['url'])
-    self.assertEqual(converted_response['result'],
-                     update_bug_with_results.SUCCESS)
-
-  @mock.patch('logging.error')
-  def testValidateAndConvertBuildbucketResponse_NoTesterInConfig(
-      self, mock_logging_error):
-    job_info = {
-        'build': {
-            'status': 'foo',
-            'url': 'www.baz.com',
-            'result': 'bar',
-        }
-    }
-    result = update_bug_with_results._ValidateAndConvertBuildbucketResponse(
-        job_info)
-    self.assertEqual('Unknown', result['builder'])
-    self.assertEqual(1, mock_logging_error.call_count)
-
-  def testValidateAndConvertBuildbucketResponse_TesterInConfig(self):
-    job_info = {
-        'build': {
-            'status': 'foo',
-            'url': 'www.baz.com',
-            'result': 'bar',
-            'result_details_json': json.dumps({
-                'properties': {
-                    'bisect_config': {'recipe_tester_name': 'my_perf_bisect'}
-                }
-            })
-        }
-    }
-    result = update_bug_with_results._ValidateAndConvertBuildbucketResponse(
-        job_info)
-    self.assertEqual('my_perf_bisect', result['builder'])
-
   @mock.patch(
       'google.appengine.api.urlfetch.fetch',
       mock.MagicMock(side_effect=_MockFetch))
   @mock.patch.object(
-      update_bug_with_results.rietveld_service.RietveldService, 'MakeRequest',
-      mock.MagicMock(side_effect=_MockMakeRequest))
-  @mock.patch.object(
       update_bug_with_results.issue_tracker_service, 'IssueTrackerService',
       mock.MagicMock())
-  @mock.patch.object(
-      update_bug_with_results, '_GetBisectResults',
-      mock.MagicMock(return_value={
-          'results': ('===== BISECT JOB RESULTS =====\n'
-                      'Status: Positive\n'
-                      'Commit  : 2a1781d64d'),
-          'status': 'Completed',
-          'bisect_bot': 'bar',
-          'issue_url': 'bar',
-          'buildbot_log_url': 'bar',
-      }))
   def testFYI_Send_No_Email_On_Success(self):
     stored_object.Set(
         bisect_fyi._BISECT_FYI_CONFIGS_KEY,
         bisect_fyi_test.TEST_FYI_CONFIGS)
     test_config = bisect_fyi_test.TEST_FYI_CONFIGS['positive_culprit']
     bisect_config = test_config.get('bisect_config')
-    try_job.TryJob(
-        bug_id=12345, rietveld_issue_id=200034, rietveld_patchset_id=1,
-        status='started', bot='win_perf',
-        job_name='positive_culprit',
-        job_type='bisect-fyi',
-        config=utils.BisectConfigPythonString(bisect_config)).put()
+    self._AddTryJob(12345, 'started', 'win_perf',
+                    results_data=_SAMPLE_BISECT_RESULTS_JSON,
+                    internal_only=True,
+                    config=utils.BisectConfigPythonString(bisect_config),
+                    job_type='bisect-fyi',
+                    job_name='positive_culprit',
+                    email='chris@email.com')
 
     self.testapp.get('/update_bug_with_results')
     messages = self.mail_stub.get_sent_messages()
@@ -1186,93 +407,66 @@
       'google.appengine.api.urlfetch.fetch',
       mock.MagicMock(side_effect=_MockFetch))
   @mock.patch.object(
-      update_bug_with_results.rietveld_service.RietveldService, 'MakeRequest',
-      mock.MagicMock(side_effect=_MockMakeRequest))
-  @mock.patch(
-      'google.appengine.api.mail.send_mail',
-      mock.MagicMock(side_effect=_MockSendMail))
+      update_bug_with_results.bisect_fyi, 'IsBugUpdated',
+      mock.MagicMock(return_value=True))
   @mock.patch.object(
       update_bug_with_results.issue_tracker_service, 'IssueTrackerService',
       mock.MagicMock())
-  @mock.patch.object(
-      update_bug_with_results, '_GetBisectResults',
-      mock.MagicMock(return_value={
-          'results': ('===== BISECT JOB RESULTS =====\n'
-                      'Status: Positive\n'
-                      'Commit  : a121212'),
-          'status': 'Completed',
-          'bisect_bot': 'bar',
-          'issue_url': 'bar',
-          'buildbot_log_url': 'bar',
-      }))
-  def testFYI_Expected_Results_Mismatch_SendEmail(self):
-    stored_object.Set(
-        bisect_fyi._BISECT_FYI_CONFIGS_KEY,
-        bisect_fyi_test.TEST_FYI_CONFIGS)
-    test_config = bisect_fyi_test.TEST_FYI_CONFIGS['positive_culprit']
-    bisect_config = test_config.get('bisect_config')
-    try_job.TryJob(
-        bug_id=12345, rietveld_issue_id=200034, rietveld_patchset_id=1,
-        status='started', bot='win_perf',
-        job_name='positive_culprit',
-        job_type='bisect-fyi',
-        config=utils.BisectConfigPythonString(bisect_config)).put()
-
-    global _TEST_RECEIVED_EMAIL
-    _TEST_RECEIVED_EMAIL = None
-
-    self.testapp.get('/update_bug_with_results')
-    self.assertIn('Bisect FYI Try Job Failed\n<br>',
-                  _TEST_RECEIVED_EMAIL.get('html'))
-    self.assertIn('Bisect FYI Try Job Failed\n\n',
-                  _TEST_RECEIVED_EMAIL.get('body'))
-    self.assertIn('prasadv@google.com',
-                  _TEST_RECEIVED_EMAIL.get('to'))
-
-  @mock.patch(
-      'google.appengine.api.urlfetch.fetch',
-      mock.MagicMock(side_effect=_MockFetch))
-  @mock.patch.object(
-      update_bug_with_results.rietveld_service.RietveldService, 'MakeRequest',
-      mock.MagicMock(side_effect=_MockMakeRequest))
-  @mock.patch(
-      'google.appengine.api.mail.send_mail',
-      mock.MagicMock(side_effect=_MockSendMail))
-  @mock.patch.object(
-      update_bug_with_results.issue_tracker_service, 'IssueTrackerService',
-      mock.MagicMock())
-  @mock.patch.object(
-      update_bug_with_results, '_GetBisectResults',
-      mock.MagicMock(return_value={
-          'results': ('Failed to produce build.'),
-          'status': 'Failure',
-          'bisect_bot': 'bar',
-          'issue_url': 'bar',
-          'buildbot_log_url': 'bar',
-      }))
   def testFYI_Failed_Job_SendEmail(self):
     stored_object.Set(
         bisect_fyi._BISECT_FYI_CONFIGS_KEY,
         bisect_fyi_test.TEST_FYI_CONFIGS)
     test_config = bisect_fyi_test.TEST_FYI_CONFIGS['positive_culprit']
     bisect_config = test_config.get('bisect_config')
-    try_job.TryJob(
-        bug_id=12345, rietveld_issue_id=200034, rietveld_patchset_id=1,
-        status='started', bot='win_perf',
-        job_name='positive_culprit',
-        job_type='bisect-fyi',
-        config=utils.BisectConfigPythonString(bisect_config)).put()
-
-    global _TEST_RECEIVED_EMAIL
-    _TEST_RECEIVED_EMAIL = None
+    sample_bisect_results = copy.deepcopy(_SAMPLE_BISECT_RESULTS_JSON)
+    sample_bisect_results['status'] = 'failed'
+    self._AddTryJob(12345, 'started', 'win_perf',
+                    results_data=sample_bisect_results,
+                    internal_only=True,
+                    config=utils.BisectConfigPythonString(bisect_config),
+                    job_type='bisect-fyi',
+                    job_name='positive_culprit',
+                    email='chris@email.com')
 
     self.testapp.get('/update_bug_with_results')
-    self.assertIn('Bisect FYI Try Job Failed\n<br>',
-                  _TEST_RECEIVED_EMAIL.get('html'))
-    self.assertIn('Bisect FYI Try Job Failed\n\n',
-                  _TEST_RECEIVED_EMAIL.get('body'))
-    self.assertIn('prasadv@google.com',
-                  _TEST_RECEIVED_EMAIL.get('to'))
+    messages = self.mail_stub.get_sent_messages()
+    self.assertEqual(1, len(messages))
+
+  @mock.patch.object(
+      update_bug_with_results.quick_logger.QuickLogger,
+      'Log', mock.MagicMock(return_value='record_key_123'))
+  @mock.patch('logging.error')
+  def testUpdateQuickLog_WithJobResults_NoError(self, mock_logging_error):
+    job = self._AddTryJob(111, 'started', 'win_perf',
+                          results_data=_SAMPLE_BISECT_RESULTS_JSON)
+    update_bug_with_results.UpdateQuickLog(job)
+    self.assertEqual(0, mock_logging_error.call_count)
+
+  @mock.patch('logging.error')
+  @mock.patch('update_bug_with_results.quick_logger.QuickLogger.Log')
+  def testUpdateQuickLog_NoResultsData_ReportsError(
+      self, mock_log, mock_logging_error):
+    job = self._AddTryJob(111, 'started', 'win_perf')
+    update_bug_with_results.UpdateQuickLog(job)
+    self.assertEqual(0, mock_log.call_count)
+    mock_logging_error.assert_called_once_with(
+        'Bisect report returns empty for job id %s, bug_id %s.', 1, 111)
+
+  @mock.patch(
+      'google.appengine.api.urlfetch.fetch',
+      mock.MagicMock(side_effect=_MockFetch))
+  @mock.patch.object(
+      update_bug_with_results.issue_tracker_service.IssueTrackerService,
+      'AddBugComment')
+  def testGet_PostResult_WithoutBugEntity(
+      self, mock_update_bug):
+    job = try_job.TryJob(bug_id=12345, status='started', bot='win_perf',
+                         results_data=_SAMPLE_BISECT_RESULTS_JSON)
+    job.put()
+    self.testapp.get('/update_bug_with_results')
+    mock_update_bug.assert_called_once_with(
+        12345, mock.ANY, cc_list=mock.ANY, merge_issue=mock.ANY,
+        labels=mock.ANY, owner=mock.ANY)
 
 
 if __name__ == '__main__':
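
The tests above share one setup pattern: deep-copy the _SAMPLE_BISECT_RESULTS_JSON fixture and override individual keys (for example 'status' or 'culprit_data') before attaching it to a job. A minimal standalone sketch of that pattern, using an illustrative stand-in dict rather than the real fixture:

import copy

# Illustrative stand-in for the shared results fixture; not the real sample data.
_SAMPLE_RESULTS = {
    'status': 'completed',
    'culprit_data': {'cl': '2a1781d64d'},
}


def MakeVariant(**overrides):
  """Returns a deep copy of the fixture with selected keys overridden."""
  variant = copy.deepcopy(_SAMPLE_RESULTS)
  variant.update(overrides)
  return variant


failed_results = MakeVariant(status='failed')
no_culprit_results = MakeVariant(culprit_data=None)
assert _SAMPLE_RESULTS['status'] == 'completed'  # The original is untouched.
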
diff --git a/catapult/dashboard/dashboard/update_test_suites.py b/catapult/dashboard/dashboard/update_test_suites.py
index 297e560..0ec1b3f 100644
--- a/catapult/dashboard/dashboard/update_test_suites.py
+++ b/catapult/dashboard/dashboard/update_test_suites.py
@@ -7,7 +7,6 @@
 import logging
 
 from google.appengine.api import datastore_errors
-from google.appengine.ext import ndb
 
 from dashboard import datastore_hooks
 from dashboard import request_handler
@@ -20,7 +19,15 @@
 
 def FetchCachedTestSuites():
   """Fetches cached test suite data."""
-  return stored_object.Get(_NamespaceKey(_LIST_SUITES_CACHE_KEY))
+  cache_key = _NamespaceKey(_LIST_SUITES_CACHE_KEY)
+  cached = stored_object.Get(cache_key)
+  if cached is None:
+    # If the cached test suite list is not set, update it before fetching.
+    # This is a convenience when testing data uploads to a local instance.
+    namespace = datastore_hooks.GetNamespace()
+    UpdateTestSuites(namespace)
+    cached = stored_object.Get(cache_key)
+  return cached
 
 
 class UpdateTestSuitesHandler(request_handler.RequestHandler):
@@ -32,14 +39,15 @@
 
   def post(self):
     """Refreshes the cached test suites list."""
-    logging.info('Going to update test suites data.')
-
-    # Update externally-visible test suites data.
-    UpdateTestSuites(datastore_hooks.EXTERNAL)
-
-    # Update internal-only test suites data.
-    datastore_hooks.SetPrivilegedRequest()
-    UpdateTestSuites(datastore_hooks.INTERNAL)
+    if self.request.get('internal_only') == 'true':
+      logging.info('Going to update internal-only test suites data.')
+      # Update internal-only test suites data.
+      datastore_hooks.SetPrivilegedRequest()
+      UpdateTestSuites(datastore_hooks.INTERNAL)
+    else:
+      logging.info('Going to update externally-visible test suites data.')
+      # Update externally-visible test suites data.
+      UpdateTestSuites(datastore_hooks.EXTERNAL)
 
 
 def UpdateTestSuites(permissions_namespace):
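
FetchCachedTestSuites above now behaves as a read-through cache: if nothing is stored it rebuilds the suite list and reads it again. A simplified, self-contained sketch of that read-through shape, where the in-memory dict and _Rebuild are hypothetical stand-ins for stored_object and UpdateTestSuites:

_CACHE = {}       # Hypothetical stand-in for the stored_object cache.
_REBUILDS = []    # Records how often the expensive rebuild runs.


def _Rebuild(key):
  # Hypothetical stand-in for UpdateTestSuites(): recompute and store.
  _REBUILDS.append(key)
  _CACHE[key] = {'scrolling': {'mas': {'Chromium': {'mac': False}}}}


def FetchCached(key):
  """Read-through fetch: rebuild only when nothing is cached yet."""
  cached = _CACHE.get(key)
  if cached is None:
    _Rebuild(key)
    cached = _CACHE.get(key)
  return cached


FetchCached('suites')
FetchCached('suites')
assert len(_REBUILDS) == 1  # The second call is served from the cache.
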
diff --git a/catapult/dashboard/dashboard/update_test_suites_test.py b/catapult/dashboard/dashboard/update_test_suites_test.py
index eb2c05f..85b5a74 100644
--- a/catapult/dashboard/dashboard/update_test_suites_test.py
+++ b/catapult/dashboard/dashboard/update_test_suites_test.py
@@ -7,6 +7,9 @@
 import webapp2
 import webtest
 
+from google.appengine.ext import ndb
+
+from dashboard import datastore_hooks
 from dashboard import stored_object
 from dashboard import testing_common
 from dashboard import update_test_suites
@@ -22,6 +25,9 @@
         [('/update_test_suites',
           update_test_suites.UpdateTestSuitesHandler)])
     self.testapp = webtest.TestApp(app)
+    datastore_hooks.InstallHooks()
+    testing_common.SetIsInternalUser('internal@chromium.org', True)
+    self.UnsetCurrentUser()
 
   def testFetchCachedTestSuites_NotEmpty(self):
     # If the cache is set, then whatever's there is returned.
@@ -32,13 +38,6 @@
         {'foo': 'bar'},
         update_test_suites.FetchCachedTestSuites())
 
-  def testFetchCachedTestSuites_Empty_ReturnsNone(self):
-    # If the cache is not set, then FetchCachedTestSuites
-    # just returns None; compiling the list of test suites would
-    # take too long.
-    self._AddSampleData()
-    self.assertIsNone(update_test_suites.FetchCachedTestSuites())
-
   def _AddSampleData(self):
     testing_common.AddTests(
         ['Chromium'],
@@ -67,12 +66,68 @@
             },
         })
 
-  def testPost(self):
+  def testPost_ForcesCacheUpdate(self):
+    key = update_test_suites._NamespaceKey(
+        update_test_suites._LIST_SUITES_CACHE_KEY)
+    stored_object.Set(key, {'foo': 'bar'})
+    self.assertEqual(
+        {'foo': 'bar'},
+        update_test_suites.FetchCachedTestSuites())
     self._AddSampleData()
-    # The cache starts out empty.
-    self.assertIsNone(update_test_suites.FetchCachedTestSuites())
+    # Because there is something cached, the cache is
+    # not automatically updated when new data is added.
+    self.assertEqual(
+        {'foo': 'bar'},
+        update_test_suites.FetchCachedTestSuites())
+
+    # Making a request to /update_test_suites forces an update.
     self.testapp.post('/update_test_suites')
-    # After the request is made, it will no longer be empty.
+    self.assertEqual(
+        {
+            'dromaeo': {
+                'mas': {'Chromium': {'mac': False, 'win7': False}},
+            },
+            'scrolling': {
+                'mas': {'Chromium': {'mac': False, 'win7': False}},
+            },
+            'really': {
+                'mas': {'Chromium': {'mac': False, 'win7': False}},
+            },
+        },
+        update_test_suites.FetchCachedTestSuites())
+
+  def testPost_InternalOnly(self):
+    self.SetCurrentUser('internal@chromium.org')
+    self._AddSampleData()
+    master_key = ndb.Key('Master', 'Chromium')
+    bot_key = graph_data.Bot(id='internal_mac', parent=master_key,
+                             internal_only=True).put()
+    graph_data.Test(id='internal_test', parent=bot_key,
+                    internal_only=True).put()
+
+    self.testapp.post('/update_test_suites?internal_only=true')
+
+    self.assertEqual(
+        {
+            'dromaeo': {
+                'mas': {'Chromium': {'mac': False, 'win7': False}},
+            },
+            'internal_test': {
+                'mas': {'Chromium': {'internal_mac': False}},
+            },
+            'scrolling': {
+                'mas': {'Chromium': {'mac': False, 'win7': False}},
+            },
+            'really': {
+                'mas': {'Chromium': {'mac': False, 'win7': False}},
+            },
+        },
+        update_test_suites.FetchCachedTestSuites())
+
+  def testFetchCachedTestSuites_Empty_UpdatesWhenFetching(self):
+    # If the cache is not set at all, then FetchCachedTestSuites
+    # just updates the cache before returning the list.
+    self._AddSampleData()
     self.assertEqual(
         {
             'dromaeo': {
@@ -148,7 +203,6 @@
   def testCreateSuiteMastersDict(self):
     self._AddSampleData()
     suites = update_test_suites._FetchSuites()
-    print update_test_suites._CreateSuiteMastersDict(suites)
     self.assertEqual(
         {
             'dromaeo': {'Chromium': {'mac': False, 'win7': False}},
diff --git a/catapult/dashboard/dashboard/utils.py b/catapult/dashboard/dashboard/utils.py
index bbeccbc..2474e9b 100644
--- a/catapult/dashboard/dashboard/utils.py
+++ b/catapult/dashboard/dashboard/utils.py
@@ -8,20 +8,25 @@
 import binascii
 import json
 import logging
+import os
 import re
 import time
 
 from apiclient import discovery
+from apiclient import errors
+from google.appengine.api import memcache
 from google.appengine.api import urlfetch
+from google.appengine.api import urlfetch_errors
 from google.appengine.api import users
 from google.appengine.ext import ndb
-from oauth2client.client import GoogleCredentials
+from oauth2client import client
 
 from dashboard import stored_object
 
-INTERNAL_DOMAIN_KEY = 'internal_domain_key'
 SHERIFF_DOMAINS_KEY = 'sheriff_domains_key'
 IP_WHITELIST_KEY = 'ip_whitelist'
+SERVICE_ACCOUNT_KEY = 'service_account'
+EMAIL_SCOPE = 'https://www.googleapis.com/auth/userinfo.email'
 _PROJECT_ID_KEY = 'project_id'
 _DEFAULT_CUSTOM_METRIC_VAL = 1
 
@@ -41,7 +46,7 @@
   Args:
     metric_name: The name of the metric being monitored.
   """
-  credentials = GoogleCredentials.get_application_default()
+  credentials = client.GoogleCredentials.get_application_default()
   monitoring = discovery.build(
       'cloudmonitoring', 'v2beta2', credentials=credentials)
   now = _GetNowRfc3339()
@@ -222,9 +227,63 @@
 
 def IsInternalUser():
   """Checks whether the user should be able to see internal-only data."""
-  user = users.get_current_user()
-  domain = stored_object.Get(INTERNAL_DOMAIN_KEY)
-  return user and domain and user.email().endswith('@' + domain)
+  username = users.get_current_user()
+  if not username:
+    return False
+  cached = GetCachedIsInternalUser(username)
+  if cached is not None:
+    return cached
+  is_internal_user = IsGroupMember(identity=username, group='googlers')
+  SetCachedIsInternalUser(username, is_internal_user)
+  return is_internal_user
+
+
+def GetCachedIsInternalUser(username):
+  return memcache.get(_IsInternalUserCacheKey(username))
+
+
+def SetCachedIsInternalUser(username, value):
+  memcache.add(_IsInternalUserCacheKey(username), value, time=60*60*24)
+
+
+def _IsInternalUserCacheKey(username):
+  return 'is_internal_user_%s' % username
+
+
+def IsGroupMember(identity, group):
+  """Checks if a user is a group member of using chrome-infra-auth.appspot.com.
+
+  Args:
+    identity: User email address.
+    group: Group name.
+
+  Returns:
+    True if confirmed to be a member, False otherwise.
+  """
+  try:
+    discovery_url = ('https://chrome-infra-auth.appspot.com'
+                     '/_ah/api/discovery/v1/apis/{api}/{apiVersion}/rest')
+    service = discovery.build(
+        'auth', 'v1', discoveryServiceUrl=discovery_url,
+        credentials=ServiceAccountCredentials())
+    request = service.membership(identity=identity, group=group)
+    response = request.execute()
+    return response['is_member']
+  except (errors.HttpError, KeyError, AttributeError) as e:
+    logging.error('Failed to check membership of %s: %s', identity, e)
+    return False
+
+
+def ServiceAccountCredentials():
+  """Returns the Credentials of the service account if available."""
+  account_details = stored_object.Get(SERVICE_ACCOUNT_KEY)
+  if not account_details:
+    logging.error('Service account credentials not found.')
+    return None
+  return client.SignedJwtAssertionCredentials(
+      service_account_name=account_details['client_email'],
+      private_key=account_details['private_key'],
+      scope=EMAIL_SCOPE)
 
 
 def IsValidSheriffUser():
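
IsInternalUser above memoizes the per-user 'googlers' membership check in memcache for a day, so chrome-infra-auth is only consulted on a cache miss. A simplified sketch of that caching shape, with a plain dict standing in for memcache and a hypothetical stub for the group lookup:

_MEMBERSHIP_CACHE = {}  # Hypothetical in-memory stand-in for memcache.


def _IsGroupMemberStub(email, group):
  # Hypothetical stub for the chrome-infra-auth membership lookup.
  return group == 'googlers' and email.endswith('@google.com')


def IsInternalUserSketch(email):
  if not email:
    return False
  key = 'is_internal_user_%s' % email
  cached = _MEMBERSHIP_CACHE.get(key)
  if cached is not None:
    return cached
  is_member = _IsGroupMemberStub(email, 'googlers')
  _MEMBERSHIP_CACHE[key] = is_member  # The real code adds a 24-hour expiry.
  return is_member


assert IsInternalUserSketch('someone@google.com')
assert not IsInternalUserSketch('someone@example.com')
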
@@ -278,3 +337,92 @@
     logging.error('Failed to decode "%s" from "%s".', response.content, url)
     return None
   return plaintext_content
+
+
+def GetRequestId():
+  """Returns the request log ID which can be used to find a specific log."""
+  return os.environ.get('REQUEST_LOG_ID')
+
+
+def Validate(expected, actual):
+  """Generic validator for expected keys, values, and types.
+
+  Values are also considered equal if |actual| can be converted to |expected|'s
+  type.  For instance:
+    Validate([3], '3')  # Passes, since '3' converts to 3.
+
+  See utils_test.py for more examples.
+
+  Args:
+    expected: Either a list of expected values or a dictionary of expected
+        keys and types.  A dictionary can contain a list of expected values.
+    actual: A value.
+  """
+  def IsValidType(expected, actual):
+    if type(expected) is type and type(actual) is not expected:
+      try:
+        expected(actual)
+      except ValueError:
+        return False
+    return True
+
+  def IsInList(expected, actual):
+    for value in expected:
+      try:
+        if type(value)(actual) == value:
+          return True
+      except ValueError:
+        pass
+    return False
+
+  if not expected:
+    return
+  expected_type = type(expected)
+  actual_type = type(actual)
+  if expected_type is list:
+    if not IsInList(expected, actual):
+      raise ValueError('Invalid value. Expected one of the following: '
+                       '%s. Actual: %s.' % (','.join(expected), actual))
+  elif expected_type is dict:
+    if actual_type is not dict:
+      raise ValueError('Invalid type. Expected: %s. Actual: %s.'
+                       % (expected_type, actual_type))
+    missing = set(expected.keys()) - set(actual.keys())
+    if missing:
+      raise ValueError('Missing the following properties: %s'
+                       % ','.join(missing))
+    for key in expected:
+      Validate(expected[key], actual[key])
+  elif not IsValidType(expected, actual):
+    raise ValueError('Invalid type. Expected: %s. Actual: %s.' %
+                     (expected, actual_type))
+
+
+def FetchURL(request_url, skip_status_code=False):
+  """Wrapper around URL fetch service to make request.
+
+  Args:
+    request_url: URL of request.
+    skip_status_code: Skips return code check when True, default is False.
+
+  Returns:
+    Response object returned by URL fetch, or None if there was an error.
+  """
+  logging.info('URL being fetched: %s', request_url)
+  try:
+    response = urlfetch.fetch(request_url)
+  except urlfetch_errors.DeadlineExceededError:
+    logging.error('Deadline exceeded error checking %s', request_url)
+    return None
+  except urlfetch_errors.DownloadError as err:
+    # DownloadError is raised to indicate a non-specific failure when there
+    # was not a 4xx or 5xx status code.
+    logging.error(err)
+    return None
+  if skip_status_code:
+    return response
+  elif response.status_code != 200:
+    logging.error(
+        'ERROR %s checking %s', response.status_code, request_url)
+    return None
+  return response
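
The list branch of Validate above accepts a value when casting it to a listed element's type reproduces that element (the IsInList helper). A small runnable illustration of just that conversion rule, using a local copy of the helper rather than importing the real module:

def _IsInListSketch(expected, actual):
  # Mirrors IsInList above: a match means actual converts to a listed value.
  for value in expected:
    try:
      if type(value)(actual) == value:
        return True
    except ValueError:
      pass
  return False


assert _IsInListSketch([3], '3')            # '3' converts to the int 3.
assert not _IsInListSketch([3], 'three')    # int('three') raises ValueError.
assert _IsInListSketch(['completed'], 'completed')
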
diff --git a/catapult/dashboard/dashboard/utils_test.py b/catapult/dashboard/dashboard/utils_test.py
index 28dec65..83e8ec7 100644
--- a/catapult/dashboard/dashboard/utils_test.py
+++ b/catapult/dashboard/dashboard/utils_test.py
@@ -19,7 +19,8 @@
 
   def setUp(self):
     super(UtilsTest, self).setUp()
-    testing_common.SetInternalDomain('google.com')
+    testing_common.SetIsInternalUser('internal@chromium.org', True)
+    testing_common.SetIsInternalUser('foo@chromium.org', False)
 
   def _AssertMatches(self, test_path, pattern):
     """Asserts that a test path matches a pattern with MatchesPattern."""
@@ -104,19 +105,19 @@
     ]
     return keys
 
-  def testGetMulti_NotLoggedIn_ReturnsSomeEntities(self):
+  def testGetMulti_ExternalUser_ReturnsSomeEntities(self):
     keys = self._PutEntitiesHalfInternal()
-    self.SetCurrentUser('x@hotmail.com')
+    self.SetCurrentUser('foo@chromium.org')
     self.assertEqual(len(keys) / 2, len(utils.GetMulti(keys)))
 
-  def testGetMulti_LoggedIn_ReturnsAllEntities(self):
+  def testGetMulti_InternalUser_ReturnsAllEntities(self):
     keys = self._PutEntitiesHalfInternal()
-    self.SetCurrentUser('x@google.com')
+    self.SetCurrentUser('internal@chromium.org')
     self.assertEqual(len(keys), len(utils.GetMulti(keys)))
 
-  def testGetMulti_AllExternal_ReturnsAllEntities(self):
+  def testGetMulti_AllExternalEntities_ReturnsAllEntities(self):
     keys = self._PutEntitiesAllExternal()
-    self.SetCurrentUser('x@hotmail.com')
+    self.SetCurrentUser('internal@chromium.org')
     self.assertEqual(len(keys), len(utils.GetMulti(keys)))
 
   def testTestSuiteName_Basic(self):
@@ -143,6 +144,82 @@
     self.assertEqual((6, 14), utils.MinimumRange(
         [(3, 20), (5, 15), (6, 25), (3, 14)]))
 
+  def testValidate_StringNotInOptionList_Fails(self):
+    with self.assertRaises(ValueError):
+      utils.Validate(
+          ['completed', 'pending', 'failed'], 'running')
+
+  def testValidate_InvalidType_Fails(self):
+    with self.assertRaises(ValueError):
+      utils.Validate(int, 'a string')
+
+  def testValidate_MissingProperty_Fails(self):
+    with self.assertRaises(ValueError):
+      utils.Validate(
+          {'status': str, 'try_job_id': int, 'required_property': int},
+          {'status': 'completed', 'try_job_id': 1234})
+
+  def testValidate_InvalidTypeInDict_Fails(self):
+    with self.assertRaises(ValueError):
+      utils.Validate(
+          {'status': int, 'try_job_id': int},
+          {'status': 'completed', 'try_job_id': 1234})
+
+  def testValidate_StringNotInNestedOptionList_Fails(self):
+    with self.assertRaises(ValueError):
+      utils.Validate(
+          {'values': {'nested_values': ['orange', 'banana']}},
+          {'values': {'nested_values': 'apple'}})
+
+  def testValidate_MissingPropertyInNestedDict_Fails(self):
+    with self.assertRaises(ValueError):
+      utils.Validate(
+          {'values': {'nested_values': ['orange', 'banana']}},
+          {'values': {}})
+
+  def testValidate_ExpectedValueIsNone_Passes(self):
+    utils.Validate(None, 'running')
+
+  def testValidate_StringInOptionList_Passes(self):
+    utils.Validate(str, 'a string')
+
+  def testValidate_HasExpectedProperties_Passes(self):
+    utils.Validate(
+        {'status': str, 'try_job_id': int},
+        {'status': 'completed', 'try_job_id': 1234})
+
+  def testValidate_StringInNestedOptionList_Passes(self):
+    utils.Validate(
+        {'values': {'nested_values': ['orange', 'banana']}},
+        {'values': {'nested_values': 'orange'}})
+
+  def testValidate_TypeConversion_Passes(self):
+    utils.Validate([1], '1')
+
+  @mock.patch('utils.discovery.build')
+  def testIsGroupMember_PositiveCase(self, mock_discovery_build):
+    mock_request = mock.MagicMock()
+    mock_request.execute = mock.MagicMock(return_value={'is_member': True})
+    mock_service = mock.MagicMock()
+    mock_service.membership = mock.MagicMock(
+        return_value=mock_request)
+    mock_discovery_build.return_value = mock_service
+    self.assertTrue(utils.IsGroupMember('foo@bar.com', 'group'))
+    mock_service.membership.assert_called_once_with(
+        identity='foo@bar.com', group='group')
+
+  @mock.patch.object(utils, 'ServiceAccountCredentials', mock.MagicMock())
+  @mock.patch('logging.error')
+  @mock.patch('utils.discovery.build')
+  def testIsGroupMember_RequestFails_LogsErrorAndReturnsFalse(
+      self, mock_discovery_build, mock_logging_error):
+    mock_service = mock.MagicMock()
+    mock_service.membership = mock.MagicMock(
+        return_value={'error': 'Some error'})
+    mock_discovery_build.return_value = mock_service
+    self.assertFalse(utils.IsGroupMember('foo@bar.com', 'group'))
+    self.assertEqual(1, mock_logging_error.call_count)
+
 
 def _MakeMockFetch(base64_encoded=True, status=200):
   """Returns a mock fetch object that returns a canned response."""
diff --git a/catapult/dashboard/dashboard_build/__init__.py b/catapult/dashboard/dashboard_build/__init__.py
index 7adca8a..eac07a1 100644
--- a/catapult/dashboard/dashboard_build/__init__.py
+++ b/catapult/dashboard/dashboard_build/__init__.py
@@ -1,8 +1,6 @@
 # Copyright 2015 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
-import os
-import sys
 
 import dashboard_project
 
diff --git a/catapult/dashboard/dashboard_build/dashboard_dev_server_config.py b/catapult/dashboard/dashboard_build/dashboard_dev_server_config.py
index ad2eb01..bf62465 100644
--- a/catapult/dashboard/dashboard_build/dashboard_dev_server_config.py
+++ b/catapult/dashboard/dashboard_build/dashboard_dev_server_config.py
@@ -4,7 +4,6 @@
 
 import json
 import os
-import sys
 
 import dashboard_project
 
@@ -17,6 +16,7 @@
 
 
 class TestListHandler(webapp2.RequestHandler):
+
   def get(self, *args, **kwargs):  # pylint: disable=unused-argument
     project = dashboard_project.DashboardProject()
     test_relpaths = ['/' + _RelPathToUnixPath(x)
@@ -29,6 +29,7 @@
 
 
 class DashboardDevServerConfig(object):
+
   def __init__(self):
     self.project = dashboard_project.DashboardProject()
 
diff --git a/catapult/dashboard/dashboard_project.py b/catapult/dashboard/dashboard_project.py
index 2632df7..ac733b5 100644
--- a/catapult/dashboard/dashboard_project.py
+++ b/catapult/dashboard/dashboard_project.py
@@ -4,7 +4,6 @@
 
 import sys
 import os
-import re
 
 
 def _AddToPathIfNeeded(path):
@@ -45,7 +44,7 @@
 
 class DashboardProject(object):
   catapult_path = os.path.abspath(
-      os.path.join(os.path.dirname(__file__), os.path.pardir))
+      os.path.join(os.path.dirname(__file__), '..'))
 
   catapult_third_party_path = os.path.join(catapult_path, 'third_party')
 
diff --git a/catapult/dashboard/docs/admin-tasks.md b/catapult/dashboard/docs/admin-tasks.md
new file mode 100644
index 0000000..817bce3
--- /dev/null
+++ b/catapult/dashboard/docs/admin-tasks.md
@@ -0,0 +1,162 @@
+# Admin Tasks for the Chrome Performance Dashboard
+
+## "Dashboard is down" check list
+
+- Is app engine up? If not, you just have to sit tight. You can check the status
+here: [https://code.google.com/status/appengine](https://code.google.com/status/appengine)
+- Check the [main app engine dashboard page](https://console.developers.google.com/appengine?project=chromeperf&moduleId=default).
+- Are we over quota?
+- Look at the error rates on the dashboard.
+- Check the task queues.
+- Test data not showing up
+  - Check [/new\_tests](https://chromeperf.appspot.com/new_tests).
+  - Search the logs.
+  - Is the test internal-only and the user logged out?
+
+## Scheduled downtime
+
+If it's necessary at some point to have scheduled downtime, announce
+it ahead of time. At least 2 days before the downtime (ideally more),
+announce in these ways:
+
+ 1. Use [/set\_warning\_message](https://chromeperf.appspot.com/set_warning_message) to
+    put a warning message on the dashboard itself.
+ 2. Send an email to any Chromium perf sheriffs who will be affected,
+    or all perf sheriffs (`perf-sheriffs@chromium.org`).
+ 3. Send an email to `chrome-perf-dashboard-announce@google.com`.
+
+If possible, it's probably best to schedule it for Saturday, when usage
+is likely to be relatively low.
+
+## Routine tasks
+
+There are several routine tasks to do to set up the dashboard for a
+user. The official process for this is to file bugs on crbug.com
+with labels:
+
+- `Performance-Dashboard-IPWhitelist`
+- `Performance-Dashboard-BotWhitelist`
+- `Performance-Dashboard-MonitoringRequest`
+
+### Editing sheriff rotations
+
+You can view, create and edit sheriff rotations
+at [/edit\_sheriffs](https://chromeperf.appspot.com/edit_sheriffs).
+
+#### Adding a new sheriff
+
+It’s fine to add a new sheriff rotation any time a team wants alerts
+to go to a new email address. It’s fine to make a temporary sheriff
+rotation for monitoring new tests before they are stable. Here are the
+fields that need to be filled out:
+
+ - **Name**: This is the name of the sheriff
+   rotation. It will be listed in the drop-down
+   at [/alerts](https://chromeperf.appspot.com/alerts).
+ - **Rotation URL**: Some sheriff rotations have a URL for specifying
+   the email of the sheriff. For example, the Chromium Perf Sheriff URL
+   is [http://chromium-build.appspot.com/p/chromium/sheriff\_perf.js](http://chromium-build.appspot.com/p/chromium/sheriff_perf.js).
+   Most sheriff rotations don’t have a URL, and if not it’s fine to leave
+   this blank and just specify an email address.
+ - **Notification Email**:
+   This is usually a mailing list that alerts should go to. However,
+   there’s nothing stopping it from being an individual’s email
+   account. It must be specified if there is no Rotation URL, but it’s
+   optional otherwise.
+ - **Days before alerting on missing data**:
+   Number of days before "stoppage alerts" are made; -1 for no alerts. 
+ - **Internal-only**: If the tests this sheriff is monitoring are internal-only,
+   or the name of the sheriff rotation is sensitive, please
+   set this to "Yes". If set to "Yes", the sheriff rotation will only
+   show up on the alerts page for users logged in with google.com accounts.
+ - **Summarize Email**: By default, the perf dashboard sends one email
+   for each alert, as soon as it gets the alert. If that will add up to
+   too much mail, setting this to "Yes" will switch to a daily summary.
+
+#### Monitoring tests
+
+After creating a sheriff rotation, you need to add the individual
+tests to monitor. You do this by clicking on "Set a sheriff for a
+group of tests". It asks for a pattern. Patterns match test paths,
+which are of the form "Master/Bot/test-suite/graph/trace". You can replace
+any part of the test path with a `*` for a wildcard. For example, the
+pattern "ChromiumPerf/*/sunspider/Total" matches the sunspider Total series
+on every bot under the ChromiumPerf master.
+
+The dashboard will list the matching tests before allowing you to apply
+the pattern, so you’ll be able to check if the pattern is correct.
+
+To remove a pattern, click "Remove a sheriff from a group of tests".
+
+If you want to keep alerting on most of the tests in a pattern and
+just disable alerting on a few noisy ones, you can add the "Disable
+Alerting" anomaly threshold config to the noisy tests (see "Modify
+anomaly threshold configs" below).
+
+### Setting up alert threshold configs
+
+The default alert thresholds should work reasonably well for most test
+data, but there are some graphs for which it may not be correct. If
+there are invalid alerts, or the dashboard is not sending alerts when
+you expect them, you may want to modify an alert threshold config.
+
+To edit alert threshold configs, go
+to [/edit\_anomaly\_configs](https://chromeperf.appspot.com/edit_anomaly_configs).
+Add a new config with a descriptive name and a JSON mapping of parameters
+to values.
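+
+For illustration only, a config is a small JSON object mapping threshold
+parameter names to values. The parameter names below are just examples of
+the kind of thresholds that can be set; the authoritative list is the one
+documented for the anomaly-finding code and on the
+[/debug\_alert](https://chromeperf.appspot.com/debug_alert) page:
+
+```json
+{
+  "max_window_size": 50,
+  "min_relative_change": 0.1
+}
+```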
+
+### Anomaly config debugger page
+
+Start off by using the anomaly threshold debugging
+page: [/debug\_alert](https://chromeperf.appspot.com/debug_alert). The
+page shows the segmentation of the data that was given by the anomaly
+finding algorithm. Based on the documentation, change the config
+parameters to get the alerts where you want them.
+
+### Automatically applying labels to bugs
+
+The dashboard can automatically apply labels to bugs filed on alerts,
+based on which test triggered the alert. This is useful for bringing the
+alert to the relevant team’s attention. For example, the dashboard automatically
+applies the label "Cr-Blink-JavaScript" to dromaeo regressions,
+which cuts down on a lot of CC-ing by hand.
+
+To make a label automatically applied to a bug, go
+to [/edit\_sheriffs](https://chromeperf.appspot.com/edit_sheriffs) and
+click "Set a bug lable to automatically apply to a group of
+tests". Then type in a pattern as described in "Edit Sheriff
+Rotations -&gt; Monitoring Tests" section above, and type in the bug
+label. You’ll see a list of tests the label will be applied to before
+you confirm.
+
+To remove a label, go
+to [/edit\_sheriffs](https://chromeperf.appspot.com/edit_sheriffs) and
+click "Remove a bug label that automatically applies to a group of
+tests".
+
+### Migrating and renaming data
+
+When a test name changes, it is possible to migrate
+the existing test data to use the new name. You
+can do this by entering a pattern for the test name
+at [/migrate\_test\_names](https://chromeperf.appspot.com/migrate_test_names).
+
+### Whitelisting senders of data
+
+There are two types of whitelisting on the perf dashboard:
+
+The IP whitelist is a list of IP addresses of machines which
+are allowed to post data to /add\_point. This is to prevent
+/add\_point from being spammed. You can add a bot to the IP whitelist
+at [/ip\_whitelist](https://chromeperf.appspot.com/ip_whitelist). If
+you’re seeing 403 errors on your buildbots, the IPs to add are likely
+already in the logs. Note that if you are seeing 500 errors, those are
+not related to IP whitelisting. They are usually caused by an error in
+the JSON data sent by the buildbot. If you can’t tell by looking at
+the JSON data what is going wrong, the easiest thing to do is to add a
+unit test with the JSON to `add_point_test.py` and debug it from there.
+
+The bot whitelist is a list of bot names which are publicly visible. If a
+bot is not on the list, users must be logged into google.com accounts to
+see the data for that bot. You can add or remove a bot from the whitelist
+at [/bot\_whitelist](https://chromeperf.appspot.com/bot_whitelist),
+and make a bot’s existing data publicly visible (or internal\_only)
+at [/change\_internal\_only](https://chromeperf.appspot.com/change_internal_only).
diff --git a/catapult/dashboard/docs/cloud-debugger.md b/catapult/dashboard/docs/cloud-debugger.md
new file mode 100644
index 0000000..13cc183
--- /dev/null
+++ b/catapult/dashboard/docs/cloud-debugger.md
@@ -0,0 +1,18 @@
+# Updating the cloud repository
+
+[Cloud debugger](https://cloud.google.com/tools/cloud-debugger/)
+can now be used to debug errors in production. This debugging
+functionality requires having a copy of this repo in a [Cloud Source
+Repository](https://cloud.google.com/tools/cloud-repositories/docs/).
+
+In order to push the current state of this repository to the
+Cloud Source Repository for the Chrome Performance Dashboard:
+
+    gcloud auth login
+    git config credential.helper gcloud.sh
+    git remote add cloud-repo https://source.developers.google.com/p/chromeperf/
+    git push --all cloud-repo
+
+Note: If the Cloud Source Repository is changed to automatically mirror
+from the official catapult repository, then this should be unnecessary.
+
diff --git a/catapult/dashboard/docs/code-snippets.md b/catapult/dashboard/docs/code-snippets.md
new file mode 100644
index 0000000..bcf9283
--- /dev/null
+++ b/catapult/dashboard/docs/code-snippets.md
@@ -0,0 +1,156 @@
+# Code Snippets
+
+It is possible to directly execute code on a production instance of the
+performance dashboard. This is one way to directly query information about
+the state of the datastore, and make quick adjustments to data in the
+datastore.
+
+There are two places where production code can be run (admins only):
+
+ - https://chromeperf.appspot.com/\_ah/dev\_console/interactive
+ - https://chromeperf.appspot.com/\_ah/stats/shell
+
+## List tests frequently marked as invalid
+
+```python
+import collections
+from google.appengine.ext import ndb
+from speed.dashboard import utils
+from dashboard.models import anomaly
+
+sheriff = ndb.Key('Sheriff', 'Chromium Perf Sheriff')
+query = anomaly.Anomaly.query(anomaly.Anomaly.bug_id == -1)
+query = query.filter(anomaly.Anomaly.sheriff == sheriff)
+query = query.order(-anomaly.Anomaly.timestamp)
+alerts = query.fetch(limit=5000)
+
+total_alerts = len(alerts)
+print 'Fetched {} "invalid" alerts.'.format(len(alerts))
+
+occurrences = [[], [], []]
+for a in alerts:
+  parts = utils.TestPath(a.test).split('/', 3)[1:]
+  for i, part in enumerate(parts):
+    occurrences[i].append(part)
+
+types = ['bot', 'benchmark', 'subtest']
+counters = [(type, collections.Counter(x)) for type, x in zip(types, occurrences)]
+for type, counter in counters:
+  print '\nTop {}s marked invalid:'.format(type)
+  print ' {0:>5} {1:>13} {2}'.format('Count', '% of invalid', 'Name')
+  for name, count in counter.most_common(10):
+    percent = 100 * float(count) / total_alerts
+    print ' {0:>5} {1:>12}% {2}'.format(count, percent, name)
+```
+
+## List unique test suite names
+
+```python
+from dashboard.models import graph_data
+
+LIMIT = 10000
+
+query = graph_data.Test.query(graph_data.Test.parent_test == None)
+test_keys = query.fetch(limit=LIMIT, keys_only=True)
+unique = sorted(set(k.string_id() for k in test_keys))
+print 'Fetched %d Test keys, %d unique names.' % (len(test_keys), len(unique))
+for name in unique:
+  print name
+```
+
+## List deprecated test suites
+
+```python
+from dashboard import utils
+from dashboard.models import graph_data
+
+LIMIT = 10000
+
+query = graph_data.Test.query(
+    graph_data.Test.parent_test == None,
+    graph_data.Test.deprecated == True)
+test_keys = query.fetch(limit=LIMIT, keys_only=True)
+print 'Fetched %d Test keys.' % len(test_keys)
+for key in test_keys:
+  print utils.TestPath(key)
+```
+
+## List all sub-tests of a particular test
+
+```python
+from google.appengine.ext import ndb
+from dashboard import utils
+from dashboard.models import graph_data
+
+ancestor = utils.TestKey('ChromiumPerf/linux-release/sunspider')
+keys = graph_data.Test.query(ancestor=ancestor).fetch(keys_only=True)
+
+print 'Fetched %d keys.' % len(keys)
+for key in keys:
+  print utils.TestPath(key)
+```
+
+## Delete a particular sheriff or other entity
+
+```python
+from google.appengine.ext import ndb
+
+key = ndb.Key('Sheriff', 'Sheriff name')
+print 'Deleting: %s\n%s' % (key.string_id(), key.get())
+key.delete()
+```
+
+## Clear the LastAddedRevision entities for a Test
+
+This allows point IDs that are much higher or lower to be posted.
+
+```python
+from google.appengine.ext import ndb
+from dashboard import utils
+from dashboard.models import graph_data
+
+ancestor_key = utils.TestKey('Master/bot/test')
+test_query = graph_data.Test.query(ancestor=ancestor_key)
+test_keys = test_query.fetch(keys_only=True)
+to_delete = []
+for test_key in test_keys:
+  to_delete.append(ndb.Key('LastAddedRevision', utils.TestPath(test_key)))
+print 'Deleting up to %d LastAddedRevision entities.' % len(to_delete)
+ndb.delete_multi(to_delete)
+```
+
+## Delete a few specific points (dangerous)
+
+```python
+from google.appengine.ext import ndb
+from dashboard.models import graph_data
+
+POINTIDS = []
+TEST_PATHS = []
+
+to_delete = []
+for point_id in POINTIDS:
+  for path in TEST_PATHS:
+    to_delete.append(ndb.Key('TestContainer', path, 'Row', point_id))
+
+print 'Deleting %d rows.' % len(to_delete)
+ndb.delete_multi(to_delete)
+```
+
+## Delete Rows and Tests under a particular Master or Bot (dangerous)
+
+```python
+from google.appengine.ext import ndb
+from dashboard import utils
+from dashboard.models import graph_data
+
+ancestor_key = utils.TestKey('ChromiumEndure')
+test_keys = graph_data.Test.query(ancestor=ancestor_key).fetch(keys_only=True)
+print len(test_keys)
+to_delete = []
+for test_key in test_keys:
+  row_keys = graph_data.Row.query(
+      graph_data.Row.parent_test == test_key).fetch(keys_only=True, limit=100)
+  to_delete.extend(row_keys)
+  if not row_keys:
+    to_delete.append(test_key)
+print len(to_delete)
+ndb.delete_multi(to_delete[:1000])
+```
diff --git a/catapult/dashboard/docs/data-format.md b/catapult/dashboard/docs/data-format.md
new file mode 100644
index 0000000..81f4600
--- /dev/null
+++ b/catapult/dashboard/docs/data-format.md
@@ -0,0 +1,206 @@
+# Chrome Performance Dashboard Data Format
+
+## Recommended Format: Dashboard JSON v1
+
+The endpoint that accepts new points
+(`https://chromeperf.appspot.com/add_point`) takes HTTP POST
+requests. Each POST request should have a single parameter, called "data",
+whose value is JSON containing all of the data being uploaded.
+
+Example:
+
+```javascript
+{
+  "master": "master.chromium.perf",
+  "bot": "linux-release",
+  "point_id": 123456,
+  "versions": {
+    "version type": "version string"
+  },
+  "supplemental": {
+    "field name": "supplemental data string",
+    "default_rev": "r_chrome_version"
+  },
+  "chart_data": {/*... as output by Telemetry; see below ...*/}
+}
+```
+
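+As an illustration (this is not dashboard or client code, and the field
+values below are placeholders), such a request could be made with the
+Python 2 standard library roughly as follows:
+
+```python
+import json
+import urllib
+import urllib2
+
+# Placeholder payload mirroring the example above.
+payload = {
+    'master': 'master.chromium.perf',
+    'bot': 'linux-release',
+    'point_id': 123456,
+    'versions': {'chrome_version': 'version string'},
+    'supplemental': {'default_rev': 'r_chrome_version'},
+    'chart_data': {},  # Chart JSON as output by Telemetry; see below.
+}
+
+# POST the JSON-encoded payload as the single "data" form parameter.
+response = urllib2.urlopen(
+    'https://chromeperf.appspot.com/add_point',
+    urllib.urlencode({'data': json.dumps(payload)}))
+print response.getcode()
+```
+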
+Fields:
+
+ * `master` (string): Buildbot master name or top-level category for data.
+ * `bot` (string): Buildbot builder name, or another string that
+ represents platform type.
+ * `test_suite_name` (string): A string to use in the perf dashboard test
+ path after master/bot. Can contain slashes.
+ * `format_version` (string): Allows dashboard to know how to process
+ the structure.
+ * `revisions` (dict): Maps repo name to revision.
+ * `supplemental` (dict): Unstructured key-value pairs which may be
+ displayed on the dashboard. Used to describe bot hardware, OS,
+ Chrome feature status, etc.
+ * `chart_data` (dict): The chart JSON as output by Telemetry.
+
+### Chart data:
+
+This contains all of the test results and any metadata that is stored with
+the test.
+
+```json
+{
+  "format_version": "1.0",
+  "benchmark_name": "page_cycler.typical_25",
+  "charts": {
+    "warm_times": {
+      "http://www.google.com/": {
+        "type": "list_of_scalar_values",
+        "values": [9, 9, 8, 9],
+      },
+      "http://www.yahoo.com/": {
+        "type": "list_of_scalar_values",
+        "values": [4, 5, 4, 4],
+      },
+      "summary": {
+        "type": "list_of_scalar_values",
+        "values": [13, 14, 12, 13],
+        "file": "gs://..."
+      }
+    }
+  }
+}
+```
+
+Fields:
+
+ * `charts`: [dict of string to dict] Maps chart name strings to their
+ data dicts.
+ * `units`: [string] Units to display on the dashboard.
+ * `traces`: [dict of string to dict] Maps trace name strings to their
+ trace dicts.
+ * `type`: [string] `"scalar"`, `"list_of_scalar_values"` or `"histogram"`,
+ which tells the dashboard how to interpret the rest of the fields.
+ * `improvement_direction` (string): Either `"bigger_is_better"`, or
+ `"smaller_is_better"`.
+ * `summary`: A special trace name which denotes the trace in a chart which does
+ not correspond to a specific page.
+
+## Legacy Format
+
+This format is deprecated and should not be used for new clients.
+
+In the format described below, the value of "data" in the HTTP POST
+should be a JSON encoding of a list of points to add. Each point is a
+map of property names to values for that point.
+
+Example 1:
+
+```json
+[
+  {
+    "master": "SenderType",
+    "bot": "platform-type",
+    "test": "my_test_suite/chart_name/trace_name",
+    "revision": 1234,
+    "value": 18.5
+  }
+]
+```
+
+Required fields:
+
+ * `master` (string), `bot` (string), `test` (string): These three
+ fields in combination specify a particular "test path". The master and
+ bot are supposed to be the Buildbot master name and slave `perf_id`,
+ respectively, but if the tests aren't being run by Buildbot, these
+ can be any descriptive strings which specify the test data origin
+ (note master and bot names can't contain slashes, and none of these
+ can contain asterisks).
+ * `revision` (int): The point ID, used to index the data point. It
+ doesn't actually have to be a "revision". Should be monotonically increasing
+ for data in each series.
+ * `value` (float): The Y-value for this point.
+
+Example 2 (including optional fields):
+
+```json
+[
+  {
+    "master": "ChromiumPerf",
+    "bot": "linux-release",
+    "test": "sunspider/string-unpack-code/ref",
+    "revision": 33241,
+    "value": "18.5",
+    "error": "0.5",
+    "units": "ms",
+    "masterid": "master.chromium.perf",
+    "buildername": "Linux Builder",
+    "buildnumber": 75,
+    "supplemental_columns": {
+      "r_webkit_rev": "167808",
+      "a_default_rev": "r_webkit_rev"
+    }
+  },
+  {
+    "master": "ChromiumPerf",
+    "bot": "linux-release",
+    "test": "sunspider/string-unpack-code",
+    "revision": 33241,
+    "value": "18.4",
+    "error": "0.489",
+    "units": "ms",
+    "masterid": "master.chromium.perf",
+    "buildername": "Linux Builder",
+    "buildnumber": 75,
+    "supplemental_columns": {
+      "r_webkit_rev": "167808",
+      "a_default_rev": "r_webkit_rev"
+    }
+  }
+]
+```
+
+Optional fields:
+
+ * `units` (string): The (y-axis) units for this point.
+ * `error` (float): A standard error or standard deviation value.
+ * `supplemental_columns`: A dictionary of other data associated with
+ this point.
+   * Properties starting with `r_` are revision/version numbers.
+   * Properties starting with `d_` are extra data numbers.
+   * Properties starting with `a_` are extra metadata strings.
+     * `a_default_rev`: The name of another supplemental revision property
+     (a key starting with `r_`) to use as the default revision.
+     * `a_stdio_uri`: Link to stdio logs for the test run.
+ * `higher_is_better` (boolean). You can use this field to explicitly
+ define improvement direction.
+
+## Providing test and unit information
+
+Sending test descriptions is supported with Dashboard JSON v1.
+Test descriptions for Telemetry tests are provided in code for the
+benchmarks, and are included by Telemetry in the chart JSON output.
+
+## Relevant code links
+
+Implementations of code that sends data to the dashboard:
+
+ * `chromium/build/scripts/slave/results_dashboard.py`
+ * `chromiumos/src/third_party/autotest/files/tko/perf_upload/perf_uploader.py`
+
+## Getting set up with new test results
+
+Once you're ready to start sending data to the real perf dashboard, there
+are a few more things you might want to do. Firstly, in order for the
+dashboard to accept the data, the IP of the sender must be whitelisted;
+you can request this by filing an issue.
+
+If your data is not internal-only, you can request that it be marked as
+publicly visible, again by filing an issue.
+
+Finally, if you want to monitor your test results, you can decide
+which tests you want to be monitored, who should be receiving alerts, and
+whether you want to set any special thresholds for alerting.
+
+## Contact
+
+In general, for questions or requests you can email
+chrome-perf-dashboard-team@google.com.
diff --git a/catapult/dashboard/docs/deploy-checklist.md b/catapult/dashboard/docs/deploy-checklist.md
new file mode 100644
index 0000000..8ad3dfb
--- /dev/null
+++ b/catapult/dashboard/docs/deploy-checklist.md
@@ -0,0 +1,69 @@
+# Deploy process check list
+
+## Background
+
+There are two types of versions of
+the dashboard in the [app engine versions
+list](https://appengine.google.com/deployment?&app_id=s~chromeperf):
+"dev" versions and "clean" versions. There is always one default version
+that's being used, and multiple other versions that could be set as
+default, if the current version is broken. Dev versions contain "dev-"
+in the name, and contain changes that haven't been reviewed and checked
+in; in general, dev versions should never be set as default.
+
+## General procedure
+
+Every Tuesday and Thursday (or whenever required), a new "clean" version
+should be uploaded and checked (see below) to see if there are any
+problems. If any problems are seen, the previous clean version should be
+set as default. When setting a new version as default, in order to avoid
+outages, the basic functionality of the dashboard should be checked to
+make sure it's not broken.
+
+## The check list
+
+### Alerts page functionality
+
+[/alerts](https://chromeperf.appspot.com/alerts)
+
+- If there are untriaged alerts, a table of alerts should be shown.
+- If there are no alerts, try
+  [/alerts?triaged=true](https://chromeperf.appspot.com/alerts?triaged=true);
+  it should show the last 500 alerts.
+- After checking a row and clicking "graph", a page should open
+  with the selected alerts and their graphs.
+- After clicking the link inside a row, a page should open with a graph
+  that shows the revision where the alert occurred.
+
+### Report page functionality
+
+[/report](https://chromeperf.appspot.com/report)
+
+- After using the menu to select a test suite, bot and test (e.g. kraken,
+  ChromiumPerf/linux-release, Total) and clicking "add", a chart should
+  be shown.
+- A sub-series chart can also be added by selecting a sub-test and
+  clicking "add".
+- After dragging the revision range slider, the revision range of the data
+  shown should change and the URL should be updated.
+
+### Graph functionality
+
+On a page with a graph, e.g.
+[/group\_report?bug\_id=509851](https://chromeperf.appspot.com/group_report?bug_id=509851) or
+[/report?sid=89a4bd60...](https://chromeperf.appspot.com/report?sid=89a4bd60efbaf838455514aef4f6487e2e782888b1787a420b2f694e539e90da),
+check:
+
+- The buttons in the legend should change which items are selected.
+- The items which aren't loaded by default should be loaded later than
+  the core items.
+
+### Triaging and bisect functionality
+
+On a page graph with an alert, e.g.
+[/group\_report?keys=agxz...](https://chromeperf.appspot.com/group_report?keys=agxzfmNocm9tZXBlcmZyFAsSB0Fub21hbHkYgIDAwY_K9QgM), check:
+
+- One can un-triage the alert on the graph by clicking it, then clicking the X.
+- One can mark an alert as ignored by clicking on it and clicking "Ignore".
+- One can start a bisect job by clicking any point in the graph, clicking
+  "bisect", then submitting the form.
diff --git a/catapult/dashboard/docs/getting-set-up.md b/catapult/dashboard/docs/getting-set-up.md
new file mode 100644
index 0000000..4dedc2f
--- /dev/null
+++ b/catapult/dashboard/docs/getting-set-up.md
@@ -0,0 +1,51 @@
+# Getting started with the Performance Dashboard
+
+## Prerequisites
+
+Running the tests, running the local server and
+deploying all depends on having the [the App Engine
+SDK](https://cloud.google.com/appengine/downloads).
+
+After downloading and unpacking, you should add the path to the SDK to
+both `PATH` and `PYTHONPATH`.
+
+## Running the tests
+
+To run the Python unit tests, use `bin/run_py_tests`. To run the front-end
+component tests, use `bin/run_dev_server_tests`.
+
+## Running a local instance
+
+Run `bin/dev_server`; this sets up a temporary directory, adds links to
+required libraries, and calls `dev_appserver.py` on that directory.  By
+default, this starts a server on [localhost:8080](http://localhost:8080/).
+
+To load sample graph or alert data from production, navigate to
+[/load\_from\_prod](http://localhost:8080/load_from_prod).
+
+## Deploying to production
+
+To deploy, you can run `bin/deploy`, which prepares the
+code to be deployed and runs `appcfg.py`. Note that this
+doesn't set the new version as the default version; to do
+this, you can use the versions page on the [Google Developers
+Console](https://console.developers.google.com/) if you have edit or
+owner permissions for the App Engine project; otherwise if you want to
+request to set a new default version for chromeperf.appspot.com you can
+contact chrome-perf-dashboard-team@google.com.
+
+After deploying, there is a checklist to verify that no major functionality
+has regressed: [deploy checklist](/dashboard/docs/deploy-checklist.md).
+
+WARNING: Some changes to production may not be easily reversible; for
+example `appcfg.py ... vacuum_indexes` will remove datastore indexes that
+are not in your local index.yaml file, which may take more than 24 hours,
+and will disable any queries that depend on those indexes.
+
+## Where to find documentation
+
+- [App Engine](https://developers.google.com/appengine/docs/python/)
+- [Polymer](http://www.polymer-project.org/) (web component framework)
+- [Flot](http://flotcharts.org/) (JS chart plotting library)
+- [App engine stubs](https://developers.google.com/appengine/docs/python/tools/localunittesting)
+- [Python mock](http://www.voidspace.org.uk/python/mock/)
diff --git a/catapult/dashboard/docs/glossary.md b/catapult/dashboard/docs/glossary.md
new file mode 100644
index 0000000..316540b
--- /dev/null
+++ b/catapult/dashboard/docs/glossary.md
@@ -0,0 +1,59 @@
+# Perf Dashboard Project Glossary
+
+## Data organization
+
+*Test path*: A string which serves as the identifier of a single data
+series. This is a slash-separated sequence of strings, for example
+`ChromiumPerf/linux-release/sunspider/Total`.
+
+*Test suite name*: A top-level test name, representing a collection of
+related data series whose results are output together. This is often a
+Telemetry benchmark.
+
+*Chart name*: This usually refers to the 4th part of the test path,
+which often comes from the "chart name" in Telemetry data.
+
+*Trace name*: In the dashboard code, this used to refer to the 5th
+part of a test path, which used to be called "trace name" in Telemetry.
+This should be discouraged now, since "trace" has other meanings in
+other projects.
+
+*Data series*: A sequence of (x, y) pairs, plotted as a line chart.
+
+*Test*: In the perf dashboard code, test often refers to a single
+data series and associated data, since for each data series there is a
+corresponding Test entity in datastore.
+
+## Perf sheriff rotations
+
+*Sheriff*: Also known as a perf sheriff rotation, this is a group or
+person who is interested in regressions in a particular set of tests.
+
+*Monitored*: A test is monitored if there's a sheriff rotation that will
+receive alerts for regressions in that test.
+
+*Anomaly*: In the dashboard code, an anomaly refers to a step up or step
+down in test results.
+
+*Change point*: A point in a data series where there is some change. In
+the case of performance tests, we're generally concerned with step-like
+increases and decreases.
+
+*Data stoppage*: A data stoppage is when data is received for a particular
+data series for some time, and then is not received (e.g. because the
+test is disabled or broken).
+
+*To triage*: To assign a bug number to an alert, or mark it as "invalid" or
+"ignored".
+
+## Chromium continuous integration infrastructure
+
+*Master*: A Buildbot master, also called a "waterfall". For example, the
+Chromium Perf waterfall is also known as chromium.perf or ChromiumPerf.
+
+*Bot*: A Buildbot builder; each platform type will have a different builder
+name.
+
+*Reference (ref) build*: On the Chromium Perf waterfall, tests are run
+on both ToT Chromium and on an older build of Chromium. This is useful
+because if the ref build results change along with the ToT results,
+we can conclude that this was not caused by a change in Chrome.
diff --git a/catapult/dashboard/docs/pages-and-endpoints.md b/catapult/dashboard/docs/pages-and-endpoints.md
new file mode 100644
index 0000000..88c749c
--- /dev/null
+++ b/catapult/dashboard/docs/pages-and-endpoints.md
@@ -0,0 +1,73 @@
+# Pages and Endpoints
+
+## Main public web pages and their query parameters.
+
+**/**: View recent regressions and improvements.
+ - *days*: Number of days to show anomalies (optional).
+ - *sheriff*: Sheriff to show anomalies for (optional)
+ - *num\_changes*: The number of improvements/regressions to list.
+
+**/alerts**: View current outstanding alerts
+ - *sheriff*: A sheriff rotation name, defaults to Chromium Perf Sheriff.
+ - *triaged*: Whether to include recent already-triaged alerts.
+ - *improvements*: Whether to include improvement alerts.
+ - *sortby*: A field in the alerts table to sort rows by.
+ - *sortdirection*: Direction to sort, either "up" or "down".
+
+**/report**: Browse graphs and compare charts across platforms.
+ - *sid*: A stored combination set of tests and graphs to view.
+ - *masters*: Comma-separated list of master names
+ - *bots*: Comma-separated list of bot names.
+ - *tests*: Comma-separated list of test paths starting from benchmark name.
+ - *rev*: Revision number (optional).
+ - *num\_points*: Number of points to plot (optional).
+ - *start\_rev*: Starting revision number (optional).
+ - *end\_rev*: Ending revision number (optional).
+ - *checked*: Series to check. Could be "core" (important + ref) or "all".
+
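+For example, a /report request with placeholder parameter values might look
+like this:
+
+    https://chromeperf.appspot.com/report?masters=ChromiumPerf&bots=linux-release&tests=sunspider/Total&start_rev=33000&end_rev=34000
+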
+**/group\_report**: View graphs for a set of alerts
+ - *bug\_id*: Bug ID to view alerts for.
+ - *rev*: Chromium commit position to view alerts for.
+ - *keys*: Comma-separated list of URL-safe keys, each of which represents one alert.
+
+**/debug\_alert**: Experiment with the alerting function, or diagnose why and when an alert would occur for a particular test.
+ - *test\_path*: Full test path (Master/bot/benchmark/...) to get points for.
+ - *rev*: A revision to center the graph on.
+ - *num\_before*: Number of points to fetch before rev.
+ - *num\_after*: Number of points to fetch starting from rev.
+ - *config*: JSON containing custom thresholds parameters.
+
+**/new\_points**: View recently-added points for some set of tests, and verify whether or not data was received.
+ - *num\_points*: Max number of points to fetch.
+ - *pattern*: A test path pattern (Master/bot/benchmark/...) with wildcards to match.
+ - *max\_tests*: Maximum number of tests that match the pattern to fetch.
+
+**/stats**: View and generate stats about alert volume.
+ - *key*: URL-safe key of existing previously generated stats group.
+
+**/bisect\_stats**: View bisect job success rate stats.
+
+**/set\_warning\_message**: Set a warning message about outages and planned maintenance.
+
+## Administrative pages
+
+ - /change\_internal\_only
+ - /edit\_anomaly\_configs
+ - /edit\_bug\_labels
+ - /edit\_sheriffs
+ - /edit\_test\_owners
+ - /load\_graph\_from\_prod
+ - /migrate\_test\_names
+ - /get\_logs
+
+## XHR handlers
+
+ - /associate\_alerts
+ - /file\_bug
+ - /edit\_anomalies
+ - /graph\_json
+ - /graph\_revisions
+ - /list\_tests
+ - /list\_monitored\_tests
+ - /start\_try\_job
+ - /graph\_csv
diff --git a/catapult/dashboard/index.yaml b/catapult/dashboard/index.yaml
index 96b4b7a..7f4bdae 100644
--- a/catapult/dashboard/index.yaml
+++ b/catapult/dashboard/index.yaml
@@ -1,43 +1,38 @@
 # Datastore Composite Index Configuration.
 # https://developers.google.com/appengine/docs/python/config/indexconfig
-# Note: To clear old indexes from production after deleting them here, you
-# can run appcfg.py vacuum_indexes.
-
-# Note about composite indexes with the "internal_only" property below:
-# All queries are filtered by internal_only == False when the user is not
-# logged in. Therefore, the composite indexes below are grouped into pairs;
-# the index that's used when the user is not logged in (which includes the
-# internal_only property), and the one used when the user is logged in.
-
-# General note about App Engine Datastore composite indexes:
-# Indexes must be made differently depending on how the properties are used.
-# There are three main ways to use properties in a query:
+#
+# Below, most indexes come in pairs; one with the internal_only property,
+# one without. This is because all queries when the user is not logged in
+# have the filter internal_only == False.
+#
+# Composite index properties must be listed differently depending on how the
+# properties are used. There are three main ways to use properties in a query:
 #   (1) In an equality filter.
 #   (2) In an inequality filter.
 #   (3) In a sort order.
 # The properties below must be listed in this order. The "direction" only needs
 # to be specified for properties used for sort order.
+#
+# To update the indexes in production after editing them here, you must run
+# appcfg.py vacuum_indexes or appcfg.py update_indexes.
 
 indexes:
 
-# Used in main.py when fetching top improvements/regressions in past days.
+# Used in main.py when fetching top improvements/regressions in past N days.
+- kind: Anomaly
+  properties:
+  - name: sheriff
+  - name: timestamp
 - kind: Anomaly
   properties:
   - name: internal_only
   - name: sheriff
   - name: timestamp
-    direction: asc
-- kind: Anomaly
-  properties:
-  - name: sheriff
-  - name: timestamp
-    direction: asc
 
 # Used in alerts.py for fetching recent un-triaged regressions for one sheriff.
 - kind: Anomaly
   properties:
   - name: bug_id
-  - name: internal_only
   - name: is_improvement
   - name: recovered
   - name: sheriff
@@ -45,6 +40,7 @@
     direction: desc
 - kind: Anomaly
   properties:
+  - name: internal_only
   - name: bug_id
   - name: is_improvement
   - name: recovered
@@ -57,13 +53,13 @@
 - kind: Anomaly
   properties:
   - name: bug_id
-  - name: internal_only
   - name: recovered
   - name: sheriff
   - name: timestamp
     direction: desc
 - kind: Anomaly
   properties:
+  - name: internal_only
   - name: bug_id
   - name: recovered
   - name: sheriff
@@ -74,13 +70,13 @@
 # of triaged vs un-triaged status, i.e. when the triaged button is on.
 - kind: Anomaly
   properties:
-  - name: internal_only
   - name: is_improvement
   - name: sheriff
   - name: timestamp
     direction: desc
 - kind: Anomaly
   properties:
+  - name: internal_only
   - name: is_improvement
   - name: sheriff
   - name: timestamp
@@ -90,41 +86,41 @@
 # improvements and triaged, i.e. both improvements and triaged buttons are on.
 - kind: Anomaly
   properties:
-  - name: internal_only
   - name: sheriff
   - name: timestamp
     direction: desc
 - kind: Anomaly
   properties:
+  - name: internal_only
   - name: sheriff
   - name: timestamp
     direction: desc
 
 # Used in group_report.py when querying for anomalies around a revision.
+# No composite index is required without internal_only because then the
+# query uses only one (indexed) property, end_revision.
 - kind: Anomaly
   properties:
   - name: internal_only
   - name: end_revision
-    direction: asc
 
-# This *might* be unused. It would be used for querying for non-internal-only
-# points for a particular test, filtering or sorting (in ascending order) by
-# revision.
+# Might be unused!
+# This index would enable querying for points for a particular test,
+# filtering or sorting by revision, and possibly doing a projection
+# query including value.
 - kind: Row
   properties:
-  - name: internal_only
   - name: parent_test
   - name: revision
+  - name: value
 
-# Used in graph_revisions.py to do a projection query for timestamp, revision
-# and value for points from a particular test, with filtering for only non-
-# internal-only points.
+# Used in find_anomalies.GetRowsToAnalyze when getting latest points,
+# with projection query for properties revision and value.
 - kind: Row
   properties:
-  - name: internal_only
   - name: parent_test
   - name: revision
-  - name: timestamp
+    direction: desc
   - name: value
 
 # Used in graph_revisions.py to do a projection query for timestamp, revision
@@ -136,96 +132,76 @@
   - name: timestamp
   - name: value
 
-# This *might* be unused. It would be used for doing a projection query for
-# value or revision and value, filtering or sorting (in ascending order) by
-# revision, with filtering for only non-internal-only.
-- kind: Row
-  properties:
-  - name: internal_only
-  - name: parent_test
-  - name: revision
-  - name: value
-
-# Used in find_anomalies.GetRowsToAnalyze when getting latest points,
-# with projection query for revision and value, with filtering by internal_only
-# for non-logged-in users.
-# This *might* be unused if this type of query is only done in find_anomalies
-# and all requests to find_anomalies are authorized to fetch internal_only.
-- kind: Row
-  properties:
-  - name: internal_only
-  - name: parent_test
-  - name: revision
-    direction: desc
-  - name: value
-
-# This *might* be unused. It would be used for doing a projection query for
-# value or revision and value, filtering or sorting (in ascending order) by
-# revision.
-- kind: Row
-  properties:
-  - name: parent_test
-  - name: revision
-  - name: value
-
-# Used in several modules (graph_json.py, graph_csv.py, dump_graph_json.py)
-# to get the latest points for a test for users who aren't logged in.
-- kind: Row
-  properties:
-  - name: internal_only
-  - name: parent_test
-  - name: revision
-    direction: desc
-
 # Used in several modules (graph_json.py and graph_csv.py) to fetch the latest
-# points for a test, without filtering by internal_only.
+# points for a test.
 - kind: Row
   properties:
   - name: parent_test
   - name: revision
     direction: desc
 
-# Useful for queries on the interactive console.
+# This composite index enables querying for points for a particular test,
+# filtering or sorting by revision. This may be unused, but it may be useful
+# for queries on the interactive console.
 - kind: Row
   properties:
   - name: parent_test
   - name: revision
 
-# Used in new_points.py to query newest points for all tests.
-- kind: Row
-  properties:
-  - name: internal_only
-  - name: timestamp
-    direction: desc
-
-# Used in new_points.py to query newest points for test path.
+# May be unused!
+# Likely used in new_points.py to query newest points for a particular test.
+# However listing the latest points for a test also works when not logged in
+# currently, although there appears to be no index in this file for that.
 - kind: Row
   properties:
   - name: parent_test
   - name: timestamp
     direction: desc
 
-# Used in find_anomalies.GetRowsToAnalyze when getting latest points,
-# with projection query for properties revision and value.
-- kind: Row
-  properties:
-  - name: parent_test
-  - name: revision
-    direction: desc
-  - name: value
-
 # May be used in send_stoppage_alert_emails to fetch recent StoppageAlert
 # entities for a particular sheriff, for both internal-only and public alerts.
 - kind: StoppageAlert
   properties:
-  - name: internal_only
   - name: sheriff
   - name: mail_sent
 - kind: StoppageAlert
   properties:
+  - name: internal_only
   - name: sheriff
   - name: mail_sent
 
+# Used in alerts to fetch recent StoppageAlert entities for a particular
+# sheriff, for both internal and non-internal users.
+- kind: StoppageAlert
+  properties:
+  - name: sheriff
+  - name: timestamp
+    direction: desc
+- kind: StoppageAlert
+  properties:
+  - name: internal_only
+  - name: sheriff
+  - name: timestamp
+    direction: desc
+
+# Used in /alerts to query for stoppage alerts, for internal and external
+# sheriffs.
+- kind: StoppageAlert
+  properties:
+  - name: bug_id
+  - name: internal_only
+  - name: recovered
+  - name: sheriff
+  - name: timestamp
+    direction: desc
+- kind: StoppageAlert
+  properties:
+  - name: bug_id
+  - name: recovered
+  - name: sheriff
+  - name: timestamp
+    direction: desc
+
 # Used in update_test_suites to query keys of test suites (parent_test == None)
 # with deprecated and description projection. Two separate lists of
 # test suites are kept, one for external and one for internal.
@@ -257,7 +233,6 @@
 # Used in list_tests.py to query Test by test path pattern.
 - kind: Test
   properties:
-  - name: internal_only
   - name: master_name
   - name: bot_name
   - name: suite_name
@@ -267,6 +242,7 @@
   - name: test_part4_name
 - kind: Test
   properties:
+  - name: internal_only
   - name: master_name
   - name: bot_name
   - name: suite_name
diff --git a/catapult/dashboard/pylintrc b/catapult/dashboard/pylintrc
new file mode 100644
index 0000000..1cc6d73
--- /dev/null
+++ b/catapult/dashboard/pylintrc
@@ -0,0 +1,70 @@
+[MESSAGES CONTROL]
+
+# Disable the message, report, category or checker with the given id(s).
+# TODO: Shrink this list to as small as possible.
+disable=
+  design,
+  similarities,
+
+  fixme,
+  global-statement,
+  import-error,
+  locally-disabled,
+  locally-enabled,
+  missing-docstring,
+  no-init,
+  no-member,
+  no-name-in-module,
+  no-self-use,
+  protected-access,
+  star-args,
+  super-on-old-class,
+
+
+[REPORTS]
+
+# Don't write out full reports, just messages.
+reports=no
+
+
+[BASIC]
+
+# Regular expression which should only match correct function names.
+function-rgx=^(?:(?P<exempt>setUp|tearDown|setUpModule|tearDownModule)|(?P<camel_case>_?[A-Z][a-zA-Z0-9]*))$
+
+# Regular expression which should only match correct method names.
+method-rgx=^(?:(?P<exempt>_[a-z0-9_]+__|get|post|run|put|execute|_pre_put_hook|_post_put_hook|_post_get_hook|_pre_delete_hook|runTest|setUp|tearDown|setUpTestCase|tearDownTestCase|setupSelf|tearDownClass|setUpClass)|(?P<camel_case>(_{0,2}|test|assert)[A-Z][a-zA-Z0-9_]*))$
+
+# Regular expression which should only match correct argument names.
+argument-rgx=^[a-z][a-z0-9_]*$
+
+# Regular expression which should only match correct variable names.
+variable-rgx=^[a-z][a-z0-9_]*$
+
+# Good variable names which should always be accepted, separated by a comma.
+good-names=main,_
+
+# List of builtins function names that should not be used, separated by a comma.
+bad-functions=apply,input,reduce
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# A regular expression matching names used for dummy variables (i.e. not used).
+dummy-variables-rgx=^\*{0,2}(_$|unused_)
+
+
+[TYPECHECK]
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+
+[FORMAT]
+
+# We use two spaces for indents, instead of the usual four spaces or tab.
+indent-string='  '
diff --git a/catapult/dashboard/queue.yaml b/catapult/dashboard/queue.yaml
index 3875c99..387e8ab 100644
--- a/catapult/dashboard/queue.yaml
+++ b/catapult/dashboard/queue.yaml
@@ -9,7 +9,8 @@
      task_retry_limit: 1
 
  - name: add-point-queue
-   rate: 5/s
+   rate: 20/s
+   max_concurrent_requests: 20
    retry_parameters:
      task_retry_limit: 8
      min_backoff_seconds: 2
@@ -44,3 +45,8 @@
    rate: 20/m
    retry_parameters:
      task_retry_limit: 1
+
+ - name: task-runner-queue
+   rate: 5/s
+   retry_parameters:
+     task_retry_limit: 1
diff --git a/catapult/dependency_manager/OWNERS b/catapult/dependency_manager/OWNERS
new file mode 100644
index 0000000..ca5ce3b
--- /dev/null
+++ b/catapult/dependency_manager/OWNERS
@@ -0,0 +1 @@
+aiolos@chromium.org
diff --git a/catapult/dependency_manager/PRESUBMIT.py b/catapult/dependency_manager/PRESUBMIT.py
new file mode 100644
index 0000000..3db32ce
--- /dev/null
+++ b/catapult/dependency_manager/PRESUBMIT.py
@@ -0,0 +1,32 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+def CheckChangeOnUpload(input_api, output_api):
+  return _CommonChecks(input_api, output_api)
+
+
+def CheckChangeOnCommit(input_api, output_api):
+  return _CommonChecks(input_api, output_api)
+
+
+def _CommonChecks(input_api, output_api):
+  results = []
+  results += input_api.RunTests(input_api.canned_checks.GetPylint(
+      input_api, output_api, extra_paths_list=_GetPathsToPrepend(input_api),
+      pylintrc='pylintrc'))
+  return results
+
+
+def _GetPathsToPrepend(input_api):
+  project_dir = input_api.PresubmitLocalPath()
+  catapult_dir = input_api.os_path.join(project_dir, '..')
+  return [
+      project_dir,
+
+      input_api.os_path.join(catapult_dir, 'catapult_base'),
+
+      input_api.os_path.join(catapult_dir, 'third_party', 'mock'),
+      input_api.os_path.join(catapult_dir, 'third_party', 'pyfakefs'),
+  ]
diff --git a/catapult/dependency_manager/bin/run_tests b/catapult/dependency_manager/bin/run_tests
new file mode 100755
index 0000000..9a87bd6
--- /dev/null
+++ b/catapult/dependency_manager/bin/run_tests
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+# Copyright (c) 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Runs all Python unit tests in dependency_manager/."""
+
+import os
+import sys
+
+_CATAPULT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+sys.path.append(os.path.join(_CATAPULT, 'third_party', 'mock'))
+
+
+def main():
+  sys.path.append(_CATAPULT)
+
+  from hooks import install
+  if '--no-install-hooks' in sys.argv:
+    sys.argv.remove('--no-install-hooks')
+  else:
+    install.InstallHooks()
+
+  from catapult_build import run_with_typ
+  return run_with_typ.Run(
+      os.path.join(_CATAPULT, 'dependency_manager'), path=[_CATAPULT])
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/catapult/dependency_manager/dependency_manager/OWNERS b/catapult/dependency_manager/dependency_manager/OWNERS
new file mode 100644
index 0000000..ca5ce3b
--- /dev/null
+++ b/catapult/dependency_manager/dependency_manager/OWNERS
@@ -0,0 +1 @@
+aiolos@chromium.org
diff --git a/catapult/dependency_manager/dependency_manager/__init__.py b/catapult/dependency_manager/dependency_manager/__init__.py
new file mode 100644
index 0000000..76e0bef
--- /dev/null
+++ b/catapult/dependency_manager/dependency_manager/__init__.py
@@ -0,0 +1,41 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+
+
+CATAPULT_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
+    os.path.abspath(__file__))))
+CATAPULT_THIRD_PARTY_PATH = os.path.join(CATAPULT_PATH, 'third_party')
+DEPENDENCY_MANAGER_PATH = os.path.join(CATAPULT_PATH, 'dependency_manager')
+
+
+def _AddDirToPythonPath(*path_parts):
+  path = os.path.abspath(os.path.join(*path_parts))
+  if os.path.isdir(path) and path not in sys.path:
+    sys.path.append(path)
+
+
+_AddDirToPythonPath(CATAPULT_PATH, 'catapult_base')
+_AddDirToPythonPath(CATAPULT_THIRD_PARTY_PATH, 'mock')
+_AddDirToPythonPath(CATAPULT_THIRD_PARTY_PATH, 'pyfakefs')
+_AddDirToPythonPath(DEPENDENCY_MANAGER_PATH)
+
+
+# pylint: disable=unused-import
+from .archive_info import ArchiveInfo
+from .base_config import BaseConfig
+from .cloud_storage_info import CloudStorageInfo
+from .dependency_info import DependencyInfo
+from .exceptions import CloudStorageUploadConflictError
+from .exceptions import EmptyConfigError
+from .exceptions import FileNotFoundError
+from .exceptions import NoPathFoundError
+from .exceptions import ReadWriteError
+from .exceptions import UnsupportedConfigFormatError
+from .local_path_info import LocalPathInfo
+from .manager import DependencyManager
+# pylint: enable=unused-import
+
diff --git a/catapult/dependency_manager/dependency_manager/archive_info.py b/catapult/dependency_manager/dependency_manager/archive_info.py
new file mode 100644
index 0000000..637e7cc
--- /dev/null
+++ b/catapult/dependency_manager/dependency_manager/archive_info.py
@@ -0,0 +1,69 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+
+from dependency_manager import exceptions
+from dependency_manager import dependency_manager_util
+
+
+class ArchiveInfo(object):
+
+  def __init__(self, archive_file, unzip_path, path_within_archive):
+    """ Container for the information needed to unzip a downloaded archive.
+
+    Args:
+        archive_file: Path to the archive file.
+        unzip_path: Path to unzip the archive into. Assumes that this path
+            is unique for the archive.
+        path_within_archive: Specify if and how to handle zip archives
+            downloaded from cloud_storage. Expected values:
+                None: Do not unzip the file downloaded from cloud_storage.
+                '.': Unzip the file downloaded from cloud_storage. The
+                    unzipped file/folder is the expected dependency.
+                file_path: Unzip the file downloaded from cloud_storage.
+                    |file_path| is the path to the expected dependency,
+                    relative to the unzipped archive path.
+    """
+    self._archive_file = archive_file
+    self._unzip_path = unzip_path
+    self._path_within_archive = path_within_archive
+    self._dependency_path = os.path.join(
+        self._unzip_path, self._path_within_archive)
+    if not self._has_minimum_data:
+      raise ValueError(
+          'Not enough information specified to initialize an archive info.'
+          ' %s' % self)
+
+  def GetUnzippedPath(self):
+    if self.ShouldUnzipArchive():
+      # TODO(aiolos): Replace UnzipFile with zipfile.extractall once python
+      # version 2.7.4 or later can safely be assumed.
+      dependency_manager_util.UnzipArchive(
+          self._archive_file, self._unzip_path)
+      if self.ShouldUnzipArchive():
+        raise exceptions.ArchiveError(
+            "Expected path '%s' was not extracted from archive '%s'." %
+            (self._dependency_path, self._archive_file))
+    return self._dependency_path
+
+  def ShouldUnzipArchive(self):
+    if not self._has_minimum_data:
+      raise exceptions.ArchiveError(
+          'Missing needed info to unzip archive. Known data: %s' % self)
+    return not os.path.exists(self._dependency_path)
+
+  @property
+  def _has_minimum_data(self):
+    return all([self._archive_file, self._unzip_path,
+                self._dependency_path])
+
+  def __repr__(self):
+    return (
+        'ArchiveInfo(archive_file=%s, unzip_path=%s, path_within_archive=%s, '
+        'dependency_path=%s)' % (
+            self._archive_file, self._unzip_path, self._path_within_archive,
+            self._dependency_path))
+
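+# An illustrative, commented usage sketch; the archive and unzip paths below
+# are hypothetical.
+#
+#   from dependency_manager import archive_info
+#
+#   info = archive_info.ArchiveInfo(
+#       archive_file='/tmp/dep_abc123.zip',
+#       unzip_path='/tmp/dep_plat1_abc123',
+#       path_within_archive='bin/dep')
+#   dependency_path = info.GetUnzippedPath()  # Unzips on first use.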
diff --git a/catapult/dependency_manager/dependency_manager/base_config.py b/catapult/dependency_manager/dependency_manager/base_config.py
new file mode 100644
index 0000000..47897c7
--- /dev/null
+++ b/catapult/dependency_manager/dependency_manager/base_config.py
@@ -0,0 +1,383 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import logging
+import os
+
+from catapult_base import cloud_storage
+from dependency_manager import archive_info
+from dependency_manager import cloud_storage_info
+from dependency_manager import dependency_info
+from dependency_manager import exceptions
+from dependency_manager import local_path_info
+from dependency_manager import uploader
+
+
+class BaseConfig(object):
+  """A basic config class for use with the DependencyManager.
+
+  Initialized with a JSON file in the following format:
+
+            {  "config_type": "BaseConfig",
+               "dependencies": {
+                 "dep_name1": {
+                   "cloud_storage_base_folder": "base_folder1",
+                   "cloud_storage_bucket": "bucket1",
+                   "file_info": {
+                     "platform1": {
+                        "cloud_storage_hash": "hash_for_platform1",
+                        "download_path": "download_path111",
+                        "version_in_cs": "1.11.1.11."
+                        "local_paths": ["local_path1110", "local_path1111"]
+                      },
+                      "platform2": {
+                        "cloud_storage_hash": "hash_for_platform2",
+                        "download_path": "download_path2",
+                        "local_paths": ["local_path20", "local_path21"]
+                      },
+                      ...
+                   }
+                 },
+                 "dependency_name_2": {
+                    ...
+                 },
+                  ...
+              }
+            }
+
+    Required fields: "dependencies" and "config_type".
+                     Note that config_type must be "BaseConfig"
+
+    Assumptions:
+        "cloud_storage_base_folder" is a top level folder in the given
+          "cloud_storage_bucket" where all of the dependency files are stored
+          at "dependency_name"_"cloud_storage_hash".
+
+        "download_path" and all paths in "local_paths" are relative to the
+          config file's location.
+
+        All or none of the following cloud storage related fields must be
+          included in each platform dictionary:
+          "cloud_storage_hash", "download_path", "cs_remote_path"
+
+        "version_in_cs" is an optional cloud storage field, but is dependent
+          on the above cloud storage related fields.
+
+
+    Also note that platform names are often of the form os_architecture.
+    Ex: "win_AMD64"
+
+    More information on the fields can be found in dependency_info.py.
+  """
+  def __init__(self, file_path, writable=False):
+    """ Initialize a BaseConfig for the DependencyManager.
+
+    Args:
+        writable: False: This config will be used to look up information.
+                  True: This config will be used to update information.
+
+        file_path: Path to a file containing a json dictionary in the expected
+                   json format for this config class. Base format expected:
+
+                   { "config_type": config_type,
+                     "dependencies": dependencies_dict }
+
+                   config_type: must match the return value of GetConfigType.
+                   dependencies: A dictionary with the information needed to
+                       create dependency_info instances for the given
+                       dependencies.
+
+                   See dependency_info.py for more information.
+    """
+    self._config_path = file_path
+    self._writable = writable
+    self._pending_uploads = []
+    if not self._config_path:
+      raise ValueError('Must supply config file path.')
+    if not os.path.exists(self._config_path):
+      if not writable:
+        raise exceptions.EmptyConfigError(file_path)
+      self._config_data = {}
+      self._WriteConfigToFile(self._config_path, dependencies=self._config_data)
+    else:
+      with open(file_path, 'r') as f:
+        config_data = json.load(f)
+      if not config_data:
+        raise exceptions.EmptyConfigError(file_path)
+      config_type = config_data.pop('config_type', None)
+      if config_type != self.GetConfigType():
+        raise ValueError(
+            'Supplied config_type (%s) is not the expected type (%s) in file '
+            '%s' % (config_type, self.GetConfigType(), file_path))
+      self._config_data = config_data.get('dependencies', {})
+
+  def IterDependencyInfo(self):
+    """ Yields a DependencyInfo for each dependency/platform pair.
+
+    Raises:
+        ReadWriteError: If called when the config is writable.
+        ValueError: If any of the dependencies contain partial information for
+            downloading from cloud_storage. (See dependency_info.py)
+    """
+    if self._writable:
+      raise exceptions.ReadWriteError(
+          'Trying to read dependency info from a writable config. File for '
+          'config: %s' % self._config_path)
+    base_path = os.path.dirname(self._config_path)
+    for dependency in self._config_data:
+      dependency_dict = self._config_data.get(dependency)
+      platforms_dict = dependency_dict.get('file_info', {})
+      for platform in platforms_dict:
+        platform_info = platforms_dict.get(platform)
+
+        local_info = None
+        local_paths = platform_info.get('local_paths', [])
+        if local_paths:
+          paths = []
+          for path in local_paths:
+            path = self._FormatPath(path)
+            paths.append(os.path.abspath(os.path.join(base_path, path)))
+          local_info = local_path_info.LocalPathInfo(paths)
+
+        cs_info = None
+        cs_bucket = dependency_dict.get('cloud_storage_bucket')
+        cs_base_folder = dependency_dict.get('cloud_storage_base_folder', '')
+        download_path = platform_info.get('download_path')
+        if download_path:
+          download_path = self._FormatPath(download_path)
+          download_path = os.path.abspath(
+              os.path.join(base_path, download_path))
+
+          cs_hash = platform_info.get('cloud_storage_hash')
+          if not cs_hash:
+            raise exceptions.ConfigError(
+                'Dependency %s has cloud storage info on platform %s, but is '
+                'missing a cloud storage hash.' % (dependency, platform))
+          cs_remote_path = self._CloudStorageRemotePath(
+              dependency, cs_hash, cs_base_folder)
+          version_in_cs = platform_info.get('version_in_cs')
+
+          zip_info = None
+          path_within_archive = platform_info.get('path_within_archive')
+          if path_within_archive:
+            unzip_path = os.path.abspath(
+                os.path.join(os.path.dirname(download_path),
+                             '%s_%s_%s' % (dependency, platform, cs_hash)))
+            zip_info = archive_info.ArchiveInfo(
+                download_path, unzip_path, path_within_archive)
+
+          cs_info = cloud_storage_info.CloudStorageInfo(
+              cs_bucket, cs_hash, download_path, cs_remote_path,
+              version_in_cs=version_in_cs, archive_info=zip_info)
+
+        dep_info = dependency_info.DependencyInfo(
+            dependency, platform, self._config_path,
+            local_path_info=local_info, cloud_storage_info=cs_info)
+        yield dep_info
+
+  @classmethod
+  def GetConfigType(cls):
+    return 'BaseConfig'
+
+  @property
+  def config_path(self):
+    return self._config_path
+
+  def AddCloudStorageDependencyUpdateJob(
+      self, dependency, platform, dependency_path, version=None,
+      execute_job=True):
+    """Update the file downloaded from cloud storage for a dependency/platform.
+
+    Upload a new file to cloud storage for the given dependency and platform
+    pair and update the cloud storage hash and the version for the given pair.
+
+    Example usage:
+      The following should update the default platform for 'dep_name':
+          AddCloudStorageDependencyUpdateJob(
+              'dep_name', 'default', 'path/to/file')
+
+      The following should update both the mac and win platforms for 'dep_name',
+      or neither if either update fails:
+          AddCloudStorageDependencyUpdateJob(
+              'dep_name', 'mac_x86_64', 'path/to/mac/file', execute_job=False)
+          AddCloudStorageDependencyUpdateJob(
+              'dep_name', 'win_AMD64', 'path/to/win/file', execute_job=False)
+          ExecuteUpdateJobs()
+
+    Args:
+      dependency: The dependency to update.
+      platform: The platform to update the dependency info for.
+      dependency_path: Path to the new dependency to be used.
+      version: Version of the updated dependency, for checking future updates
+          against.
+      execute_job: True if the config should be written to disk and the file
+          should be uploaded to cloud storage after the update. False if
+          multiple updates should be performed atomically. Must call
+          ExecuteUpdateJobs after all non-executed jobs are added to complete
+          the update.
+
+    Raises:
+      ReadWriteError: If the config was not initialized as writable, or if
+          |execute_job| is True but the config has update jobs still pending
+          execution.
+      ValueError: If no information exists in the config for |dependency| on
+          |platform|.
+    """
+    self._ValidateIsConfigUpdatable(
+        execute_job=execute_job, dependency=dependency, platform=platform)
+    cs_hash = cloud_storage.CalculateHash(dependency_path)
+    if version:
+      self._SetPlatformData(dependency, platform, 'version_in_cs', version)
+    self._SetPlatformData(dependency, platform, 'cloud_storage_hash', cs_hash)
+
+    cs_base_folder = self._GetPlatformData(
+        dependency, platform, 'cloud_storage_base_folder')
+    cs_bucket = self._GetPlatformData(
+        dependency, platform, 'cloud_storage_bucket')
+    cs_remote_path = self._CloudStorageRemotePath(
+        dependency, cs_hash, cs_base_folder)
+    self._pending_uploads.append(uploader.CloudStorageUploader(
+        cs_bucket, cs_remote_path, dependency_path))
+    if execute_job:
+      self.ExecuteUpdateJobs()
+
+  def ExecuteUpdateJobs(self, force=False):
+    """Write all config changes to the config_path specified in __init__.
+
+    Upload all files pending upload and then write the updated config to
+    file. Attempt to remove all uploaded files on failure.
+
+    Args:
+      force: True if files should be uploaded to cloud storage even if a
+          file already exists in the upload location.
+
+    Returns:
+      True: if the config was dirty and the upload succeeded.
+      False: if the config was not dirty.
+
+    Raises:
+      CloudStorageUploadConflictError: If |force| is False and the potential
+          upload location of a file already exists.
+      CloudStorageError: If copying an existing file to the backup location
+          or uploading a new file fails.
+    """
+    self._ValidateIsConfigUpdatable()
+    if not self._IsDirty():
+      logging.info('ExecuteUpdateJobs called on clean config')
+      return False
+    if not self._pending_uploads:
+      logging.debug('No files needing upload.')
+    else:
+      try:
+        for item_pending_upload in self._pending_uploads:
+          item_pending_upload.Upload(force)
+        self._WriteConfigToFile(self._config_path, self._config_data)
+        self._pending_uploads = []
+      except:
+        # Attempt to rollback the update in any instance of failure, even user
+        # interrupt via Ctrl+C; but don't consume the exception.
+        logging.error('Update failed, attempting to roll it back.')
+        for upload_item in reversed(self._pending_uploads):
+          upload_item.Rollback()
+        raise
+    return True
+
+  def GetVersion(self, dependency, platform):
+    """Return the Version information for the given dependency."""
+    return self._GetPlatformData(
+        dependency, platform, data_type='version_in_cs')
+
+  def _IsDirty(self):
+    with open(self._config_path, 'r') as fstream:
+      curr_config_data = json.load(fstream)
+    curr_config_data = curr_config_data.get('dependencies', {})
+    return self._config_data != curr_config_data
+
+  def _SetPlatformData(self, dependency, platform, data_type, data):
+    self._ValidateIsConfigWritable()
+    dependency_dict = self._config_data.get(dependency, {})
+    platform_dict = dependency_dict.get('file_info', {}).get(platform)
+    if not platform_dict:
+      raise ValueError('No platform data for platform %s on dependency %s' %
+                       (platform, dependency))
+    if (data_type == 'cloud_storage_bucket' or
+        data_type == 'cloud_storage_base_folder'):
+      self._config_data[dependency][data_type] = data
+    else:
+      self._config_data[dependency]['file_info'][platform][data_type] = data
+
+  def _GetPlatformData(self, dependency, platform, data_type=None):
+    dependency_dict = self._config_data.get(dependency, {})
+    if not dependency_dict:
+      raise ValueError('Dependency %s is not in config.' % dependency)
+    platform_dict = dependency_dict.get('file_info', {}).get(platform)
+    if not platform_dict:
+      raise ValueError('No platform data for platform %s on dependency %s' %
+                       (platform, dependency))
+    if data_type:
+      if (data_type == 'cloud_storage_bucket' or
+          data_type == 'cloud_storage_base_folder'):
+        return dependency_dict.get(data_type)
+      return platform_dict.get(data_type)
+    return platform_dict
+
+  def _ValidateIsConfigUpdatable(
+      self, execute_job=False, dependency=None, platform=None):
+    self._ValidateIsConfigWritable()
+    if self._IsDirty() and execute_job:
+      raise exceptions.ReadWriteError(
+          'A change has already been made to this config. Either call without '
+          'using the execute_job option or first call ExecuteUpdateJobs().')
+    if dependency and not self._config_data.get(dependency):
+      raise ValueError('Cannot update information because dependency %s does '
+                       'not exist.' % dependency)
+    if platform and not self._GetPlatformData(dependency, platform):
+      raise ValueError('No dependency info is available for the given '
+                       'dependency: %s' % dependency)
+
+  def _ValidateIsConfigWritable(self):
+    if not self._writable:
+      raise exceptions.ReadWriteError(
+          'Trying to update the information from a read-only config. '
+          'File for config: %s' % self._config_path)
+
+  @staticmethod
+  def _CloudStorageRemotePath(dependency, cs_hash, cs_base_folder):
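+    # e.g. ('dep', 'hash123', 'folder') -> 'folder/dep_hash123'; with no base
+    # folder -> 'dep_hash123' (illustrative values).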
+    cs_remote_file = '%s_%s' % (dependency, cs_hash)
+    cs_remote_path = cs_remote_file if not cs_base_folder else (
+        '%s/%s' % (cs_base_folder, cs_remote_file))
+    return cs_remote_path
+
+  @classmethod
+  def _FormatPath(cls, file_path):
+    """ Format |file_path| for the current file system.
+
+    We may be downloading files for another platform, so path separators in
+    the config are converted to those of the current file system.
+    """
+    if not file_path:
+      return file_path
+    if os.path.sep != '\\':
+      return file_path.replace('\\', os.path.sep)
+    elif os.path.sep != '/':
+      return file_path.replace('/', os.path.sep)
+    return file_path
+
+  @classmethod
+  def _WriteConfigToFile(cls, file_path, dependencies=None):
+    json_dict = cls._GetJsonDict(dependencies)
+    file_dir = os.path.dirname(file_path)
+    if not os.path.exists(file_dir):
+      os.makedirs(file_dir)
+    with open(file_path, 'w') as outfile:
+      json.dump(
+          json_dict, outfile, indent=2, sort_keys=True, separators=(',', ': '))
+    return json_dict
+
+  @classmethod
+  def _GetJsonDict(cls, dependencies=None):
+    dependencies = dependencies or {}
+    json_dict = {'config_type': cls.GetConfigType(),
+                 'dependencies': dependencies}
+    return json_dict
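+
+
+# An illustrative, commented sketch of the writable update flow; the
+# dependency names and file paths below are hypothetical.
+#
+#   config = BaseConfig('path/to/dependencies.json', writable=True)
+#   config.AddCloudStorageDependencyUpdateJob(
+#       'dep_name', 'mac_x86_64', 'path/to/mac/file', execute_job=False)
+#   config.AddCloudStorageDependencyUpdateJob(
+#       'dep_name', 'win_AMD64', 'path/to/win/file', execute_job=False)
+#   config.ExecuteUpdateJobs()  # Uploads pending files, then writes the
+#                               # updated config back to disk.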
diff --git a/catapult/dependency_manager/dependency_manager/base_config_unittest.py b/catapult/dependency_manager/dependency_manager/base_config_unittest.py
new file mode 100755
index 0000000..6f1140c
--- /dev/null
+++ b/catapult/dependency_manager/dependency_manager/base_config_unittest.py
@@ -0,0 +1,1488 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-argument
+
+import os
+import unittest
+
+from catapult_base import cloud_storage
+import mock
+from pyfakefs import fake_filesystem_unittest
+from pyfakefs import fake_filesystem
+
+import dependency_manager
+from dependency_manager import uploader
+
+
+class BaseConfigCreationAndUpdateUnittests(fake_filesystem_unittest.TestCase):
+  def setUp(self):
+    self.addTypeEqualityFunc(uploader.CloudStorageUploader,
+                             uploader.CloudStorageUploader.__eq__)
+    self.setUpPyfakefs()
+    self.dependencies = {
+        'dep1': {'cloud_storage_bucket': 'bucket1',
+                 'cloud_storage_base_folder': 'dependencies_folder',
+                 'file_info': {
+                     'plat1': {
+                         'cloud_storage_hash': 'hash11',
+                         'download_path': '../../relative/dep1/path1'},
+                     'plat2': {
+                         'cloud_storage_hash': 'hash12',
+                         'download_path': '../../relative/dep1/path2'}}},
+        'dep2': {'cloud_storage_bucket': 'bucket2',
+                 'file_info': {
+                     'plat1': {
+                         'cloud_storage_hash': 'hash21',
+                         'download_path': '../../relative/dep2/path1'},
+                     'plat2': {
+                         'cloud_storage_hash': 'hash22',
+                         'download_path': '../../relative/dep2/path2'}}}}
+
+    self.expected_file_lines = [
+      # pylint: disable=bad-continuation
+      '{', '"config_type": "BaseConfig",', '"dependencies": {',
+        '"dep1": {', '"cloud_storage_base_folder": "dependencies_folder",',
+          '"cloud_storage_bucket": "bucket1",', '"file_info": {',
+            '"plat1": {', '"cloud_storage_hash": "hash11",',
+              '"download_path": "../../relative/dep1/path1"', '},',
+            '"plat2": {', '"cloud_storage_hash": "hash12",',
+              '"download_path": "../../relative/dep1/path2"', '}', '}', '},',
+        '"dep2": {', '"cloud_storage_bucket": "bucket2",', '"file_info": {',
+            '"plat1": {', '"cloud_storage_hash": "hash21",',
+              '"download_path": "../../relative/dep2/path1"', '},',
+            '"plat2": {', '"cloud_storage_hash": "hash22",',
+              '"download_path": "../../relative/dep2/path2"', '}', '}', '}',
+      '}', '}']
+
+    self.file_path = os.path.abspath(os.path.join(
+        'path', 'to', 'config', 'file'))
+
+    self.new_dep_path = 'path/to/new/dep'
+    self.fs.CreateFile(self.new_dep_path)
+    self.new_dep_hash = 'A23B56B7F23E798601F'
+    self.new_dependencies = {
+        'dep1': {'cloud_storage_bucket': 'bucket1',
+                 'cloud_storage_base_folder': 'dependencies_folder',
+                 'file_info': {
+                     'plat1': {
+                         'cloud_storage_hash': 'hash11',
+                         'download_path': '../../relative/dep1/path1'},
+                     'plat2': {
+                         'cloud_storage_hash': self.new_dep_hash,
+                         'download_path': '../../relative/dep1/path2'}}},
+        'dep2': {'cloud_storage_bucket': 'bucket2',
+                 'file_info': {
+                     'plat1': {
+                         'cloud_storage_hash': 'hash21',
+                         'download_path': '../../relative/dep2/path1'},
+                     'plat2': {
+                         'cloud_storage_hash': 'hash22',
+                         'download_path': '../../relative/dep2/path2'}}}}
+    self.new_bucket = 'bucket1'
+    self.new_remote_path = 'dependencies_folder/dep1_%s' % self.new_dep_hash
+    self.new_pending_upload = uploader.CloudStorageUploader(
+        self.new_bucket, self.new_remote_path, self.new_dep_path)
+    self.expected_new_backup_path = '.'.join([self.new_remote_path, 'old'])
+    self.new_expected_file_lines = [
+      # pylint: disable=bad-continuation
+      '{', '"config_type": "BaseConfig",', '"dependencies": {',
+        '"dep1": {', '"cloud_storage_base_folder": "dependencies_folder",',
+          '"cloud_storage_bucket": "bucket1",', '"file_info": {',
+            '"plat1": {', '"cloud_storage_hash": "hash11",',
+              '"download_path": "../../relative/dep1/path1"', '},',
+            '"plat2": {', '"cloud_storage_hash": "%s",' % self.new_dep_hash,
+              '"download_path": "../../relative/dep1/path2"', '}', '}', '},',
+        '"dep2": {', '"cloud_storage_bucket": "bucket2",', '"file_info": {',
+            '"plat1": {', '"cloud_storage_hash": "hash21",',
+              '"download_path": "../../relative/dep2/path1"', '},',
+            '"plat2": {', '"cloud_storage_hash": "hash22",',
+              '"download_path": "../../relative/dep2/path2"', '}', '}', '}',
+      '}', '}']
+
+    self.final_dep_path = 'path/to/final/dep'
+    self.fs.CreateFile(self.final_dep_path)
+    self.final_dep_hash = 'B34662F23B56B7F98601F'
+    self.final_bucket = 'bucket2'
+    self.final_remote_path = 'dep1_%s' % self.final_dep_hash
+    self.final_pending_upload = uploader.CloudStorageUploader(
+        self.final_bucket, self.final_remote_path, self.final_dep_path)
+    self.expected_final_backup_path = '.'.join([self.final_remote_path,
+                                                'old'])
+    self.final_dependencies = {
+        'dep1': {'cloud_storage_bucket': 'bucket1',
+                 'cloud_storage_base_folder': 'dependencies_folder',
+                 'file_info': {
+                     'plat1': {
+                         'cloud_storage_hash': 'hash11',
+                         'download_path': '../../relative/dep1/path1'},
+                     'plat2': {
+                         'cloud_storage_hash': self.new_dep_hash,
+                         'download_path': '../../relative/dep1/path2'}}},
+        'dep2': {'cloud_storage_bucket': 'bucket2',
+                 'file_info': {
+                     'plat1': {
+                         'cloud_storage_hash': self.final_dep_hash,
+                         'download_path': '../../relative/dep2/path1'},
+                     'plat2': {
+                         'cloud_storage_hash': 'hash22',
+                         'download_path': '../../relative/dep2/path2'}}}}
+    self.final_expected_file_lines = [
+      # pylint: disable=bad-continuation
+      '{', '"config_type": "BaseConfig",', '"dependencies": {',
+        '"dep1": {', '"cloud_storage_base_folder": "dependencies_folder",',
+          '"cloud_storage_bucket": "bucket1",', '"file_info": {',
+            '"plat1": {', '"cloud_storage_hash": "hash11",',
+              '"download_path": "../../relative/dep1/path1"', '},',
+            '"plat2": {', '"cloud_storage_hash": "%s",' % self.new_dep_hash,
+              '"download_path": "../../relative/dep1/path2"', '}', '}', '},',
+        '"dep2": {', '"cloud_storage_bucket": "bucket2",', '"file_info": {',
+            '"plat1": {', '"cloud_storage_hash": "%s",' % self.final_dep_hash,
+              '"download_path": "../../relative/dep2/path1"', '},',
+            '"plat2": {', '"cloud_storage_hash": "hash22",',
+              '"download_path": "../../relative/dep2/path2"', '}', '}', '}',
+      '}', '}']
+
+
+  def tearDown(self):
+    self.tearDownPyfakefs()
+
+  # __init__ is not meant to be overridden, so these tests mock base_config's
+  # json module directly; subclasses should do the same.
+  def testCreateEmptyConfig(self):
+    expected_file_lines = ['{',
+                           '"config_type": "BaseConfig",',
+                           '"dependencies": {}',
+                           '}']
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+
+    file_module = fake_filesystem.FakeFileOpen(self.fs)
+    for line in file_module(self.file_path):
+      self.assertEqual(expected_file_lines.pop(0), line.strip())
+    self.fs.CloseOpenFile(file_module(self.file_path))
+    self.assertEqual({}, config._config_data)
+    self.assertEqual(self.file_path, config._config_path)
+
+  def testCreateEmptyConfigError(self):
+    self.assertRaises(dependency_manager.EmptyConfigError,
+                      dependency_manager.BaseConfig, self.file_path)
+
+  def testCloudStorageRemotePath(self):
+    dependency = 'dep_name'
+    cs_hash = self.new_dep_hash
+    cs_base_folder = 'dependency_remote_folder'
+    expected_remote_path = '%s/%s_%s' % (cs_base_folder, dependency, cs_hash)
+    remote_path = dependency_manager.BaseConfig._CloudStorageRemotePath(
+        dependency, cs_hash, cs_base_folder)
+    self.assertEqual(expected_remote_path, remote_path)
+
+    cs_base_folder = None
+    expected_remote_path = '%s_%s' % (dependency, cs_hash)
+    remote_path = dependency_manager.BaseConfig._CloudStorageRemotePath(
+        dependency, cs_hash, cs_base_folder)
+    self.assertEqual(expected_remote_path, remote_path)
+
+  def testGetEmptyJsonDict(self):
+    expected_json_dict = {'config_type': 'BaseConfig',
+                          'dependencies': {}}
+    json_dict = dependency_manager.BaseConfig._GetJsonDict()
+    self.assertEqual(expected_json_dict, json_dict)
+
+  def testGetNonEmptyJsonDict(self):
+    expected_json_dict = {"config_type": "BaseConfig",
+                          "dependencies": self.dependencies}
+    json_dict = dependency_manager.BaseConfig._GetJsonDict(self.dependencies)
+    self.assertEqual(expected_json_dict, json_dict)
+
+  def testWriteEmptyConfigToFile(self):
+    expected_file_lines = ['{', '"config_type": "BaseConfig",',
+                           '"dependencies": {}', '}']
+    self.assertFalse(os.path.exists(self.file_path))
+    dependency_manager.BaseConfig._WriteConfigToFile(self.file_path)
+    self.assertTrue(os.path.exists(self.file_path))
+    file_module = fake_filesystem.FakeFileOpen(self.fs)
+    for line in file_module(self.file_path):
+      self.assertEqual(expected_file_lines.pop(0), line.strip())
+    self.fs.CloseOpenFile(file_module(self.file_path))
+
+  def testWriteNonEmptyConfigToFile(self):
+    self.assertFalse(os.path.exists(self.file_path))
+    dependency_manager.BaseConfig._WriteConfigToFile(self.file_path,
+                                                     self.dependencies)
+    self.assertTrue(os.path.exists(self.file_path))
+    expected_file_lines = list(self.expected_file_lines)
+    file_module = fake_filesystem.FakeFileOpen(self.fs)
+    for line in file_module(self.file_path):
+      self.assertEqual(expected_file_lines.pop(0), line.strip())
+    self.fs.CloseOpenFile(file_module(self.file_path))
+
+  @mock.patch('dependency_manager.uploader.cloud_storage')
+  def testExecuteUpdateJobsNoOp(self, uploader_cs_mock):
+    self.fs.CreateFile(self.file_path,
+                       contents='\n'.join(self.expected_file_lines))
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+
+    self.assertFalse(config.ExecuteUpdateJobs())
+    self.assertFalse(config._IsDirty())
+    self.assertFalse(config._pending_uploads)
+    self.assertEqual(self.dependencies, config._config_data)
+    file_module = fake_filesystem.FakeFileOpen(self.fs)
+    expected_file_lines = list(self.expected_file_lines)
+    for line in file_module(self.file_path):
+      self.assertEqual(expected_file_lines.pop(0), line.strip())
+    self.fs.CloseOpenFile(file_module(self.file_path))
+
+  @mock.patch('dependency_manager.uploader.cloud_storage')
+  def testExecuteUpdateJobsFailureOnInsertNoCSCollision(
+      self, uploader_cs_mock):
+    uploader_cs_mock.Exists.return_value = False
+    uploader_cs_mock.Insert.side_effect = cloud_storage.CloudStorageError
+    self.fs.CreateFile(self.file_path,
+                       contents='\n'.join(self.expected_file_lines))
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    config._config_data = self.new_dependencies.copy()
+    config._is_dirty = True
+    config._pending_uploads = [self.new_pending_upload]
+    self.assertEqual(self.new_dependencies, config._config_data)
+    self.assertTrue(config._is_dirty)
+    self.assertEqual(1, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path)]
+    expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
+                                       self.new_dep_path)]
+    expected_copy_calls = []
+    expected_delete_calls = []
+
+    self.assertRaises(cloud_storage.CloudStorageError,
+                      config.ExecuteUpdateJobs)
+    self.assertTrue(config._is_dirty)
+    self.assertEqual(1, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    self.assertEqual(self.new_dependencies, config._config_data)
+    file_module = fake_filesystem.FakeFileOpen(self.fs)
+    expected_file_lines = list(self.expected_file_lines)
+    for line in file_module(self.file_path):
+      self.assertEqual(expected_file_lines.pop(0), line.strip())
+    self.fs.CloseOpenFile(file_module(self.file_path))
+    self.assertEqual(1, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    self.assertEqual(expected_insert_calls,
+                     uploader_cs_mock.Insert.call_args_list)
+    self.assertEqual(expected_exists_calls,
+                     uploader_cs_mock.Exists.call_args_list)
+    self.assertEqual(expected_copy_calls,
+                     uploader_cs_mock.Copy.call_args_list)
+    self.assertEqual(expected_delete_calls,
+                     uploader_cs_mock.Delete.call_args_list)
+
+  @mock.patch('dependency_manager.uploader.cloud_storage')
+  def testExecuteUpdateJobsFailureOnInsertCSCollisionForce(
+      self, uploader_cs_mock):
+    uploader_cs_mock.Exists.return_value = True
+    uploader_cs_mock.Insert.side_effect = cloud_storage.CloudStorageError
+    self.fs.CreateFile(self.file_path,
+                       contents='\n'.join(self.expected_file_lines))
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    config._config_data = self.new_dependencies.copy()
+    config._is_dirty = True
+    config._pending_uploads = [self.new_pending_upload]
+    self.assertEqual(self.new_dependencies, config._config_data)
+    self.assertTrue(config._is_dirty)
+    self.assertEqual(1, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path)]
+    expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
+                                       self.new_dep_path)]
+    expected_copy_calls = [mock.call(self.new_bucket, self.new_bucket,
+                                     self.new_remote_path,
+                                     self.expected_new_backup_path),
+                           mock.call(self.new_bucket, self.new_bucket,
+                                     self.expected_new_backup_path,
+                                     self.new_remote_path)]
+    expected_delete_calls = []
+
+    self.assertRaises(cloud_storage.CloudStorageError,
+                      config.ExecuteUpdateJobs, force=True)
+    self.assertTrue(config._is_dirty)
+    self.assertEqual(1, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    self.assertEqual(self.new_dependencies, config._config_data)
+    file_module = fake_filesystem.FakeFileOpen(self.fs)
+    expected_file_lines = list(self.expected_file_lines)
+    for line in file_module(self.file_path):
+      self.assertEqual(expected_file_lines.pop(0), line.strip())
+    self.fs.CloseOpenFile(file_module(self.file_path))
+    self.assertEqual(1, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    self.assertEqual(expected_insert_calls,
+                     uploader_cs_mock.Insert.call_args_list)
+    self.assertEqual(expected_exists_calls,
+                     uploader_cs_mock.Exists.call_args_list)
+    self.assertEqual(expected_copy_calls,
+                     uploader_cs_mock.Copy.call_args_list)
+    self.assertEqual(expected_delete_calls,
+                     uploader_cs_mock.Delete.call_args_list)
+
+  @mock.patch('dependency_manager.uploader.cloud_storage')
+  def testExecuteUpdateJobsFailureOnInsertCSCollisionNoForce(
+      self, uploader_cs_mock):
+    uploader_cs_mock.Exists.return_value = True
+    uploader_cs_mock.Insert.side_effect = cloud_storage.CloudStorageError
+    self.fs.CreateFile(self.file_path,
+                       contents='\n'.join(self.expected_file_lines))
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    config._config_data = self.new_dependencies.copy()
+    config._is_dirty = True
+    config._pending_uploads = [self.new_pending_upload]
+    self.assertEqual(self.new_dependencies, config._config_data)
+    self.assertTrue(config._is_dirty)
+    self.assertEqual(1, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path)]
+    expected_insert_calls = []
+    expected_copy_calls = []
+    expected_delete_calls = []
+
+    self.assertRaises(cloud_storage.CloudStorageError,
+                      config.ExecuteUpdateJobs)
+    self.assertTrue(config._is_dirty)
+    self.assertEqual(1, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    self.assertEqual(self.new_dependencies, config._config_data)
+    file_module = fake_filesystem.FakeFileOpen(self.fs)
+    expected_file_lines = list(self.expected_file_lines)
+    for line in file_module(self.file_path):
+      self.assertEqual(expected_file_lines.pop(0), line.strip())
+    self.fs.CloseOpenFile(file_module(self.file_path))
+    self.assertEqual(1, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    self.assertEqual(expected_insert_calls,
+                     uploader_cs_mock.Insert.call_args_list)
+    self.assertEqual(expected_exists_calls,
+                     uploader_cs_mock.Exists.call_args_list)
+    self.assertEqual(expected_copy_calls,
+                     uploader_cs_mock.Copy.call_args_list)
+    self.assertEqual(expected_delete_calls,
+                     uploader_cs_mock.Delete.call_args_list)
+
+  @mock.patch('dependency_manager.uploader.cloud_storage')
+  def testExecuteUpdateJobsFailureOnCopy(
+      self, uploader_cs_mock):
+    uploader_cs_mock.Exists.return_value = True
+    uploader_cs_mock.Copy.side_effect = cloud_storage.CloudStorageError
+    self.fs.CreateFile(self.file_path,
+                       contents='\n'.join(self.expected_file_lines))
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    config._config_data = self.new_dependencies.copy()
+    config._is_dirty = True
+    config._pending_uploads = [self.new_pending_upload]
+    self.assertEqual(self.new_dependencies, config._config_data)
+    self.assertTrue(config._is_dirty)
+    self.assertEqual(1, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path)]
+    expected_insert_calls = []
+    expected_copy_calls = [mock.call(self.new_bucket, self.new_bucket,
+                                     self.new_remote_path,
+                                     self.expected_new_backup_path)]
+    expected_delete_calls = []
+
+    self.assertRaises(cloud_storage.CloudStorageError,
+                      config.ExecuteUpdateJobs, force=True)
+    self.assertTrue(config._is_dirty)
+    self.assertEqual(1, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    self.assertEqual(self.new_dependencies, config._config_data)
+    file_module = fake_filesystem.FakeFileOpen(self.fs)
+    expected_file_lines = list(self.expected_file_lines)
+    for line in file_module(self.file_path):
+      self.assertEqual(expected_file_lines.pop(0), line.strip())
+    self.fs.CloseOpenFile(file_module(self.file_path))
+    self.assertEqual(1, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    self.assertEqual(expected_insert_calls,
+                     uploader_cs_mock.Insert.call_args_list)
+    self.assertEqual(expected_exists_calls,
+                     uploader_cs_mock.Exists.call_args_list)
+    self.assertEqual(expected_copy_calls,
+                     uploader_cs_mock.Copy.call_args_list)
+    self.assertEqual(expected_delete_calls,
+                     uploader_cs_mock.Delete.call_args_list)
+
+  @mock.patch('dependency_manager.uploader.cloud_storage')
+  def testExecuteUpdateJobsFailureOnSecondInsertNoCSCollision(
+      self, uploader_cs_mock):
+    uploader_cs_mock.Exists.return_value = False
+    uploader_cs_mock.Insert.side_effect = [
+        True, cloud_storage.CloudStorageError]
+    self.fs.CreateFile(self.file_path,
+                       contents='\n'.join(self.expected_file_lines))
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    config._config_data = self.new_dependencies.copy()
+    config._is_dirty = True
+    config._pending_uploads = [self.new_pending_upload,
+                               self.final_pending_upload]
+    self.assertEqual(self.new_dependencies, config._config_data)
+    self.assertTrue(config._is_dirty)
+    self.assertEqual(2, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
+    expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path),
+                             mock.call(self.final_bucket,
+                                       self.final_remote_path)]
+    expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
+                                       self.new_dep_path),
+                             mock.call(self.final_bucket,
+                                       self.final_remote_path,
+                                       self.final_dep_path)]
+    expected_copy_calls = []
+    expected_delete_calls = [mock.call(self.new_bucket, self.new_remote_path)]
+
+    self.assertRaises(cloud_storage.CloudStorageError,
+                      config.ExecuteUpdateJobs)
+    self.assertTrue(config._is_dirty)
+    self.assertEqual(2, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
+    self.assertEqual(self.new_dependencies, config._config_data)
+    file_module = fake_filesystem.FakeFileOpen(self.fs)
+    expected_file_lines = list(self.expected_file_lines)
+    for line in file_module(self.file_path):
+      self.assertEqual(expected_file_lines.pop(0), line.strip())
+    self.fs.CloseOpenFile(file_module(self.file_path))
+    self.assertEqual(expected_insert_calls,
+                     uploader_cs_mock.Insert.call_args_list)
+    self.assertEqual(expected_exists_calls,
+                     uploader_cs_mock.Exists.call_args_list)
+    self.assertEqual(expected_copy_calls,
+                     uploader_cs_mock.Copy.call_args_list)
+    self.assertEqual(expected_delete_calls,
+                     uploader_cs_mock.Delete.call_args_list)
+
+  @mock.patch('dependency_manager.uploader.cloud_storage')
+  def testExecuteUpdateJobsFailureOnSecondInsertCSCollisionForce(
+      self, uploader_cs_mock):
+    uploader_cs_mock.Exists.return_value = True
+    uploader_cs_mock.Insert.side_effect = [
+        True, cloud_storage.CloudStorageError]
+    self.fs.CreateFile(self.file_path,
+                       contents='\n'.join(self.expected_file_lines))
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    config._config_data = self.new_dependencies.copy()
+    config._is_dirty = True
+    config._pending_uploads = [self.new_pending_upload,
+                               self.final_pending_upload]
+    self.assertEqual(self.new_dependencies, config._config_data)
+    self.assertTrue(config._is_dirty)
+    self.assertEqual(2, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
+    expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path),
+                             mock.call(self.final_bucket,
+                                       self.final_remote_path)]
+    expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
+                                       self.new_dep_path),
+                             mock.call(self.final_bucket,
+                                       self.final_remote_path,
+                                       self.final_dep_path)]
+    expected_copy_calls = [mock.call(self.new_bucket, self.new_bucket,
+                                     self.new_remote_path,
+                                     self.expected_new_backup_path),
+                           mock.call(self.final_bucket, self.final_bucket,
+                                     self.final_remote_path,
+                                     self.expected_final_backup_path),
+                           mock.call(self.final_bucket, self.final_bucket,
+                                     self.expected_final_backup_path,
+                                     self.final_remote_path),
+                           mock.call(self.new_bucket, self.new_bucket,
+                                     self.expected_new_backup_path,
+                                     self.new_remote_path)]
+    expected_delete_calls = []
+
+    self.assertRaises(cloud_storage.CloudStorageError,
+                      config.ExecuteUpdateJobs, force=True)
+    self.assertTrue(config._is_dirty)
+    self.assertEqual(2, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
+    self.assertEqual(self.new_dependencies, config._config_data)
+    file_module = fake_filesystem.FakeFileOpen(self.fs)
+    expected_file_lines = list(self.expected_file_lines)
+    for line in file_module(self.file_path):
+      self.assertEqual(expected_file_lines.pop(0), line.strip())
+    self.fs.CloseOpenFile(file_module(self.file_path))
+    self.assertEqual(expected_insert_calls,
+                     uploader_cs_mock.Insert.call_args_list)
+    self.assertEqual(expected_exists_calls,
+                     uploader_cs_mock.Exists.call_args_list)
+    self.assertEqual(expected_copy_calls,
+                     uploader_cs_mock.Copy.call_args_list)
+    self.assertEqual(expected_delete_calls,
+                     uploader_cs_mock.Delete.call_args_list)
+
+  @mock.patch('dependency_manager.uploader.cloud_storage')
+  def testExecuteUpdateJobsFailureOnSecondInsertFirstCSCollisionForce(
+      self, uploader_cs_mock):
+    uploader_cs_mock.Exists.side_effect = [True, False, True]
+    uploader_cs_mock.Insert.side_effect = [
+        True, cloud_storage.CloudStorageError]
+    self.fs.CreateFile(self.file_path,
+                       contents='\n'.join(self.expected_file_lines))
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    config._config_data = self.new_dependencies.copy()
+    config._is_dirty = True
+    config._pending_uploads = [self.new_pending_upload,
+                               self.final_pending_upload]
+    self.assertEqual(self.new_dependencies, config._config_data)
+    self.assertTrue(config._is_dirty)
+    self.assertEqual(2, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
+    expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path),
+                             mock.call(self.final_bucket,
+                                       self.final_remote_path)]
+    expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
+                                       self.new_dep_path),
+                             mock.call(self.final_bucket,
+                                       self.final_remote_path,
+                                       self.final_dep_path)]
+    expected_copy_calls = [mock.call(self.new_bucket, self.new_bucket,
+                                     self.new_remote_path,
+                                     self.expected_new_backup_path),
+                           mock.call(self.new_bucket, self.new_bucket,
+                                     self.expected_new_backup_path,
+                                     self.new_remote_path)]
+    expected_delete_calls = []
+
+    self.assertRaises(cloud_storage.CloudStorageError,
+                      config.ExecuteUpdateJobs, force=True)
+    self.assertTrue(config._is_dirty)
+    self.assertEqual(2, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
+    self.assertEqual(self.new_dependencies, config._config_data)
+    file_module = fake_filesystem.FakeFileOpen(self.fs)
+    expected_file_lines = list(self.expected_file_lines)
+    for line in file_module(self.file_path):
+      self.assertEqual(expected_file_lines.pop(0), line.strip())
+    self.fs.CloseOpenFile(file_module(self.file_path))
+    self.assertEqual(expected_insert_calls,
+                     uploader_cs_mock.Insert.call_args_list)
+    self.assertEqual(expected_exists_calls,
+                     uploader_cs_mock.Exists.call_args_list)
+    self.assertEqual(expected_copy_calls,
+                     uploader_cs_mock.Copy.call_args_list)
+    self.assertEqual(expected_delete_calls,
+                     uploader_cs_mock.Delete.call_args_list)
+
+  @mock.patch('dependency_manager.uploader.cloud_storage')
+  def testExecuteUpdateJobsFailureOnFirstCSCollisionNoForce(
+      self, uploader_cs_mock):
+    uploader_cs_mock.Exists.side_effect = [True, False, True]
+    uploader_cs_mock.Insert.side_effect = [
+        True, cloud_storage.CloudStorageError]
+    self.fs.CreateFile(self.file_path,
+                       contents='\n'.join(self.expected_file_lines))
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    config._config_data = self.new_dependencies.copy()
+    config._is_dirty = True
+    config._pending_uploads = [self.new_pending_upload,
+                               self.final_pending_upload]
+    self.assertEqual(self.new_dependencies, config._config_data)
+    self.assertTrue(config._is_dirty)
+    self.assertEqual(2, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
+    expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path)]
+    expected_insert_calls = []
+    expected_copy_calls = []
+    expected_delete_calls = []
+
+    self.assertRaises(cloud_storage.CloudStorageError,
+                      config.ExecuteUpdateJobs)
+    self.assertTrue(config._is_dirty)
+    self.assertEqual(2, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
+    self.assertEqual(self.new_dependencies, config._config_data)
+    file_module = fake_filesystem.FakeFileOpen(self.fs)
+    expected_file_lines = list(self.expected_file_lines)
+    for line in file_module(self.file_path):
+      self.assertEqual(expected_file_lines.pop(0), line.strip())
+    self.fs.CloseOpenFile(file_module(self.file_path))
+    self.assertEqual(expected_insert_calls,
+                     uploader_cs_mock.Insert.call_args_list)
+    self.assertEqual(expected_exists_calls,
+                     uploader_cs_mock.Exists.call_args_list)
+    self.assertEqual(expected_copy_calls,
+                     uploader_cs_mock.Copy.call_args_list)
+    self.assertEqual(expected_delete_calls,
+                     uploader_cs_mock.Delete.call_args_list)
+
+  @mock.patch('dependency_manager.uploader.cloud_storage')
+  def testExecuteUpdateJobsFailureOnSecondCopyCSCollision(
+      self, uploader_cs_mock):
+    uploader_cs_mock.Exists.return_value = True
+    uploader_cs_mock.Insert.return_value = True
+    uploader_cs_mock.Copy.side_effect = [
+        True, cloud_storage.CloudStorageError, True]
+    self.fs.CreateFile(self.file_path,
+                       contents='\n'.join(self.expected_file_lines))
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    config._config_data = self.new_dependencies.copy()
+    config._is_dirty = True
+    config._pending_uploads = [self.new_pending_upload,
+                               self.final_pending_upload]
+    self.assertEqual(self.new_dependencies, config._config_data)
+    self.assertTrue(config._is_dirty)
+    self.assertEqual(2, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
+    expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path),
+                             mock.call(self.final_bucket,
+                                       self.final_remote_path)]
+    expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
+                                       self.new_dep_path)]
+    expected_copy_calls = [mock.call(self.new_bucket, self.new_bucket,
+                                     self.new_remote_path,
+                                     self.expected_new_backup_path),
+                           mock.call(self.final_bucket, self.final_bucket,
+                                     self.final_remote_path,
+                                     self.expected_final_backup_path),
+                           mock.call(self.new_bucket, self.new_bucket,
+                                     self.expected_new_backup_path,
+                                     self.new_remote_path)]
+    expected_delete_calls = []
+
+    self.assertRaises(cloud_storage.CloudStorageError,
+                      config.ExecuteUpdateJobs, force=True)
+    self.assertTrue(config._is_dirty)
+    self.assertEqual(2, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
+    self.assertEqual(self.new_dependencies, config._config_data)
+    file_module = fake_filesystem.FakeFileOpen(self.fs)
+    expected_file_lines = list(self.expected_file_lines)
+    for line in file_module(self.file_path):
+      self.assertEqual(expected_file_lines.pop(0), line.strip())
+    self.fs.CloseOpenFile(file_module(self.file_path))
+    self.assertEqual(expected_insert_calls,
+                     uploader_cs_mock.Insert.call_args_list)
+    self.assertEqual(expected_exists_calls,
+                     uploader_cs_mock.Exists.call_args_list)
+    self.assertEqual(expected_copy_calls,
+                     uploader_cs_mock.Copy.call_args_list)
+    self.assertEqual(expected_delete_calls,
+                     uploader_cs_mock.Delete.call_args_list)
+
+  @mock.patch('dependency_manager.uploader.cloud_storage')
+  def testExecuteUpdateJobsFailureOnSecondCopyNoCSCollisionForce(
+      self, uploader_cs_mock):
+    uploader_cs_mock.Exists.side_effect = [False, True, False]
+    uploader_cs_mock.Copy.side_effect = cloud_storage.CloudStorageError
+    self.fs.CreateFile(self.file_path,
+                       contents='\n'.join(self.expected_file_lines))
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    config._config_data = self.new_dependencies.copy()
+    config._is_dirty = True
+    config._pending_uploads = [self.new_pending_upload,
+                               self.final_pending_upload]
+    self.assertEqual(self.new_dependencies, config._config_data)
+    self.assertTrue(config._is_dirty)
+    self.assertEqual(2, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
+    expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path),
+                             mock.call(self.final_bucket,
+                                       self.final_remote_path)]
+    expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
+                                       self.new_dep_path)]
+    expected_copy_calls = [mock.call(self.final_bucket, self.final_bucket,
+                                     self.final_remote_path,
+                                     self.expected_final_backup_path)]
+    expected_delete_calls = [mock.call(self.new_bucket, self.new_remote_path)]
+
+    self.assertRaises(cloud_storage.CloudStorageError,
+                      config.ExecuteUpdateJobs, force=True)
+    self.assertTrue(config._is_dirty)
+    self.assertEqual(2, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
+    self.assertEqual(self.new_dependencies, config._config_data)
+    file_module = fake_filesystem.FakeFileOpen(self.fs)
+    expected_file_lines = list(self.expected_file_lines)
+    for line in file_module(self.file_path):
+      self.assertEqual(expected_file_lines.pop(0), line.strip())
+    self.fs.CloseOpenFile(file_module(self.file_path))
+    self.assertEqual(expected_insert_calls,
+                     uploader_cs_mock.Insert.call_args_list)
+    self.assertEqual(expected_exists_calls,
+                     uploader_cs_mock.Exists.call_args_list)
+    self.assertEqual(expected_copy_calls,
+                     uploader_cs_mock.Copy.call_args_list)
+    self.assertEqual(expected_delete_calls,
+                     uploader_cs_mock.Delete.call_args_list)
+
+  @mock.patch('dependency_manager.uploader.cloud_storage')
+  def testExecuteUpdateJobsFailureOnSecondCopyNoCSCollisionNoForce(
+      self, uploader_cs_mock):
+    uploader_cs_mock.Exists.side_effect = [False, True, False]
+    uploader_cs_mock.Copy.side_effect = cloud_storage.CloudStorageError
+    self.fs.CreateFile(self.file_path,
+                       contents='\n'.join(self.expected_file_lines))
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    config._config_data = self.new_dependencies.copy()
+    config._is_dirty = True
+    config._pending_uploads = [self.new_pending_upload,
+                               self.final_pending_upload]
+    self.assertEqual(self.new_dependencies, config._config_data)
+    self.assertTrue(config._is_dirty)
+    self.assertEqual(2, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
+    expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path),
+                             mock.call(self.final_bucket,
+                                       self.final_remote_path)]
+    expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
+                                       self.new_dep_path)]
+    expected_copy_calls = []
+    expected_delete_calls = [mock.call(self.new_bucket, self.new_remote_path)]
+
+    self.assertRaises(cloud_storage.CloudStorageError,
+                      config.ExecuteUpdateJobs)
+    self.assertTrue(config._is_dirty)
+    self.assertEqual(2, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
+    self.assertEqual(self.new_dependencies, config._config_data)
+    file_module = fake_filesystem.FakeFileOpen(self.fs)
+    expected_file_lines = list(self.expected_file_lines)
+    for line in file_module(self.file_path):
+      self.assertEqual(expected_file_lines.pop(0), line.strip())
+    self.fs.CloseOpenFile(file_module(self.file_path))
+    self.assertEqual(expected_insert_calls,
+                     uploader_cs_mock.Insert.call_args_list)
+    self.assertEqual(expected_exists_calls,
+                     uploader_cs_mock.Exists.call_args_list)
+    self.assertEqual(expected_copy_calls,
+                     uploader_cs_mock.Copy.call_args_list)
+    self.assertEqual(expected_delete_calls,
+                     uploader_cs_mock.Delete.call_args_list)
+
+  @mock.patch('dependency_manager.uploader.cloud_storage')
+  def testExecuteUpdateJobsSuccessOnePendingDepNoCloudStorageCollision(
+      self, uploader_cs_mock):
+    uploader_cs_mock.Exists.return_value = False
+    self.fs.CreateFile(self.file_path,
+                       contents='\n'.join(self.expected_file_lines))
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    config._config_data = self.new_dependencies.copy()
+    config._pending_uploads = [self.new_pending_upload]
+    self.assertEqual(self.new_dependencies, config._config_data)
+    self.assertTrue(config._IsDirty())
+    self.assertEqual(1, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path)]
+    expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
+                                       self.new_dep_path)]
+    expected_copy_calls = []
+    expected_delete_calls = []
+
+    self.assertTrue(config.ExecuteUpdateJobs())
+    self.assertFalse(config._IsDirty())
+    self.assertFalse(config._pending_uploads)
+    self.assertEqual(self.new_dependencies, config._config_data)
+    file_module = fake_filesystem.FakeFileOpen(self.fs)
+    expected_file_lines = list(self.new_expected_file_lines)
+    for line in file_module(self.file_path):
+      self.assertEqual(expected_file_lines.pop(0), line.strip())
+    self.fs.CloseOpenFile(file_module(self.file_path))
+    self.assertFalse(config._pending_uploads)
+    self.assertEqual(expected_insert_calls,
+                     uploader_cs_mock.Insert.call_args_list)
+    self.assertEqual(expected_exists_calls,
+                     uploader_cs_mock.Exists.call_args_list)
+    self.assertEqual(expected_copy_calls,
+                     uploader_cs_mock.Copy.call_args_list)
+    self.assertEqual(expected_delete_calls,
+                     uploader_cs_mock.Delete.call_args_list)
+
+  @mock.patch('dependency_manager.uploader.cloud_storage')
+  def testExecuteUpdateJobsSuccessOnePendingDepCloudStorageCollision(
+      self, uploader_cs_mock):
+    uploader_cs_mock.Exists.return_value = True
+    self.fs.CreateFile(self.file_path,
+                       contents='\n'.join(self.expected_file_lines))
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    config._config_data = self.new_dependencies.copy()
+    config._pending_uploads = [self.new_pending_upload]
+    self.assertEqual(self.new_dependencies, config._config_data)
+    self.assertTrue(config._IsDirty())
+    self.assertEqual(1, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path)]
+    expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
+                                       self.new_dep_path)]
+    expected_copy_calls = [mock.call(self.new_bucket, self.new_bucket,
+                                     self.new_remote_path,
+                                     self.expected_new_backup_path)]
+
+    self.assertTrue(config.ExecuteUpdateJobs(force=True))
+    self.assertFalse(config._IsDirty())
+    self.assertFalse(config._pending_uploads)
+    self.assertEqual(self.new_dependencies, config._config_data)
+    file_module = fake_filesystem.FakeFileOpen(self.fs)
+    expected_file_lines = list(self.new_expected_file_lines)
+    for line in file_module(self.file_path):
+      self.assertEqual(expected_file_lines.pop(0), line.strip())
+    self.fs.CloseOpenFile(file_module(self.file_path))
+    self.assertFalse(config._pending_uploads)
+    self.assertEqual(expected_insert_calls,
+                     uploader_cs_mock.Insert.call_args_list)
+    self.assertEqual(expected_exists_calls,
+                     uploader_cs_mock.Exists.call_args_list)
+    self.assertEqual(expected_copy_calls,
+                     uploader_cs_mock.Copy.call_args_list)
+
+  @mock.patch('dependency_manager.uploader.cloud_storage')
+  def testExecuteUpdateJobsErrorOnePendingDepCloudStorageCollisionNoForce(
+      self, uploader_cs_mock):
+    uploader_cs_mock.Exists.return_value = True
+    self.fs.CreateFile(self.file_path,
+                       contents='\n'.join(self.expected_file_lines))
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    config._config_data = self.new_dependencies.copy()
+    config._is_dirty = True
+    config._pending_uploads = [self.new_pending_upload]
+    self.assertEqual(self.new_dependencies, config._config_data)
+    self.assertTrue(config._is_dirty)
+    self.assertEqual(1, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path)]
+    expected_insert_calls = []
+    expected_copy_calls = []
+
+    self.assertRaises(dependency_manager.CloudStorageUploadConflictError,
+                      config.ExecuteUpdateJobs)
+    self.assertTrue(config._is_dirty)
+    self.assertTrue(config._pending_uploads)
+    self.assertEqual(self.new_dependencies, config._config_data)
+    self.assertEqual(1, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    file_module = fake_filesystem.FakeFileOpen(self.fs)
+    expected_file_lines = list(self.expected_file_lines)
+    for line in file_module(self.file_path):
+      self.assertEqual(expected_file_lines.pop(0), line.strip())
+    self.fs.CloseOpenFile(file_module(self.file_path))
+    self.assertEqual(expected_insert_calls,
+                     uploader_cs_mock.Insert.call_args_list)
+    self.assertEqual(expected_exists_calls,
+                     uploader_cs_mock.Exists.call_args_list)
+    self.assertEqual(expected_copy_calls,
+                     uploader_cs_mock.Copy.call_args_list)
+
+  @mock.patch('dependency_manager.uploader.cloud_storage')
+  def testExecuteUpdateJobsSuccessMultiplePendingDepsOneCloudStorageCollision(
+      self, uploader_cs_mock):
+    uploader_cs_mock.Exists.side_effect = [False, True]
+    self.fs.CreateFile(self.file_path,
+                       contents='\n'.join(self.expected_file_lines))
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    config._config_data = self.final_dependencies.copy()
+    config._pending_uploads = [self.new_pending_upload,
+                               self.final_pending_upload]
+    self.assertEqual(self.final_dependencies, config._config_data)
+    self.assertTrue(config._IsDirty())
+    self.assertEqual(2, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    self.assertEqual(self.final_pending_upload, config._pending_uploads[1])
+
+    expected_exists_calls = [mock.call(self.new_bucket, self.new_remote_path),
+                             mock.call(self.final_bucket,
+                                       self.final_remote_path)]
+    expected_insert_calls = [mock.call(self.new_bucket, self.new_remote_path,
+                                       self.new_dep_path),
+                             mock.call(self.final_bucket,
+                                       self.final_remote_path,
+                                       self.final_dep_path)]
+    expected_copy_calls = [mock.call(self.final_bucket, self.final_bucket,
+                                     self.final_remote_path,
+                                     self.expected_final_backup_path)]
+
+    self.assertTrue(config.ExecuteUpdateJobs(force=True))
+    self.assertFalse(config._IsDirty())
+    self.assertFalse(config._pending_uploads)
+    self.assertEqual(self.final_dependencies, config._config_data)
+    file_module = fake_filesystem.FakeFileOpen(self.fs)
+    expected_file_lines = list(self.final_expected_file_lines)
+    for line in file_module(self.file_path):
+      self.assertEqual(expected_file_lines.pop(0), line.strip())
+    self.fs.CloseOpenFile(file_module(self.file_path))
+    self.assertFalse(config._pending_uploads)
+    self.assertEqual(expected_insert_calls,
+                     uploader_cs_mock.Insert.call_args_list)
+    self.assertEqual(expected_exists_calls,
+                     uploader_cs_mock.Exists.call_args_list)
+    self.assertEqual(expected_copy_calls,
+                     uploader_cs_mock.Copy.call_args_list)
+
+  @mock.patch('dependency_manager.uploader.cloud_storage')
+  def testUpdateCloudStorageDependenciesReadOnlyConfig(
+      self, uploader_cs_mock):
+    self.fs.CreateFile(self.file_path,
+                       contents='\n'.join(self.expected_file_lines))
+    config = dependency_manager.BaseConfig(self.file_path)
+    with self.assertRaises(dependency_manager.ReadWriteError):
+      config.AddCloudStorageDependencyUpdateJob(
+          'dep', 'plat', 'path')
+    with self.assertRaises(dependency_manager.ReadWriteError):
+      config.AddCloudStorageDependencyUpdateJob(
+          'dep', 'plat', 'path', version='1.2.3')
+    with self.assertRaises(dependency_manager.ReadWriteError):
+      config.AddCloudStorageDependencyUpdateJob(
+          'dep', 'plat', 'path', execute_job=False)
+    with self.assertRaises(dependency_manager.ReadWriteError):
+      config.AddCloudStorageDependencyUpdateJob(
+          'dep', 'plat', 'path', version='1.2.3', execute_job=False)
+
+  @mock.patch('dependency_manager.uploader.cloud_storage')
+  def testUpdateCloudStorageDependenciesMissingDependency(
+      self, uploader_cs_mock):
+    self.fs.CreateFile(self.file_path,
+                       contents='\n'.join(self.expected_file_lines))
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    self.assertRaises(ValueError, config.AddCloudStorageDependencyUpdateJob,
+                      'dep', 'plat', 'path')
+    self.assertRaises(ValueError, config.AddCloudStorageDependencyUpdateJob,
+                      'dep', 'plat', 'path', version='1.2.3')
+    self.assertRaises(ValueError, config.AddCloudStorageDependencyUpdateJob,
+                      'dep', 'plat', 'path', execute_job=False)
+    self.assertRaises(ValueError, config.AddCloudStorageDependencyUpdateJob,
+                      'dep', 'plat', 'path', version='1.2.3', execute_job=False)
+
+  @mock.patch('dependency_manager.uploader.cloud_storage')
+  @mock.patch('dependency_manager.base_config.cloud_storage')
+  def testUpdateCloudStorageDependenciesWrite(
+      self, base_config_cs_mock, uploader_cs_mock):
+    expected_dependencies = self.dependencies
+    self.fs.CreateFile(self.file_path,
+                       contents='\n'.join(self.expected_file_lines))
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    self.assertFalse(config._IsDirty())
+    self.assertEqual(expected_dependencies, config._config_data)
+
+    base_config_cs_mock.CalculateHash.return_value = self.new_dep_hash
+    uploader_cs_mock.Exists.return_value = False
+    expected_dependencies = self.new_dependencies
+    config.AddCloudStorageDependencyUpdateJob(
+        'dep1', 'plat2', self.new_dep_path, execute_job=True)
+    self.assertFalse(config._IsDirty())
+    self.assertFalse(config._pending_uploads)
+    self.assertEqual(expected_dependencies, config._config_data)
+    # check that file contents have been updated.
+    file_module = fake_filesystem.FakeFileOpen(self.fs)
+    expected_file_lines = list(self.new_expected_file_lines)
+    for line in file_module(self.file_path):
+      self.assertEqual(expected_file_lines.pop(0), line.strip())
+    self.fs.CloseOpenFile(file_module(self.file_path))
+
+    expected_dependencies = self.final_dependencies
+    base_config_cs_mock.CalculateHash.return_value = self.final_dep_hash
+    config.AddCloudStorageDependencyUpdateJob(
+        'dep2', 'plat1', self.final_dep_path, execute_job=True)
+    self.assertFalse(config._IsDirty())
+    self.assertFalse(config._pending_uploads)
+    self.assertEqual(expected_dependencies, config._config_data)
+    # check that file contents have been updated.
+    expected_file_lines = list(self.final_expected_file_lines)
+    file_module = fake_filesystem.FakeFileOpen(self.fs)
+    for line in file_module(self.file_path):
+      self.assertEqual(expected_file_lines.pop(0), line.strip())
+    self.fs.CloseOpenFile(file_module(self.file_path))
+
+  @mock.patch('dependency_manager.uploader.cloud_storage')
+  @mock.patch('dependency_manager.base_config.cloud_storage')
+  def testUpdateCloudStorageDependenciesNoWrite(
+      self, base_config_cs_mock, uploader_cs_mock):
+    self.fs.CreateFile(self.file_path,
+                       contents='\n'.join(self.expected_file_lines))
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+
+    self.assertRaises(ValueError, config.AddCloudStorageDependencyUpdateJob,
+                      'dep', 'plat', 'path')
+    self.assertRaises(ValueError, config.AddCloudStorageDependencyUpdateJob,
+                      'dep', 'plat', 'path', version='1.2.3')
+
+    expected_dependencies = self.dependencies
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    self.assertFalse(config._IsDirty())
+    self.assertFalse(config._pending_uploads)
+    self.assertEqual(expected_dependencies, config._config_data)
+
+    base_config_cs_mock.CalculateHash.return_value = self.new_dep_hash
+    uploader_cs_mock.Exists.return_value = False
+    expected_dependencies = self.new_dependencies
+    config.AddCloudStorageDependencyUpdateJob(
+        'dep1', 'plat2', self.new_dep_path, execute_job=False)
+    self.assertTrue(config._IsDirty())
+    self.assertEqual(1, len(config._pending_uploads))
+    self.assertEqual(self.new_pending_upload, config._pending_uploads[0])
+    self.assertEqual(expected_dependencies, config._config_data)
+    # check that file contents have not been updated.
+    expected_file_lines = list(self.expected_file_lines)
+    file_module = fake_filesystem.FakeFileOpen(self.fs)
+    for line in file_module(self.file_path):
+      self.assertEqual(expected_file_lines.pop(0), line.strip())
+    self.fs.CloseOpenFile(file_module(self.file_path))
+
+    expected_dependencies = self.final_dependencies
+    base_config_cs_mock.CalculateHash.return_value = self.final_dep_hash
+    config.AddCloudStorageDependencyUpdateJob(
+        'dep2', 'plat1', self.final_dep_path, execute_job=False)
+    self.assertTrue(config._IsDirty())
+    self.assertEqual(expected_dependencies, config._config_data)
+    # check that file contents have not been updated.
+    expected_file_lines = list(self.expected_file_lines)
+    file_module = fake_filesystem.FakeFileOpen(self.fs)
+    for line in file_module(self.file_path):
+      self.assertEqual(expected_file_lines.pop(0), line.strip())
+    self.fs.CloseOpenFile(file_module(self.file_path))
+
+
+class BaseConfigDataManipulationUnittests(fake_filesystem_unittest.TestCase):
+  def setUp(self):
+    self.addTypeEqualityFunc(uploader.CloudStorageUploader,
+                             uploader.CloudStorageUploader.__eq__)
+    self.setUpPyfakefs()
+
+    self.cs_bucket = 'bucket1'
+    self.cs_base_folder = 'dependencies_folder'
+    self.cs_hash = 'hash12'
+    self.download_path = '../../relative/dep1/path2'
+    self.local_paths = ['../../../relative/local/path21',
+                        '../../../relative/local/path22']
+    self.platform_dict = {'cloud_storage_hash': self.cs_hash,
+                          'download_path': self.download_path,
+                          'local_paths': self.local_paths}
+    self.dependencies = {
+        'dep1': {
+            'cloud_storage_bucket': self.cs_bucket,
+            'cloud_storage_base_folder': self.cs_base_folder,
+            'file_info': {
+                'plat1': {
+                    'cloud_storage_hash': 'hash11',
+                    'download_path': '../../relative/dep1/path1',
+                    'local_paths': ['../../../relative/local/path11',
+                                    '../../../relative/local/path12']},
+                'plat2': self.platform_dict
+            }
+        },
+        'dep2': {
+            'cloud_storage_bucket': 'bucket2',
+            'file_info': {
+                'plat1': {
+                    'cloud_storage_hash': 'hash21',
+                    'download_path': '../../relative/dep2/path1',
+                    'local_paths': ['../../../relative/local/path31',
+                                    '../../../relative/local/path32']},
+                'plat2': {
+                    'cloud_storage_hash': 'hash22',
+                    'download_path': '../../relative/dep2/path2'}}}}
+
+    self.file_path = os.path.abspath(os.path.join(
+        'path', 'to', 'config', 'file'))
+
+
+    self.expected_file_lines = [
+      # pylint: disable=bad-continuation
+      '{', '"config_type": "BaseConfig",', '"dependencies": {',
+        '"dep1": {', '"cloud_storage_base_folder": "dependencies_folder",',
+          '"cloud_storage_bucket": "bucket1",', '"file_info": {',
+            '"plat1": {', '"cloud_storage_hash": "hash11",',
+              '"download_path": "../../relative/dep1/path1",',
+              '"local_paths": [', '"../../../relative/local/path11",',
+                              '"../../../relative/local/path12"', ']', '},',
+            '"plat2": {', '"cloud_storage_hash": "hash12",',
+              '"download_path": "../../relative/dep1/path2",',
+              '"local_paths": [', '"../../../relative/local/path21",',
+                              '"../../../relative/local/path22"', ']',
+              '}', '}', '},',
+        '"dep2": {', '"cloud_storage_bucket": "bucket2",', '"file_info": {',
+            '"plat1": {', '"cloud_storage_hash": "hash21",',
+              '"download_path": "../../relative/dep2/path1",',
+              '"local_paths": [', '"../../../relative/local/path31",',
+                              '"../../../relative/local/path32"', ']', '},',
+            '"plat2": {', '"cloud_storage_hash": "hash22",',
+              '"download_path": "../../relative/dep2/path2"', '}', '}', '}',
+      '}', '}']
+    self.fs.CreateFile(self.file_path,
+                       contents='\n'.join(self.expected_file_lines))
+
+
+  def testSetPlatformDataFailureNotWritable(self):
+    config = dependency_manager.BaseConfig(self.file_path)
+    self.assertRaises(
+        dependency_manager.ReadWriteError, config._SetPlatformData,
+        'dep1', 'plat1', 'cloud_storage_bucket', 'new_bucket')
+    self.assertEqual(self.dependencies, config._config_data)
+
+  def testSetPlatformDataFailure(self):
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    self.assertRaises(ValueError, config._SetPlatformData, 'missing_dep',
+                      'plat2', 'cloud_storage_bucket', 'new_bucket')
+    self.assertEqual(self.dependencies, config._config_data)
+    self.assertRaises(ValueError, config._SetPlatformData, 'dep1',
+                      'missing_plat', 'cloud_storage_bucket', 'new_bucket')
+    self.assertEqual(self.dependencies, config._config_data)
+
+
+  def testSetPlatformDataCloudStorageBucketSuccess(self):
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    updated_cs_dependencies = {
+        'dep1': {'cloud_storage_bucket': 'new_bucket',
+                 'cloud_storage_base_folder': 'dependencies_folder',
+                 'file_info': {
+                     'plat1': {
+                         'cloud_storage_hash': 'hash11',
+                         'download_path': '../../relative/dep1/path1',
+                         'local_paths': ['../../../relative/local/path11',
+                                         '../../../relative/local/path12']},
+                     'plat2': {
+                         'cloud_storage_hash': 'hash12',
+                         'download_path': '../../relative/dep1/path2',
+                         'local_paths': ['../../../relative/local/path21',
+                                         '../../../relative/local/path22']}}},
+        'dep2': {'cloud_storage_bucket': 'bucket2',
+                 'file_info': {
+                     'plat1': {
+                         'cloud_storage_hash': 'hash21',
+                         'download_path': '../../relative/dep2/path1',
+                         'local_paths': ['../../../relative/local/path31',
+                                         '../../../relative/local/path32']},
+                     'plat2': {
+                         'cloud_storage_hash': 'hash22',
+                         'download_path': '../../relative/dep2/path2'}}}}
+    config._SetPlatformData('dep1', 'plat2', 'cloud_storage_bucket',
+                            'new_bucket')
+    self.assertEqual(updated_cs_dependencies, config._config_data)
+
+  def testSetPlatformDataCloudStorageBaseFolderSuccess(self):
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    updated_cs_dependencies = {
+        'dep1': {'cloud_storage_bucket': 'bucket1',
+                 'cloud_storage_base_folder': 'new_dependencies_folder',
+                 'file_info': {
+                     'plat1': {
+                         'cloud_storage_hash': 'hash11',
+                         'download_path': '../../relative/dep1/path1',
+                         'local_paths': ['../../../relative/local/path11',
+                                         '../../../relative/local/path12']},
+                     'plat2': {
+                         'cloud_storage_hash': 'hash12',
+                         'download_path': '../../relative/dep1/path2',
+                         'local_paths': ['../../../relative/local/path21',
+                                         '../../../relative/local/path22']}}},
+        'dep2': {'cloud_storage_bucket': 'bucket2',
+                 'file_info': {
+                     'plat1': {
+                         'cloud_storage_hash': 'hash21',
+                         'download_path': '../../relative/dep2/path1',
+                         'local_paths': ['../../../relative/local/path31',
+                                         '../../../relative/local/path32']},
+                     'plat2': {
+                         'cloud_storage_hash': 'hash22',
+                         'download_path': '../../relative/dep2/path2'}}}}
+    config._SetPlatformData('dep1', 'plat2', 'cloud_storage_base_folder',
+                            'new_dependencies_folder')
+    self.assertEqual(updated_cs_dependencies, config._config_data)
+
+  def testSetPlatformDataHashSuccess(self):
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    updated_cs_dependencies = {
+        'dep1': {'cloud_storage_bucket': 'bucket1',
+                 'cloud_storage_base_folder': 'dependencies_folder',
+                 'file_info': {
+                     'plat1': {
+                         'cloud_storage_hash': 'hash11',
+                         'download_path': '../../relative/dep1/path1',
+                         'local_paths': ['../../../relative/local/path11',
+                                         '../../../relative/local/path12']},
+                     'plat2': {
+                         'cloud_storage_hash': 'new_hash',
+                         'download_path': '../../relative/dep1/path2',
+                         'local_paths': ['../../../relative/local/path21',
+                                         '../../../relative/local/path22']}}},
+        'dep2': {'cloud_storage_bucket': 'bucket2',
+                 'file_info': {
+                     'plat1': {
+                         'cloud_storage_hash': 'hash21',
+                         'download_path': '../../relative/dep2/path1',
+                         'local_paths': ['../../../relative/local/path31',
+                                         '../../../relative/local/path32']},
+                     'plat2': {
+                         'cloud_storage_hash': 'hash22',
+                         'download_path': '../../relative/dep2/path2'}}}}
+    config._SetPlatformData('dep1', 'plat2', 'cloud_storage_hash',
+                            'new_hash')
+    self.assertEqual(updated_cs_dependencies, config._config_data)
+
+  def testSetPlatformDataDownloadPathSuccess(self):
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    updated_cs_dependencies = {
+        'dep1': {'cloud_storage_bucket': 'bucket1',
+                 'cloud_storage_base_folder': 'dependencies_folder',
+                 'file_info': {
+                     'plat1': {
+                         'cloud_storage_hash': 'hash11',
+                         'download_path': '../../relative/dep1/path1',
+                         'local_paths': ['../../../relative/local/path11',
+                                         '../../../relative/local/path12']},
+                     'plat2': {
+                         'cloud_storage_hash': 'hash12',
+                         'download_path': '../../new/dep1/path2',
+                         'local_paths': ['../../../relative/local/path21',
+                                         '../../../relative/local/path22']}}},
+        'dep2': {'cloud_storage_bucket': 'bucket2',
+                 'file_info': {
+                     'plat1': {
+                         'cloud_storage_hash': 'hash21',
+                         'download_path': '../../relative/dep2/path1',
+                         'local_paths': ['../../../relative/local/path31',
+                                         '../../../relative/local/path32']},
+                     'plat2': {
+                         'cloud_storage_hash': 'hash22',
+                         'download_path': '../../relative/dep2/path2'}}}}
+    config._SetPlatformData('dep1', 'plat2', 'download_path',
+                            '../../new/dep1/path2')
+    self.assertEqual(updated_cs_dependencies, config._config_data)
+
+  def testSetPlatformDataLocalPathSuccess(self):
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    updated_cs_dependencies = {
+        'dep1': {'cloud_storage_bucket': 'bucket1',
+                 'cloud_storage_base_folder': 'dependencies_folder',
+                 'file_info': {
+                     'plat1': {
+                         'cloud_storage_hash': 'hash11',
+                         'download_path': '../../relative/dep1/path1',
+                         'local_paths': ['../../../relative/local/path11',
+                                         '../../../relative/local/path12']},
+                     'plat2': {
+                         'cloud_storage_hash': 'hash12',
+                         'download_path': '../../relative/dep1/path2',
+                         'local_paths': ['../../new/relative/local/path21',
+                                         '../../new/relative/local/path22']}}},
+        'dep2': {'cloud_storage_bucket': 'bucket2',
+                 'file_info': {
+                     'plat1': {
+                         'cloud_storage_hash': 'hash21',
+                         'download_path': '../../relative/dep2/path1',
+                         'local_paths': ['../../../relative/local/path31',
+                                         '../../../relative/local/path32']},
+                     'plat2': {
+                         'cloud_storage_hash': 'hash22',
+                         'download_path': '../../relative/dep2/path2'}}}}
+    config._SetPlatformData('dep1', 'plat2', 'local_paths',
+                            ['../../new/relative/local/path21',
+                             '../../new/relative/local/path22'])
+    self.assertEqual(updated_cs_dependencies, config._config_data)
+
+  def testGetPlatformDataFailure(self):
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    self.assertRaises(ValueError, config._GetPlatformData, 'missing_dep',
+                      'plat2', 'cloud_storage_bucket')
+    self.assertEqual(self.dependencies, config._config_data)
+    self.assertRaises(ValueError, config._GetPlatformData, 'dep1',
+                      'missing_plat', 'cloud_storage_bucket')
+    self.assertEqual(self.dependencies, config._config_data)
+
+  def testGetPlatformDataDictSuccess(self):
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    self.assertEqual(self.platform_dict,
+                     config._GetPlatformData('dep1', 'plat2'))
+    self.assertEqual(self.dependencies, config._config_data)
+
+  def testGetPlatformDataCloudStorageBucketSuccess(self):
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    self.assertEqual(self.cs_bucket, config._GetPlatformData(
+        'dep1', 'plat2', 'cloud_storage_bucket'))
+    self.assertEqual(self.dependencies, config._config_data)
+
+  def testGetPlatformDataCloudStorageBaseFolderSuccess(self):
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    self.assertEqual(self.cs_base_folder, config._GetPlatformData(
+        'dep1', 'plat2', 'cloud_storage_base_folder'))
+    self.assertEqual(self.dependencies, config._config_data)
+
+  def testGetPlatformDataHashSuccess(self):
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    self.assertEqual(self.cs_hash, config._GetPlatformData(
+        'dep1', 'plat2', 'cloud_storage_hash'))
+    self.assertEqual(self.dependencies, config._config_data)
+
+  def testGetPlatformDataDownloadPathSuccess(self):
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    self.assertEqual(self.download_path, config._GetPlatformData(
+        'dep1', 'plat2', 'download_path'))
+    self.assertEqual(self.dependencies, config._config_data)
+
+  def testGetPlatformDataLocalPathSuccess(self):
+    config = dependency_manager.BaseConfig(self.file_path, writable=True)
+    self.assertEqual(self.local_paths, config._GetPlatformData(
+        'dep1', 'plat2', 'local_paths'))
+    self.assertEqual(self.dependencies, config._config_data)
+
+class BaseConfigTest(unittest.TestCase):
+  """ Subclassable unittests for BaseConfig.
+  For subclasses: override setUp, GetConfigDataFromDict,
+    and EndToEndExpectedConfigData as needed.
+
+    setUp must set the following properties:
+      self.config_type: String returned from GetConfigType in config subclass.
+      self.config_class: the class for the config subclass.
+      self.config_module: importable module for the config subclass.
+      self.empty_dict: expected dictionary for an empty config, as it would be
+        stored in a json file.
+      self.one_dep_dict: example dictionary for a config with one dependency,
+        as it would be stored in a json file.
+  """
+  def setUp(self):
+    self.config_type = 'BaseConfig'
+    self.config_class = dependency_manager.BaseConfig
+    self.config_module = 'dependency_manager.base_config'
+
+    self.empty_dict = {'config_type': self.config_type,
+                       'dependencies': {}}
+
+    dependency_dict = {
+        'dep': {
+            'cloud_storage_base_folder': 'cs_base_folder1',
+            'cloud_storage_bucket': 'bucket1',
+            'file_info': {
+                'plat1_arch1': {
+                    'cloud_storage_hash': 'hash111',
+                    'download_path': 'download_path111',
+                    'cs_remote_path': 'cs_path111',
+                    'version_in_cs': 'version_111',
+                    'local_paths': ['local_path1110', 'local_path1111']
+                },
+                'plat1_arch2': {
+                    'cloud_storage_hash': 'hash112',
+                    'download_path': 'download_path112',
+                    'cs_remote_path': 'cs_path112',
+                    'local_paths': ['local_path1120', 'local_path1121']
+                },
+                'win_arch1': {
+                    'cloud_storage_hash': 'hash1w1',
+                    'download_path': 'download\\path\\1w1',
+                    'cs_remote_path': 'cs_path1w1',
+                    'local_paths': ['local\\path\\1w10', 'local\\path\\1w11']
+                },
+                'all_the_variables': {
+                    'cloud_storage_hash': 'hash111',
+                    'download_path': 'download_path111',
+                    'cs_remote_path': 'cs_path111',
+                    'version_in_cs': 'version_111',
+                    'path_in_archive': 'path/in/archive',
+                    'local_paths': ['local_path1110', 'local_path1111']
+                }
+            }
+        }
+    }
+    self.one_dep_dict = {'config_type': self.config_type,
+                         'dependencies': dependency_dict}
+
+  def GetConfigDataFromDict(self, config_dict):
+    return config_dict.get('dependencies', {})
+
+  @mock.patch('os.path')
+  @mock.patch('__builtin__.open')
+  def testInitBaseProperties(self, open_mock, path_mock):
+    # Init is not meant to be overridden, so we should be mocking the
+    # base_config's json module, even in subclasses.
+    json_module = 'dependency_manager.base_config.json'
+    with mock.patch(json_module) as json_mock:
+      json_mock.load.return_value = self.empty_dict.copy()
+      config = self.config_class('file_path')
+      self.assertEqual('file_path', config._config_path)
+      self.assertEqual(self.config_type, config.GetConfigType())
+      self.assertEqual(self.GetConfigDataFromDict(self.empty_dict),
+                       config._config_data)
+
+
+  @mock.patch('dependency_manager.dependency_info.DependencyInfo')
+  @mock.patch('os.path')
+  @mock.patch('__builtin__.open')
+  def testInitWithDependencies(self, open_mock, path_mock, dep_info_mock):
+    # Init is not meant to be overridden, so we should be mocking the
+    # base_config's json module, even in subclasses.
+    json_module = 'dependency_manager.base_config.json'
+    with mock.patch(json_module) as json_mock:
+      json_mock.load.return_value = self.one_dep_dict
+      config = self.config_class('file_path')
+      self.assertEqual('file_path', config._config_path)
+      self.assertEqual(self.config_type, config.GetConfigType())
+      self.assertEqual(self.GetConfigDataFromDict(self.one_dep_dict),
+                       config._config_data)
+
+  def testFormatPath(self):
+    self.assertEqual(None, self.config_class._FormatPath(None))
+    self.assertEqual('', self.config_class._FormatPath(''))
+    self.assertEqual('some_string',
+                     self.config_class._FormatPath('some_string'))
+
+    expected_path = os.path.join('some', 'file', 'path')
+    self.assertEqual(expected_path,
+                     self.config_class._FormatPath('some/file/path'))
+    self.assertEqual(expected_path,
+                     self.config_class._FormatPath('some\\file\\path'))
+
+  @mock.patch('dependency_manager.base_config.json')
+  @mock.patch('dependency_manager.dependency_info.DependencyInfo')
+  @mock.patch('os.path.exists')
+  @mock.patch('__builtin__.open')
+  def testIterDependenciesError(
+      self, open_mock, exists_mock, dep_info_mock, json_mock):
+    # Init is not meant to be overridden, so we should be mocking the
+    # base_config's json module, even in subclasses.
+    json_mock.load.return_value = self.one_dep_dict
+    config = self.config_class('file_path', writable=True)
+    self.assertEqual(self.GetConfigDataFromDict(self.one_dep_dict),
+                     config._config_data)
+    self.assertTrue(config._writable)
+    with self.assertRaises(dependency_manager.ReadWriteError):
+      for _ in config.IterDependencyInfo():
+        pass
+
+  @mock.patch('dependency_manager.base_config.json')
+  @mock.patch('dependency_manager.dependency_info.DependencyInfo')
+  @mock.patch('os.path.exists')
+  @mock.patch('__builtin__.open')
+  def testIterDependencies(
+      self, open_mock, exists_mock, dep_info_mock, json_mock):
+    json_mock.load.return_value = self.one_dep_dict
+    config = self.config_class('file_path')
+    self.assertEqual(self.GetConfigDataFromDict(self.one_dep_dict),
+                     config._config_data)
+    expected_dep_info = ['dep_info0', 'dep_info1', 'dep_info2']
+    dep_info_mock.side_effect = expected_dep_info
+    expected_calls = [
+        mock.call('dep', 'plat1_arch1', 'file_path', cs_bucket='bucket1',
+                  cs_hash='hash111', download_path='download_path111',
+                  cs_remote_path='cs_path111',
+                  local_paths=['local_path1110', 'local_path1111']),
+        mock.call('dep', 'plat1_arch1', 'file_path', cs_bucket='bucket1',
+                  cs_hash='hash112', download_path='download_path112',
+                  cs_remote_path='cs_path112',
+                  local_paths=['local_path1120', 'local_path1121']),
+        mock.call('dep', 'win_arch1', 'file_path', cs_bucket='bucket1',
+                  cs_hash='hash1w1',
+                  download_path=os.path.join('download', 'path', '1w1'),
+                  cs_remote_path='cs_path1w1',
+                  local_paths=[os.path.join('download', 'path', '1w10'),
+                               os.path.join('download', 'path', '1w11')])]
+    deps_seen = []
+    for dep_info in config.IterDependencyInfo():
+      deps_seen.append(dep_info)
+    dep_info_mock.assert_call_args(expected_calls)
+    self.assertItemsEqual(expected_dep_info, deps_seen)
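+
+
+# A minimal sketch of the subclassing contract described in the BaseConfigTest
+# docstring (MyConfig, my_config, and my_project are hypothetical names used
+# only for illustration; they do not exist in this repository):
+#
+#   class MyConfigTest(BaseConfigTest):
+#     def setUp(self):
+#       super(MyConfigTest, self).setUp()
+#       self.config_type = 'MyConfig'
+#       self.config_class = my_config.MyConfig
+#       self.config_module = 'my_project.my_config'
+#       self.empty_dict = {'config_type': self.config_type,
+#                          'dependencies': {}}
+#       # self.one_dep_dict would be rebuilt the same way if MyConfig stores
+#       # its dependency data differently from BaseConfig.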
diff --git a/catapult/dependency_manager/dependency_manager/cloud_storage_info.py b/catapult/dependency_manager/dependency_manager/cloud_storage_info.py
new file mode 100644
index 0000000..e8b73c8
--- /dev/null
+++ b/catapult/dependency_manager/dependency_manager/cloud_storage_info.py
@@ -0,0 +1,110 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import errno
+import os
+import stat
+
+from catapult_base import cloud_storage
+
+from dependency_manager import exceptions
+
+class CloudStorageInfo(object):
+  def __init__(self, cs_bucket, cs_hash, download_path, cs_remote_path,
+               version_in_cs=None, archive_info=None):
+    """ Container for the information needed to download a dependency from
+        cloud storage.
+
+    Args:
+          cs_bucket: The cloud storage bucket the dependency is located in.
+          cs_hash: The hash of the file stored in cloud storage.
+          download_path: Where the file should be downloaded to.
+          cs_remote_path: Where the file is stored in the cloud storage bucket.
+          version_in_cs: The version of the file stored in cloud storage.
+          archive_info: An instance of ArchiveInfo if this dependency is an
+              archive. Else None.
+    """
+    self._download_path = download_path
+    self._cs_remote_path = cs_remote_path
+    self._cs_bucket = cs_bucket
+    self._cs_hash = cs_hash
+    self._version_in_cs = version_in_cs
+    self._archive_info = archive_info
+    if not self._has_minimum_data:
+      raise ValueError(
+          'Not enough information specified to initialize a cloud storage info.'
+          ' %s' % self)
+
+  def DependencyExistsInCloudStorage(self):
+    return cloud_storage.Exists(self._cs_bucket, self._cs_remote_path)
+
+  def GetRemotePath(self):
+    """Gets the path to a downloaded version of the dependency.
+
+    May not download the file if it has already been downloaded.
+    Will unzip the downloaded file if a non-empty archive_info was passed in at
+    init.
+
+    Returns: A path to an executable that was stored in cloud_storage, or None
+       if not found.
+
+    Raises:
+        CredentialsError: If cloud_storage credentials aren't configured.
+        PermissionError: If cloud_storage credentials are configured, but not
+            with an account that has permission to download the needed file.
+        NotFoundError: If the needed file does not exist where expected in
+            cloud_storage or the downloaded zip file.
+        ServerError: If an internal server error is hit while downloading the
+            needed file.
+        CloudStorageError: If another error occurred while downloading the remote
+            path.
+        FileNotFoundError: If the download was otherwise unsuccessful.
+    """
+    if not self._has_minimum_data:
+      return None
+
+    download_dir = os.path.dirname(self._download_path)
+    if not os.path.exists(download_dir):
+      try:
+        os.makedirs(download_dir)
+      except OSError as e:
+        # The logic above is racy, and os.makedirs will raise an OSError if
+        # the directory exists.
+        if e.errno != errno.EEXIST:
+          raise
+
+    dependency_path = self._download_path
+    cloud_storage.GetIfHashChanged(
+        self._cs_remote_path, self._download_path, self._cs_bucket,
+        self._cs_hash)
+    if not os.path.exists(dependency_path):
+      raise exceptions.FileNotFoundError(dependency_path)
+
+    if self.has_archive_info:
+      dependency_path = self._archive_info.GetUnzippedPath()
+    else:
+      mode = os.stat(dependency_path).st_mode
+      os.chmod(dependency_path, mode | stat.S_IXUSR)
+    return os.path.abspath(dependency_path)
+
+  @property
+  def version_in_cs(self):
+    return self._version_in_cs
+
+  @property
+  def _has_minimum_data(self):
+    return all([self._cs_bucket, self._cs_remote_path, self._download_path,
+                self._cs_hash])
+
+
+  @property
+  def has_archive_info(self):
+    return bool(self._archive_info)
+
+  def __repr__(self):
+    return (
+        'CloudStorageInfo(download_path=%s, cs_remote_path=%s, cs_bucket=%s, '
+        'cs_hash=%s, version_in_cs=%s, archive_info=%s)' % (
+            self._download_path, self._cs_remote_path, self._cs_bucket,
+            self._cs_hash, self._version_in_cs, self._archive_info))
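+
+
+# A minimal usage sketch for CloudStorageInfo (the bucket, hash, and paths
+# below are made-up placeholder values, not anything defined in this
+# repository):
+#
+#   info = CloudStorageInfo(
+#       cs_bucket='example-bucket',
+#       cs_hash='1234deadbeef',
+#       download_path='/tmp/deps/example_binary',
+#       cs_remote_path='deps/example_binary_1234deadbeef')
+#   # All four of these values are required; passing None for any of them
+#   # raises ValueError at construction time.
+#   local_path = info.GetRemotePath()
+#   # GetRemotePath() downloads the file if it is missing or its hash does not
+#   # match, marks it executable (or unzips it when archive_info is set), and
+#   # returns an absolute path.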
diff --git a/catapult/dependency_manager/dependency_manager/cloud_storage_info_unittest.py b/catapult/dependency_manager/dependency_manager/cloud_storage_info_unittest.py
new file mode 100644
index 0000000..5c03d9a
--- /dev/null
+++ b/catapult/dependency_manager/dependency_manager/cloud_storage_info_unittest.py
@@ -0,0 +1,221 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import stat
+import unittest
+
+import mock
+from pyfakefs import fake_filesystem_unittest
+from catapult_base import cloud_storage
+
+from dependency_manager import archive_info
+from dependency_manager import cloud_storage_info
+from dependency_manager import exceptions
+
+class CloudStorageInfoTest(unittest.TestCase):
+  def testInitCloudStorageInfoErrors(self):
+    # Must specify cloud storage information atomically.
+    self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
+                      None, None, None, None)
+    self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
+                      'cs_bucket', None, None, None)
+    self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
+                      None, 'cs_hash', None, None)
+    self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
+                      None, None, 'download_path', None)
+    self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
+                      None, None, None, 'cs_remote_path')
+    self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
+                      None, 'cs_hash', 'download_path', 'cs_remote_path')
+    self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
+                      'cs_bucket', None, 'download_path', 'cs_remote_path')
+    self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
+                      'cs_bucket', 'cs_hash', None, 'cs_remote_path')
+    self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
+                      'cs_bucket', 'cs_hash', 'download_path', None)
+
+  def testInitWithVersion(self):
+    self.assertRaises(
+        ValueError, cloud_storage_info.CloudStorageInfo, None, None, None,
+        'cs_remote_path', version_in_cs='version_in_cs')
+    self.assertRaises(
+        ValueError, cloud_storage_info.CloudStorageInfo, None, 'cs_hash',
+        'download_path', 'cs_remote_path', version_in_cs='version_in_cs')
+
+    cs_info = cloud_storage_info.CloudStorageInfo(
+        'cs_bucket', 'cs_hash', 'download_path', 'cs_remote_path',
+        version_in_cs='version_in_cs')
+    self.assertEqual('cs_hash', cs_info._cs_hash)
+    self.assertEqual('cs_bucket', cs_info._cs_bucket)
+    self.assertEqual('cs_remote_path', cs_info._cs_remote_path)
+    self.assertEqual('download_path', cs_info._download_path)
+    self.assertEqual('version_in_cs', cs_info._version_in_cs)
+
+  def testInitWithArchiveInfoErrors(self):
+    zip_info = archive_info.ArchiveInfo(
+        'download_path', 'unzip_location', 'path_within_archive')
+    self.assertRaises(
+        ValueError, cloud_storage_info.CloudStorageInfo, None, None, None, None,
+        archive_info=zip_info)
+    self.assertRaises(
+        ValueError, cloud_storage_info.CloudStorageInfo, None, None, None,
+        'cs_remote_path', archive_info=zip_info)
+    self.assertRaises(
+        ValueError, cloud_storage_info.CloudStorageInfo, 'cs_bucket', 'cs_hash',
+        None, 'cs_remote_path', archive_info=zip_info)
+    self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
+                      'cs_bucket', 'cs_hash',
+                      'cs_remote_path', None, version_in_cs='version',
+                      archive_info=zip_info)
+
+
+  def testInitWithArchiveInfo(self):
+    zip_info = archive_info.ArchiveInfo(
+        'download_path', 'unzip_location', 'path_within_archive')
+    cs_info = cloud_storage_info.CloudStorageInfo(
+        'cs_bucket', 'cs_hash', 'download_path', 'cs_remote_path',
+        archive_info=zip_info)
+    self.assertEqual('cs_hash', cs_info._cs_hash)
+    self.assertEqual('cs_bucket', cs_info._cs_bucket)
+    self.assertEqual('cs_remote_path', cs_info._cs_remote_path)
+    self.assertEqual('download_path', cs_info._download_path)
+    self.assertEqual(zip_info, cs_info._archive_info)
+    self.assertFalse(cs_info._version_in_cs)
+
+  def testInitWithVersionAndArchiveInfo(self):
+    zip_info = archive_info.ArchiveInfo(
+        'download_path', 'unzip_location', 'path_within_archive')
+    cs_info = cloud_storage_info.CloudStorageInfo(
+        'cs_bucket', 'cs_hash', 'download_path',
+        'cs_remote_path', version_in_cs='version_in_cs',
+        archive_info=zip_info)
+    self.assertEqual('cs_hash', cs_info._cs_hash)
+    self.assertEqual('cs_bucket', cs_info._cs_bucket)
+    self.assertEqual('cs_remote_path', cs_info._cs_remote_path)
+    self.assertEqual('download_path', cs_info._download_path)
+    self.assertEqual(zip_info, cs_info._archive_info)
+    self.assertEqual('version_in_cs', cs_info._version_in_cs)
+
+  def testInitMinimumCloudStorageInfo(self):
+    cs_info = cloud_storage_info.CloudStorageInfo(
+        'cs_bucket',
+        'cs_hash', 'download_path',
+        'cs_remote_path')
+    self.assertEqual('cs_hash', cs_info._cs_hash)
+    self.assertEqual('cs_bucket', cs_info._cs_bucket)
+    self.assertEqual('cs_remote_path', cs_info._cs_remote_path)
+    self.assertEqual('download_path', cs_info._download_path)
+    self.assertFalse(cs_info._version_in_cs)
+    self.assertFalse(cs_info._archive_info)
+
+
+class TestGetRemotePath(fake_filesystem_unittest.TestCase):
+  def setUp(self):
+    self.setUpPyfakefs()
+    self.config_path = '/test/dep_config.json'
+    self.fs.CreateFile(self.config_path, contents='{}')
+    self.download_path = '/foo/download_path'
+    self.fs.CreateFile(
+        self.download_path, contents='1010110', st_mode=stat.S_IWOTH)
+    self.cs_info = cloud_storage_info.CloudStorageInfo(
+        'cs_bucket', 'cs_hash', self.download_path, 'cs_remote_path',
+        version_in_cs='1.2.3.4',)
+
+  def tearDown(self):
+    self.tearDownPyfakefs()
+
+  @mock.patch(
+      'catapult_base.cloud_storage.GetIfHashChanged')
+  def testGetRemotePathNoArchive(self, cs_get_mock):
+    def _GetIfHashChangedMock(cs_path, download_path, bucket, file_hash):
+      del cs_path, bucket, file_hash
+      if not os.path.exists(download_path):
+        self.fs.CreateFile(download_path, contents='1010001010101010110101')
+    cs_get_mock.side_effect = _GetIfHashChangedMock
+    # All of the needed information is given, and the downloaded path exists
+    # after calling cloud storage.
+    self.assertEqual(
+        os.path.abspath(self.download_path),
+        self.cs_info.GetRemotePath())
+    self.assertTrue(os.stat(self.download_path).st_mode & stat.S_IXUSR)
+
+    # All of the needed information is given, but the downloaded path doesn't
+    # exist after calling cloud storage.
+    self.fs.RemoveObject(self.download_path)
+    cs_get_mock.side_effect = [True]
+    self.assertRaises(
+        exceptions.FileNotFoundError, self.cs_info.GetRemotePath)
+
+  @mock.patch(
+      'dependency_manager.dependency_manager_util.UnzipArchive')
+  @mock.patch(
+      'dependency_manager.cloud_storage_info.cloud_storage.GetIfHashChanged') # pylint: disable=line-too-long
+  def testGetRemotePathWithArchive(self, cs_get_mock, unzip_mock):
+    def _GetIfHashChangedMock(cs_path, download_path, bucket, file_hash):
+      del cs_path, bucket, file_hash
+      if not os.path.exists(download_path):
+        self.fs.CreateFile(download_path, contents='1010001010101010110101')
+    cs_get_mock.side_effect = _GetIfHashChangedMock
+
+    unzip_path = os.path.join(
+        os.path.dirname(self.download_path), 'unzip_dir')
+    path_within_archive = os.path.join('path', 'within', 'archive')
+    dep_path = os.path.join(unzip_path, path_within_archive)
+    def _UnzipFileMock(archive_file, unzip_location, tmp_location=None):
+      del archive_file, tmp_location
+      self.fs.CreateFile(dep_path)
+      self.fs.CreateFile(os.path.join(unzip_location, 'extra', 'path'))
+      self.fs.CreateFile(os.path.join(unzip_location, 'another_extra_path'))
+    unzip_mock.side_effect = _UnzipFileMock
+
+    self.assertFalse(os.path.exists(dep_path))
+    zip_info = archive_info.ArchiveInfo(
+        self.download_path, unzip_path, path_within_archive)
+    self.cs_info = cloud_storage_info.CloudStorageInfo(
+        'cs_bucket', 'cs_hash', self.download_path, 'cs_remote_path',
+        version_in_cs='1.2.3.4', archive_info=zip_info)
+
+    self.assertFalse(unzip_mock.called)
+    self.assertEqual(
+        os.path.abspath(dep_path),
+        self.cs_info.GetRemotePath())
+    self.assertTrue(os.path.exists(dep_path))
+    self.assertTrue(stat.S_IMODE(os.stat(os.path.abspath(dep_path)).st_mode) &
+                    (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR))
+    unzip_mock.assert_called_once_with(self.download_path, unzip_path)
+
+    # Should not need to unzip a second time, but should return the same path.
+    unzip_mock.reset_mock()
+    self.assertTrue(os.path.exists(dep_path))
+    self.assertEqual(
+        os.path.abspath(dep_path),
+        self.cs_info.GetRemotePath())
+    self.assertTrue(stat.S_IMODE(os.stat(os.path.abspath(dep_path)).st_mode) &
+                    (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR))
+    self.assertFalse(unzip_mock.called)
+
+
+  @mock.patch(
+      'catapult_base.cloud_storage.GetIfHashChanged')
+  def testGetRemotePathCloudStorageErrors(self, cs_get_mock):
+    cs_get_mock.side_effect = cloud_storage.CloudStorageError
+    self.assertRaises(cloud_storage.CloudStorageError,
+                      self.cs_info.GetRemotePath)
+
+    cs_get_mock.side_effect = cloud_storage.ServerError
+    self.assertRaises(cloud_storage.ServerError,
+                      self.cs_info.GetRemotePath)
+
+    cs_get_mock.side_effect = cloud_storage.NotFoundError
+    self.assertRaises(cloud_storage.NotFoundError,
+                      self.cs_info.GetRemotePath)
+
+    cs_get_mock.side_effect = cloud_storage.PermissionError
+    self.assertRaises(cloud_storage.PermissionError,
+                      self.cs_info.GetRemotePath)
+
+    cs_get_mock.side_effect = cloud_storage.CredentialsError
+    self.assertRaises(cloud_storage.CredentialsError,
+                      self.cs_info.GetRemotePath)
diff --git a/catapult/dependency_manager/dependency_manager/dependency_info.py b/catapult/dependency_manager/dependency_manager/dependency_info.py
new file mode 100644
index 0000000..2e99768
--- /dev/null
+++ b/catapult/dependency_manager/dependency_manager/dependency_info.py
@@ -0,0 +1,123 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+class DependencyInfo(object):
+  def __init__(self, dependency, platform, config_path, local_path_info=None,
+               cloud_storage_info=None):
+    """ Container for the information needed for each dependency/platform pair
+    in the dependency_manager.
+
+    Args:
+        Required:
+          dependency: Name of the dependency.
+          platform: Name of the platform to be run on.
+          config_path: Path to the config file this information came from. Used
+                       in error messages to improve debugging.
+
+        Optional:
+          local_path_info: Used to locate a local copy of the dependency, if
+              one is available.
+          cloud_storage_info: An instance of CloudStorageInfo.
+    """
+    # TODO(aiolos): update the above doc string for A) the usage of zip files
+    # and B) supporting lists of local_paths to be checked for most recently
+    # changed files.
+    if not dependency or not platform:
+      raise ValueError(
+          'Must supply both a dependency and platform to DependencyInfo')
+
+    self._dependency = dependency
+    self._platform = platform
+    self._config_paths = [config_path]
+    self._local_path_info = local_path_info
+    self._cloud_storage_info = cloud_storage_info
+
+  def Update(self, new_dep_info):
+    """Add the information from |new_dep_info| to this instance.
+    """
+    self._config_paths.extend(new_dep_info.config_paths)
+    if (self.dependency != new_dep_info.dependency or
+        self.platform != new_dep_info.platform):
+      raise ValueError(
+          'Cannot update DependencyInfo with a different dependency or '
+          'platform. Existing dep: %s, existing platform: %s. New dep: %s, '
+          'new platform: %s. Config_paths conflicting: %s' % (
+              self.dependency, self.platform, new_dep_info.dependency,
+              new_dep_info.platform, self.config_paths))
+    if new_dep_info.has_cloud_storage_info:
+      if self.has_cloud_storage_info:
+        raise ValueError(
+            'Overriding cloud storage data is not allowed when updating a '
+            'DependencyInfo. Conflict in dependency %s on platform %s in '
+            'config_paths: %s.' % (self.dependency, self.platform,
+                                   self.config_paths))
+      else:
+        self._cloud_storage_info = new_dep_info._cloud_storage_info
+    if not self._local_path_info:
+      self._local_path_info = new_dep_info._local_path_info
+    else:
+      self._local_path_info.Update(new_dep_info._local_path_info)
+
+  def GetRemotePath(self):
+    """Gets the path to a downloaded version of the dependency.
+
+    May not download the file if it has already been downloaded.
+    Will unzip the downloaded file if specified in the config
+    via unzipped_hash.
+
+    Returns: A path to an executable that was stored in cloud_storage, or None
+       if not found.
+
+    Raises:
+        CredentialsError: If cloud_storage credentials aren't configured.
+        PermissionError: If cloud_storage credentials are configured, but not
+            with an account that has permission to download the needed file.
+        NotFoundError: If the needed file does not exist where expected in
+            cloud_storage or the downloaded zip file.
+        ServerError: If an internal server error is hit while downloading the
+            needed file.
+        CloudStorageError: If another error occurred while downloading the remote
+            path.
+        FileNotFoundError: If the download was otherwise unsuccessful.
+    """
+    if self.has_cloud_storage_info:
+      return self._cloud_storage_info.GetRemotePath()
+    return None
+
+  def GetLocalPath(self):
+    """Gets the path to a local version of the dependency.
+
+    Returns: A path to a local dependency, or None if not found.
+    """
+    if self.has_local_path_info:
+      return self._local_path_info.GetLocalPath()
+    return None
+
+  @property
+  def dependency(self):
+    return self._dependency
+
+  @property
+  def platform(self):
+    return self._platform
+
+  @property
+  def config_paths(self):
+    return self._config_paths
+
+  @property
+  def local_path_info(self):
+    return self._local_path_info
+
+  @property
+  def has_cloud_storage_info(self):
+    return bool(self._cloud_storage_info)
+
+  @property
+  def has_local_path_info(self):
+    return bool(self._local_path_info)
+
+  @property
+  def cloud_storage_info(self):
+    return self._cloud_storage_info
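
A minimal usage sketch of DependencyInfo; the dependency name, config file names, and local paths are hypothetical. It shows how two entries for the same dependency/platform pair are merged with Update (as the DependencyManager later in this patch does when several configs describe the same pair) and what the path accessors return when only local path data is present:

import dependency_manager

base = dependency_manager.DependencyInfo(
    'example_dep', 'linux_x86_64', 'base_config.json',
    local_path_info=dependency_manager.LocalPathInfo(['out/example_dep']))
override = dependency_manager.DependencyInfo(
    'example_dep', 'linux_x86_64', 'override_config.json',
    local_path_info=dependency_manager.LocalPathInfo(['prebuilt/example_dep']))

base.Update(override)
print(base.config_paths)     # ['base_config.json', 'override_config.json']
print(base.GetLocalPath())   # newest existing path in the best group, or None
print(base.GetRemotePath())  # None: no cloud_storage_info was supplied
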
diff --git a/catapult/dependency_manager/dependency_manager/dependency_info_unittest.py b/catapult/dependency_manager/dependency_manager/dependency_info_unittest.py
new file mode 100644
index 0000000..6117cd3
--- /dev/null
+++ b/catapult/dependency_manager/dependency_manager/dependency_info_unittest.py
@@ -0,0 +1,234 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+import dependency_manager
+
+class DependencyInfoTest(unittest.TestCase):
+  def testInitRequiredInfo(self):
+    # Must supply both a dependency and a platform.
+    self.assertRaises(ValueError, dependency_manager.DependencyInfo,
+                      None, None, None)
+    self.assertRaises(ValueError, dependency_manager.DependencyInfo,
+                      'dep', None, None)
+    self.assertRaises(ValueError, dependency_manager.DependencyInfo,
+                      None, 'plat', None)
+    self.assertRaises(ValueError, dependency_manager.DependencyInfo,
+                      None, None, 'config_path')
+    # Empty DependencyInfo.
+    empty_di = dependency_manager.DependencyInfo('dep', 'plat', 'config_path')
+    self.assertEqual('dep', empty_di.dependency)
+    self.assertEqual('plat', empty_di.platform)
+    self.assertEqual(['config_path'], empty_di.config_paths)
+    self.assertFalse(empty_di.has_local_path_info)
+    self.assertFalse(empty_di.has_cloud_storage_info)
+
+  def testInitLocalPaths(self):
+    local_path_info = dependency_manager.LocalPathInfo(['path0', 'path1'])
+    dep_info = dependency_manager.DependencyInfo(
+        'dep', 'platform', 'config_path', local_path_info
+        )
+    self.assertEqual('dep', dep_info.dependency)
+    self.assertEqual('platform', dep_info.platform)
+    self.assertEqual(['config_path'], dep_info.config_paths)
+    self.assertEqual(local_path_info, dep_info._local_path_info)
+    self.assertFalse(dep_info.has_cloud_storage_info)
+
+  def testInitCloudStorageInfo(self):
+    cs_info = dependency_manager.CloudStorageInfo(
+        'cs_bucket', 'cs_hash', 'download_path', 'cs_remote_path')
+    dep_info = dependency_manager.DependencyInfo(
+        'dep', 'platform', 'config_path', cloud_storage_info=cs_info)
+    self.assertEqual('dep', dep_info.dependency)
+    self.assertEqual('platform', dep_info.platform)
+    self.assertEqual(['config_path'], dep_info.config_paths)
+    self.assertFalse(dep_info.has_local_path_info)
+    self.assertTrue(dep_info.has_cloud_storage_info)
+    self.assertEqual(cs_info, dep_info._cloud_storage_info)
+
+  def testInitAllInfo(self):
+    local_path_info = dependency_manager.LocalPathInfo(['path0', 'path1'])
+    cs_info = dependency_manager.CloudStorageInfo(
+        'cs_bucket', 'cs_hash', 'download_path', 'cs_remote_path')
+    dep_info = dependency_manager.DependencyInfo(
+        'dep', 'platform', 'config_path', local_path_info=local_path_info,
+        cloud_storage_info=cs_info)
+    self.assertEqual('dep', dep_info.dependency)
+    self.assertEqual('platform', dep_info.platform)
+    self.assertEqual(['config_path'], dep_info.config_paths)
+    self.assertTrue(dep_info.has_local_path_info)
+    self.assertTrue(dep_info.has_cloud_storage_info)
+    self.assertEqual(local_path_info, dep_info._local_path_info)
+    self.assertEqual(cs_info, dep_info._cloud_storage_info)
+
+
+  def testUpdateRequiredArgsConflicts(self):
+    lp_info = dependency_manager.LocalPathInfo(['path0', 'path2'])
+    dep_info1 = dependency_manager.DependencyInfo(
+        'dep1', 'platform1', 'config_path1', local_path_info=lp_info)
+    dep_info2 = dependency_manager.DependencyInfo(
+        'dep1', 'platform2', 'config_path2', local_path_info=lp_info)
+    dep_info3 = dependency_manager.DependencyInfo(
+        'dep2', 'platform1', 'config_path3', local_path_info=lp_info)
+    self.assertRaises(ValueError, dep_info1.Update, dep_info2)
+    self.assertRaises(ValueError, dep_info1.Update, dep_info3)
+    self.assertRaises(ValueError, dep_info3.Update, dep_info2)
+
+  def testUpdateMinimumCloudStorageInfo(self):
+    dep_info1 = dependency_manager.DependencyInfo(
+        'dep1', 'platform1', 'config_path1')
+
+    cs_info2 = dependency_manager.CloudStorageInfo(
+        cs_bucket='cs_bucket2', cs_hash='cs_hash2',
+        download_path='download_path2', cs_remote_path='cs_remote_path2')
+    dep_info2 = dependency_manager.DependencyInfo(
+        'dep1', 'platform1', 'config_path2', cloud_storage_info=cs_info2)
+
+    dep_info3 = dependency_manager.DependencyInfo(
+        'dep1', 'platform1', 'config_path3')
+
+    cs_info4 = dependency_manager.CloudStorageInfo(
+        cs_bucket='cs_bucket4', cs_hash='cs_hash4',
+        download_path='download_path4', cs_remote_path='cs_remote_path4')
+    dep_info4 = dependency_manager.DependencyInfo(
+        'dep1', 'platform1', 'config_path4', cloud_storage_info=cs_info4)
+
+    self.assertEqual('dep1', dep_info1.dependency)
+    self.assertEqual('platform1', dep_info1.platform)
+    self.assertEqual(['config_path1'], dep_info1.config_paths)
+
+    dep_info1.Update(dep_info2)
+    self.assertFalse(dep_info1.has_local_path_info)
+    self.assertEqual('dep1', dep_info1.dependency)
+    self.assertEqual('platform1', dep_info1.platform)
+    self.assertEqual(['config_path1', 'config_path2'], dep_info1.config_paths)
+
+    cs_info = dep_info1._cloud_storage_info
+    self.assertEqual(cs_info, cs_info2)
+    self.assertEqual('cs_bucket2', cs_info._cs_bucket)
+    self.assertEqual('cs_hash2', cs_info._cs_hash)
+    self.assertEqual('download_path2', cs_info._download_path)
+    self.assertEqual('cs_remote_path2', cs_info._cs_remote_path)
+
+    dep_info1.Update(dep_info3)
+    self.assertEqual('dep1', dep_info1.dependency)
+    self.assertEqual('platform1', dep_info1.platform)
+    self.assertEqual(['config_path1', 'config_path2', 'config_path3'],
+                     dep_info1.config_paths)
+    self.assertFalse(dep_info1.has_local_path_info)
+    cs_info = dep_info1._cloud_storage_info
+    self.assertEqual(cs_info, cs_info2)
+    self.assertEqual('cs_bucket2', cs_info._cs_bucket)
+    self.assertEqual('cs_hash2', cs_info._cs_hash)
+    self.assertEqual('download_path2', cs_info._download_path)
+    self.assertEqual('cs_remote_path2', cs_info._cs_remote_path)
+
+    self.assertRaises(ValueError, dep_info1.Update, dep_info4)
+
+  def testUpdateMaxCloudStorageInfo(self):
+    dep_info1 = dependency_manager.DependencyInfo(
+        'dep1', 'platform1', 'config_path1')
+
+    zip_info2 = dependency_manager.ArchiveInfo(
+        'archive_path2', 'unzip_path2', 'path_within_archive2')
+    cs_info2 = dependency_manager.CloudStorageInfo(
+        'cs_bucket2', 'cs_hash2', 'download_path2', 'cs_remote_path2',
+        version_in_cs='2.1.1', archive_info=zip_info2)
+    dep_info2 = dependency_manager.DependencyInfo(
+        'dep1', 'platform1', 'config_path2', cloud_storage_info=cs_info2)
+
+    dep_info3 = dependency_manager.DependencyInfo(
+        'dep1', 'platform1', 'config_path3')
+
+    zip_info4 = dependency_manager.ArchiveInfo(
+        'archive_path4', 'unzip_path4', 'path_within_archive4')
+    cs_info4 = dependency_manager.CloudStorageInfo(
+        'cs_bucket4', 'cs_hash4', 'download_path4', 'cs_remote_path4',
+        version_in_cs='4.2.1', archive_info=zip_info4)
+    dep_info4 = dependency_manager.DependencyInfo(
+        'dep1', 'platform1', 'config_path4', cloud_storage_info=cs_info4)
+
+    self.assertEqual('dep1', dep_info1.dependency)
+    self.assertEqual('platform1', dep_info1.platform)
+    self.assertEqual(['config_path1'], dep_info1.config_paths)
+
+    dep_info1.Update(dep_info2)
+    self.assertFalse(dep_info1.has_local_path_info)
+    self.assertEqual('dep1', dep_info1.dependency)
+    self.assertEqual('platform1', dep_info1.platform)
+    self.assertEqual(['config_path1', 'config_path2'], dep_info1.config_paths)
+
+    cs_info = dep_info1._cloud_storage_info
+    self.assertEqual(cs_info, cs_info2)
+    self.assertEqual('cs_bucket2', cs_info._cs_bucket)
+    self.assertEqual('cs_hash2', cs_info._cs_hash)
+    self.assertEqual('download_path2', cs_info._download_path)
+    self.assertEqual('cs_remote_path2', cs_info._cs_remote_path)
+
+    dep_info1.Update(dep_info3)
+    self.assertEqual('dep1', dep_info1.dependency)
+    self.assertEqual('platform1', dep_info1.platform)
+    self.assertEqual(['config_path1', 'config_path2', 'config_path3'],
+                     dep_info1.config_paths)
+    self.assertFalse(dep_info1.has_local_path_info)
+    cs_info = dep_info1._cloud_storage_info
+    self.assertEqual(cs_info, cs_info2)
+    self.assertEqual('cs_bucket2', cs_info._cs_bucket)
+    self.assertEqual('cs_hash2', cs_info._cs_hash)
+    self.assertEqual('download_path2', cs_info._download_path)
+    self.assertEqual('cs_remote_path2', cs_info._cs_remote_path)
+
+    self.assertRaises(ValueError, dep_info1.Update, dep_info4)
+
+  def testUpdateAllInfo(self):
+    lp_info1 = dependency_manager.LocalPathInfo(['path1'])
+    dep_info1 = dependency_manager.DependencyInfo(
+        'dep1', 'platform1', 'config_path1', local_path_info=lp_info1)
+    cs_info2 = dependency_manager.CloudStorageInfo(
+        cs_bucket='cs_bucket2', cs_hash='cs_hash2',
+        download_path='download_path2', cs_remote_path='cs_remote_path2')
+    lp_info2 = dependency_manager.LocalPathInfo(['path2'])
+    dep_info2 = dependency_manager.DependencyInfo(
+        'dep1', 'platform1', 'config_path2', local_path_info=lp_info2,
+        cloud_storage_info=cs_info2)
+    lp_info3 = dependency_manager.LocalPathInfo(['path3'])
+    dep_info3 = dependency_manager.DependencyInfo(
+        'dep1', 'platform1', 'config_path3', local_path_info=lp_info3)
+    lp_info4 = dependency_manager.LocalPathInfo(['path4'])
+    cs_info4 = dependency_manager.CloudStorageInfo(
+        cs_bucket='cs_bucket4', cs_hash='cs_hash4',
+        download_path='download_path4', cs_remote_path='cs_remote_path4')
+    dep_info4 = dependency_manager.DependencyInfo(
+        'dep1', 'platform1', 'config_path4', local_path_info=lp_info4,
+        cloud_storage_info=cs_info4)
+
+    self.assertTrue(dep_info1._local_path_info.IsPathInLocalPaths('path1'))
+    self.assertFalse(dep_info1._local_path_info.IsPathInLocalPaths('path2'))
+    self.assertFalse(dep_info1._local_path_info.IsPathInLocalPaths('path3'))
+    self.assertFalse(dep_info1._local_path_info.IsPathInLocalPaths('path4'))
+
+    dep_info1.Update(dep_info2)
+    cs_info = dep_info1._cloud_storage_info
+    self.assertEqual(cs_info, cs_info2)
+    self.assertEqual('cs_bucket2', cs_info._cs_bucket)
+    self.assertEqual('cs_hash2', cs_info._cs_hash)
+    self.assertEqual('download_path2', cs_info._download_path)
+    self.assertEqual('cs_remote_path2', cs_info._cs_remote_path)
+    self.assertTrue(dep_info1._local_path_info.IsPathInLocalPaths('path1'))
+    self.assertTrue(dep_info1._local_path_info.IsPathInLocalPaths('path2'))
+    self.assertFalse(dep_info1._local_path_info.IsPathInLocalPaths('path3'))
+    self.assertFalse(dep_info1._local_path_info.IsPathInLocalPaths('path4'))
+
+    dep_info1.Update(dep_info3)
+    cs_info = dep_info1._cloud_storage_info
+    self.assertEqual(cs_info, cs_info2)
+    self.assertEqual('cs_bucket2', cs_info._cs_bucket)
+    self.assertEqual('cs_hash2', cs_info._cs_hash)
+    self.assertEqual('download_path2', cs_info._download_path)
+    self.assertEqual('cs_remote_path2', cs_info._cs_remote_path)
+    self.assertTrue(dep_info1._local_path_info.IsPathInLocalPaths('path1'))
+    self.assertTrue(dep_info1._local_path_info.IsPathInLocalPaths('path2'))
+    self.assertTrue(dep_info1._local_path_info.IsPathInLocalPaths('path3'))
+    self.assertFalse(dep_info1._local_path_info.IsPathInLocalPaths('path4'))
+
+    self.assertRaises(ValueError, dep_info1.Update, dep_info4)
+
diff --git a/catapult/dependency_manager/dependency_manager/dependency_manager_unittest.py b/catapult/dependency_manager/dependency_manager/dependency_manager_unittest.py
new file mode 100644
index 0000000..1d55afe
--- /dev/null
+++ b/catapult/dependency_manager/dependency_manager/dependency_manager_unittest.py
@@ -0,0 +1,526 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-argument
+
+from catapult_base import cloud_storage
+import mock
+from pyfakefs import fake_filesystem_unittest
+
+import dependency_manager
+from dependency_manager import exceptions
+
+
+class DependencyManagerTest(fake_filesystem_unittest.TestCase):
+
+  def setUp(self):
+    self.lp_info012 = dependency_manager.LocalPathInfo(
+        ['path0', 'path1', 'path2'])
+    self.cloud_storage_info = dependency_manager.CloudStorageInfo(
+        'cs_bucket', 'cs_hash', 'download_path', 'cs_remote_path')
+
+    self.dep_info = dependency_manager.DependencyInfo(
+        'dep', 'platform', 'config_file', local_path_info=self.lp_info012,
+        cloud_storage_info=self.cloud_storage_info)
+    self.setUpPyfakefs()
+
+  def tearDown(self):
+    self.tearDownPyfakefs()
+
+  # TODO(nednguyen): add a test that construct
+  # dependency_manager.DependencyManager from a list of DependencyInfo.
+  def testErrorInit(self):
+    with self.assertRaises(ValueError):
+      dependency_manager.DependencyManager(None)
+    with self.assertRaises(ValueError):
+      dependency_manager.DependencyManager('config_file?')
+
+  def testInitialUpdateDependencies(self):
+    dep_manager = dependency_manager.DependencyManager([])
+
+    # Empty BaseConfig.
+    dep_manager._lookup_dict = {}
+    base_config_mock = mock.MagicMock(spec=dependency_manager.BaseConfig)
+    base_config_mock.IterDependencyInfo.return_value = iter([])
+    dep_manager._UpdateDependencies(base_config_mock)
+    self.assertFalse(dep_manager._lookup_dict)
+
+    # One dependency/platform in a BaseConfig.
+    dep_manager._lookup_dict = {}
+    base_config_mock = mock.MagicMock(spec=dependency_manager.BaseConfig)
+    dep_info = mock.MagicMock(spec=dependency_manager.DependencyInfo)
+    dep = 'dependency'
+    plat = 'platform'
+    dep_info.dependency = dep
+    dep_info.platform = plat
+    base_config_mock.IterDependencyInfo.return_value = iter([dep_info])
+    expected_lookup_dict = {dep: {plat: dep_info}}
+    dep_manager._UpdateDependencies(base_config_mock)
+    self.assertEqual(expected_lookup_dict, dep_manager._lookup_dict)
+    self.assertFalse(dep_info.Update.called)
+
+    # One dependency multiple platforms in a BaseConfig.
+    dep_manager._lookup_dict = {}
+    base_config_mock = mock.MagicMock(spec=dependency_manager.BaseConfig)
+    dep = 'dependency'
+    plat1 = 'platform1'
+    plat2 = 'platform2'
+    dep_info1 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
+    dep_info1.dependency = dep
+    dep_info1.platform = plat1
+    dep_info2 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
+    dep_info2.dependency = dep
+    dep_info2.platform = plat2
+    base_config_mock.IterDependencyInfo.return_value = iter([dep_info1,
+                                                             dep_info2])
+    expected_lookup_dict = {dep: {plat1: dep_info1,
+                                  plat2: dep_info2}}
+    dep_manager._UpdateDependencies(base_config_mock)
+    self.assertEqual(expected_lookup_dict, dep_manager._lookup_dict)
+    self.assertFalse(dep_info1.Update.called)
+    self.assertFalse(dep_info2.Update.called)
+
+    # Multiple dependencies, multiple platforms in a BaseConfig.
+    dep_manager._lookup_dict = {}
+    base_config_mock = mock.MagicMock(spec=dependency_manager.BaseConfig)
+    dep1 = 'dependency1'
+    dep2 = 'dependency2'
+    plat1 = 'platform1'
+    plat2 = 'platform2'
+    dep_info1 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
+    dep_info1.dependency = dep1
+    dep_info1.platform = plat1
+    dep_info2 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
+    dep_info2.dependency = dep1
+    dep_info2.platform = plat2
+    dep_info3 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
+    dep_info3.dependency = dep2
+    dep_info3.platform = plat2
+    base_config_mock.IterDependencyInfo.return_value = iter(
+        [dep_info1, dep_info2, dep_info3])
+    expected_lookup_dict = {dep1: {plat1: dep_info1,
+                                   plat2: dep_info2},
+                            dep2: {plat2: dep_info3}}
+    dep_manager._UpdateDependencies(base_config_mock)
+    self.assertEqual(expected_lookup_dict, dep_manager._lookup_dict)
+    self.assertFalse(dep_info1.Update.called)
+    self.assertFalse(dep_info2.Update.called)
+    self.assertFalse(dep_info3.Update.called)
+
+  def testFollowupUpdateDependenciesNoOverlap(self):
+    dep_manager = dependency_manager.DependencyManager([])
+    dep = 'dependency'
+    dep1 = 'dependency1'
+    dep2 = 'dependency2'
+    dep3 = 'dependency3'
+    plat1 = 'platform1'
+    plat2 = 'platform2'
+    plat3 = 'platform3'
+    dep_info_a = mock.MagicMock(spec=dependency_manager.DependencyInfo)
+    dep_info_a.dependency = dep1
+    dep_info_a.platform = plat1
+    dep_info_b = mock.MagicMock(spec=dependency_manager.DependencyInfo)
+    dep_info_b.dependency = dep1
+    dep_info_b.platform = plat2
+    dep_info_c = mock.MagicMock(spec=dependency_manager.DependencyInfo)
+    dep_info_c.dependency = dep
+    dep_info_c.platform = plat1
+
+    start_lookup_dict = {dep: {plat1: dep_info_a,
+                               plat2: dep_info_b},
+                         dep1: {plat1: dep_info_c}}
+    base_config_mock = mock.MagicMock(spec=dependency_manager.BaseConfig)
+
+    # Empty BaseConfig.
+    dep_manager._lookup_dict = start_lookup_dict.copy()
+    base_config_mock.IterDependencyInfo.return_value = iter([])
+    dep_manager._UpdateDependencies(base_config_mock)
+    self.assertEqual(start_lookup_dict, dep_manager._lookup_dict)
+
+    # One dependency/platform in a BaseConfig.
+    dep_manager._lookup_dict = start_lookup_dict.copy()
+    dep_info = mock.MagicMock(spec=dependency_manager.DependencyInfo)
+    dep_info.dependency = dep3
+    dep_info.platform = plat1
+    base_config_mock.IterDependencyInfo.return_value = iter([dep_info])
+    expected_lookup_dict = {dep: {plat1: dep_info_a,
+                                  plat2: dep_info_b},
+                            dep1: {plat1: dep_info_c},
+                            dep3: {plat1: dep_info}}
+
+    dep_manager._UpdateDependencies(base_config_mock)
+    self.assertItemsEqual(expected_lookup_dict, dep_manager._lookup_dict)
+    self.assertFalse(dep_info.Update.called)
+    self.assertFalse(dep_info_a.Update.called)
+    self.assertFalse(dep_info_b.Update.called)
+    self.assertFalse(dep_info_c.Update.called)
+
+    # One dependency multiple platforms in a BaseConfig.
+    dep_manager._lookup_dict = start_lookup_dict.copy()
+    dep_info1 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
+    dep_info1.dependency = dep2
+    dep_info1.platform = plat1
+    dep_info2 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
+    dep_info2.dependency = dep2
+    dep_info2.platform = plat2
+    base_config_mock.IterDependencyInfo.return_value = iter([dep_info1,
+                                                             dep_info2])
+    expected_lookup_dict = {dep: {plat1: dep_info_a,
+                                  plat2: dep_info_b},
+                            dep1: {plat1: dep_info_c},
+                            dep2: {plat1: dep_info1,
+                                   plat2: dep_info2}}
+    dep_manager._UpdateDependencies(base_config_mock)
+    self.assertEqual(expected_lookup_dict, dep_manager._lookup_dict)
+    self.assertFalse(dep_info1.Update.called)
+    self.assertFalse(dep_info2.Update.called)
+    self.assertFalse(dep_info_a.Update.called)
+    self.assertFalse(dep_info_b.Update.called)
+    self.assertFalse(dep_info_c.Update.called)
+
+    # Multiple dependencies, multiple platforms in a BaseConfig.
+    dep_manager._lookup_dict = start_lookup_dict.copy()
+    dep1 = 'dependency1'
+    plat1 = 'platform1'
+    plat2 = 'platform2'
+    dep_info1 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
+    dep_info1.dependency = dep2
+    dep_info1.platform = plat1
+    dep_info2 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
+    dep_info2.dependency = dep2
+    dep_info2.platform = plat2
+    dep_info3 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
+    dep_info3.dependency = dep3
+    dep_info3.platform = plat2
+    base_config_mock.IterDependencyInfo.return_value = iter(
+        [dep_info1, dep_info2, dep_info3])
+    expected_lookup_dict = {dep: {plat1: dep_info_a,
+                                  plat2: dep_info_b},
+                            dep1: {plat1: dep_info_c},
+                            dep2: {plat1: dep_info1,
+                                   plat2: dep_info2},
+                            dep3: {plat2: dep_info3}}
+    dep_manager._UpdateDependencies(base_config_mock)
+    self.assertEqual(expected_lookup_dict, dep_manager._lookup_dict)
+    self.assertFalse(dep_info1.Update.called)
+    self.assertFalse(dep_info2.Update.called)
+    self.assertFalse(dep_info3.Update.called)
+    self.assertFalse(dep_info_a.Update.called)
+    self.assertFalse(dep_info_b.Update.called)
+    self.assertFalse(dep_info_c.Update.called)
+
+    # Ensure the testing data wasn't corrupted.
+    self.assertEqual(start_lookup_dict,
+                     {dep: {plat1: dep_info_a,
+                            plat2: dep_info_b},
+                      dep1: {plat1: dep_info_c}})
+
+  def testFollowupUpdateDependenciesWithCollisions(self):
+    dep_manager = dependency_manager.DependencyManager([])
+    dep = 'dependency'
+    dep1 = 'dependency1'
+    dep2 = 'dependency2'
+    plat1 = 'platform1'
+    plat2 = 'platform2'
+    dep_info_a = mock.MagicMock(spec=dependency_manager.DependencyInfo)
+    dep_info_a.dependency = dep1
+    dep_info_a.platform = plat1
+    dep_info_b = mock.MagicMock(spec=dependency_manager.DependencyInfo)
+    dep_info_b.dependency = dep1
+    dep_info_b.platform = plat2
+    dep_info_c = mock.MagicMock(spec=dependency_manager.DependencyInfo)
+    dep_info_c.dependency = dep
+    dep_info_c.platform = plat1
+
+    start_lookup_dict = {dep: {plat1: dep_info_a,
+                               plat2: dep_info_b},
+                         dep1: {plat1: dep_info_c}}
+    base_config_mock = mock.MagicMock(spec=dependency_manager.BaseConfig)
+
+    # One dependency/platform.
+    dep_manager._lookup_dict = start_lookup_dict.copy()
+    dep_info = mock.MagicMock(spec=dependency_manager.DependencyInfo)
+    dep_info.dependency = dep
+    dep_info.platform = plat1
+    base_config_mock.IterDependencyInfo.return_value = iter([dep_info])
+    expected_lookup_dict = {dep: {plat1: dep_info_a,
+                                  plat2: dep_info_b},
+                            dep1: {plat1: dep_info_c}}
+
+    dep_manager._UpdateDependencies(base_config_mock)
+    self.assertItemsEqual(expected_lookup_dict, dep_manager._lookup_dict)
+    dep_info_a.Update.assert_called_once_with(dep_info)
+    self.assertFalse(dep_info.Update.called)
+    self.assertFalse(dep_info_b.Update.called)
+    self.assertFalse(dep_info_c.Update.called)
+    dep_info_a.reset_mock()
+    dep_info_b.reset_mock()
+    dep_info_c.reset_mock()
+
+    # One dependency multiple platforms in a BaseConfig.
+    dep_manager._lookup_dict = start_lookup_dict.copy()
+    dep_info1 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
+    dep_info1.dependency = dep1
+    dep_info1.platform = plat1
+    dep_info2 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
+    dep_info2.dependency = dep2
+    dep_info2.platform = plat2
+    base_config_mock.IterDependencyInfo.return_value = iter([dep_info1,
+                                                             dep_info2])
+    expected_lookup_dict = {dep: {plat1: dep_info_a,
+                                  plat2: dep_info_b},
+                            dep1: {plat1: dep_info_c},
+                            dep2: {plat2: dep_info2}}
+    dep_manager._UpdateDependencies(base_config_mock)
+    self.assertEqual(expected_lookup_dict, dep_manager._lookup_dict)
+    self.assertFalse(dep_info1.Update.called)
+    self.assertFalse(dep_info2.Update.called)
+    self.assertFalse(dep_info_a.Update.called)
+    self.assertFalse(dep_info_b.Update.called)
+    dep_info_c.Update.assert_called_once_with(dep_info1)
+    dep_info_a.reset_mock()
+    dep_info_b.reset_mock()
+    dep_info_c.reset_mock()
+
+    # Multiple dependencies, multiple platforms in a BaseConfig.
+    dep_manager._lookup_dict = start_lookup_dict.copy()
+    dep1 = 'dependency1'
+    plat1 = 'platform1'
+    plat2 = 'platform2'
+    dep_info1 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
+    dep_info1.dependency = dep
+    dep_info1.platform = plat1
+    dep_info2 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
+    dep_info2.dependency = dep1
+    dep_info2.platform = plat1
+    dep_info3 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
+    dep_info3.dependency = dep2
+    dep_info3.platform = plat2
+    base_config_mock.IterDependencyInfo.return_value = iter(
+        [dep_info1, dep_info2, dep_info3])
+    expected_lookup_dict = {dep: {plat1: dep_info_a,
+                                  plat2: dep_info_b},
+                            dep1: {plat1: dep_info_c},
+                            dep2: {plat2: dep_info3}}
+    dep_manager._UpdateDependencies(base_config_mock)
+    self.assertEqual(expected_lookup_dict, dep_manager._lookup_dict)
+    self.assertFalse(dep_info1.Update.called)
+    self.assertFalse(dep_info2.Update.called)
+    self.assertFalse(dep_info3.Update.called)
+    self.assertFalse(dep_info_b.Update.called)
+    dep_info_a.Update.assert_called_once_with(dep_info1)
+    dep_info_c.Update.assert_called_once_with(dep_info2)
+
+    # Collision error.
+    dep_manager._lookup_dict = start_lookup_dict.copy()
+    dep_info = mock.MagicMock(spec=dependency_manager.DependencyInfo)
+    dep_info.dependency = dep
+    dep_info.platform = plat1
+    base_config_mock.IterDependencyInfo.return_value = iter([dep_info])
+    dep_info_a.Update.side_effect = ValueError
+    self.assertRaises(ValueError,
+                      dep_manager._UpdateDependencies, base_config_mock)
+
+    # Ensure the testing data wasn't corrupted.
+    self.assertEqual(start_lookup_dict,
+                     {dep: {plat1: dep_info_a,
+                            plat2: dep_info_b},
+                      dep1: {plat1: dep_info_c}})
+
+  def testGetDependencyInfo(self):
+    dep_manager = dependency_manager.DependencyManager([])
+    self.assertFalse(dep_manager._lookup_dict)
+
+    # No dependencies in the dependency manager.
+    self.assertEqual(None, dep_manager._GetDependencyInfo('missing_dep',
+                                                          'missing_plat'))
+
+    dep_manager._lookup_dict = {'dep1': {'plat1': 'dep_info11',
+                                         'plat2': 'dep_info12',
+                                         'plat3': 'dep_info13'},
+                                'dep2': {'plat1': 'dep_info11',
+                                         'plat2': 'dep_info21',
+                                         'plat3': 'dep_info23',
+                                         'default': 'dep_info2d'},
+                                'dep3': {'plat1': 'dep_info31',
+                                         'plat2': 'dep_info32',
+                                         'default': 'dep_info3d'}}
+    # Dependency not in the dependency manager.
+    self.assertEqual(None, dep_manager._GetDependencyInfo(
+        'missing_dep', 'missing_plat'))
+    # Dependency in the dependency manager, but not the platform. No default.
+    self.assertEqual(None, dep_manager._GetDependencyInfo(
+        'dep1', 'missing_plat'))
+    # Dependency in the dependency manager, but not the platform, but a default
+    # exists.
+    self.assertEqual('dep_info2d', dep_manager._GetDependencyInfo(
+        'dep2', 'missing_plat'))
+    # Dependency and platform in the dependency manager. A default exists.
+    self.assertEqual('dep_info23', dep_manager._GetDependencyInfo(
+        'dep2', 'plat3'))
+    # Dependency and platform in the dependency manager. No default exists.
+    self.assertEqual('dep_info12', dep_manager._GetDependencyInfo(
+        'dep1', 'plat2'))
+
+  @mock.patch(
+      'dependency_manager.dependency_info.DependencyInfo.GetRemotePath')  # pylint: disable=line-too-long
+  def testFetchPathUninitializedDependency(
+      self, cs_path_mock):
+    dep_manager = dependency_manager.DependencyManager([])
+    self.assertFalse(cs_path_mock.call_args)
+    cs_path = 'cs_path'
+    cs_path_mock.return_value = cs_path
+
+    # Empty lookup_dict
+    with self.assertRaises(exceptions.NoPathFoundError):
+      dep_manager.FetchPath('dep', 'plat_arch_x86')
+
+    # Non-empty lookup dict that doesn't contain the dependency we're looking
+    # for.
+    dep_manager._lookup_dict = {'dep1': mock.MagicMock(),
+                                'dep2': mock.MagicMock()}
+    with self.assertRaises(exceptions.NoPathFoundError):
+      dep_manager.FetchPath('dep', 'plat_arch_x86')
+
+  @mock.patch('os.path')
+  @mock.patch(
+      'dependency_manager.DependencyManager._GetDependencyInfo')
+  @mock.patch(
+      'dependency_manager.dependency_info.DependencyInfo.GetRemotePath')  # pylint: disable=line-too-long
+  def testFetchPathLocalFile(self, cs_path_mock, dep_info_mock, path_mock):
+    dep_manager = dependency_manager.DependencyManager([])
+    self.assertFalse(cs_path_mock.call_args)
+    cs_path = 'cs_path'
+    dep_info = self.dep_info
+    cs_path_mock.return_value = cs_path
+    # The DependencyInfo returned should be passed through to LocalPath.
+    dep_info_mock.return_value = dep_info
+
+    # Non-empty lookup dict that contains the dependency we're looking for.
+    # Local path exists.
+    dep_manager._lookup_dict = {'dep': {'platform' : self.dep_info},
+                                'dep2': mock.MagicMock()}
+    self.fs.CreateFile('path1')
+    found_path = dep_manager.FetchPath('dep', 'platform')
+
+    self.assertEqual('path1', found_path)
+    self.assertFalse(cs_path_mock.call_args)
+
+
+  @mock.patch(
+      'dependency_manager.dependency_info.DependencyInfo.GetRemotePath')  # pylint: disable=line-too-long
+  def testFetchPathRemoteFile(
+      self, cs_path_mock):
+    dep_manager = dependency_manager.DependencyManager([])
+    self.assertFalse(cs_path_mock.call_args)
+    cs_path = 'cs_path'
+    def FakeCSPath():
+      self.fs.CreateFile(cs_path)
+      return cs_path
+    cs_path_mock.side_effect = FakeCSPath
+
+    # Non-empty lookup dict that contains the dependency we're looking for.
+    # Local path doesn't exist, but cloud_storage_path is downloaded.
+    dep_manager._lookup_dict = {'dep': {'platform' : self.dep_info,
+                                        'plat1': mock.MagicMock()},
+                                'dep2': {'plat2': mock.MagicMock()}}
+    found_path = dep_manager.FetchPath('dep', 'platform')
+    self.assertEqual(cs_path, found_path)
+
+
+  @mock.patch(
+      'dependency_manager.dependency_info.DependencyInfo.GetRemotePath')  # pylint: disable=line-too-long
+  def testFetchPathError(
+      self, cs_path_mock):
+    dep_manager = dependency_manager.DependencyManager([])
+    self.assertFalse(cs_path_mock.call_args)
+    cs_path_mock.return_value = None
+    dep_manager._lookup_dict = {'dep': {'platform' : self.dep_info,
+                                        'plat1': mock.MagicMock()},
+                                'dep2': {'plat2': mock.MagicMock()}}
+    # Non-empty lookup dict that contains the dependency we're looking for.
+    # Local path doesn't exist, and cloud_storage path wasn't successfully
+    # found.
+    self.assertRaises(exceptions.NoPathFoundError,
+                      dep_manager.FetchPath, 'dep', 'platform')
+
+    cs_path_mock.side_effect = cloud_storage.CredentialsError
+    self.assertRaises(cloud_storage.CredentialsError,
+                      dep_manager.FetchPath, 'dep', 'platform')
+
+    cs_path_mock.side_effect = cloud_storage.CloudStorageError
+    self.assertRaises(cloud_storage.CloudStorageError,
+                      dep_manager.FetchPath, 'dep', 'platform')
+
+    cs_path_mock.side_effect = cloud_storage.PermissionError
+    self.assertRaises(cloud_storage.PermissionError,
+                      dep_manager.FetchPath, 'dep', 'platform')
+
+  def testLocalPath(self):
+    dep_manager = dependency_manager.DependencyManager([])
+    # Empty lookup_dict
+    with self.assertRaises(exceptions.NoPathFoundError):
+      dep_manager.LocalPath('dep', 'plat')
+
+  def testLocalPathNoDependency(self):
+    # Non-empty lookup dict that doesn't contain the dependency we're looking
+    # for.
+    dep_manager = dependency_manager.DependencyManager([])
+    dep_manager._lookup_dict = {'dep1': mock.MagicMock(),
+                                'dep2': mock.MagicMock()}
+    with self.assertRaises(exceptions.NoPathFoundError):
+      dep_manager.LocalPath('dep', 'plat')
+
+  def testLocalPathExists(self):
+    # Non-empty lookup dict that contains the dependency we're looking for.
+    # Local path exists.
+    dep_manager = dependency_manager.DependencyManager([])
+    dep_manager._lookup_dict = {'dependency' : {'platform': self.dep_info},
+                                'dep1': mock.MagicMock(),
+                                'dep2': mock.MagicMock()}
+    self.fs.CreateFile('path1')
+    found_path = dep_manager.LocalPath('dependency', 'platform')
+
+    self.assertEqual('path1', found_path)
+
+  def testLocalPathMissingPaths(self):
+    # Non-empty lookup dict that contains the dependency we're looking for.
+    # Local path is found but doesn't exist.
+    dep_manager = dependency_manager.DependencyManager([])
+    dep_manager._lookup_dict = {'dependency' : {'platform': self.dep_info},
+                                'dep1': mock.MagicMock(),
+                                'dep2': mock.MagicMock()}
+    self.assertRaises(exceptions.NoPathFoundError,
+                      dep_manager.LocalPath, 'dependency', 'platform')
+
+  def testLocalPathNoPaths(self):
+    # Non-empty lookup dict that contains the dependency we're looking for.
+    # Local path isn't found.
+    dep_manager = dependency_manager.DependencyManager([])
+    dep_info = dependency_manager.DependencyInfo(
+        'dep', 'platform', 'config_file',
+        cloud_storage_info=self.cloud_storage_info)
+    dep_manager._lookup_dict = {'dependency' : {'platform': dep_info},
+                                'dep1': mock.MagicMock(),
+                                'dep2': mock.MagicMock()}
+    self.assertRaises(exceptions.NoPathFoundError,
+                      dep_manager.LocalPath, 'dependency', 'platform')
+
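
The FetchPath tests above exercise three outcomes: an existing local path wins, otherwise GetRemotePath is consulted, and when neither yields an existing file NoPathFoundError is raised. A condensed sketch of that resolution order against a hand-built lookup table, reusing the mock pattern from these tests (the dependency and platform names are hypothetical):

import mock

import dependency_manager
from dependency_manager import exceptions

dep_manager = dependency_manager.DependencyManager([])
info = mock.MagicMock(spec=dependency_manager.DependencyInfo)
info.GetLocalPath.return_value = None   # no local copy on disk
info.GetRemotePath.return_value = None  # nothing fetched from cloud storage
dep_manager._lookup_dict = {'example_dep': {'platform1': info}}

try:
  dep_manager.FetchPath('example_dep', 'platform1')
except exceptions.NoPathFoundError:
  pass  # neither a local nor a remote path could be resolved
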
diff --git a/catapult/dependency_manager/dependency_manager/dependency_manager_util.py b/catapult/dependency_manager/dependency_manager/dependency_manager_util.py
new file mode 100644
index 0000000..afa15f3
--- /dev/null
+++ b/catapult/dependency_manager/dependency_manager/dependency_manager_util.py
@@ -0,0 +1,95 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import shutil
+import stat
+import sys
+import zipfile
+
+from dependency_manager import exceptions
+
+
+def _WinReadOnlyHandler(func, path, execinfo):
+  if not os.access(path, os.W_OK):
+    os.chmod(path, stat.S_IWRITE)
+    func(path)
+  else:
+    raise execinfo[0], execinfo[1], execinfo[2]
+
+
+def RemoveDir(dir_path):
+  if os.path.isdir(dir_path):
+    shutil.rmtree(dir_path, onerror=_WinReadOnlyHandler)
+
+
+def VerifySafeArchive(archive):
+  def ResolvePath(path_name):
+    return os.path.realpath(os.path.abspath(path_name))
+  # Must add pathsep to avoid false positives.
+  # Ex: /tmp/abc/bad_file.py starts with /tmp/a but not /tmp/a/
+  base_path = ResolvePath(os.getcwd()) + os.path.sep
+  for member in archive.namelist():
+    if not ResolvePath(os.path.join(base_path, member)).startswith(base_path):
+      raise exceptions.ArchiveError(
+          'Archive %s contains a bad member: %s.' % (archive.filename, member))
+
+
+def GetModeFromPath(file_path):
+  return stat.S_IMODE(os.stat(file_path).st_mode)
+
+
+def GetModeFromZipInfo(zip_info):
+  return zip_info.external_attr >> 16
+
+
+def SetUnzippedDirPermissions(archive, unzipped_dir):
+  """Set the file permissions in an unzipped archive.
+
+     Designed to be called right after extractall() was called on |archive|.
+     Noop on Win. Otherwise sets the executable bit on files where needed.
+
+     Args:
+         archive: A zipfile.ZipFile object opened for reading.
+         unzipped_dir: A path to a directory containing the unzipped contents
+             of |archive|.
+  """
+  if sys.platform.startswith('win'):
+    # Windows doesn't have an executable bit, so don't mess with the ACLs.
+    return
+  for zip_info in archive.infolist():
+    archive_acls = GetModeFromZipInfo(zip_info)
+    if archive_acls & stat.S_IXUSR:
+      # Only preserve the owner's executable permission.
+      unzipped_path = os.path.abspath(
+          os.path.join(unzipped_dir, zip_info.filename))
+      mode = GetModeFromPath(unzipped_path)
+      os.chmod(unzipped_path, mode | stat.S_IXUSR)
+
+
+def UnzipArchive(archive_path, unzip_path):
+  """Unzips a file if it is a zip file.
+
+  Args:
+      archive_path: The downloaded file to unzip.
+      unzip_path: The destination directory to unzip to.
+
+  Raises:
+      ValueError: If |archive_path| is not a zipfile.
+  """
+  # TODO(aiolos): Add tests once the refactor is completed. crbug.com/551158
+  if not (archive_path and zipfile.is_zipfile(archive_path)):
+    raise ValueError(
+        'Attempting to unzip a non-archive file at %s' % archive_path)
+  if not os.path.exists(unzip_path):
+    os.makedirs(unzip_path)
+  try:
+    with zipfile.ZipFile(archive_path, 'r') as archive:
+      VerifySafeArchive(archive)
+      archive.extractall(path=unzip_path)
+      SetUnzippedDirPermissions(archive, unzip_path)
+  except:
+    if unzip_path and os.path.isdir(unzip_path):
+      RemoveDir(unzip_path)
+    raise
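
A minimal usage sketch of the helpers above; the archive and destination paths are hypothetical. VerifySafeArchive is the guard UnzipArchive runs before extraction: any member whose resolved path escapes the working directory raises ArchiveError, and UnzipArchive removes the destination directory it created before re-raising:

from dependency_manager import dependency_manager_util
from dependency_manager import exceptions

try:
  dependency_manager_util.UnzipArchive(
      'downloads/example_dep.zip', 'unzipped/example_dep')
except ValueError:
  raise  # 'downloads/example_dep.zip' is missing or not a zip file.
except exceptions.ArchiveError:
  # A member such as '../../evil' was rejected before anything was extracted,
  # and the freshly created 'unzipped/example_dep' directory was removed.
  raise
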
diff --git a/catapult/dependency_manager/dependency_manager/dependency_manager_util_unittest.py b/catapult/dependency_manager/dependency_manager/dependency_manager_util_unittest.py
new file mode 100644
index 0000000..19889ee
--- /dev/null
+++ b/catapult/dependency_manager/dependency_manager/dependency_manager_util_unittest.py
@@ -0,0 +1,165 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import shutil
+import stat
+import sys
+import tempfile
+import unittest
+import uuid
+import zipfile
+
+import mock
+
+from dependency_manager import dependency_manager_util
+from dependency_manager import exceptions
+
+
+class DependencyManagerUtilTest(unittest.TestCase):
+  # This class intentionally uses actual file I/O to test real system behavior.
+
+  def setUp(self):
+    self.tmp_dir = os.path.abspath(tempfile.mkdtemp(prefix='telemetry'))
+    self.sub_dir = os.path.join(self.tmp_dir, 'sub_dir')
+    os.mkdir(self.sub_dir)
+
+    self.read_only_path = (os.path.join(self.tmp_dir, 'read_only'))
+    with open(self.read_only_path, 'w+') as read_file:
+      read_file.write('Read-only file')
+    os.chmod(self.read_only_path, stat.S_IRUSR)
+
+    self.writable_path = (os.path.join(self.tmp_dir, 'writable'))
+    with open(self.writable_path, 'w+') as writable_file:
+      writable_file.write('Writable file')
+    os.chmod(self.writable_path, stat.S_IRUSR | stat.S_IWUSR)
+
+    self.executable_path = (os.path.join(self.tmp_dir, 'executable'))
+    with open(self.executable_path, 'w+') as executable_file:
+      executable_file.write('Executable file')
+    os.chmod(self.executable_path, stat.S_IRWXU)
+
+    self.sub_read_only_path = (os.path.join(self.sub_dir, 'read_only'))
+    with open(self.sub_read_only_path, 'w+') as read_file:
+      read_file.write('Read-only sub file')
+    os.chmod(self.sub_read_only_path, stat.S_IRUSR)
+
+    self.sub_writable_path = (os.path.join(self.sub_dir, 'writable'))
+    with open(self.sub_writable_path, 'w+') as writable_file:
+      writable_file.write('Writable sub file')
+    os.chmod(self.sub_writable_path, stat.S_IRUSR | stat.S_IWUSR)
+
+    self.sub_executable_path = (os.path.join(self.sub_dir, 'executable'))
+    with open(self.sub_executable_path, 'w+') as executable_file:
+      executable_file.write('Executable sub file')
+    os.chmod(self.sub_executable_path, stat.S_IRWXU)
+
+    self.AssertExpectedDirFiles(self.tmp_dir)
+    self.archive_path = self.CreateZipArchiveFromDir(self.tmp_dir)
+
+  def tearDown(self):
+    if os.path.isdir(self.tmp_dir):
+      dependency_manager_util.RemoveDir(self.tmp_dir)
+    if os.path.isfile(self.archive_path):
+      os.remove(self.archive_path)
+
+  def AssertExpectedDirFiles(self, top_dir):
+    sub_dir = os.path.join(top_dir, 'sub_dir')
+    read_only_path = (os.path.join(top_dir, 'read_only'))
+    writable_path = (os.path.join(top_dir, 'writable'))
+    executable_path = (os.path.join(top_dir, 'executable'))
+    sub_read_only_path = (os.path.join(sub_dir, 'read_only'))
+    sub_writable_path = (os.path.join(sub_dir, 'writable'))
+    sub_executable_path = (os.path.join(sub_dir, 'executable'))
+    # assert contents as expected
+    self.assertTrue(os.path.isdir(top_dir))
+    self.assertTrue(os.path.isdir(sub_dir))
+    self.assertTrue(os.path.isfile(read_only_path))
+    self.assertTrue(os.path.isfile(writable_path))
+    self.assertTrue(os.path.isfile(executable_path))
+    self.assertTrue(os.path.isfile(sub_read_only_path))
+    self.assertTrue(os.path.isfile(sub_writable_path))
+    self.assertTrue(os.path.isfile(sub_executable_path))
+
+    # assert permissions as expected
+    self.assertTrue(
+        stat.S_IRUSR & stat.S_IMODE(os.stat(read_only_path).st_mode))
+    self.assertTrue(
+        stat.S_IRUSR & stat.S_IMODE(os.stat(sub_read_only_path).st_mode))
+    self.assertTrue(
+        stat.S_IRUSR & stat.S_IMODE(os.stat(writable_path).st_mode))
+    self.assertTrue(
+        stat.S_IWUSR & stat.S_IMODE(os.stat(writable_path).st_mode))
+    self.assertTrue(
+        stat.S_IRUSR & stat.S_IMODE(os.stat(sub_writable_path).st_mode))
+    self.assertTrue(
+        stat.S_IWUSR & stat.S_IMODE(os.stat(sub_writable_path).st_mode))
+    if not sys.platform.startswith('win'):
+      self.assertEqual(
+          stat.S_IRWXU,
+          stat.S_IRWXU & stat.S_IMODE(os.stat(executable_path).st_mode))
+      self.assertEqual(
+          stat.S_IRWXU,
+          stat.S_IRWXU & stat.S_IMODE(os.stat(sub_executable_path).st_mode))
+
+  def CreateZipArchiveFromDir(self, dir_path):
+    archive_path = None
+    try:
+      base_path = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))
+      archive_path = shutil.make_archive(base_path, 'zip', dir_path)
+      self.assertTrue(os.path.exists(archive_path))
+      self.assertTrue(zipfile.is_zipfile(archive_path))
+    except:
+      # Don't mask the original error if make_archive itself failed.
+      if archive_path and os.path.isfile(archive_path):
+        os.remove(archive_path)
+      raise
+    return archive_path
+
+  def testRemoveDirWithSubDir(self):
+    dependency_manager_util.RemoveDir(self.tmp_dir)
+
+    self.assertFalse(os.path.exists(self.tmp_dir))
+    self.assertFalse(os.path.exists(self.sub_dir))
+    self.assertFalse(os.path.exists(self.read_only_path))
+    self.assertFalse(os.path.exists(self.writable_path))
+    self.assertFalse(os.path.isfile(self.executable_path))
+    self.assertFalse(os.path.exists(self.sub_read_only_path))
+    self.assertFalse(os.path.exists(self.sub_writable_path))
+    self.assertFalse(os.path.isfile(self.sub_executable_path))
+
+  def testUnzipFile(self):
+    self.AssertExpectedDirFiles(self.tmp_dir)
+    unzip_path = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))
+    dependency_manager_util.UnzipArchive(self.archive_path, unzip_path)
+    self.AssertExpectedDirFiles(unzip_path)
+    self.AssertExpectedDirFiles(self.tmp_dir)
+    dependency_manager_util.RemoveDir(unzip_path)
+
+  def testUnzipFileFailure(self):
+    unzip_path = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))
+    self.assertFalse(os.path.exists(unzip_path))
+    with mock.patch(
+        'dependency_manager.dependency_manager_util.zipfile.ZipFile.extractall'  # pylint: disable=line-too-long
+        ) as zipfile_mock:
+      zipfile_mock.side_effect = IOError
+      self.assertRaises(
+          IOError, dependency_manager_util.UnzipArchive, self.archive_path,
+          unzip_path)
+    self.AssertExpectedDirFiles(self.tmp_dir)
+    self.assertFalse(os.path.exists(unzip_path))
+
+  def testVerifySafeArchivePasses(self):
+    with zipfile.ZipFile(self.archive_path) as archive:
+      dependency_manager_util.VerifySafeArchive(archive)
+
+  def testVerifySafeArchiveFailsOnRelativePathWithPardir(self):
+    tmp_file = tempfile.NamedTemporaryFile(delete=False)
+    tmp_file_name = tmp_file.name
+    tmp_file.write('Bad file!')
+    tmp_file.close()
+    with zipfile.ZipFile(self.archive_path, 'w') as archive:
+      archive.write(tmp_file_name, '../../foo')
+      self.assertRaises(
+          exceptions.ArchiveError, dependency_manager_util.VerifySafeArchive,
+          archive)
+
diff --git a/catapult/dependency_manager/dependency_manager/exceptions.py b/catapult/dependency_manager/dependency_manager/exceptions.py
new file mode 100644
index 0000000..96cb4e6
--- /dev/null
+++ b/catapult/dependency_manager/dependency_manager/exceptions.py
@@ -0,0 +1,52 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from catapult_base import cloud_storage
+
+
+CloudStorageError = cloud_storage.CloudStorageError
+
+
+class UnsupportedConfigFormatError(ValueError):
+  def __init__(self, config_type, config_file):
+    if not config_type:
+      message = ('The json file at %s is unsupported by the dependency_manager '
+                 'because no config type is specified.' % config_file)
+    else:
+      message = ('The json file at %s has config type %s, which is unsupported '
+                 'by the dependency manager.' % (config_file, config_type))
+    super(UnsupportedConfigFormatError, self).__init__(message)
+
+
+class EmptyConfigError(ValueError):
+  def __init__(self, file_path):
+    super(EmptyConfigError, self).__init__('Empty config at %s.' % file_path)
+
+
+class FileNotFoundError(Exception):
+  def __init__(self, file_path):
+    super(FileNotFoundError, self).__init__('No file found at %s' % file_path)
+
+
+class NoPathFoundError(Exception):
+  def __init__(self, dependency, platform):
+    super(NoPathFoundError, self).__init__(
+        'No file could be found locally, and no file to download from cloud '
+        'storage for %s on platform %s' % (dependency, platform))
+
+
+class ReadWriteError(Exception):
+  pass
+
+
+class CloudStorageUploadConflictError(CloudStorageError):
+  def __init__(self, bucket, path):
+    super(CloudStorageUploadConflictError, self).__init__(
+        'File location %s already exists in bucket %s' % (path, bucket))
+
+
+class ArchiveError(Exception):
+  def __init__(self, msg):
+    super(ArchiveError, self).__init__(msg)
+
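
CloudStorageError is re-exported here from catapult_base.cloud_storage, so callers can catch the cloud-storage family of errors through this module; CloudStorageUploadConflictError above is one such subclass. A minimal sketch with a hypothetical bucket and path:

from dependency_manager import exceptions

try:
  raise exceptions.CloudStorageUploadConflictError(
      'example-bucket', 'deps/example_dep/abc123')
except exceptions.CloudStorageError:
  pass  # CloudStorageUploadConflictError derives from the re-exported base.
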
diff --git a/catapult/dependency_manager/dependency_manager/local_path_info.py b/catapult/dependency_manager/dependency_manager/local_path_info.py
new file mode 100644
index 0000000..0103e8f
--- /dev/null
+++ b/catapult/dependency_manager/dependency_manager/local_path_info.py
@@ -0,0 +1,40 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+
+
+class LocalPathInfo(object):
+
+  def __init__(self, path_priority_groups):
+    self._path_priority_groups = self._ParseLocalPaths(path_priority_groups)
+
+  def GetLocalPath(self):
+    for priority_group in self._path_priority_groups:
+      priority_group = filter(os.path.exists, priority_group)
+      if not priority_group:
+        continue
+      return max(priority_group, key=lambda path: os.stat(path).st_mtime)
+    return None
+
+  def IsPathInLocalPaths(self, path):
+    return any(
+        path in priority_group for priority_group in self._path_priority_groups)
+
+  def Update(self, local_path_info):
+    if not local_path_info:
+      return
+    for priority_group in local_path_info._path_priority_groups:
+      group_list = []
+      for path in priority_group:
+        if not self.IsPathInLocalPaths(path):
+          group_list.append(path)
+      if group_list:
+        self._path_priority_groups.append(group_list)
+
+  @staticmethod
+  def _ParseLocalPaths(local_paths):
+    if not local_paths:
+      return []
+    return [[e] if isinstance(e, basestring) else e for e in local_paths]
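
A minimal sketch of the priority-group behaviour above; all paths are hypothetical. Entries may be single paths or lists, each entry forms one priority group, the most recently modified existing file in the highest non-empty group wins, and Update only appends paths that are not already tracked:

import dependency_manager

info = dependency_manager.LocalPathInfo([
    ['out/Release/example_dep', 'out/Debug/example_dep'],  # group 1
    'prebuilt/example_dep',                                # group 2 (fallback)
])
print(info.GetLocalPath())  # newest existing file in group 1, else group 2, else None

info.Update(dependency_manager.LocalPathInfo(
    ['prebuilt/example_dep', 'vendor/example_dep']))
print(info.IsPathInLocalPaths('vendor/example_dep'))  # True: appended as a new group
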
diff --git a/catapult/dependency_manager/dependency_manager/manager.py b/catapult/dependency_manager/dependency_manager/manager.py
new file mode 100644
index 0000000..d5fbf77
--- /dev/null
+++ b/catapult/dependency_manager/dependency_manager/manager.py
@@ -0,0 +1,208 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+
+from dependency_manager import base_config
+from dependency_manager import exceptions
+
+
+DEFAULT_TYPE = 'default'
+
+
+class DependencyManager(object):
+  def __init__(self, configs, supported_config_types=None):
+    """Manages file dependencies found locally or in cloud_storage.
+
+    Args:
+        configs: A list of instances of BaseConfig or its subclasses, passed
+            in decreasing order of precedence.
+        supported_config_types: A list of whitelisted config_types.
+            No restrictions if None is specified.
+
+    Raises:
+        ValueError: If |configs| is not a list of instances of BaseConfig or
+            its subclasses.
+        UnsupportedConfigFormatError: If supported_config_types is specified and
+            configs contains a config not in the supported config_types.
+
+    Example: DependencyManager([config1, config2, config3])
+        The configs may be of any supported type. If a dependency has local
+        files for the same platform in more than one config, those from
+        config1 are checked first, then config2, and finally config3.
+    """
+    if configs is None or type(configs) != list:
+      raise ValueError(
+          'Must supply a list of config files to DependencyManager')
+    # self._lookup_dict is a dictionary with the following format:
+    # { dependency1: {platform1: dependency_info1,
+    #                 platform2: dependency_info2}
+    #   dependency2: {platform1: dependency_info3,
+    #                  ...}
+    #   ...}
+    #
+    # Where the dependencies and platforms are strings, and the
+    # dependency_info's are DependencyInfo instances.
+    self._lookup_dict = {}
+    self.supported_configs = supported_config_types or []
+    for config in configs:
+      self._UpdateDependencies(config)
+
+  def FetchPath(self, dependency, platform):
+    """Get a path to an executable for |dependency|, downloading as needed.
+
+    A path to a default executable may be returned if a platform specific
+    version is not specified in the config(s).
+
+    Args:
+        dependency: Name of the desired dependency, as given in the config(s)
+            used in this DependencyManager.
+        platform: Name of the platform the dependency will run on. Often of the
+            form 'os_architecture'. Must match those specified in the config(s)
+            used in this DependencyManager.
+    Returns:
+        A path to an executable of |dependency| that will run on |platform|,
+        downloading from cloud storage if needed.
+
+    Raises:
+        NoPathFoundError: If a local copy of the executable cannot be found and
+            a remote path could not be downloaded from cloud_storage.
+        CredentialsError: If cloud_storage credentials aren't configured.
+        PermissionError: If cloud_storage credentials are configured, but not
+            with an account that has permission to download the remote file.
+        NotFoundError: If the remote file does not exist where expected in
+            cloud_storage.
+        ServerError: If an internal server error is hit while downloading the
+            remote file.
+        CloudStorageError: If another error occurred while downloading the remote
+            path.
+        FileNotFoundError: If an attempted download was otherwise unsuccessful.
+
+    """
+    dependency_info = self._GetDependencyInfo(dependency, platform)
+    if not dependency_info:
+      raise exceptions.NoPathFoundError(dependency, platform)
+    path = dependency_info.GetLocalPath()
+    if not path or not os.path.exists(path):
+      path = dependency_info.GetRemotePath()
+      if not path or not os.path.exists(path):
+        raise exceptions.NoPathFoundError(dependency, platform)
+    return path
+
+  def LocalPath(self, dependency, platform):
+    """Get a path to a locally stored executable for |dependency|.
+
+    A path to a default executable may be returned if a platform specific
+    version is not specified in the config(s).
+    Will not download the executable.
+
+    Args:
+        dependency: Name of the desired dependency, as given in the config(s)
+            used in this DependencyManager.
+        platform: Name of the platform the dependency will run on. Often of the
+            form 'os_architecture'. Must match those specified in the config(s)
+            used in this DependencyManager.
+    Returns:
+        A path to an executable for |dependency| that will run on |platform|.
+
+    Raises:
+        NoPathFoundError: If a local copy of the executable cannot be found.
+    """
+    dependency_info = self._GetDependencyInfo(dependency, platform)
+    if not dependency_info:
+      raise exceptions.NoPathFoundError(dependency, platform)
+    local_path = dependency_info.GetLocalPath()
+    if not local_path or not os.path.exists(local_path):
+      raise exceptions.NoPathFoundError(dependency, platform)
+    return local_path
+
+  def PrefetchPaths(self, platform, dependencies=None, cloud_storage_retries=3):
+    """Prefetch local copies of the given dependencies for |platform|.
+
+    Dependencies not already present locally are downloaded from cloud
+    storage, retrying each download up to |cloud_storage_retries| extra times.
+    If |dependencies| is None, all configured dependencies are prefetched.
+
+    Returns:
+        A (found_deps, skipped_deps) tuple of dependency name lists.
+
+    Raises:
+        NoPathFoundError: If any dependency could not be found locally or
+            fetched from cloud storage.
+    """
+    if not dependencies:
+      dependencies = self._lookup_dict.keys()
+
+    skipped_deps = []
+    found_deps = []
+    missing_deps = []
+    for dependency in dependencies:
+      dependency_info = self._GetDependencyInfo(dependency, platform)
+      if not dependency_info:
+        # The dependency is only configured for other platforms.
+        skipped_deps.append(dependency)
+        logging.warning(
+            'Dependency %s not configured for platform %s. Skipping prefetch.',
+            dependency, platform)
+        continue
+      local_path = dependency_info.GetLocalPath()
+      if local_path:
+        found_deps.append(dependency)
+        continue
+      fetched_path = None
+      for _ in range(0, cloud_storage_retries + 1):
+        try:
+          fetched_path = dependency_info.GetRemotePath()
+        except exceptions.CloudStorageError:
+          continue
+        break
+      if fetched_path:
+        found_deps.append(dependency)
+      else:
+        missing_deps.append(dependency)
+        logging.error(
+            'Dependency %s could not be found or fetched from cloud storage for'
+            ' platform %s.', dependency, platform)
+    if missing_deps:
+      raise exceptions.NoPathFoundError(', '.join(missing_deps), platform)
+    return (found_deps, skipped_deps)
+
+  def _UpdateDependencies(self, config):
+    """Add the dependency information stored in |config| to this instance.
+
+    Args:
+        config: An instance of BaseConfig or one of its subclasses.
+
+    Raises:
+        UnsupportedConfigFormatError: If supported_config_types was specified
+            and config is not in the supported config_types.
+    """
+    if not isinstance(config, base_config.BaseConfig):
+      raise ValueError('Must use a BaseConfig or subclass instance with the '
+                       'DependencyManager.')
+    if (self.supported_configs and
+        config.GetConfigType() not in self.supported_configs):
+      raise exceptions.UnsupportedConfigFormatError(config.GetConfigType(),
+                                                    config.config_path)
+    for dep_info in config.IterDependencyInfo():
+      dependency = dep_info.dependency
+      platform = dep_info.platform
+      if dependency not in self._lookup_dict:
+        self._lookup_dict[dependency] = {}
+      if platform not in self._lookup_dict[dependency]:
+        self._lookup_dict[dependency][platform] = dep_info
+      else:
+        self._lookup_dict[dependency][platform].Update(dep_info)
+
+
+  def _GetDependencyInfo(self, dependency, platform):
+    """Get information for |dependency| on |platform|, or a default if needed.
+
+    Args:
+        dependency: Name of the desired dependency, as given in the config(s)
+            used in this DependencyManager.
+        platform: Name of the platform the dependency will run on. Often of the
+            form 'os_architecture'. Must match those specified in the config(s)
+            used in this DependencyManager.
+
+    Returns: The dependency_info for |dependency| on |platform| if it exists,
+        otherwise the default version of |dependency| if one exists, or None
+        if neither exists.
+    """
+    if not self._lookup_dict or dependency not in self._lookup_dict:
+      return None
+    dependency_dict = self._lookup_dict[dependency]
+    device_type = platform
+    if not device_type in dependency_dict:
+      device_type = DEFAULT_TYPE
+    return dependency_dict.get(device_type)
+
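The manager above resolves a (dependency, platform) pair through its lookup dict: FetchPath prefers a local copy and falls back to a cloud storage download, while LocalPath never downloads. A minimal usage sketch follows, assuming a hypothetical BaseConfig-format file at path/to/deps.json that defines an 'adb' entry for 'linux_x86_64'; the path and names are illustrative only.

```
from dependency_manager import base_config
from dependency_manager import dependency_manager
from dependency_manager import exceptions

# Hypothetical config path and dependency/platform names.
config = base_config.BaseConfig('path/to/deps.json')
manager = dependency_manager.DependencyManager([config])

try:
  # Downloads from cloud storage if no local copy is present.
  adb_path = manager.FetchPath('adb', 'linux_x86_64')
except exceptions.NoPathFoundError:
  adb_path = None  # Not configured for this platform and no 'default' entry.

# LocalPath never downloads; it raises NoPathFoundError if the file is not
# already on disk.
local_adb = manager.LocalPath('adb', 'linux_x86_64')
```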
diff --git a/catapult/dependency_manager/dependency_manager/uploader.py b/catapult/dependency_manager/dependency_manager/uploader.py
new file mode 100644
index 0000000..34b91f5
--- /dev/null
+++ b/catapult/dependency_manager/dependency_manager/uploader.py
@@ -0,0 +1,108 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+
+from catapult_base import cloud_storage
+
+from dependency_manager import exceptions
+
+
+BACKUP_PATH_EXTENSION = 'old'
+
+
+class CloudStorageUploader(object):
+  def __init__(self, bucket, remote_path, local_path, cs_backup_path=None):
+    if not bucket or not remote_path or not local_path:
+      raise ValueError(
+          'Attempted to partially initialize upload data with bucket %s, '
+          'remote_path %s, and local_path %s'
+          % (bucket, remote_path, local_path))
+    if not os.path.exists(local_path):
+      raise ValueError('Attempting to initialize CloudStorageUploader with '
+                       'missing local path %s' % local_path)
+
+    self._cs_bucket = bucket
+    self._cs_remote_path = remote_path
+    self._local_path = local_path
+    self._cs_backup_path = (cs_backup_path or
+                            '%s.%s' % (self._cs_remote_path,
+                                       BACKUP_PATH_EXTENSION))
+    self._updated = False
+    self._backed_up = False
+
+  def Upload(self, force=False):
+    """Upload all pending files and then write the updated config to disk.
+
+    Will attempt to copy files existing in the upload location to a backup
+    location in the same bucket in cloud storage if |force| is True.
+
+    Args:
+      force: True if files should be uploaded to cloud storage even if a
+          file already exists in the upload location.
+
+    Raises:
+      CloudStorageUploadConflictError: If |force| is False and the potential
+          upload location of a file already exists.
+      CloudStorageError: If copying an existing file to the backup location
+          or uploading the new file fails.
+    """
+    if cloud_storage.Exists(self._cs_bucket, self._cs_remote_path):
+      if not force:
+        #pylint: disable=nonstandard-exception
+        raise exceptions.CloudStorageUploadConflictError(self._cs_bucket,
+                                                         self._cs_remote_path)
+        #pylint: enable=nonstandard-exception
+      logging.debug('A file already exists at upload path %s in cloud storage '
+                    'bucket %s.', self._cs_remote_path, self._cs_bucket)
+      try:
+        cloud_storage.Copy(self._cs_bucket, self._cs_bucket,
+                           self._cs_remote_path, self._cs_backup_path)
+        self._backed_up = True
+      except cloud_storage.CloudStorageError:
+        logging.error('Failed to copy existing file %s in cloud storage bucket '
+                      '%s to backup location %s', self._cs_remote_path,
+                      self._cs_bucket, self._cs_backup_path)
+        raise
+
+    try:
+      cloud_storage.Insert(
+          self._cs_bucket, self._cs_remote_path, self._local_path)
+    except cloud_storage.CloudStorageError:
+      logging.error('Failed to upload %s to %s in cloud_storage bucket %s',
+                    self._local_path, self._cs_remote_path, self._cs_bucket)
+      raise
+    self._updated = True
+
+  def Rollback(self):
+    """Attempt to undo the previous call to Upload.
+
+    Does nothing if no previous call to Upload was made, or if nothing was
+    successfully changed.
+
+    Returns:
+      True iff changes were successfully rolled back.
+    Raises:
+      CloudStorageError: If copying the backed up file to its original
+          location or removing the uploaded file fails.
+    """
+    cloud_storage_changed = False
+    if self._backed_up:
+      cloud_storage.Copy(self._cs_bucket, self._cs_bucket, self._cs_backup_path,
+                         self._cs_remote_path)
+      cloud_storage_changed = True
+      self._cs_backup_path = None
+    elif self._updated:
+      cloud_storage.Delete(self._cs_bucket, self._cs_remote_path)
+      cloud_storage_changed = True
+    self._updated = False
+    return cloud_storage_changed
+
+  def __eq__(self, other, msg=None):
+    if type(self) != type(other):
+      return False
+    return (self._local_path == other._local_path and
+            self._cs_remote_path == other._cs_remote_path and
+            self._cs_bucket == other._cs_bucket)
+
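As a usage note for CloudStorageUploader: Upload refuses to overwrite an existing remote file unless force=True, in which case the existing file is first copied to '<remote_path>.old' in the same bucket, and Rollback either restores that backup or deletes the fresh upload. A minimal sketch with a hypothetical bucket, remote path, and local file, assuming cloud storage credentials are already configured:

```
from dependency_manager import uploader

# Hypothetical bucket, remote path, and local file.
pending = uploader.CloudStorageUploader(
    'my-bucket', 'deps/adb/1a2b3c', '/tmp/adb')

try:
  # With force=True, an existing remote file is backed up to
  # 'deps/adb/1a2b3c.old' before the new file is inserted.
  pending.Upload(force=True)
except Exception:
  # Restore the backup, or delete the new upload if no backup was made.
  pending.Rollback()
  raise
```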
diff --git a/catapult/dependency_manager/dependency_manager/uploader_unittest.py b/catapult/dependency_manager/dependency_manager/uploader_unittest.py
new file mode 100644
index 0000000..5c8e2a0
--- /dev/null
+++ b/catapult/dependency_manager/dependency_manager/uploader_unittest.py
@@ -0,0 +1,91 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+
+from pyfakefs import fake_filesystem_unittest
+
+from dependency_manager import uploader
+
+
+class CloudStorageUploaderTest(fake_filesystem_unittest.TestCase):
+  def setUp(self):
+    self.setUpPyfakefs()
+    self.bucket = 'cloud_storage_bucket'
+    self.local_path = os.path.abspath(os.path.join('path', 'to', 'dependency'))
+    self.fs.CreateFile(self.local_path)
+    self.remote_path = 'config_folder/remote_path'
+
+  def testCloudStorageUploaderMissingData(self):
+    self.assertRaises(ValueError, uploader.CloudStorageUploader,
+                      None, self.remote_path, self.local_path)
+    self.assertRaises(ValueError, uploader.CloudStorageUploader,
+                      self.bucket, None, self.local_path)
+    self.assertRaises(ValueError, uploader.CloudStorageUploader,
+                      self.bucket, self.remote_path, None)
+
+  def testCloudStorageUploaderLocalFileMissing(self):
+    self.fs.RemoveObject(self.local_path)
+    self.assertRaises(ValueError, uploader.CloudStorageUploader,
+                      self.bucket, self.remote_path, self.local_path)
+
+  def testCloudStorageUploaderCreation(self):
+    upload_data = uploader.CloudStorageUploader(
+        self.bucket, self.remote_path, self.local_path)
+    expected_bucket = self.bucket
+    expected_remote_path = self.remote_path
+    expected_cs_backup_path = '%s.old' % expected_remote_path
+    expected_local_path = self.local_path
+    self.assertEqual(expected_bucket, upload_data._cs_bucket)
+    self.assertEqual(expected_remote_path, upload_data._cs_remote_path)
+    self.assertEqual(expected_local_path, upload_data._local_path)
+    self.assertEqual(expected_cs_backup_path, upload_data._cs_backup_path)
+
+  def testCloudStorageUploaderEquality(self):
+    upload_data = uploader.CloudStorageUploader(
+        self.bucket, self.remote_path, self.local_path)
+    upload_data_exact = uploader.CloudStorageUploader(
+        self.bucket, self.remote_path, self.local_path)
+    upload_data_equal = uploader.CloudStorageUploader(
+        'cloud_storage_bucket',
+        'config_folder/remote_path',
+        os.path.abspath(os.path.join('path', 'to', 'dependency')))
+    self.assertEqual(upload_data, upload_data)
+    self.assertEqual(upload_data, upload_data_exact)
+    self.assertEqual(upload_data_exact, upload_data)
+    self.assertEqual(upload_data, upload_data_equal)
+    self.assertEqual(upload_data_equal, upload_data)
+
+
+  def testCloudStorageUploaderInequality(self):
+    new_local_path = os.path.abspath(os.path.join('new', 'local', 'path'))
+    self.fs.CreateFile(new_local_path)
+    new_bucket = 'new_bucket'
+    new_remote_path = 'new_remote/path'
+
+    upload_data = uploader.CloudStorageUploader(
+        self.bucket, self.remote_path, self.local_path)
+    upload_data_all_different = uploader.CloudStorageUploader(
+        new_bucket, new_remote_path, new_local_path)
+    upload_data_different_bucket = uploader.CloudStorageUploader(
+        new_bucket, self.remote_path, self.local_path)
+    upload_data_different_remote_path = uploader.CloudStorageUploader(
+        self.bucket, new_remote_path, self.local_path)
+    upload_data_different_local_path = uploader.CloudStorageUploader(
+        self.bucket, self.remote_path, new_local_path)
+
+    self.assertNotEqual(upload_data, 'a string!')
+    self.assertNotEqual(upload_data, 0)
+    self.assertNotEqual(upload_data, 2354)
+    self.assertNotEqual(upload_data, None)
+    self.assertNotEqual(upload_data, upload_data_all_different)
+    self.assertNotEqual(upload_data_all_different, upload_data)
+    self.assertNotEqual(upload_data, upload_data_different_bucket)
+    self.assertNotEqual(upload_data_different_bucket, upload_data)
+    self.assertNotEqual(upload_data, upload_data_different_remote_path)
+    self.assertNotEqual(upload_data_different_remote_path, upload_data)
+    self.assertNotEqual(upload_data, upload_data_different_local_path)
+    self.assertNotEqual(upload_data_different_local_path, upload_data)
+
+  #TODO: write unittests for upload and rollback
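The TODO above is still open; the following is a minimal sketch of what Upload and Rollback tests could look like. It assumes the mock package is importable in this test environment and stubs out the cloud_storage module that uploader.py calls into; it is an illustration, not part of the actual test suite.

```
import os
import unittest

import mock  # Assumed available, as in other catapult unit tests.

from dependency_manager import exceptions
from dependency_manager import uploader


class CloudStorageUploaderUploadTest(unittest.TestCase):
  def setUp(self):
    # Any file that exists on disk satisfies the constructor's check.
    self.local_path = os.path.abspath(__file__)
    self.upload = uploader.CloudStorageUploader(
        'bucket', 'remote/path', self.local_path)

  @mock.patch.object(uploader, 'cloud_storage')
  def testUploadConflictWithoutForce(self, cs):
    cs.Exists.return_value = True
    with self.assertRaises(exceptions.CloudStorageUploadConflictError):
      self.upload.Upload(force=False)
    self.assertFalse(cs.Insert.called)

  @mock.patch.object(uploader, 'cloud_storage')
  def testUploadThenRollbackDeletesNewFile(self, cs):
    cs.Exists.return_value = False
    self.upload.Upload()
    cs.Insert.assert_called_once_with('bucket', 'remote/path', self.local_path)
    self.assertTrue(self.upload.Rollback())
    cs.Delete.assert_called_once_with('bucket', 'remote/path')
```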
diff --git a/catapult/dependency_manager/pylintrc b/catapult/dependency_manager/pylintrc
new file mode 100644
index 0000000..4541fb8
--- /dev/null
+++ b/catapult/dependency_manager/pylintrc
@@ -0,0 +1,68 @@
+[MESSAGES CONTROL]
+
+# Disable the message, report, category or checker with the given id(s).
+# TODO: Shrink this list to as small as possible.
+disable=
+  design,
+  similarities,
+
+  fixme,
+  locally-disabled,
+  locally-enabled,
+  missing-docstring,
+  no-member,
+  no-self-use,
+  protected-access,
+  star-args,
+
+
+[REPORTS]
+
+# Don't write out full reports, just messages.
+reports=no
+
+
+[BASIC]
+
+# Regular expression which should only match correct function names.
+function-rgx=^(?:(?P<exempt>setUp|tearDown|setUpModule|tearDownModule)|(?P<camel_case>_?[A-Z][a-zA-Z0-9]*))$
+
+# Regular expression which should only match correct method names.
+method-rgx=^(?:(?P<exempt>_[a-z0-9_]+__|get|post|runTest|setUp|tearDown|setUpTestCase|tearDownTestCase|setupSelf|tearDownClass|setUpClass)|(?P<camel_case>(_{0,2}|test|assert)[A-Z][a-zA-Z0-9_]*))$
+
+# Regular expression which should only match correct argument names.
+argument-rgx=^[a-z][a-z0-9_]*$
+
+# Regular expression which should only match correct variable names.
+variable-rgx=^[a-z][a-z0-9_]*$
+
+# Good variable names which should always be accepted, separated by a comma.
+good-names=main,_
+
+# List of builtins function names that should not be used, separated by a comma.
+bad-functions=apply,input,reduce
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# A regular expression matching names used for dummy variables (i.e. not used).
+dummy-variables-rgx=^\*{0,2}(_$|unused_)
+
+
+[TYPECHECK]
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+
+[FORMAT]
+
+# Maximum number of lines in a module.
+max-module-lines=2000
+
+# We use two spaces for indents, instead of the usual four spaces or tab.
+indent-string='  '
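The function and method regexes in the [BASIC] section encode the CamelCase naming convention used throughout these files, with exemptions for unittest hooks. A small illustration of what the method regex accepts and rejects, with the pattern copied verbatim from above:

```
import re

# Pattern copied from method-rgx in the pylintrc above.
METHOD_RGX = re.compile(
    r'^(?:(?P<exempt>_[a-z0-9_]+__|get|post|runTest|setUp|tearDown'
    r'|setUpTestCase|tearDownTestCase|setupSelf|tearDownClass|setUpClass)'
    r'|(?P<camel_case>(_{0,2}|test|assert)[A-Z][a-zA-Z0-9_]*))$')

assert METHOD_RGX.match('GetPackageName')        # CamelCase: accepted.
assert METHOD_RGX.match('_GetManifest')          # Private CamelCase: accepted.
assert METHOD_RGX.match('testFind_byText')       # test-prefixed: accepted.
assert METHOD_RGX.match('setUp')                 # unittest hook: exempt.
assert not METHOD_RGX.match('get_package_name')  # snake_case: rejected.
```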
diff --git a/catapult/devil/PRESUBMIT.py b/catapult/devil/PRESUBMIT.py
new file mode 100644
index 0000000..edec3e1
--- /dev/null
+++ b/catapult/devil/PRESUBMIT.py
@@ -0,0 +1,85 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Presubmit script for devil.
+
+See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
+details on the presubmit API built into depot_tools.
+"""
+
+
+def _RunPylint(input_api, output_api):
+  return input_api.RunTests(input_api.canned_checks.RunPylint(
+      input_api, output_api, pylintrc='pylintrc'))
+
+
+def _RunUnitTests(input_api, output_api):
+  def J(*dirs):
+    """Returns a path relative to presubmit directory."""
+    return input_api.os_path.join(
+        input_api.PresubmitLocalPath(), 'devil', *dirs)
+
+  test_env = dict(input_api.environ)
+  test_env.update({
+    'PYTHONDONTWRITEBYTECODE': '1',
+    'PYTHONPATH': ':'.join([J(), J('..')]),
+  })
+
+  return input_api.canned_checks.RunUnitTests(
+      input_api,
+      output_api,
+      unit_tests=[
+          J('devil_env_test.py'),
+          J('android', 'battery_utils_test.py'),
+          J('android', 'device_utils_test.py'),
+          J('android', 'fastboot_utils_test.py'),
+          J('android', 'md5sum_test.py'),
+          J('android', 'logcat_monitor_test.py'),
+          J('android', 'tools', 'script_common_test.py'),
+          J('utils', 'cmd_helper_test.py'),
+          J('utils', 'timeout_retry_unittest.py'),
+      ],
+      env=test_env)
+
+
+def _EnsureNoPylibUse(input_api, output_api):
+  def other_python_files(f):
+    this_presubmit_file = input_api.os_path.join(
+        input_api.PresubmitLocalPath(), 'PRESUBMIT.py')
+    return (f.LocalPath().endswith('.py')
+            and not f.AbsoluteLocalPath() == this_presubmit_file)
+
+  changed_files = input_api.AffectedSourceFiles(other_python_files)
+  import_error_re = input_api.re.compile(
+      r'(from pylib.* import)|(import pylib)')
+
+  errors = []
+  for f in changed_files:
+    errors.extend(
+        '%s:%d' % (f.LocalPath(), line_number)
+        for line_number, line_text in f.ChangedContents()
+        if import_error_re.search(line_text))
+
+  if errors:
+    return [output_api.PresubmitError(
+        'pylib modules should not be imported from devil modules.',
+        items=errors)]
+  return []
+
+
+def CommonChecks(input_api, output_api):
+  output = []
+  output += _RunPylint(input_api, output_api)
+  output += _RunUnitTests(input_api, output_api)
+  output += _EnsureNoPylibUse(input_api, output_api)
+  return output
+
+
+def CheckChangeOnUpload(input_api, output_api):
+  return CommonChecks(input_api, output_api)
+
+
+def CheckChangeOnCommit(input_api, output_api):
+  return CommonChecks(input_api, output_api)
+
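_EnsureNoPylibUse above flags any changed Python line that imports pylib, so devil code stays independent of it. A quick, self-contained illustration of the regex it relies on:

```
import re

# Same pattern as import_error_re in _EnsureNoPylibUse above.
PYLIB_IMPORT_RE = re.compile(r'(from pylib.* import)|(import pylib)')

assert PYLIB_IMPORT_RE.search('from pylib.device import device_utils')
assert PYLIB_IMPORT_RE.search('import pylib')
assert not PYLIB_IMPORT_RE.search('from devil.android import device_utils')
```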
diff --git a/catapult/devil/bin/run_py_tests b/catapult/devil/bin/run_py_tests
new file mode 100755
index 0000000..44ec61e
--- /dev/null
+++ b/catapult/devil/bin/run_py_tests
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+
+_CATAPULT_PATH = os.path.abspath(os.path.join(
+    os.path.dirname(__file__), '..', '..'))
+_DEVIL_PATH = os.path.abspath(os.path.join(
+    os.path.dirname(__file__), '..'))
+
+sys.path.append(_CATAPULT_PATH)
+from catapult_build import run_with_typ
+
+
+def main():
+  return run_with_typ.Run(top_level_dir=_DEVIL_PATH)
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/catapult/devil/devil/OWNERS b/catapult/devil/devil/OWNERS
new file mode 100644
index 0000000..fd584fc
--- /dev/null
+++ b/catapult/devil/devil/OWNERS
@@ -0,0 +1,4 @@
+jbudorick@chromium.org
+mikecase@chromium.org
+perezju@chromium.org
+rnephew@chromium.org
diff --git a/catapult/devil/devil/README.md b/catapult/devil/devil/README.md
new file mode 100644
index 0000000..b3eb5d0
--- /dev/null
+++ b/catapult/devil/devil/README.md
@@ -0,0 +1,17 @@
+<!-- Copyright 2015 The Chromium Authors. All rights reserved.
+     Use of this source code is governed by a BSD-style license that can be
+     found in the LICENSE file.
+-->
+devil
+=====
+
+devil is a library used by the Chromium developers to interact with Android
+devices. It currently supports SDK level 16 and above.
+
+😈
+
+Contributing
+============
+
+Please see the [contributor's guide](https://github.com/catapult-project/catapult/blob/master/CONTRIBUTING.md).
+
diff --git a/catapult/devil/devil/__init__.py b/catapult/devil/devil/__init__.py
new file mode 100644
index 0000000..50b23df
--- /dev/null
+++ b/catapult/devil/devil/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/catapult/devil/devil/android/__init__.py b/catapult/devil/devil/android/__init__.py
new file mode 100644
index 0000000..50b23df
--- /dev/null
+++ b/catapult/devil/devil/android/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/catapult/devil/devil/android/apk_helper.py b/catapult/devil/devil/android/apk_helper.py
new file mode 100644
index 0000000..61eeda0
--- /dev/null
+++ b/catapult/devil/devil/android/apk_helper.py
@@ -0,0 +1,142 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Module containing utilities for apk packages."""
+
+import re
+
+from devil.android.sdk import aapt
+
+
+_MANIFEST_ATTRIBUTE_RE = re.compile(
+    r'\s*A: ([^\(\)= ]*)(?:\([^\(\)= ]*\))?='
+    r'(?:"(.*)" \(Raw: .*\)|\(type.*?\)(.*))$')
+_MANIFEST_ELEMENT_RE = re.compile(r'\s*(?:E|N): (\S*) .*$')
+
+
+def GetPackageName(apk_path):
+  """Returns the package name of the apk."""
+  return ApkHelper(apk_path).GetPackageName()
+
+
+# TODO(jbudorick): Deprecate and remove this function once callers have been
+# converted to ApkHelper.GetInstrumentationName
+def GetInstrumentationName(apk_path):
+  """Returns the name of the Instrumentation in the apk."""
+  return ApkHelper(apk_path).GetInstrumentationName()
+
+
+def ToHelper(path_or_helper):
+  """Creates an ApkHelper unless one is already given."""
+  if isinstance(path_or_helper, basestring):
+    return ApkHelper(path_or_helper)
+  return path_or_helper
+
+
+def _ParseManifestFromApk(apk_path):
+  aapt_output = aapt.Dump('xmltree', apk_path, 'AndroidManifest.xml')
+
+  parsed_manifest = {}
+  node_stack = [parsed_manifest]
+  indent = '  '
+
+  for line in aapt_output[1:]:
+    if len(line) == 0:
+      continue
+
+    indent_depth = 0
+    while line[(len(indent) * indent_depth):].startswith(indent):
+      indent_depth += 1
+
+    node_stack = node_stack[:indent_depth]
+    node = node_stack[-1]
+
+    m = _MANIFEST_ELEMENT_RE.match(line[len(indent) * indent_depth:])
+    if m:
+      if not m.group(1) in node:
+        node[m.group(1)] = {}
+      node_stack += [node[m.group(1)]]
+      continue
+
+    m = _MANIFEST_ATTRIBUTE_RE.match(line[len(indent) * indent_depth:])
+    if m:
+      if not m.group(1) in node:
+        node[m.group(1)] = []
+      node[m.group(1)].append(m.group(2) or m.group(3))
+      continue
+
+  return parsed_manifest
+
+
+class ApkHelper(object):
+
+  def __init__(self, path):
+    self._apk_path = path
+    self._manifest = None
+
+  @property
+  def path(self):
+    return self._apk_path
+
+  def GetActivityName(self):
+    """Returns the name of the Activity in the apk."""
+    manifest_info = self._GetManifest()
+    try:
+      activity = (
+          manifest_info['manifest']['application']['activity']
+              ['android:name'][0])
+    except KeyError:
+      return None
+    if '.' not in activity:
+      activity = '%s.%s' % (self.GetPackageName(), activity)
+    elif activity.startswith('.'):
+      activity = '%s%s' % (self.GetPackageName(), activity)
+    return activity
+
+  def GetInstrumentationName(
+      self, default='android.test.InstrumentationTestRunner'):
+    """Returns the name of the Instrumentation in the apk."""
+    manifest_info = self._GetManifest()
+    try:
+      return manifest_info['manifest']['instrumentation']['android:name'][0]
+    except KeyError:
+      return default
+
+  def GetPackageName(self):
+    """Returns the package name of the apk."""
+    manifest_info = self._GetManifest()
+    try:
+      return manifest_info['manifest']['package'][0]
+    except KeyError:
+      raise Exception('Failed to determine package name of %s' % self._apk_path)
+
+  def GetPermissions(self):
+    manifest_info = self._GetManifest()
+    try:
+      return manifest_info['manifest']['uses-permission']['android:name']
+    except KeyError:
+      return []
+
+  def GetSplitName(self):
+    """Returns the name of the split of the apk."""
+    manifest_info = self._GetManifest()
+    try:
+      return manifest_info['manifest']['split'][0]
+    except KeyError:
+      return None
+
+  def HasIsolatedProcesses(self):
+    """Returns whether any services exist that use isolatedProcess=true."""
+    manifest_info = self._GetManifest()
+    try:
+      services = manifest_info['manifest']['application']['service']
+      return any(int(v, 0) for v in services['android:isolatedProcess'])
+    except KeyError:
+      return False
+
+  def _GetManifest(self):
+    if not self._manifest:
+      self._manifest = _ParseManifestFromApk(self._apk_path)
+    return self._manifest
+
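ApkHelper above lazily parses aapt dump xmltree output into nested dicts (element names map to dicts, attribute names to lists of values), and the accessors simply walk that structure. A minimal usage sketch, assuming a hypothetical APK path and an environment where devil can locate aapt:

```
from devil.android import apk_helper

# Hypothetical APK path; aapt must be resolvable by devil for this to work.
apk = apk_helper.ApkHelper('/tmp/example.apk')

# Each accessor walks the dict built by _ParseManifestFromApk, e.g.
# manifest['manifest']['package'][0] for the package name.
package = apk.GetPackageName()      # Raises if no package is declared.
activity = apk.GetActivityName()    # None if no activity is declared.
permissions = apk.GetPermissions()  # [] if there are no <uses-permission>s.
```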
diff --git a/catapult/devil/devil/android/app_ui.py b/catapult/devil/devil/android/app_ui.py
new file mode 100644
index 0000000..d5025f4
--- /dev/null
+++ b/catapult/devil/devil/android/app_ui.py
@@ -0,0 +1,213 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Provides functionality to interact with UI elements of an Android app."""
+
+import re
+from xml.etree import ElementTree as element_tree
+
+from devil.android import decorators
+from devil.android import device_temp_file
+from devil.utils import geometry
+from devil.utils import timeout_retry
+
+_DEFAULT_SHORT_TIMEOUT = 10
+_DEFAULT_SHORT_RETRIES = 3
+_DEFAULT_LONG_TIMEOUT = 30
+_DEFAULT_LONG_RETRIES = 0
+
+# Parse rectangle bounds given as: '[left,top][right,bottom]'.
+_RE_BOUNDS = re.compile(
+    r'\[(?P<left>\d+),(?P<top>\d+)\]\[(?P<right>\d+),(?P<bottom>\d+)\]')
+
+
+class _UiNode(object):
+
+  def __init__(self, device, xml_node, package=None):
+    """Object to interact with a UI node from an xml snapshot.
+
+    Note: there is usually no need to call this constructor directly. Instead,
+    use an AppUi object (below) to grab an xml screenshot from a device and
+    find nodes in it.
+
+    Args:
+      device: A device_utils.DeviceUtils instance.
+      xml_node: An ElementTree instance of the node to interact with.
+      package: An optional package name for the app owning this node.
+    """
+    self._device = device
+    self._xml_node = xml_node
+    self._package = package
+
+  def _GetAttribute(self, key):
+    """Get the value of an attribute of this node."""
+    return self._xml_node.attrib.get(key)
+
+  @property
+  def bounds(self):
+    """Get a rectangle with the bounds of this UI node.
+
+    Returns:
+      A geometry.Rectangle instance.
+    """
+    d = _RE_BOUNDS.match(self._GetAttribute('bounds')).groupdict()
+    return geometry.Rectangle.FromDict({k: int(v) for k, v in d.iteritems()})
+
+  def Tap(self, point=None, dp_units=False):
+    """Send a tap event to the UI node.
+
+    Args:
+      point: An optional geometry.Point instance indicating the location to
+        tap, relative to the bounds of the UI node, i.e. (0, 0) taps the
+        top-left corner. If omitted, the center of the node is tapped.
+      dp_units: If True, indicates that the coordinates of the point are given
+        in device-independent pixels; otherwise they are assumed to be "real"
+        pixels. This option has no effect when the point is omitted.
+    """
+    if point is None:
+      point = self.bounds.center
+    else:
+      if dp_units:
+        point = (float(self._device.pixel_density) / 160) * point
+      point += self.bounds.top_left
+
+    x, y = (str(int(v)) for v in point)
+    self._device.RunShellCommand(['input', 'tap', x, y], check_return=True)
+
+  def __getitem__(self, key):
+    """Retrieve a child of this node by its index.
+
+    Args:
+      key: An integer with the index of the child to retrieve.
+    Returns:
+      A UI node instance of the selected child.
+    Raises:
+      IndexError if the index is out of range.
+    """
+    return type(self)(self._device, self._xml_node[key], package=self._package)
+
+  def _Find(self, **kwargs):
+    """Find the first descendant node that matches a given criteria.
+
+    Note: clients would usually call AppUi.GetUiNode or AppUi.WaitForUiNode
+    instead.
+
+    For example:
+
+      app = app_ui.AppUi(device, package='org.my.app')
+      app.GetUiNode(resource_id='some_element', text='hello')
+
+    would retrieve the first matching node with both of the xml attributes:
+
+      resource-id='org.my.app:id/some_element'
+      text='hello'
+
+    As the example shows, if given and needed, the value of the resource_id key
+    is auto-completed with the package name specified in the AppUi constructor.
+
+    Args:
+      Arguments are specified as key-value pairs, where keys correspond to
+      attribute names in xml nodes (replacing any '-' with '_' to make them
+      valid identifiers). At least one argument must be supplied, and arguments
+      with a None value are ignored.
+    Returns:
+      A UI node instance of the first descendant node that matches ALL the
+      given key-value criteria; or None if no such node is found.
+    Raises:
+      TypeError if no search arguments are provided.
+    """
+    matches_criteria = self._NodeMatcher(kwargs)
+    for node in self._xml_node.iter():
+      if matches_criteria(node):
+        return type(self)(self._device, node, package=self._package)
+    return None
+
+  def _NodeMatcher(self, kwargs):
+    # Auto-complete resource-id's using the package name if available.
+    resource_id = kwargs.get('resource_id')
+    if (resource_id is not None
+        and self._package is not None
+        and ':id/' not in resource_id):
+      kwargs['resource_id'] = '%s:id/%s' % (self._package, resource_id)
+
+    criteria = [(k.replace('_', '-'), v)
+                for k, v in kwargs.iteritems()
+                if v is not None]
+    if not criteria:
+      raise TypeError('At least one search criteria should be specified')
+    return lambda node: all(node.get(k) == v for k, v in criteria)
+
+
+class AppUi(object):
+  # timeout and retry arguments appear unused, but are handled by decorator.
+  # pylint: disable=unused-argument
+
+  def __init__(self, device, package=None):
+    """Object to interact with the UI of an Android app.
+
+    Args:
+      device: A device_utils.DeviceUtils instance.
+      package: An optional package name for the app.
+    """
+    self._device = device
+    self._package = package
+
+  @property
+  def package(self):
+    return self._package
+
+  @decorators.WithTimeoutAndRetriesDefaults(_DEFAULT_SHORT_TIMEOUT,
+                                            _DEFAULT_SHORT_RETRIES)
+  def _GetRootUiNode(self, timeout=None, retries=None):
+    """Get a node pointing to the root of the UI nodes on screen.
+
+    Note: This is currently implemented via adb calls to uiautomator and it
+    is *slow*, ~2 secs per call. Do not rely on low-level implementation
+    details that may change in the future.
+
+    TODO(crbug.com/567217): Swap to a more efficient implementation.
+
+    Args:
+      timeout: A number of seconds to wait for the uiautomator dump.
+      retries: Number of times to retry if the adb command fails.
+    Returns:
+      A UI node instance pointing to the root of the xml screenshot.
+    """
+    with device_temp_file.DeviceTempFile(self._device.adb) as dtemp:
+      self._device.RunShellCommand(['uiautomator', 'dump', dtemp.name],
+                                  check_return=True)
+      xml_node = element_tree.fromstring(
+          self._device.ReadFile(dtemp.name, force_pull=True))
+    return _UiNode(self._device, xml_node, package=self._package)
+
+  def GetUiNode(self, **kwargs):
+    """Get the first node found matching a specified criteria.
+
+    Args:
+      See _UiNode._Find.
+    Returns:
+      A UI node instance of the node if found, otherwise None.
+    """
+    # pylint: disable=protected-access
+    return self._GetRootUiNode()._Find(**kwargs)
+
+  @decorators.WithTimeoutAndRetriesDefaults(_DEFAULT_LONG_TIMEOUT,
+                                            _DEFAULT_LONG_RETRIES)
+  def WaitForUiNode(self, timeout=None, retries=None, **kwargs):
+    """Wait for a node matching a given criteria to appear on the screen.
+
+    Args:
+      timeout: A number of seconds to wait for the matching node to appear.
+      retries: Number of times to retry in case of adb command errors.
+      For other args, to specify the search criteria, see _UiNode._Find.
+    Returns:
+      The UI node instance found.
+    Raises:
+      device_errors.CommandTimeoutError if the node is not found before the
+      timeout.
+    """
+    def node_found():
+      return self.GetUiNode(**kwargs)
+
+    return timeout_retry.WaitFor(node_found)
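Tying AppUi and _UiNode together: resource ids are auto-completed with the package name, WaitForUiNode polls uiautomator dumps until a match appears (or times out), and Tap issues an 'input tap' at the node's center. A minimal sketch, assuming a connected device with a hypothetical serial and a hypothetical package and resource id:

```
from devil.android import app_ui
from devil.android import device_utils

# Hypothetical serial, package, and resource id.
device = device_utils.DeviceUtils('0123456789ABCDEF')
app = app_ui.AppUi(device, package='org.example.app')

# Blocks until a node with resource-id 'org.example.app:id/login_button'
# appears in a uiautomator dump, or raises CommandTimeoutError.
button = app.WaitForUiNode(resource_id='login_button', timeout=60)
button.Tap()  # Taps the center of the node's bounds.
```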
diff --git a/catapult/devil/devil/android/app_ui_test.py b/catapult/devil/devil/android/app_ui_test.py
new file mode 100644
index 0000000..3472985
--- /dev/null
+++ b/catapult/devil/devil/android/app_ui_test.py
@@ -0,0 +1,191 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Unit tests for the app_ui module."""
+
+import unittest
+from xml.etree import ElementTree as element_tree
+
+from devil import devil_env
+from devil.android import app_ui
+from devil.android import device_errors
+from devil.utils import geometry
+
+with devil_env.SysPath(devil_env.PYMOCK_PATH):
+  import mock  # pylint: disable=import-error
+
+
+MOCK_XML_LOADING = '''
+<?xml version='1.0' encoding='UTF-8' standalone='yes' ?>
+<hierarchy rotation="0">
+  <node bounds="[0,50][1536,178]" content-desc="Loading"
+      resource-id="com.example.app:id/spinner"/>
+</hierarchy>
+'''.strip()
+
+
+MOCK_XML_LOADED = '''
+<?xml version='1.0' encoding='UTF-8' standalone='yes' ?>
+<hierarchy rotation="0">
+  <node bounds="[0,50][1536,178]" content-desc=""
+      resource-id="com.example.app:id/toolbar">
+    <node bounds="[0,58][112,170]" content-desc="Open navigation drawer"/>
+    <node bounds="[121,50][1536,178]"
+        resource-id="com.example.app:id/actionbar_custom_view">
+      <node bounds="[121,50][1424,178]"
+          resource-id="com.example.app:id/actionbar_title" text="Primary"/>
+      <node bounds="[1424,50][1536,178]" content-desc="Search"
+          resource-id="com.example.app:id/actionbar_search_button"/>
+    </node>
+  </node>
+  <node bounds="[0,178][576,1952]" resource-id="com.example.app:id/drawer">
+    <node bounds="[0,178][144,1952]"
+        resource-id="com.example.app:id/mini_drawer">
+      <node bounds="[40,254][104,318]" resource-id="com.example.app:id/avatar"/>
+      <node bounds="[16,354][128,466]" content-desc="Primary"
+          resource-id="com.example.app:id/image_view"/>
+      <node bounds="[16,466][128,578]" content-desc="Social"
+          resource-id="com.example.app:id/image_view"/>
+      <node bounds="[16,578][128,690]" content-desc="Promotions"
+          resource-id="com.example.app:id/image_view"/>
+    </node>
+  </node>
+</hierarchy>
+'''.strip()
+
+
+class UiAppTest(unittest.TestCase):
+
+  def setUp(self):
+    self.device = mock.Mock()
+    self.device.pixel_density = 320  # Each dp pixel is 2 real pixels.
+    self.app = app_ui.AppUi(self.device, package='com.example.app')
+    self._setMockXmlScreenshots([MOCK_XML_LOADED])
+
+  def _setMockXmlScreenshots(self, xml_docs):
+    """Mock self.app._GetRootUiNode to load nodes from some test xml_docs.
+
+    Each time the method is called it will return a UI node for each string
+    given in |xml_docs|, or raise a timeout error when the list is exhausted.
+    """
+    # pylint: disable=protected-access
+    def get_mock_root_ui_node(value):
+      if isinstance(value, Exception):
+        raise value
+      return app_ui._UiNode(
+          self.device, element_tree.fromstring(value), self.app.package)
+
+    xml_docs.append(device_errors.CommandTimeoutError('Timed out!'))
+
+    self.app._GetRootUiNode = mock.Mock(
+        side_effect=(get_mock_root_ui_node(doc) for doc in xml_docs))
+
+  def assertNodeHasAttribs(self, node, attr):
+    # pylint: disable=protected-access
+    for key, value in attr.iteritems():
+      self.assertEquals(node._GetAttribute(key), value)
+
+  def assertTappedOnceAt(self, x, y):
+    self.device.RunShellCommand.assert_called_once_with(
+        ['input', 'tap', str(x), str(y)], check_return=True)
+
+  def testFind_byText(self):
+    node = self.app.GetUiNode(text='Primary')
+    self.assertNodeHasAttribs(node, {
+        'text': 'Primary',
+        'content-desc': None,
+        'resource-id': 'com.example.app:id/actionbar_title',
+    })
+    self.assertEquals(node.bounds, geometry.Rectangle([121, 50], [1424, 178]))
+
+  def testFind_byContentDesc(self):
+    node = self.app.GetUiNode(content_desc='Social')
+    self.assertNodeHasAttribs(node, {
+        'text': None,
+        'content-desc': 'Social',
+        'resource-id': 'com.example.app:id/image_view',
+    })
+    self.assertEquals(node.bounds, geometry.Rectangle([16, 466], [128, 578]))
+
+  def testFind_byResourceId_autocompleted(self):
+    node = self.app.GetUiNode(resource_id='image_view')
+    self.assertNodeHasAttribs(node, {
+        'content-desc': 'Primary',
+        'resource-id': 'com.example.app:id/image_view',
+    })
+
+  def testFind_byResourceId_absolute(self):
+    node = self.app.GetUiNode(resource_id='com.example.app:id/image_view')
+    self.assertNodeHasAttribs(node, {
+        'content-desc': 'Primary',
+        'resource-id': 'com.example.app:id/image_view',
+    })
+
+  def testFind_byMultiple(self):
+    node = self.app.GetUiNode(resource_id='image_view',
+                              content_desc='Promotions')
+    self.assertNodeHasAttribs(node, {
+        'content-desc': 'Promotions',
+        'resource-id': 'com.example.app:id/image_view',
+    })
+    self.assertEquals(node.bounds, geometry.Rectangle([16, 578], [128, 690]))
+
+  def testFind_notFound(self):
+    node = self.app.GetUiNode(resource_id='does_not_exist')
+    self.assertIsNone(node)
+
+  def testFind_noArgsGiven(self):
+    # Same exception given by Python for a function call with not enough args.
+    with self.assertRaises(TypeError):
+      self.app.GetUiNode()
+
+  def testGetChildren(self):
+    node = self.app.GetUiNode(resource_id='mini_drawer')
+    self.assertNodeHasAttribs(
+        node[0], {'resource-id': 'com.example.app:id/avatar'})
+    self.assertNodeHasAttribs(node[1], {'content-desc': 'Primary'})
+    self.assertNodeHasAttribs(node[2], {'content-desc': 'Social'})
+    self.assertNodeHasAttribs(node[3], {'content-desc': 'Promotions'})
+    with self.assertRaises(IndexError):
+      # pylint: disable=pointless-statement
+      node[4]
+
+  def testTap_center(self):
+    node = self.app.GetUiNode(content_desc='Open navigation drawer')
+    node.Tap()
+    self.assertTappedOnceAt(56, 114)
+
+  def testTap_topleft(self):
+    node = self.app.GetUiNode(content_desc='Open navigation drawer')
+    node.Tap(geometry.Point(0, 0))
+    self.assertTappedOnceAt(0, 58)
+
+  def testTap_withOffset(self):
+    node = self.app.GetUiNode(content_desc='Open navigation drawer')
+    node.Tap(geometry.Point(10, 20))
+    self.assertTappedOnceAt(10, 78)
+
+  def testTap_withOffsetInDp(self):
+    node = self.app.GetUiNode(content_desc='Open navigation drawer')
+    node.Tap(geometry.Point(10, 20), dp_units=True)
+    self.assertTappedOnceAt(20, 98)
+
+  def testTap_dpUnitsIgnored(self):
+    node = self.app.GetUiNode(content_desc='Open navigation drawer')
+    node.Tap(dp_units=True)
+    self.assertTappedOnceAt(56, 114)  # Still taps at center.
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testWaitForUiNode_found(self):
+    self._setMockXmlScreenshots(
+        [MOCK_XML_LOADING, MOCK_XML_LOADING, MOCK_XML_LOADED])
+    node = self.app.WaitForUiNode(resource_id='actionbar_title')
+    self.assertNodeHasAttribs(node, {'text': 'Primary'})
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testWaitForUiNode_notFound(self):
+    self._setMockXmlScreenshots(
+        [MOCK_XML_LOADING, MOCK_XML_LOADING, MOCK_XML_LOADING])
+    with self.assertRaises(device_errors.CommandTimeoutError):
+      self.app.WaitForUiNode(resource_id='actionbar_title')
diff --git a/catapult/devil/devil/android/battery_utils.py b/catapult/devil/devil/android/battery_utils.py
new file mode 100644
index 0000000..4c8f543
--- /dev/null
+++ b/catapult/devil/devil/android/battery_utils.py
@@ -0,0 +1,650 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Provides a variety of device interactions with power.
+"""
+# pylint: disable=unused-argument
+
+import collections
+import contextlib
+import csv
+import logging
+
+from devil.android import decorators
+from devil.android import device_errors
+from devil.android import device_utils
+from devil.android.sdk import version_codes
+from devil.utils import timeout_retry
+
+_DEFAULT_TIMEOUT = 30
+_DEFAULT_RETRIES = 3
+
+
+_DEVICE_PROFILES = [
+  {
+    'name': 'Nexus 4',
+    'witness_file': '/sys/module/pm8921_charger/parameters/disabled',
+    'enable_command': (
+        'echo 0 > /sys/module/pm8921_charger/parameters/disabled && '
+        'dumpsys battery reset'),
+    'disable_command': (
+        'echo 1 > /sys/module/pm8921_charger/parameters/disabled && '
+        'dumpsys battery set ac 0 && dumpsys battery set usb 0'),
+    'charge_counter': None,
+    'voltage': None,
+    'current': None,
+  },
+  {
+    'name': 'Nexus 5',
+    # Nexus 5
+    # Setting the HIZ bit of the bq24192 causes the charger to actually ignore
+    # energy coming from USB. Setting the power_supply offline just updates the
+    # Android system to reflect that.
+    'witness_file': '/sys/kernel/debug/bq24192/INPUT_SRC_CONT',
+    'enable_command': (
+        'echo 0x4A > /sys/kernel/debug/bq24192/INPUT_SRC_CONT && '
+        'chmod 644 /sys/class/power_supply/usb/online && '
+        'echo 1 > /sys/class/power_supply/usb/online && '
+        'dumpsys battery reset'),
+    'disable_command': (
+        'echo 0xCA > /sys/kernel/debug/bq24192/INPUT_SRC_CONT && '
+        'chmod 644 /sys/class/power_supply/usb/online && '
+        'echo 0 > /sys/class/power_supply/usb/online && '
+        'dumpsys battery set ac 0 && dumpsys battery set usb 0'),
+    'charge_counter': None,
+    'voltage': None,
+    'current': None,
+  },
+  {
+    'name': 'Nexus 6',
+    'witness_file': None,
+    'enable_command': (
+        'echo 1 > /sys/class/power_supply/battery/charging_enabled && '
+        'dumpsys battery reset'),
+    'disable_command': (
+        'echo 0 > /sys/class/power_supply/battery/charging_enabled && '
+        'dumpsys battery set ac 0 && dumpsys battery set usb 0'),
+    'charge_counter': (
+        '/sys/class/power_supply/max170xx_battery/charge_counter_ext'),
+    'voltage': '/sys/class/power_supply/max170xx_battery/voltage_now',
+    'current': '/sys/class/power_supply/max170xx_battery/current_now',
+  },
+  {
+    'name': 'Nexus 9',
+    'witness_file': None,
+    'enable_command': (
+        'echo Disconnected > '
+        '/sys/bus/i2c/drivers/bq2419x/0-006b/input_cable_state && '
+        'dumpsys battery reset'),
+    'disable_command': (
+        'echo Connected > '
+        '/sys/bus/i2c/drivers/bq2419x/0-006b/input_cable_state && '
+        'dumpsys battery set ac 0 && dumpsys battery set usb 0'),
+    'charge_counter': '/sys/class/power_supply/battery/charge_counter_ext',
+    'voltage': '/sys/class/power_supply/battery/voltage_now',
+    'current': '/sys/class/power_supply/battery/current_now',
+  },
+  {
+    'name': 'Nexus 10',
+    'witness_file': None,
+    'enable_command': None,
+    'disable_command': None,
+    'charge_counter': None,
+    'voltage': '/sys/class/power_supply/ds2784-fuelgauge/voltage_now',
+    'current': '/sys/class/power_supply/ds2784-fuelgauge/current_now',
+
+  },
+]
+
+# The list of useful dumpsys columns.
+# Index of the column containing the format version.
+_DUMP_VERSION_INDEX = 0
+# Index of the column containing the type of the row.
+_ROW_TYPE_INDEX = 3
+# Index of the column containing the uid.
+_PACKAGE_UID_INDEX = 4
+# Index of the column containing the application package.
+_PACKAGE_NAME_INDEX = 5
+# The column containing the uid of the power data.
+_PWI_UID_INDEX = 1
+# The column containing the type of consumption. Only consumption since last
+# charge are of interest here.
+_PWI_AGGREGATION_INDEX = 2
+_PWS_AGGREGATION_INDEX = _PWI_AGGREGATION_INDEX
+# The column containing the amount of power used, in mAh.
+_PWI_POWER_CONSUMPTION_INDEX = 5
+_PWS_POWER_CONSUMPTION_INDEX = _PWI_POWER_CONSUMPTION_INDEX
+
+_MAX_CHARGE_ERROR = 20
+
+
+class BatteryUtils(object):
+
+  def __init__(self, device, default_timeout=_DEFAULT_TIMEOUT,
+               default_retries=_DEFAULT_RETRIES):
+    """BatteryUtils constructor.
+
+      Args:
+        device: A DeviceUtils instance.
+        default_timeout: An integer containing the default number of seconds to
+                         wait for an operation to complete if no explicit value
+                         is provided.
+        default_retries: An integer containing the default number or times an
+                         operation should be retried on failure if no explicit
+                         value is provided.
+      Raises:
+        TypeError: If it is not passed a DeviceUtils instance.
+    """
+    if not isinstance(device, device_utils.DeviceUtils):
+      raise TypeError('Must be initialized with DeviceUtils object.')
+    self._device = device
+    self._cache = device.GetClientCache(self.__class__.__name__)
+    self._default_timeout = default_timeout
+    self._default_retries = default_retries
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def SupportsFuelGauge(self, timeout=None, retries=None):
+    """Detect if fuel gauge chip is present.
+
+    Args:
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Returns:
+      True if known fuel gauge files are present.
+      False otherwise.
+    """
+    self._DiscoverDeviceProfile()
+    return (self._cache['profile']['enable_command'] != None
+        and self._cache['profile']['charge_counter'] != None)
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def GetFuelGaugeChargeCounter(self, timeout=None, retries=None):
+    """Get value of charge_counter on fuel gauge chip.
+
+    Device must have charging disabled for this, not just battery updates
+    disabled. The only device that this currently works with is the nexus 5.
+
+    Args:
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Returns:
+      value of charge_counter for fuel gauge chip in units of nAh.
+
+    Raises:
+      device_errors.CommandFailedError: If fuel gauge chip not found.
+    """
+    if self.SupportsFuelGauge():
+      return int(self._device.ReadFile(
+          self._cache['profile']['charge_counter']))
+    raise device_errors.CommandFailedError(
+        'Unable to find fuel gauge.')
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def GetNetworkData(self, package, timeout=None, retries=None):
+    """Get network data for specific package.
+
+    Args:
+      package: package name you want network data for.
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Returns:
+      Tuple of (sent_data, received_data), or None if no network data is
+      found.
+    """
+    # If device_utils clears cache, cache['uids'] doesn't exist
+    if 'uids' not in self._cache:
+      self._cache['uids'] = {}
+    if package not in self._cache['uids']:
+      self.GetPowerData()
+      if package not in self._cache['uids']:
+        logging.warning('No UID found for %s. Can\'t get network data.',
+                        package)
+        return None
+
+    network_data_path = '/proc/uid_stat/%s/' % self._cache['uids'][package]
+    try:
+      send_data = int(self._device.ReadFile(network_data_path + 'tcp_snd'))
+    # If ReadFile throws exception, it means no network data usage file for
+    # package has been recorded. Return 0 sent and 0 received.
+    except device_errors.AdbShellCommandFailedError:
+      logging.warning('No sent data found for package %s', package)
+      send_data = 0
+    try:
+      recv_data = int(self._device.ReadFile(network_data_path + 'tcp_rcv'))
+    except device_errors.AdbShellCommandFailedError:
+      logging.warning('No received data found for package %s', package)
+      recv_data = 0
+    return (send_data, recv_data)
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def GetPowerData(self, timeout=None, retries=None):
+    """Get power data for device.
+
+    Args:
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Returns:
+      Dict containing system power, and a per-package power dict keyed on
+      package names.
+      {
+        'system_total': 23.1,
+        'per_package' : {
+          package_name: {
+            'uid': uid,
+            'data': [1,2,3]
+          },
+        }
+      }
+    """
+    if 'uids' not in self._cache:
+      self._cache['uids'] = {}
+    dumpsys_output = self._device.RunShellCommand(
+        ['dumpsys', 'batterystats', '-c'],
+        check_return=True, large_output=True)
+    csvreader = csv.reader(dumpsys_output)
+    pwi_entries = collections.defaultdict(list)
+    system_total = None
+    for entry in csvreader:
+      if entry[_DUMP_VERSION_INDEX] not in ['8', '9']:
+        # Wrong dumpsys version.
+        raise device_errors.DeviceVersionError(
+            'Dumpsys version must be 8 or 9. %s found.'
+            % entry[_DUMP_VERSION_INDEX])
+      if _ROW_TYPE_INDEX < len(entry) and entry[_ROW_TYPE_INDEX] == 'uid':
+        current_package = entry[_PACKAGE_NAME_INDEX]
+        if (self._cache['uids'].get(current_package)
+            and self._cache['uids'].get(current_package)
+            != entry[_PACKAGE_UID_INDEX]):
+          raise device_errors.CommandFailedError(
+              'Package %s found multiple times with different UIDs %s and %s'
+               % (current_package, self._cache['uids'][current_package],
+               entry[_PACKAGE_UID_INDEX]))
+        self._cache['uids'][current_package] = entry[_PACKAGE_UID_INDEX]
+      elif (_PWI_POWER_CONSUMPTION_INDEX < len(entry)
+          and entry[_ROW_TYPE_INDEX] == 'pwi'
+          and entry[_PWI_AGGREGATION_INDEX] == 'l'):
+        pwi_entries[entry[_PWI_UID_INDEX]].append(
+            float(entry[_PWI_POWER_CONSUMPTION_INDEX]))
+      elif (_PWS_POWER_CONSUMPTION_INDEX < len(entry)
+          and entry[_ROW_TYPE_INDEX] == 'pws'
+          and entry[_PWS_AGGREGATION_INDEX] == 'l'):
+        # This entry should only appear once.
+        assert system_total is None
+        system_total = float(entry[_PWS_POWER_CONSUMPTION_INDEX])
+
+    per_package = {p: {'uid': uid, 'data': pwi_entries[uid]}
+                   for p, uid in self._cache['uids'].iteritems()}
+    return {'system_total': system_total, 'per_package': per_package}
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def GetBatteryInfo(self, timeout=None, retries=None):
+    """Gets battery info for the device.
+
+    Args:
+      timeout: timeout in seconds
+      retries: number of retries
+    Returns:
+      A dict containing various battery information as reported by dumpsys
+      battery.
+    """
+    result = {}
+    # Skip the first line, which is just a header.
+    for line in self._device.RunShellCommand(
+        ['dumpsys', 'battery'], check_return=True)[1:]:
+      # If usb charging has been disabled, an extra line of header exists.
+      if 'UPDATES STOPPED' in line:
+        logging.warning('Dumpsys battery not receiving updates. '
+                        'Run dumpsys battery reset if this is in error.')
+      elif ':' not in line:
+        logging.warning('Unknown line found in dumpsys battery: "%s"', line)
+      else:
+        k, v = line.split(':', 1)
+        result[k.strip()] = v.strip()
+    return result
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def GetCharging(self, timeout=None, retries=None):
+    """Gets the charging state of the device.
+
+    Args:
+      timeout: timeout in seconds
+      retries: number of retries
+    Returns:
+      True if the device is charging, false otherwise.
+    """
+    battery_info = self.GetBatteryInfo()
+    for k in ('AC powered', 'USB powered', 'Wireless powered'):
+      if (k in battery_info and
+          battery_info[k].lower() in ('true', '1', 'yes')):
+        return True
+    return False
+
+  # TODO(rnephew): Make private when all use cases can use the context manager.
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def DisableBatteryUpdates(self, timeout=None, retries=None):
+    """Resets battery data and makes device appear like it is not
+    charging so that it will collect power data since last charge.
+
+    Args:
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Raises:
+      device_errors.CommandFailedError: When resetting batterystats fails to
+        reset power values.
+      device_errors.DeviceVersionError: If device is not L or higher.
+    """
+    def battery_updates_disabled():
+      return self.GetCharging() is False
+
+    self._ClearPowerData()
+    self._device.RunShellCommand(['dumpsys', 'battery', 'set', 'ac', '0'],
+                                 check_return=True)
+    self._device.RunShellCommand(['dumpsys', 'battery', 'set', 'usb', '0'],
+                                 check_return=True)
+    timeout_retry.WaitFor(battery_updates_disabled, wait_period=1)
+
+  # TODO(rnephew): Make private when all use cases can use the context manager.
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def EnableBatteryUpdates(self, timeout=None, retries=None):
+    """Restarts device charging so that dumpsys no longer collects power data.
+
+    Args:
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Raises:
+      device_errors.DeviceVersionError: If device is not L or higher.
+    """
+    def battery_updates_enabled():
+      return (self.GetCharging()
+              or not bool('UPDATES STOPPED' in self._device.RunShellCommand(
+                  ['dumpsys', 'battery'], check_return=True)))
+
+    self._device.RunShellCommand(['dumpsys', 'battery', 'reset'],
+                                 check_return=True)
+    timeout_retry.WaitFor(battery_updates_enabled, wait_period=1)
+
+  @contextlib.contextmanager
+  def BatteryMeasurement(self, timeout=None, retries=None):
+    """Context manager that enables battery data collection. It makes
+    the device appear to stop charging so that dumpsys will start collecting
+    power data since last charge. Once the with block is exited, charging is
+    resumed and power data since last charge is no longer collected.
+
+    Only for devices L and higher.
+
+    Example usage:
+      with BatteryMeasurement():
+        browser_actions()
+        get_power_data() # report usage within this block
+      after_measurements() # Anything that runs after power
+                           # measurements are collected
+
+    Args:
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Raises:
+      device_errors.DeviceVersionError: If device is not L or higher.
+    """
+    if self._device.build_version_sdk < version_codes.LOLLIPOP:
+      raise device_errors.DeviceVersionError('Device must be L or higher.')
+    try:
+      self.DisableBatteryUpdates(timeout=timeout, retries=retries)
+      yield
+    finally:
+      self.EnableBatteryUpdates(timeout=timeout, retries=retries)
+
+  def _DischargeDevice(self, percent, wait_period=120):
+    """Disables charging and waits for the device to discharge a given amount.
+
+    Args:
+      percent: level of charge to discharge.
+      wait_period: time in seconds to wait between checking the charge level.
+
+    Raises:
+      ValueError: If percent is not between 1 and 99.
+    """
+    battery_level = self.GetBatteryInfo().get('level')
+    if not 0 < percent < 100:
+      raise ValueError('Discharge amount (%s) must be between 1 and 99'
+                       % percent)
+    if battery_level is None:
+      logging.warning('Unable to find current battery level. Cannot discharge.')
+      return
+    battery_level = int(battery_level)
+    # Do not discharge if it would make battery level too low.
+    if percent >= battery_level - 10:
+      logging.warning('Battery is too low or discharge amount requested is too '
+                      'high. Cannot discharge phone %s percent.', percent)
+      return
+
+    self._HardwareSetCharging(False)
+
+    def device_discharged():
+      self._HardwareSetCharging(True)
+      current_level = int(self.GetBatteryInfo().get('level'))
+      logging.info('current battery level: %s', current_level)
+      if battery_level - current_level >= percent:
+        return True
+      self._HardwareSetCharging(False)
+      return False
+
+    timeout_retry.WaitFor(device_discharged, wait_period=wait_period)
+
+  def ChargeDeviceToLevel(self, level, wait_period=60):
+    """Enables charging and waits for device to be charged to given level.
+
+    Args:
+      level: level of charge to wait for.
+      wait_period: time in seconds to wait between checking.
+
+    Raises:
+      device_errors.DeviceChargingError: If error while charging is detected.
+    """
+    self.SetCharging(True)
+    charge_status = {
+        'charge_failure_count': 0,
+        'last_charge_value': 0
+    }
+    def device_charged():
+      battery_level = self.GetBatteryInfo().get('level')
+      if battery_level is None:
+        logging.warning('Unable to find current battery level.')
+        battery_level = 100
+      else:
+        logging.info('current battery level: %s', battery_level)
+        battery_level = int(battery_level)
+
+      # Use > so that it will not reset if charge is going down.
+      if battery_level > charge_status['last_charge_value']:
+        charge_status['last_charge_value'] = battery_level
+        charge_status['charge_failure_count'] = 0
+      else:
+        charge_status['charge_failure_count'] += 1
+
+      if (not battery_level >= level
+          and charge_status['charge_failure_count'] >= _MAX_CHARGE_ERROR):
+        raise device_errors.DeviceChargingError(
+            'Device not charging properly. Current level:%s Previous level:%s'
+            % (battery_level, charge_status['last_charge_value']))
+      return battery_level >= level
+
+    timeout_retry.WaitFor(device_charged, wait_period=wait_period)
+
+  def LetBatteryCoolToTemperature(self, target_temp, wait_period=180):
+    """Lets device sit to give battery time to cool down
+    Args:
+      temp: maximum temperature to allow in tenths of degrees c.
+      wait_period: time in seconds to wait between checking.
+    """
+    def cool_device():
+      temp = self.GetBatteryInfo().get('temperature')
+      if temp is None:
+        logging.warning('Unable to find current battery temperature.')
+        temp = 0
+      else:
+        logging.info('Current battery temperature: %s', temp)
+      if int(temp) <= target_temp:
+        return True
+      else:
+        if self._cache['profile']['name'] == 'Nexus 5':
+          self._DischargeDevice(1)
+        return False
+
+    self._DiscoverDeviceProfile()
+    self.EnableBatteryUpdates()
+    logging.info('Waiting for the device to cool down to %s (0.1 C)',
+                 target_temp)
+    timeout_retry.WaitFor(cool_device, wait_period=wait_period)
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def SetCharging(self, enabled, timeout=None, retries=None):
+    """Enables or disables charging on the device.
+
+    Args:
+      enabled: A boolean indicating whether charging should be enabled or
+        disabled.
+      timeout: timeout in seconds
+      retries: number of retries
+    """
+    if self.GetCharging() == enabled:
+      logging.warning('Device charging already in expected state: %s', enabled)
+      return
+
+    self._DiscoverDeviceProfile()
+    if enabled:
+      if self._cache['profile']['enable_command']:
+        self._HardwareSetCharging(enabled)
+      else:
+        logging.info('Unable to enable charging via hardware. '
+                     'Falling back to software enabling.')
+        self.EnableBatteryUpdates()
+    else:
+      if self._cache['profile']['enable_command']:
+        self._ClearPowerData()
+        self._HardwareSetCharging(enabled)
+      else:
+        logging.info('Unable to disable charging via hardware. '
+                     'Falling back to software disabling.')
+        self.DisableBatteryUpdates()
+
+  def _HardwareSetCharging(self, enabled, timeout=None, retries=None):
+    """Enables or disables charging on the device.
+
+    Args:
+      enabled: A boolean indicating whether charging should be enabled or
+        disabled.
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Raises:
+      device_errors.CommandFailedError: If method of disabling charging cannot
+        be determined.
+    """
+    self._DiscoverDeviceProfile()
+    if not self._cache['profile']['enable_command']:
+      raise device_errors.CommandFailedError(
+          'Unable to find charging commands.')
+
+    command = (self._cache['profile']['enable_command'] if enabled
+               else self._cache['profile']['disable_command'])
+
+    def verify_charging():
+      return self.GetCharging() == enabled
+
+    self._device.RunShellCommand(
+        command, check_return=True, as_root=True, large_output=True)
+    timeout_retry.WaitFor(verify_charging, wait_period=1)
+
+  @contextlib.contextmanager
+  def PowerMeasurement(self, timeout=None, retries=None):
+    """Context manager that enables battery power collection.
+
+    Once the with block is exited, charging is resumed. Will attempt to disable
+    charging at the hardware level, and if that fails will fall back to software
+    disabling of battery updates.
+
+    Only for devices L and higher.
+
+    Example usage:
+      with PowerMeasurement():
+        browser_actions()
+        get_power_data() # report usage within this block
+      after_measurements() # Anything that runs after power
+                           # measurements are collected
+
+    Args:
+      timeout: timeout in seconds
+      retries: number of retries
+    """
+    try:
+      self.SetCharging(False, timeout=timeout, retries=retries)
+      yield
+    finally:
+      self.SetCharging(True, timeout=timeout, retries=retries)
+
+  def _ClearPowerData(self):
+    """Resets battery data and makes device appear like it is not
+    charging so that it will collect power data since last charge.
+
+    Returns:
+      True if power data cleared.
+      False if power data clearing is not supported (pre-L)
+
+    Raises:
+      device_errors.DeviceVersionError: If power clearing is supported,
+        but fails.
+    """
+    if self._device.build_version_sdk < version_codes.LOLLIPOP:
+      logging.warning('Dumpsys power data only available on 5.0 and above. '
+                      'Cannot clear power data.')
+      return False
+
+    self._device.RunShellCommand(
+        ['dumpsys', 'battery', 'set', 'usb', '1'], check_return=True)
+    self._device.RunShellCommand(
+        ['dumpsys', 'battery', 'set', 'ac', '1'], check_return=True)
+    self._device.RunShellCommand(
+        ['dumpsys', 'batterystats', '--reset'], check_return=True)
+    battery_data = self._device.RunShellCommand(
+        ['dumpsys', 'batterystats', '--charged', '-c'],
+        check_return=True, large_output=True)
+    for line in battery_data:
+      l = line.split(',')
+      if (len(l) > _PWI_POWER_CONSUMPTION_INDEX and l[_ROW_TYPE_INDEX] == 'pwi'
+          and float(l[_PWI_POWER_CONSUMPTION_INDEX]) != 0):
+        self._device.RunShellCommand(
+            ['dumpsys', 'battery', 'reset'], check_return=True)
+        raise device_errors.CommandFailedError(
+            'Non-zero pwi value found after reset.')
+    self._device.RunShellCommand(
+        ['dumpsys', 'battery', 'reset'], check_return=True)
+    return True
+
+  def _DiscoverDeviceProfile(self):
+    """Checks and caches device information.
+
+    Returns:
+      True if the profile is found, False otherwise.
+    """
+
+    if 'profile' in self._cache:
+      return True
+    for profile in _DEVICE_PROFILES:
+      if self._device.product_model == profile['name']:
+        self._cache['profile'] = profile
+        return True
+    self._cache['profile'] = {
+        'name': None,
+        'witness_file': None,
+        'enable_command': None,
+        'disable_command': None,
+        'charge_counter': None,
+        'voltage': None,
+        'current': None,
+    }
+    return False
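+
+
+# The sketch below is a minimal, illustrative example of how the class above
+# might be used; the helper name, the charge level, and the temperature are
+# assumptions and are not referenced anywhere else in devil.
+def _ExamplePowerMeasurement(device, workload):
+  """Collects power data for `workload` on an existing DeviceUtils `device`."""
+  battery = BatteryUtils(device)
+  # Precondition the device so measurements start from a comparable state.
+  battery.ChargeDeviceToLevel(90)
+  battery.LetBatteryCoolToTemperature(350)  # 35.0 C, in tenths of a degree.
+  # Power data is collected only inside the with block; charging is restored
+  # on exit, even if the workload raises.
+  with battery.PowerMeasurement():
+    workload()
+    data = battery.GetPowerData()
+  return data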
diff --git a/catapult/devil/devil/android/battery_utils_test.py b/catapult/devil/devil/android/battery_utils_test.py
new file mode 100755
index 0000000..7993921
--- /dev/null
+++ b/catapult/devil/devil/android/battery_utils_test.py
@@ -0,0 +1,676 @@
+#!/usr/bin/env python
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Unit tests for the contents of battery_utils.py
+"""
+
+# pylint: disable=protected-access,unused-argument
+
+import logging
+import unittest
+
+from devil import devil_env
+from devil.android import battery_utils
+from devil.android import device_errors
+from devil.android import device_utils
+from devil.android import device_utils_test
+from devil.utils import mock_calls
+
+with devil_env.SysPath(devil_env.PYMOCK_PATH):
+  import mock  # pylint: disable=import-error
+
+_DUMPSYS_OUTPUT = [
+    '9,0,i,uid,1000,test_package1',
+    '9,0,i,uid,1001,test_package2',
+    '9,1000,l,pwi,uid,1',
+    '9,1001,l,pwi,uid,2',
+    '9,0,l,pws,1728,2000,190,207',
+]
+
+
+class BatteryUtilsTest(mock_calls.TestCase):
+
+  _NEXUS_5 = {
+    'name': 'Nexus 5',
+    'witness_file': '/sys/kernel/debug/bq24192/INPUT_SRC_CONT',
+    'enable_command': (
+        'echo 0x4A > /sys/kernel/debug/bq24192/INPUT_SRC_CONT && '
+        'echo 1 > /sys/class/power_supply/usb/online'),
+    'disable_command': (
+        'echo 0xCA > /sys/kernel/debug/bq24192/INPUT_SRC_CONT && '
+        'chmod 644 /sys/class/power_supply/usb/online && '
+        'echo 0 > /sys/class/power_supply/usb/online'),
+    'charge_counter': None,
+    'voltage': None,
+    'current': None,
+  }
+
+  _NEXUS_6 = {
+    'name': 'Nexus 6',
+    'witness_file': None,
+    'enable_command': None,
+    'disable_command': None,
+    'charge_counter': (
+        '/sys/class/power_supply/max170xx_battery/charge_counter_ext'),
+    'voltage': '/sys/class/power_supply/max170xx_battery/voltage_now',
+    'current': '/sys/class/power_supply/max170xx_battery/current_now',
+  }
+
+  _NEXUS_10 = {
+    'name': 'Nexus 10',
+    'witness_file': None,
+    'enable_command': None,
+    'disable_command': None,
+    'charge_counter': (
+        '/sys/class/power_supply/ds2784-fuelgauge/charge_counter_ext'),
+    'voltage': '/sys/class/power_supply/ds2784-fuelgauge/voltage_now',
+    'current': '/sys/class/power_supply/ds2784-fuelgauge/current_now',
+  }
+
+  def ShellError(self, output=None, status=1):
+    def action(cmd, *args, **kwargs):
+      raise device_errors.AdbShellCommandFailedError(
+          cmd, output, status, str(self.device))
+    if output is None:
+      output = 'Permission denied\n'
+    return action
+
+  def setUp(self):
+    self.adb = device_utils_test._AdbWrapperMock('0123456789abcdef')
+    self.device = device_utils.DeviceUtils(
+        self.adb, default_timeout=10, default_retries=0)
+    self.watchMethodCalls(self.call.adb, ignore=['GetDeviceSerial'])
+    self.battery = battery_utils.BatteryUtils(
+        self.device, default_timeout=10, default_retries=0)
+
+
+class BatteryUtilsInitTest(unittest.TestCase):
+
+  def testInitWithDeviceUtil(self):
+    serial = '0fedcba987654321'
+    d = device_utils.DeviceUtils(serial)
+    b = battery_utils.BatteryUtils(d)
+    self.assertEqual(d, b._device)
+
+  def testInitWithMissing_fails(self):
+    with self.assertRaises(TypeError):
+      battery_utils.BatteryUtils(None)
+    with self.assertRaises(TypeError):
+      battery_utils.BatteryUtils('')
+
+
+class BatteryUtilsSetChargingTest(BatteryUtilsTest):
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testHardwareSetCharging_enabled(self):
+    self.battery._cache['profile'] = self._NEXUS_5
+    with self.assertCalls(
+        (self.call.device.RunShellCommand(
+            mock.ANY, check_return=True, as_root=True, large_output=True), []),
+        (self.call.battery.GetCharging(), False),
+        (self.call.battery.GetCharging(), True)):
+      self.battery._HardwareSetCharging(True)
+
+  def testHardwareSetCharging_alreadyEnabled(self):
+    self.battery._cache['profile'] = self._NEXUS_5
+    with self.assertCalls(
+        (self.call.device.RunShellCommand(
+            mock.ANY, check_return=True, as_root=True, large_output=True), []),
+        (self.call.battery.GetCharging(), True)):
+      self.battery._HardwareSetCharging(True)
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testHardwareSetCharging_disabled(self):
+    self.battery._cache['profile'] = self._NEXUS_5
+    with self.assertCalls(
+        (self.call.device.RunShellCommand(
+            mock.ANY, check_return=True, as_root=True, large_output=True), []),
+        (self.call.battery.GetCharging(), True),
+        (self.call.battery.GetCharging(), False)):
+      self.battery._HardwareSetCharging(False)
+
+
+class BatteryUtilsSetBatteryMeasurementTest(BatteryUtilsTest):
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testBatteryMeasurementWifi(self):
+    with self.patch_call(self.call.device.build_version_sdk,
+                         return_value=22):
+      with self.assertCalls(
+          (self.call.battery._ClearPowerData(), True),
+          (self.call.device.RunShellCommand(
+              ['dumpsys', 'battery', 'set', 'ac', '0'], check_return=True), []),
+          (self.call.device.RunShellCommand(
+              ['dumpsys', 'battery', 'set', 'usb', '0'], check_return=True),
+           []),
+          (self.call.battery.GetCharging(), False),
+          (self.call.device.RunShellCommand(
+              ['dumpsys', 'battery', 'reset'], check_return=True), []),
+          (self.call.battery.GetCharging(), False),
+          (self.call.device.RunShellCommand(
+              ['dumpsys', 'battery'], check_return=True), ['UPDATES STOPPED']),
+          (self.call.battery.GetCharging(), False),
+          (self.call.device.RunShellCommand(
+              ['dumpsys', 'battery'], check_return=True), [])):
+        with self.battery.BatteryMeasurement():
+          pass
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testBatteryMeasurementUsb(self):
+    with self.patch_call(self.call.device.build_version_sdk,
+                         return_value=22):
+      with self.assertCalls(
+          (self.call.battery._ClearPowerData(), True),
+          (self.call.device.RunShellCommand(
+              ['dumpsys', 'battery', 'set', 'ac', '0'], check_return=True), []),
+          (self.call.device.RunShellCommand(
+              ['dumpsys', 'battery', 'set', 'usb', '0'], check_return=True),
+           []),
+          (self.call.battery.GetCharging(), False),
+          (self.call.device.RunShellCommand(
+              ['dumpsys', 'battery', 'reset'], check_return=True), []),
+          (self.call.battery.GetCharging(), False),
+          (self.call.device.RunShellCommand(
+              ['dumpsys', 'battery'], check_return=True), ['UPDATES STOPPED']),
+          (self.call.battery.GetCharging(), True)):
+        with self.battery.BatteryMeasurement():
+          pass
+
+
+class BatteryUtilsGetPowerData(BatteryUtilsTest):
+
+  def testGetPowerData(self):
+    with self.assertCalls(
+        (self.call.device.RunShellCommand(
+            ['dumpsys', 'batterystats', '-c'],
+            check_return=True, large_output=True),
+         _DUMPSYS_OUTPUT)):
+      data = self.battery.GetPowerData()
+      check = {
+        'system_total': 2000.0,
+        'per_package': {
+          'test_package1': {'uid': '1000', 'data': [1.0]},
+          'test_package2': {'uid': '1001', 'data': [2.0]}
+        }
+      }
+      self.assertEqual(data, check)
+
+  def testGetPowerData_packageCollisionSame(self):
+    self.battery._cache['uids'] = {'test_package1': '1000'}
+    with self.assertCall(
+        self.call.device.RunShellCommand(
+            ['dumpsys', 'batterystats', '-c'],
+            check_return=True, large_output=True),
+        _DUMPSYS_OUTPUT):
+      data = self.battery.GetPowerData()
+      check = {
+        'system_total': 2000.0,
+        'per_package': {
+          'test_package1': {'uid': '1000', 'data': [1.0]},
+          'test_package2': {'uid': '1001', 'data': [2.0]}
+        }
+      }
+      self.assertEqual(data, check)
+
+  def testGetPowerData_packageCollisionDifferent(self):
+    self.battery._cache['uids'] = {'test_package1': '1'}
+    with self.assertCall(
+        self.call.device.RunShellCommand(
+            ['dumpsys', 'batterystats', '-c'],
+            check_return=True, large_output=True),
+        _DUMPSYS_OUTPUT):
+      with self.assertRaises(device_errors.CommandFailedError):
+        self.battery.GetPowerData()
+
+  def testGetPowerData_cacheCleared(self):
+    with self.assertCalls(
+        (self.call.device.RunShellCommand(
+            ['dumpsys', 'batterystats', '-c'],
+            check_return=True, large_output=True),
+         _DUMPSYS_OUTPUT)):
+      self.battery._cache.clear()
+      data = self.battery.GetPowerData()
+      check = {
+        'system_total': 2000.0,
+        'per_package': {
+          'test_package1': {'uid': '1000', 'data': [1.0]},
+          'test_package2': {'uid': '1001', 'data': [2.0]}
+        }
+      }
+      self.assertEqual(data, check)
+
+
+class BatteryUtilsChargeDevice(BatteryUtilsTest):
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testChargeDeviceToLevel_pass(self):
+    with self.assertCalls(
+        (self.call.battery.SetCharging(True)),
+        (self.call.battery.GetBatteryInfo(), {'level': '50'}),
+        (self.call.battery.GetBatteryInfo(), {'level': '100'})):
+      self.battery.ChargeDeviceToLevel(95)
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testChargeDeviceToLevel_failureSame(self):
+    with self.assertCalls(
+        (self.call.battery.SetCharging(True)),
+        (self.call.battery.GetBatteryInfo(), {'level': '50'}),
+        (self.call.battery.GetBatteryInfo(), {'level': '50'}),
+        (self.call.battery.GetBatteryInfo(), {'level': '50'})):
+      with self.assertRaises(device_errors.DeviceChargingError):
+        old_max = battery_utils._MAX_CHARGE_ERROR
+        try:
+          battery_utils._MAX_CHARGE_ERROR = 2
+          self.battery.ChargeDeviceToLevel(95)
+        finally:
+          battery_utils._MAX_CHARGE_ERROR = old_max
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testChargeDeviceToLevel_failureDischarge(self):
+    with self.assertCalls(
+        (self.call.battery.SetCharging(True)),
+        (self.call.battery.GetBatteryInfo(), {'level': '50'}),
+        (self.call.battery.GetBatteryInfo(), {'level': '49'}),
+        (self.call.battery.GetBatteryInfo(), {'level': '48'})):
+      with self.assertRaises(device_errors.DeviceChargingError):
+        old_max = battery_utils._MAX_CHARGE_ERROR
+        try:
+          battery_utils._MAX_CHARGE_ERROR = 2
+          self.battery.ChargeDeviceToLevel(95)
+        finally:
+          battery_utils._MAX_CHARGE_ERROR = old_max
+
+
+class BatteryUtilsDischargeDevice(BatteryUtilsTest):
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testDischargeDevice_exact(self):
+    with self.assertCalls(
+        (self.call.battery.GetBatteryInfo(), {'level': '100'}),
+        (self.call.battery._HardwareSetCharging(False)),
+        (self.call.battery._HardwareSetCharging(True)),
+        (self.call.battery.GetBatteryInfo(), {'level': '99'})):
+      self.battery._DischargeDevice(1)
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testDischargeDevice_over(self):
+    with self.assertCalls(
+        (self.call.battery.GetBatteryInfo(), {'level': '100'}),
+        (self.call.battery._HardwareSetCharging(False)),
+        (self.call.battery._HardwareSetCharging(True)),
+        (self.call.battery.GetBatteryInfo(), {'level': '50'})):
+      self.battery._DischargeDevice(1)
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testDischargeDevice_takeslong(self):
+    with self.assertCalls(
+        (self.call.battery.GetBatteryInfo(), {'level': '100'}),
+        (self.call.battery._HardwareSetCharging(False)),
+        (self.call.battery._HardwareSetCharging(True)),
+        (self.call.battery.GetBatteryInfo(), {'level': '100'}),
+        (self.call.battery._HardwareSetCharging(False)),
+        (self.call.battery._HardwareSetCharging(True)),
+        (self.call.battery.GetBatteryInfo(), {'level': '99'}),
+        (self.call.battery._HardwareSetCharging(False)),
+        (self.call.battery._HardwareSetCharging(True)),
+        (self.call.battery.GetBatteryInfo(), {'level': '98'}),
+        (self.call.battery._HardwareSetCharging(False)),
+        (self.call.battery._HardwareSetCharging(True)),
+        (self.call.battery.GetBatteryInfo(), {'level': '97'})):
+      self.battery._DischargeDevice(3)
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testDischargeDevice_dischargeTooClose(self):
+    with self.assertCalls(
+        (self.call.battery.GetBatteryInfo(), {'level': '100'})):
+      self.battery._DischargeDevice(99)
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testDischargeDevice_percentageOutOfBounds(self):
+    with self.assertCalls(
+        (self.call.battery.GetBatteryInfo(), {'level': '100'})):
+      with self.assertRaises(ValueError):
+        self.battery._DischargeDevice(100)
+    with self.assertCalls(
+        (self.call.battery.GetBatteryInfo(), {'level': '100'})):
+      with self.assertRaises(ValueError):
+        self.battery._DischargeDevice(0)
+
+
+class BatteryUtilsGetBatteryInfoTest(BatteryUtilsTest):
+
+  def testGetBatteryInfo_normal(self):
+    with self.assertCalls(
+        (self.call.device.RunShellCommand(
+            ['dumpsys', 'battery'], check_return=True),
+        [
+          'Current Battery Service state:',
+          '  AC powered: false',
+          '  USB powered: true',
+          '  level: 100',
+          '  temperature: 321',
+        ])):
+      self.assertEquals(
+          {
+            'AC powered': 'false',
+            'USB powered': 'true',
+            'level': '100',
+            'temperature': '321',
+          },
+          self.battery.GetBatteryInfo())
+
+  def testGetBatteryInfo_nothing(self):
+    with self.assertCalls(
+        (self.call.device.RunShellCommand(
+            ['dumpsys', 'battery'], check_return=True), [])):
+      self.assertEquals({}, self.battery.GetBatteryInfo())
+
+
+class BatteryUtilsGetChargingTest(BatteryUtilsTest):
+
+  def testGetCharging_usb(self):
+    with self.assertCall(
+        self.call.battery.GetBatteryInfo(), {'USB powered': 'true'}):
+      self.assertTrue(self.battery.GetCharging())
+
+  def testGetCharging_usbFalse(self):
+    with self.assertCall(
+        self.call.battery.GetBatteryInfo(), {'USB powered': 'false'}):
+      self.assertFalse(self.battery.GetCharging())
+
+  def testGetCharging_ac(self):
+    with self.assertCall(
+        self.call.battery.GetBatteryInfo(), {'AC powered': 'true'}):
+      self.assertTrue(self.battery.GetCharging())
+
+  def testGetCharging_wireless(self):
+    with self.assertCall(
+        self.call.battery.GetBatteryInfo(), {'Wireless powered': 'true'}):
+      self.assertTrue(self.battery.GetCharging())
+
+  def testGetCharging_unknown(self):
+    with self.assertCall(
+        self.call.battery.GetBatteryInfo(), {'level': '42'}):
+      self.assertFalse(self.battery.GetCharging())
+
+
+class BatteryUtilsGetNetworkDataTest(BatteryUtilsTest):
+
+  def testGetNetworkData_noDataUsage(self):
+    with self.assertCalls(
+        (self.call.device.RunShellCommand(
+            ['dumpsys', 'batterystats', '-c'],
+            check_return=True, large_output=True),
+         _DUMPSYS_OUTPUT),
+        (self.call.device.ReadFile('/proc/uid_stat/1000/tcp_snd'),
+            self.ShellError()),
+        (self.call.device.ReadFile('/proc/uid_stat/1000/tcp_rcv'),
+            self.ShellError())):
+      self.assertEquals(self.battery.GetNetworkData('test_package1'), (0, 0))
+
+  def testGetNetworkData_badPackage(self):
+    with self.assertCall(
+        self.call.device.RunShellCommand(
+            ['dumpsys', 'batterystats', '-c'],
+            check_return=True, large_output=True),
+        _DUMPSYS_OUTPUT):
+      self.assertEqual(self.battery.GetNetworkData('asdf'), None)
+
+  def testGetNetworkData_packageNotCached(self):
+    with self.assertCalls(
+        (self.call.device.RunShellCommand(
+            ['dumpsys', 'batterystats', '-c'],
+            check_return=True, large_output=True),
+         _DUMPSYS_OUTPUT),
+        (self.call.device.ReadFile('/proc/uid_stat/1000/tcp_snd'), 1),
+        (self.call.device.ReadFile('/proc/uid_stat/1000/tcp_rcv'), 2)):
+      self.assertEqual(self.battery.GetNetworkData('test_package1'), (1, 2))
+
+  def testGetNetworkData_packageCached(self):
+    self.battery._cache['uids'] = {'test_package1': '1000'}
+    with self.assertCalls(
+        (self.call.device.ReadFile('/proc/uid_stat/1000/tcp_snd'), 1),
+        (self.call.device.ReadFile('/proc/uid_stat/1000/tcp_rcv'), 2)):
+      self.assertEqual(self.battery.GetNetworkData('test_package1'), (1, 2))
+
+  def testGetNetworkData_clearedCache(self):
+    with self.assertCalls(
+        (self.call.device.RunShellCommand(
+            ['dumpsys', 'batterystats', '-c'],
+            check_return=True, large_output=True),
+         _DUMPSYS_OUTPUT),
+        (self.call.device.ReadFile('/proc/uid_stat/1000/tcp_snd'), 1),
+        (self.call.device.ReadFile('/proc/uid_stat/1000/tcp_rcv'), 2)):
+      self.battery._cache.clear()
+      self.assertEqual(self.battery.GetNetworkData('test_package1'), (1, 2))
+
+
+class BatteryUtilsLetBatteryCoolToTemperatureTest(BatteryUtilsTest):
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testLetBatteryCoolToTemperature_startUnder(self):
+    self.battery._cache['profile'] = self._NEXUS_6
+    with self.assertCalls(
+        (self.call.battery.EnableBatteryUpdates(), []),
+        (self.call.battery.GetBatteryInfo(), {'temperature': '500'})):
+      self.battery.LetBatteryCoolToTemperature(600)
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testLetBatteryCoolToTemperature_startOver(self):
+    self.battery._cache['profile'] = self._NEXUS_6
+    with self.assertCalls(
+        (self.call.battery.EnableBatteryUpdates(), []),
+        (self.call.battery.GetBatteryInfo(), {'temperature': '500'}),
+        (self.call.battery.GetBatteryInfo(), {'temperature': '400'})):
+      self.battery.LetBatteryCoolToTemperature(400)
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testLetBatteryCoolToTemperature_nexus5Hot(self):
+    self.battery._cache['profile'] = self._NEXUS_5
+    with self.assertCalls(
+        (self.call.battery.EnableBatteryUpdates(), []),
+        (self.call.battery.GetBatteryInfo(), {'temperature': '500'}),
+        (self.call.battery._DischargeDevice(1), []),
+        (self.call.battery.GetBatteryInfo(), {'temperature': '400'})):
+      self.battery.LetBatteryCoolToTemperature(400)
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testLetBatteryCoolToTemperature_nexus5Cool(self):
+    self.battery._cache['profile'] = self._NEXUS_5
+    with self.assertCalls(
+        (self.call.battery.EnableBatteryUpdates(), []),
+        (self.call.battery.GetBatteryInfo(), {'temperature': '400'})):
+      self.battery.LetBatteryCoolToTemperature(400)
+
+
+class BatteryUtilsSupportsFuelGaugeTest(BatteryUtilsTest):
+
+  def testSupportsFuelGauge_false(self):
+    self.battery._cache['profile'] = self._NEXUS_5
+    self.assertFalse(self.battery.SupportsFuelGauge())
+
+  def testSupportsFuelGauge_trueMax(self):
+    self.battery._cache['profile'] = self._NEXUS_6
+    # TODO(rnephew): Change this to assertTrue when we have support for
+    # disabling hardware charging on nexus 6.
+    self.assertFalse(self.battery.SupportsFuelGauge())
+
+  def testSupportsFuelGauge_trueDS(self):
+    self.battery._cache['profile'] = self._NEXUS_10
+    # TODO(rnephew): Change this to assertTrue when we have support for
+    # disabling hardware charging on nexus 10.
+    self.assertFalse(self.battery.SupportsFuelGauge())
+
+
+class BatteryUtilsGetFuelGaugeChargeCounterTest(BatteryUtilsTest):
+
+  def testGetFuelGaugeChargeCounter_noFuelGauge(self):
+    self.battery._cache['profile'] = self._NEXUS_5
+    with self.assertRaises(device_errors.CommandFailedError):
+      self.battery.GetFuelGaugeChargeCounter()
+
+  def testGetFuelGaugeChargeCounter_fuelGaugePresent(self):
+    self.battery._cache['profile'] = self._NEXUS_6
+    with self.assertCalls(
+        (self.call.battery.SupportsFuelGauge(), True),
+        (self.call.device.ReadFile(mock.ANY), '123')):
+      self.assertEqual(self.battery.GetFuelGaugeChargeCounter(), 123)
+
+
+class BatteryUtilsSetCharging(BatteryUtilsTest):
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testSetCharging_softwareSetTrue(self):
+    self.battery._cache['profile'] = self._NEXUS_6
+    with self.assertCalls(
+        (self.call.battery.GetCharging(), False),
+        (self.call.device.RunShellCommand(
+            ['dumpsys', 'battery', 'reset'], check_return=True), []),
+        (self.call.battery.GetCharging(), False),
+        (self.call.device.RunShellCommand(
+            ['dumpsys', 'battery'], check_return=True), ['UPDATES STOPPED']),
+        (self.call.battery.GetCharging(), True)):
+      self.battery.SetCharging(True)
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testSetCharging_softwareSetFalse(self):
+    self.battery._cache['profile'] = self._NEXUS_6
+    with self.assertCalls(
+        (self.call.battery.GetCharging(), True),
+        (self.call.battery._ClearPowerData(), True),
+        (self.call.device.RunShellCommand(
+            ['dumpsys', 'battery', 'set', 'ac', '0'], check_return=True), []),
+        (self.call.device.RunShellCommand(
+            ['dumpsys', 'battery', 'set', 'usb', '0'], check_return=True), []),
+        (self.call.battery.GetCharging(), False)):
+      self.battery.SetCharging(False)
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testSetCharging_hardwareSetTrue(self):
+    self.battery._cache['profile'] = self._NEXUS_5
+    with self.assertCalls(
+        (self.call.battery.GetCharging(), False),
+        (self.call.battery._HardwareSetCharging(True))):
+      self.battery.SetCharging(True)
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testSetCharging_hardwareSetFalse(self):
+    self.battery._cache['profile'] = self._NEXUS_5
+    with self.assertCalls(
+        (self.call.battery.GetCharging(), True),
+        (self.call.battery._ClearPowerData(), True),
+        (self.call.battery._HardwareSetCharging(False))):
+      self.battery.SetCharging(False)
+
+  def testSetCharging_expectedStateAlreadyTrue(self):
+    with self.assertCalls((self.call.battery.GetCharging(), True)):
+      self.battery.SetCharging(True)
+
+  def testSetCharging_expectedStateAlreadyFalse(self):
+    with self.assertCalls((self.call.battery.GetCharging(), False)):
+      self.battery.SetCharging(False)
+
+
+class BatteryUtilsPowerMeasurement(BatteryUtilsTest):
+
+  def testPowerMeasurement_hardware(self):
+    self.battery._cache['profile'] = self._NEXUS_5
+    with self.assertCalls(
+        (self.call.battery.GetCharging(), True),
+        (self.call.battery._ClearPowerData(), True),
+        (self.call.battery._HardwareSetCharging(False)),
+        (self.call.battery.GetCharging(), False),
+        (self.call.battery._HardwareSetCharging(True))):
+      with self.battery.PowerMeasurement():
+        pass
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testPowerMeasurement_software(self):
+    self.battery._cache['profile'] = self._NEXUS_6
+    with self.assertCalls(
+        (self.call.battery.GetCharging(), True),
+        (self.call.battery._ClearPowerData(), True),
+        (self.call.device.RunShellCommand(
+            ['dumpsys', 'battery', 'set', 'ac', '0'], check_return=True), []),
+        (self.call.device.RunShellCommand(
+            ['dumpsys', 'battery', 'set', 'usb', '0'], check_return=True), []),
+        (self.call.battery.GetCharging(), False),
+        (self.call.battery.GetCharging(), False),
+        (self.call.device.RunShellCommand(
+            ['dumpsys', 'battery', 'reset'], check_return=True), []),
+        (self.call.battery.GetCharging(), False),
+        (self.call.device.RunShellCommand(
+            ['dumpsys', 'battery'], check_return=True), ['UPDATES STOPPED']),
+        (self.call.battery.GetCharging(), True)):
+      with self.battery.PowerMeasurement():
+        pass
+
+
+class BatteryUtilsDiscoverDeviceProfile(BatteryUtilsTest):
+
+  def testDiscoverDeviceProfile_known(self):
+    with self.patch_call(self.call.device.product_model,
+                         return_value='Nexus 4'):
+      self.battery._DiscoverDeviceProfile()
+      self.assertEqual(self.battery._cache['profile']['name'], "Nexus 4")
+
+  def testDiscoverDeviceProfile_unknown(self):
+    with self.patch_call(self.call.device.product_model,
+                         return_value='Other'):
+      self.battery._DiscoverDeviceProfile()
+      self.assertEqual(self.battery._cache['profile']['name'], None)
+
+
+class BatteryUtilsClearPowerData(BatteryUtilsTest):
+
+  def testClearPowerData_preL(self):
+    with self.patch_call(self.call.device.build_version_sdk,
+                         return_value=20):
+      self.assertFalse(self.battery._ClearPowerData())
+
+  def testClearPowerData_clearedL(self):
+    with self.patch_call(self.call.device.build_version_sdk,
+                         return_value=22):
+      with self.assertCalls(
+          (self.call.device.RunShellCommand(
+              ['dumpsys', 'battery', 'set', 'usb', '1'], check_return=True),
+           []),
+          (self.call.device.RunShellCommand(
+              ['dumpsys', 'battery', 'set', 'ac', '1'], check_return=True), []),
+          (self.call.device.RunShellCommand(
+              ['dumpsys', 'batterystats', '--reset'], check_return=True), []),
+          (self.call.device.RunShellCommand(
+              ['dumpsys', 'batterystats', '--charged', '-c'],
+              check_return=True, large_output=True), []),
+          (self.call.device.RunShellCommand(
+              ['dumpsys', 'battery', 'reset'], check_return=True), [])):
+        self.assertTrue(self.battery._ClearPowerData())
+
+  def testClearPowerData_notClearedL(self):
+    with self.patch_call(self.call.device.build_version_sdk,
+                         return_value=22):
+      with self.assertCalls(
+          (self.call.device.RunShellCommand(
+              ['dumpsys', 'battery', 'set', 'usb', '1'], check_return=True),
+           []),
+          (self.call.device.RunShellCommand(
+              ['dumpsys', 'battery', 'set', 'ac', '1'], check_return=True), []),
+          (self.call.device.RunShellCommand(
+              ['dumpsys', 'batterystats', '--reset'], check_return=True), []),
+          (self.call.device.RunShellCommand(
+              ['dumpsys', 'batterystats', '--charged', '-c'],
+              check_return=True, large_output=True),
+              ['9,1000,l,pwi,uid,0.0327']),
+          (self.call.device.RunShellCommand(
+              ['dumpsys', 'battery', 'reset'], check_return=True), [])):
+        with self.assertRaises(device_errors.CommandFailedError):
+          self.battery._ClearPowerData()
+
+
+if __name__ == '__main__':
+  logging.getLogger().setLevel(logging.DEBUG)
+  unittest.main(verbosity=2)
diff --git a/catapult/devil/devil/android/constants/__init__.py b/catapult/devil/devil/android/constants/__init__.py
new file mode 100644
index 0000000..50b23df
--- /dev/null
+++ b/catapult/devil/devil/android/constants/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/catapult/devil/devil/android/constants/chrome.py b/catapult/devil/devil/android/constants/chrome.py
new file mode 100644
index 0000000..5190ff9
--- /dev/null
+++ b/catapult/devil/devil/android/constants/chrome.py
@@ -0,0 +1,60 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+
+PackageInfo = collections.namedtuple(
+    'PackageInfo',
+    ['package', 'activity', 'cmdline_file', 'devtools_socket', 'test_package'])
+
+PACKAGE_INFO = {
+    'chrome_document': PackageInfo(
+        'com.google.android.apps.chrome.document',
+        'com.google.android.apps.chrome.document.ChromeLauncherActivity',
+        '/data/local/chrome-command-line',
+        'chrome_devtools_remote',
+        None),
+    'chrome': PackageInfo(
+        'com.google.android.apps.chrome',
+        'com.google.android.apps.chrome.Main',
+        '/data/local/chrome-command-line',
+        'chrome_devtools_remote',
+        'com.google.android.apps.chrome.tests'),
+    'chrome_beta': PackageInfo(
+        'com.chrome.beta',
+        'com.google.android.apps.chrome.Main',
+        '/data/local/chrome-command-line',
+        'chrome_devtools_remote',
+        None),
+    'chrome_stable': PackageInfo(
+        'com.android.chrome',
+        'com.google.android.apps.chrome.Main',
+        '/data/local/chrome-command-line',
+        'chrome_devtools_remote',
+        None),
+    'chrome_dev': PackageInfo(
+        'com.chrome.dev',
+        'com.google.android.apps.chrome.Main',
+        '/data/local/chrome-command-line',
+        'chrome_devtools_remote',
+        None),
+    'chrome_canary': PackageInfo(
+        'com.chrome.canary',
+        'com.google.android.apps.chrome.Main',
+        '/data/local/chrome-command-line',
+        'chrome_devtools_remote',
+        None),
+    'chrome_work': PackageInfo(
+        'com.chrome.work',
+        'com.google.android.apps.chrome.Main',
+        '/data/local/chrome-command-line',
+        'chrome_devtools_remote',
+        None),
+    'chromium': PackageInfo(
+        'org.chromium.chrome',
+        'com.google.android.apps.chrome.Main',
+        '/data/local/chrome-command-line',
+        'chrome_devtools_remote',
+        'org.chromium.chrome.tests'),
+}
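+
+
+# A minimal lookup sketch (illustrative only): 'chrome_stable' is one of the
+# keys defined above, and the helper name is an assumption.
+def _ExampleLookup(package_key='chrome_stable'):
+  info = PACKAGE_INFO[package_key]
+  # PackageInfo is a namedtuple, so fields are available by attribute.
+  return info.package, info.devtools_socket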
diff --git a/catapult/devil/devil/android/constants/file_system.py b/catapult/devil/devil/android/constants/file_system.py
new file mode 100644
index 0000000..bffec61
--- /dev/null
+++ b/catapult/devil/devil/android/constants/file_system.py
@@ -0,0 +1,5 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+TEST_EXECUTABLE_DIR = '/data/local/tmp'
diff --git a/catapult/devil/devil/android/decorators.py b/catapult/devil/devil/android/decorators.py
new file mode 100644
index 0000000..3844b49
--- /dev/null
+++ b/catapult/devil/devil/android/decorators.py
@@ -0,0 +1,176 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Function/method decorators that provide timeout and retry logic.
+"""
+
+import functools
+import itertools
+import sys
+
+from devil.android import device_errors
+from devil.utils import cmd_helper
+from devil.utils import reraiser_thread
+from devil.utils import timeout_retry
+
+DEFAULT_TIMEOUT_ATTR = '_default_timeout'
+DEFAULT_RETRIES_ATTR = '_default_retries'
+
+
+def _TimeoutRetryWrapper(
+    f, timeout_func, retries_func, retry_if_func=timeout_retry.AlwaysRetry,
+    pass_values=False):
+  """ Wraps a funcion with timeout and retry handling logic.
+
+  Args:
+    f: The function to wrap.
+    timeout_func: A callable that returns the timeout value.
+    retries_func: A callable that returns the retries value.
+    retry_if_func: A unary callable that takes an exception and returns
+                   whether the failure should be retried.
+    pass_values: If True, passes the values returned by |timeout_func| and
+                 |retries_func| to the wrapped function as 'timeout' and
+                 'retries' kwargs, respectively.
+  Returns:
+    The wrapped function.
+  """
+  @functools.wraps(f)
+  def timeout_retry_wrapper(*args, **kwargs):
+    timeout = timeout_func(*args, **kwargs)
+    retries = retries_func(*args, **kwargs)
+    if pass_values:
+      kwargs['timeout'] = timeout
+      kwargs['retries'] = retries
+
+    @functools.wraps(f)
+    def impl():
+      return f(*args, **kwargs)
+    try:
+      if timeout_retry.CurrentTimeoutThreadGroup():
+        # Don't wrap if there's already an outer timeout thread.
+        return impl()
+      else:
+        desc = '%s(%s)' % (f.__name__, ', '.join(itertools.chain(
+            (str(a) for a in args),
+            ('%s=%s' % (k, str(v)) for k, v in kwargs.iteritems()))))
+        return timeout_retry.Run(impl, timeout, retries, desc=desc,
+                                 retry_if_func=retry_if_func)
+    except reraiser_thread.TimeoutError as e:
+      raise device_errors.CommandTimeoutError(str(e)), None, (
+          sys.exc_info()[2])
+    except cmd_helper.TimeoutError as e:
+      raise device_errors.CommandTimeoutError(str(e)), None, (
+          sys.exc_info()[2])
+  return timeout_retry_wrapper
+
+
+def WithTimeoutAndRetries(f):
+  """A decorator that handles timeouts and retries.
+
+  'timeout' and 'retries' kwargs must be passed to the function.
+
+  Args:
+    f: The function to decorate.
+  Returns:
+    The decorated function.
+  """
+  get_timeout = lambda *a, **kw: kw['timeout']
+  get_retries = lambda *a, **kw: kw['retries']
+  return _TimeoutRetryWrapper(f, get_timeout, get_retries)
+
+
+def WithTimeoutAndConditionalRetries(retry_if_func):
+  """Returns a decorator that handles timeouts and, in some cases, retries.
+
+  'timeout' and 'retries' kwargs must be passed to the function.
+
+  Args:
+    retry_if_func: A unary callable that takes an exception and returns
+      whether failures should be retried.
+  Returns:
+    The actual decorator.
+  """
+  def decorator(f):
+    get_timeout = lambda *a, **kw: kw['timeout']
+    get_retries = lambda *a, **kw: kw['retries']
+    return _TimeoutRetryWrapper(
+        f, get_timeout, get_retries, retry_if_func=retry_if_func)
+  return decorator
+
+
+def WithExplicitTimeoutAndRetries(timeout, retries):
+  """Returns a decorator that handles timeouts and retries.
+
+  The provided |timeout| and |retries| values are always used.
+
+  Args:
+    timeout: The number of seconds to wait for the decorated function to
+             return. Always used.
+    retries: The number of times the decorated function should be retried on
+             failure. Always used.
+  Returns:
+    The actual decorator.
+  """
+  def decorator(f):
+    get_timeout = lambda *a, **kw: timeout
+    get_retries = lambda *a, **kw: retries
+    return _TimeoutRetryWrapper(f, get_timeout, get_retries)
+  return decorator
+
+
+def WithTimeoutAndRetriesDefaults(default_timeout, default_retries):
+  """Returns a decorator that handles timeouts and retries.
+
+  The provided |default_timeout| and |default_retries| values are used only
+  if timeout and retries values are not provided.
+
+  Args:
+    default_timeout: The number of seconds to wait for the decorated function
+                     to return. Only used if a 'timeout' kwarg is not passed
+                     to the decorated function.
+    default_retries: The number of times the decorated function should be
+                     retried on failure. Only used if a 'retries' kwarg is not
+                     passed to the decorated function.
+  Returns:
+    The actual decorator.
+  """
+  def decorator(f):
+    get_timeout = lambda *a, **kw: kw.get('timeout', default_timeout)
+    get_retries = lambda *a, **kw: kw.get('retries', default_retries)
+    return _TimeoutRetryWrapper(f, get_timeout, get_retries, pass_values=True)
+  return decorator
+
+
+def WithTimeoutAndRetriesFromInstance(
+    default_timeout_name=DEFAULT_TIMEOUT_ATTR,
+    default_retries_name=DEFAULT_RETRIES_ATTR,
+    min_default_timeout=None):
+  """Returns a decorator that handles timeouts and retries.
+
+  The provided |default_timeout_name| and |default_retries_name| are used to
+  get the default timeout value and the default retries value from the object
+  instance if timeout and retries values are not provided.
+
+  Note that this should only be used to decorate methods, not functions.
+
+  Args:
+    default_timeout_name: The name of the default timeout attribute of the
+                          instance.
+    default_retries_name: The name of the default retries attribute of the
+                          instance.
+    min_default_timeout: Minimum timeout to be used when using the instance
+                         timeout.
+  Returns:
+    The actual decorator.
+  """
+  def decorator(f):
+    def get_timeout(inst, *_args, **kwargs):
+      ret = getattr(inst, default_timeout_name)
+      if min_default_timeout is not None:
+        ret = max(min_default_timeout, ret)
+      return kwargs.get('timeout', ret)
+
+    def get_retries(inst, *_args, **kwargs):
+      return kwargs.get('retries', getattr(inst, default_retries_name))
+    return _TimeoutRetryWrapper(f, get_timeout, get_retries, pass_values=True)
+  return decorator
+
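+
+# Minimal usage sketches for the decorators above (illustrative only; the
+# names and the timeout/retries values below are assumptions).
+@WithTimeoutAndRetriesDefaults(default_timeout=30, default_retries=3)
+def _ExampleWithDefaults(timeout=None, retries=None):
+  # Because the defaults decorator passes values through, the resolved
+  # timeout/retries arrive here as kwargs, e.g.:
+  #   _ExampleWithDefaults()            -> (30, 3)
+  #   _ExampleWithDefaults(timeout=5)   -> (5, 3)
+  return (timeout, retries)
+
+
+class _ExampleClient(object):
+  # WithTimeoutAndRetriesFromInstance reads these attributes by default
+  # (see DEFAULT_TIMEOUT_ATTR / DEFAULT_RETRIES_ATTR above).
+  _default_timeout = 10
+  _default_retries = 1
+
+  @WithTimeoutAndRetriesFromInstance()
+  def Ping(self, timeout=None, retries=None):
+    return (timeout, retries)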
diff --git a/catapult/devil/devil/android/decorators_test.py b/catapult/devil/devil/android/decorators_test.py
new file mode 100644
index 0000000..f60953e
--- /dev/null
+++ b/catapult/devil/devil/android/decorators_test.py
@@ -0,0 +1,332 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Unit tests for decorators.py.
+"""
+
+# pylint: disable=W0613
+
+import time
+import traceback
+import unittest
+
+from devil.android import decorators
+from devil.android import device_errors
+from devil.utils import reraiser_thread
+
+_DEFAULT_TIMEOUT = 30
+_DEFAULT_RETRIES = 3
+
+
+class DecoratorsTest(unittest.TestCase):
+  _decorated_function_called_count = 0
+
+  def testFunctionDecoratorDoesTimeouts(self):
+    """Tests that the base decorator handles the timeout logic."""
+    DecoratorsTest._decorated_function_called_count = 0
+
+    @decorators.WithTimeoutAndRetries
+    def alwaysTimesOut(timeout=None, retries=None):
+      DecoratorsTest._decorated_function_called_count += 1
+      time.sleep(100)
+
+    start_time = time.time()
+    with self.assertRaises(device_errors.CommandTimeoutError):
+      alwaysTimesOut(timeout=1, retries=0)
+    elapsed_time = time.time() - start_time
+    self.assertTrue(elapsed_time >= 1)
+    self.assertEquals(1, DecoratorsTest._decorated_function_called_count)
+
+  def testFunctionDecoratorDoesRetries(self):
+    """Tests that the base decorator handles the retries logic."""
+    DecoratorsTest._decorated_function_called_count = 0
+
+    @decorators.WithTimeoutAndRetries
+    def alwaysRaisesCommandFailedError(timeout=None, retries=None):
+      DecoratorsTest._decorated_function_called_count += 1
+      raise device_errors.CommandFailedError('testCommand failed')
+
+    with self.assertRaises(device_errors.CommandFailedError):
+      alwaysRaisesCommandFailedError(timeout=30, retries=10)
+    self.assertEquals(11, DecoratorsTest._decorated_function_called_count)
+
+  def testFunctionDecoratorRequiresParams(self):
+    """Tests that the base decorator requires timeout and retries params."""
+    @decorators.WithTimeoutAndRetries
+    def requiresExplicitTimeoutAndRetries(timeout=None, retries=None):
+      return (timeout, retries)
+
+    with self.assertRaises(KeyError):
+      requiresExplicitTimeoutAndRetries()
+    with self.assertRaises(KeyError):
+      requiresExplicitTimeoutAndRetries(timeout=10)
+    with self.assertRaises(KeyError):
+      requiresExplicitTimeoutAndRetries(retries=0)
+    expected_timeout = 10
+    expected_retries = 1
+    (actual_timeout, actual_retries) = (
+        requiresExplicitTimeoutAndRetries(timeout=expected_timeout,
+                                          retries=expected_retries))
+    self.assertEquals(expected_timeout, actual_timeout)
+    self.assertEquals(expected_retries, actual_retries)
+
+  def testFunctionDecoratorTranslatesReraiserExceptions(self):
+    """Tests that the explicit decorator translates reraiser exceptions."""
+    @decorators.WithTimeoutAndRetries
+    def alwaysRaisesProvidedException(exception, timeout=None, retries=None):
+      raise exception
+
+    exception_desc = 'Reraiser thread timeout error'
+    with self.assertRaises(device_errors.CommandTimeoutError) as e:
+      alwaysRaisesProvidedException(
+          reraiser_thread.TimeoutError(exception_desc),
+          timeout=10, retries=1)
+    self.assertEquals(exception_desc, str(e.exception))
+
+  def testConditionalRetriesDecoratorRetries(self):
+    def do_not_retry_no_adb_error(exc):
+      return not isinstance(exc, device_errors.NoAdbError)
+
+    actual_tries = [0]
+
+    @decorators.WithTimeoutAndConditionalRetries(do_not_retry_no_adb_error)
+    def alwaysRaisesCommandFailedError(timeout=None, retries=None):
+      actual_tries[0] += 1
+      raise device_errors.CommandFailedError('Command failed :(')
+
+    with self.assertRaises(device_errors.CommandFailedError):
+      alwaysRaisesCommandFailedError(timeout=10, retries=10)
+    self.assertEquals(11, actual_tries[0])
+
+  def testConditionalRetriesDecoratorDoesntRetry(self):
+    def do_not_retry_no_adb_error(exc):
+      return not isinstance(exc, device_errors.NoAdbError)
+
+    actual_tries = [0]
+
+    @decorators.WithTimeoutAndConditionalRetries(do_not_retry_no_adb_error)
+    def alwaysRaisesNoAdbError(timeout=None, retries=None):
+      actual_tries[0] += 1
+      raise device_errors.NoAdbError()
+
+    with self.assertRaises(device_errors.NoAdbError):
+      alwaysRaisesNoAdbError(timeout=10, retries=10)
+    self.assertEquals(1, actual_tries[0])
+
+  def testDefaultsFunctionDecoratorDoesTimeouts(self):
+    """Tests that the defaults decorator handles timeout logic."""
+    DecoratorsTest._decorated_function_called_count = 0
+
+    @decorators.WithTimeoutAndRetriesDefaults(1, 0)
+    def alwaysTimesOut(timeout=None, retries=None):
+      DecoratorsTest._decorated_function_called_count += 1
+      time.sleep(100)
+
+    start_time = time.time()
+    with self.assertRaises(device_errors.CommandTimeoutError):
+      alwaysTimesOut()
+    elapsed_time = time.time() - start_time
+    self.assertTrue(elapsed_time >= 1)
+    self.assertEquals(1, DecoratorsTest._decorated_function_called_count)
+
+    DecoratorsTest._decorated_function_called_count = 0
+    start_time = time.time()
+    with self.assertRaises(device_errors.CommandTimeoutError):
+      alwaysTimesOut(timeout=2)
+    elapsed_time = time.time() - start_time
+    self.assertTrue(elapsed_time >= 2)
+    self.assertEquals(1, DecoratorsTest._decorated_function_called_count)
+
+  def testDefaultsFunctionDecoratorDoesRetries(self):
+    """Tests that the defaults decorator handles retries logic."""
+    DecoratorsTest._decorated_function_called_count = 0
+
+    @decorators.WithTimeoutAndRetriesDefaults(30, 10)
+    def alwaysRaisesCommandFailedError(timeout=None, retries=None):
+      DecoratorsTest._decorated_function_called_count += 1
+      raise device_errors.CommandFailedError('testCommand failed')
+
+    with self.assertRaises(device_errors.CommandFailedError):
+      alwaysRaisesCommandFailedError()
+    self.assertEquals(11, DecoratorsTest._decorated_function_called_count)
+
+    DecoratorsTest._decorated_function_called_count = 0
+    with self.assertRaises(device_errors.CommandFailedError):
+      alwaysRaisesCommandFailedError(retries=5)
+    self.assertEquals(6, DecoratorsTest._decorated_function_called_count)
+
+  def testDefaultsFunctionDecoratorPassesValues(self):
+    """Tests that the defaults decorator passes timeout and retries kwargs."""
+    @decorators.WithTimeoutAndRetriesDefaults(30, 10)
+    def alwaysReturnsTimeouts(timeout=None, retries=None):
+      return timeout
+
+    self.assertEquals(30, alwaysReturnsTimeouts())
+    self.assertEquals(120, alwaysReturnsTimeouts(timeout=120))
+
+    @decorators.WithTimeoutAndRetriesDefaults(30, 10)
+    def alwaysReturnsRetries(timeout=None, retries=None):
+      return retries
+
+    self.assertEquals(10, alwaysReturnsRetries())
+    self.assertEquals(1, alwaysReturnsRetries(retries=1))
+
+  def testDefaultsFunctionDecoratorTranslatesReraiserExceptions(self):
+    """Tests that the explicit decorator translates reraiser exceptions."""
+    @decorators.WithTimeoutAndRetriesDefaults(30, 10)
+    def alwaysRaisesProvidedException(exception, timeout=None, retries=None):
+      raise exception
+
+    exception_desc = 'Reraiser thread timeout error'
+    with self.assertRaises(device_errors.CommandTimeoutError) as e:
+      alwaysRaisesProvidedException(
+          reraiser_thread.TimeoutError(exception_desc))
+    self.assertEquals(exception_desc, str(e.exception))
+
+  def testExplicitFunctionDecoratorDoesTimeouts(self):
+    """Tests that the explicit decorator handles timeout logic."""
+    DecoratorsTest._decorated_function_called_count = 0
+
+    @decorators.WithExplicitTimeoutAndRetries(1, 0)
+    def alwaysTimesOut():
+      DecoratorsTest._decorated_function_called_count += 1
+      time.sleep(100)
+
+    start_time = time.time()
+    with self.assertRaises(device_errors.CommandTimeoutError):
+      alwaysTimesOut()
+    elapsed_time = time.time() - start_time
+    self.assertTrue(elapsed_time >= 1)
+    self.assertEquals(1, DecoratorsTest._decorated_function_called_count)
+
+  def testExplicitFunctionDecoratorDoesRetries(self):
+    """Tests that the explicit decorator handles retries logic."""
+    DecoratorsTest._decorated_function_called_count = 0
+
+    @decorators.WithExplicitTimeoutAndRetries(30, 10)
+    def alwaysRaisesCommandFailedError():
+      DecoratorsTest._decorated_function_called_count += 1
+      raise device_errors.CommandFailedError('testCommand failed')
+
+    with self.assertRaises(device_errors.CommandFailedError):
+      alwaysRaisesCommandFailedError()
+    self.assertEquals(11, DecoratorsTest._decorated_function_called_count)
+
+  def testExplicitDecoratorTranslatesReraiserExceptions(self):
+    """Tests that the explicit decorator translates reraiser exceptions."""
+    @decorators.WithExplicitTimeoutAndRetries(30, 10)
+    def alwaysRaisesProvidedException(exception):
+      raise exception
+
+    exception_desc = 'Reraiser thread timeout error'
+    with self.assertRaises(device_errors.CommandTimeoutError) as e:
+      alwaysRaisesProvidedException(
+          reraiser_thread.TimeoutError(exception_desc))
+    self.assertEquals(exception_desc, str(e.exception))
+
+  class _MethodDecoratorTestObject(object):
+    """An object suitable for testing the method decorator."""
+
+    def __init__(self, test_case, default_timeout=_DEFAULT_TIMEOUT,
+                 default_retries=_DEFAULT_RETRIES):
+      self._test_case = test_case
+      self.default_timeout = default_timeout
+      self.default_retries = default_retries
+      self.function_call_counters = {
+          'alwaysRaisesCommandFailedError': 0,
+          'alwaysTimesOut': 0,
+          'requiresExplicitTimeoutAndRetries': 0,
+      }
+
+    @decorators.WithTimeoutAndRetriesFromInstance(
+        'default_timeout', 'default_retries')
+    def alwaysTimesOut(self, timeout=None, retries=None):
+      self.function_call_counters['alwaysTimesOut'] += 1
+      time.sleep(100)
+      self._test_case.assertFalse(True, msg='Failed to time out?')
+
+    @decorators.WithTimeoutAndRetriesFromInstance(
+        'default_timeout', 'default_retries')
+    def alwaysRaisesCommandFailedError(self, timeout=None, retries=None):
+      self.function_call_counters['alwaysRaisesCommandFailedError'] += 1
+      raise device_errors.CommandFailedError('testCommand failed')
+
+    # pylint: disable=no-self-use
+
+    @decorators.WithTimeoutAndRetriesFromInstance(
+        'default_timeout', 'default_retries')
+    def alwaysReturnsTimeout(self, timeout=None, retries=None):
+      return timeout
+
+    @decorators.WithTimeoutAndRetriesFromInstance(
+        'default_timeout', 'default_retries', min_default_timeout=100)
+    def alwaysReturnsTimeoutWithMin(self, timeout=None, retries=None):
+      return timeout
+
+    @decorators.WithTimeoutAndRetriesFromInstance(
+        'default_timeout', 'default_retries')
+    def alwaysReturnsRetries(self, timeout=None, retries=None):
+      return retries
+
+    @decorators.WithTimeoutAndRetriesFromInstance(
+        'default_timeout', 'default_retries')
+    def alwaysRaisesProvidedException(self, exception, timeout=None,
+                                      retries=None):
+      raise exception
+
+    # pylint: enable=no-self-use
+
+  def testMethodDecoratorDoesTimeout(self):
+    """Tests that the method decorator handles timeout logic."""
+    test_obj = self._MethodDecoratorTestObject(self)
+    start_time = time.time()
+    with self.assertRaises(device_errors.CommandTimeoutError):
+      try:
+        test_obj.alwaysTimesOut(timeout=1, retries=0)
+      except:
+        traceback.print_exc()
+        raise
+    elapsed_time = time.time() - start_time
+    self.assertTrue(elapsed_time >= 1)
+    self.assertEquals(1, test_obj.function_call_counters['alwaysTimesOut'])
+
+  def testMethodDecoratorDoesRetries(self):
+    """Tests that the method decorator handles retries logic."""
+    test_obj = self._MethodDecoratorTestObject(self)
+    with self.assertRaises(device_errors.CommandFailedError):
+      try:
+        test_obj.alwaysRaisesCommandFailedError(retries=10)
+      except:
+        traceback.print_exc()
+        raise
+    self.assertEquals(
+        11, test_obj.function_call_counters['alwaysRaisesCommandFailedError'])
+
+  def testMethodDecoratorPassesValues(self):
+    """Tests that the method decorator passes timeout and retries kwargs."""
+    test_obj = self._MethodDecoratorTestObject(
+        self, default_timeout=42, default_retries=31)
+    self.assertEquals(42, test_obj.alwaysReturnsTimeout())
+    self.assertEquals(41, test_obj.alwaysReturnsTimeout(timeout=41))
+    self.assertEquals(31, test_obj.alwaysReturnsRetries())
+    self.assertEquals(32, test_obj.alwaysReturnsRetries(retries=32))
+
+  def testMethodDecoratorUsesMinimumTimeout(self):
+    """Tests that the method decorator respects min_default_timeout."""
+    test_obj = self._MethodDecoratorTestObject(
+        self, default_timeout=42, default_retries=31)
+    self.assertEquals(100, test_obj.alwaysReturnsTimeoutWithMin())
+    self.assertEquals(41, test_obj.alwaysReturnsTimeoutWithMin(timeout=41))
+
+  def testMethodDecoratorTranslatesReraiserExceptions(self):
+    """Tests that the method decorator translates reraiser exceptions."""
+    test_obj = self._MethodDecoratorTestObject(self)
+
+    exception_desc = 'Reraiser thread timeout error'
+    with self.assertRaises(device_errors.CommandTimeoutError) as e:
+      test_obj.alwaysRaisesProvidedException(
+          reraiser_thread.TimeoutError(exception_desc))
+    self.assertEquals(exception_desc, str(e.exception))
+
+if __name__ == '__main__':
+  unittest.main(verbosity=2)
+
diff --git a/catapult/devil/devil/android/device_blacklist.py b/catapult/devil/devil/android/device_blacklist.py
new file mode 100644
index 0000000..94f9cbe
--- /dev/null
+++ b/catapult/devil/devil/android/device_blacklist.py
@@ -0,0 +1,71 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import logging
+import os
+import threading
+import time
+
+
+class Blacklist(object):
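+  """Keeps track of bad devices in a JSON file on disk.
+
+  Illustrative usage sketch (the path and serial below are arbitrary):
+    blacklist = Blacklist('/tmp/bad_devices.json')
+    blacklist.Extend(['0123456789abcdef'], reason='unauthorized')
+    bad_devices = blacklist.Read()
+  """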
+
+  def __init__(self, path):
+    self._blacklist_lock = threading.RLock()
+    self._path = path
+
+  def Read(self):
+    """Reads the blacklist from the blacklist file.
+
+    Returns:
+      A dict containing bad devices.
+    """
+    with self._blacklist_lock:
+      if not os.path.exists(self._path):
+        return dict()
+
+      with open(self._path, 'r') as f:
+        blacklist = json.load(f)
+      if not isinstance(blacklist, dict):
+        logging.warning('Ignoring %s: %s (a dict was expected instead)',
+                        self._path, blacklist)
+        blacklist = dict()
+      return blacklist
+
+  def Write(self, blacklist):
+    """Writes the provided blacklist to the blacklist file.
+
+    Args:
+      blacklist: dict of bad devices (in the format returned by Read) to
+        write to the blacklist file.
+    """
+    with self._blacklist_lock:
+      with open(self._path, 'w') as f:
+        json.dump(blacklist, f)
+
+  def Extend(self, devices, reason='unknown'):
+    """Adds devices to blacklist file.
+
+    Args:
+      devices: list of bad devices to be added to the blacklist file.
+      reason: string specifying the reason for blacklisting
+        (e.g. 'unauthorized').
+    """
+    timestamp = time.time()
+    event_info = {
+        'timestamp': timestamp,
+        'reason': reason,
+    }
+    device_dicts = {device: event_info for device in devices}
+    logging.info('Adding %s to blacklist %s for reason: %s',
+                 ','.join(devices), self._path, reason)
+    with self._blacklist_lock:
+      blacklist = self.Read()
+      blacklist.update(device_dicts)
+      self.Write(blacklist)
+
+  def Reset(self):
+    """Erases the blacklist file if it exists."""
+    logging.info('Resetting blacklist %s', self._path)
+    with self._blacklist_lock:
+      if os.path.exists(self._path):
+        os.remove(self._path)
diff --git a/catapult/devil/devil/android/device_errors.py b/catapult/devil/devil/android/device_errors.py
new file mode 100644
index 0000000..b1b8890
--- /dev/null
+++ b/catapult/devil/devil/android/device_errors.py
@@ -0,0 +1,124 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Exception classes raised by AdbWrapper and DeviceUtils.
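+
+Callers typically catch one of the base exception types. An illustrative
+sketch (assuming |device| is a DeviceUtils instance):
+
+  try:
+    device.RunShellCommand(['ls', '/root'], check_return=True)
+  except AdbCommandFailedError:
+    pass  # adbd is not running as root.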
+"""
+
+from devil import base_error
+from devil.utils import cmd_helper
+
+
+class CommandFailedError(base_error.BaseError):
+  """Exception for command failures."""
+
+  def __init__(self, message, device_serial=None):
+    if device_serial is not None:
+      message = '(device: %s) %s' % (device_serial, message)
+    self.device_serial = device_serial
+    super(CommandFailedError, self).__init__(message)
+
+
+class _BaseCommandFailedError(CommandFailedError):
+  """Base Exception for adb and fastboot command failures."""
+
+  def __init__(self, args, output, status=None, device_serial=None,
+               message=None):
+    self.args = args
+    self.output = output
+    self.status = status
+    if not message:
+      adb_cmd = ' '.join(cmd_helper.SingleQuote(arg) for arg in self.args)
+      message = ['adb %s: failed ' % adb_cmd]
+      if status:
+        message.append('with exit status %s ' % self.status)
+      if output:
+        message.append('and output:\n')
+        message.extend('- %s\n' % line for line in output.splitlines())
+      else:
+        message.append('and no output.')
+      message = ''.join(message)
+    super(_BaseCommandFailedError, self).__init__(message, device_serial)
+
+
+class AdbCommandFailedError(_BaseCommandFailedError):
+  """Exception for adb command failures."""
+
+  def __init__(self, args, output, status=None, device_serial=None,
+               message=None):
+    super(AdbCommandFailedError, self).__init__(
+        args, output, status=status, message=message,
+        device_serial=device_serial)
+
+
+class FastbootCommandFailedError(_BaseCommandFailedError):
+  """Exception for fastboot command failures."""
+
+  def __init__(self, args, output, status=None, device_serial=None,
+               message=None):
+    super(FastbootCommandFailedError, self).__init__(
+        args, output, status=status, message=message,
+        device_serial=device_serial)
+
+
+class DeviceVersionError(CommandFailedError):
+  """Exception for device version failures."""
+
+  def __init__(self, message, device_serial=None):
+    super(DeviceVersionError, self).__init__(message, device_serial)
+
+
+class AdbShellCommandFailedError(AdbCommandFailedError):
+  """Exception for shell command failures run via adb."""
+
+  def __init__(self, command, output, status, device_serial=None):
+    self.command = command
+    message = ['shell command run via adb failed on the device:\n',
+               '  command: %s\n' % command]
+    message.append('  exit status: %s\n' % status)
+    if output:
+      message.append('  output:\n')
+      if isinstance(output, basestring):
+        output_lines = output.splitlines()
+      else:
+        output_lines = output
+      message.extend('  - %s\n' % line for line in output_lines)
+    else:
+      message.append("  output: ''\n")
+    message = ''.join(message)
+    super(AdbShellCommandFailedError, self).__init__(
+      ['shell', command], output, status, device_serial, message)
+
+
+class CommandTimeoutError(base_error.BaseError):
+  """Exception for command timeouts."""
+  pass
+
+
+class DeviceUnreachableError(base_error.BaseError):
+  """Exception for device unreachable failures."""
+  pass
+
+
+class NoDevicesError(base_error.BaseError):
+  """Exception for having no devices attached."""
+
+  def __init__(self):
+    super(NoDevicesError, self).__init__(
+        'No devices attached.', is_infra_error=True)
+
+
+class NoAdbError(base_error.BaseError):
+  """Exception for being unable to find ADB."""
+
+  def __init__(self, msg=None):
+    super(NoAdbError, self).__init__(
+        msg or 'Unable to find adb.', is_infra_error=True)
+
+
+class DeviceChargingError(CommandFailedError):
+  """Exception for device charging errors."""
+
+  def __init__(self, message, device_serial=None):
+    super(DeviceChargingError, self).__init__(message, device_serial)
diff --git a/catapult/devil/devil/android/device_list.py b/catapult/devil/devil/android/device_list.py
new file mode 100644
index 0000000..0eb6acb
--- /dev/null
+++ b/catapult/devil/devil/android/device_list.py
@@ -0,0 +1,30 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A module to keep track of devices across builds."""
+
+import os
+
+LAST_DEVICES_FILENAME = '.last_devices'
+LAST_MISSING_DEVICES_FILENAME = '.last_missing'
+
+
+def GetPersistentDeviceList(file_name):
+  """Returns a list of devices.
+
+  Args:
+    file_name: the file name containing a list of devices.
+
+  Returns: List of device serial numbers that were on the bot.
+  """
+  with open(file_name) as f:
+    return f.read().splitlines()
+
+
+def WritePersistentDeviceList(file_name, device_list):
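+  """Writes a de-duplicated list of device serials to |file_name|.
+
+  Creates the containing directory if needed. Illustrative usage sketch
+  (paths and serials are arbitrary):
+    WritePersistentDeviceList('/tmp/devices/.last_devices',
+                              ['0123456789abcdef'])
+
+  Args:
+    file_name: path of the file to write the list of devices to.
+    device_list: list of device serial numbers.
+  """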
+  path = os.path.dirname(file_name)
+  if not os.path.exists(path):
+    os.makedirs(path)
+  with open(file_name, 'w') as f:
+    f.write('\n'.join(set(device_list)))
diff --git a/catapult/devil/devil/android/device_signal.py b/catapult/devil/devil/android/device_signal.py
new file mode 100644
index 0000000..2cec46d
--- /dev/null
+++ b/catapult/devil/devil/android/device_signal.py
@@ -0,0 +1,41 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Defines constants for signals that should be supported on devices.
+
+Note: Obtained by running `kill -l` on a user device.
+"""
+
+
+SIGHUP = 1  # Hangup
+SIGINT = 2  # Interrupt
+SIGQUIT = 3  # Quit
+SIGILL = 4  # Illegal instruction
+SIGTRAP = 5  # Trap
+SIGABRT = 6  # Aborted
+SIGBUS = 7  # Bus error
+SIGFPE = 8  # Floating point exception
+SIGKILL = 9  # Killed
+SIGUSR1 = 10  # User signal 1
+SIGSEGV = 11  # Segmentation fault
+SIGUSR2 = 12  # User signal 2
+SIGPIPE = 13  # Broken pipe
+SIGALRM = 14  # Alarm clock
+SIGTERM = 15  # Terminated
+SIGSTKFLT = 16  # Stack fault
+SIGCHLD = 17  # Child exited
+SIGCONT = 18  # Continue
+SIGSTOP = 19  # Stopped (signal)
+SIGTSTP = 20  # Stopped
+SIGTTIN = 21  # Stopped (tty input)
+SIGTTOU = 22  # Stopped (tty output)
+SIGURG = 23  # Urgent I/O condition
+SIGXCPU = 24  # CPU time limit exceeded
+SIGXFSZ = 25  # File size limit exceeded
+SIGVTALRM = 26  # Virtual timer expired
+SIGPROF = 27  # Profiling timer expired
+SIGWINCH = 28  # Window size changed
+SIGIO = 29  # I/O possible
+SIGPWR = 30  # Power failure
+SIGSYS = 31  # Bad system call
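+
+# Illustrative usage sketch (assumes |device| is a devil DeviceUtils
+# instance):
+#   device.KillAll('some.process.name', signum=SIGTERM, blocking=True)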
diff --git a/catapult/devil/devil/android/device_temp_file.py b/catapult/devil/devil/android/device_temp_file.py
new file mode 100644
index 0000000..75488c5
--- /dev/null
+++ b/catapult/devil/devil/android/device_temp_file.py
@@ -0,0 +1,56 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A temp file that automatically gets pushed and deleted from a device."""
+
+# pylint: disable=W0622
+
+import posixpath
+import random
+import threading
+
+from devil.android import device_errors
+from devil.utils import cmd_helper
+
+
+class DeviceTempFile(object):
+
+  def __init__(self, adb, suffix='', prefix='temp_file', dir='/data/local/tmp'):
+    """Find an unused temporary file path on the device.
+
+    When this object is closed, the file will be deleted on the device.
+
+    Args:
+      adb: An instance of AdbWrapper
+      suffix: The suffix of the name of the temp file.
+      prefix: The prefix of the name of the temp file.
+      dir: The directory on the device in which to place the temp file.
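+
+    Example (illustrative; assumes |adb| is an AdbWrapper instance and the
+    local path is arbitrary):
+      with DeviceTempFile(adb, suffix='.txt') as tmp:
+        adb.Push('/some/local/file', tmp.name)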
+    """
+    self._adb = adb
+    # Python's random module uses 52-bit numbers according to its docs.
+    random_hex = hex(random.randint(0, 2 ** 52))[2:]
+    self.name = posixpath.join(dir, '%s-%s%s' % (prefix, random_hex, suffix))
+    self.name_quoted = cmd_helper.SingleQuote(self.name)
+
+  def close(self):
+    """Deletes the temporary file from the device."""
+    # ignore exception if the file is already gone.
+    def delete_temporary_file():
+      try:
+        self._adb.Shell('rm -f %s' % self.name_quoted, expect_status=None)
+      except device_errors.AdbCommandFailedError:
+        # The file may not exist on Android versions that lack 'rm -f'
+        # support (ICS).
+        pass
+
+    # It shouldn't matter when the temp file gets deleted, so do so
+    # asynchronously.
+    threading.Thread(
+        target=delete_temporary_file,
+        name='delete_temporary_file(%s)' % self._adb.GetDeviceSerial()).start()
+
+  def __enter__(self):
+    return self
+
+  def __exit__(self, type, value, traceback):
+    self.close()
diff --git a/catapult/devil/devil/android/device_utils.py b/catapult/devil/devil/android/device_utils.py
new file mode 100644
index 0000000..5cea40b
--- /dev/null
+++ b/catapult/devil/devil/android/device_utils.py
@@ -0,0 +1,2180 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Provides a variety of device interactions based on adb.
+
+Eventually, this will be based on adb_wrapper.
+"""
+# pylint: disable=unused-argument
+
+import collections
+import itertools
+import json
+import logging
+import multiprocessing
+import os
+import posixpath
+import re
+import shutil
+import tempfile
+import time
+import zipfile
+
+from devil import base_error
+from devil import devil_env
+from devil.utils import cmd_helper
+from devil.android import apk_helper
+from devil.android import device_signal
+from devil.android import decorators
+from devil.android import device_errors
+from devil.android import device_temp_file
+from devil.android import install_commands
+from devil.android import logcat_monitor
+from devil.android import md5sum
+from devil.android.sdk import adb_wrapper
+from devil.android.sdk import gce_adb_wrapper
+from devil.android.sdk import intent
+from devil.android.sdk import keyevent
+from devil.android.sdk import split_select
+from devil.android.sdk import version_codes
+from devil.utils import host_utils
+from devil.utils import parallelizer
+from devil.utils import reraiser_thread
+from devil.utils import timeout_retry
+from devil.utils import zip_utils
+
+_DEFAULT_TIMEOUT = 30
+_DEFAULT_RETRIES = 3
+
+# A sentinel object for default values
+# TODO(jbudorick,perezju): revisit how default values are handled by
+# the timeout_retry decorators.
+DEFAULT = object()
+
+_RESTART_ADBD_SCRIPT = """
+  trap '' HUP
+  trap '' TERM
+  trap '' PIPE
+  function restart() {
+    stop adbd
+    start adbd
+  }
+  restart &
+"""
+
+# Not all permissions can be set.
+_PERMISSIONS_BLACKLIST = [
+    'android.permission.ACCESS_MOCK_LOCATION',
+    'android.permission.ACCESS_NETWORK_STATE',
+    'android.permission.ACCESS_WIFI_STATE',
+    'android.permission.AUTHENTICATE_ACCOUNTS',
+    'android.permission.BLUETOOTH',
+    'android.permission.BLUETOOTH_ADMIN',
+    'android.permission.DOWNLOAD_WITHOUT_NOTIFICATION',
+    'android.permission.INTERNET',
+    'android.permission.MANAGE_ACCOUNTS',
+    'android.permission.MODIFY_AUDIO_SETTINGS',
+    'android.permission.NFC',
+    'android.permission.READ_SYNC_SETTINGS',
+    'android.permission.READ_SYNC_STATS',
+    'android.permission.RECEIVE_BOOT_COMPLETED',
+    'android.permission.RECORD_VIDEO',
+    'android.permission.RUN_INSTRUMENTATION',
+    'android.permission.USE_CREDENTIALS',
+    'android.permission.VIBRATE',
+    'android.permission.WAKE_LOCK',
+    'android.permission.WRITE_SYNC_SETTINGS',
+    'com.android.browser.permission.READ_HISTORY_BOOKMARKS',
+    'com.android.browser.permission.WRITE_HISTORY_BOOKMARKS',
+    'com.android.launcher.permission.INSTALL_SHORTCUT',
+    'com.chrome.permission.DEVICE_EXTRAS',
+    'com.google.android.apps.chrome.permission.C2D_MESSAGE',
+    'com.google.android.apps.chrome.permission.READ_WRITE_BOOKMARK_FOLDERS',
+    'com.google.android.apps.chrome.TOS_ACKED',
+    'com.google.android.c2dm.permission.RECEIVE',
+    'com.google.android.providers.gsf.permission.READ_GSERVICES',
+    'com.sec.enterprise.knox.MDM_CONTENT_PROVIDER',
+    'org.chromium.chrome.permission.C2D_MESSAGE',
+    'org.chromium.chrome.permission.READ_WRITE_BOOKMARK_FOLDERS',
+    'org.chromium.chrome.TOS_ACKED',
+]
+
+_CURRENT_FOCUS_CRASH_RE = re.compile(
+    r'\s*mCurrentFocus.*Application (Error|Not Responding): (\S+)}')
+
+_GETPROP_RE = re.compile(r'\[(.*?)\]: \[(.*?)\]')
+_IPV4_ADDRESS_RE = re.compile(r'([0-9]{1,3}\.){3}[0-9]{1,3}\:[0-9]{4,5}')
+
+
+@decorators.WithExplicitTimeoutAndRetries(
+    _DEFAULT_TIMEOUT, _DEFAULT_RETRIES)
+def GetAVDs():
+  """Returns a list of Android Virtual Devices.
+
+  Returns:
+    A list containing the configured AVDs.
+  """
+  lines = cmd_helper.GetCmdOutput([
+      os.path.join(devil_env.config.LocalPath('android_sdk'),
+                   'tools', 'android'),
+      'list', 'avd']).splitlines()
+  avds = []
+  for line in lines:
+    if 'Name:' not in line:
+      continue
+    key, value = (s.strip() for s in line.split(':', 1))
+    if key == 'Name':
+      avds.append(value)
+  return avds
+
+
+@decorators.WithExplicitTimeoutAndRetries(
+    _DEFAULT_TIMEOUT, _DEFAULT_RETRIES)
+def RestartServer():
+  """Restarts the adb server.
+
+  Raises:
+    CommandFailedError if we fail to kill or restart the server.
+  """
+  def adb_killed():
+    return not adb_wrapper.AdbWrapper.IsServerOnline()
+
+  def adb_started():
+    return adb_wrapper.AdbWrapper.IsServerOnline()
+
+  adb_wrapper.AdbWrapper.KillServer()
+  if not timeout_retry.WaitFor(adb_killed, wait_period=1, max_tries=5):
+    # TODO(perezju): raise an exception after fixing http://crbug.com/442319
+    logging.warning('Failed to kill adb server')
+  adb_wrapper.AdbWrapper.StartServer()
+  if not timeout_retry.WaitFor(adb_started, wait_period=1, max_tries=5):
+    raise device_errors.CommandFailedError('Failed to start adb server')
+
+
+def _GetTimeStamp():
+  """Return a basic ISO 8601 time stamp with the current local time."""
+  return time.strftime('%Y%m%dT%H%M%S', time.localtime())
+
+
+def _JoinLines(lines):
+  # makes sure that the last line is also terminated, and is more memory
+  # efficient than first appending an end-line to each line and then joining
+  # all of them together.
+  return ''.join(s for line in lines for s in (line, '\n'))
+
+
+def _IsGceInstance(serial):
+  return _IPV4_ADDRESS_RE.match(serial)
+
+
+def _CreateAdbWrapper(device):
+  if _IsGceInstance(str(device)):
+    return gce_adb_wrapper.GceAdbWrapper(str(device))
+  else:
+    if isinstance(device, adb_wrapper.AdbWrapper):
+      return device
+    else:
+      return adb_wrapper.AdbWrapper(device)
+
+
+class DeviceUtils(object):
+
+  _MAX_ADB_COMMAND_LENGTH = 512
+  _MAX_ADB_OUTPUT_LENGTH = 32768
+  _LAUNCHER_FOCUSED_RE = re.compile(
+      r'\s*mCurrentFocus.*(Launcher|launcher).*')
+  _VALID_SHELL_VARIABLE = re.compile('^[a-zA-Z_][a-zA-Z0-9_]*$')
+
+  LOCAL_PROPERTIES_PATH = posixpath.join('/', 'data', 'local.prop')
+
+  # Property in /data/local.prop that controls Java assertions.
+  JAVA_ASSERT_PROPERTY = 'dalvik.vm.enableassertions'
+
+  def __init__(self, device, enable_device_files_cache=False,
+               default_timeout=_DEFAULT_TIMEOUT,
+               default_retries=_DEFAULT_RETRIES):
+    """DeviceUtils constructor.
+
+    Args:
+      device: Either a device serial, an existing AdbWrapper instance, or an
+        existing AndroidCommands instance.
+      enable_device_files_cache: For PushChangedFiles(), cache checksums of
+        pushed files rather than recomputing them on a subsequent call.
+      default_timeout: An integer containing the default number of seconds to
+        wait for an operation to complete if no explicit value is provided.
+      default_retries: An integer containing the default number of times an
+        operation should be retried on failure if no explicit value is provided.
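+
+    Example (illustrative; the serial is arbitrary):
+      device = DeviceUtils('0123456789abcdef', default_timeout=60)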
+    """
+    self.adb = None
+    if isinstance(device, basestring):
+      self.adb = _CreateAdbWrapper(device)
+    elif isinstance(device, adb_wrapper.AdbWrapper):
+      self.adb = device
+    else:
+      raise ValueError('Unsupported device value: %r' % device)
+    self._commands_installed = None
+    self._default_timeout = default_timeout
+    self._default_retries = default_retries
+    self._enable_device_files_cache = enable_device_files_cache
+    self._cache = {}
+    self._client_caches = {}
+    assert hasattr(self, decorators.DEFAULT_TIMEOUT_ATTR)
+    assert hasattr(self, decorators.DEFAULT_RETRIES_ATTR)
+
+    self._ClearCache()
+
+  def __eq__(self, other):
+    """Checks whether |other| refers to the same device as |self|.
+
+    Args:
+      other: The object to compare to. This can be a basestring, an instance
+        of adb_wrapper.AdbWrapper, or an instance of DeviceUtils.
+    Returns:
+      Whether |other| refers to the same device as |self|.
+    """
+    return self.adb.GetDeviceSerial() == str(other)
+
+  def __lt__(self, other):
+    """Compares two instances of DeviceUtils.
+
+    This merely compares their serial numbers.
+
+    Args:
+      other: The instance of DeviceUtils to compare to.
+    Returns:
+      Whether |self| is less than |other|.
+    """
+    return self.adb.GetDeviceSerial() < other.adb.GetDeviceSerial()
+
+  def __str__(self):
+    """Returns the device serial."""
+    return self.adb.GetDeviceSerial()
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def IsOnline(self, timeout=None, retries=None):
+    """Checks whether the device is online.
+
+    Args:
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Returns:
+      True if the device is online, False otherwise.
+
+    Raises:
+      CommandTimeoutError on timeout.
+    """
+    try:
+      return self.adb.GetState() == 'device'
+    except base_error.BaseError as exc:
+      logging.info('Failed to get state: %s', exc)
+      return False
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def HasRoot(self, timeout=None, retries=None):
+    """Checks whether or not adbd has root privileges.
+
+    Args:
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Returns:
+      True if adbd has root privileges, False otherwise.
+
+    Raises:
+      CommandTimeoutError on timeout.
+      DeviceUnreachableError on missing device.
+    """
+    try:
+      self.RunShellCommand('ls /root', check_return=True)
+      return True
+    except device_errors.AdbCommandFailedError:
+      return False
+
+  def NeedsSU(self, timeout=DEFAULT, retries=DEFAULT):
+    """Checks whether 'su' is needed to access protected resources.
+
+    Args:
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Returns:
+      True if 'su' is available on the device and is needed to access
+        protected resources; False otherwise, i.e. if 'su' is not available
+        (e.g. because the device has a user build) or is not needed (because
+        adbd already has root privileges).
+
+    Raises:
+      CommandTimeoutError on timeout.
+      DeviceUnreachableError on missing device.
+    """
+    if 'needs_su' not in self._cache:
+      try:
+        self.RunShellCommand(
+            '%s && ! ls /root' % self._Su('ls /root'), check_return=True,
+            timeout=self._default_timeout if timeout is DEFAULT else timeout,
+            retries=self._default_retries if retries is DEFAULT else retries)
+        self._cache['needs_su'] = True
+      except device_errors.AdbCommandFailedError:
+        self._cache['needs_su'] = False
+    return self._cache['needs_su']
+
+  def _Su(self, command):
+    if self.build_version_sdk >= version_codes.MARSHMALLOW:
+      return 'su 0 %s' % command
+    return 'su -c %s' % command
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def EnableRoot(self, timeout=None, retries=None):
+    """Restarts adbd with root privileges.
+
+    Args:
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Raises:
+      CommandFailedError if root could not be enabled.
+      CommandTimeoutError on timeout.
+    """
+    if self.IsUserBuild():
+      raise device_errors.CommandFailedError(
+          'Cannot enable root in user builds.', str(self))
+    if 'needs_su' in self._cache:
+      del self._cache['needs_su']
+    self.adb.Root()
+    self.WaitUntilFullyBooted()
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def IsUserBuild(self, timeout=None, retries=None):
+    """Checks whether or not the device is running a user build.
+
+    Args:
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Returns:
+      True if the device is running a user build, False otherwise (i.e. if
+        it's running a userdebug build).
+
+    Raises:
+      CommandTimeoutError on timeout.
+      DeviceUnreachableError on missing device.
+    """
+    return self.build_type == 'user'
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def GetExternalStoragePath(self, timeout=None, retries=None):
+    """Get the device's path to its SD card.
+
+    Args:
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Returns:
+      The device's path to its SD card.
+
+    Raises:
+      CommandFailedError if the external storage path could not be determined.
+      CommandTimeoutError on timeout.
+      DeviceUnreachableError on missing device.
+    """
+    if 'external_storage' in self._cache:
+      return self._cache['external_storage']
+
+    value = self.RunShellCommand('echo $EXTERNAL_STORAGE',
+                                 single_line=True,
+                                 check_return=True)
+    if not value:
+      raise device_errors.CommandFailedError('$EXTERNAL_STORAGE is not set',
+                                             str(self))
+    self._cache['external_storage'] = value
+    return value
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def GetApplicationPaths(self, package, timeout=None, retries=None):
+    """Get the paths of the installed apks on the device for the given package.
+
+    Args:
+      package: Name of the package.
+
+    Returns:
+      List of paths to the apks on the device for the given package.
+    """
+    return self._GetApplicationPathsInternal(package)
+
+  def _GetApplicationPathsInternal(self, package, skip_cache=False):
+    cached_result = self._cache['package_apk_paths'].get(package)
+    if cached_result is not None and not skip_cache:
+      if package in self._cache['package_apk_paths_to_verify']:
+        self._cache['package_apk_paths_to_verify'].remove(package)
+        # Don't verify an app that is not thought to be installed. We are
+        # only concerned with the case where an app we think is installed
+        # has actually been uninstalled manually.
+        if cached_result and not self.PathExists(cached_result):
+          cached_result = None
+          self._cache['package_apk_checksums'].pop(package, 0)
+      if cached_result is not None:
+        return list(cached_result)
+    # 'pm path' is liable to incorrectly exit with a nonzero status starting
+    # in Lollipop.
+    # TODO(jbudorick): Check if this is fixed as new Android versions are
+    # released to put an upper bound on this.
+    should_check_return = (self.build_version_sdk < version_codes.LOLLIPOP)
+    output = self.RunShellCommand(
+        ['pm', 'path', package], check_return=should_check_return)
+    apks = []
+    for line in output:
+      if not line.startswith('package:'):
+        raise device_errors.CommandFailedError(
+            'pm path returned: %r' % '\n'.join(output), str(self))
+      apks.append(line[len('package:'):])
+    self._cache['package_apk_paths'][package] = list(apks)
+    return apks
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def GetApplicationVersion(self, package, timeout=None, retries=None):
+    """Get the version name of a package installed on the device.
+
+    Args:
+      package: Name of the package.
+
+    Returns:
+      A string with the version name or None if the package is not found
+      on the device.
+    """
+    output = self.RunShellCommand(
+        ['dumpsys', 'package', package], check_return=True)
+    if not output:
+      return None
+    for line in output:
+      line = line.strip()
+      if line.startswith('versionName='):
+        return line[len('versionName='):]
+    raise device_errors.CommandFailedError(
+        'Version name for %s not found in dumpsys output' % package, str(self))
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def GetApplicationDataDirectory(self, package, timeout=None, retries=None):
+    """Get the data directory on the device for the given package.
+
+    Args:
+      package: Name of the package.
+
+    Returns:
+      The package's data directory, or None if the package doesn't exist on the
+      device.
+    """
+    try:
+      output = self._RunPipedShellCommand(
+          'pm dump %s | grep dataDir=' % cmd_helper.SingleQuote(package))
+      for line in output:
+        _, _, dataDir = line.partition('dataDir=')
+        if dataDir:
+          return dataDir
+    except device_errors.CommandFailedError:
+      logging.exception('Could not find data directory for %s', package)
+    return None
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def WaitUntilFullyBooted(self, wifi=False, timeout=None, retries=None):
+    """Wait for the device to fully boot.
+
+    This means waiting for the device to boot, the package manager to be
+    available, and the SD card to be ready. It can optionally mean waiting
+    for wifi to come up, too.
+
+    Args:
+      wifi: A boolean indicating if we should wait for wifi to come up or not.
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Raises:
+      CommandFailedError on failure.
+      CommandTimeoutError if one of the component waits times out.
+      DeviceUnreachableError if the device becomes unresponsive.
+    """
+    def sd_card_ready():
+      try:
+        self.RunShellCommand(['test', '-d', self.GetExternalStoragePath()],
+                             check_return=True)
+        return True
+      except device_errors.AdbCommandFailedError:
+        return False
+
+    def pm_ready():
+      try:
+        return self._GetApplicationPathsInternal('android', skip_cache=True)
+      except device_errors.CommandFailedError:
+        return False
+
+    def boot_completed():
+      return self.GetProp('sys.boot_completed', cache=False) == '1'
+
+    def wifi_enabled():
+      return 'Wi-Fi is enabled' in self.RunShellCommand(['dumpsys', 'wifi'],
+                                                        check_return=False)
+
+    self.adb.WaitForDevice()
+    timeout_retry.WaitFor(sd_card_ready)
+    timeout_retry.WaitFor(pm_ready)
+    timeout_retry.WaitFor(boot_completed)
+    if wifi:
+      timeout_retry.WaitFor(wifi_enabled)
+
+  REBOOT_DEFAULT_TIMEOUT = 10 * _DEFAULT_TIMEOUT
+
+  @decorators.WithTimeoutAndRetriesFromInstance(
+      min_default_timeout=REBOOT_DEFAULT_TIMEOUT)
+  def Reboot(self, block=True, wifi=False, timeout=None, retries=None):
+    """Reboot the device.
+
+    Args:
+      block: A boolean indicating if we should wait for the reboot to complete.
+      wifi: A boolean indicating if we should wait for wifi to be enabled after
+        the reboot. The option has no effect unless |block| is also True.
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Raises:
+      CommandTimeoutError on timeout.
+      DeviceUnreachableError on missing device.
+    """
+    def device_offline():
+      return not self.IsOnline()
+
+    self.adb.Reboot()
+    self._ClearCache()
+    timeout_retry.WaitFor(device_offline, wait_period=1)
+    if block:
+      self.WaitUntilFullyBooted(wifi=wifi)
+
+  INSTALL_DEFAULT_TIMEOUT = 4 * _DEFAULT_TIMEOUT
+
+  @decorators.WithTimeoutAndRetriesFromInstance(
+      min_default_timeout=INSTALL_DEFAULT_TIMEOUT)
+  def Install(self, apk, allow_downgrade=False, reinstall=False,
+              permissions=None, timeout=None, retries=None):
+    """Install an APK.
+
+    Noop if an identical APK is already installed.
+
+    Args:
+      apk: An ApkHelper instance or string containing the path to the APK.
+      allow_downgrade: A boolean indicating if we should allow downgrades.
+      reinstall: A boolean indicating if we should keep any existing app data.
+      permissions: Set of permissions to set. If not set, finds permissions with
+          apk helper. To set no permissions, pass [].
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Raises:
+      CommandFailedError if the installation fails.
+      CommandTimeoutError if the installation times out.
+      DeviceUnreachableError on missing device.
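+
+    Example (illustrative; the path is arbitrary):
+      device.Install('/path/to/app.apk', reinstall=True)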
+    """
+    self._InstallInternal(apk, None, allow_downgrade=allow_downgrade,
+                          reinstall=reinstall, permissions=permissions)
+
+  @decorators.WithTimeoutAndRetriesFromInstance(
+      min_default_timeout=INSTALL_DEFAULT_TIMEOUT)
+  def InstallSplitApk(self, base_apk, split_apks, allow_downgrade=False,
+                      reinstall=False, allow_cached_props=False,
+                      permissions=None, timeout=None, retries=None):
+    """Install a split APK.
+
+    Noop if all of the APK splits are already installed.
+
+    Args:
+      base_apk: An ApkHelper instance or string containing the path to the base
+          APK.
+      split_apks: A list of strings of paths of all of the APK splits.
+      allow_downgrade: A boolean indicating if we should allow downgrades.
+      reinstall: A boolean indicating if we should keep any existing app data.
+      allow_cached_props: Whether to use cached values for device properties.
+      permissions: Set of permissions to set. If not set, finds permissions with
+          apk helper. To set no permissions, pass [].
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Raises:
+      CommandFailedError if the installation fails.
+      CommandTimeoutError if the installation times out.
+      DeviceUnreachableError on missing device.
+      DeviceVersionError if device SDK is less than Android L.
+    """
+    self._InstallInternal(base_apk, split_apks, reinstall=reinstall,
+                          allow_cached_props=allow_cached_props,
+                          permissions=permissions,
+                          allow_downgrade=allow_downgrade)
+
+  def _InstallInternal(self, base_apk, split_apks, allow_downgrade=False,
+                       reinstall=False, allow_cached_props=False,
+                       permissions=None):
+    if split_apks:
+      self._CheckSdkLevel(version_codes.LOLLIPOP)
+
+    base_apk = apk_helper.ToHelper(base_apk)
+
+    all_apks = [base_apk.path]
+    if split_apks:
+      all_apks += split_select.SelectSplits(
+        self, base_apk.path, split_apks, allow_cached_props=allow_cached_props)
+      if len(all_apks) == 1:
+        logging.warning('split-select did not select any from %s', split_apks)
+
+    package_name = base_apk.GetPackageName()
+    device_apk_paths = self._GetApplicationPathsInternal(package_name)
+
+    apks_to_install = None
+    host_checksums = None
+    if not device_apk_paths:
+      apks_to_install = all_apks
+    elif len(device_apk_paths) > 1 and not split_apks:
+      logging.warning(
+          'Installing non-split APK when split APK was previously installed')
+      apks_to_install = all_apks
+    elif len(device_apk_paths) == 1 and split_apks:
+      logging.warning(
+          'Installing split APK when non-split APK was previously installed')
+      apks_to_install = all_apks
+    else:
+      try:
+        apks_to_install, host_checksums = (
+            self._ComputeStaleApks(package_name, all_apks))
+      except EnvironmentError as e:
+        logging.warning('Error calculating md5: %s', e)
+        apks_to_install, host_checksums = all_apks, None
+      if apks_to_install and not reinstall:
+        self.Uninstall(package_name)
+        apks_to_install = all_apks
+
+    if apks_to_install:
+      # Assume that we won't know the resulting device state.
+      self._cache['package_apk_paths'].pop(package_name, 0)
+      self._cache['package_apk_checksums'].pop(package_name, 0)
+      if split_apks:
+        partial = package_name if len(apks_to_install) < len(all_apks) else None
+        self.adb.InstallMultiple(
+            apks_to_install, partial=partial, reinstall=reinstall,
+            allow_downgrade=allow_downgrade)
+      else:
+        self.adb.Install(
+            base_apk.path, reinstall=reinstall, allow_downgrade=allow_downgrade)
+      if (permissions is None
+          and self.build_version_sdk >= version_codes.MARSHMALLOW):
+        permissions = base_apk.GetPermissions()
+      self.GrantPermissions(package_name, permissions)
+      # Upon success, we know the device checksums, but not their paths.
+      if host_checksums is not None:
+        self._cache['package_apk_checksums'][package_name] = host_checksums
+    else:
+      # Running adb install terminates running instances of the app, so to be
+      # consistent, we explicitly terminate it when skipping the install.
+      self.ForceStop(package_name)
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def Uninstall(self, package_name, keep_data=False, timeout=None,
+                retries=None):
+    """Remove the app |package_name| from the device.
+
+    This is a no-op if the app is not already installed.
+
+    Args:
+      package_name: The package to uninstall.
+      keep_data: (optional) Whether to keep the data and cache directories.
+      timeout: Timeout in seconds.
+      retries: Number of retries.
+
+    Raises:
+      CommandFailedError if the uninstallation fails.
+      CommandTimeoutError if the uninstallation times out.
+      DeviceUnreachableError on missing device.
+    """
+    installed = self._GetApplicationPathsInternal(package_name)
+    if not installed:
+      return
+    try:
+      self.adb.Uninstall(package_name, keep_data)
+      self._cache['package_apk_paths'][package_name] = []
+      self._cache['package_apk_checksums'][package_name] = set()
+    except:
+      # Clear cache since we can't be sure of the state.
+      self._cache['package_apk_paths'].pop(package_name, 0)
+      self._cache['package_apk_checksums'].pop(package_name, 0)
+      raise
+
+  def _CheckSdkLevel(self, required_sdk_level):
+    """Raises an exception if the device does not have the required SDK level.
+    """
+    if self.build_version_sdk < required_sdk_level:
+      raise device_errors.DeviceVersionError(
+          ('Requires SDK level %s, device is SDK level %s' %
+           (required_sdk_level, self.build_version_sdk)),
+           device_serial=self.adb.GetDeviceSerial())
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def RunShellCommand(self, cmd, check_return=False, cwd=None, env=None,
+                      as_root=False, single_line=False, large_output=False,
+                      timeout=None, retries=None):
+    """Run an ADB shell command.
+
+    The command to run, |cmd|, should be a sequence of program arguments or
+    else a single string.
+
+    When |cmd| is a sequence, it is assumed to contain the name of the command
+    to run followed by its arguments. In this case, arguments are passed to the
+    command exactly as given, without any further processing by the shell. This
+    makes it easy to pass arguments containing spaces or special characters
+    without having to worry about getting quoting right. Whenever possible, it
+    is recommended to pass |cmd| as a sequence.
+
+    When |cmd| is given as a string, it will be interpreted and run by the
+    shell on the device.
+
+    This behaviour is consistent with that of command runners in cmd_helper as
+    well as Python's own subprocess.Popen.
+
+    TODO(perezju) Change the default of |check_return| to True when callers
+      have switched to the new behaviour.
+
+    Args:
+      cmd: A string with the full command to run on the device, or a sequence
+        containing the command and its arguments.
+      check_return: A boolean indicating whether or not the return code should
+        be checked.
+      cwd: The device directory in which the command should be run.
+      env: The environment variables with which the command should be run.
+      as_root: A boolean indicating whether the shell command should be run
+        with root privileges.
+      single_line: A boolean indicating if only a single line of output is
+        expected.
+      large_output: Uses a work-around for large shell command output. Without
+        this, large output will be truncated.
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Returns:
+      If single_line is False, the output of the command as a list of lines,
+      otherwise, a string with the single line of output emitted by the command
+      (with the optional newline at the end stripped).
+
+    Raises:
+      AdbCommandFailedError if check_return is True and the exit code of
+        the command run on the device is non-zero.
+      CommandFailedError if single_line is True but the output contains two or
+        more lines.
+      CommandTimeoutError on timeout.
+      DeviceUnreachableError on missing device.
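+
+    Example (illustrative):
+      lines = device.RunShellCommand(['ls', '-l', '/sdcard'],
+                                     check_return=True)
+      storage = device.RunShellCommand('echo $EXTERNAL_STORAGE',
+                                       single_line=True, check_return=True)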
+    """
+    def env_quote(key, value):
+      if not DeviceUtils._VALID_SHELL_VARIABLE.match(key):
+        raise KeyError('Invalid shell variable name %r' % key)
+      # using double quotes here to allow interpolation of shell variables
+      return '%s=%s' % (key, cmd_helper.DoubleQuote(value))
+
+    def run(cmd):
+      return self.adb.Shell(cmd)
+
+    def handle_check_return(cmd):
+      try:
+        return run(cmd)
+      except device_errors.AdbCommandFailedError as exc:
+        if check_return:
+          raise
+        else:
+          return exc.output
+
+    def handle_large_command(cmd):
+      if len(cmd) < self._MAX_ADB_COMMAND_LENGTH:
+        return handle_check_return(cmd)
+      else:
+        with device_temp_file.DeviceTempFile(self.adb, suffix='.sh') as script:
+          self._WriteFileWithPush(script.name, cmd)
+          logging.info('Large shell command will be run from file: %s ...',
+                       cmd[:self._MAX_ADB_COMMAND_LENGTH])
+          return handle_check_return('sh %s' % script.name_quoted)
+
+    def handle_large_output(cmd, large_output_mode):
+      if large_output_mode:
+        with device_temp_file.DeviceTempFile(self.adb) as large_output_file:
+          cmd = '( %s )>%s' % (cmd, large_output_file.name)
+          logging.debug('Large output mode enabled. Will write output to '
+                        'device and read results from file.')
+          handle_large_command(cmd)
+          return self.ReadFile(large_output_file.name, force_pull=True)
+      else:
+        try:
+          return handle_large_command(cmd)
+        except device_errors.AdbCommandFailedError as exc:
+          if exc.status is None:
+            logging.exception('No output found for %s', cmd)
+            logging.warning('Attempting to run in large_output mode.')
+            logging.warning('Use RunShellCommand(..., large_output=True) for '
+                            'shell commands that expect a lot of output.')
+            return handle_large_output(cmd, True)
+          else:
+            raise
+
+    if not isinstance(cmd, basestring):
+      cmd = ' '.join(cmd_helper.SingleQuote(s) for s in cmd)
+    if env:
+      env = ' '.join(env_quote(k, v) for k, v in env.iteritems())
+      cmd = '%s %s' % (env, cmd)
+    if cwd:
+      cmd = 'cd %s && %s' % (cmd_helper.SingleQuote(cwd), cmd)
+    if as_root and self.NeedsSU():
+      # "su -c sh -c" allows using shell features in |cmd|
+      cmd = self._Su('sh -c %s' % cmd_helper.SingleQuote(cmd))
+
+    output = handle_large_output(cmd, large_output).splitlines()
+
+    if single_line:
+      if not output:
+        return ''
+      elif len(output) == 1:
+        return output[0]
+      else:
+        msg = 'one line of output was expected, but got: %s'
+        raise device_errors.CommandFailedError(msg % output, str(self))
+    else:
+      return output
+
+  def _RunPipedShellCommand(self, script, **kwargs):
+    PIPESTATUS_LEADER = 'PIPESTATUS: '
+
+    script += '; echo "%s${PIPESTATUS[@]}"' % PIPESTATUS_LEADER
+    kwargs['check_return'] = True
+    output = self.RunShellCommand(script, **kwargs)
+    pipestatus_line = output[-1]
+
+    if not pipestatus_line.startswith(PIPESTATUS_LEADER):
+      logging.error('Pipe exit statuses of shell script missing.')
+      raise device_errors.AdbShellCommandFailedError(
+          script, output, status=None,
+          device_serial=self.adb.GetDeviceSerial())
+
+    output = output[:-1]
+    statuses = [
+        int(s) for s in pipestatus_line[len(PIPESTATUS_LEADER):].split()]
+    if any(statuses):
+      raise device_errors.AdbShellCommandFailedError(
+          script, output, status=statuses,
+          device_serial=self.adb.GetDeviceSerial())
+    return output
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def KillAll(self, process_name, exact=False, signum=device_signal.SIGKILL,
+              as_root=False, blocking=False, quiet=False,
+              timeout=None, retries=None):
+    """Kill all processes with the given name on the device.
+
+    Args:
+      process_name: A string containing the name of the process to kill.
+      exact: A boolean indicating whether to kill all processes matching
+             the string |process_name| exactly, or all of those which contain
+             |process_name| as a substring. Defaults to False.
+      signum: An integer containing the signal number to send to kill. Defaults
+              to SIGKILL (9).
+      as_root: A boolean indicating whether the kill should be executed with
+               root privileges.
+      blocking: A boolean indicating whether we should wait until all processes
+                with the given |process_name| are dead.
+      quiet: A boolean indicating whether to ignore the fact that no processes
+             to kill were found.
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Returns:
+      The number of processes attempted to kill.
+
+    Raises:
+      CommandFailedError if no process was killed and |quiet| is False.
+      CommandTimeoutError on timeout.
+      DeviceUnreachableError on missing device.
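+
+    Example (illustrative; the process name is arbitrary and the caller is
+    assumed to import devil.android.device_signal):
+      device.KillAll('com.example.app', signum=device_signal.SIGTERM,
+                     blocking=True, quiet=True)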
+    """
+    procs_pids = self.GetPids(process_name)
+    if exact:
+      procs_pids = {process_name: procs_pids.get(process_name, [])}
+    pids = set(itertools.chain(*procs_pids.values()))
+    if not pids:
+      if quiet:
+        return 0
+      else:
+        raise device_errors.CommandFailedError(
+            'No process "%s"' % process_name, str(self))
+
+    logging.info(
+        'KillAll(%r, ...) attempting to kill the following:', process_name)
+    for name, ids in procs_pids.iteritems():
+      for i in ids:
+        logging.info('  %05s %s', str(i), name)
+
+    cmd = ['kill', '-%d' % signum] + sorted(pids)
+    self.RunShellCommand(cmd, as_root=as_root, check_return=True)
+
+    def all_pids_killed():
+      procs_pids_remain = self.GetPids(process_name)
+      return not pids.intersection(itertools.chain(*procs_pids_remain.values()))
+
+    if blocking:
+      timeout_retry.WaitFor(all_pids_killed, wait_period=0.1)
+
+    return len(pids)
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def StartActivity(self, intent_obj, blocking=False, trace_file_name=None,
+                    force_stop=False, timeout=None, retries=None):
+    """Start package's activity on the device.
+
+    Args:
+      intent_obj: An Intent object to send.
+      blocking: A boolean indicating whether we should wait for the activity to
+                finish launching.
+      trace_file_name: If present, a string that both indicates that we want to
+                       profile the activity and contains the path to which the
+                       trace should be saved.
+      force_stop: A boolean indicating whether we should stop the activity
+                  before starting it.
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Raises:
+      CommandFailedError if the activity could not be started.
+      CommandTimeoutError on timeout.
+      DeviceUnreachableError on missing device.
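+
+    Example (illustrative; reuses the intent that GoHome sends below):
+      device.StartActivity(
+          intent.Intent(action='android.intent.action.MAIN',
+                        category='android.intent.category.HOME'),
+          blocking=True)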
+    """
+    cmd = ['am', 'start']
+    if blocking:
+      cmd.append('-W')
+    if trace_file_name:
+      cmd.extend(['--start-profiler', trace_file_name])
+    if force_stop:
+      cmd.append('-S')
+    cmd.extend(intent_obj.am_args)
+    for line in self.RunShellCommand(cmd, check_return=True):
+      if line.startswith('Error:'):
+        raise device_errors.CommandFailedError(line, str(self))
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def StartInstrumentation(self, component, finish=True, raw=False,
+                           extras=None, timeout=None, retries=None):
+    if extras is None:
+      extras = {}
+
+    cmd = ['am', 'instrument']
+    if finish:
+      cmd.append('-w')
+    if raw:
+      cmd.append('-r')
+    for k, v in extras.iteritems():
+      cmd.extend(['-e', str(k), str(v)])
+    cmd.append(component)
+
+    # Store the package name in a shell variable to help the command stay under
+    # the _MAX_ADB_COMMAND_LENGTH limit.
+    package = component.split('/')[0]
+    shell_snippet = 'p=%s;%s' % (package,
+                                 cmd_helper.ShrinkToSnippet(cmd, 'p', package))
+    return self.RunShellCommand(shell_snippet, check_return=True,
+                                large_output=True)
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def BroadcastIntent(self, intent_obj, timeout=None, retries=None):
+    """Send a broadcast intent.
+
+    Args:
+      intent_obj: An Intent to broadcast.
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Raises:
+      CommandTimeoutError on timeout.
+      DeviceUnreachableError on missing device.
+    """
+    cmd = ['am', 'broadcast'] + intent_obj.am_args
+    self.RunShellCommand(cmd, check_return=True)
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def GoHome(self, timeout=None, retries=None):
+    """Return to the home screen and obtain launcher focus.
+
+    This command launches the home screen and attempts to obtain
+    launcher focus until the timeout is reached.
+
+    Args:
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Raises:
+      CommandTimeoutError on timeout.
+      DeviceUnreachableError on missing device.
+    """
+    def is_launcher_focused():
+      output = self.RunShellCommand(['dumpsys', 'window', 'windows'],
+                                    check_return=True, large_output=True)
+      return any(self._LAUNCHER_FOCUSED_RE.match(l) for l in output)
+
+    def dismiss_popups():
+      # There is a dialog present; attempt to get rid of it.
+      # Not all dialogs can be dismissed with back.
+      self.SendKeyEvent(keyevent.KEYCODE_ENTER)
+      self.SendKeyEvent(keyevent.KEYCODE_BACK)
+      return is_launcher_focused()
+
+    # If Home is already focused, return early to avoid unnecessary work.
+    if is_launcher_focused():
+      return
+
+    self.StartActivity(
+        intent.Intent(action='android.intent.action.MAIN',
+                      category='android.intent.category.HOME'),
+        blocking=True)
+
+    if not is_launcher_focused():
+      timeout_retry.WaitFor(dismiss_popups, wait_period=1)
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def ForceStop(self, package, timeout=None, retries=None):
+    """Close the application.
+
+    Args:
+      package: A string containing the name of the package to stop.
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Raises:
+      CommandTimeoutError on timeout.
+      DeviceUnreachableError on missing device.
+    """
+    cmd = 'p=%s;if [[ "$(ps)" = *$p* ]]; then am force-stop $p; fi'
+    self.RunShellCommand(cmd % package, check_return=True)
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def ClearApplicationState(
+      self, package, permissions=None, timeout=None, retries=None):
+    """Clear all state for the given package.
+
+    Args:
+      package: A string containing the name of the package to stop.
+      permissions: List of permissions to set after clearing data.
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Raises:
+      CommandTimeoutError on timeout.
+      DeviceUnreachableError on missing device.
+    """
+    # Check that the package exists before clearing it for android builds below
+    # JB MR2. Necessary because calling pm clear on a package that doesn't exist
+    # may never return.
+    if ((self.build_version_sdk >= version_codes.JELLY_BEAN_MR2)
+        or self._GetApplicationPathsInternal(package)):
+      self.RunShellCommand(['pm', 'clear', package], check_return=True)
+      self.GrantPermissions(package, permissions)
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def SendKeyEvent(self, keycode, timeout=None, retries=None):
+    """Sends a keycode to the device.
+
+    See the devil.android.sdk.keyevent module for suitable keycode values.
+
+    Args:
+      keycode: An integer keycode to send to the device.
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Raises:
+      CommandTimeoutError on timeout.
+      DeviceUnreachableError on missing device.
+    """
+    self.RunShellCommand(['input', 'keyevent', format(keycode, 'd')],
+                         check_return=True)
+
+  PUSH_CHANGED_FILES_DEFAULT_TIMEOUT = 10 * _DEFAULT_TIMEOUT
+
+  @decorators.WithTimeoutAndRetriesFromInstance(
+      min_default_timeout=PUSH_CHANGED_FILES_DEFAULT_TIMEOUT)
+  def PushChangedFiles(self, host_device_tuples, timeout=None,
+                       retries=None, delete_device_stale=False):
+    """Push files to the device, skipping files that don't need updating.
+
+    When a directory is pushed, it is traversed recursively on the host and
+    all files in it are pushed to the device as needed.
+    Additionally, if the delete_device_stale option is True,
+    files that exist on the device but don't exist on the host are deleted.
+
+    Args:
+      host_device_tuples: A list of (host_path, device_path) tuples, where
+        |host_path| is an absolute path of a file or directory on the host
+        that should be minimally pushed to the device, and |device_path| is
+        an absolute path of the destination on the device.
+      timeout: timeout in seconds
+      retries: number of retries
+      delete_device_stale: option to delete stale files on device
+
+    Raises:
+      CommandFailedError on failure.
+      CommandTimeoutError on timeout.
+      DeviceUnreachableError on missing device.
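+
+    Example (illustrative; paths are arbitrary):
+      device.PushChangedFiles(
+          [('/host/out/test_data', '/data/local/tmp/test_data')],
+          delete_device_stale=True)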
+    """
+
+    all_changed_files = []
+    all_stale_files = []
+    missing_dirs = []
+    cache_commit_funcs = []
+    for h, d in host_device_tuples:
+      assert os.path.isabs(h) and posixpath.isabs(d)
+      changed_files, up_to_date_files, stale_files, cache_commit_func = (
+          self._GetChangedAndStaleFiles(h, d, delete_device_stale))
+      all_changed_files += changed_files
+      all_stale_files += stale_files
+      cache_commit_funcs.append(cache_commit_func)
+      if (os.path.isdir(h) and changed_files and not up_to_date_files
+          and not stale_files):
+        missing_dirs.append(d)
+
+    if delete_device_stale and all_stale_files:
+      self.RunShellCommand(['rm', '-f'] + all_stale_files,
+                             check_return=True)
+
+    if all_changed_files:
+      if missing_dirs:
+        self.RunShellCommand(['mkdir', '-p'] + missing_dirs, check_return=True)
+      self._PushFilesImpl(host_device_tuples, all_changed_files)
+    for func in cache_commit_funcs:
+      func()
+
+  def _GetChangedAndStaleFiles(self, host_path, device_path, track_stale=False):
+    """Get files to push and delete
+
+    Args:
+      host_path: an absolute path of a file or directory on the host
+      device_path: an absolute path of a file or directory on the device
+      track_stale: whether to bother looking for stale files (slower)
+
+    Returns:
+      a four-element tuple
+      1st element: a list of (host_files_path, device_files_path) tuples to push
+      2nd element: a list of host_files_path that are up-to-date
+      3rd element: a list of stale files under device_path, or [] when
+        track_stale == False
+      4th element: a no-argument function that commits the new checksums to
+        the device-path checksum cache; it should be called after pushing.
+    """
+    try:
+      # Length calculations below assume no trailing /.
+      host_path = host_path.rstrip('/')
+      device_path = device_path.rstrip('/')
+
+      specific_device_paths = [device_path]
+      ignore_other_files = not track_stale and os.path.isdir(host_path)
+      if ignore_other_files:
+        specific_device_paths = []
+        for root, _, filenames in os.walk(host_path):
+          relative_dir = root[len(host_path) + 1:]
+          specific_device_paths.extend(
+              posixpath.join(device_path, relative_dir, f) for f in filenames)
+
+      def calculate_host_checksums():
+        return md5sum.CalculateHostMd5Sums([host_path])
+
+      def calculate_device_checksums():
+        if self._enable_device_files_cache:
+          cache_entry = self._cache['device_path_checksums'].get(device_path)
+          if cache_entry and cache_entry[0] == ignore_other_files:
+            return dict(cache_entry[1])
+
+        sums = md5sum.CalculateDeviceMd5Sums(specific_device_paths, self)
+
+        cache_entry = [ignore_other_files, sums]
+        self._cache['device_path_checksums'][device_path] = cache_entry
+        return dict(sums)
+
+      host_checksums, device_checksums = reraiser_thread.RunAsync((
+          calculate_host_checksums,
+          calculate_device_checksums))
+    except EnvironmentError as e:
+      logging.warning('Error calculating md5: %s', e)
+      return ([(host_path, device_path)], [], [], lambda: 0)
+
+    to_push = []
+    up_to_date = []
+    to_delete = []
+    if os.path.isfile(host_path):
+      host_checksum = host_checksums.get(host_path)
+      device_checksum = device_checksums.get(device_path)
+      if host_checksum == device_checksum:
+        up_to_date.append(host_path)
+      else:
+        to_push.append((host_path, device_path))
+    else:
+      for host_abs_path, host_checksum in host_checksums.iteritems():
+        device_abs_path = posixpath.join(
+            device_path, os.path.relpath(host_abs_path, host_path))
+        device_checksum = device_checksums.pop(device_abs_path, None)
+        if device_checksum == host_checksum:
+          up_to_date.append(host_abs_path)
+        else:
+          to_push.append((host_abs_path, device_abs_path))
+      to_delete = device_checksums.keys()
+
+    def cache_commit_func():
+      new_sums = {posixpath.join(device_path, path[len(host_path) + 1:]): val
+                  for path, val in host_checksums.iteritems()}
+      cache_entry = [ignore_other_files, new_sums]
+      self._cache['device_path_checksums'][device_path] = cache_entry
+
+    return (to_push, up_to_date, to_delete, cache_commit_func)
+
+  def _ComputeDeviceChecksumsForApks(self, package_name):
+    ret = self._cache['package_apk_checksums'].get(package_name)
+    if ret is None:
+      device_paths = self._GetApplicationPathsInternal(package_name)
+      file_to_checksums = md5sum.CalculateDeviceMd5Sums(device_paths, self)
+      ret = set(file_to_checksums.values())
+      self._cache['package_apk_checksums'][package_name] = ret
+    return ret
+
+  def _ComputeStaleApks(self, package_name, host_apk_paths):
+    def calculate_host_checksums():
+      return md5sum.CalculateHostMd5Sums(host_apk_paths)
+
+    def calculate_device_checksums():
+      return self._ComputeDeviceChecksumsForApks(package_name)
+
+    host_checksums, device_checksums = reraiser_thread.RunAsync((
+        calculate_host_checksums, calculate_device_checksums))
+    stale_apks = [k for (k, v) in host_checksums.iteritems()
+                  if v not in device_checksums]
+    return stale_apks, set(host_checksums.values())
+
+  def _PushFilesImpl(self, host_device_tuples, files):
+    if not files:
+      return
+
+    size = sum(host_utils.GetRecursiveDiskUsage(h) for h, _ in files)
+    file_count = len(files)
+    dir_size = sum(host_utils.GetRecursiveDiskUsage(h)
+                   for h, _ in host_device_tuples)
+    dir_file_count = 0
+    for h, _ in host_device_tuples:
+      if os.path.isdir(h):
+        dir_file_count += sum(len(f) for _r, _d, f in os.walk(h))
+      else:
+        dir_file_count += 1
+
+    push_duration = self._ApproximateDuration(
+        file_count, file_count, size, False)
+    dir_push_duration = self._ApproximateDuration(
+        len(host_device_tuples), dir_file_count, dir_size, False)
+    zip_duration = self._ApproximateDuration(1, 1, size, True)
+
+    if dir_push_duration < push_duration and dir_push_duration < zip_duration:
+      self._PushChangedFilesIndividually(host_device_tuples)
+    elif push_duration < zip_duration:
+      self._PushChangedFilesIndividually(files)
+    elif self._commands_installed is False:
+      # Already tried and failed to install unzip command.
+      self._PushChangedFilesIndividually(files)
+    elif not self._PushChangedFilesZipped(
+        files, [d for _, d in host_device_tuples]):
+      self._PushChangedFilesIndividually(files)
+
+  def _MaybeInstallCommands(self):
+    if self._commands_installed is None:
+      try:
+        if not install_commands.Installed(self):
+          install_commands.InstallCommands(self)
+        self._commands_installed = True
+      except device_errors.CommandFailedError as e:
+        logging.warning('unzip not available: %s', str(e))
+        self._commands_installed = False
+    return self._commands_installed
+
+  @staticmethod
+  def _ApproximateDuration(adb_calls, file_count, byte_count, is_zipping):
+    # We approximate the time to push a set of files to a device as:
+    #   t = c1 * a + c2 * f + c3 + b / c4 + b / (c5 * c6), where
+    #     t: total time (sec)
+    #     c1: adb call time delay (sec)
+    #     a: number of times adb is called (unitless)
+    #     c2: push time delay (sec)
+    #     f: number of files pushed via adb (unitless)
+    #     c3: zip time delay (sec)
+    #     c4: zip rate (bytes/sec)
+    #     b: total number of bytes (bytes)
+    #     c5: transfer rate (bytes/sec)
+    #     c6: compression ratio (unitless)
+
+    # All of these are approximations.
+    ADB_CALL_PENALTY = 0.1  # seconds
+    ADB_PUSH_PENALTY = 0.01  # seconds
+    ZIP_PENALTY = 2.0  # seconds
+    ZIP_RATE = 10000000.0  # bytes / second
+    TRANSFER_RATE = 2000000.0  # bytes / second
+    COMPRESSION_RATIO = 2.0  # unitless
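+    # Worked example with these constants (illustrative only): pushing 100
+    # changed files totalling 10 MB individually costs roughly
+    #   0.1*100 + 0.01*100 + 10e6/2e6 = 16 seconds,
+    # while zipping the same bytes costs roughly
+    #   0.1*1 + 0.01*1 + (2.0 + 10e6/1e7) + 10e6/(2e6*2.0) = 5.61 seconds,
+    # so, other things being equal, the zipped push would be preferred.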
+
+    adb_call_time = ADB_CALL_PENALTY * adb_calls
+    adb_push_setup_time = ADB_PUSH_PENALTY * file_count
+    if is_zipping:
+      zip_time = ZIP_PENALTY + byte_count / ZIP_RATE
+      transfer_time = byte_count / (TRANSFER_RATE * COMPRESSION_RATIO)
+    else:
+      zip_time = 0
+      transfer_time = byte_count / TRANSFER_RATE
+    return adb_call_time + adb_push_setup_time + zip_time + transfer_time
+
+  def _PushChangedFilesIndividually(self, files):
+    for h, d in files:
+      self.adb.Push(h, d)
+
+  def _PushChangedFilesZipped(self, files, dirs):
+    with tempfile.NamedTemporaryFile(suffix='.zip') as zip_file:
+      zip_proc = multiprocessing.Process(
+          target=DeviceUtils._CreateDeviceZip,
+          args=(zip_file.name, files))
+      zip_proc.start()
+      try:
+        # While it's zipping, ensure the unzip command exists on the device.
+        if not self._MaybeInstallCommands():
+          zip_proc.terminate()
+          return False
+
+        # Warm up NeedsSU cache while we're still zipping.
+        self.NeedsSU()
+        with device_temp_file.DeviceTempFile(
+            self.adb, suffix='.zip') as device_temp:
+          zip_proc.join()
+          self.adb.Push(zip_file.name, device_temp.name)
+          quoted_dirs = ' '.join(cmd_helper.SingleQuote(d) for d in dirs)
+          self.RunShellCommand(
+              'unzip %s&&chmod -R 777 %s' % (device_temp.name, quoted_dirs),
+              as_root=True,
+              env={'PATH': '%s:$PATH' % install_commands.BIN_DIR},
+              check_return=True)
+      finally:
+        if zip_proc.is_alive():
+          zip_proc.terminate()
+    return True
+
+  @staticmethod
+  def _CreateDeviceZip(zip_path, host_device_tuples):
+    with zipfile.ZipFile(zip_path, 'w') as zip_file:
+      for host_path, device_path in host_device_tuples:
+        zip_utils.WriteToZipFile(zip_file, host_path, device_path)
+
+  # TODO(nednguyen): remove this and migrate the callsite to PathExists().
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def FileExists(self, device_path, timeout=None, retries=None):
+    """Checks whether the given file exists on the device.
+
+    Arguments are the same as PathExists.
+    """
+    return self.PathExists(device_path, timeout=timeout, retries=retries)
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def PathExists(self, device_paths, as_root=False, timeout=None, retries=None):
+    """Checks whether the given path(s) exists on the device.
+
+    Args:
+      device_paths: A string containing the absolute path to the file on the
+                    device, or an iterable of paths to check.
+      as_root: Whether root permissions should be used to check for the
+               existence of the given path(s).
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Returns:
+      True if all the given paths exist on the device, False otherwise.
+
+    Raises:
+      CommandTimeoutError on timeout.
+      DeviceUnreachableError on missing device.
+    """
+    paths = device_paths
+    if isinstance(paths, basestring):
+      paths = (paths,)
+    condition = ' -a '.join('-e %s' % cmd_helper.SingleQuote(p) for p in paths)
+    cmd = 'test %s' % condition
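+    # Illustrative example (hypothetical paths): checking '/sdcard/a' and
+    # '/sdcard/b' runs roughly `test -e /sdcard/a -a -e /sdcard/b`, with each
+    # path shell-quoted as needed.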
+    try:
+      self.RunShellCommand(cmd, as_root=as_root, check_return=True,
+                           timeout=timeout, retries=retries)
+      return True
+    except device_errors.CommandFailedError:
+      return False
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def PullFile(self, device_path, host_path, timeout=None, retries=None):
+    """Pull a file from the device.
+
+    Args:
+      device_path: A string containing the absolute path of the file to pull
+                   from the device.
+      host_path: A string containing the absolute path of the destination on
+                 the host.
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Raises:
+      CommandFailedError on failure.
+      CommandTimeoutError on timeout.
+    """
+    # Create the base dir if it doesn't exist already
+    dirname = os.path.dirname(host_path)
+    if dirname and not os.path.exists(dirname):
+      os.makedirs(dirname)
+    self.adb.Pull(device_path, host_path)
+
+  def _ReadFileWithPull(self, device_path):
+    try:
+      d = tempfile.mkdtemp()
+      host_temp_path = os.path.join(d, 'tmp_ReadFileWithPull')
+      self.adb.Pull(device_path, host_temp_path)
+      with open(host_temp_path, 'r') as host_temp:
+        return host_temp.read()
+    finally:
+      if os.path.exists(d):
+        shutil.rmtree(d)
+
+  _LS_RE = re.compile(
+      r'(?P<perms>\S+) (?:(?P<inodes>\d+) +)?(?P<owner>\S+) +(?P<group>\S+) +'
+      r'(?:(?P<size>\d+) +)?(?P<date>\S+) +(?P<time>\S+) +(?P<name>.+)$')
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def ReadFile(self, device_path, as_root=False, force_pull=False,
+               timeout=None, retries=None):
+    """Reads the contents of a file from the device.
+
+    Args:
+      device_path: A string containing the absolute path of the file to read
+                   from the device.
+      as_root: A boolean indicating whether the read should be executed with
+               root privileges.
+      force_pull: A boolean indicating whether to force the operation to be
+          performed by pulling a file from the device. The default is, when the
+          contents are short, to retrieve the contents using cat instead.
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Returns:
+      The contents of |device_path| as a string. Contents are interpreted
+      using universal newlines, so the caller will see them encoded as '\n'.
+      Also, all lines will be terminated with a newline.
+
+    Raises:
+      AdbCommandFailedError if the file can't be read.
+      CommandTimeoutError on timeout.
+      DeviceUnreachableError on missing device.
+    """
+    def get_size(path):
+      # TODO(jbudorick): Implement a generic version of Stat() that handles
+      # as_root=True, then switch this implementation to use that.
+      ls_out = self.RunShellCommand(['ls', '-l', device_path], as_root=as_root,
+                                    check_return=True)
+      file_name = posixpath.basename(device_path)
+      for line in ls_out:
+        m = self._LS_RE.match(line)
+        if m and file_name == posixpath.basename(m.group('name')):
+          return int(m.group('size'))
+      logging.warning('Could not determine size of %s.', device_path)
+      return None
+
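+    # Summary of the branches below: short files are read with cat; larger
+    # files, or reads where force_pull is set, go through adb pull, using an
+    # intermediate world-readable copy when root is required.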
+    if (not force_pull
+        and 0 < get_size(device_path) <= self._MAX_ADB_OUTPUT_LENGTH):
+      return _JoinLines(self.RunShellCommand(
+          ['cat', device_path], as_root=as_root, check_return=True))
+    elif as_root and self.NeedsSU():
+      with device_temp_file.DeviceTempFile(self.adb) as device_temp:
+        cmd = 'SRC=%s DEST=%s;cp "$SRC" "$DEST" && chmod 666 "$DEST"' % (
+            cmd_helper.SingleQuote(device_path),
+            cmd_helper.SingleQuote(device_temp.name))
+        self.RunShellCommand(cmd, as_root=True, check_return=True)
+        return self._ReadFileWithPull(device_temp.name)
+    else:
+      return self._ReadFileWithPull(device_path)
+
+  def _WriteFileWithPush(self, device_path, contents):
+    with tempfile.NamedTemporaryFile() as host_temp:
+      host_temp.write(contents)
+      host_temp.flush()
+      self.adb.Push(host_temp.name, device_path)
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def WriteFile(self, device_path, contents, as_root=False, force_push=False,
+                timeout=None, retries=None):
+    """Writes |contents| to a file on the device.
+
+    Args:
+      device_path: A string containing the absolute path to the file to write
+          on the device.
+      contents: A string containing the data to write to the device.
+      as_root: A boolean indicating whether the write should be executed with
+          root privileges (if available).
+      force_push: A boolean indicating whether to force the operation to be
+          performed by pushing a file to the device. The default is, when the
+          contents are short, to pass the contents using a shell script instead.
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Raises:
+      CommandFailedError if the file could not be written on the device.
+      CommandTimeoutError on timeout.
+      DeviceUnreachableError on missing device.
+    """
+    if not force_push and len(contents) < self._MAX_ADB_COMMAND_LENGTH:
+      # If the contents are small, for efficiency we write the contents with
+      # a shell command rather than pushing a file.
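+      # Illustrative result (hypothetical values): writing 'hello' to
+      # /data/local/tmp/f.txt runs roughly
+      #   echo -n hello > /data/local/tmp/f.txt
+      # with both arguments shell-quoted as needed.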
+      cmd = 'echo -n %s > %s' % (cmd_helper.SingleQuote(contents),
+                                 cmd_helper.SingleQuote(device_path))
+      self.RunShellCommand(cmd, as_root=as_root, check_return=True)
+    elif as_root and self.NeedsSU():
+      # Adb does not allow pushing files as root, so we first push to a temp
+      # file in a safe location, and then copy it to the desired location
+      # with su.
+      with device_temp_file.DeviceTempFile(self.adb) as device_temp:
+        self._WriteFileWithPush(device_temp.name, contents)
+        # Here we need 'cp' rather than 'mv' because the temp and
+        # destination files might be on different file systems (e.g.
+        # on internal storage and an external sd card).
+        self.RunShellCommand(['cp', device_temp.name, device_path],
+                             as_root=True, check_return=True)
+    else:
+      # If root is not needed, we can push directly to the desired location.
+      self._WriteFileWithPush(device_path, contents)
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def Ls(self, device_path, timeout=None, retries=None):
+    """Lists the contents of a directory on the device.
+
+    Args:
+      device_path: A string containing the path of the directory on the device
+                   to list.
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Returns:
+      A list of pairs (filename, stat) for each file found in the directory,
+      where the stat object has the properties: st_mode, st_size, and st_time.
+
+    Raises:
+      AdbCommandFailedError if |device_path| does not specify a valid and
+          accessible directory on the device.
+      CommandTimeoutError on timeout.
+      DeviceUnreachableError on missing device.
+    """
+    return self.adb.Ls(device_path)
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def Stat(self, device_path, timeout=None, retries=None):
+    """Get the stat attributes of a file or directory on the device.
+
+    Args:
+      device_path: A string containing the path on the device from which to
+                   get attributes.
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Returns:
+      A stat object with the properties: st_mode, st_size, and st_time
+
+    Raises:
+      CommandFailedError if device_path cannot be found on the device.
+      CommandTimeoutError on timeout.
+      DeviceUnreachableError on missing device.
+    """
+    dirname, target = device_path.rsplit('/', 1)
+    for filename, stat in self.adb.Ls(dirname):
+      if filename == target:
+        return stat
+    raise device_errors.CommandFailedError(
+        'Cannot find file or directory: %r' % device_path, str(self))
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def SetJavaAsserts(self, enabled, timeout=None, retries=None):
+    """Enables or disables Java asserts.
+
+    Args:
+      enabled: A boolean indicating whether Java asserts should be enabled
+               or disabled.
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Returns:
+      True if the device-side property changed and a restart is required as a
+      result, False otherwise.
+
+    Raises:
+      CommandTimeoutError on timeout.
+    """
+    def find_property(lines, property_name):
+      for index, line in enumerate(lines):
+        if line.strip() == '':
+          continue
+        key_value = tuple(s.strip() for s in line.split('=', 1))
+        if len(key_value) != 2:
+          continue
+        key, value = key_value
+        if key == property_name:
+          return index, value
+      return None, ''
+
+    new_value = 'all' if enabled else ''
+
+    # First ensure the desired property is persisted.
+    try:
+      properties = self.ReadFile(self.LOCAL_PROPERTIES_PATH).splitlines()
+    except device_errors.CommandFailedError:
+      properties = []
+    index, value = find_property(properties, self.JAVA_ASSERT_PROPERTY)
+    if new_value != value:
+      if new_value:
+        new_line = '%s=%s' % (self.JAVA_ASSERT_PROPERTY, new_value)
+        if index is None:
+          properties.append(new_line)
+        else:
+          properties[index] = new_line
+      else:
+        assert index is not None  # since new_value == '' and new_value != value
+        properties.pop(index)
+      self.WriteFile(self.LOCAL_PROPERTIES_PATH, _JoinLines(properties))
+
+    # Next, check the current runtime value is what we need, and
+    # if not, set it and report that a reboot is required.
+    value = self.GetProp(self.JAVA_ASSERT_PROPERTY)
+    if new_value != value:
+      self.SetProp(self.JAVA_ASSERT_PROPERTY, new_value)
+      return True
+    else:
+      return False
+
+  def GetLanguage(self, cache=False):
+    """Returns the language setting on the device.
+
+    Args:
+      cache: Whether to use cached properties when available.
+    """
+    return self.GetProp('persist.sys.language', cache=cache)
+
+  def GetCountry(self, cache=False):
+    """Returns the country setting on the device.
+
+    Args:
+      cache: Whether to use cached properties when available.
+    """
+    return self.GetProp('persist.sys.country', cache=cache)
+
+  @property
+  def screen_density(self):
+    """Returns the screen density of the device."""
+    DPI_TO_DENSITY = {
+      120: 'ldpi',
+      160: 'mdpi',
+      240: 'hdpi',
+      320: 'xhdpi',
+      480: 'xxhdpi',
+      640: 'xxxhdpi',
+    }
+    return DPI_TO_DENSITY.get(self.pixel_density, 'tvdpi')
+
+  @property
+  def pixel_density(self):
+    return int(self.GetProp('ro.sf.lcd_density', cache=True))
+
+  @property
+  def build_description(self):
+    """Returns the build description of the system.
+
+    For example:
+      nakasi-user 4.4.4 KTU84P 1227136 release-keys
+    """
+    return self.GetProp('ro.build.description', cache=True)
+
+  @property
+  def build_fingerprint(self):
+    """Returns the build fingerprint of the system.
+
+    For example:
+      google/nakasi/grouper:4.4.4/KTU84P/1227136:user/release-keys
+    """
+    return self.GetProp('ro.build.fingerprint', cache=True)
+
+  @property
+  def build_id(self):
+    """Returns the build ID of the system (e.g. 'KTU84P')."""
+    return self.GetProp('ro.build.id', cache=True)
+
+  @property
+  def build_product(self):
+    """Returns the build product of the system (e.g. 'grouper')."""
+    return self.GetProp('ro.build.product', cache=True)
+
+  @property
+  def build_type(self):
+    """Returns the build type of the system (e.g. 'user')."""
+    return self.GetProp('ro.build.type', cache=True)
+
+  @property
+  def build_version_sdk(self):
+    """Returns the build version sdk of the system as a number (e.g. 19).
+
+    For version code numbers see:
+    http://developer.android.com/reference/android/os/Build.VERSION_CODES.html
+
+    For named constants see devil.android.sdk.version_codes
+
+    Raises:
+      CommandFailedError if the build version sdk is not a number.
+    """
+    value = self.GetProp('ro.build.version.sdk', cache=True)
+    try:
+      return int(value)
+    except ValueError:
+      raise device_errors.CommandFailedError(
+          'Invalid build version sdk: %r' % value)
+
+  @property
+  def product_cpu_abi(self):
+    """Returns the product cpu abi of the device (e.g. 'armeabi-v7a')."""
+    return self.GetProp('ro.product.cpu.abi', cache=True)
+
+  @property
+  def product_model(self):
+    """Returns the name of the product model (e.g. 'Nexus 7')."""
+    return self.GetProp('ro.product.model', cache=True)
+
+  @property
+  def product_name(self):
+    """Returns the product name of the device (e.g. 'nakasi')."""
+    return self.GetProp('ro.product.name', cache=True)
+
+  @property
+  def product_board(self):
+    """Returns the product board name of the device (e.g. 'shamu')."""
+    return self.GetProp('ro.product.board', cache=True)
+
+  def GetProp(self, property_name, cache=False, timeout=DEFAULT,
+              retries=DEFAULT):
+    """Gets a property from the device.
+
+    Args:
+      property_name: A string containing the name of the property to get from
+                     the device.
+      cache: Whether to use cached properties when available.
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Returns:
+      The value of the device's |property_name| property.
+
+    Raises:
+      CommandTimeoutError on timeout.
+    """
+    assert isinstance(property_name, basestring), (
+        "property_name is not a string: %r" % property_name)
+
+    prop_cache = self._cache['getprop']
+    if cache:
+      if property_name not in prop_cache:
+        # It takes ~120ms to query a single property, and ~130ms to query all
+        # properties. So, when caching we always query all properties.
+        output = self.RunShellCommand(
+            ['getprop'], check_return=True, large_output=True,
+            timeout=self._default_timeout if timeout is DEFAULT else timeout,
+            retries=self._default_retries if retries is DEFAULT else retries)
+        prop_cache.clear()
+        for key, value in _GETPROP_RE.findall(''.join(output)):
+          prop_cache[key] = value
+        if property_name not in prop_cache:
+          prop_cache[property_name] = ''
+    else:
+      # timeout and retries are forwarded to RunShellCommand rather than
+      # handled by a decorator, so that they are not applied when a value is
+      # read from the cache.
+      value = self.RunShellCommand(
+          ['getprop', property_name], single_line=True, check_return=True,
+          timeout=self._default_timeout if timeout is DEFAULT else timeout,
+          retries=self._default_retries if retries is DEFAULT else retries)
+      prop_cache[property_name] = value
+    return prop_cache[property_name]
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def SetProp(self, property_name, value, check=False, timeout=None,
+              retries=None):
+    """Sets a property on the device.
+
+    Args:
+      property_name: A string containing the name of the property to set on
+                     the device.
+      value: A string containing the value to set to the property on the
+             device.
+      check: A boolean indicating whether to check that the property was
+             successfully set on the device.
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Raises:
+      CommandFailedError if check is true and the property was not correctly
+        set on the device (e.g. because it is not rooted).
+      CommandTimeoutError on timeout.
+    """
+    assert isinstance(property_name, basestring), (
+        "property_name is not a string: %r" % property_name)
+    assert isinstance(value, basestring), "value is not a string: %r" % value
+
+    self.RunShellCommand(['setprop', property_name, value], check_return=True)
+    prop_cache = self._cache['getprop']
+    if property_name in prop_cache:
+      del prop_cache[property_name]
+    # TODO(perezju) remove the option and make the check mandatory, but using a
+    # single shell script to both set- and getprop.
+    if check and value != self.GetProp(property_name, cache=False):
+      raise device_errors.CommandFailedError(
+          'Unable to set property %r on the device to %r'
+          % (property_name, value), str(self))
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def GetABI(self, timeout=None, retries=None):
+    """Gets the device main ABI.
+
+    Args:
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Returns:
+      The device's main ABI name.
+
+    Raises:
+      CommandTimeoutError on timeout.
+    """
+    return self.GetProp('ro.product.cpu.abi', cache=True)
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def GetPids(self, process_name, timeout=None, retries=None):
+    """Returns the PIDs of processes with the given name.
+
+    Note that the |process_name| is often the package name.
+
+    Args:
+      process_name: A string containing the process name to get the PIDs for.
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Returns:
+      A dict mapping process name to a list of PIDs for each process that
+      contained the provided |process_name|.
+
+    Raises:
+      CommandTimeoutError on timeout.
+      DeviceUnreachableError on missing device.
+    """
+    procs_pids = collections.defaultdict(list)
+    try:
+      ps_output = self._RunPipedShellCommand(
+          'ps | grep -F %s' % cmd_helper.SingleQuote(process_name))
+    except device_errors.AdbShellCommandFailedError as e:
+      if e.status and isinstance(e.status, list) and not e.status[0]:
+        # If ps succeeded but grep failed, there were no processes with the
+        # given name.
+        return procs_pids
+      else:
+        raise
+
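+    # Illustrative parsing (ps output format varies by Android release): a
+    # hypothetical line such as
+    #   u0_a78 1234 123 512000 64000 ffffffff 00000000 S com.example.app
+    # yields the entry {'com.example.app': ['1234']}.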
+    for line in ps_output:
+      try:
+        ps_data = line.split()
+        if process_name in ps_data[-1]:
+          pid, process = ps_data[1], ps_data[-1]
+          procs_pids[process].append(pid)
+      except IndexError:
+        pass
+    return procs_pids
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def TakeScreenshot(self, host_path=None, timeout=None, retries=None):
+    """Takes a screenshot of the device.
+
+    Args:
+      host_path: A string containing the path on the host to save the
+                 screenshot to. If None, a file name in the current
+                 directory will be generated.
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Returns:
+      The name of the file on the host to which the screenshot was saved.
+
+    Raises:
+      CommandFailedError on failure.
+      CommandTimeoutError on timeout.
+      DeviceUnreachableError on missing device.
+    """
+    if not host_path:
+      host_path = os.path.abspath('screenshot-%s-%s.png' % (
+          self.adb.GetDeviceSerial(), _GetTimeStamp()))
+    with device_temp_file.DeviceTempFile(self.adb, suffix='.png') as device_tmp:
+      self.RunShellCommand(['/system/bin/screencap', '-p', device_tmp.name],
+                           check_return=True)
+      self.PullFile(device_tmp.name, host_path)
+    return host_path
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def GetMemoryUsageForPid(self, pid, timeout=None, retries=None):
+    """Gets the memory usage for the given PID.
+
+    Args:
+      pid: PID of the process.
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Returns:
+      A dict containing memory usage statistics for the PID. May include:
+        Size, Rss, Pss, Shared_Clean, Shared_Dirty, Private_Clean,
+        Private_Dirty, VmHWM
+
+    Raises:
+      CommandTimeoutError on timeout.
+    """
+    result = collections.defaultdict(int)
+
+    try:
+      result.update(self._GetMemoryUsageForPidFromSmaps(pid))
+    except device_errors.CommandFailedError:
+      logging.exception('Error getting memory usage from smaps')
+
+    try:
+      result.update(self._GetMemoryUsageForPidFromStatus(pid))
+    except device_errors.CommandFailedError:
+      logging.exception('Error getting memory usage from status')
+
+    return result
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def DismissCrashDialogIfNeeded(self, timeout=None, retries=None):
+    """Dismiss the error/ANR dialog if present.
+
+    Returns: Name of the crashed package if a dialog is focused,
+             None otherwise.
+    """
+    def _FindFocusedWindow():
+      match = None
+      # TODO(jbudorick): Try to grep the output on the device instead of using
+      # large_output if/when DeviceUtils exposes a public interface for piped
+      # shell command handling.
+      for line in self.RunShellCommand(['dumpsys', 'window', 'windows'],
+                                       check_return=True, large_output=True):
+        match = re.match(_CURRENT_FOCUS_CRASH_RE, line)
+        if match:
+          break
+      return match
+
+    match = _FindFocusedWindow()
+    if not match:
+      return None
+    package = match.group(2)
+    logging.warning('Trying to dismiss %s dialog for %s', *match.groups())
+    self.SendKeyEvent(keyevent.KEYCODE_DPAD_RIGHT)
+    self.SendKeyEvent(keyevent.KEYCODE_DPAD_RIGHT)
+    self.SendKeyEvent(keyevent.KEYCODE_ENTER)
+    match = _FindFocusedWindow()
+    if match:
+      logging.error('Still showing a %s dialog for %s', *match.groups())
+    return package
+
+  def _GetMemoryUsageForPidFromSmaps(self, pid):
+    SMAPS_COLUMNS = (
+        'Size', 'Rss', 'Pss', 'Shared_Clean', 'Shared_Dirty', 'Private_Clean',
+        'Private_Dirty')
+
+    showmap_out = self._RunPipedShellCommand(
+        'showmap %d | grep TOTAL' % int(pid), as_root=True)
+
+    split_totals = showmap_out[-1].split()
+    if (not split_totals
+        or len(split_totals) != 9
+        or split_totals[-1] != 'TOTAL'):
+      raise device_errors.CommandFailedError(
+          'Invalid output from showmap: %s' % '\n'.join(showmap_out))
+
+    return dict(itertools.izip(SMAPS_COLUMNS, (int(n) for n in split_totals)))
+
+  def _GetMemoryUsageForPidFromStatus(self, pid):
+    for line in self.ReadFile(
+        '/proc/%s/status' % str(pid), as_root=True).splitlines():
+      if line.startswith('VmHWM:'):
+        return {'VmHWM': int(line.split()[1])}
+    raise device_errors.CommandFailedError(
+        'Could not find memory peak value for pid %s' % str(pid), str(self))
+
+  def GetLogcatMonitor(self, *args, **kwargs):
+    """Returns a new LogcatMonitor associated with this device.
+
+    Parameters passed to this function are passed directly to
+    |logcat_monitor.LogcatMonitor| and are documented there.
+    """
+    return logcat_monitor.LogcatMonitor(self.adb, *args, **kwargs)
+
+  def GetClientCache(self, client_name):
+    """Returns client cache."""
+    if client_name not in self._client_caches:
+      self._client_caches[client_name] = {}
+    return self._client_caches[client_name]
+
+  def _ClearCache(self):
+    """Clears all caches."""
+    for client in self._client_caches:
+      self._client_caches[client].clear()
+    self._cache = {
+        # Map of packageId -> list of on-device .apk paths
+        'package_apk_paths': {},
+        # Set of packageId that were loaded from LoadCacheData and not yet
+        # verified.
+        'package_apk_paths_to_verify': set(),
+        # Map of packageId -> set of on-device .apk checksums
+        'package_apk_checksums': {},
+        # Map of property_name -> value
+        'getprop': {},
+        # Map of device_path -> [ignore_other_files, map of path->checksum]
+        'device_path_checksums': {},
+    }
+
+  def LoadCacheData(self, data):
+    """Initializes the cache from data created using DumpCacheData."""
+    obj = json.loads(data)
+    self._cache['package_apk_paths'] = obj.get('package_apk_paths', {})
+    # When using a cache across script invocations, verify that apps have
+    # not been uninstalled.
+    self._cache['package_apk_paths_to_verify'] = set(
+        self._cache['package_apk_paths'].iterkeys())
+
+    package_apk_checksums = obj.get('package_apk_checksums', {})
+    for k, v in package_apk_checksums.iteritems():
+      package_apk_checksums[k] = set(v)
+    self._cache['package_apk_checksums'] = package_apk_checksums
+    device_path_checksums = obj.get('device_path_checksums', {})
+    self._cache['device_path_checksums'] = device_path_checksums
+
+  def DumpCacheData(self):
+    """Dumps the current cache state to a string."""
+    obj = {}
+    obj['package_apk_paths'] = self._cache['package_apk_paths']
+    obj['package_apk_checksums'] = self._cache['package_apk_checksums']
+    # JSON can't handle sets.
+    for k, v in obj['package_apk_checksums'].iteritems():
+      obj['package_apk_checksums'][k] = list(v)
+    obj['device_path_checksums'] = self._cache['device_path_checksums']
+    return json.dumps(obj, separators=(',', ':'))
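+
+  # Illustrative round trip (hypothetical usage): cache data dumped in one
+  # invocation via data = device.DumpCacheData() can seed the cache of a
+  # later invocation via device.LoadCacheData(data).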
+
+  @classmethod
+  def parallel(cls, devices, async=False):
+    """Creates a Parallelizer to operate over the provided list of devices.
+
+    Args:
+      devices: A list of either DeviceUtils instances or objects from
+               which DeviceUtils instances can be constructed. If None,
+               all attached devices will be used.
+      async: If true, returns a Parallelizer that runs operations
+             asynchronously.
+
+    Returns:
+      A Parallelizer operating over |devices|.
+
+    Raises:
+      device_errors.NoDevicesError: If no devices are passed.
+    """
+    if not devices:
+      raise device_errors.NoDevicesError()
+
+    devices = [d if isinstance(d, cls) else cls(d) for d in devices]
+    if async:
+      return parallelizer.Parallelizer(devices)
+    else:
+      return parallelizer.SyncParallelizer(devices)
+
+  @classmethod
+  def HealthyDevices(cls, blacklist=None, **kwargs):
+    blacklisted_devices = blacklist.Read() if blacklist else []
+
+    def blacklisted(adb):
+      if adb.GetDeviceSerial() in blacklisted_devices:
+        logging.warning('Device %s is blacklisted.', adb.GetDeviceSerial())
+        return True
+      return False
+
+    devices = []
+    for adb in adb_wrapper.AdbWrapper.Devices():
+      if not blacklisted(adb):
+        devices.append(cls(_CreateAdbWrapper(adb), **kwargs))
+    return devices
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def RestartAdbd(self, timeout=None, retries=None):
+    logging.info('Restarting adbd on device.')
+    with device_temp_file.DeviceTempFile(self.adb, suffix='.sh') as script:
+      self.WriteFile(script.name, _RESTART_ADBD_SCRIPT)
+      self.RunShellCommand(['source', script.name], as_root=True)
+      self.adb.WaitForDevice()
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def GrantPermissions(self, package, permissions, timeout=None, retries=None):
+    # Permissions only need to be set on M and above because of the changes to
+    # the permission model.
+    if not permissions or self.build_version_sdk < version_codes.MARSHMALLOW:
+      return
+    logging.info('Setting permissions for %s.', package)
+    permissions = [p for p in permissions if p not in _PERMISSIONS_BLACKLIST]
+    if ('android.permission.WRITE_EXTERNAL_STORAGE' in permissions
+        and 'android.permission.READ_EXTERNAL_STORAGE' not in permissions):
+      permissions.append('android.permission.READ_EXTERNAL_STORAGE')
+    cmd = '&&'.join('pm grant %s %s' % (package, p) for p in permissions)
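+    # Illustrative command (hypothetical package): granting two permissions
+    # builds a single shell line of the form
+    #   pm grant <package> <perm1>&&pm grant <package> <perm2>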
+    if cmd:
+      output = self.RunShellCommand(cmd, check_return=True)
+      if output:
+        logging.warning('Possible problem when granting permissions. Blacklist '
+                        'may need to be updated.')
+        for line in output:
+          logging.warning('  %s', line)
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def IsScreenOn(self, timeout=None, retries=None):
+    """Determines if screen is on.
+
+    Dumpsys input_method exposes the screen on/off state. Below is an
+    explanation of the states.
+
+    Pre-L:
+      On: mScreenOn=true
+      Off: mScreenOn=false
+    L+:
+      On: mInteractive=true
+      Off: mInteractive=false
+
+    Returns:
+      True if the screen is on, False if it is off.
+
+    Raises:
+      device_errors.CommandFailedError: If screen state cannot be found.
+    """
+    if self.build_version_sdk < version_codes.LOLLIPOP:
+      input_check = 'mScreenOn'
+      check_value = 'mScreenOn=true'
+    else:
+      input_check = 'mInteractive'
+      check_value = 'mInteractive=true'
+    dumpsys_out = self._RunPipedShellCommand(
+        'dumpsys input_method | grep %s' % input_check)
+    if not dumpsys_out:
+      raise device_errors.CommandFailedError(
+          'Unable to detect screen state', str(self))
+    return check_value in dumpsys_out[0]
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def SetScreen(self, on, timeout=None, retries=None):
+    """Turns screen on and off.
+
+    Args:
+      on: bool to decide state to switch to. True = on, False = off.
+    """
+    def screen_test():
+      return self.IsScreenOn() == on
+
+    if screen_test():
+      logging.info('Screen already in expected state.')
+      return
+    self.RunShellCommand('input keyevent 26')
+    timeout_retry.WaitFor(screen_test, wait_period=1)
diff --git a/catapult/devil/devil/android/device_utils_devicetest.py b/catapult/devil/devil/android/device_utils_devicetest.py
new file mode 100755
index 0000000..9a50373
--- /dev/null
+++ b/catapult/devil/devil/android/device_utils_devicetest.py
@@ -0,0 +1,217 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Unit tests for the contents of device_utils.py (mostly DeviceUtils).
+These tests run against real, attached devices.
+"""
+
+import os
+import tempfile
+import unittest
+
+from devil.android import device_utils
+from devil.android.sdk import adb_wrapper
+from devil.utils import cmd_helper
+
+_OLD_CONTENTS = "foo"
+_NEW_CONTENTS = "bar"
+_DEVICE_DIR = "/data/local/tmp/device_utils_test"
+_SUB_DIR = "sub"
+_SUB_DIR1 = "sub1"
+_SUB_DIR2 = "sub2"
+
+
+class DeviceUtilsPushDeleteFilesTest(unittest.TestCase):
+
+  def setUp(self):
+    devices = adb_wrapper.AdbWrapper.Devices()
+    assert devices, 'A device must be attached'
+    self.adb = devices[0]
+    self.adb.WaitForDevice()
+    self.device = device_utils.DeviceUtils(
+        self.adb, default_timeout=10, default_retries=0)
+
+  @staticmethod
+  def _MakeTempFile(contents):
+    """Make a temporary file with the given contents.
+
+    Args:
+      contents: string to write to the temporary file.
+
+    Returns:
+      A tuple containing the absolute path to the file and the file name.
+    """
+    fi, path = tempfile.mkstemp(text=True)
+    with os.fdopen(fi, 'w') as f:
+      f.write(contents)
+    file_name = os.path.basename(path)
+    return (path, file_name)
+
+  @staticmethod
+  def _MakeTempFileGivenDir(directory, contents):
+    """Make a temporary file under the given directory
+    with the given contents.
+
+    Args:
+      directory: the directory in which to create the temporary file
+      contents: string to write to the temporary file
+
+    Returns:
+      A tuple containing the absolute path to the file and the file name.
+    """
+    fi, path = tempfile.mkstemp(dir=directory, text=True)
+    with os.fdopen(fi, 'w') as f:
+      f.write(contents)
+    file_name = os.path.basename(path)
+    return (path, file_name)
+
+  @staticmethod
+  def _ChangeTempFile(path, contents):
+    with open(path, 'w') as f:
+      f.write(contents)
+
+  @staticmethod
+  def _DeleteTempFile(path):
+    os.remove(path)
+
+  def testPushChangedFiles_noFileChange(self):
+    (host_file_path, file_name) = self._MakeTempFile(_OLD_CONTENTS)
+    device_file_path = "%s/%s" % (_DEVICE_DIR, file_name)
+    self.adb.Push(host_file_path, device_file_path)
+    self.device.PushChangedFiles([(host_file_path, device_file_path)])
+    result = self.device.RunShellCommand(['cat', device_file_path],
+                                         single_line=True)
+    self.assertEqual(_OLD_CONTENTS, result)
+
+    cmd_helper.RunCmd(['rm', host_file_path])
+    self.device.RunShellCommand(['rm', '-rf', _DEVICE_DIR])
+
+  def testPushChangedFiles_singleFileChange(self):
+    (host_file_path, file_name) = self._MakeTempFile(_OLD_CONTENTS)
+    device_file_path = "%s/%s" % (_DEVICE_DIR, file_name)
+    self.adb.Push(host_file_path, device_file_path)
+
+    with open(host_file_path, 'w') as f:
+      f.write(_NEW_CONTENTS)
+    self.device.PushChangedFiles([(host_file_path, device_file_path)])
+    result = self.device.RunShellCommand(['cat', device_file_path],
+                                         single_line=True)
+    self.assertEqual(_NEW_CONTENTS, result)
+
+    cmd_helper.RunCmd(['rm', host_file_path])
+    self.device.RunShellCommand(['rm', '-rf', _DEVICE_DIR])
+
+  def testDeleteFiles(self):
+    host_tmp_dir = tempfile.mkdtemp()
+    (host_file_path, file_name) = self._MakeTempFileGivenDir(
+        host_tmp_dir, _OLD_CONTENTS)
+
+    device_file_path = "%s/%s" % (_DEVICE_DIR, file_name)
+    self.adb.Push(host_file_path, device_file_path)
+
+    cmd_helper.RunCmd(['rm', host_file_path])
+    self.device.PushChangedFiles([(host_tmp_dir, _DEVICE_DIR)],
+                                 delete_device_stale=True)
+    result = self.device.RunShellCommand(['ls', _DEVICE_DIR], single_line=True)
+    self.assertEqual('', result)
+
+    cmd_helper.RunCmd(['rm', '-rf', host_tmp_dir])
+    self.device.RunShellCommand(['rm', '-rf', _DEVICE_DIR])
+
+  def testPushAndDeleteFiles_noSubDir(self):
+    host_tmp_dir = tempfile.mkdtemp()
+    (host_file_path1, file_name1) = self._MakeTempFileGivenDir(
+        host_tmp_dir, _OLD_CONTENTS)
+    (host_file_path2, file_name2) = self._MakeTempFileGivenDir(
+        host_tmp_dir, _OLD_CONTENTS)
+
+    device_file_path1 = "%s/%s" % (_DEVICE_DIR, file_name1)
+    device_file_path2 = "%s/%s" % (_DEVICE_DIR, file_name2)
+    self.adb.Push(host_file_path1, device_file_path1)
+    self.adb.Push(host_file_path2, device_file_path2)
+
+    with open(host_file_path1, 'w') as f:
+      f.write(_NEW_CONTENTS)
+    cmd_helper.RunCmd(['rm', host_file_path2])
+
+    self.device.PushChangedFiles([(host_tmp_dir, _DEVICE_DIR)],
+                                   delete_device_stale=True)
+    result = self.device.RunShellCommand(['cat', device_file_path1],
+                                         single_line=True)
+    self.assertEqual(_NEW_CONTENTS, result)
+    result = self.device.RunShellCommand(['ls', _DEVICE_DIR], single_line=True)
+    self.assertEqual(file_name1, result)
+
+    self.device.RunShellCommand(['rm', '-rf', _DEVICE_DIR])
+    cmd_helper.RunCmd(['rm', '-rf', host_tmp_dir])
+
+  def testPushAndDeleteFiles_SubDir(self):
+    host_tmp_dir = tempfile.mkdtemp()
+    host_sub_dir1 = "%s/%s" % (host_tmp_dir, _SUB_DIR1)
+    host_sub_dir2 = "%s/%s/%s" % (host_tmp_dir, _SUB_DIR, _SUB_DIR2)
+    cmd_helper.RunCmd(['mkdir', '-p', host_sub_dir1])
+    cmd_helper.RunCmd(['mkdir', '-p', host_sub_dir2])
+
+    (host_file_path1, file_name1) = self._MakeTempFileGivenDir(
+        host_tmp_dir, _OLD_CONTENTS)
+    (host_file_path2, file_name2) = self._MakeTempFileGivenDir(
+        host_tmp_dir, _OLD_CONTENTS)
+    (host_file_path3, file_name3) = self._MakeTempFileGivenDir(
+        host_sub_dir1, _OLD_CONTENTS)
+    (host_file_path4, file_name4) = self._MakeTempFileGivenDir(
+        host_sub_dir2, _OLD_CONTENTS)
+
+    device_file_path1 = "%s/%s" % (_DEVICE_DIR, file_name1)
+    device_file_path2 = "%s/%s" % (_DEVICE_DIR, file_name2)
+    device_file_path3 = "%s/%s/%s" % (_DEVICE_DIR, _SUB_DIR1, file_name3)
+    device_file_path4 = "%s/%s/%s/%s" % (_DEVICE_DIR, _SUB_DIR,
+                                         _SUB_DIR2, file_name4)
+
+    self.adb.Push(host_file_path1, device_file_path1)
+    self.adb.Push(host_file_path2, device_file_path2)
+    self.adb.Push(host_file_path3, device_file_path3)
+    self.adb.Push(host_file_path4, device_file_path4)
+
+    with open(host_file_path1, 'w') as f:
+      f.write(_NEW_CONTENTS)
+    cmd_helper.RunCmd(['rm', host_file_path2])
+    cmd_helper.RunCmd(['rm', host_file_path4])
+
+    self.device.PushChangedFiles([(host_tmp_dir, _DEVICE_DIR)],
+                                   delete_device_stale=True)
+    result = self.device.RunShellCommand(['cat', device_file_path1],
+                                         single_line=True)
+    self.assertEqual(_NEW_CONTENTS, result)
+
+    result = self.device.RunShellCommand(['ls', _DEVICE_DIR])
+    self.assertIn(file_name1, result)
+    self.assertIn(_SUB_DIR1, result)
+    self.assertIn(_SUB_DIR, result)
+    self.assertEqual(3, len(result))
+
+    result = self.device.RunShellCommand(['cat', device_file_path3],
+                                      single_line=True)
+    self.assertEqual(_OLD_CONTENTS, result)
+
+    result = self.device.RunShellCommand(["ls", "%s/%s/%s"
+                                          % (_DEVICE_DIR, _SUB_DIR, _SUB_DIR2)],
+                                         single_line=True)
+    self.assertEqual('', result)
+
+    self.device.RunShellCommand(['rm', '-rf', _DEVICE_DIR])
+    cmd_helper.RunCmd(['rm', '-rf', host_tmp_dir])
+
+  def testRestartAdbd(self):
+    old_adbd_pid = self.device.RunShellCommand(
+        ['ps', '|', 'grep', 'adbd'])[1].split()[1]
+    self.device.RestartAdbd()
+    new_adbd_pid = self.device.RunShellCommand(
+        ['ps', '|', 'grep', 'adbd'])[1].split()[1]
+    self.assertNotEqual(old_adbd_pid, new_adbd_pid)
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/catapult/devil/devil/android/device_utils_test.py b/catapult/devil/devil/android/device_utils_test.py
new file mode 100755
index 0000000..38849ec
--- /dev/null
+++ b/catapult/devil/devil/android/device_utils_test.py
@@ -0,0 +1,2312 @@
+#!/usr/bin/env python
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Unit tests for the contents of device_utils.py (mostly DeviceUtils).
+"""
+
+# pylint: disable=protected-access
+# pylint: disable=unused-argument
+
+import logging
+import unittest
+
+from devil import devil_env
+from devil.android import device_errors
+from devil.android import device_signal
+from devil.android import device_utils
+from devil.android.sdk import adb_wrapper
+from devil.android.sdk import intent
+from devil.android.sdk import version_codes
+from devil.utils import cmd_helper
+from devil.utils import mock_calls
+
+with devil_env.SysPath(devil_env.PYMOCK_PATH):
+  import mock  # pylint: disable=import-error
+
+
+class _MockApkHelper(object):
+
+  def __init__(self, path, package_name, perms=None):
+    self.path = path
+    self.package_name = package_name
+    self.perms = perms
+
+  def GetPackageName(self):
+    return self.package_name
+
+  def GetPermissions(self):
+    return self.perms
+
+
+class DeviceUtilsInitTest(unittest.TestCase):
+
+  def testInitWithStr(self):
+    serial_as_str = str('0123456789abcdef')
+    d = device_utils.DeviceUtils('0123456789abcdef')
+    self.assertEqual(serial_as_str, d.adb.GetDeviceSerial())
+
+  def testInitWithUnicode(self):
+    serial_as_unicode = unicode('fedcba9876543210')
+    d = device_utils.DeviceUtils(serial_as_unicode)
+    self.assertEqual(serial_as_unicode, d.adb.GetDeviceSerial())
+
+  def testInitWithAdbWrapper(self):
+    serial = '123456789abcdef0'
+    a = adb_wrapper.AdbWrapper(serial)
+    d = device_utils.DeviceUtils(a)
+    self.assertEqual(serial, d.adb.GetDeviceSerial())
+
+  def testInitWithMissing_fails(self):
+    with self.assertRaises(ValueError):
+      device_utils.DeviceUtils(None)
+    with self.assertRaises(ValueError):
+      device_utils.DeviceUtils('')
+
+
+class DeviceUtilsGetAVDsTest(mock_calls.TestCase):
+
+  def testGetAVDs(self):
+    mocked_attrs = {
+      'android_sdk': '/my/sdk/path'
+    }
+    with mock.patch('devil.devil_env._Environment.LocalPath',
+                    mock.Mock(side_effect=lambda a: mocked_attrs[a])):
+      with self.assertCall(
+          mock.call.devil.utils.cmd_helper.GetCmdOutput(
+              [mock.ANY, 'list', 'avd']),
+          'Available Android Virtual Devices:\n'
+          '    Name: my_android5.0\n'
+          '    Path: /some/path/to/.android/avd/my_android5.0.avd\n'
+          '  Target: Android 5.0 (API level 21)\n'
+          ' Tag/ABI: default/x86\n'
+          '    Skin: WVGA800\n'):
+        self.assertEquals(['my_android5.0'], device_utils.GetAVDs())
+
+
+class DeviceUtilsRestartServerTest(mock_calls.TestCase):
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testRestartServer_succeeds(self):
+    with self.assertCalls(
+        mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.KillServer(),
+        (mock.call.devil.utils.cmd_helper.GetCmdStatusAndOutput(
+            ['pgrep', 'adb']),
+         (1, '')),
+        mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.StartServer(),
+        (mock.call.devil.utils.cmd_helper.GetCmdStatusAndOutput(
+            ['pgrep', 'adb']),
+         (1, '')),
+        (mock.call.devil.utils.cmd_helper.GetCmdStatusAndOutput(
+            ['pgrep', 'adb']),
+         (0, '123\n'))):
+      device_utils.RestartServer()
+
+
+class MockTempFile(object):
+
+  def __init__(self, name='/tmp/some/file'):
+    self.file = mock.MagicMock(spec=file)
+    self.file.name = name
+    self.file.name_quoted = cmd_helper.SingleQuote(name)
+
+  def __enter__(self):
+    return self.file
+
+  def __exit__(self, exc_type, exc_val, exc_tb):
+    pass
+
+  @property
+  def name(self):
+    return self.file.name
+
+
+class _PatchedFunction(object):
+
+  def __init__(self, patched=None, mocked=None):
+    self.patched = patched
+    self.mocked = mocked
+
+
+def _AdbWrapperMock(test_serial, is_ready=True):
+  adb = mock.Mock(spec=adb_wrapper.AdbWrapper)
+  adb.__str__ = mock.Mock(return_value=test_serial)
+  adb.GetDeviceSerial.return_value = test_serial
+  adb.is_ready = is_ready
+  return adb
+
+
+class DeviceUtilsTest(mock_calls.TestCase):
+
+  def setUp(self):
+    self.adb = _AdbWrapperMock('0123456789abcdef')
+    self.device = device_utils.DeviceUtils(
+        self.adb, default_timeout=10, default_retries=0)
+    self.watchMethodCalls(self.call.adb, ignore=['GetDeviceSerial'])
+
+  def AdbCommandError(self, args=None, output=None, status=None, msg=None):
+    if args is None:
+      args = ['[unspecified]']
+    return mock.Mock(side_effect=device_errors.AdbCommandFailedError(
+        args, output, status, msg, str(self.device)))
+
+  def CommandError(self, msg=None):
+    if msg is None:
+      msg = 'Command failed'
+    return mock.Mock(side_effect=device_errors.CommandFailedError(
+        msg, str(self.device)))
+
+  def ShellError(self, output=None, status=1):
+    def action(cmd, *args, **kwargs):
+      raise device_errors.AdbShellCommandFailedError(
+          cmd, output, status, str(self.device))
+    if output is None:
+      output = 'Permission denied\n'
+    return action
+
+  def TimeoutError(self, msg=None):
+    if msg is None:
+      msg = 'Operation timed out'
+    return mock.Mock(side_effect=device_errors.CommandTimeoutError(
+        msg, str(self.device)))
+
+
+class DeviceUtilsEqTest(DeviceUtilsTest):
+
+  def testEq_equal_deviceUtils(self):
+    other = device_utils.DeviceUtils(_AdbWrapperMock('0123456789abcdef'))
+    self.assertTrue(self.device == other)
+    self.assertTrue(other == self.device)
+
+  def testEq_equal_adbWrapper(self):
+    other = adb_wrapper.AdbWrapper('0123456789abcdef')
+    self.assertTrue(self.device == other)
+    self.assertTrue(other == self.device)
+
+  def testEq_equal_string(self):
+    other = '0123456789abcdef'
+    self.assertTrue(self.device == other)
+    self.assertTrue(other == self.device)
+
+  def testEq_devicesNotEqual(self):
+    other = device_utils.DeviceUtils(_AdbWrapperMock('0123456789abcdee'))
+    self.assertFalse(self.device == other)
+    self.assertFalse(other == self.device)
+
+  def testEq_identity(self):
+    self.assertTrue(self.device == self.device)
+
+  def testEq_serialInList(self):
+    devices = [self.device]
+    self.assertTrue('0123456789abcdef' in devices)
+
+
+class DeviceUtilsLtTest(DeviceUtilsTest):
+
+  def testLt_lessThan(self):
+    other = device_utils.DeviceUtils(_AdbWrapperMock('ffffffffffffffff'))
+    self.assertTrue(self.device < other)
+    self.assertTrue(other > self.device)
+
+  def testLt_greaterThan_lhs(self):
+    other = device_utils.DeviceUtils(_AdbWrapperMock('0000000000000000'))
+    self.assertFalse(self.device < other)
+    self.assertFalse(other > self.device)
+
+  def testLt_equal(self):
+    other = device_utils.DeviceUtils(_AdbWrapperMock('0123456789abcdef'))
+    self.assertFalse(self.device < other)
+    self.assertFalse(other > self.device)
+
+  def testLt_sorted(self):
+    devices = [
+        device_utils.DeviceUtils(_AdbWrapperMock('ffffffffffffffff')),
+        device_utils.DeviceUtils(_AdbWrapperMock('0000000000000000')),
+    ]
+    sorted_devices = sorted(devices)
+    self.assertEquals('0000000000000000',
+                      sorted_devices[0].adb.GetDeviceSerial())
+    self.assertEquals('ffffffffffffffff',
+                      sorted_devices[1].adb.GetDeviceSerial())
+
+
+class DeviceUtilsStrTest(DeviceUtilsTest):
+
+  def testStr_returnsSerial(self):
+    with self.assertCalls(
+        (self.call.adb.GetDeviceSerial(), '0123456789abcdef')):
+      self.assertEqual('0123456789abcdef', str(self.device))
+
+
+class DeviceUtilsIsOnlineTest(DeviceUtilsTest):
+
+  def testIsOnline_true(self):
+    with self.assertCall(self.call.adb.GetState(), 'device'):
+      self.assertTrue(self.device.IsOnline())
+
+  def testIsOnline_false(self):
+    with self.assertCall(self.call.adb.GetState(), 'offline'):
+      self.assertFalse(self.device.IsOnline())
+
+  def testIsOnline_error(self):
+    with self.assertCall(self.call.adb.GetState(), self.CommandError()):
+      self.assertFalse(self.device.IsOnline())
+
+
+class DeviceUtilsHasRootTest(DeviceUtilsTest):
+
+  def testHasRoot_true(self):
+    with self.assertCall(self.call.adb.Shell('ls /root'), 'foo\n'):
+      self.assertTrue(self.device.HasRoot())
+
+  def testHasRoot_false(self):
+    with self.assertCall(self.call.adb.Shell('ls /root'), self.ShellError()):
+      self.assertFalse(self.device.HasRoot())
+
+
+class DeviceUtilsEnableRootTest(DeviceUtilsTest):
+
+  def testEnableRoot_succeeds(self):
+    with self.assertCalls(
+        (self.call.device.IsUserBuild(), False),
+        self.call.adb.Root(),
+        self.call.device.WaitUntilFullyBooted()):
+      self.device.EnableRoot()
+
+  def testEnableRoot_userBuild(self):
+    with self.assertCalls(
+        (self.call.device.IsUserBuild(), True)):
+      with self.assertRaises(device_errors.CommandFailedError):
+        self.device.EnableRoot()
+
+  def testEnableRoot_rootFails(self):
+    with self.assertCalls(
+        (self.call.device.IsUserBuild(), False),
+        (self.call.adb.Root(), self.CommandError())):
+      with self.assertRaises(device_errors.CommandFailedError):
+        self.device.EnableRoot()
+
+
+class DeviceUtilsIsUserBuildTest(DeviceUtilsTest):
+
+  def testIsUserBuild_yes(self):
+    with self.assertCall(
+        self.call.device.GetProp('ro.build.type', cache=True), 'user'):
+      self.assertTrue(self.device.IsUserBuild())
+
+  def testIsUserBuild_no(self):
+    with self.assertCall(
+        self.call.device.GetProp('ro.build.type', cache=True), 'userdebug'):
+      self.assertFalse(self.device.IsUserBuild())
+
+
+class DeviceUtilsGetExternalStoragePathTest(DeviceUtilsTest):
+
+  def testGetExternalStoragePath_succeeds(self):
+    with self.assertCall(
+        self.call.adb.Shell('echo $EXTERNAL_STORAGE'), '/fake/storage/path\n'):
+      self.assertEquals('/fake/storage/path',
+                        self.device.GetExternalStoragePath())
+
+  def testGetExternalStoragePath_fails(self):
+    with self.assertCall(self.call.adb.Shell('echo $EXTERNAL_STORAGE'), '\n'):
+      with self.assertRaises(device_errors.CommandFailedError):
+        self.device.GetExternalStoragePath()
+
+
+class DeviceUtilsGetApplicationPathsInternalTest(DeviceUtilsTest):
+
+  def testGetApplicationPathsInternal_exists(self):
+    with self.assertCalls(
+        (self.call.device.GetProp('ro.build.version.sdk', cache=True), '19'),
+        (self.call.device.RunShellCommand(
+            ['pm', 'path', 'android'], check_return=True),
+         ['package:/path/to/android.apk'])):
+      self.assertEquals(['/path/to/android.apk'],
+                        self.device._GetApplicationPathsInternal('android'))
+
+  def testGetApplicationPathsInternal_notExists(self):
+    with self.assertCalls(
+        (self.call.device.GetProp('ro.build.version.sdk', cache=True), '19'),
+        (self.call.device.RunShellCommand(
+            ['pm', 'path', 'not.installed.app'], check_return=True),
+         '')):
+      self.assertEquals([],
+          self.device._GetApplicationPathsInternal('not.installed.app'))
+
+  def testGetApplicationPathsInternal_fails(self):
+    with self.assertCalls(
+        (self.call.device.GetProp('ro.build.version.sdk', cache=True), '19'),
+        (self.call.device.RunShellCommand(
+            ['pm', 'path', 'android'], check_return=True),
+         self.CommandError('ERROR. Is package manager running?\n'))):
+      with self.assertRaises(device_errors.CommandFailedError):
+        self.device._GetApplicationPathsInternal('android')
+
+
+class DeviceUtils_GetApplicationVersionTest(DeviceUtilsTest):
+
+  def test_GetApplicationVersion_exists(self):
+    with self.assertCalls(
+        (self.call.adb.Shell('dumpsys package com.android.chrome'),
+         'Packages:\n'
+         '  Package [com.android.chrome] (3901ecfb):\n'
+         '    userId=1234 gids=[123, 456, 789]\n'
+         '    pkg=Package{1fecf634 com.android.chrome}\n'
+         '    versionName=45.0.1234.7\n')):
+      self.assertEquals('45.0.1234.7',
+                        self.device.GetApplicationVersion('com.android.chrome'))
+
+  def test_GetApplicationVersion_notExists(self):
+    with self.assertCalls(
+        (self.call.adb.Shell('dumpsys package com.android.chrome'), '')):
+      self.assertEquals(None,
+                        self.device.GetApplicationVersion('com.android.chrome'))
+
+  def test_GetApplicationVersion_fails(self):
+    with self.assertCalls(
+        (self.call.adb.Shell('dumpsys package com.android.chrome'),
+         'Packages:\n'
+         '  Package [com.android.chrome] (3901ecfb):\n'
+         '    userId=1234 gids=[123, 456, 789]\n'
+         '    pkg=Package{1fecf634 com.android.chrome}\n')):
+      with self.assertRaises(device_errors.CommandFailedError):
+        self.device.GetApplicationVersion('com.android.chrome')
+
+
+class DeviceUtilsGetApplicationDataDirectoryTest(DeviceUtilsTest):
+
+  def testGetApplicationDataDirectory_exists(self):
+    with self.assertCall(
+        self.call.device._RunPipedShellCommand(
+            'pm dump foo.bar.baz | grep dataDir='),
+        ['dataDir=/data/data/foo.bar.baz']):
+      self.assertEquals(
+          '/data/data/foo.bar.baz',
+          self.device.GetApplicationDataDirectory('foo.bar.baz'))
+
+  def testGetApplicationDataDirectory_notExists(self):
+    with self.assertCall(
+        self.call.device._RunPipedShellCommand(
+            'pm dump foo.bar.baz | grep dataDir='),
+        self.ShellError()):
+      self.assertIsNone(self.device.GetApplicationDataDirectory('foo.bar.baz'))
+
+
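+# time.sleep is patched out so the polling/retry sequences exercised below run
+# without real delays.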
+@mock.patch('time.sleep', mock.Mock())
+class DeviceUtilsWaitUntilFullyBootedTest(DeviceUtilsTest):
+
+  def testWaitUntilFullyBooted_succeedsNoWifi(self):
+    with self.assertCalls(
+        self.call.adb.WaitForDevice(),
+        # sd_card_ready
+        (self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
+        (self.call.adb.Shell('test -d /fake/storage/path'), ''),
+        # pm_ready
+        (self.call.device._GetApplicationPathsInternal('android',
+                                                       skip_cache=True),
+         ['package:/some/fake/path']),
+        # boot_completed
+        (self.call.device.GetProp('sys.boot_completed', cache=False), '1')):
+      self.device.WaitUntilFullyBooted(wifi=False)
+
+  def testWaitUntilFullyBooted_succeedsWithWifi(self):
+    with self.assertCalls(
+        self.call.adb.WaitForDevice(),
+        # sd_card_ready
+        (self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
+        (self.call.adb.Shell('test -d /fake/storage/path'), ''),
+        # pm_ready
+        (self.call.device._GetApplicationPathsInternal('android',
+                                                       skip_cache=True),
+         ['package:/some/fake/path']),
+        # boot_completed
+        (self.call.device.GetProp('sys.boot_completed', cache=False), '1'),
+        # wifi_enabled
+        (self.call.adb.Shell('dumpsys wifi'),
+         'stuff\nWi-Fi is enabled\nmore stuff\n')):
+      self.device.WaitUntilFullyBooted(wifi=True)
+
+  def testWaitUntilFullyBooted_deviceNotInitiallyAvailable(self):
+    with self.assertCalls(
+        self.call.adb.WaitForDevice(),
+        # sd_card_ready
+        (self.call.device.GetExternalStoragePath(), self.AdbCommandError()),
+        # sd_card_ready
+        (self.call.device.GetExternalStoragePath(), self.AdbCommandError()),
+        # sd_card_ready
+        (self.call.device.GetExternalStoragePath(), self.AdbCommandError()),
+        # sd_card_ready
+        (self.call.device.GetExternalStoragePath(), self.AdbCommandError()),
+        # sd_card_ready
+        (self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
+        (self.call.adb.Shell('test -d /fake/storage/path'), ''),
+        # pm_ready
+        (self.call.device._GetApplicationPathsInternal('android',
+                                                       skip_cache=True),
+         ['package:/some/fake/path']),
+        # boot_completed
+        (self.call.device.GetProp('sys.boot_completed', cache=False), '1')):
+      self.device.WaitUntilFullyBooted(wifi=False)
+
+  def testWaitUntilFullyBooted_sdCardReadyFails_noPath(self):
+    with self.assertCalls(
+        self.call.adb.WaitForDevice(),
+        # sd_card_ready
+        (self.call.device.GetExternalStoragePath(), self.CommandError())):
+      with self.assertRaises(device_errors.CommandFailedError):
+        self.device.WaitUntilFullyBooted(wifi=False)
+
+  def testWaitUntilFullyBooted_sdCardReadyFails_notExists(self):
+    with self.assertCalls(
+        self.call.adb.WaitForDevice(),
+        # sd_card_ready
+        (self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
+        (self.call.adb.Shell('test -d /fake/storage/path'), self.ShellError()),
+        # sd_card_ready
+        (self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
+        (self.call.adb.Shell('test -d /fake/storage/path'), self.ShellError()),
+        # sd_card_ready
+        (self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
+        (self.call.adb.Shell('test -d /fake/storage/path'),
+         self.TimeoutError())):
+      with self.assertRaises(device_errors.CommandTimeoutError):
+        self.device.WaitUntilFullyBooted(wifi=False)
+
+  def testWaitUntilFullyBooted_devicePmFails(self):
+    with self.assertCalls(
+        self.call.adb.WaitForDevice(),
+        # sd_card_ready
+        (self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
+        (self.call.adb.Shell('test -d /fake/storage/path'), ''),
+        # pm_ready
+        (self.call.device._GetApplicationPathsInternal('android',
+                                                       skip_cache=True),
+         self.CommandError()),
+        # pm_ready
+        (self.call.device._GetApplicationPathsInternal('android',
+                                                       skip_cache=True),
+         self.CommandError()),
+        # pm_ready
+        (self.call.device._GetApplicationPathsInternal('android',
+                                                       skip_cache=True),
+         self.TimeoutError())):
+      with self.assertRaises(device_errors.CommandTimeoutError):
+        self.device.WaitUntilFullyBooted(wifi=False)
+
+  def testWaitUntilFullyBooted_bootFails(self):
+    with self.assertCalls(
+        self.call.adb.WaitForDevice(),
+        # sd_card_ready
+        (self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
+        (self.call.adb.Shell('test -d /fake/storage/path'), ''),
+        # pm_ready
+        (self.call.device._GetApplicationPathsInternal('android',
+                                                       skip_cache=True),
+         ['package:/some/fake/path']),
+        # boot_completed
+        (self.call.device.GetProp('sys.boot_completed', cache=False), '0'),
+        # boot_completed
+        (self.call.device.GetProp('sys.boot_completed', cache=False), '0'),
+        # boot_completed
+        (self.call.device.GetProp('sys.boot_completed', cache=False),
+         self.TimeoutError())):
+      with self.assertRaises(device_errors.CommandTimeoutError):
+        self.device.WaitUntilFullyBooted(wifi=False)
+
+  def testWaitUntilFullyBooted_wifiFails(self):
+    with self.assertCalls(
+        self.call.adb.WaitForDevice(),
+        # sd_card_ready
+        (self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
+        (self.call.adb.Shell('test -d /fake/storage/path'), ''),
+        # pm_ready
+        (self.call.device._GetApplicationPathsInternal('android',
+                                                       skip_cache=True),
+         ['package:/some/fake/path']),
+        # boot_completed
+        (self.call.device.GetProp('sys.boot_completed', cache=False), '1'),
+        # wifi_enabled
+        (self.call.adb.Shell('dumpsys wifi'), 'stuff\nmore stuff\n'),
+        # wifi_enabled
+        (self.call.adb.Shell('dumpsys wifi'), 'stuff\nmore stuff\n'),
+        # wifi_enabled
+        (self.call.adb.Shell('dumpsys wifi'), self.TimeoutError())):
+      with self.assertRaises(device_errors.CommandTimeoutError):
+        self.device.WaitUntilFullyBooted(wifi=True)
+
+
+@mock.patch('time.sleep', mock.Mock())
+class DeviceUtilsRebootTest(DeviceUtilsTest):
+
+  def testReboot_nonBlocking(self):
+    with self.assertCalls(
+        self.call.adb.Reboot(),
+        (self.call.device.IsOnline(), True),
+        (self.call.device.IsOnline(), False)):
+      self.device.Reboot(block=False)
+
+  def testReboot_blocking(self):
+    with self.assertCalls(
+        self.call.adb.Reboot(),
+        (self.call.device.IsOnline(), True),
+        (self.call.device.IsOnline(), False),
+        self.call.device.WaitUntilFullyBooted(wifi=False)):
+      self.device.Reboot(block=True)
+
+  def testReboot_blockUntilWifi(self):
+    with self.assertCalls(
+        self.call.adb.Reboot(),
+        (self.call.device.IsOnline(), True),
+        (self.call.device.IsOnline(), False),
+        self.call.device.WaitUntilFullyBooted(wifi=True)):
+      self.device.Reboot(block=True, wifi=True)
+
+
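+# Install tests stub the installed-path and stale-APK checks to simulate the
+# different prior-install states; mock_apk stands in for an APK on disk.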
+class DeviceUtilsInstallTest(DeviceUtilsTest):
+
+  mock_apk = _MockApkHelper('/fake/test/app.apk', 'test.package', ['p1'])
+
+  def testInstall_noPriorInstall(self):
+    with self.patch_call(self.call.device.build_version_sdk, return_value=23):
+      with self.assertCalls(
+          (self.call.device._GetApplicationPathsInternal('test.package'), []),
+          self.call.adb.Install('/fake/test/app.apk', reinstall=False,
+                                allow_downgrade=False),
+          (self.call.device.GrantPermissions('test.package', ['p1']), [])):
+        self.device.Install(DeviceUtilsInstallTest.mock_apk, retries=0)
+
+  def testInstall_permissionsPreM(self):
+    with self.patch_call(self.call.device.build_version_sdk, return_value=20):
+      with self.assertCalls(
+          (self.call.device._GetApplicationPathsInternal('test.package'), []),
+          (self.call.adb.Install('/fake/test/app.apk', reinstall=False,
+                                 allow_downgrade=False))):
+        self.device.Install(DeviceUtilsInstallTest.mock_apk, retries=0)
+
+  def testInstall_findPermissions(self):
+    with self.patch_call(self.call.device.build_version_sdk, return_value=23):
+      with self.assertCalls(
+          (self.call.device._GetApplicationPathsInternal('test.package'), []),
+          (self.call.adb.Install('/fake/test/app.apk', reinstall=False,
+                                 allow_downgrade=False)),
+          (self.call.device.GrantPermissions('test.package', ['p1']), [])):
+        self.device.Install(DeviceUtilsInstallTest.mock_apk, retries=0)
+
+  def testInstall_passPermissions(self):
+    with self.assertCalls(
+        (self.call.device._GetApplicationPathsInternal('test.package'), []),
+        (self.call.adb.Install('/fake/test/app.apk', reinstall=False,
+                               allow_downgrade=False)),
+        (self.call.device.GrantPermissions('test.package', ['p1', 'p2']), [])):
+      self.device.Install(DeviceUtilsInstallTest.mock_apk, retries=0,
+                          permissions=['p1', 'p2'])
+
+  def testInstall_differentPriorInstall(self):
+    with self.assertCalls(
+        (self.call.device._GetApplicationPathsInternal('test.package'),
+         ['/fake/data/app/test.package.apk']),
+        (self.call.device._ComputeStaleApks('test.package',
+            ['/fake/test/app.apk']),
+         (['/fake/test/app.apk'], None)),
+        self.call.device.Uninstall('test.package'),
+        self.call.adb.Install('/fake/test/app.apk', reinstall=False,
+                              allow_downgrade=False)):
+      self.device.Install(DeviceUtilsInstallTest.mock_apk, retries=0,
+                          permissions=[])
+
+  def testInstall_differentPriorInstall_reinstall(self):
+    with self.assertCalls(
+        (self.call.device._GetApplicationPathsInternal('test.package'),
+         ['/fake/data/app/test.package.apk']),
+        (self.call.device._ComputeStaleApks('test.package',
+            ['/fake/test/app.apk']),
+         (['/fake/test/app.apk'], None)),
+        self.call.adb.Install('/fake/test/app.apk', reinstall=True,
+                              allow_downgrade=False)):
+      self.device.Install(DeviceUtilsInstallTest.mock_apk,
+          reinstall=True, retries=0, permissions=[])
+
+  def testInstall_identicalPriorInstall_reinstall(self):
+    with self.assertCalls(
+        (self.call.device._GetApplicationPathsInternal('test.package'),
+         ['/fake/data/app/test.package.apk']),
+        (self.call.device._ComputeStaleApks('test.package',
+            ['/fake/test/app.apk']),
+         ([], None)),
+        (self.call.device.ForceStop('test.package'))):
+      self.device.Install(DeviceUtilsInstallTest.mock_apk,
+          reinstall=True, retries=0, permissions=[])
+
+  def testInstall_fails(self):
+    with self.assertCalls(
+        (self.call.device._GetApplicationPathsInternal('test.package'), []),
+        (self.call.adb.Install('/fake/test/app.apk', reinstall=False,
+                               allow_downgrade=False),
+         self.CommandError('Failure\r\n'))):
+      with self.assertRaises(device_errors.CommandFailedError):
+        self.device.Install(DeviceUtilsInstallTest.mock_apk, retries=0)
+
+  def testInstall_downgrade(self):
+    with self.assertCalls(
+        (self.call.device._GetApplicationPathsInternal('test.package'),
+         ['/fake/data/app/test.package.apk']),
+        (self.call.device._ComputeStaleApks('test.package',
+            ['/fake/test/app.apk']),
+         (['/fake/test/app.apk'], None)),
+        self.call.adb.Install('/fake/test/app.apk', reinstall=True,
+                              allow_downgrade=True)):
+      self.device.Install(DeviceUtilsInstallTest.mock_apk,
+          reinstall=True, retries=0, permissions=[], allow_downgrade=True)
+
+
+class DeviceUtilsInstallSplitApkTest(DeviceUtilsTest):
+
+  mock_apk = _MockApkHelper('base.apk', 'test.package', ['p1'])
+
+  def testInstallSplitApk_noPriorInstall(self):
+    with self.assertCalls(
+        (self.call.device._CheckSdkLevel(21)),
+        (mock.call.devil.android.sdk.split_select.SelectSplits(
+            self.device, 'base.apk',
+            ['split1.apk', 'split2.apk', 'split3.apk'],
+            allow_cached_props=False),
+         ['split2.apk']),
+        (self.call.device._GetApplicationPathsInternal('test.package'), []),
+        (self.call.adb.InstallMultiple(
+            ['base.apk', 'split2.apk'], partial=None, reinstall=False,
+            allow_downgrade=False))):
+      self.device.InstallSplitApk(DeviceUtilsInstallSplitApkTest.mock_apk,
+          ['split1.apk', 'split2.apk', 'split3.apk'], permissions=[], retries=0)
+
+  def testInstallSplitApk_partialInstall(self):
+    with self.assertCalls(
+        (self.call.device._CheckSdkLevel(21)),
+        (mock.call.devil.android.sdk.split_select.SelectSplits(
+            self.device, 'base.apk',
+            ['split1.apk', 'split2.apk', 'split3.apk'],
+            allow_cached_props=False),
+         ['split2.apk']),
+        (self.call.device._GetApplicationPathsInternal('test.package'),
+         ['base-on-device.apk', 'split2-on-device.apk']),
+        (self.call.device._ComputeStaleApks('test.package',
+                                            ['base.apk', 'split2.apk']),
+         (['split2.apk'], None)),
+        (self.call.adb.InstallMultiple(
+            ['split2.apk'], partial='test.package', reinstall=True,
+            allow_downgrade=False))):
+      self.device.InstallSplitApk(DeviceUtilsInstallSplitApkTest.mock_apk,
+                                  ['split1.apk', 'split2.apk', 'split3.apk'],
+                                  reinstall=True, permissions=[], retries=0)
+
+  def testInstallSplitApk_downgrade(self):
+    with self.assertCalls(
+        (self.call.device._CheckSdkLevel(21)),
+        (mock.call.devil.android.sdk.split_select.SelectSplits(
+            self.device, 'base.apk',
+            ['split1.apk', 'split2.apk', 'split3.apk'],
+            allow_cached_props=False),
+         ['split2.apk']),
+        (self.call.device._GetApplicationPathsInternal('test.package'),
+         ['base-on-device.apk', 'split2-on-device.apk']),
+        (self.call.device._ComputeStaleApks('test.package',
+                                            ['base.apk', 'split2.apk']),
+         (['split2.apk'], None)),
+        (self.call.adb.InstallMultiple(
+            ['split2.apk'], partial='test.package', reinstall=True,
+            allow_downgrade=True))):
+      self.device.InstallSplitApk(DeviceUtilsInstallSplitApkTest.mock_apk,
+                                  ['split1.apk', 'split2.apk', 'split3.apk'],
+                                  reinstall=True, permissions=[], retries=0,
+                                  allow_downgrade=True)
+
+
+class DeviceUtilsUninstallTest(DeviceUtilsTest):
+
+  def testUninstall_callsThrough(self):
+    with self.assertCalls(
+        (self.call.device._GetApplicationPathsInternal('test.package'),
+         ['/path.apk']),
+        self.call.adb.Uninstall('test.package', True)):
+      self.device.Uninstall('test.package', True)
+
+  def testUninstall_noop(self):
+    with self.assertCalls(
+        (self.call.device._GetApplicationPathsInternal('test.package'), [])):
+      self.device.Uninstall('test.package', True)
+
+
+class DeviceUtilsSuTest(DeviceUtilsTest):
+
+  def testSu_preM(self):
+    with self.patch_call(
+        self.call.device.build_version_sdk,
+        return_value=version_codes.LOLLIPOP_MR1):
+      self.assertEquals('su -c foo', self.device._Su('foo'))
+
+  def testSu_mAndAbove(self):
+    with self.patch_call(
+        self.call.device.build_version_sdk,
+        return_value=version_codes.MARSHMALLOW):
+      self.assertEquals('su 0 foo', self.device._Su('foo'))
+
+
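+# NeedsSU is stubbed to False in setUp, so commands are expected without an su
+# wrapper unless a test overrides it (see the as_root cases below).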
+class DeviceUtilsRunShellCommandTest(DeviceUtilsTest):
+
+  def setUp(self):
+    super(DeviceUtilsRunShellCommandTest, self).setUp()
+    self.device.NeedsSU = mock.Mock(return_value=False)
+
+  def testRunShellCommand_commandAsList(self):
+    with self.assertCall(self.call.adb.Shell('pm list packages'), ''):
+      self.device.RunShellCommand(['pm', 'list', 'packages'])
+
+  def testRunShellCommand_commandAsListQuoted(self):
+    with self.assertCall(self.call.adb.Shell("echo 'hello world' '$10'"), ''):
+      self.device.RunShellCommand(['echo', 'hello world', '$10'])
+
+  def testRunShellCommand_commandAsString(self):
+    with self.assertCall(self.call.adb.Shell('echo "$VAR"'), ''):
+      self.device.RunShellCommand('echo "$VAR"')
+
+  def testNewRunShellImpl_withEnv(self):
+    with self.assertCall(
+        self.call.adb.Shell('VAR=some_string echo "$VAR"'), ''):
+      self.device.RunShellCommand('echo "$VAR"', env={'VAR': 'some_string'})
+
+  def testNewRunShellImpl_withEnvQuoted(self):
+    with self.assertCall(
+        self.call.adb.Shell('PATH="$PATH:/other/path" run_this'), ''):
+      self.device.RunShellCommand('run_this', env={'PATH': '$PATH:/other/path'})
+
+  def testNewRunShellImpl_withEnv_failure(self):
+    with self.assertRaises(KeyError):
+      self.device.RunShellCommand('some_cmd', env={'INVALID NAME': 'value'})
+
+  def testNewRunShellImpl_withCwd(self):
+    with self.assertCall(self.call.adb.Shell('cd /some/test/path && ls'), ''):
+      self.device.RunShellCommand('ls', cwd='/some/test/path')
+
+  def testNewRunShellImpl_withCwdQuoted(self):
+    with self.assertCall(
+        self.call.adb.Shell("cd '/some test/path with/spaces' && ls"), ''):
+      self.device.RunShellCommand('ls', cwd='/some test/path with/spaces')
+
+  def testRunShellCommand_withHugeCmd(self):
+    payload = 'hi! ' * 1024
+    expected_cmd = "echo '%s'" % payload
+    with self.assertCalls(
+      (mock.call.devil.android.device_temp_file.DeviceTempFile(
+          self.adb, suffix='.sh'), MockTempFile('/sdcard/temp-123.sh')),
+      self.call.device._WriteFileWithPush('/sdcard/temp-123.sh', expected_cmd),
+      (self.call.adb.Shell('sh /sdcard/temp-123.sh'), payload + '\n')):
+      self.assertEquals([payload],
+                        self.device.RunShellCommand(['echo', payload]))
+
+  def testRunShellCommand_withHugeCmdAndSU(self):
+    payload = 'hi! ' * 1024
+    expected_cmd_without_su = """sh -c 'echo '"'"'%s'"'"''""" % payload
+    expected_cmd = 'su -c %s' % expected_cmd_without_su
+    with self.assertCalls(
+      (self.call.device.NeedsSU(), True),
+      (self.call.device._Su(expected_cmd_without_su), expected_cmd),
+      (mock.call.devil.android.device_temp_file.DeviceTempFile(
+          self.adb, suffix='.sh'), MockTempFile('/sdcard/temp-123.sh')),
+      self.call.device._WriteFileWithPush('/sdcard/temp-123.sh', expected_cmd),
+      (self.call.adb.Shell('sh /sdcard/temp-123.sh'), payload + '\n')):
+      self.assertEquals(
+          [payload],
+          self.device.RunShellCommand(['echo', payload], as_root=True))
+
+  def testRunShellCommand_withSu(self):
+    expected_cmd_without_su = "sh -c 'setprop service.adb.root 0'"
+    expected_cmd = 'su -c %s' % expected_cmd_without_su
+    with self.assertCalls(
+        (self.call.device.NeedsSU(), True),
+        (self.call.device._Su(expected_cmd_without_su), expected_cmd),
+        (self.call.adb.Shell(expected_cmd), '')):
+      self.device.RunShellCommand('setprop service.adb.root 0', as_root=True)
+
+  def testRunShellCommand_manyLines(self):
+    cmd = 'ls /some/path'
+    with self.assertCall(self.call.adb.Shell(cmd), 'file1\nfile2\nfile3\n'):
+      self.assertEquals(['file1', 'file2', 'file3'],
+                        self.device.RunShellCommand(cmd))
+
+  def testRunShellCommand_singleLine_success(self):
+    cmd = 'echo $VALUE'
+    with self.assertCall(self.call.adb.Shell(cmd), 'some value\n'):
+      self.assertEquals('some value',
+                        self.device.RunShellCommand(cmd, single_line=True))
+
+  def testRunShellCommand_singleLine_successEmptyLine(self):
+    cmd = 'echo $VALUE'
+    with self.assertCall(self.call.adb.Shell(cmd), '\n'):
+      self.assertEquals('',
+                        self.device.RunShellCommand(cmd, single_line=True))
+
+  def testRunShellCommand_singleLine_successWithoutEndLine(self):
+    cmd = 'echo -n $VALUE'
+    with self.assertCall(self.call.adb.Shell(cmd), 'some value'):
+      self.assertEquals('some value',
+                        self.device.RunShellCommand(cmd, single_line=True))
+
+  def testRunShellCommand_singleLine_successNoOutput(self):
+    cmd = 'echo -n $VALUE'
+    with self.assertCall(self.call.adb.Shell(cmd), ''):
+      self.assertEquals('',
+                        self.device.RunShellCommand(cmd, single_line=True))
+
+  def testRunShellCommand_singleLine_failTooManyLines(self):
+    cmd = 'echo $VALUE'
+    with self.assertCall(self.call.adb.Shell(cmd),
+                         'some value\nanother value\n'):
+      with self.assertRaises(device_errors.CommandFailedError):
+        self.device.RunShellCommand(cmd, single_line=True)
+
+  def testRunShellCommand_checkReturn_success(self):
+    cmd = 'echo $ANDROID_DATA'
+    output = '/data\n'
+    with self.assertCall(self.call.adb.Shell(cmd), output):
+      self.assertEquals([output.rstrip()],
+                        self.device.RunShellCommand(cmd, check_return=True))
+
+  def testRunShellCommand_checkReturn_failure(self):
+    cmd = 'ls /root'
+    output = 'opendir failed, Permission denied\n'
+    with self.assertCall(self.call.adb.Shell(cmd), self.ShellError(output)):
+      with self.assertRaises(device_errors.AdbCommandFailedError):
+        self.device.RunShellCommand(cmd, check_return=True)
+
+  def testRunShellCommand_checkReturn_disabled(self):
+    cmd = 'ls /root'
+    output = 'opendir failed, Permission denied\n'
+    with self.assertCall(self.call.adb.Shell(cmd), self.ShellError(output)):
+      self.assertEquals([output.rstrip()],
+                        self.device.RunShellCommand(cmd, check_return=False))
+
+  def testRunShellCommand_largeOutput_enabled(self):
+    cmd = 'echo $VALUE'
+    temp_file = MockTempFile('/sdcard/temp-123')
+    cmd_redirect = '( %s )>%s' % (cmd, temp_file.name)
+    with self.assertCalls(
+        (mock.call.devil.android.device_temp_file.DeviceTempFile(self.adb),
+            temp_file),
+        (self.call.adb.Shell(cmd_redirect)),
+        (self.call.device.ReadFile(temp_file.name, force_pull=True),
+         'something')):
+      self.assertEquals(
+          ['something'],
+          self.device.RunShellCommand(
+              cmd, large_output=True, check_return=True))
+
+  def testRunShellCommand_largeOutput_disabledNoTrigger(self):
+    cmd = 'something'
+    with self.assertCall(self.call.adb.Shell(cmd), self.ShellError('')):
+      with self.assertRaises(device_errors.AdbCommandFailedError):
+        self.device.RunShellCommand(cmd, check_return=True)
+
+  def testRunShellCommand_largeOutput_disabledTrigger(self):
+    cmd = 'echo $VALUE'
+    temp_file = MockTempFile('/sdcard/temp-123')
+    cmd_redirect = '( %s )>%s' % (cmd, temp_file.name)
+    with self.assertCalls(
+        (self.call.adb.Shell(cmd), self.ShellError('', None)),
+        (mock.call.devil.android.device_temp_file.DeviceTempFile(self.adb),
+            temp_file),
+        (self.call.adb.Shell(cmd_redirect)),
+        (self.call.device.ReadFile(mock.ANY, force_pull=True),
+         'something')):
+      self.assertEquals(['something'],
+                        self.device.RunShellCommand(cmd, check_return=True))
+
+
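+# _RunPipedShellCommand appends an echo of ${PIPESTATUS[@]} to the piped
+# command so that each stage's exit code can be verified; the cases below
+# cover success, per-stage failures, and truncated output.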
+class DeviceUtilsRunPipedShellCommandTest(DeviceUtilsTest):
+
+  def testRunPipedShellCommand_success(self):
+    with self.assertCall(
+        self.call.device.RunShellCommand(
+            'ps | grep foo; echo "PIPESTATUS: ${PIPESTATUS[@]}"',
+            check_return=True),
+        ['This line contains foo', 'PIPESTATUS: 0 0']):
+      self.assertEquals(['This line contains foo'],
+                        self.device._RunPipedShellCommand('ps | grep foo'))
+
+  def testRunPipedShellCommand_firstCommandFails(self):
+    with self.assertCall(
+        self.call.device.RunShellCommand(
+            'ps | grep foo; echo "PIPESTATUS: ${PIPESTATUS[@]}"',
+            check_return=True),
+        ['PIPESTATUS: 1 0']):
+      with self.assertRaises(device_errors.AdbShellCommandFailedError) as ec:
+        self.device._RunPipedShellCommand('ps | grep foo')
+      self.assertEquals([1, 0], ec.exception.status)
+
+  def testRunPipedShellCommand_secondCommandFails(self):
+    with self.assertCall(
+        self.call.device.RunShellCommand(
+            'ps | grep foo; echo "PIPESTATUS: ${PIPESTATUS[@]}"',
+            check_return=True),
+        ['PIPESTATUS: 0 1']):
+      with self.assertRaises(device_errors.AdbShellCommandFailedError) as ec:
+        self.device._RunPipedShellCommand('ps | grep foo')
+      self.assertEquals([0, 1], ec.exception.status)
+
+  def testRunPipedShellCommand_outputCutOff(self):
+    with self.assertCall(
+        self.call.device.RunShellCommand(
+            'ps | grep foo; echo "PIPESTATUS: ${PIPESTATUS[@]}"',
+            check_return=True),
+        ['foo.bar'] * 256 + ['foo.ba']):
+      with self.assertRaises(device_errors.AdbShellCommandFailedError) as ec:
+        self.device._RunPipedShellCommand('ps | grep foo')
+      self.assertIs(None, ec.exception.status)
+
+
+@mock.patch('time.sleep', mock.Mock())
+class DeviceUtilsKillAllTest(DeviceUtilsTest):
+
+  def testKillAll_noMatchingProcessesFailure(self):
+    with self.assertCall(self.call.device.GetPids('test_process'), {}):
+      with self.assertRaises(device_errors.CommandFailedError):
+        self.device.KillAll('test_process')
+
+  def testKillAll_noMatchingProcessesQuiet(self):
+    with self.assertCall(self.call.device.GetPids('test_process'), {}):
+      self.assertEqual(0, self.device.KillAll('test_process', quiet=True))
+
+  def testKillAll_nonblocking(self):
+    with self.assertCalls(
+        (self.call.device.GetPids('some.process'),
+         {'some.process': ['1234'], 'some.processing.thing': ['5678']}),
+        (self.call.adb.Shell('kill -9 1234 5678'), '')):
+      self.assertEquals(
+          2, self.device.KillAll('some.process', blocking=False))
+
+  def testKillAll_blocking(self):
+    with self.assertCalls(
+        (self.call.device.GetPids('some.process'),
+         {'some.process': ['1234'], 'some.processing.thing': ['5678']}),
+        (self.call.adb.Shell('kill -9 1234 5678'), ''),
+        (self.call.device.GetPids('some.process'),
+         {'some.processing.thing': ['5678']}),
+        (self.call.device.GetPids('some.process'),
+         {'some.process': ['1111']})):  # Other instance with different pid.
+      self.assertEquals(
+          2, self.device.KillAll('some.process', blocking=True))
+
+  def testKillAll_exactNonblocking(self):
+    with self.assertCalls(
+        (self.call.device.GetPids('some.process'),
+         {'some.process': ['1234'], 'some.processing.thing': ['5678']}),
+        (self.call.adb.Shell('kill -9 1234'), '')):
+      self.assertEquals(
+          1, self.device.KillAll('some.process', exact=True, blocking=False))
+
+  def testKillAll_exactBlocking(self):
+    with self.assertCalls(
+        (self.call.device.GetPids('some.process'),
+         {'some.process': ['1234'], 'some.processing.thing': ['5678']}),
+        (self.call.adb.Shell('kill -9 1234'), ''),
+        (self.call.device.GetPids('some.process'),
+         {'some.process': ['1234'], 'some.processing.thing': ['5678']}),
+        (self.call.device.GetPids('some.process'),
+         {'some.processing.thing': ['5678']})):
+      self.assertEquals(
+          1, self.device.KillAll('some.process', exact=True, blocking=True))
+
+  def testKillAll_root(self):
+    with self.assertCalls(
+        (self.call.device.GetPids('some.process'), {'some.process': ['1234']}),
+        (self.call.device.NeedsSU(), True),
+        (self.call.device._Su("sh -c 'kill -9 1234'"),
+         "su -c sh -c 'kill -9 1234'"),
+        (self.call.adb.Shell("su -c sh -c 'kill -9 1234'"), '')):
+      self.assertEquals(
+          1, self.device.KillAll('some.process', as_root=True))
+
+  def testKillAll_sigterm(self):
+    with self.assertCalls(
+        (self.call.device.GetPids('some.process'),
+            {'some.process': ['1234']}),
+        (self.call.adb.Shell('kill -15 1234'), '')):
+      self.assertEquals(
+          1, self.device.KillAll('some.process', signum=device_signal.SIGTERM))
+
+  def testKillAll_multipleInstances(self):
+    with self.assertCalls(
+        (self.call.device.GetPids('some.process'),
+            {'some.process': ['1234', '4567']}),
+        (self.call.adb.Shell('kill -15 1234 4567'), '')):
+      self.assertEquals(
+          2, self.device.KillAll('some.process', signum=device_signal.SIGTERM))
+
+
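+# Each case asserts the exact 'am start' command line built from the intent's
+# action, component, categories, data, extras, and flags.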
+class DeviceUtilsStartActivityTest(DeviceUtilsTest):
+
+  def testStartActivity_actionOnly(self):
+    test_intent = intent.Intent(action='android.intent.action.VIEW')
+    with self.assertCall(
+        self.call.adb.Shell('am start '
+                            '-a android.intent.action.VIEW'),
+        'Starting: Intent { act=android.intent.action.VIEW }'):
+      self.device.StartActivity(test_intent)
+
+  def testStartActivity_success(self):
+    test_intent = intent.Intent(action='android.intent.action.VIEW',
+                                package='test.package',
+                                activity='.Main')
+    with self.assertCall(
+        self.call.adb.Shell('am start '
+                            '-a android.intent.action.VIEW '
+                            '-n test.package/.Main'),
+        'Starting: Intent { act=android.intent.action.VIEW }'):
+      self.device.StartActivity(test_intent)
+
+  def testStartActivity_failure(self):
+    test_intent = intent.Intent(action='android.intent.action.VIEW',
+                                package='test.package',
+                                activity='.Main')
+    with self.assertCall(
+        self.call.adb.Shell('am start '
+                            '-a android.intent.action.VIEW '
+                            '-n test.package/.Main'),
+        'Error: Failed to start test activity'):
+      with self.assertRaises(device_errors.CommandFailedError):
+        self.device.StartActivity(test_intent)
+
+  def testStartActivity_blocking(self):
+    test_intent = intent.Intent(action='android.intent.action.VIEW',
+                                package='test.package',
+                                activity='.Main')
+    with self.assertCall(
+        self.call.adb.Shell('am start '
+                            '-W '
+                            '-a android.intent.action.VIEW '
+                            '-n test.package/.Main'),
+        'Starting: Intent { act=android.intent.action.VIEW }'):
+      self.device.StartActivity(test_intent, blocking=True)
+
+  def testStartActivity_withCategory(self):
+    test_intent = intent.Intent(action='android.intent.action.VIEW',
+                                package='test.package',
+                                activity='.Main',
+                                category='android.intent.category.HOME')
+    with self.assertCall(
+        self.call.adb.Shell('am start '
+                            '-a android.intent.action.VIEW '
+                            '-c android.intent.category.HOME '
+                            '-n test.package/.Main'),
+        'Starting: Intent { act=android.intent.action.VIEW }'):
+      self.device.StartActivity(test_intent)
+
+  def testStartActivity_withMultipleCategories(self):
+    test_intent = intent.Intent(action='android.intent.action.VIEW',
+                                package='test.package',
+                                activity='.Main',
+                                category=['android.intent.category.HOME',
+                                          'android.intent.category.BROWSABLE'])
+    with self.assertCall(
+        self.call.adb.Shell('am start '
+                            '-a android.intent.action.VIEW '
+                            '-c android.intent.category.HOME '
+                            '-c android.intent.category.BROWSABLE '
+                            '-n test.package/.Main'),
+        'Starting: Intent { act=android.intent.action.VIEW }'):
+      self.device.StartActivity(test_intent)
+
+  def testStartActivity_withData(self):
+    test_intent = intent.Intent(action='android.intent.action.VIEW',
+                                package='test.package',
+                                activity='.Main',
+                                data='http://www.google.com/')
+    with self.assertCall(
+        self.call.adb.Shell('am start '
+                            '-a android.intent.action.VIEW '
+                            '-d http://www.google.com/ '
+                            '-n test.package/.Main'),
+        'Starting: Intent { act=android.intent.action.VIEW }'):
+      self.device.StartActivity(test_intent)
+
+  def testStartActivity_withStringExtra(self):
+    test_intent = intent.Intent(action='android.intent.action.VIEW',
+                                package='test.package',
+                                activity='.Main',
+                                extras={'foo': 'test'})
+    with self.assertCall(
+        self.call.adb.Shell('am start '
+                            '-a android.intent.action.VIEW '
+                            '-n test.package/.Main '
+                            '--es foo test'),
+        'Starting: Intent { act=android.intent.action.VIEW }'):
+      self.device.StartActivity(test_intent)
+
+  def testStartActivity_withBoolExtra(self):
+    test_intent = intent.Intent(action='android.intent.action.VIEW',
+                                package='test.package',
+                                activity='.Main',
+                                extras={'foo': True})
+    with self.assertCall(
+        self.call.adb.Shell('am start '
+                            '-a android.intent.action.VIEW '
+                            '-n test.package/.Main '
+                            '--ez foo True'),
+        'Starting: Intent { act=android.intent.action.VIEW }'):
+      self.device.StartActivity(test_intent)
+
+  def testStartActivity_withIntExtra(self):
+    test_intent = intent.Intent(action='android.intent.action.VIEW',
+                                package='test.package',
+                                activity='.Main',
+                                extras={'foo': 123})
+    with self.assertCall(
+        self.call.adb.Shell('am start '
+                            '-a android.intent.action.VIEW '
+                            '-n test.package/.Main '
+                            '--ei foo 123'),
+        'Starting: Intent { act=android.intent.action.VIEW }'):
+      self.device.StartActivity(test_intent)
+
+  def testStartActivity_withTraceFile(self):
+    test_intent = intent.Intent(action='android.intent.action.VIEW',
+                                package='test.package',
+                                activity='.Main')
+    with self.assertCall(
+        self.call.adb.Shell('am start '
+                            '--start-profiler test_trace_file.out '
+                            '-a android.intent.action.VIEW '
+                            '-n test.package/.Main'),
+        'Starting: Intent { act=android.intent.action.VIEW }'):
+      self.device.StartActivity(test_intent,
+                                trace_file_name='test_trace_file.out')
+
+  def testStartActivity_withForceStop(self):
+    test_intent = intent.Intent(action='android.intent.action.VIEW',
+                                package='test.package',
+                                activity='.Main')
+    with self.assertCall(
+        self.call.adb.Shell('am start '
+                            '-S '
+                            '-a android.intent.action.VIEW '
+                            '-n test.package/.Main'),
+        'Starting: Intent { act=android.intent.action.VIEW }'):
+      self.device.StartActivity(test_intent, force_stop=True)
+
+  def testStartActivity_withFlags(self):
+    test_intent = intent.Intent(action='android.intent.action.VIEW',
+                                package='test.package',
+                                activity='.Main',
+                                flags='0x10000000')
+    with self.assertCall(
+        self.call.adb.Shell('am start '
+                            '-a android.intent.action.VIEW '
+                            '-n test.package/.Main '
+                            '-f 0x10000000'),
+        'Starting: Intent { act=android.intent.action.VIEW }'):
+      self.device.StartActivity(test_intent)
+
+
+class DeviceUtilsStartInstrumentationTest(DeviceUtilsTest):
+
+  def testStartInstrumentation_nothing(self):
+    with self.assertCalls(
+        self.call.device.RunShellCommand(
+            'p=test.package;am instrument "$p"/.TestInstrumentation',
+            check_return=True, large_output=True)):
+      self.device.StartInstrumentation(
+          'test.package/.TestInstrumentation',
+          finish=False, raw=False, extras=None)
+
+  def testStartInstrumentation_finish(self):
+    with self.assertCalls(
+        (self.call.device.RunShellCommand(
+            'p=test.package;am instrument -w "$p"/.TestInstrumentation',
+            check_return=True, large_output=True),
+         ['OK (1 test)'])):
+      output = self.device.StartInstrumentation(
+          'test.package/.TestInstrumentation',
+          finish=True, raw=False, extras=None)
+      self.assertEquals(['OK (1 test)'], output)
+
+  def testStartInstrumentation_raw(self):
+    with self.assertCalls(
+        self.call.device.RunShellCommand(
+            'p=test.package;am instrument -r "$p"/.TestInstrumentation',
+            check_return=True, large_output=True)):
+      self.device.StartInstrumentation(
+          'test.package/.TestInstrumentation',
+          finish=False, raw=True, extras=None)
+
+  def testStartInstrumentation_extras(self):
+    with self.assertCalls(
+        self.call.device.RunShellCommand(
+            'p=test.package;am instrument -e "$p".foo Foo -e bar \'Val \'"$p" '
+            '"$p"/.TestInstrumentation',
+            check_return=True, large_output=True)):
+      self.device.StartInstrumentation(
+          'test.package/.TestInstrumentation',
+          finish=False, raw=False, extras={'test.package.foo': 'Foo',
+                                           'bar': 'Val test.package'})
+
+
+class DeviceUtilsBroadcastIntentTest(DeviceUtilsTest):
+
+  def testBroadcastIntent_noExtras(self):
+    test_intent = intent.Intent(action='test.package.with.an.INTENT')
+    with self.assertCall(
+        self.call.adb.Shell('am broadcast -a test.package.with.an.INTENT'),
+        'Broadcasting: Intent { act=test.package.with.an.INTENT } '):
+      self.device.BroadcastIntent(test_intent)
+
+  def testBroadcastIntent_withExtra(self):
+    test_intent = intent.Intent(action='test.package.with.an.INTENT',
+                                extras={'foo': 'bar value'})
+    with self.assertCall(
+        self.call.adb.Shell(
+            "am broadcast -a test.package.with.an.INTENT --es foo 'bar value'"),
+        'Broadcasting: Intent { act=test.package.with.an.INTENT } '):
+      self.device.BroadcastIntent(test_intent)
+
+  def testBroadcastIntent_withExtra_noValue(self):
+    test_intent = intent.Intent(action='test.package.with.an.INTENT',
+                                extras={'foo': None})
+    with self.assertCall(
+        self.call.adb.Shell(
+            'am broadcast -a test.package.with.an.INTENT --esn foo'),
+        'Broadcasting: Intent { act=test.package.with.an.INTENT } '):
+      self.device.BroadcastIntent(test_intent)
+
+
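+# GoHome polls 'dumpsys window windows' for a launcher-focused window, sending
+# ENTER (66) and BACK (4) key events between retries.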
+class DeviceUtilsGoHomeTest(DeviceUtilsTest):
+
+  def testGoHome_popupsExist(self):
+    with self.assertCalls(
+        (self.call.device.RunShellCommand(
+            ['dumpsys', 'window', 'windows'], check_return=True,
+            large_output=True), []),
+        (self.call.device.RunShellCommand(
+            ['am', 'start', '-W', '-a', 'android.intent.action.MAIN',
+            '-c', 'android.intent.category.HOME'], check_return=True),
+         'Starting: Intent { act=android.intent.action.MAIN }\r\n'),
+        (self.call.device.RunShellCommand(
+            ['dumpsys', 'window', 'windows'], check_return=True,
+            large_output=True), []),
+        (self.call.device.RunShellCommand(
+            ['input', 'keyevent', '66'], check_return=True)),
+        (self.call.device.RunShellCommand(
+            ['input', 'keyevent', '4'], check_return=True)),
+        (self.call.device.RunShellCommand(
+            ['dumpsys', 'window', 'windows'], check_return=True,
+            large_output=True),
+         ['mCurrentFocus Launcher'])):
+      self.device.GoHome()
+
+  def testGoHome_willRetry(self):
+    with self.assertCalls(
+        (self.call.device.RunShellCommand(
+            ['dumpsys', 'window', 'windows'], check_return=True,
+            large_output=True), []),
+        (self.call.device.RunShellCommand(
+            ['am', 'start', '-W', '-a', 'android.intent.action.MAIN',
+            '-c', 'android.intent.category.HOME'], check_return=True),
+         'Starting: Intent { act=android.intent.action.MAIN }\r\n'),
+        (self.call.device.RunShellCommand(
+            ['dumpsys', 'window', 'windows'], check_return=True,
+            large_output=True), []),
+        (self.call.device.RunShellCommand(
+            ['input', 'keyevent', '66'], check_return=True)),
+        (self.call.device.RunShellCommand(
+            ['input', 'keyevent', '4'], check_return=True)),
+        (self.call.device.RunShellCommand(
+            ['dumpsys', 'window', 'windows'], check_return=True,
+            large_output=True), []),
+        (self.call.device.RunShellCommand(
+            ['input', 'keyevent', '66'], check_return=True)),
+        (self.call.device.RunShellCommand(
+            ['input', 'keyevent', '4'], check_return=True)),
+        (self.call.device.RunShellCommand(
+            ['dumpsys', 'window', 'windows'], check_return=True,
+            large_output=True),
+         self.TimeoutError())):
+      with self.assertRaises(device_errors.CommandTimeoutError):
+        self.device.GoHome()
+
+  def testGoHome_alreadyFocused(self):
+    with self.assertCall(
+        self.call.device.RunShellCommand(
+            ['dumpsys', 'window', 'windows'], check_return=True,
+            large_output=True),
+        ['mCurrentFocus Launcher']):
+      self.device.GoHome()
+
+  def testGoHome_alreadyFocusedAlternateCase(self):
+    with self.assertCall(
+        self.call.device.RunShellCommand(
+            ['dumpsys', 'window', 'windows'], check_return=True,
+            large_output=True),
+        [' mCurrentFocus .launcher/.']):
+      self.device.GoHome()
+
+  def testGoHome_obtainsFocusAfterGoingHome(self):
+    with self.assertCalls(
+        (self.call.device.RunShellCommand(
+            ['dumpsys', 'window', 'windows'], check_return=True,
+            large_output=True), []),
+        (self.call.device.RunShellCommand(
+            ['am', 'start', '-W', '-a', 'android.intent.action.MAIN',
+            '-c', 'android.intent.category.HOME'], check_return=True),
+         'Starting: Intent { act=android.intent.action.MAIN }\r\n'),
+        (self.call.device.RunShellCommand(
+            ['dumpsys', 'window', 'windows'], check_return=True,
+            large_output=True),
+         ['mCurrentFocus Launcher'])):
+      self.device.GoHome()
+
+
+class DeviceUtilsForceStopTest(DeviceUtilsTest):
+
+  def testForceStop(self):
+    with self.assertCall(
+        self.call.adb.Shell('p=test.package;if [[ "$(ps)" = *$p* ]]; then '
+                            'am force-stop $p; fi'),
+        ''):
+      self.device.ForceStop('test.package')
+
+
+class DeviceUtilsClearApplicationStateTest(DeviceUtilsTest):
+
+  def testClearApplicationState_setPermissions(self):
+    with self.assertCalls(
+        (self.call.device.GetProp('ro.build.version.sdk', cache=True), '17'),
+        (self.call.device._GetApplicationPathsInternal('this.package.exists'),
+         ['/data/app/this.package.exists.apk']),
+        (self.call.device.RunShellCommand(
+            ['pm', 'clear', 'this.package.exists'],
+            check_return=True),
+         ['Success']),
+        (self.call.device.GrantPermissions(
+            'this.package.exists', ['p1']), [])):
+      self.device.ClearApplicationState(
+          'this.package.exists', permissions=['p1'])
+
+  def testClearApplicationState_packageDoesntExist(self):
+    with self.assertCalls(
+        (self.call.device.GetProp('ro.build.version.sdk', cache=True), '11'),
+        (self.call.device._GetApplicationPathsInternal('does.not.exist'),
+         [])):
+      self.device.ClearApplicationState('does.not.exist')
+
+  def testClearApplicationState_packageDoesntExistOnAndroidJBMR2OrAbove(self):
+    with self.assertCalls(
+        (self.call.device.GetProp('ro.build.version.sdk', cache=True), '18'),
+        (self.call.device.RunShellCommand(
+            ['pm', 'clear', 'this.package.does.not.exist'],
+            check_return=True),
+         ['Failed'])):
+      self.device.ClearApplicationState('this.package.does.not.exist')
+
+  def testClearApplicationState_packageExists(self):
+    with self.assertCalls(
+        (self.call.device.GetProp('ro.build.version.sdk', cache=True), '17'),
+        (self.call.device._GetApplicationPathsInternal('this.package.exists'),
+         ['/data/app/this.package.exists.apk']),
+        (self.call.device.RunShellCommand(
+            ['pm', 'clear', 'this.package.exists'],
+            check_return=True),
+         ['Success'])):
+      self.device.ClearApplicationState('this.package.exists')
+
+  def testClearApplicationState_packageExistsOnAndroidJBMR2OrAbove(self):
+    with self.assertCalls(
+        (self.call.device.GetProp('ro.build.version.sdk', cache=True), '18'),
+        (self.call.device.RunShellCommand(
+            ['pm', 'clear', 'this.package.exists'],
+            check_return=True),
+         ['Success'])):
+      self.device.ClearApplicationState('this.package.exists')
+
+
+class DeviceUtilsSendKeyEventTest(DeviceUtilsTest):
+
+  def testSendKeyEvent(self):
+    with self.assertCall(self.call.adb.Shell('input keyevent 66'), ''):
+      self.device.SendKeyEvent(66)
+
+
+class DeviceUtilsPushChangedFilesIndividuallyTest(DeviceUtilsTest):
+
+  def testPushChangedFilesIndividually_empty(self):
+    test_files = []
+    with self.assertCalls():
+      self.device._PushChangedFilesIndividually(test_files)
+
+  def testPushChangedFilesIndividually_single(self):
+    test_files = [('/test/host/path', '/test/device/path')]
+    with self.assertCalls(self.call.adb.Push(*test_files[0])):
+      self.device._PushChangedFilesIndividually(test_files)
+
+  def testPushChangedFilesIndividually_multiple(self):
+    test_files = [
+        ('/test/host/path/file1', '/test/device/path/file1'),
+        ('/test/host/path/file2', '/test/device/path/file2')]
+    with self.assertCalls(
+        self.call.adb.Push(*test_files[0]),
+        self.call.adb.Push(*test_files[1])):
+      self.device._PushChangedFilesIndividually(test_files)
+
+
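+# The zipped push path archives the changed files into a host temp file,
+# pushes the zip, and unzips on the device using the helper commands under
+# /data/local/tmp/bin; the first case covers the fallback when those commands
+# are unavailable.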
+class DeviceUtilsPushChangedFilesZippedTest(DeviceUtilsTest):
+
+  def testPushChangedFilesZipped_noUnzipCommand(self):
+    test_files = [('/test/host/path/file1', '/test/device/path/file1')]
+    mock_zip_temp = mock.mock_open()
+    mock_zip_temp.return_value.name = '/test/temp/file/tmp.zip'
+    with self.assertCalls(
+        (mock.call.tempfile.NamedTemporaryFile(suffix='.zip'), mock_zip_temp),
+        (mock.call.multiprocessing.Process(
+            target=device_utils.DeviceUtils._CreateDeviceZip,
+            args=('/test/temp/file/tmp.zip', test_files)), mock.Mock()),
+        (self.call.device._MaybeInstallCommands(), False)):
+      self.assertFalse(self.device._PushChangedFilesZipped(test_files,
+                                                           ['/test/dir']))
+
+  def _testPushChangedFilesZipped_spec(self, test_files):
+    mock_zip_temp = mock.mock_open()
+    mock_zip_temp.return_value.name = '/test/temp/file/tmp.zip'
+    with self.assertCalls(
+        (mock.call.tempfile.NamedTemporaryFile(suffix='.zip'), mock_zip_temp),
+        (mock.call.multiprocessing.Process(
+            target=device_utils.DeviceUtils._CreateDeviceZip,
+            args=('/test/temp/file/tmp.zip', test_files)), mock.Mock()),
+        (self.call.device._MaybeInstallCommands(), True),
+        (self.call.device.NeedsSU(), True),
+        (mock.call.devil.android.device_temp_file.DeviceTempFile(self.adb,
+                                                                 suffix='.zip'),
+             MockTempFile('/test/sdcard/foo123.zip')),
+        self.call.adb.Push(
+            '/test/temp/file/tmp.zip', '/test/sdcard/foo123.zip'),
+        self.call.device.RunShellCommand(
+            'unzip /test/sdcard/foo123.zip&&chmod -R 777 /test/dir',
+            as_root=True,
+            env={'PATH': '/data/local/tmp/bin:$PATH'},
+            check_return=True)):
+      self.assertTrue(self.device._PushChangedFilesZipped(test_files,
+                                                          ['/test/dir']))
+
+  def testPushChangedFilesZipped_single(self):
+    self._testPushChangedFilesZipped_spec(
+        [('/test/host/path/file1', '/test/device/path/file1')])
+
+  def testPushChangedFilesZipped_multiple(self):
+    self._testPushChangedFilesZipped_spec(
+        [('/test/host/path/file1', '/test/device/path/file1'),
+         ('/test/host/path/file2', '/test/device/path/file2')])
+
+
+class DeviceUtilsPathExistsTest(DeviceUtilsTest):
+
+  def testPathExists_pathExists(self):
+    with self.assertCall(
+        self.call.device.RunShellCommand(
+            "test -e '/path/file exists'",
+            as_root=False, check_return=True, timeout=10, retries=0),
+        []):
+      self.assertTrue(self.device.PathExists('/path/file exists'))
+
+  def testPathExists_multiplePathExists(self):
+    with self.assertCall(
+        self.call.device.RunShellCommand(
+            "test -e '/path 1' -a -e /path2",
+            as_root=False, check_return=True, timeout=10, retries=0),
+        []):
+      self.assertTrue(self.device.PathExists(('/path 1', '/path2')))
+
+  def testPathExists_pathDoesntExist(self):
+    with self.assertCall(
+        self.call.device.RunShellCommand(
+            "test -e /path/file.not.exists",
+            as_root=False, check_return=True, timeout=10, retries=0),
+        self.ShellError()):
+      self.assertFalse(self.device.PathExists('/path/file.not.exists'))
+
+  def testPathExists_asRoot(self):
+    with self.assertCall(
+        self.call.device.RunShellCommand(
+            "test -e /root/path/exists",
+            as_root=True, check_return=True, timeout=10, retries=0),
+        self.ShellError()):
+      self.assertFalse(
+          self.device.PathExists('/root/path/exists', as_root=True))
+
+  def testFileExists_pathDoesntExist(self):
+    with self.assertCall(
+        self.call.device.RunShellCommand(
+            "test -e /path/file.not.exists",
+            as_root=False, check_return=True, timeout=10, retries=0),
+        self.ShellError()):
+      self.assertFalse(self.device.FileExists('/path/file.not.exists'))
+
+
+class DeviceUtilsPullFileTest(DeviceUtilsTest):
+
+  def testPullFile_existsOnDevice(self):
+    with mock.patch('os.path.exists', return_value=True):
+      with self.assertCall(
+          self.call.adb.Pull('/data/app/test.file.exists',
+                             '/test/file/host/path')):
+        self.device.PullFile('/data/app/test.file.exists',
+                             '/test/file/host/path')
+
+  def testPullFile_doesntExistOnDevice(self):
+    with mock.patch('os.path.exists', return_value=True):
+      with self.assertCall(
+          self.call.adb.Pull('/data/app/test.file.does.not.exist',
+                             '/test/file/host/path'),
+          self.CommandError('remote object does not exist')):
+        with self.assertRaises(device_errors.CommandFailedError):
+          self.device.PullFile('/data/app/test.file.does.not.exist',
+                               '/test/file/host/path')
+
+
+class DeviceUtilsReadFileTest(DeviceUtilsTest):
+
+  def testReadFileWithPull_success(self):
+    tmp_host_dir = '/tmp/dir/on.host/'
+    tmp_host = MockTempFile('/tmp/dir/on.host/tmp_ReadFileWithPull')
+    tmp_host.file.read.return_value = 'some interesting contents'
+    with self.assertCalls(
+        (mock.call.tempfile.mkdtemp(), tmp_host_dir),
+        (self.call.adb.Pull('/path/to/device/file', mock.ANY)),
+        (mock.call.__builtin__.open(mock.ANY, 'r'), tmp_host),
+        (mock.call.os.path.exists(tmp_host_dir), True),
+        (mock.call.shutil.rmtree(tmp_host_dir), None)):
+      self.assertEquals('some interesting contents',
+                        self.device._ReadFileWithPull('/path/to/device/file'))
+    tmp_host.file.read.assert_called_once_with()
+
+  def testReadFileWithPull_rejected(self):
+    tmp_host_dir = '/tmp/dir/on.host/'
+    with self.assertCalls(
+        (mock.call.tempfile.mkdtemp(), tmp_host_dir),
+        (self.call.adb.Pull('/path/to/device/file', mock.ANY),
+         self.CommandError()),
+        (mock.call.os.path.exists(tmp_host_dir), True),
+        (mock.call.shutil.rmtree(tmp_host_dir), None)):
+      with self.assertRaises(device_errors.CommandFailedError):
+        self.device._ReadFileWithPull('/path/to/device/file')
+
+  def testReadFile_exists(self):
+    with self.assertCalls(
+        (self.call.device.RunShellCommand(
+            ['ls', '-l', '/read/this/test/file'],
+            as_root=False, check_return=True),
+         ['-rw-rw---- root foo 256 1970-01-01 00:00 file']),
+        (self.call.device.RunShellCommand(
+            ['cat', '/read/this/test/file'],
+            as_root=False, check_return=True),
+         ['this is a test file'])):
+      self.assertEqual('this is a test file\n',
+                       self.device.ReadFile('/read/this/test/file'))
+
+  def testReadFile_exists2(self):
+    # Same as testReadFile_exists, but uses Android N ls output.
+    with self.assertCalls(
+        (self.call.device.RunShellCommand(
+            ['ls', '-l', '/read/this/test/file'],
+            as_root=False, check_return=True),
+         ['-rw-rw-rw- 1 root root 256 2016-03-15 03:27 /read/this/test/file']),
+        (self.call.device.RunShellCommand(
+            ['cat', '/read/this/test/file'],
+            as_root=False, check_return=True),
+         ['this is a test file'])):
+      self.assertEqual('this is a test file\n',
+                       self.device.ReadFile('/read/this/test/file'))
+
+  def testReadFile_doesNotExist(self):
+    with self.assertCall(
+        self.call.device.RunShellCommand(
+            ['ls', '-l', '/this/file/does.not.exist'],
+            as_root=False, check_return=True),
+        self.CommandError('File does not exist')):
+      with self.assertRaises(device_errors.CommandFailedError):
+        self.device.ReadFile('/this/file/does.not.exist')
+
+  def testReadFile_zeroSize(self):
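+    # Some files (e.g. entries under /proc) report a size of 0 in 'ls -l'
+    # output even though they have contents, so ReadFile is expected to fall
+    # back to pulling them.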
+    with self.assertCalls(
+        (self.call.device.RunShellCommand(
+            ['ls', '-l', '/this/file/has/zero/size'],
+            as_root=False, check_return=True),
+         ['-r--r--r-- root foo 0 1970-01-01 00:00 zero_size_file']),
+        (self.call.device._ReadFileWithPull('/this/file/has/zero/size'),
+         'but it has contents\n')):
+      self.assertEqual('but it has contents\n',
+                       self.device.ReadFile('/this/file/has/zero/size'))
+
+  def testReadFile_withSU(self):
+    with self.assertCalls(
+        (self.call.device.RunShellCommand(
+            ['ls', '-l', '/this/file/can.be.read.with.su'],
+            as_root=True, check_return=True),
+         ['-rw------- root root 256 1970-01-01 00:00 can.be.read.with.su']),
+        (self.call.device.RunShellCommand(
+            ['cat', '/this/file/can.be.read.with.su'],
+            as_root=True, check_return=True),
+         ['this is a test file', 'read with su'])):
+      self.assertEqual(
+          'this is a test file\nread with su\n',
+          self.device.ReadFile('/this/file/can.be.read.with.su',
+                               as_root=True))
+
+  def testReadFile_withPull(self):
+    contents = 'a' * 123456
+    with self.assertCalls(
+        (self.call.device.RunShellCommand(
+            ['ls', '-l', '/read/this/big/test/file'],
+            as_root=False, check_return=True),
+         ['-rw-rw---- root foo 123456 1970-01-01 00:00 file']),
+        (self.call.device._ReadFileWithPull('/read/this/big/test/file'),
+         contents)):
+      self.assertEqual(
+          contents, self.device.ReadFile('/read/this/big/test/file'))
+
+  def testReadFile_withPullAndSU(self):
+    contents = 'b' * 123456
+    with self.assertCalls(
+        (self.call.device.RunShellCommand(
+            ['ls', '-l', '/this/big/file/can.be.read.with.su'],
+            as_root=True, check_return=True),
+         ['-rw------- root root 123456 1970-01-01 00:00 can.be.read.with.su']),
+        (self.call.device.NeedsSU(), True),
+        (mock.call.devil.android.device_temp_file.DeviceTempFile(self.adb),
+         MockTempFile('/sdcard/tmp/on.device')),
+        self.call.device.RunShellCommand(
+            'SRC=/this/big/file/can.be.read.with.su DEST=/sdcard/tmp/on.device;'
+            'cp "$SRC" "$DEST" && chmod 666 "$DEST"',
+            as_root=True, check_return=True),
+        (self.call.device._ReadFileWithPull('/sdcard/tmp/on.device'),
+         contents)):
+      self.assertEqual(
+          contents,
+          self.device.ReadFile('/this/big/file/can.be.read.with.su',
+                               as_root=True))
+
+  def testReadFile_forcePull(self):
+    contents = 'a' * 123456
+    with self.assertCall(
+        self.call.device._ReadFileWithPull('/read/this/big/test/file'),
+        contents):
+      self.assertEqual(
+          contents,
+          self.device.ReadFile('/read/this/big/test/file', force_pull=True))
+
+
+class DeviceUtilsWriteFileTest(DeviceUtilsTest):
+
+  def testWriteFileWithPush_success(self):
+    tmp_host = MockTempFile('/tmp/file/on.host')
+    contents = 'some interesting contents'
+    with self.assertCalls(
+        (mock.call.tempfile.NamedTemporaryFile(), tmp_host),
+        self.call.adb.Push('/tmp/file/on.host', '/path/to/device/file')):
+      self.device._WriteFileWithPush('/path/to/device/file', contents)
+    tmp_host.file.write.assert_called_once_with(contents)
+
+  def testWriteFileWithPush_rejected(self):
+    tmp_host = MockTempFile('/tmp/file/on.host')
+    contents = 'some interesting contents'
+    with self.assertCalls(
+        (mock.call.tempfile.NamedTemporaryFile(), tmp_host),
+        (self.call.adb.Push('/tmp/file/on.host', '/path/to/device/file'),
+         self.CommandError())):
+      with self.assertRaises(device_errors.CommandFailedError):
+        self.device._WriteFileWithPush('/path/to/device/file', contents)
+
+  def testWriteFile_withPush(self):
+    contents = 'some large contents ' * 26  # 20 * 26 = 520 chars
+    with self.assertCalls(
+        self.call.device._WriteFileWithPush('/path/to/device/file', contents)):
+      self.device.WriteFile('/path/to/device/file', contents)
+
+  def testWriteFile_withPushForced(self):
+    contents = 'tiny contents'
+    with self.assertCalls(
+        self.call.device._WriteFileWithPush('/path/to/device/file', contents)):
+      self.device.WriteFile('/path/to/device/file', contents, force_push=True)
+
+  def testWriteFile_withPushAndSU(self):
+    contents = 'some large contents ' * 26  # 20 * 26 = 520 chars
+    with self.assertCalls(
+        (self.call.device.NeedsSU(), True),
+        (mock.call.devil.android.device_temp_file.DeviceTempFile(self.adb),
+         MockTempFile('/sdcard/tmp/on.device')),
+        self.call.device._WriteFileWithPush('/sdcard/tmp/on.device', contents),
+        self.call.device.RunShellCommand(
+            ['cp', '/sdcard/tmp/on.device', '/path/to/device/file'],
+            as_root=True, check_return=True)):
+      self.device.WriteFile('/path/to/device/file', contents, as_root=True)
+
+  def testWriteFile_withEcho(self):
+    with self.assertCall(self.call.adb.Shell(
+        "echo -n the.contents > /test/file/to.write"), ''):
+      self.device.WriteFile('/test/file/to.write', 'the.contents')
+
+  def testWriteFile_withEchoAndQuotes(self):
+    with self.assertCall(self.call.adb.Shell(
+        "echo -n 'the contents' > '/test/file/to write'"), ''):
+      self.device.WriteFile('/test/file/to write', 'the contents')
+
+  def testWriteFile_withEchoAndSU(self):
+    expected_cmd_without_su = "sh -c 'echo -n contents > /test/file'"
+    expected_cmd = 'su -c %s' % expected_cmd_without_su
+    with self.assertCalls(
+        (self.call.device.NeedsSU(), True),
+        (self.call.device._Su(expected_cmd_without_su), expected_cmd),
+        (self.call.adb.Shell(expected_cmd),
+         '')):
+      self.device.WriteFile('/test/file', 'contents', as_root=True)
+
+
+class DeviceUtilsLsTest(DeviceUtilsTest):
+
+  def testLs_directory(self):
+    result = [('.', adb_wrapper.DeviceStat(16889, 4096, 1417436123)),
+              ('..', adb_wrapper.DeviceStat(16873, 4096, 12382237)),
+              ('testfile.txt', adb_wrapper.DeviceStat(33206, 3, 1417436122))]
+    with self.assertCalls(
+        (self.call.adb.Ls('/data/local/tmp'), result)):
+      self.assertEquals(result,
+                        self.device.Ls('/data/local/tmp'))
+
+  def testLs_nothing(self):
+    with self.assertCalls(
+        (self.call.adb.Ls('/data/local/tmp/testfile.txt'), [])):
+      self.assertEquals([],
+                        self.device.Ls('/data/local/tmp/testfile.txt'))
+
+
+class DeviceUtilsStatTest(DeviceUtilsTest):
+
+  def testStat_file(self):
+    result = [('.', adb_wrapper.DeviceStat(16889, 4096, 1417436123)),
+              ('..', adb_wrapper.DeviceStat(16873, 4096, 12382237)),
+              ('testfile.txt', adb_wrapper.DeviceStat(33206, 3, 1417436122))]
+    with self.assertCalls(
+        (self.call.adb.Ls('/data/local/tmp'), result)):
+      self.assertEquals(adb_wrapper.DeviceStat(33206, 3, 1417436122),
+                        self.device.Stat('/data/local/tmp/testfile.txt'))
+
+  def testStat_directory(self):
+    result = [('.', adb_wrapper.DeviceStat(16873, 4096, 12382237)),
+              ('..', adb_wrapper.DeviceStat(16873, 4096, 12382237)),
+              ('tmp', adb_wrapper.DeviceStat(16889, 4096, 1417436123))]
+    with self.assertCalls(
+        (self.call.adb.Ls('/data/local'), result)):
+      self.assertEquals(adb_wrapper.DeviceStat(16889, 4096, 1417436123),
+                        self.device.Stat('/data/local/tmp'))
+
+  def testStat_doesNotExist(self):
+    result = [('.', adb_wrapper.DeviceStat(16889, 4096, 1417436123)),
+              ('..', adb_wrapper.DeviceStat(16873, 4096, 12382237)),
+              ('testfile.txt', adb_wrapper.DeviceStat(33206, 3, 1417436122))]
+    with self.assertCalls(
+        (self.call.adb.Ls('/data/local/tmp'), result)):
+      with self.assertRaises(device_errors.CommandFailedError):
+        self.device.Stat('/data/local/tmp/does.not.exist.txt')
+
+
+class DeviceUtilsSetJavaAssertsTest(DeviceUtilsTest):
+
+  def testSetJavaAsserts_enable(self):
+    with self.assertCalls(
+        (self.call.device.ReadFile(self.device.LOCAL_PROPERTIES_PATH),
+         'some.example.prop=with an example value\n'
+         'some.other.prop=value_ok\n'),
+        self.call.device.WriteFile(
+            self.device.LOCAL_PROPERTIES_PATH,
+            'some.example.prop=with an example value\n'
+            'some.other.prop=value_ok\n'
+            'dalvik.vm.enableassertions=all\n'),
+        (self.call.device.GetProp('dalvik.vm.enableassertions'), ''),
+        self.call.device.SetProp('dalvik.vm.enableassertions', 'all')):
+      self.assertTrue(self.device.SetJavaAsserts(True))
+
+  def testSetJavaAsserts_disable(self):
+    with self.assertCalls(
+        (self.call.device.ReadFile(self.device.LOCAL_PROPERTIES_PATH),
+         'some.example.prop=with an example value\n'
+         'dalvik.vm.enableassertions=all\n'
+         'some.other.prop=value_ok\n'),
+        self.call.device.WriteFile(
+            self.device.LOCAL_PROPERTIES_PATH,
+            'some.example.prop=with an example value\n'
+            'some.other.prop=value_ok\n'),
+        (self.call.device.GetProp('dalvik.vm.enableassertions'), 'all'),
+        self.call.device.SetProp('dalvik.vm.enableassertions', '')):
+      self.assertTrue(self.device.SetJavaAsserts(False))
+
+  def testSetJavaAsserts_alreadyEnabled(self):
+    with self.assertCalls(
+        (self.call.device.ReadFile(self.device.LOCAL_PROPERTIES_PATH),
+         'some.example.prop=with an example value\n'
+         'dalvik.vm.enableassertions=all\n'
+         'some.other.prop=value_ok\n'),
+        (self.call.device.GetProp('dalvik.vm.enableassertions'), 'all')):
+      self.assertFalse(self.device.SetJavaAsserts(True))
+
+  def testSetJavaAsserts_malformedLocalProp(self):
+    with self.assertCalls(
+        (self.call.device.ReadFile(self.device.LOCAL_PROPERTIES_PATH),
+         'some.example.prop=with an example value\n'
+         'malformed_property\n'
+         'dalvik.vm.enableassertions=all\n'
+         'some.other.prop=value_ok\n'),
+        (self.call.device.GetProp('dalvik.vm.enableassertions'), 'all')):
+      self.assertFalse(self.device.SetJavaAsserts(True))
+
+
+class DeviceUtilsGetPropTest(DeviceUtilsTest):
+
+  def testGetProp_exists(self):
+    with self.assertCall(
+        self.call.device.RunShellCommand(
+            ['getprop', 'test.property'], check_return=True, single_line=True,
+            timeout=self.device._default_timeout,
+            retries=self.device._default_retries),
+        'property_value'):
+      self.assertEqual('property_value',
+                       self.device.GetProp('test.property'))
+
+  def testGetProp_doesNotExist(self):
+    with self.assertCall(
+        self.call.device.RunShellCommand(
+            ['getprop', 'property.does.not.exist'],
+            check_return=True, single_line=True,
+            timeout=self.device._default_timeout,
+            retries=self.device._default_retries),
+        ''):
+      self.assertEqual('', self.device.GetProp('property.does.not.exist'))
+
+  def testGetProp_cachedRoProp(self):
+    with self.assertCall(
+        self.call.device.RunShellCommand(
+            ['getprop'], check_return=True, large_output=True,
+            timeout=self.device._default_timeout,
+            retries=self.device._default_retries),
+        ['[ro.build.type]: [userdebug]']):
+      self.assertEqual('userdebug',
+                       self.device.GetProp('ro.build.type', cache=True))
+      self.assertEqual('userdebug',
+                       self.device.GetProp('ro.build.type', cache=True))
+
+  def testGetProp_retryAndCache(self):
+    with self.assertCalls(
+        (self.call.device.RunShellCommand(
+            ['getprop'], check_return=True, large_output=True,
+            timeout=self.device._default_timeout,
+            retries=3),
+         ['[ro.build.type]: [userdebug]'])):
+      self.assertEqual('userdebug',
+                       self.device.GetProp('ro.build.type',
+                                           cache=True, retries=3))
+      self.assertEqual('userdebug',
+                       self.device.GetProp('ro.build.type',
+                                           cache=True, retries=3))
+
+
+class DeviceUtilsSetPropTest(DeviceUtilsTest):
+
+  def testSetProp(self):
+    with self.assertCall(
+        self.call.device.RunShellCommand(
+            ['setprop', 'test.property', 'test value'], check_return=True)):
+      self.device.SetProp('test.property', 'test value')
+
+  def testSetProp_check_succeeds(self):
+    with self.assertCalls(
+        (self.call.device.RunShellCommand(
+            ['setprop', 'test.property', 'new_value'], check_return=True)),
+        (self.call.device.GetProp('test.property', cache=False), 'new_value')):
+      self.device.SetProp('test.property', 'new_value', check=True)
+
+  def testSetProp_check_fails(self):
+    with self.assertCalls(
+        (self.call.device.RunShellCommand(
+            ['setprop', 'test.property', 'new_value'], check_return=True)),
+        (self.call.device.GetProp('test.property', cache=False), 'old_value')):
+      with self.assertRaises(device_errors.CommandFailedError):
+        self.device.SetProp('test.property', 'new_value', check=True)
+
+
+class DeviceUtilsGetPidsTest(DeviceUtilsTest):
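+  # In the mocked 'ps' output below, the second column is the PID; GetPids is
+  # expected to map each matching process name to a list of its PIDs.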
+
+  def testGetPids_noMatches(self):
+    with self.assertCall(
+        self.call.device._RunPipedShellCommand('ps | grep -F does.not.match'),
+        []):
+      self.assertEqual({}, self.device.GetPids('does.not.match'))
+
+  def testGetPids_oneMatch(self):
+    with self.assertCall(
+        self.call.device._RunPipedShellCommand('ps | grep -F one.match'),
+        ['user  1001    100   1024 1024   ffffffff 00000000 one.match']):
+      self.assertEqual(
+          {'one.match': ['1001']},
+          self.device.GetPids('one.match'))
+
+  def testGetPids_multipleMatches(self):
+    with self.assertCall(
+        self.call.device._RunPipedShellCommand('ps | grep -F match'),
+        ['user  1001    100   1024 1024   ffffffff 00000000 one.match',
+         'user  1002    100   1024 1024   ffffffff 00000000 two.match',
+         'user  1003    100   1024 1024   ffffffff 00000000 three.match']):
+      self.assertEqual(
+          {'one.match': ['1001'],
+           'two.match': ['1002'],
+           'three.match': ['1003']},
+          self.device.GetPids('match'))
+
+  def testGetPids_exactMatch(self):
+    with self.assertCall(
+        self.call.device._RunPipedShellCommand('ps | grep -F exact.match'),
+        ['user  1000    100   1024 1024   ffffffff 00000000 not.exact.match',
+         'user  1234    100   1024 1024   ffffffff 00000000 exact.match']):
+      self.assertEqual(
+          {'not.exact.match': ['1000'], 'exact.match': ['1234']},
+          self.device.GetPids('exact.match'))
+
+  def testGetPids_quotable(self):
+    with self.assertCall(
+        self.call.device._RunPipedShellCommand("ps | grep -F 'my$process'"),
+        ['user  1234    100   1024 1024   ffffffff 00000000 my$process']):
+      self.assertEqual(
+          {'my$process': ['1234']}, self.device.GetPids('my$process'))
+
+  def testGetPids_multipleInstances(self):
+    with self.assertCall(
+        self.call.device._RunPipedShellCommand('ps | grep -F foo'),
+        ['user  1000    100   1024 1024   ffffffff 00000000 foo',
+         'user  1234    100   1024 1024   ffffffff 00000000 foo']):
+      self.assertEqual(
+          {'foo': ['1000', '1234']},
+          self.device.GetPids('foo'))
+
+
+class DeviceUtilsTakeScreenshotTest(DeviceUtilsTest):
+
+  def testTakeScreenshot_fileNameProvided(self):
+    with self.assertCalls(
+        (mock.call.devil.android.device_temp_file.DeviceTempFile(
+            self.adb, suffix='.png'),
+         MockTempFile('/tmp/path/temp-123.png')),
+        (self.call.adb.Shell('/system/bin/screencap -p /tmp/path/temp-123.png'),
+         ''),
+        self.call.device.PullFile('/tmp/path/temp-123.png',
+                                  '/test/host/screenshot.png')):
+      self.device.TakeScreenshot('/test/host/screenshot.png')
+
+
+class DeviceUtilsGetMemoryUsageForPidTest(DeviceUtilsTest):
+
+  def setUp(self):
+    super(DeviceUtilsGetMemoryUsageForPidTest, self).setUp()
+
+  def testGetMemoryUsageForPid_validPid(self):
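+    # The first seven numeric fields of the mocked 'showmap ... TOTAL' line
+    # are expected to map to Size, Rss, Pss, Shared_Clean, Shared_Dirty,
+    # Private_Clean and Private_Dirty; VmHWM comes from /proc/<pid>/status.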
+    with self.assertCalls(
+        (self.call.device._RunPipedShellCommand(
+            'showmap 1234 | grep TOTAL', as_root=True),
+         ['100 101 102 103 104 105 106 107 TOTAL']),
+        (self.call.device.ReadFile('/proc/1234/status', as_root=True),
+         'VmHWM: 1024 kB\n')):
+      self.assertEqual(
+          {
+            'Size': 100,
+            'Rss': 101,
+            'Pss': 102,
+            'Shared_Clean': 103,
+            'Shared_Dirty': 104,
+            'Private_Clean': 105,
+            'Private_Dirty': 106,
+            'VmHWM': 1024
+          },
+          self.device.GetMemoryUsageForPid(1234))
+
+  def testGetMemoryUsageForPid_noSmaps(self):
+    with self.assertCalls(
+        (self.call.device._RunPipedShellCommand(
+            'showmap 4321 | grep TOTAL', as_root=True),
+         ['cannot open /proc/4321/smaps: No such file or directory']),
+        (self.call.device.ReadFile('/proc/4321/status', as_root=True),
+         'VmHWM: 1024 kb\n')):
+      self.assertEquals({'VmHWM': 1024}, self.device.GetMemoryUsageForPid(4321))
+
+  def testGetMemoryUsageForPid_noStatus(self):
+    with self.assertCalls(
+        (self.call.device._RunPipedShellCommand(
+            'showmap 4321 | grep TOTAL', as_root=True),
+         ['100 101 102 103 104 105 106 107 TOTAL']),
+        (self.call.device.ReadFile('/proc/4321/status', as_root=True),
+         self.CommandError())):
+      self.assertEquals(
+          {
+            'Size': 100,
+            'Rss': 101,
+            'Pss': 102,
+            'Shared_Clean': 103,
+            'Shared_Dirty': 104,
+            'Private_Clean': 105,
+            'Private_Dirty': 106,
+          },
+          self.device.GetMemoryUsageForPid(4321))
+
+
+class DeviceUtilsDismissCrashDialogIfNeededTest(DeviceUtilsTest):
+
+  def testDismissCrashDialogIfNeeded_crashedPackageNotFound(self):
+    sample_dumpsys_output = '''
+WINDOW MANAGER WINDOWS (dumpsys window windows)
+  Window #11 Window{f8b647a u0 SearchPanel}:
+    mDisplayId=0 mSession=Session{8 94:122} mClient=android.os.BinderProxy@1ba5
+    mOwnerUid=100 mShowToOwnerOnly=false package=com.android.systemui appop=NONE
+    mAttrs=WM.LayoutParams{(0,0)(fillxfill) gr=#53 sim=#31 ty=2024 fl=100
+    Requested w=1080 h=1920 mLayoutSeq=426
+    mBaseLayer=211000 mSubLayer=0 mAnimLayer=211000+0=211000 mLastLayer=211000
+'''
+    with self.assertCalls(
+        (self.call.device.RunShellCommand(
+            ['dumpsys', 'window', 'windows'], check_return=True,
+            large_output=True), sample_dumpsys_output.split('\n'))):
+      package_name = self.device.DismissCrashDialogIfNeeded()
+      self.assertIsNone(package_name)
+
+  def testDismissCrashDialogIfNeeded_crashedPackageFound(self):
+    sample_dumpsys_output = '''
+WINDOW MANAGER WINDOWS (dumpsys window windows)
+  Window #11 Window{f8b647a u0 SearchPanel}:
+    mDisplayId=0 mSession=Session{8 94:122} mClient=android.os.BinderProxy@1ba5
+    mOwnerUid=102 mShowToOwnerOnly=false package=com.android.systemui appop=NONE
+    mAttrs=WM.LayoutParams{(0,0)(fillxfill) gr=#53 sim=#31 ty=2024 fl=100
+    Requested w=1080 h=1920 mLayoutSeq=426
+    mBaseLayer=211000 mSubLayer=0 mAnimLayer=211000+0=211000 mLastLayer=211000
+  mHasPermanentDpad=false
+  mCurrentFocus=Window{3a27740f u0 Application Error: com.android.chrome}
+  mFocusedApp=AppWindowToken{470af6f token=Token{272ec24e ActivityRecord{t894}}}
+'''
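+    # Dismissing the dialog is expected to send KEYCODE_DPAD_RIGHT (22) twice
+    # and KEYCODE_ENTER (66) once, then re-check the window list.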
+    with self.assertCalls(
+        (self.call.device.RunShellCommand(
+            ['dumpsys', 'window', 'windows'], check_return=True,
+            large_output=True), sample_dumpsys_output.split('\n')),
+        (self.call.device.RunShellCommand(
+            ['input', 'keyevent', '22'], check_return=True)),
+        (self.call.device.RunShellCommand(
+            ['input', 'keyevent', '22'], check_return=True)),
+        (self.call.device.RunShellCommand(
+            ['input', 'keyevent', '66'], check_return=True)),
+        (self.call.device.RunShellCommand(
+            ['dumpsys', 'window', 'windows'], check_return=True,
+            large_output=True), [])):
+      package_name = self.device.DismissCrashDialogIfNeeded()
+      self.assertEqual(package_name, 'com.android.chrome')
+
+
+class DeviceUtilsClientCache(DeviceUtilsTest):
+
+  def testClientCache_twoCaches(self):
+    self.device._cache['test'] = 0
+    client_cache_one = self.device.GetClientCache('ClientOne')
+    client_cache_one['test'] = 1
+    client_cache_two = self.device.GetClientCache('ClientTwo')
+    client_cache_two['test'] = 2
+    self.assertEqual(self.device._cache['test'], 0)
+    self.assertEqual(client_cache_one, {'test': 1})
+    self.assertEqual(client_cache_two, {'test': 2})
+    self.device._ClearCache()
+    self.assertTrue('test' not in self.device._cache)
+    self.assertEqual(client_cache_one, {})
+    self.assertEqual(client_cache_two, {})
+
+  def testClientCache_multipleInstances(self):
+    client_cache_one = self.device.GetClientCache('ClientOne')
+    client_cache_one['test'] = 1
+    client_cache_two = self.device.GetClientCache('ClientOne')
+    self.assertEqual(client_cache_one, {'test': 1})
+    self.assertEqual(client_cache_two, {'test': 1})
+    self.device._ClearCache()
+    self.assertEqual(client_cache_one, {})
+    self.assertEqual(client_cache_two, {})
+
+
+class DeviceUtilsHealthyDevicesTest(mock_calls.TestCase):
+
+  def testHealthyDevices_emptyBlacklist(self):
+    test_serials = ['0123456789abcdef', 'fedcba9876543210']
+    with self.assertCalls(
+        (mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(),
+         [_AdbWrapperMock(s) for s in test_serials])):
+      blacklist = mock.NonCallableMock(**{'Read.return_value': []})
+      devices = device_utils.DeviceUtils.HealthyDevices(blacklist)
+    for serial, device in zip(test_serials, devices):
+      self.assertTrue(isinstance(device, device_utils.DeviceUtils))
+      self.assertEquals(serial, device.adb.GetDeviceSerial())
+
+  def testHealthyDevices_blacklist(self):
+    test_serials = ['0123456789abcdef', 'fedcba9876543210']
+    with self.assertCalls(
+        (mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(),
+         [_AdbWrapperMock(s) for s in test_serials])):
+      blacklist = mock.NonCallableMock(
+          **{'Read.return_value': ['fedcba9876543210']})
+      devices = device_utils.DeviceUtils.HealthyDevices(blacklist)
+    self.assertEquals(1, len(devices))
+    self.assertTrue(isinstance(devices[0], device_utils.DeviceUtils))
+    self.assertEquals('0123456789abcdef', devices[0].adb.GetDeviceSerial())
+
+
+class DeviceUtilsRestartAdbdTest(DeviceUtilsTest):
+
+  def testAdbdRestart(self):
+    mock_temp_file = '/sdcard/temp-123.sh'
+    with self.assertCalls(
+        (mock.call.devil.android.device_temp_file.DeviceTempFile(
+            self.adb, suffix='.sh'), MockTempFile(mock_temp_file)),
+        self.call.device.WriteFile(mock.ANY, mock.ANY),
+        (self.call.device.RunShellCommand(
+            ['source', mock_temp_file], as_root=True)),
+        self.call.adb.WaitForDevice()):
+      self.device.RestartAdbd()
+
+
+class DeviceUtilsGrantPermissionsTest(DeviceUtilsTest):
+
+  def testGrantPermissions_none(self):
+    self.device.GrantPermissions('package', [])
+
+  def testGrantPermissions_underM(self):
+    with self.patch_call(self.call.device.build_version_sdk,
+                         return_value=version_codes.LOLLIPOP):
+      self.device.GrantPermissions('package', ['p1'])
+
+  def testGrantPermissions_one(self):
+    permissions_cmd = 'pm grant package p1'
+    with self.patch_call(self.call.device.build_version_sdk,
+                         return_value=version_codes.MARSHMALLOW):
+      with self.assertCalls(
+          (self.call.device.RunShellCommand(
+              permissions_cmd, check_return=True), [])):
+        self.device.GrantPermissions('package', ['p1'])
+
+  def testGrantPermissions_multiple(self):
+    permissions_cmd = 'pm grant package p1&&pm grant package p2'
+    with self.patch_call(self.call.device.build_version_sdk,
+                         return_value=version_codes.MARSHMALLOW):
+      with self.assertCalls(
+          (self.call.device.RunShellCommand(
+              permissions_cmd, check_return=True), [])):
+        self.device.GrantPermissions('package', ['p1', 'p2'])
+
+  def testGrantPermissions_WriteExternalStorage(self):
+    permissions_cmd = (
+        'pm grant package android.permission.WRITE_EXTERNAL_STORAGE&&'
+        'pm grant package android.permission.READ_EXTERNAL_STORAGE')
+    with self.patch_call(self.call.device.build_version_sdk,
+                         return_value=version_codes.MARSHMALLOW):
+      with self.assertCalls(
+          (self.call.device.RunShellCommand(
+              permissions_cmd, check_return=True), [])):
+        self.device.GrantPermissions(
+            'package', ['android.permission.WRITE_EXTERNAL_STORAGE'])
+
+  def testGrantPermissions_BlackList(self):
+    with self.patch_call(self.call.device.build_version_sdk,
+                         return_value=version_codes.MARSHMALLOW):
+      self.device.GrantPermissions(
+          'package', ['android.permission.ACCESS_MOCK_LOCATION'])
+
+
+class DeviceUtilsIsScreenOn(DeviceUtilsTest):
+
+  _L_SCREEN_ON = ['test=test mInteractive=true']
+  _K_SCREEN_ON = ['test=test mScreenOn=true']
+  _L_SCREEN_OFF = ['mInteractive=false']
+  _K_SCREEN_OFF = ['mScreenOn=false']
+
+  def testIsScreenOn_onPreL(self):
+    with self.patch_call(self.call.device.build_version_sdk,
+                         return_value=version_codes.KITKAT):
+      with self.assertCalls(
+          (self.call.device._RunPipedShellCommand(
+              'dumpsys input_method | grep mScreenOn'), self._K_SCREEN_ON)):
+        self.assertTrue(self.device.IsScreenOn())
+
+  def testIsScreenOn_onL(self):
+    with self.patch_call(self.call.device.build_version_sdk,
+                         return_value=version_codes.LOLLIPOP):
+      with self.assertCalls(
+          (self.call.device._RunPipedShellCommand(
+              'dumpsys input_method | grep mInteractive'), self._L_SCREEN_ON)):
+        self.assertTrue(self.device.IsScreenOn())
+
+  def testIsScreenOn_offPreL(self):
+    with self.patch_call(self.call.device.build_version_sdk,
+                         return_value=version_codes.KITKAT):
+      with self.assertCalls(
+          (self.call.device._RunPipedShellCommand(
+              'dumpsys input_method | grep mScreenOn'), self._K_SCREEN_OFF)):
+        self.assertFalse(self.device.IsScreenOn())
+
+  def testIsScreenOn_offL(self):
+    with self.patch_call(self.call.device.build_version_sdk,
+                         return_value=version_codes.LOLLIPOP):
+      with self.assertCalls(
+          (self.call.device._RunPipedShellCommand(
+              'dumpsys input_method | grep mInteractive'), self._L_SCREEN_OFF)):
+        self.assertFalse(self.device.IsScreenOn())
+
+  def testIsScreenOn_noOutput(self):
+    with self.patch_call(self.call.device.build_version_sdk,
+                         return_value=version_codes.LOLLIPOP):
+      with self.assertCalls(
+          (self.call.device._RunPipedShellCommand(
+              'dumpsys input_method | grep mInteractive'), [])):
+        with self.assertRaises(device_errors.CommandFailedError):
+          self.device.IsScreenOn()
+
+
+class DeviceUtilsSetScreen(DeviceUtilsTest):
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testSetScreen_alreadySet(self):
+    with self.assertCalls(
+        (self.call.device.IsScreenOn(), False)):
+      self.device.SetScreen(False)
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testSetScreen_on(self):
+    with self.assertCalls(
+        (self.call.device.IsScreenOn(), False),
+        (self.call.device.RunShellCommand('input keyevent 26'), []),
+        (self.call.device.IsScreenOn(), True)):
+      self.device.SetScreen(True)
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testSetScreen_off(self):
+    with self.assertCalls(
+        (self.call.device.IsScreenOn(), True),
+        (self.call.device.RunShellCommand('input keyevent 26'), []),
+        (self.call.device.IsScreenOn(), False)):
+      self.device.SetScreen(False)
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testSetScreen_slow(self):
+    with self.assertCalls(
+        (self.call.device.IsScreenOn(), True),
+        (self.call.device.RunShellCommand('input keyevent 26'), []),
+        (self.call.device.IsScreenOn(), True),
+        (self.call.device.IsScreenOn(), True),
+        (self.call.device.IsScreenOn(), False)):
+      self.device.SetScreen(False)
+
+if __name__ == '__main__':
+  logging.getLogger().setLevel(logging.DEBUG)
+  unittest.main(verbosity=2)
diff --git a/catapult/devil/devil/android/fastboot_utils.py b/catapult/devil/devil/android/fastboot_utils.py
new file mode 100644
index 0000000..f1287d1
--- /dev/null
+++ b/catapult/devil/devil/android/fastboot_utils.py
@@ -0,0 +1,246 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Provides a variety of device interactions based on fastboot."""
+# pylint: disable=unused-argument
+
+import contextlib
+import fnmatch
+import logging
+import os
+import re
+
+from devil.android import decorators
+from devil.android import device_errors
+from devil.android.sdk import fastboot
+from devil.utils import timeout_retry
+
+_DEFAULT_TIMEOUT = 30
+_DEFAULT_RETRIES = 3
+_FASTBOOT_REBOOT_TIMEOUT = 10 * _DEFAULT_TIMEOUT
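+# Partitions that FlashDevice() flashes by default, listed in flash order.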
+ALL_PARTITIONS = [
+    'bootloader',
+    'radio',
+    'boot',
+    'recovery',
+    'system',
+    'userdata',
+    'cache',
+]
+
+
+class FastbootUtils(object):
+
+  _FASTBOOT_WAIT_TIME = 1
+  _RESTART_WHEN_FLASHING = ['bootloader', 'radio']
+  _BOARD_VERIFICATION_FILE = 'android-info.txt'
+  _FLASH_IMAGE_FILES = {
+      'bootloader': 'bootloader*.img',
+      'radio': 'radio*.img',
+      'boot': 'boot.img',
+      'recovery': 'recovery.img',
+      'system': 'system.img',
+      'userdata': 'userdata.img',
+      'cache': 'cache.img',
+  }
+
+  def __init__(self, device, fastbooter=None, default_timeout=_DEFAULT_TIMEOUT,
+               default_retries=_DEFAULT_RETRIES):
+    """FastbootUtils constructor.
+
+    Example Usage to flash a device:
+      fastboot = fastboot_utils.FastbootUtils(device)
+      fastboot.FlashDevice('/path/to/build/directory')
+
+    Args:
+      device: A DeviceUtils instance.
+      fastbooter: Optional fastboot object. If none is passed, one will
+        be created.
+      default_timeout: An integer containing the default number of seconds to
+        wait for an operation to complete if no explicit value is provided.
+      default_retries: An integer containing the default number of times an
+        operation should be retried on failure if no explicit value is provided.
+    """
+    self._device = device
+    self._board = device.product_board
+    self._serial = str(device)
+    self._default_timeout = default_timeout
+    self._default_retries = default_retries
+    if fastbooter:
+      self.fastboot = fastbooter
+    else:
+      self.fastboot = fastboot.Fastboot(self._serial)
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def WaitForFastbootMode(self, timeout=None, retries=None):
+    """Wait for device to boot into fastboot mode.
+
+    This waits for the device serial to show up in fastboot devices output.
+    """
+    def fastboot_mode():
+      return self._serial in self.fastboot.Devices()
+
+    timeout_retry.WaitFor(fastboot_mode, wait_period=self._FASTBOOT_WAIT_TIME)
+
+  @decorators.WithTimeoutAndRetriesFromInstance(
+      min_default_timeout=_FASTBOOT_REBOOT_TIMEOUT)
+  def EnableFastbootMode(self, timeout=None, retries=None):
+    """Reboots phone into fastboot mode.
+
+    Roots phone if needed, then reboots phone into fastboot mode and waits.
+    """
+    self._device.EnableRoot()
+    self._device.adb.Reboot(to_bootloader=True)
+    self.WaitForFastbootMode()
+
+  @decorators.WithTimeoutAndRetriesFromInstance(
+      min_default_timeout=_FASTBOOT_REBOOT_TIMEOUT)
+  def Reboot(self, bootloader=False, timeout=None, retries=None):
+    """Reboots out of fastboot mode.
+
+    It reboots the phone either back into fastboot, or to a regular boot. It
+    then blocks until the device is ready.
+
+    Args:
+      bootloader: If set to True, reboots back into bootloader.
+    """
+    if bootloader:
+      self.fastboot.RebootBootloader()
+      self.WaitForFastbootMode()
+    else:
+      self.fastboot.Reboot()
+      self._device.WaitUntilFullyBooted(timeout=_FASTBOOT_REBOOT_TIMEOUT)
+
+  def _VerifyBoard(self, directory):
+    """Validate as best as possible that the android build matches the device.
+
+    Goes through build files and checks if the board name is mentioned in the
+    |self._BOARD_VERIFICATION_FILE| or in the build archive.
+
+    Args:
+      directory: directory where build files are located.
+    """
+    files = os.listdir(directory)
+    board_regex = re.compile(r'require board=(\w+)')
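+    # android-info.txt typically contains a line of the form
+    # "require board=<board name>"; the captured value is compared against
+    # the device's reported board below.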
+    if self._BOARD_VERIFICATION_FILE in files:
+      with open(os.path.join(directory, self._BOARD_VERIFICATION_FILE)) as f:
+        for line in f:
+          m = board_regex.match(line)
+          if m:
+            board_name = m.group(1)
+            if board_name == self._board:
+              return True
+            elif board_name:
+              return False
+            else:
+              logging.warning('No board type found in %s.',
+                              self._BOARD_VERIFICATION_FILE)
+    else:
+      logging.warning('%s not found. Unable to use it to verify device.',
+                      self._BOARD_VERIFICATION_FILE)
+
+    zip_regex = re.compile(r'.*%s.*\.zip' % re.escape(self._board))
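+    # As a fallback, a build archive whose file name contains the board name
+    # is also accepted as evidence that the build matches the device.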
+    for f in files:
+      if zip_regex.match(f):
+        return True
+
+    return False
+
+  def _FindAndVerifyPartitionsAndImages(self, partitions, directory):
+    """Validate partitions and images.
+
+    Validates all partition names and image files. Flashing cannot be stopped
+    mid-way, so it is important to validate everything first.
+
+    Args:
+      partitions: Partitions to be validated.
+      directory: directory containing the images.
+
+    Returns:
+      Dictionary with exact partition, image name mapping.
+    """
+    files = os.listdir(directory)
+
+    def find_file(pattern):
+      for filename in files:
+        if fnmatch.fnmatch(filename, pattern):
+          return os.path.join(directory, filename)
+      raise device_errors.FastbootCommandFailedError(
+          'Failed to flash device. Could not find image for %s.', pattern)
+
+    return {name: find_file(self._FLASH_IMAGE_FILES[name])
+            for name in partitions}
+
+  def _FlashPartitions(self, partitions, directory, wipe=False, force=False):
+    """Flashes all given partiitons with all given images.
+
+    Args:
+      partitions: List of partitions to flash.
+      directory: Directory where all partitions can be found.
+      wipe: If not set to True, the cache and userdata partitions are skipped
+          rather than flashed.
+      force: boolean to decide to ignore board name safety checks.
+
+    Raises:
+      device_errors.CommandFailedError(): If an image cannot be found or a
+          bad partition name is given.
+    """
+    if not self._VerifyBoard(directory):
+      if force:
+        logging.warning('Could not verify build is meant to be installed on '
+                        'the current device type, but force flag is set. '
+                        'Flashing device. Possibly dangerous operation.')
+      else:
+        raise device_errors.CommandFailedError(
+            'Could not verify build is meant to be installed on the current '
+            'device type. Run again with force=True to force flashing with an '
+            'unverified board.')
+
+    flash_image_files = self._FindAndVerifyPartitionsAndImages(partitions,
+                                                               directory)
+    for partition in partitions:
+      if partition in ['cache', 'userdata'] and not wipe:
+        logging.info(
+            'Not flashing in wipe mode. Skipping partition %s.', partition)
+      else:
+        logging.info(
+            'Flashing %s with %s', partition, flash_image_files[partition])
+        self.fastboot.Flash(partition, flash_image_files[partition])
+        if partition in self._RESTART_WHEN_FLASHING:
+          self.Reboot(bootloader=True)
+
+  @contextlib.contextmanager
+  def FastbootMode(self, timeout=None, retries=None):
+    """Context manager that enables fastboot mode, and reboots after.
+
+    Example usage:
+      with FastbootMode():
+        Flash Device
+      # Anything that runs after flashing.
+    """
+    self.EnableFastbootMode()
+    self.fastboot.SetOemOffModeCharge(False)
+    try:
+      yield self
+    finally:
+      self.fastboot.SetOemOffModeCharge(True)
+      self.Reboot()
+
+  def FlashDevice(self, directory, partitions=None, wipe=False):
+    """Flash device with build in |directory|.
+
+    Directory must contain bootloader, radio, boot, recovery, system, userdata,
+    and cache .img files from an Android build. This is a dangerous operation,
+    so use it with care.
+
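+    Example usage (illustrative; the path is an example only):
+      fastboot_utils.FastbootUtils(device).FlashDevice(
+          '/path/to/build/directory', wipe=True)
+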
+    Args:
+      directory: Directory with build files.
+      partitions: List of partitions to flash. Defaults to all.
+      wipe: Wipes cache and userdata if set to True.
+    """
+    if partitions is None:
+      partitions = ALL_PARTITIONS
+    with self.FastbootMode():
+      self._FlashPartitions(partitions, directory, wipe=wipe)
diff --git a/catapult/devil/devil/android/fastboot_utils_test.py b/catapult/devil/devil/android/fastboot_utils_test.py
new file mode 100755
index 0000000..8e6fc88
--- /dev/null
+++ b/catapult/devil/devil/android/fastboot_utils_test.py
@@ -0,0 +1,280 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Unit tests for the contents of fastboot_utils.py
+"""
+
+# pylint: disable=protected-access,unused-argument
+
+import io
+import logging
+import unittest
+
+from devil import devil_env
+from devil.android import device_errors
+from devil.android import device_utils
+from devil.android import fastboot_utils
+from devil.android.sdk import fastboot
+from devil.utils import mock_calls
+
+with devil_env.SysPath(devil_env.PYMOCK_PATH):
+  import mock  # pylint: disable=import-error
+
+_BOARD = 'board_type'
+_SERIAL = '0123456789abcdef'
+_PARTITIONS = ['cache', 'userdata', 'system', 'bootloader', 'radio']
+_IMAGES = {
+    'cache': 'cache.img',
+    'userdata': 'userdata.img',
+    'system': 'system.img',
+    'bootloader': 'bootloader.img',
+    'radio': 'radio.img',
+}
+_VALID_FILES = [_BOARD + '.zip', 'android-info.txt']
+_INVALID_FILES = ['test.zip', 'android-info.txt']
+
+
+class MockFile(object):
+
+  def __init__(self, name='/tmp/some/file'):
+    self.file = mock.MagicMock(spec=file)
+    self.file.name = name
+
+  def __enter__(self):
+    return self.file
+
+  def __exit__(self, exc_type, exc_val, exc_tb):
+    pass
+
+  @property
+  def name(self):
+    return self.file.name
+
+
+def _FastbootWrapperMock(test_serial):
+  fastbooter = mock.Mock(spec=fastboot.Fastboot)
+  fastbooter.__str__ = mock.Mock(return_value=test_serial)
+  fastbooter.Devices.return_value = [test_serial]
+  return fastbooter
+
+
+def _DeviceUtilsMock(test_serial):
+  device = mock.Mock(spec=device_utils.DeviceUtils)
+  device.__str__ = mock.Mock(return_value=test_serial)
+  device.product_board = mock.Mock(return_value=_BOARD)
+  device.adb = mock.Mock()
+  return device
+
+
+class FastbootUtilsTest(mock_calls.TestCase):
+
+  def setUp(self):
+    self.device_utils_mock = _DeviceUtilsMock(_SERIAL)
+    self.fastboot_wrapper = _FastbootWrapperMock(_SERIAL)
+    self.fastboot = fastboot_utils.FastbootUtils(
+        self.device_utils_mock, fastbooter=self.fastboot_wrapper,
+        default_timeout=2, default_retries=0)
+    self.fastboot._board = _BOARD
+
+
+class FastbootUtilsInitTest(FastbootUtilsTest):
+
+  def testInitWithDeviceUtil(self):
+    f = fastboot_utils.FastbootUtils(self.device_utils_mock)
+    self.assertEqual(str(self.device_utils_mock), str(f._device))
+
+  def testInitWithMissing_fails(self):
+    with self.assertRaises(AttributeError):
+      fastboot_utils.FastbootUtils(None)
+    with self.assertRaises(AttributeError):
+      fastboot_utils.FastbootUtils('')
+
+
+class FastbootUtilsWaitForFastbootMode(FastbootUtilsTest):
+
+  # If this test fails, it will do so by timing out.
+  @mock.patch('time.sleep', mock.Mock())
+  def testWaitForFastbootMode(self):
+    self.fastboot.WaitForFastbootMode()
+
+
+class FastbootUtilsEnableFastbootMode(FastbootUtilsTest):
+
+  def testEnableFastbootMode(self):
+    with self.assertCalls(
+        self.call.fastboot._device.EnableRoot(),
+        self.call.fastboot._device.adb.Reboot(to_bootloader=True),
+        self.call.fastboot.WaitForFastbootMode()):
+      self.fastboot.EnableFastbootMode()
+
+
+class FastbootUtilsReboot(FastbootUtilsTest):
+
+  def testReboot_bootloader(self):
+    with self.assertCalls(
+        self.call.fastboot.fastboot.RebootBootloader(),
+        self.call.fastboot.WaitForFastbootMode()):
+      self.fastboot.Reboot(bootloader=True)
+
+  def testReboot_normal(self):
+    with self.assertCalls(
+        self.call.fastboot.fastboot.Reboot(),
+        self.call.fastboot._device.WaitUntilFullyBooted(timeout=mock.ANY)):
+      self.fastboot.Reboot()
+
+
+class FastbootUtilsFlashPartitions(FastbootUtilsTest):
+
+  def testFlashPartitions_wipe(self):
+    with self.assertCalls(
+        (self.call.fastboot._VerifyBoard('test'), True),
+        (self.call.fastboot._FindAndVerifyPartitionsAndImages(
+            _PARTITIONS, 'test'), _IMAGES),
+        (self.call.fastboot.fastboot.Flash('cache', 'cache.img')),
+        (self.call.fastboot.fastboot.Flash('userdata', 'userdata.img')),
+        (self.call.fastboot.fastboot.Flash('system', 'system.img')),
+        (self.call.fastboot.fastboot.Flash('bootloader', 'bootloader.img')),
+        (self.call.fastboot.Reboot(bootloader=True)),
+        (self.call.fastboot.fastboot.Flash('radio', 'radio.img')),
+        (self.call.fastboot.Reboot(bootloader=True))):
+      self.fastboot._FlashPartitions(_PARTITIONS, 'test', wipe=True)
+
+  def testFlashPartitions_noWipe(self):
+    with self.assertCalls(
+        (self.call.fastboot._VerifyBoard('test'), True),
+        (self.call.fastboot._FindAndVerifyPartitionsAndImages(
+            _PARTITIONS, 'test'), _IMAGES),
+        (self.call.fastboot.fastboot.Flash('system', 'system.img')),
+        (self.call.fastboot.fastboot.Flash('bootloader', 'bootloader.img')),
+        (self.call.fastboot.Reboot(bootloader=True)),
+        (self.call.fastboot.fastboot.Flash('radio', 'radio.img')),
+        (self.call.fastboot.Reboot(bootloader=True))):
+      self.fastboot._FlashPartitions(_PARTITIONS, 'test')
+
+
+class FastbootUtilsFastbootMode(FastbootUtilsTest):
+
+  def testFastbootMode_good(self):
+    with self.assertCalls(
+        self.call.fastboot.EnableFastbootMode(),
+        self.call.fastboot.fastboot.SetOemOffModeCharge(False),
+        self.call.fastboot.fastboot.SetOemOffModeCharge(True),
+        self.call.fastboot.Reboot()):
+      with self.fastboot.FastbootMode() as fbm:
+        self.assertEqual(self.fastboot, fbm)
+
+  def testFastbootMode_exception(self):
+    with self.assertCalls(
+        self.call.fastboot.EnableFastbootMode(),
+        self.call.fastboot.fastboot.SetOemOffModeCharge(False),
+        self.call.fastboot.fastboot.SetOemOffModeCharge(True),
+        self.call.fastboot.Reboot()):
+      with self.assertRaises(NotImplementedError):
+        with self.fastboot.FastbootMode() as fbm:
+          self.assertEqual(self.fastboot, fbm)
+          raise NotImplementedError
+
+  def testFastbootMode_exceptionInEnableFastboot(self):
+    self.fastboot.EnableFastbootMode = mock.Mock()
+    self.fastboot.EnableFastbootMode.side_effect = NotImplementedError
+    with self.assertRaises(NotImplementedError):
+      with self.fastboot.FastbootMode():
+        pass
+
+
+class FastbootUtilsVerifyBoard(FastbootUtilsTest):
+
+  def testVerifyBoard_bothValid(self):
+    mock_file = io.StringIO(u'require board=%s\n' % _BOARD)
+    with mock.patch('__builtin__.open', return_value=mock_file, create=True):
+      with mock.patch('os.listdir', return_value=_VALID_FILES):
+        self.assertTrue(self.fastboot._VerifyBoard('test'))
+
+  def testVerifyBoard_bothNotValid(self):
+    mock_file = io.StringIO(u'abc')
+    with mock.patch('__builtin__.open', return_value=mock_file, create=True):
+      with mock.patch('os.listdir', return_value=_INVALID_FILES):
+        self.assertFalse(self.fastboot._VerifyBoard('test'))
+
+  def testVerifyBoard_FileNotFoundZipValid(self):
+    with mock.patch('os.listdir', return_value=[_BOARD + '.zip']):
+      self.assertTrue(self.fastboot._VerifyBoard('test'))
+
+  def testVerifyBoard_ZipNotFoundFileValid(self):
+    mock_file = io.StringIO(u'require board=%s\n' % _BOARD)
+    with mock.patch('__builtin__.open', return_value=mock_file, create=True):
+      with mock.patch('os.listdir', return_value=['android-info.txt']):
+        self.assertTrue(self.fastboot._VerifyBoard('test'))
+
+  def testVerifyBoard_zipNotValidFileIs(self):
+    mock_file = io.StringIO(u'require board=%s\n' % _BOARD)
+    with mock.patch('__builtin__.open', return_value=mock_file, create=True):
+      with mock.patch('os.listdir', return_value=_INVALID_FILES):
+        self.assertTrue(self.fastboot._VerifyBoard('test'))
+
+  def testVerifyBoard_fileNotValidZipIs(self):
+    mock_file = io.StringIO(u'require board=WrongBoard')
+    with mock.patch('__builtin__.open', return_value=mock_file, create=True):
+      with mock.patch('os.listdir', return_value=_VALID_FILES):
+        self.assertFalse(self.fastboot._VerifyBoard('test'))
+
+  def testVerifyBoard_noBoardInFileValidZip(self):
+    mock_file = io.StringIO(u'Regex wont match')
+    with mock.patch('__builtin__.open', return_value=mock_file, create=True):
+      with mock.patch('os.listdir', return_value=_VALID_FILES):
+        self.assertTrue(self.fastboot._VerifyBoard('test'))
+
+  def testVerifyBoard_noBoardInFileInvalidZip(self):
+    mock_file = io.StringIO(u'Regex wont match')
+    with mock.patch('__builtin__.open', return_value=mock_file, create=True):
+      with mock.patch('os.listdir', return_value=_INVALID_FILES):
+        self.assertFalse(self.fastboot._VerifyBoard('test'))
+
+
+class FastbootUtilsFindAndVerifyPartitionsAndImages(FastbootUtilsTest):
+
+  def testFindAndVerifyPartitionsAndImages_valid(self):
+    PARTITIONS = [
+        'bootloader', 'radio', 'boot', 'recovery', 'system', 'userdata', 'cache'
+    ]
+    files = [
+        'bootloader-test-.img',
+        'radio123.img',
+        'boot.img',
+        'recovery.img',
+        'system.img',
+        'userdata.img',
+        'cache.img'
+    ]
+    return_check = {
+      'bootloader': 'test/bootloader-test-.img',
+      'radio': 'test/radio123.img',
+      'boot': 'test/boot.img',
+      'recovery': 'test/recovery.img',
+      'system': 'test/system.img',
+      'userdata': 'test/userdata.img',
+      'cache': 'test/cache.img',
+    }
+
+    with mock.patch('os.listdir', return_value=files):
+      return_value = self.fastboot._FindAndVerifyPartitionsAndImages(
+          PARTITIONS, 'test')
+      self.assertDictEqual(return_value, return_check)
+
+  def testFindAndVerifyPartitionsAndImages_badPartition(self):
+    with mock.patch('os.listdir', return_value=['test']):
+      with self.assertRaises(KeyError):
+        self.fastboot._FindAndVerifyPartitionsAndImages(['test'], 'test')
+
+  def testFindAndVerifyPartitionsAndImages_noFile(self):
+    with mock.patch('os.listdir', return_value=['test']):
+      with self.assertRaises(device_errors.FastbootCommandFailedError):
+        self.fastboot._FindAndVerifyPartitionsAndImages(['cache'], 'test')
+
+
+if __name__ == '__main__':
+  logging.getLogger().setLevel(logging.DEBUG)
+  unittest.main(verbosity=2)
diff --git a/catapult/devil/devil/android/flag_changer.py b/catapult/devil/devil/android/flag_changer.py
new file mode 100644
index 0000000..4267f11
--- /dev/null
+++ b/catapult/devil/devil/android/flag_changer.py
@@ -0,0 +1,182 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+
+from devil.android import device_errors
+
+
+class FlagChanger(object):
+  """Changes the flags Chrome runs with.
+
+    Flags can be temporarily set for a particular set of unit tests.  These
+    tests should call Restore() to revert the flags to their original state
+    once the tests have completed.
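+
+    Example usage (illustrative; the file path and flag are examples only):
+      changer = flag_changer.FlagChanger(
+          device, '/data/local/chrome-command-line')
+      changer.AddFlags(['--enable-logging'])
+      try:
+        ...  # run tests with the modified command line
+      finally:
+        changer.Restore()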
+  """
+
+  def __init__(self, device, cmdline_file):
+    """Initializes the FlagChanger and records the original arguments.
+
+    Args:
+      device: A DeviceUtils instance.
+      cmdline_file: Path to the command line file on the device.
+    """
+    self._device = device
+
+    # Unrooted devices have limited access to the file system.
+    # Place files in /data/local/tmp/ rather than /data/local/
+    if not device.HasRoot() and '/data/local/tmp/' not in cmdline_file:
+      self._cmdline_file = cmdline_file.replace('/data/local/',
+                                                '/data/local/tmp/')
+    else:
+      self._cmdline_file = cmdline_file
+
+    stored_flags = ''
+    if self._device.PathExists(self._cmdline_file):
+      try:
+        stored_flags = self._device.ReadFile(self._cmdline_file).strip()
+      except device_errors.CommandFailedError:
+        pass
+    # Store the flags as a set to facilitate adding and removing flags.
+    self._state_stack = [set(self._TokenizeFlags(stored_flags))]
+
+  def ReplaceFlags(self, flags):
+    """Replaces the flags in the command line with the ones provided.
+       Saves the current flags state on the stack, so a call to Restore will
+       change the state back to the one preceding the call to ReplaceFlags.
+
+    Args:
+      flags: A sequence of command line flags to set, eg. ['--single-process'].
+             Note: this should include flags only, not the name of a command
+             to run (ie. there is no need to start the sequence with 'chrome').
+    """
+    new_flags = set(flags)
+    self._state_stack.append(new_flags)
+    self._UpdateCommandLineFile()
+
+  def AddFlags(self, flags):
+    """Appends flags to the command line if they aren't already there.
+       Saves the current flags state on the stack, so a call to Restore will
+       change the state back to the one preceding the call to AddFlags.
+
+    Args:
+      flags: A sequence of flags to add on, eg. ['--single-process'].
+    """
+    self.PushFlags(add=flags)
+
+  def RemoveFlags(self, flags):
+    """Removes flags from the command line, if they exist.
+       Saves the current flags state on the stack, so a call to Restore will
+       change the state back to the one preceding the call to RemoveFlags.
+
+       Note that calling RemoveFlags after AddFlags will result in having
+       two nested states.
+
+    Args:
+      flags: A sequence of flags to remove, eg. ['--single-process'].  Note
+             that we expect a complete match when removing flags; if you want
+             to remove a switch with a value, you must use the exact string
+             used to add it in the first place.
+    """
+    self.PushFlags(remove=flags)
+
+  def PushFlags(self, add=None, remove=None):
+    """Appends and removes flags to/from the command line if they aren't already
+       there. Saves the current flags state on the stack, so a call to Restore
+       will change the state back to the one preceeding the call to PushFlags.
+
+    Args:
+      add: A list of flags to add on, eg. ['--single-process'].
+      remove: A list of flags to remove, eg. ['--single-process'].  Note that we
+              expect a complete match when removing flags; if you want to remove
+              a switch with a value, you must use the exact string used to add
+              it in the first place.
+    """
+    new_flags = self._state_stack[-1].copy()
+    if add:
+      new_flags.update(add)
+    if remove:
+      new_flags.difference_update(remove)
+    self.ReplaceFlags(new_flags)
+
+  def Restore(self):
+    """Restores the flags to their state prior to the last AddFlags or
+       RemoveFlags call.
+    """
+    # The initial state must always remain on the stack.
+    assert len(self._state_stack) > 1, (
+      "Mismatch between calls to Add/RemoveFlags and Restore")
+    self._state_stack.pop()
+    self._UpdateCommandLineFile()
+
+  def _UpdateCommandLineFile(self):
+    """Writes out the command line to the file, or removes it if empty."""
+    current_flags = list(self._state_stack[-1])
+    logging.info('Current flags: %s', current_flags)
+    # Root is not required to write to /data/local/tmp/.
+    use_root = '/data/local/tmp/' not in self._cmdline_file
+    if current_flags:
+      # The first command line argument doesn't matter as we are not actually
+      # launching the chrome executable using this command line.
+      cmd_line = ' '.join(['_'] + current_flags)
+      self._device.WriteFile(
+          self._cmdline_file, cmd_line, as_root=use_root)
+      file_contents = self._device.ReadFile(
+          self._cmdline_file, as_root=use_root).rstrip()
+      assert file_contents == cmd_line, (
+          'Failed to set the command line file at %s' % self._cmdline_file)
+    else:
+      self._device.RunShellCommand('rm ' + self._cmdline_file,
+                                   as_root=use_root)
+      assert not self._device.FileExists(self._cmdline_file), (
+          'Failed to remove the command line file at %s' % self._cmdline_file)
+
+  @staticmethod
+  def _TokenizeFlags(line):
+    """Changes the string containing the command line into a list of flags.
+
+    Follows similar logic to CommandLine.java::tokenizeQuotedArguments:
+    * Flags are split using whitespace, unless the whitespace is within a
+      pair of quotation marks.
+    * Unlike the Java version, we keep the quotation marks around switch
+      values since we need them to re-create the file when new flags are
+      appended.
+
+    Args:
+      line: A string containing the entire command line.  The first token is
+            assumed to be the program name.
+    """
+    if not line:
+      return []
+
+    tokenized_flags = []
+    current_flag = ""
+    within_quotations = False
+
+    # Move through the string character by character and build up each flag
+    # along the way.
+    for c in line.strip():
+      if c == '"':
+        if len(current_flag) > 0 and current_flag[-1] == '\\':
+          # Last char was a backslash; pop it, and treat this " as a literal.
+          current_flag = current_flag[0:-1] + '"'
+        else:
+          within_quotations = not within_quotations
+          current_flag += c
+      elif not within_quotations and c in (' ', '\t'):
+        if current_flag:
+          tokenized_flags.append(current_flag)
+          current_flag = ""
+      else:
+        current_flag += c
+
+    # Tack on the last flag.
+    if not current_flag:
+      if within_quotations:
+        logging.warning('Unterminated quoted argument: %s', line)
+    else:
+      tokenized_flags.append(current_flag)
+
+    # Return everything but the program name.
+    return tokenized_flags[1:]
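Example usage of FlagChanger (a minimal sketch, not part of the change above; `device` is assumed to be a DeviceUtils instance, and the command-line path and flag are illustrative):

    from devil.android import flag_changer

    CMDLINE_FILE = '/data/local/tmp/chrome-command-line'  # illustrative path

    def run_with_temporary_flags(device):
      # Push temporary flags, do the work, then restore the original state.
      changer = flag_changer.FlagChanger(device, CMDLINE_FILE)
      changer.AddFlags(['--enable-test-intents'])  # illustrative flag
      try:
        pass  # ... launch the browser and run the test here ...
      finally:
        changer.Restore()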
diff --git a/catapult/devil/devil/android/forwarder.py b/catapult/devil/devil/android/forwarder.py
new file mode 100644
index 0000000..21f5223
--- /dev/null
+++ b/catapult/devil/devil/android/forwarder.py
@@ -0,0 +1,344 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=W0212
+
+import fcntl
+import logging
+import os
+import psutil
+
+from devil import base_error
+from devil import devil_env
+from devil.android.constants import file_system
+from devil.android.valgrind_tools import base_tool
+from devil.utils import cmd_helper
+
+
+def _GetProcessStartTime(pid):
+  return psutil.Process(pid).create_time
+
+
+class _FileLock(object):
+  """With statement-aware implementation of a file lock.
+
+  File locks are needed for cross-process synchronization when the
+  multiprocessing Python module is used.
+  """
+
+  def __init__(self, path):
+    self._fd = -1
+    self._path = path
+
+  def __enter__(self):
+    self._fd = os.open(self._path, os.O_RDONLY | os.O_CREAT)
+    if self._fd < 0:
+      raise Exception('Could not open file %s for reading' % self._path)
+    fcntl.flock(self._fd, fcntl.LOCK_EX)
+
+  def __exit__(self, _exception_type, _exception_value, traceback):
+    fcntl.flock(self._fd, fcntl.LOCK_UN)
+    os.close(self._fd)
+
+
+class HostForwarderError(base_error.BaseError):
+  """Exception for failures involving host_forwarder."""
+
+  def __init__(self, message):
+    super(HostForwarderError, self).__init__(message)
+
+
+class Forwarder(object):
+  """Thread-safe class to manage port forwards from the device to the host."""
+
+  _DEVICE_FORWARDER_FOLDER = (file_system.TEST_EXECUTABLE_DIR +
+                              '/forwarder/')
+  _DEVICE_FORWARDER_PATH = (file_system.TEST_EXECUTABLE_DIR +
+                            '/forwarder/device_forwarder')
+  _LOCK_PATH = '/tmp/chrome.forwarder.lock'
+  # Defined in host_forwarder_main.cc
+  _HOST_FORWARDER_LOG = '/tmp/host_forwarder_log'
+
+  _instance = None
+
+  @staticmethod
+  def Map(port_pairs, device, tool=None):
+    """Runs the forwarder.
+
+    Args:
+      port_pairs: A list of tuples (device_port, host_port) to forward. Note
+                 that you can specify 0 as a device_port, in which case a
+                 port will be dynamically assigned on the device. You can
+                 get the number of the assigned port using the
+                 DevicePortForHostPort method.
+      device: A DeviceUtils instance.
+      tool: Tool class to use to get wrapper, if necessary, for executing the
+            forwarder (see valgrind_tools.py).
+
+    Raises:
+      Exception on failure to forward the port.
+    """
+    if not tool:
+      tool = base_tool.BaseTool()
+    with _FileLock(Forwarder._LOCK_PATH):
+      instance = Forwarder._GetInstanceLocked(tool)
+      instance._InitDeviceLocked(device, tool)
+
+      device_serial = str(device)
+      redirection_commands = [
+          ['--adb=' + devil_env.config.FetchPath('adb'),
+           '--serial-id=' + device_serial,
+           '--map', str(device_port), str(host_port)]
+          for device_port, host_port in port_pairs]
+      logging.info('Forwarding using commands: %s', redirection_commands)
+
+      for redirection_command in redirection_commands:
+        try:
+          (exit_code, output) = cmd_helper.GetCmdStatusAndOutput(
+              [instance._host_forwarder_path] + redirection_command)
+        except OSError as e:
+          if e.errno == 2:
+            raise HostForwarderError(
+                'Unable to start host forwarder. '
+                'Make sure you have built host_forwarder.')
+          else:
+            raise
+        if exit_code != 0:
+          Forwarder._KillDeviceLocked(device, tool)
+          # Log alive forwarders
+          ps_out = device.RunShellCommand(['ps'])
+          logging.info('Currently running device_forwarders:')
+          for line in ps_out:
+            if 'device_forwarder' in line:
+              logging.info('    %s', line)
+          raise HostForwarderError(
+              '%s exited with %d:\n%s' % (instance._host_forwarder_path,
+                                          exit_code, '\n'.join(output)))
+        tokens = output.split(':')
+        if len(tokens) != 2:
+          raise HostForwarderError(
+              'Unexpected host forwarder output "%s", '
+              'expected "device_port:host_port"' % output)
+        device_port = int(tokens[0])
+        host_port = int(tokens[1])
+        serial_with_port = (device_serial, device_port)
+        instance._device_to_host_port_map[serial_with_port] = host_port
+        instance._host_to_device_port_map[host_port] = serial_with_port
+        logging.info('Forwarding device port: %d to host port: %d.',
+                     device_port, host_port)
+
+  @staticmethod
+  def UnmapDevicePort(device_port, device):
+    """Unmaps a previously forwarded device port.
+
+    Args:
+      device: A DeviceUtils instance.
+      device_port: A previously forwarded port (through Map()).
+    """
+    with _FileLock(Forwarder._LOCK_PATH):
+      Forwarder._UnmapDevicePortLocked(device_port, device)
+
+  @staticmethod
+  def UnmapAllDevicePorts(device):
+    """Unmaps all the previously forwarded ports for the provided device.
+
+    Args:
+      device: A DeviceUtils instance.
+    """
+    with _FileLock(Forwarder._LOCK_PATH):
+      if not Forwarder._instance:
+        return
+      adb_serial = str(device)
+      if adb_serial not in Forwarder._instance._initialized_devices:
+        return
+      port_map = Forwarder._GetInstanceLocked(
+          None)._device_to_host_port_map
+      for (device_serial, device_port) in port_map.keys():
+        if adb_serial == device_serial:
+          Forwarder._UnmapDevicePortLocked(device_port, device)
+      # There are no more ports mapped, kill the device_forwarder.
+      tool = base_tool.BaseTool()
+      Forwarder._KillDeviceLocked(device, tool)
+
+  @staticmethod
+  def DevicePortForHostPort(host_port):
+    """Returns the device port that corresponds to a given host port."""
+    with _FileLock(Forwarder._LOCK_PATH):
+      _, device_port = Forwarder._GetInstanceLocked(
+          None)._host_to_device_port_map.get(host_port)
+      return device_port
+
+  @staticmethod
+  def RemoveHostLog():
+    if os.path.exists(Forwarder._HOST_FORWARDER_LOG):
+      os.unlink(Forwarder._HOST_FORWARDER_LOG)
+
+  @staticmethod
+  def GetHostLog():
+    if not os.path.exists(Forwarder._HOST_FORWARDER_LOG):
+      return ''
+    with open(Forwarder._HOST_FORWARDER_LOG, 'r') as f:
+      return f.read()
+
+  @staticmethod
+  def _GetInstanceLocked(tool):
+    """Returns the singleton instance.
+
+    Note that the global lock must be acquired before calling this method.
+
+    Args:
+      tool: Tool class to use to get wrapper, if necessary, for executing the
+            forwarder (see valgrind_tools.py).
+    """
+    if not Forwarder._instance:
+      Forwarder._instance = Forwarder(tool)
+    return Forwarder._instance
+
+  def __init__(self, tool):
+    """Constructs a new instance of Forwarder.
+
+    Note that Forwarder is a singleton therefore this constructor should be
+    called only once.
+
+    Args:
+      tool: Tool class to use to get wrapper, if necessary, for executing the
+            forwarder (see valgrind_tools.py).
+    """
+    assert not Forwarder._instance
+    self._tool = tool
+    self._initialized_devices = set()
+    self._device_to_host_port_map = dict()
+    self._host_to_device_port_map = dict()
+    self._host_forwarder_path = devil_env.config.FetchPath('forwarder_host')
+    assert os.path.exists(self._host_forwarder_path), 'Please build forwarder2'
+    self._InitHostLocked()
+
+  @staticmethod
+  def _UnmapDevicePortLocked(device_port, device):
+    """Internal method used by UnmapDevicePort().
+
+    Note that the global lock must be acquired before calling this method.
+    """
+    instance = Forwarder._GetInstanceLocked(None)
+    serial = str(device)
+    serial_with_port = (serial, device_port)
+    if serial_with_port not in instance._device_to_host_port_map:
+      logging.error('Trying to unmap non-forwarded port %d', device_port)
+      return
+    redirection_command = ['--adb=' + devil_env.config.FetchPath('adb'),
+                           '--serial-id=' + serial,
+                           '--unmap', str(device_port)]
+    logging.info('Undo forwarding using command: %s', redirection_command)
+    (exit_code, output) = cmd_helper.GetCmdStatusAndOutput(
+        [instance._host_forwarder_path] + redirection_command)
+    if exit_code != 0:
+      logging.error(
+          '%s exited with %d:\n%s',
+          instance._host_forwarder_path, exit_code, '\n'.join(output))
+    host_port = instance._device_to_host_port_map[serial_with_port]
+    del instance._device_to_host_port_map[serial_with_port]
+    del instance._host_to_device_port_map[host_port]
+
+  @staticmethod
+  def _GetPidForLock():
+    """Returns the PID used for host_forwarder initialization.
+
+    The PID of the "sharder" is used to handle multiprocessing. The "sharder"
+    is the initial parent process from which the worker processes are forked.
+    """
+    return os.getpgrp()
+
+  def _InitHostLocked(self):
+    """Initializes the host forwarder daemon.
+
+    Note that the global lock must be acquired before calling this method. This
+    method kills any existing host_forwarder process that could be stale.
+    """
+    # See if the host_forwarder daemon was already initialized by a concurrent
+    # process or thread (in case multi-process sharding is not used).
+    pid_for_lock = Forwarder._GetPidForLock()
+    fd = os.open(Forwarder._LOCK_PATH, os.O_RDWR | os.O_CREAT)
+    with os.fdopen(fd, 'r+') as pid_file:
+      pid_with_start_time = pid_file.readline()
+      if pid_with_start_time:
+        (pid, process_start_time) = pid_with_start_time.split(':')
+        if pid == str(pid_for_lock):
+          if process_start_time == str(_GetProcessStartTime(pid_for_lock)):
+            return
+      self._KillHostLocked()
+      pid_file.seek(0)
+      pid_file.write(
+          '%s:%s' % (pid_for_lock, str(_GetProcessStartTime(pid_for_lock))))
+      pid_file.truncate()
+
+  def _InitDeviceLocked(self, device, tool):
+    """Initializes the device_forwarder daemon for a specific device (once).
+
+    Note that the global lock must be acquired before calling this method. This
+    method kills any existing device_forwarder daemon on the device that could
+    be stale, pushes the latest version of the daemon (to the device) and starts
+    it.
+
+    Args:
+      device: A DeviceUtils instance.
+      tool: Tool class to use to get wrapper, if necessary, for executing the
+            forwarder (see valgrind_tools.py).
+    """
+    device_serial = str(device)
+    if device_serial in self._initialized_devices:
+      return
+    Forwarder._KillDeviceLocked(device, tool)
+    forwarder_device_path_on_host = devil_env.config.FetchPath(
+        'forwarder_device', device=device)
+    forwarder_device_path_on_device = (
+        Forwarder._DEVICE_FORWARDER_FOLDER
+        if os.path.isdir(forwarder_device_path_on_host)
+        else Forwarder._DEVICE_FORWARDER_PATH)
+    device.PushChangedFiles([(
+        forwarder_device_path_on_host,
+        forwarder_device_path_on_device)])
+
+    cmd = '%s %s' % (tool.GetUtilWrapper(), Forwarder._DEVICE_FORWARDER_PATH)
+    device.RunShellCommand(
+        cmd, env={'LD_LIBRARY_PATH': Forwarder._DEVICE_FORWARDER_FOLDER},
+        check_return=True)
+    self._initialized_devices.add(device_serial)
+
+  def _KillHostLocked(self):
+    """Kills the forwarder process running on the host.
+
+    Note that the global lock must be acquired before calling this method.
+    """
+    logging.info('Killing host_forwarder.')
+    (exit_code, output) = cmd_helper.GetCmdStatusAndOutput(
+        [self._host_forwarder_path, '--kill-server'])
+    if exit_code != 0:
+      (exit_code, output) = cmd_helper.GetCmdStatusAndOutput(
+          ['pkill', '-9', 'host_forwarder'])
+      if exit_code != 0:
+        raise HostForwarderError(
+            '%s exited with %d:\n%s' % (self._host_forwarder_path, exit_code,
+                                        '\n'.join(output)))
+
+  @staticmethod
+  def _KillDeviceLocked(device, tool):
+    """Kills the forwarder process running on the device.
+
+    Note that the global lock must be acquired before calling this method.
+
+    Args:
+      device: Instance of DeviceUtils for talking to the device.
+      tool: Wrapper tool (e.g. valgrind) that can be used to execute the device
+            forwarder (see valgrind_tools.py).
+    """
+    logging.info('Killing device_forwarder.')
+    Forwarder._instance._initialized_devices.discard(str(device))
+    if not device.FileExists(Forwarder._DEVICE_FORWARDER_PATH):
+      return
+
+    cmd = '%s %s --kill-server' % (tool.GetUtilWrapper(),
+                                   Forwarder._DEVICE_FORWARDER_PATH)
+    device.RunShellCommand(
+        cmd, env={'LD_LIBRARY_PATH': Forwarder._DEVICE_FORWARDER_FOLDER},
+        check_return=True)
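Example usage of Forwarder (a sketch, not part of the change above; it assumes `device` is a DeviceUtils instance and that the host_forwarder/device_forwarder binaries are available via devil_env; the port number is illustrative):

    from devil.android import forwarder

    def with_forwarded_port(device):
      # Ask for a dynamically assigned device port (0) mapped to host port 8000,
      # then look up which device port was actually chosen.
      forwarder.Forwarder.Map([(0, 8000)], device)
      try:
        return forwarder.Forwarder.DevicePortForHostPort(8000)
      finally:
        forwarder.Forwarder.UnmapAllDevicePorts(device)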
diff --git a/catapult/devil/devil/android/install_commands.py b/catapult/devil/devil/android/install_commands.py
new file mode 100644
index 0000000..5a06bf3
--- /dev/null
+++ b/catapult/devil/devil/android/install_commands.py
@@ -0,0 +1,57 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import posixpath
+
+from devil import devil_env
+from devil.android import device_errors
+from devil.android.constants import file_system
+
+BIN_DIR = '%s/bin' % file_system.TEST_EXECUTABLE_DIR
+_FRAMEWORK_DIR = '%s/framework' % file_system.TEST_EXECUTABLE_DIR
+
+_COMMANDS = {
+  'unzip': 'org.chromium.android.commands.unzip.Unzip',
+}
+
+_SHELL_COMMAND_FORMAT = (
+"""#!/system/bin/sh
+base=%s
+export CLASSPATH=$base/framework/chromium_commands.jar
+exec app_process $base/bin %s $@
+""")
+
+
+def Installed(device):
+  paths = [posixpath.join(BIN_DIR, c) for c in _COMMANDS]
+  paths.append(posixpath.join(_FRAMEWORK_DIR, 'chromium_commands.jar'))
+  return device.PathExists(paths)
+
+
+def InstallCommands(device):
+  if device.IsUserBuild():
+    raise device_errors.CommandFailedError(
+        'chromium_commands currently requires a userdebug build.',
+        device_serial=device.adb.GetDeviceSerial())
+
+  chromium_commands_jar_path = devil_env.config.FetchPath('chromium_commands')
+  if not os.path.exists(chromium_commands_jar_path):
+    raise device_errors.CommandFailedError(
+        '%s not found. Please build chromium_commands.'
+        % chromium_commands_jar_path)
+
+  device.RunShellCommand(['mkdir', BIN_DIR, _FRAMEWORK_DIR])
+  for command, main_class in _COMMANDS.iteritems():
+    shell_command = _SHELL_COMMAND_FORMAT % (
+        file_system.TEST_EXECUTABLE_DIR, main_class)
+    shell_file = '%s/%s' % (BIN_DIR, command)
+    device.WriteFile(shell_file, shell_command)
+    device.RunShellCommand(
+        ['chmod', '755', shell_file], check_return=True)
+
+  device.adb.Push(
+      chromium_commands_jar_path,
+      '%s/chromium_commands.jar' % _FRAMEWORK_DIR)
+
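Example usage of install_commands (a sketch, not part of the change above; `device` is assumed to be a DeviceUtils instance on a userdebug build with chromium_commands already built):

    from devil.android import install_commands

    def ensure_unzip_command(device):
      # Install the chromium_commands helpers only if not already present.
      if not install_commands.Installed(device):
        install_commands.InstallCommands(device)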
diff --git a/catapult/devil/devil/android/logcat_monitor.py b/catapult/devil/devil/android/logcat_monitor.py
new file mode 100644
index 0000000..9ec9412
--- /dev/null
+++ b/catapult/devil/devil/android/logcat_monitor.py
@@ -0,0 +1,242 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-argument
+
+import errno
+import logging
+import os
+import re
+import shutil
+import tempfile
+import threading
+import time
+
+from devil.android import decorators
+from devil.android import device_errors
+from devil.android.sdk import adb_wrapper
+from devil.utils import reraiser_thread
+
+
+class LogcatMonitor(object):
+
+  _RECORD_THREAD_JOIN_WAIT = 2.0
+  _WAIT_TIME = 0.2
+  _THREADTIME_RE_FORMAT = (
+      r'(?P<date>\S*) +(?P<time>\S*) +(?P<proc_id>%s) +(?P<thread_id>%s) +'
+      r'(?P<log_level>%s) +(?P<component>%s) *: +(?P<message>%s)$')
+
+  def __init__(self, adb, clear=True, filter_specs=None, output_file=None):
+    """Create a LogcatMonitor instance.
+
+    Args:
+      adb: An instance of adb_wrapper.AdbWrapper.
+      clear: If True, clear the logcat when monitoring starts.
+      filter_specs: An optional list of '<tag>[:priority]' strings.
+      output_file: File path to save recorded logcat.
+    """
+    if isinstance(adb, adb_wrapper.AdbWrapper):
+      self._adb = adb
+    else:
+      raise ValueError('Unsupported type passed for argument "device"')
+    self._clear = clear
+    self._filter_specs = filter_specs
+    self._output_file = output_file
+    self._record_file = None
+    self._record_file_lock = threading.Lock()
+    self._record_thread = None
+    self._stop_recording_event = threading.Event()
+
+  @property
+  def output_file(self):
+    return self._output_file
+
+  @decorators.WithTimeoutAndRetriesDefaults(10, 0)
+  def WaitFor(self, success_regex, failure_regex=None, timeout=None,
+              retries=None):
+    """Wait for a matching logcat line or until a timeout occurs.
+
+    This will attempt to match lines in the logcat against both |success_regex|
+    and |failure_regex| (if provided). Note that this calls re.search on each
+    logcat line, not re.match, so the provided regular expressions don't have
+    to match an entire line.
+
+    Args:
+      success_regex: The regular expression to search for.
+      failure_regex: An optional regular expression that, if hit, causes this
+        to stop looking for a match. Can be None.
+      timeout: timeout in seconds
+      retries: number of retries
+
+    Returns:
+      A match object if |success_regex| matches a part of a logcat line, or
+      None if |failure_regex| matches a part of a logcat line.
+    Raises:
+      CommandFailedError on logcat failure (NOT on a |failure_regex| match).
+      CommandTimeoutError if no logcat line matching either |success_regex| or
+        |failure_regex| is found in |timeout| seconds.
+      DeviceUnreachableError if the device becomes unreachable.
+      LogcatMonitorCommandError when calling |WaitFor| while not recording
+        logcat.
+    """
+    if self._record_thread is None:
+      raise LogcatMonitorCommandError(
+          'Must be recording logcat when calling |WaitFor|',
+          device_serial=str(self._adb))
+    if isinstance(success_regex, basestring):
+      success_regex = re.compile(success_regex)
+    if isinstance(failure_regex, basestring):
+      failure_regex = re.compile(failure_regex)
+
+    logging.debug('Waiting %d seconds for "%s"', timeout, success_regex.pattern)
+
+    # NOTE This will continue looping until:
+    #  - success_regex matches a line, in which case the match object is
+    #    returned.
+    #  - failure_regex matches a line, in which case None is returned
+    #  - the timeout is hit, in which case a CommandTimeoutError is raised.
+    with open(self._record_file.name, 'r') as f:
+      while True:
+        line = f.readline()
+        if line:
+          m = success_regex.search(line)
+          if m:
+            return m
+          if failure_regex and failure_regex.search(line):
+            return None
+        else:
+          time.sleep(self._WAIT_TIME)
+
+  def FindAll(self, message_regex, proc_id=None, thread_id=None, log_level=None,
+              component=None):
+    """Finds all lines in the logcat that match the provided constraints.
+
+    Args:
+      message_regex: The regular expression that the <message> section must
+        match.
+      proc_id: The process ID to match. If None, matches any process ID.
+      thread_id: The thread ID to match. If None, matches any thread ID.
+      log_level: The log level to match. If None, matches any log level.
+      component: The component to match. If None, matches any component.
+
+    Raises:
+      LogcatMonitorCommandError when calling |FindAll| before recording logcat.
+
+    Yields:
+      A match object for each matching line in the logcat. The match object
+      will always contain, in addition to groups defined in |message_regex|,
+      the following named groups: 'date', 'time', 'proc_id', 'thread_id',
+      'log_level', 'component', and 'message'.
+    """
+    if self._record_file is None:
+      raise LogcatMonitorCommandError(
+          'Must have recorded or be recording a logcat to call |FindAll|',
+          device_serial=str(self._adb))
+    if proc_id is None:
+      proc_id = r'\d+'
+    if thread_id is None:
+      thread_id = r'\d+'
+    if log_level is None:
+      log_level = r'[VDIWEF]'
+    if component is None:
+      component = r'[^\s:]+'
+    # pylint: disable=protected-access
+    threadtime_re = re.compile(
+        type(self)._THREADTIME_RE_FORMAT % (
+            proc_id, thread_id, log_level, component, message_regex))
+
+    with open(self._record_file.name, 'r') as f:
+      for line in f:
+        m = re.match(threadtime_re, line)
+        if m:
+          yield m
+
+  def _StartRecording(self):
+    """Starts recording logcat to file.
+
+    Spawns a thread that records logcat to file; the thread will not die
+    until |_StopRecording| is called.
+    """
+    def record_to_file():
+      # Write the log with line buffering so the consumer sees each individual
+      # line.
+      for data in self._adb.Logcat(filter_specs=self._filter_specs,
+                                   logcat_format='threadtime'):
+        with self._record_file_lock:
+          if self._stop_recording_event.isSet():
+            return
+          if self._record_file and not self._record_file.closed:
+            self._record_file.write(data + '\n')
+
+    self._stop_recording_event.clear()
+    if not self._record_thread:
+      self._record_thread = reraiser_thread.ReraiserThread(record_to_file)
+      self._record_thread.start()
+
+  def _StopRecording(self):
+    """Finish recording logcat."""
+    if self._record_thread:
+      self._stop_recording_event.set()
+      self._record_thread.join(timeout=self._RECORD_THREAD_JOIN_WAIT)
+      self._record_thread.ReraiseIfException()
+      self._record_thread = None
+
+  def Start(self):
+    """Starts the logcat monitor.
+
+    Clears the logcat if |clear| was set in |__init__|.
+    """
+    if self._clear:
+      self._adb.Logcat(clear=True)
+    if not self._record_file:
+      self._record_file = tempfile.NamedTemporaryFile(mode='a', bufsize=1)
+    self._StartRecording()
+
+  def Stop(self):
+    """Stops the logcat monitor.
+
+    Stops recording the logcat. Copies currently recorded logcat to
+    |self._output_file|.
+    """
+    self._StopRecording()
+    with self._record_file_lock:
+      if self._record_file and self._output_file:
+        try:
+          os.makedirs(os.path.dirname(self._output_file))
+        except OSError as e:
+          if e.errno != errno.EEXIST:
+            raise
+        shutil.copy(self._record_file.name, self._output_file)
+
+  def Close(self):
+    """Closes logcat recording file.
+
+    Should be called when finished using the logcat monitor.
+    """
+    with self._record_file_lock:
+      if self._record_file:
+        self._record_file.close()
+        self._record_file = None
+
+  def __enter__(self):
+    """Starts the logcat monitor."""
+    self.Start()
+    return self
+
+  def __exit__(self, exc_type, exc_val, exc_tb):
+    """Stops the logcat monitor."""
+    self.Stop()
+
+  def __del__(self):
+    """Closes logcat recording file in case |Close| was never called."""
+    with self._record_file_lock:
+      if self._record_file:
+        logging.warning(
+            'Need to call |Close| on the logcat monitor when done!')
+        self._record_file.close()
+
+
+class LogcatMonitorCommandError(device_errors.CommandFailedError):
+  """Exception for errors with logcat monitor commands."""
+  pass
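Example usage of LogcatMonitor (a sketch, not part of the change above; `adb` is assumed to be an adb_wrapper.AdbWrapper instance, and the tag and regexes are illustrative):

    from devil.android import logcat_monitor

    def collect_ready_lines(adb):
      # Record logcat while waiting for a "ready" line from an illustrative tag,
      # then scan the recorded log for matching messages before closing.
      with logcat_monitor.LogcatMonitor(adb, filter_specs=['MyApp:V']) as monitor:
        monitor.WaitFor(r'MyApp.*ready')  # uses the default timeout decorator
        matches = list(monitor.FindAll(r'ready', component='MyApp'))
      monitor.Close()
      return matches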
diff --git a/catapult/devil/devil/android/logcat_monitor_test.py b/catapult/devil/devil/android/logcat_monitor_test.py
new file mode 100755
index 0000000..8fb4d74
--- /dev/null
+++ b/catapult/devil/devil/android/logcat_monitor_test.py
@@ -0,0 +1,230 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=protected-access
+
+import itertools
+import threading
+import unittest
+
+from devil import devil_env
+from devil.android import logcat_monitor
+from devil.android.sdk import adb_wrapper
+
+with devil_env.SysPath(devil_env.PYMOCK_PATH):
+  import mock  # pylint: disable=import-error
+
+
+def _CreateTestLog(raw_logcat=None):
+  test_adb = adb_wrapper.AdbWrapper('0123456789abcdef')
+  test_adb.Logcat = mock.Mock(return_value=(l for l in raw_logcat))
+  test_log = logcat_monitor.LogcatMonitor(test_adb, clear=False)
+  return test_log
+
+
+class LogcatMonitorTest(unittest.TestCase):
+
+  _TEST_THREADTIME_LOGCAT_DATA = [
+      '01-01 01:02:03.456  7890  0987 V LogcatMonitorTest: '
+          'verbose logcat monitor test message 1',
+      '01-01 01:02:03.457  8901  1098 D LogcatMonitorTest: '
+          'debug logcat monitor test message 2',
+      '01-01 01:02:03.458  9012  2109 I LogcatMonitorTest: '
+          'info logcat monitor test message 3',
+      '01-01 01:02:03.459  0123  3210 W LogcatMonitorTest: '
+          'warning logcat monitor test message 4',
+      '01-01 01:02:03.460  1234  4321 E LogcatMonitorTest: '
+          'error logcat monitor test message 5',
+      '01-01 01:02:03.461  2345  5432 F LogcatMonitorTest: '
+          'fatal logcat monitor test message 6',
+      '01-01 01:02:03.462  3456  6543 D LogcatMonitorTest: '
+          'last line'
+  ]
+
+  def assertIterEqual(self, expected_iter, actual_iter):
+    for expected, actual in itertools.izip_longest(expected_iter, actual_iter):
+      self.assertIsNotNone(
+          expected,
+          msg='actual has unexpected elements starting with %s' % str(actual))
+      self.assertIsNotNone(
+          actual,
+          msg='actual is missing elements starting with %s' % str(expected))
+      self.assertEqual(actual.group('proc_id'), expected[0])
+      self.assertEqual(actual.group('thread_id'), expected[1])
+      self.assertEqual(actual.group('log_level'), expected[2])
+      self.assertEqual(actual.group('component'), expected[3])
+      self.assertEqual(actual.group('message'), expected[4])
+
+    with self.assertRaises(StopIteration):
+      next(actual_iter)
+    with self.assertRaises(StopIteration):
+      next(expected_iter)
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testWaitFor_success(self):
+    test_log = _CreateTestLog(
+        raw_logcat=type(self)._TEST_THREADTIME_LOGCAT_DATA)
+    test_log.Start()
+    actual_match = test_log.WaitFor(r'.*(fatal|error) logcat monitor.*', None)
+    self.assertTrue(actual_match)
+    self.assertEqual(
+        '01-01 01:02:03.460  1234  4321 E LogcatMonitorTest: '
+            'error logcat monitor test message 5',
+        actual_match.group(0))
+    self.assertEqual('error', actual_match.group(1))
+    test_log.Stop()
+    test_log.Close()
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testWaitFor_failure(self):
+    test_log = _CreateTestLog(
+        raw_logcat=type(self)._TEST_THREADTIME_LOGCAT_DATA)
+    test_log.Start()
+    actual_match = test_log.WaitFor(
+        r'.*My Success Regex.*', r'.*(fatal|error) logcat monitor.*')
+    self.assertIsNone(actual_match)
+    test_log.Stop()
+    test_log.Close()
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testWaitFor_buffering(self):
+    # Simulate an adb log stream which does not complete until the test tells it
+    # to. This checks that the log matcher can receive individual lines from the
+    # log reader thread even if adb is not producing enough output to fill an
+    # entire file io buffer.
+    finished_lock = threading.Lock()
+    finished_lock.acquire()
+
+    def LogGenerator():
+      for line in type(self)._TEST_THREADTIME_LOGCAT_DATA:
+        yield line
+      finished_lock.acquire()
+
+    test_adb = adb_wrapper.AdbWrapper('0123456789abcdef')
+    test_adb.Logcat = mock.Mock(return_value=LogGenerator())
+    test_log = logcat_monitor.LogcatMonitor(test_adb, clear=False)
+    test_log.Start()
+
+    actual_match = test_log.WaitFor(r'.*last line.*', None)
+    finished_lock.release()
+    self.assertTrue(actual_match)
+    test_log.Stop()
+    test_log.Close()
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testFindAll_defaults(self):
+    test_log = _CreateTestLog(
+        raw_logcat=type(self)._TEST_THREADTIME_LOGCAT_DATA)
+    test_log.Start()
+    test_log.WaitFor(r'.*last line.*', None)
+    test_log.Stop()
+    expected_results = [
+        ('7890', '0987', 'V', 'LogcatMonitorTest',
+         'verbose logcat monitor test message 1'),
+        ('8901', '1098', 'D', 'LogcatMonitorTest',
+         'debug logcat monitor test message 2'),
+        ('9012', '2109', 'I', 'LogcatMonitorTest',
+         'info logcat monitor test message 3'),
+        ('0123', '3210', 'W', 'LogcatMonitorTest',
+         'warning logcat monitor test message 4'),
+        ('1234', '4321', 'E', 'LogcatMonitorTest',
+         'error logcat monitor test message 5'),
+        ('2345', '5432', 'F', 'LogcatMonitorTest',
+         'fatal logcat monitor test message 6')]
+    actual_results = test_log.FindAll(r'\S* logcat monitor test message \d')
+    self.assertIterEqual(iter(expected_results), actual_results)
+    test_log.Close()
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testFindAll_defaults_miss(self):
+    test_log = _CreateTestLog(
+        raw_logcat=type(self)._TEST_THREADTIME_LOGCAT_DATA)
+    test_log.Start()
+    test_log.WaitFor(r'.*last line.*', None)
+    test_log.Stop()
+    expected_results = []
+    actual_results = test_log.FindAll(r'\S* nothing should match this \d')
+    self.assertIterEqual(iter(expected_results), actual_results)
+    test_log.Close()
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testFindAll_filterProcId(self):
+    test_log = _CreateTestLog(
+        raw_logcat=type(self)._TEST_THREADTIME_LOGCAT_DATA)
+    test_log.Start()
+    test_log.WaitFor(r'.*last line.*', None)
+    test_log.Stop()
+    actual_results = test_log.FindAll(
+        r'\S* logcat monitor test message \d', proc_id=1234)
+    expected_results = [
+        ('1234', '4321', 'E', 'LogcatMonitorTest',
+         'error logcat monitor test message 5')]
+    self.assertIterEqual(iter(expected_results), actual_results)
+    test_log.Close()
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testFindAll_filterThreadId(self):
+    test_log = _CreateTestLog(
+        raw_logcat=type(self)._TEST_THREADTIME_LOGCAT_DATA)
+    test_log.Start()
+    test_log.WaitFor(r'.*last line.*', None)
+    test_log.Stop()
+    actual_results = test_log.FindAll(
+        r'\S* logcat monitor test message \d', thread_id=2109)
+    expected_results = [
+        ('9012', '2109', 'I', 'LogcatMonitorTest',
+         'info logcat monitor test message 3')]
+    self.assertIterEqual(iter(expected_results), actual_results)
+    test_log.Close()
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testFindAll_filterLogLevel(self):
+    test_log = _CreateTestLog(
+        raw_logcat=type(self)._TEST_THREADTIME_LOGCAT_DATA)
+    test_log.Start()
+    test_log.WaitFor(r'.*last line.*', None)
+    test_log.Stop()
+    actual_results = test_log.FindAll(
+        r'\S* logcat monitor test message \d', log_level=r'[DW]')
+    expected_results = [
+        ('8901', '1098', 'D', 'LogcatMonitorTest',
+         'debug logcat monitor test message 2'),
+        ('0123', '3210', 'W', 'LogcatMonitorTest',
+         'warning logcat monitor test message 4')
+    ]
+    self.assertIterEqual(iter(expected_results), actual_results)
+    test_log.Close()
+
+  @mock.patch('time.sleep', mock.Mock())
+  def testFindAll_filterComponent(self):
+    test_log = _CreateTestLog(
+        raw_logcat=type(self)._TEST_THREADTIME_LOGCAT_DATA)
+    test_log.Start()
+    test_log.WaitFor(r'.*last line.*', None)
+    test_log.Stop()
+    actual_results = test_log.FindAll(r'.*', component='LogcatMonitorTest')
+    expected_results = [
+        ('7890', '0987', 'V', 'LogcatMonitorTest',
+         'verbose logcat monitor test message 1'),
+        ('8901', '1098', 'D', 'LogcatMonitorTest',
+         'debug logcat monitor test message 2'),
+        ('9012', '2109', 'I', 'LogcatMonitorTest',
+         'info logcat monitor test message 3'),
+        ('0123', '3210', 'W', 'LogcatMonitorTest',
+         'warning logcat monitor test message 4'),
+        ('1234', '4321', 'E', 'LogcatMonitorTest',
+         'error logcat monitor test message 5'),
+        ('2345', '5432', 'F', 'LogcatMonitorTest',
+         'fatal logcat monitor test message 6'),
+        ('3456', '6543', 'D', 'LogcatMonitorTest',
+         'last line')
+    ]
+    self.assertIterEqual(iter(expected_results), actual_results)
+    test_log.Close()
+
+
+if __name__ == '__main__':
+  unittest.main(verbosity=2)
+
diff --git a/catapult/devil/devil/android/md5sum.py b/catapult/devil/devil/android/md5sum.py
new file mode 100644
index 0000000..5270646
--- /dev/null
+++ b/catapult/devil/devil/android/md5sum.py
@@ -0,0 +1,120 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import posixpath
+import re
+
+from devil import devil_env
+from devil.android import device_errors
+from devil.utils import cmd_helper
+
+MD5SUM_DEVICE_LIB_PATH = '/data/local/tmp/md5sum'
+MD5SUM_DEVICE_BIN_PATH = MD5SUM_DEVICE_LIB_PATH + '/md5sum_bin'
+
+_STARTS_WITH_CHECKSUM_RE = re.compile(r'^\s*[0-9a-fA-F]{32}\s+')
+
+
+def CalculateHostMd5Sums(paths):
+  """Calculates the MD5 sum value for all items in |paths|.
+
+  Directories are traversed recursively and the MD5 sum of each file found is
+  reported in the result.
+
+  Args:
+    paths: A list of host paths to md5sum.
+  Returns:
+    A dict mapping file paths to their respective md5sum checksums.
+  """
+  if isinstance(paths, basestring):
+    paths = [paths]
+
+  md5sum_bin_host_path = devil_env.config.FetchPath('md5sum_host')
+  if not os.path.exists(md5sum_bin_host_path):
+    raise IOError('File not built: %s' % md5sum_bin_host_path)
+  out = cmd_helper.GetCmdOutput(
+    [md5sum_bin_host_path] + [os.path.realpath(p) for p in paths])
+
+  return _ParseMd5SumOutput(out.splitlines())
+
+
+def CalculateDeviceMd5Sums(paths, device):
+  """Calculates the MD5 sum value for all items in |paths|.
+
+  Directories are traversed recursively and the MD5 sum of each file found is
+  reported in the result.
+
+  Args:
+    paths: A list of device paths to md5sum.
+  Returns:
+    A dict mapping file paths to their respective md5sum checksums.
+  """
+  if not paths:
+    return {}
+
+  if isinstance(paths, basestring):
+    paths = [paths]
+  # Allow generators
+  paths = list(paths)
+
+  md5sum_dist_path = devil_env.config.FetchPath('md5sum_device', device=device)
+
+  if os.path.isdir(md5sum_dist_path):
+    md5sum_dist_bin_path = os.path.join(md5sum_dist_path, 'md5sum_bin')
+  else:
+    md5sum_dist_bin_path = md5sum_dist_path
+
+  if not os.path.exists(md5sum_dist_path):
+    raise IOError('File not built: %s' % md5sum_dist_path)
+  md5sum_file_size = os.path.getsize(md5sum_dist_bin_path)
+
+  # For better performance, make the script as small as possible to try and
+  # avoid needing to write to an intermediary file (which RunShellCommand will
+  # do if necessary).
+  md5sum_script = 'a=%s;' % MD5SUM_DEVICE_BIN_PATH
+  # Check if the binary is missing or has changed (using its file size as an
+  # indicator), and trigger a (re-)push via the exit code.
+  md5sum_script += '! [[ $(ls -l $a) = *%d* ]]&&exit 2;' % md5sum_file_size
+  # Make sure it can find libbase.so
+  md5sum_script += 'export LD_LIBRARY_PATH=%s;' % MD5SUM_DEVICE_LIB_PATH
+  if len(paths) > 1:
+    prefix = posixpath.commonprefix(paths)
+    if len(prefix) > 4:
+      md5sum_script += 'p="%s";' % prefix
+      paths = ['$p"%s"' % p[len(prefix):] for p in paths]
+
+  md5sum_script += ';'.join('$a %s' % p for p in paths)
+  # Don't fail the script if the last md5sum fails (due to file not found)
+  # Note: ":" is equivalent to "true".
+  md5sum_script += ';:'
+  try:
+    out = device.RunShellCommand(md5sum_script, check_return=True)
+  except device_errors.AdbShellCommandFailedError as e:
+    # Push the binary only if it is found to not exist
+    # (faster than checking up-front).
+    if e.status == 2:
+      # If files were previously pushed as root (adbd running as root), trying
+      # to re-push as non-root causes the push command to report success, but
+      # actually fail. So, wipe the directory first.
+      device.RunShellCommand(['rm', '-rf', MD5SUM_DEVICE_LIB_PATH],
+                             as_root=True, check_return=True)
+      if os.path.isdir(md5sum_dist_path):
+        device.adb.Push(md5sum_dist_path, MD5SUM_DEVICE_LIB_PATH)
+      else:
+        mkdir_cmd = 'a=%s;[[ -e $a ]] || mkdir $a' % MD5SUM_DEVICE_LIB_PATH
+        device.RunShellCommand(mkdir_cmd, check_return=True)
+        device.adb.Push(md5sum_dist_bin_path, MD5SUM_DEVICE_BIN_PATH)
+
+      out = device.RunShellCommand(md5sum_script, check_return=True)
+    else:
+      raise
+
+  return _ParseMd5SumOutput(out)
+
+
+def _ParseMd5SumOutput(out):
+  hash_and_path = (l.split(None, 1) for l in out
+                   if l and _STARTS_WITH_CHECKSUM_RE.match(l))
+  return dict((p, h) for h, p in hash_and_path)
+
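Example usage of md5sum (a sketch, not part of the change above; `device` is assumed to be a DeviceUtils instance and the paths are illustrative):

    import os

    from devil.android import md5sum

    def files_match(device, host_path, device_path):
      # Compare a host file against its device-side copy by MD5 checksum. The
      # host helper is invoked with os.path.realpath(p), so results are keyed
      # by the resolved host path.
      host_sums = md5sum.CalculateHostMd5Sums([host_path])
      device_sums = md5sum.CalculateDeviceMd5Sums([device_path], device)
      host_sum = host_sums.get(os.path.realpath(host_path))
      return host_sum is not None and host_sum == device_sums.get(device_path)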
diff --git a/catapult/devil/devil/android/md5sum_test.py b/catapult/devil/devil/android/md5sum_test.py
new file mode 100755
index 0000000..c9b4954
--- /dev/null
+++ b/catapult/devil/devil/android/md5sum_test.py
@@ -0,0 +1,237 @@
+#!/usr/bin/env python
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import unittest
+
+from devil import devil_env
+from devil.android import device_errors
+from devil.android import md5sum
+
+with devil_env.SysPath(devil_env.PYMOCK_PATH):
+  import mock  # pylint: disable=import-error
+
+TEST_OUT_DIR = os.path.join('test', 'out', 'directory')
+HOST_MD5_EXECUTABLE = os.path.join(TEST_OUT_DIR, 'md5sum_bin_host')
+MD5_DIST = os.path.join(TEST_OUT_DIR, 'md5sum_dist')
+
+
+class Md5SumTest(unittest.TestCase):
+
+  def setUp(self):
+    mocked_attrs = {
+      'md5sum_host': HOST_MD5_EXECUTABLE,
+      'md5sum_device': MD5_DIST,
+    }
+    self._patchers = [
+      mock.patch('devil.devil_env._Environment.FetchPath',
+                 mock.Mock(side_effect=lambda a, device=None: mocked_attrs[a])),
+      mock.patch('os.path.exists',
+                 new=mock.Mock(return_value=True)),
+    ]
+    for p in self._patchers:
+      p.start()
+
+  def tearDown(self):
+    for p in self._patchers:
+      p.stop()
+
+  def testCalculateHostMd5Sums_singlePath(self):
+    test_path = '/test/host/file.dat'
+    mock_get_cmd_output = mock.Mock(
+        return_value='0123456789abcdeffedcba9876543210 /test/host/file.dat')
+    with mock.patch('devil.utils.cmd_helper.GetCmdOutput',
+                    new=mock_get_cmd_output):
+      out = md5sum.CalculateHostMd5Sums(test_path)
+      self.assertEquals(1, len(out))
+      self.assertTrue('/test/host/file.dat' in out)
+      self.assertEquals('0123456789abcdeffedcba9876543210',
+                        out['/test/host/file.dat'])
+      mock_get_cmd_output.assert_called_once_with(
+          [HOST_MD5_EXECUTABLE, '/test/host/file.dat'])
+
+  def testCalculateHostMd5Sums_list(self):
+    test_paths = ['/test/host/file0.dat', '/test/host/file1.dat']
+    mock_get_cmd_output = mock.Mock(
+        return_value='0123456789abcdeffedcba9876543210 /test/host/file0.dat\n'
+                     '123456789abcdef00fedcba987654321 /test/host/file1.dat\n')
+    with mock.patch('devil.utils.cmd_helper.GetCmdOutput',
+                    new=mock_get_cmd_output):
+      out = md5sum.CalculateHostMd5Sums(test_paths)
+      self.assertEquals(2, len(out))
+      self.assertTrue('/test/host/file0.dat' in out)
+      self.assertEquals('0123456789abcdeffedcba9876543210',
+                        out['/test/host/file0.dat'])
+      self.assertTrue('/test/host/file1.dat' in out)
+      self.assertEquals('123456789abcdef00fedcba987654321',
+                        out['/test/host/file1.dat'])
+      mock_get_cmd_output.assert_called_once_with(
+          [HOST_MD5_EXECUTABLE, '/test/host/file0.dat',
+           '/test/host/file1.dat'])
+
+  def testCalculateHostMd5Sums_generator(self):
+    test_paths = ('/test/host/' + p for p in ['file0.dat', 'file1.dat'])
+    mock_get_cmd_output = mock.Mock(
+        return_value='0123456789abcdeffedcba9876543210 /test/host/file0.dat\n'
+                     '123456789abcdef00fedcba987654321 /test/host/file1.dat\n')
+    with mock.patch('devil.utils.cmd_helper.GetCmdOutput',
+                    new=mock_get_cmd_output):
+      out = md5sum.CalculateHostMd5Sums(test_paths)
+      self.assertEquals(2, len(out))
+      self.assertTrue('/test/host/file0.dat' in out)
+      self.assertEquals('0123456789abcdeffedcba9876543210',
+                        out['/test/host/file0.dat'])
+      self.assertTrue('/test/host/file1.dat' in out)
+      self.assertEquals('123456789abcdef00fedcba987654321',
+                        out['/test/host/file1.dat'])
+      mock_get_cmd_output.assert_called_once_with(
+          [HOST_MD5_EXECUTABLE, '/test/host/file0.dat', '/test/host/file1.dat'])
+
+  def testCalculateDeviceMd5Sums_noPaths(self):
+    device = mock.NonCallableMock()
+    device.RunShellCommand = mock.Mock(side_effect=Exception())
+
+    out = md5sum.CalculateDeviceMd5Sums([], device)
+    self.assertEquals(0, len(out))
+
+  def testCalculateDeviceMd5Sums_singlePath(self):
+    test_path = '/storage/emulated/legacy/test/file.dat'
+
+    device = mock.NonCallableMock()
+    device_md5sum_output = [
+        '0123456789abcdeffedcba9876543210 '
+            '/storage/emulated/legacy/test/file.dat',
+    ]
+    device.RunShellCommand = mock.Mock(return_value=device_md5sum_output)
+
+    with mock.patch('os.path.getsize', return_value=1337):
+      out = md5sum.CalculateDeviceMd5Sums(test_path, device)
+      self.assertEquals(1, len(out))
+      self.assertTrue('/storage/emulated/legacy/test/file.dat' in out)
+      self.assertEquals('0123456789abcdeffedcba9876543210',
+                        out['/storage/emulated/legacy/test/file.dat'])
+      self.assertEquals(1, len(device.RunShellCommand.call_args_list))
+
+  def testCalculateDeviceMd5Sums_list(self):
+    test_path = ['/storage/emulated/legacy/test/file0.dat',
+                 '/storage/emulated/legacy/test/file1.dat']
+    device = mock.NonCallableMock()
+    device_md5sum_output = [
+        '0123456789abcdeffedcba9876543210 '
+            '/storage/emulated/legacy/test/file0.dat',
+        '123456789abcdef00fedcba987654321 '
+            '/storage/emulated/legacy/test/file1.dat',
+    ]
+    device.RunShellCommand = mock.Mock(return_value=device_md5sum_output)
+
+    with mock.patch('os.path.getsize', return_value=1337):
+      out = md5sum.CalculateDeviceMd5Sums(test_path, device)
+      self.assertEquals(2, len(out))
+      self.assertTrue('/storage/emulated/legacy/test/file0.dat' in out)
+      self.assertEquals('0123456789abcdeffedcba9876543210',
+                        out['/storage/emulated/legacy/test/file0.dat'])
+      self.assertTrue('/storage/emulated/legacy/test/file1.dat' in out)
+      self.assertEquals('123456789abcdef00fedcba987654321',
+                        out['/storage/emulated/legacy/test/file1.dat'])
+      self.assertEquals(1, len(device.RunShellCommand.call_args_list))
+
+  def testCalculateDeviceMd5Sums_generator(self):
+    test_path = ('/storage/emulated/legacy/test/file%d.dat' % n
+                 for n in xrange(0, 2))
+
+    device = mock.NonCallableMock()
+    device_md5sum_output = [
+        '0123456789abcdeffedcba9876543210 '
+            '/storage/emulated/legacy/test/file0.dat',
+        '123456789abcdef00fedcba987654321 '
+            '/storage/emulated/legacy/test/file1.dat',
+    ]
+    device.RunShellCommand = mock.Mock(return_value=device_md5sum_output)
+
+    with mock.patch('os.path.getsize', return_value=1337):
+      out = md5sum.CalculateDeviceMd5Sums(test_path, device)
+      self.assertEquals(2, len(out))
+      self.assertTrue('/storage/emulated/legacy/test/file0.dat' in out)
+      self.assertEquals('0123456789abcdeffedcba9876543210',
+                        out['/storage/emulated/legacy/test/file0.dat'])
+      self.assertTrue('/storage/emulated/legacy/test/file1.dat' in out)
+      self.assertEquals('123456789abcdef00fedcba987654321',
+                        out['/storage/emulated/legacy/test/file1.dat'])
+      self.assertEquals(1, len(device.RunShellCommand.call_args_list))
+
+  def testCalculateDeviceMd5Sums_singlePath_linkerWarning(self):
+    # See crbug/479966
+    test_path = '/storage/emulated/legacy/test/file.dat'
+
+    device = mock.NonCallableMock()
+    device_md5sum_output = [
+        'WARNING: linker: /data/local/tmp/md5sum/md5sum_bin: '
+            'unused DT entry: type 0x1d arg 0x15db',
+        'THIS_IS_NOT_A_VALID_CHECKSUM_ZZZ some random text',
+        '0123456789abcdeffedcba9876543210 '
+            '/storage/emulated/legacy/test/file.dat',
+    ]
+    device.RunShellCommand = mock.Mock(return_value=device_md5sum_output)
+
+    with mock.patch('os.path.getsize', return_value=1337):
+      out = md5sum.CalculateDeviceMd5Sums(test_path, device)
+      self.assertEquals(1, len(out))
+      self.assertTrue('/storage/emulated/legacy/test/file.dat' in out)
+      self.assertEquals('0123456789abcdeffedcba9876543210',
+                        out['/storage/emulated/legacy/test/file.dat'])
+      self.assertEquals(1, len(device.RunShellCommand.call_args_list))
+
+  def testCalculateDeviceMd5Sums_list_fileMissing(self):
+    test_path = ['/storage/emulated/legacy/test/file0.dat',
+                 '/storage/emulated/legacy/test/file1.dat']
+    device = mock.NonCallableMock()
+    device_md5sum_output = [
+        '0123456789abcdeffedcba9876543210 '
+            '/storage/emulated/legacy/test/file0.dat',
+        '[0819/203513:ERROR:md5sum.cc(25)] Could not open file asdf',
+        '',
+    ]
+    device.RunShellCommand = mock.Mock(return_value=device_md5sum_output)
+
+    with mock.patch('os.path.getsize', return_value=1337):
+      out = md5sum.CalculateDeviceMd5Sums(test_path, device)
+      self.assertEquals(1, len(out))
+      self.assertTrue('/storage/emulated/legacy/test/file0.dat' in out)
+      self.assertEquals('0123456789abcdeffedcba9876543210',
+                        out['/storage/emulated/legacy/test/file0.dat'])
+      self.assertEquals(1, len(device.RunShellCommand.call_args_list))
+
+  def testCalculateDeviceMd5Sums_requiresBinary(self):
+    test_path = '/storage/emulated/legacy/test/file.dat'
+
+    device = mock.NonCallableMock()
+    device.adb = mock.NonCallableMock()
+    device.adb.Push = mock.Mock()
+    device_md5sum_output = [
+        'WARNING: linker: /data/local/tmp/md5sum/md5sum_bin: '
+            'unused DT entry: type 0x1d arg 0x15db',
+        'THIS_IS_NOT_A_VALID_CHECKSUM_ZZZ some random text',
+        '0123456789abcdeffedcba9876543210 '
+            '/storage/emulated/legacy/test/file.dat',
+    ]
+    error = device_errors.AdbShellCommandFailedError('cmd', 'out', 2)
+    device.RunShellCommand = mock.Mock(
+        side_effect=(error, '', device_md5sum_output))
+
+    with mock.patch('os.path.isdir', return_value=True), (
+         mock.patch('os.path.getsize', return_value=1337)):
+      out = md5sum.CalculateDeviceMd5Sums(test_path, device)
+      self.assertEquals(1, len(out))
+      self.assertTrue('/storage/emulated/legacy/test/file.dat' in out)
+      self.assertEquals('0123456789abcdeffedcba9876543210',
+                        out['/storage/emulated/legacy/test/file.dat'])
+      self.assertEquals(3, len(device.RunShellCommand.call_args_list))
+      device.adb.Push.assert_called_once_with(
+          'test/out/directory/md5sum_dist', '/data/local/tmp/md5sum')
+
+
+if __name__ == '__main__':
+  unittest.main(verbosity=2)
+
diff --git a/catapult/devil/devil/android/perf/__init__.py b/catapult/devil/devil/android/perf/__init__.py
new file mode 100644
index 0000000..50b23df
--- /dev/null
+++ b/catapult/devil/devil/android/perf/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/catapult/devil/devil/android/perf/cache_control.py b/catapult/devil/devil/android/perf/cache_control.py
new file mode 100644
index 0000000..7bd0a4e
--- /dev/null
+++ b/catapult/devil/devil/android/perf/cache_control.py
@@ -0,0 +1,16 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class CacheControl(object):
+  _DROP_CACHES = '/proc/sys/vm/drop_caches'
+
+  def __init__(self, device):
+    self._device = device
+
+  def DropRamCaches(self):
+    """Drops the filesystem ram caches for performance testing."""
+    self._device.RunShellCommand('sync', as_root=True)
+    self._device.WriteFile(CacheControl._DROP_CACHES, '3', as_root=True)
+
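Example usage of CacheControl (a sketch, not part of the change above; `device` is assumed to be a rooted DeviceUtils instance):

    from devil.android.perf import cache_control

    def drop_ram_caches(device):
      # Sync pending writes and drop the filesystem caches before a perf run.
      cache_control.CacheControl(device).DropRamCaches()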
diff --git a/catapult/devil/devil/android/perf/perf_control.py b/catapult/devil/devil/android/perf/perf_control.py
new file mode 100644
index 0000000..af1d52c
--- /dev/null
+++ b/catapult/devil/devil/android/perf/perf_control.py
@@ -0,0 +1,156 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import atexit
+import logging
+
+from devil.android import device_errors
+
+
+class PerfControl(object):
+  """Provides methods for setting the performance mode of a device."""
+  _CPU_PATH = '/sys/devices/system/cpu'
+  _KERNEL_MAX = '/sys/devices/system/cpu/kernel_max'
+
+  def __init__(self, device):
+    self._device = device
+    # this will raise an AdbCommandFailedError if no CPU files are found
+    self._cpu_files = self._device.RunShellCommand(
+        'ls -d cpu[0-9]*', cwd=self._CPU_PATH, check_return=True, as_root=True)
+    assert self._cpu_files, 'Failed to detect CPUs.'
+    self._cpu_file_list = ' '.join(self._cpu_files)
+    logging.info('CPUs found: %s', self._cpu_file_list)
+    self._have_mpdecision = self._device.FileExists('/system/bin/mpdecision')
+
+  def SetHighPerfMode(self):
+    """Sets the highest stable performance mode for the device."""
+    try:
+      self._device.EnableRoot()
+    except device_errors.CommandFailedError:
+      message = 'Need root for performance mode. Results may be NOISY!!'
+      logging.warning(message)
+      # Add an additional warning at exit, such that it's clear that any results
+      # may be different/noisy (due to the lack of intended performance mode).
+      atexit.register(logging.warning, message)
+      return
+
+    product_model = self._device.product_model
+    # TODO(epenner): Enable on all devices (http://crbug.com/383566)
+    if 'Nexus 4' == product_model:
+      self._ForceAllCpusOnline(True)
+      if not self._AllCpusAreOnline():
+        logging.warning('Failed to force CPUs online. Results may be NOISY!')
+      self._SetScalingGovernorInternal('performance')
+    elif 'Nexus 5' == product_model:
+      self._ForceAllCpusOnline(True)
+      if not self._AllCpusAreOnline():
+        logging.warning('Failed to force CPUs online. Results may be NOISY!')
+      self._SetScalingGovernorInternal('performance')
+      self._SetScalingMaxFreq(1190400)
+      self._SetMaxGpuClock(200000000)
+    else:
+      self._SetScalingGovernorInternal('performance')
+
+  def SetPerfProfilingMode(self):
+    """Enables all cores for reliable perf profiling."""
+    self._ForceAllCpusOnline(True)
+    self._SetScalingGovernorInternal('performance')
+    if not self._AllCpusAreOnline():
+      if not self._device.HasRoot():
+        raise RuntimeError('Need root to force CPUs online.')
+      raise RuntimeError('Failed to force CPUs online.')
+
+  def SetDefaultPerfMode(self):
+    """Sets the performance mode for the device to its default mode."""
+    if not self._device.HasRoot():
+      return
+    product_model = self._device.product_model
+    if 'Nexus 5' == product_model:
+      if self._AllCpusAreOnline():
+        self._SetScalingMaxFreq(2265600)
+        self._SetMaxGpuClock(450000000)
+
+    governor_mode = {
+        'GT-I9300': 'pegasusq',
+        'Galaxy Nexus': 'interactive',
+        'Nexus 4': 'ondemand',
+        'Nexus 5': 'ondemand',
+        'Nexus 7': 'interactive',
+        'Nexus 10': 'interactive'
+    }.get(product_model, 'ondemand')
+    self._SetScalingGovernorInternal(governor_mode)
+    self._ForceAllCpusOnline(False)
+
+  def GetCpuInfo(self):
+    online = (output.rstrip() == '1' and status == 0
+              for (_, output, status) in self._ForEachCpu('cat "$CPU/online"'))
+    governor = (output.rstrip() if status == 0 else None
+                for (_, output, status)
+                in self._ForEachCpu('cat "$CPU/cpufreq/scaling_governor"'))
+    return zip(self._cpu_files, online, governor)
+
+  def _ForEachCpu(self, cmd):
+    script = '; '.join([
+        'for CPU in %s' % self._cpu_file_list,
+        'do %s' % cmd,
+        'echo -n "%~%$?%~%"',
+        'done'
+    ])
+    output = self._device.RunShellCommand(
+        script, cwd=self._CPU_PATH, check_return=True, as_root=True)
+    output = '\n'.join(output).split('%~%')
+    return zip(self._cpu_files, output[0::2], (int(c) for c in output[1::2]))
+
+  def _WriteEachCpuFile(self, path, value):
+    results = self._ForEachCpu(
+        'test -e "$CPU/{path}" && echo {value} > "$CPU/{path}"'.format(
+            path=path, value=value))
+    cpus = ' '.join(cpu for (cpu, _, status) in results if status == 0)
+    if cpus:
+      logging.info('Successfully set %s to %r on: %s', path, value, cpus)
+    else:
+      logging.warning('Failed to set %s to %r on any cpus', path, value)
+
+  def _SetScalingGovernorInternal(self, value):
+    self._WriteEachCpuFile('cpufreq/scaling_governor', value)
+
+  def _SetScalingMaxFreq(self, value):
+    self._WriteEachCpuFile('cpufreq/scaling_max_freq', '%d' % value)
+
+  def _SetMaxGpuClock(self, value):
+    self._device.WriteFile('/sys/class/kgsl/kgsl-3d0/max_gpuclk',
+                           str(value),
+                           as_root=True)
+
+  def _AllCpusAreOnline(self):
+    results = self._ForEachCpu('cat "$CPU/online"')
+    # TODO(epenner): Investigate why file may be missing
+    # (http://crbug.com/397118)
+    return all(output.rstrip() == '1' and status == 0
+               for (cpu, output, status) in results
+               if cpu != 'cpu0')
+
+  def _ForceAllCpusOnline(self, force_online):
+    """Enable all CPUs on a device.
+
+    Some vendors (or only Qualcomm?) hot-plug their CPUs, which can add noise
+    to measurements:
+    - In perf, samples are only taken for the CPUs that are online when the
+      measurement is started.
+    - The scaling governor can't be set for an offline CPU and frequency scaling
+      on newly enabled CPUs adds noise to both perf and tracing measurements.
+
+    It appears Qualcomm is the only vendor that hot-plugs CPUs, and on Qualcomm
+    this is done by "mpdecision".
+
+    """
+    if self._have_mpdecision:
+      script = 'stop mpdecision' if force_online else 'start mpdecision'
+      self._device.RunShellCommand(script, check_return=True, as_root=True)
+
+    if not self._have_mpdecision and not self._AllCpusAreOnline():
+      logging.warning('Unexpected cpu hot plugging detected.')
+
+    if force_online:
+      self._ForEachCpu('echo 1 > "$CPU/online"')
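A minimal usage sketch of PerfControl, assuming devil is importable and one healthy device is attached; the try/finally pattern mirrors the device test that follows:

from devil.android import device_utils
from devil.android.perf import perf_control

device = device_utils.DeviceUtils.HealthyDevices(blacklist=None)[0]
perf = perf_control.PerfControl(device)
try:
  perf.SetHighPerfMode()
  # ... run the measurement here ...
  for cpu, online, governor in perf.GetCpuInfo():
    print '%s: online=%s governor=%s' % (cpu, online, governor)
finally:
  perf.SetDefaultPerfMode()
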
diff --git a/catapult/devil/devil/android/perf/perf_control_devicetest.py b/catapult/devil/devil/android/perf/perf_control_devicetest.py
new file mode 100644
index 0000000..71bf3fb
--- /dev/null
+++ b/catapult/devil/devil/android/perf/perf_control_devicetest.py
@@ -0,0 +1,39 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+# pylint: disable=W0212
+
+import os
+import sys
+import unittest
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
+
+from devil.android import device_utils
+from devil.android.perf import perf_control
+
+
+class TestPerfControl(unittest.TestCase):
+
+  def setUp(self):
+    if not os.getenv('BUILDTYPE'):
+      os.environ['BUILDTYPE'] = 'Debug'
+
+    devices = device_utils.DeviceUtils.HealthyDevices(blacklist=None)
+    self.assertGreater(len(devices), 0, 'No device attached!')
+    self._device = devices[0]
+
+  def testHighPerfMode(self):
+    perf = perf_control.PerfControl(self._device)
+    try:
+      perf.SetPerfProfilingMode()
+      cpu_info = perf.GetCpuInfo()
+      self.assertEquals(len(perf._cpu_files), len(cpu_info))
+      for _, online, governor in cpu_info:
+        self.assertTrue(online)
+        self.assertEquals('performance', governor)
+    finally:
+      perf.SetDefaultPerfMode()
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/catapult/devil/devil/android/perf/surface_stats_collector.py b/catapult/devil/devil/android/perf/surface_stats_collector.py
new file mode 100644
index 0000000..49372ad
--- /dev/null
+++ b/catapult/devil/devil/android/perf/surface_stats_collector.py
@@ -0,0 +1,183 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import Queue
+import threading
+
+
+# Log marker containing SurfaceTexture timestamps.
+_SURFACE_TEXTURE_TIMESTAMPS_MESSAGE = 'SurfaceTexture update timestamps'
+_SURFACE_TEXTURE_TIMESTAMP_RE = r'\d+'
+
+
+class SurfaceStatsCollector(object):
+  """Collects surface stats for a SurfaceView from the output of SurfaceFlinger.
+
+  Args:
+    device: A DeviceUtils instance.
+  """
+
+  def __init__(self, device):
+    self._device = device
+    self._collector_thread = None
+    self._surface_before = None
+    self._get_data_event = None
+    self._data_queue = None
+    self._stop_event = None
+    self._warn_about_empty_data = True
+
+  def DisableWarningAboutEmptyData(self):
+    self._warn_about_empty_data = False
+
+  def Start(self):
+    assert not self._collector_thread
+
+    if self._ClearSurfaceFlingerLatencyData():
+      self._get_data_event = threading.Event()
+      self._stop_event = threading.Event()
+      self._data_queue = Queue.Queue()
+      self._collector_thread = threading.Thread(target=self._CollectorThread)
+      self._collector_thread.start()
+    else:
+      raise Exception('SurfaceFlinger not supported on this device.')
+
+  def Stop(self):
+    assert self._collector_thread
+    (refresh_period, timestamps) = self._GetDataFromThread()
+    if self._collector_thread:
+      self._stop_event.set()
+      self._collector_thread.join()
+      self._collector_thread = None
+    return (refresh_period, timestamps)
+
+  def _CollectorThread(self):
+    last_timestamp = 0
+    timestamps = []
+    retries = 0
+
+    while not self._stop_event.is_set():
+      self._get_data_event.wait(1)
+      try:
+        refresh_period, new_timestamps = self._GetSurfaceFlingerFrameData()
+        if refresh_period is None or new_timestamps is None:
+          retries += 1
+          if retries < 3:
+            continue
+          if last_timestamp:
+            # Some data has already been collected, but either the app
+            # was closed or there's no new data. Signal the main thread and
+            # wait.
+            self._data_queue.put((None, None))
+            self._stop_event.wait()
+            break
+          raise Exception('Unable to get surface flinger latency data')
+
+        timestamps += [timestamp for timestamp in new_timestamps
+                       if timestamp > last_timestamp]
+        if len(timestamps):
+          last_timestamp = timestamps[-1]
+
+        if self._get_data_event.is_set():
+          self._get_data_event.clear()
+          self._data_queue.put((refresh_period, timestamps))
+          timestamps = []
+      except Exception as e:
+        # On any error, before aborting, put the exception into _data_queue to
+        # prevent the main thread from waiting at _data_queue.get() infinitely.
+        self._data_queue.put(e)
+        raise
+
+  def _GetDataFromThread(self):
+    self._get_data_event.set()
+    ret = self._data_queue.get()
+    if isinstance(ret, Exception):
+      raise ret
+    return ret
+
+  def _ClearSurfaceFlingerLatencyData(self):
+    """Clears the SurfaceFlinger latency data.
+
+    Returns:
+      True if SurfaceFlinger latency is supported by the device, otherwise
+      False.
+    """
+    # The command returns nothing if it is supported, otherwise returns many
+    # lines of result just like 'dumpsys SurfaceFlinger'.
+    results = self._device.RunShellCommand(
+        'dumpsys SurfaceFlinger --latency-clear SurfaceView')
+    return not len(results)
+
+  def GetSurfaceFlingerPid(self):
+    results = self._device.RunShellCommand('ps | grep surfaceflinger')
+    if not results:
+      raise Exception('Unable to get surface flinger process id')
+    pid = results[0].split()[1]
+    return pid
+
+  def _GetSurfaceFlingerFrameData(self):
+    """Returns collected SurfaceFlinger frame timing data.
+
+    Returns:
+      A tuple containing:
+      - The display's nominal refresh period in milliseconds.
+      - A list of timestamps signifying frame presentation times in
+        milliseconds.
+      The return value may be (None, None) if there was no data collected (for
+      example, if the app was closed before the collector thread has finished).
+    """
+    # adb shell dumpsys SurfaceFlinger --latency <window name>
+    # prints some information about the last 128 frames displayed in
+    # that window.
+    # The data returned looks like this:
+    # 16954612
+    # 7657467895508   7657482691352   7657493499756
+    # 7657484466553   7657499645964   7657511077881
+    # 7657500793457   7657516600576   7657527404785
+    # (...)
+    #
+    # The first line is the refresh period (here 16.95 ms); it is followed
+    # by 128 lines with 3 timestamps in nanoseconds each:
+    # A) when the app started to draw
+    # B) the vsync immediately preceding SF submitting the frame to the h/w
+    # C) timestamp immediately after SF submitted that frame to the h/w
+    #
+    # The difference between the 1st and 3rd timestamp is the frame-latency.
+    # An interesting data point is when the frame latency crosses a refresh
+    # period boundary; this can be calculated this way:
+    #
+    # ceil((C - A) / refresh-period)
+    #
+    # (each time the number above changes, we have a "jank").
+    # If this happens a lot during an animation, the animation appears
+    # janky, even if it runs at 60 fps on average.
+    #
+    # We use the special "SurfaceView" window name because the statistics for
+    # the activity's main window are not updated when the main web content is
+    # composited into a SurfaceView.
+    results = self._device.RunShellCommand(
+        'dumpsys SurfaceFlinger --latency SurfaceView')
+    if not len(results):
+      return (None, None)
+
+    timestamps = []
+    nanoseconds_per_millisecond = 1e6
+    refresh_period = long(results[0]) / nanoseconds_per_millisecond
+
+    # If a fence associated with a frame is still pending when we query the
+    # latency data, SurfaceFlinger gives the frame a timestamp of INT64_MAX.
+    # Since we only care about completed frames, we will ignore any timestamps
+    # with this value.
+    pending_fence_timestamp = (1 << 63) - 1
+
+    for line in results[1:]:
+      fields = line.split()
+      if len(fields) != 3:
+        continue
+      timestamp = long(fields[1])
+      if timestamp == pending_fence_timestamp:
+        continue
+      timestamp /= nanoseconds_per_millisecond
+      timestamps.append(timestamp)
+
+    return (refresh_period, timestamps)
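A worked example of the jank criterion described in the comment above, using the three sample dumpsys lines quoted there (all values in nanoseconds):

import math

refresh_period = 16954612  # First line of the sample output above.
frames = [  # (A, B, C) timestamp triples from the sample lines above.
    (7657467895508, 7657482691352, 7657493499756),
    (7657484466553, 7657499645964, 7657511077881),
    (7657500793457, 7657516600576, 7657527404785),
]
latencies = [int(math.ceil(float(c - a) / refresh_period)) for a, _, c in frames]
# latencies == [2, 2, 2]; a jank is counted each time this value changes.
janks = sum(1 for prev, cur in zip(latencies, latencies[1:]) if cur != prev)
print 'frame latencies (in refresh periods): %r, janks: %d' % (latencies, janks)
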
diff --git a/catapult/devil/devil/android/perf/thermal_throttle.py b/catapult/devil/devil/android/perf/thermal_throttle.py
new file mode 100644
index 0000000..9aad4bb
--- /dev/null
+++ b/catapult/devil/devil/android/perf/thermal_throttle.py
@@ -0,0 +1,132 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+
+
+class OmapThrottlingDetector(object):
+  """Class to detect and track thermal throttling on an OMAP 4."""
+  OMAP_TEMP_FILE = ('/sys/devices/platform/omap/omap_temp_sensor.0/'
+                    'temperature')
+
+  @staticmethod
+  def IsSupported(device):
+    return device.FileExists(OmapThrottlingDetector.OMAP_TEMP_FILE)
+
+  def __init__(self, device):
+    self._device = device
+
+  @staticmethod
+  def BecameThrottled(log_line):
+    return 'omap_thermal_throttle' in log_line
+
+  @staticmethod
+  def BecameUnthrottled(log_line):
+    return 'omap_thermal_unthrottle' in log_line
+
+  @staticmethod
+  def GetThrottlingTemperature(log_line):
+    if 'throttle_delayed_work_fn' in log_line:
+      return float([s for s in log_line.split() if s.isdigit()][0]) / 1000.0
+
+  def GetCurrentTemperature(self):
+    tempdata = self._device.ReadFile(OmapThrottlingDetector.OMAP_TEMP_FILE)
+    return float(tempdata) / 1000.0
+
+
+class ExynosThrottlingDetector(object):
+  """Class to detect and track thermal throttling on an Exynos 5."""
+  @staticmethod
+  def IsSupported(device):
+    return device.FileExists('/sys/bus/exynos5-core')
+
+  def __init__(self, device):
+    pass
+
+  @staticmethod
+  def BecameThrottled(log_line):
+    return 'exynos_tmu: Throttling interrupt' in log_line
+
+  @staticmethod
+  def BecameUnthrottled(log_line):
+    return 'exynos_thermal_unthrottle: not throttling' in log_line
+
+  @staticmethod
+  def GetThrottlingTemperature(_log_line):
+    return None
+
+  @staticmethod
+  def GetCurrentTemperature():
+    return None
+
+
+class ThermalThrottle(object):
+  """Class to detect and track thermal throttling.
+
+  Usage:
+    Wait for IsThrottled() to be False before running test
+    After running test call HasBeenThrottled() to find out if the
+    test run was affected by thermal throttling.
+  """
+
+  def __init__(self, device):
+    self._device = device
+    self._throttled = False
+    self._detector = None
+    if OmapThrottlingDetector.IsSupported(device):
+      self._detector = OmapThrottlingDetector(device)
+    elif ExynosThrottlingDetector.IsSupported(device):
+      self._detector = ExynosThrottlingDetector(device)
+
+  def HasBeenThrottled(self):
+    """True if there has been any throttling since the last call to
+       HasBeenThrottled or IsThrottled.
+    """
+    return self._ReadLog()
+
+  def IsThrottled(self):
+    """True if currently throttled."""
+    self._ReadLog()
+    return self._throttled
+
+  def _ReadLog(self):
+    if not self._detector:
+      return False
+    has_been_throttled = False
+    serial_number = str(self._device)
+    log = self._device.RunShellCommand('dmesg -c')
+    degree_symbol = unichr(0x00B0)
+    for line in log:
+      if self._detector.BecameThrottled(line):
+        if not self._throttled:
+          logging.warning('>>> Device %s thermally throttled', serial_number)
+        self._throttled = True
+        has_been_throttled = True
+      elif self._detector.BecameUnthrottled(line):
+        if self._throttled:
+          logging.warning('>>> Device %s thermally unthrottled', serial_number)
+        self._throttled = False
+        has_been_throttled = True
+      temperature = self._detector.GetThrottlingTemperature(line)
+      if temperature is not None:
+        logging.info(u'Device %s thermally throttled at %3.1f%sC',
+                     serial_number, temperature, degree_symbol)
+
+    if logging.getLogger().isEnabledFor(logging.DEBUG):
+      # Print current temperature of CPU SoC.
+      temperature = self._detector.GetCurrentTemperature()
+      if temperature is not None:
+        logging.debug(u'Current SoC temperature of %s = %3.1f%sC',
+                      serial_number, temperature, degree_symbol)
+
+      # Print temperature of battery, to give a system temperature
+      dumpsys_log = self._device.RunShellCommand('dumpsys battery')
+      for line in dumpsys_log:
+        if 'temperature' in line:
+          btemp = float([s for s in line.split() if s.isdigit()][0]) / 10.0
+          logging.debug(u'Current battery temperature of %s = %3.1f%sC',
+                        serial_number, btemp, degree_symbol)
+
+    return has_been_throttled
+
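A minimal sketch of the usage pattern described in the ThermalThrottle docstring, assuming devil is importable and one healthy device is attached:

import time

from devil.android import device_utils
from devil.android.perf import thermal_throttle

device = device_utils.DeviceUtils.HealthyDevices(blacklist=None)[0]
throttle = thermal_throttle.ThermalThrottle(device)
while throttle.IsThrottled():
  time.sleep(5)  # Let the device cool down before measuring.
# ... run the performance test here ...
if throttle.HasBeenThrottled():
  print 'WARNING: device throttled during the test; results may be noisy.'
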
diff --git a/catapult/devil/devil/android/ports.py b/catapult/devil/devil/android/ports.py
new file mode 100644
index 0000000..4783082
--- /dev/null
+++ b/catapult/devil/devil/android/ports.py
@@ -0,0 +1,178 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Functions that deal with local and device ports."""
+
+import contextlib
+import fcntl
+import httplib
+import logging
+import os
+import socket
+import traceback
+
+# The net test server is started from port 10201.
+_TEST_SERVER_PORT_FIRST = 10201
+_TEST_SERVER_PORT_LAST = 30000
+# A file to record next valid port of test server.
+_TEST_SERVER_PORT_FILE = '/tmp/test_server_port'
+_TEST_SERVER_PORT_LOCKFILE = '/tmp/test_server_port.lock'
+
+
+# The following two functions are used to allocate ports for various types of
+# test servers. Because some net-related tests can run on shards at the same
+# time, it's important to have a process-safe mechanism for allocating ports.
+# Here, we implement safe port allocation by leveraging flock on a lock file
+# shared by all processes.
+def ResetTestServerPortAllocation():
+  """Resets the port allocation to start from TEST_SERVER_PORT_FIRST.
+
+  Returns:
+    Returns True if the reset succeeds. Otherwise returns False.
+  """
+  try:
+    with open(_TEST_SERVER_PORT_FILE, 'w') as fp:
+      fp.write('%d' % _TEST_SERVER_PORT_FIRST)
+    if os.path.exists(_TEST_SERVER_PORT_LOCKFILE):
+      os.unlink(_TEST_SERVER_PORT_LOCKFILE)
+    return True
+  except Exception:  # pylint: disable=broad-except
+    logging.exception('Error while resetting port allocation')
+  return False
+
+
+def AllocateTestServerPort():
+  """Allocates a port incrementally.
+
+  Returns:
+    Returns a valid port which should be in between TEST_SERVER_PORT_FIRST and
+    TEST_SERVER_PORT_LAST. Returning 0 means no more valid port can be used.
+  """
+  port = 0
+  ports_tried = []
+  try:
+    fp_lock = open(_TEST_SERVER_PORT_LOCKFILE, 'w')
+    fcntl.flock(fp_lock, fcntl.LOCK_EX)
+    # Get current valid port and calculate next valid port.
+    if not os.path.exists(_TEST_SERVER_PORT_FILE):
+      ResetTestServerPortAllocation()
+    with open(_TEST_SERVER_PORT_FILE, 'r+') as fp:
+      port = int(fp.read())
+      ports_tried.append(port)
+      while not IsHostPortAvailable(port):
+        port += 1
+        ports_tried.append(port)
+      if (port > _TEST_SERVER_PORT_LAST or
+          port < _TEST_SERVER_PORT_FIRST):
+        port = 0
+      else:
+        fp.seek(0, os.SEEK_SET)
+        fp.write('%d' % (port + 1))
+  except Exception:  # pylint: disable=broad-except
+    logging.exception('Error while allocating port')
+  finally:
+    if fp_lock:
+      fcntl.flock(fp_lock, fcntl.LOCK_UN)
+      fp_lock.close()
+  if port:
+    logging.info('Allocate port %d for test server.', port)
+  else:
+    logging.error('Could not allocate port for test server. '
+                  'List of ports tried: %s', str(ports_tried))
+  return port
+
+
+def IsHostPortAvailable(host_port):
+  """Checks whether the specified host port is available.
+
+  Args:
+    host_port: Port on host to check.
+
+  Returns:
+    True if the port on host is available, otherwise returns False.
+  """
+  s = socket.socket()
+  try:
+    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+    s.bind(('', host_port))
+    s.close()
+    return True
+  except socket.error:
+    return False
+
+
+def IsDevicePortUsed(device, device_port, state=''):
+  """Checks whether the specified device port is used or not.
+
+  Args:
+    device: A DeviceUtils instance.
+    device_port: Port on device we want to check.
+    state: String of the specified state. Default is empty string, which
+           means any state.
+
+  Returns:
+    True if the port on device is already used, otherwise returns False.
+  """
+  base_urls = ('127.0.0.1:%d' % device_port, 'localhost:%d' % device_port)
+  netstat_results = device.RunShellCommand(
+      ['netstat', '-a'], check_return=True, large_output=True)
+  for single_connect in netstat_results:
+    # Column 3 is the local address which we want to check with.
+    connect_results = single_connect.split()
+    if connect_results[0] != 'tcp':
+      continue
+    if len(connect_results) < 6:
+      raise Exception('Unexpected format while parsing netstat line: ' +
+                      single_connect)
+    is_state_match = connect_results[5] == state if state else True
+    if connect_results[3] in base_urls and is_state_match:
+      return True
+  return False
+
+
+def IsHttpServerConnectable(host, port, tries=3, command='GET', path='/',
+                            expected_read='', timeout=2):
+  """Checks whether the specified http server is ready to serve request or not.
+
+  Args:
+    host: Host name of the HTTP server.
+    port: Port number of the HTTP server.
+    tries: How many times we want to test the connection. The default value is
+           3.
+    command: The http command we use to connect to HTTP server. The default
+             command is 'GET'.
+    path: The path we use when connecting to HTTP server. The default path is
+          '/'.
+    expected_read: The content we expect to read from the response. The default
+                   value is ''.
+    timeout: Timeout (in seconds) for each http connection. The default is 2s.
+
+  Returns:
+    Tuple of (connect status, client error). connect status is a boolean value
+    to indicate whether the server is connectable. client_error is the error
+    message the server returns when connect status is false.
+  """
+  assert tries >= 1
+  for i in xrange(0, tries):
+    client_error = None
+    try:
+      with contextlib.closing(httplib.HTTPConnection(
+          host, port, timeout=timeout)) as http:
+        # Output some debug information when we have tried more than 2 times.
+        http.set_debuglevel(i >= 2)
+        http.request(command, path)
+        r = http.getresponse()
+        content = r.read()
+        if r.status == 200 and r.reason == 'OK' and content == expected_read:
+          return (True, '')
+        client_error = ('Bad response: %s %s version %s\n  ' %
+                        (r.status, r.reason, r.version) +
+                        '\n  '.join([': '.join(h) for h in r.getheaders()]))
+    except (httplib.HTTPException, socket.error) as e:
+      # Probably too quick connecting: try again.
+      exception_error_msgs = traceback.format_exception_only(type(e), e)
+      if exception_error_msgs:
+        client_error = ''.join(exception_error_msgs)
+  # Only returns last client_error.
+  return (False, client_error or 'Timeout')
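A minimal sketch of how these helpers compose; the test server that listens on the allocated port is assumed to be started by code not shown here:

from devil.android import ports

port = ports.AllocateTestServerPort()
assert port, 'No free port available in the test server range.'
# ... start a local HTTP test server on |port| here (not shown) ...
ok, error = ports.IsHttpServerConnectable('127.0.0.1', port, tries=3)
if not ok:
  print 'Test server did not come up: %s' % error
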
diff --git a/catapult/devil/devil/android/sdk/__init__.py b/catapult/devil/devil/android/sdk/__init__.py
new file mode 100644
index 0000000..f95d3b2
--- /dev/null
+++ b/catapult/devil/devil/android/sdk/__init__.py
@@ -0,0 +1,6 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This package is intended for modules that are very tightly coupled to
+# tools or APIs from the Android SDK.
diff --git a/catapult/devil/devil/android/sdk/aapt.py b/catapult/devil/devil/android/sdk/aapt.py
new file mode 100644
index 0000000..7ae3a93
--- /dev/null
+++ b/catapult/devil/devil/android/sdk/aapt.py
@@ -0,0 +1,43 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This module wraps the Android Asset Packaging Tool."""
+
+from devil.android.sdk import build_tools
+from devil.utils import cmd_helper
+from devil.utils import lazy
+
+
+_aapt_path = lazy.WeakConstant(lambda: build_tools.GetPath('aapt'))
+
+
+def _RunAaptCmd(args):
+  """Runs an aapt command.
+
+  Args:
+    args: A list of arguments for aapt.
+
+  Returns:
+    The output of the command.
+  """
+  cmd = [_aapt_path.read()] + args
+  status, output = cmd_helper.GetCmdStatusAndOutput(cmd)
+  if status != 0:
+    raise Exception('Failed running aapt command: "%s" with output "%s".' %
+                    (' '.join(cmd), output))
+  return output
+
+
+def Dump(what, apk, assets=None):
+  """Returns the output of the aapt dump command.
+
+  Args:
+    what: What you want to dump.
+    apk: Path to apk you want to dump information for.
+    assets: List of assets in apk you want to dump information for.
+  """
+  assets = assets or []
+  if isinstance(assets, basestring):
+    assets = [assets]
+  return _RunAaptCmd(['dump', what, apk] + assets).splitlines()
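A minimal sketch; '/tmp/example.apk' is a placeholder path, and an aapt binary must be resolvable through devil's build_tools lookup:

from devil.android.sdk import aapt

# 'aapt dump badging' prints a summary that includes a 'package:' line.
for line in aapt.Dump('badging', '/tmp/example.apk'):
  if line.startswith('package:'):
    print line
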
diff --git a/catapult/devil/devil/android/sdk/adb_compatibility_devicetest.py b/catapult/devil/devil/android/sdk/adb_compatibility_devicetest.py
new file mode 100755
index 0000000..de08e21
--- /dev/null
+++ b/catapult/devil/devil/android/sdk/adb_compatibility_devicetest.py
@@ -0,0 +1,116 @@
+#!/usr/bin/env python
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import signal
+import sys
+import unittest
+
+from devil import devil_env
+from devil.android.sdk import adb_wrapper
+from devil.utils import cmd_helper
+from devil.utils import timeout_retry
+
+_PYMOCK_PATH = os.path.abspath(os.path.join(
+    os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, os.pardir,
+    'third_party', 'mock'))
+with devil_env.SysPath(_PYMOCK_PATH):
+  import mock # pylint: disable=import-error
+
+
+_ADB_PATH = os.environ.get('ADB_PATH', 'adb')
+
+
+def _hostAdbPids():
+  ps_status, ps_output = cmd_helper.GetCmdStatusAndOutput(
+      ['pgrep', '-l', 'adb'])
+  if ps_status != 0:
+    return []
+
+  pids_and_names = (line.split() for line in ps_output.splitlines())
+  return [int(pid) for pid, name in pids_and_names
+          if name == 'adb']
+
+
+@mock.patch('devil.android.sdk.adb_wrapper.AdbWrapper.GetAdbPath',
+            return_value=_ADB_PATH)
+class AdbCompatibilityTest(unittest.TestCase):
+
+  def testStartServer(self, *_args):
+    # Manually kill off any instances of adb.
+    adb_pids = _hostAdbPids()
+    for p in adb_pids:
+      os.kill(p, signal.SIGKILL)
+
+    self.assertIsNotNone(
+        timeout_retry.WaitFor(
+            lambda: not _hostAdbPids(), wait_period=0.1, max_tries=10))
+
+    # start the adb server
+    start_server_status, _ = cmd_helper.GetCmdStatusAndOutput(
+        [_ADB_PATH, 'start-server'])
+
+    # verify that the server is now online
+    self.assertEquals(0, start_server_status)
+    self.assertIsNotNone(
+        timeout_retry.WaitFor(
+            lambda: bool(_hostAdbPids()), wait_period=0.1, max_tries=10))
+
+  def testKillServer(self, *_args):
+    adb_pids = _hostAdbPids()
+    if not adb_pids:
+      adb_wrapper.AdbWrapper.StartServer()
+
+    adb_pids = _hostAdbPids()
+    self.assertEqual(1, len(adb_pids))
+
+    kill_server_status, _ = cmd_helper.GetCmdStatusAndOutput(
+        [_ADB_PATH, 'kill-server'])
+    self.assertEqual(0, kill_server_status)
+
+    adb_pids = _hostAdbPids()
+    self.assertEqual(0, len(adb_pids))
+
+  # TODO(jbudorick): Implement tests for the following:
+  # taskset -c
+  # devices [-l]
+  # push
+  # pull
+  # shell
+  # ls
+  # logcat [-c] [-d] [-v] [-b]
+  # forward [--remove] [--list]
+  # jdwp
+  # install [-l] [-r] [-s] [-d]
+  # install-multiple [-l] [-r] [-s] [-d] [-p]
+  # uninstall [-k]
+  # backup -f [-apk] [-shared] [-nosystem] [-all]
+  # restore
+  # wait-for-device
+  # get-state (BROKEN IN THE M SDK)
+  # get-devpath
+  # remount
+  # reboot
+  # reboot-bootloader
+  # root
+  # emu
+
+  @classmethod
+  def tearDownClass(cls):
+    version_status, version_output = cmd_helper.GetCmdStatusAndOutput(
+        [_ADB_PATH, 'version'])
+    if version_status != 0:
+      version = ['(unable to determine version)']
+    else:
+      version = version_output.splitlines()
+
+    print
+    print 'tested %s' % _ADB_PATH
+    for l in version:
+      print '  %s' % l
+
+
+if __name__ == '__main__':
+  sys.exit(unittest.main())
diff --git a/catapult/devil/devil/android/sdk/adb_wrapper.py b/catapult/devil/devil/android/sdk/adb_wrapper.py
new file mode 100644
index 0000000..a65ab7c
--- /dev/null
+++ b/catapult/devil/devil/android/sdk/adb_wrapper.py
@@ -0,0 +1,704 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This module wraps Android's adb tool.
+
+This is a thin wrapper around the adb interface. Any additional complexity
+should be delegated to a higher level (ex. DeviceUtils).
+"""
+
+import collections
+import errno
+import logging
+import os
+import re
+
+from devil import devil_env
+from devil.android import decorators
+from devil.android import device_errors
+from devil.utils import cmd_helper
+from devil.utils import lazy
+from devil.utils import timeout_retry
+
+with devil_env.SysPath(devil_env.DEPENDENCY_MANAGER_PATH):
+  import dependency_manager  # pylint: disable=import-error
+
+
+_DEFAULT_TIMEOUT = 30
+_DEFAULT_RETRIES = 2
+
+_EMULATOR_RE = re.compile(r'^emulator-[0-9]+$')
+
+_READY_STATE = 'device'
+
+
+def VerifyLocalFileExists(path):
+  """Verifies a local file exists.
+
+  Args:
+    path: Path to the local file.
+
+  Raises:
+    IOError: If the file doesn't exist.
+  """
+  if not os.path.exists(path):
+    raise IOError(errno.ENOENT, os.strerror(errno.ENOENT), path)
+
+
+def _FindAdb():
+  try:
+    return devil_env.config.LocalPath('adb')
+  except dependency_manager.NoPathFoundError:
+    pass
+
+  try:
+    return os.path.join(devil_env.config.LocalPath('android_sdk'),
+                        'platform-tools', 'adb')
+  except dependency_manager.NoPathFoundError:
+    pass
+
+  try:
+    return devil_env.config.FetchPath('adb')
+  except dependency_manager.NoPathFoundError:
+    raise device_errors.NoAdbError()
+
+
+def _ShouldRetryAdbCmd(exc):
+  return not isinstance(exc, device_errors.NoAdbError)
+
+
+DeviceStat = collections.namedtuple('DeviceStat',
+                                    ['st_mode', 'st_size', 'st_time'])
+
+
+class AdbWrapper(object):
+  """A wrapper around a local Android Debug Bridge executable."""
+
+  _adb_path = lazy.WeakConstant(_FindAdb)
+
+  def __init__(self, device_serial):
+    """Initializes the AdbWrapper.
+
+    Args:
+      device_serial: The device serial number as a string.
+    """
+    if not device_serial:
+      raise ValueError('A device serial must be specified')
+    self._device_serial = str(device_serial)
+
+  @classmethod
+  def GetAdbPath(cls):
+    return cls._adb_path.read()
+
+  @classmethod
+  def _BuildAdbCmd(cls, args, device_serial, cpu_affinity=None):
+    if cpu_affinity is not None:
+      cmd = ['taskset', '-c', str(cpu_affinity)]
+    else:
+      cmd = []
+    cmd.append(cls.GetAdbPath())
+    if device_serial is not None:
+      cmd.extend(['-s', device_serial])
+    cmd.extend(args)
+    return cmd
+
+  # pylint: disable=unused-argument
+  @classmethod
+  @decorators.WithTimeoutAndConditionalRetries(_ShouldRetryAdbCmd)
+  def _RunAdbCmd(cls, args, timeout=None, retries=None, device_serial=None,
+                 check_error=True, cpu_affinity=None):
+    # pylint: disable=no-member
+    try:
+      status, output = cmd_helper.GetCmdStatusAndOutputWithTimeout(
+          cls._BuildAdbCmd(args, device_serial, cpu_affinity=cpu_affinity),
+          timeout_retry.CurrentTimeoutThreadGroup().GetRemainingTime())
+    except OSError as e:
+      if e.errno in (errno.ENOENT, errno.ENOEXEC):
+        raise device_errors.NoAdbError(msg=str(e))
+      else:
+        raise
+
+    if status != 0:
+      raise device_errors.AdbCommandFailedError(
+          args, output, status, device_serial)
+    # This catches some errors, including when the device drops offline;
+    # unfortunately adb is very inconsistent with error reporting so many
+    # command failures present differently.
+    if check_error and output.startswith('error:'):
+      raise device_errors.AdbCommandFailedError(args, output)
+    return output
+  # pylint: enable=unused-argument
+
+  def _RunDeviceAdbCmd(self, args, timeout, retries, check_error=True):
+    """Runs an adb command on the device associated with this object.
+
+    Args:
+      args: A list of arguments to adb.
+      timeout: Timeout in seconds.
+      retries: Number of retries.
+      check_error: Check that the command doesn't return an error message. This
+        does NOT check the exit status of shell commands.
+
+    Returns:
+      The output of the command.
+    """
+    return self._RunAdbCmd(args, timeout=timeout, retries=retries,
+                           device_serial=self._device_serial,
+                           check_error=check_error)
+
+  def _IterRunDeviceAdbCmd(self, args, timeout):
+    """Runs an adb command and returns an iterator over its output lines.
+
+    Args:
+      args: A list of arguments to adb.
+      timeout: Timeout in seconds.
+
+    Yields:
+      The output of the command line by line.
+    """
+    return cmd_helper.IterCmdOutputLines(
+      self._BuildAdbCmd(args, self._device_serial), timeout=timeout)
+
+  def __eq__(self, other):
+    """Consider instances equal if they refer to the same device.
+
+    Args:
+      other: The instance to compare equality with.
+
+    Returns:
+      True if the instances are considered equal, false otherwise.
+    """
+    return self._device_serial == str(other)
+
+  def __str__(self):
+    """The string representation of an instance.
+
+    Returns:
+      The device serial number as a string.
+    """
+    return self._device_serial
+
+  def __repr__(self):
+    return '%s(\'%s\')' % (self.__class__.__name__, self)
+
+  # pylint: disable=unused-argument
+  @classmethod
+  def IsServerOnline(cls):
+    status, output = cmd_helper.GetCmdStatusAndOutput(['pgrep', 'adb'])
+    output = [int(x) for x in output.split()]
+    logging.info('PIDs for adb found: %r', output)
+    return status == 0
+  # pylint: enable=unused-argument
+
+  @classmethod
+  def KillServer(cls, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES):
+    cls._RunAdbCmd(['kill-server'], timeout=timeout, retries=retries)
+
+  @classmethod
+  def StartServer(cls, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES):
+    # CPU affinity is used to reduce adb instability http://crbug.com/268450
+    cls._RunAdbCmd(['start-server'], timeout=timeout, retries=retries,
+                   cpu_affinity=0)
+
+  @classmethod
+  def GetDevices(cls, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES):
+    """DEPRECATED. Refer to Devices(...) below."""
+    # TODO(jbudorick): Remove this function once no more clients are using it.
+    return cls.Devices(timeout=timeout, retries=retries)
+
+  @classmethod
+  def Devices(cls, desired_state=_READY_STATE, long_list=False,
+              timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES):
+    """Get the list of active attached devices.
+
+    Args:
+      desired_state: If not None, limit the devices returned to only those
+        in the given state.
+      long_list: Whether to use the long listing format.
+      timeout: (optional) Timeout per try in seconds.
+      retries: (optional) Number of retries to attempt.
+
+    Returns:
+      A list of AdbWrapper instances or, when |long_list| is set, a list of
+      lists whose first element is an AdbWrapper instance.
+    """
+    lines = cls._RawDevices(long_list=long_list, timeout=timeout,
+                            retries=retries)
+    if long_list:
+      return [
+        [AdbWrapper(line[0])] + line[1:]
+        for line in lines
+        if (len(line) >= 2 and (not desired_state or line[1] == desired_state))
+      ]
+    else:
+      return [
+        AdbWrapper(line[0])
+        for line in lines
+        if (len(line) == 2 and (not desired_state or line[1] == desired_state))
+      ]
+
+  @classmethod
+  def _RawDevices(cls, long_list=False, timeout=_DEFAULT_TIMEOUT,
+                  retries=_DEFAULT_RETRIES):
+    cmd = ['devices']
+    if long_list:
+      cmd.append('-l')
+    output = cls._RunAdbCmd(cmd, timeout=timeout, retries=retries)
+    return [line.split() for line in output.splitlines()[1:]]
+
+  def GetDeviceSerial(self):
+    """Gets the device serial number associated with this object.
+
+    Returns:
+      Device serial number as a string.
+    """
+    return self._device_serial
+
+  def Push(self, local, remote, timeout=60 * 5, retries=_DEFAULT_RETRIES):
+    """Pushes a file from the host to the device.
+
+    Args:
+      local: Path on the host filesystem.
+      remote: Path on the device filesystem.
+      timeout: (optional) Timeout per try in seconds.
+      retries: (optional) Number of retries to attempt.
+    """
+    VerifyLocalFileExists(local)
+    self._RunDeviceAdbCmd(['push', local, remote], timeout, retries)
+
+  def Pull(self, remote, local, timeout=60 * 5, retries=_DEFAULT_RETRIES):
+    """Pulls a file from the device to the host.
+
+    Args:
+      remote: Path on the device filesystem.
+      local: Path on the host filesystem.
+      timeout: (optional) Timeout per try in seconds.
+      retries: (optional) Number of retries to attempt.
+    """
+    cmd = ['pull', remote, local]
+    self._RunDeviceAdbCmd(cmd, timeout, retries)
+    try:
+      VerifyLocalFileExists(local)
+    except IOError:
+      raise device_errors.AdbCommandFailedError(
+          cmd, 'File not found on host: %s' % local, device_serial=str(self))
+
+  def Shell(self, command, expect_status=0, timeout=_DEFAULT_TIMEOUT,
+            retries=_DEFAULT_RETRIES):
+    """Runs a shell command on the device.
+
+    Args:
+      command: A string with the shell command to run.
+      expect_status: (optional) Check that the command's exit status matches
+        this value. Default is 0. If set to None the test is skipped.
+      timeout: (optional) Timeout per try in seconds.
+      retries: (optional) Number of retries to attempt.
+
+    Returns:
+      The output of the shell command as a string.
+
+    Raises:
+      device_errors.AdbCommandFailedError: If the exit status doesn't match
+        |expect_status|.
+    """
+    if expect_status is None:
+      args = ['shell', command]
+    else:
+      args = ['shell', '( %s );echo %%$?' % command.rstrip()]
+    output = self._RunDeviceAdbCmd(args, timeout, retries, check_error=False)
+    if expect_status is not None:
+      output_end = output.rfind('%')
+      if output_end < 0:
+        # causes the status string to become empty and raise a ValueError
+        output_end = len(output)
+
+      try:
+        status = int(output[output_end + 1:])
+      except ValueError:
+        logging.warning('exit status of shell command %r missing.', command)
+        raise device_errors.AdbShellCommandFailedError(
+            command, output, status=None, device_serial=self._device_serial)
+      output = output[:output_end]
+      if status != expect_status:
+        raise device_errors.AdbShellCommandFailedError(
+            command, output, status=status, device_serial=self._device_serial)
+    return output
+
+  def IterShell(self, command, timeout):
+    """Runs a shell command and returns an iterator over its output lines.
+
+    Args:
+      command: A string with the shell command to run.
+      timeout: Timeout in seconds.
+
+    Yields:
+      The output of the command line by line.
+    """
+    args = ['shell', command]
+    return cmd_helper.IterCmdOutputLines(
+      self._BuildAdbCmd(args, self._device_serial), timeout=timeout)
+
+  def Ls(self, path, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES):
+    """List the contents of a directory on the device.
+
+    Args:
+      path: Path on the device filesystem.
+      timeout: (optional) Timeout per try in seconds.
+      retries: (optional) Number of retries to attempt.
+
+    Returns:
+      A list of pairs (filename, stat) for each file found in the directory,
+      where the stat object has the properties: st_mode, st_size, and st_time.
+
+    Raises:
+      AdbCommandFailedError if |path| does not specify a valid and accessible
+          directory on the device, or if the output of the "adb ls" command
+          has fewer than four columns.
+    """
+    def ParseLine(line, cmd):
+      cols = line.split(None, 3)
+      if len(cols) < 4:
+        raise device_errors.AdbCommandFailedError(
+            cmd, line, "the output should be 4 columns, but is only %d columns"
+            % len(cols), device_serial=self._device_serial)
+      filename = cols.pop()
+      stat = DeviceStat(*[int(num, base=16) for num in cols])
+      return (filename, stat)
+
+    cmd = ['ls', path]
+    lines = self._RunDeviceAdbCmd(
+        cmd, timeout=timeout, retries=retries).splitlines()
+    if lines:
+      return [ParseLine(line, cmd) for line in lines]
+    else:
+      raise device_errors.AdbCommandFailedError(
+          cmd, 'path does not specify an accessible directory in the device',
+          device_serial=self._device_serial)
+
+  def Logcat(self, clear=False, dump=False, filter_specs=None,
+             logcat_format=None, ring_buffer=None, timeout=None,
+             retries=_DEFAULT_RETRIES):
+    """Get an iterable over the logcat output.
+
+    Args:
+      clear: If true, clear the logcat.
+      dump: If true, dump the current logcat contents.
+      filter_specs: If set, a list of specs to filter the logcat.
+      logcat_format: If set, the format in which the logcat should be output.
+        Options include "brief", "process", "tag", "thread", "raw", "time",
+        "threadtime", and "long"
+      ring_buffer: If set, a list of alternate ring buffers to request.
+        Options include "main", "system", "radio", "events", "crash" or "all".
+        The default is equivalent to ["main", "system", "crash"].
+      timeout: (optional) If set, timeout per try in seconds. If clear or dump
+        is set, defaults to _DEFAULT_TIMEOUT.
+      retries: (optional) If clear or dump is set, the number of retries to
+        attempt. Otherwise, does nothing.
+
+    Yields:
+      logcat output line by line.
+    """
+    cmd = ['logcat']
+    use_iter = True
+    if clear:
+      cmd.append('-c')
+      use_iter = False
+    if dump:
+      cmd.append('-d')
+      use_iter = False
+    if logcat_format:
+      cmd.extend(['-v', logcat_format])
+    if ring_buffer:
+      for buffer_name in ring_buffer:
+        cmd.extend(['-b', buffer_name])
+    if filter_specs:
+      cmd.extend(filter_specs)
+
+    if use_iter:
+      return self._IterRunDeviceAdbCmd(cmd, timeout)
+    else:
+      timeout = timeout if timeout is not None else _DEFAULT_TIMEOUT
+      return self._RunDeviceAdbCmd(cmd, timeout, retries).splitlines()
+
+  def Forward(self, local, remote, timeout=_DEFAULT_TIMEOUT,
+              retries=_DEFAULT_RETRIES):
+    """Forward socket connections from the local socket to the remote socket.
+
+    Sockets are specified by one of:
+      tcp:<port>
+      localabstract:<unix domain socket name>
+      localreserved:<unix domain socket name>
+      localfilesystem:<unix domain socket name>
+      dev:<character device name>
+      jdwp:<process pid> (remote only)
+
+    Args:
+      local: The host socket.
+      remote: The device socket.
+      timeout: (optional) Timeout per try in seconds.
+      retries: (optional) Number of retries to attempt.
+    """
+    self._RunDeviceAdbCmd(['forward', str(local), str(remote)], timeout,
+                          retries)
+
+  def ForwardRemove(self, local, timeout=_DEFAULT_TIMEOUT,
+                    retries=_DEFAULT_RETRIES):
+    """Remove a forward socket connection.
+
+    Args:
+      local: The host socket.
+      timeout: (optional) Timeout per try in seconds.
+      retries: (optional) Number of retries to attempt.
+    """
+    self._RunDeviceAdbCmd(['forward', '--remove', str(local)], timeout,
+                          retries)
+
+  def ForwardList(self, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES):
+    """List all currently forwarded socket connections.
+
+    Args:
+      timeout: (optional) Timeout per try in seconds.
+      retries: (optional) Number of retries to attempt.
+    """
+    return self._RunDeviceAdbCmd(['forward', '--list'], timeout, retries)
+
+  def JDWP(self, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES):
+    """List of PIDs of processes hosting a JDWP transport.
+
+    Args:
+      timeout: (optional) Timeout per try in seconds.
+      retries: (optional) Number of retries to attempt.
+
+    Returns:
+      A list of PIDs as strings.
+    """
+    return [a.strip() for a in
+            self._RunDeviceAdbCmd(['jdwp'], timeout, retries).split('\n')]
+
+  def Install(self, apk_path, forward_lock=False, allow_downgrade=False,
+              reinstall=False, sd_card=False, timeout=60 * 2,
+              retries=_DEFAULT_RETRIES):
+    """Install an apk on the device.
+
+    Args:
+      apk_path: Host path to the APK file.
+      forward_lock: (optional) If set forward-locks the app.
+      allow_downgrade: (optional) If set, allows for downgrades.
+      reinstall: (optional) If set reinstalls the app, keeping its data.
+      sd_card: (optional) If set installs on the SD card.
+      timeout: (optional) Timeout per try in seconds.
+      retries: (optional) Number of retries to attempt.
+    """
+    VerifyLocalFileExists(apk_path)
+    cmd = ['install']
+    if forward_lock:
+      cmd.append('-l')
+    if reinstall:
+      cmd.append('-r')
+    if sd_card:
+      cmd.append('-s')
+    if allow_downgrade:
+      cmd.append('-d')
+    cmd.append(apk_path)
+    output = self._RunDeviceAdbCmd(cmd, timeout, retries)
+    if 'Success' not in output:
+      raise device_errors.AdbCommandFailedError(
+          cmd, output, device_serial=self._device_serial)
+
+  def InstallMultiple(self, apk_paths, forward_lock=False, reinstall=False,
+                      sd_card=False, allow_downgrade=False, partial=False,
+                      timeout=60 * 2, retries=_DEFAULT_RETRIES):
+    """Install an apk with splits on the device.
+
+    Args:
+      apk_paths: Host paths to the APK files.
+      forward_lock: (optional) If set forward-locks the app.
+      reinstall: (optional) If set reinstalls the app, keeping its data.
+      sd_card: (optional) If set installs on the SD card.
+      allow_downgrade: (optional) Allow versionCode downgrade.
+      partial: (optional) Package ID if apk_paths doesn't include all .apks.
+      timeout: (optional) Timeout per try in seconds.
+      retries: (optional) Number of retries to attempt.
+    """
+    for path in apk_paths:
+      VerifyLocalFileExists(path)
+    cmd = ['install-multiple']
+    if forward_lock:
+      cmd.append('-l')
+    if reinstall:
+      cmd.append('-r')
+    if sd_card:
+      cmd.append('-s')
+    if allow_downgrade:
+      cmd.append('-d')
+    if partial:
+      cmd.extend(('-p', partial))
+    cmd.extend(apk_paths)
+    output = self._RunDeviceAdbCmd(cmd, timeout, retries)
+    if 'Success' not in output:
+      raise device_errors.AdbCommandFailedError(
+          cmd, output, device_serial=self._device_serial)
+
+  def Uninstall(self, package, keep_data=False, timeout=_DEFAULT_TIMEOUT,
+                retries=_DEFAULT_RETRIES):
+    """Remove the app |package| from the device.
+
+    Args:
+      package: The package to uninstall.
+      keep_data: (optional) If set keep the data and cache directories.
+      timeout: (optional) Timeout per try in seconds.
+      retries: (optional) Number of retries to attempt.
+    """
+    cmd = ['uninstall']
+    if keep_data:
+      cmd.append('-k')
+    cmd.append(package)
+    output = self._RunDeviceAdbCmd(cmd, timeout, retries)
+    if 'Failure' in output:
+      raise device_errors.AdbCommandFailedError(
+          cmd, output, device_serial=self._device_serial)
+
+  def Backup(self, path, packages=None, apk=False, shared=False,
+             nosystem=True, include_all=False, timeout=_DEFAULT_TIMEOUT,
+             retries=_DEFAULT_RETRIES):
+    """Write an archive of the device's data to |path|.
+
+    Args:
+      path: Local path to store the backup file.
+      packages: List of packages to be backed up.
+      apk: (optional) If set include the .apk files in the archive.
+      shared: (optional) If set back up the device's SD card.
+      nosystem: (optional) If set exclude system applications.
+      include_all: (optional) If set back up all installed applications and
+        |packages| is optional.
+      timeout: (optional) Timeout per try in seconds.
+      retries: (optional) Number of retries to attempt.
+    """
+    cmd = ['backup', '-f', path]
+    if apk:
+      cmd.append('-apk')
+    if shared:
+      cmd.append('-shared')
+    if nosystem:
+      cmd.append('-nosystem')
+    if include_all:
+      cmd.append('-all')
+    if packages:
+      cmd.extend(packages)
+    assert bool(packages) ^ bool(include_all), (
+        'Provide \'packages\' or set \'include_all\' but not both.')
+    ret = self._RunDeviceAdbCmd(cmd, timeout, retries)
+    VerifyLocalFileExists(path)
+    return ret
+
+  def Restore(self, path, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES):
+    """Restore device contents from the backup archive.
+
+    Args:
+      path: Host path to the backup archive.
+      timeout: (optional) Timeout per try in seconds.
+      retries: (optional) Number of retries to attempt.
+    """
+    VerifyLocalFileExists(path)
+    self._RunDeviceAdbCmd(['restore'] + [path], timeout, retries)
+
+  def WaitForDevice(self, timeout=60 * 5, retries=_DEFAULT_RETRIES):
+    """Block until the device is online.
+
+    Args:
+      timeout: (optional) Timeout per try in seconds.
+      retries: (optional) Number of retries to attempt.
+    """
+    self._RunDeviceAdbCmd(['wait-for-device'], timeout, retries)
+
+  def GetState(self, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES):
+    """Get device state.
+
+    Args:
+      timeout: (optional) Timeout per try in seconds.
+      retries: (optional) Number of retries to attempt.
+
+    Returns:
+      One of 'offline', 'bootloader', or 'device'.
+    """
+    # TODO(jbudorick): Revert to using get-state once it doesn't cause a
+    # a protocol fault.
+    # return self._RunDeviceAdbCmd(['get-state'], timeout, retries).strip()
+
+    lines = self._RawDevices(timeout=timeout, retries=retries)
+    for line in lines:
+      if len(line) >= 2 and line[0] == self._device_serial:
+        return line[1]
+    return 'offline'
+
+  def GetDevPath(self, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES):
+    """Gets the device path.
+
+    Args:
+      timeout: (optional) Timeout per try in seconds.
+      retries: (optional) Number of retries to attempt.
+
+    Returns:
+      The device path (e.g. usb:3-4)
+    """
+    return self._RunDeviceAdbCmd(['get-devpath'], timeout, retries)
+
+  def Remount(self, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES):
+    """Remounts the /system partition on the device read-write."""
+    self._RunDeviceAdbCmd(['remount'], timeout, retries)
+
+  def Reboot(self, to_bootloader=False, timeout=60 * 5,
+             retries=_DEFAULT_RETRIES):
+    """Reboots the device.
+
+    Args:
+      to_bootloader: (optional) If set reboots to the bootloader.
+      timeout: (optional) Timeout per try in seconds.
+      retries: (optional) Number of retries to attempt.
+    """
+    if to_bootloader:
+      cmd = ['reboot-bootloader']
+    else:
+      cmd = ['reboot']
+    self._RunDeviceAdbCmd(cmd, timeout, retries)
+
+  def Root(self, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES):
+    """Restarts the adbd daemon with root permissions, if possible.
+
+    Args:
+      timeout: (optional) Timeout per try in seconds.
+      retries: (optional) Number of retries to attempt.
+    """
+    output = self._RunDeviceAdbCmd(['root'], timeout, retries)
+    if 'cannot' in output:
+      raise device_errors.AdbCommandFailedError(
+          ['root'], output, device_serial=self._device_serial)
+
+  def Emu(self, cmd, timeout=_DEFAULT_TIMEOUT,
+               retries=_DEFAULT_RETRIES):
+    """Runs an emulator console command.
+
+    See http://developer.android.com/tools/devices/emulator.html#console
+
+    Args:
+      cmd: The command to run on the emulator console.
+      timeout: (optional) Timeout per try in seconds.
+      retries: (optional) Number of retries to attempt.
+
+    Returns:
+      The output of the emulator console command.
+    """
+    if isinstance(cmd, basestring):
+      cmd = [cmd]
+    return self._RunDeviceAdbCmd(['emu'] + cmd, timeout, retries)
+
+  @property
+  def is_emulator(self):
+    return _EMULATOR_RE.match(self._device_serial)
+
+  @property
+  def is_ready(self):
+    try:
+      return self.GetState() == _READY_STATE
+    except device_errors.CommandFailedError:
+      return False
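A minimal usage sketch; 'SERIAL' is a placeholder for a real device serial, and Shell() raises AdbShellCommandFailedError on a non-zero exit status as documented above:

from devil.android.sdk import adb_wrapper

for adb in adb_wrapper.AdbWrapper.Devices():
  print '%s is_emulator=%s' % (adb.GetDeviceSerial(), bool(adb.is_emulator))

adb = adb_wrapper.AdbWrapper('SERIAL')
print adb.Shell('getprop ro.product.model').strip()
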
diff --git a/catapult/devil/devil/android/sdk/adb_wrapper_devicetest.py b/catapult/devil/devil/android/sdk/adb_wrapper_devicetest.py
new file mode 100644
index 0000000..59755c0
--- /dev/null
+++ b/catapult/devil/devil/android/sdk/adb_wrapper_devicetest.py
@@ -0,0 +1,96 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Tests for the AdbWrapper class."""
+
+import os
+import tempfile
+import time
+import unittest
+
+from devil.android import device_errors
+from devil.android.sdk import adb_wrapper
+
+
+class TestAdbWrapper(unittest.TestCase):
+
+  def setUp(self):
+    devices = adb_wrapper.AdbWrapper.Devices()
+    assert devices, 'A device must be attached'
+    self._adb = devices[0]
+    self._adb.WaitForDevice()
+
+  @staticmethod
+  def _MakeTempFile(contents):
+    """Make a temporary file with the given contents.
+
+    Args:
+      contents: string to write to the temporary file.
+
+    Returns:
+      The absolute path to the file.
+    """
+    fi, path = tempfile.mkstemp()
+    with os.fdopen(fi, 'wb') as f:
+      f.write(contents)
+    return path
+
+  def testShell(self):
+    output = self._adb.Shell('echo test', expect_status=0)
+    self.assertEqual(output.strip(), 'test')
+    output = self._adb.Shell('echo test')
+    self.assertEqual(output.strip(), 'test')
+    with self.assertRaises(device_errors.AdbCommandFailedError):
+      self._adb.Shell('echo test', expect_status=1)
+
+  def testPushLsPull(self):
+    path = self._MakeTempFile('foo')
+    device_path = '/data/local/tmp/testfile.txt'
+    local_tmpdir = os.path.dirname(path)
+    self._adb.Push(path, device_path)
+    files = dict(self._adb.Ls('/data/local/tmp'))
+    self.assertTrue('testfile.txt' in files)
+    self.assertEquals(3, files['testfile.txt'].st_size)
+    self.assertEqual(self._adb.Shell('cat %s' % device_path), 'foo')
+    self._adb.Pull(device_path, local_tmpdir)
+    with open(os.path.join(local_tmpdir, 'testfile.txt'), 'r') as f:
+      self.assertEqual(f.read(), 'foo')
+
+  def testInstall(self):
+    path = self._MakeTempFile('foo')
+    with self.assertRaises(device_errors.AdbCommandFailedError):
+      self._adb.Install(path)
+
+  def testForward(self):
+    with self.assertRaises(device_errors.AdbCommandFailedError):
+      self._adb.Forward(0, 0)
+
+  def testUninstall(self):
+    with self.assertRaises(device_errors.AdbCommandFailedError):
+      self._adb.Uninstall('some.nonexistent.package')
+
+  def testRebootWaitForDevice(self):
+    self._adb.Reboot()
+    print 'waiting for device to reboot...'
+    while self._adb.GetState() == 'device':
+      time.sleep(1)
+    self._adb.WaitForDevice()
+    self.assertEqual(self._adb.GetState(), 'device')
+    print 'waiting for package manager...'
+    while 'package:' not in self._adb.Shell('pm path android'):
+      time.sleep(1)
+
+  def testRootRemount(self):
+    self._adb.Root()
+    while True:
+      try:
+        self._adb.Shell('start')
+        break
+      except device_errors.AdbCommandFailedError:
+        time.sleep(1)
+    self._adb.Remount()
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/catapult/devil/devil/android/sdk/build_tools.py b/catapult/devil/devil/android/sdk/build_tools.py
new file mode 100644
index 0000000..99083d9
--- /dev/null
+++ b/catapult/devil/devil/android/sdk/build_tools.py
@@ -0,0 +1,51 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+
+from devil import devil_env
+from devil.utils import lazy
+
+with devil_env.SysPath(devil_env.DEPENDENCY_MANAGER_PATH):
+  import dependency_manager  # pylint: disable=import-error
+
+
+def GetPath(build_tool):
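+  """Returns the path to the requested build tool.
+
+  The lookup order is: an explicitly configured local path, the newest
+  build-tools directory of a locally configured Android SDK, and finally a
+  fetched dependency.
+
+  Example (the 'aapt' tool name is illustrative):
+
+    aapt_path = build_tools.GetPath('aapt')
+  """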
+  try:
+    return devil_env.config.LocalPath(build_tool)
+  except dependency_manager.NoPathFoundError:
+    pass
+
+  try:
+    return _PathInLocalSdk(build_tool)
+  except dependency_manager.NoPathFoundError:
+    pass
+
+  return devil_env.config.FetchPath(build_tool)
+
+
+def _PathInLocalSdk(build_tool):
+  build_tools_path = _build_tools_path.read()
+  return (os.path.join(build_tools_path, build_tool) if build_tools_path
+          else None)
+
+
+def _FindBuildTools():
+  android_sdk_path = devil_env.config.LocalPath('android_sdk')
+  if not android_sdk_path:
+    return None
+
+  build_tools_contents = os.listdir(
+      os.path.join(android_sdk_path, 'build-tools'))
+
+  if not build_tools_contents:
+    return None
+  else:
+    if len(build_tools_contents) > 1:
+      build_tools_contents.sort()
+    return os.path.join(android_sdk_path, 'build-tools',
+                        build_tools_contents[-1])
+
+
+_build_tools_path = lazy.WeakConstant(_FindBuildTools)
diff --git a/catapult/devil/devil/android/sdk/dexdump.py b/catapult/devil/devil/android/sdk/dexdump.py
new file mode 100644
index 0000000..992366e
--- /dev/null
+++ b/catapult/devil/devil/android/sdk/dexdump.py
@@ -0,0 +1,31 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from devil.android.sdk import build_tools
+from devil.utils import cmd_helper
+from devil.utils import lazy
+
+
+_dexdump_path = lazy.WeakConstant(lambda: build_tools.GetPath('dexdump'))
+
+
+def DexDump(dexfiles, file_summary=False):
+  """A wrapper around the Android SDK's dexdump tool.
+
+  Args:
+    dexfiles: The dexfile or list of dex files to dump.
+    file_summary: Display summary information from the file header. (-f)
+
+  Returns:
+    An iterable over the output lines.
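+
+  Example (the dex file path is illustrative):
+
+    for line in dexdump.DexDump('/tmp/classes.dex', file_summary=True):
+      print line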
+  """
+  # TODO(jbudorick): Add support for more options as necessary.
+  if isinstance(dexfiles, basestring):
+    dexfiles = [dexfiles]
+  args = [_dexdump_path.read()] + dexfiles
+  if file_summary:
+    args.append('-f')
+
+  return cmd_helper.IterCmdOutputLines(args)
+
diff --git a/catapult/devil/devil/android/sdk/fastboot.py b/catapult/devil/devil/android/sdk/fastboot.py
new file mode 100644
index 0000000..d9fa653
--- /dev/null
+++ b/catapult/devil/devil/android/sdk/fastboot.py
@@ -0,0 +1,101 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This module wraps Android's fastboot tool.
+
+This is a thin wrapper around the fastboot interface. Any additional complexity
+should be delegated to a higher level (e.g. FastbootUtils).
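+
+A minimal usage sketch (the serial number and image path are illustrative):
+
+  fb = fastboot.Fastboot('0123456789abcdef')
+  fb.Flash('recovery', '/path/to/recovery.img')
+  fb.Reboot()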
+"""
+# pylint: disable=unused-argument
+
+import os
+
+from devil import devil_env
+from devil.android import decorators
+from devil.android import device_errors
+from devil.utils import cmd_helper
+from devil.utils import lazy
+
+_DEFAULT_TIMEOUT = 30
+_DEFAULT_RETRIES = 3
+_FLASH_TIMEOUT = _DEFAULT_TIMEOUT * 10
+
+
+class Fastboot(object):
+
+  _fastboot_path = lazy.WeakConstant(lambda: os.path.join(
+      devil_env.config.LocalPath('android_sdk'), 'platform-tools', 'fastboot'))
+
+  def __init__(self, device_serial, default_timeout=_DEFAULT_TIMEOUT,
+               default_retries=_DEFAULT_RETRIES):
+    """Initializes the FastbootWrapper.
+
+    Args:
+      device_serial: The device serial number as a string.
+    """
+    if not device_serial:
+      raise ValueError('A device serial must be specified')
+    self._device_serial = str(device_serial)
+    self._default_timeout = default_timeout
+    self._default_retries = default_retries
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def _RunFastbootCommand(self, cmd, timeout=None, retries=None):
+    """Run a command line command using the fastboot android tool.
+
+    Args:
+      cmd: Command to run. Must be list of args, the first one being the command
+
+    Returns:
+      output of command.
+
+    Raises:
+      TypeError: If cmd is not of type list.
+    """
+    if not isinstance(cmd, list):
+      raise TypeError(
+          'Command for _RunFastbootCommand must be a list.')
+    cmd = [self._fastboot_path.read(), '-s', self._device_serial] + cmd
+    status, output = cmd_helper.GetCmdStatusAndOutput(cmd)
+    if int(status) != 0:
+      raise device_errors.FastbootCommandFailedError(
+          cmd, output, status, self._device_serial)
+    return output
+
+  @decorators.WithTimeoutAndRetriesDefaults(_FLASH_TIMEOUT, 0)
+  def Flash(self, partition, image, timeout=None, retries=None):
+    """Flash partition with img.
+
+    Args:
+      partition: Partition to be flashed.
+      image: Location of the image to flash.
+    """
+    self._RunFastbootCommand(['flash', partition, image])
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def Devices(self, timeout=None, retries=None):
+    """Outputs list of devices in fastboot mode."""
+    output = self._RunFastbootCommand(['devices'])
+    return [line.split()[0] for line in output.splitlines()]
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def RebootBootloader(self, timeout=None, retries=None):
+    """Reboot from fastboot, into fastboot."""
+    self._RunFastbootCommand(['reboot-bootloader'])
+
+  @decorators.WithTimeoutAndRetriesDefaults(_FLASH_TIMEOUT, 0)
+  def Reboot(self, timeout=None, retries=None):
+    """Reboot from fastboot to normal usage"""
+    self._RunFastbootCommand(['reboot'])
+
+  @decorators.WithTimeoutAndRetriesFromInstance()
+  def SetOemOffModeCharge(self, value, timeout=None, retries=None):
+    """Sets off mode charging
+
+    Args:
+      value: boolean value to set off-mode-charging on or off.
+    """
+    self._RunFastbootCommand(
+        ['oem', 'off-mode-charge', str(int(value))])
diff --git a/catapult/devil/devil/android/sdk/gce_adb_wrapper.py b/catapult/devil/devil/android/sdk/gce_adb_wrapper.py
new file mode 100644
index 0000000..5ee7959
--- /dev/null
+++ b/catapult/devil/devil/android/sdk/gce_adb_wrapper.py
@@ -0,0 +1,146 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Provides a work around for various adb commands on android gce instances.
+
+Some adb commands don't work well when the device is a cloud vm, namely
+'push' and 'pull'. With gce instances, moving files through adb can be
+painfully slow and hit timeouts, so the methods here just use scp instead.
+"""
+# pylint: disable=unused-argument
+
+import logging
+import os
+import subprocess
+
+from devil.android import device_errors
+from devil.android.sdk import adb_wrapper
+from devil.utils import cmd_helper
+
+
+# SSH key file for accessing the instances. The keys are created at
+# startup and removed & revoked at teardown.
+_SSH_KEY_FILE = '/tmp/ssh_android_gce_instance'
+
+
+class GceAdbWrapper(adb_wrapper.AdbWrapper):
+
+  def __init__(self, device_serial):
+    super(GceAdbWrapper, self).__init__(device_serial)
+    self._instance_ip = self.Shell('getprop net.gce.ip_address').strip()
+
+  # override
+  def Push(self, local, remote, **kwargs):
+    """Pushes an object from the host to the gce instance.
+
+    Args:
+      local: Path on the host filesystem.
+      remote: Path on the instance filesystem.
+    """
+    adb_wrapper.VerifyLocalFileExists(_SSH_KEY_FILE)
+    adb_wrapper.VerifyLocalFileExists(local)
+    if os.path.isdir(local):
+      self.Shell('mkdir -p %s' % cmd_helper.SingleQuote(remote))
+
+      # When the object to be pushed is a directory, adb merges the source dir
+      # with the destination dir. So if local is a dir, just scp its contents.
+      for f in os.listdir(local):
+        self._PushObject(os.path.join(local, f), os.path.join(remote, f))
+        self.Shell('chmod 777 %s' %
+                   cmd_helper.SingleQuote(os.path.join(remote, f)))
+    else:
+      parent_dir = remote[0:remote.rfind('/')]
+      if parent_dir:
+        self.Shell('mkdir -p %s' % cmd_helper.SingleQuote(parent_dir))
+      self._PushObject(local, remote)
+      self.Shell('chmod 777 %s' % cmd_helper.SingleQuote(remote))
+
+  def _PushObject(self, local, remote):
+    """Copies an object from the host to the gce instance using scp.
+
+    Args:
+      local: Path on the host filesystem.
+      remote: Path on the instance filesystem.
+    """
+    cmd = [
+        'scp',
+        '-r',
+        '-i', _SSH_KEY_FILE,
+        '-o', 'UserKnownHostsFile=/dev/null',
+        '-o', 'StrictHostKeyChecking=no',
+        local,
+        'root@%s:%s' % (self._instance_ip, remote)
+    ]
+    status, _ = cmd_helper.GetCmdStatusAndOutput(cmd)
+    if status:
+      raise device_errors.AdbCommandFailedError(
+          cmd, 'File not reachable on host: %s' % local,
+          device_serial=str(self))
+
+  # override
+  def Pull(self, remote, local, **kwargs):
+    """Pulls a file from the gce instance to the host.
+
+    Args:
+      remote: Path on the instance filesystem.
+      local: Path on the host filesystem.
+    """
+    adb_wrapper.VerifyLocalFileExists(_SSH_KEY_FILE)
+    cmd = [
+        'scp',
+        '-p',
+        '-r',
+        '-i', _SSH_KEY_FILE,
+        '-o', 'UserKnownHostsFile=/dev/null',
+        '-o', 'StrictHostKeyChecking=no',
+        'root@%s:%s' % (self._instance_ip, remote),
+        local,
+    ]
+    status, _ = cmd_helper.GetCmdStatusAndOutput(cmd)
+    if status:
+      raise device_errors.AdbCommandFailedError(
+          cmd, 'File not reachable on host: %s' % local,
+          device_serial=str(self))
+
+    try:
+      adb_wrapper.VerifyLocalFileExists(local)
+    except (subprocess.CalledProcessError, IOError):
+      logging.exception('Error when pulling files from android instance.')
+      raise device_errors.AdbCommandFailedError(
+          cmd, 'File not reachable on host: %s' % local,
+          device_serial=str(self))
+
+  # override
+  def Install(self, apk_path, forward_lock=False, reinstall=False,
+              sd_card=False, **kwargs):
+    """Installs an apk on the gce instance
+
+    Args:
+      apk_path: Host path to the APK file.
+      forward_lock: (optional) If set forward-locks the app.
+      reinstall: (optional) If set reinstalls the app, keeping its data.
+      sd_card: (optional) If set installs on the SD card.
+    """
+    adb_wrapper.VerifyLocalFileExists(_SSH_KEY_FILE)
+    adb_wrapper.VerifyLocalFileExists(apk_path)
+    cmd = ['install']
+    if forward_lock:
+      cmd.append('-l')
+    if reinstall:
+      cmd.append('-r')
+    if sd_card:
+      cmd.append('-s')
+    self.Push(apk_path, '/data/local/tmp/tmp.apk')
+    cmd = ['pm'] + cmd
+    cmd.append('/data/local/tmp/tmp.apk')
+    output = self.Shell(' '.join(cmd))
+    self.Shell('rm /data/local/tmp/tmp.apk')
+    if 'Success' not in output:
+      raise device_errors.AdbCommandFailedError(
+          cmd, output, device_serial=self._device_serial)
+
+  # override
+  @property
+  def is_emulator(self):
+    return True
diff --git a/catapult/devil/devil/android/sdk/intent.py b/catapult/devil/devil/android/sdk/intent.py
new file mode 100644
index 0000000..e612f76
--- /dev/null
+++ b/catapult/devil/devil/android/sdk/intent.py
@@ -0,0 +1,114 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Manages intents and associated information.
+
+This is generally intended to be used with functions that call Android's
+am command.
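+
+Example (the package and activity names are illustrative):
+
+  launch_intent = intent.Intent(
+      package='com.example.app',
+      activity='.MainActivity',
+      extras={'debug': True})
+  launch_intent.am_args
+  # => ['-a', 'android.intent.action.VIEW',
+  #     '-n', 'com.example.app/.MainActivity',
+  #     '--ez', 'debug', 'True']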
+"""
+
+
+class Intent(object):
+
+  def __init__(self, action='android.intent.action.VIEW', activity=None,
+               category=None, component=None, data=None, extras=None,
+               flags=None, package=None):
+    """Creates an Intent.
+
+    Args:
+      action: A string containing the action.
+      activity: A string that, with |package|, can be used to specify the
+                component.
+      category: A string or list containing any categories.
+      component: A string that specifies the component to send the intent to.
+      data: A string containing a data URI.
+      extras: A dict containing extra parameters to be passed along with the
+              intent.
+      flags: A string containing flags to pass.
+      package: A string that, with activity, can be used to specify the
+               component.
+    """
+    self._action = action
+    self._activity = activity
+    if isinstance(category, list) or category is None:
+      self._category = category
+    else:
+      self._category = [category]
+    self._component = component
+    self._data = data
+    self._extras = extras
+    self._flags = flags
+    self._package = package
+
+    if self._component and '/' in component:
+      self._package, self._activity = component.split('/', 1)
+    elif self._package and self._activity:
+      self._component = '%s/%s' % (package, activity)
+
+  @property
+  def action(self):
+    return self._action
+
+  @property
+  def activity(self):
+    return self._activity
+
+  @property
+  def category(self):
+    return self._category
+
+  @property
+  def component(self):
+    return self._component
+
+  @property
+  def data(self):
+    return self._data
+
+  @property
+  def extras(self):
+    return self._extras
+
+  @property
+  def flags(self):
+    return self._flags
+
+  @property
+  def package(self):
+    return self._package
+
+  @property
+  def am_args(self):
+    """Returns the intent as a list of arguments for the activity manager.
+
+    For details refer to the specification at:
+    - http://developer.android.com/tools/help/adb.html#IntentSpec
+    """
+    args = []
+    if self.action:
+      args.extend(['-a', self.action])
+    if self.data:
+      args.extend(['-d', self.data])
+    if self.category:
+      args.extend(arg for cat in self.category for arg in ('-c', cat))
+    if self.component:
+      args.extend(['-n', self.component])
+    if self.flags:
+      args.extend(['-f', self.flags])
+    if self.extras:
+      for key, value in self.extras.iteritems():
+        if value is None:
+          args.extend(['--esn', key])
+        elif isinstance(value, str):
+          args.extend(['--es', key, value])
+        elif isinstance(value, bool):
+          args.extend(['--ez', key, str(value)])
+        elif isinstance(value, int):
+          args.extend(['--ei', key, str(value)])
+        elif isinstance(value, float):
+          args.extend(['--ef', key, str(value)])
+        else:
+          raise NotImplementedError(
+              'Intent does not know how to pass %s extras' % type(value))
+    return args
diff --git a/catapult/devil/devil/android/sdk/keyevent.py b/catapult/devil/devil/android/sdk/keyevent.py
new file mode 100644
index 0000000..732a7dc
--- /dev/null
+++ b/catapult/devil/devil/android/sdk/keyevent.py
@@ -0,0 +1,14 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Android KeyEvent constants.
+
+http://developer.android.com/reference/android/view/KeyEvent.html
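+
+These codes can be passed to 'adb shell input keyevent <code>'; for example,
+'adb shell input keyevent 4' (KEYCODE_BACK) simulates a press of the back
+button.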
+"""
+
+KEYCODE_BACK = 4
+KEYCODE_DPAD_RIGHT = 22
+KEYCODE_ENTER = 66
+KEYCODE_MENU = 82
+KEYCODE_APP_SWITCH = 187
diff --git a/catapult/devil/devil/android/sdk/shared_prefs.py b/catapult/devil/devil/android/sdk/shared_prefs.py
new file mode 100644
index 0000000..50ff5c6
--- /dev/null
+++ b/catapult/devil/devil/android/sdk/shared_prefs.py
@@ -0,0 +1,391 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Helper object to read and modify Shared Preferences from Android apps.
+
+See e.g.:
+  http://developer.android.com/reference/android/content/SharedPreferences.html
+"""
+
+import logging
+import posixpath
+
+from xml.etree import ElementTree
+
+
+_XML_DECLARATION = "<?xml version='1.0' encoding='utf-8' standalone='yes' ?>\n"
+
+
+class BasePref(object):
+  """Base class for getting/setting the value of a specific preference type.
+
+  Should not be instantiated directly. The SharedPrefs collection will
+  instantiate the appropriate subclasses, which directly manipulate the
+  underlying xml document, to parse and serialize values according to their
+  type.
+
+  Args:
+    elem: An xml ElementTree object holding the preference data.
+
+  Properties:
+    tag_name: A string with the tag that must be used for this preference type.
+  """
+  tag_name = None
+
+  def __init__(self, elem):
+    if elem.tag != type(self).tag_name:
+      raise TypeError('Property %r has type %r, but trying to access as %r' %
+                      (elem.get('name'), elem.tag, type(self).tag_name))
+    self._elem = elem
+
+  def __str__(self):
+    """Get the underlying xml element as a string."""
+    return ElementTree.tostring(self._elem)
+
+  def get(self):
+    """Get the value of this preference."""
+    return self._elem.get('value')
+
+  def set(self, value):
+    """Set from a value casted as a string."""
+    self._elem.set('value', str(value))
+
+  @property
+  def has_value(self):
+    """Check whether the element has a value."""
+    return self._elem.get('value') is not None
+
+
+class BooleanPref(BasePref):
+  """Class for getting/setting a preference with a boolean value.
+
+  The underlying xml element has the form, e.g.:
+      <boolean name="featureEnabled" value="false" />
+  """
+  tag_name = 'boolean'
+  VALUES = {'true': True, 'false': False}
+
+  def get(self):
+    """Get the value as a Python bool."""
+    return type(self).VALUES[super(BooleanPref, self).get()]
+
+  def set(self, value):
+    """Set from a value casted as a bool."""
+    super(BooleanPref, self).set('true' if value else 'false')
+
+
+class FloatPref(BasePref):
+  """Class for getting/setting a preference with a float value.
+
+  The underlying xml element has the form, e.g.:
+      <float name="someMetric" value="4.7" />
+  """
+  tag_name = 'float'
+
+  def get(self):
+    """Get the value as a Python float."""
+    return float(super(FloatPref, self).get())
+
+
+class IntPref(BasePref):
+  """Class for getting/setting a preference with an int value.
+
+  The underlying xml element has the form, e.g.:
+      <int name="aCounter" value="1234" />
+  """
+  tag_name = 'int'
+
+  def get(self):
+    """Get the value as a Python int."""
+    return int(super(IntPref, self).get())
+
+
+class LongPref(IntPref):
+  """Class for getting/setting a preference with a long value.
+
+  The underlying xml element has the form, e.g.:
+      <long name="aLongCounter" value="1234" />
+
+  We use the same implementation as IntPref.
+  """
+  tag_name = 'long'
+
+
+class StringPref(BasePref):
+  """Class for getting/setting a preference with a string value.
+
+  The underlying xml element has the form, e.g.:
+      <string name="someHashValue">249b3e5af13d4db2</string>
+  """
+  tag_name = 'string'
+
+  def get(self):
+    """Get the value as a Python string."""
+    return self._elem.text
+
+  def set(self, value):
+    """Set from a value casted as a string."""
+    self._elem.text = str(value)
+
+
+class StringSetPref(StringPref):
+  """Class for getting/setting a preference with a set of string values.
+
+  The underlying xml element has the form, e.g.:
+      <set name="managed_apps">
+          <string>com.mine.app1</string>
+          <string>com.mine.app2</string>
+          <string>com.mine.app3</string>
+      </set>
+  """
+  tag_name = 'set'
+
+  def get(self):
+    """Get a list with the string values contained."""
+    value = []
+    for child in self._elem:
+      assert child.tag == 'string'
+      value.append(child.text)
+    return value
+
+  def set(self, value):
+    """Set from a sequence of values, each casted as a string."""
+    for child in list(self._elem):
+      self._elem.remove(child)
+    for item in value:
+      ElementTree.SubElement(self._elem, 'string').text = str(item)
+
+
+_PREF_TYPES = {c.tag_name: c for c in [BooleanPref, FloatPref, IntPref,
+                                       LongPref, StringPref, StringSetPref]}
+
+
+class SharedPrefs(object):
+
+  def __init__(self, device, package, filename):
+    """Helper object to read and update "Shared Prefs" of Android apps.
+
+    Such files typically look like, e.g.:
+
+        <?xml version='1.0' encoding='utf-8' standalone='yes' ?>
+        <map>
+          <int name="databaseVersion" value="107" />
+          <boolean name="featureEnabled" value="false" />
+          <string name="someHashValue">249b3e5af13d4db2</string>
+        </map>
+
+    Example usage:
+
+        prefs = shared_prefs.SharedPrefs(device, 'com.my.app', 'my_prefs.xml')
+        prefs.Load()
+        prefs.GetString('someHashValue') # => '249b3e5af13d4db2'
+        prefs.SetInt('databaseVersion', 42)
+        prefs.Remove('featureEnabled')
+        prefs.Commit()
+
+    The object may also be used as a context manager to automatically load and
+    commit, respectively, upon entering and leaving the context.
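+
+    For example, using the same illustrative names as above:
+
+        with shared_prefs.SharedPrefs(
+            device, 'com.my.app', 'my_prefs.xml') as prefs:
+          prefs.SetBoolean('featureEnabled', True)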
+
+    Args:
+      device: A DeviceUtils object.
+      package: A string with the package name of the app that owns the shared
+        preferences file.
+      filename: A string with the name of the preferences file to read/write.
+    """
+    self._device = device
+    self._xml = None
+    self._package = package
+    self._filename = filename
+    self._path = '/data/data/%s/shared_prefs/%s' % (package, filename)
+    self._changed = False
+
+  def __repr__(self):
+    """Get a useful printable representation of the object."""
+    return '<{cls} file {filename} for {package} on {device}>'.format(
+      cls=type(self).__name__, filename=self.filename, package=self.package,
+      device=str(self._device))
+
+  def __str__(self):
+    """Get the underlying xml document as a string."""
+    return _XML_DECLARATION + ElementTree.tostring(self.xml)
+
+  @property
+  def package(self):
+    """Get the package name of the app that owns the shared preferences."""
+    return self._package
+
+  @property
+  def filename(self):
+    """Get the filename of the shared preferences file."""
+    return self._filename
+
+  @property
+  def path(self):
+    """Get the full path to the shared preferences file on the device."""
+    return self._path
+
+  @property
+  def changed(self):
+    """True if properties have changed and a commit would be needed."""
+    return self._changed
+
+  @property
+  def xml(self):
+    """Get the underlying xml document as an ElementTree object."""
+    if self._xml is None:
+      self._xml = ElementTree.Element('map')
+    return self._xml
+
+  def Load(self):
+    """Load the shared preferences file from the device.
+
+    An empty xml document, which may be modified and saved on |commit|, is
+    created if the file does not already exist.
+    """
+    if self._device.FileExists(self.path):
+      self._xml = ElementTree.fromstring(
+          self._device.ReadFile(self.path, as_root=True))
+      assert self._xml.tag == 'map'
+    else:
+      self._xml = None
+    self._changed = False
+
+  def Clear(self):
+    """Clear all of the preferences contained in this object."""
+    if self._xml is not None and len(self):  # only clear if not already empty
+      self._xml = None
+      self._changed = True
+
+  def Commit(self):
+    """Save the current set of preferences to the device.
+
+    Only actually saves if some preferences have been modified.
+    """
+    if not self.changed:
+      return
+    self._device.RunShellCommand(
+        ['mkdir', '-p', posixpath.dirname(self.path)],
+        as_root=True, check_return=True)
+    self._device.WriteFile(self.path, str(self), as_root=True)
+    self._device.KillAll(self.package, exact=True, as_root=True, quiet=True)
+    self._changed = False
+
+  def __len__(self):
+    """Get the number of preferences in this collection."""
+    return len(self.xml)
+
+  def PropertyType(self, key):
+    """Get the type (i.e. tag name) of a property in the collection."""
+    return self._GetChild(key).tag
+
+  def HasProperty(self, key):
+    try:
+      self._GetChild(key)
+      return True
+    except KeyError:
+      return False
+
+  def GetBoolean(self, key):
+    """Get a boolean property."""
+    return BooleanPref(self._GetChild(key)).get()
+
+  def SetBoolean(self, key, value):
+    """Set a boolean property."""
+    self._SetPrefValue(key, value, BooleanPref)
+
+  def GetFloat(self, key):
+    """Get a float property."""
+    return FloatPref(self._GetChild(key)).get()
+
+  def SetFloat(self, key, value):
+    """Set a float property."""
+    self._SetPrefValue(key, value, FloatPref)
+
+  def GetInt(self, key):
+    """Get an int property."""
+    return IntPref(self._GetChild(key)).get()
+
+  def SetInt(self, key, value):
+    """Set an int property."""
+    self._SetPrefValue(key, value, IntPref)
+
+  def GetLong(self, key):
+    """Get a long property."""
+    return LongPref(self._GetChild(key)).get()
+
+  def SetLong(self, key, value):
+    """Set a long property."""
+    self._SetPrefValue(key, value, LongPref)
+
+  def GetString(self, key):
+    """Get a string property."""
+    return StringPref(self._GetChild(key)).get()
+
+  def SetString(self, key, value):
+    """Set a string property."""
+    self._SetPrefValue(key, value, StringPref)
+
+  def GetStringSet(self, key):
+    """Get a string set property."""
+    return StringSetPref(self._GetChild(key)).get()
+
+  def SetStringSet(self, key, value):
+    """Set a string set property."""
+    self._SetPrefValue(key, value, StringSetPref)
+
+  def Remove(self, key):
+    """Remove a preference from the collection."""
+    self.xml.remove(self._GetChild(key))
+
+  def AsDict(self):
+    """Return the properties and their values as a dictionary."""
+    d = {}
+    for child in self.xml:
+      pref = _PREF_TYPES[child.tag](child)
+      d[child.get('name')] = pref.get()
+    return d
+
+  def __enter__(self):
+    """Load preferences file from the device when entering a context."""
+    self.Load()
+    return self
+
+  def __exit__(self, exc_type, _exc_value, _traceback):
+    """Save preferences file to the device when leaving a context."""
+    if not exc_type:
+      self.Commit()
+
+  def _GetChild(self, key):
+    """Get the underlying xml node that holds the property of a given key.
+
+    Raises:
+      KeyError when the key is not found in the collection.
+    """
+    for child in self.xml:
+      if child.get('name') == key:
+        return child
+    raise KeyError(key)
+
+  def _SetPrefValue(self, key, value, pref_cls):
+    """Set the value of a property.
+
+    Args:
+      key: The key of the property to set.
+      value: The new value of the property.
+      pref_cls: A subclass of BasePref used to access the property.
+
+    Raises:
+      TypeError when the key already exists but with a different type.
+    """
+    try:
+      pref = pref_cls(self._GetChild(key))
+      old_value = pref.get()
+    except KeyError:
+      pref = pref_cls(ElementTree.SubElement(
+          self.xml, pref_cls.tag_name, {'name': key}))
+      old_value = None
+    if old_value != value:
+      pref.set(value)
+      self._changed = True
+      logging.info('Setting property: %s', pref)
diff --git a/catapult/devil/devil/android/sdk/shared_prefs_test.py b/catapult/devil/devil/android/sdk/shared_prefs_test.py
new file mode 100755
index 0000000..ff3b9a1
--- /dev/null
+++ b/catapult/devil/devil/android/sdk/shared_prefs_test.py
@@ -0,0 +1,166 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Unit tests for the contents of shared_prefs.py (mostly SharedPrefs).
+"""
+
+import logging
+import unittest
+
+from devil import devil_env
+from devil.android import device_utils
+from devil.android.sdk import shared_prefs
+
+with devil_env.SysPath(devil_env.PYMOCK_PATH):
+  import mock  # pylint: disable=import-error
+
+
+def MockDeviceWithFiles(files=None):
+  if files is None:
+    files = {}
+
+  def file_exists(path):
+    return path in files
+
+  def write_file(path, contents, **_kwargs):
+    files[path] = contents
+
+  def read_file(path, **_kwargs):
+    return files[path]
+
+  device = mock.MagicMock(spec=device_utils.DeviceUtils)
+  device.FileExists = mock.Mock(side_effect=file_exists)
+  device.WriteFile = mock.Mock(side_effect=write_file)
+  device.ReadFile = mock.Mock(side_effect=read_file)
+  return device
+
+
+class SharedPrefsTest(unittest.TestCase):
+
+  def setUp(self):
+    self.device = MockDeviceWithFiles({
+      '/data/data/com.some.package/shared_prefs/prefs.xml':
+          "<?xml version='1.0' encoding='utf-8' standalone='yes' ?>\n"
+          '<map>\n'
+          '  <int name="databaseVersion" value="107" />\n'
+          '  <boolean name="featureEnabled" value="false" />\n'
+          '  <string name="someHashValue">249b3e5af13d4db2</string>\n'
+          '</map>'})
+    self.expected_data = {'databaseVersion': 107,
+                          'featureEnabled': False,
+                          'someHashValue': '249b3e5af13d4db2'}
+
+  def testPropertyLifetime(self):
+    prefs = shared_prefs.SharedPrefs(
+        self.device, 'com.some.package', 'prefs.xml')
+    self.assertEquals(len(prefs), 0)  # collection is empty before loading
+    prefs.SetInt('myValue', 444)
+    self.assertEquals(len(prefs), 1)
+    self.assertEquals(prefs.GetInt('myValue'), 444)
+    self.assertTrue(prefs.HasProperty('myValue'))
+    prefs.Remove('myValue')
+    self.assertEquals(len(prefs), 0)
+    self.assertFalse(prefs.HasProperty('myValue'))
+    with self.assertRaises(KeyError):
+      prefs.GetInt('myValue')
+
+  def testPropertyType(self):
+    prefs = shared_prefs.SharedPrefs(
+        self.device, 'com.some.package', 'prefs.xml')
+    prefs.SetInt('myValue', 444)
+    self.assertEquals(prefs.PropertyType('myValue'), 'int')
+    with self.assertRaises(TypeError):
+      prefs.GetString('myValue')
+    with self.assertRaises(TypeError):
+      prefs.SetString('myValue', 'hello')
+
+  def testLoad(self):
+    prefs = shared_prefs.SharedPrefs(
+        self.device, 'com.some.package', 'prefs.xml')
+    self.assertEquals(len(prefs), 0)  # collection is empty before loading
+    prefs.Load()
+    self.assertEquals(len(prefs), len(self.expected_data))
+    self.assertEquals(prefs.AsDict(), self.expected_data)
+    self.assertFalse(prefs.changed)
+
+  def testClear(self):
+    prefs = shared_prefs.SharedPrefs(
+        self.device, 'com.some.package', 'prefs.xml')
+    prefs.Load()
+    self.assertEquals(prefs.AsDict(), self.expected_data)
+    self.assertFalse(prefs.changed)
+    prefs.Clear()
+    self.assertEquals(len(prefs), 0)  # collection is empty now
+    self.assertTrue(prefs.changed)
+
+  def testCommit(self):
+    prefs = shared_prefs.SharedPrefs(
+        self.device, 'com.some.package', 'other_prefs.xml')
+    self.assertFalse(self.device.FileExists(prefs.path))  # file does not exist
+    prefs.Load()
+    self.assertEquals(len(prefs), 0)  # file did not exist, collection is empty
+    prefs.SetInt('magicNumber', 42)
+    prefs.SetFloat('myMetric', 3.14)
+    prefs.SetLong('bigNumber', 6000000000)
+    prefs.SetStringSet('apps', ['gmail', 'chrome', 'music'])
+    self.assertFalse(self.device.FileExists(prefs.path))  # still does not exist
+    self.assertTrue(prefs.changed)
+    prefs.Commit()
+    self.assertTrue(self.device.FileExists(prefs.path))  # should exist now
+    self.device.KillAll.assert_called_once_with(prefs.package, exact=True,
+                                                as_root=True, quiet=True)
+    self.assertFalse(prefs.changed)
+
+    prefs = shared_prefs.SharedPrefs(
+        self.device, 'com.some.package', 'other_prefs.xml')
+    self.assertEquals(len(prefs), 0)  # collection is empty before loading
+    prefs.Load()
+    self.assertEquals(prefs.AsDict(), {
+        'magicNumber': 42,
+        'myMetric': 3.14,
+        'bigNumber': 6000000000,
+        'apps': ['gmail', 'chrome', 'music']})  # data survived roundtrip
+
+  def testAsContextManager_onlyReads(self):
+    with shared_prefs.SharedPrefs(
+        self.device, 'com.some.package', 'prefs.xml') as prefs:
+      self.assertEquals(prefs.AsDict(), self.expected_data)  # loaded and ready
+    self.assertEquals(self.device.WriteFile.call_args_list, [])  # did not write
+
+  def testAsContextManager_readAndWrite(self):
+    with shared_prefs.SharedPrefs(
+        self.device, 'com.some.package', 'prefs.xml') as prefs:
+      prefs.SetBoolean('featureEnabled', True)
+      prefs.Remove('someHashValue')
+      prefs.SetString('newString', 'hello')
+
+    self.assertTrue(self.device.WriteFile.called)  # did write
+    with shared_prefs.SharedPrefs(
+        self.device, 'com.some.package', 'prefs.xml') as prefs:
+      # changes persisted
+      self.assertTrue(prefs.GetBoolean('featureEnabled'))
+      self.assertFalse(prefs.HasProperty('someHashValue'))
+      self.assertEquals(prefs.GetString('newString'), 'hello')
+      self.assertTrue(prefs.HasProperty('databaseVersion'))  # still there
+
+  def testAsContextManager_commitAborted(self):
+    with self.assertRaises(TypeError):
+      with shared_prefs.SharedPrefs(
+          self.device, 'com.some.package', 'prefs.xml') as prefs:
+        prefs.SetBoolean('featureEnabled', True)
+        prefs.Remove('someHashValue')
+        prefs.SetString('newString', 'hello')
+        prefs.SetInt('newString', 123)  # oops!
+
+    self.assertEquals(self.device.WriteFile.call_args_list, [])  # did not write
+    with shared_prefs.SharedPrefs(
+        self.device, 'com.some.package', 'prefs.xml') as prefs:
+      # contents were not modified
+      self.assertEquals(prefs.AsDict(), self.expected_data)
+
+if __name__ == '__main__':
+  logging.getLogger().setLevel(logging.DEBUG)
+  unittest.main(verbosity=2)
diff --git a/catapult/devil/devil/android/sdk/split_select.py b/catapult/devil/devil/android/sdk/split_select.py
new file mode 100644
index 0000000..6c3d231
--- /dev/null
+++ b/catapult/devil/devil/android/sdk/split_select.py
@@ -0,0 +1,63 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This module wraps Android's split-select tool."""
+
+from devil.android.sdk import build_tools
+from devil.utils import cmd_helper
+from devil.utils import lazy
+
+
+_split_select_path = lazy.WeakConstant(
+    lambda: build_tools.GetPath('split-select'))
+
+
+def _RunSplitSelectCmd(args):
+  """Runs a split-select command.
+
+  Args:
+    args: A list of arguments for split-select.
+
+  Returns:
+    The output of the command.
+  """
+  cmd = [_split_select_path.read()] + args
+  status, output = cmd_helper.GetCmdStatusAndOutput(cmd)
+  if status != 0:
+    raise Exception('Failed running command "%s" with output "%s".' %
+                    (' '.join(cmd), output))
+  return output
+
+
+def _SplitConfig(device, allow_cached_props=False):
+  """Returns a config specifying which APK splits are required by the device.
+
+  Args:
+    device: A DeviceUtils object.
+    allow_cached_props: Whether to use cached values for device properties.
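+
+  Returns:
+    A config string of the form '<language>-r<country>-<density>:<abi>',
+    e.g. 'en-rUS-480:armeabi-v7a' (values are illustrative).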
+  """
+  return ('%s-r%s-%s:%s' %
+          (device.GetLanguage(cache=allow_cached_props),
+           device.GetCountry(cache=allow_cached_props),
+           device.screen_density,
+           device.product_cpu_abi))
+
+
+def SelectSplits(device, base_apk, split_apks, allow_cached_props=False):
+  """Determines which APK splits the device requires.
+
+  Args:
+    device: A DeviceUtils object.
+    base_apk: The path of the base APK.
+    split_apks: A list of paths of APK splits.
+    allow_cached_props: Whether to use cached values for device properties.
+
+  Returns:
+    The list of APK splits that the device requires.
+  """
+  config = _SplitConfig(device, allow_cached_props=allow_cached_props)
+  args = ['--target', config, '--base', base_apk]
+  for split in split_apks:
+    args.extend(['--split', split])
+  return _RunSplitSelectCmd(args).splitlines()
diff --git a/catapult/devil/devil/android/sdk/version_codes.py b/catapult/devil/devil/android/sdk/version_codes.py
new file mode 100644
index 0000000..410379b
--- /dev/null
+++ b/catapult/devil/devil/android/sdk/version_codes.py
@@ -0,0 +1,18 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Android SDK version codes.
+
+http://developer.android.com/reference/android/os/Build.VERSION_CODES.html
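+
+Example, assuming |device| is a DeviceUtils instance that exposes the SDK
+level as build_version_sdk:
+
+  if device.build_version_sdk >= version_codes.MARSHMALLOW:
+    pass  # Marshmallow-or-newer behavior, e.g. runtime permissions.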
+"""
+
+JELLY_BEAN = 16
+JELLY_BEAN_MR1 = 17
+JELLY_BEAN_MR2 = 18
+KITKAT = 19
+KITKAT_WATCH = 20
+LOLLIPOP = 21
+LOLLIPOP_MR1 = 22
+MARSHMALLOW = 23
+
diff --git a/catapult/devil/devil/android/tools/__init__.py b/catapult/devil/devil/android/tools/__init__.py
new file mode 100644
index 0000000..50b23df
--- /dev/null
+++ b/catapult/devil/devil/android/tools/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/catapult/devil/devil/android/tools/adb_run_shell_cmd.py b/catapult/devil/devil/android/tools/adb_run_shell_cmd.py
new file mode 100755
index 0000000..f995d27
--- /dev/null
+++ b/catapult/devil/devil/android/tools/adb_run_shell_cmd.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
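+"""Runs an adb shell command on one or more attached devices.
+
+Example invocation (the shell command shown is illustrative):
+
+  adb_run_shell_cmd.py getprop ro.build.fingerprint --json-output props.json
+"""
+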
+import argparse
+import json
+import logging
+import sys
+
+from devil.android import device_blacklist
+from devil.android import device_errors
+from devil.android import device_utils
+from devil.utils import run_tests_helper
+
+
+def main():
+  parser = argparse.ArgumentParser(
+      description='Run an adb shell command on selected devices')
+  parser.add_argument('cmd', help='Adb shell command to run.', nargs='+')
+  parser.add_argument('-d', '--device', action='append', dest='devices',
+                      help='Device to run cmd on. Runs on all devices if not '
+                           'specified. Set multiple times for multiple devices')
+  parser.add_argument('-v', '--verbose', default=0, action='count',
+                      help='Verbose level (multiple times for more)')
+  parser.add_argument('--blacklist-file', help='Device blacklist file.')
+  parser.add_argument('--as-root', action='store_true', help='Run as root.')
+  parser.add_argument('--json-output',
+                      help='File to dump json output to.')
+  args = parser.parse_args()
+  run_tests_helper.SetLogLevel(args.verbose)
+
+  args.blacklist_file = device_blacklist.Blacklist(
+      args.blacklist_file) if args.blacklist_file else None
+  attached_devices = device_utils.DeviceUtils.HealthyDevices(
+      blacklist=args.blacklist_file)
+
+  if args.devices:
+    selected_devices = []
+    attached_devices = {str(d): d for d in attached_devices}
+    for serial in args.devices:
+      if serial in attached_devices:
+        selected_devices.append(attached_devices[serial])
+      else:
+        logging.warning('Specified device %s not found.', serial)
+  else:
+    selected_devices = attached_devices
+
+  if not selected_devices:
+    raise device_errors.NoDevicesError
+
+  p_out = (device_utils.DeviceUtils.parallel(selected_devices).RunShellCommand(
+      args.cmd, large_output=True, as_root=args.as_root, check_return=True)
+      .pGet(None))
+
+  data = {}
+  for device, output in zip(selected_devices, p_out):
+    for line in output:
+      print '%s: %s' % (device, line)
+    data[str(device)] = output
+
+  if args.json_output:
+    with open(args.json_output, 'w') as f:
+      json.dump(data, f)
+
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/catapult/devil/devil/android/tools/flash_device.py b/catapult/devil/devil/android/tools/flash_device.py
new file mode 100755
index 0000000..50ed696
--- /dev/null
+++ b/catapult/devil/devil/android/tools/flash_device.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import logging
+import os
+import sys
+
+if __name__ == '__main__':
+  sys.path.append(os.path.abspath(os.path.join(
+      os.path.dirname(__file__), '..', '..', '..')))
+from devil.android import device_blacklist
+from devil.android import device_utils
+from devil.android import fastboot_utils
+from devil.android.tools import script_common
+from devil.constants import exit_codes
+from devil.utils import run_tests_helper
+
+
+def main():
+  parser = argparse.ArgumentParser()
+  parser.add_argument('build_path', help='Path to android build.')
+  parser.add_argument('-d', '--device', dest='devices', action='append',
+                      help='Device(s) to flash.')
+  parser.add_argument('-v', '--verbose', default=0, action='count',
+                      help='Verbose level (multiple times for more)')
+  parser.add_argument('-w', '--wipe', action='store_true',
+                      help='If set, wipes user data.')
+  parser.add_argument('--blacklist-file', help='Device blacklist file.')
+  args = parser.parse_args()
+  run_tests_helper.SetLogLevel(args.verbose)
+
+  if args.blacklist_file:
+    blacklist = device_blacklist.Blacklist(args.blacklist_file).Read()
+    if blacklist:
+      logging.critical('Device(s) in blacklist, not flashing devices:')
+      for key in blacklist:
+        logging.critical('  %s', key)
+      return exit_codes.INFRA
+
+  flashed_devices = []
+  failed_devices = []
+
+  def flash(device):
+    fastboot = fastboot_utils.FastbootUtils(device)
+    try:
+      fastboot.FlashDevice(args.build_path, wipe=args.wipe)
+      flashed_devices.append(device)
+    except Exception:  # pylint: disable=broad-except
+      logging.exception('Device %s failed to flash.', str(device))
+      failed_devices.append(device)
+
+  devices = script_common.GetDevices(args.devices, args.blacklist_file)
+  device_utils.DeviceUtils.parallel(devices).pMap(flash)
+
+  if flashed_devices:
+    logging.info('The following devices were flashed:')
+    logging.info('  %s', ' '.join(str(d) for d in flashed_devices))
+  if failed_devices:
+    logging.critical('The following devices failed to flash:')
+    logging.critical('  %s', ' '.join(str(d) for d in failed_devices))
+    return exit_codes.INFRA
+  return 0
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/catapult/devil/devil/android/tools/screenshot.py b/catapult/devil/devil/android/tools/screenshot.py
new file mode 100755
index 0000000..326bb16
--- /dev/null
+++ b/catapult/devil/devil/android/tools/screenshot.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Takes a screenshot from an Android device."""
+
+import argparse
+import logging
+import os
+import sys
+
+if __name__ == '__main__':
+  sys.path.append(os.path.abspath(os.path.join(
+      os.path.dirname(__file__), '..', '..', '..')))
+from devil.android import device_utils
+from devil.android.tools import script_common
+
+
+def main():
+  # Parse options.
+  parser = argparse.ArgumentParser(description=__doc__)
+  parser.add_argument('-d', '--device', dest='devices', action='append',
+                      help='Serial number of Android device to use.')
+  parser.add_argument('--blacklist-file', help='Device blacklist JSON file.')
+  parser.add_argument('-f', '--file', metavar='FILE',
+                      help='Save result to file instead of generating a '
+                           'timestamped file name.')
+  parser.add_argument('-v', '--verbose', action='store_true',
+                      help='Verbose logging.')
+  parser.add_argument('host_file', nargs='?',
+                      help='File to which the screenshot will be saved.')
+
+  args = parser.parse_args()
+
+  host_file = args.host_file or args.file
+
+  if args.verbose:
+    logging.getLogger().setLevel(logging.DEBUG)
+
+  devices = script_common.GetDevices(args.devices, args.blacklist_file)
+
+  def screenshot(device):
+    f = None
+    if host_file:
+      root, ext = os.path.splitext(host_file)
+      f = '%s_%s%s' % (root, str(device), ext)
+    f = device.TakeScreenshot(f)
+    print 'Screenshot for device %s written to %s' % (
+        str(device), os.path.abspath(f))
+
+  device_utils.DeviceUtils.parallel(devices).pMap(screenshot)
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/catapult/devil/devil/android/tools/script_common.py b/catapult/devil/devil/android/tools/script_common.py
new file mode 100644
index 0000000..eb91cdc
--- /dev/null
+++ b/catapult/devil/devil/android/tools/script_common.py
@@ -0,0 +1,28 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from devil.android import device_blacklist
+from devil.android import device_errors
+from devil.android import device_utils
+
+
+def GetDevices(requested_devices, blacklist_file):
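+  """Returns a list of healthy DeviceUtils instances to operate on.
+
+  Args:
+    requested_devices: A list of requested device serials, or None to use all
+      healthy attached devices.
+    blacklist_file: Path to a device blacklist file, or None.
+
+  Raises:
+    NoDevicesError: If no healthy devices are attached.
+    DeviceUnreachableError: If any requested serial is not among the healthy
+      attached devices.
+  """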
+  blacklist = (device_blacklist.Blacklist(blacklist_file)
+               if blacklist_file
+               else None)
+
+  devices = device_utils.DeviceUtils.HealthyDevices(blacklist)
+  if not devices:
+    raise device_errors.NoDevicesError()
+  elif requested_devices:
+    requested = set(requested_devices)
+    available = set(str(d) for d in devices)
+    missing = requested.difference(available)
+    if missing:
+      raise device_errors.DeviceUnreachableError(next(iter(missing)))
+    return sorted(device_utils.DeviceUtils(d)
+                  for d in available.intersection(requested))
+  else:
+    return devices
+
diff --git a/catapult/devil/devil/android/tools/script_common_test.py b/catapult/devil/devil/android/tools/script_common_test.py
new file mode 100755
index 0000000..a226764
--- /dev/null
+++ b/catapult/devil/devil/android/tools/script_common_test.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import sys
+import unittest
+
+from devil import devil_env
+from devil.android import device_errors
+from devil.android import device_utils
+from devil.android.tools import script_common
+
+with devil_env.SysPath(devil_env.PYMOCK_PATH):
+  import mock  # pylint: disable=import-error
+
+
+class ScriptCommonTest(unittest.TestCase):
+
+  def testGetDevices_noSpecs(self):
+    devices = [
+        device_utils.DeviceUtils('123'),
+        device_utils.DeviceUtils('456'),
+    ]
+    with mock.patch('devil.android.device_utils.DeviceUtils.HealthyDevices',
+                    return_value=devices):
+      self.assertEquals(
+          devices,
+          script_common.GetDevices(None, None))
+
+  def testGetDevices_withDevices(self):
+    devices = [
+        device_utils.DeviceUtils('123'),
+        device_utils.DeviceUtils('456'),
+    ]
+    with mock.patch('devil.android.device_utils.DeviceUtils.HealthyDevices',
+                    return_value=devices):
+      self.assertEquals(
+          [device_utils.DeviceUtils('456')],
+          script_common.GetDevices(['456'], None))
+
+  def testGetDevices_missingDevice(self):
+    with mock.patch('devil.android.device_utils.DeviceUtils.HealthyDevices',
+                    return_value=[device_utils.DeviceUtils('123')]):
+      with self.assertRaises(device_errors.DeviceUnreachableError):
+        script_common.GetDevices(['456'], None)
+
+  def testGetDevices_noDevices(self):
+    with mock.patch('devil.android.device_utils.DeviceUtils.HealthyDevices',
+                    return_value=[]):
+      with self.assertRaises(device_errors.NoDevicesError):
+        script_common.GetDevices(None, None)
+
+
+if __name__ == '__main__':
+  sys.exit(unittest.main())
+
diff --git a/catapult/devil/devil/android/tools/video_recorder.py b/catapult/devil/devil/android/tools/video_recorder.py
new file mode 100755
index 0000000..bcc9a75
--- /dev/null
+++ b/catapult/devil/devil/android/tools/video_recorder.py
@@ -0,0 +1,173 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Captures a video from an Android device."""
+
+import argparse
+import logging
+import os
+import threading
+import time
+import sys
+
+if __name__ == '__main__':
+  sys.path.append(os.path.abspath(os.path.join(
+      os.path.dirname(__file__), '..', '..', '..')))
+from devil.android import device_signal
+from devil.android import device_utils
+from devil.android.tools import script_common
+from devil.utils import cmd_helper
+from devil.utils import reraiser_thread
+from devil.utils import timeout_retry
+
+
+class VideoRecorder(object):
+  """Records a screen capture video from an Android Device (KitKat or newer)."""
+
+  def __init__(self, device, megabits_per_second=4, size=None,
+               rotate=False):
+    """Creates a VideoRecorder instance.
+
+    Args:
+      device: DeviceUtils instance.
+      megabits_per_second: Video bitrate in megabits per second. Allowed range
+                           from 0.1 to 100 mbps.
+      size: Video frame size tuple (width, height) or None to use the device
+            default.
+      rotate: If True, the video will be rotated 90 degrees.
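+
+    Example (assumes |device| is a connected DeviceUtils instance):
+
+      recorder = VideoRecorder(device, megabits_per_second=8)
+      recorder.Start()
+      ...  # Exercise the device while recording.
+      recorder.Stop()
+      host_path = recorder.Pull()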
+    """
+    self._bit_rate = megabits_per_second * 1000 * 1000
+    self._device = device
+    self._device_file = (
+        '%s/screen-recording.mp4' % device.GetExternalStoragePath())
+    self._recorder_thread = None
+    self._rotate = rotate
+    self._size = size
+    self._started = threading.Event()
+
+  def __enter__(self):
+    self.Start()
+
+  def Start(self, timeout=None):
+    """Start recording video."""
+    def screenrecord_started():
+      return bool(self._device.GetPids('screenrecord'))
+
+    if screenrecord_started():
+      raise Exception("Can't run multiple concurrent video captures.")
+
+    self._started.clear()
+    self._recorder_thread = reraiser_thread.ReraiserThread(self._Record)
+    self._recorder_thread.start()
+    timeout_retry.WaitFor(
+        screenrecord_started, wait_period=1, max_tries=timeout)
+    self._started.wait(timeout)
+
+  def _Record(self):
+    cmd = ['screenrecord', '--verbose', '--bit-rate', str(self._bit_rate)]
+    if self._rotate:
+      cmd += ['--rotate']
+    if self._size:
+      cmd += ['--size', '%dx%d' % self._size]
+    cmd += [self._device_file]
+    for line in self._device.adb.IterShell(
+        ' '.join(cmd_helper.SingleQuote(i) for i in cmd), None):
+      if line.startswith('Content area is '):
+        self._started.set()
+
+  def __exit__(self, _exc_type, _exc_value, _traceback):
+    self.Stop()
+
+  def Stop(self):
+    """Stop recording video."""
+    if not self._device.KillAll('screenrecord', signum=device_signal.SIGINT,
+                                quiet=True):
+      logging.warning('Nothing to kill: screenrecord was not running')
+    self._recorder_thread.join()
+
+  def Pull(self, host_file=None):
+    """Pull resulting video file from the device.
+
+    Args:
+      host_file: Path to the video file to store on the host.
+    Returns:
+      Output video file name on the host.
+    """
+    # TODO(jbudorick): Merge filename generation with the logic for doing so in
+    # DeviceUtils.
+    host_file_name = (
+        host_file
+        or 'screen-recording-%s-%s.mp4' % (
+            str(self._device),
+            time.strftime('%Y%m%dT%H%M%S', time.localtime())))
+    host_file_name = os.path.abspath(host_file_name)
+    self._device.PullFile(self._device_file, host_file_name)
+    self._device.RunShellCommand('rm -f "%s"' % self._device_file)
+    return host_file_name
+
+
+def main():
+  # Parse options.
+  parser = argparse.ArgumentParser(description=__doc__)
+  parser.add_argument('-d', '--device', dest='devices', action='append',
+                      help='Serial number of Android device to use.')
+  parser.add_argument('--blacklist-file', help='Device blacklist JSON file.')
+  parser.add_argument('-f', '--file', metavar='FILE',
+                      help='Save result to file instead of generating a '
+                           'timestamped file name.')
+  parser.add_argument('-v', '--verbose', action='store_true',
+                      help='Verbose logging.')
+  parser.add_argument('-b', '--bitrate', default=4, type=float,
+                      help='Bitrate in megabits/s, from 0.1 to 100 mbps, '
+                           '%(default)s mbps by default.')
+  parser.add_argument('-r', '--rotate', action='store_true',
+                      help='Rotate video by 90 degrees.')
+  parser.add_argument('-s', '--size', metavar='WIDTHxHEIGHT',
+                      help='Frame size to use instead of the device '
+                           'screen size.')
+  parser.add_argument('host_file', nargs='?',
+                      help='File to which the video capture will be written.')
+
+  args = parser.parse_args()
+
+  host_file = args.host_file or args.file
+
+  if args.verbose:
+    logging.getLogger().setLevel(logging.DEBUG)
+
+  size = (tuple(int(i) for i in args.size.split('x'))
+          if args.size
+          else None)
+
+  def record_video(device, stop_recording):
+    recorder = VideoRecorder(
+        device, megabits_per_second=args.bitrate, size=size, rotate=args.rotate)
+    with recorder:
+      stop_recording.wait()
+
+    f = None
+    if host_file:
+      root, ext = os.path.splitext(host_file)
+      f = '%s_%s%s' % (root, str(device), ext)
+    f = recorder.Pull(f)
+    print 'Video written to %s' % os.path.abspath(f)
+
+  parallel_devices = device_utils.DeviceUtils.parallel(
+      script_common.GetDevices(args.devices, args.blacklist_file),
+      async=True)
+  stop_recording = threading.Event()
+  running_recording = parallel_devices.pMap(record_video, stop_recording)
+  print 'Recording. Press Enter to stop.',
+  sys.stdout.flush()
+  raw_input()
+  stop_recording.set()
+
+  running_recording.pGet(None)
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/catapult/devil/devil/android/valgrind_tools/__init__.py b/catapult/devil/devil/android/valgrind_tools/__init__.py
new file mode 100644
index 0000000..0182d4c
--- /dev/null
+++ b/catapult/devil/devil/android/valgrind_tools/__init__.py
@@ -0,0 +1,21 @@
+# Copyright (c) 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""
+Classes in this package define additional actions that need to be taken to run a
+test under some kind of runtime error detection tool.
+
+The interface is intended to be used as follows.
+
+1. For tests that simply run a native process (i.e. no activity is spawned):
+
+Call tool.CopyFiles(device).
+Prepend test command line with tool.GetTestWrapper().
+
+2. For tests that spawn an activity:
+
+Call tool.CopyFiles(device).
+Call tool.SetupEnvironment().
+Run the test as usual.
+Call tool.CleanUpEnvironment().
+"""
diff --git a/catapult/devil/devil/android/valgrind_tools/base_tool.py b/catapult/devil/devil/android/valgrind_tools/base_tool.py
new file mode 100644
index 0000000..2e6e9af
--- /dev/null
+++ b/catapult/devil/devil/android/valgrind_tools/base_tool.py
@@ -0,0 +1,53 @@
+# Copyright (c) 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class BaseTool(object):
+  """A tool that does nothing."""
+  # pylint: disable=R0201
+
+  def __init__(self):
+    """Does nothing."""
+    pass
+
+  def GetTestWrapper(self):
+    """Returns a string that is to be prepended to the test command line."""
+    return ''
+
+  def GetUtilWrapper(self):
+    """Returns the wrapper name for the utilities.
+
+    Returns:
+       A string that is to be prepended to the command line of utility
+    processes (forwarder, etc.).
+    """
+    return ''
+
+  @classmethod
+  def CopyFiles(cls, device):
+    """Copies tool-specific files to the device, create directories, etc."""
+    pass
+
+  def SetupEnvironment(self):
+    """Sets up the system environment for a test.
+
+    This is a good place to set system properties.
+    """
+    pass
+
+  def CleanUpEnvironment(self):
+    """Cleans up environment."""
+    pass
+
+  def GetTimeoutScale(self):
+    """Returns a multiplier that should be applied to timeout values."""
+    return 1.0
+
+  def NeedsDebugInfo(self):
+    """Whether this tool requires debug info.
+
+    Returns:
+      True if this tool cannot work with stripped binaries.
+    """
+    return False
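+
+
+# Illustrative sketch (not part of this change): a concrete tool overrides only
+# the hooks it needs, e.g. a hypothetical wrapper binary that slows tests down:
+#
+# class MyWrapperTool(BaseTool):
+#
+#   def GetTestWrapper(self):
+#     return 'my_wrapper --'  # prepended to the test command line
+#
+#   def GetTimeoutScale(self):
+#     return 20.0  # tests run much slower under the wrapper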
diff --git a/catapult/devil/devil/base_error.py b/catapult/devil/devil/base_error.py
new file mode 100644
index 0000000..dadf4da
--- /dev/null
+++ b/catapult/devil/devil/base_error.py
@@ -0,0 +1,17 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class BaseError(Exception):
+  """Base error for all test runner errors."""
+
+  def __init__(self, message, is_infra_error=False):
+    super(BaseError, self).__init__(message)
+    self._is_infra_error = is_infra_error
+
+  @property
+  def is_infra_error(self):
+    """Property to indicate if error was caused by an infrastructure issue."""
+    return self._is_infra_error
+
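+# Illustrative sketch (not part of this change): callers can flag failures that
+# are caused by the infrastructure rather than the code under test, e.g.
+#
+#   raise BaseError('adb server failed to start', is_infra_error=True)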
diff --git a/catapult/devil/devil/constants/__init__.py b/catapult/devil/devil/constants/__init__.py
new file mode 100644
index 0000000..50b23df
--- /dev/null
+++ b/catapult/devil/devil/constants/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/catapult/devil/devil/constants/exit_codes.py b/catapult/devil/devil/constants/exit_codes.py
new file mode 100644
index 0000000..aaeca4a
--- /dev/null
+++ b/catapult/devil/devil/constants/exit_codes.py
@@ -0,0 +1,9 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Common exit codes used by devil."""
+
+ERROR = 1
+INFRA = 87
+WARNING = 88
diff --git a/catapult/devil/devil/devil_dependencies.json b/catapult/devil/devil/devil_dependencies.json
new file mode 100644
index 0000000..4c185c0
--- /dev/null
+++ b/catapult/devil/devil/devil_dependencies.json
@@ -0,0 +1,117 @@
+{
+  "config_type": "BaseConfig",
+  "dependencies": {
+    "aapt": {
+      "cloud_storage_bucket": "chromium-telemetry",
+      "cloud_storage_base_folder": "binary_dependencies",
+      "file_info": {
+        "linux2_x86_64": {
+          "cloud_storage_hash": "7448de3cb5e834afdedeaad8b40ba63ac53f3dc4",
+          "download_path": "../bin/deps/linux2/x86_64/bin/aapt"
+        }
+      }
+    },
+    "adb": {
+      "cloud_storage_bucket": "chromium-telemetry",
+      "cloud_storage_base_folder": "binary_dependencies",
+      "file_info": {
+        "linux2_x86_64": {
+          "cloud_storage_hash": "0c2043552619c8ec8bb5d986ba75703a598611fc",
+          "download_path": "../bin/deps/linux2/x86_64/bin/adb"
+        }
+      }
+    },
+    "android_build_tools_libc++": {
+      "cloud_storage_bucket": "chromium-telemetry",
+      "cloud_storage_base_folder": "binary_dependencies",
+      "file_info": {
+        "linux2_x86_64": {
+          "cloud_storage_hash": "52d150a7ccde835f38b4337392152f3013d5f303",
+          "download_path": "../bin/deps/linux2/x86_64/lib/libc++.so"
+        }
+      }
+    },
+    "chromium_commands": {
+      "cloud_storage_bucket": "chromium-telemetry",
+      "cloud_storage_base_folder": "binary_dependencies",
+      "file_info": {
+        "linux2_x86_64": {
+          "cloud_storage_hash": "049f482f29bc34e2ed844e2e47b7609f8ffbeb4f",
+          "download_path": "../bin/deps/linux2/x86_64/lib.java/chromium_commands.dex.jar"
+        }
+      }
+    },
+    "dexdump": {
+      "cloud_storage_bucket": "chromium-telemetry",
+      "cloud_storage_base_folder": "binary_dependencies",
+      "file_info": {
+        "linux2_x86_64": {
+          "cloud_storage_hash": "38765b5b358c29003e56b1d214606ea13467b6fe",
+          "download_path": "../bin/deps/linux2/x86_64/bin/dexdump"
+        }
+      }
+    },
+    "forwarder_device": {
+      "cloud_storage_bucket": "chromium-telemetry",
+      "cloud_storage_base_folder": "binary_dependencies",
+      "file_info": {
+        "android_armeabi-v7a": {
+          "cloud_storage_hash": "4858c9e41da72ad8ff24414731feae2137229361",
+          "download_path": "../bin/deps/android/armeabi-v7a/bin/forwarder_device"
+        },
+        "android_arm64-v8a": {
+          "cloud_storage_hash": "8cbd1ac2079ee82ce5f1cf4d3e85fc1e53a8f018",
+          "download_path": "../bin/deps/android/arm64-v8a/bin/forwarder_device"
+        }
+      }
+    },
+    "forwarder_host": {
+      "cloud_storage_bucket": "chromium-telemetry",
+      "cloud_storage_base_folder": "binary_dependencies",
+      "file_info": {
+        "linux2_x86_64": {
+          "cloud_storage_hash": "b3dda9fbdd4a3fb933b64111c11070aa809c7ed4",
+          "download_path": "../bin/deps/linux2/x86_64/forwarder_host"
+        }
+      }
+    },
+    "md5sum_device": {
+      "cloud_storage_bucket": "chromium-telemetry",
+      "cloud_storage_base_folder": "binary_dependencies",
+      "file_info": {
+        "android_armeabi-v7a": {
+          "cloud_storage_hash": "c8894480be71d5e49118483d83ba7a6e0097cba6",
+          "download_path": "../bin/deps/android/armeabi-v7a/bin/md5sum_device"
+        },
+        "android_arm64-v8a": {
+          "cloud_storage_hash": "bbe410e2ffb48367ac4ca0874598d4f85fd16d9d",
+          "download_path": "../bin/deps/andorid/arm64-v8a/bin/md5sum_device"
+        },
+        "android_x86": {
+          "cloud_storage_hash": "b578a5c2c400ce39761e2558cdf2237567a57257",
+          "download_path": "../bin/deps/android/x86/bin/md5sum_device"
+        }
+      }
+    },
+    "md5sum_host": {
+      "cloud_storage_bucket": "chromium-telemetry",
+      "cloud_storage_base_folder": "binary_dependencies",
+      "file_info": {
+        "linux2_x86_64": {
+          "cloud_storage_hash": "49e36c9c4246cfebef26cbd07436c1a8343254aa",
+          "download_path": "../bin/deps/linux2/x86_64/bin/md5sum_host"
+        }
+      }
+    },
+    "split-select": {
+      "cloud_storage_bucket": "chromium-telemetry",
+      "cloud_storage_base_folder": "binary_dependencies",
+      "file_info": {
+        "linux2_x86_64": {
+          "cloud_storage_hash": "3327881fa3951a503b9467425ea8e781cdffeb9f",
+          "download_path": "../bin/deps/linux2/x86_64/bin/split-select"
+        }
+      }
+    }
+  }
+}
diff --git a/catapult/devil/devil/devil_env.py b/catapult/devil/devil/devil_env.py
new file mode 100644
index 0000000..b54e6f5
--- /dev/null
+++ b/catapult/devil/devil/devil_env.py
@@ -0,0 +1,146 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import contextlib
+import json
+import os
+import platform
+import sys
+import tempfile
+import threading
+
+CATAPULT_ROOT_PATH = os.path.abspath(os.path.join(
+    os.path.dirname(__file__), '..', '..'))
+DEPENDENCY_MANAGER_PATH = os.path.join(
+    CATAPULT_ROOT_PATH, 'dependency_manager')
+PYMOCK_PATH = os.path.join(
+    CATAPULT_ROOT_PATH, 'third_party', 'mock')
+
+
+@contextlib.contextmanager
+def SysPath(path):
+  sys.path.append(path)
+  yield
+  if sys.path[-1] != path:
+    sys.path.remove(path)
+  else:
+    sys.path.pop()
+
+with SysPath(DEPENDENCY_MANAGER_PATH):
+  import dependency_manager  # pylint: disable=import-error
+
+_ANDROID_BUILD_TOOLS = {'aapt', 'dexdump', 'split-select'}
+
+_DEVIL_DEFAULT_CONFIG = os.path.abspath(os.path.join(
+    os.path.dirname(__file__), 'devil_dependencies.json'))
+
+_LEGACY_ENVIRONMENT_VARIABLES = {
+  'ADB_PATH': {
+    'dependency_name': 'adb',
+    'platform': 'linux2_x86_64',
+  },
+  'ANDROID_SDK_ROOT': {
+    'dependency_name': 'android_sdk',
+    'platform': 'linux2_x86_64',
+  },
+}
+
+
+def _GetEnvironmentVariableConfig():
+  path_config = (
+      (os.environ.get(k), v)
+      for k, v in _LEGACY_ENVIRONMENT_VARIABLES.iteritems())
+  return {
+    'config_type': 'BaseConfig',
+    'dependencies': {
+      c['dependency_name']: {
+        'file_info': {
+          c['platform']: {
+            'local_paths': [p],
+          },
+        },
+      } for p, c in path_config if p
+    },
+  }
+
+
+class _Environment(object):
+
+  def __init__(self):
+    self._dm_init_lock = threading.Lock()
+    self._dm = None
+
+  def Initialize(self, configs=None, config_files=None):
+    """Initialize devil's environment from configuration files.
+
+    This uses all configurations provided via |configs| and |config_files|
+    to determine the locations of devil's dependencies. Configurations should
+    all take the form described by catapult_base.dependency_manager.BaseConfig.
+    If no configurations are provided, a default one will be used if available.
+
+    Args:
+      configs: An optional list of dict configurations.
+      config_files: An optional list of configuration files to load.
+    """
+
+    # Make sure we only initialize self._dm once.
+    with self._dm_init_lock:
+      if self._dm is None:
+        if configs is None:
+          configs = []
+
+        env_config = _GetEnvironmentVariableConfig()
+        if env_config:
+          configs.insert(0, env_config)
+        self._InitializeRecursive(
+            configs=configs,
+            config_files=config_files)
+        assert self._dm is not None, 'Failed to create dependency manager.'
+
+  def _InitializeRecursive(self, configs=None, config_files=None):
+    # This recurses through configs to create temporary files for each and
+    # take advantage of context managers to appropriately close those files.
+    # TODO(jbudorick): Remove this recursion if/when dependency_manager
+    # supports loading configurations directly from a dict.
+    if configs:
+      with tempfile.NamedTemporaryFile(delete=False) as next_config_file:
+        try:
+          next_config_file.write(json.dumps(configs[0]))
+          next_config_file.close()
+          self._InitializeRecursive(
+              configs=configs[1:],
+              config_files=[next_config_file.name] + (config_files or []))
+        finally:
+          if os.path.exists(next_config_file.name):
+            os.remove(next_config_file.name)
+    else:
+      config_files = config_files or []
+      if 'DEVIL_ENV_CONFIG' in os.environ:
+        config_files.append(os.environ.get('DEVIL_ENV_CONFIG'))
+      config_files.append(_DEVIL_DEFAULT_CONFIG)
+
+      self._dm = dependency_manager.DependencyManager(
+          [dependency_manager.BaseConfig(c) for c in config_files])
+
+  def FetchPath(self, dependency, arch=None, device=None):
+    if self._dm is None:
+      self.Initialize()
+    if dependency in _ANDROID_BUILD_TOOLS:
+      self.FetchPath('android_build_tools_libc++', arch=arch, device=device)
+    return self._dm.FetchPath(dependency, GetPlatform(arch, device))
+
+  def LocalPath(self, dependency, arch=None, device=None):
+    if self._dm is None:
+      self.Initialize()
+    return self._dm.LocalPath(dependency, GetPlatform(arch, device))
+
+
+def GetPlatform(arch=None, device=None):
+  if device:
+    return 'android_%s' % (arch or device.product_cpu_abi)
+  return '%s_%s' % (sys.platform, platform.machine())
+
+
+config = _Environment()
+
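+# Illustrative sketch (not part of this change): clients typically initialize
+# the module-level `config` once, optionally overriding dependency locations
+# with a dict in the BaseConfig form described above (the adb path below is a
+# made-up example):
+#
+#   from devil import devil_env
+#   devil_env.config.Initialize(configs=[{
+#       'config_type': 'BaseConfig',
+#       'dependencies': {
+#           'adb': {
+#               'file_info': {
+#                   'linux2_x86_64': {'local_paths': ['/opt/android-sdk/adb']},
+#               },
+#           },
+#       },
+#   }])
+#   adb_path = devil_env.config.FetchPath('adb')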
diff --git a/catapult/devil/devil/devil_env_test.py b/catapult/devil/devil/devil_env_test.py
new file mode 100755
index 0000000..e78221a
--- /dev/null
+++ b/catapult/devil/devil/devil_env_test.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=protected-access
+
+import logging
+import sys
+import unittest
+
+from devil import devil_env
+
+_sys_path_before = list(sys.path)
+with devil_env.SysPath(devil_env.PYMOCK_PATH):
+  _sys_path_with_pymock = list(sys.path)
+  import mock  # pylint: disable=import-error
+_sys_path_after = list(sys.path)
+
+
+class DevilEnvTest(unittest.TestCase):
+
+  def testSysPath(self):
+    self.assertEquals(_sys_path_before, _sys_path_after)
+    self.assertEquals(
+        _sys_path_before + [devil_env.PYMOCK_PATH],
+        _sys_path_with_pymock)
+
+  def testGetEnvironmentVariableConfig_configType(self):
+    with mock.patch('os.environ.get',
+                    mock.Mock(side_effect=lambda _env_var: None)):
+      env_config = devil_env._GetEnvironmentVariableConfig()
+    self.assertEquals('BaseConfig', env_config.get('config_type'))
+
+  def testGetEnvironmentVariableConfig_noEnv(self):
+    with mock.patch('os.environ.get',
+                    mock.Mock(side_effect=lambda _env_var: None)):
+      env_config = devil_env._GetEnvironmentVariableConfig()
+    self.assertEquals({}, env_config.get('dependencies'))
+
+  def testGetEnvironmentVariableConfig_adbPath(self):
+    def mock_environment(env_var):
+      return '/my/fake/adb/path' if env_var == 'ADB_PATH' else None
+
+    with mock.patch('os.environ.get',
+                    mock.Mock(side_effect=mock_environment)):
+      env_config = devil_env._GetEnvironmentVariableConfig()
+    self.assertEquals(
+        {
+          'adb': {
+            'file_info': {
+              'linux2_x86_64': {
+                'local_paths': ['/my/fake/adb/path'],
+              },
+            },
+          },
+        },
+        env_config.get('dependencies'))
+
+
+if __name__ == '__main__':
+  logging.getLogger().setLevel(logging.DEBUG)
+  unittest.main(verbosity=2)
diff --git a/catapult/devil/devil/utils/__init__.py b/catapult/devil/devil/utils/__init__.py
new file mode 100644
index 0000000..50b23df
--- /dev/null
+++ b/catapult/devil/devil/utils/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/catapult/devil/devil/utils/cmd_helper.py b/catapult/devil/devil/utils/cmd_helper.py
new file mode 100644
index 0000000..b623775
--- /dev/null
+++ b/catapult/devil/devil/utils/cmd_helper.py
@@ -0,0 +1,314 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A wrapper for subprocess to make calling shell commands easier."""
+
+import logging
+import os
+import pipes
+import select
+import signal
+import string
+import StringIO
+import subprocess
+import time
+
+# fcntl is not available on Windows.
+try:
+  import fcntl
+except ImportError:
+  fcntl = None
+
+_SafeShellChars = frozenset(string.ascii_letters + string.digits + '@%_-+=:,./')
+
+
+def SingleQuote(s):
+  """Return an shell-escaped version of the string using single quotes.
+
+  Reliably quote a string which may contain unsafe characters (e.g. space,
+  quote, or other special characters such as '$').
+
+  The returned value can be used in a shell command line as one token that gets
+  to be interpreted literally.
+
+  Args:
+    s: The string to quote.
+
+  Returns:
+    The string quoted using single quotes.
+  """
+  return pipes.quote(s)
+
+
+def DoubleQuote(s):
+  """Return an shell-escaped version of the string using double quotes.
+
+  Reliably quote a string which may contain unsafe characters (e.g. space
+  or quote characters), while retaining some shell features such as variable
+  interpolation.
+
+  The returned value can be used in a shell command line as one token that gets
+  to be further interpreted by the shell.
+
+  The set of characters that retain their special meaning may depend on the
+  shell implementation. This set usually includes: '$', '`', '\', '!', '*',
+  and '@'.
+
+  Args:
+    s: The string to quote.
+
+  Returns:
+    The string quoted using double quotes.
+  """
+  if not s:
+    return '""'
+  elif all(c in _SafeShellChars for c in s):
+    return s
+  else:
+    return '"' + s.replace('"', '\\"') + '"'
+
+
+def ShrinkToSnippet(cmd_parts, var_name, var_value):
+  """Constructs a shell snippet for a command using a variable to shrink it.
+
+  Takes into account all quoting that needs to happen.
+
+  Args:
+    cmd_parts: A list of command arguments.
+    var_name: The variable that holds var_value.
+    var_value: The string to replace in cmd_parts with $var_name
+
+  Returns:
+    A shell snippet that does not include setting the variable.
+  """
+  def shrink(value):
+    parts = (x and SingleQuote(x) for x in value.split(var_value))
+    with_substitutions = ('"$%s"' % var_name).join(parts)
+    return with_substitutions or "''"
+
+  return ' '.join(shrink(part) for part in cmd_parts)
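+
+# Illustrative sketch (not part of this change): given a path that repeats in a
+# command, ShrinkToSnippet substitutes a shell variable for it, e.g.
+#
+#   ShrinkToSnippet(['cp', '/sdcard/foo', '/sdcard/foo.bak'], 'd', '/sdcard')
+#
+# produces a snippet along the lines of: cp "$d"/foo "$d"/foo.bak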
+
+
+def Popen(args, stdout=None, stderr=None, shell=None, cwd=None, env=None):
+  return subprocess.Popen(
+      args=args, cwd=cwd, stdout=stdout, stderr=stderr,
+      shell=shell, close_fds=True, env=env,
+      preexec_fn=lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL))
+
+
+def Call(args, stdout=None, stderr=None, shell=None, cwd=None, env=None):
+  pipe = Popen(args, stdout=stdout, stderr=stderr, shell=shell, cwd=cwd,
+               env=env)
+  pipe.communicate()
+  return pipe.wait()
+
+
+def RunCmd(args, cwd=None):
+  """Opens a subprocess to execute a program and returns its return value.
+
+  Args:
+    args: A string or a sequence of program arguments. The program to execute is
+      the string or the first item in the args sequence.
+    cwd: If not None, the subprocess's current directory will be changed to
+      |cwd| before it's executed.
+
+  Returns:
+    Return code from the command execution.
+  """
+  logging.info(str(args) + ' ' + (cwd or ''))
+  return Call(args, cwd=cwd)
+
+
+def GetCmdOutput(args, cwd=None, shell=False):
+  """Open a subprocess to execute a program and returns its output.
+
+  Args:
+    args: A string or a sequence of program arguments. The program to execute is
+      the string or the first item in the args sequence.
+    cwd: If not None, the subprocess's current directory will be changed to
+      |cwd| before it's executed.
+    shell: Whether to execute args as a shell command.
+
+  Returns:
+    Captures and returns the command's stdout.
+    Prints the command's stderr to logger (which defaults to stdout).
+  """
+  (_, output) = GetCmdStatusAndOutput(args, cwd, shell)
+  return output
+
+
+def _ValidateAndLogCommand(args, cwd, shell):
+  if isinstance(args, basestring):
+    if not shell:
+      raise Exception('string args must be run with shell=True')
+  else:
+    if shell:
+      raise Exception('array args must be run with shell=False')
+    args = ' '.join(SingleQuote(c) for c in args)
+  if cwd is None:
+    cwd = ''
+  else:
+    cwd = ':' + cwd
+  logging.info('[host]%s> %s', cwd, args)
+  return args
+
+
+def GetCmdStatusAndOutput(args, cwd=None, shell=False):
+  """Executes a subprocess and returns its exit code and output.
+
+  Args:
+    args: A string or a sequence of program arguments. The program to execute is
+      the string or the first item in the args sequence.
+    cwd: If not None, the subprocess's current directory will be changed to
+      |cwd| before it's executed.
+    shell: Whether to execute args as a shell command. Must be True if args
+      is a string and False if args is a sequence.
+
+  Returns:
+    The 2-tuple (exit code, output).
+  """
+  status, stdout, stderr = GetCmdStatusOutputAndError(
+      args, cwd=cwd, shell=shell)
+
+  if stderr:
+    logging.critical(stderr)
+  if len(stdout) > 4096:
+    logging.debug('Truncated output:')
+  logging.debug(stdout[:4096])
+  return (status, stdout)
+
+
+def GetCmdStatusOutputAndError(args, cwd=None, shell=False):
+  """Executes a subprocess and returns its exit code, output, and errors.
+
+  Args:
+    args: A string or a sequence of program arguments. The program to execute is
+      the string or the first item in the args sequence.
+    cwd: If not None, the subprocess's current directory will be changed to
+      |cwd| before it's executed.
+    shell: Whether to execute args as a shell command. Must be True if args
+      is a string and False if args is a sequence.
+
+  Returns:
+    The 3-tuple (exit code, output, error).
+  """
+  _ValidateAndLogCommand(args, cwd, shell)
+  pipe = Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+               shell=shell, cwd=cwd)
+  stdout, stderr = pipe.communicate()
+  return (pipe.returncode, stdout, stderr)
+
+
+class TimeoutError(Exception):
+  """Module-specific timeout exception."""
+
+  def __init__(self, output=None):
+    super(TimeoutError, self).__init__()
+    self._output = output
+
+  @property
+  def output(self):
+    return self._output
+
+
+def _IterProcessStdout(process, timeout=None, buffer_size=4096,
+                       poll_interval=1):
+  assert fcntl, 'fcntl module is required'
+  try:
+    # Enable non-blocking reads from the child's stdout.
+    child_fd = process.stdout.fileno()
+    fl = fcntl.fcntl(child_fd, fcntl.F_GETFL)
+    fcntl.fcntl(child_fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
+
+    end_time = (time.time() + timeout) if timeout else None
+    while True:
+      if end_time and time.time() > end_time:
+        raise TimeoutError()
+      read_fds, _, _ = select.select([child_fd], [], [], poll_interval)
+      if child_fd in read_fds:
+        data = os.read(child_fd, buffer_size)
+        if not data:
+          break
+        yield data
+      if process.poll() is not None:
+        break
+  finally:
+    try:
+      # Make sure the process doesn't stick around if we fail with an
+      # exception.
+      process.kill()
+    except OSError:
+      pass
+    process.wait()
+
+
+def GetCmdStatusAndOutputWithTimeout(args, timeout, cwd=None, shell=False,
+                                     logfile=None):
+  """Executes a subprocess with a timeout.
+
+  Args:
+    args: List of arguments to the program, the program to execute is the first
+      element.
+    timeout: the timeout in seconds or None to wait forever.
+    cwd: If not None, the subprocess's current directory will be changed to
+      |cwd| before it's executed.
+    shell: Whether to execute args as a shell command. Must be True if args
+      is a string and False if args is a sequence.
+    logfile: Optional file-like object that will receive output from the
+      command as it is running.
+
+  Returns:
+    The 2-tuple (exit code, output).
+  """
+  _ValidateAndLogCommand(args, cwd, shell)
+  output = StringIO.StringIO()
+  process = Popen(args, cwd=cwd, shell=shell, stdout=subprocess.PIPE,
+                  stderr=subprocess.STDOUT)
+  try:
+    for data in _IterProcessStdout(process, timeout=timeout):
+      if logfile:
+        logfile.write(data)
+      output.write(data)
+  except TimeoutError:
+    raise TimeoutError(output.getvalue())
+
+  return process.returncode, output.getvalue()
+
+
+def IterCmdOutputLines(args, timeout=None, cwd=None, shell=False,
+                       check_status=True):
+  """Executes a subprocess and continuously yields lines from its output.
+
+  Args:
+    args: List of arguments to the program, the program to execute is the first
+      element.
+    timeout: The timeout in seconds, or None to wait forever.
+    cwd: If not None, the subprocess's current directory will be changed to
+      |cwd| before it's executed.
+    shell: Whether to execute args as a shell command. Must be True if args
+      is a string and False if args is a sequence.
+    check_status: A boolean indicating whether to check the exit status of the
+      process after all output has been read.
+
+  Yields:
+    The output of the subprocess, line by line.
+
+  Raises:
+    CalledProcessError if check_status is True and the process exited with a
+      non-zero exit status.
+  """
+  cmd = _ValidateAndLogCommand(args, cwd, shell)
+  process = Popen(args, cwd=cwd, shell=shell, stdout=subprocess.PIPE,
+                  stderr=subprocess.STDOUT)
+  buffer_output = ''
+  for data in _IterProcessStdout(process, timeout=timeout):
+    buffer_output += data
+    has_incomplete_line = buffer_output[-1] not in '\r\n'
+    lines = buffer_output.splitlines()
+    buffer_output = lines.pop() if has_incomplete_line else ''
+    for line in lines:
+      yield line
+  if buffer_output:
+    yield buffer_output
+  if check_status and process.returncode:
+    raise subprocess.CalledProcessError(process.returncode, cmd)
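+
+
+# Illustrative sketch (not part of this change): stream a long-running
+# command's output line by line, giving up if it runs longer than 30 seconds:
+#
+#   try:
+#     for line in IterCmdOutputLines(['adb', 'logcat', '-d'], timeout=30):
+#       if 'READY' in line:
+#         break
+#   except TimeoutError:
+#     pass  # no marker seen within the 30 second budget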
diff --git a/catapult/devil/devil/utils/cmd_helper_test.py b/catapult/devil/devil/utils/cmd_helper_test.py
new file mode 100755
index 0000000..a04f1ad
--- /dev/null
+++ b/catapult/devil/devil/utils/cmd_helper_test.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Tests for the cmd_helper module."""
+
+import unittest
+import subprocess
+
+from devil.utils import cmd_helper
+
+
+class CmdHelperSingleQuoteTest(unittest.TestCase):
+
+  def testSingleQuote_basic(self):
+    self.assertEquals('hello',
+                      cmd_helper.SingleQuote('hello'))
+
+  def testSingleQuote_withSpaces(self):
+    self.assertEquals("'hello world'",
+                      cmd_helper.SingleQuote('hello world'))
+
+  def testSingleQuote_withUnsafeChars(self):
+    self.assertEquals("""'hello'"'"'; rm -rf /'""",
+                      cmd_helper.SingleQuote("hello'; rm -rf /"))
+
+  def testSingleQuote_dontExpand(self):
+    test_string = 'hello $TEST_VAR'
+    cmd = 'TEST_VAR=world; echo %s' % cmd_helper.SingleQuote(test_string)
+    self.assertEquals(test_string,
+                      cmd_helper.GetCmdOutput(cmd, shell=True).rstrip())
+
+
+class CmdHelperDoubleQuoteTest(unittest.TestCase):
+
+  def testDoubleQuote_basic(self):
+    self.assertEquals('hello',
+                      cmd_helper.DoubleQuote('hello'))
+
+  def testDoubleQuote_withSpaces(self):
+    self.assertEquals('"hello world"',
+                      cmd_helper.DoubleQuote('hello world'))
+
+  def testDoubleQuote_withUnsafeChars(self):
+    self.assertEquals('''"hello\\"; rm -rf /"''',
+                      cmd_helper.DoubleQuote('hello"; rm -rf /'))
+
+  def testSingleQuote_doExpand(self):
+    test_string = 'hello $TEST_VAR'
+    cmd = 'TEST_VAR=world; echo %s' % cmd_helper.DoubleQuote(test_string)
+    self.assertEquals('hello world',
+                      cmd_helper.GetCmdOutput(cmd, shell=True).rstrip())
+
+
+class CmdHelperShinkToSnippetTest(unittest.TestCase):
+
+  def testShrinkToSnippet_noArgs(self):
+    self.assertEquals('foo',
+        cmd_helper.ShrinkToSnippet(['foo'], 'a', 'bar'))
+    self.assertEquals("'foo foo'",
+        cmd_helper.ShrinkToSnippet(['foo foo'], 'a', 'bar'))
+    self.assertEquals('"$a"\' bar\'',
+        cmd_helper.ShrinkToSnippet(['foo bar'], 'a', 'foo'))
+    self.assertEquals('\'foo \'"$a"',
+        cmd_helper.ShrinkToSnippet(['foo bar'], 'a', 'bar'))
+    self.assertEquals('foo"$a"',
+        cmd_helper.ShrinkToSnippet(['foobar'], 'a', 'bar'))
+
+  def testShrinkToSnippet_singleArg(self):
+    self.assertEquals("foo ''",
+        cmd_helper.ShrinkToSnippet(['foo', ''], 'a', 'bar'))
+    self.assertEquals("foo foo",
+        cmd_helper.ShrinkToSnippet(['foo', 'foo'], 'a', 'bar'))
+    self.assertEquals('"$a" "$a"',
+        cmd_helper.ShrinkToSnippet(['foo', 'foo'], 'a', 'foo'))
+    self.assertEquals('foo "$a""$a"',
+        cmd_helper.ShrinkToSnippet(['foo', 'barbar'], 'a', 'bar'))
+    self.assertEquals('foo "$a"\' \'"$a"',
+        cmd_helper.ShrinkToSnippet(['foo', 'bar bar'], 'a', 'bar'))
+    self.assertEquals('foo "$a""$a"\' \'',
+        cmd_helper.ShrinkToSnippet(['foo', 'barbar '], 'a', 'bar'))
+    self.assertEquals('foo \' \'"$a""$a"\' \'',
+        cmd_helper.ShrinkToSnippet(['foo', ' barbar '], 'a', 'bar'))
+
+
+class CmdHelperIterCmdOutputLinesTest(unittest.TestCase):
+  """Test IterCmdOutputLines with some calls to the unix 'seq' command."""
+
+  def testIterCmdOutputLines_success(self):
+    for num, line in enumerate(
+        cmd_helper.IterCmdOutputLines(['seq', '10']), 1):
+      self.assertEquals(num, int(line))
+
+  def testIterCmdOutputLines_exitStatusFail(self):
+    with self.assertRaises(subprocess.CalledProcessError):
+      for num, line in enumerate(
+          cmd_helper.IterCmdOutputLines('seq 10 && false', shell=True), 1):
+        self.assertEquals(num, int(line))
+      # after reading all the output we get an exit status of 1
+
+  def testIterCmdOutputLines_exitStatusIgnored(self):
+    for num, line in enumerate(
+        cmd_helper.IterCmdOutputLines('seq 10 && false', shell=True,
+                                      check_status=False), 1):
+      self.assertEquals(num, int(line))
+
+  def testIterCmdOutputLines_exitStatusSkipped(self):
+    for num, line in enumerate(
+        cmd_helper.IterCmdOutputLines('seq 10 && false', shell=True), 1):
+      self.assertEquals(num, int(line))
+      # no exception will be raised because we don't attempt to read past
+      # the end of the output and, thus, the status never gets checked
+      if num == 10:
+        break
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/catapult/devil/devil/utils/file_utils.py b/catapult/devil/devil/utils/file_utils.py
new file mode 100644
index 0000000..dc5a9ef
--- /dev/null
+++ b/catapult/devil/devil/utils/file_utils.py
@@ -0,0 +1,31 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+
+
+def MergeFiles(dest_file, source_files):
+  """Merge list of files into single destination file.
+
+  Args:
+    dest_file: File to be written to.
+    source_files: List of files to be merged. Will be merged in the order they
+        appear in the list.
+  """
+  dest_dir = os.path.dirname(dest_file)
+  if dest_dir and not os.path.exists(dest_dir):
+    os.makedirs(dest_dir)
+  try:
+    with open(dest_file, 'w') as dest_f:
+      for source_file in source_files:
+        with open(source_file, 'r') as source_f:
+          dest_f.write(source_f.read())
+  except Exception as e:  # pylint: disable=broad-except
+    # Something went wrong when creating dest_file. Cleaning up.
+    try:
+      os.remove(dest_file)
+    except OSError:
+      pass
+    raise e
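+
+# Illustrative sketch (not part of this change):
+#
+#   MergeFiles('/tmp/merged/all_logs.txt', ['/tmp/log_a.txt', '/tmp/log_b.txt'])
+#
+# concatenates the two source files, in order, into the destination file,
+# creating /tmp/merged first if it does not exist.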
+
+
diff --git a/catapult/devil/devil/utils/find_usb_devices.py b/catapult/devil/devil/utils/find_usb_devices.py
new file mode 100755
index 0000000..4982e46
--- /dev/null
+++ b/catapult/devil/devil/utils/find_usb_devices.py
@@ -0,0 +1,628 @@
+#!/usr/bin/python
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import re
+import sys
+
+from devil.utils import cmd_helper
+from devil.utils import lsusb
+
+# Note: In the documentation below, "virtual port" refers to the port number
+# as observed by the system (e.g. by usb-devices) and "physical port" refers
+# to the physical numerical label on the physical port e.g. on a USB hub.
+# The mapping between virtual and physical ports is not always the identity
+# (e.g. the port labeled "1" on a USB hub does not always show up as "port 1"
+# when you plug something into it) but, as far as we are aware, the mapping
+# between virtual and physical ports is always the same for a given
+# model of USB hub. When "port number" is referenced without specifying, it
+# means the virtual port number.
+
+
+# Wrapper functions for system commands to get output. These are in wrapper
+# functions so that they can be more easily mocked-out for tests.
+def _GetParsedLSUSBOutput():
+  return lsusb.lsusb()
+
+
+def _GetUSBDevicesOutput():
+  return cmd_helper.GetCmdOutput(['usb-devices'])
+
+
+def _GetTtyUSBInfo(tty_string):
+  cmd = ['udevadm', 'info', '--name=/dev/' + tty_string, '--attribute-walk']
+  return cmd_helper.GetCmdOutput(cmd)
+
+
+def _GetCommList():
+  return cmd_helper.GetCmdOutput('ls /dev', shell=True)
+
+
+def GetTTYList():
+  return [x for x in _GetCommList().splitlines() if 'ttyUSB' in x]
+
+
+def GetBattorList(device_tree_map):
+  return [x for x in GetTTYList() if IsBattor(x, device_tree_map)]
+
+
+def IsBattor(tty_string, device_tree_map):
+  (bus, device) = GetBusDeviceFromTTY(tty_string)
+  node = device_tree_map[bus].FindDeviceNumber(device)
+  return 'Future Technology Devices International' in node.desc
+
+
+# Class to identify nodes in the USB topology. USB topology is organized as
+# a tree.
+class USBNode(object):
+  def __init__(self):
+    self._port_to_node = {}
+
+  @property
+  def desc(self):
+    raise NotImplementedError
+
+  @property
+  def info(self):
+    raise NotImplementedError
+
+  @property
+  def device_num(self):
+    raise NotImplementedError
+
+  @property
+  def bus_num(self):
+    raise NotImplementedError
+
+  def HasPort(self, port):
+    """Determines if this device has a device connected to the given port."""
+    return port in self._port_to_node
+
+  def PortToDevice(self, port):
+    """Gets the device connected to the given port on this device."""
+    return self._port_to_node[port]
+
+  def Display(self, port_chain='', info=False):
+    """Displays information about this node and its descendants.
+
+    Output format is, e.g. 1:3:3:Device 42 (ID 1234:5678 Some Device)
+    meaning that from the bus, if you look at the device connected
+    to port 1, then the device connected to port 3 of that,
+    then the device connected to port 3 of that, you get the device
+    assigned device number 42, which is Some Device. Note that device
+    numbers will be reassigned whenever a connected device is powercycled
+    or reinserted, but port numbers stay the same as long as the device
+    is reinserted back into the same physical port.
+
+    Args:
+      port_chain: [string] Chain of ports from bus to this node (e.g. '2:4:')
+      info: [bool] Whether to display detailed info as well.
+    """
+    raise NotImplementedError
+
+  def AddChild(self, port, device):
+    """Adds child to the device tree.
+
+    Args:
+      port: [int] Port number of the device.
+      device: [USBDeviceNode] Device to add.
+
+    Raises:
+      ValueError: If device already has a child at the given port.
+    """
+    if self.HasPort(port):
+      raise ValueError('Duplicate port number')
+    else:
+      self._port_to_node[port] = device
+
+  def AllNodes(self):
+    """Generator that yields this node and all of its descendants.
+
+    Yields:
+      [USBNode] First this node, then each of its descendants (recursively)
+    """
+    yield self
+    for child_node in self._port_to_node.values():
+      for descendant_node in child_node.AllNodes():
+        yield descendant_node
+
+  def FindDeviceNumber(self, findnum):
+    """Find device with given number in tree
+
+    Searches the portion of the device tree rooted at this node for
+    a device with the given device number.
+
+    Args:
+      findnum: [int] Device number to search for.
+
+    Returns:
+      [USBDeviceNode] Node that is found.
+    """
+    for node in self.AllNodes():
+      if node.device_num == findnum:
+        return node
+    return None
+
+
+class USBDeviceNode(USBNode):
+  def __init__(self, bus_num=0, device_num=0, serial=None, info=None):
+    """Class that represents a device in USB tree.
+
+    Args:
+      bus_num: [int] Bus number that this node is attached to.
+      device_num: [int] Device number of this device (or 0, if this is a bus)
+      serial: [string] Serial number.
+      info: [dict] Map giving detailed device info.
+    """
+    super(USBDeviceNode, self).__init__()
+    self._bus_num = bus_num
+    self._device_num = device_num
+    self._serial = serial
+    self._info = {} if info is None else info
+
+  #override
+  @property
+  def desc(self):
+    return self._info.get('desc')
+
+  #override
+  @property
+  def info(self):
+    return self._info
+
+  #override
+  @property
+  def device_num(self):
+    return self._device_num
+
+  #override
+  @property
+  def bus_num(self):
+    return self._bus_num
+
+  @property
+  def serial(self):
+    return self._serial
+
+  @serial.setter
+  def serial(self, serial):
+    self._serial = serial
+
+  #override
+  def Display(self, port_chain='', info=False):
+    print '%s Device %d (%s)' % (port_chain, self.device_num, self.desc)
+    if info:
+      print self.info
+    for (port, device) in self._port_to_node.iteritems():
+      device.Display('%s%d:' % (port_chain, port), info=info)
+
+
+class USBBusNode(USBNode):
+  def __init__(self, bus_num=0):
+    """Class that represents a node (either a bus or device) in USB tree.
+
+    Args:
+      is_bus: [bool] If true, node is bus; if not, node is device.
+      bus_num: [int] Bus number that this node is attached to.
+      device_num: [int] Device number of this device (or 0, if this is a bus)
+      desc: [string] Short description of device.
+      serial: [string] Serial number.
+      info: [dict] Map giving detailed device info.
+      port_to_dev: [dict(int:USBDeviceNode)]
+          Maps port # to device connected to port.
+    """
+    super(USBBusNode, self).__init__()
+    self._bus_num = bus_num
+
+  #override
+  @property
+  def desc(self):
+    return 'BUS %d' % self._bus_num
+
+  #override
+  @property
+  def info(self):
+    return {}
+
+  #override
+  @property
+  def device_num(self):
+    return -1
+
+  #override
+  @property
+  def bus_num(self):
+    return self._bus_num
+
+  #override
+  def Display(self, port_chain='', info=False):
+    print "=== %s ===" % self.desc
+    for (port, device) in self._port_to_node.iteritems():
+      device.Display('%s%d:' % (port_chain, port), info=info)
+
+
+_T_LINE_REGEX = re.compile(r'T:  Bus=(?P<bus>\d{2}) Lev=(?P<lev>\d{2}) '
+                           r'Prnt=(?P<prnt>\d{2,3}) Port=(?P<port>\d{2}) '
+                           r'Cnt=(?P<cnt>\d{2}) Dev#=(?P<dev>.{3}) .*')
+
+_S_LINE_REGEX = re.compile(r'S:  SerialNumber=(?P<serial>.*)')
+_LSUSB_BUS_DEVICE_RE = re.compile(r'^Bus (\d{3}) Device (\d{3}): (.*)')
+
+
+def GetBusNumberToDeviceTreeMap(fast=False):
+  """Gets devices currently attached.
+
+  Args:
+    fast: [bool] Whether to do a fast scan (fetch only the device description
+      from lsusb, not the full info dictionary).
+
+  Returns:
+    map of {bus number: bus object}
+    where the bus object has all the devices attached to it in a tree.
+  """
+  if fast:
+    info_map = {}
+    for line in lsusb.raw_lsusb().splitlines():
+      match = _LSUSB_BUS_DEVICE_RE.match(line)
+      if match:
+        info_map[(int(match.group(1)), int(match.group(2)))] = (
+          {'desc':match.group(3)})
+  else:
+    info_map = {((int(line['bus']), int(line['device']))): line
+                for line in _GetParsedLSUSBOutput()}
+
+  tree = {}
+  bus_num = -1
+  for line in _GetUSBDevicesOutput().splitlines():
+    match = _T_LINE_REGEX.match(line)
+    if match:
+      bus_num = int(match.group('bus'))
+      parent_num = int(match.group('prnt'))
+      # usb-devices starts counting ports from 0, so add 1
+      port_num = int(match.group('port')) + 1
+      device_num = int(match.group('dev'))
+
+      # create new bus if necessary
+      if bus_num not in tree:
+        tree[bus_num] = USBBusNode(bus_num=bus_num)
+
+      # create the new device
+      new_device = USBDeviceNode(bus_num=bus_num,
+                                 device_num=device_num,
+                                 info=info_map[(bus_num, device_num)])
+
+      # add device to bus
+      if parent_num != 0:
+        tree[bus_num].FindDeviceNumber(parent_num).AddChild(
+            port_num, new_device)
+      else:
+        tree[bus_num].AddChild(port_num, new_device)
+
+    match = _S_LINE_REGEX.match(line)
+    if match:
+      if bus_num == -1:
+        raise ValueError('S line appears before T line in input file')
+      # put the serial number in the device
+      tree[bus_num].FindDeviceNumber(device_num).serial = match.group('serial')
+
+  return tree
+
+
+class HubType(object):
+  def __init__(self, id_func, port_mapping):
+    """Defines a type of hub.
+
+    Args:
+      id_func: [USBNode -> bool] is a function that can be run on a node
+        to determine if the node represents this type of hub.
+      port_mapping: [dict(int:(int|dict))] maps virtual to physical port
+        numbers. For instance, {3:1, 1:2, 2:3} means that virtual port 3
+        corresponds to physical port 1, virtual port 1 corresponds to physical
+        port 2, and virtual port 2 corresponds to physical port 3. In the
+        case of hubs with "internal" topology, this is represented by nested
+        maps. For instance, {1:{1:1,2:2},2:{1:3,2:4}} means, e.g. that the
+        device plugged into physical port 3 will show up as being connected
+        to port 1, on a device which is connected to port 2 on the hub.
+    """
+    self._id_func = id_func
+    # v2p = "virtual to physical" ports
+    self._v2p_port = port_mapping
+
+  def IsType(self, node):
+    """Determines if the given Node is a hub of this type.
+
+    Args:
+      node: [USBNode] Node to check.
+    """
+    return self._id_func(node)
+
+  def GetPhysicalPortToNodeTuples(self, node):
+    """Gets devices connected to the physical ports on a hub of this type.
+
+    Args:
+      node: [USBNode] Node representing a hub of this type.
+
+    Yields:
+      A series of (int, USBNode) tuples giving a physical port
+      and the USBNode connected to it.
+
+    Raises:
+      ValueError: If the given node isn't a hub of this type.
+    """
+    if self.IsType(node):
+      for res in self._GppHelper(node, self._v2p_port):
+        yield res
+    else:
+      raise ValueError('Node must be a hub of this type')
+
+  def _GppHelper(self, node, mapping):
+    """Helper function for GetPhysicalPortToNodeMap.
+
+    Gets devices connected to physical ports, based on device tree
+    rooted at the given node and the mapping between virtual and physical
+    ports.
+
+    Args:
+      node: [USBNode] Root of tree to search for devices.
+      mapping: [dict] Mapping between virtual and physical ports.
+
+    Yields:
+      A series of (int, USBNode) tuples giving a physical port
+      and the Node connected to it.
+    """
+    for (virtual, physical) in mapping.iteritems():
+      if node.HasPort(virtual):
+        if isinstance(physical, dict):
+          for res in self._GppHelper(node.PortToDevice(virtual), physical):
+            yield res
+        else:
+          yield (physical, node.PortToDevice(virtual))
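+
+# Illustrative sketch (not part of this change): a hub whose virtual port 2
+# leads to an internal hub could be described as
+#
+#   HubType(lambda node: 'ACME HUB' in (node.desc or ''),
+#           {1: 1, 2: {1: 2, 2: 3}})
+#
+# i.e. virtual port 1 is physical port 1, while physical ports 2 and 3 hang off
+# the internal hub reached through virtual port 2. 'ACME HUB' is a made-up
+# identifier.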
+
+
+def GetHubsOnBus(bus, hub_types):
+  """Scans for all hubs on a bus of given hub types.
+
+  Args:
+    bus: [USBNode] Bus object.
+    hub_types: [iterable(HubType)] Possible types of hubs.
+
+  Yields:
+    Sequence of tuples representing (hub, type of hub)
+  """
+  for device in bus.AllNodes():
+    for hub_type in hub_types:
+      if hub_type.IsType(device):
+        yield (device, hub_type)
+
+
+def GetPhysicalPortToNodeMap(hub, hub_type):
+  """Gets physical-port:node mapping for a given hub.
+
+  Args:
+    hub: [USBNode] Hub to get map for.
+    hub_type: [HubType] Which type of hub it is.
+
+  Returns:
+    Dict of {physical port: node}
+  """
+  port_device = hub_type.GetPhysicalPortToNodeTuples(hub)
+  return {port: device for (port, device) in port_device}
+
+
+def GetPhysicalPortToBusDeviceMap(hub, hub_type):
+  """Gets physical-port:(bus#, device#) mapping for a given hub.
+
+  Args:
+    hub: [USBNode] Hub to get map for.
+    hub_type: [HubType] Which type of hub it is.
+
+  Returns:
+    Dict of {physical port: (bus number, device number)}
+  """
+  port_device = hub_type.GetPhysicalPortToNodeTuples(hub)
+  return {port: (device.bus_num, device.device_num)
+          for (port, device) in port_device}
+
+
+def GetPhysicalPortToSerialMap(hub, hub_type):
+  """Gets physical-port:serial# mapping for a given hub.
+  Args:
+    hub: [USBNode] Hub to get map for.
+    hub_type: [HubType] Which type of hub it is.
+
+  Returns:
+    Dict of {physical port: serial number)}
+  """
+  port_device = hub_type.GetPhysicalPortToNodeTuples(hub)
+  return {port: device.serial
+          for (port, device) in port_device
+          if device.serial}
+
+
+def GetPhysicalPortToTTYMap(device, hub_type):
+  """Gets physical-port:tty-string mapping for a given hub.
+  Args:
+    hub: [USBNode] Hub to get map for.
+    hub_type: [HubType] Which type of hub it is.
+
+  Returns:
+    Dict of {physical port: tty-string)}
+  """
+  port_device = hub_type.GetPhysicalPortToNodeTuples(device)
+  bus_device_to_tty = GetBusDeviceToTTYMap()
+  return {port: bus_device_to_tty[(device.bus_num, device.device_num)]
+          for (port, device) in port_device
+          if (device.bus_num, device.device_num) in bus_device_to_tty}
+
+
+def CollectHubMaps(hub_types, map_func, device_tree_map=None, fast=False):
+  """Runs a function on all hubs in the system and collects their output.
+
+  Args:
+    hub_types: [HubType] List of possible hub types.
+    map_func: [string] Function to run on each hub.
+    device_tree: Previously constructed device tree map, if any.
+    fast: Whether to construct device tree fast, if not already provided
+
+  Yields:
+    Sequence of dicts of {physical port: device} where the type of
+    device depends on the ident keyword. Each dict is a separate hub.
+  """
+  if device_tree_map is None:
+    device_tree_map = GetBusNumberToDeviceTreeMap(fast=fast)
+  for bus in device_tree_map.values():
+    for (hub, hub_type) in GetHubsOnBus(bus, hub_types):
+      yield map_func(hub, hub_type)
+
+
+def GetAllPhysicalPortToNodeMaps(hub_types, **kwargs):
+  return CollectHubMaps(hub_types, GetPhysicalPortToNodeMap, **kwargs)
+
+
+def GetAllPhysicalPortToBusDeviceMaps(hub_types, **kwargs):
+  return CollectHubMaps(hub_types, GetPhysicalPortToBusDeviceMap, **kwargs)
+
+
+def GetAllPhysicalPortToSerialMaps(hub_types, **kwargs):
+  return CollectHubMaps(hub_types, GetPhysicalPortToSerialMap, **kwargs)
+
+
+def GetAllPhysicalPortToTTYMaps(hub_types, **kwargs):
+  return CollectHubMaps(hub_types, GetPhysicalPortToTTYMap, **kwargs)
+
+
+_BUS_NUM_REGEX = re.compile(r'.*ATTRS{busnum}=="(\d*)".*')
+_DEVICE_NUM_REGEX = re.compile(r'.*ATTRS{devnum}=="(\d*)".*')
+
+
+def GetBusDeviceFromTTY(tty_string):
+  """Gets bus and device number connected to a ttyUSB port.
+
+  Args:
+    tty_string: [String] Identifier for ttyUSB (e.g. 'ttyUSB0')
+
+  Returns:
+    Tuple (bus, device) giving device connected to that ttyUSB.
+
+  Raises:
+    ValueError: If bus and device information could not be found.
+  """
+  bus_num = None
+  device_num = None
+  # Expected output of GetCmdOutput should be something like:
+  # looking at device /devices/something/.../.../...
+  # KERNELS="ttyUSB0"
+  # SUBSYSTEMS=...
+  # DRIVERS=...
+  # ATTRS{foo}=...
+  # ATTRS{bar}=...
+  # ...
+  for line in _GetTtyUSBInfo(tty_string).splitlines():
+    bus_match = _BUS_NUM_REGEX.match(line)
+    device_match = _DEVICE_NUM_REGEX.match(line)
+    if bus_match and bus_num is None:
+      bus_num = int(bus_match.group(1))
+    if device_match and device_num is None:
+      device_num = int(device_match.group(1))
+  if bus_num is None or device_num is None:
+    raise ValueError('Info not found')
+  return (bus_num, device_num)
+
+
+def GetBusDeviceToTTYMap():
+  """Gets all mappings from (bus, device) to ttyUSB string.
+
+  Gets mapping from (bus, device) to ttyUSB string (e.g. 'ttyUSB0'),
+  for all ttyUSB strings currently active.
+
+  Returns:
+    [dict] Dict that maps (bus, device) to ttyUSB string
+  """
+  result = {}
+  for tty in GetTTYList():
+    result[GetBusDeviceFromTTY(tty)] = tty
+  return result
+
+
+# This dictionary describes the mapping between physical and
+# virtual ports on a Plugable 7-Port Hub (model USB2-HUB7BC).
+# Keys are the virtual ports, values are the physical port.
+# The entry 4:{1:4, 2:3, 3:2, 4:1} indicates that virtual port
+# 4 connects to another 'virtual' hub that itself has the
+# virtual-to-physical port mapping {1:4, 2:3, 3:2, 4:1}.
+
+PLUGABLE_7PORT_LAYOUT = {1:7,
+                         2:6,
+                         3:5,
+                         4:{1:4, 2:3, 3:2, 4:1}}
+
+def TestUSBTopologyScript():
+  """Test display and hub identification."""
+  # Identification criteria for Plugable 7-Port Hub
+  def _is_plugable_7port_hub(node):
+    """Check if a node is a Plugable 7-Port Hub
+    (Model USB2-HUB7BC)
+    The topology of this device is a 4-port hub,
+    with another 4-port hub connected on port 4.
+    """
+    if not isinstance(node, USBDeviceNode):
+      return False
+    if '4-Port HUB' not in node.desc:
+      return False
+    if not node.HasPort(4):
+      return False
+    return '4-Port HUB' in node.PortToDevice(4).desc
+
+  plugable_7port = HubType(_is_plugable_7port_hub,
+                           PLUGABLE_7PORT_LAYOUT)
+  print '==== USB TOPOLOGY SCRIPT TEST ===='
+
+  # Display devices
+  print '==== DEVICE DISPLAY ===='
+  device_trees = GetBusNumberToDeviceTreeMap(fast=True)
+  for device_tree in device_trees.values():
+    device_tree.Display()
+  print
+
+  # Display TTY information about devices plugged into hubs.
+  print '==== TTY INFORMATION ===='
+  for port_map in GetAllPhysicalPortToTTYMaps([plugable_7port],
+                                              device_tree_map=device_trees):
+    print port_map
+  print
+
+  # Display serial number information about devices plugged into hubs.
+  print '==== SERIAL NUMBER INFORMATION ===='
+  for port_map in GetAllPhysicalPortToSerialMaps([plugable_7port],
+                                                 device_tree_map=device_trees):
+    print port_map
+  print ''
+  return 0
+
+def parse_options(argv):
+  """Parses and checks the command-line options.
+
+  Returns:
+    The parsed command-line arguments.
+  """
+  USAGE = '''./find_usb_devices [--help]
+    This script shows the mapping between USB devices and port numbers.
+    Clients are not intended to call this script from the command line.
+    Clients are intended to call the functions in this script directly.
+    For instance, GetAllPhysicalPortToSerialMaps(...)
+    Running this script with --help will display this message.
+    Running this script without --help will display information about
+    devices attached, TTY mapping, and serial number mapping,
+    for testing purposes. See design document for API documentation.
+  '''
+  parser = argparse.ArgumentParser(usage=USAGE)
+  return parser.parse_args(argv[1:])
+
+def main():
+  parse_options(sys.argv)
+  TestUSBTopologyScript()
+
+if __name__ == "__main__":
+  sys.exit(main())
diff --git a/catapult/devil/devil/utils/find_usb_devices_test.py b/catapult/devil/devil/utils/find_usb_devices_test.py
new file mode 100755
index 0000000..2e94dcd
--- /dev/null
+++ b/catapult/devil/devil/utils/find_usb_devices_test.py
@@ -0,0 +1,262 @@
+#!/usr/bin/env python
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=protected-access
+
+"""
+Unit tests for the contents of find_usb_devices.py.
+
+Device tree for these tests is as follows:
+Bus 001:
+1: Device 011 "foo"
+2: Device 012 "bar"
+3: Device 013 "baz"
+
+Bus 002:
+1: Device 011 "quux"
+2: Device 020 "My Test HUB" #hub 1
+2:1: Device 021 "battor_p7_h1_t0" #physical port 7 on hub 1, on ttyUSB0
+2:3: Device 022 "battor_p5_h1_t1" #physical port 5 on hub 1, on ttyUSB1
+2:4: Device 023 "My Test Internal HUB" #internal section of hub 1
+2:4:2: Device 024 "battor_p3_h1_t2" #physical port 3 on hub 1, on ttyUSB2
+2:4:3: Device 026 "Not a Battery Monitor" #physical port 2 on hub 1, on ttyUSB5
+2:4:4: Device 025 "battor_p1_h1_t3" #physical port 1 on hub 1, on ttyUSB3
+3: Device 100 "My Test HUB" #hub 2
+3:4: Device 101 "My Test Internal HUB" #internal section of hub 2
+3:4:4: Device 102 "battor_p1_h2_t4" #physical port 1 on hub 2, on ttyUSB4
+"""
+
+import logging
+import unittest
+
+from devil import devil_env
+from devil.utils import find_usb_devices
+with devil_env.SysPath(devil_env.PYMOCK_PATH):
+  import mock # pylint: disable=import-error
+
+# Output of lsusb.lsusb().
+# We just test that the dictionary is working by creating an
+# "ID number" equal to (bus_num*1000)+device_num and seeing if
+# it is picked up correctly. We also test the description field.
+
+DEVLIST = [(1, 11, 'foo'),
+           (1, 12, 'bar'),
+           (1, 13, 'baz'),
+           (2, 11, 'quux'),
+           (2, 20, 'My Test HUB'),
+           (2, 21, 'Future Technology Devices International battor_p7_h1_t0'),
+           (2, 22, 'Future Technology Devices International battor_p5_h1_t1'),
+           (2, 23, 'My Test Internal HUB'),
+           (2, 24, 'Future Technology Devices International battor_p3_h1_t2'),
+           (2, 25, 'Future Technology Devices International battor_p1_h1_t3'),
+           (2, 26, 'Not a Battery Monitor'),
+           (2, 100, 'My Test HUB'),
+           (2, 101, 'My Test Internal HUB'),
+           (2, 102, 'Future Technology Devices International battor_p1_h2_t4')]
+
+LSUSB_OUTPUT = [
+  {'bus': b, 'device': d, 'desc': t, 'id': (1000*b)+d}
+       for (b, d, t) in DEVLIST]
+
+
+# Note: "Lev", "Cnt", "Spd", and "MxCh" are not used by parser,
+# so we just leave them as zeros here. Also note that the port
+# numbers reported here start at 0, so they're 1 less than the
+# port numbers reported elsewhere.
+USB_DEVICES_OUTPUT = '''
+T:  Bus=01 Lev=00 Prnt=00 Port=00 Cnt=00 Dev#= 11 Spd=000 MxCh=00
+S:  SerialNumber=FooSerial
+T:  Bus=01 Lev=00 Prnt=00 Port=01 Cnt=00 Dev#= 12 Spd=000 MxCh=00
+S:  SerialNumber=BarSerial
+T:  Bus=01 Lev=00 Prnt=00 Port=02 Cnt=00 Dev#= 13 Spd=000 MxCh=00
+S:  SerialNumber=BazSerial
+
+T:  Bus=02 Lev=00 Prnt=00 Port=00 Cnt=00 Dev#= 11 Spd=000 MxCh=00
+
+T:  Bus=02 Lev=00 Prnt=00 Port=01 Cnt=00 Dev#= 20 Spd=000 MxCh=00
+T:  Bus=02 Lev=00 Prnt=20 Port=00 Cnt=00 Dev#= 21 Spd=000 MxCh=00
+S:  SerialNumber=Battor0
+T:  Bus=02 Lev=00 Prnt=20 Port=02 Cnt=00 Dev#= 22 Spd=000 MxCh=00
+S:  SerialNumber=Battor1
+T:  Bus=02 Lev=00 Prnt=20 Port=03 Cnt=00 Dev#= 23 Spd=000 MxCh=00
+T:  Bus=02 Lev=00 Prnt=23 Port=01 Cnt=00 Dev#= 24 Spd=000 MxCh=00
+S:  SerialNumber=Battor2
+T:  Bus=02 Lev=00 Prnt=23 Port=03 Cnt=00 Dev#= 25 Spd=000 MxCh=00
+S:  SerialNumber=Battor3
+T:  Bus=02 Lev=00 Prnt=23 Port=02 Cnt=00 Dev#= 26 Spd=000 MxCh=00
+
+T:  Bus=02 Lev=00 Prnt=00 Port=02 Cnt=00 Dev#=100 Spd=000 MxCh=00
+T:  Bus=02 Lev=00 Prnt=100 Port=03 Cnt=00 Dev#=101 Spd=000 MxCh=00
+T:  Bus=02 Lev=00 Prnt=101 Port=03 Cnt=00 Dev#=102 Spd=000 MxCh=00
+'''
+
+LIST_TTY_OUTPUT = '''
+ttyUSB0
+Something-else-0
+ttyUSB1
+ttyUSB2
+Something-else-1
+ttyUSB3
+ttyUSB4
+Something-else-2
+ttyUSB5
+'''
+
+# Note: The real output will have multiple lines with
+# ATTRS{busnum} and ATTRS{devnum}, but only the first
+# one counts. Thus the test output duplicates this.
+UDEVADM_USBTTY0_OUTPUT = '''
+ATTRS{busnum}=="2"
+ATTRS{devnum}=="21"
+ATTRS{busnum}=="0"
+ATTRS{devnum}=="0"
+'''
+
+UDEVADM_USBTTY1_OUTPUT = '''
+ATTRS{busnum}=="2"
+ATTRS{devnum}=="22"
+ATTRS{busnum}=="0"
+ATTRS{devnum}=="0"
+'''
+
+UDEVADM_USBTTY2_OUTPUT = '''
+ATTRS{busnum}=="2"
+ATTRS{devnum}=="24"
+ATTRS{busnum}=="0"
+ATTRS{devnum}=="0"
+'''
+
+UDEVADM_USBTTY3_OUTPUT = '''
+ATTRS{busnum}=="2"
+ATTRS{devnum}=="25"
+ATTRS{busnum}=="0"
+ATTRS{devnum}=="0"
+'''
+
+UDEVADM_USBTTY4_OUTPUT = '''
+ATTRS{busnum}=="2"
+ATTRS{devnum}=="102"
+ATTRS{busnum}=="0"
+ATTRS{devnum}=="0"
+'''
+
+UDEVADM_USBTTY5_OUTPUT = '''
+ATTRS{busnum}=="2"
+ATTRS{devnum}=="26"
+ATTRS{busnum}=="0"
+ATTRS{devnum}=="0"
+'''
+
+UDEVADM_OUTPUT_DICT = {
+  'ttyUSB0': UDEVADM_USBTTY0_OUTPUT,
+  'ttyUSB1': UDEVADM_USBTTY1_OUTPUT,
+  'ttyUSB2': UDEVADM_USBTTY2_OUTPUT,
+  'ttyUSB3': UDEVADM_USBTTY3_OUTPUT,
+  'ttyUSB4': UDEVADM_USBTTY4_OUTPUT,
+  'ttyUSB5': UDEVADM_USBTTY5_OUTPUT}
+
+# Identification criteria for Plugable 7-Port Hub
+def isTestHub(node):
+  """Check if a node is a Plugable 7-Port Hub
+  (Model USB2-HUB7BC)
+  The topology of this device is a 4-port hub,
+  with another 4-port hub connected on port 4.
+  """
+  if not isinstance(node, find_usb_devices.USBDeviceNode):
+    return False
+  if 'Test HUB' not in node.desc:
+    return False
+  if not node.HasPort(4):
+    return False
+  return 'Test Internal HUB' in node.PortToDevice(4).desc
+
+TEST_HUB = find_usb_devices.HubType(isTestHub,
+                                    {1:7,
+                                     2:6,
+                                     3:5,
+                                     4:{1:4, 2:3, 3:2, 4:1}})
+
+class USBScriptTest(unittest.TestCase):
+  def setUp(self):
+    find_usb_devices._GetTtyUSBInfo = mock.Mock(
+        side_effect=lambda x: UDEVADM_OUTPUT_DICT[x])
+    find_usb_devices._GetParsedLSUSBOutput = mock.Mock(
+        return_value=LSUSB_OUTPUT)
+    find_usb_devices._GetUSBDevicesOutput = mock.Mock(
+        return_value=USB_DEVICES_OUTPUT)
+    find_usb_devices._GetCommList = mock.Mock(
+        return_value=LIST_TTY_OUTPUT)
+
+  def testIsBattor(self):
+    bd = find_usb_devices.GetBusNumberToDeviceTreeMap()
+    self.assertTrue(find_usb_devices.IsBattor('ttyUSB3', bd))
+    self.assertFalse(find_usb_devices.IsBattor('ttyUSB5', bd))
+
+  def testGetBattors(self):
+    bd = find_usb_devices.GetBusNumberToDeviceTreeMap()
+    self.assertEquals(find_usb_devices.GetBattorList(bd),
+                          ['ttyUSB0', 'ttyUSB1', 'ttyUSB2',
+                           'ttyUSB3', 'ttyUSB4'])
+
+  def testGetTTYDevices(self):
+    pp = find_usb_devices.GetAllPhysicalPortToTTYMaps([TEST_HUB])
+    result = list(pp)
+    self.assertEquals(result[0], {7:'ttyUSB0',
+                                  5:'ttyUSB1',
+                                  3:'ttyUSB2',
+                                  2:'ttyUSB5',
+                                  1:'ttyUSB3'})
+    self.assertEquals(result[1], {1:'ttyUSB4'})
+
+  def testGetPortDeviceMapping(self):
+    pp = find_usb_devices.GetAllPhysicalPortToBusDeviceMaps([TEST_HUB])
+    result = list(pp)
+    self.assertEquals(result[0], {7:(2, 21),
+                                  5:(2, 22),
+                                  3:(2, 24),
+                                  2:(2, 26),
+                                  1:(2, 25)})
+    self.assertEquals(result[1], {1:(2, 102)})
+
+  def testGetSerialMapping(self):
+    pp = find_usb_devices.GetAllPhysicalPortToSerialMaps([TEST_HUB])
+    result = list(pp)
+    self.assertEquals(result[0], {7:'Battor0',
+                                  5:'Battor1',
+                                  3:'Battor2',
+                                  1:'Battor3'})
+    self.assertEquals(result[1], {})
+
+  def testDeviceDescriptions(self):
+    bd = find_usb_devices.GetBusNumberToDeviceTreeMap()
+    dev_foo = bd[1].FindDeviceNumber(11)
+    dev_bar = bd[1].FindDeviceNumber(12)
+    dev_battor_p7_h1_t0 = bd[2].FindDeviceNumber(21)
+    self.assertEquals(dev_foo.desc, 'foo')
+    self.assertEquals(dev_bar.desc, 'bar')
+    self.assertEquals(dev_battor_p7_h1_t0.desc,
+        'Future Technology Devices International battor_p7_h1_t0')
+
+  def testDeviceInformation(self):
+    bd = find_usb_devices.GetBusNumberToDeviceTreeMap()
+    dev_foo = bd[1].FindDeviceNumber(11)
+    dev_bar = bd[1].FindDeviceNumber(12)
+    dev_battor_p7_h1_t0 = bd[2].FindDeviceNumber(21)
+    self.assertEquals(dev_foo.info['id'], 1011)
+    self.assertEquals(dev_bar.info['id'], 1012)
+    self.assertEquals(dev_battor_p7_h1_t0.info['id'], 2021)
+
+  def testSerialNumber(self):
+    bd = find_usb_devices.GetBusNumberToDeviceTreeMap()
+    dev_foo = bd[1].FindDeviceNumber(11)
+    dev_bar = bd[1].FindDeviceNumber(12)
+    dev_battor_p7_h1_t0 = bd[2].FindDeviceNumber(21)
+    self.assertEquals(dev_foo.serial, 'FooSerial')
+    self.assertEquals(dev_bar.serial, 'BarSerial')
+    self.assertEquals(dev_battor_p7_h1_t0.serial, 'Battor0')
+
+if __name__ == "__main__":
+  logging.getLogger().setLevel(logging.DEBUG)
+  unittest.main(verbosity=2)
diff --git a/catapult/devil/devil/utils/geometry.py b/catapult/devil/devil/utils/geometry.py
new file mode 100644
index 0000000..da21770
--- /dev/null
+++ b/catapult/devil/devil/utils/geometry.py
@@ -0,0 +1,75 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Objects for convenient manipulation of points and other surface areas."""
+
+import collections
+
+
+class Point(collections.namedtuple('Point', ['x', 'y'])):
+  """Object to represent an (x, y) point on a surface.
+
+  Args:
+    x, y: Two numeric coordinates that define the point.
+  """
+  __slots__ = ()
+
+  def __str__(self):
+    """Get a useful string representation of the object."""
+    return '(%s, %s)' % (self.x, self.y)
+
+  def __add__(self, other):
+    """Sum of two points, e.g. p + q."""
+    if isinstance(other, Point):
+      return Point(self.x + other.x, self.y + other.y)
+    else:
+      return NotImplemented
+
+  def __mul__(self, factor):
+    """Multiplication on the right is not implemented."""
+    # This overrides the default behaviour of a tuple multiplied by a constant
+    # on the right, which does not make sense for a Point.
+    return NotImplemented
+
+  def __rmul__(self, factor):
+    """Multiply a point by a scalar factor on the left, e.g. 2 * p."""
+    return Point(factor * self.x, factor * self.y)
+
+
+class Rectangle(
+    collections.namedtuple('Rectangle', ['top_left', 'bottom_right'])):
+  """Object to represent a rectangle on a surface.
+
+  Args:
+    top_left: A pair of (left, top) coordinates. Might be given as a Point
+      or as a two-element sequence (list, tuple, etc.).
+    bottom_right: A pair of (right, bottom) coordinates.
+  """
+  __slots__ = ()
+
+  def __new__(cls, top_left, bottom_right):
+    if not isinstance(top_left, Point):
+      top_left = Point(*top_left)
+    if not isinstance(bottom_right, Point):
+      bottom_right = Point(*bottom_right)
+    return super(Rectangle, cls).__new__(cls, top_left, bottom_right)
+
+  def __str__(self):
+    """Get a useful string representation of the object."""
+    return '[%s, %s]' % (self.top_left, self.bottom_right)
+
+  @property
+  def center(self):
+    """Get the point at the center of the rectangle."""
+    return 0.5 * (self.top_left + self.bottom_right)
+
+  @classmethod
+  def FromDict(cls, d):
+    """Create a rectangle object from a dictionary.
+
+    Args:
+      d: A dictionary (or mapping) of the form, e.g., {'top': 0, 'left': 0,
+         'bottom': 1, 'right': 1}.
+    """
+    return cls(Point(d['left'], d['top']), Point(d['right'], d['bottom']))
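+
+# Illustrative usage sketch (hypothetical values, shown here only to document
+# the classes above):
+#
+#   r = Rectangle.FromDict({'top': 0, 'left': 0, 'bottom': 2, 'right': 4})
+#   r.center        # -> Point(x=2.0, y=1.0)
+#   str(r)          # -> '[(0, 0), (4, 2)]'
+#   2 * r.top_left  # -> Point(x=0, y=0)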
diff --git a/catapult/devil/devil/utils/geometry_test.py b/catapult/devil/devil/utils/geometry_test.py
new file mode 100644
index 0000000..af69442
--- /dev/null
+++ b/catapult/devil/devil/utils/geometry_test.py
@@ -0,0 +1,61 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Tests for the geometry module."""
+
+import unittest
+
+from devil.utils import geometry as g
+
+
+class PointTest(unittest.TestCase):
+
+  def testStr(self):
+    p = g.Point(1, 2)
+    self.assertEquals(str(p), '(1, 2)')
+
+  def testAdd(self):
+    p = g.Point(1, 2)
+    q = g.Point(3, 4)
+    r = g.Point(4, 6)
+    self.assertEquals(p + q, r)
+
+  def testAdd_TypeErrorWithInvalidOperands(self):
+    # pylint: disable=pointless-statement
+    p = g.Point(1, 2)
+    with self.assertRaises(TypeError):
+      p + 4  # Can't add point and scalar.
+    with self.assertRaises(TypeError):
+      4 + p  # Can't add scalar and point.
+
+  def testMult(self):
+    p = g.Point(1, 2)
+    r = g.Point(2, 4)
+    self.assertEquals(2 * p, r)  # Multiply by scalar on the left.
+
+  def testMult_TypeErrorWithInvalidOperands(self):
+    # pylint: disable=pointless-statement
+    p = g.Point(1, 2)
+    q = g.Point(2, 4)
+    with self.assertRaises(TypeError):
+      p * q  # Can't multiply points.
+    with self.assertRaises(TypeError):
+      p * 4  # Can't multiply by a scalar on the right.
+
+
+class RectangleTest(unittest.TestCase):
+
+  def testStr(self):
+    r = g.Rectangle(g.Point(0, 1), g.Point(2, 3))
+    self.assertEquals(str(r), '[(0, 1), (2, 3)]')
+
+  def testCenter(self):
+    r = g.Rectangle(g.Point(0, 1), g.Point(2, 3))
+    c = g.Point(1, 2)
+    self.assertEquals(r.center, c)
+
+  def testFromDict(self):
+    r1 = g.Rectangle(g.Point(0, 1), g.Point(2, 3))
+    r2 = g.Rectangle.FromDict({'top': 1, 'left': 0, 'bottom': 3, 'right': 2})
+    self.assertEquals(r1, r2)
diff --git a/catapult/devil/devil/utils/host_utils.py b/catapult/devil/devil/utils/host_utils.py
new file mode 100644
index 0000000..580721f
--- /dev/null
+++ b/catapult/devil/devil/utils/host_utils.py
@@ -0,0 +1,16 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+
+
+def GetRecursiveDiskUsage(path):
+  """Returns the disk usage in bytes of |path|. Similar to `du -sb |path|`."""
+  running_size = os.path.getsize(path)
+  if os.path.isdir(path):
+    for root, dirs, files in os.walk(path):
+      running_size += sum([os.path.getsize(os.path.join(root, f))
+                           for f in files + dirs])
+  return running_size
+
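+# Illustrative usage sketch ('/tmp/some/dir' is a hypothetical path):
+#
+#   size_in_bytes = GetRecursiveDiskUsage('/tmp/some/dir')
+#   size_in_mib = size_in_bytes / (1024.0 * 1024.0)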
diff --git a/catapult/devil/devil/utils/lazy/__init__.py b/catapult/devil/devil/utils/lazy/__init__.py
new file mode 100644
index 0000000..3cc56c0
--- /dev/null
+++ b/catapult/devil/devil/utils/lazy/__init__.py
@@ -0,0 +1,5 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from devil.utils.lazy.weak_constant import WeakConstant
diff --git a/catapult/devil/devil/utils/lazy/weak_constant.py b/catapult/devil/devil/utils/lazy/weak_constant.py
new file mode 100644
index 0000000..3558f29
--- /dev/null
+++ b/catapult/devil/devil/utils/lazy/weak_constant.py
@@ -0,0 +1,29 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import threading
+
+
+class WeakConstant(object):
+  """A thread-safe, lazily initialized object.
+
+  This does not support modification after initialization. The intended
+  constant nature of the object is not enforced, though, hence the "weak".
+  """
+
+  def __init__(self, initializer):
+    self._initialized = False
+    self._initializer = initializer
+    self._lock = threading.Lock()
+    self._val = None
+
+  def read(self):
+    """Get the object, creating it if necessary."""
+    if self._initialized:
+      return self._val
+    with self._lock:
+      if not self._initialized:
+        self._val = self._initializer()
+        self._initialized = True
+    return self._val
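+
+# Illustrative usage sketch (ExpensiveSetup is a hypothetical callable):
+#
+#   CONSTANT = WeakConstant(ExpensiveSetup)
+#   value = CONSTANT.read()  # First call runs ExpensiveSetup exactly once.
+#   value = CONSTANT.read()  # Subsequent calls return the cached result.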
diff --git a/catapult/devil/devil/utils/lsusb.py b/catapult/devil/devil/utils/lsusb.py
new file mode 100644
index 0000000..d6306df
--- /dev/null
+++ b/catapult/devil/devil/utils/lsusb.py
@@ -0,0 +1,109 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import re
+
+from devil.utils import cmd_helper
+
+_COULDNT_OPEN_ERROR_RE = re.compile(r'Couldn\'t open device.*')
+_INDENTATION_RE = re.compile(r'^( *)')
+_LSUSB_BUS_DEVICE_RE = re.compile(r'^Bus (\d{3}) Device (\d{3}): (.*)')
+_LSUSB_ENTRY_RE = re.compile(r'^ *([^ ]+) +([^ ]+) *([^ ].*)?$')
+_LSUSB_GROUP_RE = re.compile(r'^ *([^ ]+.*):$')
+
+
+def _lsusbv_on_device(bus_id, dev_id):
+  """Calls lsusb -v on device."""
+  _, raw_output = cmd_helper.GetCmdStatusAndOutputWithTimeout(
+      ['lsusb', '-v', '-s', '%s:%s' % (bus_id, dev_id)], timeout=10)
+
+  device = {'bus': bus_id, 'device': dev_id}
+  depth_stack = [device]
+
+  # TODO(jbudorick): Add documentation for parsing.
+  for line in raw_output.splitlines():
+    # Ignore blank lines.
+    if not line:
+      continue
+    # Filter out error message about opening device.
+    if _COULDNT_OPEN_ERROR_RE.match(line):
+      continue
+    # Find start of device information.
+    m = _LSUSB_BUS_DEVICE_RE.match(line)
+    if m:
+      if m.group(1) != bus_id:
+        logging.warning(
+            'Expected bus_id value: %r, seen %r', bus_id, m.group(1))
+      if m.group(2) != dev_id:
+        logging.warning(
+            'Expected dev_id value: %r, seen %r', dev_id, m.group(2))
+      device['desc'] = m.group(3)
+      continue
+
+    indent_match = _INDENTATION_RE.match(line)
+    if not indent_match:
+      continue
+
+    depth = 1 + len(indent_match.group(1)) / 2
+    if depth > len(depth_stack):
+      logging.error(
+          'lsusb parsing error: unexpected indentation: "%s"', line)
+      continue
+
+    while depth < len(depth_stack):
+      depth_stack.pop()
+
+    cur = depth_stack[-1]
+
+    m = _LSUSB_GROUP_RE.match(line)
+    if m:
+      new_group = {}
+      cur[m.group(1)] = new_group
+      depth_stack.append(new_group)
+      continue
+
+    m = _LSUSB_ENTRY_RE.match(line)
+    if m:
+      new_entry = {
+        '_value': m.group(2),
+        '_desc': m.group(3),
+      }
+      cur[m.group(1)] = new_entry
+      depth_stack.append(new_entry)
+      continue
+
+    logging.error('lsusb parsing error: unrecognized line: "%s"', line)
+
+  return device
+
+def lsusb():
+  """Call lsusb and return the parsed output."""
+  _, lsusb_list_output = cmd_helper.GetCmdStatusAndOutputWithTimeout(
+      ['lsusb'], timeout=10)
+  devices = []
+  for line in lsusb_list_output.splitlines():
+    m = _LSUSB_BUS_DEVICE_RE.match(line)
+    if m:
+      bus_num = m.group(1)
+      dev_num = m.group(2)
+      try:
+        devices.append(_lsusbv_on_device(bus_num, dev_num))
+      except cmd_helper.TimeoutError:
+        # The device will be blacklisted later if it is listed in the expected
+        # device file but lsusb -v times out here.
+        logging.info('lsusb -v %s:%s timed out.', bus_num, dev_num)
+  return devices
+
+def raw_lsusb():
+  return cmd_helper.GetCmdOutput(['lsusb'])
+
+def get_lsusb_serial(device):
+  try:
+    return device['Device Descriptor']['iSerial']['_desc']
+  except KeyError:
+    return None
+
+def get_android_devices():
+  return [serial for serial in (get_lsusb_serial(d) for d in lsusb())
+          if serial]
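+
+# Illustrative usage sketch (output depends on the attached USB devices):
+#
+#   for dev in lsusb():
+#     print dev['bus'], dev['device'], dev.get('desc')
+#     print get_lsusb_serial(dev)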
diff --git a/catapult/devil/devil/utils/lsusb_test.py b/catapult/devil/devil/utils/lsusb_test.py
new file mode 100755
index 0000000..f381e72
--- /dev/null
+++ b/catapult/devil/devil/utils/lsusb_test.py
@@ -0,0 +1,250 @@
+#!/usr/bin/env python
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Tests for the cmd_helper module."""
+
+import unittest
+
+from devil import devil_env
+from devil.utils import lsusb
+from devil.utils import mock_calls
+
+with devil_env.SysPath(devil_env.PYMOCK_PATH):
+  import mock # pylint: disable=import-error
+
+RAW_OUTPUT = """
+Bus 003 Device 007: ID 18d1:4ee2 Google Inc. Nexus 4 (debug)
+Device Descriptor:
+  bLength                18
+  bDescriptorType         1
+  bcdUSB               2.00
+  bDeviceClass            0 (Defined at Interface level)
+  bDeviceSubClass         0
+  bDeviceProtocol         0
+  bMaxPacketSize0        64
+  idVendor           0x18d1 Google Inc.
+  idProduct          0x4ee2 Nexus 4 (debug)
+  bcdDevice            2.28
+  iManufacturer           1 LGE
+  iProduct                2 Nexus 4
+  iSerial                 3 01d2450ea194a93b
+  bNumConfigurations      1
+  Configuration Descriptor:
+    bLength                 9
+    bDescriptorType         2
+    wTotalLength           62
+    bNumInterfaces          2
+    bConfigurationValue     1
+    iConfiguration          0
+    bmAttributes         0x80
+      (Bus Powered)
+    MaxPower              500mA
+    Interface Descriptor:
+      bLength                 9
+      bDescriptorType         4
+      bInterfaceNumber        0
+      bAlternateSetting       0
+      bNumEndpoints           3
+      bInterfaceClass       255 Vendor Specific Class
+      bInterfaceSubClass    255 Vendor Specific Subclass
+      bInterfaceProtocol      0
+      iInterface              4 MTP
+      Endpoint Descriptor:
+        bLength                 7
+        bDescriptorType         5
+        bEndpointAddress     0x81  EP 1 IN
+        bmAttributes            2
+          Transfer Type            Bulk
+          Synch Type               None
+          Usage Type               Data
+        wMaxPacketSize     0x0040  1x 64 bytes
+        bInterval               0
+      Endpoint Descriptor:
+        bLength                 7
+        bDescriptorType         5
+        bEndpointAddress     0x01  EP 1 OUT
+        bmAttributes            2
+          Transfer Type            Bulk
+          Synch Type               None
+          Usage Type               Data
+        wMaxPacketSize     0x0040  1x 64 bytes
+        bInterval               0
+      Endpoint Descriptor:
+        bLength                 7
+        bDescriptorType         5
+        bEndpointAddress     0x82  EP 2 IN
+        bmAttributes            3
+          Transfer Type            Interrupt
+          Synch Type               None
+          Usage Type               Data
+        wMaxPacketSize     0x001c  1x 28 bytes
+        bInterval               6
+    Interface Descriptor:
+      bLength                 9
+      bDescriptorType         4
+      bInterfaceNumber        1
+      bAlternateSetting       0
+      bNumEndpoints           2
+      bInterfaceClass       255 Vendor Specific Class
+      bInterfaceSubClass     66
+      bInterfaceProtocol      1
+      iInterface              0
+      Endpoint Descriptor:
+        bLength                 7
+        bDescriptorType         5
+        bEndpointAddress     0x83  EP 3 IN
+        bmAttributes            2
+          Transfer Type            Bulk
+          Synch Type               None
+          Usage Type               Data
+        wMaxPacketSize     0x0040  1x 64 bytes
+        bInterval               0
+      Endpoint Descriptor:
+        bLength                 7
+        bDescriptorType         5
+        bEndpointAddress     0x02  EP 2 OUT
+        bmAttributes            2
+          Transfer Type            Bulk
+          Synch Type               None
+          Usage Type               Data
+        wMaxPacketSize     0x0040  1x 64 bytes
+        bInterval               0
+Device Qualifier (for other device speed):
+  bLength                10
+  bDescriptorType         6
+  bcdUSB               2.00
+  bDeviceClass            0 (Defined at Interface level)
+  bDeviceSubClass         0
+  bDeviceProtocol         0
+  bMaxPacketSize0        64
+  bNumConfigurations      1
+Device Status:     0x0000
+  (Bus Powered)
+"""
+DEVICE_LIST = 'Bus 003 Device 007: ID 18d1:4ee2 Google Inc. Nexus 4 (debug)'
+
+EXPECTED_RESULT = {
+    'device': '007',
+    'bus': '003',
+    'desc': 'ID 18d1:4ee2 Google Inc. Nexus 4 (debug)',
+    'Device': {
+        '_value': 'Status:',
+        '_desc': '0x0000',
+        '(Bus': {
+            '_value': 'Powered)',
+            '_desc': None
+        }
+    },
+    'Device Descriptor': {
+        'bLength': {'_value': '18', '_desc': None},
+        'bcdDevice': {'_value': '2.28', '_desc': None},
+        'bDeviceSubClass': {'_value': '0', '_desc': None},
+        'idVendor': {'_value': '0x18d1', '_desc': 'Google Inc.'},
+        'bcdUSB': {'_value': '2.00', '_desc': None},
+        'bDeviceProtocol': {'_value': '0', '_desc': None},
+        'bDescriptorType': {'_value': '1', '_desc': None},
+        'Configuration Descriptor': {
+            'bLength': {'_value': '9', '_desc': None},
+            'wTotalLength': {'_value': '62', '_desc': None},
+            'bConfigurationValue': {'_value': '1', '_desc': None},
+            'Interface Descriptor': {
+                'bLength': {'_value': '9', '_desc': None},
+                'bAlternateSetting': {'_value': '0', '_desc': None},
+                'bInterfaceNumber': {'_value': '1', '_desc': None},
+                'bNumEndpoints': {'_value': '2', '_desc': None},
+                'bDescriptorType': {'_value': '4', '_desc': None},
+                'bInterfaceSubClass': {'_value': '66', '_desc': None},
+                'bInterfaceClass': {
+                    '_value': '255',
+                    '_desc': 'Vendor Specific Class'
+                },
+                'bInterfaceProtocol': {'_value': '1', '_desc': None},
+                'Endpoint Descriptor': {
+                    'bLength': {'_value': '7', '_desc': None},
+                    'bEndpointAddress': {'_value': '0x02', '_desc': 'EP 2 OUT'},
+                    'bInterval': {'_value': '0', '_desc': None},
+                    'bDescriptorType': {'_value': '5', '_desc': None},
+                    'bmAttributes': {
+                        '_value': '2',
+                        'Transfer': {'_value': 'Type', '_desc': 'Bulk'},
+                        'Usage': {'_value': 'Type', '_desc': 'Data'},
+                        '_desc': None,
+                        'Synch': {'_value': 'Type', '_desc': 'None'}
+                    },
+                    'wMaxPacketSize': {
+                        '_value': '0x0040',
+                        '_desc': '1x 64 bytes'
+                    }
+                },
+                'iInterface': {'_value': '0', '_desc': None}
+            },
+            'bDescriptorType': {'_value': '2', '_desc': None},
+            'iConfiguration': {'_value': '0', '_desc': None},
+            'bmAttributes': {
+                '_value': '0x80',
+                '_desc': None,
+                '(Bus': {'_value': 'Powered)', '_desc': None}
+            },
+            'bNumInterfaces': {'_value': '2', '_desc': None},
+            'MaxPower': {'_value': '500mA', '_desc': None}
+        },
+        'iSerial': {'_value': '3', '_desc': '01d2450ea194a93b'},
+        'idProduct': {'_value': '0x4ee2', '_desc': 'Nexus 4 (debug)'},
+        'iManufacturer': {'_value': '1', '_desc': 'LGE'},
+        'bDeviceClass': {
+            '_value': '0',
+            '_desc': '(Defined at Interface level)'
+        },
+        'iProduct': {'_value': '2', '_desc': 'Nexus 4'},
+        'bMaxPacketSize0': {'_value': '64', '_desc': None},
+        'bNumConfigurations': {'_value': '1', '_desc': None}
+    },
+    'Device Qualifier (for other device speed)': {
+        'bLength': {'_value': '10', '_desc': None},
+        'bNumConfigurations': {'_value': '1', '_desc': None},
+        'bDeviceSubClass': {'_value': '0', '_desc': None},
+        'bcdUSB': {'_value': '2.00', '_desc': None},
+        'bDeviceProtocol': {'_value': '0', '_desc': None},
+        'bDescriptorType': {'_value': '6', '_desc': None},
+        'bDeviceClass': {
+            '_value': '0',
+            '_desc': '(Defined at Interface level)'
+        },
+        'bMaxPacketSize0': {'_value': '64', '_desc': None}
+    }
+}
+
+
+class LsusbTest(mock_calls.TestCase):
+  """Test Lsusb parsing."""
+
+  def testLsusb(self):
+    with self.assertCalls(
+        (mock.call.devil.utils.cmd_helper.GetCmdStatusAndOutputWithTimeout(
+            ['lsusb'], timeout=10), (None, DEVICE_LIST)),
+        (mock.call.devil.utils.cmd_helper.GetCmdStatusAndOutputWithTimeout(
+          ['lsusb', '-v', '-s', '003:007'], timeout=10), (None, RAW_OUTPUT))):
+      self.assertDictEqual(lsusb.lsusb().pop(), EXPECTED_RESULT)
+
+  def testGetSerial(self):
+    with self.assertCalls(
+        (mock.call.devil.utils.cmd_helper.GetCmdStatusAndOutputWithTimeout(
+            ['lsusb'], timeout=10), (None, DEVICE_LIST)),
+        (mock.call.devil.utils.cmd_helper.GetCmdStatusAndOutputWithTimeout(
+          ['lsusb', '-v', '-s', '003:007'], timeout=10), (None, RAW_OUTPUT))):
+      self.assertEqual(lsusb.get_android_devices(), ['01d2450ea194a93b'])
+
+  def testGetLsusbSerial(self):
+    with self.assertCalls(
+        (mock.call.devil.utils.cmd_helper.GetCmdStatusAndOutputWithTimeout(
+            ['lsusb'], timeout=10), (None, DEVICE_LIST)),
+        (mock.call.devil.utils.cmd_helper.GetCmdStatusAndOutputWithTimeout(
+          ['lsusb', '-v', '-s', '003:007'], timeout=10), (None, RAW_OUTPUT))):
+      out = lsusb.lsusb().pop()
+      self.assertEqual(lsusb.get_lsusb_serial(out), '01d2450ea194a93b')
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/catapult/devil/devil/utils/mock_calls.py b/catapult/devil/devil/utils/mock_calls.py
new file mode 100644
index 0000000..5ae951e
--- /dev/null
+++ b/catapult/devil/devil/utils/mock_calls.py
@@ -0,0 +1,180 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+A test facility to assert call sequences while mocking their behavior.
+"""
+
+import unittest
+
+from devil import devil_env
+
+with devil_env.SysPath(devil_env.PYMOCK_PATH):
+  import mock  # pylint: disable=import-error
+
+
+class TestCase(unittest.TestCase):
+  """Adds assertCalls to TestCase objects."""
+  class _AssertCalls(object):
+
+    def __init__(self, test_case, expected_calls, watched):
+      def call_action(pair):
+        if isinstance(pair, type(mock.call)):
+          return (pair, None)
+        else:
+          return pair
+
+      def do_check(call):
+        def side_effect(*args, **kwargs):
+          received_call = call(*args, **kwargs)
+          self._test_case.assertTrue(
+              self._expected_calls,
+              msg=('Unexpected call: %s' % str(received_call)))
+          expected_call, action = self._expected_calls.pop(0)
+          self._test_case.assertTrue(
+              received_call == expected_call,
+              msg=('Expected call mismatch:\n'
+                   '  expected: %s\n'
+                   '  received: %s\n'
+                   % (str(expected_call), str(received_call))))
+          if callable(action):
+            return action(*args, **kwargs)
+          else:
+            return action
+        return side_effect
+
+      self._test_case = test_case
+      self._expected_calls = [call_action(pair) for pair in expected_calls]
+      watched = watched.copy()  # do not pollute the caller's dict
+      watched.update((call.parent.name, call.parent)
+                     for call, _ in self._expected_calls)
+      self._patched = [test_case.patch_call(call, side_effect=do_check(call))
+                       for call in watched.itervalues()]
+
+    def __enter__(self):
+      for patch in self._patched:
+        patch.__enter__()
+      return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+      for patch in self._patched:
+        patch.__exit__(exc_type, exc_val, exc_tb)
+      if exc_type is None:
+        missing = ''.join('  expected: %s\n' % str(call)
+                          for call, _ in self._expected_calls)
+        self._test_case.assertFalse(
+            missing,
+            msg='Expected calls not found:\n' + missing)
+
+  def __init__(self, *args, **kwargs):
+    super(TestCase, self).__init__(*args, **kwargs)
+    self.call = mock.call.self
+    self._watched = {}
+
+  def call_target(self, call):
+    """Resolve a self.call instance to the target it represents.
+
+    Args:
+      call: a self.call instance, e.g. self.call.adb.Shell
+
+    Returns:
+      The target object represented by the call, e.g. self.adb.Shell
+
+    Raises:
+      ValueError if the path of the call does not start with "self", i.e. the
+          target of the call is external to the self object.
+      AttributeError if the path of the call does not specify a valid
+          chain of attributes (without any calls) starting from "self".
+    """
+    path = call.name.split('.')
+    if path.pop(0) != 'self':
+      raise ValueError("Target %r outside of 'self' object" % call.name)
+    target = self
+    for attr in path:
+      target = getattr(target, attr)
+    return target
+
+  def patch_call(self, call, **kwargs):
+    """Patch the target of a mock.call instance.
+
+    Args:
+      call: a mock.call instance identifying a target to patch
+      Extra keyword arguments are processed by mock.patch
+
+    Returns:
+      A context manager to mock/unmock the target of the call
+    """
+    if call.name.startswith('self.'):
+      target = self.call_target(call.parent)
+      _, attribute = call.name.rsplit('.', 1)
+      if (hasattr(type(target), attribute)
+          and isinstance(getattr(type(target), attribute), property)):
+        return mock.patch.object(
+            type(target), attribute, new_callable=mock.PropertyMock, **kwargs)
+      else:
+        return mock.patch.object(target, attribute, **kwargs)
+    else:
+      return mock.patch(call.name, **kwargs)
+
+  def watchCalls(self, calls):
+    """Add calls to the set of watched calls.
+
+    Args:
+      calls: a sequence of mock.call instances identifying targets to watch
+    """
+    self._watched.update((call.name, call) for call in calls)
+
+  def watchMethodCalls(self, call, ignore=None):
+    """Watch all public methods of the target identified by a self.call.
+
+    Args:
+      call: a self.call instance identifying an object
+      ignore: a list of public methods to ignore when watching for calls
+    """
+    target = self.call_target(call)
+    if ignore is None:
+      ignore = []
+    self.watchCalls(getattr(call, method)
+                    for method in dir(target.__class__)
+                    if not method.startswith('_') and method not in ignore)
+
+  def clearWatched(self):
+    """Clear the set of watched calls."""
+    self._watched = {}
+
+  def assertCalls(self, *calls):
+    """A context manager to assert that a sequence of calls is made.
+
+    During the assertion, a number of functions and methods will be "watched",
+    and any calls made to them are expected to appear---in the exact same order,
+    and with the exact same arguments---as specified by the argument |calls|.
+
+    By default, the targets of all expected calls are watched. Further targets
+    to watch may be added using watchCalls and watchMethodCalls.
+
+    Optionally, each call may be accompanied by an action. If the action is a
+    (non-callable) value, this value will be used as the return value given to
+    the caller when the matching call is found. Alternatively, if the action is
+    a callable, the action will then be called with the same arguments as the
+    intercepted call, so that it can provide a return value or perform other
+    side effects. If the action is missing, a return value of None is assumed.
+
+    Note that mock.Mock objects are often convenient to use as a callable
+    action, e.g. to raise exceptions or return other objects which are
+    themselves callable.
+
+    Args:
+      calls: each argument is either a pair (expected_call, action) or just an
+          expected_call, where expected_call is a mock.call instance.
+
+    Raises:
+      AssertionError if the watched targets do not receive the exact sequence
+          of calls specified. Missing calls, extra calls, and calls with
+          mismatching arguments, all cause the assertion to fail.
+    """
+    return self._AssertCalls(self, calls, self._watched)
+
+  def assertCall(self, call, action=None):
+    return self.assertCalls((call, action))
+
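+# Illustrative usage sketch (FooTest and self.obj are hypothetical names; the
+# test case is assumed to set self.obj up elsewhere):
+#
+#   class FooTest(TestCase):
+#     def testBar(self):
+#       with self.assertCalls(
+#           (self.call.obj.Method('arg'), 'return value'),
+#           self.call.obj.OtherMethod()):
+#         self.obj.Method('arg')   # returns 'return value'
+#         self.obj.OtherMethod()   # returns None (no action given)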
diff --git a/catapult/devil/devil/utils/mock_calls_test.py b/catapult/devil/devil/utils/mock_calls_test.py
new file mode 100755
index 0000000..8eb4fc9
--- /dev/null
+++ b/catapult/devil/devil/utils/mock_calls_test.py
@@ -0,0 +1,173 @@
+#!/usr/bin/env python
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Unit tests for the contents of mock_calls.py.
+"""
+
+import logging
+import os
+import unittest
+
+from devil import devil_env
+from devil.android.sdk import version_codes
+from devil.utils import mock_calls
+
+with devil_env.SysPath(devil_env.PYMOCK_PATH):
+  import mock  # pylint: disable=import-error
+
+
+class _DummyAdb(object):
+
+  def __str__(self):
+    return '0123456789abcdef'
+
+  def Push(self, host_path, device_path):
+    logging.debug('(device %s) pushing %r to %r', self, host_path, device_path)
+
+  def IsOnline(self):
+    logging.debug('(device %s) checking device online', self)
+    return True
+
+  def Shell(self, cmd):
+    logging.debug('(device %s) running command %r', self, cmd)
+    return "nice output\n"
+
+  def Reboot(self):
+    logging.debug('(device %s) rebooted!', self)
+
+  @property
+  def build_version_sdk(self):
+    logging.debug('(device %s) getting build_version_sdk', self)
+    return version_codes.LOLLIPOP
+
+
+class TestCaseWithAssertCallsTest(mock_calls.TestCase):
+
+  def setUp(self):
+    self.adb = _DummyAdb()
+
+  def ShellError(self):
+    def action(cmd):
+      raise ValueError('(device %s) command %r is not nice' % (self.adb, cmd))
+    return action
+
+  def get_answer(self):
+    logging.debug("called 'get_answer' of %r object", self)
+    return 42
+
+  def echo(self, thing):
+    logging.debug("called 'echo' of %r object", self)
+    return thing
+
+  def testCallTarget_succeeds(self):
+    self.assertEquals(self.adb.Shell,
+                      self.call_target(self.call.adb.Shell))
+
+  def testCallTarget_failsExternal(self):
+    with self.assertRaises(ValueError):
+      self.call_target(mock.call.sys.getcwd)
+
+  def testCallTarget_failsUnknownAttribute(self):
+    with self.assertRaises(AttributeError):
+      self.call_target(self.call.adb.Run)
+
+  def testCallTarget_failsIntermediateCalls(self):
+    with self.assertRaises(AttributeError):
+      self.call_target(self.call.adb.RunShell('cmd').append)
+
+  def testPatchCall_method(self):
+    self.assertEquals(42, self.get_answer())
+    with self.patch_call(self.call.get_answer, return_value=123):
+      self.assertEquals(123, self.get_answer())
+    self.assertEquals(42, self.get_answer())
+
+  def testPatchCall_attribute_method(self):
+    with self.patch_call(self.call.adb.Shell, return_value='hello'):
+      self.assertEquals('hello', self.adb.Shell('echo hello'))
+
+  def testPatchCall_global(self):
+    with self.patch_call(mock.call.os.getcwd, return_value='/some/path'):
+      self.assertEquals('/some/path', os.getcwd())
+
+  def testPatchCall_withSideEffect(self):
+    with self.patch_call(self.call.adb.Shell, side_effect=ValueError):
+      with self.assertRaises(ValueError):
+        self.adb.Shell('echo hello')
+
+  def testPatchCall_property(self):
+    self.assertEquals(version_codes.LOLLIPOP, self.adb.build_version_sdk)
+    with self.patch_call(
+        self.call.adb.build_version_sdk,
+        return_value=version_codes.KITKAT):
+      self.assertEquals(version_codes.KITKAT, self.adb.build_version_sdk)
+    self.assertEquals(version_codes.LOLLIPOP, self.adb.build_version_sdk)
+
+  def testAssertCalls_succeeds_simple(self):
+    self.assertEquals(42, self.get_answer())
+    with self.assertCall(self.call.get_answer(), 123):
+      self.assertEquals(123, self.get_answer())
+    self.assertEquals(42, self.get_answer())
+
+  def testAssertCalls_succeeds_multiple(self):
+    with self.assertCalls(
+        (mock.call.os.getcwd(), '/some/path'),
+        (self.call.echo('hello'), 'hello'),
+        (self.call.get_answer(), 11),
+        self.call.adb.Push('this_file', 'that_file'),
+        (self.call.get_answer(), 12)):
+      self.assertEquals(os.getcwd(), '/some/path')
+      self.assertEquals('hello', self.echo('hello'))
+      self.assertEquals(11, self.get_answer())
+      self.adb.Push('this_file', 'that_file')
+      self.assertEquals(12, self.get_answer())
+
+  def testAssertCalls_succeeds_withAction(self):
+    with self.assertCall(
+        self.call.adb.Shell('echo hello'), self.ShellError()):
+      with self.assertRaises(ValueError):
+        self.adb.Shell('echo hello')
+
+  def testAssertCalls_fails_tooManyCalls(self):
+    with self.assertRaises(AssertionError):
+      with self.assertCalls(self.call.adb.IsOnline()):
+        self.adb.IsOnline()
+        self.adb.IsOnline()
+
+  def testAssertCalls_fails_tooFewCalls(self):
+    with self.assertRaises(AssertionError):
+      with self.assertCalls(self.call.adb.IsOnline()):
+        pass
+
+  def testAssertCalls_succeeds_extraCalls(self):
+    # we are not watching Reboot, so the assertion succeeds
+    with self.assertCalls(self.call.adb.IsOnline()):
+      self.adb.IsOnline()
+      self.adb.Reboot()
+
+  def testAssertCalls_fails_extraCalls(self):
+    self.watchCalls([self.call.adb.Reboot])
+    # this time we are also watching Reboot, so the assertion fails
+    with self.assertRaises(AssertionError):
+      with self.assertCalls(self.call.adb.IsOnline()):
+        self.adb.IsOnline()
+        self.adb.Reboot()
+
+  def testAssertCalls_succeeds_NoCalls(self):
+    self.watchMethodCalls(self.call.adb)  # we are watching all adb methods
+    with self.assertCalls():
+      pass
+
+  def testAssertCalls_fails_NoCalls(self):
+    self.watchMethodCalls(self.call.adb)
+    with self.assertRaises(AssertionError):
+      with self.assertCalls():
+        self.adb.IsOnline()
+
+
+if __name__ == '__main__':
+  logging.getLogger().setLevel(logging.DEBUG)
+  unittest.main(verbosity=2)
+
diff --git a/catapult/devil/devil/utils/parallelizer.py b/catapult/devil/devil/utils/parallelizer.py
new file mode 100644
index 0000000..b8a2824
--- /dev/null
+++ b/catapult/devil/devil/utils/parallelizer.py
@@ -0,0 +1,242 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+""" Wrapper that allows method execution in parallel.
+
+This class wraps a list of objects of the same type, emulates their
+interface, and executes any functions called on the objects in parallel
+in ReraiserThreads.
+
+This means that, given a list of objects:
+
+  class Foo:
+    def __init__(self):
+      self.baz = Baz()
+
+    def bar(self, my_param):
+      # do something
+
+  list_of_foos = [Foo(1), Foo(2), Foo(3)]
+
+we can take a sequential operation on that list of objects:
+
+  for f in list_of_foos:
+    f.bar('Hello')
+
+and run it in parallel across all of the objects:
+
+  Parallelizer(list_of_foos).bar('Hello')
+
+It can also handle (non-method) attributes of objects, so that this:
+
+  for f in list_of_foos:
+    f.baz.myBazMethod()
+
+can be run in parallel with:
+
+  Parallelizer(list_of_foos).baz.myBazMethod()
+
+Because it emulates the interface of the wrapped objects, a Parallelizer
+can be passed to a method or function that takes objects of that type:
+
+  def DoesSomethingWithFoo(the_foo):
+    the_foo.bar('Hello')
+    the_foo.bar('world')
+    the_foo.baz.myBazMethod()
+
+  DoesSomethingWithFoo(Parallelizer(list_of_foos))
+
+Note that this class spins up a thread for each object. Using this class
+to parallelize operations that are already fast will incur a net performance
+penalty.
+
+"""
+# pylint: disable=protected-access
+
+from devil.utils import reraiser_thread
+from devil.utils import watchdog_timer
+
+_DEFAULT_TIMEOUT = 30
+_DEFAULT_RETRIES = 3
+
+
+class Parallelizer(object):
+  """Allows parallel execution of method calls across a group of objects."""
+
+  def __init__(self, objs):
+    assert (objs is not None and len(objs) > 0), (
+        "Passed empty list to 'Parallelizer'")
+    self._orig_objs = objs
+    self._objs = objs
+
+  def __getattr__(self, name):
+    """Emulate getting the |name| attribute of |self|.
+
+    Args:
+      name: The name of the attribute to retrieve.
+    Returns:
+      A Parallelizer emulating the |name| attribute of |self|.
+    """
+    self.pGet(None)
+
+    r = type(self)(self._orig_objs)
+    r._objs = [getattr(o, name) for o in self._objs]
+    return r
+
+  def __getitem__(self, index):
+    """Emulate getting the value of |self| at |index|.
+
+    Returns:
+      A Parallelizer emulating the value of |self| at |index|.
+    """
+    self.pGet(None)
+
+    r = type(self)(self._orig_objs)
+    r._objs = [o[index] for o in self._objs]
+    return r
+
+  def __call__(self, *args, **kwargs):
+    """Emulate calling |self| with |args| and |kwargs|.
+
+    Note that this call is asynchronous. Call pFinish on the return value to
+    block until the call finishes.
+
+    Returns:
+      A Parallelizer wrapping the ReraiserThreadGroup running the call in
+      parallel.
+    Raises:
+      AttributeError if the wrapped objects aren't callable.
+    """
+    self.pGet(None)
+
+    if not self._objs:
+      raise AttributeError('Nothing to call.')
+    for o in self._objs:
+      if not callable(o):
+        raise AttributeError("'%s' is not callable" % o.__name__)
+
+    r = type(self)(self._orig_objs)
+    r._objs = reraiser_thread.ReraiserThreadGroup(
+        [reraiser_thread.ReraiserThread(
+            o, args=args, kwargs=kwargs,
+            name='%s.%s' % (str(d), o.__name__))
+         for d, o in zip(self._orig_objs, self._objs)])
+    r._objs.StartAll()  # pylint: disable=W0212
+    return r
+
+  def pFinish(self, timeout):
+    """Finish any outstanding asynchronous operations.
+
+    Args:
+      timeout: The maximum number of seconds to wait for an individual
+               result to return, or None to wait forever.
+    Returns:
+      self, now emulating the return values.
+    """
+    self._assertNoShadow('pFinish')
+    if isinstance(self._objs, reraiser_thread.ReraiserThreadGroup):
+      self._objs.JoinAll()
+      self._objs = self._objs.GetAllReturnValues(
+          watchdog_timer.WatchdogTimer(timeout))
+    return self
+
+  def pGet(self, timeout):
+    """Get the current wrapped objects.
+
+    Args:
+      timeout: Same as |pFinish|.
+    Returns:
+      A list of the results, in order of the provided objects.
+    Raises:
+      Any exception raised by any of the called functions.
+    """
+    self._assertNoShadow('pGet')
+    self.pFinish(timeout)
+    return self._objs
+
+  def pMap(self, f, *args, **kwargs):
+    """Map a function across the current wrapped objects in parallel.
+
+    This calls f(o, *args, **kwargs) for each o in the set of wrapped objects.
+
+    Note that this call is asynchronous. Call pFinish on the return value to
+    block until the call finishes.
+
+    Args:
+      f: The function to call.
+      args: The positional args to pass to f.
+      kwargs: The keyword args to pass to f.
+    Returns:
+      A Parallelizer wrapping the ReraiserThreadGroup running the map in
+      parallel.
+    """
+    self._assertNoShadow('pMap')
+    r = type(self)(self._orig_objs)
+    r._objs = reraiser_thread.ReraiserThreadGroup(
+        [reraiser_thread.ReraiserThread(
+            f, args=tuple([o] + list(args)), kwargs=kwargs,
+            name='%s(%s)' % (f.__name__, d))
+         for d, o in zip(self._orig_objs, self._objs)])
+    r._objs.StartAll()  # pylint: disable=W0212
+    return r
+
+  def _assertNoShadow(self, attr_name):
+    """Ensures that |attr_name| isn't shadowing part of the wrapped obejcts.
+
+    If the wrapped objects _do_ have an |attr_name| attribute, it will be
+    inaccessible to clients.
+
+    Args:
+      attr_name: The attribute to check.
+    Raises:
+      AssertionError if the wrapped objects have an attribute named 'attr_name'
+      or '_assertNoShadow'.
+    """
+    if isinstance(self._objs, reraiser_thread.ReraiserThreadGroup):
+      assert not hasattr(self._objs, '_assertNoShadow')
+      assert not hasattr(self._objs, attr_name)
+    else:
+      assert not any(hasattr(o, '_assertNoShadow') for o in self._objs)
+      assert not any(hasattr(o, attr_name) for o in self._objs)
+
+
+class SyncParallelizer(Parallelizer):
+  """A Parallelizer that blocks on function calls."""
+
+  # override
+  def __call__(self, *args, **kwargs):
+    """Emulate calling |self| with |args| and |kwargs|.
+
+    Note that this call is synchronous.
+
+    Returns:
+      A Parallelizer emulating the value returned from calling |self| with
+      |args| and |kwargs|.
+    Raises:
+      AttributeError if the wrapped objects aren't callable.
+    """
+    r = super(SyncParallelizer, self).__call__(*args, **kwargs)
+    r.pFinish(None)
+    return r
+
+  # override
+  def pMap(self, f, *args, **kwargs):
+    """Map a function across the current wrapped objects in parallel.
+
+    This calls f(o, *args, **kwargs) for each o in the set of wrapped objects.
+
+    Note that this call is synchronous.
+
+    Args:
+      f: The function to call.
+      args: The positional args to pass to f.
+      kwargs: The keyword args to pass to f.
+    Returns:
+      A Parallelizer emulating the return values of the parallel map.
+    """
+    r = super(SyncParallelizer, self).pMap(f, *args, **kwargs)
+    r.pFinish(None)
+    return r
+
diff --git a/catapult/devil/devil/utils/parallelizer_test.py b/catapult/devil/devil/utils/parallelizer_test.py
new file mode 100644
index 0000000..3162a4f
--- /dev/null
+++ b/catapult/devil/devil/utils/parallelizer_test.py
@@ -0,0 +1,166 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Unit tests for the contents of parallelizer.py."""
+
+# pylint: disable=W0212
+# pylint: disable=W0613
+
+import os
+import tempfile
+import time
+import unittest
+
+from devil.utils import parallelizer
+
+
+class ParallelizerTestObject(object):
+  """Class used to test parallelizer.Parallelizer."""
+
+  parallel = parallelizer.Parallelizer
+
+  def __init__(self, thing, completion_file_name=None):
+    self._thing = thing
+    self._completion_file_name = completion_file_name
+    self.helper = ParallelizerTestObjectHelper(thing)
+
+  @staticmethod
+  def doReturn(what):
+    return what
+
+  @classmethod
+  def doRaise(cls, what):
+    raise what
+
+  def doSetTheThing(self, new_thing):
+    self._thing = new_thing
+
+  def doReturnTheThing(self):
+    return self._thing
+
+  def doRaiseTheThing(self):
+    raise self._thing
+
+  def doRaiseIfExceptionElseSleepFor(self, sleep_duration):
+    if isinstance(self._thing, Exception):
+      raise self._thing
+    time.sleep(sleep_duration)
+    self._write_completion_file()
+    return self._thing
+
+  def _write_completion_file(self):
+    if self._completion_file_name and len(self._completion_file_name):
+      with open(self._completion_file_name, 'w+b') as completion_file:
+        completion_file.write('complete')
+
+  def __getitem__(self, index):
+    return self._thing[index]
+
+  def __str__(self):
+    return type(self).__name__
+
+
+class ParallelizerTestObjectHelper(object):
+
+  def __init__(self, thing):
+    self._thing = thing
+
+  def doReturnStringThing(self):
+    return str(self._thing)
+
+
+class ParallelizerTest(unittest.TestCase):
+
+  def testInitWithNone(self):
+    with self.assertRaises(AssertionError):
+      parallelizer.Parallelizer(None)
+
+  def testInitEmptyList(self):
+    with self.assertRaises(AssertionError):
+      parallelizer.Parallelizer([])
+
+  def testMethodCall(self):
+    test_data = ['abc_foo', 'def_foo', 'ghi_foo']
+    expected = ['abc_bar', 'def_bar', 'ghi_bar']
+    r = parallelizer.Parallelizer(test_data).replace('_foo', '_bar').pGet(0.1)
+    self.assertEquals(expected, r)
+
+  def testMutate(self):
+    devices = [ParallelizerTestObject(True) for _ in xrange(0, 10)]
+    self.assertTrue(all(d.doReturnTheThing() for d in devices))
+    ParallelizerTestObject.parallel(devices).doSetTheThing(False).pFinish(1)
+    self.assertTrue(not any(d.doReturnTheThing() for d in devices))
+
+  def testAllReturn(self):
+    devices = [ParallelizerTestObject(True) for _ in xrange(0, 10)]
+    results = ParallelizerTestObject.parallel(
+        devices).doReturnTheThing().pGet(1)
+    self.assertTrue(isinstance(results, list))
+    self.assertEquals(10, len(results))
+    self.assertTrue(all(results))
+
+  def testAllRaise(self):
+    devices = [ParallelizerTestObject(Exception('thing %d' % i))
+               for i in xrange(0, 10)]
+    p = ParallelizerTestObject.parallel(devices).doRaiseTheThing()
+    with self.assertRaises(Exception):
+      p.pGet(1)
+
+  def testOneFailOthersComplete(self):
+    parallel_device_count = 10
+    exception_index = 7
+    exception_msg = 'thing %d' % exception_index
+
+    try:
+      completion_files = [tempfile.NamedTemporaryFile(delete=False)
+                          for _ in xrange(0, parallel_device_count)]
+      devices = [
+          ParallelizerTestObject(
+              i if i != exception_index else Exception(exception_msg),
+              completion_files[i].name)
+          for i in xrange(0, parallel_device_count)]
+      for f in completion_files:
+        f.close()
+      p = ParallelizerTestObject.parallel(devices)
+      with self.assertRaises(Exception) as e:
+        p.doRaiseIfExceptionElseSleepFor(2).pGet(3)
+      self.assertTrue(exception_msg in str(e.exception))
+      for i in xrange(0, parallel_device_count):
+        with open(completion_files[i].name) as f:
+          if i == exception_index:
+            self.assertEquals('', f.read())
+          else:
+            self.assertEquals('complete', f.read())
+    finally:
+      for f in completion_files:
+        os.remove(f.name)
+
+  def testReusable(self):
+    devices = [ParallelizerTestObject(True) for _ in xrange(0, 10)]
+    p = ParallelizerTestObject.parallel(devices)
+    results = p.doReturn(True).pGet(1)
+    self.assertTrue(all(results))
+    results = p.doReturn(True).pGet(1)
+    self.assertTrue(all(results))
+    with self.assertRaises(Exception):
+      results = p.doRaise(Exception('reusableTest')).pGet(1)
+
+  def testContained(self):
+    devices = [ParallelizerTestObject(i) for i in xrange(0, 10)]
+    results = (ParallelizerTestObject.parallel(devices).helper
+        .doReturnStringThing().pGet(1))
+    self.assertTrue(isinstance(results, list))
+    self.assertEquals(10, len(results))
+    for i in xrange(0, 10):
+      self.assertEquals(str(i), results[i])
+
+  def testGetItem(self):
+    devices = [ParallelizerTestObject(range(i, i + 10)) for i in xrange(0, 10)]
+    results = ParallelizerTestObject.parallel(devices)[9].pGet(1)
+    self.assertEquals(range(9, 19), results)
+
+
+if __name__ == '__main__':
+  unittest.main(verbosity=2)
+
diff --git a/catapult/devil/devil/utils/reraiser_thread.py b/catapult/devil/devil/utils/reraiser_thread.py
new file mode 100644
index 0000000..56d95f3
--- /dev/null
+++ b/catapult/devil/devil/utils/reraiser_thread.py
@@ -0,0 +1,228 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Thread and ThreadGroup that reraise exceptions on the main thread."""
+# pylint: disable=W0212
+
+import logging
+import sys
+import threading
+import time
+import traceback
+
+from devil.utils import watchdog_timer
+
+
+class TimeoutError(Exception):
+  """Module-specific timeout exception."""
+  pass
+
+
+def LogThreadStack(thread, error_log_func=logging.critical):
+  """Log the stack for the given thread.
+
+  Args:
+    thread: a threading.Thread instance.
+    error_log_func: Logging function when logging errors.
+  """
+  stack = sys._current_frames()[thread.ident]
+  error_log_func('*' * 80)
+  error_log_func('Stack dump for thread %r', thread.name)
+  error_log_func('*' * 80)
+  for filename, lineno, name, line in traceback.extract_stack(stack):
+    error_log_func('File: "%s", line %d, in %s', filename, lineno, name)
+    if line:
+      error_log_func('  %s', line.strip())
+  error_log_func('*' * 80)
+
+
+class ReraiserThread(threading.Thread):
+  """Thread class that can reraise exceptions."""
+
+  def __init__(self, func, args=None, kwargs=None, name=None):
+    """Initialize thread.
+
+    Args:
+      func: callable to call on a new thread.
+      args: list of positional arguments for callable, defaults to empty.
+      kwargs: dictionary of keyword arguments for callable, defaults to empty.
+      name: thread name, defaults to Thread-N.
+    """
+    if not name and func.__name__ != '<lambda>':
+      name = func.__name__
+    super(ReraiserThread, self).__init__(name=name)
+    if not args:
+      args = []
+    if not kwargs:
+      kwargs = {}
+    self.daemon = True
+    self._func = func
+    self._args = args
+    self._kwargs = kwargs
+    self._ret = None
+    self._exc_info = None
+    self._thread_group = None
+
+  def ReraiseIfException(self):
+    """Reraise exception if an exception was raised in the thread."""
+    if self._exc_info:
+      raise self._exc_info[0], self._exc_info[1], self._exc_info[2]
+
+  def GetReturnValue(self):
+    """Reraise exception if present, otherwise get the return value."""
+    self.ReraiseIfException()
+    return self._ret
+
+  # override
+  def run(self):
+    """Overrides Thread.run() to add support for reraising exceptions."""
+    try:
+      self._ret = self._func(*self._args, **self._kwargs)
+    except:  # pylint: disable=W0702
+      self._exc_info = sys.exc_info()
+
+
+class ReraiserThreadGroup(object):
+  """A group of ReraiserThread objects."""
+
+  def __init__(self, threads=None):
+    """Initialize thread group.
+
+    Args:
+      threads: a list of ReraiserThread objects; defaults to empty.
+    """
+    self._threads = []
+    # Set when a thread from one group has called JoinAll on another. It is
+    # used to detect when there is a TimeoutRetryThread active that links to
+    # the current thread.
+    self.blocked_parent_thread_group = None
+    if threads:
+      for thread in threads:
+        self.Add(thread)
+
+  def Add(self, thread):
+    """Add a thread to the group.
+
+    Args:
+      thread: a ReraiserThread object.
+    """
+    assert thread._thread_group is None
+    thread._thread_group = self
+    self._threads.append(thread)
+
+  def StartAll(self, will_block=False):
+    """Start all threads.
+
+    Args:
+      will_block: Whether the calling thread will subsequently block on this
+        thread group. Causes the active ReraiserThreadGroup (if there is one)
+        to be marked as blocking on this thread group.
+    """
+    if will_block:
+      # Multiple threads blocking on the same outer thread should not happen in
+      # practice.
+      assert not self.blocked_parent_thread_group
+      self.blocked_parent_thread_group = CurrentThreadGroup()
+    for thread in self._threads:
+      thread.start()
+
+  def _JoinAll(self, watcher=None, timeout=None):
+    """Join all threads without stack dumps.
+
+    Reraises exceptions raised by the child threads and supports breaking
+    immediately on exceptions raised on the main thread.
+
+    Args:
+      watcher: Watchdog object providing the thread timeout. If none is
+          provided, the thread will never be timed out.
+      timeout: An optional number of seconds to wait before timing out the join
+          operation. This will not time out the threads.
+    """
+    if watcher is None:
+      watcher = watchdog_timer.WatchdogTimer(None)
+    alive_threads = self._threads[:]
+    end_time = (time.time() + timeout) if timeout else None
+    try:
+      while alive_threads and (end_time is None or end_time > time.time()):
+        for thread in alive_threads[:]:
+          if watcher.IsTimedOut():
+            raise TimeoutError('Timed out waiting for %d of %d threads.' %
+                               (len(alive_threads), len(self._threads)))
+          # Allow the main thread to periodically check for interrupts.
+          thread.join(0.1)
+          if not thread.isAlive():
+            alive_threads.remove(thread)
+      # All threads are allowed to complete before reraising exceptions.
+      for thread in self._threads:
+        thread.ReraiseIfException()
+    finally:
+      self.blocked_parent_thread_group = None
+
+  def IsAlive(self):
+    """Check whether any of the threads are still alive.
+
+    Returns:
+      Whether any of the threads are still alive.
+    """
+    return any(t.isAlive() for t in self._threads)
+
+  def JoinAll(self, watcher=None, timeout=None,
+              error_log_func=logging.critical):
+    """Join all threads.
+
+    Reraises exceptions raised by the child threads and supports breaking
+    immediately on exceptions raised on the main thread. Unfinished threads'
+    stacks will be logged on watchdog timeout.
+
+    Args:
+      watcher: Watchdog object providing the thread timeout. If none is
+          provided, the thread will never be timed out.
+      timeout: An optional number of seconds to wait before timing out the join
+          operation. This will not time out the threads.
+      error_log_func: Logging function when logging errors.
+    """
+    try:
+      self._JoinAll(watcher, timeout)
+    except TimeoutError:
+      error_log_func('Timed out. Dumping threads.')
+      for thread in (t for t in self._threads if t.isAlive()):
+        LogThreadStack(thread, error_log_func=error_log_func)
+      raise
+
+  def GetAllReturnValues(self, watcher=None):
+    """Get all return values, joining all threads if necessary.
+
+    Args:
+      watcher: same as in |JoinAll|. Only used if threads are alive.
+    """
+    if any([t.isAlive() for t in self._threads]):
+      self.JoinAll(watcher)
+    return [t.GetReturnValue() for t in self._threads]
+
+
+def CurrentThreadGroup():
+  """Returns the ReraiserThreadGroup that owns the running thread.
+
+  Returns:
+    The current thread group, otherwise None.
+  """
+  current_thread = threading.current_thread()
+  if isinstance(current_thread, ReraiserThread):
+    return current_thread._thread_group  # pylint: disable=no-member
+  return None
+
+
+def RunAsync(funcs, watcher=None):
+  """Executes the given functions in parallel and returns their results.
+
+  Args:
+    funcs: List of functions to perform on their own threads.
+    watcher: Watchdog object providing timeout, by default waits forever.
+
+  Returns:
+    A list of return values in the order of the given functions.
+  """
+  thread_group = ReraiserThreadGroup(ReraiserThread(f) for f in funcs)
+  thread_group.StartAll(will_block=True)
+  return thread_group.GetAllReturnValues(watcher=watcher)
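+
+
+if __name__ == '__main__':
+  # Illustrative usage sketch, not part of the upstream module: run two
+  # callables in parallel and collect their return values. An exception
+  # raised on either worker thread would be re-raised here when the results
+  # are collected.
+  print RunAsync((lambda: 1 + 1, lambda: 2 * 2))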
diff --git a/catapult/devil/devil/utils/reraiser_thread_unittest.py b/catapult/devil/devil/utils/reraiser_thread_unittest.py
new file mode 100644
index 0000000..e3c4e6b
--- /dev/null
+++ b/catapult/devil/devil/utils/reraiser_thread_unittest.py
@@ -0,0 +1,117 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Unittests for reraiser_thread.py."""
+
+import threading
+import unittest
+
+from devil.utils import reraiser_thread
+from devil.utils import watchdog_timer
+
+
+class TestException(Exception):
+  pass
+
+
+class TestReraiserThread(unittest.TestCase):
+  """Tests for reraiser_thread.ReraiserThread."""
+
+  def testNominal(self):
+    result = [None, None]
+
+    def f(a, b=None):
+      result[0] = a
+      result[1] = b
+
+    thread = reraiser_thread.ReraiserThread(f, [1], {'b': 2})
+    thread.start()
+    thread.join()
+    self.assertEqual(result[0], 1)
+    self.assertEqual(result[1], 2)
+
+  def testRaise(self):
+    def f():
+      raise TestException
+
+    thread = reraiser_thread.ReraiserThread(f)
+    thread.start()
+    thread.join()
+    with self.assertRaises(TestException):
+      thread.ReraiseIfException()
+
+
+class TestReraiserThreadGroup(unittest.TestCase):
+  """Tests for reraiser_thread.ReraiserThreadGroup."""
+
+  def testInit(self):
+    ran = [False] * 5
+
+    def f(i):
+      ran[i] = True
+
+    group = reraiser_thread.ReraiserThreadGroup(
+      [reraiser_thread.ReraiserThread(f, args=[i]) for i in range(5)])
+    group.StartAll()
+    group.JoinAll()
+    for v in ran:
+      self.assertTrue(v)
+
+  def testAdd(self):
+    ran = [False] * 5
+
+    def f(i):
+      ran[i] = True
+
+    group = reraiser_thread.ReraiserThreadGroup()
+    for i in xrange(5):
+      group.Add(reraiser_thread.ReraiserThread(f, args=[i]))
+    group.StartAll()
+    group.JoinAll()
+    for v in ran:
+      self.assertTrue(v)
+
+  def testJoinRaise(self):
+    def f():
+      raise TestException
+    group = reraiser_thread.ReraiserThreadGroup(
+      [reraiser_thread.ReraiserThread(f) for _ in xrange(5)])
+    group.StartAll()
+    with self.assertRaises(TestException):
+      group.JoinAll()
+
+  def testJoinTimeout(self):
+    def f():
+      pass
+    event = threading.Event()
+
+    def g():
+      event.wait()
+    group = reraiser_thread.ReraiserThreadGroup(
+        [reraiser_thread.ReraiserThread(g),
+         reraiser_thread.ReraiserThread(f)])
+    group.StartAll()
+    with self.assertRaises(reraiser_thread.TimeoutError):
+      group.JoinAll(watchdog_timer.WatchdogTimer(0.01))
+    event.set()
+
+
+class TestRunAsync(unittest.TestCase):
+  """Tests for reraiser_thread.RunAsync."""
+
+  def testNoArgs(self):
+    results = reraiser_thread.RunAsync([])
+    self.assertEqual([], results)
+
+  def testOneArg(self):
+    results = reraiser_thread.RunAsync([lambda: 1])
+    self.assertEqual([1], results)
+
+  def testTwoArgs(self):
+    a, b = reraiser_thread.RunAsync((lambda: 1, lambda: 2))
+    self.assertEqual(1, a)
+    self.assertEqual(2, b)
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/catapult/devil/devil/utils/reset_usb.py b/catapult/devil/devil/utils/reset_usb.py
new file mode 100755
index 0000000..3f3b30a
--- /dev/null
+++ b/catapult/devil/devil/utils/reset_usb.py
@@ -0,0 +1,100 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import fcntl
+import logging
+import re
+import sys
+
+from devil.android import device_errors
+from devil.utils import lsusb
+from devil.utils import run_tests_helper
+
+_INDENTATION_RE = re.compile(r'^( *)')
+_LSUSB_BUS_DEVICE_RE = re.compile(r'^Bus (\d{3}) Device (\d{3}):')
+_LSUSB_ENTRY_RE = re.compile(r'^ *([^ ]+) +([^ ]+) *([^ ].*)?$')
+_LSUSB_GROUP_RE = re.compile(r'^ *([^ ]+.*):$')
+
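+# The USBDEVFS_RESET ioctl request number, equivalent to _IO('U', 20) from
+# <linux/usbdevice_fs.h>.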
+_USBDEVFS_RESET = ord('U') << 8 | 20
+
+
+def reset_usb(bus, device):
+  """Reset the USB device with the given bus and device."""
+  usb_file_path = '/dev/bus/usb/%03d/%03d' % (bus, device)
+  with open(usb_file_path, 'w') as usb_file:
+    logging.debug('fcntl.ioctl(%s, %d)', usb_file_path, _USBDEVFS_RESET)
+    fcntl.ioctl(usb_file, _USBDEVFS_RESET)
+
+
+def reset_android_usb(serial):
+  """Reset the USB device for the given Android device."""
+  lsusb_info = lsusb.lsusb()
+
+  bus = None
+  device = None
+  for device_info in lsusb_info:
+    device_serial = lsusb.get_lsusb_serial(device_info)
+    if device_serial == serial:
+      bus = int(device_info.get('bus'))
+      device = int(device_info.get('device'))
+
+  if bus and device:
+    reset_usb(bus, device)
+  else:
+    raise device_errors.DeviceUnreachableError(
+        'Unable to determine bus or device for device %s' % serial)
+
+
+def reset_all_android_devices():
+  """Reset all USB devices that look like an Android device."""
+  _reset_all_matching(lambda i: bool(lsusb.get_lsusb_serial(i)))
+
+
+def _reset_all_matching(condition):
+  lsusb_info = lsusb.lsusb()
+  for device_info in lsusb_info:
+    if int(device_info.get('device')) != 1 and condition(device_info):
+      bus = int(device_info.get('bus'))
+      device = int(device_info.get('device'))
+      try:
+        reset_usb(bus, device)
+        serial = lsusb.get_lsusb_serial(device_info)
+        if serial:
+          logging.info('Reset USB device (bus: %03d, device: %03d, serial: %s)',
+              bus, device, serial)
+        else:
+          logging.info('Reset USB device (bus: %03d, device: %03d)',
+              bus, device)
+      except IOError:
+        logging.error(
+            'Failed to reset USB device (bus: %03d, device: %03d)',
+            bus, device)
+
+
+def main():
+  parser = argparse.ArgumentParser()
+  parser.add_argument('-v', '--verbose', action='count')
+  parser.add_argument('-s', '--serial')
+  parser.add_argument('--bus', type=int)
+  parser.add_argument('--device', type=int)
+  args = parser.parse_args()
+
+  run_tests_helper.SetLogLevel(args.verbose)
+
+  if args.serial:
+    reset_android_usb(args.serial)
+  elif args.bus and args.device:
+    reset_usb(args.bus, args.device)
+  else:
+    parser.error('Unable to determine target. '
+                 'Specify --serial or BOTH --bus and --device.')
+
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(main())
+
diff --git a/catapult/devil/devil/utils/run_tests_helper.py b/catapult/devil/devil/utils/run_tests_helper.py
new file mode 100644
index 0000000..7df2da6
--- /dev/null
+++ b/catapult/devil/devil/utils/run_tests_helper.py
@@ -0,0 +1,44 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Helper functions common to native, java and host-driven test runners."""
+
+import logging
+import sys
+import time
+
+
+class CustomFormatter(logging.Formatter):
+  """Custom log formatter."""
+
+  # override
+  def __init__(self, fmt='%(threadName)-4s  %(message)s'):
+    # Can't use super() because in older Python versions logging.Formatter does
+    # not inherit from object.
+    logging.Formatter.__init__(self, fmt=fmt)
+    self._creation_time = time.time()
+
+  # override
+  def format(self, record):
+    # Can't use super() because in older Python versions logging.Formatter does
+    # not inherit from object.
+    msg = logging.Formatter.format(self, record)
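+    # Abbreviate the main thread's name so the %(threadName)-4s column stays
+    # compact.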
+    if 'MainThread' in msg[:19]:
+      msg = msg.replace('MainThread', 'Main', 1)
+    timediff = time.time() - self._creation_time
+    return '%s %8.3fs %s' % (record.levelname[0], timediff, msg)
+
+
+def SetLogLevel(verbose_count):
+  """Sets log level as |verbose_count|."""
+  log_level = logging.WARNING  # Default.
+  if verbose_count == 1:
+    log_level = logging.INFO
+  elif verbose_count >= 2:
+    log_level = logging.DEBUG
+  logger = logging.getLogger()
+  logger.setLevel(log_level)
+  custom_handler = logging.StreamHandler(sys.stdout)
+  custom_handler.setFormatter(CustomFormatter())
+  logging.getLogger().addHandler(custom_handler)
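+
+
+if __name__ == '__main__':
+  # Illustrative usage sketch, not part of the upstream module: a
+  # verbose_count of 2 (i.e. -vv on a command line) enables debug logging
+  # through the custom formatter.
+  SetLogLevel(2)
+  logging.debug('visible because verbose_count >= 2 maps to DEBUG')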
diff --git a/catapult/devil/devil/utils/timeout_retry.py b/catapult/devil/devil/utils/timeout_retry.py
new file mode 100644
index 0000000..95e90ee
--- /dev/null
+++ b/catapult/devil/devil/utils/timeout_retry.py
@@ -0,0 +1,181 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A utility to run functions with timeouts and retries."""
+# pylint: disable=W0702
+
+import logging
+import threading
+import time
+import traceback
+
+from devil.utils import reraiser_thread
+from devil.utils import watchdog_timer
+
+
+class TimeoutRetryThreadGroup(reraiser_thread.ReraiserThreadGroup):
+
+  def __init__(self, timeout, threads=None):
+    super(TimeoutRetryThreadGroup, self).__init__(threads)
+    self._watcher = watchdog_timer.WatchdogTimer(timeout)
+
+  def GetWatcher(self):
+    """Returns the watchdog keeping track of this thread's time."""
+    return self._watcher
+
+  def GetElapsedTime(self):
+    return self._watcher.GetElapsed()
+
+  def GetRemainingTime(self, required=0, msg=None):
+    """Get the remaining time before the thread times out.
+
+    Useful to send as the |timeout| parameter of async IO operations.
+
+    Args:
+      required: minimum amount of time that will be required to complete, e.g.,
+        some sleep or IO operation.
+      msg: error message to show if timing out.
+
+    Returns:
+      The number of seconds remaining before the thread times out, or None
+      if the thread never times out.
+
+    Raises:
+      reraiser_thread.TimeoutError if the remaining time is less than the
+        required time.
+    """
+    remaining = self._watcher.GetRemaining()
+    if remaining is not None and remaining < required:
+      if msg is None:
+        msg = 'Timeout expired'
+      if remaining > 0:
+        msg += (', wait of %.1f secs required but only %.1f secs left'
+                % (required, remaining))
+      raise reraiser_thread.TimeoutError(msg)
+    return remaining
+
+
+def CurrentTimeoutThreadGroup():
+  """Returns the thread group that owns or is blocked on the active thread.
+
+  Returns:
+    Returns None if no TimeoutRetryThreadGroup is tracking the current thread.
+  """
+  thread_group = reraiser_thread.CurrentThreadGroup()
+  while thread_group:
+    if isinstance(thread_group, TimeoutRetryThreadGroup):
+      return thread_group
+    thread_group = thread_group.blocked_parent_thread_group
+  return None
+
+
+def WaitFor(condition, wait_period=5, max_tries=None):
+  """Wait for a condition to become true.
+
+  Repeatedly call the function condition(), with no arguments, until it returns
+  a true value.
+
+  If called within a TimeoutRetryThreadGroup, it cooperates nicely with it.
+
+  Args:
+    condition: function with the condition to check
+    wait_period: number of seconds to wait before retrying to check the
+      condition
+    max_tries: maximum number of checks to make, the default tries forever
+      or until the TimeoutRetryThreadGroup expires.
+
+  Returns:
+    The true value returned by the condition, or None if the condition was
+    not met after max_tries.
+
+  Raises:
+    reraiser_thread.TimeoutError: if the current thread is a
+      TimeoutRetryThreadGroup and the timeout expires.
+  """
+  condition_name = condition.__name__
+  timeout_thread_group = CurrentTimeoutThreadGroup()
+  while max_tries is None or max_tries > 0:
+    result = condition()
+    if max_tries is not None:
+      max_tries -= 1
+    msg = ['condition', repr(condition_name), 'met' if result else 'not met']
+    if timeout_thread_group:
+      # pylint: disable=no-member
+      msg.append('(%.1fs)' % timeout_thread_group.GetElapsedTime())
+    logging.info(' '.join(msg))
+    if result:
+      return result
+    if timeout_thread_group:
+      # pylint: disable=no-member
+      timeout_thread_group.GetRemainingTime(wait_period,
+          msg='Timed out waiting for %r' % condition_name)
+    time.sleep(wait_period)
+  return None
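+
+# Example use of WaitFor (illustrative; |device_is_ready| stands in for any
+# caller-supplied zero-argument predicate):
+#
+#   WaitFor(device_is_ready, wait_period=1, max_tries=30)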
+
+
+def _LogLastException(thread_name, attempt, max_attempts, log_func):
+  log_func('*' * 80)
+  log_func('Exception on thread %s (attempt %d of %d)', thread_name,
+                   attempt, max_attempts)
+  log_func('*' * 80)
+  fmt_exc = ''.join(traceback.format_exc())
+  for line in fmt_exc.splitlines():
+    log_func(line.rstrip())
+  log_func('*' * 80)
+
+
+def AlwaysRetry(_exception):
+  return True
+
+
+def Run(func, timeout, retries, args=None, kwargs=None, desc=None,
+        error_log_func=logging.critical, retry_if_func=AlwaysRetry):
+  """Runs the passed function in a separate thread with timeouts and retries.
+
+  Args:
+    func: the function to be wrapped.
+    timeout: the timeout in seconds for each try.
+    retries: the number of retries.
+    args: list of positional args to pass to |func|.
+    kwargs: dictionary of keyword args to pass to |func|.
+    desc: An optional description of |func| used in logging. If omitted,
+      |func.__name__| will be used.
+    error_log_func: Logging function when logging errors.
+    retry_if_func: Unary callable that takes an exception and returns
+      whether |func| should be retried. Defaults to always retrying.
+
+  Returns:
+    The return value of func(*args, **kwargs).
+  """
+  if not args:
+    args = []
+  if not kwargs:
+    kwargs = {}
+
+  num_try = 1
+  while True:
+    thread_name = 'TimeoutThread-%d-for-%s' % (num_try,
+                                               threading.current_thread().name)
+    child_thread = reraiser_thread.ReraiserThread(lambda: func(*args, **kwargs),
+                                                  name=thread_name)
+    try:
+      thread_group = TimeoutRetryThreadGroup(timeout, threads=[child_thread])
+      thread_group.StartAll(will_block=True)
+      while True:
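+        # Join in 60-second slices so that long-running calls still produce
+        # the periodic 'Still working' progress log below.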
+        thread_group.JoinAll(watcher=thread_group.GetWatcher(), timeout=60,
+                             error_log_func=error_log_func)
+        if thread_group.IsAlive():
+          logging.info('Still working on %s', desc if desc else func.__name__)
+        else:
+          return thread_group.GetAllReturnValues()[0]
+    except reraiser_thread.TimeoutError as e:
+      # Timeouts already get their stacks logged.
+      if num_try > retries or not retry_if_func(e):
+        raise
+      # Do not catch KeyboardInterrupt.
+    except Exception as e:  # pylint: disable=broad-except
+      if num_try > retries or not retry_if_func(e):
+        raise
+      _LogLastException(thread_name, num_try, retries + 1, error_log_func)
+    num_try += 1
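+
+
+if __name__ == '__main__':
+  # Illustrative usage sketch, not part of the upstream module: the callable
+  # fails on its first attempt and succeeds on the retry; each attempt gets a
+  # five-second timeout.
+  _attempts = []
+
+  def _FlakyExample():
+    _attempts.append(None)
+    if len(_attempts) < 2:
+      raise ValueError('first attempt fails')
+    return 'succeeded on attempt %d' % len(_attempts)
+
+  print Run(_FlakyExample, timeout=5, retries=3, error_log_func=logging.info)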
diff --git a/catapult/devil/devil/utils/timeout_retry_unittest.py b/catapult/devil/devil/utils/timeout_retry_unittest.py
new file mode 100755
index 0000000..8498288
--- /dev/null
+++ b/catapult/devil/devil/utils/timeout_retry_unittest.py
@@ -0,0 +1,79 @@
+#!/usr/bin/python
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Unittests for timeout_and_retry.py."""
+
+import logging
+import time
+import unittest
+
+from devil.utils import reraiser_thread
+from devil.utils import timeout_retry
+
+
+_DEFAULT_TIMEOUT = .1
+
+
+class TestException(Exception):
+  pass
+
+
+def _CountTries(tries):
+  tries[0] += 1
+  raise TestException
+
+
+class TestRun(unittest.TestCase):
+  """Tests for timeout_retry.Run."""
+
+  def testRun(self):
+    self.assertTrue(timeout_retry.Run(
+        lambda x: x, 30, 3, [True], {}))
+
+  def testTimeout(self):
+    tries = [0]
+
+    def _sleep():
+      tries[0] += 1
+      time.sleep(1)
+
+    self.assertRaises(
+        reraiser_thread.TimeoutError, timeout_retry.Run, _sleep, .0001, 1,
+        error_log_func=logging.debug)
+    self.assertEqual(tries[0], 2)
+
+  def testRetries(self):
+    tries = [0]
+    self.assertRaises(
+        TestException, timeout_retry.Run, lambda: _CountTries(tries),
+        _DEFAULT_TIMEOUT, 3, error_log_func=logging.debug)
+    self.assertEqual(tries[0], 4)
+
+  def testNoRetries(self):
+    tries = [0]
+    self.assertRaises(
+        TestException, timeout_retry.Run, lambda: _CountTries(tries),
+        _DEFAULT_TIMEOUT, 0, error_log_func=logging.debug)
+    self.assertEqual(tries[0], 1)
+
+  def testReturnValue(self):
+    self.assertTrue(timeout_retry.Run(lambda: True, _DEFAULT_TIMEOUT, 3))
+
+  def testCurrentTimeoutThreadGroup(self):
+    def InnerFunc():
+      current_thread_group = timeout_retry.CurrentTimeoutThreadGroup()
+      self.assertIsNotNone(current_thread_group)
+
+      def InnerInnerFunc():
+        self.assertEqual(current_thread_group,
+                         timeout_retry.CurrentTimeoutThreadGroup())
+        return True
+      return reraiser_thread.RunAsync((InnerInnerFunc,))[0]
+
+    self.assertTrue(timeout_retry.Run(InnerFunc, _DEFAULT_TIMEOUT, 3))
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/catapult/devil/devil/utils/watchdog_timer.py b/catapult/devil/devil/utils/watchdog_timer.py
new file mode 100644
index 0000000..2f4c464
--- /dev/null
+++ b/catapult/devil/devil/utils/watchdog_timer.py
@@ -0,0 +1,47 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""WatchdogTimer timeout objects."""
+
+import time
+
+
+class WatchdogTimer(object):
+  """A resetable timeout-based watchdog.
+
+  This object is threadsafe.
+  """
+
+  def __init__(self, timeout):
+    """Initializes the watchdog.
+
+    Args:
+      timeout: The timeout in seconds. If timeout is None, it will never
+          time out.
+    """
+    self._start_time = time.time()
+    self._timeout = timeout
+
+  def Reset(self):
+    """Resets the timeout countdown."""
+    self._start_time = time.time()
+
+  def GetElapsed(self):
+    """Returns the elapsed time of the watchdog."""
+    return time.time() - self._start_time
+
+  def GetRemaining(self):
+    """Returns the remaining time of the watchdog."""
+    if self._timeout:
+      return self._timeout - self.GetElapsed()
+    else:
+      return None
+
+  def IsTimedOut(self):
+    """Whether the watchdog has timed out.
+
+    Returns:
+      True if the watchdog has timed out, False otherwise.
+    """
+    remaining = self.GetRemaining()
+    return remaining is not None and remaining < 0
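+
+
+if __name__ == '__main__':
+  # Illustrative usage sketch, not part of the upstream module: poll a
+  # quarter-second watchdog until it expires.
+  _watchdog = WatchdogTimer(0.25)
+  while not _watchdog.IsTimedOut():
+    time.sleep(0.05)
+  print 'expired after %.2fs' % _watchdog.GetElapsed()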
diff --git a/catapult/devil/devil/utils/zip_utils.py b/catapult/devil/devil/utils/zip_utils.py
new file mode 100644
index 0000000..d799463
--- /dev/null
+++ b/catapult/devil/devil/utils/zip_utils.py
@@ -0,0 +1,31 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import zipfile
+
+
+def WriteToZipFile(zip_file, path, arc_path):
+  """Recursively write |path| to |zip_file| as |arc_path|.
+
+  zip_file: An open instance of zipfile.ZipFile.
+  path: An absolute path to the file or directory to be zipped.
+  arc_path: A relative path within the zip file to which the file or directory
+    located at |path| should be written.
+  """
+  if os.path.isdir(path):
+    for dir_path, _, file_names in os.walk(path):
+      dir_arc_path = os.path.join(arc_path, os.path.relpath(dir_path, path))
+      logging.debug('dir:  %s -> %s', dir_path, dir_arc_path)
+      zip_file.write(dir_path, dir_arc_path, zipfile.ZIP_STORED)
+      for f in file_names:
+        file_path = os.path.join(dir_path, f)
+        file_arc_path = os.path.join(dir_arc_path, f)
+        logging.debug('file: %s -> %s', file_path, file_arc_path)
+        zip_file.write(file_path, file_arc_path, zipfile.ZIP_DEFLATED)
+  else:
+    logging.debug('file: %s -> %s', path, arc_path)
+    zip_file.write(path, arc_path, zipfile.ZIP_DEFLATED)
+
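+
+if __name__ == '__main__':
+  # Illustrative usage sketch, not part of the upstream module: archive the
+  # current working directory under the archive path 'contents'. Both paths
+  # here are arbitrary examples.
+  with zipfile.ZipFile('/tmp/example.zip', 'w') as example_zip:
+    WriteToZipFile(example_zip, os.path.abspath('.'), 'contents')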
diff --git a/catapult/devil/pylintrc b/catapult/devil/pylintrc
new file mode 100644
index 0000000..7e024a2
--- /dev/null
+++ b/catapult/devil/pylintrc
@@ -0,0 +1,68 @@
+[MESSAGES CONTROL]
+
+# Disable the message, report, category or checker with the given id(s).
+# TODO: Shrink this list to as small as possible.
+disable=
+  design,
+  similarities,
+
+  bad-continuation,
+  fixme,
+  import-error,
+  invalid-name,
+  locally-disabled,
+  locally-enabled,
+  missing-docstring,
+  star-args,
+
+
+[REPORTS]
+
+# Don't write out full reports, just messages.
+reports=no
+
+
+[BASIC]
+
+# Regular expression which should only match correct function names.
+function-rgx=^(?:(?P<exempt>setUp|tearDown|setUpModule|tearDownModule)|(?P<camel_case>_?[A-Z][a-zA-Z0-9]*))$
+
+# Regular expression which should only match correct method names.
+method-rgx=^(?:(?P<exempt>_[a-z0-9_]+__|get|post|runTest|setUp|tearDown|setUpTestCase|tearDownTestCase|setupSelf|tearDownClass|setUpClass)|(?P<camel_case>(_{0,2}|test|assert)[A-Z][a-zA-Z0-9_]*))$
+
+# Regular expression which should only match correct argument names.
+argument-rgx=^[a-z][a-z0-9_]*$
+
+# Regular expression which should only match correct variable names.
+variable-rgx=^[a-z][a-z0-9_]*$
+
+# Good variable names which should always be accepted, separated by a comma.
+good-names=main,_
+
+# List of builtin function names that should not be used, separated by a comma.
+bad-functions=apply,input,reduce
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused imports in __init__ files.
+init-import=no
+
+# A regular expression matching names used for dummy variables (i.e. not used).
+dummy-variables-rgx=^_.*$|dummy
+
+
+[TYPECHECK]
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+
+[FORMAT]
+
+# Maximum number of lines in a module.
+max-module-lines=10000
+
+# We use two spaces for indents, instead of the usual four spaces or tab.
+indent-string='  '
diff --git a/catapult/docs/adding-committers.md b/catapult/docs/adding-committers.md
new file mode 100644
index 0000000..6e87ff4
--- /dev/null
+++ b/catapult/docs/adding-committers.md
@@ -0,0 +1,12 @@
+# Adding contributors
+
+Admins (nduca, sullivan) can add contributors to the project. There are two
+steps:
+
+1.  Add the person's github account to the [catapult]
+(https://github.com/orgs/catapult-project/teams/catapult) team.
+2.  Add the person's email to the [commit queue list]
+(https://chrome-infra-auth.appspot.com/auth/groups#project-catapult-committers).
+
+Because there is no API to retrieve a person's GitHub ID from their email
+address or vice versa, we cannot automate this into one step.
diff --git a/catapult/docs/dev-server-tests.md b/catapult/docs/dev-server-tests.md
new file mode 100644
index 0000000..6a97d0b
--- /dev/null
+++ b/catapult/docs/dev-server-tests.md
@@ -0,0 +1,82 @@
+# Overview of dev_server testing
+
+## Introduction
+
+Catapult has a simple, optionally asynchronous, JavaScript testing framework.
+The framework is located in `/tracing/base/unittest/`.
+
+Tests live in `<filename>_test.html` files where, typically, `filename`
+matches the name of the file being tested. The tests sit in the same
+folder as their respective files.
+
+## Test Creation
+
+The general structure of tests is as follows (assuming a file named `ui/foo_test.html`):
+
+```
+<link rel="import" href="/ui/foo.html">
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  test('instantiate', function() {
+    var myFoo = ui.Foo();
+    this.addHTMLOutput(myFoo);
+  });
+
+  test('somethingElse', function() {
+  });
+});
+```
+
+Generally, there is one test suite per file (there is an assumption inside the
+code that this is true).
+
+If you add something to the DOM with `appendChild`, you should also remove it.
+The exception is `this.addHTMLOutput(element)`: content added that way is
+handled for you; it is shown if there is an error and hidden otherwise.
+
+The current tests follow the convention that tests which exist just to draw
+things are named with an `instantiate` prefix: `instantiate`,
+`instantiate_multiRow`, etc.
+
+## Chai
+
+Catapult uses [Chai](http://chaijs.com) for assertions. We are using Chai's
+[TDD `assert` style](http://chaijs.com/api/assert/).
+
+## Execution
+
+You'll need to start a dev_server to run the tests:
+```
+$ bin/run_dev_server
+```
+
+After you start the dev_server, it'll be available at http://localhost:8003.
+You'll see links to run unit tests for all projects. We'll use the `tracing/`
+project as an example below.
+
+### Running all tests
+
+```
+http://localhost:8003/tracing/tests.html
+```
+
+### Running an individual test suite (such as `ui/foo_test.html`)
+
+```
+http://localhost:8003/tracing/tests.html?testSuiteName=ui.foo
+```
+
+### Running tests named blah
+
+```
+http://localhost:8003/tracing/tests.html?testFilterString=blah
+```
+
+## Options
+
+If you select the `small format` option on the main test page and reload, the
+test output will be condensed into a much smaller format, making it easier to
+see errors without having to scroll the screen.
diff --git a/catapult/docs/directory-structure.md b/catapult/docs/directory-structure.md
new file mode 100644
index 0000000..16f844e
--- /dev/null
+++ b/catapult/docs/directory-structure.md
@@ -0,0 +1,55 @@
+# Adding a directory to catapult
+
+## Where should the code live?
+
+Catapult is intended to be a set of performance tools, mostly based on tracing,
+for developers of Chromium and other software to analyze that software’s
+performance. It has a lot of supporting libraries and tooling to make this
+happen. We’d like to create an organizational structure that makes it easy to
+find the performance tooling developers want, and hard to accidentally depend
+on something internal. Furthermore, we’d like to make it easy for code in
+catapult to eventually grow to its own repo, when possible. To that end, we
+use these guidelines to decide where code should live.
+
+  * Is it a **performance product**, meant to be used by external developers
+    for performance analysis? Some examples include telemetry and the perf
+    dashboard. If so, it should be at `toplevel`.
+  * Is it used only by our buildbot or build process? Put it in
+    `catapult_build`.
+  * If it's neither of the above, it should go in `common/`.
+  * If it is aspiring to be its own repo someday, that doesn't affect where it
+    goes; follow the above rules for directory placement. `third_party/` must
+    contain only genuine third-party repos, to conform to the rules of repos
+    which include catapult.
+  * If something is experimental, talk with the catapult admins to figure out
+    where it should go.
+
+## How should directories be structured?
+We have some rules on directory structure to add consistency and avoid
+overloaded python imports.
+
+  * Toplevel directories are **not** modules. E.g. if `x` is a toplevel
+    directory, `x/__init__.py` **does not** exist. Directories in `common/`
+    do not have this restriction.
+  * Toplevel directories and directories in `common` should centralize all
+    their path computation and sys.path manipulation in their master init file
+    ([example](https://github.com/catapult-project/catapult/blob/master/telemetry/telemetry/__init__.py));
+    see the sketch after this list.
+  * Projects using the web server should provide, in their top directory, a
+    module which defines all the search paths to their HTML & JavaScript
+    resources
+    ([example](https://github.com/catapult-project/catapult/blob/master/dashboard/dashboard_project.py)).
+  * Build code should be separate from production code. Build scripts for
+    projects should be in `x/x_build/`
+  * If you have a feature that has an implementation in JS and Py, then it
+    should be in the same folder.
+  * HTML search paths are arranged such that they have the same _name_ as they
+    would in python. E.g. `tracing/tracing/base/math.html` is
+    `/tracing/base/math.html` for an HTML import, and
+    `tracing/tracing/base/math.py` is `tracing.base.math` in python.
+  * Executable files (e.g. files chmodded to +x) must live in `x/bin`.
+    `bin/` must not be a module (i.e. it must not contain `__init__.py`), as
+    such a name would create namespace conflicts. Executable files should
+    **not** have a
+    `.py` extension.
+  * We use a single dev server, `$catapult/bin/run_dev_server`; and have
+    per-project `bin/run_dev_server_tests` scripts.
+  * All python modules should have unique names. `$catapult/catapult_build`
+    instead of `$catapult/build`.
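+
+Below is a minimal sketch of the `sys.path` centralization described above,
+using a hypothetical toplevel project `x`; the telemetry `__init__.py` linked
+above is the authoritative example.
+
+```
+# x/x/__init__.py (hypothetical)
+import os
+import sys
+
+
+def _AddToPathIfNeeded(path):
+  if path not in sys.path:
+    sys.path.insert(0, path)
+
+
+# All path computation for the project's dependencies happens here, in one
+# place, rather than being scattered across individual modules.
+_CATAPULT_DIR = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir))
+_AddToPathIfNeeded(os.path.join(_CATAPULT_DIR, 'third_party', 'mock'))
+```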
diff --git a/catapult/docs/promises-vs-tasks.md b/catapult/docs/promises-vs-tasks.md
new file mode 100644
index 0000000..0b95b8d
--- /dev/null
+++ b/catapult/docs/promises-vs-tasks.md
@@ -0,0 +1,40 @@
+# Promises vs Tasks
+
+`Promise` is a well-defined [built-in object](https://developer.mozilla.org/en/docs/Web/JavaScript/Reference/Global_Objects/Promise) that makes it easy, among other things, to chain synchronous or asynchronous operations.
+
+`Task` is an object defined in trace_viewer/base that can be used to chain synchronous operations.
+
+Since both tasks and promises allow chaining operations it's easy to confuse them. The goal of this page is to clarify how the classes differ and when to use either.
+
+## Semantic differences
+
+A first important difference is that tasks cannot be used with asynchronous operations. In a `Promise`, you can chain an asynchronous operation by calling `then(functionReturningAPromise)`. On the other hand, if you create a task from `functionReturningAPromise` and call `after()` on it, then the subsequent task will execute before the promise is resolved.
+
+Another difference is that it's possible to run a task in a synchronous way. Calling `run()` on a task will block until the first task in the chain is completed. This is made possible by the fact that all operations in the task queue are synchronous. Compare this to `Promise` which doesn't have a `run` method and for which execution is scheduled as soon as the promise is created.
+
+Finally, where tasks allow a queue of dependent operations, promises are more flexible and allow creating a more complex dependency graph between operations via `then()`, `Promise.all()` and `Promise.race()`.
+
+## Differences in API
+
+Chaining operations is similar:
+
+    taskA.after(taskB).after(taskC);
+    promiseA.then(functionReturningPromiseB).then(functionReturningPromiseC);
+
+An important difference, though, is that a task can only have one `after()` task. Trying to do `taskA.after(taskB); taskA.after(taskC);` will result in a runtime error. Promises have no such limitation.
+
+In addition to `after()`, tasks expose the method `subtask()` that makes it possible to insert tasks in the execution chain, even as the operations in the chain are being executed. Promises do not directly support this feature.
+
+Tasks have a `run()` method to synchronously execute the next operation in the queue. Promises have no such thing, and getting control back once a promise has executed requires the use of `then()`.
+
+`Task.RunWhenIdle` is another API call that is not available on promises. It schedules the task queue to be gradually consumed, tying task execution to `requestAnimationFrame` in a way that makes sure not to bust the time budget for a frame. Given its asynchronous nature, `RunWhenIdle` returns a `Promise`.
+
+Note that there are a number of other minor API differences that are omitted here.
+
+## What should you use?
+
+If some of the operations you want to chain are asynchronous then you don't have a choice and must use `Promise`. If you want to chain only synchronous operations, then `Task` may be the right choice for you, especially if you plan on executing your operations when the application is idle, in which case you'll want to benefit from `RunWhenIdle`.
+
+## Future work
+
+It's unfortunate that the nature of tasks makes it impossible to use `RunWhenIdle` with an asynchronous operation. It might be interesting to figure out whether it's possible to build a `RunWhenIdle` on top of promises. Also, since the `after()` and `subtask()` APIs of tasks make the code simpler in some instances, it may be interesting to try to reproduce them on top of promises. With these improvements to `Promise`, it may be possible to eventually remove `Task` entirely.
diff --git a/catapult/docs/rolling-deps.md b/catapult/docs/rolling-deps.md
new file mode 100644
index 0000000..e4cdfe5
--- /dev/null
+++ b/catapult/docs/rolling-deps.md
@@ -0,0 +1,42 @@
+# Updating Chromium's about:tracing (rolling DEPS)
+
+Chromium's DEPS file needs to be rolled to the catapult revision containing your
+change in order for it to appear in Chrome's about:tracing or other
+third_party/catapult files. This should happen automatically, but you may need
+to do it manually in rare cases. See below for more details.
+
+## Automatic rolls
+
+DEPS should be automatically rolled by the auto-roll bot at
+[catapult-roll.skia.org](https://catapult-roll.skia.org/).
+[catapult-sheriff@chromium.org](https://groups.google.com/a/chromium.org/forum/#!forum/catapult-sheriff)
+will be cc-ed on all reviews, and anyone who wants to join that list can
+subscribe. It's also the correct list to report a problem with the autoroll. If
+you need to stop the autoroll, either sign into that page with a google.com
+account, or contact catapult-sheriff@chromium.org.
+
+## Manual rolls
+
+In rare cases, you may need to make changes to chromium at the same time as you
+roll catapult DEPS. In this case you need to do a manual roll. Here are
+instructions for rolling catapult DEPS; your CL would also include any other
+changes to chromium needed to complete the roll.
+
+First, commit to catapult. Then check the [mirror]
+(https://chromium.googlesource.com/external/github.com/catapult-project/catapult.git)
+to find the git hash of your commit. (Note: it may take a few minutes to be
+mirrored).
+
+Then edit Chrome's [src/DEPS]
+(https://code.google.com/p/chromium/codesearch#chromium/src/DEPS) file. Look for
+a line like:
+
+```
+  'src/third_party/catapult':
+    Var('chromium_git') + '/external/github.com/catapult-project/catapult.git' + '@' +
+    '2da8924915bd6fb7609c518f5b1f63cb606248eb',
+```
+
+Update the number to the git hash you want to roll to, and [contribute a
+codereview to chrome](http://www.chromium.org/developers/contributing-code)
+for your edit. If you are a Chromium committer, feel free to TBR this.
diff --git a/catapult/docs/style-guide.md b/catapult/docs/style-guide.md
new file mode 100644
index 0000000..f98993b
--- /dev/null
+++ b/catapult/docs/style-guide.md
@@ -0,0 +1,232 @@
+# Catapult Style guide
+
+## Base style guide
+
+Unless stated below, we follow the conventions listed in the [Chromium style
+guide](https://www.chromium.org/developers/coding-style) and [Google JavaScript
+style guide](http://google.github.io/styleguide/javascriptguide.xml).
+
+## Files
+File names `should_look_like_this.html`.
+
+Keep to one concept per file, always. In practice, this usually means one
+component or class per file, but can lead to multiple if they’re small and
+closely related. If you can, group utility functions into a static class to
+clarify their relationship, e.g. `base/statistics.html`.
+
+```
+<!-- tracing/model/point.html -->
+<script>
+'use strict';
+
+tr.exportTo('tr.model', function() {
+  function Point() {}
+
+  return {
+    Point: Point
+  };
+});
+</script>
+```
+
+The exception to this rule is when there are multiple small, related classes or
+methods. In this case, a file may export multiple symbols:
+
+```
+<!-- tracing/base/dom_helpers.html -->
+<script>
+'use strict';
+
+tr.exportTo('tr.ui.b', function() {
+  function createSpan() { // … }
+  function createDiv() { // … }
+  function isElementAttached(element) { // … }
+
+  return {
+    createSpan: createSpan,
+    createDiv: createDiv,
+    isElementAttached: isElementAttached
+  };
+});
+</script>
+```
+
+Any tests for a file should be in a file with the same name as the
+implementation file, but with a trailing `_test`.
+
+```
+touch tracing/model/access_point.html
+touch tracing/model/access_point_test.html
+```
+## Namespacing and element names
+
+All symbols that exist in the global namespace should be exported using the
+`exportTo` method.
+
+Exported package names show the file’s location relative to the root `tracing/`
+directory. These package names are abbreviated, usually with a 1 or 2 letter
+abbreviation - just enough to resolve naming conflicts. All files in the same
+directory should share the same package.
+
+```
+<!-- tracing/base/units/generic_table.html -->
+tr.exportTo('tr.b.u', function() {
+   // ...
+});
+```
+
+Polymer element names should use the convention
+`hyphenated-package-name-element-name`.
+
+```
+<!-- tracing/ui/analysis/counter_sample_sub_view.html -->
+<polymer-element name='tr-ui-a-counter-sample-sub-view'>
+  ...
+</polymer-element>
+```
+
+## Classes and objects
+
+Classes should expose public fields only if those fields represent a part of the
+class’s public interface.
+
+All fields should be initialized in the constructor. Fields with no reasonable
+default value should be initialized to undefined.
+
+Do not set defaults via the prototype chain.
+
+```
+function Line() {
+  // Good
+  this.yIntercept_ = undefined;
+}
+
+Line.prototype = {
+  // Bad
+  xIntercept_: undefined,
+
+
+  set slope(slope) {
+    // Bad: this.slope_ wasn't initialized in the constructor.
+    this.slope_ = slope;
+  },
+
+  get yIntercept() {
+    // Good
+    return this.yIntercept_;
+  }
+};
+```
+
+## Polymer elements
+The `<script>` block for the Polymer element can go either inside or outside of
+the element’s definition. Generally, the block is placed outside when the
+script is sufficiently complex that having 2 fewer spaces of indentation would
+make it more readable.
+
+```
+<polymer-element name="tr-bar">
+  <template><div></div></template>
+   <script>
+     // Can go here...
+   </script>
+</polymer-element>
+
+<script>
+'use strict';
+(function(){   // Use this if you need to define constants scoped to that element.
+Polymer('tr-bar', {
+  // ... or here.
+});
+})();
+</script>
+```
+
+Style sheets should be inline rather than in external .css files.
+
+```
+<polymer-element name="tr-bar">
+  <style>
+  #content {
+    display: flex;
+  }
+  </style>
+  <template><div id="content"></div></template>
+</polymer-element>
+```
+
+## `undefined` and `null`
+Prefer use of `undefined` over `null`.
+
+```
+function Line() {
+  // Good
+  this.yIntercept_ = undefined;
+  // Bad
+  this.slope = null;
+}
+```
+
+## Tests
+UI element tests that make sure that an element is instantiable should have
+names that start with “`instantiate`”. As a general rule, these tests should
+not make assertions.
+
+## ES6 features
+
+**Use of all ES6 features is currently prohibited.** However, we're working to allow them.
+
+| Feature                                                                                                                                     | Status                                                                          |
+|---------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------|
+| [Arrows](https://github.com/lukehoban/es6features#arrows)                                                                                   | [Testing in progress](https://github.com/catapult-project/catapult/issues/2165) |
+| [Classes](https://github.com/lukehoban/es6features#classes)                                                                                 | To be discussed                                                                 |
+| [Enhanced object literals](https://github.com/lukehoban/es6features#enhanced-object-literals)                                               | To be discussed                                                                 |
+| [Template strings](https://github.com/lukehoban/es6features#template-strings)                                                               | To be discussed                                                                 |
+| [Destructuring](https://github.com/lukehoban/es6features#destructuring)                                                                     | To be discussed                                                                 |
+| [Default, rest, and spread](https://github.com/lukehoban/es6features#default--rest--spread)                                                 | To be discussed                                                                 |
+| [`let` and `const`](https://github.com/lukehoban/es6features#let--const)                                                                    | To be discussed                                                                 |
+| [Iterators and `for...of`](https://github.com/lukehoban/es6features#iterators--forof)                                                       | To be discussed                                                                 |
+| [Generators](https://github.com/lukehoban/es6features#generators)                                                                           | To be discussed                                                                 |
+| [Unicode](https://github.com/lukehoban/es6features#unicode)                                                                                 | To be discussed                                                                 |
+| [Modules](https://github.com/lukehoban/es6features#modules)                                                                                 | To be discussed                                                                 |
+| [Module loaders](https://github.com/lukehoban/es6features#module-loaders)                                                                   | To be discussed                                                                 |
+| [`Map`, `Set`, `Weakmap`, and `Weakset`](https://github.com/lukehoban/es6features#map--set--weakmap--weakset)                               | To be discussed                                                                 |
+| [Proxies](https://github.com/lukehoban/es6features#proxies)                                                                                 | To be discussed                                                                 |
+| [Symbols](https://github.com/lukehoban/es6features#symbols)                                                                                 | To be discussed                                                                 |
+| [Subclassable Built-ins](https://github.com/lukehoban/es6features#subclassable-built-ins)                                                   | To be discussed                                                                 |
+| [Promises](https://github.com/lukehoban/es6features#promises)                                                                               | To be discussed                                                                 |
+| [`Math`, `Number`, `String`, `Array`, and `Object` APIs](https://github.com/lukehoban/es6features#math--number--string--array--object-apis) | To be discussed                                                                 |
+| [Binary and octal literals](https://github.com/lukehoban/es6features#binary-and-octal-literals)                                             | To be discussed                                                                 |
+| [Reflect API](https://github.com/lukehoban/es6features#reflect-api)                                                                         | To be discussed                                                                 |
+| [Tail calls](https://github.com/lukehoban/es6features#tail-calls)                                                                           | To be discussed                                                                 |
+
+### Possible feature statuses
+  - **Approved**: this feature is approved for general use.
+  - **Testing in progress**: there's agreement that we should use this feature, but we still need to make sure that it's safe. "Testing in progress" statuses should link to a Catapult bug thread tracking the testing.
+  - **Discussion in progress**: there's not yet agreement that we should use this feature. "Discussion in progress" statuses should link to a Catapult bug thread about whether the feature should be used.
+  - **To be discussed**: this feature hasn't been discussed yet.
+
+Use of an ES6 feature shouldn't be considered until that feature is supported in both Chrome stable and [our current version of D8](/third_party/vinn/third_party/v8/README.chromium).
+
+If you see that Catapult’s version of D8 is behind Chrome stable's, use
+[this script](/third_party/vinn/bin/update_v8) to update it.
+
+## Workarounds must have bugs tracking their removal: avoid defensive programming
+
+We should never silently eat an unexpected condition. When such a condition
+occurs, we should output the clearest possible warning, or a catastrophic error
+if progress cannot continue. If fixing the problem is hard and a simple patch
+would allow someone to keep working on a feature, then it is OK to submit this
+patch on the express condition that:
+
+  1. An issue is created to track the problem.
+  2. The defensive patch is wrapped in a `// TODO` linking to that issue.
+  3. The TODO and defensive patch are removed after the problem is fixed.
+
+## Issues
+
+CL descriptions should either:
+
+  * Not have a BUG= tag.
+  * Have a BUG=catapult:#123 tag, referring to issue 123 in our github tracker.
+  * Have a BUG=chromium:456 tag, referring to issue 456 in the chromium tracker.
diff --git a/catapult/docs/trace-event-format.md b/catapult/docs/trace-event-format.md
new file mode 100644
index 0000000..58ee623
--- /dev/null
+++ b/catapult/docs/trace-event-format.md
@@ -0,0 +1,4 @@
+# The trace-event file format
+
+The current version of the Trace Event Format is housed in a Google Document:
+[Trace Event Format Definition](https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/edit?usp=sharing)
diff --git a/catapult/experimental/OWNERS b/catapult/experimental/OWNERS
new file mode 100644
index 0000000..72e8ffc
--- /dev/null
+++ b/catapult/experimental/OWNERS
@@ -0,0 +1 @@
+*
diff --git a/catapult/experimental/PRESUBMIT.py b/catapult/experimental/PRESUBMIT.py
new file mode 100644
index 0000000..c119c87
--- /dev/null
+++ b/catapult/experimental/PRESUBMIT.py
@@ -0,0 +1,29 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+def CheckChangeOnUpload(input_api, output_api):
+  return _CommonChecks(input_api, output_api)
+
+
+def CheckChangeOnCommit(input_api, output_api):
+  return _CommonChecks(input_api, output_api)
+
+
+def _CommonChecks(input_api, output_api):
+  results = []
+  results += input_api.RunTests(input_api.canned_checks.GetPylint(
+      input_api, output_api, extra_paths_list=_GetPathsToPrepend(input_api),
+      pylintrc='../pylintrc'))
+  return results
+
+
+def _GetPathsToPrepend(input_api):
+  project_dir = input_api.PresubmitLocalPath()
+  catapult_dir = input_api.os_path.join(project_dir, '..')
+  return [
+      project_dir,
+
+      input_api.os_path.join(catapult_dir, 'third_party', 'mock'),
+  ]
diff --git a/catapult/experimental/bisect_lib/README.md b/catapult/experimental/bisect_lib/README.md
index b76c3d9..120e079 100644
--- a/catapult/experimental/bisect_lib/README.md
+++ b/catapult/experimental/bisect_lib/README.md
@@ -1,4 +1,3 @@
-
 <!-- Copyright 2015 The Chromium Authors. All rights reserved.
      Use of this source code is governed by a BSD-style license that can be
      found in the LICENSE file.
@@ -13,15 +12,6 @@
 
 Secondary goals are:
 
- * Simplify code sharing with the related [Telemetry](https://www.chromium.org/developers/telemetry) and [Performance Dashboard](https://github.com/catapult-project/catapult/blob/master/dashboard/README.md) projects, also under catapult.
+ * Simplify code sharing with the related [Telemetry](/telemetry/README.md) and [Performance Dashboard](/dashboard/README.md) projects.
  * Eventually move the bisect director role outside of buildbot/recipes and
    into its own standalone application.
-
-These tools were created by Chromium developers for performance analysis,
-testing, and monitoring of Chrome, but they can also be used for analyzing and
-monitoring websites, and eventually Android apps.
-
-Contributing
-============
-Please see [our contributor's guide](https://github.com/catapult-project/catapult/blob/master/CONTRIBUTING.md)
-
diff --git a/catapult/experimental/bisect_lib/bin/run_py_tests b/catapult/experimental/bisect_lib/bin/run_py_tests
new file mode 100755
index 0000000..6b7daf9
--- /dev/null
+++ b/catapult/experimental/bisect_lib/bin/run_py_tests
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+# Copyright (c) 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Runs all Python unit tests in catapult_build/."""
+
+import os
+import sys
+
+_CATAPULT = os.path.abspath(os.path.join(
+    os.path.dirname(__file__), os.path.pardir, os.path.pardir, os.path.pardir))
+
+
+if __name__ == '__main__':
+  sys.path.append(_CATAPULT)
+
+  from hooks import install
+  if '--no-install-hooks' in sys.argv:
+    sys.argv.remove('--no-install-hooks')
+  else:
+    install.InstallHooks()
+
+  from catapult_build import run_with_typ
+  sys.exit(run_with_typ.Run(
+      os.path.join(_CATAPULT, 'experimental', 'bisect_lib'),
+      path=[_CATAPULT]))
diff --git a/catapult/experimental/bisect_lib/bisect_helper.py b/catapult/experimental/bisect_lib/bisect_helper.py
deleted file mode 100755
index 44baa7c..0000000
--- a/catapult/experimental/bisect_lib/bisect_helper.py
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/python2.7
-
-# Copyright 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import json
-import os
-import sys
-
-sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
-
-from bisect_lib import chromium_revisions
-
-
-def main(argv):
-  if argv[1] == 'query_revision_info':
-    print json.dumps(chromium_revisions.revision_info(argv[2]))
-  elif argv[1] == 'revision_range':
-    print json.dumps(chromium_revisions.revision_range(argv[2], argv[3]))
-  return 0
-
-
-if __name__ == '__main__':
-  sys.exit(main(sys.argv))
diff --git a/catapult/experimental/bisect_lib/chromium_revisions.py b/catapult/experimental/bisect_lib/chromium_revisions.py
deleted file mode 100644
index 2c660fe..0000000
--- a/catapult/experimental/bisect_lib/chromium_revisions.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import json
-import urllib2
-
-BASE_URL = 'https://chromium.googlesource.com/chromium/src/+'
-PADDING = ')]}\'\n'  # Gitiles padding.
-
-def revision_info(revision):
-  """Gets information about a chromium revision.
-
-  Args:
-    revision (str): The git commit hash of the revision to check.
-
-  Returns:
-    A dictionary containing the author, email, 'subject' (the first line of the
-    commit message) the 'body' (the whole message) and the date in string format
-    like "Sat Oct 24 00:33:21 2015".
-  """
-
-  url = '%s/%s?format=json' % (BASE_URL, revision)
-  response = urllib2.urlopen(url).read()
-  response = json.loads(response[len(PADDING):])
-  message = response['message'].splitlines()
-  subject = message[0]
-  body = '\n'.join(message[1:])
-  result = {
-      'author': response['author']['name'],
-      'email': response['author']['email'],
-      'subject': subject,
-      'body': body,
-      'date': response['committer']['time'],
-  }
-  return result
-
-
-def revision_range(first_revision, last_revision):
-  """Gets the revisions in chromium between first and last including the latter.
-
-  Args:
-    first_revision (str): The git commit of the first revision in the range.
-    last_revision (str): The git commit of the last revision in the range.
-
-  Returns:
-    A list of dictionaries, one for each revision after the first revision up to
-    and including the last revision. For each revision, its dictionary will
-    contain information about the author and the comitter and the commit itself
-    analogously to the 'git log' command. See test_data/MOCK_RANGE_RESPONSE_FILE
-    for an example.
-  """
-  url = '%slog/%s..%s?format=json' % (BASE_URL, first_revision, last_revision)
-  response = urllib2.urlopen(url).read()
-  response = json.loads(response[len(PADDING):])
-  return response['log']
diff --git a/catapult/experimental/bisect_lib/chromium_revisions_test.py b/catapult/experimental/bisect_lib/chromium_revisions_test.py
deleted file mode 100644
index 19eb56e..0000000
--- a/catapult/experimental/bisect_lib/chromium_revisions_test.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# Copyright 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import os
-import sys
-import unittest
-
-_EXPERIMENTAL = os.path.join(os.path.dirname(__file__), os.pardir)
-_CATAPULT = os.path.join(_EXPERIMENTAL, os.pardir)
-
-# TODO(robertocn): Add these to sys.path conditionally.
-sys.path.append(os.path.join(_CATAPULT, 'third_party', 'mock'))
-sys.path.append(_EXPERIMENTAL)
-
-import mock
-
-from bisect_lib import chromium_revisions
-
-
-FIRST_REVISION = '53fc07eb478520a80af6bf8b62be259bb55db0f1'
-LAST_REVISION = 'c89130e28fd01062104e1be7f3a6fc3abbb80ca9'
-
-TEST_DATA_LOCATION = os.path.join(os.path.dirname(__file__),
-                                  'test_data')
-MOCK_INFO_RESPONSE_FILE = open(os.path.join(
-    TEST_DATA_LOCATION, 'MOCK_INFO_RESPONSE_FILE'))
-MOCK_RANGE_RESPONSE_FILE = open(os.path.join(
-    TEST_DATA_LOCATION, 'MOCK_RANGE_RESPONSE_FILE'))
-
-EXPECTED_INFO = {
-    'body':
-        'BUG=548160',
-    'date':
-        'Tue Oct 27 21:26:30 2015',
-    'subject':
-        '[Extensions] Fix hiding browser actions without the toolbar redesign',
-    'email':
-        'rdevlin.cronin@chromium.org',
-    'author':
-        'rdevlin.cronin'
-}
-
-INTERVENING_REVISIONS = [
-    '2e93263dc74f0496100435e1fd7232e9e8323af0',
-    '6feaa73a54d0515ad2940709161ca0a5ad91d1f8',
-    '3861789af25e2d3502f0fb7080da5785d31308aa',
-    '8fcc8af20a3d41b0512e3b1486e4dc7de528a72b',
-    'f1c777e3f97a16cc6a3aa922a23602fa59412989',
-    'ee261f306c3c66e96339aa1026d62a6d953302fe',
-    '7bd1741893bd4e233b5562a6926d7e395d558343',
-    '4f81be50501fbc02d7e44df0d56032e5885e19b6',
-    '8414732168a8867a5d6bd45eaade68a5820a9e34',
-    '01542ac6d0fbec6aa78e33e6c7ec49a582072ea9',
-    '66aeb2b7084850d09f3fccc7d7467b57e4da1882',
-    '48c1471f1f503246dd66753a4c7588d77282d2df',
-    '84f6037e951c21a3b00bd3ddd034f258da6839b5',
-    'ebd5f102ee89a4be5c98815c02c444fbf2b6b040',
-    '5dbc149bebecea186b693b3d780b6965eeffed0f',
-    '22e49fb496d6ffa122c470f6071d47ccb4ccb672',
-    '07a6d9854efab6677b880defa924758334cfd47d',
-    '32ce3b13924d84004a3e05c35942626cbe93cbbd',
-]
-
-
-class ChromiumRevisionsTest(unittest.TestCase):
-
-  def setUp(self):
-    pass
-
-  def tearDown(self):
-    pass
-
-  def testRevisionInfo(self):
-    with mock.patch('urllib2.urlopen', mock.MagicMock(
-        return_value=MOCK_INFO_RESPONSE_FILE)):
-      test_info = chromium_revisions.revision_info(LAST_REVISION)
-    for key in EXPECTED_INFO:
-      self.assertIn(EXPECTED_INFO[key], test_info[key])
-
-  def testRevisionRange(self):
-    with mock.patch('urllib2.urlopen', mock.MagicMock(
-        return_value=MOCK_RANGE_RESPONSE_FILE)):
-      rev_list = chromium_revisions.revision_range(
-          FIRST_REVISION, LAST_REVISION)
-    commits_only = [entry['commit'] for entry in rev_list]
-    for r in INTERVENING_REVISIONS:
-      self.assertIn(r, commits_only)
-    self.assertIn(LAST_REVISION, commits_only)
-    self.assertEqual(len(INTERVENING_REVISIONS) + 1,
-                     len(rev_list))
-
-if __name__ == '__main__':
-  unittest.main()
-
diff --git a/catapult/experimental/bisect_lib/depot_map.py b/catapult/experimental/bisect_lib/depot_map.py
new file mode 100644
index 0000000..e514da8
--- /dev/null
+++ b/catapult/experimental/bisect_lib/depot_map.py
@@ -0,0 +1,19 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This module contains a mapping of depot names to paths on gitiles.
+
+This is used to fetch information from gitiles for different
+repositories supported by auto-bisect.
+"""
+
+# For each entry in this map, the key is a "depot" name (a Chromium dependency
+# in the DEPS file) and the value is a path used for the repo on gitiles; each
+# repo can be found at https://chromium.googlesource.com/<PATH>.
+DEPOT_PATH_MAP = {
+    'chromium': 'chromium/src',
+    'angle': 'angle/angle',
+    'v8': 'v8/v8.git',
+    'skia': 'skia',
+}
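For reference, DEPOT_PATH_MAP is only a lookup table; the fetch scripts below build the full gitiles URL from it. A minimal sketch of that construction, assuming the map layout above (GitilesBaseUrl is a hypothetical helper, not part of the module; the host comes from the URL templates in the fetch scripts):

# Minimal sketch of turning a depot name into its gitiles base URL.
DEPOT_PATH_MAP = {
    'chromium': 'chromium/src',
    'angle': 'angle/angle',
    'v8': 'v8/v8.git',
    'skia': 'skia',
}


def GitilesBaseUrl(depot_name):
  # Raises KeyError for depots auto-bisect does not support, just like the
  # dict lookups in the fetch scripts.
  return 'https://chromium.googlesource.com/' + DEPOT_PATH_MAP[depot_name]


print(GitilesBaseUrl('v8'))  # https://chromium.googlesource.com/v8/v8.git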
diff --git a/catapult/experimental/bisect_lib/fetch_intervening_revisions.py b/catapult/experimental/bisect_lib/fetch_intervening_revisions.py
new file mode 100755
index 0000000..9cc4d65
--- /dev/null
+++ b/catapult/experimental/bisect_lib/fetch_intervening_revisions.py
@@ -0,0 +1,99 @@
+#!/usr/bin/python
+#
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Gets list of revisions between two commits and their commit positions.
+
+Example usage:
+  ./fetch_intervening_revisions.py 343b531d31 7b43807df3 chromium
+  ./fetch_intervening_revisions.py 235eff9574 1e4681c33f v8
+
+Note: Another implementation of this functionality can be found in
+findit/common/git_repository.py (https://goo.gl/Rr8j9O).
+"""
+
+import argparse
+import json
+import urllib2
+
+from bisect_lib import depot_map
+
+_GITILES_PADDING = ')]}\'\n'
+_URL_TEMPLATE = ('https://chromium.googlesource.com/%s/+log/%s..%s'
+                 '?format=json&n=%d')
+
+# Gitiles paginates the list of commits; since we want to get all of the
+# commits at once, the page size should be larger than the largest revision
+# range that we expect to get.
+_PAGE_SIZE = 512
+
+def FetchInterveningRevisions(start, end, depot_name):
+  """Fetches a list of revision in between two commits.
+
+  Args:
+    start (str): A git commit hash in the Chromium src repository.
+    end (str): Another git commit hash, after start.
+    depot_name (str): A repository name.
+
+  Returns:
+    A list of pairs (commit hash, commit position), from earliest to latest,
+    for all commits in between the two given commits, not including either
+    of the given commits.
+
+  Raises:
+    urllib2.URLError: The request to gitiles failed.
+    ValueError: The response wasn't valid JSON.
+    KeyError: The JSON didn't contain the expected data.
+  """
+  revisions = _FetchRangeFromGitiles(start, end, depot_name)
+  # The response from gitiles includes the end revision and is ordered
+  # from latest to earliest.
+  return [_CommitPair(r) for r in reversed(revisions[1:])]
+
+
+def _FetchRangeFromGitiles(start, end, depot_name):
+  """Fetches a list of revision dicts from gitiles.
+
+  Make multiple requests to get multiple pages, if necessary.
+  """
+  revisions = []
+  url = _URL_TEMPLATE % (
+      depot_map.DEPOT_PATH_MAP[depot_name], start, end, _PAGE_SIZE)
+  current_page_url = url
+  while True:
+    response = urllib2.urlopen(current_page_url).read()
+    response_json = response[len(_GITILES_PADDING):]  # Remove padding.
+    response_dict = json.loads(response_json)
+    revisions.extend(response_dict['log'])
+    if 'next' not in response_dict:
+      break
+    current_page_url = url + '&s=' + response_dict['next']
+  return revisions
+
+
+def _CommitPair(commit_dict):
+  return (commit_dict['commit'],
+          _CommitPositionFromMessage(commit_dict['message']))
+
+
+def _CommitPositionFromMessage(message):
+  for line in reversed(message.splitlines()):
+    if line.startswith('Cr-Commit-Position:'):
+      return line.split('#')[1].split('}')[0]
+  return None
+
+
+def main():
+  parser = argparse.ArgumentParser()
+  parser.add_argument('start')
+  parser.add_argument('end')
+  parser.add_argument('depot', choices=list(depot_map.DEPOT_PATH_MAP))
+  args = parser.parse_args()
+  revision_pairs = FetchInterveningRevisions(args.start, args.end, args.depot)
+  print json.dumps(revision_pairs)
+
+
+if __name__ == '__main__':
+  main()
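The pagination in _FetchRangeFromGitiles above works by re-requesting the same +log URL with an '&s=<cursor>' suffix until the response no longer carries a 'next' key. A self-contained sketch of that loop, with a dict of canned pages standing in for the network (URLs and commit hashes here are made up):

# Sketch of the gitiles pagination scheme; fake_pages replaces the urllib2
# and JSON handling, so this runs without network access.
def FetchAllPages(base_url, fetch):
  revisions = []
  current_page_url = base_url
  while True:
    response_dict = fetch(current_page_url)
    revisions.extend(response_dict['log'])
    if 'next' not in response_dict:
      break
    current_page_url = base_url + '&s=' + response_dict['next']
  return revisions


fake_pages = {
    'https://host/+log/a..b?format=json&n=2':
        {'log': [{'commit': 'c3'}, {'commit': 'c2'}], 'next': 'c2'},
    'https://host/+log/a..b?format=json&n=2&s=c2':
        {'log': [{'commit': 'c1'}]},
}
print(FetchAllPages('https://host/+log/a..b?format=json&n=2',
                    fake_pages.__getitem__))
# -> [{'commit': 'c3'}, {'commit': 'c2'}, {'commit': 'c1'}]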
diff --git a/catapult/experimental/bisect_lib/fetch_intervening_revisions_test.py b/catapult/experimental/bisect_lib/fetch_intervening_revisions_test.py
new file mode 100755
index 0000000..ee01a0b
--- /dev/null
+++ b/catapult/experimental/bisect_lib/fetch_intervening_revisions_test.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+import unittest
+
+_CATAPULT_PATH = os.path.abspath(os.path.join(
+    os.path.dirname(__file__), os.path.pardir, os.path.pardir))
+sys.path.insert(0, os.path.join(_CATAPULT_PATH, 'third_party', 'mock'))
+
+import mock
+
+from bisect_lib import fetch_intervening_revisions
+
+_TEST_DATA = os.path.join(os.path.dirname(__file__), 'test_data')
+
+
+class FetchInterveningRevisionsTest(unittest.TestCase):
+
+  def testFetchInterveningRevisions(self):
+    response = open(os.path.join(_TEST_DATA, 'MOCK_RANGE_RESPONSE_1'))
+    with mock.patch('urllib2.urlopen', mock.MagicMock(return_value=response)):
+      revs = fetch_intervening_revisions.FetchInterveningRevisions(
+          '53fc07eb478520a80af6bf8b62be259bb55db0f1',
+          'c89130e28fd01062104e1be7f3a6fc3abbb80ca9',
+          depot_name='chromium')
+    self.assertEqual(
+        revs, [
+            ('32ce3b13924d84004a3e05c35942626cbe93cbbd', '356382'),
+            ('07a6d9854efab6677b880defa924758334cfd47d', '356383'),
+            ('22e49fb496d6ffa122c470f6071d47ccb4ccb672', '356384'),
+            ('5dbc149bebecea186b693b3d780b6965eeffed0f', '356385'),
+            ('ebd5f102ee89a4be5c98815c02c444fbf2b6b040', '356386'),
+            ('84f6037e951c21a3b00bd3ddd034f258da6839b5', '356387'),
+            ('48c1471f1f503246dd66753a4c7588d77282d2df', '356388'),
+            ('66aeb2b7084850d09f3fccc7d7467b57e4da1882', '356389'),
+            ('01542ac6d0fbec6aa78e33e6c7ec49a582072ea9', '356390'),
+            ('8414732168a8867a5d6bd45eaade68a5820a9e34', '356391'),
+            ('4f81be50501fbc02d7e44df0d56032e5885e19b6', '356392'),
+            ('7bd1741893bd4e233b5562a6926d7e395d558343', '356393'),
+            ('ee261f306c3c66e96339aa1026d62a6d953302fe', '356394'),
+            ('f1c777e3f97a16cc6a3aa922a23602fa59412989', '356395'),
+            ('8fcc8af20a3d41b0512e3b1486e4dc7de528a72b', '356396'),
+            ('3861789af25e2d3502f0fb7080da5785d31308aa', '356397'),
+            ('6feaa73a54d0515ad2940709161ca0a5ad91d1f8', '356398'),
+            ('2e93263dc74f0496100435e1fd7232e9e8323af0', '356399')
+        ])
+
+  def testFetchInterveningRevisionsPagination(self):
+
+    def MockUrlopen(url):
+      if 's=' not in url:
+        return open(os.path.join(_TEST_DATA, 'MOCK_RANGE_RESPONSE_2_PAGE_1'))
+      return open(os.path.join(_TEST_DATA, 'MOCK_RANGE_RESPONSE_2_PAGE_2'))
+
+    with mock.patch('urllib2.urlopen', MockUrlopen):
+      revs = fetch_intervening_revisions.FetchInterveningRevisions(
+          '7bd1741893bd4e233b5562a6926d7e395d558343',
+          '3861789af25e2d3502f0fb7080da5785d31308aa',
+          depot_name='chromium')
+    self.assertEqual(
+        revs, [
+            ('ee261f306c3c66e96339aa1026d62a6d953302fe', '356394'),
+            ('f1c777e3f97a16cc6a3aa922a23602fa59412989', '356395'),
+            ('8fcc8af20a3d41b0512e3b1486e4dc7de528a72b', '356396'),
+        ])
+
+
+if __name__ == '__main__':
+  unittest.main()
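The tests above never touch the network: urllib2.urlopen is patched so that its return value is a file-like object whose read() yields a checked-in gitiles response. A stripped-down sketch of the same pattern using an in-memory string instead of the test_data files (assumes the mock package is importable, e.g. from catapult/third_party/mock; the canned response is fabricated but keeps the anti-XSSI padding that the code under test strips off):

# Sketch of the urllib2.urlopen mocking pattern used by these tests.
import StringIO
import urllib2

import mock  # assumed importable

canned_response = StringIO.StringIO(')]}\'\n{"log": []}')
with mock.patch('urllib2.urlopen',
                mock.MagicMock(return_value=canned_response)):
  body = urllib2.urlopen('https://ignored.example').read()
print(repr(body))  # the canned bytes, gitiles padding included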
diff --git a/catapult/experimental/bisect_lib/fetch_revision_info.py b/catapult/experimental/bisect_lib/fetch_revision_info.py
new file mode 100755
index 0000000..e34977d
--- /dev/null
+++ b/catapult/experimental/bisect_lib/fetch_revision_info.py
@@ -0,0 +1,52 @@
+#!/usr/bin/python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Gets information about one commit from gitiles.
+
+Example usage:
+  ./fetch_revision_info.py 343b531d31 chromium
+  ./fetch_revision_info.py 17b4e7450d v8
+"""
+
+import argparse
+import json
+import urllib2
+
+from bisect_lib import depot_map
+
+_GITILES_PADDING = ')]}\'\n'
+_URL_TEMPLATE = 'https://chromium.googlesource.com/%s/+/%s?format=json'
+
+def FetchRevisionInfo(commit_hash, depot_name):
+  """Gets information about a chromium revision."""
+  path = depot_map.DEPOT_PATH_MAP[depot_name]
+  url = _URL_TEMPLATE % (path, commit_hash)
+  response = urllib2.urlopen(url).read()
+  response_json = response[len(_GITILES_PADDING):]
+  response_dict = json.loads(response_json)
+  message = response_dict['message'].splitlines()
+  subject = message[0]
+  body = '\n'.join(message[1:])
+  result = {
+      'author': response_dict['author']['name'],
+      'email': response_dict['author']['email'],
+      'subject': subject,
+      'body': body,
+      'date': response_dict['committer']['time'],
+  }
+  return result
+
+
+def Main():
+  parser = argparse.ArgumentParser()
+  parser.add_argument('commit_hash')
+  parser.add_argument('depot', choices=list(depot_map.DEPOT_PATH_MAP))
+  args = parser.parse_args()
+  revision_info = FetchRevisionInfo(args.commit_hash, args.depot)
+  print json.dumps(revision_info)
+
+
+if __name__ == '__main__':
+  Main()
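The only non-obvious step in FetchRevisionInfo above is removing the anti-XSSI padding that gitiles prepends to JSON responses before the body can be parsed. A small self-contained sketch of the padding strip and the subject/body split (the commit message is fabricated):

# Sketch of gitiles padding removal and message splitting, mirroring
# FetchRevisionInfo; no network access involved.
import json

_GITILES_PADDING = ')]}\'\n'

raw = _GITILES_PADDING + json.dumps(
    {'message': 'Fix the widget\n\nBUG=123456\n'})
response_dict = json.loads(raw[len(_GITILES_PADDING):])
message = response_dict['message'].splitlines()
print(message[0])              # Fix the widget
print('\n'.join(message[1:]))  # the body: a blank line, then BUG=123456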
diff --git a/catapult/experimental/bisect_lib/fetch_revision_info_test.py b/catapult/experimental/bisect_lib/fetch_revision_info_test.py
new file mode 100755
index 0000000..e538129
--- /dev/null
+++ b/catapult/experimental/bisect_lib/fetch_revision_info_test.py
@@ -0,0 +1,49 @@
+#!/usr/bin/python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+import unittest
+
+_CATAPULT_PATH = os.path.abspath(os.path.join(
+    os.path.dirname(__file__), os.path.pardir, os.path.pardir))
+sys.path.insert(0, os.path.join(_CATAPULT_PATH, 'third_party', 'mock'))
+
+import mock
+
+from bisect_lib import fetch_revision_info
+
+_TEST_DATA_PATH = os.path.join(os.path.dirname(__file__), 'test_data')
+_MOCK_RESPONSE_PATH = os.path.join(_TEST_DATA_PATH, 'MOCK_INFO_RESPONSE_FILE')
+
+
+class ChromiumRevisionsTest(unittest.TestCase):
+
+  def testRevisionInfo(self):
+    commit_hash = 'c89130e28fd01062104e1be7f3a6fc3abbb80ca9'
+    with mock.patch('urllib2.urlopen', mock.MagicMock(
+        return_value=open(_MOCK_RESPONSE_PATH))):
+      revision_info = fetch_revision_info.FetchRevisionInfo(
+          commit_hash, depot_name='chromium')
+    self.assertEqual(
+        {
+            'body': ('\nHiding actions without the toolbar redesign '
+                     'means removing them entirely, so if\nthey exist '
+                     'in the toolbar, they are considered \'visible\' '
+                     '(even if they are in\nthe chevron).\n\n'
+                     'BUG=544859\nBUG=548160\n\nReview URL: '
+                     'https://codereview.chromium.org/1414343003\n\n'
+                     'Cr-Commit-Position: refs/heads/master@{#356400}'),
+            'date': 'Tue Oct 27 21:26:30 2015',
+            'subject': ('[Extensions] Fix hiding browser actions '
+                        'without the toolbar redesign'),
+            'email': 'rdevlin.cronin@chromium.org',
+            'author': 'rdevlin.cronin'
+        },
+        revision_info)
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/catapult/experimental/bisect_lib/test_data/MOCK_INFO_RESPONSE_FILE b/catapult/experimental/bisect_lib/test_data/MOCK_INFO_RESPONSE_FILE
index cbc53c5..1df2135 100644
--- a/catapult/experimental/bisect_lib/test_data/MOCK_INFO_RESPONSE_FILE
+++ b/catapult/experimental/bisect_lib/test_data/MOCK_INFO_RESPONSE_FILE
@@ -1,48 +1,48 @@
-)]}'
-{
-  "commit": "c89130e28fd01062104e1be7f3a6fc3abbb80ca9",
-  "tree": "b6c8947321df51bd03ce118285e243ca4b14a751",
-  "parents": [
-    "2e93263dc74f0496100435e1fd7232e9e8323af0"
-  ],
-  "author": {
-    "name": "rdevlin.cronin",
-    "email": "rdevlin.cronin@chromium.org",
-    "time": "Tue Oct 27 21:24:54 2015"
-  },
-  "committer": {
-    "name": "Commit bot",
-    "email": "commit-bot@chromium.org",
-    "time": "Tue Oct 27 21:26:30 2015"
-  },
-  "message": "[Extensions] Fix hiding browser actions without the toolbar redesign\n\nHiding actions without the toolbar redesign means removing them entirely, so if\nthey exist in the toolbar, they are considered \u0027visible\u0027 (even if they are in\nthe chevron).\n\nBUG\u003d544859\nBUG\u003d548160\n\nReview URL: https://codereview.chromium.org/1414343003\n\nCr-Commit-Position: refs/heads/master@{#356400}\n",
-  "tree_diff": [
-    {
-      "type": "modify",
-      "old_id": "d97ee3abdf40b1ed6217780c38ac7122d4b3caac",
-      "old_mode": 33188,
-      "old_path": "chrome/browser/extensions/extension_context_menu_model.cc",
-      "new_id": "27ef17bab7ebfb1796fdecb8ce0c35199442cf2f",
-      "new_mode": 33188,
-      "new_path": "chrome/browser/extensions/extension_context_menu_model.cc"
-    },
-    {
-      "type": "modify",
-      "old_id": "2150b00ab82755323c069757ae5d05a4a32b91f6",
-      "old_mode": 33188,
-      "old_path": "chrome/browser/extensions/extension_context_menu_model_unittest.cc",
-      "new_id": "ecdd3ba5b65102b7c54f35bb3a6b5bc1510bf442",
-      "new_mode": 33188,
-      "new_path": "chrome/browser/extensions/extension_context_menu_model_unittest.cc"
-    },
-    {
-      "type": "modify",
-      "old_id": "963f4629354c504b34321029a2e63f87355672e0",
-      "old_mode": 33188,
-      "old_path": "chrome/browser/ui/views/toolbar/chevron_menu_button.cc",
-      "new_id": "69b207a671aa2c80f4a9b3a6873c9db727f2e355",
-      "new_mode": 33188,
-      "new_path": "chrome/browser/ui/views/toolbar/chevron_menu_button.cc"
-    }
-  ]
-}
+)]}'
+{
+  "commit": "c89130e28fd01062104e1be7f3a6fc3abbb80ca9",
+  "tree": "b6c8947321df51bd03ce118285e243ca4b14a751",
+  "parents": [
+    "2e93263dc74f0496100435e1fd7232e9e8323af0"
+  ],
+  "author": {
+    "name": "rdevlin.cronin",
+    "email": "rdevlin.cronin@chromium.org",
+    "time": "Tue Oct 27 21:24:54 2015"
+  },
+  "committer": {
+    "name": "Commit bot",
+    "email": "commit-bot@chromium.org",
+    "time": "Tue Oct 27 21:26:30 2015"
+  },
+  "message": "[Extensions] Fix hiding browser actions without the toolbar redesign\n\nHiding actions without the toolbar redesign means removing them entirely, so if\nthey exist in the toolbar, they are considered \u0027visible\u0027 (even if they are in\nthe chevron).\n\nBUG\u003d544859\nBUG\u003d548160\n\nReview URL: https://codereview.chromium.org/1414343003\n\nCr-Commit-Position: refs/heads/master@{#356400}\n",
+  "tree_diff": [
+    {
+      "type": "modify",
+      "old_id": "d97ee3abdf40b1ed6217780c38ac7122d4b3caac",
+      "old_mode": 33188,
+      "old_path": "chrome/browser/extensions/extension_context_menu_model.cc",
+      "new_id": "27ef17bab7ebfb1796fdecb8ce0c35199442cf2f",
+      "new_mode": 33188,
+      "new_path": "chrome/browser/extensions/extension_context_menu_model.cc"
+    },
+    {
+      "type": "modify",
+      "old_id": "2150b00ab82755323c069757ae5d05a4a32b91f6",
+      "old_mode": 33188,
+      "old_path": "chrome/browser/extensions/extension_context_menu_model_unittest.cc",
+      "new_id": "ecdd3ba5b65102b7c54f35bb3a6b5bc1510bf442",
+      "new_mode": 33188,
+      "new_path": "chrome/browser/extensions/extension_context_menu_model_unittest.cc"
+    },
+    {
+      "type": "modify",
+      "old_id": "963f4629354c504b34321029a2e63f87355672e0",
+      "old_mode": 33188,
+      "old_path": "chrome/browser/ui/views/toolbar/chevron_menu_button.cc",
+      "new_id": "69b207a671aa2c80f4a9b3a6873c9db727f2e355",
+      "new_mode": 33188,
+      "new_path": "chrome/browser/ui/views/toolbar/chevron_menu_button.cc"
+    }
+  ]
+}
diff --git a/catapult/experimental/bisect_lib/test_data/MOCK_RANGE_RESPONSE_FILE b/catapult/experimental/bisect_lib/test_data/MOCK_RANGE_RESPONSE_1
similarity index 100%
rename from catapult/experimental/bisect_lib/test_data/MOCK_RANGE_RESPONSE_FILE
rename to catapult/experimental/bisect_lib/test_data/MOCK_RANGE_RESPONSE_1
diff --git a/catapult/experimental/bisect_lib/test_data/MOCK_RANGE_RESPONSE_2_PAGE_1 b/catapult/experimental/bisect_lib/test_data/MOCK_RANGE_RESPONSE_2_PAGE_1
new file mode 100644
index 0000000..a0a6cce
--- /dev/null
+++ b/catapult/experimental/bisect_lib/test_data/MOCK_RANGE_RESPONSE_2_PAGE_1
@@ -0,0 +1,42 @@
+)]}'
+{
+  "log": [
+    {
+      "commit": "3861789af25e2d3502f0fb7080da5785d31308aa",
+      "tree": "97c4006bdd97695879ff6e38b6905d628f3ec56a",
+      "parents": [
+        "8fcc8af20a3d41b0512e3b1486e4dc7de528a72b"
+      ],
+      "author": {
+        "name": "v8-autoroll",
+        "email": "v8-autoroll@chromium.org",
+        "time": "Tue Oct 27 21:16:25 2015"
+      },
+      "committer": {
+        "name": "Commit bot",
+        "email": "commit-bot@chromium.org",
+        "time": "Tue Oct 27 21:17:57 2015"
+      },
+      "message": "Update V8 to version 4.8.161.\n\nSummary of changes available at:\nhttps://chromium.googlesource.com/v8/v8/+log/bb6df7e1..1eef3579\n\nPlease follow these instructions for assigning/CC\u0027ing issues:\nhttps://code.google.com/p/v8-wiki/wiki/TriagingIssues\n\nPlease close rolling in case of a roll revert:\nhttps://v8-roll.appspot.com/\n\nTBR\u003dhablich@chromium.org,machenbach@chromium.org,yangguo@chromium.org,vogelheim@chromium.org\n\nReview URL: https://codereview.chromium.org/1430483003\n\nCr-Commit-Position: refs/heads/master@{#356397}\n"
+    },
+    {
+      "commit": "8fcc8af20a3d41b0512e3b1486e4dc7de528a72b",
+      "tree": "a1c05b121ec2e8694dcaca9ef54b9817801d40e6",
+      "parents": [
+        "f1c777e3f97a16cc6a3aa922a23602fa59412989"
+      ],
+      "author": {
+        "name": "oshima",
+        "email": "oshima@chromium.org",
+        "time": "Tue Oct 27 21:16:08 2015"
+      },
+      "committer": {
+        "name": "Commit bot",
+        "email": "commit-bot@chromium.org",
+        "time": "Tue Oct 27 21:16:50 2015"
+      },
+      "message": "Reland of Add dimming to the background during sign in  (patchset #1 id:1 of https://codereview.chromium.org/1424513003/ )\n\nReason for revert:\nThe asan issue has been fixed in the new CL https://codereview.chromium.org/1426573004/\n\nOriginal issue\u0027s description:\n\u003e Revert of Add dimming to the background during sign in  (patchset #4 id:160001 of https://codereview.chromium.org/1400153002/ )\n\u003e\n\u003e Reason for revert:\n\u003e https://code.google.com/p/chromium/issues/detail?id\u003d547178\n\u003e\n\u003e Original issue\u0027s description:\n\u003e \u003e Add dimming to the background during sign in\n\u003e \u003e\n\u003e \u003e * Add option to put the dim layer at the bottom. Login screen put this dim layer at the bottom of login container containers so that\n\u003e \u003e  dim layer stays during login transition.\n\u003e \u003e\n\u003e \u003e BUG\u003d478438\n\u003e \u003e TEST\u003dScreenDimmer.DimAtBottom\n\u003e \u003e\n\u003e \u003e Committed: https://crrev.com/c527600749bfc6970ba39e4ed6b24404b0f7b256\n\u003e \u003e Cr-Commit-Position: refs/heads/master@{#355481}\n\u003e\n\u003e TBR\u003ddzhioev@chromium.org,alemate@chromium.org,oshima@chromium.org\n\u003e NOPRESUBMIT\u003dtrue\n\u003e NOTREECHECKS\u003dtrue\n\u003e NOTRY\u003dtrue\n\u003e BUG\u003d478438\n\u003e\n\u003e Committed: https://crrev.com/ccb7824284975a271967334f86b4fc7a17378de5\n\u003e Cr-Commit-Position: refs/heads/master@{#355885}\n\nTBR\u003ddzhioev@chromium.org,alemate@chromium.org,dalecurtis@chromium.org\nNOPRESUBMIT\u003dtrue\nNOTREECHECKS\u003dtrue\nNOTRY\u003dtrue\nBUG\u003d478438\n\nReview URL: https://codereview.chromium.org/1413523005\n\nCr-Commit-Position: refs/heads/master@{#356396}\n"
+    }
+  ],
+  "next": "f1c777e3f97a16cc6a3aa922a23602fa59412989"
+}
diff --git a/catapult/experimental/bisect_lib/test_data/MOCK_RANGE_RESPONSE_2_PAGE_2 b/catapult/experimental/bisect_lib/test_data/MOCK_RANGE_RESPONSE_2_PAGE_2
new file mode 100644
index 0000000..3d06cf8
--- /dev/null
+++ b/catapult/experimental/bisect_lib/test_data/MOCK_RANGE_RESPONSE_2_PAGE_2
@@ -0,0 +1,41 @@
+)]}'
+{
+  "log": [
+    {
+      "commit": "f1c777e3f97a16cc6a3aa922a23602fa59412989",
+      "tree": "99b77b34eca35570c99966e478d58e5f24fdafe7",
+      "parents": [
+        "ee261f306c3c66e96339aa1026d62a6d953302fe"
+      ],
+      "author": {
+        "name": "mmenke",
+        "email": "mmenke@chromium.org",
+        "time": "Tue Oct 27 21:06:44 2015"
+      },
+      "committer": {
+        "name": "Commit bot",
+        "email": "commit-bot@chromium.org",
+        "time": "Tue Oct 27 21:07:26 2015"
+      },
+      "message": "Make NetErrorHelper more OOPIF-friendly.\n\nNow create one per frame instead of creating them only for main frames,\nand get the WebFrame directly from a WebFrame rather than by going\nthrough the RenderView.\n\nBUG\u003d543226,529976\n\nReview URL: https://codereview.chromium.org/1406303002\n\nCr-Commit-Position: refs/heads/master@{#356395}\n"
+    },
+    {
+      "commit": "ee261f306c3c66e96339aa1026d62a6d953302fe",
+      "tree": "d72d77753789a7832b3422d267fe40e805f886c9",
+      "parents": [
+        "7bd1741893bd4e233b5562a6926d7e395d558343"
+      ],
+      "author": {
+        "name": "asanka",
+        "email": "asanka@chromium.org",
+        "time": "Tue Oct 27 21:04:28 2015"
+      },
+      "committer": {
+        "name": "Commit bot",
+        "email": "commit-bot@chromium.org",
+        "time": "Tue Oct 27 21:06:01 2015"
+      },
+      "message": "[SafeBrowsing] Block dangerous unchecked downloads based on a Finch trial.\n\nBUG\u003d533579\n\nReview URL: https://codereview.chromium.org/1409003002\n\nCr-Commit-Position: refs/heads/master@{#356394}\n"
+    }
+  ]
+}
diff --git a/catapult/experimental/buildbot/buildbot.py b/catapult/experimental/buildbot/buildbot.py
index b29b743..cb733f5 100644
--- a/catapult/experimental/buildbot/buildbot.py
+++ b/catapult/experimental/buildbot/buildbot.py
@@ -81,6 +81,8 @@
 
 
 class Builder(object):
+  # pylint: disable=too-many-instance-attributes
+
   def __init__(self, master, name, data):
     self._master = master
     self._name = name
@@ -116,7 +118,7 @@
     """
     build_numbers = tuple(build_number for build_number in build_numbers
                           if not (build_number in self._builds and
-                          self._builds[build_number].complete))
+                                  self._builds[build_number].complete))
     if not build_numbers:
       return ()
 
@@ -214,9 +216,11 @@
     self._complete = not ('currentStep' in data and data['currentStep'])
     self._start_time, self._end_time = data['times']
 
-    self._steps = {step_info['name']:
-        Step(self._master, self._builder_name, self._number, step_info)
-        for step_info in data['steps']}
+    self._steps = {
+        step_info['name']:
+            Step(self._master, self._builder_name, self._number, step_info)
+        for step_info in data['steps']
+    }
 
   def __str__(self):
     return str(self.number)
@@ -286,6 +290,8 @@
 
 
 class Step(object):
+  # pylint: disable=too-many-instance-attributes
+
   def __init__(self, master, builder_name, build_number, data):
     self._master = master
     self._builder_name = builder_name
@@ -308,15 +314,15 @@
 
   def __getstate__(self):
     return {
-      '_master': self._master,
-      '_builder_name': self._builder_name,
-      '_build_number': self._build_number,
-      '_name': self._name,
-      '_result': self._result,
-      '_start_time': self._start_time,
-      '_end_time': self._end_time,
-      '_log_link': self._log_link,
-      '_results_link': self._results_link,
+        '_master': self._master,
+        '_builder_name': self._builder_name,
+        '_build_number': self._build_number,
+        '_name': self._name,
+        '_result': self._result,
+        '_start_time': self._start_time,
+        '_end_time': self._end_time,
+        '_log_link': self._log_link,
+        '_results_link': self._results_link,
     }
 
   def __setstate__(self, state):
@@ -436,7 +442,3 @@
     if self._stack_trace is None:
       self._stack_trace = _ParseTraceFromLog(self.log)
     return self._stack_trace
-
-  @property
-  def chrome_stack_trace(self):
-    raise NotImplementedError()
diff --git a/catapult/experimental/buildbot/query.py b/catapult/experimental/buildbot/query.py
index 0683036..7f1e938 100755
--- a/catapult/experimental/buildbot/query.py
+++ b/catapult/experimental/buildbot/query.py
@@ -33,15 +33,15 @@
   trace_results = step.results['chart_data']['charts'][VALUE_NAME].iteritems()
   for user_story_name, user_story_data in trace_results:
     revision_data.append({
-      'user_story': user_story_name,
-      'start_time': step.start_time,
-      'end_time': step.end_time,
-      'values': user_story_data['values'],
+        'user_story': user_story_name,
+        'start_time': step.start_time,
+        'end_time': step.end_time,
+        'values': user_story_data['values'],
     })
   return {
-    'start_time': build.start_time,
-    'end_time': build.end_time,
-    'user_story_runs': revision_data,
+      'start_time': build.start_time,
+      'end_time': build.end_time,
+      'user_story_runs': revision_data,
   }
 
 
diff --git a/catapult/experimental/hardware.py b/catapult/experimental/hardware.py
index c67aa40..85d4ffc 100755
--- a/catapult/experimental/hardware.py
+++ b/catapult/experimental/hardware.py
@@ -7,13 +7,16 @@
 
 import csv
 import json
+import logging
 import sys
 import urllib2
 
 
 _MASTERS = [
     'chromium.perf',
+    'client.catapult',
     'tryserver.chromium.perf',
+    'tryserver.client.catapult',
 ]
 
 
@@ -33,6 +36,7 @@
     'android device 7',
 ]
 _EXCLUDED_KEYS = frozenset([
+    'architecture (userland)',
     'b directory',
     'last puppet run',
     'uptime',
@@ -46,9 +50,10 @@
 
   for master_name in _MASTERS:
     master_data = json.load(urllib2.urlopen(
-      'http://build.chromium.org/p/%s/json/slaves' % master_name))
+        'http://build.chromium.org/p/%s/json/slaves' % master_name))
 
-    slaves = sorted(master_data.iteritems(), key=lambda x: x[1]['builders'])
+    slaves = sorted(master_data.iteritems(),
+                    key=lambda x: (x[1]['builders'].keys(), x[0]))
     for slave_name, slave_data in slaves:
       for builder_name in slave_data['builders']:
         row = {
@@ -65,16 +70,22 @@
               if not line:
                 continue
               key, value = line.split(': ')
-              if key == 'osfamily':
-                key = 'os family'
               if key in _EXCLUDED_KEYS:
                 continue
               row[key] = value
 
+        # Munge keys.
+        row = {key.replace('_', ' '): value for key, value in row.iteritems()}
+        if 'osfamily' in row:
+          row['os family'] = row.pop('osfamily')
         if 'product name' not in row and slave_name.startswith('slave'):
           row['product name'] = 'Google Compute Engine'
 
-        writer.writerow(row)
+        try:
+          writer.writerow(row)
+        except ValueError:
+          logging.error(row)
+          raise
 
 
 if __name__ == '__main__':
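The new munging step above normalizes puppet fact names before a row is written to CSV: underscores become spaces and the legacy 'osfamily' key is folded into 'os family'. A tiny sketch with a made-up row (dict key order may vary under Python 2):

# Sketch of the key munging; the row contents are invented.
row = {'product_name': 'PowerEdge R220', 'osfamily': 'Debian'}
row = {key.replace('_', ' '): value for key, value in row.iteritems()}
if 'osfamily' in row:
  row['os family'] = row.pop('osfamily')
print(row)  # {'product name': 'PowerEdge R220', 'os family': 'Debian'}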
diff --git a/catapult/experimental/statistical_analysis/__init__.py b/catapult/experimental/statistical_analysis/__init__.py
new file mode 100644
index 0000000..ca3e206
--- /dev/null
+++ b/catapult/experimental/statistical_analysis/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/catapult/experimental/statistical_analysis/compare_benchmark_results.py b/catapult/experimental/statistical_analysis/compare_benchmark_results.py
new file mode 100755
index 0000000..347c542
--- /dev/null
+++ b/catapult/experimental/statistical_analysis/compare_benchmark_results.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Calculates statistical hypothesis test for given benchmark results.
+
+Evaluate two benchmark results given as Chart JSON files to determine how
+statistically significantly different they are. This evaluation should be run
+using Chart JSON files created by one of the available benchmarks in
+tools/perf/run_benchmark.
+
+A "benchmark" (e.g. startup.cold.blank_page) includes several "metrics" (e.g.
+first_main_frame_load_time).
+"""
+
+from __future__ import print_function
+import argparse
+import json
+import os
+import sys
+
+sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                                '..')))
+from statistical_analysis import results_stats
+
+
+DEFAULT_SIGNIFICANCE_LEVEL = 0.05
+DEFAULT_STATISTICAL_TEST = results_stats.MANN
+
+
+def LoadJsonFromPath(json_path):
+  """Returns a JSON from specified location."""
+  with open(os.path.abspath(json_path)) as data_file:
+    return json.load(data_file)
+
+
+def PrintOutcomeLine(name, max_name_length, outcome, print_p_value):
+  """Prints a single output line, e.g. 'metric_1  True  0.03'."""
+  print('{:{}}{}'.format(name, max_name_length + 2, outcome[0]), end='')
+  if print_p_value:
+    print('\t{:.10f}'.format(outcome[1]), end='')
+  print()
+
+
+def PrintTestOutcome(test_outcome_dict, test_name, significance_level,
+                     print_p_value):
+  """Prints the given test outcomes to the command line.
+
+  Will print the p-values for each metric's outcome if |print_p_value| is True
+  and also prints the name of the executed statistical test and the
+  significance level.
+  """
+  print('Statistical analysis results (True=Performance difference likely)\n'
+        '(Test: {}, Significance Level: {})\n'.format(test_name,
+                                                      significance_level))
+
+  max_metric_name_len = max([len(metric_name) for metric_name in
+                             test_outcome_dict])
+
+  for metric_name, outcome in test_outcome_dict.iteritems():
+    PrintOutcomeLine(metric_name, max_metric_name_len, outcome, print_p_value)
+
+
+def PrintPagesetTestOutcome(test_outcome_dict, test_name, significance_level,
+                            print_p_value, print_details):
+  """Prints the given test outcomes to the command line.
+
+  Prints a summary combining the p-values of the pageset for each metric. Then
+  prints results for each metric/page combination if |print_details| is True.
+  """
+  print('Statistical analysis results (True=Performance difference likely)\n'
+        '(Test: {}, Significance Level: {})\n'.format(test_name,
+                                                      significance_level))
+
+  # Print summarized version at the top.
+  max_metric_name_len = max([len(metric_name) for metric_name in
+                             test_outcome_dict])
+  print('Summary (combined p-values for all pages in pageset):\n')
+  for metric_name, pageset in test_outcome_dict.iteritems():
+    combined_p_value = results_stats.CombinePValues([p[1] for p in
+                                                     pageset.itervalues()])
+    outcome = (combined_p_value < significance_level, combined_p_value)
+    PrintOutcomeLine(metric_name, max_metric_name_len, outcome, print_p_value)
+  print()
+
+  if not print_details:
+    return
+
+  # Print outcome for every metric/page combination.
+  for metric_name, pageset in test_outcome_dict.iteritems():
+    max_page_name_len = max([len(page_name) for page_name in pageset])
+    print('{}:'.format(metric_name))
+    for page_name, page_outcome in pageset.iteritems():
+      PrintOutcomeLine(page_name, max_page_name_len, page_outcome,
+                       print_p_value)
+    print()
+
+
+def main(args=None):
+  """Set up parser and run statistical test on given benchmark results.
+
+  Set up command line parser and its arguments. Then load Chart JSONs from
+  given paths, run the specified statistical hypothesis test on the results and
+  print the test outcomes.
+  """
+  if args is None:
+    args = sys.argv[1:]
+
+  parser = argparse.ArgumentParser(description="""Runs statistical significance
+                                   tests on two given Chart JSON benchmark
+                                   results produced by the telemetry
+                                   benchmarks.""")
+
+  parser.add_argument(dest='json_paths', nargs=2, help='JSON file location')
+
+  parser.add_argument('--significance', dest='significance_level',
+                      default=DEFAULT_SIGNIFICANCE_LEVEL, type=float,
+                      help="""The significance level is the type I error rate,
+                      which is the probability of determining that the
+                      benchmark results are different although they're not.
+                      Default: {}, which is common in statistical hypothesis
+                      testing.""".format(DEFAULT_SIGNIFICANCE_LEVEL))
+
+  parser.add_argument('--statistical-test', dest='statistical_test',
+                      default=DEFAULT_STATISTICAL_TEST,
+                      choices=results_stats.ALL_TEST_OPTIONS,
+                      help="""Specifies the statistical hypothesis test that is
+                      used. Choices are: Mann-Whitney U-test,
+                      Kolmogorov-Smirnov, Welch's t-test. Default: Mann-Whitney
+                      U-Test.""")
+
+  parser.add_argument('-p', action='store_true', dest='print_p_value',
+                      help="""If the -p flag is set, the output will include
+                      the p-value for each metric.""")
+
+  parser.add_argument('-d', action='store_true', dest='print_details',
+                      help="""If the -d flag is set, the output will be more
+                      detailed for benchmarks containing pagesets, giving
+                      results for every metric/page combination after a summary
+                      at the top.""")
+
+  args = parser.parse_args(args)
+
+  result_jsons = [LoadJsonFromPath(json_path) for json_path in args.json_paths]
+
+  if (results_stats.DoesChartJSONContainPageset(result_jsons[0]) and
+      results_stats.DoesChartJSONContainPageset(result_jsons[1])):
+    # Benchmark containing a pageset.
+    result_dict_1, result_dict_2 = (
+        [results_stats.CreatePagesetBenchmarkResultDict(result_json)
+         for result_json in result_jsons])
+    test_outcome_dict = results_stats.ArePagesetBenchmarkResultsDifferent(
+        result_dict_1, result_dict_2, args.statistical_test,
+        args.significance_level)
+
+    PrintPagesetTestOutcome(test_outcome_dict, args.statistical_test,
+                            args.significance_level, args.print_p_value,
+                            args.print_details)
+
+  else:
+    # Benchmark not containing a pageset.
+    # (If only one JSON contains a pageset, results_stats raises an error.)
+    result_dict_1, result_dict_2 = (
+        [results_stats.CreateBenchmarkResultDict(result_json)
+         for result_json in result_jsons])
+    test_outcome_dict = (
+        results_stats.AreBenchmarkResultsDifferent(result_dict_1, result_dict_2,
+                                                   args.statistical_test,
+                                                   args.significance_level))
+
+    PrintTestOutcome(test_outcome_dict, args.statistical_test,
+                     args.significance_level, args.print_p_value)
+
+
+if __name__ == '__main__':
+  sys.exit(main())
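Because main() above accepts an argument list directly, the comparison can also be driven programmatically. A hypothetical invocation (the Chart JSON paths and the sys.path entry are placeholders; scipy must be installed for results_stats):

# Hypothetical programmatic use; before.chartjson / after.chartjson stand in
# for files produced by run_benchmark with --output-format=chartjson.
import sys
sys.path.insert(0, '/path/to/catapult/experimental')  # placeholder path

from statistical_analysis import compare_benchmark_results

compare_benchmark_results.main([
    'before.chartjson', 'after.chartjson',
    '--statistical-test', 'mann',
    '--significance', '0.05',
    '-p',
])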
diff --git a/catapult/experimental/statistical_analysis/results_stats.py b/catapult/experimental/statistical_analysis/results_stats.py
new file mode 100755
index 0000000..7f437b8
--- /dev/null
+++ b/catapult/experimental/statistical_analysis/results_stats.py
@@ -0,0 +1,327 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Statistical hypothesis testing for comparing benchmark results."""
+
+try:
+  import numpy as np
+except ImportError:
+  np = None
+
+try:
+  from scipy import stats
+  import scipy.version
+except ImportError:
+  stats = None
+
+
+MANN = 'mann'
+KOLMOGOROV = 'kolmogorov'
+WELCH = 'welch'
+ALL_TEST_OPTIONS = [MANN, KOLMOGOROV, WELCH]
+
+
+class DictMismatchError(Exception):
+  """Provides exception for result dicts with mismatching keys/metrics."""
+  def __str__(self):
+    return ("Provided benchmark result dicts' keys/metrics do not match. "
+            "Check if they have been created by the same benchmark.")
+
+
+class SampleSizeError(Exception):
+  """Provides exception for sample sizes too small for Mann-Whitney U-test."""
+  def __str__(self):
+    return ('At least one sample size is smaller than 20, which is too small '
+            'for Mann-Whitney U-test.')
+
+
+class NonNormalSampleError(Exception):
+  """Provides exception for samples that are not normally distributed."""
+  def __str__(self):
+    return ("At least one sample is not normally distributed as required by "
+            "Welch's t-test.")
+
+
+def IsScipyMannTestOneSided():
+  """Checks if Scipy version is < 0.17.0.
+
+  This is the version where stats.mannwhitneyu(...) is changed from returning
+  a one-sided to returning a two-sided p-value.
+  """
+  scipy_version = [int(num) for num in scipy.version.version.split('.')]
+  return scipy_version[0] < 1 and scipy_version[1] < 17
+
+
+def GetChartsFromBenchmarkResultJson(benchmark_result_json):
+  """Returns the 'charts' element from a given Chart JSON.
+
+  Excludes entries that are not list_of_scalar_values and empty entries. Also
+  raises errors for an invalid JSON format or empty 'charts' element.
+
+  Raises:
+    ValueError: Provided chart JSON is either not valid or 'charts' is empty.
+  """
+  try:
+    charts = benchmark_result_json['charts']
+  except KeyError:
+    raise ValueError('Invalid benchmark result format. Make sure input is a '
+                     'Chart-JSON.\nProvided JSON:\n',
+                     repr(benchmark_result_json))
+  if not charts:
+    raise ValueError("Invalid benchmark result format. Dict entry 'charts' is "
+                     "empty.")
+
+  def IsValidPageContent(page_content):
+    return (page_content['type'] == 'list_of_scalar_values' and
+            'values' in page_content)
+
+  def CreatePageDict(metric_content):
+    return {page_name: page_content
+            for page_name, page_content in metric_content.iteritems()
+            if IsValidPageContent(page_content)}
+
+  charts_valid_entries_only = {}
+  for metric_name, metric_content in charts.iteritems():
+    inner_page_dict = CreatePageDict(metric_content)
+    if not inner_page_dict:
+      continue
+    charts_valid_entries_only[metric_name] = inner_page_dict
+
+  return charts_valid_entries_only
+
+
+def DoesChartJSONContainPageset(benchmark_result_json):
+  """Checks if given Chart JSON contains results for a pageset.
+
+  A metric in a benchmark NOT containing a pageset contains only two elements
+  ("Only_page_in_this_benchmark" and "Summary", as opposed to "Ex_page_1",
+  "Ex_page_2", ..., and "Summary").
+  """
+  charts = GetChartsFromBenchmarkResultJson(benchmark_result_json)
+
+  arbitrary_metric_in_charts = charts.itervalues().next()
+  return len(arbitrary_metric_in_charts) > 2
+
+
+def CreateBenchmarkResultDict(benchmark_result_json):
+  """Creates a dict of format {metric_name: list of benchmark results}.
+
+  Takes a raw result Chart-JSON produced when using '--output-format=chartjson'
+  for 'run_benchmark'.
+
+  Args:
+    benchmark_result_json: Benchmark result Chart-JSON produced by Telemetry.
+
+  Returns:
+    Dictionary of benchmark results.
+    Example dict entry: 'tab_load_time': [650, 700, ...].
+  """
+  charts = GetChartsFromBenchmarkResultJson(benchmark_result_json)
+
+  benchmark_result_dict = {}
+  for metric_name, metric_content in charts.iteritems():
+    benchmark_result_dict[metric_name] = metric_content['summary']['values']
+
+  return benchmark_result_dict
+
+
+def CreatePagesetBenchmarkResultDict(benchmark_result_json):
+  """Creates a dict of format {metric_name: {page_name: list of page results}}.
+
+  Takes a raw result Chart-JSON produced by 'run_benchmark' when using
+  '--output-format=chartjson' and when specifying a benchmark that has a
+  pageset (e.g. top25mobile). Run 'DoesChartJSONContainPageset' to check if
+  your Chart-JSON contains a pageset.
+
+  Args:
+    benchmark_result_json: Benchmark result Chart-JSON produced by Telemetry.
+
+  Returns:
+    Dictionary of benchmark results.
+    Example dict entry: 'tab_load_time': 'Gmail.com': [650, 700, ...].
+  """
+  charts = GetChartsFromBenchmarkResultJson(benchmark_result_json)
+
+  benchmark_result_dict = {}
+  for metric_name, metric_content in charts.iteritems():
+    benchmark_result_dict[metric_name] = {}
+    for page_name, page_content in metric_content.iteritems():
+      if page_name == 'summary':
+        continue
+      benchmark_result_dict[metric_name][page_name] = page_content['values']
+
+  return benchmark_result_dict
+
+
+def CombinePValues(p_values):
+  """Combines p-values from a number of tests using Fisher's Method.
+
+  The tests the p-values result from must test the same null hypothesis and be
+  independent.
+
+  Args:
+    p_values: List of p-values.
+
+  Returns:
+    combined_p_value: Combined p-value according to Fisher's method.
+  """
+  # TODO (wierichs): Update to use scipy.stats.combine_pvalues(p_values) when
+  # Scipy v0.15.0 becomes available as standard version.
+  if not np:
+    raise ImportError('This function requires Numpy.')
+
+  if not stats:
+    raise ImportError('This function requires Scipy.')
+
+  test_statistic = -2 * np.sum(np.log(p_values))
+  p_value = stats.chi2.sf(test_statistic, 2 * len(p_values))
+  return p_value
+
+
+def IsNormallyDistributed(sample, significance_level=0.05):
+  """Calculates Shapiro-Wilk test for normality for a single sample.
+
+  Note that normality is a requirement for Welch's t-test.
+
+  Args:
+    sample: List of values.
+    significance_level: The significance level the p-value is compared against.
+
+  Returns:
+    is_normally_distributed: Returns True or False.
+    p_value: The calculated p-value.
+  """
+  if not stats:
+    raise ImportError('This function requires Scipy.')
+
+  # pylint: disable=unbalanced-tuple-unpacking
+  _, p_value = stats.shapiro(sample)
+
+  is_normally_distributed = p_value >= significance_level
+  return is_normally_distributed, p_value
+
+
+def AreSamplesDifferent(sample_1, sample_2, test=MANN,
+                        significance_level=0.05):
+  """Calculates the specified statistical test for the given samples.
+
+  The null hypothesis for each test is that the two populations that the
+  samples are taken from are not significantly different. Tests are two-tailed.
+
+  Raises:
+    ImportError: Scipy is not installed.
+    SampleSizeError: Sample size is too small for MANN.
+    NonNormalSampleError: Sample is not normally distributed as required by
+    WELCH.
+
+  Args:
+    sample_1: First list of values.
+    sample_2: Second list of values.
+    test: Statistical test that is used.
+    significance_level: The significance level the p-value is compared against.
+
+  Returns:
+    is_different: True or False, depending on the test outcome.
+    p_value: The p-value the test has produced.
+  """
+  if not stats:
+    raise ImportError('This function requires Scipy.')
+
+  if test == MANN:
+    if len(sample_1) < 20 or len(sample_2) < 20:
+      raise SampleSizeError()
+    try:
+      _, p_value = stats.mannwhitneyu(sample_1, sample_2, use_continuity=True)
+    except ValueError:
+      # If sum of ranks of values in |sample_1| and |sample_2| is equal,
+      # scipy.stats.mannwhitneyu raises ValueError. Treat this as a 1.0 p-value
+      # (indistinguishable).
+      return (False, 1.0)
+
+    if IsScipyMannTestOneSided():
+      p_value = p_value * 2 if p_value < 0.5 else 1
+
+  elif test == KOLMOGOROV:
+    _, p_value = stats.ks_2samp(sample_1, sample_2)
+
+  elif test == WELCH:
+    if not (IsNormallyDistributed(sample_1, significance_level)[0] and
+            IsNormallyDistributed(sample_2, significance_level)[0]):
+      raise NonNormalSampleError()
+    _, p_value = stats.ttest_ind(sample_1, sample_2, equal_var=False)
+  # TODO: Add k sample anderson darling test
+
+  is_different = p_value <= significance_level
+  return is_different, p_value
+
+
+def AssertThatKeysMatch(result_dict_1, result_dict_2):
+  """Raises an exception if benchmark dicts do not contain the same metrics."""
+  if result_dict_1.viewkeys() != result_dict_2.viewkeys():
+    raise DictMismatchError()
+
+
+def AreBenchmarkResultsDifferent(result_dict_1, result_dict_2, test=MANN,
+                                 significance_level=0.05):
+  """Runs the given test on the results of each metric in the benchmarks.
+
+  Checks if the dicts have been created from the same benchmark, i.e. if
+  metric names match (e.g. first_non_empty_paint_time). Then runs the specified
+  statistical test on each metric's samples to find if they vary significantly.
+
+  Args:
+    result_dict_1: Benchmark result dict of format {metric: list of values}.
+    result_dict_2: Benchmark result dict of format {metric: list of values}.
+    test: Statistical test that is used.
+    significance_level: The significance level the p-value is compared against.
+
+  Returns:
+    test_outcome_dict: Format {metric: (bool is_different, p-value)}.
+  """
+  AssertThatKeysMatch(result_dict_1, result_dict_2)
+
+  test_outcome_dict = {}
+  for metric in result_dict_1:
+    is_different, p_value = AreSamplesDifferent(result_dict_1[metric],
+                                                result_dict_2[metric],
+                                                test, significance_level)
+    test_outcome_dict[metric] = (is_different, p_value)
+
+  return test_outcome_dict
+
+
+def ArePagesetBenchmarkResultsDifferent(result_dict_1, result_dict_2, test=MANN,
+                                        significance_level=0.05):
+  """Runs the given test on the results of each metric/page combination.
+
+  Checks if the dicts have been created from the same benchmark, i.e. if metric
+  names and pagesets match (e.g. metric first_non_empty_paint_time and page
+  Google.com). Then runs the specified statistical test on each metric/page
+  combination's sample to find if they vary significantly.
+
+  Args:
+    result_dict_1: Benchmark result dict
+    result_dict_2: Benchmark result dict
+    test: Statistical test that is used.
+    significance_level: The significance level the p-value is compared against.
+
+  Returns:
+    test_outcome_dict: Format {metric: {page: (bool is_different, p-value)}}
+  """
+  AssertThatKeysMatch(result_dict_1, result_dict_2)
+
+  # Pagesets should also match.
+  for metric in result_dict_1.iterkeys():
+    AssertThatKeysMatch(result_dict_1[metric], result_dict_2[metric])
+
+  test_outcome_dict = {}
+  for metric in result_dict_1.iterkeys():
+    test_outcome_dict[metric] = {}
+    for page in result_dict_1[metric]:
+      is_different, p_value = AreSamplesDifferent(result_dict_1[metric][page],
+                                                  result_dict_2[metric][page],
+                                                  test, significance_level)
+      test_outcome_dict[metric][page] = (is_different, p_value)
+
+  return test_outcome_dict
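A self-contained example of the helpers above on synthetic data (assumes scipy is installed and that the module is importable, e.g. with catapult/experimental on sys.path). Note that the Mann-Whitney path requires at least 20 values per sample:

# Synthetic benchmark dicts; the metric name and values are made up.
from statistical_analysis import results_stats

results_1 = {'tab_load_time': [650, 655, 652, 648, 660, 651, 649, 653,
                               647, 658, 654, 650, 656, 652, 649, 651,
                               655, 648, 653, 650]}
results_2 = {'tab_load_time': [700, 705, 702, 698, 710, 701, 699, 703,
                               697, 708, 704, 700, 706, 702, 699, 701,
                               705, 698, 703, 700]}

outcome = results_stats.AreBenchmarkResultsDifferent(
    results_1, results_2, test=results_stats.MANN, significance_level=0.05)
print(outcome)  # e.g. {'tab_load_time': (True, <p-value well below 0.05>)}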
diff --git a/catapult/experimental/statistical_analysis/results_stats_unittest.py b/catapult/experimental/statistical_analysis/results_stats_unittest.py
new file mode 100755
index 0000000..51d1202
--- /dev/null
+++ b/catapult/experimental/statistical_analysis/results_stats_unittest.py
@@ -0,0 +1,265 @@
+#!/usr/bin/env python
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Tests for results_stats."""
+
+import os
+import sys
+
+import unittest
+
+try:
+  import numpy as np
+except ImportError:
+  np = None
+
+sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                                '..')))
+from statistical_analysis import results_stats
+
+
+class StatisticalBenchmarkResultsAnalysisTest(unittest.TestCase):
+  """Unit testing of several functions in results_stats."""
+
+  def testGetChartsFromBenchmarkResultJson(self):
+    """Unit test for errors raised when getting the charts element.
+
+    Also makes sure that the 'trace' element is deleted if it exists.
+    """
+    input_json_wrong_format = {'charts_wrong': {}}
+    input_json_empty = {'charts': {}}
+    with self.assertRaises(ValueError):
+      (results_stats.GetChartsFromBenchmarkResultJson(input_json_wrong_format))
+    with self.assertRaises(ValueError):
+      (results_stats.GetChartsFromBenchmarkResultJson(input_json_empty))
+
+    input_json_with_trace = {'charts':
+                             {'trace': {},
+                              'Ex_metric_1':
+                              {'Ex_page_1': {'type': 'list_of_scalar_values',
+                                             'values': [1, 2]},
+                               'Ex_page_2': {'type': 'histogram',
+                                             'values': [1, 2]}},
+                              'Ex_metric_2':
+                              {'Ex_page_1': {'type': 'list_of_scalar_values'},
+                               'Ex_page_2': {'type': 'list_of_scalar_values',
+                                             'values': [1, 2]}}}}
+
+    output = (results_stats.
+              GetChartsFromBenchmarkResultJson(input_json_with_trace))
+    expected_output = {'Ex_metric_1':
+                       {'Ex_page_1': {'type': 'list_of_scalar_values',
+                                      'values': [1, 2]}},
+                       'Ex_metric_2':
+                       {'Ex_page_2': {'type': 'list_of_scalar_values',
+                                      'values': [1, 2]}}}
+    self.assertEqual(output, expected_output)
+
+  def testCreateBenchmarkResultDict(self):
+    """Unit test for benchmark result dict created from a benchmark json.
+
+    Creates a json of the format created by tools/perf/run_benchmark and then
+    compares the output dict against an expected predefined output dict.
+    """
+    metric_names = ['messageloop_start_time',
+                    'open_tabs_time',
+                    'window_display_time']
+    metric_values = [[55, 72, 60], [54, 42, 65], [44, 89]]
+
+    input_json = {'charts': {}}
+    for metric, metric_vals in zip(metric_names, metric_values):
+      input_json['charts'][metric] = {'summary':
+                                      {'values': metric_vals,
+                                       'type': 'list_of_scalar_values'}}
+
+    output = results_stats.CreateBenchmarkResultDict(input_json)
+    expected_output = {'messageloop_start_time': [55, 72, 60],
+                       'open_tabs_time': [54, 42, 65],
+                       'window_display_time': [44, 89]}
+
+    self.assertEqual(output, expected_output)
+
+  def testCreatePagesetBenchmarkResultDict(self):
+    """Unit test for pageset benchmark result dict created from benchmark json.
+
+    Creates a json of the format created by tools/perf/run_benchmark when it
+    includes a pageset and then compares the output dict against an expected
+    predefined output dict.
+    """
+    metric_names = ['messageloop_start_time',
+                    'open_tabs_time',
+                    'window_display_time']
+    metric_values = [[55, 72, 60], [54, 42, 65], [44, 89]]
+    page_names = ['Ex_page_1', 'Ex_page_2']
+
+    input_json = {'charts': {}}
+    for metric, metric_vals in zip(metric_names, metric_values):
+      input_json['charts'][metric] = {'summary':
+                                      {'values': [0, 1, 2, 3],
+                                       'type': 'list_of_scalar_values'}}
+      for page in page_names:
+        input_json['charts'][metric][page] = {'values': metric_vals,
+                                              'type': 'list_of_scalar_values'}
+
+    output = results_stats.CreatePagesetBenchmarkResultDict(input_json)
+    expected_output = {'messageloop_start_time': {'Ex_page_1': [55, 72, 60],
+                                                  'Ex_page_2': [55, 72, 60]},
+                       'open_tabs_time': {'Ex_page_1': [54, 42, 65],
+                                          'Ex_page_2': [54, 42, 65]},
+                       'window_display_time': {'Ex_page_1': [44, 89],
+                                               'Ex_page_2': [44, 89]}}
+
+    self.assertEqual(output, expected_output)
+
+  def testCombinePValues(self):
+    """Unit test for Fisher's Method that combines multiple p-values."""
+    test_p_values = [0.05, 0.04, 0.10, 0.07, 0.01]
+
+    expected_output = 0.00047334256271885721
+    output = results_stats.CombinePValues(test_p_values)
+
+    self.assertEqual(output, expected_output)
+
+  def CreateRandomNormalDistribution(self, mean=0, size=30):
+    """Creates two pseudo random samples for testing in multiple methods."""
+    if not np:
+      raise ImportError('This function requires Numpy.')
+
+    np.random.seed(0)
+    sample = np.random.normal(loc=mean, scale=1, size=size)
+
+    return sample
+
+  def testIsNormallyDistributed(self):
+    """Unit test for values returned when testing for normality."""
+    if not np:
+      self.skipTest("Numpy is not installed.")
+
+    test_samples = [self.CreateRandomNormalDistribution(0),
+                    self.CreateRandomNormalDistribution(1)]
+
+    expected_outputs = [(True, 0.5253966450691223),
+                        (True, 0.5253913402557373)]
+    for sample, expected_output in zip(test_samples, expected_outputs):
+      output = results_stats.IsNormallyDistributed(sample)
+
+      self.assertEqual(output, expected_output)
+
+  def testAreSamplesDifferent(self):
+    """Unit test for values returned after running the statistical tests.
+
+    Creates two pseudo-random normally distributed samples to run the
+    statistical tests and compares the resulting answer and p-value against
+    their pre-calculated values.
+    """
+    test_samples = [3 * [0, 0, 2, 4, 4], 3 * [5, 5, 7, 9, 9]]
+    with self.assertRaises(results_stats.SampleSizeError):
+      results_stats.AreSamplesDifferent(test_samples[0], test_samples[1],
+                                        test=results_stats.MANN)
+    with self.assertRaises(results_stats.NonNormalSampleError):
+      results_stats.AreSamplesDifferent(test_samples[0], test_samples[1],
+                                        test=results_stats.WELCH)
+
+    test_samples_equal = (20 * [1], 20 * [1])
+    expected_output_equal = (False, 1.0)
+    output_equal = results_stats.AreSamplesDifferent(test_samples_equal[0],
+                                                     test_samples_equal[1],
+                                                     test=results_stats.MANN)
+    self.assertEqual(output_equal, expected_output_equal)
+
+    if not np:
+      self.skipTest("Numpy is not installed.")
+
+    test_samples = [self.CreateRandomNormalDistribution(0),
+                    self.CreateRandomNormalDistribution(1)]
+    test_options = results_stats.ALL_TEST_OPTIONS
+
+    expected_outputs = [(True, 2 * 0.00068516628052438266),
+                        (True, 0.0017459498829507842),
+                        (True, 0.00084765230478226514)]
+
+    for test, expected_output in zip(test_options, expected_outputs):
+      output = results_stats.AreSamplesDifferent(test_samples[0],
+                                                 test_samples[1],
+                                                 test=test)
+      self.assertEqual(output, expected_output)
+
+  def testAssertThatKeysMatch(self):
+    """Unit test for exception raised when input dicts' metrics don't match."""
+    differing_input_dicts = [{'messageloop_start_time': [55, 72, 60],
+                              'display_time': [44, 89]},
+                             {'messageloop_start_time': [55, 72, 60]}]
+    with self.assertRaises(results_stats.DictMismatchError):
+      results_stats.AssertThatKeysMatch(differing_input_dicts[0],
+                                        differing_input_dicts[1])
+
+  def testAreBenchmarkResultsDifferent(self):
+    """Unit test for statistical test outcome dict."""
+    test_input_dicts = [{'open_tabs_time':
+                         self.CreateRandomNormalDistribution(0),
+                         'display_time':
+                         self.CreateRandomNormalDistribution(0)},
+                        {'open_tabs_time':
+                         self.CreateRandomNormalDistribution(0),
+                         'display_time':
+                         self.CreateRandomNormalDistribution(1)}]
+    test_options = results_stats.ALL_TEST_OPTIONS
+
+    expected_outputs = [{'open_tabs_time': (False, 2 * 0.49704973080841425),
+                         'display_time': (True, 2 * 0.00068516628052438266)},
+                        {'open_tabs_time': (False, 1.0),
+                         'display_time': (True, 0.0017459498829507842)},
+                        {'open_tabs_time': (False, 1.0),
+                         'display_time': (True, 0.00084765230478226514)}]
+
+    for test, expected_output in zip(test_options, expected_outputs):
+      output = results_stats.AreBenchmarkResultsDifferent(test_input_dicts[0],
+                                                          test_input_dicts[1],
+                                                          test=test)
+      self.assertEqual(output, expected_output)
+
+  def testArePagesetBenchmarkResultsDifferent(self):
+    """Unit test for statistical test outcome dict."""
+    distributions = (self.CreateRandomNormalDistribution(0),
+                     self.CreateRandomNormalDistribution(1))
+    test_input_dicts = ({'open_tabs_time': {'Ex_page_1': distributions[0],
+                                            'Ex_page_2': distributions[0]},
+                         'display_time': {'Ex_page_1': distributions[1],
+                                          'Ex_page_2': distributions[1]}},
+                        {'open_tabs_time': {'Ex_page_1': distributions[0],
+                                            'Ex_page_2': distributions[1]},
+                         'display_time': {'Ex_page_1': distributions[1],
+                                          'Ex_page_2': distributions[0]}})
+    test_options = results_stats.ALL_TEST_OPTIONS
+
+    expected_outputs = ({'open_tabs_time':  # Mann.
+                         {'Ex_page_1': (False, 2 * 0.49704973080841425),
+                          'Ex_page_2': (True, 2 * 0.00068516628052438266)},
+                         'display_time':
+                         {'Ex_page_1': (False, 2 * 0.49704973080841425),
+                          'Ex_page_2': (True, 2 * 0.00068516628052438266)}},
+                        {'open_tabs_time':  # Kolmogorov.
+                         {'Ex_page_1': (False, 1.0),
+                          'Ex_page_2': (True, 0.0017459498829507842)},
+                         'display_time':
+                         {'Ex_page_1': (False, 1.0),
+                          'Ex_page_2': (True, 0.0017459498829507842)}},
+                        {'open_tabs_time':  # Welch.
+                         {'Ex_page_1': (False, 1.0),
+                          'Ex_page_2': (True, 0.00084765230478226514)},
+                         'display_time':
+                         {'Ex_page_1': (False, 1.0),
+                          'Ex_page_2': (True, 0.00084765230478226514)}})
+
+    for test, expected_output in zip(test_options, expected_outputs):
+      output = (results_stats.
+                ArePagesetBenchmarkResultsDifferent(test_input_dicts[0],
+                                                    test_input_dicts[1],
+                                                    test=test))
+      self.assertEqual(output, expected_output)
+
+
+if __name__ == '__main__':
+  sys.exit(unittest.main())
diff --git a/catapult/firefighter/OWNERS b/catapult/firefighter/OWNERS
new file mode 100644
index 0000000..3130136
--- /dev/null
+++ b/catapult/firefighter/OWNERS
@@ -0,0 +1 @@
+dtu@chromium.org
diff --git a/catapult/firefighter/PRESUBMIT.py b/catapult/firefighter/PRESUBMIT.py
new file mode 100644
index 0000000..5ba61d3
--- /dev/null
+++ b/catapult/firefighter/PRESUBMIT.py
@@ -0,0 +1,26 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+def CheckChangeOnUpload(input_api, output_api):
+  return _CommonChecks(input_api, output_api)
+
+
+def CheckChangeOnCommit(input_api, output_api):
+  return _CommonChecks(input_api, output_api)
+
+
+def _CommonChecks(input_api, output_api):
+  results = []
+  results += input_api.RunTests(input_api.canned_checks.GetPylint(
+      input_api, output_api, extra_paths_list=_GetPathsToPrepend(input_api),
+      pylintrc='pylintrc'))
+  return results
+
+
+def _GetPathsToPrepend(input_api):
+  project_dir = input_api.PresubmitLocalPath()
+  return [
+      project_dir,
+  ]
diff --git a/catapult/firefighter/README.md b/catapult/firefighter/README.md
new file mode 100644
index 0000000..fefe0ba
--- /dev/null
+++ b/catapult/firefighter/README.md
@@ -0,0 +1,53 @@
+<!-- Copyright 2015 The Chromium Authors. All rights reserved.
+     Use of this source code is governed by a BSD-style license that can be
+     found in the LICENSE file.
+-->
+
+Firefighter
+===========
+
+Firefighter is an App Engine dashboard that visualizes time series data.
+
+The overall process is to:
+1. Ingest multiple streams of data, either by polling data sources or by bot uploads.
+1. Convert everything to a trace event and tag it with metadata (an example event is sketched after this list).
+1. Filter the events on dozens of parameters with low cost and latency, using BigQuery.
+1. Produce arbitrary visualizations for arbitrary data on-demand.
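+
+For illustration, step 2 turns each build into a trace-viewer "complete" event
+(`'ph': 'X'`). The field names below match the `/query` handler in
+`default/handlers/query.py`; the values are hypothetical:
+
+    {
+        'name': 'page_cycler.typical_25',        # build or step name
+        'pid': 'Android Nexus5 Perf',            # the build's configuration
+        'tid': 'Android Builder [build42-a1]',   # builder name [hostname]
+        'ph': 'X',                               # complete event with duration
+        'ts': 1438000000000000,                  # start time in microseconds
+        'dur': 120000000,                        # duration in microseconds
+        'cname': 'cq_build_passed',              # trace-viewer color name
+        'args': {'url': 'https://build.chromium.org/p/...'},  # buildbot link
+    }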
+
+Prerequisites
+-------------
+
+Follow the instructions for setting up [Google App Engine Managed VMs](https://cloud.google.com/appengine/docs/managed-vms/getting-started).
+
+1. Download and install the [Google Cloud SDK](https://cloud.google.com/sdk/#Quick_Start).
+
+1. Ensure you are authorized to run gcloud commands.
+
+        $ gcloud auth login
+
+1. Set the default project name.
+
+        $ gcloud config set project PROJECT
+
+1. Install the gcloud app component.
+
+        $ gcloud components update app
+
+Development Server
+------------------
+
+You must have the [Google Cloud SDK](https://cloud.google.com/sdk/) installed. Run:
+
+    firefighter$ bin/run
+
+Deployment
+----------
+
+You must have the [Google Cloud SDK](https://cloud.google.com/sdk/) installed. Run:
+
+    firefighter$ bin/deploy
+
+Code Organization
+-----------------
+
+The app is divided into two modules: `default` and `update`. The `update` module handles ingestion of data, through either polling or uploading from an external service. The `default` module handles user queries. `base/` contains code shared between both modules.
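+
+For orientation, here is a minimal sketch of how the `default` module wires its
+handlers; it mirrors `default/main.py` in this directory, with only the
+comments added for illustration:
+
+    import webapp2
+
+    from handlers import blank
+    from handlers import query
+    from handlers import trace
+
+    _URL_MAPPING = [
+        ('/', trace.Trace),            # renders the trace-viewer page
+        ('/_ah/health', blank.Blank),  # App Engine health check
+        ('/_ah/start', blank.Blank),   # App Engine start request
+        ('/query', query.Query),       # returns trace events as JSON
+    ]
+    app = webapp2.WSGIApplication(_URL_MAPPING)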
diff --git a/catapult/perf_insights/third_party/__init__.py b/catapult/firefighter/base/__init__.py
similarity index 100%
copy from catapult/perf_insights/third_party/__init__.py
copy to catapult/firefighter/base/__init__.py
diff --git a/catapult/firefighter/base/bigquery.py b/catapult/firefighter/base/bigquery.py
new file mode 100644
index 0000000..1661bda
--- /dev/null
+++ b/catapult/firefighter/base/bigquery.py
@@ -0,0 +1,183 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import io
+import json
+import logging
+import time
+import uuid
+
+from google.appengine.api import app_identity
+
+from apiclient import http
+from apiclient.discovery import build
+from oauth2client import client
+
+from base import exceptions
+
+
+# urlfetch max size is 10 MB. Assume 1000 bytes per row and split the
+# insert into chunks of 10,000 rows.
+INSERTION_MAX_ROWS = 10000
+
+
+class BigQuery(object):
+  """Methods for interfacing with BigQuery."""
+
+  def __init__(self, project_id=None):
+    self._service = _Service()
+    if project_id:
+      self._project_id = project_id
+    else:
+      self._project_id = app_identity.get_application_id()
+
+  def InsertRowsAsync(self, dataset_id, table_id, rows,
+                      truncate=False, num_retries=5):
+    responses = []
+    for i in xrange(0, len(rows), INSERTION_MAX_ROWS):
+      rows_chunk = rows[i:i+INSERTION_MAX_ROWS]
+      logging.info('Inserting %d rows into %s.%s.',
+                   len(rows_chunk), dataset_id, table_id)
+      body = {
+          'configuration': {
+              'jobReference': {
+                  'projectId': self._project_id,
+                  'jobId': str(uuid.uuid4()),
+              },
+              'load': {
+                  'destinationTable': {
+                      'projectId': self._project_id,
+                      'datasetId': dataset_id,
+                      'tableId': table_id,
+                  },
+                  'sourceFormat': 'NEWLINE_DELIMITED_JSON',
+                  'writeDisposition':
+                      'WRITE_TRUNCATE' if truncate else 'WRITE_APPEND',
+              }
+          }
+      }
+
+      # Format rows as newline-delimited JSON.
+      media_buffer = io.BytesIO()
+      for row in rows_chunk:
+        json.dump(row, media_buffer, separators=(',', ':'))
+        print >> media_buffer
+      media_body = http.MediaIoBaseUpload(
+          media_buffer, mimetype='application/octet-stream')
+
+      responses.append(self._service.jobs().insert(
+          projectId=self._project_id,
+          body=body, media_body=media_body).execute(num_retries=num_retries))
+
+      # Only truncate on the first insert!
+      truncate = False
+
+    # TODO(dtu): Return a Job object.
+    return responses
+
+  def InsertRowsSync(self, dataset_id, table_id, rows, num_retries=5):
+    for i in xrange(0, len(rows), INSERTION_MAX_ROWS):
+      rows_chunk = rows[i:i+INSERTION_MAX_ROWS]
+      logging.info('Inserting %d rows into %s.%s.',
+                   len(rows_chunk), dataset_id, table_id)
+      rows_chunk = [{'insertId': str(uuid.uuid4()), 'json': row}
+                    for row in rows_chunk]
+      insert_data = {'rows': rows_chunk}
+      response = self._service.tabledata().insertAll(
+          projectId=self._project_id,
+          datasetId=dataset_id,
+          tableId=table_id,
+          body=insert_data).execute(num_retries=num_retries)
+
+      if 'insertErrors' in response:
+        raise exceptions.QueryError(response['insertErrors'])
+
+  def QueryAsync(self, query, num_retries=5):
+    logging.debug(query)
+    body = {
+        'jobReference': {
+            'projectId': self._project_id,
+            'jobId': str(uuid.uuid4()),
+        },
+        'configuration': {
+            'query': {
+                'query': query,
+                'priority': 'INTERACTIVE',
+            }
+        }
+    }
+    return self._service.jobs().insert(
+        projectId=self._project_id,
+        body=body).execute(num_retries=num_retries)
+
+  def QuerySync(self, query, timeout=60, num_retries=5):
+    """Query Bigtable and return the results as a dict.
+
+    Args:
+      query: Query string.
+      timeout: Timeout in seconds.
+      num_retries: Number of attempts.
+
+    Returns:
+      Query results. The format is specified in the "rows" field here:
+      https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/python/latest/bigquery_v2.jobs.html#getQueryResults
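+
+    Example (illustrative; the table name and LIMIT are arbitrary):
+      bq = BigQuery()
+      rows = bq.QuerySync('SELECT name FROM [public.builds] LIMIT 10')
+      # Each row has the form {'f': [{'v': <field value>}, ...]}.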
+    """
+    logging.debug(query)
+    query_data = {
+        'query': query,
+        'timeoutMs': timeout * 1000,
+    }
+    start_time = time.time()
+    response = self._service.jobs().query(
+        projectId=self._project_id,
+        body=query_data).execute(num_retries=num_retries)
+
+    if 'errors' in response:
+      raise exceptions.QueryError(response['errors'])
+
+    # TODO(dtu): Fetch subsequent pages of rows for big queries.
+    # TODO(dtu): Reformat results as dicts.
+    result = response.get('rows', [])
+    logging.debug('Query fetched %d rows in %fs.',
+                  len(result), time.time() - start_time)
+    return result
+
+  def IsJobDone(self, job):
+    response = self._service.jobs().get(**job['jobReference']).execute()
+    if response['status']['state'] == 'DONE':
+      return response
+    else:
+      return None
+
+  def PollJob(self, job, timeout):
+    # TODO(dtu): Take multiple jobs as parameters.
+    start_time = time.time()
+    iteration = 0
+
+    while True:
+      elapsed_time = time.time() - start_time
+
+      response = self.IsJobDone(job)
+      if response:
+        if 'errors' in response['status']:
+          raise exceptions.QueryError(response['status']['errors'])
+        logging.debug('Polled job for %d seconds.', int(elapsed_time))
+        return response
+
+      if elapsed_time >= timeout:
+        break
+      time.sleep(min(1.5 ** iteration, timeout - elapsed_time))
+      iteration += 1
+
+    raise exceptions.TimeoutError()
+
+
+def _Service():
+  """Returns an initialized and authorized BigQuery client."""
+  # pylint: disable=no-member
+  credentials = client.GoogleCredentials.get_application_default()
+  if credentials.create_scoped_required():
+    credentials = credentials.create_scoped(
+        'https://www.googleapis.com/auth/bigquery')
+  return build('bigquery', 'v2', credentials=credentials)
diff --git a/catapult/firefighter/base/constants.py b/catapult/firefighter/base/constants.py
new file mode 100644
index 0000000..88aa2ab
--- /dev/null
+++ b/catapult/firefighter/base/constants.py
@@ -0,0 +1,39 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+# BigQuery constants.
+
+DATASET = 'public'
+BUILDS_TABLE = 'builds'
+CURRENT_BUILDS_TABLE = 'current_builds'
+
+
+# `Default` module constants.
+
+DEFAULT_HISTORY_DURATION_SECONDS = 60 * 60 * 12
+
+
+# `Update` module constants.
+
+BUILDBOT_BASE_URL = 'https://build.chromium.org/p'
+
+
+MASTER_NAMES = (
+    'chromium.perf',
+    'client.catapult',
+    'tryserver.chromium.perf',
+    'tryserver.client.catapult',
+)
+
+
+# Code organization / deployment constants.
+
+GCLOUD_THIRD_PARTY_LIBRARIES = (
+    'apiclient',
+    'httplib2',
+    'oauth2client',
+    'six',
+    'uritemplate',
+)
diff --git a/catapult/firefighter/base/exceptions.py b/catapult/firefighter/base/exceptions.py
new file mode 100644
index 0000000..963cf78
--- /dev/null
+++ b/catapult/firefighter/base/exceptions.py
@@ -0,0 +1,11 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class QueryError(Exception):
+  """Raised when a BigQuery query fails."""
+
+
+class TimeoutError(Exception):
+  """Raised when an operation takes longer than its specified timeout."""
diff --git a/catapult/firefighter/bin/deploy b/catapult/firefighter/bin/deploy
new file mode 100755
index 0000000..e96b629
--- /dev/null
+++ b/catapult/firefighter/bin/deploy
@@ -0,0 +1,23 @@
+#! /usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import subprocess
+import sys
+
+sys.path.append(os.path.dirname(os.path.realpath(__file__)))
+import packaging
+
+
+def main():
+  root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+  with packaging.TempAppDir(root_dir, symlinks=False) as temp_app_dir:
+    yaml_files = list(packaging.Yamls(temp_app_dir))
+    args = ['gcloud', 'preview', 'app', 'deploy'] + yaml_files + sys.argv[1:]
+    subprocess.call(args, cwd=temp_app_dir)
+
+
+if __name__ == '__main__':
+  main()
diff --git a/catapult/firefighter/bin/packaging.py b/catapult/firefighter/bin/packaging.py
new file mode 100644
index 0000000..2814c98
--- /dev/null
+++ b/catapult/firefighter/bin/packaging.py
@@ -0,0 +1,94 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# TODO(dtu): Merge this file with its counterparts in catapult_build/ when
+# dashboard and PI migrate to Google Cloud SDK.
+
+import contextlib
+import os
+import re
+import shutil
+import subprocess
+import sys
+import tempfile
+
+sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
+from base import constants
+
+
+def Modules(root_dir):
+  """Yields module names in root_dir."""
+  for root, _, files in os.walk(root_dir):
+    for file_name in files:
+      if os.path.splitext(file_name)[1] == '.yaml':
+        yield os.path.basename(root)
+        break
+
+
+def Yamls(root_dir):
+  """Yields yaml files in root_dir."""
+  for root, _, files in os.walk(root_dir):
+    for file_name in files:
+      if os.path.splitext(file_name)[1] == '.yaml':
+        yield os.path.join(root, file_name)
+
+
+@contextlib.contextmanager
+def TempAppDir(root_dir, symlinks):
+  """Sets up and tears down a directory for deploying or running an app.
+
+  Args:
+    root_dir: The root directory of the app.
+    symlinks: If true, use symbolic links instead of copying files. This allows
+        the dev server to detect file changes in the repo, and is faster.
+
+  Yields:
+    The path to the temporary directory.
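+
+  Example (mirrors the usage in bin/run and bin/deploy):
+    with TempAppDir(root_dir, symlinks=True) as temp_app_dir:
+      yaml_files = list(Yamls(temp_app_dir))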
+  """
+  if symlinks:
+    link = os.symlink
+  else:
+    def Link(src, dest):
+      if os.path.isdir(src):
+        return shutil.copytree(src, dest)
+      else:
+        return shutil.copy2(src, dest)
+    link = Link
+
+  gcloud_lib_dir = _GcloudLibDir()
+  temp_app_dir = tempfile.mkdtemp(prefix='app-')
+  try:
+    for module in Modules(root_dir):
+      module_source_dir = os.path.join(root_dir, module)
+      module_dest_dir = os.path.join(temp_app_dir, module)
+      os.mkdir(module_dest_dir)
+
+      # Copy/symlink module into app directory.
+      for node in os.listdir(module_source_dir):
+        link(os.path.join(module_source_dir, node),
+             os.path.join(module_dest_dir, node))
+
+      # Copy/symlink base/ into module directory.
+      link(os.path.join(root_dir, 'base'),
+           os.path.join(module_dest_dir, 'base'))
+
+      # Copy/symlink Gcloud library dependencies into module directory.
+      third_party_dest_dir = os.path.join(module_dest_dir, 'third_party')
+      os.mkdir(third_party_dest_dir)
+      open(os.path.join(third_party_dest_dir, '__init__.py'), 'w').close()
+      for library in constants.GCLOUD_THIRD_PARTY_LIBRARIES:
+        link(os.path.join(gcloud_lib_dir, library),
+             os.path.join(third_party_dest_dir, library))
+
+    yield temp_app_dir
+  finally:
+    shutil.rmtree(temp_app_dir)
+
+
+def _GcloudLibDir():
+  process = subprocess.Popen(['gcloud', 'info'], stdout=subprocess.PIPE)
+  stdout, _ = process.communicate()
+  gcloud_root_dir = re.search(r'^Installation Root: \[(.*)\]$', stdout,
+                              flags=re.MULTILINE).groups()[0]
+  return os.path.join(gcloud_root_dir, 'lib', 'third_party')
diff --git a/catapult/firefighter/bin/run b/catapult/firefighter/bin/run
new file mode 100755
index 0000000..61da778
--- /dev/null
+++ b/catapult/firefighter/bin/run
@@ -0,0 +1,35 @@
+#! /usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import subprocess
+import sys
+import time
+
+
+sys.path.append(os.path.dirname(os.path.realpath(__file__)))
+import packaging
+
+
+def main():
+  root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+  with packaging.TempAppDir(root_dir, symlinks=True) as temp_app_dir:
+    yaml_files = list(file_name for file_name in packaging.Yamls(temp_app_dir)
+                      if os.path.basename(file_name) != 'cron.yaml')
+    args = ['dev_appserver.py'] + yaml_files + sys.argv[1:]
+
+    server = subprocess.Popen(args, cwd=temp_app_dir)
+    try:
+      server.wait()
+    except KeyboardInterrupt:
+      server.wait()
+      # It's pretty much impossible to wait for all the subprocesses of the dev
+      # server to finish, as they all shut down asynchronously. But their
+      # shutdown timeouts are generally 1 second, so that's probably enough.
+      time.sleep(1)
+
+
+if __name__ == '__main__':
+  main()
diff --git a/catapult/firefighter/default/Dockerfile b/catapult/firefighter/default/Dockerfile
new file mode 100644
index 0000000..a5d9971
--- /dev/null
+++ b/catapult/firefighter/default/Dockerfile
@@ -0,0 +1,11 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+FROM gcr.io/google_appengine/python-compat
+
+# The Python standard runtime is based on Debian Wheezy. Use Stretch to get SciPy 0.16.
+RUN echo "deb http://gce_debian_mirror.storage.googleapis.com stretch main" >> /etc/apt/sources.list
+RUN apt-get update && apt-get install -y -t stretch python-numpy python-scipy
+
+ADD . /app
diff --git a/catapult/firefighter/default/app.yaml b/catapult/firefighter/default/app.yaml
new file mode 100644
index 0000000..c0091d4
--- /dev/null
+++ b/catapult/firefighter/default/app.yaml
@@ -0,0 +1,55 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+runtime: python27
+vm: true
+api_version: 1
+threadsafe: true
+
+resources:
+  cpu: .5
+  memory_gb: 1.3
+  disk_size_gb: 10
+
+automatic_scaling:
+  min_num_instances: 2
+  max_num_instances: 20
+  cool_down_period_sec: 60
+  cpu_utilization:
+    target_utilization: 0.5
+
+handlers:
+- url: /favicon\.ico
+  static_files: static/favicon.ico
+  upload: static/favicon\.ico
+  secure: always
+
+# Tracing and its dependencies.
+- url: /tracing
+  static_dir: tracing/tracing
+  secure: always
+
+- url: /components
+  static_dir: tracing/third_party/components
+  secure: always
+
+- url: /d3.min.js
+  static_files: tracing/third_party/d3/d3.min.js
+  upload: tracing/third_party/d3/d3\.min\.js
+  secure: always
+
+- url: /gl-matrix-min.js
+  static_files: tracing/third_party/gl-matrix/dist/gl-matrix-min.js
+  upload: tracing/third_party/gl-matrix/dist/gl-matrix-min\.js
+  secure: always
+
+- url: /jszip.min.js
+  static_files: tracing/third_party/jszip/jszip.min.js
+  upload: tracing/third_party/jszip/jszip\.min\.js
+  secure: always
+
+# Catch-all.
+- url: /.*
+  script: main.app
+  secure: always
diff --git a/catapult/firefighter/default/appengine_config.py b/catapult/firefighter/default/appengine_config.py
new file mode 100644
index 0000000..8cc1dea
--- /dev/null
+++ b/catapult/firefighter/default/appengine_config.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from google.appengine.ext import vendor
+
+
+vendor.add('third_party')
diff --git a/catapult/perf_insights/third_party/__init__.py b/catapult/firefighter/default/common/__init__.py
similarity index 100%
copy from catapult/perf_insights/third_party/__init__.py
copy to catapult/firefighter/default/common/__init__.py
diff --git a/catapult/firefighter/default/common/jinja.py b/catapult/firefighter/default/common/jinja.py
new file mode 100644
index 0000000..5413d3e
--- /dev/null
+++ b/catapult/firefighter/default/common/jinja.py
@@ -0,0 +1,14 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+
+import jinja2
+
+
+_TEMPLATES_DIR = os.path.join(
+    os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'templates')
+
+
+ENVIRONMENT = jinja2.Environment(loader=jinja2.FileSystemLoader(_TEMPLATES_DIR))
diff --git a/catapult/firefighter/default/common/query_filter.py b/catapult/firefighter/default/common/query_filter.py
new file mode 100644
index 0000000..b5785d0
--- /dev/null
+++ b/catapult/firefighter/default/common/query_filter.py
@@ -0,0 +1,72 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import re
+import time
+
+
+_INTEGER_PARAMETERS = (
+    'build',
+    'device_shard',
+    'host_shard',
+    'status',
+)
+
+
+# TODO(dtu): Pull these from table.
+_STRING_PARAMETERS = (
+    'benchmark',
+    'builder',
+    'configuration',
+    'device_id',
+    'hostname',
+    'master',
+    'os',
+    'os_version',
+    'role',
+)
+
+
+def Filters(request):
+  filters = {}
+
+  for parameter_name in _INTEGER_PARAMETERS:
+    parameter_values = request.get_all(parameter_name)
+    if parameter_values:
+      filters[parameter_name] = map(int, parameter_values)
+
+  for parameter_name in _STRING_PARAMETERS:
+    parameter_values = request.get_all(parameter_name)
+    if parameter_values:
+      for parameter_value in parameter_values:
+        # The '-' is escaped so it matches a literal dash, not a range.
+        if re.search(r'[^A-Za-z0-9\(\)\-_. ]', parameter_value):
+          raise ValueError('invalid %s: "%s"' %
+                           (parameter_name, parameter_value))
+      filters[parameter_name] = parameter_values
+
+  start_time = request.get('start_time')
+  if start_time:
+    filters['start_time'] = _ParseTime(start_time)
+
+  return filters
+
+
+def _ParseTime(time_parameter):
+  units = {
+      's': 1,
+      'm': 60,
+      'h': 60 * 60,
+      'd': 60 * 60 * 24,
+      'w': 60 * 60 * 24 * 7,
+  }
+  unit = time_parameter[-1]
+  if unit in units:
+    time_delta = -abs(float(time_parameter[:-1]))
+    time_parameter = time_delta * units[unit]
+  else:
+    time_parameter = float(time_parameter)
+
+  if time_parameter < 0:
+    time_parameter = time.time() + time_parameter
+  return time_parameter
diff --git a/catapult/firefighter/default/cron.yaml b/catapult/firefighter/default/cron.yaml
new file mode 100644
index 0000000..079dc7d
--- /dev/null
+++ b/catapult/firefighter/default/cron.yaml
@@ -0,0 +1,9 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+cron:
+- description: update build info
+  url: /builds
+  schedule: every 10 minutes
+  target: update
diff --git a/catapult/firefighter/default/dispatch.yaml b/catapult/firefighter/default/dispatch.yaml
new file mode 100644
index 0000000..682c0dd
--- /dev/null
+++ b/catapult/firefighter/default/dispatch.yaml
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+dispatch:
+# Default module serves the typical web resources and all static resources.
+- url: "*/favicon.ico"
+  module: default
diff --git a/catapult/perf_insights/third_party/__init__.py b/catapult/firefighter/default/handlers/__init__.py
similarity index 100%
copy from catapult/perf_insights/third_party/__init__.py
copy to catapult/firefighter/default/handlers/__init__.py
diff --git a/catapult/firefighter/default/handlers/blank.py b/catapult/firefighter/default/handlers/blank.py
new file mode 100644
index 0000000..12303d4
--- /dev/null
+++ b/catapult/firefighter/default/handlers/blank.py
@@ -0,0 +1,11 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import webapp2
+
+
+class Blank(webapp2.RequestHandler):
+
+  def get(self):
+    pass
diff --git a/catapult/firefighter/default/handlers/query.py b/catapult/firefighter/default/handlers/query.py
new file mode 100644
index 0000000..0305293
--- /dev/null
+++ b/catapult/firefighter/default/handlers/query.py
@@ -0,0 +1,114 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import time
+
+from google.appengine.api import urlfetch
+import webapp2
+
+from base import bigquery
+from base import constants
+from common import query_filter
+
+
+class Query(webapp2.RequestHandler):
+
+  def get(self):
+    urlfetch.set_default_fetch_deadline(60)
+
+    try:
+      filters = query_filter.Filters(self.request)
+    except ValueError as e:
+      self.response.headers['Content-Type'] = 'application/json'
+      self.response.out.write(json.dumps({'error': str(e)}))
+      return
+    query_results = _QueryEvents(bigquery.BigQuery(), **filters)
+    trace_events = list(_ConvertQueryEventsToTraceEvents(query_results))
+
+    self.response.headers['Content-Type'] = 'application/json'
+    self.response.out.write(json.dumps(trace_events, separators=(',', ':')))
+
+
+def _QueryEvents(bq, **filters):
+  start_time = filters.get(
+      'start_time', time.time() - constants.DEFAULT_HISTORY_DURATION_SECONDS)
+  query_start_time_ms = int(start_time * 1000)
+  query_start_time_us = int(start_time * 1000000)
+
+  fields = (
+      'name',
+      'GREATEST(INTEGER(start_time), %d) AS start_time_us' %
+      query_start_time_us,
+      'INTEGER(end_time) AS end_time_us',
+      'builder',
+      'configuration',
+      'hostname',
+      'status',
+      'url',
+  )
+
+  tables = (constants.BUILDS_TABLE, constants.CURRENT_BUILDS_TABLE)
+  tables = ['[%s.%s@%d-]' % (constants.DATASET, table, query_start_time_ms)
+            for table in tables]
+
+  conditions = []
+  conditions.append('NOT LOWER(name) CONTAINS "trigger"')
+  conditions.append('end_time - start_time >= 1000000')
+  conditions.append('end_time > %d' % query_start_time_us)
+  for filter_name, filter_values in filters.iteritems():
+    if not isinstance(filter_values, list):
+      continue
+
+    if isinstance(filter_values[0], int):
+      filter_values = map(str, filter_values)
+    elif isinstance(filter_values[0], basestring):
+      # QueryFilter handles string validation. Assume no quotes in string.
+      filter_values = ['"%s"' % v for v in filter_values]
+    else:
+      raise NotImplementedError()
+
+    conditions.append('%s IN (%s)' % (filter_name, ','.join(filter_values)))
+
+  query = ('SELECT %s ' % ','.join(fields) +
+           'FROM %s ' % ','.join(tables) +
+           'WHERE %s ' % ' AND '.join(conditions) +
+           'ORDER BY builder, start_time')
+  return bq.QuerySync(query)
+
+
+def _ConvertQueryEventsToTraceEvents(events):
+  for row in events:
+    event_start_time_us = int(row['f'][1]['v'])
+    event_end_time_us = int(row['f'][2]['v'])
+
+    status = row['f'][6]['v']
+    if status:
+      status = int(status)
+      # TODO: Use constants from update/common/buildbot/__init__.py.
+      if status == 0:
+        color_name = 'cq_build_passed'
+      elif status == 1:
+        color_name = 'cq_build_warning'
+      elif status == 2:
+        color_name = 'cq_build_failed'
+      elif status == 4:
+        color_name = 'cq_build_exception'
+      elif status == 5:
+        color_name = 'cq_build_abandoned'
+      else:
+        # Unrecognized status; use the exception color so color_name is never
+        # left unbound.
+        color_name = 'cq_build_exception'
+    else:
+      color_name = 'cq_build_running'
+
+    yield {
+        'name': row['f'][0]['v'],
+        'pid': row['f'][4]['v'],
+        'tid': '%s [%s]' % (row['f'][3]['v'], row['f'][5]['v']),
+        'ph': 'X',
+        'ts': event_start_time_us,
+        'dur': event_end_time_us - event_start_time_us,
+        'cname': color_name,
+        'args': {
+            'url': row['f'][7]['v'],
+        },
+    }
diff --git a/catapult/firefighter/default/handlers/trace.py b/catapult/firefighter/default/handlers/trace.py
new file mode 100644
index 0000000..2cc5359
--- /dev/null
+++ b/catapult/firefighter/default/handlers/trace.py
@@ -0,0 +1,35 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import urllib
+
+import webapp2
+
+from common import jinja
+from common import query_filter
+
+
+class Trace(webapp2.RequestHandler):
+
+  def get(self):
+    try:
+      filters = query_filter.Filters(self.request)
+    except ValueError as e:
+      self.response.headers['Content-Type'] = 'application/json'
+      self.response.out.write(json.dumps({'error': str(e)}))
+      return
+
+    query_parameters = []
+    for filter_name, filter_values in filters.iteritems():
+      if filter_name == 'start_time':
+        query_parameters.append(('start_time', filter_values))
+      else:
+        for filter_value in filter_values:
+          query_parameters.append((filter_name, filter_value))
+    template_values = {
+        'query_string': urllib.urlencode(query_parameters),
+    }
+
+    template = jinja.ENVIRONMENT.get_template('trace.html')
+    self.response.out.write(template.render(template_values))
diff --git a/catapult/firefighter/default/main.py b/catapult/firefighter/default/main.py
new file mode 100644
index 0000000..bf8ea68
--- /dev/null
+++ b/catapult/firefighter/default/main.py
@@ -0,0 +1,18 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import webapp2
+
+from handlers import blank
+from handlers import query
+from handlers import trace
+
+
+_URL_MAPPING = [
+    ('/', trace.Trace),
+    ('/_ah/health', blank.Blank),
+    ('/_ah/start', blank.Blank),
+    ('/query', query.Query),
+]
+app = webapp2.WSGIApplication(_URL_MAPPING)  # pylint: disable=invalid-name
diff --git a/catapult/firefighter/default/static/favicon.ico b/catapult/firefighter/default/static/favicon.ico
new file mode 100644
index 0000000..0358bb4
--- /dev/null
+++ b/catapult/firefighter/default/static/favicon.ico
Binary files differ
diff --git a/catapult/firefighter/default/tracing/third_party b/catapult/firefighter/default/tracing/third_party
new file mode 120000
index 0000000..2bfb90f
--- /dev/null
+++ b/catapult/firefighter/default/tracing/third_party
@@ -0,0 +1 @@
+../../../tracing/third_party
\ No newline at end of file
diff --git a/catapult/firefighter/default/tracing/tracing b/catapult/firefighter/default/tracing/tracing
new file mode 120000
index 0000000..d3f48c5
--- /dev/null
+++ b/catapult/firefighter/default/tracing/tracing
@@ -0,0 +1 @@
+../../../tracing/tracing
\ No newline at end of file
diff --git a/catapult/firefighter/pylintrc b/catapult/firefighter/pylintrc
new file mode 100644
index 0000000..6f9c9b2
--- /dev/null
+++ b/catapult/firefighter/pylintrc
@@ -0,0 +1,71 @@
+[MESSAGES CONTROL]
+
+# Disable the message, report, category or checker with the given id(s).
+# TODO: Shrink this list to as small as possible.
+disable=
+  design,
+  similarities,
+
+  fixme,
+  import-error,
+  locally-disabled,
+  locally-enabled,
+  missing-docstring,
+  no-self-use,
+  protected-access,
+  star-args,
+
+
+[REPORTS]
+
+# Don't write out full reports, just messages.
+reports=no
+
+
+[BASIC]
+
+# Regular expression which should only match correct function names.
+function-rgx=^(?:(?P<exempt>setUp|tearDown|setUpModule|tearDownModule)|(?P<camel_case>_?[A-Z][a-zA-Z0-9]*))$
+
+# Regular expression which should only match correct method names.
+method-rgx=^(?:(?P<exempt>_[a-z0-9_]+__|get|post|runTest|setUp|tearDown|setUpTestCase|tearDownTestCase|setupSelf|tearDownClass|setUpClass)|(?P<camel_case>(_{0,2}|test|assert)[A-Z][a-zA-Z0-9_]*))$
+
+# Regular expression which should only match correct instance attribute names
+attr-rgx=^_{0,2}[a-z][a-z0-9_]*$
+
+# Regular expression which should only match correct argument names.
+argument-rgx=^[a-z][a-z0-9_]*$
+
+# Regular expression which should only match correct variable names.
+variable-rgx=^[a-z][a-z0-9_]*$
+
+# Good variable names which should always be accepted, separated by a comma.
+good-names=main,_
+
+# List of builtin function names that should not be used, separated by a comma.
+bad-functions=apply,input,reduce
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused imports in __init__ files.
+init-import=no
+
+# A regular expression matching names used for dummy variables (i.e. not used).
+dummy-variables-rgx=^\*{0,2}(_$|unused_)
+
+
+[TYPECHECK]
+
+# Tells whether missing members accessed in a mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+
+[FORMAT]
+
+# We use two spaces for indents, instead of the usual four spaces or tab.
+indent-string='  '
+
+# Make sure : in dicts and trailing commas are checked for whitespace.
+no-space-check=
diff --git a/catapult/firefighter/schemas/builds.json b/catapult/firefighter/schemas/builds.json
new file mode 100644
index 0000000..c8bcf17
--- /dev/null
+++ b/catapult/firefighter/schemas/builds.json
@@ -0,0 +1,74 @@
+[
+  {
+    "name": "name",
+    "type": "STRING",
+    "mode": "REQUIRED"
+  },
+  {
+    "name": "start_time",
+    "type": "TIMESTAMP",
+    "mode": "REQUIRED"
+  },
+  {
+    "name": "end_time",
+    "type": "TIMESTAMP",
+    "mode": "REQUIRED"
+  },
+
+  {
+    "name": "benchmark",
+    "type": "STRING"
+  },
+  {
+    "name": "build",
+    "type": "INTEGER"
+  },
+  {
+    "name": "builder",
+    "type": "STRING"
+  },
+  {
+    "name": "configuration",
+    "type": "STRING"
+  },
+  {
+    "name": "device_id",
+    "type": "STRING"
+  },
+  {
+    "name": "device_shard",
+    "type": "INTEGER"
+  },
+  {
+    "name": "host_shard",
+    "type": "INTEGER"
+  },
+  {
+    "name": "hostname",
+    "type": "STRING"
+  },
+  {
+    "name": "master",
+    "type": "STRING"
+  },
+  {
+    "name": "os",
+    "type": "STRING"
+  },
+  {
+    "name": "os_version",
+    "type": "STRING"
+  },
+  {
+    "name": "role",
+    "type": "STRING"
+  },
+  {
+    "name": "status",
+    "type": "INTEGER"
+  },
+  {
+    "name": "url",
+    "type": "STRING"
+  }
+]
diff --git a/catapult/firefighter/update/Dockerfile b/catapult/firefighter/update/Dockerfile
new file mode 100644
index 0000000..a5d9971
--- /dev/null
+++ b/catapult/firefighter/update/Dockerfile
@@ -0,0 +1,11 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+FROM gcr.io/google_appengine/python-compat
+
+# The Python standard runtime is based on Debian Wheezy. Use Stretch to get SciPy 0.16.
+RUN echo "deb http://gce_debian_mirror.storage.googleapis.com stretch main" >> /etc/apt/sources.list
+RUN apt-get update && apt-get install -y -t stretch python-numpy python-scipy
+
+ADD . /app
diff --git a/catapult/firefighter/update/appengine_config.py b/catapult/firefighter/update/appengine_config.py
new file mode 100644
index 0000000..8cc1dea
--- /dev/null
+++ b/catapult/firefighter/update/appengine_config.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from google.appengine.ext import vendor
+
+
+vendor.add('third_party')
diff --git a/catapult/perf_insights/third_party/__init__.py b/catapult/firefighter/update/common/__init__.py
similarity index 100%
copy from catapult/perf_insights/third_party/__init__.py
copy to catapult/firefighter/update/common/__init__.py
diff --git a/catapult/firefighter/update/common/buildbot/__init__.py b/catapult/firefighter/update/common/buildbot/__init__.py
new file mode 100644
index 0000000..aa791e4
--- /dev/null
+++ b/catapult/firefighter/update/common/buildbot/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from common.buildbot.builder import Builders
+from common.buildbot.slave import Slaves
+
+
+PENDING = None
+SUCCESS = 0
+WARNING = 1
+FAILURE = 2
+EXCEPTION = 4
+SLAVE_LOST = 5
diff --git a/catapult/firefighter/update/common/buildbot/build.py b/catapult/firefighter/update/common/buildbot/build.py
new file mode 100644
index 0000000..b89156e
--- /dev/null
+++ b/catapult/firefighter/update/common/buildbot/build.py
@@ -0,0 +1,74 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from common.buildbot import step
+
+
+class Build(object):
+
+  def __init__(self, data, builder_url):
+    self._number = data['number']
+    self._slave_name = data['slave']
+    self._status = data['results']
+    self._start_time, self._end_time = data['times']
+    self._url = '%s/builds/%d' % (builder_url, self._number)
+
+    source_stamp = data['sourceStamp']
+    # Default to None so the revision properties below are always defined.
+    # Some source stamps carry no revision or change information.
+    self._revision = None
+    self._revision_time = None
+    if 'revision' in source_stamp:
+      self._revision = source_stamp['revision']
+    if 'changes' in source_stamp and source_stamp['changes']:
+      self._revision_time = source_stamp['changes'][-1]['when']
+
+    self._steps = tuple(step.Step(step_data, self._url)
+                        for step_data in data['steps'])
+
+  def __lt__(self, other):
+    return self.number < other.number
+
+  def __str__(self):
+    return str(self.number)
+
+  @property
+  def number(self):
+    return self._number
+
+  @property
+  def url(self):
+    return self._url
+
+  @property
+  def slave_name(self):
+    return self._slave_name
+
+  @property
+  def status(self):
+    return self._status
+
+  @property
+  def complete(self):
+    return self.status is not None
+
+  @property
+  def revision(self):
+    return self._revision
+
+  @property
+  def revision_time(self):
+    """The time the revision was committed.
+
+    Warning: may be None if the source stamp has no change information.
+    """
+    return self._revision_time
+
+  @property
+  def start_time(self):
+    return self._start_time
+
+  @property
+  def end_time(self):
+    return self._end_time
+
+  @property
+  def steps(self):
+    return self._steps
diff --git a/catapult/firefighter/update/common/buildbot/builder.py b/catapult/firefighter/update/common/buildbot/builder.py
new file mode 100644
index 0000000..9c2fc37
--- /dev/null
+++ b/catapult/firefighter/update/common/buildbot/builder.py
@@ -0,0 +1,96 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import urllib
+
+from common.buildbot import builds
+from common.buildbot import network
+
+
+def Builders(master_name):
+  builder_data = network.FetchData(network.BuildUrl(
+      master_name, 'json/builders'))
+  return sorted(Builder(master_name, builder_name, builder_info)
+                for builder_name, builder_info in builder_data.iteritems())
+
+
+class Builder(object):
+
+  def __init__(self, master_name, name, data):
+    self._master_name = master_name
+    self._name = name
+    self._url = network.BuildUrl(
+        master_name, 'builders/%s' % urllib.quote(self.name))
+    self._builds = builds.Builds(master_name, name, self._url)
+
+    self.Update(data)
+
+  def __lt__(self, other):
+    return self.name < other.name
+
+  def __str__(self):
+    return self.name
+
+  def Update(self, data=None):
+    if not data:
+      data = network.FetchData(network.BuildUrl(
+          self.master_name, 'json/builders/%s' % urllib.quote(self.name)))
+    self._state = data['state']
+    self._pending_build_count = data['pendingBuilds']
+    self._current_builds = frozenset(data['currentBuilds'])
+    self._cached_builds = frozenset(data['cachedBuilds'])
+    self._slaves = frozenset(data['slaves'])
+
+  @property
+  def master_name(self):
+    return self._master_name
+
+  @property
+  def name(self):
+    return self._name
+
+  @property
+  def url(self):
+    return self._url
+
+  @property
+  def state(self):
+    return self._state
+
+  @property
+  def builds(self):
+    return self._builds
+
+  @property
+  def pending_build_count(self):
+    return self._pending_build_count
+
+  @property
+  def current_builds(self):
+    """Set of build numbers currently building.
+
+    There may be multiple entries if there are multiple build slaves.
+    """
+    return self._current_builds
+
+  @property
+  def cached_builds(self):
+    """Set of builds whose data are visible on the master in increasing order.
+
+    More builds may be available than this.
+    """
+    return self._cached_builds
+
+  @property
+  def available_builds(self):
+    return self.cached_builds - self.current_builds
+
+  @property
+  def last_build(self):
+    """Last completed build."""
+    return max(self.available_builds)
+
+  @property
+  def slaves(self):
+    return self._slaves
diff --git a/catapult/firefighter/update/common/buildbot/builds.py b/catapult/firefighter/update/common/buildbot/builds.py
new file mode 100644
index 0000000..55e9cd3
--- /dev/null
+++ b/catapult/firefighter/update/common/buildbot/builds.py
@@ -0,0 +1,88 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import urllib
+
+from google.appengine.api import urlfetch
+
+from common.buildbot import build
+from common.buildbot import network
+
+
+class Builds(object):
+
+  def __init__(self, master_name, builder_name, url):
+    self._master_name = master_name
+    self._builder_name = builder_name
+    self._url = url
+
+  def __getitem__(self, key):
+    """Fetches a Build object containing build details.
+
+    Args:
+      key: A nonnegative build number.
+
+    Returns:
+      A Build object.
+
+    Raises:
+      TypeError: key is not an int.
+      ValueError: key is negative.
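+
+    Example (given a Builder instance named builder; the number is made up):
+      build = builder.builds[1200]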
+    """
+    # We can't take slices because we don't have a defined length.
+    if not isinstance(key, int):
+      raise TypeError('build numbers must be integers, not %s' %
+                      type(key).__name__)
+
+    return self.Fetch((key,))
+
+  def Fetch(self, build_numbers):
+    """Downloads and returns build details.
+
+    If a build has corrupt data, it is not included in the result. If you
+    strictly need all the builds requested, be sure to check the result length.
+
+    Args:
+      build_numbers: An iterable of build numbers to download.
+
+    Yields:
+      Build objects, in the order requested. Some may be missing.
+
+    Raises:
+      ValueError: A build number is invalid.
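+
+    Example (given a Builder instance named builder; numbers are made up):
+      for b in builder.builds.Fetch((1200, 1201, 1202)):
+        print b.number, b.status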
+    """
+    if not build_numbers:
+      return
+
+    for build_number in build_numbers:
+      if build_number < 0:
+        raise ValueError('Invalid build number: %d' % build_number)
+
+    build_query = urllib.urlencode(
+        [('select', build_number) for build_number in build_numbers])
+    url = 'json/builders/%s/builds/?%s' % (
+        urllib.quote(self._builder_name), build_query)
+    url = network.BuildUrl(self._master_name, url)
+    try:
+      builds = network.FetchData(url).values()
+    except (ValueError, urlfetch.ResponseTooLargeError):
+      # The JSON decode failed, or the data was too large.
+      # Try downloading the builds individually instead.
+      builds = []
+      for build_number in build_numbers:
+        url = 'json/builders/%s/builds/%d' % (
+            urllib.quote(self._builder_name), build_number)
+        url = network.BuildUrl(self._master_name, url)
+        try:
+          builds.append(network.FetchData(url))
+        except (ValueError, urlfetch.ResponseTooLargeError):
+          logging.warning('Unable to fetch %s build %d',
+                          self._master_name, build_number)
+          continue
+
+    for build_data in builds:
+      if 'error' in build_data:
+        continue
+      yield build.Build(build_data, self._url)
diff --git a/catapult/firefighter/update/common/buildbot/network.py b/catapult/firefighter/update/common/buildbot/network.py
new file mode 100644
index 0000000..7eee6d3
--- /dev/null
+++ b/catapult/firefighter/update/common/buildbot/network.py
@@ -0,0 +1,40 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import logging
+
+from google.appengine.api import urlfetch
+from google.appengine.runtime import apiproxy_errors
+
+from base import constants
+
+
+def BuildUrl(master_name, url):
+  return '%s/%s/%s' % (constants.BUILDBOT_BASE_URL, master_name, url)
+
+
+def FetchData(url):
+  try:
+    return json.loads(FetchText(url))
+  except ValueError:
+    logging.warning('Data is corrupt: %s', url)
+    raise
+
+
+def FetchText(url):
+  logging.debug('Retrieving %s', url)
+  try:
+    return urlfetch.fetch(url).content
+  except (apiproxy_errors.DeadlineExceededError, urlfetch.DownloadError,
+          urlfetch.InternalTransientError):
+    # Could be intermittent; try again.
+    try:
+      return urlfetch.fetch(url).content
+    except:
+      logging.error('Error retrieving URL: %s', url)
+      raise
+  except:
+    logging.error('Error retrieving URL: %s', url)
+    raise
diff --git a/catapult/firefighter/update/common/buildbot/slave.py b/catapult/firefighter/update/common/buildbot/slave.py
new file mode 100644
index 0000000..33ad637
--- /dev/null
+++ b/catapult/firefighter/update/common/buildbot/slave.py
@@ -0,0 +1,134 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import cStringIO
+import mimetools
+
+from common.buildbot import network
+
+
+def Slaves(master_name):
+  slave_data = network.FetchData(network.BuildUrl(master_name, 'json/slaves'))
+  return sorted(Slave(master_name, slave_name, slave_info)
+                for slave_name, slave_info in slave_data.iteritems())
+
+
+class Slave(object):
+
+  def __init__(self, master_name, name, data):
+    self._master_name = master_name
+    self._name = name
+
+    self._builders = frozenset(data['builders'].keys())
+    self._connected = data['connected']
+
+    if data['host']:
+      host_data = dict(mimetools.Message(cStringIO.StringIO(data['host'])))
+      self._bitness = 64 if '64' in host_data['architecture'] else 32
+      self._git_version = host_data['git version']
+      self._hardware = host_data['product name']
+      self._memory = float(host_data['memory total'].split()[0])
+      self._os = _ParseOs(host_data['osfamily'])
+      self._os_version = _ParseOsVersion(self._os, host_data['os version'])
+      self._processor_count = host_data['processor count']
+    else:
+      # The information is populated by Puppet. Puppet doesn't run on our GCE
+      # instances, so if the info is missing, assume it's in GCE.
+      self._bitness = 64
+      self._git_version = None
+      self._hardware = 'Compute Engine'
+      self._memory = None
+      self._os = 'linux'
+      self._os_version = None
+      self._processor_count = None
+
+  def __lt__(self, other):
+    return self.name < other.name
+
+  def __str__(self):
+    return self.name
+
+  @property
+  def master_name(self):
+    return self._master_name
+
+  @property
+  def name(self):
+    return self._name
+
+  @property
+  def builders(self):
+    return self._builders
+
+  @property
+  def bitness(self):
+    return self._bitness
+
+  @property
+  def git_version(self):
+    return self._git_version
+
+  @property
+  def hardware(self):
+    """Returns the model of the hardware.
+
+    For example, "MacBookPro11,2", "PowerEdge R220", or "Compute Engine".
+    """
+    return self._hardware
+
+  @property
+  def memory(self):
+    """Returns the quantity of RAM, in GB, as a float."""
+    return self._memory
+
+  @property
+  def os(self):
+    """Returns the canonical os name string.
+
+    The return value must be in the following list:
+    https://chromium.googlesource.com/infra/infra/+/HEAD/doc/users/services/buildbot/builders.pyl.md#os
+    """
+    return self._os
+
+  @property
+  def os_version(self):
+    """Returns the canonical major os version name string.
+
+    The return value must be in the following table:
+    https://chromium.googlesource.com/infra/infra/+/HEAD/doc/users/services/buildbot/builders.pyl.md#version
+    """
+    return self._os_version
+
+  @property
+  def processor_count(self):
+    return self._processor_count
+
+
+def _ParseOs(os_family):
+  return {
+      'darwin': 'mac',
+      'debian': 'linux',
+      'windows': 'win',
+  }[os_family.lower()]
+
+
+def _ParseOsVersion(os, os_version):
+  if os == 'mac':
+    return '.'.join(os_version.split('.')[:2])
+  elif os == 'linux':
+    return {
+        '12.04': 'precise',
+        '14.04': 'trusty',
+    }[os_version]
+  elif os == 'win':
+    return {
+        '5.1.2600': 'xp',
+        '6.0.6001': 'vista',
+        '2008 R2': '2008',  # 2008 R2
+        '7': 'win7',
+        '6.3.9600': 'win8',  # 8.1
+        '10.0.10240': 'win10',
+    }[os_version]
+  else:
+    raise ValueError('"%s" is not a valid os string.' % os)
diff --git a/catapult/firefighter/update/common/buildbot/step.py b/catapult/firefighter/update/common/buildbot/step.py
new file mode 100644
index 0000000..ff25551
--- /dev/null
+++ b/catapult/firefighter/update/common/buildbot/step.py
@@ -0,0 +1,134 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+import re
+import urllib
+
+from common.buildbot import network
+
+
+StackTraceLine = collections.namedtuple(
+    'StackTraceLine', ('file', 'function', 'line', 'source'))
+
+
+class Step(object):
+
+  def __init__(self, data, build_url):
+    self._name = data['name']
+    self._status = data['results'][0]
+    self._start_time, self._end_time = data['times']
+    self._url = '%s/steps/%s' % (build_url, urllib.quote(self._name))
+
+    self._log_link = None
+    self._results_link = None
+    for link_name, link_url in data['logs']:
+      if link_name == 'stdio':
+        self._log_link = link_url + '/text'
+      elif link_name == 'json.output':
+        self._results_link = link_url + '/text'
+
+    # Property caches.
+    self._log = None
+    self._results = None
+    self._stack_trace = None
+
+  def __str__(self):
+    return self.name
+
+  @property
+  def name(self):
+    return self._name
+
+  @property
+  def url(self):
+    return self._url
+
+  @property
+  def status(self):
+    return self._status
+
+  @property
+  def start_time(self):
+    return self._start_time
+
+  @property
+  def end_time(self):
+    return self._end_time
+
+  @property
+  def log_link(self):
+    return self._log_link
+
+  @property
+  def results_link(self):
+    return self._results_link
+
+  @property
+  def log(self):
+    if self._log is None:
+      if not self.log_link:
+        return None
+
+      self._log = network.FetchText(self.log_link)
+    return self._log
+
+  @property
+  def results(self):
+    if self._results is None:
+      if not self.results_link:
+        return None
+
+      self._results = network.FetchData(self.results_link)
+    return self._results
+
+  @property
+  def stack_trace(self):
+    if self._stack_trace is None:
+      self._stack_trace = _ParseTraceFromLog(self.log)
+    return self._stack_trace
+
+
+def _ParseTraceFromLog(log):
+  """Searches the log for a stack trace and returns a structured representation.
+
+  This function supports both default Python-style stacks and Telemetry-style
+  stacks. It returns the first stack trace found in the log; sometimes a bug
+  leads to a cascade of failures, so the first one is usually the root cause.
+
+  Args:
+    log: A string containing Python or Telemetry stack traces.
+
+  Returns:
+    Two values, or (None, None) if no stack trace was found.
+    The first is a tuple of StackTraceLine objects, most recent call last.
+    The second is a string with the type and description of the exception.
+  """
+  log_iterator = iter(log.splitlines())
+  for line in log_iterator:
+    if line == 'Traceback (most recent call last):':
+      break
+  else:
+    return (None, None)
+
+  stack_trace = []
+  while True:
+    line = log_iterator.next()
+    match_python = re.match(r'\s*File "(?P<file>.+)", line (?P<line>[0-9]+), '
+                            r'in (?P<function>.+)', line)
+    match_telemetry = re.match(r'\s*(?P<function>.+) at '
+                               r'(?P<file>.+):(?P<line>[0-9]+)', line)
+    match = match_python or match_telemetry
+    if not match:
+      exception = line
+      break
+    trace_line = match.groupdict()
+    # Use the base name, because the path will be different
+    # across platforms and configurations.
+    file_base_name = trace_line['file'].split('/')[-1].split('\\')[-1]
+    source = log_iterator.next().strip()
+    stack_trace.append(StackTraceLine(
+        file_base_name, trace_line['function'], trace_line['line'], source))
+
+  return tuple(stack_trace), exception
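A minimal sketch of what _ParseTraceFromLog returns for a Python-style traceback (editorial illustration; the sample log text and the import path are assumptions):

from common.buildbot import step  # assumed import path for the new module

sample_log = '\n'.join([
    'Traceback (most recent call last):',
    '  File "/b/build/slave/foo/run_benchmark.py", line 42, in Main',
    '    RunBenchmark()',
    'AssertionError: benchmark failed',
])
stack, exception = step._ParseTraceFromLog(sample_log)
assert exception == 'AssertionError: benchmark failed'
# Only the base name of the file is kept; the line number stays a string.
assert stack == (step.StackTraceLine(
    'run_benchmark.py', 'Main', '42', 'RunBenchmark()'),)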
diff --git a/catapult/perf_insights/third_party/__init__.py b/catapult/firefighter/update/handlers/__init__.py
similarity index 100%
copy from catapult/perf_insights/third_party/__init__.py
copy to catapult/firefighter/update/handlers/__init__.py
diff --git a/catapult/firefighter/update/handlers/blank.py b/catapult/firefighter/update/handlers/blank.py
new file mode 100644
index 0000000..12303d4
--- /dev/null
+++ b/catapult/firefighter/update/handlers/blank.py
@@ -0,0 +1,11 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import webapp2
+
+
+class Blank(webapp2.RequestHandler):
+
+  def get(self):
+    pass
diff --git a/catapult/firefighter/update/handlers/builds.py b/catapult/firefighter/update/handlers/builds.py
new file mode 100644
index 0000000..2bf912d
--- /dev/null
+++ b/catapult/firefighter/update/handlers/builds.py
@@ -0,0 +1,235 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+import logging
+import re
+import time
+
+from google.appengine.api import urlfetch
+import webapp2
+
+from base import bigquery
+from base import constants
+from common import buildbot
+
+
+class Builds(webapp2.RequestHandler):
+
+  def get(self):
+    urlfetch.set_default_fetch_deadline(300)
+
+    bq = bigquery.BigQuery()
+
+    current_events = []
+    events = []
+    for master_name in constants.MASTER_NAMES:
+      builders = buildbot.Builders(master_name)
+      available_builds = _AvailableBuilds(builders)
+      recorded_builds = _RecordedBuilds(bq, builders, available_builds)
+      for builder in builders:
+        # Filter out recorded builds from available builds.
+        build_numbers = (available_builds[builder.name] -
+                         recorded_builds[builder.name])
+        builder_current_events, builder_events = _TraceEventsForBuilder(
+            builder, build_numbers)
+        current_events += builder_current_events
+        events += builder_events
+
+    jobs = []
+    if current_events:
+      jobs += bq.InsertRowsAsync(
+          constants.DATASET, constants.CURRENT_BUILDS_TABLE,
+          current_events, truncate=True)
+    if events:
+      jobs += bq.InsertRowsAsync(constants.DATASET, constants.BUILDS_TABLE,
+                                 events)
+
+    for job in jobs:
+      bq.PollJob(job, 60 * 20)  # 20 minutes.
+
+
+def _AvailableBuilds(builders):
+  available_builds = {}
+  for builder in builders:
+    if not builder.cached_builds:
+      available_builds[builder.name] = frozenset()
+      continue
+
+    max_build = max(builder.cached_builds)
+    # Buildbot on tryserver.chromium.perf occasionally includes build 0 in
+    # its list of cached builds, which results in more builds than we want.
+    # Limit the list to the last 100 builds, because the urlfetch URL limit
+    # is 2048 bytes, and "&select=100000" * 100 is 1400 bytes.
+    builds = frozenset(build for build in builder.cached_builds
+                       if build >= max_build - 100)
+    available_builds[builder.name] = builds
+  return available_builds
+
+
+def _RecordedBuilds(bq, builders, available_builds):
+  # 105 days / 15 weeks. The window must be longer than 100 days, because we
+  # request up to 100 builds (see the comment above), and the slowest cron
+  # bots run only one job per day.
+  start_time_ms = -1000 * 60 * 60 * 24 * 105
+  table = '%s.%s@%d-' % (constants.DATASET, constants.BUILDS_TABLE,
+                         start_time_ms)
+
+  conditions = []
+  for builder in builders:
+    if not available_builds[builder.name]:
+      continue
+    max_build = max(available_builds[builder.name])
+    min_build = min(available_builds[builder.name])
+    conditions.append('WHEN builder = "%s" THEN build >= %d AND build <= %d' %
+                      (builder.name, min_build, max_build))
+
+  query = (
+      'SELECT builder, build '
+      'FROM [%s] ' % table +
+      'WHERE CASE %s END ' % ' '.join(conditions) +
+      'GROUP BY builder, build'
+  )
+  query_result = bq.QuerySync(query, 600)
+
+  builds = collections.defaultdict(set)
+  for row in query_result:
+    builds[row['f'][0]['v']].add(int(row['f'][1]['v']))
+  return builds
+
+
+def _TraceEventsForBuilder(builder, build_numbers):
+  if not build_numbers:
+    return (), ()
+
+  build_numbers_string = ', '.join(map(str, sorted(build_numbers)))
+  logging.info('Getting %s: %s', builder.name, build_numbers_string)
+
+  # Fetch build information and generate trace events.
+  current_events = []
+  events = []
+
+  builder_builds = builder.builds.Fetch(build_numbers)
+  query_time = time.time()
+  for build in builder_builds:
+    if build.complete:
+      events += _TraceEventsFromBuild(builder, build, query_time)
+    else:
+      current_events += _TraceEventsFromBuild(builder, build, query_time)
+
+  return current_events, events
+
+
+def _TraceEventsFromBuild(builder, build, query_time):
+  match = re.match(r'(.+) \(([0-9]+)\)', builder.name)
+  if match:
+    configuration, host_shard = match.groups()
+    host_shard = int(host_shard)
+  else:
+    configuration = builder.name
+    host_shard = 0
+
+  # Build trace event.
+  if build.end_time:
+    build_end_time = build.end_time
+  else:
+    build_end_time = query_time
+  os, os_version, role = _ParseBuilderName(builder.master_name, builder.name)
+  yield {
+      'name': 'Build %d' % build.number,
+      'start_time': build.start_time,
+      'end_time': build_end_time,
+
+      'build': build.number,
+      'builder': builder.name,
+      'configuration': configuration,
+      'host_shard': host_shard,
+      'hostname': build.slave_name,
+      'master': builder.master_name,
+      'os': os,
+      'os_version': os_version,
+      'role': role,
+      'status': build.status,
+      'url': build.url,
+  }
+
+  # Step trace events.
+  for step in build.steps:
+    if not step.start_time:
+      continue
+
+    if step.name == 'steps':
+      continue
+
+    if step.end_time:
+      step_end_time = step.end_time
+    else:
+      step_end_time = query_time
+    yield {
+        'name': step.name,
+        'start_time': step.start_time,
+        'end_time': step_end_time,
+
+        'benchmark': step.name,  # TODO(dtu): This isn't always right.
+        'build': build.number,
+        'builder': builder.name,
+        'configuration': configuration,
+        'host_shard': host_shard,
+        'hostname': build.slave_name,
+        'master': builder.master_name,
+        'os': os,
+        'os_version': os_version,
+        'role': role,
+        'status': step.status,
+        'url': step.url,
+    }
+
+
+def _ParseBuilderName(master_name, builder_name):
+  if master_name == 'chromium.perf':
+    match = re.match(r'^([A-Za-z]+)(?: ([0-9\.]+|XP))?([A-Za-z0-9-\. ]+)? '
+                     r'(Builder|Perf)(?: \([0-9]+\))?$', builder_name).groups()
+    os = match[0]
+    if match[1]:
+      os_version = match[1]
+    else:
+      os_version = None
+    if match[3] == 'Builder':
+      role = 'builder'
+    elif match[3] == 'Perf':
+      role = 'tester'
+    else:
+      raise NotImplementedError()
+  elif master_name == 'client.catapult':
+    match = re.match(r'^Catapult(?: ([A-Za-z]+))? ([A-Za-z]+)$',
+                     builder_name).groups()
+    os = match[1]
+    os_version = None
+    role = match[0]
+    if not role:
+      role = 'tester'
+  elif master_name == 'tryserver.chromium.perf':
+    match = re.match(r'^(android|linux|mac|win).*_([a-z]+)$',
+                     builder_name).groups()
+    os = match[0]
+    os_version = None
+    role = match[1]
+  elif master_name == 'tryserver.client.catapult':
+    match = re.match(r'^Catapult(?: (Android|Linux|Mac|Windows))? ([A-Za-z]+)$',
+                     builder_name).groups()
+    os = match[0]
+    os_version = None
+    role = match[1]
+  else:
+    raise NotImplementedError()
+
+  if os:
+    os = os.lower()
+  if os == 'windows':
+    os = 'win'
+  if os_version:
+    os_version = os_version.lower()
+  role = role.lower()
+
+  return (os, os_version, role)
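A minimal sketch of how _ParseBuilderName decomposes builder names into (os, os_version, role) (editorial illustration; the builder names below are examples chosen to exercise the regexes, not an authoritative list):

from handlers import builds  # as the update app's main.py below imports it

assert builds._ParseBuilderName('chromium.perf', 'Win Builder') == (
    'win', None, 'builder')
assert builds._ParseBuilderName('chromium.perf', 'Mac 10.9 Perf (1)') == (
    'mac', '10.9', 'tester')
assert builds._ParseBuilderName(
    'tryserver.chromium.perf', 'linux_perf_bisect') == (
        'linux', None, 'bisect')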
diff --git a/catapult/firefighter/update/main.py b/catapult/firefighter/update/main.py
new file mode 100644
index 0000000..9442e89
--- /dev/null
+++ b/catapult/firefighter/update/main.py
@@ -0,0 +1,16 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import webapp2
+
+from handlers import blank
+from handlers import builds
+
+
+_URL_MAPPING = [
+    ('/_ah/health', blank.Blank),
+    ('/_ah/start', blank.Blank),
+    ('/builds', builds.Builds),
+]
+app = webapp2.WSGIApplication(_URL_MAPPING)  # pylint: disable=invalid-name
diff --git a/catapult/firefighter/update/module.yaml b/catapult/firefighter/update/module.yaml
new file mode 100644
index 0000000..4e4bfdd
--- /dev/null
+++ b/catapult/firefighter/update/module.yaml
@@ -0,0 +1,23 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+module: update
+
+runtime: python27
+vm: true
+api_version: 1
+threadsafe: true
+
+resources:
+  cpu: .5
+  memory_gb: 1.3
+  disk_size_gb: 10
+
+manual_scaling:
+  instances: 1
+
+handlers:
+- url: /.*
+  script: main.app
+  secure: always
diff --git a/catapult/perf_insights/Dockerfile b/catapult/perf_insights/Dockerfile
index e326ad3..2513f97 100644
--- a/catapult/perf_insights/Dockerfile
+++ b/catapult/perf_insights/Dockerfile
@@ -5,7 +5,7 @@
 # Ideally at some point, instead of using the appengine supplied python
 # image, we could image our own ubuntu version.
 # https://github.com/GoogleCloudPlatform/appengine-python-vm-runtime
-RUN apt-get update && apt-get install -y git libglib2.0-dev
+RUN apt-get update && apt-get install -y git libglib2.0-dev procps
 RUN sed -i '1ideb http://ftp.debian.org/debian experimental main' /etc/apt/sources.list
 RUN apt-get update && apt-get -y -t experimental install libc6
 
diff --git a/catapult/perf_insights/PRESUBMIT.py b/catapult/perf_insights/PRESUBMIT.py
new file mode 100644
index 0000000..0edc9f0
--- /dev/null
+++ b/catapult/perf_insights/PRESUBMIT.py
@@ -0,0 +1,29 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+def CheckChangeOnUpload(input_api, output_api):
+  return _CommonChecks(input_api, output_api)
+
+
+def CheckChangeOnCommit(input_api, output_api):
+  return _CommonChecks(input_api, output_api)
+
+
+def _CommonChecks(input_api, output_api):
+  results = []
+  results += input_api.RunTests(input_api.canned_checks.GetPylint(
+      input_api, output_api, extra_paths_list=_GetPathsToPrepend(input_api),
+      pylintrc='pylintrc'))
+  return results
+
+
+def _GetPathsToPrepend(input_api):
+  project_dir = input_api.PresubmitLocalPath()
+  catapult_dir = input_api.os_path.join(project_dir, '..')
+  return [
+      project_dir,
+
+      input_api.os_path.join(catapult_dir, 'third_party', 'mock'),
+  ]
diff --git a/catapult/perf_insights/app.yaml b/catapult/perf_insights/app.yaml
index 115d867..a0ae020 100644
--- a/catapult/perf_insights/app.yaml
+++ b/catapult/perf_insights/app.yaml
@@ -14,6 +14,11 @@
   script: perf_insights.endpoints.upload.app
   secure: always
 
+- url: /corpus_cleanup
+  script: perf_insights.endpoints.corpus_cleanup.app
+  secure: always
+  login: admin
+
 - url: /query
   script: perf_insights.endpoints.query.app
   secure: always
@@ -22,17 +27,14 @@
 - url: /cloud_mapper/create
   script: perf_insights.endpoints.cloud_mapper.create.app
   secure: always
-  login: admin
 
 - url: /cloud_mapper/status
   script: perf_insights.endpoints.cloud_mapper.status.app
   secure: always
-  login: admin
 
 - url: /cloud_mapper/cancel
   script: perf_insights.endpoints.cloud_mapper.cancel.app
   secure: always
-  login: admin
 
 - url: /cloud_mapper/task
   script: perf_insights.endpoints.cloud_mapper.task.app
diff --git a/catapult/perf_insights/appengine_config.py b/catapult/perf_insights/appengine_config.py
index 2de7d40..b7e4a23 100644
--- a/catapult/perf_insights/appengine_config.py
+++ b/catapult/perf_insights/appengine_config.py
@@ -9,9 +9,7 @@
   https://cloud.google.com/appengine/docs/python/tools/appengineconfig
 """
 
-import logging
 import os
-import sys
 
 from google.appengine.ext import vendor
 
diff --git a/catapult/perf_insights/bin/PRESUBMIT.py b/catapult/perf_insights/bin/PRESUBMIT.py
index 7527192..799215d 100644
--- a/catapult/perf_insights/bin/PRESUBMIT.py
+++ b/catapult/perf_insights/bin/PRESUBMIT.py
@@ -2,7 +2,6 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 import os
-import sys
 
 def CheckChange(input_api, output_api):
   init_py_path = os.path.join(input_api.PresubmitLocalPath(), '__init__.py')
diff --git a/catapult/perf_insights/bin/deploy_perfinsights b/catapult/perf_insights/bin/deploy_perfinsights
index a88b166..0ac4b8d 100755
--- a/catapult/perf_insights/bin/deploy_perfinsights
+++ b/catapult/perf_insights/bin/deploy_perfinsights
@@ -31,12 +31,12 @@
   from catapult_build import temp_deployment_dir
 
   file_sets = [
-      ['app.yaml', 'dispatch.yaml', 'queue.yaml', 'index.yaml',
+      ['app.yaml', 'cron.yaml', 'dispatch.yaml', 'queue.yaml', 'index.yaml',
       'remote_worker.yaml']
   ]
   for cur_set in file_sets:
     with temp_deployment_dir.TempDeploymentDir(
-        paths, use_symlinks=True) as temp_dir:
+        paths, use_symlinks=False) as temp_dir:
       cmd = ['gcloud', 'preview', 'app', 'deploy']
       cmd += cur_set
       cmd += [
diff --git a/catapult/perf_insights/bin/run_py_tests b/catapult/perf_insights/bin/run_py_tests
index 509cf85..9043204 100755
--- a/catapult/perf_insights/bin/run_py_tests
+++ b/catapult/perf_insights/bin/run_py_tests
@@ -5,6 +5,7 @@
 
 import os
 import sys
+import platform
 
 _CATAPULT_PATH = os.path.abspath(os.path.join(
     os.path.dirname(__file__), os.path.pardir, os.path.pardir))
diff --git a/catapult/perf_insights/bin/run_vinn_tests b/catapult/perf_insights/bin/run_vinn_tests
index 9962a2d..de10ea9 100755
--- a/catapult/perf_insights/bin/run_vinn_tests
+++ b/catapult/perf_insights/bin/run_vinn_tests
@@ -6,9 +6,10 @@
 import os
 import sys
 
+
 if __name__ == '__main__':
   perf_insights_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                     '..'))
   sys.path.append(perf_insights_path)
   from perf_insights_build import run_vinn_tests
-  sys.exit(run_vinn_tests.Main(sys.argv))
\ No newline at end of file
+  sys.exit(run_vinn_tests.Main(sys.argv))
diff --git a/catapult/perf_insights/cron.yaml b/catapult/perf_insights/cron.yaml
new file mode 100644
index 0000000..1c4dcb2
--- /dev/null
+++ b/catapult/perf_insights/cron.yaml
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+cron:
+- description: Hourly cleanup job
+  url: /corpus_cleanup
+  schedule: every 1 hours
diff --git a/catapult/perf_insights/perf_insights/__init__.py b/catapult/perf_insights/perf_insights/__init__.py
index 5f5d25e..987a3e1 100644
--- a/catapult/perf_insights/perf_insights/__init__.py
+++ b/catapult/perf_insights/perf_insights/__init__.py
@@ -3,4 +3,4 @@
 # found in the LICENSE file.
 
 import perf_insights_project
-perf_insights_project.UpdateSysPathIfNeeded()
\ No newline at end of file
+perf_insights_project.UpdateSysPathIfNeeded()
diff --git a/catapult/perf_insights/perf_insights/cloud_config.py b/catapult/perf_insights/perf_insights/cloud_config.py
index 8811564..ae1fa6a 100644
--- a/catapult/perf_insights/perf_insights/cloud_config.py
+++ b/catapult/perf_insights/perf_insights/cloud_config.py
@@ -7,8 +7,9 @@
 from google.appengine.api import app_identity
 from google.appengine.ext import ndb
 
+
 def _is_devserver():
-  server_software = os.environ.get('SERVER_SOFTWARE','')
+  server_software = os.environ.get('SERVER_SOFTWARE', '')
   return server_software and server_software.startswith('Development')
 
 _DEFAULT_CATAPULT_PATH = '/catapult'
@@ -27,6 +28,7 @@
 _GCE_DEFAULT_ZONE = 'us-central1-f'
 _GCE_DEFAULT_MACHINE_TYPE = 'n1-standard-1'
 
+
 class CloudConfig(ndb.Model):
   control_bucket_path = ndb.StringProperty(default=_DEFAULT_CONTROL_BUCKET_PATH)
   setup_scheme = 'http' if _is_devserver() else 'https'
@@ -43,6 +45,7 @@
       default='%s/traces' % app_identity.get_default_gcs_bucket_name())
   catapult_path = ndb.StringProperty(default=_DEFAULT_CATAPULT_PATH)
 
+
 def Get():
   config = CloudConfig.get_by_id(_CONFIG_KEY_NAME)
   if not config:
diff --git a/catapult/perf_insights/perf_insights/cloud_storage.py b/catapult/perf_insights/perf_insights/cloud_storage.py
index 809b914..0df1638 100644
--- a/catapult/perf_insights/perf_insights/cloud_storage.py
+++ b/catapult/perf_insights/perf_insights/cloud_storage.py
@@ -12,6 +12,7 @@
 
 
 class CloudStorageError(Exception):
+
   @staticmethod
   def _GetConfigInstructions():
     command = _GSUTIL_PATH
@@ -22,6 +23,7 @@
 
 
 class PermissionError(CloudStorageError):
+
   def __init__(self):
     super(PermissionError, self).__init__(
         'Attempted to access a file from Cloud Storage but you don\'t '
@@ -29,6 +31,7 @@
 
 
 class CredentialsError(CloudStorageError):
+
   def __init__(self):
     super(CredentialsError, self).__init__(
         'Attempted to access a file from Cloud Storage but you have no '
diff --git a/catapult/perf_insights/perf_insights/corpus_driver.py b/catapult/perf_insights/perf_insights/corpus_driver.py
index f2b571a..c8666f7 100644
--- a/catapult/perf_insights/perf_insights/corpus_driver.py
+++ b/catapult/perf_insights/perf_insights/corpus_driver.py
@@ -1,9 +1,9 @@
 # Copyright (c) 2015 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
-import os
-import sys
+
 
 class CorpusDriver(object):
+
   def GetTraceHandlesMatchingQuery(self, query):
     raise NotImplementedError()
diff --git a/catapult/perf_insights/perf_insights/corpus_query.py b/catapult/perf_insights/perf_insights/corpus_query.py
index d55d69b..e4426fd 100644
--- a/catapult/perf_insights/perf_insights/corpus_query.py
+++ b/catapult/perf_insights/perf_insights/corpus_query.py
@@ -9,10 +9,13 @@
 import re
 import datetime
 
+
 def _InOp(a, b):
   return a in b
 
+
 class _ReadField(object):
+
   def __init__(self, fieldName):
     self.fieldName = fieldName
 
@@ -22,7 +25,9 @@
   def Eval(self, metadata):
     return metadata[self.fieldName]
 
+
 class _Constant(object):
+
   def __init__(self, constant):
     self.constant = constant
 
@@ -43,26 +48,27 @@
     # pylint: disable=unused-argument
     return self.constant
 
+
 def _StringToValue(s):
   try:
     constant = eval(s, {}, {})
     return _Constant(constant)
-  except:  # pylint: disable=bare-except
+  except Exception:  # pylint: disable=broad-except
     pass
 
   # Barewords are assumed to be fields.
-  m = re.match('([a-zA-Z0-9_]+)$', s)
+  m = re.match(r'([a-zA-Z0-9_]+)$', s)
   if m:
     return _ReadField(m.group(1))
 
   # Tuples.
-  m = re.match('\(.+\)$', s)
+  m = re.match(r'\(.+\)$', s)
   if m:
-    items = m.group(0).split(',\s*')
+    items = m.group(0).split(r',\s*')
     return _Constant([_StringToValue(x) for x in items])
 
   # Dates.
-  m = re.match('Date\((.+)\)$', s)
+  m = re.match(r'Date\((.+)\)$', s)
   if m:
     d = datetime.datetime.strptime(m.group(1), "%Y-%m-%d %H:%M:%S.%f")
     return _Constant(d)
@@ -74,12 +80,13 @@
   '=': operator.eq,
   '<': operator.lt,
   '<=': operator.le,
-  '>':  operator.gt,
+  '>': operator.gt,
   '>=': operator.ge,
   '!=': operator.ne,
-  ' IN ': _InOp # Spaces matter for proper parsing.
+  ' IN ': _InOp  # Spaces matter for proper parsing.
 }
 
+
 def _OperatorToString(op):
   for k, v in _OPERATORS.iteritems():
     if op == v:
@@ -93,7 +100,9 @@
 _TOKEN_SEARCH_ORDER = list(_OPERATORS.keys())
 _TOKEN_SEARCH_ORDER.sort(lambda x, y: len(y) - len(x))
 
+
 class Filter(object):
+
   def __init__(self, a, op, b):
     self.a = a
     self.op = op
@@ -142,7 +151,9 @@
                   _OPERATORS[found_op_key],
                   rvalue)
 
+
 class CorpusQuery(object):
+
   def __init__(self):
     self.max_trace_handles = None
     self.filters = []
@@ -169,7 +180,7 @@
         else:
           b_string = f.b.fieldName
 
-        filter_strings.append( '%s %s %s' % (a_string,
+        filter_strings.append('%s %s %s' % (a_string,
                                              _OperatorToString(f.op).strip(),
                                              b_string))
       gql = 'WHERE ' + ' AND '.join(filter_strings)
@@ -195,7 +206,7 @@
     q = CorpusQuery()
     exprs = filterString.split(' AND ')
     for expr in exprs:
-      m = re.match('MAX_TRACE_HANDLES\s*=\s*(\d+)', expr)
+      m = re.match(r'MAX_TRACE_HANDLES\s*=\s*(\d+)', expr)
       if m:
         q.max_trace_handles = int(m.group(1))
         continue
@@ -206,7 +217,7 @@
     return q
 
   def Eval(self, metadata, num_trace_handles_so_far=0):
-    if self.max_trace_handles:
+    if self.max_trace_handles is not None:
       if num_trace_handles_so_far >= self.max_trace_handles:
         return False
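The Eval change above is behavioral, not just stylistic: with the old truthiness check a MAX_TRACE_HANDLES limit of 0 was silently ignored, while the explicit None check enforces it. A minimal sketch (editorial illustration):

from perf_insights import corpus_query

q = corpus_query.CorpusQuery.FromString('MAX_TRACE_HANDLES=0')
assert q.max_trace_handles == 0
# With the None check, a limit of 0 rejects every trace handle.
assert not q.Eval({}, num_trace_handles_so_far=0)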
 
diff --git a/catapult/perf_insights/perf_insights/corpus_query_unittest.py b/catapult/perf_insights/perf_insights/corpus_query_unittest.py
index 5564182..06f5907 100644
--- a/catapult/perf_insights/perf_insights/corpus_query_unittest.py
+++ b/catapult/perf_insights/perf_insights/corpus_query_unittest.py
@@ -7,7 +7,9 @@
 
 from perf_insights import corpus_query
 
+
 class FilterTests(unittest.TestCase):
+
   def testEqNumber(self):
     f = corpus_query.Filter.FromString("a = 3")
 
@@ -20,18 +22,18 @@
 
   def testInTuple(self):
     f = corpus_query.Filter.FromString("a IN (1, 2)")
-    self.assertEquals(f.a.fieldName, 'a');
-    self.assertEquals(f.op, corpus_query._InOp);
-    self.assertEquals(f.b.constant, (1, 2));
+    self.assertEquals(f.a.fieldName, 'a')
+    self.assertEquals(f.op, corpus_query._InOp)
+    self.assertEquals(f.b.constant, (1, 2))
 
     self.assertFalse(f.Eval({'a': 3}))
     self.assertTrue(f.Eval({'a': 1}))
 
   def testInTupleStr(self):
     f = corpus_query.Filter.FromString("a IN ('a', 'b')")
-    self.assertEquals(f.a.fieldName, 'a');
-    self.assertEquals(f.op, corpus_query._InOp);
-    self.assertEquals(f.b.constant, ('a', 'b'));
+    self.assertEquals(f.a.fieldName, 'a')
+    self.assertEquals(f.op, corpus_query._InOp)
+    self.assertEquals(f.b.constant, ('a', 'b'))
 
     self.assertFalse(f.Eval({'a': 'c'}))
     self.assertTrue(f.Eval({'a': 'a'}))
@@ -49,10 +51,10 @@
   def testDateComparison(self):
     f = corpus_query.Filter.FromString(
         "date >= Date(2015-01-02 3:04:05.678)")
-    self.assertEquals(f.a.fieldName, 'date');
-    self.assertEquals(f.op, operator.ge);
+    self.assertEquals(f.a.fieldName, 'date')
+    self.assertEquals(f.op, operator.ge)
 
-    self.assertTrue(isinstance(f.b.constant, datetime.datetime));
+    self.assertTrue(isinstance(f.b.constant, datetime.datetime))
     at = datetime.datetime(2015, 1, 2, 3, 4, 5, 678000)
     self.assertEquals(f.b.constant, at)
 
@@ -65,6 +67,7 @@
 
 
 class CorpusQueryTests(unittest.TestCase):
+
   def testSimple(self):
     q = corpus_query.CorpusQuery.FromString('')
     self.assertTrue(q.Eval({'a': 1}))
@@ -101,7 +104,7 @@
     self.assertFalse(f.Eval({'date': end}))
     self.assertFalse(f.Eval({'date': way_after}))
 
-  def testSimpleOp(self):
+  def testSimpleOpWithMaxTraceHandles(self):
     q = corpus_query.CorpusQuery.FromString('a = 3 AND MAX_TRACE_HANDLES=3')
     self.assertTrue(q.Eval({'a': 3}, 0))
     self.assertFalse(q.Eval({'a': 3}, 3))
@@ -121,7 +124,7 @@
 
     (gql, args) = q.AsGQLWhereClause()
     self.assertEquals(gql, 'WHERE a = :1')
-    self.assertEquals(args[0], 1)
+    self.assertEquals(args[0], 3)
 
   def testMultipleFiltersOpQueryString(self):
     q = corpus_query.CorpusQuery.FromString(
@@ -146,7 +149,7 @@
     self.assertEquals(args[0], datetime.datetime(2015, 01, 01, 0, 0, 0))
     self.assertEquals(args[1], datetime.datetime(2015, 02, 01, 0, 0, 0))
 
-  def testSimpleOpQueryString(self):
+  def testSimpleOpWithMaxTraceHandlesQueryString(self):
     q = corpus_query.CorpusQuery.FromString('a = 3 AND MAX_TRACE_HANDLES=3')
     self.assertEquals(q.AsQueryString(), 'a = 3 AND MAX_TRACE_HANDLES=3')
 
diff --git a/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/__init__.py b/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/__init__.py
index 9bccde2..59759a7 100644
--- a/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/__init__.py
+++ b/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/__init__.py
@@ -12,6 +12,11 @@
     'uritemplate',
 ]
 
+# Directories in perf_insights/third_party required by cloud_mapper.
+THIRD_PARTY_LIBRARIES_IN_PERF_INSIGHTS = [
+    'cloudstorage',
+]
+
 # Libraries bundled with the App Engine SDK.
 THIRD_PARTY_LIBRARIES_IN_SDK = [
     'httplib2',
@@ -27,11 +32,11 @@
     'local_worker.yaml',
     'queue.yaml',
     'dispatch.yaml',
+    'cron.yaml',
     'index.yaml',
     'Dockerfile',
     'perf_insights',
     'perf_insights_project.py',
-    'third_party',
 ]
 
 
@@ -72,4 +77,9 @@
   for library_dir in THIRD_PARTY_LIBRARIES:
     paths.append(os.path.join(third_party_dir, library_dir))
 
+  third_party_dir = os.path.join(catapult_path, 'perf_insights', 'third_party')
+  for library_dir in THIRD_PARTY_LIBRARIES_IN_PERF_INSIGHTS:
+    paths.append(os.path.join(third_party_dir, library_dir))
+  print paths
+
   return paths
diff --git a/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/cancel.py b/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/cancel.py
index 31b5b08..a0d7964 100644
--- a/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/cancel.py
+++ b/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/cancel.py
@@ -4,13 +4,27 @@
 import json
 import webapp2
 
+from google.appengine.api import taskqueue
+from perf_insights.endpoints.cloud_mapper import job_info
+
 
 class CancelPage(webapp2.RequestHandler):
 
   def get(self):
     self.response.headers['Content-Type'] = 'text/plain'
-    response = {'success': False}
-    self.response.out.write(json.dumps(response))
 
+    jobid = self.request.get('jobid')
+    job = job_info.JobInfo.get_by_id(jobid)
+    if not job:
+      response = {'success': False}
+      self.response.out.write(json.dumps(response))
+      return
+
+    taskqueue.Queue('mapper-queue').delete_tasks_by_name(job.running_tasks)
+    job.status = 'CANCELLED'
+    job.put()
+
+    response = {'success': True}
+    self.response.out.write(json.dumps(response))
 
 app = webapp2.WSGIApplication([('/cloud_mapper/cancel', CancelPage)])
diff --git a/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/cloud_helper.py b/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/cloud_helper.py
index 7ff108a..96b8ecf 100644
--- a/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/cloud_helper.py
+++ b/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/cloud_helper.py
@@ -1,14 +1,27 @@
 # Copyright (c) 2015 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
-import io
+
 import os
 
-from google.appengine.api import memcache
-from perf_insights.endpoints.cloud_mapper import gce_creation_info
-from perf_insights import cloud_config
+import cloudstorage as gcs
 
-import third_party.cloudstorage as gcs
+
+class EnvVarModifier(object):
+  def __init__(self, **kwargs):
+    self._vars = {}
+    self._kwargs = kwargs
+
+  def __enter__(self):
+    for k, v in self._kwargs.iteritems():
+      self._vars[k] = os.environ.get(k)
+      os.environ[k] = v
+    return self
+
+  def __exit__(self, *_):
+    for k, v in self._vars.iteritems():
+      os.environ[k] = v
+
 
 default_retry_params = gcs.RetryParams(initial_delay=0.2,
                                        max_delay=5.0,
@@ -16,33 +29,41 @@
                                        max_retry_period=15)
 gcs.set_default_retry_params(default_retry_params)
 
+
 def _remove_gcs_prefix(full_url):
   return full_url.split('gs:/')[1]
 
+
 def WriteGCS(fullurl, data):
-  gcs_file = gcs.open(_remove_gcs_prefix(fullurl),
-                      'w',
-                      content_type='text/plain',
-                      options={},
-                      retry_params=default_retry_params)
-  gcs_file.write(data)
-  gcs_file.close()
+  with EnvVarModifier(SERVER_SOFTWARE='') as _:
+    gcs_file = gcs.open(_remove_gcs_prefix(fullurl),
+                        'w',
+                        content_type='text/plain',
+                        options={},
+                        retry_params=default_retry_params)
+    gcs_file.write(data)
+    gcs_file.close()
+
 
 def ReadGCS(fullurl):
-  gcs_file = gcs.open(_remove_gcs_prefix(fullurl),
-                      'r',
-                      retry_params=default_retry_params)
+  with EnvVarModifier(SERVER_SOFTWARE='') as _:
+    gcs_file = gcs.open(_remove_gcs_prefix(fullurl),
+                        'r',
+                        retry_params=default_retry_params)
 
-  contents = gcs_file.read()
-  gcs_file.close()
+    contents = gcs_file.read()
+    gcs_file.close()
 
-  return contents
+    return contents
+
 
 def ReadGCSToFile(fullurl, output_file):
   output_file.write(ReadGCS(fullurl))
 
+
 def StatGCS(fullurl):
-  try:
-    return gcs.stat(_remove_gcs_prefix(fullurl))
-  except gcs.NotFoundError:
-    return None
+  with EnvVarModifier(SERVER_SOFTWARE='') as _:
+    try:
+      return gcs.stat(_remove_gcs_prefix(fullurl))
+    except gcs.NotFoundError:
+      return None
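A minimal sketch of the EnvVarModifier pattern introduced above (editorial illustration): environment variables passed as keyword arguments are overridden inside the with-block and restored on exit, which is how the GCS helpers temporarily clear SERVER_SOFTWARE:

import os

from perf_insights.endpoints.cloud_mapper import cloud_helper

os.environ['SERVER_SOFTWARE'] = 'Development/2.0'
with cloud_helper.EnvVarModifier(SERVER_SOFTWARE='') as _:
  # Inside the block the variable holds the override.
  assert os.environ['SERVER_SOFTWARE'] == ''
# On exit the previous value is restored (note: __exit__ assumes the variable
# was already set, since os.environ.get() may return None).
assert os.environ['SERVER_SOFTWARE'] == 'Development/2.0'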
diff --git a/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/create.py b/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/create.py
index 7fd650e..759d7a1 100644
--- a/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/create.py
+++ b/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/create.py
@@ -10,6 +10,12 @@
 from google.appengine.api import taskqueue
 from perf_insights.endpoints.cloud_mapper import job_info
 
+MAX_JOB_TIMEOUT_IN_SECONDS = 600
+DEFAULT_JOB_TIMEOUT_IN_SECONDS = 120
+
+MAX_FUNCTION_TIMEOUT_IN_SECONDS = 120
+DEFAULT_FUNCTION_TIMEOUT_IN_SECONDS = 30
+
 class CreatePage(webapp2.RequestHandler):
 
   def post(self):
@@ -18,11 +24,26 @@
     mapper = self.request.get('mapper')
     reducer = self.request.get('reducer')
     mapper_function = self.request.get('mapper_function')
+    reducer_function = self.request.get('reducer_function')
     query = self.request.get('query')
     corpus = self.request.get('corpus')
     revision = self.request.get('revision')
     if not revision:
       revision = 'HEAD'
+    timeout = self.request.get('timeout')
+    if not timeout:
+      timeout = DEFAULT_JOB_TIMEOUT_IN_SECONDS
+    else:
+      timeout = int(timeout)
+    function_timeout = self.request.get('function_timeout')
+    if not function_timeout:
+      function_timeout = DEFAULT_FUNCTION_TIMEOUT_IN_SECONDS
+    else:
+      function_timeout = int(function_timeout)
+    timeout = max(0, min(
+        timeout, MAX_JOB_TIMEOUT_IN_SECONDS))
+    function_timeout = max(0, min(
+        function_timeout, MAX_FUNCTION_TIMEOUT_IN_SECONDS))
 
     job_uuid = str(uuid.uuid4())
     logging.info('Creating new job %s' % job_uuid)
@@ -32,9 +53,13 @@
     job.mapper = mapper
     job.reducer = reducer
     job.mapper_function = mapper_function
+    job.reducer_function = reducer_function
     job.query = query
     job.corpus = corpus
     job.revision = revision
+    job.running_tasks = []
+    job.timeout = timeout
+    job.function_timeout = function_timeout
     job.put()
 
     response = {
diff --git a/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/job_info.py b/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/job_info.py
index 8aa0d0a..ceb5a17 100644
--- a/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/job_info.py
+++ b/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/job_info.py
@@ -13,8 +13,13 @@
   mapper = ndb.TextProperty()
   reducer = ndb.TextProperty()
   mapper_function = ndb.StringProperty(indexed=True)
+  reducer_function = ndb.StringProperty(indexed=True)
   query = ndb.StringProperty(indexed=True)
   corpus = ndb.StringProperty(indexed=True)
   revision = ndb.StringProperty(indexed=True)
+  timeout = ndb.IntegerProperty()
+  function_timeout = ndb.IntegerProperty()
+
+  running_tasks = ndb.StringProperty(repeated=True)
 
   results = ndb.StringProperty(indexed=True)
diff --git a/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/task.py b/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/task.py
index 2c8f2e0..619191b 100644
--- a/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/task.py
+++ b/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/task.py
@@ -4,8 +4,8 @@
 import datetime
 import json
 import logging
+import math
 import os
-import time
 import urllib
 import uuid
 import webapp2
@@ -16,8 +16,9 @@
 from perf_insights.endpoints.cloud_mapper import job_info
 from perf_insights import cloud_config
 
-DEFAULT_TRACES_PER_INSTANCE = 64
-
+# If you modify this, you need to change the max_concurrent_requests in
+# queue.yaml.
+DEFAULT_TRACES_PER_INSTANCE = 4
 
 class TaskPage(webapp2.RequestHandler):
 
@@ -35,64 +36,78 @@
                             headers=headers,
                             follow_redirects=False,
                             deadline=10)
-    logging.info(result.content)
-
     return json.loads(result.content)
 
   def _DispatchTracesAndWaitForResult(self, job, traces, num_instances):
     def _slice_it(li, cols=2):
       start = 0
       for i in xrange(cols):
-          stop = start + len(li[i::cols])
-          yield li[start:stop]
-          start = stop
+        stop = start + len(li[i::cols])
+        yield li[start:stop]
+        start = stop
 
     # TODO(simonhatch): In the future it might be possibly to only specify a
     # reducer and no mapper. Revisit this.
     bucket_path = cloud_config.Get().control_bucket_path + "/jobs/"
-    mapper_url = '%s%s.mapper' % (bucket_path, job.key.id())
-    mapper_text = job.mapper.encode('ascii', 'ignore')
-    cloud_helper.WriteGCS(mapper_url, mapper_text)
 
-    version = self._GetVersion()
+    mapper_url = ''
+    reducer_url = ''
 
-    tasks = {}
+    if job.reducer:
+      reducer_url = '%s%s.reducer' % (bucket_path, job.key.id())
+      reducer_text = job.reducer.encode('ascii', 'ignore')
+      cloud_helper.WriteGCS(reducer_url, reducer_text)
 
-    # Split the traces up into N buckets.
-    for current_traces in _slice_it(traces, num_instances):
-      task_id = str(uuid.uuid4())
+    if job.mapper:
+      mapper_url = '%s%s.mapper' % (bucket_path, job.key.id())
+      mapper_text = job.mapper.encode('ascii', 'ignore')
+      cloud_helper.WriteGCS(mapper_url, mapper_text)
 
-      payload = {
-          'revision': job.revision,
-          'traces': json.dumps(current_traces),
-          'result': '%s%s.result' % (bucket_path, task_id),
-          'mapper': mapper_url,
-          'mapper_function': job.mapper_function
-      }
+      version = self._GetVersion()
+
+      tasks = {}
+
+      # Split the traces up into N buckets.
+      logging.info('Splitting traces across %d instances.' % num_instances)
+      for current_traces in _slice_it(traces, num_instances):
+        logging.info('Submitting job with %d traces.' % len(current_traces))
+        task_id = str(uuid.uuid4())
+
+        payload = {
+            'revision': job.revision,
+            'traces': json.dumps(current_traces),
+            'result': '%s%s.result' % (bucket_path, task_id),
+            'mapper': mapper_url,
+            'mapper_function': job.mapper_function,
+            'timeout': job.function_timeout,
+        }
+        taskqueue.add(
+            queue_name='mapper-queue',
+            url='/cloud_worker/task',
+            target=version,
+            name=task_id,
+            params=payload)
+        tasks[task_id] = {'status': 'IN_PROGRESS'}
+
+      # On production servers, we could just sit and wait for the results, but
+      # dev_server is single threaded and won't run any other tasks until the
+      # current one is finished. We'll just do the easy thing for now and
+      # queue a task to check for the result.
+      mapper_timeout = int(job.timeout - job.function_timeout)
+      timeout = (
+          datetime.datetime.now() + datetime.timedelta(
+              seconds=mapper_timeout)).strftime(
+                  '%Y-%m-%d %H:%M:%S')
       taskqueue.add(
-          queue_name='mapper-queue',
-          url='/cloud_worker/task',
+          queue_name='default',
+          url='/cloud_mapper/task',
           target=version,
-          name=task_id,
-          params=payload)
-      tasks[task_id] = {'status': 'IN_PROGRESS'}
-
-    # On production servers, we could just sit and wait for the results, but
-    # dev_server is single threaded and won't run any other tasks until the
-    # current one is finished. We'll just do the easy thing for now and
-    # queue a task to check for the result.
-    timeout = (
-        datetime.datetime.now() + datetime.timedelta(minutes=10)).strftime(
-            '%Y-%m-%d %H:%M:%S')
-    taskqueue.add(
-        queue_name='default',
-        url='/cloud_mapper/task',
-        target=version,
-        countdown=1,
-        params={'jobid': job.key.id(),
-                'type': 'check',
-                'tasks': json.dumps(tasks),
-                'timeout': timeout})
+          countdown=1,
+          params={'jobid': job.key.id(),
+                  'type': 'check_map_results',
+                  'reducer': reducer_url,
+                  'tasks': json.dumps(tasks),
+                  'timeout': timeout})
 
   def _GetVersion(self):
     version = os.environ['CURRENT_VERSION_ID'].split('.')[0]
@@ -100,25 +115,126 @@
       version = taskqueue.DEFAULT_APP_VERSION
     return version
 
-  def _CancelTasks(self, tasks):
-    task_names = [task_id for task_id, _ in tasks.iteritems()]
-    taskqueue.Queue('mapper-queue').delete_tasks_by_name(task_names)
+  def _CheckOnMapResults(self, job):
+    if job.status != 'IN_PROGRESS':
+      return
 
-  def _CheckOnResults(self, job):
     tasks = json.loads(self.request.get('tasks'))
+    reducer_url = self.request.get('reducer')
+    reducer_function = job.reducer_function
+    revision = job.revision
+    timeout = datetime.datetime.strptime(
+        self.request.get('timeout'), '%Y-%m-%d %H:%M:%S')
 
     # TODO: There's no reducer yet, so we can't actually collapse multiple
     # results into one results file.
-    results = None
-    for task_id, _ in tasks.iteritems():
+    mappers_done = True
+    for task_id, task_values in tasks.iteritems():
+      if task_values['status'] == 'DONE':
+        continue
       task_results_path = '%s/jobs/%s.result' % (
           cloud_config.Get().control_bucket_path, task_id)
       stat_result = cloud_helper.StatGCS(task_results_path)
       if stat_result is not None:
         logging.info(str(stat_result))
         tasks[task_id]['status'] = 'DONE'
+      else:
+        mappers_done = False
+
+    logging.info("Tasks: %s" % str(tasks))
+
+    if not mappers_done and datetime.datetime.now() < timeout:
+      taskqueue.add(
+          url='/cloud_mapper/task',
+          target=self._GetVersion(),
+          countdown=1,
+          params={'jobid': job.key.id(),
+                  'type': 'check_map_results',
+                  'reducer': reducer_url,
+                  'tasks': json.dumps(tasks),
+                  'timeout': self.request.get('timeout')})
+      return
+
+    # Clear out any leftover tasks in case we just hit the timeout.
+    self._CancelTasks(tasks)
+
+    map_results = []
+    for task_id, _ in tasks.iteritems():
+      if tasks[task_id]['status'] != 'DONE':
+        continue
+      task_results_path = '%s/jobs/%s.result' % (
+          cloud_config.Get().control_bucket_path, task_id)
+      map_results.append(task_results_path)
+
+    # We'll only do 1 reduce job for now, maybe shard it better later
+    logging.info("Kicking off reduce.")
+    task_id = str(uuid.uuid4())
+    payload = {
+        'revision': revision,
+        'traces': json.dumps(map_results),
+        'result': '%s/jobs/%s.result' % (
+            cloud_config.Get().control_bucket_path, task_id),
+        'reducer': reducer_url,
+        'reducer_function': reducer_function,
+        'timeout': job.function_timeout,
+    }
+    taskqueue.add(
+        queue_name='mapper-queue',
+        url='/cloud_worker/task',
+        target=self._GetVersion(),
+        name=task_id,
+        params=payload)
+
+    tasks = {}
+    tasks[task_id] = {'status': 'IN_PROGRESS'}
+
+    job.running_tasks = [task_id for task_id, _ in tasks.iteritems()]
+    job.put()
+
+    reduce_tasks = {}
+    reduce_tasks[task_id] = {'status': 'IN_PROGRESS'}
+
+    # On production servers, we could just sit and wait for the results, but
+    # dev_server is single threaded and won't run any other tasks until the
+    # current one is finished. We'll just do the easy thing for now and
+    # queue a task to check for the result.
+    reducer_timeout = int(job.function_timeout)
+    timeout = (
+        datetime.datetime.now() + datetime.timedelta(
+            seconds=reducer_timeout)).strftime(
+                '%Y-%m-%d %H:%M:%S')
+    taskqueue.add(
+        queue_name='default',
+        url='/cloud_mapper/task',
+        target=self._GetVersion(),
+        countdown=1,
+        params={'jobid': job.key.id(),
+                'type': 'check_reduce_results',
+                'tasks': json.dumps(reduce_tasks),
+                'timeout': timeout})
+
+  def _CancelTasks(self, tasks):
+    task_names = [task_id for task_id, _ in tasks.iteritems()]
+    taskqueue.Queue('mapper-queue').delete_tasks_by_name(task_names)
+
+  def _CheckOnReduceResults(self, job):
+    if job.status != 'IN_PROGRESS':
+      return
+
+    tasks = json.loads(self.request.get('tasks'))
+
+    # TODO: There's really only one reducer job at the moment
+    results = None
+    for task_id, _ in tasks.iteritems():
+      task_results_path = '%s/jobs/%s.result' % (
+          cloud_config.Get().control_bucket_path, task_id)
+      stat_result = cloud_helper.StatGCS(task_results_path)
+      if stat_result is not None:
+        tasks[task_id]['status'] = 'DONE'
         results = task_results_path
 
+    logging.info("Reduce results: %s" % str(tasks))
+
     if not results:
       timeout = datetime.datetime.strptime(
           self.request.get('timeout'), '%Y-%m-%d %H:%M:%S')
@@ -133,7 +249,7 @@
           target=self._GetVersion(),
           countdown=1,
           params={'jobid': job.key.id(),
-                  'type': 'check',
+                  'type': 'check_reduce_results',
                   'tasks': json.dumps(tasks),
                   'timeout': self.request.get('timeout')})
       return
@@ -145,7 +261,7 @@
     job.put()
 
   def _CalculateNumInstancesNeeded(self, num_traces):
-    return 1 + int(num_traces / DEFAULT_TRACES_PER_INSTANCE)
+    return int(math.ceil(float(num_traces) / DEFAULT_TRACES_PER_INSTANCE))
 
   def _RunMappers(self, job):
     # Get all the traces to process
@@ -178,8 +294,10 @@
     try:
       if self.request.get('type') == 'create':
         self._CreateMapperJob(job)
-      elif self.request.get('type') == 'check':
-        self._CheckOnResults(job)
+      elif self.request.get('type') == 'check_map_results':
+        self._CheckOnMapResults(job)
+      elif self.request.get('type') == 'check_reduce_results':
+        self._CheckOnReduceResults(job)
     except Exception as e:
       job.status = 'ERROR'
       job.put()
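A quick check of the sharding arithmetic changed above (editorial illustration): the old formula over-allocated an instance whenever the trace count was an exact multiple of DEFAULT_TRACES_PER_INSTANCE, while math.ceil allocates exactly enough:

import math

per_instance = 4  # DEFAULT_TRACES_PER_INSTANCE after this patch


def old_count(num_traces):
  return 1 + int(num_traces / per_instance)


def new_count(num_traces):
  return int(math.ceil(float(num_traces) / per_instance))


assert (old_count(8), new_count(8)) == (3, 2)  # exact multiple: one fewer VM
assert (old_count(9), new_count(9)) == (3, 3)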
diff --git a/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/test.py b/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/test.py
index b3b348a..79dc2c5 100644
--- a/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/test.py
+++ b/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/test.py
@@ -1,21 +1,14 @@
 # Copyright (c) 2015 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
-import json
-import logging
 import os
-import urllib
-import uuid
 import webapp2
 
-from google.appengine.api import modules
-from google.appengine.api import taskqueue
-from google.appengine.api import urlfetch
-from perf_insights.endpoints.cloud_mapper import job_info
 from perf_insights import cloud_config
 
+
 def _is_devserver():
-  return os.environ.get('SERVER_SOFTWARE','').startswith('Development')
+  return os.environ.get('SERVER_SOFTWARE', '').startswith('Development')
 
 _DEFAULT_MAPPER = """
 <!DOCTYPE html>
@@ -25,42 +18,73 @@
 found in the LICENSE file.
 -->
 <link rel="import" href="/perf_insights/function_handle.html">
-<link rel="import" href="/perf_insights/value/value.html">
-<link rel="import" href="/tracing/extras/rail/rail_score.html">
+<link rel="import" href="/tracing/value/value.html">
 
 <script>
+'use strict';
+
 tr.exportTo('pi.m', function() {
 
-  function railMapFunction(results, runInfo, model) {
-    var railScore = tr.e.rail.RAILScore.fromModel(model);
-    if (railScore === undefined) {
-      return;
-    }
-    results.addValue(new pi.v.DictValue(runInfo, 'railScore',
-                                        railScore.asDict()));
+  function testMapFunction(results, runInfo, model) {
+    var someValue = 4; // Chosen by fair roll of the dice.
+    results.addResult('simon', {value: someValue});
   }
-  pi.FunctionRegistry.register(railMapFunction);
+  pi.FunctionRegistry.register(testMapFunction);
 
   return {
-    railMapFunction: railMapFunction
+    testMapFunction: testMapFunction
   };
 });
 
 </script>
 """
 
-_DEFAULT_FUNCTION = 'railMapFunction'
+_DEFAULT_REDUCER = """
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/perf_insights/function_handle.html">
+
+<script>
+'use strict';
+
+tr.exportTo('pi.r', function() {
+
+  function testReduceFunction(key, mapResults) {
+    return {value: mapResults[key].value};
+  }
+
+  pi.FunctionRegistry.register(testReduceFunction);
+
+  return {
+    testReduceFunction: testReduceFunction
+  };
+});
+</script>
+"""
+
+_DEFAULT_FUNCTION = 'testMapFunction'
+_DEFAULT_REDUCER_FUNCTION = 'testReduceFunction'
 
 _FORM_HTML = """
 <!DOCTYPE html>
 <html>
 <body>
 <form action="/cloud_mapper/create" method="POST">
-Mapper: <br><textarea rows="50" cols="80" name="mapper">{mapper}</textarea>
+Mapper: <br><textarea rows="15" cols="80" name="mapper">{mapper}</textarea>
 <br>
 FunctionName: <br><input type="text" name="mapper_function"
     value="{mapper_function}"/>
 <br>
+Reducer: <br><textarea rows="15" cols="80" name="reducer">{reducer}</textarea>
+<br>
+ReducerName: <br><input type="text" name="reducer_function"
+    value="{reducer_function}"/>
+<br>
 Query: <br><input type="text" name="query" value="{query}"/>
 <br>
 Corpus: <br><input type="text" name="corpus" value="{corpus}"/>
@@ -71,11 +95,14 @@
 </html>
 """
 
+
 class TestPage(webapp2.RequestHandler):
 
   def get(self):
     form_html = _FORM_HTML.format(mapper=_DEFAULT_MAPPER,
                                   mapper_function=_DEFAULT_FUNCTION,
+                                  reducer=_DEFAULT_REDUCER,
+                                  reducer_function=_DEFAULT_REDUCER_FUNCTION,
                                   query='MAX_TRACE_HANDLES=10',
                                   corpus=cloud_config.Get().default_corpus)
     self.response.out.write(form_html)
diff --git a/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/worker.py b/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/worker.py
index b9b3138..e6cfbb8 100644
--- a/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/worker.py
+++ b/catapult/perf_insights/perf_insights/endpoints/cloud_mapper/worker.py
@@ -3,43 +3,29 @@
 # found in the LICENSE file.
 import Queue as queue
 
+import datetime
 import json
 import logging
 import os
 import shutil
+import signal
 import subprocess
 import tempfile
 import threading
+import time
 import traceback
 import webapp2
 
-from google.appengine.api import taskqueue
-from google.appengine.api import urlfetch
 from perf_insights import cloud_config
 from perf_insights.endpoints.cloud_mapper import cloud_helper
 
 
-_DEFAULT_PARALLEL_DOWNLOADS = 16
-
-
-class EnvVarModifier(object):
-  def __init__(self, **kwargs):
-    self._vars = {}
-    self._kwargs = kwargs
-
-  def __enter__(self):
-    for k, v in self._kwargs.iteritems():
-      self._vars[k] = os.environ.get(k)
-      os.environ[k] = v
-    return self
-
-  def __exit__(self, *_):
-    for k, v in self._vars.iteritems():
-      os.environ[k] = v
+_DEFAULT_PARALLEL_DOWNLOADS = 64
+_DEFAULT_RETRIES = 3
 
 
 def _is_devserver():
-  server_software = os.environ.get('SERVER_SOFTWARE','')
+  server_software = os.environ.get('SERVER_SOFTWARE', '')
   return server_software and server_software.startswith('Development')
 
 
@@ -54,12 +40,21 @@
     while not work_queue.empty():
       trace_url = work_queue.get()
       local_name = trace_url.split('/')[-1]
-      try:
-        with open(os.path.join(temp_directory, local_name), 'w') as dst:
-          with EnvVarModifier(SERVER_SOFTWARE='') as _:
-            cloud_helper.ReadGCSToFile(trace_url, dst)
-      except Exception as e:
-        logging.info("Failed to copy: %s" % e)
+      for _ in xrange(_DEFAULT_RETRIES):
+        try:
+          logging.info('downloading: %s' % trace_url)
+          # TODO: This is dumb, but we have local vs actual cloud storage.
+          # Fix this.
+          if '.gz' in local_name:
+            with open(os.path.join(temp_directory, local_name), 'w') as dst:
+              cloud_helper.ReadGCSToFile(trace_url, dst)
+          else:
+            with open(os.path.join(temp_directory, local_name), 'w') as dst:
+              cloud_helper.ReadGCSToFile(trace_url, dst)
+          break
+        except Exception as e:
+          logging.info("Failed to copy: %s" % e)
+        time.sleep(0.5)
       work_queue.task_done()
 
   for _ in xrange(_DEFAULT_PARALLEL_DOWNLOADS):
@@ -72,64 +67,96 @@
 
 
 class TaskPage(webapp2.RequestHandler):
+
   def post(self):
     os.putenv('PI_CLOUD_WORKER', '1')
     try:
       traces = json.loads(self.request.get('traces'))
       mapper = self.request.get('mapper')
       map_function = self.request.get('mapper_function')
+      reducer = self.request.get('reducer')
+      reducer_function = self.request.get('reducer_function')
       revision = self.request.get('revision')
       result_path = self.request.get('result')
+      timeout = self.request.get('timeout')
+      if timeout:
+        timeout = int(timeout)
 
       config = cloud_config.Get()
 
       if not _is_devserver():
-        subprocess.call(
-            ['git', 'pull'], cwd=config.catapult_path)
+        logging.info("Updating catapult checkout to: %s" % revision)
         subprocess.call(
             ['git', 'checkout', revision],
             cwd=config.catapult_path)
         job_path = os.path.join(
-            config.catapult_path, 'perf_insights', 'bin', 'map_traces')
+            config.catapult_path, 'perf_insights', 'bin',
+            'gce_instance_map_job')
         cwd = config.catapult_path
       else:
-        job_path = os.path.join('perf_insights', 'bin', 'map_traces')
+        logging.info("DevServer: Ignoring update step.")
+        job_path = os.path.join('perf_insights', 'bin', 'gce_instance_map_job')
         cwd = os.path.abspath(
             os.path.join(os.path.dirname(__file__), '../../../..'))
 
       # Download all the traces
       temp_directory = _DownloadTraces(traces)
 
-      # Download the mapper
-      map_file_handle, map_file_name = tempfile.mkstemp()
-      with open(map_file_name, 'w') as f:
-        f.write(cloud_helper.ReadGCS(mapper))
-
       # Output goes here.
       output_handle, output_name = tempfile.mkstemp()
 
       try:
-        map_handle = '%s:%s' % (map_file_name, map_function)
-        args = [job_path, '--jobs=-1', '--corpus=local-directory', map_handle,
+        args = [job_path, '--corpus=local-directory',
             '--trace_directory', temp_directory, '--output-file', output_name]
-        logging.info("Executing map job: %s" % args)
+        if mapper:
+          # Download the mapper
+          _, map_file_name = tempfile.mkstemp()
+          with open(map_file_name, 'w') as f:
+            f.write(cloud_helper.ReadGCS(mapper))
+          map_handle = '%s:%s' % (map_file_name, map_function)
+          args.extend(['--map_function_handle', map_handle])
+        if reducer:
+          # Download the reducer
+          _, reducer_file_name = tempfile.mkstemp()
+          with open(reducer_file_name, 'w') as f:
+            f.write(cloud_helper.ReadGCS(reducer))
+          reducer_handle = '%s:%s' % (reducer_file_name, reducer_function)
+          args.extend(['--reduce_function_handle', reducer_handle])
+        logging.info("Executing map job: %s" % ' '.join(args))
 
         map_job = subprocess.Popen(args,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
-                                   cwd=cwd)
-        stdout, stderr = map_job.communicate()
+                                   cwd=cwd,
+                                   preexec_fn=os.setsid)
+        start_time = datetime.datetime.now()
+        while datetime.datetime.now() - start_time < datetime.timedelta(
+            seconds=timeout):
+          time.sleep(1)
+          if map_job.poll() is not None:
+            break
+
+        if map_job.poll() is None:
+          logging.warning('Job timed out, terminating.')
+          # TODO: Kill child processes.
+          os.killpg(os.getpgid(map_job.pid), signal.SIGTERM)
+
+        stdout = ''
+        stderr = ''
+        if map_job.stdout:
+          stdout = map_job.stdout.read()
+        if map_job.stderr:
+          stderr = map_job.stderr.read()
 
         logging.info('stdout:\n' + stdout)
         logging.info('stderr:\n' + stderr)
 
         with open(output_name, 'r') as f:
+          logging.info('Writing result to: %s' % result_path)
           cloud_helper.WriteGCS(result_path, f.read())
       finally:
         os.close(output_handle)
         os.unlink(output_name)
-        os.close(map_file_handle)
-        os.unlink(map_file_name)
         shutil.rmtree(temp_directory)
     except Exception:
       logging.info(traceback.format_exc())
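The handler above starts the map job in its own process group (via preexec_fn=os.setsid), polls it until a caller-supplied deadline, and terminates the whole group if it is still running. A self-contained sketch of that pattern under those assumptions; the helper name is illustrative, and the poll() is not None check means a job that finishes quickly (even with exit code 0) is detected right away:

    import datetime
    import os
    import signal
    import subprocess
    import time

    def run_with_timeout(args, timeout_seconds, cwd=None):
      # Run in a new session so the entire process tree can be signalled.
      job = subprocess.Popen(args, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, cwd=cwd,
                             preexec_fn=os.setsid)
      deadline = datetime.datetime.now() + datetime.timedelta(
          seconds=timeout_seconds)
      while datetime.datetime.now() < deadline:
        if job.poll() is not None:
          break  # Finished within the deadline, whatever the exit code.
        time.sleep(1)
      if job.poll() is None:
        # Still running: terminate the whole process group, then reap it.
        os.killpg(os.getpgid(job.pid), signal.SIGTERM)
      stdout, stderr = job.communicate()
      return job.returncode, stdout, stderr
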
diff --git a/catapult/perf_insights/perf_insights/endpoints/corpus_cleanup.py b/catapult/perf_insights/perf_insights/endpoints/corpus_cleanup.py
new file mode 100644
index 0000000..d2b34d6
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/endpoints/corpus_cleanup.py
@@ -0,0 +1,56 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import datetime
+import logging
+import webapp2
+
+from perf_insights import cloud_config
+from perf_insights.endpoints.cloud_mapper.cloud_helper import \
+    default_retry_params
+from perf_insights.trace_info import TraceInfo
+import cloudstorage as gcs
+
+BATCH_SIZE = 100
+MAX_DAYS = 30
+
+
+class CorpusCleanupPage(webapp2.RequestHandler):
+
+  def _delete_traces(self):
+    trace_bucket = cloud_config.Get().trace_upload_bucket
+    deleted_traces = 0
+
+    oldest_time = datetime.datetime.now() - datetime.timedelta(days=MAX_DAYS)
+    q = TraceInfo.query(TraceInfo.date < oldest_time)
+
+    for key in q.fetch(BATCH_SIZE, keys_only=True):
+      gcs_path = '/%s/%s.gz' % (trace_bucket, key.id())
+      try:
+        gcs.delete(gcs_path, retry_params=default_retry_params)
+      except gcs.NotFoundError:
+        pass
+
+      key.delete()
+      deleted_traces += 1
+
+    return deleted_traces
+
+  def get(self):
+    self.response.out.write('<html><body>')
+
+    while True:
+      deleted_traces = self._delete_traces()
+      self.response.out.write("<br><div><b>Traces Cleaned:</b> %s</div>"
+          % deleted_traces)
+
+      logging.info('Daily cleanup deleted %s traces.' % deleted_traces)
+
+      if deleted_traces < BATCH_SIZE:
+        break
+
+    self.response.out.write('</body></html>')
+
+
+app = webapp2.WSGIApplication([('/corpus_cleanup', CorpusCleanupPage)])
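The cleanup handler works in bounded batches: each pass deletes at most BATCH_SIZE expired traces (the datastore entity plus its GCS object), and the outer loop keeps going until a pass comes back short. A small sketch of that drain loop, with a placeholder delete_batch callable standing in for _delete_traces:

    import datetime

    BATCH_SIZE = 100
    MAX_DAYS = 30

    def cutoff_time(now=None):
      # Anything older than this is eligible for deletion.
      now = now or datetime.datetime.now()
      return now - datetime.timedelta(days=MAX_DAYS)

    def drain_in_batches(delete_batch, batch_size=BATCH_SIZE):
      # Keep deleting until a batch comes back short of the limit,
      # i.e. the backlog of expired traces is exhausted.
      total = 0
      while True:
        deleted = delete_batch()
        total += deleted
        if deleted < batch_size:
          return total
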
diff --git a/catapult/perf_insights/perf_insights/endpoints/upload.py b/catapult/perf_insights/perf_insights/endpoints/upload.py
index 97a18b4..2fab2dc 100644
--- a/catapult/perf_insights/perf_insights/endpoints/upload.py
+++ b/catapult/perf_insights/perf_insights/endpoints/upload.py
@@ -12,9 +12,8 @@
 from perf_insights import trace_info
 from perf_insights import cloud_config
 
-import third_party.cloudstorage as gcs
+import cloudstorage as gcs
 
-from google.appengine.api import app_identity
 from google.appengine.api import datastore_errors
 
 default_retry_params = gcs.RetryParams(initial_delay=0.2,
diff --git a/catapult/perf_insights/perf_insights/function_handle.html b/catapult/perf_insights/perf_insights/function_handle.html
index 66fcdb6..8c59394 100644
--- a/catapult/perf_insights/perf_insights/function_handle.html
+++ b/catapult/perf_insights/perf_insights/function_handle.html
@@ -1,6 +1,6 @@
 <!DOCTYPE html>
 <!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Copyright 2015 The Chromium Authors. All rights reserved.
 Use of this source code is governed by a BSD-style license that can be
 found in the LICENSE file.
 -->
@@ -20,7 +20,7 @@
 
   FunctionRegistry.getFunction = function(name) {
     return this.allFunctionsByName_[name];
-  }
+  };
 
   FunctionRegistry.register = function(func) {
     if (func.name === '')
@@ -57,26 +57,39 @@
 
   ModuleToLoad.fromDict = function(moduleDict) {
     return new ModuleToLoad(moduleDict.href, moduleDict.filename);
-  }
+  };
 
-  function FunctionHandle(modulesToLoad, functionName) {
+  function FunctionHandle(modulesToLoad, functionName, opt_options) {
     if (!(modulesToLoad instanceof Array))
       throw new Error('modulesToLoad in FunctionHandle must be an array');
     if (typeof(functionName) !== 'string')
-      throw new Error('functionName inFunctionHandle must be a string');
+      throw new Error('functionName in FunctionHandle must be a string');
     this.modulesToLoad = modulesToLoad;
     this.functionName = functionName;
+    this.options_ = opt_options;
   };
 
   FunctionHandle.prototype = {
+    get options() {
+      return this.options_;
+    },
+
     asDict: function() {
       return {
         'modules_to_load': this.modulesToLoad.map(
             function(m) {return m.asDict();}),
-        'function_name': this.functionName
+        'function_name': this.functionName,
+        'options': this.options_
       };
     },
 
+    asUserFriendlyString: function() {
+      var parts = this.modulesToLoad.map(function(mtl) {return mtl.filename});
+      parts.push(this.functionName);
+      parts.push(JSON.stringify(this.options_));
+      return parts.join(',');
+    },
+
     hasHrefs: function() {
       for (var module in this.modulesToLoad) {
         if (this.modulesToLoad[module].href !== undefined) {
@@ -87,10 +100,6 @@
     },
 
     load: function() {
-      if (this.modulesToLoad.length === 1 || this.functionName === undefined) {
-        return FunctionHandle.loadFromFilename_(this.modulesToLoad[0].filename);
-      }
-
       if (this.hasHrefs()) {
         var err = new Error(
             'FunctionHandle named ' + this.functionName +
@@ -102,7 +111,7 @@
       for (var module in this.modulesToLoad) {
         var filename = this.modulesToLoad[module].filename;
         try {
-          loadHTMLFile(filename);
+          HTMLImportsLoader.loadHTMLFile(filename);
         } catch (err) {
           err.name = 'FunctionLoadingError';
           throw err;
@@ -125,14 +134,15 @@
           return module.toString();
       });
       return 'FunctionHandle(modulesToLoad=[' + modulesToLoadStr + '], ' +
-          'functionName="' + this.functionName + '")';
+          'functionName="' + this.functionName + '", options="' +
+          JSON.stringify(this.options_) + '")';
     }
   };
 
   FunctionHandle.loadFromFilename_ = function(filename) {
     try {
       var numFunctionsBefore = FunctionRegistry.allFunctions.length;
-      loadHTMLFile(filename);
+      HTMLImportsLoader.loadHTMLFile(filename);
     } catch (err) {
       err.name = 'FunctionLoadingError';
       throw err;
@@ -150,11 +160,13 @@
   };
 
   FunctionHandle.fromDict = function(handleDict) {
+    var options = handleDict.options;
     if (handleDict.modules_to_load !== undefined) {
       var modulesToLoad = handleDict.modules_to_load.map(function(module) {
-         return ModuleToLoad.fromDict(module);});
+        return ModuleToLoad.fromDict(module);
+      });
     }
-    return new FunctionHandle(modulesToLoad, handleDict.function_name);
+    return new FunctionHandle(modulesToLoad, handleDict.function_name, options);
   };
 
   return {
diff --git a/catapult/perf_insights/perf_insights/function_handle.py b/catapult/perf_insights/perf_insights/function_handle.py
index de627f7..da545b7 100644
--- a/catapult/perf_insights/perf_insights/function_handle.py
+++ b/catapult/perf_insights/perf_insights/function_handle.py
@@ -3,6 +3,7 @@
 # found in the LICENSE file.
 
 import os
+import uuid
 
 
 class AbspathInvalidError(Exception):
@@ -39,9 +40,12 @@
 
 class FunctionHandle(object):
 
-  def __init__(self, modules_to_load=None, function_name=None):
+  def __init__(self, modules_to_load=None, function_name=None,
+               options=None, guid=None):
     self.modules_to_load = modules_to_load
     self.function_name = function_name
+    self.options = options
+    self._guid = guid if guid is not None else uuid.uuid4()
 
   def __repr__(self):
     return 'FunctionHandle(modules_to_load=[%s], function_name="%s")' % (
@@ -49,6 +53,10 @@
       self.function_name)
 
   @property
+  def guid(self):
+    return self._guid
+
+  @property
   def has_hrefs(self):
     return any(module.href for module in self.modules_to_load)
 
@@ -60,6 +68,8 @@
     if self.modules_to_load is not None:
       handle_dict['modules_to_load'] = [module.AsDict() for module in
                                         self.modules_to_load]
+    if self.options is not None:
+      handle_dict['options'] = self.options
 
     return handle_dict
 
@@ -101,8 +111,12 @@
     if handle_dict.get('modules_to_load') is not None:
       modules_to_load = [ModuleToLoad.FromDict(module_dict) for module_dict in
                          handle_dict['modules_to_load']]
+    else:
+      modules_to_load = []
+    options = handle_dict.get('options')
     return FunctionHandle(modules_to_load=modules_to_load,
-                          function_name=handle_dict['function_name'])
+                          function_name=handle_dict['function_name'],
+                          options=options)
 
   def AsUserFriendlyString(self, app):
     parts = [module.filename for module in
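With options now carried through serialization, a handle survives an AsDict/FromDict round trip together with its options. A hypothetical usage sketch, assuming the constructor, AsDict, and FromDict behave as in the hunks above; the module path, function name, and option key are made up:

    from perf_insights import function_handle

    module = function_handle.ModuleToLoad(filename='/foo.html')
    handle = function_handle.FunctionHandle(
        modules_to_load=[module], function_name='MyMapFunction',
        options={'threshold_ms': 50})

    handle_dict = handle.AsDict()   # includes 'options' when it is set
    restored = function_handle.FunctionHandle.FromDict(handle_dict)
    assert restored.options == {'threshold_ms': 50}
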
diff --git a/catapult/perf_insights/perf_insights/function_handle_test.html b/catapult/perf_insights/perf_insights/function_handle_test.html
index 0fc6203..18376e3 100644
--- a/catapult/perf_insights/perf_insights/function_handle_test.html
+++ b/catapult/perf_insights/perf_insights/function_handle_test.html
@@ -63,11 +63,12 @@
 
   test('asDictTest', function() {
     var module = new pi.ModuleToLoad('/foo');
-    var handle = new pi.FunctionHandle([module], 'Bar');
+    var handle = new pi.FunctionHandle([module], 'Bar', {'a': 'b'});
 
     assert.deepEqual(handle.asDict(), {
       modules_to_load: [{href: '/foo'}],
-      function_name: 'Bar'
+      function_name: 'Bar',
+      options: {'a': 'b'}
     });
   });
 
@@ -109,12 +110,12 @@
 
   test('toStringTest', function() {
     var module = new pi.ModuleToLoad('/foo');
-    var handle = new pi.FunctionHandle([module], 'Bar');
+    var handle = new pi.FunctionHandle([module], 'Bar', {'a': 'b'});
 
     assert.equal(
         handle.toString(),
         'FunctionHandle(modulesToLoad=[ModuleToLoad(href="/foo")], ' +
-        'functionName="Bar")');
+        'functionName="Bar", options="{"a":"b"}")');
   });
 });
 
diff --git a/catapult/perf_insights/perf_insights/function_handle_unittest.py b/catapult/perf_insights/perf_insights/function_handle_unittest.py
index ab48dc6..a6a6382 100644
--- a/catapult/perf_insights/perf_insights/function_handle_unittest.py
+++ b/catapult/perf_insights/perf_insights/function_handle_unittest.py
@@ -6,6 +6,7 @@
 
 from perf_insights import function_handle
 
+
 class ModuleToLoadTests(unittest.TestCase):
 
   def testExactlyOneHrefOrFilename(self):
@@ -69,7 +70,7 @@
 
     self.assertEquals(
         handle.AsDict(), {
-            'modules_to_load' : [{'href': '/foo'}],
+            'modules_to_load': [{'href': '/foo'}],
             'function_name': 'Bar'
         })
 
diff --git a/catapult/perf_insights/perf_insights/gce_instance_map_job.py b/catapult/perf_insights/perf_insights/gce_instance_map_job.py
index d625545..234c7af 100644
--- a/catapult/perf_insights/perf_insights/gce_instance_map_job.py
+++ b/catapult/perf_insights/perf_insights/gce_instance_map_job.py
@@ -1,24 +1,18 @@
 # Copyright (c) 2015 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
-import Queue as queue
 import argparse
 import json
 import logging
 import os
 import shutil
-import sys
 import tempfile
-import threading
-import traceback
 
-import perf_insights
 from perf_insights import cloud_storage
-from perf_insights import gcs_trace_handle
 from perf_insights import map_runner
 from perf_insights import function_handle
+from perf_insights.mre import file_handle as file_handle_module
 from perf_insights.results import json_output_formatter
-from perf_insights.value import run_info as run_info_module
 
 
 _DEFAULT_PARALLEL_DOWNLOADS = 16
@@ -59,12 +53,7 @@
 
   trace_handles = []
   for trace_url in trace_urls:
-    run_info = run_info_module.RunInfo(
-        url=trace_url,
-        display_name=trace_url,
-        run_id=trace_url)
-
-    th = gcs_trace_handle.GCSTraceHandle(run_info, temp_directory)
+    th = file_handle_module.GCSFileHandle(trace_url, temp_directory)
     trace_handles.append(th)
   return trace_handles
 
@@ -104,13 +93,14 @@
                                   output_formatters=[output_formatter])
     results = runner.Run()
 
-    # TODO: gsutil cp file_name gs://output
-    cloud_storage.Copy(file_name, args.output_url)
+    if args.map_function_handle:
+      results = runner.RunMapper()
+    elif args.reduce_function_handle:
+      results = runner.RunReducer(trace_handles)
 
-    if not results.had_failures:
-      return 0
-    else:
-      return 255
+    output_formatter.Format(results)
+
+    return results
   finally:
     ofile.close()
     os.unlink(map_file)
diff --git a/catapult/perf_insights/perf_insights/gcs_trace_handle.py b/catapult/perf_insights/perf_insights/gcs_trace_handle.py
deleted file mode 100644
index 1c169c3..0000000
--- a/catapult/perf_insights/perf_insights/gcs_trace_handle.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright (c) 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-import os
-
-from perf_insights import cloud_storage
-from perf_insights import trace_handle
-
-
-class GCSTraceHandle(trace_handle.TraceHandle):
-  def __init__(self, run_info, cache_directory):
-    super(GCSTraceHandle, self).__init__(run_info)
-    file_name = run_info.run_id.split('/')[-1]
-    self.cache_file = os.path.join(
-        cache_directory, file_name + '.gz')
-
-  def Open(self):
-    if not os.path.exists(self.cache_file):
-      try:
-        cloud_storage.Copy(self.run_info.url, self.cache_file)
-      except cloud_storage.CloudStorageError:
-        return None
-    return open(self.cache_file, 'r')
diff --git a/catapult/perf_insights/perf_insights/in_memory_trace_handle.py b/catapult/perf_insights/perf_insights/in_memory_trace_handle.py
deleted file mode 100644
index 3001480..0000000
--- a/catapult/perf_insights/perf_insights/in_memory_trace_handle.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright (c) 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-import os
-import tempfile
-
-from perf_insights import trace_handle
-
-
-class InMemoryTraceHandle(trace_handle.TraceHandle):
-  def __init__(self, run_info, data):
-    super(InMemoryTraceHandle, self).__init__(run_info)
-    self.data = data
-
-  def Open(self):
-    f = tempfile.NamedTemporaryFile()
-    f.write(self.data)
-    f.flush()
-    f.seek(0)
-    return f
-
diff --git a/catapult/perf_insights/perf_insights/local_directory_corpus_driver.py b/catapult/perf_insights/perf_insights/local_directory_corpus_driver.py
index fcbf5cf..bb4a175 100644
--- a/catapult/perf_insights/perf_insights/local_directory_corpus_driver.py
+++ b/catapult/perf_insights/perf_insights/local_directory_corpus_driver.py
@@ -4,8 +4,7 @@
 import os
 
 from perf_insights import corpus_driver
-from perf_insights import local_file_trace_handle
-from perf_insights.value import run_info as run_info_module
+from perf_insights.mre import file_handle
 
 
 def _GetFilesIn(basedir):
@@ -26,6 +25,7 @@
   data_files.sort()
   return data_files
 
+
 def _GetTagsForRelPath(relpath):
   # Tags.
   sub_dir = os.path.dirname(relpath)
@@ -34,6 +34,7 @@
   parts = sub_dir.split(os.sep)
   return [p for p in parts if len(p) > 0]
 
+
 def _GetMetadataForFilename(base_directory, filename):
   relpath = os.path.relpath(filename, base_directory)
   tags = _GetTagsForRelPath(relpath)
@@ -43,10 +44,13 @@
   # TODO(nduca): Add modification time to metadata.
   return metadata
 
+
 def _DefaultUrlResover(abspath):
   return 'file:///%s' % abspath
 
+
 class LocalDirectoryCorpusDriver(corpus_driver.CorpusDriver):
+
   def __init__(self, trace_directory, url_resolver=_DefaultUrlResover):
     self.directory = trace_directory
     self.url_resolver = url_resolver
@@ -76,16 +80,11 @@
       if not query.Eval(metadata, len(trace_handles)):
         continue
 
-      # Make URL relative to server root.
       url = self.url_resolver(filename)
       if url is None:
         url = _DefaultUrlResover(filename)
-      run_info = run_info_module.RunInfo(
-          url=url,
-          display_name=rel_filename,
-          metadata=metadata)
 
-      th = local_file_trace_handle.LocalFileTraceHandle(run_info, filename)
+      th = file_handle.URLFileHandle(url, 'file://' + filename)
       trace_handles.append(th)
 
     return trace_handles
diff --git a/catapult/perf_insights/perf_insights/local_directory_corpus_driver_unittest.py b/catapult/perf_insights/perf_insights/local_directory_corpus_driver_unittest.py
index c9f9cca..7456fa8 100644
--- a/catapult/perf_insights/perf_insights/local_directory_corpus_driver_unittest.py
+++ b/catapult/perf_insights/perf_insights/local_directory_corpus_driver_unittest.py
@@ -1,14 +1,18 @@
 # Copyright (c) 2015 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
-
+import os
 import unittest
+
 from perf_insights import local_directory_corpus_driver
 
+
 class LocalDirectoryCorpusDriverTests(unittest.TestCase):
+
   def testTags(self):
     self.assertEquals(
         local_directory_corpus_driver._GetTagsForRelPath('a.json'), [])
     self.assertEquals(
-        local_directory_corpus_driver._GetTagsForRelPath('/b/c/a.json'),
+        local_directory_corpus_driver._GetTagsForRelPath(
+          os.path.join('b', 'c', 'a.json')),
         ['b', 'c'])
diff --git a/catapult/perf_insights/perf_insights/local_file_trace_handle.py b/catapult/perf_insights/perf_insights/local_file_trace_handle.py
deleted file mode 100644
index 0b28efe..0000000
--- a/catapult/perf_insights/perf_insights/local_file_trace_handle.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright (c) 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-from perf_insights import trace_handle
-
-
-class LocalFileTraceHandle(trace_handle.TraceHandle):
-  def __init__(self, run_info, filename):
-    super(LocalFileTraceHandle, self).__init__(run_info)
-    self.filename = filename
-
-  def Open(self):
-    return open(self.filename, 'r')
diff --git a/catapult/perf_insights/perf_insights/map_runner.py b/catapult/perf_insights/perf_insights/map_runner.py
index 158cd48..3d52e42 100644
--- a/catapult/perf_insights/perf_insights/map_runner.py
+++ b/catapult/perf_insights/perf_insights/map_runner.py
@@ -1,35 +1,36 @@
 # Copyright 2015 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
-import Queue as queue
-import os
+import json
 import multiprocessing
 import sys
-import threading
-import time
+import tempfile
 
 from perf_insights import map_single_trace
-from perf_insights import results as results_module
+from perf_insights.mre import file_handle
+from perf_insights.mre import mre_result
+from perf_insights.mre import reduce_map_results
 from perf_insights.mre import threaded_work_queue
-from perf_insights import value as value_module
-
 from perf_insights.results import gtest_progress_reporter
 
 AUTO_JOB_COUNT = -1
 
+
 class MapError(Exception):
+
   def __init__(self, *args):
     super(MapError, self).__init__(*args)
-    self.run_info = None
+    self.canonical_url = None
+
 
 class MapRunner(object):
-  def __init__(self, trace_handles, map_function_handle,
+  def __init__(self, trace_handles, job,
                stop_on_error=False, progress_reporter=None,
                jobs=AUTO_JOB_COUNT,
                output_formatters=None):
-    self._map_function_handle = map_function_handle
+    self._job = job
     self._stop_on_error = stop_on_error
-    self._failed_run_info_to_dump = None
+    self._failed_canonical_url_to_dump = None
     if progress_reporter is None:
       self._progress_reporter = gtest_progress_reporter.GTestProgressReporter(
                                     sys.stdout)
@@ -39,40 +40,36 @@
 
     self._trace_handles = trace_handles
     self._num_traces_merged_into_results = 0
-    self._results = None
+    self._map_results = None
+    self._map_results_file = None
 
     if jobs == AUTO_JOB_COUNT:
       jobs = multiprocessing.cpu_count()
     self._wq = threaded_work_queue.ThreadedWorkQueue(num_threads=jobs)
 
   def _ProcessOneTrace(self, trace_handle):
-    run_info = trace_handle.run_info
-    subresults = results_module.Results()
-    run_reporter = self._progress_reporter.WillRun(run_info)
-    map_single_trace.MapSingleTrace(
-        subresults,
+    canonical_url = trace_handle.canonical_url
+    run_reporter = self._progress_reporter.WillRun(canonical_url)
+    result = map_single_trace.MapSingleTrace(
         trace_handle,
-        self._map_function_handle)
+        self._job)
 
-    had_failure = subresults.DoesRunContainFailure(run_info)
+    had_failure = len(result.failures) > 0
 
-    for v in subresults.all_values:
-      run_reporter.DidAddValue(v)
+    for f in result.failures:
+      run_reporter.DidAddFailure(f)
     run_reporter.DidRun(had_failure)
 
-    self._wq.PostMainThreadTask(self._MergeResultsToIntoMaster,
-                                trace_handle, subresults)
+    self._wq.PostMainThreadTask(self._MergeResultIntoMaster, result)
 
-  def _MergeResultsToIntoMaster(self, trace_handle, subresults):
-    self._results.Merge(subresults)
+  def _MergeResultIntoMaster(self, result):
+    self._map_results.append(result)
 
-    run_info = trace_handle.run_info
-    had_failure = subresults.DoesRunContainFailure(run_info)
+    had_failure = len(result.failures) > 0
     if self._stop_on_error and had_failure:
       err = MapError("Mapping error")
-      err.run_info = run_info
       self._AbortMappingDueStopOnError(err)
-      return
+      raise err
 
     self._num_traces_merged_into_results += 1
     if self._num_traces_merged_into_results == len(self._trace_handles):
@@ -84,31 +81,68 @@
   def _AllMappingDone(self):
     self._wq.Stop()
 
+  def RunMapper(self):
+    self._map_results = []
+
+    if not self._trace_handles:
+      err = MapError("No trace handles specified.")
+      raise err
+
+    if self._job.map_function_handle:
+      for trace_handle in self._trace_handles:
+        self._wq.PostAnyThreadTask(self._ProcessOneTrace, trace_handle)
+
+      self._wq.Run()
+
+    return self._map_results
+
+  def _Reduce(self, job_results, key, map_results_file_name):
+    reduce_map_results.ReduceMapResults(job_results, key,
+                                        map_results_file_name, self._job)
+
+  def RunReducer(self, reduce_handles_with_keys):
+    if self._job.reduce_function_handle:
+      self._wq.Reset()
+
+      job_results = mre_result.MreResult()
+
+      for cur in reduce_handles_with_keys:
+        handle = cur['handle']
+        for key in cur['keys']:
+          self._wq.PostAnyThreadTask(
+              self._Reduce, job_results, key, handle)
+
+      def _Stop():
+        self._wq.Stop()
+
+      self._wq.PostAnyThreadTask(_Stop)
+      self._wq.Run()
+
+      return job_results
+    return None
+
+  def _ConvertResultsToFileHandlesAndKeys(self, results_list):
+    handles_and_keys = []
+    for current_result in results_list:
+      _, path = tempfile.mkstemp()
+      with open(path, 'w') as results_file:
+        json.dump(current_result.AsDict(), results_file)
+      rh = file_handle.URLFileHandle(path, 'file://' + path)
+      handles_and_keys.append(
+          {'handle': rh, 'keys': current_result.pairs.keys()})
+    return handles_and_keys
+
   def Run(self):
-    self._results = results_module.Results()
+    mapper_results = self.RunMapper()
+    reduce_handles = self._ConvertResultsToFileHandlesAndKeys(mapper_results)
+    reducer_results = self.RunReducer(reduce_handles)
 
-    for trace_handle in self._trace_handles:
-      self._wq.PostAnyThreadTask(self._ProcessOneTrace, trace_handle)
+    if reducer_results:
+      results = [reducer_results]
+    else:
+      results = mapper_results
 
-    err = self._wq.Run()
-
-    self._progress_reporter.DidFinishAllRuns(self._results)
     for of in self._output_formatters:
-      of.Format(self._results)
+      of.Format(results)
 
-    if err:
-      self._PrintFailedRunInfo(err.run_info)
-
-    results = self._results
-    self._results = None
     return results
-
-  def _PrintFailedRunInfo(self, run_info):
-    sys.stderr.write('\n\nWhile mapping %s:\n' %
-                     run_info.display_name)
-    failures = [v for v in self._results.all_values
-                if (v.run_info == run_info and
-                    isinstance(v, value_module.FailureValue))]
-    for failure in failures:
-      sys.stderr.write(failure.GetGTestPrintString())
-      sys.stderr.write('\n')
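MapRunner now takes a Job rather than a bare map function handle, and Run() chains the two phases itself: mapper output is spilled to temporary JSON files, wrapped as URLFileHandles keyed by the pairs each result produced, and then fed to the reducer if one is present. A hypothetical invocation under those assumptions; the trace path and mapper module are made up, and Job construction follows the pattern used elsewhere in this change:

    from perf_insights import function_handle
    from perf_insights import map_runner
    from perf_insights.mre import file_handle
    from perf_insights.mre import job as job_module
    from perf_insights.results import json_output_formatter

    map_module = function_handle.ModuleToLoad(filename='/mapper.html')
    map_handle = function_handle.FunctionHandle(
        modules_to_load=[map_module], function_name='MyMapFunction')
    job = job_module.Job(map_handle, None)  # map-only, no reducer

    trace_handles = [file_handle.URLFileHandle(
        'file:///tmp/trace.json', 'file:///tmp/trace.json')]

    with open('results.json', 'w') as out:
      formatter = json_output_formatter.JSONOutputFormatter(out)
      runner = map_runner.MapRunner(trace_handles, job,
                                    output_formatters=[formatter])
      results = runner.Run()
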
diff --git a/catapult/perf_insights/perf_insights/map_single_trace.html b/catapult/perf_insights/perf_insights/map_single_trace.html
index 622f60a..583aae1 100644
--- a/catapult/perf_insights/perf_insights/map_single_trace.html
+++ b/catapult/perf_insights/perf_insights/map_single_trace.html
@@ -4,61 +4,36 @@
 Use of this source code is governed by a BSD-style license that can be
 found in the LICENSE file.
 -->
+<link rel="import" href="/perf_insights/mre/failure.html">
 <link rel="import" href="/tracing/base/base.html">
+<link rel="import" href="/tracing/base/iteration_helpers.html">
 <link rel="import" href="/tracing/base/utils.html">
 <link rel="import" href="/tracing/base/xhr.html">
+<link rel="import" href="/tracing/extras/full_config.html">
 <link rel="import" href="/tracing/importer/import.html">
 <link rel="import" href="/tracing/model/model.html">
-<link rel="import" href="/tracing/extras/full_config.html">
-
-<link rel="import" href="/perf_insights/value/value.html">
 
 <script>
 'use strict';
 
 tr.exportTo('pi', function() {
 
-  var FailureValue = pi.v.FailureValue;
+  var Failure = pi.mre.Failure;
 
-  function runAndConvertErrorsToFailureValues(results, runInfo, cb, opt_this) {
+  function runAndConvertErrorsToFailures(result, job,
+                                         traceHandle, cb, opt_this) {
     try {
       cb.call(opt_this);
     } catch (err) {
       var err = tr.b.normalizeException(err);
-      results.addValue(new FailureValue(
-          runInfo, err.typeName,
-          {description: err.message,
-           stack: err.stack}));
+      // TODO(eakuefner): Set job once reduction is implemented.
+      result.addFailure(new Failure(
+          job, job.mapFunctionHandle.asUserFriendlyString(),
+          traceHandle.canonicalUrl, err.typeName, err.message, err.stack));
     }
   }
 
-  function mapSingleTrace(results, runInfo, traceData, mapFunction,
-                          opt_metadata) {
-    // Load the model.
-    var model = new tr.Model();
-    try {
-      var importOptions = new tr.importer.ImportOptions();
-      importOptions.pruneEmptyContainers = false;
-      importOptions.showImportWarnings = false;
-
-      var i = new tr.importer.Import(model, importOptions);
-      i.importTraces([traceData]);
-    } catch (ex) {
-      ex.name = 'TraceImportError';
-      throw ex;
-    }
-
-    // Mixin the metadata in case its interesting to the mapper.
-    var metadata = opt_metadata || {};
-    for (var k in metadata) {
-      if (model.metadata[k] !== undefined) {
-        var err = new Error(k + ' is on model and corpus tables!');
-        err.name = 'TraceImportError';
-        throw err;
-      }
-      model.metadata[k] = metadata[k];
-    }
-
+  function mapSingleTrace(result, model, options, mapFunction) {
     // Do not map the trace if its timer is low resolution.
     if (!model.isTimeHighResolution) {
       var err = new Error('Trace doesn\'t have high resolution time, ' +
@@ -68,17 +43,20 @@
     }
 
     // Map the function.
-    var numResultsBeforeMapping = results.allValues.length;
+    var numPairsBeforeMapping = tr.b.dictionaryLength(result.pairs);
+    var numFailuresBeforeMapping = result.failures.length;
     try {
-      mapFunction(results, runInfo, model);
+      mapFunction(result, model, options);
     } catch (ex) {
       ex.name = 'MapFunctionError';
       throw ex;
     }
 
-    if (results.allValues.length === numResultsBeforeMapping) {
-      var err = new Error('Mapper did not add any results! ' +
-              'Add a SkipValue if this was intentional.');
+    var addedPairs = (tr.b.dictionaryLength(result.pairs) >
+        numPairsBeforeMapping);
+    var addedFailures = result.failures.length > numFailuresBeforeMapping;
+    if (!(addedPairs || addedFailures)) {
+      var err = new Error('Mapper did not add any results!');
       err.name = 'NoResultsAddedError';
       throw err;
     }
@@ -86,7 +64,7 @@
 
   return {
     mapSingleTrace: mapSingleTrace,
-    runAndConvertErrorsToFailureValues: runAndConvertErrorsToFailureValues
+    runAndConvertErrorsToFailures: runAndConvertErrorsToFailures
   };
 });
 </script>
diff --git a/catapult/perf_insights/perf_insights/map_single_trace.py b/catapult/perf_insights/perf_insights/map_single_trace.py
index 88fc651..54d1bd4 100644
--- a/catapult/perf_insights/perf_insights/map_single_trace.py
+++ b/catapult/perf_insights/perf_insights/map_single_trace.py
@@ -6,129 +6,130 @@
 import re
 import sys
 import tempfile
-import traceback
+import types
 
-
-from perf_insights import value as value_module
 import perf_insights_project
 import vinn
 
+from perf_insights.mre import failure
+from perf_insights.mre import mre_result
+
+_MAP_SINGLE_TRACE_CMDLINE_PATH = os.path.join(
+    perf_insights_project.PerfInsightsProject.perf_insights_src_path,
+    'map_single_trace_cmdline.html')
 
 class TemporaryMapScript(object):
   def __init__(self, js):
-    self.file = tempfile.NamedTemporaryFile()
-    self.file.write("""
+    temp_file = tempfile.NamedTemporaryFile(delete=False)
+    temp_file.write("""
 <!DOCTYPE html>
-<link rel="import" href="/perf_insights/value/value.html">
 <script>
 %s
 </script>
 """ % js)
-    self.file.flush()
-    self.file.seek(0)
+    temp_file.close()
+    self._filename = temp_file.name
 
   def __enter__(self):
     return self
 
   def __exit__(self, *args, **kwargs):
-    self.file.close()
+    os.remove(self._filename)
+    self._filename = None
 
   @property
   def filename(self):
-      return self.file.name
+    return self._filename
 
 
-class FunctionLoadingErrorValue(value_module.FailureValue):
+class FunctionLoadingFailure(failure.Failure):
   pass
 
-class FunctionNotDefinedErrorValue(value_module.FailureValue):
+class FunctionNotDefinedFailure(failure.Failure):
   pass
 
-class MapFunctionErrorValue(value_module.FailureValue):
+class MapFunctionFailure(failure.Failure):
   pass
 
-class TraceImportErrorValue(value_module.FailureValue):
+class FileLoadingFailure(failure.Failure):
   pass
 
-class NoResultsAddedErrorValue(value_module.FailureValue):
+class TraceImportFailure(failure.Failure):
+  pass
+
+class NoResultsAddedFailure(failure.Failure):
   pass
 
 class InternalMapError(Exception):
   pass
 
 _FAILURE_NAME_TO_FAILURE_CONSTRUCTOR = {
-  'FunctionLoadingError': FunctionLoadingErrorValue,
-  'FunctionNotDefinedError': FunctionNotDefinedErrorValue,
-  'TraceImportError': TraceImportErrorValue,
-  'MapFunctionError': MapFunctionErrorValue,
-  'NoResultsAddedError': NoResultsAddedErrorValue
+  'FileLoadingError': FileLoadingFailure,
+  'FunctionLoadingError': FunctionLoadingFailure,
+  'FunctionNotDefinedError': FunctionNotDefinedFailure,
+  'TraceImportError': TraceImportFailure,
+  'MapFunctionError': MapFunctionFailure,
+  'NoResultsAddedError': NoResultsAddedFailure
 }
 
-def MapSingleTrace(results, trace_handle, map_function_handle):
+
+def MapSingleTrace(trace_handle,
+                   job,
+                   extra_import_options=None):
+  assert (type(extra_import_options) is types.NoneType or
+          type(extra_import_options) is types.DictType), (
+         'extra_import_options should be a dict or None.')
   project = perf_insights_project.PerfInsightsProject()
 
   all_source_paths = list(project.source_paths)
+  all_source_paths.append(project.perf_insights_root_path)
 
-  pi_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
-                                         '..'))
-  all_source_paths.append(pi_path)
-  run_info = trace_handle.run_info
+  result = mre_result.MreResult()
 
-  trace_file = trace_handle.Open()
-  if not trace_file:
-    results.AddValue(value_module.FailureValue(
-        run_info,
-        'Error', 'error while opening trace',
-        'error while opening trace', 'Unknown stack'))
-    return
-
-  try:
+  with trace_handle.PrepareFileForProcessing() as prepared_trace_handle:
     js_args = [
-      json.dumps(run_info.AsDict()),
-      json.dumps(map_function_handle.AsDict()),
-      os.path.abspath(trace_file.name),
-      json.dumps(run_info.metadata)
+      json.dumps(prepared_trace_handle.AsDict()),
+      json.dumps(job.AsDict()),
     ]
+    if extra_import_options:
+      js_args.append(json.dumps(extra_import_options))
 
     res = vinn.RunFile(
-      os.path.join(pi_path, 'perf_insights', 'map_single_trace_cmdline.html'),
-      source_paths=all_source_paths,
+      _MAP_SINGLE_TRACE_CMDLINE_PATH, source_paths=all_source_paths,
       js_args=js_args)
-  finally:
-    trace_file.close()
 
   if res.returncode != 0:
     try:
       sys.stderr.write(res.stdout)
     except Exception:
       pass
-    results.AddValue(value_module.FailureValue(
-        run_info,
+    result.AddFailure(failure.Failure(
+        job.map_function_handle.AsUserFriendlyString(),
+        trace_handle.canonical_url,
         'Error', 'vinn runtime error while mapping trace.',
         'vinn runtime error while mapping trace.', 'Unknown stack'))
-    return
+    return result
 
-
-  found_at_least_one_result=False
   for line in res.stdout.split('\n'):
-    m = re.match('^MAP_RESULT_VALUE: (.+)', line, re.DOTALL)
+    m = re.match('^MRE_RESULT: (.+)', line, re.DOTALL)
     if m:
       found_dict = json.loads(m.group(1))
-      if found_dict['type'] == 'failure':
-        cls = _FAILURE_NAME_TO_FAILURE_CONSTRUCTOR.get(found_dict['name'], None)
-        if not cls:
-          cls = value_module.FailureValue
-      else:
-        cls = value_module.Value
-      found_value = cls.FromDict(run_info, found_dict)
+      failures = [failure.Failure.FromDict(
+                    f, job, _FAILURE_NAME_TO_FAILURE_CONSTRUCTOR)
+                  for f in found_dict['failures']]
 
-      results.AddValue(found_value)
-      found_at_least_one_result = True
+      for f in failures:
+        result.AddFailure(f)
+
+      for k, v in found_dict['pairs'].iteritems():
+        result.AddPair(k, v)
 
     else:
       if len(line) > 0:
         sys.stderr.write(line)
         sys.stderr.write('\n')
 
-  if found_at_least_one_result == False:
+  if not (len(result.pairs) or len(result.failures)):
     raise InternalMapError('Internal error: No results were produced!')
+
+  return result
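MapSingleTrace now communicates with the vinn subprocess through a single MRE_RESULT line on stdout whose JSON payload carries 'failures' and 'pairs'; all other output is treated as diagnostics. A small sketch of pulling that payload out of captured output; the helper name is illustrative:

    import json
    import re

    _MRE_RESULT_RE = re.compile(r'^MRE_RESULT: (.+)', re.DOTALL)

    def parse_mre_result(stdout):
      # Returns the decoded {'failures': [...], 'pairs': {...}} dict from
      # the MRE_RESULT line, or None if the subprocess never printed one.
      for line in stdout.split('\n'):
        m = _MRE_RESULT_RE.match(line)
        if m:
          return json.loads(m.group(1))
      return None
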
diff --git a/catapult/perf_insights/perf_insights/map_single_trace_cmdline.html b/catapult/perf_insights/perf_insights/map_single_trace_cmdline.html
index 9d4576c..3388f20 100644
--- a/catapult/perf_insights/perf_insights/map_single_trace_cmdline.html
+++ b/catapult/perf_insights/perf_insights/map_single_trace_cmdline.html
@@ -6,55 +6,72 @@
 -->
 <link rel="import" href="/perf_insights/function_handle.html">
 <link rel="import" href="/perf_insights/map_single_trace.html">
-<link rel="import" href="/perf_insights/value/run_info.html">
-<link rel="import" href="/perf_insights/results/results.html">
+<link rel="import" href="/perf_insights/mre/failure.html">
+<link rel="import" href="/perf_insights/mre/file_handle.html">
+<link rel="import" href="/perf_insights/mre/job.html">
+<link rel="import" href="/perf_insights/mre/mre_result.html">
 
 <script>
 'use strict';
 
 tr.exportTo('pi', function() {
 
-  var FailureValue = pi.v.FailureValue;
+  var Failure = pi.mre.Failure;
+
+  function createModelFromTraceData(traceData,
+                                    canonicalUrl,
+                                    opt_extraImportOptions) {
+    var model = new tr.Model();
+    try {
+      var importOptions = new tr.importer.ImportOptions();
+      importOptions.pruneEmptyContainers = false;
+      importOptions.showImportWarnings = false;
+      if (opt_extraImportOptions !== undefined) {
+        for (var property in opt_extraImportOptions) {
+          if (opt_extraImportOptions.hasOwnProperty(property)) {
+            importOptions[property] = opt_extraImportOptions[property];
+          }
+        }
+      }
+
+      var i = new tr.importer.Import(model, importOptions);
+      i.importTraces([traceData]);
+    } catch (ex) {
+      ex.name = 'TraceImportError';
+      throw ex;
+    }
+
+    model.canonicalUrlThatCreatedThisTrace = canonicalUrl;
+
+    return model;
+  }
 
   function mapSingleTraceMain(args) {
-    if (args.length !== 4)
-      throw new Error('Must provide four arguments');
+    if (args.length !== 2 && args.length !== 3)
+      throw new Error('Must provide two or three arguments.');
 
     var options = {
-      runInfo: pi.v.RunInfo.fromDict(JSON.parse(args[0])),
-      mapFunctionHandle: pi.FunctionHandle.fromDict(JSON.parse(args[1])),
-      filenameToMap: args[2],
-      metadata: JSON.parse(args[3])
+      traceHandle: pi.mre.FileHandle.fromDict(JSON.parse(args[0])),
+      job: pi.mre.Job.fromDict(JSON.parse(args[1])),
+      extraImportOptions: args.length === 3 ? JSON.parse(args[2]) : undefined
     };
 
-    var results = new pi.r.Results();
-    results.willRun(options.runInfo);
+    var result = new pi.mre.MreResult();
 
-    pi.runAndConvertErrorsToFailureValues(
-        results, options.runInfo,
+    var canonicalUrl = options.traceHandle.canonicalUrl;
+
+    pi.runAndConvertErrorsToFailures(
+        result, options.job, options.traceHandle,
         function() {
-          var mapFunction = options.mapFunctionHandle.load();
-
-          // Read the mapfile.
-          try {
-            var traceData = tr.b.getSync('file://' + options.filenameToMap);
-          } catch (ex) {
-            var err = new Error('Could not open ' + options.filenameToMap);
-            err.name = 'TraceImportError';
-            throw err;
-          }
-
-          pi.mapSingleTrace(results, options.runInfo, traceData,
-                            mapFunction, options.metadata);
+          var mapFunction = options.job.mapFunctionHandle.load();
+          var traceData = options.traceHandle.load();
+          var model = createModelFromTraceData(
+              traceData, canonicalUrl, options.extraImportOptions);
+          var opt_options = options.job.mapFunctionHandle.options;
+          pi.mapSingleTrace(result, model, opt_options, mapFunction);
         });
 
-    results.didRun(options.runInfo);
-    results.didFinishAllRuns();
-
-    results.allValues.forEach(function(value) {
-      var valueAsDict = value.asDict();
-      console.log('MAP_RESULT_VALUE: ' + JSON.stringify(valueAsDict));
-    });
+    console.log('MRE_RESULT: ' + JSON.stringify(result.asDict()));
     return 0;
   }
 
@@ -67,4 +84,3 @@
   quit(pi.mapSingleTraceMain(sys.argv.slice(1)));
 
 </script>
-
diff --git a/catapult/perf_insights/perf_insights/map_single_trace_unittest.py b/catapult/perf_insights/perf_insights/map_single_trace_unittest.py
index d44b4bb..c94f779 100644
--- a/catapult/perf_insights/perf_insights/map_single_trace_unittest.py
+++ b/catapult/perf_insights/perf_insights/map_single_trace_unittest.py
@@ -2,205 +2,151 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 import json
-import tempfile
 import unittest
 
-from perf_insights import in_memory_trace_handle
-from perf_insights import map_single_trace
 from perf_insights import function_handle
-from perf_insights import results as results_module
-from perf_insights import value as value_module
-from perf_insights.value import run_info as run_info_module
+from perf_insights import map_single_trace
+from perf_insights.mre import file_handle
+from perf_insights.mre import job as job_module
 
 
 def _Handle(filename):
   module = function_handle.ModuleToLoad(filename=filename)
-  return function_handle.FunctionHandle(modules_to_load=[module],
-                                        function_name='MyMapFunction')
+  map_handle = function_handle.FunctionHandle(
+      modules_to_load=[module], function_name='MyMapFunction')
+  return job_module.Job(map_handle, None)
+
+
 class MapSingleTraceTests(unittest.TestCase):
 
   def testPassingMapScript(self):
-    run_info = run_info_module.RunInfo('file:///a.json', '/a.json',
-                                       metadata={'m': 1})
     events = [
       {'pid': 1, 'tid': 2, 'ph': 'X', 'name': 'a', 'cat': 'c',
        'ts': 0, 'dur': 10, 'args': {}},
       {'pid': 1, 'tid': 2, 'ph': 'X', 'name': 'b', 'cat': 'c',
        'ts': 3, 'dur': 5, 'args': {}}
     ]
-    trace_handle = in_memory_trace_handle.InMemoryTraceHandle(
-        run_info, json.dumps(events));
+    trace_handle = file_handle.InMemoryFileHandle(
+        '/a.json', json.dumps(events))
 
-
-    results = results_module.Results()
     with map_single_trace.TemporaryMapScript("""
       pi.FunctionRegistry.register(
-          function MyMapFunction(results, run_info, model) {
-            results.addValue(new pi.v.DictValue(
-              run_info,
-              'result', {
+          function MyMapFunction(result, model) {
+            var canonicalUrl = model.canonicalUrlThatCreatedThisTrace;
+            result.addPair('result', {
                 numProcesses: model.getAllProcesses().length
-              }));
+              });
           });
     """) as map_script:
-      map_single_trace.MapSingleTrace(results, trace_handle,
-                                      _Handle(map_script.filename))
+      result = map_single_trace.MapSingleTrace(trace_handle,
+                                                _Handle(map_script.filename))
 
-    self.assertFalse(results.failure_values)
-    v = results.FindValueNamed('result')
-    self.assertEquals(v['numProcesses'], 1)
+    self.assertFalse(result.failures)
+    r = result.pairs['result']
+    self.assertEquals(r['numProcesses'], 1)
 
   def testTraceDidntImport(self):
-    run_info = run_info_module.RunInfo('file:///a.json', '/a.json',
-                                       metadata={'m': 1})
     trace_string = 'This is intentionally not a trace-formatted string.'
-    trace_handle = in_memory_trace_handle.InMemoryTraceHandle(
-        run_info, trace_string);
+    trace_handle = file_handle.InMemoryFileHandle(
+        '/a.json', trace_string)
 
-
-    results = results_module.Results()
     with map_single_trace.TemporaryMapScript("""
       pi.FunctionRegistry.register(
-          function MyMapFunction(results, run_info, model) {
+          function MyMapFunction(results, model) {
           });
     """) as map_script:
-      map_single_trace.MapSingleTrace(results, trace_handle,
-                                      _Handle(map_script.filename))
+      result = map_single_trace.MapSingleTrace(trace_handle,
+                                               _Handle(map_script.filename))
 
-    self.assertEquals(len(results.all_values), 1)
-    v = results.all_values[0]
-    self.assertIsInstance(v, map_single_trace.TraceImportErrorValue);
+    self.assertEquals(len(result.failures), 1)
+    self.assertEquals(len(result.pairs), 0)
+    f = result.failures[0]
+    self.assertIsInstance(f, map_single_trace.TraceImportFailure)
 
   def testMapFunctionThatThrows(self):
-    run_info = run_info_module.RunInfo('file:///a.json', '/a.json',
-                                       metadata={'m': 1})
     events = [
       {'pid': 1, 'tid': 2, 'ph': 'X', 'name': 'a', 'cat': 'c',
        'ts': 0, 'dur': 10, 'args': {}},
       {'pid': 1, 'tid': 2, 'ph': 'X', 'name': 'b', 'cat': 'c',
        'ts': 3, 'dur': 5, 'args': {}}
     ]
-    trace_handle = in_memory_trace_handle.InMemoryTraceHandle(
-        run_info, json.dumps(events));
+    trace_handle = file_handle.InMemoryFileHandle(
+        '/a.json', json.dumps(events))
 
-
-    results = results_module.Results()
     with map_single_trace.TemporaryMapScript("""
       pi.FunctionRegistry.register(
-          function MyMapFunction(results, run_info, model) {
+          function MyMapFunction(results, model) {
             throw new Error('Expected error');
           });
     """) as map_script:
-      map_single_trace.MapSingleTrace(results, trace_handle,
-                                      _Handle(map_script.filename))
+      result = map_single_trace.MapSingleTrace(trace_handle,
+                                               _Handle(map_script.filename))
 
-    self.assertEquals(len(results.all_values), 1)
-    v = results.all_values[0]
-    self.assertIsInstance(v, map_single_trace.MapFunctionErrorValue);
+    self.assertEquals(len(result.failures), 1)
+    self.assertEquals(len(result.pairs), 0)
+    f = result.failures[0]
+    self.assertIsInstance(f, map_single_trace.MapFunctionFailure)
 
-  def testMapperWithLoadeError(self):
-    run_info = run_info_module.RunInfo('file:///a.json', '/a.json',
-                                       metadata={'m': 1})
+  def testMapperWithLoadError(self):
     events = [
       {'pid': 1, 'tid': 2, 'ph': 'X', 'name': 'a', 'cat': 'c',
        'ts': 0, 'dur': 10, 'args': {}},
       {'pid': 1, 'tid': 2, 'ph': 'X', 'name': 'b', 'cat': 'c',
        'ts': 3, 'dur': 5, 'args': {}}
     ]
-    trace_handle = in_memory_trace_handle.InMemoryTraceHandle(
-        run_info, json.dumps(events));
+    trace_handle = file_handle.InMemoryFileHandle(
+        '/a.json', json.dumps(events))
 
-
-    results = results_module.Results()
     with map_single_trace.TemporaryMapScript("""
       throw new Error('Expected load error');
     """) as map_script:
-      map_single_trace.MapSingleTrace(results, trace_handle,
-                                      _Handle(map_script.filename))
+      result = map_single_trace.MapSingleTrace(trace_handle,
+                                               _Handle(map_script.filename))
 
-    self.assertEquals(len(results.all_values), 1)
-    v = results.all_values[0]
-    self.assertIsInstance(v, map_single_trace.FunctionLoadingErrorValue);
-
+    self.assertEquals(len(result.failures), 1)
+    self.assertEquals(len(result.pairs), 0)
+    f = result.failures[0]
+    self.assertIsInstance(f, map_single_trace.FunctionLoadingFailure)
 
   def testNoMapper(self):
-    run_info = run_info_module.RunInfo('file:///a.json', '/a.json',
-                                       metadata={'m': 1})
     events = [
       {'pid': 1, 'tid': 2, 'ph': 'X', 'name': 'a', 'cat': 'c',
        'ts': 0, 'dur': 10, 'args': {}},
       {'pid': 1, 'tid': 2, 'ph': 'X', 'name': 'b', 'cat': 'c',
        'ts': 3, 'dur': 5, 'args': {}}
     ]
-    trace_handle = in_memory_trace_handle.InMemoryTraceHandle(
-        run_info, json.dumps(events));
+    trace_handle = file_handle.InMemoryFileHandle(
+        '/a.json', json.dumps(events))
 
-
-    results = results_module.Results()
     with map_single_trace.TemporaryMapScript("""
     """) as map_script:
-      map_single_trace.MapSingleTrace(results, trace_handle,
-                                      _Handle(map_script.filename))
+      result = map_single_trace.MapSingleTrace(trace_handle,
+                                               _Handle(map_script.filename))
 
-    self.assertEquals(len(results.all_values), 1)
-    v = results.all_values[0]
-    self.assertIsInstance(v, map_single_trace.FunctionNotDefinedErrorValue);
-
+    self.assertEquals(len(result.failures), 1)
+    self.assertEquals(len(result.pairs), 0)
+    f = result.failures[0]
+    self.assertIsInstance(f, map_single_trace.FunctionNotDefinedFailure)
 
   def testMapperDoesntAddValues(self):
-    run_info = run_info_module.RunInfo('file:///a.json', '/a.json',
-                                       metadata={'m': 1})
     events = [
       {'pid': 1, 'tid': 2, 'ph': 'X', 'name': 'a', 'cat': 'c',
        'ts': 0, 'dur': 10, 'args': {}},
       {'pid': 1, 'tid': 2, 'ph': 'X', 'name': 'b', 'cat': 'c',
        'ts': 3, 'dur': 5, 'args': {}}
     ]
-    trace_handle = in_memory_trace_handle.InMemoryTraceHandle(
-        run_info, json.dumps(events));
+    trace_handle = file_handle.InMemoryFileHandle(
+        '/a.json', json.dumps(events))
 
-
-    results = results_module.Results()
     with map_single_trace.TemporaryMapScript("""
       pi.FunctionRegistry.register(
-          function MyMapFunction(results, run_info, model) {
+          function MyMapFunction(results, model) {
       });
     """) as map_script:
-      map_single_trace.MapSingleTrace(results, trace_handle,
-                                      _Handle(map_script.filename))
+      result = map_single_trace.MapSingleTrace(trace_handle,
+                                               _Handle(map_script.filename))
 
-    self.assertEquals(len(results.all_values), 1)
-    v = results.all_values[0]
-    self.assertIsInstance(v, map_single_trace.NoResultsAddedErrorValue);
-
-  def testMapperSkips(self):
-    run_info = run_info_module.RunInfo('file:///a.json', '/a.json',
-                                       metadata={'m': 1})
-    events = [
-      {'pid': 1, 'tid': 2, 'ph': 'X', 'name': 'a', 'cat': 'c',
-       'ts': 0, 'dur': 10, 'args': {}},
-      {'pid': 1, 'tid': 2, 'ph': 'X', 'name': 'b', 'cat': 'c',
-       'ts': 3, 'dur': 5, 'args': {}}
-    ]
-    trace_handle = in_memory_trace_handle.InMemoryTraceHandle(
-        run_info, json.dumps(events));
-
-
-    results = results_module.Results()
-    with map_single_trace.TemporaryMapScript("""
-      pi.FunctionRegistry.register(
-          function MyMapFunction(results, run_info, model) {
-            results.addValue(new pi.v.SkipValue(
-                run_info, 'SkippedFieldName',
-                {description: 'SkippedReason'}));
-
-      });
-    """) as map_script:
-      map_single_trace.MapSingleTrace(results, trace_handle,
-                                      _Handle(map_script.filename))
-
-    self.assertEquals(len(results.all_values), 1)
-    v = results.all_values[0]
-    self.assertIsInstance(v, value_module.SkipValue)
-    self.assertEquals(v.name, 'SkippedFieldName')
-    self.assertEquals(v.description, 'SkippedReason')
+    self.assertEquals(len(result.failures), 1)
+    self.assertEquals(len(result.pairs), 0)
+    f = result.failures[0]
+    self.assertIsInstance(f, map_single_trace.NoResultsAddedFailure)
diff --git a/catapult/perf_insights/perf_insights/map_traces.py b/catapult/perf_insights/perf_insights/map_traces.py
index 5357d1f..aa13933 100644
--- a/catapult/perf_insights/perf_insights/map_traces.py
+++ b/catapult/perf_insights/perf_insights/map_traces.py
@@ -2,15 +2,13 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 import argparse
-import os
 import sys
-import traceback
 
-import perf_insights
 from perf_insights import corpus_driver_cmdline
 from perf_insights import corpus_query
 from perf_insights import map_runner
 from perf_insights import function_handle
+from perf_insights.mre import job as job_module
 from perf_insights.results import json_output_formatter
 
 
@@ -36,7 +34,8 @@
       description='Bulk trace processing')
   corpus_driver_cmdline.AddArguments(parser)
   parser.add_argument('--query')
-  parser.add_argument('map_function_handle')
+  parser.add_argument('--map_function_handle')
+  parser.add_argument('--reduce_function_handle')
   parser.add_argument('-j', '--jobs', type=int,
                       default=map_runner.AUTO_JOB_COUNT)
   parser.add_argument('-o', '--output-file')
@@ -61,8 +60,15 @@
   output_formatter = json_output_formatter.JSONOutputFormatter(ofile)
 
   try:
-    map_function_handle = function_handle.FunctionHandle.FromUserFriendlyString(
-        args.map_function_handle)
+    map_handle = None
+    reduce_handle = None
+    if args.map_function_handle:
+      map_handle = function_handle.FunctionHandle.FromUserFriendlyString(
+          args.map_function_handle)
+    if args.reduce_function_handle:
+      reduce_handle = function_handle.FunctionHandle.FromUserFriendlyString(
+          args.reduce_function_handle)
+    job = job_module.Job(map_handle, reduce_handle)
   except function_handle.UserFriendlyStringInvalidError:
     error_lines = [
         'The map_traces command-line API has changed! You must now specify the',
@@ -74,12 +80,12 @@
 
   try:
     trace_handles = corpus_driver.GetTraceHandlesMatchingQuery(query)
-    runner = map_runner.MapRunner(trace_handles, map_function_handle,
+    runner = map_runner.MapRunner(trace_handles, job,
                                   stop_on_error=args.stop_on_error,
                                   jobs=args.jobs,
                                   output_formatters=[output_formatter])
     results = runner.Run()
-    if not results.had_failures:
+    if not results.failures:
       return 0
     else:
       return 255
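Both handle flags are optional now and are parsed from their user-friendly "path/to/module.html:FunctionName" form before being wrapped in a Job. A hypothetical map-only use of that path, reusing the gesture-timing mapper added later in this change (whether it is registered under exactly that name is an assumption):

    from perf_insights import function_handle
    from perf_insights.mre import job as job_module

    map_handle = function_handle.FunctionHandle.FromUserFriendlyString(
        'perf_insights/mappers/scheduling/map_gesture_timing.html:'
        'mapGestureTiming')
    job = job_module.Job(map_handle, None)  # reducer omitted
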
diff --git a/catapult/perf_insights/perf_insights/map_traces_handler.py b/catapult/perf_insights/perf_insights/map_traces_handler.py
index 47466a7..ea9b0b6 100644
--- a/catapult/perf_insights/perf_insights/map_traces_handler.py
+++ b/catapult/perf_insights/perf_insights/map_traces_handler.py
@@ -3,9 +3,12 @@
 # found in the LICENSE file.
 import webapp2
 
+
 def MapTrace(trace_corpus_driver):  # pylint: disable=unused-argument
   pass
 
+
 class MapTracesHandler(webapp2.RequestHandler):
+
   def post(self, *args, **kwargs):  # pylint: disable=unused-argument
-    pass
\ No newline at end of file
+    pass
diff --git a/catapult/perf_insights/perf_insights/mappers/rail.html b/catapult/perf_insights/perf_insights/mappers/rail.html
deleted file mode 100644
index 6c8b193..0000000
--- a/catapult/perf_insights/perf_insights/mappers/rail.html
+++ /dev/null
@@ -1,29 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/perf_insights/function_handle.html">
-<link rel="import" href="/perf_insights/value/value.html">
-<link rel="import" href="/tracing/extras/rail/rail_score.html">
-
-<script>
-tr.exportTo('pi.m', function() {
-
-  function railMapFunction(results, runInfo, model) {
-    var railScore = tr.e.rail.RAILScore.fromModel(model);
-    if (railScore === undefined) {
-      return;
-    }
-    results.addValue(new pi.v.DictValue(runInfo, 'railScore',
-                                        railScore.asDict()));
-  }
-  pi.FunctionRegistry.register(railMapFunction);
-
-  return {
-    railMapFunction: railMapFunction
-  };
-});
-
-</script>
diff --git a/catapult/perf_insights/perf_insights/mappers/scheduling/map_gesture_timing.html b/catapult/perf_insights/perf_insights/mappers/scheduling/map_gesture_timing.html
new file mode 100644
index 0000000..5da84ae
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/mappers/scheduling/map_gesture_timing.html
@@ -0,0 +1,78 @@
+<!DOCTYPE html>
+<!--
+Copyright 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/perf_insights/function_handle.html">
+
+<script>
+'use strict';
+
+tr.exportTo('pie', function() {
+  // Reports the delay and length of scroll gestures relative to first paint.
+  // See https://goo.gl/l7V5xg.
+  function mapGestureTiming(result, model) {
+    var loadIRs = model.userModel.expectations.filter(function(ir) {
+      return ir instanceof tr.model.um.LoadExpectation;
+    });
+    var responseIRs = model.userModel.expectations.filter(function(ir) {
+      return ir.stageTitle === 'Response' &&
+          ir.initiatorTitle.indexOf('Scroll') >= 0;
+    });
+    var animationIRs = model.userModel.expectations.filter(function(ir) {
+      return ir.stageTitle === 'Animation' &&
+          ir.initiatorTitle.indexOf('Scroll') >= 0;
+    });
+
+    var firstGestureAfterLoadTimes = [];
+    var gestureDurations = new Array(responseIRs.length);
+    var intervalBetweenGestures = [];
+
+    // This loop is quadratic, but typically we only expect a low number (~tens
+    // per minute of tracing) of both load and response IRs.
+    loadIRs.forEach(function(loadIR) {
+      var loadEnd = loadIR.start + loadIR.duration;
+      for (var i = 0; i < responseIRs.length; i++) {
+        var responseIR = responseIRs[i];
+        if (responseIR.start < loadEnd)
+          continue;
+        firstGestureAfterLoadTimes.push(responseIR.start - loadEnd);
+        break;
+      }
+    });
+
+    // Compute the interval between responses and the duration of each gesture
+    // and any gesture animation that follows. This loop is also quadratic, but
+    // again the expected number of IRs is small (~tens per minute).
+    var prevGestureStart = undefined;
+    responseIRs.forEach(function(responseIR, index) {
+      if (prevGestureStart !== undefined)
+        intervalBetweenGestures.push(responseIR.start - prevGestureStart);
+      prevGestureStart = responseIR.start;
+      var gestureDuration = responseIR.duration;
+      for (var i = 0; i < animationIRs.length; i++) {
+        if (animationIRs[i].start !== responseIR.start + responseIR.duration)
+          continue;
+        gestureDuration += animationIRs[i].duration;
+        break;
+      }
+      gestureDurations[index] = gestureDuration;
+    });
+
+    result.addPair(
+        'gestureTiming', {
+          firstGestureAfterLoadTime: firstGestureAfterLoadTimes,
+          gestureDuration: gestureDurations,
+          intervalBetweenGestures: intervalBetweenGestures
+        });
+  }
+
+  pi.FunctionRegistry.register(mapGestureTiming);
+
+  return {
+    mapGestureTimingForTest: mapGestureTiming
+  };
+});
+</script>
diff --git a/catapult/perf_insights/perf_insights/mappers/scheduling/map_gesture_timing_test.html b/catapult/perf_insights/perf_insights/mappers/scheduling/map_gesture_timing_test.html
new file mode 100644
index 0000000..fa4f34b
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/mappers/scheduling/map_gesture_timing_test.html
@@ -0,0 +1,51 @@
+<!DOCTYPE html>
+<!--
+Copyright 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/perf_insights/mappers/scheduling/map_gesture_timing.html">
+<link rel="import" href="/perf_insights/mre/mre_result.html">
+<link rel="import" href="/tracing/base/iteration_helpers.html">
+<link rel="import" href="/tracing/core/test_utils.html">
+<link rel="import" href="/tracing/model/user_model/load_expectation.html">
+<link rel="import" href="/tracing/model/user_model/response_expectation.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  test('mapperTest', function() {
+    var m = tr.c.TestUtils.newModel(function(m) {
+      var loadIr = new tr.model.um.LoadExpectation(
+          m, tr.model.um.LOAD_SUBTYPE_NAMES.SUCCESSFUL, 0, 10);
+      m.userModel.expectations.push(loadIr);
+
+      var scrollIr = new tr.model.um.ResponseExpectation(
+          m, 'Scroll', 50, 10);
+      m.userModel.expectations.push(scrollIr);
+
+      var flingIr = new tr.model.um.ResponseExpectation(
+          m, 'Fling', 60, 10);
+      m.userModel.expectations.push(flingIr);
+
+      var scrollIr2 = new tr.model.um.ResponseExpectation(
+          m, 'Scroll', 100, 20);
+      m.userModel.expectations.push(scrollIr2);
+    });
+
+    var result = new pi.mre.MreResult();
+    pie.mapGestureTimingForTest(result, m);
+
+    assert.equal(tr.b.dictionaryLength(result.pairs), 1);
+
+    var gestureTiming = result.pairs.gestureTiming;
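+    // The load ends at 10 and the first Scroll response starts at 50, so the
+    // first gesture comes 40 ms after load. With no Scroll animations, the
+    // gesture durations equal the response durations, and the two responses
+    // (starts 50 and 100) are 50 ms apart.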
+    assert.deepEqual(gestureTiming.firstGestureAfterLoadTime, [40]);
+    assert.deepEqual(gestureTiming.gestureDuration, [10, 20]);
+    assert.deepEqual(gestureTiming.intervalBetweenGestures, [50]);
+  });
+});
+
+</script>
+
diff --git a/catapult/perf_insights/perf_insights/mappers/scheduling/map_input_blockers.html b/catapult/perf_insights/perf_insights/mappers/scheduling/map_input_blockers.html
new file mode 100644
index 0000000..e148e05
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/mappers/scheduling/map_input_blockers.html
@@ -0,0 +1,81 @@
+<!DOCTYPE html>
+<!--
+Copyright 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/perf_insights/function_handle.html">
+<link rel="import" href="/tracing/base/range.html">
+<link rel="import" href="/tracing/model/helpers/chrome_model_helper.html">
+
+<script>
+'use strict';
+
+tr.exportTo('pie', function() {
+  // Collects the set of tasks that are preventing user input from being
+  // processed on the main thread.
+  // See https://goo.gl/l7V5xg.
+  function mapInputBlockers(result, model) {
+    var modelHelper = model.getOrCreateHelper(
+        tr.model.helpers.ChromeModelHelper);
+    var rendererHelpers = modelHelper.rendererHelpers;
+
+    if (!rendererHelpers) {
+      // If we didn't detect any renderer processes, bail out.
+      result.addPair('inputBlockers', null);
+      return;
+    }
+
+    // Look for main thread input handling in each renderer process.
+    var inputBlockers = {};
+    var foundInputBlockers = false;
+    for (var pid in rendererHelpers) {
+      var rendererHelper = rendererHelpers[pid];
+      var mainThread = rendererHelper.mainThread;
+      // Look for events that represent main thread input handling that also
+      // have one associated flow event showing where the input came from.
+      mainThread.iterateAllEvents(function(event) {
+        if (event.title !== 'LatencyInfo.Flow' ||
+            event.args['step'] !== 'HandleInputEventMain' ||
+            event.inFlowEvents.length !== 1) {
+          return;
+        }
+
+        // Now we can derive the queueing interval from the flow event.
+        var flowEvent = event.inFlowEvents[0];
+        var queueRange =
+            tr.b.Range.fromExplicitRange(flowEvent.start, event.start);
+
+        // Find all events that intersect the queueing interval and compute how
+        // much they contributed to it.
+        mainThread.iterateAllEvents(function(event) {
+          var eventRange =
+              tr.b.Range.fromExplicitRange(event.start,
+                                           event.start + event.duration);
+          var intersection = queueRange.findIntersection(eventRange);
+          if (intersection.isEmpty || intersection.duration === 0)
+            return;
+          if (inputBlockers[event.title] === undefined)
+            inputBlockers[event.title] = [];
+          inputBlockers[event.title].push(intersection.duration);
+          foundInputBlockers = true;
+        });
+      });
+    }
+
+    if (!foundInputBlockers) {
+      result.addPair('inputBlockers', null);
+      return;
+    }
+
+    result.addPair('inputBlockers', inputBlockers);
+  }
+
+  pi.FunctionRegistry.register(mapInputBlockers);
+
+  return {
+    mapInputBlockersForTest: mapInputBlockers
+  };
+});
+</script>
diff --git a/catapult/perf_insights/perf_insights/mappers/scheduling/map_input_blockers_test.html b/catapult/perf_insights/perf_insights/mappers/scheduling/map_input_blockers_test.html
new file mode 100644
index 0000000..022c7d0
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/mappers/scheduling/map_input_blockers_test.html
@@ -0,0 +1,68 @@
+<!DOCTYPE html>
+<!--
+Copyright 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/perf_insights/mappers/scheduling/map_input_blockers.html">
+<link rel="import" href="/perf_insights/mre/mre_result.html">
+<link rel="import" href="/tracing/base/iteration_helpers.html">
+<link rel="import" href="/tracing/core/test_utils.html">
+<link rel="import" href="/tracing/extras/chrome/chrome_test_utils.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  var TestUtils = tr.c.TestUtils;
+  var ThreadSlice = tr.model.ThreadSlice;
+
+  test('mapperTest', function() {
+    var m = tr.e.chrome.ChromeTestUtils.newChromeModel(function(m) {
+      var mainThread = m.rendererMain;
+
+      // Set up a model with two events that are blocking input event handling.
+      mainThread.sliceGroup.pushSlice(TestUtils.newSliceEx({
+        type: ThreadSlice,
+        start: -10,
+        duration: 15,
+        title: 'blockingEvent1'
+      }));
+
+      mainThread.sliceGroup.pushSlice(TestUtils.newSliceEx({
+        type: ThreadSlice,
+        start: 5,
+        duration: 4,
+        title: 'blockingEvent2'
+      }));
+
+      var inputEvent = mainThread.sliceGroup.pushSlice(TestUtils.newSliceEx({
+        type: ThreadSlice,
+        start: 10,
+        duration: 10,
+        title: 'LatencyInfo.Flow',
+        args: {step: 'HandleInputEventMain'}
+      }));
+
+      var flow = TestUtils.newFlowEventEx({
+        start: 0,
+        duration: 10
+      });
+      inputEvent.inFlowEvents.push(flow);
+    });
+
+    var result = new pi.mre.MreResult();
+    pie.mapInputBlockersForTest(result, m);
+
+    assert.equal(tr.b.dictionaryLength(result.pairs), 1);
+
+    // The mapper computes the amount of delay each blocking event contributed
+    // to the input event.
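+    // The queueing interval spans from the flow event's start (0) to the
+    // input event's start (10): blockingEvent1 [-10, 5] overlaps it by 5 ms
+    // and blockingEvent2 [5, 9] by 4 ms.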
+    var inputBlockers = result.pairs.inputBlockers;
+    assert.deepEqual(inputBlockers.blockingEvent1, [5]);
+    assert.deepEqual(inputBlockers.blockingEvent2, [4]);
+  });
+});
+
+</script>
diff --git a/catapult/perf_insights/perf_insights/mappers/scheduling/map_rendering_cost.html b/catapult/perf_insights/perf_insights/mappers/scheduling/map_rendering_cost.html
new file mode 100644
index 0000000..f601f04
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/mappers/scheduling/map_rendering_cost.html
@@ -0,0 +1,96 @@
+<!DOCTYPE html>
+<!--
+Copyright 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/perf_insights/function_handle.html">
+<link rel="import" href="/tracing/model/helpers/chrome_browser_helper.html">
+<link rel="import" href="/tracing/model/helpers/chrome_model_helper.html">
+
+<script>
+'use strict';
+
+tr.exportTo('pie', function() {
+  function eventCost(event) {
+    if (event.cpuDuration !== undefined)
+      return event.cpuDuration;
+    return event.duration;
+  }
+
+  // Computes the thread time spent in BeginMainFrame during the loading phase
+  // as a ratio of the overall main thread utilization during that time.
+  // See https://goo.gl/l7V5xg.
+  function mapRenderingCost(result, model) {
+    var modelHelper = model.getOrCreateHelper(
+        tr.model.helpers.ChromeModelHelper);
+    var browserHelper = modelHelper.browserHelper;
+    var rendererHelpers = modelHelper.rendererHelpers;
+
+    if (!browserHelper || !rendererHelpers) {
+      // If we couldn't find both a browser and a renderer process, bail out.
+      result.addPair('renderingCost', null);
+      return;
+    }
+
+    var loadingEvents = browserHelper.getLoadingEventsInRange(model.bounds);
+    var loadingDurations = new Array(loadingEvents.length);
+    var loadingTotalCost = new Array(loadingEvents.length);
+    var loadingBeginMainFrameCost = new Array(loadingEvents.length);
+    var loadingBeginMainFrameRelativeCost = new Array(loadingEvents.length);
+    var beginMainFrameCount = 0;
+    loadingEvents.forEach(function(loadingEvent, index) {
+      loadingDurations[index] = loadingEvent.duration;
+
+      var totalCost = 0;
+      var beginMainFrameCost = 0;
+      for (var pid in rendererHelpers) {
+        var rendererHelper = rendererHelpers[pid];
+        var mainThread = rendererHelper.mainThread;
+        mainThread.iterateAllEvents(function(event) {
+          // Look for tasks executed by the scheduler. Note that this only
+          // includes slices that are *completely* inside the loading phase.
+          if (event.title !== 'TaskQueueManager::RunTask' ||
+              event.start < loadingEvent.start ||
+              event.start + event.duration >
+                  loadingEvent.start + loadingEvent.duration) {
+            return;
+          }
+          totalCost += eventCost(event);
+
+          var beginMainFrame =
+              event.findDescendentSlice('ThreadProxy::BeginMainFrame');
+          if (beginMainFrame) {
+            beginMainFrameCount++;
+            beginMainFrameCost += eventCost(beginMainFrame);
+          }
+        });
+      }
+
+      loadingTotalCost[index] = totalCost;
+      loadingBeginMainFrameCost[index] = beginMainFrameCost;
+      loadingBeginMainFrameRelativeCost[index] = beginMainFrameCost / totalCost;
+    });
+
+    if (loadingDurations.length === 0) {
+      result.addPair('renderingCost', null);
+      return;
+    }
+
+    result.addPair('renderingCost', {
+          loadingDuration: loadingDurations,
+          loadingTotalCost: loadingTotalCost,
+          loadingBeginMainFrameCost: loadingBeginMainFrameCost,
+          loadingBeginMainFrameRelativeCost: loadingBeginMainFrameRelativeCost,
+          beginMainFramesPerLoad: beginMainFrameCount / loadingDurations.length
+        });
+  }
+
+  pi.FunctionRegistry.register(mapRenderingCost);
+
+  return {
+    mapRenderingCostForTest: mapRenderingCost
+  };
+});
+</script>
diff --git a/catapult/perf_insights/perf_insights/mappers/scheduling/map_rendering_cost_test.html b/catapult/perf_insights/perf_insights/mappers/scheduling/map_rendering_cost_test.html
new file mode 100644
index 0000000..fb7f796
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/mappers/scheduling/map_rendering_cost_test.html
@@ -0,0 +1,70 @@
+<!DOCTYPE html>
+<!--
+Copyright 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/perf_insights/mappers/scheduling/map_rendering_cost.html">
+<link rel="import" href="/perf_insights/mre/mre_result.html">
+<link rel="import" href="/tracing/core/test_utils.html">
+<link rel="import" href="/tracing/extras/chrome/chrome_test_utils.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  var TestUtils = tr.c.TestUtils;
+  var ThreadSlice = tr.model.ThreadSlice;
+
+  test('mapperTest', function() {
+    var m = tr.e.chrome.ChromeTestUtils.newChromeModel(function(m) {
+      tr.e.chrome.ChromeTestUtils.addLoadingEvent(m, {start: 0, end: 10});
+
+      var mainThread = m.rendererMain;
+
+      mainThread.sliceGroup.pushSlice(TestUtils.newSliceEx({
+        type: ThreadSlice,
+        start: 4,
+        duration: 4,
+        title: 'TaskQueueManager::RunTask'
+      }));
+
+      mainThread.sliceGroup.pushSlice(TestUtils.newSliceEx({
+        type: ThreadSlice,
+        start: 5,
+        duration: 2,
+        title: 'ThreadProxy::BeginMainFrame'
+      }));
+
+      // Two slices that only partially overlap the loading phase. Both
+      // should be ignored.
+      mainThread.sliceGroup.pushSlice(TestUtils.newSliceEx({
+        type: ThreadSlice,
+        start: -5,
+        duration: 10,
+        title: 'ThreadProxy::BeginMainFrame'
+      }));
+
+      mainThread.sliceGroup.pushSlice(TestUtils.newSliceEx({
+        type: ThreadSlice,
+        start: 8,
+        duration: 10,
+        title: 'ThreadProxy::BeginMainFrame'
+      }));
+    });
+
+    var result = new pi.mre.MreResult();
+    pie.mapRenderingCostForTest(result, m);
+
+    assert.equal(tr.b.dictionaryLength(result.pairs), 1);
+
+    var renderingCost = result.pairs.renderingCost;
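+    // Only the RunTask at [4, 8] falls completely inside the loading phase
+    // [0, 10]; its BeginMainFrame accounts for 2 of those 4 ms, giving a
+    // relative cost of 0.5.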
+    assert.deepEqual(renderingCost.loadingDuration, [10]);
+    assert.deepEqual(renderingCost.loadingTotalCost, [4]);
+    assert.deepEqual(renderingCost.loadingBeginMainFrameCost, [2]);
+    assert.deepEqual(renderingCost.loadingBeginMainFrameRelativeCost, [.5]);
+  });
+});
+
+</script>
diff --git a/catapult/perf_insights/perf_insights/mappers/scheduling/map_wake_ups.html b/catapult/perf_insights/perf_insights/mappers/scheduling/map_wake_ups.html
new file mode 100644
index 0000000..4405112
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/mappers/scheduling/map_wake_ups.html
@@ -0,0 +1,131 @@
+<!DOCTYPE html>
+<!--
+Copyright 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/perf_insights/function_handle.html">
+<link rel="import" href="/tracing/base/range.html">
+
+<script>
+'use strict';
+
+tr.exportTo('pie', function() {
+  // Conservative estimate: if we hadn't been doing anything for 1ms, we
+  // probably needed to wake up the CPU for this.
+  // TODO(skyostil): Augment this with CPU power management states.
+  var IDLE_THRESHOLD_MILLISECONDS = 1;
+
+  function sanitizeReason(reason) {
+    // Remove any path name components (e.g., '/foo/bar/baz' or 'c:\foo\bar\baz'
+    // are both reduced to 'baz').
+    return reason.replace(/^.*[\/\\]/, '');
+  }
+
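+  // Attribute a wake-up to the first task the scheduler ran, or to the
+  // posting location (src_file:src_func) when available, falling back to
+  // the event title.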
+  function findWakeUpReason(event) {
+    var tqmRunTask = event.findDescendentSlice('TaskQueueManager::RunTask');
+    if (tqmRunTask && tqmRunTask.subSlices.length > 0)
+      return tqmRunTask.subSlices[0].title;
+    var processTask =
+        event.findDescendentSlice('TaskQueueManager::ProcessTaskFromWorkQueue');
+    if (processTask &&
+        processTask.args.src_file &&
+        processTask.args.src_func) {
+      return processTask.args.src_file + ':' + processTask.args.src_func;
+    }
+    if (event.title === 'MessageLoop::RunTask' &&
+        event.args.src_file &&
+        event.args.src_func) {
+      return event.args.src_file + ':' + event.args.src_func;
+    }
+    return event.title;
+  }
+
+  // Estimate the number of times the CPU was woken up from idle to execute
+  // different types of work (e.g., timer work) and the time the CPU had been
+  // idle before that.
+  // See https://goo.gl/l7V5xg.
+  function findWakeUpsOnThread(thread) {
+    var wakeUps = {};
+    var foundWakeUps = false;
+    var lastTaskEnd = undefined;
+    thread.iterateAllEvents(function(event) {
+      if (!event.isTopLevel)
+        return;
+      var taskEnd = event.start + event.duration;
+      if (lastTaskEnd === undefined) {
+        lastTaskEnd = taskEnd;
+        return;
+      }
+      var sleepTime = event.start - lastTaskEnd;
+      var isWakeUp = sleepTime >= IDLE_THRESHOLD_MILLISECONDS;
+      lastTaskEnd = taskEnd;
+      if (!isWakeUp)
+        return;
+      var reason = sanitizeReason(findWakeUpReason(event));
+      if (wakeUps[reason] === undefined)
+        wakeUps[reason] = {frequency: 0, sleepTimes: []};
+      wakeUps[reason].frequency++;
+      wakeUps[reason].sleepTimes.push(sleepTime);
+      foundWakeUps = true;
+    });
+    return foundWakeUps ? wakeUps : undefined;
+  }
+
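+  // Merge per-reason wake-up counts and sleep times from one thread into an
+  // existing aggregate.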
+  function updateThreadWakeUps(existingWakeUps, newWakeUps) {
+    for (var reason in newWakeUps) {
+      if (!(reason in existingWakeUps)) {
+        existingWakeUps[reason] = newWakeUps[reason];
+        continue;
+      }
+      existingWakeUps[reason].frequency += newWakeUps[reason].frequency;
+      existingWakeUps[reason].sleepTimes =
+          existingWakeUps[reason].sleepTimes.concat(
+              newWakeUps[reason].sleepTimes);
+    }
+  }
+
+  function mapWakeUps(result, model) {
+    var allWakeUps = {};
+    for (var pid in model.processes) {
+      var process = model.processes[pid];
+      for (var tid in process.threads) {
+        var thread = process.threads[tid];
+        var wakeUps = findWakeUpsOnThread(thread);
+        if (wakeUps === undefined)
+          continue;
+        if (!(thread.name in allWakeUps))
+          allWakeUps[thread.name] = {};
+        updateThreadWakeUps(allWakeUps[thread.name], wakeUps);
+      }
+    }
+
+    // Normalize frequency to wake-ups/second.
+    // Note: if we found any wake-ups, the total duration of the trace is
+    // guaranteed to be positive.
+    var totalDurationSeconds = model.bounds.duration / 1000;
+    var foundAnyWakeUps = false;
+    for (var thread in allWakeUps) {
+      var threadWakeUps = allWakeUps[thread];
+      for (var reason in threadWakeUps) {
+        threadWakeUps[reason].frequency /= totalDurationSeconds;
+        foundAnyWakeUps = true;
+      }
+    }
+
+    if (!foundAnyWakeUps) {
+      result.addPair('wakeUps', null);
+      return;
+    }
+
+    result.addPair('wakeUps', allWakeUps);
+  }
+
+  pi.FunctionRegistry.register(mapWakeUps);
+
+  return {
+    mapWakeUpsForTest: mapWakeUps
+  };
+});
+</script>
diff --git a/catapult/perf_insights/perf_insights/mappers/scheduling/map_wake_ups_test.html b/catapult/perf_insights/perf_insights/mappers/scheduling/map_wake_ups_test.html
new file mode 100644
index 0000000..aa92e8f
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/mappers/scheduling/map_wake_ups_test.html
@@ -0,0 +1,77 @@
+<!DOCTYPE html>
+<!--
+Copyright 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/perf_insights/mappers/scheduling/map_wake_ups.html">
+<link rel="import" href="/perf_insights/mre/mre_result.html">
+<link rel="import" href="/tracing/base/iteration_helpers.html">
+<link rel="import" href="/tracing/core/test_utils.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  var TestUtils = tr.c.TestUtils;
+
+  test('mapperTest', function() {
+    var m = TestUtils.newModel(function(m) {
+      var p1 = m.getOrCreateProcess(1);
+      var t2 = p1.getOrCreateThread(2);
+      t2.name = 'mainThread';
+      t2.sliceGroup.pushSlice(TestUtils.newSliceEx({
+        title: 'goingToSleep',
+        start: 0, duration: 10
+      }));
+
+      // This slice doesn't count as a wake-up because it occurs too soon after
+      // the previous one.
+      t2.sliceGroup.pushSlice(TestUtils.newSliceEx({
+        title: 'notAWakeUp',
+        start: 10, duration: 1
+      }));
+
+      t2.sliceGroup.pushSlice(TestUtils.newSliceEx({
+        title: 'wakeUp1',
+        start: 20, duration: 10
+      }));
+
+      var p3 = m.getOrCreateProcess(3);
+      var t4 = p3.getOrCreateThread(4);
+      t4.name = 'mainThread';
+      t4.sliceGroup.pushSlice(TestUtils.newSliceEx({
+        title: 'goingToSleep',
+        start: 0, duration: 10
+      }));
+
+      // This wake-up gets merged with the one above because it has the same
+      // title.
+      t4.sliceGroup.pushSlice(TestUtils.newSliceEx({
+        title: 'wakeUp1',
+        start: 25, duration: 1
+      }));
+
+      // The name of this wake-up gets sanitized.
+      t4.sliceGroup.pushSlice(TestUtils.newSliceEx({
+        title: 'MessageLoop::RunTask',
+        args: {src_file: 'c:\\foo\\file', src_func: 'func'},
+        start: 29, duration: 1
+      }));
+    });
+
+    var result = new pi.mre.MreResult();
+    pie.mapWakeUpsForTest(result, m);
+
+    assert.equal(tr.b.dictionaryLength(result.pairs), 1);
+
+    var wakeUps = result.pairs.wakeUps;
+    assert.equal(Object.keys(wakeUps.mainThread).length, 2);
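+    // The two wakeUp1 slices merge into one reason and are normalized over
+    // the 30 ms trace: 2 / 0.03 s = 200/3 wake-ups per second.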
+    assert.equal(wakeUps.mainThread.wakeUp1.frequency, 200 / 3);
+    assert.deepEqual(wakeUps.mainThread.wakeUp1.sleepTimes, [9, 15]);
+    assert.deepEqual(wakeUps.mainThread['file:func'].sleepTimes, [3]);
+  });
+});
+
+</script>
diff --git a/catapult/perf_insights/perf_insights/mappers/slice_cost.html b/catapult/perf_insights/perf_insights/mappers/slice_cost.html
index 8039c48..6a14349 100644
--- a/catapult/perf_insights/perf_insights/mappers/slice_cost.html
+++ b/catapult/perf_insights/perf_insights/mappers/slice_cost.html
@@ -31,6 +31,7 @@
     for (var state in JSSourceState) {
       this.jsTimeByState[JSSourceState[state]] = 0;
     }
+    this.data = {};
   }
 
   SliceCostInfo.asReduceTarget = function(key, firstValue) {
@@ -41,8 +42,9 @@
     sliceCostInfo.domainCategory = firstValue.domainCategory;
     sliceCostInfo.domain = firstValue.domain;
     sliceCostInfo.userFriendlyCategory = firstValue.userFriendlyCategory;
+    sliceCostInfo.data = firstValue.data;
     return sliceCostInfo;
-  }
+  };
 
   SliceCostInfo.fromDict = function(d) {
     var sliceCostInfo = new SliceCostInfo();
@@ -63,8 +65,9 @@
             d.jsTimeByState[JSSourceState[state]] || 0;
       }
     }
+    sliceCostInfo.data = d.data;
     return sliceCostInfo;
-  }
+  };
 
   SliceCostInfo.prototype = {
     push: function(sliceCostKey, threadSlice) {
@@ -89,7 +92,7 @@
 
 
   function getSliceCostReport(model, threadGrouping, railTypeNameByGUID,
-      filterFunction) {
+      filterFunction, dataCB) {
     var reduce = new pi.m.StreamingReducer(SliceCostInfo.asReduceTarget);
 
     function generateDomainCosts(slice) {
@@ -113,6 +116,8 @@
             tr.e.ads.DomainCategory.fromDomain(sliceCostInfo.domain);
         sliceCostInfo.selfTime = sample.weight;
         sliceCostInfo.cpuSelfTime = sample.weight;
+        if (dataCB !== undefined)
+          sliceCostInfo.data = dataCB(slice);
         // Let's use the state of the leaf frame. TODO(chiniforooshan):
         // understand what it means if frames of a sample stack are in different
         // states (BUG #1542).
@@ -154,6 +159,8 @@
       // For all other events, just generate one sliceCostInfo.
       sliceCostInfo.selfTime = threadSlice.selfTime;
       sliceCostInfo.cpuSelfTime = threadSlice.cpuSelfTime;
+      if (dataCB !== undefined)
+        sliceCostInfo.data = dataCB(event);
 
       var key = sliceCostInfo.threadGroup + '/' +
                 sliceCostInfo.railTypeName + '/' +
diff --git a/catapult/perf_insights/perf_insights/mappers/startup_map_function.html b/catapult/perf_insights/perf_insights/mappers/startup_map_function.html
index 7286cff..0e8fb13 100644
--- a/catapult/perf_insights/perf_insights/mappers/startup_map_function.html
+++ b/catapult/perf_insights/perf_insights/mappers/startup_map_function.html
@@ -5,19 +5,21 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/perf_insights/mappers/thread_grouping.html">
-<link rel="import" href="/perf_insights/mappers/slice_cost.html">
 <link rel="import" href="/perf_insights/function_handle.html">
-<link rel="import" href="/perf_insights/value/value.html">
-<link rel="import" href="/tracing/extras/rail/load_interaction_record.html">
+<link rel="import" href="/perf_insights/mappers/slice_cost.html">
+<link rel="import" href="/perf_insights/mappers/thread_grouping.html">
+<link rel="import" href="/tracing/model/user_model/load_expectation.html">
 
 <script>
+'use strict';
+
 tr.exportTo('pi.m', function() {
-  function startupMapFunction(results, runInfo, model) {
+  function startupMapFunction(result, model) {
     var startupIR;
-    model.interactionRecords.forEach(function(ir) {
-      if (ir instanceof tr.e.rail.LoadInteractionRecord &&
-          ir.name === 'Startup') {
+    var canonicalUrl = model.canonicalUrlThatCreatedThisTrace;
+    model.userModel.expectations.forEach(function(ir) {
+      if (ir instanceof tr.model.um.LoadExpectation &&
+          ir.initiatorTitle === tr.model.um.LOAD_SUBTYPE_NAMES.STARTUP) {
         startupIR = ir;
       }
     });
@@ -32,9 +34,10 @@
         function(event) {
           return startupIR.bounds(event);
         });
-    results.addValue(new pi.v.DictValue(runInfo, 'sr',
+    result.addPair('sr',
         {sliceCosts: sliceCosts,
-         startupDuration: startupIR.duration}));
+         startupDuration: startupIR.duration});
+    result.addPair('canonical_url', canonicalUrl);
   }
 
   pi.FunctionRegistry.register(startupMapFunction);
diff --git a/catapult/perf_insights/perf_insights/mappers/startup_map_function_test.html b/catapult/perf_insights/perf_insights/mappers/startup_map_function_test.html
index 82af64f..a06e6c2 100644
--- a/catapult/perf_insights/perf_insights/mappers/startup_map_function_test.html
+++ b/catapult/perf_insights/perf_insights/mappers/startup_map_function_test.html
@@ -5,12 +5,10 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/perf_insights/value/value.html">
-<link rel="import" href="/perf_insights/value/run_info.html">
-<link rel="import" href="/perf_insights/results/results.html">
 <link rel="import" href="/perf_insights/mappers/startup_map_function.html">
+<link rel="import" href="/perf_insights/mre/mre_result.html">
 <link rel="import" href="/tracing/core/test_utils.html">
-<link rel="import" href="/tracing/extras/rail/load_interaction_record.html">
+<link rel="import" href="/tracing/model/user_model/load_expectation.html">
 
 <script>
 'use strict';
@@ -34,16 +32,15 @@
         start: 20, end: 30
       }));
 
-      var loadIr = new tr.e.rail.LoadInteractionRecord(m, 0, 10);
-      loadIr.name = 'Startup';
-      m.interactionRecords.push(loadIr);
+      var loadIr = new tr.model.um.LoadExpectation(
+          m, tr.model.um.LOAD_SUBTYPE_NAMES.STARTUP, 0, 10);
+      m.userModel.expectations.push(loadIr);
       loadIr.associatedEvents.push(t2_s1);
       loadIr.associatedEvents.push(t2_s2);
     });
 
-    var results = new pi.r.Results();
-    var runInfo = new pi.v.RunInfo('my_test.json');
-    pi.m.startupMapFunctionForTest(results, runInfo, m);
+    var result = new pi.mre.MreResult();
+    pi.m.startupMapFunctionForTest(result, m);
   });
 });
 
diff --git a/catapult/perf_insights/perf_insights/mappers/task_info_map_function.html b/catapult/perf_insights/perf_insights/mappers/task_info_map_function.html
index 56f18ea..3794a7f 100644
--- a/catapult/perf_insights/perf_insights/mappers/task_info_map_function.html
+++ b/catapult/perf_insights/perf_insights/mappers/task_info_map_function.html
@@ -5,24 +5,27 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/perf_insights/mappers/thread_grouping.html">
 <link rel="import" href="/perf_insights/function_handle.html">
-<link rel="import" href="/perf_insights/value/value.html">
-<link rel="import" href="/tracing/base/units/histogram.html">
+<link rel="import" href="/perf_insights/mappers/thread_grouping.html">
 <link rel="import" href="/tracing/model/flow_event.html">
 <link rel="import" href="/tracing/model/slice.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <script>
+'use strict';
+
 tr.exportTo('pi.m', function() {
   // Granularity of the histogram.
   var HISTOGRAM_MAX = 250;
   var NUM_BINS = 50;
 
-  function taskInfoMapFunction(results, run_info, model) {
+  function taskInfoMapFunction(result, model) {
+    var canonicalUrl = model.canonicalUrlThatCreatedThisTrace;
     var threadGrouping = new pi.m.ThreadGrouping();
     threadGrouping.autoInitUsingHelpers(model);
-    addTimeInQueue(results, run_info, model, threadGrouping);
-    addTopLevelTasksDuration(results, run_info, model, threadGrouping);
+    addTimeInQueue(result, canonicalUrl, model, threadGrouping);
+    addTopLevelTasksDuration(result, canonicalUrl, model, threadGrouping);
   }
 
   function eatTrailingDigits(str) {
@@ -37,7 +40,7 @@
     }
   }
 
-  function addTimeInQueue(results, run_info, model, threadGrouping) {
+  function addTimeInQueue(result, canonicalUrl, model, threadGrouping) {
     var timeInQueue = {};
     model.flowEvents.forEach(function(flowEvent) {
       if (!flowEvent.endSlice instanceof tr.model.Slice)
@@ -51,16 +54,14 @@
       var threadName = eatTrailingDigits(thread.name) || 'Unknown';
       var processName = threadGrouping.getGroupNameForThread(thread);
       addToHistogram(timeInQueue, processName, threadName, flowEvent.duration,
-          run_info);
+          canonicalUrl);
     });
     histogramsToDict(timeInQueue);
-    results.addValue(new pi.v.DictValue(
-        run_info,
-        'time_spent_in_queue',
-        timeInQueue));
+    result.addPair('time_spent_in_queue', timeInQueue);
   }
 
-  function addTopLevelTasksDuration(results, run_info, model, threadGrouping) {
+  function addTopLevelTasksDuration(result, canonicalUrl, model,
+                                    threadGrouping) {
     var timeInTask = {};
     var cpuTimeInTask = {};
     model.getAllThreads().forEach(function(thread) {
@@ -75,21 +76,15 @@
         if (!isTopLevelTask(slice))
           return;
         addToHistogram(timeInTask, processName, threadName, slice.duration,
-            run_info);
+            canonicalUrl);
         addToHistogram(cpuTimeInTask, processName, threadName,
-            slice.cpuDuration, run_info);
+            slice.cpuDuration, canonicalUrl);
       });
     });
     histogramsToDict(timeInTask);
-    results.addValue(new pi.v.DictValue(
-        run_info,
-        'time_spent_in_top_level_task',
-        timeInTask));
+    result.addPair('time_spent_in_top_level_task', timeInTask);
     histogramsToDict(cpuTimeInTask);
-    results.addValue(new pi.v.DictValue(
-        run_info,
-        'cpu_time_spent_in_top_level_task',
-        cpuTimeInTask));
+    result.addPair('cpu_time_spent_in_top_level_task', cpuTimeInTask);
   }
 
   // A slice is top level if it's on the receiving end of a post task and no
@@ -103,8 +98,8 @@
   function addToHistogram(dict, processName, threadName, value, url) {
     dict[processName] = dict[processName] || {};
     dict[processName][threadName] = dict[processName][threadName] ||
-        tr.b.u.Histogram.createLinear(
-            tr.b.u.Units.timeDurationInMs,
+        tr.v.Numeric.createLinear(
+            tr.v.Unit.byName.timeDurationInMs,
             tr.b.Range.fromExplicitRange(0, HISTOGRAM_MAX),
             NUM_BINS);
     dict[processName][threadName].add(value, url);
diff --git a/catapult/perf_insights/perf_insights/mappers/task_info_map_function_test.html b/catapult/perf_insights/perf_insights/mappers/task_info_map_function_test.html
index c112dee..b559491 100644
--- a/catapult/perf_insights/perf_insights/mappers/task_info_map_function_test.html
+++ b/catapult/perf_insights/perf_insights/mappers/task_info_map_function_test.html
@@ -5,12 +5,11 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/perf_insights/value/value.html">
-<link rel="import" href="/perf_insights/value/run_info.html">
-<link rel="import" href="/perf_insights/results/results.html">
 <link rel="import" href="/perf_insights/mappers/task_info_map_function.html">
-<link rel="import" href="/tracing/base/units/histogram.html">
+<link rel="import" href="/perf_insights/mre/mre_result.html">
+<link rel="import" href="/tracing/base/iteration_helpers.html">
 <link rel="import" href="/tracing/core/test_utils.html">
+<link rel="import" href="/tracing/value/numeric.html">
 
 <script>
 'use strict';
@@ -50,29 +49,29 @@
       m.flowEvents.push(f2);
     });
 
-    var results = new pi.r.Results();
-    var runInfo = new pi.v.RunInfo('my_test.json');
-    pi.m.taskInfoMapFunctionForTest(results, runInfo, m);
+    var result = new pi.mre.MreResult();
+    pi.m.taskInfoMapFunctionForTest(result, m);
 
-    assert.equal(results.allValues.length, 3);
-    var value = results.allValues[0];
-    assert.equal(value.name, 'time_spent_in_queue');
-    assert.equal(Object.keys(value.value['Browser']).length, 1);
-    var histogram = tr.b.u.Histogram.fromDict(
-        value.value['Browser']['CrBrowserMain']);
+    assert.equal(tr.b.dictionaryLength(result.pairs), 3);
+    var time_spent_in_queue = result.pairs.time_spent_in_queue;
+    assert.equal(tr.b.dictionaryLength(time_spent_in_queue.Browser), 1);
+    var histogram = tr.v.Numeric.fromDict(
+        time_spent_in_queue['Browser']['CrBrowserMain']);
     assert.equal(histogram.getBinForValue(7.2).count, 1);
     assert.equal(histogram.getBinForValue(18.1).count, 1);
-    value = results.allValues[1];
-    assert.equal(value.name, 'time_spent_in_top_level_task');
-    assert.equal(Object.keys(value.value['Browser']).length, 1);
-    histogram = tr.b.u.Histogram.fromDict(
-        value.value['Browser']['CrBrowserMain']);
+    var time_spent_in_top_level_task = (
+        result.pairs.time_spent_in_top_level_task);
+    assert.equal(tr.b.dictionaryLength(
+        time_spent_in_top_level_task['Browser']), 1);
+    histogram = tr.v.Numeric.fromDict(
+        time_spent_in_top_level_task['Browser']['CrBrowserMain']);
     assert.equal(histogram.getBinForValue(10.4).count, 1);
-    value = results.allValues[2];
-    assert.equal(value.name, 'cpu_time_spent_in_top_level_task');
-    assert.equal(Object.keys(value.value['Browser']).length, 1);
-    histogram = tr.b.u.Histogram.fromDict(
-        value.value['Browser']['CrBrowserMain']);
+    var cpu_time_spent_in_top_level_task = (
+        result.pairs.cpu_time_spent_in_top_level_task);
+    assert.equal(tr.b.dictionaryLength(
+        cpu_time_spent_in_top_level_task['Browser']), 1);
+    histogram = tr.v.Numeric.fromDict(
+        cpu_time_spent_in_top_level_task['Browser']['CrBrowserMain']);
     assert.equal(histogram.getBinForValue(3.0).count, 1);
   });
 });
diff --git a/catapult/perf_insights/perf_insights/mappers/test_mapper.html b/catapult/perf_insights/perf_insights/mappers/test_mapper.html
new file mode 100644
index 0000000..cef1890
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/mappers/test_mapper.html
@@ -0,0 +1,26 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<link rel="import" href="/perf_insights/function_handle.html">
+<link rel="import" href="/tracing/value/value.html">
+
+<script>
+'use strict';
+
+tr.exportTo('pi.m', function() {
+
+  function testMapFunction(result, model) {
+    var someValue = 4; // Chosen by fair roll of the dice.
+    result.addPair('simon', {value: someValue});
+  }
+  pi.FunctionRegistry.register(testMapFunction);
+
+  return {
+    testMapFunction: testMapFunction
+  };
+});
+
+</script>
diff --git a/catapult/perf_insights/perf_insights/mappers/thread_grouping.html b/catapult/perf_insights/perf_insights/mappers/thread_grouping.html
index d6d7f50..3e5c616 100644
--- a/catapult/perf_insights/perf_insights/mappers/thread_grouping.html
+++ b/catapult/perf_insights/perf_insights/mappers/thread_grouping.html
@@ -4,10 +4,10 @@
 Use of this source code is governed by a BSD-style license that can be
 found in the LICENSE file.
 -->
+
 <link rel="import" href="/tracing/base/iteration_helpers.html">
-<link rel="import" href="/tracing/extras/chrome/chrome_model_helper.html">
-<link rel="import" href="/tracing/extras/rail/rail_interaction_record.html">
-<link rel="import" href="/perf_insights/value/value.html">
+<link rel="import" href="/tracing/model/helpers/chrome_model_helper.html">
+<link rel="import" href="/tracing/value/value.html">
 
 <script>
 'use strict';
@@ -23,10 +23,10 @@
       model.getAllThreads().forEach(function(thread) {
         this.groupNameForThreadGUID_[thread.guid] = 'Other';
       }, this);
+      var chromeHelper = model.getOrCreateHelper(
+          tr.model.helpers.ChromeModelHelper);
 
-      if (tr.e.audits.ChromeModelHelper.supportsModel(model)) {
-        var chromeHelper = new tr.e.audits.ChromeModelHelper(model);
-
+      if (chromeHelper) {
         var browserHelper = chromeHelper.browserHelper;
         this.addThreadsInProcessToGroup_(browserHelper.process, 'Browser');
 
diff --git a/catapult/perf_insights/perf_insights/mappers/thread_grouping_test.html b/catapult/perf_insights/perf_insights/mappers/thread_grouping_test.html
index aa19f94..59b3bed 100644
--- a/catapult/perf_insights/perf_insights/mappers/thread_grouping_test.html
+++ b/catapult/perf_insights/perf_insights/mappers/thread_grouping_test.html
@@ -5,10 +5,7 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/perf_insights/value/value.html">
-<link rel="import" href="/perf_insights/value/run_info.html">
-<link rel="import" href="/perf_insights/results/results.html">
-<link rel="import" href="/perf_insights/mappers/weather_report_map_function.html">
+<link rel="import" href="/perf_insights/mre/mre_result.html">
 <link rel="import" href="/tracing/core/test_utils.html">
 <link rel="import" href="/tracing/extras/chrome/chrome_test_utils.html">
 
diff --git a/catapult/perf_insights/perf_insights/mappers/trace_import_cost.html b/catapult/perf_insights/perf_insights/mappers/trace_import_cost.html
new file mode 100644
index 0000000..3652e17
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/mappers/trace_import_cost.html
@@ -0,0 +1,57 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/perf_insights/function_handle.html">
+<link rel="import" href="/perf_insights/mappers/reduce.html">
+<link rel="import" href="/tracing/extras/measure/measure.html">
+<link rel="import" href="/tracing/model/async_slice_group.html">
+
+<script>
+'use strict';
+tr.exportTo('pi.m', function() {
+  var MeasureAsyncSlice = tr.e.measure.MeasureAsyncSlice;
+
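+  // Recursively copy a slice's args, title, start and duration, along with
+  // those of its subSlices, into cost_infos.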
+  function fetchSlicesInfo(slice, cost_infos) {
+    var cost_info = {
+      args: slice.args,
+      title: slice.originalTitle,
+      start: slice.start,
+      duration: slice.duration,
+      subSlices: []
+    };
+    cost_infos.push(cost_info);
+    var subSlices = slice.subSlices;
+    for (var i = 0; i < subSlices.length; ++i) {
+      fetchSlicesInfo(subSlices[i], cost_info.subSlices);
+    }
+  }
+
+  function getTraceImportCostReport(result, model) {
+    var top_level_slices = [];
+    model.iterateAllEvents(function(event) {
+      if (event instanceof MeasureAsyncSlice &&
+          event.viewSubGroupTitle === 'TraceImport' &&
+          event.isTopLevel) {
+        top_level_slices.push(event);
+      }
+    });
+    var traceImportCostInfos = [];
+    for (var i = 0; i < top_level_slices.length; ++i) {
+      fetchSlicesInfo(top_level_slices[i], traceImportCostInfos);
+    }
+
+    result.addPair('trace_import_cost_info',
+                   {'slices': traceImportCostInfos});
+  }
+
+  pi.FunctionRegistry.register(getTraceImportCostReport);
+
+  return {
+    getTraceImportCostReport: getTraceImportCostReport
+  };
+});
+</script>
diff --git a/catapult/perf_insights/perf_insights/mappers/trace_import_cost_test.html b/catapult/perf_insights/perf_insights/mappers/trace_import_cost_test.html
new file mode 100644
index 0000000..77a8a1a
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/mappers/trace_import_cost_test.html
@@ -0,0 +1,111 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/perf_insights/mappers/trace_import_cost.html">
+<link rel="import" href="/perf_insights/mre/mre_result.html">
+<link rel="import" href="/tracing/core/test_utils.html">
+<link rel="import" href="/tracing/extras/measure/measure.html">
+
+<script>
+'use strict';
+tr.b.unittest.testSuite(function() {
+  var test_utils = tr.c.TestUtils;
+  var MeasureAsyncSlice = tr.e.measure.MeasureAsyncSlice;
+
+  test('traceImportCostTest', function() {
+    /**
+     * The structure of this async slice group
+     *
+     * |___________________||_____|
+     *               s1       s2
+     *    |_| |_________|
+     *   s1_s1   s1_s2
+     *            |_|
+     *          s1_s2_s1
+     **/
+    var m = test_utils.newModel(function(m) {
+      var p1 = m.getOrCreateProcess(1);
+      var t2 = p1.getOrCreateThread(2);
+
+      var s1 = test_utils.newSliceEx({
+        type: MeasureAsyncSlice,
+        title: 'TraceImport:s1',
+        start: 0, end: 20
+      });
+      s1.isTopLevel = true;
+      t2.asyncSliceGroup.push(s1);
+      var s1_s1 = test_utils.newSliceEx({
+        type: MeasureAsyncSlice,
+        title: 'TraceImport:s1_s1',
+        start: 3, end: 5
+      });
+      s1_s1.isTopLevel = false;
+      s1.subSlices.push(s1_s1);
+      var s1_s2 = test_utils.newSliceEx({
+        type: MeasureAsyncSlice,
+        title: 'TraceImport:s1_s2',
+        start: 7, end: 17
+      });
+      s1_s2.isTopLevel = false;
+      s1.subSlices.push(s1_s2);
+      var s1_s2_s1 = test_utils.newSliceEx({
+        type: MeasureAsyncSlice,
+        title: 'TraceImport:s1_s2_s1',
+        start: 11, end: 13
+      });
+      s1_s2_s1.isTopLevel = false;
+      s1_s2.subSlices.push(s1_s2_s1);
+
+      var s2 = test_utils.newSliceEx({
+        type: MeasureAsyncSlice,
+        title: 'TraceImport:s2',
+        start: 21, end: 27
+      });
+      s2.isTopLevel = true;
+      t2.asyncSliceGroup.push(s2);
+    });
+    var result = new pi.mre.MreResult();
+    pi.m.getTraceImportCostReport(result, m);
+
+    assert.equal(tr.b.dictionaryLength(result.pairs), 1);
+    var trace_import_cost_info = result.pairs.trace_import_cost_info;
+    assert.equal(trace_import_cost_info.slices.length, 2);
+
+    var slice1 = trace_import_cost_info.slices[0];
+    assert.equal(slice1.title, 's1');
+    assert.equal(slice1.start, 0);
+    assert.equal(slice1.duration, 20);
+
+    var subSlices = slice1.subSlices;
+    assert.equal(subSlices.length, 2);
+
+    var subSlice_1 = subSlices[0];
+    assert.equal(subSlice_1.subSlices.length, 0);
+    assert.equal(subSlice_1.title, 's1_s1');
+    assert.equal(subSlice_1.start, 3);
+    assert.equal(subSlice_1.duration, 2);
+
+    var subSlice_2 = subSlices[1];
+    assert.equal(subSlice_2.subSlices.length, 1);
+    assert.equal(subSlice_2.title, 's1_s2');
+    assert.equal(subSlice_2.start, 7);
+    assert.equal(subSlice_2.duration, 10);
+
+    var subSlice_2_1 = subSlice_2.subSlices[0];
+    assert.equal(subSlice_2_1.subSlices.length, 0);
+    assert.equal(subSlice_2_1.title, 's1_s2_s1');
+    assert.equal(subSlice_2_1.start, 11);
+    assert.equal(subSlice_2_1.duration, 2);
+
+    var slice2 = trace_import_cost_info.slices[1];
+    assert.equal(slice2.subSlices.length, 0);
+    assert.equal(slice2.title, 's2');
+    assert.equal(slice2.start, 21);
+    assert.equal(slice2.duration, 6);
+  });
+});
+</script>
diff --git a/catapult/perf_insights/perf_insights/mappers/trace_stats.html b/catapult/perf_insights/perf_insights/mappers/trace_stats.html
index 206fefb..9c6992c 100644
--- a/catapult/perf_insights/perf_insights/mappers/trace_stats.html
+++ b/catapult/perf_insights/perf_insights/mappers/trace_stats.html
@@ -6,14 +6,16 @@
 -->
 
 <link rel="import" href="/perf_insights/function_handle.html">
-<link rel="import" href="/perf_insights/value/value.html">
-<link rel="import"href="/tracing/base/range.html">
-<link rel="import"href="/tracing/base/units/histogram.html">
-<link rel="import"href="/tracing/base/units/units.html">
+<link rel="import" href="/tracing/base/range.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <script>
+'use strict';
+
 tr.exportTo('pi.m', function() {
-  function traceStatsFunction(results, run_info, model) {
+  function traceStatsFunction(result, model) {
+    var canonicalUrl = model.canonicalUrlThatCreatedThisTrace;
     var eventCount = 0;
     var firstTime = Number.MAX_VALUE;
     var lastTime = 0;
@@ -41,8 +43,8 @@
       seconds_counts[second]++;
     });
 
-    var histogram = tr.b.u.Histogram.createLinear(
-        tr.b.u.Units.unitlessNumber,
+    var histogram = tr.v.Numeric.createLinear(
+        tr.v.Unit.byName.unitlessNumber,
         tr.b.Range.fromExplicitRange(0, 50000),
         20);
 
@@ -59,7 +61,7 @@
       events_seconds: histogram.asDict()
     };
 
-    results.addValue(new pi.v.DictValue(run_info, 'stats', stats));
+    result.addPair('stats', stats);
   }
 
   pi.FunctionRegistry.register(traceStatsFunction);
diff --git a/catapult/perf_insights/perf_insights/mappers/trace_stats_test.html b/catapult/perf_insights/perf_insights/mappers/trace_stats_test.html
index e9e6ded..002078f 100644
--- a/catapult/perf_insights/perf_insights/mappers/trace_stats_test.html
+++ b/catapult/perf_insights/perf_insights/mappers/trace_stats_test.html
@@ -5,10 +5,9 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/perf_insights/value/value.html">
-<link rel="import" href="/perf_insights/value/run_info.html">
-<link rel="import" href="/perf_insights/results/results.html">
 <link rel="import" href="/perf_insights/mappers/trace_stats.html">
+<link rel="import" href="/perf_insights/mre/mre_result.html">
+<link rel="import" href="/tracing/base/iteration_helpers.html">
 <link rel="import" href="/tracing/core/test_utils.html">
 
 <script>
@@ -28,18 +27,16 @@
       }));
     });
 
-    var results = new pi.r.Results();
-    var runInfo = new pi.v.RunInfo('my_test.json');
-    pi.m.traceStatsFunctionForTest(results, runInfo, m);
+    var result = new pi.mre.MreResult();
+    pi.m.traceStatsFunctionForTest(result, m);
 
-    assert.equal(results.allValues.length, 1);
-    var dict = results.allValues[0];
-    assert.isTrue(dict instanceof pi.v.DictValue);
-    assert.equal(dict.value.totalEvents, 1);
-    assert.equal(dict.value.firstTimeInMS, 0);
-    assert.equal(dict.value.lastTimeInMS, 10);
-    assert.equal(dict.value.durationInMS, 10);
-    assert.equal(dict.value.eventsPerSecond, 100);
+    assert.equal(tr.b.dictionaryLength(result.pairs), 1);
+    var dict = result.pairs.stats;
+    assert.equal(dict.totalEvents, 1);
+    assert.equal(dict.firstTimeInMS, 0);
+    assert.equal(dict.lastTimeInMS, 10);
+    assert.equal(dict.durationInMS, 10);
+    assert.equal(dict.eventsPerSecond, 100);
   });
 });
 
diff --git a/catapult/perf_insights/perf_insights/mappers/v8_map_function.html b/catapult/perf_insights/perf_insights/mappers/v8_map_function.html
new file mode 100644
index 0000000..4e6c29d
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/mappers/v8_map_function.html
@@ -0,0 +1,256 @@
+<!DOCTYPE html>
+<!--
+Copyright 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<link rel="import" href="/perf_insights/function_handle.html">
+<link rel="import" href="/perf_insights/mappers/slice_cost.html">
+<link rel="import" href="/perf_insights/mappers/thread_grouping.html">
+<link rel="import" href="/tracing/base/iteration_helpers.html">
+<link rel="import" href="/tracing/model/helpers/chrome_model_helper.html">
+<link rel="import" href="/tracing/model/ir_coverage.html">
+<link rel="import" href="/tracing/model/user_model/user_expectation.html">
+<link rel="import" href="/tracing/value/unit.html">
+
+<script>
+'use strict';
+
+tr.exportTo('pi.m', function() {
+  function v8ReportMapFunction(result, model) {
+    var allIRs = [];
+    model.userModel.expectations.forEach(function(ir) {
+      if (!(ir instanceof tr.model.um.UserExpectation))
+        return;
+      allIRs.push(ir);
+    });
+
+    var railTypeNameByGUID = getStageTitleForEventsByGUID(model, allIRs);
+
+    var threadGrouping = new pi.m.ThreadGrouping();
+    threadGrouping.autoInitUsingHelpers(model);
+    var last_known_framework = ['Unknown/Uncategorized'];
+
+    var sliceCosts = [];
+
+    model.iterateAllEvents(function(event) {
+      if (!(event instanceof tr.model.ThreadSlice))
+        return;
+
+      if (!event.title.startsWith('V8.') && !event.title.startsWith('V8Test.'))
+        return;
+
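+      // Walk up the parent slices to find the script (fileName, scriptName
+      // or name argument) that this V8 event is attributed to.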
+      function _get_parent_data(event) {
+        var curSlice = event;
+
+        var data = {};
+        data['js'] = 'Unknown';
+        while (curSlice) {
+          if (curSlice.title === 'v8.run') {
+            data['js'] = curSlice.args['fileName'];
+          } else if (curSlice.title === 'v8.compile') {
+            data['js'] = curSlice.args['fileName'];
+          } else if (curSlice.title === 'v8.callModuleMethod') {
+            data['js'] = 'Unknown';
+          } else if (curSlice.title === 'FunctionCall') {
+            var scriptName = curSlice.args['data']['scriptName'];
+            if (scriptName.indexOf('http') != -1) {
+              data['js'] = scriptName;
+            }
+          } else if (curSlice.title === 'V8Test.ParseScript') {
+            data['js'] = curSlice.args['name'];
+          } else if (curSlice.title === 'V8Test.Compile') {
+            data['js'] = curSlice.args['name'];
+          } else if (curSlice.title === 'V8Test.CompileFullCode') {
+            data['js'] = curSlice.args['name'];
+          }
+          curSlice = curSlice.parentSlice;
+        }
+        return data;
+      }
+
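+      // Map a script URL to a known library or site by substring match;
+      // otherwise fall back to the last_known_framework placeholder.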
+      function _guess_framework_from_js_file(js_file) {
+        var frameworks = {
+            // Some common JS libs.
+            'jQuery': ['jquery'],
+            'Angular': ['angular'],
+            'Underscore': ['underscore'],
+            'Respond.js': ['respond.js'],
+            'Easeljs': ['easeljs'],
+            'Modernizr': ['modernizr'],
+            'Cloudflare': ['cloudflare.min.js'],
+            'Greensock': ['gsap/'],
+            'Mootools': ['mootools'],
+            'Zepto': ['zepto.'],
+            'Webfont': ['webfont.js'],
+            'Closure': ['closure/'],
+            'Ektron': ['ektron'],
+            'SWFObject': ['swfobject'],
+            'Html5shiv': ['html5shiv'],
+            'Requirejs': ['require.js'],
+            'Tweenjs': ['tweenjs'],
+
+            // Just dividing these out into common sites.
+            'Google - Search': ['google.com/search?', 'www.google.'],
+            'Google - Adsense': ['pagead2.googlesyndication.com/pagead/'],
+            'Google - Analytics': ['google-analytics.com'],
+            'Google - Misc': ['google.', 'googleapis.'],
+            'Adobe - Misc': ['adobe.'],
+            'Facebook': ['facebook.', 'fbcdn.'],
+            'Outlook': ['outlook.', '.live.'],
+            'Craigslist': ['craigslist.'],
+            'Amazon': ['amazon.'],
+            'Yandex': ['yandex.'],
+            'Scene7': ['s7sdk/'],
+            'DoubleClick': [
+                '.doubleclick', 'gpt.js', 'gtm.js', '.googletagservices.'],
+            'Baidu': ['baidu.'],
+            'Bing': ['bing.'],
+            'Twitter': ['twitter.'],
+            'Wish': ['MobileWebsiteCore'],
+            'Extensions - Misc': ['chrome-extension://', 'chrome://']
+        };
+
+        var js_file_lowercase = js_file.toLowerCase();
+        for (var k in frameworks) {
+          var keywords = frameworks[k];
+          for (var i = 0; i < keywords.length; i++) {
+            if (js_file_lowercase.indexOf(keywords[i]) > -1) {
+              //last_known_framework[0] = k;
+              return k;
+            }
+          }
+        }
+
+        // TODO: This is terrible, find a better way to attribute the
+        // unknown callers to a framework. Ideally we'd actually have
+        // access to data about the script or method that's running.
+        return last_known_framework[0];
+      }
+
+      function _cleanup_framework_name(name) {
+        var js_name = name;
+        if (js_name === '') {
+          js_name = 'Unknown';
+        }
+        if (js_name.length > 120) {
+          js_name = js_name.substring(0, 120) + '...';
+        }
+        return js_name;
+      }
+
+      var ufc = model.getUserFriendlyCategoryFromEvent(event);
+      var data = _get_parent_data(event);
+      data.framework = _guess_framework_from_js_file(data.js);
+      var scriptURLClean = _cleanup_framework_name(data.js);
+
+      var slice = event;
+      if (slice.title == 'V8.Execute') {
+
+        // V8.Execute events may generate several sliceCostInfo, based on the
+        // origin of the JS being executed.
+        var range = new tr.b.Range();
+        slice.addBoundsToRange(range);
+        var filtered = range.filterArray(
+            slice.parentContainer.samples,
+            function(sample) {return sample.start;});
+        filtered.forEach(function(sample) {
+          // Let's use the state of the leaf frame. TODO(chiniforooshan):
+          // understand what it means if frames of a sample stack are in
+          // different states (BUG #1542).
+          var sliceData = {
+            threadGroup: threadGrouping.getGroupNameForEvent(slice),
+            railTypeName: railTypeNameByGUID[slice.guid],
+            userFriendlyCategory: ufc || 'other',
+            title: tr.e.chrome.SliceTitleFixer.fromEvent(slice),
+            selfTime: sample.weight,
+            cpuSelfTime: sample.weight,
+            scriptURL: data.js,
+            scriptURLClean: scriptURLClean,
+            framework: data.framework,
+            traceURL: model.canonicalUrlThatCreatedThisTrace
+          };
+
+          var JSSourceState = tr.model.source_info.JSSourceState;
+          sliceData.jsTimeByState = {};
+          for (var state in JSSourceState) {
+            sliceData.jsTimeByState[JSSourceState[state]] = 0;
+          }
+
+          var sourceInfo = sample.leafStackFrame.sourceInfo;
+          if (sourceInfo === undefined ||
+              !(sourceInfo instanceof tr.model.source_info.JSSourceInfo)) {
+            sliceData.jsTime = sample.weight;
+            sliceData.jsTimeByState[JSSourceState.UNKNOWN] = sample.weight;
+          } else {
+            sliceData.jsTimeByState[sourceInfo.state] = sample.weight;
+          }
+
+          var key = sliceData.threadGroup + '/' +
+                    sliceData.railTypeName + '/' +
+                    sliceData.framework + '/' +
+                    //sliceData.scriptURLClean + '/' +
+                    sliceData.title;
+          sliceCosts.push({key: key, value: sliceData});
+        });
+        return;
+      }
+
+      var sliceData = {
+        threadGroup: threadGrouping.getGroupNameForEvent(event),
+        railTypeName: railTypeNameByGUID[event.guid],
+        userFriendlyCategory: ufc || 'other',
+        title: tr.e.chrome.SliceTitleFixer.fromEvent(event),
+        selfTime: event.selfTime,
+        cpuSelfTime: event.cpuSelfTime,
+        scriptURL: data.js,
+        scriptURLClean: scriptURLClean,
+        framework: data.framework,
+        traceURL: model.canonicalUrlThatCreatedThisTrace
+      };
+
+      var key = sliceData.threadGroup + '/' +
+                sliceData.railTypeName + '/' +
+                sliceData.framework + '/' +
+                //sliceData.scriptURLClean + '/' +
+                sliceData.title;
+
+      var newElement = {
+        key: key,
+        value: sliceData
+      };
+      sliceCosts.push(newElement);
+    });
+
+    result.addPair('wr', sliceCosts);
+  }
+
+  function getStageTitleForEventsByGUID(model, expectations) {
+    var stageTitleByGUID = {};
+    expectations.forEach(function applyAssociatedToRTN(ir) {
+      ir.associatedEvents.forEach(function applyEventToRTN(event) {
+        // Unassociated events have already been assigned to a RTN.
+        if (stageTitleByGUID[event.guid] !== undefined)
+          return;
+        stageTitleByGUID[event.guid] = ir.stageTitle;
+      }, this);
+    }, this);
+
+    model.iterateAllEvents(function storeEventToUnassociatedSet(event) {
+      if (stageTitleByGUID[event.guid] !== undefined)
+        return;
+      stageTitleByGUID[event.guid] = 'Unknown';
+    });
+    return stageTitleByGUID;
+  }
+
+  pi.FunctionRegistry.register(v8ReportMapFunction);
+
+  // Exporting for tests.
+  return {
+    v8ReportMapFunction: v8ReportMapFunction
+  };
+});
+
+</script>
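
The map function above emits one {key, value} pair per slice, where the key concatenates thread group, RAIL stage, framework guess, and slice title. A minimal Python sketch (illustrative only, not part of this patch; field and function names are assumptions) of the kind of aggregation a reducer could run over those pairs:

    # Illustrative sketch: summing slice-cost pairs keyed by
    # threadGroup/railTypeName/framework/title, as emitted above.
    from collections import defaultdict

    def aggregate_slice_costs(slice_costs):
        """slice_costs is a list of {'key': ..., 'value': {...}} dicts."""
        totals = defaultdict(
            lambda: {'selfTime': 0.0, 'cpuSelfTime': 0.0, 'sliceCount': 0})
        for entry in slice_costs:
            bucket = totals[entry['key']]
            bucket['selfTime'] += entry['value'].get('selfTime') or 0.0
            bucket['cpuSelfTime'] += entry['value'].get('cpuSelfTime') or 0.0
            bucket['sliceCount'] += 1
        return dict(totals)

    costs = [
        {'key': 'Renderer/Animation/jQuery/V8.Execute',
         'value': {'selfTime': 1.5, 'cpuSelfTime': 1.2}},
        {'key': 'Renderer/Animation/jQuery/V8.Execute',
         'value': {'selfTime': 0.5, 'cpuSelfTime': 0.4}},
    ]
    print(aggregate_slice_costs(costs))
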
diff --git a/catapult/perf_insights/perf_insights/mappers/weather_report_map_function.html b/catapult/perf_insights/perf_insights/mappers/weather_report_map_function.html
deleted file mode 100644
index c55d391..0000000
--- a/catapult/perf_insights/perf_insights/mappers/weather_report_map_function.html
+++ /dev/null
@@ -1,104 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/base/units/units.html">
-<link rel="import" href="/tracing/base/units/histogram.html">
-<link rel="import" href="/perf_insights/function_handle.html">
-<link rel="import" href="/perf_insights/value/value.html">
-<link rel="import" href="/perf_insights/mappers/slice_cost.html">
-<link rel="import" href="/perf_insights/mappers/thread_grouping.html">
-<link rel="import" href="/tracing/base/iteration_helpers.html">
-<link rel="import" href="/tracing/extras/chrome/chrome_model_helper.html">
-<link rel="import" href="/tracing/extras/rail/rail_interaction_record.html">
-<link rel="import" href="/tracing/extras/rail/rail_score.html">
-<link rel="import" href="/tracing/model/ir_coverage.html">
-
-<script>
-tr.exportTo('pi.m', function() {
-
-  function getWeatherReportFromModel(model) {
-    // Organize all RAIL IRs by type and name in a tree. A node of this tree is
-    // a dict with keys |overallScore|, |scores| and optionally |subTypes|.
-    // |overallScore| and |scores| are mutually exclusive. If |overallScore| is
-    // present it contains the overall rail score of all IRs under the tree. If
-    // |scores| is present it contains an array with the IR scores of all the
-    // IRs under the tree. |subTypes| is a map from a subType (IR type, IR name)
-    // to a node.
-    var irTree = {
-      overallScore: 0
-    };
-    var allIRs = [];
-    function addIRToNode(node, ir, path) {
-      if (node.overallScore === undefined) {
-        // For a node without overall rail score keep the individual IR scores.
-        node.irScores = node.irScores || [];
-        node.irScores.push(ir.railScore);
-      }
-      if (path.length === 0)
-        return;
-      var subType = path[0];
-      node.subTypes = node.subTypes || {};
-      node.subTypes[subType] = node.subTypes[subType] || {};
-      addIRToNode(node.subTypes[subType], ir, path.slice(1));
-    }
-    model.interactionRecords.forEach(function(ir) {
-      if (!(ir instanceof tr.e.rail.RAILInteractionRecord))
-        return;
-      allIRs.push(ir);
-      var path = [
-        tr.e.rail.userFriendlyRailTypeName(ir.railTypeName),
-        ir.name || 'Unnamed'
-      ];
-      addIRToNode(irTree, ir, path);
-    });
-    irTree.overallScore = (new tr.e.rail.RAILScore(allIRs)).overallScore;
-
-    var railTypeNameByGUID = getRAILTypeNameForEventsByGUID(model, allIRs);
-    var threadGrouping = new pi.m.ThreadGrouping();
-    threadGrouping.autoInitUsingHelpers(model);
-
-    var wr = {
-      irTree: irTree,
-      irCoverage: tr.model.getIRCoverageFromModel(model),
-      sliceCosts: pi.m.getSliceCostReport(model, threadGrouping,
-                                          railTypeNameByGUID)
-    };
-    return wr;
-  }
-
-  function getRAILTypeNameForEventsByGUID(model, railIRs) {
-    var railTypeNameByGUID = {};
-    railIRs.forEach(function applyAssociatedToRTN(ir) {
-      ir.associatedEvents.forEach(function applyEventToRTN(event) {
-        // Unassociated events have already been assigned to a RTN.
-        if (railTypeNameByGUID[event.guid] !== undefined)
-          return;
-        railTypeNameByGUID[event.guid] = tr.e.rail.userFriendlyRailTypeName(
-            ir.railTypeName);
-      }, this);
-    }, this);
-
-    model.iterateAllEvents(function storeEventToUnassociatedSet(event) {
-      if (railTypeNameByGUID[event.guid] !== undefined)
-        return;
-      railTypeNameByGUID[event.guid] = 'Unknown';
-    });
-    return railTypeNameByGUID;
-  }
-
-  function weatherReportMapFunction(results, runInfo, model) {
-    var wr = pi.m.getWeatherReportFromModel(model);
-    results.addValue(new pi.v.DictValue(runInfo, 'wr', wr));
-  }
-  pi.FunctionRegistry.register(weatherReportMapFunction);
-
-  return {
-    getWeatherReportFromModel: getWeatherReportFromModel,
-    weatherReportMapFunction: weatherReportMapFunction
-  };
-});
-
-</script>
diff --git a/catapult/perf_insights/perf_insights/mappers/weather_report_map_function_test.html b/catapult/perf_insights/perf_insights/mappers/weather_report_map_function_test.html
deleted file mode 100644
index c71a2d4..0000000
--- a/catapult/perf_insights/perf_insights/mappers/weather_report_map_function_test.html
+++ /dev/null
@@ -1,66 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2013 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/perf_insights/results/results.html">
-<link rel="import" href="/perf_insights/value/run_info.html">
-<link rel="import" href="/perf_insights/value/value.html">
-<link rel="import" href="/perf_insights/mappers/weather_report_map_function.html">
-<link rel="import" href="/tracing/core/test_utils.html">
-<link rel="import" href="/tracing/extras/rail/idle_interaction_record.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  var test_utils = tr.c.TestUtils;
-  var ThreadSlice = tr.model.ThreadSlice;
-
-  test('basicModelTest', function() {
-    var m = test_utils.newModel(function(m) {
-      var p1 = m.getOrCreateProcess(1);
-      var t2 = p1.getOrCreateThread(2);
-      t2.sliceGroup.pushSlice(test_utils.newSliceEx({
-        name: 'some_slice',
-        start: 0, duration: 10
-      }));
-    });
-
-    var results = new pi.r.Results();
-    var runInfo = new pi.v.RunInfo('my_test.json');
-    pi.m.weatherReportMapFunction(results, runInfo, m);
-
-    assert.equal(results.allValues.length, 1);
-    assert.isTrue(results.allValues[0] instanceof pi.v.DictValue);
-  });
-
-  test('basicWrTest', function() {
-    var m = test_utils.newModel(function(m) {
-      var p1 = m.getOrCreateProcess(1);
-      var t2 = p1.getOrCreateThread(2);
-      var t2_s1 = t2.sliceGroup.pushSlice(test_utils.newSliceEx({
-        type: ThreadSlice,
-        name: 'some_slice',
-        start: 0, end: 10
-      }));
-      var t2_s2 = t2.sliceGroup.pushSlice(test_utils.newSliceEx({
-        type: ThreadSlice,
-        name: 'some_slice',
-        start: 20, end: 30
-      }));
-
-      var idleIr = new tr.e.rail.IdleInteractionRecord(m, 0, 10);
-      m.interactionRecords.push(idleIr);
-      idleIr.associatedEvents.push(t2_s2);
-    });
-
-    var wr = pi.m.getWeatherReportFromModel(m);
-    assert.isDefined(wr.sliceCosts);
-    assert.equal(wr.sliceCosts.length, 2);
-  });
-});
-
-</script>
\ No newline at end of file
diff --git a/catapult/perf_insights/perf_insights/mre/failure.html b/catapult/perf_insights/perf_insights/mre/failure.html
index 1d449ed..bb34dfc 100644
--- a/catapult/perf_insights/perf_insights/mre/failure.html
+++ b/catapult/perf_insights/perf_insights/mre/failure.html
@@ -4,15 +4,17 @@
 Use of this source code is governed by a BSD-style license that can be
 found in the LICENSE file.
 -->
+<link rel="import" href="/tracing/base/base.html">
+
 <script>
 'use strict';
 
 tr.exportTo('pi.mre', function() {
-  function Failure(jobGuid, functionHandleGuid, traceGuid, failureTypeName,
-                   description, stack) {
-    this.jobGuid = jobGuid;
-    this.functionHandleGuid = functionHandleGuid;
-    this.traceGuid = traceGuid;
+  function Failure(job, functionHandleString, traceCanonicalUrl,
+                   failureTypeName, description, stack) {
+    this.job = job;
+    this.functionHandleString = functionHandleString;
+    this.traceCanonicalUrl = traceCanonicalUrl;
     this.failureTypeName = failureTypeName;
     this.description = description;
     this.stack = stack;
@@ -20,11 +22,11 @@
 
   Failure.prototype = {
     asDict: function() {
+      // TODO(eakuefner): Serialize job once reduction is implemented.
       return {
-        job_guid: this.jobGuid,
-        function_handle_guid: this.functionHandleGuid,
-        trace_guid: this.traceGuid,
-        failure_type_name: this.failureTypeName,
+        function_handle_string: this.functionHandleString,
+        trace_canonical_url: this.traceCanonicalUrl,
+        type: this.failureTypeName,
         description: this.description,
         stack: this.stack
       };
@@ -32,8 +34,8 @@
   };
 
   Failure.fromDict = function(failureDict) {
-    return new Failure(failureDict.job_guid, failureDict.function_handle_guid,
-                       failureDict.trace_guid, failureDict.failure_type_name,
+    return new Failure(undefined, failureDict.function_handle_string,
+                       failureDict.trace_canonical_url, failureDict.type,
                        failureDict.description, failureDict.stack);
   };
 
diff --git a/catapult/perf_insights/perf_insights/mre/failure.py b/catapult/perf_insights/perf_insights/mre/failure.py
index f464d1f..8c07843 100644
--- a/catapult/perf_insights/perf_insights/mre/failure.py
+++ b/catapult/perf_insights/perf_insights/mre/failure.py
@@ -2,32 +2,52 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+from perf_insights.mre import job as job_module
+
 
 class Failure(object):
 
-  def __init__(self, job_guid, function_handle_guid, trace_guid,
+  def __init__(self, job, function_handle_string, trace_canonical_url,
                failure_type_name, description, stack):
-    self.job_guid = job_guid
-    self.function_handle_guid = function_handle_guid
-    self.trace_guid = trace_guid
+    assert isinstance(job, job_module.Job)
+
+    self.job = job
+    self.function_handle_string = function_handle_string
+    self.trace_canonical_url = trace_canonical_url
     self.failure_type_name = failure_type_name
     self.description = description
     self.stack = stack
 
+  def __str__(self):
+    return (
+      'Failure for job %s with function handle %s and trace handle %s:\n'
+      'of type %s with description %s. Stack:\n\n%s' % (
+        self.job.guid, self.function_handle_string,
+        self.trace_canonical_url,
+        self.failure_type_name, self.description, self.stack))
+
   def AsDict(self):
-    return  {
-        'job_guid': self.job_guid,
-        'function_handle_guid': self.function_handle_guid,
-        'trace_guid': self.trace_guid,
-        'failure_type_name': self.failure_type_name,
+    return {
+        'job_guid': str(self.job.guid),
+        'function_handle_string': self.function_handle_string,
+        'trace_canonical_url': self.trace_canonical_url,
+        'type': self.failure_type_name,
         'description': self.description,
         'stack': self.stack
     }
 
   @staticmethod
-  def FromDict(failure_dict):
-    return Failure(failure_dict['job_guid'],
-                   failure_dict['function_handle_guid'],
-                   failure_dict['trace_guid'],
-                   failure_dict['failure_type_name'],
-                   failure_dict['description'], failure_dict['stack'])
+  def FromDict(failure_dict, job, failure_names_to_constructors=None):
+    if failure_names_to_constructors is None:
+      failure_names_to_constructors = {}
+    failure_type_name = failure_dict['type']
+    if failure_type_name in failure_names_to_constructors:
+      cls = failure_names_to_constructors[failure_type_name]
+    else:
+      cls = Failure
+
+    return cls(job,
+               failure_dict['function_handle_string'],
+               failure_dict['trace_canonical_url'],
+               failure_type_name, failure_dict['description'],
+               failure_dict['stack'])
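
A short sketch of the new Failure round trip, mirroring the unittest below and assuming a perf_insights checkout is on sys.path (the 'foo.html:Foo' handle string and guids are the same illustrative values the tests use):

    from perf_insights import function_handle
    from perf_insights.mre import failure as failure_module
    from perf_insights.mre import job as job_module

    map_handle = function_handle.FunctionHandle(
        modules_to_load=[function_handle.ModuleToLoad(filename='foo.html')],
        function_name='Foo')
    job = job_module.Job(map_handle, None, guid='1')

    f = failure_module.Failure(job, 'foo.html:Foo', 'file://foo.html',
                               'err', 'desc', 'stack')
    d = f.AsDict()                      # {'job_guid': '1', 'type': 'err', ...}
    f2 = failure_module.Failure.FromDict(d, job)
    assert f2.trace_canonical_url == 'file://foo.html'
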
diff --git a/catapult/perf_insights/perf_insights/mre/failure_test.html b/catapult/perf_insights/perf_insights/mre/failure_test.html
index 4762403..cb8dc0a 100644
--- a/catapult/perf_insights/perf_insights/mre/failure_test.html
+++ b/catapult/perf_insights/perf_insights/mre/failure_test.html
@@ -12,13 +12,13 @@
 
 tr.b.unittest.testSuite(function() {
   test('failureAsDictTest', function() {
-    var failure = new pi.mre.Failure('1', '2', '3', 'err', 'desc', 'stack');
+    var failure = new pi.mre.Failure(undefined, 'foo.html:Foo',
+                                     'file://foo.html', 'err', 'desc', 'stack');
 
     assert.deepEqual(failure.asDict(), {
-      job_guid: '1',
-      function_handle_guid: '2',
-      trace_guid: '3',
-      failure_type_name: 'err',
+      function_handle_string: 'foo.html:Foo',
+      trace_canonical_url: 'file://foo.html',
+      type: 'err',
       description: 'desc',
       stack: 'stack'
     });
@@ -26,19 +26,17 @@
 
   test('failureFromDictTest', function() {
     var failureDict = {
-      job_guid: '1',
-      function_handle_guid: '2',
-      trace_guid: '3',
-      failure_type_name: 'err',
+      function_handle_string: 'foo.html:Foo',
+      trace_canonical_url: 'file://foo.html',
+      type: 'err',
       description: 'desc',
       stack: 'stack'
     };
 
     var failure = pi.mre.Failure.fromDict(failureDict);
 
-    assert.equal(failure.jobGuid, '1');
-    assert.equal(failure.functionHandleGuid, '2');
-    assert.equal(failure.traceGuid, '3');
+    assert.equal(failure.functionHandleString, 'foo.html:Foo');
+    assert.equal(failure.traceCanonicalUrl, 'file://foo.html');
     assert.equal(failure.failureTypeName, 'err');
     assert.equal(failure.description, 'desc');
     assert.equal(failure.stack, 'stack');
diff --git a/catapult/perf_insights/perf_insights/mre/failure_unittest.py b/catapult/perf_insights/perf_insights/mre/failure_unittest.py
index 8738a18..3ffcc62 100644
--- a/catapult/perf_insights/perf_insights/mre/failure_unittest.py
+++ b/catapult/perf_insights/perf_insights/mre/failure_unittest.py
@@ -4,38 +4,55 @@
 
 import unittest
 
+from perf_insights import function_handle
 from perf_insights.mre import failure as failure_module
+from perf_insights.mre import job as job_module
+
+
+def _SingleFileFunctionHandle(filename, function_name, guid):
+  return function_handle.FunctionHandle(
+      modules_to_load=[function_handle.ModuleToLoad(filename=filename)],
+      function_name=function_name, guid=guid)
 
 
 class FailureTests(unittest.TestCase):
 
   def testAsDict(self):
-    failure = failure_module.Failure('1', '2', '3', 'err', 'desc', 'stack')
+    map_function_handle = _SingleFileFunctionHandle('foo.html', 'Foo', '2')
+    reduce_function_handle = _SingleFileFunctionHandle('bar.html', 'Bar', '3')
+    job = job_module.Job(map_function_handle, reduce_function_handle, '1')
+    failure = failure_module.Failure(job, 'foo.html:Foo',
+                                     'file://foo.html',
+                                     'err', 'desc', 'stack')
 
     self.assertEquals(failure.AsDict(), {
       'job_guid': '1',
-      'function_handle_guid': '2',
-      'trace_guid': '3',
-      'failure_type_name': 'err',
+      'function_handle_string': 'foo.html:Foo',
+      'trace_canonical_url': 'file://foo.html',
+      'type': 'err',
       'description': 'desc',
       'stack': 'stack'
     })
 
   def testFromDict(self):
+    map_function_handle = _SingleFileFunctionHandle('foo.html', 'Foo', '2')
+    reduce_function_handle = _SingleFileFunctionHandle('bar.html', 'Bar', '3')
+    job = job_module.Job(map_function_handle, reduce_function_handle, '1')
+
     failure_dict = {
         'job_guid': '1',
-        'function_handle_guid': '2',
-        'trace_guid': '3',
-        'failure_type_name': 'err',
+        'function_handle_string': 'foo.html:Foo',
+        'trace_canonical_url': 'file://foo.html',
+        'type': 'err',
         'description': 'desc',
         'stack': 'stack'
     }
 
-    failure = failure_module.Failure.FromDict(failure_dict)
+    failure = failure_module.Failure.FromDict(failure_dict, job)
 
-    self.assertEquals(failure.job_guid, '1')
-    self.assertEquals(failure.function_handle_guid, '2')
-    self.assertEquals(failure.trace_guid, '3')
+    self.assertEquals(failure.job.guid, '1')
+    self.assertEquals(failure.function_handle_string, 'foo.html:Foo')
+    self.assertEquals(failure.trace_canonical_url, 'file://foo.html')
     self.assertEquals(failure.failure_type_name, 'err')
     self.assertEquals(failure.description, 'desc')
     self.assertEquals(failure.stack, 'stack')
diff --git a/catapult/perf_insights/perf_insights/mre/file_handle.html b/catapult/perf_insights/perf_insights/mre/file_handle.html
new file mode 100644
index 0000000..71d89de
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/mre/file_handle.html
@@ -0,0 +1,82 @@
+<!DOCTYPE html>
+<!--
+Copyright 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<link rel="import" href="/tracing/base/guid.html">
+<link rel="import" href="/tracing/base/xhr.html">
+<link rel="import" href="/tracing/base/utils.html">
+<link rel="import" href="/tracing/extras/full_config.html">
+<link rel="import" href="/tracing/importer/import.html">
+<link rel="import" href="/tracing/model/model.html">
+
+<script>
+'use strict';
+
+tr.exportTo('pi.mre', function() {
+  function FileHandle(canonicalUrl) {
+    this.canonicalUrl_ = canonicalUrl;
+  }
+
+  FileHandle.prototype = {
+    get canonicalUrl() { return this.canonicalUrl_; },
+
+    asDict: function() {
+      var d = {
+        canonical_url: this.canonicalUrl_
+      };
+
+      this._asDictInto(d);
+      if (d.type === undefined)
+        throw new Error('_asDictInto must set type field');
+    },
+
+    load: function() {
+      throw new Error('Not implemented');
+    }
+  };
+
+  FileHandle.fromDict = function(handleDict) {
+    if (handleDict.type === 'url')
+      return URLFileHandle.fromDict(handleDict);
+
+    throw new Error('Not implemented: fromDict for ' + handleDict.type);
+  };
+
+
+  function URLFileHandle(canonicalUrl, urlToLoad) {
+    // TODO(eakuefner): assert startswith file://
+    FileHandle.call(this, canonicalUrl);
+    this.urlToLoad = urlToLoad;
+  }
+
+  URLFileHandle.prototype = {
+    __proto__: FileHandle.prototype,
+
+    _asDictInto: function(handleDict) {
+      handleDict.urlToLoad = this.urlToLoad;
+      handleDict.type = 'url';
+    },
+
+    load: function() {
+      try {
+        return tr.b.getSync(this.urlToLoad);
+      } catch (ex) {
+        var err = new Error('Could not open ' + this.urlToLoad);
+        err.name = 'FileLoadingError';
+        throw err;
+      }
+    }
+  };
+
+  URLFileHandle.fromDict = function(handleDict) {
+    return new URLFileHandle(handleDict.canonical_url, handleDict.url_to_load);
+  };
+
+  return {
+    FileHandle: FileHandle,
+    URLFileHandle: URLFileHandle
+  };
+});
+</script>
diff --git a/catapult/perf_insights/perf_insights/mre/file_handle.py b/catapult/perf_insights/perf_insights/mre/file_handle.py
new file mode 100644
index 0000000..59cef7f
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/mre/file_handle.py
@@ -0,0 +1,100 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import contextlib
+import os
+import tempfile
+
+from perf_insights import cloud_storage
+
+
+class FilePreparationError(Exception):
+  """Raised if something goes wrong while preparing a file for processing."""
+
+
+class FileHandle(object):
+  def __init__(self, canonical_url):
+    self._canonical_url = canonical_url
+
+  @property
+  def canonical_url(self):
+    return self._canonical_url
+
+  @contextlib.contextmanager
+  def PrepareFileForProcessing(self):
+    """Ensure that the URL to the file will be acessible during processing.
+
+    This function must do any pre-work to ensure that mappers and reducers will
+    be able to read from the URL contained in the file handle.
+
+    Raises:
+      FilePreparationError: If something went wrong while preparing the file.
+    """
+    yield self._WillProcess()
+    self._DidProcess()
+
+  def _WillProcess(self):
+    raise NotImplementedError()
+
+  def _DidProcess(self):
+    raise NotImplementedError()
+
+
+class URLFileHandle(FileHandle):
+  def __init__(self, canonical_url, url_to_load):
+    super(URLFileHandle, self).__init__(canonical_url)
+
+    self._url_to_load = url_to_load
+
+  def AsDict(self):
+    return {
+        'type': 'url',
+        'canonical_url': self._canonical_url,
+        'url_to_load': self._url_to_load
+    }
+
+  def _WillProcess(self):
+    return self
+
+  def _DidProcess(self):
+    pass
+
+
+class GCSFileHandle(FileHandle):
+  def __init__(self, canonical_url, cache_directory):
+    super(GCSFileHandle, self).__init__(canonical_url)
+    file_name = canonical_url.split('/')[-1]
+    self.cache_file = os.path.join(
+        cache_directory, file_name + '.gz')
+
+  def _WillProcess(self):
+    if not os.path.exists(self.cache_file):
+      try:
+        cloud_storage.Copy(self.canonical_url, self.cache_file)
+      except cloud_storage.CloudStorageError:
+        return None
+    return URLFileHandle(self.canonical_url, 'file://' + self.cache_file)
+
+  def _DidProcess(self):
+    pass
+
+
+class InMemoryFileHandle(FileHandle):
+  def __init__(self, canonical_url, data):
+    super(InMemoryFileHandle, self).__init__(canonical_url)
+
+    self.data = data
+    self._temp_file_path = None
+
+  def _WillProcess(self):
+    temp_file = tempfile.NamedTemporaryFile(delete=False)
+    temp_file.write(self.data)
+    temp_file.close()
+    self._temp_file_path = temp_file.name
+
+    return URLFileHandle(self.canonical_url, 'file://' + self._temp_file_path)
+
+  def _DidProcess(self):
+    os.remove(self._temp_file_path)
+    self._temp_file_path = None
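
A sketch of the FileHandle contract, assuming a perf_insights checkout ('inmemory://trace_0' is a made-up canonical URL): PrepareFileForProcessing yields a URLFileHandle whose url_to_load is readable, then cleans up when the with-block exits.

    from perf_insights.mre import file_handle

    fh = file_handle.InMemoryFileHandle('inmemory://trace_0',
                                        '{"traceEvents": []}')
    with fh.PrepareFileForProcessing() as prepared:
        # prepared is a URLFileHandle backed by a temp file on disk.
        print(prepared.AsDict()['url_to_load'])
    # The temp file is removed once the context manager exits (_DidProcess).
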
diff --git a/catapult/perf_insights/perf_insights/mre/job.html b/catapult/perf_insights/perf_insights/mre/job.html
new file mode 100644
index 0000000..da95b68
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/mre/job.html
@@ -0,0 +1,58 @@
+<!DOCTYPE html>
+<!--
+Copyright 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<link rel="import" href="/perf_insights/function_handle.html">
+<link rel="import" href="/tracing/base/guid.html">
+
+<script>
+'use strict';
+
+tr.exportTo('pi.mre', function() {
+
+  function Job(mapFunctionHandle, reduceFunctionHandle, opt_guid) {
+    this.mapFunctionHandle_ = mapFunctionHandle;
+    this.reduceFunctionHandle_ = reduceFunctionHandle;
+    if (opt_guid === undefined)
+      this.guid_ = tr.b.GUID.allocate();
+    else
+      this.guid_ = opt_guid;
+  }
+
+  Job.prototype = {
+    get mapFunctionHandle() { return this.mapFunctionHandle_; },
+    get reduceFunctionHandle() { return this.reduceFunctionHandle_; },
+    get guid() { return this.guid_; },
+
+    asDict: function() {
+      return {
+        map_function_handle: this.mapFunctionHandle_.asDict(),
+        reduce_function_handle: this.reduceFunctionHandle_ ?
+            this.reduceFunctionHandle_.asDict() : undefined,
+        guid: this.guid_.toString()
+      };
+    }
+  };
+
+  Job.fromDict = function(jobDict) {
+    var mapFunctionHandle = null;
+    if (jobDict.map_function_handle != null) {
+      mapFunctionHandle = pi.FunctionHandle.fromDict(
+          jobDict.map_function_handle);
+    }
+    var reduceFunctionHandle = null;
+    if (jobDict.reduce_function_handle != null) {
+      reduceFunctionHandle = pi.FunctionHandle.fromDict(
+          jobDict.reduce_function_handle);
+    }
+
+    return new Job(mapFunctionHandle, reduceFunctionHandle, jobDict.guid);
+  };
+
+  return {
+    Job: Job
+  };
+});
+</script>
diff --git a/catapult/perf_insights/perf_insights/mre/job.py b/catapult/perf_insights/perf_insights/mre/job.py
new file mode 100644
index 0000000..9d5fd6c
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/mre/job.py
@@ -0,0 +1,63 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import os
+import uuid
+
+import perf_insights_project
+from perf_insights import function_handle
+
+_DEFAULT_REDUCE_FILE_PATH = os.path.join(
+    perf_insights_project.PerfInsightsProject.perf_insights_src_path,
+    'reducers', 'default_reduce_function.html')
+_DEFAULT_REDUCE_FUNCTION_NAME = 'defaultReduceFunction'
+
+
+class Job(object):
+
+  def __init__(self, map_function_handle, reduce_function_handle=None,
+               guid=None):
+    assert map_function_handle is not None
+
+    self._map_function_handle = map_function_handle
+    self._reduce_function_handle = reduce_function_handle
+    if not reduce_function_handle:
+      self._reduce_function_handle = self._CreateDefaultReduceHandle()
+    self._guid = guid if guid is not None else uuid.uuid4()
+
+  @property
+  def guid(self):
+    return self._guid
+
+  @property
+  def map_function_handle(self):
+    return self._map_function_handle
+
+  @property
+  def reduce_function_handle(self):
+    return self._reduce_function_handle
+
+  def _CreateDefaultReduceHandle(self):
+    module = function_handle.ModuleToLoad(filename=_DEFAULT_REDUCE_FILE_PATH)
+    handle = function_handle.FunctionHandle(
+        modules_to_load=[module], function_name=_DEFAULT_REDUCE_FUNCTION_NAME)
+    return handle
+
+  def AsDict(self):
+    values_dict = {
+        'map_function_handle': self._map_function_handle.AsDict(),
+        'reduce_function_handle': self._reduce_function_handle.AsDict(),
+        'guid': str(self._guid)
+    }
+    return values_dict
+
+  @staticmethod
+  def FromDict(job_dict):
+    reduce_function_handle = None
+    if 'reduce_function_handle' in job_dict:
+      reduce_function_handle = function_handle.FunctionHandle.FromDict(
+          job_dict['reduce_function_handle'])
+
+    return Job(
+        function_handle.FunctionHandle.FromDict(
+            job_dict['map_function_handle']), reduce_function_handle)
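
A sketch of the new Job class (perf_insights checkout assumed; 'my_mapper.html' and 'MyMapFunction' are hypothetical names): omitting the reducer falls back to the default reduce function handle, and AsDict/FromDict round-trip both handles.

    from perf_insights import function_handle
    from perf_insights.mre import job as job_module

    module = function_handle.ModuleToLoad(filename='my_mapper.html')
    map_handle = function_handle.FunctionHandle(
        modules_to_load=[module], function_name='MyMapFunction')

    job = job_module.Job(map_handle)   # reducer defaults to defaultReduceFunction
    job_dict = job.AsDict()            # map/reduce handle dicts plus a string guid
    restored = job_module.Job.FromDict(job_dict)
    assert restored.reduce_function_handle is not None
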
diff --git a/catapult/perf_insights/perf_insights/mre/job_results.html b/catapult/perf_insights/perf_insights/mre/job_results.html
deleted file mode 100644
index c3e96c9..0000000
--- a/catapult/perf_insights/perf_insights/mre/job_results.html
+++ /dev/null
@@ -1,52 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/perf_insights/mre/failure.html">
-
-<script>
-'use strict';
-
-tr.exportTo('pi.mre', function() {
-  function JobResults(failures, reduceResults) {
-    if (failures === undefined)
-      failures = [];
-    if (reduceResults === undefined)
-      reduceResults = {};
-    this.failures = failures;
-    this.reduceResults = reduceResults;
-  }
-
-  JobResults.prototype = {
-    addFailure: function(failure) {
-      this.failures.push(failure);
-    },
-
-    addResult: function(key, value) {
-      if (key in this.reduceResults)
-        throw new Error('Key ' + key + 'already exists in results.');
-      this.reduceResults[key] = value;
-    },
-
-    asDict: function() {
-      return {
-        failures: this.failures.map(function(f) {return f.asDict();}),
-        reduce_results: this.reduceResults
-      };
-    }
-  };
-
-  JobResults.fromDict = function(jobResultsDict) {
-    var failures = jobResultsDict.failures.map(pi.mre.Failure.fromDict);
-    var reduceResults = jobResultsDict.reduce_results;
-    return new JobResults(failures, reduceResults);
-  };
-
-  return {
-    JobResults: JobResults
-  };
-});
-
-</script>
diff --git a/catapult/perf_insights/perf_insights/mre/job_results.py b/catapult/perf_insights/perf_insights/mre/job_results.py
deleted file mode 100644
index f7bd5da..0000000
--- a/catapult/perf_insights/perf_insights/mre/job_results.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-from perf_insights.mre import failure as failure_module
-
-
-class JobResults(object):
-
-  def __init__(self, failures=None, reduce_results=None):
-    if failures is None:
-      failures = []
-    if reduce_results is None:
-      reduce_results = {}
-    self._failures = failures
-    self._reduce_results = reduce_results
-
-  @property
-  def failures(self):
-      return self._failures
-
-  @property
-  def reduce_results(self):
-      return self._reduce_results
-
-  def AsDict(self):
-    return {
-        'failures': [failure.AsDict() for failure in self._failures],
-        'reduce_results': self.reduce_results
-    }
-
-  @staticmethod
-  def FromDict(job_results_dict):
-    failures = map(failure_module.Failure.FromDict,
-                   job_results_dict['failures'])
-    reduce_results = job_results_dict['reduce_results']
-
-    return JobResults(failures, reduce_results)
diff --git a/catapult/perf_insights/perf_insights/mre/job_results_test.html b/catapult/perf_insights/perf_insights/mre/job_results_test.html
deleted file mode 100644
index 7e7b5a2..0000000
--- a/catapult/perf_insights/perf_insights/mre/job_results_test.html
+++ /dev/null
@@ -1,49 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/perf_insights/mre/failure.html">
-<link rel="import" href="/perf_insights/mre/job_results.html">
-<link rel="import" href="/tracing/base/iteration_helpers.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  test('jobResultsAsDictTest', function() {
-    var failure = new pi.mre.Failure('1', '2', '3', 'err', 'desc', 'stack');
-    var result = {foo: 'bar'};
-    var results = new pi.mre.JobResults([failure], result);
-
-    var resultsDict = results.asDict();
-
-    assert.equal(tr.b.dictionaryLength(resultsDict), 2);
-    assert.equal(resultsDict.failures.length, 1);
-    assert.deepEqual(resultsDict.failures[0], failure.asDict());
-    assert.deepEqual(resultsDict.reduce_results, {foo: 'bar'});
-  });
-
-  test('jobResultsFromDictTest', function() {
-    var resultsDict = {
-      failures: [{
-          job_guid: '1',
-          function_handle_guid: '2',
-          trace_guid: '3',
-          failure_type_name: 'err',
-          description: 'desc',
-          stack: 'stack'
-      }],
-      reduce_results: {foo: 'bar'}
-    };
-
-    var results = pi.mre.JobResults.fromDict(resultsDict);
-    assert.equal(results.failures.length, 1);
-    assert.instanceOf(results.failures[0], pi.mre.Failure);
-    assert.deepEqual(results.reduceResults, {foo: 'bar'});
-  });
-});
-
-</script>
diff --git a/catapult/perf_insights/perf_insights/mre/job_results_unittest.py b/catapult/perf_insights/perf_insights/mre/job_results_unittest.py
deleted file mode 100644
index 4705bed..0000000
--- a/catapult/perf_insights/perf_insights/mre/job_results_unittest.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import unittest
-
-from perf_insights.mre import failure as failure_module
-from perf_insights.mre import job_results
-
-
-class JobResultsTests(unittest.TestCase):
-
-  def testJobResultsEmptyByDefault(self):
-    results = job_results.JobResults()
-    self.assertEquals(results.failures, [])
-    self.assertEquals(results.reduce_results, {})
-
-  def testAsDict(self):
-    failure = failure_module.Failure('1', '2', '3', 'err', 'desc', 'stack')
-    result = {'foo': 'bar'}
-    results = job_results.JobResults([failure], result)
-
-    results_dict = results.AsDict()
-
-    self.assertEquals(len(results_dict), 2)
-    self.assertEquals(len(results_dict['failures']), 1)
-    self.assertEquals(results_dict['reduce_results'], {'foo': 'bar'})
-
-  def testFromDict(self):
-    results_dict = {
-        'failures': [{
-            'job_guid': '1',
-            'function_handle_guid': '2',
-            'trace_guid': '3',
-            'failure_type_name': 'err',
-            'description': 'desc',
-            'stack': 'stack'
-        }],
-        'reduce_results': {'foo': 'bar'}
-    }
-
-    results = job_results.JobResults.FromDict(results_dict)
-    self.assertEquals(len(results.failures), 1)
-    self.assertIsInstance(results.failures[0], failure_module.Failure)
-    self.assertEquals(results.reduce_results, {'foo': 'bar'})
diff --git a/catapult/perf_insights/perf_insights/mre/mre_result.html b/catapult/perf_insights/perf_insights/mre/mre_result.html
new file mode 100644
index 0000000..97e46bd
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/mre/mre_result.html
@@ -0,0 +1,61 @@
+<!DOCTYPE html>
+<!--
+Copyright 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<link rel="import" href="/perf_insights/mre/failure.html">
+
+<script>
+'use strict';
+
+tr.exportTo('pi.mre', function() {
+  function MreResult(failures, pairs) {
+    if (failures === undefined)
+      failures = [];
+    if (pairs === undefined)
+      pairs = {};
+    this.failures = failures;
+    this.pairs = pairs;
+  }
+
+  MreResult.prototype = {
+    addFailure: function(failure) {
+      this.failures.push(failure);
+    },
+
+    addPair: function(key, value) {
+      if (key in this.pairs)
+        throw new Error('Key ' + key + ' already exists in result.');
+      this.pairs[key] = value;
+    },
+
+    asDict: function() {
+      var d = {
+        pairs: this.pairs
+      };
+
+      if (this.failures)
+        d.failures = this.failures.map(function(f) {return f.asDict();});
+
+      return d;
+    },
+
+    hadFailures: function() {
+      return this.failures.length > 0;
+    }
+  };
+
+  MreResult.fromDict = function(resultDict) {
+    if (resultDict.failures !== undefined)
+      var failures = resultDict.failures.map(pi.mre.Failure.fromDict);
+    var pairs = resultDict.pairs;
+    return new MreResult(failures, pairs);
+  };
+
+  return {
+    MreResult: MreResult
+  };
+});
+
+</script>
diff --git a/catapult/perf_insights/perf_insights/mre/mre_result.py b/catapult/perf_insights/perf_insights/mre/mre_result.py
new file mode 100644
index 0000000..81298b2
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/mre/mre_result.py
@@ -0,0 +1,48 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from perf_insights.mre import failure as failure_module
+
+class DuplicateKeyError(Exception):
+  """Raised if an attempt is made to set a key more than once."""
+
+
+class MreResult(object):
+
+  def __init__(self, failures=None, pairs=None):
+    if failures is None:
+      failures = []
+    if pairs is None:
+      pairs = {}
+    self._failures = failures
+    self._pairs = pairs
+
+  @property
+  def failures(self):
+    return self._failures
+
+  @property
+  def pairs(self):
+    return self._pairs
+
+  def AsDict(self):
+    d = {
+        'pairs': self._pairs
+    }
+
+    if self.failures:
+      d['failures'] = [failure.AsDict() for failure in self._failures]
+
+    return d
+
+  def AddFailure(self, failure):
+    if not isinstance(failure, failure_module.Failure):
+      raise ValueError('Attempted to add %s as Failure' % failure)
+
+    self._failures.append(failure)
+
+  def AddPair(self, key, value):
+    if key in self._pairs:
+      raise DuplicateKeyError('Key ' + key + ' already exists in result.')
+    self._pairs[key] = value
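
A minimal usage sketch of MreResult (perf_insights checkout assumed): pairs are write-once per key, and a second AddPair for the same key raises DuplicateKeyError.

    from perf_insights.mre import mre_result

    result = mre_result.MreResult()
    result.AddPair('wr', {'sliceCosts': []})
    try:
        result.AddPair('wr', {})       # second write to the same key
    except mre_result.DuplicateKeyError:
        pass
    print(result.AsDict())             # {'pairs': {'wr': {'sliceCosts': []}}}
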
diff --git a/catapult/perf_insights/perf_insights/mre/mre_result_test.html b/catapult/perf_insights/perf_insights/mre/mre_result_test.html
new file mode 100644
index 0000000..0761139
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/mre/mre_result_test.html
@@ -0,0 +1,30 @@
+<!DOCTYPE html>
+<!--
+Copyright 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/perf_insights/mre/failure.html">
+<link rel="import" href="/perf_insights/mre/mre_result.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  test('mreResultAsDictTest', function() {
+    var result = new pi.mre.MreResult();
+
+    var failure = new pi.mre.Failure('1', '2', '3', 'err', 'desc', 'stack');
+    result.addFailure(failure);
+
+    result.addPair('foo', 'bar');
+
+    var resultDict = result.asDict();
+
+    assert.deepEqual(resultDict.failures, [failure.asDict()]);
+    assert.deepEqual(resultDict.pairs, {foo: 'bar'});
+  });
+});
+
+</script>
diff --git a/catapult/perf_insights/perf_insights/mre/mre_result_unittest.py b/catapult/perf_insights/perf_insights/mre/mre_result_unittest.py
new file mode 100644
index 0000000..57cabac
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/mre/mre_result_unittest.py
@@ -0,0 +1,46 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from perf_insights import function_handle
+from perf_insights import map_single_trace
+from perf_insights.mre import failure as failure_module
+from perf_insights.mre import job as job_module
+from perf_insights.mre import mre_result
+
+
+class MreResultTests(unittest.TestCase):
+
+  def testAsDict(self):
+    result = mre_result.MreResult()
+
+    with map_single_trace.TemporaryMapScript("""
+      pi.FunctionRegistry.register(
+          function MyMapFunction(result, model) {
+            var canonicalUrl = model.canonicalUrlThatCreatedThisTrace;
+            result.addPair('result', {
+                numProcesses: model.getAllProcesses().length
+              });
+          });
+      """) as map_script:
+
+      module = function_handle.ModuleToLoad(filename=map_script.filename)
+      map_handle = function_handle.FunctionHandle(
+          modules_to_load=[module], function_name='MyMapFunction')
+      job = job_module.Job(map_handle, None)
+      failure = failure_module.Failure(job, '2', '3', 'err', 'desc', 'stack')
+      result.AddFailure(failure)
+
+      result.AddPair('foo', 'bar')
+
+      result_dict = result.AsDict()
+
+      self.assertEquals(result_dict['failures'], [failure.AsDict()])
+      self.assertEquals(result_dict['pairs'], {'foo': 'bar'})
+
+  def testAddingNonFailure(self):
+    result = mre_result.MreResult()
+    with self.assertRaises(ValueError):
+      result.AddFailure('foo')
diff --git a/catapult/perf_insights/perf_insights/mre/reduce_map_results.html b/catapult/perf_insights/perf_insights/mre/reduce_map_results.html
new file mode 100644
index 0000000..d9ce464
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/mre/reduce_map_results.html
@@ -0,0 +1,29 @@
+<!DOCTYPE html>
+<!--
+Copyright 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/perf_insights/mre/failure.html">
+
+<script>
+'use strict';
+
+tr.exportTo('pi.mre', function() {
+
+  function reduceMapResults(jobResults, key, mapResults, reduceFunction) {
+    try {
+      var result = reduceFunction(key, mapResults);
+      jobResults.addPair(key, result);
+    } catch (ex) {
+      ex.name = 'ReduceFunctionError';
+      throw ex;
+    }
+  }
+
+  return {
+    reduceMapResults: reduceMapResults
+  };
+});
+</script>
diff --git a/catapult/perf_insights/perf_insights/mre/reduce_map_results.py b/catapult/perf_insights/perf_insights/mre/reduce_map_results.py
new file mode 100644
index 0000000..0aad822
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/mre/reduce_map_results.py
@@ -0,0 +1,68 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import json
+import os
+import sys
+import re
+
+from perf_insights import map_single_trace
+from perf_insights.mre import failure
+import perf_insights_project
+import vinn
+
+_REDUCE_MAP_RESULTS_CMDLINE_PATH = os.path.join(
+  perf_insights_project.PerfInsightsProject.perf_insights_src_path,
+  'mre', 'reduce_map_results_cmdline.html')
+
+
+def ReduceMapResults(job_results, key, file_handle, job):
+  project = perf_insights_project.PerfInsightsProject()
+
+  all_source_paths = list(project.source_paths)
+  all_source_paths.append(project.perf_insights_root_path)
+
+  with file_handle.PrepareFileForProcessing() as prepared_file_handle:
+    js_args = [
+      key,
+      json.dumps(prepared_file_handle.AsDict()),
+      json.dumps(job.AsDict()),
+    ]
+
+    res = vinn.RunFile(_REDUCE_MAP_RESULTS_CMDLINE_PATH,
+                       source_paths=all_source_paths, js_args=js_args)
+
+  if res.returncode != 0:
+    try:
+      sys.stderr.write(res.stdout)
+    except Exception:
+      pass
+    job_results.AddFailure(failure.Failure(
+        job, job.map_function_handle, None, 'Error',
+        'vinn runtime error while reducing results.', 'Unknown stack'))
+    return
+
+  for line in res.stdout.split('\n'):
+    m = re.match('^JOB_(RESULTS|FAILURE): (.+)', line, re.DOTALL)
+    if m:
+      found_type = m.group(1)
+      found_dict = json.loads(m.group(2))
+      if found_type == 'FAILURE':
+        try:
+          sys.stderr.write(res.stdout)
+        except Exception:
+          pass
+        job_results.AddFailure(failure.Failure(
+            job, job.map_function_handle, None, 'Error',
+            'vinn runtime error while reducing results.', 'Unknown stack'))
+
+      elif found_type == 'RESULTS':
+        job_results.AddPair(key, found_dict[key])
+    else:
+      if len(line) > 0:
+        sys.stderr.write(line)
+        sys.stderr.write('\n')
+
+  if len(job_results.pairs) == 0 and len(job_results.failures) == 0:
+    raise map_single_trace.InternalMapError(
+        'Internal error: No results were produced!')
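
A standalone sketch of the stdout protocol ReduceMapResults parses above: the vinn-run cmdline prints one JOB_RESULTS/JOB_FAILURE line per payload, and the Python side matches the prefix and JSON-decodes the remainder (illustrative helper only, not part of the patch).

    import json
    import re

    def parse_reduce_output(stdout):
        results, failures = {}, []
        for line in stdout.split('\n'):
            m = re.match('^JOB_(RESULTS|FAILURE): (.+)', line, re.DOTALL)
            if not m:
                continue
            payload = json.loads(m.group(2))
            if m.group(1) == 'RESULTS':
                results.update(payload)
            else:
                failures.append(payload)
        return results, failures

    print(parse_reduce_output('JOB_RESULTS: {"wr": {"sliceCosts": []}}\n'))
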
diff --git a/catapult/perf_insights/perf_insights/mre/reduce_map_results_cmdline.html b/catapult/perf_insights/perf_insights/mre/reduce_map_results_cmdline.html
new file mode 100644
index 0000000..f53ee6b
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/mre/reduce_map_results_cmdline.html
@@ -0,0 +1,71 @@
+<!DOCTYPE html>
+<!--
+Copyright 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/perf_insights/mre/file_handle.html">
+<link rel="import" href="/perf_insights/mre/job.html">
+<link rel="import" href="/perf_insights/mre/mre_result.html">
+<link rel="import" href="/perf_insights/mre/reduce_map_results.html">
+<link rel="import"
+      href="/perf_insights/mre/run_and_convert_errors_to_failures.html">
+<link rel="import" href="/tracing/base/xhr.html">
+<link rel="import" href="/tracing/value/numeric.html">
+
+<script>
+'use strict';
+
+tr.exportTo('pi.mre', function() {
+
+  function jsonReplacer(key, value) {
+    if (value instanceof tr.v.Numeric) {
+      return value.asDict();
+    }
+    return value;
+  }
+
+  function reduceMapResultsMain(args) {
+    if (args.length !== 3)
+      throw new Error('Must provide three arguments');
+
+    var options = {
+      key: args[0],
+      fileHandle: pi.mre.FileHandle.fromDict(JSON.parse(args[1])),
+      job: pi.mre.Job.fromDict(JSON.parse(args[2]))
+    };
+
+    var mapResultsLoaded = options.fileHandle.load();
+    var mapResults = JSON.parse(mapResultsLoaded);
+
+    var jobResults = new pi.mre.MreResult();
+
+    pi.mre.runAndConvertErrorsToFailures(
+        jobResults, options.job, options.job.reduceFunctionHandle,
+        undefined,
+        function() {
+          var reduceFunction = options.job.reduceFunctionHandle.load();
+          pi.mre.reduceMapResults(jobResults, options.key, mapResults.pairs,
+                                  reduceFunction);
+        });
+
+    if (Object.keys(jobResults.pairs).length !== 0)
+      console.log('JOB_RESULTS: ' + JSON.stringify(jobResults.pairs,
+                  jsonReplacer));
+    jobResults.failures.forEach(function(failure) {
+      console.log('JOB_FAILURE: ' + JSON.stringify(failure.asDict()));
+    });
+    return 0;
+  }
+
+  return {
+    reduceMapResultsMain: reduceMapResultsMain
+  };
+});
+
+
+if (tr.isHeadless)
+  quit(pi.mre.reduceMapResultsMain(sys.argv.slice(1)));
+
+</script>
diff --git a/catapult/perf_insights/perf_insights/mre/run_and_convert_errors_to_failures.html b/catapult/perf_insights/perf_insights/mre/run_and_convert_errors_to_failures.html
new file mode 100644
index 0000000..c045c4e
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/mre/run_and_convert_errors_to_failures.html
@@ -0,0 +1,31 @@
+<!DOCTYPE html>
+<!--
+Copyright 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/perf_insights/mre/failure.html">
+<link rel="import" href="/tracing/base/utils.html">
+
+<script>
+'use strict';
+
+tr.exportTo('pi.mre', function() {
+  function runAndConvertErrorsToFailures(results, job, functionHandle,
+                                         traceHandle, cb, opt_this) {
+    try {
+      cb.call(opt_this);
+    } catch (err) {
+      err = tr.b.normalizeException(err);
+      results.addFailure(new pi.mre.Failure(
+          job, functionHandle, traceHandle, err.typeName,
+          err.message, err.stack));
+    }
+  }
+
+  return {
+    runAndConvertErrorsToFailures: runAndConvertErrorsToFailures
+  };
+});
+
diff --git a/catapult/perf_insights/perf_insights/mre/threaded_work_queue.py b/catapult/perf_insights/perf_insights/mre/threaded_work_queue.py
index fccd16e..3ab5633 100644
--- a/catapult/perf_insights/perf_insights/mre/threaded_work_queue.py
+++ b/catapult/perf_insights/perf_insights/mre/threaded_work_queue.py
@@ -1,15 +1,13 @@
 # Copyright 2015 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
-import os
-import multiprocessing
-import sys
 import threading
-import time
 import traceback
 import Queue
 
-class ThreadedWorkQueue:
+
+class ThreadedWorkQueue(object):
+
   def __init__(self, num_threads):
     self._num_threads = num_threads
 
@@ -20,8 +18,7 @@
     self._stop = False
     self._stop_result = None
 
-    self._main_thread_tasks = Queue.Queue()
-    self._any_thread_tasks = Queue.Queue()
+    self.Reset()
 
   @property
   def is_running(self):
@@ -59,6 +56,11 @@
     self._stop = True
     return True
 
+  def Reset(self):
+    assert not self.is_running
+    self._main_thread_tasks = Queue.Queue()
+    self._any_thread_tasks = Queue.Queue()
+
   def PostMainThreadTask(self, cb, *args, **kwargs):
     def RunTask():
       cb(*args, **kwargs)
@@ -99,10 +101,10 @@
   def _RunMultiThreaded(self):
     threads = []
     for _ in range(self._num_threads):
-        t = threading.Thread(target=self._ThreadMain)
-        t.setDaemon(True)
-        t.start()
-        threads.append(t)
+      t = threading.Thread(target=self._ThreadMain)
+      t.setDaemon(True)
+      t.start()
+      threads.append(t)
 
     while True:
       if self._stop:
diff --git a/catapult/perf_insights/perf_insights/mre/threaded_work_queue_unittest.py b/catapult/perf_insights/perf_insights/mre/threaded_work_queue_unittest.py
index ce693e4..f987a3c 100644
--- a/catapult/perf_insights/perf_insights/mre/threaded_work_queue_unittest.py
+++ b/catapult/perf_insights/perf_insights/mre/threaded_work_queue_unittest.py
@@ -5,7 +5,9 @@
 
 from perf_insights.mre import threaded_work_queue
 
+
 class ThreadedWorkQueueTests(unittest.TestCase):
+
   def testSingleThreaded(self):
     wq = threaded_work_queue.ThreadedWorkQueue(num_threads=1)
     self._RunSimpleDecrementingTest(wq)
@@ -17,6 +19,7 @@
   def _RunSimpleDecrementingTest(self, wq):
 
     remaining = [10]
+
     def Decrement():
       remaining[0] -= 1
       if remaining[0]:
diff --git a/catapult/perf_insights/perf_insights/perf_insights_corpus_driver.py b/catapult/perf_insights/perf_insights/perf_insights_corpus_driver.py
index ceabff6..890641d 100644
--- a/catapult/perf_insights/perf_insights/perf_insights_corpus_driver.py
+++ b/catapult/perf_insights/perf_insights/perf_insights_corpus_driver.py
@@ -3,18 +3,18 @@
 # found in the LICENSE file.
 import json
 import os
-import tempfile
 import urllib
 import urllib2
 
 from perf_insights import corpus_driver
-from perf_insights import gcs_trace_handle
-from perf_insights.value import run_info as run_info_module
+from perf_insights.mre import file_handle
 
 
 _DEFAULT_PERF_INSIGHTS_SERVER = 'http://performance-insights.appspot.com'
 
+
 class PerfInsightsCorpusDriver(corpus_driver.CorpusDriver):
+
   def __init__(self, cache_directory, server=_DEFAULT_PERF_INSIGHTS_SERVER):
     self.directory = cache_directory
     self.server = server
@@ -49,13 +49,7 @@
     file_urls = json.loads(response.read())
 
     for file_url in file_urls:
-      run_info = run_info_module.RunInfo(
-          url=file_url,
-          display_name=file_url,
-          run_id=file_url)
-
-      th = gcs_trace_handle.GCSTraceHandle(
-          run_info, self.directory)
+      th = file_handle.GCSFileHandle(file_url, self.directory)
       trace_handles.append(th)
 
     return trace_handles
diff --git a/catapult/perf_insights/perf_insights/progress_reporter.py b/catapult/perf_insights/perf_insights/progress_reporter.py
index b13270e..b33d744 100644
--- a/catapult/perf_insights/perf_insights/progress_reporter.py
+++ b/catapult/perf_insights/perf_insights/progress_reporter.py
@@ -2,11 +2,13 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-class RunReporter(object):
-  def __init__(self, run_info):
-    self.run_info = run_info
 
-  def DidAddValue(self, value):
+class RunReporter(object):
+
+  def __init__(self, canonical_url):
+    self.canonical_url = canonical_url
+
+  def DidAddFailure(self, failure):
     pass
 
   def DidRun(self, run_failed):
@@ -16,8 +18,10 @@
 # Derived from telemetry ProgressReporter. Should stay close in architecture
 # to telemetry ProgressReporter.
 class ProgressReporter(object):
-  def WillRun(self, run_info):
-    return RunReporter(run_info)
 
-  def DidFinishAllRuns(self, results):
-    pass
\ No newline at end of file
+  def WillRun(self, canonical_url):
+    return RunReporter(canonical_url)
+
+  # TODO(eakuefner): Implement reduction, make this not take a result list.
+  def DidFinishAllRuns(self, result_list):
+    pass
diff --git a/catapult/perf_insights/perf_insights/reducers/default_reduce_function.html b/catapult/perf_insights/perf_insights/reducers/default_reduce_function.html
new file mode 100644
index 0000000..5834069
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/reducers/default_reduce_function.html
@@ -0,0 +1,26 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/perf_insights/function_handle.html">
+
+<script>
+'use strict';
+
+tr.exportTo('pi.r', function() {
+
+  // TODO(eakuefner): Split weather report into discrete mappers.
+  function defaultReduceFunction(key, mapResults) {
+    return mapResults[key];
+  }
+
+  pi.FunctionRegistry.register(defaultReduceFunction);
+
+  return {
+    defaultReduceFunction: defaultReduceFunction
+  };
+});
+
diff --git a/catapult/perf_insights/perf_insights/reducers/test_reducer.html b/catapult/perf_insights/perf_insights/reducers/test_reducer.html
new file mode 100644
index 0000000..befe0b2
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/reducers/test_reducer.html
@@ -0,0 +1,25 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/perf_insights/function_handle.html">
+
+<script>
+'use strict';
+
+tr.exportTo('pi.r', function() {
+
+  function testReduceFunction(key, mapResults) {
+    return {value: mapResults[key].value};
+  }
+
+  pi.FunctionRegistry.register(testReduceFunction);
+
+  return {
+    testReduceFunction: testReduceFunction
+  };
+});
+
diff --git a/catapult/perf_insights/perf_insights/reducers/v8_reduce_function.html b/catapult/perf_insights/perf_insights/reducers/v8_reduce_function.html
new file mode 100644
index 0000000..5aa3803
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/reducers/v8_reduce_function.html
@@ -0,0 +1,127 @@
+<!DOCTYPE html>
+<!--
+Copyright 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/perf_insights/function_handle.html">
+<link rel="import" href="/tracing/base/range.html">
+<link rel="import" href="/tracing/model/source_info/js_source_info.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/unit.html">
+
+<script>
+'use strict';
+
+tr.exportTo('pi.r', function() {
+  function ScriptCostInfo() {
+    this.threadGroup = undefined;
+    this.railTypeName = undefined;
+    this.title = undefined;
+    this.scriptURL = undefined;
+    this.scriptURLClean = undefined;
+    this.framework = undefined;
+    this.userFriendlyCategory = undefined;
+
+    this.selfTime = 0;
+    this.cpuSelfTime = 0;
+
+    this.jsTime = 0;
+    this.jsTimeByState = {};
+
+    this.traceURLs = {};
+    this.sliceCount = 1;
+
+    this.selfTimeHistogram = tr.v.Numeric.createLinear(
+         tr.v.Unit.byName.timeDurationInMs,
+         tr.b.Range.fromExplicitRange(0, 100),
+         100);
+
+    this.cpuSelfTimeHistogram = tr.v.Numeric.createLinear(
+         tr.v.Unit.byName.timeDurationInMs,
+         tr.b.Range.fromExplicitRange(0, 100),
+         100);
+  }
+
+  ScriptCostInfo.asReduceTarget = function(firstValue) {
+    var sliceCostInfo = new ScriptCostInfo();
+    sliceCostInfo.threadGroup = firstValue.threadGroup;
+    sliceCostInfo.railTypeName = firstValue.railTypeName;
+    sliceCostInfo.title = firstValue.title;
+    sliceCostInfo.scriptURL = firstValue.scriptURL;
+    sliceCostInfo.scriptURLClean = firstValue.scriptURLClean;
+    sliceCostInfo.framework = firstValue.framework;
+    sliceCostInfo.userFriendlyCategory = firstValue.userFriendlyCategory;
+    sliceCostInfo.traceURLs = {};
+    if (firstValue.traceURL !== undefined) {
+      sliceCostInfo.traceURLs[firstValue.traceURL] = true;
+    }
+    sliceCostInfo.jsTime = firstValue.jsTime || 0;
+
+    var JSSourceState = tr.model.source_info.JSSourceState;
+    for (var state in JSSourceState) {
+      if (firstValue.jsTimeByState === undefined) {
+        sliceCostInfo.jsTimeByState[JSSourceState[state]] = 0;
+      } else {
+        sliceCostInfo.jsTimeByState[JSSourceState[state]] =
+            firstValue.jsTimeByState[JSSourceState[state]] || 0;
+      }
+    }
+    return sliceCostInfo;
+  };
+
+  ScriptCostInfo.prototype = {
+    push: function(threadSlice) {
+      var JSSourceState = tr.model.source_info.JSSourceState;
+      if (threadSlice.selfTime !== undefined)
+        this.selfTime += threadSlice.selfTime;
+      if (threadSlice.cpuSelfTime !== undefined)
+        this.cpuSelfTime += threadSlice.cpuSelfTime;
+      if (threadSlice.jsTime !== undefined)
+        this.jsTime += threadSlice.jsTime;
+      if (threadSlice.jsTimeByState !== undefined) {
+        for (var state in JSSourceState) {
+          this.jsTimeByState[JSSourceState[state]] +=
+              threadSlice.jsTimeByState[JSSourceState[state]];
+        }
+      }
+
+      if (threadSlice.traceURL !== undefined &&
+          !(threadSlice.traceURL in this.traceURLs)) {
+        this.traceURLs[threadSlice.traceURL] = true;
+      }
+
+      var sourceInfo = {
+        traceURL: threadSlice.traceURL,
+        sourceURL: threadSlice.scriptURLClean
+      };
+
+      this.selfTimeHistogram.add(threadSlice.selfTime, sourceInfo);
+      this.cpuSelfTimeHistogram.add(threadSlice.cpuSelfTime, sourceInfo);
+      this.sliceCount += 1;
+    }
+  };
+
+  function v8ReportReduceFunction(key, mapResults) {
+    var reduceResults = {};
+    mapResults[key].forEach(function(mapResult) {
+      var reducingTarget = reduceResults[mapResult.key];
+      if (!reducingTarget) {
+        reducingTarget = ScriptCostInfo.asReduceTarget(mapResult.value);
+        reduceResults[mapResult.key] = reducingTarget;
+      }
+      reducingTarget.push(mapResult.value);
+    });
+
+    //console.log(JSON.stringify(reduceResults, undefined, 2));
+    return reduceResults;
+  };
+
+  pi.FunctionRegistry.register(v8ReportReduceFunction);
+
+  return {
+    v8ReportReduceFunction: v8ReportReduceFunction
+  };
+});
+
diff --git a/catapult/perf_insights/perf_insights/results/__init__.py b/catapult/perf_insights/perf_insights/results/__init__.py
index 61cd219..047b03c 100644
--- a/catapult/perf_insights/perf_insights/results/__init__.py
+++ b/catapult/perf_insights/perf_insights/results/__init__.py
@@ -1,64 +1,3 @@
 # Copyright (c) 2015 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
-
-from perf_insights import value as value_module
-
-class Results(object):
-  def __init__(self):
-    self.all_values = []
-    self._run_infos_that_have_failures = set()
-
-  @property
-  def had_failures(self):
-    return len(self._run_infos_that_have_failures) > 0
-
-  @property
-  def failure_values(self):
-    return [v for v in self.all_values
-            if isinstance(v, value_module.FailureValue)]
-
-  @property
-  def skip_values(self):
-    return [v for v in self.all_values
-            if isinstance(v, value_module.SkipValue)]
-
-  @property
-  def all_run_infos(self):
-    all_run_infos = set()
-    for value in self.all_values:
-      all_run_infos.add(value.run_info)
-    return all_run_infos
-
-  def DoesRunContainFailure(self, run_info):
-    return run_info in self._run_infos_that_have_failures
-
-  def AddValue(self, value):
-    self.all_values.append(value)
-    if isinstance(value, value_module.FailureValue):
-      self._run_infos_that_have_failures.add(value.run_info)
-
-  def Merge(self, results):
-    for value in results.all_values:
-      self.AddValue(value)
-
-  def FindValueMatching(self, predicate):
-    for v in self.all_values:
-      if predicate(v):
-        return v
-    return None
-
-  def FindValueNamed(self, name):
-    return self.FindValueMatching(lambda v: v.name == name)
-
-  def __repr__(self):
-    return 'Results(%s)' % repr(self.all_values)
-
-  def AsDict(self):
-    run_dict = dict([(run_info.run_id, run_info.AsDict()) for run_info
-                     in self.all_run_infos])
-    all_values_list = [v.AsDict() for v in self.all_values]
-    return {
-      'runs': run_dict,
-      'values': all_values_list
-    }
diff --git a/catapult/perf_insights/perf_insights/results/gtest_progress_reporter.py b/catapult/perf_insights/perf_insights/results/gtest_progress_reporter.py
index cf9d778..f6c0ede 100644
--- a/catapult/perf_insights/perf_insights/results/gtest_progress_reporter.py
+++ b/catapult/perf_insights/perf_insights/results/gtest_progress_reporter.py
@@ -5,12 +5,12 @@
 import time
 
 from perf_insights import progress_reporter
-from perf_insights import value as value_module
 
 
 class GTestRunReporter(progress_reporter.RunReporter):
-  def __init__(self, run_info, output_stream, timestamp):
-    super(GTestRunReporter, self).__init__(run_info)
+
+  def __init__(self, canonical_url, output_stream, timestamp):
+    super(GTestRunReporter, self).__init__(canonical_url)
     self._output_stream = output_stream
     self._timestamp = timestamp
 
@@ -18,23 +18,19 @@
     assert self._timestamp is not None, 'Did not call WillRun.'
     return (time.time() - self._timestamp) * 1000
 
-  def DidAddValue(self, value):
-    super(GTestRunReporter, self).DidAddValue(value)
-    if isinstance(value, value_module.FailureValue):
-      print >> self._output_stream, value.GetGTestPrintString()
-      self._output_stream.flush()
-    elif isinstance(value, value_module.SkipValue):
-      print >> self._output_stream, '===== SKIPPING TEST %s: %s =====' % (
-          value.run_info.display_name, value.description)
+  def DidAddFailure(self, failure):
+    super(GTestRunReporter, self).DidAddFailure(failure)
+    print >> self._output_stream, failure.stack
+    self._output_stream.flush()
 
   def DidRun(self, run_failed):
     super(GTestRunReporter, self).DidRun(run_failed)
     if run_failed:
       print >> self._output_stream, '[  FAILED  ] %s (%0.f ms)' % (
-          self.run_info.display_name, self._GetMs())
+          self.canonical_url, self._GetMs())
     else:
       print >> self._output_stream, '[       OK ] %s (%0.f ms)' % (
-          self.run_info.display_name, self._GetMs())
+          self.canonical_url, self._GetMs())
     self._output_stream.flush()
 
 
@@ -49,47 +45,43 @@
       print >> self._output_stream, "[ OK ] %s" % testname
   """
 
-  def __init__(self, output_stream, output_skipped_tests_summary=False):
+  def __init__(self, output_stream):
     super(GTestProgressReporter, self).__init__()
     self._output_stream = output_stream
-    self._output_skipped_tests_summary = output_skipped_tests_summary
 
-  def WillRun(self, run_info):
-    super(GTestProgressReporter, self).WillRun(run_info)
-    print >> self._output_stream, '[ RUN      ] %s' % (
-        run_info.display_name)
+  def WillRun(self, canonical_url):
+    super(GTestProgressReporter, self).WillRun(canonical_url)
+    print >> self._output_stream, '[ RUN      ] %s' % (canonical_url)
     self._output_stream.flush()
-    return GTestRunReporter(run_info, self._output_stream, time.time())
+    return GTestRunReporter(canonical_url, self._output_stream, time.time())
 
-  def DidFinishAllRuns(self, results):
-    super(GTestProgressReporter, self).DidFinishAllRuns(results)
-    successful_runs = []
-    failed_run_infos = []
-    for run_info in results.all_run_infos:
-      if results.DoesRunContainFailure(run_info):
-        failed_run_infos.append(run_info)
+  def DidFinishAllRuns(self, result_list):
+    super(GTestProgressReporter, self).DidFinishAllRuns(result_list)
+    successful_runs = 0
+    failed_canonical_urls = []
+    failed_runs = 0
+    for run in result_list:
+      if len(run.failures) != 0:
+        failed_runs += 1
+        for f in run.failures:
+          failed_canonical_urls.append(f.trace_canonical_url)
       else:
-        successful_runs.append(run_info)
+        successful_runs += 1
 
-    unit = 'test' if len(successful_runs) == 1 else 'tests'
+    unit = 'test' if successful_runs == 1 else 'tests'
     print >> self._output_stream, '[  PASSED  ] %d %s.' % (
-        (len(successful_runs), unit))
-    if len(failed_run_infos) > 0:
-      unit = 'test' if len(failed_run_infos) == 1 else 'tests'
+        (successful_runs, unit))
+    if len(failed_canonical_urls) > 0:
+      unit = 'test' if len(failed_canonical_urls) == 1 else 'tests'
       print >> self._output_stream, '[  FAILED  ] %d %s, listed below:' % (
-          (len(results.failure_values), unit))
-      for failed_run_info in failed_run_infos:
+          (failed_runs, unit))
+      for failed_canonical_url in failed_canonical_urls:
         print >> self._output_stream, '[  FAILED  ]  %s' % (
-            failed_run_info.display_name)
+            failed_canonical_url)
       print >> self._output_stream
-      count = len(failed_run_infos)
+      count = len(failed_canonical_urls)
       unit = 'TEST' if count == 1 else 'TESTS'
       print >> self._output_stream, '%d FAILED %s' % (count, unit)
     print >> self._output_stream
 
-    if self._output_skipped_tests_summary:
-      if len(results.skip_values) > 0:
-        print >> self._output_stream, 'Skipped:\n%s\n' % ('\n'.join(
-            v.run_info.display_name for v in results.skip_values))
-
     self._output_stream.flush()
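A hedged usage sketch of the reporter as reshaped above, driven through one passing and one failing run. _FakeFailure and _FakeResult are hypothetical stand-ins exposing only the attributes the reporter reads (stack, trace_canonical_url, failures); real runs supply mre result objects instead.

import sys

from perf_insights.results import gtest_progress_reporter


class _FakeFailure(object):
  def __init__(self, url):
    self.trace_canonical_url = url
    self.stack = 'Traceback (illustrative)'


class _FakeResult(object):
  def __init__(self, failures):
    self.failures = failures


reporter = gtest_progress_reporter.GTestProgressReporter(sys.stdout)

run_reporter = reporter.WillRun('trace_a.json')   # prints [ RUN      ]
run_reporter.DidRun(run_failed=False)             # prints [       OK ]

run_reporter = reporter.WillRun('trace_b.json')
failure = _FakeFailure('trace_b.json')
run_reporter.DidAddFailure(failure)               # prints the failure stack
run_reporter.DidRun(run_failed=True)              # prints [  FAILED  ]

# The summary counts successful runs and lists failed canonical URLs.
reporter.DidFinishAllRuns([_FakeResult([]), _FakeResult([failure])])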
diff --git a/catapult/perf_insights/perf_insights/results/json_output_formatter.py b/catapult/perf_insights/perf_insights/results/json_output_formatter.py
index 2ee4877..8d1d499 100644
--- a/catapult/perf_insights/perf_insights/results/json_output_formatter.py
+++ b/catapult/perf_insights/perf_insights/results/json_output_formatter.py
@@ -7,13 +7,14 @@
 
 
 class JSONOutputFormatter(output_formatter.OutputFormatter):
+
   def __init__(self, output_file):
     # TODO(nduca): Resolve output_file here vs output_stream in base class.
     super(JSONOutputFormatter, self).__init__(output_file)
     self.output_file = output_file
 
-  def Format(self, results):
-    d = results.AsDict()
+  def Format(self, result_list):
+    d = [result.AsDict() for result in result_list]
     json.dump(d, self.output_file, indent=2)
     if hasattr(self.output_file, 'flush'):
-      self.output_file.flush()
\ No newline at end of file
+      self.output_file.flush()
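For completeness, a hedged sketch of the formatter's new contract: Format takes a list of results and serializes each one's AsDict() output as a JSON array. _FakeResult is a hypothetical stand-in; real callers pass mre result objects.

import sys

from perf_insights.results import json_output_formatter


class _FakeResult(object):
  def __init__(self, payload):
    self._payload = payload

  def AsDict(self):
    return self._payload


formatter = json_output_formatter.JSONOutputFormatter(sys.stdout)
formatter.Format([_FakeResult({'trace': 'a.json', 'failures': []}),
                  _FakeResult({'trace': 'b.json', 'failures': ['oops']})])
# Writes a two-element JSON array to stdout, one dict per result.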
diff --git a/catapult/perf_insights/perf_insights/results/output_formatter.html b/catapult/perf_insights/perf_insights/results/output_formatter.html
deleted file mode 100644
index 2bf488f..0000000
--- a/catapult/perf_insights/perf_insights/results/output_formatter.html
+++ /dev/null
@@ -1,25 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/base/base.html">
-
-<script>
-'use strict';
-
-tr.exportTo('pi.r', function() {
-  function OutputFormatter() {
-  }
-
-  OutputFormatter.prototype = {
-    format: function(results) {
-    }
-  };
-
-  return {
-    OutputFormatter: OutputFormatter
-  };
-});
-</script>
\ No newline at end of file
diff --git a/catapult/perf_insights/perf_insights/results/output_formatter.py b/catapult/perf_insights/perf_insights/results/output_formatter.py
index 221c7a2..5a4be13 100644
--- a/catapult/perf_insights/perf_insights/results/output_formatter.py
+++ b/catapult/perf_insights/perf_insights/results/output_formatter.py
@@ -4,7 +4,10 @@
 
 # Derived from telemetry OutputFormatter. Should stay close in architecture
 # to telemetry OutputFormatter.
+
+
 class OutputFormatter(object):
+
   def __init__(self, output_stream):
     self._output_stream = output_stream
 
@@ -13,4 +16,4 @@
 
   @property
   def output_stream(self):
-    return self._output_stream
\ No newline at end of file
+    return self._output_stream
diff --git a/catapult/perf_insights/perf_insights/results/progress_reporter.html b/catapult/perf_insights/perf_insights/results/progress_reporter.html
deleted file mode 100644
index 5003d1b..0000000
--- a/catapult/perf_insights/perf_insights/results/progress_reporter.html
+++ /dev/null
@@ -1,35 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/base/base.html">
-
-<script>
-'use strict';
-
-tr.exportTo('pi.r', function() {
-  function ProgressReporter() {
-  }
-
-  ProgressReporter.prototype = {
-    willRun: function(runInfo) {
-    },
-
-    didAddValue: function(value) {
-    },
-
-    didRun: function(runInfo, run_failed) {
-
-    },
-
-    didFinishAllRuns: function(results) {
-    }
-  };
-
-  return {
-    ProgressReporter: ProgressReporter
-  };
-});
-</script>
\ No newline at end of file
diff --git a/catapult/perf_insights/perf_insights/results/results.html b/catapult/perf_insights/perf_insights/results/results.html
deleted file mode 100644
index 8b1afb2..0000000
--- a/catapult/perf_insights/perf_insights/results/results.html
+++ /dev/null
@@ -1,121 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/base/iteration_helpers.html">
-<link rel="import" href="/perf_insights/value/value.html">
-
-<script>
-'use strict';
-
-tr.exportTo('pi.r', function() {
-  function Results() {
-    this.allValues = [];
-    this.runIdsThatHaveFailures_ = {};
-  }
-
-  Results.fromDict = function(dict) {
-    if (dict.runs === undefined)
-      throw new Error('Expected: runInfo');
-    if (dict.values === undefined)
-      throw new Error('Expected: runInfo');
-
-    var runInfosById = tr.b.mapItems(dict.runs, function(runId, dict) {
-      return pi.v.RunInfo.fromDict(dict);
-    });
-
-    var results = new Results();
-    dict.values.forEach(function(valueDict) {
-      var runInfo = runInfosById[valueDict.run_id];
-      if (runInfo === undefined) {
-        debugger;
-        throw new Error('runInfo not found');
-      }
-      var value = pi.v.Value.fromDict(runInfo, valueDict);
-      results.addValue(value);
-    });
-    return results;
-  }
-
-  Results.prototype = {
-    willRun: function(runInfo) {
-    },
-
-    addValue: function(value) {
-      if (value instanceof pi.v.FailureValue)
-        this.runIdsThatHaveFailures_[value.runInfo.runId] = true;
-      this.allValues.push(value);
-    },
-
-    didRun: function(runInfo) {
-    },
-
-    didFinishAllRuns: function() {
-    },
-
-    get hadFailures() {
-      return this.failureValues.length > 0;
-    },
-
-    get failureValues() {
-      return this.allValues.filter(function(x) {
-        return x instanceof pi.v.FailureValue;
-      });
-    },
-
-    get failedRunInfos() {
-      var failedRunInfos = [];
-      var hasAddedRunInfo = {};
-      this.failureValues.forEach(function(v) {
-        if (hasAddedRunInfo[v.runInfo.runId])
-          return;
-        hasAddedRunInfo[v.runInfo.runId] = true;
-        failedRunInfos.push(v.runInfo);
-      });
-      return failedRunInfos;
-    },
-
-    get allRunInfos() {
-      var allRunInfos = [];
-      var hasAddedRunInfo = {};
-      this.allValues.forEach(function(v) {
-        if (hasAddedRunInfo[v.runInfo.runId])
-          return;
-        hasAddedRunInfo[v.runInfo.runId] = true;
-        allRunInfos.push(v.runInfo);
-      });
-      return allRunInfos;
-    },
-
-    doesRunContainFailure: function(runInfo) {
-      return this.runIdsThatHaveFailures_[runInfo.runId] === true;
-    },
-
-    get allValuesFromFailureFreeRuns() {
-      return this.allValues.filter(function(x) {
-        if (this.doesRunContainFailure(x.runInfo))
-          return false;
-        return true;
-      }, this);
-    },
-
-    getValuesForRunInfo: function(runInfo) {
-      return this.allValues.filter(function(value) {
-        return value.runInfo === runInfo;
-      });
-    },
-
-    getValuesFromFailureFreeRunsNamed: function(name) {
-      return this.allValuesFromFailureFreeRuns.filter(function(value) {
-        return value.name === name;
-      });
-    }
-  };
-
-  return {
-    Results: Results
-  };
-});
-</script>
\ No newline at end of file
diff --git a/catapult/perf_insights/perf_insights/results/results_test.html b/catapult/perf_insights/perf_insights/results/results_test.html
deleted file mode 100644
index e65975d..0000000
--- a/catapult/perf_insights/perf_insights/results/results_test.html
+++ /dev/null
@@ -1,46 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2013 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/utils.html">
-<link rel="import" href="/perf_insights/results/results.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  test('resultsAllSuccessful', function() {
-    var runInfo1 = new pi.v.RunInfo('file1.json');
-    var runInfo2 = new pi.v.RunInfo('file2.json');
-
-    var results = new pi.r.Results();
-    results.addValue(new pi.v.DictValue(runInfo1, 'res',
-                                        {my_key: 'my_value'}));
-    results.addValue(new pi.v.DictValue(runInfo2, 'res',
-                                        {my_key: 'my_value'}));
-    assert.isFalse(results.hadFailures);
-    assert.equal(results.allValuesFromFailureFreeRuns.length, 2);
-    assert.equal(results.getValuesFromFailureFreeRunsNamed('res').length, 2);
-  });
-
-  test('oneFailed', function() {
-    var runInfo1 = new pi.v.RunInfo('file1.json');
-    var runInfo2 = new pi.v.RunInfo('file2.json');
-
-    var results = new pi.r.Results();
-    results.addValue(new pi.v.DictValue(runInfo1, 'res',
-                                        {my_key: 'my_value'}));
-    results.addValue(new pi.v.FailureValue(runInfo2, 'res',
-                                           {description: 'blah',
-                                            stack: 'stack'}));
-    assert.isTrue(results.hadFailures);
-    assert.isTrue(results.doesRunContainFailure(runInfo2));
-    assert.equal(results.allValuesFromFailureFreeRuns.length, 1);
-    assert.equal(results.getValuesFromFailureFreeRunsNamed('res').length, 1);
-  });
-});
-
-</script>
\ No newline at end of file
diff --git a/catapult/perf_insights/perf_insights/timeline_based_measurement/rendering_frame.html b/catapult/perf_insights/perf_insights/timeline_based_measurement/rendering_frame.html
deleted file mode 100644
index 21e1a4b..0000000
--- a/catapult/perf_insights/perf_insights/timeline_based_measurement/rendering_frame.html
+++ /dev/null
@@ -1,116 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/base/range.html">
-<link rel="import" href="/tracing/extras/chrome/cc/constants.html">
-<link rel="import" href="/tracing/model/slice.html">
-
-<script>
-'use strict';
-
-tr.exportTo('pi.tbm', function() {
-
-  var SEND_BEGIN_FRAME_EVENT = tr.e.cc.constants.SEND_BEGIN_FRAME_EVENT;
-  var BEGIN_MAIN_FRAME_EVENT = tr.e.cc.constants.BEGIN_MAIN_FRAME_EVENT;
-
-  /* Object with info about the triggering of a BeginMainFrame event.
-   * Do not construct this directly. Use RenderingFrame.fromEvents instead.
-   */
-  function RenderingFrame(sendBeginFrame, beginMainFrame) {
-    this.sendBeginFrame_ = sendBeginFrame;
-    this.beginMainFrame_ = beginMainFrame;
-
-    this.range_ = new tr.b.Range();
-    this.beginMainFrame_.addBoundsToRange(this.range_);
-    this.sendBeginFrame_.addBoundsToRange(this.range_);
-  }
-
-  /**
-   * Construct RenderingFrame from a list of events.
-   * Return undefined if data are missing.
-   */
-  function createRenderingFrameFromEvents(events) {
-    var allSendBeginFrameEvents = events.filter(function(e) {
-        return e.title === SEND_BEGIN_FRAME_EVENT;
-      });
-    if (allSendBeginFrameEvents.length !== 1)
-      return undefined;
-
-    var allBeginMainFrameEvents = events.filter(function(e) {
-        return e.title === BEGIN_MAIN_FRAME_EVENT;
-      });
-    if (allBeginMainFrameEvents.length === 0)
-      return undefined;
-
-    allBeginMainFrameEvents.sort(function(a, b) {
-      return a.start - b.start;
-    });
-    return new RenderingFrame(
-        allSendBeginFrameEvents[0],
-        allBeginMainFrameEvents[allBeginMainFrameEvents.length - 1]);
-  }
-
-  RenderingFrame.prototype = {
-    get range() {
-      return this.range_;
-    },
-
-    get queueDuration() {
-      return this.beginMainFrame_.start - this.sendBeginFrame_.start;
-    }
-  };
-
-  /* Returns RenderingFrames for all relevant events in the timelineRange. */
-  RenderingFrame.getFrameEventsInsideRange = function(
-      rendererProcess, timelineRange) {
-    if (!(timelineRange instanceof tr.b.Range))
-      throw new Error('timelineRange must is Range object');
-    // First filter all events from the rendererProcess and turn them into a
-    // dictonary from event ids -> events objects that are either
-    // send_begin_frame or begin_main_frame event.
-    // e.g:
-    //   {132: [send_begin_frame, begin_main_frame, begin_main_frame],
-    //    213: [begin_main_frame, send_begin_frame],
-    //    9312: [send_begin_frame, begin_main_frame]}
-    var beginFrameEventsById = {};
-    rendererProcess.iterateAllEvents(function(event) {
-        var beginFrameId;
-        if (event instanceof tr.model.Slice &&
-            (event.title === SEND_BEGIN_FRAME_EVENT ||
-             event.title === BEGIN_MAIN_FRAME_EVENT)) {
-          beginFrameId = event.args['begin_frame_id'];
-          if (beginFrameId === undefined) {
-            throw new Error(
-                'Event is missing a beginFrameId.');
-          }
-        }
-        beginFrameEventsById[beginFrameId] =
-            beginFrameEventsById[beginFrameId] || [];
-        beginFrameEventsById[beginFrameId].push(event);
-      });
-
-    // Now, create RenderingFrames for events wherever possible.
-    var frames = [];
-    for (var id in beginFrameEventsById) {
-      var events = beginFrameEventsById[id];
-      var frame = createRenderingFrameFromEvents(events);
-      if (frame === undefined)
-        continue;
-      if (frame.range.intersectsRangeInclusive(timelineRange))
-        frames.push(frame);
-      frames.sort(function(a, b) {
-        return a.range.min - b.range.min;
-      });
-    }
-
-    return frames;
-  }
-
-  return {
-    RenderingFrame: RenderingFrame
-  };
-});
-</script>
diff --git a/catapult/perf_insights/perf_insights/timeline_based_measurement/rendering_frame_test.html b/catapult/perf_insights/perf_insights/timeline_based_measurement/rendering_frame_test.html
deleted file mode 100644
index 9afb906..0000000
--- a/catapult/perf_insights/perf_insights/timeline_based_measurement/rendering_frame_test.html
+++ /dev/null
@@ -1,223 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import"
-  href="/perf_insights/timeline_based_measurement/rendering_frame.html">
-
-<link rel="import" href="/tracing/base/range.html">
-<link rel="import" href="/tracing/base/statistics.html">
-<link rel="import" href="/tracing/core/test_utils.html">
-<link rel="import" href="/tracing/extras/chrome/cc/constants.html">
-<link rel="import" href="/tracing/extras/chrome/chrome_test_utils.html">
-<link rel="import" href="/tracing/model/model.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-
-  var ThreadSlice = tr.model.ThreadSlice;
-  var RenderingFrame = pi.tbm.RenderingFrame;
-  var SEND_BEGIN_FRAME_EVENT = tr.e.cc.constants.SEND_BEGIN_FRAME_EVENT;
-  var BEGIN_MAIN_FRAME_EVENT = tr.e.cc.constants.BEGIN_MAIN_FRAME_EVENT;
-  var Range = tr.b.Range;
-
-  function RenderingFrameTestData() {
-    this.beginFrameId_ = 0;
-    this.events_ = [];
-    this.rendererProcess_ = (new tr.Model()).getOrCreateProcess(1);
-    this.mainThread_ = this.rendererProcess_.getOrCreateThread(11);
-    this.compositorThread_ = this.rendererProcess_.getOrCreateThread(12);
-  }
-
-  RenderingFrameTestData.prototype = {
-    get events() {
-      return this.events_;
-    },
-
-    get rendererProcess() {
-      return this.rendererProcess_;
-    },
-
-    get compositorThread() {
-      return this.compositorThread_;
-    },
-
-    addSendEvent: function(opt_ts, opt_duration) {
-      if (opt_ts === undefined)
-        opt_ts = 0;
-      if (opt_duration === undefined)
-        opt_duration = 1;
-      this.beginFrameId_ += 1;
-      var event = this.createEvent_(
-          SEND_BEGIN_FRAME_EVENT, opt_ts, opt_duration);
-      this.compositorThread_.sliceGroup.pushSlice(event);
-    },
-
-    addBeginMainFrameEvent: function(opt_ts, opt_duration) {
-      if (opt_ts === undefined)
-        opt_ts = 0;
-      if (opt_duration === undefined)
-        opt_duration = 1;
-      var event = this.createEvent_(
-          BEGIN_MAIN_FRAME_EVENT, opt_ts, opt_duration);
-      this.mainThread_.sliceGroup.pushSlice(event);
-    },
-
-    updateBounds: function() {
-      this.rendererProcess_.updateBounds();
-    },
-
-    createEvent_: function(eventTitle, ts, duration) {
-      var event = new ThreadSlice('cc,benchmark', eventTitle, 0, ts, {
-        'begin_frame_id': this.beginFrameId_
-      }, duration);
-      this.events_.push(event);
-      return event;
-    }
-  };
-
-  function generateTimelineRange(opt_start, opt_end) {
-    if (opt_start === undefined)
-      opt_start = 0;
-    if (opt_end === undefined)
-      opt_end = 100;
-    var timelineRange = new tr.b.Range();
-    timelineRange.min = opt_start;
-    timelineRange.max = opt_end;
-    return timelineRange;
-  }
-
-  test('renderingFrameConstruction', function() {
-    var r = new RenderingFrameTestData();
-    r.addSendEvent(10);
-    r.addBeginMainFrameEvent(20);
-    r.updateBounds();
-    var frames = RenderingFrame.getFrameEventsInsideRange(
-        r.rendererProcess, Range.fromExplicitRange(0, 30));
-    assert.equal(1, frames.length);
-    assert.equal(10, frames[0].queueDuration);
-  });
-
-  test('renderingFrame_missingSendBeginFrameEvents', function() {
-    var r = new RenderingFrameTestData();
-    r.addBeginMainFrameEvent(10);
-    r.updateBounds();
-    var frames = RenderingFrame.getFrameEventsInsideRange(
-        r.rendererProcess, Range.fromExplicitRange(0, 30));
-    assert.equal(0, frames.length);
-  });
-
-  test('renderingFrame_duplicateSendBeginFrameEvents', function() {
-    var r = new RenderingFrameTestData();
-    r.addSendEvent(10);
-    r.addBeginMainFrameEvent(20);
-    var begin_frame_id = r.events[0].args['begin_frame_id'];
-    r.compositorThread.sliceGroup.pushSlice(new ThreadSlice(
-        'cc,benchmark', SEND_BEGIN_FRAME_EVENT, 0, 30,
-        {'begin_frame_id': begin_frame_id}, 0));
-    r.updateBounds();
-    var frames = RenderingFrame.getFrameEventsInsideRange(
-        r.rendererProcess, Range.fromExplicitRange(0, 30));
-    assert.equal(0, frames.length);
-  });
-
-  test('renderingFrame_missingBeginMainFrameEvents', function() {
-    var r = new RenderingFrameTestData();
-    r.addSendEvent(10);
-    r.updateBounds();
-    var frames = RenderingFrame.getFrameEventsInsideRange(
-        r.rendererProcess, Range.fromExplicitRange(0, 30));
-    assert.equal(0, frames.length);
-  });
-
-  test('renderingFrame_duplicateBeginMainFrameEvents', function() {
-    var r = new RenderingFrameTestData();
-    r.addSendEvent(10);
-    r.addBeginMainFrameEvent(20);
-    r.addBeginMainFrameEvent(30);
-    r.addBeginMainFrameEvent(40);
-    r.updateBounds();
-
-    var frames = RenderingFrame.getFrameEventsInsideRange(
-        r.rendererProcess, Range.fromExplicitRange(0, 30));
-    assert.equal(1, frames.length);
-    assert.equal(30, frames[0].queueDuration);
-  });
-
-  test('renderingFrame_frameEventMissingBeginFrameId', function() {
-    var model = new tr.Model();
-    var process = model.getOrCreateProcess(1);
-    var main_thread = process.getOrCreateThread(11);
-    var model_range = {};
-
-    // Create an event without the begin_frame_id argument
-    var event = new ThreadSlice(
-        'cc,benchmark', BEGIN_MAIN_FRAME_EVENT, 0, 0.0);
-    main_thread.sliceGroup.pushSlice(event);
-    process.updateBounds();
-    try {
-      RenderingFrame.getFrameEventsInsideRange(process, model_range);
-      assert.isFalse(true, 'Exception should have been thrown');
-    } catch (err) {
-      assert.equal(true, true);
-    }
-  });
-
- /**
-  *  Test a basic sequenece, with expected frame queueing delays A and B.
-  *
-  *              |----A----|    |--B--|
-  *  Main:         [1]     [1]        [2]
-  *
-  *  Compositor: [1]            [2]
-  **/
-  test('renderingFrame_getFrameEventsInsideRange', function() {
-    var r = new RenderingFrameTestData();
-    r.addSendEvent(10);
-    r.addBeginMainFrameEvent(20);
-    r.addBeginMainFrameEvent(30);
-    r.addSendEvent(40);
-    r.addBeginMainFrameEvent(50);
-    r.updateBounds();
-
-    var timelineRange = generateTimelineRange();
-    var frameEvents = RenderingFrame.getFrameEventsInsideRange(
-        r.rendererProcess, timelineRange);
-
-    assert.equal(2, frameEvents.length);
-    assert.equal(20, frameEvents[0].queueDuration);
-    assert.equal(10, frameEvents[1].queueDuration);
-  });
-
- /**
-  * Test a sequenece missing an initial SendBeginFrame.
-  *
-  * Only one frame should be returned, with expected frame queueing delay A.
-  *                     |--A--|
-  *    Main:  [0]  [0]        [2]
-  *
-  * Compositor:            [2]
-  **/
-  test('renderingFrame_frameEventsMissingDataNotIncluded', function() {
-    var r = new RenderingFrameTestData();
-    r.addBeginMainFrameEvent(20);
-    r.addBeginMainFrameEvent(30);
-    r.addSendEvent(40);
-    r.addBeginMainFrameEvent(50);
-    r.updateBounds();
-
-    var timelineRange = generateTimelineRange();
-    var frameEvents = RenderingFrame.getFrameEventsInsideRange(
-        r.rendererProcess, timelineRange);
-
-    assert.equal(1, frameEvents.length);
-    assert.equal(10, frameEvents[0].queueDuration);
-  });
-
-});
-</script>
diff --git a/catapult/perf_insights/perf_insights/trace_handle.py b/catapult/perf_insights/perf_insights/trace_handle.py
deleted file mode 100644
index a4abf97..0000000
--- a/catapult/perf_insights/perf_insights/trace_handle.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright (c) 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-import uuid
-
-class TraceHandle(object):
-  def __init__(self, run_info):
-    self.run_info = run_info
-
-  def Open(self):
-    # Returns a with-able object containing a name.
-    raise NotImplementedError()
-
diff --git a/catapult/perf_insights/perf_insights/ui/corpus_drivers.html b/catapult/perf_insights/perf_insights/ui/corpus_drivers.html
new file mode 100644
index 0000000..4cad148
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/ui/corpus_drivers.html
@@ -0,0 +1,69 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/perf_insights/function_handle.html">
+<link rel="import" href="/tracing/base/xhr.html">
+
+<polymer-element name="pi-driver-base">
+  <script>
+  'use strict';
+
+  Polymer();
+  </script>
+</polymer-element>
+
+<polymer-element name="pi-driver-devserver"
+                 display-name="DevServer (local)"
+                 extends="pi-driver-base">
+  <script>
+  'use strict';
+
+  Polymer({
+    runMapFunction: function(mapFunctionHandle, corpusQuery) {
+      return tr.b.postAsync(
+          '/perf_insights_examples/run_map_function?corpus_query=' +
+          encodeURIComponent(corpusQuery),
+          JSON.stringify(mapFunctionHandle.asDict()));
+    }
+  });
+  </script>
+</polymer-element>
+
+<polymer-element name="pi-driver-cloudmapper"
+                 display-name="Cloud Mapper"
+                 extends="pi-driver-base">
+  <script>
+  'use strict';
+
+  Polymer({
+    runMapFunction: function(mapFunctionHandle, corpusQuery) {
+      return tr.b.postAsync(
+          '/perf_insights_examples/run_cloud_mapper?corpus_query=' +
+          encodeURIComponent(corpusQuery),
+          JSON.stringify(mapFunctionHandle.asDict()));
+    }
+  });
+  </script>
+</polymer-element>
+
+<polymer-element name="pi-driver-localcloudmapper"
+                 display-name="Cloud Mapper (local devserver)"
+                 extends="pi-driver-base">
+  <script>
+  'use strict';
+
+  Polymer({
+    runMapFunction: function(mapFunctionHandle, corpusQuery) {
+      return tr.b.postAsync(
+          '/perf_insights_examples/run_cloud_mapper?local=true&corpus_query=' +
+          encodeURIComponent(corpusQuery),
+          JSON.stringify(mapFunctionHandle.asDict()));
+    }
+  });
+  </script>
+</polymer-element>
+
diff --git a/catapult/perf_insights/perf_insights/ui/generic_results_view.html b/catapult/perf_insights/perf_insights/ui/generic_results_view.html
index ee0f113..488dda2 100644
--- a/catapult/perf_insights/perf_insights/ui/generic_results_view.html
+++ b/catapult/perf_insights/perf_insights/ui/generic_results_view.html
@@ -4,10 +4,10 @@
 Use of this source code is governed by a BSD-style license that can be
 found in the LICENSE file.
 -->
+<link rel="import" href="/perf_insights/mre/mre_result.html">
 <link rel="import" href="/tracing/ui/base/dom_helpers.html">
 <link rel="import" href="/tracing/ui/base/table.html">
-<link rel="import" href="/tracing/ui/units/generic_table_view.html">
-<link rel="import" href="/perf_insights/results/results.html">
+<link rel="import" href="/tracing/value/ui/generic_table_view.html">
 
 <polymer-element name="pi-ui-generic-results-view">
   <template>
@@ -20,8 +20,8 @@
         flex: 1 1 auto;
       }
     </style>
-    <tr-ui-u-generic-table-view id="table">
-    </tr-ui-u-generic-table-view>
+    <pre id="table">
+    </pre>
   </template>
   <script>
   'use strict';
@@ -48,61 +48,12 @@
 
       var results = this.mapResults_;
       if (!results)
-        results = new tr.r.Results();
+        results = [];
 
-      var rows = [];
-      results.allRunInfos.forEach(function(runInfo) {
-        var valuesForRun = results.getValuesForRunInfo(runInfo);
-
-        var urlSpan = tr.ui.b.createSpan({textContent: runInfo.displayName});
-        var row = {
-          url: urlSpan
-        };
-
-        if (results.doesRunContainFailure(runInfo)) {
-          urlSpan.style.backgroundColor = 'red';
-          var failureValuesForRun = results.allValues.filter(function(v) {
-            if (v.runInfo !== runInfo)
-              return false;
-            if (v instanceof pi.v.FailureValue)
-              return true;
-            return false;
-          });
-
-          var failureText;
-          if (failureValuesForRun.length === 1)
-            failureText = failureValuesForRun[0].name;
-          else
-            failureText = failureValuesForRun.length + ' failures';
-
-          var tooltipText = failureValuesForRun.map(function(v) {
-            return v.stack;
-          }).join('\n\n');
-          var failuresSpan = tr.ui.b.createSpan({
-                textContent: failureText,
-                tooltip: tooltipText,
-                backgroundColor: 'red'});
-          row.failures = failuresSpan;
-        }
-        valuesForRun.forEach(function(v) {
-          if (v instanceof pi.v.FailureValue) {
-            return;
-          } else if (v instanceof pi.v.SkipValue) {
-            row[v.name] = tr.ui.b.createSpan({
-                textContent: 'Skipped',
-                tooltip: v.description,
-                backgroundColor: 'rgb(127,127,127)'});
-          } else if (v instanceof pi.v.DictValue) {
-            row[v.name] = tr.ui.b.createSpan({textContent: 'DictValue'});
-          } else {
-            throw new Error('omg');
-          }
-        });
-        rows.push(row);
+      var table = this.$.table;
+      results.forEach(function(result) {
+        table.innerHTML += JSON.stringify(result.asDict(), undefined, 2);
       });
-      table.rowHighlightStyle = tr.ui.b.TableFormat.HighlightStyle.DARK;
-      table.importantColumNames = ['url'];
-      table.items = rows;
     }
   });
   </script>
diff --git a/catapult/perf_insights/perf_insights/ui/generic_results_view_test.html b/catapult/perf_insights/perf_insights/ui/generic_results_view_test.html
index 87f16b1..35e855e 100644
--- a/catapult/perf_insights/perf_insights/ui/generic_results_view_test.html
+++ b/catapult/perf_insights/perf_insights/ui/generic_results_view_test.html
@@ -5,9 +5,9 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/perf_insights/value/value.html">
-<link rel="import" href="/perf_insights/results/results.html">
+<link rel="import" href="/perf_insights/mre/mre_result.html">
 <link rel="import" href="/perf_insights/ui/generic_results_view.html">
+<link rel="import" href="/tracing/value/value.html">
 
 <script>
 'use strict';
@@ -15,38 +15,32 @@
 tr.b.unittest.testSuite(function() {
   var test_utils = tr.c.TestUtils;
   test('basic', function() {
-    var run1 = new pi.v.RunInfo('my_test.json');
-    var run2 = new pi.v.RunInfo('test2.json');
-    var run3 = new pi.v.RunInfo('something.json');
-
-    var results = new pi.r.Results();
+    var results = new pi.mre.MreResult();
 
     // Run 1. One failure.
-    results.addValue(new pi.v.FailureValue(run1, 'SomeFailureType',
-                                           {description: 'Description',
-                                            stack: tr.b.stackTraceAsString()}));
-    results.addValue(new pi.v.SkipValue(run1, 'col2',
-                                        {description: 'WhySkipped'}));
+    results.addPair('SomeFailureType',
+                    {description: 'Description',
+                    stack: tr.b.stackTraceAsString()});
+    results.addPair('col0',
+                    {description: 'WhySkipped'});
 
 
     // Run 2. Successful but had skip.
-    results.addValue(new pi.v.SkipValue(run2, 'col1',
-                                        {description: 'WhySkipped'}));
-    results.addValue(new pi.v.DictValue(run2, 'col2',
-                                        {my_key: 'my_value'}));
+    results.addPair('col1',
+                    {description: 'WhySkipped'});
+    results.addPair('col2',
+                    {my_key: 'my_value'});
 
     // Run 3. Two failures.
-    results.addValue(new pi.v.DictValue(run3, 'col1',
-                                        {my_key: 'my_value'}));
-    results.addValue(new pi.v.FailureValue(run3, 'SomeOtherFailureType',
-                                           {description: 'Description',
-                                            stack: tr.b.stackTraceAsString()}));
-    results.addValue(new pi.v.FailureValue(run3, 'RunThreeHadABadDay',
-                                           {description: 'Description',
-                                            stack: tr.b.stackTraceAsString()}));
+    results.addPair('SomeOtherFailureType',
+                    {description: 'Description',
+                    stack: tr.b.stackTraceAsString()});
+    results.addPair('RunThreeHadABadDay',
+                    {description: 'Description',
+                    stack: tr.b.stackTraceAsString()});
 
     var grv = document.createElement('pi-ui-generic-results-view');
-    grv.mapResults = results;
+    grv.mapResults = [results];
     this.addHTMLOutput(grv);
   });
 });
diff --git a/catapult/perf_insights/perf_insights/ui/grouping_table.html b/catapult/perf_insights/perf_insights/ui/grouping_table.html
deleted file mode 100644
index 62e3c69..0000000
--- a/catapult/perf_insights/perf_insights/ui/grouping_table.html
+++ /dev/null
@@ -1,204 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2014 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/ui/base/table.html">
-
-<polymer-element name="pi-ui-grouping-table">
-  <template>
-    <style>
-    :host {
-      display: flex;
-    }
-    #table {
-      flex: 1 1 auto;
-    }
-    </style>
-    <tr-ui-b-table id="table"></tr-ui-b-table>
-  </template>
-</polymer-element>
-<script>
-'use strict';
-
-tr.exportTo('pi.ui', function() {
-
-  function Row(title, data, groupingKeyFuncs, compareFunc) {
-    this.title = title;
-    this.data_ = data;
-    if (groupingKeyFuncs === undefined)
-      groupingKeyFuncs = [];
-    this.groupingKeyFuncs_ = groupingKeyFuncs;
-
-    this.subRowsBuilt_ = false;
-    this.subRows_ = undefined;
-  }
-
-  Row.prototype = {
-    getCurrentGroupingKeyFunc_: function() {
-      if (this.groupingKeyFuncs_.length === 0)
-        return undefined;
-      return this.groupingKeyFuncs_[0];
-    },
-
-    get data() {
-      return this.data_;
-    },
-
-    rebuildSubRowsIfNeeded_: function() {
-      if (this.subRowsBuilt_)
-        return;
-      this.subRowsBuilt_ = true;
-
-      var groupingKeyFunc = this.getCurrentGroupingKeyFunc_();
-      if (groupingKeyFunc === undefined) {
-        this.subRows_ = undefined;
-        return;
-      }
-
-      var dataByKey = {};
-      var hasValues = false;
-      this.data_.forEach(function(datum) {
-        var key = groupingKeyFunc(datum);
-        hasValues = hasValues || (key !== undefined);
-        if (dataByKey[key] === undefined)
-          dataByKey[key] = [];
-        dataByKey[key].push(datum);
-      });
-      if (!hasValues) {
-        this.subRows_ = undefined;
-        return;
-      }
-
-      this.subRows_ = [];
-      for (var key in dataByKey) {
-        var row = new Row(key,
-                       dataByKey[key],
-                       this.groupingKeyFuncs_.slice(1));
-        this.subRows_.push(row);
-      }
-    },
-
-    get isExpanded() {
-      return (this.subRows &&
-              (this.subRows.length > 0) &&
-              (this.subRows.length < 5));
-    },
-
-    get subRows() {
-      this.rebuildSubRowsIfNeeded_();
-      return this.subRows_;
-    }
-  };
-
-  Polymer('pi-ui-grouping-table', {
-    created: function() {
-      this.dataToGroup_ = undefined;
-      this.groupBy_ = undefined;
-    },
-
-    get tableColumns() {
-      return this.$.table.tableColumns;
-    },
-
-    set tableColumns(tableColumns) {
-      this.$.table.tableColumns = tableColumns;
-    },
-
-    get tableRows() {
-      return this.$.table.tableRows;
-    },
-
-    get sortColumnIndex() {
-      return this.$.table.sortColumnIndex;
-    },
-
-    set sortColumnIndex(sortColumnIndex) {
-      this.$.table.sortColumnIndex = sortColumnIndex;
-    },
-
-    get sortDescending() {
-      return this.$.table.sortDescending;
-    },
-
-    set sortDescending(sortDescending) {
-      this.$.table.sortDescending = sortDescending;
-    },
-
-    get selectionMode() {
-      return this.$.table.selectionMode;
-    },
-
-    set selectionMode(selectionMode) {
-      this.$.table.selectionMode = selectionMode;
-    },
-
-    get rowHighlightStyle() {
-      return this.$.table.rowHighlightStyle;
-    },
-
-    set rowHighlightStyle(rowHighlightStyle) {
-      this.$.table.rowHighlightStyle = rowHighlightStyle;
-    },
-
-    get cellHighlightStyle() {
-      return this.$.table.cellHighlightStyle;
-    },
-
-    set cellHighlightStyle(cellHighlightStyle) {
-      this.$.table.cellHighlightStyle = cellHighlightStyle;
-    },
-
-    get selectedColumnIndex() {
-      return this.$.table.selectedColumnIndex;
-    },
-
-    set selectedColumnIndex(selectedColumnIndex) {
-      this.$.table.selectedColumnIndex = selectedColumnIndex;
-    },
-
-    get selectedTableRow() {
-      return this.$.table.selectedTableRow;
-    },
-
-    set selectedTableRow(selectedTableRow) {
-      this.$.table.selectedTableRow = selectedTableRow;
-    },
-
-    get groupBy() {
-      return this.groupBy_;
-    },
-
-    set groupBy(groupBy) {
-      this.groupBy_ = groupBy;
-      this.updateContents_();
-    },
-
-    get dataToGroup() {
-      return this.dataToGroup_;
-    },
-
-    set dataToGroup(dataToGroup) {
-      this.dataToGroup_ = dataToGroup;
-      this.updateContents_();
-    },
-
-    rebuild: function() {
-      this.$.table.rebuild();
-    },
-
-    updateContents_: function() {
-      var groupBy = this.groupBy_ || [];
-      var dataToGroup = this.dataToGroup_ || [];
-
-      var superRow = new Row('', dataToGroup, groupBy);
-      this.$.table.tableRows = superRow.subRows || [];
-    }
-  });
-
-  return {
-  };
-});
-</script>
diff --git a/catapult/perf_insights/perf_insights/ui/map_function_side_panel.html b/catapult/perf_insights/perf_insights/ui/map_function_side_panel.html
index 6d60272..6f8ad75 100644
--- a/catapult/perf_insights/perf_insights/ui/map_function_side_panel.html
+++ b/catapult/perf_insights/perf_insights/ui/map_function_side_panel.html
@@ -5,13 +5,13 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/tracing/base/utils.html">
-<link rel="import" href="/tracing/ui/side_panel/side_panel.html">
-<link rel="import" href="/tracing/ui/base/dom_helpers.html">
-<link rel="import" href="/tracing/ui/analysis/generic_object_view.html">
-<link rel="import" href="/perf_insights/results/results.html">
-<link rel="import" href="/perf_insights/value/value.html">
 <link rel="import" href="/perf_insights/function_handle.html">
+<link rel="import" href="/perf_insights/mre/mre_result.html">
+<link rel="import" href="/tracing/base/utils.html">
+<link rel="import" href="/tracing/ui/analysis/generic_object_view.html">
+<link rel="import" href="/tracing/ui/base/dom_helpers.html">
+<link rel="import" href="/tracing/ui/side_panel/side_panel.html">
+<link rel="import" href="/tracing/value/value.html">
 
 <polymer-element name="pi-ui-map-function-side-panel"
                  extends="tr-ui-side-panel">
@@ -126,12 +126,11 @@
       var gov = document.createElement('tr-ui-a-generic-object-view');
       mapResultEl.appendChild(gov);
 
-      var results = new pi.r.Results();
-      var runInfo = new pi.v.RunInfo(document.location.toString());
+      var result = new pi.mre.MreResult();
+      var canonicalUrl = document.location.toString();
 
       try {
-        this.currentMapFunctionTypeInfo.constructor(results, runInfo,
-                                                    this.model_);
+        this.currentMapFunctionTypeInfo.constructor(result, this.model_);
       } catch (ex) {
         ex = tr.b.normalizeException(ex);
         gov.object = ex.stack;
diff --git a/catapult/perf_insights/perf_insights/ui/map_function_side_panel_test.html b/catapult/perf_insights/perf_insights/ui/map_function_side_panel_test.html
index 47be699..499cccd 100644
--- a/catapult/perf_insights/perf_insights/ui/map_function_side_panel_test.html
+++ b/catapult/perf_insights/perf_insights/ui/map_function_side_panel_test.html
@@ -5,13 +5,10 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/perf_insights/value/value.html">
-<link rel="import" href="/perf_insights/value/run_info.html">
-<link rel="import" href="/perf_insights/results/results.html">
-<link rel="import" href="/perf_insights/mappers/weather_report_map_function.html">
+<link rel="import" href="/perf_insights/mre/mre_result.html">
+<link rel="import" href="/perf_insights/ui/map_function_side_panel.html">
 <link rel="import" href="/tracing/core/test_utils.html">
 <link rel="import" href="/tracing/extras/chrome/chrome_test_utils.html">
-<link rel="import" href="/perf_insights/ui/map_function_side_panel.html">
 
 
 <script>
diff --git a/catapult/perf_insights/perf_insights/ui/pi_app_main.html b/catapult/perf_insights/perf_insights/ui/pi_app_main.html
index d447733..2f3c7ba 100644
--- a/catapult/perf_insights/perf_insights/ui/pi_app_main.html
+++ b/catapult/perf_insights/perf_insights/ui/pi_app_main.html
@@ -4,13 +4,15 @@
 Use of this source code is governed by a BSD-style license that can be
 found in the LICENSE file.
 -->
+<link rel="import" href="/perf_insights/function_handle.html">
+<link rel="import" href="/perf_insights/mre/job.html">
+<link rel="import" href="/perf_insights/mre/mre_result.html">
+<link rel="import" href="/perf_insights/ui/generic_results_view.html">
+<link rel="import" href="/perf_insights/ui/reports/pi_report.html">
 <link rel="import" href="/tracing/base/xhr.html">
 <link rel="import" href="/tracing/ui/base/dom_helpers.html">
 <link rel="import" href="/tracing/ui/base/info_bar_group.html">
 <link rel="import" href="/tracing/ui/base/polymer_utils.html">
-<link rel="import" href="/perf_insights/ui/reports/pi_report.html">
-<link rel="import" href="/perf_insights/function_handle.html">
-<link rel="import" href="/perf_insights/ui/generic_results_view.html">
 
 <polymer-element name="pi-ui-pi-app-main">
   <template>
@@ -86,46 +88,59 @@
       var topLeftControls = this.$.top_left_controls;
       var topRightControls = this.$.top_right_controls;
 
-      var mapTracesDriverSelector = document.createElement('select');
-      mapTracesDriverSelector.classList.add('map-traces-driver-selector');
-      // This selector is actually populated in the mapTracesDrivers setter.
-      topLeftControls.appendChild(mapTracesDriverSelector);
+      var piDriverPolymerElementNames = tr.ui.b.getPolymerElementsThatSubclass(
+          'pi-driver-base');
+      var piDriverElementOptions = piDriverPolymerElementNames.map(
+          function(peTagName) {
+            var pe = tr.ui.b.getPolymerElementNamed(peTagName);
+            return {
+              label: pe.getAttribute('display-name'),
+              value: peTagName
+            };
+          });
 
-      var cannedCorpusQueries = [
+      var driverSelector = tr.ui.b.createSelector(
+          this, 'currentMapTracesDriver',
+          'pi.app_main.currentMapTracesDriver',
+          piDriverElementOptions[0].value,
+          piDriverElementOptions);
+
+      topLeftControls.appendChild(driverSelector);
+
+      var corpusSelections = [
         {
-          'label': 'All traces',
-          'value': 'True'
+          'label': 'Deep Reports',
+          'value': 'https://performance-insights.appspot.com'
         },
         {
-          'label': 'At most one trace',
-          'value': 'MAX_TRACE_HANDLES=1'
-        },
-        {
-          'label': 'At most two traces',
-          'value': 'MAX_TRACE_HANDLES=2'
-        },
-        {
-          'label': 'At most 5 traces',
-          'value': 'MAX_TRACE_HANDLES=5'
-        },
-        {
-          'label': 'At most 10 traces',
-          'value': 'MAX_TRACE_HANDLES=10'
-        },
-        {
-          'label': 'At most 50 traces',
-          'value': 'MAX_TRACE_HANDLES=50'
-        },
-        {
-          'label': 'At most 100 traces',
-          'value': 'MAX_TRACE_HANDLES=100'
+          'label': 'Bulk Reports',
+          'value': 'https://performance-insights.appspot.com'
         }
       ];
-      var corpusQuerySelector = tr.ui.b.createSelector(
+      var corpusSelector = tr.ui.b.createSelector(
+          this, 'corpus',
+          'pi.app_main.corpus',
+          corpusSelections[0].value,
+          corpusSelections);
+      topLeftControls.appendChild(corpusSelector);
+
+      var corpusQuerySelector = tr.ui.b.createTextInput(
           this, 'corpusQuery',
           'pi.app_main.corpusQuery',
-          cannedCorpusQueries[0].value,
-          cannedCorpusQueries);
+          'MAX_TRACE_HANDLES=10');
+
+      var self = this;
+      function onQuerySelectorKeypress(e) {
+        var key = e.which || e.keyCode;
+        if (key === 13) { // 13 is enter
+            self.scheduleUpdateContents_();
+            self.fire('ui-state-changed');
+        }
+      }
+
+      corpusQuerySelector.addEventListener('keypress', onQuerySelectorKeypress);
+
+      corpusQuerySelector.style.width = '350px';
       topLeftControls.appendChild(corpusQuerySelector);
 
       var piReportPolymerElementNames = tr.ui.b.getPolymerElementsThatSubclass(
@@ -144,6 +159,17 @@
           piReportElementOptions);
       topLeftControls.appendChild(reportSelector);
 
+      var self = this;
+      function onProcessButton() {
+        self.scheduleUpdateContents_();
+        self.fire('ui-state-changed');
+      }
+
+      var processButton = tr.ui.b.createButton(
+          this, 'processButton',
+          'Process!', onProcessButton);
+      topLeftControls.appendChild(processButton);
+
       var showRawResultsCheckbox = tr.ui.b.createCheckBox(
           this, 'showRawResults',
           'pi.app_main.showRawResults', false,
@@ -155,28 +181,6 @@
       return this.mapTracesDrivers_;
     },
 
-    set mapTracesDrivers(mapTracesDrivers) {
-      this.mapTracesDrivers_ = mapTracesDrivers;
-
-      var topLeftControls = this.$.top_left_controls;
-      var oldSelector = topLeftControls.querySelector(
-          '.map-traces-driver-selector');
-      var options = mapTracesDrivers.map(function(mapTracesDriver) {
-        return {
-          label: mapTracesDriver.name,
-          value: mapTracesDriver
-        };
-      });
-      var newSelector = tr.ui.b.createSelector(
-          this, 'currentMapTracesDriver',
-          'pi.app_main.currentMapTracesDriver',
-          options[0].value,
-          options);
-      newSelector.classList.add('map-traces-driver-selector');
-      topLeftControls.replaceChild(newSelector, oldSelector);
-    },
-
-
     get mappingState() {
       return {
         currentMapTracesDriver: this.currentMapTracesDriver_,
@@ -192,8 +196,6 @@
 
     set currentMapTracesDriver(currentMapTracesDriver) {
       this.currentMapTracesDriver_ = currentMapTracesDriver;
-      this.scheduleUpdateContents_();
-      this.fire('ui-state-changed');
     },
 
     get corpusQuery() {
@@ -202,8 +204,6 @@
 
     set corpusQuery(corpusQuery) {
       this.corpusQuery_ = corpusQuery;
-      this.scheduleUpdateContents_();
-      this.fire('ui-state-changed');
     },
 
     get piReportElementName() {
@@ -212,8 +212,6 @@
 
     set piReportElementName(piReportElementName) {
       this.piReportElementName_ = piReportElementName;
-      this.scheduleUpdateContents_();
-      this.fire('ui-state-changed');
     },
 
     get mapClientSide() {
@@ -222,8 +220,6 @@
 
     set mapClientSide(mapClientSide) {
       this.mapClientSide_ = mapClientSide;
-      this.scheduleUpdateContents_();
-      this.fire('ui-state-changed');
     },
 
     get showRawResults() {
@@ -232,8 +228,6 @@
 
     set showRawResults(showRawResults) {
       this.showRawResults_ = showRawResults;
-      this.scheduleUpdateContents_();
-      this.fire('ui-state-changed');
     },
 
     scheduleUpdateContents_: function() {
@@ -267,30 +261,12 @@
           infobars.addMessage('Cannot map');
           return mapResults;
         }
-        if (!mapResults.hadFailures)
+        if (!mapResults.some(function(r) { return r.hadFailures(); }))
           return mapResults;
 
-        function onTellMeMore() {
-          var dlg = new tr.ui.b.Overlay();
-          dlg.dlg = 'Results summary';
-
-          var grv = document.createElement('pi-ui-generic-results-view');
-
-          grv.mapResults = mapResults;
-          grv.style.minHeight = '500px';
-          dlg.appendChild(grv);
-          dlg.visible = true;
-        }
-
-        var numFailedRuns = mapResults.failedRunInfos.length;
         infobars.addMessage(
-            'There were ' + numFailedRuns + ' traces that did not process.',
-            [
-              {
-                buttonText: 'Tell me more...',
-                onClick: onTellMeMore
-              }
-            ]);
+            'Some traces did not process.'
+            );
 
         return mapResults;
       }
@@ -307,25 +283,41 @@
             mappingState.corpusQuery === undefined) {
           return undefined;
         }
-
         var mapFunctionName = pe.getAttribute('map-function-name');
         var mapFunctionHref = pe.getAttribute('map-function-href');
-        var moduleToLoad = new pi.ModuleToLoad(mapFunctionHref);
-        var mapFunctionHandle = new pi.FunctionHandle([moduleToLoad],
-                                                      mapFunctionName);
+        var mapModuleToLoad = new pi.ModuleToLoad(mapFunctionHref);
+        var mapFunctionHandle = new pi.FunctionHandle(
+            [mapModuleToLoad], mapFunctionName);
+
+        var reduceFunctionName = pe.getAttribute('reduce-function-name');
+        var reduceFunctionHref = pe.getAttribute('reduce-function-href');
+        var reduceFunctionHandle;
+        if (reduceFunctionName && reduceFunctionHref) {
+          var reduceModuleToLoad = new pi.ModuleToLoad(reduceFunctionHref);
+          reduceFunctionHandle = new pi.FunctionHandle(
+              [reduceModuleToLoad], reduceFunctionName);
+        }
+
+        var job = new pi.mre.Job(mapFunctionHandle, reduceFunctionHandle);
 
         if (mappingState.mapClientSide) {
           throw new Error('Currently unsupported');
         }
-        return mappingState.currentMapTracesDriver(mapFunctionHandle,
-                                                   mappingState.corpusQuery);
+
+        var peCurrentDriver = document.querySelector(
+            mappingState.currentMapTracesDriver);
+        return peCurrentDriver.runMapFunction(job, mappingState.corpusQuery);
       });
       p = p.then(function responseToResults(responseText) {
         if (responseText === undefined)
           return undefined;
 
         var data = JSON.parse(responseText);
-        return pi.r.Results.fromDict(data);
+        if (!data) {
+          return [];
+        } else {
+          return data.map(pi.mre.MreResult.fromDict);
+        }
       });
 
       p = p.then(clearInfobarsOrUpdateWithErrors);
@@ -341,6 +333,7 @@
         } else {
           reportEl = document.createElement(mappingState.piReportElementName);
         }
+
         reportEl.mapResults = mapResults;
         reportContainer.appendChild(reportEl);
       });
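
Taken together, the pi_app_main.html hunks above replace the old function-valued
map-traces drivers with a job-based flow: the map handle and the optional reduce
handle are wrapped in a pi.mre.Job, the driver selector stores a DOM selector
string, and the selected driver element's runMapFunction() resolves to JSON text
that is parsed into pi.mre.MreResult objects. A rough sketch of that flow,
reusing names that appear elsewhere in this patch (the query string and the
promise handling here are illustrative, not part of the patch):

    var mapHandle = new pi.FunctionHandle(
        [new pi.ModuleToLoad('/perf_insights/mappers/test_mapper.html')],
        'testMapFunction');
    var reduceHandle = new pi.FunctionHandle(
        [new pi.ModuleToLoad('/perf_insights/reducers/test_reducer.html')],
        'testReduceFunction');
    var job = new pi.mre.Job(mapHandle, reduceHandle);

    // currentMapTracesDriver now holds a selector string, so the app resolves
    // it to a driver element before running the job.
    var driver = document.querySelector('pi-driver-testmapper');
    driver.runMapFunction(job, 'MAX_TRACE_HANDLES=10').then(function(text) {
      var data = JSON.parse(text);
      var mapResults = data ? data.map(pi.mre.MreResult.fromDict) : [];
      // Each MreResult exposes hadFailures() and a `pairs` dictionary keyed
      // by value name (e.g. result.pairs.sr in startup_report below).
    });
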
diff --git a/catapult/perf_insights/perf_insights/ui/pi_app_main_test.html b/catapult/perf_insights/perf_insights/ui/pi_app_main_test.html
index e9cd869..6600147 100644
--- a/catapult/perf_insights/perf_insights/ui/pi_app_main_test.html
+++ b/catapult/perf_insights/perf_insights/ui/pi_app_main_test.html
@@ -5,12 +5,34 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/perf_insights/value/value.html">
-<link rel="import" href="/perf_insights/value/run_info.html">
-<link rel="import" href="/perf_insights/results/results.html">
+<link rel="import" href="/perf_insights/ui/corpus_drivers.html">
+<link rel="import" href="/perf_insights/ui/perf_insights_full_config.html">
 <link rel="import" href="/perf_insights/ui/pi_app_main.html">
 
-<link rel="import" href="/perf_insights/ui/perf_insights_full_config.html">
+
+<polymer-element name="pi-driver-testmapper"
+                 display-name="Test Mapper"
+                 extends="pi-driver-base">
+  <script>
+  'use strict';
+
+  Polymer({
+    runMapFunction: function(mapFunctionHandle, corpusQuery) {
+      var p = Promise.resolve();
+      p = p.then(function() {
+        var dataString = tr.b.getSync(
+        '/perf_insights/ui/reports/wr_result_view_test_data.json');
+        return dataString;
+      });
+      return p;
+
+    }
+  });
+  </script>
+</polymer-element>
+
+<pi-driver-testmapper id="pi-driver-testmapper">
+</pi-driver-testmapper>
 
 <script>
 'use strict';
@@ -20,17 +42,6 @@
   test('instantiate', function() {
     var app = document.createElement('pi-ui-pi-app-main');
 
-    function myFakeMapTracesDriver(mapFunctionName, query) {
-      var p = Promise.resolve();
-      p = p.then(function() {
-        var dataString = tr.b.getSync(
-        '/perf_insights/ui/reports/wr_result_view_test_data.json');
-        return dataString;
-      });
-      return p;
-    };
-
-    app.mapTracesDrivers = [myFakeMapTracesDriver];
     this.addHTMLOutput(app);
   });
 });
diff --git a/catapult/perf_insights/perf_insights/ui/reports/all_reports.html b/catapult/perf_insights/perf_insights/ui/reports/all_reports.html
index e26e2e1..a8fe2c5 100644
--- a/catapult/perf_insights/perf_insights/ui/reports/all_reports.html
+++ b/catapult/perf_insights/perf_insights/ui/reports/all_reports.html
@@ -4,10 +4,12 @@
 Use of this source code is governed by a BSD-style license that can be
 found in the LICENSE file.
 -->
-<link rel="import" href="/perf_insights/ui/reports/weather_report.html">
-<link rel="import" href="/perf_insights/ui/reports/slice_cost_report.html">
-<link rel="import" href="/perf_insights/ui/reports/rail_score_report.html">
 <link rel="import" href="/perf_insights/ui/reports/coverage_report.html">
-<link rel="import" href="/perf_insights/ui/reports/task_info_report.html">
+<link rel="import" href="/perf_insights/ui/reports/rail_score_report.html">
+<link rel="import" href="/perf_insights/ui/reports/slice_cost_report.html">
 <link rel="import" href="/perf_insights/ui/reports/startup_report.html">
 <link rel="import" href="/perf_insights/ui/reports/stats_report.html">
+<link rel="import" href="/perf_insights/ui/reports/task_info_report.html">
+<link rel="import" href="/perf_insights/ui/reports/test_report.html">
+<link rel="import" href="/perf_insights/ui/reports/v8_report.html">
+<link rel="import" href="/perf_insights/ui/reports/weather_report.html">
diff --git a/catapult/perf_insights/perf_insights/ui/reports/coverage_report.html b/catapult/perf_insights/perf_insights/ui/reports/coverage_report.html
deleted file mode 100644
index 15e6c4a..0000000
--- a/catapult/perf_insights/perf_insights/ui/reports/coverage_report.html
+++ /dev/null
@@ -1,195 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/ui/base/dom_helpers.html">
-<link rel="import" href="/tracing/ui/base/overlay.html">
-<link rel="import" href="/tracing/ui/base/table.html">
-<link rel="import" href="/tracing/ui/units/time_duration_span.html">
-<link rel="import" href="/perf_insights/results/results.html">
-<link rel="import" href="/perf_insights/ui/reports/pi_report.html">
-<link rel="import" href="/perf_insights/ui/generic_results_view.html">
-<link rel="import" href="/perf_insights/mappers/reduce.html">
-<link rel="import" href="/perf_insights/mappers/slice_cost.html">
-
-<polymer-element name="pi-ui-wr-coverage-report"
-    extends="pi-ui-r-pi-report"
-    map-function-href="/perf_insights/mappers/weather_report_map_function.html"
-    map-function-name="weatherReportMapFunction">
-  <template>
-    <style>
-      :host {
-        display: flex;
-        flex-direction: column;
-      }
-      top-controls {
-        display: flex;
-        flex: 0 0 auto;
-        background-color: rgb(236, 236, 236);
-        border-bottom: 1px solid #8e8e8e;
-        padding: 4px;
-      }
-      #table {
-        flex: 1 1 auto;
-      }
-    </style>
-    <top-controls>
-    </top-controls>
-    <tr-ui-b-table id="table"></tr-ui-b-table>
-  </template>
-  <script>
-  'use strict';
-
-  Polymer({
-    created: function() {
-      this.mapResults_ = undefined;
-    },
-
-    get mapResults() {
-      return this.mapResults_;
-    },
-
-    set mapResults(mapResults) {
-      this.mapResults_ = mapResults;
-      this.updateContents_();
-    },
-
-    updateContents_: function() {
-      var table = this.$.table;
-
-      var results = this.mapResults_;
-      if (!results)
-        results = new tr.r.Results();
-
-      var columns = this.createColumns_();
-      table.tableColumns = columns;
-      table.sortColumnIndex = 2;
-      table.sortDescending = true;
-
-      var allCoverageInfo = [];
-      results.allValuesFromFailureFreeRuns.forEach(function(result) {
-        if (result.name != 'wr')
-          return;
-
-        // TODO(vmpstr): Why is there no irCoverage here?
-        if (!result.value.irCoverage) {
-          allCoverageInfo.push({
-            title: result.runInfo.displayName + ' (no coverage)',
-            coverage: {
-              associatedEventsCount: 'N/A',
-              unassociatedEventsCount: 'N/A',
-              coveredEventsCountRatio: 'N/A',
-              associatedEventsCpuTimeMs: 'N/A',
-              unassociatedEventsCpuTimeMs: 'N/A',
-              coveredEventsCpuTimeRatio: 'N/A'
-            }
-          });
-          return;
-        }
-        allCoverageInfo.push({
-          title: result.runInfo.displayName,
-          coverage: result.value.irCoverage
-        });
-      });
-
-      table.tableRows = allCoverageInfo;
-      table.rebuild();
-    },
-
-    createColumns_: function() {
-      function formatMs(value) {
-        var floatValue = parseFloat(value);
-        if (isNaN(floatValue))
-          return 'N/A';
-        var span = document.createElement('tr-ui-u-time-duration-span');
-        span.duration = floatValue;
-        return span;
-      }
-
-      function formatPercent(value) {
-        var floatValue = parseFloat(value);
-        if (isNaN(floatValue))
-          return 'N/A';
-        return tr.b.u.Units.normalizedPercentage.format(floatValue);
-      }
-
-      function formatCount(value) {
-        var intValue = parseInt(value);
-        if (isNaN(intValue))
-          return 'N/A';
-        return intValue.toLocaleString();
-      }
-
-      var columns = [
-        {
-          title: 'Title',
-          value: function(row) {
-            return row.title;
-          },
-          width: '400px',
-          cmp: function(a, b) {
-            return a.title.localeCompare(b.title);
-          }
-        },
-        {
-          title: 'Total event count',
-          value: function(row) {
-            return formatCount(row.coverage.associatedEventsCount +
-                               row.coverage.unassociatedEventsCount);
-          },
-          textAlign: 'right',
-          cmp: function(a, b) {
-            var aTotal = a.coverage.associatedEventsCount +
-                         a.coverage.unassociatedEventsCount;
-            var bTotal = b.coverage.associatedEventsCount +
-                         b.coverage.unassociatedEventsCount;
-            return tr.b.compareNumericWithNaNs(aTotal, bTotal);
-          }
-        },
-        {
-          title: 'Associated event percentage',
-          value: function(row) {
-            return formatPercent(row.coverage.coveredEventsCountRatio);
-          },
-          textAlign: 'right',
-          cmp: function(a, b) {
-            return tr.b.compareNumericWithNaNs(
-                a.coverage.coveredEventsCountRatio,
-                b.coverage.coveredEventsCountRatio);
-          }
-        },
-        {
-          title: 'Total event CPU time',
-          value: function(row) {
-            return formatMs(row.coverage.associatedEventsCpuTimeMs +
-                            row.coverage.unassociatedEventsCpuTimeMs);
-          },
-          textAlign: 'right',
-          cmp: function(a, b) {
-            var aTotal = a.coverage.associatedEventsCpuTimeMs +
-                         a.coverage.unassociatedEventsCpuTimeMs;
-            var bTotal = b.coverage.associatedEventsCpuTimeMs +
-                         b.coverage.unassociatedEventsCpuTimeMs;
-            return tr.b.compareNumericWithNaNs(aTotal, bTotal);
-          }
-        },
-        {
-          title: 'Associated time percentage',
-          value: function(row) {
-            return formatPercent(row.coverage.coveredEventsCpuTimeRatio);
-          },
-          textAlign: 'right',
-          cmp: function(a, b) {
-            return tr.b.compareNumericWithNaNs(
-                a.coverage.coveredEventsCpuTimeRatio,
-                b.coverage.coveredEventsCpuTimeRatio);
-          }
-        }
-      ];
-      return columns;
-    }
-  });
-  </script>
-</polymer-element>
diff --git a/catapult/perf_insights/perf_insights/ui/reports/pi_report.html b/catapult/perf_insights/perf_insights/ui/reports/pi_report.html
index 96d5082..3e51bbd 100644
--- a/catapult/perf_insights/perf_insights/ui/reports/pi_report.html
+++ b/catapult/perf_insights/perf_insights/ui/reports/pi_report.html
@@ -4,12 +4,10 @@
 Use of this source code is governed by a BSD-style license that can be
 found in the LICENSE file.
 -->
-<link rel="import" href="/tracing/ui/base/dom_helpers.html">
-<link rel="import" href="/tracing/ui/base/table.html">
-<link rel="import" href="/tracing/ui/units/time_duration_span.html">
-<link rel="import" href="/perf_insights/results/results.html">
 <link rel="import" href="/perf_insights/mappers/reduce.html">
 <link rel="import" href="/perf_insights/mappers/slice_cost.html">
+<link rel="import" href="/tracing/ui/base/dom_helpers.html">
+<link rel="import" href="/tracing/ui/base/table.html">
 
 <polymer-element name="pi-ui-r-pi-report">
   <script>
@@ -17,14 +15,20 @@
 
   Polymer({
     created: function() {
+      // TODO(eakuefner): Testing on master shows that this check fails to
+      // bail out.
       var thisPolymer = Polymer.getPolymerElementNamed(this.tagName);
-      if (!thisPolymer.getAttribute('map-function-href')) {
-        throw new Error(
-            'Subclasses of pi-ui-report must have map-function-href attrs');
-      }
-      if (!thisPolymer.getAttribute('map-function-name')) {
-        throw new Error(
-            'Subclasses of pi-ui-report must have map-function-name attrs');
+      var requiredAttrs = ['map-function-href',
+                           'map-function-name',
+                           'reduce-function-href',
+                           'reduce-function-name'];
+
+      for (var attrIdx in requiredAttrs) {
+        var attr = requiredAttrs[attrIdx];
+        if (!thisPolymer.getAttribute(attr)) {
+          throw new Error(
+            'Subclasses of pi-ui-report must have ' + attr + ' attrs');
+        }
       }
     },
 
diff --git a/catapult/perf_insights/perf_insights/ui/reports/rail_score_report.html b/catapult/perf_insights/perf_insights/ui/reports/rail_score_report.html
deleted file mode 100644
index 9df1a4a..0000000
--- a/catapult/perf_insights/perf_insights/ui/reports/rail_score_report.html
+++ /dev/null
@@ -1,210 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/base/units/histogram.html">
-<link rel="import" href="/tracing/base/units/scalar.html">
-<link rel="import" href="/tracing/base/units/units.html">
-<link rel="import" href="/tracing/extras/rail/rail_interaction_record.html">
-<link rel="import" href="/tracing/ui/base/dom_helpers.html">
-<link rel="import" href="/tracing/ui/base/info_bar_group.html">
-<link rel="import" href="/tracing/ui/base/overlay.html">
-<link rel="import" href="/tracing/ui/base/table.html">
-<link rel="import" href="/tracing/ui/units/generic_table_view.html">
-<link rel="import" href="/tracing/ui/units/histogram_span.html">
-<link rel="import" href="/tracing/ui/units/scalar_span.html">
-<link rel="import" href="/perf_insights/results/results.html">
-<link rel="import" href="/perf_insights/ui/caching_column.html">
-<link rel="import" href="/perf_insights/ui/generic_results_view.html">
-<link rel="import" href="/perf_insights/ui/grouping_table.html">
-<link rel="import" href="/perf_insights/ui/reports/pi_report.html">
-<link rel="import" href="/perf_insights/ui/trace_link_list.html">
-<link rel="import" href="/perf_insights/mappers/slice_cost.html">
-
-<polymer-element name="pi-ui-r-rail-score-report"
-    extends="pi-ui-r-pi-report"
-    map-function-href="/perf_insights/mappers/weather_report_map_function.html"
-    map-function-name="weatherReportMapFunction">
-  <template>
-    <style>
-      :host {
-        display: flex;
-        flex-direction: column;
-      }
-      #histogram {
-        flex: 1 1 auto;
-        max-width: 400px;
-      }
-      #links {
-        min-height: 200px;
-      }
-      h2 {
-        font-size: 12pt;
-      }
-    </style>
-    <tr-ui-b-table id="table"></tr-ui-b-table>
-
-    <h2>Score Histogram</h2>
-    <tr-ui-u-histogram-span id="histogram"></tr-ui-u-histogram-span>
-
-    <h2>Matching Traces</h2>
-    <pi-ui-trace-link-list id="links"></pi-ui-trace-link-list>
-  </template>
-  <script>
-  'use strict';
-
-  Polymer({
-    created: function() {
-      this.mapResults_ = undefined;
-    },
-
-    ready: function() {
-      this.$.table.addEventListener('selection-changed', function(tableEvent) {
-        tableEvent.stopPropagation();
-        this.setHistogramBasedOnSelection_();
-      }.bind(this));
-      var histogram = this.$.histogram;
-      histogram.addEventListener('brushed-bins-changed',
-          this.onBrushedBinsChanged_.bind(this));
-    },
-
-    onBrushedBinsChanged_: function(event) {
-      event.stopPropagation();
-      this.setTraceURLsFromBins_(this.$.histogram.brushedBins);
-    },
-
-    get mapResults() {
-      return this.mapResults_;
-    },
-
-    set mapResults(mapResults) {
-      this.mapResults_ = mapResults;
-      this.updateContents_();
-    },
-
-    updateContents_: function() {
-      var results = this.mapResults_;
-      if (!results)
-        results = new tr.r.Results();
-
-      var table = this.$.table;
-
-      var columns = this.createColumns_();
-      table.tableColumns = columns;
-      table.sortColumnIndex = 0;
-      table.sortDescending = false;
-      table.selectionMode = tr.ui.b.TableFormat.SelectionMode.ROW;
-
-      var railScoreHistograms = {};
-      results.allValuesFromFailureFreeRuns.map(function(v) {
-        if (!v.value.irTree)
-          return;
-        this.addToHistograms_(railScoreHistograms, v.value.irTree, v.runInfo);
-      }.bind(this));
-
-      if (!railScoreHistograms.histogram)
-        return;
-
-      var overallRow = this.convertToRowAndSubRows_(railScoreHistograms);
-      // Mark all first level subrows as using rail sorting.
-      if (overallRow.subRows) {
-        overallRow.subRows.forEach(function(row) {
-          row.isRailName = true;
-        });
-      }
-
-      overallRow.title = 'Overall RAIL score';
-      table.tableRows = [overallRow];
-      table.selectedTableRow = overallRow;
-      this.setHistogramBasedOnSelection_();
-    },
-
-    createColumns_: function() {
-      var columns = [{
-          title: 'Title',
-          value: function(row) {
-            return row.title;
-          },
-          cmp: function(a, b) {
-            if (a.isRailName && b.isRailName)
-              return tr.e.rail.railCompare(a.title, b.title);
-            return a.title.localeCompare(b.title);
-          },
-          width: '500px'
-        },
-        {
-          title: 'Avg. RAIL score',
-          textAlign: 'right',
-          value: function(row) {
-            return tr.ui.units.createScalarSpan(
-                new tr.b.u.Scalar(row.histogram.average,
-                                  tr.b.u.Units.normalizedPercentage));
-          },
-          cmp: function(a, b) {
-            return a.histogram.average - b.histogram.average;
-          }
-        }
-      ];
-      return columns;
-    },
-
-    addToHistograms_: function(histograms, irTree, sourceInfo) {
-      histograms.histogram = histograms.histogram ||
-          tr.b.u.Histogram.createLinear(
-              tr.b.u.Units.normalizedPercentage,
-              tr.b.Range.fromExplicitRange(0, 1),
-              33);
-      if (irTree.overallScore !== undefined)
-        histograms.histogram.add(irTree.overallScore, sourceInfo);
-      if (irTree.irScores) {
-        irTree.irScores.forEach(function(irScore) {
-          histograms.histogram.add(irScore, sourceInfo);
-        });
-      }
-      if (!irTree.subTypes)
-        return;
-      histograms.subTypes = histograms.subTypes || {};
-      for (var subType in irTree.subTypes) {
-        histograms.subTypes[subType] = histograms.subTypes[subType] || {};
-        this.addToHistograms_(histograms.subTypes[subType],
-            irTree.subTypes[subType], sourceInfo);
-      }
-    },
-
-    convertToRowAndSubRows_: function(histograms) {
-      var row = {
-        histogram: histograms.histogram
-      };
-      if (!histograms.subTypes)
-        return row;
-      row.isExpanded = true;
-      row.subRows = [];
-      for (var subType in histograms.subTypes) {
-        var subRow = this.convertToRowAndSubRows_(histograms.subTypes[subType]);
-        subRow.title = subType;
-        row.subRows.push(subRow);
-      }
-      return row;
-    },
-
-    setTraceURLsFromBins_: function(bins) {
-      var urlSet = [];
-      bins.forEach(function(bin) {
-        bin.sourceInfos.forEach(function(sourceInfo) {
-          urlSet[sourceInfo.url] = 1;
-        });
-      });
-      urlSet = Object.keys(urlSet);
-      urlSet.sort();
-      this.$.links.setTraceUrls(urlSet);
-    },
-
-    setHistogramBasedOnSelection_: function() {
-      this.$.histogram.histogram = this.$.table.selectedTableRow.histogram;
-      this.setTraceURLsFromBins_(this.$.histogram.histogram.allBins);
-    }
-  });
-  </script>
-</polymer-element>
diff --git a/catapult/perf_insights/perf_insights/ui/reports/slice_cost_report.html b/catapult/perf_insights/perf_insights/ui/reports/slice_cost_report.html
deleted file mode 100644
index bfd796c..0000000
--- a/catapult/perf_insights/perf_insights/ui/reports/slice_cost_report.html
+++ /dev/null
@@ -1,454 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/ui/base/dom_helpers.html">
-<link rel="import" href="/tracing/ui/base/info_bar_group.html">
-<link rel="import" href="/tracing/ui/base/overlay.html">
-<link rel="import" href="/tracing/ui/base/table.html">
-<link rel="import" href="/tracing/ui/units/time_duration_span.html">
-<link rel="import" href="/tracing/ui/units/histogram_span.html">
-<link rel="import" href="/perf_insights/results/results.html">
-<link rel="import" href="/perf_insights/ui/caching_column.html">
-<link rel="import" href="/perf_insights/ui/reports/pi_report.html">
-<link rel="import" href="/perf_insights/ui/grouping_table.html">
-<link rel="import" href="/perf_insights/ui/generic_results_view.html">
-<link rel="import" href="/perf_insights/ui/trace_link_list.html">
-<link rel="import" href="/perf_insights/mappers/slice_cost.html">
-
-<polymer-element name="pi-ui-r-slice-cost-report"
-    extends="pi-ui-r-pi-report"
-    map-function-href="/perf_insights/mappers/weather_report_map_function.html"
-    map-function-name="weatherReportMapFunction">
-  <template>
-    <style>
-      :host {
-        display: flex;
-        flex-direction: column;
-      }
-
-      top-controls {
-        display: flex;
-        flex: 0 0 auto;
-        flex-flow: wrap;
-        background-color: rgb(236, 236, 236);
-        border-bottom: 1px solid #8e8e8e;
-        padding: 4px;
-      }
-
-      content-pane {
-        min-height: 0;
-        display: flex;
-        flex-direction: row;
-      }
-
-      table-container {
-        flex: 1 1 60%;
-        display: flex;
-        overflow: auto;
-      }
-      #table {
-        flex: 1 1 60%;
-      }
-
-      right-pane {
-        border-left: 1px solid black;
-        display: flex;
-        flex-direction: column;
-        flex: 1 1 40%;
-      }
-      right-pane > * {
-        margin-bottom: 20px;
-      }
-
-      #links {
-        min-height: 0;
-        overflow: auto;
-      }
-    </style>
-    <top-controls></top-controls>
-    <content-pane>
-      <table-container>
-        <pi-ui-grouping-table id="table"></pi-ui-grouping-table>
-      </table-container>
-      <right-pane>
-        <div id="script-costs">
-          Script costs
-          <tr-ui-b-table id="script-costs-table"></tr-ui-b-table>
-        </div>
-
-        <div id="costs-histogram-container">
-          Histogram of
-          <select id="costs-histogram-cost-type"></select> values:
-          <tr-ui-u-histogram-span id="costs-histogram"></tr-ui-u-histogram-span>
-
-          Links
-          <pi-ui-trace-link-list id="links"></pi-ui-trace-link-list>
-        </div>
-      </div>
-    </content-pane>
-  </template>
-  <script>
-  'use strict';
-
-  Polymer({
-    created: function() {
-      this.mapResults_ = undefined;
-    },
-
-    ready: function() {
-      this.$.table.addEventListener(
-          'selection-changed', this.onSelectionChanged_.bind(this));
-
-      var topControls = this.shadowRoot.querySelector('top-controls');
-
-      this.groupByThreadName_ = tr.ui.b.createCheckBox(
-          undefined, undefined,
-          'pi.ui.wr.weather_report.groupByThreadName', true,
-          'Group by thread name',
-          this.updateContents_.bind(this));
-      topControls.appendChild(this.groupByThreadName_);
-
-      this.groupByRAILTypeName_ = tr.ui.b.createCheckBox(
-          undefined, undefined,
-          'pi.ui.wr.weather_report.groupByRAILTypeName', true,
-          'Group by RAIL Stage',
-          this.updateContents_.bind(this));
-      topControls.appendChild(this.groupByRAILTypeName_);
-
-      this.groupByUserFriendlyCategory_ = tr.ui.b.createCheckBox(
-          undefined, undefined,
-          'pi.ui.wr.weather_report.groupByUserFriendlyCategory', true,
-          'Group by Event Category',
-          this.updateContents_.bind(this));
-      topControls.appendChild(this.groupByUserFriendlyCategory_);
-
-      this.groupByTitle_ = tr.ui.b.createCheckBox(
-          undefined, undefined,
-          'pi.ui.wr.weather_report.groupByTitle', false,
-          'Group by Event Title',
-          this.updateContents_.bind(this));
-      topControls.appendChild(this.groupByTitle_);
-
-      this.groupByDomainCategory_ = tr.ui.b.createCheckBox(
-          undefined, undefined,
-          'pi.ui.wr.weather_report.groupByDomainCategory', true,
-          'Group by Domain Category',
-          this.updateContents_.bind(this));
-      topControls.appendChild(this.groupByDomainCategory_);
-
-      this.groupByDomain_ = tr.ui.b.createCheckBox(
-          undefined, undefined,
-          'pi.ui.wr.weather_report.groupByDomain', true,
-          'Group by Domain',
-          this.updateContents_.bind(this));
-      topControls.appendChild(this.groupByDomain_);
-
-      this.updateRightPane_();
-      this.initCostsHistogram_();
-    },
-
-    get mapResults() {
-      return this.mapResults_;
-    },
-
-    set mapResults(mapResults) {
-      this.mapResults_ = mapResults;
-      this.updateContents_();
-    },
-
-    onSelectionChanged_: function(event) {
-      this.updateRightPane_();
-    },
-
-    updateRightPane_: function() {
-      this.updateScriptCosts_();
-      this.updateCostsHistogram_();
-      var panes = this.shadowRoot.querySelector('right-pane').children;
-      var isFirst = true;
-      for (var i = 0; i < panes.length; i++) {
-        if (getComputedStyle(panes[i]).display === 'none')
-          continue;
-        if (isFirst) {
-          panes[i].style.borderTop = '';
-          isFirst = false;
-          continue;
-        }
-        panes[i].style.borderTop = '1px solid black';
-      }
-    },
-
-    updateScriptCosts_: function() {
-      var rows = [];
-      var footerRows = [];
-
-      // Aggregate values.
-      var aggregated = new pi.m.SliceCostInfo();
-      if (this.$.table.selectedTableRow) {
-        this.$.table.selectedTableRow.data.forEach(function(datum) {
-          aggregated.push(undefined, datum.sliceCostInfo);
-        });
-      }
-      if (aggregated.jsTime === 0) {
-        this.shadowRoot.querySelector('#script-costs').style.display = 'none';
-        return;
-      }
-      this.shadowRoot.querySelector('#script-costs').style.display = '';
-
-      // Display aggregated data.
-      for (var state in tr.model.source_info.JSSourceState) {
-        var stateName = tr.model.source_info.JSSourceState[state];
-        rows.push({
-          label: stateName,
-          value: tr.ui.units.createTimeDurationSpan(
-              aggregated.jsTimeByState[stateName])
-        });
-      }
-      footerRows.push({
-        label: 'JS Time',
-        value: tr.ui.units.createTimeDurationSpan(aggregated.jsTime)
-      });
-
-      // Push to table.
-      var scriptCostsTable = this.shadowRoot.querySelector(
-          '#script-costs-table');
-      scriptCostsTable.tableColumns = [
-        {
-          title: 'Label',
-          value: function(row) { return row.label; },
-          width: '150px'
-        },
-        {
-          title: 'Value',
-          value: function(row) { return row.value; },
-          width: '100%'
-        }
-      ];
-      scriptCostsTable.showHeader = false;
-      scriptCostsTable.tableRows = rows;
-      scriptCostsTable.footerRows = footerRows;
-
-      scriptCostsTable.rebuild();
-    },
-
-    updateContents_: function() {
-      var table = this.$.table;
-
-      var results = this.mapResults_;
-      if (!results)
-        results = new tr.r.Results();
-
-      var columns = this.createColumns_();
-      table.tableColumns = columns;
-      table.sortColumnIndex = 2;
-      table.sortDescending = true;
-
-      var allSliceCosts = [];
-      results.allValuesFromFailureFreeRuns.forEach(function(result) {
-        if (result.name != 'wr')
-          return;
-
-        result.value.sliceCosts.forEach(function(item) {
-          var sliceCostInfo = pi.m.SliceCostInfo.fromDict(item);
-          allSliceCosts.push({
-            runInfo: result.runInfo,
-            sliceCostInfo: sliceCostInfo
-          });
-        });
-      });
-
-      var groupBy = [];
-      if (this.groupByThreadName_.checked) {
-        groupBy.push(function(datum) {
-          return datum.sliceCostInfo.threadGroup;
-        });
-      }
-      if (this.groupByRAILTypeName_.checked) {
-        groupBy.push(function(datum) {
-          return datum.sliceCostInfo.railTypeName;
-        });
-      }
-
-      if (this.groupByUserFriendlyCategory_.checked) {
-        groupBy.push(function(datum) {
-          return datum.sliceCostInfo.userFriendlyCategory || 'other';
-        });
-      }
-
-      var didGroupByDomainCategoryOrDomain;
-      if (groupBy.length === 0) {
-        if (this.groupByDomainCategory_.checked) {
-          groupBy.push(function(datum) {
-            return datum.sliceCostInfo.domainCategory;
-          });
-          didGroupByDomainCategoryOrDomain = true;
-        }
-        if (this.groupByDomain_.checked) {
-          groupBy.push(function(datum) {
-            return datum.sliceCostInfo.domain;
-          });
-          didGroupByDomainCategoryOrDomain = true;
-        }
-      }
-
-      if (this.groupByTitle_.checked) {
-        groupBy.push(function(datum) {
-          return datum.sliceCostInfo.title;
-        });
-      }
-
-      if (this.groupByDomainCategory_.checked &&
-          !didGroupByDomainCategoryOrDomain) {
-        groupBy.push(function(datum) {
-          return datum.sliceCostInfo.domainCategory;
-        });
-      }
-
-      if (this.groupByDomain_.checked && !didGroupByDomainCategoryOrDomain) {
-        groupBy.push(function(datum) {
-          return datum.sliceCostInfo.domain;
-        });
-      }
-
-      table.selectionMode = tr.ui.b.TableFormat.SelectionMode.ROW;
-      table.groupBy = groupBy;
-      table.dataToGroup = allSliceCosts;
-      table.rebuild();
-      this.updateRightPane_();
-    },
-
-    createColumns_: function() {
-      var columns = [
-        {
-          title: 'Title',
-          value: function(row) {
-            return row.title;
-          },
-          cmp: function(a, b) {
-            return a.title.localeCompare(b.title);
-          },
-          width: '500px'
-        },
-        this.createCachingColumn_('Self time (total)', function(datum) {
-          return datum.sliceCostInfo.selfTime;
-        }),
-        this.createCachingColumn_('CPU Self time (total)', function(datum) {
-          return datum.sliceCostInfo.cpuSelfTime;
-        })
-      ];
-      return columns;
-    },
-
-    createCachingColumn_(title, getDataFunction) {
-      function computeStats(sliceCostInfo) {
-        var sum = tr.b.Statistics.sum(sliceCostInfo, getDataFunction);
-        return sum === undefined ? undefined :
-            tr.ui.units.createTimeDurationSpan(sum);
-      }
-
-      var column = new pi.ui.CachingColumn(title, computeStats);
-      column.textAlign = 'right';
-      column.cmp = function(row0, row1) {
-        var value0 = column.value(row0);
-        var value1 = column.value(row1);
-        return tr.b.comparePossiblyUndefinedValues(value0, value1,
-            function(v0, v1) {
-              return v0.duration - v1.duration;
-            });
-      };
-      return column;
-    },
-
-    initCostsHistogram_: function() {
-      var histogram = this.shadowRoot.querySelector('#costs-histogram');
-      histogram.addEventListener('brushed-bins-changed',
-          this.onBrushedBinsChanged_.bind(this));
-
-      var options = [
-        {
-          label: 'Self time',
-          value: 'selfTime',
-          func: function(datum) { return datum.sliceCostInfo.selfTime; }
-        },
-        {
-          label: 'CPU self time',
-          value: 'cpuSelfTime',
-          func: function(datum) { return datum.sliceCostInfo.cpuSelfTime; }
-        },
-        {
-          label: 'JS time',
-          value: 'jsTime',
-          func: function(datum) { return datum.sliceCostInfo.jsTime; }
-        }
-      ];
-      for (var state in tr.model.source_info.JSSourceState) {
-        options.push({
-          label: 'JS time: ' + state,
-          value: 'jsTime.' + state,
-          func: function(datum) {
-            return datum.sliceCostInfo.jsTimeByState[state];
-          }
-        });
-      }
-      var oldSelector = this.shadowRoot.querySelector(
-          '#costs-histogram-cost-type');
-      var newSelector = tr.ui.b.createSelector(
-          this, 'currentSliceReportCostType',
-          'pi.app_main.currentSliceReportCostType',
-          options[0].value,
-          options);
-      newSelector.id = 'costs-histogram-cost-type';
-      oldSelector.parentElement.replaceChild(newSelector, oldSelector);
-    },
-
-    set currentSliceReportCostType(currentSliceReportCostType) {
-      this.updateCostsHistogram_();
-    },
-
-    updateCostsHistogram_: function() {
-      var container = this.shadowRoot.querySelector(
-          '#costs-histogram-container');
-
-      if (this.$.table.selectedTableRow === undefined) {
-        container.style.display = 'none';
-        return;
-      }
-
-      container.style.display = '';
-
-      var selector = this.shadowRoot.querySelector(
-          '#costs-histogram-cost-type');
-      var func = selector.selectedItem.func;
-
-      var histogram = tr.b.u.Histogram.createLinear(
-          tr.b.u.Units.timeDurationInMs,
-          tr.b.Range.fromExplicitRange(0, 100),
-          100);
-      this.$.table.selectedTableRow.data.forEach(function(datum) {
-        var value = func(datum);
-        histogram.add(value, datum.runInfo);
-      });
-
-      var histogramSpan = this.shadowRoot.querySelector('#costs-histogram');
-      histogramSpan.histogram = histogram;
-      this.onBrushedBinsChanged_();
-    },
-
-    onBrushedBinsChanged_: function() {
-      var histogramSpan = this.shadowRoot.querySelector('#costs-histogram');
-      var brushedBins = histogramSpan.brushedBins;
-      var urlSet = {};
-      brushedBins.forEach(function(bin) {
-        bin.sourceInfos.forEach(function(sourceInfo) {
-          urlSet[sourceInfo.url] = 1;
-        });
-      });
-      urlSet = Object.keys(urlSet);
-      urlSet.sort();
-      this.$.links.setTraceUrls(urlSet);
-    }
-  });
-  </script>
-</polymer-element>
diff --git a/catapult/perf_insights/perf_insights/ui/reports/startup_report.html b/catapult/perf_insights/perf_insights/ui/reports/startup_report.html
index 461addf..fe01a39 100644
--- a/catapult/perf_insights/perf_insights/ui/reports/startup_report.html
+++ b/catapult/perf_insights/perf_insights/ui/reports/startup_report.html
@@ -4,23 +4,22 @@
 Use of this source code is governed by a BSD-style license that can be
 found in the LICENSE file.
 -->
+<link rel="import" href="/perf_insights/mappers/slice_cost.html">
+<link rel="import" href="/perf_insights/ui/caching_column.html">
+<link rel="import" href="/perf_insights/ui/reports/pi_report.html">
 <link rel="import" href="/tracing/ui/base/dom_helpers.html">
+<link rel="import" href="/tracing/ui/base/grouping_table.html">
 <link rel="import" href="/tracing/ui/base/info_bar_group.html">
 <link rel="import" href="/tracing/ui/base/overlay.html">
 <link rel="import" href="/tracing/ui/base/table.html">
-<link rel="import" href="/tracing/ui/units/scalar_span.html">
-<link rel="import" href="/tracing/ui/units/histogram_span.html">
-<link rel="import" href="/perf_insights/results/results.html">
-<link rel="import" href="/perf_insights/ui/caching_column.html">
-<link rel="import" href="/perf_insights/ui/reports/pi_report.html">
-<link rel="import" href="/perf_insights/ui/grouping_table.html">
-<link rel="import" href="/perf_insights/ui/generic_results_view.html">
-<link rel="import" href="/perf_insights/mappers/slice_cost.html">
+<link rel="import" href="/tracing/value/ui/histogram_span.html">
+<link rel="import" href="/tracing/value/ui/scalar_span.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <polymer-element name="pi-ui-r-startup-report"
     extends="pi-ui-r-pi-report"
     map-function-href="/perf_insights/mappers/startup_map_function.html"
-    map-function-name="startupMapFunction">
+    map-function-name="startupMapFunction">
   <template>
     <style>
       :host {
@@ -74,13 +73,13 @@
       <pane>
         <div>Fast startup traces (<span id="numFast"></span> traces)</div>
         <table-container>
-          <pi-ui-grouping-table id="tableFast"></pi-ui-grouping-table>
+          <tr-ui-b-grouping-table id="tableFast"></tr-ui-b-grouping-table>
         </table-container>
       </pane>
       <pane>
         <div>Slow startup traces (<span id="numSlow"></span> traces)</div>
         <table-container>
-          <pi-ui-grouping-table id="tableSlow"></pi-ui-grouping-table>
+          <tr-ui-b-grouping-table id="tableSlow"></tr-ui-b-grouping-table>
         </table-container>
       </pane>
     </content-pane>
@@ -179,22 +178,21 @@
     updateTable_: function(table, numTracesSpan, filter) {
       var results = this.mapResults_;
       if (!results)
-        results = new tr.r.Results();
+        results = [];
 
       var allSliceCosts = [];
       var numTraces = 0;
       var totalStartupDuration = 0;
-      results.allValuesFromFailureFreeRuns.forEach(function(result) {
-        if (result.name != 'sr')
-          return;
-        if (!filter(result.value))
+      results.forEach(function(result) {
+        var sr = result.pairs.sr;
+        if (sr === undefined)
           return;
         numTraces++;
-        totalStartupDuration += result.value.startupDuration;
-        result.value.sliceCosts.forEach(function(item) {
+        totalStartupDuration += sr.startupDuration;
+        sr.sliceCosts.forEach(function(item) {
           var sliceCostInfo = pi.m.SliceCostInfo.fromDict(item);
           allSliceCosts.push({
-            runInfo: result.runInfo,
+            canonicalUrl: result.pairs.canonical_url,
             sliceCostInfo: sliceCostInfo
           });
         });
@@ -247,7 +245,7 @@
           return undefined;
         mean /= totalStartupDuration;
         var span = tr.ui.units.createScalarSpan(mean);
-        span.unit = tr.b.u.Units.normalizedPercentage;
+        span.unit = tr.v.Unit.byName.normalizedPercentage;
         span.percentage = mean;
         return span;
       }
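
The startup report now consumes the raw array-of-results shape rather than a
pi.r.Results object: each result carries a pairs dictionary keyed by value
name, with the canonical trace URL stored alongside. A minimal example of the
shape this code expects (the key names come from the hunk above and from the
reworked test data below; the values are made up):

    var results = [{
      pairs: {
        canonical_url: 'file:///traces/example_trace.json.gz',  // hypothetical
        sr: {
          startupDuration: 1234,  // ms, made-up value
          sliceCosts: []          // SliceCostInfo dicts, as emitted by the mapper
        }
      }
    }];
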
diff --git a/catapult/perf_insights/perf_insights/ui/reports/stats_report.html b/catapult/perf_insights/perf_insights/ui/reports/stats_report.html
index 6355f65..912cbd0 100644
--- a/catapult/perf_insights/perf_insights/ui/reports/stats_report.html
+++ b/catapult/perf_insights/perf_insights/ui/reports/stats_report.html
@@ -6,7 +6,7 @@
 -->
 
 <link rel="import" href="/perf_insights/ui/reports/pi_report.html">
-<link rel="import" href="/tracing/ui/units/histogram_span.html">
+<link rel="import" href="/tracing/value/ui/histogram_span.html">
 
 <polymer-element name="pi-ui-r-trace-stats-report"
     extends="pi-ui-r-pi-report"
@@ -24,7 +24,7 @@
     </style>
 
     <h2>Total events per second</h2>
-    <tr-ui-u-histogram-span id="seconds_histogram"></tr-ui-u-histogram-span>
+    <tr-v-ui-histogram-span id="seconds_histogram"></tr-v-ui-histogram-span>
 
     <tr-ui-b-table id="table"></tr-ui-b-table>
 
@@ -59,15 +59,16 @@
 
       var histograms;
       var total_from_categories = 0;
-      this.mapResults_.allValuesFromFailureFreeRuns.forEach(function(result) {
-        var histogram = tr.b.u.Histogram.fromDict(result.value.events_seconds);
+      this.mapResults_.forEach(function(result) {
+        var histogramDict = result.pairs.stats.events_seconds;
+        var histogram = tr.v.Histogram.fromDict(histogramDict);
         if (histograms !== undefined)
           histograms.addHistogram(histogram);
         else
           histograms = histogram;
 
-        for (var category in result.value.categories) {
-          var num = result.value.categories[category];
+        for (var category in result.pairs.categories) {
+          var num = result.pairs.categories[category];
 
           if (allCategoryStats[category] === undefined)
             allCategoryStats[category] = 0;
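
The stats report follows the same pattern: it pulls a histogram dict per trace
from result.pairs.stats.events_seconds and folds the histograms together, using
the tr.v.Histogram / tr.v.Unit names that replace tr.b.u.* throughout this
patch. A condensed sketch of that aggregation (the category totals and table
rebuild are omitted):

    var combined;  // accumulates events-per-second across all traces
    results.forEach(function(result) {
      var h = tr.v.Histogram.fromDict(result.pairs.stats.events_seconds);
      if (combined === undefined)
        combined = h;
      else
        combined.addHistogram(h);
    });
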
diff --git a/catapult/perf_insights/perf_insights/ui/reports/task_info_report.html b/catapult/perf_insights/perf_insights/ui/reports/task_info_report.html
deleted file mode 100644
index 099ba8d..0000000
--- a/catapult/perf_insights/perf_insights/ui/reports/task_info_report.html
+++ /dev/null
@@ -1,218 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/base/iteration_helpers.html">
-<link rel="import" href="/tracing/base/units/histogram.html">
-<link rel="import" href="/tracing/ui/base/dom_helpers.html">
-<link rel="import" href="/tracing/ui/base/overlay.html">
-<link rel="import" href="/tracing/ui/base/table.html">
-<link rel="import" href="/tracing/ui/units/histogram_span.html">
-<link rel="import" href="/tracing/ui/units/time_duration_span.html">
-<link rel="import" href="/perf_insights/results/results.html">
-<link rel="import" href="/perf_insights/ui/caching_column.html">
-<link rel="import" href="/perf_insights/ui/generic_results_view.html">
-<link rel="import" href="/perf_insights/ui/grouping_table.html">
-<link rel="import" href="/perf_insights/ui/reports/pi_report.html">
-<link rel="import" href="/perf_insights/ui/trace_link_list.html">
-<link rel="import" href="/perf_insights/mappers/reduce.html">
-<link rel="import" href="/perf_insights/mappers/slice_cost.html">
-
-<polymer-element name="pi-ui-r-task-info-report"
-    extends="pi-ui-r-pi-report"
-    map-function-href="/perf_insights/mappers/task_info_map_function.html"
-    map-function-name="taskInfoMapFunction">
-  <template>
-    <style>
-      :host {
-        display: flex;
-        flex-direction: column;
-      }
-      top-controls {
-        display: flex;
-        flex: 0 0 auto;
-        background-color: rgb(236, 236, 236);
-        border-bottom: 1px solid #8e8e8e;
-        padding: 4px;
-      }
-      #histogram {
-        flex: 1 1 auto;
-        max-width: 400px;
-      }
-      #links {
-        min-height: 200px;
-      }
-      h2 {
-        font-size: 12pt;
-      }
-      #table {
-        flex: 1 1 auto;
-      }
-    </style>
-    <pi-ui-grouping-table id="table"></pi-ui-grouping-table>
-    <h2>Histogram</h2>
-    <tr-ui-u-histogram-span id="histogram"></tr-ui-u-histogram-span>
-    <h2>Links</h2>
-    <pi-ui-trace-link-list id="links"></pi-ui-trace-link-list>
-  </template>
-  <script>
-  'use strict';
-
-  Polymer({
-    created: function() {
-      this.mapResults_ = undefined;
-    },
-
-    ready: function() {
-      this.$.table.addEventListener('selection-changed',
-          function(tableEvent) {
-        tableEvent.stopPropagation();
-        this.setHistogramBasedOnSelection_();
-      }.bind(this));
-      var histogram = this.$.histogram;
-      histogram.addEventListener('brushed-bins-changed',
-          this.onBrushedBinsChanged_.bind(this));
-    },
-
-    onBrushedBinsChanged_: function(event) {
-      event.stopPropagation();
-      this.setTraceURLsFromBins_(this.$.histogram.brushedBins);
-    },
-
-    setTraceURLsFromBins_: function(bins) {
-      var urlSet = [];
-      bins.forEach(function(bin) {
-        bin.sourceInfos.forEach(function(sourceInfo) {
-          urlSet[sourceInfo.url] = 1;
-        });
-      });
-      urlSet = Object.keys(urlSet);
-      urlSet.sort();
-      this.$.links.setTraceUrls(urlSet);
-    },
-
-    get mapResults() {
-      return this.mapResults_;
-    },
-
-    set mapResults(mapResults) {
-      this.mapResults_ = mapResults;
-      this.updateContents_();
-    },
-
-    updateContents_: function() {
-      var table = this.$.table;
-
-      var results = this.mapResults_;
-      if (!results)
-        results = new tr.r.Results();
-
-      var columns = this.createColumns_();
-      table.tableColumns = columns;
-      table.sortColumnIndex = 3;
-      table.sortDescending = true;
-
-      var rows = [];
-      results.allValuesFromFailureFreeRuns.forEach(function(result) {
-        for (var process in result.value) {
-          for (var thread in result.value[process]) {
-            rows.push({
-              process: process,
-              thread: thread,
-              type: result.name,
-              histogram:
-                  tr.b.u.Histogram.fromDict(result.value[process][thread])
-            });
-          }
-        }
-      });
-
-      var groupBy = [];
-      groupBy.push(function(datum) {
-        return datum.process;
-      });
-      groupBy.push(function(datum) {
-        return datum.thread;
-      });
-      table.groupBy = groupBy;
-      table.selectionMode = tr.ui.b.TableFormat.SelectionMode.CELL;
-      table.dataToGroup = rows;
-      table.selectedTableRow = table.tableRows[0];
-      table.selectedColumnIndex = 2;
-      table.rebuild();
-      this.setHistogramBasedOnSelection_();
-    },
-
-    createColumns_: function() {
-      var columns = [{
-          title: 'Title',
-          value: function(row) {
-            return row.title;
-          },
-          cmp: function(a, b) {
-            return a.title.localeCompare(b.title);
-          },
-          width: '500px'
-        },
-        this.createCachingColumn_('Avg. Time in queue', 'time_spent_in_queue'),
-        this.createCachingColumn_('Avg. Self time',
-            'time_spent_in_top_level_task'),
-        this.createCachingColumn_('Avg. CPU time',
-            'cpu_time_spent_in_top_level_task')
-      ];
-
-      return columns;
-    },
-
-    createCachingColumn_: function(title, type) {
-      function averageFromHistograms(data) {
-        var runningSum = 0;
-        var numValues = 0;
-        data.forEach(function(datum) {
-          if (datum.type !== type)
-            return;
-          runningSum += datum.histogram.runningSum;
-          numValues += datum.histogram.numValues;
-        });
-        var average = 0;
-        if (numValues !== 0)
-          average = runningSum / numValues;
-        return tr.ui.units.createTimeDurationSpan(average);
-      }
-
-      var column = new pi.ui.CachingColumn(title, averageFromHistograms);
-      column.type = type;
-      column.textAlign = 'right';
-      column.cmp = function(row0, row1) {
-        return column.value(row0).duration - column.value(row1).duration;
-      };
-      return column;
-    },
-
-    setHistogramBasedOnSelection_: function() {
-      var table = this.$.table;
-      var selectedColumn = table.selectedColumnIndex;
-      // Don't display a histogram if the user selects the title.
-      if (selectedColumn === 0)
-        return;
-
-      var desiredType = table.tableColumns[selectedColumn].type;
-      var histogram = undefined;
-      table.selectedTableRow.data.forEach(function(datum) {
-        if (datum.type !== desiredType)
-          return;
-        if (!histogram)
-          histogram = datum.histogram.clone();
-        else
-          histogram.addHistogram(datum.histogram);
-      });
-
-      this.$.histogram.histogram = histogram;
-      this.setTraceURLsFromBins_(this.$.histogram.histogram.allBins);
-    }
-
-  });
-  </script>
-</polymer-element>
diff --git a/catapult/perf_insights/perf_insights/ui/reports/task_info_report_test.html b/catapult/perf_insights/perf_insights/ui/reports/task_info_report_test.html
deleted file mode 100644
index 435ab8c..0000000
--- a/catapult/perf_insights/perf_insights/ui/reports/task_info_report_test.html
+++ /dev/null
@@ -1,33 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2013 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/perf_insights/value/value.html">
-<link rel="import" href="/perf_insights/value/run_info.html">
-<link rel="import" href="/perf_insights/results/results.html">
-<link rel="import" href="/perf_insights/ui/reports/task_info_report.html">
-
-<link rel="import" href="/tracing/base/xhr.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  var test_utils = tr.c.TestUtils;
-  test('savedData', function() {
-    // TODO(nduca): #1219, stop reading an actual wr file as our test data and
-    // use synthetic data.
-    var dataString = tr.b.getSync(
-        '/perf_insights/ui/reports/task_info_result_view_test_data.json');
-    var results = pi.r.Results.fromDict(JSON.parse(dataString));
-
-    var view = document.createElement('pi-ui-r-task-info-report');
-    view.mapResults = results;
-    this.addHTMLOutput(view);
-  });
-});
-</script>
-
diff --git a/catapult/perf_insights/perf_insights/ui/reports/task_info_result_view_test_data.json b/catapult/perf_insights/perf_insights/ui/reports/task_info_result_view_test_data.json
index b691bde..41300bc 100644
--- a/catapult/perf_insights/perf_insights/ui/reports/task_info_result_view_test_data.json
+++ b/catapult/perf_insights/perf_insights/ui/reports/task_info_result_view_test_data.json
@@ -1,30 +1,5 @@
-{
-  "runs": {
-    "8601fc46-415b-4939-a8c3-ad3cfee57c41": {
-      "url": "file:////Users/nduca/Local/measurmt-traces/desktop/v8-samples/trace_cnet.json.gz", 
-      "display_name": "trace_cnet.json.gz", 
-      "metadata": {
-        "tags": []
-      }, 
-      "type": "perf_insights.value.RunInfo", 
-      "run_id": "8601fc46-415b-4939-a8c3-ad3cfee57c41"
-    }, 
-    "4d6b4cd6-f0b0-4369-9e8a-ccf340ec0b98": {
-      "url": "file:////Users/nduca/Local/measurmt-traces/desktop/v8-samples/trace_aol.json.gz", 
-      "display_name": "trace_aol.json.gz", 
-      "metadata": {
-        "tags": []
-      }, 
-      "type": "perf_insights.value.RunInfo", 
-      "run_id": "4d6b4cd6-f0b0-4369-9e8a-ccf340ec0b98"
-    }
-  }, 
-  "values": [
-    {
-      "important": false, 
-      "type": "dict", 
-      "name": "time_spent_in_queue", 
-      "value": {
+[{"pairs": {
+      "time_spent_in_queue": {
         "Other": {
           "CrGpuMain": {
             "nanSourceInfos": [], 
@@ -3608,13 +3583,7 @@
           }
         }
       }, 
-      "run_id": "4d6b4cd6-f0b0-4369-9e8a-ccf340ec0b98"
-    }, 
-    {
-      "important": false, 
-      "type": "dict", 
-      "name": "time_spent_in_top_level_task", 
-      "value": {
+      "time_spent_in_top_level_task": {
         "Other": {
           "CrGpuMain": {
             "nanSourceInfos": [], 
@@ -5530,13 +5499,7 @@
           }
         }
       }, 
-      "run_id": "4d6b4cd6-f0b0-4369-9e8a-ccf340ec0b98"
-    }, 
-    {
-      "important": false, 
-      "type": "dict", 
-      "name": "cpu_time_spent_in_top_level_task", 
-      "value": {
+      "cpu_time_spent_in_top_level_task": {
         "Other": {
           "CrGpuMain": {
             "nanSourceInfos": [], 
@@ -7435,14 +7398,10 @@
             "unit": "ms"
           }
         }
-      }, 
-      "run_id": "4d6b4cd6-f0b0-4369-9e8a-ccf340ec0b98"
-    }, 
+      } 
+    }}, {"pairs":
     {
-      "important": false, 
-      "type": "dict", 
-      "name": "time_spent_in_queue", 
-      "value": {
+      "time_spent_in_queue": {
         "Other": {
           "CrGpuMain": {
             "nanSourceInfos": [], 
@@ -8990,13 +8949,7 @@
           }
         }
       }, 
-      "run_id": "8601fc46-415b-4939-a8c3-ad3cfee57c41"
-    }, 
-    {
-      "important": false, 
-      "type": "dict", 
-      "name": "time_spent_in_top_level_task", 
-      "value": {
+      "time_spent_in_top_level_task": {
         "Other": {
           "CrGpuMain": {
             "nanSourceInfos": [], 
@@ -10533,13 +10486,7 @@
           }
         }
       }, 
-      "run_id": "8601fc46-415b-4939-a8c3-ad3cfee57c41"
-    }, 
-    {
-      "important": false, 
-      "type": "dict", 
-      "name": "cpu_time_spent_in_top_level_task", 
-      "value": {
+      "cpu_time_spent_in_top_level_task": {
         "Other": {
           "CrGpuMain": {
             "nanSourceInfos": [], 
@@ -12074,8 +12021,7 @@
             "unit": "ms"
           }
         }
-      }, 
-      "run_id": "8601fc46-415b-4939-a8c3-ad3cfee57c41"
+      } 
     }
-  ]
-}
\ No newline at end of file
+  }
+]
diff --git a/catapult/perf_insights/perf_insights/ui/reports/test_report.html b/catapult/perf_insights/perf_insights/ui/reports/test_report.html
new file mode 100644
index 0000000..f306f43
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/ui/reports/test_report.html
@@ -0,0 +1,114 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<link rel="import" href="/perf_insights/mappers/slice_cost.html">
+<link rel="import" href="/perf_insights/mre/mre_result.html">
+<link rel="import" href="/perf_insights/ui/caching_column.html">
+<link rel="import" href="/perf_insights/ui/generic_results_view.html">
+<link rel="import" href="/perf_insights/ui/reports/pi_report.html">
+<link rel="import" href="/perf_insights/ui/trace_link_list.html">
+<link rel="import" href="/tracing/base/iteration_helpers.html">
+<link rel="import" href="/tracing/model/source_info/source_info.html">
+<link rel="import" href="/tracing/ui/base/dom_helpers.html">
+<link rel="import" href="/tracing/ui/base/grouping_table.html">
+<link rel="import" href="/tracing/ui/base/info_bar_group.html">
+<link rel="import" href="/tracing/ui/base/overlay.html">
+<link rel="import" href="/tracing/ui/base/table.html">
+<link rel="import" href="/tracing/value/ui/generic_table_view.html">
+<link rel="import" href="/tracing/value/ui/histogram_span.html">
+<link rel="import" href="/tracing/value/ui/scalar_span.html">
+<link rel="import" href="/tracing/value/unit.html">
+
+<polymer-element name="pi-ui-r-test-report"
+    extends="pi-ui-r-pi-report"
+    map-function-href="/perf_insights/mappers/test_mapper.html"
+    map-function-name="testMapFunction"
+    reduce-function-href="/perf_insights/reducers/test_reducer.html"
+    reduce-function-name="testReduceFunction">
+  <template>
+    <style>
+      :host {
+        display: flex;
+        flex-direction: column;
+      }
+      #histogram {
+        flex: 1 1 auto;
+        max-width: 400px;
+      }
+      #links {
+        min-height: 200px;
+      }
+      h2 {
+        font-size: 12pt;
+      }
+    </style>
+    <tr-ui-b-table id="table"></tr-ui-b-table>
+  </template>
+  <script>
+  'use strict';
+
+  Polymer({
+    created: function() {
+      this.mapResults_ = undefined;
+    },
+
+    get mapResults() {
+      return this.mapResults_;
+    },
+
+    set mapResults(mapResults) {
+      this.mapResults_ = mapResults;
+      this.updateContents_();
+    },
+
+    updateContents_: function() {
+      var results = this.mapResults_;
+      var table = this.$.table;
+      var columns = this.createColumns_();
+      table.tableColumns = columns;
+      table.sortColumnIndex = 1;
+      table.sortDescending = true;
+
+      var categoryRows = [];
+      results.forEach(function(result) {
+          categoryRows.push({
+            title: 'Foo',
+            total: result.pairs['simon'].value
+          });
+      });
+
+      table.tableRows = categoryRows;
+      table.rebuild();
+    },
+
+    createColumns_: function() {
+      var columns = [
+        {
+          title: 'Title',
+          value: function(row) {
+            return row.title;
+          },
+          textAlign: 'left',
+          width: '400px'
+        },
+        {
+          title: 'Total',
+          value: function(row) {
+            return row.total;
+          },
+          cmp: function(a, b) {
+            return tr.b.compareNumericWithNaNs(a.total, b.total);
+          },
+          textAlign: 'left',
+          width: '100px'
+        }
+
+      ];
+      return columns;
+    }
+  });
+  </script>
+</polymer-element>
diff --git a/catapult/perf_insights/perf_insights/ui/reports/v8_report.html b/catapult/perf_insights/perf_insights/ui/reports/v8_report.html
new file mode 100644
index 0000000..c74ecb8
--- /dev/null
+++ b/catapult/perf_insights/perf_insights/ui/reports/v8_report.html
@@ -0,0 +1,487 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<link rel="import" href="/perf_insights/mappers/slice_cost.html">
+<link rel="import" href="/perf_insights/mre/mre_result.html">
+<link rel="import" href="/perf_insights/ui/generic_results_view.html">
+<link rel="import" href="/perf_insights/ui/reports/pi_report.html">
+<link rel="import" href="/tracing/ui/base/dom_helpers.html">
+<link rel="import" href="/tracing/ui/base/grouping_table_groupby_picker.html">
+<link rel="import" href="/tracing/ui/base/overlay.html">
+<link rel="import" href="/tracing/ui/base/table.html">
+<link rel="import" href="/tracing/value/ui/time_duration_span.html">
+
+<polymer-element name="pi-ui-v8-report"
+    extends="pi-ui-r-pi-report"
+    map-function-href="/perf_insights/mappers/v8_map_function.html"
+    map-function-name="v8ReportMapFunction"
+    reduce-function-href="/perf_insights/reducers/v8_reduce_function.html"
+    reduce-function-name="v8ReportReduceFunction">
+  <template>
+    <style>
+      :host {
+        display: flex;
+        flex-direction: column;
+      }
+
+      top-controls {
+        display: flex;
+        flex: 0 0 auto;
+        flex-flow: wrap;
+        background-color: rgb(236, 236, 236);
+        border-bottom: 1px solid #8e8e8e;
+        padding: 4px;
+      }
+
+      traces-processed {
+        display: flex;
+        flex: 0 0 auto;
+        flex-flow: wrap;
+        background-color: rgb(236, 236, 236);
+        border-bottom: 1px solid #8e8e8e;
+        padding: 4px;
+      }
+
+      content-pane {
+        min-height: 0;
+        display: flex;
+        flex-direction: row;
+      }
+
+      table-container {
+        flex: 1 1 60%;
+        display: flex;
+      }
+      #table {
+        flex: 1 1 60%;
+      }
+
+      right-pane {
+        border-left: 1px solid black;
+        display: flex;
+        flex-direction: column;
+        flex: 1 1 40%;
+        overflow: auto;
+      }
+      right-pane > * {
+        margin-bottom: 20px;
+      }
+
+      #links {
+        min-height: 0;
+      }
+    </style>
+    <top-controls>
+      <span>Group by:</span>
+      <tr-ui-b-grouping-table-groupby-picker id="picker">
+      </tr-ui-b-grouping-table-groupby-picker>
+    </top-controls>
+    <traces-processed id="traces_processed">
+    </traces-processed>
+    <content-pane>
+      <table-container>
+        <tr-ui-b-grouping-table id="table"></tr-ui-b-grouping-table>
+      </table-container>
+      <right-pane>
+        <div id="script-costs">
+          Script costs
+          <tr-ui-b-table id="script-costs-table"></tr-ui-b-table>
+        </div>
+
+        <div id="costs-histogram-container">
+          Histogram of
+          <select id="costs-histogram-cost-type"></select> values:
+          <tr-v-ui-histogram-span id="costs-histogram"></tr-v-ui-histogram-span>
+
+          Links
+          <pi-ui-trace-link-list id="links"></pi-ui-trace-link-list>
+
+          <br>
+          Scripts
+          <pi-ui-trace-link-list id="scripts"></pi-ui-trace-link-list>
+        </div>
+      </div>
+    </content>
+  </template>
+  <script>
+  'use strict';
+
+  Polymer({
+    created: function() {
+      this.mapResults_ = undefined;
+    },
+
+    ready: function() {
+      this.$.table.addEventListener(
+          'selection-changed', this.onSelectionChanged_.bind(this));
+
+      var topControls = this.shadowRoot.querySelector('top-controls');
+      var picker = this.$.picker;
+      picker.settingsKey = 'v8-report-grouping-picker';
+      picker.possibleGroups = [
+        {
+          key: 'framework', label: 'Framework',
+          dataFn: function(datum) { return datum.sliceCostInfo.framework; }
+        },
+        {
+          key: 'scriptUrl', label: 'Script URL',
+          dataFn: function(datum) { return datum.sliceCostInfo.scriptURLClean; }
+        },
+        {
+          key: 'threadGroup', label: 'Thread name',
+          dataFn: function(datum) { return datum.sliceCostInfo.threadGroup; }
+        },
+        {
+          key: 'railTypeName', label: 'RAIL Stage',
+          dataFn: function(datum) { return datum.sliceCostInfo.railTypeName; }
+        },
+        {
+          key: 'userFriendlyCategory', label: 'User Friendly Category',
+          dataFn: function(datum) {
+              return datum.sliceCostInfo.userFriendlyCategory || 'other';
+          }
+        },
+        {
+          key: 'title', label: 'Title',
+          dataFn: function(datum) { return datum.sliceCostInfo.title; }
+        },
+        {
+          key: 'domain', label: 'Domain',
+          dataFn: function(datum) { return datum.sliceCostInfo.domain; }
+        },
+        {
+          key: 'domainCategory', label: 'Domain Category',
+          dataFn: function(datum) {
+            return datum.sliceCostInfo.domainCategory;
+          }
+        }
+      ];
+      picker.defaultGroupKeys = [
+          'framework', 'userFriendlyCategory', 'scriptUrl'
+      ];
+      picker.addEventListener('current-groups-changed',
+                              this.updateContents_.bind(this));
+
+      this.updateRightPane_();
+      this.initCostsHistogram_();
+    },
+
+    get mapResults() {
+      return this.mapResults_;
+    },
+
+    set mapResults(mapResults) {
+      this.mapResults_ = mapResults;
+      this.updateContents_();
+    },
+
+    onSelectionChanged_: function(event) {
+      this.updateRightPane_();
+    },
+
+    updateRightPane_: function() {
+      this.updateScriptCosts_();
+      this.updateCostsHistogram_();
+      var panes = this.shadowRoot.querySelector('right-pane').children;
+      var isFirst = true;
+      for (var i = 0; i < panes.length; i++) {
+        if (getComputedStyle(panes[i]).display === 'none')
+          continue;
+        if (isFirst) {
+          panes[i].style.borderTop = '';
+          isFirst = false;
+          continue;
+        }
+        panes[i].style.borderTop = '1px solid black';
+      }
+    },
+
+    updateScriptCosts_: function() {
+      var rows = [];
+      var footerRows = [];
+
+      // Aggregate values.
+      var aggregated = new pi.m.SliceCostInfo();
+      if (this.$.table.selectedTableRow) {
+        this.$.table.selectedTableRow.data.forEach(function(datum) {
+          aggregated.push(undefined, datum.sliceCostInfo);
+        });
+      }
+      if (aggregated.jsTime === 0) {
+        this.shadowRoot.querySelector('#script-costs').style.display = 'none';
+        return;
+      }
+      this.shadowRoot.querySelector('#script-costs').style.display = '';
+
+      // Display aggregated data.
+      for (var state in tr.model.source_info.JSSourceState) {
+        var stateName = tr.model.source_info.JSSourceState[state];
+        rows.push({
+          label: stateName,
+          value: tr.v.ui.createTimeDurationSpan(
+              aggregated.jsTimeByState[stateName])
+        });
+      }
+      footerRows.push({
+        label: 'JS Time',
+        value: tr.v.ui.createTimeDurationSpan(aggregated.jsTime)
+      });
+
+      // Push to table.
+      var scriptCostsTable = this.shadowRoot.querySelector(
+          '#script-costs-table');
+      scriptCostsTable.tableColumns = [
+        {
+          title: 'Label',
+          value: function(row) { return row.label; },
+          width: '150px'
+        },
+        {
+          title: 'Value',
+          value: function(row) { return row.value; },
+          width: '100%'
+        }
+      ];
+      scriptCostsTable.showHeader = false;
+      scriptCostsTable.tableRows = rows;
+      scriptCostsTable.footerRows = footerRows;
+
+      scriptCostsTable.rebuild();
+    },
+
+    updateContents_: function() {
+      var table = this.$.table;
+
+      var results = this.mapResults_;
+      if (!results)
+        results = new pi.mre.MapResults();
+
+      var allSliceCosts = [];
+      var allTraceUrls = {};
+      results.forEach(function(result) {
+        tr.b.iterItems(result.pairs, function(name, sliceCosts) {
+          if (name != 'wr')
+            return;
+
+          tr.b.iterItems(sliceCosts, function(item_key, item) {
+            allSliceCosts.push({
+              sliceCostInfo: item
+            });
+            var keys = Object.keys(item.traceURLs);
+            keys.forEach(function(traceURL) {
+              allTraceUrls[traceURL] = true;
+            });
+          });
+        });
+      });
+
+      allTraceUrls = Object.keys(allTraceUrls);
+      this.$.traces_processed.innerText = 'Traces Processed: ' +
+                                          allTraceUrls.length;
+
+      table.rowStatsConstructor = this.createRowStatsConstructor_();
+
+      var columns = this.createColumns_();
+      table.tableColumns = columns;
+      table.sortColumnIndex = 2;
+      table.sortDescending = true;
+      table.selectionMode = tr.ui.b.TableFormat.SelectionMode.ROW;
+      table.groupBy = this.$.picker.currentGroups.map(function(group) {
+        return group.dataFn;
+      });
+      table.dataToGroup = allSliceCosts;
+      table.rebuild();
+      this.updateRightPane_();
+    },
+
+    createRowStatsConstructor_: function() {
+      return function SliceCostRowStats(row) {
+        var stats = new pi.m.SliceCostInfo();
+        row.data.forEach(function(datum) {
+          stats.push(undefined, datum.sliceCostInfo);
+        });
+        return stats;
+      }
+    },
+
+    createColumns_: function() {
+      var columns = [
+        {
+          title: 'Title',
+          value: function(row) {
+            return row.title;
+          },
+          cmp: function(a, b) {
+            return a.title.localeCompare(b.title);
+          },
+          width: '500px'
+        },
+        {
+          title: 'Self time (total)',
+          textAlign: 'right',
+          value: function(row) {
+            var value = new tr.v.ScalarNumeric(
+                tr.v.Unit.byName.timeDurationInMs,
+                row.rowStats.selfTime);
+            var spanEl = tr.v.ui.createScalarSpan(value);
+            return spanEl;
+          },
+          cmp: function(a, b) {
+            return tr.b.comparePossiblyUndefinedValues(a, b,
+                function(a, b) {
+                  return a.rowStats.selfTime - b.rowStats.selfTime;
+                });
+          }
+        },
+        {
+          title: 'CPU Self time (total)',
+          textAlign: 'right',
+          value: function(row) {
+            var value = new tr.v.ScalarNumeric(
+                tr.v.Unit.byName.timeDurationInMs,
+                row.rowStats.cpuSelfTime);
+            var spanEl = tr.v.ui.createScalarSpan(value);
+            return spanEl;
+          },
+          cmp: function(a, b) {
+            return tr.b.comparePossiblyUndefinedValues(a, b,
+                function(a, b) {
+                  return a.rowStats.cpuSelfTime - b.rowStats.cpuSelfTime;
+                });
+          }
+        }
+      ];
+      return columns;
+    },
+
+    initCostsHistogram_: function() {
+      var histogram = this.shadowRoot.querySelector('#costs-histogram');
+      histogram.addEventListener('brushed-bins-changed',
+          this.onBrushedBinsChanged_.bind(this));
+
+      var options = [
+        {
+          label: 'Self time',
+          value: 'selfTime',
+          func: function(datum) {
+            return datum.sliceCostInfo.selfTimeHistogram;
+          }
+        },
+        {
+          label: 'CPU self time',
+          value: 'cpuSelfTime',
+          func: function(datum) {
+            return datum.sliceCostInfo.cpuSelfTimeHistogram;
+          }
+        },
+        {
+          label: 'JS time',
+          value: 'jsTime',
+          func: function(datum) { return datum.sliceCostInfo.jsTime; }
+        }
+      ];
+      for (var state in tr.model.source_info.JSSourceState) {
+        options.push({
+          label: 'JS time: ' + state,
+          value: 'jsTime.' + state,
+          func: function(datum) {
+            return datum.sliceCostInfo.jsTimeByState[state];
+          }
+        });
+      }
+      var oldSelector = this.shadowRoot.querySelector(
+          '#costs-histogram-cost-type');
+      var newSelector = tr.ui.b.createSelector(
+          this, 'currentSliceReportCostType',
+          'pi.app_main.currentSliceReportCostType',
+          options[0].value,
+          options);
+      newSelector.id = 'costs-histogram-cost-type';
+      oldSelector.parentElement.replaceChild(newSelector, oldSelector);
+    },
+
+    set currentSliceReportCostType(currentSliceReportCostType) {
+      this.updateCostsHistogram_();
+    },
+
+    updateCostsHistogram_: function() {
+      var container = this.shadowRoot.querySelector(
+          '#costs-histogram-container');
+
+      if (this.$.table.selectedTableRow === undefined) {
+        container.style.display = 'none';
+        return;
+      }
+
+      container.style.display = '';
+
+      var selector = this.shadowRoot.querySelector(
+          '#costs-histogram-cost-type');
+      var func = selector.selectedItem.func;
+
+      var histogram = tr.v.Histogram.createLinear(
+          tr.v.Unit.byName.timeDurationInMs,
+          tr.b.Range.fromExplicitRange(0, 100),
+          100);
+      this.$.table.selectedTableRow.data.forEach(function(datum) {
+        var value = func(datum);
+        histogram.addHistogram(tr.v.Histogram.fromDict(value));
+      });
+
+      var histogramSpan = this.shadowRoot.querySelector('#costs-histogram');
+      histogramSpan.isYLogScale = true;
+      histogramSpan.histogram = histogram;
+      this.onBrushedBinsChanged_();
+    },
+
+    onBrushedBinsChanged_: function() {
+      var histogramSpan = this.shadowRoot.querySelector('#costs-histogram');
+      var brushedBins = histogramSpan.brushedBins;
+      var urlSet = {};
+      var mapper_name = document.body.querySelector(
+          'pi-ui-pi-app-main').currentMapTracesDriver.toLowerCase();
+      brushedBins.forEach(function(bin) {
+        bin.sourceInfos.forEach(function(sourceInfo) {
+          var modified_url = sourceInfo.traceURL;
+          if (mapper_name.indexOf('cloud') != -1) {
+            // Because the cloud mapper currently downloads all the traces, the
+            // urls are incorrect.
+            var actual_url = sourceInfo.traceURL.split('/');
+            actual_url = actual_url[actual_url.length - 1];
+            actual_url = 'gs://performance-insights/' + actual_url;
+            modified_url = actual_url;
+          }
+
+          if (!(modified_url in urlSet)) {
+            urlSet[modified_url] = {};
+          }
+          urlSet[modified_url][sourceInfo.sourceURL] = 1;
+        });
+      });
+      var keys = Object.keys(urlSet);
+      keys.sort();
+      this.$.links.setTraceUrls(keys);
+
+      var namesAndUrls = [];
+      for (var traceUrl in urlSet) {
+        var uniqueScripts = Object.keys(urlSet[traceUrl]);
+        uniqueScripts.sort();
+        for (var i in uniqueScripts) {
+          var scriptName = uniqueScripts[i];
+          if (scriptName.length > 64) {
+            scriptName = scriptName.substring(0, 64) + '...';
+          }
+          namesAndUrls.push({
+            url: traceUrl,
+            name: scriptName
+          });
+        }
+      }
+      this.$.scripts.setTraceUrlsAndNames(namesAndUrls);
+    }
+  });
+  </script>
+</polymer-element>
diff --git a/catapult/perf_insights/perf_insights/ui/reports/weather_report.html b/catapult/perf_insights/perf_insights/ui/reports/weather_report.html
deleted file mode 100644
index 994cc57..0000000
--- a/catapult/perf_insights/perf_insights/ui/reports/weather_report.html
+++ /dev/null
@@ -1,85 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/base/iteration_helpers.html">
-<link rel="import" href="/tracing/ui/base/dom_helpers.html">
-<link rel="import" href="/tracing/ui/base/overlay.html">
-<link rel="import" href="/tracing/ui/analysis/tab_view.html">
-<link rel="import" href="/perf_insights/results/results.html">
-<link rel="import" href="/perf_insights/ui/reports/pi_report.html">
-<link rel="import" href="/perf_insights/ui/reports/rail_score_report.html">
-<link rel="import" href="/perf_insights/ui/reports/coverage_report.html">
-<link rel="import" href="/perf_insights/ui/reports/slice_cost_report.html">
-<link rel="import" href="/perf_insights/ui/grouping_table.html">
-<link rel="import" href="/perf_insights/ui/generic_results_view.html">
-
-<polymer-element name="pi-ui-r-weather-report"
-    extends="pi-ui-r-pi-report"
-    map-function-href="/perf_insights/mappers/weather_report_map_function.html"
-    map-function-name="weatherReportMapFunction">
-  <template>
-    <style>
-      :host {
-        display: flex;
-        flex-direction: column;
-      }
-
-      tr-ui-a-tab-view {
-        flex: 1 1 auto;
-        min-height: 0;
-      }
-    </style>
-    <tr-ui-a-tab-view>
-      <div tab-label="Slice costs" selected>
-        <pi-ui-r-slice-cost-report class="sub-report">
-        </pi-ui-r-slice-cost-report>
-      </div>
-
-      <div tab-label="RAIL">
-        <pi-ui-r-rail-score-report class="sub-report">
-        </pi-ui-r-rail-score-report>
-      </div>
-
-      <div tab-label="Coverage">
-        <pi-ui-wr-coverage-report class="sub-report">
-        </pi-ui-wr-coverage-report>
-      </div>
-    </tr-ui-a-tab-view>
-  </template>
-  <script>
-  'use strict';
-
-  Polymer({
-    created: function() {
-      this.mapResults_ = undefined;
-    },
-
-    get mapResults() {
-      return this.mapResults_;
-    },
-
-    set mapResults(mapResults) {
-      this.mapResults_ = mapResults;
-      this.updateContents_();
-    },
-
-    updateContents_: function() {
-      var results = this.mapResults_;
-      if (!results)
-        results = new tr.r.Results();
-
-      var tabView = this.shadowRoot.querySelector('tr-ui-a-tab-view');
-      tabView.tabStripHeadingText = results.allRunInfos.length + ' traces: ';
-
-      var reports = tr.b.asArray(
-          this.shadowRoot.querySelectorAll('.sub-report'));
-      reports.forEach(function(report) {
-        report.mapResults = results;
-      });
-    }
-  });
-  </script>
-</polymer-element>
diff --git a/catapult/perf_insights/perf_insights/ui/reports/weather_report_test.html b/catapult/perf_insights/perf_insights/ui/reports/weather_report_test.html
deleted file mode 100644
index 89470c0..0000000
--- a/catapult/perf_insights/perf_insights/ui/reports/weather_report_test.html
+++ /dev/null
@@ -1,33 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2013 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/perf_insights/value/value.html">
-<link rel="import" href="/perf_insights/value/run_info.html">
-<link rel="import" href="/perf_insights/results/results.html">
-<link rel="import" href="/perf_insights/ui/reports/weather_report.html">
-
-<link rel="import" href="/tracing/base/xhr.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  var test_utils = tr.c.TestUtils;
-  test('savedData', function() {
-    // TODO(nduca): #1219, stop reading an actual wr file as our test data and
-    // use synthetic data.
-    var dataString = tr.b.getSync(
-        '/perf_insights/ui/reports/wr_result_view_test_data.json');
-    var results = pi.r.Results.fromDict(JSON.parse(dataString));
-
-    var view = document.createElement('pi-ui-r-weather-report');
-    view.mapResults = results;
-    this.addHTMLOutput(view);
-  });
-});
-</script>
-
diff --git a/catapult/perf_insights/perf_insights/ui/reports/wr_result_view_test_data.json b/catapult/perf_insights/perf_insights/ui/reports/wr_result_view_test_data.json
index 209d2d6..0c2c6e3 100644
--- a/catapult/perf_insights/perf_insights/ui/reports/wr_result_view_test_data.json
+++ b/catapult/perf_insights/perf_insights/ui/reports/wr_result_view_test_data.json
@@ -1,30 +1,6 @@
-{
-  "runs": {
-    "555521ba-d4ee-432d-a9bc-aa25cbb74c39": {
-      "url": "file:////Users/nduca/Local/measurmt-traces/desktop/v8-samples/trace_cnet.json.gz", 
-      "display_name": "trace_cnet.json.gz", 
-      "metadata": {
-        "tags": []
-      }, 
-      "type": "perf_insights.value.RunInfo", 
-      "run_id": "555521ba-d4ee-432d-a9bc-aa25cbb74c39"
-    }, 
-    "5617c958-fa57-455f-85c5-0fdbb25d456a": {
-      "url": "file:////Users/nduca/Local/measurmt-traces/desktop/v8-samples/trace_aol.json.gz", 
-      "display_name": "trace_aol.json.gz", 
-      "metadata": {
-        "tags": []
-      }, 
-      "type": "perf_insights.value.RunInfo", 
-      "run_id": "5617c958-fa57-455f-85c5-0fdbb25d456a"
-    }
-  }, 
-  "values": [
-    {
-      "important": false, 
-      "type": "dict", 
-      "name": "wr", 
-      "value": {
+[
+  {"pairs": {
+      "wr": {
         "sliceCosts": [
           {
             "cpuSelfTime": 729.1589999999819, 
@@ -35220,13 +35196,11 @@
           "unassociatedEventsCount": 64901
         }
       }, 
-      "run_id": "5617c958-fa57-455f-85c5-0fdbb25d456a"
+      "canonical_url": "trace_aol.json.gz"
+      }
     }, 
-    {
-      "important": false, 
-      "type": "dict", 
-      "name": "wr", 
-      "value": {
+    {"pairs" : {
+      "wr": {
         "sliceCosts": [
           {
             "cpuSelfTime": 788.7029999999828, 
@@ -60300,7 +60274,7 @@
           "unassociatedEventsCount": 2390
         }
       }, 
-      "run_id": "555521ba-d4ee-432d-a9bc-aa25cbb74c39"
+      "canonical_url": "trace_cnet.json.gz"
     }
-  ]
-}
\ No newline at end of file
+  }
+]
diff --git a/catapult/perf_insights/perf_insights/ui/trace_link.html b/catapult/perf_insights/perf_insights/ui/trace_link.html
index dbaf43a..6903ea0 100644
--- a/catapult/perf_insights/perf_insights/ui/trace_link.html
+++ b/catapult/perf_insights/perf_insights/ui/trace_link.html
@@ -5,6 +5,7 @@
 found in the LICENSE file.
 -->
 
+<link rel="import" href="/tracing/base/xhr.html">
 <link rel="import" href="/tracing/ui/base/ui.html">
 
 <polymer-element name="pi-ui-trace-link" is="a">
@@ -46,8 +47,20 @@
       },
 
       updateLink_: function() {
-        this.$.link.href = BASE_TRACE_VIEWER_URL + this.traceUrl_;
         this.$.link.textContent = this.traceName_;
+        var self = this;
+        this.$.link.onclick = function() {
+          tr.b.postTextAsync('/perf_insights_examples/download?url=' +
+              encodeURIComponent(self.traceUrl_)).then(
+                function(json) {
+                  var results = JSON.parse(json);
+                  if (results.success) {
+                    window.open(BASE_TRACE_VIEWER_URL + results.file, '_blank');
+                  }
+                }).catch(function(e) {
+                 throw e;
+                });
+        };
       }
   });
 })();
diff --git a/catapult/perf_insights/perf_insights/value/__init__.py b/catapult/perf_insights/perf_insights/value/__init__.py
deleted file mode 100644
index 7cf91eb..0000000
--- a/catapult/perf_insights/perf_insights/value/__init__.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# Copyright (c) 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# Simplified version of telemetry Value system, just enough for us to get
-# perf_insights up and running.
-class Value(object):
-  def __init__(self, run_info, name, units, description=None, important=False,
-               ir_stable_id=None):
-    self.run_info = run_info
-    self.name = name
-    self.units = units
-    self.description = description
-    self.important = important
-    self.ir_stable_id = ir_stable_id
-
-  def AsDict(self):
-    d = {
-      'run_id': self.run_info.run_id,
-      'name': self.name,
-      'important': self.important
-    }
-    # Only dump values if they're non-None, because Python json-ification turns
-    # these to null, instead of leaving them out.
-    if self.units is not None:
-      d['units'] = self.units
-
-    if self.description is not None:
-      d['description'] = self.description
-
-    if self.ir_stable_id is not None:
-      d['ir_stable_id'] = self.ir_stable_id
-
-    self._AsDictInto(d)
-    assert 'type' in d
-
-    return d
-
-  def _AsDictInto(self, d):
-    raise NotImplementedError()
-
-  @classmethod
-  def FromDict(cls, run_info, d):
-    assert d['run_id'] == run_info.run_id
-    if d['type'] == 'dict':
-      return DictValue.FromDict(run_info, d)
-    elif d['type'] == 'failure':
-      return FailureValue.FromDict(run_info, d)
-    elif d['type'] == 'skip':
-      return SkipValue.FromDict(run_info, d)
-    else:
-      raise NotImplementedError()
-
-
-class DictValue(Value):
-  def __init__(self,  run_info, name, value, description=None, important=False,
-               ir_stable_id=None):
-    assert isinstance(value, dict)
-    super(DictValue, self).__init__(run_info, name, units=None,
-                                    description=description,
-                                    important=important,
-                                    ir_stable_id=ir_stable_id)
-    self._value = value
-
-  def __repr__(self):
-    return '%s("%s", "%s")' % (self.__class__.__name__,
-                           self.name, self.value)
-
-  def _AsDictInto(self, d):
-    d['type'] = 'dict'
-    d['value'] = self._value
-
-  @classmethod
-  def FromDict(cls, run_info, d):
-    assert d.get('units', None) == None
-    return cls(run_info, name=d['name'],
-               description=d.get('description', None),
-               value=d['value'],
-               important=d['important'],
-               ir_stable_id=d.get('ir_stable_id', None))
-
-  @property
-  def value(self):
-      return self._value
-
-  def __getitem__(self, key):
-    return self._value[key]
-
-
-class FailureValue(Value):
-  def __init__(self, run_info, failure_type_name, description, stack,
-               important=False, ir_stable_id=None):
-    super(FailureValue, self).__init__(run_info,
-                                       name=failure_type_name,
-                                       units=None,
-                                       description=description,
-                                       important=important,
-                                       ir_stable_id=ir_stable_id)
-    assert isinstance(stack, basestring)
-    self.stack = stack
-
-  def __repr__(self):
-    return '%s("%s", "%s")' % (self.__class__.__name__,
-                           self.name, self.description)
-
-  def _AsDictInto(self, d):
-    d['type'] = 'failure'
-    d['stack_str'] = self.stack
-
-  @classmethod
-  def FromDict(cls, run_info, d):
-    assert d.get('units', None) == None
-    return cls(run_info,
-               failure_type_name=d['name'],
-               description=d.get('description', None),
-               stack=d['stack_str'],
-               important=d.get('important', False),
-               ir_stable_id=d.get('ir_stable_id', None))
-
-  def GetGTestPrintString(self):
-    return self.stack
-
-
-class SkipValue(Value):
-  def __init__(self, run_info, skipped_result_name,
-               description=None, important=False, ir_stable_id=None):
-    super(SkipValue, self).__init__(run_info,
-                                    name=skipped_result_name,
-                                    units=None,
-                                    description=description,
-                                    important=important,
-                                    ir_stable_id=ir_stable_id)
-
-  def __repr__(self):
-    return '%s("%s", "%s")' % (self.__class__.__name__,
-                               self.name, self.description)
-
-  def _AsDictInto(self, d):
-    d['type'] = 'skip'
-
-  @classmethod
-  def FromDict(cls, run_info, d):
-    assert d.get('units', None) == None
-    return cls(run_info,
-               skipped_result_name=d['name'],
-               description=d.get('description', None),
-               important=d.get('important', False),
-               ir_stable_id=d.get('ir_stable_id', None))
diff --git a/catapult/perf_insights/perf_insights/value/run_info.html b/catapult/perf_insights/perf_insights/value/run_info.html
deleted file mode 100644
index c1bd9cb..0000000
--- a/catapult/perf_insights/perf_insights/value/run_info.html
+++ /dev/null
@@ -1,62 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/base/guid.html">
-<script>
-'use strict';
-
-tr.exportTo('pi.v', function() {
-  // This value must stay sync'd with the constant of the same name
-  // in runInfo.py.
-  var PI_VALUE_RUN_INFO_ID = 'perf_insights.value.RunInfo';
-
-  function RunInfo(url, opt_displayName, opt_runId, opt_metadata) {
-    if (opt_runId !== undefined)
-      this.runId = opt_runId;
-    else
-      this.runId = 'pi.v.RunInfo-' + tr.b.GUID.allocate();
-    this.url = url;
-
-    if (opt_displayName !== undefined)
-      this.displayName = opt_displayName;
-    else
-      this.displayName = this.url;
-
-    if (opt_metadata !== undefined)
-      this.metadata = opt_metadata;
-    else
-      this.metadata = {};
-  }
-
-  RunInfo.fromDict = function(d) {
-    if (d.type !== PI_VALUE_RUN_INFO_ID)
-      throw new Error('Unsupported runInfo format: ' + d.type);
-    if (d.run_id === undefined)
-      throw new Error('Must contain run_id');
-    if (d.url === undefined)
-      throw new Error('Must contain url');
-    return new RunInfo(d.url, d.display_name, d.run_id, d.metadata);
-  };
-
-  RunInfo.prototype = {
-    asDict: function() {
-      var d = {
-        type: PI_VALUE_RUN_INFO_ID,
-        run_id: this.runId,
-        url: this.url,
-        metadata: this.metadata
-      };
-      if (this.displayName !== this.url)
-        d.display_name = this.displayName;
-      return d;
-    }
-  };
-
-  return {
-    RunInfo: RunInfo
-  };
-});
-</script>
\ No newline at end of file
diff --git a/catapult/perf_insights/perf_insights/value/run_info.py b/catapult/perf_insights/perf_insights/value/run_info.py
deleted file mode 100644
index fd74ef0..0000000
--- a/catapult/perf_insights/perf_insights/value/run_info.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright (c) 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-import uuid
-
-
-# This value must stay sync'd with the constant of the same name
-# in run_info.py.
-PI_VALUE_RUN_INFO_ID = 'perf_insights.value.RunInfo'
-
-
-class RunInfo(object):
-  def __init__(self, url, display_name=None, run_id=None, metadata=None):
-    if run_id is not None:
-      self.run_id = run_id
-    else:
-      self.run_id = str(uuid.uuid4())
-
-    self.url = url
-    self.display_name = display_name or url
-    self.metadata = metadata or {}
-
-  def AsDict(self):
-    d = {
-      'run_id': self.run_id,
-      'type': PI_VALUE_RUN_INFO_ID,
-      'url': self.url,
-      'metadata': self.metadata
-    }
-    if self.display_name != self.url:
-      d['display_name'] = self.display_name
-
-    return d
-
-  @staticmethod
-  def FromDict(d):
-    if d['type'] != PI_VALUE_RUN_INFO_ID:
-      raise Exception('Unsupported run_info format')
-    return RunInfo(d['url'],
-                   d['display_name'],
-                   run_id=d['run_id'],
-                   metadata=d.get('metadata', None))
-
diff --git a/catapult/perf_insights/perf_insights/value/value.html b/catapult/perf_insights/perf_insights/value/value.html
deleted file mode 100644
index 14525e6..0000000
--- a/catapult/perf_insights/perf_insights/value/value.html
+++ /dev/null
@@ -1,168 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/base/guid.html">
-<link rel="import" href="/tracing/base/utils.html">
-<link rel="import" href="/perf_insights/value/run_info.html">
-<script>
-'use strict';
-
-tr.exportTo('pi.v', function() {
-  function Value(runInfo, name, opt_options) {
-    var options = opt_options || {};
-    this.guid = tr.b.GUID.allocate();
-    this.runInfo = runInfo;
-    this.name = name;
-    this.units = options.units;
-    this.description = options.description;
-    this.important = options.important !== undefined ?
-        options.important : false;
-    this.irStableID = options.irStableID;
-  }
-
-  Value.fromDict = function(runInfo, d) {
-    if (d.run_id !== runInfo.runId)
-       throw new Error('run_ids mismatch');
-
-    if (d.type === 'dict')
-      return DictValue.fromDict(runInfo, d);
-
-    if (d.type == 'failure')
-      return FailureValue.fromDict(runInfo, d);
-
-    if (d.type === 'skip')
-      return SkipValue.fromDict(runInfo, d);
-
-    throw new Error('Not implemented');
-  };
-
-  Value.prototype = {
-    asDict: function() {
-      return this.asJSON();
-    },
-    asJSON: function() {
-      var d = {
-        'run_id': this.runInfo.runId,
-        'name': this.name,
-        'units': this.units,
-        'description': this.description,
-        'important': this.important,
-        'ir_stable_id': this.irStableID
-      };
-      this._asDictInto(d);
-      if (d.type === undefined)
-        throw new Error('_asDictInto must set type field');
-      return d;
-    },
-
-    _asDictInto: function(d) {
-      throw new Error('Not implemented');
-    }
-  };
-
-
-  function DictValue(runInfo, name, value, opt_options) {
-    var options = opt_options || {};
-    Value.call(this, runInfo, name, options);
-    this.value = value;
-  }
-
-  DictValue.fromDict = function(runInfo, d) {
-    if (d.units !== undefined)
-      throw new Error('Expected units to be undefined');
-    if (d.value === undefined)
-      throw new Error('Expected value to be provided');
-    return new DictValue(runInfo, d.name, d.value, d);
-  }
-
-  DictValue.prototype = {
-    __proto__: Value.prototype,
-
-    _asDictInto: function(d) {
-      d.type = 'dict';
-      d.value = this.value;
-    }
-  };
-
-
-  function FailureValue(runInfo, name, opt_options) {
-    var options = opt_options || {};
-
-    var stack;
-    if (options.stack === undefined) {
-      if (options.stack_str === undefined) {
-        throw new Error('Expected stack_str or stack to be provided');
-      } else {
-        stack = options.stack_str;
-      }
-    } else {
-      stack = options.stack;
-    }
-
-    if (typeof stack !== 'string')
-      throw new Error('stack must be provided as a string');
-
-    Value.call(this, runInfo, name, options);
-    this.stack = stack;
-  }
-
-  FailureValue.fromError = function(runInfo, e) {
-    var ex = tr.b.normalizeException(e);
-    return new FailureValue(runInfo,
-                            ex.typeName,
-                            {description: ex.message,
-                             stack: ex.stack});
-
-  }
-
-  FailureValue.fromDict = function(runInfo, d) {
-    if (d.units !== undefined)
-      throw new Error('Expected units to be undefined');
-    if (d.name === undefined)
-      throw new Error('Expected stack_str to be provided');
-    return new FailureValue(runInfo, d.name, d);
-  }
-
-  FailureValue.prototype = {
-    __proto__: Value.prototype,
-
-    _asDictInto: function(d) {
-      d.type = 'failure';
-      d.stack_str = this.stack;
-    }
-  };
-
-
-  function SkipValue(runInfo, name, opt_options) {
-    var options = opt_options || {};
-    Value.call(this, runInfo, name, options);
-  }
-
-  SkipValue.fromDict = function(runInfo, d) {
-    if (d.units !== undefined)
-      throw new Error('Expected units to be undefined');
-    if (d.name === undefined)
-      throw new Error('Expected name to be provided');
-    return new SkipValue(runInfo, d.name, d);
-  }
-
-  SkipValue.prototype = {
-    __proto__: Value.prototype,
-
-    _asDictInto: function(d) {
-      d.type = 'skip';
-    }
-  };
-
-
-  return {
-    Value: Value,
-    DictValue: DictValue,
-    FailureValue: FailureValue,
-    SkipValue: SkipValue
-  };
-});
-</script>
diff --git a/catapult/perf_insights/perf_insights/value/value_test.html b/catapult/perf_insights/perf_insights/value/value_test.html
deleted file mode 100644
index 0c7bc60..0000000
--- a/catapult/perf_insights/perf_insights/value/value_test.html
+++ /dev/null
@@ -1,56 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2013 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/utils.html">
-<link rel="import" href="/perf_insights/value/value.html">
-<link rel="import" href="/perf_insights/value/run_info.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  test('dictValueBasic', function() {
-    var runInfo = new pi.v.RunInfo('my_test.json');
-    var v = new pi.v.DictValue(runInfo, 'MyFailure',
-                               {my_key: 'my_value'});
-    var d = v.asDict();
-
-    var v2 = pi.v.Value.fromDict(runInfo, d);
-    assert.instanceOf(v2, pi.v.DictValue);
-    assert.equal(v.name, v2.name);
-    assert.deepEqual(v.value, v2.value);
-  });
-
-  test('failureValueBasic', function() {
-    var runInfo = new pi.v.RunInfo('my_test.json');
-    var v = new pi.v.FailureValue(runInfo, 'MyFailure',
-                                  {description: 'Description',
-                                   stack: tr.b.stackTraceAsString()});
-    var d = v.asDict();
-
-    var v2 = pi.v.Value.fromDict(runInfo, d);
-    assert.instanceOf(v2, pi.v.FailureValue);
-    assert.equal(v.name, v2.name);
-    assert.equal(v.description, v2.description);
-    assert.equal(v.stack, v2.stack);
-  });
-
-  test('skipValueBasic', function() {
-    var runInfo = new pi.v.RunInfo('my_test.json');
-    var v = new pi.v.SkipValue(runInfo, 'MySkip',
-                                  {description: 'WhySkipped'});
-    var d = v.asDict();
-
-    var v2 = pi.v.Value.fromDict(runInfo, d);
-    assert.instanceOf(v2, pi.v.SkipValue);
-    assert.equal(v.name, v2.name);
-    assert.equal(v.description, v2.description);
-  });
-
-});
-
-</script>
\ No newline at end of file
diff --git a/catapult/perf_insights/perf_insights/value/value_unittest.py b/catapult/perf_insights/perf_insights/value/value_unittest.py
deleted file mode 100644
index 28e060e..0000000
--- a/catapult/perf_insights/perf_insights/value/value_unittest.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright (c) 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-import unittest
-
-from perf_insights import value as value_module
-from perf_insights.value import run_info as run_info_module
-
-class ValueTests(unittest.TestCase):
-  def testDict(self):
-    run_info = run_info_module.RunInfo('file:///a.json', '/a.json',
-                                       metadata={'m': 1})
-    d = {
-      'run_id': run_info.run_id,
-      'type': 'dict',
-      'name': 'MyDictValue',
-      'important': False,
-      'value': {'a': 1, 'b': 'b'}
-    }
-    v = value_module.Value.FromDict(run_info, d)
-    self.assertTrue(isinstance(v, value_module.DictValue))
-    d2 = v.AsDict()
-
-    self.assertEquals(d, d2)
-
-
-  def testFailure(self):
-    run_info = run_info_module.RunInfo('file:///a.json', '/a.json',
-                                       metadata={'m': 1})
-
-    d = {
-      'run_id': run_info.run_id,
-      'type': 'failure',
-      'name': 'Error',
-      'important': False,
-      'description': 'Some error message',
-      'stack_str': 'Some stack string'
-    }
-    v = value_module.Value.FromDict(run_info, d)
-    self.assertTrue(isinstance(v, value_module.FailureValue))
-    d2 = v.AsDict()
-
-    self.assertEquals(d, d2)
\ No newline at end of file
diff --git a/catapult/perf_insights/perf_insights_build/__init__.py b/catapult/perf_insights/perf_insights_build/__init__.py
index 890de61..987a3e1 100644
--- a/catapult/perf_insights/perf_insights_build/__init__.py
+++ b/catapult/perf_insights/perf_insights_build/__init__.py
@@ -1,9 +1,6 @@
 # Copyright (c) 2015 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
-import os
-import sys
-
 
 import perf_insights_project
 perf_insights_project.UpdateSysPathIfNeeded()
diff --git a/catapult/perf_insights/perf_insights_build/perf_insights_dev_server_config.py b/catapult/perf_insights/perf_insights_build/perf_insights_dev_server_config.py
index 346b30e..3afeb51 100644
--- a/catapult/perf_insights/perf_insights_build/perf_insights_dev_server_config.py
+++ b/catapult/perf_insights/perf_insights_build/perf_insights_dev_server_config.py
@@ -2,22 +2,28 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+import datetime
 import json
 import os
-import sys
+import tempfile
+import time
+import urllib
+import urllib2
 
 import perf_insights_project
 
 import webapp2
 from webapp2 import Route
 
+from perf_insights import cloud_storage
 from perf_insights import local_directory_corpus_driver
 from perf_insights import corpus_query
 from perf_insights import map_runner
-from perf_insights import function_handle
-from perf_insights import progress_reporter
+from perf_insights.mre import job as job_module
 from perf_insights.results import json_output_formatter
 
+MAX_TRACES = 10000
+
 
 def _RelPathToUnixPath(p):
   return p.replace(os.sep, '/')
@@ -37,15 +43,18 @@
 class RunMapFunctionHandler(webapp2.RequestHandler):
 
   def post(self, *args, **kwargs):  # pylint: disable=unused-argument
-    handle_dict = json.loads(self.request.body)
+    job_dict = json.loads(self.request.body)
 
-    map_function_handle = function_handle.FunctionHandle.FromDict(handle_dict)
-    handle_with_filenames = map_function_handle.ConvertHrefsToAbsFilenames(
-        self.app)
+    job = job_module.Job.FromDict(job_dict)
+
+    job_with_filenames = job_module.Job(
+        job.map_function_handle.ConvertHrefsToAbsFilenames(self.app),
+        job.reduce_function_handle.ConvertHrefsToAbsFilenames(self.app)
+            if job.reduce_function_handle else None)
 
     corpus_driver = local_directory_corpus_driver.LocalDirectoryCorpusDriver(
-        trace_directory = kwargs.pop('_pi_data_dir'),
-        url_resolver = self.app.GetURLForAbsFilename)
+        trace_directory=kwargs.pop('_pi_data_dir'),
+        url_resolver=self.app.GetURLForAbsFilename)
 
     # TODO(nduca): pass self.request.params to the map function [maybe].
     query_string = self.request.get('corpus_query', 'True')
@@ -53,20 +62,129 @@
 
     trace_handles = corpus_driver.GetTraceHandlesMatchingQuery(query)
 
-    self._RunMapper(trace_handles, handle_with_filenames)
+    self._RunMapper(trace_handles, job_with_filenames)
 
-
-  def _RunMapper(self, trace_handles, map_function_handle):
+  # TODO(eakuefner): Rename this and other things that assume we only have map
+  def _RunMapper(self, trace_handles, job):
     self.response.content_type = 'application/json'
     output_formatter = json_output_formatter.JSONOutputFormatter(
         self.response.out)
 
-    runner = map_runner.MapRunner(trace_handles, map_function_handle,
+    runner = map_runner.MapRunner(trace_handles, job,
                                   jobs=map_runner.AUTO_JOB_COUNT,
                                   output_formatters=[output_formatter])
     runner.Run()
 
 
+class RunDownloadHandler(webapp2.RequestHandler):
+
+  def post(self, *args, **kwargs):  # pylint: disable=unused-argument
+    self.response.content_type = 'application/json'
+
+    url = self.request.get('url', 'True')
+
+    # Doesn't need to be downloaded since it's not a cloud file, just return
+    # true and the path to the file.
+    if not 'gs://' in url:
+      self.response.write(json.dumps({'success': True, 'file': url}))
+      return
+
+    output_name = os.path.join(kwargs.pop('_pi_data_dir'), url.split('/')[-1])
+
+    try:
+      print 'Downloading: %s' % url
+      cloud_storage.Copy(url, output_name)
+    except cloud_storage.CloudStorageError:
+      print ' -> Failed to download: %s' % url
+      self.response.write(json.dumps({'success': False}))
+      return
+
+    output_name = os.path.join('/perf_insights/test_data', url.split('/')[-1])
+
+    self.response.write(json.dumps({'success': True, 'file': output_name}))
+
+
+class RunCloudMapperHandler(webapp2.RequestHandler):
+
+  def post(self, *args, **kwargs):  # pylint: disable=unused-argument
+    job_dict = json.loads(self.request.body)
+
+    job = job_module.Job.FromDict(job_dict)
+
+    job_with_filenames = job_module.Job(
+        job.map_function_handle.ConvertHrefsToAbsFilenames(self.app),
+        job.reduce_function_handle.ConvertHrefsToAbsFilenames(self.app))
+
+    mapper_handle = job_with_filenames.map_function_handle
+    reducer_handle = job_with_filenames.reduce_function_handle
+    with open(mapper_handle.modules_to_load[0].filename, 'r') as f:
+      mapper = f.read()
+    with open(reducer_handle.modules_to_load[0].filename, 'r') as f:
+      reducer = f.read()
+    mapper_name = job_with_filenames.map_function_handle.function_name
+    reducer_name = job_with_filenames.reduce_function_handle.function_name
+
+    query_string = self.request.get('corpus_query', 'True')
+    query = corpus_query.CorpusQuery.FromString(query_string)
+    if query.max_trace_handles > MAX_TRACES:
+      print 'Capping query at %d' % MAX_TRACES
+      query.max_trace_handles = MAX_TRACES
+    query_string = query.AsQueryString()
+
+    params = urllib.urlencode({
+        'query': query_string,
+        'mapper': mapper,
+        'mapper_function': mapper_name,
+        'reducer': reducer,
+        'reducer_function': reducer_name,
+        'revision': 'HEAD',
+        'corpus': 'https://performance-insights.appspot.com',
+        'timeout': 240,
+        'function_timeout': 120
+        })
+
+    cloud_mapper_url = 'https://performance-insights.appspot.com'
+    if self.request.get('local') == 'true':
+      cloud_mapper_url = 'http://localhost:8080'
+    create_url = '%s/cloud_mapper/create' % cloud_mapper_url
+
+    response = urllib2.urlopen(create_url, data=params)
+
+    response_data = response.read()
+    print response_data
+    results = json.loads(response_data)
+    if results['status']:
+      jobid = results['jobid']
+
+      status_url = '%s/cloud_mapper/status?jobid=%s' % (cloud_mapper_url, jobid)
+      start_time = datetime.datetime.now()
+      while datetime.datetime.now() - start_time < datetime.timedelta(
+          seconds=300):
+        time.sleep(1)
+        print 'Waiting for results.'
+        response = urllib2.urlopen(status_url)
+        results = json.loads(response.read())
+        if results['status'] == 'COMPLETE':
+          print 'Mapping complete. Downloading results.'
+          output_handle, output_name = tempfile.mkstemp()
+
+          try:
+            print '  -> %s' % results['data']
+            cloud_storage.Copy(results['data'], output_name)
+          except cloud_storage.CloudStorageError as e:
+            print 'Cloud storage error: %s' % str(e)
+            return
+
+          map_results = ''
+          with open(output_name, 'r') as f:
+            map_results = f.read()
+          os.close(output_handle)
+          self.response.write(map_results)
+          total_time = datetime.datetime.now() - start_time
+          print 'Time taken: %ss' % total_time.total_seconds()
+          print map_results[:128]
+          return
+
 class PerfInsightsDevServerConfig(object):
   def __init__(self):
     self.project = perf_insights_project.PerfInsightsProject()
@@ -89,6 +207,18 @@
             defaults={
               '_pi_data_dir':
                   os.path.abspath(os.path.expanduser(args.pi_data_dir))
+            }),
+      Route('/perf_insights_examples/run_cloud_mapper',
+            RunCloudMapperHandler,
+            defaults={
+              '_pi_data_dir':
+                  os.path.abspath(os.path.expanduser(args.pi_data_dir))
+            }),
+      Route('/perf_insights_examples/download',
+            RunDownloadHandler,
+            defaults={
+              '_pi_data_dir':
+                  os.path.abspath(os.path.expanduser(args.pi_data_dir))
             })
     ]
 
diff --git a/catapult/perf_insights/perf_insights_build/pi_report_to_html.html b/catapult/perf_insights/perf_insights_build/pi_report_to_html.html
index b15241d..75db3c2 100644
--- a/catapult/perf_insights/perf_insights_build/pi_report_to_html.html
+++ b/catapult/perf_insights/perf_insights_build/pi_report_to_html.html
@@ -4,7 +4,7 @@
 Use of this source code is governed by a BSD-style license that can be
 found in the LICENSE file.
 -->
-<link rel="import" href="/perf_insights/results/results.html">
+<link rel="import" href="/perf_insights/mre/mre_result.html">
 <link rel="import" href="/tracing/ui/base/overlay.html">
 <script>
 'use strict';
@@ -54,7 +54,9 @@
 
     var results;
     try {
-      results = pi.r.Results.fromDict(data);
+      results = data.map(function(rd) {
+        return pi.mre.MreResult.fromDict(rd);
+      });
     } catch (ex) {
       var overlay = new tr.ui.b.Overlay();
       overlay.title = 'Results.fromDict error';
@@ -67,6 +69,6 @@
 
   return {
     initPiReportNamed: initPiReportNamed
-  }
+  };
 });
 </script>
diff --git a/catapult/perf_insights/perf_insights_build/pi_report_to_html.py b/catapult/perf_insights/perf_insights_build/pi_report_to_html.py
index ebd40ea..c9dfbbd 100644
--- a/catapult/perf_insights/perf_insights_build/pi_report_to_html.py
+++ b/catapult/perf_insights/perf_insights_build/pi_report_to_html.py
@@ -5,7 +5,6 @@
 import codecs
 import os
 import sys
-import traceback
 import json
 
 from perf_insights import corpus_driver_cmdline
@@ -13,9 +12,8 @@
 from perf_insights import function_handle
 from perf_insights import map_runner
 from perf_insights import progress_reporter as progress_reporter_module
-from perf_insights.results import json_output_formatter
+from perf_insights.mre import job as job_module
 from py_vulcanize import generate
-import perf_insights
 import perf_insights_project
 import bs4
 
@@ -85,18 +83,19 @@
   module = function_handle.ModuleToLoad(filename=map_file)
   map_function_handle = function_handle.FunctionHandle([module],
                                                        map_function_name)
+  job = job_module.Job(map_function_handle, None)
 
   if map_file == None:
     raise Exception('Could not find %s' % map_function_href)
 
-  results = _MapTraces(corpus_driver, map_function_handle, query, stop_on_error,
+  results = _MapTraces(corpus_driver, job, query, stop_on_error,
                        jobs, quiet)
   if stop_on_error and results.had_failures:
-    sys.stderr.write('There were mapping errors. Aborting.');
+    sys.stderr.write('There were mapping errors. Aborting.')
     return 255
 
   if json_output:
-    json.dump(results.AsDict(), ofile, indent=2)
+    json.dump([result.AsDict() for result in results], ofile, indent=2)
   else:
     WriteResultsToFile(ofile, project,
                        pi_report_file, pi_report_element_name,
@@ -104,14 +103,14 @@
   return 0
 
 
-def _MapTraces(corpus_driver, map_function_handle, query, stop_on_error=False,
+def _MapTraces(corpus_driver, job, query, stop_on_error=False,
                jobs=1, quiet=False):
   trace_handles = corpus_driver.GetTraceHandlesMatchingQuery(query)
   if quiet:
     alt_progress_reporter = progress_reporter_module.ProgressReporter()
   else:
     alt_progress_reporter = None
-  runner = map_runner.MapRunner(trace_handles, map_function_handle,
+  runner = map_runner.MapRunner(trace_handles, job,
                   stop_on_error=stop_on_error,
                   progress_reporter=alt_progress_reporter,
                   jobs=jobs)
@@ -132,7 +131,7 @@
 
   load_sequence = vulcanizer.CalcLoadSequenceForModules(modules)
 
-  results_string = json.dumps(results.AsDict())
+  results_string = json.dumps([result.AsDict() for result in results])
 
   bootstrap_script = generate.ExtraScript(text_content="""
     document.addEventListener('DOMContentLoaded', function() {
diff --git a/catapult/perf_insights/perf_insights_build/pi_report_to_html_unittest.py b/catapult/perf_insights/perf_insights_build/pi_report_to_html_unittest.py
index 92fd283..a848b1a 100644
--- a/catapult/perf_insights/perf_insights_build/pi_report_to_html_unittest.py
+++ b/catapult/perf_insights/perf_insights_build/pi_report_to_html_unittest.py
@@ -13,7 +13,7 @@
 import perf_insights_project
 
 
-class TestArgs:
+class TestArgs(object):
   def __init__(self, trace_directory):
     self.corpus = 'local-directory'
     self.trace_directory = trace_directory
@@ -36,7 +36,7 @@
         res = pi_report_to_html.PiReportToHTML(
             tmpfile, corpus_driver,
             project.GetAbsPathFromHRef(
-                '/perf_insights/ui/reports/weather_report.html'),
+                '/perf_insights/ui/reports/startup_report.html'),
             corpus_query.CorpusQuery.FromString('MAX_TRACE_HANDLES=2'),
             quiet=True)
         self.assertEquals(res, 0)
diff --git a/catapult/perf_insights/perf_insights_build/run_vinn_tests.py b/catapult/perf_insights/perf_insights_build/run_vinn_tests.py
index 21418d5..68c8264 100644
--- a/catapult/perf_insights/perf_insights_build/run_vinn_tests.py
+++ b/catapult/perf_insights/perf_insights_build/run_vinn_tests.py
@@ -18,26 +18,30 @@
 
 def RunTests():
   project = perf_insights_project.PerfInsightsProject()
-  d8_test_module_filenames = ['/' + _RelPathToUnixPath(x)
-                              for x in project.FindAllD8TestModuleRelPaths()]
-  d8_test_module_filenames.sort()
+  headless_test_module_filenames = [
+      '/' + _RelPathToUnixPath(x)
+      for x in project.FindAllD8TestModuleRelPaths()]
+  headless_test_module_filenames.sort()
 
   cmd = """
-  loadHTML('/tracing/base/d8_tests.html');
+  HTMLImportsLoader.loadHTML('/tracing/base/headless_tests.html');
+  tr.b.unittest.loadAndRunTests(sys.argv.slice(1));
   """
   res = vinn.RunJsString(
     cmd, source_paths=list(project.source_paths),
-    js_args=d8_test_module_filenames, stdout=sys.stdout, stdin=sys.stdin)
+    js_args=headless_test_module_filenames, stdout=sys.stdout, stdin=sys.stdin)
   return res.returncode
 
+
 def Main(argv):
   parser = argparse.ArgumentParser(
       description='Run d8 tests.')
   parser.add_argument(
-    '--no-install-hooks', dest='install_hooks', action='store_false')
+      '--no-install-hooks', dest='install_hooks', action='store_false')
   parser.set_defaults(install_hooks=True)
   args = parser.parse_args(argv[1:])
   if args.install_hooks:
     install.InstallHooks()
 
-  sys.exit(RunTests())
\ No newline at end of file
+  sys.exit(RunTests())
+
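
_RelPathToUnixPath is an existing helper that this hunk only references; the sketch below assumes it simply normalizes OS path separators, and shows what the module list handed to vinn as js_args ends up looking like, which is what sys.argv.slice(1) picks up inside headless_tests.html.

```
import os


def _RelPathToUnixPath(rel_path):
  # Assumed behaviour of the existing helper: URL-style forward slashes.
  return rel_path.replace(os.sep, '/')


# Hypothetical relative paths of the kind FindAllD8TestModuleRelPaths returns.
rel_paths = [os.path.join('perf_insights', 'mre', 'mre_result_test.html')]
headless_test_module_filenames = [
    '/' + _RelPathToUnixPath(x) for x in rel_paths]
headless_test_module_filenames.sort()
print(headless_test_module_filenames)
# ['/perf_insights/mre/mre_result_test.html']
```
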
diff --git a/catapult/perf_insights/perf_insights_examples/map_process_count.html b/catapult/perf_insights/perf_insights_examples/map_process_count.html
index 3ba86bd..ee95c66 100644
--- a/catapult/perf_insights/perf_insights_examples/map_process_count.html
+++ b/catapult/perf_insights/perf_insights_examples/map_process_count.html
@@ -6,16 +6,16 @@
 -->
 
 <link rel="import" href="/perf_insights/function_handle.html">
-<link rel="import" href="/perf_insights/value/value.html">
 
 <script>
+'use strict';
+
 tr.exportTo('pie', function() {
-  function mapProcessCount(results, runInfo, model) {
-    results.addValue(new pi.v.DictValue(
-        runInfo,
+  function mapProcessCount(result, model) {
+    result.addPair(
         'load_info', {
           numProcesses: model.getAllProcesses().length
-        }));
+        });
   }
 
   pi.FunctionRegistry.register(mapProcessCount);
diff --git a/catapult/perf_insights/perf_insights_examples/map_process_count_test.html b/catapult/perf_insights/perf_insights_examples/map_process_count_test.html
index 01d73cc..d8e88bd 100644
--- a/catapult/perf_insights/perf_insights_examples/map_process_count_test.html
+++ b/catapult/perf_insights/perf_insights_examples/map_process_count_test.html
@@ -5,10 +5,9 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/perf_insights/value/value.html">
-<link rel="import" href="/perf_insights/value/run_info.html">
-<link rel="import" href="/perf_insights/results/results.html">
+<link rel="import" href="/perf_insights/mre/mre_result.html">
 <link rel="import" href="/perf_insights_examples/map_process_count.html">
+<link rel="import" href="/tracing/base/iteration_helpers.html">
 <link rel="import" href="/tracing/core/test_utils.html">
 
 <script>
@@ -26,13 +25,12 @@
       }));
     });
 
-    var results = new pi.r.Results();
-    var runInfo = new pi.v.RunInfo('my_test.json');
-    pie.mapProcessCountForTest(results, runInfo, m);
+    var result = new pi.mre.MreResult();
+    pie.mapProcessCountForTest(result, m);
 
-    assert.equal(results.allValues.length, 1);
-    assert.isTrue(results.allValues[0] instanceof pi.v.DictValue);
+    assert.equal(tr.b.dictionaryLength(result.pairs), 1);
+    assert.equal(result.failures.length, 0);
   });
 });
 
-</script>
\ No newline at end of file
+</script>
diff --git a/catapult/perf_insights/perf_insights_examples/map_startup_info.html b/catapult/perf_insights/perf_insights_examples/map_startup_info.html
index 39658a9..4e7500f 100644
--- a/catapult/perf_insights/perf_insights_examples/map_startup_info.html
+++ b/catapult/perf_insights/perf_insights_examples/map_startup_info.html
@@ -6,21 +6,23 @@
 -->
 
 <link rel="import" href="/perf_insights/function_handle.html">
-<link rel="import" href="/perf_insights/value/value.html">
 <link rel="import" href="/tracing/base/range.html">
-<link rel="import" href="/tracing/extras/chrome/chrome_browser_helper.html">
-<link rel="import" href="/tracing/extras/chrome/chrome_renderer_helper.html">
+<link rel="import" href="/tracing/model/helpers/chrome_browser_helper.html">
+<link rel="import" href="/tracing/model/helpers/chrome_renderer_helper.html">
+<link rel="import" href="/tracing/value/value.html">
 
 <script>
+'use strict';
+
 tr.exportTo('pie', function() {
-  function mapStartupInfo(results, runInfo, model) {
-    var startupIRs = model.interactionRecords.filter(function(ir) {
-      return ir instanceof tr.e.rail.LoadInteractionRecord &&
-             ir.name === 'Startup';
+  function mapStartupInfo(result, model) {
+    var startupIRs = model.userModel.expectations.filter(function(ir) {
+      return ir instanceof tr.model.um.LoadExpectation &&
+             ir.initiatorTitle === tr.model.um.LOAD_SUBTYPE_NAMES.STARTUP;
     });
     // Find the Startup IRs in the browser and renderer processes.
-    var ChromeBrowserHelper = tr.e.audits.ChromeBrowserHelper;
-    var ChromeRendererHelper = tr.e.audits.ChromeRendererHelper;
+    var ChromeBrowserHelper = tr.model.helpers.ChromeBrowserHelper;
+    var ChromeRendererHelper = tr.model.helpers.ChromeRendererHelper;
     var browser_startup = new tr.b.Range();
     var renderer_startup = new tr.b.Range();
     startupIRs.forEach(function(ir) {
@@ -37,15 +39,14 @@
     });
 
     if (browser_startup.isEmpty && renderer_startup.isEmpty) {
-      results.addValue(new pi.v.SkipValue(runInfo, 'startup_info'));
+      result.addPair('startup_info', null);
     } else {
-      results.addValue(new pi.v.DictValue(
-          runInfo,
+      result.addPair(
           'startup_info',
           {
             'browserStartup': browser_startup,
             'rendererStartup': renderer_startup
-          }));
+          });
     }
   }
 
diff --git a/catapult/perf_insights/perf_insights_examples/map_startup_info_test.html b/catapult/perf_insights/perf_insights_examples/map_startup_info_test.html
index 2a7bb88..74efb10 100644
--- a/catapult/perf_insights/perf_insights_examples/map_startup_info_test.html
+++ b/catapult/perf_insights/perf_insights_examples/map_startup_info_test.html
@@ -5,13 +5,12 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/perf_insights/value/value.html">
-<link rel="import" href="/perf_insights/value/run_info.html">
-<link rel="import" href="/perf_insights/results/results.html">
+<link rel="import" href="/perf_insights/mre/mre_result.html">
 <link rel="import" href="/perf_insights_examples/map_startup_info.html">
+<link rel="import" href="/tracing/base/iteration_helpers.html">
 <link rel="import" href="/tracing/core/test_utils.html">
-<link rel="import" href="/tracing/extras/rail/load_interaction_record.html">
 <link rel="import" href="/tracing/model/thread_slice.html">
+<link rel="import" href="/tracing/model/user_model/load_expectation.html">
 
 <script>
 'use strict';
@@ -38,33 +37,35 @@
       t2_s2.parentContainer = t2;
       t2.sliceGroup.pushSlice(t2_s1);
       t2.sliceGroup.pushSlice(t2_s2);
-      var loadIr = new tr.e.rail.LoadInteractionRecord(m, 15, 45);
-      loadIr.name = 'Startup';
-      m.interactionRecords.push(loadIr);
+      var loadIr = new tr.model.um.LoadExpectation(
+          m, tr.model.um.LOAD_SUBTYPE_NAMES.STARTUP, 15, 45);
+      m.userModel.expectations.push(loadIr);
       loadIr.associatedEvents.push(t2_s1);
       loadIr.associatedEvents.push(t2_s2);
     });
 
-    var results = new pi.r.Results();
-    var runInfo = new pi.v.RunInfo('my_test.json');
-    pie.mapStartupInfoForTest(results, runInfo, m);
+    var result = new pi.mre.MreResult();
+    pie.mapStartupInfoForTest(result, m);
 
-    assert.equal(results.allValues.length, 1);
-    assert.equal(results.allValues[0].value.browserStartup.min, 10);
-    assert.equal(results.allValues[0].value.browserStartup.max, 80);
-    assert.isFalse(results.allValues[0].value.browserStartup.isEmpty);
-    assert.isTrue(results.allValues[0].value.rendererStartup.isEmpty);
+    assert.equal(tr.b.dictionaryLength(result.pairs), 1);
+    assert.equal(result.failures.length, 0);
+
+    var startupInfo = result.pairs.startup_info;
+
+    assert.equal(startupInfo.browserStartup.min, 10);
+    assert.equal(startupInfo.browserStartup.max, 80);
+    assert.isFalse(startupInfo.browserStartup.isEmpty);
+    assert.isTrue(startupInfo.rendererStartup.isEmpty);
   });
 
   test('mapperTestEmptyTrace', function() {
     var m = test_utils.newModel();
 
-    var results = new pi.r.Results();
-    var runInfo = new pi.v.RunInfo('my_test.json');
-    pie.mapStartupInfoForTest(results, runInfo, m);
+    var result = new pi.mre.MreResult();
+    pie.mapStartupInfoForTest(result, m);
 
-    assert.equal(results.allValues.length, 1);
-    assert.isTrue(results.allValues[0] instanceof pi.v.SkipValue);
+    assert.equal(tr.b.dictionaryLength(result.pairs), 1);
+    assert.equal(result.pairs.startup_info, null);
   });
 
 });
diff --git a/catapult/perf_insights/perf_insights_examples/perf_insights_viewer.html b/catapult/perf_insights/perf_insights_examples/perf_insights_viewer.html
index c364754..c7317b1 100644
--- a/catapult/perf_insights/perf_insights_examples/perf_insights_viewer.html
+++ b/catapult/perf_insights/perf_insights_examples/perf_insights_viewer.html
@@ -30,34 +30,24 @@
 </style>
 
 <link rel="import" href="/components/polymer/polymer.html">
+<link rel="import" href="/perf_insights/ui/corpus_drivers.html">
 <link rel="import" href="/perf_insights/ui/perf_insights_full_config.html">
 <link rel="import" href="/perf_insights/ui/pi_app_main.html">
-<link rel="import" href="/tracing/base/xhr.html">
-<link rel="import" href="/perf_insights/function_handle.html">
 
 </head>
 <body>
+
+  <pi-driver-cloudmapper id="pi-driver-cloudmapper">
+  </pi-driver-cloudmapper>
+
+  <pi-driver-devserver id="pi-driver-devserver">
+  </pi-driver-devserver>
+
+  <pi-driver-localcloudmapper id="pi-driver-localcloudmapper">
+  </pi-driver-localcloudmapper>
+
   <pi-ui-pi-app-main>
   </pi-ui-pi-app-main>
 
-  <script>
-  'use strict';
-
-  var g_appMain;
-
-  function domContentLoaded() {
-    function devServerDriver(mapFunctionHandle, corpusQuery) {
-      return tr.b.postAsync(
-          '/perf_insights_examples/run_map_function?corpus_query=' +
-          encodeURIComponent(corpusQuery),
-          JSON.stringify(mapFunctionHandle.asDict()));
-    }
-
-    g_appMain = document.body.querySelector('pi-ui-pi-app-main');
-    g_appMain.mapTracesDrivers = [devServerDriver];
-  }
-
-  document.addEventListener('DOMContentLoaded', domContentLoaded);
-  </script>
 </body>
 </html>
diff --git a/catapult/perf_insights/perf_insights_examples/trace_viewer.html b/catapult/perf_insights/perf_insights_examples/trace_viewer.html
index a07114d..3c35820 100644
--- a/catapult/perf_insights/perf_insights_examples/trace_viewer.html
+++ b/catapult/perf_insights/perf_insights_examples/trace_viewer.html
@@ -13,7 +13,7 @@
 
 <link rel="import" href="/components/polymer/polymer.html">
 <link rel="import" href="/tracing/base/xhr.html">
-<link rel="import" href="/tracing/base/time_function.html">
+<link rel="import" href="/tracing/base/timing.html">
 <link rel="import" href="/tracing/importer/import.html">
 <link rel="import" href="/tracing/ui/extras/full_config.html">
 <link rel="import" href="/tracing/ui/timeline_view.html">
diff --git a/catapult/perf_insights/pylintrc b/catapult/perf_insights/pylintrc
new file mode 100644
index 0000000..72cd736
--- /dev/null
+++ b/catapult/perf_insights/pylintrc
@@ -0,0 +1,73 @@
+[MESSAGES CONTROL]
+
+# Disable the message, report, category or checker with the given id(s).
+# TODO: Shrink this list to as small as possible.
+disable=
+  design,
+  similarities,
+
+  abstract-class-not-used,
+  bad-builtin,
+  bad-continuation,
+  broad-except,
+  eval-used,
+  fixme,
+  import-error,
+  invalid-name,
+  locally-disabled,
+  logging-not-lazy,
+  missing-docstring,
+  no-init,
+  no-member,
+  no-self-use,
+  protected-access,
+  star-args,
+
+
+[REPORTS]
+
+# Don't write out full reports, just messages.
+reports=no
+
+
+[BASIC]
+
+# Regular expression which should only match correct function names.
+function-rgx=^(?:(?P<exempt>setUp|tearDown|setUpModule|tearDownModule)|(?P<camel_case>_?[A-Z][a-zA-Z0-9]*))$
+
+# Regular expression which should only match correct method names.
+method-rgx=^(?:(?P<exempt>_[a-z0-9_]+__|get|post|runTest|setUp|tearDown|setUpTestCase|tearDownTestCase|setupSelf|tearDownClass|setUpClass)|(?P<camel_case>(_{0,2}|test|assert)[A-Z][a-zA-Z0-9_]*))$
+
+# Regular expression which should only match correct argument names.
+argument-rgx=^[a-z][a-z0-9_]*$
+
+# Regular expression which should only match correct variable names.
+variable-rgx=^[a-z][a-z0-9_]*$
+
+# Good variable names which should always be accepted, separated by a comma.
+good-names=main,_
+
+# List of builtins function names that should not be used, separated by a comma.
+bad-functions=apply,input,reduce
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# A regular expression matching names used for dummy variables (i.e. not used).
+dummy-variables-rgx=^\*{0,2}(_$|unused_)
+
+
+[TYPECHECK]
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+
+[FORMAT]
+
+# We use two spaces for indents, instead of the usual four spaces or tab.
+indent-string='  '
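
A rough sketch of running pylint against this rcfile by hand, assuming pylint is installed and using a hypothetical target module (presubmit wires the same config in through canned checks):

```
import subprocess

# Hypothetical target; substitute any perf_insights module.
target = 'perf_insights/map_single_trace.py'

# --rcfile points pylint at the config added above (two-space indents,
# camel-case function names, reports disabled). Path is an assumption about
# where this is run from (the catapult root).
subprocess.call(['pylint', '--rcfile=perf_insights/pylintrc', target])
```
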
diff --git a/catapult/perf_insights/queue.yaml b/catapult/perf_insights/queue.yaml
index 7c02077..dede11e 100644
--- a/catapult/perf_insights/queue.yaml
+++ b/catapult/perf_insights/queue.yaml
@@ -6,7 +6,9 @@
 - name: default
   rate: 1/s
 
+# 8 tasks/instance * 8 instances
 - name: mapper-queue
   rate: 500/s
+  max_concurrent_requests: 64
   retry_parameters:
     task_retry_limit: 0
diff --git a/catapult/perf_insights/remote_worker.yaml b/catapult/perf_insights/remote_worker.yaml
index 29fe818..a4c8b31 100644
--- a/catapult/perf_insights/remote_worker.yaml
+++ b/catapult/perf_insights/remote_worker.yaml
@@ -4,12 +4,14 @@
 
 api_version: 1
 module: cloud-worker
-threadsafe: false
+threadsafe: true
 runtime: custom
 vm: true
 
+# If you change instances, change value in queue.yaml until we come up with
+# a better way to sync these.
 manual_scaling:
-  instances: 1
+  instances: 8
 
 resources:
   cpu: 32
diff --git a/catapult/pylintrc b/catapult/pylintrc
new file mode 100644
index 0000000..4a99517
--- /dev/null
+++ b/catapult/pylintrc
@@ -0,0 +1,75 @@
+[MASTER]
+
+extension-pkg-whitelist=numpy
+
+
+[MESSAGES CONTROL]
+
+# Disable the message, report, category or checker with the given id(s).
+# TODO: Shrink this list to as small as possible.
+disable=
+  design,
+  similarities,
+
+  fixme,
+  locally-disabled,
+  locally-enabled,
+  missing-docstring,
+  no-self-use,
+  protected-access,
+  star-args,
+
+
+[REPORTS]
+
+# Don't write out full reports, just messages.
+reports=no
+
+
+[BASIC]
+
+# Regular expression which should only match correct function names.
+function-rgx=^(?:(?P<exempt>setUp|tearDown|setUpModule|tearDownModule)|(?P<camel_case>_?[A-Z][a-zA-Z0-9]*))$
+
+# Regular expression which should only match correct method names.
+method-rgx=^(?:(?P<exempt>_[a-z0-9_]+__|get|post|run|put|execute|runTest|setUp|tearDown|setUpTestCase|tearDownTestCase|setupSelf|tearDownClass|setUpClass)|(?P<camel_case>(_{0,2}|test|assert)[A-Z][a-zA-Z0-9_]*))$
+
+# Regular expression which should only match correct instance attribute names
+attr-rgx=^_{0,2}[a-z][a-z0-9_]*$
+
+# Regular expression which should only match correct argument names.
+argument-rgx=^[a-z][a-z0-9_]*$
+
+# Regular expression which should only match correct variable names.
+variable-rgx=^[a-z][a-z0-9_]*$
+
+# Good variable names which should always be accepted, separated by a comma.
+good-names=main,_
+
+# List of builtins function names that should not be used, separated by a comma.
+bad-functions=apply,input,reduce
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# A regular expression matching names used for dummy variables (i.e. not used).
+dummy-variables-rgx=^\*{0,2}(_$|unused_)
+
+
+[TYPECHECK]
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+
+[FORMAT]
+
+# We use two spaces for indents, instead of the usual four spaces or tab.
+indent-string='  '
+
+# Make sure : in dicts and trailing commas are checked for whitespace.
+no-space-check=
diff --git a/catapult/systrace/systrace/.gitignore b/catapult/systrace/.gitignore
similarity index 100%
rename from catapult/systrace/systrace/.gitignore
rename to catapult/systrace/.gitignore
diff --git a/catapult/systrace/PRESUBMIT.py b/catapult/systrace/PRESUBMIT.py
new file mode 100644
index 0000000..4ca7b86
--- /dev/null
+++ b/catapult/systrace/PRESUBMIT.py
@@ -0,0 +1,33 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+def CheckChangeOnUpload(input_api, output_api):
+  return _CommonChecks(input_api, output_api)
+
+
+def CheckChangeOnCommit(input_api, output_api):
+  return _CommonChecks(input_api, output_api)
+
+
+def _CommonChecks(input_api, output_api):
+  results = []
+  results += input_api.RunTests(input_api.canned_checks.GetPylint(
+      input_api, output_api, extra_paths_list=_GetPathsToPrepend(input_api),
+      pylintrc='pylintrc'))
+  return results
+
+
+def _GetPathsToPrepend(input_api):
+  project_dir = input_api.PresubmitLocalPath()
+  catapult_dir = input_api.os_path.join(project_dir, '..')
+  return [
+      project_dir,
+
+      input_api.os_path.join(catapult_dir, 'devil'),
+      input_api.os_path.join(catapult_dir, 'telemetry'),
+      input_api.os_path.join(catapult_dir, 'tracing'),
+
+      input_api.os_path.join(catapult_dir, 'third_party', 'mock'),
+  ]
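
Outside presubmit, roughly the same import environment can be reproduced by putting the prepended paths on PYTHONPATH before invoking pylint; a sketch under the assumption that the catapult checkout sits at catapult/ relative to the working directory:

```
import os
import subprocess

# Assumed checkout layout; adjust to wherever catapult lives.
project_dir = os.path.abspath('catapult/systrace')
catapult_dir = os.path.join(project_dir, '..')
paths = [
    project_dir,
    os.path.join(catapult_dir, 'devil'),
    os.path.join(catapult_dir, 'telemetry'),
    os.path.join(catapult_dir, 'tracing'),
    os.path.join(catapult_dir, 'third_party', 'mock'),
]

# Same effect as _GetPathsToPrepend, but for a manual run.
env = dict(os.environ, PYTHONPATH=os.pathsep.join(paths))
subprocess.call(['pylint', '--rcfile=pylintrc', 'profile_chrome'],
                cwd=project_dir, env=env)
```
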
diff --git a/catapult/systrace/bin/OWNERS b/catapult/systrace/bin/OWNERS
new file mode 100644
index 0000000..c288ed6
--- /dev/null
+++ b/catapult/systrace/bin/OWNERS
@@ -0,0 +1,4 @@
+per-file adb_profile_chrome=file://systrace/profile_chrome/OWNERS
+per-file adb_profile_chrome_startup=file://systrace/profile_chrome/OWNERS
+
+per-file systrace=file://systrace/systrace/OWNERS
diff --git a/catapult/systrace/bin/adb_profile_chrome b/catapult/systrace/bin/adb_profile_chrome
new file mode 100755
index 0000000..5231c8f
--- /dev/null
+++ b/catapult/systrace/bin/adb_profile_chrome
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+
+if __name__ == '__main__':
+  systrace_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
+  sys.path.append(systrace_path)
+  from profile_chrome import main
+  sys.exit(main.main())
diff --git a/catapult/systrace/bin/adb_profile_chrome_startup b/catapult/systrace/bin/adb_profile_chrome_startup
new file mode 100755
index 0000000..7b1d860
--- /dev/null
+++ b/catapult/systrace/bin/adb_profile_chrome_startup
@@ -0,0 +1,106 @@
+#!/usr/bin/env python
+#
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import optparse
+import os
+import sys
+import webbrowser
+
+_SYSTRACE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
+sys.path.append(_SYSTRACE_DIR)
+
+from profile_chrome import chrome_startup_controller
+from profile_chrome import controllers
+from profile_chrome import flags
+from profile_chrome import profiler
+from profile_chrome import systrace_controller
+from profile_chrome import ui
+
+_CATAPULT_DIR = os.path.join(
+    os.path.dirname(os.path.abspath(__file__)), '..', '..')
+sys.path.append(os.path.join(_CATAPULT_DIR, 'devil'))
+
+from devil.android import device_utils
+
+
+def _CreateOptionParser():
+  parser = optparse.OptionParser(description='Record about://tracing profiles '
+                                 'from Android browsers startup, combined with '
+                                 'Android systrace. See http://dev.chromium.org'
+                                 '/developers/how-tos/trace-event-profiling-'
+                                 'tool for detailed instructions for '
+                                 'profiling.')
+  parser.add_option('--url', help='URL to visit on startup. Default: '
+                    'https://www.google.com. An empty URL launches Chrome with'
+                    ' a MAIN action instead of VIEW.',
+                    default='https://www.google.com', metavar='URL')
+  parser.add_option('--cold', help='Flush the OS page cache before starting the'
+                    ' browser. Note that this requires a device with root '
+                    'access.', default=False, action='store_true')
+  parser.add_option_group(flags.SystraceOptions(parser))
+  parser.add_option_group(flags.OutputOptions(parser))
+
+  browsers = sorted(profiler.GetSupportedBrowsers().keys())
+  parser.add_option('-b', '--browser', help='Select among installed browsers. '
+                    'One of ' + ', '.join(browsers) + ', "stable" is used by '
+                    'default.', type='choice', choices=browsers,
+                    default='stable')
+  parser.add_option('-v', '--verbose', help='Verbose logging.',
+                    action='store_true')
+  parser.add_option('-z', '--compress', help='Compress the resulting trace '
+                    'with gzip. ', action='store_true')
+  parser.add_option('-t', '--time', help='Stops tracing after N seconds, 0 to '
+                    'manually stop (startup trace ends after at most 5s).',
+                    default=5, metavar='N', type='int')
+  return parser
+
+
+def main():
+  parser = _CreateOptionParser()
+  options, _ = parser.parse_args()
+
+  if options.verbose:
+    logging.getLogger().setLevel(logging.DEBUG)
+
+  devices = device_utils.DeviceUtils.HealthyDevices()
+  if len(devices) != 1:
+    logging.error('Exactly 1 device must be attached.')
+    return 1
+  device = devices[0]
+  package_info = profiler.GetSupportedBrowsers()[options.browser]
+
+  if options.systrace_categories in ['list', 'help']:
+    ui.PrintMessage('\n'.join(
+        systrace_controller.SystraceController.GetCategories(device)))
+    return 0
+  systrace_categories = (options.systrace_categories.split(',')
+                         if options.systrace_categories else [])
+  enabled_controllers = []
+  # Enable the systrace and chrome controllers. The systrace controller should go
+  # first because otherwise the resulting traces miss early systrace data.
+  if systrace_categories:
+    enabled_controllers.append(systrace_controller.SystraceController(
+        device, systrace_categories, False))
+  enabled_controllers.append(
+      chrome_startup_controller.ChromeStartupTracingController(
+          device, package_info, options.cold, options.url))
+  if options.output:
+    options.output = os.path.expanduser(options.output)
+  result = profiler.CaptureProfile(enabled_controllers,
+                                   options.time,
+                                   output=options.output,
+                                   compress=options.compress,
+                                   write_json=options.json)
+  if options.view:
+    if sys.platform == 'darwin':
+      os.system('/usr/bin/open %s' % os.path.abspath(result))
+    else:
+      webbrowser.open(result)
+
+
+if __name__ == '__main__':
+  sys.exit(main())
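
The order in which the script assembles controllers matters: systrace first, so early systrace samples are not lost. A trimmed sketch of the same flow driven from Python, using only entry points that appear in this file; it assumes a catapult checkout on sys.path, exactly one healthy device, hypothetical systrace categories, and a made-up output filename:

```
from devil.android import device_utils
from profile_chrome import chrome_startup_controller
from profile_chrome import profiler
from profile_chrome import systrace_controller

device = device_utils.DeviceUtils.HealthyDevices()[0]
package_info = profiler.GetSupportedBrowsers()['stable']

controllers = [
    # Systrace controller first, so the combined trace keeps early samples.
    systrace_controller.SystraceController(device, ['gfx', 'view'], False),
    chrome_startup_controller.ChromeStartupTracingController(
        device, package_info, cold=False, url='https://www.google.com'),
]

# Trace for 5 seconds and write the combined result to a hypothetical file.
result = profiler.CaptureProfile(controllers, 5,
                                 output='startup_trace.html',
                                 compress=False, write_json=False)
print(result)
```
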
diff --git a/catapult/systrace/bin/run_tests b/catapult/systrace/bin/run_tests
index 6578a8b..d92825f 100755
--- a/catapult/systrace/bin/run_tests
+++ b/catapult/systrace/bin/run_tests
@@ -10,13 +10,15 @@
 import sys
 import unittest
 
-_CATAPULT = os.path.abspath(
-    os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir))
+_SYSTRACE_DIR = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), os.path.pardir))
 
 def main():
-  systrace_package_path = os.path.join(_CATAPULT, 'systrace', 'systrace')
+  systrace_package_path = os.path.join(_SYSTRACE_DIR, 'systrace')
   suite = unittest.TestLoader().discover(
-      systrace_package_path, pattern = '*_unittest.py')
+      systrace_package_path,
+      pattern = '*_unittest.py',
+      top_level_dir=_SYSTRACE_DIR)
   result = unittest.TextTestRunner(verbosity=2).run(suite)
   if result.wasSuccessful():
     sys.exit(0)
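
top_level_dir is the substance of this change: discovery now starts inside the systrace package while imports still resolve against the project root. A self-contained illustration with the standard library (paths are placeholders):

```
import os
import sys
import unittest

systrace_dir = os.path.abspath('catapult/systrace')   # project root
package_dir = os.path.join(systrace_dir, 'systrace')  # package with the tests

# Without top_level_dir, discover() would treat package_dir as the import
# root, and package-relative imports inside the tests would typically fail.
suite = unittest.TestLoader().discover(
    package_dir, pattern='*_unittest.py', top_level_dir=systrace_dir)
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(0 if result.wasSuccessful() else 1)
```
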
diff --git a/catapult/systrace/bin/systrace b/catapult/systrace/bin/systrace
new file mode 100755
index 0000000..d1fcab2
--- /dev/null
+++ b/catapult/systrace/bin/systrace
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+
+_SYSTRACE_DIR = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), os.path.pardir))
+sys.path.insert(0, _SYSTRACE_DIR)
+from systrace import systrace
+
+if __name__ == '__main__':
+  sys.exit(systrace.main())
diff --git a/catapult/systrace/profile_chrome/OWNERS b/catapult/systrace/profile_chrome/OWNERS
new file mode 100644
index 0000000..51a6670
--- /dev/null
+++ b/catapult/systrace/profile_chrome/OWNERS
@@ -0,0 +1,2 @@
+skyostil@chromium.org
+zhenw@chromium.org
diff --git a/catapult/systrace/profile_chrome/__init__.py b/catapult/systrace/profile_chrome/__init__.py
new file mode 100644
index 0000000..e09d9ac
--- /dev/null
+++ b/catapult/systrace/profile_chrome/__init__.py
@@ -0,0 +1,10 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+
+_CATAPULT_DIR = os.path.join(
+    os.path.dirname(os.path.abspath(__file__)), '..', '..')
+sys.path.append(os.path.join(_CATAPULT_DIR, 'devil'))
diff --git a/catapult/systrace/profile_chrome/chrome_controller.py b/catapult/systrace/profile_chrome/chrome_controller.py
new file mode 100644
index 0000000..5807ac5
--- /dev/null
+++ b/catapult/systrace/profile_chrome/chrome_controller.py
@@ -0,0 +1,115 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import os
+import re
+import time
+
+from profile_chrome import controllers
+
+from devil.android import device_errors
+from devil.android.sdk import intent
+
+
+_HEAP_PROFILE_MMAP_PROPERTY = 'heapprof.mmap'
+
+class ChromeTracingController(controllers.BaseController):
+  def __init__(self, device, package_info,
+               categories, ring_buffer, trace_memory=False):
+    controllers.BaseController.__init__(self)
+    self._device = device
+    self._package_info = package_info
+    self._categories = categories
+    self._ring_buffer = ring_buffer
+    self._logcat_monitor = self._device.GetLogcatMonitor()
+    self._trace_file = None
+    self._trace_interval = None
+    self._trace_memory = trace_memory
+    self._is_tracing = False
+    self._trace_start_re = \
+       re.compile(r'Logging performance trace to file')
+    self._trace_finish_re = \
+       re.compile(r'Profiler finished[.] Results are in (.*)[.]')
+
+  def __repr__(self):
+    return 'chrome trace'
+
+  @staticmethod
+  def GetCategories(device, package_info):
+    with device.GetLogcatMonitor() as logmon:
+      device.BroadcastIntent(intent.Intent(
+          action='%s.GPU_PROFILER_LIST_CATEGORIES' % package_info.package))
+      try:
+        json_category_list = logmon.WaitFor(
+            re.compile(r'{"traceCategoriesList(.*)'), timeout=5).group(0)
+      except device_errors.CommandTimeoutError:
+        raise RuntimeError('Performance trace category list marker not found. '
+                           'Is the correct version of the browser running?')
+
+    record_categories = set()
+    disabled_by_default_categories = set()
+    json_data = json.loads(json_category_list)['traceCategoriesList']
+    for item in json_data:
+      for category in item.split(','):
+        if category.startswith('disabled-by-default'):
+          disabled_by_default_categories.add(category)
+        else:
+          record_categories.add(category)
+
+    return list(record_categories), list(disabled_by_default_categories)
+
+  def StartTracing(self, interval):
+    self._trace_interval = interval
+    self._logcat_monitor.Start()
+    start_extras = {'categories': ','.join(self._categories)}
+    if self._ring_buffer:
+      start_extras['continuous'] = None
+    self._device.BroadcastIntent(intent.Intent(
+        action='%s.GPU_PROFILER_START' % self._package_info.package,
+        extras=start_extras))
+
+    if self._trace_memory:
+      self._device.EnableRoot()
+      self._device.SetProp(_HEAP_PROFILE_MMAP_PROPERTY, 1)
+
+    # Chrome logs two different messages related to tracing:
+    #
+    # 1. "Logging performance trace to file"
+    # 2. "Profiler finished. Results are in [...]"
+    #
+    # The first one is printed when tracing starts and the second one indicates
+    # that the trace file is ready to be pulled.
+    try:
+      self._logcat_monitor.WaitFor(self._trace_start_re, timeout=5)
+      self._is_tracing = True
+    except device_errors.CommandTimeoutError:
+      raise RuntimeError(
+          'Trace start marker not found. Possible causes: 1) Is the correct '
+          'version of the browser running? 2) Is the browser already launched?')
+
+  def StopTracing(self):
+    if self._is_tracing:
+      self._device.BroadcastIntent(intent.Intent(
+          action='%s.GPU_PROFILER_STOP' % self._package_info.package))
+      self._trace_file = self._logcat_monitor.WaitFor(
+          self._trace_finish_re, timeout=120).group(1)
+      self._is_tracing = False
+    if self._trace_memory:
+      self._device.SetProp(_HEAP_PROFILE_MMAP_PROPERTY, 0)
+
+  def PullTrace(self):
+    # Wait a bit for the browser to finish writing the trace file.
+    time.sleep(self._trace_interval / 4 + 1)
+
+    trace_file = self._trace_file.replace('/storage/emulated/0/', '/sdcard/')
+    host_file = os.path.join(os.path.curdir, os.path.basename(trace_file))
+    try:
+      self._device.PullFile(trace_file, host_file)
+    except device_errors.AdbCommandFailedError:
+      raise RuntimeError(
+          'Cannot pull the trace file. Have you granted Storage permission to '
+          'the browser? (Android Settings -> Apps -> [the browser app] -> '
+          'Permissions -> Storage)')
+    return host_file
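
The handshake with the browser is entirely logcat-based: one marker when tracing starts, one when the trace file is ready. A quick check of the finish regex against a made-up logcat line (the path is illustrative only):

```
import re

trace_finish_re = re.compile(r'Profiler finished[.] Results are in (.*)[.]')

# Hypothetical logcat payload of the kind WaitFor() scans for.
sample_line = ('Profiler finished. Results are in '
               '/storage/emulated/0/chrome-profile-results-2016-01-01.json.')

match = trace_finish_re.search(sample_line)
trace_file = match.group(1)
print(trace_file)
# /storage/emulated/0/chrome-profile-results-2016-01-01.json

# PullTrace() then rewrites the emulated-storage prefix before pulling:
print(trace_file.replace('/storage/emulated/0/', '/sdcard/'))
```
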
diff --git a/catapult/systrace/profile_chrome/chrome_controller_unittest.py b/catapult/systrace/profile_chrome/chrome_controller_unittest.py
new file mode 100644
index 0000000..1ef791c
--- /dev/null
+++ b/catapult/systrace/profile_chrome/chrome_controller_unittest.py
@@ -0,0 +1,41 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import json
+
+from profile_chrome import chrome_controller
+from profile_chrome import controllers_unittest
+
+
+class ChromeControllerTest(controllers_unittest.BaseControllerTest):
+  def testGetCategories(self):
+    categories = \
+        chrome_controller.ChromeTracingController.GetCategories(
+            self.device, self.package_info)
+
+    self.assertEquals(len(categories), 2)
+    self.assertTrue(categories[0])
+    self.assertTrue(categories[1])
+
+  def testTracing(self):
+    categories = '*'
+    ring_buffer = False
+    controller = chrome_controller.ChromeTracingController(self.device,
+                                                           self.package_info,
+                                                           categories,
+                                                           ring_buffer)
+
+    interval = 1
+    try:
+      controller.StartTracing(interval)
+    finally:
+      controller.StopTracing()
+
+    result = controller.PullTrace()
+    try:
+      with open(result) as f:
+        json.loads(f.read())
+    finally:
+      os.remove(result)
diff --git a/catapult/systrace/profile_chrome/chrome_startup_controller.py b/catapult/systrace/profile_chrome/chrome_startup_controller.py
new file mode 100644
index 0000000..f048a36
--- /dev/null
+++ b/catapult/systrace/profile_chrome/chrome_startup_controller.py
@@ -0,0 +1,72 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import re
+import time
+
+from devil.android import flag_changer
+from devil.android.perf import cache_control
+from devil.android.sdk import intent
+
+from profile_chrome import controllers
+
+class ChromeStartupTracingController(controllers.BaseController):
+  def __init__(self, device, package_info, cold, url):
+    self._device = device
+    self._package_info = package_info
+    self._cold = cold
+    self._logcat_monitor = self._device.GetLogcatMonitor()
+    self._url = url
+    self._trace_file = None
+    self._trace_finish_re = re.compile(r' Completed startup tracing to (.*)')
+    self._flag_changer = flag_changer.FlagChanger(
+      self._device, self._package_info.cmdline_file)
+
+  def __repr__(self):
+    return 'Browser Startup Trace'
+
+  def _SetupTracing(self):
+    # TODO(lizeb): Figure out how to clean up the command-line file when
+    # _TearDownTracing() is not executed in StopTracing().
+    self._flag_changer.AddFlags(['--trace-startup'])
+    self._device.ForceStop(self._package_info.package)
+    if self._cold:
+      self._device.EnableRoot()
+      cache_control.CacheControl(self._device).DropRamCaches()
+    launch_intent = None
+    if self._url == '':
+      launch_intent = intent.Intent(
+          action='android.intent.action.MAIN',
+          package=self._package_info.package,
+          activity=self._package_info.activity)
+    else:
+      launch_intent = intent.Intent(
+          package=self._package_info.package,
+          activity=self._package_info.activity,
+          data=self._url,
+          extras={'create_new_tab': True})
+    self._device.StartActivity(launch_intent, blocking=True)
+
+  def _TearDownTracing(self):
+    self._flag_changer.Restore()
+
+  def StartTracing(self, interval):  # pylint: disable=unused-argument
+    self._SetupTracing()
+    self._logcat_monitor.Start()
+
+  def StopTracing(self):
+    try:
+      self._trace_file = self._logcat_monitor.WaitFor(
+          self._trace_finish_re).group(1)
+    finally:
+      self._TearDownTracing()
+
+  def PullTrace(self):
+    # Wait a bit for the browser to finish writing the trace file.
+    time.sleep(3)
+    trace_file = self._trace_file.replace('/storage/emulated/0/', '/sdcard/')
+    host_file = os.path.join(os.path.curdir, os.path.basename(trace_file))
+    self._device.PullFile(trace_file, host_file)
+    return host_file
diff --git a/catapult/systrace/profile_chrome/chrome_startup_controller_unittest.py b/catapult/systrace/profile_chrome/chrome_startup_controller_unittest.py
new file mode 100644
index 0000000..8d2d743
--- /dev/null
+++ b/catapult/systrace/profile_chrome/chrome_startup_controller_unittest.py
@@ -0,0 +1,28 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import json
+
+from profile_chrome import chrome_startup_controller
+from profile_chrome import controllers_unittest
+
+
+class ChromeControllerTest(controllers_unittest.BaseControllerTest):
+  def testTracing(self):
+    controller = chrome_startup_controller.ChromeStartupTracingController(
+        self.device, self.package_info, False, 'https://www.google.com')
+
+    interval = 1
+    try:
+      controller.StartTracing(interval)
+    finally:
+      controller.StopTracing()
+
+    result = controller.PullTrace()
+    try:
+      with open(result) as f:
+        json.loads(f.read())
+    finally:
+      os.remove(result)
diff --git a/catapult/systrace/profile_chrome/controllers.py b/catapult/systrace/profile_chrome/controllers.py
new file mode 100644
index 0000000..156818b
--- /dev/null
+++ b/catapult/systrace/profile_chrome/controllers.py
@@ -0,0 +1,17 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import exceptions
+
+
+# pylint: disable=R0201
+class BaseController(object):
+  def StartTracing(self, _):
+    raise exceptions.NotImplementedError
+
+  def StopTracing(self):
+    raise exceptions.NotImplementedError
+
+  def PullTrace(self):
+    raise exceptions.NotImplementedError
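
BaseController is the contract every backend implements: start, stop, pull. A skeletal, purely hypothetical controller showing the expected shape; the real controllers above also take a device (and usually package info) in __init__:

```
from profile_chrome import controllers


class NullController(controllers.BaseController):
  """Hypothetical no-op controller illustrating the BaseController contract."""

  def __init__(self):
    controllers.BaseController.__init__(self)
    self._trace_file = None

  def __repr__(self):
    return 'null trace'

  def StartTracing(self, interval):
    # A real controller would start collection here; interval is in seconds.
    self._trace_file = 'null-trace.json'

  def StopTracing(self):
    pass

  def PullTrace(self):
    # Returns a host-side path (or None) for the profiler to collect.
    return self._trace_file
```
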
diff --git a/catapult/systrace/profile_chrome/controllers_unittest.py b/catapult/systrace/profile_chrome/controllers_unittest.py
new file mode 100644
index 0000000..bcfa676
--- /dev/null
+++ b/catapult/systrace/profile_chrome/controllers_unittest.py
@@ -0,0 +1,24 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from profile_chrome import profiler
+
+from devil.android import device_utils
+from devil.android.sdk import intent
+
+
+class BaseControllerTest(unittest.TestCase):
+  def setUp(self):
+    devices = device_utils.DeviceUtils.HealthyDevices()
+    self.browser = 'stable'
+    self.package_info = profiler.GetSupportedBrowsers()[self.browser]
+    self.device = devices[0]
+
+    self.device.ForceStop(self.package_info.package)
+    self.device.StartActivity(
+        intent.Intent(activity=self.package_info.activity,
+                      package=self.package_info.package),
+        blocking=True)
diff --git a/catapult/systrace/profile_chrome/ddms_controller.py b/catapult/systrace/profile_chrome/ddms_controller.py
new file mode 100644
index 0000000..3f26297
--- /dev/null
+++ b/catapult/systrace/profile_chrome/ddms_controller.py
@@ -0,0 +1,54 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import re
+import time
+
+from profile_chrome import controllers
+from profile_chrome import util
+
+_DDMS_SAMPLING_FREQUENCY_US = 100
+
+
+class DdmsController(controllers.BaseController):
+  def __init__(self, device, package_info):
+    controllers.BaseController.__init__(self)
+    self._device = device
+    self._package = package_info.package
+    self._output_file = None
+    self._supports_sampling = self._SupportsSampling()
+
+  def __repr__(self):
+    return 'ddms profile'
+
+  def _SupportsSampling(self):
+    for line in self._device.RunShellCommand('am --help'):
+      if re.match(r'.*am profile start.*--sampling', line):
+        return True
+    return False
+
+  def StartTracing(self, _):
+    self._output_file = (
+        '/data/local/tmp/ddms-profile-%s' % util.GetTraceTimestamp())
+    cmd = 'am profile start '
+    if self._supports_sampling:
+      cmd += '--sampling %d ' % _DDMS_SAMPLING_FREQUENCY_US
+    cmd += '%s %s' % (self._package, self._output_file)
+    self._device.RunShellCommand(cmd)
+
+  def StopTracing(self):
+    self._device.RunShellCommand('am profile stop %s' % self._package)
+
+  def PullTrace(self):
+    if not self._output_file:
+      return None
+
+    # Wait for the trace file to get written.
+    time.sleep(1)
+
+    host_file = os.path.join(
+        os.path.curdir, os.path.basename(self._output_file))
+    self._device.PullFile(self._output_file, host_file)
+    return host_file
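
DdmsController drives Dalvik method profiling purely through `am profile`. The sketch below only reproduces the command strings it builds, with a hypothetical package name and timestamp, to make the on-device side easier to see:

```
_DDMS_SAMPLING_FREQUENCY_US = 100

package = 'com.android.chrome'  # hypothetical browser package
output_file = '/data/local/tmp/ddms-profile-2016-01-01-120000'  # made-up name
supports_sampling = True

cmd = 'am profile start '
if supports_sampling:
  cmd += '--sampling %d ' % _DDMS_SAMPLING_FREQUENCY_US
cmd += '%s %s' % (package, output_file)

print(cmd)
# am profile start --sampling 100 com.android.chrome /data/local/tmp/...
print('am profile stop %s' % package)  # issued by StopTracing()
```
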
diff --git a/catapult/systrace/profile_chrome/ddms_controller_unittest.py b/catapult/systrace/profile_chrome/ddms_controller_unittest.py
new file mode 100644
index 0000000..a9b5aae
--- /dev/null
+++ b/catapult/systrace/profile_chrome/ddms_controller_unittest.py
@@ -0,0 +1,26 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+
+from profile_chrome import controllers_unittest
+from profile_chrome import ddms_controller
+
+
+class DdmsControllerTest(controllers_unittest.BaseControllerTest):
+  def testTracing(self):
+    controller = ddms_controller.DdmsController(self.device, self.package_info)
+
+    interval = 1
+    try:
+      controller.StartTracing(interval)
+    finally:
+      controller.StopTracing()
+
+    result = controller.PullTrace()
+    try:
+      with open(result) as f:
+        self.assertTrue(f.read().startswith('*version'))
+    finally:
+      os.remove(result)
diff --git a/catapult/systrace/profile_chrome/flags.py b/catapult/systrace/profile_chrome/flags.py
new file mode 100644
index 0000000..dfbee5c
--- /dev/null
+++ b/catapult/systrace/profile_chrome/flags.py
@@ -0,0 +1,26 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import optparse
+
+def SystraceOptions(parser):
+  systrace_opts = optparse.OptionGroup(parser, 'Systrace tracing options')
+  systrace_opts.add_option('-s', '--systrace', help='Capture a systrace with '
+                           'the chosen comma-delimited systrace categories. You'
+                           ' can also capture a combined Chrome + systrace by '
+                           'enabling both types of categories. Use "list" to '
+                           'see the available categories. Systrace is disabled '
+                           'by default.', metavar='SYS_CATEGORIES',
+                           dest='systrace_categories', default='')
+  return systrace_opts
+
+
+def OutputOptions(parser):
+  output_options = optparse.OptionGroup(parser, 'Output options')
+  output_options.add_option('-o', '--output', help='Save trace output to file.')
+  output_options.add_option('--json', help='Save trace as raw JSON instead of '
+                            'HTML.', action='store_true')
+  output_options.add_option('--view', help='Open resulting trace file in a '
+                            'browser.', action='store_true')
+  return output_options
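
Both entry points consume these factories via parser.add_option_group(); a minimal standalone sketch, assuming the systrace directory is on sys.path, parsing a hypothetical command line:

```
import optparse

from profile_chrome import flags

parser = optparse.OptionParser()
parser.add_option_group(flags.SystraceOptions(parser))
parser.add_option_group(flags.OutputOptions(parser))

# Hypothetical invocation: systrace categories plus raw JSON output to a file.
options, _ = parser.parse_args(
    ['-s', 'gfx,view,sched', '--json', '-o', 'trace.json'])
print(options.systrace_categories)   # 'gfx,view,sched'
print(options.json, options.output)  # True trace.json
```
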
diff --git a/catapult/systrace/profile_chrome/main.py b/catapult/systrace/profile_chrome/main.py
new file mode 100755
index 0000000..a731120
--- /dev/null
+++ b/catapult/systrace/profile_chrome/main.py
@@ -0,0 +1,267 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import optparse
+import os
+import sys
+import webbrowser
+
+from profile_chrome import chrome_controller
+from profile_chrome import ddms_controller
+from profile_chrome import flags
+from profile_chrome import perf_controller
+from profile_chrome import profiler
+from profile_chrome import systrace_controller
+from profile_chrome import ui
+
+from devil.android import device_utils
+
+
+_DEFAULT_CHROME_CATEGORIES = '_DEFAULT_CHROME_CATEGORIES'
+
+
+def _ComputeChromeCategories(options):
+  categories = []
+  if options.trace_frame_viewer:
+    categories.append('disabled-by-default-cc.debug')
+  if options.trace_ubercompositor:
+    categories.append('disabled-by-default-cc.debug*')
+  if options.trace_gpu:
+    categories.append('disabled-by-default-gpu.debug*')
+  if options.trace_flow:
+    categories.append('disabled-by-default-toplevel.flow')
+  if options.trace_memory:
+    categories.append('disabled-by-default-memory')
+  if options.trace_scheduler:
+    categories.append('disabled-by-default-blink.scheduler')
+    categories.append('disabled-by-default-cc.debug.scheduler')
+    categories.append('disabled-by-default-renderer.scheduler')
+  if options.chrome_categories:
+    categories += options.chrome_categories.split(',')
+  return categories
+
+
+def _ComputeSystraceCategories(options):
+  if not options.systrace_categories:
+    return []
+  return options.systrace_categories.split(',')
+
+
+def _ComputePerfCategories(options):
+  if not perf_controller.PerfProfilerController.IsSupported():
+    return []
+  if not options.perf_categories:
+    return []
+  return options.perf_categories.split(',')
+
+
+def _OptionalValueCallback(default_value):
+  def callback(option, _, __, parser):  # pylint: disable=unused-argument
+    value = default_value
+    if parser.rargs and not parser.rargs[0].startswith('-'):
+      value = parser.rargs.pop(0)
+    setattr(parser.values, option.dest, value)
+  return callback
+
+
+def _CreateOptionParser():
+  parser = optparse.OptionParser(description='Record about://tracing profiles '
+                                 'from Android browsers. See http://dev.'
+                                 'chromium.org/developers/how-tos/trace-event-'
+                                 'profiling-tool for detailed instructions for '
+                                 'profiling.')
+
+  timed_options = optparse.OptionGroup(parser, 'Timed tracing')
+  timed_options.add_option('-t', '--time', help='Profile for N seconds and '
+                          'download the resulting trace.', metavar='N',
+                           type='float')
+  parser.add_option_group(timed_options)
+
+  cont_options = optparse.OptionGroup(parser, 'Continuous tracing')
+  cont_options.add_option('--continuous', help='Profile continuously until '
+                          'stopped.', action='store_true')
+  cont_options.add_option('--ring-buffer', help='Use the trace buffer as a '
+                          'ring buffer and save its contents when stopping '
+                          'instead of appending events into one long trace.',
+                          action='store_true')
+  parser.add_option_group(cont_options)
+
+  chrome_opts = optparse.OptionGroup(parser, 'Chrome tracing options')
+  chrome_opts.add_option('-c', '--categories', help='Select Chrome tracing '
+                         'categories with comma-delimited wildcards, '
+                         'e.g., "*", "cat1*,-cat1a". Omit this option to trace '
+                         'Chrome\'s default categories. Chrome tracing can be '
+                         'disabled with "--categories=\'\'". Use "list" to '
+                         'see the available categories.',
+                         metavar='CHROME_CATEGORIES', dest='chrome_categories',
+                         default=_DEFAULT_CHROME_CATEGORIES)
+  chrome_opts.add_option('--trace-cc',
+                         help='Deprecated, use --trace-frame-viewer.',
+                         action='store_true')
+  chrome_opts.add_option('--trace-frame-viewer',
+                         help='Enable enough trace categories for '
+                         'compositor frame viewing.', action='store_true')
+  chrome_opts.add_option('--trace-ubercompositor',
+                         help='Enable enough trace categories for '
+                         'ubercompositor frame data.', action='store_true')
+  chrome_opts.add_option('--trace-gpu', help='Enable extra trace categories '
+                         'for GPU data.', action='store_true')
+  chrome_opts.add_option('--trace-flow', help='Enable extra trace categories '
+                         'for IPC message flows.', action='store_true')
+  chrome_opts.add_option('--trace-memory', help='Enable extra trace categories '
+                         'for memory profile. (tcmalloc required)',
+                         action='store_true')
+  chrome_opts.add_option('--trace-scheduler', help='Enable extra trace '
+                         'categories for scheduler state',
+                         action='store_true')
+  parser.add_option_group(chrome_opts)
+
+  parser.add_option_group(flags.SystraceOptions(parser))
+
+  if perf_controller.PerfProfilerController.IsSupported():
+    perf_opts = optparse.OptionGroup(parser, 'Perf profiling options')
+    perf_opts.add_option('-p', '--perf', help='Capture a perf profile with '
+                         'the chosen comma-delimited event categories. '
+                         'Samples CPU cycles by default. Use "list" to see '
+                         'the available sample types.', action='callback',
+                         default='', callback=_OptionalValueCallback('cycles'),
+                         metavar='PERF_CATEGORIES', dest='perf_categories')
+    parser.add_option_group(perf_opts)
+
+  ddms_options = optparse.OptionGroup(parser, 'Java tracing')
+  ddms_options.add_option('--ddms', help='Trace Java execution using DDMS '
+                          'sampling.', action='store_true')
+  parser.add_option_group(ddms_options)
+
+  parser.add_option_group(flags.OutputOptions(parser))
+
+  browsers = sorted(profiler.GetSupportedBrowsers().keys())
+  parser.add_option('-b', '--browser', help='Select among installed browsers. '
+                    'One of ' + ', '.join(browsers) + ', "stable" is used by '
+                    'default.', type='choice', choices=browsers,
+                    default='stable')
+  parser.add_option('-v', '--verbose', help='Verbose logging.',
+                    action='store_true')
+  parser.add_option('-z', '--compress', help='Compress the resulting trace '
+                    'with gzip. ', action='store_true')
+  parser.add_option('-d', '--device', help='The Android device ID to use, '
+                    'defaults to the value of ANDROID_SERIAL environment '
+                    'variable. If not specified, only 0 or 1 connected '
+                    'devices are supported.',
+                    default=os.environ.get('ANDROID_SERIAL', None))
+  return parser
+
+
+def main():
+  parser = _CreateOptionParser()
+  options, _args = parser.parse_args()  # pylint: disable=unused-variable
+  if options.trace_cc:
+    parser.error("""--trace-cc is deprecated.
+
+For basic jank busting uses, use  --trace-frame-viewer
+For detailed study of ubercompositor, pass --trace-ubercompositor.
+
+When in doubt, just try out --trace-frame-viewer.
+""")
+
+  if options.verbose:
+    logging.getLogger().setLevel(logging.DEBUG)
+
+  devices = device_utils.DeviceUtils.HealthyDevices()
+  device = None
+  if options.device:
+    device = next((d for d in devices if d == options.device), None)
+  elif len(devices) == 1:
+    device = devices[0]
+
+  if not device:
+    parser.error('Use -d/--device to select a device:\n' + '\n'.join(devices))
+  package_info = profiler.GetSupportedBrowsers()[options.browser]
+
+  if options.chrome_categories in ['list', 'help']:
+    ui.PrintMessage('Collecting record categories list...', eol='')
+    record_categories = []
+    disabled_by_default_categories = []
+    record_categories, disabled_by_default_categories = \
+        chrome_controller.ChromeTracingController.GetCategories(
+            device, package_info)
+
+    ui.PrintMessage('done')
+    ui.PrintMessage('Record Categories:')
+    ui.PrintMessage('\n'.join('\t%s' % item \
+        for item in sorted(record_categories)))
+
+    ui.PrintMessage('\nDisabled by Default Categories:')
+    ui.PrintMessage('\n'.join('\t%s' % item \
+        for item in sorted(disabled_by_default_categories)))
+
+    return 0
+
+  if options.systrace_categories in ['list', 'help']:
+    ui.PrintMessage('\n'.join(
+        systrace_controller.SystraceController.GetCategories(device)))
+    return 0
+
+  if (perf_controller.PerfProfilerController.IsSupported() and
+      options.perf_categories in ['list', 'help']):
+    ui.PrintMessage('\n'.join(
+        perf_controller.PerfProfilerController.GetCategories(device)))
+    return 0
+
+  if not options.time and not options.continuous:
+    ui.PrintMessage('Time interval or continuous tracing should be specified.')
+    return 1
+
+  chrome_categories = _ComputeChromeCategories(options)
+  systrace_categories = _ComputeSystraceCategories(options)
+  perf_categories = _ComputePerfCategories(options)
+
+  if chrome_categories and 'webview' in systrace_categories:
+    logging.warning('Using the "webview" category in systrace together with '
+                    'Chrome tracing results in duplicate trace events.')
+
+  enabled_controllers = []
+  if chrome_categories:
+    enabled_controllers.append(
+        chrome_controller.ChromeTracingController(device,
+                                                  package_info,
+                                                  chrome_categories,
+                                                  options.ring_buffer,
+                                                  options.trace_memory))
+  if systrace_categories:
+    enabled_controllers.append(
+        systrace_controller.SystraceController(device,
+                                               systrace_categories,
+                                               options.ring_buffer))
+
+  if perf_categories:
+    enabled_controllers.append(
+        perf_controller.PerfProfilerController(device,
+                                               perf_categories))
+
+  if options.ddms:
+    enabled_controllers.append(
+        ddms_controller.DdmsController(device,
+                                       package_info))
+
+  if not enabled_controllers:
+    ui.PrintMessage('No trace categories enabled.')
+    return 1
+
+  if options.output:
+    options.output = os.path.expanduser(options.output)
+  result = profiler.CaptureProfile(
+      enabled_controllers,
+      options.time if not options.continuous else 0,
+      output=options.output,
+      compress=options.compress,
+      write_json=options.json)
+  if options.view:
+    if sys.platform == 'darwin':
+      os.system('/usr/bin/open %s' % os.path.abspath(result))
+    else:
+      webbrowser.open(result)
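
The category plumbing in main() is plain string splitting plus a few convenience flags. A small check of _ComputeChromeCategories with a stubbed options object (attribute names taken from the option definitions above; importing profile_chrome.main assumes the catapult layout so its devil imports resolve):

```
from profile_chrome import main as profile_chrome_main


class _StubOptions(object):
  """Only the attributes _ComputeChromeCategories reads."""
  trace_frame_viewer = True
  trace_ubercompositor = False
  trace_gpu = False
  trace_flow = True
  trace_memory = False
  trace_scheduler = False
  chrome_categories = 'blink,cc'


print(profile_chrome_main._ComputeChromeCategories(_StubOptions()))
# ['disabled-by-default-cc.debug', 'disabled-by-default-toplevel.flow',
#  'blink', 'cc']
```
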
diff --git a/catapult/systrace/profile_chrome/perf_controller.py b/catapult/systrace/profile_chrome/perf_controller.py
new file mode 100644
index 0000000..8afe6f3
--- /dev/null
+++ b/catapult/systrace/profile_chrome/perf_controller.py
@@ -0,0 +1,189 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import signal
+import subprocess
+import sys
+import tempfile
+
+from devil.android import device_temp_file
+from devil.android.perf import perf_control
+
+from profile_chrome import controllers
+from profile_chrome import ui
+
+_CATAPULT_DIR = os.path.join(
+    os.path.dirname(os.path.abspath(__file__)), '..', '..')
+sys.path.append(os.path.join(_CATAPULT_DIR, 'telemetry'))
+try:
+  # pylint: disable=F0401
+  from telemetry.internal.platform.profiler import android_profiling_helper
+  from telemetry.internal.util import binary_manager
+except ImportError:
+  android_profiling_helper = None
+  binary_manager = None
+
+
+_PERF_OPTIONS = [
+    # Sample across all processes and CPUs so that the current CPU gets
+    # recorded to each sample.
+    '--all-cpus',
+    # In perf 3.13 --call-graph requires an argument, so use the -g short-hand
+    # which does not.
+    '-g',
+    # Increase priority to avoid dropping samples. Requires root.
+    '--realtime', '80',
+    # Record raw samples to get CPU information.
+    '--raw-samples',
+    # Increase sampling frequency for better coverage.
+    '--freq', '2000',
+]
+
+
+class _PerfProfiler(object):
+  def __init__(self, device, perf_binary, categories):
+    self._device = device
+    self._output_file = device_temp_file.DeviceTempFile(
+        self._device.adb, prefix='perf_output')
+    self._log_file = tempfile.TemporaryFile()
+
+    # TODO(jbudorick) Look at providing a way to unhandroll this once the
+    #                 adb rewrite has fully landed.
+    device_param = (['-s', str(self._device)] if str(self._device) else [])
+    cmd = ['adb'] + device_param + \
+          ['shell', perf_binary, 'record',
+           '--output', self._output_file.name] + _PERF_OPTIONS
+    if categories:
+      cmd += ['--event', ','.join(categories)]
+    self._perf_control = perf_control.PerfControl(self._device)
+    self._perf_control.SetPerfProfilingMode()
+    self._perf_process = subprocess.Popen(cmd,
+                                          stdout=self._log_file,
+                                          stderr=subprocess.STDOUT)
+
+  def SignalAndWait(self):
+    self._device.KillAll('perf', signum=signal.SIGINT)
+    self._perf_process.wait()
+    self._perf_control.SetDefaultPerfMode()
+
+  def _FailWithLog(self, msg):
+    self._log_file.seek(0)
+    log = self._log_file.read()
+    raise RuntimeError('%s. Log output:\n%s' % (msg, log))
+
+  def PullResult(self, output_path):
+    if not self._device.FileExists(self._output_file.name):
+      self._FailWithLog('Perf recorded no data')
+
+    perf_profile = os.path.join(output_path,
+                                os.path.basename(self._output_file.name))
+    self._device.PullFile(self._output_file.name, perf_profile)
+    if not os.stat(perf_profile).st_size:
+      os.remove(perf_profile)
+      self._FailWithLog('Perf recorded a zero-sized file')
+
+    self._log_file.close()
+    self._output_file.close()
+    return perf_profile
+
+
+class PerfProfilerController(controllers.BaseController):
+  def __init__(self, device, categories):
+    controllers.BaseController.__init__(self)
+    self._device = device
+    self._categories = categories
+    self._perf_binary = self._PrepareDevice(device)
+    self._perf_instance = None
+
+  def __repr__(self):
+    return 'perf profile'
+
+  @staticmethod
+  def IsSupported():
+    return bool(android_profiling_helper)
+
+  @staticmethod
+  def _PrepareDevice(device):
+    if not 'BUILDTYPE' in os.environ:
+      os.environ['BUILDTYPE'] = 'Release'
+    if binary_manager.NeedsInit():
+      binary_manager.InitDependencyManager(None)
+    return android_profiling_helper.PrepareDeviceForPerf(device)
+
+  @classmethod
+  def GetCategories(cls, device):
+    perf_binary = cls._PrepareDevice(device)
+    return device.RunShellCommand('%s list' % perf_binary)
+
+  def StartTracing(self, _):
+    self._perf_instance = _PerfProfiler(self._device,
+                                        self._perf_binary,
+                                        self._categories)
+
+  def StopTracing(self):
+    if not self._perf_instance:
+      return
+    self._perf_instance.SignalAndWait()
+
+  @staticmethod
+  def _GetInteractivePerfCommand(perfhost_path, perf_profile, symfs_dir,
+                                 required_libs, kallsyms):
+    cmd = '%s report -n -i %s --symfs %s --kallsyms %s' % (
+        os.path.relpath(perfhost_path, '.'), perf_profile, symfs_dir, kallsyms)
+    for lib in required_libs:
+      lib = os.path.join(symfs_dir, lib[1:])
+      if not os.path.exists(lib):
+        continue
+      objdump_path = android_profiling_helper.GetToolchainBinaryPath(
+          lib, 'objdump')
+      if objdump_path:
+        cmd += ' --objdump %s' % os.path.relpath(objdump_path, '.')
+        break
+    return cmd
+
+  def PullTrace(self):
+    symfs_dir = os.path.join(tempfile.gettempdir(),
+                             os.path.expandvars('$USER-perf-symfs'))
+    if not os.path.exists(symfs_dir):
+      os.makedirs(symfs_dir)
+    required_libs = set()
+
+    # Download the recorded perf profile.
+    perf_profile = self._perf_instance.PullResult(symfs_dir)
+    required_libs = \
+        android_profiling_helper.GetRequiredLibrariesForPerfProfile(
+            perf_profile)
+    if not required_libs:
+      logging.warning('No libraries required by perf trace. Most likely there '
+                      'are no samples in the trace.')
+
+    # Build a symfs with all the necessary libraries.
+    kallsyms = android_profiling_helper.CreateSymFs(self._device,
+                                                    symfs_dir,
+                                                    required_libs,
+                                                    use_symlinks=False)
+    perfhost_path = binary_manager.FetchPath(
+        android_profiling_helper.GetPerfhostName(), 'x86_64', 'linux')
+
+    ui.PrintMessage('\nNote: to view the profile in perf, run:')
+    ui.PrintMessage('  ' + self._GetInteractivePerfCommand(perfhost_path,
+        perf_profile, symfs_dir, required_libs, kallsyms))
+
+    # Convert the perf profile into JSON.
+    perf_script_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
+                                    'third_party', 'perf_to_tracing.py')
+    json_file_name = os.path.basename(perf_profile)
+    with open(os.devnull, 'w') as dev_null, \
+        open(json_file_name, 'w') as json_file:
+      cmd = [perfhost_path, 'script', '-s', perf_script_path, '-i',
+             perf_profile, '--symfs', symfs_dir, '--kallsyms', kallsyms]
+      if subprocess.call(cmd, stdout=json_file, stderr=dev_null):
+        logging.warning('Perf data to JSON conversion failed. The result will '
+                        'not contain any perf samples. You can still view the '
+                        'perf data manually as shown above.')
+        return None
+
+    return json_file_name
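
As a reference point, a small sketch of the adb invocation that _PerfProfiler assembles from _PERF_OPTIONS and the requested event categories; the serial and on-device paths below are made up.

# Sketch: mirrors how _PerfProfiler builds the device-side perf command.
PERF_OPTIONS = ['--all-cpus', '-g', '--realtime', '80',
                '--raw-samples', '--freq', '2000']

def build_perf_record_cmd(serial, perf_binary, output_file, categories):
  device_param = ['-s', serial] if serial else []
  cmd = (['adb'] + device_param +
         ['shell', perf_binary, 'record', '--output', output_file] +
         PERF_OPTIONS)
  if categories:
    cmd += ['--event', ','.join(categories)]
  return cmd

# Hypothetical serial and paths:
print(' '.join(build_perf_record_cmd(
    'emulator-5554', '/data/local/tmp/perf', '/data/local/tmp/perf_output',
    ['cycles'])))
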
diff --git a/catapult/systrace/profile_chrome/perf_controller_unittest.py b/catapult/systrace/profile_chrome/perf_controller_unittest.py
new file mode 100644
index 0000000..f6285a4
--- /dev/null
+++ b/catapult/systrace/profile_chrome/perf_controller_unittest.py
@@ -0,0 +1,40 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import json
+
+from profile_chrome import controllers_unittest
+from profile_chrome import perf_controller
+from profile_chrome import ui
+
+
+class PerfProfilerControllerTest(controllers_unittest.BaseControllerTest):
+  def testGetCategories(self):
+    if not perf_controller.PerfProfilerController.IsSupported():
+      return
+    categories = \
+        perf_controller.PerfProfilerController.GetCategories(self.device)
+    assert 'cycles' in ' '.join(categories)
+
+  def testTracing(self):
+    if not perf_controller.PerfProfilerController.IsSupported():
+      return
+    ui.EnableTestMode()
+    categories = ['cycles']
+    controller = perf_controller.PerfProfilerController(self.device,
+                                                        categories)
+
+    interval = 1
+    try:
+      controller.StartTracing(interval)
+    finally:
+      controller.StopTracing()
+
+    result = controller.PullTrace()
+    try:
+      with open(result) as f:
+        json.loads(f.read())
+    finally:
+      os.remove(result)
diff --git a/catapult/systrace/profile_chrome/profiler.py b/catapult/systrace/profile_chrome/profiler.py
new file mode 100644
index 0000000..dbfc06e
--- /dev/null
+++ b/catapult/systrace/profile_chrome/profiler.py
@@ -0,0 +1,88 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+
+from devil.android.constants import chrome
+
+from profile_chrome import trace_packager
+from profile_chrome import ui
+
+
+def _StartTracing(controllers, interval):
+  for controller in controllers:
+    controller.StartTracing(interval)
+
+
+def _StopTracing(controllers):
+  for controller in controllers:
+    controller.StopTracing()
+
+
+def _PullTraces(controllers, output, compress, write_json):
+  ui.PrintMessage('Downloading...', eol='')
+  trace_files = [controller.PullTrace() for controller in controllers]
+  trace_files = [trace for trace in trace_files if trace]
+  if not trace_files:
+    ui.PrintMessage('No results')
+    return ''
+  result = trace_packager.PackageTraces(trace_files,
+                                        output=output,
+                                        compress=compress,
+                                        write_json=write_json)
+  ui.PrintMessage('done')
+  ui.PrintMessage('Trace written to file://%s' % os.path.abspath(result))
+  return result
+
+
+def GetSupportedBrowsers():
+  """Returns the package names of all supported browsers."""
+  # Add aliases for backwards compatibility.
+  supported_browsers = {
+    'stable': chrome.PACKAGE_INFO['chrome_stable'],
+    'beta': chrome.PACKAGE_INFO['chrome_beta'],
+    'dev': chrome.PACKAGE_INFO['chrome_dev'],
+    'build': chrome.PACKAGE_INFO['chrome'],
+  }
+  supported_browsers.update(chrome.PACKAGE_INFO)
+  unsupported_browsers = ['content_browsertests', 'gtest', 'legacy_browser']
+  for browser in unsupported_browsers:
+    if browser in supported_browsers:
+      del supported_browsers[browser]
+  return supported_browsers
+
+
+def CaptureProfile(controllers, interval, output=None, compress=False,
+                   write_json=False):
+  """Records a profiling trace saves the result to a file.
+
+  Args:
+    controllers: List of tracing controllers.
+    interval: Time interval to capture in seconds. An interval of None (or 0)
+        continues tracing until stopped by the user.
+    output: Output file name or None to use an automatically generated name.
+    compress: If True, the result will be compressed either with gzip or zip
+        depending on the number of captured subtraces.
+    write_json: If True, prefer JSON output over HTML.
+
+  Returns:
+    Path to saved profile.
+  """
+  trace_type = ' + '.join(map(str, controllers))
+  try:
+    _StartTracing(controllers, interval)
+    if interval:
+      ui.PrintMessage('Capturing %d-second %s. Press Enter to stop early...' % \
+          (interval, trace_type), eol='')
+      ui.WaitForEnter(interval)
+    else:
+      ui.PrintMessage('Capturing %s. Press Enter to stop...' % \
+          trace_type, eol='')
+      raw_input()
+  finally:
+    _StopTracing(controllers)
+  if interval:
+    ui.PrintMessage('done')
+
+  return _PullTraces(controllers, output, compress, write_json)
diff --git a/catapult/systrace/profile_chrome/profiler_unittest.py b/catapult/systrace/profile_chrome/profiler_unittest.py
new file mode 100644
index 0000000..ef55d70
--- /dev/null
+++ b/catapult/systrace/profile_chrome/profiler_unittest.py
@@ -0,0 +1,78 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import tempfile
+import unittest
+import zipfile
+
+from profile_chrome import profiler
+from profile_chrome import ui
+
+
+class FakeController(object):
+  def __init__(self, contents='fake-contents'):
+    self.contents = contents
+    self.interval = None
+    self.stopped = False
+    self.filename = None
+
+  def StartTracing(self, interval):
+    self.interval = interval
+
+  def StopTracing(self):
+    self.stopped = True
+
+  def PullTrace(self):
+    with tempfile.NamedTemporaryFile(delete=False) as f:
+      self.filename = f.name
+      f.write(self.contents)
+      return f.name
+
+  def __repr__(self):
+    return 'faketrace'
+
+
+class ProfilerTest(unittest.TestCase):
+  def setUp(self):
+    ui.EnableTestMode()
+
+  def testCaptureBasicProfile(self):
+    controller = FakeController()
+    interval = 1.5
+    result = profiler.CaptureProfile([controller], interval)
+
+    try:
+      self.assertEquals(controller.interval, interval)
+      self.assertTrue(controller.stopped)
+      self.assertTrue(os.path.exists(result))
+      self.assertFalse(os.path.exists(controller.filename))
+      self.assertTrue(result.endswith('.html'))
+    finally:
+      os.remove(result)
+
+  def testCaptureJsonProfile(self):
+    controller = FakeController()
+    result = profiler.CaptureProfile([controller], 1, write_json=True)
+
+    try:
+      self.assertFalse(result.endswith('.html'))
+      with open(result) as f:
+        self.assertEquals(f.read(), controller.contents)
+    finally:
+      os.remove(result)
+
+  def testCaptureMultipleProfiles(self):
+    controllers = [FakeController('c1'), FakeController('c2')]
+    result = profiler.CaptureProfile(controllers, 1, write_json=True)
+
+    try:
+      self.assertTrue(result.endswith('.zip'))
+      self.assertTrue(zipfile.is_zipfile(result))
+      with zipfile.ZipFile(result) as f:
+        self.assertEquals(
+            f.namelist(),
+            [controllers[0].filename[1:], controllers[1].filename[1:]])
+    finally:
+      os.remove(result)
diff --git a/catapult/systrace/profile_chrome/run_tests b/catapult/systrace/profile_chrome/run_tests
new file mode 100755
index 0000000..6ae1854
--- /dev/null
+++ b/catapult/systrace/profile_chrome/run_tests
@@ -0,0 +1,3 @@
+#!/bin/sh
+cd "$(dirname "$0")/.."
+exec python -m unittest discover profile_chrome '*_unittest.py' "$@"
diff --git a/catapult/systrace/profile_chrome/systrace_controller.py b/catapult/systrace/profile_chrome/systrace_controller.py
new file mode 100644
index 0000000..9386543
--- /dev/null
+++ b/catapult/systrace/profile_chrome/systrace_controller.py
@@ -0,0 +1,112 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import threading
+import zlib
+
+from devil.utils import cmd_helper
+
+from profile_chrome import controllers
+from profile_chrome import util
+
+
+_SYSTRACE_OPTIONS = [
+    # Compress the trace before sending it over USB.
+    '-z',
+    # Use a large trace buffer to increase the polling interval.
+    '-b', '16384'
+]
+
+# Interval in seconds for sampling systrace data.
+_SYSTRACE_INTERVAL = 15
+
+_TRACING_ON_PATH = '/sys/kernel/debug/tracing/tracing_on'
+
+
+class SystraceController(controllers.BaseController):
+  def __init__(self, device, categories, ring_buffer):
+    controllers.BaseController.__init__(self)
+    self._device = device
+    self._categories = categories
+    self._ring_buffer = ring_buffer
+    self._done = threading.Event()
+    self._thread = None
+    self._trace_data = None
+
+  def __repr__(self):
+    return 'systrace'
+
+  @staticmethod
+  def GetCategories(device):
+    return device.RunShellCommand('atrace --list_categories')
+
+  def StartTracing(self, _):
+    self._thread = threading.Thread(target=self._CollectData)
+    self._thread.start()
+
+  def StopTracing(self):
+    self._done.set()
+
+  def PullTrace(self):
+    self._thread.join()
+    self._thread = None
+    if self._trace_data:
+      output_name = 'systrace-%s' % util.GetTraceTimestamp()
+      with open(output_name, 'w') as out:
+        out.write(self._trace_data)
+      return output_name
+
+  def IsTracingOn(self):
+    result = self._RunAdbShellCommand(['cat', _TRACING_ON_PATH])
+    return result.strip() == '1'
+
+  def _RunAdbShellCommand(self, command):
+    # We use a separate interface to adb because the one from AndroidCommands
+    # isn't re-entrant.
+    # TODO(jbudorick) Look at providing a way to unhandroll this once the
+    #                 adb rewrite has fully landed.
+    device_param = (['-s', str(self._device)] if str(self._device) else [])
+    cmd = ['adb'] + device_param + ['shell'] + command
+    return cmd_helper.GetCmdOutput(cmd)
+
+  def _RunATraceCommand(self, command):
+    cmd = ['atrace', '--%s' % command] + _SYSTRACE_OPTIONS + self._categories
+    return self._RunAdbShellCommand(cmd)
+
+  def _ForceStopAtrace(self):
+    # atrace on pre-M Android devices cannot reliably be stopped
+    # asynchronously. Use synchronous mode to force stop.
+    cmd = ['atrace', '-t', '0']
+    return self._RunAdbShellCommand(cmd)
+
+  def _CollectData(self):
+    trace_data = []
+    self._RunATraceCommand('async_start')
+    try:
+      while not self._done.is_set():
+        self._done.wait(_SYSTRACE_INTERVAL)
+        if not self._ring_buffer or self._done.is_set():
+          trace_data.append(
+              self._DecodeTraceData(self._RunATraceCommand('async_dump')))
+    finally:
+      trace_data.append(
+          self._DecodeTraceData(self._RunATraceCommand('async_stop')))
+      if self.IsTracingOn():
+        self._ForceStopAtrace()
+    self._trace_data = ''.join([zlib.decompress(d) for d in trace_data])
+
+  @staticmethod
+  def _DecodeTraceData(trace_data):
+    try:
+      trace_start = trace_data.index('TRACE:')
+    except ValueError:
+      raise RuntimeError('Systrace start marker not found')
+    trace_data = trace_data[trace_start + 6:]
+
+    # Collapse CRLFs that are added by adb shell.
+    if trace_data.startswith('\r\n'):
+      trace_data = trace_data.replace('\r\n', '\n')
+
+    # Skip the initial newline.
+    return trace_data[1:]
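
A self-contained sketch of the decode path above, exercised with synthetic data (the compressed payload is made up): atrace -z prints a 'TRACE:' marker followed by zlib-compressed trace text, and adb shell may translate '\n' to '\r\n' in that binary stream.

# Sketch of _DecodeTraceData plus the zlib.decompress step from _CollectData.
import zlib

def decode_atrace_chunk(raw):
  start = raw.index(b'TRACE:')           # ValueError if the marker is missing.
  data = raw[start + len(b'TRACE:'):]
  if data.startswith(b'\r\n'):
    data = data.replace(b'\r\n', b'\n')  # Undo adb shell's CRLF translation.
  return zlib.decompress(data[1:])       # Skip the newline after the marker.

fake_chunk = b'capturing trace...\nTRACE:\n' + zlib.compress(
    b'# tracer: nop\n CPU#0 ...\n')
print(decode_atrace_chunk(fake_chunk))
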
diff --git a/catapult/systrace/profile_chrome/systrace_controller_unittest.py b/catapult/systrace/profile_chrome/systrace_controller_unittest.py
new file mode 100644
index 0000000..e827a08
--- /dev/null
+++ b/catapult/systrace/profile_chrome/systrace_controller_unittest.py
@@ -0,0 +1,37 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+
+from profile_chrome import controllers_unittest
+from profile_chrome import systrace_controller
+
+
+class SystraceControllerTest(controllers_unittest.BaseControllerTest):
+  def testGetCategories(self):
+    categories = \
+        systrace_controller.SystraceController.GetCategories(self.device)
+    self.assertTrue(categories)
+    assert 'gfx' in ' '.join(categories)
+
+  def testTracing(self):
+    categories = ['gfx', 'input', 'view']
+    ring_buffer = False
+    controller = systrace_controller.SystraceController(self.device,
+                                                        categories,
+                                                        ring_buffer)
+
+    interval = 1
+    try:
+      controller.StartTracing(interval)
+    finally:
+      controller.StopTracing()
+    result = controller.PullTrace()
+
+    self.assertFalse(controller.IsTracingOn())
+    try:
+      with open(result) as f:
+        self.assertTrue('CPU#' in f.read())
+    finally:
+      os.remove(result)
diff --git a/catapult/systrace/profile_chrome/third_party/COPYING b/catapult/systrace/profile_chrome/third_party/COPYING
new file mode 100644
index 0000000..d159169
--- /dev/null
+++ b/catapult/systrace/profile_chrome/third_party/COPYING
@@ -0,0 +1,339 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+                            NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/catapult/systrace/profile_chrome/third_party/README.chromium b/catapult/systrace/profile_chrome/third_party/README.chromium
new file mode 100644
index 0000000..5f58a65
--- /dev/null
+++ b/catapult/systrace/profile_chrome/third_party/README.chromium
@@ -0,0 +1,15 @@
+Name: Perf to JSON conversion script
+Short Name: perf_to_json
+URL: http://www.chromium.org
+Version: 0
+Date: 21.7.2014
+Revision: 0
+License: GPL
+License File: NOT_SHIPPED
+Security Critical: No
+
+Description:
+Script for converting perf script events into tracing JSON.
+
+Local Modifications:
+None.
diff --git a/catapult/systrace/profile_chrome/third_party/perf_to_tracing.py b/catapult/systrace/profile_chrome/third_party/perf_to_tracing.py
new file mode 100644
index 0000000..280937a
--- /dev/null
+++ b/catapult/systrace/profile_chrome/third_party/perf_to_tracing.py
@@ -0,0 +1,248 @@
+# Script for converting perf script events into tracing JSON.
+#
+# Generated by perf script -g python
+# Licensed under the terms of the GNU GPL License version 2
+
+import json
+import os
+import sys
+
+from collections import deque
+
+
+# Categorize DSOs by component.
+dso_to_comp = {
+    'libdvm.so': 'Java',
+    'libart.so': 'Java',
+    'libjavacore.so': 'Java',
+    'libandroid_runtime.so': 'Android',
+    'libgui.so': 'Android',
+    'libui.so': 'Android',
+    'libbinder.so': 'Android',
+    'libmemalloc.so': 'Android',
+    'libcrypto.so': 'Android',
+    'libcutils.so': 'Android',
+    'libutils.so': 'Android',
+    '[kernel.kallsyms]': 'Kernel',
+    'libc.so': 'Standard Lib',
+    'libstdc++.so': 'Standard Lib',
+    'libm.so': 'Standard Lib',
+    'libGLESv2_adreno.so': 'GPU Driver',
+    'libGLESv2_adreno200.so': 'GPU Driver',
+    'libq3dtools_adreno200.so': 'GPU Driver',
+    'libEGL_adreno.so': 'GPU Driver',
+    'libEGL_adreno200.so': 'GPU Driver',
+    'libEGL.so': 'GPU Driver',
+    'libgsl.so': 'GPU Driver',
+    'libGLESv2.so': 'GPU Driver',
+    'libsc-a3xx.so': 'GPU Driver',
+    'libadreno_utils.so': 'GPU Driver',
+    'eglsubAndroid.so': 'GPU Driver',
+    'gralloc.msm8960.so': 'GPU Driver',
+    'libadreno_utils': 'GPU Driver',
+    'libGLES_mali.so': 'GPU Driver',
+    'libchromeview.so': 'Chrome',
+    '[unknown]': '<unknown>',
+    '[UNKNOWN]': '<unknown>',
+}
+
+
+def FilterSymbolModule(module):
+  m = dso_to_comp.get(module, None)
+  if m:
+    return m
+  if module.find('libchrome.') == 0:
+    return 'Chrome'
+  if module.find('dalvik') >= 0 or module.find('@') >= 0:
+    return 'Java'
+  return module
+
+
+def FilterSymbolName(module, orign_module, name):
+  if module == 'Java':
+    return name
+  elif module == 'GPU Driver':
+    return name
+  if name == '':
+    return orign_module + ':unknown'
+  if name[0].isdigit() or name == '(nil)':
+    return orign_module + ':unknown'
+  return name
+
+
+class StackFrameNode:
+  def __init__(self, stack_id, name, category):
+    self.stack_id = stack_id
+    self.parent_id = 0
+    self.children = {}
+    self.category = category
+    self.name = name
+    self.samples = []
+    self.total_weight = 0.0
+    self.have_total_weight = False
+    self.parent = None
+
+  def ToDict(self, out_dict):
+    if self.stack_id:
+      node_dict = {}
+      node_dict['name'] = self.name
+      node_dict['category'] = self.category
+      if self.parent_id:
+        node_dict['parent'] = self.parent_id
+
+      out_dict[self.stack_id] = node_dict
+
+    for child in self.children.values():
+      child.ToDict(out_dict)
+    return out_dict
+
+  def GetTotalWeight(self):
+    if self.have_total_weight:
+      return self.total_weight
+    else:
+      # Sum up self samples weight, and children's total weights.
+      for s in self.samples:
+        self.total_weight += s.weight
+      for c in self.children.values():
+        self.total_weight += c.GetTotalWeight()
+      self.have_total_weight = True
+      return self.total_weight
+
+
+class PerfSample:
+  def __init__(self, stack_id, ts, cpu, tid, weight, samp_type, comm):
+    self.stack_id = stack_id
+    self.ts = ts
+    self.cpu = cpu
+    self.tid = tid
+    self.weight = weight
+    self.type = samp_type
+    self.comm = comm
+
+  def ToDict(self):
+    ret = {}
+    ret['ts'] = self.ts / 1000.0  # Timestamp in microseconds
+    ret['tid'] = self.tid  # Thread id
+    ret['cpu'] = self.cpu  # Sampled CPU
+    ret['weight'] = self.weight  # Sample weight
+    ret['name'] = self.type  # Sample type
+    ret['comm'] = self.comm  # Command (process/thread) name
+    assert self.stack_id != 0
+    if self.stack_id:
+      ret['sf'] = self.stack_id  # Stack frame id
+    return ret
+
+
+samples = []
+root_chain = StackFrameNode(0, 'root', '[unknown]')
+next_stack_id = 1
+tot_period = 0
+saved_period = 0
+
+
+def process_event(param_dict):
+  global next_stack_id
+  global saved_period
+  global tot_period
+
+  samp_comm = param_dict['comm']
+  samp_tid = param_dict['tid']
+  samp_cpu = param_dict['cpu']
+  samp_ts = param_dict['time']
+  samp_period = param_dict['period']
+  samp_type = param_dict['ev_name']
+  tot_period += samp_period
+
+  # Parse call chain.
+  seen_syms = set()
+  chain = deque()
+  for cs in param_dict['cs']:
+    cs_name = cs[0]
+    cs_dso = os.path.basename(cs[1])
+    cs_category = FilterSymbolModule(cs_dso)
+    cs_name = FilterSymbolName(cs_category, cs_dso, cs_name)
+
+    if cs_category != '<unknown>' or len(chain) == 0:
+      sym = (cs_name, cs_category)
+      if sym in seen_syms:
+        while chain[0] != sym:
+          seen_syms.remove(chain[0])
+          chain.popleft()
+      else:
+        seen_syms.add(sym)
+        chain.appendleft(sym)
+
+      # Discard garbage stacktrace before __pthread_start()
+      if cs_name == '__pthread_start(void*)':
+        break
+
+  # Done reading call chain.  Add to stack frame tree.
+  stack_frame = root_chain
+  for call in chain:
+    if call in stack_frame.children:
+      stack_frame = stack_frame.children[call]
+    else:
+      new_node = StackFrameNode(next_stack_id, call[0], call[1])
+      next_stack_id += 1
+      new_node.parent_id = stack_frame.stack_id
+      stack_frame.children[call] = new_node
+      stack_frame = new_node
+
+  # Save sample.
+  sample = PerfSample(stack_frame.stack_id,
+                  samp_ts,
+                  samp_cpu,
+                  samp_tid,
+                  samp_period,
+                  samp_type,
+                  samp_comm)
+  samples.append(sample)
+  stack_frame.samples.append(sample)
+  saved_period += samp_period
+
+
+def trace_begin():
+  pass
+
+
+def trace_end():
+  # Return siblings of a call tree node.
+  def GetNodeSiblings(node):
+    if not node:
+      return []
+    if not node.parent:
+      return []
+    return node.parent.children.values()
+
+  # Try to reduce misplaced stack leaves by moving them up into sibling nodes.
+  def FixCallTree(node, parent):
+    # Get siblings of node's parent.
+    node.parent = parent
+    parent_siblings = GetNodeSiblings(parent)
+
+    # If parent's sibling has same node name, has no children and small weight,
+    # transplant sibling's samples into the current node.
+    for sibling in parent_siblings:
+      if sibling.name == node.name and \
+          len(sibling.children) == 0 and \
+          sibling.GetTotalWeight() <= node.GetTotalWeight() * 0.15:
+
+        # Transplant samples from sibling to current node.
+        for samp in sibling.samples:
+          samp.stack_id = node.stack_id
+          node.samples.append(samp)
+        sibling.samples = []
+        break
+
+    # Recurse child nodes.
+    for c in node.children.values():
+      FixCallTree(c, node)
+
+  FixCallTree(root_chain, None)
+
+  trace_dict = {}
+  trace_dict['samples'] = [s.ToDict() for s in samples]
+  trace_dict['stackFrames'] = root_chain.ToDict({})
+  trace_dict['traceEvents'] = []
+
+  json.dump(trace_dict, sys.stdout, indent=1)
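
For orientation, a hand-written example (values invented) of the structure trace_end() writes to stdout; field names follow the ToDict() methods above, and json.dump will turn the integer frame ids into string keys.

# Illustrative shape of the converted trace; values are made up.
example_trace = {
    'traceEvents': [],
    'stackFrames': {
        1: {'name': 'main', 'category': 'Chrome'},
        2: {'name': 'DoWork()', 'category': 'Chrome', 'parent': 1},
    },
    'samples': [
        # ts is in microseconds; sf points at the leaf stack frame id.
        {'ts': 1000.25, 'tid': 4242, 'cpu': 0, 'weight': 250000,
         'name': 'cycles', 'comm': 'ChromeMain', 'sf': 2},
    ],
}
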
diff --git a/catapult/systrace/profile_chrome/trace_packager.py b/catapult/systrace/profile_chrome/trace_packager.py
new file mode 100644
index 0000000..be5f808
--- /dev/null
+++ b/catapult/systrace/profile_chrome/trace_packager.py
@@ -0,0 +1,93 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import codecs
+import gzip
+import json
+import os
+import shutil
+import sys
+import zipfile
+
+from profile_chrome import util
+
+_CATAPULT_DIR = os.path.join(
+    os.path.dirname(os.path.abspath(__file__)), '..', '..')
+sys.path.append(os.path.join(_CATAPULT_DIR, 'tracing'))
+# pylint: disable=F0401
+from tracing_build import trace2html
+
+
+def _PackageTracesAsHtml(trace_files, html_file):
+  with codecs.open(html_file, mode='w', encoding='utf-8') as f:
+    trace2html.WriteHTMLForTracesToFile(trace_files, f)
+  for trace_file in trace_files:
+    os.unlink(trace_file)
+
+
+def _CompressFile(host_file, output):
+  with gzip.open(output, 'wb') as out, \
+      open(host_file, 'rb') as input_file:
+    out.write(input_file.read())
+  os.unlink(host_file)
+
+
+def _ArchiveFiles(host_files, output):
+  with zipfile.ZipFile(output, 'w', zipfile.ZIP_DEFLATED) as z:
+    for host_file in host_files:
+      z.write(host_file)
+      os.unlink(host_file)
+
+
+def _MergeTracesIfNeeded(trace_files):
+  if len(trace_files) <= 1:
+    return trace_files
+  merge_candidates = []
+  for trace_file in trace_files:
+    with open(trace_file) as f:
+      # Try to detect a JSON file cheaply since that's all we can merge.
+      if f.read(1) != '{':
+        continue
+      f.seek(0)
+      try:
+        json_data = json.load(f)
+      except ValueError:
+        continue
+      merge_candidates.append((trace_file, json_data))
+  if len(merge_candidates) <= 1:
+    return trace_files
+
+  other_files = [f for f in trace_files
+                 if not f in [c[0] for c in merge_candidates]]
+  merged_file, merged_data = merge_candidates[0]
+  for trace_file, json_data in merge_candidates[1:]:
+    for key, value in json_data.items():
+      if not merged_data.get(key) or json_data[key]:
+        merged_data[key] = value
+    os.unlink(trace_file)
+
+  with open(merged_file, 'w') as f:
+    json.dump(merged_data, f)
+  return [merged_file] + other_files
+
+
+def PackageTraces(trace_files, output=None, compress=False, write_json=False):
+  trace_files = _MergeTracesIfNeeded(trace_files)
+  if not write_json:
+    html_file = os.path.splitext(trace_files[0])[0] + '.html'
+    _PackageTracesAsHtml(trace_files, html_file)
+    trace_files = [html_file]
+
+  if compress and len(trace_files) == 1:
+    result = output or trace_files[0] + '.gz'
+    _CompressFile(trace_files[0], result)
+  elif len(trace_files) > 1:
+    result = output or 'chrome-combined-trace-%s.zip' % util.GetTraceTimestamp()
+    _ArchiveFiles(trace_files, result)
+  elif output:
+    result = output
+    shutil.move(trace_files[0], result)
+  else:
+    result = trace_files[0]
+  return result
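
A pure-logic sketch of which artifact PackageTraces ends up producing for a given flag combination; it only mirrors the branches above rather than calling the real code, and the placeholder names in angle brackets are not real paths.

# Sketch: mirrors PackageTraces' output selection above (no real I/O).
def expected_output(num_files_after_merge, compress, write_json, output=None):
  if not write_json:
    num_files = 1                        # All traces fold into one HTML file.
    default_single = '<first trace>.html'
  else:
    num_files = num_files_after_merge
    default_single = '<first trace>'
  if compress and num_files == 1:
    return output or default_single + '.gz'
  if num_files > 1:
    return output or 'chrome-combined-trace-<timestamp>.zip'
  return output or default_single

print(expected_output(2, compress=False, write_json=True))  # ...zip
print(expected_output(1, compress=True, write_json=False))  # ...html.gz
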
diff --git a/catapult/systrace/profile_chrome/trace_packager_unittest.py b/catapult/systrace/profile_chrome/trace_packager_unittest.py
new file mode 100644
index 0000000..97c1332
--- /dev/null
+++ b/catapult/systrace/profile_chrome/trace_packager_unittest.py
@@ -0,0 +1,35 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import tempfile
+import unittest
+
+from profile_chrome import trace_packager
+
+
+class TracePackagerTest(unittest.TestCase):
+  def testJsonTraceMerging(self):
+    t1 = {'traceEvents': [{'ts': 123, 'ph': 'b'}]}
+    t2 = {'traceEvents': [], 'stackFrames': ['blah']}
+
+    # Both trace files are consumed by the merge (the merged result is moved
+    # to the output file), so there's no need for NamedTemporaryFile to do the
+    # deletion.
+    with tempfile.NamedTemporaryFile(delete=False) as f1, \
+        tempfile.NamedTemporaryFile(delete=False) as f2:
+      f1.write(json.dumps(t1))
+      f2.write(json.dumps(t2))
+      f1.flush()
+      f2.flush()
+
+      with tempfile.NamedTemporaryFile() as output:
+        trace_packager.PackageTraces([f1.name, f2.name],
+                                     output.name,
+                                     compress=False,
+                                     write_json=True)
+        with open(output.name) as output:
+          output = json.load(output)
+          self.assertEquals(output['traceEvents'], t1['traceEvents'])
+          self.assertEquals(output['stackFrames'], t2['stackFrames'])
diff --git a/catapult/systrace/profile_chrome/ui.py b/catapult/systrace/profile_chrome/ui.py
new file mode 100644
index 0000000..30ebd9b
--- /dev/null
+++ b/catapult/systrace/profile_chrome/ui.py
@@ -0,0 +1,27 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import select
+import sys
+
+
+def PrintMessage(heading, eol='\n'):
+  sys.stdout.write('%s%s' % (heading, eol))
+  sys.stdout.flush()
+
+
+def WaitForEnter(timeout):
+  select.select([sys.stdin], [], [], timeout)
+
+
+def EnableTestMode():
+  def NoOp(*_, **__):  # pylint: disable=unused-argument
+    pass
+  # pylint: disable=W0601
+  global PrintMessage
+  global WaitForEnter
+  PrintMessage = NoOp
+  WaitForEnter = NoOp
+  logging.getLogger().disabled = True
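
A small sketch of the select-based wait used above (POSIX-only; select on sys.stdin does not work on Windows): it blocks for at most `timeout` seconds and returns early once stdin becomes readable.

# Sketch: same pattern as WaitForEnter, but reporting what happened.
import select
import sys
import time

def wait_for_enter(timeout):
  start = time.time()
  readable, _, _ = select.select([sys.stdin], [], [], timeout)
  return bool(readable), time.time() - start

# e.g.: pressed, waited = wait_for_enter(5.0)
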
diff --git a/catapult/systrace/profile_chrome/util.py b/catapult/systrace/profile_chrome/util.py
new file mode 100644
index 0000000..75ef1b6
--- /dev/null
+++ b/catapult/systrace/profile_chrome/util.py
@@ -0,0 +1,8 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import time
+
+def GetTraceTimestamp():
+  return time.strftime('%Y-%m-%d-%H%M%S', time.localtime())
diff --git a/catapult/systrace/pylintrc b/catapult/systrace/pylintrc
new file mode 100644
index 0000000..eb051e6
--- /dev/null
+++ b/catapult/systrace/pylintrc
@@ -0,0 +1,67 @@
+[MESSAGES CONTROL]
+
+# Disable the message, report, category or checker with the given id(s).
+# TODO: Shrink this list to as small as possible.
+disable=
+  design,
+  similarities,
+
+  abstract-class-not-used,
+  bad-builtin,
+  bad-continuation,
+  eval-used,
+  fixme,
+  invalid-name,
+  locally-disabled,
+  missing-docstring,
+  protected-access,
+  star-args,
+
+
+[REPORTS]
+
+# Don't write out full reports, just messages.
+reports=no
+
+
+[BASIC]
+
+# Regular expression which should only match correct function names.
+function-rgx=^(?:(?P<exempt>setUp|tearDown|setUpModule|tearDownModule)|(?P<camel_case>_?[A-Z][a-zA-Z0-9]*))$
+
+# Regular expression which should only match correct method names.
+method-rgx=^(?:(?P<exempt>_[a-z0-9_]+__|get|post|runTest|setUp|tearDown|setUpTestCase|tearDownTestCase|setupSelf|tearDownClass|setUpClass)|(?P<camel_case>(_{0,2}|test|assert)[A-Z][a-zA-Z0-9_]*))$
+
+# Regular expression which should only match correct argument names.
+argument-rgx=^[a-z][a-z0-9_]*$
+
+# Regular expression which should only match correct variable names.
+variable-rgx=^[a-z][a-z0-9_]*$
+
+# Good variable names which should always be accepted, separated by a comma.
+good-names=main,_
+
+# List of builtin function names that should not be used, separated by a comma.
+bad-functions=apply,input,reduce
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused imports in __init__ files.
+init-import=no
+
+# A regular expression matching names used for dummy variables (i.e. not used).
+dummy-variables-rgx=^\*{0,2}(_$|unused_)
+
+
+[TYPECHECK]
+
+# Tells whether missing members accessed in a mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+
+[FORMAT]
+
+# We use two spaces for indents, instead of the usual four spaces or tab.
+indent-string='  '
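
As an illustration of what the [BASIC] naming regexes above accept, a short sketch checked with Python's re module; the sample names are arbitrary.

# Sketch: which function names the function-rgx above accepts.
import re

FUNCTION_RGX = (r'^(?:(?P<exempt>setUp|tearDown|setUpModule|tearDownModule)|'
                r'(?P<camel_case>_?[A-Z][a-zA-Z0-9]*))$')

for name in ('CaptureProfile', '_PrepareDevice', 'setUpModule',
             'capture_profile'):
  print('%-20s %s' % (name, bool(re.match(FUNCTION_RGX, name))))
# The first three match (CamelCase or unittest hooks); snake_case does not.
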
diff --git a/catapult/systrace/systrace/__init__.py b/catapult/systrace/systrace/__init__.py
new file mode 100644
index 0000000..ca3e206
--- /dev/null
+++ b/catapult/systrace/systrace/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/catapult/systrace/systrace/agents/atrace_agent.py b/catapult/systrace/systrace/agents/atrace_agent.py
index 0b9d743..3125821 100644
--- a/catapult/systrace/systrace/agents/atrace_agent.py
+++ b/catapult/systrace/systrace/agents/atrace_agent.py
@@ -10,8 +10,8 @@
 import time
 import zlib
 
-import systrace_agent
-import util
+from systrace import systrace_agent
+from systrace import util
 
 # Text that ADB sends, but does not need to be displayed to the user.
 ADB_IGNORE_REGEXP = r'^capturing trace\.\.\. done|^capturing trace\.\.\.'
@@ -38,16 +38,16 @@
 # This list is based on the tags in frameworks/native/include/utils/Trace.h for
 # legacy platform.
 LEGACY_TRACE_TAG_BITS = (
-  ('gfx',       1<<1),
-  ('input',     1<<2),
-  ('view',      1<<3),
-  ('webview',   1<<4),
-  ('wm',        1<<5),
-  ('am',        1<<6),
-  ('sm',        1<<7),
-  ('audio',     1<<8),
-  ('video',     1<<9),
-  ('camera',    1<<10),
+    ('gfx', 1 << 1),
+    ('input', 1 << 2),
+    ('view', 1 << 3),
+    ('webview', 1 << 4),
+    ('wm', 1 << 5),
+    ('am', 1 << 6),
+    ('sm', 1 << 7),
+    ('audio', 1 << 8),
+    ('video', 1 << 9),
+    ('camera', 1 << 10),
 )
 
 
@@ -75,6 +75,7 @@
 
 
 class AtraceAgent(systrace_agent.SystraceAgent):
+
   def __init__(self, options, categories):
     super(AtraceAgent, self).__init__(options, categories)
     self._expect_trace = False
@@ -312,7 +313,9 @@
 
     return trace_data
 
+
 class AtraceLegacyAgent(AtraceAgent):
+
   def _construct_list_categories_command(self):
     LEGACY_CATEGORIES = """       sched - CPU Scheduling
         freq - CPU Frequency
@@ -376,6 +379,7 @@
 
     return extra_args
 
+
 class BootAgent(AtraceAgent):
   """AtraceAgent that specializes in tracing the boot sequence."""
 
@@ -419,6 +423,7 @@
           atrace_args + ['&&'] + setprop_args + ['&&'] + rm_args,
           self._options.device_serial)
 
+
 class FileReaderThread(threading.Thread):
   """Reads data from a file/pipe on a worker thread.
 
@@ -479,6 +484,7 @@
     assert chunk_size > 0
     self._chunk_size = chunk_size
 
+
 def get_default_categories(device_serial):
   categories_output, return_code = util.run_adb_shell(LIST_CATEGORIES_ARGS,
                                                     device_serial)
@@ -512,7 +518,7 @@
   """
 
   threads = {}
-  #start at line 1 to skip the top of the ps dump:
+  # start at line 1 to skip the top of the ps dump:
   text = trace_text.splitlines()
   for line in text[1:]:
     cols = line.split(None, 8)
@@ -523,6 +529,7 @@
 
   return threads
 
+
 def extract_tgids(trace_text):
   """Removes the procfs dump from the given trace text
   Args:
@@ -535,11 +542,12 @@
   for line in text:
     result = re.match('^/proc/([0-9]+)/task/([0-9]+)', line)
     if result:
-      parent_pid, tgid = result.group(1,2)
-      tgid_2pid[tgid] = parent_pid;
+      parent_pid, tgid = result.group(1, 2)
+      tgid_2pid[tgid] = parent_pid
 
   return tgid_2pid
 
+
 def strip_and_decompress_trace(trace_data):
   """Fixes new-lines and decompresses trace data.
 
@@ -572,7 +580,6 @@
   return trace_data
 
 
-
 def fix_thread_names(trace_data, thread_names):
   """Replaces thread ids with their names.
 
@@ -615,9 +622,9 @@
     tid = m.group(2)
     if (int(tid) > 0 and m.group(1) != '<idle>' and m.group(3) == '(-----)'
         and tid in pid2_tgid):
-          # returns Proc_name-PID (TGID)
-          # Binder_2-381 (-----) becomes Binder_2-381 (128)
-          return m.group(1) + '-' + m.group(2) + ' ( '+ pid2_tgid[tid]+ ')'
+      # returns Proc_name-PID (TGID)
+      # Binder_2-381 (-----) becomes Binder_2-381 (128)
+      return m.group(1) + '-' + m.group(2) + ' ( ' + pid2_tgid[tid] + ')'
 
     return m.group(0)
 
@@ -625,8 +632,7 @@
   # Binder_2-895 (-----)
   trace_data = re.sub(r'^\s*(\S+)-(\d+)\s+(\(\S+\))', repl, trace_data,
                       flags=re.MULTILINE)
-  return trace_data;
-
+  return trace_data
 
 
 def fix_circular_traces(out):
@@ -665,6 +671,7 @@
     out = out[:end_of_header] + out[start_of_full_trace:]
   return out
 
+
 def do_popen(args):
   try:
     adb = subprocess.Popen(args, stdout=subprocess.PIPE,
@@ -678,6 +685,7 @@
 
   return adb
 
+
 def do_preprocess_adb_cmd(command, serial):
   args = [command]
   dump, ret_code = util.run_adb_shell(args, serial)
diff --git a/catapult/systrace/systrace/agents/atrace_agent_unittest.py b/catapult/systrace/systrace/agents/atrace_agent_unittest.py
index 46a5ed2..5fbf24b 100644
--- a/catapult/systrace/systrace/agents/atrace_agent_unittest.py
+++ b/catapult/systrace/systrace/agents/atrace_agent_unittest.py
@@ -8,9 +8,8 @@
 import os
 import unittest
 
-import agents.atrace_agent as atrace_agent
-import systrace
-import util
+from systrace import systrace
+from systrace.agents import atrace_agent
 
 DEVICE_SERIAL = 'AG8404EC0444AGC'
 ATRACE_ARGS = ['atrace', '-z', '-t', '10', '-b', '4096']
@@ -55,6 +54,7 @@
 
 
 class AtraceAgentTest(unittest.TestCase):
+
   def test_construct_trace_command(self):
     options, categories = systrace.parse_options(SYSTRACE_CMD)
     agent = atrace_agent.AtraceAgent(options, categories)
@@ -62,10 +62,9 @@
     self.assertEqual(' '.join(TRACE_CMD), ' '.join(tracer_args))
     self.assertEqual(True, agent.expect_trace())
 
-
   def test_extract_thread_list(self):
     with contextlib.nested(open(ATRACE_EXTRACTED_THREADS, 'r'),
-                           open(ATRACE_THREAD_LIST)) as (f1,f2):
+                           open(ATRACE_THREAD_LIST)) as (f1, f2):
 
       atrace_result = f1.read()
       ps_dump = f2.read()
@@ -100,7 +99,7 @@
     with contextlib.nested(open(ATRACE_DATA_STRIPPED, 'r'),
                            open(ATRACE_DATA_RAW, 'r')) as (f1, f2):
       atrace_data = f1.read()
-      atrace_data_raw = f2.read();
+      atrace_data_raw = f2.read()
 
       options, categories = systrace.parse_options(STOP_FIX_UPS)
       agent = atrace_agent.AtraceAgent(options, categories)
@@ -143,13 +142,14 @@
 
       atrace_data = f2.read()
       tgid_map = eval(f1.read())
-      fixed = f3.read();
+      fixed = f3.read()
 
       res = atrace_agent.fix_missing_tgids(atrace_data, tgid_map)
       self.assertEqual(res, fixed)
 
 
 class AtraceLegacyAgentTest(unittest.TestCase):
+
   def test_construct_trace_command(self):
     options, categories = systrace.parse_options(SYSTRACE_CMD)
     agent = atrace_agent.AtraceLegacyAgent(options, categories)
@@ -159,6 +159,7 @@
 
 
 class BootAgentTest(unittest.TestCase):
+
   def test_boot(self):
     options, categories = systrace.parse_options(SYSTRACE_BOOT_CMD)
     agent = atrace_agent.BootAgent(options, categories)
diff --git a/catapult/systrace/systrace/agents/ftrace_agent.py b/catapult/systrace/systrace/agents/ftrace_agent.py
index 47d8cfe..758fc61 100644
--- a/catapult/systrace/systrace/agents/ftrace_agent.py
+++ b/catapult/systrace/systrace/agents/ftrace_agent.py
@@ -6,9 +6,11 @@
 import sys
 import time
 
-import systrace_agent
+from systrace import systrace_agent
+
 
 class FtraceAgentIo(object):
+
   @staticmethod
   def writeFile(path, data):
     with open(path, 'w') as f:
@@ -23,53 +25,71 @@
   def haveWritePermissions(path):
     return os.access(path, os.W_OK)
 
-FT_DIR          = "/sys/kernel/debug/tracing/"
-FT_CLOCK        = FT_DIR + "trace_clock"
-FT_BUFFER_SIZE  = FT_DIR + "buffer_size_kb"
-FT_TRACER       = FT_DIR + "current_tracer"
-FT_PRINT_TGID   = FT_DIR + "options/print-tgid"
-FT_TRACE_ON     = FT_DIR + "tracing_on"
-FT_TRACE        = FT_DIR + "trace"
+FT_DIR = "/sys/kernel/debug/tracing/"
+FT_CLOCK = FT_DIR + "trace_clock"
+FT_BUFFER_SIZE = FT_DIR + "buffer_size_kb"
+FT_TRACER = FT_DIR + "current_tracer"
+FT_PRINT_TGID = FT_DIR + "options/print-tgid"
+FT_TRACE_ON = FT_DIR + "tracing_on"
+FT_TRACE = FT_DIR + "trace"
 FT_TRACE_MARKER = FT_DIR + "trace_marker"
-FT_OVERWRITE    = FT_DIR + "options/overwrite"
+FT_OVERWRITE = FT_DIR + "options/overwrite"
 
 all_categories = {
-  "sched"      : {"desc" : "CPU Scheduling",
-                  "req" : ["sched/sched_switch/", "sched/sched_wakeup/"]},
-  "freq"       : {"desc" : "CPU Frequency",
-                  "req" : ["power/cpu_frequency/", "power/clock_set_rate/"]},
-  "irq"        : {"desc" : "CPU IRQS and IPIS",
-                  "req" : ["irq/"],
-                  "opt" : ["ipi/"]},
-  "workq"      : {"desc" : "Kernel workqueues",
-                  "req" : ["workqueue/"]},
-  "memreclaim" : {"desc" : "Kernel Memory Reclaim",
-                  "req" : ["vmscan/mm_vmscan_direct_reclaim_begin/",
-                           "vmscan/mm_vmscan_direct_reclaim_end/",
-                           "vmscan/mm_vmscan_kswapd_wake/",
-                           "vmscan/mm_vmscan_kswapd_sleep/"]},
-  "idle"       : {"desc" : "CPU Idle",
-                  "req" : ["power/cpu_idle/"]},
-  "regulators" : {"desc" : "Voltage and Current Regulators",
-                  "req" : ["regulator/"]},
-  "disk"       : {"desc" : "Disk I/O",
-                  "req" : ["block/block_rq_issue/",
-                           "block/block_rq_complete/"],
-                  "opt" : ["f2fs/f2fs_sync_file_enter/",
-                           "f2fs/f2fs_sync_file_exit/",
-                           "f2fs/f2fs_write_begin/",
-                           "f2fs/f2fs_write_end/",
-                           "ext4/ext4_da_write_begin/",
-                           "ext4/ext4_da_write_end/",
-                           "ext4/ext4_sync_file_enter/",
-                           "ext4/ext4_sync_file_exit/"]}
+    "sched": {
+          "desc": "CPU Scheduling",
+          "req": ["sched/sched_switch/", "sched/sched_wakeup/"]
+    },
+    "freq": {
+          "desc": "CPU Frequency",
+          "req": ["power/cpu_frequency/", "power/clock_set_rate/"]
+    },
+    "irq": {
+          "desc": "CPU IRQS and IPIS",
+          "req": ["irq/"],
+          "opt": ["ipi/"]
+    },
+    "workq": {
+          "desc": "Kernel workqueues",
+          "req": ["workqueue/"]
+    },
+    "memreclaim": {
+          "desc": "Kernel Memory Reclaim",
+          "req": ["vmscan/mm_vmscan_direct_reclaim_begin/",
+                  "vmscan/mm_vmscan_direct_reclaim_end/",
+                  "vmscan/mm_vmscan_kswapd_wake/",
+                  "vmscan/mm_vmscan_kswapd_sleep/"]
+    },
+    "idle": {
+          "desc": "CPU Idle",
+          "req": ["power/cpu_idle/"]
+    },
+    "regulators": {
+          "desc": "Voltage and Current Regulators",
+          "req": ["regulator/"]
+    },
+    "disk": {
+          "desc": "Disk I/O",
+          "req": ["block/block_rq_issue/",
+                  "block/block_rq_complete/"],
+          "opt": ["f2fs/f2fs_sync_file_enter/",
+                  "f2fs/f2fs_sync_file_exit/",
+                  "f2fs/f2fs_write_begin/",
+                  "f2fs/f2fs_write_end/",
+                  "ext4/ext4_da_write_begin/",
+                  "ext4/ext4_da_write_end/",
+                  "ext4/ext4_sync_file_enter/",
+                  "ext4/ext4_sync_file_exit/"]
+    }
 }
 
+
 def try_create_agent(options, categories):
   if options.target != 'linux':
     return False
   return FtraceAgent(options, categories, FtraceAgentIo)
 
+
 class FtraceAgent(systrace_agent.SystraceAgent):
 
   def __init__(self, options, categories, fio=FtraceAgentIo):
@@ -88,18 +108,18 @@
     self._expect_trace = False
 
   def _get_trace_buffer_size(self):
-      buffer_size = 4096
-      if ((self._options.trace_buf_size is not None)
-          and (self._options.trace_buf_size > 0)):
-        buffer_size = self._options.trace_buf_size
-      return buffer_size
+    buffer_size = 4096
+    if ((self._options.trace_buf_size is not None)
+        and (self._options.trace_buf_size > 0)):
+      buffer_size = self._options.trace_buf_size
+    return buffer_size
 
   def _get_trace_time(self):
-      wait_time = 5
-      if ((self._options.trace_time is not None)
-          and (self._options.trace_time > 0)):
-        wait_time = self._options.trace_time
-      return wait_time
+    wait_time = 5
+    if ((self._options.trace_time is not None)
+        and (self._options.trace_time > 0)):
+      wait_time = self._options.trace_time
+    return wait_time
 
   def start(self):
     """Start tracing.
@@ -190,18 +210,18 @@
     ret = []
     for event in all_categories:
       if self._is_category_available(event):
-         ret.append(event)
+        ret.append(event)
     return ret
 
   def _print_avail_categories(self):
-     avail = self._avail_categories()
-     if len(avail):
-       print "tracing options:"
-       for category in self._avail_categories():
-         desc = all_categories[category]["desc"]
-         print "{0: <16}".format(category), ": ", desc
-     else:
-       print "No tracing categories available - perhaps you need root?"
+    avail = self._avail_categories()
+    if len(avail):
+      print "tracing options:"
+      for category in self._avail_categories():
+        desc = all_categories[category]["desc"]
+        print "{0: <16}".format(category), ": ", desc
+    else:
+      print "No tracing categories available - perhaps you need root?"
 
   def _category_enable_paths(self, category):
     events_dir = FT_DIR + "events/"
diff --git a/catapult/systrace/systrace/agents/ftrace_agent_unittest.py b/catapult/systrace/systrace/agents/ftrace_agent_unittest.py
index 035ac09..9e8ea60 100644
--- a/catapult/systrace/systrace/agents/ftrace_agent_unittest.py
+++ b/catapult/systrace/systrace/agents/ftrace_agent_unittest.py
@@ -4,23 +4,22 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-import contextlib
-import os
 import unittest
 
-import agents.ftrace_agent as ftrace_agent
-import systrace
-import util
+from systrace import systrace
+from systrace.agents import ftrace_agent
 
 SYSTRACE_HOST_CMD_DEFAULT = ['./systrace.py', '--target=linux']
 FT_DIR = "/sys/kernel/debug/tracing/"
 FT_EVENT_DIR = FT_DIR + "events/"
 FT_TRACE_ON = FT_DIR + "tracing_on"
 FT_TRACE = FT_DIR + "trace"
-FT_BUFFER_SIZE  = FT_DIR + "buffer_size_kb"
+FT_BUFFER_SIZE = FT_DIR + "buffer_size_kb"
+
 
 def make_test_io_interface(permitted_files):
   class TestIoImpl(object):
+
     @staticmethod
     def writeFile(path, data):
       permitted_files[path] = data
@@ -37,13 +36,14 @@
       return path in permitted_files
   return TestIoImpl
 
+
 class FtraceAgentTest(unittest.TestCase):
 
   def test_avail_categories(self):
     # sched only has required events
     permitted_files = {
-      FT_EVENT_DIR + "sched/sched_switch/enable" : "0",
-      FT_EVENT_DIR + "sched/sched_wakeup/enable" : "0"
+      FT_EVENT_DIR + "sched/sched_switch/enable": "0",
+      FT_EVENT_DIR + "sched/sched_wakeup/enable": "0"
     }
     io_interface = make_test_io_interface(permitted_files)
     options, categories = systrace.parse_options(SYSTRACE_HOST_CMD_DEFAULT)
@@ -59,8 +59,8 @@
 
     # block has some required, some optional events
     permitted_files = {
-      FT_EVENT_DIR + "block/block_rq_complete/enable" : "0",
-      FT_EVENT_DIR + "block/block_rq_issue/enable" : "0"
+      FT_EVENT_DIR + "block/block_rq_complete/enable": "0",
+      FT_EVENT_DIR + "block/block_rq_issue/enable": "0"
     }
     io_interface = make_test_io_interface(permitted_files)
     options, categories = systrace.parse_options(SYSTRACE_HOST_CMD_DEFAULT)
@@ -70,11 +70,11 @@
   def test_tracing_bootstrap(self):
     workq_event_path = FT_EVENT_DIR + "workqueue/enable"
     permitted_files = {
-      workq_event_path : "0",
+      workq_event_path: "0",
       FT_TRACE: "x"
     }
     io_interface = make_test_io_interface(permitted_files)
-    systrace_cmd = SYSTRACE_HOST_CMD_DEFAULT  + ["workq"]
+    systrace_cmd = SYSTRACE_HOST_CMD_DEFAULT + ["workq"]
     options, categories = systrace.parse_options(systrace_cmd)
     agent = ftrace_agent.FtraceAgent(options, categories, io_interface)
     self.assertEqual(['workq'], agent._avail_categories())
@@ -104,11 +104,11 @@
     ipi_event_path = FT_EVENT_DIR + "ipi/enable"
     irq_event_path = FT_EVENT_DIR + "irq/enable"
     permitted_files = {
-      ipi_event_path : "0",
-      irq_event_path : "0"
+      ipi_event_path: "0",
+      irq_event_path: "0"
     }
     io_interface = make_test_io_interface(permitted_files)
-    systrace_cmd = SYSTRACE_HOST_CMD_DEFAULT  + ["irq"]
+    systrace_cmd = SYSTRACE_HOST_CMD_DEFAULT + ["irq"]
     options, categories = systrace.parse_options(systrace_cmd)
     agent = ftrace_agent.FtraceAgent(options, categories, io_interface)
     self.assertEqual(['irq'], agent._avail_categories())
diff --git a/catapult/systrace/systrace/systrace-legacy.py b/catapult/systrace/systrace/systrace-legacy.py
index 859a416..aac4c17 100755
--- a/catapult/systrace/systrace/systrace-legacy.py
+++ b/catapult/systrace/systrace/systrace-legacy.py
@@ -10,29 +10,36 @@
 the kernel.  It creates an HTML file for visualizing the trace.
 """
 
-import errno, optparse, os, select, subprocess, sys, time, zlib
+import optparse
+import os
+import select
+import subprocess
+import sys
+import zlib
 
 # This list is based on the tags in frameworks/native/include/utils/Trace.h.
 trace_tag_bits = {
-  'gfx':      1<<1,
-  'input':    1<<2,
-  'view':     1<<3,
-  'webview':  1<<4,
-  'wm':       1<<5,
-  'am':       1<<6,
-  'sync':     1<<7,
-  'audio':    1<<8,
-  'video':    1<<9,
-  'camera':   1<<10,
+  'gfx': 1 << 1,
+  'input': 1 << 2,
+  'view': 1 << 3,
+  'webview': 1 << 4,
+  'wm': 1 << 5,
+  'am': 1 << 6,
+  'sync': 1 << 7,
+  'audio': 1 << 8,
+  'video': 1 << 9,
+  'camera': 1 << 10,
 }
 
 flattened_html_file = 'systrace_trace_viewer.html'
 
+
 def add_adb_serial(command, serial):
   if serial != None:
     command.insert(1, serial)
     command.insert(1, '-s')
 
+
 def main():
   parser = optparse.OptionParser()
   parser.add_option('-o', dest='output_file', help='write HTML to FILE',
@@ -73,7 +80,7 @@
                     type='string', help='')
   parser.add_option('-e', '--serial', dest='device_serial', type='string',
                     help='adb device serial number')
-  options, unused_args = parser.parse_args() # pylint: disable=unused-variable
+  options, unused_args = parser.parse_args()  # pylint: disable=unused-variable
 
   if options.link_assets or options.asset_dir != 'trace-viewer':
     parser.error('--link-assets and --asset-dir is deprecated.')
@@ -165,7 +172,7 @@
           if line == 'TRACE:\n':
             sys.stdout.write("downloading trace...")
             sys.stdout.flush()
-            out = ''.join(lines[i+1:])
+            out = ''.join(lines[i + 1:])
             html_prefix = read_asset(script_dir, 'prefix.html')
             html_file = open(html_filename, 'w')
             html_file.write(
@@ -204,6 +211,7 @@
     print >> sys.stderr, ('An error occured while capturing the trace.  Output '
                           'file was not written.')
 
+
 def read_asset(src_dir, filename):
   return open(os.path.join(src_dir, filename)).read()
 
diff --git a/catapult/systrace/systrace/systrace.py b/catapult/systrace/systrace/systrace.py
index 2a5aa38..738164b 100755
--- a/catapult/systrace/systrace/systrace.py
+++ b/catapult/systrace/systrace/systrace.py
@@ -25,8 +25,6 @@
 import optparse
 import os
 
-import util
-
 
 # The default agent directory.
 DEFAULT_AGENT_DIR = 'agents'
@@ -139,7 +137,7 @@
 
   html_file.write(html_suffix)
   html_file.close()
-  print('\n    wrote file://%s\n' % os.path.abspath(html_filename))
+  print '\n    wrote file://%s\n' % os.path.abspath(html_filename)
 
 
 def create_agents(options, categories):
@@ -193,7 +191,7 @@
     sys.exit(1)
 
   try:
-    update_systrace_trace_viewer = __import__('update_systrace_trace_viewer')
+    from . import update_systrace_trace_viewer
   except ImportError:
     pass
   else:
@@ -216,5 +214,11 @@
   return open(os.path.join(src_dir, filename)).read()
 
 
-if __name__ == '__main__':
+if __name__ == '__main__' and __package__ is None:
+  # Add current package to search path.
+  _SYSTRACE_DIR = os.path.abspath(
+      os.path.join(os.path.dirname(__file__), os.path.pardir))
+  sys.path.insert(0, _SYSTRACE_DIR)
+  __package__ = "systrace"  # pylint: disable=redefined-builtin
+
   main()
diff --git a/catapult/systrace/systrace/systrace_agent.py b/catapult/systrace/systrace/systrace_agent.py
index 376d4f2..bca5e5e 100644
--- a/catapult/systrace/systrace/systrace_agent.py
+++ b/catapult/systrace/systrace/systrace_agent.py
@@ -2,6 +2,7 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+
 class SystraceAgent(object):
   """The base class for systrace agents.
 
diff --git a/catapult/systrace/systrace/update_systrace_trace_viewer.py b/catapult/systrace/systrace/update_systrace_trace_viewer.py
index 71eece0..09eb055 100755
--- a/catapult/systrace/systrace/update_systrace_trace_viewer.py
+++ b/catapult/systrace/systrace/update_systrace_trace_viewer.py
@@ -11,9 +11,9 @@
 import subprocess
 import sys
 
-catapult_path = os.path.abspath(
-    os.path.join(os.path.dirname(__file__), '../..'))
-sys.path.append(os.path.join(catapult_path, 'tracing'))
+_CATAPULT_PATH = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir))
+sys.path.append(os.path.join(_CATAPULT_PATH, 'tracing'))
 from tracing_build import vulcanize_trace_viewer
 
 SYSTRACE_TRACE_VIEWER_HTML_FILE_ = 'systrace_trace_viewer.html'
@@ -22,7 +22,8 @@
 
 
 def create_catapult_rev_str_(revision):
-    return '<!--' + CATAPULT_REV_ + '=' + str(revision) + '-->'
+  return '<!--' + CATAPULT_REV_ + '=' + str(revision) + '-->'
+
 
 def get_catapult_rev_in_file_():
   assert os.path.exists(SYSTRACE_TRACE_VIEWER_HTML_FILE_)
@@ -32,10 +33,11 @@
     for line in lines[::-1]:
       if CATAPULT_REV_ in line:
         tokens = line.split(CATAPULT_REV_)
-        rev = re.sub('[=\->]', '', tokens[1]).strip()
+        rev = re.sub(r'[=\->]', '', tokens[1]).strip()
         break
   return rev
 
+
 def get_catapult_rev_in_git_():
   try:
     return subprocess.check_output(
diff --git a/catapult/systrace/systrace/util.py b/catapult/systrace/systrace/util.py
index 6566d08..283a6ab 100644
--- a/catapult/systrace/systrace/util.py
+++ b/catapult/systrace/systrace/util.py
@@ -71,6 +71,7 @@
 
   return (adb_output, adb_return_code)
 
+
 def get_device_sdk_version():
   """Uses adb to attempt to determine the SDK version of a running device."""
 
@@ -81,7 +82,7 @@
   # command-line so we can send the adb command to the correct device.
   parser = OptionParserIgnoreErrors()
   parser.add_option('-e', '--serial', dest='device_serial', type='string')
-  options, unused_args = parser.parse_args() # pylint: disable=unused-variable
+  options, unused_args = parser.parse_args()  # pylint: disable=unused-variable
 
   success = False
 
diff --git a/catapult/systrace/systrace/util_unittest.py b/catapult/systrace/systrace/util_unittest.py
index 585f682..21d8190 100644
--- a/catapult/systrace/systrace/util_unittest.py
+++ b/catapult/systrace/systrace/util_unittest.py
@@ -4,7 +4,7 @@
 
 import unittest
 
-import util
+from . import util
 
 DEVICE_SERIAL = 'AG8404EC0444AGC'
 LIST_TMP_ARGS = ['ls', '/data/local/tmp']
@@ -13,6 +13,7 @@
 
 
 class UtilTest(unittest.TestCase):
+
   def test_construct_adb_shell_command(self):
     command = util.construct_adb_shell_command(LIST_TMP_ARGS, None)
     self.assertEqual(' '.join(command), 'adb shell ls /data/local/tmp')
diff --git a/catapult/telemetry/BUILD.gn b/catapult/telemetry/BUILD.gn
new file mode 100644
index 0000000..b2027e5
--- /dev/null
+++ b/catapult/telemetry/BUILD.gn
@@ -0,0 +1,22 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+group("telemetry_test_support") {
+  # Generic telemetry deps. For now, just include the whole catapult directory.
+  # TODO(nednguyen, aiolos): only include what telemetry needs.
+  # https://github.com/catapult-project/catapult/issues/1953
+  data = [
+    "../",
+  ]
+}
+
+executable("bitmaptools") {
+  sources = [
+    "telemetry/internal/image_processing/bitmaptools.cc",
+  ]
+
+  deps = [
+    "//build/config/sanitizers:deps",
+  ]
+}
diff --git a/catapult/telemetry/OWNERS b/catapult/telemetry/OWNERS
index 3303ce3..e559489 100644
--- a/catapult/telemetry/OWNERS
+++ b/catapult/telemetry/OWNERS
@@ -9,6 +9,7 @@
 nednguyen@google.com
 skyostil@chromium.org
 sullivan@chromium.org
+zhenw@chromium.org
 
 # emeritus:
 # chrishenry@google.com
diff --git a/catapult/telemetry/PRESUBMIT.py b/catapult/telemetry/PRESUBMIT.py
new file mode 100644
index 0000000..9ec2f20
--- /dev/null
+++ b/catapult/telemetry/PRESUBMIT.py
@@ -0,0 +1,126 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+def _CommonChecks(input_api, output_api):
+  results = []
+
+  results += input_api.RunTests(input_api.canned_checks.GetPylint(
+      input_api, output_api, extra_paths_list=_GetPathsToPrepend(input_api),
+      pylintrc='pylintrc'))
+  results += _CheckNoMoreUsageOfDeprecatedCode(
+      input_api, output_api, deprecated_code='GetChromiumSrcDir()',
+      crbug_number=511332)
+  return results
+
+
+def _RunArgs(args, input_api):
+  p = input_api.subprocess.Popen(args, stdout=input_api.subprocess.PIPE,
+                                 stderr=input_api.subprocess.STDOUT)
+  out, _ = p.communicate()
+  return (out, p.returncode)
+
+
+def _CheckTelemetryBinaryDependencies(input_api, output_api):
+  """ Check that binary_dependencies.json has valid format and content.
+
+  This check should only be done in CheckChangeOnUpload() only since it invokes
+  network I/O.
+  """
+  results = []
+  telemetry_dir = input_api.PresubmitLocalPath()
+  telemetry_binary_dependencies_path = input_api.os_path.join(
+      telemetry_dir, 'telemetry', 'internal', 'binary_dependencies.json')
+  for f in input_api.AffectedFiles():
+    if not f.AbsoluteLocalPath() == telemetry_binary_dependencies_path:
+      continue
+    out, return_code = _RunArgs([
+        input_api.python_executable,
+        input_api.os_path.join(telemetry_dir, 'json_format'),
+        telemetry_binary_dependencies_path], input_api)
+    if return_code:
+      results.append(output_api.PresubmitError(
+          'Validating binary_dependencies.json failed:', long_text=out))
+      break
+    out, return_code = _RunArgs([
+        input_api.python_executable,
+        input_api.os_path.join(telemetry_dir, 'validate_binary_dependencies'),
+        telemetry_binary_dependencies_path], input_api)
+    if return_code:
+      results.append(output_api.PresubmitError(
+          'Validating binary_dependencies.json failed:', long_text=out))
+      break
+  return results
+
+
+def _CheckNoMoreUsageOfDeprecatedCode(
+    input_api, output_api, deprecated_code, crbug_number):
+  results = []
+  # These checks are not perfect but should be good enough for most of our
+  # use cases.
+  def _IsAddedLine(line):
+    return line.startswith('+') and not line.startswith('+++ ')
+  def _IsRemovedLine(line):
+    return line.startswith('-') and not line.startswith('--- ')
+
+  presubmit_dir = input_api.os_path.join(
+      input_api.PresubmitLocalPath(), 'PRESUBMIT.py')
+
+  added_calls = 0
+  removed_calls = 0
+  for affected_file in input_api.AffectedFiles():
+    # Do not do the check on PRESUBMIT.py itself.
+    if affected_file.AbsoluteLocalPath() == presubmit_dir:
+      continue
+    for line in affected_file.GenerateScmDiff().splitlines():
+      if _IsAddedLine(line) and deprecated_code in line:
+        added_calls += 1
+      elif _IsRemovedLine(line) and deprecated_code in line:
+        removed_calls += 1
+
+  if added_calls > removed_calls:
+    results.append(output_api.PresubmitError(
+        'Your patch adds more instances of %s. Please see crbug.com/%i for '
+        'how to proceed.' % (deprecated_code, crbug_number)))
+  return results
+
+
+def _GetPathsToPrepend(input_api):
+  telemetry_dir = input_api.PresubmitLocalPath()
+  catapult_dir = input_api.os_path.join(telemetry_dir, '..')
+  return [
+      telemetry_dir,
+
+      input_api.os_path.join(telemetry_dir, 'third_party', 'altgraph'),
+      input_api.os_path.join(telemetry_dir, 'third_party', 'modulegraph'),
+      input_api.os_path.join(telemetry_dir, 'third_party', 'pexpect'),
+      input_api.os_path.join(telemetry_dir, 'third_party', 'png'),
+      input_api.os_path.join(telemetry_dir, 'third_party', 'webpagereplay'),
+      input_api.os_path.join(telemetry_dir, 'third_party', 'websocket-client'),
+
+      input_api.os_path.join(catapult_dir, 'catapult_base'),
+      input_api.os_path.join(catapult_dir, 'dependency_manager'),
+      input_api.os_path.join(catapult_dir, 'devil'),
+      input_api.os_path.join(catapult_dir, 'tracing'),
+      input_api.os_path.join(catapult_dir, 'common', 'py_trace_event'),
+
+      input_api.os_path.join(catapult_dir, 'third_party', 'mock'),
+      input_api.os_path.join(catapult_dir, 'third_party', 'pyfakefs'),
+      input_api.os_path.join(catapult_dir, 'third_party', 'pyserial'),
+      input_api.os_path.join(catapult_dir, 'third_party', 'typ'),
+  ]
+
+
+
+def CheckChangeOnUpload(input_api, output_api):
+  results = []
+  results += _CommonChecks(input_api, output_api)
+  results += _CheckTelemetryBinaryDependencies(input_api, output_api)
+  return results
+
+
+def CheckChangeOnCommit(input_api, output_api):
+  results = []
+  results += _CommonChecks(input_api, output_api)
+  return results
diff --git a/catapult/telemetry/README.md b/catapult/telemetry/README.md
index 7c39340..9f1d739 100644
--- a/catapult/telemetry/README.md
+++ b/catapult/telemetry/README.md
@@ -109,7 +109,7 @@
 
 ## Next Steps
 
-*   [Run Telemetry benchmarks locally](https://sites.google.com/a/chromium.org/dev/developers/telemetry/run_locally)
+*   [Run Telemetry benchmarks locally](/telemetry/docs/run_benchmarks_locally.md)
 *   [Record a story set](https://sites.google.com/a/chromium.org/dev/developers/telemetry/record_a_page_set)
     with Web Page Replay
 *   [Add a measurement](https://sites.google.com/a/chromium.org/dev/developers/telemetry/add_a_measurement)
diff --git a/catapult/telemetry/bin/.gitignore b/catapult/telemetry/bin/.gitignore
new file mode 100644
index 0000000..3f1bb9d
--- /dev/null
+++ b/catapult/telemetry/bin/.gitignore
@@ -0,0 +1,2 @@
+*/*
+!**.sha1
diff --git a/catapult/telemetry/bin/README.chromium b/catapult/telemetry/bin/README.chromium
new file mode 100644
index 0000000..b2cd84a
--- /dev/null
+++ b/catapult/telemetry/bin/README.chromium
@@ -0,0 +1,54 @@
+This directory contains prebuilt binaries used by Telemetry which allow it to
+be run without requiring any compilation.
+
+For usage instructions, see:
+http://www.chromium.org/developers/telemetry/upload_to_cloud_storage
+
+avconv:
+   version 0.8.9-4:0.8.9-0ubuntu0.12.04.1
+
+IEDriverServer binary:
+  Both 32-bit and 64-bit are of version 2.35.2.
+
+ipfw and ipfw_mod.ko:
+  Version 20120812
+
+perfhost_trusty:
+  Built from branch modified by vmiura on github. The git branch used is
+  "perf_tracing_changes" but in the directions below I have included the actual
+  hash of the checkout.
+
+  Make sure you have the proper libraries installed for symbol demangling:
+    shell> sudo apt-get install binutils-dev
+    shell> sudo apt-get install libiberty-dev
+
+  Directions for building perf:
+    shell> git clone https://github.com/vmiura/linux.git
+    shell> cd linux
+    shell> git checkout e1fe871e4a33712ad4964a70904d5d59188e3cc2
+    shell> cd tools/perf
+    shell> make
+    shell> ./perf test
+    Tests should mostly pass, except a few:
+     1: vmlinux symtab matches kallsyms                        : FAILED!
+     2: detect open syscall event                              : FAILED!
+     3: detect open syscall event on all cpus                  : FAILED!
+     4: read samples using the mmap interface                  : FAILED!
+     5: parse events tests                                     : FAILED!
+     [snip]
+     11: Check parsing of sched tracepoints fields              : FAILED!
+     12: Generate and check syscalls:sys_enter_open event fields: FAILED!
+     21: Test object code reading          :[kernel.kallsyms] ... FAILED!
+    shell> mv perf perfhost_trusty
+
+android/armeabi-v7a/perf:
+  Follow http://source.android.com/source/building.html
+  . build/envsetup.sh
+  lunch aosp_arm-user
+
+2013-09-26 - bulach - perf / perfhost / tcpdump:
+  git revert -n 93501d3 # issue with __strncpy_chk2
+  make -j32 perf perfhost tcpdump
+
+android/arm64-v8a/perf:
+  Same as above, with aarch64 architecture, from branch android-5.0.0_r2
diff --git a/catapult/telemetry/bin/run_tests b/catapult/telemetry/bin/run_tests
new file mode 100755
index 0000000..5b54a5d
--- /dev/null
+++ b/catapult/telemetry/bin/run_tests
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+
+TELEMETRY_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
+sys.path.append(TELEMETRY_DIR)
+from telemetry import project_config
+from telemetry.testing import unittest_runner
+
+
+def main():
+  config = project_config.ProjectConfig(
+      top_level_dir=TELEMETRY_DIR)
+  return unittest_runner.Run(config)
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/catapult/telemetry/build/__init__.py b/catapult/telemetry/build/__init__.py
new file mode 100644
index 0000000..9228df8
--- /dev/null
+++ b/catapult/telemetry/build/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/catapult/telemetry/build/generate_telemetry_harness.sh b/catapult/telemetry/build/generate_telemetry_harness.sh
new file mode 100755
index 0000000..b1e7f05
--- /dev/null
+++ b/catapult/telemetry/build/generate_telemetry_harness.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This is a script meant to be run by a bot to periodically release new versions
+# of the telemetry harness. It needs to be run from one level above src/ (such
+# as build/).
+
+src/tools/perf/find_dependencies \
+  src/tools/perf/run_benchmark \
+  src/tools/perf/record_wpr \
+  src/content/test/gpu/run_gpu_test.py \
+  --exclude=*/third_party/catapult/test_data/* \
+  -z $1
diff --git a/catapult/telemetry/build/linux_setup_msr.py b/catapult/telemetry/build/linux_setup_msr.py
new file mode 100755
index 0000000..a61828b
--- /dev/null
+++ b/catapult/telemetry/build/linux_setup_msr.py
@@ -0,0 +1,146 @@
+#!/usr/bin/env python
+#
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# This is a script developers can use to set-up their workstation to let
+# Telemetry read the CPU's Model Specific Registers in order to get power
+# measurements. It can check if reading from MSRs is possible as any user, but
+# must run as root to make changes. Not all changes are sticky, so one has to
+# re-run this script after each reboot.
+#
+# This script is currently Debian/Ubuntu specific.
+
+import os
+import subprocess
+import sys
+
+MSR_DEV_FILE_PATH = '/dev/cpu/0/msr'
+RDMSR_PATH = '/usr/sbin/rdmsr'
+
+def _Usage(prog_name):
+  """Print a help message."""
+  print 'Run "%s" as a regular user to check if reading from the MSR ' \
+      'is possible.' % prog_name
+  print 'Run "%s enable" as root to automatically set up reading from ' \
+      'the MSR.' % prog_name
+
+
+def _CheckMsrKernelModule():
+  """Return whether the 'msr' kernel module is loaded."""
+  proc = subprocess.Popen('/sbin/lsmod', stdout=subprocess.PIPE)
+  stdout = proc.communicate()[0]
+  ret = proc.wait()
+  if ret != 0:
+    raise OSError('lsmod failed')
+
+  if not any([line.startswith('msr ') for line in stdout.splitlines()]):
+    print 'Error: MSR module not loaded.'
+    return False
+
+  return True
+
+
+def _CheckMsrDevNodes():
+  """Check whether the MSR /dev files have the right permissions."""
+  if not os.path.exists(MSR_DEV_FILE_PATH):
+    print 'Error: %s does not exist.' % MSR_DEV_FILE_PATH
+    return False
+
+  if not os.access(MSR_DEV_FILE_PATH, os.R_OK):
+    print 'Error: Cannot read from %s' % MSR_DEV_FILE_PATH
+    return False
+
+  return True
+
+
+def _CheckRdmsr():
+  """Check and make sure /usr/sbin/rdmsr is set up correctly."""
+  if not os.access(RDMSR_PATH, os.X_OK):
+    print 'Error: %s missing or not executable.' % RDMSR_PATH
+    return False
+
+  proc = subprocess.Popen(['/sbin/getcap', RDMSR_PATH], stdout=subprocess.PIPE)
+  stdout = proc.communicate()[0]
+  ret = proc.wait()
+  if ret != 0:
+    raise OSError('getcap failed')
+
+  if 'cap_sys_rawio+ep' not in stdout:
+    print 'Error: /usr/sbin/rdmsr needs RAWIO capability.'
+    return False
+
+  return True
+
+
+def _RunAllChecks():
+  """Check to make sure it is possible to read from the MSRs."""
+  if os.geteuid() == 0:
+    print 'WARNING: Running as root, msr permission check likely inaccurate.'
+
+  has_dev_node = _CheckMsrDevNodes() if _CheckMsrKernelModule() else False
+  has_rdmsr = _CheckRdmsr()
+  return has_dev_node and has_rdmsr
+
+
+def _EnableMsr(prog_name):
+  """Do all the setup needed to pass _RunAllChecks().
+
+  Needs to run as root."""
+  if os.geteuid() != 0:
+    print 'Error: Must run "%s enable" as root.' % prog_name
+    return False
+
+  print 'Loading msr kernel module.'
+  ret = subprocess.call(['/sbin/modprobe', 'msr'])
+  if ret != 0:
+    print 'Error: Cannot load msr module.'
+    return False
+
+  print 'Running chmod on %s.' % MSR_DEV_FILE_PATH
+  ret = subprocess.call(['/bin/chmod', 'a+r', MSR_DEV_FILE_PATH])
+  if ret != 0:
+    print 'Error: Cannot chmod %s.' % MSR_DEV_FILE_PATH
+    return False
+
+  if not os.access(RDMSR_PATH, os.F_OK):
+    print 'Need to install the msr-tools package.'
+    ret = subprocess.call(['/usr/bin/apt-get', 'install', '-y', 'msr-tools'])
+    if ret != 0:
+      print 'Error: Did not successfully install msr-tools.'
+      return False
+
+  print 'Running setcap on %s.' % RDMSR_PATH
+  ret = subprocess.call(['/sbin/setcap', 'cap_sys_rawio+ep', RDMSR_PATH])
+  if ret != 0:
+    print 'Error: Cannot give /usr/sbin/rdmsr RAWIO capability.'
+    return False
+
+  return True
+
+
+def main(prog_name, argv):
+  if len(argv) == 0:
+    if _RunAllChecks():
+      print 'Check succeeded'
+      return 0
+
+    print 'Check failed, try running "%s enable" as root to fix.' % prog_name
+    return 1
+
+  if len(argv) == 1:
+    if argv[0] == 'enable':
+      return 0 if _EnableMsr(prog_name) else 1
+
+    print 'Error: Unknown sub-command %s' % argv[0]
+    _Usage(prog_name)
+    return 1
+
+  print 'Error: Bad number of arguments'
+  _Usage(prog_name)
+  return 1
+
+
+if '__main__' == __name__:
+  sys.exit(main(os.path.basename(sys.argv[0]), sys.argv[1:]))
diff --git a/catapult/telemetry/build/update_docs.py b/catapult/telemetry/build/update_docs.py
new file mode 100644
index 0000000..7f43b58
--- /dev/null
+++ b/catapult/telemetry/build/update_docs.py
@@ -0,0 +1,148 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import logging
+import optparse
+import os
+import pkgutil
+import pydoc
+import re
+import sys
+
+import telemetry
+from telemetry.core import util
+
+telemetry_dir = util.GetTelemetryDir()
+docs_dir = os.path.join(telemetry_dir, 'docs', 'pydoc')
+
+def RemoveAllDocs():
+  for dirname, _, filenames in os.walk(docs_dir):
+    for filename in filenames:
+      os.remove(os.path.join(dirname, filename))
+
+def GenerateHTMLForModule(module):
+  html = pydoc.html.page(pydoc.describe(module),
+                         pydoc.html.document(module, module.__name__))
+
+  # pydoc writes out html with links in a variety of funky ways. We need
+  # to fix them up.
+  assert not telemetry_dir.endswith(os.sep)
+  links = re.findall('(<a href="(.+?)">(.+?)</a>)', html)
+  for link_match in links:
+    link, href, link_text = link_match
+    if not href.startswith('file:'):
+      continue
+
+    new_href = href.replace('file:', '')
+    new_href = new_href.replace(telemetry_dir, '..')
+    new_href = new_href.replace(os.sep, '/')
+
+    new_link_text = link_text.replace(telemetry_dir + os.sep, '')
+
+    new_link = '<a href="%s">%s</a>' % (new_href, new_link_text)
+    html = html.replace(link, new_link)
+
+  # pydoc writes out html with absolute path file links. This is not suitable
+  # for checked in documentation. So, fix up the HTML after it is generated.
+  #html = re.sub('href="file:%s' % telemetry_dir, 'href="..', html)
+  #html = re.sub(telemetry_dir + os.sep, '', html)
+  return html
+
+def WriteHTMLForModule(module):
+  page = GenerateHTMLForModule(module)
+  path = os.path.join(docs_dir, '%s.html' % module.__name__)
+  with open(path, 'w') as f:
+    sys.stderr.write('Wrote %s\n' % os.path.relpath(path))
+    f.write(page)
+
+def GetAllModulesToDocument(module):
+  modules = [module]
+  for _, modname, _ in pkgutil.walk_packages(
+      module.__path__, module.__name__ + '.'):
+    if modname.endswith('_unittest'):
+      logging.debug("skipping %s due to being a unittest", modname)
+      continue
+
+    module = __import__(modname, fromlist=[""])
+    name, _ = os.path.splitext(module.__file__)
+    if not os.path.exists(name + '.py'):
+      logging.info("skipping %s due to being an orphan .pyc", module.__file__)
+      continue
+
+    modules.append(module)
+  return modules
+
+class AlreadyDocumentedModule(object):
+  def __init__(self, filename):
+    self.filename = filename
+
+  @property
+  def name(self):
+    basename = os.path.basename(self.filename)
+    return os.path.splitext(basename)[0]
+
+  @property
+  def contents(self):
+    with open(self.filename, 'r') as f:
+      return f.read()
+
+def GetAlreadyDocumentedModules():
+  modules = []
+  for dirname, _, filenames in os.walk(docs_dir):
+    for filename in filenames:
+      path = os.path.join(dirname, filename)
+      modules.append(AlreadyDocumentedModule(path))
+  return modules
+
+
+def IsUpdateDocsNeeded():
+  already_documented_modules = GetAlreadyDocumentedModules()
+  already_documented_modules_by_name = dict(
+    (module.name, module) for module in already_documented_modules)
+  current_modules = GetAllModulesToDocument(telemetry)
+
+  # Quick check: if the names of the modules have changed, we definitely need
+  # an update.
+  already_documented_module_names = set(
+    m.name for m in already_documented_modules)
+
+  current_module_names = set([m.__name__ for m in current_modules])
+
+  if current_module_names != already_documented_module_names:
+    return True
+
+  # Generate the new docs and compare against the old. If changed, then
+  # an update is needed.
+  for current_module in current_modules:
+    already_documented_module = already_documented_modules_by_name[
+      current_module.__name__]
+    current_html = GenerateHTMLForModule(current_module)
+    if current_html != already_documented_module.contents:
+      return True
+
+  return False
+
+def Main(args):
+  parser = optparse.OptionParser()
+  parser.add_option(
+      '-v', '--verbose', action='count', dest='verbosity',
+      help='Increase verbosity level (repeat as needed)')
+  options, args = parser.parse_args(args)
+  if options.verbosity >= 2:
+    logging.getLogger().setLevel(logging.DEBUG)
+  elif options.verbosity:
+    logging.getLogger().setLevel(logging.INFO)
+  else:
+    logging.getLogger().setLevel(logging.WARNING)
+
+  assert os.path.isdir(docs_dir), '%s does not exist' % docs_dir
+
+  RemoveAllDocs()
+
+  old_cwd = os.getcwd()
+  try:
+    os.chdir(telemetry_dir)
+    for module in GetAllModulesToDocument(telemetry):
+      WriteHTMLForModule(module)
+  finally:
+    os.chdir(old_cwd)
diff --git a/catapult/telemetry/cloud_storage b/catapult/telemetry/cloud_storage
new file mode 100755
index 0000000..edb2dd6
--- /dev/null
+++ b/catapult/telemetry/cloud_storage
@@ -0,0 +1,200 @@
+#!/usr/bin/env python
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import logging
+import os
+import subprocess
+import sys
+
+from telemetry.core import util
+from telemetry.internal.util import command_line
+
+sys.path.insert(1, os.path.abspath(os.path.join(
+    util.GetCatapultDir(), 'catapult_base')))
+from catapult_base import cloud_storage
+
+
+BUCKETS = {bucket: easy_bucket_name for easy_bucket_name, bucket
+           in cloud_storage.BUCKET_ALIASES.iteritems()}
+
+
+def _GetPaths(path):
+  root, ext = os.path.splitext(path)
+  if ext == '.sha1':
+    file_path = root
+    hash_path = path
+  else:
+    file_path = path
+    hash_path = path + '.sha1'
+  return file_path, hash_path
+
+
+def _FindFilesInCloudStorage(files):
+  """Returns a dict of all files and which buckets they're in."""
+  # Preprocessing: get the contents of all buckets.
+  bucket_contents = {}
+  for bucket in BUCKETS:
+    try:
+      bucket_contents[bucket] = cloud_storage.List(bucket)
+    except (cloud_storage.PermissionError, cloud_storage.CredentialsError):
+      pass
+
+  # Check if each file is in the bucket contents.
+  file_buckets = {}
+  for path in files:
+    file_path, hash_path = _GetPaths(path)
+
+    if file_path in file_buckets:
+      # Ignore duplicates, if both data and sha1 file were in the file list.
+      continue
+    if not os.path.exists(hash_path):
+      # Probably got some non-Cloud Storage files in the file list. Ignore.
+      continue
+
+    file_hash = cloud_storage.ReadHash(hash_path)
+    file_buckets[file_path] = []
+    for bucket in BUCKETS:
+      if bucket in bucket_contents and file_hash in bucket_contents[bucket]:
+        file_buckets[file_path].append(bucket)
+
+  return file_buckets
+
+
+class Ls(command_line.Command):
+  """List which bucket each file is in."""
+
+  @classmethod
+  def AddCommandLineArgs(cls, parser):
+    parser.add_argument('-r', '--recursive', action='store_true')
+    parser.add_argument('paths', nargs='+')
+
+  @classmethod
+  def ProcessCommandLineArgs(cls, parser, args):
+    for path in args.paths:
+      if not os.path.exists(path):
+        parser.error('Path not found: %s' % path)
+
+  def Run(self, args):
+    def GetFilesInPaths(paths, recursive):
+      """If path is a dir, yields all files in path, otherwise just yields path.
+      If recursive is true, walks subdirectories recursively."""
+      for path in paths:
+        if not os.path.isdir(path):
+          yield path
+          continue
+
+        if recursive:
+          for root, _, filenames in os.walk(path):
+            for filename in filenames:
+              yield os.path.join(root, filename)
+        else:
+          for filename in os.listdir(path):
+            yield os.path.join(path, filename)
+
+    files = _FindFilesInCloudStorage(GetFilesInPaths(args.paths, args.recursive))
+
+    if not files:
+      print 'No files in Cloud Storage.'
+      return
+
+    for file_path, buckets in sorted(files.iteritems()):
+      if buckets:
+        buckets = [BUCKETS[bucket] for bucket in buckets]
+        print '%-11s  %s' % (','.join(buckets), file_path)
+      else:
+        print '%-11s  %s' % ('not found', file_path)
+
+
+class Mv(command_line.Command):
+  """Move files to the given bucket."""
+
+  @classmethod
+  def AddCommandLineArgs(cls, parser):
+    parser.add_argument('files', nargs='+')
+    parser.add_argument('bucket', choices=cloud_storage.BUCKET_ALIASES)
+
+  @classmethod
+  def ProcessCommandLineArgs(cls, parser, args):
+    args.bucket = cloud_storage.BUCKET_ALIASES[args.bucket]
+
+  def Run(self, args):
+    files = _FindFilesInCloudStorage(args.files)
+
+    for file_path, buckets in sorted(files.iteritems()):
+      if not buckets:
+        raise IOError('%s not found in Cloud Storage.' % file_path)
+
+    for file_path, buckets in sorted(files.iteritems()):
+      if args.bucket in buckets:
+        buckets.remove(args.bucket)
+      if not buckets:
+        logging.info('Skipping %s, no action needed.' % file_path)
+        continue
+
+      # Move to the target bucket.
+      file_hash = cloud_storage.ReadHash(file_path + '.sha1')
+      cloud_storage.Move(buckets.pop(), args.bucket, file_hash)
+
+      # Delete all additional copies.
+      for bucket in buckets:
+        cloud_storage.Delete(bucket, file_hash)
+
+
+class Rm(command_line.Command):
+  """Remove files from Cloud Storage."""
+
+  @classmethod
+  def AddCommandLineArgs(cls, parser):
+    parser.add_argument('files', nargs='+')
+
+  def Run(self, args):
+    files = _FindFilesInCloudStorage(args.files)
+    for file_path, buckets in sorted(files.iteritems()):
+      file_hash = cloud_storage.ReadHash(file_path + '.sha1')
+      for bucket in buckets:
+        cloud_storage.Delete(bucket, file_hash)
+
+
+class Upload(command_line.Command):
+  """Upload files to Cloud Storage."""
+
+  @classmethod
+  def AddCommandLineArgs(cls, parser):
+    parser.add_argument('files', nargs='+')
+    parser.add_argument('bucket', choices=cloud_storage.BUCKET_ALIASES)
+
+  @classmethod
+  def ProcessCommandLineArgs(cls, parser, args):
+    args.bucket = cloud_storage.BUCKET_ALIASES[args.bucket]
+
+    for path in args.files:
+      if not os.path.exists(path):
+        parser.error('File not found: %s' % path)
+
+  def Run(self, args):
+    for file_path in args.files:
+      file_hash = cloud_storage.CalculateHash(file_path)
+
+      # Create or update the hash file.
+      hash_path = file_path + '.sha1'
+      with open(hash_path, 'wb') as f:
+        f.write(file_hash)
+        f.flush()
+
+      # Add the data to Cloud Storage.
+      cloud_storage.Insert(args.bucket, file_hash, file_path)
+
+      # Add the hash file to the branch, for convenience. :)
+      subprocess.call(['git', 'add', hash_path])
+
+
+class CloudStorageCommand(command_line.SubcommandCommand):
+  commands = (Ls, Mv, Rm, Upload)
+
+
+if __name__ == '__main__':
+  logging.getLogger().setLevel(logging.INFO)
+  sys.exit(CloudStorageCommand.main())
diff --git a/catapult/telemetry/docs/pydoc/telemetry.android.android_story.html b/catapult/telemetry/docs/pydoc/telemetry.android.android_story.html
new file mode 100644
index 0000000..83abe1b
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.android.android_story.html
@@ -0,0 +1,105 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.android.android_story</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.android.html"><font color="#ffffff">android</font></a>.android_story</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/android/android_story.py">telemetry/android/android_story.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.android.shared_android_state.html">telemetry.android.shared_android_state</a><br>
+</td><td width="25%" valign=top><a href="telemetry.story.html">telemetry.story</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.story.story.html#Story">telemetry.story.story.Story</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.android.android_story.html#AndroidStory">AndroidStory</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="AndroidStory">class <strong>AndroidStory</strong></a>(<a href="telemetry.story.story.html#Story">telemetry.story.story.Story</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.android.android_story.html#AndroidStory">AndroidStory</a></dd>
+<dd><a href="telemetry.story.story.html#Story">telemetry.story.story.Story</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="AndroidStory-Run"><strong>Run</strong></a>(self, shared_state)</dt><dd><tt>Execute&nbsp;the&nbsp;interactions&nbsp;with&nbsp;the&nbsp;applications.</tt></dd></dl>
+
+<dl><dt><a name="AndroidStory-__init__"><strong>__init__</strong></a>(self, start_intent, is_app_ready_predicate<font color="#909090">=None</font>, name<font color="#909090">=''</font>, labels<font color="#909090">=None</font>, is_local<font color="#909090">=False</font>)</dt><dd><tt>Creates&nbsp;a&nbsp;new&nbsp;story&nbsp;for&nbsp;Android&nbsp;app.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;start_intent:&nbsp;See&nbsp;AndroidPlatform.LaunchAndroidApplication.<br>
+&nbsp;&nbsp;is_app_ready_predicate:&nbsp;See&nbsp;AndroidPlatform.LaunchAndroidApplication.<br>
+&nbsp;&nbsp;name:&nbsp;See&nbsp;<a href="telemetry.story.story.html#Story">Story</a>.__init__.<br>
+&nbsp;&nbsp;labels:&nbsp;See&nbsp;<a href="telemetry.story.story.html#Story">Story</a>.__init__.<br>
+&nbsp;&nbsp;is_app_ready_predicate:&nbsp;See&nbsp;<a href="telemetry.story.story.html#Story">Story</a>.__init__.</tt></dd></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.story.story.html#Story">telemetry.story.story.Story</a>:<br>
+<dl><dt><a name="AndroidStory-AsDict"><strong>AsDict</strong></a>(self)</dt><dd><tt>Converts&nbsp;a&nbsp;story&nbsp;object&nbsp;to&nbsp;a&nbsp;dict&nbsp;suitable&nbsp;for&nbsp;JSON&nbsp;output.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.story.story.html#Story">telemetry.story.story.Story</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>display_name</strong></dt>
+</dl>
+<dl><dt><strong>file_safe_name</strong></dt>
+<dd><tt>A&nbsp;version&nbsp;of&nbsp;display_name&nbsp;that's&nbsp;safe&nbsp;to&nbsp;use&nbsp;as&nbsp;a&nbsp;filename.<br>
+&nbsp;<br>
+The&nbsp;default&nbsp;implementation&nbsp;sanitizes&nbsp;special&nbsp;characters&nbsp;with&nbsp;underscores,<br>
+but&nbsp;it's&nbsp;okay&nbsp;to&nbsp;override&nbsp;it&nbsp;with&nbsp;a&nbsp;more&nbsp;specific&nbsp;implementation&nbsp;in<br>
+subclasses.</tt></dd>
+</dl>
+<dl><dt><strong>id</strong></dt>
+</dl>
+<dl><dt><strong>is_local</strong></dt>
+<dd><tt>Returns&nbsp;True&nbsp;iff&nbsp;this&nbsp;story&nbsp;does&nbsp;not&nbsp;require&nbsp;network.</tt></dd>
+</dl>
+<dl><dt><strong>labels</strong></dt>
+</dl>
+<dl><dt><strong>make_javascript_deterministic</strong></dt>
+</dl>
+<dl><dt><strong>name</strong></dt>
+</dl>
+<dl><dt><strong>serving_dir</strong></dt>
+<dd><tt>Returns&nbsp;the&nbsp;absolute&nbsp;path&nbsp;to&nbsp;a&nbsp;directory&nbsp;with&nbsp;hash&nbsp;files&nbsp;to&nbsp;data&nbsp;that<br>
+should&nbsp;be&nbsp;updated&nbsp;from&nbsp;cloud&nbsp;storage,&nbsp;or&nbsp;None&nbsp;if&nbsp;no&nbsp;files&nbsp;need&nbsp;to&nbsp;be<br>
+updated.</tt></dd>
+</dl>
+<dl><dt><strong>shared_state_class</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.android.html b/catapult/telemetry/docs/pydoc/telemetry.android.html
new file mode 100644
index 0000000..748edfe
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.android.html
@@ -0,0 +1,26 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.android</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.android</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/android/__init__.py">telemetry/android/__init__.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.android.android_story.html">android_story</a><br>
+</td><td width="25%" valign=top><a href="telemetry.android.shared_android_state.html">shared_android_state</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.android.shared_android_state.html b/catapult/telemetry/docs/pydoc/telemetry.android.shared_android_state.html
new file mode 100644
index 0000000..c285378
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.android.shared_android_state.html
@@ -0,0 +1,96 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.android.shared_android_state</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.android.html"><font color="#ffffff">android</font></a>.shared_android_state</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/android/shared_android_state.py">telemetry/android/shared_android_state.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.platform.android_device.html">telemetry.internal.platform.android_device</a><br>
+<a href="telemetry.core.android_platform.html">telemetry.core.android_platform</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.platform.html">telemetry.core.platform</a><br>
+<a href="telemetry.story.html">telemetry.story</a><br>
+</td><td width="25%" valign=top><a href="telemetry.web_perf.timeline_based_measurement.html">telemetry.web_perf.timeline_based_measurement</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.story.shared_state.html#SharedState">telemetry.story.shared_state.SharedState</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.android.shared_android_state.html#SharedAndroidState">SharedAndroidState</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="SharedAndroidState">class <strong>SharedAndroidState</strong></a>(<a href="telemetry.story.shared_state.html#SharedState">telemetry.story.shared_state.SharedState</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Manage&nbsp;test&nbsp;state/transitions&nbsp;across&nbsp;multiple&nbsp;android.AndroidStory&nbsp;instances.<br>
+&nbsp;<br>
+WARNING:&nbsp;the&nbsp;class&nbsp;is&nbsp;not&nbsp;ready&nbsp;for&nbsp;public&nbsp;consumption.<br>
+Email&nbsp;telemetry@chromium.org&nbsp;if&nbsp;you&nbsp;feel&nbsp;like&nbsp;you&nbsp;must&nbsp;use&nbsp;it.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.android.shared_android_state.html#SharedAndroidState">SharedAndroidState</a></dd>
+<dd><a href="telemetry.story.shared_state.html#SharedState">telemetry.story.shared_state.SharedState</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="SharedAndroidState-CanRunStory"><strong>CanRunStory</strong></a>(self, story)</dt><dd><tt>This&nbsp;does&nbsp;not&nbsp;apply&nbsp;to&nbsp;android&nbsp;app&nbsp;stories.</tt></dd></dl>
+
+<dl><dt><a name="SharedAndroidState-DidRunStory"><strong>DidRunStory</strong></a>(self, results)</dt></dl>
+
+<dl><dt><a name="SharedAndroidState-RunStory"><strong>RunStory</strong></a>(self, results)</dt></dl>
+
+<dl><dt><a name="SharedAndroidState-TearDownState"><strong>TearDownState</strong></a>(self)</dt><dd><tt>Tear&nbsp;down&nbsp;anything&nbsp;created&nbsp;in&nbsp;the&nbsp;__init__&nbsp;method&nbsp;that&nbsp;is&nbsp;not&nbsp;needed.<br>
+&nbsp;<br>
+Currently,&nbsp;there&nbsp;is&nbsp;no&nbsp;clean-up&nbsp;needed&nbsp;from&nbsp;<a href="#SharedAndroidState">SharedAndroidState</a>.__init__.</tt></dd></dl>
+
+<dl><dt><a name="SharedAndroidState-WillRunStory"><strong>WillRunStory</strong></a>(self, story)</dt></dl>
+
+<dl><dt><a name="SharedAndroidState-__init__"><strong>__init__</strong></a>(self, test, finder_options, story_set)</dt><dd><tt>This&nbsp;method&nbsp;is&nbsp;styled&nbsp;on&nbsp;unittest.TestCase.setUpClass.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;test:&nbsp;a&nbsp;web_perf.TimelineBasedMeasurement&nbsp;instance.<br>
+&nbsp;&nbsp;finder_options:&nbsp;a&nbsp;BrowserFinderOptions&nbsp;instance&nbsp;with&nbsp;command&nbsp;line&nbsp;options.<br>
+&nbsp;&nbsp;story_set:&nbsp;a&nbsp;story.StorySet&nbsp;instance.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>app</strong></dt>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.story.shared_state.html#SharedState">telemetry.story.shared_state.SharedState</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
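
The class documented above has a small lifecycle: construct it with a TimelineBasedMeasurement test, a BrowserFinderOptions, and a StorySet, then drive WillRunStory/RunStory/DidRunStory and finish with TearDownState. Because the docstring itself warns the class is not ready for public consumption, the sketch below is purely illustrative of that documented call sequence; the test, finder_options, story_set, story, and results objects are assumed to be supplied by the caller.

```python
# Illustrative sketch only -- SharedAndroidState is documented above as not
# ready for public consumption. This mirrors the documented lifecycle:
# __init__(test, finder_options, story_set), WillRunStory, RunStory,
# DidRunStory, TearDownState. All arguments are assumed to come from the
# caller; nothing is constructed from scratch here.
from telemetry.android import shared_android_state


def run_single_android_story(test, finder_options, story_set, story, results):
  state = shared_android_state.SharedAndroidState(test, finder_options,
                                                  story_set)
  try:
    state.WillRunStory(story)
    state.RunStory(results)
    state.DidRunStory(results)
  finally:
    state.TearDownState()
```
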
diff --git a/catapult/telemetry/docs/pydoc/telemetry.benchmark.html b/catapult/telemetry/docs/pydoc/telemetry.benchmark.html
new file mode 100644
index 0000000..fb5de7b
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.benchmark.html
@@ -0,0 +1,299 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.benchmark</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.benchmark</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/benchmark.py">telemetry/benchmark.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.util.command_line.html">telemetry.internal.util.command_line</a><br>
+<a href="telemetry.decorators.html">telemetry.decorators</a><br>
+</td><td width="25%" valign=top><a href="optparse.html">optparse</a><br>
+<a href="telemetry.page.page_test.html">telemetry.page.page_test</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.story_runner.html">telemetry.internal.story_runner</a><br>
+<a href="telemetry.web_perf.timeline_based_measurement.html">telemetry.web_perf.timeline_based_measurement</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.benchmark.html#BenchmarkMetadata">BenchmarkMetadata</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.benchmark.html#InvalidOptionsError">InvalidOptionsError</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.util.command_line.html#Command">telemetry.internal.util.command_line.Command</a>(<a href="telemetry.internal.util.command_line.html#ArgumentHandlerMixIn">telemetry.internal.util.command_line.ArgumentHandlerMixIn</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.benchmark.html#Benchmark">Benchmark</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Benchmark">class <strong>Benchmark</strong></a>(<a href="telemetry.internal.util.command_line.html#Command">telemetry.internal.util.command_line.Command</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Base&nbsp;class&nbsp;for&nbsp;a&nbsp;Telemetry&nbsp;benchmark.<br>
+&nbsp;<br>
+A&nbsp;benchmark&nbsp;packages&nbsp;a&nbsp;measurement&nbsp;and&nbsp;a&nbsp;PageSet&nbsp;together.<br>
+Benchmarks&nbsp;default&nbsp;to&nbsp;using&nbsp;TBM&nbsp;unless&nbsp;you&nbsp;override&nbsp;the&nbsp;value&nbsp;of<br>
+<a href="#Benchmark">Benchmark</a>.test,&nbsp;or&nbsp;override&nbsp;the&nbsp;CreatePageTest&nbsp;method.<br>
+&nbsp;<br>
+New&nbsp;benchmarks&nbsp;should&nbsp;override&nbsp;CreateStorySet.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.benchmark.html#Benchmark">Benchmark</a></dd>
+<dd><a href="telemetry.internal.util.command_line.html#Command">telemetry.internal.util.command_line.Command</a></dd>
+<dd><a href="telemetry.internal.util.command_line.html#ArgumentHandlerMixIn">telemetry.internal.util.command_line.ArgumentHandlerMixIn</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="Benchmark-CreatePageTest"><strong>CreatePageTest</strong></a>(self, options)</dt><dd><tt>Return&nbsp;the&nbsp;PageTest&nbsp;for&nbsp;this&nbsp;<a href="#Benchmark">Benchmark</a>.<br>
+&nbsp;<br>
+Override&nbsp;this&nbsp;method&nbsp;for&nbsp;PageTest&nbsp;tests.<br>
+Otherwise,&nbsp;override&nbsp;CreateTimelineBasedMeasurementOptions&nbsp;to&nbsp;configure<br>
+TimelineBasedMeasurement&nbsp;tests.&nbsp;Do&nbsp;not&nbsp;override&nbsp;both&nbsp;methods.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;options:&nbsp;a&nbsp;browser_options.BrowserFinderOptions&nbsp;instance<br>
+Returns:<br>
+&nbsp;&nbsp;|<a href="#Benchmark-test">test</a>()|&nbsp;if&nbsp;|test|&nbsp;is&nbsp;a&nbsp;PageTest&nbsp;class.<br>
+&nbsp;&nbsp;Otherwise,&nbsp;a&nbsp;TimelineBasedMeasurement&nbsp;instance.</tt></dd></dl>
+
+<dl><dt><a name="Benchmark-CreateStorySet"><strong>CreateStorySet</strong></a>(self, options)</dt><dd><tt>Creates&nbsp;the&nbsp;instance&nbsp;of&nbsp;StorySet&nbsp;used&nbsp;to&nbsp;run&nbsp;the&nbsp;benchmark.<br>
+&nbsp;<br>
+Can&nbsp;be&nbsp;overridden&nbsp;by&nbsp;subclasses.</tt></dd></dl>
+
+<dl><dt><a name="Benchmark-CreateTimelineBasedMeasurementOptions"><strong>CreateTimelineBasedMeasurementOptions</strong></a>(self)</dt><dd><tt>Return&nbsp;the&nbsp;TimelineBasedMeasurementOptions&nbsp;for&nbsp;this&nbsp;<a href="#Benchmark">Benchmark</a>.<br>
+&nbsp;<br>
+Override&nbsp;this&nbsp;method&nbsp;to&nbsp;configure&nbsp;a&nbsp;TimelineBasedMeasurement&nbsp;benchmark.<br>
+Otherwise,&nbsp;override&nbsp;CreatePageTest&nbsp;for&nbsp;PageTest&nbsp;tests.&nbsp;Do&nbsp;not&nbsp;override<br>
+both&nbsp;methods.</tt></dd></dl>
+
+<dl><dt><a name="Benchmark-CustomizeBrowserOptions"><strong>CustomizeBrowserOptions</strong></a>(self, options)</dt><dd><tt>Add&nbsp;browser&nbsp;options&nbsp;that&nbsp;are&nbsp;required&nbsp;by&nbsp;this&nbsp;benchmark.</tt></dd></dl>
+
+<dl><dt><a name="Benchmark-GetMetadata"><strong>GetMetadata</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="Benchmark-GetTraceRerunCommands"><strong>GetTraceRerunCommands</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="Benchmark-Run"><strong>Run</strong></a>(self, finder_options)</dt><dd><tt>Do&nbsp;not&nbsp;override&nbsp;this&nbsp;method.</tt></dd></dl>
+
+<dl><dt><a name="Benchmark-SetupBenchmarkDebugTraceRerunOptions"><strong>SetupBenchmarkDebugTraceRerunOptions</strong></a>(self, tbm_options)</dt><dd><tt>Setup&nbsp;tracing&nbsp;categories&nbsp;associated&nbsp;with&nbsp;debug&nbsp;trace&nbsp;option.</tt></dd></dl>
+
+<dl><dt><a name="Benchmark-SetupBenchmarkDefaultTraceRerunOptions"><strong>SetupBenchmarkDefaultTraceRerunOptions</strong></a>(self, tbm_options)</dt><dd><tt>Setup&nbsp;tracing&nbsp;categories&nbsp;associated&nbsp;with&nbsp;default&nbsp;trace&nbsp;option.</tt></dd></dl>
+
+<dl><dt><a name="Benchmark-SetupTraceRerunOptions"><strong>SetupTraceRerunOptions</strong></a>(self, browser_options, tbm_options)</dt></dl>
+
+<dl><dt><a name="Benchmark-__init__"><strong>__init__</strong></a>(self, max_failures<font color="#909090">=None</font>)</dt><dd><tt>Creates&nbsp;a&nbsp;new&nbsp;<a href="#Benchmark">Benchmark</a>.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;max_failures:&nbsp;The&nbsp;number&nbsp;of&nbsp;story&nbsp;run&nbsp;failures&nbsp;allowed&nbsp;before&nbsp;bailing<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;from&nbsp;executing&nbsp;subsequent&nbsp;page&nbsp;runs.&nbsp;If&nbsp;None,&nbsp;we&nbsp;never&nbsp;bail.</tt></dd></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="Benchmark-AddCommandLineArgs"><strong>AddCommandLineArgs</strong></a>(cls, parser)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="Benchmark-HasTraceRerunDebugOption"><strong>HasTraceRerunDebugOption</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="Benchmark-Name"><strong>Name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="Benchmark-ProcessCommandLineArgs"><strong>ProcessCommandLineArgs</strong></a>(cls, parser, args)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="Benchmark-SetArgumentDefaults"><strong>SetArgumentDefaults</strong></a>(cls, parser)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="Benchmark-ShouldDisable"><strong>ShouldDisable</strong></a>(cls, possible_browser)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Override&nbsp;this&nbsp;method&nbsp;to&nbsp;disable&nbsp;a&nbsp;benchmark&nbsp;under&nbsp;specific&nbsp;conditions.<br>
+&nbsp;<br>
+Supports&nbsp;logic&nbsp;too&nbsp;complex&nbsp;for&nbsp;simple&nbsp;Enabled&nbsp;and&nbsp;Disabled&nbsp;decorators.<br>
+Decorators&nbsp;are&nbsp;still&nbsp;respected&nbsp;in&nbsp;cases&nbsp;where&nbsp;this&nbsp;function&nbsp;returns&nbsp;False.</tt></dd></dl>
+
+<dl><dt><a name="Benchmark-ValueCanBeAddedPredicate"><strong>ValueCanBeAddedPredicate</strong></a>(cls, value, is_first_result)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Returns&nbsp;whether&nbsp;|value|&nbsp;can&nbsp;be&nbsp;added&nbsp;to&nbsp;the&nbsp;test&nbsp;results.<br>
+&nbsp;<br>
+Override&nbsp;this&nbsp;method&nbsp;to&nbsp;customize&nbsp;the&nbsp;logic&nbsp;of&nbsp;adding&nbsp;values&nbsp;to&nbsp;test<br>
+results.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;value:&nbsp;a&nbsp;value.Value&nbsp;instance&nbsp;(except&nbsp;failure.FailureValue,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;skip.SkipValue&nbsp;or&nbsp;trace.TraceValue&nbsp;which&nbsp;will&nbsp;always&nbsp;be&nbsp;added).<br>
+&nbsp;&nbsp;is_first_result:&nbsp;True&nbsp;if&nbsp;|value|&nbsp;is&nbsp;the&nbsp;first&nbsp;result&nbsp;for&nbsp;its<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;corresponding&nbsp;story.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;True&nbsp;if&nbsp;|value|&nbsp;should&nbsp;be&nbsp;added&nbsp;to&nbsp;the&nbsp;test&nbsp;results.<br>
+&nbsp;&nbsp;Otherwise,&nbsp;it&nbsp;returns&nbsp;False.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>max_failures</strong></dt>
+</dl>
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>options</strong> = {}</dl>
+
+<dl><dt><strong>test</strong> = &lt;class 'telemetry.web_perf.timeline_based_measurement.TimelineBasedMeasurement'&gt;<dd><tt>Collects&nbsp;multiple&nbsp;metrics&nbsp;based&nbsp;on&nbsp;their&nbsp;interaction&nbsp;records.<br>
+&nbsp;<br>
+A&nbsp;timeline&nbsp;based&nbsp;measurement&nbsp;shifts&nbsp;the&nbsp;burden&nbsp;of&nbsp;what&nbsp;metrics&nbsp;to&nbsp;collect&nbsp;onto<br>
+the&nbsp;story&nbsp;under&nbsp;test.&nbsp;Instead&nbsp;of&nbsp;the&nbsp;measurement<br>
+having&nbsp;a&nbsp;fixed&nbsp;set&nbsp;of&nbsp;values&nbsp;it&nbsp;collects,&nbsp;the&nbsp;story&nbsp;being&nbsp;tested<br>
+issues&nbsp;(via&nbsp;javascript)&nbsp;an&nbsp;Interaction&nbsp;record&nbsp;into&nbsp;the&nbsp;user&nbsp;timing&nbsp;API&nbsp;that<br>
+describes&nbsp;what&nbsp;is&nbsp;happening&nbsp;at&nbsp;that&nbsp;time,&nbsp;as&nbsp;well&nbsp;as&nbsp;a&nbsp;standardized&nbsp;set<br>
+of&nbsp;flags&nbsp;describing&nbsp;the&nbsp;semantics&nbsp;of&nbsp;the&nbsp;work&nbsp;being&nbsp;done.&nbsp;The<br>
+TimelineBasedMeasurement&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;collects&nbsp;a&nbsp;trace&nbsp;that&nbsp;includes&nbsp;both&nbsp;these<br>
+interaction&nbsp;records,&nbsp;and&nbsp;a&nbsp;user-chosen&nbsp;amount&nbsp;of&nbsp;performance&nbsp;data&nbsp;using<br>
+Telemetry's&nbsp;various&nbsp;timeline-producing&nbsp;APIs,&nbsp;tracing&nbsp;especially.<br>
+&nbsp;<br>
+It&nbsp;then&nbsp;passes&nbsp;the&nbsp;recorded&nbsp;timeline&nbsp;to&nbsp;different&nbsp;TimelineBasedMetrics&nbsp;based<br>
+on&nbsp;those&nbsp;flags.&nbsp;As&nbsp;an&nbsp;example,&nbsp;this&nbsp;allows&nbsp;a&nbsp;single&nbsp;story&nbsp;run&nbsp;to&nbsp;produce<br>
+load&nbsp;timing&nbsp;data,&nbsp;smoothness&nbsp;data,&nbsp;critical&nbsp;jank&nbsp;information&nbsp;and&nbsp;overall&nbsp;cpu<br>
+usage&nbsp;information.<br>
+&nbsp;<br>
+For&nbsp;information&nbsp;on&nbsp;how&nbsp;to&nbsp;mark&nbsp;up&nbsp;a&nbsp;page&nbsp;to&nbsp;work&nbsp;with<br>
+TimelineBasedMeasurement,&nbsp;refer&nbsp;to&nbsp;the<br>
+perf.metrics.timeline_interaction_record&nbsp;module.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;options:&nbsp;an&nbsp;instance&nbsp;of&nbsp;timeline_based_measurement.Options.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;results_wrapper:&nbsp;A&nbsp;class&nbsp;whose&nbsp;__init__&nbsp;method&nbsp;takes&nbsp;in<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;the&nbsp;page_test_results&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;and&nbsp;the&nbsp;interaction&nbsp;record&nbsp;label.&nbsp;This<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;class&nbsp;follows&nbsp;the&nbsp;ResultsWrapperInterface.&nbsp;Note:&nbsp;this&nbsp;class&nbsp;is&nbsp;not<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;supported&nbsp;long&nbsp;term&nbsp;and&nbsp;will&nbsp;be&nbsp;removed&nbsp;when&nbsp;crbug.com/453109&nbsp;is&nbsp;resolved.</tt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.util.command_line.html#Command">telemetry.internal.util.command_line.Command</a>:<br>
+<dl><dt><a name="Benchmark-Description"><strong>Description</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="Benchmark-main"><strong>main</strong></a>(cls, args<font color="#909090">=None</font>)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Main&nbsp;method&nbsp;to&nbsp;run&nbsp;this&nbsp;command&nbsp;as&nbsp;a&nbsp;standalone&nbsp;script.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.util.command_line.html#ArgumentHandlerMixIn">telemetry.internal.util.command_line.ArgumentHandlerMixIn</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="BenchmarkMetadata">class <strong>BenchmarkMetadata</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="BenchmarkMetadata-AsDict"><strong>AsDict</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="BenchmarkMetadata-__init__"><strong>__init__</strong></a>(self, name, description<font color="#909090">=''</font>, rerun_options<font color="#909090">=None</font>)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>description</strong></dt>
+</dl>
+<dl><dt><strong>name</strong></dt>
+</dl>
+<dl><dt><strong>rerun_options</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="InvalidOptionsError">class <strong>InvalidOptionsError</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Raised&nbsp;for&nbsp;invalid&nbsp;benchmark&nbsp;options.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.benchmark.html#InvalidOptionsError">InvalidOptionsError</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="InvalidOptionsError-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#InvalidOptionsError-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#InvalidOptionsError-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="InvalidOptionsError-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#InvalidOptionsError-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="InvalidOptionsError-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#InvalidOptionsError-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="InvalidOptionsError-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#InvalidOptionsError-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="InvalidOptionsError-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#InvalidOptionsError-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="InvalidOptionsError-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="InvalidOptionsError-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#InvalidOptionsError-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="InvalidOptionsError-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#InvalidOptionsError-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="InvalidOptionsError-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="InvalidOptionsError-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#InvalidOptionsError-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="InvalidOptionsError-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-AddCommandLineArgs"><strong>AddCommandLineArgs</strong></a>(parser)</dt></dl>
+ <dl><dt><a name="-ProcessCommandLineArgs"><strong>ProcessCommandLineArgs</strong></a>(parser, args)</dt></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
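
To make the Benchmark contract above concrete: a new benchmark overrides CreateStorySet and, for TimelineBasedMeasurement benchmarks, CreateTimelineBasedMeasurementOptions (not CreatePageTest as well). The sketch below uses only the methods documented on this page; the class name, benchmark name, and empty StorySet are invented for illustration, and a real benchmark would add its own stories.

```python
# Minimal sketch of a TimelineBasedMeasurement-style Benchmark subclass,
# assuming only the API documented above. The benchmark name and the empty
# StorySet are illustrative placeholders.
from telemetry import benchmark
from telemetry import story
from telemetry.web_perf import timeline_based_measurement


class ExampleTbmBenchmark(benchmark.Benchmark):

  @classmethod
  def Name(cls):
    return 'example.tbm_benchmark'  # hypothetical name

  def CreateStorySet(self, options):
    # A real benchmark would populate this with its stories.
    return story.StorySet()

  def CreateTimelineBasedMeasurementOptions(self):
    # Configure TBM here; per the docs above, do not also override
    # CreatePageTest.
    return timeline_based_measurement.Options()
```
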
diff --git a/catapult/telemetry/docs/pydoc/telemetry.benchmark_runner.html b/catapult/telemetry/docs/pydoc/telemetry.benchmark_runner.html
new file mode 100644
index 0000000..a6abee5
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.benchmark_runner.html
@@ -0,0 +1,223 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.benchmark_runner</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.benchmark_runner</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/benchmark_runner.py">telemetry/benchmark_runner.py</a></font></td></tr></table>
+    <p><tt>Parses&nbsp;the&nbsp;command&nbsp;line,&nbsp;discovers&nbsp;the&nbsp;appropriate&nbsp;benchmarks,&nbsp;and&nbsp;runs&nbsp;them.<br>
+&nbsp;<br>
+Handles&nbsp;benchmark&nbsp;configuration,&nbsp;but&nbsp;all&nbsp;the&nbsp;logic&nbsp;for<br>
+actually&nbsp;running&nbsp;the&nbsp;benchmark&nbsp;is&nbsp;in&nbsp;Benchmark&nbsp;and&nbsp;PageRunner.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.benchmark.html">telemetry.benchmark</a><br>
+<a href="telemetry.internal.util.binary_manager.html">telemetry.internal.util.binary_manager</a><br>
+<a href="telemetry.internal.browser.browser_finder.html">telemetry.internal.browser.browser_finder</a><br>
+<a href="telemetry.internal.browser.browser_options.html">telemetry.internal.browser.browser_options</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.util.command_line.html">telemetry.internal.util.command_line</a><br>
+<a href="telemetry.decorators.html">telemetry.decorators</a><br>
+<a href="telemetry.core.discover.html">telemetry.core.discover</a><br>
+<a href="hashlib.html">hashlib</a><br>
+</td><td width="25%" valign=top><a href="inspect.html">inspect</a><br>
+<a href="json.html">json</a><br>
+<a href="logging.html">logging</a><br>
+<a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="telemetry.project_config.html">telemetry.project_config</a><br>
+<a href="telemetry.internal.util.ps_util.html">telemetry.internal.util.ps_util</a><br>
+<a href="sys.html">sys</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.util.command_line.html#OptparseCommand">telemetry.internal.util.command_line.OptparseCommand</a>(<a href="telemetry.internal.util.command_line.html#Command">telemetry.internal.util.command_line.Command</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.benchmark_runner.html#Help">Help</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.benchmark_runner.html#List">List</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.benchmark_runner.html#Run">Run</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Help">class <strong>Help</strong></a>(<a href="telemetry.internal.util.command_line.html#OptparseCommand">telemetry.internal.util.command_line.OptparseCommand</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Display&nbsp;help&nbsp;information&nbsp;about&nbsp;a&nbsp;command<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.benchmark_runner.html#Help">Help</a></dd>
+<dd><a href="telemetry.internal.util.command_line.html#OptparseCommand">telemetry.internal.util.command_line.OptparseCommand</a></dd>
+<dd><a href="telemetry.internal.util.command_line.html#Command">telemetry.internal.util.command_line.Command</a></dd>
+<dd><a href="telemetry.internal.util.command_line.html#ArgumentHandlerMixIn">telemetry.internal.util.command_line.ArgumentHandlerMixIn</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="Help-Run"><strong>Run</strong></a>(self, args)</dt></dl>
+
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>usage</strong> = '[command]'</dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.util.command_line.html#OptparseCommand">telemetry.internal.util.command_line.OptparseCommand</a>:<br>
+<dl><dt><a name="Help-AddCommandLineArgs"><strong>AddCommandLineArgs</strong></a>(cls, parser, environment)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="Help-CreateParser"><strong>CreateParser</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="Help-ProcessCommandLineArgs"><strong>ProcessCommandLineArgs</strong></a>(cls, parser, args, environment)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="Help-main"><strong>main</strong></a>(cls, args<font color="#909090">=None</font>)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Main&nbsp;method&nbsp;to&nbsp;run&nbsp;this&nbsp;command&nbsp;as&nbsp;a&nbsp;standalone&nbsp;script.</tt></dd></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.util.command_line.html#Command">telemetry.internal.util.command_line.Command</a>:<br>
+<dl><dt><a name="Help-Description"><strong>Description</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="Help-Name"><strong>Name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.util.command_line.html#ArgumentHandlerMixIn">telemetry.internal.util.command_line.ArgumentHandlerMixIn</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="List">class <strong>List</strong></a>(<a href="telemetry.internal.util.command_line.html#OptparseCommand">telemetry.internal.util.command_line.OptparseCommand</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Lists&nbsp;the&nbsp;available&nbsp;benchmarks<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.benchmark_runner.html#List">List</a></dd>
+<dd><a href="telemetry.internal.util.command_line.html#OptparseCommand">telemetry.internal.util.command_line.OptparseCommand</a></dd>
+<dd><a href="telemetry.internal.util.command_line.html#Command">telemetry.internal.util.command_line.Command</a></dd>
+<dd><a href="telemetry.internal.util.command_line.html#ArgumentHandlerMixIn">telemetry.internal.util.command_line.ArgumentHandlerMixIn</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="List-Run"><strong>Run</strong></a>(self, args)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="List-AddCommandLineArgs"><strong>AddCommandLineArgs</strong></a>(cls, parser, _)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="List-CreateParser"><strong>CreateParser</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="List-ProcessCommandLineArgs"><strong>ProcessCommandLineArgs</strong></a>(cls, parser, args, environment)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>usage</strong> = '[benchmark_name] [&lt;options&gt;]'</dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.util.command_line.html#OptparseCommand">telemetry.internal.util.command_line.OptparseCommand</a>:<br>
+<dl><dt><a name="List-main"><strong>main</strong></a>(cls, args<font color="#909090">=None</font>)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Main&nbsp;method&nbsp;to&nbsp;run&nbsp;this&nbsp;command&nbsp;as&nbsp;a&nbsp;standalone&nbsp;script.</tt></dd></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.util.command_line.html#Command">telemetry.internal.util.command_line.Command</a>:<br>
+<dl><dt><a name="List-Description"><strong>Description</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="List-Name"><strong>Name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.util.command_line.html#ArgumentHandlerMixIn">telemetry.internal.util.command_line.ArgumentHandlerMixIn</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Run">class <strong>Run</strong></a>(<a href="telemetry.internal.util.command_line.html#OptparseCommand">telemetry.internal.util.command_line.OptparseCommand</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt><a href="#Run">Run</a>&nbsp;one&nbsp;or&nbsp;more&nbsp;benchmarks&nbsp;(default)<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.benchmark_runner.html#Run">Run</a></dd>
+<dd><a href="telemetry.internal.util.command_line.html#OptparseCommand">telemetry.internal.util.command_line.OptparseCommand</a></dd>
+<dd><a href="telemetry.internal.util.command_line.html#Command">telemetry.internal.util.command_line.Command</a></dd>
+<dd><a href="telemetry.internal.util.command_line.html#ArgumentHandlerMixIn">telemetry.internal.util.command_line.ArgumentHandlerMixIn</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="Run-Run"><strong>Run</strong></a>(self, args)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="Run-AddCommandLineArgs"><strong>AddCommandLineArgs</strong></a>(cls, parser, environment)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="Run-CreateParser"><strong>CreateParser</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="Run-ProcessCommandLineArgs"><strong>ProcessCommandLineArgs</strong></a>(cls, parser, args, environment)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>usage</strong> = 'benchmark_name [page_set] [&lt;options&gt;]'</dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.util.command_line.html#OptparseCommand">telemetry.internal.util.command_line.OptparseCommand</a>:<br>
+<dl><dt><a name="Run-main"><strong>main</strong></a>(cls, args<font color="#909090">=None</font>)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Main&nbsp;method&nbsp;to&nbsp;run&nbsp;this&nbsp;command&nbsp;as&nbsp;a&nbsp;standalone&nbsp;script.</tt></dd></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.util.command_line.html#Command">telemetry.internal.util.command_line.Command</a>:<br>
+<dl><dt><a name="Run-Description"><strong>Description</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="Run-Name"><strong>Name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.util.command_line.html#ArgumentHandlerMixIn">telemetry.internal.util.command_line.ArgumentHandlerMixIn</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-GetBenchmarkByName"><strong>GetBenchmarkByName</strong></a>(name, environment)</dt></dl>
+ <dl><dt><a name="-PrintBenchmarkList"><strong>PrintBenchmarkList</strong></a>(benchmarks, possible_browser, output_pipe<font color="#909090">=&lt;open file '&lt;stdout&gt;', mode 'w'&gt;</font>)</dt><dd><tt>Print&nbsp;benchmarks&nbsp;that&nbsp;are&nbsp;not&nbsp;filtered&nbsp;in&nbsp;the&nbsp;same&nbsp;order&nbsp;of&nbsp;benchmarks&nbsp;in<br>
+the&nbsp;|benchmarks|&nbsp;list.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;benchmarks:&nbsp;the&nbsp;list&nbsp;of&nbsp;benchmarks&nbsp;to&nbsp;be&nbsp;printed&nbsp;(in&nbsp;the&nbsp;same&nbsp;order&nbsp;as&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;list).<br>
+&nbsp;&nbsp;possible_browser:&nbsp;the&nbsp;possible_browser&nbsp;instance&nbsp;that's&nbsp;used&nbsp;for&nbsp;checking<br>
+&nbsp;&nbsp;&nbsp;&nbsp;which&nbsp;benchmarks&nbsp;are&nbsp;enabled.<br>
+&nbsp;&nbsp;output_pipe:&nbsp;the&nbsp;stream&nbsp;that&nbsp;benchmarks&nbsp;are&nbsp;printed&nbsp;to.</tt></dd></dl>
+ <dl><dt><a name="-main"><strong>main</strong></a>(environment)</dt></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
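
The module above is the command-line front end: main(environment) dispatches to the Run, List, and Help commands. A hedged sketch of how an embedding project delegates to it is shown below; the ProjectConfig construction is deliberately left to the caller because its constructor is not documented in this file.

```python
# Hedged sketch of a project-specific entry point that defers to
# benchmark_runner.main, the only function relied on here. The environment
# (a telemetry.project_config.ProjectConfig, per the Modules table above) is
# assumed to be built by the embedding project.
from telemetry import benchmark_runner


def RunBenchmarks(environment):
  # environment: a telemetry.project_config.ProjectConfig instance.
  return benchmark_runner.main(environment)
```

A project's run_benchmark script would typically call sys.exit(RunBenchmarks(config)); the Run command's usage string above ('benchmark_name [page_set] [&lt;options&gt;]') describes the arguments users then pass on the command line.
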
diff --git a/catapult/telemetry/docs/pydoc/telemetry.core.android_action_runner.html b/catapult/telemetry/docs/pydoc/telemetry.core.android_action_runner.html
new file mode 100644
index 0000000..ec50ae5
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.core.android_action_runner.html
@@ -0,0 +1,191 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.core.android_action_runner</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.core.html"><font color="#ffffff">core</font></a>.android_action_runner</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/core/android_action_runner.py">telemetry/core/android_action_runner.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="time.html">time</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.core.android_action_runner.html#AndroidActionRunner">AndroidActionRunner</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.core.android_action_runner.html#ActionNotSupported">ActionNotSupported</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ActionNotSupported">class <strong>ActionNotSupported</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.core.android_action_runner.html#ActionNotSupported">ActionNotSupported</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="ActionNotSupported-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#ActionNotSupported-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#ActionNotSupported-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="ActionNotSupported-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ActionNotSupported-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ActionNotSupported-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#ActionNotSupported-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ActionNotSupported-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#ActionNotSupported-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="ActionNotSupported-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#ActionNotSupported-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="ActionNotSupported-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ActionNotSupported-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#ActionNotSupported-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="ActionNotSupported-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ActionNotSupported-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="ActionNotSupported-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ActionNotSupported-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#ActionNotSupported-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="ActionNotSupported-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="AndroidActionRunner">class <strong>AndroidActionRunner</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Provides&nbsp;an&nbsp;API&nbsp;for&nbsp;interacting&nbsp;with&nbsp;an&nbsp;android&nbsp;device.<br>
+&nbsp;<br>
+This&nbsp;makes&nbsp;use&nbsp;of&nbsp;functionality&nbsp;provided&nbsp;by&nbsp;the&nbsp;android&nbsp;input&nbsp;command.&nbsp;None<br>
+of&nbsp;the&nbsp;gestures&nbsp;here&nbsp;are&nbsp;guaranteed&nbsp;to&nbsp;be&nbsp;performant&nbsp;for&nbsp;telemetry&nbsp;tests&nbsp;and<br>
+there&nbsp;is&nbsp;no&nbsp;official&nbsp;support&nbsp;for&nbsp;this&nbsp;API.<br>
+&nbsp;<br>
+TODO(ariblue):&nbsp;Replace&nbsp;this&nbsp;API&nbsp;with&nbsp;a&nbsp;better&nbsp;implementation&nbsp;for&nbsp;interacting<br>
+with&nbsp;native&nbsp;components.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="AndroidActionRunner-InputKeyEvent"><strong>InputKeyEvent</strong></a>(self, key)</dt><dd><tt>Send&nbsp;a&nbsp;single&nbsp;key&nbsp;input&nbsp;to&nbsp;the&nbsp;device.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;key:&nbsp;A&nbsp;key&nbsp;code&nbsp;number&nbsp;or&nbsp;name&nbsp;that&nbsp;will&nbsp;be&nbsp;sent&nbsp;to&nbsp;the&nbsp;device</tt></dd></dl>
+
+<dl><dt><a name="AndroidActionRunner-InputPress"><strong>InputPress</strong></a>(self)</dt><dd><tt>Perform&nbsp;a&nbsp;press&nbsp;input.</tt></dd></dl>
+
+<dl><dt><a name="AndroidActionRunner-InputRoll"><strong>InputRoll</strong></a>(self, dx, dy)</dt><dd><tt>Perform&nbsp;a&nbsp;roll&nbsp;input.&nbsp;This&nbsp;sends&nbsp;a&nbsp;simple&nbsp;zero-pressure&nbsp;move&nbsp;event.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;dx:&nbsp;Change&nbsp;in&nbsp;the&nbsp;x&nbsp;coordinate&nbsp;due&nbsp;to&nbsp;move.<br>
+&nbsp;&nbsp;dy:&nbsp;Change&nbsp;in&nbsp;the&nbsp;y&nbsp;coordinate&nbsp;due&nbsp;to&nbsp;move.</tt></dd></dl>
+
+<dl><dt><a name="AndroidActionRunner-InputSwipe"><strong>InputSwipe</strong></a>(self, left_start_coord, top_start_coord, left_end_coord, top_end_coord, duration)</dt><dd><tt>Perform&nbsp;a&nbsp;swipe&nbsp;input.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;left_start_coord:&nbsp;The&nbsp;horizontal&nbsp;starting&nbsp;coordinate&nbsp;of&nbsp;the&nbsp;gesture<br>
+&nbsp;&nbsp;top_start_coord:&nbsp;The&nbsp;vertical&nbsp;starting&nbsp;coordinate&nbsp;of&nbsp;the&nbsp;gesture<br>
+&nbsp;&nbsp;left_end_coord:&nbsp;The&nbsp;horizontal&nbsp;ending&nbsp;coordinate&nbsp;of&nbsp;the&nbsp;gesture<br>
+&nbsp;&nbsp;top_end_coord:&nbsp;The&nbsp;vertical&nbsp;ending&nbsp;coordinate&nbsp;of&nbsp;the&nbsp;gesture<br>
+&nbsp;&nbsp;duration:&nbsp;The&nbsp;length&nbsp;of&nbsp;time&nbsp;of&nbsp;the&nbsp;swipe&nbsp;in&nbsp;milliseconds</tt></dd></dl>
+
+<dl><dt><a name="AndroidActionRunner-InputTap"><strong>InputTap</strong></a>(self, x_coord, y_coord)</dt><dd><tt>Perform&nbsp;a&nbsp;tap&nbsp;input&nbsp;at&nbsp;the&nbsp;given&nbsp;coordinates.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;x_coord:&nbsp;The&nbsp;x&nbsp;coordinate&nbsp;of&nbsp;the&nbsp;tap&nbsp;event.<br>
+&nbsp;&nbsp;y_coord:&nbsp;The&nbsp;y&nbsp;coordinate&nbsp;of&nbsp;the&nbsp;tap&nbsp;event.</tt></dd></dl>
+
+<dl><dt><a name="AndroidActionRunner-InputText"><strong>InputText</strong></a>(self, string)</dt><dd><tt>Convert&nbsp;the&nbsp;characters&nbsp;of&nbsp;the&nbsp;string&nbsp;into&nbsp;key&nbsp;events&nbsp;and&nbsp;send&nbsp;to&nbsp;device.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;string:&nbsp;The&nbsp;string&nbsp;to&nbsp;send&nbsp;to&nbsp;the&nbsp;device.</tt></dd></dl>
+
+<dl><dt><a name="AndroidActionRunner-SmoothScrollBy"><strong>SmoothScrollBy</strong></a>(self, left_start_coord, top_start_coord, direction, scroll_distance)</dt><dd><tt>Perfrom&nbsp;gesture&nbsp;to&nbsp;scroll&nbsp;down&nbsp;on&nbsp;the&nbsp;android&nbsp;device.</tt></dd></dl>
+
+<dl><dt><a name="AndroidActionRunner-TurnScreenOff"><strong>TurnScreenOff</strong></a>(self)</dt><dd><tt>If&nbsp;device&nbsp;screen&nbsp;is&nbsp;on,&nbsp;turn&nbsp;screen&nbsp;off.<br>
+If&nbsp;the&nbsp;screen&nbsp;is&nbsp;already&nbsp;off,&nbsp;log&nbsp;a&nbsp;warning&nbsp;and&nbsp;return&nbsp;immediately.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;Timeout:&nbsp;If&nbsp;the&nbsp;screen&nbsp;is&nbsp;on&nbsp;and&nbsp;device&nbsp;fails&nbsp;to&nbsp;turn&nbsp;screen&nbsp;off.</tt></dd></dl>
+
+<dl><dt><a name="AndroidActionRunner-TurnScreenOn"><strong>TurnScreenOn</strong></a>(self)</dt><dd><tt>If&nbsp;device&nbsp;screen&nbsp;is&nbsp;off,&nbsp;turn&nbsp;screen&nbsp;on.<br>
+If&nbsp;the&nbsp;screen&nbsp;is&nbsp;already&nbsp;on,&nbsp;log&nbsp;a&nbsp;warning&nbsp;and&nbsp;return&nbsp;immediately.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;Timeout:&nbsp;If&nbsp;the&nbsp;screen&nbsp;is&nbsp;off&nbsp;and&nbsp;device&nbsp;fails&nbsp;to&nbsp;turn&nbsp;screen&nbsp;on.</tt></dd></dl>
+
+<dl><dt><a name="AndroidActionRunner-UnlockScreen"><strong>UnlockScreen</strong></a>(self)</dt><dd><tt>If&nbsp;device&nbsp;screen&nbsp;is&nbsp;locked,&nbsp;unlocks&nbsp;it.<br>
+If&nbsp;the&nbsp;device&nbsp;is&nbsp;not&nbsp;locked,&nbsp;log&nbsp;a&nbsp;warning&nbsp;and&nbsp;return&nbsp;immediately.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;Timeout:&nbsp;If&nbsp;device&nbsp;fails&nbsp;to&nbsp;unlock&nbsp;screen.</tt></dd></dl>
+
+<dl><dt><a name="AndroidActionRunner-Wait"><strong>Wait</strong></a>(self, seconds)</dt><dd><tt>Wait&nbsp;for&nbsp;the&nbsp;number&nbsp;of&nbsp;seconds&nbsp;specified.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;seconds:&nbsp;The&nbsp;number&nbsp;of&nbsp;seconds&nbsp;to&nbsp;wait.</tt></dd></dl>
+
+<dl><dt><a name="AndroidActionRunner-__init__"><strong>__init__</strong></a>(self, platform_backend)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
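The AndroidActionRunner API documented above is a thin wrapper over device input events. A minimal usage sketch follows; it assumes `platform` is an AndroidPlatform instance obtained elsewhere, whose `android_action_runner` property (documented in the next module) exposes this class, and the key name passed to InputKeyEvent is only an example value.

```python
# Sketch only: `platform` is assumed to be an AndroidPlatform created elsewhere.
runner = platform.android_action_runner

runner.TurnScreenOn()    # logs a warning and returns if the screen is already on
runner.UnlockScreen()    # logs a warning and returns if the device is not locked

runner.InputTap(100, 200)               # tap at x=100, y=200
runner.InputText('hello world')         # sent to the device as key events
runner.InputKeyEvent('KEYCODE_ENTER')   # key code name or number (example value)

# Swipe from (100, 800) to (100, 200) over 300 ms.
runner.InputSwipe(100, 800, 100, 200, 300)

runner.Wait(2)           # block for two seconds
runner.TurnScreenOff()
```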
diff --git a/catapult/telemetry/docs/pydoc/telemetry.core.android_platform.html b/catapult/telemetry/docs/pydoc/telemetry.core.android_platform.html
new file mode 100644
index 0000000..8a56589
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.core.android_platform.html
@@ -0,0 +1,275 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.core.android_platform</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.core.html"><font color="#ffffff">core</font></a>.android_platform</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/core/android_platform.py">telemetry/core/android_platform.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.core.android_action_runner.html">telemetry.core.android_action_runner</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.app.android_app.html">telemetry.internal.app.android_app</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.backends.android_app_backend.html">telemetry.internal.backends.android_app_backend</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.platform.html">telemetry.core.platform</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.core.platform.html#Platform">telemetry.core.platform.Platform</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.core.android_platform.html#AndroidPlatform">AndroidPlatform</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="AndroidPlatform">class <strong>AndroidPlatform</strong></a>(<a href="telemetry.core.platform.html#Platform">telemetry.core.platform.Platform</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.core.android_platform.html#AndroidPlatform">AndroidPlatform</a></dd>
+<dd><a href="telemetry.core.platform.html#Platform">telemetry.core.platform.Platform</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="AndroidPlatform-LaunchAndroidApplication"><strong>LaunchAndroidApplication</strong></a>(self, start_intent, is_app_ready_predicate<font color="#909090">=None</font>, app_has_webviews<font color="#909090">=True</font>)</dt><dd><tt>Launches&nbsp;an&nbsp;Android&nbsp;application&nbsp;given&nbsp;the&nbsp;intent.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;start_intent:&nbsp;The&nbsp;intent&nbsp;to&nbsp;use&nbsp;to&nbsp;start&nbsp;the&nbsp;app.<br>
+&nbsp;&nbsp;is_app_ready_predicate:&nbsp;A&nbsp;predicate&nbsp;function&nbsp;to&nbsp;determine<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;whether&nbsp;the&nbsp;app&nbsp;is&nbsp;ready.&nbsp;This&nbsp;is&nbsp;a&nbsp;function&nbsp;that&nbsp;takes&nbsp;an<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;AndroidApp&nbsp;instance&nbsp;and&nbsp;returns&nbsp;a&nbsp;boolean.&nbsp;When&nbsp;it&nbsp;is&nbsp;not&nbsp;passed&nbsp;in,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;the&nbsp;app&nbsp;is&nbsp;ready&nbsp;when&nbsp;the&nbsp;intent&nbsp;to&nbsp;launch&nbsp;it&nbsp;is&nbsp;completed.<br>
+&nbsp;&nbsp;app_has_webviews:&nbsp;A&nbsp;boolean&nbsp;indicating&nbsp;whether&nbsp;the&nbsp;app&nbsp;is&nbsp;expected&nbsp;to<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;contain&nbsp;any&nbsp;WebViews.&nbsp;If&nbsp;True,&nbsp;the&nbsp;app&nbsp;will&nbsp;be&nbsp;launched&nbsp;with<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;appropriate&nbsp;webview&nbsp;flags,&nbsp;and&nbsp;the&nbsp;GetWebViews&nbsp;method&nbsp;of&nbsp;the&nbsp;returned<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;object&nbsp;may&nbsp;be&nbsp;used&nbsp;to&nbsp;access&nbsp;them.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;reference&nbsp;to&nbsp;the&nbsp;android_app&nbsp;launched.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-__init__"><strong>__init__</strong></a>(self, platform_backend)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>android_action_runner</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.core.platform.html#Platform">telemetry.core.platform.Platform</a>:<br>
+<dl><dt><a name="AndroidPlatform-CanCaptureVideo"><strong>CanCaptureVideo</strong></a>(self)</dt><dd><tt>Returns&nbsp;a&nbsp;bool&nbsp;indicating&nbsp;whether&nbsp;the&nbsp;platform&nbsp;supports&nbsp;video&nbsp;capture.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-CanFlushIndividualFilesFromSystemCache"><strong>CanFlushIndividualFilesFromSystemCache</strong></a>(self)</dt><dd><tt>Returns&nbsp;true&nbsp;if&nbsp;the&nbsp;disk&nbsp;cache&nbsp;can&nbsp;be&nbsp;flushed&nbsp;for&nbsp;specific&nbsp;files.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-CanLaunchApplication"><strong>CanLaunchApplication</strong></a>(self, application)</dt><dd><tt>Returns&nbsp;whether&nbsp;the&nbsp;platform&nbsp;can&nbsp;launch&nbsp;the&nbsp;given&nbsp;application.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-CanMeasurePerApplicationPower"><strong>CanMeasurePerApplicationPower</strong></a>(self)</dt><dd><tt>Returns&nbsp;True&nbsp;if&nbsp;the&nbsp;power&nbsp;monitor&nbsp;can&nbsp;measure&nbsp;power&nbsp;for&nbsp;the&nbsp;target<br>
+application&nbsp;in&nbsp;isolation.&nbsp;False&nbsp;if&nbsp;power&nbsp;measurement&nbsp;is&nbsp;for&nbsp;full&nbsp;system<br>
+energy&nbsp;consumption.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-CanMonitorNetworkData"><strong>CanMonitorNetworkData</strong></a>(self)</dt><dd><tt>Returns&nbsp;true&nbsp;if&nbsp;network&nbsp;data&nbsp;can&nbsp;be&nbsp;retrieved,&nbsp;false&nbsp;otherwise.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-CanMonitorPower"><strong>CanMonitorPower</strong></a>(self)</dt><dd><tt>Returns&nbsp;True&nbsp;iff&nbsp;power&nbsp;can&nbsp;be&nbsp;monitored&nbsp;asynchronously&nbsp;via<br>
+<a href="#AndroidPlatform-StartMonitoringPower">StartMonitoringPower</a>()&nbsp;and&nbsp;<a href="#AndroidPlatform-StopMonitoringPower">StopMonitoringPower</a>().</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-CanMonitorThermalThrottling"><strong>CanMonitorThermalThrottling</strong></a>(self)</dt><dd><tt>Platforms&nbsp;may&nbsp;be&nbsp;able&nbsp;to&nbsp;detect&nbsp;thermal&nbsp;throttling.<br>
+&nbsp;<br>
+Some&nbsp;fan-less&nbsp;computers&nbsp;go&nbsp;into&nbsp;a&nbsp;reduced&nbsp;performance&nbsp;mode&nbsp;when&nbsp;their&nbsp;heat<br>
+exceeds&nbsp;a&nbsp;certain&nbsp;threshold.&nbsp;Performance&nbsp;tests&nbsp;in&nbsp;particular&nbsp;should&nbsp;use&nbsp;this<br>
+API&nbsp;to&nbsp;detect&nbsp;if&nbsp;this&nbsp;has&nbsp;happened&nbsp;and&nbsp;interpret&nbsp;results&nbsp;accordingly.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-CanTakeScreenshot"><strong>CanTakeScreenshot</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidPlatform-CooperativelyShutdown"><strong>CooperativelyShutdown</strong></a>(self, proc, app_name)</dt><dd><tt>Cooperatively&nbsp;shut&nbsp;down&nbsp;the&nbsp;given&nbsp;process&nbsp;from&nbsp;subprocess.Popen.<br>
+&nbsp;<br>
+Currently&nbsp;this&nbsp;is&nbsp;only&nbsp;implemented&nbsp;on&nbsp;Windows.&nbsp;See<br>
+crbug.com/424024&nbsp;for&nbsp;background&nbsp;on&nbsp;why&nbsp;it&nbsp;was&nbsp;added.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;proc:&nbsp;a&nbsp;process&nbsp;object&nbsp;returned&nbsp;from&nbsp;subprocess.Popen.<br>
+&nbsp;&nbsp;app_name:&nbsp;on&nbsp;Windows,&nbsp;is&nbsp;the&nbsp;prefix&nbsp;of&nbsp;the&nbsp;application's&nbsp;window<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;class&nbsp;name&nbsp;that&nbsp;should&nbsp;be&nbsp;searched&nbsp;for.&nbsp;This&nbsp;helps&nbsp;ensure<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;that&nbsp;only&nbsp;the&nbsp;application's&nbsp;windows&nbsp;are&nbsp;closed.<br>
+&nbsp;<br>
+Returns&nbsp;True&nbsp;if&nbsp;it&nbsp;is&nbsp;believed&nbsp;the&nbsp;attempt&nbsp;succeeded.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-FlushDnsCache"><strong>FlushDnsCache</strong></a>(self)</dt><dd><tt>Flushes&nbsp;the&nbsp;OS's&nbsp;DNS&nbsp;cache&nbsp;completely.<br>
+&nbsp;<br>
+This&nbsp;function&nbsp;may&nbsp;require&nbsp;root&nbsp;or&nbsp;administrator&nbsp;access.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-FlushEntireSystemCache"><strong>FlushEntireSystemCache</strong></a>(self)</dt><dd><tt>Flushes&nbsp;the&nbsp;OS's&nbsp;file&nbsp;cache&nbsp;completely.<br>
+&nbsp;<br>
+This&nbsp;function&nbsp;may&nbsp;require&nbsp;root&nbsp;or&nbsp;administrator&nbsp;access.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-FlushSystemCacheForDirectory"><strong>FlushSystemCacheForDirectory</strong></a>(self, directory)</dt><dd><tt>Flushes&nbsp;the&nbsp;OS's&nbsp;file&nbsp;cache&nbsp;for&nbsp;the&nbsp;specified&nbsp;directory.<br>
+&nbsp;<br>
+This&nbsp;function&nbsp;does&nbsp;not&nbsp;require&nbsp;root&nbsp;or&nbsp;administrator&nbsp;access.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-GetArchName"><strong>GetArchName</strong></a>(self)</dt><dd><tt>Returns&nbsp;a&nbsp;string&nbsp;description&nbsp;of&nbsp;the&nbsp;<a href="telemetry.core.platform.html#Platform">Platform</a>&nbsp;architecture.<br>
+&nbsp;<br>
+Examples:&nbsp;x86_64&nbsp;(posix),&nbsp;AMD64&nbsp;(win),&nbsp;armeabi-v7a,&nbsp;x86</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-GetDeviceTypeName"><strong>GetDeviceTypeName</strong></a>(self)</dt><dd><tt>Returns&nbsp;a&nbsp;string&nbsp;description&nbsp;of&nbsp;the&nbsp;<a href="telemetry.core.platform.html#Platform">Platform</a>&nbsp;device,&nbsp;or&nbsp;None.<br>
+&nbsp;<br>
+Examples:&nbsp;Nexus&nbsp;7,&nbsp;Nexus&nbsp;6,&nbsp;Desktop</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-GetNetworkData"><strong>GetNetworkData</strong></a>(self, browser)</dt><dd><tt>Get&nbsp;current&nbsp;network&nbsp;data.<br>
+Returns:<br>
+&nbsp;&nbsp;Tuple&nbsp;of&nbsp;(sent_data,&nbsp;received_data)&nbsp;in&nbsp;kb&nbsp;if&nbsp;data&nbsp;can&nbsp;be&nbsp;found,<br>
+&nbsp;&nbsp;None&nbsp;otherwise.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-GetOSName"><strong>GetOSName</strong></a>(self)</dt><dd><tt>Returns&nbsp;a&nbsp;string&nbsp;description&nbsp;of&nbsp;the&nbsp;<a href="telemetry.core.platform.html#Platform">Platform</a>&nbsp;OS.<br>
+&nbsp;<br>
+Examples:&nbsp;WIN,&nbsp;MAC,&nbsp;LINUX,&nbsp;CHROMEOS</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-GetOSVersionName"><strong>GetOSVersionName</strong></a>(self)</dt><dd><tt>Returns&nbsp;a&nbsp;logically&nbsp;sortable,&nbsp;string-like&nbsp;description&nbsp;of&nbsp;the&nbsp;<a href="telemetry.core.platform.html#Platform">Platform</a>&nbsp;OS<br>
+version.<br>
+&nbsp;<br>
+Examples:&nbsp;VISTA,&nbsp;WIN7,&nbsp;LION,&nbsp;MOUNTAINLION</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-GetOSVersionNumber"><strong>GetOSVersionNumber</strong></a>(self)</dt><dd><tt>Returns&nbsp;an&nbsp;integer&nbsp;description&nbsp;of&nbsp;the&nbsp;<a href="telemetry.core.platform.html#Platform">Platform</a>&nbsp;OS&nbsp;major&nbsp;version.<br>
+&nbsp;<br>
+Examples:&nbsp;On&nbsp;Mac,&nbsp;13&nbsp;for&nbsp;Mavericks,&nbsp;14&nbsp;for&nbsp;Yosemite.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-HasBeenThermallyThrottled"><strong>HasBeenThermallyThrottled</strong></a>(self)</dt><dd><tt>Returns&nbsp;True&nbsp;if&nbsp;the&nbsp;device&nbsp;has&nbsp;been&nbsp;thermally&nbsp;throttled.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-InstallApplication"><strong>InstallApplication</strong></a>(self, application)</dt><dd><tt>Installs&nbsp;the&nbsp;given&nbsp;application.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-IsApplicationRunning"><strong>IsApplicationRunning</strong></a>(self, application)</dt><dd><tt>Returns&nbsp;whether&nbsp;an&nbsp;application&nbsp;is&nbsp;currently&nbsp;running.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-IsCooperativeShutdownSupported"><strong>IsCooperativeShutdownSupported</strong></a>(self)</dt><dd><tt>Indicates&nbsp;whether&nbsp;CooperativelyShutdown,&nbsp;below,&nbsp;is&nbsp;supported.<br>
+It&nbsp;is&nbsp;not&nbsp;necessary&nbsp;to&nbsp;implement&nbsp;it&nbsp;on&nbsp;all&nbsp;platforms.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-IsMonitoringPower"><strong>IsMonitoringPower</strong></a>(self)</dt><dd><tt>Returns&nbsp;true&nbsp;if&nbsp;power&nbsp;is&nbsp;currently&nbsp;being&nbsp;monitored,&nbsp;false&nbsp;otherwise.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-IsThermallyThrottled"><strong>IsThermallyThrottled</strong></a>(self)</dt><dd><tt>Returns&nbsp;True&nbsp;if&nbsp;the&nbsp;device&nbsp;is&nbsp;currently&nbsp;thermally&nbsp;throttled.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-LaunchApplication"><strong>LaunchApplication</strong></a>(self, application, parameters<font color="#909090">=None</font>, elevate_privilege<font color="#909090">=False</font>)</dt><dd><tt>"Launches&nbsp;the&nbsp;given&nbsp;|application|&nbsp;with&nbsp;a&nbsp;list&nbsp;of&nbsp;|parameters|&nbsp;on&nbsp;the&nbsp;OS.<br>
+&nbsp;<br>
+Set&nbsp;|elevate_privilege|&nbsp;to&nbsp;launch&nbsp;the&nbsp;application&nbsp;with&nbsp;root&nbsp;or&nbsp;admin&nbsp;rights.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;popen&nbsp;style&nbsp;process&nbsp;handle&nbsp;for&nbsp;host&nbsp;platforms.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-SetHTTPServerDirectories"><strong>SetHTTPServerDirectories</strong></a>(self, paths)</dt><dd><tt>Returns&nbsp;True&nbsp;if&nbsp;the&nbsp;HTTP&nbsp;server&nbsp;was&nbsp;started,&nbsp;False&nbsp;otherwise.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-StartLocalServer"><strong>StartLocalServer</strong></a>(self, server)</dt><dd><tt>Starts&nbsp;a&nbsp;LocalServer&nbsp;and&nbsp;associates&nbsp;it&nbsp;with&nbsp;this&nbsp;platform.<br>
+|server.Close()|&nbsp;should&nbsp;be&nbsp;called&nbsp;manually&nbsp;to&nbsp;close&nbsp;the&nbsp;started&nbsp;server.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-StartMonitoringPower"><strong>StartMonitoringPower</strong></a>(self, browser)</dt><dd><tt>Starts&nbsp;monitoring&nbsp;power&nbsp;utilization&nbsp;statistics.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;browser:&nbsp;The&nbsp;browser&nbsp;to&nbsp;monitor.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-StartVideoCapture"><strong>StartVideoCapture</strong></a>(self, min_bitrate_mbps)</dt><dd><tt>Starts&nbsp;capturing&nbsp;video.<br>
+&nbsp;<br>
+Outer&nbsp;framing&nbsp;may&nbsp;be&nbsp;included&nbsp;(from&nbsp;the&nbsp;OS,&nbsp;browser&nbsp;window,&nbsp;and&nbsp;webcam).<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;min_bitrate_mbps:&nbsp;The&nbsp;minimum&nbsp;capture&nbsp;bitrate&nbsp;in&nbsp;MegaBits&nbsp;Per&nbsp;Second.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;The&nbsp;platform&nbsp;is&nbsp;free&nbsp;to&nbsp;deliver&nbsp;a&nbsp;higher&nbsp;bitrate&nbsp;if&nbsp;it&nbsp;can&nbsp;do&nbsp;so<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;without&nbsp;increasing&nbsp;overhead.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;ValueError&nbsp;if&nbsp;the&nbsp;required&nbsp;|min_bitrate_mbps|&nbsp;can't&nbsp;be&nbsp;achieved.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-StopAllLocalServers"><strong>StopAllLocalServers</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidPlatform-StopMonitoringPower"><strong>StopMonitoringPower</strong></a>(self)</dt><dd><tt>Stops&nbsp;monitoring&nbsp;power&nbsp;utilization&nbsp;and&nbsp;returns&nbsp;stats<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;None&nbsp;if&nbsp;power&nbsp;measurement&nbsp;failed&nbsp;for&nbsp;some&nbsp;reason,&nbsp;otherwise&nbsp;a&nbsp;dict&nbsp;of<br>
+&nbsp;&nbsp;power&nbsp;utilization&nbsp;statistics&nbsp;containing:&nbsp;{<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;An&nbsp;identifier&nbsp;for&nbsp;the&nbsp;data&nbsp;provider.&nbsp;Allows&nbsp;to&nbsp;evaluate&nbsp;the&nbsp;precision<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;of&nbsp;the&nbsp;data.&nbsp;Example&nbsp;values:&nbsp;monsoon,&nbsp;powermetrics,&nbsp;ds2784<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'identifier':&nbsp;identifier,<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;The&nbsp;instantaneous&nbsp;power&nbsp;(voltage&nbsp;*&nbsp;current)&nbsp;reading&nbsp;in&nbsp;milliwatts&nbsp;at<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;each&nbsp;sample.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'power_samples_mw':&nbsp;&nbsp;[mw0,&nbsp;mw1,&nbsp;...,&nbsp;mwN],<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;The&nbsp;full&nbsp;system&nbsp;energy&nbsp;consumption&nbsp;during&nbsp;the&nbsp;sampling&nbsp;period&nbsp;in<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;milliwatt&nbsp;hours.&nbsp;May&nbsp;be&nbsp;estimated&nbsp;by&nbsp;integrating&nbsp;power&nbsp;samples&nbsp;or&nbsp;may<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;be&nbsp;exact&nbsp;on&nbsp;supported&nbsp;hardware.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'energy_consumption_mwh':&nbsp;mwh,<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;The&nbsp;target&nbsp;application's&nbsp;energy&nbsp;consumption&nbsp;during&nbsp;the&nbsp;sampling&nbsp;period<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;in&nbsp;milliwatt&nbsp;hours.&nbsp;Should&nbsp;be&nbsp;returned&nbsp;iff<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;<a href="#AndroidPlatform-CanMeasurePerApplicationPower">CanMeasurePerApplicationPower</a>()&nbsp;return&nbsp;true.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'application_energy_consumption_mwh':&nbsp;mwh,<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;A&nbsp;platform-specific&nbsp;dictionary&nbsp;of&nbsp;additional&nbsp;details&nbsp;about&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;utilization&nbsp;of&nbsp;individual&nbsp;hardware&nbsp;components.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;component_utilization:&nbsp;{<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;...<br>
+&nbsp;&nbsp;&nbsp;&nbsp;}<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;<a href="telemetry.core.platform.html#Platform">Platform</a>-specific&nbsp;data&nbsp;not&nbsp;attributed&nbsp;to&nbsp;any&nbsp;particular&nbsp;hardware<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;component.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;platform_info:&nbsp;{<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;Device-specific&nbsp;onboard&nbsp;temperature&nbsp;sensor.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;'average_temperature_c':&nbsp;c,<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;...<br>
+&nbsp;&nbsp;&nbsp;&nbsp;}<br>
+&nbsp;<br>
+&nbsp;&nbsp;}</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-StopVideoCapture"><strong>StopVideoCapture</strong></a>(self)</dt><dd><tt>Stops&nbsp;capturing&nbsp;video.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;telemetry.core.video.Video&nbsp;object.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatform-TakeScreenshot"><strong>TakeScreenshot</strong></a>(self, file_path)</dt><dd><tt>Takes&nbsp;a&nbsp;screenshot&nbsp;of&nbsp;the&nbsp;platform&nbsp;and&nbsp;save&nbsp;to&nbsp;|file_path|.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;this&nbsp;method&nbsp;may&nbsp;not&nbsp;be&nbsp;supported&nbsp;on&nbsp;all&nbsp;platforms,&nbsp;so&nbsp;check&nbsp;with<br>
+CanTakeScreenshot&nbsp;before&nbsp;calling&nbsp;this.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;file_path:&nbsp;Where&nbsp;to&nbsp;save&nbsp;the&nbsp;screenshot&nbsp;to.&nbsp;If&nbsp;the&nbsp;platform&nbsp;is&nbsp;remote,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;|file_path|&nbsp;is&nbsp;the&nbsp;path&nbsp;on&nbsp;the&nbsp;host&nbsp;platform.<br>
+&nbsp;<br>
+Returns&nbsp;True&nbsp;if&nbsp;it&nbsp;is&nbsp;believed&nbsp;the&nbsp;attempt&nbsp;succeeded.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.core.platform.html#Platform">telemetry.core.platform.Platform</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>http_server</strong></dt>
+</dl>
+<dl><dt><strong>is_host_platform</strong></dt>
+</dl>
+<dl><dt><strong>local_servers</strong></dt>
+<dd><tt>Returns&nbsp;the&nbsp;currently&nbsp;running&nbsp;local&nbsp;servers.</tt></dd>
+</dl>
+<dl><dt><strong>network_controller</strong></dt>
+<dd><tt>Control&nbsp;network&nbsp;settings&nbsp;and&nbsp;servers&nbsp;to&nbsp;simulate&nbsp;the&nbsp;Web.</tt></dd>
+</dl>
+<dl><dt><strong>tracing_controller</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
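LaunchAndroidApplication, documented above, is the entry point for driving an app on an AndroidPlatform. A hedged sketch, assuming `android_platform` and `start_intent` were constructed elsewhere (the intent type is whatever the Android app backend accepts) and using a purely illustrative readiness predicate:

```python
# Sketch only: `android_platform` is an AndroidPlatform and `start_intent` is an
# intent object accepted by the Android app backend; both are assumed to exist.
def _app_is_ready(app):
  # Per the docstring, the predicate takes an AndroidApp and returns a boolean.
  return True  # replace with a real readiness check

app = android_platform.LaunchAndroidApplication(
    start_intent,
    is_app_ready_predicate=_app_is_ready,
    app_has_webviews=True)

# With app_has_webviews=True, the returned object's GetWebViews() may be used.
for web_view in app.GetWebViews():
  print(web_view)
```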
diff --git a/catapult/telemetry/docs/pydoc/telemetry.core.cros_interface.html b/catapult/telemetry/docs/pydoc/telemetry.core.cros_interface.html
new file mode 100644
index 0000000..1baf490
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.core.cros_interface.html
@@ -0,0 +1,355 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.core.cros_interface</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.core.html"><font color="#ffffff">core</font></a>.cros_interface</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/core/cros_interface.py">telemetry/core/cros_interface.py</a></font></td></tr></table>
+    <p><tt>A&nbsp;wrapper&nbsp;around&nbsp;ssh&nbsp;for&nbsp;common&nbsp;operations&nbsp;on&nbsp;a&nbsp;CrOS-based&nbsp;device</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="logging.html">logging</a><br>
+<a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="re.html">re</a><br>
+<a href="shutil.html">shutil</a><br>
+</td><td width="25%" valign=top><a href="stat.html">stat</a><br>
+<a href="subprocess.html">subprocess</a><br>
+</td><td width="25%" valign=top><a href="tempfile.html">tempfile</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.core.cros_interface.html#CrOSInterface">CrOSInterface</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.core.cros_interface.html#LoginException">LoginException</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.core.cros_interface.html#DNSFailureException">DNSFailureException</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.core.cros_interface.html#KeylessLoginRequiredException">KeylessLoginRequiredException</a>
+</font></dt></dl>
+</dd>
+</dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="CrOSInterface">class <strong>CrOSInterface</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="CrOSInterface-Chown"><strong>Chown</strong></a>(self, filename)</dt></dl>
+
+<dl><dt><a name="CrOSInterface-CloseConnection"><strong>CloseConnection</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrOSInterface-CryptohomePath"><strong>CryptohomePath</strong></a>(self, user)</dt><dd><tt>Returns&nbsp;the&nbsp;cryptohome&nbsp;mount&nbsp;point&nbsp;for&nbsp;|user|.</tt></dd></dl>
+
+<dl><dt><a name="CrOSInterface-FileExistsOnDevice"><strong>FileExistsOnDevice</strong></a>(self, file_name)</dt></dl>
+
+<dl><dt><a name="CrOSInterface-FilesystemMountedAt"><strong>FilesystemMountedAt</strong></a>(self, path)</dt><dd><tt>Returns&nbsp;the&nbsp;filesystem&nbsp;mounted&nbsp;at&nbsp;|path|</tt></dd></dl>
+
+<dl><dt><a name="CrOSInterface-FormSSHCommandLine"><strong>FormSSHCommandLine</strong></a>(self, args, extra_ssh_args<font color="#909090">=None</font>)</dt><dd><tt>Constructs&nbsp;a&nbsp;subprocess-suitable&nbsp;command&nbsp;line&nbsp;for&nbsp;`ssh'.</tt></dd></dl>
+
+<dl><dt><a name="CrOSInterface-GetChromePid"><strong>GetChromePid</strong></a>(self)</dt><dd><tt>Returns&nbsp;pid&nbsp;of&nbsp;main&nbsp;chrome&nbsp;browser&nbsp;process.</tt></dd></dl>
+
+<dl><dt><a name="CrOSInterface-GetChromeProcess"><strong>GetChromeProcess</strong></a>(self)</dt><dd><tt>Locates&nbsp;the&nbsp;the&nbsp;main&nbsp;chrome&nbsp;browser&nbsp;process.<br>
+&nbsp;<br>
+Chrome&nbsp;on&nbsp;cros&nbsp;is&nbsp;usually&nbsp;in&nbsp;/opt/google/chrome,&nbsp;but&nbsp;could&nbsp;be&nbsp;in<br>
+/usr/local/&nbsp;for&nbsp;developer&nbsp;workflows&nbsp;-&nbsp;debug&nbsp;chrome&nbsp;is&nbsp;too&nbsp;large&nbsp;to&nbsp;fit&nbsp;on<br>
+rootfs.<br>
+&nbsp;<br>
+Chrome&nbsp;spawns&nbsp;multiple&nbsp;processes&nbsp;for&nbsp;renderers.&nbsp;pids&nbsp;wrap&nbsp;around&nbsp;after&nbsp;they<br>
+are&nbsp;exhausted&nbsp;so&nbsp;looking&nbsp;for&nbsp;the&nbsp;smallest&nbsp;pid&nbsp;is&nbsp;not&nbsp;always&nbsp;correct.&nbsp;We<br>
+locate&nbsp;the&nbsp;session_manager's&nbsp;pid,&nbsp;and&nbsp;look&nbsp;for&nbsp;the&nbsp;chrome&nbsp;process&nbsp;that's&nbsp;an<br>
+immediate&nbsp;child.&nbsp;This&nbsp;is&nbsp;the&nbsp;main&nbsp;browser&nbsp;process.</tt></dd></dl>
+
+<dl><dt><a name="CrOSInterface-GetFile"><strong>GetFile</strong></a>(self, filename, destfile<font color="#909090">=None</font>)</dt><dd><tt>Copies&nbsp;a&nbsp;local&nbsp;file&nbsp;|filename|&nbsp;to&nbsp;|destfile|&nbsp;on&nbsp;the&nbsp;device.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;filename:&nbsp;The&nbsp;name&nbsp;of&nbsp;the&nbsp;local&nbsp;source&nbsp;file.<br>
+&nbsp;&nbsp;destfile:&nbsp;The&nbsp;name&nbsp;of&nbsp;the&nbsp;file&nbsp;to&nbsp;copy&nbsp;to,&nbsp;and&nbsp;if&nbsp;it&nbsp;is&nbsp;not&nbsp;specified<br>
+&nbsp;&nbsp;&nbsp;&nbsp;then&nbsp;it&nbsp;is&nbsp;the&nbsp;basename&nbsp;of&nbsp;the&nbsp;source&nbsp;file.</tt></dd></dl>
+
+<dl><dt><a name="CrOSInterface-GetFileContents"><strong>GetFileContents</strong></a>(self, filename)</dt><dd><tt>Get&nbsp;the&nbsp;contents&nbsp;of&nbsp;a&nbsp;file&nbsp;on&nbsp;the&nbsp;device.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;filename:&nbsp;The&nbsp;name&nbsp;of&nbsp;the&nbsp;file&nbsp;on&nbsp;the&nbsp;device.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;string&nbsp;containing&nbsp;the&nbsp;contents&nbsp;of&nbsp;the&nbsp;file.</tt></dd></dl>
+
+<dl><dt><a name="CrOSInterface-GetRemotePort"><strong>GetRemotePort</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrOSInterface-IsCryptohomeMounted"><strong>IsCryptohomeMounted</strong></a>(self, username, is_guest)</dt><dd><tt>Returns&nbsp;True&nbsp;iff&nbsp;|user|'s&nbsp;cryptohome&nbsp;is&nbsp;mounted.</tt></dd></dl>
+
+<dl><dt><a name="CrOSInterface-IsHTTPServerRunningOnPort"><strong>IsHTTPServerRunningOnPort</strong></a>(self, port)</dt></dl>
+
+<dl><dt><a name="CrOSInterface-IsServiceRunning"><strong>IsServiceRunning</strong></a>(self, service_name)</dt></dl>
+
+<dl><dt><a name="CrOSInterface-KillAllMatching"><strong>KillAllMatching</strong></a>(self, predicate)</dt></dl>
+
+<dl><dt><a name="CrOSInterface-ListProcesses"><strong>ListProcesses</strong></a>(self)</dt><dd><tt>Returns&nbsp;(pid,&nbsp;cmd,&nbsp;ppid,&nbsp;state)&nbsp;of&nbsp;all&nbsp;processes&nbsp;on&nbsp;the&nbsp;device.</tt></dd></dl>
+
+<dl><dt><a name="CrOSInterface-PushContents"><strong>PushContents</strong></a>(self, text, remote_filename)</dt></dl>
+
+<dl><dt><a name="CrOSInterface-PushFile"><strong>PushFile</strong></a>(self, filename, remote_filename)</dt></dl>
+
+<dl><dt><a name="CrOSInterface-RestartUI"><strong>RestartUI</strong></a>(self, clear_enterprise_policy)</dt></dl>
+
+<dl><dt><a name="CrOSInterface-RmRF"><strong>RmRF</strong></a>(self, filename)</dt></dl>
+
+<dl><dt><a name="CrOSInterface-RunCmdOnDevice"><strong>RunCmdOnDevice</strong></a>(self, args, cwd<font color="#909090">=None</font>, quiet<font color="#909090">=False</font>)</dt></dl>
+
+<dl><dt><a name="CrOSInterface-TakeScreenShot"><strong>TakeScreenShot</strong></a>(self, screenshot_prefix)</dt><dd><tt>Takes&nbsp;a&nbsp;screenshot,&nbsp;useful&nbsp;for&nbsp;debugging&nbsp;failures.</tt></dd></dl>
+
+<dl><dt><a name="CrOSInterface-TryLogin"><strong>TryLogin</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrOSInterface-__enter__"><strong>__enter__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrOSInterface-__exit__"><strong>__exit__</strong></a>(self, *args)</dt></dl>
+
+<dl><dt><a name="CrOSInterface-__init__"><strong>__init__</strong></a>(self, hostname<font color="#909090">=None</font>, ssh_port<font color="#909090">=None</font>, ssh_identity<font color="#909090">=None</font>)</dt><dd><tt>#&nbsp;pylint:&nbsp;disable=R0923</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>hostname</strong></dt>
+</dl>
+<dl><dt><strong>local</strong></dt>
+</dl>
+<dl><dt><strong>ssh_port</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="DNSFailureException">class <strong>DNSFailureException</strong></a>(<a href="telemetry.core.cros_interface.html#LoginException">LoginException</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.core.cros_interface.html#DNSFailureException">DNSFailureException</a></dd>
+<dd><a href="telemetry.core.cros_interface.html#LoginException">LoginException</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.core.cros_interface.html#LoginException">LoginException</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="DNSFailureException-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#DNSFailureException-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#DNSFailureException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="DNSFailureException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#DNSFailureException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="DNSFailureException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#DNSFailureException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="DNSFailureException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#DNSFailureException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="DNSFailureException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#DNSFailureException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="DNSFailureException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="DNSFailureException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#DNSFailureException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="DNSFailureException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#DNSFailureException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="DNSFailureException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="DNSFailureException-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#DNSFailureException-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="DNSFailureException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="KeylessLoginRequiredException">class <strong>KeylessLoginRequiredException</strong></a>(<a href="telemetry.core.cros_interface.html#LoginException">LoginException</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.core.cros_interface.html#KeylessLoginRequiredException">KeylessLoginRequiredException</a></dd>
+<dd><a href="telemetry.core.cros_interface.html#LoginException">LoginException</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.core.cros_interface.html#LoginException">LoginException</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="KeylessLoginRequiredException-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#KeylessLoginRequiredException-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#KeylessLoginRequiredException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="KeylessLoginRequiredException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#KeylessLoginRequiredException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="KeylessLoginRequiredException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#KeylessLoginRequiredException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="KeylessLoginRequiredException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#KeylessLoginRequiredException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="KeylessLoginRequiredException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#KeylessLoginRequiredException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="KeylessLoginRequiredException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="KeylessLoginRequiredException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#KeylessLoginRequiredException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="KeylessLoginRequiredException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#KeylessLoginRequiredException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="KeylessLoginRequiredException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="KeylessLoginRequiredException-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#KeylessLoginRequiredException-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="KeylessLoginRequiredException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="LoginException">class <strong>LoginException</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.core.cros_interface.html#LoginException">LoginException</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="LoginException-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#LoginException-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#LoginException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="LoginException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#LoginException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="LoginException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#LoginException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="LoginException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#LoginException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="LoginException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#LoginException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="LoginException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="LoginException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#LoginException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="LoginException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#LoginException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="LoginException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="LoginException-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#LoginException-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="LoginException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-GetAllCmdOutput"><strong>GetAllCmdOutput</strong></a>(args, cwd<font color="#909090">=None</font>, quiet<font color="#909090">=False</font>)</dt><dd><tt>Open&nbsp;a&nbsp;subprocess&nbsp;to&nbsp;execute&nbsp;a&nbsp;program&nbsp;and&nbsp;returns&nbsp;its&nbsp;output.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;args:&nbsp;A&nbsp;string&nbsp;or&nbsp;a&nbsp;sequence&nbsp;of&nbsp;program&nbsp;arguments.&nbsp;The&nbsp;program&nbsp;to&nbsp;execute&nbsp;is<br>
+&nbsp;&nbsp;&nbsp;&nbsp;the&nbsp;string&nbsp;or&nbsp;the&nbsp;first&nbsp;item&nbsp;in&nbsp;the&nbsp;args&nbsp;sequence.<br>
+&nbsp;&nbsp;cwd:&nbsp;If&nbsp;not&nbsp;None,&nbsp;the&nbsp;subprocess's&nbsp;current&nbsp;directory&nbsp;will&nbsp;be&nbsp;changed&nbsp;to<br>
+&nbsp;&nbsp;&nbsp;&nbsp;|cwd|&nbsp;before&nbsp;it's&nbsp;executed.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;Captures&nbsp;and&nbsp;returns&nbsp;the&nbsp;command's&nbsp;stdout.<br>
+&nbsp;&nbsp;Prints&nbsp;the&nbsp;command's&nbsp;stderr&nbsp;to&nbsp;logger&nbsp;(which&nbsp;defaults&nbsp;to&nbsp;stdout).</tt></dd></dl>
+ <dl><dt><a name="-HasSSH"><strong>HasSSH</strong></a>()</dt></dl>
+ <dl><dt><a name="-RunCmd"><strong>RunCmd</strong></a>(args, cwd<font color="#909090">=None</font>, quiet<font color="#909090">=False</font>)</dt><dd><tt>Opens&nbsp;a&nbsp;subprocess&nbsp;to&nbsp;execute&nbsp;a&nbsp;program&nbsp;and&nbsp;returns&nbsp;its&nbsp;return&nbsp;value.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;args:&nbsp;A&nbsp;string&nbsp;or&nbsp;a&nbsp;sequence&nbsp;of&nbsp;program&nbsp;arguments.&nbsp;The&nbsp;program&nbsp;to&nbsp;execute&nbsp;is<br>
+&nbsp;&nbsp;&nbsp;&nbsp;the&nbsp;string&nbsp;or&nbsp;the&nbsp;first&nbsp;item&nbsp;in&nbsp;the&nbsp;args&nbsp;sequence.<br>
+&nbsp;&nbsp;cwd:&nbsp;If&nbsp;not&nbsp;None,&nbsp;the&nbsp;subprocess's&nbsp;current&nbsp;directory&nbsp;will&nbsp;be&nbsp;changed&nbsp;to<br>
+&nbsp;&nbsp;&nbsp;&nbsp;|cwd|&nbsp;before&nbsp;it's&nbsp;executed.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;Return&nbsp;code&nbsp;from&nbsp;the&nbsp;command&nbsp;execution.</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
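CrOSInterface, documented above, wraps ssh for driving a Chrome OS device. A rough usage sketch with placeholder connection details; the tuple shapes assumed for RunCmdOnDevice and ListProcesses follow their docstrings and common usage, not a verified contract:

```python
from telemetry.core import cros_interface

# Placeholder host, port, and identity file; point these at a real device.
cri = cros_interface.CrOSInterface(
    hostname='192.168.0.10', ssh_port=22, ssh_identity='/path/to/testing_rsa')

# RunCmdOnDevice is assumed to return (stdout, stderr).
stdout, _ = cri.RunCmdOnDevice(['uname', '-a'])
print(stdout)

if cri.FileExistsOnDevice('/etc/lsb-release'):
  print(cri.GetFileContents('/etc/lsb-release'))

# ListProcesses returns (pid, cmd, ppid, state) per its docstring.
for pid, cmd, _, _ in cri.ListProcesses():
  if 'chrome' in cmd:
    print('%s %s' % (pid, cmd))

cri.CloseConnection()
```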
diff --git a/catapult/telemetry/docs/pydoc/telemetry.core.discover.html b/catapult/telemetry/docs/pydoc/telemetry.core.discover.html
new file mode 100644
index 0000000..5912099
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.core.discover.html
@@ -0,0 +1,75 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.core.discover</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.core.html"><font color="#ffffff">core</font></a>.discover</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/core/discover.py">telemetry/core/discover.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.util.camel_case.html">telemetry.internal.util.camel_case</a><br>
+<a href="telemetry.internal.util.classes.html">telemetry.internal.util.classes</a><br>
+</td><td width="25%" valign=top><a href="telemetry.decorators.html">telemetry.decorators</a><br>
+<a href="fnmatch.html">fnmatch</a><br>
+</td><td width="25%" valign=top><a href="inspect.html">inspect</a><br>
+<a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="re.html">re</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-DiscoverClasses"><strong>DiscoverClasses</strong></a>(*args, **kwargs)</dt><dd><tt>Discover&nbsp;all&nbsp;classes&nbsp;in&nbsp;|start_dir|&nbsp;which&nbsp;subclass&nbsp;|base_class|.<br>
+&nbsp;<br>
+Base&nbsp;classes&nbsp;that&nbsp;contain&nbsp;subclasses&nbsp;are&nbsp;ignored&nbsp;by&nbsp;default.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;start_dir:&nbsp;The&nbsp;directory&nbsp;to&nbsp;recursively&nbsp;search.<br>
+&nbsp;&nbsp;top_level_dir:&nbsp;The&nbsp;top&nbsp;level&nbsp;of&nbsp;the&nbsp;package,&nbsp;for&nbsp;importing.<br>
+&nbsp;&nbsp;base_class:&nbsp;The&nbsp;base&nbsp;class&nbsp;to&nbsp;search&nbsp;for.<br>
+&nbsp;&nbsp;pattern:&nbsp;Unix&nbsp;shell-style&nbsp;pattern&nbsp;for&nbsp;filtering&nbsp;the&nbsp;filenames&nbsp;to&nbsp;import.<br>
+&nbsp;&nbsp;index_by_class_name:&nbsp;If&nbsp;True,&nbsp;use&nbsp;class&nbsp;name&nbsp;converted&nbsp;to<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;lowercase_with_underscores&nbsp;instead&nbsp;of&nbsp;module&nbsp;name&nbsp;in&nbsp;return&nbsp;dict&nbsp;keys.<br>
+&nbsp;&nbsp;directly_constructable:&nbsp;If&nbsp;True,&nbsp;will&nbsp;only&nbsp;return&nbsp;classes&nbsp;that&nbsp;can&nbsp;be<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;constructed&nbsp;without&nbsp;arguments.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;dict&nbsp;of&nbsp;{module_name:&nbsp;class}&nbsp;or&nbsp;{underscored_class_name:&nbsp;class}</tt></dd></dl>
+ <dl><dt><a name="-DiscoverClassesInModule"><strong>DiscoverClassesInModule</strong></a>(*args, **kwargs)</dt><dd><tt>Discover&nbsp;all&nbsp;classes&nbsp;in&nbsp;|module|&nbsp;which&nbsp;subclass&nbsp;|base_class|.<br>
+&nbsp;<br>
+Base&nbsp;classes&nbsp;that&nbsp;contain&nbsp;subclasses&nbsp;are&nbsp;ignored&nbsp;by&nbsp;default.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;module:&nbsp;The&nbsp;module&nbsp;to&nbsp;search.<br>
+&nbsp;&nbsp;base_class:&nbsp;The&nbsp;base&nbsp;class&nbsp;to&nbsp;search&nbsp;for.<br>
+&nbsp;&nbsp;index_by_class_name:&nbsp;If&nbsp;True,&nbsp;use&nbsp;class&nbsp;name&nbsp;converted&nbsp;to<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;lowercase_with_underscores&nbsp;instead&nbsp;of&nbsp;module&nbsp;name&nbsp;in&nbsp;return&nbsp;dict&nbsp;keys.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;dict&nbsp;of&nbsp;{module_name:&nbsp;class}&nbsp;or&nbsp;{underscored_class_name:&nbsp;class}</tt></dd></dl>
+ <dl><dt><a name="-DiscoverModules"><strong>DiscoverModules</strong></a>(*args, **kwargs)</dt><dd><tt>Discover&nbsp;all&nbsp;modules&nbsp;in&nbsp;|start_dir|&nbsp;which&nbsp;match&nbsp;|pattern|.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;start_dir:&nbsp;The&nbsp;directory&nbsp;to&nbsp;recursively&nbsp;search.<br>
+&nbsp;&nbsp;top_level_dir:&nbsp;The&nbsp;top&nbsp;level&nbsp;of&nbsp;the&nbsp;package,&nbsp;for&nbsp;importing.<br>
+&nbsp;&nbsp;pattern:&nbsp;Unix&nbsp;shell-style&nbsp;pattern&nbsp;for&nbsp;filtering&nbsp;the&nbsp;filenames&nbsp;to&nbsp;import.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;list&nbsp;of&nbsp;modules.</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
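The discover helpers above are how Telemetry locates classes by walking a directory tree. A small sketch with placeholder paths; `benchmark.Benchmark` stands in for whatever base class you actually want to discover subclasses of:

```python
import os

from telemetry import benchmark      # illustrative base class
from telemetry.core import discover

top_level_dir = os.path.dirname(os.path.abspath(__file__))
start_dir = os.path.join(top_level_dir, 'benchmarks')   # placeholder path

# With index_by_class_name=True the keys are lowercase_with_underscores class
# names; the values are the discovered classes.
classes = discover.DiscoverClasses(
    start_dir, top_level_dir, benchmark.Benchmark,
    pattern='*_benchmark.py', index_by_class_name=True)

for name in sorted(classes):
  print('%s -> %s' % (name, classes[name].__name__))
```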
diff --git a/catapult/telemetry/docs/pydoc/telemetry.core.exceptions.html b/catapult/telemetry/docs/pydoc/telemetry.core.exceptions.html
new file mode 100644
index 0000000..744db81
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.core.exceptions.html
@@ -0,0 +1,1241 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.core.exceptions</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.core.html"><font color="#ffffff">core</font></a>.exceptions</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/core/exceptions.py">telemetry/core/exceptions.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="sys.html">sys</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.core.exceptions.html#Error">Error</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.core.exceptions.html#AndroidDeviceParsingError">AndroidDeviceParsingError</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.core.exceptions.html#AppCrashException">AppCrashException</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.core.exceptions.html#BrowserGoneException">BrowserGoneException</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.core.exceptions.html#BrowserConnectionGoneException">BrowserConnectionGoneException</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.core.exceptions.html#DevtoolsTargetCrashException">DevtoolsTargetCrashException</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.core.exceptions.html#EvaluateException">EvaluateException</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.core.exceptions.html#InitializationError">InitializationError</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.core.exceptions.html#IntentionalException">IntentionalException</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.core.exceptions.html#LoginException">LoginException</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.core.exceptions.html#PackageDetectionError">PackageDetectionError</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.core.exceptions.html#PathMissingError">PathMissingError</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.core.exceptions.html#PlatformError">PlatformError</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.core.exceptions.html#ProcessGoneException">ProcessGoneException</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.core.exceptions.html#ProfilingException">ProfilingException</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.core.exceptions.html#TimeoutException">TimeoutException</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.core.exceptions.html#UnknownPackageError">UnknownPackageError</a>
+</font></dt></dl>
+</dd>
+</dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="AndroidDeviceParsingError">class <strong>AndroidDeviceParsingError</strong></a>(<a href="telemetry.core.exceptions.html#Error">Error</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Represents&nbsp;an&nbsp;error&nbsp;when&nbsp;parsing&nbsp;output&nbsp;from&nbsp;an&nbsp;android&nbsp;device<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.core.exceptions.html#AndroidDeviceParsingError">AndroidDeviceParsingError</a></dd>
+<dd><a href="telemetry.core.exceptions.html#Error">Error</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><a name="AndroidDeviceParsingError-AddDebuggingMessage"><strong>AddDebuggingMessage</strong></a>(self, msg)</dt><dd><tt>Adds&nbsp;a&nbsp;message&nbsp;to&nbsp;the&nbsp;description&nbsp;of&nbsp;the&nbsp;exception.<br>
+&nbsp;<br>
+Many&nbsp;Telemetry&nbsp;exceptions&nbsp;arise&nbsp;from&nbsp;failures&nbsp;in&nbsp;another&nbsp;application.&nbsp;These<br>
+failures&nbsp;are&nbsp;difficult&nbsp;to&nbsp;pinpoint.&nbsp;This&nbsp;method&nbsp;allows&nbsp;Telemetry&nbsp;classes&nbsp;to<br>
+append&nbsp;useful&nbsp;debugging&nbsp;information&nbsp;to&nbsp;the&nbsp;exception.&nbsp;This&nbsp;method&nbsp;also&nbsp;logs<br>
+information&nbsp;about&nbsp;the&nbsp;location&nbsp;from&nbsp;where&nbsp;it&nbsp;was&nbsp;called.</tt></dd></dl>
+
+<dl><dt><a name="AndroidDeviceParsingError-__init__"><strong>__init__</strong></a>(self, msg<font color="#909090">=''</font>)</dt></dl>
+
+<dl><dt><a name="AndroidDeviceParsingError-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#AndroidDeviceParsingError-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="AndroidDeviceParsingError-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#AndroidDeviceParsingError-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="AndroidDeviceParsingError-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#AndroidDeviceParsingError-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="AndroidDeviceParsingError-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#AndroidDeviceParsingError-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="AndroidDeviceParsingError-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#AndroidDeviceParsingError-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="AndroidDeviceParsingError-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="AndroidDeviceParsingError-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#AndroidDeviceParsingError-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="AndroidDeviceParsingError-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#AndroidDeviceParsingError-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="AndroidDeviceParsingError-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="AndroidDeviceParsingError-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="AppCrashException">class <strong>AppCrashException</strong></a>(<a href="telemetry.core.exceptions.html#Error">Error</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.core.exceptions.html#AppCrashException">AppCrashException</a></dd>
+<dd><a href="telemetry.core.exceptions.html#Error">Error</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="AppCrashException-__init__"><strong>__init__</strong></a>(self, app<font color="#909090">=None</font>, msg<font color="#909090">=''</font>)</dt></dl>
+
+<dl><dt><a name="AppCrashException-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><a name="AppCrashException-AddDebuggingMessage"><strong>AddDebuggingMessage</strong></a>(self, msg)</dt><dd><tt>Adds&nbsp;a&nbsp;message&nbsp;to&nbsp;the&nbsp;description&nbsp;of&nbsp;the&nbsp;exception.<br>
+&nbsp;<br>
+Many&nbsp;Telemetry&nbsp;exceptions&nbsp;arise&nbsp;from&nbsp;failures&nbsp;in&nbsp;another&nbsp;application.&nbsp;These<br>
+failures&nbsp;are&nbsp;difficult&nbsp;to&nbsp;pinpoint.&nbsp;This&nbsp;method&nbsp;allows&nbsp;Telemetry&nbsp;classes&nbsp;to<br>
+append&nbsp;useful&nbsp;debugging&nbsp;information&nbsp;to&nbsp;the&nbsp;exception.&nbsp;This&nbsp;method&nbsp;also&nbsp;logs<br>
+information&nbsp;about&nbsp;the&nbsp;location&nbsp;from&nbsp;where&nbsp;it&nbsp;was&nbsp;called.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#AppCrashException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="AppCrashException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#AppCrashException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="AppCrashException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#AppCrashException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="AppCrashException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#AppCrashException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="AppCrashException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#AppCrashException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="AppCrashException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="AppCrashException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#AppCrashException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="AppCrashException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#AppCrashException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="AppCrashException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="AppCrashException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="BrowserConnectionGoneException">class <strong>BrowserConnectionGoneException</strong></a>(<a href="telemetry.core.exceptions.html#BrowserGoneException">BrowserGoneException</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Represents&nbsp;a&nbsp;browser&nbsp;that&nbsp;still&nbsp;exists&nbsp;but&nbsp;cannot&nbsp;be&nbsp;reached.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.core.exceptions.html#BrowserConnectionGoneException">BrowserConnectionGoneException</a></dd>
+<dd><a href="telemetry.core.exceptions.html#BrowserGoneException">BrowserGoneException</a></dd>
+<dd><a href="telemetry.core.exceptions.html#AppCrashException">AppCrashException</a></dd>
+<dd><a href="telemetry.core.exceptions.html#Error">Error</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="BrowserConnectionGoneException-__init__"><strong>__init__</strong></a>(self, app, msg<font color="#909090">='Browser exists but the connection is gone'</font>)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.core.exceptions.html#AppCrashException">AppCrashException</a>:<br>
+<dl><dt><a name="BrowserConnectionGoneException-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><a name="BrowserConnectionGoneException-AddDebuggingMessage"><strong>AddDebuggingMessage</strong></a>(self, msg)</dt><dd><tt>Adds&nbsp;a&nbsp;message&nbsp;to&nbsp;the&nbsp;description&nbsp;of&nbsp;the&nbsp;exception.<br>
+&nbsp;<br>
+Many&nbsp;Telemetry&nbsp;exceptions&nbsp;arise&nbsp;from&nbsp;failures&nbsp;in&nbsp;another&nbsp;application.&nbsp;These<br>
+failures&nbsp;are&nbsp;difficult&nbsp;to&nbsp;pinpoint.&nbsp;This&nbsp;method&nbsp;allows&nbsp;Telemetry&nbsp;classes&nbsp;to<br>
+append&nbsp;useful&nbsp;debugging&nbsp;information&nbsp;to&nbsp;the&nbsp;exception.&nbsp;This&nbsp;method&nbsp;also&nbsp;logs<br>
+information&nbsp;about&nbsp;the&nbsp;location&nbsp;from&nbsp;where&nbsp;it&nbsp;was&nbsp;called.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#BrowserConnectionGoneException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="BrowserConnectionGoneException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#BrowserConnectionGoneException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="BrowserConnectionGoneException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#BrowserConnectionGoneException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="BrowserConnectionGoneException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#BrowserConnectionGoneException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="BrowserConnectionGoneException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#BrowserConnectionGoneException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="BrowserConnectionGoneException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="BrowserConnectionGoneException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#BrowserConnectionGoneException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="BrowserConnectionGoneException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#BrowserConnectionGoneException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="BrowserConnectionGoneException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="BrowserConnectionGoneException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="BrowserGoneException">class <strong>BrowserGoneException</strong></a>(<a href="telemetry.core.exceptions.html#AppCrashException">AppCrashException</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Represents&nbsp;a&nbsp;crash&nbsp;of&nbsp;the&nbsp;entire&nbsp;browser.<br>
+&nbsp;<br>
+In&nbsp;this&nbsp;state,&nbsp;all&nbsp;bets&nbsp;are&nbsp;pretty&nbsp;much&nbsp;off.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.core.exceptions.html#BrowserGoneException">BrowserGoneException</a></dd>
+<dd><a href="telemetry.core.exceptions.html#AppCrashException">AppCrashException</a></dd>
+<dd><a href="telemetry.core.exceptions.html#Error">Error</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="BrowserGoneException-__init__"><strong>__init__</strong></a>(self, app, msg<font color="#909090">='Browser crashed'</font>)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.core.exceptions.html#AppCrashException">AppCrashException</a>:<br>
+<dl><dt><a name="BrowserGoneException-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><a name="BrowserGoneException-AddDebuggingMessage"><strong>AddDebuggingMessage</strong></a>(self, msg)</dt><dd><tt>Adds&nbsp;a&nbsp;message&nbsp;to&nbsp;the&nbsp;description&nbsp;of&nbsp;the&nbsp;exception.<br>
+&nbsp;<br>
+Many&nbsp;Telemetry&nbsp;exceptions&nbsp;arise&nbsp;from&nbsp;failures&nbsp;in&nbsp;another&nbsp;application.&nbsp;These<br>
+failures&nbsp;are&nbsp;difficult&nbsp;to&nbsp;pinpoint.&nbsp;This&nbsp;method&nbsp;allows&nbsp;Telemetry&nbsp;classes&nbsp;to<br>
+append&nbsp;useful&nbsp;debugging&nbsp;information&nbsp;to&nbsp;the&nbsp;exception.&nbsp;This&nbsp;method&nbsp;also&nbsp;logs<br>
+information&nbsp;about&nbsp;the&nbsp;location&nbsp;from&nbsp;where&nbsp;it&nbsp;was&nbsp;called.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#BrowserGoneException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="BrowserGoneException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#BrowserGoneException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="BrowserGoneException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#BrowserGoneException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="BrowserGoneException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#BrowserGoneException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="BrowserGoneException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#BrowserGoneException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="BrowserGoneException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="BrowserGoneException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#BrowserGoneException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="BrowserGoneException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#BrowserGoneException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="BrowserGoneException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="BrowserGoneException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="DevtoolsTargetCrashException">class <strong>DevtoolsTargetCrashException</strong></a>(<a href="telemetry.core.exceptions.html#AppCrashException">AppCrashException</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Represents&nbsp;a&nbsp;crash&nbsp;of&nbsp;the&nbsp;current&nbsp;devtools&nbsp;target&nbsp;but&nbsp;not&nbsp;the&nbsp;overall&nbsp;app.<br>
+&nbsp;<br>
+This&nbsp;can&nbsp;be&nbsp;a&nbsp;tab&nbsp;or&nbsp;a&nbsp;WebView.&nbsp;In&nbsp;this&nbsp;state,&nbsp;the&nbsp;tab/WebView&nbsp;is<br>
+gone,&nbsp;but&nbsp;the&nbsp;underlying&nbsp;browser&nbsp;is&nbsp;still&nbsp;alive.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.core.exceptions.html#DevtoolsTargetCrashException">DevtoolsTargetCrashException</a></dd>
+<dd><a href="telemetry.core.exceptions.html#AppCrashException">AppCrashException</a></dd>
+<dd><a href="telemetry.core.exceptions.html#Error">Error</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="DevtoolsTargetCrashException-__init__"><strong>__init__</strong></a>(self, app, msg<font color="#909090">='Devtools target crashed'</font>)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.core.exceptions.html#AppCrashException">AppCrashException</a>:<br>
+<dl><dt><a name="DevtoolsTargetCrashException-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><a name="DevtoolsTargetCrashException-AddDebuggingMessage"><strong>AddDebuggingMessage</strong></a>(self, msg)</dt><dd><tt>Adds&nbsp;a&nbsp;message&nbsp;to&nbsp;the&nbsp;description&nbsp;of&nbsp;the&nbsp;exception.<br>
+&nbsp;<br>
+Many&nbsp;Telemetry&nbsp;exceptions&nbsp;arise&nbsp;from&nbsp;failures&nbsp;in&nbsp;another&nbsp;application.&nbsp;These<br>
+failures&nbsp;are&nbsp;difficult&nbsp;to&nbsp;pinpoint.&nbsp;This&nbsp;method&nbsp;allows&nbsp;Telemetry&nbsp;classes&nbsp;to<br>
+append&nbsp;useful&nbsp;debugging&nbsp;information&nbsp;to&nbsp;the&nbsp;exception.&nbsp;This&nbsp;method&nbsp;also&nbsp;logs<br>
+information&nbsp;about&nbsp;the&nbsp;location&nbsp;from&nbsp;where&nbsp;it&nbsp;was&nbsp;called.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#DevtoolsTargetCrashException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="DevtoolsTargetCrashException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#DevtoolsTargetCrashException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="DevtoolsTargetCrashException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#DevtoolsTargetCrashException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="DevtoolsTargetCrashException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#DevtoolsTargetCrashException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="DevtoolsTargetCrashException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#DevtoolsTargetCrashException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="DevtoolsTargetCrashException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="DevtoolsTargetCrashException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#DevtoolsTargetCrashException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="DevtoolsTargetCrashException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#DevtoolsTargetCrashException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="DevtoolsTargetCrashException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="DevtoolsTargetCrashException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Error">class <strong>Error</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Base&nbsp;class&nbsp;for&nbsp;Telemetry&nbsp;exceptions.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.core.exceptions.html#Error">Error</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="Error-AddDebuggingMessage"><strong>AddDebuggingMessage</strong></a>(self, msg)</dt><dd><tt>Adds&nbsp;a&nbsp;message&nbsp;to&nbsp;the&nbsp;description&nbsp;of&nbsp;the&nbsp;exception.<br>
+&nbsp;<br>
+Many&nbsp;Telemetry&nbsp;exceptions&nbsp;arise&nbsp;from&nbsp;failures&nbsp;in&nbsp;another&nbsp;application.&nbsp;These<br>
+failures&nbsp;are&nbsp;difficult&nbsp;to&nbsp;pinpoint.&nbsp;This&nbsp;method&nbsp;allows&nbsp;Telemetry&nbsp;classes&nbsp;to<br>
+append&nbsp;useful&nbsp;debugging&nbsp;information&nbsp;to&nbsp;the&nbsp;exception.&nbsp;This&nbsp;method&nbsp;also&nbsp;logs<br>
+information&nbsp;about&nbsp;the&nbsp;location&nbsp;from&nbsp;where&nbsp;it&nbsp;was&nbsp;called.</tt></dd></dl>
+
+<dl><dt><a name="Error-__init__"><strong>__init__</strong></a>(self, msg<font color="#909090">=''</font>)</dt></dl>
+
+<dl><dt><a name="Error-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#Error-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="Error-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#Error-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="Error-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#Error-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="Error-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#Error-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="Error-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#Error-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="Error-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="Error-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#Error-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="Error-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#Error-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="Error-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="Error-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
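The Error class above is the root of the Telemetry exception hierarchy, and its AddDebuggingMessage method is the documented way to append context to an exception that originated in another application (the method also logs where the message was added). A hedged sketch of that pattern; the failing parser is invented purely for illustration, while the exceptions API used matches the documentation above:

```python
# Only the exceptions API used here (AndroidDeviceParsingError, Error,
# AddDebuggingMessage) comes from the documented module; the parsing helper is
# a made-up stand-in for real device interaction.
from telemetry.core import exceptions


def _ParseBatteryLevel(raw_output):
  try:
    return int(raw_output)
  except ValueError:
    raise exceptions.AndroidDeviceParsingError(
        'Could not parse battery level: %r' % raw_output)


try:
  _ParseBatteryLevel('n/a')
except exceptions.Error as e:
  # Attach extra context before letting the exception propagate; the method
  # also logs the location from which it was called.
  e.AddDebuggingMessage('Seen while checking device state before a benchmark.')
  raise
```

Because every class in this module inherits from Error, a single `except exceptions.Error` clause catches any Telemetry-specific failure while letting unrelated exceptions propagate.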
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="EvaluateException">class <strong>EvaluateException</strong></a>(<a href="telemetry.core.exceptions.html#Error">Error</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.core.exceptions.html#EvaluateException">EvaluateException</a></dd>
+<dd><a href="telemetry.core.exceptions.html#Error">Error</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><a name="EvaluateException-AddDebuggingMessage"><strong>AddDebuggingMessage</strong></a>(self, msg)</dt><dd><tt>Adds&nbsp;a&nbsp;message&nbsp;to&nbsp;the&nbsp;description&nbsp;of&nbsp;the&nbsp;exception.<br>
+&nbsp;<br>
+Many&nbsp;Telemetry&nbsp;exceptions&nbsp;arise&nbsp;from&nbsp;failures&nbsp;in&nbsp;another&nbsp;application.&nbsp;These<br>
+failures&nbsp;are&nbsp;difficult&nbsp;to&nbsp;pinpoint.&nbsp;This&nbsp;method&nbsp;allows&nbsp;Telemetry&nbsp;classes&nbsp;to<br>
+append&nbsp;useful&nbsp;debugging&nbsp;information&nbsp;to&nbsp;the&nbsp;exception.&nbsp;This&nbsp;method&nbsp;also&nbsp;logs<br>
+information&nbsp;about&nbsp;the&nbsp;location&nbsp;from&nbsp;where&nbsp;it&nbsp;was&nbsp;called.</tt></dd></dl>
+
+<dl><dt><a name="EvaluateException-__init__"><strong>__init__</strong></a>(self, msg<font color="#909090">=''</font>)</dt></dl>
+
+<dl><dt><a name="EvaluateException-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#EvaluateException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="EvaluateException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#EvaluateException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="EvaluateException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#EvaluateException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="EvaluateException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#EvaluateException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="EvaluateException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#EvaluateException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="EvaluateException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="EvaluateException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#EvaluateException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="EvaluateException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#EvaluateException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="EvaluateException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="EvaluateException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="InitializationError">class <strong>InitializationError</strong></a>(<a href="telemetry.core.exceptions.html#Error">Error</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.core.exceptions.html#InitializationError">InitializationError</a></dd>
+<dd><a href="telemetry.core.exceptions.html#Error">Error</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="InitializationError-__init__"><strong>__init__</strong></a>(self, string)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><a name="InitializationError-AddDebuggingMessage"><strong>AddDebuggingMessage</strong></a>(self, msg)</dt><dd><tt>Adds&nbsp;a&nbsp;message&nbsp;to&nbsp;the&nbsp;description&nbsp;of&nbsp;the&nbsp;exception.<br>
+&nbsp;<br>
+Many&nbsp;Telemetry&nbsp;exceptions&nbsp;arise&nbsp;from&nbsp;failures&nbsp;in&nbsp;another&nbsp;application.&nbsp;These<br>
+failures&nbsp;are&nbsp;difficult&nbsp;to&nbsp;pinpoint.&nbsp;This&nbsp;method&nbsp;allows&nbsp;Telemetry&nbsp;classes&nbsp;to<br>
+append&nbsp;useful&nbsp;debugging&nbsp;information&nbsp;to&nbsp;the&nbsp;exception.&nbsp;This&nbsp;method&nbsp;also&nbsp;logs<br>
+information&nbsp;about&nbsp;the&nbsp;location&nbsp;from&nbsp;where&nbsp;it&nbsp;was&nbsp;called.</tt></dd></dl>
+
+<dl><dt><a name="InitializationError-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#InitializationError-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="InitializationError-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#InitializationError-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="InitializationError-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#InitializationError-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="InitializationError-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#InitializationError-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="InitializationError-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#InitializationError-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="InitializationError-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="InitializationError-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#InitializationError-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="InitializationError-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#InitializationError-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="InitializationError-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="InitializationError-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="IntentionalException">class <strong>IntentionalException</strong></a>(<a href="telemetry.core.exceptions.html#Error">Error</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Represents&nbsp;an&nbsp;exception&nbsp;raised&nbsp;by&nbsp;a&nbsp;unittest&nbsp;which&nbsp;is&nbsp;not&nbsp;printed.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.core.exceptions.html#IntentionalException">IntentionalException</a></dd>
+<dd><a href="telemetry.core.exceptions.html#Error">Error</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><a name="IntentionalException-AddDebuggingMessage"><strong>AddDebuggingMessage</strong></a>(self, msg)</dt><dd><tt>Adds&nbsp;a&nbsp;message&nbsp;to&nbsp;the&nbsp;description&nbsp;of&nbsp;the&nbsp;exception.<br>
+&nbsp;<br>
+Many&nbsp;Telemetry&nbsp;exceptions&nbsp;arise&nbsp;from&nbsp;failures&nbsp;in&nbsp;another&nbsp;application.&nbsp;These<br>
+failures&nbsp;are&nbsp;difficult&nbsp;to&nbsp;pinpoint.&nbsp;This&nbsp;method&nbsp;allows&nbsp;Telemetry&nbsp;classes&nbsp;to<br>
+append&nbsp;useful&nbsp;debugging&nbsp;information&nbsp;to&nbsp;the&nbsp;exception.&nbsp;This&nbsp;method&nbsp;also&nbsp;logs<br>
+information&nbsp;about&nbsp;the&nbsp;location&nbsp;from&nbsp;where&nbsp;it&nbsp;was&nbsp;called.</tt></dd></dl>
+
+<dl><dt><a name="IntentionalException-__init__"><strong>__init__</strong></a>(self, msg<font color="#909090">=''</font>)</dt></dl>
+
+<dl><dt><a name="IntentionalException-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#IntentionalException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="IntentionalException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#IntentionalException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="IntentionalException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#IntentionalException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="IntentionalException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#IntentionalException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="IntentionalException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#IntentionalException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="IntentionalException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="IntentionalException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#IntentionalException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="IntentionalException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#IntentionalException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="IntentionalException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="IntentionalException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="LoginException">class <strong>LoginException</strong></a>(<a href="telemetry.core.exceptions.html#Error">Error</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.core.exceptions.html#LoginException">LoginException</a></dd>
+<dd><a href="telemetry.core.exceptions.html#Error">Error</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><a name="LoginException-AddDebuggingMessage"><strong>AddDebuggingMessage</strong></a>(self, msg)</dt><dd><tt>Adds&nbsp;a&nbsp;message&nbsp;to&nbsp;the&nbsp;description&nbsp;of&nbsp;the&nbsp;exception.<br>
+&nbsp;<br>
+Many&nbsp;Telemetry&nbsp;exceptions&nbsp;arise&nbsp;from&nbsp;failures&nbsp;in&nbsp;another&nbsp;application.&nbsp;These<br>
+failures&nbsp;are&nbsp;difficult&nbsp;to&nbsp;pinpoint.&nbsp;This&nbsp;method&nbsp;allows&nbsp;Telemetry&nbsp;classes&nbsp;to<br>
+append&nbsp;useful&nbsp;debugging&nbsp;information&nbsp;to&nbsp;the&nbsp;exception.&nbsp;This&nbsp;method&nbsp;also&nbsp;logs<br>
+information&nbsp;about&nbsp;the&nbsp;location&nbsp;from&nbsp;where&nbsp;it&nbsp;was&nbsp;called.</tt></dd></dl>
+
+<dl><dt><a name="LoginException-__init__"><strong>__init__</strong></a>(self, msg<font color="#909090">=''</font>)</dt></dl>
+
+<dl><dt><a name="LoginException-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#LoginException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="LoginException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#LoginException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="LoginException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#LoginException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="LoginException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#LoginException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="LoginException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#LoginException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="LoginException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="LoginException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#LoginException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="LoginException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#LoginException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="LoginException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="LoginException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PackageDetectionError">class <strong>PackageDetectionError</strong></a>(<a href="telemetry.core.exceptions.html#Error">Error</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Represents&nbsp;an&nbsp;error&nbsp;when&nbsp;parsing&nbsp;an&nbsp;Android&nbsp;APK's&nbsp;package.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.core.exceptions.html#PackageDetectionError">PackageDetectionError</a></dd>
+<dd><a href="telemetry.core.exceptions.html#Error">Error</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><a name="PackageDetectionError-AddDebuggingMessage"><strong>AddDebuggingMessage</strong></a>(self, msg)</dt><dd><tt>Adds&nbsp;a&nbsp;message&nbsp;to&nbsp;the&nbsp;description&nbsp;of&nbsp;the&nbsp;exception.<br>
+&nbsp;<br>
+Many&nbsp;Telemetry&nbsp;exceptions&nbsp;arise&nbsp;from&nbsp;failures&nbsp;in&nbsp;another&nbsp;application.&nbsp;These<br>
+failures&nbsp;are&nbsp;difficult&nbsp;to&nbsp;pinpoint.&nbsp;This&nbsp;method&nbsp;allows&nbsp;Telemetry&nbsp;classes&nbsp;to<br>
+append&nbsp;useful&nbsp;debugging&nbsp;information&nbsp;to&nbsp;the&nbsp;exception.&nbsp;This&nbsp;method&nbsp;also&nbsp;logs<br>
+information&nbsp;about&nbsp;the&nbsp;location&nbsp;from&nbsp;where&nbsp;it&nbsp;was&nbsp;called.</tt></dd></dl>
+
+<dl><dt><a name="PackageDetectionError-__init__"><strong>__init__</strong></a>(self, msg<font color="#909090">=''</font>)</dt></dl>
+
+<dl><dt><a name="PackageDetectionError-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#PackageDetectionError-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="PackageDetectionError-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#PackageDetectionError-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="PackageDetectionError-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#PackageDetectionError-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="PackageDetectionError-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#PackageDetectionError-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="PackageDetectionError-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#PackageDetectionError-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="PackageDetectionError-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="PackageDetectionError-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#PackageDetectionError-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="PackageDetectionError-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#PackageDetectionError-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="PackageDetectionError-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="PackageDetectionError-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PathMissingError">class <strong>PathMissingError</strong></a>(<a href="telemetry.core.exceptions.html#Error">Error</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Represents&nbsp;an&nbsp;exception&nbsp;thrown&nbsp;when&nbsp;an&nbsp;expected&nbsp;path&nbsp;doesn't&nbsp;exist.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.core.exceptions.html#PathMissingError">PathMissingError</a></dd>
+<dd><a href="telemetry.core.exceptions.html#Error">Error</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><a name="PathMissingError-AddDebuggingMessage"><strong>AddDebuggingMessage</strong></a>(self, msg)</dt><dd><tt>Adds&nbsp;a&nbsp;message&nbsp;to&nbsp;the&nbsp;description&nbsp;of&nbsp;the&nbsp;exception.<br>
+&nbsp;<br>
+Many&nbsp;Telemetry&nbsp;exceptions&nbsp;arise&nbsp;from&nbsp;failures&nbsp;in&nbsp;another&nbsp;application.&nbsp;These<br>
+failures&nbsp;are&nbsp;difficult&nbsp;to&nbsp;pinpoint.&nbsp;This&nbsp;method&nbsp;allows&nbsp;Telemetry&nbsp;classes&nbsp;to<br>
+append&nbsp;useful&nbsp;debugging&nbsp;information&nbsp;to&nbsp;the&nbsp;exception.&nbsp;This&nbsp;method&nbsp;also&nbsp;logs<br>
+information&nbsp;about&nbsp;the&nbsp;location&nbsp;from&nbsp;where&nbsp;it&nbsp;was&nbsp;called.</tt></dd></dl>
+
+<dl><dt><a name="PathMissingError-__init__"><strong>__init__</strong></a>(self, msg<font color="#909090">=''</font>)</dt></dl>
+
+<dl><dt><a name="PathMissingError-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#PathMissingError-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="PathMissingError-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#PathMissingError-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="PathMissingError-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#PathMissingError-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="PathMissingError-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#PathMissingError-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="PathMissingError-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#PathMissingError-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="PathMissingError-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="PathMissingError-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#PathMissingError-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="PathMissingError-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#PathMissingError-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="PathMissingError-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="PathMissingError-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PlatformError">class <strong>PlatformError</strong></a>(<a href="telemetry.core.exceptions.html#Error">Error</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Represents&nbsp;an&nbsp;exception&nbsp;thrown&nbsp;when&nbsp;constructing&nbsp;platform.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.core.exceptions.html#PlatformError">PlatformError</a></dd>
+<dd><a href="telemetry.core.exceptions.html#Error">Error</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><a name="PlatformError-AddDebuggingMessage"><strong>AddDebuggingMessage</strong></a>(self, msg)</dt><dd><tt>Adds&nbsp;a&nbsp;message&nbsp;to&nbsp;the&nbsp;description&nbsp;of&nbsp;the&nbsp;exception.<br>
+&nbsp;<br>
+Many&nbsp;Telemetry&nbsp;exceptions&nbsp;arise&nbsp;from&nbsp;failures&nbsp;in&nbsp;another&nbsp;application.&nbsp;These<br>
+failures&nbsp;are&nbsp;difficult&nbsp;to&nbsp;pinpoint.&nbsp;This&nbsp;method&nbsp;allows&nbsp;Telemetry&nbsp;classes&nbsp;to<br>
+append&nbsp;useful&nbsp;debugging&nbsp;information&nbsp;to&nbsp;the&nbsp;exception.&nbsp;This&nbsp;method&nbsp;also&nbsp;logs<br>
+information&nbsp;about&nbsp;the&nbsp;location&nbsp;from&nbsp;where&nbsp;it&nbsp;was&nbsp;called.</tt></dd></dl>
+
+<dl><dt><a name="PlatformError-__init__"><strong>__init__</strong></a>(self, msg<font color="#909090">=''</font>)</dt></dl>
+
+<dl><dt><a name="PlatformError-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#PlatformError-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="PlatformError-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#PlatformError-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="PlatformError-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#PlatformError-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="PlatformError-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#PlatformError-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="PlatformError-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#PlatformError-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="PlatformError-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="PlatformError-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#PlatformError-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="PlatformError-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#PlatformError-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="PlatformError-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="PlatformError-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ProcessGoneException">class <strong>ProcessGoneException</strong></a>(<a href="telemetry.core.exceptions.html#Error">Error</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Represents&nbsp;a&nbsp;process&nbsp;that&nbsp;no&nbsp;longer&nbsp;exists&nbsp;for&nbsp;an&nbsp;unknown&nbsp;reason.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.core.exceptions.html#ProcessGoneException">ProcessGoneException</a></dd>
+<dd><a href="telemetry.core.exceptions.html#Error">Error</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><a name="ProcessGoneException-AddDebuggingMessage"><strong>AddDebuggingMessage</strong></a>(self, msg)</dt><dd><tt>Adds&nbsp;a&nbsp;message&nbsp;to&nbsp;the&nbsp;description&nbsp;of&nbsp;the&nbsp;exception.<br>
+&nbsp;<br>
+Many&nbsp;Telemetry&nbsp;exceptions&nbsp;arise&nbsp;from&nbsp;failures&nbsp;in&nbsp;another&nbsp;application.&nbsp;These<br>
+failures&nbsp;are&nbsp;difficult&nbsp;to&nbsp;pinpoint.&nbsp;This&nbsp;method&nbsp;allows&nbsp;Telemetry&nbsp;classes&nbsp;to<br>
+append&nbsp;useful&nbsp;debugging&nbsp;information&nbsp;to&nbsp;the&nbsp;exception.&nbsp;This&nbsp;method&nbsp;also&nbsp;logs<br>
+information&nbsp;about&nbsp;the&nbsp;location&nbsp;from&nbsp;where&nbsp;it&nbsp;was&nbsp;called.</tt></dd></dl>
+
+<dl><dt><a name="ProcessGoneException-__init__"><strong>__init__</strong></a>(self, msg<font color="#909090">=''</font>)</dt></dl>
+
+<dl><dt><a name="ProcessGoneException-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#ProcessGoneException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="ProcessGoneException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ProcessGoneException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ProcessGoneException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#ProcessGoneException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ProcessGoneException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#ProcessGoneException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="ProcessGoneException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#ProcessGoneException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="ProcessGoneException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ProcessGoneException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#ProcessGoneException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="ProcessGoneException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ProcessGoneException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="ProcessGoneException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ProcessGoneException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ProfilingException">class <strong>ProfilingException</strong></a>(<a href="telemetry.core.exceptions.html#Error">Error</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.core.exceptions.html#ProfilingException">ProfilingException</a></dd>
+<dd><a href="telemetry.core.exceptions.html#Error">Error</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><a name="ProfilingException-AddDebuggingMessage"><strong>AddDebuggingMessage</strong></a>(self, msg)</dt><dd><tt>Adds&nbsp;a&nbsp;message&nbsp;to&nbsp;the&nbsp;description&nbsp;of&nbsp;the&nbsp;exception.<br>
+&nbsp;<br>
+Many&nbsp;Telemetry&nbsp;exceptions&nbsp;arise&nbsp;from&nbsp;failures&nbsp;in&nbsp;another&nbsp;application.&nbsp;These<br>
+failures&nbsp;are&nbsp;difficult&nbsp;to&nbsp;pinpoint.&nbsp;This&nbsp;method&nbsp;allows&nbsp;Telemetry&nbsp;classes&nbsp;to<br>
+append&nbsp;useful&nbsp;debugging&nbsp;information&nbsp;to&nbsp;the&nbsp;exception.&nbsp;This&nbsp;method&nbsp;also&nbsp;logs<br>
+information&nbsp;about&nbsp;the&nbsp;location&nbsp;from&nbsp;where&nbsp;it&nbsp;was&nbsp;called.</tt></dd></dl>
+
+<dl><dt><a name="ProfilingException-__init__"><strong>__init__</strong></a>(self, msg<font color="#909090">=''</font>)</dt></dl>
+
+<dl><dt><a name="ProfilingException-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#ProfilingException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="ProfilingException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ProfilingException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ProfilingException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#ProfilingException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ProfilingException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#ProfilingException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="ProfilingException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#ProfilingException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="ProfilingException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ProfilingException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#ProfilingException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="ProfilingException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ProfilingException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="ProfilingException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ProfilingException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TimeoutException">class <strong>TimeoutException</strong></a>(<a href="telemetry.core.exceptions.html#Error">Error</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>The&nbsp;operation&nbsp;failed&nbsp;to&nbsp;complete&nbsp;because&nbsp;of&nbsp;a&nbsp;timeout.<br>
+&nbsp;<br>
+It&nbsp;is&nbsp;possible&nbsp;that&nbsp;waiting&nbsp;for&nbsp;a&nbsp;longer&nbsp;period&nbsp;of&nbsp;time&nbsp;would&nbsp;result&nbsp;in&nbsp;a<br>
+successful&nbsp;operation.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.core.exceptions.html#TimeoutException">TimeoutException</a></dd>
+<dd><a href="telemetry.core.exceptions.html#Error">Error</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><a name="TimeoutException-AddDebuggingMessage"><strong>AddDebuggingMessage</strong></a>(self, msg)</dt><dd><tt>Adds&nbsp;a&nbsp;message&nbsp;to&nbsp;the&nbsp;description&nbsp;of&nbsp;the&nbsp;exception.<br>
+&nbsp;<br>
+Many&nbsp;Telemetry&nbsp;exceptions&nbsp;arise&nbsp;from&nbsp;failures&nbsp;in&nbsp;another&nbsp;application.&nbsp;These<br>
+failures&nbsp;are&nbsp;difficult&nbsp;to&nbsp;pinpoint.&nbsp;This&nbsp;method&nbsp;allows&nbsp;Telemetry&nbsp;classes&nbsp;to<br>
+append&nbsp;useful&nbsp;debugging&nbsp;information&nbsp;to&nbsp;the&nbsp;exception.&nbsp;This&nbsp;method&nbsp;also&nbsp;logs<br>
+information&nbsp;about&nbsp;the&nbsp;location&nbsp;from&nbsp;where&nbsp;it&nbsp;was&nbsp;called.</tt></dd></dl>
+
+<dl><dt><a name="TimeoutException-__init__"><strong>__init__</strong></a>(self, msg<font color="#909090">=''</font>)</dt></dl>
+
+<dl><dt><a name="TimeoutException-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#TimeoutException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="TimeoutException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#TimeoutException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="TimeoutException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#TimeoutException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="TimeoutException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#TimeoutException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="TimeoutException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#TimeoutException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="TimeoutException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="TimeoutException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#TimeoutException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="TimeoutException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#TimeoutException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="TimeoutException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="TimeoutException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="UnknownPackageError">class <strong>UnknownPackageError</strong></a>(<a href="telemetry.core.exceptions.html#Error">Error</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Represents&nbsp;an&nbsp;exception&nbsp;when&nbsp;encountering&nbsp;an&nbsp;unsupported&nbsp;Android&nbsp;APK.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.core.exceptions.html#UnknownPackageError">UnknownPackageError</a></dd>
+<dd><a href="telemetry.core.exceptions.html#Error">Error</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><a name="UnknownPackageError-AddDebuggingMessage"><strong>AddDebuggingMessage</strong></a>(self, msg)</dt><dd><tt>Adds&nbsp;a&nbsp;message&nbsp;to&nbsp;the&nbsp;description&nbsp;of&nbsp;the&nbsp;exception.<br>
+&nbsp;<br>
+Many&nbsp;Telemetry&nbsp;exceptions&nbsp;arise&nbsp;from&nbsp;failures&nbsp;in&nbsp;another&nbsp;application.&nbsp;These<br>
+failures&nbsp;are&nbsp;difficult&nbsp;to&nbsp;pinpoint.&nbsp;This&nbsp;method&nbsp;allows&nbsp;Telemetry&nbsp;classes&nbsp;to<br>
+append&nbsp;useful&nbsp;debugging&nbsp;information&nbsp;to&nbsp;the&nbsp;exception.&nbsp;This&nbsp;method&nbsp;also&nbsp;logs<br>
+information&nbsp;about&nbsp;the&nbsp;location&nbsp;from&nbsp;where&nbsp;it&nbsp;was&nbsp;called.</tt></dd></dl>
+
+<dl><dt><a name="UnknownPackageError-__init__"><strong>__init__</strong></a>(self, msg<font color="#909090">=''</font>)</dt></dl>
+
+<dl><dt><a name="UnknownPackageError-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.core.exceptions.html#Error">Error</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#UnknownPackageError-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="UnknownPackageError-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#UnknownPackageError-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="UnknownPackageError-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#UnknownPackageError-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="UnknownPackageError-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#UnknownPackageError-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="UnknownPackageError-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#UnknownPackageError-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="UnknownPackageError-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="UnknownPackageError-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#UnknownPackageError-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="UnknownPackageError-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#UnknownPackageError-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="UnknownPackageError-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="UnknownPackageError-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
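
The generated page above documents the exception API shared by all of these classes, in particular `TimeoutException` and `Error.AddDebuggingMessage(msg)`. As a rough illustration only (it is not part of the patch or the generated docs), a caller might use that API as follows; `wait_for_tab_load` is a made-up placeholder callable:

```python
# Illustrative sketch only -- not part of the generated docs or the patch.
# It uses just the names listed above: exceptions.TimeoutException and
# Error.AddDebuggingMessage(msg). wait_for_tab_load is a placeholder.
from telemetry.core import exceptions

def run_with_context(wait_for_tab_load):
  try:
    wait_for_tab_load()
  except exceptions.TimeoutException as e:
    # Append extra context before re-raising, as the AddDebuggingMessage
    # docstring suggests; the method also logs the calling location.
    e.AddDebuggingMessage('timed out while waiting for the tab to load')
    raise
```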
diff --git a/catapult/telemetry/docs/pydoc/telemetry.core.html b/catapult/telemetry/docs/pydoc/telemetry.core.html
new file mode 100644
index 0000000..d941689
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.core.html
@@ -0,0 +1,44 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.core</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.core</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/core/__init__.py">telemetry/core/__init__.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.core.android_action_runner.html">android_action_runner</a><br>
+<a href="telemetry.core.android_platform.html">android_platform</a><br>
+<a href="telemetry.core.cros_interface.html">cros_interface</a><br>
+<a href="telemetry.core.cros_interface_unittest.html">cros_interface_unittest</a><br>
+<a href="telemetry.core.discover.html">discover</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.discover_unittest.html">discover_unittest</a><br>
+<a href="telemetry.core.exceptions.html">exceptions</a><br>
+<a href="telemetry.core.local_server.html">local_server</a><br>
+<a href="telemetry.core.local_server_unittest.html">local_server_unittest</a><br>
+<a href="telemetry.core.memory_cache_http_server.html">memory_cache_http_server</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.memory_cache_http_server_unittest.html">memory_cache_http_server_unittest</a><br>
+<a href="telemetry.core.network_controller.html">network_controller</a><br>
+<a href="telemetry.core.os_version.html">os_version</a><br>
+<a href="telemetry.core.platform.html">platform</a><br>
+<a href="telemetry.core.platform_unittest.html">platform_unittest</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.profiling_controller.html">profiling_controller</a><br>
+<a href="telemetry.core.tracing_controller.html">tracing_controller</a><br>
+<a href="telemetry.core.tracing_controller_unittest.html">tracing_controller_unittest</a><br>
+<a href="telemetry.core.util.html">util</a><br>
+<a href="telemetry.core.util_unittest.html">util_unittest</a><br>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.core.local_server.html b/catapult/telemetry/docs/pydoc/telemetry.core.local_server.html
new file mode 100644
index 0000000..6f9cd22
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.core.local_server.html
@@ -0,0 +1,241 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.core.local_server</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.core.html"><font color="#ffffff">core</font></a>.local_server</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/core/local_server.py">telemetry/core/local_server.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="collections.html">collections</a><br>
+<a href="telemetry.internal.forwarders.html">telemetry.internal.forwarders</a><br>
+</td><td width="25%" valign=top><a href="json.html">json</a><br>
+<a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="re.html">re</a><br>
+<a href="subprocess.html">subprocess</a><br>
+</td><td width="25%" valign=top><a href="sys.html">sys</a><br>
+<a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.core.local_server.html#LocalServer">LocalServer</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.core.local_server.html#LocalServerBackend">LocalServerBackend</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.core.local_server.html#LocalServerController">LocalServerController</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#tuple">__builtin__.tuple</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.core.local_server.html#NamedPort">NamedPort</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="LocalServer">class <strong>LocalServer</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="LocalServer-Close"><strong>Close</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LocalServer-GetBackendStartupArgs"><strong>GetBackendStartupArgs</strong></a>(self)</dt><dd><tt>Returns&nbsp;whatever&nbsp;arguments&nbsp;are&nbsp;required&nbsp;to&nbsp;start&nbsp;up&nbsp;the&nbsp;backend</tt></dd></dl>
+
+<dl><dt><a name="LocalServer-Start"><strong>Start</strong></a>(self, local_server_controller)</dt></dl>
+
+<dl><dt><a name="LocalServer-__del__"><strong>__del__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LocalServer-__enter__"><strong>__enter__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LocalServer-__exit__"><strong>__exit__</strong></a>(self, *args)</dt></dl>
+
+<dl><dt><a name="LocalServer-__init__"><strong>__init__</strong></a>(self, server_backend_class)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>is_running</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="LocalServerBackend">class <strong>LocalServerBackend</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="LocalServerBackend-ServeForever"><strong>ServeForever</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LocalServerBackend-StartAndGetNamedPorts"><strong>StartAndGetNamedPorts</strong></a>(self, args)</dt><dd><tt>Starts&nbsp;the&nbsp;actual&nbsp;server&nbsp;and&nbsp;obtains&nbsp;any&nbsp;sockets&nbsp;on&nbsp;which&nbsp;it<br>
+should&nbsp;listen.<br>
+&nbsp;<br>
+Returns&nbsp;a&nbsp;list&nbsp;of&nbsp;<a href="#NamedPort">NamedPort</a>&nbsp;on&nbsp;which&nbsp;this&nbsp;backend&nbsp;is&nbsp;listening.</tt></dd></dl>
+
+<dl><dt><a name="LocalServerBackend-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="LocalServerController">class <strong>LocalServerController</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Manages&nbsp;the&nbsp;list&nbsp;of&nbsp;running&nbsp;servers<br>
+&nbsp;<br>
+This&nbsp;class&nbsp;manages&nbsp;the&nbsp;running&nbsp;servers,&nbsp;but&nbsp;also&nbsp;provides&nbsp;an&nbsp;isolation&nbsp;layer<br>
+to&nbsp;prevent&nbsp;<a href="#LocalServer">LocalServer</a>&nbsp;subclasses&nbsp;from&nbsp;accessing&nbsp;the&nbsp;browser&nbsp;backend&nbsp;directly.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="LocalServerController-Close"><strong>Close</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LocalServerController-CreateForwarder"><strong>CreateForwarder</strong></a>(self, port_pairs)</dt></dl>
+
+<dl><dt><a name="LocalServerController-GetRemotePort"><strong>GetRemotePort</strong></a>(self, port)</dt></dl>
+
+<dl><dt><a name="LocalServerController-GetRunningServer"><strong>GetRunningServer</strong></a>(self, server_class, default_value)</dt></dl>
+
+<dl><dt><a name="LocalServerController-ServerDidClose"><strong>ServerDidClose</strong></a>(self, server)</dt></dl>
+
+<dl><dt><a name="LocalServerController-StartServer"><strong>StartServer</strong></a>(self, server)</dt></dl>
+
+<dl><dt><a name="LocalServerController-__init__"><strong>__init__</strong></a>(self, platform_backend)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>local_servers</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="NamedPort">class <strong>NamedPort</strong></a>(<a href="__builtin__.html#tuple">__builtin__.tuple</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt><a href="#NamedPort">NamedPort</a>(name,&nbsp;port)<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.core.local_server.html#NamedPort">NamedPort</a></dd>
+<dd><a href="__builtin__.html#tuple">__builtin__.tuple</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="NamedPort-__getnewargs__"><strong>__getnewargs__</strong></a>(self)</dt><dd><tt>Return&nbsp;self&nbsp;as&nbsp;a&nbsp;plain&nbsp;<a href="__builtin__.html#tuple">tuple</a>.&nbsp;&nbsp;Used&nbsp;by&nbsp;copy&nbsp;and&nbsp;pickle.</tt></dd></dl>
+
+<dl><dt><a name="NamedPort-__getstate__"><strong>__getstate__</strong></a>(self)</dt><dd><tt>Exclude&nbsp;the&nbsp;OrderedDict&nbsp;from&nbsp;pickling</tt></dd></dl>
+
+<dl><dt><a name="NamedPort-__repr__"><strong>__repr__</strong></a>(self)</dt><dd><tt>Return&nbsp;a&nbsp;nicely&nbsp;formatted&nbsp;representation&nbsp;string</tt></dd></dl>
+
+<dl><dt><a name="NamedPort-_asdict"><strong>_asdict</strong></a>(self)</dt><dd><tt>Return&nbsp;a&nbsp;new&nbsp;OrderedDict&nbsp;which&nbsp;maps&nbsp;field&nbsp;names&nbsp;to&nbsp;their&nbsp;values</tt></dd></dl>
+
+<dl><dt><a name="NamedPort-_replace"><strong>_replace</strong></a>(_self, **kwds)</dt><dd><tt>Return&nbsp;a&nbsp;new&nbsp;<a href="#NamedPort">NamedPort</a>&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;replacing&nbsp;specified&nbsp;fields&nbsp;with&nbsp;new&nbsp;values</tt></dd></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="NamedPort-_make"><strong>_make</strong></a>(cls, iterable, new<font color="#909090">=&lt;built-in method __new__ of type object&gt;</font>, len<font color="#909090">=&lt;built-in function len&gt;</font>)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Make&nbsp;a&nbsp;new&nbsp;<a href="#NamedPort">NamedPort</a>&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;from&nbsp;a&nbsp;sequence&nbsp;or&nbsp;iterable</tt></dd></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="NamedPort-__new__"><strong>__new__</strong></a>(_cls, name, port)</dt><dd><tt>Create&nbsp;new&nbsp;instance&nbsp;of&nbsp;<a href="#NamedPort">NamedPort</a>(name,&nbsp;port)</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>Return&nbsp;a&nbsp;new&nbsp;OrderedDict&nbsp;which&nbsp;maps&nbsp;field&nbsp;names&nbsp;to&nbsp;their&nbsp;values</tt></dd>
+</dl>
+<dl><dt><strong>name</strong></dt>
+<dd><tt>Alias&nbsp;for&nbsp;field&nbsp;number&nbsp;0</tt></dd>
+</dl>
+<dl><dt><strong>port</strong></dt>
+<dd><tt>Alias&nbsp;for&nbsp;field&nbsp;number&nbsp;1</tt></dd>
+</dl>
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>_fields</strong> = ('name', 'port')</dl>
+
+<hr>
+Methods inherited from <a href="__builtin__.html#tuple">__builtin__.tuple</a>:<br>
+<dl><dt><a name="NamedPort-__add__"><strong>__add__</strong></a>(...)</dt><dd><tt>x.<a href="#NamedPort-__add__">__add__</a>(y)&nbsp;&lt;==&gt;&nbsp;x+y</tt></dd></dl>
+
+<dl><dt><a name="NamedPort-__contains__"><strong>__contains__</strong></a>(...)</dt><dd><tt>x.<a href="#NamedPort-__contains__">__contains__</a>(y)&nbsp;&lt;==&gt;&nbsp;y&nbsp;in&nbsp;x</tt></dd></dl>
+
+<dl><dt><a name="NamedPort-__eq__"><strong>__eq__</strong></a>(...)</dt><dd><tt>x.<a href="#NamedPort-__eq__">__eq__</a>(y)&nbsp;&lt;==&gt;&nbsp;x==y</tt></dd></dl>
+
+<dl><dt><a name="NamedPort-__ge__"><strong>__ge__</strong></a>(...)</dt><dd><tt>x.<a href="#NamedPort-__ge__">__ge__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&gt;=y</tt></dd></dl>
+
+<dl><dt><a name="NamedPort-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#NamedPort-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="NamedPort-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#NamedPort-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="NamedPort-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#NamedPort-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="NamedPort-__gt__"><strong>__gt__</strong></a>(...)</dt><dd><tt>x.<a href="#NamedPort-__gt__">__gt__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&gt;y</tt></dd></dl>
+
+<dl><dt><a name="NamedPort-__hash__"><strong>__hash__</strong></a>(...)</dt><dd><tt>x.<a href="#NamedPort-__hash__">__hash__</a>()&nbsp;&lt;==&gt;&nbsp;hash(x)</tt></dd></dl>
+
+<dl><dt><a name="NamedPort-__iter__"><strong>__iter__</strong></a>(...)</dt><dd><tt>x.<a href="#NamedPort-__iter__">__iter__</a>()&nbsp;&lt;==&gt;&nbsp;iter(x)</tt></dd></dl>
+
+<dl><dt><a name="NamedPort-__le__"><strong>__le__</strong></a>(...)</dt><dd><tt>x.<a href="#NamedPort-__le__">__le__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&lt;=y</tt></dd></dl>
+
+<dl><dt><a name="NamedPort-__len__"><strong>__len__</strong></a>(...)</dt><dd><tt>x.<a href="#NamedPort-__len__">__len__</a>()&nbsp;&lt;==&gt;&nbsp;len(x)</tt></dd></dl>
+
+<dl><dt><a name="NamedPort-__lt__"><strong>__lt__</strong></a>(...)</dt><dd><tt>x.<a href="#NamedPort-__lt__">__lt__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&lt;y</tt></dd></dl>
+
+<dl><dt><a name="NamedPort-__mul__"><strong>__mul__</strong></a>(...)</dt><dd><tt>x.<a href="#NamedPort-__mul__">__mul__</a>(n)&nbsp;&lt;==&gt;&nbsp;x*n</tt></dd></dl>
+
+<dl><dt><a name="NamedPort-__ne__"><strong>__ne__</strong></a>(...)</dt><dd><tt>x.<a href="#NamedPort-__ne__">__ne__</a>(y)&nbsp;&lt;==&gt;&nbsp;x!=y</tt></dd></dl>
+
+<dl><dt><a name="NamedPort-__rmul__"><strong>__rmul__</strong></a>(...)</dt><dd><tt>x.<a href="#NamedPort-__rmul__">__rmul__</a>(n)&nbsp;&lt;==&gt;&nbsp;n*x</tt></dd></dl>
+
+<dl><dt><a name="NamedPort-__sizeof__"><strong>__sizeof__</strong></a>(...)</dt><dd><tt>T.<a href="#NamedPort-__sizeof__">__sizeof__</a>()&nbsp;--&nbsp;size&nbsp;of&nbsp;T&nbsp;in&nbsp;memory,&nbsp;in&nbsp;bytes</tt></dd></dl>
+
+<dl><dt><a name="NamedPort-count"><strong>count</strong></a>(...)</dt><dd><tt>T.<a href="#NamedPort-count">count</a>(value)&nbsp;-&gt;&nbsp;integer&nbsp;--&nbsp;return&nbsp;number&nbsp;of&nbsp;occurrences&nbsp;of&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="NamedPort-index"><strong>index</strong></a>(...)</dt><dd><tt>T.<a href="#NamedPort-index">index</a>(value,&nbsp;[start,&nbsp;[stop]])&nbsp;-&gt;&nbsp;integer&nbsp;--&nbsp;return&nbsp;first&nbsp;index&nbsp;of&nbsp;value.<br>
+Raises&nbsp;ValueError&nbsp;if&nbsp;the&nbsp;value&nbsp;is&nbsp;not&nbsp;present.</tt></dd></dl>
+
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
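
For orientation only, here is a hedged sketch (not part of the patch) of what a `LocalServerBackend` subclass might look like, using only the methods and the `NamedPort` tuple documented in the page above; `EchoBackend`, the `'echo'` port name, and the socket handling are invented for the example:

```python
# Hypothetical sketch, not part of the patch: a minimal backend built on the
# LocalServerBackend / NamedPort API documented above. EchoBackend and the
# 'echo' port name are invented for illustration.
import socket

from telemetry.core import local_server

class EchoBackend(local_server.LocalServerBackend):
  def StartAndGetNamedPorts(self, args):
    # Bind to an ephemeral port and report it under a well-known name,
    # matching the documented contract of returning a list of NamedPort.
    self._sock = socket.socket()
    self._sock.bind(('127.0.0.1', 0))
    self._sock.listen(1)
    return [local_server.NamedPort(name='echo',
                                   port=self._sock.getsockname()[1])]

  def ServeForever(self):
    # Echo each connection's first read back to the client.
    while True:
      conn, _ = self._sock.accept()
      conn.sendall(conn.recv(4096))
      conn.close()
```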
diff --git a/catapult/telemetry/docs/pydoc/telemetry.core.memory_cache_http_server.html b/catapult/telemetry/docs/pydoc/telemetry.core.memory_cache_http_server.html
new file mode 100644
index 0000000..1a50b5e
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.core.memory_cache_http_server.html
@@ -0,0 +1,527 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.core.memory_cache_http_server</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.core.html"><font color="#ffffff">core</font></a>.memory_cache_http_server</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/core/memory_cache_http_server.py">telemetry/core/memory_cache_http_server.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="BaseHTTPServer.html">BaseHTTPServer</a><br>
+<a href="SimpleHTTPServer.html">SimpleHTTPServer</a><br>
+<a href="SocketServer.html">SocketServer</a><br>
+</td><td width="25%" valign=top><a href="StringIO.html">StringIO</a><br>
+<a href="errno.html">errno</a><br>
+<a href="gzip.html">gzip</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.local_server.html">telemetry.core.local_server</a><br>
+<a href="mimetypes.html">mimetypes</a><br>
+<a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="socket.html">socket</a><br>
+<a href="sys.html">sys</a><br>
+<a href="urlparse.html">urlparse</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="SimpleHTTPServer.html#SimpleHTTPRequestHandler">SimpleHTTPServer.SimpleHTTPRequestHandler</a>(<a href="BaseHTTPServer.html#BaseHTTPRequestHandler">BaseHTTPServer.BaseHTTPRequestHandler</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.core.memory_cache_http_server.html#MemoryCacheHTTPRequestHandler">MemoryCacheHTTPRequestHandler</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#tuple">__builtin__.tuple</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.core.memory_cache_http_server.html#ByteRange">ByteRange</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.core.memory_cache_http_server.html#ResourceAndRange">ResourceAndRange</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.core.local_server.html#LocalServer">telemetry.core.local_server.LocalServer</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.core.memory_cache_http_server.html#MemoryCacheHTTPServer">MemoryCacheHTTPServer</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.core.local_server.html#LocalServerBackend">telemetry.core.local_server.LocalServerBackend</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.core.memory_cache_http_server.html#MemoryCacheHTTPServerBackend">MemoryCacheHTTPServerBackend</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ByteRange">class <strong>ByteRange</strong></a>(<a href="__builtin__.html#tuple">__builtin__.tuple</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt><a href="#ByteRange">ByteRange</a>(from_byte,&nbsp;to_byte)<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.core.memory_cache_http_server.html#ByteRange">ByteRange</a></dd>
+<dd><a href="__builtin__.html#tuple">__builtin__.tuple</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="ByteRange-__getnewargs__"><strong>__getnewargs__</strong></a>(self)</dt><dd><tt>Return&nbsp;self&nbsp;as&nbsp;a&nbsp;plain&nbsp;<a href="__builtin__.html#tuple">tuple</a>.&nbsp;&nbsp;Used&nbsp;by&nbsp;copy&nbsp;and&nbsp;pickle.</tt></dd></dl>
+
+<dl><dt><a name="ByteRange-__getstate__"><strong>__getstate__</strong></a>(self)</dt><dd><tt>Exclude&nbsp;the&nbsp;OrderedDict&nbsp;from&nbsp;pickling</tt></dd></dl>
+
+<dl><dt><a name="ByteRange-__repr__"><strong>__repr__</strong></a>(self)</dt><dd><tt>Return&nbsp;a&nbsp;nicely&nbsp;formatted&nbsp;representation&nbsp;string</tt></dd></dl>
+
+<dl><dt><a name="ByteRange-_asdict"><strong>_asdict</strong></a>(self)</dt><dd><tt>Return&nbsp;a&nbsp;new&nbsp;OrderedDict&nbsp;which&nbsp;maps&nbsp;field&nbsp;names&nbsp;to&nbsp;their&nbsp;values</tt></dd></dl>
+
+<dl><dt><a name="ByteRange-_replace"><strong>_replace</strong></a>(_self, **kwds)</dt><dd><tt>Return&nbsp;a&nbsp;new&nbsp;<a href="#ByteRange">ByteRange</a>&nbsp;object&nbsp;replacing&nbsp;specified&nbsp;fields&nbsp;with&nbsp;new&nbsp;values</tt></dd></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="ByteRange-_make"><strong>_make</strong></a>(cls, iterable, new<font color="#909090">=&lt;built-in method __new__ of type object&gt;</font>, len<font color="#909090">=&lt;built-in function len&gt;</font>)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Make&nbsp;a&nbsp;new&nbsp;<a href="#ByteRange">ByteRange</a>&nbsp;object&nbsp;from&nbsp;a&nbsp;sequence&nbsp;or&nbsp;iterable</tt></dd></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="ByteRange-__new__"><strong>__new__</strong></a>(_cls, from_byte, to_byte)</dt><dd><tt>Create&nbsp;new&nbsp;instance&nbsp;of&nbsp;<a href="#ByteRange">ByteRange</a>(from_byte,&nbsp;to_byte)</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>Return&nbsp;a&nbsp;new&nbsp;OrderedDict&nbsp;which&nbsp;maps&nbsp;field&nbsp;names&nbsp;to&nbsp;their&nbsp;values</tt></dd>
+</dl>
+<dl><dt><strong>from_byte</strong></dt>
+<dd><tt>Alias&nbsp;for&nbsp;field&nbsp;number&nbsp;0</tt></dd>
+</dl>
+<dl><dt><strong>to_byte</strong></dt>
+<dd><tt>Alias&nbsp;for&nbsp;field&nbsp;number&nbsp;1</tt></dd>
+</dl>
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>_fields</strong> = ('from_byte', 'to_byte')</dl>
+
+<hr>
+Methods inherited from <a href="__builtin__.html#tuple">__builtin__.tuple</a>:<br>
+<dl><dt><a name="ByteRange-__add__"><strong>__add__</strong></a>(...)</dt><dd><tt>x.<a href="#ByteRange-__add__">__add__</a>(y)&nbsp;&lt;==&gt;&nbsp;x+y</tt></dd></dl>
+
+<dl><dt><a name="ByteRange-__contains__"><strong>__contains__</strong></a>(...)</dt><dd><tt>x.<a href="#ByteRange-__contains__">__contains__</a>(y)&nbsp;&lt;==&gt;&nbsp;y&nbsp;in&nbsp;x</tt></dd></dl>
+
+<dl><dt><a name="ByteRange-__eq__"><strong>__eq__</strong></a>(...)</dt><dd><tt>x.<a href="#ByteRange-__eq__">__eq__</a>(y)&nbsp;&lt;==&gt;&nbsp;x==y</tt></dd></dl>
+
+<dl><dt><a name="ByteRange-__ge__"><strong>__ge__</strong></a>(...)</dt><dd><tt>x.<a href="#ByteRange-__ge__">__ge__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&gt;=y</tt></dd></dl>
+
+<dl><dt><a name="ByteRange-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#ByteRange-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ByteRange-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#ByteRange-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="ByteRange-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#ByteRange-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="ByteRange-__gt__"><strong>__gt__</strong></a>(...)</dt><dd><tt>x.<a href="#ByteRange-__gt__">__gt__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&gt;y</tt></dd></dl>
+
+<dl><dt><a name="ByteRange-__hash__"><strong>__hash__</strong></a>(...)</dt><dd><tt>x.<a href="#ByteRange-__hash__">__hash__</a>()&nbsp;&lt;==&gt;&nbsp;hash(x)</tt></dd></dl>
+
+<dl><dt><a name="ByteRange-__iter__"><strong>__iter__</strong></a>(...)</dt><dd><tt>x.<a href="#ByteRange-__iter__">__iter__</a>()&nbsp;&lt;==&gt;&nbsp;iter(x)</tt></dd></dl>
+
+<dl><dt><a name="ByteRange-__le__"><strong>__le__</strong></a>(...)</dt><dd><tt>x.<a href="#ByteRange-__le__">__le__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&lt;=y</tt></dd></dl>
+
+<dl><dt><a name="ByteRange-__len__"><strong>__len__</strong></a>(...)</dt><dd><tt>x.<a href="#ByteRange-__len__">__len__</a>()&nbsp;&lt;==&gt;&nbsp;len(x)</tt></dd></dl>
+
+<dl><dt><a name="ByteRange-__lt__"><strong>__lt__</strong></a>(...)</dt><dd><tt>x.<a href="#ByteRange-__lt__">__lt__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&lt;y</tt></dd></dl>
+
+<dl><dt><a name="ByteRange-__mul__"><strong>__mul__</strong></a>(...)</dt><dd><tt>x.<a href="#ByteRange-__mul__">__mul__</a>(n)&nbsp;&lt;==&gt;&nbsp;x*n</tt></dd></dl>
+
+<dl><dt><a name="ByteRange-__ne__"><strong>__ne__</strong></a>(...)</dt><dd><tt>x.<a href="#ByteRange-__ne__">__ne__</a>(y)&nbsp;&lt;==&gt;&nbsp;x!=y</tt></dd></dl>
+
+<dl><dt><a name="ByteRange-__rmul__"><strong>__rmul__</strong></a>(...)</dt><dd><tt>x.<a href="#ByteRange-__rmul__">__rmul__</a>(n)&nbsp;&lt;==&gt;&nbsp;n*x</tt></dd></dl>
+
+<dl><dt><a name="ByteRange-__sizeof__"><strong>__sizeof__</strong></a>(...)</dt><dd><tt>T.<a href="#ByteRange-__sizeof__">__sizeof__</a>()&nbsp;--&nbsp;size&nbsp;of&nbsp;T&nbsp;in&nbsp;memory,&nbsp;in&nbsp;bytes</tt></dd></dl>
+
+<dl><dt><a name="ByteRange-count"><strong>count</strong></a>(...)</dt><dd><tt>T.<a href="#ByteRange-count">count</a>(value)&nbsp;-&gt;&nbsp;integer&nbsp;--&nbsp;return&nbsp;number&nbsp;of&nbsp;occurrences&nbsp;of&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="ByteRange-index"><strong>index</strong></a>(...)</dt><dd><tt>T.<a href="#ByteRange-index">index</a>(value,&nbsp;[start,&nbsp;[stop]])&nbsp;-&gt;&nbsp;integer&nbsp;--&nbsp;return&nbsp;first&nbsp;index&nbsp;of&nbsp;value.<br>
+Raises&nbsp;ValueError&nbsp;if&nbsp;the&nbsp;value&nbsp;is&nbsp;not&nbsp;present.</tt></dd></dl>
+
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MemoryCacheHTTPRequestHandler">class <strong>MemoryCacheHTTPRequestHandler</strong></a>(<a href="SimpleHTTPServer.html#SimpleHTTPRequestHandler">SimpleHTTPServer.SimpleHTTPRequestHandler</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.core.memory_cache_http_server.html#MemoryCacheHTTPRequestHandler">MemoryCacheHTTPRequestHandler</a></dd>
+<dd><a href="SimpleHTTPServer.html#SimpleHTTPRequestHandler">SimpleHTTPServer.SimpleHTTPRequestHandler</a></dd>
+<dd><a href="BaseHTTPServer.html#BaseHTTPRequestHandler">BaseHTTPServer.BaseHTTPRequestHandler</a></dd>
+<dd><a href="SocketServer.html#StreamRequestHandler">SocketServer.StreamRequestHandler</a></dd>
+<dd><a href="SocketServer.html#BaseRequestHandler">SocketServer.BaseRequestHandler</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="MemoryCacheHTTPRequestHandler-GetByteRange"><strong>GetByteRange</strong></a>(self, total_num_of_bytes)</dt><dd><tt>Parse&nbsp;the&nbsp;header&nbsp;and&nbsp;get&nbsp;the&nbsp;range&nbsp;values&nbsp;specified.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;total_num_of_bytes:&nbsp;Total&nbsp;#&nbsp;of&nbsp;bytes&nbsp;in&nbsp;requested&nbsp;resource,<br>
+&nbsp;&nbsp;used&nbsp;to&nbsp;calculate&nbsp;upper&nbsp;range&nbsp;limit.<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;<a href="#ByteRange">ByteRange</a>&nbsp;namedtuple&nbsp;object&nbsp;with&nbsp;the&nbsp;requested&nbsp;byte-range&nbsp;values.<br>
+&nbsp;&nbsp;If&nbsp;no&nbsp;Range&nbsp;is&nbsp;explicitly&nbsp;requested&nbsp;or&nbsp;there&nbsp;is&nbsp;a&nbsp;failure&nbsp;parsing,<br>
+&nbsp;&nbsp;return&nbsp;None.<br>
+&nbsp;&nbsp;If&nbsp;range&nbsp;specified&nbsp;is&nbsp;in&nbsp;the&nbsp;format&nbsp;"N-",&nbsp;return&nbsp;N-END.&nbsp;Refer&nbsp;to<br>
+&nbsp;&nbsp;<a href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html">http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html</a>&nbsp;for&nbsp;details.<br>
+&nbsp;&nbsp;If&nbsp;upper&nbsp;range&nbsp;limit&nbsp;is&nbsp;greater&nbsp;than&nbsp;total&nbsp;#&nbsp;of&nbsp;bytes,&nbsp;return&nbsp;upper&nbsp;index.</tt></dd></dl>
+
+<dl><dt><a name="MemoryCacheHTTPRequestHandler-SendHead"><strong>SendHead</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MemoryCacheHTTPRequestHandler-do_GET"><strong>do_GET</strong></a>(self)</dt><dd><tt>Serve&nbsp;a&nbsp;GET&nbsp;request.</tt></dd></dl>
+
+<dl><dt><a name="MemoryCacheHTTPRequestHandler-do_HEAD"><strong>do_HEAD</strong></a>(self)</dt><dd><tt>Serve&nbsp;a&nbsp;HEAD&nbsp;request.</tt></dd></dl>
+
+<dl><dt><a name="MemoryCacheHTTPRequestHandler-handle"><strong>handle</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MemoryCacheHTTPRequestHandler-log_error"><strong>log_error</strong></a>(self, fmt, *args)</dt></dl>
+
+<dl><dt><a name="MemoryCacheHTTPRequestHandler-log_request"><strong>log_request</strong></a>(self, code<font color="#909090">='-'</font>, size<font color="#909090">='-'</font>)</dt></dl>
+
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>protocol_version</strong> = 'HTTP/1.1'</dl>
+
+<dl><dt><strong>wbufsize</strong> = -1</dl>
+
+<hr>
+Methods inherited from <a href="SimpleHTTPServer.html#SimpleHTTPRequestHandler">SimpleHTTPServer.SimpleHTTPRequestHandler</a>:<br>
+<dl><dt><a name="MemoryCacheHTTPRequestHandler-copyfile"><strong>copyfile</strong></a>(self, source, outputfile)</dt><dd><tt>Copy&nbsp;all&nbsp;data&nbsp;between&nbsp;two&nbsp;file&nbsp;objects.<br>
+&nbsp;<br>
+The&nbsp;SOURCE&nbsp;argument&nbsp;is&nbsp;a&nbsp;file&nbsp;object&nbsp;open&nbsp;for&nbsp;reading<br>
+(or&nbsp;anything&nbsp;with&nbsp;a&nbsp;read()&nbsp;method)&nbsp;and&nbsp;the&nbsp;DESTINATION<br>
+argument&nbsp;is&nbsp;a&nbsp;file&nbsp;object&nbsp;open&nbsp;for&nbsp;writing&nbsp;(or<br>
+anything&nbsp;with&nbsp;a&nbsp;write()&nbsp;method).<br>
+&nbsp;<br>
+The&nbsp;only&nbsp;reason&nbsp;for&nbsp;overriding&nbsp;this&nbsp;would&nbsp;be&nbsp;to&nbsp;change<br>
+the&nbsp;block&nbsp;size&nbsp;or&nbsp;perhaps&nbsp;to&nbsp;replace&nbsp;newlines&nbsp;by&nbsp;CRLF<br>
+--&nbsp;note&nbsp;however&nbsp;that&nbsp;the&nbsp;default&nbsp;server&nbsp;uses&nbsp;this<br>
+to&nbsp;copy&nbsp;binary&nbsp;data&nbsp;as&nbsp;well.</tt></dd></dl>
+
+<dl><dt><a name="MemoryCacheHTTPRequestHandler-guess_type"><strong>guess_type</strong></a>(self, path)</dt><dd><tt>Guess&nbsp;the&nbsp;type&nbsp;of&nbsp;a&nbsp;file.<br>
+&nbsp;<br>
+Argument&nbsp;is&nbsp;a&nbsp;PATH&nbsp;(a&nbsp;filename).<br>
+&nbsp;<br>
+Return&nbsp;value&nbsp;is&nbsp;a&nbsp;string&nbsp;of&nbsp;the&nbsp;form&nbsp;type/subtype,<br>
+usable&nbsp;for&nbsp;a&nbsp;MIME&nbsp;Content-type&nbsp;header.<br>
+&nbsp;<br>
+The&nbsp;default&nbsp;implementation&nbsp;looks&nbsp;the&nbsp;file's&nbsp;extension<br>
+up&nbsp;in&nbsp;the&nbsp;table&nbsp;self.<strong>extensions_map</strong>,&nbsp;using&nbsp;application/octet-stream<br>
+as&nbsp;a&nbsp;default;&nbsp;however&nbsp;it&nbsp;would&nbsp;be&nbsp;permissible&nbsp;(if<br>
+slow)&nbsp;to&nbsp;look&nbsp;inside&nbsp;the&nbsp;data&nbsp;to&nbsp;make&nbsp;a&nbsp;better&nbsp;guess.</tt></dd></dl>
+
+<dl><dt><a name="MemoryCacheHTTPRequestHandler-list_directory"><strong>list_directory</strong></a>(self, path)</dt><dd><tt>Helper&nbsp;to&nbsp;produce&nbsp;a&nbsp;directory&nbsp;listing&nbsp;(absent&nbsp;index.html).<br>
+&nbsp;<br>
+Return&nbsp;value&nbsp;is&nbsp;either&nbsp;a&nbsp;file&nbsp;object,&nbsp;or&nbsp;None&nbsp;(indicating&nbsp;an<br>
+error).&nbsp;&nbsp;In&nbsp;either&nbsp;case,&nbsp;the&nbsp;headers&nbsp;are&nbsp;sent,&nbsp;making&nbsp;the<br>
+interface&nbsp;the&nbsp;same&nbsp;as&nbsp;for&nbsp;<a href="#MemoryCacheHTTPRequestHandler-send_head">send_head</a>().</tt></dd></dl>
+
+<dl><dt><a name="MemoryCacheHTTPRequestHandler-send_head"><strong>send_head</strong></a>(self)</dt><dd><tt>Common&nbsp;code&nbsp;for&nbsp;GET&nbsp;and&nbsp;HEAD&nbsp;commands.<br>
+&nbsp;<br>
+This&nbsp;sends&nbsp;the&nbsp;response&nbsp;code&nbsp;and&nbsp;MIME&nbsp;headers.<br>
+&nbsp;<br>
+Return&nbsp;value&nbsp;is&nbsp;either&nbsp;a&nbsp;file&nbsp;object&nbsp;(which&nbsp;has&nbsp;to&nbsp;be&nbsp;copied<br>
+to&nbsp;the&nbsp;outputfile&nbsp;by&nbsp;the&nbsp;caller&nbsp;unless&nbsp;the&nbsp;command&nbsp;was&nbsp;HEAD,<br>
+and&nbsp;must&nbsp;be&nbsp;closed&nbsp;by&nbsp;the&nbsp;caller&nbsp;under&nbsp;all&nbsp;circumstances),&nbsp;or<br>
+None,&nbsp;in&nbsp;which&nbsp;case&nbsp;the&nbsp;caller&nbsp;has&nbsp;nothing&nbsp;further&nbsp;to&nbsp;do.</tt></dd></dl>
+
+<dl><dt><a name="MemoryCacheHTTPRequestHandler-translate_path"><strong>translate_path</strong></a>(self, path)</dt><dd><tt>Translate&nbsp;a&nbsp;/-separated&nbsp;PATH&nbsp;to&nbsp;the&nbsp;local&nbsp;filename&nbsp;syntax.<br>
+&nbsp;<br>
+Components&nbsp;that&nbsp;mean&nbsp;special&nbsp;things&nbsp;to&nbsp;the&nbsp;local&nbsp;file&nbsp;system<br>
+(e.g.&nbsp;drive&nbsp;or&nbsp;directory&nbsp;names)&nbsp;are&nbsp;ignored.&nbsp;&nbsp;(XXX&nbsp;They&nbsp;should<br>
+probably&nbsp;be&nbsp;diagnosed.)</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="SimpleHTTPServer.html#SimpleHTTPRequestHandler">SimpleHTTPServer.SimpleHTTPRequestHandler</a>:<br>
+<dl><dt><strong>extensions_map</strong> = {'': 'application/octet-stream', '.%': 'application/x-trash', '.323': 'text/h323', '.3gp': 'video/3gpp', '.7z': 'application/x-7z-compressed', '.a': 'application/octet-stream', '.abw': 'application/x-abiword', '.ai': 'application/postscript', '.aif': 'audio/x-aiff', '.aifc': 'audio/x-aiff', ...}</dl>
+
+<dl><dt><strong>server_version</strong> = 'SimpleHTTP/0.6'</dl>
+
+<hr>
+Methods inherited from <a href="BaseHTTPServer.html#BaseHTTPRequestHandler">BaseHTTPServer.BaseHTTPRequestHandler</a>:<br>
+<dl><dt><a name="MemoryCacheHTTPRequestHandler-address_string"><strong>address_string</strong></a>(self)</dt><dd><tt>Return&nbsp;the&nbsp;client&nbsp;address&nbsp;formatted&nbsp;for&nbsp;logging.<br>
+&nbsp;<br>
+This&nbsp;version&nbsp;looks&nbsp;up&nbsp;the&nbsp;full&nbsp;hostname&nbsp;using&nbsp;gethostbyaddr(),<br>
+and&nbsp;tries&nbsp;to&nbsp;find&nbsp;a&nbsp;name&nbsp;that&nbsp;contains&nbsp;at&nbsp;least&nbsp;one&nbsp;dot.</tt></dd></dl>
+
+<dl><dt><a name="MemoryCacheHTTPRequestHandler-date_time_string"><strong>date_time_string</strong></a>(self, timestamp<font color="#909090">=None</font>)</dt><dd><tt>Return&nbsp;the&nbsp;current&nbsp;date&nbsp;and&nbsp;time&nbsp;formatted&nbsp;for&nbsp;a&nbsp;message&nbsp;header.</tt></dd></dl>
+
+<dl><dt><a name="MemoryCacheHTTPRequestHandler-end_headers"><strong>end_headers</strong></a>(self)</dt><dd><tt>Send&nbsp;the&nbsp;blank&nbsp;line&nbsp;ending&nbsp;the&nbsp;MIME&nbsp;headers.</tt></dd></dl>
+
+<dl><dt><a name="MemoryCacheHTTPRequestHandler-handle_one_request"><strong>handle_one_request</strong></a>(self)</dt><dd><tt>Handle&nbsp;a&nbsp;single&nbsp;HTTP&nbsp;request.<br>
+&nbsp;<br>
+You&nbsp;normally&nbsp;don't&nbsp;need&nbsp;to&nbsp;override&nbsp;this&nbsp;method;&nbsp;see&nbsp;the&nbsp;class<br>
+__doc__&nbsp;string&nbsp;for&nbsp;information&nbsp;on&nbsp;how&nbsp;to&nbsp;handle&nbsp;specific&nbsp;HTTP<br>
+commands&nbsp;such&nbsp;as&nbsp;GET&nbsp;and&nbsp;POST.</tt></dd></dl>
+
+<dl><dt><a name="MemoryCacheHTTPRequestHandler-log_date_time_string"><strong>log_date_time_string</strong></a>(self)</dt><dd><tt>Return&nbsp;the&nbsp;current&nbsp;time&nbsp;formatted&nbsp;for&nbsp;logging.</tt></dd></dl>
+
+<dl><dt><a name="MemoryCacheHTTPRequestHandler-log_message"><strong>log_message</strong></a>(self, format, *args)</dt><dd><tt>Log&nbsp;an&nbsp;arbitrary&nbsp;message.<br>
+&nbsp;<br>
+This&nbsp;is&nbsp;used&nbsp;by&nbsp;all&nbsp;other&nbsp;logging&nbsp;functions.&nbsp;&nbsp;Override<br>
+it&nbsp;if&nbsp;you&nbsp;have&nbsp;specific&nbsp;logging&nbsp;wishes.<br>
+&nbsp;<br>
+The&nbsp;first&nbsp;argument,&nbsp;FORMAT,&nbsp;is&nbsp;a&nbsp;format&nbsp;string&nbsp;for&nbsp;the<br>
+message&nbsp;to&nbsp;be&nbsp;logged.&nbsp;&nbsp;If&nbsp;the&nbsp;format&nbsp;string&nbsp;contains<br>
+any&nbsp;%&nbsp;escapes&nbsp;requiring&nbsp;parameters,&nbsp;they&nbsp;should&nbsp;be<br>
+specified&nbsp;as&nbsp;subsequent&nbsp;arguments&nbsp;(it's&nbsp;just&nbsp;like<br>
+printf!).<br>
+&nbsp;<br>
+The&nbsp;client&nbsp;ip&nbsp;address&nbsp;and&nbsp;current&nbsp;date/time&nbsp;are&nbsp;prefixed&nbsp;to&nbsp;every<br>
+message.</tt></dd></dl>
+
+<dl><dt><a name="MemoryCacheHTTPRequestHandler-parse_request"><strong>parse_request</strong></a>(self)</dt><dd><tt>Parse&nbsp;a&nbsp;request&nbsp;(internal).<br>
+&nbsp;<br>
+The&nbsp;request&nbsp;should&nbsp;be&nbsp;stored&nbsp;in&nbsp;self.<strong>raw_requestline</strong>;&nbsp;the&nbsp;results<br>
+are&nbsp;in&nbsp;self.<strong>command</strong>,&nbsp;self.<strong>path</strong>,&nbsp;self.<strong>request_version</strong>&nbsp;and<br>
+self.<strong>headers</strong>.<br>
+&nbsp;<br>
+Return&nbsp;True&nbsp;for&nbsp;success,&nbsp;False&nbsp;for&nbsp;failure;&nbsp;on&nbsp;failure,&nbsp;an<br>
+error&nbsp;is&nbsp;sent&nbsp;back.</tt></dd></dl>
+
+<dl><dt><a name="MemoryCacheHTTPRequestHandler-send_error"><strong>send_error</strong></a>(self, code, message<font color="#909090">=None</font>)</dt><dd><tt>Send&nbsp;and&nbsp;log&nbsp;an&nbsp;error&nbsp;reply.<br>
+&nbsp;<br>
+Arguments&nbsp;are&nbsp;the&nbsp;error&nbsp;code,&nbsp;and&nbsp;a&nbsp;detailed&nbsp;message.<br>
+The&nbsp;detailed&nbsp;message&nbsp;defaults&nbsp;to&nbsp;the&nbsp;short&nbsp;entry&nbsp;matching&nbsp;the<br>
+response&nbsp;code.<br>
+&nbsp;<br>
+This&nbsp;sends&nbsp;an&nbsp;error&nbsp;response&nbsp;(so&nbsp;it&nbsp;must&nbsp;be&nbsp;called&nbsp;before&nbsp;any<br>
+output&nbsp;has&nbsp;been&nbsp;generated),&nbsp;logs&nbsp;the&nbsp;error,&nbsp;and&nbsp;finally&nbsp;sends<br>
+a&nbsp;piece&nbsp;of&nbsp;HTML&nbsp;explaining&nbsp;the&nbsp;error&nbsp;to&nbsp;the&nbsp;user.</tt></dd></dl>
+
+<dl><dt><a name="MemoryCacheHTTPRequestHandler-send_header"><strong>send_header</strong></a>(self, keyword, value)</dt><dd><tt>Send&nbsp;a&nbsp;MIME&nbsp;header.</tt></dd></dl>
+
+<dl><dt><a name="MemoryCacheHTTPRequestHandler-send_response"><strong>send_response</strong></a>(self, code, message<font color="#909090">=None</font>)</dt><dd><tt>Send&nbsp;the&nbsp;response&nbsp;header&nbsp;and&nbsp;log&nbsp;the&nbsp;response&nbsp;code.<br>
+&nbsp;<br>
+Also&nbsp;send&nbsp;two&nbsp;standard&nbsp;headers&nbsp;with&nbsp;the&nbsp;server&nbsp;software<br>
+version&nbsp;and&nbsp;the&nbsp;current&nbsp;date.</tt></dd></dl>
+
+<dl><dt><a name="MemoryCacheHTTPRequestHandler-version_string"><strong>version_string</strong></a>(self)</dt><dd><tt>Return&nbsp;the&nbsp;server&nbsp;software&nbsp;version&nbsp;string.</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="BaseHTTPServer.html#BaseHTTPRequestHandler">BaseHTTPServer.BaseHTTPRequestHandler</a>:<br>
+<dl><dt><strong>MessageClass</strong> = &lt;class mimetools.Message&gt;</dl>
+
+<dl><dt><strong>default_request_version</strong> = 'HTTP/0.9'</dl>
+
+<dl><dt><strong>error_content_type</strong> = 'text/html'</dl>
+
+<dl><dt><strong>error_message_format</strong> = '&lt;head&gt;<font color="#c040c0">\n</font>&lt;title&gt;Error response&lt;/title&gt;<font color="#c040c0">\n</font>&lt;/head&gt;<font color="#c040c0">\n</font>&lt;bo...ode explanation: %(code)s = %(explain)s.<font color="#c040c0">\n</font>&lt;/body&gt;<font color="#c040c0">\n</font>'</dl>
+
+<dl><dt><strong>monthname</strong> = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']</dl>
+
+<dl><dt><strong>responses</strong> = {100: ('Continue', 'Request received, please continue'), 101: ('Switching Protocols', 'Switching to new protocol; obey Upgrade header'), 200: ('OK', 'Request fulfilled, document follows'), 201: ('Created', 'Document created, URL follows'), 202: ('Accepted', 'Request accepted, processing continues off-line'), 203: ('Non-Authoritative Information', 'Request fulfilled from cache'), 204: ('No Content', 'Request fulfilled, nothing follows'), 205: ('Reset Content', 'Clear input form for further input.'), 206: ('Partial Content', 'Partial content follows.'), 300: ('Multiple Choices', 'Object has several resources -- see URI list'), ...}</dl>
+
+<dl><dt><strong>sys_version</strong> = 'Python/2.7.6'</dl>
+
+<dl><dt><strong>weekdayname</strong> = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']</dl>
+
+<hr>
+Methods inherited from <a href="SocketServer.html#StreamRequestHandler">SocketServer.StreamRequestHandler</a>:<br>
+<dl><dt><a name="MemoryCacheHTTPRequestHandler-finish"><strong>finish</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MemoryCacheHTTPRequestHandler-setup"><strong>setup</strong></a>(self)</dt></dl>
+
+<hr>
+Data and other attributes inherited from <a href="SocketServer.html#StreamRequestHandler">SocketServer.StreamRequestHandler</a>:<br>
+<dl><dt><strong>disable_nagle_algorithm</strong> = False</dl>
+
+<dl><dt><strong>rbufsize</strong> = -1</dl>
+
+<dl><dt><strong>timeout</strong> = None</dl>
+
+<hr>
+Methods inherited from <a href="SocketServer.html#BaseRequestHandler">SocketServer.BaseRequestHandler</a>:<br>
+<dl><dt><a name="MemoryCacheHTTPRequestHandler-__init__"><strong>__init__</strong></a>(self, request, client_address, server)</dt></dl>
+
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MemoryCacheHTTPServer">class <strong>MemoryCacheHTTPServer</strong></a>(<a href="telemetry.core.local_server.html#LocalServer">telemetry.core.local_server.LocalServer</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.core.memory_cache_http_server.html#MemoryCacheHTTPServer">MemoryCacheHTTPServer</a></dd>
+<dd><a href="telemetry.core.local_server.html#LocalServer">telemetry.core.local_server.LocalServer</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="MemoryCacheHTTPServer-GetBackendStartupArgs"><strong>GetBackendStartupArgs</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MemoryCacheHTTPServer-UrlOf"><strong>UrlOf</strong></a>(self, path)</dt></dl>
+
+<dl><dt><a name="MemoryCacheHTTPServer-__init__"><strong>__init__</strong></a>(self, paths)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>paths</strong></dt>
+</dl>
+<dl><dt><strong>url</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.core.local_server.html#LocalServer">telemetry.core.local_server.LocalServer</a>:<br>
+<dl><dt><a name="MemoryCacheHTTPServer-Close"><strong>Close</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MemoryCacheHTTPServer-Start"><strong>Start</strong></a>(self, local_server_controller)</dt></dl>
+
+<dl><dt><a name="MemoryCacheHTTPServer-__del__"><strong>__del__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MemoryCacheHTTPServer-__enter__"><strong>__enter__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MemoryCacheHTTPServer-__exit__"><strong>__exit__</strong></a>(self, *args)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.core.local_server.html#LocalServer">telemetry.core.local_server.LocalServer</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>is_running</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MemoryCacheHTTPServerBackend">class <strong>MemoryCacheHTTPServerBackend</strong></a>(<a href="telemetry.core.local_server.html#LocalServerBackend">telemetry.core.local_server.LocalServerBackend</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.core.memory_cache_http_server.html#MemoryCacheHTTPServerBackend">MemoryCacheHTTPServerBackend</a></dd>
+<dd><a href="telemetry.core.local_server.html#LocalServerBackend">telemetry.core.local_server.LocalServerBackend</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="MemoryCacheHTTPServerBackend-ServeForever"><strong>ServeForever</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MemoryCacheHTTPServerBackend-StartAndGetNamedPorts"><strong>StartAndGetNamedPorts</strong></a>(self, args)</dt></dl>
+
+<dl><dt><a name="MemoryCacheHTTPServerBackend-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.core.local_server.html#LocalServerBackend">telemetry.core.local_server.LocalServerBackend</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ResourceAndRange">class <strong>ResourceAndRange</strong></a>(<a href="__builtin__.html#tuple">__builtin__.tuple</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt><a href="#ResourceAndRange">ResourceAndRange</a>(resource,&nbsp;byte_range)<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.core.memory_cache_http_server.html#ResourceAndRange">ResourceAndRange</a></dd>
+<dd><a href="__builtin__.html#tuple">__builtin__.tuple</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="ResourceAndRange-__getnewargs__"><strong>__getnewargs__</strong></a>(self)</dt><dd><tt>Return&nbsp;self&nbsp;as&nbsp;a&nbsp;plain&nbsp;<a href="__builtin__.html#tuple">tuple</a>.&nbsp;&nbsp;Used&nbsp;by&nbsp;copy&nbsp;and&nbsp;pickle.</tt></dd></dl>
+
+<dl><dt><a name="ResourceAndRange-__getstate__"><strong>__getstate__</strong></a>(self)</dt><dd><tt>Exclude&nbsp;the&nbsp;OrderedDict&nbsp;from&nbsp;pickling</tt></dd></dl>
+
+<dl><dt><a name="ResourceAndRange-__repr__"><strong>__repr__</strong></a>(self)</dt><dd><tt>Return&nbsp;a&nbsp;nicely&nbsp;formatted&nbsp;representation&nbsp;string</tt></dd></dl>
+
+<dl><dt><a name="ResourceAndRange-_asdict"><strong>_asdict</strong></a>(self)</dt><dd><tt>Return&nbsp;a&nbsp;new&nbsp;OrderedDict&nbsp;which&nbsp;maps&nbsp;field&nbsp;names&nbsp;to&nbsp;their&nbsp;values</tt></dd></dl>
+
+<dl><dt><a name="ResourceAndRange-_replace"><strong>_replace</strong></a>(_self, **kwds)</dt><dd><tt>Return&nbsp;a&nbsp;new&nbsp;<a href="#ResourceAndRange">ResourceAndRange</a>&nbsp;object&nbsp;replacing&nbsp;specified&nbsp;fields&nbsp;with&nbsp;new&nbsp;values</tt></dd></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="ResourceAndRange-_make"><strong>_make</strong></a>(cls, iterable, new<font color="#909090">=&lt;built-in method __new__ of type object&gt;</font>, len<font color="#909090">=&lt;built-in function len&gt;</font>)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Make&nbsp;a&nbsp;new&nbsp;<a href="#ResourceAndRange">ResourceAndRange</a>&nbsp;object&nbsp;from&nbsp;a&nbsp;sequence&nbsp;or&nbsp;iterable</tt></dd></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="ResourceAndRange-__new__"><strong>__new__</strong></a>(_cls, resource, byte_range)</dt><dd><tt>Create&nbsp;new&nbsp;instance&nbsp;of&nbsp;<a href="#ResourceAndRange">ResourceAndRange</a>(resource,&nbsp;byte_range)</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>Return&nbsp;a&nbsp;new&nbsp;OrderedDict&nbsp;which&nbsp;maps&nbsp;field&nbsp;names&nbsp;to&nbsp;their&nbsp;values</tt></dd>
+</dl>
+<dl><dt><strong>byte_range</strong></dt>
+<dd><tt>Alias&nbsp;for&nbsp;field&nbsp;number&nbsp;1</tt></dd>
+</dl>
+<dl><dt><strong>resource</strong></dt>
+<dd><tt>Alias&nbsp;for&nbsp;field&nbsp;number&nbsp;0</tt></dd>
+</dl>
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>_fields</strong> = ('resource', 'byte_range')</dl>
+
+<hr>
+Methods inherited from <a href="__builtin__.html#tuple">__builtin__.tuple</a>:<br>
+<dl><dt><a name="ResourceAndRange-__add__"><strong>__add__</strong></a>(...)</dt><dd><tt>x.<a href="#ResourceAndRange-__add__">__add__</a>(y)&nbsp;&lt;==&gt;&nbsp;x+y</tt></dd></dl>
+
+<dl><dt><a name="ResourceAndRange-__contains__"><strong>__contains__</strong></a>(...)</dt><dd><tt>x.<a href="#ResourceAndRange-__contains__">__contains__</a>(y)&nbsp;&lt;==&gt;&nbsp;y&nbsp;in&nbsp;x</tt></dd></dl>
+
+<dl><dt><a name="ResourceAndRange-__eq__"><strong>__eq__</strong></a>(...)</dt><dd><tt>x.<a href="#ResourceAndRange-__eq__">__eq__</a>(y)&nbsp;&lt;==&gt;&nbsp;x==y</tt></dd></dl>
+
+<dl><dt><a name="ResourceAndRange-__ge__"><strong>__ge__</strong></a>(...)</dt><dd><tt>x.<a href="#ResourceAndRange-__ge__">__ge__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&gt;=y</tt></dd></dl>
+
+<dl><dt><a name="ResourceAndRange-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#ResourceAndRange-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ResourceAndRange-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#ResourceAndRange-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="ResourceAndRange-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#ResourceAndRange-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="ResourceAndRange-__gt__"><strong>__gt__</strong></a>(...)</dt><dd><tt>x.<a href="#ResourceAndRange-__gt__">__gt__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&gt;y</tt></dd></dl>
+
+<dl><dt><a name="ResourceAndRange-__hash__"><strong>__hash__</strong></a>(...)</dt><dd><tt>x.<a href="#ResourceAndRange-__hash__">__hash__</a>()&nbsp;&lt;==&gt;&nbsp;hash(x)</tt></dd></dl>
+
+<dl><dt><a name="ResourceAndRange-__iter__"><strong>__iter__</strong></a>(...)</dt><dd><tt>x.<a href="#ResourceAndRange-__iter__">__iter__</a>()&nbsp;&lt;==&gt;&nbsp;iter(x)</tt></dd></dl>
+
+<dl><dt><a name="ResourceAndRange-__le__"><strong>__le__</strong></a>(...)</dt><dd><tt>x.<a href="#ResourceAndRange-__le__">__le__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&lt;=y</tt></dd></dl>
+
+<dl><dt><a name="ResourceAndRange-__len__"><strong>__len__</strong></a>(...)</dt><dd><tt>x.<a href="#ResourceAndRange-__len__">__len__</a>()&nbsp;&lt;==&gt;&nbsp;len(x)</tt></dd></dl>
+
+<dl><dt><a name="ResourceAndRange-__lt__"><strong>__lt__</strong></a>(...)</dt><dd><tt>x.<a href="#ResourceAndRange-__lt__">__lt__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&lt;y</tt></dd></dl>
+
+<dl><dt><a name="ResourceAndRange-__mul__"><strong>__mul__</strong></a>(...)</dt><dd><tt>x.<a href="#ResourceAndRange-__mul__">__mul__</a>(n)&nbsp;&lt;==&gt;&nbsp;x*n</tt></dd></dl>
+
+<dl><dt><a name="ResourceAndRange-__ne__"><strong>__ne__</strong></a>(...)</dt><dd><tt>x.<a href="#ResourceAndRange-__ne__">__ne__</a>(y)&nbsp;&lt;==&gt;&nbsp;x!=y</tt></dd></dl>
+
+<dl><dt><a name="ResourceAndRange-__rmul__"><strong>__rmul__</strong></a>(...)</dt><dd><tt>x.<a href="#ResourceAndRange-__rmul__">__rmul__</a>(n)&nbsp;&lt;==&gt;&nbsp;n*x</tt></dd></dl>
+
+<dl><dt><a name="ResourceAndRange-__sizeof__"><strong>__sizeof__</strong></a>(...)</dt><dd><tt>T.<a href="#ResourceAndRange-__sizeof__">__sizeof__</a>()&nbsp;--&nbsp;size&nbsp;of&nbsp;T&nbsp;in&nbsp;memory,&nbsp;in&nbsp;bytes</tt></dd></dl>
+
+<dl><dt><a name="ResourceAndRange-count"><strong>count</strong></a>(...)</dt><dd><tt>T.<a href="#ResourceAndRange-count">count</a>(value)&nbsp;-&gt;&nbsp;integer&nbsp;--&nbsp;return&nbsp;number&nbsp;of&nbsp;occurrences&nbsp;of&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="ResourceAndRange-index"><strong>index</strong></a>(...)</dt><dd><tt>T.<a href="#ResourceAndRange-index">index</a>(value,&nbsp;[start,&nbsp;[stop]])&nbsp;-&gt;&nbsp;integer&nbsp;--&nbsp;return&nbsp;first&nbsp;index&nbsp;of&nbsp;value.<br>
+Raises&nbsp;ValueError&nbsp;if&nbsp;the&nbsp;value&nbsp;is&nbsp;not&nbsp;present.</tt></dd></dl>
+
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
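The GetByteRange contract documented in the pydoc above (parse the Range header, treat "N-" as N through the end of the resource, clamp the upper limit to the resource size, and return None when no Range is requested or parsing fails) can be illustrated with a small standalone sketch. This is not the catapult implementation; the `parse_byte_range` helper is hypothetical and only mirrors the documented behaviour.

```python
# Illustrative sketch only (not the telemetry implementation): turn an HTTP
# "bytes=N-M" Range header value into the ByteRange namedtuple documented
# above. The helper name parse_byte_range is hypothetical.
import collections

ByteRange = collections.namedtuple('ByteRange', ['from_byte', 'to_byte'])

def parse_byte_range(range_header, total_num_of_bytes):
  """Return a ByteRange, or None if there is no Range header or parsing fails."""
  if not range_header or not range_header.startswith('bytes='):
    return None
  try:
    start, _, end = range_header[len('bytes='):].partition('-')
    from_byte = int(start)
    # "N-" means N through the end of the resource (RFC 2616, section 14).
    to_byte = int(end) if end else total_num_of_bytes - 1
  except ValueError:
    return None
  # If the upper limit exceeds the resource size, fall back to the last index.
  to_byte = min(to_byte, total_num_of_bytes - 1)
  return ByteRange(from_byte, to_byte)
```

For example, `parse_byte_range('bytes=500-', 1000)` would yield `ByteRange(from_byte=500, to_byte=999)`, matching the "N-END" case described in the docstring.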
diff --git a/catapult/telemetry/docs/pydoc/telemetry.core.network_controller.html b/catapult/telemetry/docs/pydoc/telemetry.core.network_controller.html
new file mode 100644
index 0000000..01e3e24
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.core.network_controller.html
@@ -0,0 +1,62 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.core.network_controller</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.core.html"><font color="#ffffff">core</font></a>.network_controller</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/core/network_controller.py">telemetry/core/network_controller.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.core.network_controller.html#NetworkController">NetworkController</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="NetworkController">class <strong>NetworkController</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Control&nbsp;network&nbsp;settings&nbsp;and&nbsp;servers&nbsp;to&nbsp;simulate&nbsp;the&nbsp;Web.<br>
+&nbsp;<br>
+Network&nbsp;changes&nbsp;include&nbsp;forwarding&nbsp;device&nbsp;ports&nbsp;to&nbsp;host&nbsp;platform&nbsp;ports.<br>
+Web&nbsp;Page&nbsp;Replay&nbsp;is&nbsp;used&nbsp;to&nbsp;record&nbsp;and&nbsp;replay&nbsp;HTTP/HTTPS&nbsp;responses.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="NetworkController-SetReplayArgs"><strong>SetReplayArgs</strong></a>(self, archive_path, wpr_mode, netsim, extra_wpr_args, make_javascript_deterministic<font color="#909090">=False</font>)</dt><dd><tt>Save&nbsp;the&nbsp;arguments&nbsp;needed&nbsp;for&nbsp;replay.</tt></dd></dl>
+
+<dl><dt><a name="NetworkController-UpdateReplayForExistingBrowser"><strong>UpdateReplayForExistingBrowser</strong></a>(self)</dt><dd><tt>Restart&nbsp;replay&nbsp;if&nbsp;needed&nbsp;for&nbsp;an&nbsp;existing&nbsp;browser.<br>
+&nbsp;<br>
+TODO(slamm):&nbsp;Drop&nbsp;this&nbsp;method&nbsp;when&nbsp;the&nbsp;browser_backend&nbsp;dependencies&nbsp;are<br>
+moved&nbsp;to&nbsp;the&nbsp;platform.&nbsp;https://crbug.com/423962</tt></dd></dl>
+
+<dl><dt><a name="NetworkController-__init__"><strong>__init__</strong></a>(self, network_controller_backend)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
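As context for the NetworkController page above, here is a hedged sketch of how a caller might drive the documented API. Only the module path and the parameter names come from the pydoc; the backend argument, the `'replay'` mode string, and the `configure_replay` helper are assumptions made for illustration, not the project's actual usage.

```python
# Hypothetical usage sketch; parameter names are taken from the pydoc above,
# everything else (the backend object, the mode string, the helper name) is
# an assumption for illustration.
from telemetry.core import network_controller

def configure_replay(network_controller_backend, archive_path):
  controller = network_controller.NetworkController(network_controller_backend)
  # Per the documented contract, SetReplayArgs only saves the arguments
  # needed for replay; replay itself is (re)started elsewhere.
  controller.SetReplayArgs(
      archive_path=archive_path,
      wpr_mode='replay',  # placeholder; real code would use a mode constant
      netsim=None,
      extra_wpr_args=[],
      make_javascript_deterministic=True)
  return controller
```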
diff --git a/catapult/telemetry/docs/pydoc/telemetry.core.os_version.html b/catapult/telemetry/docs/pydoc/telemetry.core.os_version.html
new file mode 100644
index 0000000..bb21b3f
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.core.os_version.html
@@ -0,0 +1,350 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.core.os_version</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.core.html"><font color="#ffffff">core</font></a>.os_version</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/core/os_version.py">telemetry/core/os_version.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#str">__builtin__.str</a>(<a href="__builtin__.html#basestring">__builtin__.basestring</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.core.os_version.html#OSVersion">OSVersion</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="OSVersion">class <strong>OSVersion</strong></a>(<a href="__builtin__.html#str">__builtin__.str</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>#&nbsp;pylint:&nbsp;disable=W0212<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.core.os_version.html#OSVersion">OSVersion</a></dd>
+<dd><a href="__builtin__.html#str">__builtin__.str</a></dd>
+<dd><a href="__builtin__.html#basestring">__builtin__.basestring</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="OSVersion-__ge__"><strong>__ge__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="OSVersion-__gt__"><strong>__gt__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="OSVersion-__le__"><strong>__le__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="OSVersion-__lt__"><strong>__lt__</strong></a>(self, other)</dt></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="OSVersion-__new__"><strong>__new__</strong></a>(cls, friendly_name, sortable_name, *args, **kwargs)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="__builtin__.html#str">__builtin__.str</a>:<br>
+<dl><dt><a name="OSVersion-__add__"><strong>__add__</strong></a>(...)</dt><dd><tt>x.<a href="#OSVersion-__add__">__add__</a>(y)&nbsp;&lt;==&gt;&nbsp;x+y</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-__contains__"><strong>__contains__</strong></a>(...)</dt><dd><tt>x.<a href="#OSVersion-__contains__">__contains__</a>(y)&nbsp;&lt;==&gt;&nbsp;y&nbsp;in&nbsp;x</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-__eq__"><strong>__eq__</strong></a>(...)</dt><dd><tt>x.<a href="#OSVersion-__eq__">__eq__</a>(y)&nbsp;&lt;==&gt;&nbsp;x==y</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-__format__"><strong>__format__</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-__format__">__format__</a>(format_spec)&nbsp;-&gt;&nbsp;string<br>
+&nbsp;<br>
+Return&nbsp;a&nbsp;formatted&nbsp;version&nbsp;of&nbsp;S&nbsp;as&nbsp;described&nbsp;by&nbsp;format_spec.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#OSVersion-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#OSVersion-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-__getnewargs__"><strong>__getnewargs__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="OSVersion-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#OSVersion-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-__hash__"><strong>__hash__</strong></a>(...)</dt><dd><tt>x.<a href="#OSVersion-__hash__">__hash__</a>()&nbsp;&lt;==&gt;&nbsp;hash(x)</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-__len__"><strong>__len__</strong></a>(...)</dt><dd><tt>x.<a href="#OSVersion-__len__">__len__</a>()&nbsp;&lt;==&gt;&nbsp;len(x)</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-__mod__"><strong>__mod__</strong></a>(...)</dt><dd><tt>x.<a href="#OSVersion-__mod__">__mod__</a>(y)&nbsp;&lt;==&gt;&nbsp;x%y</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-__mul__"><strong>__mul__</strong></a>(...)</dt><dd><tt>x.<a href="#OSVersion-__mul__">__mul__</a>(n)&nbsp;&lt;==&gt;&nbsp;x*n</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-__ne__"><strong>__ne__</strong></a>(...)</dt><dd><tt>x.<a href="#OSVersion-__ne__">__ne__</a>(y)&nbsp;&lt;==&gt;&nbsp;x!=y</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#OSVersion-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-__rmod__"><strong>__rmod__</strong></a>(...)</dt><dd><tt>x.<a href="#OSVersion-__rmod__">__rmod__</a>(y)&nbsp;&lt;==&gt;&nbsp;y%x</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-__rmul__"><strong>__rmul__</strong></a>(...)</dt><dd><tt>x.<a href="#OSVersion-__rmul__">__rmul__</a>(n)&nbsp;&lt;==&gt;&nbsp;n*x</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-__sizeof__"><strong>__sizeof__</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-__sizeof__">__sizeof__</a>()&nbsp;-&gt;&nbsp;size&nbsp;of&nbsp;S&nbsp;in&nbsp;memory,&nbsp;in&nbsp;bytes</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#OSVersion-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;<a href="__builtin__.html#str">str</a>(x)</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-capitalize"><strong>capitalize</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-capitalize">capitalize</a>()&nbsp;-&gt;&nbsp;string<br>
+&nbsp;<br>
+Return&nbsp;a&nbsp;copy&nbsp;of&nbsp;the&nbsp;string&nbsp;S&nbsp;with&nbsp;only&nbsp;its&nbsp;first&nbsp;character<br>
+capitalized.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-center"><strong>center</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-center">center</a>(width[,&nbsp;fillchar])&nbsp;-&gt;&nbsp;string<br>
+&nbsp;<br>
+Return&nbsp;S&nbsp;centered&nbsp;in&nbsp;a&nbsp;string&nbsp;of&nbsp;length&nbsp;width.&nbsp;Padding&nbsp;is<br>
+done&nbsp;using&nbsp;the&nbsp;specified&nbsp;fill&nbsp;character&nbsp;(default&nbsp;is&nbsp;a&nbsp;space)</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-count"><strong>count</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-count">count</a>(sub[,&nbsp;start[,&nbsp;end]])&nbsp;-&gt;&nbsp;int<br>
+&nbsp;<br>
+Return&nbsp;the&nbsp;number&nbsp;of&nbsp;non-overlapping&nbsp;occurrences&nbsp;of&nbsp;substring&nbsp;sub&nbsp;in<br>
+string&nbsp;S[start:end].&nbsp;&nbsp;Optional&nbsp;arguments&nbsp;start&nbsp;and&nbsp;end&nbsp;are&nbsp;interpreted<br>
+as&nbsp;in&nbsp;slice&nbsp;notation.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-decode"><strong>decode</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-decode">decode</a>([encoding[,errors]])&nbsp;-&gt;&nbsp;object<br>
+&nbsp;<br>
+Decodes&nbsp;S&nbsp;using&nbsp;the&nbsp;codec&nbsp;registered&nbsp;for&nbsp;encoding.&nbsp;encoding&nbsp;defaults<br>
+to&nbsp;the&nbsp;default&nbsp;encoding.&nbsp;errors&nbsp;may&nbsp;be&nbsp;given&nbsp;to&nbsp;set&nbsp;a&nbsp;different&nbsp;error<br>
+handling&nbsp;scheme.&nbsp;Default&nbsp;is&nbsp;'strict'&nbsp;meaning&nbsp;that&nbsp;encoding&nbsp;errors&nbsp;raise<br>
+a&nbsp;UnicodeDecodeError.&nbsp;Other&nbsp;possible&nbsp;values&nbsp;are&nbsp;'ignore'&nbsp;and&nbsp;'replace'<br>
+as&nbsp;well&nbsp;as&nbsp;any&nbsp;other&nbsp;name&nbsp;registered&nbsp;with&nbsp;codecs.register_error&nbsp;that&nbsp;is<br>
+able&nbsp;to&nbsp;handle&nbsp;UnicodeDecodeErrors.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-encode"><strong>encode</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-encode">encode</a>([encoding[,errors]])&nbsp;-&gt;&nbsp;object<br>
+&nbsp;<br>
+Encodes&nbsp;S&nbsp;using&nbsp;the&nbsp;codec&nbsp;registered&nbsp;for&nbsp;encoding.&nbsp;encoding&nbsp;defaults<br>
+to&nbsp;the&nbsp;default&nbsp;encoding.&nbsp;errors&nbsp;may&nbsp;be&nbsp;given&nbsp;to&nbsp;set&nbsp;a&nbsp;different&nbsp;error<br>
+handling&nbsp;scheme.&nbsp;Default&nbsp;is&nbsp;'strict'&nbsp;meaning&nbsp;that&nbsp;encoding&nbsp;errors&nbsp;raise<br>
+a&nbsp;UnicodeEncodeError.&nbsp;Other&nbsp;possible&nbsp;values&nbsp;are&nbsp;'ignore',&nbsp;'replace'&nbsp;and<br>
+'xmlcharrefreplace'&nbsp;as&nbsp;well&nbsp;as&nbsp;any&nbsp;other&nbsp;name&nbsp;registered&nbsp;with<br>
+codecs.register_error&nbsp;that&nbsp;is&nbsp;able&nbsp;to&nbsp;handle&nbsp;UnicodeEncodeErrors.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-endswith"><strong>endswith</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-endswith">endswith</a>(suffix[,&nbsp;start[,&nbsp;end]])&nbsp;-&gt;&nbsp;bool<br>
+&nbsp;<br>
+Return&nbsp;True&nbsp;if&nbsp;S&nbsp;ends&nbsp;with&nbsp;the&nbsp;specified&nbsp;suffix,&nbsp;False&nbsp;otherwise.<br>
+With&nbsp;optional&nbsp;start,&nbsp;test&nbsp;S&nbsp;beginning&nbsp;at&nbsp;that&nbsp;position.<br>
+With&nbsp;optional&nbsp;end,&nbsp;stop&nbsp;comparing&nbsp;S&nbsp;at&nbsp;that&nbsp;position.<br>
+suffix&nbsp;can&nbsp;also&nbsp;be&nbsp;a&nbsp;tuple&nbsp;of&nbsp;strings&nbsp;to&nbsp;try.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-expandtabs"><strong>expandtabs</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-expandtabs">expandtabs</a>([tabsize])&nbsp;-&gt;&nbsp;string<br>
+&nbsp;<br>
+Return&nbsp;a&nbsp;copy&nbsp;of&nbsp;S&nbsp;where&nbsp;all&nbsp;tab&nbsp;characters&nbsp;are&nbsp;expanded&nbsp;using&nbsp;spaces.<br>
+If&nbsp;tabsize&nbsp;is&nbsp;not&nbsp;given,&nbsp;a&nbsp;tab&nbsp;size&nbsp;of&nbsp;8&nbsp;characters&nbsp;is&nbsp;assumed.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-find"><strong>find</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-find">find</a>(sub&nbsp;[,start&nbsp;[,end]])&nbsp;-&gt;&nbsp;int<br>
+&nbsp;<br>
+Return&nbsp;the&nbsp;lowest&nbsp;index&nbsp;in&nbsp;S&nbsp;where&nbsp;substring&nbsp;sub&nbsp;is&nbsp;found,<br>
+such&nbsp;that&nbsp;sub&nbsp;is&nbsp;contained&nbsp;within&nbsp;S[start:end].&nbsp;&nbsp;Optional<br>
+arguments&nbsp;start&nbsp;and&nbsp;end&nbsp;are&nbsp;interpreted&nbsp;as&nbsp;in&nbsp;slice&nbsp;notation.<br>
+&nbsp;<br>
+Return&nbsp;-1&nbsp;on&nbsp;failure.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-format"><strong>format</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-format">format</a>(*args,&nbsp;**kwargs)&nbsp;-&gt;&nbsp;string<br>
+&nbsp;<br>
+Return&nbsp;a&nbsp;formatted&nbsp;version&nbsp;of&nbsp;S,&nbsp;using&nbsp;substitutions&nbsp;from&nbsp;args&nbsp;and&nbsp;kwargs.<br>
+The&nbsp;substitutions&nbsp;are&nbsp;identified&nbsp;by&nbsp;braces&nbsp;('{'&nbsp;and&nbsp;'}').</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-index"><strong>index</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-index">index</a>(sub&nbsp;[,start&nbsp;[,end]])&nbsp;-&gt;&nbsp;int<br>
+&nbsp;<br>
+Like&nbsp;S.<a href="#OSVersion-find">find</a>()&nbsp;but&nbsp;raise&nbsp;ValueError&nbsp;when&nbsp;the&nbsp;substring&nbsp;is&nbsp;not&nbsp;found.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-isalnum"><strong>isalnum</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-isalnum">isalnum</a>()&nbsp;-&gt;&nbsp;bool<br>
+&nbsp;<br>
+Return&nbsp;True&nbsp;if&nbsp;all&nbsp;characters&nbsp;in&nbsp;S&nbsp;are&nbsp;alphanumeric<br>
+and&nbsp;there&nbsp;is&nbsp;at&nbsp;least&nbsp;one&nbsp;character&nbsp;in&nbsp;S,&nbsp;False&nbsp;otherwise.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-isalpha"><strong>isalpha</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-isalpha">isalpha</a>()&nbsp;-&gt;&nbsp;bool<br>
+&nbsp;<br>
+Return&nbsp;True&nbsp;if&nbsp;all&nbsp;characters&nbsp;in&nbsp;S&nbsp;are&nbsp;alphabetic<br>
+and&nbsp;there&nbsp;is&nbsp;at&nbsp;least&nbsp;one&nbsp;character&nbsp;in&nbsp;S,&nbsp;False&nbsp;otherwise.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-isdigit"><strong>isdigit</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-isdigit">isdigit</a>()&nbsp;-&gt;&nbsp;bool<br>
+&nbsp;<br>
+Return&nbsp;True&nbsp;if&nbsp;all&nbsp;characters&nbsp;in&nbsp;S&nbsp;are&nbsp;digits<br>
+and&nbsp;there&nbsp;is&nbsp;at&nbsp;least&nbsp;one&nbsp;character&nbsp;in&nbsp;S,&nbsp;False&nbsp;otherwise.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-islower"><strong>islower</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-islower">islower</a>()&nbsp;-&gt;&nbsp;bool<br>
+&nbsp;<br>
+Return&nbsp;True&nbsp;if&nbsp;all&nbsp;cased&nbsp;characters&nbsp;in&nbsp;S&nbsp;are&nbsp;lowercase&nbsp;and&nbsp;there&nbsp;is<br>
+at&nbsp;least&nbsp;one&nbsp;cased&nbsp;character&nbsp;in&nbsp;S,&nbsp;False&nbsp;otherwise.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-isspace"><strong>isspace</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-isspace">isspace</a>()&nbsp;-&gt;&nbsp;bool<br>
+&nbsp;<br>
+Return&nbsp;True&nbsp;if&nbsp;all&nbsp;characters&nbsp;in&nbsp;S&nbsp;are&nbsp;whitespace<br>
+and&nbsp;there&nbsp;is&nbsp;at&nbsp;least&nbsp;one&nbsp;character&nbsp;in&nbsp;S,&nbsp;False&nbsp;otherwise.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-istitle"><strong>istitle</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-istitle">istitle</a>()&nbsp;-&gt;&nbsp;bool<br>
+&nbsp;<br>
+Return&nbsp;True&nbsp;if&nbsp;S&nbsp;is&nbsp;a&nbsp;titlecased&nbsp;string&nbsp;and&nbsp;there&nbsp;is&nbsp;at&nbsp;least&nbsp;one<br>
+character&nbsp;in&nbsp;S,&nbsp;i.e.&nbsp;uppercase&nbsp;characters&nbsp;may&nbsp;only&nbsp;follow&nbsp;uncased<br>
+characters&nbsp;and&nbsp;lowercase&nbsp;characters&nbsp;only&nbsp;cased&nbsp;ones.&nbsp;Return&nbsp;False<br>
+otherwise.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-isupper"><strong>isupper</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-isupper">isupper</a>()&nbsp;-&gt;&nbsp;bool<br>
+&nbsp;<br>
+Return&nbsp;True&nbsp;if&nbsp;all&nbsp;cased&nbsp;characters&nbsp;in&nbsp;S&nbsp;are&nbsp;uppercase&nbsp;and&nbsp;there&nbsp;is<br>
+at&nbsp;least&nbsp;one&nbsp;cased&nbsp;character&nbsp;in&nbsp;S,&nbsp;False&nbsp;otherwise.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-join"><strong>join</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-join">join</a>(iterable)&nbsp;-&gt;&nbsp;string<br>
+&nbsp;<br>
+Return&nbsp;a&nbsp;string&nbsp;which&nbsp;is&nbsp;the&nbsp;concatenation&nbsp;of&nbsp;the&nbsp;strings&nbsp;in&nbsp;the<br>
+iterable.&nbsp;&nbsp;The&nbsp;separator&nbsp;between&nbsp;elements&nbsp;is&nbsp;S.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-ljust"><strong>ljust</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-ljust">ljust</a>(width[,&nbsp;fillchar])&nbsp;-&gt;&nbsp;string<br>
+&nbsp;<br>
+Return&nbsp;S&nbsp;left-justified&nbsp;in&nbsp;a&nbsp;string&nbsp;of&nbsp;length&nbsp;width.&nbsp;Padding&nbsp;is<br>
+done&nbsp;using&nbsp;the&nbsp;specified&nbsp;fill&nbsp;character&nbsp;(default&nbsp;is&nbsp;a&nbsp;space).</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-lower"><strong>lower</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-lower">lower</a>()&nbsp;-&gt;&nbsp;string<br>
+&nbsp;<br>
+Return&nbsp;a&nbsp;copy&nbsp;of&nbsp;the&nbsp;string&nbsp;S&nbsp;converted&nbsp;to&nbsp;lowercase.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-lstrip"><strong>lstrip</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-lstrip">lstrip</a>([chars])&nbsp;-&gt;&nbsp;string&nbsp;or&nbsp;unicode<br>
+&nbsp;<br>
+Return&nbsp;a&nbsp;copy&nbsp;of&nbsp;the&nbsp;string&nbsp;S&nbsp;with&nbsp;leading&nbsp;whitespace&nbsp;removed.<br>
+If&nbsp;chars&nbsp;is&nbsp;given&nbsp;and&nbsp;not&nbsp;None,&nbsp;remove&nbsp;characters&nbsp;in&nbsp;chars&nbsp;instead.<br>
+If&nbsp;chars&nbsp;is&nbsp;unicode,&nbsp;S&nbsp;will&nbsp;be&nbsp;converted&nbsp;to&nbsp;unicode&nbsp;before&nbsp;stripping</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-partition"><strong>partition</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-partition">partition</a>(sep)&nbsp;-&gt;&nbsp;(head,&nbsp;sep,&nbsp;tail)<br>
+&nbsp;<br>
+Search&nbsp;for&nbsp;the&nbsp;separator&nbsp;sep&nbsp;in&nbsp;S,&nbsp;and&nbsp;return&nbsp;the&nbsp;part&nbsp;before&nbsp;it,<br>
+the&nbsp;separator&nbsp;itself,&nbsp;and&nbsp;the&nbsp;part&nbsp;after&nbsp;it.&nbsp;&nbsp;If&nbsp;the&nbsp;separator&nbsp;is&nbsp;not<br>
+found,&nbsp;return&nbsp;S&nbsp;and&nbsp;two&nbsp;empty&nbsp;strings.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-replace"><strong>replace</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-replace">replace</a>(old,&nbsp;new[,&nbsp;count])&nbsp;-&gt;&nbsp;string<br>
+&nbsp;<br>
+Return&nbsp;a&nbsp;copy&nbsp;of&nbsp;string&nbsp;S&nbsp;with&nbsp;all&nbsp;occurrences&nbsp;of&nbsp;substring<br>
+old&nbsp;replaced&nbsp;by&nbsp;new.&nbsp;&nbsp;If&nbsp;the&nbsp;optional&nbsp;argument&nbsp;count&nbsp;is<br>
+given,&nbsp;only&nbsp;the&nbsp;first&nbsp;count&nbsp;occurrences&nbsp;are&nbsp;replaced.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-rfind"><strong>rfind</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-rfind">rfind</a>(sub&nbsp;[,start&nbsp;[,end]])&nbsp;-&gt;&nbsp;int<br>
+&nbsp;<br>
+Return&nbsp;the&nbsp;highest&nbsp;index&nbsp;in&nbsp;S&nbsp;where&nbsp;substring&nbsp;sub&nbsp;is&nbsp;found,<br>
+such&nbsp;that&nbsp;sub&nbsp;is&nbsp;contained&nbsp;within&nbsp;S[start:end].&nbsp;&nbsp;Optional<br>
+arguments&nbsp;start&nbsp;and&nbsp;end&nbsp;are&nbsp;interpreted&nbsp;as&nbsp;in&nbsp;slice&nbsp;notation.<br>
+&nbsp;<br>
+Return&nbsp;-1&nbsp;on&nbsp;failure.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-rindex"><strong>rindex</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-rindex">rindex</a>(sub&nbsp;[,start&nbsp;[,end]])&nbsp;-&gt;&nbsp;int<br>
+&nbsp;<br>
+Like&nbsp;S.<a href="#OSVersion-rfind">rfind</a>()&nbsp;but&nbsp;raise&nbsp;ValueError&nbsp;when&nbsp;the&nbsp;substring&nbsp;is&nbsp;not&nbsp;found.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-rjust"><strong>rjust</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-rjust">rjust</a>(width[,&nbsp;fillchar])&nbsp;-&gt;&nbsp;string<br>
+&nbsp;<br>
+Return&nbsp;S&nbsp;right-justified&nbsp;in&nbsp;a&nbsp;string&nbsp;of&nbsp;length&nbsp;width.&nbsp;Padding&nbsp;is<br>
+done&nbsp;using&nbsp;the&nbsp;specified&nbsp;fill&nbsp;character&nbsp;(default&nbsp;is&nbsp;a&nbsp;space)</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-rpartition"><strong>rpartition</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-rpartition">rpartition</a>(sep)&nbsp;-&gt;&nbsp;(head,&nbsp;sep,&nbsp;tail)<br>
+&nbsp;<br>
+Search&nbsp;for&nbsp;the&nbsp;separator&nbsp;sep&nbsp;in&nbsp;S,&nbsp;starting&nbsp;at&nbsp;the&nbsp;end&nbsp;of&nbsp;S,&nbsp;and&nbsp;return<br>
+the&nbsp;part&nbsp;before&nbsp;it,&nbsp;the&nbsp;separator&nbsp;itself,&nbsp;and&nbsp;the&nbsp;part&nbsp;after&nbsp;it.&nbsp;&nbsp;If&nbsp;the<br>
+separator&nbsp;is&nbsp;not&nbsp;found,&nbsp;return&nbsp;two&nbsp;empty&nbsp;strings&nbsp;and&nbsp;S.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-rsplit"><strong>rsplit</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-rsplit">rsplit</a>([sep&nbsp;[,maxsplit]])&nbsp;-&gt;&nbsp;list&nbsp;of&nbsp;strings<br>
+&nbsp;<br>
+Return&nbsp;a&nbsp;list&nbsp;of&nbsp;the&nbsp;words&nbsp;in&nbsp;the&nbsp;string&nbsp;S,&nbsp;using&nbsp;sep&nbsp;as&nbsp;the<br>
+delimiter&nbsp;string,&nbsp;starting&nbsp;at&nbsp;the&nbsp;end&nbsp;of&nbsp;the&nbsp;string&nbsp;and&nbsp;working<br>
+to&nbsp;the&nbsp;front.&nbsp;&nbsp;If&nbsp;maxsplit&nbsp;is&nbsp;given,&nbsp;at&nbsp;most&nbsp;maxsplit&nbsp;splits&nbsp;are<br>
+done.&nbsp;If&nbsp;sep&nbsp;is&nbsp;not&nbsp;specified&nbsp;or&nbsp;is&nbsp;None,&nbsp;any&nbsp;whitespace&nbsp;string<br>
+is&nbsp;a&nbsp;separator.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-rstrip"><strong>rstrip</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-rstrip">rstrip</a>([chars])&nbsp;-&gt;&nbsp;string&nbsp;or&nbsp;unicode<br>
+&nbsp;<br>
+Return&nbsp;a&nbsp;copy&nbsp;of&nbsp;the&nbsp;string&nbsp;S&nbsp;with&nbsp;trailing&nbsp;whitespace&nbsp;removed.<br>
+If&nbsp;chars&nbsp;is&nbsp;given&nbsp;and&nbsp;not&nbsp;None,&nbsp;remove&nbsp;characters&nbsp;in&nbsp;chars&nbsp;instead.<br>
+If&nbsp;chars&nbsp;is&nbsp;unicode,&nbsp;S&nbsp;will&nbsp;be&nbsp;converted&nbsp;to&nbsp;unicode&nbsp;before&nbsp;stripping</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-split"><strong>split</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-split">split</a>([sep&nbsp;[,maxsplit]])&nbsp;-&gt;&nbsp;list&nbsp;of&nbsp;strings<br>
+&nbsp;<br>
+Return&nbsp;a&nbsp;list&nbsp;of&nbsp;the&nbsp;words&nbsp;in&nbsp;the&nbsp;string&nbsp;S,&nbsp;using&nbsp;sep&nbsp;as&nbsp;the<br>
+delimiter&nbsp;string.&nbsp;&nbsp;If&nbsp;maxsplit&nbsp;is&nbsp;given,&nbsp;at&nbsp;most&nbsp;maxsplit<br>
+splits&nbsp;are&nbsp;done.&nbsp;If&nbsp;sep&nbsp;is&nbsp;not&nbsp;specified&nbsp;or&nbsp;is&nbsp;None,&nbsp;any<br>
+whitespace&nbsp;string&nbsp;is&nbsp;a&nbsp;separator&nbsp;and&nbsp;empty&nbsp;strings&nbsp;are&nbsp;removed<br>
+from&nbsp;the&nbsp;result.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-splitlines"><strong>splitlines</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-splitlines">splitlines</a>(keepends=False)&nbsp;-&gt;&nbsp;list&nbsp;of&nbsp;strings<br>
+&nbsp;<br>
+Return&nbsp;a&nbsp;list&nbsp;of&nbsp;the&nbsp;lines&nbsp;in&nbsp;S,&nbsp;breaking&nbsp;at&nbsp;line&nbsp;boundaries.<br>
+Line&nbsp;breaks&nbsp;are&nbsp;not&nbsp;included&nbsp;in&nbsp;the&nbsp;resulting&nbsp;list&nbsp;unless&nbsp;keepends<br>
+is&nbsp;given&nbsp;and&nbsp;true.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-startswith"><strong>startswith</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-startswith">startswith</a>(prefix[,&nbsp;start[,&nbsp;end]])&nbsp;-&gt;&nbsp;bool<br>
+&nbsp;<br>
+Return&nbsp;True&nbsp;if&nbsp;S&nbsp;starts&nbsp;with&nbsp;the&nbsp;specified&nbsp;prefix,&nbsp;False&nbsp;otherwise.<br>
+With&nbsp;optional&nbsp;start,&nbsp;test&nbsp;S&nbsp;beginning&nbsp;at&nbsp;that&nbsp;position.<br>
+With&nbsp;optional&nbsp;end,&nbsp;stop&nbsp;comparing&nbsp;S&nbsp;at&nbsp;that&nbsp;position.<br>
+prefix&nbsp;can&nbsp;also&nbsp;be&nbsp;a&nbsp;tuple&nbsp;of&nbsp;strings&nbsp;to&nbsp;try.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-strip"><strong>strip</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-strip">strip</a>([chars])&nbsp;-&gt;&nbsp;string&nbsp;or&nbsp;unicode<br>
+&nbsp;<br>
+Return&nbsp;a&nbsp;copy&nbsp;of&nbsp;the&nbsp;string&nbsp;S&nbsp;with&nbsp;leading&nbsp;and&nbsp;trailing<br>
+whitespace&nbsp;removed.<br>
+If&nbsp;chars&nbsp;is&nbsp;given&nbsp;and&nbsp;not&nbsp;None,&nbsp;remove&nbsp;characters&nbsp;in&nbsp;chars&nbsp;instead.<br>
+If&nbsp;chars&nbsp;is&nbsp;unicode,&nbsp;S&nbsp;will&nbsp;be&nbsp;converted&nbsp;to&nbsp;unicode&nbsp;before&nbsp;stripping</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-swapcase"><strong>swapcase</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-swapcase">swapcase</a>()&nbsp;-&gt;&nbsp;string<br>
+&nbsp;<br>
+Return&nbsp;a&nbsp;copy&nbsp;of&nbsp;the&nbsp;string&nbsp;S&nbsp;with&nbsp;uppercase&nbsp;characters<br>
+converted&nbsp;to&nbsp;lowercase&nbsp;and&nbsp;vice&nbsp;versa.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-title"><strong>title</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-title">title</a>()&nbsp;-&gt;&nbsp;string<br>
+&nbsp;<br>
+Return&nbsp;a&nbsp;titlecased&nbsp;version&nbsp;of&nbsp;S,&nbsp;i.e.&nbsp;words&nbsp;start&nbsp;with&nbsp;uppercase<br>
+characters,&nbsp;all&nbsp;remaining&nbsp;cased&nbsp;characters&nbsp;have&nbsp;lowercase.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-translate"><strong>translate</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-translate">translate</a>(table&nbsp;[,deletechars])&nbsp;-&gt;&nbsp;string<br>
+&nbsp;<br>
+Return&nbsp;a&nbsp;copy&nbsp;of&nbsp;the&nbsp;string&nbsp;S,&nbsp;where&nbsp;all&nbsp;characters&nbsp;occurring<br>
+in&nbsp;the&nbsp;optional&nbsp;argument&nbsp;deletechars&nbsp;are&nbsp;removed,&nbsp;and&nbsp;the<br>
+remaining&nbsp;characters&nbsp;have&nbsp;been&nbsp;mapped&nbsp;through&nbsp;the&nbsp;given<br>
+translation&nbsp;table,&nbsp;which&nbsp;must&nbsp;be&nbsp;a&nbsp;string&nbsp;of&nbsp;length&nbsp;256&nbsp;or&nbsp;None.<br>
+If&nbsp;the&nbsp;table&nbsp;argument&nbsp;is&nbsp;None,&nbsp;no&nbsp;translation&nbsp;is&nbsp;applied&nbsp;and<br>
+the&nbsp;operation&nbsp;simply&nbsp;removes&nbsp;the&nbsp;characters&nbsp;in&nbsp;deletechars.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-upper"><strong>upper</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-upper">upper</a>()&nbsp;-&gt;&nbsp;string<br>
+&nbsp;<br>
+Return&nbsp;a&nbsp;copy&nbsp;of&nbsp;the&nbsp;string&nbsp;S&nbsp;converted&nbsp;to&nbsp;uppercase.</tt></dd></dl>
+
+<dl><dt><a name="OSVersion-zfill"><strong>zfill</strong></a>(...)</dt><dd><tt>S.<a href="#OSVersion-zfill">zfill</a>(width)&nbsp;-&gt;&nbsp;string<br>
+&nbsp;<br>
+Pad&nbsp;a&nbsp;numeric&nbsp;string&nbsp;S&nbsp;with&nbsp;zeros&nbsp;on&nbsp;the&nbsp;left,&nbsp;to&nbsp;fill&nbsp;a&nbsp;field<br>
+of&nbsp;the&nbsp;specified&nbsp;width.&nbsp;&nbsp;The&nbsp;string&nbsp;S&nbsp;is&nbsp;never&nbsp;truncated.</tt></dd></dl>
+
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>ELCAPITAN</strong> = 'elcapitan'<br>
+<strong>LEOPARD</strong> = 'leopard'<br>
+<strong>LION</strong> = 'lion'<br>
+<strong>MAVERICKS</strong> = 'mavericks'<br>
+<strong>MOUNTAINLION</strong> = 'mountainlion'<br>
+<strong>SNOWLEOPARD</strong> = 'snowleopard'<br>
+<strong>VISTA</strong> = 'vista'<br>
+<strong>WIN7</strong> = 'win7'<br>
+<strong>WIN8</strong> = 'win8'<br>
+<strong>XP</strong> = 'xp'<br>
+<strong>YOSEMITE</strong> = 'yosemite'</td></tr></table>
+</body></html>
\ No newline at end of file
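
The OS version constants listed above are plain lowercase strings, and GetOSVersionName() (documented for telemetry.core.platform below) is described as returning the same names, so the two can be compared directly. A minimal sketch of that comparison, assuming the constants are importable from telemetry.core.os_version (the module path is an assumption, not shown in this hunk):

```python
# Illustrative sketch only. Assumes the constants above live in
# telemetry.core.os_version; that module path is an assumption.
from telemetry.core import os_version


def IsAtLeastMavericks(version_name):
  """Returns True if |version_name| names Mavericks or a newer Mac release."""
  newer_macs = (os_version.MAVERICKS, os_version.YOSEMITE,
                os_version.ELCAPITAN)
  return version_name in newer_macs
```

A Platform instance's GetOSVersionName() result would be passed in as |version_name|.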
diff --git a/catapult/telemetry/docs/pydoc/telemetry.core.platform.html b/catapult/telemetry/docs/pydoc/telemetry.core.platform.html
new file mode 100644
index 0000000..46f4e38
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.core.platform.html
@@ -0,0 +1,269 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.core.platform</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.core.html"><font color="#ffffff">core</font></a>.platform</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/core/platform.py">telemetry/core/platform.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.core.discover.html">telemetry.core.discover</a><br>
+<a href="telemetry.core.local_server.html">telemetry.core.local_server</a><br>
+<a href="telemetry.core.memory_cache_http_server.html">telemetry.core.memory_cache_http_server</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.network_controller.html">telemetry.core.network_controller</a><br>
+<a href="os.html">os</a><br>
+<a href="telemetry.internal.platform.platform_backend.html">telemetry.internal.platform.platform_backend</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+<a href="telemetry.core.tracing_controller.html">telemetry.core.tracing_controller</a><br>
+<a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.core.platform.html#Platform">Platform</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Platform">class <strong>Platform</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>The&nbsp;platform&nbsp;that&nbsp;the&nbsp;target&nbsp;browser&nbsp;is&nbsp;running&nbsp;on.<br>
+&nbsp;<br>
+Provides&nbsp;a&nbsp;limited&nbsp;interface&nbsp;to&nbsp;interact&nbsp;with&nbsp;the&nbsp;platform&nbsp;itself,&nbsp;where<br>
+possible.&nbsp;It's&nbsp;important&nbsp;to&nbsp;note&nbsp;that&nbsp;platforms&nbsp;may&nbsp;not&nbsp;provide&nbsp;a&nbsp;specific<br>
+API,&nbsp;so&nbsp;check&nbsp;with&nbsp;IsFooBar()&nbsp;for&nbsp;availability.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="Platform-CanCaptureVideo"><strong>CanCaptureVideo</strong></a>(self)</dt><dd><tt>Returns&nbsp;a&nbsp;bool&nbsp;indicating&nbsp;whether&nbsp;the&nbsp;platform&nbsp;supports&nbsp;video&nbsp;capture.</tt></dd></dl>
+
+<dl><dt><a name="Platform-CanFlushIndividualFilesFromSystemCache"><strong>CanFlushIndividualFilesFromSystemCache</strong></a>(self)</dt><dd><tt>Returns&nbsp;true&nbsp;if&nbsp;the&nbsp;disk&nbsp;cache&nbsp;can&nbsp;be&nbsp;flushed&nbsp;for&nbsp;specific&nbsp;files.</tt></dd></dl>
+
+<dl><dt><a name="Platform-CanLaunchApplication"><strong>CanLaunchApplication</strong></a>(self, application)</dt><dd><tt>Returns&nbsp;whether&nbsp;the&nbsp;platform&nbsp;can&nbsp;launch&nbsp;the&nbsp;given&nbsp;application.</tt></dd></dl>
+
+<dl><dt><a name="Platform-CanMeasurePerApplicationPower"><strong>CanMeasurePerApplicationPower</strong></a>(self)</dt><dd><tt>Returns&nbsp;True&nbsp;if&nbsp;the&nbsp;power&nbsp;monitor&nbsp;can&nbsp;measure&nbsp;power&nbsp;for&nbsp;the&nbsp;target<br>
+application&nbsp;in&nbsp;isolation.&nbsp;False&nbsp;if&nbsp;power&nbsp;measurement&nbsp;is&nbsp;for&nbsp;full&nbsp;system<br>
+energy&nbsp;consumption.</tt></dd></dl>
+
+<dl><dt><a name="Platform-CanMonitorNetworkData"><strong>CanMonitorNetworkData</strong></a>(self)</dt><dd><tt>Returns&nbsp;true&nbsp;if&nbsp;network&nbsp;data&nbsp;can&nbsp;be&nbsp;retrieved,&nbsp;false&nbsp;otherwise.</tt></dd></dl>
+
+<dl><dt><a name="Platform-CanMonitorPower"><strong>CanMonitorPower</strong></a>(self)</dt><dd><tt>Returns&nbsp;True&nbsp;iff&nbsp;power&nbsp;can&nbsp;be&nbsp;monitored&nbsp;asynchronously&nbsp;via<br>
+<a href="#Platform-StartMonitoringPower">StartMonitoringPower</a>()&nbsp;and&nbsp;<a href="#Platform-StopMonitoringPower">StopMonitoringPower</a>().</tt></dd></dl>
+
+<dl><dt><a name="Platform-CanMonitorThermalThrottling"><strong>CanMonitorThermalThrottling</strong></a>(self)</dt><dd><tt>Platforms&nbsp;may&nbsp;be&nbsp;able&nbsp;to&nbsp;detect&nbsp;thermal&nbsp;throttling.<br>
+&nbsp;<br>
+Some&nbsp;fan-less&nbsp;computers&nbsp;go&nbsp;into&nbsp;a&nbsp;reduced&nbsp;performance&nbsp;mode&nbsp;when&nbsp;their&nbsp;heat<br>
+exceeds&nbsp;a&nbsp;certain&nbsp;threshold.&nbsp;Performance&nbsp;tests&nbsp;in&nbsp;particular&nbsp;should&nbsp;use&nbsp;this<br>
+API&nbsp;to&nbsp;detect&nbsp;if&nbsp;this&nbsp;has&nbsp;happened&nbsp;and&nbsp;interpret&nbsp;results&nbsp;accordingly.</tt></dd></dl>
+
+<dl><dt><a name="Platform-CanTakeScreenshot"><strong>CanTakeScreenshot</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="Platform-CooperativelyShutdown"><strong>CooperativelyShutdown</strong></a>(self, proc, app_name)</dt><dd><tt>Cooperatively&nbsp;shut&nbsp;down&nbsp;the&nbsp;given&nbsp;process&nbsp;from&nbsp;subprocess.Popen.<br>
+&nbsp;<br>
+Currently&nbsp;this&nbsp;is&nbsp;only&nbsp;implemented&nbsp;on&nbsp;Windows.&nbsp;See<br>
+crbug.com/424024&nbsp;for&nbsp;background&nbsp;on&nbsp;why&nbsp;it&nbsp;was&nbsp;added.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;proc:&nbsp;a&nbsp;process&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;returned&nbsp;from&nbsp;subprocess.Popen.<br>
+&nbsp;&nbsp;app_name:&nbsp;on&nbsp;Windows,&nbsp;is&nbsp;the&nbsp;prefix&nbsp;of&nbsp;the&nbsp;application's&nbsp;window<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;class&nbsp;name&nbsp;that&nbsp;should&nbsp;be&nbsp;searched&nbsp;for.&nbsp;This&nbsp;helps&nbsp;ensure<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;that&nbsp;only&nbsp;the&nbsp;application's&nbsp;windows&nbsp;are&nbsp;closed.<br>
+&nbsp;<br>
+Returns&nbsp;True&nbsp;if&nbsp;it&nbsp;is&nbsp;believed&nbsp;the&nbsp;attempt&nbsp;succeeded.</tt></dd></dl>
+
+<dl><dt><a name="Platform-FlushDnsCache"><strong>FlushDnsCache</strong></a>(self)</dt><dd><tt>Flushes&nbsp;the&nbsp;OS's&nbsp;DNS&nbsp;cache&nbsp;completely.<br>
+&nbsp;<br>
+This&nbsp;function&nbsp;may&nbsp;require&nbsp;root&nbsp;or&nbsp;administrator&nbsp;access.</tt></dd></dl>
+
+<dl><dt><a name="Platform-FlushEntireSystemCache"><strong>FlushEntireSystemCache</strong></a>(self)</dt><dd><tt>Flushes&nbsp;the&nbsp;OS's&nbsp;file&nbsp;cache&nbsp;completely.<br>
+&nbsp;<br>
+This&nbsp;function&nbsp;may&nbsp;require&nbsp;root&nbsp;or&nbsp;administrator&nbsp;access.</tt></dd></dl>
+
+<dl><dt><a name="Platform-FlushSystemCacheForDirectory"><strong>FlushSystemCacheForDirectory</strong></a>(self, directory)</dt><dd><tt>Flushes&nbsp;the&nbsp;OS's&nbsp;file&nbsp;cache&nbsp;for&nbsp;the&nbsp;specified&nbsp;directory.<br>
+&nbsp;<br>
+This&nbsp;function&nbsp;does&nbsp;not&nbsp;require&nbsp;root&nbsp;or&nbsp;administrator&nbsp;access.</tt></dd></dl>
+
+<dl><dt><a name="Platform-GetArchName"><strong>GetArchName</strong></a>(self)</dt><dd><tt>Returns&nbsp;a&nbsp;string&nbsp;description&nbsp;of&nbsp;the&nbsp;<a href="#Platform">Platform</a>&nbsp;architecture.<br>
+&nbsp;<br>
+Examples:&nbsp;x86_64&nbsp;(posix),&nbsp;AMD64&nbsp;(win),&nbsp;armeabi-v7a,&nbsp;x86</tt></dd></dl>
+
+<dl><dt><a name="Platform-GetDeviceTypeName"><strong>GetDeviceTypeName</strong></a>(self)</dt><dd><tt>Returns&nbsp;a&nbsp;string&nbsp;description&nbsp;of&nbsp;the&nbsp;<a href="#Platform">Platform</a>&nbsp;device,&nbsp;or&nbsp;None.<br>
+&nbsp;<br>
+Examples:&nbsp;Nexus&nbsp;7,&nbsp;Nexus&nbsp;6,&nbsp;Desktop</tt></dd></dl>
+
+<dl><dt><a name="Platform-GetNetworkData"><strong>GetNetworkData</strong></a>(self, browser)</dt><dd><tt>Get&nbsp;current&nbsp;network&nbsp;data.<br>
+Returns:<br>
+&nbsp;&nbsp;Tuple&nbsp;of&nbsp;(sent_data,&nbsp;received_data)&nbsp;in&nbsp;kb&nbsp;if&nbsp;data&nbsp;can&nbsp;be&nbsp;found,<br>
+&nbsp;&nbsp;None&nbsp;otherwise.</tt></dd></dl>
+
+<dl><dt><a name="Platform-GetOSName"><strong>GetOSName</strong></a>(self)</dt><dd><tt>Returns&nbsp;a&nbsp;string&nbsp;description&nbsp;of&nbsp;the&nbsp;<a href="#Platform">Platform</a>&nbsp;OS.<br>
+&nbsp;<br>
+Examples:&nbsp;WIN,&nbsp;MAC,&nbsp;LINUX,&nbsp;CHROMEOS</tt></dd></dl>
+
+<dl><dt><a name="Platform-GetOSVersionName"><strong>GetOSVersionName</strong></a>(self)</dt><dd><tt>Returns&nbsp;a&nbsp;logically&nbsp;sortable,&nbsp;string-like&nbsp;description&nbsp;of&nbsp;the&nbsp;<a href="#Platform">Platform</a>&nbsp;OS<br>
+version.<br>
+&nbsp;<br>
+Examples:&nbsp;VISTA,&nbsp;WIN7,&nbsp;LION,&nbsp;MOUNTAINLION</tt></dd></dl>
+
+<dl><dt><a name="Platform-GetOSVersionNumber"><strong>GetOSVersionNumber</strong></a>(self)</dt><dd><tt>Returns&nbsp;an&nbsp;integer&nbsp;description&nbsp;of&nbsp;the&nbsp;<a href="#Platform">Platform</a>&nbsp;OS&nbsp;major&nbsp;version.<br>
+&nbsp;<br>
+Examples:&nbsp;On&nbsp;Mac,&nbsp;13&nbsp;for&nbsp;Mavericks,&nbsp;14&nbsp;for&nbsp;Yosemite.</tt></dd></dl>
+
+<dl><dt><a name="Platform-HasBeenThermallyThrottled"><strong>HasBeenThermallyThrottled</strong></a>(self)</dt><dd><tt>Returns&nbsp;True&nbsp;if&nbsp;the&nbsp;device&nbsp;has&nbsp;been&nbsp;thermally&nbsp;throttled.</tt></dd></dl>
+
+<dl><dt><a name="Platform-InstallApplication"><strong>InstallApplication</strong></a>(self, application)</dt><dd><tt>Installs&nbsp;the&nbsp;given&nbsp;application.</tt></dd></dl>
+
+<dl><dt><a name="Platform-IsApplicationRunning"><strong>IsApplicationRunning</strong></a>(self, application)</dt><dd><tt>Returns&nbsp;whether&nbsp;an&nbsp;application&nbsp;is&nbsp;currently&nbsp;running.</tt></dd></dl>
+
+<dl><dt><a name="Platform-IsCooperativeShutdownSupported"><strong>IsCooperativeShutdownSupported</strong></a>(self)</dt><dd><tt>Indicates&nbsp;whether&nbsp;CooperativelyShutdown,&nbsp;below,&nbsp;is&nbsp;supported.<br>
+It&nbsp;is&nbsp;not&nbsp;necessary&nbsp;to&nbsp;implement&nbsp;it&nbsp;on&nbsp;all&nbsp;platforms.</tt></dd></dl>
+
+<dl><dt><a name="Platform-IsMonitoringPower"><strong>IsMonitoringPower</strong></a>(self)</dt><dd><tt>Returns&nbsp;true&nbsp;if&nbsp;power&nbsp;is&nbsp;currently&nbsp;being&nbsp;monitored,&nbsp;false&nbsp;otherwise.</tt></dd></dl>
+
+<dl><dt><a name="Platform-IsThermallyThrottled"><strong>IsThermallyThrottled</strong></a>(self)</dt><dd><tt>Returns&nbsp;True&nbsp;if&nbsp;the&nbsp;device&nbsp;is&nbsp;currently&nbsp;thermally&nbsp;throttled.</tt></dd></dl>
+
+<dl><dt><a name="Platform-LaunchApplication"><strong>LaunchApplication</strong></a>(self, application, parameters<font color="#909090">=None</font>, elevate_privilege<font color="#909090">=False</font>)</dt><dd><tt>"Launches&nbsp;the&nbsp;given&nbsp;|application|&nbsp;with&nbsp;a&nbsp;list&nbsp;of&nbsp;|parameters|&nbsp;on&nbsp;the&nbsp;OS.<br>
+&nbsp;<br>
+Set&nbsp;|elevate_privilege|&nbsp;to&nbsp;launch&nbsp;the&nbsp;application&nbsp;with&nbsp;root&nbsp;or&nbsp;admin&nbsp;rights.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;popen&nbsp;style&nbsp;process&nbsp;handle&nbsp;for&nbsp;host&nbsp;platforms.</tt></dd></dl>
+
+<dl><dt><a name="Platform-SetHTTPServerDirectories"><strong>SetHTTPServerDirectories</strong></a>(self, paths)</dt><dd><tt>Returns&nbsp;True&nbsp;if&nbsp;the&nbsp;HTTP&nbsp;server&nbsp;was&nbsp;started,&nbsp;False&nbsp;otherwise.</tt></dd></dl>
+
+<dl><dt><a name="Platform-StartLocalServer"><strong>StartLocalServer</strong></a>(self, server)</dt><dd><tt>Starts&nbsp;a&nbsp;LocalServer&nbsp;and&nbsp;associates&nbsp;it&nbsp;with&nbsp;this&nbsp;platform.<br>
+|server.Close()|&nbsp;should&nbsp;be&nbsp;called&nbsp;manually&nbsp;to&nbsp;close&nbsp;the&nbsp;started&nbsp;server.</tt></dd></dl>
+
+<dl><dt><a name="Platform-StartMonitoringPower"><strong>StartMonitoringPower</strong></a>(self, browser)</dt><dd><tt>Starts&nbsp;monitoring&nbsp;power&nbsp;utilization&nbsp;statistics.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;browser:&nbsp;The&nbsp;browser&nbsp;to&nbsp;monitor.</tt></dd></dl>
+
+<dl><dt><a name="Platform-StartVideoCapture"><strong>StartVideoCapture</strong></a>(self, min_bitrate_mbps)</dt><dd><tt>Starts&nbsp;capturing&nbsp;video.<br>
+&nbsp;<br>
+Outer&nbsp;framing&nbsp;may&nbsp;be&nbsp;included&nbsp;(from&nbsp;the&nbsp;OS,&nbsp;browser&nbsp;window,&nbsp;and&nbsp;webcam).<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;min_bitrate_mbps:&nbsp;The&nbsp;minimum&nbsp;capture&nbsp;bitrate&nbsp;in&nbsp;MegaBits&nbsp;Per&nbsp;Second.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;The&nbsp;platform&nbsp;is&nbsp;free&nbsp;to&nbsp;deliver&nbsp;a&nbsp;higher&nbsp;bitrate&nbsp;if&nbsp;it&nbsp;can&nbsp;do&nbsp;so<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;without&nbsp;increasing&nbsp;overhead.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;ValueError&nbsp;if&nbsp;the&nbsp;required&nbsp;|min_bitrate_mbps|&nbsp;can't&nbsp;be&nbsp;achieved.</tt></dd></dl>
+
+<dl><dt><a name="Platform-StopAllLocalServers"><strong>StopAllLocalServers</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="Platform-StopMonitoringPower"><strong>StopMonitoringPower</strong></a>(self)</dt><dd><tt>Stops&nbsp;monitoring&nbsp;power&nbsp;utilization&nbsp;and&nbsp;returns&nbsp;stats<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;None&nbsp;if&nbsp;power&nbsp;measurement&nbsp;failed&nbsp;for&nbsp;some&nbsp;reason,&nbsp;otherwise&nbsp;a&nbsp;dict&nbsp;of<br>
+&nbsp;&nbsp;power&nbsp;utilization&nbsp;statistics&nbsp;containing:&nbsp;{<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;An&nbsp;identifier&nbsp;for&nbsp;the&nbsp;data&nbsp;provider.&nbsp;Allows&nbsp;to&nbsp;evaluate&nbsp;the&nbsp;precision<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;of&nbsp;the&nbsp;data.&nbsp;Example&nbsp;values:&nbsp;monsoon,&nbsp;powermetrics,&nbsp;ds2784<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'identifier':&nbsp;identifier,<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;The&nbsp;instantaneous&nbsp;power&nbsp;(voltage&nbsp;*&nbsp;current)&nbsp;reading&nbsp;in&nbsp;milliwatts&nbsp;at<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;each&nbsp;sample.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'power_samples_mw':&nbsp;&nbsp;[mw0,&nbsp;mw1,&nbsp;...,&nbsp;mwN],<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;The&nbsp;full&nbsp;system&nbsp;energy&nbsp;consumption&nbsp;during&nbsp;the&nbsp;sampling&nbsp;period&nbsp;in<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;milliwatt&nbsp;hours.&nbsp;May&nbsp;be&nbsp;estimated&nbsp;by&nbsp;integrating&nbsp;power&nbsp;samples&nbsp;or&nbsp;may<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;be&nbsp;exact&nbsp;on&nbsp;supported&nbsp;hardware.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'energy_consumption_mwh':&nbsp;mwh,<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;The&nbsp;target&nbsp;application's&nbsp;energy&nbsp;consumption&nbsp;during&nbsp;the&nbsp;sampling&nbsp;period<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;in&nbsp;milliwatt&nbsp;hours.&nbsp;Should&nbsp;be&nbsp;returned&nbsp;iff<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;<a href="#Platform-CanMeasurePerApplicationPower">CanMeasurePerApplicationPower</a>()&nbsp;returns&nbsp;true.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'application_energy_consumption_mwh':&nbsp;mwh,<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;A&nbsp;platform-specific&nbsp;dictionary&nbsp;of&nbsp;additional&nbsp;details&nbsp;about&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;utilization&nbsp;of&nbsp;individual&nbsp;hardware&nbsp;components.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;component_utilization:&nbsp;{<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;...<br>
+&nbsp;&nbsp;&nbsp;&nbsp;}<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;<a href="#Platform">Platform</a>-specific&nbsp;data&nbsp;not&nbsp;attributed&nbsp;to&nbsp;any&nbsp;particular&nbsp;hardware<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;component.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;platform_info:&nbsp;{<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;Device-specific&nbsp;onboard&nbsp;temperature&nbsp;sensor.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;'average_temperature_c':&nbsp;c,<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;...<br>
+&nbsp;&nbsp;&nbsp;&nbsp;}<br>
+&nbsp;<br>
+&nbsp;&nbsp;}</tt></dd></dl>
+
+<dl><dt><a name="Platform-StopVideoCapture"><strong>StopVideoCapture</strong></a>(self)</dt><dd><tt>Stops&nbsp;capturing&nbsp;video.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;telemetry.core.video.Video&nbsp;<a href="__builtin__.html#object">object</a>.</tt></dd></dl>
+
+<dl><dt><a name="Platform-TakeScreenshot"><strong>TakeScreenshot</strong></a>(self, file_path)</dt><dd><tt>Takes&nbsp;a&nbsp;screenshot&nbsp;of&nbsp;the&nbsp;platform&nbsp;and&nbsp;save&nbsp;to&nbsp;|file_path|.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;this&nbsp;method&nbsp;may&nbsp;not&nbsp;be&nbsp;supported&nbsp;on&nbsp;all&nbsp;platform,&nbsp;so&nbsp;check&nbsp;with<br>
+CanTakeScreenshot&nbsp;before&nbsp;calling&nbsp;this.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;file_path:&nbsp;Where&nbsp;to&nbsp;save&nbsp;the&nbsp;screenshot&nbsp;to.&nbsp;If&nbsp;the&nbsp;platform&nbsp;is&nbsp;remote,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;|file_path|&nbsp;is&nbsp;the&nbsp;path&nbsp;on&nbsp;the&nbsp;host&nbsp;platform.<br>
+&nbsp;<br>
+Returns&nbsp;True&nbsp;if&nbsp;it&nbsp;is&nbsp;believed&nbsp;the&nbsp;attempt&nbsp;succeeded.</tt></dd></dl>
+
+<dl><dt><a name="Platform-__init__"><strong>__init__</strong></a>(self, platform_backend)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>http_server</strong></dt>
+</dl>
+<dl><dt><strong>is_host_platform</strong></dt>
+</dl>
+<dl><dt><strong>local_servers</strong></dt>
+<dd><tt>Returns&nbsp;the&nbsp;currently&nbsp;running&nbsp;local&nbsp;servers.</tt></dd>
+</dl>
+<dl><dt><strong>network_controller</strong></dt>
+<dd><tt>Control&nbsp;network&nbsp;settings&nbsp;and&nbsp;servers&nbsp;to&nbsp;simulate&nbsp;the&nbsp;Web.</tt></dd>
+</dl>
+<dl><dt><strong>tracing_controller</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-GetHostPlatform"><strong>GetHostPlatform</strong></a>()</dt></dl>
+ <dl><dt><a name="-GetPlatformForDevice"><strong>GetPlatformForDevice</strong></a>(device, finder_options, logging<font color="#909090">=&lt;module 'logging' from '/usr/lib/python2.7/logging/__init__.pyc'&gt;</font>)</dt><dd><tt>Returns&nbsp;a&nbsp;platform&nbsp;instance&nbsp;for&nbsp;the&nbsp;device.<br>
+Args:<br>
+&nbsp;&nbsp;device:&nbsp;a&nbsp;device.Device&nbsp;instance.</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
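
The Platform docstrings above repeatedly point callers at the Can*() predicates before using an optional capability. A minimal sketch of that pattern, assuming a browser object that is already running (obtaining one is outside this module):

```python
# Sketch of the "check the Can*() predicate first" pattern described above.
# |browser| is assumed to be an already-running browser object.
from telemetry.core import platform as platform_module


def CollectOptionalDiagnostics(browser, screenshot_path):
  host = platform_module.GetHostPlatform()
  results = {'os': host.GetOSName(), 'arch': host.GetArchName()}

  # Power monitoring is only available on some platforms.
  if host.CanMonitorPower():
    host.StartMonitoringPower(browser)
    # ... run the workload of interest here ...
    results['power'] = host.StopMonitoringPower()

  # TakeScreenshot() is documented as unsupported on some platforms.
  if host.CanTakeScreenshot():
    host.TakeScreenshot(screenshot_path)

  return results
```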
diff --git a/catapult/telemetry/docs/pydoc/telemetry.core.profiling_controller.html b/catapult/telemetry/docs/pydoc/telemetry.core.profiling_controller.html
new file mode 100644
index 0000000..c5d4b2c
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.core.profiling_controller.html
@@ -0,0 +1,54 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.core.profiling_controller</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.core.html"><font color="#ffffff">core</font></a>.profiling_controller</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/core/profiling_controller.py">telemetry/core/profiling_controller.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.core.profiling_controller.html#ProfilingController">ProfilingController</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ProfilingController">class <strong>ProfilingController</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="ProfilingController-Start"><strong>Start</strong></a>(self, profiler_name, base_output_file)</dt></dl>
+
+<dl><dt><a name="ProfilingController-Stop"><strong>Stop</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ProfilingController-__init__"><strong>__init__</strong></a>(self, profiling_controller_backend)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.core.tracing_controller.html b/catapult/telemetry/docs/pydoc/telemetry.core.tracing_controller.html
new file mode 100644
index 0000000..4557f4b
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.core.tracing_controller.html
@@ -0,0 +1,72 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.core.tracing_controller</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.core.html"><font color="#ffffff">core</font></a>.tracing_controller</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/core/tracing_controller.py">telemetry/core/tracing_controller.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.core.tracing_controller.html#TracingController">TracingController</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TracingController">class <strong>TracingController</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="TracingController-IsChromeTracingSupported"><strong>IsChromeTracingSupported</strong></a>(self)</dt><dd><tt>Returns&nbsp;whether&nbsp;chrome&nbsp;tracing&nbsp;is&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="TracingController-Start"><strong>Start</strong></a>(self, trace_options, category_filter, timeout<font color="#909090">=10</font>)</dt><dd><tt>Starts&nbsp;tracing.<br>
+&nbsp;<br>
+trace_options&nbsp;specifies&nbsp;which&nbsp;tracing&nbsp;systems&nbsp;to&nbsp;activate.&nbsp;Category&nbsp;filter<br>
+allows&nbsp;fine-tuning&nbsp;of&nbsp;the&nbsp;data&nbsp;that&nbsp;are&nbsp;collected&nbsp;by&nbsp;the&nbsp;selected&nbsp;tracing<br>
+systems.<br>
+&nbsp;<br>
+Some&nbsp;tracers&nbsp;are&nbsp;process-specific,&nbsp;e.g.&nbsp;chrome&nbsp;tracing,&nbsp;but&nbsp;are&nbsp;not<br>
+guaranteed&nbsp;to&nbsp;be&nbsp;supported.&nbsp;In&nbsp;order&nbsp;to&nbsp;support&nbsp;tracing&nbsp;of&nbsp;these&nbsp;kinds&nbsp;of<br>
+tracers,&nbsp;Start&nbsp;will&nbsp;succeed&nbsp;*always*,&nbsp;even&nbsp;if&nbsp;the&nbsp;tracing&nbsp;systems&nbsp;you&nbsp;have<br>
+requested&nbsp;are&nbsp;not&nbsp;supported.<br>
+&nbsp;<br>
+If&nbsp;you&nbsp;absolutely&nbsp;require&nbsp;a&nbsp;particular&nbsp;tracer&nbsp;to&nbsp;exist,&nbsp;then&nbsp;check<br>
+for&nbsp;its&nbsp;support&nbsp;after&nbsp;you&nbsp;have&nbsp;started&nbsp;the&nbsp;process&nbsp;in&nbsp;question.&nbsp;Or,&nbsp;have<br>
+your&nbsp;code&nbsp;fail&nbsp;gracefully&nbsp;when&nbsp;the&nbsp;data&nbsp;you&nbsp;require&nbsp;is&nbsp;not&nbsp;present&nbsp;in&nbsp;the<br>
+resulting&nbsp;trace.</tt></dd></dl>
+
+<dl><dt><a name="TracingController-Stop"><strong>Stop</strong></a>(self)</dt><dd><tt>Stops&nbsp;tracing&nbsp;and&nbsp;returns&nbsp;a&nbsp;TraceValue.</tt></dd></dl>
+
+<dl><dt><a name="TracingController-__init__"><strong>__init__</strong></a>(self, tracing_controller_backend)</dt><dd><tt>Provides&nbsp;control&nbsp;of&nbsp;the&nbsp;tracing&nbsp;systems&nbsp;supported&nbsp;by&nbsp;telemetry.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>is_tracing_running</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
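
Start() above is documented to succeed even when a requested tracer is unsupported, so callers who need chrome tracing have to check for it themselves. A minimal sketch of that contract, assuming the caller already has a tracing_controller plus trace_options and category_filter values (their construction is not documented in this hunk):

```python
# Sketch of the Start/Stop contract described above. |tracing_controller|,
# |trace_options| and |category_filter| are assumed to be supplied by the
# caller; building them is not covered by this page.
def CaptureChromeTrace(tracing_controller, trace_options, category_filter,
                       run_workload):
  if not tracing_controller.IsChromeTracingSupported():
    # Start() would still "succeed", so refuse explicitly instead.
    return None
  tracing_controller.Start(trace_options, category_filter, timeout=10)
  try:
    run_workload()
  finally:
    # Stop() is documented to return a TraceValue.
    trace_value = tracing_controller.Stop()
  return trace_value
```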
diff --git a/catapult/telemetry/docs/pydoc/telemetry.core.util.html b/catapult/telemetry/docs/pydoc/telemetry.core.util.html
new file mode 100644
index 0000000..1c76b79
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.core.util.html
@@ -0,0 +1,104 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.core.util</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.core.html"><font color="#ffffff">core</font></a>.util</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/core/util.py">telemetry/core/util.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+<a href="glob.html">glob</a><br>
+<a href="imp.html">imp</a><br>
+</td><td width="25%" valign=top><a href="inspect.html">inspect</a><br>
+<a href="logging.html">logging</a><br>
+<a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="socket.html">socket</a><br>
+<a href="sys.html">sys</a><br>
+<a href="time.html">time</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.core.util.html#PortKeeper">PortKeeper</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PortKeeper">class <strong>PortKeeper</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Port&nbsp;keeper&nbsp;holds&nbsp;an&nbsp;available&nbsp;port&nbsp;on&nbsp;the&nbsp;system.<br>
+&nbsp;<br>
+Before&nbsp;actually&nbsp;using&nbsp;the&nbsp;port,&nbsp;you&nbsp;must&nbsp;call&nbsp;<a href="#PortKeeper-Release">Release</a>().<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="PortKeeper-Release"><strong>Release</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PortKeeper-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>port</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-GetBaseDir"><strong>GetBaseDir</strong></a>()</dt></dl>
+ <dl><dt><a name="-GetBuildDirectories"><strong>GetBuildDirectories</strong></a>()</dt><dd><tt>Yields&nbsp;all&nbsp;combination&nbsp;of&nbsp;Chromium&nbsp;build&nbsp;output&nbsp;directories.</tt></dd></dl>
+ <dl><dt><a name="-GetChromiumSrcDir"><strong>GetChromiumSrcDir</strong></a>()</dt></dl>
+ <dl><dt><a name="-GetPythonPageSetModule"><strong>GetPythonPageSetModule</strong></a>(file_path)</dt></dl>
+ <dl><dt><a name="-GetSequentialFileName"><strong>GetSequentialFileName</strong></a>(base_name)</dt><dd><tt>Returns&nbsp;the&nbsp;next&nbsp;sequential&nbsp;file&nbsp;name&nbsp;based&nbsp;on&nbsp;|base_name|&nbsp;and&nbsp;the<br>
+existing&nbsp;files.&nbsp;base_name&nbsp;should&nbsp;not&nbsp;contain&nbsp;an&nbsp;extension.<br>
+e.g.:&nbsp;if&nbsp;base_name&nbsp;is&nbsp;/tmp/test,&nbsp;and&nbsp;/tmp/test_000.json&nbsp;and<br>
+/tmp/test_001.mp3&nbsp;exist,&nbsp;this&nbsp;returns&nbsp;/tmp/test_002.&nbsp;If&nbsp;no<br>
+other&nbsp;sequential&nbsp;file&nbsp;names&nbsp;exist,&nbsp;this&nbsp;will&nbsp;return&nbsp;/tmp/test_000.</tt></dd></dl>
+ <dl><dt><a name="-GetTelemetryDir"><strong>GetTelemetryDir</strong></a>()</dt></dl>
+ <dl><dt><a name="-GetTelemetryThirdPartyDir"><strong>GetTelemetryThirdPartyDir</strong></a>()</dt></dl>
+ <dl><dt><a name="-GetUnittestDataDir"><strong>GetUnittestDataDir</strong></a>()</dt></dl>
+ <dl><dt><a name="-GetUnreservedAvailableLocalPort"><strong>GetUnreservedAvailableLocalPort</strong></a>()</dt><dd><tt>Returns&nbsp;an&nbsp;available&nbsp;port&nbsp;on&nbsp;the&nbsp;system.<br>
+&nbsp;<br>
+WARNING:&nbsp;This&nbsp;method&nbsp;does&nbsp;not&nbsp;reserve&nbsp;the&nbsp;port&nbsp;it&nbsp;returns,&nbsp;so&nbsp;it&nbsp;may&nbsp;be&nbsp;used<br>
+by&nbsp;something&nbsp;else&nbsp;before&nbsp;you&nbsp;get&nbsp;to&nbsp;use&nbsp;it.&nbsp;This&nbsp;can&nbsp;lead&nbsp;to&nbsp;flake.</tt></dd></dl>
+ <dl><dt><a name="-IsRunningOnCrosDevice"><strong>IsRunningOnCrosDevice</strong></a>()</dt><dd><tt>Returns&nbsp;True&nbsp;if&nbsp;we're&nbsp;on&nbsp;a&nbsp;ChromeOS&nbsp;device.</tt></dd></dl>
+ <dl><dt><a name="-WaitFor"><strong>WaitFor</strong></a>(condition, timeout)</dt><dd><tt>Waits&nbsp;for&nbsp;up&nbsp;to&nbsp;|timeout|&nbsp;secs&nbsp;for&nbsp;the&nbsp;function&nbsp;|condition|&nbsp;to&nbsp;return&nbsp;True.<br>
+&nbsp;<br>
+Polling&nbsp;frequency&nbsp;is&nbsp;(elapsed_time&nbsp;/&nbsp;10),&nbsp;with&nbsp;a&nbsp;min&nbsp;of&nbsp;.1s&nbsp;and&nbsp;max&nbsp;of&nbsp;5s.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;Result&nbsp;of&nbsp;|condition|&nbsp;function&nbsp;(if&nbsp;present).</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
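
The helpers above cover polling and port selection; GetUnreservedAvailableLocalPort() explicitly warns that the port is not reserved, which is what PortKeeper is for. A minimal sketch of both, where |server_is_up| is any zero-argument callable returning a truthy value:

```python
# Sketch using the telemetry.core.util helpers documented above.
from telemetry.core import util


def WaitForServer(server_is_up, timeout_secs=30):
  # Polls |server_is_up| until it returns True or |timeout_secs| elapse.
  return util.WaitFor(server_is_up, timeout_secs)


def PickLocalPort():
  # PortKeeper holds the port until Release(); per its docstring, Release()
  # must be called before the port is actually used.
  keeper = util.PortKeeper()
  port = keeper.port
  keeper.Release()
  return port
```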
diff --git a/catapult/telemetry/docs/pydoc/telemetry.decorators.html b/catapult/telemetry/docs/pydoc/telemetry.decorators.html
new file mode 100644
index 0000000..b203e50
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.decorators.html
@@ -0,0 +1,117 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.decorators</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.decorators</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/decorators.py">telemetry/decorators.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.<br>
+#&nbsp;pylint:&nbsp;disable=W0212</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="datetime.html">datetime</a><br>
+<a href="functools.html">functools</a><br>
+</td><td width="25%" valign=top><a href="inspect.html">inspect</a><br>
+<a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="types.html">types</a><br>
+<a href="warnings.html">warnings</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.decorators.html#Deprecated">Deprecated</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Deprecated">class <strong>Deprecated</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="Deprecated-__call__"><strong>__call__</strong></a>(self, target)</dt></dl>
+
+<dl><dt><a name="Deprecated-__init__"><strong>__init__</strong></a>(self, year, month, day, extra_guidance<font color="#909090">=''</font>)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-Cache"><strong>Cache</strong></a>(obj)</dt><dd><tt>Decorator&nbsp;for&nbsp;caching&nbsp;read-only&nbsp;properties.<br>
+&nbsp;<br>
+Example&nbsp;usage&nbsp;(always&nbsp;returns&nbsp;the&nbsp;same&nbsp;Foo&nbsp;instance):<br>
+&nbsp;&nbsp;@Cache<br>
+&nbsp;&nbsp;def&nbsp;CreateFoo():<br>
+&nbsp;&nbsp;&nbsp;&nbsp;return&nbsp;Foo()<br>
+&nbsp;<br>
+If&nbsp;CreateFoo()&nbsp;accepts&nbsp;parameters,&nbsp;a&nbsp;separate&nbsp;cached&nbsp;value&nbsp;is&nbsp;maintained<br>
+for&nbsp;each&nbsp;unique&nbsp;parameter&nbsp;combination.<br>
+&nbsp;<br>
+Cached&nbsp;methods&nbsp;maintain&nbsp;their&nbsp;cache&nbsp;for&nbsp;the&nbsp;lifetime&nbsp;of&nbsp;the&nbsp;/instance/,&nbsp;while<br>
+cached&nbsp;functions&nbsp;maintain&nbsp;their&nbsp;cache&nbsp;for&nbsp;the&nbsp;lifetime&nbsp;of&nbsp;the&nbsp;/module/.</tt></dd></dl>
+ <dl><dt><a name="-Disabled"><strong>Disabled</strong></a>(*args)</dt><dd><tt>Decorator&nbsp;for&nbsp;disabling&nbsp;tests/benchmarks.<br>
+&nbsp;<br>
+If&nbsp;args&nbsp;are&nbsp;given,&nbsp;the&nbsp;test&nbsp;will&nbsp;be&nbsp;disabled&nbsp;if&nbsp;ANY&nbsp;of&nbsp;the&nbsp;args&nbsp;match&nbsp;the<br>
+browser&nbsp;type,&nbsp;OS&nbsp;name&nbsp;or&nbsp;OS&nbsp;version:<br>
+&nbsp;&nbsp;@<a href="#-Disabled">Disabled</a>('canary')&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;Disabled&nbsp;for&nbsp;canary&nbsp;browsers<br>
+&nbsp;&nbsp;@<a href="#-Disabled">Disabled</a>('win')&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;Disabled&nbsp;on&nbsp;Windows.<br>
+&nbsp;&nbsp;@<a href="#-Disabled">Disabled</a>('win',&nbsp;'linux')&nbsp;&nbsp;#&nbsp;Disabled&nbsp;on&nbsp;both&nbsp;Windows&nbsp;and&nbsp;Linux.<br>
+&nbsp;&nbsp;@<a href="#-Disabled">Disabled</a>('mavericks')&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;Disabled&nbsp;on&nbsp;Mac&nbsp;Mavericks&nbsp;(10.9)&nbsp;only.<br>
+&nbsp;&nbsp;@<a href="#-Disabled">Disabled</a>('all')&nbsp;&nbsp;#&nbsp;Unconditionally&nbsp;disabled.</tt></dd></dl>
+ <dl><dt><a name="-Enabled"><strong>Enabled</strong></a>(*args)</dt><dd><tt>Decorator&nbsp;for&nbsp;enabling&nbsp;tests/benchmarks.<br>
+&nbsp;<br>
+The&nbsp;test&nbsp;will&nbsp;be&nbsp;enabled&nbsp;if&nbsp;ANY&nbsp;of&nbsp;the&nbsp;args&nbsp;match&nbsp;the&nbsp;browser&nbsp;type,&nbsp;OS&nbsp;name<br>
+or&nbsp;OS&nbsp;version:<br>
+&nbsp;&nbsp;@<a href="#-Enabled">Enabled</a>('canary')&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;Enabled&nbsp;only&nbsp;for&nbsp;canary&nbsp;browsers<br>
+&nbsp;&nbsp;@<a href="#-Enabled">Enabled</a>('win')&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;Enabled&nbsp;only&nbsp;on&nbsp;Windows.<br>
+&nbsp;&nbsp;@<a href="#-Enabled">Enabled</a>('win',&nbsp;'linux')&nbsp;&nbsp;#&nbsp;Enabled&nbsp;only&nbsp;on&nbsp;Windows&nbsp;or&nbsp;Linux.<br>
+&nbsp;&nbsp;@<a href="#-Enabled">Enabled</a>('mavericks')&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;Enabled&nbsp;only&nbsp;on&nbsp;Mac&nbsp;Mavericks&nbsp;(10.9).</tt></dd></dl>
+ <dl><dt><a name="-IsEnabled"><strong>IsEnabled</strong></a>(test, possible_browser)</dt><dd><tt>Returns&nbsp;True&nbsp;iff&nbsp;|test|&nbsp;is&nbsp;enabled&nbsp;given&nbsp;the&nbsp;|possible_browser|.<br>
+&nbsp;<br>
+Use&nbsp;to&nbsp;respect&nbsp;the&nbsp;@Enabled&nbsp;/&nbsp;@Disabled&nbsp;decorators.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;test:&nbsp;A&nbsp;function&nbsp;or&nbsp;class&nbsp;that&nbsp;may&nbsp;contain&nbsp;_disabled_strings&nbsp;and/or<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;_enabled_strings&nbsp;attributes.<br>
+&nbsp;&nbsp;possible_browser:&nbsp;A&nbsp;PossibleBrowser&nbsp;to&nbsp;check&nbsp;whether&nbsp;|test|&nbsp;may&nbsp;run&nbsp;against.</tt></dd></dl>
+ <dl><dt><a name="-Isolated"><strong>Isolated</strong></a>(*args)</dt><dd><tt>Decorator&nbsp;for&nbsp;noting&nbsp;that&nbsp;tests&nbsp;must&nbsp;be&nbsp;run&nbsp;in&nbsp;isolation.<br>
+&nbsp;<br>
+The&nbsp;test&nbsp;will&nbsp;be&nbsp;run&nbsp;by&nbsp;itself&nbsp;(not&nbsp;concurrently&nbsp;with&nbsp;any&nbsp;other&nbsp;tests)<br>
+if&nbsp;ANY&nbsp;of&nbsp;the&nbsp;args&nbsp;match&nbsp;the&nbsp;browser&nbsp;type,&nbsp;OS&nbsp;name,&nbsp;or&nbsp;OS&nbsp;version.</tt></dd></dl>
+ <dl><dt><a name="-ShouldBeIsolated"><strong>ShouldBeIsolated</strong></a>(test, possible_browser)</dt></dl>
+ <dl><dt><a name="-ShouldSkip"><strong>ShouldSkip</strong></a>(test, possible_browser)</dt><dd><tt>Returns&nbsp;whether&nbsp;the&nbsp;test&nbsp;should&nbsp;be&nbsp;skipped&nbsp;and&nbsp;the&nbsp;reason&nbsp;for&nbsp;it.</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
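
The decorator docstrings above describe how @Cache, @Enabled and @Disabled are applied; a minimal self-contained sketch (the decorated names are hypothetical):

```python
# Sketch of the decorators documented above; the decorated names are made up.
from telemetry import decorators


@decorators.Cache
def ExpensiveValue():
  # Runs once; later calls reuse the cached result for the module's lifetime.
  return sum(range(10 ** 6))


@decorators.Disabled('win')            # Skipped on Windows.
@decorators.Enabled('mac', 'linux')    # Runs only on Mac or Linux.
def HostOnlyCheck():
  # The decorators only annotate the target; IsEnabled()/ShouldSkip() consult
  # those annotations when given a possible_browser.
  return True
```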
diff --git a/catapult/telemetry/docs/pydoc/telemetry.html b/catapult/telemetry/docs/pydoc/telemetry.html
new file mode 100644
index 0000000..e1962d2
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.html
@@ -0,0 +1,44 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong>telemetry</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/__init__.py">telemetry/__init__.py</a></font></td></tr></table>
+    <p><tt>A&nbsp;library&nbsp;for&nbsp;cross-platform&nbsp;browser&nbsp;tests.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.android.html"><strong>android</strong>&nbsp;(package)</a><br>
+<a href="telemetry.benchmark.html">benchmark</a><br>
+<a href="telemetry.benchmark_run_unittest.html">benchmark_run_unittest</a><br>
+<a href="telemetry.benchmark_runner.html">benchmark_runner</a><br>
+<a href="telemetry.benchmark_runner_unittest.html">benchmark_runner_unittest</a><br>
+<a href="telemetry.benchmark_unittest.html">benchmark_unittest</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.html"><strong>core</strong>&nbsp;(package)</a><br>
+<a href="telemetry.decorators.html">decorators</a><br>
+<a href="telemetry.decorators_unittest.html">decorators_unittest</a><br>
+<a href="telemetry.internal.html"><strong>internal</strong>&nbsp;(package)</a><br>
+<a href="telemetry.page.html"><strong>page</strong>&nbsp;(package)</a><br>
+<a href="telemetry.project_config.html">project_config</a><br>
+</td><td width="25%" valign=top><a href="telemetry.record_wpr.html">record_wpr</a><br>
+<a href="telemetry.record_wpr_unittest.html">record_wpr_unittest</a><br>
+<a href="telemetry.story.html"><strong>story</strong>&nbsp;(package)</a><br>
+<a href="telemetry.telemetry_dependencies_unittest.html">telemetry_dependencies_unittest</a><br>
+<a href="telemetry.testing.html"><strong>testing</strong>&nbsp;(package)</a><br>
+<a href="telemetry.timeline.html"><strong>timeline</strong>&nbsp;(package)</a><br>
+</td><td width="25%" valign=top><a href="telemetry.util.html"><strong>util</strong>&nbsp;(package)</a><br>
+<a href="telemetry.value.html"><strong>value</strong>&nbsp;(package)</a><br>
+<a href="telemetry.web_perf.html"><strong>web_perf</strong>&nbsp;(package)</a><br>
+<a href="telemetry.wpr.html"><strong>wpr</strong>&nbsp;(package)</a><br>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.actions.drag.html b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.drag.html
new file mode 100644
index 0000000..3169961
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.drag.html
@@ -0,0 +1,84 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.actions.drag</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.actions.html"><font color="#ffffff">actions</font></a>.drag</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/actions/drag.py">telemetry/internal/actions/drag.py</a></font></td></tr></table>
+    <p><tt>A&nbsp;Telemetry&nbsp;page_action&nbsp;that&nbsp;performs&nbsp;the&nbsp;"drag"&nbsp;action&nbsp;on&nbsp;pages.<br>
+&nbsp;<br>
+Action&nbsp;parameters&nbsp;are:<br>
+-&nbsp;selector:&nbsp;If&nbsp;no&nbsp;selector&nbsp;is&nbsp;defined&nbsp;then&nbsp;the&nbsp;action&nbsp;attempts&nbsp;to&nbsp;drag&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;document&nbsp;element&nbsp;on&nbsp;the&nbsp;page.<br>
+-&nbsp;element_function:&nbsp;CSS&nbsp;selector&nbsp;used&nbsp;to&nbsp;evaluate&nbsp;callback&nbsp;when&nbsp;test&nbsp;completes<br>
+-&nbsp;text:&nbsp;The&nbsp;element&nbsp;with&nbsp;exact&nbsp;text&nbsp;is&nbsp;selected.<br>
+-&nbsp;left_start_ratio:&nbsp;ratio&nbsp;of&nbsp;start&nbsp;point's&nbsp;left&nbsp;coordinate&nbsp;to&nbsp;the&nbsp;element<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;width.<br>
+-&nbsp;top_start_ratio:&nbsp;ratio&nbsp;of&nbsp;start&nbsp;point's&nbsp;top&nbsp;coordinate&nbsp;to&nbsp;the&nbsp;element&nbsp;height.<br>
+-&nbsp;left_end_ratio:&nbsp;ratio&nbsp;of&nbsp;end&nbsp;point's&nbsp;left&nbsp;coordinate&nbsp;to&nbsp;the&nbsp;element&nbsp;width.<br>
+-&nbsp;top_end_ratio:&nbsp;ratio&nbsp;of&nbsp;end&nbsp;point's&nbsp;top&nbsp;coordinate&nbsp;to&nbsp;the&nbsp;element&nbsp;height.<br>
+-&nbsp;speed_in_pixels_per_second:&nbsp;speed&nbsp;of&nbsp;the&nbsp;drag&nbsp;gesture&nbsp;in&nbsp;pixels&nbsp;per&nbsp;second.<br>
+-&nbsp;use_touch:&nbsp;boolean&nbsp;value&nbsp;to&nbsp;specify&nbsp;if&nbsp;gesture&nbsp;should&nbsp;use&nbsp;touch&nbsp;input&nbsp;or&nbsp;not.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.actions.page_action.html">telemetry.internal.actions.page_action</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.drag.html#DragAction">DragAction</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="DragAction">class <strong>DragAction</strong></a>(<a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.actions.drag.html#DragAction">DragAction</a></dd>
+<dd><a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="DragAction-RunAction"><strong>RunAction</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="DragAction-WillRunAction"><strong>WillRunAction</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="DragAction-__init__"><strong>__init__</strong></a>(self, selector<font color="#909090">=None</font>, text<font color="#909090">=None</font>, element_function<font color="#909090">=None</font>, left_start_ratio<font color="#909090">=None</font>, top_start_ratio<font color="#909090">=None</font>, left_end_ratio<font color="#909090">=None</font>, top_end_ratio<font color="#909090">=None</font>, speed_in_pixels_per_second<font color="#909090">=800</font>, use_touch<font color="#909090">=False</font>, synthetic_gesture_source<font color="#909090">='DEFAULT'</font>)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><a name="DragAction-CleanUp"><strong>CleanUp</strong></a>(self, tab)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
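The pydoc above shows DragAction's full constructor signature and the WillRunAction/RunAction/CleanUp lifecycle it inherits from PageAction. A minimal sketch of driving it directly, assuming a connected telemetry.core.Tab is available as `tab` and using a made-up '#card' selector:

```python
# Sketch only: `tab` and the '#card' selector are assumptions, not part of
# the documented module. Parameters match DragAction.__init__ as documented.
from telemetry.internal.actions import drag

def run_drag(tab):
  action = drag.DragAction(
      selector='#card',
      left_start_ratio=0.25, top_start_ratio=0.25,  # start near top-left
      left_end_ratio=0.75, top_end_ratio=0.75,      # end near bottom-right
      speed_in_pixels_per_second=800,
      use_touch=True)
  action.WillRunAction(tab)  # action-specific setup
  action.RunAction(tab)      # perform the synthetic drag gesture
  action.CleanUp(tab)        # inherited from PageAction
```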
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.actions.html b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.html
new file mode 100644
index 0000000..0a52825
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.html
@@ -0,0 +1,52 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.internal.actions</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.actions</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/actions/__init__.py">telemetry/internal/actions/__init__.py</a></font></td></tr></table>
+    <p></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.actions.action_runner_unittest.html">action_runner_unittest</a><br>
+<a href="telemetry.internal.actions.drag.html">drag</a><br>
+<a href="telemetry.internal.actions.drag_unittest.html">drag_unittest</a><br>
+<a href="telemetry.internal.actions.javascript_click.html">javascript_click</a><br>
+<a href="telemetry.internal.actions.load_media.html">load_media</a><br>
+<a href="telemetry.internal.actions.load_media_unittest.html">load_media_unittest</a><br>
+<a href="telemetry.internal.actions.loop.html">loop</a><br>
+<a href="telemetry.internal.actions.loop_unittest.html">loop_unittest</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.actions.media_action.html">media_action</a><br>
+<a href="telemetry.internal.actions.mouse_click.html">mouse_click</a><br>
+<a href="telemetry.internal.actions.mouse_click_unittest.html">mouse_click_unittest</a><br>
+<a href="telemetry.internal.actions.navigate.html">navigate</a><br>
+<a href="telemetry.internal.actions.navigate_unittest.html">navigate_unittest</a><br>
+<a href="telemetry.internal.actions.page_action.html">page_action</a><br>
+<a href="telemetry.internal.actions.page_action_unittest.html">page_action_unittest</a><br>
+<a href="telemetry.internal.actions.pinch.html">pinch</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.actions.pinch_unittest.html">pinch_unittest</a><br>
+<a href="telemetry.internal.actions.play.html">play</a><br>
+<a href="telemetry.internal.actions.play_unittest.html">play_unittest</a><br>
+<a href="telemetry.internal.actions.repaint_continuously.html">repaint_continuously</a><br>
+<a href="telemetry.internal.actions.repeatable_scroll.html">repeatable_scroll</a><br>
+<a href="telemetry.internal.actions.repeatable_scroll_unittest.html">repeatable_scroll_unittest</a><br>
+<a href="telemetry.internal.actions.scroll.html">scroll</a><br>
+<a href="telemetry.internal.actions.scroll_bounce.html">scroll_bounce</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.actions.scroll_unittest.html">scroll_unittest</a><br>
+<a href="telemetry.internal.actions.seek.html">seek</a><br>
+<a href="telemetry.internal.actions.seek_unittest.html">seek_unittest</a><br>
+<a href="telemetry.internal.actions.swipe.html">swipe</a><br>
+<a href="telemetry.internal.actions.tap.html">tap</a><br>
+<a href="telemetry.internal.actions.wait.html">wait</a><br>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.actions.javascript_click.html b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.javascript_click.html
new file mode 100644
index 0000000..4765ecc
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.javascript_click.html
@@ -0,0 +1,73 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.actions.javascript_click</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.actions.html"><font color="#ffffff">actions</font></a>.javascript_click</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/actions/javascript_click.py">telemetry/internal/actions/javascript_click.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.actions.page_action.html">telemetry.internal.actions.page_action</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.javascript_click.html#ClickElementAction">ClickElementAction</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ClickElementAction">class <strong>ClickElementAction</strong></a>(<a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.actions.javascript_click.html#ClickElementAction">ClickElementAction</a></dd>
+<dd><a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="ClickElementAction-RunAction"><strong>RunAction</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="ClickElementAction-__init__"><strong>__init__</strong></a>(self, selector<font color="#909090">=None</font>, text<font color="#909090">=None</font>, element_function<font color="#909090">=None</font>)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><a name="ClickElementAction-CleanUp"><strong>CleanUp</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="ClickElementAction-WillRunAction"><strong>WillRunAction</strong></a>(self, tab)</dt><dd><tt>Override&nbsp;to&nbsp;do&nbsp;action-specific&nbsp;setup&nbsp;before<br>
+Test.WillRunAction&nbsp;is&nbsp;called.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
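ClickElementAction accepts the selector/text/element_function trio documented above. A brief sketch, assuming a telemetry.core.Tab named `tab` and a button whose visible text is 'Log in' (both placeholders):

```python
# Sketch only; `tab` and the 'Log in' text are illustrative placeholders.
from telemetry.internal.actions import javascript_click

def click_login(tab):
  # selector= or element_function= are the documented alternatives to text=.
  action = javascript_click.ClickElementAction(text='Log in')
  action.WillRunAction(tab)
  action.RunAction(tab)
```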
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.actions.load_media.html b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.load_media.html
new file mode 100644
index 0000000..7830363
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.load_media.html
@@ -0,0 +1,92 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.actions.load_media</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.actions.html"><font color="#ffffff">actions</font></a>.load_media</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/actions/load_media.py">telemetry/internal/actions/load_media.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.actions.media_action.html">telemetry.internal.actions.media_action</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.actions.page_action.html">telemetry.internal.actions.page_action</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.media_action.html#MediaAction">telemetry.internal.actions.media_action.MediaAction</a>(<a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.load_media.html#LoadMediaAction">LoadMediaAction</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="LoadMediaAction">class <strong>LoadMediaAction</strong></a>(<a href="telemetry.internal.actions.media_action.html#MediaAction">telemetry.internal.actions.media_action.MediaAction</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>For&nbsp;calling&nbsp;load()&nbsp;on&nbsp;media&nbsp;elements&nbsp;and&nbsp;waiting&nbsp;for&nbsp;an&nbsp;event&nbsp;to&nbsp;fire.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.actions.load_media.html#LoadMediaAction">LoadMediaAction</a></dd>
+<dd><a href="telemetry.internal.actions.media_action.html#MediaAction">telemetry.internal.actions.media_action.MediaAction</a></dd>
+<dd><a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="LoadMediaAction-RunAction"><strong>RunAction</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="LoadMediaAction-WillRunAction"><strong>WillRunAction</strong></a>(self, tab)</dt><dd><tt>Load&nbsp;the&nbsp;JS&nbsp;code&nbsp;prior&nbsp;to&nbsp;running&nbsp;the&nbsp;action.</tt></dd></dl>
+
+<dl><dt><a name="LoadMediaAction-__init__"><strong>__init__</strong></a>(self, selector<font color="#909090">=None</font>, timeout_in_seconds<font color="#909090">=0</font>, event_to_await<font color="#909090">='canplaythrough'</font>)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.actions.media_action.html#MediaAction">telemetry.internal.actions.media_action.MediaAction</a>:<br>
+<dl><dt><a name="LoadMediaAction-HasEventCompletedOrError"><strong>HasEventCompletedOrError</strong></a>(self, tab, selector, event_name)</dt></dl>
+
+<dl><dt><a name="LoadMediaAction-LoadJS"><strong>LoadJS</strong></a>(self, tab, js_file_name)</dt><dd><tt>Loads&nbsp;and&nbsp;executes&nbsp;a&nbsp;JS&nbsp;file&nbsp;in&nbsp;the&nbsp;tab.</tt></dd></dl>
+
+<dl><dt><a name="LoadMediaAction-WaitForEvent"><strong>WaitForEvent</strong></a>(self, tab, selector, event_name, timeout_in_seconds)</dt><dd><tt>Halts&nbsp;media&nbsp;action&nbsp;until&nbsp;the&nbsp;selector's&nbsp;event&nbsp;is&nbsp;fired.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;tab:&nbsp;The&nbsp;tab&nbsp;to&nbsp;check&nbsp;for&nbsp;event&nbsp;on.<br>
+&nbsp;&nbsp;selector:&nbsp;Media&nbsp;element&nbsp;selector.<br>
+&nbsp;&nbsp;event_name:&nbsp;Name&nbsp;of&nbsp;the&nbsp;event&nbsp;to&nbsp;check&nbsp;if&nbsp;fired&nbsp;or&nbsp;not.<br>
+&nbsp;&nbsp;timeout_in_seconds:&nbsp;Timeout&nbsp;to&nbsp;check&nbsp;for&nbsp;event,&nbsp;throws&nbsp;an&nbsp;exception&nbsp;if<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;not&nbsp;fired.</tt></dd></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><a name="LoadMediaAction-CleanUp"><strong>CleanUp</strong></a>(self, tab)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
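LoadMediaAction's constructor takes a selector, a timeout, and the media event to await (default 'canplaythrough'). A sketch, assuming `tab` is a telemetry.core.Tab and the page has a hypothetical '#movie' media element:

```python
# Sketch only; `tab` and '#movie' are placeholders for illustration.
from telemetry.internal.actions import load_media

def load_video(tab):
  action = load_media.LoadMediaAction(
      selector='#movie',
      timeout_in_seconds=30,
      event_to_await='canplaythrough')
  action.WillRunAction(tab)  # loads the JS code prior to running the action
  action.RunAction(tab)      # calls load() and waits for the event to fire
```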
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.actions.loop.html b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.loop.html
new file mode 100644
index 0000000..ca62f46
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.loop.html
@@ -0,0 +1,95 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.actions.loop</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.actions.html"><font color="#ffffff">actions</font></a>.loop</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/actions/loop.py">telemetry/internal/actions/loop.py</a></font></td></tr></table>
+    <p><tt>A&nbsp;Telemetry&nbsp;page_action&nbsp;that&nbsp;loops&nbsp;media&nbsp;playback.<br>
+&nbsp;<br>
+Action&nbsp;parameters&nbsp;are:<br>
+-&nbsp;loop_count:&nbsp;The&nbsp;number&nbsp;of&nbsp;times&nbsp;to&nbsp;loop&nbsp;media.<br>
+-&nbsp;selector:&nbsp;If&nbsp;no&nbsp;selector&nbsp;is&nbsp;defined&nbsp;then&nbsp;the&nbsp;action&nbsp;attempts&nbsp;to&nbsp;loop&nbsp;the&nbsp;first<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;media&nbsp;element&nbsp;on&nbsp;the&nbsp;page.&nbsp;If&nbsp;'all'&nbsp;then&nbsp;loop&nbsp;all&nbsp;media&nbsp;elements.<br>
+-&nbsp;timeout_in_seconds:&nbsp;Timeout&nbsp;to&nbsp;wait&nbsp;for&nbsp;media&nbsp;to&nbsp;loop.&nbsp;Default&nbsp;is<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;60&nbsp;sec&nbsp;x&nbsp;loop_count.&nbsp;0&nbsp;means&nbsp;do&nbsp;not&nbsp;wait.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.actions.media_action.html">telemetry.internal.actions.media_action</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.actions.page_action.html">telemetry.internal.actions.page_action</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.media_action.html#MediaAction">telemetry.internal.actions.media_action.MediaAction</a>(<a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.loop.html#LoopAction">LoopAction</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="LoopAction">class <strong>LoopAction</strong></a>(<a href="telemetry.internal.actions.media_action.html#MediaAction">telemetry.internal.actions.media_action.MediaAction</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.actions.loop.html#LoopAction">LoopAction</a></dd>
+<dd><a href="telemetry.internal.actions.media_action.html#MediaAction">telemetry.internal.actions.media_action.MediaAction</a></dd>
+<dd><a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="LoopAction-RunAction"><strong>RunAction</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="LoopAction-WillRunAction"><strong>WillRunAction</strong></a>(self, tab)</dt><dd><tt>Load&nbsp;the&nbsp;media&nbsp;metrics&nbsp;JS&nbsp;code&nbsp;prior&nbsp;to&nbsp;running&nbsp;the&nbsp;action.</tt></dd></dl>
+
+<dl><dt><a name="LoopAction-__init__"><strong>__init__</strong></a>(self, loop_count, selector<font color="#909090">=None</font>, timeout_in_seconds<font color="#909090">=None</font>)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.actions.media_action.html#MediaAction">telemetry.internal.actions.media_action.MediaAction</a>:<br>
+<dl><dt><a name="LoopAction-HasEventCompletedOrError"><strong>HasEventCompletedOrError</strong></a>(self, tab, selector, event_name)</dt></dl>
+
+<dl><dt><a name="LoopAction-LoadJS"><strong>LoadJS</strong></a>(self, tab, js_file_name)</dt><dd><tt>Loads&nbsp;and&nbsp;executes&nbsp;a&nbsp;JS&nbsp;file&nbsp;in&nbsp;the&nbsp;tab.</tt></dd></dl>
+
+<dl><dt><a name="LoopAction-WaitForEvent"><strong>WaitForEvent</strong></a>(self, tab, selector, event_name, timeout_in_seconds)</dt><dd><tt>Halts&nbsp;media&nbsp;action&nbsp;until&nbsp;the&nbsp;selector's&nbsp;event&nbsp;is&nbsp;fired.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;tab:&nbsp;The&nbsp;tab&nbsp;to&nbsp;check&nbsp;for&nbsp;event&nbsp;on.<br>
+&nbsp;&nbsp;selector:&nbsp;Media&nbsp;element&nbsp;selector.<br>
+&nbsp;&nbsp;event_name:&nbsp;Name&nbsp;of&nbsp;the&nbsp;event&nbsp;to&nbsp;check&nbsp;if&nbsp;fired&nbsp;or&nbsp;not.<br>
+&nbsp;&nbsp;timeout_in_seconds:&nbsp;Timeout&nbsp;to&nbsp;check&nbsp;for&nbsp;event,&nbsp;throws&nbsp;an&nbsp;exception&nbsp;if<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;not&nbsp;fired.</tt></dd></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><a name="LoopAction-CleanUp"><strong>CleanUp</strong></a>(self, tab)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
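LoopAction requires a loop_count; per the module docstring, selector='all' loops every media element and the timeout defaults to 60 seconds per loop. A sketch with an assumed `tab`:

```python
# Sketch only; `tab` is an assumed telemetry.core.Tab.
from telemetry.internal.actions import loop

def loop_all_media_twice(tab):
  # Leaving timeout_in_seconds=None keeps the documented default of
  # 60 seconds x loop_count.
  action = loop.LoopAction(loop_count=2, selector='all')
  action.WillRunAction(tab)  # loads the media metrics JS
  action.RunAction(tab)
```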
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.actions.media_action.html b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.media_action.html
new file mode 100644
index 0000000..80b6ea3
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.media_action.html
@@ -0,0 +1,84 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.actions.media_action</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.actions.html"><font color="#ffffff">actions</font></a>.media_action</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/actions/media_action.py">telemetry/internal/actions/media_action.py</a></font></td></tr></table>
+    <p><tt>Common&nbsp;media&nbsp;action&nbsp;functions.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.actions.page_action.html">telemetry.internal.actions.page_action</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.media_action.html#MediaAction">MediaAction</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MediaAction">class <strong>MediaAction</strong></a>(<a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.actions.media_action.html#MediaAction">MediaAction</a></dd>
+<dd><a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="MediaAction-HasEventCompletedOrError"><strong>HasEventCompletedOrError</strong></a>(self, tab, selector, event_name)</dt></dl>
+
+<dl><dt><a name="MediaAction-LoadJS"><strong>LoadJS</strong></a>(self, tab, js_file_name)</dt><dd><tt>Loads&nbsp;and&nbsp;executes&nbsp;a&nbsp;JS&nbsp;file&nbsp;in&nbsp;the&nbsp;tab.</tt></dd></dl>
+
+<dl><dt><a name="MediaAction-RunAction"><strong>RunAction</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="MediaAction-WaitForEvent"><strong>WaitForEvent</strong></a>(self, tab, selector, event_name, timeout_in_seconds)</dt><dd><tt>Halts&nbsp;media&nbsp;action&nbsp;until&nbsp;the&nbsp;selector's&nbsp;event&nbsp;is&nbsp;fired.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;tab:&nbsp;The&nbsp;tab&nbsp;to&nbsp;check&nbsp;for&nbsp;event&nbsp;on.<br>
+&nbsp;&nbsp;selector:&nbsp;Media&nbsp;element&nbsp;selector.<br>
+&nbsp;&nbsp;event_name:&nbsp;Name&nbsp;of&nbsp;the&nbsp;event&nbsp;to&nbsp;check&nbsp;if&nbsp;fired&nbsp;or&nbsp;not.<br>
+&nbsp;&nbsp;timeout_in_seconds:&nbsp;Timeout&nbsp;to&nbsp;check&nbsp;for&nbsp;event,&nbsp;throws&nbsp;an&nbsp;exception&nbsp;if<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;not&nbsp;fired.</tt></dd></dl>
+
+<dl><dt><a name="MediaAction-WillRunAction"><strong>WillRunAction</strong></a>(self, tab)</dt><dd><tt>Loads&nbsp;the&nbsp;common&nbsp;media&nbsp;action&nbsp;JS&nbsp;code&nbsp;prior&nbsp;to&nbsp;running&nbsp;the&nbsp;action.</tt></dd></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><a name="MediaAction-CleanUp"><strong>CleanUp</strong></a>(self, tab)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
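MediaAction is the shared base for the media page actions above, providing LoadJS, WaitForEvent and HasEventCompletedOrError. A sketch of how a new action could build on it, following the same pattern as LoadMediaAction and LoopAction; the PauseAction class, 'pause.js' helper and '#player' selector are all assumptions, not part of the documented API:

```python
# Sketch only: PauseAction, 'pause.js' and '#player' are hypothetical.
from telemetry.internal.actions import media_action

class PauseAction(media_action.MediaAction):
  def __init__(self, selector='#player', timeout_in_seconds=10):
    super(PauseAction, self).__init__()
    self._selector = selector
    self._timeout = timeout_in_seconds

  def WillRunAction(self, tab):
    # The base class loads the common media-action JS first.
    super(PauseAction, self).WillRunAction(tab)
    self.LoadJS(tab, 'pause.js')  # hypothetical helper script

  def RunAction(self, tab):
    # Block until the element fires 'pause', or raise on timeout.
    self.WaitForEvent(tab, self._selector, 'pause', self._timeout)
```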
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.actions.mouse_click.html b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.mouse_click.html
new file mode 100644
index 0000000..0d2d5b5
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.mouse_click.html
@@ -0,0 +1,81 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.actions.mouse_click</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.actions.html"><font color="#ffffff">actions</font></a>.mouse_click</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/actions/mouse_click.py">telemetry/internal/actions/mouse_click.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.actions.page_action.html">telemetry.internal.actions.page_action</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.mouse_click.html#MouseClickAction">MouseClickAction</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MouseClickAction">class <strong>MouseClickAction</strong></a>(<a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.actions.mouse_click.html#MouseClickAction">MouseClickAction</a></dd>
+<dd><a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="MouseClickAction-RunAction"><strong>RunAction</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="MouseClickAction-WillRunAction"><strong>WillRunAction</strong></a>(self, tab)</dt><dd><tt>Load&nbsp;the&nbsp;mouse&nbsp;click&nbsp;JS&nbsp;code&nbsp;prior&nbsp;to&nbsp;running&nbsp;the&nbsp;action.</tt></dd></dl>
+
+<dl><dt><a name="MouseClickAction-__init__"><strong>__init__</strong></a>(self, selector<font color="#909090">=None</font>)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><a name="MouseClickAction-CleanUp"><strong>CleanUp</strong></a>(self, tab)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-read_js"><strong>read_js</strong></a>()</dt></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
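MouseClickAction takes only a selector. A sketch assuming `tab` is a telemetry.core.Tab and '#submit' is a placeholder selector:

```python
# Sketch only; `tab` and '#submit' are illustrative placeholders.
from telemetry.internal.actions import mouse_click

def click_submit(tab):
  action = mouse_click.MouseClickAction(selector='#submit')
  action.WillRunAction(tab)  # loads the mouse click JS helper
  action.RunAction(tab)
```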
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.actions.navigate.html b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.navigate.html
new file mode 100644
index 0000000..3c11b2a
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.navigate.html
@@ -0,0 +1,74 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.actions.navigate</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.actions.html"><font color="#ffffff">actions</font></a>.navigate</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/actions/navigate.py">telemetry/internal/actions/navigate.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.actions.page_action.html">telemetry.internal.actions.page_action</a><br>
+</td><td width="25%" valign=top><a href="time.html">time</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.navigate.html#NavigateAction">NavigateAction</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="NavigateAction">class <strong>NavigateAction</strong></a>(<a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.actions.navigate.html#NavigateAction">NavigateAction</a></dd>
+<dd><a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="NavigateAction-RunAction"><strong>RunAction</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="NavigateAction-__init__"><strong>__init__</strong></a>(self, url, script_to_evaluate_on_commit<font color="#909090">=None</font>, timeout_in_seconds<font color="#909090">=60</font>)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><a name="NavigateAction-CleanUp"><strong>CleanUp</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="NavigateAction-WillRunAction"><strong>WillRunAction</strong></a>(self, tab)</dt><dd><tt>Override&nbsp;to&nbsp;do&nbsp;action-specific&nbsp;setup&nbsp;before<br>
+Test.WillRunAction&nbsp;is&nbsp;called.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
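NavigateAction takes a URL, an optional script to evaluate on commit, and a timeout (default 60 seconds). A sketch with an assumed `tab`, a placeholder URL, and a placeholder injected script:

```python
# Sketch only; `tab`, the URL and the injected script are placeholders.
from telemetry.internal.actions import navigate

def go_to_page(tab):
  action = navigate.NavigateAction(
      url='https://example.com/',
      script_to_evaluate_on_commit='window.__navStarted = true;',
      timeout_in_seconds=60)
  action.RunAction(tab)
```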
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.actions.page_action.html b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.page_action.html
new file mode 100644
index 0000000..40ba46f
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.page_action.html
@@ -0,0 +1,235 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.actions.page_action</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.actions.html"><font color="#ffffff">actions</font></a>.page_action</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/actions/page_action.py">telemetry/internal/actions/page_action.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.decorators.html">telemetry.decorators</a><br>
+</td><td width="25%" valign=top><a href="re.html">re</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.page_action.html#PageAction">PageAction</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.page_action.html#PageActionFailed">PageActionFailed</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.internal.actions.page_action.html#PageActionNotSupported">PageActionNotSupported</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PageAction">class <strong>PageAction</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Represents&nbsp;an&nbsp;action&nbsp;that&nbsp;a&nbsp;user&nbsp;might&nbsp;try&nbsp;to&nbsp;perform&nbsp;to&nbsp;a&nbsp;page.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="PageAction-CleanUp"><strong>CleanUp</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="PageAction-RunAction"><strong>RunAction</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="PageAction-WillRunAction"><strong>WillRunAction</strong></a>(self, tab)</dt><dd><tt>Override&nbsp;to&nbsp;do&nbsp;action-specific&nbsp;setup&nbsp;before<br>
+Test.WillRunAction&nbsp;is&nbsp;called.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PageActionFailed">class <strong>PageActionFailed</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.actions.page_action.html#PageActionFailed">PageActionFailed</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="PageActionFailed-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#PageActionFailed-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#PageActionFailed-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="PageActionFailed-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#PageActionFailed-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="PageActionFailed-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#PageActionFailed-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="PageActionFailed-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#PageActionFailed-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="PageActionFailed-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#PageActionFailed-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="PageActionFailed-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="PageActionFailed-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#PageActionFailed-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="PageActionFailed-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#PageActionFailed-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="PageActionFailed-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="PageActionFailed-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#PageActionFailed-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="PageActionFailed-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PageActionNotSupported">class <strong>PageActionNotSupported</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.actions.page_action.html#PageActionNotSupported">PageActionNotSupported</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="PageActionNotSupported-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#PageActionNotSupported-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#PageActionNotSupported-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="PageActionNotSupported-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#PageActionNotSupported-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="PageActionNotSupported-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#PageActionNotSupported-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="PageActionNotSupported-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#PageActionNotSupported-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="PageActionNotSupported-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#PageActionNotSupported-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="PageActionNotSupported-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="PageActionNotSupported-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#PageActionNotSupported-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="PageActionNotSupported-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#PageActionNotSupported-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="PageActionNotSupported-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="PageActionNotSupported-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#PageActionNotSupported-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="PageActionNotSupported-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-EvaluateCallbackWithElement"><strong>EvaluateCallbackWithElement</strong></a>(tab, callback_js, selector<font color="#909090">=None</font>, text<font color="#909090">=None</font>, element_function<font color="#909090">=None</font>, wait<font color="#909090">=False</font>, timeout_in_seconds<font color="#909090">=60</font>)</dt><dd><tt>Evaluates&nbsp;the&nbsp;JavaScript&nbsp;callback&nbsp;with&nbsp;the&nbsp;given&nbsp;element.<br>
+&nbsp;<br>
+The&nbsp;element&nbsp;may&nbsp;be&nbsp;selected&nbsp;via&nbsp;selector,&nbsp;text,&nbsp;or&nbsp;element_function.<br>
+Exactly&nbsp;one&nbsp;of&nbsp;these&nbsp;arguments&nbsp;must&nbsp;be&nbsp;specified.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;The&nbsp;callback's&nbsp;return&nbsp;value,&nbsp;if&nbsp;any.&nbsp;The&nbsp;return&nbsp;value&nbsp;must&nbsp;be<br>
+&nbsp;&nbsp;convertible&nbsp;to&nbsp;JSON.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;tab:&nbsp;A&nbsp;telemetry.core.Tab&nbsp;<a href="__builtin__.html#object">object</a>.<br>
+&nbsp;&nbsp;callback_js:&nbsp;The&nbsp;JavaScript&nbsp;callback&nbsp;to&nbsp;call&nbsp;(as&nbsp;string).<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;The&nbsp;callback&nbsp;receives&nbsp;2&nbsp;parameters:&nbsp;the&nbsp;element,&nbsp;and&nbsp;an&nbsp;information<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;string&nbsp;about&nbsp;what&nbsp;method&nbsp;was&nbsp;used&nbsp;to&nbsp;retrieve&nbsp;the&nbsp;element.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Example:&nbsp;'''<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;function(element,&nbsp;info)&nbsp;{<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;if&nbsp;(!element)&nbsp;{<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;throw&nbsp;Error('Can&nbsp;not&nbsp;find&nbsp;element:&nbsp;'&nbsp;+&nbsp;info);<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;}<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;element.click()<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;}'''<br>
+&nbsp;&nbsp;selector:&nbsp;A&nbsp;CSS&nbsp;selector&nbsp;describing&nbsp;the&nbsp;element.<br>
+&nbsp;&nbsp;text:&nbsp;The&nbsp;element&nbsp;must&nbsp;contain&nbsp;this&nbsp;exact&nbsp;text.<br>
+&nbsp;&nbsp;element_function:&nbsp;A&nbsp;JavaScript&nbsp;function&nbsp;(as&nbsp;string)&nbsp;that&nbsp;is&nbsp;used<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;to&nbsp;retrieve&nbsp;the&nbsp;element.&nbsp;For&nbsp;example:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;'(function()&nbsp;{&nbsp;return&nbsp;foo.element;&nbsp;})()'.<br>
+&nbsp;&nbsp;wait:&nbsp;Whether&nbsp;to&nbsp;wait&nbsp;for&nbsp;the&nbsp;return&nbsp;value&nbsp;to&nbsp;be&nbsp;true.<br>
+&nbsp;&nbsp;timeout_in_seconds:&nbsp;The&nbsp;timeout&nbsp;for&nbsp;wait&nbsp;(if&nbsp;waiting).</tt></dd></dl>
+ <dl><dt><a name="-IsGestureSourceTypeSupported"><strong>IsGestureSourceTypeSupported</strong></a>(*args, **kwargs)</dt></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>GESTURE_SOURCE_DEFAULT</strong> = 'DEFAULT'<br>
+<strong>GESTURE_SOURCE_MOUSE</strong> = 'MOUSE'<br>
+<strong>GESTURE_SOURCE_TOUCH</strong> = 'TOUCH'<br>
+<strong>SUPPORTED_GESTURE_SOURCES</strong> = ('DEFAULT', 'MOUSE', 'TOUCH')</td></tr></table>
+</body></html>
\ No newline at end of file
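Editor's note: a minimal usage sketch for the `EvaluateCallbackWithElement` helper and the gesture-source constants documented in the page_action module above. It assumes a live `tab` (a telemetry.core.Tab) obtained from a running browser session, which is not shown; the CSS selector and callback body are illustrative only.

```python
# Sketch only: `tab` is assumed to be a connected telemetry.core.Tab.
from telemetry.internal.actions import page_action

# Per the Args above, the callback receives the selected element and an info
# string describing how the element was retrieved.
CLICK_CALLBACK = '''
    function(element, info) {
      if (!element) {
        throw Error('Cannot find element: ' + info);
      }
      element.click();
    }'''

# Select the element by CSS selector; exactly one of selector / text /
# element_function should be passed. '#play-button' is a hypothetical selector.
page_action.EvaluateCallbackWithElement(
    tab, CLICK_CALLBACK, selector='#play-button')

# The module also exposes the supported synthetic gesture sources.
assert 'TOUCH' in page_action.SUPPORTED_GESTURE_SOURCES
```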
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.actions.pinch.html b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.pinch.html
new file mode 100644
index 0000000..c046f17
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.pinch.html
@@ -0,0 +1,73 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.actions.pinch</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.actions.html"><font color="#ffffff">actions</font></a>.pinch</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/actions/pinch.py">telemetry/internal/actions/pinch.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.actions.page_action.html">telemetry.internal.actions.page_action</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.pinch.html#PinchAction">PinchAction</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PinchAction">class <strong>PinchAction</strong></a>(<a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.actions.pinch.html#PinchAction">PinchAction</a></dd>
+<dd><a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="PinchAction-RunAction"><strong>RunAction</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="PinchAction-WillRunAction"><strong>WillRunAction</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="PinchAction-__init__"><strong>__init__</strong></a>(self, selector<font color="#909090">=None</font>, text<font color="#909090">=None</font>, element_function<font color="#909090">=None</font>, left_anchor_ratio<font color="#909090">=0.5</font>, top_anchor_ratio<font color="#909090">=0.5</font>, scale_factor<font color="#909090">=None</font>, speed_in_pixels_per_second<font color="#909090">=800</font>, synthetic_gesture_source<font color="#909090">='DEFAULT'</font>)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><a name="PinchAction-CleanUp"><strong>CleanUp</strong></a>(self, tab)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.actions.play.html b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.play.html
new file mode 100644
index 0000000..c810155
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.play.html
@@ -0,0 +1,96 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.actions.play</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.actions.html"><font color="#ffffff">actions</font></a>.play</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/actions/play.py">telemetry/internal/actions/play.py</a></font></td></tr></table>
+    <p><tt>A&nbsp;Telemetry&nbsp;page_action&nbsp;that&nbsp;performs&nbsp;the&nbsp;"play"&nbsp;action&nbsp;on&nbsp;media&nbsp;elements.<br>
+&nbsp;<br>
+Media&nbsp;elements&nbsp;can&nbsp;be&nbsp;specified&nbsp;by&nbsp;a&nbsp;selector&nbsp;argument.&nbsp;If&nbsp;no&nbsp;selector&nbsp;is<br>
+defined&nbsp;then&nbsp;the&nbsp;action&nbsp;attempts&nbsp;to&nbsp;play&nbsp;the&nbsp;first&nbsp;video&nbsp;element&nbsp;or&nbsp;audio<br>
+element&nbsp;on&nbsp;the&nbsp;page.&nbsp;A&nbsp;selector&nbsp;can&nbsp;also&nbsp;be&nbsp;'all'&nbsp;to&nbsp;play&nbsp;all&nbsp;media&nbsp;elements.<br>
+&nbsp;<br>
+Other&nbsp;arguments&nbsp;to&nbsp;use&nbsp;are:&nbsp;playing_event_timeout_in_seconds&nbsp;and<br>
+ended_event_timeout_in_seconds,&nbsp;which&nbsp;force&nbsp;the&nbsp;action&nbsp;to&nbsp;wait&nbsp;until&nbsp;the<br>
+playing&nbsp;and&nbsp;ended&nbsp;events&nbsp;are&nbsp;fired,&nbsp;respectively.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.actions.media_action.html">telemetry.internal.actions.media_action</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.actions.page_action.html">telemetry.internal.actions.page_action</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.media_action.html#MediaAction">telemetry.internal.actions.media_action.MediaAction</a>(<a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.play.html#PlayAction">PlayAction</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PlayAction">class <strong>PlayAction</strong></a>(<a href="telemetry.internal.actions.media_action.html#MediaAction">telemetry.internal.actions.media_action.MediaAction</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.actions.play.html#PlayAction">PlayAction</a></dd>
+<dd><a href="telemetry.internal.actions.media_action.html#MediaAction">telemetry.internal.actions.media_action.MediaAction</a></dd>
+<dd><a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="PlayAction-RunAction"><strong>RunAction</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="PlayAction-WillRunAction"><strong>WillRunAction</strong></a>(self, tab)</dt><dd><tt>Load&nbsp;the&nbsp;media&nbsp;metrics&nbsp;JS&nbsp;code&nbsp;prior&nbsp;to&nbsp;running&nbsp;the&nbsp;action.</tt></dd></dl>
+
+<dl><dt><a name="PlayAction-__init__"><strong>__init__</strong></a>(self, selector<font color="#909090">=None</font>, playing_event_timeout_in_seconds<font color="#909090">=0</font>, ended_event_timeout_in_seconds<font color="#909090">=0</font>)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.actions.media_action.html#MediaAction">telemetry.internal.actions.media_action.MediaAction</a>:<br>
+<dl><dt><a name="PlayAction-HasEventCompletedOrError"><strong>HasEventCompletedOrError</strong></a>(self, tab, selector, event_name)</dt></dl>
+
+<dl><dt><a name="PlayAction-LoadJS"><strong>LoadJS</strong></a>(self, tab, js_file_name)</dt><dd><tt>Loads&nbsp;and&nbsp;executes&nbsp;a&nbsp;JS&nbsp;file&nbsp;in&nbsp;the&nbsp;tab.</tt></dd></dl>
+
+<dl><dt><a name="PlayAction-WaitForEvent"><strong>WaitForEvent</strong></a>(self, tab, selector, event_name, timeout_in_seconds)</dt><dd><tt>Halts&nbsp;media&nbsp;action&nbsp;until&nbsp;the&nbsp;selector's&nbsp;event&nbsp;is&nbsp;fired.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;tab:&nbsp;The&nbsp;tab&nbsp;to&nbsp;check&nbsp;for&nbsp;event&nbsp;on.<br>
+&nbsp;&nbsp;selector:&nbsp;Media&nbsp;element&nbsp;selector.<br>
+&nbsp;&nbsp;event_name:&nbsp;Name&nbsp;of&nbsp;the&nbsp;event&nbsp;to&nbsp;check&nbsp;if&nbsp;fired&nbsp;or&nbsp;not.<br>
+&nbsp;&nbsp;timeout_in_seconds:&nbsp;Timeout&nbsp;to&nbsp;check&nbsp;for&nbsp;event,&nbsp;throws&nbsp;an&nbsp;exception&nbsp;if<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;not&nbsp;fired.</tt></dd></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><a name="PlayAction-CleanUp"><strong>CleanUp</strong></a>(self, tab)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
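Editor's note: a minimal sketch of the PlayAction constructor and methods documented above. Direct construction is illustrative only (benchmarks normally drive page actions through Telemetry's page machinery), and `tab` is assumed to be a connected telemetry.core.Tab.

```python
# Sketch only: `tab` is assumed to be a live telemetry.core.Tab.
from telemetry.internal.actions.play import PlayAction

# Play the first <video> element and wait, up to the given timeouts, for the
# 'playing' and 'ended' events described in the module docstring above.
action = PlayAction(selector='video',
                    playing_event_timeout_in_seconds=10,
                    ended_event_timeout_in_seconds=60)
action.WillRunAction(tab)   # loads the media metrics JS into the tab
action.RunAction(tab)
```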
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.actions.repaint_continuously.html b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.repaint_continuously.html
new file mode 100644
index 0000000..4a1e7dc
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.repaint_continuously.html
@@ -0,0 +1,79 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.actions.repaint_continuously</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.actions.html"><font color="#ffffff">actions</font></a>.repaint_continuously</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/actions/repaint_continuously.py">telemetry/internal/actions/repaint_continuously.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.actions.page_action.html">telemetry.internal.actions.page_action</a><br>
+</td><td width="25%" valign=top><a href="time.html">time</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.repaint_continuously.html#RepaintContinuouslyAction">RepaintContinuouslyAction</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="RepaintContinuouslyAction">class <strong>RepaintContinuouslyAction</strong></a>(<a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Continuously&nbsp;repaints&nbsp;the&nbsp;visible&nbsp;content&nbsp;by&nbsp;requesting&nbsp;animation&nbsp;frames<br>
+until&nbsp;self.<strong>seconds</strong>&nbsp;have&nbsp;elapsed&nbsp;AND&nbsp;at&nbsp;least&nbsp;three&nbsp;RAFs&nbsp;have&nbsp;been&nbsp;fired.&nbsp;Times<br>
+out&nbsp;after&nbsp;max(60,&nbsp;self.<strong>seconds</strong>),&nbsp;if&nbsp;less&nbsp;than&nbsp;three&nbsp;RAFs&nbsp;were&nbsp;fired.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.actions.repaint_continuously.html#RepaintContinuouslyAction">RepaintContinuouslyAction</a></dd>
+<dd><a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="RepaintContinuouslyAction-RunAction"><strong>RunAction</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="RepaintContinuouslyAction-__init__"><strong>__init__</strong></a>(self, seconds)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><a name="RepaintContinuouslyAction-CleanUp"><strong>CleanUp</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="RepaintContinuouslyAction-WillRunAction"><strong>WillRunAction</strong></a>(self, tab)</dt><dd><tt>Override&nbsp;to&nbsp;do&nbsp;action-specific&nbsp;setup&nbsp;before<br>
+Test.WillRunAction&nbsp;is&nbsp;called.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
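Editor's note: a minimal sketch of RepaintContinuouslyAction, based on the constructor and class docstring above; `tab` is again an assumed live telemetry.core.Tab.

```python
# Sketch only: `tab` is assumed to be a live telemetry.core.Tab.
from telemetry.internal.actions.repaint_continuously import (
    RepaintContinuouslyAction)

# Request animation frames for ~5 seconds (and at least three rAFs); per the
# class docstring, the action times out after max(60, seconds).
action = RepaintContinuouslyAction(seconds=5)
action.WillRunAction(tab)
action.RunAction(tab)
```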
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.actions.repeatable_scroll.html b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.repeatable_scroll.html
new file mode 100644
index 0000000..22179ba
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.repeatable_scroll.html
@@ -0,0 +1,73 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.actions.repeatable_scroll</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.actions.html"><font color="#ffffff">actions</font></a>.repeatable_scroll</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/actions/repeatable_scroll.py">telemetry/internal/actions/repeatable_scroll.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.actions.page_action.html">telemetry.internal.actions.page_action</a><br>
+</td><td width="25%" valign=top><a href="telemetry.web_perf.timeline_interaction_record.html">telemetry.web_perf.timeline_interaction_record</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.repeatable_scroll.html#RepeatableScrollAction">RepeatableScrollAction</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="RepeatableScrollAction">class <strong>RepeatableScrollAction</strong></a>(<a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.actions.repeatable_scroll.html#RepeatableScrollAction">RepeatableScrollAction</a></dd>
+<dd><a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="RepeatableScrollAction-RunAction"><strong>RunAction</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="RepeatableScrollAction-WillRunAction"><strong>WillRunAction</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="RepeatableScrollAction-__init__"><strong>__init__</strong></a>(self, x_scroll_distance_ratio<font color="#909090">=0.0</font>, y_scroll_distance_ratio<font color="#909090">=0.5</font>, repeat_count<font color="#909090">=0</font>, repeat_delay_ms<font color="#909090">=250</font>)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><a name="RepeatableScrollAction-CleanUp"><strong>CleanUp</strong></a>(self, tab)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.actions.scroll.html b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.scroll.html
new file mode 100644
index 0000000..4240094
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.scroll.html
@@ -0,0 +1,74 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.actions.scroll</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.actions.html"><font color="#ffffff">actions</font></a>.scroll</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/actions/scroll.py">telemetry/internal/actions/scroll.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.actions.page_action.html">telemetry.internal.actions.page_action</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.scroll.html#ScrollAction">ScrollAction</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ScrollAction">class <strong>ScrollAction</strong></a>(<a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.actions.scroll.html#ScrollAction">ScrollAction</a></dd>
+<dd><a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="ScrollAction-RunAction"><strong>RunAction</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="ScrollAction-WillRunAction"><strong>WillRunAction</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="ScrollAction-__init__"><strong>__init__</strong></a>(self, selector<font color="#909090">=None</font>, text<font color="#909090">=None</font>, element_function<font color="#909090">=None</font>, left_start_ratio<font color="#909090">=0.5</font>, top_start_ratio<font color="#909090">=0.5</font>, direction<font color="#909090">='down'</font>, distance<font color="#909090">=None</font>, distance_expr<font color="#909090">=None</font>, speed_in_pixels_per_second<font color="#909090">=800</font>, use_touch<font color="#909090">=False</font>, synthetic_gesture_source<font color="#909090">='DEFAULT'</font>)</dt><dd><tt>#&nbsp;TODO(chrishenry):&nbsp;Ignore&nbsp;attributes,&nbsp;to&nbsp;be&nbsp;deleted&nbsp;when&nbsp;usage&nbsp;in<br>
+#&nbsp;other&nbsp;repo&nbsp;is&nbsp;cleaned&nbsp;up.</tt></dd></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><a name="ScrollAction-CleanUp"><strong>CleanUp</strong></a>(self, tab)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
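Editor's note: a minimal sketch of ScrollAction, using only parameters from the constructor signature documented above; the chosen values and the `tab` variable are illustrative assumptions.

```python
# Sketch only: `tab` is assumed to be a live telemetry.core.Tab.
from telemetry.internal.actions.scroll import ScrollAction

# Scroll the page down by 1000 pixels using touch input at the default speed.
action = ScrollAction(direction='down', distance=1000, use_touch=True,
                      speed_in_pixels_per_second=800)
action.WillRunAction(tab)
action.RunAction(tab)
```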
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.actions.scroll_bounce.html b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.scroll_bounce.html
new file mode 100644
index 0000000..bb44b14
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.scroll_bounce.html
@@ -0,0 +1,73 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.actions.scroll_bounce</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.actions.html"><font color="#ffffff">actions</font></a>.scroll_bounce</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/actions/scroll_bounce.py">telemetry/internal/actions/scroll_bounce.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.actions.page_action.html">telemetry.internal.actions.page_action</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.scroll_bounce.html#ScrollBounceAction">ScrollBounceAction</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ScrollBounceAction">class <strong>ScrollBounceAction</strong></a>(<a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.actions.scroll_bounce.html#ScrollBounceAction">ScrollBounceAction</a></dd>
+<dd><a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="ScrollBounceAction-RunAction"><strong>RunAction</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="ScrollBounceAction-WillRunAction"><strong>WillRunAction</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="ScrollBounceAction-__init__"><strong>__init__</strong></a>(self, selector<font color="#909090">=None</font>, text<font color="#909090">=None</font>, element_function<font color="#909090">=None</font>, left_start_ratio<font color="#909090">=0.5</font>, top_start_ratio<font color="#909090">=0.5</font>, direction<font color="#909090">='down'</font>, distance<font color="#909090">=100</font>, overscroll<font color="#909090">=10</font>, repeat_count<font color="#909090">=10</font>, speed_in_pixels_per_second<font color="#909090">=400</font>, synthetic_gesture_source<font color="#909090">='DEFAULT'</font>)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><a name="ScrollBounceAction-CleanUp"><strong>CleanUp</strong></a>(self, tab)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.actions.seek.html b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.seek.html
new file mode 100644
index 0000000..42b7d31
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.seek.html
@@ -0,0 +1,100 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.actions.seek</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.actions.html"><font color="#ffffff">actions</font></a>.seek</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/actions/seek.py">telemetry/internal/actions/seek.py</a></font></td></tr></table>
+    <p><tt>A&nbsp;Telemetry&nbsp;page_action&nbsp;that&nbsp;performs&nbsp;the&nbsp;"seek"&nbsp;action&nbsp;on&nbsp;media&nbsp;elements.<br>
+&nbsp;<br>
+Action&nbsp;parameters&nbsp;are:<br>
+-&nbsp;seconds:&nbsp;The&nbsp;media&nbsp;time&nbsp;to&nbsp;seek&nbsp;to.&nbsp;Test&nbsp;fails&nbsp;if&nbsp;not&nbsp;provided.<br>
+-&nbsp;selector:&nbsp;If&nbsp;no&nbsp;selector&nbsp;is&nbsp;defined&nbsp;then&nbsp;the&nbsp;action&nbsp;attempts&nbsp;to&nbsp;seek&nbsp;the&nbsp;first<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;media&nbsp;element&nbsp;on&nbsp;the&nbsp;page.&nbsp;If&nbsp;'all'&nbsp;then&nbsp;seek&nbsp;all&nbsp;media&nbsp;elements.<br>
+-&nbsp;timeout_in_seconds:&nbsp;Maximum&nbsp;waiting&nbsp;time&nbsp;for&nbsp;the&nbsp;"seeked"&nbsp;event<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;(dispatched&nbsp;when&nbsp;the&nbsp;seek&nbsp;operation&nbsp;completes)<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;to&nbsp;be&nbsp;fired.&nbsp;&nbsp;0&nbsp;means&nbsp;do&nbsp;not&nbsp;wait.<br>
+-&nbsp;log_time:&nbsp;If&nbsp;true&nbsp;the&nbsp;seek&nbsp;time&nbsp;is&nbsp;recorded,&nbsp;otherwise&nbsp;media<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;measurement&nbsp;will&nbsp;not&nbsp;be&nbsp;aware&nbsp;of&nbsp;the&nbsp;seek&nbsp;action.&nbsp;Used&nbsp;to<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;perform&nbsp;multiple&nbsp;seeks.&nbsp;Default&nbsp;true.<br>
+-&nbsp;label:&nbsp;A&nbsp;suffix&nbsp;string&nbsp;to&nbsp;name&nbsp;the&nbsp;seek&nbsp;perf&nbsp;measurement.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.actions.media_action.html">telemetry.internal.actions.media_action</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.actions.page_action.html">telemetry.internal.actions.page_action</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.media_action.html#MediaAction">telemetry.internal.actions.media_action.MediaAction</a>(<a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.seek.html#SeekAction">SeekAction</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="SeekAction">class <strong>SeekAction</strong></a>(<a href="telemetry.internal.actions.media_action.html#MediaAction">telemetry.internal.actions.media_action.MediaAction</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.actions.seek.html#SeekAction">SeekAction</a></dd>
+<dd><a href="telemetry.internal.actions.media_action.html#MediaAction">telemetry.internal.actions.media_action.MediaAction</a></dd>
+<dd><a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="SeekAction-RunAction"><strong>RunAction</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="SeekAction-WillRunAction"><strong>WillRunAction</strong></a>(self, tab)</dt><dd><tt>Load&nbsp;the&nbsp;media&nbsp;metrics&nbsp;JS&nbsp;code&nbsp;prior&nbsp;to&nbsp;running&nbsp;the&nbsp;action.</tt></dd></dl>
+
+<dl><dt><a name="SeekAction-__init__"><strong>__init__</strong></a>(self, seconds, selector<font color="#909090">=None</font>, timeout_in_seconds<font color="#909090">=0</font>, log_time<font color="#909090">=True</font>, label<font color="#909090">=''</font>)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.actions.media_action.html#MediaAction">telemetry.internal.actions.media_action.MediaAction</a>:<br>
+<dl><dt><a name="SeekAction-HasEventCompletedOrError"><strong>HasEventCompletedOrError</strong></a>(self, tab, selector, event_name)</dt></dl>
+
+<dl><dt><a name="SeekAction-LoadJS"><strong>LoadJS</strong></a>(self, tab, js_file_name)</dt><dd><tt>Loads&nbsp;and&nbsp;executes&nbsp;a&nbsp;JS&nbsp;file&nbsp;in&nbsp;the&nbsp;tab.</tt></dd></dl>
+
+<dl><dt><a name="SeekAction-WaitForEvent"><strong>WaitForEvent</strong></a>(self, tab, selector, event_name, timeout_in_seconds)</dt><dd><tt>Halts&nbsp;media&nbsp;action&nbsp;until&nbsp;the&nbsp;selector's&nbsp;event&nbsp;is&nbsp;fired.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;tab:&nbsp;The&nbsp;tab&nbsp;to&nbsp;check&nbsp;for&nbsp;event&nbsp;on.<br>
+&nbsp;&nbsp;selector:&nbsp;Media&nbsp;element&nbsp;selector.<br>
+&nbsp;&nbsp;event_name:&nbsp;Name&nbsp;of&nbsp;the&nbsp;event&nbsp;to&nbsp;check&nbsp;if&nbsp;fired&nbsp;or&nbsp;not.<br>
+&nbsp;&nbsp;timeout_in_seconds:&nbsp;Timeout&nbsp;to&nbsp;check&nbsp;for&nbsp;event,&nbsp;throws&nbsp;an&nbsp;exception&nbsp;if<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;not&nbsp;fired.</tt></dd></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><a name="SeekAction-CleanUp"><strong>CleanUp</strong></a>(self, tab)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
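Editor's note: a minimal sketch of SeekAction, following the constructor signature and the action parameters described in the module docstring above; `tab` and the selector/label values are illustrative assumptions.

```python
# Sketch only: `tab` is assumed to be a live telemetry.core.Tab.
from telemetry.internal.actions.seek import SeekAction

# Seek the first <video> element to t=30s and wait up to 10s for the 'seeked'
# event; the label suffixes the recorded seek measurement.
action = SeekAction(seconds=30, selector='video', timeout_in_seconds=10,
                    label='seek_warm')
action.WillRunAction(tab)   # loads the media metrics JS
action.RunAction(tab)
```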
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.actions.swipe.html b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.swipe.html
new file mode 100644
index 0000000..fe542d5
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.swipe.html
@@ -0,0 +1,73 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.actions.swipe</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.actions.html"><font color="#ffffff">actions</font></a>.swipe</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/actions/swipe.py">telemetry/internal/actions/swipe.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.actions.page_action.html">telemetry.internal.actions.page_action</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.swipe.html#SwipeAction">SwipeAction</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="SwipeAction">class <strong>SwipeAction</strong></a>(<a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.actions.swipe.html#SwipeAction">SwipeAction</a></dd>
+<dd><a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="SwipeAction-RunAction"><strong>RunAction</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="SwipeAction-WillRunAction"><strong>WillRunAction</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="SwipeAction-__init__"><strong>__init__</strong></a>(self, selector<font color="#909090">=None</font>, text<font color="#909090">=None</font>, element_function<font color="#909090">=None</font>, left_start_ratio<font color="#909090">=0.5</font>, top_start_ratio<font color="#909090">=0.5</font>, direction<font color="#909090">='left'</font>, distance<font color="#909090">=100</font>, speed_in_pixels_per_second<font color="#909090">=800</font>, synthetic_gesture_source<font color="#909090">='DEFAULT'</font>)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><a name="SwipeAction-CleanUp"><strong>CleanUp</strong></a>(self, tab)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.actions.tap.html b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.tap.html
new file mode 100644
index 0000000..166ac78
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.tap.html
@@ -0,0 +1,75 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.actions.tap</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.actions.html"><font color="#ffffff">actions</font></a>.tap</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/actions/tap.py">telemetry/internal/actions/tap.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.actions.page_action.html">telemetry.internal.actions.page_action</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.tap.html#TapAction">TapAction</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TapAction">class <strong>TapAction</strong></a>(<a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.actions.tap.html#TapAction">TapAction</a></dd>
+<dd><a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="TapAction-HasElementSelector"><strong>HasElementSelector</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TapAction-RunAction"><strong>RunAction</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="TapAction-WillRunAction"><strong>WillRunAction</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="TapAction-__init__"><strong>__init__</strong></a>(self, selector<font color="#909090">=None</font>, text<font color="#909090">=None</font>, element_function<font color="#909090">=None</font>, left_position_percentage<font color="#909090">=0.5</font>, top_position_percentage<font color="#909090">=0.5</font>, duration_ms<font color="#909090">=50</font>, synthetic_gesture_source<font color="#909090">='DEFAULT'</font>)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><a name="TapAction-CleanUp"><strong>CleanUp</strong></a>(self, tab)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.actions.wait.html b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.wait.html
new file mode 100644
index 0000000..6294c76
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.actions.wait.html
@@ -0,0 +1,73 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.actions.wait</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.actions.html"><font color="#ffffff">actions</font></a>.wait</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/actions/wait.py">telemetry/internal/actions/wait.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.actions.page_action.html">telemetry.internal.actions.page_action</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.actions.wait.html#WaitForElementAction">WaitForElementAction</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="WaitForElementAction">class <strong>WaitForElementAction</strong></a>(<a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.actions.wait.html#WaitForElementAction">WaitForElementAction</a></dd>
+<dd><a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="WaitForElementAction-RunAction"><strong>RunAction</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="WaitForElementAction-__init__"><strong>__init__</strong></a>(self, selector<font color="#909090">=None</font>, text<font color="#909090">=None</font>, element_function<font color="#909090">=None</font>, timeout_in_seconds<font color="#909090">=60</font>)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><a name="WaitForElementAction-CleanUp"><strong>CleanUp</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="WaitForElementAction-WillRunAction"><strong>WillRunAction</strong></a>(self, tab)</dt><dd><tt>Override&nbsp;to&nbsp;do&nbsp;action-specific&nbsp;setup&nbsp;before<br>
+Test.WillRunAction&nbsp;is&nbsp;called.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.actions.page_action.html#PageAction">telemetry.internal.actions.page_action.PageAction</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
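WaitForElementAction above takes the same element-identifying arguments plus timeout_in_seconds. A sketch of waiting for an element to appear, again assuming a live `tab` and a placeholder selector:

    from telemetry.internal.actions.wait import WaitForElementAction

    def wait_for_results(tab):
      # Waits until an element matching the selector exists in the page,
      # giving up after timeout_in_seconds (60 by default per the signature).
      action = WaitForElementAction(selector='.results', timeout_in_seconds=30)
      action.WillRunAction(tab)
      action.RunAction(tab)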
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.app.android_app.html b/catapult/telemetry/docs/pydoc/telemetry.internal.app.android_app.html
new file mode 100644
index 0000000..4aa1e0d
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.app.android_app.html
@@ -0,0 +1,93 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.app.android_app</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.app.html"><font color="#ffffff">app</font></a>.android_app</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/app/android_app.py">telemetry/internal/app/android_app.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.app.html">telemetry.internal.app</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.app.html#App">telemetry.internal.app.App</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.app.android_app.html#AndroidApp">AndroidApp</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="AndroidApp">class <strong>AndroidApp</strong></a>(<a href="telemetry.internal.app.html#App">telemetry.internal.app.App</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;running&nbsp;android&nbsp;app&nbsp;instance&nbsp;that&nbsp;can&nbsp;be&nbsp;controlled&nbsp;in&nbsp;a&nbsp;limited&nbsp;way.<br>
+&nbsp;<br>
+Be&nbsp;sure&nbsp;to&nbsp;clean&nbsp;up&nbsp;after&nbsp;yourself&nbsp;by&nbsp;calling&nbsp;<a href="#AndroidApp-Close">Close</a>()&nbsp;when&nbsp;you&nbsp;are&nbsp;done&nbsp;with<br>
+the&nbsp;app.&nbsp;Or&nbsp;better&nbsp;yet:<br>
+&nbsp;&nbsp;with&nbsp;possible_android_app.Create(options)&nbsp;as&nbsp;android_app:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;...&nbsp;do&nbsp;all&nbsp;your&nbsp;operations&nbsp;on&nbsp;android_app&nbsp;here<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.app.android_app.html#AndroidApp">AndroidApp</a></dd>
+<dd><a href="telemetry.internal.app.html#App">telemetry.internal.app.App</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="AndroidApp-Close"><strong>Close</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidApp-GetProcess"><strong>GetProcess</strong></a>(self, subprocess_name)</dt><dd><tt>Returns&nbsp;the&nbsp;process&nbsp;with&nbsp;the&nbsp;specified&nbsp;subprocess&nbsp;name.</tt></dd></dl>
+
+<dl><dt><a name="AndroidApp-GetProcesses"><strong>GetProcesses</strong></a>(self)</dt><dd><tt>Returns&nbsp;the&nbsp;current&nbsp;set&nbsp;of&nbsp;processes&nbsp;belonging&nbsp;to&nbsp;this&nbsp;app.</tt></dd></dl>
+
+<dl><dt><a name="AndroidApp-GetWebViews"><strong>GetWebViews</strong></a>(self)</dt><dd><tt>Returns&nbsp;the&nbsp;set&nbsp;of&nbsp;all&nbsp;WebViews&nbsp;belonging&nbsp;to&nbsp;all&nbsp;processes&nbsp;of&nbsp;the&nbsp;app.</tt></dd></dl>
+
+<dl><dt><a name="AndroidApp-__init__"><strong>__init__</strong></a>(self, app_backend, platform_backend)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.app.html#App">telemetry.internal.app.App</a>:<br>
+<dl><dt><a name="AndroidApp-GetStackTrace"><strong>GetStackTrace</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidApp-GetStandardOutput"><strong>GetStandardOutput</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidApp-__enter__"><strong>__enter__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidApp-__exit__"><strong>__exit__</strong></a>(self, *args)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.app.html#App">telemetry.internal.app.App</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>app_type</strong></dt>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
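The AndroidApp docstring above recommends the context-manager form so Close() is never missed. A sketch of that usage, where `possible_android_app` and `options` are assumed to come from the surrounding harness and 'browser' is a placeholder subprocess name:

    def inspect_app(possible_android_app, options):
      # Close() is called automatically when the with-block exits.
      with possible_android_app.Create(options) as android_app:
        for process in android_app.GetProcesses():
          print('%s (pid %s)' % (process.name, process.pid))
        webviews = android_app.GetWebViews()         # WebViews across all processes
        browser = android_app.GetProcess('browser')  # lookup by subprocess name
        return webviews, browser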
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.app.android_process.html b/catapult/telemetry/docs/pydoc/telemetry.internal.app.android_process.html
new file mode 100644
index 0000000..993c2d5
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.app.android_process.html
@@ -0,0 +1,132 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.app.android_process</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.app.html"><font color="#ffffff">app</font></a>.android_process</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/app/android_process.py">telemetry/internal/app/android_process.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.backends.chrome_inspector.devtools_client_backend.html">telemetry.internal.backends.chrome_inspector.devtools_client_backend</a><br>
+</td><td width="25%" valign=top><a href="devil.android.ports.html">devil.android.ports</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.browser.web_contents.html">telemetry.internal.browser.web_contents</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.app.android_process.html#AndroidProcess">AndroidProcess</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.app.android_process.html#WebViewNotFoundException">WebViewNotFoundException</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="AndroidProcess">class <strong>AndroidProcess</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Represents&nbsp;a&nbsp;single&nbsp;android&nbsp;process.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="AndroidProcess-GetWebViews"><strong>GetWebViews</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidProcess-__init__"><strong>__init__</strong></a>(self, app_backend, pid, name)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>name</strong></dt>
+</dl>
+<dl><dt><strong>pid</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="WebViewNotFoundException">class <strong>WebViewNotFoundException</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.app.android_process.html#WebViewNotFoundException">WebViewNotFoundException</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="WebViewNotFoundException-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#WebViewNotFoundException-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#WebViewNotFoundException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="WebViewNotFoundException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#WebViewNotFoundException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="WebViewNotFoundException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#WebViewNotFoundException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="WebViewNotFoundException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#WebViewNotFoundException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="WebViewNotFoundException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#WebViewNotFoundException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="WebViewNotFoundException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="WebViewNotFoundException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#WebViewNotFoundException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="WebViewNotFoundException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#WebViewNotFoundException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="WebViewNotFoundException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="WebViewNotFoundException-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#WebViewNotFoundException-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="WebViewNotFoundException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
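AndroidProcess above exposes per-process GetWebViews(), and the module defines WebViewNotFoundException. A sketch that lists WebViews per process and guards against that exception; whether a particular call actually raises it depends on the implementation, so treat this purely as an illustration:

    from telemetry.internal.app.android_process import WebViewNotFoundException

    def dump_webviews(android_app):
      for process in android_app.GetProcesses():
        try:
          for webview in process.GetWebViews():
            print('%s: %s' % (process.name, webview))
        except WebViewNotFoundException:
          print('%s: no WebView found' % process.name)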
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.app.html b/catapult/telemetry/docs/pydoc/telemetry.internal.app.html
new file mode 100644
index 0000000..460b3ff
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.app.html
@@ -0,0 +1,82 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.internal.app</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.app</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/app/__init__.py">telemetry/internal/app/__init__.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.app.android_app.html">android_app</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.app.android_app_unittest.html">android_app_unittest</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.app.android_process.html">android_process</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.app.possible_app.html">possible_app</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.app.html#App">App</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="App">class <strong>App</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;running&nbsp;application&nbsp;instance&nbsp;that&nbsp;can&nbsp;be&nbsp;controlled&nbsp;in&nbsp;a&nbsp;limited&nbsp;way.<br>
+&nbsp;<br>
+Be&nbsp;sure&nbsp;to&nbsp;clean&nbsp;up&nbsp;after&nbsp;yourself&nbsp;by&nbsp;calling&nbsp;<a href="#App-Close">Close</a>()&nbsp;when&nbsp;you&nbsp;are&nbsp;done&nbsp;with<br>
+the&nbsp;app.&nbsp;Or&nbsp;better&nbsp;yet:<br>
+&nbsp;&nbsp;with&nbsp;possible_app.Create(options)&nbsp;as&nbsp;app:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;...&nbsp;do&nbsp;all&nbsp;your&nbsp;operations&nbsp;on&nbsp;app&nbsp;here<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="App-Close"><strong>Close</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="App-GetStackTrace"><strong>GetStackTrace</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="App-GetStandardOutput"><strong>GetStandardOutput</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="App-__enter__"><strong>__enter__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="App-__exit__"><strong>__exit__</strong></a>(self, *args)</dt></dl>
+
+<dl><dt><a name="App-__init__"><strong>__init__</strong></a>(self, app_backend, platform_backend)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>app_type</strong></dt>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.app.possible_app.html b/catapult/telemetry/docs/pydoc/telemetry.internal.app.possible_app.html
new file mode 100644
index 0000000..da762aa
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.app.possible_app.html
@@ -0,0 +1,67 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.app.possible_app</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.app.html"><font color="#ffffff">app</font></a>.possible_app</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/app/possible_app.py">telemetry/internal/app/possible_app.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.app.possible_app.html#PossibleApp">PossibleApp</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PossibleApp">class <strong>PossibleApp</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;factory&nbsp;class&nbsp;that&nbsp;can&nbsp;be&nbsp;used&nbsp;to&nbsp;create&nbsp;a&nbsp;running&nbsp;instance&nbsp;of&nbsp;app.<br>
+&nbsp;<br>
+Call&nbsp;<a href="#PossibleApp-Create">Create</a>()&nbsp;to&nbsp;launch&nbsp;the&nbsp;app&nbsp;and&nbsp;begin&nbsp;manipulating&nbsp;it.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="PossibleApp-Create"><strong>Create</strong></a>(self, finder_options)</dt></dl>
+
+<dl><dt><a name="PossibleApp-SupportsOptions"><strong>SupportsOptions</strong></a>(self, finder_options)</dt><dd><tt>Tests&nbsp;for&nbsp;extension&nbsp;support.</tt></dd></dl>
+
+<dl><dt><a name="PossibleApp-__init__"><strong>__init__</strong></a>(self, app_type, target_os)</dt></dl>
+
+<dl><dt><a name="PossibleApp-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>app_type</strong></dt>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+</dl>
+<dl><dt><strong>target_os</strong></dt>
+<dd><tt>Target&nbsp;OS,&nbsp;the&nbsp;app&nbsp;will&nbsp;run&nbsp;on.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
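PossibleApp is documented above as a factory: SupportsOptions() filters candidates and Create() launches the app. A sketch of that pattern, where `candidates` (an iterable of PossibleApp instances) and `finder_options` are assumed to be supplied by the surrounding discovery code:

    def create_first_supported(candidates, finder_options):
      for possible_app in candidates:
        if possible_app.SupportsOptions(finder_options):
          # Create() launches the app and returns a running App instance.
          return possible_app.Create(finder_options)
      return None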
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.android_app_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.android_app_backend.html
new file mode 100644
index 0000000..d2c2a06
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.android_app_backend.html
@@ -0,0 +1,111 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.android_app_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.android_app_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/android_app_backend.py">telemetry/internal/backends/android_app_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.backends.android_browser_backend_settings.html">telemetry.internal.backends.android_browser_backend_settings</a><br>
+<a href="telemetry.internal.backends.android_command_line_backend.html">telemetry.internal.backends.android_command_line_backend</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.app.android_process.html">telemetry.internal.app.android_process</a><br>
+<a href="telemetry.internal.backends.app_backend.html">telemetry.internal.backends.app_backend</a><br>
+</td><td width="25%" valign=top><a href="re.html">re</a><br>
+<a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.android_app_backend.html#AndroidAppBackend">AndroidAppBackend</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="AndroidAppBackend">class <strong>AndroidAppBackend</strong></a>(<a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.android_app_backend.html#AndroidAppBackend">AndroidAppBackend</a></dd>
+<dd><a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="AndroidAppBackend-Close"><strong>Close</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidAppBackend-GetProcess"><strong>GetProcess</strong></a>(self, subprocess_name)</dt></dl>
+
+<dl><dt><a name="AndroidAppBackend-GetProcesses"><strong>GetProcesses</strong></a>(self, process_filter<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="AndroidAppBackend-GetStackTrace"><strong>GetStackTrace</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidAppBackend-GetStandardOutput"><strong>GetStandardOutput</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidAppBackend-GetWebViews"><strong>GetWebViews</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidAppBackend-GetWebviewStartupArgs"><strong>GetWebviewStartupArgs</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidAppBackend-IsAppRunning"><strong>IsAppRunning</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidAppBackend-Start"><strong>Start</strong></a>(self)</dt><dd><tt>Start&nbsp;an&nbsp;Android&nbsp;app&nbsp;and&nbsp;wait&nbsp;for&nbsp;it&nbsp;to&nbsp;finish&nbsp;launching.<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;app&nbsp;has&nbsp;webviews,&nbsp;the&nbsp;app&nbsp;is&nbsp;launched&nbsp;with&nbsp;the&nbsp;suitable<br>
+command&nbsp;line&nbsp;arguments.<br>
+&nbsp;<br>
+AppStory&nbsp;derivations&nbsp;can&nbsp;customize&nbsp;the&nbsp;wait-for-ready-state&nbsp;to&nbsp;wait<br>
+for&nbsp;a&nbsp;more&nbsp;specific&nbsp;event&nbsp;if&nbsp;needed.</tt></dd></dl>
+
+<dl><dt><a name="AndroidAppBackend-__init__"><strong>__init__</strong></a>(self, android_platform_backend, start_intent, is_app_ready_predicate<font color="#909090">=None</font>, app_has_webviews<font color="#909090">=True</font>)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>device</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a>:<br>
+<dl><dt><a name="AndroidAppBackend-SetApp"><strong>SetApp</strong></a>(self, app)</dt></dl>
+
+<dl><dt><a name="AndroidAppBackend-__del__"><strong>__del__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>app</strong></dt>
+</dl>
+<dl><dt><strong>app_type</strong></dt>
+</dl>
+<dl><dt><strong>pid</strong></dt>
+</dl>
+<dl><dt><strong>platform_backend</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
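Per the Start() docstring above, an app that hosts WebViews is launched with suitable command-line arguments and the backend waits for it to finish launching. A sketch of constructing and starting the backend, assuming `android_platform_backend` and `start_intent` are provided by the platform and intent-building layers (neither is built here):

    from telemetry.internal.backends.android_app_backend import AndroidAppBackend

    def launch(android_platform_backend, start_intent):
      backend = AndroidAppBackend(
          android_platform_backend, start_intent, app_has_webviews=True)
      backend.Start()               # launches the app and waits for readiness
      return backend.GetWebViews()  # WebViews found in the running app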
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.android_browser_backend_settings.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.android_browser_backend_settings.html
new file mode 100644
index 0000000..771d1e0
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.android_browser_backend_settings.html
@@ -0,0 +1,247 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.android_browser_backend_settings</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.android_browser_backend_settings</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/android_browser_backend_settings.py">telemetry/internal/backends/android_browser_backend_settings.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="time.html">time</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.android_browser_backend_settings.html#AndroidBrowserBackendSettings">AndroidBrowserBackendSettings</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.android_browser_backend_settings.html#ChromeBackendSettings">ChromeBackendSettings</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.internal.backends.android_browser_backend_settings.html#ContentShellBackendSettings">ContentShellBackendSettings</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.internal.backends.android_browser_backend_settings.html#WebviewBackendSettings">WebviewBackendSettings</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.android_browser_backend_settings.html#WebviewShellBackendSettings">WebviewShellBackendSettings</a>
+</font></dt></dl>
+</dd>
+</dl>
+</dd>
+</dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="AndroidBrowserBackendSettings">class <strong>AndroidBrowserBackendSettings</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="AndroidBrowserBackendSettings-GetCommandLineFile"><strong>GetCommandLineFile</strong></a>(self, is_user_debug_build)</dt></dl>
+
+<dl><dt><a name="AndroidBrowserBackendSettings-GetDevtoolsRemotePort"><strong>GetDevtoolsRemotePort</strong></a>(self, device)</dt></dl>
+
+<dl><dt><a name="AndroidBrowserBackendSettings-__init__"><strong>__init__</strong></a>(self, activity, cmdline_file, package, pseudo_exec_name, supports_tab_control)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>activity</strong></dt>
+</dl>
+<dl><dt><strong>package</strong></dt>
+</dl>
+<dl><dt><strong>profile_ignore_list</strong></dt>
+</dl>
+<dl><dt><strong>pseudo_exec_name</strong></dt>
+</dl>
+<dl><dt><strong>supports_tab_control</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ChromeBackendSettings">class <strong>ChromeBackendSettings</strong></a>(<a href="telemetry.internal.backends.android_browser_backend_settings.html#AndroidBrowserBackendSettings">AndroidBrowserBackendSettings</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.android_browser_backend_settings.html#ChromeBackendSettings">ChromeBackendSettings</a></dd>
+<dd><a href="telemetry.internal.backends.android_browser_backend_settings.html#AndroidBrowserBackendSettings">AndroidBrowserBackendSettings</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="ChromeBackendSettings-GetCommandLineFile"><strong>GetCommandLineFile</strong></a>(self, is_user_debug_build)</dt></dl>
+
+<dl><dt><a name="ChromeBackendSettings-GetDevtoolsRemotePort"><strong>GetDevtoolsRemotePort</strong></a>(self, device)</dt></dl>
+
+<dl><dt><a name="ChromeBackendSettings-__init__"><strong>__init__</strong></a>(self, package)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.android_browser_backend_settings.html#AndroidBrowserBackendSettings">AndroidBrowserBackendSettings</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>activity</strong></dt>
+</dl>
+<dl><dt><strong>package</strong></dt>
+</dl>
+<dl><dt><strong>profile_ignore_list</strong></dt>
+</dl>
+<dl><dt><strong>pseudo_exec_name</strong></dt>
+</dl>
+<dl><dt><strong>supports_tab_control</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ContentShellBackendSettings">class <strong>ContentShellBackendSettings</strong></a>(<a href="telemetry.internal.backends.android_browser_backend_settings.html#AndroidBrowserBackendSettings">AndroidBrowserBackendSettings</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.android_browser_backend_settings.html#ContentShellBackendSettings">ContentShellBackendSettings</a></dd>
+<dd><a href="telemetry.internal.backends.android_browser_backend_settings.html#AndroidBrowserBackendSettings">AndroidBrowserBackendSettings</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="ContentShellBackendSettings-GetDevtoolsRemotePort"><strong>GetDevtoolsRemotePort</strong></a>(self, device)</dt></dl>
+
+<dl><dt><a name="ContentShellBackendSettings-__init__"><strong>__init__</strong></a>(self, package)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.android_browser_backend_settings.html#AndroidBrowserBackendSettings">AndroidBrowserBackendSettings</a>:<br>
+<dl><dt><a name="ContentShellBackendSettings-GetCommandLineFile"><strong>GetCommandLineFile</strong></a>(self, is_user_debug_build)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.android_browser_backend_settings.html#AndroidBrowserBackendSettings">AndroidBrowserBackendSettings</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>activity</strong></dt>
+</dl>
+<dl><dt><strong>package</strong></dt>
+</dl>
+<dl><dt><strong>profile_ignore_list</strong></dt>
+</dl>
+<dl><dt><strong>pseudo_exec_name</strong></dt>
+</dl>
+<dl><dt><strong>supports_tab_control</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="WebviewBackendSettings">class <strong>WebviewBackendSettings</strong></a>(<a href="telemetry.internal.backends.android_browser_backend_settings.html#AndroidBrowserBackendSettings">AndroidBrowserBackendSettings</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.android_browser_backend_settings.html#WebviewBackendSettings">WebviewBackendSettings</a></dd>
+<dd><a href="telemetry.internal.backends.android_browser_backend_settings.html#AndroidBrowserBackendSettings">AndroidBrowserBackendSettings</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="WebviewBackendSettings-GetDevtoolsRemotePort"><strong>GetDevtoolsRemotePort</strong></a>(self, device)</dt></dl>
+
+<dl><dt><a name="WebviewBackendSettings-__init__"><strong>__init__</strong></a>(self, package, activity<font color="#909090">='org.chromium.webview_shell.TelemetryActivity'</font>, cmdline_file<font color="#909090">='/data/local/tmp/webview-command-line'</font>)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.android_browser_backend_settings.html#AndroidBrowserBackendSettings">AndroidBrowserBackendSettings</a>:<br>
+<dl><dt><a name="WebviewBackendSettings-GetCommandLineFile"><strong>GetCommandLineFile</strong></a>(self, is_user_debug_build)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.android_browser_backend_settings.html#AndroidBrowserBackendSettings">AndroidBrowserBackendSettings</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>activity</strong></dt>
+</dl>
+<dl><dt><strong>package</strong></dt>
+</dl>
+<dl><dt><strong>profile_ignore_list</strong></dt>
+</dl>
+<dl><dt><strong>pseudo_exec_name</strong></dt>
+</dl>
+<dl><dt><strong>supports_tab_control</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="WebviewShellBackendSettings">class <strong>WebviewShellBackendSettings</strong></a>(<a href="telemetry.internal.backends.android_browser_backend_settings.html#WebviewBackendSettings">WebviewBackendSettings</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.android_browser_backend_settings.html#WebviewShellBackendSettings">WebviewShellBackendSettings</a></dd>
+<dd><a href="telemetry.internal.backends.android_browser_backend_settings.html#WebviewBackendSettings">WebviewBackendSettings</a></dd>
+<dd><a href="telemetry.internal.backends.android_browser_backend_settings.html#AndroidBrowserBackendSettings">AndroidBrowserBackendSettings</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="WebviewShellBackendSettings-__init__"><strong>__init__</strong></a>(self, package)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.android_browser_backend_settings.html#WebviewBackendSettings">WebviewBackendSettings</a>:<br>
+<dl><dt><a name="WebviewShellBackendSettings-GetDevtoolsRemotePort"><strong>GetDevtoolsRemotePort</strong></a>(self, device)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.android_browser_backend_settings.html#AndroidBrowserBackendSettings">AndroidBrowserBackendSettings</a>:<br>
+<dl><dt><a name="WebviewShellBackendSettings-GetCommandLineFile"><strong>GetCommandLineFile</strong></a>(self, is_user_debug_build)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.android_browser_backend_settings.html#AndroidBrowserBackendSettings">AndroidBrowserBackendSettings</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>activity</strong></dt>
+</dl>
+<dl><dt><strong>package</strong></dt>
+</dl>
+<dl><dt><strong>profile_ignore_list</strong></dt>
+</dl>
+<dl><dt><strong>pseudo_exec_name</strong></dt>
+</dl>
+<dl><dt><strong>supports_tab_control</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
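The settings classes above differ mainly in their constructor arguments and in how GetDevtoolsRemotePort() locates the DevTools socket. A sketch of picking a settings object by browser type; the string keys used here are illustrative only and are not the selection logic Telemetry actually uses:

    from telemetry.internal.backends import android_browser_backend_settings

    def settings_for(browser_type, package):
      # Illustrative mapping only; real browser discovery lives elsewhere.
      if browser_type == 'chrome':
        return android_browser_backend_settings.ChromeBackendSettings(package)
      if browser_type == 'content-shell':
        return android_browser_backend_settings.ContentShellBackendSettings(package)
      if browser_type == 'webview-shell':
        return android_browser_backend_settings.WebviewShellBackendSettings(package)
      return android_browser_backend_settings.WebviewBackendSettings(package)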
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.android_command_line_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.android_command_line_backend.html
new file mode 100644
index 0000000..acb7d5d
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.android_command_line_backend.html
@@ -0,0 +1,74 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.android_command_line_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.android_command_line_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/android_command_line_backend.py">telemetry/internal/backends/android_command_line_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="devil.android.device_errors.html">devil.android.device_errors</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="pipes.html">pipes</a><br>
+</td><td width="25%" valign=top><a href="sys.html">sys</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.android_command_line_backend.html#SetUpCommandLineFlags">SetUpCommandLineFlags</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="SetUpCommandLineFlags">class <strong>SetUpCommandLineFlags</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;context&nbsp;manager&nbsp;for&nbsp;setting&nbsp;up&nbsp;the&nbsp;android&nbsp;command&nbsp;line&nbsp;flags.<br>
+&nbsp;<br>
+This&nbsp;provides&nbsp;a&nbsp;readable&nbsp;way&nbsp;of&nbsp;using&nbsp;the&nbsp;android&nbsp;command&nbsp;line&nbsp;backend&nbsp;class.<br>
+Example&nbsp;usage:<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;with&nbsp;android_command_line_backend.<a href="#SetUpCommandLineFlags">SetUpCommandLineFlags</a>(<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;device,&nbsp;backend_settings,&nbsp;startup_args):<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;Something&nbsp;to&nbsp;run&nbsp;while&nbsp;the&nbsp;command&nbsp;line&nbsp;flags&nbsp;are&nbsp;set&nbsp;appropriately.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="SetUpCommandLineFlags-__enter__"><strong>__enter__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="SetUpCommandLineFlags-__exit__"><strong>__exit__</strong></a>(self, *args)</dt></dl>
+
+<dl><dt><a name="SetUpCommandLineFlags-__init__"><strong>__init__</strong></a>(self, device, backend_settings, startup_args)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
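The SetUpCommandLineFlags docstring above already shows the intended with-statement usage; restated as a sketch where `device`, `backend_settings`, and `startup_args` are assumed to be supplied by the caller and `launch_browser` stands in for whatever should run while the flags are in place:

    from telemetry.internal.backends import android_command_line_backend

    def run_with_flags(device, backend_settings, startup_args, launch_browser):
      # Per the docstring above, the flags are set appropriately for the
      # duration of the with-block.
      with android_command_line_backend.SetUpCommandLineFlags(
          device, backend_settings, startup_args):
        launch_browser()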
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.app_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.app_backend.html
new file mode 100644
index 0000000..cfa3b69
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.app_backend.html
@@ -0,0 +1,72 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.app_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.app_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/app_backend.py">telemetry/internal/backends/app_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.app_backend.html#AppBackend">AppBackend</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="AppBackend">class <strong>AppBackend</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="AppBackend-Close"><strong>Close</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AppBackend-GetStackTrace"><strong>GetStackTrace</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AppBackend-GetStandardOutput"><strong>GetStandardOutput</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AppBackend-IsAppRunning"><strong>IsAppRunning</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AppBackend-SetApp"><strong>SetApp</strong></a>(self, app)</dt></dl>
+
+<dl><dt><a name="AppBackend-Start"><strong>Start</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AppBackend-__del__"><strong>__del__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AppBackend-__init__"><strong>__init__</strong></a>(self, app_type, platform_backend)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>app</strong></dt>
+</dl>
+<dl><dt><strong>app_type</strong></dt>
+</dl>
+<dl><dt><strong>pid</strong></dt>
+</dl>
+<dl><dt><strong>platform_backend</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
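`AppBackend`, documented above, defines the lifecycle hooks (`Start`, `Close`, `IsAppRunning`, `GetStandardOutput`, `GetStackTrace`) that concrete backends override. A hypothetical minimal subclass, purely to illustrate the listed interface (`FakeAppBackend` is not part of telemetry):

```python
# Hypothetical illustration of the AppBackend interface listed above;
# FakeAppBackend does not exist in telemetry.
from telemetry.internal.backends import app_backend

class FakeAppBackend(app_backend.AppBackend):
  def __init__(self, platform_backend):
    # AppBackend.__init__ takes (app_type, platform_backend).
    super(FakeAppBackend, self).__init__('fake_app', platform_backend)
    self._running = False

  def Start(self):
    self._running = True

  def Close(self):
    self._running = False

  def IsAppRunning(self):
    return self._running

  def GetStandardOutput(self):
    return ''

  def GetStackTrace(self):
    return ''
```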
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.browser_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.browser_backend.html
new file mode 100644
index 0000000..1555523
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.browser_backend.html
@@ -0,0 +1,218 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.browser_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.browser_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/browser_backend.py">telemetry/internal/backends/browser_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.backends.app_backend.html">telemetry.internal.backends.app_backend</a><br>
+<a href="catapult_base.cloud_storage.html">catapult_base.cloud_storage</a><br>
+</td><td width="25%" valign=top><a href="telemetry.decorators.html">telemetry.decorators</a><br>
+<a href="telemetry.core.platform.html">telemetry.core.platform</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.profiling_controller_backend.html">telemetry.internal.platform.profiling_controller_backend</a><br>
+<a href="sys.html">sys</a><br>
+</td><td width="25%" valign=top><a href="uuid.html">uuid</a><br>
+<a href="telemetry.internal.browser.web_contents.html">telemetry.internal.browser.web_contents</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.browser_backend.html#ExtensionsNotSupportedException">ExtensionsNotSupportedException</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">BrowserBackend</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="BrowserBackend">class <strong>BrowserBackend</strong></a>(<a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;base&nbsp;class&nbsp;for&nbsp;browser&nbsp;backends.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">BrowserBackend</a></dd>
+<dd><a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="BrowserBackend-DumpMemory"><strong>DumpMemory</strong></a>(self, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="BrowserBackend-GetStackTrace"><strong>GetStackTrace</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="BrowserBackend-GetStandardOutput"><strong>GetStandardOutput</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="BrowserBackend-GetSystemInfo"><strong>GetSystemInfo</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="BrowserBackend-IsAppRunning"><strong>IsAppRunning</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="BrowserBackend-IsBrowserRunning"><strong>IsBrowserRunning</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="BrowserBackend-SetBrowser"><strong>SetBrowser</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="BrowserBackend-SetMemoryPressureNotificationsSuppressed"><strong>SetMemoryPressureNotificationsSuppressed</strong></a>(self, suppressed, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="BrowserBackend-SimulateMemoryPressureNotification"><strong>SimulateMemoryPressureNotification</strong></a>(self, pressure_level, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="BrowserBackend-Start"><strong>Start</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="BrowserBackend-StartTracing"><strong>StartTracing</strong></a>(self, trace_options, custom_categories<font color="#909090">=None</font>, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="BrowserBackend-StopTracing"><strong>StopTracing</strong></a>(self, trace_data_builder)</dt></dl>
+
+<dl><dt><a name="BrowserBackend-UploadLogsToCloudStorage"><strong>UploadLogsToCloudStorage</strong></a>(self)</dt><dd><tt>Uploading&nbsp;log&nbsp;files&nbsp;produce&nbsp;by&nbsp;this&nbsp;browser&nbsp;instance&nbsp;to&nbsp;cloud&nbsp;storage.<br>
+&nbsp;<br>
+Check&nbsp;supports_uploading_logs&nbsp;before&nbsp;calling&nbsp;this&nbsp;method.</tt></dd></dl>
+
+<dl><dt><a name="BrowserBackend-__init__"><strong>__init__</strong></a>(self, platform_backend, supports_extensions, browser_options, tab_list_backend)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>browser</strong></dt>
+</dl>
+<dl><dt><strong>browser_type</strong></dt>
+</dl>
+<dl><dt><strong>log_file_path</strong></dt>
+</dl>
+<dl><dt><strong>profiling_controller_backend</strong></dt>
+</dl>
+<dl><dt><strong>should_ignore_certificate_errors</strong></dt>
+</dl>
+<dl><dt><strong>supports_cpu_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_extensions</strong></dt>
+<dd><tt>True&nbsp;if&nbsp;this&nbsp;browser&nbsp;backend&nbsp;supports&nbsp;extensions.</tt></dd>
+</dl>
+<dl><dt><strong>supports_memory_dumping</strong></dt>
+</dl>
+<dl><dt><strong>supports_memory_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_overriding_memory_pressure_notifications</strong></dt>
+</dl>
+<dl><dt><strong>supports_power_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_system_info</strong></dt>
+</dl>
+<dl><dt><strong>supports_tab_control</strong></dt>
+</dl>
+<dl><dt><strong>supports_tracing</strong></dt>
+</dl>
+<dl><dt><strong>supports_uploading_logs</strong></dt>
+</dl>
+<dl><dt><strong>tab_list_backend</strong></dt>
+</dl>
+<dl><dt><strong>wpr_mode</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a>:<br>
+<dl><dt><a name="BrowserBackend-Close"><strong>Close</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="BrowserBackend-SetApp"><strong>SetApp</strong></a>(self, app)</dt></dl>
+
+<dl><dt><a name="BrowserBackend-__del__"><strong>__del__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>app</strong></dt>
+</dl>
+<dl><dt><strong>app_type</strong></dt>
+</dl>
+<dl><dt><strong>pid</strong></dt>
+</dl>
+<dl><dt><strong>platform_backend</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ExtensionsNotSupportedException">class <strong>ExtensionsNotSupportedException</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.browser_backend.html#ExtensionsNotSupportedException">ExtensionsNotSupportedException</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="ExtensionsNotSupportedException-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#ExtensionsNotSupportedException-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#ExtensionsNotSupportedException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="ExtensionsNotSupportedException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ExtensionsNotSupportedException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ExtensionsNotSupportedException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#ExtensionsNotSupportedException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ExtensionsNotSupportedException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#ExtensionsNotSupportedException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="ExtensionsNotSupportedException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#ExtensionsNotSupportedException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="ExtensionsNotSupportedException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ExtensionsNotSupportedException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#ExtensionsNotSupportedException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="ExtensionsNotSupportedException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ExtensionsNotSupportedException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="ExtensionsNotSupportedException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ExtensionsNotSupportedException-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#ExtensionsNotSupportedException-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="ExtensionsNotSupportedException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
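`BrowserBackend.UploadLogsToCloudStorage` asks callers to check `supports_uploading_logs` first. A small sketch of that guard, assuming `backend` is an already-constructed `BrowserBackend` instance:

```python
# Sketch of the guard the UploadLogsToCloudStorage docstring above asks for;
# `backend` is assumed to be an already-constructed BrowserBackend.
import logging

def maybe_upload_logs(backend):
  if backend.supports_uploading_logs:
    backend.UploadLogsToCloudStorage()
  else:
    logging.info('Backend does not support uploading logs; skipping.')
```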
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.android_browser_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.android_browser_backend.html
new file mode 100644
index 0000000..1a6ac27
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.android_browser_backend.html
@@ -0,0 +1,194 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.chrome.android_browser_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.chrome.html"><font color="#ffffff">chrome</font></a>.android_browser_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome/android_browser_backend.py">telemetry/internal/backends/chrome/android_browser_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.backends.android_command_line_backend.html">telemetry.internal.backends.android_command_line_backend</a><br>
+<a href="telemetry.internal.platform.android_platform_backend.html">telemetry.internal.platform.android_platform_backend</a><br>
+<a href="telemetry.internal.backends.browser_backend.html">telemetry.internal.backends.browser_backend</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.backends.chrome.chrome_browser_backend.html">telemetry.internal.backends.chrome.chrome_browser_backend</a><br>
+<a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+<a href="telemetry.internal.forwarders.html">telemetry.internal.forwarders</a><br>
+</td><td width="25%" valign=top><a href="devil.android.sdk.intent.html">devil.android.sdk.intent</a><br>
+<a href="logging.html">logging</a><br>
+<a href="subprocess.html">subprocess</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome.chrome_browser_backend.html#ChromeBrowserBackend">telemetry.internal.backends.chrome.chrome_browser_backend.ChromeBrowserBackend</a>(<a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome.android_browser_backend.html#AndroidBrowserBackend">AndroidBrowserBackend</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="AndroidBrowserBackend">class <strong>AndroidBrowserBackend</strong></a>(<a href="telemetry.internal.backends.chrome.chrome_browser_backend.html#ChromeBrowserBackend">telemetry.internal.backends.chrome.chrome_browser_backend.ChromeBrowserBackend</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>The&nbsp;backend&nbsp;for&nbsp;controlling&nbsp;a&nbsp;browser&nbsp;instance&nbsp;running&nbsp;on&nbsp;Android.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome.android_browser_backend.html#AndroidBrowserBackend">AndroidBrowserBackend</a></dd>
+<dd><a href="telemetry.internal.backends.chrome.chrome_browser_backend.html#ChromeBrowserBackend">telemetry.internal.backends.chrome.chrome_browser_backend.ChromeBrowserBackend</a></dd>
+<dd><a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a></dd>
+<dd><a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="AndroidBrowserBackend-Close"><strong>Close</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidBrowserBackend-GetBrowserStartupArgs"><strong>GetBrowserStartupArgs</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidBrowserBackend-GetStackTrace"><strong>GetStackTrace</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidBrowserBackend-GetStandardOutput"><strong>GetStandardOutput</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidBrowserBackend-IsBrowserRunning"><strong>IsBrowserRunning</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidBrowserBackend-Start"><strong>Start</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidBrowserBackend-__del__"><strong>__del__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidBrowserBackend-__init__"><strong>__init__</strong></a>(self, android_platform_backend, browser_options, backend_settings, output_profile_path, extensions_to_load, target_arch)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>activity</strong></dt>
+</dl>
+<dl><dt><strong>browser_directory</strong></dt>
+</dl>
+<dl><dt><strong>device</strong></dt>
+</dl>
+<dl><dt><strong>log_file_path</strong></dt>
+</dl>
+<dl><dt><strong>package</strong></dt>
+</dl>
+<dl><dt><strong>pid</strong></dt>
+</dl>
+<dl><dt><strong>profile_directory</strong></dt>
+</dl>
+<dl><dt><strong>should_ignore_certificate_errors</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.chrome.chrome_browser_backend.html#ChromeBrowserBackend">telemetry.internal.backends.chrome.chrome_browser_backend.ChromeBrowserBackend</a>:<br>
+<dl><dt><a name="AndroidBrowserBackend-DumpMemory"><strong>DumpMemory</strong></a>(self, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="AndroidBrowserBackend-GetProcessName"><strong>GetProcessName</strong></a>(self, cmd_line)</dt><dd><tt>Returns&nbsp;a&nbsp;user-friendly&nbsp;name&nbsp;for&nbsp;the&nbsp;process&nbsp;of&nbsp;the&nbsp;given&nbsp;|cmd_line|.</tt></dd></dl>
+
+<dl><dt><a name="AndroidBrowserBackend-GetReplayBrowserStartupArgs"><strong>GetReplayBrowserStartupArgs</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidBrowserBackend-GetSystemInfo"><strong>GetSystemInfo</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidBrowserBackend-HasBrowserFinishedLaunching"><strong>HasBrowserFinishedLaunching</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidBrowserBackend-SetMemoryPressureNotificationsSuppressed"><strong>SetMemoryPressureNotificationsSuppressed</strong></a>(self, suppressed, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="AndroidBrowserBackend-SimulateMemoryPressureNotification"><strong>SimulateMemoryPressureNotification</strong></a>(self, pressure_level, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="AndroidBrowserBackend-StartTracing"><strong>StartTracing</strong></a>(self, trace_options, custom_categories<font color="#909090">=None</font>, timeout<font color="#909090">=90</font>)</dt><dd><tt>Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;trace_options:&nbsp;A&nbsp;tracing_options.TracingOptions&nbsp;instance.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;custom_categories:&nbsp;An&nbsp;optional&nbsp;string&nbsp;containing&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;comma&nbsp;separated&nbsp;categories&nbsp;that&nbsp;will&nbsp;be&nbsp;traced<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;instead&nbsp;of&nbsp;the&nbsp;default&nbsp;category&nbsp;set.&nbsp;&nbsp;Example:&nbsp;use<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;"webkit,cc,disabled-by-default-cc.debug"&nbsp;to&nbsp;trace&nbsp;only<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;those&nbsp;three&nbsp;event&nbsp;categories.</tt></dd></dl>
+
+<dl><dt><a name="AndroidBrowserBackend-StopTracing"><strong>StopTracing</strong></a>(self, trace_data_builder)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.chrome.chrome_browser_backend.html#ChromeBrowserBackend">telemetry.internal.backends.chrome.chrome_browser_backend.ChromeBrowserBackend</a>:<br>
+<dl><dt><strong>devtools_client</strong></dt>
+</dl>
+<dl><dt><strong>extension_backend</strong></dt>
+</dl>
+<dl><dt><strong>supports_cpu_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_memory_dumping</strong></dt>
+</dl>
+<dl><dt><strong>supports_memory_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_overriding_memory_pressure_notifications</strong></dt>
+</dl>
+<dl><dt><strong>supports_power_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_system_info</strong></dt>
+</dl>
+<dl><dt><strong>supports_tab_control</strong></dt>
+</dl>
+<dl><dt><strong>supports_tracing</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a>:<br>
+<dl><dt><a name="AndroidBrowserBackend-IsAppRunning"><strong>IsAppRunning</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidBrowserBackend-SetBrowser"><strong>SetBrowser</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="AndroidBrowserBackend-UploadLogsToCloudStorage"><strong>UploadLogsToCloudStorage</strong></a>(self)</dt><dd><tt>Uploading&nbsp;log&nbsp;files&nbsp;produce&nbsp;by&nbsp;this&nbsp;browser&nbsp;instance&nbsp;to&nbsp;cloud&nbsp;storage.<br>
+&nbsp;<br>
+Check&nbsp;supports_uploading_logs&nbsp;before&nbsp;calling&nbsp;this&nbsp;method.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a>:<br>
+<dl><dt><strong>browser</strong></dt>
+</dl>
+<dl><dt><strong>browser_type</strong></dt>
+</dl>
+<dl><dt><strong>profiling_controller_backend</strong></dt>
+</dl>
+<dl><dt><strong>supports_extensions</strong></dt>
+<dd><tt>True&nbsp;if&nbsp;this&nbsp;browser&nbsp;backend&nbsp;supports&nbsp;extensions.</tt></dd>
+</dl>
+<dl><dt><strong>supports_uploading_logs</strong></dt>
+</dl>
+<dl><dt><strong>tab_list_backend</strong></dt>
+</dl>
+<dl><dt><strong>wpr_mode</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a>:<br>
+<dl><dt><a name="AndroidBrowserBackend-SetApp"><strong>SetApp</strong></a>(self, app)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>app</strong></dt>
+</dl>
+<dl><dt><strong>app_type</strong></dt>
+</dl>
+<dl><dt><strong>platform_backend</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
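The `StartTracing` docstring above gives `"webkit,cc,disabled-by-default-cc.debug"` as an example custom category list. A sketch of that call, assuming `backend` is a running `AndroidBrowserBackend` and `trace_options` is the `tracing_options.TracingOptions` instance the docstring refers to (neither is constructed here, and `trace_data_builder` is likewise an assumed input):

```python
# Sketch of the custom_categories usage described in the StartTracing
# docstring above; `backend`, `trace_options` and `trace_data_builder`
# are assumed inputs supplied by the caller.
def trace_compositor_categories(backend, trace_options, trace_data_builder):
  backend.StartTracing(
      trace_options,
      custom_categories='webkit,cc,disabled-by-default-cc.debug',
      timeout=90)
  # ... exercise the browser while only those categories are traced ...
  backend.StopTracing(trace_data_builder)
```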
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.android_browser_finder.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.android_browser_finder.html
new file mode 100644
index 0000000..f8020b1
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.android_browser_finder.html
@@ -0,0 +1,132 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.chrome.android_browser_finder</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.chrome.html"><font color="#ffffff">chrome</font></a>.android_browser_finder</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome/android_browser_finder.py">telemetry/internal/backends/chrome/android_browser_finder.py</a></font></td></tr></table>
+    <p><tt>Finds&nbsp;android&nbsp;browsers&nbsp;that&nbsp;can&nbsp;be&nbsp;controlled&nbsp;by&nbsp;telemetry.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.backends.chrome.android_browser_backend.html">telemetry.internal.backends.chrome.android_browser_backend</a><br>
+<a href="telemetry.internal.backends.android_browser_backend_settings.html">telemetry.internal.backends.android_browser_backend_settings</a><br>
+<a href="telemetry.internal.platform.android_device.html">telemetry.internal.platform.android_device</a><br>
+<a href="devil.android.apk_helper.html">devil.android.apk_helper</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.browser.browser.html">telemetry.internal.browser.browser</a><br>
+<a href="telemetry.decorators.html">telemetry.decorators</a><br>
+<a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+<a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+<a href="telemetry.core.platform.html">telemetry.core.platform</a><br>
+<a href="telemetry.internal.browser.possible_browser.html">telemetry.internal.browser.possible_browser</a><br>
+<a href="sys.html">sys</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a>(<a href="telemetry.internal.app.possible_app.html#PossibleApp">telemetry.internal.app.possible_app.PossibleApp</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome.android_browser_finder.html#PossibleAndroidBrowser">PossibleAndroidBrowser</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PossibleAndroidBrowser">class <strong>PossibleAndroidBrowser</strong></a>(<a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;launchable&nbsp;android&nbsp;browser&nbsp;instance.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome.android_browser_finder.html#PossibleAndroidBrowser">PossibleAndroidBrowser</a></dd>
+<dd><a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a></dd>
+<dd><a href="telemetry.internal.app.possible_app.html#PossibleApp">telemetry.internal.app.possible_app.PossibleApp</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="PossibleAndroidBrowser-Create"><strong>Create</strong></a>(self, finder_options)</dt></dl>
+
+<dl><dt><a name="PossibleAndroidBrowser-HaveLocalAPK"><strong>HaveLocalAPK</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleAndroidBrowser-SupportsOptions"><strong>SupportsOptions</strong></a>(self, finder_options)</dt></dl>
+
+<dl><dt><a name="PossibleAndroidBrowser-UpdateExecutableIfNeeded"><strong>UpdateExecutableIfNeeded</strong></a>(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="PossibleAndroidBrowser-__init__"><strong>__init__</strong></a>(self, browser_type, finder_options, android_platform, backend_settings, apk_name)</dt></dl>
+
+<dl><dt><a name="PossibleAndroidBrowser-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleAndroidBrowser-last_modification_time"><strong>last_modification_time</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a>:<br>
+<dl><dt><a name="PossibleAndroidBrowser-IsRemote"><strong>IsRemote</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleAndroidBrowser-RunRemote"><strong>RunRemote</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleAndroidBrowser-SetCredentialsPath"><strong>SetCredentialsPath</strong></a>(self, credentials_path)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a>:<br>
+<dl><dt><strong>browser_type</strong></dt>
+</dl>
+<dl><dt><strong>supports_tab_control</strong></dt>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.app.possible_app.html#PossibleApp">telemetry.internal.app.possible_app.PossibleApp</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>app_type</strong></dt>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+</dl>
+<dl><dt><strong>target_os</strong></dt>
+<dd><tt>Target&nbsp;OS&nbsp;the&nbsp;app&nbsp;will&nbsp;run&nbsp;on.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-CanFindAvailableBrowsers"><strong>CanFindAvailableBrowsers</strong></a>()</dt></dl>
+ <dl><dt><a name="-CanPossiblyHandlePath"><strong>CanPossiblyHandlePath</strong></a>(target_path)</dt></dl>
+ <dl><dt><a name="-FindAllAvailableBrowsers"><strong>FindAllAvailableBrowsers</strong></a>(finder_options, device)</dt><dd><tt>Finds&nbsp;all&nbsp;the&nbsp;possible&nbsp;browsers&nbsp;on&nbsp;one&nbsp;device.<br>
+&nbsp;<br>
+The&nbsp;device&nbsp;is&nbsp;either&nbsp;the&nbsp;only&nbsp;device&nbsp;on&nbsp;the&nbsp;host&nbsp;platform,<br>
+or&nbsp;|finder_options|&nbsp;specifies&nbsp;a&nbsp;particular&nbsp;device.</tt></dd></dl>
+ <dl><dt><a name="-FindAllBrowserTypes"><strong>FindAllBrowserTypes</strong></a>(_options)</dt></dl>
+ <dl><dt><a name="-SelectDefaultBrowser"><strong>SelectDefaultBrowser</strong></a>(possible_browsers)</dt><dd><tt>Return&nbsp;the&nbsp;newest&nbsp;possible&nbsp;browser.</tt></dd></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>CHROME_PACKAGE_NAMES</strong> = {'android-chrome': ['com.google.android.apps.chrome', &lt;class 'telemetry.internal.backends.android_browser_backend_settings.ChromeBackendSettings'&gt;, 'Chrome.apk'], 'android-chrome-beta': ['com.chrome.beta', &lt;class 'telemetry.internal.backends.android_browser_backend_settings.ChromeBackendSettings'&gt;, None], 'android-chrome-canary': ['com.chrome.canary', &lt;class 'telemetry.internal.backends.android_browser_backend_settings.ChromeBackendSettings'&gt;, None], 'android-chrome-dev': ['com.chrome.dev', &lt;class 'telemetry.internal.backends.android_browser_backend_settings.ChromeBackendSettings'&gt;, None], 'android-chrome-work': ['com.chrome.work', &lt;class 'telemetry.internal.backends.android_browser_backend_settings.ChromeBackendSettings'&gt;, None], 'android-chromium': ['org.chromium.chrome', &lt;class 'telemetry.internal.backends.android_browser_backend_settings.ChromeBackendSettings'&gt;, 'ChromePublic.apk'], 'android-content-shell': ['org.chromium.content_shell_apk', &lt;class 'telemetry.internal.backends.android_browser_backend_settings.ContentShellBackendSettings'&gt;, 'ContentShell.apk'], 'android-jb-system-chrome': ['com.android.chrome', &lt;class 'telemetry.internal.backends.android_browser_backend_settings.ChromeBackendSettings'&gt;, None], 'android-webview': ['org.chromium.webview_shell', &lt;class 'telemetry.internal.backends.android_browser_backend_settings.WebviewBackendSettings'&gt;, None], 'android-webview-shell': ['org.chromium.android_webview.shell', &lt;class 'telemetry.internal.backends.android_browser_backend_settings.WebviewShellBackendSettings'&gt;, 'AndroidWebView.apk']}</td></tr></table>
+</body></html>
\ No newline at end of file
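The module-level helpers listed above compose in the obvious way: check `CanFindAvailableBrowsers`, enumerate with `FindAllAvailableBrowsers`, then pick with `SelectDefaultBrowser` (which returns the newest possible browser). A sketch, assuming `finder_options` and `device` come from telemetry's own option parsing and device discovery:

```python
# Sketch of composing the android_browser_finder helpers documented above;
# `finder_options` and `device` are assumed to be supplied by the caller.
from telemetry.internal.backends.chrome import android_browser_finder

def pick_default_android_browser(finder_options, device):
  if not android_browser_finder.CanFindAvailableBrowsers():
    return None
  possible_browsers = android_browser_finder.FindAllAvailableBrowsers(
      finder_options, device)
  if not possible_browsers:
    return None
  return android_browser_finder.SelectDefaultBrowser(possible_browsers)
```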
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.chrome_browser_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.chrome_browser_backend.html
new file mode 100644
index 0000000..c599ca9
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.chrome_browser_backend.html
@@ -0,0 +1,191 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.chrome.chrome_browser_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.chrome.html"><font color="#ffffff">chrome</font></a>.chrome_browser_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome/chrome_browser_backend.py">telemetry/internal/backends/chrome/chrome_browser_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.backends.browser_backend.html">telemetry.internal.backends.browser_backend</a><br>
+<a href="telemetry.decorators.html">telemetry.decorators</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.devtools_client_backend.html">telemetry.internal.backends.chrome_inspector.devtools_client_backend</a><br>
+<a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+<a href="telemetry.internal.backends.chrome.extension_backend.html">telemetry.internal.backends.chrome.extension_backend</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.forwarders.html">telemetry.internal.forwarders</a><br>
+<a href="logging.html">logging</a><br>
+<a href="telemetry.testing.options_for_unittests.html">telemetry.testing.options_for_unittests</a><br>
+<a href="pprint.html">pprint</a><br>
+<a href="shlex.html">shlex</a><br>
+</td><td width="25%" valign=top><a href="sys.html">sys</a><br>
+<a href="telemetry.internal.backends.chrome.system_info_backend.html">telemetry.internal.backends.chrome.system_info_backend</a><br>
+<a href="telemetry.internal.backends.chrome.tab_list_backend.html">telemetry.internal.backends.chrome.tab_list_backend</a><br>
+<a href="telemetry.internal.browser.user_agent.html">telemetry.internal.browser.user_agent</a><br>
+<a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.browser.web_contents.html">telemetry.internal.browser.web_contents</a><br>
+<a href="telemetry.util.wpr_modes.html">telemetry.util.wpr_modes</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a>(<a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome.chrome_browser_backend.html#ChromeBrowserBackend">ChromeBrowserBackend</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ChromeBrowserBackend">class <strong>ChromeBrowserBackend</strong></a>(<a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>An&nbsp;abstract&nbsp;class&nbsp;for&nbsp;chrome&nbsp;browser&nbsp;backends.&nbsp;Provides&nbsp;basic&nbsp;functionality<br>
+once&nbsp;a&nbsp;remote-debugger&nbsp;port&nbsp;has&nbsp;been&nbsp;established.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome.chrome_browser_backend.html#ChromeBrowserBackend">ChromeBrowserBackend</a></dd>
+<dd><a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a></dd>
+<dd><a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="ChromeBrowserBackend-Close"><strong>Close</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ChromeBrowserBackend-DumpMemory"><strong>DumpMemory</strong></a>(self, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="ChromeBrowserBackend-GetBrowserStartupArgs"><strong>GetBrowserStartupArgs</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ChromeBrowserBackend-GetProcessName"><strong>GetProcessName</strong></a>(self, cmd_line)</dt><dd><tt>Returns&nbsp;a&nbsp;user-friendly&nbsp;name&nbsp;for&nbsp;the&nbsp;process&nbsp;of&nbsp;the&nbsp;given&nbsp;|cmd_line|.</tt></dd></dl>
+
+<dl><dt><a name="ChromeBrowserBackend-GetReplayBrowserStartupArgs"><strong>GetReplayBrowserStartupArgs</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ChromeBrowserBackend-GetSystemInfo"><strong>GetSystemInfo</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ChromeBrowserBackend-HasBrowserFinishedLaunching"><strong>HasBrowserFinishedLaunching</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ChromeBrowserBackend-SetMemoryPressureNotificationsSuppressed"><strong>SetMemoryPressureNotificationsSuppressed</strong></a>(self, suppressed, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="ChromeBrowserBackend-SimulateMemoryPressureNotification"><strong>SimulateMemoryPressureNotification</strong></a>(self, pressure_level, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="ChromeBrowserBackend-StartTracing"><strong>StartTracing</strong></a>(self, trace_options, custom_categories<font color="#909090">=None</font>, timeout<font color="#909090">=90</font>)</dt><dd><tt>Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;trace_options:&nbsp;A&nbsp;tracing_options.TracingOptions&nbsp;instance.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;custom_categories:&nbsp;An&nbsp;optional&nbsp;string&nbsp;containing&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;comma&nbsp;separated&nbsp;categories&nbsp;that&nbsp;will&nbsp;be&nbsp;traced<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;instead&nbsp;of&nbsp;the&nbsp;default&nbsp;category&nbsp;set.&nbsp;&nbsp;Example:&nbsp;use<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;"webkit,cc,disabled-by-default-cc.debug"&nbsp;to&nbsp;trace&nbsp;only<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;those&nbsp;three&nbsp;event&nbsp;categories.</tt></dd></dl>
+
+<dl><dt><a name="ChromeBrowserBackend-StopTracing"><strong>StopTracing</strong></a>(self, trace_data_builder)</dt></dl>
+
+<dl><dt><a name="ChromeBrowserBackend-__init__"><strong>__init__</strong></a>(self, platform_backend, supports_tab_control, supports_extensions, browser_options, output_profile_path, extensions_to_load)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>browser_directory</strong></dt>
+</dl>
+<dl><dt><strong>devtools_client</strong></dt>
+</dl>
+<dl><dt><strong>extension_backend</strong></dt>
+</dl>
+<dl><dt><strong>profile_directory</strong></dt>
+</dl>
+<dl><dt><strong>supports_cpu_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_memory_dumping</strong></dt>
+</dl>
+<dl><dt><strong>supports_memory_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_overriding_memory_pressure_notifications</strong></dt>
+</dl>
+<dl><dt><strong>supports_power_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_system_info</strong></dt>
+</dl>
+<dl><dt><strong>supports_tab_control</strong></dt>
+</dl>
+<dl><dt><strong>supports_tracing</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a>:<br>
+<dl><dt><a name="ChromeBrowserBackend-GetStackTrace"><strong>GetStackTrace</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ChromeBrowserBackend-GetStandardOutput"><strong>GetStandardOutput</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ChromeBrowserBackend-IsAppRunning"><strong>IsAppRunning</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ChromeBrowserBackend-IsBrowserRunning"><strong>IsBrowserRunning</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ChromeBrowserBackend-SetBrowser"><strong>SetBrowser</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="ChromeBrowserBackend-Start"><strong>Start</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ChromeBrowserBackend-UploadLogsToCloudStorage"><strong>UploadLogsToCloudStorage</strong></a>(self)</dt><dd><tt>Uploading&nbsp;log&nbsp;files&nbsp;produce&nbsp;by&nbsp;this&nbsp;browser&nbsp;instance&nbsp;to&nbsp;cloud&nbsp;storage.<br>
+&nbsp;<br>
+Check&nbsp;supports_uploading_logs&nbsp;before&nbsp;calling&nbsp;this&nbsp;method.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a>:<br>
+<dl><dt><strong>browser</strong></dt>
+</dl>
+<dl><dt><strong>browser_type</strong></dt>
+</dl>
+<dl><dt><strong>log_file_path</strong></dt>
+</dl>
+<dl><dt><strong>profiling_controller_backend</strong></dt>
+</dl>
+<dl><dt><strong>should_ignore_certificate_errors</strong></dt>
+</dl>
+<dl><dt><strong>supports_extensions</strong></dt>
+<dd><tt>True&nbsp;if&nbsp;this&nbsp;browser&nbsp;backend&nbsp;supports&nbsp;extensions.</tt></dd>
+</dl>
+<dl><dt><strong>supports_uploading_logs</strong></dt>
+</dl>
+<dl><dt><strong>tab_list_backend</strong></dt>
+</dl>
+<dl><dt><strong>wpr_mode</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a>:<br>
+<dl><dt><a name="ChromeBrowserBackend-SetApp"><strong>SetApp</strong></a>(self, app)</dt></dl>
+
+<dl><dt><a name="ChromeBrowserBackend-__del__"><strong>__del__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>app</strong></dt>
+</dl>
+<dl><dt><strong>app_type</strong></dt>
+</dl>
+<dl><dt><strong>pid</strong></dt>
+</dl>
+<dl><dt><strong>platform_backend</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
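`ChromeBrowserBackend` exposes a set of `supports_*` capability properties alongside the corresponding APIs. A sketch of querying them before use, assuming `backend` is an existing `ChromeBrowserBackend` (or subclass) instance:

```python
# Sketch: consult the capability properties documented above before calling
# the matching APIs; `backend` is an assumed existing ChromeBrowserBackend.
def summarize_backend(backend):
  system_info = None
  if backend.supports_system_info:
    system_info = backend.GetSystemInfo()
  return {
      'browser_type': backend.browser_type,
      'supports_tab_control': backend.supports_tab_control,
      'supports_tracing': backend.supports_tracing,
      'system_info': system_info,
  }
```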
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.cros_browser_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.cros_browser_backend.html
new file mode 100644
index 0000000..b10d981
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.cros_browser_backend.html
@@ -0,0 +1,191 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.chrome.cros_browser_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.chrome.html"><font color="#ffffff">chrome</font></a>.cros_browser_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome/cros_browser_backend.py">telemetry/internal/backends/chrome/cros_browser_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.backends.chrome.chrome_browser_backend.html">telemetry.internal.backends.chrome.chrome_browser_backend</a><br>
+<a href="telemetry.decorators.html">telemetry.decorators</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+<a href="telemetry.internal.forwarders.html">telemetry.internal.forwarders</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+<a href="telemetry.internal.backends.chrome.misc_web_contents_backend.html">telemetry.internal.backends.chrome.misc_web_contents_backend</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+<a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome.chrome_browser_backend.html#ChromeBrowserBackend">telemetry.internal.backends.chrome.chrome_browser_backend.ChromeBrowserBackend</a>(<a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome.cros_browser_backend.html#CrOSBrowserBackend">CrOSBrowserBackend</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="CrOSBrowserBackend">class <strong>CrOSBrowserBackend</strong></a>(<a href="telemetry.internal.backends.chrome.chrome_browser_backend.html#ChromeBrowserBackend">telemetry.internal.backends.chrome.chrome_browser_backend.ChromeBrowserBackend</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome.cros_browser_backend.html#CrOSBrowserBackend">CrOSBrowserBackend</a></dd>
+<dd><a href="telemetry.internal.backends.chrome.chrome_browser_backend.html#ChromeBrowserBackend">telemetry.internal.backends.chrome.chrome_browser_backend.ChromeBrowserBackend</a></dd>
+<dd><a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a></dd>
+<dd><a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="CrOSBrowserBackend-Close"><strong>Close</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrOSBrowserBackend-GetBrowserStartupArgs"><strong>GetBrowserStartupArgs</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrOSBrowserBackend-GetStackTrace"><strong>GetStackTrace</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrOSBrowserBackend-GetStandardOutput"><strong>GetStandardOutput</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrOSBrowserBackend-IsBrowserRunning"><strong>IsBrowserRunning</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrOSBrowserBackend-Start"><strong>Start</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrOSBrowserBackend-__del__"><strong>__del__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrOSBrowserBackend-__init__"><strong>__init__</strong></a>(self, cros_platform_backend, browser_options, cri, is_guest, extensions_to_load)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>browser_directory</strong></dt>
+</dl>
+<dl><dt><strong>log_file_path</strong></dt>
+</dl>
+<dl><dt><strong>misc_web_contents_backend</strong></dt>
+<dd><tt>Access&nbsp;to&nbsp;chrome://oobe/login&nbsp;page.</tt></dd>
+</dl>
+<dl><dt><strong>oobe</strong></dt>
+</dl>
+<dl><dt><strong>oobe_exists</strong></dt>
+</dl>
+<dl><dt><strong>pid</strong></dt>
+</dl>
+<dl><dt><strong>profile_directory</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.chrome.chrome_browser_backend.html#ChromeBrowserBackend">telemetry.internal.backends.chrome.chrome_browser_backend.ChromeBrowserBackend</a>:<br>
+<dl><dt><a name="CrOSBrowserBackend-DumpMemory"><strong>DumpMemory</strong></a>(self, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="CrOSBrowserBackend-GetProcessName"><strong>GetProcessName</strong></a>(self, cmd_line)</dt><dd><tt>Returns&nbsp;a&nbsp;user-friendly&nbsp;name&nbsp;for&nbsp;the&nbsp;process&nbsp;of&nbsp;the&nbsp;given&nbsp;|cmd_line|.</tt></dd></dl>
+
+<dl><dt><a name="CrOSBrowserBackend-GetReplayBrowserStartupArgs"><strong>GetReplayBrowserStartupArgs</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrOSBrowserBackend-GetSystemInfo"><strong>GetSystemInfo</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrOSBrowserBackend-HasBrowserFinishedLaunching"><strong>HasBrowserFinishedLaunching</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrOSBrowserBackend-SetMemoryPressureNotificationsSuppressed"><strong>SetMemoryPressureNotificationsSuppressed</strong></a>(self, suppressed, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="CrOSBrowserBackend-SimulateMemoryPressureNotification"><strong>SimulateMemoryPressureNotification</strong></a>(self, pressure_level, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="CrOSBrowserBackend-StartTracing"><strong>StartTracing</strong></a>(self, trace_options, custom_categories<font color="#909090">=None</font>, timeout<font color="#909090">=90</font>)</dt><dd><tt>Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;trace_options:&nbsp;A&nbsp;tracing_options.TracingOptions&nbsp;instance.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;custom_categories:&nbsp;An&nbsp;optional&nbsp;string&nbsp;containing&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;comma&nbsp;separated&nbsp;categories&nbsp;that&nbsp;will&nbsp;be&nbsp;traced<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;instead&nbsp;of&nbsp;the&nbsp;default&nbsp;category&nbsp;set.&nbsp;&nbsp;Example:&nbsp;use<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;"webkit,cc,disabled-by-default-cc.debug"&nbsp;to&nbsp;trace&nbsp;only<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;those&nbsp;three&nbsp;event&nbsp;categories.</tt></dd></dl>
+
+<dl><dt><a name="CrOSBrowserBackend-StopTracing"><strong>StopTracing</strong></a>(self, trace_data_builder)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.chrome.chrome_browser_backend.html#ChromeBrowserBackend">telemetry.internal.backends.chrome.chrome_browser_backend.ChromeBrowserBackend</a>:<br>
+<dl><dt><strong>devtools_client</strong></dt>
+</dl>
+<dl><dt><strong>extension_backend</strong></dt>
+</dl>
+<dl><dt><strong>supports_cpu_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_memory_dumping</strong></dt>
+</dl>
+<dl><dt><strong>supports_memory_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_overriding_memory_pressure_notifications</strong></dt>
+</dl>
+<dl><dt><strong>supports_power_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_system_info</strong></dt>
+</dl>
+<dl><dt><strong>supports_tab_control</strong></dt>
+</dl>
+<dl><dt><strong>supports_tracing</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a>:<br>
+<dl><dt><a name="CrOSBrowserBackend-IsAppRunning"><strong>IsAppRunning</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrOSBrowserBackend-SetBrowser"><strong>SetBrowser</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="CrOSBrowserBackend-UploadLogsToCloudStorage"><strong>UploadLogsToCloudStorage</strong></a>(self)</dt><dd><tt>Uploading&nbsp;log&nbsp;files&nbsp;produce&nbsp;by&nbsp;this&nbsp;browser&nbsp;instance&nbsp;to&nbsp;cloud&nbsp;storage.<br>
+&nbsp;<br>
+Check&nbsp;supports_uploading_logs&nbsp;before&nbsp;calling&nbsp;this&nbsp;method.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a>:<br>
+<dl><dt><strong>browser</strong></dt>
+</dl>
+<dl><dt><strong>browser_type</strong></dt>
+</dl>
+<dl><dt><strong>profiling_controller_backend</strong></dt>
+</dl>
+<dl><dt><strong>should_ignore_certificate_errors</strong></dt>
+</dl>
+<dl><dt><strong>supports_extensions</strong></dt>
+<dd><tt>True&nbsp;if&nbsp;this&nbsp;browser&nbsp;backend&nbsp;supports&nbsp;extensions.</tt></dd>
+</dl>
+<dl><dt><strong>supports_uploading_logs</strong></dt>
+</dl>
+<dl><dt><strong>tab_list_backend</strong></dt>
+</dl>
+<dl><dt><strong>wpr_mode</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a>:<br>
+<dl><dt><a name="CrOSBrowserBackend-SetApp"><strong>SetApp</strong></a>(self, app)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>app</strong></dt>
+</dl>
+<dl><dt><strong>app_type</strong></dt>
+</dl>
+<dl><dt><strong>platform_backend</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
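A minimal, hypothetical usage sketch of the CrOSBrowserBackend lifecycle documented above. Only the constructor and method signatures come from the generated page; the cros_platform_backend, browser_options, and cri objects are assumed to be supplied by telemetry's existing CrOS plumbing and are not defined here.

# Hypothetical sketch only: signatures taken from the documentation above;
# the three input objects are assumed to come from telemetry's own setup code.
from telemetry.internal.backends.chrome import cros_browser_backend

def run_cros_browser_once(cros_platform_backend, browser_options, cri):
  backend = cros_browser_backend.CrOSBrowserBackend(
      cros_platform_backend, browser_options, cri,
      is_guest=False, extensions_to_load=[])
  backend.Start()
  try:
    assert backend.IsBrowserRunning()
    return backend.GetStandardOutput()
  finally:
    backend.Close()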
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.cros_browser_finder.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.cros_browser_finder.html
new file mode 100644
index 0000000..aed268e
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.cros_browser_finder.html
@@ -0,0 +1,115 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.chrome.cros_browser_finder</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.chrome.html"><font color="#ffffff">chrome</font></a>.cros_browser_finder</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome/cros_browser_finder.py">telemetry/internal/backends/chrome/cros_browser_finder.py</a></font></td></tr></table>
+    <p><tt>Finds&nbsp;CrOS&nbsp;browsers&nbsp;that&nbsp;can&nbsp;be&nbsp;controlled&nbsp;by&nbsp;telemetry.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.browser.browser.html">telemetry.internal.browser.browser</a><br>
+<a href="telemetry.internal.browser.browser_finder_exceptions.html">telemetry.internal.browser.browser_finder_exceptions</a><br>
+<a href="telemetry.internal.backends.chrome.cros_browser_backend.html">telemetry.internal.backends.chrome.cros_browser_backend</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.backends.chrome.cros_browser_with_oobe.html">telemetry.internal.backends.chrome.cros_browser_with_oobe</a><br>
+<a href="telemetry.internal.platform.cros_device.html">telemetry.internal.platform.cros_device</a><br>
+<a href="telemetry.core.cros_interface.html">telemetry.core.cros_interface</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+<a href="telemetry.core.platform.html">telemetry.core.platform</a><br>
+<a href="telemetry.internal.browser.possible_browser.html">telemetry.internal.browser.possible_browser</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a>(<a href="telemetry.internal.app.possible_app.html#PossibleApp">telemetry.internal.app.possible_app.PossibleApp</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome.cros_browser_finder.html#PossibleCrOSBrowser">PossibleCrOSBrowser</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PossibleCrOSBrowser">class <strong>PossibleCrOSBrowser</strong></a>(<a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;launchable&nbsp;CrOS&nbsp;browser&nbsp;instance.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome.cros_browser_finder.html#PossibleCrOSBrowser">PossibleCrOSBrowser</a></dd>
+<dd><a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a></dd>
+<dd><a href="telemetry.internal.app.possible_app.html#PossibleApp">telemetry.internal.app.possible_app.PossibleApp</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="PossibleCrOSBrowser-Create"><strong>Create</strong></a>(self, finder_options)</dt></dl>
+
+<dl><dt><a name="PossibleCrOSBrowser-SupportsOptions"><strong>SupportsOptions</strong></a>(self, finder_options)</dt></dl>
+
+<dl><dt><a name="PossibleCrOSBrowser-UpdateExecutableIfNeeded"><strong>UpdateExecutableIfNeeded</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleCrOSBrowser-__init__"><strong>__init__</strong></a>(self, browser_type, finder_options, cros_platform, is_guest)</dt></dl>
+
+<dl><dt><a name="PossibleCrOSBrowser-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a>:<br>
+<dl><dt><a name="PossibleCrOSBrowser-IsRemote"><strong>IsRemote</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleCrOSBrowser-RunRemote"><strong>RunRemote</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleCrOSBrowser-SetCredentialsPath"><strong>SetCredentialsPath</strong></a>(self, credentials_path)</dt></dl>
+
+<dl><dt><a name="PossibleCrOSBrowser-last_modification_time"><strong>last_modification_time</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a>:<br>
+<dl><dt><strong>browser_type</strong></dt>
+</dl>
+<dl><dt><strong>supports_tab_control</strong></dt>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.app.possible_app.html#PossibleApp">telemetry.internal.app.possible_app.PossibleApp</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>app_type</strong></dt>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+</dl>
+<dl><dt><strong>target_os</strong></dt>
+<dd><tt>Target&nbsp;OS&nbsp;the&nbsp;app&nbsp;will&nbsp;run&nbsp;on.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-CanFindAvailableBrowsers"><strong>CanFindAvailableBrowsers</strong></a>(finder_options)</dt></dl>
+ <dl><dt><a name="-FindAllAvailableBrowsers"><strong>FindAllAvailableBrowsers</strong></a>(finder_options, device)</dt><dd><tt>Finds&nbsp;all&nbsp;available&nbsp;CrOS&nbsp;browsers,&nbsp;locally&nbsp;and&nbsp;remotely.</tt></dd></dl>
+ <dl><dt><a name="-FindAllBrowserTypes"><strong>FindAllBrowserTypes</strong></a>(_)</dt></dl>
+ <dl><dt><a name="-SelectDefaultBrowser"><strong>SelectDefaultBrowser</strong></a>(possible_browsers)</dt></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
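The finder module above exposes four module-level functions; the hedged sketch below shows how they compose. The finder_options and device arguments are placeholders for the objects telemetry's option parsing and device discovery normally produce, not values defined on this page.

# Hypothetical sketch of the documented module-level finder functions;
# finder_options and device are assumed inputs from telemetry itself.
from telemetry.internal.backends.chrome import cros_browser_finder

def pick_default_cros_browser(finder_options, device):
  if not cros_browser_finder.CanFindAvailableBrowsers(finder_options):
    return None
  candidates = cros_browser_finder.FindAllAvailableBrowsers(finder_options, device)
  if not candidates:
    return None
  # SelectDefaultBrowser chooses among the PossibleCrOSBrowser candidates.
  return cros_browser_finder.SelectDefaultBrowser(candidates)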
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.cros_browser_with_oobe.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.cros_browser_with_oobe.html
new file mode 100644
index 0000000..5565f52
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.cros_browser_with_oobe.html
@@ -0,0 +1,183 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.chrome.cros_browser_with_oobe</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.chrome.html"><font color="#ffffff">chrome</font></a>.cros_browser_with_oobe</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome/cros_browser_with_oobe.py">telemetry/internal/backends/chrome/cros_browser_with_oobe.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.browser.browser.html">telemetry.internal.browser.browser</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.backends.chrome.cros_browser_backend.html">telemetry.internal.backends.chrome.cros_browser_backend</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.browser.browser.html#Browser">telemetry.internal.browser.browser.Browser</a>(<a href="telemetry.internal.app.html#App">telemetry.internal.app.App</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome.cros_browser_with_oobe.html#CrOSBrowserWithOOBE">CrOSBrowserWithOOBE</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="CrOSBrowserWithOOBE">class <strong>CrOSBrowserWithOOBE</strong></a>(<a href="telemetry.internal.browser.browser.html#Browser">telemetry.internal.browser.browser.Browser</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>CrOS-specific&nbsp;browser.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome.cros_browser_with_oobe.html#CrOSBrowserWithOOBE">CrOSBrowserWithOOBE</a></dd>
+<dd><a href="telemetry.internal.browser.browser.html#Browser">telemetry.internal.browser.browser.Browser</a></dd>
+<dd><a href="telemetry.internal.app.html#App">telemetry.internal.app.App</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="CrOSBrowserWithOOBE-__init__"><strong>__init__</strong></a>(self, backend, platform_backend, credentials_path)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>oobe</strong></dt>
+<dd><tt>The&nbsp;login&nbsp;webui&nbsp;(also&nbsp;serves&nbsp;as&nbsp;ui&nbsp;for&nbsp;screenlock&nbsp;and<br>
+out-of-box-experience).</tt></dd>
+</dl>
+<dl><dt><strong>oobe_exists</strong></dt>
+<dd><tt>True&nbsp;if&nbsp;the&nbsp;login/oobe/screenlock&nbsp;webui&nbsp;exists.&nbsp;This&nbsp;is&nbsp;more&nbsp;lightweight<br>
+than&nbsp;accessing&nbsp;the&nbsp;oobe&nbsp;property.</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.browser.browser.html#Browser">telemetry.internal.browser.browser.Browser</a>:<br>
+<dl><dt><a name="CrOSBrowserWithOOBE-Close"><strong>Close</strong></a>(self)</dt><dd><tt>Closes&nbsp;this&nbsp;browser.</tt></dd></dl>
+
+<dl><dt><a name="CrOSBrowserWithOOBE-DumpMemory"><strong>DumpMemory</strong></a>(self, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="CrOSBrowserWithOOBE-GetStackTrace"><strong>GetStackTrace</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrOSBrowserWithOOBE-GetStandardOutput"><strong>GetStandardOutput</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrOSBrowserWithOOBE-GetSystemInfo"><strong>GetSystemInfo</strong></a>(self)</dt><dd><tt>Returns&nbsp;low-level&nbsp;information&nbsp;about&nbsp;the&nbsp;system,&nbsp;if&nbsp;available.<br>
+&nbsp;<br>
+See&nbsp;the&nbsp;documentation&nbsp;of&nbsp;the&nbsp;SystemInfo&nbsp;class&nbsp;for&nbsp;more&nbsp;details.</tt></dd></dl>
+
+<dl><dt><a name="CrOSBrowserWithOOBE-SetMemoryPressureNotificationsSuppressed"><strong>SetMemoryPressureNotificationsSuppressed</strong></a>(self, suppressed, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="CrOSBrowserWithOOBE-SimulateMemoryPressureNotification"><strong>SimulateMemoryPressureNotification</strong></a>(self, pressure_level, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.browser.browser.html#Browser">telemetry.internal.browser.browser.Browser</a>:<br>
+<dl><dt><strong>browser_type</strong></dt>
+</dl>
+<dl><dt><strong>cpu_stats</strong></dt>
+<dd><tt>Returns&nbsp;a&nbsp;dict&nbsp;of&nbsp;cpu&nbsp;statistics&nbsp;for&nbsp;the&nbsp;system.<br>
+{&nbsp;'Browser':&nbsp;{<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'CpuProcessTime':&nbsp;S,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'TotalTime':&nbsp;T<br>
+&nbsp;&nbsp;},<br>
+&nbsp;&nbsp;'Gpu':&nbsp;{<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'CpuProcessTime':&nbsp;S,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'TotalTime':&nbsp;T<br>
+&nbsp;&nbsp;},<br>
+&nbsp;&nbsp;'Renderer':&nbsp;{<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'CpuProcessTime':&nbsp;S,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'TotalTime':&nbsp;T<br>
+&nbsp;&nbsp;}<br>
+}<br>
+Any&nbsp;of&nbsp;the&nbsp;above&nbsp;keys&nbsp;may&nbsp;be&nbsp;missing&nbsp;on&nbsp;a&nbsp;per-platform&nbsp;basis.</tt></dd>
+</dl>
+<dl><dt><strong>extensions</strong></dt>
+</dl>
+<dl><dt><strong>foreground_tab</strong></dt>
+</dl>
+<dl><dt><strong>memory_stats</strong></dt>
+<dd><tt>Returns&nbsp;a&nbsp;dict&nbsp;of&nbsp;memory&nbsp;statistics&nbsp;for&nbsp;the&nbsp;browser:<br>
+{&nbsp;'Browser':&nbsp;{<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'VM':&nbsp;R,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'VMPeak':&nbsp;S,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'WorkingSetSize':&nbsp;T,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'WorkingSetSizePeak':&nbsp;U,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'ProportionalSetSize':&nbsp;V,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'PrivateDirty':&nbsp;W<br>
+&nbsp;&nbsp;},<br>
+&nbsp;&nbsp;'Gpu':&nbsp;{<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'VM':&nbsp;R,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'VMPeak':&nbsp;S,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'WorkingSetSize':&nbsp;T,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'WorkingSetSizePeak':&nbsp;U,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'ProportionalSetSize':&nbsp;V,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'PrivateDirty':&nbsp;W<br>
+&nbsp;&nbsp;},<br>
+&nbsp;&nbsp;'Renderer':&nbsp;{<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'VM':&nbsp;R,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'VMPeak':&nbsp;S,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'WorkingSetSize':&nbsp;T,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'WorkingSetSizePeak':&nbsp;U,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'ProportionalSetSize':&nbsp;V,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'PrivateDirty':&nbsp;W<br>
+&nbsp;&nbsp;},<br>
+&nbsp;&nbsp;'SystemCommitCharge':&nbsp;X,<br>
+&nbsp;&nbsp;'SystemTotalPhysicalMemory':&nbsp;Y,<br>
+&nbsp;&nbsp;'ProcessCount':&nbsp;Z,<br>
+}<br>
+Any&nbsp;of&nbsp;the&nbsp;above&nbsp;keys&nbsp;may&nbsp;be&nbsp;missing&nbsp;on&nbsp;a&nbsp;per-platform&nbsp;basis.</tt></dd>
+</dl>
+<dl><dt><strong>profiling_controller</strong></dt>
+</dl>
+<dl><dt><strong>supports_cpu_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_extensions</strong></dt>
+</dl>
+<dl><dt><strong>supports_memory_dumping</strong></dt>
+</dl>
+<dl><dt><strong>supports_memory_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_overriding_memory_pressure_notifications</strong></dt>
+</dl>
+<dl><dt><strong>supports_power_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_system_info</strong></dt>
+</dl>
+<dl><dt><strong>supports_tab_control</strong></dt>
+</dl>
+<dl><dt><strong>tabs</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.app.html#App">telemetry.internal.app.App</a>:<br>
+<dl><dt><a name="CrOSBrowserWithOOBE-__enter__"><strong>__enter__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrOSBrowserWithOOBE-__exit__"><strong>__exit__</strong></a>(self, *args)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.app.html#App">telemetry.internal.app.App</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>app_type</strong></dt>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
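CrOSBrowserWithOOBE layers the oobe/oobe_exists descriptors on top of the regular Browser interface and inherits App's __enter__/__exit__, so it can be used as a context manager. The sketch below is hypothetical: the backend, platform_backend, and credentials_path arguments are assumed to be prepared by the CrOS finder/backend code shown earlier.

# Hypothetical sketch: constructor signature and oobe/oobe_exists descriptors
# are taken from the documentation above; the arguments are assumed inputs.
from telemetry.internal.backends.chrome import cros_browser_with_oobe

def inspect_login_ui(backend, platform_backend, credentials_path):
  browser = cros_browser_with_oobe.CrOSBrowserWithOOBE(
      backend, platform_backend, credentials_path)
  with browser:
    if browser.oobe_exists:   # lightweight check before touching the oobe property
      login_webui = browser.oobe
      # ... interact with the login/screenlock webui here ...
    return browser.GetSystemInfo()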
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.cros_test_case.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.cros_test_case.html
new file mode 100644
index 0000000..b12b369
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.cros_test_case.html
@@ -0,0 +1,339 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.chrome.cros_test_case</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.chrome.html"><font color="#ffffff">chrome</font></a>.cros_test_case</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome/cros_test_case.py">telemetry/internal/backends/chrome/cros_test_case.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.browser.browser_finder.html">telemetry.internal.browser.browser_finder</a><br>
+<a href="telemetry.core.cros_interface.html">telemetry.core.cros_interface</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.browser.extension_to_load.html">telemetry.internal.browser.extension_to_load</a><br>
+<a href="telemetry.testing.options_for_unittests.html">telemetry.testing.options_for_unittests</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+<a href="unittest.html">unittest</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="unittest.case.html#TestCase">unittest.case.TestCase</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome.cros_test_case.html#CrOSTestCase">CrOSTestCase</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="CrOSTestCase">class <strong>CrOSTestCase</strong></a>(<a href="unittest.case.html#TestCase">unittest.case.TestCase</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome.cros_test_case.html#CrOSTestCase">CrOSTestCase</a></dd>
+<dd><a href="unittest.case.html#TestCase">unittest.case.TestCase</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="CrOSTestCase-setUp"><strong>setUp</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="unittest.case.html#TestCase">unittest.case.TestCase</a>:<br>
+<dl><dt><a name="CrOSTestCase-__call__"><strong>__call__</strong></a>(self, *args, **kwds)</dt></dl>
+
+<dl><dt><a name="CrOSTestCase-__eq__"><strong>__eq__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="CrOSTestCase-__hash__"><strong>__hash__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrOSTestCase-__init__"><strong>__init__</strong></a>(self, methodName<font color="#909090">='runTest'</font>)</dt><dd><tt>Create&nbsp;an&nbsp;instance&nbsp;of&nbsp;the&nbsp;class&nbsp;that&nbsp;will&nbsp;use&nbsp;the&nbsp;named&nbsp;test<br>
+method&nbsp;when&nbsp;executed.&nbsp;Raises&nbsp;a&nbsp;ValueError&nbsp;if&nbsp;the&nbsp;instance&nbsp;does<br>
+not&nbsp;have&nbsp;a&nbsp;method&nbsp;with&nbsp;the&nbsp;specified&nbsp;name.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-__ne__"><strong>__ne__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="CrOSTestCase-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrOSTestCase-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrOSTestCase-addCleanup"><strong>addCleanup</strong></a>(self, function, *args, **kwargs)</dt><dd><tt>Add&nbsp;a&nbsp;function,&nbsp;with&nbsp;arguments,&nbsp;to&nbsp;be&nbsp;called&nbsp;when&nbsp;the&nbsp;test&nbsp;is<br>
+completed.&nbsp;Functions&nbsp;added&nbsp;are&nbsp;called&nbsp;on&nbsp;a&nbsp;LIFO&nbsp;basis&nbsp;and&nbsp;are<br>
+called&nbsp;after&nbsp;tearDown&nbsp;on&nbsp;test&nbsp;failure&nbsp;or&nbsp;success.<br>
+&nbsp;<br>
+Cleanup&nbsp;items&nbsp;are&nbsp;called&nbsp;even&nbsp;if&nbsp;setUp&nbsp;fails&nbsp;(unlike&nbsp;tearDown).</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-addTypeEqualityFunc"><strong>addTypeEqualityFunc</strong></a>(self, typeobj, function)</dt><dd><tt>Add&nbsp;a&nbsp;type&nbsp;specific&nbsp;assertEqual&nbsp;style&nbsp;function&nbsp;to&nbsp;compare&nbsp;a&nbsp;type.<br>
+&nbsp;<br>
+This&nbsp;method&nbsp;is&nbsp;for&nbsp;use&nbsp;by&nbsp;<a href="unittest.case.html#TestCase">TestCase</a>&nbsp;subclasses&nbsp;that&nbsp;need&nbsp;to&nbsp;register<br>
+their&nbsp;own&nbsp;type&nbsp;equality&nbsp;functions&nbsp;to&nbsp;provide&nbsp;nicer&nbsp;error&nbsp;messages.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;typeobj:&nbsp;The&nbsp;data&nbsp;type&nbsp;to&nbsp;call&nbsp;this&nbsp;function&nbsp;on&nbsp;when&nbsp;both&nbsp;values<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;are&nbsp;of&nbsp;the&nbsp;same&nbsp;type&nbsp;in&nbsp;<a href="#CrOSTestCase-assertEqual">assertEqual</a>().<br>
+&nbsp;&nbsp;&nbsp;&nbsp;function:&nbsp;The&nbsp;callable&nbsp;taking&nbsp;two&nbsp;arguments&nbsp;and&nbsp;an&nbsp;optional<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;msg=&nbsp;argument&nbsp;that&nbsp;raises&nbsp;self.<strong>failureException</strong>&nbsp;with&nbsp;a<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;useful&nbsp;error&nbsp;message&nbsp;when&nbsp;the&nbsp;two&nbsp;arguments&nbsp;are&nbsp;not&nbsp;equal.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertAlmostEqual"><strong>assertAlmostEqual</strong></a>(self, first, second, places<font color="#909090">=None</font>, msg<font color="#909090">=None</font>, delta<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;unequal&nbsp;as&nbsp;determined&nbsp;by&nbsp;their<br>
+difference&nbsp;rounded&nbsp;to&nbsp;the&nbsp;given&nbsp;number&nbsp;of&nbsp;decimal&nbsp;places<br>
+(default&nbsp;7)&nbsp;and&nbsp;comparing&nbsp;to&nbsp;zero,&nbsp;or&nbsp;by&nbsp;comparing&nbsp;that&nbsp;the<br>
+difference&nbsp;between&nbsp;the&nbsp;two&nbsp;objects&nbsp;is&nbsp;more&nbsp;than&nbsp;the&nbsp;given&nbsp;delta.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;decimal&nbsp;places&nbsp;(from&nbsp;zero)&nbsp;are&nbsp;usually&nbsp;not&nbsp;the&nbsp;same<br>
+as&nbsp;significant&nbsp;digits&nbsp;(measured&nbsp;from&nbsp;the&nbsp;most&nbsp;significant&nbsp;digit).<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;two&nbsp;objects&nbsp;compare&nbsp;equal&nbsp;then&nbsp;they&nbsp;will&nbsp;automatically<br>
+compare&nbsp;almost&nbsp;equal.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertAlmostEquals"><strong>assertAlmostEquals</strong></a> = assertAlmostEqual(self, first, second, places<font color="#909090">=None</font>, msg<font color="#909090">=None</font>, delta<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;unequal&nbsp;as&nbsp;determined&nbsp;by&nbsp;their<br>
+difference&nbsp;rounded&nbsp;to&nbsp;the&nbsp;given&nbsp;number&nbsp;of&nbsp;decimal&nbsp;places<br>
+(default&nbsp;7)&nbsp;and&nbsp;comparing&nbsp;to&nbsp;zero,&nbsp;or&nbsp;by&nbsp;comparing&nbsp;that&nbsp;the<br>
+difference&nbsp;between&nbsp;the&nbsp;two&nbsp;objects&nbsp;is&nbsp;more&nbsp;than&nbsp;the&nbsp;given&nbsp;delta.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;decimal&nbsp;places&nbsp;(from&nbsp;zero)&nbsp;are&nbsp;usually&nbsp;not&nbsp;the&nbsp;same<br>
+as&nbsp;significant&nbsp;digits&nbsp;(measured&nbsp;from&nbsp;the&nbsp;most&nbsp;significant&nbsp;digit).<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;two&nbsp;objects&nbsp;compare&nbsp;equal&nbsp;then&nbsp;they&nbsp;will&nbsp;automatically<br>
+compare&nbsp;almost&nbsp;equal.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertDictContainsSubset"><strong>assertDictContainsSubset</strong></a>(self, expected, actual, msg<font color="#909090">=None</font>)</dt><dd><tt>Checks&nbsp;whether&nbsp;actual&nbsp;is&nbsp;a&nbsp;superset&nbsp;of&nbsp;expected.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertDictEqual"><strong>assertDictEqual</strong></a>(self, d1, d2, msg<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="CrOSTestCase-assertEqual"><strong>assertEqual</strong></a>(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;unequal&nbsp;as&nbsp;determined&nbsp;by&nbsp;the&nbsp;'=='<br>
+operator.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertEquals"><strong>assertEquals</strong></a> = assertEqual(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;unequal&nbsp;as&nbsp;determined&nbsp;by&nbsp;the&nbsp;'=='<br>
+operator.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertFalse"><strong>assertFalse</strong></a>(self, expr, msg<font color="#909090">=None</font>)</dt><dd><tt>Check&nbsp;that&nbsp;the&nbsp;expression&nbsp;is&nbsp;false.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertGreater"><strong>assertGreater</strong></a>(self, a, b, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#CrOSTestCase-assertTrue">assertTrue</a>(a&nbsp;&gt;&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertGreaterEqual"><strong>assertGreaterEqual</strong></a>(self, a, b, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#CrOSTestCase-assertTrue">assertTrue</a>(a&nbsp;&gt;=&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertIn"><strong>assertIn</strong></a>(self, member, container, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#CrOSTestCase-assertTrue">assertTrue</a>(a&nbsp;in&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertIs"><strong>assertIs</strong></a>(self, expr1, expr2, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#CrOSTestCase-assertTrue">assertTrue</a>(a&nbsp;is&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertIsInstance"><strong>assertIsInstance</strong></a>(self, obj, cls, msg<font color="#909090">=None</font>)</dt><dd><tt>Same&nbsp;as&nbsp;<a href="#CrOSTestCase-assertTrue">assertTrue</a>(isinstance(obj,&nbsp;cls)),&nbsp;with&nbsp;a&nbsp;nicer<br>
+default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertIsNone"><strong>assertIsNone</strong></a>(self, obj, msg<font color="#909090">=None</font>)</dt><dd><tt>Same&nbsp;as&nbsp;<a href="#CrOSTestCase-assertTrue">assertTrue</a>(obj&nbsp;is&nbsp;None),&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertIsNot"><strong>assertIsNot</strong></a>(self, expr1, expr2, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#CrOSTestCase-assertTrue">assertTrue</a>(a&nbsp;is&nbsp;not&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertIsNotNone"><strong>assertIsNotNone</strong></a>(self, obj, msg<font color="#909090">=None</font>)</dt><dd><tt>Included&nbsp;for&nbsp;symmetry&nbsp;with&nbsp;assertIsNone.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertItemsEqual"><strong>assertItemsEqual</strong></a>(self, expected_seq, actual_seq, msg<font color="#909090">=None</font>)</dt><dd><tt>An&nbsp;unordered&nbsp;sequence&nbsp;specific&nbsp;comparison.&nbsp;It&nbsp;asserts&nbsp;that<br>
+actual_seq&nbsp;and&nbsp;expected_seq&nbsp;have&nbsp;the&nbsp;same&nbsp;element&nbsp;counts.<br>
+Equivalent&nbsp;to::<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;<a href="#CrOSTestCase-assertEqual">assertEqual</a>(Counter(iter(actual_seq)),<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Counter(iter(expected_seq)))<br>
+&nbsp;<br>
+Asserts&nbsp;that&nbsp;each&nbsp;element&nbsp;has&nbsp;the&nbsp;same&nbsp;count&nbsp;in&nbsp;both&nbsp;sequences.<br>
+Example:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;-&nbsp;[0,&nbsp;1,&nbsp;1]&nbsp;and&nbsp;[1,&nbsp;0,&nbsp;1]&nbsp;compare&nbsp;equal.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;-&nbsp;[0,&nbsp;0,&nbsp;1]&nbsp;and&nbsp;[0,&nbsp;1]&nbsp;compare&nbsp;unequal.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertLess"><strong>assertLess</strong></a>(self, a, b, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#CrOSTestCase-assertTrue">assertTrue</a>(a&nbsp;&lt;&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertLessEqual"><strong>assertLessEqual</strong></a>(self, a, b, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#CrOSTestCase-assertTrue">assertTrue</a>(a&nbsp;&lt;=&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertListEqual"><strong>assertListEqual</strong></a>(self, list1, list2, msg<font color="#909090">=None</font>)</dt><dd><tt>A&nbsp;list-specific&nbsp;equality&nbsp;assertion.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;list1:&nbsp;The&nbsp;first&nbsp;list&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;list2:&nbsp;The&nbsp;second&nbsp;list&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;msg:&nbsp;Optional&nbsp;message&nbsp;to&nbsp;use&nbsp;on&nbsp;failure&nbsp;instead&nbsp;of&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;differences.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertMultiLineEqual"><strong>assertMultiLineEqual</strong></a>(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Assert&nbsp;that&nbsp;two&nbsp;multi-line&nbsp;strings&nbsp;are&nbsp;equal.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertNotAlmostEqual"><strong>assertNotAlmostEqual</strong></a>(self, first, second, places<font color="#909090">=None</font>, msg<font color="#909090">=None</font>, delta<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;equal&nbsp;as&nbsp;determined&nbsp;by&nbsp;their<br>
+difference&nbsp;rounded&nbsp;to&nbsp;the&nbsp;given&nbsp;number&nbsp;of&nbsp;decimal&nbsp;places<br>
+(default&nbsp;7)&nbsp;and&nbsp;comparing&nbsp;to&nbsp;zero,&nbsp;or&nbsp;by&nbsp;comparing&nbsp;that&nbsp;the<br>
+difference&nbsp;between&nbsp;the&nbsp;two&nbsp;objects&nbsp;is&nbsp;less&nbsp;than&nbsp;the&nbsp;given&nbsp;delta.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;decimal&nbsp;places&nbsp;(from&nbsp;zero)&nbsp;are&nbsp;usually&nbsp;not&nbsp;the&nbsp;same<br>
+as&nbsp;significant&nbsp;digits&nbsp;(measured&nbsp;from&nbsp;the&nbsp;most&nbsp;significant&nbsp;digit).<br>
+&nbsp;<br>
+Objects&nbsp;that&nbsp;are&nbsp;equal&nbsp;automatically&nbsp;fail.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertNotAlmostEquals"><strong>assertNotAlmostEquals</strong></a> = assertNotAlmostEqual(self, first, second, places<font color="#909090">=None</font>, msg<font color="#909090">=None</font>, delta<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;equal&nbsp;as&nbsp;determined&nbsp;by&nbsp;their<br>
+difference&nbsp;rounded&nbsp;to&nbsp;the&nbsp;given&nbsp;number&nbsp;of&nbsp;decimal&nbsp;places<br>
+(default&nbsp;7)&nbsp;and&nbsp;comparing&nbsp;to&nbsp;zero,&nbsp;or&nbsp;by&nbsp;comparing&nbsp;that&nbsp;the<br>
+difference&nbsp;between&nbsp;the&nbsp;two&nbsp;objects&nbsp;is&nbsp;less&nbsp;than&nbsp;the&nbsp;given&nbsp;delta.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;decimal&nbsp;places&nbsp;(from&nbsp;zero)&nbsp;are&nbsp;usually&nbsp;not&nbsp;the&nbsp;same<br>
+as&nbsp;significant&nbsp;digits&nbsp;(measured&nbsp;from&nbsp;the&nbsp;most&nbsp;significant&nbsp;digit).<br>
+&nbsp;<br>
+Objects&nbsp;that&nbsp;are&nbsp;equal&nbsp;automatically&nbsp;fail.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertNotEqual"><strong>assertNotEqual</strong></a>(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;equal&nbsp;as&nbsp;determined&nbsp;by&nbsp;the&nbsp;'!='<br>
+operator.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertNotEquals"><strong>assertNotEquals</strong></a> = assertNotEqual(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;equal&nbsp;as&nbsp;determined&nbsp;by&nbsp;the&nbsp;'!='<br>
+operator.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertNotIn"><strong>assertNotIn</strong></a>(self, member, container, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#CrOSTestCase-assertTrue">assertTrue</a>(a&nbsp;not&nbsp;in&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertNotIsInstance"><strong>assertNotIsInstance</strong></a>(self, obj, cls, msg<font color="#909090">=None</font>)</dt><dd><tt>Included&nbsp;for&nbsp;symmetry&nbsp;with&nbsp;assertIsInstance.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertNotRegexpMatches"><strong>assertNotRegexpMatches</strong></a>(self, text, unexpected_regexp, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;the&nbsp;test&nbsp;if&nbsp;the&nbsp;text&nbsp;matches&nbsp;the&nbsp;regular&nbsp;expression.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertRaises"><strong>assertRaises</strong></a>(self, excClass, callableObj<font color="#909090">=None</font>, *args, **kwargs)</dt><dd><tt>Fail&nbsp;unless&nbsp;an&nbsp;exception&nbsp;of&nbsp;class&nbsp;excClass&nbsp;is&nbsp;raised<br>
+by&nbsp;callableObj&nbsp;when&nbsp;invoked&nbsp;with&nbsp;arguments&nbsp;args&nbsp;and&nbsp;keyword<br>
+arguments&nbsp;kwargs.&nbsp;If&nbsp;a&nbsp;different&nbsp;type&nbsp;of&nbsp;exception&nbsp;is<br>
+raised,&nbsp;it&nbsp;will&nbsp;not&nbsp;be&nbsp;caught,&nbsp;and&nbsp;the&nbsp;test&nbsp;case&nbsp;will&nbsp;be<br>
+deemed&nbsp;to&nbsp;have&nbsp;suffered&nbsp;an&nbsp;error,&nbsp;exactly&nbsp;as&nbsp;for&nbsp;an<br>
+unexpected&nbsp;exception.<br>
+&nbsp;<br>
+If&nbsp;called&nbsp;with&nbsp;callableObj&nbsp;omitted&nbsp;or&nbsp;None,&nbsp;will&nbsp;return&nbsp;a<br>
+context&nbsp;object&nbsp;used&nbsp;like&nbsp;this::<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;with&nbsp;<a href="#CrOSTestCase-assertRaises">assertRaises</a>(SomeException):<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;do_something()<br>
+&nbsp;<br>
+The&nbsp;context&nbsp;manager&nbsp;keeps&nbsp;a&nbsp;reference&nbsp;to&nbsp;the&nbsp;exception&nbsp;as<br>
+the&nbsp;'exception'&nbsp;attribute.&nbsp;This&nbsp;allows&nbsp;you&nbsp;to&nbsp;inspect&nbsp;the<br>
+exception&nbsp;after&nbsp;the&nbsp;assertion::<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;with&nbsp;<a href="#CrOSTestCase-assertRaises">assertRaises</a>(SomeException)&nbsp;as&nbsp;cm:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;do_something()<br>
+&nbsp;&nbsp;&nbsp;&nbsp;the_exception&nbsp;=&nbsp;cm.exception<br>
+&nbsp;&nbsp;&nbsp;&nbsp;<a href="#CrOSTestCase-assertEqual">assertEqual</a>(the_exception.error_code,&nbsp;3)</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertRaisesRegexp"><strong>assertRaisesRegexp</strong></a>(self, expected_exception, expected_regexp, callable_obj<font color="#909090">=None</font>, *args, **kwargs)</dt><dd><tt>Asserts&nbsp;that&nbsp;the&nbsp;message&nbsp;in&nbsp;a&nbsp;raised&nbsp;exception&nbsp;matches&nbsp;a&nbsp;regexp.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;expected_exception:&nbsp;Exception&nbsp;class&nbsp;expected&nbsp;to&nbsp;be&nbsp;raised.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;expected_regexp:&nbsp;Regexp&nbsp;(re&nbsp;pattern&nbsp;object&nbsp;or&nbsp;string)&nbsp;expected<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;to&nbsp;be&nbsp;found&nbsp;in&nbsp;error&nbsp;message.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;callable_obj:&nbsp;Function&nbsp;to&nbsp;be&nbsp;called.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;args:&nbsp;Extra&nbsp;args.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;kwargs:&nbsp;Extra&nbsp;kwargs.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertRegexpMatches"><strong>assertRegexpMatches</strong></a>(self, text, expected_regexp, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;the&nbsp;test&nbsp;unless&nbsp;the&nbsp;text&nbsp;matches&nbsp;the&nbsp;regular&nbsp;expression.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertSequenceEqual"><strong>assertSequenceEqual</strong></a>(self, seq1, seq2, msg<font color="#909090">=None</font>, seq_type<font color="#909090">=None</font>)</dt><dd><tt>An&nbsp;equality&nbsp;assertion&nbsp;for&nbsp;ordered&nbsp;sequences&nbsp;(like&nbsp;lists&nbsp;and&nbsp;tuples).<br>
+&nbsp;<br>
+For&nbsp;the&nbsp;purposes&nbsp;of&nbsp;this&nbsp;function,&nbsp;a&nbsp;valid&nbsp;ordered&nbsp;sequence&nbsp;type&nbsp;is&nbsp;one<br>
+which&nbsp;can&nbsp;be&nbsp;indexed,&nbsp;has&nbsp;a&nbsp;length,&nbsp;and&nbsp;has&nbsp;an&nbsp;equality&nbsp;operator.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;seq1:&nbsp;The&nbsp;first&nbsp;sequence&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;seq2:&nbsp;The&nbsp;second&nbsp;sequence&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;seq_type:&nbsp;The&nbsp;expected&nbsp;datatype&nbsp;of&nbsp;the&nbsp;sequences,&nbsp;or&nbsp;None&nbsp;if&nbsp;no<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;datatype&nbsp;should&nbsp;be&nbsp;enforced.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;msg:&nbsp;Optional&nbsp;message&nbsp;to&nbsp;use&nbsp;on&nbsp;failure&nbsp;instead&nbsp;of&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;differences.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertSetEqual"><strong>assertSetEqual</strong></a>(self, set1, set2, msg<font color="#909090">=None</font>)</dt><dd><tt>A&nbsp;set-specific&nbsp;equality&nbsp;assertion.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;set1:&nbsp;The&nbsp;first&nbsp;set&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;set2:&nbsp;The&nbsp;second&nbsp;set&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;msg:&nbsp;Optional&nbsp;message&nbsp;to&nbsp;use&nbsp;on&nbsp;failure&nbsp;instead&nbsp;of&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;differences.<br>
+&nbsp;<br>
+assertSetEqual&nbsp;uses&nbsp;ducktyping&nbsp;to&nbsp;support&nbsp;different&nbsp;types&nbsp;of&nbsp;sets,&nbsp;and<br>
+is&nbsp;optimized&nbsp;for&nbsp;sets&nbsp;specifically&nbsp;(parameters&nbsp;must&nbsp;support&nbsp;a<br>
+difference&nbsp;method).</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertTrue"><strong>assertTrue</strong></a>(self, expr, msg<font color="#909090">=None</font>)</dt><dd><tt>Check&nbsp;that&nbsp;the&nbsp;expression&nbsp;is&nbsp;true.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assertTupleEqual"><strong>assertTupleEqual</strong></a>(self, tuple1, tuple2, msg<font color="#909090">=None</font>)</dt><dd><tt>A&nbsp;tuple-specific&nbsp;equality&nbsp;assertion.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;tuple1:&nbsp;The&nbsp;first&nbsp;tuple&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;tuple2:&nbsp;The&nbsp;second&nbsp;tuple&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;msg:&nbsp;Optional&nbsp;message&nbsp;to&nbsp;use&nbsp;on&nbsp;failure&nbsp;instead&nbsp;of&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;differences.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-assert_"><strong>assert_</strong></a> = assertTrue(self, expr, msg<font color="#909090">=None</font>)</dt><dd><tt>Check&nbsp;that&nbsp;the&nbsp;expression&nbsp;is&nbsp;true.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-countTestCases"><strong>countTestCases</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrOSTestCase-debug"><strong>debug</strong></a>(self)</dt><dd><tt>Run&nbsp;the&nbsp;test&nbsp;without&nbsp;collecting&nbsp;errors&nbsp;in&nbsp;a&nbsp;TestResult</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-defaultTestResult"><strong>defaultTestResult</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrOSTestCase-doCleanups"><strong>doCleanups</strong></a>(self)</dt><dd><tt>Execute&nbsp;all&nbsp;cleanup&nbsp;functions.&nbsp;Normally&nbsp;called&nbsp;for&nbsp;you&nbsp;after<br>
+tearDown.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-fail"><strong>fail</strong></a>(self, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;immediately,&nbsp;with&nbsp;the&nbsp;given&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-failIf"><strong>failIf</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="CrOSTestCase-failIfAlmostEqual"><strong>failIfAlmostEqual</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="CrOSTestCase-failIfEqual"><strong>failIfEqual</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="CrOSTestCase-failUnless"><strong>failUnless</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="CrOSTestCase-failUnlessAlmostEqual"><strong>failUnlessAlmostEqual</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="CrOSTestCase-failUnlessEqual"><strong>failUnlessEqual</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="CrOSTestCase-failUnlessRaises"><strong>failUnlessRaises</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="CrOSTestCase-id"><strong>id</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrOSTestCase-run"><strong>run</strong></a>(self, result<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="CrOSTestCase-shortDescription"><strong>shortDescription</strong></a>(self)</dt><dd><tt>Returns&nbsp;a&nbsp;one-line&nbsp;description&nbsp;of&nbsp;the&nbsp;test,&nbsp;or&nbsp;None&nbsp;if&nbsp;no<br>
+description&nbsp;has&nbsp;been&nbsp;provided.<br>
+&nbsp;<br>
+The&nbsp;default&nbsp;implementation&nbsp;of&nbsp;this&nbsp;method&nbsp;returns&nbsp;the&nbsp;first&nbsp;line&nbsp;of<br>
+the&nbsp;specified&nbsp;test&nbsp;method's&nbsp;docstring.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-skipTest"><strong>skipTest</strong></a>(self, reason)</dt><dd><tt>Skip&nbsp;this&nbsp;test.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-tearDown"><strong>tearDown</strong></a>(self)</dt><dd><tt>Hook&nbsp;method&nbsp;for&nbsp;deconstructing&nbsp;the&nbsp;test&nbsp;fixture&nbsp;after&nbsp;testing&nbsp;it.</tt></dd></dl>
+
+<hr>
+Class methods inherited from <a href="unittest.case.html#TestCase">unittest.case.TestCase</a>:<br>
+<dl><dt><a name="CrOSTestCase-setUpClass"><strong>setUpClass</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Hook&nbsp;method&nbsp;for&nbsp;setting&nbsp;up&nbsp;class&nbsp;fixture&nbsp;before&nbsp;running&nbsp;tests&nbsp;in&nbsp;the&nbsp;class.</tt></dd></dl>
+
+<dl><dt><a name="CrOSTestCase-tearDownClass"><strong>tearDownClass</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Hook&nbsp;method&nbsp;for&nbsp;deconstructing&nbsp;the&nbsp;class&nbsp;fixture&nbsp;after&nbsp;running&nbsp;all&nbsp;tests&nbsp;in&nbsp;the&nbsp;class.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="unittest.case.html#TestCase">unittest.case.TestCase</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="unittest.case.html#TestCase">unittest.case.TestCase</a>:<br>
+<dl><dt><strong>failureException</strong> = &lt;type 'exceptions.AssertionError'&gt;<dd><tt>Assertion&nbsp;failed.</tt></dl>
+
+<dl><dt><strong>longMessage</strong> = False</dl>
+
+<dl><dt><strong>maxDiff</strong> = 640</dl>
+
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.crx_id.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.crx_id.html
new file mode 100644
index 0000000..f1b3cfe
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.crx_id.html
Binary files differ
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.desktop_browser_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.desktop_browser_backend.html
new file mode 100644
index 0000000..29637bb
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.desktop_browser_backend.html
@@ -0,0 +1,206 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.chrome.desktop_browser_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.chrome.html"><font color="#ffffff">chrome</font></a>.desktop_browser_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome/desktop_browser_backend.py">telemetry/internal/backends/chrome/desktop_browser_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.util.binary_manager.html">telemetry.internal.util.binary_manager</a><br>
+<a href="telemetry.internal.backends.browser_backend.html">telemetry.internal.backends.browser_backend</a><br>
+<a href="telemetry.internal.backends.chrome.chrome_browser_backend.html">telemetry.internal.backends.chrome.chrome_browser_backend</a><br>
+<a href="catapult_base.cloud_storage.html">catapult_base.cloud_storage</a><br>
+<a href="datetime.html">datetime</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+<a href="glob.html">glob</a><br>
+<a href="heapq.html">heapq</a><br>
+<a href="logging.html">logging</a><br>
+<a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.util.path.html">telemetry.internal.util.path</a><br>
+<a href="random.html">random</a><br>
+<a href="re.html">re</a><br>
+<a href="shutil.html">shutil</a><br>
+<a href="subprocess.html">subprocess</a><br>
+</td><td width="25%" valign=top><a href="sys.html">sys</a><br>
+<a href="tempfile.html">tempfile</a><br>
+<a href="time.html">time</a><br>
+<a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome.chrome_browser_backend.html#ChromeBrowserBackend">telemetry.internal.backends.chrome.chrome_browser_backend.ChromeBrowserBackend</a>(<a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome.desktop_browser_backend.html#DesktopBrowserBackend">DesktopBrowserBackend</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="DesktopBrowserBackend">class <strong>DesktopBrowserBackend</strong></a>(<a href="telemetry.internal.backends.chrome.chrome_browser_backend.html#ChromeBrowserBackend">telemetry.internal.backends.chrome.chrome_browser_backend.ChromeBrowserBackend</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>The&nbsp;backend&nbsp;for&nbsp;controlling&nbsp;a&nbsp;locally-executed&nbsp;browser&nbsp;instance,&nbsp;on&nbsp;Linux,<br>
+Mac&nbsp;or&nbsp;Windows.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome.desktop_browser_backend.html#DesktopBrowserBackend">DesktopBrowserBackend</a></dd>
+<dd><a href="telemetry.internal.backends.chrome.chrome_browser_backend.html#ChromeBrowserBackend">telemetry.internal.backends.chrome.chrome_browser_backend.ChromeBrowserBackend</a></dd>
+<dd><a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a></dd>
+<dd><a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="DesktopBrowserBackend-Close"><strong>Close</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopBrowserBackend-GetBrowserStartupArgs"><strong>GetBrowserStartupArgs</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopBrowserBackend-GetStackTrace"><strong>GetStackTrace</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopBrowserBackend-GetStandardOutput"><strong>GetStandardOutput</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopBrowserBackend-HasBrowserFinishedLaunching"><strong>HasBrowserFinishedLaunching</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopBrowserBackend-IsBrowserRunning"><strong>IsBrowserRunning</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopBrowserBackend-Start"><strong>Start</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopBrowserBackend-__del__"><strong>__del__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopBrowserBackend-__init__"><strong>__init__</strong></a>(self, desktop_platform_backend, browser_options, executable, flash_path, is_content_shell, browser_directory, output_profile_path, extensions_to_load)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>browser_directory</strong></dt>
+</dl>
+<dl><dt><strong>log_file_path</strong></dt>
+</dl>
+<dl><dt><strong>pid</strong></dt>
+</dl>
+<dl><dt><strong>profile_directory</strong></dt>
+</dl>
+<dl><dt><strong>supports_uploading_logs</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.chrome.chrome_browser_backend.html#ChromeBrowserBackend">telemetry.internal.backends.chrome.chrome_browser_backend.ChromeBrowserBackend</a>:<br>
+<dl><dt><a name="DesktopBrowserBackend-DumpMemory"><strong>DumpMemory</strong></a>(self, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="DesktopBrowserBackend-GetProcessName"><strong>GetProcessName</strong></a>(self, cmd_line)</dt><dd><tt>Returns&nbsp;a&nbsp;user-friendly&nbsp;name&nbsp;for&nbsp;the&nbsp;process&nbsp;of&nbsp;the&nbsp;given&nbsp;|cmd_line|.</tt></dd></dl>
+
+<dl><dt><a name="DesktopBrowserBackend-GetReplayBrowserStartupArgs"><strong>GetReplayBrowserStartupArgs</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopBrowserBackend-GetSystemInfo"><strong>GetSystemInfo</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopBrowserBackend-SetMemoryPressureNotificationsSuppressed"><strong>SetMemoryPressureNotificationsSuppressed</strong></a>(self, suppressed, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="DesktopBrowserBackend-SimulateMemoryPressureNotification"><strong>SimulateMemoryPressureNotification</strong></a>(self, pressure_level, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="DesktopBrowserBackend-StartTracing"><strong>StartTracing</strong></a>(self, trace_options, custom_categories<font color="#909090">=None</font>, timeout<font color="#909090">=90</font>)</dt><dd><tt>Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;trace_options:&nbsp;A&nbsp;tracing_options.TracingOptions&nbsp;instance.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;custom_categories:&nbsp;An&nbsp;optional&nbsp;string&nbsp;containing&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;comma&nbsp;separated&nbsp;categories&nbsp;that&nbsp;will&nbsp;be&nbsp;traced<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;instead&nbsp;of&nbsp;the&nbsp;default&nbsp;category&nbsp;set.&nbsp;&nbsp;Example:&nbsp;use<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;"webkit,cc,disabled-by-default-cc.debug"&nbsp;to&nbsp;trace&nbsp;only<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;those&nbsp;three&nbsp;event&nbsp;categories.</tt></dd></dl>
+
+<dl><dt><a name="DesktopBrowserBackend-StopTracing"><strong>StopTracing</strong></a>(self, trace_data_builder)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.chrome.chrome_browser_backend.html#ChromeBrowserBackend">telemetry.internal.backends.chrome.chrome_browser_backend.ChromeBrowserBackend</a>:<br>
+<dl><dt><strong>devtools_client</strong></dt>
+</dl>
+<dl><dt><strong>extension_backend</strong></dt>
+</dl>
+<dl><dt><strong>supports_cpu_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_memory_dumping</strong></dt>
+</dl>
+<dl><dt><strong>supports_memory_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_overriding_memory_pressure_notifications</strong></dt>
+</dl>
+<dl><dt><strong>supports_power_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_system_info</strong></dt>
+</dl>
+<dl><dt><strong>supports_tab_control</strong></dt>
+</dl>
+<dl><dt><strong>supports_tracing</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a>:<br>
+<dl><dt><a name="DesktopBrowserBackend-IsAppRunning"><strong>IsAppRunning</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopBrowserBackend-SetBrowser"><strong>SetBrowser</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="DesktopBrowserBackend-UploadLogsToCloudStorage"><strong>UploadLogsToCloudStorage</strong></a>(self)</dt><dd><tt>Uploading&nbsp;log&nbsp;files&nbsp;produce&nbsp;by&nbsp;this&nbsp;browser&nbsp;instance&nbsp;to&nbsp;cloud&nbsp;storage.<br>
+&nbsp;<br>
+Check&nbsp;supports_uploading_logs&nbsp;before&nbsp;calling&nbsp;this&nbsp;method.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a>:<br>
+<dl><dt><strong>browser</strong></dt>
+</dl>
+<dl><dt><strong>browser_type</strong></dt>
+</dl>
+<dl><dt><strong>profiling_controller_backend</strong></dt>
+</dl>
+<dl><dt><strong>should_ignore_certificate_errors</strong></dt>
+</dl>
+<dl><dt><strong>supports_extensions</strong></dt>
+<dd><tt>True&nbsp;if&nbsp;this&nbsp;browser&nbsp;backend&nbsp;supports&nbsp;extensions.</tt></dd>
+</dl>
+<dl><dt><strong>tab_list_backend</strong></dt>
+</dl>
+<dl><dt><strong>wpr_mode</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a>:<br>
+<dl><dt><a name="DesktopBrowserBackend-SetApp"><strong>SetApp</strong></a>(self, app)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>app</strong></dt>
+</dl>
+<dl><dt><strong>app_type</strong></dt>
+</dl>
+<dl><dt><strong>platform_backend</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-ParseCrashpadDateTime"><strong>ParseCrashpadDateTime</strong></a>(date_time_str)</dt></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.desktop_browser_finder.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.desktop_browser_finder.html
new file mode 100644
index 0000000..124af91
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.desktop_browser_finder.html
@@ -0,0 +1,117 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.chrome.desktop_browser_finder</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.chrome.html"><font color="#ffffff">chrome</font></a>.desktop_browser_finder</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome/desktop_browser_finder.py">telemetry/internal/backends/chrome/desktop_browser_finder.py</a></font></td></tr></table>
+    <p><tt>Finds&nbsp;desktop&nbsp;browsers&nbsp;that&nbsp;can&nbsp;be&nbsp;controlled&nbsp;by&nbsp;telemetry.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.browser.browser.html">telemetry.internal.browser.browser</a><br>
+<a href="telemetry.internal.backends.chrome.desktop_browser_backend.html">telemetry.internal.backends.chrome.desktop_browser_backend</a><br>
+<a href="telemetry.internal.platform.desktop_device.html">telemetry.internal.platform.desktop_device</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+<a href="logging.html">logging</a><br>
+<a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.util.path.html">telemetry.internal.util.path</a><br>
+<a href="telemetry.core.platform.html">telemetry.core.platform</a><br>
+<a href="telemetry.internal.browser.possible_browser.html">telemetry.internal.browser.possible_browser</a><br>
+</td><td width="25%" valign=top><a href="sys.html">sys</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a>(<a href="telemetry.internal.app.possible_app.html#PossibleApp">telemetry.internal.app.possible_app.PossibleApp</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome.desktop_browser_finder.html#PossibleDesktopBrowser">PossibleDesktopBrowser</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PossibleDesktopBrowser">class <strong>PossibleDesktopBrowser</strong></a>(<a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;desktop&nbsp;browser&nbsp;that&nbsp;can&nbsp;be&nbsp;controlled.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome.desktop_browser_finder.html#PossibleDesktopBrowser">PossibleDesktopBrowser</a></dd>
+<dd><a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a></dd>
+<dd><a href="telemetry.internal.app.possible_app.html#PossibleApp">telemetry.internal.app.possible_app.PossibleApp</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="PossibleDesktopBrowser-Create"><strong>Create</strong></a>(self, finder_options)</dt></dl>
+
+<dl><dt><a name="PossibleDesktopBrowser-SupportsOptions"><strong>SupportsOptions</strong></a>(self, finder_options)</dt></dl>
+
+<dl><dt><a name="PossibleDesktopBrowser-UpdateExecutableIfNeeded"><strong>UpdateExecutableIfNeeded</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleDesktopBrowser-__init__"><strong>__init__</strong></a>(self, browser_type, finder_options, executable, flash_path, is_content_shell, browser_directory, is_local_build<font color="#909090">=False</font>)</dt></dl>
+
+<dl><dt><a name="PossibleDesktopBrowser-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleDesktopBrowser-last_modification_time"><strong>last_modification_time</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a>:<br>
+<dl><dt><a name="PossibleDesktopBrowser-IsRemote"><strong>IsRemote</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleDesktopBrowser-RunRemote"><strong>RunRemote</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleDesktopBrowser-SetCredentialsPath"><strong>SetCredentialsPath</strong></a>(self, credentials_path)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a>:<br>
+<dl><dt><strong>browser_type</strong></dt>
+</dl>
+<dl><dt><strong>supports_tab_control</strong></dt>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.app.possible_app.html#PossibleApp">telemetry.internal.app.possible_app.PossibleApp</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>app_type</strong></dt>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+</dl>
+<dl><dt><strong>target_os</strong></dt>
+<dd><tt>Target&nbsp;OS&nbsp;the&nbsp;app&nbsp;will&nbsp;run&nbsp;on.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-CanFindAvailableBrowsers"><strong>CanFindAvailableBrowsers</strong></a>()</dt></dl>
+ <dl><dt><a name="-CanPossiblyHandlePath"><strong>CanPossiblyHandlePath</strong></a>(target_path)</dt></dl>
+ <dl><dt><a name="-FindAllAvailableBrowsers"><strong>FindAllAvailableBrowsers</strong></a>(finder_options, device)</dt><dd><tt>Finds&nbsp;all&nbsp;the&nbsp;desktop&nbsp;browsers&nbsp;available&nbsp;on&nbsp;this&nbsp;machine.</tt></dd></dl>
+ <dl><dt><a name="-FindAllBrowserTypes"><strong>FindAllBrowserTypes</strong></a>(_)</dt></dl>
+ <dl><dt><a name="-SelectDefaultBrowser"><strong>SelectDefaultBrowser</strong></a>(possible_browsers)</dt></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.extension_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.extension_backend.html
new file mode 100644
index 0000000..ca4bb7f
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.extension_backend.html
@@ -0,0 +1,221 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.chrome.extension_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.chrome.html"><font color="#ffffff">chrome</font></a>.extension_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome/extension_backend.py">telemetry/internal/backends/chrome/extension_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="collections.html">collections</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.browser.extension_page.html">telemetry.internal.browser.extension_page</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.backends.chrome_inspector.inspector_backend_list.html">telemetry.internal.backends.chrome_inspector.inspector_backend_list</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="_abcoll.html#Mapping">_abcoll.Mapping</a>(<a href="_abcoll.html#Sized">_abcoll.Sized</a>, <a href="_abcoll.html#Iterable">_abcoll.Iterable</a>, <a href="_abcoll.html#Container">_abcoll.Container</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome.extension_backend.html#ExtensionBackendDict">ExtensionBackendDict</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.inspector_backend_list.html#InspectorBackendList">telemetry.internal.backends.chrome_inspector.inspector_backend_list.InspectorBackendList</a>(<a href="_abcoll.html#Sequence">_abcoll.Sequence</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome.extension_backend.html#ExtensionBackendList">ExtensionBackendList</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ExtensionBackendDict">class <strong>ExtensionBackendDict</strong></a>(<a href="_abcoll.html#Mapping">_abcoll.Mapping</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;dynamic&nbsp;mapping&nbsp;of&nbsp;extension_id&nbsp;to&nbsp;extension_page.ExtensionPages.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome.extension_backend.html#ExtensionBackendDict">ExtensionBackendDict</a></dd>
+<dd><a href="_abcoll.html#Mapping">_abcoll.Mapping</a></dd>
+<dd><a href="_abcoll.html#Sized">_abcoll.Sized</a></dd>
+<dd><a href="_abcoll.html#Iterable">_abcoll.Iterable</a></dd>
+<dd><a href="_abcoll.html#Container">_abcoll.Container</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="ExtensionBackendDict-ContextIdToExtensionId"><strong>ContextIdToExtensionId</strong></a>(self, context_id)</dt></dl>
+
+<dl><dt><a name="ExtensionBackendDict-__getitem__"><strong>__getitem__</strong></a>(self, extension_id)</dt></dl>
+
+<dl><dt><a name="ExtensionBackendDict-__init__"><strong>__init__</strong></a>(self, browser_backend)</dt></dl>
+
+<dl><dt><a name="ExtensionBackendDict-__iter__"><strong>__iter__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ExtensionBackendDict-__len__"><strong>__len__</strong></a>(self)</dt></dl>
+
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>__abstractmethods__</strong> = frozenset([])</dl>
+
+<hr>
+Methods inherited from <a href="_abcoll.html#Mapping">_abcoll.Mapping</a>:<br>
+<dl><dt><a name="ExtensionBackendDict-__contains__"><strong>__contains__</strong></a>(self, key)</dt></dl>
+
+<dl><dt><a name="ExtensionBackendDict-__eq__"><strong>__eq__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="ExtensionBackendDict-__ne__"><strong>__ne__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="ExtensionBackendDict-get"><strong>get</strong></a>(self, key, default<font color="#909090">=None</font>)</dt><dd><tt>D.<a href="#ExtensionBackendDict-get">get</a>(k[,d])&nbsp;-&gt;&nbsp;D[k]&nbsp;if&nbsp;k&nbsp;in&nbsp;D,&nbsp;else&nbsp;d.&nbsp;&nbsp;d&nbsp;defaults&nbsp;to&nbsp;None.</tt></dd></dl>
+
+<dl><dt><a name="ExtensionBackendDict-items"><strong>items</strong></a>(self)</dt><dd><tt>D.<a href="#ExtensionBackendDict-items">items</a>()&nbsp;-&gt;&nbsp;list&nbsp;of&nbsp;D's&nbsp;(key,&nbsp;value)&nbsp;pairs,&nbsp;as&nbsp;2-tuples</tt></dd></dl>
+
+<dl><dt><a name="ExtensionBackendDict-iteritems"><strong>iteritems</strong></a>(self)</dt><dd><tt>D.<a href="#ExtensionBackendDict-iteritems">iteritems</a>()&nbsp;-&gt;&nbsp;an&nbsp;iterator&nbsp;over&nbsp;the&nbsp;(key,&nbsp;value)&nbsp;items&nbsp;of&nbsp;D</tt></dd></dl>
+
+<dl><dt><a name="ExtensionBackendDict-iterkeys"><strong>iterkeys</strong></a>(self)</dt><dd><tt>D.<a href="#ExtensionBackendDict-iterkeys">iterkeys</a>()&nbsp;-&gt;&nbsp;an&nbsp;iterator&nbsp;over&nbsp;the&nbsp;keys&nbsp;of&nbsp;D</tt></dd></dl>
+
+<dl><dt><a name="ExtensionBackendDict-itervalues"><strong>itervalues</strong></a>(self)</dt><dd><tt>D.<a href="#ExtensionBackendDict-itervalues">itervalues</a>()&nbsp;-&gt;&nbsp;an&nbsp;iterator&nbsp;over&nbsp;the&nbsp;values&nbsp;of&nbsp;D</tt></dd></dl>
+
+<dl><dt><a name="ExtensionBackendDict-keys"><strong>keys</strong></a>(self)</dt><dd><tt>D.<a href="#ExtensionBackendDict-keys">keys</a>()&nbsp;-&gt;&nbsp;list&nbsp;of&nbsp;D's&nbsp;keys</tt></dd></dl>
+
+<dl><dt><a name="ExtensionBackendDict-values"><strong>values</strong></a>(self)</dt><dd><tt>D.<a href="#ExtensionBackendDict-values">values</a>()&nbsp;-&gt;&nbsp;list&nbsp;of&nbsp;D's&nbsp;values</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="_abcoll.html#Mapping">_abcoll.Mapping</a>:<br>
+<dl><dt><strong>__hash__</strong> = None</dl>
+
+<hr>
+Class methods inherited from <a href="_abcoll.html#Sized">_abcoll.Sized</a>:<br>
+<dl><dt><a name="ExtensionBackendDict-__subclasshook__"><strong>__subclasshook__</strong></a>(cls, C)<font color="#909090"><font face="helvetica, arial"> from <a href="abc.html#ABCMeta">abc.ABCMeta</a></font></font></dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="_abcoll.html#Sized">_abcoll.Sized</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="_abcoll.html#Sized">_abcoll.Sized</a>:<br>
+<dl><dt><strong>__metaclass__</strong> = &lt;class 'abc.ABCMeta'&gt;<dd><tt>Metaclass&nbsp;for&nbsp;defining&nbsp;Abstract&nbsp;Base&nbsp;Classes&nbsp;(ABCs).<br>
+&nbsp;<br>
+Use&nbsp;this&nbsp;metaclass&nbsp;to&nbsp;create&nbsp;an&nbsp;ABC.&nbsp;&nbsp;An&nbsp;ABC&nbsp;can&nbsp;be&nbsp;subclassed<br>
+directly,&nbsp;and&nbsp;then&nbsp;acts&nbsp;as&nbsp;a&nbsp;mix-in&nbsp;class.&nbsp;&nbsp;You&nbsp;can&nbsp;also&nbsp;register<br>
+unrelated&nbsp;concrete&nbsp;classes&nbsp;(even&nbsp;built-in&nbsp;classes)&nbsp;and&nbsp;unrelated<br>
+ABCs&nbsp;as&nbsp;'virtual&nbsp;subclasses'&nbsp;--&nbsp;these&nbsp;and&nbsp;their&nbsp;descendants&nbsp;will<br>
+be&nbsp;considered&nbsp;subclasses&nbsp;of&nbsp;the&nbsp;registering&nbsp;ABC&nbsp;by&nbsp;the&nbsp;built-in<br>
+issubclass()&nbsp;function,&nbsp;but&nbsp;the&nbsp;registering&nbsp;ABC&nbsp;won't&nbsp;show&nbsp;up&nbsp;in<br>
+their&nbsp;MRO&nbsp;(Method&nbsp;Resolution&nbsp;Order)&nbsp;nor&nbsp;will&nbsp;method<br>
+implementations&nbsp;defined&nbsp;by&nbsp;the&nbsp;registering&nbsp;ABC&nbsp;be&nbsp;callable&nbsp;(not<br>
+even&nbsp;via&nbsp;super()).</tt></dl>
+
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ExtensionBackendList">class <strong>ExtensionBackendList</strong></a>(<a href="telemetry.internal.backends.chrome_inspector.inspector_backend_list.html#InspectorBackendList">telemetry.internal.backends.chrome_inspector.inspector_backend_list.InspectorBackendList</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;dynamic&nbsp;sequence&nbsp;of&nbsp;extension_page.ExtensionPages.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome.extension_backend.html#ExtensionBackendList">ExtensionBackendList</a></dd>
+<dd><a href="telemetry.internal.backends.chrome_inspector.inspector_backend_list.html#InspectorBackendList">telemetry.internal.backends.chrome_inspector.inspector_backend_list.InspectorBackendList</a></dd>
+<dd><a href="_abcoll.html#Sequence">_abcoll.Sequence</a></dd>
+<dd><a href="_abcoll.html#Sized">_abcoll.Sized</a></dd>
+<dd><a href="_abcoll.html#Iterable">_abcoll.Iterable</a></dd>
+<dd><a href="_abcoll.html#Container">_abcoll.Container</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="ExtensionBackendList-CreateWrapper"><strong>CreateWrapper</strong></a>(self, inspector_backend)</dt></dl>
+
+<dl><dt><a name="ExtensionBackendList-ShouldIncludeContext"><strong>ShouldIncludeContext</strong></a>(self, context)</dt></dl>
+
+<dl><dt><a name="ExtensionBackendList-__init__"><strong>__init__</strong></a>(self, browser_backend)</dt></dl>
+
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>__abstractmethods__</strong> = frozenset([])</dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.chrome_inspector.inspector_backend_list.html#InspectorBackendList">telemetry.internal.backends.chrome_inspector.inspector_backend_list.InspectorBackendList</a>:<br>
+<dl><dt><a name="ExtensionBackendList-GetBackendFromContextId"><strong>GetBackendFromContextId</strong></a>(self, context_id)</dt></dl>
+
+<dl><dt><a name="ExtensionBackendList-GetContextInfo"><strong>GetContextInfo</strong></a>(self, context_id)</dt></dl>
+
+<dl><dt><a name="ExtensionBackendList-GetTabById"><strong>GetTabById</strong></a>(self, identifier)</dt></dl>
+
+<dl><dt><a name="ExtensionBackendList-IterContextIds"><strong>IterContextIds</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ExtensionBackendList-__getitem__"><strong>__getitem__</strong></a>(self, index)</dt><dd><tt>#&nbsp;TODO(nednguyen):&nbsp;Remove&nbsp;this&nbsp;method&nbsp;and&nbsp;turn&nbsp;inspector_backend_list&nbsp;API&nbsp;to<br>
+#&nbsp;dictionary-like&nbsp;API&nbsp;(crbug.com/398467)</tt></dd></dl>
+
+<dl><dt><a name="ExtensionBackendList-__iter__"><strong>__iter__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ExtensionBackendList-__len__"><strong>__len__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.chrome_inspector.inspector_backend_list.html#InspectorBackendList">telemetry.internal.backends.chrome_inspector.inspector_backend_list.InspectorBackendList</a>:<br>
+<dl><dt><strong>app</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="_abcoll.html#Sequence">_abcoll.Sequence</a>:<br>
+<dl><dt><a name="ExtensionBackendList-__contains__"><strong>__contains__</strong></a>(self, value)</dt></dl>
+
+<dl><dt><a name="ExtensionBackendList-__reversed__"><strong>__reversed__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ExtensionBackendList-count"><strong>count</strong></a>(self, value)</dt><dd><tt>S.<a href="#ExtensionBackendList-count">count</a>(value)&nbsp;-&gt;&nbsp;integer&nbsp;--&nbsp;return&nbsp;number&nbsp;of&nbsp;occurrences&nbsp;of&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="ExtensionBackendList-index"><strong>index</strong></a>(self, value)</dt><dd><tt>S.<a href="#ExtensionBackendList-index">index</a>(value)&nbsp;-&gt;&nbsp;integer&nbsp;--&nbsp;return&nbsp;first&nbsp;index&nbsp;of&nbsp;value.<br>
+Raises&nbsp;ValueError&nbsp;if&nbsp;the&nbsp;value&nbsp;is&nbsp;not&nbsp;present.</tt></dd></dl>
+
+<hr>
+Class methods inherited from <a href="_abcoll.html#Sized">_abcoll.Sized</a>:<br>
+<dl><dt><a name="ExtensionBackendList-__subclasshook__"><strong>__subclasshook__</strong></a>(cls, C)<font color="#909090"><font face="helvetica, arial"> from <a href="abc.html#ABCMeta">abc.ABCMeta</a></font></font></dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="_abcoll.html#Sized">_abcoll.Sized</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="_abcoll.html#Sized">_abcoll.Sized</a>:<br>
+<dl><dt><strong>__metaclass__</strong> = &lt;class 'abc.ABCMeta'&gt;<dd><tt>Metaclass&nbsp;for&nbsp;defining&nbsp;Abstract&nbsp;Base&nbsp;Classes&nbsp;(ABCs).<br>
+&nbsp;<br>
+Use&nbsp;this&nbsp;metaclass&nbsp;to&nbsp;create&nbsp;an&nbsp;ABC.&nbsp;&nbsp;An&nbsp;ABC&nbsp;can&nbsp;be&nbsp;subclassed<br>
+directly,&nbsp;and&nbsp;then&nbsp;acts&nbsp;as&nbsp;a&nbsp;mix-in&nbsp;class.&nbsp;&nbsp;You&nbsp;can&nbsp;also&nbsp;register<br>
+unrelated&nbsp;concrete&nbsp;classes&nbsp;(even&nbsp;built-in&nbsp;classes)&nbsp;and&nbsp;unrelated<br>
+ABCs&nbsp;as&nbsp;'virtual&nbsp;subclasses'&nbsp;--&nbsp;these&nbsp;and&nbsp;their&nbsp;descendants&nbsp;will<br>
+be&nbsp;considered&nbsp;subclasses&nbsp;of&nbsp;the&nbsp;registering&nbsp;ABC&nbsp;by&nbsp;the&nbsp;built-in<br>
+issubclass()&nbsp;function,&nbsp;but&nbsp;the&nbsp;registering&nbsp;ABC&nbsp;won't&nbsp;show&nbsp;up&nbsp;in<br>
+their&nbsp;MRO&nbsp;(Method&nbsp;Resolution&nbsp;Order)&nbsp;nor&nbsp;will&nbsp;method<br>
+implementations&nbsp;defined&nbsp;by&nbsp;the&nbsp;registering&nbsp;ABC&nbsp;be&nbsp;callable&nbsp;(not<br>
+even&nbsp;via&nbsp;super()).</tt></dl>
+
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.html
new file mode 100644
index 0000000..3af1eec
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.html
@@ -0,0 +1,49 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.internal.backends.chrome</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.chrome</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome/__init__.py">telemetry/internal/backends/chrome/__init__.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.backends.chrome.android_browser_backend.html">android_browser_backend</a><br>
+<a href="telemetry.internal.backends.chrome.android_browser_finder.html">android_browser_finder</a><br>
+<a href="telemetry.internal.backends.chrome.android_browser_finder_unittest.html">android_browser_finder_unittest</a><br>
+<a href="telemetry.internal.backends.chrome.chrome_browser_backend.html">chrome_browser_backend</a><br>
+<a href="telemetry.internal.backends.chrome.chrome_browser_backend_unittest.html">chrome_browser_backend_unittest</a><br>
+<a href="telemetry.internal.backends.chrome.cros_browser_backend.html">cros_browser_backend</a><br>
+<a href="telemetry.internal.backends.chrome.cros_browser_finder.html">cros_browser_finder</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.backends.chrome.cros_browser_finder_unittest.html">cros_browser_finder_unittest</a><br>
+<a href="telemetry.internal.backends.chrome.cros_browser_with_oobe.html">cros_browser_with_oobe</a><br>
+<a href="telemetry.internal.backends.chrome.cros_test_case.html">cros_test_case</a><br>
+<a href="telemetry.internal.backends.chrome.cros_unittest.html">cros_unittest</a><br>
+<a href="telemetry.internal.backends.chrome.crx_id.html">crx_id</a><br>
+<a href="telemetry.internal.backends.chrome.crx_id_unittest.html">crx_id_unittest</a><br>
+<a href="telemetry.internal.backends.chrome.desktop_browser_backend.html">desktop_browser_backend</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.backends.chrome.desktop_browser_finder.html">desktop_browser_finder</a><br>
+<a href="telemetry.internal.backends.chrome.desktop_browser_finder_unittest.html">desktop_browser_finder_unittest</a><br>
+<a href="telemetry.internal.backends.chrome.extension_backend.html">extension_backend</a><br>
+<a href="telemetry.internal.backends.chrome.ios_browser_backend.html">ios_browser_backend</a><br>
+<a href="telemetry.internal.backends.chrome.ios_browser_finder.html">ios_browser_finder</a><br>
+<a href="telemetry.internal.backends.chrome.ios_browser_finder_unittest.html">ios_browser_finder_unittest</a><br>
+<a href="telemetry.internal.backends.chrome.misc_web_contents_backend.html">misc_web_contents_backend</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.backends.chrome.oobe.html">oobe</a><br>
+<a href="telemetry.internal.backends.chrome.system_info_backend.html">system_info_backend</a><br>
+<a href="telemetry.internal.backends.chrome.tab_list_backend.html">tab_list_backend</a><br>
+<a href="telemetry.internal.backends.chrome.tab_list_backend_unittest.html">tab_list_backend_unittest</a><br>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.ios_browser_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.ios_browser_backend.html
new file mode 100644
index 0000000..f798df7
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.ios_browser_backend.html
@@ -0,0 +1,192 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.chrome.ios_browser_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.chrome.html"><font color="#ffffff">chrome</font></a>.ios_browser_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome/ios_browser_backend.py">telemetry/internal/backends/chrome/ios_browser_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.backends.chrome.chrome_browser_backend.html">telemetry.internal.backends.chrome.chrome_browser_backend</a><br>
+<a href="contextlib.html">contextlib</a><br>
+<a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+</td><td width="25%" valign=top><a href="json.html">json</a><br>
+<a href="logging.html">logging</a><br>
+<a href="re.html">re</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.backends.chrome.system_info_backend.html">telemetry.internal.backends.chrome.system_info_backend</a><br>
+<a href="urllib2.html">urllib2</a><br>
+<a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome.chrome_browser_backend.html#ChromeBrowserBackend">telemetry.internal.backends.chrome.chrome_browser_backend.ChromeBrowserBackend</a>(<a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome.ios_browser_backend.html#IosBrowserBackend">IosBrowserBackend</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="IosBrowserBackend">class <strong>IosBrowserBackend</strong></a>(<a href="telemetry.internal.backends.chrome.chrome_browser_backend.html#ChromeBrowserBackend">telemetry.internal.backends.chrome.chrome_browser_backend.ChromeBrowserBackend</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome.ios_browser_backend.html#IosBrowserBackend">IosBrowserBackend</a></dd>
+<dd><a href="telemetry.internal.backends.chrome.chrome_browser_backend.html#ChromeBrowserBackend">telemetry.internal.backends.chrome.chrome_browser_backend.ChromeBrowserBackend</a></dd>
+<dd><a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a></dd>
+<dd><a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="IosBrowserBackend-GetBrowserStartupArgs"><strong>GetBrowserStartupArgs</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosBrowserBackend-GetDeviceUrls"><strong>GetDeviceUrls</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosBrowserBackend-GetStackTrace"><strong>GetStackTrace</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosBrowserBackend-GetStandardOutput"><strong>GetStandardOutput</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosBrowserBackend-GetSystemInfo"><strong>GetSystemInfo</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosBrowserBackend-GetWebSocketDebuggerUrls"><strong>GetWebSocketDebuggerUrls</strong></a>(self, device_urls)</dt><dd><tt>Get&nbsp;a&nbsp;list&nbsp;of&nbsp;the&nbsp;websocket&nbsp;debugger&nbsp;URLs&nbsp;to&nbsp;communicate&nbsp;with<br>
+all&nbsp;running&nbsp;UIWebViews.</tt></dd></dl>
+
+<dl><dt><a name="IosBrowserBackend-HasBrowserFinishedLaunching"><strong>HasBrowserFinishedLaunching</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosBrowserBackend-IsBrowserRunning"><strong>IsBrowserRunning</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosBrowserBackend-Start"><strong>Start</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosBrowserBackend-UpdateRunningBrowsersInfo"><strong>UpdateRunningBrowsersInfo</strong></a>(self)</dt><dd><tt>Refresh&nbsp;to&nbsp;match&nbsp;current&nbsp;state&nbsp;of&nbsp;the&nbsp;running&nbsp;browser.</tt></dd></dl>
+
+<dl><dt><a name="IosBrowserBackend-__init__"><strong>__init__</strong></a>(self, ios_platform_backend, browser_options)</dt></dl>
+
+<dl><dt><a name="IosBrowserBackend-extension_backend"><strong>extension_backend</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>browser_directory</strong></dt>
+</dl>
+<dl><dt><strong>profile_directory</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.chrome.chrome_browser_backend.html#ChromeBrowserBackend">telemetry.internal.backends.chrome.chrome_browser_backend.ChromeBrowserBackend</a>:<br>
+<dl><dt><a name="IosBrowserBackend-Close"><strong>Close</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosBrowserBackend-DumpMemory"><strong>DumpMemory</strong></a>(self, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="IosBrowserBackend-GetProcessName"><strong>GetProcessName</strong></a>(self, cmd_line)</dt><dd><tt>Returns&nbsp;a&nbsp;user-friendly&nbsp;name&nbsp;for&nbsp;the&nbsp;process&nbsp;of&nbsp;the&nbsp;given&nbsp;|cmd_line|.</tt></dd></dl>
+
+<dl><dt><a name="IosBrowserBackend-GetReplayBrowserStartupArgs"><strong>GetReplayBrowserStartupArgs</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosBrowserBackend-SetMemoryPressureNotificationsSuppressed"><strong>SetMemoryPressureNotificationsSuppressed</strong></a>(self, suppressed, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="IosBrowserBackend-SimulateMemoryPressureNotification"><strong>SimulateMemoryPressureNotification</strong></a>(self, pressure_level, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="IosBrowserBackend-StartTracing"><strong>StartTracing</strong></a>(self, trace_options, custom_categories<font color="#909090">=None</font>, timeout<font color="#909090">=90</font>)</dt><dd><tt>Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;trace_options:&nbsp;A&nbsp;tracing_options.TracingOptions&nbsp;instance.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;custom_categories:&nbsp;An&nbsp;optional&nbsp;string&nbsp;containing&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;comma&nbsp;separated&nbsp;categories&nbsp;that&nbsp;will&nbsp;be&nbsp;traced<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;instead&nbsp;of&nbsp;the&nbsp;default&nbsp;category&nbsp;set.&nbsp;&nbsp;Example:&nbsp;use<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;"webkit,cc,disabled-by-default-cc.debug"&nbsp;to&nbsp;trace&nbsp;only<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;those&nbsp;three&nbsp;event&nbsp;categories.</tt></dd></dl>
+
+<dl><dt><a name="IosBrowserBackend-StopTracing"><strong>StopTracing</strong></a>(self, trace_data_builder)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.chrome.chrome_browser_backend.html#ChromeBrowserBackend">telemetry.internal.backends.chrome.chrome_browser_backend.ChromeBrowserBackend</a>:<br>
+<dl><dt><strong>devtools_client</strong></dt>
+</dl>
+<dl><dt><strong>supports_cpu_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_memory_dumping</strong></dt>
+</dl>
+<dl><dt><strong>supports_memory_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_overriding_memory_pressure_notifications</strong></dt>
+</dl>
+<dl><dt><strong>supports_power_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_system_info</strong></dt>
+</dl>
+<dl><dt><strong>supports_tab_control</strong></dt>
+</dl>
+<dl><dt><strong>supports_tracing</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a>:<br>
+<dl><dt><a name="IosBrowserBackend-IsAppRunning"><strong>IsAppRunning</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosBrowserBackend-SetBrowser"><strong>SetBrowser</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="IosBrowserBackend-UploadLogsToCloudStorage"><strong>UploadLogsToCloudStorage</strong></a>(self)</dt><dd><tt>Uploading&nbsp;log&nbsp;files&nbsp;produce&nbsp;by&nbsp;this&nbsp;browser&nbsp;instance&nbsp;to&nbsp;cloud&nbsp;storage.<br>
+&nbsp;<br>
+Check&nbsp;supports_uploading_logs&nbsp;before&nbsp;calling&nbsp;this&nbsp;method.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a>:<br>
+<dl><dt><strong>browser</strong></dt>
+</dl>
+<dl><dt><strong>browser_type</strong></dt>
+</dl>
+<dl><dt><strong>log_file_path</strong></dt>
+</dl>
+<dl><dt><strong>profiling_controller_backend</strong></dt>
+</dl>
+<dl><dt><strong>should_ignore_certificate_errors</strong></dt>
+</dl>
+<dl><dt><strong>supports_extensions</strong></dt>
+<dd><tt>True&nbsp;if&nbsp;this&nbsp;browser&nbsp;backend&nbsp;supports&nbsp;extensions.</tt></dd>
+</dl>
+<dl><dt><strong>supports_uploading_logs</strong></dt>
+</dl>
+<dl><dt><strong>tab_list_backend</strong></dt>
+</dl>
+<dl><dt><strong>wpr_mode</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a>:<br>
+<dl><dt><a name="IosBrowserBackend-SetApp"><strong>SetApp</strong></a>(self, app)</dt></dl>
+
+<dl><dt><a name="IosBrowserBackend-__del__"><strong>__del__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>app</strong></dt>
+</dl>
+<dl><dt><strong>app_type</strong></dt>
+</dl>
+<dl><dt><strong>pid</strong></dt>
+</dl>
+<dl><dt><strong>platform_backend</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
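Note on the StartTracing entry documented above: custom_categories is described as a comma-separated category string such as "webkit,cc,disabled-by-default-cc.debug". A minimal, purely illustrative Python sketch of splitting such a string into a category list (parse_custom_categories is a hypothetical helper, not part of catapult):

    def parse_custom_categories(custom_categories):
        """Split a comma-separated category string into a clean list."""
        if not custom_categories:
            return []
        return [c.strip() for c in custom_categories.split(',') if c.strip()]

    assert parse_custom_categories('webkit,cc,disabled-by-default-cc.debug') == [
        'webkit', 'cc', 'disabled-by-default-cc.debug']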
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.ios_browser_finder.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.ios_browser_finder.html
new file mode 100644
index 0000000..d8dc94c
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.ios_browser_finder.html
@@ -0,0 +1,124 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.chrome.ios_browser_finder</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.chrome.html"><font color="#ffffff">chrome</font></a>.ios_browser_finder</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome/ios_browser_finder.py">telemetry/internal/backends/chrome/ios_browser_finder.py</a></font></td></tr></table>
+    <p><tt>Finds&nbsp;iOS&nbsp;browsers&nbsp;that&nbsp;can&nbsp;be&nbsp;controlled&nbsp;by&nbsp;telemetry.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.browser.browser.html">telemetry.internal.browser.browser</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.inspector_backend.html">telemetry.internal.backends.chrome_inspector.inspector_backend</a><br>
+<a href="telemetry.internal.backends.chrome.ios_browser_backend.html">telemetry.internal.backends.chrome.ios_browser_backend</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.ios_device.html">telemetry.internal.platform.ios_device</a><br>
+<a href="telemetry.internal.platform.ios_platform_backend.html">telemetry.internal.platform.ios_platform_backend</a><br>
+<a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.platform.html">telemetry.core.platform</a><br>
+<a href="telemetry.internal.browser.possible_browser.html">telemetry.internal.browser.possible_browser</a><br>
+<a href="re.html">re</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a>(<a href="telemetry.internal.app.possible_app.html#PossibleApp">telemetry.internal.app.possible_app.PossibleApp</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome.ios_browser_finder.html#PossibleIOSBrowser">PossibleIOSBrowser</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PossibleIOSBrowser">class <strong>PossibleIOSBrowser</strong></a>(<a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;running&nbsp;iOS&nbsp;browser&nbsp;instance.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome.ios_browser_finder.html#PossibleIOSBrowser">PossibleIOSBrowser</a></dd>
+<dd><a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a></dd>
+<dd><a href="telemetry.internal.app.possible_app.html#PossibleApp">telemetry.internal.app.possible_app.PossibleApp</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="PossibleIOSBrowser-Create"><strong>Create</strong></a>(self, finder_options)</dt><dd><tt>#&nbsp;TODO(baxley):&nbsp;Implement&nbsp;the&nbsp;following&nbsp;methods&nbsp;for&nbsp;iOS.</tt></dd></dl>
+
+<dl><dt><a name="PossibleIOSBrowser-SupportsOptions"><strong>SupportsOptions</strong></a>(self, finder_options)</dt></dl>
+
+<dl><dt><a name="PossibleIOSBrowser-UpdateExecutableIfNeeded"><strong>UpdateExecutableIfNeeded</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleIOSBrowser-__init__"><strong>__init__</strong></a>(self, browser_type, _)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a>:<br>
+<dl><dt><a name="PossibleIOSBrowser-IsRemote"><strong>IsRemote</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleIOSBrowser-RunRemote"><strong>RunRemote</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleIOSBrowser-SetCredentialsPath"><strong>SetCredentialsPath</strong></a>(self, credentials_path)</dt></dl>
+
+<dl><dt><a name="PossibleIOSBrowser-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleIOSBrowser-last_modification_time"><strong>last_modification_time</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a>:<br>
+<dl><dt><strong>browser_type</strong></dt>
+</dl>
+<dl><dt><strong>supports_tab_control</strong></dt>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.app.possible_app.html#PossibleApp">telemetry.internal.app.possible_app.PossibleApp</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>app_type</strong></dt>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+</dl>
+<dl><dt><strong>target_os</strong></dt>
+<dd><tt>Target&nbsp;OS&nbsp;the&nbsp;app&nbsp;will&nbsp;run&nbsp;on.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-CanFindAvailableBrowsers"><strong>CanFindAvailableBrowsers</strong></a>()</dt></dl>
+ <dl><dt><a name="-FindAllAvailableBrowsers"><strong>FindAllAvailableBrowsers</strong></a>(finder_options, device)</dt><dd><tt>Find&nbsp;all&nbsp;running&nbsp;iOS&nbsp;browsers&nbsp;on&nbsp;connected&nbsp;devices.</tt></dd></dl>
+ <dl><dt><a name="-FindAllBrowserTypes"><strong>FindAllBrowserTypes</strong></a>(_)</dt></dl>
+ <dl><dt><a name="-SelectDefaultBrowser"><strong>SelectDefaultBrowser</strong></a>(_)</dt></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>DEVICE_LIST_URL</strong> = 'http://127.0.0.1:9221/json'<br>
+<strong>IOS_BROWSERS</strong> = {'CriOS': 'ios-chrome', 'Version': 'ios-safari'}<br>
+<strong>IOS_WEBKIT_DEBUG_PROXY</strong> = 'ios_webkit_debug_proxy'</td></tr></table>
+</body></html>
\ No newline at end of file
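The Data section above lists IOS_BROWSERS, which maps a user-agent token to a telemetry browser type ('CriOS' -> 'ios-chrome', 'Version' -> 'ios-safari'). A small illustrative sketch of how such a token table can be used to classify a user-agent string (classify_user_agent is a hypothetical helper, not part of ios_browser_finder):

    IOS_BROWSERS = {'CriOS': 'ios-chrome', 'Version': 'ios-safari'}

    def classify_user_agent(user_agent):
        """Return the telemetry browser type for a UA string, or None."""
        for token, browser_type in IOS_BROWSERS.items():
            if token in user_agent:
                return browser_type
        return None

    assert classify_user_agent('Mozilla/5.0 (iPhone; CPU iPhone OS 9_0 like '
                               'Mac OS X) CriOS/45.0') == 'ios-chrome'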
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.misc_web_contents_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.misc_web_contents_backend.html
new file mode 100644
index 0000000..b3066c7
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.misc_web_contents_backend.html
@@ -0,0 +1,139 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.chrome.misc_web_contents_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.chrome.html"><font color="#ffffff">chrome</font></a>.misc_web_contents_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome/misc_web_contents_backend.py">telemetry/internal/backends/chrome/misc_web_contents_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.backends.chrome_inspector.inspector_backend_list.html">telemetry.internal.backends.chrome_inspector.inspector_backend_list</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.backends.chrome.oobe.html">telemetry.internal.backends.chrome.oobe</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.inspector_backend_list.html#InspectorBackendList">telemetry.internal.backends.chrome_inspector.inspector_backend_list.InspectorBackendList</a>(<a href="_abcoll.html#Sequence">_abcoll.Sequence</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome.misc_web_contents_backend.html#MiscWebContentsBackend">MiscWebContentsBackend</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MiscWebContentsBackend">class <strong>MiscWebContentsBackend</strong></a>(<a href="telemetry.internal.backends.chrome_inspector.inspector_backend_list.html#InspectorBackendList">telemetry.internal.backends.chrome_inspector.inspector_backend_list.InspectorBackendList</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;dynamic&nbsp;sequence&nbsp;of&nbsp;web&nbsp;contents&nbsp;not&nbsp;related&nbsp;to&nbsp;tabs&nbsp;and&nbsp;extensions.<br>
+&nbsp;<br>
+Provides&nbsp;access&nbsp;to&nbsp;the&nbsp;chrome://oobe/login&nbsp;page.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome.misc_web_contents_backend.html#MiscWebContentsBackend">MiscWebContentsBackend</a></dd>
+<dd><a href="telemetry.internal.backends.chrome_inspector.inspector_backend_list.html#InspectorBackendList">telemetry.internal.backends.chrome_inspector.inspector_backend_list.InspectorBackendList</a></dd>
+<dd><a href="_abcoll.html#Sequence">_abcoll.Sequence</a></dd>
+<dd><a href="_abcoll.html#Sized">_abcoll.Sized</a></dd>
+<dd><a href="_abcoll.html#Iterable">_abcoll.Iterable</a></dd>
+<dd><a href="_abcoll.html#Container">_abcoll.Container</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="MiscWebContentsBackend-CreateWrapper"><strong>CreateWrapper</strong></a>(self, inspector_backend)</dt></dl>
+
+<dl><dt><a name="MiscWebContentsBackend-GetOobe"><strong>GetOobe</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MiscWebContentsBackend-ShouldIncludeContext"><strong>ShouldIncludeContext</strong></a>(self, context)</dt></dl>
+
+<dl><dt><a name="MiscWebContentsBackend-__init__"><strong>__init__</strong></a>(self, browser_backend)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>oobe_exists</strong></dt>
+<dd><tt>Lightweight&nbsp;property&nbsp;to&nbsp;determine&nbsp;if&nbsp;the&nbsp;oobe&nbsp;webui&nbsp;is&nbsp;visible.</tt></dd>
+</dl>
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>__abstractmethods__</strong> = frozenset([])</dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.chrome_inspector.inspector_backend_list.html#InspectorBackendList">telemetry.internal.backends.chrome_inspector.inspector_backend_list.InspectorBackendList</a>:<br>
+<dl><dt><a name="MiscWebContentsBackend-GetBackendFromContextId"><strong>GetBackendFromContextId</strong></a>(self, context_id)</dt></dl>
+
+<dl><dt><a name="MiscWebContentsBackend-GetContextInfo"><strong>GetContextInfo</strong></a>(self, context_id)</dt></dl>
+
+<dl><dt><a name="MiscWebContentsBackend-GetTabById"><strong>GetTabById</strong></a>(self, identifier)</dt></dl>
+
+<dl><dt><a name="MiscWebContentsBackend-IterContextIds"><strong>IterContextIds</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MiscWebContentsBackend-__getitem__"><strong>__getitem__</strong></a>(self, index)</dt><dd><tt>#&nbsp;TODO(nednguyen):&nbsp;Remove&nbsp;this&nbsp;method&nbsp;and&nbsp;turn&nbsp;inspector_backend_list&nbsp;API&nbsp;to<br>
+#&nbsp;dictionary-like&nbsp;API&nbsp;(crbug.com/398467)</tt></dd></dl>
+
+<dl><dt><a name="MiscWebContentsBackend-__iter__"><strong>__iter__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MiscWebContentsBackend-__len__"><strong>__len__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.chrome_inspector.inspector_backend_list.html#InspectorBackendList">telemetry.internal.backends.chrome_inspector.inspector_backend_list.InspectorBackendList</a>:<br>
+<dl><dt><strong>app</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="_abcoll.html#Sequence">_abcoll.Sequence</a>:<br>
+<dl><dt><a name="MiscWebContentsBackend-__contains__"><strong>__contains__</strong></a>(self, value)</dt></dl>
+
+<dl><dt><a name="MiscWebContentsBackend-__reversed__"><strong>__reversed__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MiscWebContentsBackend-count"><strong>count</strong></a>(self, value)</dt><dd><tt>S.<a href="#MiscWebContentsBackend-count">count</a>(value)&nbsp;-&gt;&nbsp;integer&nbsp;--&nbsp;return&nbsp;number&nbsp;of&nbsp;occurrences&nbsp;of&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="MiscWebContentsBackend-index"><strong>index</strong></a>(self, value)</dt><dd><tt>S.<a href="#MiscWebContentsBackend-index">index</a>(value)&nbsp;-&gt;&nbsp;integer&nbsp;--&nbsp;return&nbsp;first&nbsp;index&nbsp;of&nbsp;value.<br>
+Raises&nbsp;ValueError&nbsp;if&nbsp;the&nbsp;value&nbsp;is&nbsp;not&nbsp;present.</tt></dd></dl>
+
+<hr>
+Class methods inherited from <a href="_abcoll.html#Sized">_abcoll.Sized</a>:<br>
+<dl><dt><a name="MiscWebContentsBackend-__subclasshook__"><strong>__subclasshook__</strong></a>(cls, C)<font color="#909090"><font face="helvetica, arial"> from <a href="abc.html#ABCMeta">abc.ABCMeta</a></font></font></dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="_abcoll.html#Sized">_abcoll.Sized</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="_abcoll.html#Sized">_abcoll.Sized</a>:<br>
+<dl><dt><strong>__metaclass__</strong> = &lt;class 'abc.ABCMeta'&gt;<dd><tt>Metaclass&nbsp;for&nbsp;defining&nbsp;Abstract&nbsp;Base&nbsp;Classes&nbsp;(ABCs).<br>
+&nbsp;<br>
+Use&nbsp;this&nbsp;metaclass&nbsp;to&nbsp;create&nbsp;an&nbsp;ABC.&nbsp;&nbsp;An&nbsp;ABC&nbsp;can&nbsp;be&nbsp;subclassed<br>
+directly,&nbsp;and&nbsp;then&nbsp;acts&nbsp;as&nbsp;a&nbsp;mix-in&nbsp;class.&nbsp;&nbsp;You&nbsp;can&nbsp;also&nbsp;register<br>
+unrelated&nbsp;concrete&nbsp;classes&nbsp;(even&nbsp;built-in&nbsp;classes)&nbsp;and&nbsp;unrelated<br>
+ABCs&nbsp;as&nbsp;'virtual&nbsp;subclasses'&nbsp;--&nbsp;these&nbsp;and&nbsp;their&nbsp;descendants&nbsp;will<br>
+be&nbsp;considered&nbsp;subclasses&nbsp;of&nbsp;the&nbsp;registering&nbsp;ABC&nbsp;by&nbsp;the&nbsp;built-in<br>
+issubclass()&nbsp;function,&nbsp;but&nbsp;the&nbsp;registering&nbsp;ABC&nbsp;won't&nbsp;show&nbsp;up&nbsp;in<br>
+their&nbsp;MRO&nbsp;(Method&nbsp;Resolution&nbsp;Order)&nbsp;nor&nbsp;will&nbsp;method<br>
+implementations&nbsp;defined&nbsp;by&nbsp;the&nbsp;registering&nbsp;ABC&nbsp;be&nbsp;callable&nbsp;(not<br>
+even&nbsp;via&nbsp;super()).</tt></dl>
+
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.oobe.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.oobe.html
new file mode 100644
index 0000000..57e61f2
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.oobe.html
@@ -0,0 +1,246 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.chrome.oobe</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.chrome.html"><font color="#ffffff">chrome</font></a>.oobe</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome/oobe.py">telemetry/internal/backends/chrome/oobe.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.browser.web_contents.html">telemetry.internal.browser.web_contents</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.browser.web_contents.html#WebContents">telemetry.internal.browser.web_contents.WebContents</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome.oobe.html#Oobe">Oobe</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Oobe">class <strong>Oobe</strong></a>(<a href="telemetry.internal.browser.web_contents.html#WebContents">telemetry.internal.browser.web_contents.WebContents</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome.oobe.html#Oobe">Oobe</a></dd>
+<dd><a href="telemetry.internal.browser.web_contents.html#WebContents">telemetry.internal.browser.web_contents.WebContents</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="Oobe-NavigateFakeLogin"><strong>NavigateFakeLogin</strong></a>(self, username, password, gaia_id)</dt><dd><tt>Fake&nbsp;user&nbsp;login.</tt></dd></dl>
+
+<dl><dt><a name="Oobe-NavigateGaiaLogin"><strong>NavigateGaiaLogin</strong></a>(self, username, password, enterprise_enroll<font color="#909090">=False</font>, for_user_triggered_enrollment<font color="#909090">=False</font>)</dt><dd><tt>Logs&nbsp;in&nbsp;using&nbsp;the&nbsp;GAIA&nbsp;webview&nbsp;or&nbsp;IFrame,&nbsp;whichever&nbsp;is<br>
+present.&nbsp;|enterprise_enroll|&nbsp;allows&nbsp;for&nbsp;enterprise&nbsp;enrollment.<br>
+|for_user_triggered_enrollment|&nbsp;should&nbsp;be&nbsp;False&nbsp;for&nbsp;remora&nbsp;enrollment.</tt></dd></dl>
+
+<dl><dt><a name="Oobe-NavigateGuestLogin"><strong>NavigateGuestLogin</strong></a>(self)</dt><dd><tt>Logs&nbsp;in&nbsp;as&nbsp;guest.</tt></dd></dl>
+
+<dl><dt><a name="Oobe-__init__"><strong>__init__</strong></a>(self, inspector_backend)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.browser.web_contents.html#WebContents">telemetry.internal.browser.web_contents.WebContents</a>:<br>
+<dl><dt><a name="Oobe-CloseConnections"><strong>CloseConnections</strong></a>(self)</dt><dd><tt>Closes&nbsp;all&nbsp;TCP&nbsp;sockets&nbsp;held&nbsp;open&nbsp;by&nbsp;the&nbsp;browser.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException&nbsp;if&nbsp;the&nbsp;tab&nbsp;is&nbsp;not&nbsp;alive.</tt></dd></dl>
+
+<dl><dt><a name="Oobe-EnableAllContexts"><strong>EnableAllContexts</strong></a>(self)</dt><dd><tt>Enable&nbsp;all&nbsp;contexts&nbsp;in&nbsp;a&nbsp;page.&nbsp;Returns&nbsp;the&nbsp;number&nbsp;of&nbsp;available&nbsp;contexts.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.WebSocketDisconnected<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="Oobe-EvaluateJavaScript"><strong>EvaluateJavaScript</strong></a>(self, expr, timeout<font color="#909090">=90</font>)</dt><dd><tt>Evalutes&nbsp;expr&nbsp;in&nbsp;JavaScript&nbsp;and&nbsp;returns&nbsp;the&nbsp;JSONized&nbsp;result.<br>
+&nbsp;<br>
+Consider&nbsp;using&nbsp;ExecuteJavaScript&nbsp;for&nbsp;cases&nbsp;where&nbsp;the&nbsp;result&nbsp;of&nbsp;the<br>
+expression&nbsp;is&nbsp;not&nbsp;needed.<br>
+&nbsp;<br>
+If&nbsp;evaluation&nbsp;throws&nbsp;in&nbsp;JavaScript,&nbsp;a&nbsp;Python&nbsp;EvaluateException&nbsp;will<br>
+be&nbsp;raised.<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;result&nbsp;of&nbsp;the&nbsp;evaluation&nbsp;cannot&nbsp;be&nbsp;JSONized,&nbsp;then&nbsp;an<br>
+EvaluationException&nbsp;will&nbsp;be&nbsp;raised.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;See&nbsp;<a href="#Oobe-EvaluateJavaScriptInContext">EvaluateJavaScriptInContext</a>()&nbsp;for&nbsp;a&nbsp;detailed&nbsp;list<br>
+&nbsp;&nbsp;of&nbsp;possible&nbsp;exceptions.</tt></dd></dl>
+
+<dl><dt><a name="Oobe-EvaluateJavaScriptInContext"><strong>EvaluateJavaScriptInContext</strong></a>(self, expr, context_id, timeout<font color="#909090">=90</font>)</dt><dd><tt>Similar&nbsp;to&nbsp;ExecuteJavaScript,&nbsp;except&nbsp;context_id&nbsp;can&nbsp;refer&nbsp;to&nbsp;an&nbsp;iframe.<br>
+The&nbsp;main&nbsp;page&nbsp;has&nbsp;context_id=1,&nbsp;the&nbsp;first&nbsp;iframe&nbsp;context_id=2,&nbsp;etc.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.EvaluateException<br>
+&nbsp;&nbsp;exceptions.WebSocketDisconnected<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="Oobe-ExecuteJavaScript"><strong>ExecuteJavaScript</strong></a>(self, statement, timeout<font color="#909090">=90</font>)</dt><dd><tt>Executes&nbsp;statement&nbsp;in&nbsp;JavaScript.&nbsp;Does&nbsp;not&nbsp;return&nbsp;the&nbsp;result.<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;statement&nbsp;failed&nbsp;to&nbsp;evaluate,&nbsp;EvaluateException&nbsp;will&nbsp;be&nbsp;raised.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;See&nbsp;<a href="#Oobe-ExecuteJavaScriptInContext">ExecuteJavaScriptInContext</a>()&nbsp;for&nbsp;a&nbsp;detailed&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;possible&nbsp;exceptions.</tt></dd></dl>
+
+<dl><dt><a name="Oobe-ExecuteJavaScriptInContext"><strong>ExecuteJavaScriptInContext</strong></a>(self, expr, context_id, timeout<font color="#909090">=90</font>)</dt><dd><tt>Similar&nbsp;to&nbsp;ExecuteJavaScript,&nbsp;except&nbsp;context_id&nbsp;can&nbsp;refer&nbsp;to&nbsp;an&nbsp;iframe.<br>
+The&nbsp;main&nbsp;page&nbsp;has&nbsp;context_id=1,&nbsp;the&nbsp;first&nbsp;iframe&nbsp;context_id=2,&nbsp;etc.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.EvaluateException<br>
+&nbsp;&nbsp;exceptions.WebSocketDisconnected<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="Oobe-GetUrl"><strong>GetUrl</strong></a>(self)</dt><dd><tt>Returns&nbsp;the&nbsp;URL&nbsp;to&nbsp;which&nbsp;the&nbsp;<a href="telemetry.internal.browser.web_contents.html#WebContents">WebContents</a>&nbsp;is&nbsp;connected.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;If&nbsp;there&nbsp;is&nbsp;an&nbsp;error&nbsp;in&nbsp;inspector&nbsp;backend&nbsp;connection.</tt></dd></dl>
+
+<dl><dt><a name="Oobe-GetWebviewContexts"><strong>GetWebviewContexts</strong></a>(self)</dt><dd><tt>Returns&nbsp;a&nbsp;list&nbsp;of&nbsp;webview&nbsp;contexts&nbsp;within&nbsp;the&nbsp;current&nbsp;inspector&nbsp;backend.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;list&nbsp;of&nbsp;<a href="telemetry.internal.browser.web_contents.html#WebContents">WebContents</a>&nbsp;objects&nbsp;representing&nbsp;the&nbsp;webview&nbsp;contexts.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;If&nbsp;there&nbsp;is&nbsp;an&nbsp;error&nbsp;in&nbsp;inspector&nbsp;backend&nbsp;connection.</tt></dd></dl>
+
+<dl><dt><a name="Oobe-HasReachedQuiescence"><strong>HasReachedQuiescence</strong></a>(self)</dt><dd><tt>Determine&nbsp;whether&nbsp;the&nbsp;page&nbsp;has&nbsp;reached&nbsp;quiescence&nbsp;after&nbsp;loading.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;True&nbsp;if&nbsp;2&nbsp;seconds&nbsp;have&nbsp;passed&nbsp;since&nbsp;last&nbsp;resource&nbsp;received,&nbsp;false<br>
+&nbsp;&nbsp;otherwise.<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;See&nbsp;<a href="#Oobe-EvaluateJavaScript">EvaluateJavaScript</a>()&nbsp;for&nbsp;a&nbsp;detailed&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;possible&nbsp;exceptions.</tt></dd></dl>
+
+<dl><dt><a name="Oobe-IsAlive"><strong>IsAlive</strong></a>(self)</dt><dd><tt>Whether&nbsp;the&nbsp;<a href="telemetry.internal.browser.web_contents.html#WebContents">WebContents</a>&nbsp;is&nbsp;still&nbsp;operating&nbsp;normally.<br>
+&nbsp;<br>
+Since&nbsp;<a href="telemetry.internal.browser.web_contents.html#WebContents">WebContents</a>&nbsp;functions&nbsp;asynchronously,&nbsp;this&nbsp;method&nbsp;does&nbsp;not&nbsp;guarantee<br>
+that&nbsp;the&nbsp;<a href="telemetry.internal.browser.web_contents.html#WebContents">WebContents</a>&nbsp;will&nbsp;still&nbsp;be&nbsp;alive&nbsp;at&nbsp;any&nbsp;point&nbsp;in&nbsp;the&nbsp;future.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;boolean&nbsp;indicating&nbsp;whether&nbsp;the&nbsp;<a href="telemetry.internal.browser.web_contents.html#WebContents">WebContents</a>&nbsp;is&nbsp;operating&nbsp;normally.</tt></dd></dl>
+
+<dl><dt><a name="Oobe-Navigate"><strong>Navigate</strong></a>(self, url, script_to_evaluate_on_commit<font color="#909090">=None</font>, timeout<font color="#909090">=90</font>)</dt><dd><tt>Navigates&nbsp;to&nbsp;url.<br>
+&nbsp;<br>
+If&nbsp;|script_to_evaluate_on_commit|&nbsp;is&nbsp;given,&nbsp;the&nbsp;script&nbsp;source&nbsp;string&nbsp;will&nbsp;be<br>
+evaluated&nbsp;when&nbsp;the&nbsp;navigation&nbsp;is&nbsp;committed.&nbsp;This&nbsp;is&nbsp;after&nbsp;the&nbsp;context&nbsp;of<br>
+the&nbsp;page&nbsp;exists,&nbsp;but&nbsp;before&nbsp;any&nbsp;script&nbsp;on&nbsp;the&nbsp;page&nbsp;itself&nbsp;has&nbsp;executed.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="Oobe-StartTimelineRecording"><strong>StartTimelineRecording</strong></a>(self)</dt><dd><tt>Starts&nbsp;timeline&nbsp;recording.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="Oobe-StopTimelineRecording"><strong>StopTimelineRecording</strong></a>(self)</dt><dd><tt>Stops&nbsp;timeline&nbsp;recording.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="Oobe-SynthesizeScrollGesture"><strong>SynthesizeScrollGesture</strong></a>(self, x<font color="#909090">=100</font>, y<font color="#909090">=800</font>, xDistance<font color="#909090">=0</font>, yDistance<font color="#909090">=-500</font>, xOverscroll<font color="#909090">=None</font>, yOverscroll<font color="#909090">=None</font>, preventFling<font color="#909090">=True</font>, speed<font color="#909090">=None</font>, gestureSourceType<font color="#909090">=None</font>, repeatCount<font color="#909090">=None</font>, repeatDelayMs<font color="#909090">=None</font>, interactionMarkerName<font color="#909090">=None</font>)</dt><dd><tt>Runs&nbsp;an&nbsp;inspector&nbsp;command&nbsp;that&nbsp;causes&nbsp;a&nbsp;repeatable&nbsp;browser&nbsp;driven&nbsp;scroll.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;x:&nbsp;X&nbsp;coordinate&nbsp;of&nbsp;the&nbsp;start&nbsp;of&nbsp;the&nbsp;gesture&nbsp;in&nbsp;CSS&nbsp;pixels.<br>
+&nbsp;&nbsp;y:&nbsp;Y&nbsp;coordinate&nbsp;of&nbsp;the&nbsp;start&nbsp;of&nbsp;the&nbsp;gesture&nbsp;in&nbsp;CSS&nbsp;pixels.<br>
+&nbsp;&nbsp;xDistance:&nbsp;Distance&nbsp;to&nbsp;scroll&nbsp;along&nbsp;the&nbsp;X&nbsp;axis&nbsp;(positive&nbsp;to&nbsp;scroll&nbsp;left).<br>
+&nbsp;&nbsp;yDistance:&nbsp;Distance&nbsp;to&nbsp;scroll&nbsp;along&nbsp;the&nbsp;Y&nbsp;axis&nbsp;(positive&nbsp;to&nbsp;scroll&nbsp;up).<br>
+&nbsp;&nbsp;xOverscroll:&nbsp;Number&nbsp;of&nbsp;additional&nbsp;pixels&nbsp;to&nbsp;scroll&nbsp;back&nbsp;along&nbsp;the&nbsp;X&nbsp;axis.<br>
+&nbsp;&nbsp;yOverscroll:&nbsp;Number&nbsp;of&nbsp;additional&nbsp;pixels&nbsp;to&nbsp;scroll&nbsp;back&nbsp;along&nbsp;the&nbsp;Y&nbsp;axis.<br>
+&nbsp;&nbsp;preventFling:&nbsp;Prevents&nbsp;a&nbsp;fling&nbsp;gesture.<br>
+&nbsp;&nbsp;speed:&nbsp;Swipe&nbsp;speed&nbsp;in&nbsp;pixels&nbsp;per&nbsp;second.<br>
+&nbsp;&nbsp;gestureSourceType:&nbsp;Which&nbsp;type&nbsp;of&nbsp;input&nbsp;events&nbsp;to&nbsp;be&nbsp;generated.<br>
+&nbsp;&nbsp;repeatCount:&nbsp;Number&nbsp;of&nbsp;additional&nbsp;repeats&nbsp;beyond&nbsp;the&nbsp;first&nbsp;scroll.<br>
+&nbsp;&nbsp;repeatDelayMs:&nbsp;Number&nbsp;of&nbsp;milliseconds&nbsp;delay&nbsp;between&nbsp;each&nbsp;repeat.<br>
+&nbsp;&nbsp;interactionMarkerName:&nbsp;The&nbsp;name&nbsp;of&nbsp;the&nbsp;interaction&nbsp;markers&nbsp;to&nbsp;generate.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="Oobe-WaitForDocumentReadyStateToBeComplete"><strong>WaitForDocumentReadyStateToBeComplete</strong></a>(self, timeout<font color="#909090">=90</font>)</dt><dd><tt>Waits&nbsp;for&nbsp;the&nbsp;document&nbsp;to&nbsp;finish&nbsp;loading.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;See&nbsp;<a href="#Oobe-WaitForJavaScriptExpression">WaitForJavaScriptExpression</a>()&nbsp;for&nbsp;a&nbsp;detailed&nbsp;list<br>
+&nbsp;&nbsp;of&nbsp;possible&nbsp;exceptions.</tt></dd></dl>
+
+<dl><dt><a name="Oobe-WaitForDocumentReadyStateToBeInteractiveOrBetter"><strong>WaitForDocumentReadyStateToBeInteractiveOrBetter</strong></a>(self, timeout<font color="#909090">=90</font>)</dt><dd><tt>Waits&nbsp;for&nbsp;the&nbsp;document&nbsp;to&nbsp;be&nbsp;interactive.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;See&nbsp;<a href="#Oobe-WaitForJavaScriptExpression">WaitForJavaScriptExpression</a>()&nbsp;for&nbsp;a&nbsp;detailed&nbsp;list<br>
+&nbsp;&nbsp;of&nbsp;possible&nbsp;exceptions.</tt></dd></dl>
+
+<dl><dt><a name="Oobe-WaitForJavaScriptExpression"><strong>WaitForJavaScriptExpression</strong></a>(self, expr, timeout, dump_page_state_on_timeout<font color="#909090">=True</font>)</dt><dd><tt>Waits&nbsp;for&nbsp;the&nbsp;given&nbsp;JavaScript&nbsp;expression&nbsp;to&nbsp;be&nbsp;True.<br>
+&nbsp;<br>
+This&nbsp;method&nbsp;is&nbsp;robust&nbsp;against&nbsp;any&nbsp;given&nbsp;Evaluation&nbsp;timing&nbsp;out.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;expr:&nbsp;The&nbsp;expression&nbsp;to&nbsp;evaluate.<br>
+&nbsp;&nbsp;timeout:&nbsp;The&nbsp;number&nbsp;of&nbsp;seconds&nbsp;to&nbsp;wait&nbsp;for&nbsp;the&nbsp;expression&nbsp;to&nbsp;be&nbsp;True.<br>
+&nbsp;&nbsp;dump_page_state_on_timeout:&nbsp;Whether&nbsp;to&nbsp;provide&nbsp;additional&nbsp;information&nbsp;on<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;the&nbsp;page&nbsp;state&nbsp;if&nbsp;a&nbsp;TimeoutException&nbsp;is&nbsp;thrown.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.TimeoutException:&nbsp;On&nbsp;a&nbsp;timeout.<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;See&nbsp;<a href="#Oobe-EvaluateJavaScript">EvaluateJavaScript</a>()&nbsp;for&nbsp;a&nbsp;detailed&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;possible&nbsp;exceptions.</tt></dd></dl>
+
+<dl><dt><a name="Oobe-WaitForNavigate"><strong>WaitForNavigate</strong></a>(self, timeout<font color="#909090">=90</font>)</dt><dd><tt>Waits&nbsp;for&nbsp;the&nbsp;navigation&nbsp;to&nbsp;complete.<br>
+&nbsp;<br>
+The&nbsp;current&nbsp;page&nbsp;is&nbsp;expected&nbsp;to&nbsp;be&nbsp;in&nbsp;a&nbsp;navigation.<br>
+This&nbsp;function&nbsp;returns&nbsp;when&nbsp;the&nbsp;navigation&nbsp;is&nbsp;complete&nbsp;or&nbsp;when<br>
+the&nbsp;timeout&nbsp;has&nbsp;been&nbsp;exceeded.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.browser.web_contents.html#WebContents">telemetry.internal.browser.web_contents.WebContents</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>id</strong></dt>
+<dd><tt>Return&nbsp;the&nbsp;unique&nbsp;id&nbsp;string&nbsp;for&nbsp;this&nbsp;tab&nbsp;object.</tt></dd>
+</dl>
+<dl><dt><strong>message_output_stream</strong></dt>
+</dl>
+<dl><dt><strong>timeline_model</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
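The WaitForJavaScriptExpression entry above describes waiting for a JavaScript expression to become True within a timeout and raising a TimeoutException otherwise. A generic, self-contained sketch of that poll-until-true pattern (evaluate_fn stands in for a DevTools evaluation call and is an assumption, not the Telemetry API):

    import time

    class TimeoutException(Exception):
        pass

    def wait_for_expression(evaluate_fn, expr, timeout, poll_interval=0.25):
        """Poll evaluate_fn(expr) until it is truthy or the timeout elapses."""
        deadline = time.time() + timeout
        while time.time() < deadline:
            if evaluate_fn(expr):
                return
            time.sleep(poll_interval)
        raise TimeoutException('Expression did not become true: %r' % expr)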
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.system_info_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.system_info_backend.html
new file mode 100644
index 0000000..ea93819
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.system_info_backend.html
@@ -0,0 +1,62 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.chrome.system_info_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.chrome.html"><font color="#ffffff">chrome</font></a>.system_info_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome/system_info_backend.py">telemetry/internal/backends/chrome/system_info_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.util.camel_case.html">telemetry.internal.util.camel_case</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.backends.chrome_inspector.inspector_websocket.html">telemetry.internal.backends.chrome_inspector.inspector_websocket</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.system_info.html">telemetry.internal.platform.system_info</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome.system_info_backend.html#SystemInfoBackend">SystemInfoBackend</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="SystemInfoBackend">class <strong>SystemInfoBackend</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="SystemInfoBackend-GetSystemInfo"><strong>GetSystemInfo</strong></a>(self, timeout<font color="#909090">=10</font>)</dt></dl>
+
+<dl><dt><a name="SystemInfoBackend-__init__"><strong>__init__</strong></a>(self, devtools_port, devtools_page<font color="#909090">=None</font>)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.tab_list_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.tab_list_backend.html
new file mode 100644
index 0000000..4cba6df
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome.tab_list_backend.html
@@ -0,0 +1,229 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.chrome.tab_list_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.chrome.html"><font color="#ffffff">chrome</font></a>.tab_list_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome/tab_list_backend.py">telemetry/internal/backends/chrome/tab_list_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.inspector_backend_list.html">telemetry.internal.backends.chrome_inspector.inspector_backend_list</a><br>
+</td><td width="25%" valign=top><a href="json.html">json</a><br>
+<a href="telemetry.internal.browser.tab.html">telemetry.internal.browser.tab</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.core.exceptions.html#Error">telemetry.core.exceptions.Error</a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome.tab_list_backend.html#TabUnexpectedResponseException">TabUnexpectedResponseException</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.inspector_backend_list.html#InspectorBackendList">telemetry.internal.backends.chrome_inspector.inspector_backend_list.InspectorBackendList</a>(<a href="_abcoll.html#Sequence">_abcoll.Sequence</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome.tab_list_backend.html#TabListBackend">TabListBackend</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TabListBackend">class <strong>TabListBackend</strong></a>(<a href="telemetry.internal.backends.chrome_inspector.inspector_backend_list.html#InspectorBackendList">telemetry.internal.backends.chrome_inspector.inspector_backend_list.InspectorBackendList</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;dynamic&nbsp;sequence&nbsp;of&nbsp;tab.Tabs&nbsp;in&nbsp;UI&nbsp;order.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome.tab_list_backend.html#TabListBackend">TabListBackend</a></dd>
+<dd><a href="telemetry.internal.backends.chrome_inspector.inspector_backend_list.html#InspectorBackendList">telemetry.internal.backends.chrome_inspector.inspector_backend_list.InspectorBackendList</a></dd>
+<dd><a href="_abcoll.html#Sequence">_abcoll.Sequence</a></dd>
+<dd><a href="_abcoll.html#Sized">_abcoll.Sized</a></dd>
+<dd><a href="_abcoll.html#Iterable">_abcoll.Iterable</a></dd>
+<dd><a href="_abcoll.html#Container">_abcoll.Container</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="TabListBackend-ActivateTab"><strong>ActivateTab</strong></a>(self, tab_id, timeout<font color="#909090">=30</font>)</dt><dd><tt>Activates&nbsp;the&nbsp;tab&nbsp;with&nbsp;the&nbsp;given&nbsp;debugger_url.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;devtools_http.DevToolsClientConnectionError<br>
+&nbsp;&nbsp;devtools_client_backend.TabNotFoundError<br>
+&nbsp;&nbsp;<a href="#TabUnexpectedResponseException">TabUnexpectedResponseException</a></tt></dd></dl>
+
+<dl><dt><a name="TabListBackend-CloseTab"><strong>CloseTab</strong></a>(self, tab_id, timeout<font color="#909090">=300</font>)</dt><dd><tt>Closes&nbsp;the&nbsp;tab&nbsp;with&nbsp;the&nbsp;given&nbsp;debugger_url.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;devtools_http.DevToolsClientConnectionError<br>
+&nbsp;&nbsp;devtools_client_backend.TabNotFoundError<br>
+&nbsp;&nbsp;<a href="#TabUnexpectedResponseException">TabUnexpectedResponseException</a><br>
+&nbsp;&nbsp;exceptions.TimeoutException</tt></dd></dl>
+
+<dl><dt><a name="TabListBackend-CreateWrapper"><strong>CreateWrapper</strong></a>(self, inspector_backend)</dt></dl>
+
+<dl><dt><a name="TabListBackend-Get"><strong>Get</strong></a>(self, index, ret)</dt><dd><tt>Returns&nbsp;self[index]&nbsp;if&nbsp;it&nbsp;exists,&nbsp;or&nbsp;ret&nbsp;if&nbsp;index&nbsp;is&nbsp;out&nbsp;of&nbsp;bounds.</tt></dd></dl>
+
+<dl><dt><a name="TabListBackend-New"><strong>New</strong></a>(self, timeout)</dt><dd><tt>Makes&nbsp;a&nbsp;new&nbsp;tab.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;Tab&nbsp;object.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;devtools_http.DevToolsClientConnectionError</tt></dd></dl>
+
+<dl><dt><a name="TabListBackend-ShouldIncludeContext"><strong>ShouldIncludeContext</strong></a>(self, context)</dt></dl>
+
+<dl><dt><a name="TabListBackend-__init__"><strong>__init__</strong></a>(self, browser_backend)</dt></dl>
+
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>__abstractmethods__</strong> = frozenset([])</dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.chrome_inspector.inspector_backend_list.html#InspectorBackendList">telemetry.internal.backends.chrome_inspector.inspector_backend_list.InspectorBackendList</a>:<br>
+<dl><dt><a name="TabListBackend-GetBackendFromContextId"><strong>GetBackendFromContextId</strong></a>(self, context_id)</dt></dl>
+
+<dl><dt><a name="TabListBackend-GetContextInfo"><strong>GetContextInfo</strong></a>(self, context_id)</dt></dl>
+
+<dl><dt><a name="TabListBackend-GetTabById"><strong>GetTabById</strong></a>(self, identifier)</dt></dl>
+
+<dl><dt><a name="TabListBackend-IterContextIds"><strong>IterContextIds</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TabListBackend-__getitem__"><strong>__getitem__</strong></a>(self, index)</dt><dd><tt>#&nbsp;TODO(nednguyen):&nbsp;Remove&nbsp;this&nbsp;method&nbsp;and&nbsp;turn&nbsp;inspector_backend_list&nbsp;API&nbsp;to<br>
+#&nbsp;dictionary-like&nbsp;API&nbsp;(crbug.com/398467)</tt></dd></dl>
+
+<dl><dt><a name="TabListBackend-__iter__"><strong>__iter__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TabListBackend-__len__"><strong>__len__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.chrome_inspector.inspector_backend_list.html#InspectorBackendList">telemetry.internal.backends.chrome_inspector.inspector_backend_list.InspectorBackendList</a>:<br>
+<dl><dt><strong>app</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="_abcoll.html#Sequence">_abcoll.Sequence</a>:<br>
+<dl><dt><a name="TabListBackend-__contains__"><strong>__contains__</strong></a>(self, value)</dt></dl>
+
+<dl><dt><a name="TabListBackend-__reversed__"><strong>__reversed__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TabListBackend-count"><strong>count</strong></a>(self, value)</dt><dd><tt>S.<a href="#TabListBackend-count">count</a>(value)&nbsp;-&gt;&nbsp;integer&nbsp;--&nbsp;return&nbsp;number&nbsp;of&nbsp;occurrences&nbsp;of&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="TabListBackend-index"><strong>index</strong></a>(self, value)</dt><dd><tt>S.<a href="#TabListBackend-index">index</a>(value)&nbsp;-&gt;&nbsp;integer&nbsp;--&nbsp;return&nbsp;first&nbsp;index&nbsp;of&nbsp;value.<br>
+Raises&nbsp;ValueError&nbsp;if&nbsp;the&nbsp;value&nbsp;is&nbsp;not&nbsp;present.</tt></dd></dl>
+
+<hr>
+Class methods inherited from <a href="_abcoll.html#Sized">_abcoll.Sized</a>:<br>
+<dl><dt><a name="TabListBackend-__subclasshook__"><strong>__subclasshook__</strong></a>(cls, C)<font color="#909090"><font face="helvetica, arial"> from <a href="abc.html#ABCMeta">abc.ABCMeta</a></font></font></dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="_abcoll.html#Sized">_abcoll.Sized</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="_abcoll.html#Sized">_abcoll.Sized</a>:<br>
+<dl><dt><strong>__metaclass__</strong> = &lt;class 'abc.ABCMeta'&gt;<dd><tt>Metaclass&nbsp;for&nbsp;defining&nbsp;Abstract&nbsp;Base&nbsp;Classes&nbsp;(ABCs).<br>
+&nbsp;<br>
+Use&nbsp;this&nbsp;metaclass&nbsp;to&nbsp;create&nbsp;an&nbsp;ABC.&nbsp;&nbsp;An&nbsp;ABC&nbsp;can&nbsp;be&nbsp;subclassed<br>
+directly,&nbsp;and&nbsp;then&nbsp;acts&nbsp;as&nbsp;a&nbsp;mix-in&nbsp;class.&nbsp;&nbsp;You&nbsp;can&nbsp;also&nbsp;register<br>
+unrelated&nbsp;concrete&nbsp;classes&nbsp;(even&nbsp;built-in&nbsp;classes)&nbsp;and&nbsp;unrelated<br>
+ABCs&nbsp;as&nbsp;'virtual&nbsp;subclasses'&nbsp;--&nbsp;these&nbsp;and&nbsp;their&nbsp;descendants&nbsp;will<br>
+be&nbsp;considered&nbsp;subclasses&nbsp;of&nbsp;the&nbsp;registering&nbsp;ABC&nbsp;by&nbsp;the&nbsp;built-in<br>
+issubclass()&nbsp;function,&nbsp;but&nbsp;the&nbsp;registering&nbsp;ABC&nbsp;won't&nbsp;show&nbsp;up&nbsp;in<br>
+their&nbsp;MRO&nbsp;(Method&nbsp;Resolution&nbsp;Order)&nbsp;nor&nbsp;will&nbsp;method<br>
+implementations&nbsp;defined&nbsp;by&nbsp;the&nbsp;registering&nbsp;ABC&nbsp;be&nbsp;callable&nbsp;(not<br>
+even&nbsp;via&nbsp;super()).</tt></dl>
+
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TabUnexpectedResponseException">class <strong>TabUnexpectedResponseException</strong></a>(<a href="telemetry.core.exceptions.html#Error">telemetry.core.exceptions.Error</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome.tab_list_backend.html#TabUnexpectedResponseException">TabUnexpectedResponseException</a></dd>
+<dd><a href="telemetry.core.exceptions.html#Error">telemetry.core.exceptions.Error</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.core.exceptions.html#Error">telemetry.core.exceptions.Error</a>:<br>
+<dl><dt><a name="TabUnexpectedResponseException-AddDebuggingMessage"><strong>AddDebuggingMessage</strong></a>(self, msg)</dt><dd><tt>Adds&nbsp;a&nbsp;message&nbsp;to&nbsp;the&nbsp;description&nbsp;of&nbsp;the&nbsp;exception.<br>
+&nbsp;<br>
+Many&nbsp;Telemetry&nbsp;exceptions&nbsp;arise&nbsp;from&nbsp;failures&nbsp;in&nbsp;another&nbsp;application.&nbsp;These<br>
+failures&nbsp;are&nbsp;difficult&nbsp;to&nbsp;pinpoint.&nbsp;This&nbsp;method&nbsp;allows&nbsp;Telemetry&nbsp;classes&nbsp;to<br>
+append&nbsp;useful&nbsp;debugging&nbsp;information&nbsp;to&nbsp;the&nbsp;exception.&nbsp;This&nbsp;method&nbsp;also&nbsp;logs<br>
+information&nbsp;about&nbsp;the&nbsp;location&nbsp;from&nbsp;where&nbsp;it&nbsp;was&nbsp;called.</tt></dd></dl>
+
+<dl><dt><a name="TabUnexpectedResponseException-__init__"><strong>__init__</strong></a>(self, msg<font color="#909090">=''</font>)</dt></dl>
+
+<dl><dt><a name="TabUnexpectedResponseException-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.core.exceptions.html#Error">telemetry.core.exceptions.Error</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#TabUnexpectedResponseException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="TabUnexpectedResponseException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#TabUnexpectedResponseException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="TabUnexpectedResponseException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#TabUnexpectedResponseException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="TabUnexpectedResponseException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#TabUnexpectedResponseException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="TabUnexpectedResponseException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#TabUnexpectedResponseException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="TabUnexpectedResponseException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="TabUnexpectedResponseException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#TabUnexpectedResponseException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="TabUnexpectedResponseException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#TabUnexpectedResponseException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="TabUnexpectedResponseException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="TabUnexpectedResponseException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.devtools_client_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.devtools_client_backend.html
new file mode 100644
index 0000000..ff536e3
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.devtools_client_backend.html
@@ -0,0 +1,284 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.chrome_inspector.devtools_client_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.chrome_inspector.html"><font color="#ffffff">chrome_inspector</font></a>.devtools_client_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome_inspector/devtools_client_backend.py">telemetry/internal/backends/chrome_inspector/devtools_client_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.backends.browser_backend.html">telemetry.internal.backends.browser_backend</a><br>
+<a href="telemetry.internal.platform.tracing_agent.chrome_tracing_agent.html">telemetry.internal.platform.tracing_agent.chrome_tracing_agent</a><br>
+<a href="telemetry.internal.platform.tracing_agent.chrome_tracing_devtools_manager.html">telemetry.internal.platform.tracing_agent.chrome_tracing_devtools_manager</a><br>
+<a href="telemetry.decorators.html">telemetry.decorators</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.backends.chrome_inspector.devtools_http.html">telemetry.internal.backends.chrome_inspector.devtools_http</a><br>
+<a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.inspector_backend.html">telemetry.internal.backends.chrome_inspector.inspector_backend</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.inspector_websocket.html">telemetry.internal.backends.chrome_inspector.inspector_websocket</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.memory_backend.html">telemetry.internal.backends.chrome_inspector.memory_backend</a><br>
+<a href="re.html">re</a><br>
+<a href="socket.html">socket</a><br>
+</td><td width="25%" valign=top><a href="sys.html">sys</a><br>
+<a href="telemetry.timeline.trace_data.html">telemetry.timeline.trace_data</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.tracing_backend.html">telemetry.internal.backends.chrome_inspector.tracing_backend</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.websocket.html">telemetry.internal.backends.chrome_inspector.websocket</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.devtools_client_backend.html#DevToolsClientBackend">DevToolsClientBackend</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.core.exceptions.html#Error">telemetry.core.exceptions.Error</a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.devtools_client_backend.html#TabNotFoundError">TabNotFoundError</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="DevToolsClientBackend">class <strong>DevToolsClientBackend</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>An&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;that&nbsp;communicates&nbsp;with&nbsp;Chrome's&nbsp;devtools.<br>
+&nbsp;<br>
+This&nbsp;class&nbsp;owns&nbsp;a&nbsp;map&nbsp;of&nbsp;InspectorBackends.&nbsp;It&nbsp;is&nbsp;responsible&nbsp;for&nbsp;creating<br>
+them&nbsp;and&nbsp;destroying&nbsp;them.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="DevToolsClientBackend-ActivateTab"><strong>ActivateTab</strong></a>(self, tab_id, timeout)</dt><dd><tt>Activates&nbsp;the&nbsp;tab&nbsp;with&nbsp;the&nbsp;given&nbsp;id.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;devtools_http.DevToolsClientConnectionError<br>
+&nbsp;&nbsp;<a href="#TabNotFoundError">TabNotFoundError</a></tt></dd></dl>
+
+<dl><dt><a name="DevToolsClientBackend-Close"><strong>Close</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DevToolsClientBackend-CloseTab"><strong>CloseTab</strong></a>(self, tab_id, timeout)</dt><dd><tt>Closes&nbsp;the&nbsp;tab&nbsp;with&nbsp;the&nbsp;given&nbsp;id.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;devtools_http.DevToolsClientConnectionError<br>
+&nbsp;&nbsp;<a href="#TabNotFoundError">TabNotFoundError</a></tt></dd></dl>
+
+<dl><dt><a name="DevToolsClientBackend-DumpMemory"><strong>DumpMemory</strong></a>(self, timeout<font color="#909090">=30</font>)</dt><dd><tt>Dumps&nbsp;memory.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;GUID&nbsp;of&nbsp;the&nbsp;generated&nbsp;dump&nbsp;if&nbsp;successful,&nbsp;None&nbsp;otherwise.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;TracingTimeoutException:&nbsp;If&nbsp;more&nbsp;than&nbsp;|timeout|&nbsp;seconds&nbsp;have&nbsp;passed<br>
+&nbsp;&nbsp;since&nbsp;the&nbsp;last&nbsp;time&nbsp;any&nbsp;data&nbsp;was&nbsp;received.<br>
+&nbsp;&nbsp;TracingUnrecoverableException:&nbsp;If&nbsp;there&nbsp;is&nbsp;a&nbsp;websocket&nbsp;error.<br>
+&nbsp;&nbsp;TracingUnexpectedResponseException:&nbsp;If&nbsp;the&nbsp;response&nbsp;contains&nbsp;an&nbsp;error<br>
+&nbsp;&nbsp;or&nbsp;does&nbsp;not&nbsp;contain&nbsp;the&nbsp;expected&nbsp;result.</tt></dd></dl>
+
+<dl><dt><a name="DevToolsClientBackend-GetChromeBranchNumber"><strong>GetChromeBranchNumber</strong></a>(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="DevToolsClientBackend-GetUpdatedInspectableContexts"><strong>GetUpdatedInspectableContexts</strong></a>(self)</dt><dd><tt>Returns&nbsp;an&nbsp;updated&nbsp;instance&nbsp;of&nbsp;_DevToolsContextMapBackend.</tt></dd></dl>
+
+<dl><dt><a name="DevToolsClientBackend-GetUrl"><strong>GetUrl</strong></a>(self, tab_id)</dt><dd><tt>Returns&nbsp;the&nbsp;URL&nbsp;of&nbsp;the&nbsp;tab&nbsp;with&nbsp;|tab_id|,&nbsp;as&nbsp;reported&nbsp;by&nbsp;devtools.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;devtools_http.DevToolsClientConnectionError</tt></dd></dl>
+
+<dl><dt><a name="DevToolsClientBackend-IsAlive"><strong>IsAlive</strong></a>(self)</dt><dd><tt>Whether&nbsp;the&nbsp;DevTools&nbsp;server&nbsp;is&nbsp;available&nbsp;and&nbsp;connectable.</tt></dd></dl>
+
+<dl><dt><a name="DevToolsClientBackend-IsChromeTracingSupported"><strong>IsChromeTracingSupported</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DevToolsClientBackend-IsInspectable"><strong>IsInspectable</strong></a>(self, tab_id)</dt><dd><tt>Whether&nbsp;the&nbsp;tab&nbsp;with&nbsp;|tab_id|&nbsp;is&nbsp;inspectable,&nbsp;as&nbsp;reported&nbsp;by&nbsp;devtools.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;devtools_http.DevToolsClientConnectionError</tt></dd></dl>
+
+<dl><dt><a name="DevToolsClientBackend-RequestNewTab"><strong>RequestNewTab</strong></a>(self, timeout)</dt><dd><tt>Creates&nbsp;a&nbsp;new&nbsp;tab.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;JSON&nbsp;string&nbsp;as&nbsp;returned&nbsp;by&nbsp;DevTools.&nbsp;Example:<br>
+&nbsp;&nbsp;{<br>
+&nbsp;&nbsp;&nbsp;&nbsp;"description":&nbsp;"",<br>
+&nbsp;&nbsp;&nbsp;&nbsp;"devtoolsFrontendUrl":<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;"/devtools/inspector.html?ws=host:port/devtools/page/id-string",<br>
+&nbsp;&nbsp;&nbsp;&nbsp;"id":&nbsp;"id-string",<br>
+&nbsp;&nbsp;&nbsp;&nbsp;"title":&nbsp;"Page&nbsp;Title",<br>
+&nbsp;&nbsp;&nbsp;&nbsp;"type":&nbsp;"page",<br>
+&nbsp;&nbsp;&nbsp;&nbsp;"url":&nbsp;"url",<br>
+&nbsp;&nbsp;&nbsp;&nbsp;"webSocketDebuggerUrl":&nbsp;"ws://host:port/devtools/page/id-string"<br>
+&nbsp;&nbsp;}<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;devtools_http.DevToolsClientConnectionError</tt></dd></dl>
+
+<dl><dt><a name="DevToolsClientBackend-SetMemoryPressureNotificationsSuppressed"><strong>SetMemoryPressureNotificationsSuppressed</strong></a>(self, suppressed, timeout<font color="#909090">=30</font>)</dt><dd><tt>Enable/disable&nbsp;suppressing&nbsp;memory&nbsp;pressure&nbsp;notifications.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;suppressed:&nbsp;If&nbsp;true,&nbsp;memory&nbsp;pressure&nbsp;notifications&nbsp;will&nbsp;be&nbsp;suppressed.<br>
+&nbsp;&nbsp;timeout:&nbsp;The&nbsp;timeout&nbsp;in&nbsp;seconds.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;MemoryTimeoutException:&nbsp;If&nbsp;more&nbsp;than&nbsp;|timeout|&nbsp;seconds&nbsp;have&nbsp;passed<br>
+&nbsp;&nbsp;since&nbsp;the&nbsp;last&nbsp;time&nbsp;any&nbsp;data&nbsp;was&nbsp;received.<br>
+&nbsp;&nbsp;MemoryUnrecoverableException:&nbsp;If&nbsp;there&nbsp;is&nbsp;a&nbsp;websocket&nbsp;error.<br>
+&nbsp;&nbsp;MemoryUnexpectedResponseException:&nbsp;If&nbsp;the&nbsp;response&nbsp;contains&nbsp;an&nbsp;error<br>
+&nbsp;&nbsp;or&nbsp;does&nbsp;not&nbsp;contain&nbsp;the&nbsp;expected&nbsp;result.</tt></dd></dl>
+
+<dl><dt><a name="DevToolsClientBackend-SimulateMemoryPressureNotification"><strong>SimulateMemoryPressureNotification</strong></a>(self, pressure_level, timeout<font color="#909090">=30</font>)</dt><dd><tt>Simulate&nbsp;a&nbsp;memory&nbsp;pressure&nbsp;notification.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;pressure_level:&nbsp;The&nbsp;memory&nbsp;pressure&nbsp;level&nbsp;of&nbsp;the&nbsp;notification&nbsp;('moderate'<br>
+&nbsp;&nbsp;or&nbsp;'critical').<br>
+&nbsp;&nbsp;timeout:&nbsp;The&nbsp;timeout&nbsp;in&nbsp;seconds.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;MemoryTimeoutException:&nbsp;If&nbsp;more&nbsp;than&nbsp;|timeout|&nbsp;seconds&nbsp;have&nbsp;passed<br>
+&nbsp;&nbsp;since&nbsp;the&nbsp;last&nbsp;time&nbsp;any&nbsp;data&nbsp;was&nbsp;received.<br>
+&nbsp;&nbsp;MemoryUnrecoverableException:&nbsp;If&nbsp;there&nbsp;is&nbsp;a&nbsp;websocket&nbsp;error.<br>
+&nbsp;&nbsp;MemoryUnexpectedResponseException:&nbsp;If&nbsp;the&nbsp;response&nbsp;contains&nbsp;an&nbsp;error<br>
+&nbsp;&nbsp;or&nbsp;does&nbsp;not&nbsp;contain&nbsp;the&nbsp;expected&nbsp;result.</tt></dd></dl>
+
+<dl><dt><a name="DevToolsClientBackend-StartChromeTracing"><strong>StartChromeTracing</strong></a>(self, trace_options, custom_categories<font color="#909090">=None</font>, timeout<font color="#909090">=10</font>)</dt><dd><tt>Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;trace_options:&nbsp;A&nbsp;tracing_options.TracingOptions&nbsp;instance.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;custom_categories:&nbsp;An&nbsp;optional&nbsp;string&nbsp;containing&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;comma&nbsp;separated&nbsp;categories&nbsp;that&nbsp;will&nbsp;be&nbsp;traced<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;instead&nbsp;of&nbsp;the&nbsp;default&nbsp;category&nbsp;set.&nbsp;&nbsp;Example:&nbsp;use<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;"webkit,cc,disabled-by-default-cc.debug"&nbsp;to&nbsp;trace&nbsp;only<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;those&nbsp;three&nbsp;event&nbsp;categories.</tt></dd></dl>
+
+<dl><dt><a name="DevToolsClientBackend-StopChromeTracing"><strong>StopChromeTracing</strong></a>(self, trace_data_builder, timeout<font color="#909090">=30</font>)</dt></dl>
+
+<dl><dt><a name="DevToolsClientBackend-__init__"><strong>__init__</strong></a>(self, devtools_port, remote_devtools_port, app_backend)</dt><dd><tt>Creates&nbsp;a&nbsp;new&nbsp;<a href="#DevToolsClientBackend">DevToolsClientBackend</a>.<br>
+&nbsp;<br>
+A&nbsp;DevTools&nbsp;agent&nbsp;must&nbsp;exist&nbsp;on&nbsp;the&nbsp;given&nbsp;devtools_port.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;devtools_port:&nbsp;The&nbsp;port&nbsp;to&nbsp;use&nbsp;to&nbsp;connect&nbsp;to&nbsp;DevTools&nbsp;agent.<br>
+&nbsp;&nbsp;remote_devtools_port:&nbsp;In&nbsp;some&nbsp;cases&nbsp;(e.g.,&nbsp;an&nbsp;app&nbsp;running&nbsp;on&nbsp;an<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Android&nbsp;device),&nbsp;devtools_port&nbsp;is&nbsp;the&nbsp;forwarded&nbsp;port&nbsp;on&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;host&nbsp;platform.&nbsp;We&nbsp;also&nbsp;need&nbsp;to&nbsp;know&nbsp;the&nbsp;remote_devtools_port<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;so&nbsp;that&nbsp;we&nbsp;can&nbsp;uniquely&nbsp;identify&nbsp;the&nbsp;DevTools&nbsp;agent.<br>
+&nbsp;&nbsp;app_backend:&nbsp;The&nbsp;backend&nbsp;of&nbsp;the&nbsp;app&nbsp;that&nbsp;contains&nbsp;the&nbsp;DevTools&nbsp;agent.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>is_tracing_running</strong></dt>
+</dl>
+<dl><dt><strong>remote_port</strong></dt>
+</dl>
+<dl><dt><strong>support_startup_tracing</strong></dt>
+</dl>
+<dl><dt><strong>supports_overriding_memory_pressure_notifications</strong></dt>
+</dl>
+<dl><dt><strong>supports_tracing</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TabNotFoundError">class <strong>TabNotFoundError</strong></a>(<a href="telemetry.core.exceptions.html#Error">telemetry.core.exceptions.Error</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome_inspector.devtools_client_backend.html#TabNotFoundError">TabNotFoundError</a></dd>
+<dd><a href="telemetry.core.exceptions.html#Error">telemetry.core.exceptions.Error</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.core.exceptions.html#Error">telemetry.core.exceptions.Error</a>:<br>
+<dl><dt><a name="TabNotFoundError-AddDebuggingMessage"><strong>AddDebuggingMessage</strong></a>(self, msg)</dt><dd><tt>Adds&nbsp;a&nbsp;message&nbsp;to&nbsp;the&nbsp;description&nbsp;of&nbsp;the&nbsp;exception.<br>
+&nbsp;<br>
+Many&nbsp;Telemetry&nbsp;exceptions&nbsp;arise&nbsp;from&nbsp;failures&nbsp;in&nbsp;another&nbsp;application.&nbsp;These<br>
+failures&nbsp;are&nbsp;difficult&nbsp;to&nbsp;pinpoint.&nbsp;This&nbsp;method&nbsp;allows&nbsp;Telemetry&nbsp;classes&nbsp;to<br>
+append&nbsp;useful&nbsp;debugging&nbsp;information&nbsp;to&nbsp;the&nbsp;exception.&nbsp;This&nbsp;method&nbsp;also&nbsp;logs<br>
+information&nbsp;about&nbsp;the&nbsp;location&nbsp;from&nbsp;where&nbsp;it&nbsp;was&nbsp;called.</tt></dd></dl>
+
+<dl><dt><a name="TabNotFoundError-__init__"><strong>__init__</strong></a>(self, msg<font color="#909090">=''</font>)</dt></dl>
+
+<dl><dt><a name="TabNotFoundError-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.core.exceptions.html#Error">telemetry.core.exceptions.Error</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#TabNotFoundError-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="TabNotFoundError-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#TabNotFoundError-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="TabNotFoundError-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#TabNotFoundError-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="TabNotFoundError-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#TabNotFoundError-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="TabNotFoundError-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#TabNotFoundError-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="TabNotFoundError-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="TabNotFoundError-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#TabNotFoundError-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="TabNotFoundError-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#TabNotFoundError-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="TabNotFoundError-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="TabNotFoundError-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-IsDevToolsAgentAvailable"><strong>IsDevToolsAgentAvailable</strong></a>(port, app_backend)</dt><dd><tt>Returns&nbsp;True&nbsp;if&nbsp;a&nbsp;DevTools&nbsp;agent&nbsp;is&nbsp;available&nbsp;on&nbsp;the&nbsp;given&nbsp;port.</tt></dd></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>BROWSER_INSPECTOR_WEBSOCKET_URL</strong> = 'ws://127.0.0.1:%i/devtools/browser'</td></tr></table>
+</body></html>
\ No newline at end of file
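The DevToolsClientBackend page above documents the tab lifecycle calls (RequestNewTab, ActivateTab, CloseTab, Close) and the module-level IsDevToolsAgentAvailable() check. A minimal sketch of how they fit together, assuming a catapult/telemetry checkout, a Chrome instance whose DevTools agent listens on devtools_port, and an already-built app_backend for that Chrome (its construction is out of scope here); for a local browser the remote DevTools port is assumed to equal the local one:

```python
# Sketch only: assumes a telemetry checkout, a Chrome whose DevTools agent
# listens on devtools_port, and a telemetry app_backend that owns that Chrome
# (construction not shown).
import json

from telemetry.internal.backends.chrome_inspector import devtools_client_backend


def OpenActivateAndCloseTab(devtools_port, app_backend, timeout=30):
  """Opens a new tab, brings it to the foreground, then closes it."""
  if not devtools_client_backend.IsDevToolsAgentAvailable(
      devtools_port, app_backend):
    raise RuntimeError('No DevTools agent on port %d' % devtools_port)
  client = devtools_client_backend.DevToolsClientBackend(
      devtools_port, devtools_port, app_backend)
  try:
    # RequestNewTab returns a JSON string describing the new tab; its 'id'
    # field is what ActivateTab/CloseTab expect.
    tab_info = json.loads(client.RequestNewTab(timeout))
    client.ActivateTab(tab_info['id'], timeout)
    client.CloseTab(tab_info['id'], timeout)
  finally:
    client.Close()
```

RequestNewTab() returns the raw JSON string shown in its docstring above, so json.loads() is used here to pull out the tab id before passing it to ActivateTab/CloseTab.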
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.devtools_http.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.devtools_http.html
new file mode 100644
index 0000000..03dfd8b
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.devtools_http.html
@@ -0,0 +1,240 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.chrome_inspector.devtools_http</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.chrome_inspector.html"><font color="#ffffff">chrome_inspector</font></a>.devtools_http</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome_inspector/devtools_http.py">telemetry/internal/backends/chrome_inspector/devtools_http.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="errno.html">errno</a><br>
+<a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+</td><td width="25%" valign=top><a href="httplib.html">httplib</a><br>
+<a href="json.html">json</a><br>
+</td><td width="25%" valign=top><a href="socket.html">socket</a><br>
+<a href="sys.html">sys</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.devtools_http.html#DevToolsHttp">DevToolsHttp</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.core.exceptions.html#Error">telemetry.core.exceptions.Error</a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.devtools_http.html#DevToolsClientConnectionError">DevToolsClientConnectionError</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.devtools_http.html#DevToolsClientUrlError">DevToolsClientUrlError</a>
+</font></dt></dl>
+</dd>
+</dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="DevToolsClientConnectionError">class <strong>DevToolsClientConnectionError</strong></a>(<a href="telemetry.core.exceptions.html#Error">telemetry.core.exceptions.Error</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome_inspector.devtools_http.html#DevToolsClientConnectionError">DevToolsClientConnectionError</a></dd>
+<dd><a href="telemetry.core.exceptions.html#Error">telemetry.core.exceptions.Error</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.core.exceptions.html#Error">telemetry.core.exceptions.Error</a>:<br>
+<dl><dt><a name="DevToolsClientConnectionError-AddDebuggingMessage"><strong>AddDebuggingMessage</strong></a>(self, msg)</dt><dd><tt>Adds&nbsp;a&nbsp;message&nbsp;to&nbsp;the&nbsp;description&nbsp;of&nbsp;the&nbsp;exception.<br>
+&nbsp;<br>
+Many&nbsp;Telemetry&nbsp;exceptions&nbsp;arise&nbsp;from&nbsp;failures&nbsp;in&nbsp;another&nbsp;application.&nbsp;These<br>
+failures&nbsp;are&nbsp;difficult&nbsp;to&nbsp;pinpoint.&nbsp;This&nbsp;method&nbsp;allows&nbsp;Telemetry&nbsp;classes&nbsp;to<br>
+append&nbsp;useful&nbsp;debugging&nbsp;information&nbsp;to&nbsp;the&nbsp;exception.&nbsp;This&nbsp;method&nbsp;also&nbsp;logs<br>
+information&nbsp;about&nbsp;the&nbsp;location&nbsp;from&nbsp;where&nbsp;it&nbsp;was&nbsp;called.</tt></dd></dl>
+
+<dl><dt><a name="DevToolsClientConnectionError-__init__"><strong>__init__</strong></a>(self, msg<font color="#909090">=''</font>)</dt></dl>
+
+<dl><dt><a name="DevToolsClientConnectionError-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.core.exceptions.html#Error">telemetry.core.exceptions.Error</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#DevToolsClientConnectionError-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="DevToolsClientConnectionError-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#DevToolsClientConnectionError-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="DevToolsClientConnectionError-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#DevToolsClientConnectionError-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="DevToolsClientConnectionError-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#DevToolsClientConnectionError-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="DevToolsClientConnectionError-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#DevToolsClientConnectionError-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="DevToolsClientConnectionError-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="DevToolsClientConnectionError-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#DevToolsClientConnectionError-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="DevToolsClientConnectionError-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#DevToolsClientConnectionError-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="DevToolsClientConnectionError-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="DevToolsClientConnectionError-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="DevToolsClientUrlError">class <strong>DevToolsClientUrlError</strong></a>(<a href="telemetry.internal.backends.chrome_inspector.devtools_http.html#DevToolsClientConnectionError">DevToolsClientConnectionError</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome_inspector.devtools_http.html#DevToolsClientUrlError">DevToolsClientUrlError</a></dd>
+<dd><a href="telemetry.internal.backends.chrome_inspector.devtools_http.html#DevToolsClientConnectionError">DevToolsClientConnectionError</a></dd>
+<dd><a href="telemetry.core.exceptions.html#Error">telemetry.core.exceptions.Error</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.core.exceptions.html#Error">telemetry.core.exceptions.Error</a>:<br>
+<dl><dt><a name="DevToolsClientUrlError-AddDebuggingMessage"><strong>AddDebuggingMessage</strong></a>(self, msg)</dt><dd><tt>Adds&nbsp;a&nbsp;message&nbsp;to&nbsp;the&nbsp;description&nbsp;of&nbsp;the&nbsp;exception.<br>
+&nbsp;<br>
+Many&nbsp;Telemetry&nbsp;exceptions&nbsp;arise&nbsp;from&nbsp;failures&nbsp;in&nbsp;another&nbsp;application.&nbsp;These<br>
+failures&nbsp;are&nbsp;difficult&nbsp;to&nbsp;pinpoint.&nbsp;This&nbsp;method&nbsp;allows&nbsp;Telemetry&nbsp;classes&nbsp;to<br>
+append&nbsp;useful&nbsp;debugging&nbsp;information&nbsp;to&nbsp;the&nbsp;exception.&nbsp;This&nbsp;method&nbsp;also&nbsp;logs<br>
+information&nbsp;about&nbsp;the&nbsp;location&nbsp;from&nbsp;where&nbsp;it&nbsp;was&nbsp;called.</tt></dd></dl>
+
+<dl><dt><a name="DevToolsClientUrlError-__init__"><strong>__init__</strong></a>(self, msg<font color="#909090">=''</font>)</dt></dl>
+
+<dl><dt><a name="DevToolsClientUrlError-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.core.exceptions.html#Error">telemetry.core.exceptions.Error</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#DevToolsClientUrlError-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="DevToolsClientUrlError-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#DevToolsClientUrlError-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="DevToolsClientUrlError-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#DevToolsClientUrlError-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="DevToolsClientUrlError-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#DevToolsClientUrlError-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="DevToolsClientUrlError-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#DevToolsClientUrlError-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="DevToolsClientUrlError-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="DevToolsClientUrlError-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#DevToolsClientUrlError-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="DevToolsClientUrlError-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#DevToolsClientUrlError-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="DevToolsClientUrlError-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="DevToolsClientUrlError-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="DevToolsHttp">class <strong>DevToolsHttp</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;helper&nbsp;class&nbsp;to&nbsp;send&nbsp;and&nbsp;parse&nbsp;DevTools&nbsp;HTTP&nbsp;requests.<br>
+&nbsp;<br>
+This&nbsp;class&nbsp;maintains&nbsp;a&nbsp;persistent&nbsp;HTTP&nbsp;connection&nbsp;to&nbsp;Chrome&nbsp;devtools.<br>
+Ideally,&nbsp;owners&nbsp;of&nbsp;instances&nbsp;of&nbsp;this&nbsp;class&nbsp;should&nbsp;call&nbsp;<a href="#DevToolsHttp-Disconnect">Disconnect</a>()&nbsp;before<br>
+disposing&nbsp;of&nbsp;the&nbsp;instance.&nbsp;Otherwise,&nbsp;the&nbsp;connection&nbsp;will&nbsp;not&nbsp;be&nbsp;closed&nbsp;until<br>
+the&nbsp;instance&nbsp;is&nbsp;garbage&nbsp;collected.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="DevToolsHttp-Disconnect"><strong>Disconnect</strong></a>(self)</dt><dd><tt>Closes&nbsp;the&nbsp;HTTP&nbsp;connection.</tt></dd></dl>
+
+<dl><dt><a name="DevToolsHttp-Request"><strong>Request</strong></a>(self, path, timeout<font color="#909090">=30</font>)</dt><dd><tt>Sends&nbsp;a&nbsp;request&nbsp;to&nbsp;Chrome&nbsp;devtools.<br>
+&nbsp;<br>
+This&nbsp;method&nbsp;lazily&nbsp;creates&nbsp;an&nbsp;HTTP&nbsp;connection,&nbsp;if&nbsp;one&nbsp;does&nbsp;not&nbsp;already<br>
+exist.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;path:&nbsp;The&nbsp;DevTools&nbsp;URL&nbsp;path,&nbsp;without&nbsp;the&nbsp;/json/&nbsp;prefix.<br>
+&nbsp;&nbsp;timeout:&nbsp;Timeout&nbsp;defaults&nbsp;to&nbsp;30&nbsp;seconds.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;<a href="#DevToolsClientConnectionError">DevToolsClientConnectionError</a>:&nbsp;If&nbsp;the&nbsp;connection&nbsp;fails.</tt></dd></dl>
+
+<dl><dt><a name="DevToolsHttp-RequestJson"><strong>RequestJson</strong></a>(self, path, timeout<font color="#909090">=30</font>)</dt><dd><tt>Sends&nbsp;a&nbsp;request&nbsp;and&nbsp;parse&nbsp;the&nbsp;response&nbsp;as&nbsp;JSON.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;path:&nbsp;The&nbsp;DevTools&nbsp;URL&nbsp;path,&nbsp;without&nbsp;the&nbsp;/json/&nbsp;prefix.<br>
+&nbsp;&nbsp;timeout:&nbsp;Timeout&nbsp;defaults&nbsp;to&nbsp;30&nbsp;seconds.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;<a href="#DevToolsClientConnectionError">DevToolsClientConnectionError</a>:&nbsp;If&nbsp;the&nbsp;connection&nbsp;fails.<br>
+&nbsp;&nbsp;ValueError:&nbsp;If&nbsp;the&nbsp;response&nbsp;is&nbsp;not&nbsp;valid&nbsp;JSON.</tt></dd></dl>
+
+<dl><dt><a name="DevToolsHttp-__del__"><strong>__del__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DevToolsHttp-__init__"><strong>__init__</strong></a>(self, devtools_port)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
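DevToolsHttp above is the low-level helper behind the client backend; its docstrings note that the /json/ prefix is implied and that owners should call Disconnect() rather than rely on garbage collection. A small sketch under those assumptions (the 'list' path is the standard DevTools page-listing endpoint, not something this class defines):

```python
# Sketch only: assumes a telemetry checkout and a Chrome instance serving its
# DevTools HTTP endpoint on devtools_port.
from telemetry.internal.backends.chrome_inspector import devtools_http


def ListInspectablePages(devtools_port, timeout=30):
  """Returns the parsed page list from DevTools, or [] if unreachable."""
  client = devtools_http.DevToolsHttp(devtools_port)
  try:
    # The /json/ prefix is implied, so 'list' maps to the standard /json/list
    # endpoint that enumerates the open, inspectable pages.
    return client.RequestJson('list', timeout=timeout)
  except devtools_http.DevToolsClientConnectionError:
    return []
  finally:
    client.Disconnect()  # Close the persistent connection explicitly.
```

Because DevToolsClientUrlError derives from DevToolsClientConnectionError (see the class hierarchy above), catching the parent class also covers URL-level failures.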
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.html
new file mode 100644
index 0000000..0fc43b0
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.html
@@ -0,0 +1,47 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.internal.backends.chrome_inspector</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.chrome_inspector</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome_inspector/__init__.py">telemetry/internal/backends/chrome_inspector/__init__.py</a></font></td></tr></table>
+    <p><tt>This&nbsp;package&nbsp;contains&nbsp;classes&nbsp;and&nbsp;methods&nbsp;for&nbsp;controlling&nbsp;the&nbsp;Chrome<br>
+devtools.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.backends.chrome_inspector.devtools_client_backend.html">devtools_client_backend</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.devtools_client_backend_unittest.html">devtools_client_backend_unittest</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.devtools_http.html">devtools_http</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.devtools_http_unittest.html">devtools_http_unittest</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.inspector_backend.html">inspector_backend</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.inspector_backend_list.html">inspector_backend_list</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.backends.chrome_inspector.inspector_console.html">inspector_console</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.inspector_console_unittest.html">inspector_console_unittest</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.inspector_memory.html">inspector_memory</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.inspector_memory_unittest.html">inspector_memory_unittest</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.inspector_network.html">inspector_network</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.inspector_network_unittest.html">inspector_network_unittest</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.backends.chrome_inspector.inspector_page.html">inspector_page</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.inspector_page_unittest.html">inspector_page_unittest</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.inspector_runtime.html">inspector_runtime</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.inspector_runtime_unittest.html">inspector_runtime_unittest</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.inspector_websocket.html">inspector_websocket</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.inspector_websocket_unittest.html">inspector_websocket_unittest</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.backends.chrome_inspector.memory_backend.html">memory_backend</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.memory_backend_unittest.html">memory_backend_unittest</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.tracing_backend.html">tracing_backend</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.tracing_backend_unittest.html">tracing_backend_unittest</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.websocket.html">websocket</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.websocket_unittest.html">websocket_unittest</a><br>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.inspector_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.inspector_backend.html
new file mode 100644
index 0000000..cab059e
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.inspector_backend.html
@@ -0,0 +1,177 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.chrome_inspector.inspector_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.chrome_inspector.html"><font color="#ffffff">chrome_inspector</font></a>.inspector_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome_inspector/inspector_backend.py">telemetry/internal/backends/chrome_inspector/inspector_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.decorators.html">telemetry.decorators</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.devtools_http.html">telemetry.internal.backends.chrome_inspector.devtools_http</a><br>
+<a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+<a href="functools.html">functools</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.inspector_console.html">telemetry.internal.backends.chrome_inspector.inspector_console</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.backends.chrome_inspector.inspector_memory.html">telemetry.internal.backends.chrome_inspector.inspector_memory</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.inspector_network.html">telemetry.internal.backends.chrome_inspector.inspector_network</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.inspector_page.html">telemetry.internal.backends.chrome_inspector.inspector_page</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.inspector_runtime.html">telemetry.internal.backends.chrome_inspector.inspector_runtime</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.inspector_websocket.html">telemetry.internal.backends.chrome_inspector.inspector_websocket</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+<a href="os.html">os</a><br>
+<a href="socket.html">socket</a><br>
+<a href="sys.html">sys</a><br>
+<a href="telemetry.timeline.model.html">telemetry.timeline.model</a><br>
+</td><td width="25%" valign=top><a href="telemetry.timeline.trace_data.html">telemetry.timeline.trace_data</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.websocket.html">telemetry.internal.backends.chrome_inspector.websocket</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.inspector_backend.html#InspectorBackend">InspectorBackend</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="InspectorBackend">class <strong>InspectorBackend</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Class&nbsp;for&nbsp;communicating&nbsp;with&nbsp;a&nbsp;devtools&nbsp;client.<br>
+&nbsp;<br>
+The&nbsp;owner&nbsp;of&nbsp;an&nbsp;instance&nbsp;of&nbsp;this&nbsp;class&nbsp;is&nbsp;responsible&nbsp;for&nbsp;calling<br>
+<a href="#InspectorBackend-Disconnect">Disconnect</a>()&nbsp;before&nbsp;disposing&nbsp;of&nbsp;the&nbsp;instance.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="InspectorBackend-ClearCache"><strong>ClearCache</strong></a>(inspector_backend, *args, **kwargs)</dt></dl>
+
+<dl><dt><a name="InspectorBackend-CollectGarbage"><strong>CollectGarbage</strong></a>(inspector_backend, *args, **kwargs)</dt></dl>
+
+<dl><dt><a name="InspectorBackend-Disconnect"><strong>Disconnect</strong></a>(self)</dt><dd><tt>Disconnects&nbsp;the&nbsp;inspector&nbsp;websocket.<br>
+&nbsp;<br>
+This&nbsp;method&nbsp;intentionally&nbsp;leaves&nbsp;the&nbsp;self.<strong>_websocket</strong>&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;around,&nbsp;so&nbsp;that<br>
+future&nbsp;calls&nbsp;to&nbsp;it&nbsp;will&nbsp;fail&nbsp;with&nbsp;a&nbsp;relevant&nbsp;error.</tt></dd></dl>
+
+<dl><dt><a name="InspectorBackend-EnableAllContexts"><strong>EnableAllContexts</strong></a>(inspector_backend, *args, **kwargs)</dt><dd><tt>Allows&nbsp;access&nbsp;to&nbsp;iframes.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.WebSocketDisconnected<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="InspectorBackend-EvaluateJavaScript"><strong>EvaluateJavaScript</strong></a>(inspector_backend, *args, **kwargs)</dt><dd><tt>Evaluates&nbsp;a&nbsp;javascript&nbsp;expression&nbsp;and&nbsp;returns&nbsp;the&nbsp;result.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.EvaluateException<br>
+&nbsp;&nbsp;exceptions.WebSocketDisconnected<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="InspectorBackend-ExecuteJavaScript"><strong>ExecuteJavaScript</strong></a>(inspector_backend, *args, **kwargs)</dt><dd><tt>Executes&nbsp;a&nbsp;javascript&nbsp;expression&nbsp;without&nbsp;returning&nbsp;the&nbsp;result.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.EvaluateException<br>
+&nbsp;&nbsp;exceptions.WebSocketDisconnected<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="InspectorBackend-GetCookieByName"><strong>GetCookieByName</strong></a>(inspector_backend, *args, **kwargs)</dt></dl>
+
+<dl><dt><a name="InspectorBackend-GetDOMStats"><strong>GetDOMStats</strong></a>(inspector_backend, *args, **kwargs)</dt><dd><tt>Gets&nbsp;memory&nbsp;stats&nbsp;from&nbsp;the&nbsp;DOM.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;inspector_memory.InspectorMemoryException<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="InspectorBackend-GetWebviewInspectorBackends"><strong>GetWebviewInspectorBackends</strong></a>(self)</dt><dd><tt>Returns&nbsp;a&nbsp;list&nbsp;of&nbsp;<a href="#InspectorBackend">InspectorBackend</a>&nbsp;instances&nbsp;associated&nbsp;with&nbsp;webviews.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;devtools_http.DevToolsClientConnectionError</tt></dd></dl>
+
+<dl><dt><a name="InspectorBackend-IsInspectable"><strong>IsInspectable</strong></a>(self)</dt><dd><tt>Whether&nbsp;the&nbsp;tab&nbsp;is&nbsp;inspectable,&nbsp;as&nbsp;reported&nbsp;by&nbsp;devtools.</tt></dd></dl>
+
+<dl><dt><a name="InspectorBackend-Navigate"><strong>Navigate</strong></a>(inspector_backend, *args, **kwargs)</dt></dl>
+
+<dl><dt><a name="InspectorBackend-Screenshot"><strong>Screenshot</strong></a>(inspector_backend, *args, **kwargs)</dt></dl>
+
+<dl><dt><a name="InspectorBackend-StartTimelineRecording"><strong>StartTimelineRecording</strong></a>(inspector_backend, *args, **kwargs)</dt></dl>
+
+<dl><dt><a name="InspectorBackend-StopTimelineRecording"><strong>StopTimelineRecording</strong></a>(inspector_backend, *args, **kwargs)</dt></dl>
+
+<dl><dt><a name="InspectorBackend-SynthesizeScrollGesture"><strong>SynthesizeScrollGesture</strong></a>(inspector_backend, *args, **kwargs)</dt><dd><tt>Runs&nbsp;an&nbsp;inspector&nbsp;command&nbsp;that&nbsp;causes&nbsp;a&nbsp;repeatable&nbsp;browser&nbsp;driven&nbsp;scroll.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;x:&nbsp;X&nbsp;coordinate&nbsp;of&nbsp;the&nbsp;start&nbsp;of&nbsp;the&nbsp;gesture&nbsp;in&nbsp;CSS&nbsp;pixels.<br>
+&nbsp;&nbsp;y:&nbsp;Y&nbsp;coordinate&nbsp;of&nbsp;the&nbsp;start&nbsp;of&nbsp;the&nbsp;gesture&nbsp;in&nbsp;CSS&nbsp;pixels.<br>
+&nbsp;&nbsp;xDistance:&nbsp;Distance&nbsp;to&nbsp;scroll&nbsp;along&nbsp;the&nbsp;X&nbsp;axis&nbsp;(positive&nbsp;to&nbsp;scroll&nbsp;left).<br>
+&nbsp;&nbsp;yDistance:&nbsp;Distance&nbsp;to&nbsp;scroll&nbsp;along&nbsp;the&nbsp;Y&nbsp;axis&nbsp;(positive&nbsp;to&nbsp;scroll&nbsp;up).<br>
+&nbsp;&nbsp;xOverscroll:&nbsp;Number&nbsp;of&nbsp;additional&nbsp;pixels&nbsp;to&nbsp;scroll&nbsp;back&nbsp;along&nbsp;the&nbsp;X&nbsp;axis.<br>
+&nbsp;&nbsp;yOverscroll:&nbsp;Number&nbsp;of&nbsp;additional&nbsp;pixels&nbsp;to&nbsp;scroll&nbsp;back&nbsp;along&nbsp;the&nbsp;Y&nbsp;axis.<br>
+&nbsp;&nbsp;preventFling:&nbsp;Prevents&nbsp;a&nbsp;fling&nbsp;gesture.<br>
+&nbsp;&nbsp;speed:&nbsp;Swipe&nbsp;speed&nbsp;in&nbsp;pixels&nbsp;per&nbsp;second.<br>
+&nbsp;&nbsp;gestureSourceType:&nbsp;Which&nbsp;type&nbsp;of&nbsp;input&nbsp;events&nbsp;to&nbsp;be&nbsp;generated.<br>
+&nbsp;&nbsp;repeatCount:&nbsp;Number&nbsp;of&nbsp;additional&nbsp;repeats&nbsp;beyond&nbsp;the&nbsp;first&nbsp;scroll.<br>
+&nbsp;&nbsp;repeatDelayMs:&nbsp;Number&nbsp;of&nbsp;milliseconds&nbsp;delay&nbsp;between&nbsp;each&nbsp;repeat.<br>
+&nbsp;&nbsp;interactionMarkerName:&nbsp;The&nbsp;name&nbsp;of&nbsp;the&nbsp;interaction&nbsp;markers&nbsp;to&nbsp;generate.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="InspectorBackend-WaitForNavigate"><strong>WaitForNavigate</strong></a>(inspector_backend, *args, **kwargs)</dt></dl>
+
+<dl><dt><a name="InspectorBackend-__del__"><strong>__del__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="InspectorBackend-__init__"><strong>__init__</strong></a>(self, app, devtools_client, context, timeout<font color="#909090">=60</font>)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>app</strong></dt>
+</dl>
+<dl><dt><strong>debugger_url</strong></dt>
+</dl>
+<dl><dt><strong>id</strong></dt>
+</dl>
+<dl><dt><strong>message_output_stream</strong></dt>
+</dl>
+<dl><dt><strong>screenshot_supported</strong></dt>
+</dl>
+<dl><dt><strong>timeline_model</strong></dt>
+</dl>
+<dl><dt><strong>url</strong></dt>
+<dd><tt>Returns&nbsp;the&nbsp;URL&nbsp;of&nbsp;the&nbsp;tab,&nbsp;as&nbsp;reported&nbsp;by&nbsp;devtools.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;devtools_http.DevToolsClientConnectionError</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
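The generated page above lists only signatures, so a short usage sketch may help orient readers. It is illustrative only: the variable named backend is assumed to be an already-connected InspectorBackend (in practice reached through Telemetry's public tab API rather than constructed directly), and because pydoc hides the positional arguments of the decorated methods, the keyword calls below are assumptions drawn from the docstrings, not the authoritative API.

    # Illustrative sketch only: drives a page through an InspectorBackend.
    # 'backend' is assumed to be an already-connected InspectorBackend
    # instance obtained via Telemetry's tab wrapper.
    def sample_run(backend):
      # Evaluate a JavaScript expression in the page and use the result.
      title = backend.EvaluateJavaScript('document.title')
      # Trigger a repeatable, browser-driven scroll; argument names are
      # taken from the SynthesizeScrollGesture docstring above.
      backend.SynthesizeScrollGesture(x=100, y=300, yDistance=-500, speed=800)
      # DOM memory statistics, as documented for GetDOMStats.
      stats = backend.GetDOMStats(timeout=60)
      return title, stats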
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.inspector_backend_list.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.inspector_backend_list.html
new file mode 100644
index 0000000..6424181
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.inspector_backend_list.html
@@ -0,0 +1,141 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.chrome_inspector.inspector_backend_list</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.chrome_inspector.html"><font color="#ffffff">chrome_inspector</font></a>.inspector_backend_list</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome_inspector/inspector_backend_list.py">telemetry/internal/backends/chrome_inspector/inspector_backend_list.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="collections.html">collections</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="_abcoll.html#Sequence">_abcoll.Sequence</a>(<a href="_abcoll.html#Sized">_abcoll.Sized</a>, <a href="_abcoll.html#Iterable">_abcoll.Iterable</a>, <a href="_abcoll.html#Container">_abcoll.Container</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.inspector_backend_list.html#InspectorBackendList">InspectorBackendList</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="InspectorBackendList">class <strong>InspectorBackendList</strong></a>(<a href="_abcoll.html#Sequence">_abcoll.Sequence</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;dynamic&nbsp;sequence&nbsp;of&nbsp;active&nbsp;InspectorBackends.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome_inspector.inspector_backend_list.html#InspectorBackendList">InspectorBackendList</a></dd>
+<dd><a href="_abcoll.html#Sequence">_abcoll.Sequence</a></dd>
+<dd><a href="_abcoll.html#Sized">_abcoll.Sized</a></dd>
+<dd><a href="_abcoll.html#Iterable">_abcoll.Iterable</a></dd>
+<dd><a href="_abcoll.html#Container">_abcoll.Container</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="InspectorBackendList-CreateWrapper"><strong>CreateWrapper</strong></a>(self, inspector_backend_instance)</dt><dd><tt>Override&nbsp;to&nbsp;return&nbsp;the&nbsp;wrapper&nbsp;API&nbsp;over&nbsp;InspectorBackend.<br>
+&nbsp;<br>
+The&nbsp;wrapper&nbsp;API&nbsp;is&nbsp;the&nbsp;public&nbsp;interface&nbsp;for&nbsp;InspectorBackend.&nbsp;It<br>
+may&nbsp;expose&nbsp;whatever&nbsp;methods&nbsp;are&nbsp;desired&nbsp;on&nbsp;top&nbsp;of&nbsp;that&nbsp;backend.</tt></dd></dl>
+
+<dl><dt><a name="InspectorBackendList-GetBackendFromContextId"><strong>GetBackendFromContextId</strong></a>(self, context_id)</dt></dl>
+
+<dl><dt><a name="InspectorBackendList-GetContextInfo"><strong>GetContextInfo</strong></a>(self, context_id)</dt></dl>
+
+<dl><dt><a name="InspectorBackendList-GetTabById"><strong>GetTabById</strong></a>(self, identifier)</dt></dl>
+
+<dl><dt><a name="InspectorBackendList-IterContextIds"><strong>IterContextIds</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="InspectorBackendList-ShouldIncludeContext"><strong>ShouldIncludeContext</strong></a>(self, _)</dt><dd><tt>Override&nbsp;this&nbsp;method&nbsp;to&nbsp;control&nbsp;which&nbsp;contexts&nbsp;are&nbsp;included.</tt></dd></dl>
+
+<dl><dt><a name="InspectorBackendList-__getitem__"><strong>__getitem__</strong></a>(self, index)</dt><dd><tt>#&nbsp;TODO(nednguyen):&nbsp;Remove&nbsp;this&nbsp;method&nbsp;and&nbsp;turn&nbsp;inspector_backend_list&nbsp;API&nbsp;to<br>
+#&nbsp;dictionary-like&nbsp;API&nbsp;(crbug.com/398467)</tt></dd></dl>
+
+<dl><dt><a name="InspectorBackendList-__init__"><strong>__init__</strong></a>(self, browser_backend)</dt><dd><tt>Constructor.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;browser_backend:&nbsp;The&nbsp;BrowserBackend&nbsp;instance&nbsp;to&nbsp;query&nbsp;for<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;InspectorBackends.</tt></dd></dl>
+
+<dl><dt><a name="InspectorBackendList-__iter__"><strong>__iter__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="InspectorBackendList-__len__"><strong>__len__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>app</strong></dt>
+</dl>
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>__abstractmethods__</strong> = frozenset([])</dl>
+
+<hr>
+Methods inherited from <a href="_abcoll.html#Sequence">_abcoll.Sequence</a>:<br>
+<dl><dt><a name="InspectorBackendList-__contains__"><strong>__contains__</strong></a>(self, value)</dt></dl>
+
+<dl><dt><a name="InspectorBackendList-__reversed__"><strong>__reversed__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="InspectorBackendList-count"><strong>count</strong></a>(self, value)</dt><dd><tt>S.<a href="#InspectorBackendList-count">count</a>(value)&nbsp;-&gt;&nbsp;integer&nbsp;--&nbsp;return&nbsp;number&nbsp;of&nbsp;occurrences&nbsp;of&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="InspectorBackendList-index"><strong>index</strong></a>(self, value)</dt><dd><tt>S.<a href="#InspectorBackendList-index">index</a>(value)&nbsp;-&gt;&nbsp;integer&nbsp;--&nbsp;return&nbsp;first&nbsp;index&nbsp;of&nbsp;value.<br>
+Raises&nbsp;ValueError&nbsp;if&nbsp;the&nbsp;value&nbsp;is&nbsp;not&nbsp;present.</tt></dd></dl>
+
+<hr>
+Class methods inherited from <a href="_abcoll.html#Sized">_abcoll.Sized</a>:<br>
+<dl><dt><a name="InspectorBackendList-__subclasshook__"><strong>__subclasshook__</strong></a>(cls, C)<font color="#909090"><font face="helvetica, arial"> from <a href="abc.html#ABCMeta">abc.ABCMeta</a></font></font></dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="_abcoll.html#Sized">_abcoll.Sized</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="_abcoll.html#Sized">_abcoll.Sized</a>:<br>
+<dl><dt><strong>__metaclass__</strong> = &lt;class 'abc.ABCMeta'&gt;<dd><tt>Metaclass&nbsp;for&nbsp;defining&nbsp;Abstract&nbsp;Base&nbsp;Classes&nbsp;(ABCs).<br>
+&nbsp;<br>
+Use&nbsp;this&nbsp;metaclass&nbsp;to&nbsp;create&nbsp;an&nbsp;ABC.&nbsp;&nbsp;An&nbsp;ABC&nbsp;can&nbsp;be&nbsp;subclassed<br>
+directly,&nbsp;and&nbsp;then&nbsp;acts&nbsp;as&nbsp;a&nbsp;mix-in&nbsp;class.&nbsp;&nbsp;You&nbsp;can&nbsp;also&nbsp;register<br>
+unrelated&nbsp;concrete&nbsp;classes&nbsp;(even&nbsp;built-in&nbsp;classes)&nbsp;and&nbsp;unrelated<br>
+ABCs&nbsp;as&nbsp;'virtual&nbsp;subclasses'&nbsp;--&nbsp;these&nbsp;and&nbsp;their&nbsp;descendants&nbsp;will<br>
+be&nbsp;considered&nbsp;subclasses&nbsp;of&nbsp;the&nbsp;registering&nbsp;ABC&nbsp;by&nbsp;the&nbsp;built-in<br>
+issubclass()&nbsp;function,&nbsp;but&nbsp;the&nbsp;registering&nbsp;ABC&nbsp;won't&nbsp;show&nbsp;up&nbsp;in<br>
+their&nbsp;MRO&nbsp;(Method&nbsp;Resolution&nbsp;Order)&nbsp;nor&nbsp;will&nbsp;method<br>
+implementations&nbsp;defined&nbsp;by&nbsp;the&nbsp;registering&nbsp;ABC&nbsp;be&nbsp;callable&nbsp;(not<br>
+even&nbsp;via&nbsp;super()).</tt></dl>
+
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-DebuggerUrlToId"><strong>DebuggerUrlToId</strong></a>(debugger_url)</dt></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
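Because CreateWrapper and ShouldIncludeContext are documented above as override points, a small subclass sketch may clarify how they fit together. Only the method names and import path come from this page; the 'page' type check on the context argument is an assumption about the DevTools context dictionaries being filtered, and real subclasses would return a richer wrapper from CreateWrapper.

    # Illustrative subclass showing the documented override points of
    # InspectorBackendList.
    from telemetry.internal.backends.chrome_inspector import inspector_backend_list

    class PageOnlyBackendList(inspector_backend_list.InspectorBackendList):

      def ShouldIncludeContext(self, context):
        # Keep only top-level page targets; the 'type' key is an assumption
        # about the shape of the DevTools context info.
        return context.get('type') == 'page'

      def CreateWrapper(self, inspector_backend_instance):
        # Real subclasses return a richer public wrapper (e.g. a tab object);
        # the identity wrapper keeps this sketch self-contained.
        return inspector_backend_instance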
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.inspector_console.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.inspector_console.html
new file mode 100644
index 0000000..7ce3e88
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.inspector_console.html
@@ -0,0 +1,52 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.chrome_inspector.inspector_console</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.chrome_inspector.html"><font color="#ffffff">chrome_inspector</font></a>.inspector_console</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome_inspector/inspector_console.py">telemetry/internal/backends/chrome_inspector/inspector_console.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.inspector_console.html#InspectorConsole">InspectorConsole</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="InspectorConsole">class <strong>InspectorConsole</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="InspectorConsole-__init__"><strong>__init__</strong></a>(self, inspector_websocket)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>message_output_stream</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.inspector_memory.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.inspector_memory.html
new file mode 100644
index 0000000..d3648d1
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.inspector_memory.html
@@ -0,0 +1,148 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.chrome_inspector.inspector_memory</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.chrome_inspector.html"><font color="#ffffff">chrome_inspector</font></a>.inspector_memory</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome_inspector/inspector_memory.py">telemetry/internal/backends/chrome_inspector/inspector_memory.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+</td><td width="25%" valign=top><a href="json.html">json</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.inspector_memory.html#InspectorMemory">InspectorMemory</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.core.exceptions.html#Error">telemetry.core.exceptions.Error</a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.inspector_memory.html#InspectorMemoryException">InspectorMemoryException</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="InspectorMemory">class <strong>InspectorMemory</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Communicates&nbsp;with&nbsp;the&nbsp;remote&nbsp;inspector's&nbsp;Memory&nbsp;domain.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="InspectorMemory-GetDOMCounters"><strong>GetDOMCounters</strong></a>(self, timeout)</dt><dd><tt>Retrieves&nbsp;DOM&nbsp;element&nbsp;counts.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;timeout:&nbsp;The&nbsp;number&nbsp;of&nbsp;seconds&nbsp;to&nbsp;wait&nbsp;for&nbsp;the&nbsp;inspector&nbsp;backend&nbsp;to<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;service&nbsp;the&nbsp;request&nbsp;before&nbsp;timing&nbsp;out.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;dictionary&nbsp;containing&nbsp;the&nbsp;counts&nbsp;associated&nbsp;with&nbsp;"nodes",&nbsp;"documents",<br>
+&nbsp;&nbsp;and&nbsp;"jsEventListeners".<br>
+Raises:<br>
+&nbsp;&nbsp;<a href="#InspectorMemoryException">InspectorMemoryException</a><br>
+&nbsp;&nbsp;websocket.WebSocketException<br>
+&nbsp;&nbsp;socket.error<br>
+&nbsp;&nbsp;exceptions.WebSocketDisconnected</tt></dd></dl>
+
+<dl><dt><a name="InspectorMemory-__init__"><strong>__init__</strong></a>(self, inspector_websocket)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="InspectorMemoryException">class <strong>InspectorMemoryException</strong></a>(<a href="telemetry.core.exceptions.html#Error">telemetry.core.exceptions.Error</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome_inspector.inspector_memory.html#InspectorMemoryException">InspectorMemoryException</a></dd>
+<dd><a href="telemetry.core.exceptions.html#Error">telemetry.core.exceptions.Error</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.core.exceptions.html#Error">telemetry.core.exceptions.Error</a>:<br>
+<dl><dt><a name="InspectorMemoryException-AddDebuggingMessage"><strong>AddDebuggingMessage</strong></a>(self, msg)</dt><dd><tt>Adds&nbsp;a&nbsp;message&nbsp;to&nbsp;the&nbsp;description&nbsp;of&nbsp;the&nbsp;exception.<br>
+&nbsp;<br>
+Many&nbsp;Telemetry&nbsp;exceptions&nbsp;arise&nbsp;from&nbsp;failures&nbsp;in&nbsp;another&nbsp;application.&nbsp;These<br>
+failures&nbsp;are&nbsp;difficult&nbsp;to&nbsp;pinpoint.&nbsp;This&nbsp;method&nbsp;allows&nbsp;Telemetry&nbsp;classes&nbsp;to<br>
+append&nbsp;useful&nbsp;debugging&nbsp;information&nbsp;to&nbsp;the&nbsp;exception.&nbsp;This&nbsp;method&nbsp;also&nbsp;logs<br>
+information&nbsp;about&nbsp;the&nbsp;location&nbsp;from&nbsp;where&nbsp;it&nbsp;was&nbsp;called.</tt></dd></dl>
+
+<dl><dt><a name="InspectorMemoryException-__init__"><strong>__init__</strong></a>(self, msg<font color="#909090">=''</font>)</dt></dl>
+
+<dl><dt><a name="InspectorMemoryException-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.core.exceptions.html#Error">telemetry.core.exceptions.Error</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#InspectorMemoryException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="InspectorMemoryException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#InspectorMemoryException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="InspectorMemoryException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#InspectorMemoryException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="InspectorMemoryException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#InspectorMemoryException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="InspectorMemoryException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#InspectorMemoryException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="InspectorMemoryException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="InspectorMemoryException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#InspectorMemoryException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="InspectorMemoryException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#InspectorMemoryException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="InspectorMemoryException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="InspectorMemoryException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
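The GetDOMCounters docstring above spells out both the timeout argument and the returned keys, so a short sketch is low-risk. The only assumption is a pre-existing, connected InspectorWebsocket instance named ws (see the inspector_websocket page further down).

    # Illustrative sketch: read DOM counters through InspectorMemory.
    # 'ws' is assumed to be an already-connected InspectorWebsocket.
    from telemetry.internal.backends.chrome_inspector import inspector_memory

    def print_dom_counters(ws):
      memory = inspector_memory.InspectorMemory(ws)
      counters = memory.GetDOMCounters(timeout=60)
      # Keys documented above: "nodes", "documents", "jsEventListeners".
      print('nodes=%d documents=%d jsEventListeners=%d' % (
          counters['nodes'], counters['documents'],
          counters['jsEventListeners']))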
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.inspector_network.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.inspector_network.html
new file mode 100644
index 0000000..62e02a3
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.inspector_network.html
@@ -0,0 +1,209 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.chrome_inspector.inspector_network</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.chrome_inspector.html"><font color="#ffffff">chrome_inspector</font></a>.inspector_network</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome_inspector/inspector_network.py">telemetry/internal/backends/chrome_inspector/inspector_network.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.inspector_network.html#InspectorNetwork">InspectorNetwork</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.inspector_network.html#InspectorNetworkResponseData">InspectorNetworkResponseData</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.inspector_network.html#TimelineRecorder">TimelineRecorder</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.inspector_network.html#InspectorNetworkException">InspectorNetworkException</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="InspectorNetwork">class <strong>InspectorNetwork</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="InspectorNetwork-ClearCache"><strong>ClearCache</strong></a>(self, timeout<font color="#909090">=60</font>)</dt><dd><tt>Clears&nbsp;the&nbsp;browser's&nbsp;disk&nbsp;and&nbsp;memory&nbsp;cache.</tt></dd></dl>
+
+<dl><dt><a name="InspectorNetwork-ClearResponseData"><strong>ClearResponseData</strong></a>(self)</dt><dd><tt>Clears&nbsp;recorded&nbsp;HTTP&nbsp;responses.</tt></dd></dl>
+
+<dl><dt><a name="InspectorNetwork-GetHTTPResponseBody"><strong>GetHTTPResponseBody</strong></a>(self, request_id, timeout<font color="#909090">=60</font>)</dt></dl>
+
+<dl><dt><a name="InspectorNetwork-GetResponseData"><strong>GetResponseData</strong></a>(self)</dt><dd><tt>Returns&nbsp;all&nbsp;recorded&nbsp;HTTP&nbsp;responses.</tt></dd></dl>
+
+<dl><dt><a name="InspectorNetwork-HTTPResponseServedFromCache"><strong>HTTPResponseServedFromCache</strong></a>(self, request_id)</dt></dl>
+
+<dl><dt><a name="InspectorNetwork-StartMonitoringNetwork"><strong>StartMonitoringNetwork</strong></a>(self)</dt><dd><tt>Starts&nbsp;monitoring&nbsp;network&nbsp;notifications&nbsp;and&nbsp;recording&nbsp;HTTP&nbsp;responses.</tt></dd></dl>
+
+<dl><dt><a name="InspectorNetwork-StopMonitoringNetwork"><strong>StopMonitoringNetwork</strong></a>(self)</dt><dd><tt>Stops&nbsp;monitoring&nbsp;network&nbsp;notifications&nbsp;and&nbsp;recording&nbsp;HTTP&nbsp;responses.</tt></dd></dl>
+
+<dl><dt><a name="InspectorNetwork-__init__"><strong>__init__</strong></a>(self, inspector_websocket)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>timeline_recorder</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="InspectorNetworkException">class <strong>InspectorNetworkException</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome_inspector.inspector_network.html#InspectorNetworkException">InspectorNetworkException</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="InspectorNetworkException-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#InspectorNetworkException-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#InspectorNetworkException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="InspectorNetworkException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#InspectorNetworkException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="InspectorNetworkException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#InspectorNetworkException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="InspectorNetworkException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#InspectorNetworkException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="InspectorNetworkException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#InspectorNetworkException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="InspectorNetworkException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="InspectorNetworkException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#InspectorNetworkException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="InspectorNetworkException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#InspectorNetworkException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="InspectorNetworkException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="InspectorNetworkException-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#InspectorNetworkException-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="InspectorNetworkException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="InspectorNetworkResponseData">class <strong>InspectorNetworkResponseData</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="InspectorNetworkResponseData-AsTimelineEvent"><strong>AsTimelineEvent</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="InspectorNetworkResponseData-GetBody"><strong>GetBody</strong></a>(self, timeout<font color="#909090">=60</font>)</dt></dl>
+
+<dl><dt><a name="InspectorNetworkResponseData-GetHeader"><strong>GetHeader</strong></a>(self, name)</dt></dl>
+
+<dl><dt><a name="InspectorNetworkResponseData-__init__"><strong>__init__</strong></a>(self, inspector_network, params)</dt></dl>
+
+<dl><dt><a name="InspectorNetworkResponseData-status_text"><strong>status_text</strong></a>(self)</dt></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="InspectorNetworkResponseData-FromTimelineEvent"><strong>FromTimelineEvent</strong></a>(event)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>headers</strong></dt>
+</dl>
+<dl><dt><strong>request_headers</strong></dt>
+</dl>
+<dl><dt><strong>request_id</strong></dt>
+</dl>
+<dl><dt><strong>served_from_cache</strong></dt>
+</dl>
+<dl><dt><strong>status</strong></dt>
+</dl>
+<dl><dt><strong>timestamp</strong></dt>
+</dl>
+<dl><dt><strong>timing</strong></dt>
+</dl>
+<dl><dt><strong>url</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TimelineRecorder">class <strong>TimelineRecorder</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="TimelineRecorder-Start"><strong>Start</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TimelineRecorder-Stop"><strong>Stop</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TimelineRecorder-__init__"><strong>__init__</strong></a>(self, inspector_network)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
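StartMonitoringNetwork, StopMonitoringNetwork and GetResponseData are documented above without a usage example, so the sketch below ties them together. It assumes a connected InspectorWebsocket named ws and a caller-supplied callable that drives the page load (for example via InspectorPage.Navigate) while recording is active; the printed attributes are the data descriptors listed for InspectorNetworkResponseData.

    # Illustrative sketch: record HTTP responses with InspectorNetwork.
    from telemetry.internal.backends.chrome_inspector import inspector_network

    def record_responses(ws, drive_page_load):
      network = inspector_network.InspectorNetwork(ws)
      network.StartMonitoringNetwork()
      drive_page_load()  # e.g. navigate the page via InspectorPage.
      network.StopMonitoringNetwork()
      for response in network.GetResponseData():
        # InspectorNetworkResponseData exposes url, status, headers, etc.
        print('%s %s served_from_cache=%s' % (
            response.status, response.url, response.served_from_cache))
      network.ClearResponseData()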
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.inspector_page.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.inspector_page.html
new file mode 100644
index 0000000..b0e110d
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.inspector_page.html
@@ -0,0 +1,82 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.chrome_inspector.inspector_page</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.chrome_inspector.html"><font color="#ffffff">chrome_inspector</font></a>.inspector_page</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome_inspector/inspector_page.py">telemetry/internal/backends/chrome_inspector/inspector_page.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.util.image_util.html">telemetry.util.image_util</a><br>
+</td><td width="25%" valign=top><a href="time.html">time</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.inspector_page.html#InspectorPage">InspectorPage</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="InspectorPage">class <strong>InspectorPage</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Class&nbsp;that&nbsp;controls&nbsp;a&nbsp;page&nbsp;connected&nbsp;by&nbsp;an&nbsp;inspector_websocket.<br>
+&nbsp;<br>
+This&nbsp;class&nbsp;provides&nbsp;utility&nbsp;methods&nbsp;for&nbsp;controlling&nbsp;a&nbsp;page&nbsp;connected&nbsp;by&nbsp;an<br>
+inspector_websocket.&nbsp;It&nbsp;does&nbsp;not&nbsp;perform&nbsp;any&nbsp;exception&nbsp;handling.&nbsp;All<br>
+inspector_websocket&nbsp;exceptions&nbsp;must&nbsp;be&nbsp;handled&nbsp;by&nbsp;the&nbsp;caller.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="InspectorPage-CaptureScreenshot"><strong>CaptureScreenshot</strong></a>(self, timeout<font color="#909090">=60</font>)</dt></dl>
+
+<dl><dt><a name="InspectorPage-CollectGarbage"><strong>CollectGarbage</strong></a>(self, timeout<font color="#909090">=60</font>)</dt></dl>
+
+<dl><dt><a name="InspectorPage-GetCookieByName"><strong>GetCookieByName</strong></a>(self, name, timeout<font color="#909090">=60</font>)</dt><dd><tt>Returns&nbsp;the&nbsp;value&nbsp;of&nbsp;the&nbsp;cookie&nbsp;by&nbsp;the&nbsp;given&nbsp;|name|.</tt></dd></dl>
+
+<dl><dt><a name="InspectorPage-Navigate"><strong>Navigate</strong></a>(self, url, script_to_evaluate_on_commit<font color="#909090">=None</font>, timeout<font color="#909090">=60</font>)</dt><dd><tt>Navigates&nbsp;to&nbsp;|url|.<br>
+&nbsp;<br>
+If&nbsp;|script_to_evaluate_on_commit|&nbsp;is&nbsp;given,&nbsp;the&nbsp;script&nbsp;source&nbsp;string&nbsp;will&nbsp;be<br>
+evaluated&nbsp;when&nbsp;the&nbsp;navigation&nbsp;is&nbsp;committed.&nbsp;This&nbsp;is&nbsp;after&nbsp;the&nbsp;context&nbsp;of<br>
+the&nbsp;page&nbsp;exists,&nbsp;but&nbsp;before&nbsp;any&nbsp;script&nbsp;on&nbsp;the&nbsp;page&nbsp;itself&nbsp;has&nbsp;executed.</tt></dd></dl>
+
+<dl><dt><a name="InspectorPage-WaitForNavigate"><strong>WaitForNavigate</strong></a>(self, timeout<font color="#909090">=60</font>)</dt><dd><tt>Waits&nbsp;for&nbsp;the&nbsp;navigation&nbsp;to&nbsp;complete.<br>
+&nbsp;<br>
+The&nbsp;current&nbsp;page&nbsp;is&nbsp;expected&nbsp;to&nbsp;be&nbsp;in&nbsp;a&nbsp;navigation.&nbsp;This&nbsp;function&nbsp;returns<br>
+when&nbsp;the&nbsp;navigation&nbsp;is&nbsp;complete&nbsp;or&nbsp;when&nbsp;the&nbsp;timeout&nbsp;has&nbsp;been&nbsp;exceeded.</tt></dd></dl>
+
+<dl><dt><a name="InspectorPage-__init__"><strong>__init__</strong></a>(self, inspector_websocket, timeout<font color="#909090">=60</font>)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
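InspectorPage's methods are documented above with their default arguments, so the sketch below sticks to those signatures. The URL, cookie name and commit script are placeholders, ws is again an assumed, already-connected InspectorWebsocket, and, per the class docstring, exception handling is deliberately left to the caller.

    # Illustrative sketch: page control through InspectorPage.
    from telemetry.internal.backends.chrome_inspector import inspector_page

    def load_and_inspect(ws):
      page = inspector_page.InspectorPage(ws, timeout=60)
      # The commit script runs after the page context exists but before any
      # script on the page itself executes (see Navigate above).
      page.Navigate('http://example.com/',
                    script_to_evaluate_on_commit='window.__telemetry = true;',
                    timeout=60)
      cookie = page.GetCookieByName('sessionid', timeout=60)
      screenshot = page.CaptureScreenshot(timeout=60)
      return cookie, screenshot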
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.inspector_runtime.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.inspector_runtime.html
new file mode 100644
index 0000000..bcbf564
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.inspector_runtime.html
@@ -0,0 +1,85 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.chrome_inspector.inspector_runtime</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.chrome_inspector.html"><font color="#ffffff">chrome_inspector</font></a>.inspector_runtime</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome_inspector/inspector_runtime.py">telemetry/internal/backends/chrome_inspector/inspector_runtime.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.inspector_runtime.html#InspectorRuntime">InspectorRuntime</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="InspectorRuntime">class <strong>InspectorRuntime</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="InspectorRuntime-EnableAllContexts"><strong>EnableAllContexts</strong></a>(self)</dt><dd><tt>Allow&nbsp;access&nbsp;to&nbsp;iframes.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.WebSocketDisconnected<br>
+&nbsp;&nbsp;websocket.WebSocketException<br>
+&nbsp;&nbsp;socket.error</tt></dd></dl>
+
+<dl><dt><a name="InspectorRuntime-Evaluate"><strong>Evaluate</strong></a>(self, expr, context_id, timeout)</dt><dd><tt>Evaluates&nbsp;a&nbsp;javascript&nbsp;expression&nbsp;and&nbsp;returns&nbsp;the&nbsp;result.<br>
+&nbsp;<br>
+|context_id|&nbsp;can&nbsp;refer&nbsp;to&nbsp;an&nbsp;iframe.&nbsp;The&nbsp;main&nbsp;page&nbsp;has&nbsp;context_id=1,&nbsp;the<br>
+first&nbsp;iframe&nbsp;context_id=2,&nbsp;etc.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.EvaluateException<br>
+&nbsp;&nbsp;exceptions.WebSocketDisconnected<br>
+&nbsp;&nbsp;websocket.WebSocketException<br>
+&nbsp;&nbsp;socket.error</tt></dd></dl>
+
+<dl><dt><a name="InspectorRuntime-Execute"><strong>Execute</strong></a>(self, expr, context_id, timeout)</dt></dl>
+
+<dl><dt><a name="InspectorRuntime-RunInspectorCommand"><strong>RunInspectorCommand</strong></a>(self, command, timeout)</dt><dd><tt>Runs&nbsp;an&nbsp;inspector&nbsp;command.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.WebSocketDisconnected<br>
+&nbsp;&nbsp;websocket.WebSocketException<br>
+&nbsp;&nbsp;socket.error</tt></dd></dl>
+
+<dl><dt><a name="InspectorRuntime-__init__"><strong>__init__</strong></a>(self, inspector_websocket)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
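The Evaluate docstring above spells out the context_id convention (1 for the main page, 2 for the first iframe) and EnableAllContexts is documented as granting iframe access; the sketch below simply exercises that, once more assuming a connected InspectorWebsocket named ws.

    # Illustrative sketch: per-context JavaScript evaluation via
    # InspectorRuntime.
    from telemetry.internal.backends.chrome_inspector import inspector_runtime

    def titles_of_page_and_first_iframe(ws):
      runtime = inspector_runtime.InspectorRuntime(ws)
      runtime.EnableAllContexts()  # grants access to iframe contexts
      main_title = runtime.Evaluate('document.title', context_id=1,
                                    timeout=60)
      iframe_title = runtime.Evaluate('document.title', context_id=2,
                                      timeout=60)
      return main_title, iframe_title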
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.inspector_websocket.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.inspector_websocket.html
new file mode 100644
index 0000000..9c1ddda
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.inspector_websocket.html
@@ -0,0 +1,200 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.chrome_inspector.inspector_websocket</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.chrome_inspector.html"><font color="#ffffff">chrome_inspector</font></a>.inspector_websocket</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome_inspector/inspector_websocket.py">telemetry/internal/backends/chrome_inspector/inspector_websocket.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="errno.html">errno</a><br>
+<a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+</td><td width="25%" valign=top><a href="json.html">json</a><br>
+<a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="socket.html">socket</a><br>
+<a href="time.html">time</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.backends.chrome_inspector.websocket.html">telemetry.internal.backends.chrome_inspector.websocket</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.inspector_websocket.html#InspectorWebsocket">InspectorWebsocket</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.core.exceptions.html#Error">telemetry.core.exceptions.Error</a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.inspector_websocket.html#WebSocketDisconnected">WebSocketDisconnected</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="InspectorWebsocket">class <strong>InspectorWebsocket</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="InspectorWebsocket-AsyncRequest"><strong>AsyncRequest</strong></a>(self, req, callback)</dt><dd><tt>Sends&nbsp;an&nbsp;async&nbsp;request&nbsp;and&nbsp;returns&nbsp;immediately.<br>
+&nbsp;<br>
+The&nbsp;response&nbsp;will&nbsp;be&nbsp;handled&nbsp;by&nbsp;|callback|&nbsp;when&nbsp;DispatchNotifications<br>
+is&nbsp;invoked.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;callback:&nbsp;a&nbsp;function&nbsp;that&nbsp;takes&nbsp;the&nbsp;inspector's&nbsp;response&nbsp;as&nbsp;its&nbsp;argument.</tt></dd></dl>
+
+<dl><dt><a name="InspectorWebsocket-Connect"><strong>Connect</strong></a>(self, url, timeout<font color="#909090">=10</font>)</dt><dd><tt>Connects&nbsp;the&nbsp;websocket.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;websocket.WebSocketException<br>
+&nbsp;&nbsp;socket.error</tt></dd></dl>
+
+<dl><dt><a name="InspectorWebsocket-Disconnect"><strong>Disconnect</strong></a>(self)</dt><dd><tt>Disconnects&nbsp;the&nbsp;inspector&nbsp;websocket.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;websocket.WebSocketException<br>
+&nbsp;&nbsp;socket.error</tt></dd></dl>
+
+<dl><dt><a name="InspectorWebsocket-DispatchNotifications"><strong>DispatchNotifications</strong></a>(self, timeout<font color="#909090">=10</font>)</dt><dd><tt>Waits&nbsp;for&nbsp;responses&nbsp;from&nbsp;the&nbsp;websocket,&nbsp;dispatching&nbsp;them&nbsp;as&nbsp;necessary.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;websocket.WebSocketException:&nbsp;<a href="telemetry.core.exceptions.html#Error">Error</a>&nbsp;from&nbsp;websocket&nbsp;library.<br>
+&nbsp;&nbsp;socket.error:&nbsp;<a href="telemetry.core.exceptions.html#Error">Error</a>&nbsp;from&nbsp;websocket&nbsp;library.<br>
+&nbsp;&nbsp;exceptions.<a href="#WebSocketDisconnected">WebSocketDisconnected</a>:&nbsp;The&nbsp;socket&nbsp;was&nbsp;disconnected.</tt></dd></dl>
+
+<dl><dt><a name="InspectorWebsocket-RegisterDomain"><strong>RegisterDomain</strong></a>(self, domain_name, notification_handler)</dt><dd><tt>Registers&nbsp;a&nbsp;given&nbsp;domain&nbsp;for&nbsp;handling&nbsp;notification&nbsp;methods.<br>
+&nbsp;<br>
+For&nbsp;example,&nbsp;given&nbsp;inspector_backend:<br>
+&nbsp;&nbsp;&nbsp;def&nbsp;OnConsoleNotification(msg):<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;if&nbsp;msg['method']&nbsp;==&nbsp;'Console.messageAdded':<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;print&nbsp;msg['params']['message']<br>
+&nbsp;&nbsp;&nbsp;inspector_backend.<a href="#InspectorWebsocket-RegisterDomain">RegisterDomain</a>('Console',&nbsp;OnConsoleNotification)<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;domain_name:&nbsp;The&nbsp;devtools&nbsp;domain&nbsp;name.&nbsp;E.g.,&nbsp;'Tracing',&nbsp;'Memory',&nbsp;'Page'.<br>
+&nbsp;&nbsp;notification_handler:&nbsp;Handler&nbsp;for&nbsp;devtools&nbsp;notification.&nbsp;Will&nbsp;be<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;called&nbsp;if&nbsp;a&nbsp;devtools&nbsp;notification&nbsp;with&nbsp;matching&nbsp;domain&nbsp;is&nbsp;received<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;via&nbsp;DispatchNotifications.&nbsp;The&nbsp;handler&nbsp;accepts&nbsp;a&nbsp;single&nbsp;parameter:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;the&nbsp;JSON&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;representing&nbsp;the&nbsp;notification.</tt></dd></dl>
+
+<dl><dt><a name="InspectorWebsocket-SendAndIgnoreResponse"><strong>SendAndIgnoreResponse</strong></a>(self, req)</dt><dd><tt>Sends&nbsp;a&nbsp;request&nbsp;without&nbsp;waiting&nbsp;for&nbsp;a&nbsp;response.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;websocket.WebSocketException:&nbsp;<a href="telemetry.core.exceptions.html#Error">Error</a>&nbsp;from&nbsp;websocket&nbsp;library.<br>
+&nbsp;&nbsp;socket.error:&nbsp;<a href="telemetry.core.exceptions.html#Error">Error</a>&nbsp;from&nbsp;websocket&nbsp;library.<br>
+&nbsp;&nbsp;exceptions.<a href="#WebSocketDisconnected">WebSocketDisconnected</a>:&nbsp;The&nbsp;socket&nbsp;was&nbsp;disconnected.</tt></dd></dl>
+
+<dl><dt><a name="InspectorWebsocket-SyncRequest"><strong>SyncRequest</strong></a>(self, req, timeout<font color="#909090">=10</font>)</dt><dd><tt>Sends&nbsp;a&nbsp;request&nbsp;and&nbsp;waits&nbsp;for&nbsp;a&nbsp;response.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;websocket.WebSocketException:&nbsp;<a href="telemetry.core.exceptions.html#Error">Error</a>&nbsp;from&nbsp;websocket&nbsp;library.<br>
+&nbsp;&nbsp;socket.error:&nbsp;<a href="telemetry.core.exceptions.html#Error">Error</a>&nbsp;from&nbsp;websocket&nbsp;library.<br>
+&nbsp;&nbsp;exceptions.<a href="#WebSocketDisconnected">WebSocketDisconnected</a>:&nbsp;The&nbsp;socket&nbsp;was&nbsp;disconnected.</tt></dd></dl>
+
+<dl><dt><a name="InspectorWebsocket-UnregisterDomain"><strong>UnregisterDomain</strong></a>(self, domain_name)</dt><dd><tt>Unregisters&nbsp;a&nbsp;previously&nbsp;registered&nbsp;domain.</tt></dd></dl>
+
+<dl><dt><a name="InspectorWebsocket-__init__"><strong>__init__</strong></a>(self)</dt><dd><tt>Create&nbsp;a&nbsp;websocket&nbsp;handler&nbsp;for&nbsp;communicating&nbsp;with&nbsp;Inspectors.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>METHOD_NOT_FOUND_CODE</strong> = -32601</dl>
+
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="WebSocketDisconnected">class <strong>WebSocketDisconnected</strong></a>(<a href="telemetry.core.exceptions.html#Error">telemetry.core.exceptions.Error</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>An&nbsp;attempt&nbsp;was&nbsp;made&nbsp;to&nbsp;use&nbsp;a&nbsp;web&nbsp;socket&nbsp;after&nbsp;it&nbsp;had&nbsp;been&nbsp;disconnected.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome_inspector.inspector_websocket.html#WebSocketDisconnected">WebSocketDisconnected</a></dd>
+<dd><a href="telemetry.core.exceptions.html#Error">telemetry.core.exceptions.Error</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.core.exceptions.html#Error">telemetry.core.exceptions.Error</a>:<br>
+<dl><dt><a name="WebSocketDisconnected-AddDebuggingMessage"><strong>AddDebuggingMessage</strong></a>(self, msg)</dt><dd><tt>Adds&nbsp;a&nbsp;message&nbsp;to&nbsp;the&nbsp;description&nbsp;of&nbsp;the&nbsp;exception.<br>
+&nbsp;<br>
+Many&nbsp;Telemetry&nbsp;exceptions&nbsp;arise&nbsp;from&nbsp;failures&nbsp;in&nbsp;another&nbsp;application.&nbsp;These<br>
+failures&nbsp;are&nbsp;difficult&nbsp;to&nbsp;pinpoint.&nbsp;This&nbsp;method&nbsp;allows&nbsp;Telemetry&nbsp;classes&nbsp;to<br>
+append&nbsp;useful&nbsp;debugging&nbsp;information&nbsp;to&nbsp;the&nbsp;exception.&nbsp;This&nbsp;method&nbsp;also&nbsp;logs<br>
+information&nbsp;about&nbsp;the&nbsp;location&nbsp;from&nbsp;where&nbsp;it&nbsp;was&nbsp;called.</tt></dd></dl>
+
+<dl><dt><a name="WebSocketDisconnected-__init__"><strong>__init__</strong></a>(self, msg<font color="#909090">=''</font>)</dt></dl>
+
+<dl><dt><a name="WebSocketDisconnected-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.core.exceptions.html#Error">telemetry.core.exceptions.Error</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#WebSocketDisconnected-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="WebSocketDisconnected-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#WebSocketDisconnected-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="WebSocketDisconnected-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#WebSocketDisconnected-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="WebSocketDisconnected-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#WebSocketDisconnected-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="WebSocketDisconnected-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#WebSocketDisconnected-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="WebSocketDisconnected-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="WebSocketDisconnected-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#WebSocketDisconnected-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="WebSocketDisconnected-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#WebSocketDisconnected-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="WebSocketDisconnected-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="WebSocketDisconnected-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
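
The pydoc page above documents the InspectorWebsocket API (Connect, RegisterDomain, SyncRequest, AsyncRequest, DispatchNotifications, Disconnect). The following is a minimal usage sketch based only on those documented signatures; the |debugger_url| argument, the 'Console.enable' request, and the dict shape of the request payload are illustrative assumptions, not part of this change.

```python
# Minimal sketch (illustration only, not part of this diff). Assumes
# |debugger_url| points at an existing DevTools target and that requests are
# plain dicts in DevTools-protocol shape; both are assumptions here.
import logging

from telemetry.internal.backends.chrome_inspector import inspector_websocket


def LogConsoleMessages(debugger_url, iterations=5):
  ws = inspector_websocket.InspectorWebsocket()
  ws.Connect(debugger_url, timeout=10)  # May raise WebSocketException/socket.error.

  def OnConsoleNotification(msg):
    # The handler receives the JSON object of the notification, as documented.
    if msg['method'] == 'Console.messageAdded':
      logging.info(msg['params']['message'])

  # Notifications for the 'Console' domain are routed to the handler whenever
  # DispatchNotifications is invoked.
  ws.RegisterDomain('Console', OnConsoleNotification)
  try:
    # 'Console.enable' is an assumed DevTools method used only for illustration.
    ws.SyncRequest({'method': 'Console.enable'}, timeout=10)
    for _ in range(iterations):
      ws.DispatchNotifications(timeout=10)
  except inspector_websocket.WebSocketDisconnected:
    logging.warning('Inspector websocket was disconnected.')
  finally:
    ws.UnregisterDomain('Console')
    ws.Disconnect()
```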
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.memory_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.memory_backend.html
new file mode 100644
index 0000000..2258e4e
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.memory_backend.html
@@ -0,0 +1,274 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.chrome_inspector.memory_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.chrome_inspector.html"><font color="#ffffff">chrome_inspector</font></a>.memory_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome_inspector/memory_backend.py">telemetry/internal/backends/chrome_inspector/memory_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.backends.chrome_inspector.inspector_websocket.html">telemetry.internal.backends.chrome_inspector.inspector_websocket</a><br>
+<a href="json.html">json</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+<a href="socket.html">socket</a><br>
+</td><td width="25%" valign=top><a href="traceback.html">traceback</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.websocket.html">telemetry.internal.backends.chrome_inspector.websocket</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.memory_backend.html#MemoryBackend">MemoryBackend</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.memory_backend.html#MemoryTimeoutException">MemoryTimeoutException</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.memory_backend.html#MemoryUnexpectedResponseException">MemoryUnexpectedResponseException</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.memory_backend.html#MemoryUnrecoverableException">MemoryUnrecoverableException</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MemoryBackend">class <strong>MemoryBackend</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="MemoryBackend-Close"><strong>Close</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MemoryBackend-SetMemoryPressureNotificationsSuppressed"><strong>SetMemoryPressureNotificationsSuppressed</strong></a>(self, suppressed, timeout<font color="#909090">=30</font>)</dt><dd><tt>Enable/disable&nbsp;suppressing&nbsp;memory&nbsp;pressure&nbsp;notifications.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;suppressed:&nbsp;If&nbsp;true,&nbsp;memory&nbsp;pressure&nbsp;notifications&nbsp;will&nbsp;be&nbsp;suppressed.<br>
+&nbsp;&nbsp;timeout:&nbsp;The&nbsp;timeout&nbsp;in&nbsp;seconds.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;<a href="#MemoryTimeoutException">MemoryTimeoutException</a>:&nbsp;If&nbsp;more&nbsp;than&nbsp;|timeout|&nbsp;seconds&nbsp;have&nbsp;passed<br>
+&nbsp;&nbsp;since&nbsp;the&nbsp;last&nbsp;time&nbsp;any&nbsp;data&nbsp;was&nbsp;received.<br>
+&nbsp;&nbsp;<a href="#MemoryUnrecoverableException">MemoryUnrecoverableException</a>:&nbsp;If&nbsp;there&nbsp;is&nbsp;a&nbsp;websocket&nbsp;error.<br>
+&nbsp;&nbsp;<a href="#MemoryUnexpectedResponseException">MemoryUnexpectedResponseException</a>:&nbsp;If&nbsp;the&nbsp;response&nbsp;contains&nbsp;an&nbsp;error<br>
+&nbsp;&nbsp;or&nbsp;does&nbsp;not&nbsp;contain&nbsp;the&nbsp;expected&nbsp;result.</tt></dd></dl>
+
+<dl><dt><a name="MemoryBackend-SimulateMemoryPressureNotification"><strong>SimulateMemoryPressureNotification</strong></a>(self, pressure_level, timeout<font color="#909090">=30</font>)</dt><dd><tt>Simulate&nbsp;a&nbsp;memory&nbsp;pressure&nbsp;notification.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;pressure_level:&nbsp;The&nbsp;memory&nbsp;pressure&nbsp;level&nbsp;of&nbsp;the&nbsp;notification&nbsp;('moderate'<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;or&nbsp;'critical').<br>
+&nbsp;&nbsp;timeout:&nbsp;The&nbsp;timeout&nbsp;in&nbsp;seconds.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;<a href="#MemoryTimeoutException">MemoryTimeoutException</a>:&nbsp;If&nbsp;more&nbsp;than&nbsp;|timeout|&nbsp;seconds&nbsp;have&nbsp;passed<br>
+&nbsp;&nbsp;since&nbsp;the&nbsp;last&nbsp;time&nbsp;any&nbsp;data&nbsp;was&nbsp;received.<br>
+&nbsp;&nbsp;<a href="#MemoryUnrecoverableException">MemoryUnrecoverableException</a>:&nbsp;If&nbsp;there&nbsp;is&nbsp;a&nbsp;websocket&nbsp;error.<br>
+&nbsp;&nbsp;<a href="#MemoryUnexpectedResponseException">MemoryUnexpectedResponseException</a>:&nbsp;If&nbsp;the&nbsp;response&nbsp;contains&nbsp;an&nbsp;error<br>
+&nbsp;&nbsp;or&nbsp;does&nbsp;not&nbsp;contain&nbsp;the&nbsp;expected&nbsp;result.</tt></dd></dl>
+
+<dl><dt><a name="MemoryBackend-__init__"><strong>__init__</strong></a>(self, inspector_socket)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MemoryTimeoutException">class <strong>MemoryTimeoutException</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome_inspector.memory_backend.html#MemoryTimeoutException">MemoryTimeoutException</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="MemoryTimeoutException-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#MemoryTimeoutException-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#MemoryTimeoutException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="MemoryTimeoutException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#MemoryTimeoutException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="MemoryTimeoutException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#MemoryTimeoutException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="MemoryTimeoutException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#MemoryTimeoutException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="MemoryTimeoutException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#MemoryTimeoutException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="MemoryTimeoutException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="MemoryTimeoutException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#MemoryTimeoutException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="MemoryTimeoutException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#MemoryTimeoutException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="MemoryTimeoutException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="MemoryTimeoutException-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#MemoryTimeoutException-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="MemoryTimeoutException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MemoryUnexpectedResponseException">class <strong>MemoryUnexpectedResponseException</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome_inspector.memory_backend.html#MemoryUnexpectedResponseException">MemoryUnexpectedResponseException</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="MemoryUnexpectedResponseException-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#MemoryUnexpectedResponseException-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#MemoryUnexpectedResponseException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="MemoryUnexpectedResponseException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#MemoryUnexpectedResponseException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="MemoryUnexpectedResponseException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#MemoryUnexpectedResponseException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="MemoryUnexpectedResponseException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#MemoryUnexpectedResponseException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="MemoryUnexpectedResponseException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#MemoryUnexpectedResponseException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="MemoryUnexpectedResponseException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="MemoryUnexpectedResponseException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#MemoryUnexpectedResponseException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="MemoryUnexpectedResponseException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#MemoryUnexpectedResponseException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="MemoryUnexpectedResponseException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="MemoryUnexpectedResponseException-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#MemoryUnexpectedResponseException-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="MemoryUnexpectedResponseException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MemoryUnrecoverableException">class <strong>MemoryUnrecoverableException</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome_inspector.memory_backend.html#MemoryUnrecoverableException">MemoryUnrecoverableException</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="MemoryUnrecoverableException-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#MemoryUnrecoverableException-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#MemoryUnrecoverableException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="MemoryUnrecoverableException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#MemoryUnrecoverableException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="MemoryUnrecoverableException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#MemoryUnrecoverableException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="MemoryUnrecoverableException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#MemoryUnrecoverableException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="MemoryUnrecoverableException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#MemoryUnrecoverableException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="MemoryUnrecoverableException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="MemoryUnrecoverableException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#MemoryUnrecoverableException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="MemoryUnrecoverableException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#MemoryUnrecoverableException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="MemoryUnrecoverableException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="MemoryUnrecoverableException-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#MemoryUnrecoverableException-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="MemoryUnrecoverableException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
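
Likewise, the MemoryBackend page above documents SetMemoryPressureNotificationsSuppressed and SimulateMemoryPressureNotification on a backend constructed from an inspector socket. Below is a hedged sketch of how these calls might be driven; pairing the backend with an InspectorWebsocket and the |debugger_url| argument are assumptions made for the example.

```python
# Minimal sketch (illustration only, not part of this diff). The pairing with
# InspectorWebsocket and the |debugger_url| argument are assumptions.
import logging

from telemetry.internal.backends.chrome_inspector import inspector_websocket
from telemetry.internal.backends.chrome_inspector import memory_backend


def InjectCriticalMemoryPressure(debugger_url):
  ws = inspector_websocket.InspectorWebsocket()
  ws.Connect(debugger_url, timeout=10)
  backend = memory_backend.MemoryBackend(ws)
  try:
    # Per the docstrings above: suppress real pressure notifications, then
    # inject a synthetic one at the 'critical' level ('moderate' is the other
    # documented value).
    backend.SetMemoryPressureNotificationsSuppressed(True, timeout=30)
    backend.SimulateMemoryPressureNotification('critical', timeout=30)
  except memory_backend.MemoryTimeoutException:
    logging.warning('No data received from the browser within the timeout.')
  finally:
    backend.SetMemoryPressureNotificationsSuppressed(False, timeout=30)
    backend.Close()
```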
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.tracing_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.tracing_backend.html
new file mode 100644
index 0000000..fbd8de1
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.tracing_backend.html
@@ -0,0 +1,392 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.chrome_inspector.tracing_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.chrome_inspector.html"><font color="#ffffff">chrome_inspector</font></a>.tracing_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome_inspector/tracing_backend.py">telemetry/internal/backends/chrome_inspector/tracing_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.decorators.html">telemetry.decorators</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.inspector_websocket.html">telemetry.internal.backends.chrome_inspector.inspector_websocket</a><br>
+</td><td width="25%" valign=top><a href="json.html">json</a><br>
+<a href="socket.html">socket</a><br>
+</td><td width="25%" valign=top><a href="time.html">time</a><br>
+<a href="telemetry.timeline.trace_data.html">telemetry.timeline.trace_data</a><br>
+</td><td width="25%" valign=top><a href="traceback.html">traceback</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.websocket.html">telemetry.internal.backends.chrome_inspector.websocket</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.tracing_backend.html#TracingBackend">TracingBackend</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.tracing_backend.html#TracingHasNotRunException">TracingHasNotRunException</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.tracing_backend.html#TracingTimeoutException">TracingTimeoutException</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.tracing_backend.html#TracingUnexpectedResponseException">TracingUnexpectedResponseException</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.tracing_backend.html#TracingUnrecoverableException">TracingUnrecoverableException</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.internal.backends.chrome_inspector.tracing_backend.html#TracingUnsupportedException">TracingUnsupportedException</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TracingBackend">class <strong>TracingBackend</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="TracingBackend-Close"><strong>Close</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TracingBackend-DumpMemory"><strong>DumpMemory</strong></a>(self, timeout<font color="#909090">=30</font>)</dt><dd><tt>Dumps&nbsp;memory.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;GUID&nbsp;of&nbsp;the&nbsp;generated&nbsp;dump&nbsp;if&nbsp;successful,&nbsp;None&nbsp;otherwise.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;<a href="#TracingTimeoutException">TracingTimeoutException</a>:&nbsp;If&nbsp;more&nbsp;than&nbsp;|timeout|&nbsp;seconds&nbsp;have&nbsp;passed<br>
+&nbsp;&nbsp;since&nbsp;the&nbsp;last&nbsp;time&nbsp;any&nbsp;data&nbsp;was&nbsp;received.<br>
+&nbsp;&nbsp;<a href="#TracingUnrecoverableException">TracingUnrecoverableException</a>:&nbsp;If&nbsp;there&nbsp;is&nbsp;a&nbsp;websocket&nbsp;error.<br>
+&nbsp;&nbsp;<a href="#TracingUnexpectedResponseException">TracingUnexpectedResponseException</a>:&nbsp;If&nbsp;the&nbsp;response&nbsp;contains&nbsp;an&nbsp;error<br>
+&nbsp;&nbsp;or&nbsp;does&nbsp;not&nbsp;contain&nbsp;the&nbsp;expected&nbsp;result.</tt></dd></dl>
+
+<dl><dt><a name="TracingBackend-IsTracingSupported"><strong>IsTracingSupported</strong></a>(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="TracingBackend-StartTracing"><strong>StartTracing</strong></a>(self, trace_options, custom_categories<font color="#909090">=None</font>, timeout<font color="#909090">=10</font>)</dt><dd><tt>When&nbsp;first&nbsp;called,&nbsp;starts&nbsp;tracing,&nbsp;and&nbsp;returns&nbsp;True.<br>
+&nbsp;<br>
+If&nbsp;called&nbsp;during&nbsp;tracing,&nbsp;tracing&nbsp;is&nbsp;unchanged,&nbsp;and&nbsp;it&nbsp;returns&nbsp;False.</tt></dd></dl>
+
+<dl><dt><a name="TracingBackend-StopTracing"><strong>StopTracing</strong></a>(self, trace_data_builder, timeout<font color="#909090">=30</font>)</dt><dd><tt>Stops&nbsp;tracing&nbsp;and&nbsp;pushes&nbsp;results&nbsp;to&nbsp;the&nbsp;supplied&nbsp;TraceDataBuilder.<br>
+&nbsp;<br>
+If&nbsp;this&nbsp;is&nbsp;called&nbsp;after&nbsp;tracing&nbsp;has&nbsp;been&nbsp;stopped,&nbsp;trace&nbsp;data&nbsp;from&nbsp;the&nbsp;last<br>
+tracing&nbsp;run&nbsp;is&nbsp;pushed.</tt></dd></dl>
+
+<dl><dt><a name="TracingBackend-__init__"><strong>__init__</strong></a>(self, inspector_socket, is_tracing_running<font color="#909090">=False</font>)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>is_tracing_running</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TracingHasNotRunException">class <strong>TracingHasNotRunException</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome_inspector.tracing_backend.html#TracingHasNotRunException">TracingHasNotRunException</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="TracingHasNotRunException-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingHasNotRunException-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#TracingHasNotRunException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="TracingHasNotRunException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingHasNotRunException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="TracingHasNotRunException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingHasNotRunException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="TracingHasNotRunException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingHasNotRunException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="TracingHasNotRunException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingHasNotRunException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="TracingHasNotRunException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="TracingHasNotRunException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingHasNotRunException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="TracingHasNotRunException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingHasNotRunException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="TracingHasNotRunException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="TracingHasNotRunException-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingHasNotRunException-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="TracingHasNotRunException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TracingTimeoutException">class <strong>TracingTimeoutException</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome_inspector.tracing_backend.html#TracingTimeoutException">TracingTimeoutException</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="TracingTimeoutException-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingTimeoutException-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#TracingTimeoutException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="TracingTimeoutException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingTimeoutException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="TracingTimeoutException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingTimeoutException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="TracingTimeoutException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingTimeoutException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="TracingTimeoutException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingTimeoutException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="TracingTimeoutException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="TracingTimeoutException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingTimeoutException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="TracingTimeoutException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingTimeoutException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="TracingTimeoutException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="TracingTimeoutException-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingTimeoutException-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="TracingTimeoutException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TracingUnexpectedResponseException">class <strong>TracingUnexpectedResponseException</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome_inspector.tracing_backend.html#TracingUnexpectedResponseException">TracingUnexpectedResponseException</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="TracingUnexpectedResponseException-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingUnexpectedResponseException-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#TracingUnexpectedResponseException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="TracingUnexpectedResponseException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingUnexpectedResponseException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="TracingUnexpectedResponseException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingUnexpectedResponseException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="TracingUnexpectedResponseException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingUnexpectedResponseException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="TracingUnexpectedResponseException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingUnexpectedResponseException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="TracingUnexpectedResponseException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="TracingUnexpectedResponseException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingUnexpectedResponseException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="TracingUnexpectedResponseException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingUnexpectedResponseException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="TracingUnexpectedResponseException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="TracingUnexpectedResponseException-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingUnexpectedResponseException-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="TracingUnexpectedResponseException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TracingUnrecoverableException">class <strong>TracingUnrecoverableException</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome_inspector.tracing_backend.html#TracingUnrecoverableException">TracingUnrecoverableException</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="TracingUnrecoverableException-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingUnrecoverableException-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#TracingUnrecoverableException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="TracingUnrecoverableException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingUnrecoverableException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="TracingUnrecoverableException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingUnrecoverableException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="TracingUnrecoverableException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingUnrecoverableException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="TracingUnrecoverableException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingUnrecoverableException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="TracingUnrecoverableException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="TracingUnrecoverableException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingUnrecoverableException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="TracingUnrecoverableException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingUnrecoverableException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="TracingUnrecoverableException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="TracingUnrecoverableException-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingUnrecoverableException-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="TracingUnrecoverableException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TracingUnsupportedException">class <strong>TracingUnsupportedException</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.chrome_inspector.tracing_backend.html#TracingUnsupportedException">TracingUnsupportedException</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="TracingUnsupportedException-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingUnsupportedException-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#TracingUnsupportedException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="TracingUnsupportedException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingUnsupportedException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="TracingUnsupportedException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingUnsupportedException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="TracingUnsupportedException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingUnsupportedException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="TracingUnsupportedException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingUnsupportedException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="TracingUnsupportedException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="TracingUnsupportedException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingUnsupportedException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="TracingUnsupportedException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingUnsupportedException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="TracingUnsupportedException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="TracingUnsupportedException-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingUnsupportedException-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="TracingUnsupportedException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
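The tracing_backend pydoc above only documents the two exception types; how callers are expected to react to them is not spelled out. A minimal sketch of one plausible caller-side pattern, assuming a hypothetical `StartTracing(config)` entry point — only the two exception classes come from the documentation above:

```python
# Sketch only: browser_backend.StartTracing(config) is a hypothetical stand-in
# for whatever call drives the tracing backend; the exception classes are the
# ones documented above.
from telemetry.internal.backends.chrome_inspector import tracing_backend


def try_start_tracing(browser_backend, config):
  """Returns True if tracing started, False if this browser cannot trace."""
  try:
    browser_backend.StartTracing(config)  # hypothetical caller-side API
    return True
  except tracing_backend.TracingUnsupportedException:
    # The browser build does not support tracing; callers can skip gracefully.
    return False
  except tracing_backend.TracingUnrecoverableException:
    # The DevTools connection is in an unusable state; let the caller restart.
    raise
```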
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.websocket.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.websocket.html
new file mode 100644
index 0000000..8c20ae9
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.chrome_inspector.websocket.html
@@ -0,0 +1,40 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.chrome_inspector.websocket</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.chrome_inspector.html"><font color="#ffffff">chrome_inspector</font></a>.websocket</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/chrome_inspector/websocket.py">telemetry/internal/backends/chrome_inspector/websocket.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="socket.html">socket</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-create_connection"><strong>create_connection</strong></a>(*args, **kwargs)</dt></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>absolute_import</strong> = _Feature((2, 5, 0, 'alpha', 1), (3, 0, 0, 'alpha', 0), 16384)</td></tr></table>
+</body></html>
\ No newline at end of file
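The websocket pydoc above exposes a single `create_connection(*args, **kwargs)` wrapper. A minimal sketch of how it might be used, assuming it forwards to the bundled websocket-client library and returns a connection object with `send()`, `recv()`, and `close()`; the DevTools URL and message below are illustrative only:

```python
import json

from telemetry.internal.backends.chrome_inspector import websocket

# Illustrative DevTools endpoint; a real caller would discover this URL from
# the browser rather than hard-coding it.
conn = websocket.create_connection('ws://127.0.0.1:9222/devtools/page/1',
                                   timeout=10)
try:
  conn.send(json.dumps({'id': 1, 'method': 'Tracing.start'}))
  reply = json.loads(conn.recv())
finally:
  conn.close()
```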
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.codepen_credentials_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.codepen_credentials_backend.html
new file mode 100644
index 0000000..27388f2
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.codepen_credentials_backend.html
@@ -0,0 +1,92 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.codepen_credentials_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.codepen_credentials_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/codepen_credentials_backend.py">telemetry/internal/backends/codepen_credentials_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.backends.form_based_credentials_backend.html">telemetry.internal.backends.form_based_credentials_backend</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.form_based_credentials_backend.html#FormBasedCredentialsBackend">telemetry.internal.backends.form_based_credentials_backend.FormBasedCredentialsBackend</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.codepen_credentials_backend.html#CodePenCredentialsBackend">CodePenCredentialsBackend</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="CodePenCredentialsBackend">class <strong>CodePenCredentialsBackend</strong></a>(<a href="telemetry.internal.backends.form_based_credentials_backend.html#FormBasedCredentialsBackend">telemetry.internal.backends.form_based_credentials_backend.FormBasedCredentialsBackend</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.codepen_credentials_backend.html#CodePenCredentialsBackend">CodePenCredentialsBackend</a></dd>
+<dd><a href="telemetry.internal.backends.form_based_credentials_backend.html#FormBasedCredentialsBackend">telemetry.internal.backends.form_based_credentials_backend.FormBasedCredentialsBackend</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>credentials_type</strong></dt>
+</dl>
+<dl><dt><strong>logged_in_javascript</strong></dt>
+<dd><tt>Evaluates&nbsp;to&nbsp;true&nbsp;iff&nbsp;already&nbsp;logged&nbsp;in.</tt></dd>
+</dl>
+<dl><dt><strong>login_button_javascript</strong></dt>
+</dl>
+<dl><dt><strong>login_form_id</strong></dt>
+</dl>
+<dl><dt><strong>login_input_id</strong></dt>
+</dl>
+<dl><dt><strong>password_input_id</strong></dt>
+</dl>
+<dl><dt><strong>url</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.form_based_credentials_backend.html#FormBasedCredentialsBackend">telemetry.internal.backends.form_based_credentials_backend.FormBasedCredentialsBackend</a>:<br>
+<dl><dt><a name="CodePenCredentialsBackend-IsAlreadyLoggedIn"><strong>IsAlreadyLoggedIn</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="CodePenCredentialsBackend-IsLoggedIn"><strong>IsLoggedIn</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CodePenCredentialsBackend-LoginNeeded"><strong>LoginNeeded</strong></a>(self, tab, action_runner, config)</dt><dd><tt>Logs&nbsp;in&nbsp;to&nbsp;a&nbsp;test&nbsp;account.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;RuntimeError:&nbsp;if&nbsp;could&nbsp;not&nbsp;get&nbsp;credential&nbsp;information.</tt></dd></dl>
+
+<dl><dt><a name="CodePenCredentialsBackend-LoginNoLongerNeeded"><strong>LoginNoLongerNeeded</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="CodePenCredentialsBackend-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.form_based_credentials_backend.html#FormBasedCredentialsBackend">telemetry.internal.backends.form_based_credentials_backend.FormBasedCredentialsBackend</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
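The CodePen backend above inherits its entire login flow from FormBasedCredentialsBackend and only supplies site-specific selectors. A short sketch of driving it from test code, where `tab`, `action_runner`, and `config` stand in for the objects a Telemetry test would supply (they are not defined in the documentation above):

```python
from telemetry.internal.backends import codepen_credentials_backend


def ensure_codepen_login(tab, action_runner, config):
  backend = codepen_credentials_backend.CodePenCredentialsBackend()
  if backend.IsAlreadyLoggedIn(tab):
    return backend
  # Fills the form identified by login_form_id / login_input_id /
  # password_input_id; per the docs above, raises RuntimeError when no
  # credential information is available.
  backend.LoginNeeded(tab, action_runner, config)
  return backend
```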
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.facebook_credentials_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.facebook_credentials_backend.html
new file mode 100644
index 0000000..8f1cd05
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.facebook_credentials_backend.html
@@ -0,0 +1,156 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.facebook_credentials_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.facebook_credentials_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/facebook_credentials_backend.py">telemetry/internal/backends/facebook_credentials_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.backends.form_based_credentials_backend.html">telemetry.internal.backends.form_based_credentials_backend</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.form_based_credentials_backend.html#FormBasedCredentialsBackend">telemetry.internal.backends.form_based_credentials_backend.FormBasedCredentialsBackend</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.facebook_credentials_backend.html#FacebookCredentialsBackend">FacebookCredentialsBackend</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.facebook_credentials_backend.html#FacebookCredentialsBackend2">FacebookCredentialsBackend2</a>
+</font></dt></dl>
+</dd>
+</dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="FacebookCredentialsBackend">class <strong>FacebookCredentialsBackend</strong></a>(<a href="telemetry.internal.backends.form_based_credentials_backend.html#FormBasedCredentialsBackend">telemetry.internal.backends.form_based_credentials_backend.FormBasedCredentialsBackend</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.facebook_credentials_backend.html#FacebookCredentialsBackend">FacebookCredentialsBackend</a></dd>
+<dd><a href="telemetry.internal.backends.form_based_credentials_backend.html#FormBasedCredentialsBackend">telemetry.internal.backends.form_based_credentials_backend.FormBasedCredentialsBackend</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>credentials_type</strong></dt>
+</dl>
+<dl><dt><strong>logged_in_javascript</strong></dt>
+<dd><tt>Evaluates&nbsp;to&nbsp;true&nbsp;iff&nbsp;already&nbsp;logged&nbsp;in.</tt></dd>
+</dl>
+<dl><dt><strong>login_form_id</strong></dt>
+</dl>
+<dl><dt><strong>login_input_id</strong></dt>
+</dl>
+<dl><dt><strong>password_input_id</strong></dt>
+</dl>
+<dl><dt><strong>url</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.form_based_credentials_backend.html#FormBasedCredentialsBackend">telemetry.internal.backends.form_based_credentials_backend.FormBasedCredentialsBackend</a>:<br>
+<dl><dt><a name="FacebookCredentialsBackend-IsAlreadyLoggedIn"><strong>IsAlreadyLoggedIn</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="FacebookCredentialsBackend-IsLoggedIn"><strong>IsLoggedIn</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FacebookCredentialsBackend-LoginNeeded"><strong>LoginNeeded</strong></a>(self, tab, action_runner, config)</dt><dd><tt>Logs&nbsp;in&nbsp;to&nbsp;a&nbsp;test&nbsp;account.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;RuntimeError:&nbsp;if&nbsp;could&nbsp;not&nbsp;get&nbsp;credential&nbsp;information.</tt></dd></dl>
+
+<dl><dt><a name="FacebookCredentialsBackend-LoginNoLongerNeeded"><strong>LoginNoLongerNeeded</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="FacebookCredentialsBackend-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.form_based_credentials_backend.html#FormBasedCredentialsBackend">telemetry.internal.backends.form_based_credentials_backend.FormBasedCredentialsBackend</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>login_button_javascript</strong></dt>
+<dd><tt>Some&nbsp;sites&nbsp;have&nbsp;custom&nbsp;JS&nbsp;to&nbsp;log&nbsp;in.</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="FacebookCredentialsBackend2">class <strong>FacebookCredentialsBackend2</strong></a>(<a href="telemetry.internal.backends.facebook_credentials_backend.html#FacebookCredentialsBackend">FacebookCredentialsBackend</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Facebook&nbsp;credential&nbsp;backend&nbsp;for&nbsp;https&nbsp;client.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.facebook_credentials_backend.html#FacebookCredentialsBackend2">FacebookCredentialsBackend2</a></dd>
+<dd><a href="telemetry.internal.backends.facebook_credentials_backend.html#FacebookCredentialsBackend">FacebookCredentialsBackend</a></dd>
+<dd><a href="telemetry.internal.backends.form_based_credentials_backend.html#FormBasedCredentialsBackend">telemetry.internal.backends.form_based_credentials_backend.FormBasedCredentialsBackend</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>credentials_type</strong></dt>
+</dl>
+<dl><dt><strong>url</strong></dt>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.facebook_credentials_backend.html#FacebookCredentialsBackend">FacebookCredentialsBackend</a>:<br>
+<dl><dt><strong>logged_in_javascript</strong></dt>
+<dd><tt>Evaluates&nbsp;to&nbsp;true&nbsp;iff&nbsp;already&nbsp;logged&nbsp;in.</tt></dd>
+</dl>
+<dl><dt><strong>login_form_id</strong></dt>
+</dl>
+<dl><dt><strong>login_input_id</strong></dt>
+</dl>
+<dl><dt><strong>password_input_id</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.form_based_credentials_backend.html#FormBasedCredentialsBackend">telemetry.internal.backends.form_based_credentials_backend.FormBasedCredentialsBackend</a>:<br>
+<dl><dt><a name="FacebookCredentialsBackend2-IsAlreadyLoggedIn"><strong>IsAlreadyLoggedIn</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="FacebookCredentialsBackend2-IsLoggedIn"><strong>IsLoggedIn</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FacebookCredentialsBackend2-LoginNeeded"><strong>LoginNeeded</strong></a>(self, tab, action_runner, config)</dt><dd><tt>Logs&nbsp;in&nbsp;to&nbsp;a&nbsp;test&nbsp;account.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;RuntimeError:&nbsp;if&nbsp;could&nbsp;not&nbsp;get&nbsp;credential&nbsp;information.</tt></dd></dl>
+
+<dl><dt><a name="FacebookCredentialsBackend2-LoginNoLongerNeeded"><strong>LoginNoLongerNeeded</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="FacebookCredentialsBackend2-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.form_based_credentials_backend.html#FormBasedCredentialsBackend">telemetry.internal.backends.form_based_credentials_backend.FormBasedCredentialsBackend</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>login_button_javascript</strong></dt>
+<dd><tt>Some&nbsp;sites&nbsp;have&nbsp;custom&nbsp;JS&nbsp;to&nbsp;log&nbsp;in.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.form_based_credentials_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.form_based_credentials_backend.html
new file mode 100644
index 0000000..109b617
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.form_based_credentials_backend.html
@@ -0,0 +1,86 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.form_based_credentials_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.form_based_credentials_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/form_based_credentials_backend.py">telemetry/internal/backends/form_based_credentials_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.form_based_credentials_backend.html#FormBasedCredentialsBackend">FormBasedCredentialsBackend</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="FormBasedCredentialsBackend">class <strong>FormBasedCredentialsBackend</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="FormBasedCredentialsBackend-IsAlreadyLoggedIn"><strong>IsAlreadyLoggedIn</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackend-IsLoggedIn"><strong>IsLoggedIn</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackend-LoginNeeded"><strong>LoginNeeded</strong></a>(self, tab, action_runner, config)</dt><dd><tt>Logs&nbsp;in&nbsp;to&nbsp;a&nbsp;test&nbsp;account.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;RuntimeError:&nbsp;if&nbsp;could&nbsp;not&nbsp;get&nbsp;credential&nbsp;information.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackend-LoginNoLongerNeeded"><strong>LoginNoLongerNeeded</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackend-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>credentials_type</strong></dt>
+</dl>
+<dl><dt><strong>logged_in_javascript</strong></dt>
+<dd><tt>Evaluates&nbsp;to&nbsp;true&nbsp;iff&nbsp;already&nbsp;logged&nbsp;in.</tt></dd>
+</dl>
+<dl><dt><strong>login_button_javascript</strong></dt>
+<dd><tt>Some&nbsp;sites&nbsp;have&nbsp;custom&nbsp;JS&nbsp;to&nbsp;log&nbsp;in.</tt></dd>
+</dl>
+<dl><dt><strong>login_form_id</strong></dt>
+</dl>
+<dl><dt><strong>login_input_id</strong></dt>
+</dl>
+<dl><dt><strong>password_input_id</strong></dt>
+</dl>
+<dl><dt><strong>url</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
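FormBasedCredentialsBackend is the base class that the CodePen and Facebook backends above specialize by overriding a handful of properties. A sketch of what a new site-specific backend could look like; the class name, URL, and element ids are invented for illustration and do not come from the catapult sources:

```python
from telemetry.internal.backends import form_based_credentials_backend


class ExampleCredentialsBackend(
    form_based_credentials_backend.FormBasedCredentialsBackend):
  """Hypothetical backend for an imaginary example.com login form."""

  @property
  def credentials_type(self):
    return 'example'

  @property
  def url(self):
    return 'https://example.com/login'

  @property
  def login_form_id(self):
    return 'login-form'

  @property
  def login_input_id(self):
    return 'username'

  @property
  def password_input_id(self):
    return 'password'

  @property
  def logged_in_javascript(self):
    # Must evaluate to true iff the user is already logged in.
    return 'document.querySelector(".avatar") !== null'
```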
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.form_based_credentials_backend_unittest_base.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.form_based_credentials_backend_unittest_base.html
new file mode 100644
index 0000000..00d7328
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.form_based_credentials_backend_unittest_base.html
@@ -0,0 +1,336 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.form_based_credentials_backend_unittest_base</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.form_based_credentials_backend_unittest_base</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/form_based_credentials_backend_unittest_base.py">telemetry/internal/backends/form_based_credentials_backend_unittest_base.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.testing.simple_mock.html">telemetry.testing.simple_mock</a><br>
+</td><td width="25%" valign=top><a href="unittest.html">unittest</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="unittest.case.html#TestCase">unittest.case.TestCase</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.form_based_credentials_backend_unittest_base.html#FormBasedCredentialsBackendUnitTestBase">FormBasedCredentialsBackendUnitTestBase</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="FormBasedCredentialsBackendUnitTestBase">class <strong>FormBasedCredentialsBackendUnitTestBase</strong></a>(<a href="unittest.case.html#TestCase">unittest.case.TestCase</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.form_based_credentials_backend_unittest_base.html#FormBasedCredentialsBackendUnitTestBase">FormBasedCredentialsBackendUnitTestBase</a></dd>
+<dd><a href="unittest.case.html#TestCase">unittest.case.TestCase</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-setUp"><strong>setUp</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-testLoginUsingMock"><strong>testLoginUsingMock</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="unittest.case.html#TestCase">unittest.case.TestCase</a>:<br>
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-__call__"><strong>__call__</strong></a>(self, *args, **kwds)</dt></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-__eq__"><strong>__eq__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-__hash__"><strong>__hash__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-__init__"><strong>__init__</strong></a>(self, methodName<font color="#909090">='runTest'</font>)</dt><dd><tt>Create&nbsp;an&nbsp;instance&nbsp;of&nbsp;the&nbsp;class&nbsp;that&nbsp;will&nbsp;use&nbsp;the&nbsp;named&nbsp;test<br>
+method&nbsp;when&nbsp;executed.&nbsp;Raises&nbsp;a&nbsp;ValueError&nbsp;if&nbsp;the&nbsp;instance&nbsp;does<br>
+not&nbsp;have&nbsp;a&nbsp;method&nbsp;with&nbsp;the&nbsp;specified&nbsp;name.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-__ne__"><strong>__ne__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-addCleanup"><strong>addCleanup</strong></a>(self, function, *args, **kwargs)</dt><dd><tt>Add&nbsp;a&nbsp;function,&nbsp;with&nbsp;arguments,&nbsp;to&nbsp;be&nbsp;called&nbsp;when&nbsp;the&nbsp;test&nbsp;is<br>
+completed.&nbsp;Functions&nbsp;added&nbsp;are&nbsp;called&nbsp;on&nbsp;a&nbsp;LIFO&nbsp;basis&nbsp;and&nbsp;are<br>
+called&nbsp;after&nbsp;tearDown&nbsp;on&nbsp;test&nbsp;failure&nbsp;or&nbsp;success.<br>
+&nbsp;<br>
+Cleanup&nbsp;items&nbsp;are&nbsp;called&nbsp;even&nbsp;if&nbsp;setUp&nbsp;fails&nbsp;(unlike&nbsp;tearDown).</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-addTypeEqualityFunc"><strong>addTypeEqualityFunc</strong></a>(self, typeobj, function)</dt><dd><tt>Add&nbsp;a&nbsp;type&nbsp;specific&nbsp;assertEqual&nbsp;style&nbsp;function&nbsp;to&nbsp;compare&nbsp;a&nbsp;type.<br>
+&nbsp;<br>
+This&nbsp;method&nbsp;is&nbsp;for&nbsp;use&nbsp;by&nbsp;<a href="unittest.case.html#TestCase">TestCase</a>&nbsp;subclasses&nbsp;that&nbsp;need&nbsp;to&nbsp;register<br>
+their&nbsp;own&nbsp;type&nbsp;equality&nbsp;functions&nbsp;to&nbsp;provide&nbsp;nicer&nbsp;error&nbsp;messages.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;typeobj:&nbsp;The&nbsp;data&nbsp;type&nbsp;to&nbsp;call&nbsp;this&nbsp;function&nbsp;on&nbsp;when&nbsp;both&nbsp;values<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;are&nbsp;of&nbsp;the&nbsp;same&nbsp;type&nbsp;in&nbsp;<a href="#FormBasedCredentialsBackendUnitTestBase-assertEqual">assertEqual</a>().<br>
+&nbsp;&nbsp;&nbsp;&nbsp;function:&nbsp;The&nbsp;callable&nbsp;taking&nbsp;two&nbsp;arguments&nbsp;and&nbsp;an&nbsp;optional<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;msg=&nbsp;argument&nbsp;that&nbsp;raises&nbsp;self.<strong>failureException</strong>&nbsp;with&nbsp;a<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;useful&nbsp;error&nbsp;message&nbsp;when&nbsp;the&nbsp;two&nbsp;arguments&nbsp;are&nbsp;not&nbsp;equal.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertAlmostEqual"><strong>assertAlmostEqual</strong></a>(self, first, second, places<font color="#909090">=None</font>, msg<font color="#909090">=None</font>, delta<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;unequal&nbsp;as&nbsp;determined&nbsp;by&nbsp;their<br>
+difference&nbsp;rounded&nbsp;to&nbsp;the&nbsp;given&nbsp;number&nbsp;of&nbsp;decimal&nbsp;places<br>
+(default&nbsp;7)&nbsp;and&nbsp;comparing&nbsp;to&nbsp;zero,&nbsp;or&nbsp;by&nbsp;comparing&nbsp;that&nbsp;the<br>
+difference&nbsp;between&nbsp;the&nbsp;two&nbsp;objects&nbsp;is&nbsp;more&nbsp;than&nbsp;the&nbsp;given&nbsp;delta.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;decimal&nbsp;places&nbsp;(from&nbsp;zero)&nbsp;are&nbsp;usually&nbsp;not&nbsp;the&nbsp;same<br>
+as&nbsp;significant&nbsp;digits&nbsp;(measured&nbsp;from&nbsp;the&nbsp;most&nbsp;significant&nbsp;digit).<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;two&nbsp;objects&nbsp;compare&nbsp;equal&nbsp;then&nbsp;they&nbsp;will&nbsp;automatically<br>
+compare&nbsp;almost&nbsp;equal.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertAlmostEquals"><strong>assertAlmostEquals</strong></a> = assertAlmostEqual(self, first, second, places<font color="#909090">=None</font>, msg<font color="#909090">=None</font>, delta<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;unequal&nbsp;as&nbsp;determined&nbsp;by&nbsp;their<br>
+difference&nbsp;rounded&nbsp;to&nbsp;the&nbsp;given&nbsp;number&nbsp;of&nbsp;decimal&nbsp;places<br>
+(default&nbsp;7)&nbsp;and&nbsp;comparing&nbsp;to&nbsp;zero,&nbsp;or&nbsp;by&nbsp;comparing&nbsp;that&nbsp;the<br>
+difference&nbsp;between&nbsp;the&nbsp;two&nbsp;objects&nbsp;is&nbsp;more&nbsp;than&nbsp;the&nbsp;given&nbsp;delta.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;decimal&nbsp;places&nbsp;(from&nbsp;zero)&nbsp;are&nbsp;usually&nbsp;not&nbsp;the&nbsp;same<br>
+as&nbsp;significant&nbsp;digits&nbsp;(measured&nbsp;from&nbsp;the&nbsp;most&nbsp;significant&nbsp;digit).<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;two&nbsp;objects&nbsp;compare&nbsp;equal&nbsp;then&nbsp;they&nbsp;will&nbsp;automatically<br>
+compare&nbsp;almost&nbsp;equal.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertDictContainsSubset"><strong>assertDictContainsSubset</strong></a>(self, expected, actual, msg<font color="#909090">=None</font>)</dt><dd><tt>Checks&nbsp;whether&nbsp;actual&nbsp;is&nbsp;a&nbsp;superset&nbsp;of&nbsp;expected.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertDictEqual"><strong>assertDictEqual</strong></a>(self, d1, d2, msg<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertEqual"><strong>assertEqual</strong></a>(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;unequal&nbsp;as&nbsp;determined&nbsp;by&nbsp;the&nbsp;'=='<br>
+operator.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertEquals"><strong>assertEquals</strong></a> = assertEqual(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;unequal&nbsp;as&nbsp;determined&nbsp;by&nbsp;the&nbsp;'=='<br>
+operator.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertFalse"><strong>assertFalse</strong></a>(self, expr, msg<font color="#909090">=None</font>)</dt><dd><tt>Check&nbsp;that&nbsp;the&nbsp;expression&nbsp;is&nbsp;false.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertGreater"><strong>assertGreater</strong></a>(self, a, b, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#FormBasedCredentialsBackendUnitTestBase-assertTrue">assertTrue</a>(a&nbsp;&gt;&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertGreaterEqual"><strong>assertGreaterEqual</strong></a>(self, a, b, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#FormBasedCredentialsBackendUnitTestBase-assertTrue">assertTrue</a>(a&nbsp;&gt;=&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertIn"><strong>assertIn</strong></a>(self, member, container, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#FormBasedCredentialsBackendUnitTestBase-assertTrue">assertTrue</a>(a&nbsp;in&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertIs"><strong>assertIs</strong></a>(self, expr1, expr2, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#FormBasedCredentialsBackendUnitTestBase-assertTrue">assertTrue</a>(a&nbsp;is&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertIsInstance"><strong>assertIsInstance</strong></a>(self, obj, cls, msg<font color="#909090">=None</font>)</dt><dd><tt>Same&nbsp;as&nbsp;<a href="#FormBasedCredentialsBackendUnitTestBase-assertTrue">assertTrue</a>(isinstance(obj,&nbsp;cls)),&nbsp;with&nbsp;a&nbsp;nicer<br>
+default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertIsNone"><strong>assertIsNone</strong></a>(self, obj, msg<font color="#909090">=None</font>)</dt><dd><tt>Same&nbsp;as&nbsp;<a href="#FormBasedCredentialsBackendUnitTestBase-assertTrue">assertTrue</a>(obj&nbsp;is&nbsp;None),&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertIsNot"><strong>assertIsNot</strong></a>(self, expr1, expr2, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#FormBasedCredentialsBackendUnitTestBase-assertTrue">assertTrue</a>(a&nbsp;is&nbsp;not&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertIsNotNone"><strong>assertIsNotNone</strong></a>(self, obj, msg<font color="#909090">=None</font>)</dt><dd><tt>Included&nbsp;for&nbsp;symmetry&nbsp;with&nbsp;assertIsNone.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertItemsEqual"><strong>assertItemsEqual</strong></a>(self, expected_seq, actual_seq, msg<font color="#909090">=None</font>)</dt><dd><tt>An&nbsp;unordered&nbsp;sequence&nbsp;specific&nbsp;comparison.&nbsp;It&nbsp;asserts&nbsp;that<br>
+actual_seq&nbsp;and&nbsp;expected_seq&nbsp;have&nbsp;the&nbsp;same&nbsp;element&nbsp;counts.<br>
+Equivalent&nbsp;to::<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;<a href="#FormBasedCredentialsBackendUnitTestBase-assertEqual">assertEqual</a>(Counter(iter(actual_seq)),<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Counter(iter(expected_seq)))<br>
+&nbsp;<br>
+Asserts&nbsp;that&nbsp;each&nbsp;element&nbsp;has&nbsp;the&nbsp;same&nbsp;count&nbsp;in&nbsp;both&nbsp;sequences.<br>
+Example:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;-&nbsp;[0,&nbsp;1,&nbsp;1]&nbsp;and&nbsp;[1,&nbsp;0,&nbsp;1]&nbsp;compare&nbsp;equal.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;-&nbsp;[0,&nbsp;0,&nbsp;1]&nbsp;and&nbsp;[0,&nbsp;1]&nbsp;compare&nbsp;unequal.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertLess"><strong>assertLess</strong></a>(self, a, b, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#FormBasedCredentialsBackendUnitTestBase-assertTrue">assertTrue</a>(a&nbsp;&lt;&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertLessEqual"><strong>assertLessEqual</strong></a>(self, a, b, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#FormBasedCredentialsBackendUnitTestBase-assertTrue">assertTrue</a>(a&nbsp;&lt;=&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertListEqual"><strong>assertListEqual</strong></a>(self, list1, list2, msg<font color="#909090">=None</font>)</dt><dd><tt>A&nbsp;list-specific&nbsp;equality&nbsp;assertion.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;list1:&nbsp;The&nbsp;first&nbsp;list&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;list2:&nbsp;The&nbsp;second&nbsp;list&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;msg:&nbsp;Optional&nbsp;message&nbsp;to&nbsp;use&nbsp;on&nbsp;failure&nbsp;instead&nbsp;of&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;differences.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertMultiLineEqual"><strong>assertMultiLineEqual</strong></a>(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Assert&nbsp;that&nbsp;two&nbsp;multi-line&nbsp;strings&nbsp;are&nbsp;equal.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertNotAlmostEqual"><strong>assertNotAlmostEqual</strong></a>(self, first, second, places<font color="#909090">=None</font>, msg<font color="#909090">=None</font>, delta<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;equal&nbsp;as&nbsp;determined&nbsp;by&nbsp;their<br>
+difference&nbsp;rounded&nbsp;to&nbsp;the&nbsp;given&nbsp;number&nbsp;of&nbsp;decimal&nbsp;places<br>
+(default&nbsp;7)&nbsp;and&nbsp;comparing&nbsp;to&nbsp;zero,&nbsp;or&nbsp;by&nbsp;comparing&nbsp;that&nbsp;the<br>
+difference&nbsp;between&nbsp;the&nbsp;two&nbsp;objects&nbsp;is&nbsp;less&nbsp;than&nbsp;the&nbsp;given&nbsp;delta.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;decimal&nbsp;places&nbsp;(from&nbsp;zero)&nbsp;are&nbsp;usually&nbsp;not&nbsp;the&nbsp;same<br>
+as&nbsp;significant&nbsp;digits&nbsp;(measured&nbsp;from&nbsp;the&nbsp;most&nbsp;significant&nbsp;digit).<br>
+&nbsp;<br>
+Objects&nbsp;that&nbsp;are&nbsp;equal&nbsp;automatically&nbsp;fail.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertNotAlmostEquals"><strong>assertNotAlmostEquals</strong></a> = assertNotAlmostEqual(self, first, second, places<font color="#909090">=None</font>, msg<font color="#909090">=None</font>, delta<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;equal&nbsp;as&nbsp;determined&nbsp;by&nbsp;their<br>
+difference&nbsp;rounded&nbsp;to&nbsp;the&nbsp;given&nbsp;number&nbsp;of&nbsp;decimal&nbsp;places<br>
+(default&nbsp;7)&nbsp;and&nbsp;comparing&nbsp;to&nbsp;zero,&nbsp;or&nbsp;by&nbsp;comparing&nbsp;that&nbsp;the<br>
+difference&nbsp;between&nbsp;the&nbsp;two&nbsp;objects&nbsp;is&nbsp;less&nbsp;than&nbsp;the&nbsp;given&nbsp;delta.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;decimal&nbsp;places&nbsp;(from&nbsp;zero)&nbsp;are&nbsp;usually&nbsp;not&nbsp;the&nbsp;same<br>
+as&nbsp;significant&nbsp;digits&nbsp;(measured&nbsp;from&nbsp;the&nbsp;most&nbsp;significant&nbsp;digit).<br>
+&nbsp;<br>
+Objects&nbsp;that&nbsp;are&nbsp;equal&nbsp;automatically&nbsp;fail.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertNotEqual"><strong>assertNotEqual</strong></a>(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;equal&nbsp;as&nbsp;determined&nbsp;by&nbsp;the&nbsp;'!='<br>
+operator.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertNotEquals"><strong>assertNotEquals</strong></a> = assertNotEqual(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;equal&nbsp;as&nbsp;determined&nbsp;by&nbsp;the&nbsp;'!='<br>
+operator.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertNotIn"><strong>assertNotIn</strong></a>(self, member, container, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#FormBasedCredentialsBackendUnitTestBase-assertTrue">assertTrue</a>(a&nbsp;not&nbsp;in&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertNotIsInstance"><strong>assertNotIsInstance</strong></a>(self, obj, cls, msg<font color="#909090">=None</font>)</dt><dd><tt>Included&nbsp;for&nbsp;symmetry&nbsp;with&nbsp;assertIsInstance.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertNotRegexpMatches"><strong>assertNotRegexpMatches</strong></a>(self, text, unexpected_regexp, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;the&nbsp;test&nbsp;if&nbsp;the&nbsp;text&nbsp;matches&nbsp;the&nbsp;regular&nbsp;expression.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertRaises"><strong>assertRaises</strong></a>(self, excClass, callableObj<font color="#909090">=None</font>, *args, **kwargs)</dt><dd><tt>Fail&nbsp;unless&nbsp;an&nbsp;exception&nbsp;of&nbsp;class&nbsp;excClass&nbsp;is&nbsp;raised<br>
+by&nbsp;callableObj&nbsp;when&nbsp;invoked&nbsp;with&nbsp;arguments&nbsp;args&nbsp;and&nbsp;keyword<br>
+arguments&nbsp;kwargs.&nbsp;If&nbsp;a&nbsp;different&nbsp;type&nbsp;of&nbsp;exception&nbsp;is<br>
+raised,&nbsp;it&nbsp;will&nbsp;not&nbsp;be&nbsp;caught,&nbsp;and&nbsp;the&nbsp;test&nbsp;case&nbsp;will&nbsp;be<br>
+deemed&nbsp;to&nbsp;have&nbsp;suffered&nbsp;an&nbsp;error,&nbsp;exactly&nbsp;as&nbsp;for&nbsp;an<br>
+unexpected&nbsp;exception.<br>
+&nbsp;<br>
+If&nbsp;called&nbsp;with&nbsp;callableObj&nbsp;omitted&nbsp;or&nbsp;None,&nbsp;will&nbsp;return&nbsp;a<br>
+context&nbsp;object&nbsp;used&nbsp;like&nbsp;this::<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;with&nbsp;<a href="#FormBasedCredentialsBackendUnitTestBase-assertRaises">assertRaises</a>(SomeException):<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;do_something()<br>
+&nbsp;<br>
+The&nbsp;context&nbsp;manager&nbsp;keeps&nbsp;a&nbsp;reference&nbsp;to&nbsp;the&nbsp;exception&nbsp;as<br>
+the&nbsp;'exception'&nbsp;attribute.&nbsp;This&nbsp;allows&nbsp;you&nbsp;to&nbsp;inspect&nbsp;the<br>
+exception&nbsp;after&nbsp;the&nbsp;assertion::<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;with&nbsp;<a href="#FormBasedCredentialsBackendUnitTestBase-assertRaises">assertRaises</a>(SomeException)&nbsp;as&nbsp;cm:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;do_something()<br>
+&nbsp;&nbsp;&nbsp;&nbsp;the_exception&nbsp;=&nbsp;cm.exception<br>
+&nbsp;&nbsp;&nbsp;&nbsp;<a href="#FormBasedCredentialsBackendUnitTestBase-assertEqual">assertEqual</a>(the_exception.error_code,&nbsp;3)</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertRaisesRegexp"><strong>assertRaisesRegexp</strong></a>(self, expected_exception, expected_regexp, callable_obj<font color="#909090">=None</font>, *args, **kwargs)</dt><dd><tt>Asserts&nbsp;that&nbsp;the&nbsp;message&nbsp;in&nbsp;a&nbsp;raised&nbsp;exception&nbsp;matches&nbsp;a&nbsp;regexp.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;expected_exception:&nbsp;Exception&nbsp;class&nbsp;expected&nbsp;to&nbsp;be&nbsp;raised.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;expected_regexp:&nbsp;Regexp&nbsp;(re&nbsp;pattern&nbsp;object&nbsp;or&nbsp;string)&nbsp;expected<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;to&nbsp;be&nbsp;found&nbsp;in&nbsp;error&nbsp;message.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;callable_obj:&nbsp;Function&nbsp;to&nbsp;be&nbsp;called.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;args:&nbsp;Extra&nbsp;args.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;kwargs:&nbsp;Extra&nbsp;kwargs.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertRegexpMatches"><strong>assertRegexpMatches</strong></a>(self, text, expected_regexp, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;the&nbsp;test&nbsp;unless&nbsp;the&nbsp;text&nbsp;matches&nbsp;the&nbsp;regular&nbsp;expression.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertSequenceEqual"><strong>assertSequenceEqual</strong></a>(self, seq1, seq2, msg<font color="#909090">=None</font>, seq_type<font color="#909090">=None</font>)</dt><dd><tt>An&nbsp;equality&nbsp;assertion&nbsp;for&nbsp;ordered&nbsp;sequences&nbsp;(like&nbsp;lists&nbsp;and&nbsp;tuples).<br>
+&nbsp;<br>
+For&nbsp;the&nbsp;purposes&nbsp;of&nbsp;this&nbsp;function,&nbsp;a&nbsp;valid&nbsp;ordered&nbsp;sequence&nbsp;type&nbsp;is&nbsp;one<br>
+which&nbsp;can&nbsp;be&nbsp;indexed,&nbsp;has&nbsp;a&nbsp;length,&nbsp;and&nbsp;has&nbsp;an&nbsp;equality&nbsp;operator.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;seq1:&nbsp;The&nbsp;first&nbsp;sequence&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;seq2:&nbsp;The&nbsp;second&nbsp;sequence&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;seq_type:&nbsp;The&nbsp;expected&nbsp;datatype&nbsp;of&nbsp;the&nbsp;sequences,&nbsp;or&nbsp;None&nbsp;if&nbsp;no<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;datatype&nbsp;should&nbsp;be&nbsp;enforced.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;msg:&nbsp;Optional&nbsp;message&nbsp;to&nbsp;use&nbsp;on&nbsp;failure&nbsp;instead&nbsp;of&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;differences.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertSetEqual"><strong>assertSetEqual</strong></a>(self, set1, set2, msg<font color="#909090">=None</font>)</dt><dd><tt>A&nbsp;set-specific&nbsp;equality&nbsp;assertion.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;set1:&nbsp;The&nbsp;first&nbsp;set&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;set2:&nbsp;The&nbsp;second&nbsp;set&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;msg:&nbsp;Optional&nbsp;message&nbsp;to&nbsp;use&nbsp;on&nbsp;failure&nbsp;instead&nbsp;of&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;differences.<br>
+&nbsp;<br>
+assertSetEqual&nbsp;uses&nbsp;ducktyping&nbsp;to&nbsp;support&nbsp;different&nbsp;types&nbsp;of&nbsp;sets,&nbsp;and<br>
+is&nbsp;optimized&nbsp;for&nbsp;sets&nbsp;specifically&nbsp;(parameters&nbsp;must&nbsp;support&nbsp;a<br>
+difference&nbsp;method).</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertTrue"><strong>assertTrue</strong></a>(self, expr, msg<font color="#909090">=None</font>)</dt><dd><tt>Check&nbsp;that&nbsp;the&nbsp;expression&nbsp;is&nbsp;true.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assertTupleEqual"><strong>assertTupleEqual</strong></a>(self, tuple1, tuple2, msg<font color="#909090">=None</font>)</dt><dd><tt>A&nbsp;tuple-specific&nbsp;equality&nbsp;assertion.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;tuple1:&nbsp;The&nbsp;first&nbsp;tuple&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;tuple2:&nbsp;The&nbsp;second&nbsp;tuple&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;msg:&nbsp;Optional&nbsp;message&nbsp;to&nbsp;use&nbsp;on&nbsp;failure&nbsp;instead&nbsp;of&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;differences.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-assert_"><strong>assert_</strong></a> = assertTrue(self, expr, msg<font color="#909090">=None</font>)</dt><dd><tt>Check&nbsp;that&nbsp;the&nbsp;expression&nbsp;is&nbsp;true.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-countTestCases"><strong>countTestCases</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-debug"><strong>debug</strong></a>(self)</dt><dd><tt>Run&nbsp;the&nbsp;test&nbsp;without&nbsp;collecting&nbsp;errors&nbsp;in&nbsp;a&nbsp;TestResult</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-defaultTestResult"><strong>defaultTestResult</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-doCleanups"><strong>doCleanups</strong></a>(self)</dt><dd><tt>Execute&nbsp;all&nbsp;cleanup&nbsp;functions.&nbsp;Normally&nbsp;called&nbsp;for&nbsp;you&nbsp;after<br>
+tearDown.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-fail"><strong>fail</strong></a>(self, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;immediately,&nbsp;with&nbsp;the&nbsp;given&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-failIf"><strong>failIf</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-failIfAlmostEqual"><strong>failIfAlmostEqual</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-failIfEqual"><strong>failIfEqual</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-failUnless"><strong>failUnless</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-failUnlessAlmostEqual"><strong>failUnlessAlmostEqual</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-failUnlessEqual"><strong>failUnlessEqual</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-failUnlessRaises"><strong>failUnlessRaises</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-id"><strong>id</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-run"><strong>run</strong></a>(self, result<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-shortDescription"><strong>shortDescription</strong></a>(self)</dt><dd><tt>Returns&nbsp;a&nbsp;one-line&nbsp;description&nbsp;of&nbsp;the&nbsp;test,&nbsp;or&nbsp;None&nbsp;if&nbsp;no<br>
+description&nbsp;has&nbsp;been&nbsp;provided.<br>
+&nbsp;<br>
+The&nbsp;default&nbsp;implementation&nbsp;of&nbsp;this&nbsp;method&nbsp;returns&nbsp;the&nbsp;first&nbsp;line&nbsp;of<br>
+the&nbsp;specified&nbsp;test&nbsp;method's&nbsp;docstring.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-skipTest"><strong>skipTest</strong></a>(self, reason)</dt><dd><tt>Skip&nbsp;this&nbsp;test.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-tearDown"><strong>tearDown</strong></a>(self)</dt><dd><tt>Hook&nbsp;method&nbsp;for&nbsp;deconstructing&nbsp;the&nbsp;test&nbsp;fixture&nbsp;after&nbsp;testing&nbsp;it.</tt></dd></dl>
+
+<hr>
+Class methods inherited from <a href="unittest.case.html#TestCase">unittest.case.TestCase</a>:<br>
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-setUpClass"><strong>setUpClass</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Hook&nbsp;method&nbsp;for&nbsp;setting&nbsp;up&nbsp;class&nbsp;fixture&nbsp;before&nbsp;running&nbsp;tests&nbsp;in&nbsp;the&nbsp;class.</tt></dd></dl>
+
+<dl><dt><a name="FormBasedCredentialsBackendUnitTestBase-tearDownClass"><strong>tearDownClass</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Hook&nbsp;method&nbsp;for&nbsp;deconstructing&nbsp;the&nbsp;class&nbsp;fixture&nbsp;after&nbsp;running&nbsp;all&nbsp;tests&nbsp;in&nbsp;the&nbsp;class.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="unittest.case.html#TestCase">unittest.case.TestCase</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="unittest.case.html#TestCase">unittest.case.TestCase</a>:<br>
+<dl><dt><strong>failureException</strong> = &lt;type 'exceptions.AssertionError'&gt;<dd><tt>Assertion&nbsp;failed.</tt></dl>
+
+<dl><dt><strong>longMessage</strong> = False</dl>
+
+<dl><dt><strong>maxDiff</strong> = 640</dl>
+
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.google_credentials_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.google_credentials_backend.html
new file mode 100644
index 0000000..389a886
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.google_credentials_backend.html
@@ -0,0 +1,156 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.google_credentials_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.google_credentials_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/google_credentials_backend.py">telemetry/internal/backends/google_credentials_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.backends.form_based_credentials_backend.html">telemetry.internal.backends.form_based_credentials_backend</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.form_based_credentials_backend.html#FormBasedCredentialsBackend">telemetry.internal.backends.form_based_credentials_backend.FormBasedCredentialsBackend</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.google_credentials_backend.html#GoogleCredentialsBackend">GoogleCredentialsBackend</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.google_credentials_backend.html#GoogleCredentialsBackend2">GoogleCredentialsBackend2</a>
+</font></dt></dl>
+</dd>
+</dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="GoogleCredentialsBackend">class <strong>GoogleCredentialsBackend</strong></a>(<a href="telemetry.internal.backends.form_based_credentials_backend.html#FormBasedCredentialsBackend">telemetry.internal.backends.form_based_credentials_backend.FormBasedCredentialsBackend</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.google_credentials_backend.html#GoogleCredentialsBackend">GoogleCredentialsBackend</a></dd>
+<dd><a href="telemetry.internal.backends.form_based_credentials_backend.html#FormBasedCredentialsBackend">telemetry.internal.backends.form_based_credentials_backend.FormBasedCredentialsBackend</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>credentials_type</strong></dt>
+</dl>
+<dl><dt><strong>logged_in_javascript</strong></dt>
+<dd><tt>Evaluates&nbsp;to&nbsp;true&nbsp;iff&nbsp;already&nbsp;logged&nbsp;in.</tt></dd>
+</dl>
+<dl><dt><strong>login_form_id</strong></dt>
+</dl>
+<dl><dt><strong>login_input_id</strong></dt>
+</dl>
+<dl><dt><strong>password_input_id</strong></dt>
+</dl>
+<dl><dt><strong>url</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.form_based_credentials_backend.html#FormBasedCredentialsBackend">telemetry.internal.backends.form_based_credentials_backend.FormBasedCredentialsBackend</a>:<br>
+<dl><dt><a name="GoogleCredentialsBackend-IsAlreadyLoggedIn"><strong>IsAlreadyLoggedIn</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="GoogleCredentialsBackend-IsLoggedIn"><strong>IsLoggedIn</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="GoogleCredentialsBackend-LoginNeeded"><strong>LoginNeeded</strong></a>(self, tab, action_runner, config)</dt><dd><tt>Logs&nbsp;in&nbsp;to&nbsp;a&nbsp;test&nbsp;account.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;RuntimeError:&nbsp;if&nbsp;credential&nbsp;information&nbsp;could&nbsp;not&nbsp;be&nbsp;obtained.</tt></dd></dl>
+
+<dl><dt><a name="GoogleCredentialsBackend-LoginNoLongerNeeded"><strong>LoginNoLongerNeeded</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="GoogleCredentialsBackend-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.form_based_credentials_backend.html#FormBasedCredentialsBackend">telemetry.internal.backends.form_based_credentials_backend.FormBasedCredentialsBackend</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>login_button_javascript</strong></dt>
+<dd><tt>Some&nbsp;sites&nbsp;have&nbsp;custom&nbsp;JS&nbsp;to&nbsp;log&nbsp;in.</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="GoogleCredentialsBackend2">class <strong>GoogleCredentialsBackend2</strong></a>(<a href="telemetry.internal.backends.google_credentials_backend.html#GoogleCredentialsBackend">GoogleCredentialsBackend</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Google&nbsp;credential&nbsp;backend&nbsp;for&nbsp;google2&nbsp;credential.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.google_credentials_backend.html#GoogleCredentialsBackend2">GoogleCredentialsBackend2</a></dd>
+<dd><a href="telemetry.internal.backends.google_credentials_backend.html#GoogleCredentialsBackend">GoogleCredentialsBackend</a></dd>
+<dd><a href="telemetry.internal.backends.form_based_credentials_backend.html#FormBasedCredentialsBackend">telemetry.internal.backends.form_based_credentials_backend.FormBasedCredentialsBackend</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>credentials_type</strong></dt>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.google_credentials_backend.html#GoogleCredentialsBackend">GoogleCredentialsBackend</a>:<br>
+<dl><dt><strong>logged_in_javascript</strong></dt>
+<dd><tt>Evaluates&nbsp;to&nbsp;true&nbsp;iff&nbsp;already&nbsp;logged&nbsp;in.</tt></dd>
+</dl>
+<dl><dt><strong>login_form_id</strong></dt>
+</dl>
+<dl><dt><strong>login_input_id</strong></dt>
+</dl>
+<dl><dt><strong>password_input_id</strong></dt>
+</dl>
+<dl><dt><strong>url</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.form_based_credentials_backend.html#FormBasedCredentialsBackend">telemetry.internal.backends.form_based_credentials_backend.FormBasedCredentialsBackend</a>:<br>
+<dl><dt><a name="GoogleCredentialsBackend2-IsAlreadyLoggedIn"><strong>IsAlreadyLoggedIn</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="GoogleCredentialsBackend2-IsLoggedIn"><strong>IsLoggedIn</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="GoogleCredentialsBackend2-LoginNeeded"><strong>LoginNeeded</strong></a>(self, tab, action_runner, config)</dt><dd><tt>Logs&nbsp;in&nbsp;to&nbsp;a&nbsp;test&nbsp;account.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;RuntimeError:&nbsp;if&nbsp;credential&nbsp;information&nbsp;could&nbsp;not&nbsp;be&nbsp;obtained.</tt></dd></dl>
+
+<dl><dt><a name="GoogleCredentialsBackend2-LoginNoLongerNeeded"><strong>LoginNoLongerNeeded</strong></a>(self, tab)</dt></dl>
+
+<dl><dt><a name="GoogleCredentialsBackend2-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.form_based_credentials_backend.html#FormBasedCredentialsBackend">telemetry.internal.backends.form_based_credentials_backend.FormBasedCredentialsBackend</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>login_button_javascript</strong></dt>
+<dd><tt>Some&nbsp;sites&nbsp;have&nbsp;custom&nbsp;JS&nbsp;to&nbsp;log&nbsp;in.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.html
new file mode 100644
index 0000000..59435ea
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.html
@@ -0,0 +1,43 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.internal.backends</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.backends</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/__init__.py">telemetry/internal/backends/__init__.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.backends.android_app_backend.html">android_app_backend</a><br>
+<a href="telemetry.internal.backends.android_browser_backend_settings.html">android_browser_backend_settings</a><br>
+<a href="telemetry.internal.backends.android_command_line_backend.html">android_command_line_backend</a><br>
+<a href="telemetry.internal.backends.android_command_line_backend_unittest.html">android_command_line_backend_unittest</a><br>
+<a href="telemetry.internal.backends.app_backend.html">app_backend</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.backends.browser_backend.html">browser_backend</a><br>
+<a href="telemetry.internal.backends.browser_backend_unittest.html">browser_backend_unittest</a><br>
+<a href="telemetry.internal.backends.chrome.html"><strong>chrome</strong>&nbsp;(package)</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.html"><strong>chrome_inspector</strong>&nbsp;(package)</a><br>
+<a href="telemetry.internal.backends.codepen_credentials_backend.html">codepen_credentials_backend</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.backends.codepen_credentials_backend_unittest.html">codepen_credentials_backend_unittest</a><br>
+<a href="telemetry.internal.backends.facebook_credentials_backend.html">facebook_credentials_backend</a><br>
+<a href="telemetry.internal.backends.facebook_credentials_backend_unittest.html">facebook_credentials_backend_unittest</a><br>
+<a href="telemetry.internal.backends.form_based_credentials_backend.html">form_based_credentials_backend</a><br>
+<a href="telemetry.internal.backends.form_based_credentials_backend_unittest_base.html">form_based_credentials_backend_unittest_base</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.backends.google_credentials_backend.html">google_credentials_backend</a><br>
+<a href="telemetry.internal.backends.google_credentials_backend_unittest.html">google_credentials_backend_unittest</a><br>
+<a href="telemetry.internal.backends.mandoline.html"><strong>mandoline</strong>&nbsp;(package)</a><br>
+<a href="telemetry.internal.backends.remote.html"><strong>remote</strong>&nbsp;(package)</a><br>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.mandoline.android.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.mandoline.android.html
new file mode 100644
index 0000000..f64be17
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.mandoline.android.html
@@ -0,0 +1,94 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.mandoline.android</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.mandoline.html"><font color="#ffffff">mandoline</font></a>.android</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/mandoline/android.py">telemetry/internal/backends/mandoline/android.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="devil.android.apk_helper.html">devil.android.apk_helper</a><br>
+<a href="atexit.html">atexit</a><br>
+<a href="devil.base_error.html">devil.base_error</a><br>
+<a href="pylib.constants.html">pylib.constants</a><br>
+</td><td width="25%" valign=top><a href="devil.android.device_errors.html">devil.android.device_errors</a><br>
+<a href="devil.android.device_utils.html">devil.android.device_utils</a><br>
+<a href="logging.html">logging</a><br>
+<a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="signal.html">signal</a><br>
+<a href="subprocess.html">subprocess</a><br>
+<a href="sys.html">sys</a><br>
+<a href="threading.html">threading</a><br>
+</td><td width="25%" valign=top><a href="time.html">time</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.mandoline.android.html#AndroidShell">AndroidShell</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="AndroidShell">class <strong>AndroidShell</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Used&nbsp;to&nbsp;set&nbsp;up&nbsp;and&nbsp;run&nbsp;a&nbsp;given&nbsp;mojo&nbsp;shell&nbsp;binary&nbsp;on&nbsp;an&nbsp;Android&nbsp;device.<br>
+|config|&nbsp;is&nbsp;the&nbsp;mopy.config.Config&nbsp;for&nbsp;the&nbsp;build.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="AndroidShell-InitShell"><strong>InitShell</strong></a>(self, device<font color="#909090">=None</font>)</dt><dd><tt>Runs&nbsp;adb&nbsp;as&nbsp;root,&nbsp;and&nbsp;installs&nbsp;the&nbsp;apk&nbsp;as&nbsp;needed.&nbsp;&nbsp;|device|&nbsp;is&nbsp;the&nbsp;target<br>
+device&nbsp;to&nbsp;run&nbsp;on,&nbsp;if&nbsp;multiple&nbsp;devices&nbsp;are&nbsp;connected.&nbsp;Returns&nbsp;0&nbsp;on&nbsp;success&nbsp;or<br>
+a&nbsp;non-zero&nbsp;exit&nbsp;code&nbsp;on&nbsp;a&nbsp;terminal&nbsp;failure.</tt></dd></dl>
+
+<dl><dt><a name="AndroidShell-ShowLogs"><strong>ShowLogs</strong></a>(self, stdout<font color="#909090">=&lt;open file '&lt;stdout&gt;', mode 'w'&gt;</font>)</dt><dd><tt>Displays&nbsp;the&nbsp;mojo&nbsp;shell&nbsp;logs&nbsp;and&nbsp;returns&nbsp;the&nbsp;process&nbsp;reading&nbsp;the&nbsp;logs.</tt></dd></dl>
+
+<dl><dt><a name="AndroidShell-StartActivity"><strong>StartActivity</strong></a>(self, activity_name, arguments, stdout, on_fifo_closed, temp_gdb_dir<font color="#909090">=None</font>)</dt><dd><tt>Starts&nbsp;the&nbsp;shell&nbsp;with&nbsp;the&nbsp;given&nbsp;|arguments|,&nbsp;directing&nbsp;output&nbsp;to&nbsp;|stdout|.<br>
+|on_fifo_closed|&nbsp;will&nbsp;be&nbsp;run&nbsp;if&nbsp;the&nbsp;FIFO&nbsp;can't&nbsp;be&nbsp;found&nbsp;or&nbsp;when&nbsp;it's&nbsp;closed.<br>
+|temp_gdb_dir|&nbsp;is&nbsp;set&nbsp;to&nbsp;a&nbsp;location&nbsp;with&nbsp;appropriate&nbsp;symlinks&nbsp;for&nbsp;gdb&nbsp;to<br>
+find&nbsp;when&nbsp;attached&nbsp;to&nbsp;the&nbsp;device's&nbsp;remote&nbsp;process&nbsp;on&nbsp;startup.</tt></dd></dl>
+
+<dl><dt><a name="AndroidShell-__init__"><strong>__init__</strong></a>(self, config, chrome_root)</dt></dl>
+
+<dl><dt><a name="AndroidShell-kill"><strong>kill</strong></a>(self)</dt><dd><tt>Stops&nbsp;the&nbsp;mojo&nbsp;shell;&nbsp;matches&nbsp;the&nbsp;Popen.kill&nbsp;method&nbsp;signature.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>LOGCAT_TAGS</strong> = ['AndroidHandler', 'MojoFileHelper', 'MojoMain', 'MojoShellActivity', 'MojoShellApplication', 'chromium']<br>
+<strong>MAPPING_PREFIX</strong> = '--map-origin='</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.mandoline.android_mandoline_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.mandoline.android_mandoline_backend.html
new file mode 100644
index 0000000..a65bf6f
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.mandoline.android_mandoline_backend.html
@@ -0,0 +1,189 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.mandoline.android_mandoline_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.mandoline.html"><font color="#ffffff">mandoline</font></a>.android_mandoline_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/mandoline/android_mandoline_backend.py">telemetry/internal/backends/mandoline/android_mandoline_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.backends.mandoline.android.html">telemetry.internal.backends.mandoline.android</a><br>
+<a href="telemetry.internal.platform.android_platform_backend.html">telemetry.internal.platform.android_platform_backend</a><br>
+<a href="telemetry.internal.backends.mandoline.config.html">telemetry.internal.backends.mandoline.config</a><br>
+</td><td width="25%" valign=top><a href="pylib.constants.html">pylib.constants</a><br>
+<a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+<a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.backends.mandoline.mandoline_browser_backend.html">telemetry.internal.backends.mandoline.mandoline_browser_backend</a><br>
+<a href="os.html">os</a><br>
+<a href="random.html">random</a><br>
+</td><td width="25%" valign=top><a href="re.html">re</a><br>
+<a href="sys.html">sys</a><br>
+<a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.mandoline.mandoline_browser_backend.html#MandolineBrowserBackend">telemetry.internal.backends.mandoline.mandoline_browser_backend.MandolineBrowserBackend</a>(<a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.mandoline.android_mandoline_backend.html#AndroidMandolineBackend">AndroidMandolineBackend</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="AndroidMandolineBackend">class <strong>AndroidMandolineBackend</strong></a>(<a href="telemetry.internal.backends.mandoline.mandoline_browser_backend.html#MandolineBrowserBackend">telemetry.internal.backends.mandoline.mandoline_browser_backend.MandolineBrowserBackend</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>The&nbsp;backend&nbsp;for&nbsp;controlling&nbsp;a&nbsp;mandoline&nbsp;browser&nbsp;instance&nbsp;running&nbsp;on<br>
+Android.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.mandoline.android_mandoline_backend.html#AndroidMandolineBackend">AndroidMandolineBackend</a></dd>
+<dd><a href="telemetry.internal.backends.mandoline.mandoline_browser_backend.html#MandolineBrowserBackend">telemetry.internal.backends.mandoline.mandoline_browser_backend.MandolineBrowserBackend</a></dd>
+<dd><a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a></dd>
+<dd><a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="AndroidMandolineBackend-Close"><strong>Close</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidMandolineBackend-GetBrowserStartupArgs"><strong>GetBrowserStartupArgs</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidMandolineBackend-GetStackTrace"><strong>GetStackTrace</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidMandolineBackend-GetStandardOutput"><strong>GetStandardOutput</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidMandolineBackend-IsBrowserRunning"><strong>IsBrowserRunning</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidMandolineBackend-Start"><strong>Start</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidMandolineBackend-__del__"><strong>__del__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidMandolineBackend-__init__"><strong>__init__</strong></a>(self, android_platform_backend, browser_options, target_arch, browser_type, build_path, package, chrome_root)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>activity</strong></dt>
+</dl>
+<dl><dt><strong>browser_directory</strong></dt>
+</dl>
+<dl><dt><strong>device</strong></dt>
+</dl>
+<dl><dt><strong>package</strong></dt>
+</dl>
+<dl><dt><strong>pid</strong></dt>
+</dl>
+<dl><dt><strong>profile_directory</strong></dt>
+</dl>
+<dl><dt><strong>should_ignore_certificate_errors</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.mandoline.mandoline_browser_backend.html#MandolineBrowserBackend">telemetry.internal.backends.mandoline.mandoline_browser_backend.MandolineBrowserBackend</a>:<br>
+<dl><dt><a name="AndroidMandolineBackend-GetProcessName"><strong>GetProcessName</strong></a>(self, cmd_line)</dt><dd><tt>Returns&nbsp;a&nbsp;user-friendly&nbsp;name&nbsp;for&nbsp;the&nbsp;process&nbsp;of&nbsp;the&nbsp;given&nbsp;|cmd_line|.</tt></dd></dl>
+
+<dl><dt><a name="AndroidMandolineBackend-GetReplayBrowserStartupArgs"><strong>GetReplayBrowserStartupArgs</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidMandolineBackend-HasBrowserFinishedLaunching"><strong>HasBrowserFinishedLaunching</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.mandoline.mandoline_browser_backend.html#MandolineBrowserBackend">telemetry.internal.backends.mandoline.mandoline_browser_backend.MandolineBrowserBackend</a>:<br>
+<dl><dt><strong>devtools_client</strong></dt>
+</dl>
+<dl><dt><strong>supports_cpu_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_memory_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_power_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_system_info</strong></dt>
+</dl>
+<dl><dt><strong>supports_tab_control</strong></dt>
+</dl>
+<dl><dt><strong>supports_tracing</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a>:<br>
+<dl><dt><a name="AndroidMandolineBackend-DumpMemory"><strong>DumpMemory</strong></a>(self, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="AndroidMandolineBackend-GetSystemInfo"><strong>GetSystemInfo</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidMandolineBackend-IsAppRunning"><strong>IsAppRunning</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidMandolineBackend-SetBrowser"><strong>SetBrowser</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="AndroidMandolineBackend-SetMemoryPressureNotificationsSuppressed"><strong>SetMemoryPressureNotificationsSuppressed</strong></a>(self, suppressed, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="AndroidMandolineBackend-SimulateMemoryPressureNotification"><strong>SimulateMemoryPressureNotification</strong></a>(self, pressure_level, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="AndroidMandolineBackend-StartTracing"><strong>StartTracing</strong></a>(self, trace_options, custom_categories<font color="#909090">=None</font>, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="AndroidMandolineBackend-StopTracing"><strong>StopTracing</strong></a>(self, trace_data_builder)</dt></dl>
+
+<dl><dt><a name="AndroidMandolineBackend-UploadLogsToCloudStorage"><strong>UploadLogsToCloudStorage</strong></a>(self)</dt><dd><tt>Uploading&nbsp;log&nbsp;files&nbsp;produce&nbsp;by&nbsp;this&nbsp;browser&nbsp;instance&nbsp;to&nbsp;cloud&nbsp;storage.<br>
+&nbsp;<br>
+Check&nbsp;supports_uploading_logs&nbsp;before&nbsp;calling&nbsp;this&nbsp;method.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a>:<br>
+<dl><dt><strong>browser</strong></dt>
+</dl>
+<dl><dt><strong>browser_type</strong></dt>
+</dl>
+<dl><dt><strong>log_file_path</strong></dt>
+</dl>
+<dl><dt><strong>profiling_controller_backend</strong></dt>
+</dl>
+<dl><dt><strong>supports_extensions</strong></dt>
+<dd><tt>True&nbsp;if&nbsp;this&nbsp;browser&nbsp;backend&nbsp;supports&nbsp;extensions.</tt></dd>
+</dl>
+<dl><dt><strong>supports_memory_dumping</strong></dt>
+</dl>
+<dl><dt><strong>supports_overriding_memory_pressure_notifications</strong></dt>
+</dl>
+<dl><dt><strong>supports_uploading_logs</strong></dt>
+</dl>
+<dl><dt><strong>tab_list_backend</strong></dt>
+</dl>
+<dl><dt><strong>wpr_mode</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a>:<br>
+<dl><dt><a name="AndroidMandolineBackend-SetApp"><strong>SetApp</strong></a>(self, app)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>app</strong></dt>
+</dl>
+<dl><dt><strong>app_type</strong></dt>
+</dl>
+<dl><dt><strong>platform_backend</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.mandoline.android_mandoline_finder.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.mandoline.android_mandoline_finder.html
new file mode 100644
index 0000000..6131199
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.mandoline.android_mandoline_finder.html
@@ -0,0 +1,120 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.mandoline.android_mandoline_finder</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.mandoline.html"><font color="#ffffff">mandoline</font></a>.android_mandoline_finder</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/mandoline/android_mandoline_finder.py">telemetry/internal/backends/mandoline/android_mandoline_finder.py</a></font></td></tr></table>
+    <p><tt>Finds&nbsp;android&nbsp;mandoline&nbsp;browsers&nbsp;that&nbsp;can&nbsp;be&nbsp;controlled&nbsp;by&nbsp;telemetry.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.platform.android_device.html">telemetry.internal.platform.android_device</a><br>
+<a href="telemetry.internal.backends.mandoline.android_mandoline_backend.html">telemetry.internal.backends.mandoline.android_mandoline_backend</a><br>
+<a href="devil.android.apk_helper.html">devil.android.apk_helper</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.browser.browser.html">telemetry.internal.browser.browser</a><br>
+<a href="logging.html">logging</a><br>
+<a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.util.path.html">telemetry.internal.util.path</a><br>
+<a href="telemetry.core.platform.html">telemetry.core.platform</a><br>
+<a href="telemetry.internal.browser.possible_browser.html">telemetry.internal.browser.possible_browser</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a>(<a href="telemetry.internal.app.possible_app.html#PossibleApp">telemetry.internal.app.possible_app.PossibleApp</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.mandoline.android_mandoline_finder.html#PossibleAndroidMandolineBrowser">PossibleAndroidMandolineBrowser</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PossibleAndroidMandolineBrowser">class <strong>PossibleAndroidMandolineBrowser</strong></a>(<a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;launchable&nbsp;android&nbsp;mandoline&nbsp;browser&nbsp;instance.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.mandoline.android_mandoline_finder.html#PossibleAndroidMandolineBrowser">PossibleAndroidMandolineBrowser</a></dd>
+<dd><a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a></dd>
+<dd><a href="telemetry.internal.app.possible_app.html#PossibleApp">telemetry.internal.app.possible_app.PossibleApp</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="PossibleAndroidMandolineBrowser-Create"><strong>Create</strong></a>(self, finder_options)</dt></dl>
+
+<dl><dt><a name="PossibleAndroidMandolineBrowser-HaveLocalAPK"><strong>HaveLocalAPK</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleAndroidMandolineBrowser-SupportsOptions"><strong>SupportsOptions</strong></a>(self, finder_options)</dt></dl>
+
+<dl><dt><a name="PossibleAndroidMandolineBrowser-UpdateExecutableIfNeeded"><strong>UpdateExecutableIfNeeded</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleAndroidMandolineBrowser-__init__"><strong>__init__</strong></a>(self, browser_type, finder_options, android_platform, build_path, local_apk)</dt></dl>
+
+<dl><dt><a name="PossibleAndroidMandolineBrowser-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleAndroidMandolineBrowser-last_modification_time"><strong>last_modification_time</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a>:<br>
+<dl><dt><a name="PossibleAndroidMandolineBrowser-IsRemote"><strong>IsRemote</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleAndroidMandolineBrowser-RunRemote"><strong>RunRemote</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleAndroidMandolineBrowser-SetCredentialsPath"><strong>SetCredentialsPath</strong></a>(self, credentials_path)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a>:<br>
+<dl><dt><strong>browser_type</strong></dt>
+</dl>
+<dl><dt><strong>supports_tab_control</strong></dt>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.app.possible_app.html#PossibleApp">telemetry.internal.app.possible_app.PossibleApp</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>app_type</strong></dt>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+</dl>
+<dl><dt><strong>target_os</strong></dt>
+<dd><tt>Target&nbsp;OS&nbsp;the&nbsp;app&nbsp;will&nbsp;run&nbsp;on.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-CanFindAvailableBrowsers"><strong>CanFindAvailableBrowsers</strong></a>()</dt></dl>
+ <dl><dt><a name="-FindAllAvailableBrowsers"><strong>FindAllAvailableBrowsers</strong></a>(finder_options, device)</dt><dd><tt>Finds&nbsp;all&nbsp;the&nbsp;possible&nbsp;browsers&nbsp;to&nbsp;run&nbsp;on&nbsp;the&nbsp;device.<br>
+&nbsp;<br>
+The&nbsp;device&nbsp;is&nbsp;either&nbsp;the&nbsp;only&nbsp;device&nbsp;on&nbsp;the&nbsp;host&nbsp;platform,<br>
+or&nbsp;|finder_options|&nbsp;specifies&nbsp;a&nbsp;particular&nbsp;device.</tt></dd></dl>
+ <dl><dt><a name="-FindAllBrowserTypes"><strong>FindAllBrowserTypes</strong></a>(_options)</dt></dl>
+ <dl><dt><a name="-SelectDefaultBrowser"><strong>SelectDefaultBrowser</strong></a>(possible_browsers)</dt><dd><tt>Returns&nbsp;the&nbsp;newest&nbsp;possible&nbsp;browser.</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.mandoline.config.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.mandoline.config.html
new file mode 100644
index 0000000..08beced
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.mandoline.config.html
@@ -0,0 +1,112 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.mandoline.config</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.mandoline.html"><font color="#ffffff">mandoline</font></a>.config</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/mandoline/config.py">telemetry/internal/backends/mandoline/config.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="ast.html">ast</a><br>
+<a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="platform.html">platform</a><br>
+<a href="re.html">re</a><br>
+</td><td width="25%" valign=top><a href="sys.html">sys</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.mandoline.config.html#Config">Config</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Config">class <strong>Config</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;<a href="#Config">Config</a>&nbsp;contains&nbsp;a&nbsp;dictionary&nbsp;that&nbsp;specifies&nbsp;a&nbsp;build&nbsp;configuration.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="Config-__init__"><strong>__init__</strong></a>(self, build_dir<font color="#909090">=None</font>, target_os<font color="#909090">=None</font>, target_cpu<font color="#909090">=None</font>, is_debug<font color="#909090">=None</font>, is_verbose<font color="#909090">=None</font>, apk_name<font color="#909090">='MojoRunner.apk'</font>)</dt><dd><tt>Function&nbsp;arguments&nbsp;take&nbsp;precedence&nbsp;over&nbsp;GN&nbsp;args&nbsp;and&nbsp;default&nbsp;values.</tt></dd></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="Config-GetHostCPU"><strong>GetHostCPU</strong></a>()</dt></dl>
+
+<dl><dt><a name="Config-GetHostOS"><strong>GetHostOS</strong></a>()</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>apk_name</strong></dt>
+<dd><tt>Name&nbsp;of&nbsp;the&nbsp;APK&nbsp;file&nbsp;to&nbsp;run</tt></dd>
+</dl>
+<dl><dt><strong>build_dir</strong></dt>
+<dd><tt>Build&nbsp;directory&nbsp;path.</tt></dd>
+</dl>
+<dl><dt><strong>dcheck_always_on</strong></dt>
+<dd><tt>DCHECK&nbsp;and&nbsp;MOJO_DCHECK&nbsp;are&nbsp;fatal&nbsp;even&nbsp;in&nbsp;release&nbsp;builds</tt></dd>
+</dl>
+<dl><dt><strong>is_asan</strong></dt>
+<dd><tt>Is&nbsp;ASAN&nbsp;build?</tt></dd>
+</dl>
+<dl><dt><strong>is_debug</strong></dt>
+<dd><tt>Is&nbsp;Debug&nbsp;build?</tt></dd>
+</dl>
+<dl><dt><strong>is_verbose</strong></dt>
+<dd><tt>Should&nbsp;print&nbsp;additional&nbsp;logging&nbsp;information?</tt></dd>
+</dl>
+<dl><dt><strong>target_cpu</strong></dt>
+<dd><tt>CPU&nbsp;arch&nbsp;of&nbsp;the&nbsp;build/test&nbsp;target.</tt></dd>
+</dl>
+<dl><dt><strong>target_os</strong></dt>
+<dd><tt>OS&nbsp;of&nbsp;the&nbsp;build/test&nbsp;target.</tt></dd>
+</dl>
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>ARCH_ARM</strong> = 'arm'</dl>
+
+<dl><dt><strong>ARCH_X64</strong> = 'x64'</dl>
+
+<dl><dt><strong>ARCH_X86</strong> = 'x86'</dl>
+
+<dl><dt><strong>OS_ANDROID</strong> = 'android'</dl>
+
+<dl><dt><strong>OS_CHROMEOS</strong> = 'chromeos'</dl>
+
+<dl><dt><strong>OS_LINUX</strong> = 'linux'</dl>
+
+<dl><dt><strong>OS_MAC</strong> = 'mac'</dl>
+
+<dl><dt><strong>OS_WINDOWS</strong> = 'windows'</dl>
+
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.mandoline.desktop_mandoline_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.mandoline.desktop_mandoline_backend.html
new file mode 100644
index 0000000..a587529
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.mandoline.desktop_mandoline_backend.html
@@ -0,0 +1,180 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.mandoline.desktop_mandoline_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.mandoline.html"><font color="#ffffff">mandoline</font></a>.desktop_mandoline_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/mandoline/desktop_mandoline_backend.py">telemetry/internal/backends/mandoline/desktop_mandoline_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+<a href="logging.html">logging</a><br>
+<a href="telemetry.internal.backends.mandoline.mandoline_browser_backend.html">telemetry.internal.backends.mandoline.mandoline_browser_backend</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+<a href="shutil.html">shutil</a><br>
+<a href="subprocess.html">subprocess</a><br>
+</td><td width="25%" valign=top><a href="sys.html">sys</a><br>
+<a href="tempfile.html">tempfile</a><br>
+<a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.mandoline.mandoline_browser_backend.html#MandolineBrowserBackend">telemetry.internal.backends.mandoline.mandoline_browser_backend.MandolineBrowserBackend</a>(<a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.mandoline.desktop_mandoline_backend.html#DesktopMandolineBackend">DesktopMandolineBackend</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="DesktopMandolineBackend">class <strong>DesktopMandolineBackend</strong></a>(<a href="telemetry.internal.backends.mandoline.mandoline_browser_backend.html#MandolineBrowserBackend">telemetry.internal.backends.mandoline.mandoline_browser_backend.MandolineBrowserBackend</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>The&nbsp;backend&nbsp;for&nbsp;controlling&nbsp;a&nbsp;locally-executed&nbsp;browser&nbsp;instance,&nbsp;on&nbsp;Linux<br>
+or&nbsp;Windows.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.mandoline.desktop_mandoline_backend.html#DesktopMandolineBackend">DesktopMandolineBackend</a></dd>
+<dd><a href="telemetry.internal.backends.mandoline.mandoline_browser_backend.html#MandolineBrowserBackend">telemetry.internal.backends.mandoline.mandoline_browser_backend.MandolineBrowserBackend</a></dd>
+<dd><a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a></dd>
+<dd><a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="DesktopMandolineBackend-Close"><strong>Close</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopMandolineBackend-GetBrowserStartupArgs"><strong>GetBrowserStartupArgs</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopMandolineBackend-GetStackTrace"><strong>GetStackTrace</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopMandolineBackend-GetStandardOutput"><strong>GetStandardOutput</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopMandolineBackend-HasBrowserFinishedLaunching"><strong>HasBrowserFinishedLaunching</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopMandolineBackend-IsBrowserRunning"><strong>IsBrowserRunning</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopMandolineBackend-Start"><strong>Start</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopMandolineBackend-__del__"><strong>__del__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopMandolineBackend-__init__"><strong>__init__</strong></a>(self, desktop_platform_backend, browser_options, executable, browser_directory)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>browser_directory</strong></dt>
+</dl>
+<dl><dt><strong>pid</strong></dt>
+</dl>
+<dl><dt><strong>profile_directory</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.mandoline.mandoline_browser_backend.html#MandolineBrowserBackend">telemetry.internal.backends.mandoline.mandoline_browser_backend.MandolineBrowserBackend</a>:<br>
+<dl><dt><a name="DesktopMandolineBackend-GetProcessName"><strong>GetProcessName</strong></a>(self, cmd_line)</dt><dd><tt>Returns&nbsp;a&nbsp;user-friendly&nbsp;name&nbsp;for&nbsp;the&nbsp;process&nbsp;of&nbsp;the&nbsp;given&nbsp;|cmd_line|.</tt></dd></dl>
+
+<dl><dt><a name="DesktopMandolineBackend-GetReplayBrowserStartupArgs"><strong>GetReplayBrowserStartupArgs</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.mandoline.mandoline_browser_backend.html#MandolineBrowserBackend">telemetry.internal.backends.mandoline.mandoline_browser_backend.MandolineBrowserBackend</a>:<br>
+<dl><dt><strong>devtools_client</strong></dt>
+</dl>
+<dl><dt><strong>supports_cpu_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_memory_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_power_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_system_info</strong></dt>
+</dl>
+<dl><dt><strong>supports_tab_control</strong></dt>
+</dl>
+<dl><dt><strong>supports_tracing</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a>:<br>
+<dl><dt><a name="DesktopMandolineBackend-DumpMemory"><strong>DumpMemory</strong></a>(self, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="DesktopMandolineBackend-GetSystemInfo"><strong>GetSystemInfo</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopMandolineBackend-IsAppRunning"><strong>IsAppRunning</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopMandolineBackend-SetBrowser"><strong>SetBrowser</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="DesktopMandolineBackend-SetMemoryPressureNotificationsSuppressed"><strong>SetMemoryPressureNotificationsSuppressed</strong></a>(self, suppressed, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="DesktopMandolineBackend-SimulateMemoryPressureNotification"><strong>SimulateMemoryPressureNotification</strong></a>(self, pressure_level, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="DesktopMandolineBackend-StartTracing"><strong>StartTracing</strong></a>(self, trace_options, custom_categories<font color="#909090">=None</font>, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="DesktopMandolineBackend-StopTracing"><strong>StopTracing</strong></a>(self, trace_data_builder)</dt></dl>
+
+<dl><dt><a name="DesktopMandolineBackend-UploadLogsToCloudStorage"><strong>UploadLogsToCloudStorage</strong></a>(self)</dt><dd><tt>Uploading&nbsp;log&nbsp;files&nbsp;produce&nbsp;by&nbsp;this&nbsp;browser&nbsp;instance&nbsp;to&nbsp;cloud&nbsp;storage.<br>
+&nbsp;<br>
+Check&nbsp;supports_uploading_logs&nbsp;before&nbsp;calling&nbsp;this&nbsp;method.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a>:<br>
+<dl><dt><strong>browser</strong></dt>
+</dl>
+<dl><dt><strong>browser_type</strong></dt>
+</dl>
+<dl><dt><strong>log_file_path</strong></dt>
+</dl>
+<dl><dt><strong>profiling_controller_backend</strong></dt>
+</dl>
+<dl><dt><strong>should_ignore_certificate_errors</strong></dt>
+</dl>
+<dl><dt><strong>supports_extensions</strong></dt>
+<dd><tt>True&nbsp;if&nbsp;this&nbsp;browser&nbsp;backend&nbsp;supports&nbsp;extensions.</tt></dd>
+</dl>
+<dl><dt><strong>supports_memory_dumping</strong></dt>
+</dl>
+<dl><dt><strong>supports_overriding_memory_pressure_notifications</strong></dt>
+</dl>
+<dl><dt><strong>supports_uploading_logs</strong></dt>
+</dl>
+<dl><dt><strong>tab_list_backend</strong></dt>
+</dl>
+<dl><dt><strong>wpr_mode</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a>:<br>
+<dl><dt><a name="DesktopMandolineBackend-SetApp"><strong>SetApp</strong></a>(self, app)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>app</strong></dt>
+</dl>
+<dl><dt><strong>app_type</strong></dt>
+</dl>
+<dl><dt><strong>platform_backend</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.mandoline.desktop_mandoline_finder.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.mandoline.desktop_mandoline_finder.html
new file mode 100644
index 0000000..5b881aa
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.mandoline.desktop_mandoline_finder.html
@@ -0,0 +1,117 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.mandoline.desktop_mandoline_finder</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.mandoline.html"><font color="#ffffff">mandoline</font></a>.desktop_mandoline_finder</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/mandoline/desktop_mandoline_finder.py">telemetry/internal/backends/mandoline/desktop_mandoline_finder.py</a></font></td></tr></table>
+    <p><tt>Finds&nbsp;desktop&nbsp;mandoline&nbsp;browsers&nbsp;that&nbsp;can&nbsp;be&nbsp;controlled&nbsp;by&nbsp;telemetry.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.browser.browser.html">telemetry.internal.browser.browser</a><br>
+<a href="telemetry.internal.platform.desktop_device.html">telemetry.internal.platform.desktop_device</a><br>
+<a href="telemetry.internal.backends.mandoline.desktop_mandoline_backend.html">telemetry.internal.backends.mandoline.desktop_mandoline_backend</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+<a href="logging.html">logging</a><br>
+<a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.util.path.html">telemetry.internal.util.path</a><br>
+<a href="telemetry.core.platform.html">telemetry.core.platform</a><br>
+<a href="telemetry.internal.browser.possible_browser.html">telemetry.internal.browser.possible_browser</a><br>
+</td><td width="25%" valign=top><a href="sys.html">sys</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a>(<a href="telemetry.internal.app.possible_app.html#PossibleApp">telemetry.internal.app.possible_app.PossibleApp</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.mandoline.desktop_mandoline_finder.html#PossibleDesktopMandolineBrowser">PossibleDesktopMandolineBrowser</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PossibleDesktopMandolineBrowser">class <strong>PossibleDesktopMandolineBrowser</strong></a>(<a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;desktop&nbsp;mandoline&nbsp;browser&nbsp;that&nbsp;can&nbsp;be&nbsp;controlled.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.mandoline.desktop_mandoline_finder.html#PossibleDesktopMandolineBrowser">PossibleDesktopMandolineBrowser</a></dd>
+<dd><a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a></dd>
+<dd><a href="telemetry.internal.app.possible_app.html#PossibleApp">telemetry.internal.app.possible_app.PossibleApp</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="PossibleDesktopMandolineBrowser-Create"><strong>Create</strong></a>(self, finder_options)</dt></dl>
+
+<dl><dt><a name="PossibleDesktopMandolineBrowser-SupportsOptions"><strong>SupportsOptions</strong></a>(self, finder_options)</dt></dl>
+
+<dl><dt><a name="PossibleDesktopMandolineBrowser-UpdateExecutableIfNeeded"><strong>UpdateExecutableIfNeeded</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleDesktopMandolineBrowser-__init__"><strong>__init__</strong></a>(self, browser_type, finder_options, executable, browser_directory)</dt></dl>
+
+<dl><dt><a name="PossibleDesktopMandolineBrowser-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleDesktopMandolineBrowser-last_modification_time"><strong>last_modification_time</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a>:<br>
+<dl><dt><a name="PossibleDesktopMandolineBrowser-IsRemote"><strong>IsRemote</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleDesktopMandolineBrowser-RunRemote"><strong>RunRemote</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleDesktopMandolineBrowser-SetCredentialsPath"><strong>SetCredentialsPath</strong></a>(self, credentials_path)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a>:<br>
+<dl><dt><strong>browser_type</strong></dt>
+</dl>
+<dl><dt><strong>supports_tab_control</strong></dt>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.app.possible_app.html#PossibleApp">telemetry.internal.app.possible_app.PossibleApp</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>app_type</strong></dt>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+</dl>
+<dl><dt><strong>target_os</strong></dt>
+<dd><tt>Target&nbsp;OS,&nbsp;the&nbsp;app&nbsp;will&nbsp;run&nbsp;on.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-CanFindAvailableBrowsers"><strong>CanFindAvailableBrowsers</strong></a>()</dt></dl>
+ <dl><dt><a name="-CanPossiblyHandlePath"><strong>CanPossiblyHandlePath</strong></a>(target_path)</dt></dl>
+ <dl><dt><a name="-FindAllAvailableBrowsers"><strong>FindAllAvailableBrowsers</strong></a>(finder_options, device)</dt><dd><tt>Finds&nbsp;all&nbsp;the&nbsp;desktop&nbsp;mandoline&nbsp;browsers&nbsp;available&nbsp;on&nbsp;this&nbsp;machine.</tt></dd></dl>
+ <dl><dt><a name="-FindAllBrowserTypes"><strong>FindAllBrowserTypes</strong></a>(_)</dt></dl>
+ <dl><dt><a name="-SelectDefaultBrowser"><strong>SelectDefaultBrowser</strong></a>(possible_browsers)</dt></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
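The finder module listed above exposes module-level entry points — CanFindAvailableBrowsers(), FindAllAvailableBrowsers(finder_options, device) and SelectDefaultBrowser(possible_browsers) — alongside the PossibleDesktopMandolineBrowser class. A minimal sketch of how those calls might be chained, assuming finder_options is a standard Telemetry BrowserFinderOptions instance and that DesktopDevice takes no constructor arguments (both assumptions, not confirmed by this listing):

# Sketch only: the no-argument DesktopDevice() constructor and the shape of
# finder_options are assumptions; the function signatures come from the
# generated listing above.
from telemetry.internal.backends.mandoline import desktop_mandoline_finder
from telemetry.internal.platform import desktop_device


def find_default_mandoline_browser(finder_options):
  # Bail out early on platforms where this finder cannot run at all.
  if not desktop_mandoline_finder.CanFindAvailableBrowsers():
    return None
  device = desktop_device.DesktopDevice()  # assumed no-arg constructor
  possible_browsers = desktop_mandoline_finder.FindAllAvailableBrowsers(
      finder_options, device)
  # Picks a reasonable default PossibleDesktopMandolineBrowser, or None.
  return desktop_mandoline_finder.SelectDefaultBrowser(possible_browsers)

The returned PossibleDesktopMandolineBrowser could then be handed to its documented Create(finder_options) method to obtain a live browser instance.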
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.mandoline.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.mandoline.html
new file mode 100644
index 0000000..05bed22
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.mandoline.html
@@ -0,0 +1,33 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.internal.backends.mandoline</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.mandoline</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/mandoline/__init__.py">telemetry/internal/backends/mandoline/__init__.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.backends.mandoline.android.html">android</a><br>
+<a href="telemetry.internal.backends.mandoline.android_mandoline_backend.html">android_mandoline_backend</a><br>
+<a href="telemetry.internal.backends.mandoline.android_mandoline_finder.html">android_mandoline_finder</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.backends.mandoline.config.html">config</a><br>
+<a href="telemetry.internal.backends.mandoline.desktop_mandoline_backend.html">desktop_mandoline_backend</a><br>
+<a href="telemetry.internal.backends.mandoline.desktop_mandoline_finder.html">desktop_mandoline_finder</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.backends.mandoline.desktop_mandoline_finder_unittest.html">desktop_mandoline_finder_unittest</a><br>
+<a href="telemetry.internal.backends.mandoline.mandoline_browser_backend.html">mandoline_browser_backend</a><br>
+<a href="telemetry.internal.backends.mandoline.paths.html">paths</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.mandoline.mandoline_browser_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.mandoline.mandoline_browser_backend.html
new file mode 100644
index 0000000..cfb90e8
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.mandoline.mandoline_browser_backend.html
@@ -0,0 +1,175 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.mandoline.mandoline_browser_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.mandoline.html"><font color="#ffffff">mandoline</font></a>.mandoline_browser_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/mandoline/mandoline_browser_backend.py">telemetry/internal/backends/mandoline/mandoline_browser_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.backends.browser_backend.html">telemetry.internal.backends.browser_backend</a><br>
+<a href="telemetry.internal.backends.chrome_inspector.devtools_client_backend.html">telemetry.internal.backends.chrome_inspector.devtools_client_backend</a><br>
+<a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.forwarders.html">telemetry.internal.forwarders</a><br>
+<a href="logging.html">logging</a><br>
+<a href="re.html">re</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.backends.chrome.tab_list_backend.html">telemetry.internal.backends.chrome.tab_list_backend</a><br>
+<a href="telemetry.core.util.html">telemetry.core.util</a><br>
+<a href="telemetry.util.wpr_modes.html">telemetry.util.wpr_modes</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a>(<a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.mandoline.mandoline_browser_backend.html#MandolineBrowserBackend">MandolineBrowserBackend</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MandolineBrowserBackend">class <strong>MandolineBrowserBackend</strong></a>(<a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>An&nbsp;abstract&nbsp;class&nbsp;for&nbsp;mandoline&nbsp;browser&nbsp;backends.&nbsp;Provides&nbsp;basic<br>
+functionality&nbsp;once&nbsp;a&nbsp;remote-debugger&nbsp;port&nbsp;has&nbsp;been&nbsp;established.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.mandoline.mandoline_browser_backend.html#MandolineBrowserBackend">MandolineBrowserBackend</a></dd>
+<dd><a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a></dd>
+<dd><a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="MandolineBrowserBackend-Close"><strong>Close</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MandolineBrowserBackend-GetBrowserStartupArgs"><strong>GetBrowserStartupArgs</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MandolineBrowserBackend-GetProcessName"><strong>GetProcessName</strong></a>(self, cmd_line)</dt><dd><tt>Returns&nbsp;a&nbsp;user-friendly&nbsp;name&nbsp;for&nbsp;the&nbsp;process&nbsp;of&nbsp;the&nbsp;given&nbsp;|cmd_line|.</tt></dd></dl>
+
+<dl><dt><a name="MandolineBrowserBackend-GetReplayBrowserStartupArgs"><strong>GetReplayBrowserStartupArgs</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MandolineBrowserBackend-HasBrowserFinishedLaunching"><strong>HasBrowserFinishedLaunching</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MandolineBrowserBackend-__init__"><strong>__init__</strong></a>(self, platform_backend, browser_options)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>browser_directory</strong></dt>
+</dl>
+<dl><dt><strong>devtools_client</strong></dt>
+</dl>
+<dl><dt><strong>profile_directory</strong></dt>
+</dl>
+<dl><dt><strong>supports_cpu_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_memory_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_power_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_system_info</strong></dt>
+</dl>
+<dl><dt><strong>supports_tab_control</strong></dt>
+</dl>
+<dl><dt><strong>supports_tracing</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a>:<br>
+<dl><dt><a name="MandolineBrowserBackend-DumpMemory"><strong>DumpMemory</strong></a>(self, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="MandolineBrowserBackend-GetStackTrace"><strong>GetStackTrace</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MandolineBrowserBackend-GetStandardOutput"><strong>GetStandardOutput</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MandolineBrowserBackend-GetSystemInfo"><strong>GetSystemInfo</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MandolineBrowserBackend-IsAppRunning"><strong>IsAppRunning</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MandolineBrowserBackend-IsBrowserRunning"><strong>IsBrowserRunning</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MandolineBrowserBackend-SetBrowser"><strong>SetBrowser</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="MandolineBrowserBackend-SetMemoryPressureNotificationsSuppressed"><strong>SetMemoryPressureNotificationsSuppressed</strong></a>(self, suppressed, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="MandolineBrowserBackend-SimulateMemoryPressureNotification"><strong>SimulateMemoryPressureNotification</strong></a>(self, pressure_level, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="MandolineBrowserBackend-Start"><strong>Start</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MandolineBrowserBackend-StartTracing"><strong>StartTracing</strong></a>(self, trace_options, custom_categories<font color="#909090">=None</font>, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="MandolineBrowserBackend-StopTracing"><strong>StopTracing</strong></a>(self, trace_data_builder)</dt></dl>
+
+<dl><dt><a name="MandolineBrowserBackend-UploadLogsToCloudStorage"><strong>UploadLogsToCloudStorage</strong></a>(self)</dt><dd><tt>Uploading&nbsp;log&nbsp;files&nbsp;produce&nbsp;by&nbsp;this&nbsp;browser&nbsp;instance&nbsp;to&nbsp;cloud&nbsp;storage.<br>
+&nbsp;<br>
+Check&nbsp;supports_uploading_logs&nbsp;before&nbsp;calling&nbsp;this&nbsp;method.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.browser_backend.html#BrowserBackend">telemetry.internal.backends.browser_backend.BrowserBackend</a>:<br>
+<dl><dt><strong>browser</strong></dt>
+</dl>
+<dl><dt><strong>browser_type</strong></dt>
+</dl>
+<dl><dt><strong>log_file_path</strong></dt>
+</dl>
+<dl><dt><strong>profiling_controller_backend</strong></dt>
+</dl>
+<dl><dt><strong>should_ignore_certificate_errors</strong></dt>
+</dl>
+<dl><dt><strong>supports_extensions</strong></dt>
+<dd><tt>True&nbsp;if&nbsp;this&nbsp;browser&nbsp;backend&nbsp;supports&nbsp;extensions.</tt></dd>
+</dl>
+<dl><dt><strong>supports_memory_dumping</strong></dt>
+</dl>
+<dl><dt><strong>supports_overriding_memory_pressure_notifications</strong></dt>
+</dl>
+<dl><dt><strong>supports_uploading_logs</strong></dt>
+</dl>
+<dl><dt><strong>tab_list_backend</strong></dt>
+</dl>
+<dl><dt><strong>wpr_mode</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a>:<br>
+<dl><dt><a name="MandolineBrowserBackend-SetApp"><strong>SetApp</strong></a>(self, app)</dt></dl>
+
+<dl><dt><a name="MandolineBrowserBackend-__del__"><strong>__del__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.backends.app_backend.html#AppBackend">telemetry.internal.backends.app_backend.AppBackend</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>app</strong></dt>
+</dl>
+<dl><dt><strong>app_type</strong></dt>
+</dl>
+<dl><dt><strong>pid</strong></dt>
+</dl>
+<dl><dt><strong>platform_backend</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.mandoline.paths.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.mandoline.paths.html
new file mode 100644
index 0000000..839c9c1
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.mandoline.paths.html
@@ -0,0 +1,64 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.mandoline.paths</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.mandoline.html"><font color="#ffffff">mandoline</font></a>.paths</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/mandoline/paths.py">telemetry/internal/backends/mandoline/paths.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="os.html">os</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.mandoline.paths.html#Paths">Paths</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Paths">class <strong>Paths</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Provides&nbsp;commonly&nbsp;used&nbsp;paths<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="Paths-RelPath"><strong>RelPath</strong></a>(self, path)</dt><dd><tt>Returns&nbsp;the&nbsp;given&nbsp;path,&nbsp;relative&nbsp;to&nbsp;the&nbsp;current&nbsp;directory.</tt></dd></dl>
+
+<dl><dt><a name="Paths-SrcRelPath"><strong>SrcRelPath</strong></a>(self, path)</dt><dd><tt>Returns&nbsp;the&nbsp;given&nbsp;path,&nbsp;relative&nbsp;to&nbsp;self.<strong>src_root</strong>.</tt></dd></dl>
+
+<dl><dt><a name="Paths-__init__"><strong>__init__</strong></a>(self, config, chrome_root)</dt><dd><tt>Generate&nbsp;paths&nbsp;to&nbsp;binary&nbsp;artifacts&nbsp;from&nbsp;a&nbsp;Config&nbsp;<a href="__builtin__.html#object">object</a>.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.remote.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.remote.html
new file mode 100644
index 0000000..76f7e88
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.remote.html
@@ -0,0 +1,26 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.internal.backends.remote</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.remote</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/remote/__init__.py">telemetry/internal/backends/remote/__init__.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.backends.remote.trybot_browser_finder.html">trybot_browser_finder</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.backends.remote.trybot_browser_finder_unittest.html">trybot_browser_finder_unittest</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.backends.remote.trybot_browser_finder.html b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.remote.trybot_browser_finder.html
new file mode 100644
index 0000000..9e5b6c5
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.backends.remote.trybot_browser_finder.html
@@ -0,0 +1,200 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.backends.remote.trybot_browser_finder</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.backends.html"><font color="#ffffff">backends</font></a>.<a href="telemetry.internal.backends.remote.html"><font color="#ffffff">remote</font></a>.trybot_browser_finder</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/backends/remote/trybot_browser_finder.py">telemetry/internal/backends/remote/trybot_browser_finder.py</a></font></td></tr></table>
+    <p><tt>Finds&nbsp;perf&nbsp;trybots&nbsp;that&nbsp;can&nbsp;run&nbsp;telemetry&nbsp;tests.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.decorators.html">telemetry.decorators</a><br>
+<a href="json.html">json</a><br>
+<a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+<a href="telemetry.core.platform.html">telemetry.core.platform</a><br>
+<a href="telemetry.internal.browser.possible_browser.html">telemetry.internal.browser.possible_browser</a><br>
+</td><td width="25%" valign=top><a href="re.html">re</a><br>
+<a href="subprocess.html">subprocess</a><br>
+<a href="sys.html">sys</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.trybot_device.html">telemetry.internal.platform.trybot_device</a><br>
+<a href="urllib2.html">urllib2</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.remote.trybot_browser_finder.html#TrybotError">TrybotError</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a>(<a href="telemetry.internal.app.possible_app.html#PossibleApp">telemetry.internal.app.possible_app.PossibleApp</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.backends.remote.trybot_browser_finder.html#PossibleTrybotBrowser">PossibleTrybotBrowser</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PossibleTrybotBrowser">class <strong>PossibleTrybotBrowser</strong></a>(<a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;script&nbsp;that&nbsp;sends&nbsp;a&nbsp;job&nbsp;to&nbsp;a&nbsp;trybot.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.remote.trybot_browser_finder.html#PossibleTrybotBrowser">PossibleTrybotBrowser</a></dd>
+<dd><a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a></dd>
+<dd><a href="telemetry.internal.app.possible_app.html#PossibleApp">telemetry.internal.app.possible_app.PossibleApp</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="PossibleTrybotBrowser-Create"><strong>Create</strong></a>(self, finder_options)</dt></dl>
+
+<dl><dt><a name="PossibleTrybotBrowser-IsRemote"><strong>IsRemote</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleTrybotBrowser-RunRemote"><strong>RunRemote</strong></a>(self)</dt><dd><tt>Sends&nbsp;a&nbsp;tryjob&nbsp;to&nbsp;a&nbsp;perf&nbsp;trybot.<br>
+&nbsp;<br>
+This&nbsp;creates&nbsp;a&nbsp;branch,&nbsp;telemetry-tryjob,&nbsp;switches&nbsp;to&nbsp;that&nbsp;branch,&nbsp;edits<br>
+the&nbsp;bisect&nbsp;config,&nbsp;commits&nbsp;it,&nbsp;uploads&nbsp;the&nbsp;CL&nbsp;to&nbsp;rietveld,&nbsp;and&nbsp;runs&nbsp;a<br>
+tryjob&nbsp;on&nbsp;the&nbsp;given&nbsp;bot.</tt></dd></dl>
+
+<dl><dt><a name="PossibleTrybotBrowser-SupportsOptions"><strong>SupportsOptions</strong></a>(self, finder_options)</dt></dl>
+
+<dl><dt><a name="PossibleTrybotBrowser-__init__"><strong>__init__</strong></a>(self, browser_type, _)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a>:<br>
+<dl><dt><a name="PossibleTrybotBrowser-SetCredentialsPath"><strong>SetCredentialsPath</strong></a>(self, credentials_path)</dt></dl>
+
+<dl><dt><a name="PossibleTrybotBrowser-UpdateExecutableIfNeeded"><strong>UpdateExecutableIfNeeded</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleTrybotBrowser-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleTrybotBrowser-last_modification_time"><strong>last_modification_time</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">telemetry.internal.browser.possible_browser.PossibleBrowser</a>:<br>
+<dl><dt><strong>browser_type</strong></dt>
+</dl>
+<dl><dt><strong>supports_tab_control</strong></dt>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.app.possible_app.html#PossibleApp">telemetry.internal.app.possible_app.PossibleApp</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>app_type</strong></dt>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+</dl>
+<dl><dt><strong>target_os</strong></dt>
+<dd><tt>Target&nbsp;OS,&nbsp;the&nbsp;app&nbsp;will&nbsp;run&nbsp;on.</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TrybotError">class <strong>TrybotError</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.backends.remote.trybot_browser_finder.html#TrybotError">TrybotError</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="TrybotError-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="TrybotError-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#TrybotError-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#TrybotError-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="TrybotError-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#TrybotError-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="TrybotError-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#TrybotError-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="TrybotError-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#TrybotError-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="TrybotError-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#TrybotError-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="TrybotError-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="TrybotError-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#TrybotError-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="TrybotError-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#TrybotError-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="TrybotError-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="TrybotError-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-CanFindAvailableBrowsers"><strong>CanFindAvailableBrowsers</strong></a>()</dt></dl>
+ <dl><dt><a name="-FindAllAvailableBrowsers"><strong>FindAllAvailableBrowsers</strong></a>(finder_options, device)</dt><dd><tt>Find&nbsp;all&nbsp;perf&nbsp;trybots&nbsp;on&nbsp;tryserver.chromium.perf.</tt></dd></dl>
+ <dl><dt><a name="-FindAllBrowserTypes"><strong>FindAllBrowserTypes</strong></a>(finder_options)</dt></dl>
+ <dl><dt><a name="-SelectDefaultBrowser"><strong>SelectDefaultBrowser</strong></a>(_)</dt></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>BLINK_CONFIG_FILENAME</strong> = 'Tools/run-perf-test.cfg'<br>
+<strong>CHROMIUM_CONFIG_FILENAME</strong> = 'tools/run-perf-test.cfg'<br>
+<strong>ERROR</strong> = 2<br>
+<strong>EXCLUDED_BOTS</strong> = set(['android_arm64_perf_bisect_builder', 'android_perf_bisect_builder', 'linux_perf_bisect_builder', 'linux_perf_bisector', 'linux_perf_tester', 'mac_perf_bisect_builder', ...])<br>
+<strong>INCLUDE_BOTS</strong> = ['trybot-all', 'trybot-all-win', 'trybot-all-mac', 'trybot-all-linux', 'trybot-all-android']<br>
+<strong>NO_CHANGES</strong> = 1<br>
+<strong>SUCCESS</strong> = 0</td></tr></table>
+</body></html>
\ No newline at end of file
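Per the listing above, PossibleTrybotBrowser is constructed from a browser_type string plus a second, ignored argument, and its RunRemote method drives the whole tryjob flow (branch, bisect config edit, rietveld upload, tryjob). A hedged sketch, assuming the 'trybot-all-linux' value from INCLUDE_BOTS is an accepted browser_type and that the call is made from inside a Chromium checkout with depot_tools available (environmental assumptions, not documented here):

# Sketch only: the browser_type string and the surrounding checkout/depot_tools
# environment are assumptions; constructor and method signatures come from the
# generated listing above.
import logging

from telemetry.internal.backends.remote import trybot_browser_finder


def send_perf_tryjob_to_all_linux_bots():
  # The second constructor argument is unused (named "_" in the listing).
  browser = trybot_browser_finder.PossibleTrybotBrowser(
      'trybot-all-linux', None)
  if not browser.IsRemote():
    logging.error('Expected a remote (trybot) browser type.')
    return
  # RunRemote creates a telemetry-tryjob branch, edits the bisect config,
  # uploads the CL to rietveld and starts the tryjob, per its docstring.
  browser.RunRemote()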
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.browser.browser.html b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.browser.html
new file mode 100644
index 0000000..53aff2e
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.browser.html
@@ -0,0 +1,189 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.browser.browser</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.browser.html"><font color="#ffffff">browser</font></a>.browser</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/browser/browser.py">telemetry/internal/browser/browser.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.app.html">telemetry.internal.app</a><br>
+<a href="telemetry.internal.backends.browser_backend.html">telemetry.internal.backends.browser_backend</a><br>
+<a href="telemetry.internal.browser.browser_credentials.html">telemetry.internal.browser.browser_credentials</a><br>
+<a href="catapult_base.cloud_storage.html">catapult_base.cloud_storage</a><br>
+</td><td width="25%" valign=top><a href="telemetry.decorators.html">telemetry.decorators</a><br>
+<a href="telemetry.internal.util.exception_formatter.html">telemetry.internal.util.exception_formatter</a><br>
+<a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+<a href="telemetry.internal.browser.extension_dict.html">telemetry.internal.browser.extension_dict</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+<a href="telemetry.core.profiling_controller.html">telemetry.core.profiling_controller</a><br>
+<a href="sys.html">sys</a><br>
+<a href="telemetry.internal.browser.tab_list.html">telemetry.internal.browser.tab_list</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.browser.web_contents.html">telemetry.internal.browser.web_contents</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.app.html#App">telemetry.internal.app.App</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.browser.browser.html#Browser">Browser</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Browser">class <strong>Browser</strong></a>(<a href="telemetry.internal.app.html#App">telemetry.internal.app.App</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;running&nbsp;browser&nbsp;instance&nbsp;that&nbsp;can&nbsp;be&nbsp;controlled&nbsp;in&nbsp;a&nbsp;limited&nbsp;way.<br>
+&nbsp;<br>
+To&nbsp;create&nbsp;a&nbsp;browser&nbsp;instance,&nbsp;use&nbsp;browser_finder.FindBrowser.<br>
+&nbsp;<br>
+Be&nbsp;sure&nbsp;to&nbsp;clean&nbsp;up&nbsp;after&nbsp;yourself&nbsp;by&nbsp;calling&nbsp;<a href="#Browser-Close">Close</a>()&nbsp;when&nbsp;you&nbsp;are&nbsp;done&nbsp;with<br>
+the&nbsp;browser.&nbsp;Or&nbsp;better&nbsp;yet:<br>
+&nbsp;&nbsp;browser_to_create&nbsp;=&nbsp;FindBrowser(options)<br>
+&nbsp;&nbsp;with&nbsp;browser_to_create.Create(options)&nbsp;as&nbsp;browser:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;...&nbsp;do&nbsp;all&nbsp;your&nbsp;operations&nbsp;on&nbsp;browser&nbsp;here<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.browser.browser.html#Browser">Browser</a></dd>
+<dd><a href="telemetry.internal.app.html#App">telemetry.internal.app.App</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="Browser-Close"><strong>Close</strong></a>(self)</dt><dd><tt>Closes&nbsp;this&nbsp;browser.</tt></dd></dl>
+
+<dl><dt><a name="Browser-DumpMemory"><strong>DumpMemory</strong></a>(self, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="Browser-GetStackTrace"><strong>GetStackTrace</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="Browser-GetStandardOutput"><strong>GetStandardOutput</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="Browser-GetSystemInfo"><strong>GetSystemInfo</strong></a>(self)</dt><dd><tt>Returns&nbsp;low-level&nbsp;information&nbsp;about&nbsp;the&nbsp;system,&nbsp;if&nbsp;available.<br>
+&nbsp;<br>
+See&nbsp;the&nbsp;documentation&nbsp;of&nbsp;the&nbsp;SystemInfo&nbsp;class&nbsp;for&nbsp;more&nbsp;details.</tt></dd></dl>
+
+<dl><dt><a name="Browser-SetMemoryPressureNotificationsSuppressed"><strong>SetMemoryPressureNotificationsSuppressed</strong></a>(self, suppressed, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="Browser-SimulateMemoryPressureNotification"><strong>SimulateMemoryPressureNotification</strong></a>(self, pressure_level, timeout<font color="#909090">=90</font>)</dt></dl>
+
+<dl><dt><a name="Browser-__init__"><strong>__init__</strong></a>(self, backend, platform_backend, credentials_path)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>browser_type</strong></dt>
+</dl>
+<dl><dt><strong>cpu_stats</strong></dt>
+<dd><tt>Returns&nbsp;a&nbsp;dict&nbsp;of&nbsp;cpu&nbsp;statistics&nbsp;for&nbsp;the&nbsp;system.<br>
+{&nbsp;'Browser':&nbsp;{<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'CpuProcessTime':&nbsp;S,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'TotalTime':&nbsp;T<br>
+&nbsp;&nbsp;},<br>
+&nbsp;&nbsp;'Gpu':&nbsp;{<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'CpuProcessTime':&nbsp;S,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'TotalTime':&nbsp;T<br>
+&nbsp;&nbsp;},<br>
+&nbsp;&nbsp;'Renderer':&nbsp;{<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'CpuProcessTime':&nbsp;S,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'TotalTime':&nbsp;T<br>
+&nbsp;&nbsp;}<br>
+}<br>
+Any&nbsp;of&nbsp;the&nbsp;above&nbsp;keys&nbsp;may&nbsp;be&nbsp;missing&nbsp;on&nbsp;a&nbsp;per-platform&nbsp;basis.</tt></dd>
+</dl>
+<dl><dt><strong>extensions</strong></dt>
+</dl>
+<dl><dt><strong>foreground_tab</strong></dt>
+</dl>
+<dl><dt><strong>memory_stats</strong></dt>
+<dd><tt>Returns&nbsp;a&nbsp;dict&nbsp;of&nbsp;memory&nbsp;statistics&nbsp;for&nbsp;the&nbsp;browser:<br>
+{&nbsp;'Browser':&nbsp;{<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'VM':&nbsp;R,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'VMPeak':&nbsp;S,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'WorkingSetSize':&nbsp;T,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'WorkingSetSizePeak':&nbsp;U,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'ProportionalSetSize':&nbsp;V,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'PrivateDirty':&nbsp;W<br>
+&nbsp;&nbsp;},<br>
+&nbsp;&nbsp;'Gpu':&nbsp;{<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'VM':&nbsp;R,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'VMPeak':&nbsp;S,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'WorkingSetSize':&nbsp;T,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'WorkingSetSizePeak':&nbsp;U,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'ProportionalSetSize':&nbsp;V,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'PrivateDirty':&nbsp;W<br>
+&nbsp;&nbsp;},<br>
+&nbsp;&nbsp;'Renderer':&nbsp;{<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'VM':&nbsp;R,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'VMPeak':&nbsp;S,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'WorkingSetSize':&nbsp;T,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'WorkingSetSizePeak':&nbsp;U,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'ProportionalSetSize':&nbsp;V,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'PrivateDirty':&nbsp;W<br>
+&nbsp;&nbsp;},<br>
+&nbsp;&nbsp;'SystemCommitCharge':&nbsp;X,<br>
+&nbsp;&nbsp;'SystemTotalPhysicalMemory':&nbsp;Y,<br>
+&nbsp;&nbsp;'ProcessCount':&nbsp;Z,<br>
+}<br>
+Any&nbsp;of&nbsp;the&nbsp;above&nbsp;keys&nbsp;may&nbsp;be&nbsp;missing&nbsp;on&nbsp;a&nbsp;per-platform&nbsp;basis.</tt></dd>
+</dl>
+<dl><dt><strong>profiling_controller</strong></dt>
+</dl>
+<dl><dt><strong>supports_cpu_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_extensions</strong></dt>
+</dl>
+<dl><dt><strong>supports_memory_dumping</strong></dt>
+</dl>
+<dl><dt><strong>supports_memory_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_overriding_memory_pressure_notifications</strong></dt>
+</dl>
+<dl><dt><strong>supports_power_metrics</strong></dt>
+</dl>
+<dl><dt><strong>supports_system_info</strong></dt>
+</dl>
+<dl><dt><strong>supports_tab_control</strong></dt>
+</dl>
+<dl><dt><strong>tabs</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.app.html#App">telemetry.internal.app.App</a>:<br>
+<dl><dt><a name="Browser-__enter__"><strong>__enter__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="Browser-__exit__"><strong>__exit__</strong></a>(self, *args)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.app.html#App">telemetry.internal.app.App</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>app_type</strong></dt>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
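The Browser docstring above already outlines the intended lifecycle (FindBrowser, Create used as a context manager, Close on exit). A minimal Python sketch of that pattern, using only names documented in these pydoc pages; the browser_type value and the option-population step are illustrative assumptions, not something this diff prescribes:

    from telemetry.internal.browser import browser_finder
    from telemetry.internal.browser import browser_options

    # Hypothetical minimal setup: a real harness populates these options from the
    # command line via options.CreateParser(); 'any' is an assumed browser_type.
    options = browser_options.BrowserFinderOptions(browser_type='any')

    possible_browser = browser_finder.FindBrowser(options)    # returns a PossibleBrowser
    with possible_browser.Create(options) as browser:
        if browser.supports_system_info:
            system_info = browser.GetSystemInfo()             # low-level system details, when available
        cpu = browser.cpu_stats                               # per-process CPU stats; keys may be missing per platform
    # Cleanup is handled by the context manager, matching the docstring's advice to call Close().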
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.browser.browser_credentials.html b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.browser_credentials.html
new file mode 100644
index 0000000..5933292
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.browser_credentials.html
@@ -0,0 +1,147 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.browser.browser_credentials</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.browser.html"><font color="#ffffff">browser</font></a>.browser_credentials</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/browser/browser_credentials.py">telemetry/internal/browser/browser_credentials.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.backends.codepen_credentials_backend.html">telemetry.internal.backends.codepen_credentials_backend</a><br>
+<a href="telemetry.internal.backends.facebook_credentials_backend.html">telemetry.internal.backends.facebook_credentials_backend</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.backends.google_credentials_backend.html">telemetry.internal.backends.google_credentials_backend</a><br>
+<a href="json.html">json</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+<a href="telemetry.testing.options_for_unittests.html">telemetry.testing.options_for_unittests</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+<a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.browser.browser_credentials.html#BrowserCredentials">BrowserCredentials</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.browser.browser_credentials.html#CredentialsError">CredentialsError</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="BrowserCredentials">class <strong>BrowserCredentials</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="BrowserCredentials-Add"><strong>Add</strong></a>(self, credentials_type, data)</dt></dl>
+
+<dl><dt><a name="BrowserCredentials-AddBackend"><strong>AddBackend</strong></a>(self, backend)</dt></dl>
+
+<dl><dt><a name="BrowserCredentials-CanLogin"><strong>CanLogin</strong></a>(self, credentials_type)</dt></dl>
+
+<dl><dt><a name="BrowserCredentials-IsLoggedIn"><strong>IsLoggedIn</strong></a>(self, credentials_type)</dt></dl>
+
+<dl><dt><a name="BrowserCredentials-LoginNeeded"><strong>LoginNeeded</strong></a>(self, tab, credentials_type)</dt></dl>
+
+<dl><dt><a name="BrowserCredentials-LoginNoLongerNeeded"><strong>LoginNoLongerNeeded</strong></a>(self, tab, credentials_type)</dt></dl>
+
+<dl><dt><a name="BrowserCredentials-WarnIfMissingCredentials"><strong>WarnIfMissingCredentials</strong></a>(self, page)</dt></dl>
+
+<dl><dt><a name="BrowserCredentials-__init__"><strong>__init__</strong></a>(self, backends<font color="#909090">=None</font>)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>credentials_path</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="CredentialsError">class <strong>CredentialsError</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Error&nbsp;that&nbsp;can&nbsp;be&nbsp;thrown&nbsp;when&nbsp;logging&nbsp;in.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.browser.browser_credentials.html#CredentialsError">CredentialsError</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="CredentialsError-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#CredentialsError-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#CredentialsError-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="CredentialsError-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#CredentialsError-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="CredentialsError-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#CredentialsError-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="CredentialsError-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#CredentialsError-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="CredentialsError-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#CredentialsError-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="CredentialsError-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="CredentialsError-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#CredentialsError-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="CredentialsError-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#CredentialsError-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="CredentialsError-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="CredentialsError-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#CredentialsError-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="CredentialsError-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.browser.browser_finder.html b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.browser_finder.html
new file mode 100644
index 0000000..77c1c86
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.browser_finder.html
@@ -0,0 +1,80 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.browser.browser_finder</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.browser.html"><font color="#ffffff">browser</font></a>.browser_finder</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/browser/browser_finder.py">telemetry/internal/browser/browser_finder.py</a></font></td></tr></table>
+    <p><tt>Finds&nbsp;browsers&nbsp;that&nbsp;can&nbsp;be&nbsp;controlled&nbsp;by&nbsp;telemetry.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.backends.chrome.android_browser_finder.html">telemetry.internal.backends.chrome.android_browser_finder</a><br>
+<a href="telemetry.internal.backends.mandoline.android_mandoline_finder.html">telemetry.internal.backends.mandoline.android_mandoline_finder</a><br>
+<a href="telemetry.internal.browser.browser_finder_exceptions.html">telemetry.internal.browser.browser_finder_exceptions</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.backends.chrome.cros_browser_finder.html">telemetry.internal.backends.chrome.cros_browser_finder</a><br>
+<a href="telemetry.decorators.html">telemetry.decorators</a><br>
+<a href="telemetry.internal.backends.chrome.desktop_browser_finder.html">telemetry.internal.backends.chrome.desktop_browser_finder</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.backends.mandoline.desktop_mandoline_finder.html">telemetry.internal.backends.mandoline.desktop_mandoline_finder</a><br>
+<a href="telemetry.internal.platform.device_finder.html">telemetry.internal.platform.device_finder</a><br>
+<a href="telemetry.internal.backends.chrome.ios_browser_finder.html">telemetry.internal.backends.chrome.ios_browser_finder</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+<a href="operator.html">operator</a><br>
+<a href="telemetry.internal.backends.remote.trybot_browser_finder.html">telemetry.internal.backends.remote.trybot_browser_finder</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-FindAllBrowserTypes"><strong>FindAllBrowserTypes</strong></a>(options)</dt></dl>
+ <dl><dt><a name="-FindBrowser"><strong>FindBrowser</strong></a>(*args, **kwargs)</dt><dd><tt>Finds&nbsp;the&nbsp;best&nbsp;PossibleBrowser&nbsp;object&nbsp;given&nbsp;a&nbsp;BrowserOptions&nbsp;object.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;A&nbsp;BrowserOptions&nbsp;object.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;PossibleBrowser&nbsp;object.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;BrowserFinderException:&nbsp;Options&nbsp;improperly&nbsp;set,&nbsp;or&nbsp;an&nbsp;error&nbsp;occurred.</tt></dd></dl>
+ <dl><dt><a name="-GetAllAvailableBrowserTypes"><strong>GetAllAvailableBrowserTypes</strong></a>(*args, **kwargs)</dt><dd><tt>Returns&nbsp;a&nbsp;list&nbsp;of&nbsp;available&nbsp;browser&nbsp;types.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;options:&nbsp;A&nbsp;BrowserOptions&nbsp;object.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;list&nbsp;of&nbsp;browser&nbsp;type&nbsp;strings.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;BrowserFinderException:&nbsp;Options&nbsp;are&nbsp;improperly&nbsp;set,&nbsp;or&nbsp;an&nbsp;error&nbsp;occurred.</tt></dd></dl>
+ <dl><dt><a name="-GetAllAvailableBrowsers"><strong>GetAllAvailableBrowsers</strong></a>(*args, **kwargs)</dt><dd><tt>Returns&nbsp;a&nbsp;list&nbsp;of&nbsp;available&nbsp;browsers&nbsp;on&nbsp;the&nbsp;device.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;options:&nbsp;A&nbsp;BrowserOptions&nbsp;object.<br>
+&nbsp;&nbsp;device:&nbsp;The&nbsp;target&nbsp;device,&nbsp;which&nbsp;can&nbsp;be&nbsp;None.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;list&nbsp;of&nbsp;browser&nbsp;instances.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;BrowserFinderException:&nbsp;Options&nbsp;are&nbsp;improperly&nbsp;set,&nbsp;or&nbsp;an&nbsp;error&nbsp;occurred.</tt></dd></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>BROWSER_FINDERS</strong> = [&lt;module 'telemetry.internal.backends.chrome.desk...rnal/backends/chrome/desktop_browser_finder.pyc'&gt;, &lt;module 'telemetry.internal.backends.chrome.andr...rnal/backends/chrome/android_browser_finder.pyc'&gt;, &lt;module 'telemetry.internal.backends.chrome.cros...nternal/backends/chrome/cros_browser_finder.pyc'&gt;, &lt;module 'telemetry.internal.backends.chrome.ios_...internal/backends/chrome/ios_browser_finder.pyc'&gt;, &lt;module 'telemetry.internal.backends.remote.tryb...ernal/backends/remote/trybot_browser_finder.pyc'&gt;, &lt;module 'telemetry.internal.backends.mandoline.d...backends/mandoline/desktop_mandoline_finder.pyc'&gt;, &lt;module 'telemetry.internal.backends.mandoline.a...backends/mandoline/android_mandoline_finder.pyc'&gt;]</td></tr></table>
+</body></html>
\ No newline at end of file
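The other browser_finder entry points documented above take the same options object and raise BrowserFinderException when the options are improperly set. A short sketch, reusing the hypothetical options construction from the previous example:

    from telemetry.internal.browser import browser_finder
    from telemetry.internal.browser import browser_options

    options = browser_options.BrowserFinderOptions()
    browser_types = browser_finder.GetAllAvailableBrowserTypes(options)  # list of browser type strings
    browsers = browser_finder.GetAllAvailableBrowsers(options, None)     # second argument is the device, which may be None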
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.browser.browser_finder_exceptions.html b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.browser_finder_exceptions.html
new file mode 100644
index 0000000..f796cc5
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.browser_finder_exceptions.html
@@ -0,0 +1,149 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.browser.browser_finder_exceptions</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.browser.html"><font color="#ffffff">browser</font></a>.browser_finder_exceptions</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/browser/browser_finder_exceptions.py">telemetry/internal/browser/browser_finder_exceptions.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.browser.browser_finder_exceptions.html#BrowserFinderException">BrowserFinderException</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.internal.browser.browser_finder_exceptions.html#BrowserTypeRequiredException">BrowserTypeRequiredException</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="BrowserFinderException">class <strong>BrowserFinderException</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.browser.browser_finder_exceptions.html#BrowserFinderException">BrowserFinderException</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="BrowserFinderException-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#BrowserFinderException-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#BrowserFinderException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="BrowserFinderException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#BrowserFinderException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="BrowserFinderException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#BrowserFinderException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="BrowserFinderException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#BrowserFinderException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="BrowserFinderException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#BrowserFinderException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="BrowserFinderException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="BrowserFinderException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#BrowserFinderException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="BrowserFinderException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#BrowserFinderException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="BrowserFinderException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="BrowserFinderException-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#BrowserFinderException-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="BrowserFinderException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="BrowserTypeRequiredException">class <strong>BrowserTypeRequiredException</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.browser.browser_finder_exceptions.html#BrowserTypeRequiredException">BrowserTypeRequiredException</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="BrowserTypeRequiredException-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#BrowserTypeRequiredException-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#BrowserTypeRequiredException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="BrowserTypeRequiredException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#BrowserTypeRequiredException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="BrowserTypeRequiredException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#BrowserTypeRequiredException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="BrowserTypeRequiredException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#BrowserTypeRequiredException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="BrowserTypeRequiredException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#BrowserTypeRequiredException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTypeRequiredException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="BrowserTypeRequiredException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#BrowserTypeRequiredException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="BrowserTypeRequiredException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#BrowserTypeRequiredException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="BrowserTypeRequiredException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="BrowserTypeRequiredException-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#BrowserTypeRequiredException-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="BrowserTypeRequiredException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.browser.browser_info.html b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.browser_info.html
new file mode 100644
index 0000000..0d5c7f4
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.browser_info.html
@@ -0,0 +1,65 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.browser.browser_info</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.browser.html"><font color="#ffffff">browser</font></a>.browser_info</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/browser/browser_info.py">telemetry/internal/browser/browser_info.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.browser.browser_info.html#BrowserInfo">BrowserInfo</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="BrowserInfo">class <strong>BrowserInfo</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;wrapper&nbsp;around&nbsp;browser&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;that&nbsp;allows&nbsp;looking&nbsp;up&nbsp;infos&nbsp;of&nbsp;the<br>
+browser.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="BrowserInfo-HasDiagonalScrollingSupport"><strong>HasDiagonalScrollingSupport</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="BrowserInfo-HasFlingGestureSupport"><strong>HasFlingGestureSupport</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="BrowserInfo-HasRepeatableSynthesizeScrollGesture"><strong>HasRepeatableSynthesizeScrollGesture</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="BrowserInfo-HasWebGLSupport"><strong>HasWebGLSupport</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="BrowserInfo-__init__"><strong>__init__</strong></a>(self, browser)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>browser</strong></dt>
+</dl>
+<dl><dt><strong>browser_type</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.browser.browser_options.html b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.browser_options.html
new file mode 100644
index 0000000..c31e571
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.browser_options.html
@@ -0,0 +1,245 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.browser.browser_options</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.browser.html"><font color="#ffffff">browser</font></a>.browser_options</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/browser/browser_options.py">telemetry/internal/browser/browser_options.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.browser.browser_finder.html">telemetry.internal.browser.browser_finder</a><br>
+<a href="telemetry.internal.browser.browser_finder_exceptions.html">telemetry.internal.browser.browser_finder_exceptions</a><br>
+<a href="catapult_base.cloud_storage.html">catapult_base.cloud_storage</a><br>
+<a href="copy.html">copy</a><br>
+<a href="telemetry.internal.platform.device_finder.html">telemetry.internal.platform.device_finder</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+<a href="net_configs.html">net_configs</a><br>
+<a href="optparse.html">optparse</a><br>
+<a href="os.html">os</a><br>
+<a href="telemetry.core.platform.html">telemetry.core.platform</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.browser.profile_types.html">telemetry.internal.browser.profile_types</a><br>
+<a href="telemetry.internal.platform.profiler.profiler_finder.html">telemetry.internal.platform.profiler.profiler_finder</a><br>
+<a href="shlex.html">shlex</a><br>
+<a href="socket.html">socket</a><br>
+<a href="sys.html">sys</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.util.html">telemetry.core.util</a><br>
+<a href="telemetry.util.wpr_modes.html">telemetry.util.wpr_modes</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.browser.browser_options.html#BrowserOptions">BrowserOptions</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.browser.browser_options.html#ChromeBrowserOptions">ChromeBrowserOptions</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.browser.browser_options.html#CrosBrowserOptions">CrosBrowserOptions</a>
+</font></dt></dl>
+</dd>
+</dl>
+</dd>
+</dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="optparse.html#Values">optparse.Values</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.browser.browser_options.html#BrowserFinderOptions">BrowserFinderOptions</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="BrowserFinderOptions">class <strong>BrowserFinderOptions</strong></a>(<a href="optparse.html#Values">optparse.Values</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Options&nbsp;to&nbsp;be&nbsp;used&nbsp;for&nbsp;discovering&nbsp;a&nbsp;browser.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="BrowserFinderOptions-AppendExtraBrowserArgs"><strong>AppendExtraBrowserArgs</strong></a>(self, args)</dt></dl>
+
+<dl><dt><a name="BrowserFinderOptions-Copy"><strong>Copy</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="BrowserFinderOptions-CreateParser"><strong>CreateParser</strong></a>(self, *args, **kwargs)</dt></dl>
+
+<dl><dt><a name="BrowserFinderOptions-MergeDefaultValues"><strong>MergeDefaultValues</strong></a>(self, defaults)</dt></dl>
+
+<dl><dt><a name="BrowserFinderOptions-__init__"><strong>__init__</strong></a>(self, browser_type<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="BrowserFinderOptions-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="optparse.html#Values">optparse.Values</a>:<br>
+<dl><dt><a name="BrowserFinderOptions-__cmp__"><strong>__cmp__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="BrowserFinderOptions-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="BrowserFinderOptions-ensure_value"><strong>ensure_value</strong></a>(self, attr, value)</dt></dl>
+
+<dl><dt><a name="BrowserFinderOptions-read_file"><strong>read_file</strong></a>(self, filename, mode<font color="#909090">='careful'</font>)</dt></dl>
+
+<dl><dt><a name="BrowserFinderOptions-read_module"><strong>read_module</strong></a>(self, modname, mode<font color="#909090">='careful'</font>)</dt></dl>
+
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="BrowserOptions">class <strong>BrowserOptions</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Options&nbsp;to&nbsp;be&nbsp;used&nbsp;for&nbsp;launching&nbsp;a&nbsp;browser.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="BrowserOptions-AppendExtraBrowserArgs"><strong>AppendExtraBrowserArgs</strong></a>(self, args)</dt></dl>
+
+<dl><dt><a name="BrowserOptions-IsCrosBrowserOptions"><strong>IsCrosBrowserOptions</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="BrowserOptions-UpdateFromParseResults"><strong>UpdateFromParseResults</strong></a>(self, finder_options)</dt><dd><tt>Copies&nbsp;our&nbsp;options&nbsp;from&nbsp;finder_options</tt></dd></dl>
+
+<dl><dt><a name="BrowserOptions-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="BrowserOptions-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="BrowserOptions-AddCommandLineArgs"><strong>AddCommandLineArgs</strong></a>(cls, parser)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>browser_startup_timeout</strong></dt>
+</dl>
+<dl><dt><strong>extra_browser_args</strong></dt>
+</dl>
+<dl><dt><strong>finder_options</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ChromeBrowserOptions">class <strong>ChromeBrowserOptions</strong></a>(<a href="telemetry.internal.browser.browser_options.html#BrowserOptions">BrowserOptions</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Chrome-specific&nbsp;browser&nbsp;options.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.browser.browser_options.html#ChromeBrowserOptions">ChromeBrowserOptions</a></dd>
+<dd><a href="telemetry.internal.browser.browser_options.html#BrowserOptions">BrowserOptions</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="ChromeBrowserOptions-__init__"><strong>__init__</strong></a>(self, br_options)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.browser.browser_options.html#BrowserOptions">BrowserOptions</a>:<br>
+<dl><dt><a name="ChromeBrowserOptions-AppendExtraBrowserArgs"><strong>AppendExtraBrowserArgs</strong></a>(self, args)</dt></dl>
+
+<dl><dt><a name="ChromeBrowserOptions-IsCrosBrowserOptions"><strong>IsCrosBrowserOptions</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ChromeBrowserOptions-UpdateFromParseResults"><strong>UpdateFromParseResults</strong></a>(self, finder_options)</dt><dd><tt>Copies&nbsp;our&nbsp;options&nbsp;from&nbsp;finder_options</tt></dd></dl>
+
+<dl><dt><a name="ChromeBrowserOptions-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.browser.browser_options.html#BrowserOptions">BrowserOptions</a>:<br>
+<dl><dt><a name="ChromeBrowserOptions-AddCommandLineArgs"><strong>AddCommandLineArgs</strong></a>(cls, parser)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.browser.browser_options.html#BrowserOptions">BrowserOptions</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>browser_startup_timeout</strong></dt>
+</dl>
+<dl><dt><strong>extra_browser_args</strong></dt>
+</dl>
+<dl><dt><strong>finder_options</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="CrosBrowserOptions">class <strong>CrosBrowserOptions</strong></a>(<a href="telemetry.internal.browser.browser_options.html#ChromeBrowserOptions">ChromeBrowserOptions</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>ChromeOS-specific&nbsp;browser&nbsp;options.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.browser.browser_options.html#CrosBrowserOptions">CrosBrowserOptions</a></dd>
+<dd><a href="telemetry.internal.browser.browser_options.html#ChromeBrowserOptions">ChromeBrowserOptions</a></dd>
+<dd><a href="telemetry.internal.browser.browser_options.html#BrowserOptions">BrowserOptions</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="CrosBrowserOptions-IsCrosBrowserOptions"><strong>IsCrosBrowserOptions</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrosBrowserOptions-__init__"><strong>__init__</strong></a>(self, br_options)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.browser.browser_options.html#BrowserOptions">BrowserOptions</a>:<br>
+<dl><dt><a name="CrosBrowserOptions-AppendExtraBrowserArgs"><strong>AppendExtraBrowserArgs</strong></a>(self, args)</dt></dl>
+
+<dl><dt><a name="CrosBrowserOptions-UpdateFromParseResults"><strong>UpdateFromParseResults</strong></a>(self, finder_options)</dt><dd><tt>Copies&nbsp;our&nbsp;options&nbsp;from&nbsp;finder_options</tt></dd></dl>
+
+<dl><dt><a name="CrosBrowserOptions-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.browser.browser_options.html#BrowserOptions">BrowserOptions</a>:<br>
+<dl><dt><a name="CrosBrowserOptions-AddCommandLineArgs"><strong>AddCommandLineArgs</strong></a>(cls, parser)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.browser.browser_options.html#BrowserOptions">BrowserOptions</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>browser_startup_timeout</strong></dt>
+</dl>
+<dl><dt><strong>extra_browser_args</strong></dt>
+</dl>
+<dl><dt><strong>finder_options</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-CreateChromeBrowserOptions"><strong>CreateChromeBrowserOptions</strong></a>(br_options)</dt></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
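BrowserOptions (as opposed to BrowserFinderOptions) carries per-launch settings such as extra_browser_args. A sketch of appending launch flags; the flag itself and the list-of-strings argument form are only examples assumed for illustration:

    from telemetry.internal.browser import browser_options

    per_browser = browser_options.BrowserOptions()
    per_browser.AppendExtraBrowserArgs(['--enable-logging'])  # example flag; a list of argument strings is assumed
    # The appended flags are then visible through the extra_browser_args descriptor documented above.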
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.browser.extension_dict.html b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.extension_dict.html
new file mode 100644
index 0000000..ea84cab
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.extension_dict.html
@@ -0,0 +1,70 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.browser.extension_dict</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.browser.html"><font color="#ffffff">browser</font></a>.extension_dict</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/browser/extension_dict.py">telemetry/internal/browser/extension_dict.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.browser.extension_to_load.html">telemetry.internal.browser.extension_to_load</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.browser.extension_dict.html#ExtensionDict">ExtensionDict</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ExtensionDict">class <strong>ExtensionDict</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Dictionary&nbsp;of&nbsp;ExtensionPage&nbsp;instances,&nbsp;with&nbsp;extension_id&nbsp;as&nbsp;key.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="ExtensionDict-GetByExtensionId"><strong>GetByExtensionId</strong></a>(self, extension_id)</dt><dd><tt>Returns&nbsp;a&nbsp;list&nbsp;of&nbsp;extensions&nbsp;given&nbsp;an&nbsp;extension&nbsp;id.&nbsp;This&nbsp;is&nbsp;useful&nbsp;for<br>
+connecting&nbsp;to&nbsp;built-in&nbsp;apps&nbsp;and&nbsp;component&nbsp;extensions.</tt></dd></dl>
+
+<dl><dt><a name="ExtensionDict-__contains__"><strong>__contains__</strong></a>(self, load_extension)</dt><dd><tt>Checks&nbsp;if&nbsp;this&nbsp;ExtensionToLoad&nbsp;instance&nbsp;has&nbsp;been&nbsp;loaded</tt></dd></dl>
+
+<dl><dt><a name="ExtensionDict-__getitem__"><strong>__getitem__</strong></a>(self, load_extension)</dt><dd><tt>Given&nbsp;an&nbsp;ExtensionToLoad&nbsp;instance,&nbsp;returns&nbsp;the&nbsp;corresponding<br>
+ExtensionPage&nbsp;instance.</tt></dd></dl>
+
+<dl><dt><a name="ExtensionDict-__init__"><strong>__init__</strong></a>(self, extension_backend)</dt></dl>
+
+<dl><dt><a name="ExtensionDict-keys"><strong>keys</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
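
The extension_dict page above documents a small lookup API: keys() lists extension ids, __contains__/__getitem__ take an ExtensionToLoad, and GetByExtensionId() returns a list because built-in apps and component extensions can share an id. A minimal usage sketch, assuming a Telemetry checkout on PYTHONPATH and that the launched browser exposes this dictionary as `browser.extensions` (that attribute is not shown in this diff and is used here only for illustration):

```python
# Illustrative sketch only. `browser` and `load_extension` are assumed to be a
# running Telemetry Browser and the ExtensionToLoad it was started with; the
# `browser.extensions` attribute is an assumption, not documented in this diff.

def dump_extension_pages(browser, load_extension):
  extensions = browser.extensions  # assumed to be an ExtensionDict

  # Documented lookups: membership and item access take an ExtensionToLoad and
  # yield the corresponding ExtensionPage.
  if load_extension in extensions:
    page = extensions[load_extension]
    print('loaded extension page: %s' % page.GetUrl())

  # GetByExtensionId() returns a list, which matters for built-in apps and
  # component extensions that share one id.
  for extension_id in extensions.keys():
    for page in extensions.GetByExtensionId(extension_id):
      print('%s -> %s' % (extension_id, page.GetUrl()))
```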
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.browser.extension_page.html b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.extension_page.html
new file mode 100644
index 0000000..adbac7d
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.extension_page.html
@@ -0,0 +1,250 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.browser.extension_page</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.browser.html"><font color="#ffffff">browser</font></a>.extension_page</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/browser/extension_page.py">telemetry/internal/browser/extension_page.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="re.html">re</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.browser.web_contents.html">telemetry.internal.browser.web_contents</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.browser.web_contents.html#WebContents">telemetry.internal.browser.web_contents.WebContents</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.browser.extension_page.html#ExtensionPage">ExtensionPage</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ExtensionPage">class <strong>ExtensionPage</strong></a>(<a href="telemetry.internal.browser.web_contents.html#WebContents">telemetry.internal.browser.web_contents.WebContents</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Represents&nbsp;an&nbsp;extension&nbsp;page&nbsp;in&nbsp;the&nbsp;browser<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.browser.extension_page.html#ExtensionPage">ExtensionPage</a></dd>
+<dd><a href="telemetry.internal.browser.web_contents.html#WebContents">telemetry.internal.browser.web_contents.WebContents</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="ExtensionPage-Reload"><strong>Reload</strong></a>(self)</dt><dd><tt>Reloading&nbsp;an&nbsp;extension&nbsp;page&nbsp;is&nbsp;used&nbsp;as&nbsp;a&nbsp;workaround&nbsp;for&nbsp;an&nbsp;extension<br>
+binding&nbsp;bug&nbsp;for&nbsp;old&nbsp;versions&nbsp;of&nbsp;Chrome&nbsp;(crbug.com/263162).&nbsp;After&nbsp;Navigate<br>
+returns,&nbsp;we&nbsp;are&nbsp;guaranteed&nbsp;that&nbsp;the&nbsp;inspected&nbsp;page&nbsp;is&nbsp;in&nbsp;the&nbsp;correct&nbsp;state.</tt></dd></dl>
+
+<dl><dt><a name="ExtensionPage-__init__"><strong>__init__</strong></a>(self, inspector_backend)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.browser.web_contents.html#WebContents">telemetry.internal.browser.web_contents.WebContents</a>:<br>
+<dl><dt><a name="ExtensionPage-CloseConnections"><strong>CloseConnections</strong></a>(self)</dt><dd><tt>Closes&nbsp;all&nbsp;TCP&nbsp;sockets&nbsp;held&nbsp;open&nbsp;by&nbsp;the&nbsp;browser.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException&nbsp;if&nbsp;the&nbsp;tab&nbsp;is&nbsp;not&nbsp;alive.</tt></dd></dl>
+
+<dl><dt><a name="ExtensionPage-EnableAllContexts"><strong>EnableAllContexts</strong></a>(self)</dt><dd><tt>Enable&nbsp;all&nbsp;contexts&nbsp;in&nbsp;a&nbsp;page.&nbsp;Returns&nbsp;the&nbsp;number&nbsp;of&nbsp;available&nbsp;contexts.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.WebSocketDisconnected<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="ExtensionPage-EvaluateJavaScript"><strong>EvaluateJavaScript</strong></a>(self, expr, timeout<font color="#909090">=90</font>)</dt><dd><tt>Evalutes&nbsp;expr&nbsp;in&nbsp;JavaScript&nbsp;and&nbsp;returns&nbsp;the&nbsp;JSONized&nbsp;result.<br>
+&nbsp;<br>
+Consider&nbsp;using&nbsp;ExecuteJavaScript&nbsp;for&nbsp;cases&nbsp;where&nbsp;the&nbsp;result&nbsp;of&nbsp;the<br>
+expression&nbsp;is&nbsp;not&nbsp;needed.<br>
+&nbsp;<br>
+If&nbsp;evaluation&nbsp;throws&nbsp;in&nbsp;JavaScript,&nbsp;a&nbsp;Python&nbsp;EvaluateException&nbsp;will<br>
+be&nbsp;raised.<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;result&nbsp;of&nbsp;the&nbsp;evaluation&nbsp;cannot&nbsp;be&nbsp;JSONized,&nbsp;then&nbsp;an<br>
+EvaluationException&nbsp;will&nbsp;be&nbsp;raised.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;See&nbsp;<a href="#ExtensionPage-EvaluateJavaScriptInContext">EvaluateJavaScriptInContext</a>()&nbsp;for&nbsp;a&nbsp;detailed&nbsp;list<br>
+&nbsp;&nbsp;of&nbsp;possible&nbsp;exceptions.</tt></dd></dl>
+
+<dl><dt><a name="ExtensionPage-EvaluateJavaScriptInContext"><strong>EvaluateJavaScriptInContext</strong></a>(self, expr, context_id, timeout<font color="#909090">=90</font>)</dt><dd><tt>Similar&nbsp;to&nbsp;ExecuteJavaScript,&nbsp;except&nbsp;context_id&nbsp;can&nbsp;refer&nbsp;to&nbsp;an&nbsp;iframe.<br>
+The&nbsp;main&nbsp;page&nbsp;has&nbsp;context_id=1,&nbsp;the&nbsp;first&nbsp;iframe&nbsp;context_id=2,&nbsp;etc.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.EvaluateException<br>
+&nbsp;&nbsp;exceptions.WebSocketDisconnected<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="ExtensionPage-ExecuteJavaScript"><strong>ExecuteJavaScript</strong></a>(self, statement, timeout<font color="#909090">=90</font>)</dt><dd><tt>Executes&nbsp;statement&nbsp;in&nbsp;JavaScript.&nbsp;Does&nbsp;not&nbsp;return&nbsp;the&nbsp;result.<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;statement&nbsp;failed&nbsp;to&nbsp;evaluate,&nbsp;EvaluateException&nbsp;will&nbsp;be&nbsp;raised.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;See&nbsp;<a href="#ExtensionPage-ExecuteJavaScriptInContext">ExecuteJavaScriptInContext</a>()&nbsp;for&nbsp;a&nbsp;detailed&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;possible&nbsp;exceptions.</tt></dd></dl>
+
+<dl><dt><a name="ExtensionPage-ExecuteJavaScriptInContext"><strong>ExecuteJavaScriptInContext</strong></a>(self, expr, context_id, timeout<font color="#909090">=90</font>)</dt><dd><tt>Similar&nbsp;to&nbsp;ExecuteJavaScript,&nbsp;except&nbsp;context_id&nbsp;can&nbsp;refer&nbsp;to&nbsp;an&nbsp;iframe.<br>
+The&nbsp;main&nbsp;page&nbsp;has&nbsp;context_id=1,&nbsp;the&nbsp;first&nbsp;iframe&nbsp;context_id=2,&nbsp;etc.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.EvaluateException<br>
+&nbsp;&nbsp;exceptions.WebSocketDisconnected<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="ExtensionPage-GetUrl"><strong>GetUrl</strong></a>(self)</dt><dd><tt>Returns&nbsp;the&nbsp;URL&nbsp;to&nbsp;which&nbsp;the&nbsp;<a href="telemetry.internal.browser.web_contents.html#WebContents">WebContents</a>&nbsp;is&nbsp;connected.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;If&nbsp;there&nbsp;is&nbsp;an&nbsp;error&nbsp;in&nbsp;inspector&nbsp;backend&nbsp;connection.</tt></dd></dl>
+
+<dl><dt><a name="ExtensionPage-GetWebviewContexts"><strong>GetWebviewContexts</strong></a>(self)</dt><dd><tt>Returns&nbsp;a&nbsp;list&nbsp;of&nbsp;webview&nbsp;contexts&nbsp;within&nbsp;the&nbsp;current&nbsp;inspector&nbsp;backend.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;list&nbsp;of&nbsp;<a href="telemetry.internal.browser.web_contents.html#WebContents">WebContents</a>&nbsp;objects&nbsp;representing&nbsp;the&nbsp;webview&nbsp;contexts.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;If&nbsp;there&nbsp;is&nbsp;an&nbsp;error&nbsp;in&nbsp;inspector&nbsp;backend&nbsp;connection.</tt></dd></dl>
+
+<dl><dt><a name="ExtensionPage-HasReachedQuiescence"><strong>HasReachedQuiescence</strong></a>(self)</dt><dd><tt>Determine&nbsp;whether&nbsp;the&nbsp;page&nbsp;has&nbsp;reached&nbsp;quiescence&nbsp;after&nbsp;loading.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;True&nbsp;if&nbsp;2&nbsp;seconds&nbsp;have&nbsp;passed&nbsp;since&nbsp;last&nbsp;resource&nbsp;received,&nbsp;false<br>
+&nbsp;&nbsp;otherwise.<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;See&nbsp;<a href="#ExtensionPage-EvaluateJavaScript">EvaluateJavaScript</a>()&nbsp;for&nbsp;a&nbsp;detailed&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;possible&nbsp;exceptions.</tt></dd></dl>
+
+<dl><dt><a name="ExtensionPage-IsAlive"><strong>IsAlive</strong></a>(self)</dt><dd><tt>Whether&nbsp;the&nbsp;<a href="telemetry.internal.browser.web_contents.html#WebContents">WebContents</a>&nbsp;is&nbsp;still&nbsp;operating&nbsp;normally.<br>
+&nbsp;<br>
+Since&nbsp;<a href="telemetry.internal.browser.web_contents.html#WebContents">WebContents</a>&nbsp;function&nbsp;asynchronously,&nbsp;this&nbsp;method&nbsp;does&nbsp;not&nbsp;guarantee<br>
+that&nbsp;the&nbsp;<a href="telemetry.internal.browser.web_contents.html#WebContents">WebContents</a>&nbsp;will&nbsp;still&nbsp;be&nbsp;alive&nbsp;at&nbsp;any&nbsp;point&nbsp;in&nbsp;the&nbsp;future.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;boolean&nbsp;indicating&nbsp;whether&nbsp;the&nbsp;<a href="telemetry.internal.browser.web_contents.html#WebContents">WebContents</a>&nbsp;is&nbsp;operating&nbsp;normally.</tt></dd></dl>
+
+<dl><dt><a name="ExtensionPage-Navigate"><strong>Navigate</strong></a>(self, url, script_to_evaluate_on_commit<font color="#909090">=None</font>, timeout<font color="#909090">=90</font>)</dt><dd><tt>Navigates&nbsp;to&nbsp;url.<br>
+&nbsp;<br>
+If&nbsp;|script_to_evaluate_on_commit|&nbsp;is&nbsp;given,&nbsp;the&nbsp;script&nbsp;source&nbsp;string&nbsp;will&nbsp;be<br>
+evaluated&nbsp;when&nbsp;the&nbsp;navigation&nbsp;is&nbsp;committed.&nbsp;This&nbsp;is&nbsp;after&nbsp;the&nbsp;context&nbsp;of<br>
+the&nbsp;page&nbsp;exists,&nbsp;but&nbsp;before&nbsp;any&nbsp;script&nbsp;on&nbsp;the&nbsp;page&nbsp;itself&nbsp;has&nbsp;executed.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="ExtensionPage-StartTimelineRecording"><strong>StartTimelineRecording</strong></a>(self)</dt><dd><tt>Starts&nbsp;timeline&nbsp;recording.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="ExtensionPage-StopTimelineRecording"><strong>StopTimelineRecording</strong></a>(self)</dt><dd><tt>Stops&nbsp;timeline&nbsp;recording.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="ExtensionPage-SynthesizeScrollGesture"><strong>SynthesizeScrollGesture</strong></a>(self, x<font color="#909090">=100</font>, y<font color="#909090">=800</font>, xDistance<font color="#909090">=0</font>, yDistance<font color="#909090">=-500</font>, xOverscroll<font color="#909090">=None</font>, yOverscroll<font color="#909090">=None</font>, preventFling<font color="#909090">=True</font>, speed<font color="#909090">=None</font>, gestureSourceType<font color="#909090">=None</font>, repeatCount<font color="#909090">=None</font>, repeatDelayMs<font color="#909090">=None</font>, interactionMarkerName<font color="#909090">=None</font>)</dt><dd><tt>Runs&nbsp;an&nbsp;inspector&nbsp;command&nbsp;that&nbsp;causes&nbsp;a&nbsp;repeatable&nbsp;browser&nbsp;driven&nbsp;scroll.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;x:&nbsp;X&nbsp;coordinate&nbsp;of&nbsp;the&nbsp;start&nbsp;of&nbsp;the&nbsp;gesture&nbsp;in&nbsp;CSS&nbsp;pixels.<br>
+&nbsp;&nbsp;y:&nbsp;Y&nbsp;coordinate&nbsp;of&nbsp;the&nbsp;start&nbsp;of&nbsp;the&nbsp;gesture&nbsp;in&nbsp;CSS&nbsp;pixels.<br>
+&nbsp;&nbsp;xDistance:&nbsp;Distance&nbsp;to&nbsp;scroll&nbsp;along&nbsp;the&nbsp;X&nbsp;axis&nbsp;(positive&nbsp;to&nbsp;scroll&nbsp;left).<br>
+&nbsp;&nbsp;yDistance:&nbsp;Distance&nbsp;to&nbsp;scroll&nbsp;along&nbsp;the&nbsp;Y&nbsp;axis&nbsp;(positive&nbsp;to&nbsp;scroll&nbsp;up).<br>
+&nbsp;&nbsp;xOverscroll:&nbsp;Number&nbsp;of&nbsp;additional&nbsp;pixels&nbsp;to&nbsp;scroll&nbsp;back&nbsp;along&nbsp;the&nbsp;X&nbsp;axis.<br>
+&nbsp;&nbsp;yOverscroll:&nbsp;Number&nbsp;of&nbsp;additional&nbsp;pixels&nbsp;to&nbsp;scroll&nbsp;back&nbsp;along&nbsp;the&nbsp;Y&nbsp;axis.<br>
+&nbsp;&nbsp;preventFling:&nbsp;Prevents&nbsp;a&nbsp;fling&nbsp;gesture.<br>
+&nbsp;&nbsp;speed:&nbsp;Swipe&nbsp;speed&nbsp;in&nbsp;pixels&nbsp;per&nbsp;second.<br>
+&nbsp;&nbsp;gestureSourceType:&nbsp;Which&nbsp;type&nbsp;of&nbsp;input&nbsp;events&nbsp;to&nbsp;be&nbsp;generated.<br>
+&nbsp;&nbsp;repeatCount:&nbsp;Number&nbsp;of&nbsp;additional&nbsp;repeats&nbsp;beyond&nbsp;the&nbsp;first&nbsp;scroll.<br>
+&nbsp;&nbsp;repeatDelayMs:&nbsp;Number&nbsp;of&nbsp;milliseconds&nbsp;delay&nbsp;between&nbsp;each&nbsp;repeat.<br>
+&nbsp;&nbsp;interactionMarkerName:&nbsp;The&nbsp;name&nbsp;of&nbsp;the&nbsp;interaction&nbsp;markers&nbsp;to&nbsp;generate.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="ExtensionPage-WaitForDocumentReadyStateToBeComplete"><strong>WaitForDocumentReadyStateToBeComplete</strong></a>(self, timeout<font color="#909090">=90</font>)</dt><dd><tt>Waits&nbsp;for&nbsp;the&nbsp;document&nbsp;to&nbsp;finish&nbsp;loading.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;See&nbsp;<a href="#ExtensionPage-WaitForJavaScriptExpression">WaitForJavaScriptExpression</a>()&nbsp;for&nbsp;a&nbsp;detailed&nbsp;list<br>
+&nbsp;&nbsp;of&nbsp;possible&nbsp;exceptions.</tt></dd></dl>
+
+<dl><dt><a name="ExtensionPage-WaitForDocumentReadyStateToBeInteractiveOrBetter"><strong>WaitForDocumentReadyStateToBeInteractiveOrBetter</strong></a>(self, timeout<font color="#909090">=90</font>)</dt><dd><tt>Waits&nbsp;for&nbsp;the&nbsp;document&nbsp;to&nbsp;be&nbsp;interactive.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;See&nbsp;<a href="#ExtensionPage-WaitForJavaScriptExpression">WaitForJavaScriptExpression</a>()&nbsp;for&nbsp;a&nbsp;detailed&nbsp;list<br>
+&nbsp;&nbsp;of&nbsp;possible&nbsp;exceptions.</tt></dd></dl>
+
+<dl><dt><a name="ExtensionPage-WaitForJavaScriptExpression"><strong>WaitForJavaScriptExpression</strong></a>(self, expr, timeout, dump_page_state_on_timeout<font color="#909090">=True</font>)</dt><dd><tt>Waits&nbsp;for&nbsp;the&nbsp;given&nbsp;JavaScript&nbsp;expression&nbsp;to&nbsp;be&nbsp;True.<br>
+&nbsp;<br>
+This&nbsp;method&nbsp;is&nbsp;robust&nbsp;against&nbsp;any&nbsp;given&nbsp;Evaluation&nbsp;timing&nbsp;out.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;expr:&nbsp;The&nbsp;expression&nbsp;to&nbsp;evaluate.<br>
+&nbsp;&nbsp;timeout:&nbsp;The&nbsp;number&nbsp;of&nbsp;seconds&nbsp;to&nbsp;wait&nbsp;for&nbsp;the&nbsp;expression&nbsp;to&nbsp;be&nbsp;True.<br>
+&nbsp;&nbsp;dump_page_state_on_timeout:&nbsp;Whether&nbsp;to&nbsp;provide&nbsp;additional&nbsp;information&nbsp;on<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;the&nbsp;page&nbsp;state&nbsp;if&nbsp;a&nbsp;TimeoutException&nbsp;is&nbsp;thrown.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.TimeoutException:&nbsp;On&nbsp;a&nbsp;timeout.<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;See&nbsp;<a href="#ExtensionPage-EvaluateJavaScript">EvaluateJavaScript</a>()&nbsp;for&nbsp;a&nbsp;detailed&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;possible&nbsp;exceptions.</tt></dd></dl>
+
+<dl><dt><a name="ExtensionPage-WaitForNavigate"><strong>WaitForNavigate</strong></a>(self, timeout<font color="#909090">=90</font>)</dt><dd><tt>Waits&nbsp;for&nbsp;the&nbsp;navigation&nbsp;to&nbsp;complete.<br>
+&nbsp;<br>
+The&nbsp;current&nbsp;page&nbsp;is&nbsp;expected&nbsp;to&nbsp;be&nbsp;in&nbsp;a&nbsp;navigation.<br>
+This&nbsp;function&nbsp;returns&nbsp;when&nbsp;the&nbsp;navigation&nbsp;is&nbsp;complete&nbsp;or&nbsp;when<br>
+the&nbsp;timeout&nbsp;has&nbsp;been&nbsp;exceeded.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.browser.web_contents.html#WebContents">telemetry.internal.browser.web_contents.WebContents</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>id</strong></dt>
+<dd><tt>Return&nbsp;the&nbsp;unique&nbsp;id&nbsp;string&nbsp;for&nbsp;this&nbsp;tab&nbsp;object.</tt></dd>
+</dl>
+<dl><dt><strong>message_output_stream</strong></dt>
+</dl>
+<dl><dt><strong>timeline_model</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-UrlToExtensionId"><strong>UrlToExtensionId</strong></a>(url)</dt></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
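
ExtensionPage only adds Reload() on top of WebContents, so the JavaScript helpers documented above (ExecuteJavaScript, EvaluateJavaScript, WaitForJavaScriptExpression) are the main way to drive it. A hedged sketch of how those calls compose, using only the signatures shown on this page; `page` is assumed to be an ExtensionPage obtained from an ExtensionDict lookup, and the queried chrome.runtime expression is just an example:

```python
# Sketch only: `page` is assumed to be an ExtensionPage (for example
# browser.extensions[load_extension]); the JavaScript expressions are
# placeholders, not taken from this diff.

def read_extension_version(page):
  # Reload() is documented as a workaround for an extension-binding bug in old
  # Chrome builds (crbug.com/263162); after it returns, the inspected page is
  # in a known state.
  page.Reload()

  # Block until the extension page has finished loading.
  page.WaitForJavaScriptExpression('document.readyState === "complete"',
                                   timeout=60)

  # ExecuteJavaScript() discards the result; EvaluateJavaScript() returns the
  # JSON-ized value and raises EvaluateException if the expression throws.
  page.ExecuteJavaScript('window.__telemetry_probe = true;')
  return page.EvaluateJavaScript(
      'chrome.runtime.getManifest().version', timeout=30)
```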
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.browser.extension_to_load.html b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.extension_to_load.html
new file mode 100644
index 0000000..33162f9
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.extension_to_load.html
@@ -0,0 +1,195 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.browser.extension_to_load</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.browser.html"><font color="#ffffff">browser</font></a>.extension_to_load</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/browser/extension_to_load.py">telemetry/internal/browser/extension_to_load.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.backends.chrome.crx_id.html">telemetry.internal.backends.chrome.crx_id</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.browser.extension_to_load.html#ExtensionToLoad">ExtensionToLoad</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.browser.extension_to_load.html#ExtensionPathNonExistentException">ExtensionPathNonExistentException</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.internal.browser.extension_to_load.html#MissingPublicKeyException">MissingPublicKeyException</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ExtensionPathNonExistentException">class <strong>ExtensionPathNonExistentException</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.browser.extension_to_load.html#ExtensionPathNonExistentException">ExtensionPathNonExistentException</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="ExtensionPathNonExistentException-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#ExtensionPathNonExistentException-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#ExtensionPathNonExistentException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="ExtensionPathNonExistentException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ExtensionPathNonExistentException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ExtensionPathNonExistentException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#ExtensionPathNonExistentException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ExtensionPathNonExistentException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#ExtensionPathNonExistentException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="ExtensionPathNonExistentException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#ExtensionPathNonExistentException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="ExtensionPathNonExistentException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ExtensionPathNonExistentException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#ExtensionPathNonExistentException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="ExtensionPathNonExistentException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ExtensionPathNonExistentException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="ExtensionPathNonExistentException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ExtensionPathNonExistentException-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#ExtensionPathNonExistentException-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="ExtensionPathNonExistentException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ExtensionToLoad">class <strong>ExtensionToLoad</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="ExtensionToLoad-__init__"><strong>__init__</strong></a>(self, path, browser_type, is_component<font color="#909090">=False</font>)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>extension_id</strong></dt>
+<dd><tt>Unique&nbsp;extension&nbsp;id&nbsp;of&nbsp;this&nbsp;extension.</tt></dd>
+</dl>
+<dl><dt><strong>is_component</strong></dt>
+<dd><tt>Whether&nbsp;this&nbsp;extension&nbsp;should&nbsp;be&nbsp;loaded&nbsp;as&nbsp;a&nbsp;component&nbsp;extension.</tt></dd>
+</dl>
+<dl><dt><strong>local_path</strong></dt>
+<dd><tt>Path&nbsp;to&nbsp;extension&nbsp;destination&nbsp;directory,&nbsp;for&nbsp;remote&nbsp;instances&nbsp;of<br>
+Chrome.</tt></dd>
+</dl>
+<dl><dt><strong>path</strong></dt>
+<dd><tt>Path&nbsp;to&nbsp;extension&nbsp;source&nbsp;directory.</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MissingPublicKeyException">class <strong>MissingPublicKeyException</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.browser.extension_to_load.html#MissingPublicKeyException">MissingPublicKeyException</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="MissingPublicKeyException-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#MissingPublicKeyException-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#MissingPublicKeyException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="MissingPublicKeyException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#MissingPublicKeyException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="MissingPublicKeyException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#MissingPublicKeyException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="MissingPublicKeyException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#MissingPublicKeyException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="MissingPublicKeyException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#MissingPublicKeyException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="MissingPublicKeyException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="MissingPublicKeyException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#MissingPublicKeyException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="MissingPublicKeyException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#MissingPublicKeyException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="MissingPublicKeyException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="MissingPublicKeyException-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#MissingPublicKeyException-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="MissingPublicKeyException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
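
extension_to_load pairs the ExtensionToLoad value object with two constructor-time failure modes, ExtensionPathNonExistentException and MissingPublicKeyException. A sketch of defensive construction built only on the documented signature ExtensionToLoad(path, browser_type, is_component=False); the path and browser_type values are placeholders, and the comment about manifest keys is an assumption since this page does not spell out when each exception fires:

```python
# Sketch only: the arguments are placeholders; exception triggers are assumed.
from telemetry.internal.browser import extension_to_load


def make_extension(path, browser_type, is_component=False):
  try:
    ext = extension_to_load.ExtensionToLoad(
        path=path, browser_type=browser_type, is_component=is_component)
  except extension_to_load.ExtensionPathNonExistentException:
    print('no unpacked extension found at %s' % path)
    raise
  except extension_to_load.MissingPublicKeyException:
    # Assumption: raised when the manifest lacks the public key needed to
    # derive a stable extension_id.
    print('manifest at %s is missing a public key' % path)
    raise

  # Documented read-only properties: extension_id, path, local_path,
  # is_component.
  print('will load %s as extension id %s' % (ext.path, ext.extension_id))
  return ext
```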
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.browser.html b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.html
new file mode 100644
index 0000000..cb39678
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.html
@@ -0,0 +1,46 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.internal.browser</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.browser</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/browser/__init__.py">telemetry/internal/browser/__init__.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.browser.browser.html">browser</a><br>
+<a href="telemetry.internal.browser.browser_credentials.html">browser_credentials</a><br>
+<a href="telemetry.internal.browser.browser_credentials_unittest.html">browser_credentials_unittest</a><br>
+<a href="telemetry.internal.browser.browser_finder.html">browser_finder</a><br>
+<a href="telemetry.internal.browser.browser_finder_exceptions.html">browser_finder_exceptions</a><br>
+<a href="telemetry.internal.browser.browser_info.html">browser_info</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.browser.browser_options.html">browser_options</a><br>
+<a href="telemetry.internal.browser.browser_options_unittest.html">browser_options_unittest</a><br>
+<a href="telemetry.internal.browser.browser_unittest.html">browser_unittest</a><br>
+<a href="telemetry.internal.browser.extension_dict.html">extension_dict</a><br>
+<a href="telemetry.internal.browser.extension_page.html">extension_page</a><br>
+<a href="telemetry.internal.browser.extension_to_load.html">extension_to_load</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.browser.extension_unittest.html">extension_unittest</a><br>
+<a href="telemetry.internal.browser.possible_browser.html">possible_browser</a><br>
+<a href="telemetry.internal.browser.profile_types.html">profile_types</a><br>
+<a href="telemetry.internal.browser.profile_types_unittest.html">profile_types_unittest</a><br>
+<a href="telemetry.internal.browser.tab.html">tab</a><br>
+<a href="telemetry.internal.browser.tab_list.html">tab_list</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.browser.tab_unittest.html">tab_unittest</a><br>
+<a href="telemetry.internal.browser.user_agent.html">user_agent</a><br>
+<a href="telemetry.internal.browser.user_agent_unittest.html">user_agent_unittest</a><br>
+<a href="telemetry.internal.browser.web_contents.html">web_contents</a><br>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.browser.possible_browser.html b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.possible_browser.html
new file mode 100644
index 0000000..2f77507
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.possible_browser.html
@@ -0,0 +1,97 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.browser.possible_browser</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.browser.html"><font color="#ffffff">browser</font></a>.possible_browser</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/browser/possible_browser.py">telemetry/internal/browser/possible_browser.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.app.possible_app.html">telemetry.internal.app.possible_app</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.app.possible_app.html#PossibleApp">telemetry.internal.app.possible_app.PossibleApp</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">PossibleBrowser</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PossibleBrowser">class <strong>PossibleBrowser</strong></a>(<a href="telemetry.internal.app.possible_app.html#PossibleApp">telemetry.internal.app.possible_app.PossibleApp</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;browser&nbsp;that&nbsp;can&nbsp;be&nbsp;controlled.<br>
+&nbsp;<br>
+Call&nbsp;<a href="#PossibleBrowser-Create">Create</a>()&nbsp;to&nbsp;launch&nbsp;the&nbsp;browser&nbsp;and&nbsp;begin&nbsp;manipulating&nbsp;it..<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.browser.possible_browser.html#PossibleBrowser">PossibleBrowser</a></dd>
+<dd><a href="telemetry.internal.app.possible_app.html#PossibleApp">telemetry.internal.app.possible_app.PossibleApp</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="PossibleBrowser-Create"><strong>Create</strong></a>(self, finder_options)</dt></dl>
+
+<dl><dt><a name="PossibleBrowser-IsRemote"><strong>IsRemote</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleBrowser-RunRemote"><strong>RunRemote</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleBrowser-SetCredentialsPath"><strong>SetCredentialsPath</strong></a>(self, credentials_path)</dt></dl>
+
+<dl><dt><a name="PossibleBrowser-SupportsOptions"><strong>SupportsOptions</strong></a>(self, finder_options)</dt><dd><tt>Tests&nbsp;for&nbsp;extension&nbsp;support.</tt></dd></dl>
+
+<dl><dt><a name="PossibleBrowser-UpdateExecutableIfNeeded"><strong>UpdateExecutableIfNeeded</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleBrowser-__init__"><strong>__init__</strong></a>(self, browser_type, target_os, supports_tab_control)</dt></dl>
+
+<dl><dt><a name="PossibleBrowser-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PossibleBrowser-last_modification_time"><strong>last_modification_time</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>browser_type</strong></dt>
+</dl>
+<dl><dt><strong>supports_tab_control</strong></dt>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.app.possible_app.html#PossibleApp">telemetry.internal.app.possible_app.PossibleApp</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>app_type</strong></dt>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+</dl>
+<dl><dt><strong>target_os</strong></dt>
+<dd><tt>Target&nbsp;OS&nbsp;the&nbsp;app&nbsp;will&nbsp;run&nbsp;on.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
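
PossibleBrowser describes a browser that has been located but not yet launched: SupportsOptions() vets the requested options, UpdateExecutableIfNeeded() refreshes the binary, and Create() produces the live browser. A sketch of that flow; `possible` and `finder_options` are assumed to come from the browser_finder and browser_options modules listed in the package index, and the IsRemote()/RunRemote() comment is an assumption since this page gives those methods no docstrings:

```python
# Sketch only: `possible` is assumed to be a PossibleBrowser returned by the
# browser_finder module; `finder_options` comes from browser_options. Neither
# is documented in this diff.

def launch_if_supported(possible, finder_options):
  # SupportsOptions() is documented as "Tests for extension support"; treat a
  # False return as "these options cannot be honoured by this browser type".
  if not possible.SupportsOptions(finder_options):
    raise ValueError('%r cannot honour the requested options' % possible)

  # Let backends that fetch or build browsers lazily refresh their binary.
  possible.UpdateExecutableIfNeeded()

  if possible.IsRemote():
    # Assumption: remote browsers (e.g. on a device) are driven via
    # RunRemote() instead of Create(); the generated page gives no details.
    return possible.RunRemote()

  # Create() launches the browser and returns the live browser object.
  return possible.Create(finder_options)
```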
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.browser.profile_types.html b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.profile_types.html
new file mode 100644
index 0000000..2311ceb
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.profile_types.html
@@ -0,0 +1,46 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.browser.profile_types</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.browser.html"><font color="#ffffff">browser</font></a>.profile_types</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/browser/profile_types.py">telemetry/internal/browser/profile_types.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-GetProfileDir"><strong>GetProfileDir</strong></a>(profile_type)</dt><dd><tt>Given&nbsp;a&nbsp;|profile_type|&nbsp;(as&nbsp;returned&nbsp;by&nbsp;<a href="#-GetProfileTypes">GetProfileTypes</a>()),&nbsp;return&nbsp;the<br>
+directory&nbsp;to&nbsp;use&nbsp;for&nbsp;that&nbsp;profile&nbsp;or&nbsp;None&nbsp;if&nbsp;the&nbsp;profile&nbsp;doesn't&nbsp;need&nbsp;a<br>
+profile&nbsp;directory&nbsp;(e.g.&nbsp;using&nbsp;the&nbsp;browser&nbsp;default&nbsp;profile).</tt></dd></dl>
+ <dl><dt><a name="-GetProfileTypes"><strong>GetProfileTypes</strong></a>()</dt><dd><tt>Returns&nbsp;a&nbsp;list&nbsp;of&nbsp;all&nbsp;command&nbsp;line&nbsp;options&nbsp;that&nbsp;can&nbsp;be&nbsp;specified&nbsp;for<br>
+profile&nbsp;type.</tt></dd></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>BASE_PROFILE_TYPES</strong> = ['clean', 'default']<br>
+<strong>PROFILE_TYPE_MAPPING</strong> = {'power_user': 'extension_webrequest', 'typical_user': 'content_scripts1'}</td></tr></table>
+</body></html>
\ No newline at end of file
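
profile_types maps the profile names accepted on the command line onto seed profile directories: GetProfileTypes() enumerates the valid names (presumably BASE_PROFILE_TYPES plus the keys of PROFILE_TYPE_MAPPING shown above) and GetProfileDir() resolves each to a directory, or None when the browser's default profile should be used. A small sketch that just walks the documented helpers:

```python
# Sketch only: prints every profile type Telemetry knows about and the seed
# profile directory it resolves to (None means "use the browser default").
from telemetry.internal.browser import profile_types


def list_profile_dirs():
  for profile_type in profile_types.GetProfileTypes():
    profile_dir = profile_types.GetProfileDir(profile_type)
    if profile_dir is None:
      print('%s: browser default profile' % profile_type)
    else:
      print('%s: %s' % (profile_type, profile_dir))


if __name__ == '__main__':
  list_profile_dirs()
```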
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.browser.tab.html b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.tab.html
new file mode 100644
index 0000000..2133051
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.tab.html
@@ -0,0 +1,395 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.browser.tab</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.browser.html"><font color="#ffffff">browser</font></a>.tab</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/browser/tab.py">telemetry/internal/browser/tab.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.image_processing.video.html">telemetry.internal.image_processing.video</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.browser.web_contents.html">telemetry.internal.browser.web_contents</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.browser.web_contents.html#WebContents">telemetry.internal.browser.web_contents.WebContents</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.browser.tab.html#Tab">Tab</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Tab">class <strong>Tab</strong></a>(<a href="telemetry.internal.browser.web_contents.html#WebContents">telemetry.internal.browser.web_contents.WebContents</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Represents&nbsp;a&nbsp;tab&nbsp;in&nbsp;the&nbsp;browser<br>
+&nbsp;<br>
+The&nbsp;important&nbsp;parts&nbsp;of&nbsp;the&nbsp;<a href="#Tab">Tab</a>&nbsp;object&nbsp;are&nbsp;in&nbsp;the&nbsp;runtime&nbsp;and&nbsp;page&nbsp;objects.<br>
+E.g.:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;Navigates&nbsp;the&nbsp;tab&nbsp;to&nbsp;a&nbsp;given&nbsp;url.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;tab.<a href="#Tab-Navigate">Navigate</a>('<a href="http://www.google.com/">http://www.google.com/</a>')<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;Evaluates&nbsp;1+1&nbsp;in&nbsp;the&nbsp;tab's&nbsp;JavaScript&nbsp;context.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;tab.Evaluate('1+1')<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.browser.tab.html#Tab">Tab</a></dd>
+<dd><a href="telemetry.internal.browser.web_contents.html#WebContents">telemetry.internal.browser.web_contents.WebContents</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="Tab-Activate"><strong>Activate</strong></a>(self)</dt><dd><tt>Brings&nbsp;this&nbsp;tab&nbsp;to&nbsp;the&nbsp;foreground&nbsp;asynchronously.<br>
+&nbsp;<br>
+Not&nbsp;all&nbsp;browsers&nbsp;or&nbsp;browser&nbsp;versions&nbsp;support&nbsp;this&nbsp;method.<br>
+Be&nbsp;sure&nbsp;to&nbsp;check&nbsp;browser.supports_tab_control.<br>
+&nbsp;<br>
+Please&nbsp;note:&nbsp;this&nbsp;is&nbsp;asynchronous.&nbsp;There&nbsp;is&nbsp;a&nbsp;delay&nbsp;between&nbsp;this&nbsp;call<br>
+and&nbsp;the&nbsp;page's&nbsp;documentVisibilityState&nbsp;becoming&nbsp;'visible',&nbsp;and&nbsp;yet&nbsp;more<br>
+delay&nbsp;until&nbsp;the&nbsp;actual&nbsp;tab&nbsp;is&nbsp;visible&nbsp;to&nbsp;the&nbsp;user.&nbsp;None&nbsp;of&nbsp;these&nbsp;delays<br>
+are&nbsp;included&nbsp;in&nbsp;this&nbsp;call.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;devtools_http.DevToolsClientConnectionError<br>
+&nbsp;&nbsp;devtools_client_backend.TabNotFoundError<br>
+&nbsp;&nbsp;tab_list_backend.TabUnexpectedResponseException</tt></dd></dl>
+
+<dl><dt><a name="Tab-ClearCache"><strong>ClearCache</strong></a>(self, force)</dt><dd><tt>Clears&nbsp;the&nbsp;browser's&nbsp;networking&nbsp;related&nbsp;disk,&nbsp;memory&nbsp;and&nbsp;other&nbsp;caches.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;force:&nbsp;Iff&nbsp;true,&nbsp;navigates&nbsp;to&nbsp;about:blank&nbsp;which&nbsp;destroys&nbsp;the&nbsp;previous<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;renderer,&nbsp;ensuring&nbsp;that&nbsp;even&nbsp;"live"&nbsp;resources&nbsp;in&nbsp;the&nbsp;memory&nbsp;cache&nbsp;are<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;cleared.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.EvaluateException<br>
+&nbsp;&nbsp;exceptions.WebSocketDisconnected<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException<br>
+&nbsp;&nbsp;errors.DeviceUnresponsiveError</tt></dd></dl>
+
+<dl><dt><a name="Tab-ClearHighlight"><strong>ClearHighlight</strong></a>(self, color)</dt><dd><tt>Clears&nbsp;a&nbsp;highlight&nbsp;of&nbsp;the&nbsp;given&nbsp;bitmap.RgbaColor.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.EvaluateException<br>
+&nbsp;&nbsp;exceptions.WebSocketDisconnected<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="Tab-Close"><strong>Close</strong></a>(self)</dt><dd><tt>Closes&nbsp;this&nbsp;tab.<br>
+&nbsp;<br>
+Not&nbsp;all&nbsp;browsers&nbsp;or&nbsp;browser&nbsp;versions&nbsp;support&nbsp;this&nbsp;method.<br>
+Be&nbsp;sure&nbsp;to&nbsp;check&nbsp;browser.supports_tab_control.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;devtools_http.DevToolsClientConnectionError<br>
+&nbsp;&nbsp;devtools_client_backend.TabNotFoundError<br>
+&nbsp;&nbsp;tab_list_backend.TabUnexpectedResponseException<br>
+&nbsp;&nbsp;exceptions.TimeoutException</tt></dd></dl>
+
+<dl><dt><a name="Tab-CollectGarbage"><strong>CollectGarbage</strong></a>(self)</dt><dd><tt>Forces&nbsp;a&nbsp;garbage&nbsp;collection.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.WebSocketDisconnected<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="Tab-GetCookieByName"><strong>GetCookieByName</strong></a>(self, name, timeout<font color="#909090">=60</font>)</dt><dd><tt>Returns&nbsp;the&nbsp;value&nbsp;of&nbsp;the&nbsp;cookie&nbsp;by&nbsp;the&nbsp;given&nbsp;|name|.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.WebSocketDisconnected<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="Tab-Highlight"><strong>Highlight</strong></a>(self, color)</dt><dd><tt>Synchronously&nbsp;highlights&nbsp;entire&nbsp;tab&nbsp;contents&nbsp;with&nbsp;the&nbsp;given&nbsp;RgbaColor.<br>
+&nbsp;<br>
+TODO(tonyg):&nbsp;It&nbsp;is&nbsp;possible&nbsp;that&nbsp;the&nbsp;z-index&nbsp;hack&nbsp;here&nbsp;might&nbsp;not&nbsp;work&nbsp;for<br>
+all&nbsp;pages.&nbsp;If&nbsp;this&nbsp;happens,&nbsp;DevTools&nbsp;also&nbsp;provides&nbsp;a&nbsp;method&nbsp;for&nbsp;this.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.EvaluateException<br>
+&nbsp;&nbsp;exceptions.WebSocketDisconnected<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="Tab-Screenshot"><strong>Screenshot</strong></a>(self, timeout<font color="#909090">=60</font>)</dt><dd><tt>Capture&nbsp;a&nbsp;screenshot&nbsp;of&nbsp;the&nbsp;tab's&nbsp;contents.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;telemetry.core.Bitmap.<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.WebSocketDisconnected<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
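
A minimal sketch of guarding Screenshot() behind the screenshot_supported descriptor; what is done with the returned telemetry.core.Bitmap is left open, since the Bitmap API is not part of this file.

```python
def try_screenshot(tab):
    if not tab.screenshot_supported:
        return None  # not every browser/platform can capture screenshots
    # Returns a telemetry.core.Bitmap; saving or inspecting it depends on
    # the Bitmap API, which is outside the scope of this file.
    return tab.Screenshot(timeout=60)
```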
+
+<dl><dt><a name="Tab-StartVideoCapture"><strong>StartVideoCapture</strong></a>(self, min_bitrate_mbps, highlight_bitmap<font color="#909090">=RgbaColor(r=222, g=100, b=13, a=255)</font>)</dt><dd><tt>Starts&nbsp;capturing&nbsp;video&nbsp;of&nbsp;the&nbsp;tab's&nbsp;contents.<br>
+&nbsp;<br>
+This&nbsp;works&nbsp;by&nbsp;flashing&nbsp;the&nbsp;entire&nbsp;tab&nbsp;contents&nbsp;to&nbsp;an&nbsp;arbitrary&nbsp;color&nbsp;and&nbsp;then<br>
+starting&nbsp;video&nbsp;recording.&nbsp;When&nbsp;the&nbsp;frames&nbsp;are&nbsp;processed,&nbsp;we&nbsp;can&nbsp;look&nbsp;for<br>
+that&nbsp;flash&nbsp;as&nbsp;the&nbsp;content&nbsp;bounds.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;min_bitrate_mbps:&nbsp;The&nbsp;minimum&nbsp;capture&nbsp;bitrate&nbsp;in&nbsp;megabits&nbsp;per&nbsp;second.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;The&nbsp;platform&nbsp;is&nbsp;free&nbsp;to&nbsp;deliver&nbsp;a&nbsp;higher&nbsp;bitrate&nbsp;if&nbsp;it&nbsp;can&nbsp;do&nbsp;so<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;without&nbsp;increasing&nbsp;overhead.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.EvaluateException<br>
+&nbsp;&nbsp;exceptions.WebSocketDisconnected<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException<br>
+&nbsp;&nbsp;ValueError:&nbsp;If&nbsp;the&nbsp;required&nbsp;|min_bitrate_mbps|&nbsp;can't&nbsp;be&nbsp;achieved.</tt></dd></dl>
+
+<dl><dt><a name="Tab-StopVideoCapture"><strong>StopVideoCapture</strong></a>(self)</dt><dd><tt>Stops&nbsp;recording&nbsp;video&nbsp;of&nbsp;the&nbsp;tab's&nbsp;contents.<br>
+&nbsp;<br>
+This&nbsp;looks&nbsp;for&nbsp;the&nbsp;initial&nbsp;color&nbsp;flash&nbsp;in&nbsp;the&nbsp;first&nbsp;frame&nbsp;to&nbsp;establish&nbsp;the<br>
+tab&nbsp;content&nbsp;boundaries&nbsp;and&nbsp;then&nbsp;omits&nbsp;all&nbsp;frames&nbsp;displaying&nbsp;the&nbsp;flash.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;video:&nbsp;A&nbsp;video&nbsp;object&nbsp;which&nbsp;is&nbsp;a&nbsp;telemetry.core.Video</tt></dd></dl>
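
A minimal sketch of the capture workflow described by StartVideoCapture/StopVideoCapture; `tab` is assumed to be a connected Tab, and the 1 Mbps bitrate and the scroll statement are arbitrary placeholders.

```python
def record_tab_video(tab, url):
    if not tab.video_capture_supported:
        return None
    tab.Navigate(url)
    tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()
    # The tab is flashed with the highlight color, then recording begins.
    tab.StartVideoCapture(min_bitrate_mbps=1)
    tab.ExecuteJavaScript('window.scrollTo(0, document.body.scrollHeight);')
    # Frames still showing the initial flash are omitted from the result.
    return tab.StopVideoCapture()  # a telemetry.core.Video object
```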
+
+<dl><dt><a name="Tab-__init__"><strong>__init__</strong></a>(self, inspector_backend, tab_list_backend, browser)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>browser</strong></dt>
+<dd><tt>The&nbsp;browser&nbsp;in&nbsp;which&nbsp;this&nbsp;tab&nbsp;resides.</tt></dd>
+</dl>
+<dl><dt><strong>dom_stats</strong></dt>
+<dd><tt>A&nbsp;dictionary&nbsp;populated&nbsp;with&nbsp;measured&nbsp;DOM&nbsp;statistics.<br>
+&nbsp;<br>
+Currently&nbsp;this&nbsp;dictionary&nbsp;contains:<br>
+{<br>
+&nbsp;&nbsp;'document_count':&nbsp;integer,<br>
+&nbsp;&nbsp;'node_count':&nbsp;integer,<br>
+&nbsp;&nbsp;'event_listener_count':&nbsp;integer<br>
+}<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;inspector_memory.InspectorMemoryException<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd>
+</dl>
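
A minimal sketch reading the dom_stats dictionary; the keys are the ones listed in the docstring above.

```python
def log_dom_stats(tab):
    stats = tab.dom_stats
    print('documents=%d nodes=%d listeners=%d' % (
        stats['document_count'],
        stats['node_count'],
        stats['event_listener_count']))
```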
+<dl><dt><strong>is_video_capture_running</strong></dt>
+</dl>
+<dl><dt><strong>screenshot_supported</strong></dt>
+<dd><tt>True&nbsp;if&nbsp;the&nbsp;browser&nbsp;instance&nbsp;is&nbsp;capable&nbsp;of&nbsp;capturing&nbsp;screenshots.</tt></dd>
+</dl>
+<dl><dt><strong>url</strong></dt>
+<dd><tt>Returns&nbsp;the&nbsp;URL&nbsp;of&nbsp;the&nbsp;tab,&nbsp;as&nbsp;reported&nbsp;by&nbsp;devtools.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;devtools_http.DevToolsClientConnectionError</tt></dd>
+</dl>
+<dl><dt><strong>video_capture_supported</strong></dt>
+<dd><tt>True&nbsp;if&nbsp;the&nbsp;browser&nbsp;instance&nbsp;is&nbsp;capable&nbsp;of&nbsp;capturing&nbsp;video.</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.browser.web_contents.html#WebContents">telemetry.internal.browser.web_contents.WebContents</a>:<br>
+<dl><dt><a name="Tab-CloseConnections"><strong>CloseConnections</strong></a>(self)</dt><dd><tt>Closes&nbsp;all&nbsp;TCP&nbsp;sockets&nbsp;held&nbsp;open&nbsp;by&nbsp;the&nbsp;browser.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException&nbsp;if&nbsp;the&nbsp;tab&nbsp;is&nbsp;not&nbsp;alive.</tt></dd></dl>
+
+<dl><dt><a name="Tab-EnableAllContexts"><strong>EnableAllContexts</strong></a>(self)</dt><dd><tt>Enable&nbsp;all&nbsp;contexts&nbsp;in&nbsp;a&nbsp;page.&nbsp;Returns&nbsp;the&nbsp;number&nbsp;of&nbsp;available&nbsp;contexts.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.WebSocketDisconnected<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="Tab-EvaluateJavaScript"><strong>EvaluateJavaScript</strong></a>(self, expr, timeout<font color="#909090">=90</font>)</dt><dd><tt>Evaluates&nbsp;expr&nbsp;in&nbsp;JavaScript&nbsp;and&nbsp;returns&nbsp;the&nbsp;JSONized&nbsp;result.<br>
+&nbsp;<br>
+Consider&nbsp;using&nbsp;ExecuteJavaScript&nbsp;for&nbsp;cases&nbsp;where&nbsp;the&nbsp;result&nbsp;of&nbsp;the<br>
+expression&nbsp;is&nbsp;not&nbsp;needed.<br>
+&nbsp;<br>
+If&nbsp;evaluation&nbsp;throws&nbsp;in&nbsp;JavaScript,&nbsp;a&nbsp;Python&nbsp;EvaluateException&nbsp;will<br>
+be&nbsp;raised.<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;result&nbsp;of&nbsp;the&nbsp;evaluation&nbsp;cannot&nbsp;be&nbsp;JSONized,&nbsp;then&nbsp;an<br>
+EvaluateException&nbsp;will&nbsp;be&nbsp;raised.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;See&nbsp;<a href="#Tab-EvaluateJavaScriptInContext">EvaluateJavaScriptInContext</a>()&nbsp;for&nbsp;a&nbsp;detailed&nbsp;list<br>
+&nbsp;&nbsp;of&nbsp;possible&nbsp;exceptions.</tt></dd></dl>
+
+<dl><dt><a name="Tab-EvaluateJavaScriptInContext"><strong>EvaluateJavaScriptInContext</strong></a>(self, expr, context_id, timeout<font color="#909090">=90</font>)</dt><dd><tt>Similar&nbsp;to&nbsp;ExecuteJavaScript,&nbsp;except&nbsp;context_id&nbsp;can&nbsp;refer&nbsp;to&nbsp;an&nbsp;iframe.<br>
+The&nbsp;main&nbsp;page&nbsp;has&nbsp;context_id=1,&nbsp;the&nbsp;first&nbsp;iframe&nbsp;context_id=2,&nbsp;etc.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.EvaluateException<br>
+&nbsp;&nbsp;exceptions.WebSocketDisconnected<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="Tab-ExecuteJavaScript"><strong>ExecuteJavaScript</strong></a>(self, statement, timeout<font color="#909090">=90</font>)</dt><dd><tt>Executes&nbsp;statement&nbsp;in&nbsp;JavaScript.&nbsp;Does&nbsp;not&nbsp;return&nbsp;the&nbsp;result.<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;statement&nbsp;failed&nbsp;to&nbsp;evaluate,&nbsp;EvaluateException&nbsp;will&nbsp;be&nbsp;raised.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;See&nbsp;<a href="#Tab-ExecuteJavaScriptInContext">ExecuteJavaScriptInContext</a>()&nbsp;for&nbsp;a&nbsp;detailed&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;possible&nbsp;exceptions.</tt></dd></dl>
+
+<dl><dt><a name="Tab-ExecuteJavaScriptInContext"><strong>ExecuteJavaScriptInContext</strong></a>(self, expr, context_id, timeout<font color="#909090">=90</font>)</dt><dd><tt>Similar&nbsp;to&nbsp;ExecuteJavaScript,&nbsp;except&nbsp;context_id&nbsp;can&nbsp;refer&nbsp;to&nbsp;an&nbsp;iframe.<br>
+The&nbsp;main&nbsp;page&nbsp;has&nbsp;context_id=1,&nbsp;the&nbsp;first&nbsp;iframe&nbsp;context_id=2,&nbsp;etc.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.EvaluateException<br>
+&nbsp;&nbsp;exceptions.WebSocketDisconnected<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
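
A minimal sketch contrasting EvaluateJavaScript, ExecuteJavaScript, and the *InContext variants; `tab` is assumed to be a connected Tab and the expressions are arbitrary examples.

```python
def count_links(tab):
    # ExecuteJavaScript runs a statement and discards the result.
    tab.ExecuteJavaScript('window.__marker = Date.now();')
    # EvaluateJavaScript returns the JSON-serialized value of the expression.
    main_frame_links = tab.EvaluateJavaScript(
        'document.querySelectorAll("a").length')
    # EnableAllContexts() returns how many contexts exist; context_id=1 is
    # the main page, 2 the first iframe, and so on.
    num_contexts = tab.EnableAllContexts()
    per_context = [
        tab.EvaluateJavaScriptInContext(
            'document.querySelectorAll("a").length', context_id=i)
        for i in range(1, num_contexts + 1)]
    return main_frame_links, per_context
```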
+
+<dl><dt><a name="Tab-GetUrl"><strong>GetUrl</strong></a>(self)</dt><dd><tt>Returns&nbsp;the&nbsp;URL&nbsp;to&nbsp;which&nbsp;the&nbsp;<a href="telemetry.internal.browser.web_contents.html#WebContents">WebContents</a>&nbsp;is&nbsp;connected.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;If&nbsp;there&nbsp;is&nbsp;an&nbsp;error&nbsp;in&nbsp;inspector&nbsp;backend&nbsp;connection.</tt></dd></dl>
+
+<dl><dt><a name="Tab-GetWebviewContexts"><strong>GetWebviewContexts</strong></a>(self)</dt><dd><tt>Returns&nbsp;a&nbsp;list&nbsp;of&nbsp;webview&nbsp;contexts&nbsp;within&nbsp;the&nbsp;current&nbsp;inspector&nbsp;backend.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;list&nbsp;of&nbsp;<a href="telemetry.internal.browser.web_contents.html#WebContents">WebContents</a>&nbsp;objects&nbsp;representing&nbsp;the&nbsp;webview&nbsp;contexts.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;If&nbsp;there&nbsp;is&nbsp;an&nbsp;error&nbsp;in&nbsp;inspector&nbsp;backend&nbsp;connection.</tt></dd></dl>
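
A minimal sketch of enumerating webview contexts; each returned object is itself a WebContents, so the same Evaluate/Execute calls apply to it.

```python
def webview_urls(tab):
    return [webview.EvaluateJavaScript('document.location.href')
            for webview in tab.GetWebviewContexts()]
```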
+
+<dl><dt><a name="Tab-HasReachedQuiescence"><strong>HasReachedQuiescence</strong></a>(self)</dt><dd><tt>Determine&nbsp;whether&nbsp;the&nbsp;page&nbsp;has&nbsp;reached&nbsp;quiescence&nbsp;after&nbsp;loading.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;True&nbsp;if&nbsp;2&nbsp;seconds&nbsp;have&nbsp;passed&nbsp;since&nbsp;last&nbsp;resource&nbsp;received,&nbsp;false<br>
+&nbsp;&nbsp;otherwise.<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;See&nbsp;<a href="#Tab-EvaluateJavaScript">EvaluateJavaScript</a>()&nbsp;for&nbsp;a&nbsp;detailed&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;possible&nbsp;exceptions.</tt></dd></dl>
+
+<dl><dt><a name="Tab-IsAlive"><strong>IsAlive</strong></a>(self)</dt><dd><tt>Whether&nbsp;the&nbsp;<a href="telemetry.internal.browser.web_contents.html#WebContents">WebContents</a>&nbsp;is&nbsp;still&nbsp;operating&nbsp;normally.<br>
+&nbsp;<br>
+Since&nbsp;<a href="telemetry.internal.browser.web_contents.html#WebContents">WebContents</a>&nbsp;function&nbsp;asynchronously,&nbsp;this&nbsp;method&nbsp;does&nbsp;not&nbsp;guarantee<br>
+that&nbsp;the&nbsp;<a href="telemetry.internal.browser.web_contents.html#WebContents">WebContents</a>&nbsp;will&nbsp;still&nbsp;be&nbsp;alive&nbsp;at&nbsp;any&nbsp;point&nbsp;in&nbsp;the&nbsp;future.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;boolean&nbsp;indicating&nbsp;whether&nbsp;the&nbsp;<a href="telemetry.internal.browser.web_contents.html#WebContents">WebContents</a>&nbsp;is&nbsp;operating&nbsp;normally.</tt></dd></dl>
+
+<dl><dt><a name="Tab-Navigate"><strong>Navigate</strong></a>(self, url, script_to_evaluate_on_commit<font color="#909090">=None</font>, timeout<font color="#909090">=90</font>)</dt><dd><tt>Navigates&nbsp;to&nbsp;url.<br>
+&nbsp;<br>
+If&nbsp;|script_to_evaluate_on_commit|&nbsp;is&nbsp;given,&nbsp;the&nbsp;script&nbsp;source&nbsp;string&nbsp;will&nbsp;be<br>
+evaluated&nbsp;when&nbsp;the&nbsp;navigation&nbsp;is&nbsp;committed.&nbsp;This&nbsp;is&nbsp;after&nbsp;the&nbsp;context&nbsp;of<br>
+the&nbsp;page&nbsp;exists,&nbsp;but&nbsp;before&nbsp;any&nbsp;script&nbsp;on&nbsp;the&nbsp;page&nbsp;itself&nbsp;has&nbsp;executed.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
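
A minimal sketch of Navigate with script_to_evaluate_on_commit; the injected snippet (stubbing Math.random) is an arbitrary example of making a page deterministic before its own scripts run.

```python
FREEZE_RANDOM = 'Math.random = function() { return 0.5; };'

def navigate_with_stub(tab, url):
    # The snippet runs after the navigation commits but before any page
    # script executes.
    tab.Navigate(url, script_to_evaluate_on_commit=FREEZE_RANDOM, timeout=90)
    tab.WaitForDocumentReadyStateToBeComplete()
```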
+
+<dl><dt><a name="Tab-StartTimelineRecording"><strong>StartTimelineRecording</strong></a>(self)</dt><dd><tt>Starts&nbsp;timeline&nbsp;recording.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="Tab-StopTimelineRecording"><strong>StopTimelineRecording</strong></a>(self)</dt><dd><tt>Stops&nbsp;timeline&nbsp;recording.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="Tab-SynthesizeScrollGesture"><strong>SynthesizeScrollGesture</strong></a>(self, x<font color="#909090">=100</font>, y<font color="#909090">=800</font>, xDistance<font color="#909090">=0</font>, yDistance<font color="#909090">=-500</font>, xOverscroll<font color="#909090">=None</font>, yOverscroll<font color="#909090">=None</font>, preventFling<font color="#909090">=True</font>, speed<font color="#909090">=None</font>, gestureSourceType<font color="#909090">=None</font>, repeatCount<font color="#909090">=None</font>, repeatDelayMs<font color="#909090">=None</font>, interactionMarkerName<font color="#909090">=None</font>)</dt><dd><tt>Runs&nbsp;an&nbsp;inspector&nbsp;command&nbsp;that&nbsp;causes&nbsp;a&nbsp;repeatable&nbsp;browser&nbsp;driven&nbsp;scroll.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;x:&nbsp;X&nbsp;coordinate&nbsp;of&nbsp;the&nbsp;start&nbsp;of&nbsp;the&nbsp;gesture&nbsp;in&nbsp;CSS&nbsp;pixels.<br>
+&nbsp;&nbsp;y:&nbsp;Y&nbsp;coordinate&nbsp;of&nbsp;the&nbsp;start&nbsp;of&nbsp;the&nbsp;gesture&nbsp;in&nbsp;CSS&nbsp;pixels.<br>
+&nbsp;&nbsp;xDistance:&nbsp;Distance&nbsp;to&nbsp;scroll&nbsp;along&nbsp;the&nbsp;X&nbsp;axis&nbsp;(positive&nbsp;to&nbsp;scroll&nbsp;left).<br>
+&nbsp;&nbsp;yDistance:&nbsp;Distance&nbsp;to&nbsp;scroll&nbsp;along&nbsp;the&nbsp;Y&nbsp;axis&nbsp;(positive&nbsp;to&nbsp;scroll&nbsp;up).<br>
+&nbsp;&nbsp;xOverscroll:&nbsp;Number&nbsp;of&nbsp;additional&nbsp;pixels&nbsp;to&nbsp;scroll&nbsp;back&nbsp;along&nbsp;the&nbsp;X&nbsp;axis.<br>
+&nbsp;&nbsp;yOverscroll:&nbsp;Number&nbsp;of&nbsp;additional&nbsp;pixels&nbsp;to&nbsp;scroll&nbsp;back&nbsp;along&nbsp;the&nbsp;Y&nbsp;axis.<br>
+&nbsp;&nbsp;preventFling:&nbsp;Prevents&nbsp;a&nbsp;fling&nbsp;gesture.<br>
+&nbsp;&nbsp;speed:&nbsp;Swipe&nbsp;speed&nbsp;in&nbsp;pixels&nbsp;per&nbsp;second.<br>
+&nbsp;&nbsp;gestureSourceType:&nbsp;Which&nbsp;type&nbsp;of&nbsp;input&nbsp;events&nbsp;to&nbsp;be&nbsp;generated.<br>
+&nbsp;&nbsp;repeatCount:&nbsp;Number&nbsp;of&nbsp;additional&nbsp;repeats&nbsp;beyond&nbsp;the&nbsp;first&nbsp;scroll.<br>
+&nbsp;&nbsp;repeatDelayMs:&nbsp;Number&nbsp;of&nbsp;milliseconds&nbsp;delay&nbsp;between&nbsp;each&nbsp;repeat.<br>
+&nbsp;&nbsp;interactionMarkerName:&nbsp;The&nbsp;name&nbsp;of&nbsp;the&nbsp;interaction&nbsp;markers&nbsp;to&nbsp;generate.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
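
A minimal sketch of a repeated downward scroll. Note the sign convention from the docstring: positive yDistance scrolls up, so a negative value scrolls the content down.

```python
def scroll_page_down(tab):
    tab.SynthesizeScrollGesture(
        x=100, y=800,      # gesture start point in CSS pixels
        yDistance=-500,    # negative value: scroll the content down
        repeatCount=2,     # two extra repeats after the first scroll
        repeatDelayMs=250)
```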
+
+<dl><dt><a name="Tab-WaitForDocumentReadyStateToBeComplete"><strong>WaitForDocumentReadyStateToBeComplete</strong></a>(self, timeout<font color="#909090">=90</font>)</dt><dd><tt>Waits&nbsp;for&nbsp;the&nbsp;document&nbsp;to&nbsp;finish&nbsp;loading.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;See&nbsp;<a href="#Tab-WaitForJavaScriptExpression">WaitForJavaScriptExpression</a>()&nbsp;for&nbsp;a&nbsp;detailed&nbsp;list<br>
+&nbsp;&nbsp;of&nbsp;possible&nbsp;exceptions.</tt></dd></dl>
+
+<dl><dt><a name="Tab-WaitForDocumentReadyStateToBeInteractiveOrBetter"><strong>WaitForDocumentReadyStateToBeInteractiveOrBetter</strong></a>(self, timeout<font color="#909090">=90</font>)</dt><dd><tt>Waits&nbsp;for&nbsp;the&nbsp;document&nbsp;to&nbsp;be&nbsp;interactive.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;See&nbsp;<a href="#Tab-WaitForJavaScriptExpression">WaitForJavaScriptExpression</a>()&nbsp;for&nbsp;a&nbsp;detailed&nbsp;list<br>
+&nbsp;&nbsp;of&nbsp;possible&nbsp;exceptions.</tt></dd></dl>
+
+<dl><dt><a name="Tab-WaitForJavaScriptExpression"><strong>WaitForJavaScriptExpression</strong></a>(self, expr, timeout, dump_page_state_on_timeout<font color="#909090">=True</font>)</dt><dd><tt>Waits&nbsp;for&nbsp;the&nbsp;given&nbsp;JavaScript&nbsp;expression&nbsp;to&nbsp;be&nbsp;True.<br>
+&nbsp;<br>
+This&nbsp;method&nbsp;is&nbsp;robust&nbsp;against&nbsp;any&nbsp;given&nbsp;evaluation&nbsp;timing&nbsp;out.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;expr:&nbsp;The&nbsp;expression&nbsp;to&nbsp;evaluate.<br>
+&nbsp;&nbsp;timeout:&nbsp;The&nbsp;number&nbsp;of&nbsp;seconds&nbsp;to&nbsp;wait&nbsp;for&nbsp;the&nbsp;expression&nbsp;to&nbsp;be&nbsp;True.<br>
+&nbsp;&nbsp;dump_page_state_on_timeout:&nbsp;Whether&nbsp;to&nbsp;provide&nbsp;additional&nbsp;information&nbsp;on<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;the&nbsp;page&nbsp;state&nbsp;if&nbsp;a&nbsp;TimeoutException&nbsp;is&nbsp;thrown.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.TimeoutException:&nbsp;On&nbsp;a&nbsp;timeout.<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;See&nbsp;<a href="#Tab-EvaluateJavaScript">EvaluateJavaScript</a>()&nbsp;for&nbsp;a&nbsp;detailed&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;possible&nbsp;exceptions.</tt></dd></dl>
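
A minimal sketch of waiting on an app-defined readiness flag; the expression and the 30-second budget are arbitrary examples.

```python
from telemetry.core import exceptions

def wait_for_app_ready(tab):
    try:
        tab.WaitForJavaScriptExpression('window.__appReady === true', 30)
        return True
    except exceptions.TimeoutException:
        # With dump_page_state_on_timeout=True (the default), the exception
        # carries extra page-state detail to help diagnose the hang.
        return False
```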
+
+<dl><dt><a name="Tab-WaitForNavigate"><strong>WaitForNavigate</strong></a>(self, timeout<font color="#909090">=90</font>)</dt><dd><tt>Waits&nbsp;for&nbsp;the&nbsp;navigation&nbsp;to&nbsp;complete.<br>
+&nbsp;<br>
+The&nbsp;current&nbsp;page&nbsp;is&nbsp;expected&nbsp;to&nbsp;be&nbsp;in&nbsp;a&nbsp;navigation.<br>
+This&nbsp;function&nbsp;returns&nbsp;when&nbsp;the&nbsp;navigation&nbsp;is&nbsp;complete&nbsp;or&nbsp;when<br>
+the&nbsp;timeout&nbsp;has&nbsp;been&nbsp;exceeded.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.browser.web_contents.html#WebContents">telemetry.internal.browser.web_contents.WebContents</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>id</strong></dt>
+<dd><tt>Return&nbsp;the&nbsp;unique&nbsp;id&nbsp;string&nbsp;for&nbsp;this&nbsp;tab&nbsp;object.</tt></dd>
+</dl>
+<dl><dt><strong>message_output_stream</strong></dt>
+</dl>
+<dl><dt><strong>timeline_model</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>DEFAULT_TAB_TIMEOUT</strong> = 60</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.browser.tab_list.html b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.tab_list.html
new file mode 100644
index 0000000..cccfea5
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.tab_list.html
@@ -0,0 +1,64 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.browser.tab_list</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.browser.html"><font color="#ffffff">browser</font></a>.tab_list</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/browser/tab_list.py">telemetry/internal/browser/tab_list.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.browser.tab_list.html#TabList">TabList</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TabList">class <strong>TabList</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="TabList-GetTabById"><strong>GetTabById</strong></a>(self, identifier)</dt><dd><tt>The&nbsp;identifier&nbsp;of&nbsp;a&nbsp;tab&nbsp;can&nbsp;be&nbsp;accessed&nbsp;with&nbsp;tab.id.</tt></dd></dl>
+
+<dl><dt><a name="TabList-New"><strong>New</strong></a>(self, timeout<font color="#909090">=300</font>)</dt></dl>
+
+<dl><dt><a name="TabList-__getitem__"><strong>__getitem__</strong></a>(self, index)</dt></dl>
+
+<dl><dt><a name="TabList-__init__"><strong>__init__</strong></a>(self, tab_list_backend)</dt></dl>
+
+<dl><dt><a name="TabList-__iter__"><strong>__iter__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TabList-__len__"><strong>__len__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
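
A minimal sketch of driving a TabList; `browser` is assumed to be a running telemetry Browser whose `tabs` attribute exposes this TabList (attribute name assumed, not shown in this patch), and New() is assumed to return the newly opened Tab.

```python
def open_and_find(browser, url):
    tab = browser.tabs.New(timeout=300)   # assumed to return the new Tab
    tab.Navigate(url)
    print('open tabs: %d' % len(browser.tabs))
    same_tab = browser.tabs.GetTabById(tab.id)  # id is documented on the Tab
    same_tab.Close()  # requires browser.supports_tab_control
```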
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.browser.user_agent.html b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.user_agent.html
new file mode 100644
index 0000000..cdc875e
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.user_agent.html
@@ -0,0 +1,34 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.browser.user_agent</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.browser.html"><font color="#ffffff">browser</font></a>.user_agent</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/browser/user_agent.py">telemetry/internal/browser/user_agent.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-GetChromeUserAgentArgumentFromType"><strong>GetChromeUserAgentArgumentFromType</strong></a>(user_agent_type)</dt><dd><tt>Returns&nbsp;a&nbsp;chrome&nbsp;user&nbsp;agent&nbsp;based&nbsp;on&nbsp;a&nbsp;user&nbsp;agent&nbsp;type.<br>
+This&nbsp;is&nbsp;derived&nbsp;from:<br>
+https://developers.google.com/chrome/mobile/docs/user-agent</tt></dd></dl>
+</td></tr></table><p>
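
A minimal sketch of looking up the spoofed user agent for each device class in UA_TYPE_MAPPING; whether the function returns a full --user-agent flag or a bare UA string is inferred from its name, not stated in this patch.

```python
from telemetry.internal.browser import user_agent

for ua_type in ('desktop', 'mobile', 'tablet', 'tablet_10_inch'):
    print('%s: %s' % (ua_type,
                      user_agent.GetChromeUserAgentArgumentFromType(ua_type)))
```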
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>UA_TYPE_MAPPING</strong> = {'desktop': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) A...TML, like Gecko) Chrome/40.0.2194.2 Safari/537.36', 'mobile': 'Mozilla/5.0 (Linux; Android 4.0.4; Galaxy Nexus ...ke Gecko) Chrome/40.0.2194.2 Mobile Safari/535.36', 'tablet': 'Mozilla/5.0 (Linux; Android 4.0.4; Galaxy Nexus ...TML, like Gecko) Chrome/40.0.2194.2 Safari/535.36', 'tablet_10_inch': 'Mozilla/5.0 (Linux; Android 4.0.4; Galaxy Nexus ...TML, like Gecko) Chrome/40.0.2194.2 Safari/535.36'}</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.browser.web_contents.html b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.web_contents.html
new file mode 100644
index 0000000..0a7a99e
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.browser.web_contents.html
@@ -0,0 +1,238 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.browser.web_contents</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.browser.html"><font color="#ffffff">browser</font></a>.web_contents</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/browser/web_contents.py">telemetry/internal/browser/web_contents.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.browser.web_contents.html#WebContents">WebContents</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="WebContents">class <strong>WebContents</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Represents&nbsp;web&nbsp;contents&nbsp;in&nbsp;the&nbsp;browser<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="WebContents-CloseConnections"><strong>CloseConnections</strong></a>(self)</dt><dd><tt>Closes&nbsp;all&nbsp;TCP&nbsp;sockets&nbsp;held&nbsp;open&nbsp;by&nbsp;the&nbsp;browser.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException&nbsp;if&nbsp;the&nbsp;tab&nbsp;is&nbsp;not&nbsp;alive.</tt></dd></dl>
+
+<dl><dt><a name="WebContents-EnableAllContexts"><strong>EnableAllContexts</strong></a>(self)</dt><dd><tt>Enable&nbsp;all&nbsp;contexts&nbsp;in&nbsp;a&nbsp;page.&nbsp;Returns&nbsp;the&nbsp;number&nbsp;of&nbsp;available&nbsp;contexts.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.WebSocketDisconnected<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="WebContents-EvaluateJavaScript"><strong>EvaluateJavaScript</strong></a>(self, expr, timeout<font color="#909090">=90</font>)</dt><dd><tt>Evaluates&nbsp;expr&nbsp;in&nbsp;JavaScript&nbsp;and&nbsp;returns&nbsp;the&nbsp;JSONized&nbsp;result.<br>
+&nbsp;<br>
+Consider&nbsp;using&nbsp;ExecuteJavaScript&nbsp;for&nbsp;cases&nbsp;where&nbsp;the&nbsp;result&nbsp;of&nbsp;the<br>
+expression&nbsp;is&nbsp;not&nbsp;needed.<br>
+&nbsp;<br>
+If&nbsp;evaluation&nbsp;throws&nbsp;in&nbsp;JavaScript,&nbsp;a&nbsp;Python&nbsp;EvaluateException&nbsp;will<br>
+be&nbsp;raised.<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;result&nbsp;of&nbsp;the&nbsp;evaluation&nbsp;cannot&nbsp;be&nbsp;JSONized,&nbsp;then&nbsp;an<br>
+EvaluateException&nbsp;will&nbsp;be&nbsp;raised.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;See&nbsp;<a href="#WebContents-EvaluateJavaScriptInContext">EvaluateJavaScriptInContext</a>()&nbsp;for&nbsp;a&nbsp;detailed&nbsp;list<br>
+&nbsp;&nbsp;of&nbsp;possible&nbsp;exceptions.</tt></dd></dl>
+
+<dl><dt><a name="WebContents-EvaluateJavaScriptInContext"><strong>EvaluateJavaScriptInContext</strong></a>(self, expr, context_id, timeout<font color="#909090">=90</font>)</dt><dd><tt>Similar&nbsp;to&nbsp;ExecuteJavaScript,&nbsp;except&nbsp;context_id&nbsp;can&nbsp;refer&nbsp;to&nbsp;an&nbsp;iframe.<br>
+The&nbsp;main&nbsp;page&nbsp;has&nbsp;context_id=1,&nbsp;the&nbsp;first&nbsp;iframe&nbsp;context_id=2,&nbsp;etc.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.EvaluateException<br>
+&nbsp;&nbsp;exceptions.WebSocketDisconnected<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="WebContents-ExecuteJavaScript"><strong>ExecuteJavaScript</strong></a>(self, statement, timeout<font color="#909090">=90</font>)</dt><dd><tt>Executes&nbsp;statement&nbsp;in&nbsp;JavaScript.&nbsp;Does&nbsp;not&nbsp;return&nbsp;the&nbsp;result.<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;statement&nbsp;failed&nbsp;to&nbsp;evaluate,&nbsp;EvaluateException&nbsp;will&nbsp;be&nbsp;raised.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;See&nbsp;<a href="#WebContents-ExecuteJavaScriptInContext">ExecuteJavaScriptInContext</a>()&nbsp;for&nbsp;a&nbsp;detailed&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;possible&nbsp;exceptions.</tt></dd></dl>
+
+<dl><dt><a name="WebContents-ExecuteJavaScriptInContext"><strong>ExecuteJavaScriptInContext</strong></a>(self, expr, context_id, timeout<font color="#909090">=90</font>)</dt><dd><tt>Similar&nbsp;to&nbsp;ExecuteJavaScript,&nbsp;except&nbsp;context_id&nbsp;can&nbsp;refer&nbsp;to&nbsp;an&nbsp;iframe.<br>
+The&nbsp;main&nbsp;page&nbsp;has&nbsp;context_id=1,&nbsp;the&nbsp;first&nbsp;iframe&nbsp;context_id=2,&nbsp;etc.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.EvaluateException<br>
+&nbsp;&nbsp;exceptions.WebSocketDisconnected<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="WebContents-GetUrl"><strong>GetUrl</strong></a>(self)</dt><dd><tt>Returns&nbsp;the&nbsp;URL&nbsp;to&nbsp;which&nbsp;the&nbsp;<a href="#WebContents">WebContents</a>&nbsp;is&nbsp;connected.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;If&nbsp;there&nbsp;is&nbsp;an&nbsp;error&nbsp;in&nbsp;inspector&nbsp;backend&nbsp;connection.</tt></dd></dl>
+
+<dl><dt><a name="WebContents-GetWebviewContexts"><strong>GetWebviewContexts</strong></a>(self)</dt><dd><tt>Returns&nbsp;a&nbsp;list&nbsp;of&nbsp;webview&nbsp;contexts&nbsp;within&nbsp;the&nbsp;current&nbsp;inspector&nbsp;backend.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;list&nbsp;of&nbsp;<a href="#WebContents">WebContents</a>&nbsp;objects&nbsp;representing&nbsp;the&nbsp;webview&nbsp;contexts.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;If&nbsp;there&nbsp;is&nbsp;an&nbsp;error&nbsp;in&nbsp;inspector&nbsp;backend&nbsp;connection.</tt></dd></dl>
+
+<dl><dt><a name="WebContents-HasReachedQuiescence"><strong>HasReachedQuiescence</strong></a>(self)</dt><dd><tt>Determine&nbsp;whether&nbsp;the&nbsp;page&nbsp;has&nbsp;reached&nbsp;quiescence&nbsp;after&nbsp;loading.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;True&nbsp;if&nbsp;2&nbsp;seconds&nbsp;have&nbsp;passed&nbsp;since&nbsp;last&nbsp;resource&nbsp;received,&nbsp;false<br>
+&nbsp;&nbsp;otherwise.<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;See&nbsp;<a href="#WebContents-EvaluateJavaScript">EvaluateJavaScript</a>()&nbsp;for&nbsp;a&nbsp;detailed&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;possible&nbsp;exceptions.</tt></dd></dl>
+
+<dl><dt><a name="WebContents-IsAlive"><strong>IsAlive</strong></a>(self)</dt><dd><tt>Whether&nbsp;the&nbsp;<a href="#WebContents">WebContents</a>&nbsp;is&nbsp;still&nbsp;operating&nbsp;normally.<br>
+&nbsp;<br>
+Since&nbsp;<a href="#WebContents">WebContents</a>&nbsp;function&nbsp;asynchronously,&nbsp;this&nbsp;method&nbsp;does&nbsp;not&nbsp;guarantee<br>
+that&nbsp;the&nbsp;<a href="#WebContents">WebContents</a>&nbsp;will&nbsp;still&nbsp;be&nbsp;alive&nbsp;at&nbsp;any&nbsp;point&nbsp;in&nbsp;the&nbsp;future.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;boolean&nbsp;indicating&nbsp;whether&nbsp;the&nbsp;<a href="#WebContents">WebContents</a>&nbsp;is&nbsp;operating&nbsp;normally.</tt></dd></dl>
+
+<dl><dt><a name="WebContents-Navigate"><strong>Navigate</strong></a>(self, url, script_to_evaluate_on_commit<font color="#909090">=None</font>, timeout<font color="#909090">=90</font>)</dt><dd><tt>Navigates&nbsp;to&nbsp;url.<br>
+&nbsp;<br>
+If&nbsp;|script_to_evaluate_on_commit|&nbsp;is&nbsp;given,&nbsp;the&nbsp;script&nbsp;source&nbsp;string&nbsp;will&nbsp;be<br>
+evaluated&nbsp;when&nbsp;the&nbsp;navigation&nbsp;is&nbsp;committed.&nbsp;This&nbsp;is&nbsp;after&nbsp;the&nbsp;context&nbsp;of<br>
+the&nbsp;page&nbsp;exists,&nbsp;but&nbsp;before&nbsp;any&nbsp;script&nbsp;on&nbsp;the&nbsp;page&nbsp;itself&nbsp;has&nbsp;executed.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="WebContents-StartTimelineRecording"><strong>StartTimelineRecording</strong></a>(self)</dt><dd><tt>Starts&nbsp;timeline&nbsp;recording.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="WebContents-StopTimelineRecording"><strong>StopTimelineRecording</strong></a>(self)</dt><dd><tt>Stops&nbsp;timeline&nbsp;recording.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="WebContents-SynthesizeScrollGesture"><strong>SynthesizeScrollGesture</strong></a>(self, x<font color="#909090">=100</font>, y<font color="#909090">=800</font>, xDistance<font color="#909090">=0</font>, yDistance<font color="#909090">=-500</font>, xOverscroll<font color="#909090">=None</font>, yOverscroll<font color="#909090">=None</font>, preventFling<font color="#909090">=True</font>, speed<font color="#909090">=None</font>, gestureSourceType<font color="#909090">=None</font>, repeatCount<font color="#909090">=None</font>, repeatDelayMs<font color="#909090">=None</font>, interactionMarkerName<font color="#909090">=None</font>)</dt><dd><tt>Runs&nbsp;an&nbsp;inspector&nbsp;command&nbsp;that&nbsp;causes&nbsp;a&nbsp;repeatable&nbsp;browser&nbsp;driven&nbsp;scroll.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;x:&nbsp;X&nbsp;coordinate&nbsp;of&nbsp;the&nbsp;start&nbsp;of&nbsp;the&nbsp;gesture&nbsp;in&nbsp;CSS&nbsp;pixels.<br>
+&nbsp;&nbsp;y:&nbsp;Y&nbsp;coordinate&nbsp;of&nbsp;the&nbsp;start&nbsp;of&nbsp;the&nbsp;gesture&nbsp;in&nbsp;CSS&nbsp;pixels.<br>
+&nbsp;&nbsp;xDistance:&nbsp;Distance&nbsp;to&nbsp;scroll&nbsp;along&nbsp;the&nbsp;X&nbsp;axis&nbsp;(positive&nbsp;to&nbsp;scroll&nbsp;left).<br>
+&nbsp;&nbsp;yDistance:&nbsp;Distance&nbsp;to&nbsp;scroll&nbsp;along&nbsp;the&nbsp;Y&nbsp;axis&nbsp;(positive&nbsp;to&nbsp;scroll&nbsp;up).<br>
+&nbsp;&nbsp;xOverscroll:&nbsp;Number&nbsp;of&nbsp;additional&nbsp;pixels&nbsp;to&nbsp;scroll&nbsp;back&nbsp;along&nbsp;the&nbsp;X&nbsp;axis.<br>
+&nbsp;&nbsp;yOverscroll:&nbsp;Number&nbsp;of&nbsp;additional&nbsp;pixels&nbsp;to&nbsp;scroll&nbsp;back&nbsp;along&nbsp;the&nbsp;Y&nbsp;axis.<br>
+&nbsp;&nbsp;preventFling:&nbsp;Prevents&nbsp;a&nbsp;fling&nbsp;gesture.<br>
+&nbsp;&nbsp;speed:&nbsp;Swipe&nbsp;speed&nbsp;in&nbsp;pixels&nbsp;per&nbsp;second.<br>
+&nbsp;&nbsp;gestureSourceType:&nbsp;Which&nbsp;type&nbsp;of&nbsp;input&nbsp;events&nbsp;to&nbsp;be&nbsp;generated.<br>
+&nbsp;&nbsp;repeatCount:&nbsp;Number&nbsp;of&nbsp;additional&nbsp;repeats&nbsp;beyond&nbsp;the&nbsp;first&nbsp;scroll.<br>
+&nbsp;&nbsp;repeatDelayMs:&nbsp;Number&nbsp;of&nbsp;milliseconds&nbsp;delay&nbsp;between&nbsp;each&nbsp;repeat.<br>
+&nbsp;&nbsp;interactionMarkerName:&nbsp;The&nbsp;name&nbsp;of&nbsp;the&nbsp;interaction&nbsp;markers&nbsp;to&nbsp;generate.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="WebContents-WaitForDocumentReadyStateToBeComplete"><strong>WaitForDocumentReadyStateToBeComplete</strong></a>(self, timeout<font color="#909090">=90</font>)</dt><dd><tt>Waits&nbsp;for&nbsp;the&nbsp;document&nbsp;to&nbsp;finish&nbsp;loading.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;See&nbsp;<a href="#WebContents-WaitForJavaScriptExpression">WaitForJavaScriptExpression</a>()&nbsp;for&nbsp;a&nbsp;detailed&nbsp;list<br>
+&nbsp;&nbsp;of&nbsp;possible&nbsp;exceptions.</tt></dd></dl>
+
+<dl><dt><a name="WebContents-WaitForDocumentReadyStateToBeInteractiveOrBetter"><strong>WaitForDocumentReadyStateToBeInteractiveOrBetter</strong></a>(self, timeout<font color="#909090">=90</font>)</dt><dd><tt>Waits&nbsp;for&nbsp;the&nbsp;document&nbsp;to&nbsp;be&nbsp;interactive.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;See&nbsp;<a href="#WebContents-WaitForJavaScriptExpression">WaitForJavaScriptExpression</a>()&nbsp;for&nbsp;a&nbsp;detailed&nbsp;list<br>
+&nbsp;&nbsp;of&nbsp;possible&nbsp;exceptions.</tt></dd></dl>
+
+<dl><dt><a name="WebContents-WaitForJavaScriptExpression"><strong>WaitForJavaScriptExpression</strong></a>(self, expr, timeout, dump_page_state_on_timeout<font color="#909090">=True</font>)</dt><dd><tt>Waits&nbsp;for&nbsp;the&nbsp;given&nbsp;JavaScript&nbsp;expression&nbsp;to&nbsp;be&nbsp;True.<br>
+&nbsp;<br>
+This&nbsp;method&nbsp;is&nbsp;robust&nbsp;against&nbsp;any&nbsp;given&nbsp;evaluation&nbsp;timing&nbsp;out.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;expr:&nbsp;The&nbsp;expression&nbsp;to&nbsp;evaluate.<br>
+&nbsp;&nbsp;timeout:&nbsp;The&nbsp;number&nbsp;of&nbsp;seconds&nbsp;to&nbsp;wait&nbsp;for&nbsp;the&nbsp;expression&nbsp;to&nbsp;be&nbsp;True.<br>
+&nbsp;&nbsp;dump_page_state_on_timeout:&nbsp;Whether&nbsp;to&nbsp;provide&nbsp;additional&nbsp;information&nbsp;on<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;the&nbsp;page&nbsp;state&nbsp;if&nbsp;a&nbsp;TimeoutException&nbsp;is&nbsp;thrown.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.TimeoutException:&nbsp;On&nbsp;a&nbsp;timeout.<br>
+&nbsp;&nbsp;exceptions.Error:&nbsp;See&nbsp;<a href="#WebContents-EvaluateJavaScript">EvaluateJavaScript</a>()&nbsp;for&nbsp;a&nbsp;detailed&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;possible&nbsp;exceptions.</tt></dd></dl>
+
+<dl><dt><a name="WebContents-WaitForNavigate"><strong>WaitForNavigate</strong></a>(self, timeout<font color="#909090">=90</font>)</dt><dd><tt>Waits&nbsp;for&nbsp;the&nbsp;navigation&nbsp;to&nbsp;complete.<br>
+&nbsp;<br>
+The&nbsp;current&nbsp;page&nbsp;is&nbsp;expected&nbsp;to&nbsp;be&nbsp;in&nbsp;a&nbsp;navigation.<br>
+This&nbsp;function&nbsp;returns&nbsp;when&nbsp;the&nbsp;navigation&nbsp;is&nbsp;complete&nbsp;or&nbsp;when<br>
+the&nbsp;timeout&nbsp;has&nbsp;been&nbsp;exceeded.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;exceptions.TimeoutException<br>
+&nbsp;&nbsp;exceptions.DevtoolsTargetCrashException</tt></dd></dl>
+
+<dl><dt><a name="WebContents-__init__"><strong>__init__</strong></a>(self, inspector_backend)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>id</strong></dt>
+<dd><tt>Return&nbsp;the&nbsp;unique&nbsp;id&nbsp;string&nbsp;for&nbsp;this&nbsp;tab&nbsp;object.</tt></dd>
+</dl>
+<dl><dt><strong>message_output_stream</strong></dt>
+</dl>
+<dl><dt><strong>timeline_model</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>DEFAULT_WEB_CONTENTS_TIMEOUT</strong> = 90</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.forwarders.android_forwarder.html b/catapult/telemetry/docs/pydoc/telemetry.internal.forwarders.android_forwarder.html
new file mode 100644
index 0000000..eff075e
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.forwarders.android_forwarder.html
@@ -0,0 +1,202 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.forwarders.android_forwarder</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.forwarders.html"><font color="#ffffff">forwarders</font></a>.android_forwarder</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/forwarders/android_forwarder.py">telemetry/internal/forwarders/android_forwarder.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.platform.android_device.html">telemetry.internal.platform.android_device</a><br>
+<a href="atexit.html">atexit</a><br>
+<a href="telemetry.internal.util.binary_manager.html">telemetry.internal.util.binary_manager</a><br>
+<a href="devil.android.device_errors.html">devil.android.device_errors</a><br>
+</td><td width="25%" valign=top><a href="devil.android.device_utils.html">devil.android.device_utils</a><br>
+<a href="pylib.forwarder.html">pylib.forwarder</a><br>
+<a href="telemetry.internal.forwarders.html">telemetry.internal.forwarders</a><br>
+<a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+<a href="telemetry.core.platform.html">telemetry.core.platform</a><br>
+<a href="re.html">re</a><br>
+<a href="socket.html">socket</a><br>
+</td><td width="25%" valign=top><a href="struct.html">struct</a><br>
+<a href="subprocess.html">subprocess</a><br>
+<a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.forwarders.android_forwarder.html#AndroidRndisConfigurator">AndroidRndisConfigurator</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.forwarders.html#Forwarder">telemetry.internal.forwarders.Forwarder</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.forwarders.android_forwarder.html#AndroidForwarder">AndroidForwarder</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.internal.forwarders.android_forwarder.html#AndroidRndisForwarder">AndroidRndisForwarder</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.forwarders.html#ForwarderFactory">telemetry.internal.forwarders.ForwarderFactory</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.forwarders.android_forwarder.html#AndroidForwarderFactory">AndroidForwarderFactory</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="AndroidForwarder">class <strong>AndroidForwarder</strong></a>(<a href="telemetry.internal.forwarders.html#Forwarder">telemetry.internal.forwarders.Forwarder</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.forwarders.android_forwarder.html#AndroidForwarder">AndroidForwarder</a></dd>
+<dd><a href="telemetry.internal.forwarders.html#Forwarder">telemetry.internal.forwarders.Forwarder</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="AndroidForwarder-Close"><strong>Close</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidForwarder-__init__"><strong>__init__</strong></a>(self, device, port_pairs)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.forwarders.html#Forwarder">telemetry.internal.forwarders.Forwarder</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>host_ip</strong></dt>
+</dl>
+<dl><dt><strong>host_port</strong></dt>
+</dl>
+<dl><dt><strong>port_pairs</strong></dt>
+</dl>
+<dl><dt><strong>url</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="AndroidForwarderFactory">class <strong>AndroidForwarderFactory</strong></a>(<a href="telemetry.internal.forwarders.html#ForwarderFactory">telemetry.internal.forwarders.ForwarderFactory</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.forwarders.android_forwarder.html#AndroidForwarderFactory">AndroidForwarderFactory</a></dd>
+<dd><a href="telemetry.internal.forwarders.html#ForwarderFactory">telemetry.internal.forwarders.ForwarderFactory</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="AndroidForwarderFactory-Create"><strong>Create</strong></a>(self, port_pairs)</dt></dl>
+
+<dl><dt><a name="AndroidForwarderFactory-__init__"><strong>__init__</strong></a>(self, device, use_rndis)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>does_forwarder_override_dns</strong></dt>
+</dl>
+<dl><dt><strong>host_ip</strong></dt>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.forwarders.html#ForwarderFactory">telemetry.internal.forwarders.ForwarderFactory</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
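
A heavily hedged sketch of using the factory documented above; `device` is assumed to be a devil DeviceUtils handle, and the expected structure of `port_pairs` is not shown in this patch and is left to the caller.

```python
import contextlib

from telemetry.internal.forwarders import android_forwarder

@contextlib.contextmanager
def forwarded_ports(device, port_pairs, use_rndis=False):
    # AndroidForwarderFactory(device, use_rndis) and Create(port_pairs) match
    # the signatures documented above; the shape of port_pairs does not.
    factory = android_forwarder.AndroidForwarderFactory(device, use_rndis)
    forwarder = factory.Create(port_pairs)
    try:
        yield forwarder   # keep the forwarder open while the caller uses it
    finally:
        forwarder.Close()
```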
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="AndroidRndisConfigurator">class <strong>AndroidRndisConfigurator</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Configures&nbsp;a&nbsp;Linux&nbsp;host&nbsp;to&nbsp;connect&nbsp;to&nbsp;an&nbsp;Android&nbsp;device&nbsp;via&nbsp;RNDIS.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;we&nbsp;intentionally&nbsp;leave&nbsp;RNDIS&nbsp;running&nbsp;on&nbsp;the&nbsp;device.&nbsp;This&nbsp;is<br>
+because&nbsp;the&nbsp;setup&nbsp;is&nbsp;slow&nbsp;and&nbsp;potentially&nbsp;flaky&nbsp;and&nbsp;leaving&nbsp;it&nbsp;running<br>
+doesn't&nbsp;seem&nbsp;to&nbsp;interfere&nbsp;with&nbsp;any&nbsp;other&nbsp;developer&nbsp;or&nbsp;bot&nbsp;use-cases.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="AndroidRndisConfigurator-OverrideRoutingPolicy"><strong>OverrideRoutingPolicy</strong></a>(self)</dt><dd><tt>Override&nbsp;any&nbsp;routing&nbsp;policy&nbsp;that&nbsp;could&nbsp;prevent<br>
+packets&nbsp;from&nbsp;reaching&nbsp;the&nbsp;RNDIS&nbsp;interface.</tt></dd></dl>
+
+<dl><dt><a name="AndroidRndisConfigurator-RestoreRoutingPolicy"><strong>RestoreRoutingPolicy</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidRndisConfigurator-__init__"><strong>__init__</strong></a>(self, device)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>host_ip</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="AndroidRndisForwarder">class <strong>AndroidRndisForwarder</strong></a>(<a href="telemetry.internal.forwarders.html#Forwarder">telemetry.internal.forwarders.Forwarder</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Forwards&nbsp;traffic&nbsp;using&nbsp;RNDIS.&nbsp;Assumes&nbsp;the&nbsp;device&nbsp;has&nbsp;root&nbsp;access.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.forwarders.android_forwarder.html#AndroidRndisForwarder">AndroidRndisForwarder</a></dd>
+<dd><a href="telemetry.internal.forwarders.html#Forwarder">telemetry.internal.forwarders.Forwarder</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="AndroidRndisForwarder-Close"><strong>Close</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidRndisForwarder-__init__"><strong>__init__</strong></a>(self, device, rndis_configurator, port_pairs)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>host_ip</strong></dt>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.forwarders.html#Forwarder">telemetry.internal.forwarders.Forwarder</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>host_port</strong></dt>
+</dl>
+<dl><dt><strong>port_pairs</strong></dt>
+</dl>
+<dl><dt><strong>url</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.forwarders.cros_forwarder.html b/catapult/telemetry/docs/pydoc/telemetry.internal.forwarders.cros_forwarder.html
new file mode 100644
index 0000000..7c7b9ae
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.forwarders.cros_forwarder.html
@@ -0,0 +1,116 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.forwarders.cros_forwarder</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.forwarders.html"><font color="#ffffff">forwarders</font></a>.cros_forwarder</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/forwarders/cros_forwarder.py">telemetry/internal/forwarders/cros_forwarder.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.forwarders.do_nothing_forwarder.html">telemetry.internal.forwarders.do_nothing_forwarder</a><br>
+<a href="telemetry.internal.forwarders.html">telemetry.internal.forwarders</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+<a href="subprocess.html">subprocess</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.forwarders.html#Forwarder">telemetry.internal.forwarders.Forwarder</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.forwarders.cros_forwarder.html#CrOsSshForwarder">CrOsSshForwarder</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.forwarders.html#ForwarderFactory">telemetry.internal.forwarders.ForwarderFactory</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.forwarders.cros_forwarder.html#CrOsForwarderFactory">CrOsForwarderFactory</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="CrOsForwarderFactory">class <strong>CrOsForwarderFactory</strong></a>(<a href="telemetry.internal.forwarders.html#ForwarderFactory">telemetry.internal.forwarders.ForwarderFactory</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.forwarders.cros_forwarder.html#CrOsForwarderFactory">CrOsForwarderFactory</a></dd>
+<dd><a href="telemetry.internal.forwarders.html#ForwarderFactory">telemetry.internal.forwarders.ForwarderFactory</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="CrOsForwarderFactory-Create"><strong>Create</strong></a>(self, port_pairs, use_remote_port_forwarding<font color="#909090">=True</font>)</dt><dd><tt>#&nbsp;pylint:&nbsp;disable=arguments-differ</tt></dd></dl>
+
+<dl><dt><a name="CrOsForwarderFactory-__init__"><strong>__init__</strong></a>(self, cri)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.forwarders.html#ForwarderFactory">telemetry.internal.forwarders.ForwarderFactory</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>does_forwarder_override_dns</strong></dt>
+</dl>
+<dl><dt><strong>host_ip</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="CrOsSshForwarder">class <strong>CrOsSshForwarder</strong></a>(<a href="telemetry.internal.forwarders.html#Forwarder">telemetry.internal.forwarders.Forwarder</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.forwarders.cros_forwarder.html#CrOsSshForwarder">CrOsSshForwarder</a></dd>
+<dd><a href="telemetry.internal.forwarders.html#Forwarder">telemetry.internal.forwarders.Forwarder</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="CrOsSshForwarder-Close"><strong>Close</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrOsSshForwarder-__init__"><strong>__init__</strong></a>(self, cri, use_remote_port_forwarding, port_pairs)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>host_port</strong></dt>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.forwarders.html#Forwarder">telemetry.internal.forwarders.Forwarder</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>host_ip</strong></dt>
+</dl>
+<dl><dt><strong>port_pairs</strong></dt>
+</dl>
+<dl><dt><strong>url</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
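
The CrOS pieces documented above follow a factory/forwarder split: the factory holds the CrOS remote interface (`cri`) and `Create(port_pairs, use_remote_port_forwarding=True)` builds an SSH forwarder from it. The sketch below mirrors only those documented signatures; the `*Sketch` class names and the `None` stand-in for `cri` are assumptions for illustration.

```
class CrOsForwarderFactorySketch(object):
  """Mirror of the documented CrOsForwarderFactory call surface."""

  def __init__(self, cri):
    self._cri = cri  # placeholder for the CrOS ssh/remote interface

  def Create(self, port_pairs, use_remote_port_forwarding=True):
    return CrOsSshForwarderSketch(self._cri, use_remote_port_forwarding,
                                  port_pairs)


class CrOsSshForwarderSketch(object):
  """Mirror of the documented CrOsSshForwarder constructor and Close()."""

  def __init__(self, cri, use_remote_port_forwarding, port_pairs):
    self._cri = cri
    self._use_remote = use_remote_port_forwarding
    self._port_pairs = port_pairs

  def Close(self):
    self._port_pairs = None


forwarder = CrOsForwarderFactorySketch(cri=None).Create(port_pairs=[(80, 80)])
forwarder.Close()
```
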
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.forwarders.do_nothing_forwarder.html b/catapult/telemetry/docs/pydoc/telemetry.internal.forwarders.do_nothing_forwarder.html
new file mode 100644
index 0000000..ad535fc
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.forwarders.do_nothing_forwarder.html
@@ -0,0 +1,316 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.forwarders.do_nothing_forwarder</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.forwarders.html"><font color="#ffffff">forwarders</font></a>.do_nothing_forwarder</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/forwarders/do_nothing_forwarder.py">telemetry/internal/forwarders/do_nothing_forwarder.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="contextlib.html">contextlib</a><br>
+<a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.forwarders.html">telemetry.internal.forwarders</a><br>
+<a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="socket.html">socket</a><br>
+<a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.forwarders.do_nothing_forwarder.html#Error">Error</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.forwarders.do_nothing_forwarder.html#ConnectionError">ConnectionError</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.internal.forwarders.do_nothing_forwarder.html#PortsMismatchError">PortsMismatchError</a>
+</font></dt></dl>
+</dd>
+</dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.forwarders.html#Forwarder">telemetry.internal.forwarders.Forwarder</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.forwarders.do_nothing_forwarder.html#DoNothingForwarder">DoNothingForwarder</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.forwarders.html#ForwarderFactory">telemetry.internal.forwarders.ForwarderFactory</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.forwarders.do_nothing_forwarder.html#DoNothingForwarderFactory">DoNothingForwarderFactory</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ConnectionError">class <strong>ConnectionError</strong></a>(<a href="telemetry.internal.forwarders.do_nothing_forwarder.html#Error">Error</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Raised&nbsp;when&nbsp;unable&nbsp;to&nbsp;connect&nbsp;to&nbsp;local&nbsp;TCP&nbsp;ports.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.forwarders.do_nothing_forwarder.html#ConnectionError">ConnectionError</a></dd>
+<dd><a href="telemetry.internal.forwarders.do_nothing_forwarder.html#Error">Error</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.forwarders.do_nothing_forwarder.html#Error">Error</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="ConnectionError-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#ConnectionError-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#ConnectionError-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="ConnectionError-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ConnectionError-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ConnectionError-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#ConnectionError-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ConnectionError-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#ConnectionError-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="ConnectionError-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#ConnectionError-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="ConnectionError-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ConnectionError-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#ConnectionError-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="ConnectionError-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ConnectionError-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="ConnectionError-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ConnectionError-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#ConnectionError-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="ConnectionError-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="DoNothingForwarder">class <strong>DoNothingForwarder</strong></a>(<a href="telemetry.internal.forwarders.html#Forwarder">telemetry.internal.forwarders.Forwarder</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Check&nbsp;that&nbsp;no&nbsp;forwarding&nbsp;is&nbsp;needed&nbsp;for&nbsp;the&nbsp;given&nbsp;port&nbsp;pairs.<br>
+&nbsp;<br>
+The&nbsp;local&nbsp;and&nbsp;remote&nbsp;ports&nbsp;must&nbsp;be&nbsp;equal.&nbsp;Otherwise,&nbsp;the&nbsp;"do&nbsp;nothing"<br>
+forwarder&nbsp;does&nbsp;not&nbsp;make&nbsp;sense.&nbsp;(Raises&nbsp;<a href="#PortsMismatchError">PortsMismatchError</a>.)<br>
+&nbsp;<br>
+Also,&nbsp;check&nbsp;that&nbsp;all&nbsp;TCP&nbsp;ports&nbsp;support&nbsp;connections.&nbsp;&nbsp;(Raises&nbsp;<a href="#ConnectionError">ConnectionError</a>.)<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.forwarders.do_nothing_forwarder.html#DoNothingForwarder">DoNothingForwarder</a></dd>
+<dd><a href="telemetry.internal.forwarders.html#Forwarder">telemetry.internal.forwarders.Forwarder</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="DoNothingForwarder-__init__"><strong>__init__</strong></a>(self, port_pairs)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.forwarders.html#Forwarder">telemetry.internal.forwarders.Forwarder</a>:<br>
+<dl><dt><a name="DoNothingForwarder-Close"><strong>Close</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.forwarders.html#Forwarder">telemetry.internal.forwarders.Forwarder</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>host_ip</strong></dt>
+</dl>
+<dl><dt><strong>host_port</strong></dt>
+</dl>
+<dl><dt><strong>port_pairs</strong></dt>
+</dl>
+<dl><dt><strong>url</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="DoNothingForwarderFactory">class <strong>DoNothingForwarderFactory</strong></a>(<a href="telemetry.internal.forwarders.html#ForwarderFactory">telemetry.internal.forwarders.ForwarderFactory</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.forwarders.do_nothing_forwarder.html#DoNothingForwarderFactory">DoNothingForwarderFactory</a></dd>
+<dd><a href="telemetry.internal.forwarders.html#ForwarderFactory">telemetry.internal.forwarders.ForwarderFactory</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="DoNothingForwarderFactory-Create"><strong>Create</strong></a>(self, port_pairs)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.forwarders.html#ForwarderFactory">telemetry.internal.forwarders.ForwarderFactory</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>does_forwarder_override_dns</strong></dt>
+</dl>
+<dl><dt><strong>host_ip</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Error">class <strong>Error</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Base&nbsp;class&nbsp;for&nbsp;exceptions&nbsp;in&nbsp;this&nbsp;module.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.forwarders.do_nothing_forwarder.html#Error">Error</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="Error-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#Error-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#Error-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="Error-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#Error-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="Error-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#Error-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="Error-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#Error-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="Error-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#Error-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="Error-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="Error-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#Error-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="Error-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#Error-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="Error-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="Error-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#Error-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="Error-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PortsMismatchError">class <strong>PortsMismatchError</strong></a>(<a href="telemetry.internal.forwarders.do_nothing_forwarder.html#Error">Error</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Raised&nbsp;when&nbsp;local&nbsp;and&nbsp;remote&nbsp;ports&nbsp;are&nbsp;not&nbsp;equal.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.forwarders.do_nothing_forwarder.html#PortsMismatchError">PortsMismatchError</a></dd>
+<dd><a href="telemetry.internal.forwarders.do_nothing_forwarder.html#Error">Error</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.forwarders.do_nothing_forwarder.html#Error">Error</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="PortsMismatchError-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#PortsMismatchError-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#PortsMismatchError-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="PortsMismatchError-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#PortsMismatchError-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="PortsMismatchError-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#PortsMismatchError-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="PortsMismatchError-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#PortsMismatchError-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="PortsMismatchError-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#PortsMismatchError-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="PortsMismatchError-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="PortsMismatchError-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#PortsMismatchError-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="PortsMismatchError-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#PortsMismatchError-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="PortsMismatchError-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="PortsMismatchError-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#PortsMismatchError-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="PortsMismatchError-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
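
The do-nothing forwarder's contract is stated in the class docstring above: local and remote ports must be equal (otherwise PortsMismatchError) and each TCP port must accept connections (otherwise ConnectionError). The snippet below approximates those two checks with plain sockets; the `*Sketch` exception names and the `check_do_nothing_forwarding` helper are made up for illustration and are not Telemetry's implementation.

```
import socket


class PortsMismatchErrorSketch(Exception):
  """Local and remote ports differ (mirrors the PortsMismatchError above)."""


class ConnectionErrorSketch(Exception):
  """A local TCP port refused connections (mirrors the ConnectionError above)."""


def check_do_nothing_forwarding(port_pairs, host='127.0.0.1', timeout=1.0):
  """Approximates the documented checks: matching ports that accept connections."""
  for local_port, remote_port in port_pairs:
    if local_port != remote_port:
      raise PortsMismatchErrorSketch(
          'local port %d != remote port %d' % (local_port, remote_port))
    try:
      sock = socket.create_connection((host, local_port), timeout)
      sock.close()
    except socket.error as error:
      raise ConnectionErrorSketch(
          'port %d is not accepting connections: %s' % (local_port, error))


# Example: succeeds only if something is listening on 127.0.0.1:8000.
# check_do_nothing_forwarding([(8000, 8000)])
```
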
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.forwarders.html b/catapult/telemetry/docs/pydoc/telemetry.internal.forwarders.html
new file mode 100644
index 0000000..bf3f7d5
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.forwarders.html
@@ -0,0 +1,293 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.internal.forwarders</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.forwarders</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/forwarders/__init__.py">telemetry/internal/forwarders/__init__.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.forwarders.android_forwarder.html">android_forwarder</a><br>
+<a href="telemetry.internal.forwarders.cros_forwarder.html">cros_forwarder</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.forwarders.cros_forwarder_unittest.html">cros_forwarder_unittest</a><br>
+<a href="telemetry.internal.forwarders.do_nothing_forwarder.html">do_nothing_forwarder</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.forwarders.do_nothing_forwarder_unittest.html">do_nothing_forwarder_unittest</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.forwarders.html#Forwarder">Forwarder</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.internal.forwarders.html#ForwarderFactory">ForwarderFactory</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#tuple">__builtin__.tuple</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.forwarders.html#PortPair">PortPair</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.internal.forwarders.html#PortPairs">PortPairs</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Forwarder">class <strong>Forwarder</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="Forwarder-Close"><strong>Close</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="Forwarder-__init__"><strong>__init__</strong></a>(self, port_pairs)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>host_ip</strong></dt>
+</dl>
+<dl><dt><strong>host_port</strong></dt>
+</dl>
+<dl><dt><strong>port_pairs</strong></dt>
+</dl>
+<dl><dt><strong>url</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ForwarderFactory">class <strong>ForwarderFactory</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="ForwarderFactory-Create"><strong>Create</strong></a>(self, port_pairs)</dt><dd><tt>Creates&nbsp;a&nbsp;forwarder&nbsp;that&nbsp;maps&nbsp;remote&nbsp;(device)&nbsp;&lt;-&gt;&nbsp;local&nbsp;(host)&nbsp;ports.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;port_pairs:&nbsp;A&nbsp;<a href="#PortPairs">PortPairs</a>&nbsp;instance&nbsp;that&nbsp;consists&nbsp;of&nbsp;a&nbsp;<a href="#PortPair">PortPair</a>&nbsp;mapping<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;for&nbsp;each&nbsp;protocol.&nbsp;http&nbsp;is&nbsp;required.&nbsp;https&nbsp;and&nbsp;dns&nbsp;may&nbsp;be&nbsp;None.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>does_forwarder_override_dns</strong></dt>
+</dl>
+<dl><dt><strong>host_ip</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PortPair">class <strong>PortPair</strong></a>(<a href="__builtin__.html#tuple">__builtin__.tuple</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt><a href="#PortPair">PortPair</a>(local_port,&nbsp;remote_port)<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.forwarders.html#PortPair">PortPair</a></dd>
+<dd><a href="__builtin__.html#tuple">__builtin__.tuple</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="PortPair-__getnewargs__"><strong>__getnewargs__</strong></a>(self)</dt><dd><tt>Return&nbsp;self&nbsp;as&nbsp;a&nbsp;plain&nbsp;<a href="__builtin__.html#tuple">tuple</a>.&nbsp;&nbsp;Used&nbsp;by&nbsp;copy&nbsp;and&nbsp;pickle.</tt></dd></dl>
+
+<dl><dt><a name="PortPair-__getstate__"><strong>__getstate__</strong></a>(self)</dt><dd><tt>Exclude&nbsp;the&nbsp;OrderedDict&nbsp;from&nbsp;pickling</tt></dd></dl>
+
+<dl><dt><a name="PortPair-__repr__"><strong>__repr__</strong></a>(self)</dt><dd><tt>Return&nbsp;a&nbsp;nicely&nbsp;formatted&nbsp;representation&nbsp;string</tt></dd></dl>
+
+<dl><dt><a name="PortPair-_asdict"><strong>_asdict</strong></a>(self)</dt><dd><tt>Return&nbsp;a&nbsp;new&nbsp;OrderedDict&nbsp;which&nbsp;maps&nbsp;field&nbsp;names&nbsp;to&nbsp;their&nbsp;values</tt></dd></dl>
+
+<dl><dt><a name="PortPair-_replace"><strong>_replace</strong></a>(_self, **kwds)</dt><dd><tt>Return&nbsp;a&nbsp;new&nbsp;<a href="#PortPair">PortPair</a>&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;replacing&nbsp;specified&nbsp;fields&nbsp;with&nbsp;new&nbsp;values</tt></dd></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="PortPair-_make"><strong>_make</strong></a>(cls, iterable, new<font color="#909090">=&lt;built-in method __new__ of type object&gt;</font>, len<font color="#909090">=&lt;built-in function len&gt;</font>)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Make&nbsp;a&nbsp;new&nbsp;<a href="#PortPair">PortPair</a>&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;from&nbsp;a&nbsp;sequence&nbsp;or&nbsp;iterable</tt></dd></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="PortPair-__new__"><strong>__new__</strong></a>(_cls, local_port, remote_port)</dt><dd><tt>Create&nbsp;new&nbsp;instance&nbsp;of&nbsp;<a href="#PortPair">PortPair</a>(local_port,&nbsp;remote_port)</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>Return&nbsp;a&nbsp;new&nbsp;OrderedDict&nbsp;which&nbsp;maps&nbsp;field&nbsp;names&nbsp;to&nbsp;their&nbsp;values</tt></dd>
+</dl>
+<dl><dt><strong>local_port</strong></dt>
+<dd><tt>Alias&nbsp;for&nbsp;field&nbsp;number&nbsp;0</tt></dd>
+</dl>
+<dl><dt><strong>remote_port</strong></dt>
+<dd><tt>Alias&nbsp;for&nbsp;field&nbsp;number&nbsp;1</tt></dd>
+</dl>
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>_fields</strong> = ('local_port', 'remote_port')</dl>
+
+<hr>
+Methods inherited from <a href="__builtin__.html#tuple">__builtin__.tuple</a>:<br>
+<dl><dt><a name="PortPair-__add__"><strong>__add__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPair-__add__">__add__</a>(y)&nbsp;&lt;==&gt;&nbsp;x+y</tt></dd></dl>
+
+<dl><dt><a name="PortPair-__contains__"><strong>__contains__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPair-__contains__">__contains__</a>(y)&nbsp;&lt;==&gt;&nbsp;y&nbsp;in&nbsp;x</tt></dd></dl>
+
+<dl><dt><a name="PortPair-__eq__"><strong>__eq__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPair-__eq__">__eq__</a>(y)&nbsp;&lt;==&gt;&nbsp;x==y</tt></dd></dl>
+
+<dl><dt><a name="PortPair-__ge__"><strong>__ge__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPair-__ge__">__ge__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&gt;=y</tt></dd></dl>
+
+<dl><dt><a name="PortPair-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPair-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="PortPair-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPair-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="PortPair-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPair-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="PortPair-__gt__"><strong>__gt__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPair-__gt__">__gt__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&gt;y</tt></dd></dl>
+
+<dl><dt><a name="PortPair-__hash__"><strong>__hash__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPair-__hash__">__hash__</a>()&nbsp;&lt;==&gt;&nbsp;hash(x)</tt></dd></dl>
+
+<dl><dt><a name="PortPair-__iter__"><strong>__iter__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPair-__iter__">__iter__</a>()&nbsp;&lt;==&gt;&nbsp;iter(x)</tt></dd></dl>
+
+<dl><dt><a name="PortPair-__le__"><strong>__le__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPair-__le__">__le__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&lt;=y</tt></dd></dl>
+
+<dl><dt><a name="PortPair-__len__"><strong>__len__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPair-__len__">__len__</a>()&nbsp;&lt;==&gt;&nbsp;len(x)</tt></dd></dl>
+
+<dl><dt><a name="PortPair-__lt__"><strong>__lt__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPair-__lt__">__lt__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&lt;y</tt></dd></dl>
+
+<dl><dt><a name="PortPair-__mul__"><strong>__mul__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPair-__mul__">__mul__</a>(n)&nbsp;&lt;==&gt;&nbsp;x*n</tt></dd></dl>
+
+<dl><dt><a name="PortPair-__ne__"><strong>__ne__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPair-__ne__">__ne__</a>(y)&nbsp;&lt;==&gt;&nbsp;x!=y</tt></dd></dl>
+
+<dl><dt><a name="PortPair-__rmul__"><strong>__rmul__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPair-__rmul__">__rmul__</a>(n)&nbsp;&lt;==&gt;&nbsp;n*x</tt></dd></dl>
+
+<dl><dt><a name="PortPair-__sizeof__"><strong>__sizeof__</strong></a>(...)</dt><dd><tt>T.<a href="#PortPair-__sizeof__">__sizeof__</a>()&nbsp;--&nbsp;size&nbsp;of&nbsp;T&nbsp;in&nbsp;memory,&nbsp;in&nbsp;bytes</tt></dd></dl>
+
+<dl><dt><a name="PortPair-count"><strong>count</strong></a>(...)</dt><dd><tt>T.<a href="#PortPair-count">count</a>(value)&nbsp;-&gt;&nbsp;integer&nbsp;--&nbsp;return&nbsp;number&nbsp;of&nbsp;occurrences&nbsp;of&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="PortPair-index"><strong>index</strong></a>(...)</dt><dd><tt>T.<a href="#PortPair-index">index</a>(value,&nbsp;[start,&nbsp;[stop]])&nbsp;-&gt;&nbsp;integer&nbsp;--&nbsp;return&nbsp;first&nbsp;index&nbsp;of&nbsp;value.<br>
+Raises&nbsp;ValueError&nbsp;if&nbsp;the&nbsp;value&nbsp;is&nbsp;not&nbsp;present.</tt></dd></dl>
+
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PortPairs">class <strong>PortPairs</strong></a>(<a href="__builtin__.html#tuple">__builtin__.tuple</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt><a href="#PortPairs">PortPairs</a>(http,&nbsp;https,&nbsp;dns)<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.forwarders.html#PortPairs">PortPairs</a></dd>
+<dd><a href="__builtin__.html#tuple">__builtin__.tuple</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="PortPairs-__getnewargs__"><strong>__getnewargs__</strong></a>(self)</dt><dd><tt>Return&nbsp;self&nbsp;as&nbsp;a&nbsp;plain&nbsp;<a href="__builtin__.html#tuple">tuple</a>.&nbsp;&nbsp;Used&nbsp;by&nbsp;copy&nbsp;and&nbsp;pickle.</tt></dd></dl>
+
+<dl><dt><a name="PortPairs-__getstate__"><strong>__getstate__</strong></a>(self)</dt><dd><tt>Exclude&nbsp;the&nbsp;OrderedDict&nbsp;from&nbsp;pickling</tt></dd></dl>
+
+<dl><dt><a name="PortPairs-__repr__"><strong>__repr__</strong></a>(self)</dt><dd><tt>Return&nbsp;a&nbsp;nicely&nbsp;formatted&nbsp;representation&nbsp;string</tt></dd></dl>
+
+<dl><dt><a name="PortPairs-_asdict"><strong>_asdict</strong></a>(self)</dt><dd><tt>Return&nbsp;a&nbsp;new&nbsp;OrderedDict&nbsp;which&nbsp;maps&nbsp;field&nbsp;names&nbsp;to&nbsp;their&nbsp;values</tt></dd></dl>
+
+<dl><dt><a name="PortPairs-_replace"><strong>_replace</strong></a>(_self, **kwds)</dt><dd><tt>Return&nbsp;a&nbsp;new&nbsp;<a href="#PortPairs">PortPairs</a>&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;replacing&nbsp;specified&nbsp;fields&nbsp;with&nbsp;new&nbsp;values</tt></dd></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="PortPairs-_make"><strong>_make</strong></a>(cls, iterable, new<font color="#909090">=&lt;built-in method __new__ of type object&gt;</font>, len<font color="#909090">=&lt;built-in function len&gt;</font>)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Make&nbsp;a&nbsp;new&nbsp;<a href="#PortPairs">PortPairs</a>&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;from&nbsp;a&nbsp;sequence&nbsp;or&nbsp;iterable</tt></dd></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="PortPairs-__new__"><strong>__new__</strong></a>(_cls, http, https, dns)</dt><dd><tt>Create&nbsp;new&nbsp;instance&nbsp;of&nbsp;<a href="#PortPairs">PortPairs</a>(http,&nbsp;https,&nbsp;dns)</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>Return&nbsp;a&nbsp;new&nbsp;OrderedDict&nbsp;which&nbsp;maps&nbsp;field&nbsp;names&nbsp;to&nbsp;their&nbsp;values</tt></dd>
+</dl>
+<dl><dt><strong>dns</strong></dt>
+<dd><tt>Alias&nbsp;for&nbsp;field&nbsp;number&nbsp;2</tt></dd>
+</dl>
+<dl><dt><strong>http</strong></dt>
+<dd><tt>Alias&nbsp;for&nbsp;field&nbsp;number&nbsp;0</tt></dd>
+</dl>
+<dl><dt><strong>https</strong></dt>
+<dd><tt>Alias&nbsp;for&nbsp;field&nbsp;number&nbsp;1</tt></dd>
+</dl>
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>_fields</strong> = ('http', 'https', 'dns')</dl>
+
+<hr>
+Methods inherited from <a href="__builtin__.html#tuple">__builtin__.tuple</a>:<br>
+<dl><dt><a name="PortPairs-__add__"><strong>__add__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPairs-__add__">__add__</a>(y)&nbsp;&lt;==&gt;&nbsp;x+y</tt></dd></dl>
+
+<dl><dt><a name="PortPairs-__contains__"><strong>__contains__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPairs-__contains__">__contains__</a>(y)&nbsp;&lt;==&gt;&nbsp;y&nbsp;in&nbsp;x</tt></dd></dl>
+
+<dl><dt><a name="PortPairs-__eq__"><strong>__eq__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPairs-__eq__">__eq__</a>(y)&nbsp;&lt;==&gt;&nbsp;x==y</tt></dd></dl>
+
+<dl><dt><a name="PortPairs-__ge__"><strong>__ge__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPairs-__ge__">__ge__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&gt;=y</tt></dd></dl>
+
+<dl><dt><a name="PortPairs-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPairs-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="PortPairs-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPairs-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="PortPairs-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPairs-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="PortPairs-__gt__"><strong>__gt__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPairs-__gt__">__gt__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&gt;y</tt></dd></dl>
+
+<dl><dt><a name="PortPairs-__hash__"><strong>__hash__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPairs-__hash__">__hash__</a>()&nbsp;&lt;==&gt;&nbsp;hash(x)</tt></dd></dl>
+
+<dl><dt><a name="PortPairs-__iter__"><strong>__iter__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPairs-__iter__">__iter__</a>()&nbsp;&lt;==&gt;&nbsp;iter(x)</tt></dd></dl>
+
+<dl><dt><a name="PortPairs-__le__"><strong>__le__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPairs-__le__">__le__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&lt;=y</tt></dd></dl>
+
+<dl><dt><a name="PortPairs-__len__"><strong>__len__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPairs-__len__">__len__</a>()&nbsp;&lt;==&gt;&nbsp;len(x)</tt></dd></dl>
+
+<dl><dt><a name="PortPairs-__lt__"><strong>__lt__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPairs-__lt__">__lt__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&lt;y</tt></dd></dl>
+
+<dl><dt><a name="PortPairs-__mul__"><strong>__mul__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPairs-__mul__">__mul__</a>(n)&nbsp;&lt;==&gt;&nbsp;x*n</tt></dd></dl>
+
+<dl><dt><a name="PortPairs-__ne__"><strong>__ne__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPairs-__ne__">__ne__</a>(y)&nbsp;&lt;==&gt;&nbsp;x!=y</tt></dd></dl>
+
+<dl><dt><a name="PortPairs-__rmul__"><strong>__rmul__</strong></a>(...)</dt><dd><tt>x.<a href="#PortPairs-__rmul__">__rmul__</a>(n)&nbsp;&lt;==&gt;&nbsp;n*x</tt></dd></dl>
+
+<dl><dt><a name="PortPairs-__sizeof__"><strong>__sizeof__</strong></a>(...)</dt><dd><tt>T.<a href="#PortPairs-__sizeof__">__sizeof__</a>()&nbsp;--&nbsp;size&nbsp;of&nbsp;T&nbsp;in&nbsp;memory,&nbsp;in&nbsp;bytes</tt></dd></dl>
+
+<dl><dt><a name="PortPairs-count"><strong>count</strong></a>(...)</dt><dd><tt>T.<a href="#PortPairs-count">count</a>(value)&nbsp;-&gt;&nbsp;integer&nbsp;--&nbsp;return&nbsp;number&nbsp;of&nbsp;occurrences&nbsp;of&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="PortPairs-index"><strong>index</strong></a>(...)</dt><dd><tt>T.<a href="#PortPairs-index">index</a>(value,&nbsp;[start,&nbsp;[stop]])&nbsp;-&gt;&nbsp;integer&nbsp;--&nbsp;return&nbsp;first&nbsp;index&nbsp;of&nbsp;value.<br>
+Raises&nbsp;ValueError&nbsp;if&nbsp;the&nbsp;value&nbsp;is&nbsp;not&nbsp;present.</tt></dd></dl>
+
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
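
PortPair and PortPairs are documented above as namedtuples with fields `(local_port, remote_port)` and `(http, https, dns)`, and ForwarderFactory.Create notes that http is required while https and dns may be None. A quick way to see how such values compose, using equivalent locally defined namedtuples rather than the real classes:

```
import collections

# Equivalent shapes to the PortPair/PortPairs documented above; the real
# definitions live in telemetry/internal/forwarders/__init__.py.
PortPair = collections.namedtuple('PortPair', ['local_port', 'remote_port'])
PortPairs = collections.namedtuple('PortPairs', ['http', 'https', 'dns'])

# Per the Create() docstring: http is required, https and dns may be None.
pairs = PortPairs(http=PortPair(8080, 8080), https=None, dns=None)
print(pairs.http.remote_port)                 # -> 8080
print(pairs._replace(dns=PortPair(53, 53)))   # namedtuples swap fields via _replace
```
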
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.html b/catapult/telemetry/docs/pydoc/telemetry.internal.html
new file mode 100644
index 0000000..cd82947
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.html
@@ -0,0 +1,36 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.internal</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.internal</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/__init__.py">telemetry/internal/__init__.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.actions.html"><strong>actions</strong>&nbsp;(package)</a><br>
+<a href="telemetry.internal.app.html"><strong>app</strong>&nbsp;(package)</a><br>
+<a href="telemetry.internal.backends.html"><strong>backends</strong>&nbsp;(package)</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.browser.html"><strong>browser</strong>&nbsp;(package)</a><br>
+<a href="telemetry.internal.forwarders.html"><strong>forwarders</strong>&nbsp;(package)</a><br>
+<a href="telemetry.internal.image_processing.html"><strong>image_processing</strong>&nbsp;(package)</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.html"><strong>platform</strong>&nbsp;(package)</a><br>
+<a href="telemetry.internal.results.html"><strong>results</strong>&nbsp;(package)</a><br>
+<a href="telemetry.internal.story_runner.html">story_runner</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.story_runner_unittest.html">story_runner_unittest</a><br>
+<a href="telemetry.internal.testing.html"><strong>testing</strong>&nbsp;(package)</a><br>
+<a href="telemetry.internal.util.html"><strong>util</strong>&nbsp;(package)</a><br>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.image_processing._bitmap.html b/catapult/telemetry/docs/pydoc/telemetry.internal.image_processing._bitmap.html
new file mode 100644
index 0000000..2c3f377
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.image_processing._bitmap.html
@@ -0,0 +1,97 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.image_processing._bitmap</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.image_processing.html"><font color="#ffffff">image_processing</font></a>._bitmap</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/image_processing/_bitmap.py">telemetry/internal/image_processing/_bitmap.py</a></font></td></tr></table>
+    <p><tt><a href="#Bitmap">Bitmap</a>&nbsp;is&nbsp;a&nbsp;basic&nbsp;wrapper&nbsp;for&nbsp;image&nbsp;pixels.&nbsp;It&nbsp;includes&nbsp;some&nbsp;basic&nbsp;processing<br>
+tools:&nbsp;crop,&nbsp;find&nbsp;bounding&nbsp;box&nbsp;of&nbsp;a&nbsp;color&nbsp;and&nbsp;compute&nbsp;histogram&nbsp;of&nbsp;color&nbsp;values.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="array.html">array</a><br>
+<a href="telemetry.internal.util.binary_manager.html">telemetry.internal.util.binary_manager</a><br>
+<a href="cStringIO.html">cStringIO</a><br>
+</td><td width="25%" valign=top><a href="telemetry.util.color_histogram.html">telemetry.util.color_histogram</a><br>
+<a href="telemetry.core.platform.html">telemetry.core.platform</a><br>
+<a href="png.html">png</a><br>
+</td><td width="25%" valign=top><a href="telemetry.util.rgba_color.html">telemetry.util.rgba_color</a><br>
+<a href="struct.html">struct</a><br>
+<a href="subprocess.html">subprocess</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.image_processing._bitmap.html#Bitmap">Bitmap</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Bitmap">class <strong>Bitmap</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Utilities&nbsp;for&nbsp;parsing&nbsp;and&nbsp;inspecting&nbsp;a&nbsp;bitmap.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="Bitmap-ColorHistogram"><strong>ColorHistogram</strong></a>(self, ignore_color<font color="#909090">=None</font>, tolerance<font color="#909090">=0</font>)</dt></dl>
+
+<dl><dt><a name="Bitmap-Crop"><strong>Crop</strong></a>(self, left, top, width, height)</dt></dl>
+
+<dl><dt><a name="Bitmap-Diff"><strong>Diff</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="Bitmap-GetBoundingBox"><strong>GetBoundingBox</strong></a>(self, color, tolerance<font color="#909090">=0</font>)</dt></dl>
+
+<dl><dt><a name="Bitmap-GetPixelColor"><strong>GetPixelColor</strong></a>(self, x, y)</dt></dl>
+
+<dl><dt><a name="Bitmap-IsEqual"><strong>IsEqual</strong></a>(self, other, tolerance<font color="#909090">=0</font>)</dt></dl>
+
+<dl><dt><a name="Bitmap-WritePngFile"><strong>WritePngFile</strong></a>(self, path)</dt></dl>
+
+<dl><dt><a name="Bitmap-__init__"><strong>__init__</strong></a>(self, bpp, width, height, pixels, metadata<font color="#909090">=None</font>)</dt></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="Bitmap-FromPng"><strong>FromPng</strong></a>(png_data)</dt></dl>
+
+<dl><dt><a name="Bitmap-FromPngFile"><strong>FromPngFile</strong></a>(path)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>bpp</strong></dt>
+</dl>
+<dl><dt><strong>height</strong></dt>
+</dl>
+<dl><dt><strong>metadata</strong></dt>
+</dl>
+<dl><dt><strong>pixels</strong></dt>
+</dl>
+<dl><dt><strong>width</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
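
For the Bitmap wrapper, the pydoc lists the constructor, the FromPng/FromPngFile factories, and methods such as Crop, GetPixelColor, ColorHistogram, and WritePngFile. A rough call-pattern sketch follows, assuming a catapult checkout with catapult/telemetry on sys.path and an existing screenshot.png; the file names and crop values are illustrative only.

```
# Call-pattern sketch for the documented Bitmap wrapper; not a guaranteed recipe.
from telemetry.internal.image_processing._bitmap import Bitmap

bmp = Bitmap.FromPngFile('screenshot.png')
print('%dx%d, bpp=%d' % (bmp.width, bmp.height, bmp.bpp))

# Documented signature: Crop(left, top, width, height). The pydoc does not say
# whether it mutates in place or returns a new bitmap, so only the call shape
# is shown here.
bmp.Crop(0, 0, 100, 100)

print(bmp.GetPixelColor(10, 10))       # an RgbaColor-like value
histogram = bmp.ColorHistogram(tolerance=2)
bmp.WritePngFile('cropped.png')
```
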
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.image_processing.cv_util.html b/catapult/telemetry/docs/pydoc/telemetry.internal.image_processing.cv_util.html
new file mode 100644
index 0000000..68c1a5e
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.image_processing.cv_util.html
@@ -0,0 +1,50 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.image_processing.cv_util</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.image_processing.html"><font color="#ffffff">image_processing</font></a>.cv_util</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/image_processing/cv_util.py">telemetry/internal/image_processing/cv_util.py</a></font></td></tr></table>
+    <p><tt>This&nbsp;module&nbsp;provides&nbsp;implementations&nbsp;of&nbsp;common&nbsp;computer&nbsp;vision&nbsp;operations.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.util.external_modules.html">telemetry.internal.util.external_modules</a><br>
+</td><td width="25%" valign=top><a href="numpy.html">numpy</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-AreLinesOrthogonal"><strong>AreLinesOrthogonal</strong></a>(line1, line2, tolerance)</dt><dd><tt>Returns&nbsp;true&nbsp;if&nbsp;lines&nbsp;are&nbsp;within&nbsp;tolerance&nbsp;radians&nbsp;of&nbsp;being&nbsp;orthogonal.</tt></dd></dl>
+ <dl><dt><a name="-ExtendLines"><strong>ExtendLines</strong></a>(lines, length)</dt><dd><tt>Extends&nbsp;lines&nbsp;in&nbsp;an&nbsp;array&nbsp;to&nbsp;a&nbsp;given&nbsp;length,&nbsp;maintaining&nbsp;the&nbsp;center<br>
+point.&nbsp;Does&nbsp;not&nbsp;necessarily&nbsp;maintain&nbsp;point&nbsp;order.</tt></dd></dl>
+ <dl><dt><a name="-FindLineIntersection"><strong>FindLineIntersection</strong></a>(line1, line2)</dt><dd><tt>If&nbsp;the&nbsp;line&nbsp;segments&nbsp;intersect,&nbsp;returns&nbsp;True&nbsp;and&nbsp;their&nbsp;intersection.<br>
+Otherwise,&nbsp;returns&nbsp;False&nbsp;and&nbsp;the&nbsp;intersection&nbsp;of&nbsp;the&nbsp;line&nbsp;segments&nbsp;if&nbsp;they<br>
+were&nbsp;to&nbsp;be&nbsp;extended.</tt></dd></dl>
+ <dl><dt><a name="-IsPointApproxOnLine"><strong>IsPointApproxOnLine</strong></a>(point, line, tolerance<font color="#909090">=1</font>)</dt><dd><tt>Approximates&nbsp;distance&nbsp;between&nbsp;point&nbsp;and&nbsp;line&nbsp;for&nbsp;small&nbsp;distances&nbsp;using<br>
+the&nbsp;determinant&nbsp;and&nbsp;checks&nbsp;whether&nbsp;it's&nbsp;within&nbsp;the&nbsp;tolerance.&nbsp;Tolerance&nbsp;is<br>
+an&nbsp;approximate&nbsp;distance&nbsp;in&nbsp;pixels;&nbsp;precision&nbsp;decreases&nbsp;with&nbsp;distance.</tt></dd></dl>
+ <dl><dt><a name="-SqDistance"><strong>SqDistance</strong></a>(point1, point2)</dt><dd><tt>Computes&nbsp;the&nbsp;square&nbsp;of&nbsp;the&nbsp;distance&nbsp;between&nbsp;two&nbsp;points.</tt></dd></dl>
+ <dl><dt><a name="-SqDistances"><strong>SqDistances</strong></a>(points1, points2)</dt><dd><tt>Computes&nbsp;the&nbsp;square&nbsp;of&nbsp;the&nbsp;distance&nbsp;between&nbsp;two&nbsp;sets&nbsp;of&nbsp;points,&nbsp;or&nbsp;a<br>
+set&nbsp;of&nbsp;points&nbsp;and&nbsp;a&nbsp;point.</tt></dd></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>division</strong> = _Feature((2, 2, 0, 'alpha', 2), (3, 0, 0, 'alpha', 0), 8192)</td></tr></table>
+</body></html>
\ No newline at end of file
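
Since the page above only lists signatures, here is a minimal sketch of the cv_util helpers. The encoding of a line as an (x1, y1, x2, y2) endpoint array and of a point as a 2-element array is an assumption (it is not spelled out in the pydoc), as is the import path.

```python
# Sketch only: the line/point encodings below are assumptions.
import math

import numpy as np

from telemetry.internal.image_processing import cv_util

horizontal = np.array([0.0, 0.0, 100.0, 0.0])   # assumed (x1, y1, x2, y2)
vertical = np.array([50.0, -50.0, 50.0, 50.0])

# Allow roughly 5 degrees of deviation from a right angle.
if cv_util.AreLinesOrthogonal(horizontal, vertical, math.radians(5)):
    crossed, point = cv_util.FindLineIntersection(horizontal, vertical)
    # crossed is True only if the segments themselves intersect; point is the
    # intersection of the (extended) lines either way.

# Squared distances avoid a sqrt when only relative comparisons are needed.
d2 = cv_util.SqDistance(np.array([0.0, 0.0]), np.array([3.0, 4.0]))   # 25.0
```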
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.image_processing.fake_frame_generator.html b/catapult/telemetry/docs/pydoc/telemetry.internal.image_processing.fake_frame_generator.html
new file mode 100644
index 0000000..249a327
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.image_processing.fake_frame_generator.html
@@ -0,0 +1,114 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.image_processing.fake_frame_generator</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.image_processing.html"><font color="#ffffff">image_processing</font></a>.fake_frame_generator</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/image_processing/fake_frame_generator.py">telemetry/internal/image_processing/fake_frame_generator.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.util.external_modules.html">telemetry.internal.util.external_modules</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.image_processing.frame_generator.html">telemetry.internal.image_processing.frame_generator</a><br>
+</td><td width="25%" valign=top><a href="numpy.html">numpy</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.image_processing.frame_generator.html#FrameGenerator">telemetry.internal.image_processing.frame_generator.FrameGenerator</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.image_processing.fake_frame_generator.html#FakeFrameGenerator">FakeFrameGenerator</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="FakeFrameGenerator">class <strong>FakeFrameGenerator</strong></a>(<a href="telemetry.internal.image_processing.frame_generator.html#FrameGenerator">telemetry.internal.image_processing.frame_generator.FrameGenerator</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Fakes&nbsp;a&nbsp;Frame&nbsp;Generator,&nbsp;for&nbsp;testing.<br>
+&nbsp;<br>
+Attributes:<br>
+&nbsp;&nbsp;_frame_index:&nbsp;A&nbsp;frame&nbsp;read&nbsp;counter.<br>
+&nbsp;&nbsp;_timestamps:&nbsp;A&nbsp;generator&nbsp;of&nbsp;timestamps&nbsp;to&nbsp;return,&nbsp;or&nbsp;None.<br>
+&nbsp;&nbsp;_timestamp:&nbsp;The&nbsp;current&nbsp;timestamp.<br>
+&nbsp;&nbsp;_dimensions:&nbsp;The&nbsp;dimensions&nbsp;to&nbsp;return.<br>
+&nbsp;&nbsp;_channels:&nbsp;The&nbsp;number&nbsp;of&nbsp;color&nbsp;channels&nbsp;to&nbsp;return&nbsp;in&nbsp;the&nbsp;generated&nbsp;frames.<br>
+&nbsp;&nbsp;_frames:&nbsp;The&nbsp;number&nbsp;of&nbsp;frames&nbsp;to&nbsp;return&nbsp;before&nbsp;fake&nbsp;EOF.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.image_processing.fake_frame_generator.html#FakeFrameGenerator">FakeFrameGenerator</a></dd>
+<dd><a href="telemetry.internal.image_processing.frame_generator.html#FrameGenerator">telemetry.internal.image_processing.frame_generator.FrameGenerator</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="FakeFrameGenerator-__init__"><strong>__init__</strong></a>(self, frames<font color="#909090">=1e+16</font>, dimensions<font color="#909090">=(320, 240)</font>, channels<font color="#909090">=3</font>, timestamps<font color="#909090">=&lt;generator object &lt;genexpr&gt;&gt;</font>)</dt><dd><tt>Initializes&nbsp;the&nbsp;<a href="#FakeFrameGenerator">FakeFrameGenerator</a>&nbsp;object.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;frames:&nbsp;int,&nbsp;The&nbsp;number&nbsp;of&nbsp;frames&nbsp;to&nbsp;return&nbsp;before&nbsp;fake&nbsp;EOF.<br>
+&nbsp;&nbsp;dimensions:&nbsp;(int,&nbsp;int),&nbsp;The&nbsp;dimensions&nbsp;to&nbsp;return.<br>
+&nbsp;&nbsp;timestamps:&nbsp;generator,&nbsp;A&nbsp;generator&nbsp;of&nbsp;timestamps&nbsp;to&nbsp;return.&nbsp;The&nbsp;default<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;value&nbsp;is&nbsp;an&nbsp;infinite&nbsp;0&nbsp;generator.<br>
+&nbsp;&nbsp;channels:&nbsp;int,&nbsp;The&nbsp;number&nbsp;of&nbsp;color&nbsp;channels&nbsp;to&nbsp;return&nbsp;in&nbsp;the&nbsp;generated<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;frames,&nbsp;1&nbsp;for&nbsp;greyscale,&nbsp;3&nbsp;for&nbsp;RGB.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>CurrentFrameNumber</strong></dt>
+</dl>
+<dl><dt><strong>CurrentTimestamp</strong></dt>
+</dl>
+<dl><dt><strong>Dimensions</strong></dt>
+</dl>
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>__abstractmethods__</strong> = frozenset([])</dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.image_processing.frame_generator.html#FrameGenerator">telemetry.internal.image_processing.frame_generator.FrameGenerator</a>:<br>
+<dl><dt><strong>Generator</strong></dt>
+<dd><tt>Returns:<br>
+A&nbsp;reference&nbsp;to&nbsp;the&nbsp;created&nbsp;generator.</tt></dd>
+</dl>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="telemetry.internal.image_processing.frame_generator.html#FrameGenerator">telemetry.internal.image_processing.frame_generator.FrameGenerator</a>:<br>
+<dl><dt><strong>__metaclass__</strong> = &lt;class 'abc.ABCMeta'&gt;<dd><tt>Metaclass&nbsp;for&nbsp;defining&nbsp;Abstract&nbsp;Base&nbsp;Classes&nbsp;(ABCs).<br>
+&nbsp;<br>
+Use&nbsp;this&nbsp;metaclass&nbsp;to&nbsp;create&nbsp;an&nbsp;ABC.&nbsp;&nbsp;An&nbsp;ABC&nbsp;can&nbsp;be&nbsp;subclassed<br>
+directly,&nbsp;and&nbsp;then&nbsp;acts&nbsp;as&nbsp;a&nbsp;mix-in&nbsp;class.&nbsp;&nbsp;You&nbsp;can&nbsp;also&nbsp;register<br>
+unrelated&nbsp;concrete&nbsp;classes&nbsp;(even&nbsp;built-in&nbsp;classes)&nbsp;and&nbsp;unrelated<br>
+ABCs&nbsp;as&nbsp;'virtual&nbsp;subclasses'&nbsp;--&nbsp;these&nbsp;and&nbsp;their&nbsp;descendants&nbsp;will<br>
+be&nbsp;considered&nbsp;subclasses&nbsp;of&nbsp;the&nbsp;registering&nbsp;ABC&nbsp;by&nbsp;the&nbsp;built-in<br>
+issubclass()&nbsp;function,&nbsp;but&nbsp;the&nbsp;registering&nbsp;ABC&nbsp;won't&nbsp;show&nbsp;up&nbsp;in<br>
+their&nbsp;MRO&nbsp;(Method&nbsp;Resolution&nbsp;Order)&nbsp;nor&nbsp;will&nbsp;method<br>
+implementations&nbsp;defined&nbsp;by&nbsp;the&nbsp;registering&nbsp;ABC&nbsp;be&nbsp;callable&nbsp;(not<br>
+even&nbsp;via&nbsp;super()).</tt></dl>
+
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
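
A short sketch of how the fake generator might be driven in a test, using only the constructor arguments and inherited properties listed above. What each step of Generator yields, and how the fake EOF is signalled, are not stated on the page, so both are hedged in the comments.

```python
# Sketch: drives FakeFrameGenerator via the properties documented above.
from telemetry.internal.image_processing import fake_frame_generator
from telemetry.internal.image_processing import frame_generator

fake = fake_frame_generator.FakeFrameGenerator(
    frames=3, dimensions=(64, 48), channels=1)   # 3 greyscale 64x48 frames

print(fake.Dimensions)   # (64, 48)
try:
    for _ in fake.Generator:   # the yielded value itself is unspecified here
        print(fake.CurrentFrameNumber, fake.CurrentTimestamp)
except frame_generator.FrameReadError:
    pass   # fake EOF may surface as FrameReadError rather than StopIteration
```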
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.image_processing.frame_generator.html b/catapult/telemetry/docs/pydoc/telemetry.internal.image_processing.frame_generator.html
new file mode 100644
index 0000000..8cfa3cc
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.image_processing.frame_generator.html
@@ -0,0 +1,160 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.image_processing.frame_generator</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.image_processing.html"><font color="#ffffff">image_processing</font></a>.frame_generator</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/image_processing/frame_generator.py">telemetry/internal/image_processing/frame_generator.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="abc.html">abc</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.image_processing.frame_generator.html#FrameGenerator">FrameGenerator</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.image_processing.frame_generator.html#FrameReadError">FrameReadError</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="FrameGenerator">class <strong>FrameGenerator</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Defines&nbsp;an&nbsp;interface&nbsp;for&nbsp;reading&nbsp;input&nbsp;frames.<br>
+&nbsp;<br>
+Attributes:<br>
+&nbsp;&nbsp;_generator:&nbsp;A&nbsp;reference&nbsp;to&nbsp;the&nbsp;created&nbsp;generator.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="FrameGenerator-__init__"><strong>__init__</strong></a>(self)</dt><dd><tt>Initializes&nbsp;the&nbsp;<a href="#FrameGenerator">FrameGenerator</a>&nbsp;<a href="__builtin__.html#object">object</a>.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>CurrentFrameNumber</strong></dt>
+<dd><tt>Returns:<br>
+int,&nbsp;The&nbsp;frame&nbsp;index&nbsp;of&nbsp;the&nbsp;current&nbsp;frame.</tt></dd>
+</dl>
+<dl><dt><strong>CurrentTimestamp</strong></dt>
+<dd><tt>Returns:<br>
+float,&nbsp;The&nbsp;timestamp&nbsp;of&nbsp;the&nbsp;current&nbsp;frame&nbsp;in&nbsp;milliseconds.</tt></dd>
+</dl>
+<dl><dt><strong>Dimensions</strong></dt>
+<dd><tt>Returns:<br>
+The&nbsp;dimensions&nbsp;of&nbsp;the&nbsp;frame&nbsp;sequence&nbsp;as&nbsp;a&nbsp;tuple&nbsp;of&nbsp;ints&nbsp;(width,&nbsp;height).<br>
+This&nbsp;value&nbsp;should&nbsp;be&nbsp;constant&nbsp;across&nbsp;frames.</tt></dd>
+</dl>
+<dl><dt><strong>Generator</strong></dt>
+<dd><tt>Returns:<br>
+A&nbsp;reference&nbsp;to&nbsp;the&nbsp;created&nbsp;generator.</tt></dd>
+</dl>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>__abstractmethods__</strong> = frozenset(['CurrentFrameNumber', 'CurrentTimestamp', 'Dimensions', '_CreateGenerator'])</dl>
+
+<dl><dt><strong>__metaclass__</strong> = &lt;class 'abc.ABCMeta'&gt;<dd><tt>Metaclass&nbsp;for&nbsp;defining&nbsp;Abstract&nbsp;Base&nbsp;Classes&nbsp;(ABCs).<br>
+&nbsp;<br>
+Use&nbsp;this&nbsp;metaclass&nbsp;to&nbsp;create&nbsp;an&nbsp;ABC.&nbsp;&nbsp;An&nbsp;ABC&nbsp;can&nbsp;be&nbsp;subclassed<br>
+directly,&nbsp;and&nbsp;then&nbsp;acts&nbsp;as&nbsp;a&nbsp;mix-in&nbsp;class.&nbsp;&nbsp;You&nbsp;can&nbsp;also&nbsp;register<br>
+unrelated&nbsp;concrete&nbsp;classes&nbsp;(even&nbsp;built-in&nbsp;classes)&nbsp;and&nbsp;unrelated<br>
+ABCs&nbsp;as&nbsp;'virtual&nbsp;subclasses'&nbsp;--&nbsp;these&nbsp;and&nbsp;their&nbsp;descendants&nbsp;will<br>
+be&nbsp;considered&nbsp;subclasses&nbsp;of&nbsp;the&nbsp;registering&nbsp;ABC&nbsp;by&nbsp;the&nbsp;built-in<br>
+issubclass()&nbsp;function,&nbsp;but&nbsp;the&nbsp;registering&nbsp;ABC&nbsp;won't&nbsp;show&nbsp;up&nbsp;in<br>
+their&nbsp;MRO&nbsp;(Method&nbsp;Resolution&nbsp;Order)&nbsp;nor&nbsp;will&nbsp;method<br>
+implementations&nbsp;defined&nbsp;by&nbsp;the&nbsp;registering&nbsp;ABC&nbsp;be&nbsp;callable&nbsp;(not<br>
+even&nbsp;via&nbsp;super()).</tt></dl>
+
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="FrameReadError">class <strong>FrameReadError</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.image_processing.frame_generator.html#FrameReadError">FrameReadError</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="FrameReadError-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#FrameReadError-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#FrameReadError-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="FrameReadError-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#FrameReadError-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="FrameReadError-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#FrameReadError-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="FrameReadError-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#FrameReadError-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="FrameReadError-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#FrameReadError-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="FrameReadError-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="FrameReadError-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#FrameReadError-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="FrameReadError-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#FrameReadError-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="FrameReadError-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="FrameReadError-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#FrameReadError-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="FrameReadError-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
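
The abstract members listed above (CurrentFrameNumber, CurrentTimestamp, Dimensions and _CreateGenerator) define the contract a frame source must fill. A hypothetical subclass is sketched below; the choice to yield raw numpy frames from _CreateGenerator is an assumption, since the page does not say what the generator should produce.

```python
# Hypothetical FrameGenerator subclass; yielding numpy frames is an assumption.
import numpy as np

from telemetry.internal.image_processing import frame_generator


class SolidGreyFrameGenerator(frame_generator.FrameGenerator):
  """Produces a fixed number of solid grey frames (illustrative only)."""

  def __init__(self, frame_count, dimensions=(320, 240)):
    self._frame_count = frame_count
    self._dims = dimensions
    self._index = -1
    super(SolidGreyFrameGenerator, self).__init__()

  def _CreateGenerator(self):
    while self._index + 1 < self._frame_count:
      self._index += 1
      yield np.full((self._dims[1], self._dims[0]), 128, dtype=np.uint8)

  @property
  def CurrentFrameNumber(self):
    return self._index

  @property
  def CurrentTimestamp(self):
    return self._index * (1000.0 / 30)   # pretend 30 fps, in milliseconds

  @property
  def Dimensions(self):
    return self._dims
```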
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.image_processing.html b/catapult/telemetry/docs/pydoc/telemetry.internal.image_processing.html
new file mode 100644
index 0000000..5d9a36a
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.image_processing.html
@@ -0,0 +1,37 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.internal.image_processing</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.image_processing</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/image_processing/__init__.py">telemetry/internal/image_processing/__init__.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.image_processing._bitmap.html">_bitmap</a><br>
+<a href="telemetry.internal.image_processing.cv_util.html">cv_util</a><br>
+<a href="telemetry.internal.image_processing.cv_util_unittest.html">cv_util_unittest</a><br>
+<a href="telemetry.internal.image_processing.fake_frame_generator.html">fake_frame_generator</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.image_processing.frame_generator.html">frame_generator</a><br>
+<a href="telemetry.internal.image_processing.image_util_bitmap_impl.html">image_util_bitmap_impl</a><br>
+<a href="telemetry.internal.image_processing.image_util_numpy_impl.html">image_util_numpy_impl</a><br>
+<a href="telemetry.internal.image_processing.screen_finder.html">screen_finder</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.image_processing.screen_finder_unittest.html">screen_finder_unittest</a><br>
+<a href="telemetry.internal.image_processing.video.html">video</a><br>
+<a href="telemetry.internal.image_processing.video_file_frame_generator.html">video_file_frame_generator</a><br>
+<a href="telemetry.internal.image_processing.video_file_frame_generator_unittest.html">video_file_frame_generator_unittest</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.image_processing.video_unittest.html">video_unittest</a><br>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.image_processing.image_util_bitmap_impl.html b/catapult/telemetry/docs/pydoc/telemetry.internal.image_processing.image_util_bitmap_impl.html
new file mode 100644
index 0000000..b92e1c8
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.image_processing.image_util_bitmap_impl.html
@@ -0,0 +1,53 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.image_processing.image_util_bitmap_impl</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.image_processing.html"><font color="#ffffff">image_processing</font></a>.image_util_bitmap_impl</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/image_processing/image_util_bitmap_impl.py">telemetry/internal/image_processing/image_util_bitmap_impl.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.image_processing._bitmap.html">telemetry.internal.image_processing._bitmap</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-AreEqual"><strong>AreEqual</strong></a>(bitmap1, bitmap2, tolerance, _)</dt></dl>
+ <dl><dt><a name="-Channels"><strong>Channels</strong></a>(bitmap)</dt></dl>
+ <dl><dt><a name="-Crop"><strong>Crop</strong></a>(bitmap, left, top, width, height)</dt></dl>
+ <dl><dt><a name="-Diff"><strong>Diff</strong></a>(bitmap1, bitmap2)</dt></dl>
+ <dl><dt><a name="-FromPng"><strong>FromPng</strong></a>(png_data)</dt></dl>
+ <dl><dt><a name="-FromPngFile"><strong>FromPngFile</strong></a>(path)</dt></dl>
+ <dl><dt><a name="-FromRGBPixels"><strong>FromRGBPixels</strong></a>(width, height, pixels, bpp)</dt></dl>
+ <dl><dt><a name="-GetBoundingBox"><strong>GetBoundingBox</strong></a>(bitmap, color, tolerance)</dt></dl>
+ <dl><dt><a name="-GetColorHistogram"><strong>GetColorHistogram</strong></a>(bitmap, ignore_color, tolerance)</dt></dl>
+ <dl><dt><a name="-GetPixelColor"><strong>GetPixelColor</strong></a>(bitmap, x, y)</dt></dl>
+ <dl><dt><a name="-Height"><strong>Height</strong></a>(bitmap)</dt></dl>
+ <dl><dt><a name="-Pixels"><strong>Pixels</strong></a>(bitmap)</dt></dl>
+ <dl><dt><a name="-Width"><strong>Width</strong></a>(bitmap)</dt></dl>
+ <dl><dt><a name="-WritePngFile"><strong>WritePngFile</strong></a>(bitmap, path)</dt></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>division</strong> = _Feature((2, 2, 0, 'alpha', 2), (3, 0, 0, 'alpha', 0), 8192)</td></tr></table>
+</body></html>
\ No newline at end of file
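
These free functions mirror the Bitmap methods documented earlier, acting as one backend behind a common functional interface. A minimal sketch follows, calling the module directly; production code presumably goes through a higher-level image_util wrapper (an inference from the naming).

```python
# Sketch only: exercises the function signatures listed above directly.
from telemetry.internal.image_processing import image_util_bitmap_impl as impl

image = impl.FromPngFile('screenshot.png')    # file names are placeholders
golden = impl.FromPngFile('golden.png')

print(impl.Width(image), impl.Height(image), impl.Channels(image))

# The fourth AreEqual argument is unused in this backend (it is named `_`
# above); the numpy backend documents it as `likely_equal`.
if not impl.AreEqual(image, golden, 2, None):
    # Assuming Diff returns a writable image of the per-pixel differences.
    impl.WritePngFile(impl.Diff(image, golden), 'diff.png')
```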
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.image_processing.image_util_numpy_impl.html b/catapult/telemetry/docs/pydoc/telemetry.internal.image_processing.image_util_numpy_impl.html
new file mode 100644
index 0000000..c5786d5
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.image_processing.image_util_numpy_impl.html
@@ -0,0 +1,58 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.image_processing.image_util_numpy_impl</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.image_processing.html"><font color="#ffffff">image_processing</font></a>.image_util_numpy_impl</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/image_processing/image_util_numpy_impl.py">telemetry/internal/image_processing/image_util_numpy_impl.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.util.color_histogram.html">telemetry.util.color_histogram</a><br>
+<a href="cv2.html">cv2</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.util.external_modules.html">telemetry.internal.util.external_modules</a><br>
+<a href="numpy.html">numpy</a><br>
+</td><td width="25%" valign=top><a href="png.html">png</a><br>
+<a href="telemetry.util.rgba_color.html">telemetry.util.rgba_color</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-AreEqual"><strong>AreEqual</strong></a>(image1, image2, tolerance, likely_equal)</dt></dl>
+ <dl><dt><a name="-Channels"><strong>Channels</strong></a>(image)</dt></dl>
+ <dl><dt><a name="-Crop"><strong>Crop</strong></a>(image, left, top, width, height)</dt></dl>
+ <dl><dt><a name="-Diff"><strong>Diff</strong></a>(image1, image2)</dt></dl>
+ <dl><dt><a name="-FromPng"><strong>FromPng</strong></a>(png_data)</dt></dl>
+ <dl><dt><a name="-FromPngFile"><strong>FromPngFile</strong></a>(path)</dt></dl>
+ <dl><dt><a name="-FromRGBPixels"><strong>FromRGBPixels</strong></a>(width, height, pixels, bpp)</dt></dl>
+ <dl><dt><a name="-GetBoundingBox"><strong>GetBoundingBox</strong></a>(image, color, tolerance)</dt></dl>
+ <dl><dt><a name="-GetColorHistogram"><strong>GetColorHistogram</strong></a>(image, ignore_color, tolerance)</dt></dl>
+ <dl><dt><a name="-GetPixelColor"><strong>GetPixelColor</strong></a>(image, x, y)</dt></dl>
+ <dl><dt><a name="-Height"><strong>Height</strong></a>(image)</dt></dl>
+ <dl><dt><a name="-Pixels"><strong>Pixels</strong></a>(image)</dt></dl>
+ <dl><dt><a name="-Width"><strong>Width</strong></a>(image)</dt></dl>
+ <dl><dt><a name="-WritePngFile"><strong>WritePngFile</strong></a>(image, path)</dt></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>division</strong> = _Feature((2, 2, 0, 'alpha', 2), (3, 0, 0, 'alpha', 0), 8192)</td></tr></table>
+</body></html>
\ No newline at end of file
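
The numpy-backed twin of the previous module exposes the same function set but operates on numpy arrays rather than Bitmap objects. A short sketch, assuming FromRGBPixels accepts a flat sequence of per-channel byte values and that GetPixelColor returns an RgbaColor (neither is spelled out on the page):

```python
# Sketch for the numpy-backed implementation; see the assumptions above.
from telemetry.internal.image_processing import image_util_numpy_impl as impl

# Build a 4x2 all-black RGB image from a flat pixel buffer (bpp=3).
pixels = [0] * (4 * 2 * 3)
image = impl.FromRGBPixels(4, 2, pixels, 3)

print(impl.Width(image), impl.Height(image), impl.Channels(image))
color = impl.GetPixelColor(image, 0, 0)   # presumably an RgbaColor
```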
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.image_processing.screen_finder.html b/catapult/telemetry/docs/pydoc/telemetry.internal.image_processing.screen_finder.html
new file mode 100644
index 0000000..a635519
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.image_processing.screen_finder.html
@@ -0,0 +1,168 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.image_processing.screen_finder</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.image_processing.html"><font color="#ffffff">image_processing</font></a>.screen_finder</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/image_processing/screen_finder.py">telemetry/internal/image_processing/screen_finder.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.<br>
+#<br>
+#&nbsp;This&nbsp;script&nbsp;attempts&nbsp;to&nbsp;detect&nbsp;the&nbsp;region&nbsp;of&nbsp;a&nbsp;camera's&nbsp;field&nbsp;of&nbsp;view&nbsp;that<br>
+#&nbsp;contains&nbsp;the&nbsp;screen&nbsp;of&nbsp;the&nbsp;device&nbsp;we&nbsp;are&nbsp;testing.<br>
+#<br>
+#&nbsp;Usage:&nbsp;./screen_finder.py&nbsp;path_to_video&nbsp;0&nbsp;0&nbsp;--verbose</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="copy.html">copy</a><br>
+<a href="cv2.html">cv2</a><br>
+<a href="telemetry.internal.image_processing.cv_util.html">telemetry.internal.image_processing.cv_util</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.util.external_modules.html">telemetry.internal.util.external_modules</a><br>
+<a href="telemetry.internal.image_processing.frame_generator.html">telemetry.internal.image_processing.frame_generator</a><br>
+<a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="numpy.html">numpy</a><br>
+<a href="os.html">os</a><br>
+<a href="sys.html">sys</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.image_processing.video_file_frame_generator.html">telemetry.internal.image_processing.video_file_frame_generator</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.image_processing.screen_finder.html#ScreenFinder">ScreenFinder</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ScreenFinder">class <strong>ScreenFinder</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Finds&nbsp;and&nbsp;extracts&nbsp;device&nbsp;screens&nbsp;from&nbsp;video.<br>
+&nbsp;<br>
+Sample&nbsp;Usage:<br>
+&nbsp;&nbsp;sf&nbsp;=&nbsp;<a href="#ScreenFinder">ScreenFinder</a>(sys.argv[1])<br>
+&nbsp;&nbsp;while&nbsp;sf.<a href="#ScreenFinder-HasNext">HasNext</a>():<br>
+&nbsp;&nbsp;&nbsp;&nbsp;ret,&nbsp;screen&nbsp;=&nbsp;sf.<a href="#ScreenFinder-GetNext">GetNext</a>()<br>
+&nbsp;<br>
+Attributes:<br>
+&nbsp;&nbsp;_lost_corners:&nbsp;Each&nbsp;index&nbsp;represents&nbsp;whether&nbsp;or&nbsp;not&nbsp;we&nbsp;lost&nbsp;track&nbsp;of&nbsp;that<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;corner&nbsp;on&nbsp;the&nbsp;previous&nbsp;frame.&nbsp;Ordered&nbsp;by&nbsp;[top-right,&nbsp;top-left,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;bottom-left,&nbsp;bottom-right]<br>
+&nbsp;&nbsp;_frame:&nbsp;An&nbsp;unmodified&nbsp;copy&nbsp;of&nbsp;the&nbsp;frame&nbsp;we're&nbsp;currently&nbsp;processing.<br>
+&nbsp;&nbsp;_frame_debug:&nbsp;A&nbsp;copy&nbsp;of&nbsp;the&nbsp;frame&nbsp;we're&nbsp;currently&nbsp;processing,&nbsp;may&nbsp;be<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;modified&nbsp;at&nbsp;any&nbsp;time,&nbsp;used&nbsp;for&nbsp;debugging.<br>
+&nbsp;&nbsp;_frame_grey:&nbsp;A&nbsp;greyscale&nbsp;copy&nbsp;of&nbsp;the&nbsp;frame&nbsp;we're&nbsp;currently&nbsp;processing.<br>
+&nbsp;&nbsp;_frame_edges:&nbsp;A&nbsp;Canny&nbsp;Edge&nbsp;detected&nbsp;copy&nbsp;of&nbsp;the&nbsp;frame&nbsp;we're&nbsp;currently<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;processing.<br>
+&nbsp;&nbsp;_screen_size:&nbsp;The&nbsp;size&nbsp;of&nbsp;device&nbsp;screen&nbsp;in&nbsp;the&nbsp;video&nbsp;when&nbsp;first&nbsp;detected.<br>
+&nbsp;&nbsp;_avg_corners:&nbsp;Exponentially&nbsp;weighted&nbsp;average&nbsp;of&nbsp;the&nbsp;previous&nbsp;corner<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;locations.<br>
+&nbsp;&nbsp;_prev_corners:&nbsp;The&nbsp;location&nbsp;of&nbsp;the&nbsp;corners&nbsp;in&nbsp;the&nbsp;previous&nbsp;frame.<br>
+&nbsp;&nbsp;_lost_corner_frames:&nbsp;A&nbsp;counter&nbsp;of&nbsp;the&nbsp;number&nbsp;of&nbsp;successive&nbsp;frames&nbsp;in&nbsp;which<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;we've&nbsp;lost&nbsp;a&nbsp;corner&nbsp;location.<br>
+&nbsp;&nbsp;_border:&nbsp;See&nbsp;|border|&nbsp;above.<br>
+&nbsp;&nbsp;_min_line_length:&nbsp;The&nbsp;minimum&nbsp;length&nbsp;a&nbsp;line&nbsp;must&nbsp;be&nbsp;before&nbsp;we&nbsp;consider&nbsp;it<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;a&nbsp;possible&nbsp;screen&nbsp;edge.<br>
+&nbsp;&nbsp;_frame_generator:&nbsp;See&nbsp;|frame_generator|&nbsp;above.<br>
+&nbsp;&nbsp;_width,&nbsp;_height:&nbsp;The&nbsp;width&nbsp;and&nbsp;height&nbsp;of&nbsp;the&nbsp;frame.<br>
+&nbsp;&nbsp;_anglesp5,&nbsp;_anglesm5:&nbsp;The&nbsp;angles&nbsp;for&nbsp;each&nbsp;point&nbsp;we&nbsp;look&nbsp;at&nbsp;in&nbsp;the&nbsp;grid<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;when&nbsp;computing&nbsp;brightness,&nbsp;constant&nbsp;across&nbsp;frames.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="ScreenFinder-GetNext"><strong>GetNext</strong></a>(self)</dt><dd><tt>Gets&nbsp;the&nbsp;next&nbsp;screen&nbsp;image.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;numpy&nbsp;matrix&nbsp;containing&nbsp;the&nbsp;screen&nbsp;surrounded&nbsp;by&nbsp;the&nbsp;number&nbsp;of&nbsp;border<br>
+&nbsp;&nbsp;pixels&nbsp;specified&nbsp;in&nbsp;initialization,&nbsp;and&nbsp;the&nbsp;location&nbsp;of&nbsp;the&nbsp;detected<br>
+&nbsp;&nbsp;screen&nbsp;corners&nbsp;in&nbsp;the&nbsp;current&nbsp;frame,&nbsp;if&nbsp;a&nbsp;screen&nbsp;is&nbsp;found.&nbsp;The&nbsp;returned<br>
+&nbsp;&nbsp;screen&nbsp;is&nbsp;guaranteed&nbsp;to&nbsp;be&nbsp;the&nbsp;same&nbsp;size&nbsp;at&nbsp;each&nbsp;frame.<br>
+&nbsp;&nbsp;'None'&nbsp;and&nbsp;'None'&nbsp;if&nbsp;no&nbsp;screen&nbsp;was&nbsp;found&nbsp;on&nbsp;the&nbsp;current&nbsp;frame.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;FrameReadError:&nbsp;An&nbsp;error&nbsp;occurred&nbsp;in&nbsp;the&nbsp;FrameGenerator.<br>
+&nbsp;&nbsp;RuntimeError:&nbsp;This&nbsp;method&nbsp;was&nbsp;called&nbsp;when&nbsp;no&nbsp;frames&nbsp;were&nbsp;available.</tt></dd></dl>
+
+<dl><dt><a name="ScreenFinder-HasNext"><strong>HasNext</strong></a>(self)</dt><dd><tt>True&nbsp;if&nbsp;there&nbsp;are&nbsp;more&nbsp;frames&nbsp;available&nbsp;to&nbsp;process.</tt></dd></dl>
+
+<dl><dt><a name="ScreenFinder-__init__"><strong>__init__</strong></a>(self, frame_generator, border<font color="#909090">=5</font>)</dt><dd><tt>Initializes&nbsp;the&nbsp;<a href="#ScreenFinder">ScreenFinder</a>&nbsp;<a href="__builtin__.html#object">object</a>.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;frame_generator:&nbsp;FrameGenerator,&nbsp;An&nbsp;initialized&nbsp;Video&nbsp;Frame&nbsp;Generator.<br>
+&nbsp;&nbsp;border:&nbsp;int,&nbsp;number&nbsp;of&nbsp;pixels&nbsp;of&nbsp;border&nbsp;to&nbsp;be&nbsp;kept&nbsp;when&nbsp;cropping&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;detected&nbsp;screen.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;FrameReadError:&nbsp;The&nbsp;frame&nbsp;generator&nbsp;may&nbsp;output&nbsp;a&nbsp;read&nbsp;error&nbsp;during<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;initialization.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>CANNY_HYSTERESIS_THRESH_HIGH</strong> = 500</dl>
+
+<dl><dt><strong>CANNY_HYSTERESIS_THRESH_LOW</strong> = 300</dl>
+
+<dl><dt><strong>CORNER_AVERAGE_WEIGHT</strong> = 0.5</dl>
+
+<dl><dt><strong>CornerData</strong> = &lt;class 'telemetry.internal.image_processing.screen_finder.CornerData'&gt;</dl>
+
+<dl><dt><strong>DEBUG</strong> = False</dl>
+
+<dl><dt><strong>MAX_INTERFRAME_MOTION</strong> = 25</dl>
+
+<dl><dt><strong>MIN_CORNER_ABSOLUTE_BRIGHTNESS</strong> = 60</dl>
+
+<dl><dt><strong>MIN_RELATIVE_BRIGHTNESS_FACTOR</strong> = 1.5</dl>
+
+<dl><dt><strong>MIN_SCREEN_WIDTH</strong> = 40</dl>
+
+<dl><dt><strong>RESET_AFTER_N_BAD_FRAMES</strong> = 2</dl>
+
+<dl><dt><strong>SMALL_ANGLE</strong> = 0.08726646259971647</dl>
+
+<dl><dt><strong>ScreenNotFoundError</strong> = &lt;class 'telemetry.internal.image_processing.screen_finder.ScreenNotFoundError'&gt;</dl>
+
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-main"><strong>main</strong></a>()</dt></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>division</strong> = _Feature((2, 2, 0, 'alpha', 2), (3, 0, 0, 'alpha', 0), 8192)</td></tr></table>
+</body></html>
\ No newline at end of file
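
A sketch of the HasNext/GetNext loop wired to a video-file frame source. The VideoFileFrameGenerator class name is inferred from its module name (that page is not reproduced in full in this change), the capture path is a placeholder, and the unpacking order follows the GetNext docstring above.

```python
# Sketch: ScreenFinder fed by a video-file frame generator (names assumed).
from telemetry.internal.image_processing import screen_finder
from telemetry.internal.image_processing import video_file_frame_generator

frames = video_file_frame_generator.VideoFileFrameGenerator('capture.mp4')
finder = screen_finder.ScreenFinder(frames, border=5)

while finder.HasNext():
    screen, corners = finder.GetNext()   # both None if no screen was found
    if screen is None:
        continue
    # `screen` is a numpy matrix of the device screen plus the 5-pixel border,
    # guaranteed to have the same size on every frame.
```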
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.image_processing.video.html b/catapult/telemetry/docs/pydoc/telemetry.internal.image_processing.video.html
new file mode 100644
index 0000000..03367f9
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.image_processing.video.html
@@ -0,0 +1,152 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.image_processing.video</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.image_processing.html"><font color="#ffffff">image_processing</font></a>.video</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/image_processing/video.py">telemetry/internal/image_processing/video.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="catapult_base.cloud_storage.html">catapult_base.cloud_storage</a><br>
+<a href="telemetry.util.image_util.html">telemetry.util.image_util</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.platform.html">telemetry.core.platform</a><br>
+<a href="telemetry.util.rgba_color.html">telemetry.util.rgba_color</a><br>
+</td><td width="25%" valign=top><a href="subprocess.html">subprocess</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.image_processing.video.html#Video">Video</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.image_processing.video.html#BoundingBoxNotFoundException">BoundingBoxNotFoundException</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="BoundingBoxNotFoundException">class <strong>BoundingBoxNotFoundException</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.image_processing.video.html#BoundingBoxNotFoundException">BoundingBoxNotFoundException</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="BoundingBoxNotFoundException-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#BoundingBoxNotFoundException-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#BoundingBoxNotFoundException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="BoundingBoxNotFoundException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#BoundingBoxNotFoundException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="BoundingBoxNotFoundException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#BoundingBoxNotFoundException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="BoundingBoxNotFoundException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#BoundingBoxNotFoundException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="BoundingBoxNotFoundException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#BoundingBoxNotFoundException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="BoundingBoxNotFoundException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="BoundingBoxNotFoundException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#BoundingBoxNotFoundException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="BoundingBoxNotFoundException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#BoundingBoxNotFoundException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="BoundingBoxNotFoundException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="BoundingBoxNotFoundException-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#BoundingBoxNotFoundException-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="BoundingBoxNotFoundException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Video">class <strong>Video</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Utilities&nbsp;for&nbsp;storing&nbsp;and&nbsp;interacting&nbsp;with&nbsp;the&nbsp;video&nbsp;capture.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="Video-GetVideoFrameIter"><strong>GetVideoFrameIter</strong></a>(self)</dt><dd><tt>Returns&nbsp;the&nbsp;iteration&nbsp;for&nbsp;processing&nbsp;the&nbsp;video&nbsp;capture.<br>
+&nbsp;<br>
+This&nbsp;looks&nbsp;for&nbsp;the&nbsp;initial&nbsp;color&nbsp;flash&nbsp;in&nbsp;the&nbsp;first&nbsp;frame&nbsp;to&nbsp;establish&nbsp;the<br>
+tab&nbsp;content&nbsp;boundaries&nbsp;and&nbsp;then&nbsp;omits&nbsp;all&nbsp;frames&nbsp;displaying&nbsp;the&nbsp;flash.<br>
+&nbsp;<br>
+Yields:<br>
+&nbsp;&nbsp;(time_ms,&nbsp;image)&nbsp;tuples&nbsp;representing&nbsp;each&nbsp;video&nbsp;keyframe.&nbsp;Only&nbsp;the&nbsp;first<br>
+&nbsp;&nbsp;frame&nbsp;in&nbsp;a&nbsp;run&nbsp;of&nbsp;sequential&nbsp;duplicate&nbsp;bitmaps&nbsp;is&nbsp;typically&nbsp;included.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;time_ms&nbsp;is&nbsp;milliseconds&nbsp;since&nbsp;navigationStart.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;image&nbsp;may&nbsp;be&nbsp;a&nbsp;telemetry.core.Bitmap,&nbsp;or&nbsp;a&nbsp;numpy&nbsp;array&nbsp;depending&nbsp;on<br>
+&nbsp;&nbsp;&nbsp;&nbsp;whether&nbsp;numpy&nbsp;is&nbsp;installed.</tt></dd></dl>
+
+<dl><dt><a name="Video-UploadToCloudStorage"><strong>UploadToCloudStorage</strong></a>(self, bucket, target_path)</dt><dd><tt>Uploads&nbsp;video&nbsp;file&nbsp;to&nbsp;cloud&nbsp;storage.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;target_path:&nbsp;Path&nbsp;indicating&nbsp;where&nbsp;to&nbsp;store&nbsp;the&nbsp;file&nbsp;in&nbsp;cloud&nbsp;storage.</tt></dd></dl>
+
+<dl><dt><a name="Video-__init__"><strong>__init__</strong></a>(self, video_file_obj)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>HIGHLIGHT_ORANGE_FRAME</strong> = RgbaColor(r=222, g=100, b=13, a=255)</td></tr></table>
+</body></html>
\ No newline at end of file
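The Video class documented above pairs a keyframe iterator with a cloud-storage upload helper. Below is a minimal sketch of how it might be driven, assuming a telemetry checkout where telemetry.internal.image_processing.video is importable; the file object, bucket name and target path are placeholders, not values taken from this diff.

# Iterate the keyframes yielded by GetVideoFrameIter() and archive the capture.
from telemetry.internal.image_processing import video

def summarize_capture(video_file_obj, bucket='example-bucket'):
  capture = video.Video(video_file_obj)
  for time_ms, image in capture.GetVideoFrameIter():
    # time_ms is milliseconds since navigationStart; image is a Bitmap or a
    # numpy array, depending on whether numpy is installed.
    print('keyframe at %d ms (%s)' % (time_ms, type(image).__name__))
  # target_path indicates where the file is stored in cloud storage.
  capture.UploadToCloudStorage(bucket, 'captures/example.webm')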
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.image_processing.video_file_frame_generator.html b/catapult/telemetry/docs/pydoc/telemetry.internal.image_processing.video_file_frame_generator.html
new file mode 100644
index 0000000..72fbbe5
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.image_processing.video_file_frame_generator.html
@@ -0,0 +1,118 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.image_processing.video_file_frame_generator</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.image_processing.html"><font color="#ffffff">image_processing</font></a>.video_file_frame_generator</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/image_processing/video_file_frame_generator.py">telemetry/internal/image_processing/video_file_frame_generator.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="cv2.html">cv2</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.util.external_modules.html">telemetry.internal.util.external_modules</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.image_processing.frame_generator.html">telemetry.internal.image_processing.frame_generator</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.image_processing.frame_generator.html#FrameGenerator">telemetry.internal.image_processing.frame_generator.FrameGenerator</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.image_processing.video_file_frame_generator.html#VideoFileFrameGenerator">VideoFileFrameGenerator</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="VideoFileFrameGenerator">class <strong>VideoFileFrameGenerator</strong></a>(<a href="telemetry.internal.image_processing.frame_generator.html#FrameGenerator">telemetry.internal.image_processing.frame_generator.FrameGenerator</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Provides&nbsp;a&nbsp;Frame&nbsp;Generator&nbsp;for&nbsp;a&nbsp;video&nbsp;file.<br>
+&nbsp;<br>
+Sample&nbsp;Usage:<br>
+&nbsp;&nbsp;generator&nbsp;=&nbsp;<a href="#VideoFileFrameGenerator">VideoFileFrameGenerator</a>(sys.argv[1]).GetGenerator()<br>
+&nbsp;&nbsp;for&nbsp;frame&nbsp;in&nbsp;generator:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;#&nbsp;Do&nbsp;something<br>
+&nbsp;<br>
+Attributes:<br>
+&nbsp;&nbsp;_capture:&nbsp;The&nbsp;openCV&nbsp;video&nbsp;capture.<br>
+&nbsp;&nbsp;_frame_count:&nbsp;The&nbsp;number&nbsp;of&nbsp;frames&nbsp;in&nbsp;the&nbsp;video&nbsp;capture.<br>
+&nbsp;&nbsp;_frame_index:&nbsp;The&nbsp;frame&nbsp;number&nbsp;of&nbsp;the&nbsp;current&nbsp;frame.<br>
+&nbsp;&nbsp;_timestamp:&nbsp;The&nbsp;timestamp&nbsp;of&nbsp;the&nbsp;current&nbsp;frame.<br>
+&nbsp;&nbsp;_dimensions:&nbsp;The&nbsp;dimensions&nbsp;of&nbsp;the&nbsp;video&nbsp;capture.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.image_processing.video_file_frame_generator.html#VideoFileFrameGenerator">VideoFileFrameGenerator</a></dd>
+<dd><a href="telemetry.internal.image_processing.frame_generator.html#FrameGenerator">telemetry.internal.image_processing.frame_generator.FrameGenerator</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="VideoFileFrameGenerator-__init__"><strong>__init__</strong></a>(self, video_filename, start_frame_index<font color="#909090">=0</font>)</dt><dd><tt>Initializes&nbsp;the&nbsp;<a href="#VideoFileFrameGenerator">VideoFileFrameGenerator</a>&nbsp;object.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;video_filename:&nbsp;str,&nbsp;The&nbsp;path&nbsp;to&nbsp;the&nbsp;video&nbsp;file.<br>
+&nbsp;&nbsp;start_frame_index:&nbsp;int,&nbsp;The&nbsp;number&nbsp;of&nbsp;frames&nbsp;to&nbsp;skip&nbsp;at&nbsp;the&nbsp;start&nbsp;of&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;file.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;FrameReadError:&nbsp;A&nbsp;read&nbsp;error&nbsp;occurred&nbsp;during&nbsp;initialization.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>CurrentFrameNumber</strong></dt>
+</dl>
+<dl><dt><strong>CurrentTimestamp</strong></dt>
+</dl>
+<dl><dt><strong>Dimensions</strong></dt>
+</dl>
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>__abstractmethods__</strong> = frozenset([])</dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.image_processing.frame_generator.html#FrameGenerator">telemetry.internal.image_processing.frame_generator.FrameGenerator</a>:<br>
+<dl><dt><strong>Generator</strong></dt>
+<dd><tt>Returns:<br>
+A&nbsp;reference&nbsp;to&nbsp;the&nbsp;created&nbsp;generator.</tt></dd>
+</dl>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="telemetry.internal.image_processing.frame_generator.html#FrameGenerator">telemetry.internal.image_processing.frame_generator.FrameGenerator</a>:<br>
+<dl><dt><strong>__metaclass__</strong> = &lt;class 'abc.ABCMeta'&gt;<dd><tt>Metaclass&nbsp;for&nbsp;defining&nbsp;Abstract&nbsp;Base&nbsp;Classes&nbsp;(ABCs).<br>
+&nbsp;<br>
+Use&nbsp;this&nbsp;metaclass&nbsp;to&nbsp;create&nbsp;an&nbsp;ABC.&nbsp;&nbsp;An&nbsp;ABC&nbsp;can&nbsp;be&nbsp;subclassed<br>
+directly,&nbsp;and&nbsp;then&nbsp;acts&nbsp;as&nbsp;a&nbsp;mix-in&nbsp;class.&nbsp;&nbsp;You&nbsp;can&nbsp;also&nbsp;register<br>
+unrelated&nbsp;concrete&nbsp;classes&nbsp;(even&nbsp;built-in&nbsp;classes)&nbsp;and&nbsp;unrelated<br>
+ABCs&nbsp;as&nbsp;'virtual&nbsp;subclasses'&nbsp;--&nbsp;these&nbsp;and&nbsp;their&nbsp;descendants&nbsp;will<br>
+be&nbsp;considered&nbsp;subclasses&nbsp;of&nbsp;the&nbsp;registering&nbsp;ABC&nbsp;by&nbsp;the&nbsp;built-in<br>
+issubclass()&nbsp;function,&nbsp;but&nbsp;the&nbsp;registering&nbsp;ABC&nbsp;won't&nbsp;show&nbsp;up&nbsp;in<br>
+their&nbsp;MRO&nbsp;(Method&nbsp;Resolution&nbsp;Order)&nbsp;nor&nbsp;will&nbsp;method<br>
+implementations&nbsp;defined&nbsp;by&nbsp;the&nbsp;registering&nbsp;ABC&nbsp;be&nbsp;callable&nbsp;(not<br>
+even&nbsp;via&nbsp;super()).</tt></dl>
+
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
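The sample usage in the VideoFileFrameGenerator docstring above translates directly into a small driver script. The sketch below assumes cv2 and the telemetry checkout are on the path; it iterates via the inherited Generator descriptor listed above (the docstring's sample spells the accessor GetGenerator()), so treat the exact accessor spelling as an assumption.

import sys
from telemetry.internal.image_processing import video_file_frame_generator

def dump_frames(path):
  # Skip no frames at the start of the file.
  gen = video_file_frame_generator.VideoFileFrameGenerator(
      path, start_frame_index=0)
  for _frame in gen.Generator:
    # CurrentFrameNumber / CurrentTimestamp describe the frame just decoded.
    print('frame %d at %s' % (gen.CurrentFrameNumber, gen.CurrentTimestamp))

if __name__ == '__main__':
  dump_frames(sys.argv[1])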
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.android_device.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.android_device.html
new file mode 100644
index 0000000..8a13a01
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.android_device.html
@@ -0,0 +1,111 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.android_device</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.android_device</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/android_device.py">telemetry/internal/platform/android_device.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="pylib.constants.html">pylib.constants</a><br>
+<a href="telemetry.internal.platform.device.html">telemetry.internal.platform.device</a><br>
+<a href="devil.android.device_blacklist.html">devil.android.device_blacklist</a><br>
+</td><td width="25%" valign=top><a href="devil.android.device_errors.html">devil.android.device_errors</a><br>
+<a href="devil.android.device_utils.html">devil.android.device_utils</a><br>
+<a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.profiler.monsoon.html">telemetry.internal.platform.profiler.monsoon</a><br>
+<a href="os.html">os</a><br>
+<a href="re.html">re</a><br>
+</td><td width="25%" valign=top><a href="subprocess.html">subprocess</a><br>
+<a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.device.html#Device">telemetry.internal.platform.device.Device</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.android_device.html#AndroidDevice">AndroidDevice</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="AndroidDevice">class <strong>AndroidDevice</strong></a>(<a href="telemetry.internal.platform.device.html#Device">telemetry.internal.platform.device.Device</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Class&nbsp;represents&nbsp;information&nbsp;for&nbsp;connecting&nbsp;to&nbsp;an&nbsp;android&nbsp;device.<br>
+&nbsp;<br>
+Attributes:<br>
+&nbsp;&nbsp;device_id:&nbsp;the&nbsp;device's&nbsp;serial&nbsp;string&nbsp;created&nbsp;by&nbsp;adb&nbsp;to&nbsp;uniquely<br>
+&nbsp;&nbsp;&nbsp;&nbsp;identify&nbsp;an&nbsp;emulator/device&nbsp;instance.&nbsp;This&nbsp;string&nbsp;can&nbsp;be&nbsp;found&nbsp;by&nbsp;running<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'adb&nbsp;devices'&nbsp;command<br>
+&nbsp;&nbsp;enable_performance_mode:&nbsp;when&nbsp;this&nbsp;is&nbsp;set&nbsp;to&nbsp;True,&nbsp;the&nbsp;android&nbsp;platform&nbsp;will&nbsp;be<br>
+&nbsp;&nbsp;&nbsp;&nbsp;set&nbsp;to&nbsp;high&nbsp;performance&nbsp;mode&nbsp;after&nbsp;the&nbsp;browser&nbsp;is&nbsp;started.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.android_device.html#AndroidDevice">AndroidDevice</a></dd>
+<dd><a href="telemetry.internal.platform.device.html#Device">telemetry.internal.platform.device.Device</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="AndroidDevice-__init__"><strong>__init__</strong></a>(self, device_id, enable_performance_mode<font color="#909090">=True</font>)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="AndroidDevice-GetAllConnectedDevices"><strong>GetAllConnectedDevices</strong></a>(cls, blacklist)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>device_id</strong></dt>
+</dl>
+<dl><dt><strong>enable_performance_mode</strong></dt>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.device.html#Device">telemetry.internal.platform.device.Device</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>guid</strong></dt>
+</dl>
+<dl><dt><strong>name</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-CanDiscoverDevices"><strong>CanDiscoverDevices</strong></a>()</dt><dd><tt>Returns&nbsp;true&nbsp;if&nbsp;devices&nbsp;are&nbsp;discoverable&nbsp;via&nbsp;adb.</tt></dd></dl>
+ <dl><dt><a name="-FindAllAvailableDevices"><strong>FindAllAvailableDevices</strong></a>(options)</dt><dd><tt>Returns&nbsp;a&nbsp;list&nbsp;of&nbsp;available&nbsp;devices.</tt></dd></dl>
+ <dl><dt><a name="-GetDevice"><strong>GetDevice</strong></a>(finder_options)</dt><dd><tt>Return&nbsp;a&nbsp;Platform&nbsp;instance&nbsp;for&nbsp;the&nbsp;device&nbsp;specified&nbsp;by&nbsp;|finder_options|.</tt></dd></dl>
+ <dl><dt><a name="-GetDeviceSerials"><strong>GetDeviceSerials</strong></a>(blacklist)</dt><dd><tt>Return&nbsp;the&nbsp;list&nbsp;of&nbsp;device&nbsp;serials&nbsp;of&nbsp;healthy&nbsp;devices.<br>
+&nbsp;<br>
+If&nbsp;a&nbsp;preferred&nbsp;device&nbsp;has&nbsp;been&nbsp;set&nbsp;with&nbsp;ANDROID_SERIAL,&nbsp;it&nbsp;will&nbsp;be&nbsp;first&nbsp;in<br>
+the&nbsp;returned&nbsp;list.&nbsp;The&nbsp;arguments&nbsp;specify&nbsp;what&nbsp;devices&nbsp;to&nbsp;include&nbsp;in&nbsp;the&nbsp;list.</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
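The module functions above cover the usual discovery flow: check that adb can see devices, list healthy serials, then wrap each serial in an AndroidDevice. A minimal sketch under the assumption of a telemetry checkout with adb available; passing None as the blacklist is a simplification for illustration.

from telemetry.internal.platform import android_device

def list_android_devices():
  if not android_device.CanDiscoverDevices():
    return []
  devices = []
  # If ANDROID_SERIAL is set, the preferred device comes first in this list.
  for serial in android_device.GetDeviceSerials(blacklist=None):
    devices.append(android_device.AndroidDevice(
        serial, enable_performance_mode=True))
  return devices

for device in list_android_devices():
  print('found device %s' % device.device_id)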
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.android_platform_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.android_platform_backend.html
new file mode 100644
index 0000000..5c3c211
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.android_platform_backend.html
@@ -0,0 +1,381 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.android_platform_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.android_platform_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/android_platform_backend.py">telemetry/internal/platform/android_platform_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="adb_install_cert.html">adb_install_cert</a><br>
+<a href="telemetry.internal.platform.android_device.html">telemetry.internal.platform.android_device</a><br>
+<a href="telemetry.internal.platform.power_monitor.android_dumpsys_power_monitor.html">telemetry.internal.platform.power_monitor.android_dumpsys_power_monitor</a><br>
+<a href="telemetry.internal.forwarders.android_forwarder.html">telemetry.internal.forwarders.android_forwarder</a><br>
+<a href="telemetry.internal.platform.power_monitor.android_fuelgauge_power_monitor.html">telemetry.internal.platform.power_monitor.android_fuelgauge_power_monitor</a><br>
+<a href="telemetry.core.android_platform.html">telemetry.core.android_platform</a><br>
+<a href="telemetry.internal.platform.profiler.android_prebuilt_profiler_helper.html">telemetry.internal.platform.profiler.android_prebuilt_profiler_helper</a><br>
+<a href="telemetry.internal.platform.power_monitor.android_temperature_monitor.html">telemetry.internal.platform.power_monitor.android_temperature_monitor</a><br>
+<a href="devil.android.battery_utils.html">devil.android.battery_utils</a><br>
+<a href="telemetry.internal.util.binary_manager.html">telemetry.internal.util.binary_manager</a><br>
+</td><td width="25%" valign=top><a href="devil.android.perf.cache_control.html">devil.android.perf.cache_control</a><br>
+<a href="certutils.html">certutils</a><br>
+<a href="pylib.constants.html">pylib.constants</a><br>
+<a href="telemetry.decorators.html">telemetry.decorators</a><br>
+<a href="devil.android.device_errors.html">devil.android.device_errors</a><br>
+<a href="devil.android.device_utils.html">devil.android.device_utils</a><br>
+<a href="telemetry.internal.util.exception_formatter.html">telemetry.internal.util.exception_formatter</a><br>
+<a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+<a href="telemetry.internal.util.external_modules.html">telemetry.internal.util.external_modules</a><br>
+<a href="telemetry.internal.platform.linux_based_platform_backend.html">telemetry.internal.platform.linux_based_platform_backend</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+<a href="telemetry.internal.platform.power_monitor.monsoon_power_monitor.html">telemetry.internal.platform.power_monitor.monsoon_power_monitor</a><br>
+<a href="os.html">os</a><br>
+<a href="devil.android.perf.perf_control.html">devil.android.perf.perf_control</a><br>
+<a href="telemetry.core.platform.html">telemetry.core.platform</a><br>
+<a href="platformsettings.html">platformsettings</a><br>
+<a href="telemetry.internal.platform.power_monitor.power_monitor_controller.html">telemetry.internal.platform.power_monitor.power_monitor_controller</a><br>
+<a href="psutil.html">psutil</a><br>
+<a href="re.html">re</a><br>
+<a href="pylib.screenshot.html">pylib.screenshot</a><br>
+</td><td width="25%" valign=top><a href="shutil.html">shutil</a><br>
+<a href="stat.html">stat</a><br>
+<a href="subprocess.html">subprocess</a><br>
+<a href="devil.android.perf.surface_stats_collector.html">devil.android.perf.surface_stats_collector</a><br>
+<a href="telemetry.internal.platform.power_monitor.sysfs_power_monitor.html">telemetry.internal.platform.power_monitor.sysfs_power_monitor</a><br>
+<a href="tempfile.html">tempfile</a><br>
+<a href="devil.android.perf.thermal_throttle.html">devil.android.perf.thermal_throttle</a><br>
+<a href="telemetry.core.util.html">telemetry.core.util</a><br>
+<a href="devil.android.sdk.version_codes.html">devil.android.sdk.version_codes</a><br>
+<a href="telemetry.internal.image_processing.video.html">telemetry.internal.image_processing.video</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.linux_based_platform_backend.html#LinuxBasedPlatformBackend">telemetry.internal.platform.linux_based_platform_backend.LinuxBasedPlatformBackend</a>(<a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.android_platform_backend.html#AndroidPlatformBackend">AndroidPlatformBackend</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="AndroidPlatformBackend">class <strong>AndroidPlatformBackend</strong></a>(<a href="telemetry.internal.platform.linux_based_platform_backend.html#LinuxBasedPlatformBackend">telemetry.internal.platform.linux_based_platform_backend.LinuxBasedPlatformBackend</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.android_platform_backend.html#AndroidPlatformBackend">AndroidPlatformBackend</a></dd>
+<dd><a href="telemetry.internal.platform.linux_based_platform_backend.html#LinuxBasedPlatformBackend">telemetry.internal.platform.linux_based_platform_backend.LinuxBasedPlatformBackend</a></dd>
+<dd><a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="AndroidPlatformBackend-CanCaptureVideo"><strong>CanCaptureVideo</strong></a>(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-CanFlushIndividualFilesFromSystemCache"><strong>CanFlushIndividualFilesFromSystemCache</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-CanLaunchApplication"><strong>CanLaunchApplication</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-CanMonitorNetworkData"><strong>CanMonitorNetworkData</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-CanMonitorPower"><strong>CanMonitorPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-CanMonitorThermalThrottling"><strong>CanMonitorThermalThrottling</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-CanTakeScreenshot"><strong>CanTakeScreenshot</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-DismissCrashDialogIfNeeded"><strong>DismissCrashDialogIfNeeded</strong></a>(self)</dt><dd><tt>Dismiss&nbsp;any&nbsp;error&nbsp;dialogs.<br>
+&nbsp;<br>
+Limit&nbsp;the&nbsp;number&nbsp;in&nbsp;case&nbsp;we&nbsp;have&nbsp;an&nbsp;error&nbsp;loop&nbsp;or&nbsp;we&nbsp;are&nbsp;failing&nbsp;to&nbsp;dismiss.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-FlushDnsCache"><strong>FlushDnsCache</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-FlushEntireSystemCache"><strong>FlushEntireSystemCache</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-FlushSystemCacheForDirectory"><strong>FlushSystemCacheForDirectory</strong></a>(self, directory)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-ForwardHostToDevice"><strong>ForwardHostToDevice</strong></a>(self, host_port, device_port)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-GetArchName"><strong>GetArchName</strong></a>(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-GetChildPids"><strong>GetChildPids</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-GetCommandLine"><strong>GetCommandLine</strong></a>(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-GetCpuStats"><strong>GetCpuStats</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-GetCpuTimestamp"><strong>GetCpuTimestamp</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-GetDeviceTypeName"><strong>GetDeviceTypeName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-GetFileContents"><strong>GetFileContents</strong></a>(self, fname)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-GetMemoryStats"><strong>GetMemoryStats</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-GetNetworkData"><strong>GetNetworkData</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-GetOSName"><strong>GetOSName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-GetOSVersionName"><strong>GetOSVersionName</strong></a>(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-GetPsOutput"><strong>GetPsOutput</strong></a>(self, columns, pid<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-GetStackTrace"><strong>GetStackTrace</strong></a>(self, target_arch)</dt><dd><tt>Returns&nbsp;stack&nbsp;trace.<br>
+&nbsp;<br>
+The&nbsp;stack&nbsp;trace&nbsp;consists&nbsp;of&nbsp;raw&nbsp;logcat&nbsp;dump,&nbsp;logcat&nbsp;dump&nbsp;with&nbsp;symbols,<br>
+and&nbsp;stack&nbsp;info&nbsp;from&nbsp;tombstone&nbsp;files.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;target_arch:&nbsp;String&nbsp;specifying&nbsp;device&nbsp;architecture&nbsp;(eg.&nbsp;arm,&nbsp;arm64,&nbsp;mips,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;x86,&nbsp;x86_64)</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-GetStandardOutput"><strong>GetStandardOutput</strong></a>(self, number_of_lines<font color="#909090">=500</font>)</dt><dd><tt>Returns&nbsp;most&nbsp;recent&nbsp;lines&nbsp;of&nbsp;logcat&nbsp;dump.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;number_of_lines:&nbsp;Number&nbsp;of&nbsp;lines&nbsp;of&nbsp;log&nbsp;to&nbsp;return.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-HasBeenThermallyThrottled"><strong>HasBeenThermallyThrottled</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-InstallApplication"><strong>InstallApplication</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-InstallTestCa"><strong>InstallTestCa</strong></a>(self)</dt><dd><tt>Install&nbsp;a&nbsp;randomly&nbsp;generated&nbsp;root&nbsp;CA&nbsp;on&nbsp;the&nbsp;android&nbsp;device.<br>
+&nbsp;<br>
+This&nbsp;allows&nbsp;transparent&nbsp;HTTPS&nbsp;testing&nbsp;with&nbsp;the&nbsp;WPR&nbsp;server&nbsp;without&nbsp;the&nbsp;need<br>
+to&nbsp;tweak&nbsp;the&nbsp;application&nbsp;network&nbsp;stack.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-IsAppRunning"><strong>IsAppRunning</strong></a>(self, process_name)</dt><dd><tt>Determine&nbsp;if&nbsp;the&nbsp;given&nbsp;process&nbsp;is&nbsp;running.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;process_name:&nbsp;The&nbsp;full&nbsp;package&nbsp;name&nbsp;string&nbsp;of&nbsp;the&nbsp;process.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-IsApplicationRunning"><strong>IsApplicationRunning</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-IsDisplayTracingSupported"><strong>IsDisplayTracingSupported</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-IsScreenLocked"><strong>IsScreenLocked</strong></a>(self)</dt><dd><tt>Determines&nbsp;if&nbsp;device&nbsp;screen&nbsp;is&nbsp;locked.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-IsScreenOn"><strong>IsScreenOn</strong></a>(self)</dt><dd><tt>Determines&nbsp;if&nbsp;device&nbsp;screen&nbsp;is&nbsp;on.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-IsThermallyThrottled"><strong>IsThermallyThrottled</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-KillApplication"><strong>KillApplication</strong></a>(self, application)</dt><dd><tt>Kill&nbsp;the&nbsp;given&nbsp;|application|.<br>
+&nbsp;<br>
+Might&nbsp;be&nbsp;used&nbsp;instead&nbsp;of&nbsp;ForceStop&nbsp;for&nbsp;efficiency&nbsp;reasons.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;application:&nbsp;The&nbsp;full&nbsp;package&nbsp;name&nbsp;string&nbsp;of&nbsp;the&nbsp;application&nbsp;to&nbsp;kill.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-LaunchApplication"><strong>LaunchApplication</strong></a>(self, application, parameters<font color="#909090">=None</font>, elevate_privilege<font color="#909090">=False</font>)</dt><dd><tt>Launches&nbsp;the&nbsp;given&nbsp;|application|&nbsp;with&nbsp;a&nbsp;list&nbsp;of&nbsp;|parameters|&nbsp;on&nbsp;the&nbsp;OS.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;application:&nbsp;The&nbsp;full&nbsp;package&nbsp;name&nbsp;string&nbsp;of&nbsp;the&nbsp;application&nbsp;to&nbsp;launch.<br>
+&nbsp;&nbsp;parameters:&nbsp;A&nbsp;list&nbsp;of&nbsp;parameters&nbsp;to&nbsp;be&nbsp;passed&nbsp;to&nbsp;the&nbsp;ActivityManager.<br>
+&nbsp;&nbsp;elevate_privilege:&nbsp;Currently&nbsp;unimplemented&nbsp;on&nbsp;Android.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-PathExists"><strong>PathExists</strong></a>(self, device_path, timeout<font color="#909090">=None</font>, retries<font color="#909090">=None</font>)</dt><dd><tt>Return&nbsp;whether&nbsp;the&nbsp;given&nbsp;path&nbsp;exists&nbsp;on&nbsp;the&nbsp;device.<br>
+This&nbsp;method&nbsp;is&nbsp;the&nbsp;same&nbsp;as<br>
+devil.android.device_utils.DeviceUtils.PathExists.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-PullProfile"><strong>PullProfile</strong></a>(self, package, output_profile_path)</dt><dd><tt>Copy&nbsp;application&nbsp;profile&nbsp;from&nbsp;device&nbsp;to&nbsp;host&nbsp;machine.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;package:&nbsp;The&nbsp;full&nbsp;package&nbsp;name&nbsp;string&nbsp;of&nbsp;the&nbsp;application&nbsp;for&nbsp;which&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;profile&nbsp;is&nbsp;to&nbsp;be&nbsp;copied.<br>
+&nbsp;&nbsp;output_profile_path:&nbsp;Location&nbsp;where&nbsp;the&nbsp;profile&nbsp;is&nbsp;to&nbsp;be&nbsp;stored&nbsp;on&nbsp;the&nbsp;host&nbsp;machine.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-PurgeUnpinnedMemory"><strong>PurgeUnpinnedMemory</strong></a>(self)</dt><dd><tt>Purges&nbsp;the&nbsp;unpinned&nbsp;ashmem&nbsp;memory&nbsp;for&nbsp;the&nbsp;whole&nbsp;system.<br>
+&nbsp;<br>
+This&nbsp;can&nbsp;be&nbsp;used&nbsp;to&nbsp;make&nbsp;memory&nbsp;measurements&nbsp;more&nbsp;stable.&nbsp;Requires&nbsp;root.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-PushProfile"><strong>PushProfile</strong></a>(self, package, new_profile_dir)</dt><dd><tt>Replace&nbsp;application&nbsp;profile&nbsp;with&nbsp;files&nbsp;found&nbsp;on&nbsp;host&nbsp;machine.<br>
+&nbsp;<br>
+Pushing&nbsp;the&nbsp;profile&nbsp;is&nbsp;slow,&nbsp;so&nbsp;we&nbsp;don't&nbsp;want&nbsp;to&nbsp;do&nbsp;it&nbsp;every&nbsp;time.<br>
+Avoid&nbsp;this&nbsp;by&nbsp;pushing&nbsp;to&nbsp;a&nbsp;safe&nbsp;location&nbsp;using&nbsp;PushChangedFiles,&nbsp;and<br>
+then&nbsp;copying&nbsp;into&nbsp;the&nbsp;correct&nbsp;location&nbsp;on&nbsp;each&nbsp;test&nbsp;run.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;package:&nbsp;The&nbsp;full&nbsp;package&nbsp;name&nbsp;string&nbsp;of&nbsp;the&nbsp;application&nbsp;for&nbsp;which&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;profile&nbsp;is&nbsp;to&nbsp;be&nbsp;updated.<br>
+&nbsp;&nbsp;new_profile_dir:&nbsp;Location&nbsp;where&nbsp;the&nbsp;profile&nbsp;to&nbsp;be&nbsp;pushed&nbsp;is&nbsp;stored&nbsp;on&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;host&nbsp;machine.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-RemoveProfile"><strong>RemoveProfile</strong></a>(self, package, ignore_list)</dt><dd><tt>Delete&nbsp;application&nbsp;profile&nbsp;on&nbsp;device.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;package:&nbsp;The&nbsp;full&nbsp;package&nbsp;name&nbsp;string&nbsp;of&nbsp;the&nbsp;application&nbsp;for&nbsp;which&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;profile&nbsp;is&nbsp;to&nbsp;be&nbsp;deleted.<br>
+&nbsp;&nbsp;ignore_list:&nbsp;List&nbsp;of&nbsp;files&nbsp;to&nbsp;keep.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-RemoveTestCa"><strong>RemoveTestCa</strong></a>(self)</dt><dd><tt>Remove&nbsp;root&nbsp;CA&nbsp;generated&nbsp;by&nbsp;previous&nbsp;call&nbsp;to&nbsp;<a href="#AndroidPlatformBackend-InstallTestCa">InstallTestCa</a>().<br>
+&nbsp;<br>
+Removes&nbsp;the&nbsp;test&nbsp;root&nbsp;certificate&nbsp;from&nbsp;both&nbsp;the&nbsp;device&nbsp;and&nbsp;host&nbsp;machine.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-RunCommand"><strong>RunCommand</strong></a>(self, command)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-SetDebugApp"><strong>SetDebugApp</strong></a>(self, package)</dt><dd><tt>Set&nbsp;application&nbsp;to&nbsp;debugging.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;package:&nbsp;The&nbsp;full&nbsp;package&nbsp;name&nbsp;string&nbsp;of&nbsp;the&nbsp;application.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-SetFullPerformanceModeEnabled"><strong>SetFullPerformanceModeEnabled</strong></a>(self, enabled)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-SetGraphicsMemoryTrackingEnabled"><strong>SetGraphicsMemoryTrackingEnabled</strong></a>(self, enabled)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-SetRelaxSslCheck"><strong>SetRelaxSslCheck</strong></a>(self, value)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-StartDisplayTracing"><strong>StartDisplayTracing</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-StartMonitoringPower"><strong>StartMonitoringPower</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-StartVideoCapture"><strong>StartVideoCapture</strong></a>(self, min_bitrate_mbps)</dt><dd><tt>Starts&nbsp;the&nbsp;video&nbsp;capture&nbsp;at&nbsp;specified&nbsp;bitrate.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-StopApplication"><strong>StopApplication</strong></a>(self, application)</dt><dd><tt>Stop&nbsp;the&nbsp;given&nbsp;|application|.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;application:&nbsp;The&nbsp;full&nbsp;package&nbsp;name&nbsp;string&nbsp;of&nbsp;the&nbsp;application&nbsp;to&nbsp;stop.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-StopDisplayTracing"><strong>StopDisplayTracing</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-StopForwardingHost"><strong>StopForwardingHost</strong></a>(self, host_port)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-StopMonitoringPower"><strong>StopMonitoringPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-StopVideoCapture"><strong>StopVideoCapture</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-TakeScreenshot"><strong>TakeScreenshot</strong></a>(self, file_path)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-__init__"><strong>__init__</strong></a>(self, device, finder_options)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="AndroidPlatformBackend-CreatePlatformForDevice"><strong>CreatePlatformForDevice</strong></a>(cls, device, finder_options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-SupportsDevice"><strong>SupportsDevice</strong></a>(cls, device)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="AndroidPlatformBackend-ParseCStateSample"><strong>ParseCStateSample</strong></a>(sample)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>device</strong></dt>
+</dl>
+<dl><dt><strong>forwarder_factory</strong></dt>
+</dl>
+<dl><dt><strong>is_test_ca_installed</strong></dt>
+</dl>
+<dl><dt><strong>is_video_capture_running</strong></dt>
+</dl>
+<dl><dt><strong>log_file_path</strong></dt>
+</dl>
+<dl><dt><strong>use_rndis_forwarder</strong></dt>
+</dl>
+<dl><dt><strong>wpr_ca_cert_path</strong></dt>
+<dd><tt>Path&nbsp;to&nbsp;root&nbsp;certificate&nbsp;installed&nbsp;on&nbsp;browser&nbsp;(or&nbsp;None).<br>
+&nbsp;<br>
+If&nbsp;this&nbsp;is&nbsp;set,&nbsp;web&nbsp;page&nbsp;replay&nbsp;will&nbsp;use&nbsp;it&nbsp;to&nbsp;sign&nbsp;HTTPS&nbsp;responses.</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.linux_based_platform_backend.html#LinuxBasedPlatformBackend">telemetry.internal.platform.linux_based_platform_backend.LinuxBasedPlatformBackend</a>:<br>
+<dl><dt><a name="AndroidPlatformBackend-GetClockTicks"><strong>GetClockTicks</strong></a>(*args, **kwargs)</dt><dd><tt>Returns&nbsp;the&nbsp;number&nbsp;of&nbsp;clock&nbsp;ticks&nbsp;per&nbsp;second.<br>
+&nbsp;<br>
+The&nbsp;proper&nbsp;way&nbsp;is&nbsp;to&nbsp;call&nbsp;os.sysconf('SC_CLK_TCK')&nbsp;but&nbsp;that&nbsp;is&nbsp;not&nbsp;easy&nbsp;to<br>
+do&nbsp;on&nbsp;Android/CrOS.&nbsp;In&nbsp;practice,&nbsp;nearly&nbsp;all&nbsp;Linux&nbsp;machines&nbsp;have&nbsp;a&nbsp;USER_HZ<br>
+of&nbsp;100,&nbsp;so&nbsp;just&nbsp;return&nbsp;that.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-GetSystemCommitCharge"><strong>GetSystemCommitCharge</strong></a>(self)</dt><dd><tt>#&nbsp;Get&nbsp;the&nbsp;commit&nbsp;charge&nbsp;in&nbsp;kB.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-GetSystemTotalPhysicalMemory"><strong>GetSystemTotalPhysicalMemory</strong></a>(*args, **kwargs)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>:<br>
+<dl><dt><a name="AndroidPlatformBackend-CanMeasurePerApplicationPower"><strong>CanMeasurePerApplicationPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-CooperativelyShutdown"><strong>CooperativelyShutdown</strong></a>(self, proc, app_name)</dt><dd><tt>Cooperatively&nbsp;shut&nbsp;down&nbsp;the&nbsp;given&nbsp;process&nbsp;from&nbsp;subprocess.Popen.<br>
+&nbsp;<br>
+Currently&nbsp;this&nbsp;is&nbsp;only&nbsp;implemented&nbsp;on&nbsp;Windows.&nbsp;See<br>
+crbug.com/424024&nbsp;for&nbsp;background&nbsp;on&nbsp;why&nbsp;it&nbsp;was&nbsp;added.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;proc:&nbsp;a&nbsp;process&nbsp;object&nbsp;returned&nbsp;from&nbsp;subprocess.Popen.<br>
+&nbsp;&nbsp;app_name:&nbsp;on&nbsp;Windows,&nbsp;is&nbsp;the&nbsp;prefix&nbsp;of&nbsp;the&nbsp;application's&nbsp;window<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;class&nbsp;name&nbsp;that&nbsp;should&nbsp;be&nbsp;searched&nbsp;for.&nbsp;This&nbsp;helps&nbsp;ensure<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;that&nbsp;only&nbsp;the&nbsp;application's&nbsp;windows&nbsp;are&nbsp;closed.<br>
+&nbsp;<br>
+Returns&nbsp;True&nbsp;if&nbsp;it&nbsp;is&nbsp;believed&nbsp;the&nbsp;attempt&nbsp;succeeded.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-DidCreateBrowser"><strong>DidCreateBrowser</strong></a>(self, browser, browser_backend)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-DidStartBrowser"><strong>DidStartBrowser</strong></a>(self, browser, browser_backend)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-GetRemotePort"><strong>GetRemotePort</strong></a>(self, port)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-InitPlatformBackend"><strong>InitPlatformBackend</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-IsCooperativeShutdownSupported"><strong>IsCooperativeShutdownSupported</strong></a>(self)</dt><dd><tt>Indicates&nbsp;whether&nbsp;CooperativelyShutdown,&nbsp;below,&nbsp;is&nbsp;supported.<br>
+It&nbsp;is&nbsp;not&nbsp;necessary&nbsp;to&nbsp;implement&nbsp;it&nbsp;on&nbsp;all&nbsp;platforms.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-ReadMsr"><strong>ReadMsr</strong></a>(self, msr_number, start<font color="#909090">=0</font>, length<font color="#909090">=64</font>)</dt><dd><tt>Read&nbsp;a&nbsp;CPU&nbsp;model-specific&nbsp;register&nbsp;(MSR).<br>
+&nbsp;<br>
+Which&nbsp;MSRs&nbsp;are&nbsp;available&nbsp;depends&nbsp;on&nbsp;the&nbsp;CPU&nbsp;model.<br>
+On&nbsp;systems&nbsp;with&nbsp;multiple&nbsp;CPUs,&nbsp;this&nbsp;function&nbsp;may&nbsp;run&nbsp;on&nbsp;any&nbsp;CPU.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;msr_number:&nbsp;The&nbsp;number&nbsp;of&nbsp;the&nbsp;register&nbsp;to&nbsp;read.<br>
+&nbsp;&nbsp;start:&nbsp;The&nbsp;least&nbsp;significant&nbsp;bit&nbsp;to&nbsp;read,&nbsp;zero-indexed.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;(Said&nbsp;another&nbsp;way,&nbsp;the&nbsp;number&nbsp;of&nbsp;bits&nbsp;to&nbsp;right-shift&nbsp;the&nbsp;MSR&nbsp;value.)<br>
+&nbsp;&nbsp;length:&nbsp;The&nbsp;number&nbsp;of&nbsp;bits&nbsp;to&nbsp;read.&nbsp;MSRs&nbsp;are&nbsp;64&nbsp;bits,&nbsp;even&nbsp;on&nbsp;32-bit&nbsp;CPUs.</tt></dd></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-SetPlatform"><strong>SetPlatform</strong></a>(self, platform)</dt></dl>
+
+<dl><dt><a name="AndroidPlatformBackend-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(self, browser, browser_backend)</dt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>:<br>
+<dl><dt><a name="AndroidPlatformBackend-IsPlatformBackendForHost"><strong>IsPlatformBackendForHost</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Returns&nbsp;whether&nbsp;this&nbsp;platform&nbsp;backend&nbsp;is&nbsp;the&nbsp;platform&nbsp;backend&nbsp;to&nbsp;be&nbsp;used<br>
+for&nbsp;the&nbsp;host&nbsp;device&nbsp;which&nbsp;telemetry&nbsp;is&nbsp;running&nbsp;on.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>is_host_platform</strong></dt>
+</dl>
+<dl><dt><strong>network_controller_backend</strong></dt>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+</dl>
+<dl><dt><strong>running_browser_backends</strong></dt>
+</dl>
+<dl><dt><strong>tracing_controller_backend</strong></dt>
+</dl>
+<dl><dt><strong>wpr_http_device_port</strong></dt>
+</dl>
+<dl><dt><strong>wpr_https_device_port</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
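PushProfile and PullProfile above are documented as the slow-path profile round-trip, so they are usually bracketed around a single app run. The sketch below strings them together with LaunchApplication/StopApplication; it assumes `backend` is an AndroidPlatformBackend obtained elsewhere (normally via CreatePlatformForDevice), and the package name and paths are hypothetical.

PACKAGE = 'org.chromium.example'  # hypothetical package name

def run_once_with_profile(backend, seed_profile_dir, saved_profile_path):
  """Seed the on-device profile, run the app once, then pull the profile back."""
  # PushProfile copies to a safe location first and is therefore slow; do it once.
  backend.PushProfile(PACKAGE, seed_profile_dir)
  backend.LaunchApplication(PACKAGE)
  # ... exercise the application here ...
  backend.StopApplication(PACKAGE)
  backend.PullProfile(PACKAGE, saved_profile_path)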
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.cros_device.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.cros_device.html
new file mode 100644
index 0000000..026a1e3
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.cros_device.html
@@ -0,0 +1,92 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.cros_device</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.cros_device</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/cros_device.py">telemetry/internal/platform/cros_device.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.core.cros_interface.html">telemetry.core.cros_interface</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.device.html">telemetry.internal.platform.device</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.platform.html">telemetry.core.platform</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.device.html#Device">telemetry.internal.platform.device.Device</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.cros_device.html#CrOSDevice">CrOSDevice</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="CrOSDevice">class <strong>CrOSDevice</strong></a>(<a href="telemetry.internal.platform.device.html#Device">telemetry.internal.platform.device.Device</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.cros_device.html#CrOSDevice">CrOSDevice</a></dd>
+<dd><a href="telemetry.internal.platform.device.html#Device">telemetry.internal.platform.device.Device</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="CrOSDevice-__init__"><strong>__init__</strong></a>(self, host_name, ssh_port, ssh_identity<font color="#909090">=None</font>)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="CrOSDevice-GetAllConnectedDevices"><strong>GetAllConnectedDevices</strong></a>(cls, blacklist)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>host_name</strong></dt>
+</dl>
+<dl><dt><strong>ssh_identity</strong></dt>
+</dl>
+<dl><dt><strong>ssh_port</strong></dt>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.device.html#Device">telemetry.internal.platform.device.Device</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>guid</strong></dt>
+</dl>
+<dl><dt><strong>name</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-FindAllAvailableDevices"><strong>FindAllAvailableDevices</strong></a>(options)</dt><dd><tt>Returns&nbsp;a&nbsp;list&nbsp;of&nbsp;available&nbsp;device&nbsp;types.</tt></dd></dl>
+ <dl><dt><a name="-IsRunningOnCrOS"><strong>IsRunningOnCrOS</strong></a>()</dt></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
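The cros_device module mirrors the android_device helpers: FindAllAvailableDevices(options) plus an IsRunningOnCrOS() check, with each device exposing host_name/ssh_port/ssh_identity. A short sketch assuming a telemetry checkout; the options argument would normally carry the remote/identity settings from telemetry's option parsing, so None here is only a stand-in.

from telemetry.internal.platform import cros_device

def describe_cros_devices(options=None):  # options is a placeholder here
  if cros_device.IsRunningOnCrOS():
    print('running directly on a CrOS device')
  for device in cros_device.FindAllAvailableDevices(options):
    print('%s (ssh %s:%s)' % (device.name, device.host_name, device.ssh_port))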
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.cros_platform_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.cros_platform_backend.html
new file mode 100644
index 0000000..926b020
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.cros_platform_backend.html
@@ -0,0 +1,246 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.cros_platform_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.cros_platform_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/cros_platform_backend.py">telemetry/internal/platform/cros_platform_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.platform.cros_device.html">telemetry.internal.platform.cros_device</a><br>
+<a href="telemetry.internal.forwarders.cros_forwarder.html">telemetry.internal.forwarders.cros_forwarder</a><br>
+<a href="telemetry.core.cros_interface.html">telemetry.core.cros_interface</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.power_monitor.cros_power_monitor.html">telemetry.internal.platform.power_monitor.cros_power_monitor</a><br>
+<a href="telemetry.internal.platform.linux_based_platform_backend.html">telemetry.internal.platform.linux_based_platform_backend</a><br>
+<a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.platform.html">telemetry.core.platform</a><br>
+<a href="telemetry.internal.util.ps_util.html">telemetry.internal.util.ps_util</a><br>
+<a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.linux_based_platform_backend.html#LinuxBasedPlatformBackend">telemetry.internal.platform.linux_based_platform_backend.LinuxBasedPlatformBackend</a>(<a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.cros_platform_backend.html#CrosPlatformBackend">CrosPlatformBackend</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="CrosPlatformBackend">class <strong>CrosPlatformBackend</strong></a>(<a href="telemetry.internal.platform.linux_based_platform_backend.html#LinuxBasedPlatformBackend">telemetry.internal.platform.linux_based_platform_backend.LinuxBasedPlatformBackend</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.cros_platform_backend.html#CrosPlatformBackend">CrosPlatformBackend</a></dd>
+<dd><a href="telemetry.internal.platform.linux_based_platform_backend.html#LinuxBasedPlatformBackend">telemetry.internal.platform.linux_based_platform_backend.LinuxBasedPlatformBackend</a></dd>
+<dd><a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="CrosPlatformBackend-CanFlushIndividualFilesFromSystemCache"><strong>CanFlushIndividualFilesFromSystemCache</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-CanMonitorPower"><strong>CanMonitorPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-FlushEntireSystemCache"><strong>FlushEntireSystemCache</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-FlushSystemCacheForDirectory"><strong>FlushSystemCacheForDirectory</strong></a>(self, directory)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-GetChildPids"><strong>GetChildPids</strong></a>(self, pid)</dt><dd><tt>Returns&nbsp;a&nbsp;list&nbsp;of&nbsp;child&nbsp;pids&nbsp;of&nbsp;|pid|.</tt></dd></dl>
+
+<dl><dt><a name="CrosPlatformBackend-GetCommandLine"><strong>GetCommandLine</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-GetFileContents"><strong>GetFileContents</strong></a>(self, filename)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-GetOSName"><strong>GetOSName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-GetOSVersionName"><strong>GetOSVersionName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-GetPsOutput"><strong>GetPsOutput</strong></a>(self, columns, pid<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-GetRemotePort"><strong>GetRemotePort</strong></a>(self, port)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-HasBeenThermallyThrottled"><strong>HasBeenThermallyThrottled</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-IsThermallyThrottled"><strong>IsThermallyThrottled</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-PathExists"><strong>PathExists</strong></a>(self, path, timeout<font color="#909090">=None</font>, retries<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-RunCommand"><strong>RunCommand</strong></a>(self, args)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-StartMonitoringPower"><strong>StartMonitoringPower</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-StopMonitoringPower"><strong>StopMonitoringPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-__init__"><strong>__init__</strong></a>(self, device<font color="#909090">=None</font>)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="CrosPlatformBackend-CreatePlatformForDevice"><strong>CreatePlatformForDevice</strong></a>(cls, device, finder_options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-IsPlatformBackendForHost"><strong>IsPlatformBackendForHost</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-SupportsDevice"><strong>SupportsDevice</strong></a>(cls, device)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="CrosPlatformBackend-ParseCStateSample"><strong>ParseCStateSample</strong></a>(sample)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>cri</strong></dt>
+</dl>
+<dl><dt><strong>forwarder_factory</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.linux_based_platform_backend.html#LinuxBasedPlatformBackend">telemetry.internal.platform.linux_based_platform_backend.LinuxBasedPlatformBackend</a>:<br>
+<dl><dt><a name="CrosPlatformBackend-GetClockTicks"><strong>GetClockTicks</strong></a>(*args, **kwargs)</dt><dd><tt>Returns&nbsp;the&nbsp;number&nbsp;of&nbsp;clock&nbsp;ticks&nbsp;per&nbsp;second.<br>
+&nbsp;<br>
+The&nbsp;proper&nbsp;way&nbsp;is&nbsp;to&nbsp;call&nbsp;os.sysconf('SC_CLK_TCK')&nbsp;but&nbsp;that&nbsp;is&nbsp;not&nbsp;easy&nbsp;to<br>
+do&nbsp;on&nbsp;Android/CrOS.&nbsp;In&nbsp;practice,&nbsp;nearly&nbsp;all&nbsp;Linux&nbsp;machines&nbsp;have&nbsp;a&nbsp;USER_HZ<br>
+of&nbsp;100,&nbsp;so&nbsp;just&nbsp;return&nbsp;that.</tt></dd></dl>
+
+<dl><dt><a name="CrosPlatformBackend-GetCpuStats"><strong>GetCpuStats</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-GetCpuTimestamp"><strong>GetCpuTimestamp</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-GetMemoryStats"><strong>GetMemoryStats</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-GetSystemCommitCharge"><strong>GetSystemCommitCharge</strong></a>(self)</dt><dd><tt>#&nbsp;Get&nbsp;the&nbsp;commit&nbsp;charge&nbsp;in&nbsp;kB.</tt></dd></dl>
+
+<dl><dt><a name="CrosPlatformBackend-GetSystemTotalPhysicalMemory"><strong>GetSystemTotalPhysicalMemory</strong></a>(*args, **kwargs)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>:<br>
+<dl><dt><a name="CrosPlatformBackend-CanCaptureVideo"><strong>CanCaptureVideo</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-CanLaunchApplication"><strong>CanLaunchApplication</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-CanMeasurePerApplicationPower"><strong>CanMeasurePerApplicationPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-CanMonitorNetworkData"><strong>CanMonitorNetworkData</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-CanMonitorThermalThrottling"><strong>CanMonitorThermalThrottling</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-CanTakeScreenshot"><strong>CanTakeScreenshot</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-CooperativelyShutdown"><strong>CooperativelyShutdown</strong></a>(self, proc, app_name)</dt><dd><tt>Cooperatively&nbsp;shut&nbsp;down&nbsp;the&nbsp;given&nbsp;process&nbsp;from&nbsp;subprocess.Popen.<br>
+&nbsp;<br>
+Currently&nbsp;this&nbsp;is&nbsp;only&nbsp;implemented&nbsp;on&nbsp;Windows.&nbsp;See<br>
+crbug.com/424024&nbsp;for&nbsp;background&nbsp;on&nbsp;why&nbsp;it&nbsp;was&nbsp;added.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;proc:&nbsp;a&nbsp;process&nbsp;object&nbsp;returned&nbsp;from&nbsp;subprocess.Popen.<br>
+&nbsp;&nbsp;app_name:&nbsp;on&nbsp;Windows,&nbsp;is&nbsp;the&nbsp;prefix&nbsp;of&nbsp;the&nbsp;application's&nbsp;window<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;class&nbsp;name&nbsp;that&nbsp;should&nbsp;be&nbsp;searched&nbsp;for.&nbsp;This&nbsp;helps&nbsp;ensure<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;that&nbsp;only&nbsp;the&nbsp;application's&nbsp;windows&nbsp;are&nbsp;closed.<br>
+&nbsp;<br>
+Returns&nbsp;True&nbsp;if&nbsp;it&nbsp;is&nbsp;believed&nbsp;the&nbsp;attempt&nbsp;succeeded.</tt></dd></dl>
+
+<dl><dt><a name="CrosPlatformBackend-DidCreateBrowser"><strong>DidCreateBrowser</strong></a>(self, browser, browser_backend)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-DidStartBrowser"><strong>DidStartBrowser</strong></a>(self, browser, browser_backend)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-FlushDnsCache"><strong>FlushDnsCache</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-GetArchName"><strong>GetArchName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-GetDeviceTypeName"><strong>GetDeviceTypeName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-GetNetworkData"><strong>GetNetworkData</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-InitPlatformBackend"><strong>InitPlatformBackend</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-InstallApplication"><strong>InstallApplication</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-IsApplicationRunning"><strong>IsApplicationRunning</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-IsCooperativeShutdownSupported"><strong>IsCooperativeShutdownSupported</strong></a>(self)</dt><dd><tt>Indicates&nbsp;whether&nbsp;CooperativelyShutdown,&nbsp;below,&nbsp;is&nbsp;supported.<br>
+It&nbsp;is&nbsp;not&nbsp;necessary&nbsp;to&nbsp;implement&nbsp;it&nbsp;on&nbsp;all&nbsp;platforms.</tt></dd></dl>
+
+<dl><dt><a name="CrosPlatformBackend-IsDisplayTracingSupported"><strong>IsDisplayTracingSupported</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-LaunchApplication"><strong>LaunchApplication</strong></a>(self, application, parameters<font color="#909090">=None</font>, elevate_privilege<font color="#909090">=False</font>)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-PurgeUnpinnedMemory"><strong>PurgeUnpinnedMemory</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-ReadMsr"><strong>ReadMsr</strong></a>(self, msr_number, start<font color="#909090">=0</font>, length<font color="#909090">=64</font>)</dt><dd><tt>Read&nbsp;a&nbsp;CPU&nbsp;model-specific&nbsp;register&nbsp;(MSR).<br>
+&nbsp;<br>
+Which&nbsp;MSRs&nbsp;are&nbsp;available&nbsp;depends&nbsp;on&nbsp;the&nbsp;CPU&nbsp;model.<br>
+On&nbsp;systems&nbsp;with&nbsp;multiple&nbsp;CPUs,&nbsp;this&nbsp;function&nbsp;may&nbsp;run&nbsp;on&nbsp;any&nbsp;CPU.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;msr_number:&nbsp;The&nbsp;number&nbsp;of&nbsp;the&nbsp;register&nbsp;to&nbsp;read.<br>
+&nbsp;&nbsp;start:&nbsp;The&nbsp;least&nbsp;significant&nbsp;bit&nbsp;to&nbsp;read,&nbsp;zero-indexed.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;(Said&nbsp;another&nbsp;way,&nbsp;the&nbsp;number&nbsp;of&nbsp;bits&nbsp;to&nbsp;right-shift&nbsp;the&nbsp;MSR&nbsp;value.)<br>
+&nbsp;&nbsp;length:&nbsp;The&nbsp;number&nbsp;of&nbsp;bits&nbsp;to&nbsp;read.&nbsp;MSRs&nbsp;are&nbsp;64&nbsp;bits,&nbsp;even&nbsp;on&nbsp;32-bit&nbsp;CPUs.</tt></dd></dl>
+
+<dl><dt><a name="CrosPlatformBackend-SetFullPerformanceModeEnabled"><strong>SetFullPerformanceModeEnabled</strong></a>(self, enabled)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-SetPlatform"><strong>SetPlatform</strong></a>(self, platform)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-StartDisplayTracing"><strong>StartDisplayTracing</strong></a>(self)</dt><dd><tt>Start&nbsp;gathering&nbsp;a&nbsp;trace&nbsp;with&nbsp;frame&nbsp;timestamps&nbsp;close&nbsp;to&nbsp;physical<br>
+display.</tt></dd></dl>
+
+<dl><dt><a name="CrosPlatformBackend-StartVideoCapture"><strong>StartVideoCapture</strong></a>(self, min_bitrate_mbps)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-StopDisplayTracing"><strong>StopDisplayTracing</strong></a>(self)</dt><dd><tt>Stop&nbsp;gathering&nbsp;a&nbsp;trace&nbsp;with&nbsp;frame&nbsp;timestamps&nbsp;close&nbsp;to&nbsp;physical&nbsp;display.<br>
+&nbsp;<br>
+Returns&nbsp;raw&nbsp;tracing&nbsp;events&nbsp;that&nbsp;contain&nbsp;the&nbsp;timestamps&nbsp;of&nbsp;the&nbsp;physical<br>
+display.</tt></dd></dl>
+
+<dl><dt><a name="CrosPlatformBackend-StopVideoCapture"><strong>StopVideoCapture</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-TakeScreenshot"><strong>TakeScreenshot</strong></a>(self, file_path)</dt></dl>
+
+<dl><dt><a name="CrosPlatformBackend-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(self, browser, browser_backend)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>is_host_platform</strong></dt>
+</dl>
+<dl><dt><strong>is_video_capture_running</strong></dt>
+</dl>
+<dl><dt><strong>network_controller_backend</strong></dt>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+</dl>
+<dl><dt><strong>running_browser_backends</strong></dt>
+</dl>
+<dl><dt><strong>tracing_controller_backend</strong></dt>
+</dl>
+<dl><dt><strong>wpr_ca_cert_path</strong></dt>
+</dl>
+<dl><dt><strong>wpr_http_device_port</strong></dt>
+</dl>
+<dl><dt><strong>wpr_https_device_port</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
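
The `ReadMsr` docstring rendered above describes `start` and `length` as a bit slice of the 64-bit register value: right-shift by `start`, then keep `length` bits. A minimal pure-Python sketch of that arithmetic, independent of telemetry itself; the sample register value and field positions below are made up purely for illustration:

```python
def extract_msr_bits(msr_value, start=0, length=64):
    """Return the length-bit field of msr_value beginning at bit start.

    Mirrors the documented ReadMsr(start, length) semantics: right-shift the
    64-bit register value by `start`, then mask off all but `length` bits.
    """
    mask = (1 << length) - 1          # e.g. length=8 -> 0xFF
    return (msr_value >> start) & mask

# Example with a fabricated register value: take the 8-bit field at bit 16.
sample = 0x123456789ABC
print(hex(extract_msr_bits(sample, start=16, length=8)))  # prints 0x78
```
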
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.desktop_device.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.desktop_device.html
new file mode 100644
index 0000000..8f6cfc8
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.desktop_device.html
@@ -0,0 +1,81 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.desktop_device</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.desktop_device</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/desktop_device.py">telemetry/internal/platform/desktop_device.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.platform.device.html">telemetry.internal.platform.device</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.platform.html">telemetry.core.platform</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.device.html#Device">telemetry.internal.platform.device.Device</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.desktop_device.html#DesktopDevice">DesktopDevice</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="DesktopDevice">class <strong>DesktopDevice</strong></a>(<a href="telemetry.internal.platform.device.html#Device">telemetry.internal.platform.device.Device</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.desktop_device.html#DesktopDevice">DesktopDevice</a></dd>
+<dd><a href="telemetry.internal.platform.device.html#Device">telemetry.internal.platform.device.Device</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="DesktopDevice-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="DesktopDevice-GetAllConnectedDevices"><strong>GetAllConnectedDevices</strong></a>(cls, blacklist)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.device.html#Device">telemetry.internal.platform.device.Device</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>guid</strong></dt>
+</dl>
+<dl><dt><strong>name</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-FindAllAvailableDevices"><strong>FindAllAvailableDevices</strong></a>(_)</dt><dd><tt>Returns&nbsp;a&nbsp;list&nbsp;of&nbsp;available&nbsp;devices.</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.desktop_platform_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.desktop_platform_backend.html
new file mode 100644
index 0000000..fe3c387
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.desktop_platform_backend.html
@@ -0,0 +1,233 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.desktop_platform_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.desktop_platform_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/desktop_platform_backend.py">telemetry/internal/platform/desktop_platform_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.util.binary_manager.html">telemetry.internal.util.binary_manager</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.platform_backend.html">telemetry.internal.platform.platform_backend</a><br>
+</td><td width="25%" valign=top><a href="subprocess.html">subprocess</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.desktop_platform_backend.html#DesktopPlatformBackend">DesktopPlatformBackend</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="DesktopPlatformBackend">class <strong>DesktopPlatformBackend</strong></a>(<a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.desktop_platform_backend.html#DesktopPlatformBackend">DesktopPlatformBackend</a></dd>
+<dd><a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="DesktopPlatformBackend-FlushSystemCacheForDirectory"><strong>FlushSystemCacheForDirectory</strong></a>(self, directory)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-GetDeviceTypeName"><strong>GetDeviceTypeName</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>:<br>
+<dl><dt><a name="DesktopPlatformBackend-CanCaptureVideo"><strong>CanCaptureVideo</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-CanFlushIndividualFilesFromSystemCache"><strong>CanFlushIndividualFilesFromSystemCache</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-CanLaunchApplication"><strong>CanLaunchApplication</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-CanMeasurePerApplicationPower"><strong>CanMeasurePerApplicationPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-CanMonitorNetworkData"><strong>CanMonitorNetworkData</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-CanMonitorPower"><strong>CanMonitorPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-CanMonitorThermalThrottling"><strong>CanMonitorThermalThrottling</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-CanTakeScreenshot"><strong>CanTakeScreenshot</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-CooperativelyShutdown"><strong>CooperativelyShutdown</strong></a>(self, proc, app_name)</dt><dd><tt>Cooperatively&nbsp;shut&nbsp;down&nbsp;the&nbsp;given&nbsp;process&nbsp;from&nbsp;subprocess.Popen.<br>
+&nbsp;<br>
+Currently&nbsp;this&nbsp;is&nbsp;only&nbsp;implemented&nbsp;on&nbsp;Windows.&nbsp;See<br>
+crbug.com/424024&nbsp;for&nbsp;background&nbsp;on&nbsp;why&nbsp;it&nbsp;was&nbsp;added.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;proc:&nbsp;a&nbsp;process&nbsp;object&nbsp;returned&nbsp;from&nbsp;subprocess.Popen.<br>
+&nbsp;&nbsp;app_name:&nbsp;on&nbsp;Windows,&nbsp;is&nbsp;the&nbsp;prefix&nbsp;of&nbsp;the&nbsp;application's&nbsp;window<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;class&nbsp;name&nbsp;that&nbsp;should&nbsp;be&nbsp;searched&nbsp;for.&nbsp;This&nbsp;helps&nbsp;ensure<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;that&nbsp;only&nbsp;the&nbsp;application's&nbsp;windows&nbsp;are&nbsp;closed.<br>
+&nbsp;<br>
+Returns&nbsp;True&nbsp;if&nbsp;it&nbsp;is&nbsp;believed&nbsp;the&nbsp;attempt&nbsp;succeeded.</tt></dd></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-DidCreateBrowser"><strong>DidCreateBrowser</strong></a>(self, browser, browser_backend)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-DidStartBrowser"><strong>DidStartBrowser</strong></a>(self, browser, browser_backend)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-FlushDnsCache"><strong>FlushDnsCache</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-FlushEntireSystemCache"><strong>FlushEntireSystemCache</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-GetArchName"><strong>GetArchName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-GetChildPids"><strong>GetChildPids</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-GetCommandLine"><strong>GetCommandLine</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-GetCpuStats"><strong>GetCpuStats</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-GetCpuTimestamp"><strong>GetCpuTimestamp</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-GetMemoryStats"><strong>GetMemoryStats</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-GetNetworkData"><strong>GetNetworkData</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-GetOSName"><strong>GetOSName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-GetOSVersionName"><strong>GetOSVersionName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-GetRemotePort"><strong>GetRemotePort</strong></a>(self, port)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-GetSystemCommitCharge"><strong>GetSystemCommitCharge</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-GetSystemTotalPhysicalMemory"><strong>GetSystemTotalPhysicalMemory</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-HasBeenThermallyThrottled"><strong>HasBeenThermallyThrottled</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-InitPlatformBackend"><strong>InitPlatformBackend</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-InstallApplication"><strong>InstallApplication</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-IsApplicationRunning"><strong>IsApplicationRunning</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-IsCooperativeShutdownSupported"><strong>IsCooperativeShutdownSupported</strong></a>(self)</dt><dd><tt>Indicates&nbsp;whether&nbsp;CooperativelyShutdown,&nbsp;below,&nbsp;is&nbsp;supported.<br>
+It&nbsp;is&nbsp;not&nbsp;necessary&nbsp;to&nbsp;implement&nbsp;it&nbsp;on&nbsp;all&nbsp;platforms.</tt></dd></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-IsDisplayTracingSupported"><strong>IsDisplayTracingSupported</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-IsThermallyThrottled"><strong>IsThermallyThrottled</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-LaunchApplication"><strong>LaunchApplication</strong></a>(self, application, parameters<font color="#909090">=None</font>, elevate_privilege<font color="#909090">=False</font>)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-PathExists"><strong>PathExists</strong></a>(self, path, timeout<font color="#909090">=None</font>, retries<font color="#909090">=None</font>)</dt><dd><tt>Tests&nbsp;whether&nbsp;the&nbsp;given&nbsp;path&nbsp;exists&nbsp;on&nbsp;the&nbsp;target&nbsp;platform.<br>
+Args:<br>
+&nbsp;&nbsp;path:&nbsp;the&nbsp;path&nbsp;to&nbsp;check.<br>
+&nbsp;&nbsp;timeout:&nbsp;timeout&nbsp;for&nbsp;the&nbsp;check.<br>
+&nbsp;&nbsp;retries:&nbsp;number&nbsp;of&nbsp;retries.<br>
+Returns:<br>
+&nbsp;&nbsp;Whether&nbsp;the&nbsp;path&nbsp;exists&nbsp;on&nbsp;the&nbsp;target&nbsp;platform.</tt></dd></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-PurgeUnpinnedMemory"><strong>PurgeUnpinnedMemory</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-ReadMsr"><strong>ReadMsr</strong></a>(self, msr_number, start<font color="#909090">=0</font>, length<font color="#909090">=64</font>)</dt><dd><tt>Read&nbsp;a&nbsp;CPU&nbsp;model-specific&nbsp;register&nbsp;(MSR).<br>
+&nbsp;<br>
+Which&nbsp;MSRs&nbsp;are&nbsp;available&nbsp;depends&nbsp;on&nbsp;the&nbsp;CPU&nbsp;model.<br>
+On&nbsp;systems&nbsp;with&nbsp;multiple&nbsp;CPUs,&nbsp;this&nbsp;function&nbsp;may&nbsp;run&nbsp;on&nbsp;any&nbsp;CPU.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;msr_number:&nbsp;The&nbsp;number&nbsp;of&nbsp;the&nbsp;register&nbsp;to&nbsp;read.<br>
+&nbsp;&nbsp;start:&nbsp;The&nbsp;least&nbsp;significant&nbsp;bit&nbsp;to&nbsp;read,&nbsp;zero-indexed.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;(Said&nbsp;another&nbsp;way,&nbsp;the&nbsp;number&nbsp;of&nbsp;bits&nbsp;to&nbsp;right-shift&nbsp;the&nbsp;MSR&nbsp;value.)<br>
+&nbsp;&nbsp;length:&nbsp;The&nbsp;number&nbsp;of&nbsp;bits&nbsp;to&nbsp;read.&nbsp;MSRs&nbsp;are&nbsp;64&nbsp;bits,&nbsp;even&nbsp;on&nbsp;32-bit&nbsp;CPUs.</tt></dd></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-SetFullPerformanceModeEnabled"><strong>SetFullPerformanceModeEnabled</strong></a>(self, enabled)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-SetPlatform"><strong>SetPlatform</strong></a>(self, platform)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-StartDisplayTracing"><strong>StartDisplayTracing</strong></a>(self)</dt><dd><tt>Start&nbsp;gathering&nbsp;a&nbsp;trace&nbsp;with&nbsp;frame&nbsp;timestamps&nbsp;close&nbsp;to&nbsp;physical<br>
+display.</tt></dd></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-StartMonitoringPower"><strong>StartMonitoringPower</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-StartVideoCapture"><strong>StartVideoCapture</strong></a>(self, min_bitrate_mbps)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-StopDisplayTracing"><strong>StopDisplayTracing</strong></a>(self)</dt><dd><tt>Stop&nbsp;gathering&nbsp;a&nbsp;trace&nbsp;with&nbsp;frame&nbsp;timestamps&nbsp;close&nbsp;to&nbsp;physical&nbsp;display.<br>
+&nbsp;<br>
+Returns&nbsp;raw&nbsp;tracing&nbsp;events&nbsp;that&nbsp;contain&nbsp;the&nbsp;timestamps&nbsp;of&nbsp;the&nbsp;physical<br>
+display.</tt></dd></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-StopMonitoringPower"><strong>StopMonitoringPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-StopVideoCapture"><strong>StopVideoCapture</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-TakeScreenshot"><strong>TakeScreenshot</strong></a>(self, file_path)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(self, browser, browser_backend)</dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-__init__"><strong>__init__</strong></a>(self, device<font color="#909090">=None</font>)</dt><dd><tt>Initialize&nbsp;an&nbsp;instance&nbsp;of&nbsp;<a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">PlatformBackend</a>,&nbsp;optionally&nbsp;from&nbsp;a&nbsp;device.<br>
+Call&nbsp;sites&nbsp;need&nbsp;to&nbsp;use&nbsp;SupportsDevice&nbsp;before&nbsp;initialization&nbsp;to&nbsp;check<br>
+whether&nbsp;this&nbsp;platform&nbsp;backend&nbsp;supports&nbsp;the&nbsp;device.<br>
+If&nbsp;device&nbsp;is&nbsp;None,&nbsp;this&nbsp;constructor&nbsp;returns&nbsp;the&nbsp;host&nbsp;platform&nbsp;backend<br>
+which&nbsp;telemetry&nbsp;is&nbsp;running&nbsp;on.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;device:&nbsp;an&nbsp;instance&nbsp;of&nbsp;telemetry.core.platform.device.Device.</tt></dd></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>:<br>
+<dl><dt><a name="DesktopPlatformBackend-CreatePlatformForDevice"><strong>CreatePlatformForDevice</strong></a>(cls, device, finder_options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-IsPlatformBackendForHost"><strong>IsPlatformBackendForHost</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Returns&nbsp;whether&nbsp;this&nbsp;platform&nbsp;backend&nbsp;is&nbsp;the&nbsp;platform&nbsp;backend&nbsp;to&nbsp;be&nbsp;used<br>
+for&nbsp;the&nbsp;host&nbsp;device&nbsp;which&nbsp;telemetry&nbsp;is&nbsp;running&nbsp;on.</tt></dd></dl>
+
+<dl><dt><a name="DesktopPlatformBackend-SupportsDevice"><strong>SupportsDevice</strong></a>(cls, device)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Returns&nbsp;whether&nbsp;this&nbsp;platform&nbsp;backend&nbsp;supports&nbsp;initialization&nbsp;from&nbsp;the<br>
+device.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>forwarder_factory</strong></dt>
+</dl>
+<dl><dt><strong>is_host_platform</strong></dt>
+</dl>
+<dl><dt><strong>is_video_capture_running</strong></dt>
+</dl>
+<dl><dt><strong>network_controller_backend</strong></dt>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+</dl>
+<dl><dt><strong>running_browser_backends</strong></dt>
+</dl>
+<dl><dt><strong>tracing_controller_backend</strong></dt>
+</dl>
+<dl><dt><strong>wpr_ca_cert_path</strong></dt>
+</dl>
+<dl><dt><strong>wpr_http_device_port</strong></dt>
+</dl>
+<dl><dt><strong>wpr_https_device_port</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
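
The `__init__` docstring above states that call sites should check `SupportsDevice` before constructing a backend for a device. A hedged sketch of that check-then-construct pattern; `create_backend_for_device` and `backend_classes` are illustrative names, not taken from the telemetry source, which keeps its selection logic inside its own finders:

```python
# Illustrative only: shows the documented contract that SupportsDevice(device)
# gates construction via __init__(self, device=None).
def create_backend_for_device(device, backend_classes):
    for backend_cls in backend_classes:
        if backend_cls.SupportsDevice(device):
            return backend_cls(device=device)
    raise ValueError('no platform backend supports device: %r' % (device,))
```
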
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.device.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.device.html
new file mode 100644
index 0000000..e8459be
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.device.html
@@ -0,0 +1,68 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.device</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.device</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/device.py">telemetry/internal/platform/device.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.device.html#Device">Device</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Device">class <strong>Device</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;base&nbsp;class&nbsp;of&nbsp;devices.<br>
+A&nbsp;device&nbsp;instance&nbsp;contains&nbsp;all&nbsp;the&nbsp;necessary&nbsp;information&nbsp;for&nbsp;constructing<br>
+a&nbsp;platform&nbsp;backend&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;for&nbsp;remote&nbsp;platforms.<br>
+&nbsp;<br>
+Attributes:<br>
+&nbsp;&nbsp;name:&nbsp;A&nbsp;human-readable&nbsp;device&nbsp;name&nbsp;string.<br>
+&nbsp;&nbsp;guid:&nbsp;A&nbsp;unique&nbsp;id&nbsp;of&nbsp;the&nbsp;device.&nbsp;Each&nbsp;Device&nbsp;subclass&nbsp;must&nbsp;set&nbsp;this<br>
+&nbsp;&nbsp;&nbsp;&nbsp;id&nbsp;so&nbsp;that&nbsp;device&nbsp;objects&nbsp;referring&nbsp;to&nbsp;the&nbsp;same&nbsp;physical&nbsp;device&nbsp;share<br>
+&nbsp;&nbsp;&nbsp;&nbsp;the&nbsp;same&nbsp;guid.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="Device-__init__"><strong>__init__</strong></a>(self, name, guid)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="Device-GetAllConnectedDevices"><strong>GetAllConnectedDevices</strong></a>(cls, blacklist)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>guid</strong></dt>
+</dl>
+<dl><dt><strong>name</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
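
Based on the `Device` contract documented above (a human-readable `name`, a stable `guid`, and a `GetAllConnectedDevices(blacklist)` classmethod), a minimal stand-in might look like the sketch below. It mirrors only the documented attributes and is not the real telemetry class:

```python
class ExampleDevice(object):
    """Illustrative stand-in for telemetry.internal.platform.device.Device."""

    def __init__(self, name, guid):
        self._name = name
        self._guid = guid  # must be stable: same physical device -> same guid

    @property
    def name(self):
        return self._name

    @property
    def guid(self):
        return self._guid

    @classmethod
    def GetAllConnectedDevices(cls, blacklist):
        # Real subclasses enumerate attached hardware here (honouring the
        # blacklist); this stub reports none.
        return []
```
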
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.device_finder.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.device_finder.html
new file mode 100644
index 0000000..3a37b75
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.device_finder.html
@@ -0,0 +1,42 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.device_finder</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.device_finder</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/device_finder.py">telemetry/internal/platform/device_finder.py</a></font></td></tr></table>
+    <p><tt>Finds&nbsp;devices&nbsp;that&nbsp;can&nbsp;be&nbsp;controlled&nbsp;by&nbsp;telemetry.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.platform.android_device.html">telemetry.internal.platform.android_device</a><br>
+<a href="telemetry.internal.platform.cros_device.html">telemetry.internal.platform.cros_device</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.desktop_device.html">telemetry.internal.platform.desktop_device</a><br>
+<a href="telemetry.internal.platform.ios_device.html">telemetry.internal.platform.ios_device</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.trybot_device.html">telemetry.internal.platform.trybot_device</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-GetDevicesMatchingOptions"><strong>GetDevicesMatchingOptions</strong></a>(options)</dt><dd><tt>Returns&nbsp;a&nbsp;list&nbsp;of&nbsp;devices&nbsp;matching&nbsp;the&nbsp;options.</tt></dd></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>DEVICES</strong> = [&lt;module 'telemetry.internal.platform.android_dev.../telemetry/internal/platform/android_device.pyc'&gt;, &lt;module 'telemetry.internal.platform.cros_device...try/telemetry/internal/platform/cros_device.pyc'&gt;, &lt;module 'telemetry.internal.platform.desktop_dev.../telemetry/internal/platform/desktop_device.pyc'&gt;, &lt;module 'telemetry.internal.platform.ios_device'...etry/telemetry/internal/platform/ios_device.pyc'&gt;, &lt;module 'telemetry.internal.platform.trybot_devi...y/telemetry/internal/platform/trybot_device.pyc'&gt;]</td></tr></table>
+</body></html>
\ No newline at end of file
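
The module above exposes a `DEVICES` list of device modules and a single `GetDevicesMatchingOptions(options)` entry point, while each device module (see the `desktop_device` page above) provides `FindAllAvailableDevices(options)`. A hedged sketch of the aggregation shape this implies; it is an assumption about the structure, not the actual source:

```python
# Assumed aggregation pattern, not the real implementation: each module in
# DEVICES exposes FindAllAvailableDevices(options); the finder merges results.
def get_devices_matching_options(options, device_modules):
    devices = []
    for module in device_modules:  # e.g. android_device, cros_device, desktop_device
        devices.extend(module.FindAllAvailableDevices(options))
    return devices
```
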
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.gpu_device.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.gpu_device.html
new file mode 100644
index 0000000..34f3fc9
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.gpu_device.html
@@ -0,0 +1,96 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.gpu_device</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.gpu_device</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/gpu_device.py">telemetry/internal/platform/gpu_device.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.gpu_device.html#GPUDevice">GPUDevice</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="GPUDevice">class <strong>GPUDevice</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Provides&nbsp;information&nbsp;about&nbsp;an&nbsp;individual&nbsp;GPU&nbsp;device.<br>
+&nbsp;<br>
+On&nbsp;platforms&nbsp;which&nbsp;support&nbsp;them,&nbsp;the&nbsp;vendor_id&nbsp;and&nbsp;device_id&nbsp;are<br>
+PCI&nbsp;IDs.&nbsp;On&nbsp;other&nbsp;platforms,&nbsp;the&nbsp;vendor_string&nbsp;and&nbsp;device_string<br>
+are&nbsp;platform-dependent&nbsp;strings.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="GPUDevice-__init__"><strong>__init__</strong></a>(self, vendor_id, device_id, vendor_string, device_string)</dt></dl>
+
+<dl><dt><a name="GPUDevice-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="GPUDevice-FromDict"><strong>FromDict</strong></a>(cls, attrs)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Constructs&nbsp;a&nbsp;<a href="#GPUDevice">GPUDevice</a>&nbsp;from&nbsp;a&nbsp;dictionary.&nbsp;Requires&nbsp;the<br>
+following&nbsp;attributes&nbsp;to&nbsp;be&nbsp;present&nbsp;in&nbsp;the&nbsp;dictionary:<br>
+&nbsp;<br>
+&nbsp;&nbsp;vendor_id<br>
+&nbsp;&nbsp;device_id<br>
+&nbsp;&nbsp;vendor_string<br>
+&nbsp;&nbsp;device_string<br>
+&nbsp;<br>
+Raises&nbsp;an&nbsp;exception&nbsp;if&nbsp;any&nbsp;attributes&nbsp;are&nbsp;missing.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>device_id</strong></dt>
+<dd><tt>The&nbsp;GPU&nbsp;device's&nbsp;PCI&nbsp;ID&nbsp;as&nbsp;a&nbsp;number,&nbsp;or&nbsp;0&nbsp;if&nbsp;not&nbsp;available.<br>
+&nbsp;<br>
+Most&nbsp;desktop&nbsp;machines&nbsp;supply&nbsp;this&nbsp;information&nbsp;rather&nbsp;than&nbsp;the<br>
+vendor&nbsp;and&nbsp;device&nbsp;strings.</tt></dd>
+</dl>
+<dl><dt><strong>device_string</strong></dt>
+<dd><tt>The&nbsp;GPU&nbsp;device's&nbsp;name&nbsp;as&nbsp;a&nbsp;string,&nbsp;or&nbsp;the&nbsp;empty&nbsp;string&nbsp;if&nbsp;not<br>
+available.<br>
+&nbsp;<br>
+Most&nbsp;mobile&nbsp;devices&nbsp;supply&nbsp;this&nbsp;information&nbsp;rather&nbsp;than&nbsp;the&nbsp;PCI<br>
+IDs.</tt></dd>
+</dl>
+<dl><dt><strong>vendor_id</strong></dt>
+<dd><tt>The&nbsp;GPU&nbsp;vendor's&nbsp;PCI&nbsp;ID&nbsp;as&nbsp;a&nbsp;number,&nbsp;or&nbsp;0&nbsp;if&nbsp;not&nbsp;available.<br>
+&nbsp;<br>
+Most&nbsp;desktop&nbsp;machines&nbsp;supply&nbsp;this&nbsp;information&nbsp;rather&nbsp;than&nbsp;the<br>
+vendor&nbsp;and&nbsp;device&nbsp;strings.</tt></dd>
+</dl>
+<dl><dt><strong>vendor_string</strong></dt>
+<dd><tt>The&nbsp;GPU&nbsp;vendor's&nbsp;name&nbsp;as&nbsp;a&nbsp;string,&nbsp;or&nbsp;the&nbsp;empty&nbsp;string&nbsp;if&nbsp;not<br>
+available.<br>
+&nbsp;<br>
+Most&nbsp;mobile&nbsp;devices&nbsp;supply&nbsp;this&nbsp;information&nbsp;rather&nbsp;than&nbsp;the&nbsp;PCI<br>
+IDs.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
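
A hedged usage sketch for the `GPUDevice.FromDict` contract documented above: the four listed keys are required and it raises if any are missing. The import assumes a catapult checkout with telemetry on `PYTHONPATH`, and the ID values are arbitrary placeholders:

```python
from telemetry.internal.platform import gpu_device

device = gpu_device.GPUDevice.FromDict({
    'vendor_id': 0x10de,       # PCI IDs where the platform exposes them...
    'device_id': 0x1c82,       # ...these particular values are placeholders.
    'vendor_string': '',
    'device_string': '',
})
print(device)  # GPUDevice defines __str__, per the listing above.
```
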
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.gpu_info.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.gpu_info.html
new file mode 100644
index 0000000..45b8d04
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.gpu_info.html
@@ -0,0 +1,93 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.gpu_info</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.gpu_info</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/gpu_info.py">telemetry/internal/platform/gpu_info.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.platform.gpu_device.html">telemetry.internal.platform.gpu_device</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.gpu_info.html#GPUInfo">GPUInfo</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="GPUInfo">class <strong>GPUInfo</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Provides&nbsp;information&nbsp;about&nbsp;the&nbsp;GPUs&nbsp;on&nbsp;the&nbsp;system.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="GPUInfo-__init__"><strong>__init__</strong></a>(self, device_array, aux_attributes, feature_status, driver_bug_workarounds)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="GPUInfo-FromDict"><strong>FromDict</strong></a>(cls, attrs)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Constructs&nbsp;a&nbsp;<a href="#GPUInfo">GPUInfo</a>&nbsp;from&nbsp;a&nbsp;dictionary&nbsp;of&nbsp;attributes.<br>
+&nbsp;<br>
+Attributes&nbsp;currently&nbsp;required&nbsp;to&nbsp;be&nbsp;present&nbsp;in&nbsp;the&nbsp;dictionary:<br>
+&nbsp;&nbsp;devices&nbsp;(array&nbsp;of&nbsp;dictionaries,&nbsp;each&nbsp;of&nbsp;which&nbsp;contains<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;GPUDevice's&nbsp;required&nbsp;attributes)</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>aux_attributes</strong></dt>
+<dd><tt>Returns&nbsp;a&nbsp;dictionary&nbsp;of&nbsp;auxiliary,&nbsp;optional,&nbsp;attributes.<br>
+&nbsp;<br>
+On&nbsp;the&nbsp;Chrome&nbsp;browser,&nbsp;for&nbsp;example,&nbsp;this&nbsp;dictionary&nbsp;contains:<br>
+&nbsp;&nbsp;optimus&nbsp;(boolean)<br>
+&nbsp;&nbsp;amd_switchable&nbsp;(boolean)<br>
+&nbsp;&nbsp;lenovo_dcute&nbsp;(boolean)<br>
+&nbsp;&nbsp;driver_vendor&nbsp;(string)<br>
+&nbsp;&nbsp;driver_version&nbsp;(string)<br>
+&nbsp;&nbsp;driver_date&nbsp;(string)<br>
+&nbsp;&nbsp;gl_version_string&nbsp;(string)<br>
+&nbsp;&nbsp;gl_vendor&nbsp;(string)<br>
+&nbsp;&nbsp;gl_renderer&nbsp;(string)<br>
+&nbsp;&nbsp;gl_extensions&nbsp;(string)<br>
+&nbsp;&nbsp;display_link_version&nbsp;(string)</tt></dd>
+</dl>
+<dl><dt><strong>devices</strong></dt>
+<dd><tt>An&nbsp;array&nbsp;of&nbsp;GPUDevices.&nbsp;Element&nbsp;0&nbsp;is&nbsp;the&nbsp;primary&nbsp;GPU&nbsp;on&nbsp;the&nbsp;system.</tt></dd>
+</dl>
+<dl><dt><strong>driver_bug_workarounds</strong></dt>
+<dd><tt>Returns&nbsp;an&nbsp;optional&nbsp;array&nbsp;of&nbsp;driver&nbsp;bug&nbsp;workarounds.</tt></dd>
+</dl>
+<dl><dt><strong>feature_status</strong></dt>
+<dd><tt>Returns&nbsp;an&nbsp;optional&nbsp;dictionary&nbsp;of&nbsp;graphics&nbsp;features&nbsp;and&nbsp;their&nbsp;status.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
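
Similarly, a hedged sketch for `GPUInfo.FromDict` as documented above: only `devices` is listed as required, and each entry must carry `GPUDevice`'s required keys. Again the import path assumes telemetry is importable, optional attributes are omitted per the documented contract, and the values are placeholders:

```python
from telemetry.internal.platform import gpu_info

info = gpu_info.GPUInfo.FromDict({
    'devices': [{
        'vendor_id': 0,
        'device_id': 0,
        'vendor_string': 'ExampleVendor',
        'device_string': 'ExampleGPU',
    }],
})
print(info.devices[0].device_string)  # element 0 is the primary GPU
```
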
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.html
new file mode 100644
index 0000000..2e717fd
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.html
@@ -0,0 +1,63 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.internal.platform</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.platform</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/__init__.py">telemetry/internal/platform/__init__.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.platform.android_device.html">android_device</a><br>
+<a href="telemetry.internal.platform.android_device_unittest.html">android_device_unittest</a><br>
+<a href="telemetry.internal.platform.android_platform_backend.html">android_platform_backend</a><br>
+<a href="telemetry.internal.platform.android_platform_backend_unittest.html">android_platform_backend_unittest</a><br>
+<a href="telemetry.internal.platform.cros_device.html">cros_device</a><br>
+<a href="telemetry.internal.platform.cros_platform_backend.html">cros_platform_backend</a><br>
+<a href="telemetry.internal.platform.cros_platform_backend_unittest.html">cros_platform_backend_unittest</a><br>
+<a href="telemetry.internal.platform.desktop_device.html">desktop_device</a><br>
+<a href="telemetry.internal.platform.desktop_platform_backend.html">desktop_platform_backend</a><br>
+<a href="telemetry.internal.platform.device.html">device</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.device_finder.html">device_finder</a><br>
+<a href="telemetry.internal.platform.gpu_device.html">gpu_device</a><br>
+<a href="telemetry.internal.platform.gpu_device_unittest.html">gpu_device_unittest</a><br>
+<a href="telemetry.internal.platform.gpu_info.html">gpu_info</a><br>
+<a href="telemetry.internal.platform.gpu_info_unittest.html">gpu_info_unittest</a><br>
+<a href="telemetry.internal.platform.ios_device.html">ios_device</a><br>
+<a href="telemetry.internal.platform.ios_platform_backend.html">ios_platform_backend</a><br>
+<a href="telemetry.internal.platform.linux_based_platform_backend.html">linux_based_platform_backend</a><br>
+<a href="telemetry.internal.platform.linux_based_platform_backend_unittest.html">linux_based_platform_backend_unittest</a><br>
+<a href="telemetry.internal.platform.linux_platform_backend.html">linux_platform_backend</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.linux_platform_backend_unittest.html">linux_platform_backend_unittest</a><br>
+<a href="telemetry.internal.platform.mac_platform_backend.html">mac_platform_backend</a><br>
+<a href="telemetry.internal.platform.mac_platform_backend_unittest.html">mac_platform_backend_unittest</a><br>
+<a href="telemetry.internal.platform.msr_server_win.html">msr_server_win</a><br>
+<a href="telemetry.internal.platform.network_controller_backend.html">network_controller_backend</a><br>
+<a href="telemetry.internal.platform.network_controller_backend_unittest.html">network_controller_backend_unittest</a><br>
+<a href="telemetry.internal.platform.platform_backend.html">platform_backend</a><br>
+<a href="telemetry.internal.platform.platform_backend_unittest.html">platform_backend_unittest</a><br>
+<a href="telemetry.internal.platform.posix_platform_backend.html">posix_platform_backend</a><br>
+<a href="telemetry.internal.platform.posix_platform_backend_unittest.html">posix_platform_backend_unittest</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.power_monitor.html"><strong>power_monitor</strong>&nbsp;(package)</a><br>
+<a href="telemetry.internal.platform.profiler.html"><strong>profiler</strong>&nbsp;(package)</a><br>
+<a href="telemetry.internal.platform.profiling_controller_backend.html">profiling_controller_backend</a><br>
+<a href="telemetry.internal.platform.system_info.html">system_info</a><br>
+<a href="telemetry.internal.platform.system_info_unittest.html">system_info_unittest</a><br>
+<a href="telemetry.internal.platform.tracing_agent.html"><strong>tracing_agent</strong>&nbsp;(package)</a><br>
+<a href="telemetry.internal.platform.tracing_controller_backend.html">tracing_controller_backend</a><br>
+<a href="telemetry.internal.platform.trybot_device.html">trybot_device</a><br>
+<a href="telemetry.internal.platform.win_platform_backend.html">win_platform_backend</a><br>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.ios_device.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.ios_device.html
new file mode 100644
index 0000000..679e5e8
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.ios_device.html
@@ -0,0 +1,92 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.ios_device</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.ios_device</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/ios_device.py">telemetry/internal/platform/ios_device.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.platform.device.html">telemetry.internal.platform.device</a><br>
+<a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+<a href="telemetry.core.platform.html">telemetry.core.platform</a><br>
+</td><td width="25%" valign=top><a href="re.html">re</a><br>
+<a href="subprocess.html">subprocess</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.device.html#Device">telemetry.internal.platform.device.Device</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.ios_device.html#IOSDevice">IOSDevice</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="IOSDevice">class <strong>IOSDevice</strong></a>(<a href="telemetry.internal.platform.device.html#Device">telemetry.internal.platform.device.Device</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.ios_device.html#IOSDevice">IOSDevice</a></dd>
+<dd><a href="telemetry.internal.platform.device.html#Device">telemetry.internal.platform.device.Device</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="IOSDevice-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="IOSDevice-GetAllConnectedDevices"><strong>GetAllConnectedDevices</strong></a>(cls, blacklist)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.device.html#Device">telemetry.internal.platform.device.Device</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>guid</strong></dt>
+</dl>
+<dl><dt><strong>name</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-FindAllAvailableDevices"><strong>FindAllAvailableDevices</strong></a>(options)</dt><dd><tt>Returns&nbsp;a&nbsp;list&nbsp;of&nbsp;available&nbsp;devices.</tt></dd></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>IOSSIM_BUILD_DIRECTORIES</strong> = ['Debug-iphonesimulator', 'Profile-iphonesimulator', 'Release-iphonesimulator']</td></tr></table>
+</body></html>
\ No newline at end of file
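`FindAllAvailableDevices(options)` above is documented only as returning a list of available devices, and `IOSDevice.GetAllConnectedDevices` takes a blacklist. The sketch below shows how a caller might enumerate devices through this module; the shape of `options` and the `blacklist=None` argument are assumptions, not taken from this page.

```python
# Hedged sketch: enumerate iOS devices via the module documented above.
# `options` is treated as an opaque finder-options object; this page does
# not describe its shape.
from telemetry.internal.platform import ios_device

def describe_ios_devices(options):
    devices = ios_device.FindAllAvailableDevices(options)
    for device in devices:
        # `name` and `guid` are data descriptors inherited from Device.
        print('%s (%s)' % (device.name, device.guid))
    return devices

# Classmethod variant; assuming None is acceptable when no blacklist is used.
connected = ios_device.IOSDevice.GetAllConnectedDevices(blacklist=None)
```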
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.ios_platform_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.ios_platform_backend.html
new file mode 100644
index 0000000..41566e8
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.ios_platform_backend.html
@@ -0,0 +1,244 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.ios_platform_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.ios_platform_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/ios_platform_backend.py">telemetry/internal/platform/ios_platform_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.posix_platform_backend.html">telemetry.internal.platform.posix_platform_backend</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.posix_platform_backend.html#PosixPlatformBackend">telemetry.internal.platform.posix_platform_backend.PosixPlatformBackend</a>(<a href="telemetry.internal.platform.desktop_platform_backend.html#DesktopPlatformBackend">telemetry.internal.platform.desktop_platform_backend.DesktopPlatformBackend</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.ios_platform_backend.html#IosPlatformBackend">IosPlatformBackend</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="IosPlatformBackend">class <strong>IosPlatformBackend</strong></a>(<a href="telemetry.internal.platform.posix_platform_backend.html#PosixPlatformBackend">telemetry.internal.platform.posix_platform_backend.PosixPlatformBackend</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>#TODO(baxley):&nbsp;Put&nbsp;in&nbsp;real&nbsp;values.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.ios_platform_backend.html#IosPlatformBackend">IosPlatformBackend</a></dd>
+<dd><a href="telemetry.internal.platform.posix_platform_backend.html#PosixPlatformBackend">telemetry.internal.platform.posix_platform_backend.PosixPlatformBackend</a></dd>
+<dd><a href="telemetry.internal.platform.desktop_platform_backend.html#DesktopPlatformBackend">telemetry.internal.platform.desktop_platform_backend.DesktopPlatformBackend</a></dd>
+<dd><a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="IosPlatformBackend-CanMonitorPower"><strong>CanMonitorPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-CanMonitorThermalThrottling"><strong>CanMonitorThermalThrottling</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-FlushDnsCache"><strong>FlushDnsCache</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-FlushEntireSystemCache"><strong>FlushEntireSystemCache</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-GetOSName"><strong>GetOSName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-GetOSVersionName"><strong>GetOSVersionName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-GetSystemTotalPhysicalMemory"><strong>GetSystemTotalPhysicalMemory</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-HasBeenThermallyThrottled"><strong>HasBeenThermallyThrottled</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-InstallApplication"><strong>InstallApplication</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-IsThermallyThrottled"><strong>IsThermallyThrottled</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-SetFullPerformanceModeEnabled"><strong>SetFullPerformanceModeEnabled</strong></a>(self, enabled)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-StartMonitoringPower"><strong>StartMonitoringPower</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-StopMonitoringPower"><strong>StopMonitoringPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-StopVideoCapture"><strong>StopVideoCapture</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.posix_platform_backend.html#PosixPlatformBackend">telemetry.internal.platform.posix_platform_backend.PosixPlatformBackend</a>:<br>
+<dl><dt><a name="IosPlatformBackend-CanLaunchApplication"><strong>CanLaunchApplication</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-GetChildPids"><strong>GetChildPids</strong></a>(self, pid)</dt><dd><tt>Returns&nbsp;a&nbsp;list&nbsp;of&nbsp;child&nbsp;pids&nbsp;of&nbsp;|pid|.</tt></dd></dl>
+
+<dl><dt><a name="IosPlatformBackend-GetCommandLine"><strong>GetCommandLine</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-GetFileContents"><strong>GetFileContents</strong></a>(self, path)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-GetPsOutput"><strong>GetPsOutput</strong></a>(self, columns, pid<font color="#909090">=None</font>)</dt><dd><tt>Returns&nbsp;output&nbsp;of&nbsp;the&nbsp;'ps'&nbsp;command&nbsp;as&nbsp;a&nbsp;list&nbsp;of&nbsp;lines.<br>
+Subclass&nbsp;should&nbsp;override&nbsp;this&nbsp;function.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;columns:&nbsp;A&nbsp;list&nbsp;of&nbsp;required&nbsp;columns,&nbsp;e.g.,&nbsp;['pid',&nbsp;'pss'].<br>
+&nbsp;&nbsp;pid:&nbsp;If&nbsp;not&nbsp;None,&nbsp;returns&nbsp;only&nbsp;the&nbsp;information&nbsp;of&nbsp;the&nbsp;process<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;with&nbsp;the&nbsp;pid.</tt></dd></dl>
+
+<dl><dt><a name="IosPlatformBackend-IsApplicationRunning"><strong>IsApplicationRunning</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-LaunchApplication"><strong>LaunchApplication</strong></a>(self, application, parameters<font color="#909090">=None</font>, elevate_privilege<font color="#909090">=False</font>)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-RunCommand"><strong>RunCommand</strong></a>(self, args)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.desktop_platform_backend.html#DesktopPlatformBackend">telemetry.internal.platform.desktop_platform_backend.DesktopPlatformBackend</a>:<br>
+<dl><dt><a name="IosPlatformBackend-FlushSystemCacheForDirectory"><strong>FlushSystemCacheForDirectory</strong></a>(self, directory)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-GetDeviceTypeName"><strong>GetDeviceTypeName</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>:<br>
+<dl><dt><a name="IosPlatformBackend-CanCaptureVideo"><strong>CanCaptureVideo</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-CanFlushIndividualFilesFromSystemCache"><strong>CanFlushIndividualFilesFromSystemCache</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-CanMeasurePerApplicationPower"><strong>CanMeasurePerApplicationPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-CanMonitorNetworkData"><strong>CanMonitorNetworkData</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-CanTakeScreenshot"><strong>CanTakeScreenshot</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-CooperativelyShutdown"><strong>CooperativelyShutdown</strong></a>(self, proc, app_name)</dt><dd><tt>Cooperatively&nbsp;shut&nbsp;down&nbsp;the&nbsp;given&nbsp;process&nbsp;from&nbsp;subprocess.Popen.<br>
+&nbsp;<br>
+Currently&nbsp;this&nbsp;is&nbsp;only&nbsp;implemented&nbsp;on&nbsp;Windows.&nbsp;See<br>
+crbug.com/424024&nbsp;for&nbsp;background&nbsp;on&nbsp;why&nbsp;it&nbsp;was&nbsp;added.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;proc:&nbsp;a&nbsp;process&nbsp;object&nbsp;returned&nbsp;from&nbsp;subprocess.Popen.<br>
+&nbsp;&nbsp;app_name:&nbsp;on&nbsp;Windows,&nbsp;is&nbsp;the&nbsp;prefix&nbsp;of&nbsp;the&nbsp;application's&nbsp;window<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;class&nbsp;name&nbsp;that&nbsp;should&nbsp;be&nbsp;searched&nbsp;for.&nbsp;This&nbsp;helps&nbsp;ensure<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;that&nbsp;only&nbsp;the&nbsp;application's&nbsp;windows&nbsp;are&nbsp;closed.<br>
+&nbsp;<br>
+Returns&nbsp;True&nbsp;if&nbsp;it&nbsp;is&nbsp;believed&nbsp;the&nbsp;attempt&nbsp;succeeded.</tt></dd></dl>
+
+<dl><dt><a name="IosPlatformBackend-DidCreateBrowser"><strong>DidCreateBrowser</strong></a>(self, browser, browser_backend)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-DidStartBrowser"><strong>DidStartBrowser</strong></a>(self, browser, browser_backend)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-GetArchName"><strong>GetArchName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-GetCpuStats"><strong>GetCpuStats</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-GetCpuTimestamp"><strong>GetCpuTimestamp</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-GetMemoryStats"><strong>GetMemoryStats</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-GetNetworkData"><strong>GetNetworkData</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-GetRemotePort"><strong>GetRemotePort</strong></a>(self, port)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-GetSystemCommitCharge"><strong>GetSystemCommitCharge</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-InitPlatformBackend"><strong>InitPlatformBackend</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-IsCooperativeShutdownSupported"><strong>IsCooperativeShutdownSupported</strong></a>(self)</dt><dd><tt>Indicates&nbsp;whether&nbsp;CooperativelyShutdown,&nbsp;below,&nbsp;is&nbsp;supported.<br>
+It&nbsp;is&nbsp;not&nbsp;necessary&nbsp;to&nbsp;implement&nbsp;it&nbsp;on&nbsp;all&nbsp;platforms.</tt></dd></dl>
+
+<dl><dt><a name="IosPlatformBackend-IsDisplayTracingSupported"><strong>IsDisplayTracingSupported</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-PathExists"><strong>PathExists</strong></a>(self, path, timeout<font color="#909090">=None</font>, retries<font color="#909090">=None</font>)</dt><dd><tt>Tests&nbsp;whether&nbsp;the&nbsp;given&nbsp;path&nbsp;exists&nbsp;on&nbsp;the&nbsp;target&nbsp;platform.<br>
+Args:<br>
+&nbsp;&nbsp;path:&nbsp;the&nbsp;requested&nbsp;path.<br>
+&nbsp;&nbsp;timeout:&nbsp;timeout.<br>
+&nbsp;&nbsp;retries:&nbsp;number&nbsp;of&nbsp;retries.<br>
+Returns:<br>
+&nbsp;&nbsp;Whether&nbsp;the&nbsp;path&nbsp;exists&nbsp;on&nbsp;the&nbsp;target&nbsp;platform.</tt></dd></dl>
+
+<dl><dt><a name="IosPlatformBackend-PurgeUnpinnedMemory"><strong>PurgeUnpinnedMemory</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-ReadMsr"><strong>ReadMsr</strong></a>(self, msr_number, start<font color="#909090">=0</font>, length<font color="#909090">=64</font>)</dt><dd><tt>Read&nbsp;a&nbsp;CPU&nbsp;model-specific&nbsp;register&nbsp;(MSR).<br>
+&nbsp;<br>
+Which&nbsp;MSRs&nbsp;are&nbsp;available&nbsp;depends&nbsp;on&nbsp;the&nbsp;CPU&nbsp;model.<br>
+On&nbsp;systems&nbsp;with&nbsp;multiple&nbsp;CPUs,&nbsp;this&nbsp;function&nbsp;may&nbsp;run&nbsp;on&nbsp;any&nbsp;CPU.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;msr_number:&nbsp;The&nbsp;number&nbsp;of&nbsp;the&nbsp;register&nbsp;to&nbsp;read.<br>
+&nbsp;&nbsp;start:&nbsp;The&nbsp;least&nbsp;significant&nbsp;bit&nbsp;to&nbsp;read,&nbsp;zero-indexed.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;(Said&nbsp;another&nbsp;way,&nbsp;the&nbsp;number&nbsp;of&nbsp;bits&nbsp;to&nbsp;right-shift&nbsp;the&nbsp;MSR&nbsp;value.)<br>
+&nbsp;&nbsp;length:&nbsp;The&nbsp;number&nbsp;of&nbsp;bits&nbsp;to&nbsp;read.&nbsp;MSRs&nbsp;are&nbsp;64&nbsp;bits,&nbsp;even&nbsp;on&nbsp;32-bit&nbsp;CPUs.</tt></dd></dl>
+
+<dl><dt><a name="IosPlatformBackend-SetPlatform"><strong>SetPlatform</strong></a>(self, platform)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-StartDisplayTracing"><strong>StartDisplayTracing</strong></a>(self)</dt><dd><tt>Start&nbsp;gathering&nbsp;a&nbsp;trace&nbsp;with&nbsp;frame&nbsp;timestamps&nbsp;close&nbsp;to&nbsp;physical<br>
+display.</tt></dd></dl>
+
+<dl><dt><a name="IosPlatformBackend-StartVideoCapture"><strong>StartVideoCapture</strong></a>(self, min_bitrate_mbps)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-StopDisplayTracing"><strong>StopDisplayTracing</strong></a>(self)</dt><dd><tt>Stop&nbsp;gathering&nbsp;a&nbsp;trace&nbsp;with&nbsp;frame&nbsp;timestamps&nbsp;close&nbsp;to&nbsp;physical&nbsp;display.<br>
+&nbsp;<br>
+Returns&nbsp;raw&nbsp;tracing&nbsp;events&nbsp;that&nbsp;contain&nbsp;the&nbsp;timestamps&nbsp;of&nbsp;the&nbsp;physical<br>
+display.</tt></dd></dl>
+
+<dl><dt><a name="IosPlatformBackend-TakeScreenshot"><strong>TakeScreenshot</strong></a>(self, file_path)</dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(self, browser, browser_backend)</dt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>:<br>
+<dl><dt><a name="IosPlatformBackend-CreatePlatformForDevice"><strong>CreatePlatformForDevice</strong></a>(cls, device, finder_options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="IosPlatformBackend-IsPlatformBackendForHost"><strong>IsPlatformBackendForHost</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Returns&nbsp;whether&nbsp;this&nbsp;platform&nbsp;backend&nbsp;is&nbsp;the&nbsp;platform&nbsp;backend&nbsp;to&nbsp;be&nbsp;used<br>
+for&nbsp;the&nbsp;host&nbsp;device&nbsp;which&nbsp;telemetry&nbsp;is&nbsp;running&nbsp;on.</tt></dd></dl>
+
+<dl><dt><a name="IosPlatformBackend-SupportsDevice"><strong>SupportsDevice</strong></a>(cls, device)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Returns&nbsp;whether&nbsp;this&nbsp;platform&nbsp;backend&nbsp;supports&nbsp;intialization&nbsp;from&nbsp;the<br>
+device.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>forwarder_factory</strong></dt>
+</dl>
+<dl><dt><strong>is_host_platform</strong></dt>
+</dl>
+<dl><dt><strong>is_video_capture_running</strong></dt>
+</dl>
+<dl><dt><strong>network_controller_backend</strong></dt>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+</dl>
+<dl><dt><strong>running_browser_backends</strong></dt>
+</dl>
+<dl><dt><strong>tracing_controller_backend</strong></dt>
+</dl>
+<dl><dt><strong>wpr_ca_cert_path</strong></dt>
+</dl>
+<dl><dt><strong>wpr_http_device_port</strong></dt>
+</dl>
+<dl><dt><strong>wpr_https_device_port</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
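`ReadMsr`, inherited by IosPlatformBackend above, documents `start` as the least significant bit to read (i.e. the number of bits to right-shift the value) and `length` as the number of bits. The helper below is a hypothetical, self-contained illustration of that bit arithmetic only; it does not touch the platform backend or a real MSR.

```python
# Hedged illustration of the documented ReadMsr start/length semantics.
# extract_msr_field is a hypothetical helper, not part of telemetry.
def extract_msr_field(value, start=0, length=64):
    mask = (1 << length) - 1           # keep `length` bits
    return (value >> start) & mask     # drop `start` least significant bits

raw = 0x00001A2B0000162A               # made-up 64-bit register value
print(hex(extract_msr_field(raw, start=8, length=8)))  # bits 8..15 -> 0x16
print(hex(extract_msr_field(raw)))                     # full 64-bit value
```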
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.linux_based_platform_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.linux_based_platform_backend.html
new file mode 100644
index 0000000..bba8356
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.linux_based_platform_backend.html
@@ -0,0 +1,278 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.linux_based_platform_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.linux_based_platform_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/linux_based_platform_backend.py">telemetry/internal/platform/linux_based_platform_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.decorators.html">telemetry.decorators</a><br>
+<a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.platform_backend.html">telemetry.internal.platform.platform_backend</a><br>
+<a href="re.html">re</a><br>
+</td><td width="25%" valign=top><a href="resource.html">resource</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.linux_based_platform_backend.html#LinuxBasedPlatformBackend">LinuxBasedPlatformBackend</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="LinuxBasedPlatformBackend">class <strong>LinuxBasedPlatformBackend</strong></a>(<a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Abstract&nbsp;platform&nbsp;containing&nbsp;functionality&nbsp;shared&nbsp;by&nbsp;all&nbsp;Linux&nbsp;based&nbsp;OSes.<br>
+&nbsp;<br>
+This&nbsp;includes&nbsp;Android&nbsp;and&nbsp;ChromeOS.<br>
+&nbsp;<br>
+Subclasses&nbsp;must&nbsp;implement&nbsp;RunCommand,&nbsp;GetFileContents,&nbsp;GetPsOutput,&nbsp;and<br>
+ParseCStateSample.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.linux_based_platform_backend.html#LinuxBasedPlatformBackend">LinuxBasedPlatformBackend</a></dd>
+<dd><a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="LinuxBasedPlatformBackend-GetClockTicks"><strong>GetClockTicks</strong></a>(*args, **kwargs)</dt><dd><tt>Returns&nbsp;the&nbsp;number&nbsp;of&nbsp;clock&nbsp;ticks&nbsp;per&nbsp;second.<br>
+&nbsp;<br>
+The&nbsp;proper&nbsp;way&nbsp;is&nbsp;to&nbsp;call&nbsp;os.sysconf('SC_CLK_TCK')&nbsp;but&nbsp;that&nbsp;is&nbsp;not&nbsp;easy&nbsp;to<br>
+do&nbsp;on&nbsp;Android/CrOS.&nbsp;In&nbsp;practice,&nbsp;nearly&nbsp;all&nbsp;Linux&nbsp;machines&nbsp;have&nbsp;a&nbsp;USER_HZ<br>
+of&nbsp;100,&nbsp;so&nbsp;just&nbsp;return&nbsp;that.</tt></dd></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-GetCpuStats"><strong>GetCpuStats</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-GetCpuTimestamp"><strong>GetCpuTimestamp</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-GetFileContents"><strong>GetFileContents</strong></a>(self, filename)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-GetMemoryStats"><strong>GetMemoryStats</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-GetPsOutput"><strong>GetPsOutput</strong></a>(self, columns, pid<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-GetSystemCommitCharge"><strong>GetSystemCommitCharge</strong></a>(self)</dt><dd><tt>#&nbsp;Get&nbsp;the&nbsp;commit&nbsp;charge&nbsp;in&nbsp;kB.</tt></dd></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-GetSystemTotalPhysicalMemory"><strong>GetSystemTotalPhysicalMemory</strong></a>(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-RunCommand"><strong>RunCommand</strong></a>(self, cmd)</dt><dd><tt>Runs&nbsp;the&nbsp;specified&nbsp;command.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;cmd:&nbsp;A&nbsp;list&nbsp;of&nbsp;program&nbsp;arguments&nbsp;or&nbsp;the&nbsp;path&nbsp;string&nbsp;of&nbsp;the&nbsp;program.<br>
+Returns:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;A&nbsp;string&nbsp;whose&nbsp;content&nbsp;is&nbsp;the&nbsp;output&nbsp;of&nbsp;the&nbsp;command.</tt></dd></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="LinuxBasedPlatformBackend-ParseCStateSample"><strong>ParseCStateSample</strong></a>(sample)</dt><dd><tt>Parse&nbsp;a&nbsp;single&nbsp;c-state&nbsp;residency&nbsp;sample.<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;sample:&nbsp;A&nbsp;sample&nbsp;of&nbsp;c-state&nbsp;residency&nbsp;times&nbsp;to&nbsp;be&nbsp;parsed.&nbsp;Organized&nbsp;as<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;a&nbsp;dictionary&nbsp;mapping&nbsp;CPU&nbsp;name&nbsp;to&nbsp;a&nbsp;string&nbsp;containing&nbsp;all&nbsp;c-state<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;names,&nbsp;the&nbsp;times&nbsp;in&nbsp;each&nbsp;state,&nbsp;the&nbsp;latency&nbsp;of&nbsp;each&nbsp;state,&nbsp;and&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;time&nbsp;at&nbsp;which&nbsp;the&nbsp;sample&nbsp;was&nbsp;taken&nbsp;all&nbsp;separated&nbsp;by&nbsp;newlines.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Ex:&nbsp;{'cpu0':&nbsp;'C0<br>
+C1<br>
+5000<br>
+2000<br>
+20<br>
+30<br>
+1406673171'}<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;Returns:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Dictionary&nbsp;associating&nbsp;a&nbsp;c-state&nbsp;with&nbsp;a&nbsp;time.</tt></dd></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>:<br>
+<dl><dt><a name="LinuxBasedPlatformBackend-CanCaptureVideo"><strong>CanCaptureVideo</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-CanFlushIndividualFilesFromSystemCache"><strong>CanFlushIndividualFilesFromSystemCache</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-CanLaunchApplication"><strong>CanLaunchApplication</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-CanMeasurePerApplicationPower"><strong>CanMeasurePerApplicationPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-CanMonitorNetworkData"><strong>CanMonitorNetworkData</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-CanMonitorPower"><strong>CanMonitorPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-CanMonitorThermalThrottling"><strong>CanMonitorThermalThrottling</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-CanTakeScreenshot"><strong>CanTakeScreenshot</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-CooperativelyShutdown"><strong>CooperativelyShutdown</strong></a>(self, proc, app_name)</dt><dd><tt>Cooperatively&nbsp;shut&nbsp;down&nbsp;the&nbsp;given&nbsp;process&nbsp;from&nbsp;subprocess.Popen.<br>
+&nbsp;<br>
+Currently&nbsp;this&nbsp;is&nbsp;only&nbsp;implemented&nbsp;on&nbsp;Windows.&nbsp;See<br>
+crbug.com/424024&nbsp;for&nbsp;background&nbsp;on&nbsp;why&nbsp;it&nbsp;was&nbsp;added.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;proc:&nbsp;a&nbsp;process&nbsp;object&nbsp;returned&nbsp;from&nbsp;subprocess.Popen.<br>
+&nbsp;&nbsp;app_name:&nbsp;on&nbsp;Windows,&nbsp;is&nbsp;the&nbsp;prefix&nbsp;of&nbsp;the&nbsp;application's&nbsp;window<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;class&nbsp;name&nbsp;that&nbsp;should&nbsp;be&nbsp;searched&nbsp;for.&nbsp;This&nbsp;helps&nbsp;ensure<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;that&nbsp;only&nbsp;the&nbsp;application's&nbsp;windows&nbsp;are&nbsp;closed.<br>
+&nbsp;<br>
+Returns&nbsp;True&nbsp;if&nbsp;it&nbsp;is&nbsp;believed&nbsp;the&nbsp;attempt&nbsp;succeeded.</tt></dd></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-DidCreateBrowser"><strong>DidCreateBrowser</strong></a>(self, browser, browser_backend)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-DidStartBrowser"><strong>DidStartBrowser</strong></a>(self, browser, browser_backend)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-FlushDnsCache"><strong>FlushDnsCache</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-FlushEntireSystemCache"><strong>FlushEntireSystemCache</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-FlushSystemCacheForDirectory"><strong>FlushSystemCacheForDirectory</strong></a>(self, directory)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-GetArchName"><strong>GetArchName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-GetChildPids"><strong>GetChildPids</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-GetCommandLine"><strong>GetCommandLine</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-GetDeviceTypeName"><strong>GetDeviceTypeName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-GetNetworkData"><strong>GetNetworkData</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-GetOSName"><strong>GetOSName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-GetOSVersionName"><strong>GetOSVersionName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-GetRemotePort"><strong>GetRemotePort</strong></a>(self, port)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-HasBeenThermallyThrottled"><strong>HasBeenThermallyThrottled</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-InitPlatformBackend"><strong>InitPlatformBackend</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-InstallApplication"><strong>InstallApplication</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-IsApplicationRunning"><strong>IsApplicationRunning</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-IsCooperativeShutdownSupported"><strong>IsCooperativeShutdownSupported</strong></a>(self)</dt><dd><tt>Indicates&nbsp;whether&nbsp;CooperativelyShutdown,&nbsp;below,&nbsp;is&nbsp;supported.<br>
+It&nbsp;is&nbsp;not&nbsp;necessary&nbsp;to&nbsp;implement&nbsp;it&nbsp;on&nbsp;all&nbsp;platforms.</tt></dd></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-IsDisplayTracingSupported"><strong>IsDisplayTracingSupported</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-IsThermallyThrottled"><strong>IsThermallyThrottled</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-LaunchApplication"><strong>LaunchApplication</strong></a>(self, application, parameters<font color="#909090">=None</font>, elevate_privilege<font color="#909090">=False</font>)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-PathExists"><strong>PathExists</strong></a>(self, path, timeout<font color="#909090">=None</font>, retries<font color="#909090">=None</font>)</dt><dd><tt>Tests&nbsp;whether&nbsp;the&nbsp;given&nbsp;path&nbsp;exists&nbsp;on&nbsp;the&nbsp;target&nbsp;platform.<br>
+Args:<br>
+&nbsp;&nbsp;path:&nbsp;the&nbsp;requested&nbsp;path.<br>
+&nbsp;&nbsp;timeout:&nbsp;timeout.<br>
+&nbsp;&nbsp;retries:&nbsp;number&nbsp;of&nbsp;retries.<br>
+Returns:<br>
+&nbsp;&nbsp;Whether&nbsp;the&nbsp;path&nbsp;exists&nbsp;on&nbsp;the&nbsp;target&nbsp;platform.</tt></dd></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-PurgeUnpinnedMemory"><strong>PurgeUnpinnedMemory</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-ReadMsr"><strong>ReadMsr</strong></a>(self, msr_number, start<font color="#909090">=0</font>, length<font color="#909090">=64</font>)</dt><dd><tt>Read&nbsp;a&nbsp;CPU&nbsp;model-specific&nbsp;register&nbsp;(MSR).<br>
+&nbsp;<br>
+Which&nbsp;MSRs&nbsp;are&nbsp;available&nbsp;depends&nbsp;on&nbsp;the&nbsp;CPU&nbsp;model.<br>
+On&nbsp;systems&nbsp;with&nbsp;multiple&nbsp;CPUs,&nbsp;this&nbsp;function&nbsp;may&nbsp;run&nbsp;on&nbsp;any&nbsp;CPU.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;msr_number:&nbsp;The&nbsp;number&nbsp;of&nbsp;the&nbsp;register&nbsp;to&nbsp;read.<br>
+&nbsp;&nbsp;start:&nbsp;The&nbsp;least&nbsp;significant&nbsp;bit&nbsp;to&nbsp;read,&nbsp;zero-indexed.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;(Said&nbsp;another&nbsp;way,&nbsp;the&nbsp;number&nbsp;of&nbsp;bits&nbsp;to&nbsp;right-shift&nbsp;the&nbsp;MSR&nbsp;value.)<br>
+&nbsp;&nbsp;length:&nbsp;The&nbsp;number&nbsp;of&nbsp;bits&nbsp;to&nbsp;read.&nbsp;MSRs&nbsp;are&nbsp;64&nbsp;bits,&nbsp;even&nbsp;on&nbsp;32-bit&nbsp;CPUs.</tt></dd></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-SetFullPerformanceModeEnabled"><strong>SetFullPerformanceModeEnabled</strong></a>(self, enabled)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-SetPlatform"><strong>SetPlatform</strong></a>(self, platform)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-StartDisplayTracing"><strong>StartDisplayTracing</strong></a>(self)</dt><dd><tt>Start&nbsp;gathering&nbsp;a&nbsp;trace&nbsp;with&nbsp;frame&nbsp;timestamps&nbsp;close&nbsp;to&nbsp;physical<br>
+display.</tt></dd></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-StartMonitoringPower"><strong>StartMonitoringPower</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-StartVideoCapture"><strong>StartVideoCapture</strong></a>(self, min_bitrate_mbps)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-StopDisplayTracing"><strong>StopDisplayTracing</strong></a>(self)</dt><dd><tt>Stop&nbsp;gathering&nbsp;a&nbsp;trace&nbsp;with&nbsp;frame&nbsp;timestamps&nbsp;close&nbsp;to&nbsp;physical&nbsp;display.<br>
+&nbsp;<br>
+Returns&nbsp;raw&nbsp;tracing&nbsp;events&nbsp;that&nbsp;contain&nbsp;the&nbsp;timestamps&nbsp;of&nbsp;the&nbsp;physical<br>
+display.</tt></dd></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-StopMonitoringPower"><strong>StopMonitoringPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-StopVideoCapture"><strong>StopVideoCapture</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-TakeScreenshot"><strong>TakeScreenshot</strong></a>(self, file_path)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(self, browser, browser_backend)</dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-__init__"><strong>__init__</strong></a>(self, device<font color="#909090">=None</font>)</dt><dd><tt>Initalize&nbsp;an&nbsp;instance&nbsp;of&nbsp;<a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">PlatformBackend</a>&nbsp;from&nbsp;a&nbsp;device&nbsp;optionally.<br>
+Call&nbsp;sites&nbsp;need&nbsp;to&nbsp;use&nbsp;SupportsDevice&nbsp;before&nbsp;intialization&nbsp;to&nbsp;check<br>
+whether&nbsp;this&nbsp;platform&nbsp;backend&nbsp;supports&nbsp;the&nbsp;device.<br>
+If&nbsp;device&nbsp;is&nbsp;None,&nbsp;this&nbsp;constructor&nbsp;returns&nbsp;the&nbsp;host&nbsp;platform&nbsp;backend<br>
+which&nbsp;telemetry&nbsp;is&nbsp;running&nbsp;on.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;device:&nbsp;an&nbsp;instance&nbsp;of&nbsp;telemetry.core.platform.device.Device.</tt></dd></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>:<br>
+<dl><dt><a name="LinuxBasedPlatformBackend-CreatePlatformForDevice"><strong>CreatePlatformForDevice</strong></a>(cls, device, finder_options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-IsPlatformBackendForHost"><strong>IsPlatformBackendForHost</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Returns&nbsp;whether&nbsp;this&nbsp;platform&nbsp;backend&nbsp;is&nbsp;the&nbsp;platform&nbsp;backend&nbsp;to&nbsp;be&nbsp;used<br>
+for&nbsp;the&nbsp;host&nbsp;device&nbsp;which&nbsp;telemetry&nbsp;is&nbsp;running&nbsp;on.</tt></dd></dl>
+
+<dl><dt><a name="LinuxBasedPlatformBackend-SupportsDevice"><strong>SupportsDevice</strong></a>(cls, device)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Returns&nbsp;whether&nbsp;this&nbsp;platform&nbsp;backend&nbsp;supports&nbsp;intialization&nbsp;from&nbsp;the<br>
+device.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>forwarder_factory</strong></dt>
+</dl>
+<dl><dt><strong>is_host_platform</strong></dt>
+</dl>
+<dl><dt><strong>is_video_capture_running</strong></dt>
+</dl>
+<dl><dt><strong>network_controller_backend</strong></dt>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+</dl>
+<dl><dt><strong>running_browser_backends</strong></dt>
+</dl>
+<dl><dt><strong>tracing_controller_backend</strong></dt>
+</dl>
+<dl><dt><strong>wpr_ca_cert_path</strong></dt>
+</dl>
+<dl><dt><strong>wpr_http_device_port</strong></dt>
+</dl>
+<dl><dt><strong>wpr_https_device_port</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
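`ParseCStateSample` above spells out the sample layout: a dictionary mapping a CPU name to a newline-separated block of c-state names, residency times, latencies, and a sample timestamp, with the parsing itself left to subclasses (per the class docstring). A sketch of that sample shape, assuming the docstring's example is representative:

```python
# Hedged sketch of the c-state sample format documented above. The values
# mirror the docstring's example; a concrete LinuxBasedPlatformBackend
# subclass (Android/CrOS) is expected to supply ParseCStateSample itself.
sample = {
    'cpu0': '\n'.join([
        'C0', 'C1',        # c-state names
        '5000', '2000',    # time spent in each state
        '20', '30',        # latency of each state
        '1406673171',      # time at which the sample was taken
    ]),
}

# Interface illustration only (the base class defers to subclasses):
#   residencies = SomeConcreteBackend.ParseCStateSample(sample)
# where `residencies` maps each c-state to a time, per the docstring.
```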
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.linux_platform_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.linux_platform_backend.html
new file mode 100644
index 0000000..9539096
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.linux_platform_backend.html
@@ -0,0 +1,280 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.linux_platform_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.linux_platform_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/linux_platform_backend.py">telemetry/internal/platform/linux_platform_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.util.binary_manager.html">telemetry.internal.util.binary_manager</a><br>
+<a href="catapult_base.cloud_storage.html">catapult_base.cloud_storage</a><br>
+<a href="telemetry.decorators.html">telemetry.decorators</a><br>
+<a href="telemetry.internal.platform.linux_based_platform_backend.html">telemetry.internal.platform.linux_based_platform_backend</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+<a href="telemetry.internal.platform.power_monitor.msr_power_monitor.html">telemetry.internal.platform.power_monitor.msr_power_monitor</a><br>
+<a href="os.html">os</a><br>
+<a href="telemetry.core.os_version.html">telemetry.core.os_version</a><br>
+</td><td width="25%" valign=top><a href="platform.html">platform</a><br>
+<a href="telemetry.internal.platform.posix_platform_backend.html">telemetry.internal.platform.posix_platform_backend</a><br>
+<a href="subprocess.html">subprocess</a><br>
+<a href="sys.html">sys</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.linux_based_platform_backend.html#LinuxBasedPlatformBackend">telemetry.internal.platform.linux_based_platform_backend.LinuxBasedPlatformBackend</a>(<a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.linux_platform_backend.html#LinuxPlatformBackend">LinuxPlatformBackend</a>(<a href="telemetry.internal.platform.posix_platform_backend.html#PosixPlatformBackend">telemetry.internal.platform.posix_platform_backend.PosixPlatformBackend</a>, <a href="telemetry.internal.platform.linux_based_platform_backend.html#LinuxBasedPlatformBackend">telemetry.internal.platform.linux_based_platform_backend.LinuxBasedPlatformBackend</a>)
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.posix_platform_backend.html#PosixPlatformBackend">telemetry.internal.platform.posix_platform_backend.PosixPlatformBackend</a>(<a href="telemetry.internal.platform.desktop_platform_backend.html#DesktopPlatformBackend">telemetry.internal.platform.desktop_platform_backend.DesktopPlatformBackend</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.linux_platform_backend.html#LinuxPlatformBackend">LinuxPlatformBackend</a>(<a href="telemetry.internal.platform.posix_platform_backend.html#PosixPlatformBackend">telemetry.internal.platform.posix_platform_backend.PosixPlatformBackend</a>, <a href="telemetry.internal.platform.linux_based_platform_backend.html#LinuxBasedPlatformBackend">telemetry.internal.platform.linux_based_platform_backend.LinuxBasedPlatformBackend</a>)
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="LinuxPlatformBackend">class <strong>LinuxPlatformBackend</strong></a>(<a href="telemetry.internal.platform.posix_platform_backend.html#PosixPlatformBackend">telemetry.internal.platform.posix_platform_backend.PosixPlatformBackend</a>, <a href="telemetry.internal.platform.linux_based_platform_backend.html#LinuxBasedPlatformBackend">telemetry.internal.platform.linux_based_platform_backend.LinuxBasedPlatformBackend</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.linux_platform_backend.html#LinuxPlatformBackend">LinuxPlatformBackend</a></dd>
+<dd><a href="telemetry.internal.platform.posix_platform_backend.html#PosixPlatformBackend">telemetry.internal.platform.posix_platform_backend.PosixPlatformBackend</a></dd>
+<dd><a href="telemetry.internal.platform.desktop_platform_backend.html#DesktopPlatformBackend">telemetry.internal.platform.desktop_platform_backend.DesktopPlatformBackend</a></dd>
+<dd><a href="telemetry.internal.platform.linux_based_platform_backend.html#LinuxBasedPlatformBackend">telemetry.internal.platform.linux_based_platform_backend.LinuxBasedPlatformBackend</a></dd>
+<dd><a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="LinuxPlatformBackend-CanFlushIndividualFilesFromSystemCache"><strong>CanFlushIndividualFilesFromSystemCache</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-CanLaunchApplication"><strong>CanLaunchApplication</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-CanMeasurePerApplicationPower"><strong>CanMeasurePerApplicationPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-CanMonitorPower"><strong>CanMonitorPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-FlushEntireSystemCache"><strong>FlushEntireSystemCache</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-GetArchName"><strong>GetArchName</strong></a>(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-GetOSName"><strong>GetOSName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-GetOSVersionName"><strong>GetOSVersionName</strong></a>(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-HasBeenThermallyThrottled"><strong>HasBeenThermallyThrottled</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-InstallApplication"><strong>InstallApplication</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-IsThermallyThrottled"><strong>IsThermallyThrottled</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-ReadMsr"><strong>ReadMsr</strong></a>(self, msr_number, start<font color="#909090">=0</font>, length<font color="#909090">=64</font>)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-StartMonitoringPower"><strong>StartMonitoringPower</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-StopMonitoringPower"><strong>StopMonitoringPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="LinuxPlatformBackend-IsPlatformBackendForHost"><strong>IsPlatformBackendForHost</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.posix_platform_backend.html#PosixPlatformBackend">telemetry.internal.platform.posix_platform_backend.PosixPlatformBackend</a>:<br>
+<dl><dt><a name="LinuxPlatformBackend-GetChildPids"><strong>GetChildPids</strong></a>(self, pid)</dt><dd><tt>Returns&nbsp;a&nbsp;list&nbsp;of&nbsp;child&nbsp;pids&nbsp;of&nbsp;|pid|.</tt></dd></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-GetCommandLine"><strong>GetCommandLine</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-GetFileContents"><strong>GetFileContents</strong></a>(self, path)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-GetPsOutput"><strong>GetPsOutput</strong></a>(self, columns, pid<font color="#909090">=None</font>)</dt><dd><tt>Returns&nbsp;output&nbsp;of&nbsp;the&nbsp;'ps'&nbsp;command&nbsp;as&nbsp;a&nbsp;list&nbsp;of&nbsp;lines.<br>
+Subclass&nbsp;should&nbsp;override&nbsp;this&nbsp;function.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;columns:&nbsp;A&nbsp;list&nbsp;of&nbsp;required&nbsp;columns,&nbsp;e.g.,&nbsp;['pid',&nbsp;'pss'].<br>
+&nbsp;&nbsp;pid:&nbsp;If&nbsp;not&nbsp;None,&nbsp;returns&nbsp;only&nbsp;the&nbsp;information&nbsp;of&nbsp;the&nbsp;process<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;with&nbsp;the&nbsp;pid.</tt></dd></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-IsApplicationRunning"><strong>IsApplicationRunning</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-LaunchApplication"><strong>LaunchApplication</strong></a>(self, application, parameters<font color="#909090">=None</font>, elevate_privilege<font color="#909090">=False</font>)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-RunCommand"><strong>RunCommand</strong></a>(self, args)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.desktop_platform_backend.html#DesktopPlatformBackend">telemetry.internal.platform.desktop_platform_backend.DesktopPlatformBackend</a>:<br>
+<dl><dt><a name="LinuxPlatformBackend-FlushSystemCacheForDirectory"><strong>FlushSystemCacheForDirectory</strong></a>(self, directory)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-GetDeviceTypeName"><strong>GetDeviceTypeName</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.linux_based_platform_backend.html#LinuxBasedPlatformBackend">telemetry.internal.platform.linux_based_platform_backend.LinuxBasedPlatformBackend</a>:<br>
+<dl><dt><a name="LinuxPlatformBackend-GetClockTicks"><strong>GetClockTicks</strong></a>(*args, **kwargs)</dt><dd><tt>Returns&nbsp;the&nbsp;number&nbsp;of&nbsp;clock&nbsp;ticks&nbsp;per&nbsp;second.<br>
+&nbsp;<br>
+The&nbsp;proper&nbsp;way&nbsp;is&nbsp;to&nbsp;call&nbsp;os.sysconf('SC_CLK_TCK')&nbsp;but&nbsp;that&nbsp;is&nbsp;not&nbsp;easy&nbsp;to<br>
+do&nbsp;on&nbsp;Android/CrOS.&nbsp;In&nbsp;practice,&nbsp;nearly&nbsp;all&nbsp;Linux&nbsp;machines&nbsp;have&nbsp;a&nbsp;USER_HZ<br>
+of&nbsp;100,&nbsp;so&nbsp;just&nbsp;return&nbsp;that.</tt></dd></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-GetCpuStats"><strong>GetCpuStats</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-GetCpuTimestamp"><strong>GetCpuTimestamp</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-GetMemoryStats"><strong>GetMemoryStats</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-GetSystemCommitCharge"><strong>GetSystemCommitCharge</strong></a>(self)</dt><dd><tt>#&nbsp;Get&nbsp;the&nbsp;commit&nbsp;charge&nbsp;in&nbsp;kB.</tt></dd></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-GetSystemTotalPhysicalMemory"><strong>GetSystemTotalPhysicalMemory</strong></a>(*args, **kwargs)</dt></dl>
+
+<hr>
+Static methods inherited from <a href="telemetry.internal.platform.linux_based_platform_backend.html#LinuxBasedPlatformBackend">telemetry.internal.platform.linux_based_platform_backend.LinuxBasedPlatformBackend</a>:<br>
+<dl><dt><a name="LinuxPlatformBackend-ParseCStateSample"><strong>ParseCStateSample</strong></a>(sample)</dt><dd><tt>Parse&nbsp;a&nbsp;single&nbsp;c-state&nbsp;residency&nbsp;sample.<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;sample:&nbsp;A&nbsp;sample&nbsp;of&nbsp;c-state&nbsp;residency&nbsp;times&nbsp;to&nbsp;be&nbsp;parsed.&nbsp;Organized&nbsp;as<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;a&nbsp;dictionary&nbsp;mapping&nbsp;CPU&nbsp;name&nbsp;to&nbsp;a&nbsp;string&nbsp;containing&nbsp;all&nbsp;c-state<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;names,&nbsp;the&nbsp;times&nbsp;in&nbsp;each&nbsp;state,&nbsp;the&nbsp;latency&nbsp;of&nbsp;each&nbsp;state,&nbsp;and&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;time&nbsp;at&nbsp;which&nbsp;the&nbsp;sample&nbsp;was&nbsp;taken&nbsp;all&nbsp;separated&nbsp;by&nbsp;newlines.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Ex:&nbsp;{'cpu0':&nbsp;'C0<br>
+C1<br>
+5000<br>
+2000<br>
+20<br>
+30<br>
+1406673171'}<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;Returns:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Dictionary&nbsp;associating&nbsp;a&nbsp;c-state&nbsp;with&nbsp;a&nbsp;time.</tt></dd></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>:<br>
+<dl><dt><a name="LinuxPlatformBackend-CanCaptureVideo"><strong>CanCaptureVideo</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-CanMonitorNetworkData"><strong>CanMonitorNetworkData</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-CanMonitorThermalThrottling"><strong>CanMonitorThermalThrottling</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-CanTakeScreenshot"><strong>CanTakeScreenshot</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-CooperativelyShutdown"><strong>CooperativelyShutdown</strong></a>(self, proc, app_name)</dt><dd><tt>Cooperatively&nbsp;shut&nbsp;down&nbsp;the&nbsp;given&nbsp;process&nbsp;from&nbsp;subprocess.Popen.<br>
+&nbsp;<br>
+Currently&nbsp;this&nbsp;is&nbsp;only&nbsp;implemented&nbsp;on&nbsp;Windows.&nbsp;See<br>
+crbug.com/424024&nbsp;for&nbsp;background&nbsp;on&nbsp;why&nbsp;it&nbsp;was&nbsp;added.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;proc:&nbsp;a&nbsp;process&nbsp;object&nbsp;returned&nbsp;from&nbsp;subprocess.Popen.<br>
+&nbsp;&nbsp;app_name:&nbsp;on&nbsp;Windows,&nbsp;is&nbsp;the&nbsp;prefix&nbsp;of&nbsp;the&nbsp;application's&nbsp;window<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;class&nbsp;name&nbsp;that&nbsp;should&nbsp;be&nbsp;searched&nbsp;for.&nbsp;This&nbsp;helps&nbsp;ensure<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;that&nbsp;only&nbsp;the&nbsp;application's&nbsp;windows&nbsp;are&nbsp;closed.<br>
+&nbsp;<br>
+Returns&nbsp;True&nbsp;if&nbsp;it&nbsp;is&nbsp;believed&nbsp;the&nbsp;attempt&nbsp;succeeded.</tt></dd></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-DidCreateBrowser"><strong>DidCreateBrowser</strong></a>(self, browser, browser_backend)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-DidStartBrowser"><strong>DidStartBrowser</strong></a>(self, browser, browser_backend)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-FlushDnsCache"><strong>FlushDnsCache</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-GetNetworkData"><strong>GetNetworkData</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-GetRemotePort"><strong>GetRemotePort</strong></a>(self, port)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-InitPlatformBackend"><strong>InitPlatformBackend</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-IsCooperativeShutdownSupported"><strong>IsCooperativeShutdownSupported</strong></a>(self)</dt><dd><tt>Indicates&nbsp;whether&nbsp;CooperativelyShutdown,&nbsp;below,&nbsp;is&nbsp;supported.<br>
+It&nbsp;is&nbsp;not&nbsp;necessary&nbsp;to&nbsp;implement&nbsp;it&nbsp;on&nbsp;all&nbsp;platforms.</tt></dd></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-IsDisplayTracingSupported"><strong>IsDisplayTracingSupported</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-PathExists"><strong>PathExists</strong></a>(self, path, timeout<font color="#909090">=None</font>, retries<font color="#909090">=None</font>)</dt><dd><tt>Tests&nbsp;whether&nbsp;the&nbsp;given&nbsp;path&nbsp;exists&nbsp;on&nbsp;the&nbsp;target&nbsp;platform.<br>
+Args:<br>
+&nbsp;&nbsp;path:&nbsp;path&nbsp;in&nbsp;request.<br>
+&nbsp;&nbsp;timeout:&nbsp;timeout.<br>
+&nbsp;&nbsp;retries:&nbsp;num&nbsp;of&nbsp;retries.<br>
+Return:<br>
+&nbsp;&nbsp;Whether&nbsp;the&nbsp;path&nbsp;exists&nbsp;on&nbsp;the&nbsp;target&nbsp;platform.</tt></dd></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-PurgeUnpinnedMemory"><strong>PurgeUnpinnedMemory</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-SetFullPerformanceModeEnabled"><strong>SetFullPerformanceModeEnabled</strong></a>(self, enabled)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-SetPlatform"><strong>SetPlatform</strong></a>(self, platform)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-StartDisplayTracing"><strong>StartDisplayTracing</strong></a>(self)</dt><dd><tt>Start&nbsp;gathering&nbsp;a&nbsp;trace&nbsp;with&nbsp;frame&nbsp;timestamps&nbsp;close&nbsp;to&nbsp;physical<br>
+display.</tt></dd></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-StartVideoCapture"><strong>StartVideoCapture</strong></a>(self, min_bitrate_mbps)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-StopDisplayTracing"><strong>StopDisplayTracing</strong></a>(self)</dt><dd><tt>Stop&nbsp;gathering&nbsp;a&nbsp;trace&nbsp;with&nbsp;frame&nbsp;timestamps&nbsp;close&nbsp;to&nbsp;physical&nbsp;display.<br>
+&nbsp;<br>
+Returns&nbsp;raw&nbsp;tracing&nbsp;events&nbsp;that&nbsp;contain&nbsp;the&nbsp;timestamps&nbsp;of&nbsp;physical<br>
+display.</tt></dd></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-StopVideoCapture"><strong>StopVideoCapture</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-TakeScreenshot"><strong>TakeScreenshot</strong></a>(self, file_path)</dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(self, browser, browser_backend)</dt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>:<br>
+<dl><dt><a name="LinuxPlatformBackend-CreatePlatformForDevice"><strong>CreatePlatformForDevice</strong></a>(cls, device, finder_options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="LinuxPlatformBackend-SupportsDevice"><strong>SupportsDevice</strong></a>(cls, device)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Returns&nbsp;whether&nbsp;this&nbsp;platform&nbsp;backend&nbsp;supports&nbsp;intialization&nbsp;from&nbsp;the<br>
+device.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>forwarder_factory</strong></dt>
+</dl>
+<dl><dt><strong>is_host_platform</strong></dt>
+</dl>
+<dl><dt><strong>is_video_capture_running</strong></dt>
+</dl>
+<dl><dt><strong>network_controller_backend</strong></dt>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+</dl>
+<dl><dt><strong>running_browser_backends</strong></dt>
+</dl>
+<dl><dt><strong>tracing_controller_backend</strong></dt>
+</dl>
+<dl><dt><strong>wpr_ca_cert_path</strong></dt>
+</dl>
+<dl><dt><strong>wpr_http_device_port</strong></dt>
+</dl>
+<dl><dt><strong>wpr_https_device_port</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
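
The LinuxBasedPlatformBackend methods documented above include ParseCStateSample, whose docstring spells out the per-CPU sample layout: the c-state names, their residency times, their latencies, and a trailing timestamp, all newline-separated. As a rough sketch of that layout only (a standalone illustration, not the actual Telemetry implementation, and with the returned per-CPU structure chosen for clarity), such a sample could be unpacked into the documented "c-state to time" mapping like this:

```python
# Illustrative sketch: parse a c-state residency sample in the layout described
# by ParseCStateSample's docstring (N state names, N residency times,
# N latencies, then one timestamp, newline-separated per CPU). The real
# Telemetry code may differ in details such as aggregation and units.

def parse_cstate_sample(sample):
    """Map each CPU's c-state names to their residency times."""
    result = {}
    for cpu, data in sample.items():
        lines = data.splitlines()
        num_states = (len(lines) - 1) // 3  # names + times + latencies, plus timestamp
        names = lines[:num_states]
        times = [int(t) for t in lines[num_states:2 * num_states]]
        result[cpu] = dict(zip(names, times))
    return result


if __name__ == '__main__':
    sample = {'cpu0': 'C0\nC1\n5000\n2000\n20\n30\n1406673171'}
    print(parse_cstate_sample(sample))  # {'cpu0': {'C0': 5000, 'C1': 2000}}
```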
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.mac_platform_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.mac_platform_backend.html
new file mode 100644
index 0000000..f5772ee
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.mac_platform_backend.html
@@ -0,0 +1,253 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.mac_platform_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.mac_platform_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/mac_platform_backend.py">telemetry/internal/platform/mac_platform_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="ctypes.html">ctypes</a><br>
+<a href="telemetry.decorators.html">telemetry.decorators</a><br>
+<a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.os_version.html">telemetry.core.os_version</a><br>
+<a href="platform.html">platform</a><br>
+<a href="telemetry.internal.platform.posix_platform_backend.html">telemetry.internal.platform.posix_platform_backend</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.power_monitor.powermetrics_power_monitor.html">telemetry.internal.platform.power_monitor.powermetrics_power_monitor</a><br>
+<a href="telemetry.util.process_statistic_timeline_data.html">telemetry.util.process_statistic_timeline_data</a><br>
+<a href="resource.html">resource</a><br>
+</td><td width="25%" valign=top><a href="subprocess.html">subprocess</a><br>
+<a href="sys.html">sys</a><br>
+<a href="time.html">time</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.posix_platform_backend.html#PosixPlatformBackend">telemetry.internal.platform.posix_platform_backend.PosixPlatformBackend</a>(<a href="telemetry.internal.platform.desktop_platform_backend.html#DesktopPlatformBackend">telemetry.internal.platform.desktop_platform_backend.DesktopPlatformBackend</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.mac_platform_backend.html#MacPlatformBackend">MacPlatformBackend</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MacPlatformBackend">class <strong>MacPlatformBackend</strong></a>(<a href="telemetry.internal.platform.posix_platform_backend.html#PosixPlatformBackend">telemetry.internal.platform.posix_platform_backend.PosixPlatformBackend</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.mac_platform_backend.html#MacPlatformBackend">MacPlatformBackend</a></dd>
+<dd><a href="telemetry.internal.platform.posix_platform_backend.html#PosixPlatformBackend">telemetry.internal.platform.posix_platform_backend.PosixPlatformBackend</a></dd>
+<dd><a href="telemetry.internal.platform.desktop_platform_backend.html#DesktopPlatformBackend">telemetry.internal.platform.desktop_platform_backend.DesktopPlatformBackend</a></dd>
+<dd><a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="MacPlatformBackend-CanFlushIndividualFilesFromSystemCache"><strong>CanFlushIndividualFilesFromSystemCache</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-CanMeasurePerApplicationPower"><strong>CanMeasurePerApplicationPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-CanMonitorPower"><strong>CanMonitorPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-CanTakeScreenshot"><strong>CanTakeScreenshot</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-FlushEntireSystemCache"><strong>FlushEntireSystemCache</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-GetArchName"><strong>GetArchName</strong></a>(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-GetCpuStats"><strong>GetCpuStats</strong></a>(self, pid)</dt><dd><tt>Returns&nbsp;a&nbsp;dict&nbsp;of&nbsp;cpu&nbsp;statistics&nbsp;for&nbsp;the&nbsp;process&nbsp;represented&nbsp;by&nbsp;|pid|.</tt></dd></dl>
+
+<dl><dt><a name="MacPlatformBackend-GetCpuTimestamp"><strong>GetCpuTimestamp</strong></a>(self)</dt><dd><tt>Return&nbsp;current&nbsp;timestamp&nbsp;in&nbsp;seconds.</tt></dd></dl>
+
+<dl><dt><a name="MacPlatformBackend-GetMemoryStats"><strong>GetMemoryStats</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-GetOSName"><strong>GetOSName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-GetOSVersionName"><strong>GetOSVersionName</strong></a>(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-GetSystemCommitCharge"><strong>GetSystemCommitCharge</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-GetSystemTotalPhysicalMemory"><strong>GetSystemTotalPhysicalMemory</strong></a>(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-HasBeenThermallyThrottled"><strong>HasBeenThermallyThrottled</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-IsThermallyThrottled"><strong>IsThermallyThrottled</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-PurgeUnpinnedMemory"><strong>PurgeUnpinnedMemory</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-StartMonitoringPower"><strong>StartMonitoringPower</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-StopMonitoringPower"><strong>StopMonitoringPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-TakeScreenshot"><strong>TakeScreenshot</strong></a>(self, file_path)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="MacPlatformBackend-IsPlatformBackendForHost"><strong>IsPlatformBackendForHost</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.posix_platform_backend.html#PosixPlatformBackend">telemetry.internal.platform.posix_platform_backend.PosixPlatformBackend</a>:<br>
+<dl><dt><a name="MacPlatformBackend-CanLaunchApplication"><strong>CanLaunchApplication</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-GetChildPids"><strong>GetChildPids</strong></a>(self, pid)</dt><dd><tt>Returns&nbsp;a&nbsp;list&nbsp;of&nbsp;child&nbsp;pids&nbsp;of&nbsp;|pid|.</tt></dd></dl>
+
+<dl><dt><a name="MacPlatformBackend-GetCommandLine"><strong>GetCommandLine</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-GetFileContents"><strong>GetFileContents</strong></a>(self, path)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-GetPsOutput"><strong>GetPsOutput</strong></a>(self, columns, pid<font color="#909090">=None</font>)</dt><dd><tt>Returns&nbsp;output&nbsp;of&nbsp;the&nbsp;'ps'&nbsp;command&nbsp;as&nbsp;a&nbsp;list&nbsp;of&nbsp;lines.<br>
+Subclass&nbsp;should&nbsp;override&nbsp;this&nbsp;function.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;columns:&nbsp;A&nbsp;list&nbsp;of&nbsp;required&nbsp;columns,&nbsp;e.g.,&nbsp;['pid',&nbsp;'pss'].<br>
+&nbsp;&nbsp;pid:&nbsp;If&nbsp;not&nbsp;None,&nbsp;returns&nbsp;only&nbsp;the&nbsp;information&nbsp;of&nbsp;the&nbsp;process<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;with&nbsp;the&nbsp;pid.</tt></dd></dl>
+
+<dl><dt><a name="MacPlatformBackend-IsApplicationRunning"><strong>IsApplicationRunning</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-LaunchApplication"><strong>LaunchApplication</strong></a>(self, application, parameters<font color="#909090">=None</font>, elevate_privilege<font color="#909090">=False</font>)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-RunCommand"><strong>RunCommand</strong></a>(self, args)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.desktop_platform_backend.html#DesktopPlatformBackend">telemetry.internal.platform.desktop_platform_backend.DesktopPlatformBackend</a>:<br>
+<dl><dt><a name="MacPlatformBackend-FlushSystemCacheForDirectory"><strong>FlushSystemCacheForDirectory</strong></a>(self, directory)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-GetDeviceTypeName"><strong>GetDeviceTypeName</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>:<br>
+<dl><dt><a name="MacPlatformBackend-CanCaptureVideo"><strong>CanCaptureVideo</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-CanMonitorNetworkData"><strong>CanMonitorNetworkData</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-CanMonitorThermalThrottling"><strong>CanMonitorThermalThrottling</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-CooperativelyShutdown"><strong>CooperativelyShutdown</strong></a>(self, proc, app_name)</dt><dd><tt>Cooperatively&nbsp;shut&nbsp;down&nbsp;the&nbsp;given&nbsp;process&nbsp;from&nbsp;subprocess.Popen.<br>
+&nbsp;<br>
+Currently&nbsp;this&nbsp;is&nbsp;only&nbsp;implemented&nbsp;on&nbsp;Windows.&nbsp;See<br>
+crbug.com/424024&nbsp;for&nbsp;background&nbsp;on&nbsp;why&nbsp;it&nbsp;was&nbsp;added.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;proc:&nbsp;a&nbsp;process&nbsp;object&nbsp;returned&nbsp;from&nbsp;subprocess.Popen.<br>
+&nbsp;&nbsp;app_name:&nbsp;on&nbsp;Windows,&nbsp;is&nbsp;the&nbsp;prefix&nbsp;of&nbsp;the&nbsp;application's&nbsp;window<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;class&nbsp;name&nbsp;that&nbsp;should&nbsp;be&nbsp;searched&nbsp;for.&nbsp;This&nbsp;helps&nbsp;ensure<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;that&nbsp;only&nbsp;the&nbsp;application's&nbsp;windows&nbsp;are&nbsp;closed.<br>
+&nbsp;<br>
+Returns&nbsp;True&nbsp;if&nbsp;it&nbsp;is&nbsp;believed&nbsp;the&nbsp;attempt&nbsp;succeeded.</tt></dd></dl>
+
+<dl><dt><a name="MacPlatformBackend-DidCreateBrowser"><strong>DidCreateBrowser</strong></a>(self, browser, browser_backend)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-DidStartBrowser"><strong>DidStartBrowser</strong></a>(self, browser, browser_backend)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-FlushDnsCache"><strong>FlushDnsCache</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-GetNetworkData"><strong>GetNetworkData</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-GetRemotePort"><strong>GetRemotePort</strong></a>(self, port)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-InitPlatformBackend"><strong>InitPlatformBackend</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-InstallApplication"><strong>InstallApplication</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-IsCooperativeShutdownSupported"><strong>IsCooperativeShutdownSupported</strong></a>(self)</dt><dd><tt>Indicates&nbsp;whether&nbsp;CooperativelyShutdown,&nbsp;below,&nbsp;is&nbsp;supported.<br>
+It&nbsp;is&nbsp;not&nbsp;necessary&nbsp;to&nbsp;implement&nbsp;it&nbsp;on&nbsp;all&nbsp;platforms.</tt></dd></dl>
+
+<dl><dt><a name="MacPlatformBackend-IsDisplayTracingSupported"><strong>IsDisplayTracingSupported</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-PathExists"><strong>PathExists</strong></a>(self, path, timeout<font color="#909090">=None</font>, retries<font color="#909090">=None</font>)</dt><dd><tt>Tests&nbsp;whether&nbsp;the&nbsp;given&nbsp;path&nbsp;exists&nbsp;on&nbsp;the&nbsp;target&nbsp;platform.<br>
+Args:<br>
+&nbsp;&nbsp;path:&nbsp;path&nbsp;in&nbsp;request.<br>
+&nbsp;&nbsp;timeout:&nbsp;timeout.<br>
+&nbsp;&nbsp;retries:&nbsp;num&nbsp;of&nbsp;retries.<br>
+Return:<br>
+&nbsp;&nbsp;Whether&nbsp;the&nbsp;path&nbsp;exists&nbsp;on&nbsp;the&nbsp;target&nbsp;platform.</tt></dd></dl>
+
+<dl><dt><a name="MacPlatformBackend-ReadMsr"><strong>ReadMsr</strong></a>(self, msr_number, start<font color="#909090">=0</font>, length<font color="#909090">=64</font>)</dt><dd><tt>Read&nbsp;a&nbsp;CPU&nbsp;model-specific&nbsp;register&nbsp;(MSR).<br>
+&nbsp;<br>
+Which&nbsp;MSRs&nbsp;are&nbsp;available&nbsp;depends&nbsp;on&nbsp;the&nbsp;CPU&nbsp;model.<br>
+On&nbsp;systems&nbsp;with&nbsp;multiple&nbsp;CPUs,&nbsp;this&nbsp;function&nbsp;may&nbsp;run&nbsp;on&nbsp;any&nbsp;CPU.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;msr_number:&nbsp;The&nbsp;number&nbsp;of&nbsp;the&nbsp;register&nbsp;to&nbsp;read.<br>
+&nbsp;&nbsp;start:&nbsp;The&nbsp;least&nbsp;significant&nbsp;bit&nbsp;to&nbsp;read,&nbsp;zero-indexed.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;(Said&nbsp;another&nbsp;way,&nbsp;the&nbsp;number&nbsp;of&nbsp;bits&nbsp;to&nbsp;right-shift&nbsp;the&nbsp;MSR&nbsp;value.)<br>
+&nbsp;&nbsp;length:&nbsp;The&nbsp;number&nbsp;of&nbsp;bits&nbsp;to&nbsp;read.&nbsp;MSRs&nbsp;are&nbsp;64&nbsp;bits,&nbsp;even&nbsp;on&nbsp;32-bit&nbsp;CPUs.</tt></dd></dl>
+
+<dl><dt><a name="MacPlatformBackend-SetFullPerformanceModeEnabled"><strong>SetFullPerformanceModeEnabled</strong></a>(self, enabled)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-SetPlatform"><strong>SetPlatform</strong></a>(self, platform)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-StartDisplayTracing"><strong>StartDisplayTracing</strong></a>(self)</dt><dd><tt>Start&nbsp;gathering&nbsp;a&nbsp;trace&nbsp;with&nbsp;frame&nbsp;timestamps&nbsp;close&nbsp;to&nbsp;physical<br>
+display.</tt></dd></dl>
+
+<dl><dt><a name="MacPlatformBackend-StartVideoCapture"><strong>StartVideoCapture</strong></a>(self, min_bitrate_mbps)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-StopDisplayTracing"><strong>StopDisplayTracing</strong></a>(self)</dt><dd><tt>Stop&nbsp;gathering&nbsp;a&nbsp;trace&nbsp;with&nbsp;frame&nbsp;timestamps&nbsp;close&nbsp;to&nbsp;physical&nbsp;display.<br>
+&nbsp;<br>
+Returns&nbsp;raw&nbsp;tracing&nbsp;events&nbsp;that&nbsp;contain&nbsp;the&nbsp;timestamps&nbsp;of&nbsp;physical<br>
+display.</tt></dd></dl>
+
+<dl><dt><a name="MacPlatformBackend-StopVideoCapture"><strong>StopVideoCapture</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(self, browser, browser_backend)</dt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>:<br>
+<dl><dt><a name="MacPlatformBackend-CreatePlatformForDevice"><strong>CreatePlatformForDevice</strong></a>(cls, device, finder_options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="MacPlatformBackend-SupportsDevice"><strong>SupportsDevice</strong></a>(cls, device)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Returns&nbsp;whether&nbsp;this&nbsp;platform&nbsp;backend&nbsp;supports&nbsp;intialization&nbsp;from&nbsp;the<br>
+device.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>forwarder_factory</strong></dt>
+</dl>
+<dl><dt><strong>is_host_platform</strong></dt>
+</dl>
+<dl><dt><strong>is_video_capture_running</strong></dt>
+</dl>
+<dl><dt><strong>network_controller_backend</strong></dt>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+</dl>
+<dl><dt><strong>running_browser_backends</strong></dt>
+</dl>
+<dl><dt><strong>tracing_controller_backend</strong></dt>
+</dl>
+<dl><dt><strong>wpr_ca_cert_path</strong></dt>
+</dl>
+<dl><dt><strong>wpr_http_device_port</strong></dt>
+</dl>
+<dl><dt><strong>wpr_https_device_port</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
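
The ReadMsr entry documented above describes its start argument as the number of bits to right-shift the 64-bit MSR value and length as the number of bits to keep. A minimal sketch of that bit-field arithmetic, independent of any real MSR access (the helper name extract_msr_bits is illustrative and not part of Telemetry):

```python
# Sketch of the documented start/length semantics for ReadMsr: shift the raw
# 64-bit MSR value right by |start| bits and mask off |length| bits. This does
# not read a real model-specific register.

def extract_msr_bits(msr_value, start=0, length=64):
    """Return |length| bits of |msr_value| beginning at bit |start|."""
    mask = (1 << length) - 1
    return (msr_value >> start) & mask


if __name__ == '__main__':
    raw = 0x00000000DEADBEEF
    # Bits 16..31 of the raw value: 0xDEAD.
    print(hex(extract_msr_bits(raw, start=16, length=16)))
```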
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.msr_server_win.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.msr_server_win.html
new file mode 100644
index 0000000..3f42f64
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.msr_server_win.html
@@ -0,0 +1,184 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.msr_server_win</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.msr_server_win</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/msr_server_win.py">telemetry/internal/platform/msr_server_win.py</a></font></td></tr></table>
+    <p><tt>A&nbsp;server&nbsp;that&nbsp;serves&nbsp;MSR&nbsp;values&nbsp;over&nbsp;TCP.&nbsp;Takes&nbsp;a&nbsp;port&nbsp;as&nbsp;its&nbsp;sole&nbsp;parameter.<br>
+&nbsp;<br>
+The&nbsp;reference&nbsp;client&nbsp;for&nbsp;this&nbsp;server&nbsp;is&nbsp;msr_power_monitor.MsrPowerMonitor.<br>
+&nbsp;<br>
+Must&nbsp;be&nbsp;run&nbsp;as&nbsp;Administrator.&nbsp;We&nbsp;use&nbsp;TCP&nbsp;instead&nbsp;of&nbsp;named&nbsp;pipes&nbsp;or&nbsp;another&nbsp;IPC<br>
+to&nbsp;avoid&nbsp;dealing&nbsp;with&nbsp;the&nbsp;pipe&nbsp;security&nbsp;mechanisms.&nbsp;We&nbsp;take&nbsp;the&nbsp;port&nbsp;as&nbsp;a<br>
+parameter&nbsp;instead&nbsp;of&nbsp;choosing&nbsp;one,&nbsp;because&nbsp;it's&nbsp;hard&nbsp;to&nbsp;communicate&nbsp;the&nbsp;port<br>
+number&nbsp;across&nbsp;integrity&nbsp;levels.<br>
+&nbsp;<br>
+Requires&nbsp;WinRing0&nbsp;to&nbsp;be&nbsp;installed&nbsp;in&nbsp;the&nbsp;Python&nbsp;directory.<br>
+msr_power_monitor.MsrPowerMonitor&nbsp;does&nbsp;this&nbsp;if&nbsp;needed.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="SocketServer.html">SocketServer</a><br>
+<a href="argparse.html">argparse</a><br>
+</td><td width="25%" valign=top><a href="ctypes.html">ctypes</a><br>
+<a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="struct.html">struct</a><br>
+<a href="sys.html">sys</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="SocketServer.html#StreamRequestHandler">SocketServer.StreamRequestHandler</a>(<a href="SocketServer.html#BaseRequestHandler">SocketServer.BaseRequestHandler</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.msr_server_win.html#MsrRequestHandler">MsrRequestHandler</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="exceptions.html#OSError">exceptions.OSError</a>(<a href="exceptions.html#EnvironmentError">exceptions.EnvironmentError</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.msr_server_win.html#WinRing0Error">WinRing0Error</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MsrRequestHandler">class <strong>MsrRequestHandler</strong></a>(<a href="SocketServer.html#StreamRequestHandler">SocketServer.StreamRequestHandler</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.msr_server_win.html#MsrRequestHandler">MsrRequestHandler</a></dd>
+<dd><a href="SocketServer.html#StreamRequestHandler">SocketServer.StreamRequestHandler</a></dd>
+<dd><a href="SocketServer.html#BaseRequestHandler">SocketServer.BaseRequestHandler</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="MsrRequestHandler-handle"><strong>handle</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="SocketServer.html#StreamRequestHandler">SocketServer.StreamRequestHandler</a>:<br>
+<dl><dt><a name="MsrRequestHandler-finish"><strong>finish</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MsrRequestHandler-setup"><strong>setup</strong></a>(self)</dt></dl>
+
+<hr>
+Data and other attributes inherited from <a href="SocketServer.html#StreamRequestHandler">SocketServer.StreamRequestHandler</a>:<br>
+<dl><dt><strong>disable_nagle_algorithm</strong> = False</dl>
+
+<dl><dt><strong>rbufsize</strong> = -1</dl>
+
+<dl><dt><strong>timeout</strong> = None</dl>
+
+<dl><dt><strong>wbufsize</strong> = 0</dl>
+
+<hr>
+Methods inherited from <a href="SocketServer.html#BaseRequestHandler">SocketServer.BaseRequestHandler</a>:<br>
+<dl><dt><a name="MsrRequestHandler-__init__"><strong>__init__</strong></a>(self, request, client_address, server)</dt></dl>
+
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="WinRing0Error">class <strong>WinRing0Error</strong></a>(<a href="exceptions.html#OSError">exceptions.OSError</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.msr_server_win.html#WinRing0Error">WinRing0Error</a></dd>
+<dd><a href="exceptions.html#OSError">exceptions.OSError</a></dd>
+<dd><a href="exceptions.html#EnvironmentError">exceptions.EnvironmentError</a></dd>
+<dd><a href="exceptions.html#StandardError">exceptions.StandardError</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#OSError">exceptions.OSError</a>:<br>
+<dl><dt><a name="WinRing0Error-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#WinRing0Error-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#OSError">exceptions.OSError</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#WinRing0Error-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#EnvironmentError">exceptions.EnvironmentError</a>:<br>
+<dl><dt><a name="WinRing0Error-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="WinRing0Error-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#WinRing0Error-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#EnvironmentError">exceptions.EnvironmentError</a>:<br>
+<dl><dt><strong>errno</strong></dt>
+<dd><tt>exception&nbsp;errno</tt></dd>
+</dl>
+<dl><dt><strong>filename</strong></dt>
+<dd><tt>exception&nbsp;filename</tt></dd>
+</dl>
+<dl><dt><strong>strerror</strong></dt>
+<dd><tt>exception&nbsp;strerror</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="WinRing0Error-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#WinRing0Error-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="WinRing0Error-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#WinRing0Error-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="WinRing0Error-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#WinRing0Error-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="WinRing0Error-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#WinRing0Error-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="WinRing0Error-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#WinRing0Error-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="WinRing0Error-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#WinRing0Error-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="WinRing0Error-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="WinRing0Error-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-main"><strong>main</strong></a>()</dt></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>WINRING0_STATUS_MESSAGES</strong> = ('No error', 'Unsupported platform', 'Driver not loaded. You may need to run as Administrator', 'Driver not found', 'Driver unloaded by other process', 'Driver not loaded because of executing on Network Drive', 'Unknown error')</td></tr></table>
+</body></html>
\ No newline at end of file
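
Per its module docstring, msr_server_win.py is launched with a single port argument from an Administrator prompt (for example, `python telemetry/internal/platform/msr_server_win.py 9999`, where the port number here is only illustrative), and the module exposes the WINRING0_STATUS_MESSAGES tuple listed above. A small sketch of how such a status tuple could be turned into a readable message; the lookup helper is illustrative and not part of the module:

```python
# Sketch only: map a WinRing0 status code to its message. The tuple contents
# are copied from the generated documentation above; describe_winring0_status
# is a hypothetical helper, not part of msr_server_win.

WINRING0_STATUS_MESSAGES = (
    'No error',
    'Unsupported platform',
    'Driver not loaded. You may need to run as Administrator',
    'Driver not found',
    'Driver unloaded by other process',
    'Driver not loaded because of executing on Network Drive',
    'Unknown error',
)


def describe_winring0_status(status_code):
    """Return the human-readable message for a WinRing0 status code."""
    if 0 <= status_code < len(WINRING0_STATUS_MESSAGES):
        return WINRING0_STATUS_MESSAGES[status_code]
    return WINRING0_STATUS_MESSAGES[-1]  # Fall back to 'Unknown error'.


if __name__ == '__main__':
    print(describe_winring0_status(2))
```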
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.network_controller_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.network_controller_backend.html
new file mode 100644
index 0000000..2525f4d
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.network_controller_backend.html
@@ -0,0 +1,225 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.network_controller_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.network_controller_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/network_controller_backend.py">telemetry/internal/platform/network_controller_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.forwarders.html">telemetry.internal.forwarders</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.util.webpagereplay.html">telemetry.internal.util.webpagereplay</a><br>
+</td><td width="25%" valign=top><a href="telemetry.util.wpr_modes.html">telemetry.util.wpr_modes</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.network_controller_backend.html#NetworkControllerBackend">NetworkControllerBackend</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.network_controller_backend.html#ArchiveDoesNotExistError">ArchiveDoesNotExistError</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.internal.platform.network_controller_backend.html#ReplayAndBrowserPortsError">ReplayAndBrowserPortsError</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ArchiveDoesNotExistError">class <strong>ArchiveDoesNotExistError</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Raised&nbsp;when&nbsp;the&nbsp;archive&nbsp;path&nbsp;does&nbsp;not&nbsp;exist&nbsp;for&nbsp;replay&nbsp;mode.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.network_controller_backend.html#ArchiveDoesNotExistError">ArchiveDoesNotExistError</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="ArchiveDoesNotExistError-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#ArchiveDoesNotExistError-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#ArchiveDoesNotExistError-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="ArchiveDoesNotExistError-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ArchiveDoesNotExistError-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ArchiveDoesNotExistError-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#ArchiveDoesNotExistError-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ArchiveDoesNotExistError-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#ArchiveDoesNotExistError-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="ArchiveDoesNotExistError-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#ArchiveDoesNotExistError-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="ArchiveDoesNotExistError-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ArchiveDoesNotExistError-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#ArchiveDoesNotExistError-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="ArchiveDoesNotExistError-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ArchiveDoesNotExistError-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="ArchiveDoesNotExistError-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ArchiveDoesNotExistError-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#ArchiveDoesNotExistError-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="ArchiveDoesNotExistError-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="NetworkControllerBackend">class <strong>NetworkControllerBackend</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Control&nbsp;network&nbsp;settings&nbsp;and&nbsp;servers&nbsp;to&nbsp;simulate&nbsp;the&nbsp;Web.<br>
+&nbsp;<br>
+Network&nbsp;changes&nbsp;include&nbsp;forwarding&nbsp;device&nbsp;ports&nbsp;to&nbsp;host&nbsp;platform&nbsp;ports.<br>
+Web&nbsp;Page&nbsp;Replay&nbsp;is&nbsp;used&nbsp;to&nbsp;record&nbsp;and&nbsp;replay&nbsp;HTTP/HTTPS&nbsp;responses.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="NetworkControllerBackend-SetReplayArgs"><strong>SetReplayArgs</strong></a>(self, archive_path, wpr_mode, netsim, extra_wpr_args, make_javascript_deterministic<font color="#909090">=False</font>)</dt><dd><tt>Save&nbsp;the&nbsp;arguments&nbsp;needed&nbsp;for&nbsp;replay.<br>
+&nbsp;<br>
+To&nbsp;make&nbsp;the&nbsp;settings&nbsp;effective,&nbsp;this&nbsp;call&nbsp;must&nbsp;be&nbsp;followed&nbsp;by&nbsp;a&nbsp;call<br>
+to&nbsp;UpdateReplay.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;archive_path:&nbsp;a&nbsp;path&nbsp;to&nbsp;a&nbsp;specific&nbsp;WPR&nbsp;archive.<br>
+&nbsp;&nbsp;wpr_mode:&nbsp;one&nbsp;of&nbsp;wpr_modes.WPR_OFF,&nbsp;wpr_modes.WPR_APPEND,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;wpr_modes.WPR_REPLAY,&nbsp;or&nbsp;wpr_modes.WPR_RECORD.<br>
+&nbsp;&nbsp;netsim:&nbsp;a&nbsp;net_config&nbsp;string&nbsp;('dialup',&nbsp;'3g',&nbsp;'dsl',&nbsp;'cable',&nbsp;or&nbsp;'fios').<br>
+&nbsp;&nbsp;extra_wpr_args:&nbsp;a&nbsp;list&nbsp;of&nbsp;additional&nbsp;replay&nbsp;args&nbsp;(or&nbsp;an&nbsp;empty&nbsp;list).<br>
+&nbsp;&nbsp;make_javascript_deterministic:&nbsp;True&nbsp;if&nbsp;replay&nbsp;should&nbsp;inject&nbsp;a&nbsp;script<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;to&nbsp;make&nbsp;JavaScript&nbsp;behave&nbsp;deterministically&nbsp;(e.g.,&nbsp;override&nbsp;Date()).</tt></dd></dl>
+
+<dl><dt><a name="NetworkControllerBackend-StopReplay"><strong>StopReplay</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="NetworkControllerBackend-UpdateReplay"><strong>UpdateReplay</strong></a>(self, browser_backend<font color="#909090">=None</font>)</dt><dd><tt>Start&nbsp;or&nbsp;reuse&nbsp;Web&nbsp;Page&nbsp;Replay.<br>
+&nbsp;<br>
+UpdateReplay&nbsp;must&nbsp;be&nbsp;called&nbsp;after&nbsp;every&nbsp;call&nbsp;to&nbsp;SetReplayArgs.<br>
+&nbsp;<br>
+TODO(slamm):&nbsp;Update&nbsp;replay&nbsp;in&nbsp;SetReplayArgs&nbsp;once&nbsp;the&nbsp;browser_backend<br>
+&nbsp;&nbsp;&nbsp;&nbsp;dependencies&nbsp;move&nbsp;to&nbsp;platform.&nbsp;https://crbug.com/423962<br>
+&nbsp;&nbsp;&nbsp;&nbsp;browser_backend&nbsp;properties&nbsp;used:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;-&nbsp;Input:&nbsp;wpr_port_pairs<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;-&nbsp;Output:&nbsp;wpr_port_pairs&nbsp;(browser&nbsp;uses&nbsp;for&nbsp;--testing-fixed-*&nbsp;flags).<br>
+Args:<br>
+&nbsp;&nbsp;browser_backend:&nbsp;instance&nbsp;of&nbsp;telemetry.core.backends.browser_backend</tt></dd></dl>
+
+<dl><dt><a name="NetworkControllerBackend-__init__"><strong>__init__</strong></a>(self, platform_backend)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>wpr_http_device_port</strong></dt>
+</dl>
+<dl><dt><strong>wpr_https_device_port</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ReplayAndBrowserPortsError">class <strong>ReplayAndBrowserPortsError</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Raised&nbsp;when&nbsp;an&nbsp;existing&nbsp;browser&nbsp;would&nbsp;get&nbsp;different&nbsp;remote&nbsp;replay&nbsp;ports.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.network_controller_backend.html#ReplayAndBrowserPortsError">ReplayAndBrowserPortsError</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="ReplayAndBrowserPortsError-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#ReplayAndBrowserPortsError-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#ReplayAndBrowserPortsError-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="ReplayAndBrowserPortsError-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ReplayAndBrowserPortsError-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ReplayAndBrowserPortsError-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#ReplayAndBrowserPortsError-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ReplayAndBrowserPortsError-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#ReplayAndBrowserPortsError-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="ReplayAndBrowserPortsError-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#ReplayAndBrowserPortsError-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="ReplayAndBrowserPortsError-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ReplayAndBrowserPortsError-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#ReplayAndBrowserPortsError-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="ReplayAndBrowserPortsError-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ReplayAndBrowserPortsError-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="ReplayAndBrowserPortsError-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ReplayAndBrowserPortsError-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#ReplayAndBrowserPortsError-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="ReplayAndBrowserPortsError-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
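The SetReplayArgs/UpdateReplay docstrings above describe a two-step contract: SetReplayArgs only saves the replay settings, and a following UpdateReplay call is what actually starts (or reuses) Web Page Replay with them. The minimal, self-contained Python sketch below mirrors that contract; the stub class body and the WPR_REPLAY constant are illustrative assumptions, not the Telemetry implementation.

# Sketch of the documented SetReplayArgs -> UpdateReplay contract.
# The class body and WPR_REPLAY constant are stand-ins, not Telemetry code.
WPR_REPLAY = 'replay'  # stand-in for wpr_modes.WPR_REPLAY

class StubNetworkControllerBackend(object):
  def __init__(self, platform_backend):
    self._platform_backend = platform_backend
    self._pending_args = None
    self._active_args = None

  def SetReplayArgs(self, archive_path, wpr_mode, netsim, extra_wpr_args,
                    make_javascript_deterministic=False):
    # Only records the settings; nothing takes effect yet.
    self._pending_args = (archive_path, wpr_mode, netsim, list(extra_wpr_args),
                          make_javascript_deterministic)

  def UpdateReplay(self, browser_backend=None):
    # Must follow every SetReplayArgs call; this is where replay is applied.
    self._active_args = self._pending_args

backend = StubNetworkControllerBackend(platform_backend=None)
backend.SetReplayArgs('archive.wpr', WPR_REPLAY, '3g', [])
backend.UpdateReplay()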
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.platform_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.platform_backend.html
new file mode 100644
index 0000000..41c9bfa
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.platform_backend.html
@@ -0,0 +1,225 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.platform_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.platform_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/platform_backend.py">telemetry/internal/platform/platform_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.forwarders.do_nothing_forwarder.html">telemetry.internal.forwarders.do_nothing_forwarder</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.network_controller_backend.html">telemetry.internal.platform.network_controller_backend</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.tracing_controller_backend.html">telemetry.internal.platform.tracing_controller_backend</a><br>
+</td><td width="25%" valign=top><a href="weakref.html">weakref</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">PlatformBackend</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PlatformBackend">class <strong>PlatformBackend</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="PlatformBackend-CanCaptureVideo"><strong>CanCaptureVideo</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-CanFlushIndividualFilesFromSystemCache"><strong>CanFlushIndividualFilesFromSystemCache</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-CanLaunchApplication"><strong>CanLaunchApplication</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-CanMeasurePerApplicationPower"><strong>CanMeasurePerApplicationPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-CanMonitorNetworkData"><strong>CanMonitorNetworkData</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-CanMonitorPower"><strong>CanMonitorPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-CanMonitorThermalThrottling"><strong>CanMonitorThermalThrottling</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-CanTakeScreenshot"><strong>CanTakeScreenshot</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-CooperativelyShutdown"><strong>CooperativelyShutdown</strong></a>(self, proc, app_name)</dt><dd><tt>Cooperatively&nbsp;shut&nbsp;down&nbsp;the&nbsp;given&nbsp;process&nbsp;from&nbsp;subprocess.Popen.<br>
+&nbsp;<br>
+Currently&nbsp;this&nbsp;is&nbsp;only&nbsp;implemented&nbsp;on&nbsp;Windows.&nbsp;See<br>
+crbug.com/424024&nbsp;for&nbsp;background&nbsp;on&nbsp;why&nbsp;it&nbsp;was&nbsp;added.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;proc:&nbsp;a&nbsp;process&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;returned&nbsp;from&nbsp;subprocess.Popen.<br>
+&nbsp;&nbsp;app_name:&nbsp;on&nbsp;Windows,&nbsp;the&nbsp;prefix&nbsp;of&nbsp;the&nbsp;application's&nbsp;window<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;class&nbsp;name&nbsp;that&nbsp;should&nbsp;be&nbsp;searched&nbsp;for.&nbsp;This&nbsp;helps&nbsp;ensure<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;that&nbsp;only&nbsp;the&nbsp;application's&nbsp;windows&nbsp;are&nbsp;closed.<br>
+&nbsp;<br>
+Returns&nbsp;True&nbsp;if&nbsp;it&nbsp;is&nbsp;believed&nbsp;the&nbsp;attempt&nbsp;succeeded.</tt></dd></dl>
+
+<dl><dt><a name="PlatformBackend-DidCreateBrowser"><strong>DidCreateBrowser</strong></a>(self, browser, browser_backend)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-DidStartBrowser"><strong>DidStartBrowser</strong></a>(self, browser, browser_backend)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-FlushDnsCache"><strong>FlushDnsCache</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-FlushEntireSystemCache"><strong>FlushEntireSystemCache</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-FlushSystemCacheForDirectory"><strong>FlushSystemCacheForDirectory</strong></a>(self, directory)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-GetArchName"><strong>GetArchName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-GetChildPids"><strong>GetChildPids</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-GetCommandLine"><strong>GetCommandLine</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-GetCpuStats"><strong>GetCpuStats</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-GetCpuTimestamp"><strong>GetCpuTimestamp</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-GetDeviceTypeName"><strong>GetDeviceTypeName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-GetMemoryStats"><strong>GetMemoryStats</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-GetNetworkData"><strong>GetNetworkData</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-GetOSName"><strong>GetOSName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-GetOSVersionName"><strong>GetOSVersionName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-GetRemotePort"><strong>GetRemotePort</strong></a>(self, port)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-GetSystemCommitCharge"><strong>GetSystemCommitCharge</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-GetSystemTotalPhysicalMemory"><strong>GetSystemTotalPhysicalMemory</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-HasBeenThermallyThrottled"><strong>HasBeenThermallyThrottled</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-InitPlatformBackend"><strong>InitPlatformBackend</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-InstallApplication"><strong>InstallApplication</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-IsApplicationRunning"><strong>IsApplicationRunning</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-IsCooperativeShutdownSupported"><strong>IsCooperativeShutdownSupported</strong></a>(self)</dt><dd><tt>Indicates&nbsp;whether&nbsp;CooperativelyShutdown,&nbsp;below,&nbsp;is&nbsp;supported.<br>
+It&nbsp;is&nbsp;not&nbsp;necessary&nbsp;to&nbsp;implement&nbsp;it&nbsp;on&nbsp;all&nbsp;platforms.</tt></dd></dl>
+
+<dl><dt><a name="PlatformBackend-IsDisplayTracingSupported"><strong>IsDisplayTracingSupported</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-IsThermallyThrottled"><strong>IsThermallyThrottled</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-LaunchApplication"><strong>LaunchApplication</strong></a>(self, application, parameters<font color="#909090">=None</font>, elevate_privilege<font color="#909090">=False</font>)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-PathExists"><strong>PathExists</strong></a>(self, path, timeout<font color="#909090">=None</font>, retries<font color="#909090">=None</font>)</dt><dd><tt>Tests&nbsp;whether&nbsp;the&nbsp;given&nbsp;path&nbsp;exists&nbsp;on&nbsp;the&nbsp;target&nbsp;platform.<br>
+Args:<br>
+&nbsp;&nbsp;path:&nbsp;the&nbsp;path&nbsp;to&nbsp;check.<br>
+&nbsp;&nbsp;timeout:&nbsp;timeout.<br>
+&nbsp;&nbsp;retries:&nbsp;number&nbsp;of&nbsp;retries.<br>
+Returns:<br>
+&nbsp;&nbsp;Whether&nbsp;the&nbsp;path&nbsp;exists&nbsp;on&nbsp;the&nbsp;target&nbsp;platform.</tt></dd></dl>
+
+<dl><dt><a name="PlatformBackend-PurgeUnpinnedMemory"><strong>PurgeUnpinnedMemory</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-ReadMsr"><strong>ReadMsr</strong></a>(self, msr_number, start<font color="#909090">=0</font>, length<font color="#909090">=64</font>)</dt><dd><tt>Read&nbsp;a&nbsp;CPU&nbsp;model-specific&nbsp;register&nbsp;(MSR).<br>
+&nbsp;<br>
+Which&nbsp;MSRs&nbsp;are&nbsp;available&nbsp;depends&nbsp;on&nbsp;the&nbsp;CPU&nbsp;model.<br>
+On&nbsp;systems&nbsp;with&nbsp;multiple&nbsp;CPUs,&nbsp;this&nbsp;function&nbsp;may&nbsp;run&nbsp;on&nbsp;any&nbsp;CPU.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;msr_number:&nbsp;The&nbsp;number&nbsp;of&nbsp;the&nbsp;register&nbsp;to&nbsp;read.<br>
+&nbsp;&nbsp;start:&nbsp;The&nbsp;least&nbsp;significant&nbsp;bit&nbsp;to&nbsp;read,&nbsp;zero-indexed.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;(Said&nbsp;another&nbsp;way,&nbsp;the&nbsp;number&nbsp;of&nbsp;bits&nbsp;to&nbsp;right-shift&nbsp;the&nbsp;MSR&nbsp;value.)<br>
+&nbsp;&nbsp;length:&nbsp;The&nbsp;number&nbsp;of&nbsp;bits&nbsp;to&nbsp;read.&nbsp;MSRs&nbsp;are&nbsp;64&nbsp;bits,&nbsp;even&nbsp;on&nbsp;32-bit&nbsp;CPUs.</tt></dd></dl>
+
+<dl><dt><a name="PlatformBackend-SetFullPerformanceModeEnabled"><strong>SetFullPerformanceModeEnabled</strong></a>(self, enabled)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-SetPlatform"><strong>SetPlatform</strong></a>(self, platform)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-StartDisplayTracing"><strong>StartDisplayTracing</strong></a>(self)</dt><dd><tt>Start&nbsp;gathering&nbsp;a&nbsp;trace&nbsp;with&nbsp;frame&nbsp;timestamps&nbsp;close&nbsp;to&nbsp;the&nbsp;physical<br>
+display.</tt></dd></dl>
+
+<dl><dt><a name="PlatformBackend-StartMonitoringPower"><strong>StartMonitoringPower</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-StartVideoCapture"><strong>StartVideoCapture</strong></a>(self, min_bitrate_mbps)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-StopDisplayTracing"><strong>StopDisplayTracing</strong></a>(self)</dt><dd><tt>Stop&nbsp;gathering&nbsp;a&nbsp;trace&nbsp;with&nbsp;frame&nbsp;timestamps&nbsp;close&nbsp;to&nbsp;physical&nbsp;display.<br>
+&nbsp;<br>
+Returns&nbsp;raw&nbsp;tracing&nbsp;events&nbsp;that&nbsp;contain&nbsp;the&nbsp;timestamps&nbsp;of&nbsp;the&nbsp;physical<br>
+display.</tt></dd></dl>
+
+<dl><dt><a name="PlatformBackend-StopMonitoringPower"><strong>StopMonitoringPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-StopVideoCapture"><strong>StopVideoCapture</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-TakeScreenshot"><strong>TakeScreenshot</strong></a>(self, file_path)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(self, browser, browser_backend)</dt></dl>
+
+<dl><dt><a name="PlatformBackend-__init__"><strong>__init__</strong></a>(self, device<font color="#909090">=None</font>)</dt><dd><tt>Initialize&nbsp;an&nbsp;instance&nbsp;of&nbsp;<a href="#PlatformBackend">PlatformBackend</a>,&nbsp;optionally&nbsp;from&nbsp;a&nbsp;device.<br>
+Call&nbsp;sites&nbsp;need&nbsp;to&nbsp;use&nbsp;SupportsDevice&nbsp;before&nbsp;initialization&nbsp;to&nbsp;check<br>
+whether&nbsp;this&nbsp;platform&nbsp;backend&nbsp;supports&nbsp;the&nbsp;device.<br>
+If&nbsp;device&nbsp;is&nbsp;None,&nbsp;this&nbsp;constructor&nbsp;returns&nbsp;the&nbsp;host&nbsp;platform&nbsp;backend<br>
+which&nbsp;telemetry&nbsp;is&nbsp;running&nbsp;on.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;device:&nbsp;an&nbsp;instance&nbsp;of&nbsp;telemetry.core.platform.device.Device.</tt></dd></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="PlatformBackend-CreatePlatformForDevice"><strong>CreatePlatformForDevice</strong></a>(cls, device, finder_options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="PlatformBackend-IsPlatformBackendForHost"><strong>IsPlatformBackendForHost</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Returns&nbsp;whether&nbsp;this&nbsp;platform&nbsp;backend&nbsp;is&nbsp;the&nbsp;platform&nbsp;backend&nbsp;to&nbsp;be&nbsp;used<br>
+for&nbsp;the&nbsp;host&nbsp;device&nbsp;which&nbsp;telemetry&nbsp;is&nbsp;running&nbsp;on.</tt></dd></dl>
+
+<dl><dt><a name="PlatformBackend-SupportsDevice"><strong>SupportsDevice</strong></a>(cls, device)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Returns&nbsp;whether&nbsp;this&nbsp;platform&nbsp;backend&nbsp;supports&nbsp;initialization&nbsp;from&nbsp;the<br>
+device.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>forwarder_factory</strong></dt>
+</dl>
+<dl><dt><strong>is_host_platform</strong></dt>
+</dl>
+<dl><dt><strong>is_video_capture_running</strong></dt>
+</dl>
+<dl><dt><strong>network_controller_backend</strong></dt>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+</dl>
+<dl><dt><strong>running_browser_backends</strong></dt>
+</dl>
+<dl><dt><strong>tracing_controller_backend</strong></dt>
+</dl>
+<dl><dt><strong>wpr_ca_cert_path</strong></dt>
+</dl>
+<dl><dt><strong>wpr_http_device_port</strong></dt>
+</dl>
+<dl><dt><strong>wpr_https_device_port</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
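ReadMsr's start/length parameters documented above amount to a plain bit-field extraction from the 64-bit register value: shift right by start, then keep length bits. A small sketch of that arithmetic follows; the helper name and sample value are assumptions for illustration, not Telemetry code.

def extract_msr_field(msr_value, start=0, length=64):
  # Shift out the low 'start' bits, then mask down to 'length' bits.
  mask = (1 << length) - 1
  return (msr_value >> start) & mask

# Bits 8..15 of a sample 64-bit MSR value.
assert extract_msr_field(0x1200, start=8, length=8) == 0x12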
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.posix_platform_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.posix_platform_backend.html
new file mode 100644
index 0000000..cabca16
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.posix_platform_backend.html
@@ -0,0 +1,253 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.posix_platform_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.posix_platform_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/posix_platform_backend.py">telemetry/internal/platform/posix_platform_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.platform.desktop_platform_backend.html">telemetry.internal.platform.desktop_platform_backend</a><br>
+<a href="distutils.html">distutils</a><br>
+<a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+<a href="telemetry.internal.util.ps_util.html">telemetry.internal.util.ps_util</a><br>
+<a href="re.html">re</a><br>
+</td><td width="25%" valign=top><a href="stat.html">stat</a><br>
+<a href="subprocess.html">subprocess</a><br>
+<a href="sys.html">sys</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.desktop_platform_backend.html#DesktopPlatformBackend">telemetry.internal.platform.desktop_platform_backend.DesktopPlatformBackend</a>(<a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.posix_platform_backend.html#PosixPlatformBackend">PosixPlatformBackend</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PosixPlatformBackend">class <strong>PosixPlatformBackend</strong></a>(<a href="telemetry.internal.platform.desktop_platform_backend.html#DesktopPlatformBackend">telemetry.internal.platform.desktop_platform_backend.DesktopPlatformBackend</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.posix_platform_backend.html#PosixPlatformBackend">PosixPlatformBackend</a></dd>
+<dd><a href="telemetry.internal.platform.desktop_platform_backend.html#DesktopPlatformBackend">telemetry.internal.platform.desktop_platform_backend.DesktopPlatformBackend</a></dd>
+<dd><a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="PosixPlatformBackend-CanLaunchApplication"><strong>CanLaunchApplication</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-GetChildPids"><strong>GetChildPids</strong></a>(self, pid)</dt><dd><tt>Returns&nbsp;a&nbsp;list&nbsp;of&nbsp;child&nbsp;pids&nbsp;of&nbsp;|pid|.</tt></dd></dl>
+
+<dl><dt><a name="PosixPlatformBackend-GetCommandLine"><strong>GetCommandLine</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-GetFileContents"><strong>GetFileContents</strong></a>(self, path)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-GetPsOutput"><strong>GetPsOutput</strong></a>(self, columns, pid<font color="#909090">=None</font>)</dt><dd><tt>Returns&nbsp;output&nbsp;of&nbsp;the&nbsp;'ps'&nbsp;command&nbsp;as&nbsp;a&nbsp;list&nbsp;of&nbsp;lines.<br>
+Subclasses&nbsp;should&nbsp;override&nbsp;this&nbsp;function.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;columns:&nbsp;A&nbsp;list&nbsp;of&nbsp;required&nbsp;columns,&nbsp;e.g.,&nbsp;['pid',&nbsp;'pss'].<br>
+&nbsp;&nbsp;pid:&nbsp;If&nbsp;not&nbsp;None,&nbsp;returns&nbsp;only&nbsp;the&nbsp;information&nbsp;of&nbsp;the&nbsp;process<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;with&nbsp;the&nbsp;pid.</tt></dd></dl>
+
+<dl><dt><a name="PosixPlatformBackend-IsApplicationRunning"><strong>IsApplicationRunning</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-LaunchApplication"><strong>LaunchApplication</strong></a>(self, application, parameters<font color="#909090">=None</font>, elevate_privilege<font color="#909090">=False</font>)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-RunCommand"><strong>RunCommand</strong></a>(self, args)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.desktop_platform_backend.html#DesktopPlatformBackend">telemetry.internal.platform.desktop_platform_backend.DesktopPlatformBackend</a>:<br>
+<dl><dt><a name="PosixPlatformBackend-FlushSystemCacheForDirectory"><strong>FlushSystemCacheForDirectory</strong></a>(self, directory)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-GetDeviceTypeName"><strong>GetDeviceTypeName</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>:<br>
+<dl><dt><a name="PosixPlatformBackend-CanCaptureVideo"><strong>CanCaptureVideo</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-CanFlushIndividualFilesFromSystemCache"><strong>CanFlushIndividualFilesFromSystemCache</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-CanMeasurePerApplicationPower"><strong>CanMeasurePerApplicationPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-CanMonitorNetworkData"><strong>CanMonitorNetworkData</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-CanMonitorPower"><strong>CanMonitorPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-CanMonitorThermalThrottling"><strong>CanMonitorThermalThrottling</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-CanTakeScreenshot"><strong>CanTakeScreenshot</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-CooperativelyShutdown"><strong>CooperativelyShutdown</strong></a>(self, proc, app_name)</dt><dd><tt>Cooperatively&nbsp;shut&nbsp;down&nbsp;the&nbsp;given&nbsp;process&nbsp;from&nbsp;subprocess.Popen.<br>
+&nbsp;<br>
+Currently&nbsp;this&nbsp;is&nbsp;only&nbsp;implemented&nbsp;on&nbsp;Windows.&nbsp;See<br>
+crbug.com/424024&nbsp;for&nbsp;background&nbsp;on&nbsp;why&nbsp;it&nbsp;was&nbsp;added.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;proc:&nbsp;a&nbsp;process&nbsp;object&nbsp;returned&nbsp;from&nbsp;subprocess.Popen.<br>
+&nbsp;&nbsp;app_name:&nbsp;on&nbsp;Windows,&nbsp;the&nbsp;prefix&nbsp;of&nbsp;the&nbsp;application's&nbsp;window<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;class&nbsp;name&nbsp;that&nbsp;should&nbsp;be&nbsp;searched&nbsp;for.&nbsp;This&nbsp;helps&nbsp;ensure<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;that&nbsp;only&nbsp;the&nbsp;application's&nbsp;windows&nbsp;are&nbsp;closed.<br>
+&nbsp;<br>
+Returns&nbsp;True&nbsp;if&nbsp;it&nbsp;is&nbsp;believed&nbsp;the&nbsp;attempt&nbsp;succeeded.</tt></dd></dl>
+
+<dl><dt><a name="PosixPlatformBackend-DidCreateBrowser"><strong>DidCreateBrowser</strong></a>(self, browser, browser_backend)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-DidStartBrowser"><strong>DidStartBrowser</strong></a>(self, browser, browser_backend)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-FlushDnsCache"><strong>FlushDnsCache</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-FlushEntireSystemCache"><strong>FlushEntireSystemCache</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-GetArchName"><strong>GetArchName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-GetCpuStats"><strong>GetCpuStats</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-GetCpuTimestamp"><strong>GetCpuTimestamp</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-GetMemoryStats"><strong>GetMemoryStats</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-GetNetworkData"><strong>GetNetworkData</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-GetOSName"><strong>GetOSName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-GetOSVersionName"><strong>GetOSVersionName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-GetRemotePort"><strong>GetRemotePort</strong></a>(self, port)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-GetSystemCommitCharge"><strong>GetSystemCommitCharge</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-GetSystemTotalPhysicalMemory"><strong>GetSystemTotalPhysicalMemory</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-HasBeenThermallyThrottled"><strong>HasBeenThermallyThrottled</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-InitPlatformBackend"><strong>InitPlatformBackend</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-InstallApplication"><strong>InstallApplication</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-IsCooperativeShutdownSupported"><strong>IsCooperativeShutdownSupported</strong></a>(self)</dt><dd><tt>Indicates&nbsp;whether&nbsp;CooperativelyShutdown,&nbsp;below,&nbsp;is&nbsp;supported.<br>
+It&nbsp;is&nbsp;not&nbsp;necessary&nbsp;to&nbsp;implement&nbsp;it&nbsp;on&nbsp;all&nbsp;platforms.</tt></dd></dl>
+
+<dl><dt><a name="PosixPlatformBackend-IsDisplayTracingSupported"><strong>IsDisplayTracingSupported</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-IsThermallyThrottled"><strong>IsThermallyThrottled</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-PathExists"><strong>PathExists</strong></a>(self, path, timeout<font color="#909090">=None</font>, retries<font color="#909090">=None</font>)</dt><dd><tt>Tests&nbsp;whether&nbsp;the&nbsp;given&nbsp;path&nbsp;exists&nbsp;on&nbsp;the&nbsp;target&nbsp;platform.<br>
+Args:<br>
+&nbsp;&nbsp;path:&nbsp;the&nbsp;path&nbsp;to&nbsp;check.<br>
+&nbsp;&nbsp;timeout:&nbsp;timeout.<br>
+&nbsp;&nbsp;retries:&nbsp;number&nbsp;of&nbsp;retries.<br>
+Returns:<br>
+&nbsp;&nbsp;Whether&nbsp;the&nbsp;path&nbsp;exists&nbsp;on&nbsp;the&nbsp;target&nbsp;platform.</tt></dd></dl>
+
+<dl><dt><a name="PosixPlatformBackend-PurgeUnpinnedMemory"><strong>PurgeUnpinnedMemory</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-ReadMsr"><strong>ReadMsr</strong></a>(self, msr_number, start<font color="#909090">=0</font>, length<font color="#909090">=64</font>)</dt><dd><tt>Read&nbsp;a&nbsp;CPU&nbsp;model-specific&nbsp;register&nbsp;(MSR).<br>
+&nbsp;<br>
+Which&nbsp;MSRs&nbsp;are&nbsp;available&nbsp;depends&nbsp;on&nbsp;the&nbsp;CPU&nbsp;model.<br>
+On&nbsp;systems&nbsp;with&nbsp;multiple&nbsp;CPUs,&nbsp;this&nbsp;function&nbsp;may&nbsp;run&nbsp;on&nbsp;any&nbsp;CPU.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;msr_number:&nbsp;The&nbsp;number&nbsp;of&nbsp;the&nbsp;register&nbsp;to&nbsp;read.<br>
+&nbsp;&nbsp;start:&nbsp;The&nbsp;least&nbsp;significant&nbsp;bit&nbsp;to&nbsp;read,&nbsp;zero-indexed.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;(Said&nbsp;another&nbsp;way,&nbsp;the&nbsp;number&nbsp;of&nbsp;bits&nbsp;to&nbsp;right-shift&nbsp;the&nbsp;MSR&nbsp;value.)<br>
+&nbsp;&nbsp;length:&nbsp;The&nbsp;number&nbsp;of&nbsp;bits&nbsp;to&nbsp;read.&nbsp;MSRs&nbsp;are&nbsp;64&nbsp;bits,&nbsp;even&nbsp;on&nbsp;32-bit&nbsp;CPUs.</tt></dd></dl>
+
+<dl><dt><a name="PosixPlatformBackend-SetFullPerformanceModeEnabled"><strong>SetFullPerformanceModeEnabled</strong></a>(self, enabled)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-SetPlatform"><strong>SetPlatform</strong></a>(self, platform)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-StartDisplayTracing"><strong>StartDisplayTracing</strong></a>(self)</dt><dd><tt>Start&nbsp;gathering&nbsp;a&nbsp;trace&nbsp;with&nbsp;frame&nbsp;timestamps&nbsp;close&nbsp;to&nbsp;the&nbsp;physical<br>
+display.</tt></dd></dl>
+
+<dl><dt><a name="PosixPlatformBackend-StartMonitoringPower"><strong>StartMonitoringPower</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-StartVideoCapture"><strong>StartVideoCapture</strong></a>(self, min_bitrate_mbps)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-StopDisplayTracing"><strong>StopDisplayTracing</strong></a>(self)</dt><dd><tt>Stop&nbsp;gathering&nbsp;a&nbsp;trace&nbsp;with&nbsp;frame&nbsp;timestamps&nbsp;close&nbsp;to&nbsp;physical&nbsp;display.<br>
+&nbsp;<br>
+Returns&nbsp;raw&nbsp;tracing&nbsp;events&nbsp;that&nbsp;contain&nbsp;the&nbsp;timestamps&nbsp;of&nbsp;the&nbsp;physical<br>
+display.</tt></dd></dl>
+
+<dl><dt><a name="PosixPlatformBackend-StopMonitoringPower"><strong>StopMonitoringPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-StopVideoCapture"><strong>StopVideoCapture</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-TakeScreenshot"><strong>TakeScreenshot</strong></a>(self, file_path)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(self, browser, browser_backend)</dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-__init__"><strong>__init__</strong></a>(self, device<font color="#909090">=None</font>)</dt><dd><tt>Initialize&nbsp;an&nbsp;instance&nbsp;of&nbsp;PlatformBackend,&nbsp;optionally&nbsp;from&nbsp;a&nbsp;device.<br>
+Call&nbsp;sites&nbsp;need&nbsp;to&nbsp;use&nbsp;SupportsDevice&nbsp;before&nbsp;initialization&nbsp;to&nbsp;check<br>
+whether&nbsp;this&nbsp;platform&nbsp;backend&nbsp;supports&nbsp;the&nbsp;device.<br>
+If&nbsp;device&nbsp;is&nbsp;None,&nbsp;this&nbsp;constructor&nbsp;returns&nbsp;the&nbsp;host&nbsp;platform&nbsp;backend<br>
+which&nbsp;telemetry&nbsp;is&nbsp;running&nbsp;on.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;device:&nbsp;an&nbsp;instance&nbsp;of&nbsp;telemetry.core.platform.device.Device.</tt></dd></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>:<br>
+<dl><dt><a name="PosixPlatformBackend-CreatePlatformForDevice"><strong>CreatePlatformForDevice</strong></a>(cls, device, finder_options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="PosixPlatformBackend-IsPlatformBackendForHost"><strong>IsPlatformBackendForHost</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Returns&nbsp;whether&nbsp;this&nbsp;platform&nbsp;backend&nbsp;is&nbsp;the&nbsp;platform&nbsp;backend&nbsp;to&nbsp;be&nbsp;used<br>
+for&nbsp;the&nbsp;host&nbsp;device&nbsp;which&nbsp;telemetry&nbsp;is&nbsp;running&nbsp;on.</tt></dd></dl>
+
+<dl><dt><a name="PosixPlatformBackend-SupportsDevice"><strong>SupportsDevice</strong></a>(cls, device)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Returns&nbsp;whether&nbsp;this&nbsp;platform&nbsp;backend&nbsp;supports&nbsp;initialization&nbsp;from&nbsp;the<br>
+device.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>forwarder_factory</strong></dt>
+</dl>
+<dl><dt><strong>is_host_platform</strong></dt>
+</dl>
+<dl><dt><strong>is_video_capture_running</strong></dt>
+</dl>
+<dl><dt><strong>network_controller_backend</strong></dt>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+</dl>
+<dl><dt><strong>running_browser_backends</strong></dt>
+</dl>
+<dl><dt><strong>tracing_controller_backend</strong></dt>
+</dl>
+<dl><dt><strong>wpr_ca_cert_path</strong></dt>
+</dl>
+<dl><dt><strong>wpr_http_device_port</strong></dt>
+</dl>
+<dl><dt><strong>wpr_https_device_port</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
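GetPsOutput and GetChildPids above suggest the usual way a POSIX backend can walk a process tree: request 'pid' and 'ppid' columns from ps, then follow parent links recursively. The sketch below shows such a walk over already-parsed rows; the row format and the recursive strategy are assumptions for illustration, not the PosixPlatformBackend implementation.

def child_pids(rows, root_pid):
  # rows: list of (pid, ppid) pairs, e.g. parsed from GetPsOutput(['pid', 'ppid']).
  children = []
  for pid, ppid in rows:
    if ppid == root_pid:
      children.append(pid)
      children.extend(child_pids(rows, pid))  # also collect grandchildren
  return children

rows = [(10, 1), (11, 10), (12, 11), (20, 1)]
assert child_pids(rows, 10) == [11, 12]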
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.power_monitor.android_dumpsys_power_monitor.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.power_monitor.android_dumpsys_power_monitor.html
new file mode 100644
index 0000000..42a5df9
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.power_monitor.android_dumpsys_power_monitor.html
@@ -0,0 +1,91 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.power_monitor.android_dumpsys_power_monitor</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.power_monitor.html"><font color="#ffffff">power_monitor</font></a>.android_dumpsys_power_monitor</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/power_monitor/android_dumpsys_power_monitor.py">telemetry/internal/platform/power_monitor/android_dumpsys_power_monitor.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="csv.html">csv</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.power_monitor.html">telemetry.internal.platform.power_monitor</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.power_monitor.android_dumpsys_power_monitor.html#DumpsysPowerMonitor">DumpsysPowerMonitor</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="DumpsysPowerMonitor">class <strong>DumpsysPowerMonitor</strong></a>(<a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt><a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">PowerMonitor</a>&nbsp;that&nbsp;relies&nbsp;on&nbsp;the&nbsp;dumpsys&nbsp;batterystats&nbsp;to&nbsp;monitor&nbsp;the&nbsp;power<br>
+consumption&nbsp;of&nbsp;a&nbsp;single&nbsp;Android&nbsp;application.&nbsp;This&nbsp;measurement&nbsp;uses&nbsp;a&nbsp;heuristic<br>
+and&nbsp;reports&nbsp;the&nbsp;same&nbsp;information&nbsp;end-users&nbsp;see&nbsp;in&nbsp;the&nbsp;battery&nbsp;application.<br>
+Available&nbsp;on&nbsp;Android&nbsp;L&nbsp;and&nbsp;higher&nbsp;releases.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.power_monitor.android_dumpsys_power_monitor.html#DumpsysPowerMonitor">DumpsysPowerMonitor</a></dd>
+<dd><a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="DumpsysPowerMonitor-CanMonitorPower"><strong>CanMonitorPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DumpsysPowerMonitor-StartMonitoringPower"><strong>StartMonitoringPower</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="DumpsysPowerMonitor-StopMonitoringPower"><strong>StopMonitoringPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DumpsysPowerMonitor-__init__"><strong>__init__</strong></a>(self, battery, platform_backend)</dt><dd><tt>Constructor.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;battery:&nbsp;A&nbsp;BatteryUtil&nbsp;instance.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;platform_backend:&nbsp;A&nbsp;LinuxBasedPlatformBackend&nbsp;instance.</tt></dd></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="DumpsysPowerMonitor-ProcessPowerData"><strong>ProcessPowerData</strong></a>(power_data, voltage, package)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>:<br>
+<dl><dt><a name="DumpsysPowerMonitor-CanMeasurePerApplicationPower"><strong>CanMeasurePerApplicationPower</strong></a>(self)</dt><dd><tt>Returns&nbsp;True&nbsp;if&nbsp;the&nbsp;power&nbsp;monitor&nbsp;can&nbsp;measure&nbsp;power&nbsp;for&nbsp;the&nbsp;target<br>
+application&nbsp;in&nbsp;isolation.&nbsp;False&nbsp;if&nbsp;power&nbsp;measurement&nbsp;is&nbsp;for&nbsp;full&nbsp;system<br>
+energy&nbsp;consumption.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
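The power monitor classes documented here all share the same probe-then-bracket usage: check CanMonitorPower, call StartMonitoringPower(browser) before the measured work, and read the results from StopMonitoringPower. A hedged sketch of that pattern with a trivial stand-in monitor follows; the class and its result dictionary are assumptions, not a Telemetry class.

import time

class TimestampPowerMonitor(object):
  # Stand-in that only records wall-clock time; real monitors return power data.
  def CanMonitorPower(self):
    return True

  def StartMonitoringPower(self, browser):
    self._start = time.time()

  def StopMonitoringPower(self):
    return {'monitoring_duration_s': time.time() - self._start}

monitor = TimestampPowerMonitor()
if monitor.CanMonitorPower():
  monitor.StartMonitoringPower(browser=None)
  # ... run the measured workload here ...
  results = monitor.StopMonitoringPower()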
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.power_monitor.android_fuelgauge_power_monitor.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.power_monitor.android_fuelgauge_power_monitor.html
new file mode 100644
index 0000000..dd28987
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.power_monitor.android_fuelgauge_power_monitor.html
@@ -0,0 +1,88 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.power_monitor.android_fuelgauge_power_monitor</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.power_monitor.html"><font color="#ffffff">power_monitor</font></a>.android_fuelgauge_power_monitor</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/power_monitor/android_fuelgauge_power_monitor.py">telemetry/internal/platform/power_monitor/android_fuelgauge_power_monitor.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.power_monitor.html">telemetry.internal.platform.power_monitor</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.power_monitor.android_fuelgauge_power_monitor.html#FuelGaugePowerMonitor">FuelGaugePowerMonitor</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="FuelGaugePowerMonitor">class <strong>FuelGaugePowerMonitor</strong></a>(<a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt><a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">PowerMonitor</a>&nbsp;that&nbsp;relies&nbsp;on&nbsp;the&nbsp;fuel&nbsp;gauge&nbsp;chips&nbsp;to&nbsp;monitor&nbsp;the&nbsp;power<br>
+consumption&nbsp;of&nbsp;an&nbsp;Android&nbsp;device.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.power_monitor.android_fuelgauge_power_monitor.html#FuelGaugePowerMonitor">FuelGaugePowerMonitor</a></dd>
+<dd><a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="FuelGaugePowerMonitor-CanMonitorPower"><strong>CanMonitorPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FuelGaugePowerMonitor-StartMonitoringPower"><strong>StartMonitoringPower</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="FuelGaugePowerMonitor-StopMonitoringPower"><strong>StopMonitoringPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FuelGaugePowerMonitor-__init__"><strong>__init__</strong></a>(self, battery, platform_backend)</dt><dd><tt>Constructor.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;battery:&nbsp;A&nbsp;BatteryUtil&nbsp;instance.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;platform_backend:&nbsp;A&nbsp;LinuxBasedPlatformBackend&nbsp;instance.</tt></dd></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="FuelGaugePowerMonitor-ProcessPowerData"><strong>ProcessPowerData</strong></a>(voltage, fuel_gauge_delta)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>:<br>
+<dl><dt><a name="FuelGaugePowerMonitor-CanMeasurePerApplicationPower"><strong>CanMeasurePerApplicationPower</strong></a>(self)</dt><dd><tt>Returns&nbsp;True&nbsp;if&nbsp;the&nbsp;power&nbsp;monitor&nbsp;can&nbsp;measure&nbsp;power&nbsp;for&nbsp;the&nbsp;target<br>
+application&nbsp;in&nbsp;isolation.&nbsp;False&nbsp;if&nbsp;power&nbsp;measurement&nbsp;is&nbsp;for&nbsp;full&nbsp;system<br>
+energy&nbsp;consumption.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.power_monitor.android_temperature_monitor.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.power_monitor.android_temperature_monitor.html
new file mode 100644
index 0000000..b663ab9
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.power_monitor.android_temperature_monitor.html
@@ -0,0 +1,79 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.power_monitor.android_temperature_monitor</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.power_monitor.html"><font color="#ffffff">power_monitor</font></a>.android_temperature_monitor</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/power_monitor/android_temperature_monitor.py">telemetry/internal/platform/power_monitor/android_temperature_monitor.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="devil.android.device_errors.html">devil.android.device_errors</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.power_monitor.html">telemetry.internal.platform.power_monitor</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.power_monitor.android_temperature_monitor.html#AndroidTemperatureMonitor">AndroidTemperatureMonitor</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="AndroidTemperatureMonitor">class <strong>AndroidTemperatureMonitor</strong></a>(<a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Returns&nbsp;temperature&nbsp;results&nbsp;in&nbsp;power&nbsp;monitor&nbsp;dictionary&nbsp;format.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.power_monitor.android_temperature_monitor.html#AndroidTemperatureMonitor">AndroidTemperatureMonitor</a></dd>
+<dd><a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="AndroidTemperatureMonitor-CanMonitorPower"><strong>CanMonitorPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidTemperatureMonitor-StartMonitoringPower"><strong>StartMonitoringPower</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="AndroidTemperatureMonitor-StopMonitoringPower"><strong>StopMonitoringPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidTemperatureMonitor-__init__"><strong>__init__</strong></a>(self, device)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>:<br>
+<dl><dt><a name="AndroidTemperatureMonitor-CanMeasurePerApplicationPower"><strong>CanMeasurePerApplicationPower</strong></a>(self)</dt><dd><tt>Returns&nbsp;True&nbsp;if&nbsp;the&nbsp;power&nbsp;monitor&nbsp;can&nbsp;measure&nbsp;power&nbsp;for&nbsp;the&nbsp;target<br>
+application&nbsp;in&nbsp;isolation.&nbsp;False&nbsp;if&nbsp;power&nbsp;measurement&nbsp;is&nbsp;for&nbsp;full&nbsp;system<br>
+energy&nbsp;consumption.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.power_monitor.cros_power_monitor.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.power_monitor.cros_power_monitor.html
new file mode 100644
index 0000000..e3688ed
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.power_monitor.cros_power_monitor.html
@@ -0,0 +1,171 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.power_monitor.cros_power_monitor</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.power_monitor.html"><font color="#ffffff">power_monitor</font></a>.cros_power_monitor</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/power_monitor/cros_power_monitor.py">telemetry/internal/platform/power_monitor/cros_power_monitor.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="collections.html">collections</a><br>
+</td><td width="25%" valign=top><a href="telemetry.decorators.html">telemetry.decorators</a><br>
+</td><td width="25%" valign=top><a href="re.html">re</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.power_monitor.sysfs_power_monitor.html">telemetry.internal.platform.power_monitor.sysfs_power_monitor</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.power_monitor.sysfs_power_monitor.html#SysfsPowerMonitor">telemetry.internal.platform.power_monitor.sysfs_power_monitor.SysfsPowerMonitor</a>(<a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.power_monitor.cros_power_monitor.html#CrosPowerMonitor">CrosPowerMonitor</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="CrosPowerMonitor">class <strong>CrosPowerMonitor</strong></a>(<a href="telemetry.internal.platform.power_monitor.sysfs_power_monitor.html#SysfsPowerMonitor">telemetry.internal.platform.power_monitor.sysfs_power_monitor.SysfsPowerMonitor</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>PowerMonitor&nbsp;that&nbsp;relies&nbsp;on&nbsp;'dump_power_status'&nbsp;to&nbsp;monitor&nbsp;power<br>
+consumption&nbsp;of&nbsp;a&nbsp;single&nbsp;ChromeOS&nbsp;application.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.power_monitor.cros_power_monitor.html#CrosPowerMonitor">CrosPowerMonitor</a></dd>
+<dd><a href="telemetry.internal.platform.power_monitor.sysfs_power_monitor.html#SysfsPowerMonitor">telemetry.internal.platform.power_monitor.sysfs_power_monitor.SysfsPowerMonitor</a></dd>
+<dd><a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="CrosPowerMonitor-CanMonitorPower"><strong>CanMonitorPower</strong></a>(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="CrosPowerMonitor-StartMonitoringPower"><strong>StartMonitoringPower</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="CrosPowerMonitor-StopMonitoringPower"><strong>StopMonitoringPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CrosPowerMonitor-__init__"><strong>__init__</strong></a>(self, platform_backend)</dt><dd><tt>Constructor.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;platform_backend:&nbsp;A&nbsp;LinuxBasedPlatformBackend&nbsp;object.<br>
+&nbsp;<br>
+Attributes:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;_initial_power:&nbsp;The&nbsp;result&nbsp;of&nbsp;'dump_power_status'&nbsp;before&nbsp;the&nbsp;test.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;_start_time:&nbsp;The&nbsp;epoch&nbsp;time&nbsp;at&nbsp;which&nbsp;the&nbsp;test&nbsp;starts&nbsp;executing.</tt></dd></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="CrosPowerMonitor-IsOnBatteryPower"><strong>IsOnBatteryPower</strong></a>(status, board)</dt><dd><tt>Determines&nbsp;if&nbsp;the&nbsp;devices&nbsp;is&nbsp;being&nbsp;charged.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;status:&nbsp;The&nbsp;parsed&nbsp;result&nbsp;of&nbsp;'dump_power_status'<br>
+&nbsp;&nbsp;&nbsp;&nbsp;board:&nbsp;The&nbsp;name&nbsp;of&nbsp;the&nbsp;board&nbsp;running&nbsp;the&nbsp;test.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;True&nbsp;if&nbsp;the&nbsp;device&nbsp;is&nbsp;on&nbsp;battery&nbsp;power;&nbsp;False&nbsp;otherwise.</tt></dd></dl>
+
+<dl><dt><a name="CrosPowerMonitor-ParsePower"><strong>ParsePower</strong></a>(initial_stats, final_stats, length_h)</dt><dd><tt>Parse&nbsp;output&nbsp;of&nbsp;'dump_power_status'<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;initial_stats:&nbsp;The&nbsp;output&nbsp;of&nbsp;'dump_power_status'&nbsp;before&nbsp;the&nbsp;test.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;final_stats:&nbsp;The&nbsp;output&nbsp;of&nbsp;'dump_power_status'&nbsp;after&nbsp;the&nbsp;test.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;length_h:&nbsp;The&nbsp;length&nbsp;of&nbsp;the&nbsp;test&nbsp;in&nbsp;hours.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;Dictionary&nbsp;in&nbsp;the&nbsp;format&nbsp;returned&nbsp;by&nbsp;<a href="#CrosPowerMonitor-StopMonitoringPower">StopMonitoringPower</a>().</tt></dd></dl>
+
+<dl><dt><a name="CrosPowerMonitor-ParsePowerStatus"><strong>ParsePowerStatus</strong></a>(sample)</dt><dd><tt>Parses&nbsp;'dump_power_status'&nbsp;command&nbsp;output.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;sample:&nbsp;The&nbsp;output&nbsp;of&nbsp;'dump_power_status'<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;Dictionary&nbsp;containing&nbsp;all&nbsp;fields&nbsp;from&nbsp;'dump_power_status'</tt></dd></dl>
+
+<dl><dt><a name="CrosPowerMonitor-SplitSample"><strong>SplitSample</strong></a>(sample)</dt><dd><tt>Splits&nbsp;a&nbsp;power&nbsp;and&nbsp;time&nbsp;sample&nbsp;into&nbsp;the&nbsp;two&nbsp;separate&nbsp;values.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;sample:&nbsp;The&nbsp;result&nbsp;of&nbsp;calling&nbsp;'dump_power_status;&nbsp;date&nbsp;+%s'&nbsp;on&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;device.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;A&nbsp;tuple&nbsp;of&nbsp;power&nbsp;sample&nbsp;and&nbsp;epoch&nbsp;time&nbsp;of&nbsp;the&nbsp;sample.</tt></dd></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.power_monitor.sysfs_power_monitor.html#SysfsPowerMonitor">telemetry.internal.platform.power_monitor.sysfs_power_monitor.SysfsPowerMonitor</a>:<br>
+<dl><dt><a name="CrosPowerMonitor-GetCpuFreq"><strong>GetCpuFreq</strong></a>(self)</dt><dd><tt>Retrieve&nbsp;CPU&nbsp;frequency&nbsp;times&nbsp;from&nbsp;the&nbsp;device.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;Dictionary&nbsp;containing&nbsp;frequency&nbsp;times&nbsp;for&nbsp;each&nbsp;CPU.</tt></dd></dl>
+
+<dl><dt><a name="CrosPowerMonitor-GetCpuState"><strong>GetCpuState</strong></a>(self)</dt><dd><tt>Retrieve&nbsp;CPU&nbsp;c-state&nbsp;residency&nbsp;times&nbsp;from&nbsp;the&nbsp;device.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;Dictionary&nbsp;containing&nbsp;c-state&nbsp;residency&nbsp;times&nbsp;for&nbsp;each&nbsp;CPU.</tt></dd></dl>
+
+<hr>
+Static methods inherited from <a href="telemetry.internal.platform.power_monitor.sysfs_power_monitor.html#SysfsPowerMonitor">telemetry.internal.platform.power_monitor.sysfs_power_monitor.SysfsPowerMonitor</a>:<br>
+<dl><dt><a name="CrosPowerMonitor-CombineResults"><strong>CombineResults</strong></a>(cpu_stats, power_stats)</dt><dd><tt>Add&nbsp;frequency&nbsp;and&nbsp;c-state&nbsp;residency&nbsp;data&nbsp;to&nbsp;the&nbsp;power&nbsp;data.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;cpu_stats:&nbsp;Dictionary&nbsp;containing&nbsp;CPU&nbsp;statistics.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;power_stats:&nbsp;Dictionary&nbsp;containing&nbsp;power&nbsp;statistics.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;Dictionary&nbsp;in&nbsp;the&nbsp;format&nbsp;returned&nbsp;by&nbsp;StopMonitoringPower.</tt></dd></dl>
+
+<dl><dt><a name="CrosPowerMonitor-ComputeCpuStats"><strong>ComputeCpuStats</strong></a>(initial, final)</dt><dd><tt>Parse&nbsp;the&nbsp;CPU&nbsp;c-state&nbsp;and&nbsp;frequency&nbsp;values&nbsp;saved&nbsp;during&nbsp;monitoring.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;initial:&nbsp;The&nbsp;parsed&nbsp;dictionary&nbsp;of&nbsp;initial&nbsp;statistics&nbsp;to&nbsp;be&nbsp;converted<br>
+&nbsp;&nbsp;&nbsp;&nbsp;into&nbsp;percentages.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;final:&nbsp;The&nbsp;parsed&nbsp;dictionary&nbsp;of&nbsp;final&nbsp;statistics&nbsp;to&nbsp;be&nbsp;converted<br>
+&nbsp;&nbsp;&nbsp;&nbsp;into&nbsp;percentages.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;Dictionary&nbsp;containing&nbsp;percentages&nbsp;for&nbsp;each&nbsp;CPU&nbsp;as&nbsp;well&nbsp;as&nbsp;an&nbsp;average<br>
+&nbsp;&nbsp;&nbsp;&nbsp;across&nbsp;all&nbsp;CPUs.</tt></dd></dl>
+
+<dl><dt><a name="CrosPowerMonitor-ParseFreqSample"><strong>ParseFreqSample</strong></a>(sample)</dt><dd><tt>Parse&nbsp;a&nbsp;single&nbsp;frequency&nbsp;sample.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;sample:&nbsp;The&nbsp;single&nbsp;sample&nbsp;of&nbsp;frequency&nbsp;data&nbsp;to&nbsp;be&nbsp;parsed.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;A&nbsp;dictionary&nbsp;associating&nbsp;a&nbsp;frequency&nbsp;with&nbsp;a&nbsp;time.</tt></dd></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>:<br>
+<dl><dt><a name="CrosPowerMonitor-CanMeasurePerApplicationPower"><strong>CanMeasurePerApplicationPower</strong></a>(self)</dt><dd><tt>Returns&nbsp;True&nbsp;if&nbsp;the&nbsp;power&nbsp;monitor&nbsp;can&nbsp;measure&nbsp;power&nbsp;for&nbsp;the&nbsp;target<br>
+application&nbsp;in&nbsp;isolation.&nbsp;False&nbsp;if&nbsp;power&nbsp;measurement&nbsp;is&nbsp;for&nbsp;full&nbsp;system<br>
+energy&nbsp;consumption.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
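The SplitSample contract documented above (input is the combined output of `dump_power_status; date +%s`, output is a tuple of the power status text and the epoch time) can be illustrated with a short sketch. This is not the code from the diff, only one plausible reading of the documented behaviour, and the helper name `split_sample` is invented here:

```python
# Illustrative only: split the combined "dump_power_status; date +%s" output
# into (power_status_text, sample_time), assuming the timestamp prints last.
def split_sample(sample):
  lines = sample.strip().splitlines()
  # "date +%s" runs after dump_power_status, so the final line is the epoch
  # timestamp and the preceding lines are the power status fields.
  sample_time = int(lines[-1])
  power_status = '\n'.join(lines[:-1])
  return power_status, sample_time
```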
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.power_monitor.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.power_monitor.html
new file mode 100644
index 0000000..bc7461c
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.power_monitor.html
@@ -0,0 +1,92 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.internal.platform.power_monitor</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.power_monitor</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/power_monitor/__init__.py">telemetry/internal/platform/power_monitor/__init__.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.platform.power_monitor.android_dumpsys_power_monitor.html">android_dumpsys_power_monitor</a><br>
+<a href="telemetry.internal.platform.power_monitor.android_dumpsys_power_monitor_unittest.html">android_dumpsys_power_monitor_unittest</a><br>
+<a href="telemetry.internal.platform.power_monitor.android_fuelgauge_power_monitor.html">android_fuelgauge_power_monitor</a><br>
+<a href="telemetry.internal.platform.power_monitor.android_fuelgauge_power_monitor_unittest.html">android_fuelgauge_power_monitor_unittest</a><br>
+<a href="telemetry.internal.platform.power_monitor.android_temperature_monitor.html">android_temperature_monitor</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.power_monitor.android_temperature_monitor_unittest.html">android_temperature_monitor_unittest</a><br>
+<a href="telemetry.internal.platform.power_monitor.cros_power_monitor.html">cros_power_monitor</a><br>
+<a href="telemetry.internal.platform.power_monitor.cros_power_monitor_unittest.html">cros_power_monitor_unittest</a><br>
+<a href="telemetry.internal.platform.power_monitor.monsoon_power_monitor.html">monsoon_power_monitor</a><br>
+<a href="telemetry.internal.platform.power_monitor.monsoon_power_monitor_unittest.html">monsoon_power_monitor_unittest</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.power_monitor.msr_power_monitor.html">msr_power_monitor</a><br>
+<a href="telemetry.internal.platform.power_monitor.msr_power_monitor_unittest.html">msr_power_monitor_unittest</a><br>
+<a href="telemetry.internal.platform.power_monitor.power_monitor_controller.html">power_monitor_controller</a><br>
+<a href="telemetry.internal.platform.power_monitor.power_monitor_controller_unittest.html">power_monitor_controller_unittest</a><br>
+<a href="telemetry.internal.platform.power_monitor.powermetrics_power_monitor.html">powermetrics_power_monitor</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.power_monitor.powermetrics_power_monitor_unittest.html">powermetrics_power_monitor_unittest</a><br>
+<a href="telemetry.internal.platform.power_monitor.sysfs_power_monitor.html">sysfs_power_monitor</a><br>
+<a href="telemetry.internal.platform.power_monitor.sysfs_power_monitor_unittest.html">sysfs_power_monitor_unittest</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">PowerMonitor</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PowerMonitor">class <strong>PowerMonitor</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;power&nbsp;profiler.<br>
+&nbsp;<br>
+Provides&nbsp;an&nbsp;interface&nbsp;to&nbsp;register&nbsp;power&nbsp;consumption&nbsp;during&nbsp;a&nbsp;test.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="PowerMonitor-CanMeasurePerApplicationPower"><strong>CanMeasurePerApplicationPower</strong></a>(self)</dt><dd><tt>Returns&nbsp;True&nbsp;if&nbsp;the&nbsp;power&nbsp;monitor&nbsp;can&nbsp;measure&nbsp;power&nbsp;for&nbsp;the&nbsp;target<br>
+application&nbsp;in&nbsp;isolation.&nbsp;False&nbsp;if&nbsp;power&nbsp;measurement&nbsp;is&nbsp;for&nbsp;full&nbsp;system<br>
+energy&nbsp;consumption.</tt></dd></dl>
+
+<dl><dt><a name="PowerMonitor-CanMonitorPower"><strong>CanMonitorPower</strong></a>(self)</dt><dd><tt>Returns&nbsp;True&nbsp;iff&nbsp;power&nbsp;can&nbsp;be&nbsp;monitored&nbsp;asynchronously&nbsp;via<br>
+<a href="#PowerMonitor-StartMonitoringPower">StartMonitoringPower</a>()&nbsp;and&nbsp;<a href="#PowerMonitor-StopMonitoringPower">StopMonitoringPower</a>().</tt></dd></dl>
+
+<dl><dt><a name="PowerMonitor-StartMonitoringPower"><strong>StartMonitoringPower</strong></a>(self, browser)</dt><dd><tt>Starts&nbsp;monitoring&nbsp;power&nbsp;utilization&nbsp;statistics.<br>
+&nbsp;<br>
+See&nbsp;Platform#StartMonitoringPower&nbsp;for&nbsp;the&nbsp;arguments&nbsp;format.</tt></dd></dl>
+
+<dl><dt><a name="PowerMonitor-StopMonitoringPower"><strong>StopMonitoringPower</strong></a>(self)</dt><dd><tt>Stops&nbsp;monitoring&nbsp;power&nbsp;utilization&nbsp;and&nbsp;returns&nbsp;collects&nbsp;stats<br>
+&nbsp;<br>
+See&nbsp;Platform#StopMonitoringPower&nbsp;for&nbsp;the&nbsp;return&nbsp;format.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
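The `PowerMonitor` base class documented above defines the contract every concrete monitor in this package implements: `CanMonitorPower`, `StartMonitoringPower`, `StopMonitoringPower`, plus the informational `CanMeasurePerApplicationPower`. A minimal sketch of a conforming subclass follows, purely for orientation; `FakePowerMonitor` and its fields are invented for this example, and the keys of the returned dictionary are deliberately left to Platform#StopMonitoringPower rather than guessed here:

```python
from telemetry.internal.platform import power_monitor


class FakePowerMonitor(power_monitor.PowerMonitor):
  """Illustrative monitor used only to show the interface shape."""

  def __init__(self):
    self._monitoring = False

  def CanMonitorPower(self):
    # A real monitor checks for its backing tool or device here.
    return True

  def StartMonitoringPower(self, browser):
    self._monitoring = True

  def StopMonitoringPower(self):
    assert self._monitoring, 'StopMonitoringPower() called before Start'
    self._monitoring = False
    # A real monitor returns the dictionary format described by
    # Platform#StopMonitoringPower; an empty dict stands in for it here.
    return {}
```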
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.power_monitor.monsoon_power_monitor.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.power_monitor.monsoon_power_monitor.html
new file mode 100644
index 0000000..fa24e9c
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.power_monitor.monsoon_power_monitor.html
@@ -0,0 +1,89 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.power_monitor.monsoon_power_monitor</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.power_monitor.html"><font color="#ffffff">power_monitor</font></a>.monsoon_power_monitor</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/power_monitor/monsoon_power_monitor.py">telemetry/internal/platform/power_monitor/monsoon_power_monitor.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+<a href="json.html">json</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.profiler.monsoon.html">telemetry.internal.platform.profiler.monsoon</a><br>
+<a href="multiprocessing.html">multiprocessing</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.power_monitor.html">telemetry.internal.platform.power_monitor</a><br>
+<a href="tempfile.html">tempfile</a><br>
+</td><td width="25%" valign=top><a href="time.html">time</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.power_monitor.monsoon_power_monitor.html#MonsoonPowerMonitor">MonsoonPowerMonitor</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MonsoonPowerMonitor">class <strong>MonsoonPowerMonitor</strong></a>(<a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.power_monitor.monsoon_power_monitor.html#MonsoonPowerMonitor">MonsoonPowerMonitor</a></dd>
+<dd><a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="MonsoonPowerMonitor-CanMonitorPower"><strong>CanMonitorPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MonsoonPowerMonitor-StartMonitoringPower"><strong>StartMonitoringPower</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="MonsoonPowerMonitor-StopMonitoringPower"><strong>StopMonitoringPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MonsoonPowerMonitor-__init__"><strong>__init__</strong></a>(self, _, platform_backend)</dt></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="MonsoonPowerMonitor-ParseSamplingOutput"><strong>ParseSamplingOutput</strong></a>(powermonitor_output)</dt><dd><tt>Parse&nbsp;the&nbsp;output&nbsp;of&nbsp;of&nbsp;the&nbsp;samples&nbsp;collector&nbsp;process.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;Dictionary&nbsp;in&nbsp;the&nbsp;format&nbsp;returned&nbsp;by&nbsp;<a href="#MonsoonPowerMonitor-StopMonitoringPower">StopMonitoringPower</a>().</tt></dd></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>:<br>
+<dl><dt><a name="MonsoonPowerMonitor-CanMeasurePerApplicationPower"><strong>CanMeasurePerApplicationPower</strong></a>(self)</dt><dd><tt>Returns&nbsp;True&nbsp;if&nbsp;the&nbsp;power&nbsp;monitor&nbsp;can&nbsp;measure&nbsp;power&nbsp;for&nbsp;the&nbsp;target<br>
+application&nbsp;in&nbsp;isolation.&nbsp;False&nbsp;if&nbsp;power&nbsp;measurement&nbsp;is&nbsp;for&nbsp;full&nbsp;system<br>
+energy&nbsp;consumption.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.power_monitor.msr_power_monitor.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.power_monitor.msr_power_monitor.html
new file mode 100644
index 0000000..b050d36
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.power_monitor.msr_power_monitor.html
@@ -0,0 +1,177 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.power_monitor.msr_power_monitor</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.power_monitor.html"><font color="#ffffff">power_monitor</font></a>.msr_power_monitor</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/power_monitor/msr_power_monitor.py">telemetry/internal/platform/power_monitor/msr_power_monitor.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.decorators.html">telemetry.decorators</a><br>
+<a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="platform.html">platform</a><br>
+<a href="telemetry.internal.platform.power_monitor.html">telemetry.internal.platform.power_monitor</a><br>
+</td><td width="25%" valign=top><a href="re.html">re</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.power_monitor.msr_power_monitor.html#MsrPowerMonitor">MsrPowerMonitor</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.power_monitor.msr_power_monitor.html#MsrPowerMonitorLinux">MsrPowerMonitorLinux</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.internal.platform.power_monitor.msr_power_monitor.html#MsrPowerMonitorWin">MsrPowerMonitorWin</a>
+</font></dt></dl>
+</dd>
+</dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MsrPowerMonitor">class <strong>MsrPowerMonitor</strong></a>(<a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.power_monitor.msr_power_monitor.html#MsrPowerMonitor">MsrPowerMonitor</a></dd>
+<dd><a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="MsrPowerMonitor-CanMonitorPower"><strong>CanMonitorPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MsrPowerMonitor-StartMonitoringPower"><strong>StartMonitoringPower</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="MsrPowerMonitor-StopMonitoringPower"><strong>StopMonitoringPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MsrPowerMonitor-__init__"><strong>__init__</strong></a>(self, backend)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>:<br>
+<dl><dt><a name="MsrPowerMonitor-CanMeasurePerApplicationPower"><strong>CanMeasurePerApplicationPower</strong></a>(self)</dt><dd><tt>Returns&nbsp;True&nbsp;if&nbsp;the&nbsp;power&nbsp;monitor&nbsp;can&nbsp;measure&nbsp;power&nbsp;for&nbsp;the&nbsp;target<br>
+application&nbsp;in&nbsp;isolation.&nbsp;False&nbsp;if&nbsp;power&nbsp;measurement&nbsp;is&nbsp;for&nbsp;full&nbsp;system<br>
+energy&nbsp;consumption.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MsrPowerMonitorLinux">class <strong>MsrPowerMonitorLinux</strong></a>(<a href="telemetry.internal.platform.power_monitor.msr_power_monitor.html#MsrPowerMonitor">MsrPowerMonitor</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.power_monitor.msr_power_monitor.html#MsrPowerMonitorLinux">MsrPowerMonitorLinux</a></dd>
+<dd><a href="telemetry.internal.platform.power_monitor.msr_power_monitor.html#MsrPowerMonitor">MsrPowerMonitor</a></dd>
+<dd><a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="MsrPowerMonitorLinux-CanMonitorPower"><strong>CanMonitorPower</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.power_monitor.msr_power_monitor.html#MsrPowerMonitor">MsrPowerMonitor</a>:<br>
+<dl><dt><a name="MsrPowerMonitorLinux-StartMonitoringPower"><strong>StartMonitoringPower</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="MsrPowerMonitorLinux-StopMonitoringPower"><strong>StopMonitoringPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MsrPowerMonitorLinux-__init__"><strong>__init__</strong></a>(self, backend)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>:<br>
+<dl><dt><a name="MsrPowerMonitorLinux-CanMeasurePerApplicationPower"><strong>CanMeasurePerApplicationPower</strong></a>(self)</dt><dd><tt>Returns&nbsp;True&nbsp;if&nbsp;the&nbsp;power&nbsp;monitor&nbsp;can&nbsp;measure&nbsp;power&nbsp;for&nbsp;the&nbsp;target<br>
+application&nbsp;in&nbsp;isolation.&nbsp;False&nbsp;if&nbsp;power&nbsp;measurement&nbsp;is&nbsp;for&nbsp;full&nbsp;system<br>
+energy&nbsp;consumption.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MsrPowerMonitorWin">class <strong>MsrPowerMonitorWin</strong></a>(<a href="telemetry.internal.platform.power_monitor.msr_power_monitor.html#MsrPowerMonitor">MsrPowerMonitor</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.power_monitor.msr_power_monitor.html#MsrPowerMonitorWin">MsrPowerMonitorWin</a></dd>
+<dd><a href="telemetry.internal.platform.power_monitor.msr_power_monitor.html#MsrPowerMonitor">MsrPowerMonitor</a></dd>
+<dd><a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="MsrPowerMonitorWin-CanMonitorPower"><strong>CanMonitorPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MsrPowerMonitorWin-StopMonitoringPower"><strong>StopMonitoringPower</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.power_monitor.msr_power_monitor.html#MsrPowerMonitor">MsrPowerMonitor</a>:<br>
+<dl><dt><a name="MsrPowerMonitorWin-StartMonitoringPower"><strong>StartMonitoringPower</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="MsrPowerMonitorWin-__init__"><strong>__init__</strong></a>(self, backend)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>:<br>
+<dl><dt><a name="MsrPowerMonitorWin-CanMeasurePerApplicationPower"><strong>CanMeasurePerApplicationPower</strong></a>(self)</dt><dd><tt>Returns&nbsp;True&nbsp;if&nbsp;the&nbsp;power&nbsp;monitor&nbsp;can&nbsp;measure&nbsp;power&nbsp;for&nbsp;the&nbsp;target<br>
+application&nbsp;in&nbsp;isolation.&nbsp;False&nbsp;if&nbsp;power&nbsp;measurement&nbsp;is&nbsp;for&nbsp;full&nbsp;system<br>
+energy&nbsp;consumption.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>IA32_PACKAGE_THERM_STATUS</strong> = 433<br>
+<strong>IA32_TEMPERATURE_TARGET</strong> = 418<br>
+<strong>MSR_DRAM_ENERGY_STATUS</strong> = 1561<br>
+<strong>MSR_PKG_ENERGY_STATUS</strong> = 1553<br>
+<strong>MSR_PP0_ENERGY_STATUS</strong> = 1593<br>
+<strong>MSR_PP1_ENERGY_STATUS</strong> = 1601<br>
+<strong>MSR_RAPL_POWER_UNIT</strong> = 1542</td></tr></table>
+</body></html>
\ No newline at end of file
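pydoc renders the MSR register numbers in the Data section above as decimals; they are the conventional Intel MSR addresses, which are easier to recognise in hexadecimal. The snippet below simply restates the same values to make the correspondence explicit:

```python
# Decimal values from the pydoc Data section above, restated in hex.
MSRS = {
    'MSR_RAPL_POWER_UNIT': 0x606,        # 1542
    'MSR_PKG_ENERGY_STATUS': 0x611,      # 1553
    'MSR_DRAM_ENERGY_STATUS': 0x619,     # 1561
    'MSR_PP0_ENERGY_STATUS': 0x639,      # 1593
    'MSR_PP1_ENERGY_STATUS': 0x641,      # 1601
    'IA32_TEMPERATURE_TARGET': 0x1A2,    # 418
    'IA32_PACKAGE_THERM_STATUS': 0x1B1,  # 433
}
for name, address in sorted(MSRS.items()):
  print('%s = %d (0x%X)' % (name, address, address))
```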
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.power_monitor.power_monitor_controller.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.power_monitor.power_monitor_controller.html
new file mode 100644
index 0000000..e1ba28c
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.power_monitor.power_monitor_controller.html
@@ -0,0 +1,81 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.power_monitor.power_monitor_controller</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.power_monitor.html"><font color="#ffffff">power_monitor</font></a>.power_monitor_controller</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/power_monitor/power_monitor_controller.py">telemetry/internal/platform/power_monitor/power_monitor_controller.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="atexit.html">atexit</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.power_monitor.html">telemetry.internal.platform.power_monitor</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.power_monitor.power_monitor_controller.html#PowerMonitorController">PowerMonitorController</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PowerMonitorController">class <strong>PowerMonitorController</strong></a>(<a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt><a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">PowerMonitor</a>&nbsp;that&nbsp;acts&nbsp;as&nbsp;facade&nbsp;for&nbsp;a&nbsp;list&nbsp;of&nbsp;<a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">PowerMonitor</a>&nbsp;objects&nbsp;and&nbsp;uses<br>
+the&nbsp;first&nbsp;available&nbsp;one.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.power_monitor.power_monitor_controller.html#PowerMonitorController">PowerMonitorController</a></dd>
+<dd><a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="PowerMonitorController-CanMonitorPower"><strong>CanMonitorPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PowerMonitorController-StartMonitoringPower"><strong>StartMonitoringPower</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="PowerMonitorController-StopMonitoringPower"><strong>StopMonitoringPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PowerMonitorController-__init__"><strong>__init__</strong></a>(self, power_monitors, battery)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>:<br>
+<dl><dt><a name="PowerMonitorController-CanMeasurePerApplicationPower"><strong>CanMeasurePerApplicationPower</strong></a>(self)</dt><dd><tt>Returns&nbsp;True&nbsp;if&nbsp;the&nbsp;power&nbsp;monitor&nbsp;can&nbsp;measure&nbsp;power&nbsp;for&nbsp;the&nbsp;target<br>
+application&nbsp;in&nbsp;isolation.&nbsp;False&nbsp;if&nbsp;power&nbsp;measurement&nbsp;is&nbsp;for&nbsp;full&nbsp;system<br>
+energy&nbsp;consumption.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
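The class docstring above describes `PowerMonitorController` as a facade that delegates to the first of its `power_monitors` that can actually monitor. The selection step amounts to something like the sketch below; the helper name `_first_available` is hypothetical, and the real controller also coordinates the `battery` argument, which is omitted here:

```python
def _first_available(power_monitors):
  """Return the first monitor whose CanMonitorPower() is true, else None."""
  for monitor in power_monitors:
    if monitor.CanMonitorPower():
      return monitor
  return None
```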
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.power_monitor.powermetrics_power_monitor.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.power_monitor.powermetrics_power_monitor.html
new file mode 100644
index 0000000..7698b43
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.power_monitor.powermetrics_power_monitor.html
@@ -0,0 +1,98 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.power_monitor.powermetrics_power_monitor</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.power_monitor.html"><font color="#ffffff">power_monitor</font></a>.powermetrics_power_monitor</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/power_monitor/powermetrics_power_monitor.py">telemetry/internal/platform/power_monitor/powermetrics_power_monitor.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="collections.html">collections</a><br>
+<a href="telemetry.decorators.html">telemetry.decorators</a><br>
+<a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+<a href="telemetry.core.os_version.html">telemetry.core.os_version</a><br>
+<a href="plistlib.html">plistlib</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.power_monitor.html">telemetry.internal.platform.power_monitor</a><br>
+<a href="shutil.html">shutil</a><br>
+<a href="tempfile.html">tempfile</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.util.html">telemetry.core.util</a><br>
+<a href="xml.html">xml</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.power_monitor.powermetrics_power_monitor.html#PowerMetricsPowerMonitor">PowerMetricsPowerMonitor</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PowerMetricsPowerMonitor">class <strong>PowerMetricsPowerMonitor</strong></a>(<a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.power_monitor.powermetrics_power_monitor.html#PowerMetricsPowerMonitor">PowerMetricsPowerMonitor</a></dd>
+<dd><a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="PowerMetricsPowerMonitor-CanMonitorPower"><strong>CanMonitorPower</strong></a>(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="PowerMetricsPowerMonitor-StartMonitoringPower"><strong>StartMonitoringPower</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="PowerMetricsPowerMonitor-StopMonitoringPower"><strong>StopMonitoringPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PowerMetricsPowerMonitor-__init__"><strong>__init__</strong></a>(self, backend)</dt></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="PowerMetricsPowerMonitor-ParsePowerMetricsOutput"><strong>ParsePowerMetricsOutput</strong></a>(powermetrics_output)</dt><dd><tt>Parse&nbsp;output&nbsp;of&nbsp;powermetrics&nbsp;command&nbsp;line&nbsp;utility.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;Dictionary&nbsp;in&nbsp;the&nbsp;format&nbsp;returned&nbsp;by&nbsp;<a href="#PowerMetricsPowerMonitor-StopMonitoringPower">StopMonitoringPower</a>()&nbsp;or&nbsp;None<br>
+&nbsp;&nbsp;&nbsp;&nbsp;if&nbsp;|powermetrics_output|&nbsp;is&nbsp;empty&nbsp;-&nbsp;crbug.com/353250.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>binary_path</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>:<br>
+<dl><dt><a name="PowerMetricsPowerMonitor-CanMeasurePerApplicationPower"><strong>CanMeasurePerApplicationPower</strong></a>(self)</dt><dd><tt>Returns&nbsp;True&nbsp;if&nbsp;the&nbsp;power&nbsp;monitor&nbsp;can&nbsp;measure&nbsp;power&nbsp;for&nbsp;the&nbsp;target<br>
+application&nbsp;in&nbsp;isolation.&nbsp;False&nbsp;if&nbsp;power&nbsp;measurement&nbsp;is&nbsp;for&nbsp;full&nbsp;system<br>
+energy&nbsp;consumption.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.power_monitor.sysfs_power_monitor.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.power_monitor.sysfs_power_monitor.html
new file mode 100644
index 0000000..82bec24
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.power_monitor.sysfs_power_monitor.html
@@ -0,0 +1,147 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.power_monitor.sysfs_power_monitor</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.power_monitor.html"><font color="#ffffff">power_monitor</font></a>.sysfs_power_monitor</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/power_monitor/sysfs_power_monitor.py">telemetry/internal/platform/power_monitor/sysfs_power_monitor.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="collections.html">collections</a><br>
+<a href="telemetry.decorators.html">telemetry.decorators</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+<a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.power_monitor.html">telemetry.internal.platform.power_monitor</a><br>
+<a href="re.html">re</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.power_monitor.sysfs_power_monitor.html#SysfsPowerMonitor">SysfsPowerMonitor</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="SysfsPowerMonitor">class <strong>SysfsPowerMonitor</strong></a>(<a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt><a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">PowerMonitor</a>&nbsp;that&nbsp;relies&nbsp;on&nbsp;sysfs&nbsp;to&nbsp;monitor&nbsp;CPU&nbsp;statistics&nbsp;on&nbsp;several<br>
+different&nbsp;platforms.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.power_monitor.sysfs_power_monitor.html#SysfsPowerMonitor">SysfsPowerMonitor</a></dd>
+<dd><a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="SysfsPowerMonitor-CanMonitorPower"><strong>CanMonitorPower</strong></a>(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="SysfsPowerMonitor-GetCpuFreq"><strong>GetCpuFreq</strong></a>(self)</dt><dd><tt>Retrieve&nbsp;CPU&nbsp;frequency&nbsp;times&nbsp;from&nbsp;the&nbsp;device.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;Dictionary&nbsp;containing&nbsp;frequency&nbsp;times&nbsp;for&nbsp;each&nbsp;CPU.</tt></dd></dl>
+
+<dl><dt><a name="SysfsPowerMonitor-GetCpuState"><strong>GetCpuState</strong></a>(self)</dt><dd><tt>Retrieve&nbsp;CPU&nbsp;c-state&nbsp;residency&nbsp;times&nbsp;from&nbsp;the&nbsp;device.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;Dictionary&nbsp;containing&nbsp;c-state&nbsp;residency&nbsp;times&nbsp;for&nbsp;each&nbsp;CPU.</tt></dd></dl>
+
+<dl><dt><a name="SysfsPowerMonitor-StartMonitoringPower"><strong>StartMonitoringPower</strong></a>(self, _browser)</dt></dl>
+
+<dl><dt><a name="SysfsPowerMonitor-StopMonitoringPower"><strong>StopMonitoringPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="SysfsPowerMonitor-__init__"><strong>__init__</strong></a>(self, linux_based_platform_backend, standalone<font color="#909090">=False</font>)</dt><dd><tt>Constructor.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;linux_based_platform_backend:&nbsp;A&nbsp;LinuxBasedPlatformBackend&nbsp;object.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;standalone:&nbsp;If&nbsp;it&nbsp;is&nbsp;not&nbsp;wrapping&nbsp;another&nbsp;monitor,&nbsp;set&nbsp;to&nbsp;True.<br>
+&nbsp;<br>
+Attributes:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;_cpus:&nbsp;A&nbsp;list&nbsp;of&nbsp;the&nbsp;CPUs&nbsp;on&nbsp;the&nbsp;target&nbsp;device.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;_end_time:&nbsp;The&nbsp;time&nbsp;the&nbsp;test&nbsp;stopped&nbsp;monitoring&nbsp;power.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;_final_cstate:&nbsp;The&nbsp;c-state&nbsp;residency&nbsp;times&nbsp;after&nbsp;the&nbsp;test.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;_final_freq:&nbsp;The&nbsp;CPU&nbsp;frequency&nbsp;times&nbsp;after&nbsp;the&nbsp;test.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;_initial_cstate:&nbsp;The&nbsp;c-state&nbsp;residency&nbsp;times&nbsp;before&nbsp;the&nbsp;test.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;_initial_freq:&nbsp;The&nbsp;CPU&nbsp;frequency&nbsp;times&nbsp;before&nbsp;the&nbsp;test.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;_platform:&nbsp;A&nbsp;LinuxBasedPlatformBackend&nbsp;object&nbsp;associated&nbsp;with&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;target&nbsp;platform.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;_start_time:&nbsp;The&nbsp;time&nbsp;the&nbsp;test&nbsp;started&nbsp;monitoring&nbsp;power.</tt></dd></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="SysfsPowerMonitor-CombineResults"><strong>CombineResults</strong></a>(cpu_stats, power_stats)</dt><dd><tt>Add&nbsp;frequency&nbsp;and&nbsp;c-state&nbsp;residency&nbsp;data&nbsp;to&nbsp;the&nbsp;power&nbsp;data.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;cpu_stats:&nbsp;Dictionary&nbsp;containing&nbsp;CPU&nbsp;statistics.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;power_stats:&nbsp;Dictionary&nbsp;containing&nbsp;power&nbsp;statistics.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;Dictionary&nbsp;in&nbsp;the&nbsp;format&nbsp;returned&nbsp;by&nbsp;StopMonitoringPower.</tt></dd></dl>
+
+<dl><dt><a name="SysfsPowerMonitor-ComputeCpuStats"><strong>ComputeCpuStats</strong></a>(initial, final)</dt><dd><tt>Parse&nbsp;the&nbsp;CPU&nbsp;c-state&nbsp;and&nbsp;frequency&nbsp;values&nbsp;saved&nbsp;during&nbsp;monitoring.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;initial:&nbsp;The&nbsp;parsed&nbsp;dictionary&nbsp;of&nbsp;initial&nbsp;statistics&nbsp;to&nbsp;be&nbsp;converted<br>
+&nbsp;&nbsp;&nbsp;&nbsp;into&nbsp;percentages.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;final:&nbsp;The&nbsp;parsed&nbsp;dictionary&nbsp;of&nbsp;final&nbsp;statistics&nbsp;to&nbsp;be&nbsp;converted<br>
+&nbsp;&nbsp;&nbsp;&nbsp;into&nbsp;percentages.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;Dictionary&nbsp;containing&nbsp;percentages&nbsp;for&nbsp;each&nbsp;CPU&nbsp;as&nbsp;well&nbsp;as&nbsp;an&nbsp;average<br>
+&nbsp;&nbsp;&nbsp;&nbsp;across&nbsp;all&nbsp;CPUs.</tt></dd></dl>
+
+<dl><dt><a name="SysfsPowerMonitor-ParseFreqSample"><strong>ParseFreqSample</strong></a>(sample)</dt><dd><tt>Parse&nbsp;a&nbsp;single&nbsp;frequency&nbsp;sample.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;sample:&nbsp;The&nbsp;single&nbsp;sample&nbsp;of&nbsp;frequency&nbsp;data&nbsp;to&nbsp;be&nbsp;parsed.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;A&nbsp;dictionary&nbsp;associating&nbsp;a&nbsp;frequency&nbsp;with&nbsp;a&nbsp;time.</tt></dd></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>:<br>
+<dl><dt><a name="SysfsPowerMonitor-CanMeasurePerApplicationPower"><strong>CanMeasurePerApplicationPower</strong></a>(self)</dt><dd><tt>Returns&nbsp;True&nbsp;if&nbsp;the&nbsp;power&nbsp;monitor&nbsp;can&nbsp;measure&nbsp;power&nbsp;for&nbsp;the&nbsp;target<br>
+application&nbsp;in&nbsp;isolation.&nbsp;False&nbsp;if&nbsp;power&nbsp;measurement&nbsp;is&nbsp;for&nbsp;full&nbsp;system<br>
+energy&nbsp;consumption.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.power_monitor.html#PowerMonitor">telemetry.internal.platform.power_monitor.PowerMonitor</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>CPU_PATH</strong> = '/sys/devices/system/cpu/'</td></tr></table>
+</body></html>
\ No newline at end of file
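Taken together, the SysfsPowerMonitor entries above (a constructor taking a LinuxBasedPlatformBackend, CanMonitorPower, and the Start/StopMonitoringPower pair) imply the call sequence sketched below. This is a hedged sketch, not code from this diff: it assumes a telemetry checkout on sys.path, a real LinuxBasedPlatformBackend instance, and a caller-supplied `run_workload` callable.

```python
# Hedged sketch of the call sequence implied by the pydoc above. Assumes a
# telemetry checkout on sys.path and a real LinuxBasedPlatformBackend.
from telemetry.internal.platform.power_monitor import sysfs_power_monitor


def sample_cpu_stats(linux_backend, run_workload):
    """Collect c-state/frequency stats around run_workload() (a callable)."""
    monitor = sysfs_power_monitor.SysfsPowerMonitor(
        linux_backend, standalone=True)
    if not monitor.CanMonitorPower():
        return None
    # The leading underscore in the documented _browser parameter suggests the
    # argument is unused, so None is passed here (assumption).
    monitor.StartMonitoringPower(None)
    try:
        run_workload()
    finally:
        results = monitor.StopMonitoringPower()
    return results
```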
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.android_prebuilt_profiler_helper.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.android_prebuilt_profiler_helper.html
new file mode 100644
index 0000000..967106d
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.android_prebuilt_profiler_helper.html
@@ -0,0 +1,35 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.profiler.android_prebuilt_profiler_helper</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.profiler.html"><font color="#ffffff">profiler</font></a>.android_prebuilt_profiler_helper</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/profiler/android_prebuilt_profiler_helper.py">telemetry/internal/platform/profiler/android_prebuilt_profiler_helper.py</a></font></td></tr></table>
+    <p><tt>Android-specific,&nbsp;installs&nbsp;pre-built&nbsp;profilers.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.util.binary_manager.html">telemetry.internal.util.binary_manager</a><br>
+</td><td width="25%" valign=top><a href="telemetry.decorators.html">telemetry.decorators</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-GetDevicePath"><strong>GetDevicePath</strong></a>(profiler_binary)</dt></dl>
+ <dl><dt><a name="-InstallOnDevice"><strong>InstallOnDevice</strong></a>(*args, **kwargs)</dt></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.android_profiling_helper.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.android_profiling_helper.html
new file mode 100644
index 0000000..fed15b2
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.android_profiling_helper.html
@@ -0,0 +1,91 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.profiler.android_profiling_helper</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.profiler.html"><font color="#ffffff">profiler</font></a>.android_profiling_helper</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/profiler/android_profiling_helper.py">telemetry/internal/platform/profiler/android_profiling_helper.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.platform.profiler.android_prebuilt_profiler_helper.html">telemetry.internal.platform.profiler.android_prebuilt_profiler_helper</a><br>
+<a href="telemetry.internal.util.binary_manager.html">telemetry.internal.util.binary_manager</a><br>
+<a href="telemetry.decorators.html">telemetry.decorators</a><br>
+<a href="glob.html">glob</a><br>
+</td><td width="25%" valign=top><a href="hashlib.html">hashlib</a><br>
+<a href="logging.html">logging</a><br>
+<a href="devil.android.md5sum.html">devil.android.md5sum</a><br>
+<a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="platform.html">platform</a><br>
+<a href="re.html">re</a><br>
+<a href="shutil.html">shutil</a><br>
+<a href="sqlite3.html">sqlite3</a><br>
+</td><td width="25%" valign=top><a href="subprocess.html">subprocess</a><br>
+<a href="telemetry.core.platform.html">telemetry.core.platform</a><br>
+<a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-CreateSymFs"><strong>CreateSymFs</strong></a>(device, symfs_dir, libraries, use_symlinks<font color="#909090">=True</font>)</dt><dd><tt>Creates&nbsp;a&nbsp;symfs&nbsp;directory&nbsp;to&nbsp;be&nbsp;used&nbsp;for&nbsp;symbolizing&nbsp;profiles.<br>
+&nbsp;<br>
+Prepares&nbsp;a&nbsp;set&nbsp;of&nbsp;files&nbsp;("symfs")&nbsp;to&nbsp;be&nbsp;used&nbsp;with&nbsp;profilers&nbsp;such&nbsp;as&nbsp;perf&nbsp;for<br>
+converting&nbsp;binary&nbsp;addresses&nbsp;into&nbsp;human&nbsp;readable&nbsp;function&nbsp;names.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;device:&nbsp;DeviceUtils&nbsp;instance&nbsp;identifying&nbsp;the&nbsp;target&nbsp;device.<br>
+&nbsp;&nbsp;symfs_dir:&nbsp;Path&nbsp;where&nbsp;the&nbsp;symfs&nbsp;should&nbsp;be&nbsp;created.<br>
+&nbsp;&nbsp;libraries:&nbsp;Set&nbsp;of&nbsp;library&nbsp;file&nbsp;names&nbsp;that&nbsp;should&nbsp;be&nbsp;included&nbsp;in&nbsp;the&nbsp;symfs.<br>
+&nbsp;&nbsp;use_symlinks:&nbsp;If&nbsp;True,&nbsp;link&nbsp;instead&nbsp;of&nbsp;copy&nbsp;unstripped&nbsp;libraries&nbsp;into&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;symfs.&nbsp;This&nbsp;will&nbsp;speed&nbsp;up&nbsp;the&nbsp;operation,&nbsp;but&nbsp;the&nbsp;resulting&nbsp;symfs&nbsp;will&nbsp;no<br>
+&nbsp;&nbsp;&nbsp;&nbsp;longer&nbsp;be&nbsp;valid&nbsp;if&nbsp;the&nbsp;linked&nbsp;files&nbsp;are&nbsp;modified,&nbsp;e.g.,&nbsp;by&nbsp;rebuilding.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;The&nbsp;absolute&nbsp;path&nbsp;to&nbsp;the&nbsp;kernel&nbsp;symbols&nbsp;within&nbsp;the&nbsp;created&nbsp;symfs.</tt></dd></dl>
+ <dl><dt><a name="-GetPerfhostName"><strong>GetPerfhostName</strong></a>(*args, **kwargs)</dt></dl>
+ <dl><dt><a name="-GetRequiredLibrariesForPerfProfile"><strong>GetRequiredLibrariesForPerfProfile</strong></a>(profile_file)</dt><dd><tt>Returns&nbsp;the&nbsp;set&nbsp;of&nbsp;libraries&nbsp;necessary&nbsp;to&nbsp;symbolize&nbsp;a&nbsp;given&nbsp;perf&nbsp;profile.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;profile_file:&nbsp;Path&nbsp;to&nbsp;perf&nbsp;profile&nbsp;to&nbsp;analyse.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;set&nbsp;of&nbsp;required&nbsp;library&nbsp;file&nbsp;names.</tt></dd></dl>
+ <dl><dt><a name="-GetRequiredLibrariesForVTuneProfile"><strong>GetRequiredLibrariesForVTuneProfile</strong></a>(profile_file)</dt><dd><tt>Returns&nbsp;the&nbsp;set&nbsp;of&nbsp;libraries&nbsp;necessary&nbsp;to&nbsp;symbolize&nbsp;a&nbsp;given&nbsp;VTune&nbsp;profile.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;profile_file:&nbsp;Path&nbsp;to&nbsp;VTune&nbsp;profile&nbsp;to&nbsp;analyse.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;set&nbsp;of&nbsp;required&nbsp;library&nbsp;file&nbsp;names.</tt></dd></dl>
+ <dl><dt><a name="-GetToolchainBinaryPath"><strong>GetToolchainBinaryPath</strong></a>(library_file, binary_name)</dt><dd><tt>Return&nbsp;the&nbsp;path&nbsp;to&nbsp;an&nbsp;Android&nbsp;toolchain&nbsp;binary&nbsp;on&nbsp;the&nbsp;host.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;library_file:&nbsp;ELF&nbsp;library&nbsp;which&nbsp;is&nbsp;used&nbsp;to&nbsp;identify&nbsp;the&nbsp;used&nbsp;ABI,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;architecture&nbsp;and&nbsp;toolchain.<br>
+&nbsp;&nbsp;binary_name:&nbsp;Binary&nbsp;to&nbsp;search&nbsp;for,&nbsp;e.g.,&nbsp;'objdump'<br>
+Returns:<br>
+&nbsp;&nbsp;Full&nbsp;path&nbsp;to&nbsp;binary&nbsp;or&nbsp;None&nbsp;if&nbsp;the&nbsp;binary&nbsp;was&nbsp;not&nbsp;found.</tt></dd></dl>
+ <dl><dt><a name="-PrepareDeviceForPerf"><strong>PrepareDeviceForPerf</strong></a>(device)</dt><dd><tt>Set&nbsp;up&nbsp;a&nbsp;device&nbsp;for&nbsp;running&nbsp;perf.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;device:&nbsp;DeviceUtils&nbsp;instance&nbsp;identifying&nbsp;the&nbsp;target&nbsp;device.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;The&nbsp;path&nbsp;to&nbsp;the&nbsp;installed&nbsp;perf&nbsp;binary&nbsp;on&nbsp;the&nbsp;device.</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
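The android_profiling_helper functions above are documented individually; a hedged sketch of how they might fit together follows. The `prepare_and_symbolize` function is illustrative and not part of this diff; `device` is assumed to be a DeviceUtils instance and the path arguments are placeholders supplied by the caller.

```python
# Hedged sketch combining the documented helpers; prepare_and_symbolize is
# illustrative, not from this diff. `device` should be a DeviceUtils instance.
from telemetry.internal.platform.profiler import android_profiling_helper


def prepare_and_symbolize(device, profile_file, symfs_dir):
    """Set up perf on the device and build a symfs for an existing profile."""
    device_perf = android_profiling_helper.PrepareDeviceForPerf(device)
    libraries = android_profiling_helper.GetRequiredLibrariesForPerfProfile(
        profile_file)
    # Per the docs, CreateSymFs returns the absolute path to the kernel
    # symbols within the created symfs.
    kallsyms = android_profiling_helper.CreateSymFs(
        device, symfs_dir, libraries, use_symlinks=True)
    return device_perf, kallsyms
```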
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.android_screen_recorder_profiler.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.android_screen_recorder_profiler.html
new file mode 100644
index 0000000..b0fd553
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.android_screen_recorder_profiler.html
@@ -0,0 +1,82 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.profiler.android_screen_recorder_profiler</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.profiler.html"><font color="#ffffff">profiler</font></a>.android_screen_recorder_profiler</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/profiler/android_screen_recorder_profiler.py">telemetry/internal/platform/profiler/android_screen_recorder_profiler.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.backends.chrome.android_browser_finder.html">telemetry.internal.backends.chrome.android_browser_finder</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.profiler.html">telemetry.internal.platform.profiler</a><br>
+</td><td width="25%" valign=top><a href="pylib.screenshot.html">pylib.screenshot</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.android_screen_recorder_profiler.html#AndroidScreenRecordingProfiler">AndroidScreenRecordingProfiler</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="AndroidScreenRecordingProfiler">class <strong>AndroidScreenRecordingProfiler</strong></a>(<a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Captures&nbsp;a&nbsp;screen&nbsp;recording&nbsp;on&nbsp;Android.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.profiler.android_screen_recorder_profiler.html#AndroidScreenRecordingProfiler">AndroidScreenRecordingProfiler</a></dd>
+<dd><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="AndroidScreenRecordingProfiler-CollectProfile"><strong>CollectProfile</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidScreenRecordingProfiler-__init__"><strong>__init__</strong></a>(self, browser_backend, platform_backend, output_path, state)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="AndroidScreenRecordingProfiler-is_supported"><strong>is_supported</strong></a>(cls, browser_type)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="AndroidScreenRecordingProfiler-name"><strong>name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><a name="AndroidScreenRecordingProfiler-CustomizeBrowserOptions"><strong>CustomizeBrowserOptions</strong></a>(cls, browser_type, options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Override&nbsp;to&nbsp;customize&nbsp;the&nbsp;Browser's&nbsp;options&nbsp;before&nbsp;it&nbsp;is&nbsp;created.</tt></dd></dl>
+
+<dl><dt><a name="AndroidScreenRecordingProfiler-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(cls, browser_backend, platform_backend)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Called&nbsp;before&nbsp;the&nbsp;browser&nbsp;is&nbsp;stopped.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.android_systrace_profiler.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.android_systrace_profiler.html
new file mode 100644
index 0000000..eeb0060
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.android_systrace_profiler.html
@@ -0,0 +1,88 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.profiler.android_systrace_profiler</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.profiler.html"><font color="#ffffff">profiler</font></a>.android_systrace_profiler</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/profiler/android_systrace_profiler.py">telemetry/internal/platform/profiler/android_systrace_profiler.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="StringIO.html">StringIO</a><br>
+<a href="telemetry.internal.backends.chrome.android_browser_finder.html">telemetry.internal.backends.chrome.android_browser_finder</a><br>
+<a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.profiler.html">telemetry.internal.platform.profiler</a><br>
+<a href="subprocess.html">subprocess</a><br>
+<a href="telemetry.timeline.trace_data.html">telemetry.timeline.trace_data</a><br>
+</td><td width="25%" valign=top><a href="telemetry.timeline.tracing_options.html">telemetry.timeline.tracing_options</a><br>
+<a href="telemetry.core.util.html">telemetry.core.util</a><br>
+<a href="zipfile.html">zipfile</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.android_systrace_profiler.html#AndroidSystraceProfiler">AndroidSystraceProfiler</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="AndroidSystraceProfiler">class <strong>AndroidSystraceProfiler</strong></a>(<a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Collects&nbsp;a&nbsp;Systrace&nbsp;on&nbsp;Android.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.profiler.android_systrace_profiler.html#AndroidSystraceProfiler">AndroidSystraceProfiler</a></dd>
+<dd><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="AndroidSystraceProfiler-CollectProfile"><strong>CollectProfile</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidSystraceProfiler-__init__"><strong>__init__</strong></a>(self, browser_backend, platform_backend, output_path, state, device<font color="#909090">=None</font>)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="AndroidSystraceProfiler-is_supported"><strong>is_supported</strong></a>(cls, browser_type)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="AndroidSystraceProfiler-name"><strong>name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><a name="AndroidSystraceProfiler-CustomizeBrowserOptions"><strong>CustomizeBrowserOptions</strong></a>(cls, browser_type, options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Override&nbsp;to&nbsp;customize&nbsp;the&nbsp;Browser's&nbsp;options&nbsp;before&nbsp;it&nbsp;is&nbsp;created.</tt></dd></dl>
+
+<dl><dt><a name="AndroidSystraceProfiler-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(cls, browser_backend, platform_backend)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Called&nbsp;before&nbsp;the&nbsp;browser&nbsp;is&nbsp;stopped.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.android_traceview_profiler.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.android_traceview_profiler.html
new file mode 100644
index 0000000..38987a7
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.android_traceview_profiler.html
@@ -0,0 +1,85 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.profiler.android_traceview_profiler</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.profiler.html"><font color="#ffffff">profiler</font></a>.android_traceview_profiler</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/profiler/android_traceview_profiler.py">telemetry/internal/platform/profiler/android_traceview_profiler.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.backends.chrome.android_browser_finder.html">telemetry.internal.backends.chrome.android_browser_finder</a><br>
+<a href="devil.android.device_errors.html">devil.android.device_errors</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+<a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.profiler.html">telemetry.internal.platform.profiler</a><br>
+<a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.android_traceview_profiler.html#AndroidTraceviewProfiler">AndroidTraceviewProfiler</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="AndroidTraceviewProfiler">class <strong>AndroidTraceviewProfiler</strong></a>(<a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Collects&nbsp;a&nbsp;Traceview&nbsp;on&nbsp;Android.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.profiler.android_traceview_profiler.html#AndroidTraceviewProfiler">AndroidTraceviewProfiler</a></dd>
+<dd><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="AndroidTraceviewProfiler-CollectProfile"><strong>CollectProfile</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AndroidTraceviewProfiler-__init__"><strong>__init__</strong></a>(self, browser_backend, platform_backend, output_path, state)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="AndroidTraceviewProfiler-is_supported"><strong>is_supported</strong></a>(cls, browser_type)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="AndroidTraceviewProfiler-name"><strong>name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><a name="AndroidTraceviewProfiler-CustomizeBrowserOptions"><strong>CustomizeBrowserOptions</strong></a>(cls, browser_type, options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Override&nbsp;to&nbsp;customize&nbsp;the&nbsp;Browser's&nbsp;options&nbsp;before&nbsp;it&nbsp;is&nbsp;created.</tt></dd></dl>
+
+<dl><dt><a name="AndroidTraceviewProfiler-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(cls, browser_backend, platform_backend)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Called&nbsp;before&nbsp;the&nbsp;browser&nbsp;is&nbsp;stopped.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.html
new file mode 100644
index 0000000..36d9229
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.html
@@ -0,0 +1,105 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.internal.platform.profiler</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.profiler</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/profiler/__init__.py">telemetry/internal/platform/profiler/__init__.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.platform.profiler.android_prebuilt_profiler_helper.html">android_prebuilt_profiler_helper</a><br>
+<a href="telemetry.internal.platform.profiler.android_profiling_helper.html">android_profiling_helper</a><br>
+<a href="telemetry.internal.platform.profiler.android_profiling_helper_unittest.html">android_profiling_helper_unittest</a><br>
+<a href="telemetry.internal.platform.profiler.android_screen_recorder_profiler.html">android_screen_recorder_profiler</a><br>
+<a href="telemetry.internal.platform.profiler.android_screen_recorder_profiler_unittest.html">android_screen_recorder_profiler_unittest</a><br>
+<a href="telemetry.internal.platform.profiler.android_systrace_profiler.html">android_systrace_profiler</a><br>
+<a href="telemetry.internal.platform.profiler.android_systrace_profiler_unittest.html">android_systrace_profiler_unittest</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.profiler.android_traceview_profiler.html">android_traceview_profiler</a><br>
+<a href="telemetry.internal.platform.profiler.iprofiler_profiler.html">iprofiler_profiler</a><br>
+<a href="telemetry.internal.platform.profiler.java_heap_profiler.html">java_heap_profiler</a><br>
+<a href="telemetry.internal.platform.profiler.monsoon.html">monsoon</a><br>
+<a href="telemetry.internal.platform.profiler.monsoon_profiler.html">monsoon_profiler</a><br>
+<a href="telemetry.internal.platform.profiler.netlog_profiler.html">netlog_profiler</a><br>
+<a href="telemetry.internal.platform.profiler.oomkiller_profiler.html">oomkiller_profiler</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.profiler.perf_profiler.html">perf_profiler</a><br>
+<a href="telemetry.internal.platform.profiler.perf_profiler_unittest.html">perf_profiler_unittest</a><br>
+<a href="telemetry.internal.platform.profiler.profiler_finder.html">profiler_finder</a><br>
+<a href="telemetry.internal.platform.profiler.sample_profiler.html">sample_profiler</a><br>
+<a href="telemetry.internal.platform.profiler.strace_profiler.html">strace_profiler</a><br>
+<a href="telemetry.internal.platform.profiler.tcmalloc_heap_profiler.html">tcmalloc_heap_profiler</a><br>
+<a href="telemetry.internal.platform.profiler.tcpdump_profiler.html">tcpdump_profiler</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.profiler.trace_profiler.html">trace_profiler</a><br>
+<a href="telemetry.internal.platform.profiler.trace_profiler_unittest.html">trace_profiler_unittest</a><br>
+<a href="telemetry.internal.platform.profiler.v8_profiler.html">v8_profiler</a><br>
+<a href="telemetry.internal.platform.profiler.vtune_profiler.html">vtune_profiler</a><br>
+<a href="telemetry.internal.platform.profiler.vtune_profiler_unittest.html">vtune_profiler_unittest</a><br>
+<a href="telemetry.internal.platform.profiler.win_pgo_profiler.html">win_pgo_profiler</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.html#Profiler">Profiler</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Profiler">class <strong>Profiler</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;sampling&nbsp;profiler&nbsp;provided&nbsp;by&nbsp;the&nbsp;platform.<br>
+&nbsp;<br>
+A&nbsp;profiler&nbsp;is&nbsp;started&nbsp;on&nbsp;its&nbsp;constructor,&nbsp;and&nbsp;should<br>
+gather&nbsp;data&nbsp;until&nbsp;<a href="#Profiler-CollectProfile">CollectProfile</a>().<br>
+The&nbsp;life&nbsp;cycle&nbsp;is&nbsp;normally&nbsp;tied&nbsp;to&nbsp;a&nbsp;single&nbsp;page,<br>
+i.e.,&nbsp;multiple&nbsp;profilers&nbsp;will&nbsp;be&nbsp;created&nbsp;for&nbsp;a&nbsp;page&nbsp;set.<br>
+<a href="#Profiler-WillCloseBrowser">WillCloseBrowser</a>()&nbsp;is&nbsp;called&nbsp;right&nbsp;before&nbsp;the&nbsp;browser<br>
+is&nbsp;closed&nbsp;to&nbsp;allow&nbsp;any&nbsp;further&nbsp;cleanup.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="Profiler-CollectProfile"><strong>CollectProfile</strong></a>(self)</dt><dd><tt>Collect&nbsp;the&nbsp;profile&nbsp;from&nbsp;the&nbsp;profiler.</tt></dd></dl>
+
+<dl><dt><a name="Profiler-__init__"><strong>__init__</strong></a>(self, browser_backend, platform_backend, output_path, state)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="Profiler-CustomizeBrowserOptions"><strong>CustomizeBrowserOptions</strong></a>(cls, browser_type, options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Override&nbsp;to&nbsp;customize&nbsp;the&nbsp;Browser's&nbsp;options&nbsp;before&nbsp;it&nbsp;is&nbsp;created.</tt></dd></dl>
+
+<dl><dt><a name="Profiler-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(cls, browser_backend, platform_backend)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Called&nbsp;before&nbsp;the&nbsp;browser&nbsp;is&nbsp;stopped.</tt></dd></dl>
+
+<dl><dt><a name="Profiler-is_supported"><strong>is_supported</strong></a>(cls, browser_type)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>True&nbsp;iff&nbsp;this&nbsp;profiler&nbsp;is&nbsp;currently&nbsp;supported&nbsp;by&nbsp;the&nbsp;platform.</tt></dd></dl>
+
+<dl><dt><a name="Profiler-name"><strong>name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>User-friendly&nbsp;name&nbsp;of&nbsp;this&nbsp;profiler.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
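The Profiler docstring above spells out the lifecycle contract: profiling starts in the constructor, data is gathered until CollectProfile(), and WillCloseBrowser() runs right before the browser is closed. A minimal hedged sketch of a subclass following that contract is shown below; NoopProfiler is illustrative only and not part of this diff.

```python
# Hedged sketch of the lifecycle described in the Profiler pydoc above.
# NoopProfiler is illustrative and not part of this diff.
from telemetry.internal.platform import profiler


class NoopProfiler(profiler.Profiler):
    """Starts 'profiling' in __init__ and reports it in CollectProfile()."""

    def __init__(self, browser_backend, platform_backend, output_path, state):
        super(NoopProfiler, self).__init__(
            browser_backend, platform_backend, output_path, state)
        self._noop_output = output_path  # Data gathering would begin here.

    @classmethod
    def name(cls):
        return 'noop'

    @classmethod
    def is_supported(cls, browser_type):
        return True

    def CollectProfile(self):
        # Stop gathering and return the output file(s) produced since __init__.
        return [self._noop_output]
```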
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.iprofiler_profiler.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.iprofiler_profiler.html
new file mode 100644
index 0000000..3a406de
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.iprofiler_profiler.html
@@ -0,0 +1,84 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.profiler.iprofiler_profiler</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.profiler.html"><font color="#ffffff">profiler</font></a>.iprofiler_profiler</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/profiler/iprofiler_profiler.py">telemetry/internal/platform/profiler/iprofiler_profiler.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+<a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="pexpect.html">pexpect</a><br>
+<a href="telemetry.internal.platform.profiler.html">telemetry.internal.platform.profiler</a><br>
+</td><td width="25%" valign=top><a href="signal.html">signal</a><br>
+<a href="sys.html">sys</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.iprofiler_profiler.html#IprofilerProfiler">IprofilerProfiler</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="IprofilerProfiler">class <strong>IprofilerProfiler</strong></a>(<a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.profiler.iprofiler_profiler.html#IprofilerProfiler">IprofilerProfiler</a></dd>
+<dd><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="IprofilerProfiler-CollectProfile"><strong>CollectProfile</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="IprofilerProfiler-__init__"><strong>__init__</strong></a>(self, browser_backend, platform_backend, output_path, state)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="IprofilerProfiler-is_supported"><strong>is_supported</strong></a>(cls, browser_type)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="IprofilerProfiler-name"><strong>name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><a name="IprofilerProfiler-CustomizeBrowserOptions"><strong>CustomizeBrowserOptions</strong></a>(cls, browser_type, options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Override&nbsp;to&nbsp;customize&nbsp;the&nbsp;Browser's&nbsp;options&nbsp;before&nbsp;it&nbsp;is&nbsp;created.</tt></dd></dl>
+
+<dl><dt><a name="IprofilerProfiler-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(cls, browser_backend, platform_backend)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Called&nbsp;before&nbsp;the&nbsp;browser&nbsp;is&nbsp;stopped.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.java_heap_profiler.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.java_heap_profiler.html
new file mode 100644
index 0000000..250501b
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.java_heap_profiler.html
@@ -0,0 +1,88 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.profiler.java_heap_profiler</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.profiler.html"><font color="#ffffff">profiler</font></a>.java_heap_profiler</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/profiler/java_heap_profiler.py">telemetry/internal/platform/profiler/java_heap_profiler.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.backends.chrome.android_browser_finder.html">telemetry.internal.backends.chrome.android_browser_finder</a><br>
+<a href="pylib.constants.html">pylib.constants</a><br>
+<a href="devil.android.device_errors.html">devil.android.device_errors</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+<a href="os.html">os</a><br>
+<a href="telemetry.internal.platform.profiler.html">telemetry.internal.platform.profiler</a><br>
+</td><td width="25%" valign=top><a href="subprocess.html">subprocess</a><br>
+<a href="threading.html">threading</a><br>
+<a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.java_heap_profiler.html#JavaHeapProfiler">JavaHeapProfiler</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="JavaHeapProfiler">class <strong>JavaHeapProfiler</strong></a>(<a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Android-specific;&nbsp;triggers&nbsp;and&nbsp;fetches&nbsp;Java&nbsp;heap&nbsp;dumps.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.profiler.java_heap_profiler.html#JavaHeapProfiler">JavaHeapProfiler</a></dd>
+<dd><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="JavaHeapProfiler-CollectProfile"><strong>CollectProfile</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="JavaHeapProfiler-__init__"><strong>__init__</strong></a>(self, browser_backend, platform_backend, output_path, state)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="JavaHeapProfiler-is_supported"><strong>is_supported</strong></a>(cls, browser_type)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="JavaHeapProfiler-name"><strong>name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><a name="JavaHeapProfiler-CustomizeBrowserOptions"><strong>CustomizeBrowserOptions</strong></a>(cls, browser_type, options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Override&nbsp;to&nbsp;customize&nbsp;the&nbsp;Browser's&nbsp;options&nbsp;before&nbsp;it&nbsp;is&nbsp;created.</tt></dd></dl>
+
+<dl><dt><a name="JavaHeapProfiler-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(cls, browser_backend, platform_backend)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Called&nbsp;before&nbsp;the&nbsp;browser&nbsp;is&nbsp;stopped.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.monsoon.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.monsoon.html
new file mode 100644
index 0000000..9b8a4df
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.monsoon.html
@@ -0,0 +1,194 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.profiler.monsoon</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.profiler.html"><font color="#ffffff">profiler</font></a>.monsoon</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/profiler/monsoon.py">telemetry/internal/platform/profiler/monsoon.py</a></font></td></tr></table>
+    <p><tt>Interface&nbsp;for&nbsp;a&nbsp;USB-connected&nbsp;<a href="#Monsoon">Monsoon</a>&nbsp;power&nbsp;meter.<br>
+&nbsp;<br>
+<a href="http://msoon.com/LabEquipment/PowerMonitor/">http://msoon.com/LabEquipment/PowerMonitor/</a><br>
+Currently&nbsp;Unix-only.&nbsp;Relies&nbsp;on&nbsp;fcntl,&nbsp;/dev,&nbsp;and&nbsp;/tmp.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="collections.html">collections</a><br>
+<a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+<a href="select.html">select</a><br>
+</td><td width="25%" valign=top><a href="serial.html">serial</a><br>
+<a href="struct.html">struct</a><br>
+</td><td width="25%" valign=top><a href="time.html">time</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.monsoon.html#Monsoon">Monsoon</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#tuple">__builtin__.tuple</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.monsoon.html#Power">Power</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Monsoon">class <strong>Monsoon</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Provides&nbsp;a&nbsp;simple&nbsp;class&nbsp;to&nbsp;use&nbsp;the&nbsp;power&nbsp;meter.<br>
+&nbsp;<br>
+mon&nbsp;=&nbsp;monsoon.<a href="#Monsoon">Monsoon</a>()<br>
+mon.<a href="#Monsoon-SetVoltage">SetVoltage</a>(3.7)<br>
+mon.<a href="#Monsoon-StartDataCollection">StartDataCollection</a>()<br>
+mydata&nbsp;=&nbsp;[]<br>
+while&nbsp;len(mydata)&nbsp;&lt;&nbsp;1000:<br>
+&nbsp;&nbsp;mydata.extend(mon.<a href="#Monsoon-CollectData">CollectData</a>())<br>
+mon.<a href="#Monsoon-StopDataCollection">StopDataCollection</a>()<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="Monsoon-CollectData"><strong>CollectData</strong></a>(self)</dt><dd><tt>Return&nbsp;some&nbsp;current&nbsp;samples.&nbsp;&nbsp;Call&nbsp;<a href="#Monsoon-StartDataCollection">StartDataCollection</a>()&nbsp;first.</tt></dd></dl>
+
+<dl><dt><a name="Monsoon-GetStatus"><strong>GetStatus</strong></a>(self)</dt><dd><tt>Requests&nbsp;and&nbsp;waits&nbsp;for&nbsp;status.&nbsp;&nbsp;Returns&nbsp;status&nbsp;dictionary.</tt></dd></dl>
+
+<dl><dt><a name="Monsoon-SetMaxCurrent"><strong>SetMaxCurrent</strong></a>(self, a)</dt><dd><tt>Set&nbsp;the&nbsp;max&nbsp;output&nbsp;current.&nbsp;the&nbsp;unit&nbsp;of&nbsp;|a|&nbsp;:&nbsp;Amperes</tt></dd></dl>
+
+<dl><dt><a name="Monsoon-SetStartupCurrent"><strong>SetStartupCurrent</strong></a>(self, a)</dt><dd><tt>Set&nbsp;the&nbsp;max&nbsp;startup&nbsp;output&nbsp;current.&nbsp;the&nbsp;unit&nbsp;of&nbsp;|a|&nbsp;:&nbsp;Amperes</tt></dd></dl>
+
+<dl><dt><a name="Monsoon-SetUsbPassthrough"><strong>SetUsbPassthrough</strong></a>(self, val)</dt><dd><tt>Set&nbsp;the&nbsp;USB&nbsp;passthrough&nbsp;mode:&nbsp;0&nbsp;=&nbsp;off,&nbsp;1&nbsp;=&nbsp;on,&nbsp;&nbsp;2&nbsp;=&nbsp;auto.</tt></dd></dl>
+
+<dl><dt><a name="Monsoon-SetVoltage"><strong>SetVoltage</strong></a>(self, v)</dt><dd><tt>Set&nbsp;the&nbsp;output&nbsp;voltage,&nbsp;0&nbsp;to&nbsp;disable.</tt></dd></dl>
+
+<dl><dt><a name="Monsoon-StartDataCollection"><strong>StartDataCollection</strong></a>(self)</dt><dd><tt>Tell&nbsp;the&nbsp;device&nbsp;to&nbsp;start&nbsp;collecting&nbsp;and&nbsp;sending&nbsp;measurement&nbsp;data.</tt></dd></dl>
+
+<dl><dt><a name="Monsoon-StopDataCollection"><strong>StopDataCollection</strong></a>(self)</dt><dd><tt>Tell&nbsp;the&nbsp;device&nbsp;to&nbsp;stop&nbsp;collecting&nbsp;measurement&nbsp;data.</tt></dd></dl>
+
+<dl><dt><a name="Monsoon-__init__"><strong>__init__</strong></a>(self, device<font color="#909090">=None</font>, serialno<font color="#909090">=None</font>, wait<font color="#909090">=True</font>)</dt><dd><tt>Establish&nbsp;a&nbsp;connection&nbsp;to&nbsp;a&nbsp;<a href="#Monsoon">Monsoon</a>.<br>
+&nbsp;<br>
+By&nbsp;default,&nbsp;opens&nbsp;the&nbsp;first&nbsp;available&nbsp;port,&nbsp;waiting&nbsp;if&nbsp;none&nbsp;are&nbsp;ready.<br>
+A&nbsp;particular&nbsp;port&nbsp;can&nbsp;be&nbsp;specified&nbsp;with&nbsp;'device',&nbsp;or&nbsp;a&nbsp;particular&nbsp;<a href="#Monsoon">Monsoon</a><br>
+can&nbsp;be&nbsp;specified&nbsp;with&nbsp;'serialno'&nbsp;(using&nbsp;the&nbsp;number&nbsp;printed&nbsp;on&nbsp;its&nbsp;back).<br>
+With&nbsp;wait=False,&nbsp;IOError&nbsp;is&nbsp;thrown&nbsp;if&nbsp;a&nbsp;device&nbsp;is&nbsp;not&nbsp;immediately&nbsp;available.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Power">class <strong>Power</strong></a>(<a href="__builtin__.html#tuple">__builtin__.tuple</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt><a href="#Power">Power</a>(amps,&nbsp;volts)<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.profiler.monsoon.html#Power">Power</a></dd>
+<dd><a href="__builtin__.html#tuple">__builtin__.tuple</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="Power-__getnewargs__"><strong>__getnewargs__</strong></a>(self)</dt><dd><tt>Return&nbsp;self&nbsp;as&nbsp;a&nbsp;plain&nbsp;<a href="__builtin__.html#tuple">tuple</a>.&nbsp;&nbsp;Used&nbsp;by&nbsp;copy&nbsp;and&nbsp;pickle.</tt></dd></dl>
+
+<dl><dt><a name="Power-__getstate__"><strong>__getstate__</strong></a>(self)</dt><dd><tt>Exclude&nbsp;the&nbsp;OrderedDict&nbsp;from&nbsp;pickling</tt></dd></dl>
+
+<dl><dt><a name="Power-__repr__"><strong>__repr__</strong></a>(self)</dt><dd><tt>Return&nbsp;a&nbsp;nicely&nbsp;formatted&nbsp;representation&nbsp;string</tt></dd></dl>
+
+<dl><dt><a name="Power-_asdict"><strong>_asdict</strong></a>(self)</dt><dd><tt>Return&nbsp;a&nbsp;new&nbsp;OrderedDict&nbsp;which&nbsp;maps&nbsp;field&nbsp;names&nbsp;to&nbsp;their&nbsp;values</tt></dd></dl>
+
+<dl><dt><a name="Power-_replace"><strong>_replace</strong></a>(_self, **kwds)</dt><dd><tt>Return&nbsp;a&nbsp;new&nbsp;<a href="#Power">Power</a>&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;replacing&nbsp;specified&nbsp;fields&nbsp;with&nbsp;new&nbsp;values</tt></dd></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="Power-_make"><strong>_make</strong></a>(cls, iterable, new<font color="#909090">=&lt;built-in method __new__ of type object&gt;</font>, len<font color="#909090">=&lt;built-in function len&gt;</font>)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Make&nbsp;a&nbsp;new&nbsp;<a href="#Power">Power</a>&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;from&nbsp;a&nbsp;sequence&nbsp;or&nbsp;iterable</tt></dd></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="Power-__new__"><strong>__new__</strong></a>(_cls, amps, volts)</dt><dd><tt>Create&nbsp;new&nbsp;instance&nbsp;of&nbsp;<a href="#Power">Power</a>(amps,&nbsp;volts)</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>Return&nbsp;a&nbsp;new&nbsp;OrderedDict&nbsp;which&nbsp;maps&nbsp;field&nbsp;names&nbsp;to&nbsp;their&nbsp;values</tt></dd>
+</dl>
+<dl><dt><strong>amps</strong></dt>
+<dd><tt>Alias&nbsp;for&nbsp;field&nbsp;number&nbsp;0</tt></dd>
+</dl>
+<dl><dt><strong>volts</strong></dt>
+<dd><tt>Alias&nbsp;for&nbsp;field&nbsp;number&nbsp;1</tt></dd>
+</dl>
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>_fields</strong> = ('amps', 'volts')</dl>
+
+<hr>
+Methods inherited from <a href="__builtin__.html#tuple">__builtin__.tuple</a>:<br>
+<dl><dt><a name="Power-__add__"><strong>__add__</strong></a>(...)</dt><dd><tt>x.<a href="#Power-__add__">__add__</a>(y)&nbsp;&lt;==&gt;&nbsp;x+y</tt></dd></dl>
+
+<dl><dt><a name="Power-__contains__"><strong>__contains__</strong></a>(...)</dt><dd><tt>x.<a href="#Power-__contains__">__contains__</a>(y)&nbsp;&lt;==&gt;&nbsp;y&nbsp;in&nbsp;x</tt></dd></dl>
+
+<dl><dt><a name="Power-__eq__"><strong>__eq__</strong></a>(...)</dt><dd><tt>x.<a href="#Power-__eq__">__eq__</a>(y)&nbsp;&lt;==&gt;&nbsp;x==y</tt></dd></dl>
+
+<dl><dt><a name="Power-__ge__"><strong>__ge__</strong></a>(...)</dt><dd><tt>x.<a href="#Power-__ge__">__ge__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&gt;=y</tt></dd></dl>
+
+<dl><dt><a name="Power-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#Power-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="Power-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#Power-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="Power-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#Power-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="Power-__gt__"><strong>__gt__</strong></a>(...)</dt><dd><tt>x.<a href="#Power-__gt__">__gt__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&gt;y</tt></dd></dl>
+
+<dl><dt><a name="Power-__hash__"><strong>__hash__</strong></a>(...)</dt><dd><tt>x.<a href="#Power-__hash__">__hash__</a>()&nbsp;&lt;==&gt;&nbsp;hash(x)</tt></dd></dl>
+
+<dl><dt><a name="Power-__iter__"><strong>__iter__</strong></a>(...)</dt><dd><tt>x.<a href="#Power-__iter__">__iter__</a>()&nbsp;&lt;==&gt;&nbsp;iter(x)</tt></dd></dl>
+
+<dl><dt><a name="Power-__le__"><strong>__le__</strong></a>(...)</dt><dd><tt>x.<a href="#Power-__le__">__le__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&lt;=y</tt></dd></dl>
+
+<dl><dt><a name="Power-__len__"><strong>__len__</strong></a>(...)</dt><dd><tt>x.<a href="#Power-__len__">__len__</a>()&nbsp;&lt;==&gt;&nbsp;len(x)</tt></dd></dl>
+
+<dl><dt><a name="Power-__lt__"><strong>__lt__</strong></a>(...)</dt><dd><tt>x.<a href="#Power-__lt__">__lt__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&lt;y</tt></dd></dl>
+
+<dl><dt><a name="Power-__mul__"><strong>__mul__</strong></a>(...)</dt><dd><tt>x.<a href="#Power-__mul__">__mul__</a>(n)&nbsp;&lt;==&gt;&nbsp;x*n</tt></dd></dl>
+
+<dl><dt><a name="Power-__ne__"><strong>__ne__</strong></a>(...)</dt><dd><tt>x.<a href="#Power-__ne__">__ne__</a>(y)&nbsp;&lt;==&gt;&nbsp;x!=y</tt></dd></dl>
+
+<dl><dt><a name="Power-__rmul__"><strong>__rmul__</strong></a>(...)</dt><dd><tt>x.<a href="#Power-__rmul__">__rmul__</a>(n)&nbsp;&lt;==&gt;&nbsp;n*x</tt></dd></dl>
+
+<dl><dt><a name="Power-__sizeof__"><strong>__sizeof__</strong></a>(...)</dt><dd><tt>T.<a href="#Power-__sizeof__">__sizeof__</a>()&nbsp;--&nbsp;size&nbsp;of&nbsp;T&nbsp;in&nbsp;memory,&nbsp;in&nbsp;bytes</tt></dd></dl>
+
+<dl><dt><a name="Power-count"><strong>count</strong></a>(...)</dt><dd><tt>T.<a href="#Power-count">count</a>(value)&nbsp;-&gt;&nbsp;integer&nbsp;--&nbsp;return&nbsp;number&nbsp;of&nbsp;occurrences&nbsp;of&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="Power-index"><strong>index</strong></a>(...)</dt><dd><tt>T.<a href="#Power-index">index</a>(value,&nbsp;[start,&nbsp;[stop]])&nbsp;-&gt;&nbsp;integer&nbsp;--&nbsp;return&nbsp;first&nbsp;index&nbsp;of&nbsp;value.<br>
+Raises&nbsp;ValueError&nbsp;if&nbsp;the&nbsp;value&nbsp;is&nbsp;not&nbsp;present.</tt></dd></dl>
+
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
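The Monsoon class docstring above already sketches the collection loop; spelled out as a standalone script it looks roughly like the following. The 3.7 V supply value and the 1000-sample target come straight from that docstring; how the samples returned by CollectData() are consumed afterwards is left as an assumption, since the docstring only says it returns "some current samples".

```python
# Expanded version of the usage example from the Monsoon class docstring.
from telemetry.internal.platform.profiler import monsoon

mon = monsoon.Monsoon()   # Opens the first available port, waiting if none are ready.
mon.SetVoltage(3.7)       # Set the output voltage; 0 disables output.
mon.StartDataCollection()

samples = []
while len(samples) < 1000:
  samples.extend(mon.CollectData())  # Requires StartDataCollection() first.

mon.StopDataCollection()
print('collected %d samples' % len(samples))
```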
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.monsoon_profiler.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.monsoon_profiler.html
new file mode 100644
index 0000000..a360bb1
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.monsoon_profiler.html
@@ -0,0 +1,85 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.profiler.monsoon_profiler</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.profiler.html"><font color="#ffffff">profiler</font></a>.monsoon_profiler</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/profiler/monsoon_profiler.py">telemetry/internal/platform/profiler/monsoon_profiler.py</a></font></td></tr></table>
+    <p><tt><a href="telemetry.internal.platform.profiler.html#Profiler">Profiler</a>&nbsp;using&nbsp;data&nbsp;collected&nbsp;from&nbsp;a&nbsp;Monsoon&nbsp;power&nbsp;meter.<br>
+&nbsp;<br>
+<a href="http://msoon.com/LabEquipment/PowerMonitor/">http://msoon.com/LabEquipment/PowerMonitor/</a><br>
+Data&nbsp;collected&nbsp;is&nbsp;a&nbsp;namedtuple&nbsp;of&nbsp;(amps,&nbsp;volts),&nbsp;at&nbsp;5000&nbsp;samples/second.<br>
+Output&nbsp;graph&nbsp;plots&nbsp;power&nbsp;in&nbsp;watts&nbsp;over&nbsp;time&nbsp;in&nbsp;seconds.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="csv.html">csv</a><br>
+<a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.profiler.monsoon.html">telemetry.internal.platform.profiler.monsoon</a><br>
+<a href="multiprocessing.html">multiprocessing</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.profiler.html">telemetry.internal.platform.profiler</a><br>
+<a href="telemetry.util.statistics.html">telemetry.util.statistics</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.monsoon_profiler.html#MonsoonProfiler">MonsoonProfiler</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MonsoonProfiler">class <strong>MonsoonProfiler</strong></a>(<a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.profiler.monsoon_profiler.html#MonsoonProfiler">MonsoonProfiler</a></dd>
+<dd><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="MonsoonProfiler-CollectProfile"><strong>CollectProfile</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MonsoonProfiler-__init__"><strong>__init__</strong></a>(self, browser_backend, platform_backend, output_path, state)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="MonsoonProfiler-is_supported"><strong>is_supported</strong></a>(cls, browser_type)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="MonsoonProfiler-name"><strong>name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><a name="MonsoonProfiler-CustomizeBrowserOptions"><strong>CustomizeBrowserOptions</strong></a>(cls, browser_type, options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Override&nbsp;to&nbsp;customize&nbsp;the&nbsp;Browser's&nbsp;options&nbsp;before&nbsp;it&nbsp;is&nbsp;created.</tt></dd></dl>
+
+<dl><dt><a name="MonsoonProfiler-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(cls, browser_backend, platform_backend)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Called&nbsp;before&nbsp;the&nbsp;browser&nbsp;is&nbsp;stopped.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
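The monsoon_profiler module docstring says the collected data is a namedtuple of (amps, volts) sampled at 5000 samples/second, and that the output graph plots power in watts over time in seconds. A back-of-the-envelope conversion of such samples, using the Power namedtuple documented in the monsoon page above, would look like this (the sample values themselves are made up):

```python
# Converting (amps, volts) samples into a (seconds, watts) series, following
# the monsoon_profiler module docstring: 5000 samples per second, power in
# watts over time in seconds.  The sample values below are fake.
from telemetry.internal.platform.profiler.monsoon import Power

SAMPLE_RATE_HZ = 5000.0

samples = [Power(amps=0.21, volts=3.7), Power(amps=0.24, volts=3.7)]

series = [(i / SAMPLE_RATE_HZ, s.amps * s.volts)  # (time in s, power in W)
          for i, s in enumerate(samples)]
```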
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.netlog_profiler.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.netlog_profiler.html
new file mode 100644
index 0000000..60b7031
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.netlog_profiler.html
@@ -0,0 +1,82 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.profiler.netlog_profiler</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.profiler.html"><font color="#ffffff">profiler</font></a>.netlog_profiler</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/profiler/netlog_profiler.py">telemetry/internal/platform/profiler/netlog_profiler.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.profiler.html">telemetry.internal.platform.profiler</a><br>
+</td><td width="25%" valign=top><a href="tempfile.html">tempfile</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.netlog_profiler.html#NetLogProfiler">NetLogProfiler</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="NetLogProfiler">class <strong>NetLogProfiler</strong></a>(<a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.profiler.netlog_profiler.html#NetLogProfiler">NetLogProfiler</a></dd>
+<dd><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="NetLogProfiler-CollectProfile"><strong>CollectProfile</strong></a>(self)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="NetLogProfiler-CustomizeBrowserOptions"><strong>CustomizeBrowserOptions</strong></a>(cls, browser_type, options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="NetLogProfiler-is_supported"><strong>is_supported</strong></a>(cls, browser_type)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="NetLogProfiler-name"><strong>name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><a name="NetLogProfiler-__init__"><strong>__init__</strong></a>(self, browser_backend, platform_backend, output_path, state)</dt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><a name="NetLogProfiler-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(cls, browser_backend, platform_backend)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Called&nbsp;before&nbsp;the&nbsp;browser&nbsp;is&nbsp;stopped.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.oomkiller_profiler.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.oomkiller_profiler.html
new file mode 100644
index 0000000..98adf55
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.oomkiller_profiler.html
@@ -0,0 +1,151 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.profiler.oomkiller_profiler</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.profiler.html"><font color="#ffffff">profiler</font></a>.oomkiller_profiler</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/profiler/oomkiller_profiler.py">telemetry/internal/platform/profiler/oomkiller_profiler.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.backends.chrome.android_browser_finder.html">telemetry.internal.backends.chrome.android_browser_finder</a><br>
+<a href="telemetry.internal.util.binary_manager.html">telemetry.internal.util.binary_manager</a><br>
+</td><td width="25%" valign=top><a href="devil.android.sdk.intent.html">devil.android.sdk.intent</a><br>
+<a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.profiler.html">telemetry.internal.platform.profiler</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.oomkiller_profiler.html#UnableToFindApplicationException">UnableToFindApplicationException</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.oomkiller_profiler.html#OOMKillerProfiler">OOMKillerProfiler</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="OOMKillerProfiler">class <strong>OOMKillerProfiler</strong></a>(<a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Android-specific;&nbsp;launches&nbsp;the&nbsp;music&nbsp;application&nbsp;and&nbsp;checks&nbsp;it&nbsp;is&nbsp;still&nbsp;alive<br>
+at&nbsp;the&nbsp;end&nbsp;of&nbsp;the&nbsp;run.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.profiler.oomkiller_profiler.html#OOMKillerProfiler">OOMKillerProfiler</a></dd>
+<dd><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="OOMKillerProfiler-CollectProfile"><strong>CollectProfile</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="OOMKillerProfiler-__init__"><strong>__init__</strong></a>(self, browser_backend, platform_backend, output_path, state)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="OOMKillerProfiler-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(cls, browser_backend, platform_backend)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="OOMKillerProfiler-is_supported"><strong>is_supported</strong></a>(cls, browser_type)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="OOMKillerProfiler-name"><strong>name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><a name="OOMKillerProfiler-CustomizeBrowserOptions"><strong>CustomizeBrowserOptions</strong></a>(cls, browser_type, options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Override&nbsp;to&nbsp;customize&nbsp;the&nbsp;Browser's&nbsp;options&nbsp;before&nbsp;it&nbsp;is&nbsp;created.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="UnableToFindApplicationException">class <strong>UnableToFindApplicationException</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt><a href="exceptions.html#Exception">Exception</a>&nbsp;when&nbsp;unable&nbsp;to&nbsp;find&nbsp;a&nbsp;launched&nbsp;application<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.profiler.oomkiller_profiler.html#UnableToFindApplicationException">UnableToFindApplicationException</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="UnableToFindApplicationException-__init__"><strong>__init__</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="UnableToFindApplicationException-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#UnableToFindApplicationException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="UnableToFindApplicationException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#UnableToFindApplicationException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="UnableToFindApplicationException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#UnableToFindApplicationException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="UnableToFindApplicationException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#UnableToFindApplicationException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="UnableToFindApplicationException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#UnableToFindApplicationException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="UnableToFindApplicationException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="UnableToFindApplicationException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#UnableToFindApplicationException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="UnableToFindApplicationException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#UnableToFindApplicationException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="UnableToFindApplicationException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="UnableToFindApplicationException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.perf_profiler.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.perf_profiler.html
new file mode 100644
index 0000000..0cea1c7
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.perf_profiler.html
@@ -0,0 +1,93 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.profiler.perf_profiler</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.profiler.html"><font color="#ffffff">profiler</font></a>.perf_profiler</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/profiler/perf_profiler.py">telemetry/internal/platform/profiler/perf_profiler.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.platform.profiler.android_profiling_helper.html">telemetry.internal.platform.profiler.android_profiling_helper</a><br>
+<a href="telemetry.internal.util.binary_manager.html">telemetry.internal.util.binary_manager</a><br>
+<a href="devil.android.device_errors.html">devil.android.device_errors</a><br>
+<a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+<a href="devil.android.perf.perf_control.html">devil.android.perf.perf_control</a><br>
+<a href="telemetry.core.platform.html">telemetry.core.platform</a><br>
+<a href="telemetry.internal.platform.profiler.html">telemetry.internal.platform.profiler</a><br>
+</td><td width="25%" valign=top><a href="re.html">re</a><br>
+<a href="signal.html">signal</a><br>
+<a href="subprocess.html">subprocess</a><br>
+<a href="sys.html">sys</a><br>
+</td><td width="25%" valign=top><a href="tempfile.html">tempfile</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.perf_profiler.html#PerfProfiler">PerfProfiler</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PerfProfiler">class <strong>PerfProfiler</strong></a>(<a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.profiler.perf_profiler.html#PerfProfiler">PerfProfiler</a></dd>
+<dd><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="PerfProfiler-CollectProfile"><strong>CollectProfile</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PerfProfiler-__init__"><strong>__init__</strong></a>(self, browser_backend, platform_backend, output_path, state)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="PerfProfiler-CustomizeBrowserOptions"><strong>CustomizeBrowserOptions</strong></a>(cls, browser_type, options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="PerfProfiler-GetTopSamples"><strong>GetTopSamples</strong></a>(cls, file_name, number)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Parses&nbsp;the&nbsp;perf&nbsp;generated&nbsp;profile&nbsp;in&nbsp;|file_name|&nbsp;and&nbsp;returns&nbsp;a<br>
+{function:&nbsp;period}&nbsp;dict&nbsp;of&nbsp;the&nbsp;|number|&nbsp;hottest&nbsp;functions.</tt></dd></dl>
+
+<dl><dt><a name="PerfProfiler-is_supported"><strong>is_supported</strong></a>(cls, browser_type)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="PerfProfiler-name"><strong>name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><a name="PerfProfiler-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(cls, browser_backend, platform_backend)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Called&nbsp;before&nbsp;the&nbsp;browser&nbsp;is&nbsp;stopped.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
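Per its docstring, PerfProfiler.GetTopSamples parses a perf-generated profile and returns a {function: period} dict of the |number| hottest functions. A hypothetical call (the profile file name is invented; only the signature and return shape come from the pydoc page) might be used like this:

```python
# Hypothetical use of PerfProfiler.GetTopSamples; 'profile.perf.data' is a
# made-up file name, and the return value is assumed to be a plain dict as
# described in the docstring.
from telemetry.internal.platform.profiler.perf_profiler import PerfProfiler

top = PerfProfiler.GetTopSamples('profile.perf.data', 10)  # {function: period}
for function, period in sorted(top.items(), key=lambda kv: kv[1], reverse=True):
  print('%s: %s' % (function, period))
```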
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.profiler_finder.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.profiler_finder.html
new file mode 100644
index 0000000..ba02cbf
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.profiler_finder.html
@@ -0,0 +1,37 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.profiler.profiler_finder</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.profiler.html"><font color="#ffffff">profiler</font></a>.profiler_finder</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/profiler/profiler_finder.py">telemetry/internal/platform/profiler/profiler_finder.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.core.discover.html">telemetry.core.discover</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.profiler.html">telemetry.internal.platform.profiler</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-FindProfiler"><strong>FindProfiler</strong></a>(name)</dt></dl>
+ <dl><dt><a name="-GetAllAvailableProfilers"><strong>GetAllAvailableProfilers</strong></a>()</dt></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
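profiler_finder exposes just two functions, FindProfiler(name) and GetAllAvailableProfilers(). A plausible lookup, assuming the name() values used by the profiler classes elsewhere in this change (the 'perf' string and the return types are assumptions, not stated on this page), looks like:

```python
# Sketch of looking up a profiler via profiler_finder.  'perf' as a profiler
# name is an assumption; GetAllAvailableProfilers() is the authoritative list.
from telemetry.internal.platform.profiler import profiler_finder

print(profiler_finder.GetAllAvailableProfilers())  # Whatever the finder reports as available.
profiler_cls = profiler_finder.FindProfiler('perf')  # Assumed to return the matching Profiler class.
```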
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.sample_profiler.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.sample_profiler.html
new file mode 100644
index 0000000..f6823e4
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.sample_profiler.html
@@ -0,0 +1,84 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.profiler.sample_profiler</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.profiler.html"><font color="#ffffff">profiler</font></a>.sample_profiler</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/profiler/sample_profiler.py">telemetry/internal/platform/profiler/sample_profiler.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+<a href="telemetry.internal.platform.profiler.html">telemetry.internal.platform.profiler</a><br>
+</td><td width="25%" valign=top><a href="signal.html">signal</a><br>
+<a href="subprocess.html">subprocess</a><br>
+</td><td width="25%" valign=top><a href="sys.html">sys</a><br>
+<a href="tempfile.html">tempfile</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.sample_profiler.html#SampleProfiler">SampleProfiler</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="SampleProfiler">class <strong>SampleProfiler</strong></a>(<a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.profiler.sample_profiler.html#SampleProfiler">SampleProfiler</a></dd>
+<dd><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="SampleProfiler-CollectProfile"><strong>CollectProfile</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="SampleProfiler-__init__"><strong>__init__</strong></a>(self, browser_backend, platform_backend, output_path, state)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="SampleProfiler-is_supported"><strong>is_supported</strong></a>(cls, browser_type)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="SampleProfiler-name"><strong>name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><a name="SampleProfiler-CustomizeBrowserOptions"><strong>CustomizeBrowserOptions</strong></a>(cls, browser_type, options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Override&nbsp;to&nbsp;customize&nbsp;the&nbsp;Browser's&nbsp;options&nbsp;before&nbsp;it&nbsp;is&nbsp;created.</tt></dd></dl>
+
+<dl><dt><a name="SampleProfiler-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(cls, browser_backend, platform_backend)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Called&nbsp;before&nbsp;the&nbsp;browser&nbsp;is&nbsp;stopped.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
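The profiler pages that follow all describe the same interface inherited from `telemetry.internal.platform.profiler.Profiler`: a constructor taking `(browser_backend, platform_backend, output_path, state)`, an instance method `CollectProfile()`, and the class methods `name()` and `is_supported(browser_type)`. A hypothetical skeleton of that interface is sketched below; the `NoOpProfiler` name and the return conventions are assumptions for illustration, not drawn from any of the documented profilers.

```python
# Hypothetical skeleton of the Profiler interface shown in these generated
# docs. Return conventions are assumptions, not the real implementations.
from telemetry.internal.platform import profiler


class NoOpProfiler(profiler.Profiler):

  def __init__(self, browser_backend, platform_backend, output_path, state):
    super(NoOpProfiler, self).__init__(
        browser_backend, platform_backend, output_path, state)

  @classmethod
  def name(cls):
    # The string a user would pass to the profiler finder.
    return 'noop'

  @classmethod
  def is_supported(cls, browser_type):
    # A real profiler gates on the platform and browser type here.
    return True

  def CollectProfile(self):
    # A real profiler stops collection and writes results derived from the
    # output_path passed to __init__; this sketch produces no output files.
    return []
```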
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.strace_profiler.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.strace_profiler.html
new file mode 100644
index 0000000..117ee86
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.strace_profiler.html
@@ -0,0 +1,87 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.profiler.strace_profiler</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.profiler.html"><font color="#ffffff">profiler</font></a>.strace_profiler</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/profiler/strace_profiler.py">telemetry/internal/platform/profiler/strace_profiler.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="json.html">json</a><br>
+<a href="logging.html">logging</a><br>
+<a href="telemetry.timeline.model.html">telemetry.timeline.model</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.profiler.html">telemetry.internal.platform.profiler</a><br>
+<a href="re.html">re</a><br>
+<a href="signal.html">signal</a><br>
+</td><td width="25%" valign=top><a href="subprocess.html">subprocess</a><br>
+<a href="sys.html">sys</a><br>
+<a href="tempfile.html">tempfile</a><br>
+</td><td width="25%" valign=top><a href="telemetry.timeline.trace_data.html">telemetry.timeline.trace_data</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.strace_profiler.html#StraceProfiler">StraceProfiler</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="StraceProfiler">class <strong>StraceProfiler</strong></a>(<a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.profiler.strace_profiler.html#StraceProfiler">StraceProfiler</a></dd>
+<dd><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="StraceProfiler-CollectProfile"><strong>CollectProfile</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="StraceProfiler-__init__"><strong>__init__</strong></a>(self, browser_backend, platform_backend, output_path, state)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="StraceProfiler-CustomizeBrowserOptions"><strong>CustomizeBrowserOptions</strong></a>(cls, browser_type, options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="StraceProfiler-is_supported"><strong>is_supported</strong></a>(cls, browser_type)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="StraceProfiler-name"><strong>name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><a name="StraceProfiler-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(cls, browser_backend, platform_backend)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Called&nbsp;before&nbsp;the&nbsp;browser&nbsp;is&nbsp;stopped.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.tcmalloc_heap_profiler.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.tcmalloc_heap_profiler.html
new file mode 100644
index 0000000..bb392e1
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.tcmalloc_heap_profiler.html
@@ -0,0 +1,82 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.profiler.tcmalloc_heap_profiler</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.profiler.html"><font color="#ffffff">profiler</font></a>.tcmalloc_heap_profiler</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/profiler/tcmalloc_heap_profiler.py">telemetry/internal/platform/profiler/tcmalloc_heap_profiler.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.backends.chrome.android_browser_finder.html">telemetry.internal.backends.chrome.android_browser_finder</a><br>
+<a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+<a href="telemetry.internal.platform.profiler.html">telemetry.internal.platform.profiler</a><br>
+</td><td width="25%" valign=top><a href="sys.html">sys</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.tcmalloc_heap_profiler.html#TCMallocHeapProfiler">TCMallocHeapProfiler</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TCMallocHeapProfiler">class <strong>TCMallocHeapProfiler</strong></a>(<a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;Factory&nbsp;to&nbsp;instantiate&nbsp;the&nbsp;platform-specific&nbsp;profiler.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.profiler.tcmalloc_heap_profiler.html#TCMallocHeapProfiler">TCMallocHeapProfiler</a></dd>
+<dd><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="TCMallocHeapProfiler-CollectProfile"><strong>CollectProfile</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TCMallocHeapProfiler-__init__"><strong>__init__</strong></a>(self, browser_backend, platform_backend, output_path, state)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="TCMallocHeapProfiler-CustomizeBrowserOptions"><strong>CustomizeBrowserOptions</strong></a>(cls, browser_type, options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="TCMallocHeapProfiler-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(cls, browser_backend, platform_backend)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="TCMallocHeapProfiler-is_supported"><strong>is_supported</strong></a>(cls, browser_type)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="TCMallocHeapProfiler-name"><strong>name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.tcpdump_profiler.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.tcpdump_profiler.html
new file mode 100644
index 0000000..4b0f44c
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.tcpdump_profiler.html
@@ -0,0 +1,87 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.profiler.tcpdump_profiler</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.profiler.html"><font color="#ffffff">profiler</font></a>.tcpdump_profiler</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/profiler/tcpdump_profiler.py">telemetry/internal/platform/profiler/tcpdump_profiler.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.platform.profiler.android_prebuilt_profiler_helper.html">telemetry.internal.platform.profiler.android_prebuilt_profiler_helper</a><br>
+<a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+<a href="telemetry.internal.platform.profiler.html">telemetry.internal.platform.profiler</a><br>
+</td><td width="25%" valign=top><a href="signal.html">signal</a><br>
+<a href="subprocess.html">subprocess</a><br>
+</td><td width="25%" valign=top><a href="sys.html">sys</a><br>
+<a href="tempfile.html">tempfile</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.tcpdump_profiler.html#TCPDumpProfiler">TCPDumpProfiler</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TCPDumpProfiler">class <strong>TCPDumpProfiler</strong></a>(<a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;Factory&nbsp;to&nbsp;instantiate&nbsp;the&nbsp;platform-specific&nbsp;profiler.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.profiler.tcpdump_profiler.html#TCPDumpProfiler">TCPDumpProfiler</a></dd>
+<dd><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="TCPDumpProfiler-CollectProfile"><strong>CollectProfile</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TCPDumpProfiler-__init__"><strong>__init__</strong></a>(self, browser_backend, platform_backend, output_path, state)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="TCPDumpProfiler-is_supported"><strong>is_supported</strong></a>(cls, browser_type)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="TCPDumpProfiler-name"><strong>name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><a name="TCPDumpProfiler-CustomizeBrowserOptions"><strong>CustomizeBrowserOptions</strong></a>(cls, browser_type, options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Override&nbsp;to&nbsp;customize&nbsp;the&nbsp;Browser's&nbsp;options&nbsp;before&nbsp;it&nbsp;is&nbsp;created.</tt></dd></dl>
+
+<dl><dt><a name="TCPDumpProfiler-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(cls, browser_backend, platform_backend)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Called&nbsp;before&nbsp;the&nbsp;browser&nbsp;is&nbsp;stopped.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.trace_profiler.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.trace_profiler.html
new file mode 100644
index 0000000..38b36e0
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.trace_profiler.html
@@ -0,0 +1,175 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.profiler.trace_profiler</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.profiler.html"><font color="#ffffff">profiler</font></a>.trace_profiler</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/profiler/trace_profiler.py">telemetry/internal/platform/profiler/trace_profiler.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="StringIO.html">StringIO</a><br>
+<a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.profiler.html">telemetry.internal.platform.profiler</a><br>
+<a href="telemetry.timeline.trace_data.html">telemetry.timeline.trace_data</a><br>
+</td><td width="25%" valign=top><a href="telemetry.timeline.tracing_options.html">telemetry.timeline.tracing_options</a><br>
+<a href="zipfile.html">zipfile</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.trace_profiler.html#TraceProfiler">TraceProfiler</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.trace_profiler.html#TraceAllProfiler">TraceAllProfiler</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.trace_profiler.html#TraceDetailedProfiler">TraceDetailedProfiler</a>
+</font></dt></dl>
+</dd>
+</dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TraceAllProfiler">class <strong>TraceAllProfiler</strong></a>(<a href="telemetry.internal.platform.profiler.trace_profiler.html#TraceProfiler">TraceProfiler</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.profiler.trace_profiler.html#TraceAllProfiler">TraceAllProfiler</a></dd>
+<dd><a href="telemetry.internal.platform.profiler.trace_profiler.html#TraceProfiler">TraceProfiler</a></dd>
+<dd><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="TraceAllProfiler-__init__"><strong>__init__</strong></a>(self, browser_backend, platform_backend, output_path, state)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="TraceAllProfiler-name"><strong>name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.profiler.trace_profiler.html#TraceProfiler">TraceProfiler</a>:<br>
+<dl><dt><a name="TraceAllProfiler-CollectProfile"><strong>CollectProfile</strong></a>(self)</dt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.platform.profiler.trace_profiler.html#TraceProfiler">TraceProfiler</a>:<br>
+<dl><dt><a name="TraceAllProfiler-is_supported"><strong>is_supported</strong></a>(cls, browser_type)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><a name="TraceAllProfiler-CustomizeBrowserOptions"><strong>CustomizeBrowserOptions</strong></a>(cls, browser_type, options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Override&nbsp;to&nbsp;customize&nbsp;the&nbsp;Browser's&nbsp;options&nbsp;before&nbsp;it&nbsp;is&nbsp;created.</tt></dd></dl>
+
+<dl><dt><a name="TraceAllProfiler-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(cls, browser_backend, platform_backend)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Called&nbsp;before&nbsp;the&nbsp;browser&nbsp;is&nbsp;stopped.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TraceDetailedProfiler">class <strong>TraceDetailedProfiler</strong></a>(<a href="telemetry.internal.platform.profiler.trace_profiler.html#TraceProfiler">TraceProfiler</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.profiler.trace_profiler.html#TraceDetailedProfiler">TraceDetailedProfiler</a></dd>
+<dd><a href="telemetry.internal.platform.profiler.trace_profiler.html#TraceProfiler">TraceProfiler</a></dd>
+<dd><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="TraceDetailedProfiler-__init__"><strong>__init__</strong></a>(self, browser_backend, platform_backend, output_path, state)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="TraceDetailedProfiler-name"><strong>name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.profiler.trace_profiler.html#TraceProfiler">TraceProfiler</a>:<br>
+<dl><dt><a name="TraceDetailedProfiler-CollectProfile"><strong>CollectProfile</strong></a>(self)</dt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.platform.profiler.trace_profiler.html#TraceProfiler">TraceProfiler</a>:<br>
+<dl><dt><a name="TraceDetailedProfiler-is_supported"><strong>is_supported</strong></a>(cls, browser_type)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><a name="TraceDetailedProfiler-CustomizeBrowserOptions"><strong>CustomizeBrowserOptions</strong></a>(cls, browser_type, options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Override&nbsp;to&nbsp;customize&nbsp;the&nbsp;Browser's&nbsp;options&nbsp;before&nbsp;it&nbsp;is&nbsp;created.</tt></dd></dl>
+
+<dl><dt><a name="TraceDetailedProfiler-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(cls, browser_backend, platform_backend)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Called&nbsp;before&nbsp;the&nbsp;browser&nbsp;is&nbsp;stopped.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TraceProfiler">class <strong>TraceProfiler</strong></a>(<a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.profiler.trace_profiler.html#TraceProfiler">TraceProfiler</a></dd>
+<dd><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="TraceProfiler-CollectProfile"><strong>CollectProfile</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TraceProfiler-__init__"><strong>__init__</strong></a>(self, browser_backend, platform_backend, output_path, state, categories<font color="#909090">=None</font>)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="TraceProfiler-is_supported"><strong>is_supported</strong></a>(cls, browser_type)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="TraceProfiler-name"><strong>name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><a name="TraceProfiler-CustomizeBrowserOptions"><strong>CustomizeBrowserOptions</strong></a>(cls, browser_type, options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Override&nbsp;to&nbsp;customize&nbsp;the&nbsp;Browser's&nbsp;options&nbsp;before&nbsp;it&nbsp;is&nbsp;created.</tt></dd></dl>
+
+<dl><dt><a name="TraceProfiler-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(cls, browser_backend, platform_backend)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Called&nbsp;before&nbsp;the&nbsp;browser&nbsp;is&nbsp;stopped.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.v8_profiler.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.v8_profiler.html
new file mode 100644
index 0000000..ff563d4
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.v8_profiler.html
@@ -0,0 +1,83 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.profiler.v8_profiler</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.profiler.html"><font color="#ffffff">profiler</font></a>.v8_profiler</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/profiler/v8_profiler.py">telemetry/internal/platform/profiler/v8_profiler.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.profiler.html">telemetry.internal.platform.profiler</a><br>
+</td><td width="25%" valign=top><a href="re.html">re</a><br>
+</td><td width="25%" valign=top><a href="tempfile.html">tempfile</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.v8_profiler.html#V8Profiler">V8Profiler</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="V8Profiler">class <strong>V8Profiler</strong></a>(<a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.profiler.v8_profiler.html#V8Profiler">V8Profiler</a></dd>
+<dd><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="V8Profiler-CollectProfile"><strong>CollectProfile</strong></a>(self)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="V8Profiler-CustomizeBrowserOptions"><strong>CustomizeBrowserOptions</strong></a>(cls, browser_type, options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="V8Profiler-is_supported"><strong>is_supported</strong></a>(cls, browser_type)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="V8Profiler-name"><strong>name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><a name="V8Profiler-__init__"><strong>__init__</strong></a>(self, browser_backend, platform_backend, output_path, state)</dt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><a name="V8Profiler-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(cls, browser_backend, platform_backend)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Called&nbsp;before&nbsp;the&nbsp;browser&nbsp;is&nbsp;stopped.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.vtune_profiler.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.vtune_profiler.html
new file mode 100644
index 0000000..ebafcb6
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.vtune_profiler.html
@@ -0,0 +1,85 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.profiler.vtune_profiler</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.profiler.html"><font color="#ffffff">profiler</font></a>.vtune_profiler</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/profiler/vtune_profiler.py">telemetry/internal/platform/profiler/vtune_profiler.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.platform.profiler.android_profiling_helper.html">telemetry.internal.platform.profiler.android_profiling_helper</a><br>
+<a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+<a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.profiler.html">telemetry.internal.platform.profiler</a><br>
+<a href="subprocess.html">subprocess</a><br>
+</td><td width="25%" valign=top><a href="sys.html">sys</a><br>
+<a href="tempfile.html">tempfile</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.vtune_profiler.html#VTuneProfiler">VTuneProfiler</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="VTuneProfiler">class <strong>VTuneProfiler</strong></a>(<a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.profiler.vtune_profiler.html#VTuneProfiler">VTuneProfiler</a></dd>
+<dd><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="VTuneProfiler-CollectProfile"><strong>CollectProfile</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="VTuneProfiler-__init__"><strong>__init__</strong></a>(self, browser_backend, platform_backend, output_path, state)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="VTuneProfiler-CustomizeBrowserOptions"><strong>CustomizeBrowserOptions</strong></a>(cls, browser_type, options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="VTuneProfiler-is_supported"><strong>is_supported</strong></a>(cls, browser_type)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="VTuneProfiler-name"><strong>name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><a name="VTuneProfiler-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(cls, browser_backend, platform_backend)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Called&nbsp;before&nbsp;the&nbsp;browser&nbsp;is&nbsp;stopped.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.win_pgo_profiler.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.win_pgo_profiler.html
new file mode 100644
index 0000000..09063e4
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiler.win_pgo_profiler.html
@@ -0,0 +1,85 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.profiler.win_pgo_profiler</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.profiler.html"><font color="#ffffff">profiler</font></a>.win_pgo_profiler</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/profiler/win_pgo_profiler.py">telemetry/internal/platform/profiler/win_pgo_profiler.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="glob.html">glob</a><br>
+<a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.profiler.html">telemetry.internal.platform.profiler</a><br>
+<a href="subprocess.html">subprocess</a><br>
+</td><td width="25%" valign=top><a href="sys.html">sys</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiler.win_pgo_profiler.html#WinPGOProfiler">WinPGOProfiler</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="WinPGOProfiler">class <strong>WinPGOProfiler</strong></a>(<a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;profiler&nbsp;that&nbsp;runs&nbsp;the&nbsp;Visual&nbsp;Studio&nbsp;PGO&nbsp;utility&nbsp;'pgosweep.exe'&nbsp;before<br>
+terminating&nbsp;a&nbsp;browser&nbsp;or&nbsp;a&nbsp;renderer&nbsp;process.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.profiler.win_pgo_profiler.html#WinPGOProfiler">WinPGOProfiler</a></dd>
+<dd><a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="WinPGOProfiler-CollectProfile"><strong>CollectProfile</strong></a>(self)</dt><dd><tt>Collect&nbsp;the&nbsp;profile&nbsp;data&nbsp;for&nbsp;the&nbsp;current&nbsp;processes.</tt></dd></dl>
+
+<dl><dt><a name="WinPGOProfiler-__init__"><strong>__init__</strong></a>(self, browser_backend, platform_backend, output_path, state)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="WinPGOProfiler-CustomizeBrowserOptions"><strong>CustomizeBrowserOptions</strong></a>(cls, browser_type, options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="WinPGOProfiler-is_supported"><strong>is_supported</strong></a>(cls, browser_type)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="WinPGOProfiler-name"><strong>name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><a name="WinPGOProfiler-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(cls, browser_backend, platform_backend)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Called&nbsp;before&nbsp;the&nbsp;browser&nbsp;is&nbsp;stopped.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.profiler.html#Profiler">telemetry.internal.platform.profiler.Profiler</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiling_controller_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiling_controller_backend.html
new file mode 100644
index 0000000..b0abf19
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.profiling_controller_backend.html
@@ -0,0 +1,68 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.profiling_controller_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.profiling_controller_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/profiling_controller_backend.py">telemetry/internal/platform/profiling_controller_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.platform.profiler.profiler_finder.html">telemetry.internal.platform.profiler.profiler_finder</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.profiling_controller_backend.html#ProfilingControllerBackend">ProfilingControllerBackend</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ProfilingControllerBackend">class <strong>ProfilingControllerBackend</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="ProfilingControllerBackend-Start"><strong>Start</strong></a>(self, profiler_name, base_output_file)</dt><dd><tt>Starts&nbsp;profiling&nbsp;using&nbsp;|profiler_name|.&nbsp;Results&nbsp;are&nbsp;saved&nbsp;to<br>
+|base_output_file|.&lt;process_name&gt;.</tt></dd></dl>
+
+<dl><dt><a name="ProfilingControllerBackend-Stop"><strong>Stop</strong></a>(self)</dt><dd><tt>Stops&nbsp;all&nbsp;active&nbsp;profilers&nbsp;and&nbsp;saves&nbsp;their&nbsp;results.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;list&nbsp;of&nbsp;filenames&nbsp;produced&nbsp;by&nbsp;the&nbsp;profiler.</tt></dd></dl>
+
+<dl><dt><a name="ProfilingControllerBackend-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ProfilingControllerBackend-__init__"><strong>__init__</strong></a>(self, platform_backend, browser_backend)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
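The generated page above gives the whole public surface of `ProfilingControllerBackend`: `Start(profiler_name, base_output_file)` begins profiling and writes results to `<base_output_file>.<process_name>`, and `Stop()` ends every active profiler and returns the output filenames. The following is only a usage sketch; the `platform_backend` and `browser_backend` objects are assumed to be built elsewhere by telemetry and are not part of this diff.

```python
# Sketch only: `platform_backend` and `browser_backend` are assumed to exist;
# only the method signatures come from the pydoc above.
from telemetry.internal.platform.profiling_controller_backend import (
    ProfilingControllerBackend)


def run_profiling_session(platform_backend, browser_backend, profiler_name,
                          base_output_file):
  controller = ProfilingControllerBackend(platform_backend, browser_backend)
  # Start() begins profiling with the named profiler; results go to
  # <base_output_file>.<process_name> per the docstring above.
  controller.Start(profiler_name, base_output_file)
  try:
    pass  # ... exercise the browser here ...
  finally:
    # Stop() halts all active profilers and returns the produced filenames.
    output_files = controller.Stop()
  return output_files
```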
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.system_info.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.system_info.html
new file mode 100644
index 0000000..d20e988
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.system_info.html
@@ -0,0 +1,81 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.system_info</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.system_info</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/system_info.py">telemetry/internal/platform/system_info.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.platform.gpu_info.html">telemetry.internal.platform.gpu_info</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.system_info.html#SystemInfo">SystemInfo</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="SystemInfo">class <strong>SystemInfo</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Provides&nbsp;low-level&nbsp;system&nbsp;information.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="SystemInfo-__init__"><strong>__init__</strong></a>(self, model_name, gpu_dict)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="SystemInfo-FromDict"><strong>FromDict</strong></a>(cls, attrs)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Constructs&nbsp;a&nbsp;<a href="#SystemInfo">SystemInfo</a>&nbsp;from&nbsp;a&nbsp;dictionary&nbsp;of&nbsp;attributes.<br>
+Attributes&nbsp;currently&nbsp;required&nbsp;to&nbsp;be&nbsp;present&nbsp;in&nbsp;the&nbsp;dictionary:<br>
+&nbsp;<br>
+&nbsp;&nbsp;model_name&nbsp;(string):&nbsp;a&nbsp;platform-dependent&nbsp;string<br>
+&nbsp;&nbsp;&nbsp;&nbsp;describing&nbsp;the&nbsp;model&nbsp;of&nbsp;machine,&nbsp;or&nbsp;the&nbsp;empty&nbsp;string&nbsp;if&nbsp;not<br>
+&nbsp;&nbsp;&nbsp;&nbsp;supported.<br>
+&nbsp;&nbsp;gpu&nbsp;(<a href="__builtin__.html#object">object</a>&nbsp;containing&nbsp;GPUInfo's&nbsp;required&nbsp;attributes)</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>gpu</strong></dt>
+<dd><tt>A&nbsp;GPUInfo&nbsp;object&nbsp;describing&nbsp;the&nbsp;graphics&nbsp;processor(s)&nbsp;on&nbsp;the&nbsp;system.</tt></dd>
+</dl>
+<dl><dt><strong>model_name</strong></dt>
+<dd><tt>A&nbsp;string&nbsp;describing&nbsp;the&nbsp;machine&nbsp;model.<br>
+&nbsp;<br>
+This&nbsp;is&nbsp;a&nbsp;highly&nbsp;platform-dependent&nbsp;value&nbsp;and&nbsp;not&nbsp;currently<br>
+specified&nbsp;for&nbsp;any&nbsp;machine&nbsp;type&nbsp;aside&nbsp;from&nbsp;Macs.&nbsp;On&nbsp;Mac&nbsp;OS,&nbsp;this<br>
+is&nbsp;the&nbsp;model&nbsp;identifier,&nbsp;reformatted&nbsp;slightly;&nbsp;for&nbsp;example,<br>
+'MacBookPro&nbsp;10.1'.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
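`SystemInfo.FromDict` requires `model_name` and `gpu` keys, as documented above. The sketch below illustrates that contract; the contents of the `gpu` dictionary are only an assumption here, since GPUInfo's required attributes are defined elsewhere and not shown in this diff.

```python
# Sketch only: the 'gpu' payload is illustrative -- its required fields belong
# to GPUInfo, which is not documented in this diff.
from telemetry.internal.platform import system_info

attrs = {
    'model_name': 'MacBookPro 10.1',  # platform-dependent string per the docstring
    'gpu': {
        'devices': [  # hypothetical device entry; field names are assumptions
            {'vendor_id': 0x10de, 'device_id': 0x0de1,
             'vendor_string': 'NVIDIA', 'device_string': 'GeForce GT 530'},
        ],
    },
}

info = system_info.SystemInfo.FromDict(attrs)
print(info.model_name)  # 'MacBookPro 10.1'
print(info.gpu)         # a GPUInfo object describing the system's GPU(s)
```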
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.tracing_agent.chrome_tracing_agent.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.tracing_agent.chrome_tracing_agent.html
new file mode 100644
index 0000000..150718c
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.tracing_agent.chrome_tracing_agent.html
@@ -0,0 +1,211 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.tracing_agent.chrome_tracing_agent</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.tracing_agent.html"><font color="#ffffff">tracing_agent</font></a>.chrome_tracing_agent</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/tracing_agent/chrome_tracing_agent.py">telemetry/internal/platform/tracing_agent/chrome_tracing_agent.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.platform.tracing_agent.chrome_tracing_devtools_manager.html">telemetry.internal.platform.tracing_agent.chrome_tracing_devtools_manager</a><br>
+<a href="os.html">os</a><br>
+<a href="shutil.html">shutil</a><br>
+</td><td width="25%" valign=top><a href="stat.html">stat</a><br>
+<a href="sys.html">sys</a><br>
+<a href="tempfile.html">tempfile</a><br>
+</td><td width="25%" valign=top><a href="traceback.html">traceback</a><br>
+<a href="telemetry.internal.platform.tracing_agent.html">telemetry.internal.platform.tracing_agent</a><br>
+<a href="telemetry.timeline.tracing_config.html">telemetry.timeline.tracing_config</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.tracing_agent.chrome_tracing_agent.html#ChromeTracingStartedError">ChromeTracingStartedError</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.internal.platform.tracing_agent.chrome_tracing_agent.html#ChromeTracingStoppedError">ChromeTracingStoppedError</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.tracing_agent.html#TracingAgent">telemetry.internal.platform.tracing_agent.TracingAgent</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.tracing_agent.chrome_tracing_agent.html#ChromeTracingAgent">ChromeTracingAgent</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ChromeTracingAgent">class <strong>ChromeTracingAgent</strong></a>(<a href="telemetry.internal.platform.tracing_agent.html#TracingAgent">telemetry.internal.platform.tracing_agent.TracingAgent</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.tracing_agent.chrome_tracing_agent.html#ChromeTracingAgent">ChromeTracingAgent</a></dd>
+<dd><a href="telemetry.internal.platform.tracing_agent.html#TracingAgent">telemetry.internal.platform.tracing_agent.TracingAgent</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="ChromeTracingAgent-Start"><strong>Start</strong></a>(self, trace_options, category_filter, timeout)</dt></dl>
+
+<dl><dt><a name="ChromeTracingAgent-Stop"><strong>Stop</strong></a>(self, trace_data_builder)</dt></dl>
+
+<dl><dt><a name="ChromeTracingAgent-__init__"><strong>__init__</strong></a>(self, platform_backend)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="ChromeTracingAgent-IsStartupTracingSupported"><strong>IsStartupTracingSupported</strong></a>(cls, platform_backend)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="ChromeTracingAgent-IsSupported"><strong>IsSupported</strong></a>(cls, platform_backend)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>trace_config</strong></dt>
+</dl>
+<dl><dt><strong>trace_config_file</strong></dt>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.tracing_agent.html#TracingAgent">telemetry.internal.platform.tracing_agent.TracingAgent</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ChromeTracingStartedError">class <strong>ChromeTracingStartedError</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.tracing_agent.chrome_tracing_agent.html#ChromeTracingStartedError">ChromeTracingStartedError</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="ChromeTracingStartedError-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#ChromeTracingStartedError-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#ChromeTracingStartedError-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="ChromeTracingStartedError-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ChromeTracingStartedError-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ChromeTracingStartedError-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#ChromeTracingStartedError-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ChromeTracingStartedError-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#ChromeTracingStartedError-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="ChromeTracingStartedError-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#ChromeTracingStartedError-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="ChromeTracingStartedError-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ChromeTracingStartedError-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#ChromeTracingStartedError-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="ChromeTracingStartedError-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ChromeTracingStartedError-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="ChromeTracingStartedError-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ChromeTracingStartedError-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#ChromeTracingStartedError-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="ChromeTracingStartedError-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ChromeTracingStoppedError">class <strong>ChromeTracingStoppedError</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.tracing_agent.chrome_tracing_agent.html#ChromeTracingStoppedError">ChromeTracingStoppedError</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="ChromeTracingStoppedError-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#ChromeTracingStoppedError-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#ChromeTracingStoppedError-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="ChromeTracingStoppedError-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ChromeTracingStoppedError-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ChromeTracingStoppedError-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#ChromeTracingStoppedError-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ChromeTracingStoppedError-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#ChromeTracingStoppedError-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="ChromeTracingStoppedError-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#ChromeTracingStoppedError-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="ChromeTracingStoppedError-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ChromeTracingStoppedError-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#ChromeTracingStoppedError-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="ChromeTracingStoppedError-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ChromeTracingStoppedError-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="ChromeTracingStoppedError-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ChromeTracingStoppedError-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#ChromeTracingStoppedError-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="ChromeTracingStoppedError-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
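A hedged sketch of how `ChromeTracingAgent` might be driven, based solely on the signatures in the page above. The `platform_backend`, `trace_options`, `category_filter`, and `trace_data_builder` objects are supplied by the surrounding telemetry machinery and are assumptions here; the conditions under which `ChromeTracingStartedError` and `ChromeTracingStoppedError` are raised are not documented on this page.

```python
# Sketch only: supporting objects are assumed; signatures come from the pydoc.
from telemetry.internal.platform.tracing_agent import chrome_tracing_agent


def trace_chrome(platform_backend, trace_options, category_filter,
                 trace_data_builder, timeout=10):
  # Check support before constructing, mirroring the IsSupported classmethod.
  if not chrome_tracing_agent.ChromeTracingAgent.IsSupported(platform_backend):
    return False
  agent = chrome_tracing_agent.ChromeTracingAgent(platform_backend)
  agent.Start(trace_options, category_filter, timeout)
  try:
    pass  # ... workload under trace ...
  finally:
    # This module defines ChromeTracingStartedError/StoppedError for its
    # start/stop paths; exact trigger conditions are not documented above.
    agent.Stop(trace_data_builder)
  return True
```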
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.tracing_agent.chrome_tracing_devtools_manager.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.tracing_agent.chrome_tracing_devtools_manager.html
new file mode 100644
index 0000000..ef6e46e
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.tracing_agent.chrome_tracing_devtools_manager.html
@@ -0,0 +1,30 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.tracing_agent.chrome_tracing_devtools_manager</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.tracing_agent.html"><font color="#ffffff">tracing_agent</font></a>.chrome_tracing_devtools_manager</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/tracing_agent/chrome_tracing_devtools_manager.py">telemetry/internal/platform/tracing_agent/chrome_tracing_devtools_manager.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-GetActiveDevToolsClients"><strong>GetActiveDevToolsClients</strong></a>(platform_backend)</dt><dd><tt>Get&nbsp;DevTools&nbsp;clients&nbsp;that&nbsp;are&nbsp;still&nbsp;connectable.</tt></dd></dl>
+ <dl><dt><a name="-GetDevToolsClients"><strong>GetDevToolsClients</strong></a>(platform_backend)</dt><dd><tt>Get&nbsp;DevTools&nbsp;clients&nbsp;including&nbsp;the&nbsp;ones&nbsp;that&nbsp;are&nbsp;no&nbsp;longer&nbsp;connectable.</tt></dd></dl>
+ <dl><dt><a name="-IsSupported"><strong>IsSupported</strong></a>(platform_backend)</dt></dl>
+ <dl><dt><a name="-RegisterDevToolsClient"><strong>RegisterDevToolsClient</strong></a>(devtools_client_backend, platform_backend)</dt><dd><tt>Register&nbsp;DevTools&nbsp;client<br>
+&nbsp;<br>
+This&nbsp;should&nbsp;only&nbsp;be&nbsp;called&nbsp;from&nbsp;DevToolsClientBackend&nbsp;when&nbsp;it&nbsp;is&nbsp;initialized.</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
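The module above exposes two query helpers: `GetDevToolsClients` returns all registered clients, including ones that can no longer be reached, while `GetActiveDevToolsClients` returns only the connectable ones. A small sketch contrasting them, with `platform_backend` again assumed to come from the running platform:

```python
# Sketch only: `platform_backend` is assumed; the helpers are those documented above.
from telemetry.internal.platform.tracing_agent import (
    chrome_tracing_devtools_manager)


def count_stale_devtools_clients(platform_backend):
  if not chrome_tracing_devtools_manager.IsSupported(platform_backend):
    return 0
  all_clients = chrome_tracing_devtools_manager.GetDevToolsClients(
      platform_backend)     # includes clients that are no longer connectable
  active_clients = chrome_tracing_devtools_manager.GetActiveDevToolsClients(
      platform_backend)     # only clients that still respond
  return len(all_clients) - len(active_clients)
```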
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.tracing_agent.display_tracing_agent.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.tracing_agent.display_tracing_agent.html
new file mode 100644
index 0000000..1707a83
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.tracing_agent.display_tracing_agent.html
@@ -0,0 +1,73 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.tracing_agent.display_tracing_agent</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.<a href="telemetry.internal.platform.tracing_agent.html"><font color="#ffffff">tracing_agent</font></a>.display_tracing_agent</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/tracing_agent/display_tracing_agent.py">telemetry/internal/platform/tracing_agent/display_tracing_agent.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.timeline.trace_data.html">telemetry.timeline.trace_data</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.tracing_agent.html">telemetry.internal.platform.tracing_agent</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.tracing_agent.html#TracingAgent">telemetry.internal.platform.tracing_agent.TracingAgent</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.tracing_agent.display_tracing_agent.html#DisplayTracingAgent">DisplayTracingAgent</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="DisplayTracingAgent">class <strong>DisplayTracingAgent</strong></a>(<a href="telemetry.internal.platform.tracing_agent.html#TracingAgent">telemetry.internal.platform.tracing_agent.TracingAgent</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.tracing_agent.display_tracing_agent.html#DisplayTracingAgent">DisplayTracingAgent</a></dd>
+<dd><a href="telemetry.internal.platform.tracing_agent.html#TracingAgent">telemetry.internal.platform.tracing_agent.TracingAgent</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="DisplayTracingAgent-Start"><strong>Start</strong></a>(self, trace_options, category_filter, _timeout)</dt></dl>
+
+<dl><dt><a name="DisplayTracingAgent-Stop"><strong>Stop</strong></a>(self, trace_data_builder)</dt></dl>
+
+<dl><dt><a name="DisplayTracingAgent-__init__"><strong>__init__</strong></a>(self, platform_backend)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="DisplayTracingAgent-IsSupported"><strong>IsSupported</strong></a>(cls, platform_backend)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.tracing_agent.html#TracingAgent">telemetry.internal.platform.tracing_agent.TracingAgent</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.tracing_agent.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.tracing_agent.html
new file mode 100644
index 0000000..dbf6213
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.tracing_agent.html
@@ -0,0 +1,96 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.internal.platform.tracing_agent</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.tracing_agent</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/tracing_agent/__init__.py">telemetry/internal/platform/tracing_agent/__init__.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.platform.tracing_agent.chrome_tracing_agent.html">chrome_tracing_agent</a><br>
+<a href="telemetry.internal.platform.tracing_agent.chrome_tracing_agent_unittest.html">chrome_tracing_agent_unittest</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.tracing_agent.chrome_tracing_devtools_manager.html">chrome_tracing_devtools_manager</a><br>
+<a href="telemetry.internal.platform.tracing_agent.display_tracing_agent.html">display_tracing_agent</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.tracing_agent.display_tracing_agent_unittest.html">display_tracing_agent_unittest</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.tracing_agent.html#TracingAgent">TracingAgent</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TracingAgent">class <strong>TracingAgent</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;tracing&nbsp;agent&nbsp;provided&nbsp;by&nbsp;the&nbsp;platform.<br>
+&nbsp;<br>
+A&nbsp;tracing&nbsp;agent&nbsp;can&nbsp;gather&nbsp;data&nbsp;with&nbsp;<a href="#TracingAgent-Start">Start</a>()&nbsp;until&nbsp;<a href="#TracingAgent-Stop">Stop</a>().<br>
+Before&nbsp;constructing&nbsp;a&nbsp;<a href="#TracingAgent">TracingAgent</a>,&nbsp;check&nbsp;whether&nbsp;it's&nbsp;supported&nbsp;on&nbsp;the<br>
+platform&nbsp;with&nbsp;the&nbsp;IsSupported&nbsp;method&nbsp;first.<br>
+&nbsp;<br>
+NOTE:&nbsp;All&nbsp;subclasses&nbsp;of&nbsp;<a href="#TracingAgent">TracingAgent</a>&nbsp;must&nbsp;not&nbsp;change&nbsp;the&nbsp;constructor's<br>
+parameters&nbsp;so&nbsp;the&nbsp;agents&nbsp;can&nbsp;be&nbsp;dynamically&nbsp;constructed&nbsp;in<br>
+tracing_controller_backend.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="TracingAgent-Start"><strong>Start</strong></a>(self, trace_options, category_filter, timeout)</dt><dd><tt>Override&nbsp;to&nbsp;add&nbsp;tracing&nbsp;agent's&nbsp;custom&nbsp;logic&nbsp;to&nbsp;start&nbsp;tracing.<br>
+&nbsp;<br>
+Depending&nbsp;on&nbsp;trace_options&nbsp;and&nbsp;category_filter,&nbsp;the&nbsp;tracing&nbsp;agent&nbsp;may&nbsp;choose<br>
+to&nbsp;start&nbsp;or&nbsp;not&nbsp;start&nbsp;tracing.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;trace_options:&nbsp;an&nbsp;instance&nbsp;of&nbsp;tracing_options.TracingOptions&nbsp;that<br>
+&nbsp;&nbsp;&nbsp;&nbsp;controls&nbsp;which&nbsp;core&nbsp;tracing&nbsp;systems&nbsp;should&nbsp;be&nbsp;enabled.<br>
+&nbsp;&nbsp;category_filter:&nbsp;an&nbsp;instance&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;tracing_category_filter.TracingCategoryFilter<br>
+&nbsp;&nbsp;timeout:&nbsp;number&nbsp;of&nbsp;seconds&nbsp;that&nbsp;this&nbsp;tracing&nbsp;agent&nbsp;should&nbsp;try&nbsp;to&nbsp;start<br>
+&nbsp;&nbsp;&nbsp;&nbsp;tracing&nbsp;before&nbsp;timing&nbsp;out.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;True&nbsp;if&nbsp;the&nbsp;tracing&nbsp;agent&nbsp;started&nbsp;successfully.</tt></dd></dl>
+
+<dl><dt><a name="TracingAgent-Stop"><strong>Stop</strong></a>(self, trace_data_builder)</dt><dd><tt>Override&nbsp;to&nbsp;add&nbsp;tracing&nbsp;agent's&nbsp;custom&nbsp;logic&nbsp;to&nbsp;stop&nbsp;tracing.<br>
+&nbsp;<br>
+<a href="#TracingAgent-Stop">Stop</a>()&nbsp;should&nbsp;guarantee&nbsp;tracing&nbsp;is&nbsp;stopped,&nbsp;even&nbsp;if&nbsp;there&nbsp;may&nbsp;be&nbsp;exception.</tt></dd></dl>
+
+<dl><dt><a name="TracingAgent-__init__"><strong>__init__</strong></a>(self, platform_backend)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="TracingAgent-IsSupported"><strong>IsSupported</strong></a>(cls, _platform_backend)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
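The `TracingAgent` base class above spells out the subclass contract: keep the `(platform_backend)` constructor signature so agents can be built dynamically, expose `IsSupported`, return `True` from `Start()` on success, and make sure `Stop()` leaves tracing stopped. A hypothetical no-op agent illustrating that contract (it is not part of telemetry itself):

```python
# Sketch only: a hypothetical agent showing the contract documented above.
from telemetry.internal.platform import tracing_agent


class NullTracingAgent(tracing_agent.TracingAgent):
  # Per the class docstring, subclasses must not change the constructor's
  # parameters so tracing_controller_backend can construct them dynamically.
  def __init__(self, platform_backend):
    super(NullTracingAgent, self).__init__(platform_backend)
    self._running = False

  @classmethod
  def IsSupported(cls, _platform_backend):
    return True  # callers are expected to check this before constructing

  def Start(self, trace_options, category_filter, timeout):
    del trace_options, category_filter, timeout  # unused in this sketch
    self._running = True
    return True  # True means the agent started successfully

  def Stop(self, trace_data_builder):
    del trace_data_builder      # a real agent would add its trace data here
    self._running = False       # Stop() must leave tracing stopped, even on error
```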
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.tracing_controller_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.tracing_controller_backend.html
new file mode 100644
index 0000000..1059365
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.tracing_controller_backend.html
@@ -0,0 +1,143 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.tracing_controller_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.tracing_controller_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/tracing_controller_backend.py">telemetry/internal/platform/tracing_controller_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.platform.tracing_agent.chrome_tracing_agent.html">telemetry.internal.platform.tracing_agent.chrome_tracing_agent</a><br>
+<a href="telemetry.core.discover.html">telemetry.core.discover</a><br>
+<a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="sys.html">sys</a><br>
+<a href="telemetry.timeline.trace_data.html">telemetry.timeline.trace_data</a><br>
+<a href="traceback.html">traceback</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.tracing_agent.html">telemetry.internal.platform.tracing_agent</a><br>
+<a href="telemetry.timeline.tracing_category_filter.html">telemetry.timeline.tracing_category_filter</a><br>
+<a href="telemetry.timeline.tracing_options.html">telemetry.timeline.tracing_options</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.tracing_controller_backend.html#TracingControllerBackend">TracingControllerBackend</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.tracing_controller_backend.html#TracingControllerStoppedError">TracingControllerStoppedError</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TracingControllerBackend">class <strong>TracingControllerBackend</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="TracingControllerBackend-GetChromeTraceConfig"><strong>GetChromeTraceConfig</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TracingControllerBackend-GetChromeTraceConfigFile"><strong>GetChromeTraceConfigFile</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TracingControllerBackend-IsChromeTracingSupported"><strong>IsChromeTracingSupported</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TracingControllerBackend-Start"><strong>Start</strong></a>(self, trace_options, category_filter, timeout)</dt></dl>
+
+<dl><dt><a name="TracingControllerBackend-Stop"><strong>Stop</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TracingControllerBackend-__init__"><strong>__init__</strong></a>(self, platform_backend)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>is_tracing_running</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TracingControllerStoppedError">class <strong>TracingControllerStoppedError</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.tracing_controller_backend.html#TracingControllerStoppedError">TracingControllerStoppedError</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="TracingControllerStoppedError-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingControllerStoppedError-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#TracingControllerStoppedError-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="TracingControllerStoppedError-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingControllerStoppedError-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="TracingControllerStoppedError-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingControllerStoppedError-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="TracingControllerStoppedError-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingControllerStoppedError-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="TracingControllerStoppedError-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingControllerStoppedError-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="TracingControllerStoppedError-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="TracingControllerStoppedError-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingControllerStoppedError-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="TracingControllerStoppedError-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingControllerStoppedError-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="TracingControllerStoppedError-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="TracingControllerStoppedError-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#TracingControllerStoppedError-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="TracingControllerStoppedError-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
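`TracingControllerBackend` mirrors the agent interface at the controller level: `Start(trace_options, category_filter, timeout)`, `Stop()`, and the `is_tracing_running` descriptor. A brief usage sketch; `platform_backend`, `trace_options`, and `category_filter` are assumed to be built elsewhere (e.g. via the tracing_options and tracing_category_filter modules this file imports), and the return value of `Stop()` is not documented on the page above.

```python
# Sketch only: supporting objects are assumed; signatures come from the pydoc.
from telemetry.internal.platform import tracing_controller_backend


def capture_trace(platform_backend, trace_options, category_filter, timeout=30):
  controller = tracing_controller_backend.TracingControllerBackend(
      platform_backend)
  controller.Start(trace_options, category_filter, timeout)
  assert controller.is_tracing_running  # descriptor documented above
  try:
    pass  # ... drive the workload being traced ...
  finally:
    controller.Stop()  # the pydoc above does not document a return value
```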
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.trybot_device.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.trybot_device.html
new file mode 100644
index 0000000..34b3388
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.trybot_device.html
@@ -0,0 +1,80 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.trybot_device</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.trybot_device</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/trybot_device.py">telemetry/internal/platform/trybot_device.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.platform.device.html">telemetry.internal.platform.device</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.device.html#Device">telemetry.internal.platform.device.Device</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.trybot_device.html#TrybotDevice">TrybotDevice</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TrybotDevice">class <strong>TrybotDevice</strong></a>(<a href="telemetry.internal.platform.device.html#Device">telemetry.internal.platform.device.Device</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.trybot_device.html#TrybotDevice">TrybotDevice</a></dd>
+<dd><a href="telemetry.internal.platform.device.html#Device">telemetry.internal.platform.device.Device</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="TrybotDevice-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="TrybotDevice-GetAllConnectedDevices"><strong>GetAllConnectedDevices</strong></a>(cls, blacklist)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.device.html#Device">telemetry.internal.platform.device.Device</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>guid</strong></dt>
+</dl>
+<dl><dt><strong>name</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-FindAllAvailableDevices"><strong>FindAllAvailableDevices</strong></a>(_)</dt><dd><tt>Returns&nbsp;a&nbsp;list&nbsp;of&nbsp;available&nbsp;devices.</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
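For completeness, a tiny sketch of the module-level helper documented above. Its single argument is named `_` in the signature, so it appears to be ignored; passing `None` here is only a placeholder assumption.

```python
# Sketch only: the argument to FindAllAvailableDevices looks unused (`_`),
# so None is passed purely as a placeholder.
from telemetry.internal.platform import trybot_device

devices = trybot_device.FindAllAvailableDevices(None)
for device in devices:
  print(device.name)  # `name` is a data descriptor inherited from Device (see above)
```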
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.platform.win_platform_backend.html b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.win_platform_backend.html
new file mode 100644
index 0000000..d08b9ea
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.platform.win_platform_backend.html
@@ -0,0 +1,261 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.platform.win_platform_backend</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.platform.html"><font color="#ffffff">platform</font></a>.win_platform_backend</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/platform/win_platform_backend.py">telemetry/internal/platform/win_platform_backend.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="atexit.html">atexit</a><br>
+<a href="catapult_base.cloud_storage.html">catapult_base.cloud_storage</a><br>
+<a href="collections.html">collections</a><br>
+<a href="contextlib.html">contextlib</a><br>
+<a href="ctypes.html">ctypes</a><br>
+<a href="telemetry.decorators.html">telemetry.decorators</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.desktop_platform_backend.html">telemetry.internal.platform.desktop_platform_backend</a><br>
+<a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+<a href="logging.html">logging</a><br>
+<a href="telemetry.internal.platform.power_monitor.msr_power_monitor.html">telemetry.internal.platform.power_monitor.msr_power_monitor</a><br>
+<a href="os.html">os</a><br>
+<a href="telemetry.core.os_version.html">telemetry.core.os_version</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.util.path.html">telemetry.internal.util.path</a><br>
+<a href="platform.html">platform</a><br>
+<a href="re.html">re</a><br>
+<a href="socket.html">socket</a><br>
+<a href="struct.html">struct</a><br>
+<a href="subprocess.html">subprocess</a><br>
+</td><td width="25%" valign=top><a href="sys.html">sys</a><br>
+<a href="time.html">time</a><br>
+<a href="telemetry.core.util.html">telemetry.core.util</a><br>
+<a href="zipfile.html">zipfile</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.desktop_platform_backend.html#DesktopPlatformBackend">telemetry.internal.platform.desktop_platform_backend.DesktopPlatformBackend</a>(<a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.win_platform_backend.html#WinPlatformBackend">WinPlatformBackend</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="WinPlatformBackend">class <strong>WinPlatformBackend</strong></a>(<a href="telemetry.internal.platform.desktop_platform_backend.html#DesktopPlatformBackend">telemetry.internal.platform.desktop_platform_backend.DesktopPlatformBackend</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.platform.win_platform_backend.html#WinPlatformBackend">WinPlatformBackend</a></dd>
+<dd><a href="telemetry.internal.platform.desktop_platform_backend.html#DesktopPlatformBackend">telemetry.internal.platform.desktop_platform_backend.DesktopPlatformBackend</a></dd>
+<dd><a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="WinPlatformBackend-CanFlushIndividualFilesFromSystemCache"><strong>CanFlushIndividualFilesFromSystemCache</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-CanMeasurePerApplicationPower"><strong>CanMeasurePerApplicationPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-CanMonitorPower"><strong>CanMonitorPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-CloseMsrServer"><strong>CloseMsrServer</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-CooperativelyShutdown"><strong>CooperativelyShutdown</strong></a>(self, proc, app_name)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-GetArchName"><strong>GetArchName</strong></a>(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-GetChildPids"><strong>GetChildPids</strong></a>(self, pid)</dt><dd><tt>Retunds&nbsp;a&nbsp;list&nbsp;of&nbsp;child&nbsp;pids&nbsp;of&nbsp;|pid|.</tt></dd></dl>
+
+<dl><dt><a name="WinPlatformBackend-GetCommandLine"><strong>GetCommandLine</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-GetCpuStats"><strong>GetCpuStats</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-GetCpuTimestamp"><strong>GetCpuTimestamp</strong></a>(self)</dt><dd><tt>Return&nbsp;current&nbsp;timestamp&nbsp;in&nbsp;seconds.</tt></dd></dl>
+
+<dl><dt><a name="WinPlatformBackend-GetMemoryStats"><strong>GetMemoryStats</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-GetOSName"><strong>GetOSName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-GetOSVersionName"><strong>GetOSVersionName</strong></a>(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-GetSystemCommitCharge"><strong>GetSystemCommitCharge</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-GetSystemProcessInfo"><strong>GetSystemProcessInfo</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-GetSystemTotalPhysicalMemory"><strong>GetSystemTotalPhysicalMemory</strong></a>(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-HasBeenThermallyThrottled"><strong>HasBeenThermallyThrottled</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-IsCooperativeShutdownSupported"><strong>IsCooperativeShutdownSupported</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-IsCurrentProcessElevated"><strong>IsCurrentProcessElevated</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-IsThermallyThrottled"><strong>IsThermallyThrottled</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-KillProcess"><strong>KillProcess</strong></a>(self, pid, kill_process_tree<font color="#909090">=False</font>)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-LaunchApplication"><strong>LaunchApplication</strong></a>(self, application, parameters<font color="#909090">=None</font>, elevate_privilege<font color="#909090">=False</font>)</dt><dd><tt>Launch&nbsp;an&nbsp;application.&nbsp;Returns&nbsp;a&nbsp;PyHANDLE&nbsp;object.</tt></dd></dl>
+
+<dl><dt><a name="WinPlatformBackend-ReadMsr"><strong>ReadMsr</strong></a>(self, msr_number, start<font color="#909090">=0</font>, length<font color="#909090">=64</font>)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-StartMonitoringPower"><strong>StartMonitoringPower</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-StopMonitoringPower"><strong>StopMonitoringPower</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-__del__"><strong>__del__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-close"><strong>close</strong></a>(self)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="WinPlatformBackend-IsPlatformBackendForHost"><strong>IsPlatformBackendForHost</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.desktop_platform_backend.html#DesktopPlatformBackend">telemetry.internal.platform.desktop_platform_backend.DesktopPlatformBackend</a>:<br>
+<dl><dt><a name="WinPlatformBackend-FlushSystemCacheForDirectory"><strong>FlushSystemCacheForDirectory</strong></a>(self, directory)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-GetDeviceTypeName"><strong>GetDeviceTypeName</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>:<br>
+<dl><dt><a name="WinPlatformBackend-CanCaptureVideo"><strong>CanCaptureVideo</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-CanLaunchApplication"><strong>CanLaunchApplication</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-CanMonitorNetworkData"><strong>CanMonitorNetworkData</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-CanMonitorThermalThrottling"><strong>CanMonitorThermalThrottling</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-CanTakeScreenshot"><strong>CanTakeScreenshot</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-DidCreateBrowser"><strong>DidCreateBrowser</strong></a>(self, browser, browser_backend)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-DidStartBrowser"><strong>DidStartBrowser</strong></a>(self, browser, browser_backend)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-FlushDnsCache"><strong>FlushDnsCache</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-FlushEntireSystemCache"><strong>FlushEntireSystemCache</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-GetNetworkData"><strong>GetNetworkData</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-GetRemotePort"><strong>GetRemotePort</strong></a>(self, port)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-InitPlatformBackend"><strong>InitPlatformBackend</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-InstallApplication"><strong>InstallApplication</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-IsApplicationRunning"><strong>IsApplicationRunning</strong></a>(self, application)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-IsDisplayTracingSupported"><strong>IsDisplayTracingSupported</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-PathExists"><strong>PathExists</strong></a>(self, path, timeout<font color="#909090">=None</font>, retries<font color="#909090">=None</font>)</dt><dd><tt>Tests&nbsp;whether&nbsp;the&nbsp;given&nbsp;path&nbsp;exists&nbsp;on&nbsp;the&nbsp;target&nbsp;platform.<br>
+Args:<br>
+&nbsp;&nbsp;path:&nbsp;path&nbsp;in&nbsp;request.<br>
+&nbsp;&nbsp;timeout:&nbsp;timeout.<br>
+&nbsp;&nbsp;retries:&nbsp;num&nbsp;of&nbsp;retries.<br>
+Return:<br>
+&nbsp;&nbsp;Whether&nbsp;the&nbsp;path&nbsp;exists&nbsp;on&nbsp;the&nbsp;target&nbsp;platform.</tt></dd></dl>
+
+<dl><dt><a name="WinPlatformBackend-PurgeUnpinnedMemory"><strong>PurgeUnpinnedMemory</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-SetFullPerformanceModeEnabled"><strong>SetFullPerformanceModeEnabled</strong></a>(self, enabled)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-SetPlatform"><strong>SetPlatform</strong></a>(self, platform)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-StartDisplayTracing"><strong>StartDisplayTracing</strong></a>(self)</dt><dd><tt>Start&nbsp;gathering&nbsp;a&nbsp;trace&nbsp;with&nbsp;frame&nbsp;timestamps&nbsp;close&nbsp;to&nbsp;physical<br>
+display.</tt></dd></dl>
+
+<dl><dt><a name="WinPlatformBackend-StartVideoCapture"><strong>StartVideoCapture</strong></a>(self, min_bitrate_mbps)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-StopDisplayTracing"><strong>StopDisplayTracing</strong></a>(self)</dt><dd><tt>Stop&nbsp;gathering&nbsp;a&nbsp;trace&nbsp;with&nbsp;frame&nbsp;timestamps&nbsp;close&nbsp;to&nbsp;physical&nbsp;display.<br>
+&nbsp;<br>
+Returns&nbsp;raw&nbsp;tracing&nbsp;events&nbsp;that&nbsp;contain&nbsp;the&nbsp;timestamps&nbsp;of&nbsp;physical<br>
+display.</tt></dd></dl>
+
+<dl><dt><a name="WinPlatformBackend-StopVideoCapture"><strong>StopVideoCapture</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-TakeScreenshot"><strong>TakeScreenshot</strong></a>(self, file_path)</dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-WillCloseBrowser"><strong>WillCloseBrowser</strong></a>(self, browser, browser_backend)</dt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>:<br>
+<dl><dt><a name="WinPlatformBackend-CreatePlatformForDevice"><strong>CreatePlatformForDevice</strong></a>(cls, device, finder_options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="WinPlatformBackend-SupportsDevice"><strong>SupportsDevice</strong></a>(cls, device)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Returns&nbsp;whether&nbsp;this&nbsp;platform&nbsp;backend&nbsp;supports&nbsp;intialization&nbsp;from&nbsp;the<br>
+device.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.platform_backend.html#PlatformBackend">telemetry.internal.platform.platform_backend.PlatformBackend</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>forwarder_factory</strong></dt>
+</dl>
+<dl><dt><strong>is_host_platform</strong></dt>
+</dl>
+<dl><dt><strong>is_video_capture_running</strong></dt>
+</dl>
+<dl><dt><strong>network_controller_backend</strong></dt>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+</dl>
+<dl><dt><strong>running_browser_backends</strong></dt>
+</dl>
+<dl><dt><strong>tracing_controller_backend</strong></dt>
+</dl>
+<dl><dt><strong>wpr_ca_cert_path</strong></dt>
+</dl>
+<dl><dt><strong>wpr_http_device_port</strong></dt>
+</dl>
+<dl><dt><strong>wpr_https_device_port</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-TerminateProcess"><strong>TerminateProcess</strong></a>(process_handle)</dt></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>pywintypes</strong> = None<br>
+<strong>shell</strong> = None<br>
+<strong>shellcon</strong> = None<br>
+<strong>win32api</strong> = None<br>
+<strong>win32con</strong> = None<br>
+<strong>win32gui</strong> = None<br>
+<strong>win32process</strong> = None<br>
+<strong>win32security</strong> = None</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.results.buildbot_output_formatter.html b/catapult/telemetry/docs/pydoc/telemetry.internal.results.buildbot_output_formatter.html
new file mode 100644
index 0000000..7b4addb
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.results.buildbot_output_formatter.html
@@ -0,0 +1,75 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.results.buildbot_output_formatter</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.results.html"><font color="#ffffff">results</font></a>.buildbot_output_formatter</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/results/buildbot_output_formatter.py">telemetry/internal/results/buildbot_output_formatter.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.decorators.html">telemetry.decorators</a><br>
+<a href="telemetry.internal.results.output_formatter.html">telemetry.internal.results.output_formatter</a><br>
+</td><td width="25%" valign=top><a href="telemetry.util.perf_tests_helper.html">telemetry.util.perf_tests_helper</a><br>
+<a href="telemetry.value.summary.html">telemetry.value.summary</a><br>
+</td><td width="25%" valign=top><a href="telemetry.value.html">telemetry.value</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.results.output_formatter.html#OutputFormatter">telemetry.internal.results.output_formatter.OutputFormatter</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.results.buildbot_output_formatter.html#BuildbotOutputFormatter">BuildbotOutputFormatter</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="BuildbotOutputFormatter">class <strong>BuildbotOutputFormatter</strong></a>(<a href="telemetry.internal.results.output_formatter.html#OutputFormatter">telemetry.internal.results.output_formatter.OutputFormatter</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.results.buildbot_output_formatter.html#BuildbotOutputFormatter">BuildbotOutputFormatter</a></dd>
+<dd><a href="telemetry.internal.results.output_formatter.html#OutputFormatter">telemetry.internal.results.output_formatter.OutputFormatter</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="BuildbotOutputFormatter-Format"><strong>Format</strong></a>(self, page_test_results)</dt><dd><tt>Print&nbsp;summary&nbsp;data&nbsp;in&nbsp;a&nbsp;format&nbsp;expected&nbsp;by&nbsp;buildbot&nbsp;for&nbsp;perf&nbsp;dashboards.<br>
+&nbsp;<br>
+If&nbsp;any&nbsp;failed&nbsp;pages&nbsp;exist,&nbsp;only&nbsp;output&nbsp;individual&nbsp;page&nbsp;results,&nbsp;and&nbsp;do<br>
+not&nbsp;output&nbsp;any&nbsp;average&nbsp;data.</tt></dd></dl>
+
+<dl><dt><a name="BuildbotOutputFormatter-__init__"><strong>__init__</strong></a>(*args, **kwargs)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.results.output_formatter.html#OutputFormatter">telemetry.internal.results.output_formatter.OutputFormatter</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>output_stream</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.results.chart_json_output_formatter.html b/catapult/telemetry/docs/pydoc/telemetry.internal.results.chart_json_output_formatter.html
new file mode 100644
index 0000000..2953e7f
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.results.chart_json_output_formatter.html
@@ -0,0 +1,99 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.results.chart_json_output_formatter</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.results.html"><font color="#ffffff">results</font></a>.chart_json_output_formatter</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/results/chart_json_output_formatter.py">telemetry/internal/results/chart_json_output_formatter.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="collections.html">collections</a><br>
+<a href="itertools.html">itertools</a><br>
+</td><td width="25%" valign=top><a href="json.html">json</a><br>
+<a href="telemetry.internal.results.output_formatter.html">telemetry.internal.results.output_formatter</a><br>
+</td><td width="25%" valign=top><a href="telemetry.value.summary.html">telemetry.value.summary</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.results.output_formatter.html#OutputFormatter">telemetry.internal.results.output_formatter.OutputFormatter</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.results.chart_json_output_formatter.html#ChartJsonOutputFormatter">ChartJsonOutputFormatter</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ChartJsonOutputFormatter">class <strong>ChartJsonOutputFormatter</strong></a>(<a href="telemetry.internal.results.output_formatter.html#OutputFormatter">telemetry.internal.results.output_formatter.OutputFormatter</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>#&nbsp;TODO(eakuefner):&nbsp;Transition&nbsp;this&nbsp;to&nbsp;translate&nbsp;Telemetry&nbsp;JSON.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.results.chart_json_output_formatter.html#ChartJsonOutputFormatter">ChartJsonOutputFormatter</a></dd>
+<dd><a href="telemetry.internal.results.output_formatter.html#OutputFormatter">telemetry.internal.results.output_formatter.OutputFormatter</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="ChartJsonOutputFormatter-Format"><strong>Format</strong></a>(self, page_test_results)</dt></dl>
+
+<dl><dt><a name="ChartJsonOutputFormatter-__init__"><strong>__init__</strong></a>(self, output_stream, benchmark_metadata)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.results.output_formatter.html#OutputFormatter">telemetry.internal.results.output_formatter.OutputFormatter</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>output_stream</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-ResultsAsChartDict"><strong>ResultsAsChartDict</strong></a>(benchmark_metadata, page_specific_values, summary_values)</dt><dd><tt>Produces&nbsp;a&nbsp;dict&nbsp;for&nbsp;serialization&nbsp;to&nbsp;Chart&nbsp;JSON&nbsp;format&nbsp;from&nbsp;raw&nbsp;values.<br>
+&nbsp;<br>
+Chart&nbsp;JSON&nbsp;is&nbsp;a&nbsp;transformation&nbsp;of&nbsp;the&nbsp;basic&nbsp;Telemetry&nbsp;JSON&nbsp;format&nbsp;that<br>
+removes&nbsp;the&nbsp;page&nbsp;map,&nbsp;summarizes&nbsp;the&nbsp;raw&nbsp;values,&nbsp;and&nbsp;organizes&nbsp;the&nbsp;results<br>
+by&nbsp;chart&nbsp;and&nbsp;trace&nbsp;name.&nbsp;This&nbsp;function&nbsp;takes&nbsp;the&nbsp;key&nbsp;pieces&nbsp;of&nbsp;data&nbsp;needed&nbsp;to<br>
+perform&nbsp;this&nbsp;transformation&nbsp;(namely,&nbsp;lists&nbsp;of&nbsp;values&nbsp;and&nbsp;a&nbsp;benchmark&nbsp;metadata<br>
+object)&nbsp;and&nbsp;processes&nbsp;them&nbsp;into&nbsp;a&nbsp;dict&nbsp;which&nbsp;can&nbsp;be&nbsp;serialized&nbsp;using&nbsp;the&nbsp;json<br>
+module.<br>
+&nbsp;<br>
+Design&nbsp;doc&nbsp;for&nbsp;schema:&nbsp;<a href="http://goo.gl/kOtf1Y">http://goo.gl/kOtf1Y</a><br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;page_specific_values:&nbsp;list&nbsp;of&nbsp;page-specific&nbsp;values<br>
+&nbsp;&nbsp;summary_values:&nbsp;list&nbsp;of&nbsp;summary&nbsp;values<br>
+&nbsp;&nbsp;benchmark_metadata:&nbsp;a&nbsp;benchmark.BenchmarkMetadata&nbsp;object<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;Chart&nbsp;JSON&nbsp;dict&nbsp;corresponding&nbsp;to&nbsp;the&nbsp;given&nbsp;data.</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.results.csv_pivot_table_output_formatter.html b/catapult/telemetry/docs/pydoc/telemetry.internal.results.csv_pivot_table_output_formatter.html
new file mode 100644
index 0000000..ccde6e6
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.results.csv_pivot_table_output_formatter.html
@@ -0,0 +1,88 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.results.csv_pivot_table_output_formatter</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.results.html"><font color="#ffffff">results</font></a>.csv_pivot_table_output_formatter</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/results/csv_pivot_table_output_formatter.py">telemetry/internal/results/csv_pivot_table_output_formatter.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="csv.html">csv</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.results.output_formatter.html">telemetry.internal.results.output_formatter</a><br>
+</td><td width="25%" valign=top><a href="telemetry.value.scalar.html">telemetry.value.scalar</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.results.output_formatter.html#OutputFormatter">telemetry.internal.results.output_formatter.OutputFormatter</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.results.csv_pivot_table_output_formatter.html#CsvPivotTableOutputFormatter">CsvPivotTableOutputFormatter</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="CsvPivotTableOutputFormatter">class <strong>CsvPivotTableOutputFormatter</strong></a>(<a href="telemetry.internal.results.output_formatter.html#OutputFormatter">telemetry.internal.results.output_formatter.OutputFormatter</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Output&nbsp;the&nbsp;results&nbsp;as&nbsp;CSV&nbsp;suitable&nbsp;for&nbsp;reading&nbsp;into&nbsp;a&nbsp;spreadsheet.<br>
+&nbsp;<br>
+This&nbsp;will&nbsp;write&nbsp;a&nbsp;header&nbsp;row,&nbsp;and&nbsp;one&nbsp;row&nbsp;for&nbsp;each&nbsp;value.&nbsp;Each&nbsp;value&nbsp;row<br>
+contains&nbsp;the&nbsp;value&nbsp;and&nbsp;unit,&nbsp;identifies&nbsp;the&nbsp;value&nbsp;(story_set,&nbsp;page,&nbsp;name),&nbsp;and<br>
+(optionally)&nbsp;data&nbsp;from&nbsp;--output-trace-tag.&nbsp;This&nbsp;format&nbsp;matches&nbsp;what<br>
+spreadsheet&nbsp;programs&nbsp;expect&nbsp;as&nbsp;input&nbsp;for&nbsp;a&nbsp;"pivot&nbsp;table".<br>
+&nbsp;<br>
+A&nbsp;trace&nbsp;tag&nbsp;(--output-trace-tag)&nbsp;can&nbsp;be&nbsp;used&nbsp;to&nbsp;tag&nbsp;each&nbsp;value,&nbsp;to&nbsp;allow<br>
+easy&nbsp;combination&nbsp;of&nbsp;the&nbsp;resulting&nbsp;CSVs&nbsp;from&nbsp;several&nbsp;runs.<br>
+If&nbsp;the&nbsp;trace_tag&nbsp;contains&nbsp;a&nbsp;comma,&nbsp;it&nbsp;will&nbsp;be&nbsp;written&nbsp;as&nbsp;several<br>
+comma-separated&nbsp;values.<br>
+&nbsp;<br>
+This&nbsp;class&nbsp;only&nbsp;processes&nbsp;scalar&nbsp;values.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.results.csv_pivot_table_output_formatter.html#CsvPivotTableOutputFormatter">CsvPivotTableOutputFormatter</a></dd>
+<dd><a href="telemetry.internal.results.output_formatter.html#OutputFormatter">telemetry.internal.results.output_formatter.OutputFormatter</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="CsvPivotTableOutputFormatter-Format"><strong>Format</strong></a>(self, page_test_results)</dt></dl>
+
+<dl><dt><a name="CsvPivotTableOutputFormatter-__init__"><strong>__init__</strong></a>(self, output_stream, trace_tag<font color="#909090">=''</font>)</dt></dl>
+
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>FIELDS</strong> = ['story_set', 'page', 'name', 'value', 'units', 'run_index']</dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.results.output_formatter.html#OutputFormatter">telemetry.internal.results.output_formatter.OutputFormatter</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>output_stream</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.results.gtest_progress_reporter.html b/catapult/telemetry/docs/pydoc/telemetry.internal.results.gtest_progress_reporter.html
new file mode 100644
index 0000000..e96b455
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.results.gtest_progress_reporter.html
@@ -0,0 +1,84 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.results.gtest_progress_reporter</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.results.html"><font color="#ffffff">results</font></a>.gtest_progress_reporter</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/results/gtest_progress_reporter.py">telemetry/internal/results/gtest_progress_reporter.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.value.failure.html">telemetry.value.failure</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.results.progress_reporter.html">telemetry.internal.results.progress_reporter</a><br>
+</td><td width="25%" valign=top><a href="telemetry.value.skip.html">telemetry.value.skip</a><br>
+</td><td width="25%" valign=top><a href="time.html">time</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.results.progress_reporter.html#ProgressReporter">telemetry.internal.results.progress_reporter.ProgressReporter</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.results.gtest_progress_reporter.html#GTestProgressReporter">GTestProgressReporter</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="GTestProgressReporter">class <strong>GTestProgressReporter</strong></a>(<a href="telemetry.internal.results.progress_reporter.html#ProgressReporter">telemetry.internal.results.progress_reporter.ProgressReporter</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;progress&nbsp;reporter&nbsp;that&nbsp;outputs&nbsp;the&nbsp;progress&nbsp;report&nbsp;in&nbsp;gtest&nbsp;style.<br>
+&nbsp;<br>
+Be&nbsp;careful&nbsp;each&nbsp;print&nbsp;should&nbsp;only&nbsp;handle&nbsp;one&nbsp;string.&nbsp;Otherwise,&nbsp;the&nbsp;output<br>
+might&nbsp;be&nbsp;interrupted&nbsp;by&nbsp;Chrome&nbsp;logging,&nbsp;and&nbsp;the&nbsp;output&nbsp;interpretation&nbsp;might<br>
+be&nbsp;incorrect.&nbsp;For&nbsp;example:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;print&nbsp;&gt;&gt;&nbsp;self.<strong>_output_stream</strong>,&nbsp;"[&nbsp;OK&nbsp;]",&nbsp;testname<br>
+should&nbsp;be&nbsp;written&nbsp;as<br>
+&nbsp;&nbsp;&nbsp;&nbsp;print&nbsp;&gt;&gt;&nbsp;self.<strong>_output_stream</strong>,&nbsp;"[&nbsp;OK&nbsp;]&nbsp;%s"&nbsp;%&nbsp;testname<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.results.gtest_progress_reporter.html#GTestProgressReporter">GTestProgressReporter</a></dd>
+<dd><a href="telemetry.internal.results.progress_reporter.html#ProgressReporter">telemetry.internal.results.progress_reporter.ProgressReporter</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="GTestProgressReporter-DidAddValue"><strong>DidAddValue</strong></a>(self, value)</dt></dl>
+
+<dl><dt><a name="GTestProgressReporter-DidFinishAllTests"><strong>DidFinishAllTests</strong></a>(self, page_test_results)</dt></dl>
+
+<dl><dt><a name="GTestProgressReporter-DidRunPage"><strong>DidRunPage</strong></a>(self, page_test_results)</dt></dl>
+
+<dl><dt><a name="GTestProgressReporter-WillRunPage"><strong>WillRunPage</strong></a>(self, page_test_results)</dt></dl>
+
+<dl><dt><a name="GTestProgressReporter-__init__"><strong>__init__</strong></a>(self, output_stream, output_skipped_tests_summary<font color="#909090">=False</font>)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.results.progress_reporter.html#ProgressReporter">telemetry.internal.results.progress_reporter.ProgressReporter</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.results.html b/catapult/telemetry/docs/pydoc/telemetry.internal.results.html
new file mode 100644
index 0000000..a17726b
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.results.html
@@ -0,0 +1,43 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.internal.results</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.results</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/results/__init__.py">telemetry/internal/results/__init__.py</a></font></td></tr></table>
+    <p><tt>The&nbsp;PageTestResults&nbsp;hierarchy&nbsp;provides&nbsp;a&nbsp;way&nbsp;of&nbsp;representing&nbsp;the&nbsp;results&nbsp;of<br>
+running&nbsp;the&nbsp;test&nbsp;or&nbsp;measurement&nbsp;on&nbsp;pages.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.results.base_test_results_unittest.html">base_test_results_unittest</a><br>
+<a href="telemetry.internal.results.buildbot_output_formatter.html">buildbot_output_formatter</a><br>
+<a href="telemetry.internal.results.buildbot_output_formatter_unittest.html">buildbot_output_formatter_unittest</a><br>
+<a href="telemetry.internal.results.chart_json_output_formatter.html">chart_json_output_formatter</a><br>
+<a href="telemetry.internal.results.chart_json_output_formatter_unittest.html">chart_json_output_formatter_unittest</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.results.csv_pivot_table_output_formatter.html">csv_pivot_table_output_formatter</a><br>
+<a href="telemetry.internal.results.csv_pivot_table_output_formatter_unittest.html">csv_pivot_table_output_formatter_unittest</a><br>
+<a href="telemetry.internal.results.gtest_progress_reporter.html">gtest_progress_reporter</a><br>
+<a href="telemetry.internal.results.gtest_progress_reporter_unittest.html">gtest_progress_reporter_unittest</a><br>
+<a href="telemetry.internal.results.html_output_formatter.html">html_output_formatter</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.results.html_output_formatter_unittest.html">html_output_formatter_unittest</a><br>
+<a href="telemetry.internal.results.json_output_formatter.html">json_output_formatter</a><br>
+<a href="telemetry.internal.results.json_output_formatter_unittest.html">json_output_formatter_unittest</a><br>
+<a href="telemetry.internal.results.output_formatter.html">output_formatter</a><br>
+<a href="telemetry.internal.results.page_test_results.html">page_test_results</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.results.page_test_results_unittest.html">page_test_results_unittest</a><br>
+<a href="telemetry.internal.results.progress_reporter.html">progress_reporter</a><br>
+<a href="telemetry.internal.results.results_options.html">results_options</a><br>
+<a href="telemetry.internal.results.story_run.html">story_run</a><br>
+<a href="telemetry.internal.results.story_run_unittest.html">story_run_unittest</a><br>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.results.html_output_formatter.html b/catapult/telemetry/docs/pydoc/telemetry.internal.results.html_output_formatter.html
new file mode 100644
index 0000000..170dfc6
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.results.html_output_formatter.html
@@ -0,0 +1,84 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.results.html_output_formatter</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.results.html"><font color="#ffffff">results</font></a>.html_output_formatter</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/results/html_output_formatter.py">telemetry/internal/results/html_output_formatter.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.results.chart_json_output_formatter.html">telemetry.internal.results.chart_json_output_formatter</a><br>
+<a href="catapult_base.cloud_storage.html">catapult_base.cloud_storage</a><br>
+<a href="datetime.html">datetime</a><br>
+</td><td width="25%" valign=top><a href="json.html">json</a><br>
+<a href="telemetry.value.list_of_scalar_values.html">telemetry.value.list_of_scalar_values</a><br>
+<a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+<a href="telemetry.internal.results.output_formatter.html">telemetry.internal.results.output_formatter</a><br>
+<a href="re.html">re</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.util.html">telemetry.core.util</a><br>
+<a href="telemetry.value.html">telemetry.value</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.results.output_formatter.html#OutputFormatter">telemetry.internal.results.output_formatter.OutputFormatter</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.results.html_output_formatter.html#HtmlOutputFormatter">HtmlOutputFormatter</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="HtmlOutputFormatter">class <strong>HtmlOutputFormatter</strong></a>(<a href="telemetry.internal.results.output_formatter.html#OutputFormatter">telemetry.internal.results.output_formatter.OutputFormatter</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>#&nbsp;TODO(eakuefner):&nbsp;rewrite&nbsp;template&nbsp;to&nbsp;use&nbsp;Telemetry&nbsp;JSON&nbsp;directly<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.results.html_output_formatter.html#HtmlOutputFormatter">HtmlOutputFormatter</a></dd>
+<dd><a href="telemetry.internal.results.output_formatter.html#OutputFormatter">telemetry.internal.results.output_formatter.OutputFormatter</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="HtmlOutputFormatter-Format"><strong>Format</strong></a>(self, page_test_results)</dt></dl>
+
+<dl><dt><a name="HtmlOutputFormatter-GetCombinedResults"><strong>GetCombinedResults</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="HtmlOutputFormatter-GetResults"><strong>GetResults</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="HtmlOutputFormatter-__init__"><strong>__init__</strong></a>(self, output_stream, metadata, reset_results, upload_results, browser_type, results_label<font color="#909090">=None</font>)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.results.output_formatter.html#OutputFormatter">telemetry.internal.results.output_formatter.OutputFormatter</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>output_stream</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.results.json_output_formatter.html b/catapult/telemetry/docs/pydoc/telemetry.internal.results.json_output_formatter.html
new file mode 100644
index 0000000..f1d4dc9
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.results.json_output_formatter.html
@@ -0,0 +1,90 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.results.json_output_formatter</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.results.html"><font color="#ffffff">results</font></a>.json_output_formatter</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/results/json_output_formatter.py">telemetry/internal/results/json_output_formatter.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="json.html">json</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.results.output_formatter.html">telemetry.internal.results.output_formatter</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.results.output_formatter.html#OutputFormatter">telemetry.internal.results.output_formatter.OutputFormatter</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.results.json_output_formatter.html#JsonOutputFormatter">JsonOutputFormatter</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="JsonOutputFormatter">class <strong>JsonOutputFormatter</strong></a>(<a href="telemetry.internal.results.output_formatter.html#OutputFormatter">telemetry.internal.results.output_formatter.OutputFormatter</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.results.json_output_formatter.html#JsonOutputFormatter">JsonOutputFormatter</a></dd>
+<dd><a href="telemetry.internal.results.output_formatter.html#OutputFormatter">telemetry.internal.results.output_formatter.OutputFormatter</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="JsonOutputFormatter-Format"><strong>Format</strong></a>(self, page_test_results)</dt></dl>
+
+<dl><dt><a name="JsonOutputFormatter-__init__"><strong>__init__</strong></a>(self, output_stream, benchmark_metadata)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>benchmark_metadata</strong></dt>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.results.output_formatter.html#OutputFormatter">telemetry.internal.results.output_formatter.OutputFormatter</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>output_stream</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-ResultsAsDict"><strong>ResultsAsDict</strong></a>(page_test_results, benchmark_metadata)</dt><dd><tt>Takes&nbsp;PageTestResults&nbsp;to&nbsp;a&nbsp;dict&nbsp;serializable&nbsp;to&nbsp;JSON.<br>
+&nbsp;<br>
+To&nbsp;serialize&nbsp;results&nbsp;as&nbsp;JSON&nbsp;we&nbsp;first&nbsp;convert&nbsp;them&nbsp;to&nbsp;a&nbsp;dict&nbsp;that&nbsp;can&nbsp;be<br>
+serialized&nbsp;by&nbsp;the&nbsp;json&nbsp;module.&nbsp;It&nbsp;also&nbsp;requires&nbsp;a&nbsp;benchmark_metadata&nbsp;object<br>
+for&nbsp;metadata&nbsp;to&nbsp;be&nbsp;integrated&nbsp;into&nbsp;the&nbsp;results&nbsp;(currently&nbsp;the&nbsp;benchmark<br>
+name).&nbsp;This&nbsp;function&nbsp;will&nbsp;also&nbsp;output&nbsp;trace&nbsp;files&nbsp;if&nbsp;they&nbsp;exist.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;page_test_results:&nbsp;a&nbsp;PageTestResults&nbsp;object<br>
+&nbsp;&nbsp;benchmark_metadata:&nbsp;a&nbsp;benchmark.BenchmarkMetadata&nbsp;object</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
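The json_output_formatter page added above documents JsonOutputFormatter(output_stream, benchmark_metadata), its Format(page_test_results) method, and the module-level ResultsAsDict(page_test_results, benchmark_metadata) helper. A minimal usage sketch, assuming a catapult/telemetry checkout on PYTHONPATH; the metadata placeholder stands in for a benchmark.BenchmarkMetadata instance, whose constructor is not documented on this page:

import json
import sys

from telemetry.internal.results import json_output_formatter
from telemetry.internal.results import page_test_results

results = page_test_results.PageTestResults()  # every constructor argument has a default
metadata = None  # placeholder: replace with a real benchmark.BenchmarkMetadata

# Stream the results as JSON to any writable file-like object.
formatter = json_output_formatter.JsonOutputFormatter(sys.stdout, metadata)
formatter.Format(results)

# Or build the JSON-serializable dict directly and dump it yourself.
print(json.dumps(json_output_formatter.ResultsAsDict(results, metadata), indent=2))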
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.results.output_formatter.html b/catapult/telemetry/docs/pydoc/telemetry.internal.results.output_formatter.html
new file mode 100644
index 0000000..2588947
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.results.output_formatter.html
@@ -0,0 +1,72 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.results.output_formatter</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.results.html"><font color="#ffffff">results</font></a>.output_formatter</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/results/output_formatter.py">telemetry/internal/results/output_formatter.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.results.output_formatter.html#OutputFormatter">OutputFormatter</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="OutputFormatter">class <strong>OutputFormatter</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;formatter&nbsp;for&nbsp;PageTestResults.<br>
+&nbsp;<br>
+An&nbsp;<a href="#OutputFormatter">OutputFormatter</a>&nbsp;takes&nbsp;PageTestResults,&nbsp;formats&nbsp;the&nbsp;results<br>
+(telemetry.value.Value&nbsp;instances),&nbsp;and&nbsp;outputs&nbsp;the&nbsp;formatted&nbsp;results<br>
+to&nbsp;the&nbsp;given&nbsp;output&nbsp;stream.<br>
+&nbsp;<br>
+Example&nbsp;of&nbsp;an&nbsp;output&nbsp;formatter:&nbsp;CsvOutputFormatter&nbsp;produces&nbsp;results&nbsp;in<br>
+CSV&nbsp;format.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="OutputFormatter-Format"><strong>Format</strong></a>(self, page_test_results)</dt><dd><tt>Formats&nbsp;the&nbsp;given&nbsp;PageTestResults&nbsp;into&nbsp;the&nbsp;output&nbsp;stream.<br>
+&nbsp;<br>
+This&nbsp;will&nbsp;be&nbsp;called&nbsp;once&nbsp;at&nbsp;the&nbsp;end&nbsp;of&nbsp;a&nbsp;benchmark.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;page_test_results:&nbsp;A&nbsp;PageTestResults&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;containing&nbsp;all&nbsp;results<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;from&nbsp;the&nbsp;current&nbsp;benchmark&nbsp;run.</tt></dd></dl>
+
+<dl><dt><a name="OutputFormatter-__init__"><strong>__init__</strong></a>(self, output_stream)</dt><dd><tt>Constructs&nbsp;a&nbsp;new&nbsp;formatter&nbsp;that&nbsp;writes&nbsp;to&nbsp;the&nbsp;output_stream.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;output_stream:&nbsp;The&nbsp;stream&nbsp;to&nbsp;write&nbsp;the&nbsp;formatted&nbsp;output&nbsp;to.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>output_stream</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
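OutputFormatter is the extension point documented above: a subclass receives the stream via __init__(output_stream) and implements Format(page_test_results), which is called once at the end of a benchmark. A minimal sketch of a custom formatter, assuming a catapult/telemetry checkout on PYTHONPATH; the one-line summary it writes is purely illustrative:

import sys

from telemetry.internal.results import output_formatter


class SummaryOutputFormatter(output_formatter.OutputFormatter):
  """Writes a one-line pass/fail summary instead of the full results."""

  def Format(self, page_test_results):
    # Called once at the end of a benchmark with all collected results.
    ok = len(page_test_results.pages_that_succeeded)
    failed = len(page_test_results.pages_that_failed)
    self.output_stream.write('%d page(s) ok, %d failed\n' % (ok, failed))


formatter = SummaryOutputFormatter(sys.stdout)  # writes to stdout via self.output_stream

Such a formatter would then be passed in the output_formatters list accepted by the PageTestResults constructor documented on the next page.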
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.results.page_test_results.html b/catapult/telemetry/docs/pydoc/telemetry.internal.results.page_test_results.html
new file mode 100644
index 0000000..45c0ee5
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.results.page_test_results.html
@@ -0,0 +1,152 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.results.page_test_results</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.results.html"><font color="#ffffff">results</font></a>.page_test_results</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/results/page_test_results.py">telemetry/internal/results/page_test_results.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="catapult_base.cloud_storage.html">catapult_base.cloud_storage</a><br>
+<a href="collections.html">collections</a><br>
+<a href="copy.html">copy</a><br>
+<a href="datetime.html">datetime</a><br>
+</td><td width="25%" valign=top><a href="telemetry.value.failure.html">telemetry.value.failure</a><br>
+<a href="telemetry.internal.results.json_output_formatter.html">telemetry.internal.results.json_output_formatter</a><br>
+<a href="logging.html">logging</a><br>
+<a href="random.html">random</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.results.progress_reporter.html">telemetry.internal.results.progress_reporter</a><br>
+<a href="telemetry.value.skip.html">telemetry.value.skip</a><br>
+<a href="telemetry.internal.results.story_run.html">telemetry.internal.results.story_run</a><br>
+<a href="sys.html">sys</a><br>
+</td><td width="25%" valign=top><a href="telemetry.value.trace.html">telemetry.value.trace</a><br>
+<a href="traceback.html">traceback</a><br>
+<a href="telemetry.value.html">telemetry.value</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.results.page_test_results.html#PageTestResults">PageTestResults</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PageTestResults">class <strong>PageTestResults</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="PageTestResults-AddProfilingFile"><strong>AddProfilingFile</strong></a>(self, page, file_handle)</dt></dl>
+
+<dl><dt><a name="PageTestResults-AddSummaryValue"><strong>AddSummaryValue</strong></a>(self, value)</dt></dl>
+
+<dl><dt><a name="PageTestResults-AddValue"><strong>AddValue</strong></a>(self, value)</dt></dl>
+
+<dl><dt><a name="PageTestResults-CleanUp"><strong>CleanUp</strong></a>(self)</dt><dd><tt>Clean&nbsp;up&nbsp;any&nbsp;TraceValues&nbsp;contained&nbsp;within&nbsp;this&nbsp;results&nbsp;<a href="__builtin__.html#object">object</a>.</tt></dd></dl>
+
+<dl><dt><a name="PageTestResults-DidRunPage"><strong>DidRunPage</strong></a>(self, page)</dt><dd><tt>Args:<br>
+&nbsp;&nbsp;page:&nbsp;The&nbsp;current&nbsp;page&nbsp;under&nbsp;test.</tt></dd></dl>
+
+<dl><dt><a name="PageTestResults-FindAllPageSpecificValuesFromIRNamed"><strong>FindAllPageSpecificValuesFromIRNamed</strong></a>(self, tir_label, value_name)</dt></dl>
+
+<dl><dt><a name="PageTestResults-FindAllPageSpecificValuesNamed"><strong>FindAllPageSpecificValuesNamed</strong></a>(self, value_name)</dt></dl>
+
+<dl><dt><a name="PageTestResults-FindAllTraceValues"><strong>FindAllTraceValues</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PageTestResults-FindPageSpecificValuesForPage"><strong>FindPageSpecificValuesForPage</strong></a>(self, page, value_name)</dt></dl>
+
+<dl><dt><a name="PageTestResults-FindValues"><strong>FindValues</strong></a>(self, predicate)</dt><dd><tt>Finds&nbsp;all&nbsp;values&nbsp;matching&nbsp;the&nbsp;specified&nbsp;predicate.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;predicate:&nbsp;A&nbsp;function&nbsp;that&nbsp;takes&nbsp;a&nbsp;Value&nbsp;and&nbsp;returns&nbsp;a&nbsp;bool.<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;list&nbsp;of&nbsp;values&nbsp;matching&nbsp;|predicate|.</tt></dd></dl>
+
+<dl><dt><a name="PageTestResults-PrintSummary"><strong>PrintSummary</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PageTestResults-UploadProfilingFilesToCloud"><strong>UploadProfilingFilesToCloud</strong></a>(self, bucket)</dt></dl>
+
+<dl><dt><a name="PageTestResults-UploadTraceFilesToCloud"><strong>UploadTraceFilesToCloud</strong></a>(self, bucket)</dt></dl>
+
+<dl><dt><a name="PageTestResults-WillRunPage"><strong>WillRunPage</strong></a>(self, page)</dt></dl>
+
+<dl><dt><a name="PageTestResults-__copy__"><strong>__copy__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PageTestResults-__enter__"><strong>__enter__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PageTestResults-__exit__"><strong>__exit__</strong></a>(self, _, __, ___)</dt></dl>
+
+<dl><dt><a name="PageTestResults-__init__"><strong>__init__</strong></a>(self, output_formatters<font color="#909090">=None</font>, progress_reporter<font color="#909090">=None</font>, trace_tag<font color="#909090">=''</font>, output_dir<font color="#909090">=None</font>, value_can_be_added_predicate<font color="#909090">=&lt;function &lt;lambda&gt;&gt;</font>)</dt><dd><tt>Args:<br>
+&nbsp;&nbsp;output_formatters:&nbsp;A&nbsp;list&nbsp;of&nbsp;output&nbsp;formatters.&nbsp;The&nbsp;output<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;formatters&nbsp;are&nbsp;typically&nbsp;used&nbsp;to&nbsp;format&nbsp;the&nbsp;test&nbsp;results,&nbsp;such<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;as&nbsp;CsvPivotTableOutputFormatter,&nbsp;which&nbsp;outputs&nbsp;the&nbsp;test&nbsp;results&nbsp;as&nbsp;CSV.<br>
+&nbsp;&nbsp;progress_reporter:&nbsp;An&nbsp;instance&nbsp;of&nbsp;progress_reporter.ProgressReporter,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;to&nbsp;be&nbsp;used&nbsp;to&nbsp;output&nbsp;test&nbsp;status/results&nbsp;progressively.<br>
+&nbsp;&nbsp;trace_tag:&nbsp;A&nbsp;string&nbsp;to&nbsp;append&nbsp;to&nbsp;the&nbsp;buildbot&nbsp;trace&nbsp;name.&nbsp;Currently&nbsp;only<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;used&nbsp;for&nbsp;buildbot.<br>
+&nbsp;&nbsp;output_dir:&nbsp;A&nbsp;string&nbsp;specifying&nbsp;the&nbsp;directory&nbsp;in&nbsp;which&nbsp;to&nbsp;store&nbsp;the&nbsp;test<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;artifacts,&nbsp;e.g.&nbsp;traces,&nbsp;videos,&nbsp;...<br>
+&nbsp;&nbsp;value_can_be_added_predicate:&nbsp;A&nbsp;function&nbsp;that&nbsp;takes&nbsp;two&nbsp;arguments:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;a&nbsp;value.Value&nbsp;instance&nbsp;(except&nbsp;failure.FailureValue,&nbsp;skip.SkipValue<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;or&nbsp;trace.TraceValue)&nbsp;and&nbsp;a&nbsp;boolean&nbsp;(True&nbsp;when&nbsp;the&nbsp;value&nbsp;is&nbsp;part&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;the&nbsp;first&nbsp;result&nbsp;for&nbsp;the&nbsp;story).&nbsp;It&nbsp;returns&nbsp;True&nbsp;if&nbsp;the&nbsp;value<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;can&nbsp;be&nbsp;added&nbsp;to&nbsp;the&nbsp;test&nbsp;results&nbsp;and&nbsp;False&nbsp;otherwise.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>all_page_runs</strong></dt>
+</dl>
+<dl><dt><strong>all_page_specific_values</strong></dt>
+</dl>
+<dl><dt><strong>all_summary_values</strong></dt>
+</dl>
+<dl><dt><strong>current_page</strong></dt>
+</dl>
+<dl><dt><strong>current_page_run</strong></dt>
+</dl>
+<dl><dt><strong>failures</strong></dt>
+</dl>
+<dl><dt><strong>pages_that_failed</strong></dt>
+<dd><tt>Returns&nbsp;the&nbsp;set&nbsp;of&nbsp;failed&nbsp;pages.</tt></dd>
+</dl>
+<dl><dt><strong>pages_that_succeeded</strong></dt>
+<dd><tt>Returns&nbsp;the&nbsp;set&nbsp;of&nbsp;pages&nbsp;that&nbsp;succeeded.</tt></dd>
+</dl>
+<dl><dt><strong>pages_to_profiling_files</strong></dt>
+</dl>
+<dl><dt><strong>pages_to_profiling_files_cloud_url</strong></dt>
+</dl>
+<dl><dt><strong>serialized_trace_file_ids_to_paths</strong></dt>
+</dl>
+<dl><dt><strong>skipped_values</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
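PageTestResults is the accumulator described above: WillRunPage/AddValue/DidRunPage record one story run, and the Find* methods and pages_that_* properties query what was collected. A minimal sketch of that flow, assuming a catapult/telemetry checkout on PYTHONPATH; story and value are placeholders for the real story object and telemetry.value.Value instance a measurement would provide, and the value name queried at the end is hypothetical:

from telemetry.internal.results import page_test_results

story = None  # placeholder: a telemetry story/page object
value = None  # placeholder: a telemetry.value.Value produced by a measurement

results = page_test_results.PageTestResults()
try:
  results.WillRunPage(story)   # start recording a run for this story
  results.AddValue(value)      # attach a measured value to the current run
  results.DidRunPage(story)    # close the run

  # Query what was collected.
  load_times = results.FindAllPageSpecificValuesNamed('load_time')  # hypothetical name
  failures = results.failures
  results.PrintSummary()
finally:
  results.CleanUp()  # documented above: cleans up any TraceValues held by the results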
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.results.progress_reporter.html b/catapult/telemetry/docs/pydoc/telemetry.internal.results.progress_reporter.html
new file mode 100644
index 0000000..6d9ae69
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.results.progress_reporter.html
@@ -0,0 +1,65 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.results.progress_reporter</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.results.html"><font color="#ffffff">results</font></a>.progress_reporter</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/results/progress_reporter.py">telemetry/internal/results/progress_reporter.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.results.progress_reporter.html#ProgressReporter">ProgressReporter</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ProgressReporter">class <strong>ProgressReporter</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;class&nbsp;that&nbsp;reports&nbsp;progress&nbsp;of&nbsp;a&nbsp;benchmark.<br>
+&nbsp;<br>
+The&nbsp;reporter&nbsp;produces&nbsp;output&nbsp;whenever&nbsp;a&nbsp;significant&nbsp;event&nbsp;happens<br>
+during&nbsp;the&nbsp;progress&nbsp;of&nbsp;a&nbsp;benchmark,&nbsp;including&nbsp;(but&nbsp;not&nbsp;limited&nbsp;to):<br>
+when&nbsp;a&nbsp;page&nbsp;run&nbsp;is&nbsp;started,&nbsp;when&nbsp;a&nbsp;page&nbsp;run&nbsp;is&nbsp;finished,&nbsp;and&nbsp;any&nbsp;failures<br>
+during&nbsp;a&nbsp;page&nbsp;run.<br>
+&nbsp;<br>
+The&nbsp;default&nbsp;implementation&nbsp;outputs&nbsp;nothing.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="ProgressReporter-DidAddValue"><strong>DidAddValue</strong></a>(self, value)</dt></dl>
+
+<dl><dt><a name="ProgressReporter-DidFinishAllTests"><strong>DidFinishAllTests</strong></a>(self, page_test_results)</dt></dl>
+
+<dl><dt><a name="ProgressReporter-DidRunPage"><strong>DidRunPage</strong></a>(self, page_test_results)</dt></dl>
+
+<dl><dt><a name="ProgressReporter-WillRunPage"><strong>WillRunPage</strong></a>(self, page_test_results)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
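Since the default ProgressReporter outputs nothing, a custom reporter only needs to override the hooks it cares about. A minimal sketch, assuming a catapult/telemetry checkout on PYTHONPATH; the dot-per-page output is purely illustrative, and all_page_runs is assumed to be a sized collection:

import sys

from telemetry.internal.results import progress_reporter


class DotProgressReporter(progress_reporter.ProgressReporter):
  """Prints one dot per finished page run and a final count."""

  def DidRunPage(self, page_test_results):
    sys.stdout.write('.')
    sys.stdout.flush()

  def DidFinishAllTests(self, page_test_results):
    sys.stdout.write('\n%d run(s) recorded\n' % len(page_test_results.all_page_runs))

An instance can be passed as the progress_reporter argument of the PageTestResults constructor documented earlier.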
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.results.results_options.html b/catapult/telemetry/docs/pydoc/telemetry.internal.results.results_options.html
new file mode 100644
index 0000000..a565e54
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.results.results_options.html
@@ -0,0 +1,48 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.results.results_options</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.results.html"><font color="#ffffff">results</font></a>.results_options</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/results/results_options.py">telemetry/internal/results/results_options.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.results.buildbot_output_formatter.html">telemetry.internal.results.buildbot_output_formatter</a><br>
+<a href="telemetry.internal.results.chart_json_output_formatter.html">telemetry.internal.results.chart_json_output_formatter</a><br>
+<a href="catapult_base.cloud_storage.html">catapult_base.cloud_storage</a><br>
+<a href="telemetry.internal.results.csv_pivot_table_output_formatter.html">telemetry.internal.results.csv_pivot_table_output_formatter</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.results.gtest_progress_reporter.html">telemetry.internal.results.gtest_progress_reporter</a><br>
+<a href="telemetry.internal.results.html_output_formatter.html">telemetry.internal.results.html_output_formatter</a><br>
+<a href="telemetry.internal.results.json_output_formatter.html">telemetry.internal.results.json_output_formatter</a><br>
+<a href="optparse.html">optparse</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+<a href="telemetry.internal.results.page_test_results.html">telemetry.internal.results.page_test_results</a><br>
+<a href="telemetry.internal.results.progress_reporter.html">telemetry.internal.results.progress_reporter</a><br>
+<a href="sys.html">sys</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-AddResultsOptions"><strong>AddResultsOptions</strong></a>(parser)</dt></dl>
+ <dl><dt><a name="-CreateResults"><strong>CreateResults</strong></a>(benchmark_metadata, options, value_can_be_added_predicate<font color="#909090">=&lt;function &lt;lambda&gt;&gt;</font>)</dt><dd><tt>Args:<br>
+&nbsp;&nbsp;options:&nbsp;Contains&nbsp;the&nbsp;options&nbsp;specified&nbsp;in&nbsp;AddResultsOptions.</tt></dd></dl>
+ <dl><dt><a name="-ProcessCommandLineArgs"><strong>ProcessCommandLineArgs</strong></a>(parser, args)</dt></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
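results_options connects option parsing to results creation: AddResultsOptions(parser) registers the output-related flags on an optparse parser, and CreateResults(benchmark_metadata, options, ...) builds a results object configured from those options. A minimal sketch, assuming a catapult/telemetry checkout on PYTHONPATH; as before, the BenchmarkMetadata instance is a placeholder:

import optparse

from telemetry.internal.results import results_options

parser = optparse.OptionParser()
results_options.AddResultsOptions(parser)  # registers the output-related options

# Parse an empty argv here just to pick up the documented defaults.
options, _ = parser.parse_args([])

metadata = None  # placeholder: a benchmark.BenchmarkMetadata instance in real use
results = results_options.CreateResults(metadata, options)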
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.results.story_run.html b/catapult/telemetry/docs/pydoc/telemetry.internal.results.story_run.html
new file mode 100644
index 0000000..f703b68
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.results.story_run.html
@@ -0,0 +1,83 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.results.story_run</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.results.html"><font color="#ffffff">results</font></a>.story_run</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/results/story_run.py">telemetry/internal/results/story_run.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.value.failure.html">telemetry.value.failure</a><br>
+</td><td width="25%" valign=top><a href="telemetry.value.skip.html">telemetry.value.skip</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.results.story_run.html#StoryRun">StoryRun</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="StoryRun">class <strong>StoryRun</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="StoryRun-AddValue"><strong>AddValue</strong></a>(self, value)</dt></dl>
+
+<dl><dt><a name="StoryRun-__init__"><strong>__init__</strong></a>(self, story)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>failed</strong></dt>
+<dd><tt>Whether&nbsp;the&nbsp;current&nbsp;run&nbsp;failed.<br>
+&nbsp;<br>
+To&nbsp;be&nbsp;precise:&nbsp;returns&nbsp;true&nbsp;if&nbsp;there&nbsp;is&nbsp;a&nbsp;FailureValue&nbsp;but&nbsp;no<br>
+SkipValue&nbsp;in&nbsp;self.<strong>values</strong>.</tt></dd>
+</dl>
+<dl><dt><strong>ok</strong></dt>
+<dd><tt>Whether&nbsp;the&nbsp;current&nbsp;run&nbsp;is&nbsp;still&nbsp;ok.<br>
+&nbsp;<br>
+To&nbsp;be&nbsp;precise:&nbsp;returns&nbsp;true&nbsp;if&nbsp;there&nbsp;is&nbsp;neither&nbsp;FailureValue&nbsp;nor<br>
+SkipValue&nbsp;in&nbsp;self.<strong>values</strong>.</tt></dd>
+</dl>
+<dl><dt><strong>skipped</strong></dt>
+<dd><tt>Whether&nbsp;the&nbsp;current&nbsp;run&nbsp;is&nbsp;being&nbsp;skipped.<br>
+&nbsp;<br>
+To&nbsp;be&nbsp;precise:&nbsp;returns&nbsp;true&nbsp;if&nbsp;there&nbsp;is&nbsp;any&nbsp;SkipValue&nbsp;in&nbsp;self.<strong>values</strong>.</tt></dd>
+</dl>
+<dl><dt><strong>story</strong></dt>
+</dl>
+<dl><dt><strong>values</strong></dt>
+<dd><tt>The&nbsp;values&nbsp;that&nbsp;correspond&nbsp;to&nbsp;this&nbsp;story&nbsp;run.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
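StoryRun is the per-story bookkeeping object: a run starts out ok, becomes failed once a FailureValue (and no SkipValue) is added, and skipped once any SkipValue is added. A minimal sketch, assuming a catapult/telemetry checkout on PYTHONPATH; story and failure_value are placeholders for the real story and telemetry.value.failure.FailureValue the runner would supply:

from telemetry.internal.results import story_run

story = None          # placeholder: a telemetry story object
failure_value = None  # placeholder: a telemetry.value.failure.FailureValue

run = story_run.StoryRun(story)
print(run.ok)  # True while no FailureValue or SkipValue has been added
run.AddValue(failure_value)
# With a real FailureValue (and no SkipValue), run.failed is now True and run.ok False.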
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.story_runner.html b/catapult/telemetry/docs/pydoc/telemetry.internal.story_runner.html
new file mode 100644
index 0000000..dabb2a7
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.story_runner.html
@@ -0,0 +1,178 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.story_runner</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.story_runner</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/story_runner.py">telemetry/internal/story_runner.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.browser.browser_finder.html">telemetry.internal.browser.browser_finder</a><br>
+<a href="catapult_base.cloud_storage.html">catapult_base.cloud_storage</a><br>
+<a href="telemetry.internal.util.exception_formatter.html">telemetry.internal.util.exception_formatter</a><br>
+<a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+<a href="telemetry.value.failure.html">telemetry.value.failure</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+<a href="optparse.html">optparse</a><br>
+<a href="os.html">os</a><br>
+<a href="telemetry.page.html">telemetry.page</a><br>
+<a href="telemetry.internal.actions.page_action.html">telemetry.internal.actions.page_action</a><br>
+</td><td width="25%" valign=top><a href="telemetry.page.page_test.html">telemetry.page.page_test</a><br>
+<a href="telemetry.internal.results.results_options.html">telemetry.internal.results.results_options</a><br>
+<a href="telemetry.value.skip.html">telemetry.value.skip</a><br>
+<a href="telemetry.story.html">telemetry.story</a><br>
+<a href="telemetry.web_perf.story_test.html">telemetry.web_perf.story_test</a><br>
+</td><td width="25%" valign=top><a href="sys.html">sys</a><br>
+<a href="time.html">time</a><br>
+<a href="telemetry.util.wpr_modes.html">telemetry.util.wpr_modes</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.story_runner.html#StoryGroup">StoryGroup</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.story_runner.html#ArchiveError">ArchiveError</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ArchiveError">class <strong>ArchiveError</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.story_runner.html#ArchiveError">ArchiveError</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="ArchiveError-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#ArchiveError-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#ArchiveError-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="ArchiveError-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ArchiveError-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ArchiveError-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#ArchiveError-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ArchiveError-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#ArchiveError-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="ArchiveError-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#ArchiveError-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="ArchiveError-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ArchiveError-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#ArchiveError-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="ArchiveError-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ArchiveError-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="ArchiveError-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ArchiveError-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#ArchiveError-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="ArchiveError-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="StoryGroup">class <strong>StoryGroup</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="StoryGroup-AddStory"><strong>AddStory</strong></a>(self, story)</dt></dl>
+
+<dl><dt><a name="StoryGroup-__init__"><strong>__init__</strong></a>(self, shared_state_class)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>shared_state_class</strong></dt>
+</dl>
+<dl><dt><strong>stories</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-AddCommandLineArgs"><strong>AddCommandLineArgs</strong></a>(parser)</dt></dl>
+ <dl><dt><a name="-ProcessCommandLineArgs"><strong>ProcessCommandLineArgs</strong></a>(parser, args)</dt></dl>
+ <dl><dt><a name="-Run"><strong>Run</strong></a>(test, story_set, finder_options, results, max_failures<font color="#909090">=None</font>)</dt><dd><tt>Runs&nbsp;a&nbsp;given&nbsp;test&nbsp;against&nbsp;a&nbsp;given&nbsp;page_set&nbsp;with&nbsp;the&nbsp;given&nbsp;options.<br>
+&nbsp;<br>
+Stop&nbsp;execution&nbsp;for&nbsp;unexpected&nbsp;exceptions&nbsp;such&nbsp;as&nbsp;KeyboardInterrupt.<br>
+We&nbsp;"white&nbsp;list"&nbsp;certain&nbsp;exceptions&nbsp;for&nbsp;which&nbsp;the&nbsp;story&nbsp;runner<br>
+can&nbsp;continue&nbsp;running&nbsp;the&nbsp;remaining&nbsp;stories.</tt></dd></dl>
+ <dl><dt><a name="-RunBenchmark"><strong>RunBenchmark</strong></a>(benchmark, finder_options)</dt><dd><tt>Run&nbsp;this&nbsp;test&nbsp;with&nbsp;the&nbsp;given&nbsp;options.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;The&nbsp;number&nbsp;of&nbsp;failure&nbsp;values&nbsp;(up&nbsp;to&nbsp;254)&nbsp;or&nbsp;255&nbsp;if&nbsp;there&nbsp;is&nbsp;an&nbsp;uncaught<br>
+&nbsp;&nbsp;exception.</tt></dd></dl>
+ <dl><dt><a name="-StoriesGroupedByStateClass"><strong>StoriesGroupedByStateClass</strong></a>(story_set, allow_multiple_groups)</dt><dd><tt>Returns&nbsp;a&nbsp;list&nbsp;of&nbsp;story&nbsp;groups&nbsp;which&nbsp;each&nbsp;contains&nbsp;stories&nbsp;with<br>
+the&nbsp;same&nbsp;shared_state_class.<br>
+&nbsp;<br>
+Example:<br>
+&nbsp;&nbsp;Assume&nbsp;A1,&nbsp;A2,&nbsp;A3&nbsp;are&nbsp;stories&nbsp;with&nbsp;the&nbsp;same&nbsp;shared&nbsp;state&nbsp;class,&nbsp;and<br>
+&nbsp;&nbsp;similarly&nbsp;for&nbsp;B1,&nbsp;B2.<br>
+&nbsp;&nbsp;If&nbsp;their&nbsp;order&nbsp;in&nbsp;the&nbsp;story&nbsp;set&nbsp;is&nbsp;A1&nbsp;A2&nbsp;B1&nbsp;B2&nbsp;A3,&nbsp;then&nbsp;the&nbsp;grouping&nbsp;will<br>
+&nbsp;&nbsp;be&nbsp;[A1&nbsp;A2]&nbsp;[B1&nbsp;B2]&nbsp;[A3].<br>
+&nbsp;<br>
+It's&nbsp;purposefully&nbsp;done&nbsp;this&nbsp;way&nbsp;to&nbsp;make&nbsp;sure&nbsp;that&nbsp;the&nbsp;order&nbsp;of<br>
+stories&nbsp;is&nbsp;the&nbsp;same&nbsp;as&nbsp;that&nbsp;defined&nbsp;in&nbsp;story_set.&nbsp;It's&nbsp;recommended&nbsp;that<br>
+stories&nbsp;with&nbsp;the&nbsp;same&nbsp;shared&nbsp;state&nbsp;be&nbsp;arranged&nbsp;next&nbsp;to&nbsp;each&nbsp;other&nbsp;in<br>
+story&nbsp;sets&nbsp;to&nbsp;reduce&nbsp;the&nbsp;overhead&nbsp;of&nbsp;setting&nbsp;up&nbsp;&amp;&nbsp;tearing&nbsp;down&nbsp;the<br>
+shared&nbsp;story&nbsp;state.</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
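The StoriesGroupedByStateClass docstring above describes a simple rule: walk the story set in order and start a new group whenever the shared state class changes from the previous story's. A self-contained sketch of that rule alone, using stand-in types rather than the real telemetry story and StoryGroup classes:

import collections

# Stand-in for a telemetry story; only the attribute used for grouping matters here.
Story = collections.namedtuple('Story', ['name', 'shared_state_class'])


def group_by_state_class(stories):
  """Groups consecutive stories sharing a state class, preserving story order."""
  groups = []
  previous_cls = None
  for story in stories:
    if not groups or story.shared_state_class is not previous_cls:
      groups.append([])  # the state class changed: start a new group
    groups[-1].append(story)
    previous_cls = story.shared_state_class
  return groups


class StateA(object): pass
class StateB(object): pass

stories = [Story('A1', StateA), Story('A2', StateA),
           Story('B1', StateB), Story('B2', StateB),
           Story('A3', StateA)]
print([[s.name for s in g] for g in group_by_state_class(stories)])
# Prints [['A1', 'A2'], ['B1', 'B2'], ['A3']], matching the docstring example.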
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.testing.discoverable_classes.another_discover_dummyclass.html b/catapult/telemetry/docs/pydoc/telemetry.internal.testing.discoverable_classes.another_discover_dummyclass.html
new file mode 100644
index 0000000..b821f2f
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.testing.discoverable_classes.another_discover_dummyclass.html
@@ -0,0 +1,220 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.testing.discoverable_classes.another_discover_dummyclass</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.testing.html"><font color="#ffffff">testing</font></a>.<a href="telemetry.internal.testing.discoverable_classes.html"><font color="#ffffff">discoverable_classes</font></a>.another_discover_dummyclass</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/testing/discoverable_classes/another_discover_dummyclass.py">telemetry/internal/testing/discoverable_classes/another_discover_dummyclass.py</a></font></td></tr></table>
+    <p><tt>More&nbsp;dummy&nbsp;exception&nbsp;subclasses&nbsp;used&nbsp;by&nbsp;core/discover.py's&nbsp;unit&nbsp;tests.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.testing.discoverable_classes.discover_dummyclass.html">telemetry.internal.testing.discoverable_classes.discover_dummyclass</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.testing.discoverable_classes.another_discover_dummyclass.html#_PrivateDummyException">_PrivateDummyException</a>(<a href="telemetry.internal.testing.discoverable_classes.discover_dummyclass.html#DummyException">telemetry.internal.testing.discoverable_classes.discover_dummyclass.DummyException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.testing.discoverable_classes.another_discover_dummyclass.html#DummyExceptionImpl1">DummyExceptionImpl1</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.internal.testing.discoverable_classes.another_discover_dummyclass.html#DummyExceptionImpl2">DummyExceptionImpl2</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.internal.testing.discoverable_classes.another_discover_dummyclass.html#DummyExceptionWithParameterImpl1">DummyExceptionWithParameterImpl1</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="DummyExceptionImpl1">class <strong>DummyExceptionImpl1</strong></a>(<a href="telemetry.internal.testing.discoverable_classes.another_discover_dummyclass.html#_PrivateDummyException">_PrivateDummyException</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.testing.discoverable_classes.another_discover_dummyclass.html#DummyExceptionImpl1">DummyExceptionImpl1</a></dd>
+<dd><a href="telemetry.internal.testing.discoverable_classes.another_discover_dummyclass.html#_PrivateDummyException">_PrivateDummyException</a></dd>
+<dd><a href="telemetry.internal.testing.discoverable_classes.discover_dummyclass.html#DummyException">telemetry.internal.testing.discoverable_classes.discover_dummyclass.DummyException</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="DummyExceptionImpl1-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.testing.discoverable_classes.discover_dummyclass.html#DummyException">telemetry.internal.testing.discoverable_classes.discover_dummyclass.DummyException</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#DummyExceptionImpl1-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="DummyExceptionImpl1-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyExceptionImpl1-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="DummyExceptionImpl1-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyExceptionImpl1-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="DummyExceptionImpl1-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyExceptionImpl1-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="DummyExceptionImpl1-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyExceptionImpl1-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="DummyExceptionImpl1-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="DummyExceptionImpl1-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyExceptionImpl1-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="DummyExceptionImpl1-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyExceptionImpl1-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="DummyExceptionImpl1-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="DummyExceptionImpl1-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyExceptionImpl1-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="DummyExceptionImpl1-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="DummyExceptionImpl2">class <strong>DummyExceptionImpl2</strong></a>(<a href="telemetry.internal.testing.discoverable_classes.another_discover_dummyclass.html#_PrivateDummyException">_PrivateDummyException</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.testing.discoverable_classes.another_discover_dummyclass.html#DummyExceptionImpl2">DummyExceptionImpl2</a></dd>
+<dd><a href="telemetry.internal.testing.discoverable_classes.another_discover_dummyclass.html#_PrivateDummyException">_PrivateDummyException</a></dd>
+<dd><a href="telemetry.internal.testing.discoverable_classes.discover_dummyclass.html#DummyException">telemetry.internal.testing.discoverable_classes.discover_dummyclass.DummyException</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="DummyExceptionImpl2-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.testing.discoverable_classes.discover_dummyclass.html#DummyException">telemetry.internal.testing.discoverable_classes.discover_dummyclass.DummyException</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#DummyExceptionImpl2-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="DummyExceptionImpl2-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyExceptionImpl2-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="DummyExceptionImpl2-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyExceptionImpl2-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="DummyExceptionImpl2-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyExceptionImpl2-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="DummyExceptionImpl2-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyExceptionImpl2-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="DummyExceptionImpl2-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="DummyExceptionImpl2-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyExceptionImpl2-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="DummyExceptionImpl2-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyExceptionImpl2-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="DummyExceptionImpl2-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="DummyExceptionImpl2-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyExceptionImpl2-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="DummyExceptionImpl2-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="DummyExceptionWithParameterImpl1">class <strong>DummyExceptionWithParameterImpl1</strong></a>(<a href="telemetry.internal.testing.discoverable_classes.another_discover_dummyclass.html#_PrivateDummyException">_PrivateDummyException</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.testing.discoverable_classes.another_discover_dummyclass.html#DummyExceptionWithParameterImpl1">DummyExceptionWithParameterImpl1</a></dd>
+<dd><a href="telemetry.internal.testing.discoverable_classes.another_discover_dummyclass.html#_PrivateDummyException">_PrivateDummyException</a></dd>
+<dd><a href="telemetry.internal.testing.discoverable_classes.discover_dummyclass.html#DummyException">telemetry.internal.testing.discoverable_classes.discover_dummyclass.DummyException</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="DummyExceptionWithParameterImpl1-__init__"><strong>__init__</strong></a>(self, parameter)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.testing.discoverable_classes.discover_dummyclass.html#DummyException">telemetry.internal.testing.discoverable_classes.discover_dummyclass.DummyException</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#DummyExceptionWithParameterImpl1-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="DummyExceptionWithParameterImpl1-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyExceptionWithParameterImpl1-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="DummyExceptionWithParameterImpl1-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyExceptionWithParameterImpl1-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="DummyExceptionWithParameterImpl1-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyExceptionWithParameterImpl1-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="DummyExceptionWithParameterImpl1-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyExceptionWithParameterImpl1-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="DummyExceptionWithParameterImpl1-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="DummyExceptionWithParameterImpl1-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyExceptionWithParameterImpl1-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="DummyExceptionWithParameterImpl1-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyExceptionWithParameterImpl1-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="DummyExceptionWithParameterImpl1-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="DummyExceptionWithParameterImpl1-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyExceptionWithParameterImpl1-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="DummyExceptionWithParameterImpl1-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.testing.discoverable_classes.discover_dummyclass.html b/catapult/telemetry/docs/pydoc/telemetry.internal.testing.discoverable_classes.discover_dummyclass.html
new file mode 100644
index 0000000..8349ad3
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.testing.discoverable_classes.discover_dummyclass.html
@@ -0,0 +1,88 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.testing.discoverable_classes.discover_dummyclass</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.testing.html"><font color="#ffffff">testing</font></a>.<a href="telemetry.internal.testing.discoverable_classes.html"><font color="#ffffff">discoverable_classes</font></a>.discover_dummyclass</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/testing/discoverable_classes/discover_dummyclass.py">telemetry/internal/testing/discoverable_classes/discover_dummyclass.py</a></font></td></tr></table>
+    <p><tt>A&nbsp;dummy&nbsp;exception&nbsp;subclass&nbsp;used&nbsp;by&nbsp;core/discover.py's&nbsp;unit&nbsp;tests.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.testing.discoverable_classes.discover_dummyclass.html#DummyException">DummyException</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="DummyException">class <strong>DummyException</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.testing.discoverable_classes.discover_dummyclass.html#DummyException">DummyException</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="DummyException-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#DummyException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="DummyException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="DummyException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="DummyException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="DummyException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="DummyException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="DummyException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="DummyException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="DummyException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="DummyException-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyException-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="DummyException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.testing.discoverable_classes.html b/catapult/telemetry/docs/pydoc/telemetry.internal.testing.discoverable_classes.html
new file mode 100644
index 0000000..c04239e
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.testing.discoverable_classes.html
@@ -0,0 +1,27 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.internal.testing.discoverable_classes</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.testing.html"><font color="#ffffff">testing</font></a>.discoverable_classes</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/testing/discoverable_classes/__init__.py">telemetry/internal/testing/discoverable_classes/__init__.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.testing.discoverable_classes.another_discover_dummyclass.html">another_discover_dummyclass</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.testing.discoverable_classes.discover_dummyclass.html">discover_dummyclass</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.testing.discoverable_classes.parameter_discover_dummyclass.html">parameter_discover_dummyclass</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.testing.discoverable_classes.parameter_discover_dummyclass.html b/catapult/telemetry/docs/pydoc/telemetry.internal.testing.discoverable_classes.parameter_discover_dummyclass.html
new file mode 100644
index 0000000..ab5a37d
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.testing.discoverable_classes.parameter_discover_dummyclass.html
@@ -0,0 +1,97 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.testing.discoverable_classes.parameter_discover_dummyclass</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.testing.html"><font color="#ffffff">testing</font></a>.<a href="telemetry.internal.testing.discoverable_classes.html"><font color="#ffffff">discoverable_classes</font></a>.parameter_discover_dummyclass</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/testing/discoverable_classes/parameter_discover_dummyclass.py">telemetry/internal/testing/discoverable_classes/parameter_discover_dummyclass.py</a></font></td></tr></table>
+    <p><tt>A&nbsp;dummy&nbsp;exception&nbsp;subclass&nbsp;used&nbsp;by&nbsp;core/discover.py's&nbsp;unit&nbsp;tests.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.testing.discoverable_classes.discover_dummyclass.html">telemetry.internal.testing.discoverable_classes.discover_dummyclass</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.testing.discoverable_classes.discover_dummyclass.html#DummyException">telemetry.internal.testing.discoverable_classes.discover_dummyclass.DummyException</a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.testing.discoverable_classes.parameter_discover_dummyclass.html#DummyExceptionWithParameterImpl2">DummyExceptionWithParameterImpl2</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="DummyExceptionWithParameterImpl2">class <strong>DummyExceptionWithParameterImpl2</strong></a>(<a href="telemetry.internal.testing.discoverable_classes.discover_dummyclass.html#DummyException">telemetry.internal.testing.discoverable_classes.discover_dummyclass.DummyException</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.testing.discoverable_classes.parameter_discover_dummyclass.html#DummyExceptionWithParameterImpl2">DummyExceptionWithParameterImpl2</a></dd>
+<dd><a href="telemetry.internal.testing.discoverable_classes.discover_dummyclass.html#DummyException">telemetry.internal.testing.discoverable_classes.discover_dummyclass.DummyException</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="DummyExceptionWithParameterImpl2-__init__"><strong>__init__</strong></a>(self, parameter1, parameter2)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.testing.discoverable_classes.discover_dummyclass.html#DummyException">telemetry.internal.testing.discoverable_classes.discover_dummyclass.DummyException</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#DummyExceptionWithParameterImpl2-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="DummyExceptionWithParameterImpl2-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyExceptionWithParameterImpl2-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="DummyExceptionWithParameterImpl2-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyExceptionWithParameterImpl2-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="DummyExceptionWithParameterImpl2-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyExceptionWithParameterImpl2-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="DummyExceptionWithParameterImpl2-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyExceptionWithParameterImpl2-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="DummyExceptionWithParameterImpl2-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="DummyExceptionWithParameterImpl2-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyExceptionWithParameterImpl2-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="DummyExceptionWithParameterImpl2-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyExceptionWithParameterImpl2-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="DummyExceptionWithParameterImpl2-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="DummyExceptionWithParameterImpl2-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#DummyExceptionWithParameterImpl2-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="DummyExceptionWithParameterImpl2-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.testing.html b/catapult/telemetry/docs/pydoc/telemetry.internal.testing.html
new file mode 100644
index 0000000..b7291d0
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.testing.html
@@ -0,0 +1,26 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.internal.testing</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.testing</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/testing/__init__.py">telemetry/internal/testing/__init__.py</a></font></td></tr></table>
+    <p></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.testing.discoverable_classes.html"><strong>discoverable_classes</strong>&nbsp;(package)</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.testing.page_sets.html"><strong>page_sets</strong>&nbsp;(package)</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.testing.pages.html"><strong>pages</strong>&nbsp;(package)</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.testing.system_stub_test_module.html">system_stub_test_module</a><br>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.testing.page_sets.example_domain.html b/catapult/telemetry/docs/pydoc/telemetry.internal.testing.page_sets.example_domain.html
new file mode 100644
index 0000000..788e175
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.testing.page_sets.example_domain.html
@@ -0,0 +1,129 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.testing.page_sets.example_domain</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.testing.html"><font color="#ffffff">testing</font></a>.<a href="telemetry.internal.testing.page_sets.html"><font color="#ffffff">page_sets</font></a>.example_domain</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/testing/page_sets/example_domain.py">telemetry/internal/testing/page_sets/example_domain.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.page.page.html">telemetry.page.page</a><br>
+</td><td width="25%" valign=top><a href="telemetry.story.html">telemetry.story</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.story.story_set.html#StorySet">telemetry.story.story_set.StorySet</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.testing.page_sets.example_domain.html#ExampleDomainPageSet">ExampleDomainPageSet</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ExampleDomainPageSet">class <strong>ExampleDomainPageSet</strong></a>(<a href="telemetry.story.story_set.html#StorySet">telemetry.story.story_set.StorySet</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.testing.page_sets.example_domain.html#ExampleDomainPageSet">ExampleDomainPageSet</a></dd>
+<dd><a href="telemetry.story.story_set.html#StorySet">telemetry.story.story_set.StorySet</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="ExampleDomainPageSet-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.story.story_set.html#StorySet">telemetry.story.story_set.StorySet</a>:<br>
+<dl><dt><a name="ExampleDomainPageSet-AddStory"><strong>AddStory</strong></a>(self, story)</dt></dl>
+
+<dl><dt><a name="ExampleDomainPageSet-RemoveStory"><strong>RemoveStory</strong></a>(self, story)</dt><dd><tt>Removes&nbsp;a&nbsp;Story.<br>
+&nbsp;<br>
+Allows&nbsp;the&nbsp;stories&nbsp;to&nbsp;be&nbsp;filtered.</tt></dd></dl>
+
+<dl><dt><a name="ExampleDomainPageSet-WprFilePathForStory"><strong>WprFilePathForStory</strong></a>(self, story)</dt><dd><tt>Convenient&nbsp;function&nbsp;to&nbsp;retrieve&nbsp;WPR&nbsp;archive&nbsp;file&nbsp;path.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;story:&nbsp;The&nbsp;Story&nbsp;to&nbsp;look&nbsp;up.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;The&nbsp;WPR&nbsp;archive&nbsp;file&nbsp;path&nbsp;for&nbsp;the&nbsp;given&nbsp;Story,&nbsp;if&nbsp;found.<br>
+&nbsp;&nbsp;Otherwise,&nbsp;None.</tt></dd></dl>
+
+<dl><dt><a name="ExampleDomainPageSet-__getitem__"><strong>__getitem__</strong></a>(self, key)</dt></dl>
+
+<dl><dt><a name="ExampleDomainPageSet-__iter__"><strong>__iter__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ExampleDomainPageSet-__len__"><strong>__len__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ExampleDomainPageSet-__setitem__"><strong>__setitem__</strong></a>(self, key, value)</dt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.story.story_set.html#StorySet">telemetry.story.story_set.StorySet</a>:<br>
+<dl><dt><a name="ExampleDomainPageSet-Description"><strong>Description</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Return&nbsp;a&nbsp;string&nbsp;explaining&nbsp;in&nbsp;human-understandable&nbsp;terms&nbsp;what&nbsp;this<br>
+story&nbsp;represents.<br>
+Note&nbsp;that&nbsp;this&nbsp;should&nbsp;be&nbsp;a&nbsp;classmethod&nbsp;so&nbsp;the&nbsp;benchmark_runner&nbsp;script&nbsp;can<br>
+display&nbsp;stories'&nbsp;names&nbsp;along&nbsp;with&nbsp;their&nbsp;descriptions&nbsp;in&nbsp;the&nbsp;list&nbsp;command.</tt></dd></dl>
+
+<dl><dt><a name="ExampleDomainPageSet-Name"><strong>Name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Returns&nbsp;the&nbsp;string&nbsp;name&nbsp;of&nbsp;this&nbsp;<a href="telemetry.story.story_set.html#StorySet">StorySet</a>.<br>
+Note&nbsp;that&nbsp;this&nbsp;should&nbsp;be&nbsp;a&nbsp;classmethod&nbsp;so&nbsp;the&nbsp;benchmark_runner&nbsp;script&nbsp;can<br>
+match&nbsp;the&nbsp;story&nbsp;class&nbsp;with&nbsp;its&nbsp;name&nbsp;specified&nbsp;in&nbsp;the&nbsp;run&nbsp;command:<br>
+'Run&nbsp;&lt;User&nbsp;story&nbsp;test&nbsp;name&gt;&nbsp;&lt;User&nbsp;story&nbsp;class&nbsp;name&gt;'</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.story.story_set.html#StorySet">telemetry.story.story_set.StorySet</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>allow_mixed_story_states</strong></dt>
+<dd><tt>True&nbsp;iff&nbsp;Stories&nbsp;are&nbsp;allowed&nbsp;to&nbsp;have&nbsp;different&nbsp;StoryState&nbsp;classes.<br>
+&nbsp;<br>
+There&nbsp;are&nbsp;no&nbsp;checks&nbsp;in&nbsp;place&nbsp;for&nbsp;determining&nbsp;if&nbsp;SharedStates&nbsp;are<br>
+being&nbsp;assigned&nbsp;correctly&nbsp;to&nbsp;all&nbsp;Stories&nbsp;in&nbsp;a&nbsp;given&nbsp;StorySet.&nbsp;The<br>
+majority&nbsp;of&nbsp;test&nbsp;cases&nbsp;should&nbsp;not&nbsp;need&nbsp;the&nbsp;ability&nbsp;to&nbsp;have&nbsp;multiple<br>
+SharedStates,&nbsp;which&nbsp;usually&nbsp;implies&nbsp;you&nbsp;should&nbsp;be&nbsp;writing&nbsp;multiple<br>
+benchmarks&nbsp;instead.&nbsp;We&nbsp;provide&nbsp;errors&nbsp;to&nbsp;avoid&nbsp;accidentally&nbsp;assigning<br>
+or&nbsp;defaulting&nbsp;to&nbsp;the&nbsp;wrong&nbsp;SharedState.<br>
+Override&nbsp;at&nbsp;your&nbsp;own&nbsp;risk.&nbsp;Here&nbsp;be&nbsp;dragons.</tt></dd>
+</dl>
+<dl><dt><strong>archive_data_file</strong></dt>
+</dl>
+<dl><dt><strong>base_dir</strong></dt>
+<dd><tt>The&nbsp;base&nbsp;directory&nbsp;to&nbsp;resolve&nbsp;archive_data_file.<br>
+&nbsp;<br>
+This&nbsp;defaults&nbsp;to&nbsp;the&nbsp;directory&nbsp;containing&nbsp;the&nbsp;StorySet&nbsp;instance's&nbsp;class.</tt></dd>
+</dl>
+<dl><dt><strong>bucket</strong></dt>
+</dl>
+<dl><dt><strong>file_path</strong></dt>
+</dl>
+<dl><dt><strong>serving_dirs</strong></dt>
+</dl>
+<dl><dt><strong>wpr_archive_info</strong></dt>
+<dd><tt>Lazily&nbsp;constructs&nbsp;wpr_archive_info&nbsp;if&nbsp;it's&nbsp;not&nbsp;set&nbsp;and&nbsp;returns&nbsp;it.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
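The generated page above documents the StorySet API that ExampleDomainPageSet inherits (AddStory, RemoveStory, WprFilePathForStory, and the archive-related properties). As a rough illustration only, and not part of this diff, a minimal story set built on that API might look like the sketch below; the class name, URL, and Page constructor arguments are assumptions rather than anything taken from the change.

# Hypothetical sketch of a minimal StorySet subclass using the AddStory API
# documented above. Class name, URL, and Page arguments are illustrative.
from telemetry.page import page as page_module
from telemetry import story


class MyExamplePageSet(story.StorySet):
  def __init__(self):
    super(MyExamplePageSet, self).__init__()
    # AddStory(self, story) adds a single page/story to the set.
    self.AddStory(page_module.Page('http://www.example.com', self))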
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.testing.page_sets.html b/catapult/telemetry/docs/pydoc/telemetry.internal.testing.page_sets.html
new file mode 100644
index 0000000..5ce64b4
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.testing.page_sets.html
@@ -0,0 +1,23 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.internal.testing.page_sets</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.testing.html"><font color="#ffffff">testing</font></a>.page_sets</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/testing/page_sets/__init__.py">telemetry/internal/testing/page_sets/__init__.py</a></font></td></tr></table>
+    <p></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.testing.page_sets.example_domain.html">example_domain</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.testing.pages.external_page.html b/catapult/telemetry/docs/pydoc/telemetry.internal.testing.pages.external_page.html
new file mode 100644
index 0000000..5f0c00c
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.testing.pages.external_page.html
@@ -0,0 +1,130 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.testing.pages.external_page</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.testing.html"><font color="#ffffff">testing</font></a>.<a href="telemetry.internal.testing.pages.html"><font color="#ffffff">pages</font></a>.external_page</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/testing/pages/external_page.py">telemetry/internal/testing/pages/external_page.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.page.html#Page">telemetry.page.Page</a>(<a href="telemetry.story.story.html#Story">telemetry.story.story.Story</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.testing.pages.external_page.html#ExternalPage">ExternalPage</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ExternalPage">class <strong>ExternalPage</strong></a>(<a href="telemetry.page.html#Page">telemetry.page.Page</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.testing.pages.external_page.html#ExternalPage">ExternalPage</a></dd>
+<dd><a href="telemetry.page.html#Page">telemetry.page.Page</a></dd>
+<dd><a href="telemetry.story.story.html#Story">telemetry.story.story.Story</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="ExternalPage-__init__"><strong>__init__</strong></a>(self, ps)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.page.html#Page">telemetry.page.Page</a>:<br>
+<dl><dt><a name="ExternalPage-AddCustomizeBrowserOptions"><strong>AddCustomizeBrowserOptions</strong></a>(self, options)</dt><dd><tt>Inherit&nbsp;page&nbsp;overrides&nbsp;this&nbsp;to&nbsp;add&nbsp;customized&nbsp;browser&nbsp;options.</tt></dd></dl>
+
+<dl><dt><a name="ExternalPage-AsDict"><strong>AsDict</strong></a>(self)</dt><dd><tt>Converts&nbsp;a&nbsp;page&nbsp;object&nbsp;to&nbsp;a&nbsp;dict&nbsp;suitable&nbsp;for&nbsp;JSON&nbsp;output.</tt></dd></dl>
+
+<dl><dt><a name="ExternalPage-GetSyntheticDelayCategories"><strong>GetSyntheticDelayCategories</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ExternalPage-Run"><strong>Run</strong></a>(self, shared_state)</dt></dl>
+
+<dl><dt><a name="ExternalPage-RunNavigateSteps"><strong>RunNavigateSteps</strong></a>(self, action_runner)</dt></dl>
+
+<dl><dt><a name="ExternalPage-RunPageInteractions"><strong>RunPageInteractions</strong></a>(self, action_runner)</dt><dd><tt>Override&nbsp;this&nbsp;to&nbsp;define&nbsp;custom&nbsp;interactions&nbsp;with&nbsp;the&nbsp;page.<br>
+e.g:<br>
+&nbsp;&nbsp;def&nbsp;<a href="#ExternalPage-RunPageInteractions">RunPageInteractions</a>(self,&nbsp;action_runner):<br>
+&nbsp;&nbsp;&nbsp;&nbsp;action_runner.ScrollPage()<br>
+&nbsp;&nbsp;&nbsp;&nbsp;action_runner.TapElement(text='Next')</tt></dd></dl>
+
+<dl><dt><a name="ExternalPage-__cmp__"><strong>__cmp__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="ExternalPage-__lt__"><strong>__lt__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="ExternalPage-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.page.html#Page">telemetry.page.Page</a>:<br>
+<dl><dt><strong>base_dir</strong></dt>
+</dl>
+<dl><dt><strong>credentials_path</strong></dt>
+</dl>
+<dl><dt><strong>display_name</strong></dt>
+</dl>
+<dl><dt><strong>file_path</strong></dt>
+<dd><tt>Returns&nbsp;the&nbsp;path&nbsp;of&nbsp;the&nbsp;file,&nbsp;stripping&nbsp;the&nbsp;scheme&nbsp;and&nbsp;query&nbsp;string.</tt></dd>
+</dl>
+<dl><dt><strong>file_path_url</strong></dt>
+<dd><tt>Returns&nbsp;the&nbsp;file&nbsp;path,&nbsp;including&nbsp;the&nbsp;params,&nbsp;query,&nbsp;and&nbsp;fragment.</tt></dd>
+</dl>
+<dl><dt><strong>file_path_url_with_scheme</strong></dt>
+</dl>
+<dl><dt><strong>is_file</strong></dt>
+<dd><tt>Returns&nbsp;True&nbsp;iff&nbsp;this&nbsp;URL&nbsp;points&nbsp;to&nbsp;a&nbsp;file.</tt></dd>
+</dl>
+<dl><dt><strong>page_set</strong></dt>
+</dl>
+<dl><dt><strong>serving_dir</strong></dt>
+</dl>
+<dl><dt><strong>startup_url</strong></dt>
+</dl>
+<dl><dt><strong>story_set</strong></dt>
+</dl>
+<dl><dt><strong>url</strong></dt>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.story.story.html#Story">telemetry.story.story.Story</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>file_safe_name</strong></dt>
+<dd><tt>A&nbsp;version&nbsp;of&nbsp;display_name&nbsp;that's&nbsp;safe&nbsp;to&nbsp;use&nbsp;as&nbsp;a&nbsp;filename.<br>
+&nbsp;<br>
+The&nbsp;default&nbsp;implementation&nbsp;sanitizes&nbsp;special&nbsp;characters&nbsp;with&nbsp;underscores,<br>
+but&nbsp;it's&nbsp;okay&nbsp;to&nbsp;override&nbsp;it&nbsp;with&nbsp;a&nbsp;more&nbsp;specific&nbsp;implementation&nbsp;in<br>
+subclasses.</tt></dd>
+</dl>
+<dl><dt><strong>id</strong></dt>
+</dl>
+<dl><dt><strong>is_local</strong></dt>
+<dd><tt>Returns&nbsp;True&nbsp;iff&nbsp;this&nbsp;story&nbsp;does&nbsp;not&nbsp;require&nbsp;network.</tt></dd>
+</dl>
+<dl><dt><strong>labels</strong></dt>
+</dl>
+<dl><dt><strong>make_javascript_deterministic</strong></dt>
+</dl>
+<dl><dt><strong>name</strong></dt>
+</dl>
+<dl><dt><strong>shared_state_class</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.testing.pages.html b/catapult/telemetry/docs/pydoc/telemetry.internal.testing.pages.html
new file mode 100644
index 0000000..a21c90c
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.testing.pages.html
@@ -0,0 +1,23 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.internal.testing.pages</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.testing.html"><font color="#ffffff">testing</font></a>.pages</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/testing/pages/__init__.py">telemetry/internal/testing/pages/__init__.py</a></font></td></tr></table>
+    <p></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.testing.pages.external_page.html">external_page</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.testing.system_stub_test_module.html b/catapult/telemetry/docs/pydoc/telemetry.internal.testing.system_stub_test_module.html
new file mode 100644
index 0000000..f5969be
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.testing.system_stub_test_module.html
@@ -0,0 +1,54 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.testing.system_stub_test_module</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.testing.html"><font color="#ffffff">testing</font></a>.system_stub_test_module</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/testing/system_stub_test_module.py">telemetry/internal/testing/system_stub_test_module.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.testing.system_stub_test_module.html#SystemStubTest">SystemStubTest</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="SystemStubTest">class <strong>SystemStubTest</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Static methods defined here:<br>
+<dl><dt><a name="SystemStubTest-TestOpen"><strong>TestOpen</strong></a>(file_path)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.util.binary_manager.html b/catapult/telemetry/docs/pydoc/telemetry.internal.util.binary_manager.html
new file mode 100644
index 0000000..3ad540f
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.util.binary_manager.html
@@ -0,0 +1,49 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.util.binary_manager</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.util.html"><font color="#ffffff">util</font></a>.binary_manager</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/util/binary_manager.py">telemetry/internal/util/binary_manager.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="catapult_base.dependency_manager.html">catapult_base.dependency_manager</a><br>
+<a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+<a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-FetchPath"><strong>FetchPath</strong></a>(binary_name, arch, platform)</dt><dd><tt>Return&nbsp;a&nbsp;path&nbsp;to&nbsp;the&nbsp;appropriate&nbsp;executable&nbsp;for&nbsp;&lt;binary_name&gt;,&nbsp;downloading<br>
+from&nbsp;cloud&nbsp;storage&nbsp;if&nbsp;needed,&nbsp;or&nbsp;None&nbsp;if&nbsp;it&nbsp;cannot&nbsp;be&nbsp;found.</tt></dd></dl>
+ <dl><dt><a name="-InitDependencyManager"><strong>InitDependencyManager</strong></a>(environment_config)</dt></dl>
+ <dl><dt><a name="-LocalPath"><strong>LocalPath</strong></a>(binary_name, arch, platform)</dt><dd><tt>Return&nbsp;a&nbsp;local&nbsp;path&nbsp;to&nbsp;the&nbsp;given&nbsp;binary&nbsp;name,&nbsp;or&nbsp;None&nbsp;if&nbsp;an&nbsp;executable<br>
+cannot&nbsp;be&nbsp;found.&nbsp;Will&nbsp;not&nbsp;download&nbsp;the&nbsp;executable.</tt></dd></dl>
+ <dl><dt><a name="-NeedsInit"><strong>NeedsInit</strong></a>()</dt></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>TELEMETRY_PROJECT_CONFIG</strong> = '/usr/local/google/home/nednguyen/projects/chromi...metry/telemetry/internal/binary_dependencies.json'</td></tr></table>
+</body></html>
\ No newline at end of file
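The binary_manager page above documents four module-level functions: NeedsInit(), InitDependencyManager(environment_config), FetchPath(binary_name, arch, platform), and LocalPath(binary_name, arch, platform). A rough usage sketch, not part of this diff, follows; the binary name, arch, and platform strings are placeholders, and passing None for environment_config is an assumption about the default behaviour.

# Hypothetical usage sketch of the binary_manager API documented above.
import logging

from telemetry.internal.util import binary_manager

if binary_manager.NeedsInit():
  # Passing None here is an assumption; a real caller may supply a
  # project-specific environment config.
  binary_manager.InitDependencyManager(None)

# Placeholder binary/arch/platform values; FetchPath downloads from cloud
# storage if needed and returns None when the binary cannot be found.
path = binary_manager.FetchPath('example_binary', 'x86_64', 'linux')
if path is None:
  logging.warning('example_binary is not available for this arch/platform')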
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.util.bootstrap.html b/catapult/telemetry/docs/pydoc/telemetry.internal.util.bootstrap.html
new file mode 100644
index 0000000..c025b3c
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.util.bootstrap.html
@@ -0,0 +1,124 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.util.bootstrap</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.util.html"><font color="#ffffff">util</font></a>.bootstrap</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/util/bootstrap.py">telemetry/internal/util/bootstrap.py</a></font></td></tr></table>
+    <p><tt>Bootstrap&nbsp;Chrome&nbsp;Telemetry&nbsp;by&nbsp;downloading&nbsp;all&nbsp;its&nbsp;files&nbsp;from&nbsp;SVN&nbsp;servers.<br>
+&nbsp;<br>
+Requires&nbsp;a&nbsp;DEPS&nbsp;file&nbsp;to&nbsp;specify&nbsp;which&nbsp;directories&nbsp;on&nbsp;which&nbsp;SVN&nbsp;servers<br>
+are&nbsp;required&nbsp;to&nbsp;run&nbsp;Telemetry.&nbsp;Format&nbsp;of&nbsp;that&nbsp;DEPS&nbsp;file&nbsp;is&nbsp;a&nbsp;subset&nbsp;of&nbsp;the<br>
+normal&nbsp;DEPS&nbsp;file&nbsp;format[1];&nbsp;currently&nbsp;only&nbsp;the&nbsp;"deps"&nbsp;dictionary&nbsp;is<br>
+supported&nbsp;and&nbsp;nothing&nbsp;else.<br>
+&nbsp;<br>
+Fetches&nbsp;all&nbsp;files&nbsp;in&nbsp;the&nbsp;specified&nbsp;directories&nbsp;using&nbsp;WebDAV&nbsp;(SVN&nbsp;is&nbsp;WebDAV&nbsp;under<br>
+the&nbsp;hood).<br>
+&nbsp;<br>
+[1]&nbsp;<a href="http://dev.chromium.org/developers/how-tos/depottools#TOC-DEPS-file">http://dev.chromium.org/developers/how-tos/depottools#TOC-DEPS-file</a></tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="imp.html">imp</a><br>
+<a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+<a href="urllib.html">urllib</a><br>
+</td><td width="25%" valign=top><a href="urlparse.html">urlparse</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.util.bootstrap.html#DAVClientWrapper">DAVClientWrapper</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="DAVClientWrapper">class <strong>DAVClientWrapper</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Knows&nbsp;how&nbsp;to&nbsp;retrieve&nbsp;subdirectories&nbsp;and&nbsp;files&nbsp;from&nbsp;WebDAV/SVN&nbsp;servers.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="DAVClientWrapper-GetDirList"><strong>GetDirList</strong></a>(self, path)</dt><dd><tt>Returns&nbsp;string&nbsp;names&nbsp;of&nbsp;all&nbsp;files&nbsp;and&nbsp;subdirs&nbsp;of&nbsp;path&nbsp;on&nbsp;the&nbsp;server.</tt></dd></dl>
+
+<dl><dt><a name="DAVClientWrapper-IsFile"><strong>IsFile</strong></a>(self, path)</dt><dd><tt>Returns&nbsp;True&nbsp;if&nbsp;the&nbsp;path&nbsp;is&nbsp;a&nbsp;file&nbsp;on&nbsp;the&nbsp;server,&nbsp;False&nbsp;if&nbsp;directory.</tt></dd></dl>
+
+<dl><dt><a name="DAVClientWrapper-Traverse"><strong>Traverse</strong></a>(self, src_path, dst_path)</dt><dd><tt>Walks&nbsp;the&nbsp;directory&nbsp;hierarchy&nbsp;pointed&nbsp;to&nbsp;by&nbsp;src_path&nbsp;download&nbsp;all&nbsp;files.<br>
+&nbsp;<br>
+Recursively&nbsp;walks&nbsp;src_path&nbsp;and&nbsp;saves&nbsp;all&nbsp;files&nbsp;and&nbsp;subfolders&nbsp;into<br>
+dst_path.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;src_path:&nbsp;string&nbsp;path&nbsp;on&nbsp;SVN&nbsp;server&nbsp;to&nbsp;save&nbsp;(absolute&nbsp;path&nbsp;on&nbsp;server).<br>
+&nbsp;&nbsp;dst_path:&nbsp;string&nbsp;local&nbsp;path&nbsp;(relative&nbsp;or&nbsp;absolute)&nbsp;to&nbsp;save&nbsp;to.</tt></dd></dl>
+
+<dl><dt><a name="DAVClientWrapper-__init__"><strong>__init__</strong></a>(self, root_url)</dt><dd><tt>Initialize&nbsp;SVN&nbsp;server&nbsp;root_url,&nbsp;save&nbsp;files&nbsp;to&nbsp;local&nbsp;dest_dir.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;root_url:&nbsp;string&nbsp;url&nbsp;of&nbsp;SVN/WebDAV&nbsp;server</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-DownloadDeps"><strong>DownloadDeps</strong></a>(destination_dir, url)</dt><dd><tt>Saves&nbsp;all&nbsp;the&nbsp;dependencies&nbsp;in&nbsp;deps_path.<br>
+&nbsp;<br>
+Opens&nbsp;and&nbsp;reads&nbsp;url,&nbsp;assuming&nbsp;the&nbsp;contents&nbsp;are&nbsp;in&nbsp;the&nbsp;simple&nbsp;DEPS-like&nbsp;file<br>
+format&nbsp;specified&nbsp;in&nbsp;the&nbsp;header&nbsp;of&nbsp;this&nbsp;file,&nbsp;then&nbsp;download&nbsp;all<br>
+files/directories&nbsp;listed&nbsp;to&nbsp;the&nbsp;destination_dir.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;destination_dir:&nbsp;String&nbsp;path&nbsp;to&nbsp;directory&nbsp;to&nbsp;download&nbsp;files&nbsp;into.<br>
+&nbsp;&nbsp;url:&nbsp;URL&nbsp;containing&nbsp;deps&nbsp;information&nbsp;to&nbsp;be&nbsp;evaluated.</tt></dd></dl>
+ <dl><dt><a name="-ListAllDepsPaths"><strong>ListAllDepsPaths</strong></a>(deps_file)</dt><dd><tt>Recursively&nbsp;returns&nbsp;a&nbsp;list&nbsp;of&nbsp;all&nbsp;paths&nbsp;indicated&nbsp;in&nbsp;this&nbsp;deps&nbsp;file.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;this&nbsp;discards&nbsp;information&nbsp;about&nbsp;where&nbsp;path&nbsp;dependencies&nbsp;come&nbsp;from,<br>
+so&nbsp;this&nbsp;is&nbsp;only&nbsp;useful&nbsp;in&nbsp;the&nbsp;context&nbsp;of&nbsp;a&nbsp;Chromium&nbsp;source&nbsp;checkout&nbsp;that&nbsp;has<br>
+already&nbsp;fetched&nbsp;all&nbsp;dependencies.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;deps_file:&nbsp;File&nbsp;containing&nbsp;deps&nbsp;information&nbsp;to&nbsp;be&nbsp;evaluated,&nbsp;in&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;format&nbsp;given&nbsp;in&nbsp;the&nbsp;header&nbsp;of&nbsp;this&nbsp;file.<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;list&nbsp;of&nbsp;string&nbsp;paths&nbsp;starting&nbsp;under&nbsp;src&nbsp;that&nbsp;are&nbsp;required&nbsp;by&nbsp;the<br>
+&nbsp;&nbsp;given&nbsp;deps&nbsp;file,&nbsp;and&nbsp;all&nbsp;of&nbsp;its&nbsp;sub-dependencies.&nbsp;This&nbsp;amounts&nbsp;to<br>
+&nbsp;&nbsp;the&nbsp;keys&nbsp;of&nbsp;the&nbsp;'deps'&nbsp;dictionary.</tt></dd></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>davclient</strong> = None</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.util.camel_case.html b/catapult/telemetry/docs/pydoc/telemetry.internal.util.camel_case.html
new file mode 100644
index 0000000..5aeb7cc
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.util.camel_case.html
@@ -0,0 +1,36 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.util.camel_case</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.util.html"><font color="#ffffff">util</font></a>.camel_case</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/util/camel_case.py">telemetry/internal/util/camel_case.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="re.html">re</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-ToUnderscore"><strong>ToUnderscore</strong></a>(obj)</dt><dd><tt>Converts&nbsp;a&nbsp;string,&nbsp;list,&nbsp;or&nbsp;dict&nbsp;from&nbsp;camelCase&nbsp;to&nbsp;lower_with_underscores.<br>
+&nbsp;<br>
+Descends&nbsp;recursively&nbsp;into&nbsp;lists&nbsp;and&nbsp;dicts,&nbsp;converting&nbsp;all&nbsp;dict&nbsp;keys.<br>
+Returns&nbsp;a&nbsp;newly&nbsp;allocated&nbsp;object&nbsp;of&nbsp;the&nbsp;same&nbsp;structure&nbsp;as&nbsp;the&nbsp;input.</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.util.classes.html b/catapult/telemetry/docs/pydoc/telemetry.internal.util.classes.html
new file mode 100644
index 0000000..edac79e
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.util.classes.html
@@ -0,0 +1,33 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.util.classes</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.util.html"><font color="#ffffff">util</font></a>.classes</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/util/classes.py">telemetry/internal/util/classes.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="inspect.html">inspect</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-IsDirectlyConstructable"><strong>IsDirectlyConstructable</strong></a>(cls)</dt><dd><tt>Returns&nbsp;True&nbsp;if&nbsp;instance&nbsp;of&nbsp;|cls|&nbsp;can&nbsp;be&nbsp;construct&nbsp;without&nbsp;arguments.</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.util.command_line.html b/catapult/telemetry/docs/pydoc/telemetry.internal.util.command_line.html
new file mode 100644
index 0000000..e72ea18
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.util.command_line.html
@@ -0,0 +1,243 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.util.command_line</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.util.html"><font color="#ffffff">util</font></a>.command_line</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/util/command_line.py">telemetry/internal/util/command_line.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="argparse.html">argparse</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.util.camel_case.html">telemetry.internal.util.camel_case</a><br>
+</td><td width="25%" valign=top><a href="difflib.html">difflib</a><br>
+</td><td width="25%" valign=top><a href="optparse.html">optparse</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.util.command_line.html#ArgumentHandlerMixIn">ArgumentHandlerMixIn</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.util.command_line.html#Command">Command</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.util.command_line.html#OptparseCommand">OptparseCommand</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.internal.util.command_line.html#SubcommandCommand">SubcommandCommand</a>
+</font></dt></dl>
+</dd>
+</dl>
+</dd>
+</dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ArgumentHandlerMixIn">class <strong>ArgumentHandlerMixIn</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;structured&nbsp;way&nbsp;to&nbsp;handle&nbsp;command-line&nbsp;arguments.<br>
+&nbsp;<br>
+In&nbsp;AddCommandLineArgs,&nbsp;add&nbsp;command-line&nbsp;arguments.<br>
+In&nbsp;ProcessCommandLineArgs,&nbsp;validate&nbsp;them&nbsp;and&nbsp;store&nbsp;them&nbsp;in&nbsp;a&nbsp;private&nbsp;class<br>
+variable.&nbsp;This&nbsp;way,&nbsp;each&nbsp;class&nbsp;encapsulates&nbsp;its&nbsp;own&nbsp;arguments,&nbsp;without&nbsp;needing<br>
+to&nbsp;pass&nbsp;an&nbsp;arguments&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;around&nbsp;everywhere.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Class methods defined here:<br>
+<dl><dt><a name="ArgumentHandlerMixIn-AddCommandLineArgs"><strong>AddCommandLineArgs</strong></a>(cls, parser)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Override&nbsp;to&nbsp;accept&nbsp;custom&nbsp;command-line&nbsp;arguments.</tt></dd></dl>
+
+<dl><dt><a name="ArgumentHandlerMixIn-ProcessCommandLineArgs"><strong>ProcessCommandLineArgs</strong></a>(cls, parser, args)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Override&nbsp;to&nbsp;process&nbsp;command-line&nbsp;arguments.<br>
+&nbsp;<br>
+We&nbsp;pass&nbsp;in&nbsp;parser&nbsp;so&nbsp;we&nbsp;can&nbsp;call&nbsp;parser.error().</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Command">class <strong>Command</strong></a>(<a href="telemetry.internal.util.command_line.html#ArgumentHandlerMixIn">ArgumentHandlerMixIn</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>An&nbsp;abstraction&nbsp;for&nbsp;things&nbsp;that&nbsp;run&nbsp;from&nbsp;the&nbsp;command-line.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.util.command_line.html#Command">Command</a></dd>
+<dd><a href="telemetry.internal.util.command_line.html#ArgumentHandlerMixIn">ArgumentHandlerMixIn</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="Command-Run"><strong>Run</strong></a>(self, args)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="Command-Description"><strong>Description</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="Command-Name"><strong>Name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="Command-main"><strong>main</strong></a>(cls, args<font color="#909090">=None</font>)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Main&nbsp;method&nbsp;to&nbsp;run&nbsp;this&nbsp;command&nbsp;as&nbsp;a&nbsp;standalone&nbsp;script.</tt></dd></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.util.command_line.html#ArgumentHandlerMixIn">ArgumentHandlerMixIn</a>:<br>
+<dl><dt><a name="Command-AddCommandLineArgs"><strong>AddCommandLineArgs</strong></a>(cls, parser)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Override&nbsp;to&nbsp;accept&nbsp;custom&nbsp;command-line&nbsp;arguments.</tt></dd></dl>
+
+<dl><dt><a name="Command-ProcessCommandLineArgs"><strong>ProcessCommandLineArgs</strong></a>(cls, parser, args)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Override&nbsp;to&nbsp;process&nbsp;command-line&nbsp;arguments.<br>
+&nbsp;<br>
+We&nbsp;pass&nbsp;in&nbsp;parser&nbsp;so&nbsp;we&nbsp;can&nbsp;call&nbsp;parser.error().</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.util.command_line.html#ArgumentHandlerMixIn">ArgumentHandlerMixIn</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="OptparseCommand">class <strong>OptparseCommand</strong></a>(<a href="telemetry.internal.util.command_line.html#Command">Command</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>#&nbsp;TODO:&nbsp;Convert&nbsp;everything&nbsp;to&nbsp;argparse.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.util.command_line.html#OptparseCommand">OptparseCommand</a></dd>
+<dd><a href="telemetry.internal.util.command_line.html#Command">Command</a></dd>
+<dd><a href="telemetry.internal.util.command_line.html#ArgumentHandlerMixIn">ArgumentHandlerMixIn</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="OptparseCommand-Run"><strong>Run</strong></a>(self, args)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="OptparseCommand-AddCommandLineArgs"><strong>AddCommandLineArgs</strong></a>(cls, parser, environment)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="OptparseCommand-CreateParser"><strong>CreateParser</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="OptparseCommand-ProcessCommandLineArgs"><strong>ProcessCommandLineArgs</strong></a>(cls, parser, args, environment)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="OptparseCommand-main"><strong>main</strong></a>(cls, args<font color="#909090">=None</font>)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Main&nbsp;method&nbsp;to&nbsp;run&nbsp;this&nbsp;command&nbsp;as&nbsp;a&nbsp;standalone&nbsp;script.</tt></dd></dl>
+
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>usage</strong> = ''</dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.util.command_line.html#Command">Command</a>:<br>
+<dl><dt><a name="OptparseCommand-Description"><strong>Description</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="OptparseCommand-Name"><strong>Name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.util.command_line.html#ArgumentHandlerMixIn">ArgumentHandlerMixIn</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="SubcommandCommand">class <strong>SubcommandCommand</strong></a>(<a href="telemetry.internal.util.command_line.html#Command">Command</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Combines&nbsp;Commands&nbsp;into&nbsp;one&nbsp;big&nbsp;command&nbsp;with&nbsp;sub-commands.<br>
+&nbsp;<br>
+E.g.&nbsp;"svn&nbsp;checkout",&nbsp;"svn&nbsp;update",&nbsp;and&nbsp;"svn&nbsp;commit"&nbsp;are&nbsp;separate&nbsp;sub-commands.<br>
+&nbsp;<br>
+Example&nbsp;usage:<br>
+&nbsp;&nbsp;class&nbsp;MyCommand(command_line.<a href="#SubcommandCommand">SubcommandCommand</a>):<br>
+&nbsp;&nbsp;&nbsp;&nbsp;commands&nbsp;=&nbsp;(Help,&nbsp;List,&nbsp;Run)<br>
+&nbsp;<br>
+&nbsp;&nbsp;if&nbsp;__name__&nbsp;==&nbsp;'__main__':<br>
+&nbsp;&nbsp;&nbsp;&nbsp;sys.exit(MyCommand.<a href="#SubcommandCommand-main">main</a>())<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.util.command_line.html#SubcommandCommand">SubcommandCommand</a></dd>
+<dd><a href="telemetry.internal.util.command_line.html#Command">Command</a></dd>
+<dd><a href="telemetry.internal.util.command_line.html#ArgumentHandlerMixIn">ArgumentHandlerMixIn</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="SubcommandCommand-Run"><strong>Run</strong></a>(self, args)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="SubcommandCommand-AddCommandLineArgs"><strong>AddCommandLineArgs</strong></a>(cls, parser)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="SubcommandCommand-ProcessCommandLineArgs"><strong>ProcessCommandLineArgs</strong></a>(cls, parser, args)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>commands</strong> = ()</dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.util.command_line.html#Command">Command</a>:<br>
+<dl><dt><a name="SubcommandCommand-Description"><strong>Description</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="SubcommandCommand-Name"><strong>Name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="SubcommandCommand-main"><strong>main</strong></a>(cls, args<font color="#909090">=None</font>)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Main&nbsp;method&nbsp;to&nbsp;run&nbsp;this&nbsp;command&nbsp;as&nbsp;a&nbsp;standalone&nbsp;script.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.util.command_line.html#ArgumentHandlerMixIn">ArgumentHandlerMixIn</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-GetMostLikelyMatchedObject"><strong>GetMostLikelyMatchedObject</strong></a>(objects, target_name, name_func<font color="#909090">=&lt;function &lt;lambda&gt;&gt;</font>, matched_score_threshold<font color="#909090">=0.4</font>)</dt><dd><tt>Matches&nbsp;objects&nbsp;whose&nbsp;names&nbsp;are&nbsp;most&nbsp;likely&nbsp;matched&nbsp;with&nbsp;target.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;objects:&nbsp;list&nbsp;of&nbsp;objects&nbsp;to&nbsp;match.<br>
+&nbsp;&nbsp;target_name:&nbsp;name&nbsp;to&nbsp;match.<br>
+&nbsp;&nbsp;name_func:&nbsp;function&nbsp;to&nbsp;get&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;name&nbsp;to&nbsp;match.&nbsp;Default&nbsp;bypass.<br>
+&nbsp;&nbsp;matched_score_threshold:&nbsp;threshold&nbsp;of&nbsp;likelihood&nbsp;to&nbsp;match.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;list&nbsp;of&nbsp;objects&nbsp;whose&nbsp;names&nbsp;are&nbsp;likely&nbsp;target_name.</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.util.exception_formatter.html b/catapult/telemetry/docs/pydoc/telemetry.internal.util.exception_formatter.html
new file mode 100644
index 0000000..f740f7f
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.util.exception_formatter.html
@@ -0,0 +1,37 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.util.exception_formatter</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.util.html"><font color="#ffffff">util</font></a>.exception_formatter</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/util/exception_formatter.py">telemetry/internal/util/exception_formatter.py</a></font></td></tr></table>
+    <p><tt>Print&nbsp;prettier&nbsp;and&nbsp;more&nbsp;detailed&nbsp;exceptions.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+<a href="math.html">math</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+<a href="sys.html">sys</a><br>
+</td><td width="25%" valign=top><a href="traceback.html">traceback</a><br>
+<a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-PrintFormattedException"><strong>PrintFormattedException</strong></a>(exception_class<font color="#909090">=None</font>, exception<font color="#909090">=None</font>, tb<font color="#909090">=None</font>, msg<font color="#909090">=None</font>)</dt></dl>
+ <dl><dt><a name="-PrintFormattedFrame"><strong>PrintFormattedFrame</strong></a>(frame, exception_string<font color="#909090">=None</font>)</dt></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.util.external_modules.html b/catapult/telemetry/docs/pydoc/telemetry.internal.util.external_modules.html
new file mode 100644
index 0000000..88bc19c
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.util.external_modules.html
@@ -0,0 +1,50 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.util.external_modules</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.util.html"><font color="#ffffff">util</font></a>.external_modules</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/util/external_modules.py">telemetry/internal/util/external_modules.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="importlib.html">importlib</a><br>
+</td><td width="25%" valign=top><a href="distutils.version.html">distutils.version</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-ImportOptionalModule"><strong>ImportOptionalModule</strong></a>(module)</dt><dd><tt>Tries&nbsp;to&nbsp;import&nbsp;the&nbsp;desired&nbsp;module.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;The&nbsp;module&nbsp;if&nbsp;successful,&nbsp;None&nbsp;if&nbsp;not.</tt></dd></dl>
+ <dl><dt><a name="-ImportRequiredModule"><strong>ImportRequiredModule</strong></a>(module)</dt><dd><tt>Tries&nbsp;to&nbsp;import&nbsp;the&nbsp;desired&nbsp;module.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;The&nbsp;module&nbsp;on&nbsp;success,&nbsp;raises&nbsp;error&nbsp;on&nbsp;failure.<br>
+Raises:<br>
+&nbsp;&nbsp;ImportError:&nbsp;The&nbsp;import&nbsp;failed.</tt></dd></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>MODULES</strong> = {'cv2': (StrictVersion ('2.4.8'), StrictVersion ('3.0')), 'numpy': (StrictVersion ('1.6.1'), None), 'psutil': (StrictVersion ('0.5'), None)}</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.util.file_handle.html b/catapult/telemetry/docs/pydoc/telemetry.internal.util.file_handle.html
new file mode 100644
index 0000000..6d0a692
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.util.file_handle.html
@@ -0,0 +1,95 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.util.file_handle</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.util.html"><font color="#ffffff">util</font></a>.file_handle</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/util/file_handle.py">telemetry/internal/util/file_handle.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="os.html">os</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.util.file_handle.html#FileHandle">FileHandle</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="FileHandle">class <strong>FileHandle</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="FileHandle-GetAbsPath"><strong>GetAbsPath</strong></a>(self)</dt><dd><tt>Returns&nbsp;the&nbsp;path&nbsp;to&nbsp;the&nbsp;pointed-to&nbsp;file&nbsp;relative&nbsp;to&nbsp;the&nbsp;given&nbsp;start&nbsp;path.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;start:&nbsp;A&nbsp;string&nbsp;representing&nbsp;a&nbsp;starting&nbsp;path.<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;string&nbsp;giving&nbsp;the&nbsp;relative&nbsp;path&nbsp;from&nbsp;path&nbsp;to&nbsp;this&nbsp;file.</tt></dd></dl>
+
+<dl><dt><a name="FileHandle-__init__"><strong>__init__</strong></a>(self, temp_file<font color="#909090">=None</font>, absolute_path<font color="#909090">=None</font>)</dt><dd><tt>Constructs&nbsp;a&nbsp;<a href="#FileHandle">FileHandle</a>&nbsp;<a href="__builtin__.html#object">object</a>.<br>
+&nbsp;<br>
+This&nbsp;constructor&nbsp;should&nbsp;not&nbsp;be&nbsp;used&nbsp;by&nbsp;the&nbsp;user;&nbsp;rather&nbsp;it&nbsp;is&nbsp;preferred&nbsp;to<br>
+use&nbsp;the&nbsp;module-level&nbsp;GetAbsPath&nbsp;and&nbsp;FromTempFile&nbsp;functions.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;temp_file:&nbsp;An&nbsp;instance&nbsp;of&nbsp;a&nbsp;temporary&nbsp;file&nbsp;<a href="__builtin__.html#object">object</a>.<br>
+&nbsp;&nbsp;absolute_path:&nbsp;A&nbsp;path;&nbsp;should&nbsp;not&nbsp;be&nbsp;passed&nbsp;if&nbsp;tempfile&nbsp;is&nbsp;and&nbsp;vice-versa.<br>
+&nbsp;&nbsp;extension:&nbsp;A&nbsp;string&nbsp;that&nbsp;specifies&nbsp;the&nbsp;file&nbsp;extension.&nbsp;It&nbsp;must&nbsp;starts&nbsp;with<br>
+&nbsp;&nbsp;&nbsp;&nbsp;".".</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>extension</strong></dt>
+</dl>
+<dl><dt><strong>id</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-FromFilePath"><strong>FromFilePath</strong></a>(path)</dt><dd><tt>Constructs&nbsp;a&nbsp;<a href="#FileHandle">FileHandle</a>&nbsp;from&nbsp;an&nbsp;absolute&nbsp;file&nbsp;path.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;path:&nbsp;A&nbsp;string&nbsp;giving&nbsp;the&nbsp;absolute&nbsp;path&nbsp;to&nbsp;a&nbsp;file.<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;<a href="#FileHandle">FileHandle</a>&nbsp;referring&nbsp;to&nbsp;the&nbsp;file&nbsp;at&nbsp;the&nbsp;specified&nbsp;path.</tt></dd></dl>
+ <dl><dt><a name="-FromTempFile"><strong>FromTempFile</strong></a>(temp_file)</dt><dd><tt>Constructs&nbsp;a&nbsp;<a href="#FileHandle">FileHandle</a>&nbsp;pointing&nbsp;to&nbsp;a&nbsp;temporary&nbsp;file.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;<a href="#FileHandle">FileHandle</a>&nbsp;referring&nbsp;to&nbsp;a&nbsp;named&nbsp;temporary&nbsp;file.</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.util.find_dependencies.html b/catapult/telemetry/docs/pydoc/telemetry.internal.util.find_dependencies.html
new file mode 100644
index 0000000..5187ee5
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.util.find_dependencies.html
@@ -0,0 +1,123 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.util.find_dependencies</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.util.html"><font color="#ffffff">util</font></a>.find_dependencies</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/util/find_dependencies.py">telemetry/internal/util/find_dependencies.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.benchmark.html">telemetry.benchmark</a><br>
+<a href="telemetry.internal.util.bootstrap.html">telemetry.internal.util.bootstrap</a><br>
+<a href="telemetry.internal.util.command_line.html">telemetry.internal.util.command_line</a><br>
+<a href="telemetry.core.discover.html">telemetry.core.discover</a><br>
+</td><td width="25%" valign=top><a href="fnmatch.html">fnmatch</a><br>
+<a href="imp.html">imp</a><br>
+<a href="logging.html">logging</a><br>
+<a href="modulegraph.modulegraph.html">modulegraph.modulegraph</a><br>
+</td><td width="25%" valign=top><a href="optparse.html">optparse</a><br>
+<a href="os.html">os</a><br>
+<a href="telemetry.internal.util.path.html">telemetry.internal.util.path</a><br>
+<a href="telemetry.internal.util.path_set.html">telemetry.internal.util.path_set</a><br>
+</td><td width="25%" valign=top><a href="sys.html">sys</a><br>
+<a href="zipfile.html">zipfile</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.util.command_line.html#OptparseCommand">telemetry.internal.util.command_line.OptparseCommand</a>(<a href="telemetry.internal.util.command_line.html#Command">telemetry.internal.util.command_line.Command</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.util.find_dependencies.html#FindDependenciesCommand">FindDependenciesCommand</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="FindDependenciesCommand">class <strong>FindDependenciesCommand</strong></a>(<a href="telemetry.internal.util.command_line.html#OptparseCommand">telemetry.internal.util.command_line.OptparseCommand</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Prints&nbsp;all&nbsp;dependencies<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.util.find_dependencies.html#FindDependenciesCommand">FindDependenciesCommand</a></dd>
+<dd><a href="telemetry.internal.util.command_line.html#OptparseCommand">telemetry.internal.util.command_line.OptparseCommand</a></dd>
+<dd><a href="telemetry.internal.util.command_line.html#Command">telemetry.internal.util.command_line.Command</a></dd>
+<dd><a href="telemetry.internal.util.command_line.html#ArgumentHandlerMixIn">telemetry.internal.util.command_line.ArgumentHandlerMixIn</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="FindDependenciesCommand-Run"><strong>Run</strong></a>(self, args)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="FindDependenciesCommand-AddCommandLineArgs"><strong>AddCommandLineArgs</strong></a>(cls, parser, _)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="FindDependenciesCommand-ProcessCommandLineArgs"><strong>ProcessCommandLineArgs</strong></a>(cls, parser, args, _)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.util.command_line.html#OptparseCommand">telemetry.internal.util.command_line.OptparseCommand</a>:<br>
+<dl><dt><a name="FindDependenciesCommand-CreateParser"><strong>CreateParser</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="FindDependenciesCommand-main"><strong>main</strong></a>(cls, args<font color="#909090">=None</font>)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Main&nbsp;method&nbsp;to&nbsp;run&nbsp;this&nbsp;command&nbsp;as&nbsp;a&nbsp;standalone&nbsp;script.</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="telemetry.internal.util.command_line.html#OptparseCommand">telemetry.internal.util.command_line.OptparseCommand</a>:<br>
+<dl><dt><strong>usage</strong> = ''</dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.util.command_line.html#Command">telemetry.internal.util.command_line.Command</a>:<br>
+<dl><dt><a name="FindDependenciesCommand-Description"><strong>Description</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="FindDependenciesCommand-Name"><strong>Name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.util.command_line.html#ArgumentHandlerMixIn">telemetry.internal.util.command_line.ArgumentHandlerMixIn</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-FindBootstrapDependencies"><strong>FindBootstrapDependencies</strong></a>(base_dir)</dt></dl>
+ <dl><dt><a name="-FindDependencies"><strong>FindDependencies</strong></a>(target_paths, options)</dt></dl>
+ <dl><dt><a name="-FindExcludedFiles"><strong>FindExcludedFiles</strong></a>(files, options)</dt></dl>
+ <dl><dt><a name="-FindPageSetDependencies"><strong>FindPageSetDependencies</strong></a>(base_dir)</dt></dl>
+ <dl><dt><a name="-FindPythonDependencies"><strong>FindPythonDependencies</strong></a>(module_path)</dt></dl>
+ <dl><dt><a name="-ZipDependencies"><strong>ZipDependencies</strong></a>(target_paths, dependencies, options)</dt></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>DEPS_FILE</strong> = 'bootstrap_deps'</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.util.global_hooks.html b/catapult/telemetry/docs/pydoc/telemetry.internal.util.global_hooks.html
new file mode 100644
index 0000000..10681f4
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.util.global_hooks.html
@@ -0,0 +1,36 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.util.global_hooks</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.util.html"><font color="#ffffff">util</font></a>.global_hooks</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/util/global_hooks.py">telemetry/internal/util/global_hooks.py</a></font></td></tr></table>
+    <p><tt>Hooks&nbsp;that&nbsp;apply&nbsp;globally&nbsp;to&nbsp;all&nbsp;scripts&nbsp;that&nbsp;import&nbsp;or&nbsp;use&nbsp;Telemetry.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.util.exception_formatter.html">telemetry.internal.util.exception_formatter</a><br>
+</td><td width="25%" valign=top><a href="signal.html">signal</a><br>
+</td><td width="25%" valign=top><a href="sys.html">sys</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-InstallHooks"><strong>InstallHooks</strong></a>()</dt></dl>
+ <dl><dt><a name="-InstallStackDumpOnSigusr1"><strong>InstallStackDumpOnSigusr1</strong></a>()</dt><dd><tt>Catch&nbsp;SIGUSR1&nbsp;and&nbsp;print&nbsp;a&nbsp;stack&nbsp;trace.</tt></dd></dl>
+ <dl><dt><a name="-InstallTerminationHook"><strong>InstallTerminationHook</strong></a>()</dt><dd><tt>Catch&nbsp;SIGTERM,&nbsp;print&nbsp;a&nbsp;stack&nbsp;trace,&nbsp;and&nbsp;exit.</tt></dd></dl>
+ <dl><dt><a name="-InstallUnhandledExceptionFormatter"><strong>InstallUnhandledExceptionFormatter</strong></a>()</dt><dd><tt>Print&nbsp;prettier&nbsp;exceptions&nbsp;that&nbsp;also&nbsp;contain&nbsp;the&nbsp;stack&nbsp;frame's&nbsp;locals.</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.util.html b/catapult/telemetry/docs/pydoc/telemetry.internal.util.html
new file mode 100644
index 0000000..bc906a0
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.util.html
@@ -0,0 +1,47 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.internal.util</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.util</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/util/__init__.py">telemetry/internal/util/__init__.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.util.binary_manager.html">binary_manager</a><br>
+<a href="telemetry.internal.util.binary_manager_unittest.html">binary_manager_unittest</a><br>
+<a href="telemetry.internal.util.bootstrap.html">bootstrap</a><br>
+<a href="telemetry.internal.util.camel_case.html">camel_case</a><br>
+<a href="telemetry.internal.util.camel_case_unittest.html">camel_case_unittest</a><br>
+<a href="telemetry.internal.util.classes.html">classes</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.util.classes_unittest.html">classes_unittest</a><br>
+<a href="telemetry.internal.util.command_line.html">command_line</a><br>
+<a href="telemetry.internal.util.command_line_unittest.html">command_line_unittest</a><br>
+<a href="telemetry.internal.util.exception_formatter.html">exception_formatter</a><br>
+<a href="telemetry.internal.util.external_modules.html">external_modules</a><br>
+<a href="telemetry.internal.util.file_handle.html">file_handle</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.util.file_handle_unittest.html">file_handle_unittest</a><br>
+<a href="telemetry.internal.util.find_dependencies.html">find_dependencies</a><br>
+<a href="telemetry.internal.util.find_dependencies_unittest.html">find_dependencies_unittest</a><br>
+<a href="telemetry.internal.util.global_hooks.html">global_hooks</a><br>
+<a href="telemetry.internal.util.path.html">path</a><br>
+<a href="telemetry.internal.util.path_set.html">path_set</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.util.path_set_unittest.html">path_set_unittest</a><br>
+<a href="telemetry.internal.util.path_unittest.html">path_unittest</a><br>
+<a href="telemetry.internal.util.ps_util.html">ps_util</a><br>
+<a href="telemetry.internal.util.webpagereplay.html">webpagereplay</a><br>
+<a href="telemetry.internal.util.webpagereplay_unittest.html">webpagereplay_unittest</a><br>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.util.path.html b/catapult/telemetry/docs/pydoc/telemetry.internal.util.path.html
new file mode 100644
index 0000000..a1c3389
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.util.path.html
@@ -0,0 +1,42 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.util.path</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.util.html"><font color="#ffffff">util</font></a>.path</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/util/path.py">telemetry/internal/util/path.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-FindInstalledWindowsApplication"><strong>FindInstalledWindowsApplication</strong></a>(application_path)</dt><dd><tt>Search&nbsp;common&nbsp;Windows&nbsp;installation&nbsp;directories&nbsp;for&nbsp;an&nbsp;application.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;application_path:&nbsp;Path&nbsp;to&nbsp;the&nbsp;application,&nbsp;relative&nbsp;to&nbsp;the&nbsp;installation&nbsp;location.<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;string&nbsp;representing&nbsp;the&nbsp;full&nbsp;path,&nbsp;or&nbsp;None&nbsp;if&nbsp;not&nbsp;found.</tt></dd></dl>
+ <dl><dt><a name="-IsExecutable"><strong>IsExecutable</strong></a>(path)</dt></dl>
+ <dl><dt><a name="-IsSubpath"><strong>IsSubpath</strong></a>(subpath, superpath)</dt><dd><tt>Returns&nbsp;True&nbsp;iff&nbsp;subpath&nbsp;is&nbsp;or&nbsp;is&nbsp;in&nbsp;superpath.</tt></dd></dl>
+ <dl><dt><a name="-ListFiles"><strong>ListFiles</strong></a>(base_directory, should_include_dir<font color="#909090">=&lt;function &lt;lambda&gt;&gt;</font>, should_include_file<font color="#909090">=&lt;function &lt;lambda&gt;&gt;</font>)</dt></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
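The page above only lists signatures, so a brief usage sketch may help. It is illustrative only, based on the documented signatures of IsSubpath and FindInstalledWindowsApplication; the relative application path shown is a hypothetical example, not something the module defines.

```python
# Illustrative sketch based on the signatures documented in
# telemetry.internal.util.path above; the relative application path below is a
# hypothetical example, not part of the module.
from telemetry.internal.util import path

# IsSubpath(subpath, superpath) returns True iff subpath is or is in superpath.
print(path.IsSubpath('/tmp/traces/run1.json', '/tmp/traces'))  # True

# FindInstalledWindowsApplication searches common Windows installation
# directories and returns the full path, or None if the application is absent.
chrome_exe = path.FindInstalledWindowsApplication(
    'Google/Chrome/Application/chrome.exe')
if chrome_exe is None:
    print('Application not found in the common installation directories.')
```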
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.util.path_set.html b/catapult/telemetry/docs/pydoc/telemetry.internal.util.path_set.html
new file mode 100644
index 0000000..1e96faa
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.util.path_set.html
@@ -0,0 +1,150 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.util.path_set</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.util.html"><font color="#ffffff">util</font></a>.path_set</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/util/path_set.py">telemetry/internal/util/path_set.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="collections.html">collections</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="_abcoll.html#MutableSet">_abcoll.MutableSet</a>(<a href="_abcoll.html#Set">_abcoll.Set</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.util.path_set.html#PathSet">PathSet</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PathSet">class <strong>PathSet</strong></a>(<a href="_abcoll.html#MutableSet">_abcoll.MutableSet</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;set&nbsp;of&nbsp;paths.<br>
+&nbsp;<br>
+All&nbsp;mutation&nbsp;methods&nbsp;can&nbsp;take&nbsp;either&nbsp;directories&nbsp;or&nbsp;individual&nbsp;files,&nbsp;but&nbsp;the<br>
+iterator&nbsp;yields&nbsp;the&nbsp;individual&nbsp;files.&nbsp;All&nbsp;paths&nbsp;are&nbsp;automatically&nbsp;normalized.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.util.path_set.html#PathSet">PathSet</a></dd>
+<dd><a href="_abcoll.html#MutableSet">_abcoll.MutableSet</a></dd>
+<dd><a href="_abcoll.html#Set">_abcoll.Set</a></dd>
+<dd><a href="_abcoll.html#Sized">_abcoll.Sized</a></dd>
+<dd><a href="_abcoll.html#Iterable">_abcoll.Iterable</a></dd>
+<dd><a href="_abcoll.html#Container">_abcoll.Container</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="PathSet-__contains__"><strong>__contains__</strong></a>(self, path)</dt></dl>
+
+<dl><dt><a name="PathSet-__init__"><strong>__init__</strong></a>(self, iterable<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="PathSet-__iter__"><strong>__iter__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PathSet-__len__"><strong>__len__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PathSet-add"><strong>add</strong></a>(self, path)</dt></dl>
+
+<dl><dt><a name="PathSet-discard"><strong>discard</strong></a>(self, path)</dt></dl>
+
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>__abstractmethods__</strong> = frozenset([])</dl>
+
+<hr>
+Methods inherited from <a href="_abcoll.html#MutableSet">_abcoll.MutableSet</a>:<br>
+<dl><dt><a name="PathSet-__iand__"><strong>__iand__</strong></a>(self, it)</dt></dl>
+
+<dl><dt><a name="PathSet-__ior__"><strong>__ior__</strong></a>(self, it)</dt></dl>
+
+<dl><dt><a name="PathSet-__isub__"><strong>__isub__</strong></a>(self, it)</dt></dl>
+
+<dl><dt><a name="PathSet-__ixor__"><strong>__ixor__</strong></a>(self, it)</dt></dl>
+
+<dl><dt><a name="PathSet-clear"><strong>clear</strong></a>(self)</dt><dd><tt>This&nbsp;is&nbsp;slow&nbsp;(creates&nbsp;N&nbsp;new&nbsp;iterators!)&nbsp;but&nbsp;effective.</tt></dd></dl>
+
+<dl><dt><a name="PathSet-pop"><strong>pop</strong></a>(self)</dt><dd><tt>Return&nbsp;the&nbsp;popped&nbsp;value.&nbsp;&nbsp;Raise&nbsp;KeyError&nbsp;if&nbsp;empty.</tt></dd></dl>
+
+<dl><dt><a name="PathSet-remove"><strong>remove</strong></a>(self, value)</dt><dd><tt>Remove&nbsp;an&nbsp;element.&nbsp;If&nbsp;not&nbsp;a&nbsp;member,&nbsp;raise&nbsp;a&nbsp;KeyError.</tt></dd></dl>
+
+<hr>
+Methods inherited from <a href="_abcoll.html#Set">_abcoll.Set</a>:<br>
+<dl><dt><a name="PathSet-__and__"><strong>__and__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="PathSet-__eq__"><strong>__eq__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="PathSet-__ge__"><strong>__ge__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="PathSet-__gt__"><strong>__gt__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="PathSet-__le__"><strong>__le__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="PathSet-__lt__"><strong>__lt__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="PathSet-__ne__"><strong>__ne__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="PathSet-__or__"><strong>__or__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="PathSet-__sub__"><strong>__sub__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="PathSet-__xor__"><strong>__xor__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="PathSet-isdisjoint"><strong>isdisjoint</strong></a>(self, other)</dt><dd><tt>Return&nbsp;True&nbsp;if&nbsp;two&nbsp;sets&nbsp;have&nbsp;a&nbsp;null&nbsp;intersection.</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="_abcoll.html#Set">_abcoll.Set</a>:<br>
+<dl><dt><strong>__hash__</strong> = None</dl>
+
+<hr>
+Class methods inherited from <a href="_abcoll.html#Sized">_abcoll.Sized</a>:<br>
+<dl><dt><a name="PathSet-__subclasshook__"><strong>__subclasshook__</strong></a>(cls, C)<font color="#909090"><font face="helvetica, arial"> from <a href="abc.html#ABCMeta">abc.ABCMeta</a></font></font></dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="_abcoll.html#Sized">_abcoll.Sized</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="_abcoll.html#Sized">_abcoll.Sized</a>:<br>
+<dl><dt><strong>__metaclass__</strong> = &lt;class 'abc.ABCMeta'&gt;<dd><tt>Metaclass&nbsp;for&nbsp;defining&nbsp;Abstract&nbsp;Base&nbsp;Classes&nbsp;(ABCs).<br>
+&nbsp;<br>
+Use&nbsp;this&nbsp;metaclass&nbsp;to&nbsp;create&nbsp;an&nbsp;ABC.&nbsp;&nbsp;An&nbsp;ABC&nbsp;can&nbsp;be&nbsp;subclassed<br>
+directly,&nbsp;and&nbsp;then&nbsp;acts&nbsp;as&nbsp;a&nbsp;mix-in&nbsp;class.&nbsp;&nbsp;You&nbsp;can&nbsp;also&nbsp;register<br>
+unrelated&nbsp;concrete&nbsp;classes&nbsp;(even&nbsp;built-in&nbsp;classes)&nbsp;and&nbsp;unrelated<br>
+ABCs&nbsp;as&nbsp;'virtual&nbsp;subclasses'&nbsp;--&nbsp;these&nbsp;and&nbsp;their&nbsp;descendants&nbsp;will<br>
+be&nbsp;considered&nbsp;subclasses&nbsp;of&nbsp;the&nbsp;registering&nbsp;ABC&nbsp;by&nbsp;the&nbsp;built-in<br>
+issubclass()&nbsp;function,&nbsp;but&nbsp;the&nbsp;registering&nbsp;ABC&nbsp;won't&nbsp;show&nbsp;up&nbsp;in<br>
+their&nbsp;MRO&nbsp;(Method&nbsp;Resolution&nbsp;Order)&nbsp;nor&nbsp;will&nbsp;method<br>
+implementations&nbsp;defined&nbsp;by&nbsp;the&nbsp;registering&nbsp;ABC&nbsp;be&nbsp;callable&nbsp;(not<br>
+even&nbsp;via&nbsp;super()).</tt></dl>
+
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
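As a rough illustration of the class documented above (a MutableSet whose mutation methods accept files or directories while iteration yields individual, normalized file paths), the sketch below adds a directory and iterates its files. The temporary-directory setup is incidental scaffolding, and exactly how directory members are expanded is inferred from the docstring rather than stated on this page.

```python
# Rough usage sketch for telemetry.internal.util.path_set.PathSet, inferred
# from the class docstring above: mutation methods accept directories or
# files, and iteration yields individual, normalized file paths.
import os
import tempfile

from telemetry.internal.util.path_set import PathSet

scratch_dir = tempfile.mkdtemp()
for name in ('a.txt', 'b.txt'):
    open(os.path.join(scratch_dir, name), 'w').close()

paths = PathSet()
paths.add(scratch_dir)          # a directory: its files become members
for file_path in paths:         # yields the individual files
    print(file_path)
paths.discard(os.path.join(scratch_dir, 'a.txt'))  # drop a single file
print(len(paths))
```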
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.util.ps_util.html b/catapult/telemetry/docs/pydoc/telemetry.internal.util.ps_util.html
new file mode 100644
index 0000000..753536a
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.util.ps_util.html
@@ -0,0 +1,51 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.util.ps_util</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.util.html"><font color="#ffffff">util</font></a>.ps_util</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/util/ps_util.py">telemetry/internal/util/ps_util.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="atexit.html">atexit</a><br>
+</td><td width="25%" valign=top><a href="inspect.html">inspect</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-EnableListingStrayProcessesUponExitHook"><strong>EnableListingStrayProcessesUponExitHook</strong></a>()</dt></dl>
+ <dl><dt><a name="-GetChildPids"><strong>GetChildPids</strong></a>(processes, pid)</dt><dd><tt>Returns&nbsp;all&nbsp;child&nbsp;processes&nbsp;of&nbsp;|pid|&nbsp;from&nbsp;the&nbsp;given&nbsp;|processes|&nbsp;list.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;processes:&nbsp;A&nbsp;tuple&nbsp;of&nbsp;(pid,&nbsp;ppid,&nbsp;state)&nbsp;as&nbsp;generated&nbsp;by&nbsp;ps.<br>
+&nbsp;&nbsp;pid:&nbsp;The&nbsp;pid&nbsp;for&nbsp;which&nbsp;to&nbsp;get&nbsp;children.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;list&nbsp;of&nbsp;child&nbsp;pids.</tt></dd></dl>
+ <dl><dt><a name="-GetPsOutputWithPlatformBackend"><strong>GetPsOutputWithPlatformBackend</strong></a>(platform_backend, columns, pid)</dt><dd><tt>Returns&nbsp;output&nbsp;of&nbsp;the&nbsp;'ps'&nbsp;command&nbsp;as&nbsp;a&nbsp;list&nbsp;of&nbsp;lines.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;platform_backend:&nbsp;The&nbsp;platform&nbsp;backend&nbsp;(LinuxBasedPlatformBackend&nbsp;or<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;PosixPlatformBackend).<br>
+&nbsp;&nbsp;columns:&nbsp;A&nbsp;list&nbsp;of&nbsp;required&nbsp;columns,&nbsp;e.g.,&nbsp;['pid',&nbsp;'pss'].<br>
+&nbsp;&nbsp;pid:&nbsp;If&nbsp;not&nbsp;None,&nbsp;returns&nbsp;only&nbsp;the&nbsp;information&nbsp;for&nbsp;the&nbsp;process&nbsp;with&nbsp;that&nbsp;pid.</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
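Based on the GetChildPids signature and argument description above, here is a minimal, self-contained sketch using made-up process tuples. Whether the helper returns only direct children or all descendants is not stated on this page, so the expectation in the final comment is an assumption.

```python
# Minimal sketch of calling ps_util.GetChildPids with made-up (pid, ppid,
# state) tuples of the shape described above. Whether descendants beyond
# direct children are included is an assumption, not documented here.
from telemetry.internal.util import ps_util

processes = [
    (100, 1,   'S'),  # some parent process
    (101, 100, 'S'),  # child of 100
    (102, 101, 'R'),  # child of 101
    (200, 1,   'S'),  # unrelated process
]
child_pids = ps_util.GetChildPids(processes, 100)
print(child_pids)  # expected to include 101 (and 102 if descendants recurse)
```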
diff --git a/catapult/telemetry/docs/pydoc/telemetry.internal.util.webpagereplay.html b/catapult/telemetry/docs/pydoc/telemetry.internal.util.webpagereplay.html
new file mode 100644
index 0000000..0eec669
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.internal.util.webpagereplay.html
@@ -0,0 +1,294 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.internal.util.webpagereplay</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.internal.html"><font color="#ffffff">internal</font></a>.<a href="telemetry.internal.util.html"><font color="#ffffff">util</font></a>.webpagereplay</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/internal/util/webpagereplay.py">telemetry/internal/util/webpagereplay.py</a></font></td></tr></table>
+    <p><tt>Start&nbsp;and&nbsp;stop&nbsp;Web&nbsp;Page&nbsp;Replay.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="atexit.html">atexit</a><br>
+<a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+<a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+<a href="re.html">re</a><br>
+<a href="signal.html">signal</a><br>
+</td><td width="25%" valign=top><a href="subprocess.html">subprocess</a><br>
+<a href="sys.html">sys</a><br>
+<a href="tempfile.html">tempfile</a><br>
+</td><td width="25%" valign=top><a href="urllib.html">urllib</a><br>
+<a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.util.webpagereplay.html#ReplayServer">ReplayServer</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.util.webpagereplay.html#ReplayError">ReplayError</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.util.webpagereplay.html#ReplayNotFoundError">ReplayNotFoundError</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.internal.util.webpagereplay.html#ReplayNotStartedError">ReplayNotStartedError</a>
+</font></dt></dl>
+</dd>
+</dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ReplayError">class <strong>ReplayError</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Catch-all&nbsp;exception&nbsp;for&nbsp;the&nbsp;module.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.util.webpagereplay.html#ReplayError">ReplayError</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="ReplayError-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#ReplayError-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#ReplayError-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="ReplayError-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ReplayError-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ReplayError-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#ReplayError-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ReplayError-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#ReplayError-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="ReplayError-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#ReplayError-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="ReplayError-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ReplayError-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#ReplayError-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="ReplayError-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ReplayError-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="ReplayError-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ReplayError-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#ReplayError-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="ReplayError-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ReplayNotFoundError">class <strong>ReplayNotFoundError</strong></a>(<a href="telemetry.internal.util.webpagereplay.html#ReplayError">ReplayError</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.util.webpagereplay.html#ReplayNotFoundError">ReplayNotFoundError</a></dd>
+<dd><a href="telemetry.internal.util.webpagereplay.html#ReplayError">ReplayError</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="ReplayNotFoundError-__init__"><strong>__init__</strong></a>(self, label, path)</dt></dl>
+
+<dl><dt><a name="ReplayNotFoundError-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.util.webpagereplay.html#ReplayError">ReplayError</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#ReplayNotFoundError-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="ReplayNotFoundError-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ReplayNotFoundError-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ReplayNotFoundError-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#ReplayNotFoundError-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ReplayNotFoundError-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#ReplayNotFoundError-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="ReplayNotFoundError-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#ReplayNotFoundError-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="ReplayNotFoundError-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ReplayNotFoundError-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#ReplayNotFoundError-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="ReplayNotFoundError-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ReplayNotFoundError-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="ReplayNotFoundError-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ReplayNotFoundError-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ReplayNotStartedError">class <strong>ReplayNotStartedError</strong></a>(<a href="telemetry.internal.util.webpagereplay.html#ReplayError">ReplayError</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.internal.util.webpagereplay.html#ReplayNotStartedError">ReplayNotStartedError</a></dd>
+<dd><a href="telemetry.internal.util.webpagereplay.html#ReplayError">ReplayError</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.util.webpagereplay.html#ReplayError">ReplayError</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="ReplayNotStartedError-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#ReplayNotStartedError-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#ReplayNotStartedError-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="ReplayNotStartedError-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ReplayNotStartedError-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ReplayNotStartedError-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#ReplayNotStartedError-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ReplayNotStartedError-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#ReplayNotStartedError-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="ReplayNotStartedError-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#ReplayNotStartedError-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="ReplayNotStartedError-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ReplayNotStartedError-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#ReplayNotStartedError-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="ReplayNotStartedError-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ReplayNotStartedError-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="ReplayNotStartedError-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ReplayNotStartedError-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#ReplayNotStartedError-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="ReplayNotStartedError-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ReplayServer">class <strong>ReplayServer</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Start&nbsp;and&nbsp;Stop&nbsp;Web&nbsp;Page&nbsp;Replay.<br>
+&nbsp;<br>
+Web&nbsp;Page&nbsp;Replay&nbsp;is&nbsp;a&nbsp;proxy&nbsp;that&nbsp;can&nbsp;record&nbsp;and&nbsp;"replay"&nbsp;web&nbsp;pages&nbsp;with<br>
+simulated&nbsp;network&nbsp;characteristics&nbsp;--&nbsp;without&nbsp;having&nbsp;to&nbsp;edit&nbsp;the&nbsp;pages<br>
+by&nbsp;hand.&nbsp;With&nbsp;WPR,&nbsp;tests&nbsp;can&nbsp;use&nbsp;"real"&nbsp;web&nbsp;content,&nbsp;and&nbsp;catch<br>
+performance&nbsp;issues&nbsp;that&nbsp;may&nbsp;result&nbsp;from&nbsp;introducing&nbsp;network&nbsp;delays&nbsp;and<br>
+bandwidth&nbsp;throttling.<br>
+&nbsp;<br>
+Example:<br>
+&nbsp;&nbsp;&nbsp;with&nbsp;<a href="#ReplayServer">ReplayServer</a>(archive_path):<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;NavigateToURL(start_url)<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;WaitUntil(...)<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="ReplayServer-StartServer"><strong>StartServer</strong></a>(self)</dt><dd><tt>Start&nbsp;Web&nbsp;Page&nbsp;Replay&nbsp;and&nbsp;verify&nbsp;that&nbsp;it&nbsp;started.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;(HTTP_PORT,&nbsp;HTTPS_PORT,&nbsp;DNS_PORT)&nbsp;&nbsp;#&nbsp;DNS_PORT&nbsp;is&nbsp;None&nbsp;if&nbsp;unused.<br>
+Raises:<br>
+&nbsp;&nbsp;<a href="#ReplayNotStartedError">ReplayNotStartedError</a>:&nbsp;if&nbsp;Replay&nbsp;start-up&nbsp;fails.</tt></dd></dl>
+
+<dl><dt><a name="ReplayServer-StopServer"><strong>StopServer</strong></a>(self)</dt><dd><tt>Stop&nbsp;Web&nbsp;Page&nbsp;Replay.</tt></dd></dl>
+
+<dl><dt><a name="ReplayServer-__enter__"><strong>__enter__</strong></a>(self)</dt><dd><tt>Add&nbsp;support&nbsp;for&nbsp;with-statement.</tt></dd></dl>
+
+<dl><dt><a name="ReplayServer-__exit__"><strong>__exit__</strong></a>(self, unused_exc_type, unused_exc_val, unused_exc_tb)</dt><dd><tt>Add&nbsp;support&nbsp;for&nbsp;with-statement.</tt></dd></dl>
+
+<dl><dt><a name="ReplayServer-__init__"><strong>__init__</strong></a>(self, archive_path, replay_host, http_port, https_port, dns_port, replay_options)</dt><dd><tt>Initialize&nbsp;<a href="#ReplayServer">ReplayServer</a>.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;archive_path:&nbsp;a&nbsp;path&nbsp;to&nbsp;a&nbsp;specific&nbsp;WPR&nbsp;archive&nbsp;(required).<br>
+&nbsp;&nbsp;replay_host:&nbsp;the&nbsp;hostname&nbsp;to&nbsp;serve&nbsp;traffic.<br>
+&nbsp;&nbsp;http_port:&nbsp;an&nbsp;integer&nbsp;port&nbsp;on&nbsp;which&nbsp;to&nbsp;serve&nbsp;HTTP&nbsp;traffic.&nbsp;May&nbsp;be&nbsp;zero<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;to&nbsp;let&nbsp;the&nbsp;OS&nbsp;choose&nbsp;an&nbsp;available&nbsp;port.<br>
+&nbsp;&nbsp;https_port:&nbsp;an&nbsp;integer&nbsp;port&nbsp;on&nbsp;which&nbsp;to&nbsp;serve&nbsp;HTTPS&nbsp;traffic.&nbsp;May&nbsp;be&nbsp;zero<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;to&nbsp;let&nbsp;the&nbsp;OS&nbsp;choose&nbsp;an&nbsp;available&nbsp;port.<br>
+&nbsp;&nbsp;dns_port:&nbsp;an&nbsp;integer&nbsp;port&nbsp;on&nbsp;which&nbsp;to&nbsp;serve&nbsp;DNS&nbsp;traffic.&nbsp;May&nbsp;be&nbsp;zero<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;to&nbsp;let&nbsp;the&nbsp;OS&nbsp;choose&nbsp;an&nbsp;available&nbsp;port.&nbsp;If&nbsp;None,&nbsp;DNS&nbsp;forwarding&nbsp;is<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;disabled.<br>
+&nbsp;&nbsp;replay_options:&nbsp;an&nbsp;iterable&nbsp;of&nbsp;option&nbsp;strings&nbsp;to&nbsp;forward&nbsp;to&nbsp;replay.py.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
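Putting the documented constructor arguments and the with-statement support together, a sketch of driving ReplayServer might look like the following. The archive path is a placeholder, and the comments only restate the __init__ and StartServer documentation above rather than adding new behavior.

```python
# Sketch of driving ReplayServer with the constructor arguments documented
# above. The archive path is a placeholder; ports of 0 let the OS choose, and
# dns_port=None disables DNS forwarding, per the __init__ docstring.
from telemetry.internal.util import webpagereplay

server = webpagereplay.ReplayServer(
    archive_path='/path/to/recorded_pages.wpr',  # placeholder archive
    replay_host='127.0.0.1',
    http_port=0,
    https_port=0,
    dns_port=None,
    replay_options=[])

# __enter__/__exit__ provide with-statement support, so the server is started
# on entry and stopped on exit; StartServer() returns (HTTP_PORT, HTTPS_PORT,
# DNS_PORT) if called directly.
with server:
    pass  # a real test would navigate a browser against the replayed pages
```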
diff --git a/catapult/telemetry/docs/pydoc/telemetry.page.action_runner.html b/catapult/telemetry/docs/pydoc/telemetry.page.action_runner.html
new file mode 100644
index 0000000..5ea1353
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.page.action_runner.html
@@ -0,0 +1,536 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.page.action_runner</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.page.html"><font color="#ffffff">page</font></a>.action_runner</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/page/action_runner.py">telemetry/page/action_runner.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="time.html">time</a><br>
+</td><td width="25%" valign=top><a href="telemetry.web_perf.timeline_interaction_record.html">telemetry.web_perf.timeline_interaction_record</a><br>
+</td><td width="25%" valign=top><a href="urlparse.html">urlparse</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.page.action_runner.html#ActionRunner">ActionRunner</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.page.action_runner.html#Interaction">Interaction</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ActionRunner">class <strong>ActionRunner</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="ActionRunner-ClickElement"><strong>ClickElement</strong></a>(self, selector<font color="#909090">=None</font>, text<font color="#909090">=None</font>, element_function<font color="#909090">=None</font>)</dt><dd><tt>Click&nbsp;an&nbsp;element.<br>
+&nbsp;<br>
+The&nbsp;element&nbsp;may&nbsp;be&nbsp;selected&nbsp;via&nbsp;selector,&nbsp;text,&nbsp;or&nbsp;element_function.<br>
+Exactly&nbsp;one&nbsp;of&nbsp;these&nbsp;arguments&nbsp;must&nbsp;be&nbsp;specified.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;selector:&nbsp;A&nbsp;CSS&nbsp;selector&nbsp;describing&nbsp;the&nbsp;element.<br>
+&nbsp;&nbsp;text:&nbsp;The&nbsp;element&nbsp;must&nbsp;contain&nbsp;this&nbsp;exact&nbsp;text.<br>
+&nbsp;&nbsp;element_function:&nbsp;A&nbsp;JavaScript&nbsp;function&nbsp;(as&nbsp;string)&nbsp;that&nbsp;is&nbsp;used<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;to&nbsp;retrieve&nbsp;the&nbsp;element.&nbsp;For&nbsp;example:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;'(function()&nbsp;{&nbsp;return&nbsp;foo.element;&nbsp;})()'.</tt></dd></dl>
+
+<dl><dt><a name="ActionRunner-CreateGestureInteraction"><strong>CreateGestureInteraction</strong></a>(self, label, repeatable<font color="#909090">=False</font>)</dt><dd><tt>Create&nbsp;an&nbsp;action.<a href="#Interaction">Interaction</a>&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;that&nbsp;issues&nbsp;gesture-based<br>
+interaction&nbsp;record.<br>
+&nbsp;<br>
+This&nbsp;is&nbsp;similar&nbsp;to&nbsp;a&nbsp;normal&nbsp;interaction&nbsp;record,&nbsp;but&nbsp;it&nbsp;will<br>
+auto-narrow&nbsp;the&nbsp;interaction&nbsp;time&nbsp;period&nbsp;to&nbsp;only&nbsp;include&nbsp;the<br>
+synthetic&nbsp;gesture&nbsp;event&nbsp;output&nbsp;by&nbsp;Chrome.&nbsp;This&nbsp;is&nbsp;typically&nbsp;used&nbsp;to<br>
+reduce&nbsp;noise&nbsp;in&nbsp;gesture-based&nbsp;analysis&nbsp;(e.g.,&nbsp;analysis&nbsp;for&nbsp;a<br>
+swipe/scroll).<br>
+&nbsp;<br>
+The&nbsp;interaction&nbsp;record&nbsp;label&nbsp;will&nbsp;be&nbsp;prepended&nbsp;with&nbsp;'Gesture_'.<br>
+&nbsp;<br>
+e.g:<br>
+&nbsp;&nbsp;with&nbsp;action_runner.<a href="#ActionRunner-CreateGestureInteraction">CreateGestureInteraction</a>('Scroll-1'):<br>
+&nbsp;&nbsp;&nbsp;&nbsp;action_runner.<a href="#ActionRunner-ScrollPage">ScrollPage</a>()<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;label:&nbsp;A&nbsp;label&nbsp;for&nbsp;this&nbsp;particular&nbsp;interaction.&nbsp;This&nbsp;can&nbsp;be&nbsp;any<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;user-defined&nbsp;string,&nbsp;but&nbsp;must&nbsp;not&nbsp;contain&nbsp;'/'.<br>
+&nbsp;&nbsp;repeatable:&nbsp;Whether&nbsp;other&nbsp;interactions&nbsp;may&nbsp;use&nbsp;the&nbsp;same&nbsp;logical&nbsp;name<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;as&nbsp;this&nbsp;interaction.&nbsp;All&nbsp;interactions&nbsp;with&nbsp;the&nbsp;same&nbsp;logical&nbsp;name&nbsp;must<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;have&nbsp;the&nbsp;same&nbsp;flags.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;An&nbsp;instance&nbsp;of&nbsp;action_runner.<a href="#Interaction">Interaction</a></tt></dd></dl>
+
+<dl><dt><a name="ActionRunner-CreateInteraction"><strong>CreateInteraction</strong></a>(self, label, repeatable<font color="#909090">=False</font>)</dt><dd><tt>Create&nbsp;an&nbsp;action.<a href="#Interaction">Interaction</a>&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;that&nbsp;issues&nbsp;interaction&nbsp;record.<br>
+&nbsp;<br>
+An&nbsp;interaction&nbsp;record&nbsp;is&nbsp;a&nbsp;labeled&nbsp;time&nbsp;period&nbsp;containing<br>
+interaction&nbsp;that&nbsp;developers&nbsp;care&nbsp;about.&nbsp;Each&nbsp;set&nbsp;of&nbsp;metrics<br>
+specified&nbsp;in&nbsp;flags&nbsp;will&nbsp;be&nbsp;calculated&nbsp;for&nbsp;this&nbsp;time&nbsp;period.<br>
+&nbsp;<br>
+To&nbsp;mark&nbsp;the&nbsp;start&nbsp;of&nbsp;an&nbsp;interaction&nbsp;record,&nbsp;call&nbsp;the&nbsp;Begin()&nbsp;method&nbsp;on&nbsp;the&nbsp;returned<br>
+<a href="__builtin__.html#object">object</a>.&nbsp;To&nbsp;mark&nbsp;the&nbsp;end&nbsp;of&nbsp;an&nbsp;interaction&nbsp;record,&nbsp;call&nbsp;the&nbsp;End()&nbsp;method&nbsp;on<br>
+it.&nbsp;Or&nbsp;better&nbsp;yet,&nbsp;use&nbsp;the&nbsp;with&nbsp;statement&nbsp;to&nbsp;create&nbsp;an<br>
+interaction&nbsp;record&nbsp;that&nbsp;covers&nbsp;the&nbsp;actions&nbsp;in&nbsp;the&nbsp;with&nbsp;block.<br>
+&nbsp;<br>
+e.g:<br>
+&nbsp;&nbsp;with&nbsp;action_runner.<a href="#ActionRunner-CreateInteraction">CreateInteraction</a>('Animation-1'):<br>
+&nbsp;&nbsp;&nbsp;&nbsp;action_runner.<a href="#ActionRunner-TapElement">TapElement</a>(...)<br>
+&nbsp;&nbsp;&nbsp;&nbsp;action_runner.<a href="#ActionRunner-WaitForJavaScriptCondition">WaitForJavaScriptCondition</a>(...)<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;label:&nbsp;A&nbsp;label&nbsp;for&nbsp;this&nbsp;particular&nbsp;interaction.&nbsp;This&nbsp;can&nbsp;be&nbsp;any<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;user-defined&nbsp;string,&nbsp;but&nbsp;must&nbsp;not&nbsp;contain&nbsp;'/'.<br>
+&nbsp;&nbsp;repeatable:&nbsp;Whether&nbsp;other&nbsp;interactions&nbsp;may&nbsp;use&nbsp;the&nbsp;same&nbsp;logical&nbsp;name<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;as&nbsp;this&nbsp;interaction.&nbsp;All&nbsp;interactions&nbsp;with&nbsp;the&nbsp;same&nbsp;logical&nbsp;name&nbsp;must<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;have&nbsp;the&nbsp;same&nbsp;flags.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;An&nbsp;instance&nbsp;of&nbsp;action_runner.<a href="#Interaction">Interaction</a></tt></dd></dl>
+
+<dl><dt><a name="ActionRunner-DragPage"><strong>DragPage</strong></a>(self, left_start_ratio, top_start_ratio, left_end_ratio, top_end_ratio, speed_in_pixels_per_second<font color="#909090">=800</font>, use_touch<font color="#909090">=False</font>, selector<font color="#909090">=None</font>, text<font color="#909090">=None</font>, element_function<font color="#909090">=None</font>)</dt><dd><tt>Perform&nbsp;a&nbsp;drag&nbsp;gesture&nbsp;on&nbsp;the&nbsp;page.<br>
+&nbsp;<br>
+You&nbsp;should&nbsp;specify&nbsp;a&nbsp;start&nbsp;and&nbsp;an&nbsp;end&nbsp;point&nbsp;in&nbsp;ratios&nbsp;of&nbsp;page&nbsp;width&nbsp;and<br>
+height&nbsp;(see&nbsp;drag.js&nbsp;for&nbsp;full&nbsp;implementation).<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;left_start_ratio:&nbsp;The&nbsp;horizontal&nbsp;starting&nbsp;coordinate&nbsp;of&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;gesture,&nbsp;as&nbsp;a&nbsp;ratio&nbsp;of&nbsp;the&nbsp;visible&nbsp;bounding&nbsp;rectangle&nbsp;for<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;document.body.<br>
+&nbsp;&nbsp;top_start_ratio:&nbsp;The&nbsp;vertical&nbsp;starting&nbsp;coordinate&nbsp;of&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;gesture,&nbsp;as&nbsp;a&nbsp;ratio&nbsp;of&nbsp;the&nbsp;visible&nbsp;bounding&nbsp;rectangle&nbsp;for<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;document.body.<br>
+&nbsp;&nbsp;left_end_ratio:&nbsp;The&nbsp;horizontal&nbsp;ending&nbsp;coordinate&nbsp;of&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;gesture,&nbsp;as&nbsp;a&nbsp;ratio&nbsp;of&nbsp;the&nbsp;visible&nbsp;bounding&nbsp;rectangle&nbsp;for<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;document.body.<br>
+&nbsp;&nbsp;top_end_ratio:&nbsp;The&nbsp;vertical&nbsp;ending&nbsp;coordinate&nbsp;of&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;gesture,&nbsp;as&nbsp;a&nbsp;ratio&nbsp;of&nbsp;the&nbsp;visible&nbsp;bounding&nbsp;rectangle&nbsp;for<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;document.body.<br>
+&nbsp;&nbsp;speed_in_pixels_per_second:&nbsp;The&nbsp;speed&nbsp;of&nbsp;the&nbsp;gesture&nbsp;(in&nbsp;pixels/s).<br>
+&nbsp;&nbsp;use_touch:&nbsp;Whether&nbsp;dragging&nbsp;should&nbsp;be&nbsp;done&nbsp;with&nbsp;touch&nbsp;input.</tt></dd></dl>
+
+<dl><dt><a name="ActionRunner-EvaluateJavaScript"><strong>EvaluateJavaScript</strong></a>(self, expression)</dt><dd><tt>Returns&nbsp;the&nbsp;evaluation&nbsp;result&nbsp;of&nbsp;the&nbsp;given&nbsp;JavaScript&nbsp;expression.<br>
+&nbsp;<br>
+The&nbsp;evaluation&nbsp;results&nbsp;must&nbsp;be&nbsp;convertible&nbsp;to&nbsp;JSON.&nbsp;If&nbsp;the&nbsp;result<br>
+is&nbsp;not&nbsp;needed,&nbsp;use&nbsp;ExecuteJavaScript&nbsp;instead.<br>
+&nbsp;<br>
+Example:&nbsp;num&nbsp;=&nbsp;runner.<a href="#ActionRunner-EvaluateJavaScript">EvaluateJavaScript</a>('document.location.href')<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;expression:&nbsp;The&nbsp;expression&nbsp;to&nbsp;evaluate&nbsp;(provided&nbsp;as&nbsp;string).<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;EvaluationException:&nbsp;The&nbsp;expression&nbsp;failed&nbsp;to&nbsp;execute<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;or&nbsp;the&nbsp;evaluation&nbsp;result&nbsp;cannot&nbsp;be&nbsp;converted&nbsp;to&nbsp;JSON.</tt></dd></dl>
+
+<dl><dt><a name="ActionRunner-ExecuteJavaScript"><strong>ExecuteJavaScript</strong></a>(self, statement)</dt><dd><tt>Executes&nbsp;a&nbsp;given&nbsp;JavaScript&nbsp;expression.&nbsp;Does&nbsp;not&nbsp;return&nbsp;the&nbsp;result.<br>
+&nbsp;<br>
+Example:&nbsp;runner.<a href="#ActionRunner-ExecuteJavaScript">ExecuteJavaScript</a>('var&nbsp;foo&nbsp;=&nbsp;1;');<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;statement:&nbsp;The&nbsp;statement&nbsp;to&nbsp;execute&nbsp;(provided&nbsp;as&nbsp;string).<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;EvaluationException:&nbsp;The&nbsp;statement&nbsp;failed&nbsp;to&nbsp;execute.</tt></dd></dl>
+
+<dl><dt><a name="ActionRunner-ForceGarbageCollection"><strong>ForceGarbageCollection</strong></a>(self)</dt><dd><tt>Forces&nbsp;JavaScript&nbsp;garbage&nbsp;collection&nbsp;on&nbsp;the&nbsp;page.</tt></dd></dl>
+
+<dl><dt><a name="ActionRunner-LoadMedia"><strong>LoadMedia</strong></a>(self, selector<font color="#909090">=None</font>, event_timeout_in_seconds<font color="#909090">=0</font>, event_to_await<font color="#909090">='canplaythrough'</font>)</dt><dd><tt>Invokes&nbsp;load()&nbsp;on&nbsp;media&nbsp;elements&nbsp;and&nbsp;awaits&nbsp;an&nbsp;event.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;selector:&nbsp;A&nbsp;CSS&nbsp;selector&nbsp;describing&nbsp;the&nbsp;element.&nbsp;If&nbsp;none&nbsp;is<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;specified,&nbsp;load&nbsp;the&nbsp;first&nbsp;media&nbsp;element&nbsp;on&nbsp;the&nbsp;page.&nbsp;If&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;selector&nbsp;matches&nbsp;more&nbsp;than&nbsp;1&nbsp;media&nbsp;element,&nbsp;all&nbsp;of&nbsp;them&nbsp;will<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;be&nbsp;loaded.<br>
+&nbsp;&nbsp;event_timeout_in_seconds:&nbsp;Maximum&nbsp;waiting&nbsp;time&nbsp;for&nbsp;the&nbsp;event&nbsp;to&nbsp;be&nbsp;fired.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;0&nbsp;means&nbsp;do&nbsp;not&nbsp;wait.<br>
+&nbsp;&nbsp;event_to_await:&nbsp;Which&nbsp;event&nbsp;to&nbsp;await.&nbsp;For&nbsp;example:&nbsp;'canplaythrough'&nbsp;or<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;'loadedmetadata'.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;TimeoutException:&nbsp;If&nbsp;the&nbsp;maximum&nbsp;waiting&nbsp;time&nbsp;is&nbsp;exceeded.</tt></dd></dl>
+
+<dl><dt><a name="ActionRunner-LoopMedia"><strong>LoopMedia</strong></a>(self, loop_count, selector<font color="#909090">=None</font>, timeout_in_seconds<font color="#909090">=None</font>)</dt><dd><tt>Loops&nbsp;a&nbsp;media&nbsp;playback.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;loop_count:&nbsp;The&nbsp;number&nbsp;of&nbsp;times&nbsp;to&nbsp;loop&nbsp;the&nbsp;playback.<br>
+&nbsp;&nbsp;selector:&nbsp;A&nbsp;CSS&nbsp;selector&nbsp;describing&nbsp;the&nbsp;element.&nbsp;If&nbsp;none&nbsp;is<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;specified,&nbsp;loop&nbsp;the&nbsp;first&nbsp;media&nbsp;element&nbsp;on&nbsp;the&nbsp;page.&nbsp;If&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;selector&nbsp;matches&nbsp;more&nbsp;than&nbsp;1&nbsp;media&nbsp;element,&nbsp;all&nbsp;of&nbsp;them&nbsp;will<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;be&nbsp;looped.<br>
+&nbsp;&nbsp;timeout_in_seconds:&nbsp;Maximum&nbsp;waiting&nbsp;time&nbsp;for&nbsp;the&nbsp;looped&nbsp;playback&nbsp;to<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;complete.&nbsp;0&nbsp;means&nbsp;do&nbsp;not&nbsp;wait.&nbsp;None&nbsp;(the&nbsp;default)&nbsp;means&nbsp;to<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;wait&nbsp;loop_count&nbsp;*&nbsp;60&nbsp;seconds.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;TimeoutException:&nbsp;If&nbsp;the&nbsp;maximum&nbsp;waiting&nbsp;time&nbsp;is&nbsp;exceeded.</tt></dd></dl>
+
+<dl><dt><a name="ActionRunner-MouseClick"><strong>MouseClick</strong></a>(self, selector<font color="#909090">=None</font>)</dt><dd><tt>Mouse&nbsp;click&nbsp;the&nbsp;given&nbsp;element.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;selector:&nbsp;A&nbsp;CSS&nbsp;selector&nbsp;describing&nbsp;the&nbsp;element.</tt></dd></dl>
+
+<dl><dt><a name="ActionRunner-Navigate"><strong>Navigate</strong></a>(self, url, script_to_evaluate_on_commit<font color="#909090">=None</font>, timeout_in_seconds<font color="#909090">=60</font>)</dt><dd><tt>Navigates&nbsp;to&nbsp;url.<br>
+&nbsp;<br>
+If&nbsp;|script_to_evaluate_on_commit|&nbsp;is&nbsp;given,&nbsp;the&nbsp;script&nbsp;source&nbsp;string&nbsp;will&nbsp;be<br>
+evaluated&nbsp;when&nbsp;the&nbsp;navigation&nbsp;is&nbsp;committed.&nbsp;This&nbsp;is&nbsp;after&nbsp;the&nbsp;context&nbsp;of<br>
+the&nbsp;page&nbsp;exists,&nbsp;but&nbsp;before&nbsp;any&nbsp;script&nbsp;on&nbsp;the&nbsp;page&nbsp;itself&nbsp;has&nbsp;executed.</tt></dd></dl>
+
+<dl><dt><a name="ActionRunner-PauseInteractive"><strong>PauseInteractive</strong></a>(self)</dt><dd><tt>Pause&nbsp;the&nbsp;page&nbsp;execution&nbsp;and&nbsp;wait&nbsp;for&nbsp;terminal&nbsp;interaction.<br>
+&nbsp;<br>
+This&nbsp;is&nbsp;typically&nbsp;used&nbsp;for&nbsp;debugging.&nbsp;You&nbsp;can&nbsp;use&nbsp;this&nbsp;to&nbsp;pause<br>
+the&nbsp;page&nbsp;execution&nbsp;and&nbsp;inspect&nbsp;the&nbsp;browser&nbsp;state&nbsp;before<br>
+continuing.</tt></dd></dl>
+
+<dl><dt><a name="ActionRunner-PinchElement"><strong>PinchElement</strong></a>(self, selector<font color="#909090">=None</font>, text<font color="#909090">=None</font>, element_function<font color="#909090">=None</font>, left_anchor_ratio<font color="#909090">=0.5</font>, top_anchor_ratio<font color="#909090">=0.5</font>, scale_factor<font color="#909090">=None</font>, speed_in_pixels_per_second<font color="#909090">=800</font>)</dt><dd><tt>Perform&nbsp;the&nbsp;pinch&nbsp;gesture&nbsp;on&nbsp;an&nbsp;element.<br>
+&nbsp;<br>
+It&nbsp;computes&nbsp;the&nbsp;pinch&nbsp;gesture&nbsp;automatically&nbsp;based&nbsp;on&nbsp;the&nbsp;anchor<br>
+coordinate&nbsp;and&nbsp;the&nbsp;scale&nbsp;factor.&nbsp;The&nbsp;scale&nbsp;factor&nbsp;is&nbsp;the&nbsp;ratio&nbsp;of<br>
+the&nbsp;final&nbsp;span&nbsp;to&nbsp;the&nbsp;initial&nbsp;span&nbsp;of&nbsp;the&nbsp;gesture.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;selector:&nbsp;A&nbsp;CSS&nbsp;selector&nbsp;describing&nbsp;the&nbsp;element.<br>
+&nbsp;&nbsp;text:&nbsp;The&nbsp;element&nbsp;must&nbsp;contain&nbsp;this&nbsp;exact&nbsp;text.<br>
+&nbsp;&nbsp;element_function:&nbsp;A&nbsp;JavaScript&nbsp;function&nbsp;(as&nbsp;string)&nbsp;that&nbsp;is&nbsp;used<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;to&nbsp;retrieve&nbsp;the&nbsp;element.&nbsp;For&nbsp;example:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;'function()&nbsp;{&nbsp;return&nbsp;foo.element;&nbsp;}'.<br>
+&nbsp;&nbsp;left_anchor_ratio:&nbsp;The&nbsp;horizontal&nbsp;pinch&nbsp;anchor&nbsp;coordinate&nbsp;of&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;gesture,&nbsp;as&nbsp;a&nbsp;ratio&nbsp;of&nbsp;the&nbsp;visible&nbsp;bounding&nbsp;rectangle&nbsp;for<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;the&nbsp;element.<br>
+&nbsp;&nbsp;top_anchor_ratio:&nbsp;The&nbsp;vertical&nbsp;pinch&nbsp;anchor&nbsp;coordinate&nbsp;of&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;gesture,&nbsp;as&nbsp;a&nbsp;ratio&nbsp;of&nbsp;the&nbsp;visible&nbsp;bounding&nbsp;rectangle&nbsp;for<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;the&nbsp;element.<br>
+&nbsp;&nbsp;scale_factor:&nbsp;The&nbsp;ratio&nbsp;of&nbsp;the&nbsp;final&nbsp;span&nbsp;to&nbsp;the&nbsp;initial&nbsp;span.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;The&nbsp;default&nbsp;scale&nbsp;factor&nbsp;is<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;3.0&nbsp;/&nbsp;(window.outerWidth/window.innerWidth).<br>
+&nbsp;&nbsp;speed_in_pixels_per_second:&nbsp;The&nbsp;speed&nbsp;of&nbsp;the&nbsp;gesture&nbsp;(in&nbsp;pixels/s).</tt></dd></dl>
+
+<dl><dt><a name="ActionRunner-PinchPage"><strong>PinchPage</strong></a>(self, left_anchor_ratio<font color="#909090">=0.5</font>, top_anchor_ratio<font color="#909090">=0.5</font>, scale_factor<font color="#909090">=None</font>, speed_in_pixels_per_second<font color="#909090">=800</font>)</dt><dd><tt>Perform&nbsp;the&nbsp;pinch&nbsp;gesture&nbsp;on&nbsp;the&nbsp;page.<br>
+&nbsp;<br>
+It&nbsp;computes&nbsp;the&nbsp;pinch&nbsp;gesture&nbsp;automatically&nbsp;based&nbsp;on&nbsp;the&nbsp;anchor<br>
+coordinate&nbsp;and&nbsp;the&nbsp;scale&nbsp;factor.&nbsp;The&nbsp;scale&nbsp;factor&nbsp;is&nbsp;the&nbsp;ratio&nbsp;of<br>
+the&nbsp;final&nbsp;span&nbsp;to&nbsp;the&nbsp;initial&nbsp;span&nbsp;of&nbsp;the&nbsp;gesture.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;left_anchor_ratio:&nbsp;The&nbsp;horizontal&nbsp;pinch&nbsp;anchor&nbsp;coordinate&nbsp;of&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;gesture,&nbsp;as&nbsp;a&nbsp;ratio&nbsp;of&nbsp;the&nbsp;visible&nbsp;bounding&nbsp;rectangle&nbsp;for<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;document.body.<br>
+&nbsp;&nbsp;top_anchor_ratio:&nbsp;The&nbsp;vertical&nbsp;pinch&nbsp;anchor&nbsp;coordinate&nbsp;of&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;gesture,&nbsp;as&nbsp;a&nbsp;ratio&nbsp;of&nbsp;the&nbsp;visible&nbsp;bounding&nbsp;rectangle&nbsp;for<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;document.body.<br>
+&nbsp;&nbsp;scale_factor:&nbsp;The&nbsp;ratio&nbsp;of&nbsp;the&nbsp;final&nbsp;span&nbsp;to&nbsp;the&nbsp;initial&nbsp;span.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;The&nbsp;default&nbsp;scale&nbsp;factor&nbsp;is<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;3.0&nbsp;/&nbsp;(window.outerWidth/window.innerWidth).<br>
+&nbsp;&nbsp;speed_in_pixels_per_second:&nbsp;The&nbsp;speed&nbsp;of&nbsp;the&nbsp;gesture&nbsp;(in&nbsp;pixels/s).</tt></dd></dl>
+
+<dl><dt><a name="ActionRunner-PlayMedia"><strong>PlayMedia</strong></a>(self, selector<font color="#909090">=None</font>, playing_event_timeout_in_seconds<font color="#909090">=0</font>, ended_event_timeout_in_seconds<font color="#909090">=0</font>)</dt><dd><tt>Invokes&nbsp;the&nbsp;"play"&nbsp;action&nbsp;on&nbsp;media&nbsp;elements&nbsp;(such&nbsp;as&nbsp;video).<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;selector:&nbsp;A&nbsp;CSS&nbsp;selector&nbsp;describing&nbsp;the&nbsp;element.&nbsp;If&nbsp;none&nbsp;is<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;specified,&nbsp;play&nbsp;the&nbsp;first&nbsp;media&nbsp;element&nbsp;on&nbsp;the&nbsp;page.&nbsp;If&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;selector&nbsp;matches&nbsp;more&nbsp;than&nbsp;1&nbsp;media&nbsp;element,&nbsp;all&nbsp;of&nbsp;them&nbsp;will<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;be&nbsp;played.<br>
+&nbsp;&nbsp;playing_event_timeout_in_seconds:&nbsp;Maximum&nbsp;waiting&nbsp;time&nbsp;for&nbsp;the&nbsp;"playing"<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;event&nbsp;(dispatched&nbsp;when&nbsp;the&nbsp;media&nbsp;begins&nbsp;to&nbsp;play)&nbsp;to&nbsp;be&nbsp;fired.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;0&nbsp;means&nbsp;do&nbsp;not&nbsp;wait.<br>
+&nbsp;&nbsp;ended_event_timeout_in_seconds:&nbsp;Maximum&nbsp;waiting&nbsp;time&nbsp;for&nbsp;the&nbsp;"ended"<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;event&nbsp;(dispatched&nbsp;when&nbsp;playback&nbsp;completes)&nbsp;to&nbsp;be&nbsp;fired.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;0&nbsp;means&nbsp;do&nbsp;not&nbsp;wait.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;TimeoutException:&nbsp;If&nbsp;the&nbsp;maximum&nbsp;waiting&nbsp;time&nbsp;is&nbsp;exceeded.</tt></dd></dl>
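+<p>For example, a minimal usage sketch of <a href="#ActionRunner-PlayMedia">PlayMedia</a>; the 'video' selector and timeout values are illustrative:</p>
+<p><tt>def&nbsp;RunPageInteractions(self,&nbsp;action_runner):<br>
+&nbsp;&nbsp;#&nbsp;Play&nbsp;the&nbsp;first&nbsp;&lt;video&gt;&nbsp;element&nbsp;and&nbsp;wait&nbsp;up&nbsp;to&nbsp;60&nbsp;s&nbsp;for&nbsp;playback&nbsp;to&nbsp;finish.<br>
+&nbsp;&nbsp;action_runner.PlayMedia(<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;selector='video',<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;playing_event_timeout_in_seconds=10,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;ended_event_timeout_in_seconds=60)</tt></p>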
+
+<dl><dt><a name="ActionRunner-ReloadPage"><strong>ReloadPage</strong></a>(self)</dt><dd><tt>Reloads&nbsp;the&nbsp;page.</tt></dd></dl>
+
+<dl><dt><a name="ActionRunner-RepaintContinuously"><strong>RepaintContinuously</strong></a>(self, seconds)</dt><dd><tt>Continuously&nbsp;repaints&nbsp;the&nbsp;visible&nbsp;content.<br>
+&nbsp;<br>
+It&nbsp;does&nbsp;this&nbsp;by&nbsp;requesting&nbsp;animation&nbsp;frames&nbsp;until&nbsp;the&nbsp;given&nbsp;number<br>
+of&nbsp;seconds&nbsp;has&nbsp;elapsed&nbsp;AND&nbsp;at&nbsp;least&nbsp;three&nbsp;RAFs&nbsp;have&nbsp;been<br>
+fired.&nbsp;Times&nbsp;out&nbsp;after&nbsp;max(60,&nbsp;self.<strong>seconds</strong>),&nbsp;if&nbsp;fewer&nbsp;than&nbsp;three<br>
+RAFs&nbsp;were&nbsp;fired.</tt></dd></dl>
+
+<dl><dt><a name="ActionRunner-RepeatableBrowserDrivenScroll"><strong>RepeatableBrowserDrivenScroll</strong></a>(self, x_scroll_distance_ratio<font color="#909090">=0.0</font>, y_scroll_distance_ratio<font color="#909090">=0.5</font>, repeat_count<font color="#909090">=0</font>, repeat_delay_ms<font color="#909090">=250</font>)</dt><dd><tt>Perform&nbsp;a&nbsp;browser&nbsp;driven&nbsp;repeatable&nbsp;scroll&nbsp;gesture.<br>
+&nbsp;<br>
+The&nbsp;scroll&nbsp;gesture&nbsp;is&nbsp;driven&nbsp;from&nbsp;the&nbsp;browser.&nbsp;This&nbsp;is&nbsp;useful&nbsp;because&nbsp;the<br>
+main&nbsp;thread&nbsp;often&nbsp;isn't&nbsp;responsive,&nbsp;but&nbsp;the&nbsp;browser&nbsp;process&nbsp;usually&nbsp;is,&nbsp;so&nbsp;the<br>
+delay&nbsp;between&nbsp;the&nbsp;scroll&nbsp;gestures&nbsp;should&nbsp;be&nbsp;consistent.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;x_scroll_distance_ratio:&nbsp;The&nbsp;horizontal&nbsp;length&nbsp;of&nbsp;the&nbsp;scroll&nbsp;as&nbsp;a&nbsp;fraction<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;of&nbsp;the&nbsp;screen&nbsp;width.<br>
+&nbsp;&nbsp;y_scroll_distance_ratio:&nbsp;The&nbsp;vertical&nbsp;length&nbsp;of&nbsp;the&nbsp;scroll&nbsp;as&nbsp;a&nbsp;fraction<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;of&nbsp;the&nbsp;screen&nbsp;height.<br>
+&nbsp;&nbsp;repeat_count:&nbsp;The&nbsp;number&nbsp;of&nbsp;additional&nbsp;times&nbsp;to&nbsp;repeat&nbsp;the&nbsp;gesture.<br>
+&nbsp;&nbsp;repeat_delay_ms:&nbsp;The&nbsp;delay&nbsp;in&nbsp;milliseconds&nbsp;between&nbsp;each&nbsp;scroll&nbsp;gesture.</tt></dd></dl>
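+<p>For example, a sketch of <a href="#ActionRunner-RepeatableBrowserDrivenScroll">RepeatableBrowserDrivenScroll</a>; the ratio and repeat count are illustrative:</p>
+<p><tt>def&nbsp;RunPageInteractions(self,&nbsp;action_runner):<br>
+&nbsp;&nbsp;#&nbsp;Scroll&nbsp;down&nbsp;by&nbsp;half&nbsp;the&nbsp;screen&nbsp;height,&nbsp;then&nbsp;repeat&nbsp;three&nbsp;more&nbsp;times<br>
+&nbsp;&nbsp;#&nbsp;at&nbsp;the&nbsp;default&nbsp;250&nbsp;ms&nbsp;interval.<br>
+&nbsp;&nbsp;action_runner.RepeatableBrowserDrivenScroll(<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;y_scroll_distance_ratio=0.5,&nbsp;repeat_count=3)</tt></p>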
+
+<dl><dt><a name="ActionRunner-ScrollBounceElement"><strong>ScrollBounceElement</strong></a>(self, selector<font color="#909090">=None</font>, text<font color="#909090">=None</font>, element_function<font color="#909090">=None</font>, left_start_ratio<font color="#909090">=0.5</font>, top_start_ratio<font color="#909090">=0.5</font>, direction<font color="#909090">='down'</font>, distance<font color="#909090">=100</font>, overscroll<font color="#909090">=10</font>, repeat_count<font color="#909090">=10</font>, speed_in_pixels_per_second<font color="#909090">=400</font>)</dt><dd><tt>Perform&nbsp;scroll&nbsp;bounce&nbsp;gesture&nbsp;on&nbsp;the&nbsp;element.<br>
+&nbsp;<br>
+This&nbsp;gesture&nbsp;scrolls&nbsp;on&nbsp;the&nbsp;element&nbsp;by&nbsp;the&nbsp;number&nbsp;of&nbsp;pixels&nbsp;specified&nbsp;in<br>
+distance,&nbsp;in&nbsp;the&nbsp;given&nbsp;direction,&nbsp;followed&nbsp;by&nbsp;a&nbsp;scroll&nbsp;by<br>
+(distance&nbsp;+&nbsp;overscroll)&nbsp;pixels&nbsp;in&nbsp;the&nbsp;opposite&nbsp;direction.<br>
+The&nbsp;above&nbsp;gesture&nbsp;is&nbsp;repeated&nbsp;repeat_count&nbsp;times.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;selector:&nbsp;A&nbsp;CSS&nbsp;selector&nbsp;describing&nbsp;the&nbsp;element.<br>
+&nbsp;&nbsp;text:&nbsp;The&nbsp;element&nbsp;must&nbsp;contain&nbsp;this&nbsp;exact&nbsp;text.<br>
+&nbsp;&nbsp;element_function:&nbsp;A&nbsp;JavaScript&nbsp;function&nbsp;(as&nbsp;string)&nbsp;that&nbsp;is&nbsp;used<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;to&nbsp;retrieve&nbsp;the&nbsp;element.&nbsp;For&nbsp;example:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;'function()&nbsp;{&nbsp;return&nbsp;foo.element;&nbsp;}'.<br>
+&nbsp;&nbsp;left_start_ratio:&nbsp;The&nbsp;horizontal&nbsp;starting&nbsp;coordinate&nbsp;of&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;gesture,&nbsp;as&nbsp;a&nbsp;ratio&nbsp;of&nbsp;the&nbsp;visible&nbsp;bounding&nbsp;rectangle&nbsp;for<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;document.body.<br>
+&nbsp;&nbsp;top_start_ratio:&nbsp;The&nbsp;vertical&nbsp;starting&nbsp;coordinate&nbsp;of&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;gesture,&nbsp;as&nbsp;a&nbsp;ratio&nbsp;of&nbsp;the&nbsp;visible&nbsp;bounding&nbsp;rectangle&nbsp;for<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;document.body.<br>
+&nbsp;&nbsp;direction:&nbsp;The&nbsp;direction&nbsp;of&nbsp;scroll,&nbsp;either&nbsp;'left',&nbsp;'right',<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;'up',&nbsp;'down',&nbsp;'upleft',&nbsp;'upright',&nbsp;'downleft',&nbsp;or&nbsp;'downright'.<br>
+&nbsp;&nbsp;distance:&nbsp;The&nbsp;distance&nbsp;to&nbsp;scroll&nbsp;(in&nbsp;pixels).<br>
+&nbsp;&nbsp;overscroll:&nbsp;The&nbsp;number&nbsp;of&nbsp;additional&nbsp;pixels&nbsp;to&nbsp;scroll&nbsp;back,&nbsp;in<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;addition&nbsp;to&nbsp;the&nbsp;given&nbsp;distance.<br>
+&nbsp;&nbsp;repeat_count:&nbsp;How&nbsp;often&nbsp;we&nbsp;want&nbsp;to&nbsp;repeat&nbsp;the&nbsp;full&nbsp;gesture.<br>
+&nbsp;&nbsp;speed_in_pixels_per_second:&nbsp;The&nbsp;speed&nbsp;of&nbsp;the&nbsp;gesture&nbsp;(in&nbsp;pixels/s).</tt></dd></dl>
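+<p>For example, a sketch of <a href="#ActionRunner-ScrollBounceElement">ScrollBounceElement</a>; the '#list' selector is illustrative:</p>
+<p><tt>def&nbsp;RunPageInteractions(self,&nbsp;action_runner):<br>
+&nbsp;&nbsp;#&nbsp;Scroll&nbsp;the&nbsp;(hypothetical)&nbsp;'#list'&nbsp;element&nbsp;100&nbsp;px&nbsp;down,&nbsp;then&nbsp;110&nbsp;px&nbsp;back&nbsp;up,<br>
+&nbsp;&nbsp;#&nbsp;repeating&nbsp;the&nbsp;pair&nbsp;10&nbsp;times&nbsp;(the&nbsp;default&nbsp;repeat_count).<br>
+&nbsp;&nbsp;action_runner.ScrollBounceElement(selector='#list',&nbsp;direction='down',<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;distance=100,&nbsp;overscroll=10)</tt></p>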
+
+<dl><dt><a name="ActionRunner-ScrollBouncePage"><strong>ScrollBouncePage</strong></a>(self, left_start_ratio<font color="#909090">=0.5</font>, top_start_ratio<font color="#909090">=0.5</font>, direction<font color="#909090">='down'</font>, distance<font color="#909090">=100</font>, overscroll<font color="#909090">=10</font>, repeat_count<font color="#909090">=10</font>, speed_in_pixels_per_second<font color="#909090">=400</font>)</dt><dd><tt>Perform&nbsp;scroll&nbsp;bounce&nbsp;gesture&nbsp;on&nbsp;the&nbsp;page.<br>
+&nbsp;<br>
+This&nbsp;gesture&nbsp;scrolls&nbsp;the&nbsp;page&nbsp;by&nbsp;the&nbsp;number&nbsp;of&nbsp;pixels&nbsp;specified&nbsp;in<br>
+distance,&nbsp;in&nbsp;the&nbsp;given&nbsp;direction,&nbsp;followed&nbsp;by&nbsp;a&nbsp;scroll&nbsp;by<br>
+(distance&nbsp;+&nbsp;overscroll)&nbsp;pixels&nbsp;in&nbsp;the&nbsp;opposite&nbsp;direction.<br>
+The&nbsp;above&nbsp;gesture&nbsp;is&nbsp;repeated&nbsp;repeat_count&nbsp;times.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;left_start_ratio:&nbsp;The&nbsp;horizontal&nbsp;starting&nbsp;coordinate&nbsp;of&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;gesture,&nbsp;as&nbsp;a&nbsp;ratio&nbsp;of&nbsp;the&nbsp;visible&nbsp;bounding&nbsp;rectangle&nbsp;for<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;document.body.<br>
+&nbsp;&nbsp;top_start_ratio:&nbsp;The&nbsp;vertical&nbsp;starting&nbsp;coordinate&nbsp;of&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;gesture,&nbsp;as&nbsp;a&nbsp;ratio&nbsp;of&nbsp;the&nbsp;visible&nbsp;bounding&nbsp;rectangle&nbsp;for<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;document.body.<br>
+&nbsp;&nbsp;direction:&nbsp;The&nbsp;direction&nbsp;of&nbsp;scroll,&nbsp;either&nbsp;'left',&nbsp;'right',<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;'up',&nbsp;'down',&nbsp;'upleft',&nbsp;'upright',&nbsp;'downleft',&nbsp;or&nbsp;'downright'.<br>
+&nbsp;&nbsp;distance:&nbsp;The&nbsp;distance&nbsp;to&nbsp;scroll&nbsp;(in&nbsp;pixels).<br>
+&nbsp;&nbsp;overscroll:&nbsp;The&nbsp;number&nbsp;of&nbsp;additional&nbsp;pixels&nbsp;to&nbsp;scroll&nbsp;back,&nbsp;in<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;addition&nbsp;to&nbsp;the&nbsp;given&nbsp;distance.<br>
+&nbsp;&nbsp;repeat_count:&nbsp;How&nbsp;often&nbsp;we&nbsp;want&nbsp;to&nbsp;repeat&nbsp;the&nbsp;full&nbsp;gesture.<br>
+&nbsp;&nbsp;speed_in_pixels_per_second:&nbsp;The&nbsp;speed&nbsp;of&nbsp;the&nbsp;gesture&nbsp;(in&nbsp;pixels/s).</tt></dd></dl>
+
+<dl><dt><a name="ActionRunner-ScrollElement"><strong>ScrollElement</strong></a>(self, selector<font color="#909090">=None</font>, text<font color="#909090">=None</font>, element_function<font color="#909090">=None</font>, left_start_ratio<font color="#909090">=0.5</font>, top_start_ratio<font color="#909090">=0.5</font>, direction<font color="#909090">='down'</font>, distance<font color="#909090">=None</font>, distance_expr<font color="#909090">=None</font>, speed_in_pixels_per_second<font color="#909090">=800</font>, use_touch<font color="#909090">=False</font>, synthetic_gesture_source<font color="#909090">='DEFAULT'</font>)</dt><dd><tt>Perform&nbsp;scroll&nbsp;gesture&nbsp;on&nbsp;the&nbsp;element.<br>
+&nbsp;<br>
+The&nbsp;element&nbsp;may&nbsp;be&nbsp;selected&nbsp;via&nbsp;selector,&nbsp;text,&nbsp;or&nbsp;element_function.<br>
+Exactly&nbsp;one&nbsp;of&nbsp;these&nbsp;arguments&nbsp;must&nbsp;be&nbsp;specified.<br>
+&nbsp;<br>
+You&nbsp;may&nbsp;specify&nbsp;distance&nbsp;or&nbsp;distance_expr,&nbsp;but&nbsp;not&nbsp;both.&nbsp;If<br>
+neither&nbsp;is&nbsp;specified,&nbsp;the&nbsp;default&nbsp;scroll&nbsp;distance&nbsp;is&nbsp;variable<br>
+depending&nbsp;on&nbsp;direction&nbsp;(see&nbsp;scroll.js&nbsp;for&nbsp;full&nbsp;implementation).<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;selector:&nbsp;A&nbsp;CSS&nbsp;selector&nbsp;describing&nbsp;the&nbsp;element.<br>
+&nbsp;&nbsp;text:&nbsp;The&nbsp;element&nbsp;must&nbsp;contain&nbsp;this&nbsp;exact&nbsp;text.<br>
+&nbsp;&nbsp;element_function:&nbsp;A&nbsp;JavaScript&nbsp;function&nbsp;(as&nbsp;string)&nbsp;that&nbsp;is&nbsp;used<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;to&nbsp;retrieve&nbsp;the&nbsp;element.&nbsp;For&nbsp;example:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;'function()&nbsp;{&nbsp;return&nbsp;foo.element;&nbsp;}'.<br>
+&nbsp;&nbsp;left_start_ratio:&nbsp;The&nbsp;horizontal&nbsp;starting&nbsp;coordinate&nbsp;of&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;gesture,&nbsp;as&nbsp;a&nbsp;ratio&nbsp;of&nbsp;the&nbsp;visible&nbsp;bounding&nbsp;rectangle&nbsp;for<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;the&nbsp;element.<br>
+&nbsp;&nbsp;top_start_ratio:&nbsp;The&nbsp;vertical&nbsp;starting&nbsp;coordinate&nbsp;of&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;gesture,&nbsp;as&nbsp;a&nbsp;ratio&nbsp;of&nbsp;the&nbsp;visible&nbsp;bounding&nbsp;rectangle&nbsp;for<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;the&nbsp;element.<br>
+&nbsp;&nbsp;direction:&nbsp;The&nbsp;direction&nbsp;of&nbsp;scroll,&nbsp;either&nbsp;'left',&nbsp;'right',<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;'up',&nbsp;'down',&nbsp;'upleft',&nbsp;'upright',&nbsp;'downleft',&nbsp;or&nbsp;'downright'.<br>
+&nbsp;&nbsp;distance:&nbsp;The&nbsp;distance&nbsp;to&nbsp;scroll&nbsp;(in&nbsp;pixels).<br>
+&nbsp;&nbsp;distance_expr:&nbsp;A&nbsp;JavaScript&nbsp;expression&nbsp;(as&nbsp;string)&nbsp;that&nbsp;can&nbsp;be<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;evaluated&nbsp;to&nbsp;compute&nbsp;scroll&nbsp;distance.&nbsp;Example:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;'window.scrollTop'&nbsp;or&nbsp;'(function()&nbsp;{&nbsp;return&nbsp;crazyMath();&nbsp;})()'.<br>
+&nbsp;&nbsp;speed_in_pixels_per_second:&nbsp;The&nbsp;speed&nbsp;of&nbsp;the&nbsp;gesture&nbsp;(in&nbsp;pixels/s).<br>
+&nbsp;&nbsp;use_touch:&nbsp;Whether&nbsp;scrolling&nbsp;should&nbsp;be&nbsp;done&nbsp;with&nbsp;touch&nbsp;input.<br>
+&nbsp;&nbsp;synthetic_gesture_source:&nbsp;The&nbsp;source&nbsp;input&nbsp;device&nbsp;type&nbsp;for&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;synthetic&nbsp;gesture:&nbsp;'DEFAULT',&nbsp;'TOUCH'&nbsp;or&nbsp;'MOUSE'.</tt></dd></dl>
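+<p>For example, a sketch of <a href="#ActionRunner-ScrollElement">ScrollElement</a> showing the three mutually exclusive ways to pick the element; the selector, text, and function values are illustrative:</p>
+<p><tt>def&nbsp;RunPageInteractions(self,&nbsp;action_runner):<br>
+&nbsp;&nbsp;#&nbsp;By&nbsp;CSS&nbsp;selector&nbsp;(hypothetical&nbsp;'#timeline'&nbsp;element):<br>
+&nbsp;&nbsp;action_runner.ScrollElement(selector='#timeline',&nbsp;direction='down')<br>
+&nbsp;&nbsp;#&nbsp;By&nbsp;exact&nbsp;text:<br>
+&nbsp;&nbsp;action_runner.ScrollElement(text='Comments',&nbsp;direction='down')<br>
+&nbsp;&nbsp;#&nbsp;By&nbsp;a&nbsp;JavaScript&nbsp;function&nbsp;returning&nbsp;the&nbsp;element,&nbsp;with&nbsp;touch&nbsp;input:<br>
+&nbsp;&nbsp;action_runner.ScrollElement(<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;element_function='function()&nbsp;{&nbsp;return&nbsp;document.body;&nbsp;}',<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;distance=500,&nbsp;use_touch=True)</tt></p>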
+
+<dl><dt><a name="ActionRunner-ScrollPage"><strong>ScrollPage</strong></a>(self, left_start_ratio<font color="#909090">=0.5</font>, top_start_ratio<font color="#909090">=0.5</font>, direction<font color="#909090">='down'</font>, distance<font color="#909090">=None</font>, distance_expr<font color="#909090">=None</font>, speed_in_pixels_per_second<font color="#909090">=800</font>, use_touch<font color="#909090">=False</font>, synthetic_gesture_source<font color="#909090">='DEFAULT'</font>)</dt><dd><tt>Perform&nbsp;scroll&nbsp;gesture&nbsp;on&nbsp;the&nbsp;page.<br>
+&nbsp;<br>
+You&nbsp;may&nbsp;specify&nbsp;distance&nbsp;or&nbsp;distance_expr,&nbsp;but&nbsp;not&nbsp;both.&nbsp;If<br>
+neither&nbsp;is&nbsp;specified,&nbsp;the&nbsp;default&nbsp;scroll&nbsp;distance&nbsp;is&nbsp;variable<br>
+depending&nbsp;on&nbsp;direction&nbsp;(see&nbsp;scroll.js&nbsp;for&nbsp;full&nbsp;implementation).<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;left_start_ratio:&nbsp;The&nbsp;horizontal&nbsp;starting&nbsp;coordinate&nbsp;of&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;gesture,&nbsp;as&nbsp;a&nbsp;ratio&nbsp;of&nbsp;the&nbsp;visible&nbsp;bounding&nbsp;rectangle&nbsp;for<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;document.body.<br>
+&nbsp;&nbsp;top_start_ratio:&nbsp;The&nbsp;vertical&nbsp;starting&nbsp;coordinate&nbsp;of&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;gesture,&nbsp;as&nbsp;a&nbsp;ratio&nbsp;of&nbsp;the&nbsp;visible&nbsp;bounding&nbsp;rectangle&nbsp;for<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;document.body.<br>
+&nbsp;&nbsp;direction:&nbsp;The&nbsp;direction&nbsp;of&nbsp;scroll,&nbsp;either&nbsp;'left',&nbsp;'right',<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;'up',&nbsp;'down',&nbsp;'upleft',&nbsp;'upright',&nbsp;'downleft',&nbsp;or&nbsp;'downright'.<br>
+&nbsp;&nbsp;distance:&nbsp;The&nbsp;distance&nbsp;to&nbsp;scroll&nbsp;(in&nbsp;pixels).<br>
+&nbsp;&nbsp;distance_expr:&nbsp;A&nbsp;JavaScript&nbsp;expression&nbsp;(as&nbsp;string)&nbsp;that&nbsp;can&nbsp;be<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;evaluated&nbsp;to&nbsp;compute&nbsp;scroll&nbsp;distance.&nbsp;Example:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;'window.scrollTop'&nbsp;or&nbsp;'(function()&nbsp;{&nbsp;return&nbsp;crazyMath();&nbsp;})()'.<br>
+&nbsp;&nbsp;speed_in_pixels_per_second:&nbsp;The&nbsp;speed&nbsp;of&nbsp;the&nbsp;gesture&nbsp;(in&nbsp;pixels/s).<br>
+&nbsp;&nbsp;use_touch:&nbsp;Whether&nbsp;scrolling&nbsp;should&nbsp;be&nbsp;done&nbsp;with&nbsp;touch&nbsp;input.<br>
+&nbsp;&nbsp;synthetic_gesture_source:&nbsp;The&nbsp;source&nbsp;input&nbsp;device&nbsp;type&nbsp;for&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;synthetic&nbsp;gesture:&nbsp;'DEFAULT',&nbsp;'TOUCH'&nbsp;or&nbsp;'MOUSE'.</tt></dd></dl>
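+<p>For example, a sketch of <a href="#ActionRunner-ScrollPage">ScrollPage</a> using distance_expr instead of a fixed distance; the expression and speed are illustrative:</p>
+<p><tt>def&nbsp;RunPageInteractions(self,&nbsp;action_runner):<br>
+&nbsp;&nbsp;#&nbsp;Scroll&nbsp;by&nbsp;the&nbsp;full&nbsp;scrollable&nbsp;height&nbsp;of&nbsp;the&nbsp;document.<br>
+&nbsp;&nbsp;action_runner.ScrollPage(<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;distance_expr='document.body.scrollHeight&nbsp;-&nbsp;window.innerHeight',<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;speed_in_pixels_per_second=1200)</tt></p>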
+
+<dl><dt><a name="ActionRunner-SeekMedia"><strong>SeekMedia</strong></a>(self, seconds, selector<font color="#909090">=None</font>, timeout_in_seconds<font color="#909090">=0</font>, log_time<font color="#909090">=True</font>, label<font color="#909090">=''</font>)</dt><dd><tt>Performs&nbsp;a&nbsp;seek&nbsp;action&nbsp;on&nbsp;media&nbsp;elements&nbsp;(such&nbsp;as&nbsp;video).<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;seconds:&nbsp;The&nbsp;media&nbsp;time&nbsp;to&nbsp;seek&nbsp;to.<br>
+&nbsp;&nbsp;selector:&nbsp;A&nbsp;CSS&nbsp;selector&nbsp;describing&nbsp;the&nbsp;element.&nbsp;If&nbsp;none&nbsp;is<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;specified,&nbsp;seek&nbsp;the&nbsp;first&nbsp;media&nbsp;element&nbsp;on&nbsp;the&nbsp;page.&nbsp;If&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;selector&nbsp;matches&nbsp;more&nbsp;than&nbsp;1&nbsp;media&nbsp;element,&nbsp;all&nbsp;of&nbsp;them&nbsp;will<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;be&nbsp;seeked.<br>
+&nbsp;&nbsp;timeout_in_seconds:&nbsp;Maximum&nbsp;waiting&nbsp;time&nbsp;for&nbsp;the&nbsp;"seeked"&nbsp;event<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;(dispatched&nbsp;when&nbsp;the&nbsp;seeked&nbsp;operation&nbsp;completes)&nbsp;to&nbsp;be<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;fired.&nbsp;&nbsp;0&nbsp;means&nbsp;do&nbsp;not&nbsp;wait.<br>
+&nbsp;&nbsp;log_time:&nbsp;Whether&nbsp;to&nbsp;log&nbsp;the&nbsp;seek&nbsp;time&nbsp;for&nbsp;the&nbsp;perf<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;measurement.&nbsp;Useful&nbsp;when&nbsp;performing&nbsp;multiple&nbsp;seeks.<br>
+&nbsp;&nbsp;label:&nbsp;A&nbsp;suffix&nbsp;string&nbsp;to&nbsp;name&nbsp;the&nbsp;seek&nbsp;perf&nbsp;measurement.<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;TimeoutException:&nbsp;If&nbsp;the&nbsp;maximum&nbsp;waiting&nbsp;time&nbsp;is&nbsp;exceeded.</tt></dd></dl>
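+<p>For example, a sketch of <a href="#ActionRunner-SeekMedia">SeekMedia</a>; the selector, target time, and label are illustrative:</p>
+<p><tt>def&nbsp;RunPageInteractions(self,&nbsp;action_runner):<br>
+&nbsp;&nbsp;#&nbsp;Seek&nbsp;the&nbsp;first&nbsp;matching&nbsp;video&nbsp;to&nbsp;30&nbsp;s&nbsp;and&nbsp;wait&nbsp;up&nbsp;to&nbsp;10&nbsp;s&nbsp;for&nbsp;'seeked'.<br>
+&nbsp;&nbsp;action_runner.SeekMedia(seconds=30,&nbsp;selector='video',<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;timeout_in_seconds=10,&nbsp;label='mid_stream')</tt></p>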
+
+<dl><dt><a name="ActionRunner-SwipeElement"><strong>SwipeElement</strong></a>(self, selector<font color="#909090">=None</font>, text<font color="#909090">=None</font>, element_function<font color="#909090">=None</font>, left_start_ratio<font color="#909090">=0.5</font>, top_start_ratio<font color="#909090">=0.5</font>, direction<font color="#909090">='left'</font>, distance<font color="#909090">=100</font>, speed_in_pixels_per_second<font color="#909090">=800</font>)</dt><dd><tt>Perform&nbsp;swipe&nbsp;gesture&nbsp;on&nbsp;the&nbsp;element.<br>
+&nbsp;<br>
+The&nbsp;element&nbsp;may&nbsp;be&nbsp;selected&nbsp;via&nbsp;selector,&nbsp;text,&nbsp;or&nbsp;element_function.<br>
+Exactly&nbsp;one&nbsp;of&nbsp;these&nbsp;arguments&nbsp;must&nbsp;be&nbsp;specified.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;selector:&nbsp;A&nbsp;CSS&nbsp;selector&nbsp;describing&nbsp;the&nbsp;element.<br>
+&nbsp;&nbsp;text:&nbsp;The&nbsp;element&nbsp;must&nbsp;contain&nbsp;this&nbsp;exact&nbsp;text.<br>
+&nbsp;&nbsp;element_function:&nbsp;A&nbsp;JavaScript&nbsp;function&nbsp;(as&nbsp;string)&nbsp;that&nbsp;is&nbsp;used<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;to&nbsp;retrieve&nbsp;the&nbsp;element.&nbsp;For&nbsp;example:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;'function()&nbsp;{&nbsp;return&nbsp;foo.element;&nbsp;}'.<br>
+&nbsp;&nbsp;left_start_ratio:&nbsp;The&nbsp;horizontal&nbsp;starting&nbsp;coordinate&nbsp;of&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;gesture,&nbsp;as&nbsp;a&nbsp;ratio&nbsp;of&nbsp;the&nbsp;visible&nbsp;bounding&nbsp;rectangle&nbsp;for<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;the&nbsp;element.<br>
+&nbsp;&nbsp;top_start_ratio:&nbsp;The&nbsp;vertical&nbsp;starting&nbsp;coordinate&nbsp;of&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;gesture,&nbsp;as&nbsp;a&nbsp;ratio&nbsp;of&nbsp;the&nbsp;visible&nbsp;bounding&nbsp;rectangle&nbsp;for<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;the&nbsp;element.<br>
+&nbsp;&nbsp;direction:&nbsp;The&nbsp;direction&nbsp;of&nbsp;swipe,&nbsp;either&nbsp;'left',&nbsp;'right',<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;'up',&nbsp;or&nbsp;'down'.<br>
+&nbsp;&nbsp;distance:&nbsp;The&nbsp;distance&nbsp;to&nbsp;swipe&nbsp;(in&nbsp;pixels).<br>
+&nbsp;&nbsp;speed_in_pixels_per_second:&nbsp;The&nbsp;speed&nbsp;of&nbsp;the&nbsp;gesture&nbsp;(in&nbsp;pixels/s).</tt></dd></dl>
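+<p>For example, a sketch of <a href="#ActionRunner-SwipeElement">SwipeElement</a>; the '#carousel' selector and distance are illustrative:</p>
+<p><tt>def&nbsp;RunPageInteractions(self,&nbsp;action_runner):<br>
+&nbsp;&nbsp;#&nbsp;Swipe&nbsp;the&nbsp;(hypothetical)&nbsp;carousel&nbsp;200&nbsp;px&nbsp;to&nbsp;the&nbsp;left.<br>
+&nbsp;&nbsp;action_runner.SwipeElement(selector='#carousel',&nbsp;direction='left',<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;distance=200)</tt></p>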
+
+<dl><dt><a name="ActionRunner-SwipePage"><strong>SwipePage</strong></a>(self, left_start_ratio<font color="#909090">=0.5</font>, top_start_ratio<font color="#909090">=0.5</font>, direction<font color="#909090">='left'</font>, distance<font color="#909090">=100</font>, speed_in_pixels_per_second<font color="#909090">=800</font>)</dt><dd><tt>Perform&nbsp;swipe&nbsp;gesture&nbsp;on&nbsp;the&nbsp;page.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;left_start_ratio:&nbsp;The&nbsp;horizontal&nbsp;starting&nbsp;coordinate&nbsp;of&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;gesture,&nbsp;as&nbsp;a&nbsp;ratio&nbsp;of&nbsp;the&nbsp;visible&nbsp;bounding&nbsp;rectangle&nbsp;for<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;document.body.<br>
+&nbsp;&nbsp;top_start_ratio:&nbsp;The&nbsp;vertical&nbsp;starting&nbsp;coordinate&nbsp;of&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;gesture,&nbsp;as&nbsp;a&nbsp;ratio&nbsp;of&nbsp;the&nbsp;visible&nbsp;bounding&nbsp;rectangle&nbsp;for<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;document.body.<br>
+&nbsp;&nbsp;direction:&nbsp;The&nbsp;direction&nbsp;of&nbsp;swipe,&nbsp;either&nbsp;'left',&nbsp;'right',<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;'up',&nbsp;or&nbsp;'down'.<br>
+&nbsp;&nbsp;distance:&nbsp;The&nbsp;distance&nbsp;to&nbsp;swipe&nbsp;(in&nbsp;pixels).<br>
+&nbsp;&nbsp;speed_in_pixels_per_second:&nbsp;The&nbsp;speed&nbsp;of&nbsp;the&nbsp;gesture&nbsp;(in&nbsp;pixels/s).</tt></dd></dl>
+
+<dl><dt><a name="ActionRunner-TapElement"><strong>TapElement</strong></a>(self, selector<font color="#909090">=None</font>, text<font color="#909090">=None</font>, element_function<font color="#909090">=None</font>)</dt><dd><tt>Tap&nbsp;an&nbsp;element.<br>
+&nbsp;<br>
+The&nbsp;element&nbsp;may&nbsp;be&nbsp;selected&nbsp;via&nbsp;selector,&nbsp;text,&nbsp;or&nbsp;element_function.<br>
+Exactly&nbsp;one&nbsp;of&nbsp;these&nbsp;arguments&nbsp;must&nbsp;be&nbsp;specified.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;selector:&nbsp;A&nbsp;CSS&nbsp;selector&nbsp;describing&nbsp;the&nbsp;element.<br>
+&nbsp;&nbsp;text:&nbsp;The&nbsp;element&nbsp;must&nbsp;contain&nbsp;this&nbsp;exact&nbsp;text.<br>
+&nbsp;&nbsp;element_function:&nbsp;A&nbsp;JavaScript&nbsp;function&nbsp;(as&nbsp;string)&nbsp;that&nbsp;is&nbsp;used<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;to&nbsp;retrieve&nbsp;the&nbsp;element.&nbsp;For&nbsp;example:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;'(function()&nbsp;{&nbsp;return&nbsp;foo.element;&nbsp;})()'.</tt></dd></dl>
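+<p>For example, a sketch of <a href="#ActionRunner-TapElement">TapElement</a> with one selection argument per call; the selector and text are illustrative:</p>
+<p><tt>def&nbsp;RunPageInteractions(self,&nbsp;action_runner):<br>
+&nbsp;&nbsp;#&nbsp;Tap&nbsp;a&nbsp;(hypothetical)&nbsp;submit&nbsp;button&nbsp;by&nbsp;selector,&nbsp;then&nbsp;a&nbsp;link&nbsp;by&nbsp;its&nbsp;text.<br>
+&nbsp;&nbsp;action_runner.TapElement(selector='#submit')<br>
+&nbsp;&nbsp;action_runner.TapElement(text='Next')</tt></p>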
+
+<dl><dt><a name="ActionRunner-Wait"><strong>Wait</strong></a>(self, seconds)</dt><dd><tt>Wait&nbsp;for&nbsp;the&nbsp;number&nbsp;of&nbsp;seconds&nbsp;specified.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;seconds:&nbsp;The&nbsp;number&nbsp;of&nbsp;seconds&nbsp;to&nbsp;wait.</tt></dd></dl>
+
+<dl><dt><a name="ActionRunner-WaitForElement"><strong>WaitForElement</strong></a>(self, selector<font color="#909090">=None</font>, text<font color="#909090">=None</font>, element_function<font color="#909090">=None</font>, timeout_in_seconds<font color="#909090">=60</font>)</dt><dd><tt>Wait&nbsp;for&nbsp;an&nbsp;element&nbsp;to&nbsp;appear&nbsp;in&nbsp;the&nbsp;document.<br>
+&nbsp;<br>
+The&nbsp;element&nbsp;may&nbsp;be&nbsp;selected&nbsp;via&nbsp;selector,&nbsp;text,&nbsp;or&nbsp;element_function.<br>
+Exactly&nbsp;one&nbsp;of&nbsp;these&nbsp;arguments&nbsp;must&nbsp;be&nbsp;specified.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;selector:&nbsp;A&nbsp;CSS&nbsp;selector&nbsp;describing&nbsp;the&nbsp;element.<br>
+&nbsp;&nbsp;text:&nbsp;The&nbsp;element&nbsp;must&nbsp;contain&nbsp;this&nbsp;exact&nbsp;text.<br>
+&nbsp;&nbsp;element_function:&nbsp;A&nbsp;JavaScript&nbsp;function&nbsp;(as&nbsp;string)&nbsp;that&nbsp;is&nbsp;used<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;to&nbsp;retrieve&nbsp;the&nbsp;element.&nbsp;For&nbsp;example:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;'(function()&nbsp;{&nbsp;return&nbsp;foo.element;&nbsp;})()'.<br>
+&nbsp;&nbsp;timeout_in_seconds:&nbsp;The&nbsp;timeout&nbsp;in&nbsp;seconds&nbsp;(defaults&nbsp;to&nbsp;60).</tt></dd></dl>
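+<p>For example, a sketch pairing <a href="#ActionRunner-WaitForElement">WaitForElement</a> with a follow-up action; the '#results' selector is illustrative:</p>
+<p><tt>def&nbsp;RunPageInteractions(self,&nbsp;action_runner):<br>
+&nbsp;&nbsp;#&nbsp;Block&nbsp;until&nbsp;the&nbsp;(hypothetical)&nbsp;element&nbsp;appears&nbsp;(up&nbsp;to&nbsp;30&nbsp;s),&nbsp;then&nbsp;scroll&nbsp;it.<br>
+&nbsp;&nbsp;action_runner.WaitForElement(selector='#results',&nbsp;timeout_in_seconds=30)<br>
+&nbsp;&nbsp;action_runner.ScrollElement(selector='#results',&nbsp;direction='down')</tt></p>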
+
+<dl><dt><a name="ActionRunner-WaitForJavaScriptCondition"><strong>WaitForJavaScriptCondition</strong></a>(self, condition, timeout_in_seconds<font color="#909090">=60</font>)</dt><dd><tt>Wait&nbsp;for&nbsp;a&nbsp;JavaScript&nbsp;condition&nbsp;to&nbsp;become&nbsp;true.<br>
+&nbsp;<br>
+Example:&nbsp;runner.<a href="#ActionRunner-WaitForJavaScriptCondition">WaitForJavaScriptCondition</a>('window.foo&nbsp;==&nbsp;10');<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;condition:&nbsp;The&nbsp;JavaScript&nbsp;condition&nbsp;(as&nbsp;string).<br>
+&nbsp;&nbsp;timeout_in_seconds:&nbsp;The&nbsp;timeout&nbsp;in&nbsp;seconds&nbsp;(defaults&nbsp;to&nbsp;60).</tt></dd></dl>
+
+<dl><dt><a name="ActionRunner-WaitForNavigate"><strong>WaitForNavigate</strong></a>(self, timeout_in_seconds_seconds<font color="#909090">=60</font>)</dt></dl>
+
+<dl><dt><a name="ActionRunner-__init__"><strong>__init__</strong></a>(self, tab, skip_waits<font color="#909090">=False</font>)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>tab</strong></dt>
+<dd><tt>Returns&nbsp;the&nbsp;tab&nbsp;on&nbsp;which&nbsp;actions&nbsp;are&nbsp;performed.</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Interaction">class <strong>Interaction</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="Interaction-Begin"><strong>Begin</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="Interaction-End"><strong>End</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="Interaction-__enter__"><strong>__enter__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="Interaction-__exit__"><strong>__exit__</strong></a>(self, exc_type, exc_value, traceback)</dt></dl>
+
+<dl><dt><a name="Interaction-__init__"><strong>__init__</strong></a>(self, action_runner, label, flags)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>GESTURE_SOURCE_DEFAULT</strong> = 'DEFAULT'<br>
+<strong>SUPPORTED_GESTURE_SOURCES</strong> = ('DEFAULT', 'MOUSE', 'TOUCH')</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.page.html b/catapult/telemetry/docs/pydoc/telemetry.page.html
new file mode 100644
index 0000000..e403666
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.page.html
@@ -0,0 +1,142 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.page</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.page</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/page/__init__.py">telemetry/page/__init__.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.page.action_runner.html">action_runner</a><br>
+<a href="telemetry.page.page.html">page</a><br>
+</td><td width="25%" valign=top><a href="telemetry.page.page_run_end_to_end_unittest.html">page_run_end_to_end_unittest</a><br>
+<a href="telemetry.page.page_test.html">page_test</a><br>
+</td><td width="25%" valign=top><a href="telemetry.page.page_test_unittest.html">page_test_unittest</a><br>
+<a href="telemetry.page.page_unittest.html">page_unittest</a><br>
+</td><td width="25%" valign=top><a href="telemetry.page.shared_page_state.html">shared_page_state</a><br>
+<a href="telemetry.page.shared_page_state_unittest.html">shared_page_state_unittest</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.story.story.html#Story">telemetry.story.story.Story</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.page.html#Page">Page</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Page">class <strong>Page</strong></a>(<a href="telemetry.story.story.html#Story">telemetry.story.story.Story</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.page.html#Page">Page</a></dd>
+<dd><a href="telemetry.story.story.html#Story">telemetry.story.story.Story</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="Page-AddCustomizeBrowserOptions"><strong>AddCustomizeBrowserOptions</strong></a>(self, options)</dt><dd><tt>Inherit&nbsp;page&nbsp;overrides&nbsp;this&nbsp;to&nbsp;add&nbsp;customized&nbsp;browser&nbsp;options.</tt></dd></dl>
+
+<dl><dt><a name="Page-AsDict"><strong>AsDict</strong></a>(self)</dt><dd><tt>Converts&nbsp;a&nbsp;page&nbsp;object&nbsp;to&nbsp;a&nbsp;dict&nbsp;suitable&nbsp;for&nbsp;JSON&nbsp;output.</tt></dd></dl>
+
+<dl><dt><a name="Page-GetSyntheticDelayCategories"><strong>GetSyntheticDelayCategories</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="Page-Run"><strong>Run</strong></a>(self, shared_state)</dt></dl>
+
+<dl><dt><a name="Page-RunNavigateSteps"><strong>RunNavigateSteps</strong></a>(self, action_runner)</dt></dl>
+
+<dl><dt><a name="Page-RunPageInteractions"><strong>RunPageInteractions</strong></a>(self, action_runner)</dt><dd><tt>Override&nbsp;this&nbsp;to&nbsp;define&nbsp;custom&nbsp;interactions&nbsp;with&nbsp;the&nbsp;page.<br>
+e.g.:<br>
+&nbsp;&nbsp;def&nbsp;<a href="#Page-RunPageInteractions">RunPageInteractions</a>(self,&nbsp;action_runner):<br>
+&nbsp;&nbsp;&nbsp;&nbsp;action_runner.ScrollPage()<br>
+&nbsp;&nbsp;&nbsp;&nbsp;action_runner.TapElement(text='Next')</tt></dd></dl>
+
+<dl><dt><a name="Page-__cmp__"><strong>__cmp__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="Page-__init__"><strong>__init__</strong></a>(self, url, page_set<font color="#909090">=None</font>, base_dir<font color="#909090">=None</font>, name<font color="#909090">=''</font>, credentials_path<font color="#909090">=None</font>, credentials_bucket<font color="#909090">='chromium-telemetry'</font>, labels<font color="#909090">=None</font>, startup_url<font color="#909090">=''</font>, make_javascript_deterministic<font color="#909090">=True</font>, shared_page_state_class<font color="#909090">=&lt;class 'telemetry.page.shared_page_state.SharedPageState'&gt;</font>)</dt></dl>
+
+<dl><dt><a name="Page-__lt__"><strong>__lt__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="Page-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>base_dir</strong></dt>
+</dl>
+<dl><dt><strong>credentials_path</strong></dt>
+</dl>
+<dl><dt><strong>display_name</strong></dt>
+</dl>
+<dl><dt><strong>file_path</strong></dt>
+<dd><tt>Returns&nbsp;the&nbsp;path&nbsp;of&nbsp;the&nbsp;file,&nbsp;stripping&nbsp;the&nbsp;scheme&nbsp;and&nbsp;query&nbsp;string.</tt></dd>
+</dl>
+<dl><dt><strong>file_path_url</strong></dt>
+<dd><tt>Returns&nbsp;the&nbsp;file&nbsp;path,&nbsp;including&nbsp;the&nbsp;params,&nbsp;query,&nbsp;and&nbsp;fragment.</tt></dd>
+</dl>
+<dl><dt><strong>file_path_url_with_scheme</strong></dt>
+</dl>
+<dl><dt><strong>is_file</strong></dt>
+<dd><tt>Returns&nbsp;True&nbsp;iff&nbsp;this&nbsp;URL&nbsp;points&nbsp;to&nbsp;a&nbsp;file.</tt></dd>
+</dl>
+<dl><dt><strong>page_set</strong></dt>
+</dl>
+<dl><dt><strong>serving_dir</strong></dt>
+</dl>
+<dl><dt><strong>startup_url</strong></dt>
+</dl>
+<dl><dt><strong>story_set</strong></dt>
+</dl>
+<dl><dt><strong>url</strong></dt>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.story.story.html#Story">telemetry.story.story.Story</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>file_safe_name</strong></dt>
+<dd><tt>A&nbsp;version&nbsp;of&nbsp;display_name&nbsp;that's&nbsp;safe&nbsp;to&nbsp;use&nbsp;as&nbsp;a&nbsp;filename.<br>
+&nbsp;<br>
+The&nbsp;default&nbsp;implementation&nbsp;sanitizes&nbsp;special&nbsp;characters&nbsp;with&nbsp;underscores,<br>
+but&nbsp;it's&nbsp;okay&nbsp;to&nbsp;override&nbsp;it&nbsp;with&nbsp;a&nbsp;more&nbsp;specific&nbsp;implementation&nbsp;in<br>
+subclasses.</tt></dd>
+</dl>
+<dl><dt><strong>id</strong></dt>
+</dl>
+<dl><dt><strong>is_local</strong></dt>
+<dd><tt>Returns&nbsp;True&nbsp;iff&nbsp;this&nbsp;story&nbsp;does&nbsp;not&nbsp;require&nbsp;network.</tt></dd>
+</dl>
+<dl><dt><strong>labels</strong></dt>
+</dl>
+<dl><dt><strong>make_javascript_deterministic</strong></dt>
+</dl>
+<dl><dt><strong>name</strong></dt>
+</dl>
+<dl><dt><strong>shared_state_class</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.page.page.html b/catapult/telemetry/docs/pydoc/telemetry.page.page.html
new file mode 100644
index 0000000..8ef184a
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.page.page.html
@@ -0,0 +1,25 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.page.page</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.page.html"><font color="#ffffff">page</font></a>.page</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/page/page.py">telemetry/page/page.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.page.html">telemetry.page</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.page.page_test.html b/catapult/telemetry/docs/pydoc/telemetry.page.page_test.html
new file mode 100644
index 0000000..b335e1c
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.page.page_test.html
@@ -0,0 +1,350 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.page.page_test</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.page.html"><font color="#ffffff">page</font></a>.page_test</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/page/page_test.py">telemetry/page/page_test.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.page.action_runner.html">telemetry.page.action_runner</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.page.page_test.html#PageTest">PageTest</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.page.page_test.html#MultiTabTestAppCrashError">MultiTabTestAppCrashError</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.page.page_test.html#TestNotSupportedOnPlatformError">TestNotSupportedOnPlatformError</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.story_test.html#Failure">telemetry.web_perf.story_test.Failure</a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.page.page_test.html#MeasurementFailure">MeasurementFailure</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MeasurementFailure">class <strong>MeasurementFailure</strong></a>(<a href="telemetry.web_perf.story_test.html#Failure">telemetry.web_perf.story_test.Failure</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt><a href="#PageTest">PageTest</a>&nbsp;<a href="exceptions.html#Exception">Exception</a>&nbsp;raised&nbsp;when&nbsp;an&nbsp;undesired&nbsp;but&nbsp;designed-for&nbsp;problem&nbsp;occurs.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.page.page_test.html#MeasurementFailure">MeasurementFailure</a></dd>
+<dd><a href="telemetry.web_perf.story_test.html#Failure">telemetry.web_perf.story_test.Failure</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.web_perf.story_test.html#Failure">telemetry.web_perf.story_test.Failure</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="MeasurementFailure-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#MeasurementFailure-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#MeasurementFailure-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="MeasurementFailure-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#MeasurementFailure-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="MeasurementFailure-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#MeasurementFailure-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="MeasurementFailure-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#MeasurementFailure-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="MeasurementFailure-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#MeasurementFailure-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="MeasurementFailure-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="MeasurementFailure-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#MeasurementFailure-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="MeasurementFailure-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#MeasurementFailure-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="MeasurementFailure-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="MeasurementFailure-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#MeasurementFailure-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="MeasurementFailure-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MultiTabTestAppCrashError">class <strong>MultiTabTestAppCrashError</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt><a href="#PageTest">PageTest</a>&nbsp;<a href="exceptions.html#Exception">Exception</a>&nbsp;raised&nbsp;after&nbsp;browser&nbsp;or&nbsp;tab&nbsp;crash&nbsp;for&nbsp;multi-tab&nbsp;tests.<br>
+&nbsp;<br>
+Used&nbsp;to&nbsp;abort&nbsp;the&nbsp;test&nbsp;rather&nbsp;than&nbsp;try&nbsp;to&nbsp;recover&nbsp;from&nbsp;an&nbsp;unknown&nbsp;state.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.page.page_test.html#MultiTabTestAppCrashError">MultiTabTestAppCrashError</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="MultiTabTestAppCrashError-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#MultiTabTestAppCrashError-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#MultiTabTestAppCrashError-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="MultiTabTestAppCrashError-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#MultiTabTestAppCrashError-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="MultiTabTestAppCrashError-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#MultiTabTestAppCrashError-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="MultiTabTestAppCrashError-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#MultiTabTestAppCrashError-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="MultiTabTestAppCrashError-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#MultiTabTestAppCrashError-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="MultiTabTestAppCrashError-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="MultiTabTestAppCrashError-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#MultiTabTestAppCrashError-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="MultiTabTestAppCrashError-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#MultiTabTestAppCrashError-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="MultiTabTestAppCrashError-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="MultiTabTestAppCrashError-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#MultiTabTestAppCrashError-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="MultiTabTestAppCrashError-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PageTest">class <strong>PageTest</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;class&nbsp;styled&nbsp;on&nbsp;unittest.TestCase&nbsp;for&nbsp;creating&nbsp;page-specific&nbsp;tests.<br>
+&nbsp;<br>
+Tests&nbsp;should&nbsp;override&nbsp;ValidateAndMeasurePage&nbsp;to&nbsp;perform&nbsp;test<br>
+validation&nbsp;and&nbsp;page&nbsp;measurement&nbsp;as&nbsp;necessary.<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;class&nbsp;BodyChildElementMeasurement(<a href="#PageTest">PageTest</a>):<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;def&nbsp;<a href="#PageTest-ValidateAndMeasurePage">ValidateAndMeasurePage</a>(self,&nbsp;page,&nbsp;tab,&nbsp;results):<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;body_child_count&nbsp;=&nbsp;tab.EvaluateJavaScript(<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;'document.body.children.length')<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;results.AddValue(scalar.ScalarValue(<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;page,&nbsp;'body_children',&nbsp;'count',&nbsp;body_child_count))<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="PageTest-CustomizeBrowserOptions"><strong>CustomizeBrowserOptions</strong></a>(self, options)</dt><dd><tt>Override&nbsp;to&nbsp;add&nbsp;test-specific&nbsp;options&nbsp;to&nbsp;the&nbsp;BrowserOptions&nbsp;<a href="__builtin__.html#object">object</a></tt></dd></dl>
+
+<dl><dt><a name="PageTest-DidNavigateToPage"><strong>DidNavigateToPage</strong></a>(self, page, tab)</dt><dd><tt>Override&nbsp;to&nbsp;do&nbsp;operations&nbsp;right&nbsp;after&nbsp;the&nbsp;page&nbsp;is&nbsp;navigated&nbsp;and&nbsp;after<br>
+all&nbsp;waiting&nbsp;for&nbsp;completion&nbsp;has&nbsp;occurred.</tt></dd></dl>
+
+<dl><dt><a name="PageTest-DidRunPage"><strong>DidRunPage</strong></a>(self, platform)</dt><dd><tt>Called&nbsp;after&nbsp;the&nbsp;test&nbsp;run&nbsp;method&nbsp;was&nbsp;run,&nbsp;even&nbsp;if&nbsp;it&nbsp;failed.</tt></dd></dl>
+
+<dl><dt><a name="PageTest-DidStartBrowser"><strong>DidStartBrowser</strong></a>(self, browser)</dt><dd><tt>Override&nbsp;to&nbsp;customize&nbsp;the&nbsp;browser&nbsp;right&nbsp;after&nbsp;it&nbsp;has&nbsp;launched.</tt></dd></dl>
+
+<dl><dt><a name="PageTest-RestartBrowserBeforeEachPage"><strong>RestartBrowserBeforeEachPage</strong></a>(self)</dt><dd><tt>Should&nbsp;the&nbsp;browser&nbsp;be&nbsp;restarted&nbsp;for&nbsp;the&nbsp;page?<br>
+&nbsp;<br>
+This&nbsp;returns&nbsp;true&nbsp;if&nbsp;the&nbsp;test&nbsp;needs&nbsp;to&nbsp;unconditionally&nbsp;restart&nbsp;the<br>
+browser&nbsp;for&nbsp;each&nbsp;page.&nbsp;It&nbsp;may&nbsp;be&nbsp;called&nbsp;before&nbsp;the&nbsp;browser&nbsp;is&nbsp;started.</tt></dd></dl>
+
+<dl><dt><a name="PageTest-RunNavigateSteps"><strong>RunNavigateSteps</strong></a>(self, page, tab)</dt><dd><tt>Navigates&nbsp;the&nbsp;tab&nbsp;to&nbsp;the&nbsp;page&nbsp;URL&nbsp;attribute.<br>
+&nbsp;<br>
+Runs&nbsp;the&nbsp;'navigate_steps'&nbsp;page&nbsp;attribute&nbsp;as&nbsp;a&nbsp;compound&nbsp;action.</tt></dd></dl>
+
+<dl><dt><a name="PageTest-SetOptions"><strong>SetOptions</strong></a>(self, options)</dt><dd><tt>Sets&nbsp;the&nbsp;BrowserFinderOptions&nbsp;instance&nbsp;to&nbsp;use.</tt></dd></dl>
+
+<dl><dt><a name="PageTest-StopBrowserAfterPage"><strong>StopBrowserAfterPage</strong></a>(self, browser, page)</dt><dd><tt>Should&nbsp;the&nbsp;browser&nbsp;be&nbsp;stopped&nbsp;after&nbsp;the&nbsp;page&nbsp;is&nbsp;run?<br>
+&nbsp;<br>
+This&nbsp;is&nbsp;called&nbsp;after&nbsp;a&nbsp;page&nbsp;is&nbsp;run&nbsp;to&nbsp;decide&nbsp;whether&nbsp;the&nbsp;browser&nbsp;needs&nbsp;to<br>
+be&nbsp;stopped&nbsp;to&nbsp;clean&nbsp;up&nbsp;its&nbsp;state.&nbsp;If&nbsp;it&nbsp;is&nbsp;stopped,&nbsp;then&nbsp;it&nbsp;will&nbsp;be<br>
+restarted&nbsp;to&nbsp;run&nbsp;the&nbsp;next&nbsp;page.<br>
+&nbsp;<br>
+A&nbsp;test&nbsp;that&nbsp;overrides&nbsp;this&nbsp;can&nbsp;look&nbsp;at&nbsp;both&nbsp;the&nbsp;page&nbsp;and&nbsp;the&nbsp;browser&nbsp;to<br>
+decide&nbsp;whether&nbsp;it&nbsp;needs&nbsp;to&nbsp;stop&nbsp;the&nbsp;browser.</tt></dd></dl>
+
+<dl><dt><a name="PageTest-TabForPage"><strong>TabForPage</strong></a>(self, page, browser)</dt><dd><tt>Override&nbsp;to&nbsp;select&nbsp;a&nbsp;different&nbsp;tab&nbsp;for&nbsp;the&nbsp;page.&nbsp;&nbsp;For&nbsp;instance,&nbsp;to<br>
+create&nbsp;a&nbsp;new&nbsp;tab&nbsp;for&nbsp;every&nbsp;page,&nbsp;return&nbsp;browser.tabs.New().</tt></dd></dl>
+
+<dl><dt><a name="PageTest-ValidateAndMeasurePage"><strong>ValidateAndMeasurePage</strong></a>(self, page, tab, results)</dt><dd><tt>Override&nbsp;to&nbsp;check&nbsp;test&nbsp;assertions&nbsp;and&nbsp;perform&nbsp;measurement.<br>
+&nbsp;<br>
+When&nbsp;adding&nbsp;measurement&nbsp;results,&nbsp;call&nbsp;results.AddValue(...)&nbsp;for<br>
+each&nbsp;result.&nbsp;Raise&nbsp;an&nbsp;exception&nbsp;or&nbsp;add&nbsp;a&nbsp;failure.FailureValue&nbsp;on<br>
+failure.&nbsp;page_test.py&nbsp;also&nbsp;provides&nbsp;several&nbsp;base&nbsp;exception&nbsp;classes<br>
+to&nbsp;use.<br>
+&nbsp;<br>
+Prefer&nbsp;metric&nbsp;value&nbsp;names&nbsp;that&nbsp;are&nbsp;in&nbsp;accordance&nbsp;with&nbsp;python<br>
+variable&nbsp;style.&nbsp;e.g.,&nbsp;metric_name.&nbsp;The&nbsp;name&nbsp;'url'&nbsp;must&nbsp;not&nbsp;be&nbsp;used.<br>
+&nbsp;<br>
+Put&nbsp;together:<br>
+&nbsp;&nbsp;def&nbsp;<a href="#PageTest-ValidateAndMeasurePage">ValidateAndMeasurePage</a>(self,&nbsp;page,&nbsp;tab,&nbsp;results):<br>
+&nbsp;&nbsp;&nbsp;&nbsp;res&nbsp;=&nbsp;tab.EvaluateJavaScript('2+2')<br>
+&nbsp;&nbsp;&nbsp;&nbsp;if&nbsp;res&nbsp;!=&nbsp;4:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;raise&nbsp;<a href="exceptions.html#Exception">Exception</a>('Oh,&nbsp;wow.')<br>
+&nbsp;&nbsp;&nbsp;&nbsp;results.AddValue(scalar.ScalarValue(<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;page,&nbsp;'two_plus_two',&nbsp;'count',&nbsp;res))<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;page:&nbsp;A&nbsp;telemetry.page.Page&nbsp;instance.<br>
+&nbsp;&nbsp;tab:&nbsp;A&nbsp;telemetry.core.Tab&nbsp;instance.<br>
+&nbsp;&nbsp;results:&nbsp;A&nbsp;telemetry.results.PageTestResults&nbsp;instance.</tt></dd></dl>
+
+<dl><dt><a name="PageTest-WillNavigateToPage"><strong>WillNavigateToPage</strong></a>(self, page, tab)</dt><dd><tt>Override&nbsp;to&nbsp;do&nbsp;operations&nbsp;before&nbsp;the&nbsp;page&nbsp;is&nbsp;navigated,&nbsp;notably&nbsp;Telemetry<br>
+will&nbsp;already&nbsp;have&nbsp;performed&nbsp;the&nbsp;following&nbsp;operations&nbsp;on&nbsp;the&nbsp;browser&nbsp;before<br>
+calling&nbsp;this&nbsp;function:<br>
+*&nbsp;Ensure&nbsp;only&nbsp;one&nbsp;tab&nbsp;is&nbsp;open.<br>
+*&nbsp;Call&nbsp;WaitForDocumentReadyStateToComplete&nbsp;on&nbsp;the&nbsp;tab.</tt></dd></dl>
+
+<dl><dt><a name="PageTest-WillStartBrowser"><strong>WillStartBrowser</strong></a>(self, platform)</dt><dd><tt>Override&nbsp;to&nbsp;manipulate&nbsp;the&nbsp;browser&nbsp;environment&nbsp;before&nbsp;it&nbsp;launches.</tt></dd></dl>
+
+<dl><dt><a name="PageTest-__init__"><strong>__init__</strong></a>(self, needs_browser_restart_after_each_page<font color="#909090">=False</font>, clear_cache_before_each_run<font color="#909090">=False</font>)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>clear_cache_before_each_run</strong></dt>
+<dd><tt>When&nbsp;set&nbsp;to&nbsp;True,&nbsp;the&nbsp;browser's&nbsp;disk&nbsp;and&nbsp;memory&nbsp;cache&nbsp;will&nbsp;be&nbsp;cleared<br>
+before&nbsp;each&nbsp;run.</tt></dd>
+</dl>
+<dl><dt><strong>close_tabs_before_run</strong></dt>
+<dd><tt>When&nbsp;set&nbsp;to&nbsp;True,&nbsp;all&nbsp;tabs&nbsp;are&nbsp;closed&nbsp;before&nbsp;running&nbsp;the&nbsp;test&nbsp;for&nbsp;the<br>
+first&nbsp;time.</tt></dd>
+</dl>
+<dl><dt><strong>is_multi_tab_test</strong></dt>
+<dd><tt>Returns&nbsp;True&nbsp;if&nbsp;the&nbsp;test&nbsp;opens&nbsp;multiple&nbsp;tabs.<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;test&nbsp;overrides&nbsp;TabForPage,&nbsp;it&nbsp;is&nbsp;deemed&nbsp;a&nbsp;multi-tab&nbsp;test.<br>
+Multi-tab&nbsp;tests&nbsp;do&nbsp;not&nbsp;retry&nbsp;after&nbsp;tab&nbsp;or&nbsp;browser&nbsp;crashes,&nbsp;whereas<br>
+single-tab&nbsp;tests&nbsp;do.&nbsp;That&nbsp;is&nbsp;because&nbsp;the&nbsp;state&nbsp;of&nbsp;multi-tab&nbsp;tests<br>
+(e.g.,&nbsp;how&nbsp;many&nbsp;tabs&nbsp;are&nbsp;open,&nbsp;etc.)&nbsp;is&nbsp;unknown&nbsp;after&nbsp;crashes.</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TestNotSupportedOnPlatformError">class <strong>TestNotSupportedOnPlatformError</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt><a href="#PageTest">PageTest</a>&nbsp;<a href="exceptions.html#Exception">Exception</a>&nbsp;raised&nbsp;when&nbsp;a&nbsp;required&nbsp;feature&nbsp;is&nbsp;unavailable.<br>
+&nbsp;<br>
+The&nbsp;feature&nbsp;required&nbsp;to&nbsp;run&nbsp;the&nbsp;test&nbsp;could&nbsp;be&nbsp;part&nbsp;of&nbsp;the&nbsp;platform,<br>
+hardware&nbsp;configuration,&nbsp;or&nbsp;browser.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.page.page_test.html#TestNotSupportedOnPlatformError">TestNotSupportedOnPlatformError</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="TestNotSupportedOnPlatformError-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#TestNotSupportedOnPlatformError-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#TestNotSupportedOnPlatformError-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="TestNotSupportedOnPlatformError-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#TestNotSupportedOnPlatformError-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="TestNotSupportedOnPlatformError-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#TestNotSupportedOnPlatformError-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="TestNotSupportedOnPlatformError-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#TestNotSupportedOnPlatformError-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="TestNotSupportedOnPlatformError-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#TestNotSupportedOnPlatformError-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="TestNotSupportedOnPlatformError-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="TestNotSupportedOnPlatformError-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#TestNotSupportedOnPlatformError-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="TestNotSupportedOnPlatformError-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#TestNotSupportedOnPlatformError-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="TestNotSupportedOnPlatformError-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="TestNotSupportedOnPlatformError-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#TestNotSupportedOnPlatformError-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="TestNotSupportedOnPlatformError-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
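The PageTest hooks documented above compose into a small measurement class. Below is a minimal sketch, expanded from the example embedded in the ValidateAndMeasurePage docstring; the telemetry.value.scalar import path is an assumption, since the generated page only references scalar.ScalarValue by name.

    # Minimal PageTest sketch (illustrative only), expanded from the
    # ValidateAndMeasurePage docstring above. The telemetry.value.scalar
    # import path is assumed, not stated in the generated docs.
    from telemetry.page import page_test
    from telemetry.value import scalar

    class TwoPlusTwoMeasurement(page_test.PageTest):
      """Evaluates a trivial expression in the page and records the result."""

      def ValidateAndMeasurePage(self, page, tab, results):
        res = tab.EvaluateJavaScript('2+2')
        if res != 4:
          raise Exception('Oh, wow.')
        # Record one scalar value per page; the name follows python
        # variable style and avoids the reserved name 'url'.
        results.AddValue(scalar.ScalarValue(
            page, 'two_plus_two', 'count', res))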
diff --git a/catapult/telemetry/docs/pydoc/telemetry.page.shared_page_state.html b/catapult/telemetry/docs/pydoc/telemetry.page.shared_page_state.html
new file mode 100644
index 0000000..326d143
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.page.shared_page_state.html
@@ -0,0 +1,385 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.page.shared_page_state</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.page.html"><font color="#ffffff">page</font></a>.shared_page_state</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/page/shared_page_state.py">telemetry/page/shared_page_state.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.browser.browser_finder.html">telemetry.internal.browser.browser_finder</a><br>
+<a href="telemetry.internal.browser.browser_finder_exceptions.html">telemetry.internal.browser.browser_finder_exceptions</a><br>
+<a href="telemetry.internal.browser.browser_info.html">telemetry.internal.browser.browser_info</a><br>
+<a href="catapult_base.cloud_storage.html">catapult_base.cloud_storage</a><br>
+<a href="telemetry.decorators.html">telemetry.decorators</a><br>
+<a href="telemetry.internal.util.exception_formatter.html">telemetry.internal.util.exception_formatter</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+<a href="telemetry.internal.util.file_handle.html">telemetry.internal.util.file_handle</a><br>
+<a href="telemetry.util.image_util.html">telemetry.util.image_util</a><br>
+<a href="logging.html">logging</a><br>
+<a href="os.html">os</a><br>
+<a href="telemetry.page.page_test.html">telemetry.page.page_test</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.platform.profiler.profiler_finder.html">telemetry.internal.platform.profiler.profiler_finder</a><br>
+<a href="shutil.html">shutil</a><br>
+<a href="telemetry.story.html">telemetry.story</a><br>
+<a href="sys.html">sys</a><br>
+<a href="tempfile.html">tempfile</a><br>
+<a href="telemetry.web_perf.timeline_based_measurement.html">telemetry.web_perf.timeline_based_measurement</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.util.html">telemetry.core.util</a><br>
+<a href="telemetry.util.wpr_modes.html">telemetry.util.wpr_modes</a><br>
+<a href="zipfile.html">zipfile</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.story.shared_state.html#SharedState">telemetry.story.shared_state.SharedState</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.page.shared_page_state.html#SharedPageState">SharedPageState</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.page.shared_page_state.html#Shared10InchTabletPageState">Shared10InchTabletPageState</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.page.shared_page_state.html#SharedDesktopPageState">SharedDesktopPageState</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.page.shared_page_state.html#SharedMobilePageState">SharedMobilePageState</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.page.shared_page_state.html#SharedTabletPageState">SharedTabletPageState</a>
+</font></dt></dl>
+</dd>
+</dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Shared10InchTabletPageState">class <strong>Shared10InchTabletPageState</strong></a>(<a href="telemetry.page.shared_page_state.html#SharedPageState">SharedPageState</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.page.shared_page_state.html#Shared10InchTabletPageState">Shared10InchTabletPageState</a></dd>
+<dd><a href="telemetry.page.shared_page_state.html#SharedPageState">SharedPageState</a></dd>
+<dd><a href="telemetry.story.shared_state.html#SharedState">telemetry.story.shared_state.SharedState</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.page.shared_page_state.html#SharedPageState">SharedPageState</a>:<br>
+<dl><dt><a name="Shared10InchTabletPageState-CanRunOnBrowser"><strong>CanRunOnBrowser</strong></a>(self, browser_info, page)</dt><dd><tt>Override&nbsp;this&nbsp;to&nbsp;return&nbsp;whether&nbsp;the&nbsp;browser&nbsp;brought&nbsp;up&nbsp;by&nbsp;this&nbsp;state<br>
+instance&nbsp;is&nbsp;suitable&nbsp;for&nbsp;running&nbsp;the&nbsp;given&nbsp;page.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;browser_info:&nbsp;an&nbsp;instance&nbsp;of&nbsp;telemetry.core.browser_info.BrowserInfo<br>
+&nbsp;&nbsp;page:&nbsp;an&nbsp;instance&nbsp;of&nbsp;telemetry.page.Page</tt></dd></dl>
+
+<dl><dt><a name="Shared10InchTabletPageState-CanRunStory"><strong>CanRunStory</strong></a>(self, page)</dt></dl>
+
+<dl><dt><a name="Shared10InchTabletPageState-DidRunStory"><strong>DidRunStory</strong></a>(self, results)</dt></dl>
+
+<dl><dt><a name="Shared10InchTabletPageState-GetPregeneratedProfileArchiveDir"><strong>GetPregeneratedProfileArchiveDir</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="Shared10InchTabletPageState-RunStory"><strong>RunStory</strong></a>(self, results)</dt></dl>
+
+<dl><dt><a name="Shared10InchTabletPageState-SetPregeneratedProfileArchiveDir"><strong>SetPregeneratedProfileArchiveDir</strong></a>(self, archive_path)</dt><dd><tt>Benchmarks&nbsp;can&nbsp;set&nbsp;a&nbsp;pre-generated&nbsp;profile&nbsp;archive&nbsp;to&nbsp;indicate&nbsp;that&nbsp;when<br>
+Chrome&nbsp;is&nbsp;launched,&nbsp;it&nbsp;should&nbsp;have&nbsp;a&nbsp;--user-data-dir&nbsp;set&nbsp;to&nbsp;the<br>
+pregenerated&nbsp;profile,&nbsp;rather&nbsp;than&nbsp;to&nbsp;an&nbsp;empty&nbsp;profile.<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;benchmark&nbsp;is&nbsp;invoked&nbsp;with&nbsp;the&nbsp;option&nbsp;--profile-dir=&lt;dir&gt;,&nbsp;that<br>
+option&nbsp;overrides&nbsp;this&nbsp;value.</tt></dd></dl>
+
+<dl><dt><a name="Shared10InchTabletPageState-TearDownState"><strong>TearDownState</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="Shared10InchTabletPageState-WillRunStory"><strong>WillRunStory</strong></a>(self, page)</dt></dl>
+
+<dl><dt><a name="Shared10InchTabletPageState-__init__"><strong>__init__</strong></a>(self, test, finder_options, story_set)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.page.shared_page_state.html#SharedPageState">SharedPageState</a>:<br>
+<dl><dt><strong>browser</strong></dt>
+</dl>
+<dl><dt><strong>current_page</strong></dt>
+</dl>
+<dl><dt><strong>current_tab</strong></dt>
+</dl>
+<dl><dt><strong>page_test</strong></dt>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.story.shared_state.html#SharedState">telemetry.story.shared_state.SharedState</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="SharedDesktopPageState">class <strong>SharedDesktopPageState</strong></a>(<a href="telemetry.page.shared_page_state.html#SharedPageState">SharedPageState</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.page.shared_page_state.html#SharedDesktopPageState">SharedDesktopPageState</a></dd>
+<dd><a href="telemetry.page.shared_page_state.html#SharedPageState">SharedPageState</a></dd>
+<dd><a href="telemetry.story.shared_state.html#SharedState">telemetry.story.shared_state.SharedState</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.page.shared_page_state.html#SharedPageState">SharedPageState</a>:<br>
+<dl><dt><a name="SharedDesktopPageState-CanRunOnBrowser"><strong>CanRunOnBrowser</strong></a>(self, browser_info, page)</dt><dd><tt>Override&nbsp;this&nbsp;to&nbsp;return&nbsp;whether&nbsp;the&nbsp;browser&nbsp;brought&nbsp;up&nbsp;by&nbsp;this&nbsp;state<br>
+instance&nbsp;is&nbsp;suitable&nbsp;for&nbsp;running&nbsp;the&nbsp;given&nbsp;page.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;browser_info:&nbsp;an&nbsp;instance&nbsp;of&nbsp;telemetry.core.browser_info.BrowserInfo<br>
+&nbsp;&nbsp;page:&nbsp;an&nbsp;instance&nbsp;of&nbsp;telemetry.page.Page</tt></dd></dl>
+
+<dl><dt><a name="SharedDesktopPageState-CanRunStory"><strong>CanRunStory</strong></a>(self, page)</dt></dl>
+
+<dl><dt><a name="SharedDesktopPageState-DidRunStory"><strong>DidRunStory</strong></a>(self, results)</dt></dl>
+
+<dl><dt><a name="SharedDesktopPageState-GetPregeneratedProfileArchiveDir"><strong>GetPregeneratedProfileArchiveDir</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="SharedDesktopPageState-RunStory"><strong>RunStory</strong></a>(self, results)</dt></dl>
+
+<dl><dt><a name="SharedDesktopPageState-SetPregeneratedProfileArchiveDir"><strong>SetPregeneratedProfileArchiveDir</strong></a>(self, archive_path)</dt><dd><tt>Benchmarks&nbsp;can&nbsp;set&nbsp;a&nbsp;pre-generated&nbsp;profile&nbsp;archive&nbsp;to&nbsp;indicate&nbsp;that&nbsp;when<br>
+Chrome&nbsp;is&nbsp;launched,&nbsp;it&nbsp;should&nbsp;have&nbsp;a&nbsp;--user-data-dir&nbsp;set&nbsp;to&nbsp;the<br>
+pregenerated&nbsp;profile,&nbsp;rather&nbsp;than&nbsp;to&nbsp;an&nbsp;empty&nbsp;profile.<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;benchmark&nbsp;is&nbsp;invoked&nbsp;with&nbsp;the&nbsp;option&nbsp;--profile-dir=&lt;dir&gt;,&nbsp;that<br>
+option&nbsp;overrides&nbsp;this&nbsp;value.</tt></dd></dl>
+
+<dl><dt><a name="SharedDesktopPageState-TearDownState"><strong>TearDownState</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="SharedDesktopPageState-WillRunStory"><strong>WillRunStory</strong></a>(self, page)</dt></dl>
+
+<dl><dt><a name="SharedDesktopPageState-__init__"><strong>__init__</strong></a>(self, test, finder_options, story_set)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.page.shared_page_state.html#SharedPageState">SharedPageState</a>:<br>
+<dl><dt><strong>browser</strong></dt>
+</dl>
+<dl><dt><strong>current_page</strong></dt>
+</dl>
+<dl><dt><strong>current_tab</strong></dt>
+</dl>
+<dl><dt><strong>page_test</strong></dt>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.story.shared_state.html#SharedState">telemetry.story.shared_state.SharedState</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="SharedMobilePageState">class <strong>SharedMobilePageState</strong></a>(<a href="telemetry.page.shared_page_state.html#SharedPageState">SharedPageState</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.page.shared_page_state.html#SharedMobilePageState">SharedMobilePageState</a></dd>
+<dd><a href="telemetry.page.shared_page_state.html#SharedPageState">SharedPageState</a></dd>
+<dd><a href="telemetry.story.shared_state.html#SharedState">telemetry.story.shared_state.SharedState</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.page.shared_page_state.html#SharedPageState">SharedPageState</a>:<br>
+<dl><dt><a name="SharedMobilePageState-CanRunOnBrowser"><strong>CanRunOnBrowser</strong></a>(self, browser_info, page)</dt><dd><tt>Override&nbsp;this&nbsp;to&nbsp;return&nbsp;whether&nbsp;the&nbsp;browser&nbsp;brought&nbsp;up&nbsp;by&nbsp;this&nbsp;state<br>
+instance&nbsp;is&nbsp;suitable&nbsp;for&nbsp;running&nbsp;the&nbsp;given&nbsp;page.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;browser_info:&nbsp;an&nbsp;instance&nbsp;of&nbsp;telemetry.core.browser_info.BrowserInfo<br>
+&nbsp;&nbsp;page:&nbsp;an&nbsp;instance&nbsp;of&nbsp;telemetry.page.Page</tt></dd></dl>
+
+<dl><dt><a name="SharedMobilePageState-CanRunStory"><strong>CanRunStory</strong></a>(self, page)</dt></dl>
+
+<dl><dt><a name="SharedMobilePageState-DidRunStory"><strong>DidRunStory</strong></a>(self, results)</dt></dl>
+
+<dl><dt><a name="SharedMobilePageState-GetPregeneratedProfileArchiveDir"><strong>GetPregeneratedProfileArchiveDir</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="SharedMobilePageState-RunStory"><strong>RunStory</strong></a>(self, results)</dt></dl>
+
+<dl><dt><a name="SharedMobilePageState-SetPregeneratedProfileArchiveDir"><strong>SetPregeneratedProfileArchiveDir</strong></a>(self, archive_path)</dt><dd><tt>Benchmarks&nbsp;can&nbsp;set&nbsp;a&nbsp;pre-generated&nbsp;profile&nbsp;archive&nbsp;to&nbsp;indicate&nbsp;that&nbsp;when<br>
+Chrome&nbsp;is&nbsp;launched,&nbsp;it&nbsp;should&nbsp;have&nbsp;a&nbsp;--user-data-dir&nbsp;set&nbsp;to&nbsp;the<br>
+pregenerated&nbsp;profile,&nbsp;rather&nbsp;than&nbsp;to&nbsp;an&nbsp;empty&nbsp;profile.<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;benchmark&nbsp;is&nbsp;invoked&nbsp;with&nbsp;the&nbsp;option&nbsp;--profile-dir=&lt;dir&gt;,&nbsp;that<br>
+option&nbsp;overrides&nbsp;this&nbsp;value.</tt></dd></dl>
+
+<dl><dt><a name="SharedMobilePageState-TearDownState"><strong>TearDownState</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="SharedMobilePageState-WillRunStory"><strong>WillRunStory</strong></a>(self, page)</dt></dl>
+
+<dl><dt><a name="SharedMobilePageState-__init__"><strong>__init__</strong></a>(self, test, finder_options, story_set)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.page.shared_page_state.html#SharedPageState">SharedPageState</a>:<br>
+<dl><dt><strong>browser</strong></dt>
+</dl>
+<dl><dt><strong>current_page</strong></dt>
+</dl>
+<dl><dt><strong>current_tab</strong></dt>
+</dl>
+<dl><dt><strong>page_test</strong></dt>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.story.shared_state.html#SharedState">telemetry.story.shared_state.SharedState</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="SharedPageState">class <strong>SharedPageState</strong></a>(<a href="telemetry.story.shared_state.html#SharedState">telemetry.story.shared_state.SharedState</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>This&nbsp;class&nbsp;contains&nbsp;all&nbsp;specific&nbsp;logic&nbsp;necessary&nbsp;to&nbsp;run&nbsp;a&nbsp;Chrome&nbsp;browser<br>
+benchmark.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.page.shared_page_state.html#SharedPageState">SharedPageState</a></dd>
+<dd><a href="telemetry.story.shared_state.html#SharedState">telemetry.story.shared_state.SharedState</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="SharedPageState-CanRunOnBrowser"><strong>CanRunOnBrowser</strong></a>(self, browser_info, page)</dt><dd><tt>Override&nbsp;this&nbsp;to&nbsp;return&nbsp;whether&nbsp;the&nbsp;browser&nbsp;brought&nbsp;up&nbsp;by&nbsp;this&nbsp;state<br>
+instance&nbsp;is&nbsp;suitable&nbsp;for&nbsp;running&nbsp;the&nbsp;given&nbsp;page.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;browser_info:&nbsp;an&nbsp;instance&nbsp;of&nbsp;telemetry.core.browser_info.BrowserInfo<br>
+&nbsp;&nbsp;page:&nbsp;an&nbsp;instance&nbsp;of&nbsp;telemetry.page.Page</tt></dd></dl>
+
+<dl><dt><a name="SharedPageState-CanRunStory"><strong>CanRunStory</strong></a>(self, page)</dt></dl>
+
+<dl><dt><a name="SharedPageState-DidRunStory"><strong>DidRunStory</strong></a>(self, results)</dt></dl>
+
+<dl><dt><a name="SharedPageState-GetPregeneratedProfileArchiveDir"><strong>GetPregeneratedProfileArchiveDir</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="SharedPageState-RunStory"><strong>RunStory</strong></a>(self, results)</dt></dl>
+
+<dl><dt><a name="SharedPageState-SetPregeneratedProfileArchiveDir"><strong>SetPregeneratedProfileArchiveDir</strong></a>(self, archive_path)</dt><dd><tt>Benchmarks&nbsp;can&nbsp;set&nbsp;a&nbsp;pre-generated&nbsp;profile&nbsp;archive&nbsp;to&nbsp;indicate&nbsp;that&nbsp;when<br>
+Chrome&nbsp;is&nbsp;launched,&nbsp;it&nbsp;should&nbsp;have&nbsp;a&nbsp;--user-data-dir&nbsp;set&nbsp;to&nbsp;the<br>
+pregenerated&nbsp;profile,&nbsp;rather&nbsp;than&nbsp;to&nbsp;an&nbsp;empty&nbsp;profile.<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;benchmark&nbsp;is&nbsp;invoked&nbsp;with&nbsp;the&nbsp;option&nbsp;--profile-dir=&lt;dir&gt;,&nbsp;that<br>
+option&nbsp;overrides&nbsp;this&nbsp;value.</tt></dd></dl>
+
+<dl><dt><a name="SharedPageState-TearDownState"><strong>TearDownState</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="SharedPageState-WillRunStory"><strong>WillRunStory</strong></a>(self, page)</dt></dl>
+
+<dl><dt><a name="SharedPageState-__init__"><strong>__init__</strong></a>(self, test, finder_options, story_set)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>browser</strong></dt>
+</dl>
+<dl><dt><strong>current_page</strong></dt>
+</dl>
+<dl><dt><strong>current_tab</strong></dt>
+</dl>
+<dl><dt><strong>page_test</strong></dt>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.story.shared_state.html#SharedState">telemetry.story.shared_state.SharedState</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="SharedTabletPageState">class <strong>SharedTabletPageState</strong></a>(<a href="telemetry.page.shared_page_state.html#SharedPageState">SharedPageState</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.page.shared_page_state.html#SharedTabletPageState">SharedTabletPageState</a></dd>
+<dd><a href="telemetry.page.shared_page_state.html#SharedPageState">SharedPageState</a></dd>
+<dd><a href="telemetry.story.shared_state.html#SharedState">telemetry.story.shared_state.SharedState</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.page.shared_page_state.html#SharedPageState">SharedPageState</a>:<br>
+<dl><dt><a name="SharedTabletPageState-CanRunOnBrowser"><strong>CanRunOnBrowser</strong></a>(self, browser_info, page)</dt><dd><tt>Override&nbsp;this&nbsp;to&nbsp;return&nbsp;whether&nbsp;the&nbsp;browser&nbsp;brought&nbsp;up&nbsp;by&nbsp;this&nbsp;state<br>
+instance&nbsp;is&nbsp;suitable&nbsp;for&nbsp;running&nbsp;the&nbsp;given&nbsp;page.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;browser_info:&nbsp;an&nbsp;instance&nbsp;of&nbsp;telemetry.core.browser_info.BrowserInfo<br>
+&nbsp;&nbsp;page:&nbsp;an&nbsp;instance&nbsp;of&nbsp;telemetry.page.Page</tt></dd></dl>
+
+<dl><dt><a name="SharedTabletPageState-CanRunStory"><strong>CanRunStory</strong></a>(self, page)</dt></dl>
+
+<dl><dt><a name="SharedTabletPageState-DidRunStory"><strong>DidRunStory</strong></a>(self, results)</dt></dl>
+
+<dl><dt><a name="SharedTabletPageState-GetPregeneratedProfileArchiveDir"><strong>GetPregeneratedProfileArchiveDir</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="SharedTabletPageState-RunStory"><strong>RunStory</strong></a>(self, results)</dt></dl>
+
+<dl><dt><a name="SharedTabletPageState-SetPregeneratedProfileArchiveDir"><strong>SetPregeneratedProfileArchiveDir</strong></a>(self, archive_path)</dt><dd><tt>Benchmarks&nbsp;can&nbsp;set&nbsp;a&nbsp;pre-generated&nbsp;profile&nbsp;archive&nbsp;to&nbsp;indicate&nbsp;that&nbsp;when<br>
+Chrome&nbsp;is&nbsp;launched,&nbsp;it&nbsp;should&nbsp;have&nbsp;a&nbsp;--user-data-dir&nbsp;set&nbsp;to&nbsp;the<br>
+pregenerated&nbsp;profile,&nbsp;rather&nbsp;than&nbsp;to&nbsp;an&nbsp;empty&nbsp;profile.<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;benchmark&nbsp;is&nbsp;invoked&nbsp;with&nbsp;the&nbsp;option&nbsp;--profile-dir=&lt;dir&gt;,&nbsp;that<br>
+option&nbsp;overrides&nbsp;this&nbsp;value.</tt></dd></dl>
+
+<dl><dt><a name="SharedTabletPageState-TearDownState"><strong>TearDownState</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="SharedTabletPageState-WillRunStory"><strong>WillRunStory</strong></a>(self, page)</dt></dl>
+
+<dl><dt><a name="SharedTabletPageState-__init__"><strong>__init__</strong></a>(self, test, finder_options, story_set)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.page.shared_page_state.html#SharedPageState">SharedPageState</a>:<br>
+<dl><dt><strong>browser</strong></dt>
+</dl>
+<dl><dt><strong>current_page</strong></dt>
+</dl>
+<dl><dt><strong>current_tab</strong></dt>
+</dl>
+<dl><dt><strong>page_test</strong></dt>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.story.shared_state.html#SharedState">telemetry.story.shared_state.SharedState</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
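The SharedPageState variants documented above are normally chosen per story rather than instantiated directly. A hedged sketch of that selection follows; the shared_page_state_class keyword on page.Page is assumed from typical Telemetry usage and is not shown on this generated page.

    # Illustrative sketch only. The shared_page_state_class keyword on
    # page_module.Page is an assumption, not documented above.
    from telemetry import story
    from telemetry.page import page as page_module
    from telemetry.page import shared_page_state

    class DesktopOnlyStorySet(story.StorySet):
      def __init__(self):
        super(DesktopOnlyStorySet, self).__init__()
        # Run this page with the desktop variant of the shared state.
        self.AddStory(page_module.Page(
            'http://example.com', self,
            shared_page_state_class=shared_page_state.SharedDesktopPageState))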
diff --git a/catapult/telemetry/docs/pydoc/telemetry.project_config.html b/catapult/telemetry/docs/pydoc/telemetry.project_config.html
new file mode 100644
index 0000000..111ea3e
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.project_config.html
@@ -0,0 +1,73 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.project_config</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.project_config</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/project_config.py">telemetry/project_config.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.project_config.html#ProjectConfig">ProjectConfig</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ProjectConfig">class <strong>ProjectConfig</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Contains&nbsp;information&nbsp;about&nbsp;the&nbsp;benchmark&nbsp;runtime&nbsp;environment.<br>
+&nbsp;<br>
+Attributes:<br>
+&nbsp;&nbsp;top_level_dir:&nbsp;A&nbsp;dir&nbsp;that&nbsp;contains&nbsp;benchmark,&nbsp;page&nbsp;test,&nbsp;and/or&nbsp;story<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;set&nbsp;dirs&nbsp;and&nbsp;associated&nbsp;artifacts.<br>
+&nbsp;&nbsp;benchmark_dirs:&nbsp;A&nbsp;list&nbsp;of&nbsp;dirs&nbsp;containing&nbsp;benchmarks.<br>
+&nbsp;&nbsp;benchmark_aliases:&nbsp;A&nbsp;dict&nbsp;of&nbsp;name:alias&nbsp;string&nbsp;pairs&nbsp;to&nbsp;be&nbsp;matched&nbsp;against<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;exactly&nbsp;during&nbsp;benchmark&nbsp;selection.<br>
+&nbsp;&nbsp;client_config:&nbsp;A&nbsp;path&nbsp;to&nbsp;a&nbsp;ProjectDependencies&nbsp;json&nbsp;file.<br>
+&nbsp;&nbsp;default_chrome_root:&nbsp;A&nbsp;path&nbsp;to&nbsp;the&nbsp;chromium&nbsp;source&nbsp;directory.&nbsp;Many&nbsp;telemetry<br>
+&nbsp;&nbsp;&nbsp;&nbsp;features&nbsp;depend&nbsp;on&nbsp;the&nbsp;presence&nbsp;of&nbsp;the&nbsp;chromium&nbsp;source&nbsp;tree&nbsp;and&nbsp;won't<br>
+&nbsp;&nbsp;&nbsp;&nbsp;work&nbsp;if&nbsp;it&nbsp;is&nbsp;not&nbsp;specified.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="ProjectConfig-__init__"><strong>__init__</strong></a>(self, top_level_dir, benchmark_dirs<font color="#909090">=None</font>, benchmark_aliases<font color="#909090">=None</font>, client_config<font color="#909090">=None</font>, default_chrome_root<font color="#909090">=None</font>)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>benchmark_aliases</strong></dt>
+</dl>
+<dl><dt><strong>benchmark_dirs</strong></dt>
+</dl>
+<dl><dt><strong>client_config</strong></dt>
+</dl>
+<dl><dt><strong>default_chrome_root</strong></dt>
+</dl>
+<dl><dt><strong>top_level_dir</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
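The ProjectConfig constructor documented above takes only paths and aliases, so bootstrapping one is a few lines. The sketch below is built from that signature; the directory names and the alias pair are hypothetical placeholders.

    # Sketch assembled from the documented __init__ signature; all paths
    # and the alias mapping below are hypothetical placeholders.
    import os

    from telemetry import project_config

    top_level_dir = os.path.dirname(os.path.abspath(__file__))
    config = project_config.ProjectConfig(
        top_level_dir=top_level_dir,
        benchmark_dirs=[os.path.join(top_level_dir, 'benchmarks')],
        benchmark_aliases={'startup.cold': 'startup_cold'},
        client_config=os.path.join(top_level_dir, 'binary_dependencies.json'),
        default_chrome_root=None)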
diff --git a/catapult/telemetry/docs/pydoc/telemetry.record_wpr.html b/catapult/telemetry/docs/pydoc/telemetry.record_wpr.html
new file mode 100644
index 0000000..a762abd
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.record_wpr.html
@@ -0,0 +1,173 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.record_wpr</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.record_wpr</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/record_wpr.py">telemetry/record_wpr.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="argparse.html">argparse</a><br>
+<a href="telemetry.benchmark.html">telemetry.benchmark</a><br>
+<a href="telemetry.internal.util.binary_manager.html">telemetry.internal.util.binary_manager</a><br>
+<a href="telemetry.internal.browser.browser_options.html">telemetry.internal.browser.browser_options</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.util.command_line.html">telemetry.internal.util.command_line</a><br>
+<a href="telemetry.core.discover.html">telemetry.core.discover</a><br>
+<a href="logging.html">logging</a><br>
+<a href="telemetry.page.page_test.html">telemetry.page.page_test</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.results.results_options.html">telemetry.internal.results.results_options</a><br>
+<a href="telemetry.story.html">telemetry.story</a><br>
+<a href="telemetry.internal.story_runner.html">telemetry.internal.story_runner</a><br>
+<a href="sys.html">sys</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.util.html">telemetry.core.util</a><br>
+<a href="telemetry.util.wpr_modes.html">telemetry.util.wpr_modes</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.record_wpr.html#WprRecorder">WprRecorder</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.page.page_test.html#PageTest">telemetry.page.page_test.PageTest</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.record_wpr.html#RecorderPageTest">RecorderPageTest</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="RecorderPageTest">class <strong>RecorderPageTest</strong></a>(<a href="telemetry.page.page_test.html#PageTest">telemetry.page.page_test.PageTest</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.record_wpr.html#RecorderPageTest">RecorderPageTest</a></dd>
+<dd><a href="telemetry.page.page_test.html#PageTest">telemetry.page.page_test.PageTest</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="RecorderPageTest-CleanUpAfterPage"><strong>CleanUpAfterPage</strong></a>(self, page, tab)</dt></dl>
+
+<dl><dt><a name="RecorderPageTest-CustomizeBrowserOptions"><strong>CustomizeBrowserOptions</strong></a>(self, options)</dt></dl>
+
+<dl><dt><a name="RecorderPageTest-DidNavigateToPage"><strong>DidNavigateToPage</strong></a>(self, page, tab)</dt></dl>
+
+<dl><dt><a name="RecorderPageTest-DidStartBrowser"><strong>DidStartBrowser</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="RecorderPageTest-RunNavigateSteps"><strong>RunNavigateSteps</strong></a>(self, page, tab)</dt></dl>
+
+<dl><dt><a name="RecorderPageTest-ValidateAndMeasurePage"><strong>ValidateAndMeasurePage</strong></a>(self, page, tab, results)</dt></dl>
+
+<dl><dt><a name="RecorderPageTest-WillNavigateToPage"><strong>WillNavigateToPage</strong></a>(self, page, tab)</dt><dd><tt>Override&nbsp;to&nbsp;ensure&nbsp;all&nbsp;resources&nbsp;are&nbsp;fetched&nbsp;from&nbsp;network.</tt></dd></dl>
+
+<dl><dt><a name="RecorderPageTest-WillStartBrowser"><strong>WillStartBrowser</strong></a>(self, browser)</dt></dl>
+
+<dl><dt><a name="RecorderPageTest-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.page.page_test.html#PageTest">telemetry.page.page_test.PageTest</a>:<br>
+<dl><dt><a name="RecorderPageTest-DidRunPage"><strong>DidRunPage</strong></a>(self, platform)</dt><dd><tt>Called&nbsp;after&nbsp;the&nbsp;test&nbsp;run&nbsp;method&nbsp;was&nbsp;run,&nbsp;even&nbsp;if&nbsp;it&nbsp;failed.</tt></dd></dl>
+
+<dl><dt><a name="RecorderPageTest-RestartBrowserBeforeEachPage"><strong>RestartBrowserBeforeEachPage</strong></a>(self)</dt><dd><tt>Should&nbsp;the&nbsp;browser&nbsp;be&nbsp;restarted&nbsp;for&nbsp;the&nbsp;page?<br>
+&nbsp;<br>
+This&nbsp;returns&nbsp;true&nbsp;if&nbsp;the&nbsp;test&nbsp;needs&nbsp;to&nbsp;unconditionally&nbsp;restart&nbsp;the<br>
+browser&nbsp;for&nbsp;each&nbsp;page.&nbsp;It&nbsp;may&nbsp;be&nbsp;called&nbsp;before&nbsp;the&nbsp;browser&nbsp;is&nbsp;started.</tt></dd></dl>
+
+<dl><dt><a name="RecorderPageTest-SetOptions"><strong>SetOptions</strong></a>(self, options)</dt><dd><tt>Sets&nbsp;the&nbsp;BrowserFinderOptions&nbsp;instance&nbsp;to&nbsp;use.</tt></dd></dl>
+
+<dl><dt><a name="RecorderPageTest-StopBrowserAfterPage"><strong>StopBrowserAfterPage</strong></a>(self, browser, page)</dt><dd><tt>Should&nbsp;the&nbsp;browser&nbsp;be&nbsp;stopped&nbsp;after&nbsp;the&nbsp;page&nbsp;is&nbsp;run?<br>
+&nbsp;<br>
+This&nbsp;is&nbsp;called&nbsp;after&nbsp;a&nbsp;page&nbsp;is&nbsp;run&nbsp;to&nbsp;decide&nbsp;whether&nbsp;the&nbsp;browser&nbsp;needs&nbsp;to<br>
+be&nbsp;stopped&nbsp;to&nbsp;clean&nbsp;up&nbsp;its&nbsp;state.&nbsp;If&nbsp;it&nbsp;is&nbsp;stopped,&nbsp;then&nbsp;it&nbsp;will&nbsp;be<br>
+restarted&nbsp;to&nbsp;run&nbsp;the&nbsp;next&nbsp;page.<br>
+&nbsp;<br>
+A&nbsp;test&nbsp;that&nbsp;overrides&nbsp;this&nbsp;can&nbsp;look&nbsp;at&nbsp;both&nbsp;the&nbsp;page&nbsp;and&nbsp;the&nbsp;browser&nbsp;to<br>
+decide&nbsp;whether&nbsp;it&nbsp;needs&nbsp;to&nbsp;stop&nbsp;the&nbsp;browser.</tt></dd></dl>
+
+<dl><dt><a name="RecorderPageTest-TabForPage"><strong>TabForPage</strong></a>(self, page, browser)</dt><dd><tt>Override&nbsp;to&nbsp;select&nbsp;a&nbsp;different&nbsp;tab&nbsp;for&nbsp;the&nbsp;page.&nbsp;&nbsp;For&nbsp;instance,&nbsp;to<br>
+create&nbsp;a&nbsp;new&nbsp;tab&nbsp;for&nbsp;every&nbsp;page,&nbsp;return&nbsp;browser.tabs.New().</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.page.page_test.html#PageTest">telemetry.page.page_test.PageTest</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>clear_cache_before_each_run</strong></dt>
+<dd><tt>When&nbsp;set&nbsp;to&nbsp;True,&nbsp;the&nbsp;browser's&nbsp;disk&nbsp;and&nbsp;memory&nbsp;cache&nbsp;will&nbsp;be&nbsp;cleared<br>
+before&nbsp;each&nbsp;run.</tt></dd>
+</dl>
+<dl><dt><strong>close_tabs_before_run</strong></dt>
+<dd><tt>When&nbsp;set&nbsp;to&nbsp;True,&nbsp;all&nbsp;tabs&nbsp;are&nbsp;closed&nbsp;before&nbsp;running&nbsp;the&nbsp;test&nbsp;for&nbsp;the<br>
+first&nbsp;time.</tt></dd>
+</dl>
+<dl><dt><strong>is_multi_tab_test</strong></dt>
+<dd><tt>Returns&nbsp;True&nbsp;if&nbsp;the&nbsp;test&nbsp;opens&nbsp;multiple&nbsp;tabs.<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;test&nbsp;overrides&nbsp;TabForPage,&nbsp;it&nbsp;is&nbsp;deemed&nbsp;a&nbsp;multi-tab&nbsp;test.<br>
+Multi-tab&nbsp;tests&nbsp;do&nbsp;not&nbsp;retry&nbsp;after&nbsp;tab&nbsp;or&nbsp;browser&nbsp;crashes,&nbsp;whereas<br>
+single-tab&nbsp;tests&nbsp;do.&nbsp;That&nbsp;is&nbsp;because&nbsp;the&nbsp;state&nbsp;of&nbsp;multi-tab&nbsp;tests<br>
+(e.g.,&nbsp;how&nbsp;many&nbsp;tabs&nbsp;are&nbsp;open,&nbsp;etc.)&nbsp;is&nbsp;unknown&nbsp;after&nbsp;crashes.</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="WprRecorder">class <strong>WprRecorder</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="WprRecorder-CreateResults"><strong>CreateResults</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="WprRecorder-HandleResults"><strong>HandleResults</strong></a>(self, results, upload_to_cloud_storage)</dt></dl>
+
+<dl><dt><a name="WprRecorder-Record"><strong>Record</strong></a>(self, results)</dt></dl>
+
+<dl><dt><a name="WprRecorder-__init__"><strong>__init__</strong></a>(self, base_dir, target, args<font color="#909090">=None</font>)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>options</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-Main"><strong>Main</strong></a>(base_dir)</dt><dd><tt>#&nbsp;TODO(nednguyen):&nbsp;use&nbsp;benchmark.Environment&nbsp;instead&nbsp;of&nbsp;base_dir&nbsp;for&nbsp;discovering<br>
+#&nbsp;benchmark&nbsp;&amp;&nbsp;story&nbsp;classes.</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
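Together, the WprRecorder methods above outline a record-then-handle flow. The sketch below is assembled only from the listed signatures; the base_dir and target arguments are placeholders, and whether results should be uploaded is left to the caller.

    # Sketch of the record flow implied by the documented WprRecorder
    # methods. 'path/to/benchmarks' and 'my_story_set' are placeholders.
    import sys

    from telemetry import record_wpr

    def RecordStorySet():
      recorder = record_wpr.WprRecorder(
          'path/to/benchmarks', 'my_story_set', args=sys.argv[1:])
      results = recorder.CreateResults()
      recorder.Record(results)
      recorder.HandleResults(results, upload_to_cloud_storage=False)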
diff --git a/catapult/telemetry/docs/pydoc/telemetry.story.html b/catapult/telemetry/docs/pydoc/telemetry.story.html
new file mode 100644
index 0000000..af7ef2b
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.story.html
@@ -0,0 +1,40 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.story</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.story</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/story/__init__.py">telemetry/story/__init__.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.story.shared_state.html">shared_state</a><br>
+<a href="telemetry.story.story.html">story</a><br>
+</td><td width="25%" valign=top><a href="telemetry.story.story_filter.html">story_filter</a><br>
+<a href="telemetry.story.story_filter_unittest.html">story_filter_unittest</a><br>
+</td><td width="25%" valign=top><a href="telemetry.story.story_set.html">story_set</a><br>
+<a href="telemetry.story.story_set_unittest.html">story_set_unittest</a><br>
+</td><td width="25%" valign=top><a href="telemetry.story.story_unittest.html">story_unittest</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>INTERNAL_BUCKET</strong> = 'chrome-telemetry'<br>
+<strong>PARTNER_BUCKET</strong> = 'chrome-partner-telemetry'<br>
+<strong>PUBLIC_BUCKET</strong> = 'chromium-telemetry'</td></tr></table>
+</body></html>
\ No newline at end of file
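The bucket constants listed in the Data section above are typically handed to a story set's cloud storage configuration. A sketch follows, assuming story.StorySet accepts archive_data_file and cloud_storage_bucket keywords (neither is documented on this page).

    # Illustrative only; the StorySet keywords used here are assumptions.
    # The bucket constant comes from the Data section above.
    from telemetry import story

    class ExampleStorySet(story.StorySet):
      def __init__(self):
        super(ExampleStorySet, self).__init__(
            archive_data_file='data/example.json',
            cloud_storage_bucket=story.PUBLIC_BUCKET)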
diff --git a/catapult/telemetry/docs/pydoc/telemetry.story.shared_state.html b/catapult/telemetry/docs/pydoc/telemetry.story.shared_state.html
new file mode 100644
index 0000000..b078cb0
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.story.shared_state.html
@@ -0,0 +1,88 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.story.shared_state</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.story.html"><font color="#ffffff">story</font></a>.shared_state</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/story/shared_state.py">telemetry/story/shared_state.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.story.shared_state.html#SharedState">SharedState</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="SharedState">class <strong>SharedState</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;class&nbsp;that&nbsp;manages&nbsp;the&nbsp;test&nbsp;state&nbsp;across&nbsp;multiple&nbsp;stories.<br>
+It's&nbsp;styled&nbsp;on&nbsp;unittest.TestCase&nbsp;for&nbsp;handling&nbsp;test&nbsp;setup&nbsp;&amp;&nbsp;teardown&nbsp;logic.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="SharedState-CanRunStory"><strong>CanRunStory</strong></a>(self, story)</dt><dd><tt>Indicate&nbsp;whether&nbsp;the&nbsp;story&nbsp;can&nbsp;be&nbsp;run&nbsp;in&nbsp;the&nbsp;current&nbsp;configuration.<br>
+This&nbsp;is&nbsp;called&nbsp;after&nbsp;WillRunStory&nbsp;and&nbsp;before&nbsp;RunStory.&nbsp;Return&nbsp;True<br>
+if&nbsp;the&nbsp;story&nbsp;should&nbsp;be&nbsp;run,&nbsp;and&nbsp;False&nbsp;if&nbsp;it&nbsp;should&nbsp;be&nbsp;skipped.<br>
+Most&nbsp;subclasses&nbsp;will&nbsp;probably&nbsp;want&nbsp;to&nbsp;override&nbsp;this&nbsp;to&nbsp;always<br>
+return&nbsp;True.<br>
+Args:<br>
+&nbsp;&nbsp;story:&nbsp;a&nbsp;story.Story&nbsp;instance.</tt></dd></dl>
+
+<dl><dt><a name="SharedState-DidRunStory"><strong>DidRunStory</strong></a>(self, results)</dt><dd><tt>Override&nbsp;to&nbsp;do&nbsp;any&nbsp;action&nbsp;after&nbsp;running&nbsp;each&nbsp;of&nbsp;all&nbsp;stories&nbsp;that<br>
+share&nbsp;this&nbsp;same&nbsp;state.<br>
+This&nbsp;method&nbsp;is&nbsp;styled&nbsp;on&nbsp;unittest.TestCase.tearDown.</tt></dd></dl>
+
+<dl><dt><a name="SharedState-RunStory"><strong>RunStory</strong></a>(self, results)</dt><dd><tt>Override&nbsp;to&nbsp;do&nbsp;any&nbsp;action&nbsp;before&nbsp;running&nbsp;each&nbsp;one&nbsp;of&nbsp;all&nbsp;stories<br>
+that&nbsp;share&nbsp;this&nbsp;same&nbsp;state.<br>
+This&nbsp;method&nbsp;is&nbsp;styled&nbsp;on&nbsp;unittest.TestCase.run.</tt></dd></dl>
+
+<dl><dt><a name="SharedState-TearDownState"><strong>TearDownState</strong></a>(self)</dt><dd><tt>Override&nbsp;to&nbsp;do&nbsp;any&nbsp;action&nbsp;after&nbsp;running&nbsp;multiple&nbsp;stories&nbsp;that<br>
+share&nbsp;this&nbsp;same&nbsp;state.<br>
+This&nbsp;method&nbsp;is&nbsp;styled&nbsp;on&nbsp;unittest.TestCase.tearDownClass.</tt></dd></dl>
+
+<dl><dt><a name="SharedState-WillRunStory"><strong>WillRunStory</strong></a>(self, story)</dt><dd><tt>Override&nbsp;to&nbsp;do&nbsp;any&nbsp;action&nbsp;before&nbsp;running&nbsp;each&nbsp;one&nbsp;of&nbsp;all&nbsp;stories<br>
+that&nbsp;share&nbsp;this&nbsp;same&nbsp;state.<br>
+This&nbsp;method&nbsp;is&nbsp;styled&nbsp;on&nbsp;unittest.TestCase.setUp.</tt></dd></dl>
+
+<dl><dt><a name="SharedState-__init__"><strong>__init__</strong></a>(self, test, options, story_set)</dt><dd><tt>This&nbsp;method&nbsp;is&nbsp;styled&nbsp;on&nbsp;unittest.TestCase.setUpClass.<br>
+Override&nbsp;to&nbsp;do&nbsp;any&nbsp;action&nbsp;before&nbsp;running&nbsp;stories&nbsp;that<br>
+share&nbsp;this&nbsp;same&nbsp;state.<br>
+Args:<br>
+&nbsp;&nbsp;test:&nbsp;a&nbsp;page_test.PageTest&nbsp;or&nbsp;story_test.StoryTest&nbsp;instance.<br>
+&nbsp;&nbsp;options:&nbsp;a&nbsp;BrowserFinderOptions&nbsp;instance&nbsp;that&nbsp;contains&nbsp;command&nbsp;line<br>
+&nbsp;&nbsp;&nbsp;&nbsp;options.<br>
+&nbsp;&nbsp;story_set:&nbsp;a&nbsp;story.StorySet&nbsp;instance.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+<dd><tt>Override&nbsp;to&nbsp;return&nbsp;the&nbsp;platform&nbsp;which&nbsp;stories&nbsp;that&nbsp;share&nbsp;this&nbsp;same<br>
+state&nbsp;will&nbsp;be&nbsp;run&nbsp;on.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
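
The page above lists the full SharedState lifecycle: `__init__(test, options, story_set)` plays the role of `setUpClass`, `WillRunStory`/`RunStory`/`DidRunStory` bracket each story, `CanRunStory` gates execution, and `TearDownState` mirrors `tearDownClass`. A minimal sketch of a subclass wired to those hooks follows; it relies only on the signatures documented above, and the logging calls plus the `_current_story` attribute are illustrative, not part of telemetry.

```python
# Sketch only: implements the hooks exactly as documented in
# telemetry.story.shared_state above; logging and _current_story are
# illustrative additions.
import logging

from telemetry.story import shared_state


class LoggingSharedState(shared_state.SharedState):

  def __init__(self, test, options, story_set):
    super(LoggingSharedState, self).__init__(test, options, story_set)
    self._current_story = None  # Track the story between hooks.

  @property
  def platform(self):
    # Documented as "override to return the platform"; None keeps the
    # sketch self-contained.
    return None

  def CanRunStory(self, story):
    return True  # Run every story in this configuration.

  def WillRunStory(self, story):
    self._current_story = story
    logging.info('About to run %s', story.display_name)

  def RunStory(self, results):
    logging.info('Running %s', self._current_story.display_name)

  def DidRunStory(self, results):
    logging.info('Finished %s', self._current_story.display_name)

  def TearDownState(self):
    logging.info('All stories sharing this state have run.')
```
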
diff --git a/catapult/telemetry/docs/pydoc/telemetry.story.story.html b/catapult/telemetry/docs/pydoc/telemetry.story.story.html
new file mode 100644
index 0000000..de69c84
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.story.story.html
@@ -0,0 +1,111 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.story.story</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.story.html"><font color="#ffffff">story</font></a>.story</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/story/story.py">telemetry/story/story.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="re.html">re</a><br>
+</td><td width="25%" valign=top><a href="telemetry.story.shared_state.html">telemetry.story.shared_state</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.story.story.html#Story">Story</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Story">class <strong>Story</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;class&nbsp;styled&nbsp;on&nbsp;unittest.TestCase&nbsp;for&nbsp;creating&nbsp;story&nbsp;tests.<br>
+&nbsp;<br>
+Tests&nbsp;should&nbsp;override&nbsp;Run&nbsp;to&nbsp;optionally&nbsp;start&nbsp;the&nbsp;application&nbsp;and&nbsp;perform&nbsp;actions<br>
+on&nbsp;it.&nbsp;To&nbsp;share&nbsp;state&nbsp;between&nbsp;different&nbsp;tests,&nbsp;one&nbsp;can&nbsp;define&nbsp;a<br>
+shared_state&nbsp;which&nbsp;contains&nbsp;hooks&nbsp;that&nbsp;will&nbsp;be&nbsp;called&nbsp;before&nbsp;and<br>
+after&nbsp;multiple&nbsp;stories&nbsp;run&nbsp;and&nbsp;in&nbsp;between&nbsp;runs.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;shared_state_class:&nbsp;subclass&nbsp;of&nbsp;telemetry.story.shared_state.SharedState.<br>
+&nbsp;&nbsp;name:&nbsp;string&nbsp;name&nbsp;of&nbsp;this&nbsp;story&nbsp;that&nbsp;can&nbsp;be&nbsp;used&nbsp;for&nbsp;identifying&nbsp;this&nbsp;story<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;in&nbsp;results&nbsp;output.<br>
+&nbsp;&nbsp;labels:&nbsp;A&nbsp;list&nbsp;or&nbsp;set&nbsp;of&nbsp;string&nbsp;labels&nbsp;that&nbsp;are&nbsp;used&nbsp;for&nbsp;filtering.&nbsp;See<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;story.story_filter&nbsp;for&nbsp;more&nbsp;information.<br>
+&nbsp;&nbsp;is_local:&nbsp;If&nbsp;True,&nbsp;the&nbsp;story&nbsp;does&nbsp;not&nbsp;require&nbsp;network.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="Story-AsDict"><strong>AsDict</strong></a>(self)</dt><dd><tt>Converts&nbsp;a&nbsp;story&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;to&nbsp;a&nbsp;dict&nbsp;suitable&nbsp;for&nbsp;JSON&nbsp;output.</tt></dd></dl>
+
+<dl><dt><a name="Story-Run"><strong>Run</strong></a>(self, shared_state)</dt><dd><tt>Execute&nbsp;the&nbsp;interactions&nbsp;with&nbsp;the&nbsp;applications&nbsp;and/or&nbsp;platforms.</tt></dd></dl>
+
+<dl><dt><a name="Story-__init__"><strong>__init__</strong></a>(self, shared_state_class, name<font color="#909090">=''</font>, labels<font color="#909090">=None</font>, is_local<font color="#909090">=False</font>, make_javascript_deterministic<font color="#909090">=True</font>)</dt><dd><tt>Args:<br>
+&nbsp;&nbsp;make_javascript_deterministic:&nbsp;Whether&nbsp;JavaScript&nbsp;performed&nbsp;on<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;the&nbsp;page&nbsp;is&nbsp;made&nbsp;deterministic&nbsp;across&nbsp;multiple&nbsp;runs.&nbsp;This<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;requires&nbsp;that&nbsp;the&nbsp;web&nbsp;content&nbsp;is&nbsp;served&nbsp;via&nbsp;Web&nbsp;Page&nbsp;Replay<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;to&nbsp;take&nbsp;effect.&nbsp;This&nbsp;setting&nbsp;does&nbsp;not&nbsp;affect&nbsp;stories&nbsp;containing&nbsp;no&nbsp;web<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;content&nbsp;or&nbsp;where&nbsp;the&nbsp;HTTP&nbsp;MIME&nbsp;type&nbsp;is&nbsp;not&nbsp;text/html.&nbsp;See&nbsp;also:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;_InjectScripts&nbsp;method&nbsp;in&nbsp;third_party/webpagereplay/httpclient.py.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>display_name</strong></dt>
+</dl>
+<dl><dt><strong>file_safe_name</strong></dt>
+<dd><tt>A&nbsp;version&nbsp;of&nbsp;display_name&nbsp;that's&nbsp;safe&nbsp;to&nbsp;use&nbsp;as&nbsp;a&nbsp;filename.<br>
+&nbsp;<br>
+The&nbsp;default&nbsp;implementation&nbsp;sanitizes&nbsp;special&nbsp;characters&nbsp;with&nbsp;underscores,<br>
+but&nbsp;it's&nbsp;okay&nbsp;to&nbsp;override&nbsp;it&nbsp;with&nbsp;a&nbsp;more&nbsp;specific&nbsp;implementation&nbsp;in<br>
+subclasses.</tt></dd>
+</dl>
+<dl><dt><strong>id</strong></dt>
+</dl>
+<dl><dt><strong>is_local</strong></dt>
+<dd><tt>Returns&nbsp;True&nbsp;iff&nbsp;this&nbsp;story&nbsp;does&nbsp;not&nbsp;require&nbsp;network.</tt></dd>
+</dl>
+<dl><dt><strong>labels</strong></dt>
+</dl>
+<dl><dt><strong>make_javascript_deterministic</strong></dt>
+</dl>
+<dl><dt><strong>name</strong></dt>
+</dl>
+<dl><dt><strong>serving_dir</strong></dt>
+<dd><tt>Returns&nbsp;the&nbsp;absolute&nbsp;path&nbsp;to&nbsp;a&nbsp;directory&nbsp;with&nbsp;hash&nbsp;files&nbsp;to&nbsp;data&nbsp;that<br>
+should&nbsp;be&nbsp;updated&nbsp;from&nbsp;cloud&nbsp;storage,&nbsp;or&nbsp;None&nbsp;if&nbsp;no&nbsp;files&nbsp;need&nbsp;to&nbsp;be<br>
+updated.</tt></dd>
+</dl>
+<dl><dt><strong>shared_state_class</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
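
Putting the constructor arguments and the Run hook above together, a Story subclass can be as small as the sketch below. It is illustrative only: `PrintPlatformStory` is not part of telemetry, and the base `SharedState` is passed as `shared_state_class` purely to keep the example self-contained (a real test would pass a concrete, e.g. browser-backed, subclass).

```python
# Sketch only: uses the Story constructor arguments and Run(shared_state)
# hook documented above. PrintPlatformStory is not part of telemetry.
from telemetry.story import shared_state as shared_state_module
from telemetry.story import story


class PrintPlatformStory(story.Story):

  def __init__(self):
    super(PrintPlatformStory, self).__init__(
        # A real test would pass a concrete SharedState subclass here.
        shared_state_class=shared_state_module.SharedState,
        name='print_platform',
        labels={'example'},
        is_local=True)  # No network required, so no WPR archive is needed.

  def Run(self, shared_state):
    # Interact with the application/platform managed by the shared state.
    print('Running %s on %s' % (self.display_name, shared_state.platform))
```
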
diff --git a/catapult/telemetry/docs/pydoc/telemetry.story.story_filter.html b/catapult/telemetry/docs/pydoc/telemetry.story.story_filter.html
new file mode 100644
index 0000000..6ed1bd2
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.story.story_filter.html
@@ -0,0 +1,72 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.story.story_filter</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.story.html"><font color="#ffffff">story</font></a>.story_filter</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/story/story_filter.py">telemetry/story/story_filter.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.util.command_line.html">telemetry.internal.util.command_line</a><br>
+</td><td width="25%" valign=top><a href="optparse.html">optparse</a><br>
+</td><td width="25%" valign=top><a href="re.html">re</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.util.command_line.html#ArgumentHandlerMixIn">telemetry.internal.util.command_line.ArgumentHandlerMixIn</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.story.story_filter.html#StoryFilter">StoryFilter</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="StoryFilter">class <strong>StoryFilter</strong></a>(<a href="telemetry.internal.util.command_line.html#ArgumentHandlerMixIn">telemetry.internal.util.command_line.ArgumentHandlerMixIn</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Filters&nbsp;stories&nbsp;in&nbsp;the&nbsp;story&nbsp;set&nbsp;based&nbsp;on&nbsp;command-line&nbsp;flags.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.story.story_filter.html#StoryFilter">StoryFilter</a></dd>
+<dd><a href="telemetry.internal.util.command_line.html#ArgumentHandlerMixIn">telemetry.internal.util.command_line.ArgumentHandlerMixIn</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="StoryFilter-AddCommandLineArgs"><strong>AddCommandLineArgs</strong></a>(cls, parser)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="StoryFilter-IsSelected"><strong>IsSelected</strong></a>(cls, story)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="StoryFilter-ProcessCommandLineArgs"><strong>ProcessCommandLineArgs</strong></a>(cls, parser, args)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.util.command_line.html#ArgumentHandlerMixIn">telemetry.internal.util.command_line.ArgumentHandlerMixIn</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
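
StoryFilter is an ArgumentHandlerMixIn, so it plugs into an optparse parser through the three classmethods listed above. A hedged sketch of that wiring follows; `FilterStories` is a hypothetical helper, and the concrete flag names registered by `AddCommandLineArgs` are defined by the module itself and not restated here.

```python
# Sketch only: wires StoryFilter into an optparse command line using the
# three classmethods documented above. FilterStories is a hypothetical
# helper, not part of telemetry.
import optparse

from telemetry.story import story_filter


def FilterStories(stories, argv):
  parser = optparse.OptionParser()
  story_filter.StoryFilter.AddCommandLineArgs(parser)
  options, _ = parser.parse_args(argv)
  story_filter.StoryFilter.ProcessCommandLineArgs(parser, options)
  # Keep only the stories selected by the parsed command-line flags.
  return [s for s in stories if story_filter.StoryFilter.IsSelected(s)]
```
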
diff --git a/catapult/telemetry/docs/pydoc/telemetry.story.story_set.html b/catapult/telemetry/docs/pydoc/telemetry.story.story_set.html
new file mode 100644
index 0000000..73e8ec6
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.story.story_set.html
@@ -0,0 +1,139 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.story.story_set</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.story.html"><font color="#ffffff">story</font></a>.story_set</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/story/story_set.py">telemetry/story/story_set.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.wpr.archive_info.html">telemetry.wpr.archive_info</a><br>
+</td><td width="25%" valign=top><a href="inspect.html">inspect</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="telemetry.story.story.html">telemetry.story.story</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.story.story_set.html#StorySet">StorySet</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="StorySet">class <strong>StorySet</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;collection&nbsp;of&nbsp;stories.<br>
+&nbsp;<br>
+A&nbsp;typical&nbsp;usage&nbsp;of&nbsp;<a href="#StorySet">StorySet</a>&nbsp;would&nbsp;be&nbsp;to&nbsp;subclass&nbsp;it&nbsp;and&nbsp;then&nbsp;call<br>
+AddStory&nbsp;for&nbsp;each&nbsp;Story.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="StorySet-AddStory"><strong>AddStory</strong></a>(self, story)</dt></dl>
+
+<dl><dt><a name="StorySet-RemoveStory"><strong>RemoveStory</strong></a>(self, story)</dt><dd><tt>Removes&nbsp;a&nbsp;Story.<br>
+&nbsp;<br>
+Allows&nbsp;the&nbsp;stories&nbsp;to&nbsp;be&nbsp;filtered.</tt></dd></dl>
+
+<dl><dt><a name="StorySet-WprFilePathForStory"><strong>WprFilePathForStory</strong></a>(self, story)</dt><dd><tt>Convenient&nbsp;function&nbsp;to&nbsp;retrieve&nbsp;WPR&nbsp;archive&nbsp;file&nbsp;path.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;story:&nbsp;The&nbsp;Story&nbsp;to&nbsp;look&nbsp;up.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;The&nbsp;WPR&nbsp;archive&nbsp;file&nbsp;path&nbsp;for&nbsp;the&nbsp;given&nbsp;Story,&nbsp;if&nbsp;found.<br>
+&nbsp;&nbsp;Otherwise,&nbsp;None.</tt></dd></dl>
+
+<dl><dt><a name="StorySet-__getitem__"><strong>__getitem__</strong></a>(self, key)</dt></dl>
+
+<dl><dt><a name="StorySet-__init__"><strong>__init__</strong></a>(self, archive_data_file<font color="#909090">=''</font>, cloud_storage_bucket<font color="#909090">=None</font>, base_dir<font color="#909090">=None</font>, serving_dirs<font color="#909090">=None</font>)</dt><dd><tt>Creates&nbsp;a&nbsp;new&nbsp;<a href="#StorySet">StorySet</a>.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;archive_data_file:&nbsp;The&nbsp;path&nbsp;to&nbsp;Web&nbsp;Page&nbsp;Replay's&nbsp;archive&nbsp;data,&nbsp;relative<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;to&nbsp;self.<strong>base_dir</strong>.<br>
+&nbsp;&nbsp;cloud_storage_bucket:&nbsp;The&nbsp;cloud&nbsp;storage&nbsp;bucket&nbsp;used&nbsp;to&nbsp;download<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Web&nbsp;Page&nbsp;Replay's&nbsp;archive&nbsp;data.&nbsp;Valid&nbsp;values&nbsp;are:&nbsp;None,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;story.PUBLIC_BUCKET,&nbsp;story.PARTNER_BUCKET,&nbsp;or&nbsp;story.INTERNAL_BUCKET<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;(defined&nbsp;in&nbsp;telemetry.util.cloud_storage).<br>
+&nbsp;&nbsp;serving_dirs:&nbsp;A&nbsp;set&nbsp;of&nbsp;paths,&nbsp;relative&nbsp;to&nbsp;self.<strong>base_dir</strong>,&nbsp;to&nbsp;directories<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;containing&nbsp;hash&nbsp;files&nbsp;for&nbsp;non-wpr&nbsp;archive&nbsp;data&nbsp;stored&nbsp;in&nbsp;cloud<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;storage.</tt></dd></dl>
+
+<dl><dt><a name="StorySet-__iter__"><strong>__iter__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="StorySet-__len__"><strong>__len__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="StorySet-__setitem__"><strong>__setitem__</strong></a>(self, key, value)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="StorySet-Description"><strong>Description</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Return&nbsp;a&nbsp;string&nbsp;explaining&nbsp;in&nbsp;human-understandable&nbsp;terms&nbsp;what&nbsp;this<br>
+story&nbsp;set&nbsp;represents.<br>
+Note&nbsp;that&nbsp;this&nbsp;should&nbsp;be&nbsp;a&nbsp;classmethod&nbsp;so&nbsp;the&nbsp;benchmark_runner&nbsp;script&nbsp;can<br>
+display&nbsp;stories'&nbsp;names&nbsp;along&nbsp;with&nbsp;their&nbsp;descriptions&nbsp;in&nbsp;the&nbsp;list&nbsp;command.</tt></dd></dl>
+
+<dl><dt><a name="StorySet-Name"><strong>Name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Returns&nbsp;the&nbsp;string&nbsp;name&nbsp;of&nbsp;this&nbsp;<a href="#StorySet">StorySet</a>.<br>
+Note&nbsp;that&nbsp;this&nbsp;should&nbsp;be&nbsp;a&nbsp;classmethod&nbsp;so&nbsp;the&nbsp;benchmark_runner&nbsp;script&nbsp;can<br>
+match&nbsp;the&nbsp;story&nbsp;class&nbsp;with&nbsp;its&nbsp;name&nbsp;specified&nbsp;in&nbsp;the&nbsp;run&nbsp;command:<br>
+'Run&nbsp;&lt;User&nbsp;story&nbsp;test&nbsp;name&gt;&nbsp;&lt;User&nbsp;story&nbsp;class&nbsp;name&gt;'</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>allow_mixed_story_states</strong></dt>
+<dd><tt>True&nbsp;iff&nbsp;Stories&nbsp;are&nbsp;allowed&nbsp;to&nbsp;have&nbsp;different&nbsp;SharedState&nbsp;classes.<br>
+&nbsp;<br>
+There&nbsp;are&nbsp;no&nbsp;checks&nbsp;in&nbsp;place&nbsp;for&nbsp;determining&nbsp;if&nbsp;SharedStates&nbsp;are<br>
+being&nbsp;assigned&nbsp;correctly&nbsp;to&nbsp;all&nbsp;Stories&nbsp;in&nbsp;a&nbsp;given&nbsp;StorySet.&nbsp;The<br>
+majority&nbsp;of&nbsp;test&nbsp;cases&nbsp;should&nbsp;not&nbsp;need&nbsp;the&nbsp;ability&nbsp;to&nbsp;have&nbsp;multiple<br>
+SharedStates,&nbsp;which&nbsp;usually&nbsp;implies&nbsp;you&nbsp;should&nbsp;be&nbsp;writing&nbsp;multiple<br>
+benchmarks&nbsp;instead.&nbsp;We&nbsp;provide&nbsp;errors&nbsp;to&nbsp;avoid&nbsp;accidentally&nbsp;assigning<br>
+or&nbsp;defaulting&nbsp;to&nbsp;the&nbsp;wrong&nbsp;SharedState.<br>
+Override&nbsp;at&nbsp;your&nbsp;own&nbsp;risk.&nbsp;Here&nbsp;be&nbsp;dragons.</tt></dd>
+</dl>
+<dl><dt><strong>archive_data_file</strong></dt>
+</dl>
+<dl><dt><strong>base_dir</strong></dt>
+<dd><tt>The&nbsp;base&nbsp;directory&nbsp;to&nbsp;resolve&nbsp;archive_data_file.<br>
+&nbsp;<br>
+This&nbsp;defaults&nbsp;to&nbsp;the&nbsp;directory&nbsp;containing&nbsp;the&nbsp;StorySet&nbsp;instance's&nbsp;class.</tt></dd>
+</dl>
+<dl><dt><strong>bucket</strong></dt>
+</dl>
+<dl><dt><strong>file_path</strong></dt>
+</dl>
+<dl><dt><strong>serving_dirs</strong></dt>
+</dl>
+<dl><dt><strong>wpr_archive_info</strong></dt>
+<dd><tt>Lazily&nbsp;constructs&nbsp;wpr_archive_info&nbsp;if&nbsp;it's&nbsp;not&nbsp;set&nbsp;and&nbsp;returns&nbsp;it.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
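
The StorySet docstring above describes the typical pattern of subclassing and calling AddStory for each Story. A short sketch of that pattern follows; the class name, story names, and archive path are illustrative, and only the constructor arguments and methods documented above are used.

```python
# Sketch only: the "subclass StorySet and call AddStory" pattern described
# above. Class name, story names and archive_data_file path are illustrative.
from telemetry.story import shared_state
from telemetry.story import story
from telemetry.story import story_set


class ExampleStorySet(story_set.StorySet):

  def __init__(self):
    super(ExampleStorySet, self).__init__(
        archive_data_file='data/example.json',  # resolved against base_dir
        cloud_storage_bucket=None)  # no archive download needed here
    for name in ('first_story', 'second_story'):
      self.AddStory(story.Story(
          shared_state_class=shared_state.SharedState,
          name=name,
          is_local=True))
```

Per the documented `__iter__` and `__len__`, iterating an `ExampleStorySet()` instance yields the two stories added in the constructor.
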
diff --git a/catapult/telemetry/docs/pydoc/telemetry.testing.browser_test_case.html b/catapult/telemetry/docs/pydoc/telemetry.testing.browser_test_case.html
new file mode 100644
index 0000000..05e5d4f
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.testing.browser_test_case.html
@@ -0,0 +1,355 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.testing.browser_test_case</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.testing.html"><font color="#ffffff">testing</font></a>.browser_test_case</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/testing/browser_test_case.py">telemetry/testing/browser_test_case.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.browser.browser_finder.html">telemetry.internal.browser.browser_finder</a><br>
+<a href="telemetry.testing.options_for_unittests.html">telemetry.testing.options_for_unittests</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+<a href="telemetry.internal.util.path.html">telemetry.internal.util.path</a><br>
+</td><td width="25%" valign=top><a href="unittest.html">unittest</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="unittest.case.html#TestCase">unittest.case.TestCase</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.testing.browser_test_case.html#BrowserTestCase">BrowserTestCase</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="BrowserTestCase">class <strong>BrowserTestCase</strong></a>(<a href="unittest.case.html#TestCase">unittest.case.TestCase</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.testing.browser_test_case.html#BrowserTestCase">BrowserTestCase</a></dd>
+<dd><a href="unittest.case.html#TestCase">unittest.case.TestCase</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="BrowserTestCase-CustomizeBrowserOptions"><strong>CustomizeBrowserOptions</strong></a>(cls, options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Override&nbsp;to&nbsp;add&nbsp;test-specific&nbsp;options&nbsp;to&nbsp;the&nbsp;BrowserOptions&nbsp;object</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-UrlOfUnittestFile"><strong>UrlOfUnittestFile</strong></a>(cls, filename)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="BrowserTestCase-setUpClass"><strong>setUpClass</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="BrowserTestCase-tearDownClass"><strong>tearDownClass</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Methods inherited from <a href="unittest.case.html#TestCase">unittest.case.TestCase</a>:<br>
+<dl><dt><a name="BrowserTestCase-__call__"><strong>__call__</strong></a>(self, *args, **kwds)</dt></dl>
+
+<dl><dt><a name="BrowserTestCase-__eq__"><strong>__eq__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="BrowserTestCase-__hash__"><strong>__hash__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="BrowserTestCase-__init__"><strong>__init__</strong></a>(self, methodName<font color="#909090">='runTest'</font>)</dt><dd><tt>Create&nbsp;an&nbsp;instance&nbsp;of&nbsp;the&nbsp;class&nbsp;that&nbsp;will&nbsp;use&nbsp;the&nbsp;named&nbsp;test<br>
+method&nbsp;when&nbsp;executed.&nbsp;Raises&nbsp;a&nbsp;ValueError&nbsp;if&nbsp;the&nbsp;instance&nbsp;does<br>
+not&nbsp;have&nbsp;a&nbsp;method&nbsp;with&nbsp;the&nbsp;specified&nbsp;name.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-__ne__"><strong>__ne__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="BrowserTestCase-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="BrowserTestCase-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="BrowserTestCase-addCleanup"><strong>addCleanup</strong></a>(self, function, *args, **kwargs)</dt><dd><tt>Add&nbsp;a&nbsp;function,&nbsp;with&nbsp;arguments,&nbsp;to&nbsp;be&nbsp;called&nbsp;when&nbsp;the&nbsp;test&nbsp;is<br>
+completed.&nbsp;Functions&nbsp;added&nbsp;are&nbsp;called&nbsp;on&nbsp;a&nbsp;LIFO&nbsp;basis&nbsp;and&nbsp;are<br>
+called&nbsp;after&nbsp;tearDown&nbsp;on&nbsp;test&nbsp;failure&nbsp;or&nbsp;success.<br>
+&nbsp;<br>
+Cleanup&nbsp;items&nbsp;are&nbsp;called&nbsp;even&nbsp;if&nbsp;setUp&nbsp;fails&nbsp;(unlike&nbsp;tearDown).</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-addTypeEqualityFunc"><strong>addTypeEqualityFunc</strong></a>(self, typeobj, function)</dt><dd><tt>Add&nbsp;a&nbsp;type&nbsp;specific&nbsp;assertEqual&nbsp;style&nbsp;function&nbsp;to&nbsp;compare&nbsp;a&nbsp;type.<br>
+&nbsp;<br>
+This&nbsp;method&nbsp;is&nbsp;for&nbsp;use&nbsp;by&nbsp;<a href="unittest.case.html#TestCase">TestCase</a>&nbsp;subclasses&nbsp;that&nbsp;need&nbsp;to&nbsp;register<br>
+their&nbsp;own&nbsp;type&nbsp;equality&nbsp;functions&nbsp;to&nbsp;provide&nbsp;nicer&nbsp;error&nbsp;messages.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;typeobj:&nbsp;The&nbsp;data&nbsp;type&nbsp;to&nbsp;call&nbsp;this&nbsp;function&nbsp;on&nbsp;when&nbsp;both&nbsp;values<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;are&nbsp;of&nbsp;the&nbsp;same&nbsp;type&nbsp;in&nbsp;<a href="#BrowserTestCase-assertEqual">assertEqual</a>().<br>
+&nbsp;&nbsp;&nbsp;&nbsp;function:&nbsp;The&nbsp;callable&nbsp;taking&nbsp;two&nbsp;arguments&nbsp;and&nbsp;an&nbsp;optional<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;msg=&nbsp;argument&nbsp;that&nbsp;raises&nbsp;self.<strong>failureException</strong>&nbsp;with&nbsp;a<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;useful&nbsp;error&nbsp;message&nbsp;when&nbsp;the&nbsp;two&nbsp;arguments&nbsp;are&nbsp;not&nbsp;equal.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertAlmostEqual"><strong>assertAlmostEqual</strong></a>(self, first, second, places<font color="#909090">=None</font>, msg<font color="#909090">=None</font>, delta<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;unequal&nbsp;as&nbsp;determined&nbsp;by&nbsp;their<br>
+difference&nbsp;rounded&nbsp;to&nbsp;the&nbsp;given&nbsp;number&nbsp;of&nbsp;decimal&nbsp;places<br>
+(default&nbsp;7)&nbsp;and&nbsp;comparing&nbsp;to&nbsp;zero,&nbsp;or&nbsp;by&nbsp;comparing&nbsp;that&nbsp;the<br>
+difference&nbsp;between&nbsp;the&nbsp;two&nbsp;objects&nbsp;is&nbsp;more&nbsp;than&nbsp;the&nbsp;given&nbsp;delta.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;decimal&nbsp;places&nbsp;(from&nbsp;zero)&nbsp;are&nbsp;usually&nbsp;not&nbsp;the&nbsp;same<br>
+as&nbsp;significant&nbsp;digits&nbsp;(measured&nbsp;from&nbsp;the&nbsp;most&nbsp;significant&nbsp;digit).<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;two&nbsp;objects&nbsp;compare&nbsp;equal&nbsp;then&nbsp;they&nbsp;will&nbsp;automatically<br>
+compare&nbsp;almost&nbsp;equal.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertAlmostEquals"><strong>assertAlmostEquals</strong></a> = assertAlmostEqual(self, first, second, places<font color="#909090">=None</font>, msg<font color="#909090">=None</font>, delta<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;unequal&nbsp;as&nbsp;determined&nbsp;by&nbsp;their<br>
+difference&nbsp;rounded&nbsp;to&nbsp;the&nbsp;given&nbsp;number&nbsp;of&nbsp;decimal&nbsp;places<br>
+(default&nbsp;7)&nbsp;and&nbsp;comparing&nbsp;to&nbsp;zero,&nbsp;or&nbsp;by&nbsp;comparing&nbsp;that&nbsp;the<br>
+difference&nbsp;between&nbsp;the&nbsp;two&nbsp;objects&nbsp;is&nbsp;more&nbsp;than&nbsp;the&nbsp;given&nbsp;delta.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;decimal&nbsp;places&nbsp;(from&nbsp;zero)&nbsp;are&nbsp;usually&nbsp;not&nbsp;the&nbsp;same<br>
+as&nbsp;significant&nbsp;digits&nbsp;(measured&nbsp;from&nbsp;the&nbsp;most&nbsp;significant&nbsp;digit).<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;two&nbsp;objects&nbsp;compare&nbsp;equal&nbsp;then&nbsp;they&nbsp;will&nbsp;automatically<br>
+compare&nbsp;almost&nbsp;equal.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertDictContainsSubset"><strong>assertDictContainsSubset</strong></a>(self, expected, actual, msg<font color="#909090">=None</font>)</dt><dd><tt>Checks&nbsp;whether&nbsp;actual&nbsp;is&nbsp;a&nbsp;superset&nbsp;of&nbsp;expected.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertDictEqual"><strong>assertDictEqual</strong></a>(self, d1, d2, msg<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="BrowserTestCase-assertEqual"><strong>assertEqual</strong></a>(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;unequal&nbsp;as&nbsp;determined&nbsp;by&nbsp;the&nbsp;'=='<br>
+operator.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertEquals"><strong>assertEquals</strong></a> = assertEqual(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;unequal&nbsp;as&nbsp;determined&nbsp;by&nbsp;the&nbsp;'=='<br>
+operator.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertFalse"><strong>assertFalse</strong></a>(self, expr, msg<font color="#909090">=None</font>)</dt><dd><tt>Check&nbsp;that&nbsp;the&nbsp;expression&nbsp;is&nbsp;false.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertGreater"><strong>assertGreater</strong></a>(self, a, b, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#BrowserTestCase-assertTrue">assertTrue</a>(a&nbsp;&gt;&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertGreaterEqual"><strong>assertGreaterEqual</strong></a>(self, a, b, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#BrowserTestCase-assertTrue">assertTrue</a>(a&nbsp;&gt;=&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertIn"><strong>assertIn</strong></a>(self, member, container, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#BrowserTestCase-assertTrue">assertTrue</a>(a&nbsp;in&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertIs"><strong>assertIs</strong></a>(self, expr1, expr2, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#BrowserTestCase-assertTrue">assertTrue</a>(a&nbsp;is&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertIsInstance"><strong>assertIsInstance</strong></a>(self, obj, cls, msg<font color="#909090">=None</font>)</dt><dd><tt>Same&nbsp;as&nbsp;<a href="#BrowserTestCase-assertTrue">assertTrue</a>(isinstance(obj,&nbsp;cls)),&nbsp;with&nbsp;a&nbsp;nicer<br>
+default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertIsNone"><strong>assertIsNone</strong></a>(self, obj, msg<font color="#909090">=None</font>)</dt><dd><tt>Same&nbsp;as&nbsp;<a href="#BrowserTestCase-assertTrue">assertTrue</a>(obj&nbsp;is&nbsp;None),&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertIsNot"><strong>assertIsNot</strong></a>(self, expr1, expr2, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#BrowserTestCase-assertTrue">assertTrue</a>(a&nbsp;is&nbsp;not&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertIsNotNone"><strong>assertIsNotNone</strong></a>(self, obj, msg<font color="#909090">=None</font>)</dt><dd><tt>Included&nbsp;for&nbsp;symmetry&nbsp;with&nbsp;assertIsNone.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertItemsEqual"><strong>assertItemsEqual</strong></a>(self, expected_seq, actual_seq, msg<font color="#909090">=None</font>)</dt><dd><tt>An&nbsp;unordered&nbsp;sequence&nbsp;specific&nbsp;comparison.&nbsp;It&nbsp;asserts&nbsp;that<br>
+actual_seq&nbsp;and&nbsp;expected_seq&nbsp;have&nbsp;the&nbsp;same&nbsp;element&nbsp;counts.<br>
+Equivalent&nbsp;to::<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;<a href="#BrowserTestCase-assertEqual">assertEqual</a>(Counter(iter(actual_seq)),<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Counter(iter(expected_seq)))<br>
+&nbsp;<br>
+Asserts&nbsp;that&nbsp;each&nbsp;element&nbsp;has&nbsp;the&nbsp;same&nbsp;count&nbsp;in&nbsp;both&nbsp;sequences.<br>
+Example:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;-&nbsp;[0,&nbsp;1,&nbsp;1]&nbsp;and&nbsp;[1,&nbsp;0,&nbsp;1]&nbsp;compare&nbsp;equal.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;-&nbsp;[0,&nbsp;0,&nbsp;1]&nbsp;and&nbsp;[0,&nbsp;1]&nbsp;compare&nbsp;unequal.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertLess"><strong>assertLess</strong></a>(self, a, b, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#BrowserTestCase-assertTrue">assertTrue</a>(a&nbsp;&lt;&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertLessEqual"><strong>assertLessEqual</strong></a>(self, a, b, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#BrowserTestCase-assertTrue">assertTrue</a>(a&nbsp;&lt;=&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertListEqual"><strong>assertListEqual</strong></a>(self, list1, list2, msg<font color="#909090">=None</font>)</dt><dd><tt>A&nbsp;list-specific&nbsp;equality&nbsp;assertion.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;list1:&nbsp;The&nbsp;first&nbsp;list&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;list2:&nbsp;The&nbsp;second&nbsp;list&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;msg:&nbsp;Optional&nbsp;message&nbsp;to&nbsp;use&nbsp;on&nbsp;failure&nbsp;instead&nbsp;of&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;differences.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertMultiLineEqual"><strong>assertMultiLineEqual</strong></a>(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Assert&nbsp;that&nbsp;two&nbsp;multi-line&nbsp;strings&nbsp;are&nbsp;equal.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertNotAlmostEqual"><strong>assertNotAlmostEqual</strong></a>(self, first, second, places<font color="#909090">=None</font>, msg<font color="#909090">=None</font>, delta<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;equal&nbsp;as&nbsp;determined&nbsp;by&nbsp;their<br>
+difference&nbsp;rounded&nbsp;to&nbsp;the&nbsp;given&nbsp;number&nbsp;of&nbsp;decimal&nbsp;places<br>
+(default&nbsp;7)&nbsp;and&nbsp;comparing&nbsp;to&nbsp;zero,&nbsp;or&nbsp;by&nbsp;comparing&nbsp;that&nbsp;the<br>
+difference&nbsp;between&nbsp;the&nbsp;two&nbsp;objects&nbsp;is&nbsp;less&nbsp;than&nbsp;the&nbsp;given&nbsp;delta.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;decimal&nbsp;places&nbsp;(from&nbsp;zero)&nbsp;are&nbsp;usually&nbsp;not&nbsp;the&nbsp;same<br>
+as&nbsp;significant&nbsp;digits&nbsp;(measured&nbsp;from&nbsp;the&nbsp;most&nbsp;significant&nbsp;digit).<br>
+&nbsp;<br>
+Objects&nbsp;that&nbsp;are&nbsp;equal&nbsp;automatically&nbsp;fail.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertNotAlmostEquals"><strong>assertNotAlmostEquals</strong></a> = assertNotAlmostEqual(self, first, second, places<font color="#909090">=None</font>, msg<font color="#909090">=None</font>, delta<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;equal&nbsp;as&nbsp;determined&nbsp;by&nbsp;their<br>
+difference&nbsp;rounded&nbsp;to&nbsp;the&nbsp;given&nbsp;number&nbsp;of&nbsp;decimal&nbsp;places<br>
+(default&nbsp;7)&nbsp;and&nbsp;comparing&nbsp;to&nbsp;zero,&nbsp;or&nbsp;by&nbsp;comparing&nbsp;that&nbsp;the<br>
+difference&nbsp;between&nbsp;the&nbsp;two&nbsp;objects&nbsp;is&nbsp;less&nbsp;than&nbsp;the&nbsp;given&nbsp;delta.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;decimal&nbsp;places&nbsp;(from&nbsp;zero)&nbsp;are&nbsp;usually&nbsp;not&nbsp;the&nbsp;same<br>
+as&nbsp;significant&nbsp;digits&nbsp;(measured&nbsp;from&nbsp;the&nbsp;most&nbsp;significant&nbsp;digit).<br>
+&nbsp;<br>
+Objects&nbsp;that&nbsp;are&nbsp;equal&nbsp;automatically&nbsp;fail.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertNotEqual"><strong>assertNotEqual</strong></a>(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;equal&nbsp;as&nbsp;determined&nbsp;by&nbsp;the&nbsp;'!='<br>
+operator.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertNotEquals"><strong>assertNotEquals</strong></a> = assertNotEqual(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;equal&nbsp;as&nbsp;determined&nbsp;by&nbsp;the&nbsp;'!='<br>
+operator.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertNotIn"><strong>assertNotIn</strong></a>(self, member, container, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#BrowserTestCase-assertTrue">assertTrue</a>(a&nbsp;not&nbsp;in&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertNotIsInstance"><strong>assertNotIsInstance</strong></a>(self, obj, cls, msg<font color="#909090">=None</font>)</dt><dd><tt>Included&nbsp;for&nbsp;symmetry&nbsp;with&nbsp;assertIsInstance.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertNotRegexpMatches"><strong>assertNotRegexpMatches</strong></a>(self, text, unexpected_regexp, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;the&nbsp;test&nbsp;if&nbsp;the&nbsp;text&nbsp;matches&nbsp;the&nbsp;regular&nbsp;expression.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertRaises"><strong>assertRaises</strong></a>(self, excClass, callableObj<font color="#909090">=None</font>, *args, **kwargs)</dt><dd><tt>Fail&nbsp;unless&nbsp;an&nbsp;exception&nbsp;of&nbsp;class&nbsp;excClass&nbsp;is&nbsp;raised<br>
+by&nbsp;callableObj&nbsp;when&nbsp;invoked&nbsp;with&nbsp;arguments&nbsp;args&nbsp;and&nbsp;keyword<br>
+arguments&nbsp;kwargs.&nbsp;If&nbsp;a&nbsp;different&nbsp;type&nbsp;of&nbsp;exception&nbsp;is<br>
+raised,&nbsp;it&nbsp;will&nbsp;not&nbsp;be&nbsp;caught,&nbsp;and&nbsp;the&nbsp;test&nbsp;case&nbsp;will&nbsp;be<br>
+deemed&nbsp;to&nbsp;have&nbsp;suffered&nbsp;an&nbsp;error,&nbsp;exactly&nbsp;as&nbsp;for&nbsp;an<br>
+unexpected&nbsp;exception.<br>
+&nbsp;<br>
+If&nbsp;called&nbsp;with&nbsp;callableObj&nbsp;omitted&nbsp;or&nbsp;None,&nbsp;will&nbsp;return&nbsp;a<br>
+context&nbsp;object&nbsp;used&nbsp;like&nbsp;this::<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;with&nbsp;<a href="#BrowserTestCase-assertRaises">assertRaises</a>(SomeException):<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;do_something()<br>
+&nbsp;<br>
+The&nbsp;context&nbsp;manager&nbsp;keeps&nbsp;a&nbsp;reference&nbsp;to&nbsp;the&nbsp;exception&nbsp;as<br>
+the&nbsp;'exception'&nbsp;attribute.&nbsp;This&nbsp;allows&nbsp;you&nbsp;to&nbsp;inspect&nbsp;the<br>
+exception&nbsp;after&nbsp;the&nbsp;assertion::<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;with&nbsp;<a href="#BrowserTestCase-assertRaises">assertRaises</a>(SomeException)&nbsp;as&nbsp;cm:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;do_something()<br>
+&nbsp;&nbsp;&nbsp;&nbsp;the_exception&nbsp;=&nbsp;cm.exception<br>
+&nbsp;&nbsp;&nbsp;&nbsp;<a href="#BrowserTestCase-assertEqual">assertEqual</a>(the_exception.error_code,&nbsp;3)</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertRaisesRegexp"><strong>assertRaisesRegexp</strong></a>(self, expected_exception, expected_regexp, callable_obj<font color="#909090">=None</font>, *args, **kwargs)</dt><dd><tt>Asserts&nbsp;that&nbsp;the&nbsp;message&nbsp;in&nbsp;a&nbsp;raised&nbsp;exception&nbsp;matches&nbsp;a&nbsp;regexp.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;expected_exception:&nbsp;Exception&nbsp;class&nbsp;expected&nbsp;to&nbsp;be&nbsp;raised.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;expected_regexp:&nbsp;Regexp&nbsp;(re&nbsp;pattern&nbsp;object&nbsp;or&nbsp;string)&nbsp;expected<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;to&nbsp;be&nbsp;found&nbsp;in&nbsp;error&nbsp;message.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;callable_obj:&nbsp;Function&nbsp;to&nbsp;be&nbsp;called.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;args:&nbsp;Extra&nbsp;args.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;kwargs:&nbsp;Extra&nbsp;kwargs.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertRegexpMatches"><strong>assertRegexpMatches</strong></a>(self, text, expected_regexp, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;the&nbsp;test&nbsp;unless&nbsp;the&nbsp;text&nbsp;matches&nbsp;the&nbsp;regular&nbsp;expression.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertSequenceEqual"><strong>assertSequenceEqual</strong></a>(self, seq1, seq2, msg<font color="#909090">=None</font>, seq_type<font color="#909090">=None</font>)</dt><dd><tt>An&nbsp;equality&nbsp;assertion&nbsp;for&nbsp;ordered&nbsp;sequences&nbsp;(like&nbsp;lists&nbsp;and&nbsp;tuples).<br>
+&nbsp;<br>
+For&nbsp;the&nbsp;purposes&nbsp;of&nbsp;this&nbsp;function,&nbsp;a&nbsp;valid&nbsp;ordered&nbsp;sequence&nbsp;type&nbsp;is&nbsp;one<br>
+which&nbsp;can&nbsp;be&nbsp;indexed,&nbsp;has&nbsp;a&nbsp;length,&nbsp;and&nbsp;has&nbsp;an&nbsp;equality&nbsp;operator.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;seq1:&nbsp;The&nbsp;first&nbsp;sequence&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;seq2:&nbsp;The&nbsp;second&nbsp;sequence&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;seq_type:&nbsp;The&nbsp;expected&nbsp;datatype&nbsp;of&nbsp;the&nbsp;sequences,&nbsp;or&nbsp;None&nbsp;if&nbsp;no<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;datatype&nbsp;should&nbsp;be&nbsp;enforced.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;msg:&nbsp;Optional&nbsp;message&nbsp;to&nbsp;use&nbsp;on&nbsp;failure&nbsp;instead&nbsp;of&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;differences.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertSetEqual"><strong>assertSetEqual</strong></a>(self, set1, set2, msg<font color="#909090">=None</font>)</dt><dd><tt>A&nbsp;set-specific&nbsp;equality&nbsp;assertion.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;set1:&nbsp;The&nbsp;first&nbsp;set&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;set2:&nbsp;The&nbsp;second&nbsp;set&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;msg:&nbsp;Optional&nbsp;message&nbsp;to&nbsp;use&nbsp;on&nbsp;failure&nbsp;instead&nbsp;of&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;differences.<br>
+&nbsp;<br>
+assertSetEqual&nbsp;uses&nbsp;ducktyping&nbsp;to&nbsp;support&nbsp;different&nbsp;types&nbsp;of&nbsp;sets,&nbsp;and<br>
+is&nbsp;optimized&nbsp;for&nbsp;sets&nbsp;specifically&nbsp;(parameters&nbsp;must&nbsp;support&nbsp;a<br>
+difference&nbsp;method).</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertTrue"><strong>assertTrue</strong></a>(self, expr, msg<font color="#909090">=None</font>)</dt><dd><tt>Check&nbsp;that&nbsp;the&nbsp;expression&nbsp;is&nbsp;true.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assertTupleEqual"><strong>assertTupleEqual</strong></a>(self, tuple1, tuple2, msg<font color="#909090">=None</font>)</dt><dd><tt>A&nbsp;tuple-specific&nbsp;equality&nbsp;assertion.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;tuple1:&nbsp;The&nbsp;first&nbsp;tuple&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;tuple2:&nbsp;The&nbsp;second&nbsp;tuple&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;msg:&nbsp;Optional&nbsp;message&nbsp;to&nbsp;use&nbsp;on&nbsp;failure&nbsp;instead&nbsp;of&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;differences.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-assert_"><strong>assert_</strong></a> = assertTrue(self, expr, msg<font color="#909090">=None</font>)</dt><dd><tt>Check&nbsp;that&nbsp;the&nbsp;expression&nbsp;is&nbsp;true.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-countTestCases"><strong>countTestCases</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="BrowserTestCase-debug"><strong>debug</strong></a>(self)</dt><dd><tt>Run&nbsp;the&nbsp;test&nbsp;without&nbsp;collecting&nbsp;errors&nbsp;in&nbsp;a&nbsp;TestResult</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-defaultTestResult"><strong>defaultTestResult</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="BrowserTestCase-doCleanups"><strong>doCleanups</strong></a>(self)</dt><dd><tt>Execute&nbsp;all&nbsp;cleanup&nbsp;functions.&nbsp;Normally&nbsp;called&nbsp;for&nbsp;you&nbsp;after<br>
+tearDown.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-fail"><strong>fail</strong></a>(self, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;immediately,&nbsp;with&nbsp;the&nbsp;given&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-failIf"><strong>failIf</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="BrowserTestCase-failIfAlmostEqual"><strong>failIfAlmostEqual</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="BrowserTestCase-failIfEqual"><strong>failIfEqual</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="BrowserTestCase-failUnless"><strong>failUnless</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="BrowserTestCase-failUnlessAlmostEqual"><strong>failUnlessAlmostEqual</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="BrowserTestCase-failUnlessEqual"><strong>failUnlessEqual</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="BrowserTestCase-failUnlessRaises"><strong>failUnlessRaises</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="BrowserTestCase-id"><strong>id</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="BrowserTestCase-run"><strong>run</strong></a>(self, result<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="BrowserTestCase-setUp"><strong>setUp</strong></a>(self)</dt><dd><tt>Hook&nbsp;method&nbsp;for&nbsp;setting&nbsp;up&nbsp;the&nbsp;test&nbsp;fixture&nbsp;before&nbsp;exercising&nbsp;it.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-shortDescription"><strong>shortDescription</strong></a>(self)</dt><dd><tt>Returns&nbsp;a&nbsp;one-line&nbsp;description&nbsp;of&nbsp;the&nbsp;test,&nbsp;or&nbsp;None&nbsp;if&nbsp;no<br>
+description&nbsp;has&nbsp;been&nbsp;provided.<br>
+&nbsp;<br>
+The&nbsp;default&nbsp;implementation&nbsp;of&nbsp;this&nbsp;method&nbsp;returns&nbsp;the&nbsp;first&nbsp;line&nbsp;of<br>
+the&nbsp;specified&nbsp;test&nbsp;method's&nbsp;docstring.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-skipTest"><strong>skipTest</strong></a>(self, reason)</dt><dd><tt>Skip&nbsp;this&nbsp;test.</tt></dd></dl>
+
+<dl><dt><a name="BrowserTestCase-tearDown"><strong>tearDown</strong></a>(self)</dt><dd><tt>Hook&nbsp;method&nbsp;for&nbsp;deconstructing&nbsp;the&nbsp;test&nbsp;fixture&nbsp;after&nbsp;testing&nbsp;it.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="unittest.case.html#TestCase">unittest.case.TestCase</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="unittest.case.html#TestCase">unittest.case.TestCase</a>:<br>
+<dl><dt><strong>failureException</strong> = &lt;type 'exceptions.AssertionError'&gt;<dd><tt>Assertion&nbsp;failed.</tt></dl>
+
+<dl><dt><strong>longMessage</strong> = False</dl>
+
+<dl><dt><strong>maxDiff</strong> = 640</dl>
+
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-teardown_browser"><strong>teardown_browser</strong></a>()</dt></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>current_browser</strong> = None<br>
+<strong>current_browser_options</strong> = None</td></tr></table>
+</body></html>
\ No newline at end of file
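
BrowserTestCase is a plain unittest.TestCase whose setUpClass/tearDownClass manage a browser for the whole class; tests customize launch flags via CustomizeBrowserOptions and resolve test files with UrlOfUnittestFile. A sketch restricted to those documented classmethods follows; how the started browser is exposed to individual test methods is not shown on this page, and `options.AppendExtraBrowserArgs` plus the `blank.html` test file are assumed from the wider telemetry tree rather than documented here.

```python
# Sketch only: limited to the classmethods documented above.
# AppendExtraBrowserArgs on the options object and the blank.html test file
# are assumptions, not documented on this page.
from telemetry.testing import browser_test_case


class ExampleBrowserTest(browser_test_case.BrowserTestCase):

  @classmethod
  def CustomizeBrowserOptions(cls, options):
    # Add test-specific flags before setUpClass launches the browser.
    options.AppendExtraBrowserArgs('--enable-logging')

  def testUnittestFileUrl(self):
    # UrlOfUnittestFile maps a file in the unittest data directory to a URL
    # that the class-managed browser can load.
    url = self.UrlOfUnittestFile('blank.html')
    self.assertIn('blank.html', url)
```
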
diff --git a/catapult/telemetry/docs/pydoc/telemetry.testing.disabled_cases.html b/catapult/telemetry/docs/pydoc/telemetry.testing.disabled_cases.html
new file mode 100644
index 0000000..d3982bb
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.testing.disabled_cases.html
@@ -0,0 +1,363 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.testing.disabled_cases</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.testing.html"><font color="#ffffff">testing</font></a>.disabled_cases</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/testing/disabled_cases.py">telemetry/testing/disabled_cases.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.decorators.html">telemetry.decorators</a><br>
+</td><td width="25%" valign=top><a href="unittest.html">unittest</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="unittest.case.html#TestCase">unittest.case.TestCase</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.testing.disabled_cases.html#DisabledCases">DisabledCases</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="DisabledCases">class <strong>DisabledCases</strong></a>(<a href="unittest.case.html#TestCase">unittest.case.TestCase</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>#&nbsp;These&nbsp;are&nbsp;not&nbsp;real&nbsp;unittests.<br>
+#&nbsp;They&nbsp;are&nbsp;merely&nbsp;to&nbsp;test&nbsp;our&nbsp;Enable/Disable&nbsp;annotations.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.testing.disabled_cases.html#DisabledCases">DisabledCases</a></dd>
+<dd><a href="unittest.case.html#TestCase">unittest.case.TestCase</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="DisabledCases-testAllDisabled"><strong>testAllDisabled</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DisabledCases-testAllEnabled"><strong>testAllEnabled</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DisabledCases-testChromeOSOnly"><strong>testChromeOSOnly</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DisabledCases-testHasTabs"><strong>testHasTabs</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DisabledCases-testMacOnly"><strong>testMacOnly</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DisabledCases-testMavericksOnly"><strong>testMavericksOnly</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DisabledCases-testNoChromeOS"><strong>testNoChromeOS</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DisabledCases-testNoMac"><strong>testNoMac</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DisabledCases-testNoMavericks"><strong>testNoMavericks</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DisabledCases-testNoSystem"><strong>testNoSystem</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DisabledCases-testNoWinLinux"><strong>testNoWinLinux</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DisabledCases-testSystemOnly"><strong>testSystemOnly</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DisabledCases-testWinOrLinuxOnly"><strong>testWinOrLinuxOnly</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="unittest.case.html#TestCase">unittest.case.TestCase</a>:<br>
+<dl><dt><a name="DisabledCases-__call__"><strong>__call__</strong></a>(self, *args, **kwds)</dt></dl>
+
+<dl><dt><a name="DisabledCases-__eq__"><strong>__eq__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="DisabledCases-__hash__"><strong>__hash__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DisabledCases-__init__"><strong>__init__</strong></a>(self, methodName<font color="#909090">='runTest'</font>)</dt><dd><tt>Create&nbsp;an&nbsp;instance&nbsp;of&nbsp;the&nbsp;class&nbsp;that&nbsp;will&nbsp;use&nbsp;the&nbsp;named&nbsp;test<br>
+method&nbsp;when&nbsp;executed.&nbsp;Raises&nbsp;a&nbsp;ValueError&nbsp;if&nbsp;the&nbsp;instance&nbsp;does<br>
+not&nbsp;have&nbsp;a&nbsp;method&nbsp;with&nbsp;the&nbsp;specified&nbsp;name.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-__ne__"><strong>__ne__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="DisabledCases-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DisabledCases-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DisabledCases-addCleanup"><strong>addCleanup</strong></a>(self, function, *args, **kwargs)</dt><dd><tt>Add&nbsp;a&nbsp;function,&nbsp;with&nbsp;arguments,&nbsp;to&nbsp;be&nbsp;called&nbsp;when&nbsp;the&nbsp;test&nbsp;is<br>
+completed.&nbsp;Functions&nbsp;added&nbsp;are&nbsp;called&nbsp;on&nbsp;a&nbsp;LIFO&nbsp;basis&nbsp;and&nbsp;are<br>
+called&nbsp;after&nbsp;tearDown&nbsp;on&nbsp;test&nbsp;failure&nbsp;or&nbsp;success.<br>
+&nbsp;<br>
+Cleanup&nbsp;items&nbsp;are&nbsp;called&nbsp;even&nbsp;if&nbsp;setUp&nbsp;fails&nbsp;(unlike&nbsp;tearDown).</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-addTypeEqualityFunc"><strong>addTypeEqualityFunc</strong></a>(self, typeobj, function)</dt><dd><tt>Add&nbsp;a&nbsp;type&nbsp;specific&nbsp;assertEqual&nbsp;style&nbsp;function&nbsp;to&nbsp;compare&nbsp;a&nbsp;type.<br>
+&nbsp;<br>
+This&nbsp;method&nbsp;is&nbsp;for&nbsp;use&nbsp;by&nbsp;<a href="unittest.case.html#TestCase">TestCase</a>&nbsp;subclasses&nbsp;that&nbsp;need&nbsp;to&nbsp;register<br>
+their&nbsp;own&nbsp;type&nbsp;equality&nbsp;functions&nbsp;to&nbsp;provide&nbsp;nicer&nbsp;error&nbsp;messages.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;typeobj:&nbsp;The&nbsp;data&nbsp;type&nbsp;to&nbsp;call&nbsp;this&nbsp;function&nbsp;on&nbsp;when&nbsp;both&nbsp;values<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;are&nbsp;of&nbsp;the&nbsp;same&nbsp;type&nbsp;in&nbsp;<a href="#DisabledCases-assertEqual">assertEqual</a>().<br>
+&nbsp;&nbsp;&nbsp;&nbsp;function:&nbsp;The&nbsp;callable&nbsp;taking&nbsp;two&nbsp;arguments&nbsp;and&nbsp;an&nbsp;optional<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;msg=&nbsp;argument&nbsp;that&nbsp;raises&nbsp;self.<strong>failureException</strong>&nbsp;with&nbsp;a<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;useful&nbsp;error&nbsp;message&nbsp;when&nbsp;the&nbsp;two&nbsp;arguments&nbsp;are&nbsp;not&nbsp;equal.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertAlmostEqual"><strong>assertAlmostEqual</strong></a>(self, first, second, places<font color="#909090">=None</font>, msg<font color="#909090">=None</font>, delta<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;unequal&nbsp;as&nbsp;determined&nbsp;by&nbsp;their<br>
+difference&nbsp;rounded&nbsp;to&nbsp;the&nbsp;given&nbsp;number&nbsp;of&nbsp;decimal&nbsp;places<br>
+(default&nbsp;7)&nbsp;and&nbsp;comparing&nbsp;to&nbsp;zero,&nbsp;or&nbsp;by&nbsp;comparing&nbsp;that&nbsp;the<br>
+difference&nbsp;between&nbsp;the&nbsp;two&nbsp;objects&nbsp;is&nbsp;more&nbsp;than&nbsp;the&nbsp;given&nbsp;delta.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;decimal&nbsp;places&nbsp;(from&nbsp;zero)&nbsp;are&nbsp;usually&nbsp;not&nbsp;the&nbsp;same<br>
+as&nbsp;significant&nbsp;digits&nbsp;(measured&nbsp;from&nbsp;the&nbsp;most&nbsp;significant&nbsp;digit).<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;two&nbsp;objects&nbsp;compare&nbsp;equal&nbsp;then&nbsp;they&nbsp;will&nbsp;automatically<br>
+compare&nbsp;almost&nbsp;equal.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertAlmostEquals"><strong>assertAlmostEquals</strong></a> = assertAlmostEqual(self, first, second, places<font color="#909090">=None</font>, msg<font color="#909090">=None</font>, delta<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;unequal&nbsp;as&nbsp;determined&nbsp;by&nbsp;their<br>
+difference&nbsp;rounded&nbsp;to&nbsp;the&nbsp;given&nbsp;number&nbsp;of&nbsp;decimal&nbsp;places<br>
+(default&nbsp;7)&nbsp;and&nbsp;comparing&nbsp;to&nbsp;zero,&nbsp;or&nbsp;by&nbsp;comparing&nbsp;that&nbsp;the<br>
+difference&nbsp;between&nbsp;the&nbsp;two&nbsp;objects&nbsp;is&nbsp;more&nbsp;than&nbsp;the&nbsp;given&nbsp;delta.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;decimal&nbsp;places&nbsp;(from&nbsp;zero)&nbsp;are&nbsp;usually&nbsp;not&nbsp;the&nbsp;same<br>
+as&nbsp;significant&nbsp;digits&nbsp;(measured&nbsp;from&nbsp;the&nbsp;most&nbsp;significant&nbsp;digit).<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;two&nbsp;objects&nbsp;compare&nbsp;equal&nbsp;then&nbsp;they&nbsp;will&nbsp;automatically<br>
+compare&nbsp;almost&nbsp;equal.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertDictContainsSubset"><strong>assertDictContainsSubset</strong></a>(self, expected, actual, msg<font color="#909090">=None</font>)</dt><dd><tt>Checks&nbsp;whether&nbsp;actual&nbsp;is&nbsp;a&nbsp;superset&nbsp;of&nbsp;expected.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertDictEqual"><strong>assertDictEqual</strong></a>(self, d1, d2, msg<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="DisabledCases-assertEqual"><strong>assertEqual</strong></a>(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;unequal&nbsp;as&nbsp;determined&nbsp;by&nbsp;the&nbsp;'=='<br>
+operator.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertEquals"><strong>assertEquals</strong></a> = assertEqual(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;unequal&nbsp;as&nbsp;determined&nbsp;by&nbsp;the&nbsp;'=='<br>
+operator.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertFalse"><strong>assertFalse</strong></a>(self, expr, msg<font color="#909090">=None</font>)</dt><dd><tt>Check&nbsp;that&nbsp;the&nbsp;expression&nbsp;is&nbsp;false.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertGreater"><strong>assertGreater</strong></a>(self, a, b, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#DisabledCases-assertTrue">assertTrue</a>(a&nbsp;&gt;&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertGreaterEqual"><strong>assertGreaterEqual</strong></a>(self, a, b, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#DisabledCases-assertTrue">assertTrue</a>(a&nbsp;&gt;=&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertIn"><strong>assertIn</strong></a>(self, member, container, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#DisabledCases-assertTrue">assertTrue</a>(a&nbsp;in&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertIs"><strong>assertIs</strong></a>(self, expr1, expr2, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#DisabledCases-assertTrue">assertTrue</a>(a&nbsp;is&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertIsInstance"><strong>assertIsInstance</strong></a>(self, obj, cls, msg<font color="#909090">=None</font>)</dt><dd><tt>Same&nbsp;as&nbsp;<a href="#DisabledCases-assertTrue">assertTrue</a>(isinstance(obj,&nbsp;cls)),&nbsp;with&nbsp;a&nbsp;nicer<br>
+default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertIsNone"><strong>assertIsNone</strong></a>(self, obj, msg<font color="#909090">=None</font>)</dt><dd><tt>Same&nbsp;as&nbsp;<a href="#DisabledCases-assertTrue">assertTrue</a>(obj&nbsp;is&nbsp;None),&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertIsNot"><strong>assertIsNot</strong></a>(self, expr1, expr2, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#DisabledCases-assertTrue">assertTrue</a>(a&nbsp;is&nbsp;not&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertIsNotNone"><strong>assertIsNotNone</strong></a>(self, obj, msg<font color="#909090">=None</font>)</dt><dd><tt>Included&nbsp;for&nbsp;symmetry&nbsp;with&nbsp;assertIsNone.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertItemsEqual"><strong>assertItemsEqual</strong></a>(self, expected_seq, actual_seq, msg<font color="#909090">=None</font>)</dt><dd><tt>An&nbsp;unordered&nbsp;sequence&nbsp;specific&nbsp;comparison.&nbsp;It&nbsp;asserts&nbsp;that<br>
+actual_seq&nbsp;and&nbsp;expected_seq&nbsp;have&nbsp;the&nbsp;same&nbsp;element&nbsp;counts.<br>
+Equivalent&nbsp;to::<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;<a href="#DisabledCases-assertEqual">assertEqual</a>(Counter(iter(actual_seq)),<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Counter(iter(expected_seq)))<br>
+&nbsp;<br>
+Asserts&nbsp;that&nbsp;each&nbsp;element&nbsp;has&nbsp;the&nbsp;same&nbsp;count&nbsp;in&nbsp;both&nbsp;sequences.<br>
+Example:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;-&nbsp;[0,&nbsp;1,&nbsp;1]&nbsp;and&nbsp;[1,&nbsp;0,&nbsp;1]&nbsp;compare&nbsp;equal.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;-&nbsp;[0,&nbsp;0,&nbsp;1]&nbsp;and&nbsp;[0,&nbsp;1]&nbsp;compare&nbsp;unequal.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertLess"><strong>assertLess</strong></a>(self, a, b, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#DisabledCases-assertTrue">assertTrue</a>(a&nbsp;&lt;&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertLessEqual"><strong>assertLessEqual</strong></a>(self, a, b, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#DisabledCases-assertTrue">assertTrue</a>(a&nbsp;&lt;=&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertListEqual"><strong>assertListEqual</strong></a>(self, list1, list2, msg<font color="#909090">=None</font>)</dt><dd><tt>A&nbsp;list-specific&nbsp;equality&nbsp;assertion.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;list1:&nbsp;The&nbsp;first&nbsp;list&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;list2:&nbsp;The&nbsp;second&nbsp;list&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;msg:&nbsp;Optional&nbsp;message&nbsp;to&nbsp;use&nbsp;on&nbsp;failure&nbsp;instead&nbsp;of&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;differences.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertMultiLineEqual"><strong>assertMultiLineEqual</strong></a>(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Assert&nbsp;that&nbsp;two&nbsp;multi-line&nbsp;strings&nbsp;are&nbsp;equal.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertNotAlmostEqual"><strong>assertNotAlmostEqual</strong></a>(self, first, second, places<font color="#909090">=None</font>, msg<font color="#909090">=None</font>, delta<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;equal&nbsp;as&nbsp;determined&nbsp;by&nbsp;their<br>
+difference&nbsp;rounded&nbsp;to&nbsp;the&nbsp;given&nbsp;number&nbsp;of&nbsp;decimal&nbsp;places<br>
+(default&nbsp;7)&nbsp;and&nbsp;comparing&nbsp;to&nbsp;zero,&nbsp;or&nbsp;by&nbsp;comparing&nbsp;that&nbsp;the<br>
+difference&nbsp;between&nbsp;the&nbsp;two&nbsp;objects&nbsp;is&nbsp;less&nbsp;than&nbsp;the&nbsp;given&nbsp;delta.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;decimal&nbsp;places&nbsp;(from&nbsp;zero)&nbsp;are&nbsp;usually&nbsp;not&nbsp;the&nbsp;same<br>
+as&nbsp;significant&nbsp;digits&nbsp;(measured&nbsp;from&nbsp;the&nbsp;most&nbsp;significant&nbsp;digit).<br>
+&nbsp;<br>
+Objects&nbsp;that&nbsp;are&nbsp;equal&nbsp;automatically&nbsp;fail.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertNotAlmostEquals"><strong>assertNotAlmostEquals</strong></a> = assertNotAlmostEqual(self, first, second, places<font color="#909090">=None</font>, msg<font color="#909090">=None</font>, delta<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;equal&nbsp;as&nbsp;determined&nbsp;by&nbsp;their<br>
+difference&nbsp;rounded&nbsp;to&nbsp;the&nbsp;given&nbsp;number&nbsp;of&nbsp;decimal&nbsp;places<br>
+(default&nbsp;7)&nbsp;and&nbsp;comparing&nbsp;to&nbsp;zero,&nbsp;or&nbsp;by&nbsp;comparing&nbsp;that&nbsp;the<br>
+difference&nbsp;between&nbsp;the&nbsp;two&nbsp;objects&nbsp;is&nbsp;less&nbsp;than&nbsp;the&nbsp;given&nbsp;delta.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;decimal&nbsp;places&nbsp;(from&nbsp;zero)&nbsp;are&nbsp;usually&nbsp;not&nbsp;the&nbsp;same<br>
+as&nbsp;significant&nbsp;digits&nbsp;(measured&nbsp;from&nbsp;the&nbsp;most&nbsp;significant&nbsp;digit).<br>
+&nbsp;<br>
+Objects&nbsp;that&nbsp;are&nbsp;equal&nbsp;automatically&nbsp;fail.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertNotEqual"><strong>assertNotEqual</strong></a>(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;equal&nbsp;as&nbsp;determined&nbsp;by&nbsp;the&nbsp;'!='<br>
+operator.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertNotEquals"><strong>assertNotEquals</strong></a> = assertNotEqual(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;equal&nbsp;as&nbsp;determined&nbsp;by&nbsp;the&nbsp;'!='<br>
+operator.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertNotIn"><strong>assertNotIn</strong></a>(self, member, container, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#DisabledCases-assertTrue">assertTrue</a>(a&nbsp;not&nbsp;in&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertNotIsInstance"><strong>assertNotIsInstance</strong></a>(self, obj, cls, msg<font color="#909090">=None</font>)</dt><dd><tt>Included&nbsp;for&nbsp;symmetry&nbsp;with&nbsp;assertIsInstance.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertNotRegexpMatches"><strong>assertNotRegexpMatches</strong></a>(self, text, unexpected_regexp, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;the&nbsp;test&nbsp;if&nbsp;the&nbsp;text&nbsp;matches&nbsp;the&nbsp;regular&nbsp;expression.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertRaises"><strong>assertRaises</strong></a>(self, excClass, callableObj<font color="#909090">=None</font>, *args, **kwargs)</dt><dd><tt>Fail&nbsp;unless&nbsp;an&nbsp;exception&nbsp;of&nbsp;class&nbsp;excClass&nbsp;is&nbsp;raised<br>
+by&nbsp;callableObj&nbsp;when&nbsp;invoked&nbsp;with&nbsp;arguments&nbsp;args&nbsp;and&nbsp;keyword<br>
+arguments&nbsp;kwargs.&nbsp;If&nbsp;a&nbsp;different&nbsp;type&nbsp;of&nbsp;exception&nbsp;is<br>
+raised,&nbsp;it&nbsp;will&nbsp;not&nbsp;be&nbsp;caught,&nbsp;and&nbsp;the&nbsp;test&nbsp;case&nbsp;will&nbsp;be<br>
+deemed&nbsp;to&nbsp;have&nbsp;suffered&nbsp;an&nbsp;error,&nbsp;exactly&nbsp;as&nbsp;for&nbsp;an<br>
+unexpected&nbsp;exception.<br>
+&nbsp;<br>
+If&nbsp;called&nbsp;with&nbsp;callableObj&nbsp;omitted&nbsp;or&nbsp;None,&nbsp;will&nbsp;return&nbsp;a<br>
+context&nbsp;object&nbsp;used&nbsp;like&nbsp;this::<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;with&nbsp;<a href="#DisabledCases-assertRaises">assertRaises</a>(SomeException):<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;do_something()<br>
+&nbsp;<br>
+The&nbsp;context&nbsp;manager&nbsp;keeps&nbsp;a&nbsp;reference&nbsp;to&nbsp;the&nbsp;exception&nbsp;as<br>
+the&nbsp;'exception'&nbsp;attribute.&nbsp;This&nbsp;allows&nbsp;you&nbsp;to&nbsp;inspect&nbsp;the<br>
+exception&nbsp;after&nbsp;the&nbsp;assertion::<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;with&nbsp;<a href="#DisabledCases-assertRaises">assertRaises</a>(SomeException)&nbsp;as&nbsp;cm:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;do_something()<br>
+&nbsp;&nbsp;&nbsp;&nbsp;the_exception&nbsp;=&nbsp;cm.exception<br>
+&nbsp;&nbsp;&nbsp;&nbsp;<a href="#DisabledCases-assertEqual">assertEqual</a>(the_exception.error_code,&nbsp;3)</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertRaisesRegexp"><strong>assertRaisesRegexp</strong></a>(self, expected_exception, expected_regexp, callable_obj<font color="#909090">=None</font>, *args, **kwargs)</dt><dd><tt>Asserts&nbsp;that&nbsp;the&nbsp;message&nbsp;in&nbsp;a&nbsp;raised&nbsp;exception&nbsp;matches&nbsp;a&nbsp;regexp.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;expected_exception:&nbsp;Exception&nbsp;class&nbsp;expected&nbsp;to&nbsp;be&nbsp;raised.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;expected_regexp:&nbsp;Regexp&nbsp;(re&nbsp;pattern&nbsp;object&nbsp;or&nbsp;string)&nbsp;expected<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;to&nbsp;be&nbsp;found&nbsp;in&nbsp;error&nbsp;message.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;callable_obj:&nbsp;Function&nbsp;to&nbsp;be&nbsp;called.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;args:&nbsp;Extra&nbsp;args.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;kwargs:&nbsp;Extra&nbsp;kwargs.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertRegexpMatches"><strong>assertRegexpMatches</strong></a>(self, text, expected_regexp, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;the&nbsp;test&nbsp;unless&nbsp;the&nbsp;text&nbsp;matches&nbsp;the&nbsp;regular&nbsp;expression.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertSequenceEqual"><strong>assertSequenceEqual</strong></a>(self, seq1, seq2, msg<font color="#909090">=None</font>, seq_type<font color="#909090">=None</font>)</dt><dd><tt>An&nbsp;equality&nbsp;assertion&nbsp;for&nbsp;ordered&nbsp;sequences&nbsp;(like&nbsp;lists&nbsp;and&nbsp;tuples).<br>
+&nbsp;<br>
+For&nbsp;the&nbsp;purposes&nbsp;of&nbsp;this&nbsp;function,&nbsp;a&nbsp;valid&nbsp;ordered&nbsp;sequence&nbsp;type&nbsp;is&nbsp;one<br>
+which&nbsp;can&nbsp;be&nbsp;indexed,&nbsp;has&nbsp;a&nbsp;length,&nbsp;and&nbsp;has&nbsp;an&nbsp;equality&nbsp;operator.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;seq1:&nbsp;The&nbsp;first&nbsp;sequence&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;seq2:&nbsp;The&nbsp;second&nbsp;sequence&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;seq_type:&nbsp;The&nbsp;expected&nbsp;datatype&nbsp;of&nbsp;the&nbsp;sequences,&nbsp;or&nbsp;None&nbsp;if&nbsp;no<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;datatype&nbsp;should&nbsp;be&nbsp;enforced.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;msg:&nbsp;Optional&nbsp;message&nbsp;to&nbsp;use&nbsp;on&nbsp;failure&nbsp;instead&nbsp;of&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;differences.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertSetEqual"><strong>assertSetEqual</strong></a>(self, set1, set2, msg<font color="#909090">=None</font>)</dt><dd><tt>A&nbsp;set-specific&nbsp;equality&nbsp;assertion.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;set1:&nbsp;The&nbsp;first&nbsp;set&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;set2:&nbsp;The&nbsp;second&nbsp;set&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;msg:&nbsp;Optional&nbsp;message&nbsp;to&nbsp;use&nbsp;on&nbsp;failure&nbsp;instead&nbsp;of&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;differences.<br>
+&nbsp;<br>
+assertSetEqual&nbsp;uses&nbsp;duck&nbsp;typing&nbsp;to&nbsp;support&nbsp;different&nbsp;types&nbsp;of&nbsp;sets,&nbsp;and<br>
+is&nbsp;optimized&nbsp;for&nbsp;sets&nbsp;specifically&nbsp;(parameters&nbsp;must&nbsp;support&nbsp;a<br>
+difference&nbsp;method).</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertTrue"><strong>assertTrue</strong></a>(self, expr, msg<font color="#909090">=None</font>)</dt><dd><tt>Check&nbsp;that&nbsp;the&nbsp;expression&nbsp;is&nbsp;true.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assertTupleEqual"><strong>assertTupleEqual</strong></a>(self, tuple1, tuple2, msg<font color="#909090">=None</font>)</dt><dd><tt>A&nbsp;tuple-specific&nbsp;equality&nbsp;assertion.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;tuple1:&nbsp;The&nbsp;first&nbsp;tuple&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;tuple2:&nbsp;The&nbsp;second&nbsp;tuple&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;msg:&nbsp;Optional&nbsp;message&nbsp;to&nbsp;use&nbsp;on&nbsp;failure&nbsp;instead&nbsp;of&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;differences.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-assert_"><strong>assert_</strong></a> = assertTrue(self, expr, msg<font color="#909090">=None</font>)</dt><dd><tt>Check&nbsp;that&nbsp;the&nbsp;expression&nbsp;is&nbsp;true.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-countTestCases"><strong>countTestCases</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DisabledCases-debug"><strong>debug</strong></a>(self)</dt><dd><tt>Run&nbsp;the&nbsp;test&nbsp;without&nbsp;collecting&nbsp;errors&nbsp;in&nbsp;a&nbsp;TestResult</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-defaultTestResult"><strong>defaultTestResult</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DisabledCases-doCleanups"><strong>doCleanups</strong></a>(self)</dt><dd><tt>Execute&nbsp;all&nbsp;cleanup&nbsp;functions.&nbsp;Normally&nbsp;called&nbsp;for&nbsp;you&nbsp;after<br>
+tearDown.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-fail"><strong>fail</strong></a>(self, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;immediately,&nbsp;with&nbsp;the&nbsp;given&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-failIf"><strong>failIf</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="DisabledCases-failIfAlmostEqual"><strong>failIfAlmostEqual</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="DisabledCases-failIfEqual"><strong>failIfEqual</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="DisabledCases-failUnless"><strong>failUnless</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="DisabledCases-failUnlessAlmostEqual"><strong>failUnlessAlmostEqual</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="DisabledCases-failUnlessEqual"><strong>failUnlessEqual</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="DisabledCases-failUnlessRaises"><strong>failUnlessRaises</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="DisabledCases-id"><strong>id</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="DisabledCases-run"><strong>run</strong></a>(self, result<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="DisabledCases-setUp"><strong>setUp</strong></a>(self)</dt><dd><tt>Hook&nbsp;method&nbsp;for&nbsp;setting&nbsp;up&nbsp;the&nbsp;test&nbsp;fixture&nbsp;before&nbsp;exercising&nbsp;it.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-shortDescription"><strong>shortDescription</strong></a>(self)</dt><dd><tt>Returns&nbsp;a&nbsp;one-line&nbsp;description&nbsp;of&nbsp;the&nbsp;test,&nbsp;or&nbsp;None&nbsp;if&nbsp;no<br>
+description&nbsp;has&nbsp;been&nbsp;provided.<br>
+&nbsp;<br>
+The&nbsp;default&nbsp;implementation&nbsp;of&nbsp;this&nbsp;method&nbsp;returns&nbsp;the&nbsp;first&nbsp;line&nbsp;of<br>
+the&nbsp;specified&nbsp;test&nbsp;method's&nbsp;docstring.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-skipTest"><strong>skipTest</strong></a>(self, reason)</dt><dd><tt>Skip&nbsp;this&nbsp;test.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-tearDown"><strong>tearDown</strong></a>(self)</dt><dd><tt>Hook&nbsp;method&nbsp;for&nbsp;deconstructing&nbsp;the&nbsp;test&nbsp;fixture&nbsp;after&nbsp;testing&nbsp;it.</tt></dd></dl>
+
+<hr>
+Class methods inherited from <a href="unittest.case.html#TestCase">unittest.case.TestCase</a>:<br>
+<dl><dt><a name="DisabledCases-setUpClass"><strong>setUpClass</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Hook&nbsp;method&nbsp;for&nbsp;setting&nbsp;up&nbsp;class&nbsp;fixture&nbsp;before&nbsp;running&nbsp;tests&nbsp;in&nbsp;the&nbsp;class.</tt></dd></dl>
+
+<dl><dt><a name="DisabledCases-tearDownClass"><strong>tearDownClass</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Hook&nbsp;method&nbsp;for&nbsp;deconstructing&nbsp;the&nbsp;class&nbsp;fixture&nbsp;after&nbsp;running&nbsp;all&nbsp;tests&nbsp;in&nbsp;the&nbsp;class.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="unittest.case.html#TestCase">unittest.case.TestCase</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="unittest.case.html#TestCase">unittest.case.TestCase</a>:<br>
+<dl><dt><strong>failureException</strong> = &lt;type 'exceptions.AssertionError'&gt;<dd><tt>Assertion&nbsp;failed.</tt></dl>
+
+<dl><dt><strong>longMessage</strong> = False</dl>
+
+<dl><dt><strong>maxDiff</strong> = 640</dl>
+
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
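Note: the DisabledCases class documented above exists only to exercise the Enabled/Disabled annotations from telemetry.decorators. As a hypothetical illustration (the class name and decorator arguments below are assumptions inferred from the test method names, not taken from this patch), such annotated test methods are written roughly like this:

import unittest

from telemetry import decorators


class ExampleAnnotatedCases(unittest.TestCase):
  # Assumed form: skip this test on every platform.
  @decorators.Disabled('all')
  def testAllDisabled(self):
    pass

  # Assumed form: run this test only on Mac.
  @decorators.Enabled('mac')
  def testMacOnly(self):
    pass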
diff --git a/catapult/telemetry/docs/pydoc/telemetry.testing.fakes.html b/catapult/telemetry/docs/pydoc/telemetry.testing.fakes.html
new file mode 100644
index 0000000..bc998a1
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.testing.fakes.html
@@ -0,0 +1,367 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.testing.fakes</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.testing.html"><font color="#ffffff">testing</font></a>.fakes</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/testing/fakes/__init__.py">telemetry/testing/fakes/__init__.py</a></font></td></tr></table>
+    <p><tt>Provides&nbsp;fakes&nbsp;for&nbsp;several&nbsp;of&nbsp;Telemetry's&nbsp;internal&nbsp;objects.<br>
+&nbsp;<br>
+These&nbsp;allow&nbsp;code&nbsp;like&nbsp;story_runner&nbsp;and&nbsp;Benchmark&nbsp;to&nbsp;be&nbsp;run&nbsp;and&nbsp;tested<br>
+without&nbsp;compiling&nbsp;or&nbsp;starting&nbsp;a&nbsp;browser.&nbsp;Class&nbsp;names&nbsp;prepended&nbsp;with&nbsp;an<br>
+underscore&nbsp;are&nbsp;intended&nbsp;to&nbsp;be&nbsp;implementation&nbsp;details,&nbsp;and&nbsp;should&nbsp;not<br>
+be&nbsp;subclassed;&nbsp;however,&nbsp;some,&nbsp;like&nbsp;_FakeBrowser,&nbsp;have&nbsp;public&nbsp;APIs&nbsp;that<br>
+may&nbsp;need&nbsp;to&nbsp;be&nbsp;called&nbsp;in&nbsp;tests.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.testing.fakes.html#FakeHTTPServer">FakeHTTPServer</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.testing.fakes.html#FakeInspectorWebsocket">FakeInspectorWebsocket</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.testing.fakes.html#FakePlatform">FakePlatform</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.testing.fakes.html#FakeLinuxPlatform">FakeLinuxPlatform</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.testing.fakes.html#FakePossibleBrowser">FakePossibleBrowser</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.platform.system_info.html#SystemInfo">telemetry.internal.platform.system_info.SystemInfo</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.testing.fakes.html#FakeSystemInfo">FakeSystemInfo</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.page.shared_page_state.html#SharedPageState">telemetry.page.shared_page_state.SharedPageState</a>(<a href="telemetry.story.shared_state.html#SharedState">telemetry.story.shared_state.SharedState</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.testing.fakes.html#FakeSharedPageState">FakeSharedPageState</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="FakeHTTPServer">class <strong>FakeHTTPServer</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="FakeHTTPServer-UrlOf"><strong>UrlOf</strong></a>(self, url)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="FakeInspectorWebsocket">class <strong>FakeInspectorWebsocket</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="FakeInspectorWebsocket-AddAsyncResponse"><strong>AddAsyncResponse</strong></a>(self, method, result, time)</dt></dl>
+
+<dl><dt><a name="FakeInspectorWebsocket-AddEvent"><strong>AddEvent</strong></a>(self, method, params, time)</dt></dl>
+
+<dl><dt><a name="FakeInspectorWebsocket-AddResponseHandler"><strong>AddResponseHandler</strong></a>(self, method, handler)</dt></dl>
+
+<dl><dt><a name="FakeInspectorWebsocket-AsyncRequest"><strong>AsyncRequest</strong></a>(self, request, callback)</dt></dl>
+
+<dl><dt><a name="FakeInspectorWebsocket-Connect"><strong>Connect</strong></a>(self, _)</dt></dl>
+
+<dl><dt><a name="FakeInspectorWebsocket-DispatchNotifications"><strong>DispatchNotifications</strong></a>(self, timeout)</dt></dl>
+
+<dl><dt><a name="FakeInspectorWebsocket-RegisterDomain"><strong>RegisterDomain</strong></a>(self, _, handler)</dt></dl>
+
+<dl><dt><a name="FakeInspectorWebsocket-SendAndIgnoreResponse"><strong>SendAndIgnoreResponse</strong></a>(self, request)</dt></dl>
+
+<dl><dt><a name="FakeInspectorWebsocket-SyncRequest"><strong>SyncRequest</strong></a>(self, request, *_args, **_kwargs)</dt></dl>
+
+<dl><dt><a name="FakeInspectorWebsocket-__init__"><strong>__init__</strong></a>(self, mock_timer)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="FakeLinuxPlatform">class <strong>FakeLinuxPlatform</strong></a>(<a href="telemetry.testing.fakes.html#FakePlatform">FakePlatform</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.testing.fakes.html#FakeLinuxPlatform">FakeLinuxPlatform</a></dd>
+<dd><a href="telemetry.testing.fakes.html#FakePlatform">FakePlatform</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="FakeLinuxPlatform-CanTakeScreenshot"><strong>CanTakeScreenshot</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FakeLinuxPlatform-GetArchName"><strong>GetArchName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FakeLinuxPlatform-GetDeviceTypeName"><strong>GetDeviceTypeName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FakeLinuxPlatform-GetOSName"><strong>GetOSName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FakeLinuxPlatform-GetOSVersionName"><strong>GetOSVersionName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FakeLinuxPlatform-SetHTTPServerDirectories"><strong>SetHTTPServerDirectories</strong></a>(self, paths)</dt></dl>
+
+<dl><dt><a name="FakeLinuxPlatform-TakeScreenshot"><strong>TakeScreenshot</strong></a>(self, file_path)</dt></dl>
+
+<dl><dt><a name="FakeLinuxPlatform-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>is_host_platform</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.testing.fakes.html#FakePlatform">FakePlatform</a>:<br>
+<dl><dt><a name="FakeLinuxPlatform-CanMonitorThermalThrottling"><strong>CanMonitorThermalThrottling</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FakeLinuxPlatform-HasBeenThermallyThrottled"><strong>HasBeenThermallyThrottled</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FakeLinuxPlatform-IsThermallyThrottled"><strong>IsThermallyThrottled</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FakeLinuxPlatform-StopAllLocalServers"><strong>StopAllLocalServers</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.testing.fakes.html#FakePlatform">FakePlatform</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>network_controller</strong></dt>
+</dl>
+<dl><dt><strong>tracing_controller</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="FakePlatform">class <strong>FakePlatform</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="FakePlatform-CanMonitorThermalThrottling"><strong>CanMonitorThermalThrottling</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FakePlatform-GetArchName"><strong>GetArchName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FakePlatform-GetDeviceTypeName"><strong>GetDeviceTypeName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FakePlatform-GetOSName"><strong>GetOSName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FakePlatform-GetOSVersionName"><strong>GetOSVersionName</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FakePlatform-HasBeenThermallyThrottled"><strong>HasBeenThermallyThrottled</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FakePlatform-IsThermallyThrottled"><strong>IsThermallyThrottled</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FakePlatform-StopAllLocalServers"><strong>StopAllLocalServers</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>is_host_platform</strong></dt>
+</dl>
+<dl><dt><strong>network_controller</strong></dt>
+</dl>
+<dl><dt><strong>tracing_controller</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="FakePossibleBrowser">class <strong>FakePossibleBrowser</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="FakePossibleBrowser-Create"><strong>Create</strong></a>(self, finder_options)</dt></dl>
+
+<dl><dt><a name="FakePossibleBrowser-IsRemote"><strong>IsRemote</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FakePossibleBrowser-SetCredentialsPath"><strong>SetCredentialsPath</strong></a>(self, _)</dt></dl>
+
+<dl><dt><a name="FakePossibleBrowser-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+<dd><tt>The&nbsp;platform&nbsp;object&nbsp;from&nbsp;the&nbsp;returned&nbsp;browser.<br>
+&nbsp;<br>
+To&nbsp;change&nbsp;this&nbsp;or&nbsp;set&nbsp;it&nbsp;up,&nbsp;change&nbsp;the&nbsp;returned&nbsp;browser's<br>
+platform.</tt></dd>
+</dl>
+<dl><dt><strong>returned_browser</strong></dt>
+<dd><tt>The&nbsp;browser&nbsp;object&nbsp;that&nbsp;will&nbsp;be&nbsp;returned&nbsp;through&nbsp;later&nbsp;API&nbsp;calls.</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="FakeSharedPageState">class <strong>FakeSharedPageState</strong></a>(<a href="telemetry.page.shared_page_state.html#SharedPageState">telemetry.page.shared_page_state.SharedPageState</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.testing.fakes.html#FakeSharedPageState">FakeSharedPageState</a></dd>
+<dd><a href="telemetry.page.shared_page_state.html#SharedPageState">telemetry.page.shared_page_state.SharedPageState</a></dd>
+<dd><a href="telemetry.story.shared_state.html#SharedState">telemetry.story.shared_state.SharedState</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="FakeSharedPageState-ConfigurePossibleBrowser"><strong>ConfigurePossibleBrowser</strong></a>(self, possible_browser)</dt><dd><tt>Override&nbsp;this&nbsp;to&nbsp;configure&nbsp;the&nbsp;PossibleBrowser.<br>
+&nbsp;<br>
+Can&nbsp;make&nbsp;changes&nbsp;to&nbsp;the&nbsp;browser's&nbsp;configuration&nbsp;here&nbsp;via&nbsp;e.g.:<br>
+&nbsp;&nbsp;&nbsp;possible_browser.returned_browser.returned_system_info&nbsp;=&nbsp;...</tt></dd></dl>
+
+<dl><dt><a name="FakeSharedPageState-DidRunStory"><strong>DidRunStory</strong></a>(self, results)</dt></dl>
+
+<dl><dt><a name="FakeSharedPageState-__init__"><strong>__init__</strong></a>(self, test, finder_options, story_set)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.page.shared_page_state.html#SharedPageState">telemetry.page.shared_page_state.SharedPageState</a>:<br>
+<dl><dt><a name="FakeSharedPageState-CanRunOnBrowser"><strong>CanRunOnBrowser</strong></a>(self, browser_info, page)</dt><dd><tt>Override&nbsp;this&nbsp;to&nbsp;return&nbsp;whether&nbsp;the&nbsp;browser&nbsp;brought&nbsp;up&nbsp;by&nbsp;this&nbsp;state<br>
+instance&nbsp;is&nbsp;suitable&nbsp;for&nbsp;running&nbsp;the&nbsp;given&nbsp;page.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;browser_info:&nbsp;an&nbsp;instance&nbsp;of&nbsp;telemetry.core.browser_info.BrowserInfo<br>
+&nbsp;&nbsp;page:&nbsp;an&nbsp;instance&nbsp;of&nbsp;telemetry.page.Page</tt></dd></dl>
+
+<dl><dt><a name="FakeSharedPageState-CanRunStory"><strong>CanRunStory</strong></a>(self, page)</dt></dl>
+
+<dl><dt><a name="FakeSharedPageState-GetPregeneratedProfileArchiveDir"><strong>GetPregeneratedProfileArchiveDir</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FakeSharedPageState-RunStory"><strong>RunStory</strong></a>(self, results)</dt></dl>
+
+<dl><dt><a name="FakeSharedPageState-SetPregeneratedProfileArchiveDir"><strong>SetPregeneratedProfileArchiveDir</strong></a>(self, archive_path)</dt><dd><tt>Benchmarks&nbsp;can&nbsp;set&nbsp;a&nbsp;pre-generated&nbsp;profile&nbsp;archive&nbsp;to&nbsp;indicate&nbsp;that&nbsp;when<br>
+Chrome&nbsp;is&nbsp;launched,&nbsp;it&nbsp;should&nbsp;have&nbsp;a&nbsp;--user-data-dir&nbsp;set&nbsp;to&nbsp;the<br>
+pregenerated&nbsp;profile,&nbsp;rather&nbsp;than&nbsp;to&nbsp;an&nbsp;empty&nbsp;profile.<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;benchmark&nbsp;is&nbsp;invoked&nbsp;with&nbsp;the&nbsp;option&nbsp;--profile-dir=&lt;dir&gt;,&nbsp;that<br>
+option&nbsp;overrides&nbsp;this&nbsp;value.</tt></dd></dl>
+
+<dl><dt><a name="FakeSharedPageState-TearDownState"><strong>TearDownState</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FakeSharedPageState-WillRunStory"><strong>WillRunStory</strong></a>(self, page)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.page.shared_page_state.html#SharedPageState">telemetry.page.shared_page_state.SharedPageState</a>:<br>
+<dl><dt><strong>browser</strong></dt>
+</dl>
+<dl><dt><strong>current_page</strong></dt>
+</dl>
+<dl><dt><strong>current_tab</strong></dt>
+</dl>
+<dl><dt><strong>page_test</strong></dt>
+</dl>
+<dl><dt><strong>platform</strong></dt>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.story.shared_state.html#SharedState">telemetry.story.shared_state.SharedState</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="FakeSystemInfo">class <strong>FakeSystemInfo</strong></a>(<a href="telemetry.internal.platform.system_info.html#SystemInfo">telemetry.internal.platform.system_info.SystemInfo</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.testing.fakes.html#FakeSystemInfo">FakeSystemInfo</a></dd>
+<dd><a href="telemetry.internal.platform.system_info.html#SystemInfo">telemetry.internal.platform.system_info.SystemInfo</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="FakeSystemInfo-__init__"><strong>__init__</strong></a>(self, model_name<font color="#909090">=''</font>, gpu_dict<font color="#909090">=None</font>)</dt></dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.platform.system_info.html#SystemInfo">telemetry.internal.platform.system_info.SystemInfo</a>:<br>
+<dl><dt><a name="FakeSystemInfo-FromDict"><strong>FromDict</strong></a>(cls, attrs)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Constructs&nbsp;a&nbsp;<a href="telemetry.internal.platform.system_info.html#SystemInfo">SystemInfo</a>&nbsp;from&nbsp;a&nbsp;dictionary&nbsp;of&nbsp;attributes.<br>
+Attributes&nbsp;currently&nbsp;required&nbsp;to&nbsp;be&nbsp;present&nbsp;in&nbsp;the&nbsp;dictionary:<br>
+&nbsp;<br>
+&nbsp;&nbsp;model_name&nbsp;(string):&nbsp;a&nbsp;platform-dependent&nbsp;string<br>
+&nbsp;&nbsp;&nbsp;&nbsp;describing&nbsp;the&nbsp;model&nbsp;of&nbsp;machine,&nbsp;or&nbsp;the&nbsp;empty&nbsp;string&nbsp;if&nbsp;not<br>
+&nbsp;&nbsp;&nbsp;&nbsp;supported.<br>
+&nbsp;&nbsp;gpu&nbsp;(<a href="__builtin__.html#object">object</a>&nbsp;containing&nbsp;GPUInfo's&nbsp;required&nbsp;attributes)</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.platform.system_info.html#SystemInfo">telemetry.internal.platform.system_info.SystemInfo</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>gpu</strong></dt>
+<dd><tt>A&nbsp;GPUInfo&nbsp;object&nbsp;describing&nbsp;the&nbsp;graphics&nbsp;processor(s)&nbsp;on&nbsp;the&nbsp;system.</tt></dd>
+</dl>
+<dl><dt><strong>model_name</strong></dt>
+<dd><tt>A&nbsp;string&nbsp;describing&nbsp;the&nbsp;machine&nbsp;model.<br>
+&nbsp;<br>
+This&nbsp;is&nbsp;a&nbsp;highly&nbsp;platform-dependent&nbsp;value&nbsp;and&nbsp;not&nbsp;currently<br>
+specified&nbsp;for&nbsp;any&nbsp;machine&nbsp;type&nbsp;aside&nbsp;from&nbsp;Macs.&nbsp;On&nbsp;Mac&nbsp;OS,&nbsp;this<br>
+is&nbsp;the&nbsp;model&nbsp;identifier,&nbsp;reformatted&nbsp;slightly;&nbsp;for&nbsp;example,<br>
+'MacBookPro&nbsp;10.1'.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-CreateBrowserFinderOptions"><strong>CreateBrowserFinderOptions</strong></a>(browser_type<font color="#909090">=None</font>)</dt><dd><tt>Creates&nbsp;fake&nbsp;browser&nbsp;finder&nbsp;options&nbsp;for&nbsp;discovering&nbsp;a&nbsp;browser.</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
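Note: the fakes package documented above is meant to let story_runner- and Benchmark-level code run in tests without starting a real browser. A minimal usage sketch, assuming only the names listed on that page (CreateBrowserFinderOptions, FakePossibleBrowser with its Create method and returned_browser property), might look like:

from telemetry.testing import fakes


def create_fake_browser():
  # Fake finder options for discovering a browser (see Functions above).
  options = fakes.CreateBrowserFinderOptions()
  possible_browser = fakes.FakePossibleBrowser()
  # returned_browser (see the data descriptors above) exposes the browser
  # object that later API calls hand back, so a test can adjust it first.
  browser = possible_browser.Create(options)
  return options, browser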
diff --git a/catapult/telemetry/docs/pydoc/telemetry.testing.gtest_progress_reporter.html b/catapult/telemetry/docs/pydoc/telemetry.testing.gtest_progress_reporter.html
new file mode 100644
index 0000000..99b8a57
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.testing.gtest_progress_reporter.html
@@ -0,0 +1,90 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.testing.gtest_progress_reporter</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.testing.html"><font color="#ffffff">testing</font></a>.gtest_progress_reporter</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/testing/gtest_progress_reporter.py">telemetry/testing/gtest_progress_reporter.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.util.exception_formatter.html">telemetry.internal.util.exception_formatter</a><br>
+<a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="telemetry.testing.progress_reporter.html">telemetry.testing.progress_reporter</a><br>
+<a href="time.html">time</a><br>
+</td><td width="25%" valign=top><a href="unittest.html">unittest</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.testing.progress_reporter.html#ProgressReporter">telemetry.testing.progress_reporter.ProgressReporter</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.testing.gtest_progress_reporter.html#GTestProgressReporter">GTestProgressReporter</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="GTestProgressReporter">class <strong>GTestProgressReporter</strong></a>(<a href="telemetry.testing.progress_reporter.html#ProgressReporter">telemetry.testing.progress_reporter.ProgressReporter</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.testing.gtest_progress_reporter.html#GTestProgressReporter">GTestProgressReporter</a></dd>
+<dd><a href="telemetry.testing.progress_reporter.html#ProgressReporter">telemetry.testing.progress_reporter.ProgressReporter</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="GTestProgressReporter-Error"><strong>Error</strong></a>(self, test, err)</dt></dl>
+
+<dl><dt><a name="GTestProgressReporter-Failure"><strong>Failure</strong></a>(self, test, err)</dt></dl>
+
+<dl><dt><a name="GTestProgressReporter-Skip"><strong>Skip</strong></a>(self, test, reason)</dt></dl>
+
+<dl><dt><a name="GTestProgressReporter-StartTest"><strong>StartTest</strong></a>(self, test)</dt></dl>
+
+<dl><dt><a name="GTestProgressReporter-StartTestSuite"><strong>StartTestSuite</strong></a>(self, suite)</dt></dl>
+
+<dl><dt><a name="GTestProgressReporter-StopTestRun"><strong>StopTestRun</strong></a>(self, result)</dt></dl>
+
+<dl><dt><a name="GTestProgressReporter-StopTestSuite"><strong>StopTestSuite</strong></a>(self, suite)</dt></dl>
+
+<dl><dt><a name="GTestProgressReporter-Success"><strong>Success</strong></a>(self, test)</dt></dl>
+
+<dl><dt><a name="GTestProgressReporter-__init__"><strong>__init__</strong></a>(self, output_stream)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.testing.progress_reporter.html#ProgressReporter">telemetry.testing.progress_reporter.ProgressReporter</a>:<br>
+<dl><dt><a name="GTestProgressReporter-StartTestRun"><strong>StartTestRun</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="GTestProgressReporter-StopTest"><strong>StopTest</strong></a>(self, test)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.testing.progress_reporter.html#ProgressReporter">telemetry.testing.progress_reporter.ProgressReporter</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
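The page above lists the full surface of telemetry.testing.gtest_progress_reporter.GTestProgressReporter: a constructor taking an output stream plus the Start*/Stop*/Success/Failure/Error/Skip callbacks it shares with ProgressReporter. Below is a minimal sketch of exercising it directly, using only the signatures shown in that pydoc; in practice telemetry's test runner owns the reporter and makes these calls itself.

```
# Sketch only: calls are limited to the methods listed in the pydoc above.
# The GTest-style "[ RUN ]" / "[ OK ]" output mentioned in the comments is the
# expected behaviour of this reporter, not something verified here.
import sys
import unittest

from telemetry.testing import gtest_progress_reporter


class _ExampleTest(unittest.TestCase):
  def runTest(self):
    pass


reporter = gtest_progress_reporter.GTestProgressReporter(sys.stdout)
test = _ExampleTest()

reporter.StartTest(test)  # expected to emit a "[ RUN      ] ..." line
reporter.Success(test)    # expected to emit a "[       OK ] ..." line
# StartTestRun/StopTestRun and the suite hooks are normally driven by the test
# runner, which also supplies the result object that StopTestRun(result) expects.
```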
diff --git a/catapult/telemetry/docs/pydoc/telemetry.testing.html b/catapult/telemetry/docs/pydoc/telemetry.testing.html
new file mode 100644
index 0000000..e27124c
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.testing.html
@@ -0,0 +1,47 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.testing</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.testing</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/testing/__init__.py">telemetry/testing/__init__.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.testing.browser_test_case.html">browser_test_case</a><br>
+<a href="telemetry.testing.decorators_unittest.html">decorators_unittest</a><br>
+<a href="telemetry.testing.disabled_cases.html">disabled_cases</a><br>
+<a href="telemetry.testing.fakes.html"><strong>fakes</strong>&nbsp;(package)</a><br>
+<a href="telemetry.testing.gtest_progress_reporter.html">gtest_progress_reporter</a><br>
+<a href="telemetry.testing.gtest_progress_reporter_unittest.html">gtest_progress_reporter_unittest</a><br>
+</td><td width="25%" valign=top><a href="telemetry.testing.internal.html"><strong>internal</strong>&nbsp;(package)</a><br>
+<a href="telemetry.testing.options_for_unittests.html">options_for_unittests</a><br>
+<a href="telemetry.testing.page_test_test_case.html">page_test_test_case</a><br>
+<a href="telemetry.testing.progress_reporter.html">progress_reporter</a><br>
+<a href="telemetry.testing.progress_reporter_unittest.html">progress_reporter_unittest</a><br>
+<a href="telemetry.testing.run_chromeos_tests.html">run_chromeos_tests</a><br>
+</td><td width="25%" valign=top><a href="telemetry.testing.run_tests.html">run_tests</a><br>
+<a href="telemetry.testing.run_tests_unittest.html">run_tests_unittest</a><br>
+<a href="telemetry.testing.simple_mock.html">simple_mock</a><br>
+<a href="telemetry.testing.simple_mock_unittest.html">simple_mock_unittest</a><br>
+<a href="telemetry.testing.story_set_smoke_test.html">story_set_smoke_test</a><br>
+<a href="telemetry.testing.stream.html">stream</a><br>
+</td><td width="25%" valign=top><a href="telemetry.testing.system_stub.html">system_stub</a><br>
+<a href="telemetry.testing.system_stub_unittest.html">system_stub_unittest</a><br>
+<a href="telemetry.testing.tab_test_case.html">tab_test_case</a><br>
+<a href="telemetry.testing.test_page_test_results.html">test_page_test_results</a><br>
+<a href="telemetry.testing.unittest_runner.html">unittest_runner</a><br>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.testing.internal.fake_gpu_info.html b/catapult/telemetry/docs/pydoc/telemetry.testing.internal.fake_gpu_info.html
new file mode 100644
index 0000000..7407598
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.testing.internal.fake_gpu_info.html
@@ -0,0 +1,24 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.testing.internal.fake_gpu_info</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.testing.html"><font color="#ffffff">testing</font></a>.<a href="telemetry.testing.internal.html"><font color="#ffffff">internal</font></a>.fake_gpu_info</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/testing/internal/fake_gpu_info.py">telemetry/testing/internal/fake_gpu_info.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>FAKE_GPU_INFO</strong> = {'aux_attributes': {'adapter_luid': 0.0, 'amd_switchable': False, 'basic_info_state': 1, 'can_lose_context': False, 'context_info_state': 1, 'direct_rendering': True, 'driver_date': '', 'driver_vendor': 'NVIDIA', 'driver_version': '331.79', 'gl_extensions': 'GL_AMD_multi_draw_indirect GL_ARB_arrays_of_arra..._depth_texture GL_SGIX_shadow GL_SUN_slice_accum ', ...}, 'devices': [{'device_id': 3576.0, 'device_string': '', 'vendor_id': 4318.0, 'vendor_string': ''}], 'driver_bug_workarounds': ['clear_uniforms_before_first_program_use', 'disable_gl_path_rendering', 'init_gl_position_in_vertex_shader', 'init_vertex_attributes', 'remove_pow_with_constant_exponent', 'scalarize_vec_and_mat_constructor_args', 'use_current_program_after_successful_link', 'use_virtualized_gl_contexts'], 'feature_status': {'2d_canvas': 'unavailable_software', 'flash_3d': 'enabled', 'flash_stage3d': 'enabled', 'flash_stage3d_baseline': 'enabled', 'gpu_compositing': 'enabled', 'multiple_raster_threads': 'enabled_on', 'rasterization': 'disabled_software', 'video_decode': 'unavailable_software', 'video_encode': 'enabled', 'webgl': 'enabled'}}</td></tr></table>
+</body></html>
\ No newline at end of file
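fake_gpu_info, documented above, exports a single constant: FAKE_GPU_INFO, a dict shaped like the GPU info Chrome reports (aux_attributes, devices, driver_bug_workarounds, feature_status). A short sketch of reading it in a test stub, touching only the keys visible in the rendered value:

```
# Sketch: reads FAKE_GPU_INFO using only keys shown in the pydoc above.
from telemetry.testing.internal import fake_gpu_info

info = fake_gpu_info.FAKE_GPU_INFO

primary_gpu = info['devices'][0]
driver_version = info['aux_attributes']['driver_version']  # e.g. '331.79'
webgl_status = info['feature_status']['webgl']             # e.g. 'enabled'

print('vendor=0x%x device=0x%x driver=%s webgl=%s' % (
    int(primary_gpu['vendor_id']), int(primary_gpu['device_id']),
    driver_version, webgl_status))
```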
diff --git a/catapult/telemetry/docs/pydoc/telemetry.testing.internal.html b/catapult/telemetry/docs/pydoc/telemetry.testing.internal.html
new file mode 100644
index 0000000..df23179
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.testing.internal.html
@@ -0,0 +1,25 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.testing.internal</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.testing.html"><font color="#ffffff">testing</font></a>.internal</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/testing/internal/__init__.py">telemetry/testing/internal/__init__.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.testing.internal.fake_gpu_info.html">fake_gpu_info</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.testing.options_for_unittests.html b/catapult/telemetry/docs/pydoc/telemetry.testing.options_for_unittests.html
new file mode 100644
index 0000000..f07a4df
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.testing.options_for_unittests.html
@@ -0,0 +1,32 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.testing.options_for_unittests</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.testing.html"><font color="#ffffff">testing</font></a>.options_for_unittests</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/testing/options_for_unittests.py">telemetry/testing/options_for_unittests.py</a></font></td></tr></table>
+    <p><tt>This&nbsp;module&nbsp;provides&nbsp;the&nbsp;global&nbsp;variable&nbsp;options_for_unittests.<br>
+&nbsp;<br>
+This&nbsp;is&nbsp;set&nbsp;to&nbsp;a&nbsp;BrowserOptions&nbsp;object&nbsp;by&nbsp;the&nbsp;test&nbsp;harness,&nbsp;or&nbsp;None<br>
+if&nbsp;unit&nbsp;tests&nbsp;are&nbsp;not&nbsp;running.<br>
+&nbsp;<br>
+This&nbsp;allows&nbsp;multiple&nbsp;unit&nbsp;tests&nbsp;to&nbsp;use&nbsp;a&nbsp;specific<br>
+browser,&nbsp;in&nbsp;the&nbsp;face&nbsp;of&nbsp;multiple&nbsp;options.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-AreSet"><strong>AreSet</strong></a>()</dt></dl>
+ <dl><dt><a name="-GetCopy"><strong>GetCopy</strong></a>()</dt></dl>
+ <dl><dt><a name="-Pop"><strong>Pop</strong></a>()</dt></dl>
+ <dl><dt><a name="-Push"><strong>Push</strong></a>(options)</dt></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
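As its docstring above says, options_for_unittests holds the BrowserOptions the test harness is running with, behind four functions: Push(options), Pop(), GetCopy() and AreSet(). A sketch of the implied push/pop discipline, assuming nothing beyond those four names; the harness normally performs the Push/Pop itself and tests only read a copy:

```
# Sketch of the Push/Pop pattern implied by the functions listed above.
# RunWithOptions is a hypothetical helper, not part of telemetry.
from telemetry.testing import options_for_unittests


def RunWithOptions(options, test_callable):
  options_for_unittests.Push(options)
  try:
    return test_callable()
  finally:
    options_for_unittests.Pop()


def ReadOptionsInTest():
  if not options_for_unittests.AreSet():
    return None  # unit tests are not running under the harness
  return options_for_unittests.GetCopy()  # a copy is safe to mutate per test
```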
diff --git a/catapult/telemetry/docs/pydoc/telemetry.testing.page_test_test_case.html b/catapult/telemetry/docs/pydoc/telemetry.testing.page_test_test_case.html
new file mode 100644
index 0000000..6cad5ca
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.testing.page_test_test_case.html
@@ -0,0 +1,490 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.testing.page_test_test_case</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.testing.html"><font color="#ffffff">testing</font></a>.page_test_test_case</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/testing/page_test_test_case.py">telemetry/testing/page_test_test_case.py</a></font></td></tr></table>
+    <p><tt>Provide&nbsp;a&nbsp;<a href="unittest.case.html#TestCase">TestCase</a>&nbsp;base&nbsp;class&nbsp;for&nbsp;PageTest&nbsp;subclasses'&nbsp;unittests.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.benchmark.html">telemetry.benchmark</a><br>
+<a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+<a href="telemetry.testing.options_for_unittests.html">telemetry.testing.options_for_unittests</a><br>
+</td><td width="25%" valign=top><a href="telemetry.page.page.html">telemetry.page.page</a><br>
+<a href="telemetry.page.page_test.html">telemetry.page.page_test</a><br>
+<a href="telemetry.internal.results.results_options.html">telemetry.internal.results.results_options</a><br>
+</td><td width="25%" valign=top><a href="telemetry.story.html">telemetry.story</a><br>
+<a href="telemetry.internal.story_runner.html">telemetry.internal.story_runner</a><br>
+<a href="unittest.html">unittest</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.benchmark.html#BenchmarkMetadata">telemetry.benchmark.BenchmarkMetadata</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.testing.page_test_test_case.html#EmptyMetadataForTest">EmptyMetadataForTest</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.page.html#Page">telemetry.page.Page</a>(<a href="telemetry.story.story.html#Story">telemetry.story.story.Story</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.testing.page_test_test_case.html#BasicTestPage">BasicTestPage</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="unittest.case.html#TestCase">unittest.case.TestCase</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.testing.page_test_test_case.html#PageTestTestCase">PageTestTestCase</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="BasicTestPage">class <strong>BasicTestPage</strong></a>(<a href="telemetry.page.html#Page">telemetry.page.Page</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.testing.page_test_test_case.html#BasicTestPage">BasicTestPage</a></dd>
+<dd><a href="telemetry.page.html#Page">telemetry.page.Page</a></dd>
+<dd><a href="telemetry.story.story.html#Story">telemetry.story.story.Story</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="BasicTestPage-RunPageInteractions"><strong>RunPageInteractions</strong></a>(self, action_runner)</dt></dl>
+
+<dl><dt><a name="BasicTestPage-__init__"><strong>__init__</strong></a>(self, url, story_set, base_dir)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.page.html#Page">telemetry.page.Page</a>:<br>
+<dl><dt><a name="BasicTestPage-AddCustomizeBrowserOptions"><strong>AddCustomizeBrowserOptions</strong></a>(self, options)</dt><dd><tt>Inherit&nbsp;page&nbsp;overrides&nbsp;this&nbsp;to&nbsp;add&nbsp;customized&nbsp;browser&nbsp;options.</tt></dd></dl>
+
+<dl><dt><a name="BasicTestPage-AsDict"><strong>AsDict</strong></a>(self)</dt><dd><tt>Converts&nbsp;a&nbsp;page&nbsp;object&nbsp;to&nbsp;a&nbsp;dict&nbsp;suitable&nbsp;for&nbsp;JSON&nbsp;output.</tt></dd></dl>
+
+<dl><dt><a name="BasicTestPage-GetSyntheticDelayCategories"><strong>GetSyntheticDelayCategories</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="BasicTestPage-Run"><strong>Run</strong></a>(self, shared_state)</dt></dl>
+
+<dl><dt><a name="BasicTestPage-RunNavigateSteps"><strong>RunNavigateSteps</strong></a>(self, action_runner)</dt></dl>
+
+<dl><dt><a name="BasicTestPage-__cmp__"><strong>__cmp__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="BasicTestPage-__lt__"><strong>__lt__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="BasicTestPage-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.page.html#Page">telemetry.page.Page</a>:<br>
+<dl><dt><strong>base_dir</strong></dt>
+</dl>
+<dl><dt><strong>credentials_path</strong></dt>
+</dl>
+<dl><dt><strong>display_name</strong></dt>
+</dl>
+<dl><dt><strong>file_path</strong></dt>
+<dd><tt>Returns&nbsp;the&nbsp;path&nbsp;of&nbsp;the&nbsp;file,&nbsp;stripping&nbsp;the&nbsp;scheme&nbsp;and&nbsp;query&nbsp;string.</tt></dd>
+</dl>
+<dl><dt><strong>file_path_url</strong></dt>
+<dd><tt>Returns&nbsp;the&nbsp;file&nbsp;path,&nbsp;including&nbsp;the&nbsp;params,&nbsp;query,&nbsp;and&nbsp;fragment.</tt></dd>
+</dl>
+<dl><dt><strong>file_path_url_with_scheme</strong></dt>
+</dl>
+<dl><dt><strong>is_file</strong></dt>
+<dd><tt>Returns&nbsp;True&nbsp;iff&nbsp;this&nbsp;URL&nbsp;points&nbsp;to&nbsp;a&nbsp;file.</tt></dd>
+</dl>
+<dl><dt><strong>page_set</strong></dt>
+</dl>
+<dl><dt><strong>serving_dir</strong></dt>
+</dl>
+<dl><dt><strong>startup_url</strong></dt>
+</dl>
+<dl><dt><strong>story_set</strong></dt>
+</dl>
+<dl><dt><strong>url</strong></dt>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.story.story.html#Story">telemetry.story.story.Story</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>file_safe_name</strong></dt>
+<dd><tt>A&nbsp;version&nbsp;of&nbsp;display_name&nbsp;that's&nbsp;safe&nbsp;to&nbsp;use&nbsp;as&nbsp;a&nbsp;filename.<br>
+&nbsp;<br>
+The&nbsp;default&nbsp;implementation&nbsp;sanitizes&nbsp;special&nbsp;characters&nbsp;with&nbsp;underscores,<br>
+but&nbsp;it's&nbsp;okay&nbsp;to&nbsp;override&nbsp;it&nbsp;with&nbsp;a&nbsp;more&nbsp;specific&nbsp;implementation&nbsp;in<br>
+subclasses.</tt></dd>
+</dl>
+<dl><dt><strong>id</strong></dt>
+</dl>
+<dl><dt><strong>is_local</strong></dt>
+<dd><tt>Returns&nbsp;True&nbsp;iff&nbsp;this&nbsp;story&nbsp;does&nbsp;not&nbsp;require&nbsp;network.</tt></dd>
+</dl>
+<dl><dt><strong>labels</strong></dt>
+</dl>
+<dl><dt><strong>make_javascript_deterministic</strong></dt>
+</dl>
+<dl><dt><strong>name</strong></dt>
+</dl>
+<dl><dt><strong>shared_state_class</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="EmptyMetadataForTest">class <strong>EmptyMetadataForTest</strong></a>(<a href="telemetry.benchmark.html#BenchmarkMetadata">telemetry.benchmark.BenchmarkMetadata</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.testing.page_test_test_case.html#EmptyMetadataForTest">EmptyMetadataForTest</a></dd>
+<dd><a href="telemetry.benchmark.html#BenchmarkMetadata">telemetry.benchmark.BenchmarkMetadata</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="EmptyMetadataForTest-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.benchmark.html#BenchmarkMetadata">telemetry.benchmark.BenchmarkMetadata</a>:<br>
+<dl><dt><a name="EmptyMetadataForTest-AsDict"><strong>AsDict</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.benchmark.html#BenchmarkMetadata">telemetry.benchmark.BenchmarkMetadata</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>description</strong></dt>
+</dl>
+<dl><dt><strong>name</strong></dt>
+</dl>
+<dl><dt><strong>rerun_options</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PageTestTestCase">class <strong>PageTestTestCase</strong></a>(<a href="unittest.case.html#TestCase">unittest.case.TestCase</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;base&nbsp;class&nbsp;to&nbsp;simplify&nbsp;writing&nbsp;unit&nbsp;tests&nbsp;for&nbsp;PageTest&nbsp;subclasses.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.testing.page_test_test_case.html#PageTestTestCase">PageTestTestCase</a></dd>
+<dd><a href="unittest.case.html#TestCase">unittest.case.TestCase</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="PageTestTestCase-CreateEmptyPageSet"><strong>CreateEmptyPageSet</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PageTestTestCase-CreateStorySetFromFileInUnittestDataDir"><strong>CreateStorySetFromFileInUnittestDataDir</strong></a>(self, test_filename)</dt></dl>
+
+<dl><dt><a name="PageTestTestCase-RunMeasurement"><strong>RunMeasurement</strong></a>(self, measurement, ps, options<font color="#909090">=None</font>)</dt><dd><tt>Runs&nbsp;a&nbsp;measurement&nbsp;against&nbsp;a&nbsp;pageset,&nbsp;returning&nbsp;the&nbsp;rows&nbsp;its&nbsp;outputs.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-TestTracingCleanedUp"><strong>TestTracingCleanedUp</strong></a>(self, measurement_class, options<font color="#909090">=None</font>)</dt></dl>
+
+<hr>
+Methods inherited from <a href="unittest.case.html#TestCase">unittest.case.TestCase</a>:<br>
+<dl><dt><a name="PageTestTestCase-__call__"><strong>__call__</strong></a>(self, *args, **kwds)</dt></dl>
+
+<dl><dt><a name="PageTestTestCase-__eq__"><strong>__eq__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="PageTestTestCase-__hash__"><strong>__hash__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PageTestTestCase-__init__"><strong>__init__</strong></a>(self, methodName<font color="#909090">='runTest'</font>)</dt><dd><tt>Create&nbsp;an&nbsp;instance&nbsp;of&nbsp;the&nbsp;class&nbsp;that&nbsp;will&nbsp;use&nbsp;the&nbsp;named&nbsp;test<br>
+method&nbsp;when&nbsp;executed.&nbsp;Raises&nbsp;a&nbsp;ValueError&nbsp;if&nbsp;the&nbsp;instance&nbsp;does<br>
+not&nbsp;have&nbsp;a&nbsp;method&nbsp;with&nbsp;the&nbsp;specified&nbsp;name.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-__ne__"><strong>__ne__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="PageTestTestCase-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PageTestTestCase-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PageTestTestCase-addCleanup"><strong>addCleanup</strong></a>(self, function, *args, **kwargs)</dt><dd><tt>Add&nbsp;a&nbsp;function,&nbsp;with&nbsp;arguments,&nbsp;to&nbsp;be&nbsp;called&nbsp;when&nbsp;the&nbsp;test&nbsp;is<br>
+completed.&nbsp;Functions&nbsp;added&nbsp;are&nbsp;called&nbsp;on&nbsp;a&nbsp;LIFO&nbsp;basis&nbsp;and&nbsp;are<br>
+called&nbsp;after&nbsp;tearDown&nbsp;on&nbsp;test&nbsp;failure&nbsp;or&nbsp;success.<br>
+&nbsp;<br>
+Cleanup&nbsp;items&nbsp;are&nbsp;called&nbsp;even&nbsp;if&nbsp;setUp&nbsp;fails&nbsp;(unlike&nbsp;tearDown).</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-addTypeEqualityFunc"><strong>addTypeEqualityFunc</strong></a>(self, typeobj, function)</dt><dd><tt>Add&nbsp;a&nbsp;type&nbsp;specific&nbsp;assertEqual&nbsp;style&nbsp;function&nbsp;to&nbsp;compare&nbsp;a&nbsp;type.<br>
+&nbsp;<br>
+This&nbsp;method&nbsp;is&nbsp;for&nbsp;use&nbsp;by&nbsp;<a href="unittest.case.html#TestCase">TestCase</a>&nbsp;subclasses&nbsp;that&nbsp;need&nbsp;to&nbsp;register<br>
+their&nbsp;own&nbsp;type&nbsp;equality&nbsp;functions&nbsp;to&nbsp;provide&nbsp;nicer&nbsp;error&nbsp;messages.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;typeobj:&nbsp;The&nbsp;data&nbsp;type&nbsp;to&nbsp;call&nbsp;this&nbsp;function&nbsp;on&nbsp;when&nbsp;both&nbsp;values<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;are&nbsp;of&nbsp;the&nbsp;same&nbsp;type&nbsp;in&nbsp;<a href="#PageTestTestCase-assertEqual">assertEqual</a>().<br>
+&nbsp;&nbsp;&nbsp;&nbsp;function:&nbsp;The&nbsp;callable&nbsp;taking&nbsp;two&nbsp;arguments&nbsp;and&nbsp;an&nbsp;optional<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;msg=&nbsp;argument&nbsp;that&nbsp;raises&nbsp;self.<strong>failureException</strong>&nbsp;with&nbsp;a<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;useful&nbsp;error&nbsp;message&nbsp;when&nbsp;the&nbsp;two&nbsp;arguments&nbsp;are&nbsp;not&nbsp;equal.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertAlmostEqual"><strong>assertAlmostEqual</strong></a>(self, first, second, places<font color="#909090">=None</font>, msg<font color="#909090">=None</font>, delta<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;unequal&nbsp;as&nbsp;determined&nbsp;by&nbsp;their<br>
+difference&nbsp;rounded&nbsp;to&nbsp;the&nbsp;given&nbsp;number&nbsp;of&nbsp;decimal&nbsp;places<br>
+(default&nbsp;7)&nbsp;and&nbsp;comparing&nbsp;to&nbsp;zero,&nbsp;or&nbsp;by&nbsp;comparing&nbsp;that&nbsp;the<br>
+difference&nbsp;between&nbsp;the&nbsp;two&nbsp;objects&nbsp;is&nbsp;more&nbsp;than&nbsp;the&nbsp;given&nbsp;delta.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;decimal&nbsp;places&nbsp;(from&nbsp;zero)&nbsp;are&nbsp;usually&nbsp;not&nbsp;the&nbsp;same<br>
+as&nbsp;significant&nbsp;digits&nbsp;(measured&nbsp;from&nbsp;the&nbsp;most&nbsp;significant&nbsp;digit).<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;two&nbsp;objects&nbsp;compare&nbsp;equal&nbsp;then&nbsp;they&nbsp;will&nbsp;automatically<br>
+compare&nbsp;almost&nbsp;equal.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertAlmostEquals"><strong>assertAlmostEquals</strong></a> = assertAlmostEqual(self, first, second, places<font color="#909090">=None</font>, msg<font color="#909090">=None</font>, delta<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;unequal&nbsp;as&nbsp;determined&nbsp;by&nbsp;their<br>
+difference&nbsp;rounded&nbsp;to&nbsp;the&nbsp;given&nbsp;number&nbsp;of&nbsp;decimal&nbsp;places<br>
+(default&nbsp;7)&nbsp;and&nbsp;comparing&nbsp;to&nbsp;zero,&nbsp;or&nbsp;by&nbsp;comparing&nbsp;that&nbsp;the<br>
+difference&nbsp;between&nbsp;the&nbsp;two&nbsp;objects&nbsp;is&nbsp;more&nbsp;than&nbsp;the&nbsp;given&nbsp;delta.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;decimal&nbsp;places&nbsp;(from&nbsp;zero)&nbsp;are&nbsp;usually&nbsp;not&nbsp;the&nbsp;same<br>
+as&nbsp;significant&nbsp;digits&nbsp;(measured&nbsp;from&nbsp;the&nbsp;most&nbsp;significant&nbsp;digit).<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;two&nbsp;objects&nbsp;compare&nbsp;equal&nbsp;then&nbsp;they&nbsp;will&nbsp;automatically<br>
+compare&nbsp;almost&nbsp;equal.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertDictContainsSubset"><strong>assertDictContainsSubset</strong></a>(self, expected, actual, msg<font color="#909090">=None</font>)</dt><dd><tt>Checks&nbsp;whether&nbsp;actual&nbsp;is&nbsp;a&nbsp;superset&nbsp;of&nbsp;expected.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertDictEqual"><strong>assertDictEqual</strong></a>(self, d1, d2, msg<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="PageTestTestCase-assertEqual"><strong>assertEqual</strong></a>(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;unequal&nbsp;as&nbsp;determined&nbsp;by&nbsp;the&nbsp;'=='<br>
+operator.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertEquals"><strong>assertEquals</strong></a> = assertEqual(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;unequal&nbsp;as&nbsp;determined&nbsp;by&nbsp;the&nbsp;'=='<br>
+operator.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertFalse"><strong>assertFalse</strong></a>(self, expr, msg<font color="#909090">=None</font>)</dt><dd><tt>Check&nbsp;that&nbsp;the&nbsp;expression&nbsp;is&nbsp;false.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertGreater"><strong>assertGreater</strong></a>(self, a, b, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#PageTestTestCase-assertTrue">assertTrue</a>(a&nbsp;&gt;&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertGreaterEqual"><strong>assertGreaterEqual</strong></a>(self, a, b, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#PageTestTestCase-assertTrue">assertTrue</a>(a&nbsp;&gt;=&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertIn"><strong>assertIn</strong></a>(self, member, container, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#PageTestTestCase-assertTrue">assertTrue</a>(a&nbsp;in&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertIs"><strong>assertIs</strong></a>(self, expr1, expr2, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#PageTestTestCase-assertTrue">assertTrue</a>(a&nbsp;is&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertIsInstance"><strong>assertIsInstance</strong></a>(self, obj, cls, msg<font color="#909090">=None</font>)</dt><dd><tt>Same&nbsp;as&nbsp;<a href="#PageTestTestCase-assertTrue">assertTrue</a>(isinstance(obj,&nbsp;cls)),&nbsp;with&nbsp;a&nbsp;nicer<br>
+default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertIsNone"><strong>assertIsNone</strong></a>(self, obj, msg<font color="#909090">=None</font>)</dt><dd><tt>Same&nbsp;as&nbsp;<a href="#PageTestTestCase-assertTrue">assertTrue</a>(obj&nbsp;is&nbsp;None),&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertIsNot"><strong>assertIsNot</strong></a>(self, expr1, expr2, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#PageTestTestCase-assertTrue">assertTrue</a>(a&nbsp;is&nbsp;not&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertIsNotNone"><strong>assertIsNotNone</strong></a>(self, obj, msg<font color="#909090">=None</font>)</dt><dd><tt>Included&nbsp;for&nbsp;symmetry&nbsp;with&nbsp;assertIsNone.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertItemsEqual"><strong>assertItemsEqual</strong></a>(self, expected_seq, actual_seq, msg<font color="#909090">=None</font>)</dt><dd><tt>An&nbsp;unordered&nbsp;sequence&nbsp;specific&nbsp;comparison.&nbsp;It&nbsp;asserts&nbsp;that<br>
+actual_seq&nbsp;and&nbsp;expected_seq&nbsp;have&nbsp;the&nbsp;same&nbsp;element&nbsp;counts.<br>
+Equivalent&nbsp;to::<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;<a href="#PageTestTestCase-assertEqual">assertEqual</a>(Counter(iter(actual_seq)),<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Counter(iter(expected_seq)))<br>
+&nbsp;<br>
+Asserts&nbsp;that&nbsp;each&nbsp;element&nbsp;has&nbsp;the&nbsp;same&nbsp;count&nbsp;in&nbsp;both&nbsp;sequences.<br>
+Example:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;-&nbsp;[0,&nbsp;1,&nbsp;1]&nbsp;and&nbsp;[1,&nbsp;0,&nbsp;1]&nbsp;compare&nbsp;equal.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;-&nbsp;[0,&nbsp;0,&nbsp;1]&nbsp;and&nbsp;[0,&nbsp;1]&nbsp;compare&nbsp;unequal.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertLess"><strong>assertLess</strong></a>(self, a, b, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#PageTestTestCase-assertTrue">assertTrue</a>(a&nbsp;&lt;&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertLessEqual"><strong>assertLessEqual</strong></a>(self, a, b, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#PageTestTestCase-assertTrue">assertTrue</a>(a&nbsp;&lt;=&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertListEqual"><strong>assertListEqual</strong></a>(self, list1, list2, msg<font color="#909090">=None</font>)</dt><dd><tt>A&nbsp;list-specific&nbsp;equality&nbsp;assertion.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;list1:&nbsp;The&nbsp;first&nbsp;list&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;list2:&nbsp;The&nbsp;second&nbsp;list&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;msg:&nbsp;Optional&nbsp;message&nbsp;to&nbsp;use&nbsp;on&nbsp;failure&nbsp;instead&nbsp;of&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;differences.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertMultiLineEqual"><strong>assertMultiLineEqual</strong></a>(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Assert&nbsp;that&nbsp;two&nbsp;multi-line&nbsp;strings&nbsp;are&nbsp;equal.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertNotAlmostEqual"><strong>assertNotAlmostEqual</strong></a>(self, first, second, places<font color="#909090">=None</font>, msg<font color="#909090">=None</font>, delta<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;equal&nbsp;as&nbsp;determined&nbsp;by&nbsp;their<br>
+difference&nbsp;rounded&nbsp;to&nbsp;the&nbsp;given&nbsp;number&nbsp;of&nbsp;decimal&nbsp;places<br>
+(default&nbsp;7)&nbsp;and&nbsp;comparing&nbsp;to&nbsp;zero,&nbsp;or&nbsp;by&nbsp;comparing&nbsp;that&nbsp;the<br>
+difference&nbsp;between&nbsp;the&nbsp;two&nbsp;objects&nbsp;is&nbsp;less&nbsp;than&nbsp;the&nbsp;given&nbsp;delta.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;decimal&nbsp;places&nbsp;(from&nbsp;zero)&nbsp;are&nbsp;usually&nbsp;not&nbsp;the&nbsp;same<br>
+as&nbsp;significant&nbsp;digits&nbsp;(measured&nbsp;from&nbsp;the&nbsp;most&nbsp;significant&nbsp;digit).<br>
+&nbsp;<br>
+Objects&nbsp;that&nbsp;are&nbsp;equal&nbsp;automatically&nbsp;fail.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertNotAlmostEquals"><strong>assertNotAlmostEquals</strong></a> = assertNotAlmostEqual(self, first, second, places<font color="#909090">=None</font>, msg<font color="#909090">=None</font>, delta<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;equal&nbsp;as&nbsp;determined&nbsp;by&nbsp;their<br>
+difference&nbsp;rounded&nbsp;to&nbsp;the&nbsp;given&nbsp;number&nbsp;of&nbsp;decimal&nbsp;places<br>
+(default&nbsp;7)&nbsp;and&nbsp;comparing&nbsp;to&nbsp;zero,&nbsp;or&nbsp;by&nbsp;comparing&nbsp;that&nbsp;the<br>
+difference&nbsp;between&nbsp;the&nbsp;two&nbsp;objects&nbsp;is&nbsp;less&nbsp;than&nbsp;the&nbsp;given&nbsp;delta.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;decimal&nbsp;places&nbsp;(from&nbsp;zero)&nbsp;are&nbsp;usually&nbsp;not&nbsp;the&nbsp;same<br>
+as&nbsp;significant&nbsp;digits&nbsp;(measured&nbsp;from&nbsp;the&nbsp;most&nbsp;significant&nbsp;digit).<br>
+&nbsp;<br>
+Objects&nbsp;that&nbsp;are&nbsp;equal&nbsp;automatically&nbsp;fail.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertNotEqual"><strong>assertNotEqual</strong></a>(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;equal&nbsp;as&nbsp;determined&nbsp;by&nbsp;the&nbsp;'!='<br>
+operator.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertNotEquals"><strong>assertNotEquals</strong></a> = assertNotEqual(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;equal&nbsp;as&nbsp;determined&nbsp;by&nbsp;the&nbsp;'!='<br>
+operator.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertNotIn"><strong>assertNotIn</strong></a>(self, member, container, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#PageTestTestCase-assertTrue">assertTrue</a>(a&nbsp;not&nbsp;in&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertNotIsInstance"><strong>assertNotIsInstance</strong></a>(self, obj, cls, msg<font color="#909090">=None</font>)</dt><dd><tt>Included&nbsp;for&nbsp;symmetry&nbsp;with&nbsp;assertIsInstance.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertNotRegexpMatches"><strong>assertNotRegexpMatches</strong></a>(self, text, unexpected_regexp, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;the&nbsp;test&nbsp;if&nbsp;the&nbsp;text&nbsp;matches&nbsp;the&nbsp;regular&nbsp;expression.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertRaises"><strong>assertRaises</strong></a>(self, excClass, callableObj<font color="#909090">=None</font>, *args, **kwargs)</dt><dd><tt>Fail&nbsp;unless&nbsp;an&nbsp;exception&nbsp;of&nbsp;class&nbsp;excClass&nbsp;is&nbsp;raised<br>
+by&nbsp;callableObj&nbsp;when&nbsp;invoked&nbsp;with&nbsp;arguments&nbsp;args&nbsp;and&nbsp;keyword<br>
+arguments&nbsp;kwargs.&nbsp;If&nbsp;a&nbsp;different&nbsp;type&nbsp;of&nbsp;exception&nbsp;is<br>
+raised,&nbsp;it&nbsp;will&nbsp;not&nbsp;be&nbsp;caught,&nbsp;and&nbsp;the&nbsp;test&nbsp;case&nbsp;will&nbsp;be<br>
+deemed&nbsp;to&nbsp;have&nbsp;suffered&nbsp;an&nbsp;error,&nbsp;exactly&nbsp;as&nbsp;for&nbsp;an<br>
+unexpected&nbsp;exception.<br>
+&nbsp;<br>
+If&nbsp;called&nbsp;with&nbsp;callableObj&nbsp;omitted&nbsp;or&nbsp;None,&nbsp;will&nbsp;return&nbsp;a<br>
+context&nbsp;object&nbsp;used&nbsp;like&nbsp;this::<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;with&nbsp;<a href="#PageTestTestCase-assertRaises">assertRaises</a>(SomeException):<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;do_something()<br>
+&nbsp;<br>
+The&nbsp;context&nbsp;manager&nbsp;keeps&nbsp;a&nbsp;reference&nbsp;to&nbsp;the&nbsp;exception&nbsp;as<br>
+the&nbsp;'exception'&nbsp;attribute.&nbsp;This&nbsp;allows&nbsp;you&nbsp;to&nbsp;inspect&nbsp;the<br>
+exception&nbsp;after&nbsp;the&nbsp;assertion::<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;with&nbsp;<a href="#PageTestTestCase-assertRaises">assertRaises</a>(SomeException)&nbsp;as&nbsp;cm:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;do_something()<br>
+&nbsp;&nbsp;&nbsp;&nbsp;the_exception&nbsp;=&nbsp;cm.exception<br>
+&nbsp;&nbsp;&nbsp;&nbsp;<a href="#PageTestTestCase-assertEqual">assertEqual</a>(the_exception.error_code,&nbsp;3)</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertRaisesRegexp"><strong>assertRaisesRegexp</strong></a>(self, expected_exception, expected_regexp, callable_obj<font color="#909090">=None</font>, *args, **kwargs)</dt><dd><tt>Asserts&nbsp;that&nbsp;the&nbsp;message&nbsp;in&nbsp;a&nbsp;raised&nbsp;exception&nbsp;matches&nbsp;a&nbsp;regexp.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;expected_exception:&nbsp;Exception&nbsp;class&nbsp;expected&nbsp;to&nbsp;be&nbsp;raised.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;expected_regexp:&nbsp;Regexp&nbsp;(re&nbsp;pattern&nbsp;object&nbsp;or&nbsp;string)&nbsp;expected<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;to&nbsp;be&nbsp;found&nbsp;in&nbsp;error&nbsp;message.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;callable_obj:&nbsp;Function&nbsp;to&nbsp;be&nbsp;called.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;args:&nbsp;Extra&nbsp;args.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;kwargs:&nbsp;Extra&nbsp;kwargs.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertRegexpMatches"><strong>assertRegexpMatches</strong></a>(self, text, expected_regexp, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;the&nbsp;test&nbsp;unless&nbsp;the&nbsp;text&nbsp;matches&nbsp;the&nbsp;regular&nbsp;expression.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertSequenceEqual"><strong>assertSequenceEqual</strong></a>(self, seq1, seq2, msg<font color="#909090">=None</font>, seq_type<font color="#909090">=None</font>)</dt><dd><tt>An&nbsp;equality&nbsp;assertion&nbsp;for&nbsp;ordered&nbsp;sequences&nbsp;(like&nbsp;lists&nbsp;and&nbsp;tuples).<br>
+&nbsp;<br>
+For&nbsp;the&nbsp;purposes&nbsp;of&nbsp;this&nbsp;function,&nbsp;a&nbsp;valid&nbsp;ordered&nbsp;sequence&nbsp;type&nbsp;is&nbsp;one<br>
+which&nbsp;can&nbsp;be&nbsp;indexed,&nbsp;has&nbsp;a&nbsp;length,&nbsp;and&nbsp;has&nbsp;an&nbsp;equality&nbsp;operator.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;seq1:&nbsp;The&nbsp;first&nbsp;sequence&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;seq2:&nbsp;The&nbsp;second&nbsp;sequence&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;seq_type:&nbsp;The&nbsp;expected&nbsp;datatype&nbsp;of&nbsp;the&nbsp;sequences,&nbsp;or&nbsp;None&nbsp;if&nbsp;no<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;datatype&nbsp;should&nbsp;be&nbsp;enforced.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;msg:&nbsp;Optional&nbsp;message&nbsp;to&nbsp;use&nbsp;on&nbsp;failure&nbsp;instead&nbsp;of&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;differences.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertSetEqual"><strong>assertSetEqual</strong></a>(self, set1, set2, msg<font color="#909090">=None</font>)</dt><dd><tt>A&nbsp;set-specific&nbsp;equality&nbsp;assertion.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;set1:&nbsp;The&nbsp;first&nbsp;set&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;set2:&nbsp;The&nbsp;second&nbsp;set&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;msg:&nbsp;Optional&nbsp;message&nbsp;to&nbsp;use&nbsp;on&nbsp;failure&nbsp;instead&nbsp;of&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;differences.<br>
+&nbsp;<br>
+assertSetEqual&nbsp;uses&nbsp;ducktyping&nbsp;to&nbsp;support&nbsp;different&nbsp;types&nbsp;of&nbsp;sets,&nbsp;and<br>
+is&nbsp;optimized&nbsp;for&nbsp;sets&nbsp;specifically&nbsp;(parameters&nbsp;must&nbsp;support&nbsp;a<br>
+difference&nbsp;method).</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertTrue"><strong>assertTrue</strong></a>(self, expr, msg<font color="#909090">=None</font>)</dt><dd><tt>Check&nbsp;that&nbsp;the&nbsp;expression&nbsp;is&nbsp;true.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assertTupleEqual"><strong>assertTupleEqual</strong></a>(self, tuple1, tuple2, msg<font color="#909090">=None</font>)</dt><dd><tt>A&nbsp;tuple-specific&nbsp;equality&nbsp;assertion.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;tuple1:&nbsp;The&nbsp;first&nbsp;tuple&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;tuple2:&nbsp;The&nbsp;second&nbsp;tuple&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;msg:&nbsp;Optional&nbsp;message&nbsp;to&nbsp;use&nbsp;on&nbsp;failure&nbsp;instead&nbsp;of&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;differences.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-assert_"><strong>assert_</strong></a> = assertTrue(self, expr, msg<font color="#909090">=None</font>)</dt><dd><tt>Check&nbsp;that&nbsp;the&nbsp;expression&nbsp;is&nbsp;true.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-countTestCases"><strong>countTestCases</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PageTestTestCase-debug"><strong>debug</strong></a>(self)</dt><dd><tt>Run&nbsp;the&nbsp;test&nbsp;without&nbsp;collecting&nbsp;errors&nbsp;in&nbsp;a&nbsp;TestResult</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-defaultTestResult"><strong>defaultTestResult</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PageTestTestCase-doCleanups"><strong>doCleanups</strong></a>(self)</dt><dd><tt>Execute&nbsp;all&nbsp;cleanup&nbsp;functions.&nbsp;Normally&nbsp;called&nbsp;for&nbsp;you&nbsp;after<br>
+tearDown.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-fail"><strong>fail</strong></a>(self, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;immediately,&nbsp;with&nbsp;the&nbsp;given&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-failIf"><strong>failIf</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="PageTestTestCase-failIfAlmostEqual"><strong>failIfAlmostEqual</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="PageTestTestCase-failIfEqual"><strong>failIfEqual</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="PageTestTestCase-failUnless"><strong>failUnless</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="PageTestTestCase-failUnlessAlmostEqual"><strong>failUnlessAlmostEqual</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="PageTestTestCase-failUnlessEqual"><strong>failUnlessEqual</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="PageTestTestCase-failUnlessRaises"><strong>failUnlessRaises</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="PageTestTestCase-id"><strong>id</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="PageTestTestCase-run"><strong>run</strong></a>(self, result<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="PageTestTestCase-setUp"><strong>setUp</strong></a>(self)</dt><dd><tt>Hook&nbsp;method&nbsp;for&nbsp;setting&nbsp;up&nbsp;the&nbsp;test&nbsp;fixture&nbsp;before&nbsp;exercising&nbsp;it.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-shortDescription"><strong>shortDescription</strong></a>(self)</dt><dd><tt>Returns&nbsp;a&nbsp;one-line&nbsp;description&nbsp;of&nbsp;the&nbsp;test,&nbsp;or&nbsp;None&nbsp;if&nbsp;no<br>
+description&nbsp;has&nbsp;been&nbsp;provided.<br>
+&nbsp;<br>
+The&nbsp;default&nbsp;implementation&nbsp;of&nbsp;this&nbsp;method&nbsp;returns&nbsp;the&nbsp;first&nbsp;line&nbsp;of<br>
+the&nbsp;specified&nbsp;test&nbsp;method's&nbsp;docstring.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-skipTest"><strong>skipTest</strong></a>(self, reason)</dt><dd><tt>Skip&nbsp;this&nbsp;test.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-tearDown"><strong>tearDown</strong></a>(self)</dt><dd><tt>Hook&nbsp;method&nbsp;for&nbsp;deconstructing&nbsp;the&nbsp;test&nbsp;fixture&nbsp;after&nbsp;testing&nbsp;it.</tt></dd></dl>
+
+<hr>
+Class methods inherited from <a href="unittest.case.html#TestCase">unittest.case.TestCase</a>:<br>
+<dl><dt><a name="PageTestTestCase-setUpClass"><strong>setUpClass</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Hook&nbsp;method&nbsp;for&nbsp;setting&nbsp;up&nbsp;class&nbsp;fixture&nbsp;before&nbsp;running&nbsp;tests&nbsp;in&nbsp;the&nbsp;class.</tt></dd></dl>
+
+<dl><dt><a name="PageTestTestCase-tearDownClass"><strong>tearDownClass</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Hook&nbsp;method&nbsp;for&nbsp;deconstructing&nbsp;the&nbsp;class&nbsp;fixture&nbsp;after&nbsp;running&nbsp;all&nbsp;tests&nbsp;in&nbsp;the&nbsp;class.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="unittest.case.html#TestCase">unittest.case.TestCase</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="unittest.case.html#TestCase">unittest.case.TestCase</a>:<br>
+<dl><dt><strong>failureException</strong> = &lt;type 'exceptions.AssertionError'&gt;<dd><tt>Assertion&nbsp;failed.</tt></dl>
+
+<dl><dt><strong>longMessage</strong> = False</dl>
+
+<dl><dt><strong>maxDiff</strong> = 640</dl>
+
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
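PageTestTestCase, documented above, is the base class telemetry's own unit tests use to run a measurement against a small story set; the key helpers are CreateStorySetFromFileInUnittestDataDir, CreateEmptyPageSet and RunMeasurement(measurement, ps, options=None). Below is a sketch of a subclass built only on those helpers; the no-op measurement and 'blank.html' file are illustrative placeholders, and the ValidateAndMeasurePage hook name is assumed from telemetry.page.page_test of this era.

```
# Sketch of a PageTestTestCase subclass. '_NoOpMeasurement' and 'blank.html'
# are placeholders; only the helper methods listed in the pydoc above are used.
from telemetry.page import page_test
from telemetry.testing import options_for_unittests
from telemetry.testing import page_test_test_case


class _NoOpMeasurement(page_test.PageTest):
  """Hypothetical trivial measurement that records nothing."""

  def ValidateAndMeasurePage(self, page, tab, results):
    pass


class NoOpMeasurementUnittest(page_test_test_case.PageTestTestCase):

  def testMeasurementRuns(self):
    story_set = self.CreateStorySetFromFileInUnittestDataDir('blank.html')
    options = options_for_unittests.GetCopy()
    results = self.RunMeasurement(_NoOpMeasurement(), story_set,
                                  options=options)
    # Per the docstring, RunMeasurement returns the rows the measurement output.
    self.assertIsNotNone(results)
```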
diff --git a/catapult/telemetry/docs/pydoc/telemetry.testing.progress_reporter.html b/catapult/telemetry/docs/pydoc/telemetry.testing.progress_reporter.html
new file mode 100644
index 0000000..8f92ace
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.testing.progress_reporter.html
@@ -0,0 +1,229 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.testing.progress_reporter</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.testing.html"><font color="#ffffff">testing</font></a>.progress_reporter</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/testing/progress_reporter.py">telemetry/testing/progress_reporter.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.testing.options_for_unittests.html">telemetry.testing.options_for_unittests</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.util.path.html">telemetry.internal.util.path</a><br>
+</td><td width="25%" valign=top><a href="sys.html">sys</a><br>
+</td><td width="25%" valign=top><a href="unittest.html">unittest</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.testing.progress_reporter.html#ProgressReporter">ProgressReporter</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.testing.progress_reporter.html#TestRunner">TestRunner</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="unittest.result.html#TestResult">unittest.result.TestResult</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.testing.progress_reporter.html#TestResult">TestResult</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="unittest.suite.html#TestSuite">unittest.suite.TestSuite</a>(<a href="unittest.suite.html#BaseTestSuite">unittest.suite.BaseTestSuite</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.testing.progress_reporter.html#TestSuite">TestSuite</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ProgressReporter">class <strong>ProgressReporter</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="ProgressReporter-Error"><strong>Error</strong></a>(self, test, err)</dt></dl>
+
+<dl><dt><a name="ProgressReporter-Failure"><strong>Failure</strong></a>(self, test, err)</dt></dl>
+
+<dl><dt><a name="ProgressReporter-Skip"><strong>Skip</strong></a>(self, test, reason)</dt></dl>
+
+<dl><dt><a name="ProgressReporter-StartTest"><strong>StartTest</strong></a>(self, test)</dt></dl>
+
+<dl><dt><a name="ProgressReporter-StartTestRun"><strong>StartTestRun</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ProgressReporter-StartTestSuite"><strong>StartTestSuite</strong></a>(self, suite)</dt></dl>
+
+<dl><dt><a name="ProgressReporter-StopTest"><strong>StopTest</strong></a>(self, test)</dt></dl>
+
+<dl><dt><a name="ProgressReporter-StopTestRun"><strong>StopTestRun</strong></a>(self, result)</dt></dl>
+
+<dl><dt><a name="ProgressReporter-StopTestSuite"><strong>StopTestSuite</strong></a>(self, suite)</dt></dl>
+
+<dl><dt><a name="ProgressReporter-Success"><strong>Success</strong></a>(self, test)</dt></dl>
+
+<dl><dt><a name="ProgressReporter-__init__"><strong>__init__</strong></a>(self, output_stream)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TestResult">class <strong>TestResult</strong></a>(<a href="unittest.result.html#TestResult">unittest.result.TestResult</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.testing.progress_reporter.html#TestResult">TestResult</a></dd>
+<dd><a href="unittest.result.html#TestResult">unittest.result.TestResult</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="TestResult-__init__"><strong>__init__</strong></a>(self, progress_reporters)</dt></dl>
+
+<dl><dt><a name="TestResult-addError"><strong>addError</strong></a>(self, test, err)</dt></dl>
+
+<dl><dt><a name="TestResult-addFailure"><strong>addFailure</strong></a>(self, test, err)</dt></dl>
+
+<dl><dt><a name="TestResult-addSkip"><strong>addSkip</strong></a>(self, test, reason)</dt></dl>
+
+<dl><dt><a name="TestResult-addSuccess"><strong>addSuccess</strong></a>(self, test)</dt></dl>
+
+<dl><dt><a name="TestResult-startTest"><strong>startTest</strong></a>(self, test)</dt></dl>
+
+<dl><dt><a name="TestResult-startTestRun"><strong>startTestRun</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TestResult-startTestSuite"><strong>startTestSuite</strong></a>(self, suite)</dt></dl>
+
+<dl><dt><a name="TestResult-stopTest"><strong>stopTest</strong></a>(self, test)</dt></dl>
+
+<dl><dt><a name="TestResult-stopTestRun"><strong>stopTestRun</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TestResult-stopTestSuite"><strong>stopTestSuite</strong></a>(self, suite)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>failures_and_errors</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="unittest.result.html#TestResult">unittest.result.TestResult</a>:<br>
+<dl><dt><a name="TestResult-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TestResult-addExpectedFailure"><strong>addExpectedFailure</strong></a>(self, test, err)</dt><dd><tt>Called&nbsp;when&nbsp;an&nbsp;expected&nbsp;failure/error&nbsp;occured.</tt></dd></dl>
+
+<dl><dt><a name="TestResult-addUnexpectedSuccess"><strong>addUnexpectedSuccess</strong></a>(self, *args, **kw)</dt><dd><tt>Called&nbsp;when&nbsp;a&nbsp;test&nbsp;was&nbsp;expected&nbsp;to&nbsp;fail,&nbsp;but&nbsp;succeed.</tt></dd></dl>
+
+<dl><dt><a name="TestResult-printErrors"><strong>printErrors</strong></a>(self)</dt><dd><tt>Called&nbsp;by&nbsp;<a href="#TestRunner">TestRunner</a>&nbsp;after&nbsp;test&nbsp;run</tt></dd></dl>
+
+<dl><dt><a name="TestResult-stop"><strong>stop</strong></a>(self)</dt><dd><tt>Indicates&nbsp;that&nbsp;the&nbsp;tests&nbsp;should&nbsp;be&nbsp;aborted</tt></dd></dl>
+
+<dl><dt><a name="TestResult-wasSuccessful"><strong>wasSuccessful</strong></a>(self)</dt><dd><tt>Tells&nbsp;whether&nbsp;or&nbsp;not&nbsp;this&nbsp;result&nbsp;was&nbsp;a&nbsp;success</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="unittest.result.html#TestResult">unittest.result.TestResult</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TestRunner">class <strong>TestRunner</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="TestRunner-run"><strong>run</strong></a>(self, test, progress_reporters, repeat_count, args)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TestSuite">class <strong>TestSuite</strong></a>(<a href="unittest.suite.html#TestSuite">unittest.suite.TestSuite</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt><a href="#TestSuite">TestSuite</a>&nbsp;that&nbsp;can&nbsp;delegate&nbsp;start&nbsp;and&nbsp;stop&nbsp;calls&nbsp;to&nbsp;a&nbsp;<a href="#TestResult">TestResult</a>&nbsp;<a href="__builtin__.html#object">object</a>.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.testing.progress_reporter.html#TestSuite">TestSuite</a></dd>
+<dd><a href="unittest.suite.html#TestSuite">unittest.suite.TestSuite</a></dd>
+<dd><a href="unittest.suite.html#BaseTestSuite">unittest.suite.BaseTestSuite</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="TestSuite-run"><strong>run</strong></a>(self, result)</dt></dl>
+
+<hr>
+Methods inherited from <a href="unittest.suite.html#TestSuite">unittest.suite.TestSuite</a>:<br>
+<dl><dt><a name="TestSuite-debug"><strong>debug</strong></a>(self)</dt><dd><tt>Run&nbsp;the&nbsp;tests&nbsp;without&nbsp;collecting&nbsp;errors&nbsp;in&nbsp;a&nbsp;<a href="#TestResult">TestResult</a></tt></dd></dl>
+
+<hr>
+Methods inherited from <a href="unittest.suite.html#BaseTestSuite">unittest.suite.BaseTestSuite</a>:<br>
+<dl><dt><a name="TestSuite-__call__"><strong>__call__</strong></a>(self, *args, **kwds)</dt></dl>
+
+<dl><dt><a name="TestSuite-__eq__"><strong>__eq__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="TestSuite-__init__"><strong>__init__</strong></a>(self, tests<font color="#909090">=()</font>)</dt></dl>
+
+<dl><dt><a name="TestSuite-__iter__"><strong>__iter__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TestSuite-__ne__"><strong>__ne__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="TestSuite-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TestSuite-addTest"><strong>addTest</strong></a>(self, test)</dt></dl>
+
+<dl><dt><a name="TestSuite-addTests"><strong>addTests</strong></a>(self, tests)</dt></dl>
+
+<dl><dt><a name="TestSuite-countTestCases"><strong>countTestCases</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="unittest.suite.html#BaseTestSuite">unittest.suite.BaseTestSuite</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="unittest.suite.html#BaseTestSuite">unittest.suite.BaseTestSuite</a>:<br>
+<dl><dt><strong>__hash__</strong> = None</dl>
+
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
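
The generated pydoc above documents the progress-reporter API added under telemetry.testing. As a rough, non-authoritative illustration of how that interface is meant to be extended, the sketch below subclasses ProgressReporter and overrides the notification hooks listed in the class table; the constructor argument (output_stream) and the failures_and_errors property come from the pydoc, while the subclass name, the dot-style output, and the assumption that failures_and_errors is a sequence are mine.

```python
# Illustrative sketch only; see the hedges in the paragraph above.
import sys

from telemetry.testing import progress_reporter


class DotProgressReporter(progress_reporter.ProgressReporter):
  """Writes one character per finished test, unittest-style."""

  def __init__(self, output_stream=sys.stdout):
    super(DotProgressReporter, self).__init__(output_stream)
    self._stream = output_stream

  def Success(self, test):
    self._stream.write('.')

  def Failure(self, test, err):
    self._stream.write('F')

  def Error(self, test, err):
    self._stream.write('E')

  def Skip(self, test, reason):
    self._stream.write('S')

  def StopTestRun(self, result):
    # failures_and_errors is the descriptor exposed by the TestResult class above.
    self._stream.write('\n%d failures/errors\n' % len(result.failures_and_errors))
```

TestRunner.run(test, progress_reporters, repeat_count, args) takes progress_reporters in the plural, so presumably several such reporters can observe the same run.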
diff --git a/catapult/telemetry/docs/pydoc/telemetry.testing.run_chromeos_tests.html b/catapult/telemetry/docs/pydoc/telemetry.testing.run_chromeos_tests.html
new file mode 100644
index 0000000..ad96cd2
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.testing.run_chromeos_tests.html
@@ -0,0 +1,39 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.testing.run_chromeos_tests</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.testing.html"><font color="#ffffff">testing</font></a>.run_chromeos_tests</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/testing/run_chromeos_tests.py">telemetry/testing/run_chromeos_tests.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="telemetry.testing.run_tests.html">telemetry.testing.run_tests</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-RunChromeOSTests"><strong>RunChromeOSTests</strong></a>(browser_type, tests_to_run)</dt><dd><tt>Run&nbsp;ChromeOS&nbsp;tests.<br>
+Args:<br>
+&nbsp;&nbsp;|browser_type|:&nbsp;a&nbsp;string&nbsp;specifying&nbsp;which&nbsp;browser&nbsp;type&nbsp;to&nbsp;use.<br>
+&nbsp;&nbsp;|tests_to_run|:&nbsp;a&nbsp;list&nbsp;of&nbsp;tuples&nbsp;(top_level_dir,&nbsp;unit_tests),&nbsp;where<br>
+&nbsp;&nbsp;&nbsp;&nbsp;|top_level_dir|&nbsp;specifies&nbsp;the&nbsp;top&nbsp;level&nbsp;directory&nbsp;for&nbsp;running&nbsp;tests,&nbsp;and<br>
+&nbsp;&nbsp;&nbsp;&nbsp;|unit_tests|&nbsp;is&nbsp;a&nbsp;list&nbsp;of&nbsp;string&nbsp;test&nbsp;names&nbsp;to&nbsp;run.</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
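
The RunChromeOSTests docstring above specifies the call shape, so a hypothetical invocation could look like the following; the browser type, directory, and test name are placeholders rather than values taken from the repository.

```python
# Hypothetical invocation: only the function signature and the shape of
# tests_to_run come from the docstring above; all concrete values are placeholders.
from telemetry.testing import run_chromeos_tests

tests_to_run = [
    # (top_level_dir, list of unit-test names to run under it)
    ('/usr/local/telemetry/src/tools/perf', ['page_sets_unittest']),
]
run_chromeos_tests.RunChromeOSTests('system', tests_to_run)
```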
diff --git a/catapult/telemetry/docs/pydoc/telemetry.testing.run_tests.html b/catapult/telemetry/docs/pydoc/telemetry.testing.run_tests.html
new file mode 100644
index 0000000..ab9085d
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.testing.run_tests.html
@@ -0,0 +1,112 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.testing.run_tests</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.testing.html"><font color="#ffffff">testing</font></a>.run_tests</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/testing/run_tests.py">telemetry/testing/run_tests.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.util.binary_manager.html">telemetry.internal.util.binary_manager</a><br>
+<a href="telemetry.internal.browser.browser_finder.html">telemetry.internal.browser.browser_finder</a><br>
+<a href="telemetry.internal.browser.browser_finder_exceptions.html">telemetry.internal.browser.browser_finder_exceptions</a><br>
+<a href="telemetry.internal.browser.browser_options.html">telemetry.internal.browser.browser_options</a><br>
+</td><td width="25%" valign=top><a href="telemetry.testing.browser_test_case.html">telemetry.testing.browser_test_case</a><br>
+<a href="telemetry.internal.util.command_line.html">telemetry.internal.util.command_line</a><br>
+<a href="telemetry.decorators.html">telemetry.decorators</a><br>
+<a href="telemetry.internal.platform.device_finder.html">telemetry.internal.platform.device_finder</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+<a href="telemetry.testing.options_for_unittests.html">telemetry.testing.options_for_unittests</a><br>
+<a href="telemetry.core.platform.html">telemetry.core.platform</a><br>
+<a href="telemetry.internal.util.ps_util.html">telemetry.internal.util.ps_util</a><br>
+</td><td width="25%" valign=top><a href="sys.html">sys</a><br>
+<a href="typ.html">typ</a><br>
+<a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.util.command_line.html#OptparseCommand">telemetry.internal.util.command_line.OptparseCommand</a>(<a href="telemetry.internal.util.command_line.html#Command">telemetry.internal.util.command_line.Command</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.testing.run_tests.html#RunTestsCommand">RunTestsCommand</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="RunTestsCommand">class <strong>RunTestsCommand</strong></a>(<a href="telemetry.internal.util.command_line.html#OptparseCommand">telemetry.internal.util.command_line.OptparseCommand</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Run&nbsp;unit&nbsp;tests<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.testing.run_tests.html#RunTestsCommand">RunTestsCommand</a></dd>
+<dd><a href="telemetry.internal.util.command_line.html#OptparseCommand">telemetry.internal.util.command_line.OptparseCommand</a></dd>
+<dd><a href="telemetry.internal.util.command_line.html#Command">telemetry.internal.util.command_line.Command</a></dd>
+<dd><a href="telemetry.internal.util.command_line.html#ArgumentHandlerMixIn">telemetry.internal.util.command_line.ArgumentHandlerMixIn</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="RunTestsCommand-Run"><strong>Run</strong></a>(self, args)</dt></dl>
+
+<dl><dt><a name="RunTestsCommand-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="RunTestsCommand-AddCommandLineArgs"><strong>AddCommandLineArgs</strong></a>(cls, parser, _)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="RunTestsCommand-CreateParser"><strong>CreateParser</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="RunTestsCommand-ProcessCommandLineArgs"><strong>ProcessCommandLineArgs</strong></a>(cls, parser, args, _)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="RunTestsCommand-main"><strong>main</strong></a>(cls, args<font color="#909090">=None</font>, stream<font color="#909090">=None</font>)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>usage</strong> = '[test_name ...] [&lt;options&gt;]'</dl>
+
+<hr>
+Class methods inherited from <a href="telemetry.internal.util.command_line.html#Command">telemetry.internal.util.command_line.Command</a>:<br>
+<dl><dt><a name="RunTestsCommand-Description"><strong>Description</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="RunTestsCommand-Name"><strong>Name</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.util.command_line.html#ArgumentHandlerMixIn">telemetry.internal.util.command_line.ArgumentHandlerMixIn</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-GetClassifier"><strong>GetClassifier</strong></a>(args, possible_browser)</dt></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
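
Since RunTestsCommand.main(args=None, stream=None) is documented above as a classmethod with the usage string '[test_name ...] [&lt;options&gt;]', a minimal programmatic entry point might look like the sketch below; the flag value, the test-name prefix, and the assumption that main() returns a process exit code are not taken from the pydoc.

```python
# Sketch only; flag values and exit-code handling are assumptions.
import sys

from telemetry.testing import run_tests

if __name__ == '__main__':
  exit_code = run_tests.RunTestsCommand.main(
      ['--browser=any', 'telemetry.core'], stream=sys.stdout)
  sys.exit(exit_code)
```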
diff --git a/catapult/telemetry/docs/pydoc/telemetry.testing.simple_mock.html b/catapult/telemetry/docs/pydoc/telemetry.testing.simple_mock.html
new file mode 100644
index 0000000..e3a991f
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.testing.simple_mock.html
@@ -0,0 +1,142 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.testing.simple_mock</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.testing.html"><font color="#ffffff">testing</font></a>.simple_mock</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/testing/simple_mock.py">telemetry/testing/simple_mock.py</a></font></td></tr></table>
+    <p><tt>A&nbsp;very&nbsp;very&nbsp;simple&nbsp;mock&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;harness.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.testing.simple_mock.html#MockFunctionCall">MockFunctionCall</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.testing.simple_mock.html#MockObject">MockObject</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.testing.simple_mock.html#MockTimer">MockTimer</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.testing.simple_mock.html#MockTrace">MockTrace</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MockFunctionCall">class <strong>MockFunctionCall</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="MockFunctionCall-VerifyEquals"><strong>VerifyEquals</strong></a>(self, got)</dt></dl>
+
+<dl><dt><a name="MockFunctionCall-WhenCalled"><strong>WhenCalled</strong></a>(self, handler)</dt></dl>
+
+<dl><dt><a name="MockFunctionCall-WillReturn"><strong>WillReturn</strong></a>(self, value)</dt></dl>
+
+<dl><dt><a name="MockFunctionCall-WithArgs"><strong>WithArgs</strong></a>(self, *args)</dt></dl>
+
+<dl><dt><a name="MockFunctionCall-__init__"><strong>__init__</strong></a>(self, name)</dt></dl>
+
+<dl><dt><a name="MockFunctionCall-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MockObject">class <strong>MockObject</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="MockObject-ExpectCall"><strong>ExpectCall</strong></a>(self, func_name, *args)</dt></dl>
+
+<dl><dt><a name="MockObject-SetAttribute"><strong>SetAttribute</strong></a>(self, name, value)</dt></dl>
+
+<dl><dt><a name="MockObject-__init__"><strong>__init__</strong></a>(self, parent_mock<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="MockObject-__setattr__"><strong>__setattr__</strong></a>(self, name, value)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MockTimer">class <strong>MockTimer</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;mock&nbsp;timer&nbsp;to&nbsp;fake&nbsp;out&nbsp;the&nbsp;timing&nbsp;for&nbsp;a&nbsp;module.<br>
+Args:<br>
+&nbsp;&nbsp;module:&nbsp;the&nbsp;module&nbsp;whose&nbsp;time&nbsp;functions&nbsp;should&nbsp;be&nbsp;faked&nbsp;out<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="MockTimer-Restore"><strong>Restore</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MockTimer-SetTime"><strong>SetTime</strong></a>(self, time)</dt></dl>
+
+<dl><dt><a name="MockTimer-__del__"><strong>__del__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MockTimer-__init__"><strong>__init__</strong></a>(self, module<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="MockTimer-sleep"><strong>sleep</strong></a>(self, time)</dt></dl>
+
+<dl><dt><a name="MockTimer-time"><strong>time</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MockTrace">class <strong>MockTrace</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="MockTrace-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>DONT_CARE</strong> = ''</td></tr></table>
+</body></html>
\ No newline at end of file
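
A rough usage sketch for the simple_mock harness documented above. The class and method names (MockObject, SetAttribute, ExpectCall, WillReturn, DONT_CARE) come from the pydoc; the assumptions, which the pydoc does not state, are that ExpectCall returns the MockFunctionCall it registers and installs a same-named callable on the mock.

```python
from telemetry.testing import simple_mock

mock_inspector = simple_mock.MockObject()
mock_inspector.SetAttribute('url', 'http://example.com')

# Expect one GetVersion() call with any single argument; have it return '52.0'.
expected_call = mock_inspector.ExpectCall('GetVersion', simple_mock.DONT_CARE)
expected_call.WillReturn('52.0')

assert mock_inspector.GetVersion('stable') == '52.0'  # assumed behaviour
```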
diff --git a/catapult/telemetry/docs/pydoc/telemetry.testing.story_set_smoke_test.html b/catapult/telemetry/docs/pydoc/telemetry.testing.story_set_smoke_test.html
new file mode 100644
index 0000000..22dd8ef
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.testing.story_set_smoke_test.html
@@ -0,0 +1,360 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.testing.story_set_smoke_test</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.testing.html"><font color="#ffffff">testing</font></a>.story_set_smoke_test</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/testing/story_set_smoke_test.py">telemetry/testing/story_set_smoke_test.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.wpr.archive_info.html">telemetry.wpr.archive_info</a><br>
+<a href="telemetry.internal.browser.browser_credentials.html">telemetry.internal.browser.browser_credentials</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.discover.html">telemetry.core.discover</a><br>
+<a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="os.html">os</a><br>
+<a href="telemetry.page.html">telemetry.page</a><br>
+</td><td width="25%" valign=top><a href="telemetry.story.html">telemetry.story</a><br>
+<a href="unittest.html">unittest</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="unittest.case.html#TestCase">unittest.case.TestCase</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.testing.story_set_smoke_test.html#StorySetSmokeTest">StorySetSmokeTest</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="StorySetSmokeTest">class <strong>StorySetSmokeTest</strong></a>(<a href="unittest.case.html#TestCase">unittest.case.TestCase</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.testing.story_set_smoke_test.html#StorySetSmokeTest">StorySetSmokeTest</a></dd>
+<dd><a href="unittest.case.html#TestCase">unittest.case.TestCase</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="StorySetSmokeTest-CheckArchive"><strong>CheckArchive</strong></a>(self, story_set)</dt><dd><tt>Verify&nbsp;that&nbsp;all&nbsp;URLs&nbsp;of&nbsp;pages&nbsp;in&nbsp;story_set&nbsp;have&nbsp;an&nbsp;associated&nbsp;archive.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-CheckAttributes"><strong>CheckAttributes</strong></a>(self, story_set)</dt><dd><tt>Verify&nbsp;that&nbsp;story_set&nbsp;and&nbsp;its&nbsp;stories&nbsp;base&nbsp;attributes&nbsp;have&nbsp;the&nbsp;right<br>
+types.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-CheckAttributesOfStoryBasicAttributes"><strong>CheckAttributesOfStoryBasicAttributes</strong></a>(self, story)</dt></dl>
+
+<dl><dt><a name="StorySetSmokeTest-CheckAttributesOfStorySetBasicAttributes"><strong>CheckAttributesOfStorySetBasicAttributes</strong></a>(self, story_set)</dt></dl>
+
+<dl><dt><a name="StorySetSmokeTest-CheckCredentials"><strong>CheckCredentials</strong></a>(self, story_set)</dt><dd><tt>Verify&nbsp;that&nbsp;all&nbsp;pages&nbsp;in&nbsp;story_set&nbsp;use&nbsp;proper&nbsp;credentials</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-CheckSharedStates"><strong>CheckSharedStates</strong></a>(self, story_set)</dt></dl>
+
+<dl><dt><a name="StorySetSmokeTest-GetAllStorySetClasses"><strong>GetAllStorySetClasses</strong></a>(self, story_sets_dir, top_level_dir)</dt></dl>
+
+<dl><dt><a name="StorySetSmokeTest-RunSmokeTest"><strong>RunSmokeTest</strong></a>(self, story_sets_dir, top_level_dir)</dt><dd><tt>Run&nbsp;smoke&nbsp;test&nbsp;on&nbsp;all&nbsp;story&nbsp;sets&nbsp;in&nbsp;story_sets_dir.<br>
+&nbsp;<br>
+Subclasses&nbsp;of&nbsp;<a href="#StorySetSmokeTest">StorySetSmokeTest</a>&nbsp;should&nbsp;call&nbsp;this&nbsp;from&nbsp;a&nbsp;test<br>
+method&nbsp;to&nbsp;run&nbsp;the&nbsp;smoke&nbsp;test.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-setUp"><strong>setUp</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="unittest.case.html#TestCase">unittest.case.TestCase</a>:<br>
+<dl><dt><a name="StorySetSmokeTest-__call__"><strong>__call__</strong></a>(self, *args, **kwds)</dt></dl>
+
+<dl><dt><a name="StorySetSmokeTest-__eq__"><strong>__eq__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="StorySetSmokeTest-__hash__"><strong>__hash__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="StorySetSmokeTest-__init__"><strong>__init__</strong></a>(self, methodName<font color="#909090">='runTest'</font>)</dt><dd><tt>Create&nbsp;an&nbsp;instance&nbsp;of&nbsp;the&nbsp;class&nbsp;that&nbsp;will&nbsp;use&nbsp;the&nbsp;named&nbsp;test<br>
+method&nbsp;when&nbsp;executed.&nbsp;Raises&nbsp;a&nbsp;ValueError&nbsp;if&nbsp;the&nbsp;instance&nbsp;does<br>
+not&nbsp;have&nbsp;a&nbsp;method&nbsp;with&nbsp;the&nbsp;specified&nbsp;name.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-__ne__"><strong>__ne__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="StorySetSmokeTest-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="StorySetSmokeTest-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="StorySetSmokeTest-addCleanup"><strong>addCleanup</strong></a>(self, function, *args, **kwargs)</dt><dd><tt>Add&nbsp;a&nbsp;function,&nbsp;with&nbsp;arguments,&nbsp;to&nbsp;be&nbsp;called&nbsp;when&nbsp;the&nbsp;test&nbsp;is<br>
+completed.&nbsp;Functions&nbsp;added&nbsp;are&nbsp;called&nbsp;on&nbsp;a&nbsp;LIFO&nbsp;basis&nbsp;and&nbsp;are<br>
+called&nbsp;after&nbsp;tearDown&nbsp;on&nbsp;test&nbsp;failure&nbsp;or&nbsp;success.<br>
+&nbsp;<br>
+Cleanup&nbsp;items&nbsp;are&nbsp;called&nbsp;even&nbsp;if&nbsp;setUp&nbsp;fails&nbsp;(unlike&nbsp;tearDown).</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-addTypeEqualityFunc"><strong>addTypeEqualityFunc</strong></a>(self, typeobj, function)</dt><dd><tt>Add&nbsp;a&nbsp;type&nbsp;specific&nbsp;assertEqual&nbsp;style&nbsp;function&nbsp;to&nbsp;compare&nbsp;a&nbsp;type.<br>
+&nbsp;<br>
+This&nbsp;method&nbsp;is&nbsp;for&nbsp;use&nbsp;by&nbsp;<a href="unittest.case.html#TestCase">TestCase</a>&nbsp;subclasses&nbsp;that&nbsp;need&nbsp;to&nbsp;register<br>
+their&nbsp;own&nbsp;type&nbsp;equality&nbsp;functions&nbsp;to&nbsp;provide&nbsp;nicer&nbsp;error&nbsp;messages.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;typeobj:&nbsp;The&nbsp;data&nbsp;type&nbsp;to&nbsp;call&nbsp;this&nbsp;function&nbsp;on&nbsp;when&nbsp;both&nbsp;values<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;are&nbsp;of&nbsp;the&nbsp;same&nbsp;type&nbsp;in&nbsp;<a href="#StorySetSmokeTest-assertEqual">assertEqual</a>().<br>
+&nbsp;&nbsp;&nbsp;&nbsp;function:&nbsp;The&nbsp;callable&nbsp;taking&nbsp;two&nbsp;arguments&nbsp;and&nbsp;an&nbsp;optional<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;msg=&nbsp;argument&nbsp;that&nbsp;raises&nbsp;self.<strong>failureException</strong>&nbsp;with&nbsp;a<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;useful&nbsp;error&nbsp;message&nbsp;when&nbsp;the&nbsp;two&nbsp;arguments&nbsp;are&nbsp;not&nbsp;equal.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertAlmostEqual"><strong>assertAlmostEqual</strong></a>(self, first, second, places<font color="#909090">=None</font>, msg<font color="#909090">=None</font>, delta<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;unequal&nbsp;as&nbsp;determined&nbsp;by&nbsp;their<br>
+difference&nbsp;rounded&nbsp;to&nbsp;the&nbsp;given&nbsp;number&nbsp;of&nbsp;decimal&nbsp;places<br>
+(default&nbsp;7)&nbsp;and&nbsp;comparing&nbsp;to&nbsp;zero,&nbsp;or&nbsp;by&nbsp;comparing&nbsp;that&nbsp;the<br>
+difference&nbsp;between&nbsp;the&nbsp;two&nbsp;objects&nbsp;is&nbsp;more&nbsp;than&nbsp;the&nbsp;given&nbsp;delta.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;decimal&nbsp;places&nbsp;(from&nbsp;zero)&nbsp;are&nbsp;usually&nbsp;not&nbsp;the&nbsp;same<br>
+as&nbsp;significant&nbsp;digits&nbsp;(measured&nbsp;from&nbsp;the&nbsp;most&nbsp;significant&nbsp;digit).<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;two&nbsp;objects&nbsp;compare&nbsp;equal&nbsp;then&nbsp;they&nbsp;will&nbsp;automatically<br>
+compare&nbsp;almost&nbsp;equal.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertAlmostEquals"><strong>assertAlmostEquals</strong></a> = assertAlmostEqual(self, first, second, places<font color="#909090">=None</font>, msg<font color="#909090">=None</font>, delta<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;unequal&nbsp;as&nbsp;determined&nbsp;by&nbsp;their<br>
+difference&nbsp;rounded&nbsp;to&nbsp;the&nbsp;given&nbsp;number&nbsp;of&nbsp;decimal&nbsp;places<br>
+(default&nbsp;7)&nbsp;and&nbsp;comparing&nbsp;to&nbsp;zero,&nbsp;or&nbsp;by&nbsp;comparing&nbsp;that&nbsp;the<br>
+difference&nbsp;between&nbsp;the&nbsp;two&nbsp;objects&nbsp;is&nbsp;more&nbsp;than&nbsp;the&nbsp;given&nbsp;delta.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;decimal&nbsp;places&nbsp;(from&nbsp;zero)&nbsp;are&nbsp;usually&nbsp;not&nbsp;the&nbsp;same<br>
+as&nbsp;significant&nbsp;digits&nbsp;(measured&nbsp;from&nbsp;the&nbsp;most&nbsp;significant&nbsp;digit).<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;two&nbsp;objects&nbsp;compare&nbsp;equal&nbsp;then&nbsp;they&nbsp;will&nbsp;automatically<br>
+compare&nbsp;almost&nbsp;equal.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertDictContainsSubset"><strong>assertDictContainsSubset</strong></a>(self, expected, actual, msg<font color="#909090">=None</font>)</dt><dd><tt>Checks&nbsp;whether&nbsp;actual&nbsp;is&nbsp;a&nbsp;superset&nbsp;of&nbsp;expected.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertDictEqual"><strong>assertDictEqual</strong></a>(self, d1, d2, msg<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertEqual"><strong>assertEqual</strong></a>(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;unequal&nbsp;as&nbsp;determined&nbsp;by&nbsp;the&nbsp;'=='<br>
+operator.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertEquals"><strong>assertEquals</strong></a> = assertEqual(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;unequal&nbsp;as&nbsp;determined&nbsp;by&nbsp;the&nbsp;'=='<br>
+operator.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertFalse"><strong>assertFalse</strong></a>(self, expr, msg<font color="#909090">=None</font>)</dt><dd><tt>Check&nbsp;that&nbsp;the&nbsp;expression&nbsp;is&nbsp;false.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertGreater"><strong>assertGreater</strong></a>(self, a, b, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#StorySetSmokeTest-assertTrue">assertTrue</a>(a&nbsp;&gt;&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertGreaterEqual"><strong>assertGreaterEqual</strong></a>(self, a, b, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#StorySetSmokeTest-assertTrue">assertTrue</a>(a&nbsp;&gt;=&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertIn"><strong>assertIn</strong></a>(self, member, container, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#StorySetSmokeTest-assertTrue">assertTrue</a>(a&nbsp;in&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertIs"><strong>assertIs</strong></a>(self, expr1, expr2, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#StorySetSmokeTest-assertTrue">assertTrue</a>(a&nbsp;is&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertIsInstance"><strong>assertIsInstance</strong></a>(self, obj, cls, msg<font color="#909090">=None</font>)</dt><dd><tt>Same&nbsp;as&nbsp;<a href="#StorySetSmokeTest-assertTrue">assertTrue</a>(isinstance(obj,&nbsp;cls)),&nbsp;with&nbsp;a&nbsp;nicer<br>
+default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertIsNone"><strong>assertIsNone</strong></a>(self, obj, msg<font color="#909090">=None</font>)</dt><dd><tt>Same&nbsp;as&nbsp;<a href="#StorySetSmokeTest-assertTrue">assertTrue</a>(obj&nbsp;is&nbsp;None),&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertIsNot"><strong>assertIsNot</strong></a>(self, expr1, expr2, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#StorySetSmokeTest-assertTrue">assertTrue</a>(a&nbsp;is&nbsp;not&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertIsNotNone"><strong>assertIsNotNone</strong></a>(self, obj, msg<font color="#909090">=None</font>)</dt><dd><tt>Included&nbsp;for&nbsp;symmetry&nbsp;with&nbsp;assertIsNone.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertItemsEqual"><strong>assertItemsEqual</strong></a>(self, expected_seq, actual_seq, msg<font color="#909090">=None</font>)</dt><dd><tt>An&nbsp;unordered&nbsp;sequence&nbsp;specific&nbsp;comparison.&nbsp;It&nbsp;asserts&nbsp;that<br>
+actual_seq&nbsp;and&nbsp;expected_seq&nbsp;have&nbsp;the&nbsp;same&nbsp;element&nbsp;counts.<br>
+Equivalent&nbsp;to::<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;<a href="#StorySetSmokeTest-assertEqual">assertEqual</a>(Counter(iter(actual_seq)),<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Counter(iter(expected_seq)))<br>
+&nbsp;<br>
+Asserts&nbsp;that&nbsp;each&nbsp;element&nbsp;has&nbsp;the&nbsp;same&nbsp;count&nbsp;in&nbsp;both&nbsp;sequences.<br>
+Example:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;-&nbsp;[0,&nbsp;1,&nbsp;1]&nbsp;and&nbsp;[1,&nbsp;0,&nbsp;1]&nbsp;compare&nbsp;equal.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;-&nbsp;[0,&nbsp;0,&nbsp;1]&nbsp;and&nbsp;[0,&nbsp;1]&nbsp;compare&nbsp;unequal.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertLess"><strong>assertLess</strong></a>(self, a, b, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#StorySetSmokeTest-assertTrue">assertTrue</a>(a&nbsp;&lt;&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertLessEqual"><strong>assertLessEqual</strong></a>(self, a, b, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#StorySetSmokeTest-assertTrue">assertTrue</a>(a&nbsp;&lt;=&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertListEqual"><strong>assertListEqual</strong></a>(self, list1, list2, msg<font color="#909090">=None</font>)</dt><dd><tt>A&nbsp;list-specific&nbsp;equality&nbsp;assertion.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;list1:&nbsp;The&nbsp;first&nbsp;list&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;list2:&nbsp;The&nbsp;second&nbsp;list&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;msg:&nbsp;Optional&nbsp;message&nbsp;to&nbsp;use&nbsp;on&nbsp;failure&nbsp;instead&nbsp;of&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;differences.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertMultiLineEqual"><strong>assertMultiLineEqual</strong></a>(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Assert&nbsp;that&nbsp;two&nbsp;multi-line&nbsp;strings&nbsp;are&nbsp;equal.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertNotAlmostEqual"><strong>assertNotAlmostEqual</strong></a>(self, first, second, places<font color="#909090">=None</font>, msg<font color="#909090">=None</font>, delta<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;equal&nbsp;as&nbsp;determined&nbsp;by&nbsp;their<br>
+difference&nbsp;rounded&nbsp;to&nbsp;the&nbsp;given&nbsp;number&nbsp;of&nbsp;decimal&nbsp;places<br>
+(default&nbsp;7)&nbsp;and&nbsp;comparing&nbsp;to&nbsp;zero,&nbsp;or&nbsp;by&nbsp;comparing&nbsp;that&nbsp;the<br>
+difference&nbsp;between&nbsp;the&nbsp;two&nbsp;objects&nbsp;is&nbsp;less&nbsp;than&nbsp;the&nbsp;given&nbsp;delta.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;decimal&nbsp;places&nbsp;(from&nbsp;zero)&nbsp;are&nbsp;usually&nbsp;not&nbsp;the&nbsp;same<br>
+as&nbsp;significant&nbsp;digits&nbsp;(measured&nbsp;from&nbsp;the&nbsp;most&nbsp;significant&nbsp;digit).<br>
+&nbsp;<br>
+Objects&nbsp;that&nbsp;are&nbsp;equal&nbsp;automatically&nbsp;fail.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertNotAlmostEquals"><strong>assertNotAlmostEquals</strong></a> = assertNotAlmostEqual(self, first, second, places<font color="#909090">=None</font>, msg<font color="#909090">=None</font>, delta<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;equal&nbsp;as&nbsp;determined&nbsp;by&nbsp;their<br>
+difference&nbsp;rounded&nbsp;to&nbsp;the&nbsp;given&nbsp;number&nbsp;of&nbsp;decimal&nbsp;places<br>
+(default&nbsp;7)&nbsp;and&nbsp;comparing&nbsp;to&nbsp;zero,&nbsp;or&nbsp;by&nbsp;comparing&nbsp;that&nbsp;the<br>
+difference&nbsp;between&nbsp;the&nbsp;two&nbsp;objects&nbsp;is&nbsp;less&nbsp;than&nbsp;the&nbsp;given&nbsp;delta.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;decimal&nbsp;places&nbsp;(from&nbsp;zero)&nbsp;are&nbsp;usually&nbsp;not&nbsp;the&nbsp;same<br>
+as&nbsp;significant&nbsp;digits&nbsp;(measured&nbsp;from&nbsp;the&nbsp;most&nbsp;significant&nbsp;digit).<br>
+&nbsp;<br>
+Objects&nbsp;that&nbsp;are&nbsp;equal&nbsp;automatically&nbsp;fail.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertNotEqual"><strong>assertNotEqual</strong></a>(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;equal&nbsp;as&nbsp;determined&nbsp;by&nbsp;the&nbsp;'!='<br>
+operator.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertNotEquals"><strong>assertNotEquals</strong></a> = assertNotEqual(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;equal&nbsp;as&nbsp;determined&nbsp;by&nbsp;the&nbsp;'!='<br>
+operator.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertNotIn"><strong>assertNotIn</strong></a>(self, member, container, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#StorySetSmokeTest-assertTrue">assertTrue</a>(a&nbsp;not&nbsp;in&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertNotIsInstance"><strong>assertNotIsInstance</strong></a>(self, obj, cls, msg<font color="#909090">=None</font>)</dt><dd><tt>Included&nbsp;for&nbsp;symmetry&nbsp;with&nbsp;assertIsInstance.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertNotRegexpMatches"><strong>assertNotRegexpMatches</strong></a>(self, text, unexpected_regexp, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;the&nbsp;test&nbsp;if&nbsp;the&nbsp;text&nbsp;matches&nbsp;the&nbsp;regular&nbsp;expression.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertRaises"><strong>assertRaises</strong></a>(self, excClass, callableObj<font color="#909090">=None</font>, *args, **kwargs)</dt><dd><tt>Fail&nbsp;unless&nbsp;an&nbsp;exception&nbsp;of&nbsp;class&nbsp;excClass&nbsp;is&nbsp;raised<br>
+by&nbsp;callableObj&nbsp;when&nbsp;invoked&nbsp;with&nbsp;arguments&nbsp;args&nbsp;and&nbsp;keyword<br>
+arguments&nbsp;kwargs.&nbsp;If&nbsp;a&nbsp;different&nbsp;type&nbsp;of&nbsp;exception&nbsp;is<br>
+raised,&nbsp;it&nbsp;will&nbsp;not&nbsp;be&nbsp;caught,&nbsp;and&nbsp;the&nbsp;test&nbsp;case&nbsp;will&nbsp;be<br>
+deemed&nbsp;to&nbsp;have&nbsp;suffered&nbsp;an&nbsp;error,&nbsp;exactly&nbsp;as&nbsp;for&nbsp;an<br>
+unexpected&nbsp;exception.<br>
+&nbsp;<br>
+If&nbsp;called&nbsp;with&nbsp;callableObj&nbsp;omitted&nbsp;or&nbsp;None,&nbsp;will&nbsp;return&nbsp;a<br>
+context&nbsp;object&nbsp;used&nbsp;like&nbsp;this::<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;with&nbsp;<a href="#StorySetSmokeTest-assertRaises">assertRaises</a>(SomeException):<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;do_something()<br>
+&nbsp;<br>
+The&nbsp;context&nbsp;manager&nbsp;keeps&nbsp;a&nbsp;reference&nbsp;to&nbsp;the&nbsp;exception&nbsp;as<br>
+the&nbsp;'exception'&nbsp;attribute.&nbsp;This&nbsp;allows&nbsp;you&nbsp;to&nbsp;inspect&nbsp;the<br>
+exception&nbsp;after&nbsp;the&nbsp;assertion::<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;with&nbsp;<a href="#StorySetSmokeTest-assertRaises">assertRaises</a>(SomeException)&nbsp;as&nbsp;cm:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;do_something()<br>
+&nbsp;&nbsp;&nbsp;&nbsp;the_exception&nbsp;=&nbsp;cm.exception<br>
+&nbsp;&nbsp;&nbsp;&nbsp;<a href="#StorySetSmokeTest-assertEqual">assertEqual</a>(the_exception.error_code,&nbsp;3)</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertRaisesRegexp"><strong>assertRaisesRegexp</strong></a>(self, expected_exception, expected_regexp, callable_obj<font color="#909090">=None</font>, *args, **kwargs)</dt><dd><tt>Asserts&nbsp;that&nbsp;the&nbsp;message&nbsp;in&nbsp;a&nbsp;raised&nbsp;exception&nbsp;matches&nbsp;a&nbsp;regexp.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;expected_exception:&nbsp;Exception&nbsp;class&nbsp;expected&nbsp;to&nbsp;be&nbsp;raised.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;expected_regexp:&nbsp;Regexp&nbsp;(re&nbsp;pattern&nbsp;object&nbsp;or&nbsp;string)&nbsp;expected<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;to&nbsp;be&nbsp;found&nbsp;in&nbsp;error&nbsp;message.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;callable_obj:&nbsp;Function&nbsp;to&nbsp;be&nbsp;called.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;args:&nbsp;Extra&nbsp;args.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;kwargs:&nbsp;Extra&nbsp;kwargs.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertRegexpMatches"><strong>assertRegexpMatches</strong></a>(self, text, expected_regexp, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;the&nbsp;test&nbsp;unless&nbsp;the&nbsp;text&nbsp;matches&nbsp;the&nbsp;regular&nbsp;expression.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertSequenceEqual"><strong>assertSequenceEqual</strong></a>(self, seq1, seq2, msg<font color="#909090">=None</font>, seq_type<font color="#909090">=None</font>)</dt><dd><tt>An&nbsp;equality&nbsp;assertion&nbsp;for&nbsp;ordered&nbsp;sequences&nbsp;(like&nbsp;lists&nbsp;and&nbsp;tuples).<br>
+&nbsp;<br>
+For&nbsp;the&nbsp;purposes&nbsp;of&nbsp;this&nbsp;function,&nbsp;a&nbsp;valid&nbsp;ordered&nbsp;sequence&nbsp;type&nbsp;is&nbsp;one<br>
+which&nbsp;can&nbsp;be&nbsp;indexed,&nbsp;has&nbsp;a&nbsp;length,&nbsp;and&nbsp;has&nbsp;an&nbsp;equality&nbsp;operator.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;seq1:&nbsp;The&nbsp;first&nbsp;sequence&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;seq2:&nbsp;The&nbsp;second&nbsp;sequence&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;seq_type:&nbsp;The&nbsp;expected&nbsp;datatype&nbsp;of&nbsp;the&nbsp;sequences,&nbsp;or&nbsp;None&nbsp;if&nbsp;no<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;datatype&nbsp;should&nbsp;be&nbsp;enforced.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;msg:&nbsp;Optional&nbsp;message&nbsp;to&nbsp;use&nbsp;on&nbsp;failure&nbsp;instead&nbsp;of&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;differences.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertSetEqual"><strong>assertSetEqual</strong></a>(self, set1, set2, msg<font color="#909090">=None</font>)</dt><dd><tt>A&nbsp;set-specific&nbsp;equality&nbsp;assertion.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;set1:&nbsp;The&nbsp;first&nbsp;set&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;set2:&nbsp;The&nbsp;second&nbsp;set&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;msg:&nbsp;Optional&nbsp;message&nbsp;to&nbsp;use&nbsp;on&nbsp;failure&nbsp;instead&nbsp;of&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;differences.<br>
+&nbsp;<br>
+assertSetEqual&nbsp;uses&nbsp;ducktyping&nbsp;to&nbsp;support&nbsp;different&nbsp;types&nbsp;of&nbsp;sets,&nbsp;and<br>
+is&nbsp;optimized&nbsp;for&nbsp;sets&nbsp;specifically&nbsp;(parameters&nbsp;must&nbsp;support&nbsp;a<br>
+difference&nbsp;method).</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertTrue"><strong>assertTrue</strong></a>(self, expr, msg<font color="#909090">=None</font>)</dt><dd><tt>Check&nbsp;that&nbsp;the&nbsp;expression&nbsp;is&nbsp;true.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assertTupleEqual"><strong>assertTupleEqual</strong></a>(self, tuple1, tuple2, msg<font color="#909090">=None</font>)</dt><dd><tt>A&nbsp;tuple-specific&nbsp;equality&nbsp;assertion.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;tuple1:&nbsp;The&nbsp;first&nbsp;tuple&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;tuple2:&nbsp;The&nbsp;second&nbsp;tuple&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;msg:&nbsp;Optional&nbsp;message&nbsp;to&nbsp;use&nbsp;on&nbsp;failure&nbsp;instead&nbsp;of&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;differences.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-assert_"><strong>assert_</strong></a> = assertTrue(self, expr, msg<font color="#909090">=None</font>)</dt><dd><tt>Check&nbsp;that&nbsp;the&nbsp;expression&nbsp;is&nbsp;true.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-countTestCases"><strong>countTestCases</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="StorySetSmokeTest-debug"><strong>debug</strong></a>(self)</dt><dd><tt>Run&nbsp;the&nbsp;test&nbsp;without&nbsp;collecting&nbsp;errors&nbsp;in&nbsp;a&nbsp;TestResult</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-defaultTestResult"><strong>defaultTestResult</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="StorySetSmokeTest-doCleanups"><strong>doCleanups</strong></a>(self)</dt><dd><tt>Execute&nbsp;all&nbsp;cleanup&nbsp;functions.&nbsp;Normally&nbsp;called&nbsp;for&nbsp;you&nbsp;after<br>
+tearDown.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-fail"><strong>fail</strong></a>(self, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;immediately,&nbsp;with&nbsp;the&nbsp;given&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-failIf"><strong>failIf</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="StorySetSmokeTest-failIfAlmostEqual"><strong>failIfAlmostEqual</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="StorySetSmokeTest-failIfEqual"><strong>failIfEqual</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="StorySetSmokeTest-failUnless"><strong>failUnless</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="StorySetSmokeTest-failUnlessAlmostEqual"><strong>failUnlessAlmostEqual</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="StorySetSmokeTest-failUnlessEqual"><strong>failUnlessEqual</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="StorySetSmokeTest-failUnlessRaises"><strong>failUnlessRaises</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="StorySetSmokeTest-id"><strong>id</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="StorySetSmokeTest-run"><strong>run</strong></a>(self, result<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="StorySetSmokeTest-shortDescription"><strong>shortDescription</strong></a>(self)</dt><dd><tt>Returns&nbsp;a&nbsp;one-line&nbsp;description&nbsp;of&nbsp;the&nbsp;test,&nbsp;or&nbsp;None&nbsp;if&nbsp;no<br>
+description&nbsp;has&nbsp;been&nbsp;provided.<br>
+&nbsp;<br>
+The&nbsp;default&nbsp;implementation&nbsp;of&nbsp;this&nbsp;method&nbsp;returns&nbsp;the&nbsp;first&nbsp;line&nbsp;of<br>
+the&nbsp;specified&nbsp;test&nbsp;method's&nbsp;docstring.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-skipTest"><strong>skipTest</strong></a>(self, reason)</dt><dd><tt>Skip&nbsp;this&nbsp;test.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-tearDown"><strong>tearDown</strong></a>(self)</dt><dd><tt>Hook&nbsp;method&nbsp;for&nbsp;deconstructing&nbsp;the&nbsp;test&nbsp;fixture&nbsp;after&nbsp;testing&nbsp;it.</tt></dd></dl>
+
+<hr>
+Class methods inherited from <a href="unittest.case.html#TestCase">unittest.case.TestCase</a>:<br>
+<dl><dt><a name="StorySetSmokeTest-setUpClass"><strong>setUpClass</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Hook&nbsp;method&nbsp;for&nbsp;setting&nbsp;up&nbsp;class&nbsp;fixture&nbsp;before&nbsp;running&nbsp;tests&nbsp;in&nbsp;the&nbsp;class.</tt></dd></dl>
+
+<dl><dt><a name="StorySetSmokeTest-tearDownClass"><strong>tearDownClass</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Hook&nbsp;method&nbsp;for&nbsp;deconstructing&nbsp;the&nbsp;class&nbsp;fixture&nbsp;after&nbsp;running&nbsp;all&nbsp;tests&nbsp;in&nbsp;the&nbsp;class.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="unittest.case.html#TestCase">unittest.case.TestCase</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="unittest.case.html#TestCase">unittest.case.TestCase</a>:<br>
+<dl><dt><strong>failureException</strong> = &lt;type 'exceptions.AssertionError'&gt;<dd><tt>Assertion&nbsp;failed.</tt></dl>
+
+<dl><dt><strong>longMessage</strong> = False</dl>
+
+<dl><dt><strong>maxDiff</strong> = 640</dl>
+
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.testing.stream.html b/catapult/telemetry/docs/pydoc/telemetry.testing.stream.html
new file mode 100644
index 0000000..c0fc4a0
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.testing.stream.html
@@ -0,0 +1,56 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.testing.stream</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.testing.html"><font color="#ffffff">testing</font></a>.stream</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/testing/stream.py">telemetry/testing/stream.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.testing.stream.html#TestOutputStream">TestOutputStream</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TestOutputStream">class <strong>TestOutputStream</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="TestOutputStream-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TestOutputStream-flush"><strong>flush</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TestOutputStream-write"><strong>write</strong></a>(self, data)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>output_data</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.testing.system_stub.html b/catapult/telemetry/docs/pydoc/telemetry.testing.system_stub.html
new file mode 100644
index 0000000..c6c9055
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.testing.system_stub.html
@@ -0,0 +1,446 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.testing.system_stub</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.testing.html"><font color="#ffffff">testing</font></a>.system_stub</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/testing/system_stub.py">telemetry/testing/system_stub.py</a></font></td></tr></table>
+    <p><tt>Provides&nbsp;stubs&nbsp;for&nbsp;os,&nbsp;sys&nbsp;and&nbsp;subprocess&nbsp;for&nbsp;testing<br>
+&nbsp;<br>
+This&nbsp;test&nbsp;allows&nbsp;one&nbsp;to&nbsp;test&nbsp;code&nbsp;that&nbsp;itself&nbsp;uses&nbsp;os,&nbsp;sys,&nbsp;and&nbsp;subprocess.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="ntpath.html">ntpath</a><br>
+<a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="posixpath.html">posixpath</a><br>
+<a href="re.html">re</a><br>
+</td><td width="25%" valign=top><a href="shlex.html">shlex</a><br>
+<a href="sys.html">sys</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.testing.system_stub.html#AdbDevice">AdbDevice</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.testing.system_stub.html#AdbInstallCertStub">AdbInstallCertStub</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.testing.system_stub.html#CertUtilsStub">CertUtilsStub</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.testing.system_stub.html#CloudStorageModuleStub">CloudStorageModuleStub</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.testing.system_stub.html#LoggingStub">LoggingStub</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.testing.system_stub.html#OpenFunctionStub">OpenFunctionStub</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.testing.system_stub.html#OsModuleStub">OsModuleStub</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.testing.system_stub.html#Override">Override</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.testing.system_stub.html#PerfControlModuleStub">PerfControlModuleStub</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.testing.system_stub.html#PlatformSettingsStub">PlatformSettingsStub</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.testing.system_stub.html#RawInputFunctionStub">RawInputFunctionStub</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.testing.system_stub.html#SubprocessModuleStub">SubprocessModuleStub</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.testing.system_stub.html#SysModuleStub">SysModuleStub</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.testing.system_stub.html#ThermalThrottleModuleStub">ThermalThrottleModuleStub</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="AdbDevice">class <strong>AdbDevice</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="AdbDevice-FileExists"><strong>FileExists</strong></a>(self, _)</dt></dl>
+
+<dl><dt><a name="AdbDevice-GetProp"><strong>GetProp</strong></a>(self, property_name)</dt></dl>
+
+<dl><dt><a name="AdbDevice-HasRoot"><strong>HasRoot</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AdbDevice-NeedsSU"><strong>NeedsSU</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AdbDevice-ReadFile"><strong>ReadFile</strong></a>(self, device_path, as_root<font color="#909090">=False</font>)</dt></dl>
+
+<dl><dt><a name="AdbDevice-RunShellCommand"><strong>RunShellCommand</strong></a>(self, args, **_kwargs)</dt></dl>
+
+<dl><dt><a name="AdbDevice-SetProp"><strong>SetProp</strong></a>(self, property_name, property_value)</dt></dl>
+
+<dl><dt><a name="AdbDevice-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="AdbInstallCertStub">class <strong>AdbInstallCertStub</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>AndroidCertInstaller</strong> = &lt;class 'telemetry.testing.system_stub.AndroidCertInstaller'&gt;</dl>
+
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="CertUtilsStub">class <strong>CertUtilsStub</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Static methods defined here:<br>
+<dl><dt><a name="CertUtilsStub-generate_dummy_ca_cert"><strong>generate_dummy_ca_cert</strong></a>()</dt></dl>
+
+<dl><dt><a name="CertUtilsStub-write_dummy_ca_cert"><strong>write_dummy_ca_cert</strong></a>(_ca_cert_str, _key_str, cert_path)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>openssl_import_error</strong> = None</dl>
+
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="CloudStorageModuleStub">class <strong>CloudStorageModuleStub</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="CloudStorageModuleStub-CalculateHash"><strong>CalculateHash</strong></a>(self, file_path)</dt></dl>
+
+<dl><dt><a name="CloudStorageModuleStub-ChangeRemoteHashForTesting"><strong>ChangeRemoteHashForTesting</strong></a>(self, bucket, remote_path, new_hash)</dt></dl>
+
+<dl><dt><a name="CloudStorageModuleStub-CheckPermissionLevelForBucket"><strong>CheckPermissionLevelForBucket</strong></a>(self, bucket)</dt></dl>
+
+<dl><dt><a name="CloudStorageModuleStub-Exists"><strong>Exists</strong></a>(self, bucket, remote_path)</dt></dl>
+
+<dl><dt><a name="CloudStorageModuleStub-Get"><strong>Get</strong></a>(self, bucket, remote_path, local_path)</dt></dl>
+
+<dl><dt><a name="CloudStorageModuleStub-GetFilesInDirectoryIfChanged"><strong>GetFilesInDirectoryIfChanged</strong></a>(self, directory, bucket)</dt></dl>
+
+<dl><dt><a name="CloudStorageModuleStub-GetHelper"><strong>GetHelper</strong></a>(self, bucket, remote_path, local_path, only_if_changed)</dt></dl>
+
+<dl><dt><a name="CloudStorageModuleStub-GetIfChanged"><strong>GetIfChanged</strong></a>(self, local_path, bucket<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="CloudStorageModuleStub-GetLocalDataFiles"><strong>GetLocalDataFiles</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CloudStorageModuleStub-GetLocalHashFiles"><strong>GetLocalHashFiles</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CloudStorageModuleStub-GetRemotePathsForTesting"><strong>GetRemotePathsForTesting</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="CloudStorageModuleStub-Insert"><strong>Insert</strong></a>(self, bucket, remote_path, local_path)</dt></dl>
+
+<dl><dt><a name="CloudStorageModuleStub-List"><strong>List</strong></a>(self, bucket)</dt></dl>
+
+<dl><dt><a name="CloudStorageModuleStub-ReadHash"><strong>ReadHash</strong></a>(self, hash_path)</dt></dl>
+
+<dl><dt><a name="CloudStorageModuleStub-SetCalculatedHashesForTesting"><strong>SetCalculatedHashesForTesting</strong></a>(self, calculated_hash_dictionary)</dt><dd><tt>#&nbsp;Set&nbsp;a&nbsp;dictionary&nbsp;of&nbsp;data&nbsp;files&nbsp;and&nbsp;their&nbsp;"calculated"&nbsp;hashes.</tt></dd></dl>
+
+<dl><dt><a name="CloudStorageModuleStub-SetHashFileContentsForTesting"><strong>SetHashFileContentsForTesting</strong></a>(self, hash_file_dictionary)</dt><dd><tt>#&nbsp;Set&nbsp;a&nbsp;dictionary&nbsp;of&nbsp;hash&nbsp;files&nbsp;and&nbsp;the&nbsp;hashes&nbsp;they&nbsp;should&nbsp;contain.</tt></dd></dl>
+
+<dl><dt><a name="CloudStorageModuleStub-SetPermissionLevelForTesting"><strong>SetPermissionLevelForTesting</strong></a>(self, permission_level)</dt></dl>
+
+<dl><dt><a name="CloudStorageModuleStub-SetRemotePathsForTesting"><strong>SetRemotePathsForTesting</strong></a>(self, remote_path_dict<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="CloudStorageModuleStub-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>BUCKET_ALIASES</strong> = {'internal': 'chrome-telemetry', 'partner': 'chrome-partner-telemetry', 'public': 'chromium-telemetry'}</dl>
+
+<dl><dt><strong>CREDENTIALS_ERROR_PERMISSION</strong> = -1</dl>
+
+<dl><dt><strong>CloudStorageError</strong> = &lt;class 'telemetry.testing.system_stub.CloudStorageError'&gt;</dl>
+
+<dl><dt><strong>CredentialsError</strong> = &lt;class 'telemetry.testing.system_stub.CredentialsError'&gt;</dl>
+
+<dl><dt><strong>INTERNAL_BUCKET</strong> = 'chrome-telemetry'</dl>
+
+<dl><dt><strong>INTERNAL_PERMISSION</strong> = 2</dl>
+
+<dl><dt><strong>NotFoundError</strong> = &lt;class 'telemetry.testing.system_stub.NotFoundError'&gt;</dl>
+
+<dl><dt><strong>PARTNER_BUCKET</strong> = 'chrome-partner-telemetry'</dl>
+
+<dl><dt><strong>PARTNER_PERMISSION</strong> = 1</dl>
+
+<dl><dt><strong>PUBLIC_BUCKET</strong> = 'chromium-telemetry'</dl>
+
+<dl><dt><strong>PUBLIC_PERMISSION</strong> = 0</dl>
+
+<dl><dt><strong>PermissionError</strong> = &lt;class 'telemetry.testing.system_stub.PermissionError'&gt;</dl>
+
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="LoggingStub">class <strong>LoggingStub</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="LoggingStub-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="LoggingStub-error"><strong>error</strong></a>(self, msg, *args)</dt></dl>
+
+<dl><dt><a name="LoggingStub-info"><strong>info</strong></a>(self, msg, *args)</dt></dl>
+
+<dl><dt><a name="LoggingStub-warn"><strong>warn</strong></a>(self, msg, *args)</dt></dl>
+
+<dl><dt><a name="LoggingStub-warning"><strong>warning</strong></a>(self, msg, *args)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="OpenFunctionStub">class <strong>OpenFunctionStub</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="OpenFunctionStub-__call__"><strong>__call__</strong></a>(self, name, *args, **kwargs)</dt></dl>
+
+<dl><dt><a name="OpenFunctionStub-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>FileStub</strong> = &lt;class 'telemetry.testing.system_stub.FileStub'&gt;</dl>
+
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="OsModuleStub">class <strong>OsModuleStub</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="OsModuleStub-__init__"><strong>__init__</strong></a>(self, sys_module<font color="#909090">=&lt;module 'sys' (built-in)&gt;</font>)</dt></dl>
+
+<dl><dt><a name="OsModuleStub-access"><strong>access</strong></a>(self, path, _)</dt></dl>
+
+<dl><dt><a name="OsModuleStub-chdir"><strong>chdir</strong></a>(self, path)</dt></dl>
+
+<dl><dt><a name="OsModuleStub-getenv"><strong>getenv</strong></a>(self, name, value<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="OsModuleStub-walk"><strong>walk</strong></a>(self, top)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>OsEnvironModuleStub</strong> = &lt;class 'telemetry.testing.system_stub.OsEnvironModuleStub'&gt;</dl>
+
+<dl><dt><strong>OsPathModuleStub</strong> = &lt;class 'telemetry.testing.system_stub.OsPathModuleStub'&gt;</dl>
+
+<dl><dt><strong>X_OK</strong> = 1</dl>
+
+<dl><dt><strong>pathsep</strong> = ':'</dl>
+
+<dl><dt><strong>sep</strong> = '/'</dl>
+
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Override">class <strong>Override</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="Override-Restore"><strong>Restore</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="Override-__del__"><strong>__del__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="Override-__init__"><strong>__init__</strong></a>(self, base_module, module_list)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PerfControlModuleStub">class <strong>PerfControlModuleStub</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="PerfControlModuleStub-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>PerfControlStub</strong> = &lt;class 'telemetry.testing.system_stub.PerfControlStub'&gt;</dl>
+
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="PlatformSettingsStub">class <strong>PlatformSettingsStub</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Static methods defined here:<br>
+<dl><dt><a name="PlatformSettingsStub-HasSniSupport"><strong>HasSniSupport</strong></a>()</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="RawInputFunctionStub">class <strong>RawInputFunctionStub</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="RawInputFunctionStub-__call__"><strong>__call__</strong></a>(self, name, *args, **kwargs)</dt></dl>
+
+<dl><dt><a name="RawInputFunctionStub-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="SubprocessModuleStub">class <strong>SubprocessModuleStub</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="SubprocessModuleStub-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="SubprocessModuleStub-call"><strong>call</strong></a>(self, *args, **kwargs)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>PopenStub</strong> = &lt;class 'telemetry.testing.system_stub.PopenStub'&gt;</dl>
+
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="SysModuleStub">class <strong>SysModuleStub</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="SysModuleStub-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ThermalThrottleModuleStub">class <strong>ThermalThrottleModuleStub</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="ThermalThrottleModuleStub-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>ThermalThrottleStub</strong> = &lt;class 'telemetry.testing.system_stub.ThermalThrottleStub'&gt;</dl>
+
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.testing.tab_test_case.html b/catapult/telemetry/docs/pydoc/telemetry.testing.tab_test_case.html
new file mode 100644
index 0000000..b649ee2
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.testing.tab_test_case.html
@@ -0,0 +1,345 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.testing.tab_test_case</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.testing.html"><font color="#ffffff">testing</font></a>.tab_test_case</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/testing/tab_test_case.py">telemetry/testing/tab_test_case.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.testing.browser_test_case.html">telemetry.testing.browser_test_case</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.testing.browser_test_case.html#BrowserTestCase">telemetry.testing.browser_test_case.BrowserTestCase</a>(<a href="unittest.case.html#TestCase">unittest.case.TestCase</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.testing.tab_test_case.html#TabTestCase">TabTestCase</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TabTestCase">class <strong>TabTestCase</strong></a>(<a href="telemetry.testing.browser_test_case.html#BrowserTestCase">telemetry.testing.browser_test_case.BrowserTestCase</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.testing.tab_test_case.html#TabTestCase">TabTestCase</a></dd>
+<dd><a href="telemetry.testing.browser_test_case.html#BrowserTestCase">telemetry.testing.browser_test_case.BrowserTestCase</a></dd>
+<dd><a href="unittest.case.html#TestCase">unittest.case.TestCase</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="TabTestCase-Navigate"><strong>Navigate</strong></a>(self, filename, script_to_evaluate_on_commit<font color="#909090">=None</font>)</dt><dd><tt>Navigates&nbsp;|tab|&nbsp;to&nbsp;|filename|&nbsp;in&nbsp;the&nbsp;unittest&nbsp;data&nbsp;directory.<br>
+&nbsp;<br>
+Also&nbsp;sets&nbsp;up&nbsp;http&nbsp;server&nbsp;to&nbsp;point&nbsp;to&nbsp;the&nbsp;unittest&nbsp;data&nbsp;directory.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-__init__"><strong>__init__</strong></a>(self, *args)</dt></dl>
+
+<dl><dt><a name="TabTestCase-setUp"><strong>setUp</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>tabs</strong></dt>
+</dl>
+<hr>
+Class methods inherited from <a href="telemetry.testing.browser_test_case.html#BrowserTestCase">telemetry.testing.browser_test_case.BrowserTestCase</a>:<br>
+<dl><dt><a name="TabTestCase-CustomizeBrowserOptions"><strong>CustomizeBrowserOptions</strong></a>(cls, options)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Override&nbsp;to&nbsp;add&nbsp;test-specific&nbsp;options&nbsp;to&nbsp;the&nbsp;BrowserOptions&nbsp;object</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-UrlOfUnittestFile"><strong>UrlOfUnittestFile</strong></a>(cls, filename)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="TabTestCase-setUpClass"><strong>setUpClass</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="TabTestCase-tearDownClass"><strong>tearDownClass</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Methods inherited from <a href="unittest.case.html#TestCase">unittest.case.TestCase</a>:<br>
+<dl><dt><a name="TabTestCase-__call__"><strong>__call__</strong></a>(self, *args, **kwds)</dt></dl>
+
+<dl><dt><a name="TabTestCase-__eq__"><strong>__eq__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="TabTestCase-__hash__"><strong>__hash__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TabTestCase-__ne__"><strong>__ne__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="TabTestCase-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TabTestCase-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TabTestCase-addCleanup"><strong>addCleanup</strong></a>(self, function, *args, **kwargs)</dt><dd><tt>Add&nbsp;a&nbsp;function,&nbsp;with&nbsp;arguments,&nbsp;to&nbsp;be&nbsp;called&nbsp;when&nbsp;the&nbsp;test&nbsp;is<br>
+completed.&nbsp;Functions&nbsp;added&nbsp;are&nbsp;called&nbsp;on&nbsp;a&nbsp;LIFO&nbsp;basis&nbsp;and&nbsp;are<br>
+called&nbsp;after&nbsp;tearDown&nbsp;on&nbsp;test&nbsp;failure&nbsp;or&nbsp;success.<br>
+&nbsp;<br>
+Cleanup&nbsp;items&nbsp;are&nbsp;called&nbsp;even&nbsp;if&nbsp;setUp&nbsp;fails&nbsp;(unlike&nbsp;tearDown).</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-addTypeEqualityFunc"><strong>addTypeEqualityFunc</strong></a>(self, typeobj, function)</dt><dd><tt>Add&nbsp;a&nbsp;type&nbsp;specific&nbsp;assertEqual&nbsp;style&nbsp;function&nbsp;to&nbsp;compare&nbsp;a&nbsp;type.<br>
+&nbsp;<br>
+This&nbsp;method&nbsp;is&nbsp;for&nbsp;use&nbsp;by&nbsp;TestCase&nbsp;subclasses&nbsp;that&nbsp;need&nbsp;to&nbsp;register<br>
+their&nbsp;own&nbsp;type&nbsp;equality&nbsp;functions&nbsp;to&nbsp;provide&nbsp;nicer&nbsp;error&nbsp;messages.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;typeobj:&nbsp;The&nbsp;data&nbsp;type&nbsp;to&nbsp;call&nbsp;this&nbsp;function&nbsp;on&nbsp;when&nbsp;both&nbsp;values<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;are&nbsp;of&nbsp;the&nbsp;same&nbsp;type&nbsp;in&nbsp;<a href="#TabTestCase-assertEqual">assertEqual</a>().<br>
+&nbsp;&nbsp;&nbsp;&nbsp;function:&nbsp;The&nbsp;callable&nbsp;taking&nbsp;two&nbsp;arguments&nbsp;and&nbsp;an&nbsp;optional<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;msg=&nbsp;argument&nbsp;that&nbsp;raises&nbsp;self.<strong>failureException</strong>&nbsp;with&nbsp;a<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;useful&nbsp;error&nbsp;message&nbsp;when&nbsp;the&nbsp;two&nbsp;arguments&nbsp;are&nbsp;not&nbsp;equal.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertAlmostEqual"><strong>assertAlmostEqual</strong></a>(self, first, second, places<font color="#909090">=None</font>, msg<font color="#909090">=None</font>, delta<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;unequal&nbsp;as&nbsp;determined&nbsp;by&nbsp;their<br>
+difference&nbsp;rounded&nbsp;to&nbsp;the&nbsp;given&nbsp;number&nbsp;of&nbsp;decimal&nbsp;places<br>
+(default&nbsp;7)&nbsp;and&nbsp;comparing&nbsp;to&nbsp;zero,&nbsp;or&nbsp;by&nbsp;comparing&nbsp;that&nbsp;the<br>
+difference&nbsp;between&nbsp;the&nbsp;two&nbsp;objects&nbsp;is&nbsp;more&nbsp;than&nbsp;the&nbsp;given&nbsp;delta.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;decimal&nbsp;places&nbsp;(from&nbsp;zero)&nbsp;are&nbsp;usually&nbsp;not&nbsp;the&nbsp;same<br>
+as&nbsp;significant&nbsp;digits&nbsp;(measured&nbsp;from&nbsp;the&nbsp;most&nbsp;significant&nbsp;digit).<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;two&nbsp;objects&nbsp;compare&nbsp;equal&nbsp;then&nbsp;they&nbsp;will&nbsp;automatically<br>
+compare&nbsp;almost&nbsp;equal.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertAlmostEquals"><strong>assertAlmostEquals</strong></a> = assertAlmostEqual(self, first, second, places<font color="#909090">=None</font>, msg<font color="#909090">=None</font>, delta<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;unequal&nbsp;as&nbsp;determined&nbsp;by&nbsp;their<br>
+difference&nbsp;rounded&nbsp;to&nbsp;the&nbsp;given&nbsp;number&nbsp;of&nbsp;decimal&nbsp;places<br>
+(default&nbsp;7)&nbsp;and&nbsp;comparing&nbsp;to&nbsp;zero,&nbsp;or&nbsp;by&nbsp;comparing&nbsp;that&nbsp;the<br>
+difference&nbsp;between&nbsp;the&nbsp;two&nbsp;objects&nbsp;is&nbsp;more&nbsp;than&nbsp;the&nbsp;given&nbsp;delta.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;decimal&nbsp;places&nbsp;(from&nbsp;zero)&nbsp;are&nbsp;usually&nbsp;not&nbsp;the&nbsp;same<br>
+as&nbsp;significant&nbsp;digits&nbsp;(measured&nbsp;from&nbsp;the&nbsp;most&nbsp;significant&nbsp;digit).<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;two&nbsp;objects&nbsp;compare&nbsp;equal&nbsp;then&nbsp;they&nbsp;will&nbsp;automatically<br>
+compare&nbsp;almost&nbsp;equal.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertDictContainsSubset"><strong>assertDictContainsSubset</strong></a>(self, expected, actual, msg<font color="#909090">=None</font>)</dt><dd><tt>Checks&nbsp;whether&nbsp;actual&nbsp;is&nbsp;a&nbsp;superset&nbsp;of&nbsp;expected.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertDictEqual"><strong>assertDictEqual</strong></a>(self, d1, d2, msg<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="TabTestCase-assertEqual"><strong>assertEqual</strong></a>(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;unequal&nbsp;as&nbsp;determined&nbsp;by&nbsp;the&nbsp;'=='<br>
+operator.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertEquals"><strong>assertEquals</strong></a> = assertEqual(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;unequal&nbsp;as&nbsp;determined&nbsp;by&nbsp;the&nbsp;'=='<br>
+operator.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertFalse"><strong>assertFalse</strong></a>(self, expr, msg<font color="#909090">=None</font>)</dt><dd><tt>Check&nbsp;that&nbsp;the&nbsp;expression&nbsp;is&nbsp;false.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertGreater"><strong>assertGreater</strong></a>(self, a, b, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#TabTestCase-assertTrue">assertTrue</a>(a&nbsp;&gt;&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertGreaterEqual"><strong>assertGreaterEqual</strong></a>(self, a, b, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#TabTestCase-assertTrue">assertTrue</a>(a&nbsp;&gt;=&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertIn"><strong>assertIn</strong></a>(self, member, container, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#TabTestCase-assertTrue">assertTrue</a>(a&nbsp;in&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertIs"><strong>assertIs</strong></a>(self, expr1, expr2, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#TabTestCase-assertTrue">assertTrue</a>(a&nbsp;is&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertIsInstance"><strong>assertIsInstance</strong></a>(self, obj, cls, msg<font color="#909090">=None</font>)</dt><dd><tt>Same&nbsp;as&nbsp;<a href="#TabTestCase-assertTrue">assertTrue</a>(isinstance(obj,&nbsp;cls)),&nbsp;with&nbsp;a&nbsp;nicer<br>
+default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertIsNone"><strong>assertIsNone</strong></a>(self, obj, msg<font color="#909090">=None</font>)</dt><dd><tt>Same&nbsp;as&nbsp;<a href="#TabTestCase-assertTrue">assertTrue</a>(obj&nbsp;is&nbsp;None),&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertIsNot"><strong>assertIsNot</strong></a>(self, expr1, expr2, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#TabTestCase-assertTrue">assertTrue</a>(a&nbsp;is&nbsp;not&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertIsNotNone"><strong>assertIsNotNone</strong></a>(self, obj, msg<font color="#909090">=None</font>)</dt><dd><tt>Included&nbsp;for&nbsp;symmetry&nbsp;with&nbsp;assertIsNone.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertItemsEqual"><strong>assertItemsEqual</strong></a>(self, expected_seq, actual_seq, msg<font color="#909090">=None</font>)</dt><dd><tt>An&nbsp;unordered&nbsp;sequence&nbsp;specific&nbsp;comparison.&nbsp;It&nbsp;asserts&nbsp;that<br>
+actual_seq&nbsp;and&nbsp;expected_seq&nbsp;have&nbsp;the&nbsp;same&nbsp;element&nbsp;counts.<br>
+Equivalent&nbsp;to::<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;<a href="#TabTestCase-assertEqual">assertEqual</a>(Counter(iter(actual_seq)),<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Counter(iter(expected_seq)))<br>
+&nbsp;<br>
+Asserts&nbsp;that&nbsp;each&nbsp;element&nbsp;has&nbsp;the&nbsp;same&nbsp;count&nbsp;in&nbsp;both&nbsp;sequences.<br>
+Example:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;-&nbsp;[0,&nbsp;1,&nbsp;1]&nbsp;and&nbsp;[1,&nbsp;0,&nbsp;1]&nbsp;compare&nbsp;equal.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;-&nbsp;[0,&nbsp;0,&nbsp;1]&nbsp;and&nbsp;[0,&nbsp;1]&nbsp;compare&nbsp;unequal.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertLess"><strong>assertLess</strong></a>(self, a, b, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#TabTestCase-assertTrue">assertTrue</a>(a&nbsp;&lt;&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertLessEqual"><strong>assertLessEqual</strong></a>(self, a, b, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#TabTestCase-assertTrue">assertTrue</a>(a&nbsp;&lt;=&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertListEqual"><strong>assertListEqual</strong></a>(self, list1, list2, msg<font color="#909090">=None</font>)</dt><dd><tt>A&nbsp;list-specific&nbsp;equality&nbsp;assertion.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;list1:&nbsp;The&nbsp;first&nbsp;list&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;list2:&nbsp;The&nbsp;second&nbsp;list&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;msg:&nbsp;Optional&nbsp;message&nbsp;to&nbsp;use&nbsp;on&nbsp;failure&nbsp;instead&nbsp;of&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;differences.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertMultiLineEqual"><strong>assertMultiLineEqual</strong></a>(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Assert&nbsp;that&nbsp;two&nbsp;multi-line&nbsp;strings&nbsp;are&nbsp;equal.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertNotAlmostEqual"><strong>assertNotAlmostEqual</strong></a>(self, first, second, places<font color="#909090">=None</font>, msg<font color="#909090">=None</font>, delta<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;equal&nbsp;as&nbsp;determined&nbsp;by&nbsp;their<br>
+difference&nbsp;rounded&nbsp;to&nbsp;the&nbsp;given&nbsp;number&nbsp;of&nbsp;decimal&nbsp;places<br>
+(default&nbsp;7)&nbsp;and&nbsp;comparing&nbsp;to&nbsp;zero,&nbsp;or&nbsp;by&nbsp;comparing&nbsp;that&nbsp;the<br>
+difference&nbsp;between&nbsp;the&nbsp;two&nbsp;objects&nbsp;is&nbsp;less&nbsp;than&nbsp;the&nbsp;given&nbsp;delta.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;decimal&nbsp;places&nbsp;(from&nbsp;zero)&nbsp;are&nbsp;usually&nbsp;not&nbsp;the&nbsp;same<br>
+as&nbsp;significant&nbsp;digits&nbsp;(measured&nbsp;from&nbsp;the&nbsp;most&nbsp;significant&nbsp;digit).<br>
+&nbsp;<br>
+Objects&nbsp;that&nbsp;are&nbsp;equal&nbsp;automatically&nbsp;fail.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertNotAlmostEquals"><strong>assertNotAlmostEquals</strong></a> = assertNotAlmostEqual(self, first, second, places<font color="#909090">=None</font>, msg<font color="#909090">=None</font>, delta<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;equal&nbsp;as&nbsp;determined&nbsp;by&nbsp;their<br>
+difference&nbsp;rounded&nbsp;to&nbsp;the&nbsp;given&nbsp;number&nbsp;of&nbsp;decimal&nbsp;places<br>
+(default&nbsp;7)&nbsp;and&nbsp;comparing&nbsp;to&nbsp;zero,&nbsp;or&nbsp;by&nbsp;comparing&nbsp;that&nbsp;the<br>
+difference&nbsp;between&nbsp;the&nbsp;two&nbsp;objects&nbsp;is&nbsp;less&nbsp;than&nbsp;the&nbsp;given&nbsp;delta.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;decimal&nbsp;places&nbsp;(from&nbsp;zero)&nbsp;are&nbsp;usually&nbsp;not&nbsp;the&nbsp;same<br>
+as&nbsp;significant&nbsp;digits&nbsp;(measured&nbsp;from&nbsp;the&nbsp;most&nbsp;significant&nbsp;digit).<br>
+&nbsp;<br>
+Objects&nbsp;that&nbsp;are&nbsp;equal&nbsp;automatically&nbsp;fail.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertNotEqual"><strong>assertNotEqual</strong></a>(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;equal&nbsp;as&nbsp;determined&nbsp;by&nbsp;the&nbsp;'!='<br>
+operator.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertNotEquals"><strong>assertNotEquals</strong></a> = assertNotEqual(self, first, second, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;if&nbsp;the&nbsp;two&nbsp;objects&nbsp;are&nbsp;equal&nbsp;as&nbsp;determined&nbsp;by&nbsp;the&nbsp;'!='<br>
+operator.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertNotIn"><strong>assertNotIn</strong></a>(self, member, container, msg<font color="#909090">=None</font>)</dt><dd><tt>Just&nbsp;like&nbsp;<a href="#TabTestCase-assertTrue">assertTrue</a>(a&nbsp;not&nbsp;in&nbsp;b),&nbsp;but&nbsp;with&nbsp;a&nbsp;nicer&nbsp;default&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertNotIsInstance"><strong>assertNotIsInstance</strong></a>(self, obj, cls, msg<font color="#909090">=None</font>)</dt><dd><tt>Included&nbsp;for&nbsp;symmetry&nbsp;with&nbsp;assertIsInstance.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertNotRegexpMatches"><strong>assertNotRegexpMatches</strong></a>(self, text, unexpected_regexp, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;the&nbsp;test&nbsp;if&nbsp;the&nbsp;text&nbsp;matches&nbsp;the&nbsp;regular&nbsp;expression.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertRaises"><strong>assertRaises</strong></a>(self, excClass, callableObj<font color="#909090">=None</font>, *args, **kwargs)</dt><dd><tt>Fail&nbsp;unless&nbsp;an&nbsp;exception&nbsp;of&nbsp;class&nbsp;excClass&nbsp;is&nbsp;raised<br>
+by&nbsp;callableObj&nbsp;when&nbsp;invoked&nbsp;with&nbsp;arguments&nbsp;args&nbsp;and&nbsp;keyword<br>
+arguments&nbsp;kwargs.&nbsp;If&nbsp;a&nbsp;different&nbsp;type&nbsp;of&nbsp;exception&nbsp;is<br>
+raised,&nbsp;it&nbsp;will&nbsp;not&nbsp;be&nbsp;caught,&nbsp;and&nbsp;the&nbsp;test&nbsp;case&nbsp;will&nbsp;be<br>
+deemed&nbsp;to&nbsp;have&nbsp;suffered&nbsp;an&nbsp;error,&nbsp;exactly&nbsp;as&nbsp;for&nbsp;an<br>
+unexpected&nbsp;exception.<br>
+&nbsp;<br>
+If&nbsp;called&nbsp;with&nbsp;callableObj&nbsp;omitted&nbsp;or&nbsp;None,&nbsp;will&nbsp;return&nbsp;a<br>
+context&nbsp;object&nbsp;used&nbsp;like&nbsp;this::<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;with&nbsp;<a href="#TabTestCase-assertRaises">assertRaises</a>(SomeException):<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;do_something()<br>
+&nbsp;<br>
+The&nbsp;context&nbsp;manager&nbsp;keeps&nbsp;a&nbsp;reference&nbsp;to&nbsp;the&nbsp;exception&nbsp;as<br>
+the&nbsp;'exception'&nbsp;attribute.&nbsp;This&nbsp;allows&nbsp;you&nbsp;to&nbsp;inspect&nbsp;the<br>
+exception&nbsp;after&nbsp;the&nbsp;assertion::<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;with&nbsp;<a href="#TabTestCase-assertRaises">assertRaises</a>(SomeException)&nbsp;as&nbsp;cm:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;do_something()<br>
+&nbsp;&nbsp;&nbsp;&nbsp;the_exception&nbsp;=&nbsp;cm.exception<br>
+&nbsp;&nbsp;&nbsp;&nbsp;<a href="#TabTestCase-assertEqual">assertEqual</a>(the_exception.error_code,&nbsp;3)</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertRaisesRegexp"><strong>assertRaisesRegexp</strong></a>(self, expected_exception, expected_regexp, callable_obj<font color="#909090">=None</font>, *args, **kwargs)</dt><dd><tt>Asserts&nbsp;that&nbsp;the&nbsp;message&nbsp;in&nbsp;a&nbsp;raised&nbsp;exception&nbsp;matches&nbsp;a&nbsp;regexp.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;expected_exception:&nbsp;Exception&nbsp;class&nbsp;expected&nbsp;to&nbsp;be&nbsp;raised.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;expected_regexp:&nbsp;Regexp&nbsp;(re&nbsp;pattern&nbsp;object&nbsp;or&nbsp;string)&nbsp;expected<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;to&nbsp;be&nbsp;found&nbsp;in&nbsp;error&nbsp;message.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;callable_obj:&nbsp;Function&nbsp;to&nbsp;be&nbsp;called.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;args:&nbsp;Extra&nbsp;args.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;kwargs:&nbsp;Extra&nbsp;kwargs.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertRegexpMatches"><strong>assertRegexpMatches</strong></a>(self, text, expected_regexp, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;the&nbsp;test&nbsp;unless&nbsp;the&nbsp;text&nbsp;matches&nbsp;the&nbsp;regular&nbsp;expression.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertSequenceEqual"><strong>assertSequenceEqual</strong></a>(self, seq1, seq2, msg<font color="#909090">=None</font>, seq_type<font color="#909090">=None</font>)</dt><dd><tt>An&nbsp;equality&nbsp;assertion&nbsp;for&nbsp;ordered&nbsp;sequences&nbsp;(like&nbsp;lists&nbsp;and&nbsp;tuples).<br>
+&nbsp;<br>
+For&nbsp;the&nbsp;purposes&nbsp;of&nbsp;this&nbsp;function,&nbsp;a&nbsp;valid&nbsp;ordered&nbsp;sequence&nbsp;type&nbsp;is&nbsp;one<br>
+which&nbsp;can&nbsp;be&nbsp;indexed,&nbsp;has&nbsp;a&nbsp;length,&nbsp;and&nbsp;has&nbsp;an&nbsp;equality&nbsp;operator.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;seq1:&nbsp;The&nbsp;first&nbsp;sequence&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;seq2:&nbsp;The&nbsp;second&nbsp;sequence&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;seq_type:&nbsp;The&nbsp;expected&nbsp;datatype&nbsp;of&nbsp;the&nbsp;sequences,&nbsp;or&nbsp;None&nbsp;if&nbsp;no<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;datatype&nbsp;should&nbsp;be&nbsp;enforced.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;msg:&nbsp;Optional&nbsp;message&nbsp;to&nbsp;use&nbsp;on&nbsp;failure&nbsp;instead&nbsp;of&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;differences.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertSetEqual"><strong>assertSetEqual</strong></a>(self, set1, set2, msg<font color="#909090">=None</font>)</dt><dd><tt>A&nbsp;set-specific&nbsp;equality&nbsp;assertion.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;set1:&nbsp;The&nbsp;first&nbsp;set&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;set2:&nbsp;The&nbsp;second&nbsp;set&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;msg:&nbsp;Optional&nbsp;message&nbsp;to&nbsp;use&nbsp;on&nbsp;failure&nbsp;instead&nbsp;of&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;differences.<br>
+&nbsp;<br>
+assertSetEqual&nbsp;uses&nbsp;ducktyping&nbsp;to&nbsp;support&nbsp;different&nbsp;types&nbsp;of&nbsp;sets,&nbsp;and<br>
+is&nbsp;optimized&nbsp;for&nbsp;sets&nbsp;specifically&nbsp;(parameters&nbsp;must&nbsp;support&nbsp;a<br>
+difference&nbsp;method).</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertTrue"><strong>assertTrue</strong></a>(self, expr, msg<font color="#909090">=None</font>)</dt><dd><tt>Check&nbsp;that&nbsp;the&nbsp;expression&nbsp;is&nbsp;true.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assertTupleEqual"><strong>assertTupleEqual</strong></a>(self, tuple1, tuple2, msg<font color="#909090">=None</font>)</dt><dd><tt>A&nbsp;tuple-specific&nbsp;equality&nbsp;assertion.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;tuple1:&nbsp;The&nbsp;first&nbsp;tuple&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;tuple2:&nbsp;The&nbsp;second&nbsp;tuple&nbsp;to&nbsp;compare.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;msg:&nbsp;Optional&nbsp;message&nbsp;to&nbsp;use&nbsp;on&nbsp;failure&nbsp;instead&nbsp;of&nbsp;a&nbsp;list&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;differences.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-assert_"><strong>assert_</strong></a> = assertTrue(self, expr, msg<font color="#909090">=None</font>)</dt><dd><tt>Check&nbsp;that&nbsp;the&nbsp;expression&nbsp;is&nbsp;true.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-countTestCases"><strong>countTestCases</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TabTestCase-debug"><strong>debug</strong></a>(self)</dt><dd><tt>Run&nbsp;the&nbsp;test&nbsp;without&nbsp;collecting&nbsp;errors&nbsp;in&nbsp;a&nbsp;TestResult</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-defaultTestResult"><strong>defaultTestResult</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TabTestCase-doCleanups"><strong>doCleanups</strong></a>(self)</dt><dd><tt>Execute&nbsp;all&nbsp;cleanup&nbsp;functions.&nbsp;Normally&nbsp;called&nbsp;for&nbsp;you&nbsp;after<br>
+tearDown.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-fail"><strong>fail</strong></a>(self, msg<font color="#909090">=None</font>)</dt><dd><tt>Fail&nbsp;immediately,&nbsp;with&nbsp;the&nbsp;given&nbsp;message.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-failIf"><strong>failIf</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="TabTestCase-failIfAlmostEqual"><strong>failIfAlmostEqual</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="TabTestCase-failIfEqual"><strong>failIfEqual</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="TabTestCase-failUnless"><strong>failUnless</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="TabTestCase-failUnlessAlmostEqual"><strong>failUnlessAlmostEqual</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="TabTestCase-failUnlessEqual"><strong>failUnlessEqual</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="TabTestCase-failUnlessRaises"><strong>failUnlessRaises</strong></a> = deprecated_func(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="TabTestCase-id"><strong>id</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TabTestCase-run"><strong>run</strong></a>(self, result<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="TabTestCase-shortDescription"><strong>shortDescription</strong></a>(self)</dt><dd><tt>Returns&nbsp;a&nbsp;one-line&nbsp;description&nbsp;of&nbsp;the&nbsp;test,&nbsp;or&nbsp;None&nbsp;if&nbsp;no<br>
+description&nbsp;has&nbsp;been&nbsp;provided.<br>
+&nbsp;<br>
+The&nbsp;default&nbsp;implementation&nbsp;of&nbsp;this&nbsp;method&nbsp;returns&nbsp;the&nbsp;first&nbsp;line&nbsp;of<br>
+the&nbsp;specified&nbsp;test&nbsp;method's&nbsp;docstring.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-skipTest"><strong>skipTest</strong></a>(self, reason)</dt><dd><tt>Skip&nbsp;this&nbsp;test.</tt></dd></dl>
+
+<dl><dt><a name="TabTestCase-tearDown"><strong>tearDown</strong></a>(self)</dt><dd><tt>Hook&nbsp;method&nbsp;for&nbsp;deconstructing&nbsp;the&nbsp;test&nbsp;fixture&nbsp;after&nbsp;testing&nbsp;it.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="unittest.case.html#TestCase">unittest.case.TestCase</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="unittest.case.html#TestCase">unittest.case.TestCase</a>:<br>
+<dl><dt><strong>failureException</strong> = &lt;type 'exceptions.AssertionError'&gt;<dd><tt>Assertion&nbsp;failed.</tt></dl>
+
+<dl><dt><strong>longMessage</strong> = False</dl>
+
+<dl><dt><strong>maxDiff</strong> = 640</dl>
+
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.testing.test_page_test_results.html b/catapult/telemetry/docs/pydoc/telemetry.testing.test_page_test_results.html
new file mode 100644
index 0000000..0766204
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.testing.test_page_test_results.html
@@ -0,0 +1,143 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.testing.test_page_test_results</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.testing.html"><font color="#ffffff">testing</font></a>.test_page_test_results</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/testing/test_page_test_results.py">telemetry/testing/test_page_test_results.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.value.list_of_scalar_values.html">telemetry.value.list_of_scalar_values</a><br>
+</td><td width="25%" valign=top><a href="telemetry.page.page.html">telemetry.page.page</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.results.page_test_results.html">telemetry.internal.results.page_test_results</a><br>
+</td><td width="25%" valign=top><a href="telemetry.value.scalar.html">telemetry.value.scalar</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.internal.results.page_test_results.html#PageTestResults">telemetry.internal.results.page_test_results.PageTestResults</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.testing.test_page_test_results.html#TestPageTestResults">TestPageTestResults</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TestPageTestResults">class <strong>TestPageTestResults</strong></a>(<a href="telemetry.internal.results.page_test_results.html#PageTestResults">telemetry.internal.results.page_test_results.PageTestResults</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.testing.test_page_test_results.html#TestPageTestResults">TestPageTestResults</a></dd>
+<dd><a href="telemetry.internal.results.page_test_results.html#PageTestResults">telemetry.internal.results.page_test_results.PageTestResults</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="TestPageTestResults-AssertHasPageSpecificListOfScalarValues"><strong>AssertHasPageSpecificListOfScalarValues</strong></a>(self, name, units, expected_values)</dt></dl>
+
+<dl><dt><a name="TestPageTestResults-AssertHasPageSpecificScalarValue"><strong>AssertHasPageSpecificScalarValue</strong></a>(self, name, units, expected_value)</dt></dl>
+
+<dl><dt><a name="TestPageTestResults-GetPageSpecificValueNamed"><strong>GetPageSpecificValueNamed</strong></a>(self, name)</dt></dl>
+
+<dl><dt><a name="TestPageTestResults-__init__"><strong>__init__</strong></a>(self, test)</dt></dl>
+
+<dl><dt><a name="TestPageTestResults-__str__"><strong>__str__</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.internal.results.page_test_results.html#PageTestResults">telemetry.internal.results.page_test_results.PageTestResults</a>:<br>
+<dl><dt><a name="TestPageTestResults-AddProfilingFile"><strong>AddProfilingFile</strong></a>(self, page, file_handle)</dt></dl>
+
+<dl><dt><a name="TestPageTestResults-AddSummaryValue"><strong>AddSummaryValue</strong></a>(self, value)</dt></dl>
+
+<dl><dt><a name="TestPageTestResults-AddValue"><strong>AddValue</strong></a>(self, value)</dt></dl>
+
+<dl><dt><a name="TestPageTestResults-CleanUp"><strong>CleanUp</strong></a>(self)</dt><dd><tt>Clean&nbsp;up&nbsp;any&nbsp;TraceValues&nbsp;contained&nbsp;within&nbsp;this&nbsp;results&nbsp;object.</tt></dd></dl>
+
+<dl><dt><a name="TestPageTestResults-DidRunPage"><strong>DidRunPage</strong></a>(self, page)</dt><dd><tt>Args:<br>
+&nbsp;&nbsp;page:&nbsp;The&nbsp;current&nbsp;page&nbsp;under&nbsp;test.</tt></dd></dl>
+
+<dl><dt><a name="TestPageTestResults-FindAllPageSpecificValuesFromIRNamed"><strong>FindAllPageSpecificValuesFromIRNamed</strong></a>(self, tir_label, value_name)</dt></dl>
+
+<dl><dt><a name="TestPageTestResults-FindAllPageSpecificValuesNamed"><strong>FindAllPageSpecificValuesNamed</strong></a>(self, value_name)</dt></dl>
+
+<dl><dt><a name="TestPageTestResults-FindAllTraceValues"><strong>FindAllTraceValues</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TestPageTestResults-FindPageSpecificValuesForPage"><strong>FindPageSpecificValuesForPage</strong></a>(self, page, value_name)</dt></dl>
+
+<dl><dt><a name="TestPageTestResults-FindValues"><strong>FindValues</strong></a>(self, predicate)</dt><dd><tt>Finds&nbsp;all&nbsp;values&nbsp;matching&nbsp;the&nbsp;specified&nbsp;predicate.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;predicate:&nbsp;A&nbsp;function&nbsp;that&nbsp;takes&nbsp;a&nbsp;Value&nbsp;and&nbsp;returns&nbsp;a&nbsp;bool.<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;list&nbsp;of&nbsp;values&nbsp;matching&nbsp;|predicate|.</tt></dd></dl>
+
+<dl><dt><a name="TestPageTestResults-PrintSummary"><strong>PrintSummary</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TestPageTestResults-UploadProfilingFilesToCloud"><strong>UploadProfilingFilesToCloud</strong></a>(self, bucket)</dt></dl>
+
+<dl><dt><a name="TestPageTestResults-UploadTraceFilesToCloud"><strong>UploadTraceFilesToCloud</strong></a>(self, bucket)</dt></dl>
+
+<dl><dt><a name="TestPageTestResults-WillRunPage"><strong>WillRunPage</strong></a>(self, page)</dt></dl>
+
+<dl><dt><a name="TestPageTestResults-__copy__"><strong>__copy__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TestPageTestResults-__enter__"><strong>__enter__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TestPageTestResults-__exit__"><strong>__exit__</strong></a>(self, _, __, ___)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.internal.results.page_test_results.html#PageTestResults">telemetry.internal.results.page_test_results.PageTestResults</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>all_page_runs</strong></dt>
+</dl>
+<dl><dt><strong>all_page_specific_values</strong></dt>
+</dl>
+<dl><dt><strong>all_summary_values</strong></dt>
+</dl>
+<dl><dt><strong>current_page</strong></dt>
+</dl>
+<dl><dt><strong>current_page_run</strong></dt>
+</dl>
+<dl><dt><strong>failures</strong></dt>
+</dl>
+<dl><dt><strong>pages_that_failed</strong></dt>
+<dd><tt>Returns&nbsp;the&nbsp;set&nbsp;of&nbsp;failed&nbsp;pages.</tt></dd>
+</dl>
+<dl><dt><strong>pages_that_succeeded</strong></dt>
+<dd><tt>Returns&nbsp;the&nbsp;set&nbsp;of&nbsp;pages&nbsp;that&nbsp;succeeded.</tt></dd>
+</dl>
+<dl><dt><strong>pages_to_profiling_files</strong></dt>
+</dl>
+<dl><dt><strong>pages_to_profiling_files_cloud_url</strong></dt>
+</dl>
+<dl><dt><strong>serialized_trace_file_ids_to_paths</strong></dt>
+</dl>
+<dl><dt><strong>skipped_values</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.testing.unittest_runner.html b/catapult/telemetry/docs/pydoc/telemetry.testing.unittest_runner.html
new file mode 100644
index 0000000..b34d2d2
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.testing.unittest_runner.html
@@ -0,0 +1,36 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.testing.unittest_runner</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.testing.html"><font color="#ffffff">testing</font></a>.unittest_runner</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/testing/unittest_runner.py">telemetry/testing/unittest_runner.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="subprocess.html">subprocess</a><br>
+</td><td width="25%" valign=top><a href="sys.html">sys</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.util.html">telemetry.core.util</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-Run"><strong>Run</strong></a>(project_config, no_browser<font color="#909090">=False</font>, stream<font color="#909090">=None</font>)</dt></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.timeline.async_slice.html b/catapult/telemetry/docs/pydoc/telemetry.timeline.async_slice.html
new file mode 100644
index 0000000..0ce0b23
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.timeline.async_slice.html
@@ -0,0 +1,85 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.timeline.async_slice</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.timeline.html"><font color="#ffffff">timeline</font></a>.async_slice</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/timeline/async_slice.py">telemetry/timeline/async_slice.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.timeline.event.html">telemetry.timeline.event</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.event.html#TimelineEvent">telemetry.timeline.event.TimelineEvent</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.async_slice.html#AsyncSlice">AsyncSlice</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="AsyncSlice">class <strong>AsyncSlice</strong></a>(<a href="telemetry.timeline.event.html#TimelineEvent">telemetry.timeline.event.TimelineEvent</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>An&nbsp;<a href="#AsyncSlice">AsyncSlice</a>&nbsp;represents&nbsp;an&nbsp;interval&nbsp;of&nbsp;time&nbsp;during&nbsp;which&nbsp;an<br>
+asynchronous&nbsp;operation&nbsp;is&nbsp;in&nbsp;progress.&nbsp;An&nbsp;<a href="#AsyncSlice">AsyncSlice</a>&nbsp;consumes&nbsp;no&nbsp;CPU&nbsp;time<br>
+itself&nbsp;and&nbsp;so&nbsp;is&nbsp;only&nbsp;associated&nbsp;with&nbsp;Threads&nbsp;at&nbsp;its&nbsp;start&nbsp;and&nbsp;end&nbsp;point.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.timeline.async_slice.html#AsyncSlice">AsyncSlice</a></dd>
+<dd><a href="telemetry.timeline.event.html#TimelineEvent">telemetry.timeline.event.TimelineEvent</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="AsyncSlice-AddSubSlice"><strong>AddSubSlice</strong></a>(self, sub_slice)</dt></dl>
+
+<dl><dt><a name="AsyncSlice-IterEventsInThisContainerRecrusively"><strong>IterEventsInThisContainerRecrusively</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="AsyncSlice-__init__"><strong>__init__</strong></a>(self, category, name, timestamp, args<font color="#909090">=None</font>, duration<font color="#909090">=0</font>, start_thread<font color="#909090">=None</font>, end_thread<font color="#909090">=None</font>, thread_start<font color="#909090">=None</font>, thread_duration<font color="#909090">=None</font>)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.timeline.event.html#TimelineEvent">telemetry.timeline.event.TimelineEvent</a>:<br>
+<dl><dt><a name="AsyncSlice-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.timeline.event.html#TimelineEvent">telemetry.timeline.event.TimelineEvent</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>end</strong></dt>
+</dl>
+<dl><dt><strong>has_thread_timestamps</strong></dt>
+</dl>
+<dl><dt><strong>thread_end</strong></dt>
+<dd><tt>Thread-specific&nbsp;CPU&nbsp;time&nbsp;when&nbsp;this&nbsp;event&nbsp;ended.<br>
+&nbsp;<br>
+May&nbsp;be&nbsp;None&nbsp;if&nbsp;the&nbsp;trace&nbsp;event&nbsp;didn't&nbsp;have&nbsp;thread&nbsp;time&nbsp;data.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.timeline.bounds.html b/catapult/telemetry/docs/pydoc/telemetry.timeline.bounds.html
new file mode 100644
index 0000000..6c705b4
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.timeline.bounds.html
@@ -0,0 +1,88 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.timeline.bounds</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.timeline.html"><font color="#ffffff">timeline</font></a>.bounds</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/timeline/bounds.py">telemetry/timeline/bounds.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.bounds.html#Bounds">Bounds</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Bounds">class <strong>Bounds</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Represents&nbsp;a&nbsp;min-max&nbsp;bounds.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="Bounds-AddBounds"><strong>AddBounds</strong></a>(self, bounds)</dt></dl>
+
+<dl><dt><a name="Bounds-AddEvent"><strong>AddEvent</strong></a>(self, event)</dt></dl>
+
+<dl><dt><a name="Bounds-AddValue"><strong>AddValue</strong></a>(self, value)</dt></dl>
+
+<dl><dt><a name="Bounds-Contains"><strong>Contains</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="Bounds-ContainsInterval"><strong>ContainsInterval</strong></a>(self, start, end)</dt></dl>
+
+<dl><dt><a name="Bounds-Intersects"><strong>Intersects</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="Bounds-Reset"><strong>Reset</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="Bounds-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="Bounds-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="Bounds-CompareByMinTimes"><strong>CompareByMinTimes</strong></a>(a, b)</dt></dl>
+
+<dl><dt><a name="Bounds-CreateFromEvent"><strong>CreateFromEvent</strong></a>(event)</dt></dl>
+
+<dl><dt><a name="Bounds-GetOverlap"><strong>GetOverlap</strong></a>(first_bounds_min, first_bounds_max, second_bounds_min, second_bounds_max)</dt></dl>
+
+<dl><dt><a name="Bounds-GetOverlapBetweenBounds"><strong>GetOverlapBetweenBounds</strong></a>(first_bounds, second_bounds)</dt><dd><tt>Compute&nbsp;the&nbsp;overlap&nbsp;duration&nbsp;between&nbsp;first_bounds&nbsp;and&nbsp;second_bounds.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>bounds</strong></dt>
+</dl>
+<dl><dt><strong>center</strong></dt>
+</dl>
+<dl><dt><strong>is_empty</strong></dt>
+</dl>
+<dl><dt><strong>max</strong></dt>
+</dl>
+<dl><dt><strong>min</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
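For reference, a minimal usage sketch of the Bounds API documented in the page above, assuming the catapult telemetry package is importable; the numeric values are invented and the comments state expected behaviour under the usual min/max definitions rather than guarantees.

    from telemetry.timeline import bounds

    b = bounds.Bounds()          # starts empty (is_empty is True)
    b.AddValue(10)
    b.AddValue(25)               # bounds now span [10, 25]

    span = b.max - b.min         # 15
    midpoint = b.center          # presumably (min + max) / 2, i.e. 17.5

    # Overlap of [10, 25] with [20, 40]; expected to be 5 under the usual
    # max(0, min(maxes) - max(mins)) definition.
    overlap = bounds.Bounds.GetOverlap(10, 25, 20, 40)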
diff --git a/catapult/telemetry/docs/pydoc/telemetry.timeline.counter.html b/catapult/telemetry/docs/pydoc/telemetry.timeline.counter.html
new file mode 100644
index 0000000..321bffe
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.timeline.counter.html
@@ -0,0 +1,165 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.timeline.counter</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.timeline.html"><font color="#ffffff">timeline</font></a>.counter</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/timeline/counter.py">telemetry/timeline/counter.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.timeline.event_container.html">telemetry.timeline.event_container</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.counter.html#CounterSample">CounterSample</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.event_container.html#TimelineEventContainer">telemetry.timeline.event_container.TimelineEventContainer</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.counter.html#Counter">Counter</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Counter">class <strong>Counter</strong></a>(<a href="telemetry.timeline.event_container.html#TimelineEventContainer">telemetry.timeline.event_container.TimelineEventContainer</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Stores&nbsp;all&nbsp;the&nbsp;samples&nbsp;for&nbsp;a&nbsp;given&nbsp;counter.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.timeline.counter.html#Counter">Counter</a></dd>
+<dd><a href="telemetry.timeline.event_container.html#TimelineEventContainer">telemetry.timeline.event_container.TimelineEventContainer</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="Counter-FinalizeImport"><strong>FinalizeImport</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="Counter-IterChildContainers"><strong>IterChildContainers</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="Counter-IterEventsInThisContainer"><strong>IterEventsInThisContainer</strong></a>(self, event_type_predicate, event_predicate)</dt></dl>
+
+<dl><dt><a name="Counter-__init__"><strong>__init__</strong></a>(self, parent, category, name)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>num_samples</strong></dt>
+</dl>
+<dl><dt><strong>num_series</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.timeline.event_container.html#TimelineEventContainer">telemetry.timeline.event_container.TimelineEventContainer</a>:<br>
+<dl><dt><a name="Counter-GetAllEvents"><strong>GetAllEvents</strong></a>(self, recursive<font color="#909090">=True</font>)</dt><dd><tt>#&nbsp;List&nbsp;versions.&nbsp;These&nbsp;should&nbsp;always&nbsp;be&nbsp;simple&nbsp;expressions&nbsp;that&nbsp;list()&nbsp;on<br>
+#&nbsp;an&nbsp;underlying&nbsp;iter&nbsp;method.</tt></dd></dl>
+
+<dl><dt><a name="Counter-GetAllEventsOfName"><strong>GetAllEventsOfName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="Counter-GetAllToplevelSlicesOfName"><strong>GetAllToplevelSlicesOfName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="Counter-IterAllAsyncSlicesOfName"><strong>IterAllAsyncSlicesOfName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="Counter-IterAllAsyncSlicesStartsWithName"><strong>IterAllAsyncSlicesStartsWithName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="Counter-IterAllEvents"><strong>IterAllEvents</strong></a>(self, recursive<font color="#909090">=True</font>, event_type_predicate<font color="#909090">=&lt;function &lt;lambda&gt;&gt;</font>, event_predicate<font color="#909090">=&lt;function &lt;lambda&gt;&gt;</font>)</dt><dd><tt>Iterates&nbsp;all&nbsp;events&nbsp;in&nbsp;this&nbsp;container,&nbsp;pre-filtered&nbsp;by&nbsp;two&nbsp;predicates.<br>
+&nbsp;<br>
+Only&nbsp;events&nbsp;with&nbsp;a&nbsp;type&nbsp;matching&nbsp;event_type_predicate&nbsp;AND&nbsp;matching<br>
+event_predicate&nbsp;will&nbsp;be&nbsp;yielded.<br>
+&nbsp;<br>
+event_type_predicate&nbsp;is&nbsp;given&nbsp;an&nbsp;actual&nbsp;type&nbsp;<a href="__builtin__.html#object">object</a>,&nbsp;e.g.:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;event_type_predicate(slice_module.Slice)<br>
+&nbsp;<br>
+event_predicate&nbsp;is&nbsp;given&nbsp;actual&nbsp;events:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;event_predicate(thread.slices[7])</tt></dd></dl>
+
+<dl><dt><a name="Counter-IterAllEventsOfName"><strong>IterAllEventsOfName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt><dd><tt>#&nbsp;Helper&nbsp;functions&nbsp;for&nbsp;finding&nbsp;common&nbsp;kinds&nbsp;of&nbsp;events.&nbsp;Must&nbsp;always&nbsp;take&nbsp;an<br>
+#&nbsp;optional&nbsp;recursive&nbsp;parameter&nbsp;and&nbsp;be&nbsp;implemented&nbsp;in&nbsp;terms&nbsp;of&nbsp;IterAllEvents.</tt></dd></dl>
+
+<dl><dt><a name="Counter-IterAllFlowEvents"><strong>IterAllFlowEvents</strong></a>(self, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="Counter-IterAllSlices"><strong>IterAllSlices</strong></a>(self, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="Counter-IterAllSlicesInRange"><strong>IterAllSlicesInRange</strong></a>(self, start, end, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="Counter-IterAllSlicesOfName"><strong>IterAllSlicesOfName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="Counter-IterAllToplevelSlicesOfName"><strong>IterAllToplevelSlicesOfName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<hr>
+Static methods inherited from <a href="telemetry.timeline.event_container.html#TimelineEventContainer">telemetry.timeline.event_container.TimelineEventContainer</a>:<br>
+<dl><dt><a name="Counter-IsAsyncSlice"><strong>IsAsyncSlice</strong></a>(t)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.timeline.event_container.html#TimelineEventContainer">telemetry.timeline.event_container.TimelineEventContainer</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="CounterSample">class <strong>CounterSample</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>#&nbsp;Doesn't&nbsp;inherit&nbsp;from&nbsp;TimelineEvent&nbsp;because&nbsp;it's&nbsp;only&nbsp;a&nbsp;temporary&nbsp;wrapper&nbsp;of&nbsp;a<br>
+#&nbsp;counter&nbsp;sample&nbsp;into&nbsp;an&nbsp;event.&nbsp;During&nbsp;stable&nbsp;operation,&nbsp;the&nbsp;samples&nbsp;are&nbsp;stored<br>
+#&nbsp;in&nbsp;a&nbsp;dense&nbsp;array&nbsp;of&nbsp;values&nbsp;rather&nbsp;than&nbsp;in&nbsp;the&nbsp;long&nbsp;form&nbsp;used&nbsp;by&nbsp;an&nbsp;Event.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="CounterSample-__init__"><strong>__init__</strong></a>(self, counter, sample_index)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>category</strong></dt>
+</dl>
+<dl><dt><strong>duration</strong></dt>
+</dl>
+<dl><dt><strong>end</strong></dt>
+</dl>
+<dl><dt><strong>name</strong></dt>
+</dl>
+<dl><dt><strong>start</strong></dt>
+</dl>
+<dl><dt><strong>thread_duration</strong></dt>
+</dl>
+<dl><dt><strong>thread_end</strong></dt>
+</dl>
+<dl><dt><strong>thread_start</strong></dt>
+</dl>
+<dl><dt><strong>value</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.timeline.event.html b/catapult/telemetry/docs/pydoc/telemetry.timeline.event.html
new file mode 100644
index 0000000..659e231
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.timeline.event.html
@@ -0,0 +1,70 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.timeline.event</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.timeline.html"><font color="#ffffff">timeline</font></a>.event</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/timeline/event.py">telemetry/timeline/event.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.event.html#TimelineEvent">TimelineEvent</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TimelineEvent">class <strong>TimelineEvent</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Represents&nbsp;a&nbsp;timeline&nbsp;event.<br>
+&nbsp;<br>
+thread_start,&nbsp;thread_duration&nbsp;and&nbsp;thread_end&nbsp;are&nbsp;the&nbsp;start&nbsp;time,&nbsp;duration<br>
+and&nbsp;end&nbsp;time&nbsp;of&nbsp;this&nbsp;event&nbsp;as&nbsp;measured&nbsp;by&nbsp;the&nbsp;thread-specific&nbsp;CPU&nbsp;clock<br>
+(ticking&nbsp;when&nbsp;the&nbsp;thread&nbsp;is&nbsp;actually&nbsp;scheduled).&nbsp;Thread&nbsp;time&nbsp;is&nbsp;optional<br>
+on&nbsp;trace&nbsp;events&nbsp;and&nbsp;the&nbsp;corresponding&nbsp;attributes&nbsp;in&nbsp;<a href="#TimelineEvent">TimelineEvent</a>&nbsp;will&nbsp;be<br>
+set&nbsp;to&nbsp;None&nbsp;(not&nbsp;0)&nbsp;if&nbsp;not&nbsp;present.&nbsp;Users&nbsp;of&nbsp;this&nbsp;class&nbsp;need&nbsp;to&nbsp;properly<br>
+handle&nbsp;this&nbsp;case.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="TimelineEvent-__init__"><strong>__init__</strong></a>(self, category, name, start, duration, thread_start<font color="#909090">=None</font>, thread_duration<font color="#909090">=None</font>, args<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="TimelineEvent-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>end</strong></dt>
+</dl>
+<dl><dt><strong>has_thread_timestamps</strong></dt>
+</dl>
+<dl><dt><strong>thread_end</strong></dt>
+<dd><tt>Thread-specific&nbsp;CPU&nbsp;time&nbsp;when&nbsp;this&nbsp;event&nbsp;ended.<br>
+&nbsp;<br>
+May&nbsp;be&nbsp;None&nbsp;if&nbsp;the&nbsp;trace&nbsp;event&nbsp;didn't&nbsp;have&nbsp;thread&nbsp;time&nbsp;data.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
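As an illustration of the constructor signature and the optional thread timestamps described above, a small sketch follows; the category and event name are arbitrary examples, and it assumes the telemetry package is importable.

    from telemetry.timeline import event as event_module

    # Wall-clock interval only; thread time deliberately left unset.
    ev = event_module.TimelineEvent(
        'toplevel', 'MessageLoop::RunTask', start=10.0, duration=4.0)

    interval_end = ev.end        # expected to be start + duration, i.e. 14.0

    # Thread time is optional on trace events, so the thread_* attributes may
    # be None; callers need to check before doing arithmetic with them.
    if ev.has_thread_timestamps:
      thread_cost = ev.thread_end - ev.thread_start
    else:
      thread_cost = None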
diff --git a/catapult/telemetry/docs/pydoc/telemetry.timeline.event_container.html b/catapult/telemetry/docs/pydoc/telemetry.timeline.event_container.html
new file mode 100644
index 0000000..11f6ea2
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.timeline.event_container.html
@@ -0,0 +1,118 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.timeline.event_container</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.timeline.html"><font color="#ffffff">timeline</font></a>.event_container</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/timeline/event_container.py">telemetry/timeline/event_container.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.timeline.async_slice.html">telemetry.timeline.async_slice</a><br>
+</td><td width="25%" valign=top><a href="telemetry.timeline.flow_event.html">telemetry.timeline.flow_event</a><br>
+</td><td width="25%" valign=top><a href="telemetry.timeline.slice.html">telemetry.timeline.slice</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.event_container.html#TimelineEventContainer">TimelineEventContainer</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TimelineEventContainer">class <strong>TimelineEventContainer</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Represents&nbsp;a&nbsp;container&nbsp;for&nbsp;events.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="TimelineEventContainer-GetAllEvents"><strong>GetAllEvents</strong></a>(self, recursive<font color="#909090">=True</font>)</dt><dd><tt>#&nbsp;List&nbsp;versions.&nbsp;These&nbsp;should&nbsp;always&nbsp;be&nbsp;simple&nbsp;expressions&nbsp;that&nbsp;list()&nbsp;on<br>
+#&nbsp;an&nbsp;underlying&nbsp;iter&nbsp;method.</tt></dd></dl>
+
+<dl><dt><a name="TimelineEventContainer-GetAllEventsOfName"><strong>GetAllEventsOfName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="TimelineEventContainer-GetAllToplevelSlicesOfName"><strong>GetAllToplevelSlicesOfName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="TimelineEventContainer-IterAllAsyncSlicesOfName"><strong>IterAllAsyncSlicesOfName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="TimelineEventContainer-IterAllAsyncSlicesStartsWithName"><strong>IterAllAsyncSlicesStartsWithName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="TimelineEventContainer-IterAllEvents"><strong>IterAllEvents</strong></a>(self, recursive<font color="#909090">=True</font>, event_type_predicate<font color="#909090">=&lt;function &lt;lambda&gt;&gt;</font>, event_predicate<font color="#909090">=&lt;function &lt;lambda&gt;&gt;</font>)</dt><dd><tt>Iterates&nbsp;all&nbsp;events&nbsp;in&nbsp;this&nbsp;container,&nbsp;pre-filtered&nbsp;by&nbsp;two&nbsp;predicates.<br>
+&nbsp;<br>
+Only&nbsp;events&nbsp;with&nbsp;a&nbsp;type&nbsp;matching&nbsp;event_type_predicate&nbsp;AND&nbsp;matching<br>
+event_predicate&nbsp;will&nbsp;be&nbsp;yielded.<br>
+&nbsp;<br>
+event_type_predicate&nbsp;is&nbsp;given&nbsp;an&nbsp;actual&nbsp;type&nbsp;<a href="__builtin__.html#object">object</a>,&nbsp;e.g.:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;event_type_predicate(slice_module.Slice)<br>
+&nbsp;<br>
+event_predicate&nbsp;is&nbsp;given&nbsp;actual&nbsp;events:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;event_predicate(thread.slices[7])</tt></dd></dl>
+
+<dl><dt><a name="TimelineEventContainer-IterAllEventsOfName"><strong>IterAllEventsOfName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt><dd><tt>#&nbsp;Helper&nbsp;functions&nbsp;for&nbsp;finding&nbsp;common&nbsp;kinds&nbsp;of&nbsp;events.&nbsp;Must&nbsp;always&nbsp;take&nbsp;an<br>
+#&nbsp;optional&nbsp;recursive&nbsp;parameter&nbsp;and&nbsp;be&nbsp;implemented&nbsp;in&nbsp;terms&nbsp;of&nbsp;IterAllEvents.</tt></dd></dl>
+
+<dl><dt><a name="TimelineEventContainer-IterAllFlowEvents"><strong>IterAllFlowEvents</strong></a>(self, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="TimelineEventContainer-IterAllSlices"><strong>IterAllSlices</strong></a>(self, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="TimelineEventContainer-IterAllSlicesInRange"><strong>IterAllSlicesInRange</strong></a>(self, start, end, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="TimelineEventContainer-IterAllSlicesOfName"><strong>IterAllSlicesOfName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="TimelineEventContainer-IterAllToplevelSlicesOfName"><strong>IterAllToplevelSlicesOfName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="TimelineEventContainer-IterChildContainers"><strong>IterChildContainers</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TimelineEventContainer-IterEventsInThisContainer"><strong>IterEventsInThisContainer</strong></a>(self, event_type_predicate, event_predicate)</dt><dd><tt>Iterates&nbsp;all&nbsp;the&nbsp;TimelineEvents&nbsp;in&nbsp;this&nbsp;container.<br>
+&nbsp;<br>
+Only&nbsp;events&nbsp;with&nbsp;a&nbsp;type&nbsp;matching&nbsp;event_type_predicate&nbsp;AND&nbsp;matching<br>
+event_predicate&nbsp;will&nbsp;be&nbsp;yielded.<br>
+&nbsp;<br>
+event_type_predicate&nbsp;is&nbsp;given&nbsp;an&nbsp;actual&nbsp;type&nbsp;<a href="__builtin__.html#object">object</a>,&nbsp;e.g.:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;event_type_predicate(slice_module.Slice)<br>
+&nbsp;<br>
+event_predicate&nbsp;is&nbsp;given&nbsp;actual&nbsp;events:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;event_predicate(thread.slices[7])<br>
+&nbsp;<br>
+DO&nbsp;NOT&nbsp;ASSUME&nbsp;that&nbsp;the&nbsp;event_type_predicate&nbsp;will&nbsp;be&nbsp;called&nbsp;for&nbsp;every&nbsp;event<br>
+found.&nbsp;The&nbsp;relative&nbsp;calling&nbsp;order&nbsp;of&nbsp;the&nbsp;two&nbsp;is&nbsp;left&nbsp;up&nbsp;to&nbsp;the&nbsp;implementer<br>
+of&nbsp;the&nbsp;method.</tt></dd></dl>
+
+<dl><dt><a name="TimelineEventContainer-__init__"><strong>__init__</strong></a>(self, name, parent)</dt></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="TimelineEventContainer-IsAsyncSlice"><strong>IsAsyncSlice</strong></a>(t)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
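The IterAllEvents docstring above describes predicate-based filtering; the sketch below shows one plausible use. It assumes `model` is a telemetry.timeline.model.TimelineModel (an event container) built from an already-collected trace, and the slice name 'MessageLoop::RunTask' is only an example.

    from telemetry.timeline import slice as slice_module

    # `model` is assumed to exist and to expose the TimelineEventContainer
    # iteration API documented above.
    run_task_slices = model.IterAllEvents(
        recursive=True,
        event_type_predicate=lambda t: issubclass(t, slice_module.Slice),
        event_predicate=lambda e: e.name == 'MessageLoop::RunTask')

    total_duration = sum(s.duration for s in run_task_slices)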
diff --git a/catapult/telemetry/docs/pydoc/telemetry.timeline.flow_event.html b/catapult/telemetry/docs/pydoc/telemetry.timeline.flow_event.html
new file mode 100644
index 0000000..9dbdf97
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.timeline.flow_event.html
@@ -0,0 +1,80 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.timeline.flow_event</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.timeline.html"><font color="#ffffff">timeline</font></a>.flow_event</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/timeline/flow_event.py">telemetry/timeline/flow_event.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.timeline.event.html">telemetry.timeline.event</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.event.html#TimelineEvent">telemetry.timeline.event.TimelineEvent</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.flow_event.html#FlowEvent">FlowEvent</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="FlowEvent">class <strong>FlowEvent</strong></a>(<a href="telemetry.timeline.event.html#TimelineEvent">telemetry.timeline.event.TimelineEvent</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;<a href="#FlowEvent">FlowEvent</a>&nbsp;represents&nbsp;an&nbsp;interval&nbsp;of&nbsp;time&nbsp;plus&nbsp;parameters&nbsp;associated<br>
+with&nbsp;that&nbsp;interval.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.timeline.flow_event.html#FlowEvent">FlowEvent</a></dd>
+<dd><a href="telemetry.timeline.event.html#TimelineEvent">telemetry.timeline.event.TimelineEvent</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="FlowEvent-__init__"><strong>__init__</strong></a>(self, category, event_id, name, start, args<font color="#909090">=None</font>)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.timeline.event.html#TimelineEvent">telemetry.timeline.event.TimelineEvent</a>:<br>
+<dl><dt><a name="FlowEvent-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.timeline.event.html#TimelineEvent">telemetry.timeline.event.TimelineEvent</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>end</strong></dt>
+</dl>
+<dl><dt><strong>has_thread_timestamps</strong></dt>
+</dl>
+<dl><dt><strong>thread_end</strong></dt>
+<dd><tt>Thread-specific&nbsp;CPU&nbsp;time&nbsp;when&nbsp;this&nbsp;event&nbsp;ended.<br>
+&nbsp;<br>
+May&nbsp;be&nbsp;None&nbsp;if&nbsp;the&nbsp;trace&nbsp;event&nbsp;didn't&nbsp;have&nbsp;thread&nbsp;time&nbsp;data.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.timeline.html b/catapult/telemetry/docs/pydoc/telemetry.timeline.html
new file mode 100644
index 0000000..b7b797c
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.timeline.html
@@ -0,0 +1,56 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.timeline</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.timeline</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/timeline/__init__.py">telemetry/timeline/__init__.py</a></font></td></tr></table>
+    <p></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.timeline.async_slice.html">async_slice</a><br>
+<a href="telemetry.timeline.bounds.html">bounds</a><br>
+<a href="telemetry.timeline.bounds_unittest.html">bounds_unittest</a><br>
+<a href="telemetry.timeline.counter.html">counter</a><br>
+<a href="telemetry.timeline.counter_unittest.html">counter_unittest</a><br>
+<a href="telemetry.timeline.event.html">event</a><br>
+<a href="telemetry.timeline.event_container.html">event_container</a><br>
+<a href="telemetry.timeline.event_unittest.html">event_unittest</a><br>
+<a href="telemetry.timeline.flow_event.html">flow_event</a><br>
+</td><td width="25%" valign=top><a href="telemetry.timeline.importer.html">importer</a><br>
+<a href="telemetry.timeline.inspector_importer.html">inspector_importer</a><br>
+<a href="telemetry.timeline.inspector_importer_unittest.html">inspector_importer_unittest</a><br>
+<a href="telemetry.timeline.memory_dump_event.html">memory_dump_event</a><br>
+<a href="telemetry.timeline.memory_dump_event_unittest.html">memory_dump_event_unittest</a><br>
+<a href="telemetry.timeline.model.html">model</a><br>
+<a href="telemetry.timeline.model_unittest.html">model_unittest</a><br>
+<a href="telemetry.timeline.process.html">process</a><br>
+<a href="telemetry.timeline.sample.html">sample</a><br>
+</td><td width="25%" valign=top><a href="telemetry.timeline.slice.html">slice</a><br>
+<a href="telemetry.timeline.slice_unittest.html">slice_unittest</a><br>
+<a href="telemetry.timeline.surface_flinger_importer.html">surface_flinger_importer</a><br>
+<a href="telemetry.timeline.tab_id_importer.html">tab_id_importer</a><br>
+<a href="telemetry.timeline.tab_id_importer_unittest.html">tab_id_importer_unittest</a><br>
+<a href="telemetry.timeline.thread.html">thread</a><br>
+<a href="telemetry.timeline.thread_unittest.html">thread_unittest</a><br>
+<a href="telemetry.timeline.trace_data.html">trace_data</a><br>
+<a href="telemetry.timeline.trace_data_unittest.html">trace_data_unittest</a><br>
+</td><td width="25%" valign=top><a href="telemetry.timeline.trace_event_importer.html">trace_event_importer</a><br>
+<a href="telemetry.timeline.trace_event_importer_unittest.html">trace_event_importer_unittest</a><br>
+<a href="telemetry.timeline.tracing_category_filter.html">tracing_category_filter</a><br>
+<a href="telemetry.timeline.tracing_category_filter_unittest.html">tracing_category_filter_unittest</a><br>
+<a href="telemetry.timeline.tracing_config.html">tracing_config</a><br>
+<a href="telemetry.timeline.tracing_config_unittest.html">tracing_config_unittest</a><br>
+<a href="telemetry.timeline.tracing_options.html">tracing_options</a><br>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.timeline.importer.html b/catapult/telemetry/docs/pydoc/telemetry.timeline.importer.html
new file mode 100644
index 0000000..b330a1b
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.timeline.importer.html
@@ -0,0 +1,61 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.timeline.importer</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.timeline.html"><font color="#ffffff">timeline</font></a>.importer</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/timeline/importer.py">telemetry/timeline/importer.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.importer.html#TimelineImporter">TimelineImporter</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TimelineImporter">class <strong>TimelineImporter</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Reads&nbsp;TraceData&nbsp;and&nbsp;populates&nbsp;timeline&nbsp;model&nbsp;with&nbsp;what&nbsp;it&nbsp;finds.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="TimelineImporter-FinalizeImport"><strong>FinalizeImport</strong></a>(self)</dt><dd><tt>Called&nbsp;after&nbsp;all&nbsp;other&nbsp;importers&nbsp;for&nbsp;the&nbsp;model&nbsp;are&nbsp;run.</tt></dd></dl>
+
+<dl><dt><a name="TimelineImporter-ImportEvents"><strong>ImportEvents</strong></a>(self)</dt><dd><tt>Processes&nbsp;the&nbsp;event&nbsp;data&nbsp;in&nbsp;the&nbsp;wrapper&nbsp;and&nbsp;creates&nbsp;and&nbsp;adds<br>
+new&nbsp;timeline&nbsp;events&nbsp;to&nbsp;the&nbsp;model.</tt></dd></dl>
+
+<dl><dt><a name="TimelineImporter-__init__"><strong>__init__</strong></a>(self, model, trace_data, import_order)</dt></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="TimelineImporter-GetSupportedPart"><strong>GetSupportedPart</strong></a>()</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
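The TimelineImporter page above documents an abstract reader of TraceData. As an illustration only, a subclass might look like the sketch below; the class name, the import_order value, and the method bodies are assumptions, not code from this change.

```python
from telemetry.timeline import importer


class JsonLogImporter(importer.TimelineImporter):
  """Hypothetical importer; only the overridden method names come from the docs."""

  def __init__(self, model, trace_data):
    # import_order decides when this importer runs relative to the others.
    super(JsonLogImporter, self).__init__(model, trace_data, import_order=2)

  @staticmethod
  def GetSupportedPart():
    return None  # would name the TraceData part this importer understands

  def ImportEvents(self):
    # Create timeline events from the trace data and add them to the model.
    pass

  def FinalizeImport(self):
    # Runs after every other importer registered on the model.
    pass
```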
diff --git a/catapult/telemetry/docs/pydoc/telemetry.timeline.inspector_importer.html b/catapult/telemetry/docs/pydoc/telemetry.timeline.inspector_importer.html
new file mode 100644
index 0000000..fe808e8
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.timeline.inspector_importer.html
@@ -0,0 +1,77 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.timeline.inspector_importer</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.timeline.html"><font color="#ffffff">timeline</font></a>.inspector_importer</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/timeline/inspector_importer.py">telemetry/timeline/inspector_importer.py</a></font></td></tr></table>
+    <p><tt>Imports&nbsp;event&nbsp;data&nbsp;obtained&nbsp;from&nbsp;the&nbsp;inspector's&nbsp;timeline.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.timeline.importer.html">telemetry.timeline.importer</a><br>
+</td><td width="25%" valign=top><a href="telemetry.timeline.thread.html">telemetry.timeline.thread</a><br>
+</td><td width="25%" valign=top><a href="telemetry.timeline.trace_data.html">telemetry.timeline.trace_data</a><br>
+</td><td width="25%" valign=top><a href="telemetry.timeline.slice.html">telemetry.timeline.slice</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.importer.html#TimelineImporter">telemetry.timeline.importer.TimelineImporter</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.inspector_importer.html#InspectorTimelineImporter">InspectorTimelineImporter</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="InspectorTimelineImporter">class <strong>InspectorTimelineImporter</strong></a>(<a href="telemetry.timeline.importer.html#TimelineImporter">telemetry.timeline.importer.TimelineImporter</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.timeline.inspector_importer.html#InspectorTimelineImporter">InspectorTimelineImporter</a></dd>
+<dd><a href="telemetry.timeline.importer.html#TimelineImporter">telemetry.timeline.importer.TimelineImporter</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="InspectorTimelineImporter-FinalizeImport"><strong>FinalizeImport</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="InspectorTimelineImporter-ImportEvents"><strong>ImportEvents</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="InspectorTimelineImporter-__init__"><strong>__init__</strong></a>(self, model, trace_data)</dt></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="InspectorTimelineImporter-AddRawEventToThreadRecursive"><strong>AddRawEventToThreadRecursive</strong></a>(thread, raw_inspector_event)</dt></dl>
+
+<dl><dt><a name="InspectorTimelineImporter-GetSupportedPart"><strong>GetSupportedPart</strong></a>()</dt></dl>
+
+<dl><dt><a name="InspectorTimelineImporter-RawEventToTimelineEvent"><strong>RawEventToTimelineEvent</strong></a>(raw_inspector_event)</dt><dd><tt>Converts&nbsp;raw_inspector_event&nbsp;to&nbsp;TimelineEvent.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.timeline.importer.html#TimelineImporter">telemetry.timeline.importer.TimelineImporter</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
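InspectorTimelineImporter exposes RawEventToTimelineEvent as a static helper. The sketch below shows how it could be called; the record layout is a guess at the inspector's timeline format, and the None check is an assumption rather than behaviour confirmed by this page.

```python
from telemetry.timeline import inspector_importer

# Hypothetical inspector timeline record; field names are assumed.
raw_event = {
    'type': 'FunctionCall',
    'startTime': 100.0,
    'endTime': 103.5,
}
event = inspector_importer.InspectorTimelineImporter.RawEventToTimelineEvent(
    raw_event)
if event is not None:  # assumed: unconvertible records may yield None
  print(event)
```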
diff --git a/catapult/telemetry/docs/pydoc/telemetry.timeline.memory_dump_event.html b/catapult/telemetry/docs/pydoc/telemetry.timeline.memory_dump_event.html
new file mode 100644
index 0000000..52c8034
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.timeline.memory_dump_event.html
@@ -0,0 +1,231 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.timeline.memory_dump_event</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.timeline.html"><font color="#ffffff">timeline</font></a>.memory_dump_event</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/timeline/memory_dump_event.py">telemetry/timeline/memory_dump_event.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="posixpath.html">posixpath</a><br>
+</td><td width="25%" valign=top><a href="re.html">re</a><br>
+</td><td width="25%" valign=top><a href="telemetry.timeline.event.html">telemetry.timeline.event</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.memory_dump_event.html#GlobalMemoryDump">GlobalMemoryDump</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.timeline.memory_dump_event.html#MemoryBucket">MemoryBucket</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.timeline.memory_dump_event.html#MmapCategory">MmapCategory</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.event.html#TimelineEvent">telemetry.timeline.event.TimelineEvent</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.memory_dump_event.html#ProcessMemoryDumpEvent">ProcessMemoryDumpEvent</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="GlobalMemoryDump">class <strong>GlobalMemoryDump</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Object&nbsp;to&nbsp;aggregate&nbsp;individual&nbsp;process&nbsp;dumps&nbsp;with&nbsp;the&nbsp;same&nbsp;dump&nbsp;id.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;process_dumps:&nbsp;A&nbsp;sequence&nbsp;of&nbsp;<a href="#ProcessMemoryDumpEvent">ProcessMemoryDumpEvent</a>&nbsp;objects,&nbsp;all&nbsp;sharing<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;the&nbsp;same&nbsp;global&nbsp;dump&nbsp;id.<br>
+&nbsp;<br>
+Attributes:<br>
+&nbsp;&nbsp;dump_id:&nbsp;A&nbsp;string&nbsp;identifying&nbsp;this&nbsp;dump.<br>
+&nbsp;&nbsp;has_mmaps:&nbsp;True&nbsp;if&nbsp;the&nbsp;memory&nbsp;dump&nbsp;has&nbsp;mmaps&nbsp;information.&nbsp;If&nbsp;False&nbsp;then<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;GetMemoryUsage&nbsp;will&nbsp;report&nbsp;all&nbsp;zeros.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="GlobalMemoryDump-GetMemoryUsage"><strong>GetMemoryUsage</strong></a>(self)</dt><dd><tt>Get&nbsp;the&nbsp;aggregated&nbsp;memory&nbsp;usage&nbsp;over&nbsp;all&nbsp;processes&nbsp;in&nbsp;this&nbsp;dump.</tt></dd></dl>
+
+<dl><dt><a name="GlobalMemoryDump-IterProcessMemoryDumps"><strong>IterProcessMemoryDumps</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="GlobalMemoryDump-__init__"><strong>__init__</strong></a>(self, process_dumps)</dt></dl>
+
+<dl><dt><a name="GlobalMemoryDump-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>duration</strong></dt>
+</dl>
+<dl><dt><strong>end</strong></dt>
+</dl>
+<dl><dt><strong>start</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MemoryBucket">class <strong>MemoryBucket</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Simple&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;to&nbsp;hold&nbsp;and&nbsp;aggregate&nbsp;memory&nbsp;values.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="MemoryBucket-AddRegion"><strong>AddRegion</strong></a>(self, byte_stats)</dt></dl>
+
+<dl><dt><a name="MemoryBucket-GetValue"><strong>GetValue</strong></a>(self, name)</dt></dl>
+
+<dl><dt><a name="MemoryBucket-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="MemoryBucket-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MmapCategory">class <strong>MmapCategory</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="MmapCategory-GetMatchingChild"><strong>GetMatchingChild</strong></a>(self, mapped_file)</dt><dd><tt>Get&nbsp;the&nbsp;first&nbsp;matching&nbsp;sub-category&nbsp;for&nbsp;a&nbsp;given&nbsp;mapped&nbsp;file.<br>
+&nbsp;<br>
+Returns&nbsp;None&nbsp;if&nbsp;the&nbsp;category&nbsp;has&nbsp;no&nbsp;children,&nbsp;or&nbsp;the&nbsp;DefaultCategory&nbsp;if<br>
+it&nbsp;does&nbsp;have&nbsp;children&nbsp;but&nbsp;none&nbsp;of&nbsp;them&nbsp;match.</tt></dd></dl>
+
+<dl><dt><a name="MmapCategory-Match"><strong>Match</strong></a>(self, mapped_file)</dt><dd><tt>Test&nbsp;whether&nbsp;a&nbsp;mapped&nbsp;file&nbsp;matches&nbsp;this&nbsp;category.</tt></dd></dl>
+
+<dl><dt><a name="MmapCategory-__init__"><strong>__init__</strong></a>(self, name, file_pattern, children<font color="#909090">=None</font>)</dt><dd><tt>A&nbsp;(sub)category&nbsp;for&nbsp;classifying&nbsp;memory&nbsp;maps.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;name:&nbsp;A&nbsp;string&nbsp;to&nbsp;identify&nbsp;the&nbsp;category.<br>
+&nbsp;&nbsp;file_pattern:&nbsp;A&nbsp;regex&nbsp;pattern,&nbsp;the&nbsp;category&nbsp;will&nbsp;aggregate&nbsp;memory&nbsp;usage<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;for&nbsp;all&nbsp;mapped&nbsp;files&nbsp;matching&nbsp;this&nbsp;pattern.<br>
+&nbsp;&nbsp;children:&nbsp;A&nbsp;list&nbsp;of&nbsp;<a href="#MmapCategory">MmapCategory</a>&nbsp;objects,&nbsp;used&nbsp;to&nbsp;sub-categorize&nbsp;memory<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;usage.</tt></dd></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="MmapCategory-DefaultCategory"><strong>DefaultCategory</strong></a>(cls)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>An&nbsp;implicit&nbsp;'Others'&nbsp;match-all&nbsp;category&nbsp;with&nbsp;no&nbsp;children.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ProcessMemoryDumpEvent">class <strong>ProcessMemoryDumpEvent</strong></a>(<a href="telemetry.timeline.event.html#TimelineEvent">telemetry.timeline.event.TimelineEvent</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;memory&nbsp;dump&nbsp;event&nbsp;belonging&nbsp;to&nbsp;a&nbsp;single&nbsp;timeline.Process&nbsp;<a href="__builtin__.html#object">object</a>.<br>
+&nbsp;<br>
+It's&nbsp;a&nbsp;subclass&nbsp;of&nbsp;telemetry's&nbsp;<a href="telemetry.timeline.event.html#TimelineEvent">TimelineEvent</a>&nbsp;so&nbsp;it&nbsp;can&nbsp;be&nbsp;included&nbsp;in<br>
+the&nbsp;stream&nbsp;of&nbsp;events&nbsp;contained&nbsp;in&nbsp;timeline.model&nbsp;objects,&nbsp;and&nbsp;have&nbsp;its<br>
+timing&nbsp;correlated&nbsp;with&nbsp;that&nbsp;of&nbsp;other&nbsp;events&nbsp;in&nbsp;the&nbsp;model.<br>
+&nbsp;<br>
+Properties:<br>
+&nbsp;&nbsp;dump_id:&nbsp;A&nbsp;string&nbsp;to&nbsp;identify&nbsp;events&nbsp;belonging&nbsp;to&nbsp;the&nbsp;same&nbsp;global&nbsp;dump.<br>
+&nbsp;&nbsp;process:&nbsp;The&nbsp;timeline.Process&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;that&nbsp;owns&nbsp;this&nbsp;memory&nbsp;dump&nbsp;event.<br>
+&nbsp;&nbsp;has_mmaps:&nbsp;True&nbsp;if&nbsp;the&nbsp;memory&nbsp;dump&nbsp;has&nbsp;mmaps&nbsp;information.&nbsp;If&nbsp;False&nbsp;then<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;GetMemoryUsage&nbsp;will&nbsp;report&nbsp;all&nbsp;zeros.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.timeline.memory_dump_event.html#ProcessMemoryDumpEvent">ProcessMemoryDumpEvent</a></dd>
+<dd><a href="telemetry.timeline.event.html#TimelineEvent">telemetry.timeline.event.TimelineEvent</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="ProcessMemoryDumpEvent-GetMemoryBucket"><strong>GetMemoryBucket</strong></a>(self, path)</dt><dd><tt>Return&nbsp;the&nbsp;<a href="#MemoryBucket">MemoryBucket</a>&nbsp;associated&nbsp;with&nbsp;a&nbsp;category&nbsp;path.<br>
+&nbsp;<br>
+An&nbsp;empty&nbsp;bucket&nbsp;will&nbsp;be&nbsp;created&nbsp;if&nbsp;the&nbsp;path&nbsp;does&nbsp;not&nbsp;already&nbsp;exist.<br>
+&nbsp;<br>
+path:&nbsp;A&nbsp;string&nbsp;with&nbsp;path&nbsp;in&nbsp;the&nbsp;classification&nbsp;tree,&nbsp;e.g.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'/Android/Java&nbsp;runtime/Cache'.&nbsp;Note:&nbsp;no&nbsp;trailing&nbsp;slash,&nbsp;except&nbsp;for<br>
+&nbsp;&nbsp;&nbsp;&nbsp;the&nbsp;root&nbsp;path&nbsp;'/'.</tt></dd></dl>
+
+<dl><dt><a name="ProcessMemoryDumpEvent-GetMemoryUsage"><strong>GetMemoryUsage</strong></a>(self)</dt><dd><tt>Get&nbsp;a&nbsp;dictionary&nbsp;with&nbsp;the&nbsp;memory&nbsp;usage&nbsp;of&nbsp;this&nbsp;process.</tt></dd></dl>
+
+<dl><dt><a name="ProcessMemoryDumpEvent-GetMemoryValue"><strong>GetMemoryValue</strong></a>(self, category_path, discount_tracing<font color="#909090">=False</font>)</dt><dd><tt>Return&nbsp;a&nbsp;specific&nbsp;value&nbsp;from&nbsp;within&nbsp;a&nbsp;<a href="#MemoryBucket">MemoryBucket</a>.<br>
+&nbsp;<br>
+category_path:&nbsp;A&nbsp;string&nbsp;composed&nbsp;of&nbsp;a&nbsp;path&nbsp;in&nbsp;the&nbsp;classification&nbsp;tree,<br>
+&nbsp;&nbsp;&nbsp;&nbsp;followed&nbsp;by&nbsp;a&nbsp;'.',&nbsp;followed&nbsp;by&nbsp;a&nbsp;specific&nbsp;bucket&nbsp;value,&nbsp;e.g.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;'/Android/Java&nbsp;runtime/Cache.private_dirty_resident'.<br>
+discount_tracing:&nbsp;A&nbsp;boolean&nbsp;indicating&nbsp;whether&nbsp;the&nbsp;returned&nbsp;value&nbsp;should<br>
+&nbsp;&nbsp;&nbsp;&nbsp;be&nbsp;discounted&nbsp;by&nbsp;the&nbsp;resident&nbsp;size&nbsp;of&nbsp;the&nbsp;tracing&nbsp;allocator.</tt></dd></dl>
+
+<dl><dt><a name="ProcessMemoryDumpEvent-__init__"><strong>__init__</strong></a>(self, process, event)</dt></dl>
+
+<dl><dt><a name="ProcessMemoryDumpEvent-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>process_name</strong></dt>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.timeline.event.html#TimelineEvent">telemetry.timeline.event.TimelineEvent</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>end</strong></dt>
+</dl>
+<dl><dt><strong>has_thread_timestamps</strong></dt>
+</dl>
+<dl><dt><strong>thread_end</strong></dt>
+<dd><tt>Thread-specific&nbsp;CPU&nbsp;time&nbsp;when&nbsp;this&nbsp;event&nbsp;ended.<br>
+&nbsp;<br>
+May&nbsp;be&nbsp;None&nbsp;if&nbsp;the&nbsp;trace&nbsp;event&nbsp;didn't&nbsp;have&nbsp;thread&nbsp;time&nbsp;data.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>BUCKET_ATTRS</strong> = {'private_clean_resident': 'pc', 'private_dirty_resident': 'pd', 'proportional_resident': 'pss', 'shared_clean_resident': 'sc', 'shared_dirty_resident': 'sd', 'swapped': 'sw'}<br>
+<strong>MMAPS_METRICS</strong> = {'mmaps_ashmem': ('/Android/Ashmem.proportional_resident', False), 'mmaps_java_heap': ('/Android/Java runtime/Spaces.proportional_resident', False), 'mmaps_native_heap': ('/Native heap.proportional_resident', True), 'mmaps_overall_pss': ('/.proportional_resident', True), 'mmaps_private_dirty': ('/.private_dirty_resident', True)}<br>
+<strong>ROOT_CATEGORY</strong> = &lt;telemetry.timeline.memory_dump_event.MmapCategory object&gt;</td></tr></table>
+</body></html>
\ No newline at end of file
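The memory-dump page above documents GlobalMemoryDump, ProcessMemoryDumpEvent and the MMAPS_METRICS category paths. A hedged sketch of how they might be queried: `model` is assumed to be an already-populated telemetry.timeline.model.TimelineModel (documented below), and the '/.proportional_resident' path is taken from MMAPS_METRICS.

```python
# `model` is assumed to be a populated TimelineModel; nothing here builds one.
for global_dump in model.IterGlobalMemoryDumps():
  totals = global_dump.GetMemoryUsage()  # aggregated over all processes in the dump
  print(totals)
  for process_dump in global_dump.IterProcessMemoryDumps():
    if process_dump.has_mmaps:
      # Overall proportional set size, discounting the tracing allocator itself.
      pss = process_dump.GetMemoryValue(
          '/.proportional_resident', discount_tracing=True)
      print('%s: %r' % (process_dump.process_name, pss))
```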
diff --git a/catapult/telemetry/docs/pydoc/telemetry.timeline.model.html b/catapult/telemetry/docs/pydoc/telemetry.timeline.model.html
new file mode 100644
index 0000000..a11a530
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.timeline.model.html
@@ -0,0 +1,314 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.timeline.model</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.timeline.html"><font color="#ffffff">timeline</font></a>.model</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/timeline/model.py">telemetry/timeline/model.py</a></font></td></tr></table>
+    <p><tt>A&nbsp;container&nbsp;for&nbsp;timeline-based&nbsp;events&nbsp;and&nbsp;traces&nbsp;that&nbsp;can&nbsp;handle&nbsp;importing<br>
+raw&nbsp;event&nbsp;data&nbsp;from&nbsp;different&nbsp;sources.&nbsp;This&nbsp;model&nbsp;closely&nbsp;resembles&nbsp;that&nbsp;in&nbsp;the<br>
+trace_viewer&nbsp;project:<br>
+https://code.google.com/p/trace-viewer/</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.timeline.async_slice.html">telemetry.timeline.async_slice</a><br>
+<a href="telemetry.timeline.bounds.html">telemetry.timeline.bounds</a><br>
+<a href="telemetry.timeline.event_container.html">telemetry.timeline.event_container</a><br>
+</td><td width="25%" valign=top><a href="telemetry.timeline.inspector_importer.html">telemetry.timeline.inspector_importer</a><br>
+<a href="telemetry.timeline.process.html">telemetry.timeline.process</a><br>
+<a href="telemetry.timeline.slice.html">telemetry.timeline.slice</a><br>
+</td><td width="25%" valign=top><a href="telemetry.timeline.surface_flinger_importer.html">telemetry.timeline.surface_flinger_importer</a><br>
+<a href="telemetry.timeline.tab_id_importer.html">telemetry.timeline.tab_id_importer</a><br>
+<a href="telemetry.timeline.trace_data.html">telemetry.timeline.trace_data</a><br>
+</td><td width="25%" valign=top><a href="telemetry.timeline.trace_event_importer.html">telemetry.timeline.trace_event_importer</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.model.html#MarkerMismatchError">MarkerMismatchError</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.timeline.model.html#MarkerOverlapError">MarkerOverlapError</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.event_container.html#TimelineEventContainer">telemetry.timeline.event_container.TimelineEventContainer</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.model.html#TimelineModel">TimelineModel</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MarkerMismatchError">class <strong>MarkerMismatchError</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.timeline.model.html#MarkerMismatchError">MarkerMismatchError</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="MarkerMismatchError-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#MarkerMismatchError-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="MarkerMismatchError-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#MarkerMismatchError-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="MarkerMismatchError-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#MarkerMismatchError-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="MarkerMismatchError-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#MarkerMismatchError-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="MarkerMismatchError-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#MarkerMismatchError-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="MarkerMismatchError-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="MarkerMismatchError-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#MarkerMismatchError-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="MarkerMismatchError-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#MarkerMismatchError-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="MarkerMismatchError-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="MarkerMismatchError-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#MarkerMismatchError-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="MarkerMismatchError-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MarkerOverlapError">class <strong>MarkerOverlapError</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.timeline.model.html#MarkerOverlapError">MarkerOverlapError</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="MarkerOverlapError-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#MarkerOverlapError-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="MarkerOverlapError-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#MarkerOverlapError-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="MarkerOverlapError-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#MarkerOverlapError-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="MarkerOverlapError-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#MarkerOverlapError-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="MarkerOverlapError-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#MarkerOverlapError-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="MarkerOverlapError-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="MarkerOverlapError-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#MarkerOverlapError-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="MarkerOverlapError-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#MarkerOverlapError-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="MarkerOverlapError-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="MarkerOverlapError-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#MarkerOverlapError-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="MarkerOverlapError-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TimelineModel">class <strong>TimelineModel</strong></a>(<a href="telemetry.timeline.event_container.html#TimelineEventContainer">telemetry.timeline.event_container.TimelineEventContainer</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.timeline.model.html#TimelineModel">TimelineModel</a></dd>
+<dd><a href="telemetry.timeline.event_container.html#TimelineEventContainer">telemetry.timeline.event_container.TimelineEventContainer</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="TimelineModel-AddMappingFromTabIdToRendererThread"><strong>AddMappingFromTabIdToRendererThread</strong></a>(self, tab_id, renderer_thread)</dt></dl>
+
+<dl><dt><a name="TimelineModel-FinalizeImport"><strong>FinalizeImport</strong></a>(self, shift_world_to_zero<font color="#909090">=False</font>, importers<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="TimelineModel-FindTimelineMarkers"><strong>FindTimelineMarkers</strong></a>(self, timeline_marker_names)</dt><dd><tt>Find&nbsp;the&nbsp;timeline&nbsp;events&nbsp;with&nbsp;the&nbsp;given&nbsp;names.<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;number&nbsp;and&nbsp;order&nbsp;of&nbsp;events&nbsp;found&nbsp;does&nbsp;not&nbsp;match&nbsp;the&nbsp;names,<br>
+raise&nbsp;an&nbsp;error.</tt></dd></dl>
+
+<dl><dt><a name="TimelineModel-GetAllProcesses"><strong>GetAllProcesses</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TimelineModel-GetAllThreads"><strong>GetAllThreads</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TimelineModel-GetOrCreateProcess"><strong>GetOrCreateProcess</strong></a>(self, pid)</dt></dl>
+
+<dl><dt><a name="TimelineModel-GetRendererProcessFromTabId"><strong>GetRendererProcessFromTabId</strong></a>(self, tab_id)</dt></dl>
+
+<dl><dt><a name="TimelineModel-GetRendererThreadFromTabId"><strong>GetRendererThreadFromTabId</strong></a>(self, tab_id)</dt></dl>
+
+<dl><dt><a name="TimelineModel-ImportTraces"><strong>ImportTraces</strong></a>(self, trace_data, shift_world_to_zero<font color="#909090">=True</font>)</dt><dd><tt>Populates&nbsp;the&nbsp;model&nbsp;with&nbsp;the&nbsp;provided&nbsp;trace&nbsp;data.<br>
+&nbsp;<br>
+trace_data&nbsp;must&nbsp;be&nbsp;an&nbsp;instance&nbsp;of&nbsp;TraceData.<br>
+&nbsp;<br>
+Passing&nbsp;shift_world_to_zero=True&nbsp;causes&nbsp;the&nbsp;events&nbsp;to&nbsp;be&nbsp;shifted&nbsp;such&nbsp;that<br>
+the&nbsp;first&nbsp;event&nbsp;starts&nbsp;at&nbsp;time&nbsp;0.</tt></dd></dl>
+
+<dl><dt><a name="TimelineModel-IterChildContainers"><strong>IterChildContainers</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TimelineModel-IterGlobalMemoryDumps"><strong>IterGlobalMemoryDumps</strong></a>(self)</dt><dd><tt>Iterate&nbsp;over&nbsp;the&nbsp;memory&nbsp;dump&nbsp;events&nbsp;of&nbsp;this&nbsp;model.</tt></dd></dl>
+
+<dl><dt><a name="TimelineModel-SetGlobalMemoryDumps"><strong>SetGlobalMemoryDumps</strong></a>(self, global_memory_dumps)</dt><dd><tt>Populates&nbsp;the&nbsp;model&nbsp;with&nbsp;a&nbsp;sequence&nbsp;of&nbsp;GlobalMemoryDump&nbsp;objects.</tt></dd></dl>
+
+<dl><dt><a name="TimelineModel-ShiftWorldToZero"><strong>ShiftWorldToZero</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TimelineModel-UpdateBounds"><strong>UpdateBounds</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TimelineModel-__init__"><strong>__init__</strong></a>(self, trace_data<font color="#909090">=None</font>, shift_world_to_zero<font color="#909090">=True</font>)</dt><dd><tt>Initializes&nbsp;a&nbsp;<a href="#TimelineModel">TimelineModel</a>.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;trace_data:&nbsp;trace_data.TraceData&nbsp;containing&nbsp;events&nbsp;to&nbsp;import<br>
+&nbsp;&nbsp;&nbsp;&nbsp;shift_world_to_zero:&nbsp;If&nbsp;true,&nbsp;the&nbsp;events&nbsp;will&nbsp;be&nbsp;shifted&nbsp;such&nbsp;that&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;first&nbsp;event&nbsp;starts&nbsp;at&nbsp;time&nbsp;0.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>bounds</strong></dt>
+</dl>
+<dl><dt><strong>browser_process</strong></dt>
+</dl>
+<dl><dt><strong>gpu_process</strong></dt>
+</dl>
+<dl><dt><strong>processes</strong></dt>
+</dl>
+<dl><dt><strong>surface_flinger_process</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.timeline.event_container.html#TimelineEventContainer">telemetry.timeline.event_container.TimelineEventContainer</a>:<br>
+<dl><dt><a name="TimelineModel-GetAllEvents"><strong>GetAllEvents</strong></a>(self, recursive<font color="#909090">=True</font>)</dt><dd><tt>#&nbsp;List&nbsp;versions.&nbsp;These&nbsp;should&nbsp;always&nbsp;be&nbsp;simple&nbsp;expressions&nbsp;that&nbsp;list()&nbsp;on<br>
+#&nbsp;an&nbsp;underlying&nbsp;iter&nbsp;method.</tt></dd></dl>
+
+<dl><dt><a name="TimelineModel-GetAllEventsOfName"><strong>GetAllEventsOfName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="TimelineModel-GetAllToplevelSlicesOfName"><strong>GetAllToplevelSlicesOfName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="TimelineModel-IterAllAsyncSlicesOfName"><strong>IterAllAsyncSlicesOfName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="TimelineModel-IterAllAsyncSlicesStartsWithName"><strong>IterAllAsyncSlicesStartsWithName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="TimelineModel-IterAllEvents"><strong>IterAllEvents</strong></a>(self, recursive<font color="#909090">=True</font>, event_type_predicate<font color="#909090">=&lt;function &lt;lambda&gt;&gt;</font>, event_predicate<font color="#909090">=&lt;function &lt;lambda&gt;&gt;</font>)</dt><dd><tt>Iterates&nbsp;all&nbsp;events&nbsp;in&nbsp;this&nbsp;container,&nbsp;pre-filtered&nbsp;by&nbsp;two&nbsp;predicates.<br>
+&nbsp;<br>
+Only&nbsp;events&nbsp;with&nbsp;a&nbsp;type&nbsp;matching&nbsp;event_type_predicate&nbsp;AND&nbsp;matching&nbsp;event<br>
+event_predicate&nbsp;will&nbsp;be&nbsp;yielded.<br>
+&nbsp;<br>
+event_type_predicate&nbsp;is&nbsp;given&nbsp;an&nbsp;actual&nbsp;type&nbsp;object,&nbsp;e.g.:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;event_type_predicate(slice_module.Slice)<br>
+&nbsp;<br>
+event_predicate&nbsp;is&nbsp;given&nbsp;actual&nbsp;events:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;event_predicate(thread.slices[7])</tt></dd></dl>
+
+<dl><dt><a name="TimelineModel-IterAllEventsOfName"><strong>IterAllEventsOfName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt><dd><tt>#&nbsp;Helper&nbsp;functions&nbsp;for&nbsp;finding&nbsp;common&nbsp;kinds&nbsp;of&nbsp;events.&nbsp;Must&nbsp;always&nbsp;take&nbsp;an<br>
+#&nbsp;optional&nbsp;recursive&nbsp;parameter&nbsp;and&nbsp;be&nbsp;implemented&nbsp;in&nbsp;terms&nbsp;of&nbsp;IterAllEvents.</tt></dd></dl>
+
+<dl><dt><a name="TimelineModel-IterAllFlowEvents"><strong>IterAllFlowEvents</strong></a>(self, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="TimelineModel-IterAllSlices"><strong>IterAllSlices</strong></a>(self, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="TimelineModel-IterAllSlicesInRange"><strong>IterAllSlicesInRange</strong></a>(self, start, end, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="TimelineModel-IterAllSlicesOfName"><strong>IterAllSlicesOfName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="TimelineModel-IterAllToplevelSlicesOfName"><strong>IterAllToplevelSlicesOfName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="TimelineModel-IterEventsInThisContainer"><strong>IterEventsInThisContainer</strong></a>(self, event_type_predicate, event_predicate)</dt><dd><tt>Iterates&nbsp;all&nbsp;the&nbsp;TimelineEvents&nbsp;in&nbsp;this&nbsp;container.<br>
+&nbsp;<br>
+Only&nbsp;events&nbsp;with&nbsp;a&nbsp;type&nbsp;matching&nbsp;event_type_predicate&nbsp;AND&nbsp;matching&nbsp;event<br>
+event_predicate&nbsp;will&nbsp;be&nbsp;yielded.<br>
+&nbsp;<br>
+event_type_predicate&nbsp;is&nbsp;given&nbsp;an&nbsp;actual&nbsp;type&nbsp;object,&nbsp;e.g.:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;event_type_predicate(slice_module.Slice)<br>
+&nbsp;<br>
+event_predicate&nbsp;is&nbsp;given&nbsp;actual&nbsp;events:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;event_predicate(thread.slices[7])<br>
+&nbsp;<br>
+DO&nbsp;NOT&nbsp;ASSUME&nbsp;that&nbsp;the&nbsp;event_type_predicate&nbsp;will&nbsp;be&nbsp;called&nbsp;for&nbsp;every&nbsp;event<br>
+found.&nbsp;The&nbsp;relative&nbsp;calling&nbsp;order&nbsp;of&nbsp;the&nbsp;two&nbsp;is&nbsp;left&nbsp;up&nbsp;to&nbsp;the&nbsp;implementer<br>
+of&nbsp;the&nbsp;method.</tt></dd></dl>
+
+<hr>
+Static methods inherited from <a href="telemetry.timeline.event_container.html#TimelineEventContainer">telemetry.timeline.event_container.TimelineEventContainer</a>:<br>
+<dl><dt><a name="TimelineModel-IsAsyncSlice"><strong>IsAsyncSlice</strong></a>(t)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.timeline.event_container.html#TimelineEventContainer">telemetry.timeline.event_container.TimelineEventContainer</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-IsSliceOrAsyncSlice"><strong>IsSliceOrAsyncSlice</strong></a>(t)</dt></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
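TimelineModel is the entry point documented above. The sketch below only strings together the documented calls; `trace_data` is assumed to be a telemetry.timeline.trace_data.TraceData instance obtained elsewhere, and the slice and marker names are hypothetical.

```python
from telemetry.timeline import model as model_module

# `trace_data` is assumed to come from a tracing run; it is not built here.
timeline = model_module.TimelineModel(trace_data)  # shift_world_to_zero defaults to True

print(timeline.bounds)             # time bounds of all imported events
print(timeline.GetAllProcesses())  # every timeline.Process found in the trace

# Iterate slices by name; the slice name is purely hypothetical.
for s in timeline.IterAllSlicesOfName('MessageLoop::RunTask'):
  print(s)

# Raises an error if the markers found do not match the requested names.
markers = timeline.FindTimelineMarkers(['my_test_marker'])
```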
diff --git a/catapult/telemetry/docs/pydoc/telemetry.timeline.process.html b/catapult/telemetry/docs/pydoc/telemetry.timeline.process.html
new file mode 100644
index 0000000..e3bda6e
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.timeline.process.html
@@ -0,0 +1,139 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.timeline.process</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.timeline.html"><font color="#ffffff">timeline</font></a>.process</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/timeline/process.py">telemetry/timeline/process.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.timeline.event_container.html">telemetry.timeline.event_container</a><br>
+<a href="telemetry.timeline.event.html">telemetry.timeline.event</a><br>
+</td><td width="25%" valign=top><a href="telemetry.timeline.memory_dump_event.html">telemetry.timeline.memory_dump_event</a><br>
+<a href="telemetry.timeline.counter.html">telemetry.timeline.counter</a><br>
+</td><td width="25%" valign=top><a href="telemetry.timeline.thread.html">telemetry.timeline.thread</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.event_container.html#TimelineEventContainer">telemetry.timeline.event_container.TimelineEventContainer</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.process.html#Process">Process</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Process">class <strong>Process</strong></a>(<a href="telemetry.timeline.event_container.html#TimelineEventContainer">telemetry.timeline.event_container.TimelineEventContainer</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>The&nbsp;<a href="#Process">Process</a>&nbsp;represents&nbsp;a&nbsp;single&nbsp;userland&nbsp;process&nbsp;in&nbsp;the&nbsp;trace.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.timeline.process.html#Process">Process</a></dd>
+<dd><a href="telemetry.timeline.event_container.html#TimelineEventContainer">telemetry.timeline.event_container.TimelineEventContainer</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="Process-AddMemoryDumpEvent"><strong>AddMemoryDumpEvent</strong></a>(self, memory_dump)</dt><dd><tt>Add&nbsp;a&nbsp;ProcessMemoryDumpEvent&nbsp;to&nbsp;this&nbsp;process.</tt></dd></dl>
+
+<dl><dt><a name="Process-AutoCloseOpenSlices"><strong>AutoCloseOpenSlices</strong></a>(self, max_timestamp, thread_time_bounds)</dt></dl>
+
+<dl><dt><a name="Process-FinalizeImport"><strong>FinalizeImport</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="Process-GetCounter"><strong>GetCounter</strong></a>(self, category, name)</dt></dl>
+
+<dl><dt><a name="Process-GetOrCreateCounter"><strong>GetOrCreateCounter</strong></a>(self, category, name)</dt></dl>
+
+<dl><dt><a name="Process-GetOrCreateThread"><strong>GetOrCreateThread</strong></a>(self, tid)</dt></dl>
+
+<dl><dt><a name="Process-IterChildContainers"><strong>IterChildContainers</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="Process-IterEventsInThisContainer"><strong>IterEventsInThisContainer</strong></a>(self, event_type_predicate, event_predicate)</dt></dl>
+
+<dl><dt><a name="Process-SetTraceBufferOverflowTimestamp"><strong>SetTraceBufferOverflowTimestamp</strong></a>(self, timestamp)</dt></dl>
+
+<dl><dt><a name="Process-__init__"><strong>__init__</strong></a>(self, parent, pid)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>counters</strong></dt>
+</dl>
+<dl><dt><strong>threads</strong></dt>
+</dl>
+<dl><dt><strong>trace_buffer_did_overflow</strong></dt>
+</dl>
+<dl><dt><strong>trace_buffer_overflow_event</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.timeline.event_container.html#TimelineEventContainer">telemetry.timeline.event_container.TimelineEventContainer</a>:<br>
+<dl><dt><a name="Process-GetAllEvents"><strong>GetAllEvents</strong></a>(self, recursive<font color="#909090">=True</font>)</dt><dd><tt>#&nbsp;List&nbsp;versions.&nbsp;These&nbsp;should&nbsp;always&nbsp;be&nbsp;simple&nbsp;expressions&nbsp;that&nbsp;list()&nbsp;on<br>
+#&nbsp;an&nbsp;underlying&nbsp;iter&nbsp;method.</tt></dd></dl>
+
+<dl><dt><a name="Process-GetAllEventsOfName"><strong>GetAllEventsOfName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="Process-GetAllToplevelSlicesOfName"><strong>GetAllToplevelSlicesOfName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="Process-IterAllAsyncSlicesOfName"><strong>IterAllAsyncSlicesOfName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="Process-IterAllAsyncSlicesStartsWithName"><strong>IterAllAsyncSlicesStartsWithName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="Process-IterAllEvents"><strong>IterAllEvents</strong></a>(self, recursive<font color="#909090">=True</font>, event_type_predicate<font color="#909090">=&lt;function &lt;lambda&gt;&gt;</font>, event_predicate<font color="#909090">=&lt;function &lt;lambda&gt;&gt;</font>)</dt><dd><tt>Iterates&nbsp;all&nbsp;events&nbsp;in&nbsp;this&nbsp;container,&nbsp;pre-filtered&nbsp;by&nbsp;two&nbsp;predicates.<br>
+&nbsp;<br>
+Only&nbsp;events&nbsp;with&nbsp;a&nbsp;type&nbsp;matching&nbsp;event_type_predicate&nbsp;AND&nbsp;matching<br>
+event_predicate&nbsp;will&nbsp;be&nbsp;yielded.<br>
+&nbsp;<br>
+event_type_predicate&nbsp;is&nbsp;given&nbsp;an&nbsp;actual&nbsp;type&nbsp;object,&nbsp;e.g.:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;event_type_predicate(slice_module.Slice)<br>
+&nbsp;<br>
+event_predicate&nbsp;is&nbsp;given&nbsp;actual&nbsp;events:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;event_predicate(thread.slices[7])</tt></dd></dl>
+
+<dl><dt><a name="Process-IterAllEventsOfName"><strong>IterAllEventsOfName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt><dd><tt>#&nbsp;Helper&nbsp;functions&nbsp;for&nbsp;finding&nbsp;common&nbsp;kinds&nbsp;of&nbsp;events.&nbsp;Must&nbsp;always&nbsp;take&nbsp;an<br>
+#&nbsp;optional&nbsp;recursive&nbsp;parameter&nbsp;and&nbsp;be&nbsp;implemented&nbsp;in&nbsp;terms&nbsp;of&nbsp;IterAllEvents.</tt></dd></dl>
+
+<dl><dt><a name="Process-IterAllFlowEvents"><strong>IterAllFlowEvents</strong></a>(self, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="Process-IterAllSlices"><strong>IterAllSlices</strong></a>(self, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="Process-IterAllSlicesInRange"><strong>IterAllSlicesInRange</strong></a>(self, start, end, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="Process-IterAllSlicesOfName"><strong>IterAllSlicesOfName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="Process-IterAllToplevelSlicesOfName"><strong>IterAllToplevelSlicesOfName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<hr>
+Static methods inherited from <a href="telemetry.timeline.event_container.html#TimelineEventContainer">telemetry.timeline.event_container.TimelineEventContainer</a>:<br>
+<dl><dt><a name="Process-IsAsyncSlice"><strong>IsAsyncSlice</strong></a>(t)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.timeline.event_container.html#TimelineEventContainer">telemetry.timeline.event_container.TimelineEventContainer</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
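
The IterAllEvents docstring above documents predicate-based filtering inherited from TimelineEventContainer. As a hedged, non-authoritative sketch (anything beyond the documented names is an assumption), a Process taken from an already-imported timeline model could be queried like this:

```python
# Hedged sketch: `process` is assumed to be a telemetry.timeline.process.Process
# taken from an already-imported timeline model; it is not constructed here.
from telemetry.timeline import slice as slice_module


def CountSlicesNamed(process, name):
  # IterAllEvents pre-filters by an event-type predicate (given a type object)
  # and an event predicate (given the event itself), as documented above.
  events = process.IterAllEvents(
      recursive=True,
      event_type_predicate=lambda t: issubclass(t, slice_module.Slice),
      event_predicate=lambda e: e.name == name)
  return sum(1 for _ in events)
```

The inherited IterAllSlicesOfName helper covers the same common case without hand-written predicates.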
diff --git a/catapult/telemetry/docs/pydoc/telemetry.timeline.sample.html b/catapult/telemetry/docs/pydoc/telemetry.timeline.sample.html
new file mode 100644
index 0000000..2206d48
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.timeline.sample.html
@@ -0,0 +1,85 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.timeline.sample</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.timeline.html"><font color="#ffffff">timeline</font></a>.sample</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/timeline/sample.py">telemetry/timeline/sample.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.timeline.event.html">telemetry.timeline.event</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.event.html#TimelineEvent">telemetry.timeline.event.TimelineEvent</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.sample.html#Sample">Sample</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Sample">class <strong>Sample</strong></a>(<a href="telemetry.timeline.event.html#TimelineEvent">telemetry.timeline.event.TimelineEvent</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;<a href="#Sample">Sample</a>&nbsp;represents&nbsp;a&nbsp;sample&nbsp;taken&nbsp;at&nbsp;an&nbsp;instant&nbsp;in&nbsp;time<br>
+plus&nbsp;parameters&nbsp;associated&nbsp;with&nbsp;that&nbsp;sample.<br>
+&nbsp;<br>
+NOTE:&nbsp;The&nbsp;<a href="#Sample">Sample</a>&nbsp;class&nbsp;implements&nbsp;the&nbsp;same&nbsp;interface&nbsp;as<br>
+Slice.&nbsp;These&nbsp;must&nbsp;be&nbsp;kept&nbsp;in&nbsp;sync.<br>
+&nbsp;<br>
+All&nbsp;time&nbsp;units&nbsp;are&nbsp;stored&nbsp;in&nbsp;milliseconds.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.timeline.sample.html#Sample">Sample</a></dd>
+<dd><a href="telemetry.timeline.event.html#TimelineEvent">telemetry.timeline.event.TimelineEvent</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="Sample-__init__"><strong>__init__</strong></a>(self, parent_thread, category, name, timestamp, args<font color="#909090">=None</font>)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.timeline.event.html#TimelineEvent">telemetry.timeline.event.TimelineEvent</a>:<br>
+<dl><dt><a name="Sample-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.timeline.event.html#TimelineEvent">telemetry.timeline.event.TimelineEvent</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>end</strong></dt>
+</dl>
+<dl><dt><strong>has_thread_timestamps</strong></dt>
+</dl>
+<dl><dt><strong>thread_end</strong></dt>
+<dd><tt>Thread-specific&nbsp;CPU&nbsp;time&nbsp;when&nbsp;this&nbsp;event&nbsp;ended.<br>
+&nbsp;<br>
+May&nbsp;be&nbsp;None&nbsp;if&nbsp;the&nbsp;trace&nbsp;event&nbsp;didn't&nbsp;have&nbsp;thread&nbsp;time&nbsp;data.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
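
Sample's constructor mirrors Slice, and Thread.AddSample (documented further down in this diff) is the usual entry point. A minimal, hedged sketch; the category, name, timestamp, and args values are placeholders:

```python
# Hedged sketch: `thread` is assumed to be a telemetry.timeline.thread.Thread
# from an imported model; timestamps are milliseconds per the docstring above.
def AddProfilerSample(thread):
  thread.AddSample('cpu_profile', 'MyFunction', 10.0, args={'placeholder': True})
  for sample in thread.samples:
    print(sample)  # Sample inherits __repr__ from TimelineEvent.
```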
diff --git a/catapult/telemetry/docs/pydoc/telemetry.timeline.slice.html b/catapult/telemetry/docs/pydoc/telemetry.timeline.slice.html
new file mode 100644
index 0000000..4e63552
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.timeline.slice.html
@@ -0,0 +1,103 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.timeline.slice</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.timeline.html"><font color="#ffffff">timeline</font></a>.slice</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/timeline/slice.py">telemetry/timeline/slice.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.timeline.event.html">telemetry.timeline.event</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.event.html#TimelineEvent">telemetry.timeline.event.TimelineEvent</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.slice.html#Slice">Slice</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Slice">class <strong>Slice</strong></a>(<a href="telemetry.timeline.event.html#TimelineEvent">telemetry.timeline.event.TimelineEvent</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;<a href="#Slice">Slice</a>&nbsp;represents&nbsp;an&nbsp;interval&nbsp;of&nbsp;time&nbsp;plus&nbsp;parameters&nbsp;associated<br>
+with&nbsp;that&nbsp;interval.<br>
+&nbsp;<br>
+NOTE:&nbsp;The&nbsp;Sample&nbsp;class&nbsp;implements&nbsp;the&nbsp;same&nbsp;interface&nbsp;as<br>
+<a href="#Slice">Slice</a>.&nbsp;These&nbsp;must&nbsp;be&nbsp;kept&nbsp;in&nbsp;sync.<br>
+&nbsp;<br>
+All&nbsp;time&nbsp;units&nbsp;are&nbsp;stored&nbsp;in&nbsp;milliseconds.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.timeline.slice.html#Slice">Slice</a></dd>
+<dd><a href="telemetry.timeline.event.html#TimelineEvent">telemetry.timeline.event.TimelineEvent</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="Slice-AddSubSlice"><strong>AddSubSlice</strong></a>(self, sub_slice)</dt></dl>
+
+<dl><dt><a name="Slice-GetAllSubSlices"><strong>GetAllSubSlices</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="Slice-GetAllSubSlicesOfName"><strong>GetAllSubSlicesOfName</strong></a>(self, name)</dt></dl>
+
+<dl><dt><a name="Slice-IterEventsInThisContainerRecrusively"><strong>IterEventsInThisContainerRecrusively</strong></a>(self, stack<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="Slice-__init__"><strong>__init__</strong></a>(self, parent_thread, category, name, timestamp, duration<font color="#909090">=0</font>, thread_timestamp<font color="#909090">=None</font>, thread_duration<font color="#909090">=None</font>, args<font color="#909090">=None</font>)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>self_thread_time</strong></dt>
+<dd><tt>Thread&nbsp;(scheduled)&nbsp;time&nbsp;spent&nbsp;in&nbsp;this&nbsp;function&nbsp;less&nbsp;any&nbsp;thread&nbsp;time&nbsp;spent<br>
+in&nbsp;child&nbsp;events.&nbsp;Returns&nbsp;None&nbsp;if&nbsp;the&nbsp;slice&nbsp;or&nbsp;any&nbsp;of&nbsp;its&nbsp;children&nbsp;does&nbsp;not<br>
+have&nbsp;a&nbsp;thread_duration&nbsp;value.</tt></dd>
+</dl>
+<dl><dt><strong>self_time</strong></dt>
+<dd><tt>Time&nbsp;spent&nbsp;in&nbsp;this&nbsp;function&nbsp;less&nbsp;any&nbsp;time&nbsp;spent&nbsp;in&nbsp;child&nbsp;events.</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.timeline.event.html#TimelineEvent">telemetry.timeline.event.TimelineEvent</a>:<br>
+<dl><dt><a name="Slice-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.timeline.event.html#TimelineEvent">telemetry.timeline.event.TimelineEvent</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>end</strong></dt>
+</dl>
+<dl><dt><strong>has_thread_timestamps</strong></dt>
+</dl>
+<dl><dt><strong>thread_end</strong></dt>
+<dd><tt>Thread-specific&nbsp;CPU&nbsp;time&nbsp;when&nbsp;this&nbsp;event&nbsp;ended.<br>
+&nbsp;<br>
+May&nbsp;be&nbsp;None&nbsp;if&nbsp;the&nbsp;trace&nbsp;event&nbsp;didn't&nbsp;have&nbsp;thread&nbsp;time&nbsp;data.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
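
The self_time and self_thread_time descriptors above define simple arithmetic over a slice and its children. A minimal sketch of that arithmetic, assuming that passing None for the parent thread is acceptable for a standalone illustration:

```python
# Hedged sketch: None stands in for the parent Thread; only the documented
# Slice arithmetic is illustrated. Times are milliseconds.
from telemetry.timeline import slice as slice_module

outer = slice_module.Slice(None, 'cat', 'Outer', 0.0, duration=10.0)
inner = slice_module.Slice(None, 'cat', 'Inner', 2.0, duration=4.0)
outer.AddSubSlice(inner)

# self_time: time in this slice less time spent in child events.
print(outer.self_time)         # expected: 6.0
# self_thread_time is None because no thread_duration was provided.
print(outer.self_thread_time)  # expected: None
```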
diff --git a/catapult/telemetry/docs/pydoc/telemetry.timeline.surface_flinger_importer.html b/catapult/telemetry/docs/pydoc/telemetry.timeline.surface_flinger_importer.html
new file mode 100644
index 0000000..0e98a27
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.timeline.surface_flinger_importer.html
@@ -0,0 +1,74 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.timeline.surface_flinger_importer</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.timeline.html"><font color="#ffffff">timeline</font></a>.surface_flinger_importer</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/timeline/surface_flinger_importer.py">telemetry/timeline/surface_flinger_importer.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.timeline.importer.html">telemetry.timeline.importer</a><br>
+</td><td width="25%" valign=top><a href="telemetry.timeline.trace_data.html">telemetry.timeline.trace_data</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.importer.html#TimelineImporter">telemetry.timeline.importer.TimelineImporter</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.surface_flinger_importer.html#SurfaceFlingerTimelineImporter">SurfaceFlingerTimelineImporter</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="SurfaceFlingerTimelineImporter">class <strong>SurfaceFlingerTimelineImporter</strong></a>(<a href="telemetry.timeline.importer.html#TimelineImporter">telemetry.timeline.importer.TimelineImporter</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.timeline.surface_flinger_importer.html#SurfaceFlingerTimelineImporter">SurfaceFlingerTimelineImporter</a></dd>
+<dd><a href="telemetry.timeline.importer.html#TimelineImporter">telemetry.timeline.importer.TimelineImporter</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="SurfaceFlingerTimelineImporter-FinalizeImport"><strong>FinalizeImport</strong></a>(self)</dt><dd><tt>Called&nbsp;by&nbsp;the&nbsp;Model&nbsp;after&nbsp;all&nbsp;other&nbsp;importers&nbsp;have&nbsp;imported&nbsp;their<br>
+events.</tt></dd></dl>
+
+<dl><dt><a name="SurfaceFlingerTimelineImporter-ImportEvents"><strong>ImportEvents</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="SurfaceFlingerTimelineImporter-__init__"><strong>__init__</strong></a>(self, model, trace_data)</dt></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="SurfaceFlingerTimelineImporter-GetSupportedPart"><strong>GetSupportedPart</strong></a>()</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.timeline.importer.html#TimelineImporter">telemetry.timeline.importer.TimelineImporter</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.timeline.tab_id_importer.html b/catapult/telemetry/docs/pydoc/telemetry.timeline.tab_id_importer.html
new file mode 100644
index 0000000..5eaad09
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.timeline.tab_id_importer.html
@@ -0,0 +1,138 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.timeline.tab_id_importer</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.timeline.html"><font color="#ffffff">timeline</font></a>.tab_id_importer</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/timeline/tab_id_importer.py">telemetry/timeline/tab_id_importer.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.timeline.importer.html">telemetry.timeline.importer</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="telemetry.timeline.trace_data.html">telemetry.timeline.trace_data</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.tab_id_importer.html#TraceBufferOverflowException">TraceBufferOverflowException</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.importer.html#TimelineImporter">telemetry.timeline.importer.TimelineImporter</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.tab_id_importer.html#TabIdImporter">TabIdImporter</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TabIdImporter">class <strong>TabIdImporter</strong></a>(<a href="telemetry.timeline.importer.html#TimelineImporter">telemetry.timeline.importer.TimelineImporter</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.timeline.tab_id_importer.html#TabIdImporter">TabIdImporter</a></dd>
+<dd><a href="telemetry.timeline.importer.html#TimelineImporter">telemetry.timeline.importer.TimelineImporter</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="TabIdImporter-FinalizeImport"><strong>FinalizeImport</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TabIdImporter-ImportEvents"><strong>ImportEvents</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TabIdImporter-__init__"><strong>__init__</strong></a>(self, model, trace_data)</dt></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="TabIdImporter-GetSupportedPart"><strong>GetSupportedPart</strong></a>()</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.timeline.importer.html#TimelineImporter">telemetry.timeline.importer.TimelineImporter</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TraceBufferOverflowException">class <strong>TraceBufferOverflowException</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.timeline.tab_id_importer.html#TraceBufferOverflowException">TraceBufferOverflowException</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="TraceBufferOverflowException-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#TraceBufferOverflowException-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#TraceBufferOverflowException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="TraceBufferOverflowException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#TraceBufferOverflowException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="TraceBufferOverflowException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#TraceBufferOverflowException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="TraceBufferOverflowException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#TraceBufferOverflowException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="TraceBufferOverflowException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#TraceBufferOverflowException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="TraceBufferOverflowException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="TraceBufferOverflowException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#TraceBufferOverflowException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="TraceBufferOverflowException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#TraceBufferOverflowException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="TraceBufferOverflowException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="TraceBufferOverflowException-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#TraceBufferOverflowException-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="TraceBufferOverflowException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.timeline.thread.html b/catapult/telemetry/docs/pydoc/telemetry.timeline.thread.html
new file mode 100644
index 0000000..09b8513
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.timeline.thread.html
@@ -0,0 +1,168 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.timeline.thread</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.timeline.html"><font color="#ffffff">timeline</font></a>.thread</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/timeline/thread.py">telemetry/timeline/thread.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.timeline.async_slice.html">telemetry.timeline.async_slice</a><br>
+<a href="telemetry.timeline.event_container.html">telemetry.timeline.event_container</a><br>
+</td><td width="25%" valign=top><a href="telemetry.timeline.flow_event.html">telemetry.timeline.flow_event</a><br>
+<a href="telemetry.timeline.sample.html">telemetry.timeline.sample</a><br>
+</td><td width="25%" valign=top><a href="telemetry.timeline.slice.html">telemetry.timeline.slice</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.event_container.html#TimelineEventContainer">telemetry.timeline.event_container.TimelineEventContainer</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.thread.html#Thread">Thread</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Thread">class <strong>Thread</strong></a>(<a href="telemetry.timeline.event_container.html#TimelineEventContainer">telemetry.timeline.event_container.TimelineEventContainer</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;<a href="#Thread">Thread</a>&nbsp;stores&nbsp;all&nbsp;the&nbsp;trace&nbsp;events&nbsp;collected&nbsp;for&nbsp;a&nbsp;particular<br>
+thread.&nbsp;We&nbsp;organize&nbsp;the&nbsp;synchronous&nbsp;slices&nbsp;on&nbsp;a&nbsp;thread&nbsp;by&nbsp;"subrows,"&nbsp;where<br>
+subrow&nbsp;0&nbsp;has&nbsp;all&nbsp;the&nbsp;root&nbsp;slices,&nbsp;subrow&nbsp;1&nbsp;those&nbsp;nested&nbsp;1&nbsp;deep,&nbsp;and&nbsp;so&nbsp;on.<br>
+The&nbsp;asynchronous&nbsp;slices&nbsp;are&nbsp;stored&nbsp;in&nbsp;an&nbsp;AsyncSliceGroup&nbsp;object.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.timeline.thread.html#Thread">Thread</a></dd>
+<dd><a href="telemetry.timeline.event_container.html#TimelineEventContainer">telemetry.timeline.event_container.TimelineEventContainer</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="Thread-AddAsyncSlice"><strong>AddAsyncSlice</strong></a>(self, async_slice)</dt></dl>
+
+<dl><dt><a name="Thread-AddFlowEvent"><strong>AddFlowEvent</strong></a>(self, flow_event)</dt></dl>
+
+<dl><dt><a name="Thread-AddSample"><strong>AddSample</strong></a>(self, category, name, timestamp, args<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="Thread-AutoCloseOpenSlices"><strong>AutoCloseOpenSlices</strong></a>(self, max_timestamp, max_thread_timestamp)</dt></dl>
+
+<dl><dt><a name="Thread-BeginSlice"><strong>BeginSlice</strong></a>(self, category, name, timestamp, thread_timestamp<font color="#909090">=None</font>, args<font color="#909090">=None</font>)</dt><dd><tt>Opens&nbsp;a&nbsp;new&nbsp;slice&nbsp;for&nbsp;the&nbsp;thread.<br>
+Calls&nbsp;to&nbsp;BeginSlice&nbsp;and&nbsp;EndSlice&nbsp;must&nbsp;be&nbsp;made&nbsp;with<br>
+monotonically&nbsp;non-decreasing&nbsp;timestamps.<br>
+&nbsp;<br>
+*&nbsp;category:&nbsp;Category&nbsp;to&nbsp;which&nbsp;the&nbsp;slice&nbsp;belongs.<br>
+*&nbsp;name:&nbsp;Name&nbsp;of&nbsp;the&nbsp;slice&nbsp;to&nbsp;add.<br>
+*&nbsp;timestamp:&nbsp;The&nbsp;timestamp&nbsp;of&nbsp;the&nbsp;slice,&nbsp;in&nbsp;milliseconds.<br>
+*&nbsp;thread_timestamp:&nbsp;<a href="#Thread">Thread</a>&nbsp;specific&nbsp;clock&nbsp;(scheduled)&nbsp;timestamp&nbsp;of&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;slice,&nbsp;in&nbsp;milliseconds.<br>
+*&nbsp;args:&nbsp;Arguments&nbsp;associated&nbsp;with&nbsp;the&nbsp;slice.<br>
+&nbsp;<br>
+Returns&nbsp;the&nbsp;newly&nbsp;opened&nbsp;slice.</tt></dd></dl>
+
+<dl><dt><a name="Thread-EndSlice"><strong>EndSlice</strong></a>(self, end_timestamp, end_thread_timestamp<font color="#909090">=None</font>)</dt><dd><tt>Ends&nbsp;the&nbsp;last&nbsp;begun&nbsp;slice&nbsp;in&nbsp;this&nbsp;group&nbsp;and&nbsp;pushes&nbsp;it&nbsp;onto&nbsp;the&nbsp;slice<br>
+array.<br>
+&nbsp;<br>
+*&nbsp;end_timestamp:&nbsp;Timestamp&nbsp;when&nbsp;the&nbsp;slice&nbsp;ended&nbsp;in&nbsp;milliseconds<br>
+*&nbsp;end_thread_timestamp:&nbsp;Timestamp&nbsp;when&nbsp;the&nbsp;scheduled&nbsp;time&nbsp;of&nbsp;the&nbsp;slice&nbsp;ended<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;in&nbsp;milliseconds<br>
+&nbsp;<br>
+Returns&nbsp;the&nbsp;completed&nbsp;slice.</tt></dd></dl>
+
+<dl><dt><a name="Thread-FinalizeImport"><strong>FinalizeImport</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="Thread-IsTimestampValidForBeginOrEnd"><strong>IsTimestampValidForBeginOrEnd</strong></a>(self, timestamp)</dt></dl>
+
+<dl><dt><a name="Thread-IterChildContainers"><strong>IterChildContainers</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="Thread-IterEventsInThisContainer"><strong>IterEventsInThisContainer</strong></a>(self, event_type_predicate, event_predicate)</dt></dl>
+
+<dl><dt><a name="Thread-PushCompleteSlice"><strong>PushCompleteSlice</strong></a>(self, category, name, timestamp, duration, thread_timestamp, thread_duration, args<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="Thread-PushSlice"><strong>PushSlice</strong></a>(self, new_slice)</dt></dl>
+
+<dl><dt><a name="Thread-__init__"><strong>__init__</strong></a>(self, process, tid)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>all_slices</strong></dt>
+</dl>
+<dl><dt><strong>async_slices</strong></dt>
+</dl>
+<dl><dt><strong>open_slice_count</strong></dt>
+</dl>
+<dl><dt><strong>samples</strong></dt>
+</dl>
+<dl><dt><strong>toplevel_slices</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.timeline.event_container.html#TimelineEventContainer">telemetry.timeline.event_container.TimelineEventContainer</a>:<br>
+<dl><dt><a name="Thread-GetAllEvents"><strong>GetAllEvents</strong></a>(self, recursive<font color="#909090">=True</font>)</dt><dd><tt>#&nbsp;List&nbsp;versions.&nbsp;These&nbsp;should&nbsp;always&nbsp;be&nbsp;simple&nbsp;expressions&nbsp;that&nbsp;list()&nbsp;on<br>
+#&nbsp;an&nbsp;underlying&nbsp;iter&nbsp;method.</tt></dd></dl>
+
+<dl><dt><a name="Thread-GetAllEventsOfName"><strong>GetAllEventsOfName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="Thread-GetAllToplevelSlicesOfName"><strong>GetAllToplevelSlicesOfName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="Thread-IterAllAsyncSlicesOfName"><strong>IterAllAsyncSlicesOfName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="Thread-IterAllAsyncSlicesStartsWithName"><strong>IterAllAsyncSlicesStartsWithName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="Thread-IterAllEvents"><strong>IterAllEvents</strong></a>(self, recursive<font color="#909090">=True</font>, event_type_predicate<font color="#909090">=&lt;function &lt;lambda&gt;&gt;</font>, event_predicate<font color="#909090">=&lt;function &lt;lambda&gt;&gt;</font>)</dt><dd><tt>Iterates&nbsp;all&nbsp;events&nbsp;in&nbsp;this&nbsp;container,&nbsp;pre-filtered&nbsp;by&nbsp;two&nbsp;predicates.<br>
+&nbsp;<br>
+Only&nbsp;events&nbsp;with&nbsp;a&nbsp;type&nbsp;matching&nbsp;event_type_predicate&nbsp;AND&nbsp;matching<br>
+event_predicate&nbsp;will&nbsp;be&nbsp;yielded.<br>
+&nbsp;<br>
+event_type_predicate&nbsp;is&nbsp;given&nbsp;an&nbsp;actual&nbsp;type&nbsp;object,&nbsp;e.g.:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;event_type_predicate(slice_module.Slice)<br>
+&nbsp;<br>
+event_predicate&nbsp;is&nbsp;given&nbsp;actual&nbsp;events:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;event_predicate(thread.slices[7])</tt></dd></dl>
+
+<dl><dt><a name="Thread-IterAllEventsOfName"><strong>IterAllEventsOfName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt><dd><tt>#&nbsp;Helper&nbsp;functions&nbsp;for&nbsp;finding&nbsp;common&nbsp;kinds&nbsp;of&nbsp;events.&nbsp;Must&nbsp;always&nbsp;take&nbsp;an<br>
+#&nbsp;optional&nbsp;recursive&nbsp;parameter&nbsp;and&nbsp;be&nbsp;implemented&nbsp;in&nbsp;terms&nbsp;of&nbsp;IterAllEvents.</tt></dd></dl>
+
+<dl><dt><a name="Thread-IterAllFlowEvents"><strong>IterAllFlowEvents</strong></a>(self, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="Thread-IterAllSlices"><strong>IterAllSlices</strong></a>(self, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="Thread-IterAllSlicesInRange"><strong>IterAllSlicesInRange</strong></a>(self, start, end, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="Thread-IterAllSlicesOfName"><strong>IterAllSlicesOfName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<dl><dt><a name="Thread-IterAllToplevelSlicesOfName"><strong>IterAllToplevelSlicesOfName</strong></a>(self, name, recursive<font color="#909090">=True</font>)</dt></dl>
+
+<hr>
+Static methods inherited from <a href="telemetry.timeline.event_container.html#TimelineEventContainer">telemetry.timeline.event_container.TimelineEventContainer</a>:<br>
+<dl><dt><a name="Thread-IsAsyncSlice"><strong>IsAsyncSlice</strong></a>(t)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.timeline.event_container.html#TimelineEventContainer">telemetry.timeline.event_container.TimelineEventContainer</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
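
BeginSlice and EndSlice above describe a push/pop model for synchronous slices with millisecond timestamps. A hedged sketch of that call pattern, assuming `process` comes from an already-imported timeline model:

```python
# Hedged sketch: only signatures documented in this diff are used; the tid,
# categories, names, and timestamps are placeholders.
def RecordNestedSlices(process):
  thread = process.GetOrCreateThread(tid=1234)
  # Timestamps must not decrease between BeginSlice/EndSlice calls.
  thread.BeginSlice('toplevel', 'DoWork', 10.0)
  thread.BeginSlice('toplevel', 'DoWorkInner', 12.0)
  thread.EndSlice(18.0)    # closes DoWorkInner
  thread.EndSlice(25.0)    # closes DoWork
  thread.FinalizeImport()  # per-thread bookkeeping; exact effects assumed
  return list(thread.all_slices)
```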
diff --git a/catapult/telemetry/docs/pydoc/telemetry.timeline.trace_data.html b/catapult/telemetry/docs/pydoc/telemetry.timeline.trace_data.html
new file mode 100644
index 0000000..26ed691
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.timeline.trace_data.html
@@ -0,0 +1,232 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.timeline.trace_data</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.timeline.html"><font color="#ffffff">timeline</font></a>.trace_data</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/timeline/trace_data.py">telemetry/timeline/trace_data.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="json.html">json</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.trace_data.html#TraceData">TraceData</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.timeline.trace_data.html#TraceDataBuilder">TraceDataBuilder</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.timeline.trace_data.html#TraceDataPart">TraceDataPart</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.trace_data.html#NonSerializableTraceData">NonSerializableTraceData</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="NonSerializableTraceData">class <strong>NonSerializableTraceData</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Raised&nbsp;when&nbsp;raw&nbsp;trace&nbsp;data&nbsp;cannot&nbsp;be&nbsp;serialized&nbsp;to&nbsp;<a href="#TraceData">TraceData</a>.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.timeline.trace_data.html#NonSerializableTraceData">NonSerializableTraceData</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="NonSerializableTraceData-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#NonSerializableTraceData-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#NonSerializableTraceData-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="NonSerializableTraceData-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#NonSerializableTraceData-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="NonSerializableTraceData-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#NonSerializableTraceData-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="NonSerializableTraceData-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#NonSerializableTraceData-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="NonSerializableTraceData-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#NonSerializableTraceData-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="NonSerializableTraceData-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="NonSerializableTraceData-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#NonSerializableTraceData-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="NonSerializableTraceData-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#NonSerializableTraceData-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="NonSerializableTraceData-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="NonSerializableTraceData-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#NonSerializableTraceData-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="NonSerializableTraceData-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TraceData">class <strong>TraceData</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Validates,&nbsp;parses,&nbsp;and&nbsp;serializes&nbsp;raw&nbsp;data.<br>
+&nbsp;<br>
+NOTE:&nbsp;raw&nbsp;data&nbsp;must&nbsp;only&nbsp;include&nbsp;primitive&nbsp;objects!<br>
+By&nbsp;design,&nbsp;<a href="#TraceData">TraceData</a>&nbsp;must&nbsp;contain&nbsp;only&nbsp;data&nbsp;that&nbsp;is&nbsp;BOTH&nbsp;json-serializable<br>
+to&nbsp;a&nbsp;file,&nbsp;AND&nbsp;restorable&nbsp;once&nbsp;again&nbsp;from&nbsp;that&nbsp;file&nbsp;into&nbsp;<a href="#TraceData">TraceData</a>&nbsp;without<br>
+assistance&nbsp;from&nbsp;other&nbsp;classes.<br>
+&nbsp;<br>
+Raw&nbsp;data&nbsp;can&nbsp;be&nbsp;one&nbsp;of&nbsp;three&nbsp;standard&nbsp;trace_event&nbsp;formats:<br>
+1.&nbsp;Trace&nbsp;container&nbsp;format:&nbsp;a&nbsp;json-parseable&nbsp;dict.<br>
+2.&nbsp;A&nbsp;json-parseable&nbsp;array:&nbsp;assumed&nbsp;to&nbsp;be&nbsp;chrome&nbsp;trace&nbsp;data.<br>
+3.&nbsp;A&nbsp;json-parseable&nbsp;array&nbsp;missing&nbsp;the&nbsp;final&nbsp;']':&nbsp;assumed&nbsp;to&nbsp;be&nbsp;chrome&nbsp;trace<br>
+&nbsp;&nbsp;&nbsp;data.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="TraceData-GetEventsFor"><strong>GetEventsFor</strong></a>(self, part)</dt></dl>
+
+<dl><dt><a name="TraceData-HasEventsFor"><strong>HasEventsFor</strong></a>(self, part)</dt></dl>
+
+<dl><dt><a name="TraceData-Serialize"><strong>Serialize</strong></a>(self, f, gzip_result<font color="#909090">=False</font>)</dt><dd><tt>Serializes&nbsp;the&nbsp;trace&nbsp;result&nbsp;to&nbsp;a&nbsp;file-like&nbsp;<a href="__builtin__.html#object">object</a>.<br>
+&nbsp;<br>
+Always&nbsp;writes&nbsp;in&nbsp;the&nbsp;trace&nbsp;container&nbsp;format.</tt></dd></dl>
+
+<dl><dt><a name="TraceData-__init__"><strong>__init__</strong></a>(self, raw_data<font color="#909090">=None</font>)</dt><dd><tt>Creates&nbsp;<a href="#TraceData">TraceData</a>&nbsp;from&nbsp;the&nbsp;given&nbsp;data.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>active_parts</strong></dt>
+</dl>
+<dl><dt><strong>events_are_safely_mutable</strong></dt>
+<dd><tt>Returns&nbsp;true&nbsp;if&nbsp;the&nbsp;events&nbsp;in&nbsp;this&nbsp;value&nbsp;are&nbsp;completely&nbsp;sealed.<br>
+&nbsp;<br>
+Some&nbsp;importers&nbsp;want&nbsp;to&nbsp;take&nbsp;complex&nbsp;fields&nbsp;out&nbsp;of&nbsp;the&nbsp;TraceData&nbsp;and&nbsp;add<br>
+them&nbsp;to&nbsp;the&nbsp;model,&nbsp;changing&nbsp;them&nbsp;subtly&nbsp;as&nbsp;they&nbsp;do&nbsp;so.&nbsp;If&nbsp;the&nbsp;TraceData<br>
+was&nbsp;constructed&nbsp;with&nbsp;data&nbsp;that&nbsp;is&nbsp;shared&nbsp;with&nbsp;something&nbsp;outside&nbsp;the&nbsp;trace<br>
+data,&nbsp;for&nbsp;instance&nbsp;a&nbsp;test&nbsp;harness,&nbsp;then&nbsp;this&nbsp;mutation&nbsp;is&nbsp;unexpected.&nbsp;But,<br>
+if&nbsp;the&nbsp;values&nbsp;are&nbsp;sealed,&nbsp;then&nbsp;mutating&nbsp;the&nbsp;events&nbsp;is&nbsp;a&nbsp;lot&nbsp;faster.<br>
+&nbsp;<br>
+We&nbsp;know&nbsp;if&nbsp;events&nbsp;are&nbsp;sealed&nbsp;if&nbsp;the&nbsp;value&nbsp;came&nbsp;from&nbsp;a&nbsp;string,&nbsp;or&nbsp;if&nbsp;the<br>
+value&nbsp;came&nbsp;from&nbsp;a&nbsp;TraceDataBuilder.</tt></dd>
+</dl>
+<dl><dt><strong>metadata_records</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TraceDataBuilder">class <strong>TraceDataBuilder</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt><a href="#TraceDataBuilder">TraceDataBuilder</a>&nbsp;helps&nbsp;build&nbsp;up&nbsp;a&nbsp;trace&nbsp;from&nbsp;multiple&nbsp;trace&nbsp;agents.<br>
+&nbsp;<br>
+<a href="#TraceData">TraceData</a>&nbsp;is&nbsp;supposed&nbsp;to&nbsp;be&nbsp;immutable,&nbsp;but&nbsp;it&nbsp;is&nbsp;useful&nbsp;during&nbsp;recording&nbsp;to<br>
+have&nbsp;a&nbsp;mutable&nbsp;version.&nbsp;That&nbsp;is&nbsp;<a href="#TraceDataBuilder">TraceDataBuilder</a>.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="TraceDataBuilder-AddEventsTo"><strong>AddEventsTo</strong></a>(self, part, events)</dt><dd><tt>Note:&nbsp;this&nbsp;won't&nbsp;work&nbsp;when&nbsp;called&nbsp;from&nbsp;multiple&nbsp;browsers.<br>
+&nbsp;<br>
+Each&nbsp;browser's&nbsp;trace_event_impl&nbsp;zeros&nbsp;its&nbsp;timestamps&nbsp;when&nbsp;it&nbsp;writes&nbsp;them<br>
+out&nbsp;and&nbsp;doesn't&nbsp;write&nbsp;a&nbsp;timebase&nbsp;that&nbsp;can&nbsp;be&nbsp;used&nbsp;to&nbsp;re-sync&nbsp;them.</tt></dd></dl>
+
+<dl><dt><a name="TraceDataBuilder-AsData"><strong>AsData</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TraceDataBuilder-HasEventsFor"><strong>HasEventsFor</strong></a>(self, part)</dt></dl>
+
+<dl><dt><a name="TraceDataBuilder-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TraceDataPart">class <strong>TraceDataPart</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt><a href="#TraceData">TraceData</a>&nbsp;can&nbsp;have&nbsp;a&nbsp;variety&nbsp;of&nbsp;events.<br>
+&nbsp;<br>
+These&nbsp;are&nbsp;called&nbsp;"parts"&nbsp;and&nbsp;are&nbsp;accessed&nbsp;by&nbsp;the&nbsp;following&nbsp;fixed&nbsp;field&nbsp;names.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="TraceDataPart-__init__"><strong>__init__</strong></a>(self, raw_field_name)</dt></dl>
+
+<dl><dt><a name="TraceDataPart-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>raw_field_name</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>ALL_TRACE_PARTS</strong> = set([TraceDataPart("traceEvents"), TraceDataPart("inspectorTimelineEvents"), TraceDataPart("surfaceFlinger"), TraceDataPart("tabIds")])<br>
+<strong>CHROME_TRACE_PART</strong> = TraceDataPart("traceEvents")<br>
+<strong>INSPECTOR_TRACE_PART</strong> = TraceDataPart("inspectorTimelineEvents")<br>
+<strong>SURFACE_FLINGER_PART</strong> = TraceDataPart("surfaceFlinger")<br>
+<strong>TAB_ID_PART</strong> = TraceDataPart("tabIds")</td></tr></table>
+</body></html>
\ No newline at end of file
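
The page above documents the builder API in `telemetry.timeline.trace_data`. As a rough, non-authoritative sketch of how the documented names fit together (it assumes a catapult/telemetry checkout on PYTHONPATH, and the sample event dict is purely illustrative):

```
# Sketch: building an immutable TraceData from raw Chrome trace events.
# Only names documented above are used; the event payload is illustrative.
from telemetry.timeline import trace_data as trace_data_module

builder = trace_data_module.TraceDataBuilder()

# CHROME_TRACE_PART corresponds to the "traceEvents" field of a trace.
events = [{'name': 'frame', 'ph': 'B', 'ts': 0, 'pid': 1, 'tid': 1}]
builder.AddEventsTo(trace_data_module.CHROME_TRACE_PART, events)

if builder.HasEventsFor(trace_data_module.CHROME_TRACE_PART):
    data = builder.AsData()  # freeze the mutable builder into a TraceData
```
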
diff --git a/catapult/telemetry/docs/pydoc/telemetry.timeline.trace_event_importer.html b/catapult/telemetry/docs/pydoc/telemetry.timeline.trace_event_importer.html
new file mode 100644
index 0000000..897503f
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.timeline.trace_event_importer.html
@@ -0,0 +1,81 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.timeline.trace_event_importer</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.timeline.html"><font color="#ffffff">timeline</font></a>.trace_event_importer</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/timeline/trace_event_importer.py">telemetry/timeline/trace_event_importer.py</a></font></td></tr></table>
+    <p><tt>TraceEventImporter&nbsp;imports&nbsp;TraceEvent-formatted&nbsp;data<br>
+into&nbsp;the&nbsp;provided&nbsp;model.<br>
+This&nbsp;is&nbsp;a&nbsp;port&nbsp;of&nbsp;the&nbsp;trace&nbsp;event&nbsp;importer&nbsp;from<br>
+https://code.google.com/p/trace-viewer/</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="collections.html">collections</a><br>
+<a href="copy.html">copy</a><br>
+</td><td width="25%" valign=top><a href="telemetry.timeline.importer.html">telemetry.timeline.importer</a><br>
+<a href="telemetry.timeline.memory_dump_event.html">telemetry.timeline.memory_dump_event</a><br>
+</td><td width="25%" valign=top><a href="telemetry.timeline.trace_data.html">telemetry.timeline.trace_data</a><br>
+<a href="telemetry.timeline.async_slice.html">telemetry.timeline.async_slice</a><br>
+</td><td width="25%" valign=top><a href="telemetry.timeline.flow_event.html">telemetry.timeline.flow_event</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.importer.html#TimelineImporter">telemetry.timeline.importer.TimelineImporter</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.trace_event_importer.html#TraceEventTimelineImporter">TraceEventTimelineImporter</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TraceEventTimelineImporter">class <strong>TraceEventTimelineImporter</strong></a>(<a href="telemetry.timeline.importer.html#TimelineImporter">telemetry.timeline.importer.TimelineImporter</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.timeline.trace_event_importer.html#TraceEventTimelineImporter">TraceEventTimelineImporter</a></dd>
+<dd><a href="telemetry.timeline.importer.html#TimelineImporter">telemetry.timeline.importer.TimelineImporter</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="TraceEventTimelineImporter-FinalizeImport"><strong>FinalizeImport</strong></a>(self)</dt><dd><tt>Called&nbsp;by&nbsp;the&nbsp;Model&nbsp;after&nbsp;all&nbsp;other&nbsp;importers&nbsp;have&nbsp;imported&nbsp;their<br>
+events.</tt></dd></dl>
+
+<dl><dt><a name="TraceEventTimelineImporter-ImportEvents"><strong>ImportEvents</strong></a>(self)</dt><dd><tt>Walks&nbsp;through&nbsp;the&nbsp;events_&nbsp;list&nbsp;and&nbsp;outputs&nbsp;the&nbsp;structures&nbsp;discovered&nbsp;to<br>
+model_.</tt></dd></dl>
+
+<dl><dt><a name="TraceEventTimelineImporter-__init__"><strong>__init__</strong></a>(self, model, trace_data)</dt></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="TraceEventTimelineImporter-GetSupportedPart"><strong>GetSupportedPart</strong></a>()</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.timeline.importer.html#TimelineImporter">telemetry.timeline.importer.TimelineImporter</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
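
For orientation, the importer documented above is normally driven by the timeline model rather than called directly. A minimal sketch, assuming `model` and `trace_data` are a timeline Model and a TraceData instance supplied by the caller:

```
# Sketch: driving TraceEventTimelineImporter with the methods documented above.
from telemetry.timeline import trace_event_importer

def import_chrome_trace(model, trace_data):
    # `model` and `trace_data` are assumed to be constructed elsewhere.
    importer = trace_event_importer.TraceEventTimelineImporter(model, trace_data)
    importer.ImportEvents()    # walks the raw events and populates the model
    importer.FinalizeImport()  # run after all other importers have imported
```
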
diff --git a/catapult/telemetry/docs/pydoc/telemetry.timeline.tracing_category_filter.html b/catapult/telemetry/docs/pydoc/telemetry.timeline.tracing_category_filter.html
new file mode 100644
index 0000000..2d4bea6
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.timeline.tracing_category_filter.html
@@ -0,0 +1,107 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.timeline.tracing_category_filter</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.timeline.html"><font color="#ffffff">timeline</font></a>.tracing_category_filter</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/timeline/tracing_category_filter.py">telemetry/timeline/tracing_category_filter.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="re.html">re</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.tracing_category_filter.html#TracingCategoryFilter">TracingCategoryFilter</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TracingCategoryFilter">class <strong>TracingCategoryFilter</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;set&nbsp;of&nbsp;included&nbsp;and&nbsp;excluded&nbsp;categories&nbsp;that&nbsp;should&nbsp;be&nbsp;traced.<br>
+&nbsp;<br>
+The&nbsp;TracingCategoryFilter&nbsp;allows&nbsp;fine-tuning&nbsp;of&nbsp;what&nbsp;data&nbsp;is&nbsp;traced.&nbsp;Basic<br>
+choice&nbsp;of&nbsp;which&nbsp;tracers&nbsp;to&nbsp;use&nbsp;is&nbsp;done&nbsp;by&nbsp;TracingOptions.<br>
+&nbsp;<br>
+Providing&nbsp;filter_string=None&nbsp;gives&nbsp;the&nbsp;default&nbsp;category&nbsp;filter,&nbsp;which&nbsp;leaves<br>
+what&nbsp;to&nbsp;trace&nbsp;up&nbsp;to&nbsp;the&nbsp;individual&nbsp;trace&nbsp;systems.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="TracingCategoryFilter-AddExcludedCategory"><strong>AddExcludedCategory</strong></a>(self, category_glob)</dt><dd><tt>Explicitly&nbsp;disables&nbsp;anything&nbsp;matching&nbsp;category_glob.</tt></dd></dl>
+
+<dl><dt><a name="TracingCategoryFilter-AddIncludedCategory"><strong>AddIncludedCategory</strong></a>(self, category_glob)</dt><dd><tt>Explicitly&nbsp;enables&nbsp;anything&nbsp;matching&nbsp;category_glob.</tt></dd></dl>
+
+<dl><dt><a name="TracingCategoryFilter-AddSyntheticDelay"><strong>AddSyntheticDelay</strong></a>(self, delay)</dt></dl>
+
+<dl><dt><a name="TracingCategoryFilter-GetDictForChromeTracing"><strong>GetDictForChromeTracing</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TracingCategoryFilter-IsSubset"><strong>IsSubset</strong></a>(self, other)</dt><dd><tt>Determine&nbsp;if&nbsp;filter&nbsp;A&nbsp;(self)&nbsp;is&nbsp;a&nbsp;subset&nbsp;of&nbsp;filter&nbsp;B&nbsp;(other).<br>
+Returns&nbsp;True&nbsp;if&nbsp;A&nbsp;is&nbsp;a&nbsp;subset&nbsp;of&nbsp;B,&nbsp;False&nbsp;if&nbsp;A&nbsp;is&nbsp;not&nbsp;a&nbsp;subset&nbsp;of&nbsp;B,<br>
+and&nbsp;None&nbsp;if&nbsp;we&nbsp;can't&nbsp;tell&nbsp;for&nbsp;sure.</tt></dd></dl>
+
+<dl><dt><a name="TracingCategoryFilter-__init__"><strong>__init__</strong></a>(self, filter_string<font color="#909090">=None</font>)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>disabled_by_default_categories</strong></dt>
+</dl>
+<dl><dt><strong>excluded_categories</strong></dt>
+</dl>
+<dl><dt><strong>filter_string</strong></dt>
+</dl>
+<dl><dt><strong>included_categories</strong></dt>
+</dl>
+<dl><dt><strong>stable_filter_string</strong></dt>
+</dl>
+<dl><dt><strong>synthetic_delays</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-CreateDebugOverheadFilter"><strong>CreateDebugOverheadFilter</strong></a>()</dt><dd><tt>Returns&nbsp;a&nbsp;filter&nbsp;with&nbsp;as&nbsp;many&nbsp;traces&nbsp;enabled&nbsp;as&nbsp;is&nbsp;useful.</tt></dd></dl>
+ <dl><dt><a name="-CreateMinimalOverheadFilter"><strong>CreateMinimalOverheadFilter</strong></a>()</dt><dd><tt>Returns&nbsp;a&nbsp;filter&nbsp;with&nbsp;the&nbsp;best-effort&nbsp;amount&nbsp;of&nbsp;overhead.</tt></dd></dl>
+ <dl><dt><a name="-CreateNoOverheadFilter"><strong>CreateNoOverheadFilter</strong></a>()</dt><dd><tt>Returns&nbsp;a&nbsp;filter&nbsp;with&nbsp;the&nbsp;least&nbsp;overhead&nbsp;possible.<br>
+&nbsp;<br>
+This&nbsp;contains&nbsp;no&nbsp;sub-traces&nbsp;of&nbsp;thread&nbsp;tasks,&nbsp;so&nbsp;it's&nbsp;only&nbsp;useful&nbsp;for<br>
+capturing&nbsp;the&nbsp;cpu-time&nbsp;spent&nbsp;on&nbsp;threads&nbsp;(as&nbsp;well&nbsp;as&nbsp;needed&nbsp;benchmark<br>
+traces).<br>
+&nbsp;<br>
+FIXME:&nbsp;Remove&nbsp;webkit.console&nbsp;when&nbsp;blink.console&nbsp;lands&nbsp;in&nbsp;chromium&nbsp;and<br>
+the&nbsp;ref&nbsp;builds&nbsp;are&nbsp;updated.&nbsp;crbug.com/386847</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
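
As a non-authoritative sketch of the filter API documented above: the category names are illustrative, the leading `-` syntax for exclusions in the filter string follows Chrome's tracing convention and is assumed here, and the synthetic-delay argument format is likewise an assumption.

```
# Sketch: assembling a category filter from the documented methods.
from telemetry.timeline import tracing_category_filter

category_filter = tracing_category_filter.TracingCategoryFilter(
    filter_string='toplevel,-cc')  # '-' prefix assumed to mean "excluded"
category_filter.AddIncludedCategory('blink.console')
category_filter.AddExcludedCategory('disabled-by-default-gpu.debug')
category_filter.AddSyntheticDelay('cc.BeginMainFrame;0.012')  # format assumed

print(category_filter.filter_string)
print(category_filter.GetDictForChromeTracing())
```
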
diff --git a/catapult/telemetry/docs/pydoc/telemetry.timeline.tracing_config.html b/catapult/telemetry/docs/pydoc/telemetry.timeline.tracing_config.html
new file mode 100644
index 0000000..2208080
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.timeline.tracing_config.html
@@ -0,0 +1,69 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.timeline.tracing_config</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.timeline.html"><font color="#ffffff">timeline</font></a>.tracing_config</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/timeline/tracing_config.py">telemetry/timeline/tracing_config.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="json.html">json</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.tracing_config.html#TracingConfig">TracingConfig</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TracingConfig">class <strong>TracingConfig</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Tracing&nbsp;config&nbsp;is&nbsp;the&nbsp;configuration&nbsp;for&nbsp;Chrome&nbsp;tracing.<br>
+&nbsp;<br>
+This&nbsp;produces&nbsp;the&nbsp;trace&nbsp;config&nbsp;JSON&nbsp;string&nbsp;for&nbsp;Chrome&nbsp;tracing.&nbsp;For&nbsp;the&nbsp;details<br>
+about&nbsp;the&nbsp;JSON&nbsp;string&nbsp;format,&nbsp;see&nbsp;base/trace_event/trace_config.h.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="TracingConfig-GetTraceConfigJsonString"><strong>GetTraceConfigJsonString</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TracingConfig-__init__"><strong>__init__</strong></a>(self, tracing_options, tracing_category_filter)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>tracing_category_filter</strong></dt>
+</dl>
+<dl><dt><strong>tracing_options</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
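
A minimal sketch of how the documented constructor ties the pieces together, assuming a catapult/telemetry checkout on PYTHONPATH:

```
# Sketch: combining TracingOptions and TracingCategoryFilter into a TracingConfig.
from telemetry.timeline import tracing_category_filter
from telemetry.timeline import tracing_config
from telemetry.timeline import tracing_options

options = tracing_options.TracingOptions()
options.enable_chrome_trace = True  # option named in the TracingOptions docs
options.record_mode = tracing_options.RECORD_CONTINUOUSLY

category_filter = tracing_category_filter.TracingCategoryFilter('toplevel')

config = tracing_config.TracingConfig(options, category_filter)
# JSON string in the base/trace_event/trace_config.h format:
print(config.GetTraceConfigJsonString())
```
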
diff --git a/catapult/telemetry/docs/pydoc/telemetry.timeline.tracing_options.html b/catapult/telemetry/docs/pydoc/telemetry.timeline.tracing_options.html
new file mode 100644
index 0000000..9db89e5
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.timeline.tracing_options.html
@@ -0,0 +1,91 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.timeline.tracing_options</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.timeline.html"><font color="#ffffff">timeline</font></a>.tracing_options</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/timeline/tracing_options.py">telemetry/timeline/tracing_options.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.timeline.tracing_options.html#TracingOptions">TracingOptions</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TracingOptions">class <strong>TracingOptions</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Tracing&nbsp;options&nbsp;control&nbsp;which&nbsp;core&nbsp;tracing&nbsp;systems&nbsp;should&nbsp;be&nbsp;enabled.<br>
+&nbsp;<br>
+This&nbsp;simply&nbsp;turns&nbsp;on&nbsp;those&nbsp;systems.&nbsp;If&nbsp;those&nbsp;systems&nbsp;have&nbsp;additional&nbsp;options,<br>
+e.g.&nbsp;what&nbsp;to&nbsp;trace,&nbsp;then&nbsp;they&nbsp;are&nbsp;typically&nbsp;configured&nbsp;by&nbsp;adding<br>
+categories&nbsp;to&nbsp;the&nbsp;TracingCategoryFilter.<br>
+&nbsp;<br>
+Options:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;enable_chrome_trace:&nbsp;a&nbsp;boolean&nbsp;that&nbsp;specifies&nbsp;whether&nbsp;to&nbsp;enable<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;chrome&nbsp;tracing.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;enable_platform_display_trace:&nbsp;a&nbsp;boolean&nbsp;that&nbsp;specifies&nbsp;whether&nbsp;to<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;enable&nbsp;platform&nbsp;display&nbsp;tracing.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;enable_android_graphics_memtrack:&nbsp;a&nbsp;boolean&nbsp;that&nbsp;specifies&nbsp;whether<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;to&nbsp;enable&nbsp;the&nbsp;memtrack_helper&nbsp;daemon&nbsp;to&nbsp;track&nbsp;graphics&nbsp;memory&nbsp;on<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Android&nbsp;(see&nbsp;goo.gl/4Y30p9).&nbsp;Doesn't&nbsp;have&nbsp;any&nbsp;effects&nbsp;on&nbsp;other&nbsp;OSs.<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;The&nbsp;following&nbsp;ones&nbsp;are&nbsp;specific&nbsp;to&nbsp;chrome&nbsp;tracing.&nbsp;See<br>
+&nbsp;&nbsp;&nbsp;&nbsp;base/trace_event/trace_config.h&nbsp;for&nbsp;more&nbsp;information.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;record_mode:&nbsp;can&nbsp;be&nbsp;any&nbsp;mode&nbsp;in&nbsp;RECORD_MODES.&nbsp;This&nbsp;corresponds&nbsp;to<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;record&nbsp;modes&nbsp;in&nbsp;chrome.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;enable_systrace:&nbsp;a&nbsp;boolean&nbsp;that&nbsp;specifies&nbsp;whether&nbsp;to&nbsp;enable&nbsp;systrace.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="TracingOptions-GetDictForChromeTracing"><strong>GetDictForChromeTracing</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TracingOptions-GetTraceOptionsStringForChromeDevtool"><strong>GetTraceOptionsStringForChromeDevtool</strong></a>(self)</dt><dd><tt>Map&nbsp;Chrome&nbsp;tracing&nbsp;options&nbsp;in&nbsp;Telemetry&nbsp;to&nbsp;the&nbsp;DevTools&nbsp;API&nbsp;string.</tt></dd></dl>
+
+<dl><dt><a name="TracingOptions-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>enable_systrace</strong></dt>
+</dl>
+<dl><dt><strong>record_mode</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>ECHO_TO_CONSOLE</strong> = 'trace-to-console'<br>
+<strong>ENABLE_SYSTRACE</strong> = 'enable-systrace'<br>
+<strong>RECORD_AS_MUCH_AS_POSSIBLE</strong> = 'record-as-much-as-possible'<br>
+<strong>RECORD_CONTINUOUSLY</strong> = 'record-continuously'<br>
+<strong>RECORD_MODES</strong> = ['record-until-full', 'record-continuously', 'record-as-much-as-possible', 'trace-to-console']<br>
+<strong>RECORD_UNTIL_FULL</strong> = 'record-until-full'</td></tr></table>
+</body></html>
\ No newline at end of file
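
A short sketch of the options surface documented above; it assumes the documented descriptors (`record_mode`, `enable_systrace`) are writable/readable attributes as listed:

```
# Sketch: choosing a record mode and inspecting the Chrome-facing strings.
from telemetry.timeline import tracing_options

options = tracing_options.TracingOptions()
options.enable_chrome_trace = True  # boolean option from the class docstring

# record_mode must be one of the documented RECORD_MODES values.
options.record_mode = tracing_options.RECORD_AS_MUCH_AS_POSSIBLE
assert options.record_mode in tracing_options.RECORD_MODES

print(options.enable_systrace)                          # documented descriptor
print(options.GetTraceOptionsStringForChromeDevtool())  # DevTools API string
print(options.GetDictForChromeTracing())
```
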
diff --git a/catapult/telemetry/docs/pydoc/telemetry.util.color_histogram.html b/catapult/telemetry/docs/pydoc/telemetry.util.color_histogram.html
new file mode 100644
index 0000000..f52aaba
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.util.color_histogram.html
@@ -0,0 +1,159 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.util.color_histogram</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.util.html"><font color="#ffffff">util</font></a>.color_histogram</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/util/color_histogram.py">telemetry/util/color_histogram.py</a></font></td></tr></table>
+    <p><tt>Color&nbsp;Histograms&nbsp;and&nbsp;implementations&nbsp;of&nbsp;functions&nbsp;operating&nbsp;on&nbsp;them.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="collections.html">collections</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.util.external_modules.html">telemetry.internal.util.external_modules</a><br>
+</td><td width="25%" valign=top><a href="numpy.html">numpy</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial">ColorHistogram(<a href="__builtin__.html#tuple">__builtin__.tuple</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.util.color_histogram.html#ColorHistogram">ColorHistogram</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ColorHistogram">class <strong>ColorHistogram</strong></a>(ColorHistogram)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.util.color_histogram.html#ColorHistogram">ColorHistogram</a></dd>
+<dd>ColorHistogram</dd>
+<dd><a href="__builtin__.html#tuple">__builtin__.tuple</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="ColorHistogram-Distance"><strong>Distance</strong></a>(self, other)</dt></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="ColorHistogram-__new__"><strong>__new__</strong></a>(cls, r, g, b, default_color<font color="#909090">=None</font>)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from ColorHistogram:<br>
+<dl><dt><a name="ColorHistogram-__getnewargs__"><strong>__getnewargs__</strong></a>(self)</dt><dd><tt>Return&nbsp;self&nbsp;as&nbsp;a&nbsp;plain&nbsp;tuple.&nbsp;&nbsp;Used&nbsp;by&nbsp;copy&nbsp;and&nbsp;pickle.</tt></dd></dl>
+
+<dl><dt><a name="ColorHistogram-__getstate__"><strong>__getstate__</strong></a>(self)</dt><dd><tt>Exclude&nbsp;the&nbsp;OrderedDict&nbsp;from&nbsp;pickling</tt></dd></dl>
+
+<dl><dt><a name="ColorHistogram-__repr__"><strong>__repr__</strong></a>(self)</dt><dd><tt>Return&nbsp;a&nbsp;nicely&nbsp;formatted&nbsp;representation&nbsp;string</tt></dd></dl>
+
+<dl><dt><a name="ColorHistogram-_asdict"><strong>_asdict</strong></a>(self)</dt><dd><tt>Return&nbsp;a&nbsp;new&nbsp;OrderedDict&nbsp;which&nbsp;maps&nbsp;field&nbsp;names&nbsp;to&nbsp;their&nbsp;values</tt></dd></dl>
+
+<dl><dt><a name="ColorHistogram-_replace"><strong>_replace</strong></a>(_self, **kwds)</dt><dd><tt>Return&nbsp;a&nbsp;new&nbsp;<a href="#ColorHistogram">ColorHistogram</a>&nbsp;object&nbsp;replacing&nbsp;specified&nbsp;fields&nbsp;with&nbsp;new&nbsp;values</tt></dd></dl>
+
+<hr>
+Class methods inherited from ColorHistogram:<br>
+<dl><dt><a name="ColorHistogram-_make"><strong>_make</strong></a>(cls, iterable, new<font color="#909090">=&lt;built-in method __new__ of type object&gt;</font>, len<font color="#909090">=&lt;built-in function len&gt;</font>)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Make&nbsp;a&nbsp;new&nbsp;<a href="#ColorHistogram">ColorHistogram</a>&nbsp;object&nbsp;from&nbsp;a&nbsp;sequence&nbsp;or&nbsp;iterable</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from ColorHistogram:<br>
+<dl><dt><strong>b</strong></dt>
+<dd><tt>Alias&nbsp;for&nbsp;field&nbsp;number&nbsp;2</tt></dd>
+</dl>
+<dl><dt><strong>default_color</strong></dt>
+<dd><tt>Alias&nbsp;for&nbsp;field&nbsp;number&nbsp;3</tt></dd>
+</dl>
+<dl><dt><strong>g</strong></dt>
+<dd><tt>Alias&nbsp;for&nbsp;field&nbsp;number&nbsp;1</tt></dd>
+</dl>
+<dl><dt><strong>r</strong></dt>
+<dd><tt>Alias&nbsp;for&nbsp;field&nbsp;number&nbsp;0</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from ColorHistogram:<br>
+<dl><dt><strong>_fields</strong> = ('r', 'g', 'b', 'default_color')</dl>
+
+<hr>
+Methods inherited from <a href="__builtin__.html#tuple">__builtin__.tuple</a>:<br>
+<dl><dt><a name="ColorHistogram-__add__"><strong>__add__</strong></a>(...)</dt><dd><tt>x.<a href="#ColorHistogram-__add__">__add__</a>(y)&nbsp;&lt;==&gt;&nbsp;x+y</tt></dd></dl>
+
+<dl><dt><a name="ColorHistogram-__contains__"><strong>__contains__</strong></a>(...)</dt><dd><tt>x.<a href="#ColorHistogram-__contains__">__contains__</a>(y)&nbsp;&lt;==&gt;&nbsp;y&nbsp;in&nbsp;x</tt></dd></dl>
+
+<dl><dt><a name="ColorHistogram-__eq__"><strong>__eq__</strong></a>(...)</dt><dd><tt>x.<a href="#ColorHistogram-__eq__">__eq__</a>(y)&nbsp;&lt;==&gt;&nbsp;x==y</tt></dd></dl>
+
+<dl><dt><a name="ColorHistogram-__ge__"><strong>__ge__</strong></a>(...)</dt><dd><tt>x.<a href="#ColorHistogram-__ge__">__ge__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&gt;=y</tt></dd></dl>
+
+<dl><dt><a name="ColorHistogram-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#ColorHistogram-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ColorHistogram-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#ColorHistogram-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="ColorHistogram-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#ColorHistogram-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="ColorHistogram-__gt__"><strong>__gt__</strong></a>(...)</dt><dd><tt>x.<a href="#ColorHistogram-__gt__">__gt__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&gt;y</tt></dd></dl>
+
+<dl><dt><a name="ColorHistogram-__hash__"><strong>__hash__</strong></a>(...)</dt><dd><tt>x.<a href="#ColorHistogram-__hash__">__hash__</a>()&nbsp;&lt;==&gt;&nbsp;hash(x)</tt></dd></dl>
+
+<dl><dt><a name="ColorHistogram-__iter__"><strong>__iter__</strong></a>(...)</dt><dd><tt>x.<a href="#ColorHistogram-__iter__">__iter__</a>()&nbsp;&lt;==&gt;&nbsp;iter(x)</tt></dd></dl>
+
+<dl><dt><a name="ColorHistogram-__le__"><strong>__le__</strong></a>(...)</dt><dd><tt>x.<a href="#ColorHistogram-__le__">__le__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&lt;=y</tt></dd></dl>
+
+<dl><dt><a name="ColorHistogram-__len__"><strong>__len__</strong></a>(...)</dt><dd><tt>x.<a href="#ColorHistogram-__len__">__len__</a>()&nbsp;&lt;==&gt;&nbsp;len(x)</tt></dd></dl>
+
+<dl><dt><a name="ColorHistogram-__lt__"><strong>__lt__</strong></a>(...)</dt><dd><tt>x.<a href="#ColorHistogram-__lt__">__lt__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&lt;y</tt></dd></dl>
+
+<dl><dt><a name="ColorHistogram-__mul__"><strong>__mul__</strong></a>(...)</dt><dd><tt>x.<a href="#ColorHistogram-__mul__">__mul__</a>(n)&nbsp;&lt;==&gt;&nbsp;x*n</tt></dd></dl>
+
+<dl><dt><a name="ColorHistogram-__ne__"><strong>__ne__</strong></a>(...)</dt><dd><tt>x.<a href="#ColorHistogram-__ne__">__ne__</a>(y)&nbsp;&lt;==&gt;&nbsp;x!=y</tt></dd></dl>
+
+<dl><dt><a name="ColorHistogram-__rmul__"><strong>__rmul__</strong></a>(...)</dt><dd><tt>x.<a href="#ColorHistogram-__rmul__">__rmul__</a>(n)&nbsp;&lt;==&gt;&nbsp;n*x</tt></dd></dl>
+
+<dl><dt><a name="ColorHistogram-__sizeof__"><strong>__sizeof__</strong></a>(...)</dt><dd><tt>T.<a href="#ColorHistogram-__sizeof__">__sizeof__</a>()&nbsp;--&nbsp;size&nbsp;of&nbsp;T&nbsp;in&nbsp;memory,&nbsp;in&nbsp;bytes</tt></dd></dl>
+
+<dl><dt><a name="ColorHistogram-count"><strong>count</strong></a>(...)</dt><dd><tt>T.<a href="#ColorHistogram-count">count</a>(value)&nbsp;-&gt;&nbsp;integer&nbsp;--&nbsp;return&nbsp;number&nbsp;of&nbsp;occurrences&nbsp;of&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="ColorHistogram-index"><strong>index</strong></a>(...)</dt><dd><tt>T.<a href="#ColorHistogram-index">index</a>(value,&nbsp;[start,&nbsp;[stop]])&nbsp;-&gt;&nbsp;integer&nbsp;--&nbsp;return&nbsp;first&nbsp;index&nbsp;of&nbsp;value.<br>
+Raises&nbsp;ValueError&nbsp;if&nbsp;the&nbsp;value&nbsp;is&nbsp;not&nbsp;present.</tt></dd></dl>
+
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-HistogramDistance"><strong>HistogramDistance</strong></a>(hist1, hist2, default_color<font color="#909090">=None</font>)</dt><dd><tt>Earth&nbsp;mover's&nbsp;distance.<br>
+<a href="http://en.wikipedia.org/wiki/Earth_mover's_distance">http://en.wikipedia.org/wiki/Earth_mover's_distance</a></tt></dd></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>division</strong> = _Feature((2, 2, 0, 'alpha', 2), (3, 0, 0, 'alpha', 0), 8192)</td></tr></table>
+</body></html>
\ No newline at end of file
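
For orientation, a sketch of the histogram comparison documented above. It assumes each channel is a 256-bucket count list (as GetColorHistogram in image_util describes) and that `Distance` accepts another ColorHistogram:

```
# Sketch: comparing two synthetic color histograms.
# HistogramDistance is documented as an earth mover's distance; Distance on the
# namedtuple is assumed to compare against another ColorHistogram.
from telemetry.util import color_histogram

def solid_channel(bucket, pixel_count):
    counts = [0] * 256          # one count per 8-bit channel value
    counts[bucket] = pixel_count
    return counts

dark = color_histogram.ColorHistogram(
    solid_channel(0, 100), solid_channel(0, 100), solid_channel(0, 100))
light = color_histogram.ColorHistogram(
    solid_channel(255, 100), solid_channel(255, 100), solid_channel(255, 100))

print(dark.Distance(light))  # larger for more dissimilar color distributions
```
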
diff --git a/catapult/telemetry/docs/pydoc/telemetry.util.html b/catapult/telemetry/docs/pydoc/telemetry.util.html
new file mode 100644
index 0000000..b8777e7
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.util.html
@@ -0,0 +1,36 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.util</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.util</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/util/__init__.py">telemetry/util/__init__.py</a></font></td></tr></table>
+    <p><tt>A&nbsp;library&nbsp;for&nbsp;bootstrapping&nbsp;Telemetry&nbsp;performance&nbsp;testing.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.util.color_histogram.html">color_histogram</a><br>
+<a href="telemetry.util.color_histogram_unittest.html">color_histogram_unittest</a><br>
+<a href="telemetry.util.image_util.html">image_util</a><br>
+<a href="telemetry.util.image_util_unittest.html">image_util_unittest</a><br>
+</td><td width="25%" valign=top><a href="telemetry.util.mac.html"><strong>mac</strong>&nbsp;(package)</a><br>
+<a href="telemetry.util.perf_result_data_type.html">perf_result_data_type</a><br>
+<a href="telemetry.util.perf_tests_helper.html">perf_tests_helper</a><br>
+<a href="telemetry.util.perf_tests_results_helper.html">perf_tests_results_helper</a><br>
+</td><td width="25%" valign=top><a href="telemetry.util.process_statistic_timeline_data.html">process_statistic_timeline_data</a><br>
+<a href="telemetry.util.process_statistic_timeline_data_unittest.html">process_statistic_timeline_data_unittest</a><br>
+<a href="telemetry.util.rgba_color.html">rgba_color</a><br>
+<a href="telemetry.util.statistics.html">statistics</a><br>
+</td><td width="25%" valign=top><a href="telemetry.util.statistics_unittest.html">statistics_unittest</a><br>
+<a href="telemetry.util.wpr_modes.html">wpr_modes</a><br>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.util.image_util.html b/catapult/telemetry/docs/pydoc/telemetry.util.image_util.html
new file mode 100644
index 0000000..6e94ebe
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.util.image_util.html
@@ -0,0 +1,90 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.util.image_util</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.util.html"><font color="#ffffff">util</font></a>.image_util</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/util/image_util.py">telemetry/util/image_util.py</a></font></td></tr></table>
+    <p><tt>Provides&nbsp;implementations&nbsp;of&nbsp;basic&nbsp;image&nbsp;processing&nbsp;functions.<br>
+&nbsp;<br>
+Implements&nbsp;basic&nbsp;image&nbsp;processing&nbsp;functions,&nbsp;such&nbsp;as&nbsp;reading/writing&nbsp;images,<br>
+cropping,&nbsp;finding&nbsp;the&nbsp;bounding&nbsp;box&nbsp;of&nbsp;a&nbsp;color&nbsp;and&nbsp;diffing&nbsp;images.<br>
+&nbsp;<br>
+When&nbsp;numpy&nbsp;is&nbsp;present,&nbsp;image_util_numpy_impl&nbsp;is&nbsp;used&nbsp;for&nbsp;the&nbsp;implementation&nbsp;of<br>
+this&nbsp;interface.&nbsp;The&nbsp;old&nbsp;bitmap&nbsp;implementation&nbsp;(image_util_bitmap_impl)&nbsp;is&nbsp;used<br>
+as&nbsp;a&nbsp;fallback&nbsp;when&nbsp;numpy&nbsp;is&nbsp;not&nbsp;present.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="base64.html">base64</a><br>
+<a href="telemetry.internal.util.external_modules.html">telemetry.internal.util.external_modules</a><br>
+</td><td width="25%" valign=top><a href="telemetry.internal.image_processing.image_util_numpy_impl.html">telemetry.internal.image_processing.image_util_numpy_impl</a><br>
+<a href="telemetry.internal.image_processing.image_util_numpy_impl.html">telemetry.internal.image_processing.image_util_numpy_impl</a><br>
+</td><td width="25%" valign=top><a href="numpy.html">numpy</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-AreEqual"><strong>AreEqual</strong></a>(image1, image2, tolerance<font color="#909090">=0</font>, likely_equal<font color="#909090">=True</font>)</dt><dd><tt>Determines&nbsp;whether&nbsp;two&nbsp;images&nbsp;are&nbsp;identical&nbsp;within&nbsp;a&nbsp;given&nbsp;tolerance.<br>
+Setting&nbsp;likely_equal&nbsp;to&nbsp;False&nbsp;enables&nbsp;short-circuit&nbsp;equality&nbsp;testing,&nbsp;which<br>
+is&nbsp;about&nbsp;2-3x&nbsp;slower&nbsp;for&nbsp;equal&nbsp;images,&nbsp;but&nbsp;can&nbsp;be&nbsp;image&nbsp;height&nbsp;times&nbsp;faster<br>
+if&nbsp;the&nbsp;images&nbsp;are&nbsp;not&nbsp;equal.</tt></dd></dl>
+ <dl><dt><a name="-Channels"><strong>Channels</strong></a>(image)</dt><dd><tt>Number&nbsp;of&nbsp;color&nbsp;channels&nbsp;in&nbsp;the&nbsp;image.</tt></dd></dl>
+ <dl><dt><a name="-Crop"><strong>Crop</strong></a>(image, left, top, width, height)</dt><dd><tt>Crops&nbsp;the&nbsp;current&nbsp;image&nbsp;down&nbsp;to&nbsp;the&nbsp;specified&nbsp;box.</tt></dd></dl>
+ <dl><dt><a name="-Diff"><strong>Diff</strong></a>(image1, image2)</dt><dd><tt>Returns&nbsp;a&nbsp;new&nbsp;image&nbsp;that&nbsp;represents&nbsp;the&nbsp;difference&nbsp;between&nbsp;this&nbsp;image<br>
+and&nbsp;another&nbsp;image.</tt></dd></dl>
+ <dl><dt><a name="-FromBase64Png"><strong>FromBase64Png</strong></a>(base64_png)</dt><dd><tt>Create&nbsp;an&nbsp;image&nbsp;from&nbsp;raw&nbsp;PNG&nbsp;data&nbsp;encoded&nbsp;in&nbsp;base64.</tt></dd></dl>
+ <dl><dt><a name="-FromPng"><strong>FromPng</strong></a>(png_data)</dt><dd><tt>Create&nbsp;an&nbsp;image&nbsp;from&nbsp;raw&nbsp;PNG&nbsp;data.</tt></dd></dl>
+ <dl><dt><a name="-FromPngFile"><strong>FromPngFile</strong></a>(path)</dt><dd><tt>Create&nbsp;an&nbsp;image&nbsp;from&nbsp;a&nbsp;PNG&nbsp;file.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;path:&nbsp;The&nbsp;path&nbsp;to&nbsp;the&nbsp;PNG&nbsp;file.</tt></dd></dl>
+ <dl><dt><a name="-FromRGBPixels"><strong>FromRGBPixels</strong></a>(width, height, pixels, bpp<font color="#909090">=3</font>)</dt><dd><tt>Create&nbsp;an&nbsp;image&nbsp;from&nbsp;an&nbsp;array&nbsp;of&nbsp;rgb&nbsp;pixels.<br>
+&nbsp;<br>
+Ignores&nbsp;alpha&nbsp;channel&nbsp;if&nbsp;present.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;width,&nbsp;height:&nbsp;int,&nbsp;the&nbsp;width&nbsp;and&nbsp;height&nbsp;of&nbsp;the&nbsp;image.<br>
+&nbsp;&nbsp;pixels:&nbsp;The&nbsp;flat&nbsp;array&nbsp;of&nbsp;pixels&nbsp;in&nbsp;the&nbsp;form&nbsp;of&nbsp;[r,g,b[,a],r,g,b[,a],...]<br>
+&nbsp;&nbsp;bpp:&nbsp;3&nbsp;for&nbsp;RGB,&nbsp;4&nbsp;for&nbsp;RGBA.</tt></dd></dl>
+ <dl><dt><a name="-GetBoundingBox"><strong>GetBoundingBox</strong></a>(image, color, tolerance<font color="#909090">=0</font>)</dt><dd><tt>Finds&nbsp;the&nbsp;minimum&nbsp;box&nbsp;surrounding&nbsp;all&nbsp;occurrences&nbsp;of&nbsp;bgr&nbsp;|color|.<br>
+&nbsp;<br>
+Ignores&nbsp;the&nbsp;alpha&nbsp;channel.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;color:&nbsp;RgbaColor,&nbsp;bounding&nbsp;box&nbsp;color.<br>
+&nbsp;&nbsp;tolerance:&nbsp;int,&nbsp;per-channel&nbsp;tolerance&nbsp;for&nbsp;the&nbsp;bounding&nbsp;box&nbsp;color.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;(top,&nbsp;left,&nbsp;width,&nbsp;height),&nbsp;match_count</tt></dd></dl>
+ <dl><dt><a name="-GetColorHistogram"><strong>GetColorHistogram</strong></a>(image, ignore_color<font color="#909090">=None</font>, tolerance<font color="#909090">=0</font>)</dt><dd><tt>Computes&nbsp;a&nbsp;histogram&nbsp;of&nbsp;the&nbsp;pixel&nbsp;colors&nbsp;in&nbsp;this&nbsp;image.<br>
+Args:<br>
+&nbsp;&nbsp;ignore_color:&nbsp;An&nbsp;RgbaColor&nbsp;to&nbsp;exclude&nbsp;from&nbsp;the&nbsp;bucket&nbsp;counts.<br>
+&nbsp;&nbsp;tolerance:&nbsp;A&nbsp;tolerance&nbsp;for&nbsp;the&nbsp;ignore_color.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;ColorHistogram&nbsp;namedtuple&nbsp;with&nbsp;256&nbsp;integers&nbsp;in&nbsp;each&nbsp;field:&nbsp;r,&nbsp;g,&nbsp;and&nbsp;b.</tt></dd></dl>
+ <dl><dt><a name="-GetPixelColor"><strong>GetPixelColor</strong></a>(image, x, y)</dt><dd><tt>Returns&nbsp;a&nbsp;RgbaColor&nbsp;for&nbsp;the&nbsp;pixel&nbsp;at&nbsp;(x,&nbsp;y).</tt></dd></dl>
+ <dl><dt><a name="-Height"><strong>Height</strong></a>(image)</dt><dd><tt>Height&nbsp;of&nbsp;the&nbsp;image.</tt></dd></dl>
+ <dl><dt><a name="-Pixels"><strong>Pixels</strong></a>(image)</dt><dd><tt>Flat&nbsp;RGB&nbsp;pixel&nbsp;array&nbsp;of&nbsp;the&nbsp;image.</tt></dd></dl>
+ <dl><dt><a name="-Width"><strong>Width</strong></a>(image)</dt><dd><tt>Width&nbsp;of&nbsp;the&nbsp;image.</tt></dd></dl>
+ <dl><dt><a name="-WritePngFile"><strong>WritePngFile</strong></a>(image, path)</dt><dd><tt>Write&nbsp;an&nbsp;image&nbsp;to&nbsp;a&nbsp;PNG&nbsp;file.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;image:&nbsp;an&nbsp;image&nbsp;object.<br>
+&nbsp;&nbsp;path:&nbsp;The&nbsp;path&nbsp;to&nbsp;the&nbsp;PNG&nbsp;file.&nbsp;Must&nbsp;end&nbsp;in&nbsp;'png'&nbsp;or&nbsp;an<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;AssertionError&nbsp;will&nbsp;be&nbsp;raised.</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
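
A brief sketch of a round trip through the documented image_util helpers; the paths and coordinates are illustrative, and numpy (or the bitmap fallback) is assumed to be available:

```
# Sketch: read, crop, inspect, compare, and write a PNG with image_util.
from telemetry.util import image_util

image = image_util.FromPngFile('/tmp/screenshot.png')  # illustrative path
print(image_util.Width(image), image_util.Height(image))

# Crop a 100x50 box whose top-left corner is at (10, 20).
cropped = image_util.Crop(image, 10, 20, 100, 50)
print(image_util.GetPixelColor(cropped, 0, 0))

# Pixel comparison with a small per-channel tolerance.
print(image_util.AreEqual(cropped, cropped, tolerance=2))

image_util.WritePngFile(cropped, '/tmp/cropped.png')  # path must end in 'png'
```
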
diff --git a/catapult/telemetry/docs/pydoc/telemetry.util.mac.html b/catapult/telemetry/docs/pydoc/telemetry.util.mac.html
new file mode 100644
index 0000000..6d999cf
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.util.mac.html
@@ -0,0 +1,25 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.util.mac</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.util.html"><font color="#ffffff">util</font></a>.mac</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/util/mac/__init__.py">telemetry/util/mac/__init__.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.util.mac.keychain_helper.html">keychain_helper</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.util.mac.keychain_helper.html b/catapult/telemetry/docs/pydoc/telemetry.util.mac.keychain_helper.html
new file mode 100644
index 0000000..870469d
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.util.mac.keychain_helper.html
@@ -0,0 +1,43 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.util.mac.keychain_helper</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.util.html"><font color="#ffffff">util</font></a>.<a href="telemetry.util.mac.html"><font color="#ffffff">mac</font></a>.keychain_helper</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/util/mac/keychain_helper.py">telemetry/util/mac/keychain_helper.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.internal.util.binary_manager.html">telemetry.internal.util.binary_manager</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.os_version.html">telemetry.core.os_version</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.platform.html">telemetry.core.platform</a><br>
+</td><td width="25%" valign=top><a href="subprocess.html">subprocess</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-DoesKeychainHaveTimeout"><strong>DoesKeychainHaveTimeout</strong></a>()</dt><dd><tt>Returns&nbsp;True&nbsp;if&nbsp;the&nbsp;keychain&nbsp;will&nbsp;lock&nbsp;itself&nbsp;have&nbsp;a&nbsp;period&nbsp;of&nbsp;time.<br>
+&nbsp;<br>
+This&nbsp;method&nbsp;will&nbsp;trigger&nbsp;a&nbsp;blocking,&nbsp;modal&nbsp;dialog&nbsp;if&nbsp;the&nbsp;keychain&nbsp;is<br>
+locked.</tt></dd></dl>
+ <dl><dt><a name="-IsKeychainConfiguredForBotsWithChrome"><strong>IsKeychainConfiguredForBotsWithChrome</strong></a>()</dt></dl>
+ <dl><dt><a name="-IsKeychainConfiguredForBotsWithChromium"><strong>IsKeychainConfiguredForBotsWithChromium</strong></a>()</dt></dl>
+ <dl><dt><a name="-IsKeychainLocked"><strong>IsKeychainLocked</strong></a>()</dt><dd><tt>Returns&nbsp;True&nbsp;if&nbsp;the&nbsp;keychain&nbsp;is&nbsp;locked,&nbsp;or&nbsp;if&nbsp;there&nbsp;is&nbsp;an&nbsp;error&nbsp;determining<br>
+the&nbsp;keychain&nbsp;state.</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
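
A sketch of how a Mac bot setup check might chain the functions documented above; note the docs warn that DoesKeychainHaveTimeout() can trigger a modal dialog when the keychain is locked, so it is only consulted after the lock check:

```
# Sketch: gating a bot run on the documented keychain checks (Mac only).
from telemetry.util.mac import keychain_helper

def keychain_ready_for_bots():
    if keychain_helper.IsKeychainLocked():
        return False  # also covers errors determining the keychain state
    if keychain_helper.DoesKeychainHaveTimeout():
        return False  # the keychain would re-lock itself mid-run
    return (keychain_helper.IsKeychainConfiguredForBotsWithChrome() or
            keychain_helper.IsKeychainConfiguredForBotsWithChromium())
```
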
diff --git a/catapult/telemetry/docs/pydoc/telemetry.util.perf_result_data_type.html b/catapult/telemetry/docs/pydoc/telemetry.util.perf_result_data_type.html
new file mode 100644
index 0000000..5265b99
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.util.perf_result_data_type.html
@@ -0,0 +1,38 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.util.perf_result_data_type</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.util.html"><font color="#ffffff">util</font></a>.perf_result_data_type</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/util/perf_result_data_type.py">telemetry/util/perf_result_data_type.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-IsHistogram"><strong>IsHistogram</strong></a>(datatype)</dt></dl>
+ <dl><dt><a name="-IsValidType"><strong>IsValidType</strong></a>(datatype)</dt></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>ALL_TYPES</strong> = ['default', 'unimportant', 'histogram', 'unimportant-histogram', 'informational']<br>
+<strong>DEFAULT</strong> = 'default'<br>
+<strong>HISTOGRAM</strong> = 'histogram'<br>
+<strong>INFORMATIONAL</strong> = 'informational'<br>
+<strong>UNIMPORTANT</strong> = 'unimportant'<br>
+<strong>UNIMPORTANT_HISTOGRAM</strong> = 'unimportant-histogram'</td></tr></table>
+</body></html>
\ No newline at end of file
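
A small sketch using only the constants and helpers documented above:

```
# Sketch: validating and classifying a perf result data type.
from telemetry.util import perf_result_data_type

def describe(datatype):
    if not perf_result_data_type.IsValidType(datatype):
        raise ValueError('unknown result data type: %r' % datatype)
    if perf_result_data_type.IsHistogram(datatype):
        return 'histogram result'
    return 'scalar/list result'

for datatype in perf_result_data_type.ALL_TYPES:
    print(datatype, describe(datatype))
```
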
diff --git a/catapult/telemetry/docs/pydoc/telemetry.util.perf_tests_helper.html b/catapult/telemetry/docs/pydoc/telemetry.util.perf_tests_helper.html
new file mode 100644
index 0000000..6feac66
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.util.perf_tests_helper.html
@@ -0,0 +1,25 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.util.perf_tests_helper</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.util.html"><font color="#ffffff">util</font></a>.perf_tests_helper</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/util/perf_tests_helper.py">telemetry/util/perf_tests_helper.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.util.perf_tests_results_helper.html">telemetry.util.perf_tests_results_helper</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.util.perf_tests_results_helper.html b/catapult/telemetry/docs/pydoc/telemetry.util.perf_tests_results_helper.html
new file mode 100644
index 0000000..e8bb07b
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.util.perf_tests_results_helper.html
@@ -0,0 +1,68 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.util.perf_tests_results_helper</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.util.html"><font color="#ffffff">util</font></a>.perf_tests_results_helper</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/util/perf_tests_results_helper.py">telemetry/util/perf_tests_results_helper.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="json.html">json</a><br>
+<a href="math.html">math</a><br>
+</td><td width="25%" valign=top><a href="telemetry.util.perf_result_data_type.html">telemetry.util.perf_result_data_type</a><br>
+<a href="re.html">re</a><br>
+</td><td width="25%" valign=top><a href="sys.html">sys</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-FlattenList"><strong>FlattenList</strong></a>(values)</dt><dd><tt>Returns&nbsp;a&nbsp;simple&nbsp;list&nbsp;without&nbsp;sub-lists.</tt></dd></dl>
+ <dl><dt><a name="-GeomMeanAndStdDevFromHistogram"><strong>GeomMeanAndStdDevFromHistogram</strong></a>(histogram_json)</dt></dl>
+ <dl><dt><a name="-PrintPages"><strong>PrintPages</strong></a>(page_list)</dt><dd><tt>Prints&nbsp;list&nbsp;of&nbsp;pages&nbsp;to&nbsp;stdout&nbsp;in&nbsp;the&nbsp;format&nbsp;required&nbsp;by&nbsp;perf&nbsp;tests.</tt></dd></dl>
+ <dl><dt><a name="-PrintPerfResult"><strong>PrintPerfResult</strong></a>(measurement, trace, values, units, result_type<font color="#909090">='default'</font>, print_to_stdout<font color="#909090">=True</font>)</dt><dd><tt>Prints&nbsp;numerical&nbsp;data&nbsp;to&nbsp;stdout&nbsp;in&nbsp;the&nbsp;format&nbsp;required&nbsp;by&nbsp;perf&nbsp;tests.<br>
+&nbsp;<br>
+The&nbsp;string&nbsp;args&nbsp;may&nbsp;be&nbsp;empty&nbsp;but&nbsp;they&nbsp;must&nbsp;not&nbsp;contain&nbsp;any&nbsp;colons&nbsp;(:)&nbsp;or<br>
+equals&nbsp;signs&nbsp;(=).<br>
+This&nbsp;is&nbsp;parsed&nbsp;by&nbsp;the&nbsp;buildbot&nbsp;using:<br>
+<a href="http://src.chromium.org/viewvc/chrome/trunk/tools/build/scripts/slave/process_log_utils.py">http://src.chromium.org/viewvc/chrome/trunk/tools/build/scripts/slave/process_log_utils.py</a><br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;measurement:&nbsp;A&nbsp;description&nbsp;of&nbsp;the&nbsp;quantity&nbsp;being&nbsp;measured,&nbsp;e.g.&nbsp;"vm_peak".<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;On&nbsp;the&nbsp;dashboard,&nbsp;this&nbsp;maps&nbsp;to&nbsp;a&nbsp;particular&nbsp;graph.&nbsp;Mandatory.<br>
+&nbsp;&nbsp;trace:&nbsp;A&nbsp;description&nbsp;of&nbsp;the&nbsp;particular&nbsp;data&nbsp;point,&nbsp;e.g.&nbsp;"reference".<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;On&nbsp;the&nbsp;dashboard,&nbsp;this&nbsp;maps&nbsp;to&nbsp;a&nbsp;particular&nbsp;"line"&nbsp;in&nbsp;the&nbsp;graph.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Mandatory.<br>
+&nbsp;&nbsp;values:&nbsp;A&nbsp;list&nbsp;of&nbsp;numeric&nbsp;measured&nbsp;values.&nbsp;An&nbsp;N-dimensional&nbsp;list&nbsp;will&nbsp;be<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;flattened&nbsp;and&nbsp;treated&nbsp;as&nbsp;a&nbsp;simple&nbsp;list.<br>
+&nbsp;&nbsp;units:&nbsp;A&nbsp;description&nbsp;of&nbsp;the&nbsp;units&nbsp;of&nbsp;measure,&nbsp;e.g.&nbsp;"bytes".<br>
+&nbsp;&nbsp;result_type:&nbsp;Accepts&nbsp;values&nbsp;of&nbsp;perf_result_data_type.ALL_TYPES.<br>
+&nbsp;&nbsp;print_to_stdout:&nbsp;If&nbsp;True,&nbsp;prints&nbsp;the&nbsp;output&nbsp;to&nbsp;stdout&nbsp;instead&nbsp;of&nbsp;returning<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;it&nbsp;to&nbsp;the&nbsp;caller.<br>
+&nbsp;<br>
+&nbsp;&nbsp;Returns:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;String&nbsp;of&nbsp;the&nbsp;formatted&nbsp;perf&nbsp;result.</tt></dd></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>RESULT_TYPES</strong> = {'default': '*RESULT ', 'histogram': '*HISTOGRAM ', 'informational': '', 'unimportant': 'RESULT ', 'unimportant-histogram': 'HISTOGRAM '}</td></tr></table>
+</body></html>
\ No newline at end of file
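PrintPerfResult is the entry point most callers use directly. A hedged usage sketch, assuming the conventional buildbot line shape of "<prefix><measurement>: <trace>= <values> <units>" with the prefix drawn from the RESULT_TYPES mapping above (the exact formatting is defined by perf_tests_results_helper.py and the linked buildbot log parser, not by this sketch):

    from telemetry.util import perf_tests_results_helper

    # measurement and trace must not contain ':' or '='; values may be an
    # N-dimensional list, which is flattened before formatting.
    line = perf_tests_results_helper.PrintPerfResult(
        measurement='warm_times', trace='example_com_cold',
        values=[21, 24, [22, 23]], units='ms',
        result_type='unimportant',   # any entry of perf_result_data_type.ALL_TYPES
        print_to_stdout=False)       # ask for the line back instead of printing it
    # Expected shape (illustrative, not verified against this revision):
    #   RESULT warm_times: example_com_cold= [21,24,22,23] ms
    print(line)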
diff --git a/catapult/telemetry/docs/pydoc/telemetry.util.process_statistic_timeline_data.html b/catapult/telemetry/docs/pydoc/telemetry.util.process_statistic_timeline_data.html
new file mode 100644
index 0000000..3695c0c
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.util.process_statistic_timeline_data.html
@@ -0,0 +1,114 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.util.process_statistic_timeline_data</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.util.html"><font color="#ffffff">util</font></a>.process_statistic_timeline_data</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/util/process_statistic_timeline_data.py">telemetry/util/process_statistic_timeline_data.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.util.process_statistic_timeline_data.html#ProcessStatisticTimelineData">ProcessStatisticTimelineData</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.util.process_statistic_timeline_data.html#IdleWakeupTimelineData">IdleWakeupTimelineData</a>
+</font></dt></dl>
+</dd>
+</dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="IdleWakeupTimelineData">class <strong>IdleWakeupTimelineData</strong></a>(<a href="telemetry.util.process_statistic_timeline_data.html#ProcessStatisticTimelineData">ProcessStatisticTimelineData</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;<a href="#ProcessStatisticTimelineData">ProcessStatisticTimelineData</a>&nbsp;to&nbsp;hold&nbsp;idle&nbsp;wakeups.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.util.process_statistic_timeline_data.html#IdleWakeupTimelineData">IdleWakeupTimelineData</a></dd>
+<dd><a href="telemetry.util.process_statistic_timeline_data.html#ProcessStatisticTimelineData">ProcessStatisticTimelineData</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.util.process_statistic_timeline_data.html#ProcessStatisticTimelineData">ProcessStatisticTimelineData</a>:<br>
+<dl><dt><a name="IdleWakeupTimelineData-__add__"><strong>__add__</strong></a>(self, other)</dt><dd><tt>The&nbsp;result&nbsp;contains&nbsp;pids&nbsp;from&nbsp;both&nbsp;|self|&nbsp;and&nbsp;|other|,&nbsp;if&nbsp;duplicate<br>
+pids&nbsp;are&nbsp;found&nbsp;between&nbsp;objects,&nbsp;an&nbsp;error&nbsp;will&nbsp;occur.</tt></dd></dl>
+
+<dl><dt><a name="IdleWakeupTimelineData-__init__"><strong>__init__</strong></a>(self, pid, value)</dt></dl>
+
+<dl><dt><a name="IdleWakeupTimelineData-__sub__"><strong>__sub__</strong></a>(self, other)</dt><dd><tt>The&nbsp;results&nbsp;of&nbsp;subtraction&nbsp;is&nbsp;an&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;holding&nbsp;only&nbsp;the&nbsp;pids&nbsp;contained<br>
+in&nbsp;|self|.<br>
+&nbsp;<br>
+The&nbsp;motivation&nbsp;is&nbsp;that&nbsp;some&nbsp;processes&nbsp;may&nbsp;have&nbsp;died&nbsp;between&nbsp;two&nbsp;consecutive<br>
+measurements.&nbsp;The&nbsp;desired&nbsp;behavior&nbsp;is&nbsp;to&nbsp;only&nbsp;make&nbsp;calculations&nbsp;based&nbsp;on<br>
+the&nbsp;processes&nbsp;that&nbsp;are&nbsp;alive&nbsp;at&nbsp;the&nbsp;end&nbsp;of&nbsp;the&nbsp;second&nbsp;measurement.</tt></dd></dl>
+
+<dl><dt><a name="IdleWakeupTimelineData-total_sum"><strong>total_sum</strong></a>(self)</dt><dd><tt>Returns&nbsp;the&nbsp;sum&nbsp;of&nbsp;all&nbsp;values&nbsp;contained&nbsp;by&nbsp;this&nbsp;<a href="__builtin__.html#object">object</a>.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.util.process_statistic_timeline_data.html#ProcessStatisticTimelineData">ProcessStatisticTimelineData</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>value_by_pid</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ProcessStatisticTimelineData">class <strong>ProcessStatisticTimelineData</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Holds&nbsp;value&nbsp;of&nbsp;a&nbsp;stat&nbsp;for&nbsp;one&nbsp;or&nbsp;more&nbsp;processes.<br>
+&nbsp;<br>
+This&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;can&nbsp;hold&nbsp;a&nbsp;value&nbsp;for&nbsp;more&nbsp;than&nbsp;one&nbsp;pid&nbsp;by&nbsp;adding&nbsp;another<br>
+<a href="__builtin__.html#object">object</a>.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="ProcessStatisticTimelineData-__add__"><strong>__add__</strong></a>(self, other)</dt><dd><tt>The&nbsp;result&nbsp;contains&nbsp;pids&nbsp;from&nbsp;both&nbsp;|self|&nbsp;and&nbsp;|other|,&nbsp;if&nbsp;duplicate<br>
+pids&nbsp;are&nbsp;found&nbsp;between&nbsp;objects,&nbsp;an&nbsp;error&nbsp;will&nbsp;occur.</tt></dd></dl>
+
+<dl><dt><a name="ProcessStatisticTimelineData-__init__"><strong>__init__</strong></a>(self, pid, value)</dt></dl>
+
+<dl><dt><a name="ProcessStatisticTimelineData-__sub__"><strong>__sub__</strong></a>(self, other)</dt><dd><tt>The&nbsp;results&nbsp;of&nbsp;subtraction&nbsp;is&nbsp;an&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;holding&nbsp;only&nbsp;the&nbsp;pids&nbsp;contained<br>
+in&nbsp;|self|.<br>
+&nbsp;<br>
+The&nbsp;motivation&nbsp;is&nbsp;that&nbsp;some&nbsp;processes&nbsp;may&nbsp;have&nbsp;died&nbsp;between&nbsp;two&nbsp;consecutive<br>
+measurements.&nbsp;The&nbsp;desired&nbsp;behavior&nbsp;is&nbsp;to&nbsp;only&nbsp;make&nbsp;calculations&nbsp;based&nbsp;on<br>
+the&nbsp;processes&nbsp;that&nbsp;are&nbsp;alive&nbsp;at&nbsp;the&nbsp;end&nbsp;of&nbsp;the&nbsp;second&nbsp;measurement.</tt></dd></dl>
+
+<dl><dt><a name="ProcessStatisticTimelineData-total_sum"><strong>total_sum</strong></a>(self)</dt><dd><tt>Returns&nbsp;the&nbsp;sum&nbsp;of&nbsp;all&nbsp;values&nbsp;contained&nbsp;by&nbsp;this&nbsp;<a href="__builtin__.html#object">object</a>.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>value_by_pid</strong></dt>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
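The __add__/__sub__ semantics documented above are easiest to see in a small usage sketch; the per-pid arithmetic in the comments is an assumption drawn from the docstrings, not verified against this revision:

    from telemetry.util.process_statistic_timeline_data import (
        ProcessStatisticTimelineData)

    # One value per (pid, statistic) pair at the start of a measurement window.
    start_a = ProcessStatisticTimelineData(pid=1234, value=10)
    start_b = ProcessStatisticTimelineData(pid=5678, value=3)
    start = start_a + start_b   # pids from both objects; duplicate pids raise

    # Later measurement: pid 5678 has since died, only pid 1234 remains.
    end = ProcessStatisticTimelineData(pid=1234, value=25)
    delta = end - start         # keeps only pids present in |end| (i.e. self)
    # Presumably 25 - 10 = 15 for pid 1234.
    print(delta.total_sum(), delta.value_by_pid)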
diff --git a/catapult/telemetry/docs/pydoc/telemetry.util.rgba_color.html b/catapult/telemetry/docs/pydoc/telemetry.util.rgba_color.html
new file mode 100644
index 0000000..737d51f
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.util.rgba_color.html
@@ -0,0 +1,160 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.util.rgba_color</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.util.html"><font color="#ffffff">util</font></a>.rgba_color</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/util/rgba_color.py">telemetry/util/rgba_color.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="collections.html">collections</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial">RgbaColor(<a href="__builtin__.html#tuple">__builtin__.tuple</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.util.rgba_color.html#RgbaColor">RgbaColor</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="RgbaColor">class <strong>RgbaColor</strong></a>(RgbaColor)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Encapsulates&nbsp;an&nbsp;RGBA&nbsp;color&nbsp;retrieved&nbsp;from&nbsp;an&nbsp;image.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.util.rgba_color.html#RgbaColor">RgbaColor</a></dd>
+<dd>RgbaColor</dd>
+<dd><a href="__builtin__.html#tuple">__builtin__.tuple</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="RgbaColor-AssertIsRGB"><strong>AssertIsRGB</strong></a>(self, r, g, b, tolerance<font color="#909090">=0</font>)</dt></dl>
+
+<dl><dt><a name="RgbaColor-AssertIsRGBA"><strong>AssertIsRGBA</strong></a>(self, r, g, b, a, tolerance<font color="#909090">=0</font>)</dt></dl>
+
+<dl><dt><a name="RgbaColor-IsEqual"><strong>IsEqual</strong></a>(self, expected_color, tolerance<font color="#909090">=0</font>)</dt><dd><tt>Verifies&nbsp;that&nbsp;the&nbsp;color&nbsp;is&nbsp;within&nbsp;a&nbsp;given&nbsp;tolerance&nbsp;of<br>
+the&nbsp;expected&nbsp;color.</tt></dd></dl>
+
+<dl><dt><a name="RgbaColor-__int__"><strong>__int__</strong></a>(self)</dt></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="RgbaColor-__new__"><strong>__new__</strong></a>(cls, r, g, b, a<font color="#909090">=255</font>)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from RgbaColor:<br>
+<dl><dt><a name="RgbaColor-__getnewargs__"><strong>__getnewargs__</strong></a>(self)</dt><dd><tt>Return&nbsp;self&nbsp;as&nbsp;a&nbsp;plain&nbsp;tuple.&nbsp;&nbsp;Used&nbsp;by&nbsp;copy&nbsp;and&nbsp;pickle.</tt></dd></dl>
+
+<dl><dt><a name="RgbaColor-__getstate__"><strong>__getstate__</strong></a>(self)</dt><dd><tt>Exclude&nbsp;the&nbsp;OrderedDict&nbsp;from&nbsp;pickling</tt></dd></dl>
+
+<dl><dt><a name="RgbaColor-__repr__"><strong>__repr__</strong></a>(self)</dt><dd><tt>Return&nbsp;a&nbsp;nicely&nbsp;formatted&nbsp;representation&nbsp;string</tt></dd></dl>
+
+<dl><dt><a name="RgbaColor-_asdict"><strong>_asdict</strong></a>(self)</dt><dd><tt>Return&nbsp;a&nbsp;new&nbsp;OrderedDict&nbsp;which&nbsp;maps&nbsp;field&nbsp;names&nbsp;to&nbsp;their&nbsp;values</tt></dd></dl>
+
+<dl><dt><a name="RgbaColor-_replace"><strong>_replace</strong></a>(_self, **kwds)</dt><dd><tt>Return&nbsp;a&nbsp;new&nbsp;<a href="#RgbaColor">RgbaColor</a>&nbsp;object&nbsp;replacing&nbsp;specified&nbsp;fields&nbsp;with&nbsp;new&nbsp;values</tt></dd></dl>
+
+<hr>
+Class methods inherited from RgbaColor:<br>
+<dl><dt><a name="RgbaColor-_make"><strong>_make</strong></a>(cls, iterable, new<font color="#909090">=&lt;built-in method __new__ of type object&gt;</font>, len<font color="#909090">=&lt;built-in function len&gt;</font>)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Make&nbsp;a&nbsp;new&nbsp;<a href="#RgbaColor">RgbaColor</a>&nbsp;object&nbsp;from&nbsp;a&nbsp;sequence&nbsp;or&nbsp;iterable</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from RgbaColor:<br>
+<dl><dt><strong>a</strong></dt>
+<dd><tt>Alias&nbsp;for&nbsp;field&nbsp;number&nbsp;3</tt></dd>
+</dl>
+<dl><dt><strong>b</strong></dt>
+<dd><tt>Alias&nbsp;for&nbsp;field&nbsp;number&nbsp;2</tt></dd>
+</dl>
+<dl><dt><strong>g</strong></dt>
+<dd><tt>Alias&nbsp;for&nbsp;field&nbsp;number&nbsp;1</tt></dd>
+</dl>
+<dl><dt><strong>r</strong></dt>
+<dd><tt>Alias&nbsp;for&nbsp;field&nbsp;number&nbsp;0</tt></dd>
+</dl>
+<hr>
+Data and other attributes inherited from RgbaColor:<br>
+<dl><dt><strong>_fields</strong> = ('r', 'g', 'b', 'a')</dl>
+
+<hr>
+Methods inherited from <a href="__builtin__.html#tuple">__builtin__.tuple</a>:<br>
+<dl><dt><a name="RgbaColor-__add__"><strong>__add__</strong></a>(...)</dt><dd><tt>x.<a href="#RgbaColor-__add__">__add__</a>(y)&nbsp;&lt;==&gt;&nbsp;x+y</tt></dd></dl>
+
+<dl><dt><a name="RgbaColor-__contains__"><strong>__contains__</strong></a>(...)</dt><dd><tt>x.<a href="#RgbaColor-__contains__">__contains__</a>(y)&nbsp;&lt;==&gt;&nbsp;y&nbsp;in&nbsp;x</tt></dd></dl>
+
+<dl><dt><a name="RgbaColor-__eq__"><strong>__eq__</strong></a>(...)</dt><dd><tt>x.<a href="#RgbaColor-__eq__">__eq__</a>(y)&nbsp;&lt;==&gt;&nbsp;x==y</tt></dd></dl>
+
+<dl><dt><a name="RgbaColor-__ge__"><strong>__ge__</strong></a>(...)</dt><dd><tt>x.<a href="#RgbaColor-__ge__">__ge__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&gt;=y</tt></dd></dl>
+
+<dl><dt><a name="RgbaColor-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#RgbaColor-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="RgbaColor-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#RgbaColor-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="RgbaColor-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#RgbaColor-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="RgbaColor-__gt__"><strong>__gt__</strong></a>(...)</dt><dd><tt>x.<a href="#RgbaColor-__gt__">__gt__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&gt;y</tt></dd></dl>
+
+<dl><dt><a name="RgbaColor-__hash__"><strong>__hash__</strong></a>(...)</dt><dd><tt>x.<a href="#RgbaColor-__hash__">__hash__</a>()&nbsp;&lt;==&gt;&nbsp;hash(x)</tt></dd></dl>
+
+<dl><dt><a name="RgbaColor-__iter__"><strong>__iter__</strong></a>(...)</dt><dd><tt>x.<a href="#RgbaColor-__iter__">__iter__</a>()&nbsp;&lt;==&gt;&nbsp;iter(x)</tt></dd></dl>
+
+<dl><dt><a name="RgbaColor-__le__"><strong>__le__</strong></a>(...)</dt><dd><tt>x.<a href="#RgbaColor-__le__">__le__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&lt;=y</tt></dd></dl>
+
+<dl><dt><a name="RgbaColor-__len__"><strong>__len__</strong></a>(...)</dt><dd><tt>x.<a href="#RgbaColor-__len__">__len__</a>()&nbsp;&lt;==&gt;&nbsp;len(x)</tt></dd></dl>
+
+<dl><dt><a name="RgbaColor-__lt__"><strong>__lt__</strong></a>(...)</dt><dd><tt>x.<a href="#RgbaColor-__lt__">__lt__</a>(y)&nbsp;&lt;==&gt;&nbsp;x&lt;y</tt></dd></dl>
+
+<dl><dt><a name="RgbaColor-__mul__"><strong>__mul__</strong></a>(...)</dt><dd><tt>x.<a href="#RgbaColor-__mul__">__mul__</a>(n)&nbsp;&lt;==&gt;&nbsp;x*n</tt></dd></dl>
+
+<dl><dt><a name="RgbaColor-__ne__"><strong>__ne__</strong></a>(...)</dt><dd><tt>x.<a href="#RgbaColor-__ne__">__ne__</a>(y)&nbsp;&lt;==&gt;&nbsp;x!=y</tt></dd></dl>
+
+<dl><dt><a name="RgbaColor-__rmul__"><strong>__rmul__</strong></a>(...)</dt><dd><tt>x.<a href="#RgbaColor-__rmul__">__rmul__</a>(n)&nbsp;&lt;==&gt;&nbsp;n*x</tt></dd></dl>
+
+<dl><dt><a name="RgbaColor-__sizeof__"><strong>__sizeof__</strong></a>(...)</dt><dd><tt>T.<a href="#RgbaColor-__sizeof__">__sizeof__</a>()&nbsp;--&nbsp;size&nbsp;of&nbsp;T&nbsp;in&nbsp;memory,&nbsp;in&nbsp;bytes</tt></dd></dl>
+
+<dl><dt><a name="RgbaColor-count"><strong>count</strong></a>(...)</dt><dd><tt>T.<a href="#RgbaColor-count">count</a>(value)&nbsp;-&gt;&nbsp;integer&nbsp;--&nbsp;return&nbsp;number&nbsp;of&nbsp;occurrences&nbsp;of&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="RgbaColor-index"><strong>index</strong></a>(...)</dt><dd><tt>T.<a href="#RgbaColor-index">index</a>(value,&nbsp;[start,&nbsp;[stop]])&nbsp;-&gt;&nbsp;integer&nbsp;--&nbsp;return&nbsp;first&nbsp;index&nbsp;of&nbsp;value.<br>
+Raises&nbsp;ValueError&nbsp;if&nbsp;the&nbsp;value&nbsp;is&nbsp;not&nbsp;present.</tt></dd></dl>
+
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>WEB_PAGE_TEST_ORANGE</strong> = RgbaColor(r=222, g=100, b=13, a=255)<br>
+<strong>WHITE</strong> = RgbaColor(r=255, g=255, b=255, a=255)</td></tr></table>
+</body></html>
\ No newline at end of file
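RgbaColor is a namedtuple over ('r', 'g', 'b', 'a') with alpha defaulting to 255, plus tolerance-based comparison helpers. A brief sketch, assuming IsEqual applies the tolerance per channel (an assumption; the page above only documents the signatures):

    from telemetry.util.rgba_color import RgbaColor, WHITE

    pixel = RgbaColor(254, 255, 253)     # alpha defaults to 255
    print(pixel.r, pixel.g, pixel.b, pixel.a)

    # Tolerance-based comparison; assumed to be a per-channel check.
    print(pixel.IsEqual(WHITE, tolerance=2))
    # AssertIsRGB/AssertIsRGBA are the raising variants of the same check.
    pixel.AssertIsRGB(255, 255, 255, tolerance=2)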
diff --git a/catapult/telemetry/docs/pydoc/telemetry.util.statistics.html b/catapult/telemetry/docs/pydoc/telemetry.util.statistics.html
new file mode 100644
index 0000000..5f939a5
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.util.statistics.html
@@ -0,0 +1,137 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.util.statistics</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.util.html"><font color="#ffffff">util</font></a>.statistics</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/util/statistics.py">telemetry/util/statistics.py</a></font></td></tr></table>
+    <p><tt>A&nbsp;collection&nbsp;of&nbsp;statistical&nbsp;utility&nbsp;functions&nbsp;to&nbsp;be&nbsp;used&nbsp;by&nbsp;metrics.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="math.html">math</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-ArithmeticMean"><strong>ArithmeticMean</strong></a>(data)</dt><dd><tt>Calculates&nbsp;arithmetic&nbsp;mean.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;data:&nbsp;A&nbsp;list&nbsp;of&nbsp;samples.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;The&nbsp;arithmetic&nbsp;mean&nbsp;value,&nbsp;or&nbsp;0&nbsp;if&nbsp;the&nbsp;list&nbsp;is&nbsp;empty.</tt></dd></dl>
+ <dl><dt><a name="-Clamp"><strong>Clamp</strong></a>(value, low<font color="#909090">=0.0</font>, high<font color="#909090">=1.0</font>)</dt><dd><tt>Clamp&nbsp;a&nbsp;value&nbsp;between&nbsp;some&nbsp;low&nbsp;and&nbsp;high&nbsp;value.</tt></dd></dl>
+ <dl><dt><a name="-Discrepancy"><strong>Discrepancy</strong></a>(samples, location_count<font color="#909090">=None</font>)</dt><dd><tt>Computes&nbsp;the&nbsp;discrepancy&nbsp;of&nbsp;a&nbsp;set&nbsp;of&nbsp;1D&nbsp;samples&nbsp;from&nbsp;the&nbsp;interval&nbsp;[0,1].<br>
+&nbsp;<br>
+The&nbsp;samples&nbsp;must&nbsp;be&nbsp;sorted.&nbsp;We&nbsp;define&nbsp;the&nbsp;discrepancy&nbsp;of&nbsp;an&nbsp;empty&nbsp;set<br>
+of&nbsp;samples&nbsp;to&nbsp;be&nbsp;zero.<br>
+&nbsp;<br>
+<a href="http://en.wikipedia.org/wiki/Low-discrepancy_sequence">http://en.wikipedia.org/wiki/Low-discrepancy_sequence</a><br>
+<a href="http://mathworld.wolfram.com/Discrepancy.html">http://mathworld.wolfram.com/Discrepancy.html</a></tt></dd></dl>
+ <dl><dt><a name="-DivideIfPossibleOrZero"><strong>DivideIfPossibleOrZero</strong></a>(numerator, denominator)</dt><dd><tt>Returns&nbsp;the&nbsp;quotient,&nbsp;or&nbsp;zero&nbsp;if&nbsp;the&nbsp;denominator&nbsp;is&nbsp;zero.</tt></dd></dl>
+ <dl><dt><a name="-DurationsDiscrepancy"><strong>DurationsDiscrepancy</strong></a>(durations, absolute<font color="#909090">=True</font>, location_count<font color="#909090">=None</font>)</dt><dd><tt>A&nbsp;discrepancy&nbsp;based&nbsp;metric&nbsp;for&nbsp;measuring&nbsp;duration&nbsp;jank.<br>
+&nbsp;<br>
+DurationsDiscrepancy&nbsp;computes&nbsp;a&nbsp;jank&nbsp;metric&nbsp;which&nbsp;measures&nbsp;how&nbsp;irregular&nbsp;a<br>
+given&nbsp;sequence&nbsp;of&nbsp;intervals&nbsp;is.&nbsp;In&nbsp;order&nbsp;to&nbsp;minimize&nbsp;jank,&nbsp;each&nbsp;duration<br>
+should&nbsp;be&nbsp;equally&nbsp;long.&nbsp;This&nbsp;is&nbsp;similar&nbsp;to&nbsp;how&nbsp;timestamp&nbsp;jank&nbsp;works,<br>
+and&nbsp;we&nbsp;therefore&nbsp;reuse&nbsp;the&nbsp;timestamp&nbsp;discrepancy&nbsp;function&nbsp;above&nbsp;to&nbsp;compute&nbsp;a<br>
+similar&nbsp;duration&nbsp;discrepancy&nbsp;number.<br>
+&nbsp;<br>
+Because&nbsp;timestamp&nbsp;discrepancy&nbsp;is&nbsp;defined&nbsp;in&nbsp;terms&nbsp;of&nbsp;timestamps,&nbsp;we&nbsp;first<br>
+convert&nbsp;the&nbsp;list&nbsp;of&nbsp;durations&nbsp;to&nbsp;monotonically&nbsp;increasing&nbsp;timestamps.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;durations:&nbsp;List&nbsp;of&nbsp;interval&nbsp;lengths&nbsp;in&nbsp;milliseconds.<br>
+&nbsp;&nbsp;absolute:&nbsp;See&nbsp;TimestampsDiscrepancy.<br>
+&nbsp;&nbsp;interval_multiplier:&nbsp;See&nbsp;TimestampsDiscrepancy.</tt></dd></dl>
+ <dl><dt><a name="-GeneralizedMean"><strong>GeneralizedMean</strong></a>(values, exponent)</dt><dd><tt>See&nbsp;<a href="http://en.wikipedia.org/wiki/Generalized_mean">http://en.wikipedia.org/wiki/Generalized_mean</a></tt></dd></dl>
+ <dl><dt><a name="-GeometricMean"><strong>GeometricMean</strong></a>(values)</dt><dd><tt>Compute&nbsp;a&nbsp;rounded&nbsp;geometric&nbsp;mean&nbsp;from&nbsp;an&nbsp;array&nbsp;of&nbsp;values.</tt></dd></dl>
+ <dl><dt><a name="-Median"><strong>Median</strong></a>(values)</dt><dd><tt>Gets&nbsp;the&nbsp;median&nbsp;of&nbsp;a&nbsp;list&nbsp;of&nbsp;values.</tt></dd></dl>
+ <dl><dt><a name="-NormalizeSamples"><strong>NormalizeSamples</strong></a>(samples)</dt><dd><tt>Sorts&nbsp;the&nbsp;samples,&nbsp;and&nbsp;map&nbsp;them&nbsp;linearly&nbsp;to&nbsp;the&nbsp;range&nbsp;[0,1].<br>
+&nbsp;<br>
+They're&nbsp;mapped&nbsp;such&nbsp;that&nbsp;for&nbsp;the&nbsp;N&nbsp;samples,&nbsp;the&nbsp;first&nbsp;sample&nbsp;is&nbsp;0.5/N&nbsp;and&nbsp;the<br>
+last&nbsp;sample&nbsp;is&nbsp;(N-0.5)/N.<br>
+&nbsp;<br>
+Background:&nbsp;The&nbsp;discrepancy&nbsp;of&nbsp;the&nbsp;sample&nbsp;set&nbsp;i/(N-1);&nbsp;i=0,&nbsp;...,&nbsp;N-1&nbsp;is&nbsp;2/N,<br>
+twice&nbsp;the&nbsp;discrepancy&nbsp;of&nbsp;the&nbsp;sample&nbsp;set&nbsp;(i+1/2)/N;&nbsp;i=0,&nbsp;...,&nbsp;N-1.&nbsp;In&nbsp;our&nbsp;case<br>
+we&nbsp;don't&nbsp;want&nbsp;to&nbsp;distinguish&nbsp;between&nbsp;these&nbsp;two&nbsp;cases,&nbsp;as&nbsp;our&nbsp;original&nbsp;domain<br>
+is&nbsp;not&nbsp;bounded&nbsp;(it&nbsp;is&nbsp;for&nbsp;Monte&nbsp;Carlo&nbsp;integration,&nbsp;where&nbsp;discrepancy&nbsp;was<br>
+first&nbsp;used).</tt></dd></dl>
+ <dl><dt><a name="-Percentile"><strong>Percentile</strong></a>(values, percentile)</dt><dd><tt>Calculates&nbsp;the&nbsp;value&nbsp;below&nbsp;which&nbsp;a&nbsp;given&nbsp;percentage&nbsp;of&nbsp;values&nbsp;fall.<br>
+&nbsp;<br>
+For&nbsp;example,&nbsp;if&nbsp;17%&nbsp;of&nbsp;the&nbsp;values&nbsp;are&nbsp;less&nbsp;than&nbsp;5.0,&nbsp;then&nbsp;5.0&nbsp;is&nbsp;the&nbsp;17th<br>
+percentile&nbsp;for&nbsp;this&nbsp;set&nbsp;of&nbsp;values.&nbsp;When&nbsp;the&nbsp;percentage&nbsp;doesn't&nbsp;exactly<br>
+match&nbsp;a&nbsp;rank&nbsp;in&nbsp;the&nbsp;list&nbsp;of&nbsp;values,&nbsp;the&nbsp;percentile&nbsp;is&nbsp;computed&nbsp;using&nbsp;linear<br>
+interpolation&nbsp;between&nbsp;closest&nbsp;ranks.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;values:&nbsp;A&nbsp;list&nbsp;of&nbsp;numerical&nbsp;values.<br>
+&nbsp;&nbsp;percentile:&nbsp;A&nbsp;number&nbsp;between&nbsp;0&nbsp;and&nbsp;100.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;The&nbsp;Nth&nbsp;percentile&nbsp;for&nbsp;the&nbsp;list&nbsp;of&nbsp;values,&nbsp;where&nbsp;N&nbsp;is&nbsp;the&nbsp;given&nbsp;percentage.</tt></dd></dl>
+ <dl><dt><a name="-StandardDeviation"><strong>StandardDeviation</strong></a>(data)</dt><dd><tt>Calculates&nbsp;the&nbsp;standard&nbsp;deviation.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;data:&nbsp;A&nbsp;list&nbsp;of&nbsp;samples.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;The&nbsp;standard&nbsp;deviation&nbsp;of&nbsp;the&nbsp;samples&nbsp;provided.</tt></dd></dl>
+ <dl><dt><a name="-TimestampsDiscrepancy"><strong>TimestampsDiscrepancy</strong></a>(timestamps, absolute<font color="#909090">=True</font>, location_count<font color="#909090">=None</font>)</dt><dd><tt>A&nbsp;discrepancy&nbsp;based&nbsp;metric&nbsp;for&nbsp;measuring&nbsp;timestamp&nbsp;jank.<br>
+&nbsp;<br>
+TimestampsDiscrepancy&nbsp;quantifies&nbsp;the&nbsp;largest&nbsp;area&nbsp;of&nbsp;jank&nbsp;observed&nbsp;in&nbsp;a&nbsp;series<br>
+of&nbsp;timestamps.&nbsp;&nbsp;Note&nbsp;that&nbsp;this&nbsp;is&nbsp;different&nbsp;from&nbsp;metrics&nbsp;based&nbsp;on&nbsp;the<br>
+max_time_interval.&nbsp;For&nbsp;example,&nbsp;the&nbsp;time&nbsp;stamp&nbsp;series&nbsp;A&nbsp;=&nbsp;[0,1,2,3,5,6]&nbsp;and<br>
+B&nbsp;=&nbsp;[0,1,2,3,5,7]&nbsp;have&nbsp;the&nbsp;same&nbsp;max_time_interval&nbsp;=&nbsp;2,&nbsp;but<br>
+<a href="#-Discrepancy">Discrepancy</a>(B)&nbsp;&gt;&nbsp;<a href="#-Discrepancy">Discrepancy</a>(A).<br>
+&nbsp;<br>
+Two&nbsp;variants&nbsp;of&nbsp;discrepancy&nbsp;can&nbsp;be&nbsp;computed:<br>
+&nbsp;<br>
+Relative&nbsp;discrepancy&nbsp;follows&nbsp;the&nbsp;original&nbsp;definition&nbsp;of<br>
+discrepancy.&nbsp;It&nbsp;characterizes&nbsp;the&nbsp;largest&nbsp;area&nbsp;of&nbsp;jank,&nbsp;relative&nbsp;to&nbsp;the<br>
+duration&nbsp;of&nbsp;the&nbsp;entire&nbsp;time&nbsp;stamp&nbsp;series.&nbsp;&nbsp;We&nbsp;normalize&nbsp;the&nbsp;raw&nbsp;results,<br>
+because&nbsp;the&nbsp;best&nbsp;case&nbsp;discrepancy&nbsp;for&nbsp;a&nbsp;set&nbsp;of&nbsp;N&nbsp;samples&nbsp;is&nbsp;1/N&nbsp;(for<br>
+equally&nbsp;spaced&nbsp;samples),&nbsp;and&nbsp;we&nbsp;want&nbsp;our&nbsp;metric&nbsp;to&nbsp;report&nbsp;0.0&nbsp;in&nbsp;that<br>
+case.<br>
+&nbsp;<br>
+Absolute&nbsp;discrepancy&nbsp;also&nbsp;characterizes&nbsp;the&nbsp;largest&nbsp;area&nbsp;of&nbsp;jank,&nbsp;but&nbsp;its<br>
+value&nbsp;wouldn't&nbsp;change&nbsp;(except&nbsp;for&nbsp;imprecisions&nbsp;due&nbsp;to&nbsp;a&nbsp;low<br>
+|interval_multiplier|)&nbsp;if&nbsp;additional&nbsp;'good'&nbsp;intervals&nbsp;were&nbsp;added&nbsp;to&nbsp;an<br>
+existing&nbsp;list&nbsp;of&nbsp;time&nbsp;stamps.&nbsp;&nbsp;Its&nbsp;range&nbsp;is&nbsp;[0,inf]&nbsp;and&nbsp;the&nbsp;unit&nbsp;is<br>
+milliseconds.<br>
+&nbsp;<br>
+The&nbsp;time&nbsp;stamp&nbsp;series&nbsp;C&nbsp;=&nbsp;[0,2,3,4]&nbsp;and&nbsp;D&nbsp;=&nbsp;[0,2,3,4,5]&nbsp;have&nbsp;the&nbsp;same<br>
+absolute&nbsp;discrepancy,&nbsp;but&nbsp;D&nbsp;has&nbsp;lower&nbsp;relative&nbsp;discrepancy&nbsp;than&nbsp;C.<br>
+&nbsp;<br>
+|timestamps|&nbsp;may&nbsp;be&nbsp;a&nbsp;list&nbsp;of&nbsp;lists&nbsp;S&nbsp;=&nbsp;[S_1,&nbsp;S_2,&nbsp;...,&nbsp;S_N],&nbsp;where&nbsp;each<br>
+S_i&nbsp;is&nbsp;a&nbsp;time&nbsp;stamp&nbsp;series.&nbsp;In&nbsp;that&nbsp;case,&nbsp;the&nbsp;discrepancy&nbsp;D(S)&nbsp;is:<br>
+D(S)&nbsp;=&nbsp;max(D(S_1),&nbsp;D(S_2),&nbsp;...,&nbsp;D(S_N))</tt></dd></dl>
+ <dl><dt><a name="-Total"><strong>Total</strong></a>(data)</dt><dd><tt>Returns&nbsp;the&nbsp;float&nbsp;value&nbsp;of&nbsp;a&nbsp;number&nbsp;or&nbsp;the&nbsp;sum&nbsp;of&nbsp;a&nbsp;list.</tt></dd></dl>
+ <dl><dt><a name="-TrapezoidalRule"><strong>TrapezoidalRule</strong></a>(data, dx)</dt><dd><tt>Calculate&nbsp;the&nbsp;integral&nbsp;according&nbsp;to&nbsp;the&nbsp;trapezoidal&nbsp;rule<br>
+&nbsp;<br>
+TrapezoidalRule&nbsp;approximates&nbsp;the&nbsp;definite&nbsp;integral&nbsp;of&nbsp;f&nbsp;from&nbsp;a&nbsp;to&nbsp;b&nbsp;by<br>
+the&nbsp;composite&nbsp;trapezoidal&nbsp;rule,&nbsp;using&nbsp;n&nbsp;subintervals.<br>
+<a href="http://en.wikipedia.org/wiki/Trapezoidal_rule#Uniform_grid">http://en.wikipedia.org/wiki/Trapezoidal_rule#Uniform_grid</a><br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;data:&nbsp;A&nbsp;list&nbsp;of&nbsp;samples<br>
+&nbsp;&nbsp;dx:&nbsp;The&nbsp;uniform&nbsp;distance&nbsp;along&nbsp;the&nbsp;x&nbsp;axis&nbsp;between&nbsp;any&nbsp;two&nbsp;samples<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;The&nbsp;area&nbsp;under&nbsp;the&nbsp;curve&nbsp;defined&nbsp;by&nbsp;the&nbsp;samples&nbsp;and&nbsp;the&nbsp;uniform&nbsp;distance<br>
+&nbsp;&nbsp;according&nbsp;to&nbsp;the&nbsp;trapezoidal&nbsp;rule.</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
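A few worked calls against the documented signatures make the interpolation and integration behavior concrete; the expected values in the comments follow the docstrings (linear interpolation between closest ranks, composite trapezoidal rule) and are illustrative rather than verified against this revision:

    from telemetry.util import statistics

    samples = [1, 2, 3, 4]
    print(statistics.ArithmeticMean(samples))        # 2.5
    # 50th percentile with linear interpolation between the two middle ranks.
    print(statistics.Percentile(samples, 50))        # expected 2.5

    # Composite trapezoidal rule over y = [0, 1, 2, 3] with uniform dx = 1:
    # (0 + 3) / 2 + 1 + 2 = 4.5
    print(statistics.TrapezoidalRule([0, 1, 2, 3], 1))

    # Jank metric: equally spaced timestamps give the lowest discrepancy.
    print(statistics.TimestampsDiscrepancy([0, 1, 2, 3, 5, 7]))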
diff --git a/catapult/telemetry/docs/pydoc/telemetry.util.wpr_modes.html b/catapult/telemetry/docs/pydoc/telemetry.util.wpr_modes.html
new file mode 100644
index 0000000..dcbd842
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.util.wpr_modes.html
@@ -0,0 +1,27 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.util.wpr_modes</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.util.html"><font color="#ffffff">util</font></a>.wpr_modes</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/util/wpr_modes.py">telemetry/util/wpr_modes.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2012&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>WPR_APPEND</strong> = 'wpr-append'<br>
+<strong>WPR_OFF</strong> = 'wpr-off'<br>
+<strong>WPR_RECORD</strong> = 'wpr-record'<br>
+<strong>WPR_REPLAY</strong> = 'wpr-replay'</td></tr></table>
+</body></html>
\ No newline at end of file
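The module is nothing more than the four mode strings above; a tiny sketch of how a harness might select one (the use_live_sites flag here is purely illustrative, not part of the module):

    from telemetry.util import wpr_modes

    use_live_sites = False   # illustrative flag
    mode = wpr_modes.WPR_OFF if use_live_sites else wpr_modes.WPR_REPLAY
    assert mode in (wpr_modes.WPR_OFF, wpr_modes.WPR_RECORD,
                    wpr_modes.WPR_REPLAY, wpr_modes.WPR_APPEND)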
diff --git a/catapult/telemetry/docs/pydoc/telemetry.value.failure.html b/catapult/telemetry/docs/pydoc/telemetry.value.failure.html
new file mode 100644
index 0000000..68868c8
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.value.failure.html
@@ -0,0 +1,151 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.value.failure</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.value.html"><font color="#ffffff">value</font></a>.failure</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/value/failure.py">telemetry/value/failure.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="sys.html">sys</a><br>
+</td><td width="25%" valign=top><a href="traceback.html">traceback</a><br>
+</td><td width="25%" valign=top><a href="telemetry.value.html">telemetry.value</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.value.html#Value">telemetry.value.Value</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.value.failure.html#FailureValue">FailureValue</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="FailureValue">class <strong>FailureValue</strong></a>(<a href="telemetry.value.html#Value">telemetry.value.Value</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.value.failure.html#FailureValue">FailureValue</a></dd>
+<dd><a href="telemetry.value.html#Value">telemetry.value.Value</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="FailureValue-AsDict"><strong>AsDict</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FailureValue-GetBuildbotDataType"><strong>GetBuildbotDataType</strong></a>(self, output_context)</dt></dl>
+
+<dl><dt><a name="FailureValue-GetBuildbotValue"><strong>GetBuildbotValue</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FailureValue-GetChartAndTraceNameForPerPageResult"><strong>GetChartAndTraceNameForPerPageResult</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FailureValue-GetRepresentativeNumber"><strong>GetRepresentativeNumber</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FailureValue-GetRepresentativeString"><strong>GetRepresentativeString</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FailureValue-__init__"><strong>__init__</strong></a>(self, page, exc_info, description<font color="#909090">=None</font>, tir_label<font color="#909090">=None</font>)</dt><dd><tt>A&nbsp;value&nbsp;representing&nbsp;a&nbsp;failure&nbsp;when&nbsp;running&nbsp;the&nbsp;page.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;page:&nbsp;The&nbsp;page&nbsp;where&nbsp;this&nbsp;failure&nbsp;occurs.<br>
+&nbsp;&nbsp;exc_info:&nbsp;The&nbsp;exception&nbsp;info&nbsp;(sys.<a href="#FailureValue-exc_info">exc_info</a>())&nbsp;corresponding&nbsp;to<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;this&nbsp;failure.</tt></dd></dl>
+
+<dl><dt><a name="FailureValue-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="FailureValue-FromMessage"><strong>FromMessage</strong></a>(cls, page, message)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Creates&nbsp;a&nbsp;failure&nbsp;value&nbsp;for&nbsp;a&nbsp;given&nbsp;string&nbsp;message.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;page:&nbsp;The&nbsp;page&nbsp;where&nbsp;this&nbsp;failure&nbsp;occurs.<br>
+&nbsp;&nbsp;message:&nbsp;A&nbsp;string&nbsp;message&nbsp;describing&nbsp;the&nbsp;failure.</tt></dd></dl>
+
+<dl><dt><a name="FailureValue-MergeLikeValuesFromDifferentPages"><strong>MergeLikeValuesFromDifferentPages</strong></a>(cls, values)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="FailureValue-MergeLikeValuesFromSamePage"><strong>MergeLikeValuesFromSamePage</strong></a>(cls, values)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="FailureValue-FromDict"><strong>FromDict</strong></a>(value_dict, page_dict)</dt></dl>
+
+<dl><dt><a name="FailureValue-GetJSONTypeName"><strong>GetJSONTypeName</strong></a>()</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>exc_info</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.value.html#Value">telemetry.value.Value</a>:<br>
+<dl><dt><a name="FailureValue-AsDictWithoutBaseClassEntries"><strong>AsDictWithoutBaseClassEntries</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="FailureValue-GetChartAndTraceNameForComputedSummaryResult"><strong>GetChartAndTraceNameForComputedSummaryResult</strong></a>(self, trace_tag)</dt></dl>
+
+<dl><dt><a name="FailureValue-IsMergableWith"><strong>IsMergableWith</strong></a>(self, that)</dt></dl>
+
+<dl><dt><a name="FailureValue-__eq__"><strong>__eq__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="FailureValue-__hash__"><strong>__hash__</strong></a>(self)</dt></dl>
+
+<hr>
+Static methods inherited from <a href="telemetry.value.html#Value">telemetry.value.Value</a>:<br>
+<dl><dt><a name="FailureValue-GetConstructorKwArgs"><strong>GetConstructorKwArgs</strong></a>(value_dict, page_dict)</dt><dd><tt>Produces&nbsp;constructor&nbsp;arguments&nbsp;from&nbsp;a&nbsp;value&nbsp;dict&nbsp;and&nbsp;a&nbsp;page&nbsp;dict.<br>
+&nbsp;<br>
+Takes&nbsp;a&nbsp;dict&nbsp;parsed&nbsp;from&nbsp;JSON&nbsp;and&nbsp;an&nbsp;index&nbsp;of&nbsp;pages&nbsp;and&nbsp;recovers&nbsp;the<br>
+keyword&nbsp;arguments&nbsp;to&nbsp;be&nbsp;passed&nbsp;to&nbsp;the&nbsp;constructor&nbsp;for&nbsp;deserializing&nbsp;the<br>
+dict.<br>
+&nbsp;<br>
+value_dict:&nbsp;a&nbsp;dictionary&nbsp;produced&nbsp;by&nbsp;<a href="#FailureValue-AsDict">AsDict</a>()&nbsp;on&nbsp;a&nbsp;value&nbsp;subclass.<br>
+page_dict:&nbsp;a&nbsp;dictionary&nbsp;mapping&nbsp;IDs&nbsp;to&nbsp;page&nbsp;objects.</tt></dd></dl>
+
+<dl><dt><a name="FailureValue-ListOfValuesFromListOfDicts"><strong>ListOfValuesFromListOfDicts</strong></a>(value_dicts, page_dict)</dt><dd><tt>Takes&nbsp;a&nbsp;list&nbsp;of&nbsp;value&nbsp;dicts&nbsp;to&nbsp;values.<br>
+&nbsp;<br>
+Given&nbsp;a&nbsp;list&nbsp;of&nbsp;value&nbsp;dicts&nbsp;produced&nbsp;by&nbsp;AsDict,&nbsp;this&nbsp;method<br>
+deserializes&nbsp;the&nbsp;dicts&nbsp;given&nbsp;a&nbsp;dict&nbsp;mapping&nbsp;page&nbsp;IDs&nbsp;to&nbsp;pages.<br>
+This&nbsp;method&nbsp;performs&nbsp;memoization&nbsp;for&nbsp;deserializing&nbsp;a&nbsp;list&nbsp;of&nbsp;values<br>
+efficiently,&nbsp;where&nbsp;FromDict&nbsp;is&nbsp;meant&nbsp;to&nbsp;handle&nbsp;one-offs.<br>
+&nbsp;<br>
+values:&nbsp;a&nbsp;list&nbsp;of&nbsp;value&nbsp;dicts&nbsp;produced&nbsp;by&nbsp;<a href="#FailureValue-AsDict">AsDict</a>()&nbsp;on&nbsp;a&nbsp;value&nbsp;subclass.<br>
+page_dict:&nbsp;a&nbsp;dictionary&nbsp;mapping&nbsp;IDs&nbsp;to&nbsp;page&nbsp;objects.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.value.html#Value">telemetry.value.Value</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>name_suffix</strong></dt>
+<dd><tt>Returns&nbsp;the&nbsp;string&nbsp;after&nbsp;a&nbsp;.&nbsp;in&nbsp;the&nbsp;name,&nbsp;or&nbsp;the&nbsp;full&nbsp;name&nbsp;otherwise.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-GetStringFromExcInfo"><strong>GetStringFromExcInfo</strong></a>(exc_info)</dt></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
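FailureValue wraps a (page, exc_info) pair, and FromMessage builds the exc_info from a plain string. A hedged sketch of both paths (passing page=None is an assumption made for brevity; in a real run the page comes from the story set):

    import sys

    from telemetry.value.failure import FailureValue, GetStringFromExcInfo

    try:
        raise RuntimeError('page did not finish loading')
    except RuntimeError:
        failure = FailureValue(page=None, exc_info=sys.exc_info())

    # Shortcut when only a message string is available.
    also_failure = FailureValue.FromMessage(None, 'timed out waiting for onload')

    # GetStringFromExcInfo renders the stored traceback for logging.
    print(GetStringFromExcInfo(failure.exc_info))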
diff --git a/catapult/telemetry/docs/pydoc/telemetry.value.histogram.html b/catapult/telemetry/docs/pydoc/telemetry.value.histogram.html
new file mode 100644
index 0000000..9b6c3c1
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.value.histogram.html
@@ -0,0 +1,167 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.value.histogram</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.value.html"><font color="#ffffff">value</font></a>.histogram</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/value/histogram.py">telemetry/value/histogram.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.value.histogram_util.html">telemetry.value.histogram_util</a><br>
+<a href="json.html">json</a><br>
+</td><td width="25%" valign=top><a href="telemetry.util.perf_tests_helper.html">telemetry.util.perf_tests_helper</a><br>
+<a href="telemetry.value.summarizable.html">telemetry.value.summarizable</a><br>
+</td><td width="25%" valign=top><a href="telemetry.value.html">telemetry.value</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.value.histogram.html#HistogramValueBucket">HistogramValueBucket</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.value.summarizable.html#SummarizableValue">telemetry.value.summarizable.SummarizableValue</a>(<a href="telemetry.value.html#Value">telemetry.value.Value</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.value.histogram.html#HistogramValue">HistogramValue</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="HistogramValue">class <strong>HistogramValue</strong></a>(<a href="telemetry.value.summarizable.html#SummarizableValue">telemetry.value.summarizable.SummarizableValue</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.value.histogram.html#HistogramValue">HistogramValue</a></dd>
+<dd><a href="telemetry.value.summarizable.html#SummarizableValue">telemetry.value.summarizable.SummarizableValue</a></dd>
+<dd><a href="telemetry.value.html#Value">telemetry.value.Value</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="HistogramValue-AsDict"><strong>AsDict</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="HistogramValue-GetBuildbotDataType"><strong>GetBuildbotDataType</strong></a>(self, output_context)</dt></dl>
+
+<dl><dt><a name="HistogramValue-GetBuildbotValue"><strong>GetBuildbotValue</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="HistogramValue-GetRepresentativeNumber"><strong>GetRepresentativeNumber</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="HistogramValue-GetRepresentativeString"><strong>GetRepresentativeString</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="HistogramValue-ToJSONString"><strong>ToJSONString</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="HistogramValue-__init__"><strong>__init__</strong></a>(self, page, name, units, raw_value<font color="#909090">=None</font>, raw_value_json<font color="#909090">=None</font>, important<font color="#909090">=True</font>, description<font color="#909090">=None</font>, tir_label<font color="#909090">=None</font>, improvement_direction<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="HistogramValue-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="HistogramValue-MergeLikeValuesFromDifferentPages"><strong>MergeLikeValuesFromDifferentPages</strong></a>(cls, values)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="HistogramValue-MergeLikeValuesFromSamePage"><strong>MergeLikeValuesFromSamePage</strong></a>(cls, values)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="HistogramValue-FromDict"><strong>FromDict</strong></a>(value_dict, page_dict)</dt></dl>
+
+<dl><dt><a name="HistogramValue-GetJSONTypeName"><strong>GetJSONTypeName</strong></a>()</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.value.summarizable.html#SummarizableValue">telemetry.value.summarizable.SummarizableValue</a>:<br>
+<dl><dt><a name="HistogramValue-AsDictWithoutBaseClassEntries"><strong>AsDictWithoutBaseClassEntries</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.value.summarizable.html#SummarizableValue">telemetry.value.summarizable.SummarizableValue</a>:<br>
+<dl><dt><strong>improvement_direction</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.value.html#Value">telemetry.value.Value</a>:<br>
+<dl><dt><a name="HistogramValue-GetChartAndTraceNameForComputedSummaryResult"><strong>GetChartAndTraceNameForComputedSummaryResult</strong></a>(self, trace_tag)</dt></dl>
+
+<dl><dt><a name="HistogramValue-GetChartAndTraceNameForPerPageResult"><strong>GetChartAndTraceNameForPerPageResult</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="HistogramValue-IsMergableWith"><strong>IsMergableWith</strong></a>(self, that)</dt></dl>
+
+<dl><dt><a name="HistogramValue-__eq__"><strong>__eq__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="HistogramValue-__hash__"><strong>__hash__</strong></a>(self)</dt></dl>
+
+<hr>
+Static methods inherited from <a href="telemetry.value.html#Value">telemetry.value.Value</a>:<br>
+<dl><dt><a name="HistogramValue-GetConstructorKwArgs"><strong>GetConstructorKwArgs</strong></a>(value_dict, page_dict)</dt><dd><tt>Produces&nbsp;constructor&nbsp;arguments&nbsp;from&nbsp;a&nbsp;value&nbsp;dict&nbsp;and&nbsp;a&nbsp;page&nbsp;dict.<br>
+&nbsp;<br>
+Takes&nbsp;a&nbsp;dict&nbsp;parsed&nbsp;from&nbsp;JSON&nbsp;and&nbsp;an&nbsp;index&nbsp;of&nbsp;pages&nbsp;and&nbsp;recovers&nbsp;the<br>
+keyword&nbsp;arguments&nbsp;to&nbsp;be&nbsp;passed&nbsp;to&nbsp;the&nbsp;constructor&nbsp;for&nbsp;deserializing&nbsp;the<br>
+dict.<br>
+&nbsp;<br>
+value_dict:&nbsp;a&nbsp;dictionary&nbsp;produced&nbsp;by&nbsp;<a href="#HistogramValue-AsDict">AsDict</a>()&nbsp;on&nbsp;a&nbsp;value&nbsp;subclass.<br>
+page_dict:&nbsp;a&nbsp;dictionary&nbsp;mapping&nbsp;IDs&nbsp;to&nbsp;page&nbsp;objects.</tt></dd></dl>
+
+<dl><dt><a name="HistogramValue-ListOfValuesFromListOfDicts"><strong>ListOfValuesFromListOfDicts</strong></a>(value_dicts, page_dict)</dt><dd><tt>Takes&nbsp;a&nbsp;list&nbsp;of&nbsp;value&nbsp;dicts&nbsp;to&nbsp;values.<br>
+&nbsp;<br>
+Given&nbsp;a&nbsp;list&nbsp;of&nbsp;value&nbsp;dicts&nbsp;produced&nbsp;by&nbsp;AsDict,&nbsp;this&nbsp;method<br>
+deserializes&nbsp;the&nbsp;dicts&nbsp;given&nbsp;a&nbsp;dict&nbsp;mapping&nbsp;page&nbsp;IDs&nbsp;to&nbsp;pages.<br>
+This&nbsp;method&nbsp;performs&nbsp;memoization&nbsp;for&nbsp;deserializing&nbsp;a&nbsp;list&nbsp;of&nbsp;values<br>
+efficiently,&nbsp;where&nbsp;FromDict&nbsp;is&nbsp;meant&nbsp;to&nbsp;handle&nbsp;one-offs.<br>
+&nbsp;<br>
+value_dicts:&nbsp;a&nbsp;list&nbsp;of&nbsp;value&nbsp;dicts&nbsp;produced&nbsp;by&nbsp;<a href="#HistogramValue-AsDict">AsDict</a>()&nbsp;on&nbsp;a&nbsp;value&nbsp;subclass.<br>
+page_dict:&nbsp;a&nbsp;dictionary&nbsp;mapping&nbsp;IDs&nbsp;to&nbsp;page&nbsp;objects.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.value.html#Value">telemetry.value.Value</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>name_suffix</strong></dt>
+<dd><tt>Returns&nbsp;the&nbsp;string&nbsp;after&nbsp;a&nbsp;.&nbsp;in&nbsp;the&nbsp;name,&nbsp;or&nbsp;the&nbsp;full&nbsp;name&nbsp;otherwise.</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="HistogramValueBucket">class <strong>HistogramValueBucket</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="HistogramValueBucket-AsDict"><strong>AsDict</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="HistogramValueBucket-ToJSONString"><strong>ToJSONString</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="HistogramValueBucket-__init__"><strong>__init__</strong></a>(self, low, high, count<font color="#909090">=0</font>)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
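The HistogramValueBucket entry above documents a small, fully specified interface: a constructor taking low, high and count=0, plus AsDict() and ToJSONString(). As a rough standalone illustration of that shape (not the telemetry implementation; the dict key names below are an assumption), a bucket can be modelled like this:

    import json

    class Bucket(object):
        """Stand-in mirroring the documented HistogramValueBucket interface."""

        def __init__(self, low, high, count=0):
            self.low = low      # lower bound of the bucket
            self.high = high    # upper bound of the bucket
            self.count = count  # number of samples that fell in this bucket

        def AsDict(self):
            # Assumed key names; the real class defines its own serialization.
            return {'low': self.low, 'high': self.high, 'count': self.count}

        def ToJSONString(self):
            return json.dumps(self.AsDict(), sort_keys=True)

    bucket = Bucket(low=0, high=10, count=3)
    print(bucket.ToJSONString())  # {"count": 3, "high": 10, "low": 0}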
diff --git a/catapult/telemetry/docs/pydoc/telemetry.value.histogram_util.html b/catapult/telemetry/docs/pydoc/telemetry.value.histogram_util.html
new file mode 100644
index 0000000..745510d
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.value.histogram_util.html
@@ -0,0 +1,60 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.value.histogram_util</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.value.html"><font color="#ffffff">value</font></a>.histogram_util</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/value/histogram_util.py">telemetry/value/histogram_util.py</a></font></td></tr></table>
+    <p><tt>This&nbsp;is&nbsp;a&nbsp;helper&nbsp;module&nbsp;to&nbsp;get&nbsp;and&nbsp;manipulate&nbsp;histogram&nbsp;data.<br>
+&nbsp;<br>
+The&nbsp;histogram&nbsp;data&nbsp;is&nbsp;the&nbsp;same&nbsp;data&nbsp;as&nbsp;is&nbsp;visible&nbsp;from&nbsp;"chrome://histograms".<br>
+More&nbsp;information&nbsp;can&nbsp;be&nbsp;found&nbsp;at:&nbsp;chromium/src/base/metrics/histogram.h</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="collections.html">collections</a><br>
+</td><td width="25%" valign=top><a href="telemetry.core.exceptions.html">telemetry.core.exceptions</a><br>
+</td><td width="25%" valign=top><a href="json.html">json</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-AddHistograms"><strong>AddHistograms</strong></a>(histogram_jsons)</dt><dd><tt>Adds&nbsp;histograms&nbsp;together.&nbsp;Used&nbsp;for&nbsp;aggregating&nbsp;data.<br>
+&nbsp;<br>
+The&nbsp;parameter&nbsp;is&nbsp;a&nbsp;list&nbsp;of&nbsp;json&nbsp;serializations&nbsp;and&nbsp;the&nbsp;returned&nbsp;result&nbsp;is&nbsp;a<br>
+json&nbsp;serialization&nbsp;too.<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;the&nbsp;histograms&nbsp;to&nbsp;be&nbsp;added&nbsp;together&nbsp;are&nbsp;typically&nbsp;from&nbsp;different<br>
+processes.</tt></dd></dl>
+ <dl><dt><a name="-CustomizeBrowserOptions"><strong>CustomizeBrowserOptions</strong></a>(options)</dt><dd><tt>Allows&nbsp;histogram&nbsp;collection.</tt></dd></dl>
+ <dl><dt><a name="-GetHistogram"><strong>GetHistogram</strong></a>(histogram_type, histogram_name, tab)</dt><dd><tt>Get&nbsp;a&nbsp;json&nbsp;serialization&nbsp;of&nbsp;a&nbsp;histogram.</tt></dd></dl>
+ <dl><dt><a name="-GetHistogramBucketsFromJson"><strong>GetHistogramBucketsFromJson</strong></a>(histogram_json)</dt></dl>
+ <dl><dt><a name="-GetHistogramBucketsFromRawValue"><strong>GetHistogramBucketsFromRawValue</strong></a>(raw_value)</dt></dl>
+ <dl><dt><a name="-GetHistogramCount"><strong>GetHistogramCount</strong></a>(histogram_type, histogram_name, tab)</dt><dd><tt>Get&nbsp;the&nbsp;count&nbsp;of&nbsp;events&nbsp;for&nbsp;the&nbsp;given&nbsp;histograms.</tt></dd></dl>
+ <dl><dt><a name="-GetHistogramSum"><strong>GetHistogramSum</strong></a>(histogram_type, histogram_name, tab)</dt><dd><tt>Get&nbsp;the&nbsp;sum&nbsp;of&nbsp;events&nbsp;for&nbsp;the&nbsp;given&nbsp;histograms.</tt></dd></dl>
+ <dl><dt><a name="-SubtractHistogram"><strong>SubtractHistogram</strong></a>(histogram_json, start_histogram_json)</dt><dd><tt>Subtracts&nbsp;a&nbsp;previous&nbsp;histogram&nbsp;from&nbsp;a&nbsp;histogram.<br>
+&nbsp;<br>
+Both&nbsp;parameters&nbsp;and&nbsp;the&nbsp;returned&nbsp;result&nbsp;are&nbsp;json&nbsp;serializations.</tt></dd></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>BROWSER_HISTOGRAM</strong> = 'browser_histogram'<br>
+<strong>RENDERER_HISTOGRAM</strong> = 'renderer_histogram'</td></tr></table>
+</body></html>
\ No newline at end of file
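AddHistograms and SubtractHistogram above both operate on JSON serializations of chrome://histograms data, typically to aggregate counts across processes or to remove a baseline recorded before a test action. Below is a minimal standalone sketch of the bucket-wise addition idea; the '{"buckets": [...]}' layout is an assumption used to keep the example self-contained, not the real wire format.

    import json
    from collections import Counter

    def add_histogram_jsons(histogram_jsons):
        """Adds bucket counts from several histogram JSON serializations."""
        totals = Counter()
        for serialized in histogram_jsons:
            for bucket in json.loads(serialized)['buckets']:
                totals[(bucket['low'], bucket['high'])] += bucket['count']
        buckets = [{'low': low, 'high': high, 'count': count}
                   for (low, high), count in sorted(totals.items())]
        return json.dumps({'buckets': buckets})

    renderer = '{"buckets": [{"low": 0, "high": 10, "count": 2}]}'
    browser = '{"buckets": [{"low": 0, "high": 10, "count": 5}]}'
    print(add_histogram_jsons([renderer, browser]))
    # {"buckets": [{"low": 0, "high": 10, "count": 7}]}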
diff --git a/catapult/telemetry/docs/pydoc/telemetry.value.html b/catapult/telemetry/docs/pydoc/telemetry.value.html
new file mode 100644
index 0000000..8d397fd
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.value.html
@@ -0,0 +1,235 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.value</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.value</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/value/__init__.py">telemetry/value/__init__.py</a></font></td></tr></table>
+    <p><tt>The&nbsp;<a href="#Value">Value</a>&nbsp;hierarchy&nbsp;provides&nbsp;a&nbsp;way&nbsp;of&nbsp;representing&nbsp;the&nbsp;values&nbsp;measurements<br>
+produce&nbsp;such&nbsp;that&nbsp;they&nbsp;can&nbsp;be&nbsp;merged&nbsp;across&nbsp;runs,&nbsp;grouped&nbsp;by&nbsp;page,&nbsp;and&nbsp;output<br>
+to&nbsp;different&nbsp;targets.<br>
+&nbsp;<br>
+The&nbsp;core&nbsp;<a href="#Value">Value</a>&nbsp;concept&nbsp;provides&nbsp;the&nbsp;basic&nbsp;functionality:<br>
+-&nbsp;association&nbsp;with&nbsp;a&nbsp;page,&nbsp;may&nbsp;be&nbsp;none<br>
+-&nbsp;naming&nbsp;and&nbsp;units<br>
+-&nbsp;importance&nbsp;tracking&nbsp;[whether&nbsp;a&nbsp;value&nbsp;will&nbsp;show&nbsp;up&nbsp;on&nbsp;a&nbsp;waterfall&nbsp;or&nbsp;output<br>
+&nbsp;&nbsp;file&nbsp;by&nbsp;default]<br>
+-&nbsp;other&nbsp;metadata,&nbsp;such&nbsp;as&nbsp;a&nbsp;description&nbsp;of&nbsp;what&nbsp;was&nbsp;measured<br>
+-&nbsp;default&nbsp;conversion&nbsp;to&nbsp;scalar&nbsp;and&nbsp;string<br>
+-&nbsp;merging&nbsp;properties<br>
+&nbsp;<br>
+A&nbsp;page&nbsp;may&nbsp;actually&nbsp;run&nbsp;a&nbsp;few&nbsp;times&nbsp;during&nbsp;a&nbsp;single&nbsp;telemetry&nbsp;session.<br>
+Downstream&nbsp;consumers&nbsp;of&nbsp;test&nbsp;results&nbsp;typically&nbsp;want&nbsp;to&nbsp;group&nbsp;these&nbsp;runs<br>
+together,&nbsp;then&nbsp;compute&nbsp;summary&nbsp;statistics&nbsp;across&nbsp;runs.&nbsp;<a href="#Value">Value</a>&nbsp;provides&nbsp;the<br>
+Merge*&nbsp;family&nbsp;of&nbsp;methods&nbsp;for&nbsp;this&nbsp;kind&nbsp;of&nbsp;aggregation.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.value.failure.html">failure</a><br>
+<a href="telemetry.value.failure_unittest.html">failure_unittest</a><br>
+<a href="telemetry.value.histogram.html">histogram</a><br>
+<a href="telemetry.value.histogram_unittest.html">histogram_unittest</a><br>
+<a href="telemetry.value.histogram_util.html">histogram_util</a><br>
+<a href="telemetry.value.histogram_util_unittest.html">histogram_util_unittest</a><br>
+<a href="telemetry.value.improvement_direction.html">improvement_direction</a><br>
+</td><td width="25%" valign=top><a href="telemetry.value.list_of_scalar_values.html">list_of_scalar_values</a><br>
+<a href="telemetry.value.list_of_scalar_values_unittest.html">list_of_scalar_values_unittest</a><br>
+<a href="telemetry.value.list_of_string_values.html">list_of_string_values</a><br>
+<a href="telemetry.value.list_of_string_values_unittest.html">list_of_string_values_unittest</a><br>
+<a href="telemetry.value.merge_values.html">merge_values</a><br>
+<a href="telemetry.value.merge_values_unittest.html">merge_values_unittest</a><br>
+<a href="telemetry.value.none_values.html">none_values</a><br>
+</td><td width="25%" valign=top><a href="telemetry.value.scalar.html">scalar</a><br>
+<a href="telemetry.value.scalar_unittest.html">scalar_unittest</a><br>
+<a href="telemetry.value.skip.html">skip</a><br>
+<a href="telemetry.value.skip_unittest.html">skip_unittest</a><br>
+<a href="telemetry.value.string.html">string</a><br>
+<a href="telemetry.value.string_unittest.html">string_unittest</a><br>
+<a href="telemetry.value.summarizable.html">summarizable</a><br>
+</td><td width="25%" valign=top><a href="telemetry.value.summary.html">summary</a><br>
+<a href="telemetry.value.summary_unittest.html">summary_unittest</a><br>
+<a href="telemetry.value.trace.html">trace</a><br>
+<a href="telemetry.value.trace_unittest.html">trace_unittest</a><br>
+<a href="telemetry.value.value_unittest.html">value_unittest</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.value.html#Value">Value</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Value">class <strong>Value</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>An&nbsp;abstract&nbsp;value&nbsp;produced&nbsp;by&nbsp;a&nbsp;telemetry&nbsp;page&nbsp;test.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="Value-AsDict"><strong>AsDict</strong></a>(self)</dt><dd><tt>Pre-serializes&nbsp;a&nbsp;value&nbsp;to&nbsp;a&nbsp;dict&nbsp;for&nbsp;output&nbsp;as&nbsp;JSON.</tt></dd></dl>
+
+<dl><dt><a name="Value-AsDictWithoutBaseClassEntries"><strong>AsDictWithoutBaseClassEntries</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="Value-GetBuildbotDataType"><strong>GetBuildbotDataType</strong></a>(self, output_context)</dt><dd><tt>Returns&nbsp;the&nbsp;buildbot's&nbsp;equivalent&nbsp;data_type.<br>
+&nbsp;<br>
+This&nbsp;should&nbsp;be&nbsp;one&nbsp;of&nbsp;the&nbsp;values&nbsp;accepted&nbsp;by&nbsp;perf_tests_results_helper.py.</tt></dd></dl>
+
+<dl><dt><a name="Value-GetBuildbotValue"><strong>GetBuildbotValue</strong></a>(self)</dt><dd><tt>Returns&nbsp;the&nbsp;buildbot's&nbsp;equivalent&nbsp;value.</tt></dd></dl>
+
+<dl><dt><a name="Value-GetChartAndTraceNameForComputedSummaryResult"><strong>GetChartAndTraceNameForComputedSummaryResult</strong></a>(self, trace_tag)</dt></dl>
+
+<dl><dt><a name="Value-GetChartAndTraceNameForPerPageResult"><strong>GetChartAndTraceNameForPerPageResult</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="Value-GetRepresentativeNumber"><strong>GetRepresentativeNumber</strong></a>(self)</dt><dd><tt>Gets&nbsp;a&nbsp;single&nbsp;scalar&nbsp;value&nbsp;that&nbsp;best-represents&nbsp;this&nbsp;value.<br>
+&nbsp;<br>
+Returns&nbsp;None&nbsp;if&nbsp;not&nbsp;possible.</tt></dd></dl>
+
+<dl><dt><a name="Value-GetRepresentativeString"><strong>GetRepresentativeString</strong></a>(self)</dt><dd><tt>Gets&nbsp;a&nbsp;string&nbsp;value&nbsp;that&nbsp;best-represents&nbsp;this&nbsp;value.<br>
+&nbsp;<br>
+Returns&nbsp;None&nbsp;if&nbsp;not&nbsp;possible.</tt></dd></dl>
+
+<dl><dt><a name="Value-IsMergableWith"><strong>IsMergableWith</strong></a>(self, that)</dt></dl>
+
+<dl><dt><a name="Value-__eq__"><strong>__eq__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="Value-__hash__"><strong>__hash__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="Value-__init__"><strong>__init__</strong></a>(self, page, name, units, important, description, tir_label)</dt><dd><tt>A&nbsp;generic&nbsp;<a href="#Value">Value</a>&nbsp;<a href="__builtin__.html#object">object</a>.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;page:&nbsp;A&nbsp;Page&nbsp;<a href="__builtin__.html#object">object</a>,&nbsp;may&nbsp;be&nbsp;given&nbsp;as&nbsp;None&nbsp;to&nbsp;indicate&nbsp;that&nbsp;the&nbsp;value<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;represents&nbsp;results&nbsp;for&nbsp;multiple&nbsp;pages.<br>
+&nbsp;&nbsp;name:&nbsp;A&nbsp;value&nbsp;name&nbsp;string,&nbsp;may&nbsp;contain&nbsp;a&nbsp;dot.&nbsp;Values&nbsp;from&nbsp;the&nbsp;same&nbsp;test<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;with&nbsp;the&nbsp;same&nbsp;prefix&nbsp;before&nbsp;the&nbsp;dot&nbsp;may&nbsp;be&nbsp;considered&nbsp;to&nbsp;belong&nbsp;to<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;the&nbsp;same&nbsp;chart.<br>
+&nbsp;&nbsp;units:&nbsp;A&nbsp;units&nbsp;string.<br>
+&nbsp;&nbsp;important:&nbsp;Whether&nbsp;the&nbsp;value&nbsp;is&nbsp;"important".&nbsp;Causes&nbsp;the&nbsp;value&nbsp;to&nbsp;appear<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;by&nbsp;default&nbsp;in&nbsp;downstream&nbsp;UIs.<br>
+&nbsp;&nbsp;description:&nbsp;A&nbsp;string&nbsp;explaining&nbsp;in&nbsp;human-understandable&nbsp;terms&nbsp;what&nbsp;this<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;value&nbsp;represents.<br>
+&nbsp;&nbsp;tir_label:&nbsp;The&nbsp;string&nbsp;label&nbsp;of&nbsp;the&nbsp;TimelineInteractionRecord&nbsp;with<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;which&nbsp;this&nbsp;value&nbsp;is&nbsp;associated.</tt></dd></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="Value-MergeLikeValuesFromDifferentPages"><strong>MergeLikeValuesFromDifferentPages</strong></a>(cls, values)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Combines&nbsp;the&nbsp;provided&nbsp;values&nbsp;into&nbsp;a&nbsp;single&nbsp;compound&nbsp;value.<br>
+&nbsp;<br>
+When&nbsp;a&nbsp;full&nbsp;pageset&nbsp;runs,&nbsp;a&nbsp;single&nbsp;value_name&nbsp;will&nbsp;usually&nbsp;end&nbsp;up&nbsp;getting<br>
+collected&nbsp;for&nbsp;multiple&nbsp;pages.&nbsp;For&nbsp;instance,&nbsp;we&nbsp;may&nbsp;end&nbsp;up&nbsp;with<br>
+&nbsp;&nbsp;&nbsp;[ScalarValue(page1,&nbsp;'a',&nbsp;&nbsp;1),<br>
+&nbsp;&nbsp;&nbsp;&nbsp;ScalarValue(page2,&nbsp;'a',&nbsp;&nbsp;2)]<br>
+&nbsp;<br>
+This&nbsp;function&nbsp;takes&nbsp;in&nbsp;the&nbsp;values&nbsp;of&nbsp;the&nbsp;same&nbsp;name,&nbsp;but&nbsp;across&nbsp;multiple<br>
+pages,&nbsp;and&nbsp;produces&nbsp;a&nbsp;single&nbsp;summary&nbsp;result&nbsp;value.&nbsp;In&nbsp;this&nbsp;instance,&nbsp;it<br>
+could&nbsp;produce&nbsp;a&nbsp;ScalarValue(None,&nbsp;'a',&nbsp;1.5)&nbsp;to&nbsp;indicate&nbsp;averaging,&nbsp;or&nbsp;even<br>
+ListOfScalarValues(None,&nbsp;'a',&nbsp;[1,&nbsp;2])&nbsp;if&nbsp;concatenated&nbsp;output&nbsp;was&nbsp;desired.<br>
+&nbsp;<br>
+Some&nbsp;results&nbsp;are&nbsp;so&nbsp;specific&nbsp;to&nbsp;a&nbsp;page&nbsp;that&nbsp;they&nbsp;make&nbsp;no&nbsp;sense&nbsp;when<br>
+aggregated&nbsp;across&nbsp;pages.&nbsp;If&nbsp;merging&nbsp;values&nbsp;of&nbsp;this&nbsp;type&nbsp;across&nbsp;pages&nbsp;is<br>
+nonsensical,&nbsp;this&nbsp;method&nbsp;may&nbsp;return&nbsp;None.</tt></dd></dl>
+
+<dl><dt><a name="Value-MergeLikeValuesFromSamePage"><strong>MergeLikeValuesFromSamePage</strong></a>(cls, values)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Combines&nbsp;the&nbsp;provided&nbsp;list&nbsp;of&nbsp;values&nbsp;into&nbsp;a&nbsp;single&nbsp;compound&nbsp;value.<br>
+&nbsp;<br>
+When&nbsp;a&nbsp;page&nbsp;runs&nbsp;multiple&nbsp;times,&nbsp;it&nbsp;may&nbsp;produce&nbsp;multiple&nbsp;values.&nbsp;This<br>
+function&nbsp;is&nbsp;given&nbsp;the&nbsp;same-named&nbsp;values&nbsp;across&nbsp;the&nbsp;multiple&nbsp;runs,&nbsp;and&nbsp;has<br>
+the&nbsp;responsibility&nbsp;of&nbsp;producing&nbsp;a&nbsp;single&nbsp;result.<br>
+&nbsp;<br>
+It&nbsp;must&nbsp;return&nbsp;a&nbsp;single&nbsp;<a href="#Value">Value</a>.&nbsp;If&nbsp;merging&nbsp;does&nbsp;not&nbsp;make&nbsp;sense,&nbsp;the<br>
+implementation&nbsp;must&nbsp;pick&nbsp;a&nbsp;representative&nbsp;value&nbsp;from&nbsp;one&nbsp;of&nbsp;the&nbsp;runs.<br>
+&nbsp;<br>
+For&nbsp;instance,&nbsp;it&nbsp;may&nbsp;be&nbsp;given<br>
+&nbsp;&nbsp;&nbsp;&nbsp;[ScalarValue(page,&nbsp;'a',&nbsp;1),&nbsp;ScalarValue(page,&nbsp;'a',&nbsp;2)]<br>
+and&nbsp;it&nbsp;might&nbsp;produce<br>
+&nbsp;&nbsp;&nbsp;&nbsp;ListOfScalarValues(page,&nbsp;'a',&nbsp;[1,&nbsp;2])</tt></dd></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="Value-FromDict"><strong>FromDict</strong></a>(value_dict, page_dict)</dt><dd><tt>Produces&nbsp;a&nbsp;value&nbsp;from&nbsp;a&nbsp;value&nbsp;dict&nbsp;and&nbsp;a&nbsp;page&nbsp;dict.<br>
+&nbsp;<br>
+<a href="#Value">Value</a>&nbsp;dicts&nbsp;are&nbsp;produced&nbsp;by&nbsp;serialization&nbsp;to&nbsp;JSON,&nbsp;and&nbsp;must&nbsp;be&nbsp;accompanied<br>
+by&nbsp;a&nbsp;dict&nbsp;mapping&nbsp;page&nbsp;IDs&nbsp;to&nbsp;pages,&nbsp;also&nbsp;produced&nbsp;by&nbsp;serialization,&nbsp;in<br>
+order&nbsp;to&nbsp;be&nbsp;completely&nbsp;deserialized.&nbsp;If&nbsp;deserializing&nbsp;multiple&nbsp;values,&nbsp;use<br>
+ListOfValuesFromListOfDicts&nbsp;instead.<br>
+&nbsp;<br>
+value_dict:&nbsp;a&nbsp;dictionary&nbsp;produced&nbsp;by&nbsp;<a href="#Value-AsDict">AsDict</a>()&nbsp;on&nbsp;a&nbsp;value&nbsp;subclass.<br>
+page_dict:&nbsp;a&nbsp;dictionary&nbsp;mapping&nbsp;IDs&nbsp;to&nbsp;page&nbsp;objects.</tt></dd></dl>
+
+<dl><dt><a name="Value-GetConstructorKwArgs"><strong>GetConstructorKwArgs</strong></a>(value_dict, page_dict)</dt><dd><tt>Produces&nbsp;constructor&nbsp;arguments&nbsp;from&nbsp;a&nbsp;value&nbsp;dict&nbsp;and&nbsp;a&nbsp;page&nbsp;dict.<br>
+&nbsp;<br>
+Takes&nbsp;a&nbsp;dict&nbsp;parsed&nbsp;from&nbsp;JSON&nbsp;and&nbsp;an&nbsp;index&nbsp;of&nbsp;pages&nbsp;and&nbsp;recovers&nbsp;the<br>
+keyword&nbsp;arguments&nbsp;to&nbsp;be&nbsp;passed&nbsp;to&nbsp;the&nbsp;constructor&nbsp;for&nbsp;deserializing&nbsp;the<br>
+dict.<br>
+&nbsp;<br>
+value_dict:&nbsp;a&nbsp;dictionary&nbsp;produced&nbsp;by&nbsp;<a href="#Value-AsDict">AsDict</a>()&nbsp;on&nbsp;a&nbsp;value&nbsp;subclass.<br>
+page_dict:&nbsp;a&nbsp;dictionary&nbsp;mapping&nbsp;IDs&nbsp;to&nbsp;page&nbsp;objects.</tt></dd></dl>
+
+<dl><dt><a name="Value-GetJSONTypeName"><strong>GetJSONTypeName</strong></a>()</dt><dd><tt>Gets&nbsp;the&nbsp;typename&nbsp;for&nbsp;serialization&nbsp;to&nbsp;JSON&nbsp;using&nbsp;AsDict.</tt></dd></dl>
+
+<dl><dt><a name="Value-ListOfValuesFromListOfDicts"><strong>ListOfValuesFromListOfDicts</strong></a>(value_dicts, page_dict)</dt><dd><tt>Takes&nbsp;a&nbsp;list&nbsp;of&nbsp;value&nbsp;dicts&nbsp;to&nbsp;values.<br>
+&nbsp;<br>
+Given&nbsp;a&nbsp;list&nbsp;of&nbsp;value&nbsp;dicts&nbsp;produced&nbsp;by&nbsp;AsDict,&nbsp;this&nbsp;method<br>
+deserializes&nbsp;the&nbsp;dicts&nbsp;given&nbsp;a&nbsp;dict&nbsp;mapping&nbsp;page&nbsp;IDs&nbsp;to&nbsp;pages.<br>
+This&nbsp;method&nbsp;performs&nbsp;memoization&nbsp;for&nbsp;deserializing&nbsp;a&nbsp;list&nbsp;of&nbsp;values<br>
+efficiently,&nbsp;where&nbsp;FromDict&nbsp;is&nbsp;meant&nbsp;to&nbsp;handle&nbsp;one-offs.<br>
+&nbsp;<br>
+value_dicts:&nbsp;a&nbsp;list&nbsp;of&nbsp;value&nbsp;dicts&nbsp;produced&nbsp;by&nbsp;<a href="#Value-AsDict">AsDict</a>()&nbsp;on&nbsp;a&nbsp;value&nbsp;subclass.<br>
+page_dict:&nbsp;a&nbsp;dictionary&nbsp;mapping&nbsp;IDs&nbsp;to&nbsp;page&nbsp;objects.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>name_suffix</strong></dt>
+<dd><tt>Returns&nbsp;the&nbsp;string&nbsp;after&nbsp;a&nbsp;.&nbsp;in&nbsp;the&nbsp;name,&nbsp;or&nbsp;the&nbsp;full&nbsp;name&nbsp;otherwise.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-ValueNameFromTraceAndChartName"><strong>ValueNameFromTraceAndChartName</strong></a>(trace_name, chart_name<font color="#909090">=None</font>)</dt><dd><tt>Mangles&nbsp;a&nbsp;trace&nbsp;name&nbsp;plus&nbsp;optional&nbsp;chart&nbsp;name&nbsp;into&nbsp;a&nbsp;standard&nbsp;string.<br>
+&nbsp;<br>
+A&nbsp;value&nbsp;might&nbsp;just&nbsp;be&nbsp;a&nbsp;bareword&nbsp;name,&nbsp;e.g.&nbsp;numPixels.&nbsp;In&nbsp;that&nbsp;case,&nbsp;its<br>
+chart&nbsp;may&nbsp;be&nbsp;None.<br>
+&nbsp;<br>
+But,&nbsp;a&nbsp;value&nbsp;might&nbsp;also&nbsp;be&nbsp;intended&nbsp;for&nbsp;display&nbsp;with&nbsp;other&nbsp;values,&nbsp;in&nbsp;which<br>
+case&nbsp;the&nbsp;chart&nbsp;name&nbsp;indicates&nbsp;that&nbsp;grouping.&nbsp;So,&nbsp;you&nbsp;might&nbsp;have<br>
+screen.numPixels,&nbsp;screen.resolution,&nbsp;where&nbsp;chartName='screen'.</tt></dd></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT</strong> = 'merged-pages-result-output-context'<br>
+<strong>CONCATENATE</strong> = 'concatenate'<br>
+<strong>PER_PAGE_RESULT_OUTPUT_CONTEXT</strong> = 'per-page-result-output-context'<br>
+<strong>PICK_FIRST</strong> = 'pick-first'<br>
+<strong>SUMMARY_RESULT_OUTPUT_CONTEXT</strong> = 'summary-result-output-context'</td></tr></table>
+</body></html>
\ No newline at end of file
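The FromDict and ListOfValuesFromListOfDicts docstrings above hinge on one idea: a serialized value only references its page, so deserialization needs a separate page_dict mapping page IDs back to page objects. Here is a standalone sketch of that pattern; the field names 'page_id', 'name', 'units' and 'value' are illustrative assumptions, not the real wire format.

    class Page(object):
        def __init__(self, url):
            self.url = url

    def value_from_dict(value_dict, page_dict):
        # Resolve the page reference through the page index, as FromDict must.
        page = page_dict.get(value_dict.get('page_id'))
        return {'page': page,
                'name': value_dict['name'],
                'units': value_dict['units'],
                'value': value_dict['value']}

    page_dict = {7: Page('http://example.com')}
    value_dict = {'page_id': 7, 'name': 'warm_times.page_load_time',
                  'units': 'ms', 'value': 12}
    restored = value_from_dict(value_dict, page_dict)
    print(restored['page'].url)  # http://example.com
    # Mirrors the documented name_suffix property: the part after the dot.
    print(restored['name'].split('.', 1)[-1])  # page_load_time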
diff --git a/catapult/telemetry/docs/pydoc/telemetry.value.improvement_direction.html b/catapult/telemetry/docs/pydoc/telemetry.value.improvement_direction.html
new file mode 100644
index 0000000..8f4b222
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.value.improvement_direction.html
@@ -0,0 +1,33 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.value.improvement_direction</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.value.html"><font color="#ffffff">value</font></a>.improvement_direction</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/value/improvement_direction.py">telemetry/value/improvement_direction.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-IsValid"><strong>IsValid</strong></a>(improvement_direction)</dt></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>DOWN</strong> = 'down'<br>
+<strong>UP</strong> = 'up'</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.value.list_of_scalar_values.html b/catapult/telemetry/docs/pydoc/telemetry.value.list_of_scalar_values.html
new file mode 100644
index 0000000..ec9dc88
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.value.list_of_scalar_values.html
@@ -0,0 +1,173 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.value.list_of_scalar_values</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.value.html"><font color="#ffffff">value</font></a>.list_of_scalar_values</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/value/list_of_scalar_values.py">telemetry/value/list_of_scalar_values.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="math.html">math</a><br>
+<a href="telemetry.value.none_values.html">telemetry.value.none_values</a><br>
+</td><td width="25%" valign=top><a href="numbers.html">numbers</a><br>
+<a href="telemetry.value.summarizable.html">telemetry.value.summarizable</a><br>
+</td><td width="25%" valign=top><a href="telemetry.value.html">telemetry.value</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.value.summarizable.html#SummarizableValue">telemetry.value.summarizable.SummarizableValue</a>(<a href="telemetry.value.html#Value">telemetry.value.Value</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.value.list_of_scalar_values.html#ListOfScalarValues">ListOfScalarValues</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ListOfScalarValues">class <strong>ListOfScalarValues</strong></a>(<a href="telemetry.value.summarizable.html#SummarizableValue">telemetry.value.summarizable.SummarizableValue</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt><a href="#ListOfScalarValues">ListOfScalarValues</a>&nbsp;represents&nbsp;a&nbsp;list&nbsp;of&nbsp;numbers.<br>
+&nbsp;<br>
+By&nbsp;default,&nbsp;std&nbsp;is&nbsp;the&nbsp;standard&nbsp;deviation&nbsp;of&nbsp;all&nbsp;numbers&nbsp;in&nbsp;the&nbsp;list.&nbsp;Std&nbsp;can<br>
+also&nbsp;be&nbsp;specified&nbsp;in&nbsp;the&nbsp;constructor&nbsp;if&nbsp;the&nbsp;numbers&nbsp;are&nbsp;not&nbsp;from&nbsp;the&nbsp;same<br>
+population.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.value.list_of_scalar_values.html#ListOfScalarValues">ListOfScalarValues</a></dd>
+<dd><a href="telemetry.value.summarizable.html#SummarizableValue">telemetry.value.summarizable.SummarizableValue</a></dd>
+<dd><a href="telemetry.value.html#Value">telemetry.value.Value</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="ListOfScalarValues-AsDict"><strong>AsDict</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ListOfScalarValues-GetBuildbotDataType"><strong>GetBuildbotDataType</strong></a>(self, output_context)</dt></dl>
+
+<dl><dt><a name="ListOfScalarValues-GetBuildbotValue"><strong>GetBuildbotValue</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ListOfScalarValues-GetRepresentativeNumber"><strong>GetRepresentativeNumber</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ListOfScalarValues-GetRepresentativeString"><strong>GetRepresentativeString</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ListOfScalarValues-IsMergableWith"><strong>IsMergableWith</strong></a>(self, that)</dt></dl>
+
+<dl><dt><a name="ListOfScalarValues-__init__"><strong>__init__</strong></a>(self, page, name, units, values, important<font color="#909090">=True</font>, description<font color="#909090">=None</font>, tir_label<font color="#909090">=None</font>, none_value_reason<font color="#909090">=None</font>, std<font color="#909090">=None</font>, same_page_merge_policy<font color="#909090">='concatenate'</font>, improvement_direction<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="ListOfScalarValues-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="ListOfScalarValues-MergeLikeValuesFromDifferentPages"><strong>MergeLikeValuesFromDifferentPages</strong></a>(cls, values)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="ListOfScalarValues-MergeLikeValuesFromSamePage"><strong>MergeLikeValuesFromSamePage</strong></a>(cls, values)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="ListOfScalarValues-FromDict"><strong>FromDict</strong></a>(value_dict, page_dict)</dt></dl>
+
+<dl><dt><a name="ListOfScalarValues-GetJSONTypeName"><strong>GetJSONTypeName</strong></a>()</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>std</strong></dt>
+</dl>
+<dl><dt><strong>variance</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.value.summarizable.html#SummarizableValue">telemetry.value.summarizable.SummarizableValue</a>:<br>
+<dl><dt><a name="ListOfScalarValues-AsDictWithoutBaseClassEntries"><strong>AsDictWithoutBaseClassEntries</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.value.summarizable.html#SummarizableValue">telemetry.value.summarizable.SummarizableValue</a>:<br>
+<dl><dt><strong>improvement_direction</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.value.html#Value">telemetry.value.Value</a>:<br>
+<dl><dt><a name="ListOfScalarValues-GetChartAndTraceNameForComputedSummaryResult"><strong>GetChartAndTraceNameForComputedSummaryResult</strong></a>(self, trace_tag)</dt></dl>
+
+<dl><dt><a name="ListOfScalarValues-GetChartAndTraceNameForPerPageResult"><strong>GetChartAndTraceNameForPerPageResult</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ListOfScalarValues-__eq__"><strong>__eq__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="ListOfScalarValues-__hash__"><strong>__hash__</strong></a>(self)</dt></dl>
+
+<hr>
+Static methods inherited from <a href="telemetry.value.html#Value">telemetry.value.Value</a>:<br>
+<dl><dt><a name="ListOfScalarValues-GetConstructorKwArgs"><strong>GetConstructorKwArgs</strong></a>(value_dict, page_dict)</dt><dd><tt>Produces&nbsp;constructor&nbsp;arguments&nbsp;from&nbsp;a&nbsp;value&nbsp;dict&nbsp;and&nbsp;a&nbsp;page&nbsp;dict.<br>
+&nbsp;<br>
+Takes&nbsp;a&nbsp;dict&nbsp;parsed&nbsp;from&nbsp;JSON&nbsp;and&nbsp;an&nbsp;index&nbsp;of&nbsp;pages&nbsp;and&nbsp;recovers&nbsp;the<br>
+keyword&nbsp;arguments&nbsp;to&nbsp;be&nbsp;passed&nbsp;to&nbsp;the&nbsp;constructor&nbsp;for&nbsp;deserializing&nbsp;the<br>
+dict.<br>
+&nbsp;<br>
+value_dict:&nbsp;a&nbsp;dictionary&nbsp;produced&nbsp;by&nbsp;<a href="#ListOfScalarValues-AsDict">AsDict</a>()&nbsp;on&nbsp;a&nbsp;value&nbsp;subclass.<br>
+page_dict:&nbsp;a&nbsp;dictionary&nbsp;mapping&nbsp;IDs&nbsp;to&nbsp;page&nbsp;objects.</tt></dd></dl>
+
+<dl><dt><a name="ListOfScalarValues-ListOfValuesFromListOfDicts"><strong>ListOfValuesFromListOfDicts</strong></a>(value_dicts, page_dict)</dt><dd><tt>Takes&nbsp;a&nbsp;list&nbsp;of&nbsp;value&nbsp;dicts&nbsp;to&nbsp;values.<br>
+&nbsp;<br>
+Given&nbsp;a&nbsp;list&nbsp;of&nbsp;value&nbsp;dicts&nbsp;produced&nbsp;by&nbsp;AsDict,&nbsp;this&nbsp;method<br>
+deserializes&nbsp;the&nbsp;dicts&nbsp;given&nbsp;a&nbsp;dict&nbsp;mapping&nbsp;page&nbsp;IDs&nbsp;to&nbsp;pages.<br>
+This&nbsp;method&nbsp;performs&nbsp;memoization&nbsp;for&nbsp;deserializing&nbsp;a&nbsp;list&nbsp;of&nbsp;values<br>
+efficiently,&nbsp;where&nbsp;FromDict&nbsp;is&nbsp;meant&nbsp;to&nbsp;handle&nbsp;one-offs.<br>
+&nbsp;<br>
+value_dicts:&nbsp;a&nbsp;list&nbsp;of&nbsp;value&nbsp;dicts&nbsp;produced&nbsp;by&nbsp;<a href="#ListOfScalarValues-AsDict">AsDict</a>()&nbsp;on&nbsp;a&nbsp;value&nbsp;subclass.<br>
+page_dict:&nbsp;a&nbsp;dictionary&nbsp;mapping&nbsp;IDs&nbsp;to&nbsp;page&nbsp;objects.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.value.html#Value">telemetry.value.Value</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>name_suffix</strong></dt>
+<dd><tt>Returns&nbsp;the&nbsp;string&nbsp;after&nbsp;a&nbsp;.&nbsp;in&nbsp;the&nbsp;name,&nbsp;or&nbsp;the&nbsp;full&nbsp;name&nbsp;otherwise.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-PooledStandardDeviation"><strong>PooledStandardDeviation</strong></a>(list_of_samples, list_of_variances<font color="#909090">=None</font>)</dt><dd><tt>Compute&nbsp;standard&nbsp;deviation&nbsp;for&nbsp;a&nbsp;list&nbsp;of&nbsp;samples.<br>
+&nbsp;<br>
+See:&nbsp;https://en.wikipedia.org/wiki/Pooled_variance&nbsp;for&nbsp;the&nbsp;formula.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;list_of_samples:&nbsp;a&nbsp;list&nbsp;of&nbsp;lists,&nbsp;each&nbsp;is&nbsp;a&nbsp;list&nbsp;of&nbsp;numbers.<br>
+&nbsp;&nbsp;list_of_variances:&nbsp;a&nbsp;list&nbsp;of&nbsp;numbers,&nbsp;the&nbsp;i-th&nbsp;element&nbsp;is&nbsp;the&nbsp;variance&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;the&nbsp;i-th&nbsp;sample&nbsp;in&nbsp;list_of_samples.&nbsp;If&nbsp;this&nbsp;is&nbsp;None,&nbsp;we&nbsp;use<br>
+&nbsp;&nbsp;&nbsp;&nbsp;<a href="#-Variance">Variance</a>(sample)&nbsp;to&nbsp;get&nbsp;the&nbsp;variance&nbsp;of&nbsp;the&nbsp;i-th&nbsp;sample.</tt></dd></dl>
+ <dl><dt><a name="-StandardDeviation"><strong>StandardDeviation</strong></a>(sample)</dt><dd><tt>Compute&nbsp;standard&nbsp;deviation&nbsp;for&nbsp;a&nbsp;list&nbsp;of&nbsp;numbers.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;sample:&nbsp;a&nbsp;list&nbsp;of&nbsp;numbers.</tt></dd></dl>
+ <dl><dt><a name="-Variance"><strong>Variance</strong></a>(sample)</dt><dd><tt>Compute&nbsp;the&nbsp;population&nbsp;variance.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;sample:&nbsp;a&nbsp;list&nbsp;of&nbsp;numbers.</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
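PooledStandardDeviation, StandardDeviation and Variance above are plain statistics helpers: Variance is the population variance, StandardDeviation its square root, and PooledStandardDeviation combines several samples using the pooled-variance formula the docstring links to. A standalone numeric sketch follows; the (n - 1) weighting matches the linked Wikipedia formula, and whether the real helper weights by n or n - 1 is not shown in this documentation.

    import math

    def population_variance(sample):
        mean = float(sum(sample)) / len(sample)
        return sum((x - mean) ** 2 for x in sample) / len(sample)

    def pooled_standard_deviation(list_of_samples, list_of_variances=None):
        if list_of_variances is None:
            list_of_variances = [population_variance(s) for s in list_of_samples]
        num = sum((len(s) - 1) * v
                  for s, v in zip(list_of_samples, list_of_variances))
        den = sum(len(s) - 1 for s in list_of_samples)
        return math.sqrt(num / den) if den else 0.0

    print(population_variance([2, 4, 4, 4, 5, 5, 7, 9]))         # 4.0
    print(pooled_standard_deviation([[1, 2, 3], [2, 4, 6, 8]]))  # ~1.81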
diff --git a/catapult/telemetry/docs/pydoc/telemetry.value.merge_values.html b/catapult/telemetry/docs/pydoc/telemetry.value.merge_values.html
new file mode 100644
index 0000000..2939fca
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.value.merge_values.html
@@ -0,0 +1,90 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.value.merge_values</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.value.html"><font color="#ffffff">value</font></a>.merge_values</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/value/merge_values.py">telemetry/value/merge_values.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.value.failure.html">telemetry.value.failure</a><br>
+</td><td width="25%" valign=top><a href="telemetry.value.skip.html">telemetry.value.skip</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-DefaultKeyFunc"><strong>DefaultKeyFunc</strong></a>(value)</dt><dd><tt>Keys&nbsp;values&nbsp;in&nbsp;a&nbsp;standard&nbsp;way&nbsp;for&nbsp;grouping&nbsp;in&nbsp;merging&nbsp;and&nbsp;summary.<br>
+&nbsp;<br>
+Merging&nbsp;and&nbsp;summarization&nbsp;can&nbsp;be&nbsp;parameterized&nbsp;by&nbsp;a&nbsp;function&nbsp;that&nbsp;groups<br>
+values&nbsp;into&nbsp;equivalence&nbsp;classes.&nbsp;Any&nbsp;function&nbsp;that&nbsp;returns&nbsp;a&nbsp;comparable<br>
+object&nbsp;can&nbsp;be&nbsp;used&nbsp;as&nbsp;a&nbsp;key_func,&nbsp;but&nbsp;merge_values&nbsp;and&nbsp;summary&nbsp;both&nbsp;use&nbsp;this<br>
+function&nbsp;by&nbsp;default,&nbsp;to&nbsp;allow&nbsp;the&nbsp;default&nbsp;grouping&nbsp;to&nbsp;change&nbsp;as&nbsp;Telemetry&nbsp;does.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;value:&nbsp;A&nbsp;Telemetry&nbsp;Value&nbsp;instance<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;comparable&nbsp;object&nbsp;used&nbsp;to&nbsp;group&nbsp;values.</tt></dd></dl>
+ <dl><dt><a name="-GroupStably"><strong>GroupStably</strong></a>(all_values, key_func)</dt><dd><tt>Groups&nbsp;an&nbsp;array&nbsp;by&nbsp;key_func,&nbsp;with&nbsp;the&nbsp;groups&nbsp;returned&nbsp;in&nbsp;a&nbsp;stable&nbsp;order.<br>
+&nbsp;<br>
+Returns&nbsp;a&nbsp;list&nbsp;of&nbsp;groups.</tt></dd></dl>
+ <dl><dt><a name="-MergeLikeValuesFromDifferentPages"><strong>MergeLikeValuesFromDifferentPages</strong></a>(all_values, key_func<font color="#909090">=&lt;function DefaultKeyFunc&gt;</font>)</dt><dd><tt>Merges&nbsp;values&nbsp;that&nbsp;measure&nbsp;the&nbsp;same&nbsp;thing&nbsp;on&nbsp;different&nbsp;pages.<br>
+&nbsp;<br>
+After&nbsp;using&nbsp;MergeLikeValuesFromSamePage,&nbsp;one&nbsp;still&nbsp;ends&nbsp;up&nbsp;with&nbsp;values&nbsp;from<br>
+different&nbsp;pages:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;ScalarValue(page1,&nbsp;'x',&nbsp;1,&nbsp;'foo')<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;ScalarValue(page1,&nbsp;'y',&nbsp;30,&nbsp;'bar')<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;ScalarValue(page2,&nbsp;'x',&nbsp;2,&nbsp;'foo')<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;ScalarValue(page2,&nbsp;'y',&nbsp;40,&nbsp;'baz')<br>
+&nbsp;<br>
+This&nbsp;function&nbsp;will&nbsp;group&nbsp;values&nbsp;with&nbsp;the&nbsp;same&nbsp;name&nbsp;and&nbsp;tir_label&nbsp;together:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;ListOfScalarValues(None,&nbsp;'x',&nbsp;[1,&nbsp;2],&nbsp;'foo')<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;ListOfScalarValues(None,&nbsp;'y',&nbsp;[30],&nbsp;'bar')<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;ListOfScalarValues(None,&nbsp;'y',&nbsp;[40],&nbsp;'baz')<br>
+&nbsp;<br>
+The&nbsp;workhorse&nbsp;of&nbsp;this&nbsp;code&nbsp;is&nbsp;Value.MergeLikeValuesFromDifferentPages.<br>
+&nbsp;<br>
+Not&nbsp;all&nbsp;values&nbsp;that&nbsp;go&nbsp;into&nbsp;this&nbsp;function&nbsp;will&nbsp;come&nbsp;out:&nbsp;not&nbsp;every&nbsp;value&nbsp;can<br>
+be&nbsp;merged&nbsp;across&nbsp;pages.&nbsp;Values&nbsp;whose&nbsp;MergeLikeValuesFromDifferentPages&nbsp;returns<br>
+None&nbsp;will&nbsp;be&nbsp;omitted&nbsp;from&nbsp;the&nbsp;results.<br>
+&nbsp;<br>
+This&nbsp;requires&nbsp;(but&nbsp;does&nbsp;not&nbsp;verify)&nbsp;that&nbsp;the&nbsp;values&nbsp;passed&nbsp;in&nbsp;with&nbsp;the&nbsp;same&nbsp;name&nbsp;pass<br>
+the&nbsp;Value.IsMergableWith&nbsp;test.&nbsp;If&nbsp;this&nbsp;is&nbsp;not&nbsp;obeyed,&nbsp;the&nbsp;results<br>
+will&nbsp;be&nbsp;undefined.</tt></dd></dl>
+ <dl><dt><a name="-MergeLikeValuesFromSamePage"><strong>MergeLikeValuesFromSamePage</strong></a>(all_values, key_func<font color="#909090">=&lt;function DefaultKeyFunc&gt;</font>)</dt><dd><tt>Merges&nbsp;values&nbsp;that&nbsp;measure&nbsp;the&nbsp;same&nbsp;thing&nbsp;on&nbsp;the&nbsp;same&nbsp;page.<br>
+&nbsp;<br>
+A&nbsp;page&nbsp;may&nbsp;end&nbsp;up&nbsp;being&nbsp;measured&nbsp;multiple&nbsp;times,&nbsp;meaning&nbsp;that&nbsp;we&nbsp;may&nbsp;end&nbsp;up<br>
+with&nbsp;something&nbsp;like&nbsp;this:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;ScalarValue(page1,&nbsp;'x',&nbsp;1,&nbsp;'foo')<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;ScalarValue(page2,&nbsp;'x',&nbsp;4,&nbsp;'bar')<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;ScalarValue(page1,&nbsp;'x',&nbsp;2,&nbsp;'foo')<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;ScalarValue(page2,&nbsp;'x',&nbsp;5,&nbsp;'baz')<br>
+&nbsp;<br>
+This&nbsp;function&nbsp;will&nbsp;produce:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;ListOfScalarValues(page1,&nbsp;'x',&nbsp;[1,&nbsp;2],&nbsp;'foo')<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;ListOfScalarValues(page2,&nbsp;'x',&nbsp;[4],&nbsp;'bar')<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;ListOfScalarValues(page2,&nbsp;'x',&nbsp;[5],&nbsp;'baz')<br>
+&nbsp;<br>
+The&nbsp;workhorse&nbsp;of&nbsp;this&nbsp;code&nbsp;is&nbsp;Value.MergeLikeValuesFromSamePage.<br>
+&nbsp;<br>
+This&nbsp;requires&nbsp;(but&nbsp;does&nbsp;not&nbsp;verify)&nbsp;that&nbsp;the&nbsp;values&nbsp;passed&nbsp;in&nbsp;with&nbsp;the&nbsp;same&nbsp;grouping<br>
+key&nbsp;pass&nbsp;the&nbsp;Value.IsMergableWith&nbsp;test.&nbsp;If&nbsp;this&nbsp;is&nbsp;not&nbsp;obeyed,&nbsp;the<br>
+results&nbsp;will&nbsp;be&nbsp;undefined.</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
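The two merge helpers above reduce to the same recipe: group values by a key (by default the page, name and tir_label), then hand each group to the corresponding Value.MergeLikeValues* class method. Here is a standalone sketch of the grouping-and-concatenation behaviour the MergeLikeValuesFromSamePage docstring describes, with values reduced to plain (page, name, number, tir_label) tuples so the example stays self-contained.

    from collections import OrderedDict

    def group_stably(all_values, key_func):
        """Groups values by key_func, preserving first-seen order of groups."""
        groups = OrderedDict()
        for value in all_values:
            groups.setdefault(key_func(value), []).append(value)
        return list(groups.values())

    def merge_like_values_from_same_page(all_values):
        # Group by (page, name, tir_label) and concatenate the numbers,
        # mirroring the ScalarValue -> ListOfScalarValues example above.
        merged = []
        for group in group_stably(all_values, lambda v: (v[0], v[1], v[3])):
            page, name, _, tir_label = group[0]
            merged.append((page, name, [v[2] for v in group], tir_label))
        return merged

    values = [('page1', 'x', 1, 'foo'), ('page2', 'x', 4, 'bar'),
              ('page1', 'x', 2, 'foo'), ('page2', 'x', 5, 'baz')]
    print(merge_like_values_from_same_page(values))
    # [('page1', 'x', [1, 2], 'foo'), ('page2', 'x', [4], 'bar'),
    #  ('page2', 'x', [5], 'baz')]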
diff --git a/catapult/telemetry/docs/pydoc/telemetry.value.none_values.html b/catapult/telemetry/docs/pydoc/telemetry.value.none_values.html
new file mode 100644
index 0000000..f2df036
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.value.none_values.html
@@ -0,0 +1,168 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.value.none_values</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.value.html"><font color="#ffffff">value</font></a>.none_values</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/value/none_values.py">telemetry/value/none_values.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.value.none_values.html#NoneValueMissingReason">NoneValueMissingReason</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.value.none_values.html#ValueMustHaveNoneValue">ValueMustHaveNoneValue</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="NoneValueMissingReason">class <strong>NoneValueMissingReason</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.value.none_values.html#NoneValueMissingReason">NoneValueMissingReason</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="NoneValueMissingReason-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#NoneValueMissingReason-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#NoneValueMissingReason-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="NoneValueMissingReason-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#NoneValueMissingReason-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="NoneValueMissingReason-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#NoneValueMissingReason-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="NoneValueMissingReason-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#NoneValueMissingReason-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="NoneValueMissingReason-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#NoneValueMissingReason-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="NoneValueMissingReason-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="NoneValueMissingReason-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#NoneValueMissingReason-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="NoneValueMissingReason-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#NoneValueMissingReason-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="NoneValueMissingReason-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="NoneValueMissingReason-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#NoneValueMissingReason-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="NoneValueMissingReason-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ValueMustHaveNoneValue">class <strong>ValueMustHaveNoneValue</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.value.none_values.html#ValueMustHaveNoneValue">ValueMustHaveNoneValue</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="ValueMustHaveNoneValue-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#ValueMustHaveNoneValue-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#ValueMustHaveNoneValue-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;object&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="ValueMustHaveNoneValue-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ValueMustHaveNoneValue-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ValueMustHaveNoneValue-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#ValueMustHaveNoneValue-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ValueMustHaveNoneValue-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#ValueMustHaveNoneValue-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="ValueMustHaveNoneValue-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#ValueMustHaveNoneValue-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="ValueMustHaveNoneValue-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ValueMustHaveNoneValue-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#ValueMustHaveNoneValue-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="ValueMustHaveNoneValue-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ValueMustHaveNoneValue-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="ValueMustHaveNoneValue-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ValueMustHaveNoneValue-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#ValueMustHaveNoneValue-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="ValueMustHaveNoneValue-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-ValidateNoneValueReason"><strong>ValidateNoneValueReason</strong></a>(value, none_value_reason)</dt><dd><tt>Ensures&nbsp;that&nbsp;the&nbsp;none_value_reason&nbsp;is&nbsp;appropriate&nbsp;for&nbsp;the&nbsp;given&nbsp;value.<br>
+&nbsp;<br>
+There&nbsp;is&nbsp;a&nbsp;logical&nbsp;equality&nbsp;between&nbsp;having&nbsp;a&nbsp;value&nbsp;of&nbsp;None&nbsp;and&nbsp;having&nbsp;a<br>
+reason&nbsp;for&nbsp;being&nbsp;None.&nbsp;That&nbsp;is&nbsp;to&nbsp;say,&nbsp;value&nbsp;is&nbsp;None&nbsp;if&nbsp;and&nbsp;only&nbsp;if<br>
+none_value_reason&nbsp;is&nbsp;a&nbsp;string.</tt></dd></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>MERGE_FAILURE_REASON</strong> = 'Merging values containing a None value results in a None value.'</td></tr></table>
+</body></html>
\ No newline at end of file
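The generated page above documents `telemetry.value.none_values`: a value may be None only when a `none_value_reason` string explains why, and vice versa. A minimal sketch of exercising that invariant, assuming the two exception classes map to the two failure directions their names suggest:

```python
# Hedged sketch of the invariant documented in telemetry.value.none_values:
# value is None if and only if none_value_reason is a string.
from telemetry.value import none_values

# Consistent pairs are expected to pass validation silently.
none_values.ValidateNoneValueReason(None, 'Metric timed out, no sample taken.')
none_values.ValidateNoneValueReason(42, None)

# Inconsistent pairs are assumed to raise the exceptions documented above:
#   ValidateNoneValueReason(None, None)        -> NoneValueMissingReason
#   ValidateNoneValueReason(42, 'some reason') -> ValueMustHaveNoneValue
```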
diff --git a/catapult/telemetry/docs/pydoc/telemetry.value.scalar.html b/catapult/telemetry/docs/pydoc/telemetry.value.scalar.html
new file mode 100644
index 0000000..1ffc353
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.value.scalar.html
@@ -0,0 +1,141 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.value.scalar</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.value.html"><font color="#ffffff">value</font></a>.scalar</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/value/scalar.py">telemetry/value/scalar.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.value.list_of_scalar_values.html">telemetry.value.list_of_scalar_values</a><br>
+<a href="telemetry.value.none_values.html">telemetry.value.none_values</a><br>
+</td><td width="25%" valign=top><a href="numbers.html">numbers</a><br>
+<a href="telemetry.value.summarizable.html">telemetry.value.summarizable</a><br>
+</td><td width="25%" valign=top><a href="telemetry.value.html">telemetry.value</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.value.summarizable.html#SummarizableValue">telemetry.value.summarizable.SummarizableValue</a>(<a href="telemetry.value.html#Value">telemetry.value.Value</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.value.scalar.html#ScalarValue">ScalarValue</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ScalarValue">class <strong>ScalarValue</strong></a>(<a href="telemetry.value.summarizable.html#SummarizableValue">telemetry.value.summarizable.SummarizableValue</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.value.scalar.html#ScalarValue">ScalarValue</a></dd>
+<dd><a href="telemetry.value.summarizable.html#SummarizableValue">telemetry.value.summarizable.SummarizableValue</a></dd>
+<dd><a href="telemetry.value.html#Value">telemetry.value.Value</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="ScalarValue-AsDict"><strong>AsDict</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ScalarValue-GetBuildbotDataType"><strong>GetBuildbotDataType</strong></a>(self, output_context)</dt></dl>
+
+<dl><dt><a name="ScalarValue-GetBuildbotValue"><strong>GetBuildbotValue</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ScalarValue-GetRepresentativeNumber"><strong>GetRepresentativeNumber</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ScalarValue-GetRepresentativeString"><strong>GetRepresentativeString</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ScalarValue-__init__"><strong>__init__</strong></a>(self, page, name, units, value, important<font color="#909090">=True</font>, description<font color="#909090">=None</font>, tir_label<font color="#909090">=None</font>, none_value_reason<font color="#909090">=None</font>, improvement_direction<font color="#909090">=None</font>)</dt><dd><tt>A&nbsp;single&nbsp;value&nbsp;(float&nbsp;or&nbsp;integer)&nbsp;result&nbsp;from&nbsp;a&nbsp;test.<br>
+&nbsp;<br>
+A&nbsp;test&nbsp;that&nbsp;counts&nbsp;the&nbsp;number&nbsp;of&nbsp;DOM&nbsp;elements&nbsp;in&nbsp;a&nbsp;page&nbsp;might&nbsp;produce&nbsp;a<br>
+scalar&nbsp;value:<br>
+&nbsp;&nbsp;&nbsp;<a href="#ScalarValue">ScalarValue</a>(page,&nbsp;'num_dom_elements',&nbsp;'count',&nbsp;num_elements)</tt></dd></dl>
+
+<dl><dt><a name="ScalarValue-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="ScalarValue-MergeLikeValuesFromDifferentPages"><strong>MergeLikeValuesFromDifferentPages</strong></a>(cls, values)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="ScalarValue-MergeLikeValuesFromSamePage"><strong>MergeLikeValuesFromSamePage</strong></a>(cls, values)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="ScalarValue-FromDict"><strong>FromDict</strong></a>(value_dict, page_dict)</dt></dl>
+
+<dl><dt><a name="ScalarValue-GetJSONTypeName"><strong>GetJSONTypeName</strong></a>()</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.value.summarizable.html#SummarizableValue">telemetry.value.summarizable.SummarizableValue</a>:<br>
+<dl><dt><a name="ScalarValue-AsDictWithoutBaseClassEntries"><strong>AsDictWithoutBaseClassEntries</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.value.summarizable.html#SummarizableValue">telemetry.value.summarizable.SummarizableValue</a>:<br>
+<dl><dt><strong>improvement_direction</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.value.html#Value">telemetry.value.Value</a>:<br>
+<dl><dt><a name="ScalarValue-GetChartAndTraceNameForComputedSummaryResult"><strong>GetChartAndTraceNameForComputedSummaryResult</strong></a>(self, trace_tag)</dt></dl>
+
+<dl><dt><a name="ScalarValue-GetChartAndTraceNameForPerPageResult"><strong>GetChartAndTraceNameForPerPageResult</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="ScalarValue-IsMergableWith"><strong>IsMergableWith</strong></a>(self, that)</dt></dl>
+
+<dl><dt><a name="ScalarValue-__eq__"><strong>__eq__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="ScalarValue-__hash__"><strong>__hash__</strong></a>(self)</dt></dl>
+
+<hr>
+Static methods inherited from <a href="telemetry.value.html#Value">telemetry.value.Value</a>:<br>
+<dl><dt><a name="ScalarValue-GetConstructorKwArgs"><strong>GetConstructorKwArgs</strong></a>(value_dict, page_dict)</dt><dd><tt>Produces&nbsp;constructor&nbsp;arguments&nbsp;from&nbsp;a&nbsp;value&nbsp;dict&nbsp;and&nbsp;a&nbsp;page&nbsp;dict.<br>
+&nbsp;<br>
+Takes&nbsp;a&nbsp;dict&nbsp;parsed&nbsp;from&nbsp;JSON&nbsp;and&nbsp;an&nbsp;index&nbsp;of&nbsp;pages&nbsp;and&nbsp;recovers&nbsp;the<br>
+keyword&nbsp;arguments&nbsp;to&nbsp;be&nbsp;passed&nbsp;to&nbsp;the&nbsp;constructor&nbsp;for&nbsp;deserializing&nbsp;the<br>
+dict.<br>
+&nbsp;<br>
+value_dict:&nbsp;a&nbsp;dictionary&nbsp;produced&nbsp;by&nbsp;<a href="#ScalarValue-AsDict">AsDict</a>()&nbsp;on&nbsp;a&nbsp;value&nbsp;subclass.<br>
+page_dict:&nbsp;a&nbsp;dictionary&nbsp;mapping&nbsp;IDs&nbsp;to&nbsp;page&nbsp;objects.</tt></dd></dl>
+
+<dl><dt><a name="ScalarValue-ListOfValuesFromListOfDicts"><strong>ListOfValuesFromListOfDicts</strong></a>(value_dicts, page_dict)</dt><dd><tt>Takes&nbsp;a&nbsp;list&nbsp;of&nbsp;value&nbsp;dicts&nbsp;to&nbsp;values.<br>
+&nbsp;<br>
+Given&nbsp;a&nbsp;list&nbsp;of&nbsp;value&nbsp;dicts&nbsp;produced&nbsp;by&nbsp;AsDict,&nbsp;this&nbsp;method<br>
+deserializes&nbsp;the&nbsp;dicts&nbsp;given&nbsp;a&nbsp;dict&nbsp;mapping&nbsp;page&nbsp;IDs&nbsp;to&nbsp;pages.<br>
+This&nbsp;method&nbsp;performs&nbsp;memoization&nbsp;for&nbsp;deserializing&nbsp;a&nbsp;list&nbsp;of&nbsp;values<br>
+efficiently,&nbsp;where&nbsp;FromDict&nbsp;is&nbsp;meant&nbsp;to&nbsp;handle&nbsp;one-offs.<br>
+&nbsp;<br>
+values:&nbsp;a&nbsp;list&nbsp;of&nbsp;value&nbsp;dicts&nbsp;produced&nbsp;by&nbsp;<a href="#ScalarValue-AsDict">AsDict</a>()&nbsp;on&nbsp;a&nbsp;value&nbsp;subclass.<br>
+page_dict:&nbsp;a&nbsp;dictionary&nbsp;mapping&nbsp;IDs&nbsp;to&nbsp;page&nbsp;objects.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.value.html#Value">telemetry.value.Value</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>name_suffix</strong></dt>
+<dd><tt>Returns&nbsp;the&nbsp;string&nbsp;after&nbsp;a&nbsp;.&nbsp;in&nbsp;the&nbsp;name,&nbsp;or&nbsp;the&nbsp;full&nbsp;name&nbsp;otherwise.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
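The `ScalarValue` constructor documented above takes a page, a name, units, and the measured number, plus optional metadata. A minimal sketch of the documented usage; `page` and `num_elements` are assumptions standing in for objects produced by the surrounding test:

```python
# Hedged sketch based on the ScalarValue docstring above; `page` is assumed to
# be a telemetry page object and `num_elements` a measured integer.
from telemetry.value import scalar

value = scalar.ScalarValue(page, 'num_dom_elements', 'count', num_elements,
                           description='Number of DOM elements in the page.')
print(value.AsDict())  # serializable dict; FromDict() reverses it given a page dict
```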
diff --git a/catapult/telemetry/docs/pydoc/telemetry.value.skip.html b/catapult/telemetry/docs/pydoc/telemetry.value.skip.html
new file mode 100644
index 0000000..13b67eb
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.value.skip.html
@@ -0,0 +1,134 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.value.skip</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.value.html"><font color="#ffffff">value</font></a>.skip</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/value/skip.py">telemetry/value/skip.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.value.html">telemetry.value</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.value.html#Value">telemetry.value.Value</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.value.skip.html#SkipValue">SkipValue</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="SkipValue">class <strong>SkipValue</strong></a>(<a href="telemetry.value.html#Value">telemetry.value.Value</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.value.skip.html#SkipValue">SkipValue</a></dd>
+<dd><a href="telemetry.value.html#Value">telemetry.value.Value</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="SkipValue-AsDict"><strong>AsDict</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="SkipValue-GetBuildbotDataType"><strong>GetBuildbotDataType</strong></a>(self, output_context)</dt></dl>
+
+<dl><dt><a name="SkipValue-GetBuildbotValue"><strong>GetBuildbotValue</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="SkipValue-GetChartAndTraceNameForPerPageResult"><strong>GetChartAndTraceNameForPerPageResult</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="SkipValue-GetRepresentativeNumber"><strong>GetRepresentativeNumber</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="SkipValue-GetRepresentativeString"><strong>GetRepresentativeString</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="SkipValue-__init__"><strong>__init__</strong></a>(self, page, reason, description<font color="#909090">=None</font>)</dt><dd><tt>A&nbsp;value&nbsp;representing&nbsp;a&nbsp;skipped&nbsp;page.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;page:&nbsp;The&nbsp;skipped&nbsp;page&nbsp;object.<br>
+&nbsp;&nbsp;reason:&nbsp;The&nbsp;string&nbsp;reason&nbsp;the&nbsp;page&nbsp;was&nbsp;skipped.</tt></dd></dl>
+
+<dl><dt><a name="SkipValue-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="SkipValue-MergeLikeValuesFromDifferentPages"><strong>MergeLikeValuesFromDifferentPages</strong></a>(cls, values)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="SkipValue-MergeLikeValuesFromSamePage"><strong>MergeLikeValuesFromSamePage</strong></a>(cls, values)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="SkipValue-FromDict"><strong>FromDict</strong></a>(value_dict, page_dict)</dt></dl>
+
+<dl><dt><a name="SkipValue-GetJSONTypeName"><strong>GetJSONTypeName</strong></a>()</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>reason</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.value.html#Value">telemetry.value.Value</a>:<br>
+<dl><dt><a name="SkipValue-AsDictWithoutBaseClassEntries"><strong>AsDictWithoutBaseClassEntries</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="SkipValue-GetChartAndTraceNameForComputedSummaryResult"><strong>GetChartAndTraceNameForComputedSummaryResult</strong></a>(self, trace_tag)</dt></dl>
+
+<dl><dt><a name="SkipValue-IsMergableWith"><strong>IsMergableWith</strong></a>(self, that)</dt></dl>
+
+<dl><dt><a name="SkipValue-__eq__"><strong>__eq__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="SkipValue-__hash__"><strong>__hash__</strong></a>(self)</dt></dl>
+
+<hr>
+Static methods inherited from <a href="telemetry.value.html#Value">telemetry.value.Value</a>:<br>
+<dl><dt><a name="SkipValue-GetConstructorKwArgs"><strong>GetConstructorKwArgs</strong></a>(value_dict, page_dict)</dt><dd><tt>Produces&nbsp;constructor&nbsp;arguments&nbsp;from&nbsp;a&nbsp;value&nbsp;dict&nbsp;and&nbsp;a&nbsp;page&nbsp;dict.<br>
+&nbsp;<br>
+Takes&nbsp;a&nbsp;dict&nbsp;parsed&nbsp;from&nbsp;JSON&nbsp;and&nbsp;an&nbsp;index&nbsp;of&nbsp;pages&nbsp;and&nbsp;recovers&nbsp;the<br>
+keyword&nbsp;arguments&nbsp;to&nbsp;be&nbsp;passed&nbsp;to&nbsp;the&nbsp;constructor&nbsp;for&nbsp;deserializing&nbsp;the<br>
+dict.<br>
+&nbsp;<br>
+value_dict:&nbsp;a&nbsp;dictionary&nbsp;produced&nbsp;by&nbsp;<a href="#SkipValue-AsDict">AsDict</a>()&nbsp;on&nbsp;a&nbsp;value&nbsp;subclass.<br>
+page_dict:&nbsp;a&nbsp;dictionary&nbsp;mapping&nbsp;IDs&nbsp;to&nbsp;page&nbsp;objects.</tt></dd></dl>
+
+<dl><dt><a name="SkipValue-ListOfValuesFromListOfDicts"><strong>ListOfValuesFromListOfDicts</strong></a>(value_dicts, page_dict)</dt><dd><tt>Takes&nbsp;a&nbsp;list&nbsp;of&nbsp;value&nbsp;dicts&nbsp;to&nbsp;values.<br>
+&nbsp;<br>
+Given&nbsp;a&nbsp;list&nbsp;of&nbsp;value&nbsp;dicts&nbsp;produced&nbsp;by&nbsp;AsDict,&nbsp;this&nbsp;method<br>
+deserializes&nbsp;the&nbsp;dicts&nbsp;given&nbsp;a&nbsp;dict&nbsp;mapping&nbsp;page&nbsp;IDs&nbsp;to&nbsp;pages.<br>
+This&nbsp;method&nbsp;performs&nbsp;memoization&nbsp;for&nbsp;deserializing&nbsp;a&nbsp;list&nbsp;of&nbsp;values<br>
+efficiently,&nbsp;where&nbsp;FromDict&nbsp;is&nbsp;meant&nbsp;to&nbsp;handle&nbsp;one-offs.<br>
+&nbsp;<br>
+values:&nbsp;a&nbsp;list&nbsp;of&nbsp;value&nbsp;dicts&nbsp;produced&nbsp;by&nbsp;<a href="#SkipValue-AsDict">AsDict</a>()&nbsp;on&nbsp;a&nbsp;value&nbsp;subclass.<br>
+page_dict:&nbsp;a&nbsp;dictionary&nbsp;mapping&nbsp;IDs&nbsp;to&nbsp;page&nbsp;objects.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.value.html#Value">telemetry.value.Value</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>name_suffix</strong></dt>
+<dd><tt>Returns&nbsp;the&nbsp;string&nbsp;after&nbsp;a&nbsp;.&nbsp;in&nbsp;the&nbsp;name,&nbsp;or&nbsp;the&nbsp;full&nbsp;name&nbsp;otherwise.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
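Per the `SkipValue` docstring above, a skipped page is recorded as a value holding the page plus a string reason. A minimal sketch, again assuming `page` comes from the story set:

```python
# Hedged sketch of recording a skipped page, per the SkipValue docstring above;
# `page` is assumed to be the telemetry page object being skipped.
from telemetry.value import skip

value = skip.SkipValue(page, 'Page requires a login the test bot does not have.')
print(value.reason)  # the documented `reason` data descriptor
```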
diff --git a/catapult/telemetry/docs/pydoc/telemetry.value.summarizable.html b/catapult/telemetry/docs/pydoc/telemetry.value.summarizable.html
new file mode 100644
index 0000000..61d3ba0
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.value.summarizable.html
@@ -0,0 +1,142 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.value.summarizable</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.value.html"><font color="#ffffff">value</font></a>.summarizable</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/value/summarizable.py">telemetry/value/summarizable.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.value.html">telemetry.value</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.value.html#Value">telemetry.value.Value</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.value.summarizable.html#SummarizableValue">SummarizableValue</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="SummarizableValue">class <strong>SummarizableValue</strong></a>(<a href="telemetry.value.html#Value">telemetry.value.Value</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.value.summarizable.html#SummarizableValue">SummarizableValue</a></dd>
+<dd><a href="telemetry.value.html#Value">telemetry.value.Value</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="SummarizableValue-AsDict"><strong>AsDict</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="SummarizableValue-AsDictWithoutBaseClassEntries"><strong>AsDictWithoutBaseClassEntries</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="SummarizableValue-GetBuildbotDataType"><strong>GetBuildbotDataType</strong></a>(self, output_context)</dt><dd><tt>Returns&nbsp;the&nbsp;buildbot's&nbsp;equivalent&nbsp;data_type.<br>
+&nbsp;<br>
+This&nbsp;should&nbsp;be&nbsp;one&nbsp;of&nbsp;the&nbsp;values&nbsp;accepted&nbsp;by&nbsp;perf_tests_results_helper.py.</tt></dd></dl>
+
+<dl><dt><a name="SummarizableValue-GetBuildbotValue"><strong>GetBuildbotValue</strong></a>(self)</dt><dd><tt>Returns&nbsp;the&nbsp;buildbot's&nbsp;equivalent&nbsp;value.</tt></dd></dl>
+
+<dl><dt><a name="SummarizableValue-GetRepresentativeNumber"><strong>GetRepresentativeNumber</strong></a>(self)</dt><dd><tt>Gets&nbsp;a&nbsp;single&nbsp;scalar&nbsp;value&nbsp;that&nbsp;best-represents&nbsp;this&nbsp;value.<br>
+&nbsp;<br>
+Returns&nbsp;None&nbsp;if&nbsp;not&nbsp;possible.</tt></dd></dl>
+
+<dl><dt><a name="SummarizableValue-GetRepresentativeString"><strong>GetRepresentativeString</strong></a>(self)</dt><dd><tt>Gets&nbsp;a&nbsp;string&nbsp;value&nbsp;that&nbsp;best-represents&nbsp;this&nbsp;value.<br>
+&nbsp;<br>
+Returns&nbsp;None&nbsp;if&nbsp;not&nbsp;possible.</tt></dd></dl>
+
+<dl><dt><a name="SummarizableValue-__init__"><strong>__init__</strong></a>(self, page, name, units, important, description, tir_label, improvement_direction)</dt><dd><tt>A&nbsp;summarizable&nbsp;value&nbsp;result&nbsp;from&nbsp;a&nbsp;test.</tt></dd></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="SummarizableValue-MergeLikeValuesFromDifferentPages"><strong>MergeLikeValuesFromDifferentPages</strong></a>(cls, values)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="SummarizableValue-MergeLikeValuesFromSamePage"><strong>MergeLikeValuesFromSamePage</strong></a>(cls, values)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="SummarizableValue-GetJSONTypeName"><strong>GetJSONTypeName</strong></a>()</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>improvement_direction</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.value.html#Value">telemetry.value.Value</a>:<br>
+<dl><dt><a name="SummarizableValue-GetChartAndTraceNameForComputedSummaryResult"><strong>GetChartAndTraceNameForComputedSummaryResult</strong></a>(self, trace_tag)</dt></dl>
+
+<dl><dt><a name="SummarizableValue-GetChartAndTraceNameForPerPageResult"><strong>GetChartAndTraceNameForPerPageResult</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="SummarizableValue-IsMergableWith"><strong>IsMergableWith</strong></a>(self, that)</dt></dl>
+
+<dl><dt><a name="SummarizableValue-__eq__"><strong>__eq__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="SummarizableValue-__hash__"><strong>__hash__</strong></a>(self)</dt></dl>
+
+<hr>
+Static methods inherited from <a href="telemetry.value.html#Value">telemetry.value.Value</a>:<br>
+<dl><dt><a name="SummarizableValue-FromDict"><strong>FromDict</strong></a>(value_dict, page_dict)</dt><dd><tt>Produces&nbsp;a&nbsp;value&nbsp;from&nbsp;a&nbsp;value&nbsp;dict&nbsp;and&nbsp;a&nbsp;page&nbsp;dict.<br>
+&nbsp;<br>
+<a href="telemetry.value.html#Value">Value</a>&nbsp;dicts&nbsp;are&nbsp;produced&nbsp;by&nbsp;serialization&nbsp;to&nbsp;JSON,&nbsp;and&nbsp;must&nbsp;be&nbsp;accompanied<br>
+by&nbsp;a&nbsp;dict&nbsp;mapping&nbsp;page&nbsp;IDs&nbsp;to&nbsp;pages,&nbsp;also&nbsp;produced&nbsp;by&nbsp;serialization,&nbsp;in<br>
+order&nbsp;to&nbsp;be&nbsp;completely&nbsp;deserialized.&nbsp;If&nbsp;deserializing&nbsp;multiple&nbsp;values,&nbsp;use<br>
+ListOfValuesFromListOfDicts&nbsp;instead.<br>
+&nbsp;<br>
+value_dict:&nbsp;a&nbsp;dictionary&nbsp;produced&nbsp;by&nbsp;<a href="#SummarizableValue-AsDict">AsDict</a>()&nbsp;on&nbsp;a&nbsp;value&nbsp;subclass.<br>
+page_dict:&nbsp;a&nbsp;dictionary&nbsp;mapping&nbsp;IDs&nbsp;to&nbsp;page&nbsp;objects.</tt></dd></dl>
+
+<dl><dt><a name="SummarizableValue-GetConstructorKwArgs"><strong>GetConstructorKwArgs</strong></a>(value_dict, page_dict)</dt><dd><tt>Produces&nbsp;constructor&nbsp;arguments&nbsp;from&nbsp;a&nbsp;value&nbsp;dict&nbsp;and&nbsp;a&nbsp;page&nbsp;dict.<br>
+&nbsp;<br>
+Takes&nbsp;a&nbsp;dict&nbsp;parsed&nbsp;from&nbsp;JSON&nbsp;and&nbsp;an&nbsp;index&nbsp;of&nbsp;pages&nbsp;and&nbsp;recovers&nbsp;the<br>
+keyword&nbsp;arguments&nbsp;to&nbsp;be&nbsp;passed&nbsp;to&nbsp;the&nbsp;constructor&nbsp;for&nbsp;deserializing&nbsp;the<br>
+dict.<br>
+&nbsp;<br>
+value_dict:&nbsp;a&nbsp;dictionary&nbsp;produced&nbsp;by&nbsp;<a href="#SummarizableValue-AsDict">AsDict</a>()&nbsp;on&nbsp;a&nbsp;value&nbsp;subclass.<br>
+page_dict:&nbsp;a&nbsp;dictionary&nbsp;mapping&nbsp;IDs&nbsp;to&nbsp;page&nbsp;objects.</tt></dd></dl>
+
+<dl><dt><a name="SummarizableValue-ListOfValuesFromListOfDicts"><strong>ListOfValuesFromListOfDicts</strong></a>(value_dicts, page_dict)</dt><dd><tt>Takes&nbsp;a&nbsp;list&nbsp;of&nbsp;value&nbsp;dicts&nbsp;to&nbsp;values.<br>
+&nbsp;<br>
+Given&nbsp;a&nbsp;list&nbsp;of&nbsp;value&nbsp;dicts&nbsp;produced&nbsp;by&nbsp;AsDict,&nbsp;this&nbsp;method<br>
+deserializes&nbsp;the&nbsp;dicts&nbsp;given&nbsp;a&nbsp;dict&nbsp;mapping&nbsp;page&nbsp;IDs&nbsp;to&nbsp;pages.<br>
+This&nbsp;method&nbsp;performs&nbsp;memoization&nbsp;for&nbsp;deserializing&nbsp;a&nbsp;list&nbsp;of&nbsp;values<br>
+efficiently,&nbsp;where&nbsp;FromDict&nbsp;is&nbsp;meant&nbsp;to&nbsp;handle&nbsp;one-offs.<br>
+&nbsp;<br>
+values:&nbsp;a&nbsp;list&nbsp;of&nbsp;value&nbsp;dicts&nbsp;produced&nbsp;by&nbsp;<a href="#SummarizableValue-AsDict">AsDict</a>()&nbsp;on&nbsp;a&nbsp;value&nbsp;subclass.<br>
+page_dict:&nbsp;a&nbsp;dictionary&nbsp;mapping&nbsp;IDs&nbsp;to&nbsp;page&nbsp;objects.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.value.html#Value">telemetry.value.Value</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>name_suffix</strong></dt>
+<dd><tt>Returns&nbsp;the&nbsp;string&nbsp;after&nbsp;a&nbsp;.&nbsp;in&nbsp;the&nbsp;name,&nbsp;or&nbsp;the&nbsp;full&nbsp;name&nbsp;otherwise.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.value.summary.html b/catapult/telemetry/docs/pydoc/telemetry.value.summary.html
new file mode 100644
index 0000000..8d2b9eb
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.value.summary.html
@@ -0,0 +1,94 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.value.summary</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.value.html"><font color="#ffffff">value</font></a>.summary</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/value/summary.py">telemetry/value/summary.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.value.failure.html">telemetry.value.failure</a><br>
+</td><td width="25%" valign=top><a href="telemetry.value.merge_values.html">telemetry.value.merge_values</a><br>
+</td><td width="25%" valign=top><a href="telemetry.value.skip.html">telemetry.value.skip</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.value.summary.html#Summary">Summary</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Summary">class <strong>Summary</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Computes&nbsp;summary&nbsp;values&nbsp;from&nbsp;the&nbsp;per-page-run&nbsp;values&nbsp;produced&nbsp;by&nbsp;a&nbsp;test.<br>
+&nbsp;<br>
+Some&nbsp;telemetry&nbsp;benchmarks&nbsp;repeat&nbsp;a&nbsp;number&nbsp;of&nbsp;times&nbsp;in&nbsp;order&nbsp;to&nbsp;get&nbsp;a&nbsp;reliable<br>
+measurement.&nbsp;The&nbsp;test&nbsp;does&nbsp;not&nbsp;have&nbsp;to&nbsp;handle&nbsp;merging&nbsp;of&nbsp;these&nbsp;runs:<br>
+the&nbsp;summarizer&nbsp;does&nbsp;it&nbsp;for&nbsp;you.<br>
+&nbsp;<br>
+For&nbsp;instance,&nbsp;if&nbsp;two&nbsp;pages&nbsp;run,&nbsp;3&nbsp;and&nbsp;1&nbsp;time&nbsp;respectively:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;ScalarValue(page1,&nbsp;'foo',&nbsp;units='ms',&nbsp;1)<br>
+&nbsp;&nbsp;&nbsp;&nbsp;ScalarValue(page1,&nbsp;'foo',&nbsp;units='ms',&nbsp;1)<br>
+&nbsp;&nbsp;&nbsp;&nbsp;ScalarValue(page1,&nbsp;'foo',&nbsp;units='ms',&nbsp;1)<br>
+&nbsp;&nbsp;&nbsp;&nbsp;ScalarValue(page2,&nbsp;'foo',&nbsp;units='ms',&nbsp;2)<br>
+&nbsp;<br>
+Then&nbsp;summarizer&nbsp;will&nbsp;produce&nbsp;two&nbsp;sets&nbsp;of&nbsp;values.&nbsp;First,<br>
+computed_per_page_values:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;[<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;ListOfScalarValues(page1,&nbsp;'foo',&nbsp;units='ms',&nbsp;[1,1,1]),<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;ListOfScalarValues(page2,&nbsp;'foo',&nbsp;units='ms',&nbsp;[2])<br>
+&nbsp;&nbsp;&nbsp;&nbsp;]<br>
+&nbsp;<br>
+In&nbsp;addition,&nbsp;it&nbsp;will&nbsp;produce&nbsp;a&nbsp;summary&nbsp;value:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;[<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;ListOfScalarValues(page=None,&nbsp;'foo',&nbsp;units='ms',&nbsp;[1,1,1,2])<br>
+&nbsp;&nbsp;&nbsp;&nbsp;]<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="Summary-__init__"><strong>__init__</strong></a>(self, all_page_specific_values, key_func<font color="#909090">=&lt;function DefaultKeyFunc&gt;</font>)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>computed_per_page_values</strong></dt>
+</dl>
+<dl><dt><strong>computed_summary_values</strong></dt>
+</dl>
+<dl><dt><strong>interleaved_computed_per_page_values_and_summaries</strong></dt>
+<dd><tt>Returns&nbsp;the&nbsp;computed&nbsp;per&nbsp;page&nbsp;values&nbsp;and&nbsp;summary&nbsp;values&nbsp;interleaved.<br>
+&nbsp;<br>
+All&nbsp;the&nbsp;results&nbsp;for&nbsp;a&nbsp;given&nbsp;name&nbsp;are&nbsp;printed&nbsp;together.&nbsp;First&nbsp;per&nbsp;page<br>
+values,&nbsp;then&nbsp;summary&nbsp;values.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
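Following the `Summary` docstring above, repeated per-page scalar results are merged into per-page lists plus an overall summary list. A minimal sketch under the assumption that `page1` and `page2` are telemetry page objects from the same story set:

```python
# Hedged sketch mirroring the Summary docstring above; page1/page2 are assumed
# telemetry page objects from the same story set.
from telemetry.value import scalar
from telemetry.value.summary import Summary

values = [scalar.ScalarValue(page1, 'foo', 'ms', 1),
          scalar.ScalarValue(page1, 'foo', 'ms', 1),
          scalar.ScalarValue(page1, 'foo', 'ms', 1),
          scalar.ScalarValue(page2, 'foo', 'ms', 2)]

summary = Summary(values)
per_page = summary.computed_per_page_values  # one ListOfScalarValues per page
overall = summary.computed_summary_values    # ListOfScalarValues with page=None
```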
diff --git a/catapult/telemetry/docs/pydoc/telemetry.value.trace.html b/catapult/telemetry/docs/pydoc/telemetry.value.trace.html
new file mode 100644
index 0000000..3e1353a
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.value.trace.html
@@ -0,0 +1,168 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.value.trace</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.value.html"><font color="#ffffff">value</font></a>.trace</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/value/trace.py">telemetry/value/trace.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="StringIO.html">StringIO</a><br>
+<a href="catapult_base.cloud_storage.html">catapult_base.cloud_storage</a><br>
+<a href="datetime.html">datetime</a><br>
+<a href="telemetry.internal.util.file_handle.html">telemetry.internal.util.file_handle</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+<a href="os.html">os</a><br>
+<a href="random.html">random</a><br>
+<a href="shutil.html">shutil</a><br>
+</td><td width="25%" valign=top><a href="sys.html">sys</a><br>
+<a href="tempfile.html">tempfile</a><br>
+<a href="tracing_build.trace2html.html">tracing_build.trace2html</a><br>
+<a href="telemetry.timeline.trace_data.html">telemetry.timeline.trace_data</a><br>
+</td><td width="25%" valign=top><a href="telemetry.value.html">telemetry.value</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.value.html#Value">telemetry.value.Value</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.value.trace.html#TraceValue">TraceValue</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TraceValue">class <strong>TraceValue</strong></a>(<a href="telemetry.value.html#Value">telemetry.value.Value</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.value.trace.html#TraceValue">TraceValue</a></dd>
+<dd><a href="telemetry.value.html#Value">telemetry.value.Value</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="TraceValue-AsDict"><strong>AsDict</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TraceValue-CleanUp"><strong>CleanUp</strong></a>(self)</dt><dd><tt>Cleans&nbsp;up&nbsp;tempfile&nbsp;after&nbsp;it&nbsp;is&nbsp;no&nbsp;longer&nbsp;needed.<br>
+&nbsp;<br>
+A&nbsp;cleaned&nbsp;up&nbsp;<a href="#TraceValue">TraceValue</a>&nbsp;cannot&nbsp;be&nbsp;used&nbsp;for&nbsp;further&nbsp;operations.&nbsp;<a href="#TraceValue-CleanUp">CleanUp</a>()<br>
+may&nbsp;be&nbsp;called&nbsp;more&nbsp;than&nbsp;once&nbsp;without&nbsp;error.</tt></dd></dl>
+
+<dl><dt><a name="TraceValue-GetBuildbotDataType"><strong>GetBuildbotDataType</strong></a>(self, output_context)</dt></dl>
+
+<dl><dt><a name="TraceValue-GetBuildbotValue"><strong>GetBuildbotValue</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TraceValue-GetRepresentativeNumber"><strong>GetRepresentativeNumber</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TraceValue-GetRepresentativeString"><strong>GetRepresentativeString</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TraceValue-Serialize"><strong>Serialize</strong></a>(self, dir_path)</dt></dl>
+
+<dl><dt><a name="TraceValue-UploadToCloud"><strong>UploadToCloud</strong></a>(self, bucket)</dt></dl>
+
+<dl><dt><a name="TraceValue-__enter__"><strong>__enter__</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TraceValue-__exit__"><strong>__exit__</strong></a>(self, _, __, ___)</dt></dl>
+
+<dl><dt><a name="TraceValue-__init__"><strong>__init__</strong></a>(self, page, trace_data, important<font color="#909090">=False</font>, description<font color="#909090">=None</font>)</dt><dd><tt>A&nbsp;value&nbsp;that&nbsp;contains&nbsp;a&nbsp;TraceData&nbsp;object&nbsp;and&nbsp;knows&nbsp;how&nbsp;to<br>
+output&nbsp;it.<br>
+&nbsp;<br>
+Adding&nbsp;TraceValues&nbsp;and&nbsp;outputting&nbsp;as&nbsp;JSON&nbsp;will&nbsp;produce&nbsp;a&nbsp;directory&nbsp;full&nbsp;of<br>
+HTML&nbsp;files&nbsp;called&nbsp;trace_files.&nbsp;Outputting&nbsp;as&nbsp;chart&nbsp;JSON&nbsp;will&nbsp;also&nbsp;produce<br>
+an&nbsp;index,&nbsp;files.html,&nbsp;linking&nbsp;to&nbsp;each&nbsp;of&nbsp;these&nbsp;files.</tt></dd></dl>
+
+<dl><dt><a name="TraceValue-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="TraceValue-MergeLikeValuesFromDifferentPages"><strong>MergeLikeValuesFromDifferentPages</strong></a>(cls, values)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<dl><dt><a name="TraceValue-MergeLikeValuesFromSamePage"><strong>MergeLikeValuesFromSamePage</strong></a>(cls, values)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="TraceValue-GetJSONTypeName"><strong>GetJSONTypeName</strong></a>()</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>cleaned_up</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.value.html#Value">telemetry.value.Value</a>:<br>
+<dl><dt><a name="TraceValue-AsDictWithoutBaseClassEntries"><strong>AsDictWithoutBaseClassEntries</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TraceValue-GetChartAndTraceNameForComputedSummaryResult"><strong>GetChartAndTraceNameForComputedSummaryResult</strong></a>(self, trace_tag)</dt></dl>
+
+<dl><dt><a name="TraceValue-GetChartAndTraceNameForPerPageResult"><strong>GetChartAndTraceNameForPerPageResult</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="TraceValue-IsMergableWith"><strong>IsMergableWith</strong></a>(self, that)</dt></dl>
+
+<dl><dt><a name="TraceValue-__eq__"><strong>__eq__</strong></a>(self, other)</dt></dl>
+
+<dl><dt><a name="TraceValue-__hash__"><strong>__hash__</strong></a>(self)</dt></dl>
+
+<hr>
+Static methods inherited from <a href="telemetry.value.html#Value">telemetry.value.Value</a>:<br>
+<dl><dt><a name="TraceValue-FromDict"><strong>FromDict</strong></a>(value_dict, page_dict)</dt><dd><tt>Produces&nbsp;a&nbsp;value&nbsp;from&nbsp;a&nbsp;value&nbsp;dict&nbsp;and&nbsp;a&nbsp;page&nbsp;dict.<br>
+&nbsp;<br>
+<a href="telemetry.value.html#Value">Value</a>&nbsp;dicts&nbsp;are&nbsp;produced&nbsp;by&nbsp;serialization&nbsp;to&nbsp;JSON,&nbsp;and&nbsp;must&nbsp;be&nbsp;accompanied<br>
+by&nbsp;a&nbsp;dict&nbsp;mapping&nbsp;page&nbsp;IDs&nbsp;to&nbsp;pages,&nbsp;also&nbsp;produced&nbsp;by&nbsp;serialization,&nbsp;in<br>
+order&nbsp;to&nbsp;be&nbsp;completely&nbsp;deserialized.&nbsp;If&nbsp;deserializing&nbsp;multiple&nbsp;values,&nbsp;use<br>
+ListOfValuesFromListOfDicts&nbsp;instead.<br>
+&nbsp;<br>
+value_dict:&nbsp;a&nbsp;dictionary&nbsp;produced&nbsp;by&nbsp;<a href="#TraceValue-AsDict">AsDict</a>()&nbsp;on&nbsp;a&nbsp;value&nbsp;subclass.<br>
+page_dict:&nbsp;a&nbsp;dictionary&nbsp;mapping&nbsp;IDs&nbsp;to&nbsp;page&nbsp;objects.</tt></dd></dl>
+
+<dl><dt><a name="TraceValue-GetConstructorKwArgs"><strong>GetConstructorKwArgs</strong></a>(value_dict, page_dict)</dt><dd><tt>Produces&nbsp;constructor&nbsp;arguments&nbsp;from&nbsp;a&nbsp;value&nbsp;dict&nbsp;and&nbsp;a&nbsp;page&nbsp;dict.<br>
+&nbsp;<br>
+Takes&nbsp;a&nbsp;dict&nbsp;parsed&nbsp;from&nbsp;JSON&nbsp;and&nbsp;an&nbsp;index&nbsp;of&nbsp;pages&nbsp;and&nbsp;recovers&nbsp;the<br>
+keyword&nbsp;arguments&nbsp;to&nbsp;be&nbsp;passed&nbsp;to&nbsp;the&nbsp;constructor&nbsp;for&nbsp;deserializing&nbsp;the<br>
+dict.<br>
+&nbsp;<br>
+value_dict:&nbsp;a&nbsp;dictionary&nbsp;produced&nbsp;by&nbsp;<a href="#TraceValue-AsDict">AsDict</a>()&nbsp;on&nbsp;a&nbsp;value&nbsp;subclass.<br>
+page_dict:&nbsp;a&nbsp;dictionary&nbsp;mapping&nbsp;IDs&nbsp;to&nbsp;page&nbsp;objects.</tt></dd></dl>
+
+<dl><dt><a name="TraceValue-ListOfValuesFromListOfDicts"><strong>ListOfValuesFromListOfDicts</strong></a>(value_dicts, page_dict)</dt><dd><tt>Takes&nbsp;a&nbsp;list&nbsp;of&nbsp;value&nbsp;dicts&nbsp;to&nbsp;values.<br>
+&nbsp;<br>
+Given&nbsp;a&nbsp;list&nbsp;of&nbsp;value&nbsp;dicts&nbsp;produced&nbsp;by&nbsp;AsDict,&nbsp;this&nbsp;method<br>
+deserializes&nbsp;the&nbsp;dicts&nbsp;given&nbsp;a&nbsp;dict&nbsp;mapping&nbsp;page&nbsp;IDs&nbsp;to&nbsp;pages.<br>
+This&nbsp;method&nbsp;performs&nbsp;memoization&nbsp;for&nbsp;deserializing&nbsp;a&nbsp;list&nbsp;of&nbsp;values<br>
+efficiently,&nbsp;where&nbsp;FromDict&nbsp;is&nbsp;meant&nbsp;to&nbsp;handle&nbsp;one-offs.<br>
+&nbsp;<br>
+values:&nbsp;a&nbsp;list&nbsp;of&nbsp;value&nbsp;dicts&nbsp;produced&nbsp;by&nbsp;<a href="#TraceValue-AsDict">AsDict</a>()&nbsp;on&nbsp;a&nbsp;value&nbsp;subclass.<br>
+page_dict:&nbsp;a&nbsp;dictionary&nbsp;mapping&nbsp;IDs&nbsp;to&nbsp;page&nbsp;objects.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.value.html#Value">telemetry.value.Value</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>name_suffix</strong></dt>
+<dd><tt>Returns&nbsp;the&nbsp;string&nbsp;after&nbsp;a&nbsp;.&nbsp;in&nbsp;the&nbsp;name,&nbsp;or&nbsp;the&nbsp;full&nbsp;name&nbsp;otherwise.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
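The `TraceValue` methods listed above suggest a simple lifecycle: wrap the trace data, serialize or upload it, then clean up the backing temp file. A minimal sketch, assuming `page` and `trace_data` come from a Telemetry run and using a hypothetical output directory:

```python
# Hedged sketch of the TraceValue lifecycle based on the methods documented
# above; `page` and `trace_data` are assumed to come from a Telemetry run.
from telemetry.value import trace

value = trace.TraceValue(page, trace_data, description='Trace for this story.')
try:
    value.Serialize('/tmp/trace_output')  # hypothetical output directory
finally:
    value.CleanUp()          # documented as safe to call more than once
assert value.cleaned_up      # the documented `cleaned_up` descriptor
```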
diff --git a/catapult/telemetry/docs/pydoc/telemetry.web_perf.html b/catapult/telemetry/docs/pydoc/telemetry.web_perf.html
new file mode 100644
index 0000000..1a609e9
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.web_perf.html
@@ -0,0 +1,33 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.web_perf</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.web_perf</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/web_perf/__init__.py">telemetry/web_perf/__init__.py</a></font></td></tr></table>
+    <p><tt>The&nbsp;web_perf&nbsp;module&nbsp;provides&nbsp;utilities&nbsp;and&nbsp;measurements&nbsp;for&nbsp;benchmarking&nbsp;web<br>
+apps'&nbsp;performance.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.web_perf.metrics.html"><strong>metrics</strong>&nbsp;(package)</a><br>
+<a href="telemetry.web_perf.smooth_gesture_util.html">smooth_gesture_util</a><br>
+<a href="telemetry.web_perf.smooth_gesture_util_unittest.html">smooth_gesture_util_unittest</a><br>
+</td><td width="25%" valign=top><a href="telemetry.web_perf.story_test.html">story_test</a><br>
+<a href="telemetry.web_perf.timeline_based_measurement.html">timeline_based_measurement</a><br>
+<a href="telemetry.web_perf.timeline_based_measurement_unittest.html">timeline_based_measurement_unittest</a><br>
+</td><td width="25%" valign=top><a href="telemetry.web_perf.timeline_based_page_test.html">timeline_based_page_test</a><br>
+<a href="telemetry.web_perf.timeline_based_page_test_unittest.html">timeline_based_page_test_unittest</a><br>
+<a href="telemetry.web_perf.timeline_interaction_record.html">timeline_interaction_record</a><br>
+</td><td width="25%" valign=top><a href="telemetry.web_perf.timeline_interaction_record_unittest.html">timeline_interaction_record_unittest</a><br>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.blob_timeline.html b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.blob_timeline.html
new file mode 100644
index 0000000..5bd7910
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.blob_timeline.html
@@ -0,0 +1,104 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.web_perf.metrics.blob_timeline</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.web_perf.html"><font color="#ffffff">web_perf</font></a>.<a href="telemetry.web_perf.metrics.html"><font color="#ffffff">metrics</font></a>.blob_timeline</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/web_perf/metrics/blob_timeline.py">telemetry/web_perf/metrics/blob_timeline.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.value.improvement_direction.html">telemetry.value.improvement_direction</a><br>
+</td><td width="25%" valign=top><a href="telemetry.value.list_of_scalar_values.html">telemetry.value.list_of_scalar_values</a><br>
+</td><td width="25%" valign=top><a href="telemetry.web_perf.metrics.timeline_based_metric.html">telemetry.web_perf.metrics.timeline_based_metric</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.blob_timeline.html#BlobTimelineMetric">BlobTimelineMetric</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="BlobTimelineMetric">class <strong>BlobTimelineMetric</strong></a>(<a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt><a href="#BlobTimelineMetric">BlobTimelineMetric</a>&nbsp;reports&nbsp;timing&nbsp;information&nbsp;about&nbsp;blob&nbsp;storage.<br>
+&nbsp;<br>
+The&nbsp;following&nbsp;metrics&nbsp;are&nbsp;added&nbsp;to&nbsp;the&nbsp;results:<br>
+&nbsp;&nbsp;*&nbsp;blob&nbsp;write&nbsp;times&nbsp;(blob_writes)<br>
+&nbsp;&nbsp;*&nbsp;blob&nbsp;read&nbsp;times&nbsp;(blob_reads)<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.web_perf.metrics.blob_timeline.html#BlobTimelineMetric">BlobTimelineMetric</a></dd>
+<dd><a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="BlobTimelineMetric-AddResults"><strong>AddResults</strong></a>(self, model, renderer_thread, interactions, results)</dt></dl>
+
+<dl><dt><a name="BlobTimelineMetric-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="BlobTimelineMetric-IsEventInInteraction"><strong>IsEventInInteraction</strong></a>(event, interaction)</dt></dl>
+
+<dl><dt><a name="BlobTimelineMetric-IsReadEvent"><strong>IsReadEvent</strong></a>(event)</dt></dl>
+
+<dl><dt><a name="BlobTimelineMetric-IsWriteEvent"><strong>IsWriteEvent</strong></a>(event)</dt></dl>
+
+<dl><dt><a name="BlobTimelineMetric-ThreadDurationIfPresent"><strong>ThreadDurationIfPresent</strong></a>(event)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>:<br>
+<dl><dt><a name="BlobTimelineMetric-AddWholeTraceResults"><strong>AddWholeTraceResults</strong></a>(self, model, results)</dt><dd><tt>Computes&nbsp;and&nbsp;adds&nbsp;metrics&nbsp;corresponding&nbsp;to&nbsp;the&nbsp;entire&nbsp;trace.<br>
+&nbsp;<br>
+Override&nbsp;this&nbsp;method&nbsp;to&nbsp;compute&nbsp;results&nbsp;that&nbsp;correspond&nbsp;to&nbsp;the&nbsp;whole&nbsp;trace.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;model:&nbsp;An&nbsp;instance&nbsp;of&nbsp;telemetry.timeline.model.TimelineModel.<br>
+&nbsp;&nbsp;results:&nbsp;An&nbsp;instance&nbsp;of&nbsp;page.PageTestResults.</tt></dd></dl>
+
+<dl><dt><a name="BlobTimelineMetric-VerifyNonOverlappedRecords"><strong>VerifyNonOverlappedRecords</strong></a>(self, interaction_records)</dt><dd><tt>This&nbsp;raises&nbsp;exceptions&nbsp;if&nbsp;interaction_records&nbsp;contain&nbsp;overlapped&nbsp;ranges.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>READ_EVENT_NAME</strong> = 'BlobRequest'<br>
+<strong>WRITE_EVENT_NAME</strong> = 'Registry::RegisterBlob'</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.gpu_timeline.html b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.gpu_timeline.html
new file mode 100644
index 0000000..596a628
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.gpu_timeline.html
@@ -0,0 +1,112 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.web_perf.metrics.gpu_timeline</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.web_perf.html"><font color="#ffffff">web_perf</font></a>.<a href="telemetry.web_perf.metrics.html"><font color="#ffffff">metrics</font></a>.gpu_timeline</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/web_perf/metrics/gpu_timeline.py">telemetry/web_perf/metrics/gpu_timeline.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="collections.html">collections</a><br>
+<a href="telemetry.value.improvement_direction.html">telemetry.value.improvement_direction</a><br>
+</td><td width="25%" valign=top><a href="telemetry.value.list_of_scalar_values.html">telemetry.value.list_of_scalar_values</a><br>
+<a href="math.html">math</a><br>
+</td><td width="25%" valign=top><a href="telemetry.timeline.model.html">telemetry.timeline.model</a><br>
+<a href="telemetry.value.scalar.html">telemetry.value.scalar</a><br>
+</td><td width="25%" valign=top><a href="sys.html">sys</a><br>
+<a href="telemetry.web_perf.metrics.timeline_based_metric.html">telemetry.web_perf.metrics.timeline_based_metric</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.gpu_timeline.html#GPUTimelineMetric">GPUTimelineMetric</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="GPUTimelineMetric">class <strong>GPUTimelineMetric</strong></a>(<a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Computes&nbsp;GPU&nbsp;based&nbsp;metrics.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.web_perf.metrics.gpu_timeline.html#GPUTimelineMetric">GPUTimelineMetric</a></dd>
+<dd><a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="GPUTimelineMetric-AddResults"><strong>AddResults</strong></a>(self, model, _, interaction_records, results)</dt></dl>
+
+<dl><dt><a name="GPUTimelineMetric-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>:<br>
+<dl><dt><a name="GPUTimelineMetric-AddWholeTraceResults"><strong>AddWholeTraceResults</strong></a>(self, model, results)</dt><dd><tt>Computes&nbsp;and&nbsp;adds&nbsp;metrics&nbsp;corresponding&nbsp;to&nbsp;the&nbsp;entire&nbsp;trace.<br>
+&nbsp;<br>
+Override&nbsp;this&nbsp;method&nbsp;to&nbsp;compute&nbsp;results&nbsp;that&nbsp;correspond&nbsp;to&nbsp;the&nbsp;whole&nbsp;trace.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;model:&nbsp;An&nbsp;instance&nbsp;of&nbsp;telemetry.timeline.model.TimelineModel.<br>
+&nbsp;&nbsp;results:&nbsp;An&nbsp;instance&nbsp;of&nbsp;page.PageTestResults.</tt></dd></dl>
+
+<dl><dt><a name="GPUTimelineMetric-VerifyNonOverlappedRecords"><strong>VerifyNonOverlappedRecords</strong></a>(self, interaction_records)</dt><dd><tt>This&nbsp;raises&nbsp;exceptions&nbsp;if&nbsp;interaction_records&nbsp;contain&nbsp;overlapped&nbsp;ranges.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-TimelineName"><strong>TimelineName</strong></a>(name, source_type, value_type)</dt><dd><tt>Constructs&nbsp;the&nbsp;standard&nbsp;name&nbsp;given&nbsp;in&nbsp;the&nbsp;timeline.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;name:&nbsp;The&nbsp;name&nbsp;of&nbsp;the&nbsp;timeline,&nbsp;for&nbsp;example&nbsp;"total",&nbsp;or&nbsp;"render_compositor".<br>
+&nbsp;&nbsp;source_type:&nbsp;One&nbsp;of&nbsp;"cpu",&nbsp;"gpu"&nbsp;or&nbsp;None.&nbsp;None&nbsp;is&nbsp;only&nbsp;used&nbsp;for&nbsp;total&nbsp;times.<br>
+&nbsp;&nbsp;value_type:&nbsp;the&nbsp;type&nbsp;of&nbsp;value.&nbsp;For&nbsp;example&nbsp;"mean",&nbsp;"stddev"...etc.</tt></dd></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>DEVICE_FRAME_END_MARKER</strong> = ('disabled-by-default-gpu.device', 'SwapBuffer')<br>
+<strong>SERVICE_FRAME_END_MARKER</strong> = ('disabled-by-default-gpu.service', 'SwapBuffer')<br>
+<strong>TOPLEVEL_DEVICE_CATEGORY</strong> = 'disabled-by-default-gpu.device'<br>
+<strong>TOPLEVEL_GL_CATEGORY</strong> = 'gpu_toplevel'<br>
+<strong>TOPLEVEL_SERVICE_CATEGORY</strong> = 'disabled-by-default-gpu.service'<br>
+<strong>TRACKED_GL_CONTEXT_NAME</strong> = {'BrowserCompositor': 'browser_compositor', 'Compositor': 'browser_compositor', 'RenderCompositor': 'render_compositor'}</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.html b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.html
new file mode 100644
index 0000000..40c9200
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.html
@@ -0,0 +1,51 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.web_perf.metrics</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.web_perf.html"><font color="#ffffff">web_perf</font></a>.metrics</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/web_perf/metrics/__init__.py">telemetry/web_perf/metrics/__init__.py</a></font></td></tr></table>
+    <p><tt>The&nbsp;web_perf.metrics&nbsp;module&nbsp;provides&nbsp;metrics&nbsp;for&nbsp;analyzing&nbsp;web&nbsp;performance.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.web_perf.metrics.blob_timeline.html">blob_timeline</a><br>
+<a href="telemetry.web_perf.metrics.blob_timeline_unittest.html">blob_timeline_unittest</a><br>
+<a href="telemetry.web_perf.metrics.gpu_timeline.html">gpu_timeline</a><br>
+<a href="telemetry.web_perf.metrics.gpu_timeline_unittest.html">gpu_timeline_unittest</a><br>
+<a href="telemetry.web_perf.metrics.indexeddb_timeline.html">indexeddb_timeline</a><br>
+<a href="telemetry.web_perf.metrics.layout.html">layout</a><br>
+<a href="telemetry.web_perf.metrics.mainthread_jank_stats.html">mainthread_jank_stats</a><br>
+<a href="telemetry.web_perf.metrics.mainthread_jank_stats_unittest.html">mainthread_jank_stats_unittest</a><br>
+</td><td width="25%" valign=top><a href="telemetry.web_perf.metrics.memory_timeline.html">memory_timeline</a><br>
+<a href="telemetry.web_perf.metrics.memory_timeline_unittest.html">memory_timeline_unittest</a><br>
+<a href="telemetry.web_perf.metrics.rendering_frame.html">rendering_frame</a><br>
+<a href="telemetry.web_perf.metrics.rendering_frame_unittest.html">rendering_frame_unittest</a><br>
+<a href="telemetry.web_perf.metrics.rendering_stats.html">rendering_stats</a><br>
+<a href="telemetry.web_perf.metrics.rendering_stats_unittest.html">rendering_stats_unittest</a><br>
+<a href="telemetry.web_perf.metrics.responsiveness_metric.html">responsiveness_metric</a><br>
+<a href="telemetry.web_perf.metrics.single_event.html">single_event</a><br>
+</td><td width="25%" valign=top><a href="telemetry.web_perf.metrics.single_event_unittest.html">single_event_unittest</a><br>
+<a href="telemetry.web_perf.metrics.smoothness.html">smoothness</a><br>
+<a href="telemetry.web_perf.metrics.smoothness_unittest.html">smoothness_unittest</a><br>
+<a href="telemetry.web_perf.metrics.text_selection.html">text_selection</a><br>
+<a href="telemetry.web_perf.metrics.timeline_based_metric.html">timeline_based_metric</a><br>
+<a href="telemetry.web_perf.metrics.timeline_based_metric_unittest.html">timeline_based_metric_unittest</a><br>
+<a href="telemetry.web_perf.metrics.trace_event_stats.html">trace_event_stats</a><br>
+<a href="telemetry.web_perf.metrics.trace_event_stats_unittest.html">trace_event_stats_unittest</a><br>
+</td><td width="25%" valign=top><a href="telemetry.web_perf.metrics.v8_gc_latency.html">v8_gc_latency</a><br>
+<a href="telemetry.web_perf.metrics.v8_gc_latency_unittest.html">v8_gc_latency_unittest</a><br>
+<a href="telemetry.web_perf.metrics.webrtc_rendering_stats.html">webrtc_rendering_stats</a><br>
+<a href="telemetry.web_perf.metrics.webrtc_rendering_stats_unittest.html">webrtc_rendering_stats_unittest</a><br>
+<a href="telemetry.web_perf.metrics.webrtc_rendering_timeline.html">webrtc_rendering_timeline</a><br>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.indexeddb_timeline.html b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.indexeddb_timeline.html
new file mode 100644
index 0000000..7509b9b
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.indexeddb_timeline.html
@@ -0,0 +1,80 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.web_perf.metrics.indexeddb_timeline</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.web_perf.html"><font color="#ffffff">web_perf</font></a>.<a href="telemetry.web_perf.metrics.html"><font color="#ffffff">metrics</font></a>.indexeddb_timeline</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/web_perf/metrics/indexeddb_timeline.py">telemetry/web_perf/metrics/indexeddb_timeline.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.web_perf.metrics.timeline_based_metric.html">telemetry.web_perf.metrics.timeline_based_metric</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.indexeddb_timeline.html#IndexedDBTimelineMetric">IndexedDBTimelineMetric</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="IndexedDBTimelineMetric">class <strong>IndexedDBTimelineMetric</strong></a>(<a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Metrics&nbsp;for&nbsp;IndexedDB&nbsp;operations.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.web_perf.metrics.indexeddb_timeline.html#IndexedDBTimelineMetric">IndexedDBTimelineMetric</a></dd>
+<dd><a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="IndexedDBTimelineMetric-AddResults"><strong>AddResults</strong></a>(self, model, renderer_process, interactions, results)</dt></dl>
+
+<dl><dt><a name="IndexedDBTimelineMetric-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>:<br>
+<dl><dt><a name="IndexedDBTimelineMetric-AddWholeTraceResults"><strong>AddWholeTraceResults</strong></a>(self, model, results)</dt><dd><tt>Computes&nbsp;and&nbsp;adds&nbsp;metrics&nbsp;corresponding&nbsp;to&nbsp;the&nbsp;entire&nbsp;trace.<br>
+&nbsp;<br>
+Override&nbsp;this&nbsp;method&nbsp;to&nbsp;compute&nbsp;results&nbsp;that&nbsp;correspond&nbsp;to&nbsp;the&nbsp;whole&nbsp;trace.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;model:&nbsp;An&nbsp;instance&nbsp;of&nbsp;telemetry.timeline.model.TimelineModel.<br>
+&nbsp;&nbsp;results:&nbsp;An&nbsp;instance&nbsp;of&nbsp;page.PageTestResults.</tt></dd></dl>
+
+<dl><dt><a name="IndexedDBTimelineMetric-VerifyNonOverlappedRecords"><strong>VerifyNonOverlappedRecords</strong></a>(self, interaction_records)</dt><dd><tt>This&nbsp;raises&nbsp;exceptions&nbsp;if&nbsp;interaction_records&nbsp;contain&nbsp;overlapped&nbsp;ranges.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.layout.html b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.layout.html
new file mode 100644
index 0000000..470bac7
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.layout.html
@@ -0,0 +1,96 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.web_perf.metrics.layout</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.web_perf.html"><font color="#ffffff">web_perf</font></a>.<a href="telemetry.web_perf.metrics.html"><font color="#ffffff">metrics</font></a>.layout</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/web_perf/metrics/layout.py">telemetry/web_perf/metrics/layout.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.web_perf.metrics.single_event.html">telemetry.web_perf.metrics.single_event</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.single_event.html#_SingleEventMetric">telemetry.web_perf.metrics.single_event._SingleEventMetric</a>(<a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.layout.html#LayoutMetric">LayoutMetric</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="LayoutMetric">class <strong>LayoutMetric</strong></a>(<a href="telemetry.web_perf.metrics.single_event.html#_SingleEventMetric">telemetry.web_perf.metrics.single_event._SingleEventMetric</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Directly&nbsp;reports&nbsp;durations&nbsp;of&nbsp;FrameView::performLayout&nbsp;events.<br>
+&nbsp;<br>
+&nbsp;&nbsp;layout:&nbsp;Durations&nbsp;of&nbsp;FrameView::performLayout&nbsp;events&nbsp;that&nbsp;were&nbsp;caused&nbsp;by&nbsp;and<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;start&nbsp;during&nbsp;user&nbsp;interaction.<br>
+&nbsp;<br>
+Layout&nbsp;happens&nbsp;no&nbsp;more&nbsp;than&nbsp;once&nbsp;per&nbsp;frame,&nbsp;so&nbsp;per-frame-ness&nbsp;is&nbsp;implied.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.web_perf.metrics.layout.html#LayoutMetric">LayoutMetric</a></dd>
+<dd><a href="telemetry.web_perf.metrics.single_event.html#_SingleEventMetric">telemetry.web_perf.metrics.single_event._SingleEventMetric</a></dd>
+<dd><a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="LayoutMetric-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.web_perf.metrics.single_event.html#_SingleEventMetric">telemetry.web_perf.metrics.single_event._SingleEventMetric</a>:<br>
+<dl><dt><a name="LayoutMetric-AddResults"><strong>AddResults</strong></a>(self, _model, renderer_thread, interactions, results)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>:<br>
+<dl><dt><a name="LayoutMetric-AddWholeTraceResults"><strong>AddWholeTraceResults</strong></a>(self, model, results)</dt><dd><tt>Computes&nbsp;and&nbsp;adds&nbsp;metrics&nbsp;corresponding&nbsp;to&nbsp;the&nbsp;entire&nbsp;trace.<br>
+&nbsp;<br>
+Override&nbsp;this&nbsp;method&nbsp;to&nbsp;compute&nbsp;results&nbsp;that&nbsp;correspond&nbsp;to&nbsp;the&nbsp;whole&nbsp;trace.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;model:&nbsp;An&nbsp;instance&nbsp;of&nbsp;telemetry.timeline.model.TimelineModel.<br>
+&nbsp;&nbsp;results:&nbsp;An&nbsp;instance&nbsp;of&nbsp;page.PageTestResults.</tt></dd></dl>
+
+<dl><dt><a name="LayoutMetric-VerifyNonOverlappedRecords"><strong>VerifyNonOverlappedRecords</strong></a>(self, interaction_records)</dt><dd><tt>This&nbsp;raises&nbsp;exceptions&nbsp;if&nbsp;interaction_records&nbsp;contain&nbsp;overlapped&nbsp;ranges.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>EVENT_NAME</strong> = 'FrameView::performLayout'<br>
+<strong>METRIC_NAME</strong> = 'layout'</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.mainthread_jank_stats.html b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.mainthread_jank_stats.html
new file mode 100644
index 0000000..9b1d0cc
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.mainthread_jank_stats.html
@@ -0,0 +1,74 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.web_perf.metrics.mainthread_jank_stats</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.web_perf.html"><font color="#ffffff">web_perf</font></a>.<a href="telemetry.web_perf.metrics.html"><font color="#ffffff">metrics</font></a>.mainthread_jank_stats</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/web_perf/metrics/mainthread_jank_stats.py">telemetry/web_perf/metrics/mainthread_jank_stats.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.mainthread_jank_stats.html#MainthreadJankStats">MainthreadJankStats</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MainthreadJankStats">class <strong>MainthreadJankStats</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Utility&nbsp;class&nbsp;for&nbsp;extracting&nbsp;main&nbsp;thread&nbsp;jank&nbsp;statistics&nbsp;from&nbsp;the&nbsp;timeline<br>
+(or&nbsp;other&nbsp;logging&nbsp;facilities),&nbsp;and&nbsp;providing&nbsp;them&nbsp;in&nbsp;a&nbsp;common&nbsp;format&nbsp;to<br>
+classes&nbsp;that&nbsp;compute&nbsp;benchmark&nbsp;metrics&nbsp;from&nbsp;this&nbsp;data.<br>
+&nbsp;<br>
+&nbsp;&nbsp;total_big_jank_thread_time&nbsp;is&nbsp;the&nbsp;total&nbsp;thread&nbsp;duration&nbsp;of&nbsp;all&nbsp;top<br>
+&nbsp;&nbsp;slices&nbsp;whose&nbsp;thread&nbsp;time&nbsp;ranges&nbsp;overlapped&nbsp;with&nbsp;any&nbsp;thread&nbsp;time&nbsp;ranges&nbsp;of<br>
+&nbsp;&nbsp;the&nbsp;records&nbsp;and&nbsp;the&nbsp;overlapped&nbsp;thread&nbsp;duration&nbsp;is&nbsp;greater&nbsp;than&nbsp;or&nbsp;equal&nbsp;to<br>
+&nbsp;&nbsp;USER_PERCEIVABLE_DELAY_THRESHOLD_MS.<br>
+&nbsp;<br>
+&nbsp;&nbsp;biggest_jank_thread_time&nbsp;is&nbsp;the&nbsp;biggest&nbsp;thread&nbsp;duration&nbsp;of&nbsp;all<br>
+&nbsp;&nbsp;top&nbsp;slices&nbsp;whose&nbsp;thread&nbsp;time&nbsp;ranges&nbsp;overlapped&nbsp;with&nbsp;any&nbsp;of&nbsp;records'&nbsp;thread<br>
+&nbsp;&nbsp;time&nbsp;ranges.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="MainthreadJankStats-__init__"><strong>__init__</strong></a>(self, renderer_thread, interaction_records)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>biggest_jank_thread_time</strong></dt>
+</dl>
+<dl><dt><strong>total_big_jank_thread_time</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>USER_PERCEIVABLE_DELAY_THRESHOLD_MS</strong> = 50</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.memory_timeline.html b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.memory_timeline.html
new file mode 100644
index 0000000..a1d2520
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.memory_timeline.html
@@ -0,0 +1,91 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.web_perf.metrics.memory_timeline</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.web_perf.html"><font color="#ffffff">web_perf</font></a>.<a href="telemetry.web_perf.metrics.html"><font color="#ffffff">metrics</font></a>.memory_timeline</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/web_perf/metrics/memory_timeline.py">telemetry/web_perf/metrics/memory_timeline.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="collections.html">collections</a><br>
+<a href="telemetry.value.improvement_direction.html">telemetry.value.improvement_direction</a><br>
+</td><td width="25%" valign=top><a href="telemetry.value.list_of_scalar_values.html">telemetry.value.list_of_scalar_values</a><br>
+<a href="telemetry.timeline.memory_dump_event.html">telemetry.timeline.memory_dump_event</a><br>
+</td><td width="25%" valign=top><a href="telemetry.web_perf.metrics.timeline_based_metric.html">telemetry.web_perf.metrics.timeline_based_metric</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.memory_timeline.html#MemoryTimelineMetric">MemoryTimelineMetric</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MemoryTimelineMetric">class <strong>MemoryTimelineMetric</strong></a>(<a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt><a href="#MemoryTimelineMetric">MemoryTimelineMetric</a>&nbsp;reports&nbsp;summary&nbsp;stats&nbsp;from&nbsp;memory&nbsp;dump&nbsp;events.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.web_perf.metrics.memory_timeline.html#MemoryTimelineMetric">MemoryTimelineMetric</a></dd>
+<dd><a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="MemoryTimelineMetric-AddResults"><strong>AddResults</strong></a>(self, model, _renderer_thread, interactions, results)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>:<br>
+<dl><dt><a name="MemoryTimelineMetric-AddWholeTraceResults"><strong>AddWholeTraceResults</strong></a>(self, model, results)</dt><dd><tt>Computes&nbsp;and&nbsp;adds&nbsp;metrics&nbsp;corresponding&nbsp;to&nbsp;the&nbsp;entire&nbsp;trace.<br>
+&nbsp;<br>
+Override&nbsp;this&nbsp;method&nbsp;to&nbsp;compute&nbsp;results&nbsp;that&nbsp;correspond&nbsp;to&nbsp;the&nbsp;whole&nbsp;trace.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;model:&nbsp;An&nbsp;instance&nbsp;of&nbsp;telemetry.timeline.model.TimelineModel.<br>
+&nbsp;&nbsp;results:&nbsp;An&nbsp;instance&nbsp;of&nbsp;page.PageTestResults.</tt></dd></dl>
+
+<dl><dt><a name="MemoryTimelineMetric-VerifyNonOverlappedRecords"><strong>VerifyNonOverlappedRecords</strong></a>(self, interaction_records)</dt><dd><tt>This&nbsp;raises&nbsp;exceptions&nbsp;if&nbsp;interaction_records&nbsp;contain&nbsp;overlapped&nbsp;ranges.</tt></dd></dl>
+
+<dl><dt><a name="MemoryTimelineMetric-__init__"><strong>__init__</strong></a>(self)</dt><dd><tt>Computes&nbsp;metrics&nbsp;from&nbsp;a&nbsp;telemetry.timeline&nbsp;Model&nbsp;and&nbsp;a&nbsp;range</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>DEFAULT_METRICS</strong> = ['mmaps_ashmem', 'mmaps_private_dirty', 'mmaps_overall_pss', 'mmaps_native_heap', 'mmaps_java_heap']</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.rendering_frame.html b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.rendering_frame.html
new file mode 100644
index 0000000..f3e3688
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.rendering_frame.html
@@ -0,0 +1,206 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.web_perf.metrics.rendering_frame</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.web_perf.html"><font color="#ffffff">web_perf</font></a>.<a href="telemetry.web_perf.metrics.html"><font color="#ffffff">metrics</font></a>.rendering_frame</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/web_perf/metrics/rendering_frame.py">telemetry/web_perf/metrics/rendering_frame.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.timeline.bounds.html">telemetry.timeline.bounds</a><br>
+</td><td width="25%" valign=top><a href="telemetry.timeline.slice.html">telemetry.timeline.slice</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.rendering_frame.html#RenderingFrame">RenderingFrame</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.rendering_frame.html#MissingData">MissingData</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.rendering_frame.html#NoBeginFrameIdException">NoBeginFrameIdException</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="MissingData">class <strong>MissingData</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.web_perf.metrics.rendering_frame.html#MissingData">MissingData</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="MissingData-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#MissingData-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#MissingData-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="MissingData-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#MissingData-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="MissingData-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#MissingData-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="MissingData-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#MissingData-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="MissingData-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#MissingData-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="MissingData-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="MissingData-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#MissingData-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="MissingData-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#MissingData-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="MissingData-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="MissingData-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#MissingData-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="MissingData-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="NoBeginFrameIdException">class <strong>NoBeginFrameIdException</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.web_perf.metrics.rendering_frame.html#NoBeginFrameIdException">NoBeginFrameIdException</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="NoBeginFrameIdException-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#NoBeginFrameIdException-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#NoBeginFrameIdException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="NoBeginFrameIdException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#NoBeginFrameIdException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="NoBeginFrameIdException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#NoBeginFrameIdException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="NoBeginFrameIdException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#NoBeginFrameIdException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="NoBeginFrameIdException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#NoBeginFrameIdException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="NoBeginFrameIdException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="NoBeginFrameIdException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#NoBeginFrameIdException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="NoBeginFrameIdException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#NoBeginFrameIdException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="NoBeginFrameIdException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="NoBeginFrameIdException-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#NoBeginFrameIdException-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="NoBeginFrameIdException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="RenderingFrame">class <strong>RenderingFrame</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Object&nbsp;with&nbsp;information&nbsp;about&nbsp;the&nbsp;triggering&nbsp;of&nbsp;a&nbsp;BeginMainFrame&nbsp;event.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="RenderingFrame-__init__"><strong>__init__</strong></a>(self, events)</dt></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="RenderingFrame-IsEventUseful"><strong>IsEventUseful</strong></a>(event)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>bounds</strong></dt>
+</dl>
+<dl><dt><strong>queueing_duration</strong></dt>
+</dl>
+<hr>
+Data and other attributes defined here:<br>
+<dl><dt><strong>begin_main_frame_event</strong> = 'ThreadProxy::BeginMainFrame'</dl>
+
+<dl><dt><strong>send_begin_frame_event</strong> = 'ThreadProxy::ScheduledActionSendBeginMainFrame'</dl>
+
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-GetFrameEventsInsideRange"><strong>GetFrameEventsInsideRange</strong></a>(renderer_process, timeline_range)</dt><dd><tt>Returns&nbsp;RenderingFrames&nbsp;for&nbsp;all&nbsp;relevant&nbsp;events&nbsp;in&nbsp;the&nbsp;timeline_range.</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
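
The rendering_frame pydoc above documents GetFrameEventsInsideRange, the RenderingFrame class, and its queueing_duration property, which covers the gap between a ThreadProxy::ScheduledActionSendBeginMainFrame event and the matching ThreadProxy::BeginMainFrame event. Below is a minimal standalone sketch of that pairing idea; the simplified Event tuple with a begin_frame_id argument is an assumption (the real class works on timeline slice objects and raises NoBeginFrameIdException when the id is missing).

```
# Illustrative sketch only: approximates the queueing-duration idea documented
# above. The Event tuple and the args dict layout are assumptions; the real
# RenderingFrame pairs timeline slices by their begin_frame_id argument.
import collections

Event = collections.namedtuple('Event', ['name', 'start', 'args'])

SEND_EVENT = 'ThreadProxy::ScheduledActionSendBeginMainFrame'
BEGIN_EVENT = 'ThreadProxy::BeginMainFrame'


def queueing_durations(events):
  """Yields (begin_frame_id, queueing_duration_ms) pairs."""
  send_starts = {}
  for event in events:
    frame_id = event.args.get('begin_frame_id')
    if frame_id is None:
      continue  # The real code raises NoBeginFrameIdException here.
    if event.name == SEND_EVENT:
      send_starts[frame_id] = event.start
    elif event.name == BEGIN_EVENT and frame_id in send_starts:
      yield frame_id, event.start - send_starts[frame_id]


if __name__ == '__main__':
  trace = [
      Event(SEND_EVENT, 10.0, {'begin_frame_id': 1}),
      Event(BEGIN_EVENT, 14.5, {'begin_frame_id': 1}),
  ]
  print(list(queueing_durations(trace)))  # [(1, 4.5)]
```
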
diff --git a/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.rendering_stats.html b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.rendering_stats.html
new file mode 100644
index 0000000..721b672
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.rendering_stats.html
@@ -0,0 +1,120 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.web_perf.metrics.rendering_stats</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.web_perf.html"><font color="#ffffff">web_perf</font></a>.<a href="telemetry.web_perf.metrics.html"><font color="#ffffff">metrics</font></a>.rendering_stats</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/web_perf/metrics/rendering_stats.py">telemetry/web_perf/metrics/rendering_stats.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="itertools.html">itertools</a><br>
+</td><td width="25%" valign=top><a href="telemetry.web_perf.metrics.rendering_frame.html">telemetry.web_perf.metrics.rendering_frame</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.rendering_stats.html#RenderingStats">RenderingStats</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="RenderingStats">class <strong>RenderingStats</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="RenderingStats-__init__"><strong>__init__</strong></a>(self, renderer_process, browser_process, surface_flinger_process, timeline_ranges)</dt><dd><tt>Utility&nbsp;class&nbsp;for&nbsp;extracting&nbsp;rendering&nbsp;statistics&nbsp;from&nbsp;the&nbsp;timeline&nbsp;(or<br>
+other&nbsp;logging&nbsp;facilities),&nbsp;and&nbsp;providing&nbsp;them&nbsp;in&nbsp;a&nbsp;common&nbsp;format&nbsp;to&nbsp;classes<br>
+that&nbsp;compute&nbsp;benchmark&nbsp;metrics&nbsp;from&nbsp;this&nbsp;data.<br>
+&nbsp;<br>
+Stats&nbsp;are&nbsp;lists&nbsp;of&nbsp;lists&nbsp;of&nbsp;numbers.&nbsp;The&nbsp;outer&nbsp;list&nbsp;stores&nbsp;one&nbsp;list&nbsp;per<br>
+timeline&nbsp;range.<br>
+&nbsp;<br>
+All&nbsp;*_time&nbsp;values&nbsp;are&nbsp;measured&nbsp;in&nbsp;milliseconds.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-ComputeEventLatencies"><strong>ComputeEventLatencies</strong></a>(input_events)</dt><dd><tt>Compute&nbsp;input&nbsp;event&nbsp;latencies.<br>
+&nbsp;<br>
+Input&nbsp;event&nbsp;latency&nbsp;is&nbsp;the&nbsp;time&nbsp;from&nbsp;when&nbsp;the&nbsp;input&nbsp;event&nbsp;is&nbsp;created&nbsp;to<br>
+when&nbsp;the&nbsp;resulting&nbsp;page&nbsp;update&nbsp;is&nbsp;swap-buffered.<br>
+Input&nbsp;events&nbsp;on&nbsp;different&nbsp;platforms&nbsp;use&nbsp;different&nbsp;LatencyInfo&nbsp;components&nbsp;to<br>
+record&nbsp;their&nbsp;creation&nbsp;timestamps.&nbsp;We&nbsp;go&nbsp;through&nbsp;the&nbsp;following&nbsp;component&nbsp;list<br>
+to&nbsp;find&nbsp;the&nbsp;creation&nbsp;timestamp:<br>
+1.&nbsp;INPUT_EVENT_LATENCY_ORIGINAL_COMPONENT&nbsp;--&nbsp;when&nbsp;event&nbsp;is&nbsp;created&nbsp;in&nbsp;OS<br>
+2.&nbsp;INPUT_EVENT_LATENCY_UI_COMPONENT&nbsp;--&nbsp;when&nbsp;event&nbsp;reaches&nbsp;Chrome<br>
+3.&nbsp;INPUT_EVENT_LATENCY_BEGIN_RWH_COMPONENT&nbsp;--&nbsp;when&nbsp;event&nbsp;reaches&nbsp;RenderWidget<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;latency&nbsp;starts&nbsp;with&nbsp;a<br>
+LATENCY_BEGIN_SCROLL_UPDATE_MAIN_COMPONENT&nbsp;component,&nbsp;then&nbsp;it&nbsp;is<br>
+classified&nbsp;as&nbsp;a&nbsp;scroll&nbsp;update&nbsp;instead&nbsp;of&nbsp;a&nbsp;normal&nbsp;input&nbsp;latency&nbsp;measure.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;A&nbsp;list&nbsp;sorted&nbsp;by&nbsp;increasing&nbsp;start&nbsp;time&nbsp;of&nbsp;latencies&nbsp;which&nbsp;are&nbsp;tuples&nbsp;of<br>
+&nbsp;&nbsp;(input_event_name,&nbsp;latency_in_ms).</tt></dd></dl>
+ <dl><dt><a name="-GetLatencyEvents"><strong>GetLatencyEvents</strong></a>(process, timeline_range)</dt><dd><tt>Get&nbsp;LatencyInfo&nbsp;trace&nbsp;events&nbsp;from&nbsp;the&nbsp;process's&nbsp;trace&nbsp;buffer&nbsp;that&nbsp;are<br>
+&nbsp;&nbsp;&nbsp;within&nbsp;the&nbsp;timeline_range.<br>
+&nbsp;<br>
+Input&nbsp;events&nbsp;dump&nbsp;their&nbsp;LatencyInfo&nbsp;into&nbsp;the&nbsp;trace&nbsp;buffer&nbsp;as&nbsp;async&nbsp;trace&nbsp;events<br>
+whose&nbsp;names&nbsp;start&nbsp;with&nbsp;"InputLatency".&nbsp;Non-input&nbsp;events&nbsp;have&nbsp;names&nbsp;starting<br>
+with&nbsp;"Latency".&nbsp;The&nbsp;trace&nbsp;event&nbsp;has&nbsp;a&nbsp;member&nbsp;'data'&nbsp;containing&nbsp;its&nbsp;latency<br>
+history.</tt></dd></dl>
+ <dl><dt><a name="-GetTimestampEventName"><strong>GetTimestampEventName</strong></a>(process)</dt><dd><tt>Returns&nbsp;the&nbsp;name&nbsp;of&nbsp;the&nbsp;events&nbsp;used&nbsp;to&nbsp;count&nbsp;frame&nbsp;timestamps.</tt></dd></dl>
+ <dl><dt><a name="-HasRenderingStats"><strong>HasRenderingStats</strong></a>(process)</dt><dd><tt>Returns&nbsp;True&nbsp;if&nbsp;the&nbsp;process&nbsp;contains&nbsp;at&nbsp;least&nbsp;one<br>
+BenchmarkInstrumentation::*<a href="#RenderingStats">RenderingStats</a>&nbsp;event&nbsp;with&nbsp;a&nbsp;frame.</tt></dd></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>APPROXIMATED_PIXEL_ERROR</strong> = 'approximated_pixel_percentages'<br>
+<strong>APPROXIMATED_VISIBLE_CONTENT_DATA</strong> = 'approximated_visible_content_area'<br>
+<strong>BEGIN_COMP_NAME</strong> = 'INPUT_EVENT_LATENCY_BEGIN_RWH_COMPONENT'<br>
+<strong>BEGIN_SCROLL_UPDATE_COMP_NAME</strong> = 'LATENCY_BEGIN_SCROLL_LISTENER_UPDATE_MAIN_COMPONENT'<br>
+<strong>CHECKERBOARDED_PIXEL_ERROR</strong> = 'checkerboarded_pixel_percentages'<br>
+<strong>CHECKERBOARDED_VISIBLE_CONTENT_DATA</strong> = 'checkerboarded_visible_content_area'<br>
+<strong>END_COMP_NAME</strong> = 'INPUT_EVENT_GPU_SWAP_BUFFER_COMPONENT'<br>
+<strong>FORWARD_SCROLL_UPDATE_COMP_NAME</strong> = 'INPUT_EVENT_LATENCY_FORWARD_SCROLL_UPDATE_TO_MAIN_COMPONENT'<br>
+<strong>GESTURE_SCROLL_UPDATE_EVENT_NAME</strong> = 'InputLatency::GestureScrollUpdate'<br>
+<strong>MAIN_THREAD_SCROLL_UPDATE_EVENT_NAME</strong> = 'Latency::ScrollUpdate'<br>
+<strong>ORIGINAL_COMP_NAME</strong> = 'INPUT_EVENT_LATENCY_ORIGINAL_COMPONENT'<br>
+<strong>UI_COMP_NAME</strong> = 'INPUT_EVENT_LATENCY_UI_COMPONENT'<br>
+<strong>VISIBLE_CONTENT_DATA</strong> = 'visible_content_area'</td></tr></table>
+</body></html>
\ No newline at end of file
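
ComputeEventLatencies above describes walking a fixed component list to find an input event's creation timestamp and measuring latency up to the GPU swap-buffer component. The sketch below illustrates that lookup using the component-name constants from the Data section; the `{component_name: {'time': ...}}` shape and microsecond timestamps are assumptions about the trace data, not part of the documented API.

```
# Hedged sketch of the creation-timestamp priority lookup described in
# ComputeEventLatencies. Only the constant names come from the pydoc above;
# the dict layout and time units are assumptions.
ORIGINAL_COMP_NAME = 'INPUT_EVENT_LATENCY_ORIGINAL_COMPONENT'
UI_COMP_NAME = 'INPUT_EVENT_LATENCY_UI_COMPONENT'
BEGIN_COMP_NAME = 'INPUT_EVENT_LATENCY_BEGIN_RWH_COMPONENT'
END_COMP_NAME = 'INPUT_EVENT_GPU_SWAP_BUFFER_COMPONENT'


def creation_timestamp(components):
  """Returns the earliest known creation time, or None if absent."""
  for name in (ORIGINAL_COMP_NAME, UI_COMP_NAME, BEGIN_COMP_NAME):
    if name in components:
      return components[name]['time']
  return None


def latency_ms(components):
  start = creation_timestamp(components)
  end = components.get(END_COMP_NAME, {}).get('time')
  if start is None or end is None:
    return None
  return (end - start) / 1000.0  # timestamps assumed to be in microseconds


if __name__ == '__main__':
  data = {ORIGINAL_COMP_NAME: {'time': 1000.0},
          END_COMP_NAME: {'time': 17000.0}}
  print(latency_ms(data))  # 16.0
```
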
diff --git a/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.responsiveness_metric.html b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.responsiveness_metric.html
new file mode 100644
index 0000000..0d8d702
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.responsiveness_metric.html
@@ -0,0 +1,96 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.web_perf.metrics.responsiveness_metric</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.web_perf.html"><font color="#ffffff">web_perf</font></a>.<a href="telemetry.web_perf.metrics.html"><font color="#ffffff">metrics</font></a>.responsiveness_metric</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/web_perf/metrics/responsiveness_metric.py">telemetry/web_perf/metrics/responsiveness_metric.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.value.improvement_direction.html">telemetry.value.improvement_direction</a><br>
+<a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="telemetry.web_perf.metrics.mainthread_jank_stats.html">telemetry.web_perf.metrics.mainthread_jank_stats</a><br>
+<a href="telemetry.value.scalar.html">telemetry.value.scalar</a><br>
+</td><td width="25%" valign=top><a href="telemetry.web_perf.metrics.timeline_based_metric.html">telemetry.web_perf.metrics.timeline_based_metric</a><br>
+<a href="telemetry.web_perf.timeline_interaction_record.html">telemetry.web_perf.timeline_interaction_record</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.responsiveness_metric.html#ResponsivenessMetric">ResponsivenessMetric</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ResponsivenessMetric">class <strong>ResponsivenessMetric</strong></a>(<a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Computes&nbsp;metrics&nbsp;that&nbsp;measure&nbsp;responsiveness&nbsp;over&nbsp;the&nbsp;record&nbsp;ranges.<br>
+&nbsp;<br>
+&nbsp;total_big_jank_thread_time&nbsp;is&nbsp;the&nbsp;total&nbsp;thread&nbsp;duration&nbsp;of&nbsp;all&nbsp;top<br>
+&nbsp;slices&nbsp;whose&nbsp;thread&nbsp;time&nbsp;ranges&nbsp;overlapped&nbsp;with&nbsp;any&nbsp;thread&nbsp;time&nbsp;ranges&nbsp;of<br>
+&nbsp;the&nbsp;records&nbsp;and&nbsp;the&nbsp;overlapped&nbsp;thread&nbsp;duration&nbsp;is&nbsp;greater&nbsp;than&nbsp;or&nbsp;equal&nbsp;to<br>
+&nbsp;USER_PERCEIVABLE_DELAY_THRESHOLD_MS.<br>
+&nbsp;<br>
+&nbsp;biggest_jank_thread_time&nbsp;is&nbsp;the&nbsp;biggest&nbsp;thread&nbsp;duration&nbsp;of&nbsp;all<br>
+&nbsp;top&nbsp;slices&nbsp;whose&nbsp;thread&nbsp;time&nbsp;ranges&nbsp;overlapped&nbsp;with&nbsp;any&nbsp;of&nbsp;records'&nbsp;thread<br>
+&nbsp;time&nbsp;ranges.<br>
+&nbsp;<br>
+All&nbsp;*_time&nbsp;values&nbsp;are&nbsp;measured&nbsp;in&nbsp;milliseconds.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.web_perf.metrics.responsiveness_metric.html#ResponsivenessMetric">ResponsivenessMetric</a></dd>
+<dd><a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="ResponsivenessMetric-AddResults"><strong>AddResults</strong></a>(self, _, renderer_thread, interaction_records, results)</dt></dl>
+
+<dl><dt><a name="ResponsivenessMetric-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>:<br>
+<dl><dt><a name="ResponsivenessMetric-AddWholeTraceResults"><strong>AddWholeTraceResults</strong></a>(self, model, results)</dt><dd><tt>Computes&nbsp;and&nbsp;adds&nbsp;metrics&nbsp;corresponding&nbsp;to&nbsp;the&nbsp;entire&nbsp;trace.<br>
+&nbsp;<br>
+Override&nbsp;this&nbsp;method&nbsp;to&nbsp;compute&nbsp;results&nbsp;that&nbsp;correspond&nbsp;to&nbsp;the&nbsp;whole&nbsp;trace.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;model:&nbsp;An&nbsp;instance&nbsp;of&nbsp;telemetry.timeline.model.TimelineModel.<br>
+&nbsp;&nbsp;results:&nbsp;An&nbsp;instance&nbsp;of&nbsp;page.PageTestResults.</tt></dd></dl>
+
+<dl><dt><a name="ResponsivenessMetric-VerifyNonOverlappedRecords"><strong>VerifyNonOverlappedRecords</strong></a>(self, interaction_records)</dt><dd><tt>This&nbsp;raises&nbsp;exceptions&nbsp;if&nbsp;interaction_records&nbsp;contain&nbsp;overlapped&nbsp;ranges.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
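
The ResponsivenessMetric docstring above defines total_big_jank_thread_time and biggest_jank_thread_time in terms of top slices whose thread-time ranges overlap the interaction records. A minimal sketch of the two aggregates follows, assuming the per-slice overlapped thread durations (in ms) are already available; the 50 ms threshold is a placeholder for USER_PERCEIVABLE_DELAY_THRESHOLD_MS, whose real value lives in mainthread_jank_stats.

```
# Minimal sketch of the jank aggregates described in the class docstring.
# Overlap computation against interaction records is assumed to have been
# done already; the threshold value below is an assumption.
USER_PERCEIVABLE_DELAY_THRESHOLD_MS = 50  # assumed stand-in value


def jank_aggregates(overlapped_thread_durations_ms):
  big = [d for d in overlapped_thread_durations_ms
         if d >= USER_PERCEIVABLE_DELAY_THRESHOLD_MS]
  total_big_jank_thread_time = sum(big)
  biggest_jank_thread_time = max(overlapped_thread_durations_ms or [0])
  return total_big_jank_thread_time, biggest_jank_thread_time


if __name__ == '__main__':
  print(jank_aggregates([12.0, 75.5, 130.0]))  # (205.5, 130.0)
```
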
diff --git a/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.single_event.html b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.single_event.html
new file mode 100644
index 0000000..7f4e43b
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.single_event.html
@@ -0,0 +1,27 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.web_perf.metrics.single_event</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.web_perf.html"><font color="#ffffff">web_perf</font></a>.<a href="telemetry.web_perf.metrics.html"><font color="#ffffff">metrics</font></a>.single_event</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/web_perf/metrics/single_event.py">telemetry/web_perf/metrics/single_event.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.value.improvement_direction.html">telemetry.value.improvement_direction</a><br>
+</td><td width="25%" valign=top><a href="telemetry.value.list_of_scalar_values.html">telemetry.value.list_of_scalar_values</a><br>
+</td><td width="25%" valign=top><a href="telemetry.web_perf.metrics.timeline_based_metric.html">telemetry.web_perf.metrics.timeline_based_metric</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.smoothness.html b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.smoothness.html
new file mode 100644
index 0000000..5081ad7
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.smoothness.html
@@ -0,0 +1,112 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.web_perf.metrics.smoothness</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.web_perf.html"><font color="#ffffff">web_perf</font></a>.<a href="telemetry.web_perf.metrics.html"><font color="#ffffff">metrics</font></a>.smoothness</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/web_perf/metrics/smoothness.py">telemetry/web_perf/metrics/smoothness.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.value.improvement_direction.html">telemetry.value.improvement_direction</a><br>
+<a href="telemetry.value.list_of_scalar_values.html">telemetry.value.list_of_scalar_values</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+<a href="telemetry.util.perf_tests_helper.html">telemetry.util.perf_tests_helper</a><br>
+</td><td width="25%" valign=top><a href="telemetry.web_perf.metrics.rendering_stats.html">telemetry.web_perf.metrics.rendering_stats</a><br>
+<a href="telemetry.value.scalar.html">telemetry.value.scalar</a><br>
+</td><td width="25%" valign=top><a href="telemetry.util.statistics.html">telemetry.util.statistics</a><br>
+<a href="telemetry.web_perf.metrics.timeline_based_metric.html">telemetry.web_perf.metrics.timeline_based_metric</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.smoothness.html#SmoothnessMetric">SmoothnessMetric</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="SmoothnessMetric">class <strong>SmoothnessMetric</strong></a>(<a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Computes&nbsp;metrics&nbsp;that&nbsp;measure&nbsp;smoothness&nbsp;of&nbsp;animations&nbsp;over&nbsp;given&nbsp;ranges.<br>
+&nbsp;<br>
+Animations&nbsp;are&nbsp;typically&nbsp;considered&nbsp;smooth&nbsp;if&nbsp;the&nbsp;frame&nbsp;rates&nbsp;are&nbsp;close&nbsp;to<br>
+60&nbsp;frames&nbsp;per&nbsp;second&nbsp;(fps)&nbsp;and&nbsp;uniformly&nbsp;distributed&nbsp;over&nbsp;the&nbsp;sequence.&nbsp;To<br>
+determine&nbsp;if&nbsp;a&nbsp;timeline&nbsp;range&nbsp;contains&nbsp;a&nbsp;smooth&nbsp;animation,&nbsp;we&nbsp;update&nbsp;the<br>
+results&nbsp;object&nbsp;with&nbsp;several&nbsp;representative&nbsp;metrics:<br>
+&nbsp;<br>
+&nbsp;&nbsp;frame_times:&nbsp;A&nbsp;list&nbsp;of&nbsp;raw&nbsp;frame&nbsp;times<br>
+&nbsp;&nbsp;mean_frame_time:&nbsp;The&nbsp;arithmetic&nbsp;mean&nbsp;of&nbsp;frame&nbsp;times<br>
+&nbsp;&nbsp;percentage_smooth:&nbsp;Percentage&nbsp;of&nbsp;frames&nbsp;that&nbsp;were&nbsp;hitting&nbsp;60&nbsp;FPS.<br>
+&nbsp;&nbsp;frame_time_discrepancy:&nbsp;The&nbsp;absolute&nbsp;discrepancy&nbsp;of&nbsp;frame&nbsp;timestamps<br>
+&nbsp;&nbsp;mean_pixels_approximated:&nbsp;The&nbsp;mean&nbsp;percentage&nbsp;of&nbsp;pixels&nbsp;approximated<br>
+&nbsp;&nbsp;queueing_durations:&nbsp;The&nbsp;queueing&nbsp;delay&nbsp;between&nbsp;compositor&nbsp;&amp;&nbsp;main&nbsp;threads<br>
+&nbsp;<br>
+Note&nbsp;that&nbsp;if&nbsp;any&nbsp;of&nbsp;the&nbsp;interaction&nbsp;records&nbsp;provided&nbsp;to&nbsp;AddResults&nbsp;have&nbsp;less<br>
+than&nbsp;2&nbsp;frames,&nbsp;we&nbsp;will&nbsp;return&nbsp;telemetry&nbsp;values&nbsp;with&nbsp;None&nbsp;values&nbsp;for&nbsp;each&nbsp;of<br>
+the&nbsp;smoothness&nbsp;metrics.&nbsp;Similarly,&nbsp;older&nbsp;browsers&nbsp;without&nbsp;support&nbsp;for<br>
+tracking&nbsp;the&nbsp;BeginMainFrame&nbsp;events&nbsp;will&nbsp;report&nbsp;a&nbsp;ListOfScalarValues&nbsp;with&nbsp;a<br>
+None&nbsp;value&nbsp;for&nbsp;the&nbsp;queueing&nbsp;duration&nbsp;metric.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.web_perf.metrics.smoothness.html#SmoothnessMetric">SmoothnessMetric</a></dd>
+<dd><a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="SmoothnessMetric-AddResults"><strong>AddResults</strong></a>(self, model, renderer_thread, interaction_records, results)</dt></dl>
+
+<dl><dt><a name="SmoothnessMetric-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>:<br>
+<dl><dt><a name="SmoothnessMetric-AddWholeTraceResults"><strong>AddWholeTraceResults</strong></a>(self, model, results)</dt><dd><tt>Computes&nbsp;and&nbsp;adds&nbsp;metrics&nbsp;corresponding&nbsp;to&nbsp;the&nbsp;entire&nbsp;trace.<br>
+&nbsp;<br>
+Override&nbsp;this&nbsp;method&nbsp;to&nbsp;compute&nbsp;results&nbsp;that&nbsp;correspond&nbsp;to&nbsp;the&nbsp;whole&nbsp;trace.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;model:&nbsp;An&nbsp;instance&nbsp;of&nbsp;telemetry.timeline.model.TimelineModel.<br>
+&nbsp;&nbsp;results:&nbsp;An&nbsp;instance&nbsp;of&nbsp;page.PageTestResults.</tt></dd></dl>
+
+<dl><dt><a name="SmoothnessMetric-VerifyNonOverlappedRecords"><strong>VerifyNonOverlappedRecords</strong></a>(self, interaction_records)</dt><dd><tt>This&nbsp;raises&nbsp;exceptions&nbsp;if&nbsp;interaction_records&nbsp;contain&nbsp;overlapped&nbsp;ranges.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>NOT_ENOUGH_FRAMES_MESSAGE</strong> = "Not enough frames for smoothness metrics (at lea...der extremely slow<font color="#c040c0">\n</font>- Pages that can't be scrolled"</td></tr></table>
+</body></html>
\ No newline at end of file
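
The SmoothnessMetric docstring above lists the values reported per interaction, including mean_frame_time and percentage_smooth. Below is a standalone sketch of those two computations from raw frame timestamps in milliseconds; the 17 ms cutoff for "hitting 60 FPS" is an assumption, and the documented behaviour of returning None values when a record has fewer than two frames is mirrored.

```
# Illustrative computation of mean_frame_time and percentage_smooth. The
# cutoff constant is an assumption; frame_time_discrepancy and
# queueing_durations from the docstring are intentionally omitted.
SMOOTH_THRESHOLD_MS = 17.0  # assumed cutoff approximating 60 fps


def smoothness_summary(frame_timestamps_ms):
  frame_times = [b - a for a, b in zip(frame_timestamps_ms,
                                       frame_timestamps_ms[1:])]
  if not frame_times:
    # Mirrors the documented behaviour: fewer than 2 frames yields None values.
    return None, None
  mean_frame_time = sum(frame_times) / len(frame_times)
  percentage_smooth = 100.0 * sum(
      1 for t in frame_times if t <= SMOOTH_THRESHOLD_MS) / len(frame_times)
  return mean_frame_time, percentage_smooth


if __name__ == '__main__':
  print(smoothness_summary([0, 16, 33, 50, 120]))  # (30.0, 75.0)
```
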
diff --git a/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.text_selection.html b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.text_selection.html
new file mode 100644
index 0000000..9418ac5
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.text_selection.html
@@ -0,0 +1,92 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.web_perf.metrics.text_selection</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.web_perf.html"><font color="#ffffff">web_perf</font></a>.<a href="telemetry.web_perf.metrics.html"><font color="#ffffff">metrics</font></a>.text_selection</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/web_perf/metrics/text_selection.py">telemetry/web_perf/metrics/text_selection.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.web_perf.metrics.single_event.html">telemetry.web_perf.metrics.single_event</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.single_event.html#_SingleEventMetric">telemetry.web_perf.metrics.single_event._SingleEventMetric</a>(<a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.text_selection.html#TextSelectionMetric">TextSelectionMetric</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TextSelectionMetric">class <strong>TextSelectionMetric</strong></a>(<a href="telemetry.web_perf.metrics.single_event.html#_SingleEventMetric">telemetry.web_perf.metrics.single_event._SingleEventMetric</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Directly&nbsp;reports&nbsp;the&nbsp;durations&nbsp;of&nbsp;WebLocalFrameImpl::moveRangeSelectionExtent<br>
+events&nbsp;associated&nbsp;with&nbsp;moving&nbsp;a&nbsp;selection&nbsp;extent.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.web_perf.metrics.text_selection.html#TextSelectionMetric">TextSelectionMetric</a></dd>
+<dd><a href="telemetry.web_perf.metrics.single_event.html#_SingleEventMetric">telemetry.web_perf.metrics.single_event._SingleEventMetric</a></dd>
+<dd><a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="TextSelectionMetric-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.web_perf.metrics.single_event.html#_SingleEventMetric">telemetry.web_perf.metrics.single_event._SingleEventMetric</a>:<br>
+<dl><dt><a name="TextSelectionMetric-AddResults"><strong>AddResults</strong></a>(self, _model, renderer_thread, interactions, results)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>:<br>
+<dl><dt><a name="TextSelectionMetric-AddWholeTraceResults"><strong>AddWholeTraceResults</strong></a>(self, model, results)</dt><dd><tt>Computes&nbsp;and&nbsp;adds&nbsp;metrics&nbsp;corresponding&nbsp;to&nbsp;the&nbsp;entire&nbsp;trace.<br>
+&nbsp;<br>
+Override&nbsp;this&nbsp;method&nbsp;to&nbsp;compute&nbsp;results&nbsp;that&nbsp;correspond&nbsp;to&nbsp;the&nbsp;whole&nbsp;trace.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;model:&nbsp;An&nbsp;instance&nbsp;of&nbsp;telemetry.timeline.model.TimelineModel.<br>
+&nbsp;&nbsp;results:&nbsp;An&nbsp;instance&nbsp;of&nbsp;page.PageTestResults.</tt></dd></dl>
+
+<dl><dt><a name="TextSelectionMetric-VerifyNonOverlappedRecords"><strong>VerifyNonOverlappedRecords</strong></a>(self, interaction_records)</dt><dd><tt>This&nbsp;raises&nbsp;exceptions&nbsp;if&nbsp;interaction_records&nbsp;contain&nbsp;overlapped&nbsp;ranges.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>EVENT_NAME</strong> = 'WebLocalFrameImpl::moveRangeSelectionExtent'<br>
+<strong>METRIC_NAME</strong> = 'text-selection'</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.timeline_based_metric.html b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.timeline_based_metric.html
new file mode 100644
index 0000000..c0f1ed6
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.timeline_based_metric.html
@@ -0,0 +1,157 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.web_perf.metrics.timeline_based_metric</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.web_perf.html"><font color="#ffffff">web_perf</font></a>.<a href="telemetry.web_perf.metrics.html"><font color="#ffffff">metrics</font></a>.timeline_based_metric</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/web_perf/metrics/timeline_based_metric.py">telemetry/web_perf/metrics/timeline_based_metric.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">TimelineBasedMetric</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetricException">TimelineBasedMetricException</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TimelineBasedMetric">class <strong>TimelineBasedMetric</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="TimelineBasedMetric-AddResults"><strong>AddResults</strong></a>(self, model, renderer_thread, interaction_records, results)</dt><dd><tt>Computes&nbsp;and&nbsp;adds&nbsp;metrics&nbsp;for&nbsp;the&nbsp;interaction_records'&nbsp;time&nbsp;ranges.<br>
+&nbsp;<br>
+The&nbsp;override&nbsp;of&nbsp;this&nbsp;method&nbsp;should&nbsp;compute&nbsp;results&nbsp;on&nbsp;the&nbsp;data&nbsp;**only**<br>
+within&nbsp;the&nbsp;interaction_records'&nbsp;start&nbsp;and&nbsp;end&nbsp;time&nbsp;ranges.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;model:&nbsp;An&nbsp;instance&nbsp;of&nbsp;telemetry.timeline.model.TimelineModel.<br>
+&nbsp;&nbsp;interaction_records:&nbsp;A&nbsp;list&nbsp;of&nbsp;instances&nbsp;of&nbsp;TimelineInteractionRecord.&nbsp;If<br>
+&nbsp;&nbsp;&nbsp;&nbsp;the&nbsp;override&nbsp;of&nbsp;this&nbsp;method&nbsp;doesn't&nbsp;support&nbsp;overlapped&nbsp;ranges,&nbsp;use<br>
+&nbsp;&nbsp;&nbsp;&nbsp;VerifyNonOverlappedRecords&nbsp;to&nbsp;check&nbsp;that&nbsp;no&nbsp;records&nbsp;are&nbsp;overlapped.<br>
+&nbsp;&nbsp;results:&nbsp;An&nbsp;instance&nbsp;of&nbsp;page.PageTestResults.</tt></dd></dl>
+
+<dl><dt><a name="TimelineBasedMetric-AddWholeTraceResults"><strong>AddWholeTraceResults</strong></a>(self, model, results)</dt><dd><tt>Computes&nbsp;and&nbsp;adds&nbsp;metrics&nbsp;corresponding&nbsp;to&nbsp;the&nbsp;entire&nbsp;trace.<br>
+&nbsp;<br>
+Override&nbsp;this&nbsp;method&nbsp;to&nbsp;compute&nbsp;results&nbsp;that&nbsp;correspond&nbsp;to&nbsp;the&nbsp;whole&nbsp;trace.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;model:&nbsp;An&nbsp;instance&nbsp;of&nbsp;telemetry.timeline.model.TimelineModel.<br>
+&nbsp;&nbsp;results:&nbsp;An&nbsp;instance&nbsp;of&nbsp;page.PageTestResults.</tt></dd></dl>
+
+<dl><dt><a name="TimelineBasedMetric-VerifyNonOverlappedRecords"><strong>VerifyNonOverlappedRecords</strong></a>(self, interaction_records)</dt><dd><tt>This&nbsp;raises&nbsp;exceptions&nbsp;if&nbsp;interaction_records&nbsp;contain&nbsp;overlapped&nbsp;ranges.</tt></dd></dl>
+
+<dl><dt><a name="TimelineBasedMetric-__init__"><strong>__init__</strong></a>(self)</dt><dd><tt>Computes&nbsp;metrics&nbsp;from&nbsp;a&nbsp;telemetry.timeline&nbsp;Model&nbsp;and&nbsp;a&nbsp;range</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TimelineBasedMetricException">class <strong>TimelineBasedMetricException</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt><a href="exceptions.html#Exception">Exception</a>&nbsp;that&nbsp;can&nbsp;be&nbsp;thrown&nbsp;from&nbsp;metrics&nbsp;that&nbsp;implement<br>
+<a href="#TimelineBasedMetric">TimelineBasedMetric</a>&nbsp;to&nbsp;indicate&nbsp;a&nbsp;problem&nbsp;that&nbsp;arose&nbsp;when&nbsp;computing&nbsp;the&nbsp;metric.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetricException">TimelineBasedMetricException</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="TimelineBasedMetricException-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#TimelineBasedMetricException-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#TimelineBasedMetricException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="TimelineBasedMetricException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#TimelineBasedMetricException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="TimelineBasedMetricException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#TimelineBasedMetricException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="TimelineBasedMetricException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#TimelineBasedMetricException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="TimelineBasedMetricException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#TimelineBasedMetricException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="TimelineBasedMetricException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="TimelineBasedMetricException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#TimelineBasedMetricException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="TimelineBasedMetricException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#TimelineBasedMetricException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="TimelineBasedMetricException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="TimelineBasedMetricException-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#TimelineBasedMetricException-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="TimelineBasedMetricException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-IsEventInInteractions"><strong>IsEventInInteractions</strong></a>(event, interaction_records)</dt><dd><tt>Return&nbsp;True&nbsp;if&nbsp;event&nbsp;is&nbsp;in&nbsp;any&nbsp;of&nbsp;the&nbsp;interaction&nbsp;records'&nbsp;time&nbsp;range.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;event:&nbsp;an&nbsp;instance&nbsp;of&nbsp;telemetry.timeline.event.TimelineEvent.<br>
+&nbsp;&nbsp;interaction_records:&nbsp;a&nbsp;list&nbsp;of&nbsp;interaction&nbsp;records,&nbsp;whereas&nbsp;each&nbsp;record&nbsp;is<br>
+&nbsp;&nbsp;&nbsp;&nbsp;an&nbsp;instance&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;telemetry.web_perf.timeline_interaction_record.TimelineInteractionRecord.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;True&nbsp;if&nbsp;|event|'s&nbsp;start&nbsp;&amp;&nbsp;end&nbsp;time&nbsp;is&nbsp;in&nbsp;any&nbsp;of&nbsp;the&nbsp;|interaction_records|'s<br>
+&nbsp;&nbsp;time&nbsp;range.</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
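
IsEventInInteractions above returns True only when both the event's start and end fall inside some interaction record's time range. The following standalone sketch shows that containment check, using simple tuples as stand-ins for TimelineEvent and TimelineInteractionRecord.

```
# Sketch of the containment check documented for IsEventInInteractions. The
# namedtuples are assumptions standing in for the real timeline objects.
import collections

Event = collections.namedtuple('Event', ['start', 'end'])
Record = collections.namedtuple('Record', ['start', 'end'])


def is_event_in_interactions(event, interaction_records):
  """True if both the event's start and end lie inside one record's range."""
  return any(r.start <= event.start and event.end <= r.end
             for r in interaction_records)


if __name__ == '__main__':
  records = [Record(100, 200), Record(400, 500)]
  print(is_event_in_interactions(Event(150, 180), records))  # True
  print(is_event_in_interactions(Event(190, 250), records))  # False
```
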
diff --git a/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.trace_event_stats.html b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.trace_event_stats.html
new file mode 100644
index 0000000..013c785
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.trace_event_stats.html
@@ -0,0 +1,107 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.web_perf.metrics.trace_event_stats</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.web_perf.html"><font color="#ffffff">web_perf</font></a>.<a href="telemetry.web_perf.metrics.html"><font color="#ffffff">metrics</font></a>.trace_event_stats</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/web_perf/metrics/trace_event_stats.py">telemetry/web_perf/metrics/trace_event_stats.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="collections.html">collections</a><br>
+</td><td width="25%" valign=top><a href="telemetry.value.list_of_scalar_values.html">telemetry.value.list_of_scalar_values</a><br>
+</td><td width="25%" valign=top><a href="telemetry.value.scalar.html">telemetry.value.scalar</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.trace_event_stats.html#TraceEventStats">TraceEventStats</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.trace_event_stats.html#TraceEventStatsInput">TraceEventStatsInput</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TraceEventStats">class <strong>TraceEventStats</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Reports&nbsp;durations&nbsp;and&nbsp;counts&nbsp;of&nbsp;given&nbsp;trace&nbsp;events.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="TraceEventStats-AddInput"><strong>AddInput</strong></a>(self, trace_event_aggregator_input)</dt></dl>
+
+<dl><dt><a name="TraceEventStats-AddResults"><strong>AddResults</strong></a>(self, model, renderer_process, interactions, results)</dt></dl>
+
+<dl><dt><a name="TraceEventStats-__init__"><strong>__init__</strong></a>(self, trace_event_aggregator_inputs<font color="#909090">=None</font>)</dt></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="TraceEventStats-ThreadDurationIfPresent"><strong>ThreadDurationIfPresent</strong></a>(event)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TraceEventStatsInput">class <strong>TraceEventStatsInput</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Input&nbsp;for&nbsp;the&nbsp;<a href="#TraceEventStats">TraceEventStats</a>.<br>
+Using&nbsp;this&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;<a href="#TraceEventStats">TraceEventStats</a>&nbsp;will&nbsp;include&nbsp;two&nbsp;metrics,&nbsp;one&nbsp;with&nbsp;a<br>
+list&nbsp;of&nbsp;times&nbsp;of&nbsp;the&nbsp;given&nbsp;event,&nbsp;and&nbsp;one&nbsp;for&nbsp;the&nbsp;count&nbsp;of&nbsp;the&nbsp;events,&nbsp;named<br>
+`metric_name&nbsp;+&nbsp;'-count'`.<br>
+Args:<br>
+&nbsp;&nbsp;event_category:&nbsp;The&nbsp;category&nbsp;of&nbsp;the&nbsp;event&nbsp;to&nbsp;track.<br>
+&nbsp;&nbsp;event_name:&nbsp;The&nbsp;name&nbsp;of&nbsp;the&nbsp;event&nbsp;to&nbsp;track.<br>
+&nbsp;&nbsp;metric_name:&nbsp;The&nbsp;name&nbsp;of&nbsp;the&nbsp;metric,&nbsp;which&nbsp;accumulates&nbsp;all&nbsp;of&nbsp;the<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;times&nbsp;of&nbsp;the&nbsp;events.<br>
+&nbsp;&nbsp;metric_description:&nbsp;Description&nbsp;of&nbsp;the&nbsp;metric.<br>
+&nbsp;&nbsp;units:&nbsp;Units&nbsp;for&nbsp;the&nbsp;metric.<br>
+&nbsp;&nbsp;process_name:&nbsp;(optional)&nbsp;The&nbsp;name&nbsp;of&nbsp;the&nbsp;process&nbsp;to&nbsp;inspect&nbsp;for&nbsp;the&nbsp;trace<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;events.&nbsp;Defaults&nbsp;to&nbsp;'Renderer'.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="TraceEventStatsInput-__init__"><strong>__init__</strong></a>(self, event_category, event_name, metric_name, metric_description, units, process_name<font color="#909090">='Renderer'</font>)</dt></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="TraceEventStatsInput-GetEventId"><strong>GetEventId</strong></a>(event_category, event_name)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
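
The TraceEventStatsInput docstring above lists the constructor arguments and notes that each input yields a list of event durations plus a `metric_name + '-count'` scalar. A hedged usage sketch built from those documented signatures follows; the 'blink' category and 'UpdateLayoutTree' event name are illustrative assumptions, and running it requires a catapult checkout on PYTHONPATH.

```
# Usage sketch based on the signatures in the pydoc above. The category and
# event name are example assumptions, not values taken from the source.
from telemetry.web_perf.metrics import trace_event_stats

stats = trace_event_stats.TraceEventStats()
stats.AddInput(trace_event_stats.TraceEventStatsInput(
    event_category='blink',                # assumed example category
    event_name='UpdateLayoutTree',         # assumed example event name
    metric_name='update-layout-tree',      # also yields 'update-layout-tree-count'
    metric_description='Duration of layout tree updates.',
    units='ms',
    process_name='Renderer'))              # documented default, shown explicitly
# stats.AddResults(model, renderer_process, interactions, results) would then
# add the duration list and the count scalar to the results object.
```
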
diff --git a/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.v8_gc_latency.html b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.v8_gc_latency.html
new file mode 100644
index 0000000..15c3257
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.v8_gc_latency.html
@@ -0,0 +1,110 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.web_perf.metrics.v8_gc_latency</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.web_perf.html"><font color="#ffffff">web_perf</font></a>.<a href="telemetry.web_perf.metrics.html"><font color="#ffffff">metrics</font></a>.v8_gc_latency</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/web_perf/metrics/v8_gc_latency.py">telemetry/web_perf/metrics/v8_gc_latency.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.value.improvement_direction.html">telemetry.value.improvement_direction</a><br>
+<a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="telemetry.value.scalar.html">telemetry.value.scalar</a><br>
+<a href="telemetry.util.statistics.html">telemetry.util.statistics</a><br>
+</td><td width="25%" valign=top><a href="telemetry.web_perf.metrics.timeline_based_metric.html">telemetry.web_perf.metrics.timeline_based_metric</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.v8_gc_latency.html#V8EventStat">V8EventStat</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.v8_gc_latency.html#V8GCLatency">V8GCLatency</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="V8EventStat">class <strong>V8EventStat</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="V8EventStat-__init__"><strong>__init__</strong></a>(self, src_event_name, result_name, result_description)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>percentage_thread_duration_during_idle</strong></dt>
+</dl>
+<dl><dt><strong>thread_duration_outside_idle</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="V8GCLatency">class <strong>V8GCLatency</strong></a>(<a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.web_perf.metrics.v8_gc_latency.html#V8GCLatency">V8GCLatency</a></dd>
+<dd><a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="V8GCLatency-AddResults"><strong>AddResults</strong></a>(self, model, renderer_thread, interaction_records, results)</dt></dl>
+
+<dl><dt><a name="V8GCLatency-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>:<br>
+<dl><dt><a name="V8GCLatency-AddWholeTraceResults"><strong>AddWholeTraceResults</strong></a>(self, model, results)</dt><dd><tt>Computes&nbsp;and&nbsp;adds&nbsp;metrics&nbsp;corresponding&nbsp;to&nbsp;the&nbsp;entire&nbsp;trace.<br>
+&nbsp;<br>
+Override&nbsp;this&nbsp;method&nbsp;to&nbsp;compute&nbsp;results&nbsp;that&nbsp;correspond&nbsp;to&nbsp;the&nbsp;whole&nbsp;trace.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;model:&nbsp;An&nbsp;instance&nbsp;of&nbsp;telemetry.timeline.model.TimelineModel.<br>
+&nbsp;&nbsp;results:&nbsp;An&nbsp;instance&nbsp;of&nbsp;page.PageTestResults.</tt></dd></dl>
+
+<dl><dt><a name="V8GCLatency-VerifyNonOverlappedRecords"><strong>VerifyNonOverlappedRecords</strong></a>(self, interaction_records)</dt><dd><tt>This&nbsp;raises&nbsp;exceptions&nbsp;if&nbsp;interaction_records&nbsp;contain&nbsp;overlapped&nbsp;ranges.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
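
The V8GCLatency entry above also spells out the TimelineBasedMetric contract that every metric in this package follows: override AddResults(model, renderer_thread, interaction_records, results), optionally AddWholeTraceResults, and use VerifyNonOverlappedRecords to reject overlapping records. Below is a sketch of a custom metric against that contract; the class name, value name, and the exact ScalarValue keyword arguments are assumptions for illustration, not anything defined on these pages:

```
# Sketch of a metric implementing the TimelineBasedMetric interface described
# above. The value name and units are placeholders; ScalarValue's keyword
# arguments follow the usual telemetry pattern and are an assumption here.
from telemetry.value import improvement_direction
from telemetry.value import scalar
from telemetry.web_perf.metrics import timeline_based_metric


class InteractionCountMetric(timeline_based_metric.TimelineBasedMetric):
  """Reports how many interaction records a story produced (illustrative)."""

  def AddResults(self, model, renderer_thread, interaction_records, results):
    # Inherited helper: raises if interaction_records contain overlapped ranges.
    self.VerifyNonOverlappedRecords(interaction_records)
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'interaction-count', 'count',
        len(interaction_records),
        improvement_direction=improvement_direction.UP))
```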
diff --git a/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.webrtc_rendering_stats.html b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.webrtc_rendering_stats.html
new file mode 100644
index 0000000..3334d5f
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.webrtc_rendering_stats.html
@@ -0,0 +1,98 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.web_perf.metrics.webrtc_rendering_stats</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.web_perf.html"><font color="#ffffff">web_perf</font></a>.<a href="telemetry.web_perf.metrics.html"><font color="#ffffff">metrics</font></a>.webrtc_rendering_stats</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/web_perf/metrics/webrtc_rendering_stats.py">telemetry/web_perf/metrics/webrtc_rendering_stats.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="logging.html">logging</a><br>
+</td><td width="25%" valign=top><a href="telemetry.util.statistics.html">telemetry.util.statistics</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.webrtc_rendering_stats.html#TimeStats">TimeStats</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.webrtc_rendering_stats.html#WebMediaPlayerMsRenderingStats">WebMediaPlayerMsRenderingStats</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TimeStats">class <strong>TimeStats</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Stats&nbsp;container&nbsp;for&nbsp;webrtc&nbsp;rendering&nbsp;metrics.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="TimeStats-__init__"><strong>__init__</strong></a>(self, drift_time<font color="#909090">=None</font>, mean_drift_time<font color="#909090">=None</font>, std_dev_drift_time<font color="#909090">=None</font>, percent_badly_out_of_sync<font color="#909090">=None</font>, percent_out_of_sync<font color="#909090">=None</font>, smoothness_score<font color="#909090">=None</font>, freezing_score<font color="#909090">=None</font>, rendering_length_error<font color="#909090">=None</font>, fps<font color="#909090">=None</font>, frame_distribution<font color="#909090">=None</font>)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="WebMediaPlayerMsRenderingStats">class <strong>WebMediaPlayerMsRenderingStats</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Analyzes&nbsp;events&nbsp;of&nbsp;WebMediaPlayerMs&nbsp;type.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="WebMediaPlayerMsRenderingStats-GetTimeStats"><strong>GetTimeStats</strong></a>(self)</dt><dd><tt>Calculate&nbsp;time&nbsp;stamp&nbsp;stats&nbsp;for&nbsp;all&nbsp;remote&nbsp;stream&nbsp;events.</tt></dd></dl>
+
+<dl><dt><a name="WebMediaPlayerMsRenderingStats-__init__"><strong>__init__</strong></a>(self, events)</dt><dd><tt>Save&nbsp;relevant&nbsp;events&nbsp;according&nbsp;to&nbsp;their&nbsp;stream.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>ACTUAL_RENDER_BEGIN</strong> = 'Actual Render Begin'<br>
+<strong>ACTUAL_RENDER_END</strong> = 'Actual Render End'<br>
+<strong>DISPLAY_HERTZ</strong> = 60.0<br>
+<strong>FROZEN_THRESHOLD</strong> = 6<br>
+<strong>IDEAL_RENDER_INSTANT</strong> = 'Ideal Render Instant'<br>
+<strong>SERIAL</strong> = 'Serial'<br>
+<strong>SEVERITY</strong> = 3<br>
+<strong>VSYNC_DURATION</strong> = 16666.666666666668</td></tr></table>
+</body></html>
\ No newline at end of file
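
The two classes above split the work between event analysis (WebMediaPlayerMsRenderingStats) and a plain container (TimeStats). A sketch of how a caller might combine them; it assumes the TimeStats constructor stores each argument under an attribute of the same name, which the pydoc does not show, and that `events` is an already-filtered iterable of timeline events:

```
# Sketch: turning already-filtered WebMediaPlayerMS events into a small
# summary dict. The TimeStats attribute names mirror its __init__ arguments;
# that mapping is an assumption, not shown in the pydoc above.
from telemetry.web_perf.metrics import webrtc_rendering_stats


def SummarizeRemoteStreamRendering(events):
  stats = webrtc_rendering_stats.WebMediaPlayerMsRenderingStats(events)
  time_stats = stats.GetTimeStats()  # stats for all remote stream events
  return {
      'fps': time_stats.fps,
      'drift_time_us': time_stats.drift_time,
      'smoothness_score': time_stats.smoothness_score,
      'freezing_score': time_stats.freezing_score,
  }
```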
diff --git a/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.webrtc_rendering_timeline.html b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.webrtc_rendering_timeline.html
new file mode 100644
index 0000000..d9cdd5d
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.web_perf.metrics.webrtc_rendering_timeline.html
@@ -0,0 +1,104 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.web_perf.metrics.webrtc_rendering_timeline</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.web_perf.html"><font color="#ffffff">web_perf</font></a>.<a href="telemetry.web_perf.metrics.html"><font color="#ffffff">metrics</font></a>.webrtc_rendering_timeline</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/web_perf/metrics/webrtc_rendering_timeline.py">telemetry/web_perf/metrics/webrtc_rendering_timeline.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.value.improvement_direction.html">telemetry.value.improvement_direction</a><br>
+<a href="telemetry.value.list_of_scalar_values.html">telemetry.value.list_of_scalar_values</a><br>
+</td><td width="25%" valign=top><a href="telemetry.value.scalar.html">telemetry.value.scalar</a><br>
+<a href="telemetry.web_perf.metrics.webrtc_rendering_stats.html">telemetry.web_perf.metrics.webrtc_rendering_stats</a><br>
+</td><td width="25%" valign=top><a href="telemetry.web_perf.metrics.timeline_based_metric.html">telemetry.web_perf.metrics.timeline_based_metric</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.metrics.webrtc_rendering_timeline.html#WebRtcRenderingTimelineMetric">WebRtcRenderingTimelineMetric</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="WebRtcRenderingTimelineMetric">class <strong>WebRtcRenderingTimelineMetric</strong></a>(<a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>WebRtcRenderingTimelineMetric&nbsp;calculates&nbsp;metrics&nbsp;for&nbsp;WebMediaPlayerMS.<br>
+&nbsp;<br>
+The&nbsp;following&nbsp;metrics&nbsp;are&nbsp;added&nbsp;to&nbsp;the&nbsp;results:<br>
+&nbsp;&nbsp;WebRTCRendering_drift_time&nbsp;us<br>
+&nbsp;&nbsp;WebRTCRendering_percent_badly_out_of_sync&nbsp;%<br>
+&nbsp;&nbsp;WebRTCRendering_percent_out_of_sync&nbsp;%<br>
+&nbsp;&nbsp;WebRTCRendering_fps&nbsp;FPS<br>
+&nbsp;&nbsp;WebRTCRendering_smoothness_score&nbsp;%<br>
+&nbsp;&nbsp;WebRTCRendering_freezing_score&nbsp;%<br>
+&nbsp;&nbsp;WebRTCRendering_rendering_length_error&nbsp;%<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.web_perf.metrics.webrtc_rendering_timeline.html#WebRtcRenderingTimelineMetric">WebRtcRenderingTimelineMetric</a></dd>
+<dd><a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="WebRtcRenderingTimelineMetric-AddResults"><strong>AddResults</strong></a>(self, model, renderer_thread, interactions, results)</dt><dd><tt>Adding&nbsp;metrics&nbsp;to&nbsp;the&nbsp;results.</tt></dd></dl>
+
+<dl><dt><a name="WebRtcRenderingTimelineMetric-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Static methods defined here:<br>
+<dl><dt><a name="WebRtcRenderingTimelineMetric-IsMediaPlayerMSEvent"><strong>IsMediaPlayerMSEvent</strong></a>(event)</dt><dd><tt>Verify&nbsp;that&nbsp;the&nbsp;event&nbsp;is&nbsp;a&nbsp;webmediaplayerMS&nbsp;event.</tt></dd></dl>
+
+<hr>
+Methods inherited from <a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>:<br>
+<dl><dt><a name="WebRtcRenderingTimelineMetric-AddWholeTraceResults"><strong>AddWholeTraceResults</strong></a>(self, model, results)</dt><dd><tt>Computes&nbsp;and&nbsp;adds&nbsp;metrics&nbsp;corresponding&nbsp;to&nbsp;the&nbsp;entire&nbsp;trace.<br>
+&nbsp;<br>
+Override&nbsp;this&nbsp;method&nbsp;to&nbsp;compute&nbsp;results&nbsp;that&nbsp;correspond&nbsp;to&nbsp;the&nbsp;whole&nbsp;trace.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;model:&nbsp;An&nbsp;instance&nbsp;of&nbsp;telemetry.timeline.model.TimelineModel.<br>
+&nbsp;&nbsp;results:&nbsp;An&nbsp;instance&nbsp;of&nbsp;page.PageTestResults.</tt></dd></dl>
+
+<dl><dt><a name="WebRtcRenderingTimelineMetric-VerifyNonOverlappedRecords"><strong>VerifyNonOverlappedRecords</strong></a>(self, interaction_records)</dt><dd><tt>This&nbsp;raises&nbsp;exceptions&nbsp;if&nbsp;interaction_records&nbsp;contain&nbsp;overlapped&nbsp;ranges.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.web_perf.metrics.timeline_based_metric.html#TimelineBasedMetric">telemetry.web_perf.metrics.timeline_based_metric.TimelineBasedMetric</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>WEB_MEDIA_PLAYER_MS_EVENT</strong> = 'WebMediaPlayerMS::UpdateCurrentFrame'</td></tr></table>
+</body></html>
\ No newline at end of file
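
WebRtcRenderingTimelineMetric exposes IsMediaPlayerMSEvent as a static helper, keyed off the WEB_MEDIA_PLAYER_MS_EVENT name listed above. A sketch of using it to pre-filter timeline events before handing them to the stats helper on the previous page; `all_events` is an assumed iterable of timeline events:

```
# Sketch: selecting WebMediaPlayerMS::UpdateCurrentFrame events with the
# static helper documented above. 'all_events' is an assumed input.
from telemetry.web_perf.metrics import webrtc_rendering_timeline


def SelectMediaPlayerMsEvents(all_events):
  metric_cls = webrtc_rendering_timeline.WebRtcRenderingTimelineMetric
  return [event for event in all_events
          if metric_cls.IsMediaPlayerMSEvent(event)]
```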
diff --git a/catapult/telemetry/docs/pydoc/telemetry.web_perf.smooth_gesture_util.html b/catapult/telemetry/docs/pydoc/telemetry.web_perf.smooth_gesture_util.html
new file mode 100644
index 0000000..06743f8
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.web_perf.smooth_gesture_util.html
@@ -0,0 +1,42 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.web_perf.smooth_gesture_util</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.web_perf.html"><font color="#ffffff">web_perf</font></a>.smooth_gesture_util</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/web_perf/smooth_gesture_util.py">telemetry/web_perf/smooth_gesture_util.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="copy.html">copy</a><br>
+</td><td width="25%" valign=top><a href="telemetry.web_perf.timeline_interaction_record.html">telemetry.web_perf.timeline_interaction_record</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-GetAdjustedInteractionIfContainGesture"><strong>GetAdjustedInteractionIfContainGesture</strong></a>(timeline, interaction_record)</dt><dd><tt>Returns&nbsp;a&nbsp;new&nbsp;interaction&nbsp;record&nbsp;if&nbsp;interaction_record&nbsp;contains&nbsp;geture<br>
+whose&nbsp;time&nbsp;range&nbsp;that&nbsp;overlaps&nbsp;with&nbsp;interaction_record's&nbsp;range.&nbsp;If&nbsp;not,<br>
+returns&nbsp;a&nbsp;clone&nbsp;of&nbsp;original&nbsp;interaction_record.<br>
+The&nbsp;synthetic&nbsp;gesture&nbsp;controller&nbsp;inserts&nbsp;a&nbsp;trace&nbsp;marker&nbsp;to&nbsp;precisely<br>
+demarcate&nbsp;when&nbsp;the&nbsp;gesture&nbsp;was&nbsp;running.&nbsp;We&nbsp;check&nbsp;for&nbsp;overlap,&nbsp;not&nbsp;inclusion,<br>
+because&nbsp;gesture_actions&nbsp;can&nbsp;start/end&nbsp;slightly&nbsp;outside&nbsp;the&nbsp;telemetry&nbsp;markers<br>
+on&nbsp;Windows.&nbsp;This&nbsp;problem&nbsp;is&nbsp;probably&nbsp;caused&nbsp;by&nbsp;a&nbsp;race&nbsp;condition&nbsp;between<br>
+the&nbsp;browser&nbsp;and&nbsp;renderer&nbsp;process&nbsp;submitting&nbsp;the&nbsp;trace&nbsp;events&nbsp;for&nbsp;the<br>
+markers.</tt></dd></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
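
Because GetAdjustedInteractionIfContainGesture either widens a record to the synthetic gesture marker or returns a clone, callers can apply it uniformly across all records. A short sketch, assuming `timeline` is a TimelineModel and `records` is a list of interaction records produced by the timeline-based measurement machinery:

```
# Sketch: applying the gesture adjustment documented above to every record.
from telemetry.web_perf import smooth_gesture_util


def AdjustRecordsToGestures(timeline, records):
  return [
      smooth_gesture_util.GetAdjustedInteractionIfContainGesture(timeline, r)
      for r in records]
```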
diff --git a/catapult/telemetry/docs/pydoc/telemetry.web_perf.story_test.html b/catapult/telemetry/docs/pydoc/telemetry.web_perf.story_test.html
new file mode 100644
index 0000000..762e837
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.web_perf.story_test.html
@@ -0,0 +1,144 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.web_perf.story_test</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.web_perf.html"><font color="#ffffff">web_perf</font></a>.story_test</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/web_perf/story_test.py">telemetry/web_perf/story_test.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.story_test.html#StoryTest">StoryTest</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.story_test.html#Failure">Failure</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Failure">class <strong>Failure</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt><a href="#StoryTest">StoryTest</a>&nbsp;<a href="exceptions.html#Exception">Exception</a>&nbsp;raised&nbsp;when&nbsp;an&nbsp;undesired&nbsp;but&nbsp;designed-for&nbsp;problem.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.web_perf.story_test.html#Failure">Failure</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="Failure-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#Failure-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#Failure-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="Failure-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#Failure-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="Failure-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#Failure-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="Failure-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#Failure-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="Failure-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#Failure-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="Failure-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="Failure-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#Failure-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="Failure-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#Failure-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="Failure-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="Failure-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#Failure-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="Failure-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="StoryTest">class <strong>StoryTest</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;class&nbsp;for&nbsp;creating&nbsp;story&nbsp;tests.<br>
+&nbsp;<br>
+The&nbsp;overall&nbsp;test&nbsp;run&nbsp;control&nbsp;flow&nbsp;follows&nbsp;this&nbsp;order:<br>
+&nbsp;&nbsp;test.WillRunStory<br>
+&nbsp;&nbsp;state.WillRunStory<br>
+&nbsp;&nbsp;state.RunStory<br>
+&nbsp;&nbsp;test.Measure<br>
+&nbsp;&nbsp;state.DidRunStory<br>
+&nbsp;&nbsp;test.DidRunStory<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="StoryTest-DidRunStory"><strong>DidRunStory</strong></a>(self, platform)</dt><dd><tt>Override&nbsp;to&nbsp;do&nbsp;any&nbsp;action&nbsp;after&nbsp;running&nbsp;the&nbsp;story,&nbsp;e.g.,&nbsp;clean&nbsp;up.<br>
+&nbsp;<br>
+This&nbsp;is&nbsp;run&nbsp;after&nbsp;state.DidRunStory.&nbsp;And&nbsp;this&nbsp;is&nbsp;always&nbsp;called&nbsp;even&nbsp;if&nbsp;the<br>
+test&nbsp;run&nbsp;failed.<br>
+Args:<br>
+&nbsp;&nbsp;platform:&nbsp;The&nbsp;platform&nbsp;that&nbsp;the&nbsp;story&nbsp;will&nbsp;run&nbsp;on.</tt></dd></dl>
+
+<dl><dt><a name="StoryTest-Measure"><strong>Measure</strong></a>(self, platform, results)</dt><dd><tt>Override&nbsp;to&nbsp;take&nbsp;the&nbsp;measurement.<br>
+&nbsp;<br>
+This&nbsp;is&nbsp;run&nbsp;only&nbsp;if&nbsp;state.RunStory&nbsp;is&nbsp;successful.<br>
+Args:<br>
+&nbsp;&nbsp;platform:&nbsp;The&nbsp;platform&nbsp;that&nbsp;the&nbsp;story&nbsp;will&nbsp;run&nbsp;on.<br>
+&nbsp;&nbsp;results:&nbsp;The&nbsp;results&nbsp;of&nbsp;running&nbsp;the&nbsp;story.</tt></dd></dl>
+
+<dl><dt><a name="StoryTest-WillRunStory"><strong>WillRunStory</strong></a>(self, platform)</dt><dd><tt>Override&nbsp;to&nbsp;do&nbsp;any&nbsp;action&nbsp;before&nbsp;running&nbsp;the&nbsp;story.<br>
+&nbsp;<br>
+This&nbsp;is&nbsp;run&nbsp;before&nbsp;state.WillRunStory.<br>
+Args:<br>
+&nbsp;&nbsp;platform:&nbsp;The&nbsp;platform&nbsp;that&nbsp;the&nbsp;story&nbsp;will&nbsp;run&nbsp;on.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
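
The StoryTest docstring pins down the hook order: test.WillRunStory, state.WillRunStory, state.RunStory, test.Measure, state.DidRunStory, test.DidRunStory. A minimal sketch of a subclass wiring those three hooks; the class name and the logging-only bodies are placeholders, not an existing telemetry test:

```
# Sketch of a StoryTest subclass following the hook order documented above.
import logging

from telemetry.web_perf import story_test


class LoggingStoryTest(story_test.StoryTest):
  """Illustrative test that only logs at each hook."""

  def WillRunStory(self, platform):
    # Runs before state.WillRunStory.
    logging.info('About to run a story on platform %s', platform)

  def Measure(self, platform, results):
    # Runs only if state.RunStory succeeded; a real test adds values here.
    logging.info('Measuring story; results: %r', results)

  def DidRunStory(self, platform):
    # Always called, even if the run failed.
    logging.info('Finished running the story.')
```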
diff --git a/catapult/telemetry/docs/pydoc/telemetry.web_perf.timeline_based_measurement.html b/catapult/telemetry/docs/pydoc/telemetry.web_perf.timeline_based_measurement.html
new file mode 100644
index 0000000..0761e8f
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.web_perf.timeline_based_measurement.html
@@ -0,0 +1,269 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.web_perf.timeline_based_measurement</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.web_perf.html"><font color="#ffffff">web_perf</font></a>.timeline_based_measurement</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/web_perf/timeline_based_measurement.py">telemetry/web_perf/timeline_based_measurement.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.web_perf.metrics.blob_timeline.html">telemetry.web_perf.metrics.blob_timeline</a><br>
+<a href="collections.html">collections</a><br>
+<a href="telemetry.web_perf.metrics.gpu_timeline.html">telemetry.web_perf.metrics.gpu_timeline</a><br>
+<a href="telemetry.web_perf.metrics.indexeddb_timeline.html">telemetry.web_perf.metrics.indexeddb_timeline</a><br>
+<a href="telemetry.web_perf.metrics.layout.html">telemetry.web_perf.metrics.layout</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+<a href="telemetry.web_perf.metrics.memory_timeline.html">telemetry.web_perf.metrics.memory_timeline</a><br>
+<a href="telemetry.timeline.model.html">telemetry.timeline.model</a><br>
+<a href="telemetry.web_perf.metrics.responsiveness_metric.html">telemetry.web_perf.metrics.responsiveness_metric</a><br>
+<a href="telemetry.web_perf.smooth_gesture_util.html">telemetry.web_perf.smooth_gesture_util</a><br>
+</td><td width="25%" valign=top><a href="telemetry.web_perf.metrics.smoothness.html">telemetry.web_perf.metrics.smoothness</a><br>
+<a href="telemetry.web_perf.story_test.html">telemetry.web_perf.story_test</a><br>
+<a href="telemetry.web_perf.metrics.text_selection.html">telemetry.web_perf.metrics.text_selection</a><br>
+<a href="telemetry.web_perf.metrics.timeline_based_metric.html">telemetry.web_perf.metrics.timeline_based_metric</a><br>
+<a href="telemetry.web_perf.timeline_interaction_record.html">telemetry.web_perf.timeline_interaction_record</a><br>
+</td><td width="25%" valign=top><a href="telemetry.value.trace.html">telemetry.value.trace</a><br>
+<a href="telemetry.timeline.tracing_category_filter.html">telemetry.timeline.tracing_category_filter</a><br>
+<a href="telemetry.timeline.tracing_options.html">telemetry.timeline.tracing_options</a><br>
+<a href="telemetry.web_perf.metrics.webrtc_rendering_timeline.html">telemetry.web_perf.metrics.webrtc_rendering_timeline</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.timeline_based_measurement.html#Options">Options</a>
+</font></dt><dt><font face="helvetica, arial"><a href="telemetry.web_perf.timeline_based_measurement.html#ResultsWrapperInterface">ResultsWrapperInterface</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.timeline_based_measurement.html#InvalidInteractions">InvalidInteractions</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.story_test.html#StoryTest">telemetry.web_perf.story_test.StoryTest</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.timeline_based_measurement.html#TimelineBasedMeasurement">TimelineBasedMeasurement</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="InvalidInteractions">class <strong>InvalidInteractions</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.web_perf.timeline_based_measurement.html#InvalidInteractions">InvalidInteractions</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="InvalidInteractions-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#InvalidInteractions-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#InvalidInteractions-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="InvalidInteractions-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#InvalidInteractions-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="InvalidInteractions-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#InvalidInteractions-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="InvalidInteractions-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#InvalidInteractions-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="InvalidInteractions-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#InvalidInteractions-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="InvalidInteractions-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="InvalidInteractions-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#InvalidInteractions-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="InvalidInteractions-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#InvalidInteractions-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="InvalidInteractions-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="InvalidInteractions-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#InvalidInteractions-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="InvalidInteractions-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="Options">class <strong>Options</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>A&nbsp;class&nbsp;to&nbsp;be&nbsp;used&nbsp;to&nbsp;configure&nbsp;<a href="#TimelineBasedMeasurement">TimelineBasedMeasurement</a>.<br>
+&nbsp;<br>
+This&nbsp;is&nbsp;created&nbsp;and&nbsp;returned&nbsp;by<br>
+Benchmark.CreateTimelineBasedMeasurementOptions.<br>
+&nbsp;<br>
+By&nbsp;default,&nbsp;all&nbsp;the&nbsp;timeline&nbsp;based&nbsp;metrics&nbsp;in&nbsp;telemetry/web_perf/metrics&nbsp;are<br>
+used&nbsp;(see&nbsp;_GetAllTimelineBasedMetrics&nbsp;above).<br>
+To&nbsp;customize&nbsp;your&nbsp;metric&nbsp;needs,&nbsp;use&nbsp;<a href="#Options-SetTimelineBasedMetrics">SetTimelineBasedMetrics</a>().<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="Options-ExtendTraceCategoryFilter"><strong>ExtendTraceCategoryFilter</strong></a>(self, filters)</dt></dl>
+
+<dl><dt><a name="Options-GetTimelineBasedMetrics"><strong>GetTimelineBasedMetrics</strong></a>(self)</dt></dl>
+
+<dl><dt><a name="Options-SetTimelineBasedMetrics"><strong>SetTimelineBasedMetrics</strong></a>(self, metrics)</dt></dl>
+
+<dl><dt><a name="Options-__init__"><strong>__init__</strong></a>(self, overhead_level<font color="#909090">='no-overhead'</font>)</dt><dd><tt>As&nbsp;the&nbsp;amount&nbsp;of&nbsp;instrumentation&nbsp;increases,&nbsp;so&nbsp;does&nbsp;the&nbsp;overhead.<br>
+The&nbsp;user&nbsp;of&nbsp;the&nbsp;measurement&nbsp;chooses&nbsp;the&nbsp;overhead&nbsp;level&nbsp;that&nbsp;is&nbsp;appropriate,<br>
+and&nbsp;the&nbsp;tracing&nbsp;is&nbsp;filtered&nbsp;accordingly.<br>
+&nbsp;<br>
+overhead_level:&nbsp;Can&nbsp;either&nbsp;be&nbsp;a&nbsp;custom&nbsp;TracingCategoryFilter&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;or<br>
+&nbsp;&nbsp;&nbsp;&nbsp;one&nbsp;of&nbsp;NO_OVERHEAD_LEVEL,&nbsp;MINIMAL_OVERHEAD_LEVEL&nbsp;or<br>
+&nbsp;&nbsp;&nbsp;&nbsp;DEBUG_OVERHEAD_LEVEL.</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>category_filter</strong></dt>
+</dl>
+<dl><dt><strong>tracing_options</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ResultsWrapperInterface">class <strong>ResultsWrapperInterface</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>#&nbsp;TODO(nednguyen):&nbsp;Get&nbsp;rid&nbsp;of&nbsp;this&nbsp;results&nbsp;wrapper&nbsp;hack&nbsp;after&nbsp;we&nbsp;add&nbsp;interaction<br>
+#&nbsp;record&nbsp;to&nbsp;telemetry&nbsp;value&nbsp;system&nbsp;(crbug.com/453109)<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="ResultsWrapperInterface-AddValue"><strong>AddValue</strong></a>(self, value)</dt></dl>
+
+<dl><dt><a name="ResultsWrapperInterface-SetResults"><strong>SetResults</strong></a>(self, results)</dt></dl>
+
+<dl><dt><a name="ResultsWrapperInterface-SetTirLabel"><strong>SetTirLabel</strong></a>(self, tir_label)</dt></dl>
+
+<dl><dt><a name="ResultsWrapperInterface-__init__"><strong>__init__</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>current_page</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TimelineBasedMeasurement">class <strong>TimelineBasedMeasurement</strong></a>(<a href="telemetry.web_perf.story_test.html#StoryTest">telemetry.web_perf.story_test.StoryTest</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Collects&nbsp;multiple&nbsp;metrics&nbsp;based&nbsp;on&nbsp;their&nbsp;interaction&nbsp;records.<br>
+&nbsp;<br>
+A&nbsp;timeline&nbsp;based&nbsp;measurement&nbsp;shifts&nbsp;the&nbsp;burden&nbsp;of&nbsp;what&nbsp;metrics&nbsp;to&nbsp;collect&nbsp;onto<br>
+the&nbsp;story&nbsp;under&nbsp;test.&nbsp;Instead&nbsp;of&nbsp;the&nbsp;measurement<br>
+having&nbsp;a&nbsp;fixed&nbsp;set&nbsp;of&nbsp;values&nbsp;it&nbsp;collects,&nbsp;the&nbsp;story&nbsp;being&nbsp;tested<br>
+issues&nbsp;(via&nbsp;javascript)&nbsp;an&nbsp;Interaction&nbsp;record&nbsp;into&nbsp;the&nbsp;user&nbsp;timing&nbsp;API<br>
+describing&nbsp;what&nbsp;is&nbsp;happening&nbsp;at&nbsp;that&nbsp;time,&nbsp;as&nbsp;well&nbsp;as&nbsp;a&nbsp;standardized&nbsp;set<br>
+of&nbsp;flags&nbsp;describing&nbsp;the&nbsp;semantics&nbsp;of&nbsp;the&nbsp;work&nbsp;being&nbsp;done.&nbsp;The<br>
+<a href="#TimelineBasedMeasurement">TimelineBasedMeasurement</a>&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;collects&nbsp;a&nbsp;trace&nbsp;that&nbsp;includes&nbsp;both&nbsp;these<br>
+interaction&nbsp;records,&nbsp;and&nbsp;a&nbsp;user-chosen&nbsp;amount&nbsp;of&nbsp;performance&nbsp;data&nbsp;using<br>
+Telemetry's&nbsp;various&nbsp;timeline-producing&nbsp;APIs,&nbsp;tracing&nbsp;especially.<br>
+&nbsp;<br>
+It&nbsp;then&nbsp;passes&nbsp;the&nbsp;recorded&nbsp;timeline&nbsp;to&nbsp;different&nbsp;TimelineBasedMetrics&nbsp;based<br>
+on&nbsp;those&nbsp;flags.&nbsp;As&nbsp;an&nbsp;example,&nbsp;this&nbsp;allows&nbsp;a&nbsp;single&nbsp;story&nbsp;run&nbsp;to&nbsp;produce<br>
+load&nbsp;timing&nbsp;data,&nbsp;smoothness&nbsp;data,&nbsp;critical&nbsp;jank&nbsp;information&nbsp;and&nbsp;overall&nbsp;cpu<br>
+usage&nbsp;information.<br>
+&nbsp;<br>
+For&nbsp;information&nbsp;on&nbsp;how&nbsp;to&nbsp;mark&nbsp;up&nbsp;a&nbsp;page&nbsp;to&nbsp;work&nbsp;with<br>
+<a href="#TimelineBasedMeasurement">TimelineBasedMeasurement</a>,&nbsp;refer&nbsp;to&nbsp;the<br>
+perf.metrics.timeline_interaction_record&nbsp;module.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;&nbsp;&nbsp;options:&nbsp;an&nbsp;instance&nbsp;of&nbsp;timeline_based_measurement.<a href="#Options">Options</a>.<br>
+&nbsp;&nbsp;&nbsp;&nbsp;results_wrapper:&nbsp;A&nbsp;class&nbsp;whose&nbsp;__init__&nbsp;method&nbsp;takes&nbsp;in<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;the&nbsp;page_test_results&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;and&nbsp;the&nbsp;interaction&nbsp;record&nbsp;label.&nbsp;This<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;class&nbsp;follows&nbsp;the&nbsp;<a href="#ResultsWrapperInterface">ResultsWrapperInterface</a>.&nbsp;Note:&nbsp;this&nbsp;class&nbsp;is&nbsp;not<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;supported&nbsp;long&nbsp;term&nbsp;and&nbsp;will&nbsp;be&nbsp;removed&nbsp;when&nbsp;crbug.com/453109&nbsp;is&nbsp;resolved.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.web_perf.timeline_based_measurement.html#TimelineBasedMeasurement">TimelineBasedMeasurement</a></dd>
+<dd><a href="telemetry.web_perf.story_test.html#StoryTest">telemetry.web_perf.story_test.StoryTest</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="TimelineBasedMeasurement-DidRunStory"><strong>DidRunStory</strong></a>(self, platform)</dt><dd><tt>Clean&nbsp;up&nbsp;after&nbsp;running&nbsp;the&nbsp;story.</tt></dd></dl>
+
+<dl><dt><a name="TimelineBasedMeasurement-Measure"><strong>Measure</strong></a>(self, platform, results)</dt><dd><tt>Collect&nbsp;all&nbsp;possible&nbsp;metrics&nbsp;and&nbsp;added&nbsp;them&nbsp;to&nbsp;results.</tt></dd></dl>
+
+<dl><dt><a name="TimelineBasedMeasurement-WillRunStory"><strong>WillRunStory</strong></a>(self, platform)</dt><dd><tt>Configure&nbsp;and&nbsp;start&nbsp;tracing.</tt></dd></dl>
+
+<dl><dt><a name="TimelineBasedMeasurement-__init__"><strong>__init__</strong></a>(self, options, results_wrapper<font color="#909090">=None</font>)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.web_perf.story_test.html#StoryTest">telemetry.web_perf.story_test.StoryTest</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>ALL_OVERHEAD_LEVELS</strong> = ['no-overhead', 'minimal-overhead', 'debug-overhead']<br>
+<strong>DEBUG_OVERHEAD_LEVEL</strong> = 'debug-overhead'<br>
+<strong>MINIMAL_OVERHEAD_LEVEL</strong> = 'minimal-overhead'<br>
+<strong>NO_OVERHEAD_LEVEL</strong> = 'no-overhead'</td></tr></table>
+</body></html>
\ No newline at end of file
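
Options is documented above as the object a benchmark returns from CreateTimelineBasedMeasurementOptions, with three overhead levels and SetTimelineBasedMetrics() for narrowing the default metric set. A sketch of configuring it and handing it to TimelineBasedMeasurement; the extra trace category, the choice of V8GCLatency, and passing the filters argument as a list are assumptions made for illustration:

```
# Sketch: building a TimelineBasedMeasurement from the Options class above.
# The category filter contents and the single chosen metric are illustrative.
from telemetry.web_perf import timeline_based_measurement
from telemetry.web_perf.metrics import v8_gc_latency


def CreateTimelineBasedMeasurementOptions():
  options = timeline_based_measurement.Options(
      overhead_level=timeline_based_measurement.MINIMAL_OVERHEAD_LEVEL)
  # ExtendTraceCategoryFilter(filters) per the pydoc; a list of category
  # strings is assumed here.
  options.ExtendTraceCategoryFilter(['v8'])
  options.SetTimelineBasedMetrics([v8_gc_latency.V8GCLatency()])
  return options


tbm = timeline_based_measurement.TimelineBasedMeasurement(
    CreateTimelineBasedMeasurementOptions())
```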
diff --git a/catapult/telemetry/docs/pydoc/telemetry.web_perf.timeline_based_page_test.html b/catapult/telemetry/docs/pydoc/telemetry.web_perf.timeline_based_page_test.html
new file mode 100644
index 0000000..e3cbb38
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.web_perf.timeline_based_page_test.html
@@ -0,0 +1,136 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.web_perf.timeline_based_page_test</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.web_perf.html"><font color="#ffffff">web_perf</font></a>.timeline_based_page_test</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/web_perf/timeline_based_page_test.py">telemetry/web_perf/timeline_based_page_test.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;(c)&nbsp;2015&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.page.page_test.html">telemetry.page.page_test</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="telemetry.page.page_test.html#PageTest">telemetry.page.page_test.PageTest</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.timeline_based_page_test.html#TimelineBasedPageTest">TimelineBasedPageTest</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TimelineBasedPageTest">class <strong>TimelineBasedPageTest</strong></a>(<a href="telemetry.page.page_test.html#PageTest">telemetry.page.page_test.PageTest</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Page&nbsp;test&nbsp;that&nbsp;collects&nbsp;metrics&nbsp;with&nbsp;TimelineBasedMeasurement.<br>
+&nbsp;<br>
+WillRunStory(),&nbsp;Measure()&nbsp;and&nbsp;DidRunStory()&nbsp;are&nbsp;all&nbsp;done&nbsp;in&nbsp;story_runner<br>
+explicitly.&nbsp;We&nbsp;still&nbsp;need&nbsp;this&nbsp;wrapper&nbsp;around&nbsp;<a href="telemetry.page.page_test.html#PageTest">PageTest</a>&nbsp;because&nbsp;it&nbsp;executes<br>
+some&nbsp;browser&nbsp;related&nbsp;functions&nbsp;in&nbsp;the&nbsp;parent&nbsp;class,&nbsp;which&nbsp;is&nbsp;needed&nbsp;by<br>
+Timeline&nbsp;Based&nbsp;Measurement&nbsp;benchmarks.&nbsp;This&nbsp;class&nbsp;will&nbsp;be&nbsp;removed&nbsp;after<br>
+page_test's&nbsp;hooks&nbsp;are&nbsp;fully&nbsp;removed.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.web_perf.timeline_based_page_test.html#TimelineBasedPageTest">TimelineBasedPageTest</a></dd>
+<dd><a href="telemetry.page.page_test.html#PageTest">telemetry.page.page_test.PageTest</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Methods defined here:<br>
+<dl><dt><a name="TimelineBasedPageTest-ValidateAndMeasurePage"><strong>ValidateAndMeasurePage</strong></a>(self, page, tab, results)</dt><dd><tt>Collect&nbsp;all&nbsp;possible&nbsp;metrics&nbsp;and&nbsp;added&nbsp;them&nbsp;to&nbsp;results.</tt></dd></dl>
+
+<dl><dt><a name="TimelineBasedPageTest-__init__"><strong>__init__</strong></a>(self, tbm)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>measurement</strong></dt>
+</dl>
+<hr>
+Methods inherited from <a href="telemetry.page.page_test.html#PageTest">telemetry.page.page_test.PageTest</a>:<br>
+<dl><dt><a name="TimelineBasedPageTest-CustomizeBrowserOptions"><strong>CustomizeBrowserOptions</strong></a>(self, options)</dt><dd><tt>Override&nbsp;to&nbsp;add&nbsp;test-specific&nbsp;options&nbsp;to&nbsp;the&nbsp;BrowserOptions&nbsp;object</tt></dd></dl>
+
+<dl><dt><a name="TimelineBasedPageTest-DidNavigateToPage"><strong>DidNavigateToPage</strong></a>(self, page, tab)</dt><dd><tt>Override&nbsp;to&nbsp;do&nbsp;operations&nbsp;right&nbsp;after&nbsp;the&nbsp;page&nbsp;is&nbsp;navigated&nbsp;and&nbsp;after<br>
+all&nbsp;waiting&nbsp;for&nbsp;completion&nbsp;has&nbsp;occurred.</tt></dd></dl>
+
+<dl><dt><a name="TimelineBasedPageTest-DidRunPage"><strong>DidRunPage</strong></a>(self, platform)</dt><dd><tt>Called&nbsp;after&nbsp;the&nbsp;test&nbsp;run&nbsp;method&nbsp;was&nbsp;run,&nbsp;even&nbsp;if&nbsp;it&nbsp;failed.</tt></dd></dl>
+
+<dl><dt><a name="TimelineBasedPageTest-DidStartBrowser"><strong>DidStartBrowser</strong></a>(self, browser)</dt><dd><tt>Override&nbsp;to&nbsp;customize&nbsp;the&nbsp;browser&nbsp;right&nbsp;after&nbsp;it&nbsp;has&nbsp;launched.</tt></dd></dl>
+
+<dl><dt><a name="TimelineBasedPageTest-RestartBrowserBeforeEachPage"><strong>RestartBrowserBeforeEachPage</strong></a>(self)</dt><dd><tt>Should&nbsp;the&nbsp;browser&nbsp;be&nbsp;restarted&nbsp;for&nbsp;the&nbsp;page?<br>
+&nbsp;<br>
+This&nbsp;returns&nbsp;true&nbsp;if&nbsp;the&nbsp;test&nbsp;needs&nbsp;to&nbsp;unconditionally&nbsp;restart&nbsp;the<br>
+browser&nbsp;for&nbsp;each&nbsp;page.&nbsp;It&nbsp;may&nbsp;be&nbsp;called&nbsp;before&nbsp;the&nbsp;browser&nbsp;is&nbsp;started.</tt></dd></dl>
+
+<dl><dt><a name="TimelineBasedPageTest-RunNavigateSteps"><strong>RunNavigateSteps</strong></a>(self, page, tab)</dt><dd><tt>Navigates&nbsp;the&nbsp;tab&nbsp;to&nbsp;the&nbsp;page&nbsp;URL&nbsp;attribute.<br>
+&nbsp;<br>
+Runs&nbsp;the&nbsp;'navigate_steps'&nbsp;page&nbsp;attribute&nbsp;as&nbsp;a&nbsp;compound&nbsp;action.</tt></dd></dl>
+
+<dl><dt><a name="TimelineBasedPageTest-SetOptions"><strong>SetOptions</strong></a>(self, options)</dt><dd><tt>Sets&nbsp;the&nbsp;BrowserFinderOptions&nbsp;instance&nbsp;to&nbsp;use.</tt></dd></dl>
+
+<dl><dt><a name="TimelineBasedPageTest-StopBrowserAfterPage"><strong>StopBrowserAfterPage</strong></a>(self, browser, page)</dt><dd><tt>Should&nbsp;the&nbsp;browser&nbsp;be&nbsp;stopped&nbsp;after&nbsp;the&nbsp;page&nbsp;is&nbsp;run?<br>
+&nbsp;<br>
+This&nbsp;is&nbsp;called&nbsp;after&nbsp;a&nbsp;page&nbsp;is&nbsp;run&nbsp;to&nbsp;decide&nbsp;whether&nbsp;the&nbsp;browser&nbsp;needs&nbsp;to<br>
+be&nbsp;stopped&nbsp;to&nbsp;clean&nbsp;up&nbsp;its&nbsp;state.&nbsp;If&nbsp;it&nbsp;is&nbsp;stopped,&nbsp;then&nbsp;it&nbsp;will&nbsp;be<br>
+restarted&nbsp;to&nbsp;run&nbsp;the&nbsp;next&nbsp;page.<br>
+&nbsp;<br>
+A&nbsp;test&nbsp;that&nbsp;overrides&nbsp;this&nbsp;can&nbsp;look&nbsp;at&nbsp;both&nbsp;the&nbsp;page&nbsp;and&nbsp;the&nbsp;browser&nbsp;to<br>
+decide&nbsp;whether&nbsp;it&nbsp;needs&nbsp;to&nbsp;stop&nbsp;the&nbsp;browser.</tt></dd></dl>
+
+<dl><dt><a name="TimelineBasedPageTest-TabForPage"><strong>TabForPage</strong></a>(self, page, browser)</dt><dd><tt>Override&nbsp;to&nbsp;select&nbsp;a&nbsp;different&nbsp;tab&nbsp;for&nbsp;the&nbsp;page.&nbsp;&nbsp;For&nbsp;instance,&nbsp;to<br>
+create&nbsp;a&nbsp;new&nbsp;tab&nbsp;for&nbsp;every&nbsp;page,&nbsp;return&nbsp;browser.tabs.New().</tt></dd></dl>
+
+<dl><dt><a name="TimelineBasedPageTest-WillNavigateToPage"><strong>WillNavigateToPage</strong></a>(self, page, tab)</dt><dd><tt>Override&nbsp;to&nbsp;do&nbsp;operations&nbsp;before&nbsp;the&nbsp;page&nbsp;is&nbsp;navigated,&nbsp;notably&nbsp;Telemetry<br>
+will&nbsp;already&nbsp;have&nbsp;performed&nbsp;the&nbsp;following&nbsp;operations&nbsp;on&nbsp;the&nbsp;browser&nbsp;before<br>
+calling&nbsp;this&nbsp;function:<br>
+*&nbsp;Ensure&nbsp;only&nbsp;one&nbsp;tab&nbsp;is&nbsp;open.<br>
+*&nbsp;Call&nbsp;WaitForDocumentReadyStateToComplete&nbsp;on&nbsp;the&nbsp;tab.</tt></dd></dl>
+
+<dl><dt><a name="TimelineBasedPageTest-WillStartBrowser"><strong>WillStartBrowser</strong></a>(self, platform)</dt><dd><tt>Override&nbsp;to&nbsp;manipulate&nbsp;the&nbsp;browser&nbsp;environment&nbsp;before&nbsp;it&nbsp;launches.</tt></dd></dl>
+
+<hr>
+Data descriptors inherited from <a href="telemetry.page.page_test.html#PageTest">telemetry.page.page_test.PageTest</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>clear_cache_before_each_run</strong></dt>
+<dd><tt>When&nbsp;set&nbsp;to&nbsp;True,&nbsp;the&nbsp;browser's&nbsp;disk&nbsp;and&nbsp;memory&nbsp;cache&nbsp;will&nbsp;be&nbsp;cleared<br>
+before&nbsp;each&nbsp;run.</tt></dd>
+</dl>
+<dl><dt><strong>close_tabs_before_run</strong></dt>
+<dd><tt>When&nbsp;set&nbsp;to&nbsp;True,&nbsp;all&nbsp;tabs&nbsp;are&nbsp;closed&nbsp;before&nbsp;running&nbsp;the&nbsp;test&nbsp;for&nbsp;the<br>
+first&nbsp;time.</tt></dd>
+</dl>
+<dl><dt><strong>is_multi_tab_test</strong></dt>
+<dd><tt>Returns&nbsp;True&nbsp;if&nbsp;the&nbsp;test&nbsp;opens&nbsp;multiple&nbsp;tabs.<br>
+&nbsp;<br>
+If&nbsp;the&nbsp;test&nbsp;overrides&nbsp;TabForPage,&nbsp;it&nbsp;is&nbsp;deemed&nbsp;a&nbsp;multi-tab&nbsp;test.<br>
+Multi-tab&nbsp;tests&nbsp;do&nbsp;not&nbsp;retry&nbsp;after&nbsp;tab&nbsp;or&nbsp;browser&nbsp;crashes,&nbsp;whereas<br>
+single-tab&nbsp;tests&nbsp;do.&nbsp;That&nbsp;is&nbsp;because&nbsp;the&nbsp;state&nbsp;of&nbsp;multi-tab&nbsp;tests<br>
+(e.g.,&nbsp;how&nbsp;many&nbsp;tabs&nbsp;are&nbsp;open,&nbsp;etc.)&nbsp;is&nbsp;unknown&nbsp;after&nbsp;crashes.</tt></dd>
+</dl>
+</td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.web_perf.timeline_interaction_record.html b/catapult/telemetry/docs/pydoc/telemetry.web_perf.timeline_interaction_record.html
new file mode 100644
index 0000000..34a1639
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.web_perf.timeline_interaction_record.html
@@ -0,0 +1,317 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.web_perf.timeline_interaction_record</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.web_perf.html"><font color="#ffffff">web_perf</font></a>.timeline_interaction_record</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/web_perf/timeline_interaction_record.py">telemetry/web_perf/timeline_interaction_record.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.decorators.html">telemetry.decorators</a><br>
+</td><td width="25%" valign=top><a href="re.html">re</a><br>
+</td><td width="25%" valign=top><a href="telemetry.timeline.bounds.html">telemetry.timeline.bounds</a><br>
+</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.timeline_interaction_record.html#TimelineInteractionRecord">TimelineInteractionRecord</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.timeline_interaction_record.html#ThreadTimeRangeOverlappedException">ThreadTimeRangeOverlappedException</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.web_perf.timeline_interaction_record.html#NoThreadTimeDataException">NoThreadTimeDataException</a>
+</font></dt></dl>
+</dd>
+</dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="NoThreadTimeDataException">class <strong>NoThreadTimeDataException</strong></a>(<a href="telemetry.web_perf.timeline_interaction_record.html#ThreadTimeRangeOverlappedException">ThreadTimeRangeOverlappedException</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt><a href="exceptions.html#Exception">Exception</a>&nbsp;that&nbsp;can&nbsp;be&nbsp;thrown&nbsp;if&nbsp;there&nbsp;is&nbsp;not&nbsp;sufficient&nbsp;thread&nbsp;time&nbsp;data<br>
+to&nbsp;compute&nbsp;the&nbsp;overlapped&nbsp;thread&nbsp;time&nbsp;range.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.web_perf.timeline_interaction_record.html#NoThreadTimeDataException">NoThreadTimeDataException</a></dd>
+<dd><a href="telemetry.web_perf.timeline_interaction_record.html#ThreadTimeRangeOverlappedException">ThreadTimeRangeOverlappedException</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors inherited from <a href="telemetry.web_perf.timeline_interaction_record.html#ThreadTimeRangeOverlappedException">ThreadTimeRangeOverlappedException</a>:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="NoThreadTimeDataException-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#NoThreadTimeDataException-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#NoThreadTimeDataException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="NoThreadTimeDataException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#NoThreadTimeDataException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="NoThreadTimeDataException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#NoThreadTimeDataException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="NoThreadTimeDataException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#NoThreadTimeDataException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="NoThreadTimeDataException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#NoThreadTimeDataException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="NoThreadTimeDataException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="NoThreadTimeDataException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#NoThreadTimeDataException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="NoThreadTimeDataException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#NoThreadTimeDataException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="NoThreadTimeDataException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="NoThreadTimeDataException-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#NoThreadTimeDataException-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="NoThreadTimeDataException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ThreadTimeRangeOverlappedException">class <strong>ThreadTimeRangeOverlappedException</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt><a href="exceptions.html#Exception">Exception</a>&nbsp;that&nbsp;can&nbsp;be&nbsp;thrown&nbsp;when&nbsp;computing&nbsp;overlapped&nbsp;thread&nbsp;time&nbsp;range<br>
+with&nbsp;other&nbsp;events.<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.web_perf.timeline_interaction_record.html#ThreadTimeRangeOverlappedException">ThreadTimeRangeOverlappedException</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="ThreadTimeRangeOverlappedException-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#ThreadTimeRangeOverlappedException-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#ThreadTimeRangeOverlappedException-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="ThreadTimeRangeOverlappedException-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ThreadTimeRangeOverlappedException-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ThreadTimeRangeOverlappedException-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#ThreadTimeRangeOverlappedException-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ThreadTimeRangeOverlappedException-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#ThreadTimeRangeOverlappedException-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="ThreadTimeRangeOverlappedException-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#ThreadTimeRangeOverlappedException-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="ThreadTimeRangeOverlappedException-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ThreadTimeRangeOverlappedException-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#ThreadTimeRangeOverlappedException-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="ThreadTimeRangeOverlappedException-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ThreadTimeRangeOverlappedException-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="ThreadTimeRangeOverlappedException-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ThreadTimeRangeOverlappedException-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#ThreadTimeRangeOverlappedException-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="ThreadTimeRangeOverlappedException-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="TimelineInteractionRecord">class <strong>TimelineInteractionRecord</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
+<td colspan=2><tt>Represents&nbsp;an&nbsp;interaction&nbsp;that&nbsp;took&nbsp;place&nbsp;during&nbsp;a&nbsp;timeline&nbsp;recording.<br>
+&nbsp;<br>
+As&nbsp;a&nbsp;page&nbsp;runs,&nbsp;typically&nbsp;a&nbsp;number&nbsp;of&nbsp;different&nbsp;(simulated)&nbsp;user&nbsp;interactions<br>
+take&nbsp;place.&nbsp;For&nbsp;instance,&nbsp;a&nbsp;user&nbsp;might&nbsp;click&nbsp;a&nbsp;button&nbsp;in&nbsp;a&nbsp;mail&nbsp;app&nbsp;causing&nbsp;a<br>
+popup&nbsp;to&nbsp;animate&nbsp;in.&nbsp;Then&nbsp;they&nbsp;might&nbsp;press&nbsp;another&nbsp;button&nbsp;that&nbsp;sends&nbsp;data&nbsp;to&nbsp;a<br>
+server&nbsp;and&nbsp;simultaneously&nbsp;closes&nbsp;the&nbsp;popup&nbsp;without&nbsp;an&nbsp;animation.&nbsp;These&nbsp;are&nbsp;two<br>
+interactions.<br>
+&nbsp;<br>
+From&nbsp;the&nbsp;point&nbsp;of&nbsp;view&nbsp;of&nbsp;the&nbsp;page,&nbsp;each&nbsp;interaction&nbsp;might&nbsp;have&nbsp;a&nbsp;different<br>
+label:&nbsp;ClickComposeButton&nbsp;and&nbsp;SendEmail,&nbsp;for&nbsp;instance.&nbsp;From&nbsp;the&nbsp;point<br>
+of&nbsp;view&nbsp;of&nbsp;the&nbsp;benchmarking&nbsp;harness,&nbsp;the&nbsp;labels&nbsp;aren't&nbsp;so&nbsp;interesting&nbsp;as&nbsp;what<br>
+the&nbsp;performance&nbsp;expectations&nbsp;are&nbsp;for&nbsp;that&nbsp;interaction:&nbsp;was&nbsp;it&nbsp;loading<br>
+resources&nbsp;from&nbsp;the&nbsp;network?&nbsp;was&nbsp;there&nbsp;an&nbsp;animation?<br>
+&nbsp;<br>
+Determining&nbsp;these&nbsp;things&nbsp;is&nbsp;hard&nbsp;to&nbsp;do,&nbsp;simply&nbsp;by&nbsp;observing&nbsp;the&nbsp;state&nbsp;given&nbsp;to<br>
+a&nbsp;page&nbsp;from&nbsp;javascript.&nbsp;There&nbsp;are&nbsp;hints,&nbsp;for&nbsp;instance&nbsp;if&nbsp;network&nbsp;requests&nbsp;are<br>
+sent,&nbsp;or&nbsp;if&nbsp;a&nbsp;CSS&nbsp;animation&nbsp;is&nbsp;pending.&nbsp;But&nbsp;this&nbsp;is&nbsp;by&nbsp;no&nbsp;means&nbsp;a&nbsp;complete<br>
+story.<br>
+&nbsp;<br>
+Instead,&nbsp;we&nbsp;expect&nbsp;pages&nbsp;to&nbsp;mark&nbsp;up&nbsp;in&nbsp;the&nbsp;timeline&nbsp;what&nbsp;they&nbsp;are&nbsp;doing,&nbsp;with<br>
+a&nbsp;label&nbsp;and&nbsp;flags&nbsp;indicating&nbsp;the&nbsp;semantics&nbsp;of&nbsp;that&nbsp;interaction.&nbsp;This<br>
+is&nbsp;currently&nbsp;done&nbsp;by&nbsp;pushing&nbsp;markers&nbsp;into&nbsp;the&nbsp;console.time/timeEnd&nbsp;API:&nbsp;this<br>
+for&nbsp;instance&nbsp;can&nbsp;be&nbsp;issued&nbsp;in&nbsp;JS:<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;var&nbsp;str&nbsp;=&nbsp;'Interaction.SendEmail';<br>
+&nbsp;&nbsp;&nbsp;console.time(str);<br>
+&nbsp;&nbsp;&nbsp;setTimeout(function()&nbsp;{<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;console.timeEnd(str);<br>
+&nbsp;&nbsp;&nbsp;},&nbsp;1000);<br>
+&nbsp;<br>
+When&nbsp;run&nbsp;with&nbsp;perf.measurements.timeline_based_measurement,&nbsp;this&nbsp;will<br>
+then&nbsp;cause&nbsp;a&nbsp;<a href="#TimelineInteractionRecord">TimelineInteractionRecord</a>&nbsp;to&nbsp;be&nbsp;created&nbsp;for&nbsp;this&nbsp;range&nbsp;with<br>
+all&nbsp;metrics&nbsp;reported&nbsp;for&nbsp;the&nbsp;marked&nbsp;up&nbsp;1000ms&nbsp;time-range.<br>
+&nbsp;<br>
+The&nbsp;valid&nbsp;interaction&nbsp;flags&nbsp;are:<br>
+&nbsp;&nbsp;&nbsp;*&nbsp;repeatable:&nbsp;Allows&nbsp;other&nbsp;interactions&nbsp;to&nbsp;use&nbsp;the&nbsp;same&nbsp;label<br>&nbsp;</tt></td></tr>
+<tr><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="TimelineInteractionRecord-GetBounds"><strong>GetBounds</strong></a>(*args, **kwargs)</dt></dl>
+
+<dl><dt><a name="TimelineInteractionRecord-GetOverlappedThreadTimeForSlice"><strong>GetOverlappedThreadTimeForSlice</strong></a>(self, timeline_slice)</dt><dd><tt>Get&nbsp;the&nbsp;thread&nbsp;duration&nbsp;of&nbsp;timeline_slice&nbsp;that&nbsp;overlaps&nbsp;with&nbsp;this&nbsp;record.<br>
+&nbsp;<br>
+There&nbsp;are&nbsp;two&nbsp;cases&nbsp;:<br>
+&nbsp;<br>
+Case&nbsp;1:&nbsp;timeline_slice&nbsp;runs&nbsp;in&nbsp;the&nbsp;same&nbsp;thread&nbsp;as&nbsp;the&nbsp;record.<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;|&nbsp;&nbsp;&nbsp;&nbsp;[&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;timeline_slice&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;]<br>
+&nbsp;&nbsp;THREAD&nbsp;1&nbsp;&nbsp;&nbsp;&nbsp;|&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;|&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;|<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;|&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;record&nbsp;starts&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;record&nbsp;ends<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;(relative&nbsp;order&nbsp;in&nbsp;thread&nbsp;time)<br>
+&nbsp;<br>
+&nbsp;&nbsp;As&nbsp;the&nbsp;thread&nbsp;timestamps&nbsp;in&nbsp;timeline_slice&nbsp;and&nbsp;record&nbsp;are&nbsp;consistent,&nbsp;we<br>
+&nbsp;&nbsp;simply&nbsp;use&nbsp;them&nbsp;to&nbsp;compute&nbsp;the&nbsp;overlap.<br>
+&nbsp;<br>
+Case&nbsp;2:&nbsp;timeline_slice&nbsp;runs&nbsp;in&nbsp;a&nbsp;different&nbsp;thread&nbsp;from&nbsp;the&nbsp;record's.<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;|<br>
+&nbsp;&nbsp;THREAD&nbsp;2&nbsp;&nbsp;&nbsp;&nbsp;|&nbsp;&nbsp;&nbsp;&nbsp;[&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;timeline_slice&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;]<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;|<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;|<br>
+&nbsp;&nbsp;THREAD&nbsp;1&nbsp;&nbsp;&nbsp;&nbsp;|&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;|&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;|<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;|&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;record&nbsp;starts&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;record&nbsp;ends<br>
+&nbsp;<br>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;(relative&nbsp;order&nbsp;in&nbsp;wall-time)<br>
+&nbsp;<br>
+&nbsp;&nbsp;Unlike&nbsp;case&nbsp;1,&nbsp;thread&nbsp;timestamps&nbsp;of&nbsp;a&nbsp;thread&nbsp;are&nbsp;measured&nbsp;by&nbsp;its<br>
+&nbsp;&nbsp;thread-specific&nbsp;clock,&nbsp;which&nbsp;is&nbsp;inconsistent&nbsp;with&nbsp;that&nbsp;of&nbsp;the&nbsp;other<br>
+&nbsp;&nbsp;thread,&nbsp;and&nbsp;thus&nbsp;can't&nbsp;be&nbsp;used&nbsp;to&nbsp;compute&nbsp;the&nbsp;overlapped&nbsp;thread&nbsp;duration.<br>
+&nbsp;&nbsp;Hence,&nbsp;we&nbsp;use&nbsp;a&nbsp;heuristic&nbsp;to&nbsp;compute&nbsp;the&nbsp;overlap&nbsp;(see<br>
+&nbsp;&nbsp;_GetOverlappedThreadTimeForSliceInDifferentThread&nbsp;for&nbsp;more&nbsp;details)<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;timeline_slice:&nbsp;An&nbsp;instance&nbsp;of&nbsp;telemetry.timeline.slice.Slice</tt></dd></dl>
+
+<dl><dt><a name="TimelineInteractionRecord-__init__"><strong>__init__</strong></a>(self, label, start, end, async_event<font color="#909090">=None</font>, flags<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="TimelineInteractionRecord-__repr__"><strong>__repr__</strong></a>(self)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="TimelineInteractionRecord-FromAsyncEvent"><strong>FromAsyncEvent</strong></a>(cls, async_event)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt><dd><tt>Construct&nbsp;an&nbsp;timeline_interaction_record&nbsp;from&nbsp;an&nbsp;async&nbsp;event.<br>
+Args:<br>
+&nbsp;&nbsp;async_event:&nbsp;An&nbsp;instance&nbsp;of<br>
+&nbsp;&nbsp;&nbsp;&nbsp;telemetry.timeline.async_slices.AsyncSlice</tt></dd></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>end</strong></dt>
+</dl>
+<dl><dt><strong>label</strong></dt>
+</dl>
+<dl><dt><strong>repeatable</strong></dt>
+</dl>
+<dl><dt><strong>start</strong></dt>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-GetJavaScriptMarker"><strong>GetJavaScriptMarker</strong></a>(label, flags)</dt><dd><tt>Computes&nbsp;the&nbsp;marker&nbsp;string&nbsp;of&nbsp;an&nbsp;interaction&nbsp;record.<br>
+&nbsp;<br>
+This&nbsp;marker&nbsp;string&nbsp;can&nbsp;be&nbsp;used&nbsp;with&nbsp;the&nbsp;JavaScript&nbsp;APIs&nbsp;console.time()<br>
+and&nbsp;console.timeEnd()&nbsp;to&nbsp;mark&nbsp;the&nbsp;beginning&nbsp;and&nbsp;end&nbsp;of&nbsp;the<br>
+interaction&nbsp;record.<br>
+&nbsp;<br>
+Args:<br>
+&nbsp;&nbsp;label:&nbsp;The&nbsp;label&nbsp;used&nbsp;to&nbsp;identify&nbsp;the&nbsp;interaction&nbsp;record.<br>
+&nbsp;&nbsp;flags:&nbsp;The&nbsp;flags&nbsp;for&nbsp;the&nbsp;interaction&nbsp;record;&nbsp;see&nbsp;FLAGS&nbsp;above.<br>
+&nbsp;<br>
+Returns:<br>
+&nbsp;&nbsp;The&nbsp;interaction&nbsp;record&nbsp;marker&nbsp;string&nbsp;(e.g.,&nbsp;Interaction.Label/flag1,flag2).<br>
+&nbsp;<br>
+Raises:<br>
+&nbsp;&nbsp;AssertionError:&nbsp;If&nbsp;one&nbsp;or&nbsp;more&nbsp;of&nbsp;the&nbsp;flags&nbsp;is&nbsp;unrecognized.</tt></dd></dl>
+ <dl><dt><a name="-IsTimelineInteractionRecord"><strong>IsTimelineInteractionRecord</strong></a>(event_name)</dt></dl>
+</td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#55aa55">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><strong>FLAGS</strong> = ['repeatable']<br>
+<strong>REPEATABLE</strong> = 'repeatable'</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.wpr.archive_info.html b/catapult/telemetry/docs/pydoc/telemetry.wpr.archive_info.html
new file mode 100644
index 0000000..300d278
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.wpr.archive_info.html
@@ -0,0 +1,157 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: module telemetry.wpr.archive_info</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.<a href="telemetry.wpr.html"><font color="#ffffff">wpr</font></a>.archive_info</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/wpr/archive_info.py">telemetry/wpr/archive_info.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2013&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="catapult_base.cloud_storage.html">catapult_base.cloud_storage</a><br>
+<a href="json.html">json</a><br>
+</td><td width="25%" valign=top><a href="logging.html">logging</a><br>
+<a href="os.html">os</a><br>
+</td><td width="25%" valign=top><a href="re.html">re</a><br>
+<a href="shutil.html">shutil</a><br>
+</td><td width="25%" valign=top><a href="tempfile.html">tempfile</a><br>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ee77aa">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl>
+<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.wpr.archive_info.html#WprArchiveInfo">WprArchiveInfo</a>
+</font></dt></dl>
+</dd>
+<dt><font face="helvetica, arial"><a href="exceptions.html#Exception">exceptions.Exception</a>(<a href="exceptions.html#BaseException">exceptions.BaseException</a>)
+</font></dt><dd>
+<dl>
+<dt><font face="helvetica, arial"><a href="telemetry.wpr.archive_info.html#ArchiveError">ArchiveError</a>
+</font></dt></dl>
+</dd>
+</dl>
+ <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="ArchiveError">class <strong>ArchiveError</strong></a>(<a href="exceptions.html#Exception">exceptions.Exception</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt>Method resolution order:</dt>
+<dd><a href="telemetry.wpr.archive_info.html#ArchiveError">ArchiveError</a></dd>
+<dd><a href="exceptions.html#Exception">exceptions.Exception</a></dd>
+<dd><a href="exceptions.html#BaseException">exceptions.BaseException</a></dd>
+<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
+</dl>
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<hr>
+Methods inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><a name="ArchiveError-__init__"><strong>__init__</strong></a>(...)</dt><dd><tt>x.<a href="#ArchiveError-__init__">__init__</a>(...)&nbsp;initializes&nbsp;x;&nbsp;see&nbsp;help(type(x))&nbsp;for&nbsp;signature</tt></dd></dl>
+
+<hr>
+Data and other attributes inherited from <a href="exceptions.html#Exception">exceptions.Exception</a>:<br>
+<dl><dt><strong>__new__</strong> = &lt;built-in method __new__ of type object&gt;<dd><tt>T.<a href="#ArchiveError-__new__">__new__</a>(S,&nbsp;...)&nbsp;-&gt;&nbsp;a&nbsp;new&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;with&nbsp;type&nbsp;S,&nbsp;a&nbsp;subtype&nbsp;of&nbsp;T</tt></dl>
+
+<hr>
+Methods inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><a name="ArchiveError-__delattr__"><strong>__delattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ArchiveError-__delattr__">__delattr__</a>('name')&nbsp;&lt;==&gt;&nbsp;del&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ArchiveError-__getattribute__"><strong>__getattribute__</strong></a>(...)</dt><dd><tt>x.<a href="#ArchiveError-__getattribute__">__getattribute__</a>('name')&nbsp;&lt;==&gt;&nbsp;x.name</tt></dd></dl>
+
+<dl><dt><a name="ArchiveError-__getitem__"><strong>__getitem__</strong></a>(...)</dt><dd><tt>x.<a href="#ArchiveError-__getitem__">__getitem__</a>(y)&nbsp;&lt;==&gt;&nbsp;x[y]</tt></dd></dl>
+
+<dl><dt><a name="ArchiveError-__getslice__"><strong>__getslice__</strong></a>(...)</dt><dd><tt>x.<a href="#ArchiveError-__getslice__">__getslice__</a>(i,&nbsp;j)&nbsp;&lt;==&gt;&nbsp;x[i:j]<br>
+&nbsp;<br>
+Use&nbsp;of&nbsp;negative&nbsp;indices&nbsp;is&nbsp;not&nbsp;supported.</tt></dd></dl>
+
+<dl><dt><a name="ArchiveError-__reduce__"><strong>__reduce__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ArchiveError-__repr__"><strong>__repr__</strong></a>(...)</dt><dd><tt>x.<a href="#ArchiveError-__repr__">__repr__</a>()&nbsp;&lt;==&gt;&nbsp;repr(x)</tt></dd></dl>
+
+<dl><dt><a name="ArchiveError-__setattr__"><strong>__setattr__</strong></a>(...)</dt><dd><tt>x.<a href="#ArchiveError-__setattr__">__setattr__</a>('name',&nbsp;value)&nbsp;&lt;==&gt;&nbsp;x.name&nbsp;=&nbsp;value</tt></dd></dl>
+
+<dl><dt><a name="ArchiveError-__setstate__"><strong>__setstate__</strong></a>(...)</dt></dl>
+
+<dl><dt><a name="ArchiveError-__str__"><strong>__str__</strong></a>(...)</dt><dd><tt>x.<a href="#ArchiveError-__str__">__str__</a>()&nbsp;&lt;==&gt;&nbsp;str(x)</tt></dd></dl>
+
+<dl><dt><a name="ArchiveError-__unicode__"><strong>__unicode__</strong></a>(...)</dt></dl>
+
+<hr>
+Data descriptors inherited from <a href="exceptions.html#BaseException">exceptions.BaseException</a>:<br>
+<dl><dt><strong>__dict__</strong></dt>
+</dl>
+<dl><dt><strong>args</strong></dt>
+</dl>
+<dl><dt><strong>message</strong></dt>
+</dl>
+</td></tr></table> <p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="WprArchiveInfo">class <strong>WprArchiveInfo</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+    
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%">Methods defined here:<br>
+<dl><dt><a name="WprArchiveInfo-AddNewTemporaryRecording"><strong>AddNewTemporaryRecording</strong></a>(self, temp_wpr_file_path<font color="#909090">=None</font>)</dt></dl>
+
+<dl><dt><a name="WprArchiveInfo-AddRecordedStories"><strong>AddRecordedStories</strong></a>(self, stories, upload_to_cloud_storage<font color="#909090">=False</font>)</dt></dl>
+
+<dl><dt><a name="WprArchiveInfo-DownloadArchivesIfNeeded"><strong>DownloadArchivesIfNeeded</strong></a>(self)</dt><dd><tt>Downloads&nbsp;archives&nbsp;iff&nbsp;the&nbsp;Archive&nbsp;has&nbsp;a&nbsp;bucket&nbsp;parameter&nbsp;and&nbsp;the&nbsp;user<br>
+has&nbsp;permission&nbsp;to&nbsp;access&nbsp;the&nbsp;bucket.<br>
+&nbsp;<br>
+Raises&nbsp;cloud&nbsp;storage&nbsp;Permissions&nbsp;or&nbsp;Credentials&nbsp;error&nbsp;when&nbsp;there&nbsp;is&nbsp;no<br>
+local&nbsp;copy&nbsp;of&nbsp;the&nbsp;archive&nbsp;and&nbsp;the&nbsp;user&nbsp;doesn't&nbsp;have&nbsp;permission&nbsp;to&nbsp;access<br>
+the&nbsp;archive's&nbsp;bucket.<br>
+&nbsp;<br>
+Warns&nbsp;when&nbsp;a&nbsp;bucket&nbsp;is&nbsp;not&nbsp;specified&nbsp;or&nbsp;when&nbsp;the&nbsp;user&nbsp;doesn't&nbsp;have<br>
+permission&nbsp;to&nbsp;access&nbsp;the&nbsp;archive's&nbsp;bucket&nbsp;but&nbsp;a&nbsp;local&nbsp;copy&nbsp;of&nbsp;the&nbsp;archive<br>
+exists.</tt></dd></dl>
+
+<dl><dt><a name="WprArchiveInfo-WprFilePathForStory"><strong>WprFilePathForStory</strong></a>(self, story)</dt></dl>
+
+<dl><dt><a name="WprArchiveInfo-__init__"><strong>__init__</strong></a>(self, file_path, data, bucket)</dt></dl>
+
+<hr>
+Class methods defined here:<br>
+<dl><dt><a name="WprArchiveInfo-FromFile"><strong>FromFile</strong></a>(cls, file_path, bucket)<font color="#909090"><font face="helvetica, arial"> from <a href="__builtin__.html#type">__builtin__.type</a></font></font></dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+</td></tr></table></td></tr></table><p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#eeaa77">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><dl><dt><a name="-AssertValidCloudStorageBucket"><strong>AssertValidCloudStorageBucket</strong></a>(bucket)</dt></dl>
+</td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/pydoc/telemetry.wpr.html b/catapult/telemetry/docs/pydoc/telemetry.wpr.html
new file mode 100644
index 0000000..70e4a8b
--- /dev/null
+++ b/catapult/telemetry/docs/pydoc/telemetry.wpr.html
@@ -0,0 +1,26 @@
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html><head><title>Python: package telemetry.wpr</title>
+<meta charset="utf-8">
+</head><body bgcolor="#f0f0f8">
+
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
+<tr bgcolor="#7799ee">
+<td valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong><a href="telemetry.html"><font color="#ffffff">telemetry</font></a>.wpr</strong></big></big></font></td
+><td align=right valign=bottom
+><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="../telemetry/wpr/__init__.py">telemetry/wpr/__init__.py</a></font></td></tr></table>
+    <p><tt>#&nbsp;Copyright&nbsp;2014&nbsp;The&nbsp;Chromium&nbsp;Authors.&nbsp;All&nbsp;rights&nbsp;reserved.<br>
+#&nbsp;Use&nbsp;of&nbsp;this&nbsp;source&nbsp;code&nbsp;is&nbsp;governed&nbsp;by&nbsp;a&nbsp;BSD-style&nbsp;license&nbsp;that&nbsp;can&nbsp;be<br>
+#&nbsp;found&nbsp;in&nbsp;the&nbsp;LICENSE&nbsp;file.</tt></p>
+<p>
+<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#aa55cc">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
+    
+<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="telemetry.wpr.archive_info.html">archive_info</a><br>
+</td><td width="25%" valign=top><a href="telemetry.wpr.archive_info_unittest.html">archive_info_unittest</a><br>
+</td><td width="25%" valign=top></td><td width="25%" valign=top></td></tr></table></td></tr></table>
+</body></html>
\ No newline at end of file
diff --git a/catapult/telemetry/docs/run_benchmarks_locally.md b/catapult/telemetry/docs/run_benchmarks_locally.md
new file mode 100644
index 0000000..a86e47e
--- /dev/null
+++ b/catapult/telemetry/docs/run_benchmarks_locally.md
@@ -0,0 +1,111 @@
+<!-- Copyright 2015 The Chromium Authors. All rights reserved.
+     Use of this source code is governed by a BSD-style license that can be
+     found in the LICENSE file.
+-->
+
+# Telemetry: Run Benchmarks Locally
+
+## Set Up
+
+If you don't have a Chromium checkout, download the
+[latest Telemetry archive](https://storage.googleapis.com/chromium-telemetry/snapshots/telemetry.zip).
+Unzip the archive. If you're running on Mac OS X, you're all set! For
+Windows, Linux, Android, or Chrome OS, read on.
+
+#### Windows
+
+Some benchmarks require you to have
+[pywin32](http://sourceforge.net/projects/pywin32/files/pywin32/Build%20219/).
+Be sure to install a version that matches the version and bitness of the Python
+you have installed.
+
+#### Linux
+
+Telemetry on Linux tries to scan for attached Android devices with
+[adb](https://developer.android.com/tools/help/adb.html).
+The included adb binary is 32-bit. On 64-bit machines, you need to install the
+libstdc++6:i386 package.
+
+#### Android
+
+Running on Android is supported with a Linux or Mac OS X host. Windows is not
+yet supported. There are also a few additional steps to set up:
+
+  1. Telemetry requires [adb](http://developer.android.com/tools/help/adb.html).
+     If you're running from the zip archive, adb is already included. But if
+     you're running with a Chromium checkout, ensure your .gclient file contains
+     target\_os = ['android'], then resync your code.
+  2. If running from an OS X host, you need to run adb as root. First, install
+     a "userdebug" build of Android on your device. Then run `adb root`.
+     Sometimes you may also need to run `adb remount`.
+  3. Enable [debugging over USB](http://developer.android.com/tools/device.html)
+     on your device.
+  4. You can get the name of your device with `adb devices` and use it with
+     Telemetry via --device=<device\_name>.
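+
+For example, the adb setup from the steps above typically looks like the
+following on an OS X host (the device serial shown here is just a placeholder):
+
+`adb root`
+
+`adb devices` (prints something like `0123456789ABCDEF  device`; pass that
+serial to Telemetry via `--device=0123456789ABCDEF`)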
+
+#### Chrome OS
+
+See [Running Telemetry on Chrome OS](http://www.chromium.org/developers/telemetry/running-telemetry-on-chrome-os).
+
+## Benchmark Commands
+
+Telemetry benchmarks can be run with run\_benchmark.
+
+In the Telemetry zip archive, this is located at `telemetry/run_benchmark`.
+
+In the Chromium source tree, this is located at `src/tools/perf/run_benchmark`.
+
+#### Running a benchmark
+
+List the available benchmarks with `telemetry/run_benchmark list`.
+
+Here's an example for running a particular benchmark:
+
+`telemetry/run_benchmark --browser=canary smoothness.top_25_smooth`
+
+#### Running on another browser
+
+To list available browsers, use:
+
+`telemetry/run_benchmark --browser=list`
+
+If you're running Telemetry from within a Chromium checkout, the release and
+debug browsers are what's built in out/Release and out/Debug, respectively.
+If you are trying to run on Android, you should see an entry similar to
+android-jb-system-chrome in this list. If it's not there, the device is not set
+up correctly.
+
+To run a specific browser executable:
+
+`telemetry/run_benchmark --browser=exact --browser-executable=/path/to/binary`
+
+To run on a Chromebook:
+
+`telemetry/run_benchmark --browser=cros-chrome --remote=[ip_address]`
+
+#### Options
+
+To see all options, run:
+
+`telemetry/run_benchmark run --help`
+
+Use --pageset-repeat to run the test repeatedly. For example:
+
+`telemetry/run_benchmark smoothness.top_25 --pageset-repeat=30`
+
+If you want to re-generate the HTML results and add a label, you can do this
+locally by using the parameters `--reset-results --results-label="foo"`:
+
+`telemetry/run_benchmark smoothness.top_25 --reset-results
+--results-label="foo"`
+
+#### Comparing Two Runs
+
+`telemetry/run_benchmark some_test --browser-executable=path/to/version/1
+--reset-results --results-label="Version 1"`
+
+`telemetry/run_benchmark some_test --browser-executable=path/to/version/2
+--results-label="Version 2"`
+
+The results will be written to the `results.html` file in the same location
+as the `run_benchmark` script.
diff --git a/catapult/telemetry/examples/benchmarks/__init__.py b/catapult/telemetry/examples/benchmarks/__init__.py
new file mode 100644
index 0000000..b7bc89c
--- /dev/null
+++ b/catapult/telemetry/examples/benchmarks/__init__.py
@@ -0,0 +1,11 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+
+# Since this is an example of telemetry benchmarks living in the top-level
+# telemetry folder, we add the top-level telemetry dir to sys.path so that the
+# other modules can import from telemetry directly.
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
diff --git a/catapult/telemetry/examples/benchmarks/simple_story_set.py b/catapult/telemetry/examples/benchmarks/simple_story_set.py
new file mode 100644
index 0000000..f33434d
--- /dev/null
+++ b/catapult/telemetry/examples/benchmarks/simple_story_set.py
@@ -0,0 +1,39 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry import story
+from telemetry import page
+
+
+class ExamplePage(page.Page):
+
+  def __init__(self, page_set):
+    super(ExamplePage, self).__init__(
+        url='https://google.com/search?q=lemon',
+        page_set=page_set)
+
+  def RunPageInteractions(self, action_runner):
+    # For all the web APIs that action_runner supports, see the
+    # telemetry.page.action_runner module.
+
+    action_runner.Wait(0.5)
+    # CreateInteraction creates a region of interest in the trace that covers
+    # the wait, scroll, and tap actions nested in the block below.
+    with action_runner.CreateInteraction('Scroll-And-Tap'):
+      action_runner.Wait(0.3)
+      action_runner.ScrollPage()
+      action_runner.TapElement(text='Next')
+    action_runner.Wait(1)
+    with action_runner.CreateInteraction('Scroll'):
+      action_runner.ScrollPage()
+    with action_runner.CreateInteraction('Wait-two'):
+      action_runner.Wait(1)
+
+
+class SimpleStorySet(story.StorySet):
+  def __init__(self):
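+    # archive_data_file names the WPR archive mapping used for these stories;
+    # cloud_storage_bucket is the bucket those archives are downloaded from or
+    # uploaded to.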
+    super(SimpleStorySet, self).__init__(
+        archive_data_file='data/simple_story_set.json',
+        cloud_storage_bucket=story.PARTNER_BUCKET)
+    self.AddStory(ExamplePage(self))
diff --git a/catapult/telemetry/examples/benchmarks/tbm_benchmark.py b/catapult/telemetry/examples/benchmarks/tbm_benchmark.py
new file mode 100644
index 0000000..7f84356
--- /dev/null
+++ b/catapult/telemetry/examples/benchmarks/tbm_benchmark.py
@@ -0,0 +1,21 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+from telemetry import benchmark
+from telemetry.web_perf import timeline_based_measurement
+
+from benchmarks import simple_story_set
+
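+# A minimal timeline-based measurement (TBM) benchmark: it runs the stories in
+# SimpleStorySet and evaluates the 'sample_metric.html' metric over the
+# recorded traces.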
+class TBMSample(benchmark.Benchmark):
+
+  def CreateStorySet(self, options):
+    return simple_story_set.SimpleStorySet()
+
+  def CreateTimelineBasedMeasurementOptions(self):
+    options = timeline_based_measurement.Options()
+    options.SetTimelineBasedMetric('sample_metric.html')
+    return options
+
+  @classmethod
+  def Name(cls):
+    return 'tbm_sample.tbm_sample'
diff --git a/catapult/telemetry/examples/credentials_example.json b/catapult/telemetry/examples/credentials_example.json
new file mode 100644
index 0000000..b3978b6
--- /dev/null
+++ b/catapult/telemetry/examples/credentials_example.json
@@ -0,0 +1,10 @@
+{
+  "google": {
+    "username": "<your google account here>",
+    "password": "<your google password here>"
+  },
+  "facebook": {
+    "username": "<your google account here>",
+    "password": "<your google password here>"
+  }
+}
diff --git a/catapult/telemetry/examples/run_benchmark b/catapult/telemetry/examples/run_benchmark
new file mode 100755
index 0000000..6b2d376
--- /dev/null
+++ b/catapult/telemetry/examples/run_benchmark
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+# Copyright (c) 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import os
+import sys
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
+from telemetry import benchmark_runner
+
+def main():
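+  # ProjectConfig tells the benchmark runner where this project lives and which
+  # directories to scan for benchmark classes (here, examples/benchmarks).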
+  top_level_dir = os.path.dirname(__file__)
+  config = benchmark_runner.ProjectConfig(
+      top_level_dir=top_level_dir,
+      benchmark_dirs=[os.path.join(top_level_dir, 'benchmarks')])
+  return benchmark_runner.main(config)
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/catapult/telemetry/json_format b/catapult/telemetry/json_format
new file mode 100755
index 0000000..401594d
--- /dev/null
+++ b/catapult/telemetry/json_format
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import argparse
+import json
+import os
+import sys
+
+
+def GetFormattedJSONString(file_path):
+  with open(file_path, 'r') as f:
+    json_obj = json.load(f)
+  return json.dumps(
+      json_obj, indent=2, sort_keys=True, separators=(',', ': '))
+
+
+def ValidateJSONFormat(file_path):
+  with open(file_path, 'r') as f:
+    file_content = f.read()
+  if file_content != GetFormattedJSONString(file_path):
+    raise Exception(
+        'Reformat your JSON file by running: %s --format %s' %
+        (__file__, file_path))
+  print >> sys.stdout, ('%s passes the JSON format validation' % file_path)
+
+
+def Format(file_path):
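+  # Rewrite the file in place using the canonical format produced by
+  # GetFormattedJSONString (2-space indent, sorted keys).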
+  formatted_JSON_string = GetFormattedJSONString(file_path)
+  with open(file_path, 'w') as f:
+    f.write(formatted_JSON_string)
+
+
+def Main(args):
+  description = """A JSON formatting tool.
+
+  This is a tool that validates and reformats a JSON file so that it complies
+  with a certain style. The JSON style imposed by this tool is:
+    * JSON array elements and object members are indented with 2 spaces.
+    * Dictionary objects are sorted by key.
+    * Items are separated by ', ' and ': '.
+  """
+  parser = argparse.ArgumentParser(
+      description=description, formatter_class=argparse.RawTextHelpFormatter)
+  parser.add_argument('file_path', type=str, help='The path to JSON file.')
+  parser.add_argument('--format', action='store_true', default=False,
+                      help='Format the JSON file.')
+  options = parser.parse_args(args)
+  if options.format:
+    Format(options.file_path)
+    return 0
+  ValidateJSONFormat(options.file_path)
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(Main(sys.argv[1:]))
diff --git a/catapult/telemetry/list_telemetry_unittests b/catapult/telemetry/list_telemetry_unittests
new file mode 100755
index 0000000..56f0a0d
--- /dev/null
+++ b/catapult/telemetry/list_telemetry_unittests
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import sys
+
+
+def _ExtractQueuedTestName(line):
+  _, test_name, _ = line.split(' ')
+  return test_name
+
+
+def _ExtractPassedTestName(line):
+  _, test_name, _, _ = line.split(' ')
+  return test_name
+
+
+def _IsQueued(line):
+  return line.endswith(' queued')
+
+
+def _IsPassed(line):
+  return 'passed' in line.split(' ')
+
+
+def _ProcessLogFile(file_path):
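+  # Only log lines starting with '[' are considered. A line ending in ' queued'
+  # or containing 'passed' names the test in its second whitespace-separated
+  # token.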
+  passed_unittests = []
+  queued_unittests = []
+  with open(file_path, 'r') as f:
+    for line in f:
+      line = line.strip()
+      if not line.startswith('['):
+        continue
+      if _IsQueued(line):
+        queued_unittests.append(_ExtractQueuedTestName(line))
+      elif _IsPassed(line):
+        passed_unittests.append(_ExtractPassedTestName(line))
+  queued_unittests.sort()
+  passed_unittests.sort()
+  return queued_unittests, passed_unittests
+
+
+def main(args):
+  parser = argparse.ArgumentParser(
+      description=('Process telemetry unittests log to print out passed '
+                   'or queued tests.'))
+  parser.add_argument(
+      'filepath', help='path to log file of telemetry unittest')
+  parser.add_argument(
+      '-p', '--list-passed-tests', action='store_true',
+      help='List all the passed telemetry unittests')
+  parser.add_argument(
+      '-q', '--list-queued-tests', action='store_true',
+      help='List all the queued telemetry unittests')
+  options = parser.parse_args(args)
+  queued_unittests, passed_unittests = _ProcessLogFile(options.filepath)
+  if options.list_passed_tests:
+    print 'All passed telemetry unittests:\n'
+    print '\n'.join(passed_unittests)
+  if options.list_queued_tests:
+    print 'All queued telemetry unittests:\n'
+    print '\n'.join(queued_unittests)
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv[1:]))
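
A small sketch of the log-line shapes the helpers above accept; the bracketed prefix and the test names are assumptions about the runner's output, not taken from a real log:

    queued_line = '[1/42] telemetry.foo_unittest.FooTest queued'
    passed_line = '[2/42] telemetry.bar_unittest.BarTest passed 0.531s'

    assert _IsQueued(queued_line)
    assert _ExtractQueuedTestName(queued_line) == 'telemetry.foo_unittest.FooTest'
    assert _IsPassed(passed_line)
    assert _ExtractPassedTestName(passed_line) == 'telemetry.bar_unittest.BarTest'
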
diff --git a/catapult/telemetry/pylintrc b/catapult/telemetry/pylintrc
new file mode 100644
index 0000000..e8c9d9b
--- /dev/null
+++ b/catapult/telemetry/pylintrc
@@ -0,0 +1,74 @@
+[MESSAGES CONTROL]
+
+# Disable the message, report, category or checker with the given id(s).
+# TODO(telemetry-team): Shrink this list to be as small as possible.
+disable=
+  design,
+  similarities,
+
+  abstract-class-little-used,
+  abstract-class-not-used,
+  bad-builtin,
+  bad-continuation,
+  broad-except,
+  fixme,
+  global-statement,
+  interface-not-implemented,
+  invalid-name,
+  locally-disabled,
+  locally-enabled,
+  logging-not-lazy,
+  missing-docstring,
+  no-member,
+  no-self-use,
+  protected-access,
+  star-args,
+
+
+[REPORTS]
+
+# Don't write out full reports, just messages.
+reports=no
+
+
+[BASIC]
+
+# Regular expression which should only match correct function names.
+function-rgx=^(?:(?P<exempt>setUp|tearDown|setUpModule|tearDownModule)|(?P<camel_case>_?[A-Z][a-zA-Z0-9]*))$
+
+# Regular expression which should only match correct method names.
+method-rgx=^(?:(?P<exempt>_[a-z0-9_]+__|get|post|runTest|setUp|tearDown|setUpTestCase|tearDownTestCase|setupSelf|tearDownClass|setUpClass)|(?P<camel_case>(_{0,2}|test|assert)[A-Z][a-zA-Z0-9_]*))$
+
+# Regular expression which should only match correct argument names.
+argument-rgx=^[a-z][a-z0-9_]*$
+
+# Regular expression which should only match correct variable names.
+variable-rgx=^[a-z][a-z0-9_]*$
+
+# Good variable names which should always be accepted, separated by a comma.
+good-names=main,_
+
+# List of builtin function names that should not be used, separated by a comma.
+bad-functions=apply,input,reduce
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused imports in __init__ files.
+init-import=no
+
+# A regular expression matching names used for dummy variables (i.e. not used).
+dummy-variables-rgx=^\*{0,2}(_$|unused_)
+
+
+[TYPECHECK]
+
+# Tells whether missing members accessed in a mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+
+[FORMAT]
+
+# We use two spaces for indents, instead of the usual four spaces or tab.
+indent-string='  '
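
A hedged usage sketch for the config above: assuming pylint is installed and the working directory is catapult/telemetry, the file would typically be passed via pylint's standard --rcfile flag (the target module path is illustrative):

    import subprocess

    # Lint the telemetry package with the repository's pylintrc.
    subprocess.check_call(['pylint', '--rcfile=pylintrc', 'telemetry'])
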
diff --git a/catapult/telemetry/support/html_output/results-template.html b/catapult/telemetry/support/html_output/results-template.html
new file mode 100644
index 0000000..a6a5655
--- /dev/null
+++ b/catapult/telemetry/support/html_output/results-template.html
@@ -0,0 +1,1488 @@
+<!DOCTYPE html>
+<html>
+<head>
+<title>Telemetry Performance Test Results</title>
+<style type="text/css">
+
+section {
+    background: white;
+    padding: 10px;
+    position: relative;
+}
+
+.collapsed:before {
+    color: #ccc;
+    content: '\25B8\00A0';
+}
+
+.expanded:before {
+    color: #eee;
+    content: '\25BE\00A0';
+}
+
+.line-plots {
+    padding-left: 25px;
+}
+
+.line-plots > div {
+    display: inline-block;
+    width: 90px;
+    height: 40px;
+    margin-right: 10px;
+}
+
+.large-line-plots {
+    padding-left: 25px;
+}
+
+.large-line-plots > div, .histogram-plots > div {
+    display: inline-block;
+    width: 400px;
+    height: 200px;
+    margin-right: 10px;
+}
+
+.large-line-plot-labels > div, .histogram-plot-labels > div {
+    display: inline-block;
+    width: 400px;
+    height: 11px;
+    margin-right: 10px;
+    color: #545454;
+    text-align: center;
+    font-size: 11px;
+}
+
+.closeButton {
+    display: inline-block;
+    background: #eee;
+    background: linear-gradient(rgb(220, 220, 220), rgb(255, 255, 255));
+    border: inset 1px #ddd;
+    border-radius: 4px;
+    float: right;
+    font-size: small;
+    -webkit-user-select: none;
+    font-weight: bold;
+    padding: 1px 4px;
+}
+
+.closeButton:hover {
+    background: #F09C9C;
+}
+
+.label {
+    cursor: text;
+}
+
+.label:hover {
+    background: #ffcc66;
+}
+
+section h1 {
+    text-align: center;
+    font-size: 1em;
+}
+
+section .tooltip {
+    position: absolute;
+    text-align: center;
+    background: #ffcc66;
+    border-radius: 5px;
+    padding: 0px 5px;
+}
+
+body {
+    padding: 0px;
+    margin: 0px;
+    font-family: sans-serif;
+}
+
+table {
+    background: white;
+    width: 100%;
+}
+
+table, td, th {
+    border-collapse: collapse;
+    padding: 5px;
+    white-space: nowrap;
+}
+
+.highlight:hover {
+   color: #202020;
+   background: #e0e0e0;
+}
+
+.nestedRow {
+    background: #f8f8f8;
+}
+
+.importantNestedRow {
+    background: #e0e0e0;
+    font-weight: bold;
+}
+
+table td {
+    position: relative;
+}
+
+th, td {
+    cursor: pointer;
+    cursor: hand;
+}
+
+th {
+    background: #e6eeee;
+    background: linear-gradient(rgb(244, 244, 244), rgb(217, 217, 217));
+    border: 1px solid #ccc;
+}
+
+th.sortUp:after {
+    content: ' \25BE';
+}
+
+th.sortDown:after {
+    content: ' \25B4';
+}
+
+td.comparison, td.result {
+    text-align: right;
+}
+
+td.better {
+    color: #6c6;
+}
+
+td.fadeOut {
+    opacity: 0.5;
+}
+
+td.unknown {
+    color: #ccc;
+}
+
+td.worse {
+    color: #c66;
+}
+
+td.reference {
+    font-style: italic;
+    font-weight: bold;
+    color: #444;
+}
+
+td.missing {
+    color: #ccc;
+    text-align: center;
+}
+
+td.missingReference {
+    color: #ccc;
+    text-align: center;
+    font-style: italic;
+}
+
+.checkbox {
+    display: inline-block;
+    background: #eee;
+    background: linear-gradient(rgb(220, 220, 220), rgb(200, 200, 200));
+    border: inset 1px #ddd;
+    border-radius: 5px;
+    margin: 10px;
+    font-size: small;
+    cursor: pointer;
+    cursor: hand;
+    -webkit-user-select: none;
+    font-weight: bold;
+}
+
+.checkbox span {
+    display: inline-block;
+    line-height: 100%;
+    padding: 5px 8px;
+    border: outset 1px transparent;
+}
+
+.checkbox .checked {
+    background: #e6eeee;
+    background: linear-gradient(rgb(255, 255, 255), rgb(235, 235, 235));
+    border: outset 1px #eee;
+    border-radius: 5px;
+}
+
+.openAllButton {
+    display: inline-block;
+    color: #6c6;
+    background: #eee;
+    background: linear-gradient(rgb(220, 220, 220), rgb(255, 255, 255));
+    border: inset 1px #ddd;
+    border-radius: 5px;
+    float: left;
+    font-size: small;
+    -webkit-user-select: none;
+    font-weight: bold;
+    padding: 1px 4px;
+}
+
+.openAllButton:hover {
+    background: #60f060;
+}
+
+.closeAllButton {
+    display: inline-block;
+    color: #c66;
+    background: #eee;
+    background: linear-gradient(rgb(220, 220, 220),rgb(255, 255, 255));
+    border: inset 1px #ddd;
+    border-radius: 5px;
+    float: left;
+    font-size: small;
+    -webkit-user-select: none;
+    font-weight: bold;
+    padding: 1px 4px;
+}
+
+.closeAllButton:hover {
+    background: #f04040;
+}
+
+</style>
+</head>
+<body onload="init()">
+<div style="padding: 0 10px; white-space: nowrap;">
+Result <span id="time-memory" class="checkbox"></span>
+Reference <span id="reference" class="checkbox"></span>
+Style <span id="scatter-line" class="checkbox"><span class="checked">Scatter</span><span>Line</span></span>
+<span class="checkbox"><span class="checked" id="undelete">Undelete</span></span><br>
+Run your test with --reset-results to clear all runs
+</div>
+<table id="container"></table>
+<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.6.4/jquery.min.js"></script>
+<script>
+%plugins%
+</script>
+<script>
+
+var EXPANDED = true;
+var COLLAPSED = false;
+var SMALLEST_PERCENT_DISPLAYED = 0.01;
+var INVISIBLE = false;
+var VISIBLE = true;
+var COMPARISON_SUFFIX = '_compare';
+var SORT_DOWN_CLASS = 'sortDown';
+var SORT_UP_CLASS = 'sortUp';
+var BETTER_CLASS = 'better';
+var WORSE_CLASS = 'worse';
+var UNKNOWN_CLASS = 'unknown'
+// px Indentation for graphs
+var GRAPH_INDENT = 64;
+var PADDING_UNDER_GRAPH = 5;
+// px Indentation for nested children left-margins
+var INDENTATION = 40;
+
+function TestResult(metric, values, associatedRun, std, degreesOfFreedom) {
+    if (values) {
+        if (values[0] instanceof Array) {
+            var flattenedValues = [];
+            for (var i = 0; i < values.length; i++)
+                flattenedValues = flattenedValues.concat(values[i]);
+            values = flattenedValues;
+        }
+
+        if (jQuery.type(values[0]) === 'string') {
+            try {
+                var current = JSON.parse(values[0]);
+                if (current.params.type === 'HISTOGRAM') {
+                    this.histogramValues = current;
+                    // Histogram results have no values (per se). Instead we calculate
+                    // the values from the histogram bins.
+                    var values = [];
+                    var buckets = current.buckets
+                    for (var i = 0; i < buckets.length; i++) {
+                        var bucket = buckets[i];
+                        var bucket_mean = (bucket.high + bucket.low) / 2;
+                        for (var b = 0; b < bucket.count; b++) {
+                            values.push(bucket_mean);
+                        }
+                    }
+                }
+            }
+            catch (e) {
+                console.error(e, e.stack);
+            }
+        }
+    } else {
+        values = [];
+    }
+
+    this.test = function() { return metric; }
+    this.values = function() { return values.map(function(value) { return metric.scalingFactor() * value; }); }
+    this.unscaledMean = function() { return Statistics.sum(values) / values.length; }
+    this.mean = function() { return metric.scalingFactor() * this.unscaledMean(); }
+    this.min = function() { return metric.scalingFactor() * Statistics.min(values); }
+    this.max = function() { return metric.scalingFactor() * Statistics.max(values); }
+    this.confidenceIntervalDelta = function() {
+        if (std !== undefined) {
+            return metric.scalingFactor() * Statistics.confidenceIntervalDeltaFromStd(0.95, values.length,
+                std, degreesOfFreedom);
+        }
+        return metric.scalingFactor() * Statistics.confidenceIntervalDelta(0.95, values.length,
+            Statistics.sum(values), Statistics.squareSum(values));
+    }
+    this.confidenceIntervalDeltaRatio = function() { return this.confidenceIntervalDelta() / this.mean(); }
+    this.percentDifference = function(other) {
+        if (other === undefined) {
+            return undefined;
+        }
+        return (other.unscaledMean() - this.unscaledMean()) / this.unscaledMean();
+    }
+    this.isStatisticallySignificant = function(other) {
+        if (other === undefined) {
+            return false;
+        }
+        var diff = Math.abs(other.mean() - this.mean());
+        return diff > this.confidenceIntervalDelta() && diff > other.confidenceIntervalDelta();
+    }
+    this.run = function() { return associatedRun; }
+}
+
+function TestRun(entry) {
+    this.id = function() { return entry['buildTime'].replace(/[:.-]/g,''); }
+    this.label = function() {
+        if (labelKey in localStorage)
+            return localStorage[labelKey];
+        return entry['label'];
+    }
+    this.setLabel = function(label) { localStorage[labelKey] = label; }
+    this.isHidden = function() { return localStorage[hiddenKey]; }
+    this.hide = function() { localStorage[hiddenKey] = true; }
+    this.show = function() { localStorage.removeItem(hiddenKey); }
+    this.description = function() {
+        return new Date(entry['buildTime']).toLocaleString() + '\n' + entry['platform'] + ' ' + this.label();
+    }
+
+    var labelKey = 'telemetry_label_' + this.id();
+    var hiddenKey = 'telemetry_hide_' + this.id();
+}
+
+function PerfTestMetric(name, metric, unit, isImportant) {
+    var testResults = [];
+    var cachedUnit = null;
+    var cachedScalingFactor = null;
+
+    // We can't do this in TestResult because all results for each test need to share the same unit and the same scaling factor.
+    function computeScalingFactorIfNeeded() {
+        // FIXME: We shouldn't be adjusting units on every test result.
+        // We can only do this on the first test.
+        if (!testResults.length || cachedUnit)
+            return;
+
+        var mean = testResults[0].unscaledMean(); // FIXME: We should look at all values.
+        var kilo = unit == 'bytes' ? 1024 : 1000;
+        if (mean > 10 * kilo * kilo && unit != 'ms') {
+            cachedScalingFactor = 1 / kilo / kilo;
+            cachedUnit = 'M ' + unit;
+        } else if (mean > 10 * kilo) {
+            cachedScalingFactor = 1 / kilo;
+            cachedUnit = unit == 'ms' ? 's' : ('K ' + unit);
+        } else {
+            cachedScalingFactor = 1;
+            cachedUnit = unit;
+        }
+    }
+
+    this.name = function() { return name + ':' + metric; }
+    this.isImportant = isImportant;
+    this.isMemoryTest = function() {
+        return (unit == 'kb' ||
+                unit == 'KB' ||
+                unit == 'MB' ||
+                unit == 'bytes' ||
+                unit == 'count' ||
+                !metric.indexOf('V8.'));
+    }
+    this.addResult = function(newResult) {
+        testResults.push(newResult);
+        cachedUnit = null;
+        cachedScalingFactor = null;
+    }
+    this.results = function() { return testResults; }
+    this.scalingFactor = function() {
+        computeScalingFactorIfNeeded();
+        return cachedScalingFactor;
+    }
+    this.unit = function() {
+        computeScalingFactorIfNeeded();
+        return cachedUnit;
+    }
+    this.biggerIsBetter = function() {
+        if (window.unitToBiggerIsBetter == undefined) {
+            window.unitToBiggerIsBetter = {};
+            var units = JSON.parse(document.getElementById('units-json').textContent);
+            for (var u in units) {
+                if (units[u].improvement_direction == 'up') {
+                    window.unitToBiggerIsBetter[u] = true;
+                }
+            }
+        }
+        return window.unitToBiggerIsBetter[unit];
+    }
+}
+
+function UndeleteManager() {
+    var key = 'telemetry_undeleteIds'
+    var undeleteIds = localStorage[key];
+    if (undeleteIds) {
+        undeleteIds = JSON.parse(undeleteIds);
+    } else {
+        undeleteIds = [];
+    }
+
+    this.ondelete = function(id) {
+        undeleteIds.push(id);
+        localStorage[key] = JSON.stringify(undeleteIds);
+    }
+    this.undeleteMostRecent = function() {
+        if (!this.mostRecentlyDeletedId())
+            return;
+        undeleteIds.pop();
+        localStorage[key] = JSON.stringify(undeleteIds);
+    }
+    this.mostRecentlyDeletedId = function() {
+        if (!undeleteIds.length)
+            return undefined;
+        return undeleteIds[undeleteIds.length-1];
+    }
+}
+var undeleteManager = new UndeleteManager();
+
+var plotColor = 'rgb(230,50,50)';
+var subpointsPlotOptions = {
+    lines: {show:true, lineWidth: 0},
+    color: plotColor,
+    points: {show: true, radius: 1},
+    bars: {show: false}};
+
+var mainPlotOptions = {
+    xaxis: {
+        min: -0.5,
+        tickSize: 1,
+    },
+    crosshair: { mode: 'y' },
+    series: { shadowSize: 0 },
+    bars: {show: true, align: 'center', barWidth: 0.5},
+    lines: { show: false },
+    points: { show: true },
+    grid: {
+        borderWidth: 1,
+        borderColor: '#ccc',
+        backgroundColor: '#fff',
+        hoverable: true,
+        autoHighlight: false,
+    }
+};
+
+var linePlotOptions = {
+    yaxis: { show: false },
+    xaxis: { show: false },
+    lines: { show: true },
+    grid: { borderWidth: 1, borderColor: '#ccc' },
+    colors: [ plotColor ]
+};
+
+var largeLinePlotOptions = {
+    xaxis: {
+        show: true,
+        tickDecimals: 0,
+    },
+    lines: { show: true },
+    grid: { borderWidth: 1, borderColor: '#ccc' },
+    colors: [ plotColor ]
+};
+
+var histogramPlotOptions = {
+    bars: {show: true, fill: 1}
+};
+
+function createPlot(container, test, useLargeLinePlots) {
+    if (test.results()[0].histogramValues) {
+        var section = $('<section><div class="histogram-plots"></div>'
+            + '<div class="histogram-plot-labels"></div>'
+            + '<span class="tooltip"></span></section>');
+        $(container).append(section);
+        attachHistogramPlots(test, section.children('.histogram-plots'));
+    }
+    else if (useLargeLinePlots) {
+        var section = $('<section><div class="large-line-plots"></div>'
+            + '<div class="large-line-plot-labels"></div>'
+            + '<span class="tooltip"></span></section>');
+        $(container).append(section);
+        attachLinePlots(test, section.children('.large-line-plots'), useLargeLinePlots);
+        attachLinePlotLabels(test, section.children('.large-line-plot-labels'));
+    } else {
+        var section = $('<section><div class="plot"></div><div class="line-plots"></div>'
+            + '<span class="tooltip"></span></section>');
+        section.children('.plot').css({'width': (100 * test.results().length + 25) + 'px', 'height': '300px'});
+        $(container).append(section);
+
+        var plotContainer = section.children('.plot');
+        var minIsZero = true;
+        attachPlot(test, plotContainer, minIsZero);
+
+        attachLinePlots(test, section.children('.line-plots'), useLargeLinePlots);
+
+        var tooltip = section.children('.tooltip');
+        plotContainer.bind('plothover', function(event, position, item) {
+            if (item) {
+                var postfix = item.series.id ? ' (' + item.series.id + ')' : '';
+                tooltip.html(item.datapoint[1].toPrecision(4) + postfix);
+                var sectionOffset = $(section).offset();
+                tooltip.css({left: item.pageX - sectionOffset.left - tooltip.outerWidth() / 2, top: item.pageY - sectionOffset.top + 10});
+                tooltip.fadeIn(200);
+            } else
+                tooltip.hide();
+        });
+        plotContainer.mouseout(function() {
+            tooltip.hide();
+        });
+        plotContainer.click(function(event) {
+            event.preventDefault();
+            minIsZero = !minIsZero;
+            attachPlot(test, plotContainer, minIsZero);
+        });
+    }
+    return section;
+}
+
+function attachLinePlots(test, container, useLargeLinePlots) {
+    var results = test.results();
+    var attachedPlot = false;
+
+    if (useLargeLinePlots) {
+        var maximum = 0;
+        for (var i = 0; i < results.length; i++) {
+            var values = results[i].values();
+            if (!values)
+                continue;
+            var local_max = Math.max.apply(Math, values);
+            if (local_max > maximum)
+                maximum = local_max;
+        }
+    }
+
+    for (var i = 0; i < results.length; i++) {
+        container.append('<div></div>');
+        var values = results[i].values();
+        if (!values)
+            continue;
+        attachedPlot = true;
+
+        if (useLargeLinePlots) {
+            var options = $.extend(true, {}, largeLinePlotOptions,
+                               {yaxis: {min: 0.0, max: maximum},
+                                xaxis: {min: 0.0, max: values.length - 1},
+                                points: {show: (values.length < 2) ? true : false}});
+        } else {
+            var options = $.extend(true, {}, linePlotOptions,
+                               {yaxis: {min: Math.min.apply(Math, values) * 0.9, max: Math.max.apply(Math, values) * 1.1},
+                                xaxis: {min: -0.5, max: values.length - 0.5},
+                                points: {show: (values.length < 2) ? true : false}});
+        }
+        $.plot(container.children().last(), [values.map(function(value, index) { return [index, value]; })], options);
+    }
+    if (!attachedPlot)
+        container.children().remove();
+}
+
+function attachHistogramPlots(test, container) {
+    var results = test.results();
+    var attachedPlot = false;
+
+    for (var i = 0; i < results.length; i++) {
+        container.append('<div></div>');
+        var histogram = results[i].histogramValues
+        if (!histogram)
+            continue;
+        attachedPlot = true;
+
+        var buckets = histogram.buckets
+        var bucket;
+        var max_count = 0;
+        for (var j = 0; j < buckets.length; j++) {
+            bucket = buckets[j];
+            max_count = Math.max(max_count, bucket.count);
+        }
+        var xmax = bucket.high * 1.1;
+        var ymax = max_count * 1.1;
+
+        var options = $.extend(true, {}, histogramPlotOptions,
+                           {yaxis: {min: 0.0, max: ymax},
+                            xaxis: {min: histogram.params.min, max: xmax}});
+        var plot = $.plot(container.children().last(), [[]], options);
+        // Flot only supports fixed-width bars and our histogram's buckets are
+        // variable width, so we need to do our own bar drawing.
+        var ctx = plot.getCanvas().getContext("2d");
+        ctx.lineWidth="1";
+        ctx.fillStyle = "rgba(255, 0, 0, 0.2)";
+        ctx.strokeStyle="red";
+        for (var j = 0; j < buckets.length; j++) {
+            bucket = buckets[j];
+            var bl = plot.pointOffset({ x: bucket.low, y: 0});
+            var tr = plot.pointOffset({ x: bucket.high, y: bucket.count});
+            ctx.fillRect(bl.left, bl.top, tr.left - bl.left, tr.top - bl.top);
+            ctx.strokeRect(bl.left, bl.top, tr.left - bl.left, tr.top - bl.top);
+        }
+    }
+    if (!attachedPlot)
+        container.children().remove();
+}
+
+function attachLinePlotLabels(test, container) {
+    var results = test.results();
+    var attachedPlot = false;
+    for (var i = 0; i < results.length; i++) {
+        container.append('<div>' + results[i].run().label() + '</div>');
+    }
+}
+
+function attachPlot(test, plotContainer, minIsZero) {
+    var results = test.results();
+
+    var values = results.reduce(function(values, result, index) {
+        var newValues = result.values();
+        return newValues ? values.concat(newValues.map(function(value) { return [index, value]; })) : values;
+    }, []);
+
+    var plotData = [$.extend(true, {}, subpointsPlotOptions, {data: values})];
+    plotData.push({id: '&mu;', data: results.map(function(result, index) { return [index, result.mean()]; }), color: plotColor});
+
+    var overallMax = Statistics.max(results.map(function(result, index) { return result.max(); }));
+    var overallMin = Statistics.min(results.map(function(result, index) { return result.min(); }));
+    var margin = (overallMax - overallMin) * 0.1;
+    var currentPlotOptions = $.extend(true, {}, mainPlotOptions, {yaxis: {
+        min: minIsZero ? 0 : overallMin - margin,
+        max: minIsZero ? overallMax * 1.1 : overallMax + margin}});
+
+    currentPlotOptions.xaxis.max = results.length - 0.5;
+    currentPlotOptions.xaxis.ticks = results.map(function(result, index) { return [index, result.run().label()]; });
+
+    $.plot(plotContainer, plotData, currentPlotOptions);
+}
+
+function toFixedWidthPrecision(value) {
+    var decimal = value.toFixed(2);
+    return decimal;
+}
+
+function formatPercentage(fraction) {
+    var percentage = fraction * 100;
+    return percentage.toFixed(2) + '%';
+}
+
+function setUpSortClicks(runs)
+{
+    $('#nameColumn').click(sortByName);
+
+    $('#unitColumn').click(sortByUnit);
+
+    runs.forEach(function(run) {
+        $('#' + run.id()).click(sortByResult);
+        $('#' + run.id() + COMPARISON_SUFFIX).click(sortByReference);
+     });
+}
+
+function TestTypeSelector(tests) {
+    this.recognizers = {
+        'Time': function(test) { return !test.isMemoryTest(); },
+        'Memory': function(test) { return test.isMemoryTest(); }
+    };
+    this.testTypeNames = this.generateUsedTestTypeNames(tests);
+    // Default to selecting the first test-type name in the list.
+    this.testTypeName = this.testTypeNames[0];
+}
+
+TestTypeSelector.prototype = {
+    set testTypeName(testTypeName) {
+        this._testTypeName = testTypeName;
+        this.shouldShowTest = this.recognizers[testTypeName];
+    },
+
+    generateUsedTestTypeNames: function(allTests) {
+        var testTypeNames = [];
+
+        for (var recognizedTestName in this.recognizers) {
+            var recognizes = this.recognizers[recognizedTestName];
+            for (var testName in allTests) {
+                var test = allTests[testName];
+                if (recognizes(test)) {
+                    testTypeNames.push(recognizedTestName);
+                    break;
+                }
+            }
+        }
+
+        if (testTypeNames.length === 0) {
+            // No test types we recognize, add 'No Results' with a dummy recognizer.
+            var noResults = 'No Results';
+            this.recognizers[noResults] = function() { return false; };
+            testTypeNames.push(noResults);
+        } else if (testTypeNames.length > 1) {
+            // We have more than one test type, so add 'All' with a recognizer that always succeeds.
+            var allResults = 'All';
+            this.recognizers[allResults] = function() { return true; };
+            testTypeNames.push(allResults);
+        }
+
+        return testTypeNames;
+    },
+
+    buildButtonHTMLForUsedTestTypes: function() {
+        var selectedTestTypeName = this._testTypeName;
+        // Build spans for all recognised test names with the selected test highlighted.
+        return this.testTypeNames.map(function(testTypeName) {
+            var classAttribute = testTypeName === selectedTestTypeName ? ' class=checked' : '';
+            return '<span' + classAttribute + '>' + testTypeName + '</span>';
+        }).join('');
+    }
+};
+
+var topLevelRows;
+var allTableRows;
+
+function displayTable(tests, runs, testTypeSelector, referenceIndex, useLargeLinePlots) {
+    var resultHeaders = runs.map(function(run, index) {
+         var header = '<th id="' + run.id() + '" ' +
+                          'colspan=2 ' +
+                          'title="' + run.description() + '">' +
+                      '<span class="label" ' +
+                          'title="Edit run label">' +
+                          run.label() +
+                      '</span>' +
+                      '<div class="closeButton" ' +
+                          'title="Delete run">' +
+                          '&times;' +
+                      '</div>' +
+                  '</th>';
+                if (index !== referenceIndex) {
+                  header += '<th id="' + run.id() + COMPARISON_SUFFIX + '" ' +
+                                'title="Sort by better/worse">' +
+                                '&Delta;' +
+                                '</th>';
+                }
+         return header;
+    });
+
+    resultHeaders = resultHeaders.join('');
+
+    htmlString = '<thead>' +
+                     '<tr>' +
+                         '<th id="nameColumn">' +
+                             '<div class="openAllButton" ' +
+                                  'title="Open all rows or graphs">' +
+                                 'Open All' +
+                              '</div>' +
+                              '<div class="closeAllButton" ' +
+                                   'title="Close all rows">' +
+                                  'Close All' +
+                              '</div>' +
+                              'Test' +
+                          '</th>' +
+                          '<th id="unitColumn">' +
+                              'Unit' +
+                          '</th>' +
+                          resultHeaders +
+                     '</tr>' +
+                 '</thead>' +
+                 '<tbody>' +
+                 '</tbody>';
+
+    $('#container').html(htmlString);
+
+    var testNames = [];
+    for (testName in tests)
+        testNames.push(testName);
+
+    allTableRows = [];
+    testNames.forEach(function(testName) {
+        var test = tests[testName];
+        if (testTypeSelector.shouldShowTest(test)) {
+            allTableRows.push(new TableRow(runs, test, referenceIndex, useLargeLinePlots));
+        }
+    });
+
+    // Build a list of top level rows with attached children
+    topLevelRows = [];
+    allTableRows.forEach(function(row) {
+        // Add us to top level if we are a top-level row...
+        if (row.hasNoURL) {
+            topLevelRows.push(row);
+            // Add a duplicate child row that holds the graph for the parent
+            var graphHolder = new TableRow(runs, row.test, referenceIndex, useLargeLinePlots);
+            graphHolder.isImportant = true;
+            graphHolder.URL = 'Summary';
+            graphHolder.hideRowData();
+            allTableRows.push(graphHolder);
+            row.addNestedChild(graphHolder);
+            return;
+        }
+
+        // ...or add us to our parent if we have one ...
+        for (var i = 0; i < allTableRows.length; i++) {
+            if (allTableRows[i].isParentOf(row)) {
+                allTableRows[i].addNestedChild(row);
+                return;
+            }
+        }
+
+        // ...otherwise this result is orphaned, display it at top level with a graph
+        row.hasGraph = true;
+        topLevelRows.push(row);
+    });
+
+    buildTable(topLevelRows);
+
+    $('.closeButton').click(function(event) {
+        for (var i = 0; i < runs.length; i++) {
+            if (runs[i].id() == event.target.parentNode.id) {
+                runs[i].hide();
+                undeleteManager.ondelete(runs[i].id());
+                location.reload();
+                break;
+            }
+        }
+        event.stopPropagation();
+    });
+
+    $('.closeAllButton').click(function(event) {
+        for (var i = 0; i < allTableRows.length; i++) {
+            allTableRows[i].closeRow();
+        }
+        event.stopPropagation();
+    });
+
+    $('.openAllButton').click(function(event) {
+        for (var i = 0; i < topLevelRows.length; i++) {
+            topLevelRows[i].openRow();
+        }
+        event.stopPropagation();
+    });
+
+    setUpSortClicks(runs);
+
+    $('.label').click(function(event) {
+        for (var i = 0; i < runs.length; i++) {
+            if (runs[i].id() == event.target.parentNode.id) {
+                $(event.target).replaceWith('<input id="labelEditor" type="text" value="' + runs[i].label()  + '">');
+                $('#labelEditor').focusout(function() {
+                    runs[i].setLabel(this.value);
+                    location.reload();
+                });
+                $('#labelEditor').keypress(function(event) {
+                    if (event.which == 13) {
+                        runs[i].setLabel(this.value);
+                        location.reload();
+                    }
+                });
+                $('#labelEditor').click(function(event) {
+                    event.stopPropagation();
+                });
+                $('#labelEditor').mousedown(function(event) {
+                    event.stopPropagation();
+                });
+                $('#labelEditor').select();
+                break;
+            }
+        }
+        event.stopPropagation();
+    });
+}
+
+function validForSorting(row) {
+    return ($.type(row.sortValue) === 'string') || !isNaN(row.sortValue);
+}
+
+var sortDirection = 1;
+
+function sortRows(rows) {
+    rows.sort(
+        function(rowA,rowB) {
+            if (validForSorting(rowA) !== validForSorting(rowB)) {
+                // Sort valid values upwards when compared to invalid
+                if (validForSorting(rowA)) {
+                    return -1;
+                }
+                if (validForSorting(rowB)) {
+                    return 1;
+                }
+            }
+
+            // Some rows always sort to the top
+            if (rowA.isImportant) {
+                return -1;
+            }
+            if (rowB.isImportant) {
+                return 1;
+            }
+
+            if (rowA.sortValue === rowB.sortValue) {
+                // Sort identical values by name to keep the sort stable,
+                // always keep name alphabetical (even if a & b sort values
+                // are invalid)
+                return rowA.test.name() > rowB.test.name() ? 1 : -1;
+            }
+
+            return rowA.sortValue > rowB.sortValue ? sortDirection : -sortDirection;
+         } );
+
+    // Sort the rows' children
+    rows.forEach(function(row) {
+        sortRows(row.children);
+    });
+}
+
+function buildTable(rows) {
+   rows.forEach(function(row) {
+     row.removeFromPage();
+   });
+
+   sortRows(rows);
+
+   rows.forEach(function(row) {
+     row.addToPage();
+   });
+}
+
+var activeSortHeaderElement = undefined;
+var columnSortDirection = {};
+
+function determineColumnSortDirection(element) {
+  columnDirection = columnSortDirection[element.id];
+
+  if (columnDirection === undefined) {
+    // First time we've sorted this row, default to down
+    columnSortDirection[element.id] = SORT_DOWN_CLASS;
+  } else if (element === activeSortHeaderElement) {
+    // Clicking on same header again, swap direction
+    columnSortDirection[element.id] = (columnDirection === SORT_UP_CLASS) ? SORT_DOWN_CLASS : SORT_UP_CLASS;
+  }
+}
+
+function updateSortDirection(element) {
+    // Remove old header's sort arrow
+    if (activeSortHeaderElement !== undefined) {
+        activeSortHeaderElement.classList.remove(columnSortDirection[activeSortHeaderElement.id]);
+    }
+
+    determineColumnSortDirection(element);
+
+    sortDirection = (columnSortDirection[element.id] === SORT_UP_CLASS) ? 1 : -1;
+
+    // Add new header's sort arrow
+    element.classList.add(columnSortDirection[element.id]);
+    activeSortHeaderElement = element;
+}
+
+function sortByName(event) {
+    updateSortDirection(event.toElement);
+
+    allTableRows.forEach(function(row) {
+       row.prepareToSortByName();
+    });
+
+    buildTable(topLevelRows);
+}
+
+function sortByUnit(event) {
+    updateSortDirection(event.toElement);
+
+    allTableRows.forEach(function(row) {
+        row.prepareToSortByUnit();
+    });
+
+    buildTable(topLevelRows);
+}
+
+function sortByResult(event) {
+    updateSortDirection(event.toElement);
+
+    var runId = event.target.id;
+
+    allTableRows.forEach(function(row) {
+        row.prepareToSortByTestResults(runId);
+    });
+
+    buildTable(topLevelRows);
+}
+
+function sortByReference(event) {
+    updateSortDirection(event.toElement);
+
+    // The element ID has _compare appended to allow us to set up a click
+    // event; strip the _compare suffix to recover the run ID.
+    var runIdWithCompare = event.target.id;
+    var runId = runIdWithCompare.split('_')[0];
+
+    allTableRows.forEach(function(row) {
+        row.prepareToSortRelativeToReference(runId);
+    });
+
+    buildTable(topLevelRows);
+}
+
+function linearRegression(points) {
+    // Implement http://www.easycalculation.com/statistics/learn-correlation.php.
+    // x = magnitude
+    // y = iterations
+    var sumX = 0;
+    var sumY = 0;
+    var sumXSquared = 0;
+    var sumYSquared = 0;
+    var sumXTimesY = 0;
+
+    for (var i = 0; i < points.length; i++) {
+        var x = i;
+        var y = points[i];
+        sumX += x;
+        sumY += y;
+        sumXSquared += x * x;
+        sumYSquared += y * y;
+        sumXTimesY += x * y;
+    }
+
+    var r = (points.length * sumXTimesY - sumX * sumY) /
+        Math.sqrt((points.length * sumXSquared - sumX * sumX) *
+                  (points.length * sumYSquared - sumY * sumY));
+
+    if (isNaN(r) || !isFinite(r))
+        r = 0;
+
+    var slope = (points.length * sumXTimesY - sumX * sumY) / (points.length * sumXSquared - sumX * sumX);
+    var intercept = sumY / points.length - slope * sumX / points.length;
+    return {slope: slope, intercept: intercept, rSquared: r * r};
+}
+
+var warningSign = '<svg viewBox="0 0 100 100" style="width: 18px; height: 18px; vertical-align: bottom;" version="1.1">'
+    + '<polygon fill="red" points="50,10 90,80 10,80 50,10" stroke="red" stroke-width="10" stroke-linejoin="round" />'
+    + '<polygon fill="white" points="47,30 48,29, 50, 28.7, 52,29 53,30 50,60" stroke="white" stroke-width="10" stroke-linejoin="round" />'
+    + '<circle cx="50" cy="73" r="6" fill="white" />'
+    + '</svg>';
+
+function TableRow(runs, test, referenceIndex, useLargeLinePlots) {
+    this.runs = runs;
+    this.test = test;
+    this.referenceIndex = referenceIndex;
+    this.useLargeLinePlots = useLargeLinePlots;
+    this.children = [];
+
+    this.tableRow = $('<tr class="highlight">' +
+                            '<td class="test collapsed" >' +
+                                this.test.name() +
+                            '</td>' +
+                            '<td class="unit">' +
+                                this.test.unit() +
+                            '</td>' +
+                      '</tr>');
+
+    var runIndex = 0;
+    var results = this.test.results();
+    var referenceResult = undefined;
+
+    this.resultIndexMap = {};
+    for (var i = 0; i < results.length; i++) {
+        while (this.runs[runIndex] !== results[i].run())
+            runIndex++;
+        if (runIndex === this.referenceIndex)
+            referenceResult = results[i];
+        this.resultIndexMap[runIndex] = i;
+    }
+    for (var i = 0; i < this.runs.length; i++) {
+        var resultIndex = this.resultIndexMap[i];
+        if (resultIndex === undefined)
+            this.tableRow.append(this.markupForMissingRun(i == this.referenceIndex));
+        else
+            this.tableRow.append(this.markupForRun(results[resultIndex], referenceResult));
+    }
+
+    // Use the test name (without URL) to bind parents and their children
+    var nameAndURL = this.test.name().split('.');
+    var benchmarkName = nameAndURL.shift();
+    this.testName = nameAndURL.shift();
+    this.hasNoURL = (nameAndURL.length === 0);
+
+    if (!this.hasNoURL) {
+        // Re-join the URL
+        this.URL = nameAndURL.join('.');
+    }
+
+    this.isImportant = false;
+    this.hasGraph = false;
+    this.currentIndentationClass = ''
+    this.indentLevel = 0;
+    this.setRowNestedState(COLLAPSED);
+    this.setVisibility(VISIBLE);
+    this.prepareToSortByName();
+}
+
+TableRow.prototype.hideRowData = function() {
+    data = this.tableRow.children('td');
+
+    for (index in data) {
+        if (index > 0) {
+            // Blank out everything except the test name
+            data[index].innerHTML = '';
+        }
+    }
+}
+
+TableRow.prototype.prepareToSortByTestResults = function(runId) {
+    var testResults = this.test.results();
+    // Find the column in this row that matches the runId and prepare to
+    // sort by the mean of that test.
+    for (index in testResults) {
+        sourceId = testResults[index].run().id();
+        if (runId === sourceId) {
+            this.sortValue = testResults[index].mean();
+            return;
+        }
+    }
+    // This row doesn't have any results for the passed runId
+    this.sortValue = undefined;
+}
+
+TableRow.prototype.prepareToSortRelativeToReference = function(runId) {
+    var testResults = this.test.results();
+
+    // Get index of test results that correspond to the reference column.
+    var remappedReferenceIndex = this.resultIndexMap[this.referenceIndex];
+
+    if (remappedReferenceIndex === undefined) {
+        // This test has no results in the reference run.
+        this.sortValue = undefined;
+        return;
+    }
+
+    otherResults = testResults[remappedReferenceIndex];
+
+    // Find the column in this row that matches the runId and prepare to
+    // sort by the difference from the reference.
+    for (index in testResults) {
+        sourceId = testResults[index].run().id();
+        if (runId === sourceId) {
+            this.sortValue = testResults[index].percentDifference(otherResults);
+            if (this.test.biggerIsBetter()) {
+                // Bigger is better here; flip the sign to sort consistently.
+                this.sortValue = -this.sortValue;
+            }
+            return;
+        }
+    }
+    // This row doesn't have any results for the passed runId
+    this.sortValue = undefined;
+}
+
+TableRow.prototype.prepareToSortByUnit = function() {
+    this.sortValue = this.test.unit().toLowerCase();
+}
+
+TableRow.prototype.prepareToSortByName = function() {
+    this.sortValue = this.test.name().toLowerCase();
+}
+
+TableRow.prototype.isParentOf = function(row) {
+    return this.hasNoURL && (this.testName === row.testName);
+}
+
+TableRow.prototype.addNestedChild = function(child) {
+    this.children.push(child);
+
+    // Indent child one step in from parent
+    child.indentLevel = this.indentLevel + INDENTATION;
+    child.hasGraph = true;
+    // Start child off as hidden (i.e. collapsed inside parent)
+    child.setVisibility(INVISIBLE);
+    child.updateIndentation();
+    // Show URL in the title column
+    child.tableRow.children()[0].innerHTML = child.URL;
+    // Set up class to change background colour of nested rows
+    if (child.isImportant) {
+        child.tableRow.addClass('importantNestedRow');
+    } else {
+        child.tableRow.addClass('nestedRow');
+    }
+}
+
+TableRow.prototype.setVisibility = function(visibility) {
+     this.visibility = visibility;
+     this.tableRow[0].style.display = (visibility === INVISIBLE) ? 'none' : '';
+}
+
+TableRow.prototype.setRowNestedState = function(newState) {
+    this.rowState = newState;
+    this.updateIndentation();
+}
+
+TableRow.prototype.updateIndentation = function() {
+    var element = this.tableRow.children('td').first();
+
+    element.removeClass(this.currentIndentationClass);
+
+    this.currentIndentationClass = (this.rowState === COLLAPSED) ? 'collapsed' : 'expanded';
+
+    element[0].style.marginLeft = this.indentLevel.toString() + 'px';
+    element[0].style.float = 'left';
+
+    element.addClass(this.currentIndentationClass);
+}
+
+TableRow.prototype.addToPage = function() {
+    $('#container').children('tbody').last().append(this.tableRow);
+
+    // Set up click callback
+    var owningObject = this;
+    this.tableRow.click(function(event) {
+        event.preventDefault();
+        owningObject.toggle();
+    });
+
+    // Add children to the page too
+    this.children.forEach(function(child) {
+        child.addToPage();
+    });
+}
+
+TableRow.prototype.removeFromPage = function() {
+    // Remove children
+    this.children.forEach(function(child) {
+        child.removeFromPage();
+    });
+    // Remove us
+    this.tableRow.remove();
+}
+
+
+TableRow.prototype.markupForRun = function(result, referenceResult) {
+    var comparisonCell = '';
+    var shouldCompare = result !== referenceResult;
+    if (shouldCompare) {
+        var comparisonText = '';
+        var className = '';
+
+        if (referenceResult) {
+            var percentDifference = referenceResult.percentDifference(result);
+            if (isNaN(percentDifference)) {
+                comparisonText = 'Unknown';
+                className = UNKNOWN_CLASS;
+            } else if (Math.abs(percentDifference) < SMALLEST_PERCENT_DISPLAYED) {
+                comparisonText = 'Equal';
+                // Show equal values in green
+                className = BETTER_CLASS;
+            } else {
+                var better = this.test.biggerIsBetter() ? percentDifference > 0 : percentDifference < 0;
+                comparisonText = formatPercentage(Math.abs(percentDifference)) + (better ? ' Better' : ' Worse');
+                className = better ? BETTER_CLASS : WORSE_CLASS;
+            }
+
+            if (!referenceResult.isStatisticallySignificant(result)) {
+                // Put result in brackets and fade if not statistically significant
+                className += ' fadeOut';
+                comparisonText = '(' + comparisonText + ')';
+            }
+        }
+        comparisonCell = '<td class="comparison ' + className + '">' + comparisonText + '</td>';
+    }
+
+    var values = result.values();
+    var warning = '';
+    var regressionAnalysis = '';
+    if (result.histogramValues) {
+        // Don't calculate regression result for histograms.
+    } else if (values && values.length > 3) {
+        regressionResult = linearRegression(values);
+        regressionAnalysis = 'slope=' + toFixedWidthPrecision(regressionResult.slope)
+            + ', R^2=' + toFixedWidthPrecision(regressionResult.rSquared);
+        if (regressionResult.rSquared > 0.6 && Math.abs(regressionResult.slope) > 0.01) {
+            warning = ' <span class="regression-warning" title="Detected a time dependency with ' + regressionAnalysis + '">' + warningSign + ' </span>';
+        }
+    }
+
+    var referenceClass = shouldCompare ? '' : 'reference';
+
+    var statistics = '&sigma;=' + toFixedWidthPrecision(result.confidenceIntervalDelta()) + ', min=' + toFixedWidthPrecision(result.min())
+     + ', max=' + toFixedWidthPrecision(result.max()) + '\n' + regressionAnalysis;
+
+    var confidence;
+    if (isNaN(result.confidenceIntervalDeltaRatio())) {
+        // Don't bother showing +- NaN as it is meaningless
+        confidence = '';
+    } else {
+        confidence = '&plusmn; ' + formatPercentage(result.confidenceIntervalDeltaRatio());
+    }
+
+    return '<td class="result ' + referenceClass + '" title="' + statistics + '">' + toFixedWidthPrecision(result.mean())
+        + '</td><td class="confidenceIntervalDelta ' + referenceClass + '" title="' + statistics + '">' + confidence + warning + '</td>' + comparisonCell;
+}
+
+TableRow.prototype.markupForMissingRun = function(isReference) {
+    if (isReference) {
+        return '<td colspan=2 class="missingReference">Missing</td>';
+    }
+    return '<td colspan=3 class="missing">Missing</td>';
+}
+
+TableRow.prototype.openRow = function() {
+    if (this.rowState === EXPANDED) {
+        // If we're already expanded, open our children instead
+        this.children.forEach(function(child) {
+            child.openRow();
+        });
+        return;
+    }
+
+    this.setRowNestedState(EXPANDED);
+
+    if (this.hasGraph) {
+        var firstCell = this.tableRow.children('td').first();
+        var plot = createPlot(firstCell, this.test, this.useLargeLinePlots);
+        plot.css({'position': 'absolute', 'z-index': 2});
+        var offset = this.tableRow.offset();
+        offset.left += GRAPH_INDENT;
+        offset.top += this.tableRow.outerHeight();
+        plot.offset(offset);
+        this.tableRow.children('td').css({'padding-bottom': plot.outerHeight() + PADDING_UNDER_GRAPH});
+    }
+
+    this.children.forEach(function(child) {
+        child.setVisibility(VISIBLE);
+    });
+
+    if (this.children.length === 1) {
+        // If we only have a single child...
+        var child = this.children[0];
+        if (child.isImportant) {
+          // ... and it is important (i.e. the summary row) just open it when
+          // parent is opened to save needless clicking
+          child.openRow();
+        }
+    }
+}
+
+TableRow.prototype.closeRow = function() {
+    if (this.rowState === COLLAPSED) {
+        return;
+    }
+
+    this.setRowNestedState(COLLAPSED);
+
+    if (this.hasGraph) {
+        var firstCell = this.tableRow.children('td').first();
+        firstCell.children('section').remove();
+        this.tableRow.children('td').css({'padding-bottom': ''});
+    }
+
+    this.children.forEach(function(child) {
+        // Make children invisible, but leave their collapsed status alone
+        child.setVisibility(INVISIBLE);
+    });
+}
+
+TableRow.prototype.toggle = function() {
+    if (this.rowState === EXPANDED) {
+        this.closeRow();
+    } else {
+        this.openRow();
+    }
+    return false;
+}
+
+function init() {
+    var runs = [];
+    var metrics = {};
+    var deletedRunsById = {};
+    $.each(JSON.parse(document.getElementById('results-json').textContent), function(index, entry) {
+        var run = new TestRun(entry);
+        if (run.isHidden()) {
+            deletedRunsById[run.id()] = run;
+            return;
+        }
+
+        runs.push(run);
+
+        function addTests(tests) {
+            for (var testName in tests) {
+                var rawMetrics = tests[testName].metrics;
+
+                for (var metricName in rawMetrics) {
+                    var fullMetricName = testName + ':' + metricName;
+                    var metric = metrics[fullMetricName];
+                    if (!metric) {
+                        metric = new PerfTestMetric(testName, metricName, rawMetrics[metricName].units, rawMetrics[metricName].important);
+                        metrics[fullMetricName] = metric;
+                    }
+                    // std & degrees_of_freedom could be undefined
+                    metric.addResult(
+                        new TestResult(metric, rawMetrics[metricName].current,
+                            run, rawMetrics[metricName]['std'], rawMetrics[metricName]['degrees_of_freedom']));
+                }
+            }
+        }
+
+        addTests(entry.tests);
+    });
+
+    var useLargeLinePlots = false;
+    var referenceIndex = 0;
+
+    var testTypeSelector = new TestTypeSelector(metrics);
+    var buttonHTML = testTypeSelector.buildButtonHTMLForUsedTestTypes();
+    $('#time-memory').append(buttonHTML);
+
+    $('#scatter-line').bind('change', function(event, checkedElement) {
+        useLargeLinePlots = checkedElement.textContent == 'Line';
+        displayTable(metrics, runs, testTypeSelector, referenceIndex, useLargeLinePlots);
+    });
+
+    runs.map(function(run, index) {
+        $('#reference').append('<span value="' + index + '"' + (index == referenceIndex ? ' class="checked"' : '') + ' title="' + run.description() + '">' + run.label() + '</span>');
+    })
+
+    $('#time-memory').bind('change', function(event, checkedElement) {
+        testTypeSelector.testTypeName = checkedElement.textContent;
+        displayTable(metrics, runs, testTypeSelector, referenceIndex, useLargeLinePlots);
+    });
+
+    $('#reference').bind('change', function(event, checkedElement) {
+        referenceIndex = parseInt(checkedElement.getAttribute('value'));
+        displayTable(metrics, runs, testTypeSelector, referenceIndex, useLargeLinePlots);
+    });
+
+    displayTable(metrics, runs, testTypeSelector, referenceIndex, useLargeLinePlots);
+
+    $('.checkbox').each(function(index, checkbox) {
+        $(checkbox).children('span').click(function(event) {
+            if ($(this).hasClass('checked'))
+                return;
+            $(checkbox).children('span').removeClass('checked');
+            $(this).addClass('checked');
+            $(checkbox).trigger('change', $(this));
+        });
+    });
+
+    runToUndelete = deletedRunsById[undeleteManager.mostRecentlyDeletedId()];
+
+    if (runToUndelete) {
+        $('#undelete').html('Undelete ' + runToUndelete.label());
+        $('#undelete').attr('title', runToUndelete.description());
+        $('#undelete').click(function(event) {
+            runToUndelete.show();
+            undeleteManager.undeleteMostRecent();
+            location.reload();
+        });
+    } else {
+        $('#undelete').hide();
+    }
+}
+
+</script>
+<script id="results-json" type="application/json">%json_results%</script>
+<script id="units-json" type="application/json">%json_units%</script>
+</body>
+</html>
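
A sketch, in Python, of the run-entry shape that init() above reads out of the results-json block; the key names come from the script above, but every concrete value (benchmark name, metric name, numbers) is hypothetical:

    example_run = {
        'buildTime': '2015-11-05T12:00:00.000',  # also used to derive the run id
        'label': 'run_1',
        'platform': 'android',
        'tests': {
            'some_benchmark': {
                'metrics': {
                    'warm_times': {
                        'units': 'ms',
                        'important': True,
                        'current': [12.1, 11.8, 12.4],
                        'std': 0.3,                # optional
                        'degrees_of_freedom': 2,   # optional
                    },
                },
            },
        },
    }
    # %json_results% holds a JSON list of such entries; %json_units% maps unit
    # names to objects like {'improvement_direction': 'up'}, which is what
    # biggerIsBetter() checks.
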
diff --git a/catapult/telemetry/telemetry.gyp b/catapult/telemetry/telemetry.gyp
new file mode 100644
index 0000000..02ecd8d
--- /dev/null
+++ b/catapult/telemetry/telemetry.gyp
@@ -0,0 +1,16 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+  'targets': [
+    {
+      'target_name': 'bitmaptools',
+      'type': 'executable',
+      'sources': [
+        'telemetry/internal/image_processing/bitmaptools.cc',
+      ],
+      'toolsets': ['host'],
+    },
+  ],
+}
diff --git a/catapult/telemetry/telemetry.isolate b/catapult/telemetry/telemetry.isolate
new file mode 100644
index 0000000..829efec
--- /dev/null
+++ b/catapult/telemetry/telemetry.isolate
@@ -0,0 +1,19 @@
+# Copyright (c) 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+  'conditions': [
+    ['OS=="android" or OS=="linux" or OS=="mac" or OS=="win"', {
+      'variables': {
+        'files': [
+          # For now, just include the whole catapult directory.
+          # TODO(nednguyen, aiolos): only include what telemetry needs.
+          # https://github.com/catapult-project/catapult/issues/1953
+          '../',
+          # For Telemetry's screenshot support.
+          '<(PRODUCT_DIR)/bitmaptools<(EXECUTABLE_SUFFIX)',
+        ],
+      },
+    }],
+  ]
+}
diff --git a/catapult/telemetry/telemetry/__init__.py b/catapult/telemetry/telemetry/__init__.py
new file mode 100644
index 0000000..c0636ed
--- /dev/null
+++ b/catapult/telemetry/telemetry/__init__.py
@@ -0,0 +1,59 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A library for cross-platform browser tests."""
+import os
+import sys
+
+
+# Ensure Python >= 2.7.
+if sys.version_info < (2, 7):
+  print >> sys.stderr, 'Need Python 2.7 or greater.'
+  sys.exit(-1)
+
+
+def _JoinPath(*path_parts):
+  return os.path.abspath(os.path.join(*path_parts))
+
+
+def _AddDirToPythonPath(*path_parts):
+  path = _JoinPath(*path_parts)
+  if os.path.isdir(path) and path not in sys.path:
+    # Some call sites that use Telemetry assume that sys.path[0] is the
+    # directory containing the script, so we add these extra paths right
+    # after sys.path[0].
+    sys.path.insert(1, path)
+
+
+# Add Catapult dependencies to our path.
+# util depends on catapult_base, so we can't use it to get the catapult dir.
+_CATAPULT_DIR = os.path.join(
+    os.path.dirname(os.path.abspath(__file__)), '..', '..')
+_AddDirToPythonPath(_CATAPULT_DIR, 'catapult_base')
+_AddDirToPythonPath(_CATAPULT_DIR, 'dependency_manager')
+_AddDirToPythonPath(_CATAPULT_DIR, 'devil')
+_AddDirToPythonPath(_CATAPULT_DIR, 'tracing')
+_AddDirToPythonPath(_CATAPULT_DIR, 'common', 'py_trace_event')
+
+
+from telemetry.core import util
+from telemetry.internal.util import global_hooks
+
+# Add Catapult third party dependencies into our path.
+_AddDirToPythonPath(util.GetCatapultThirdPartyDir(), 'typ')
+
+# Add Telemetry third party dependencies into our path.
+_AddDirToPythonPath(util.GetTelemetryThirdPartyDir(), 'altgraph')
+_AddDirToPythonPath(util.GetTelemetryThirdPartyDir(), 'mock')
+_AddDirToPythonPath(util.GetTelemetryThirdPartyDir(), 'modulegraph')
+_AddDirToPythonPath(util.GetTelemetryThirdPartyDir(), 'mox3')
+_AddDirToPythonPath(util.GetTelemetryThirdPartyDir(), 'pexpect')
+_AddDirToPythonPath(util.GetTelemetryThirdPartyDir(), 'png')
+_AddDirToPythonPath(util.GetTelemetryThirdPartyDir(), 'pyfakefs')
+_AddDirToPythonPath(util.GetTelemetryThirdPartyDir(), 'pyserial')
+_AddDirToPythonPath(util.GetTelemetryThirdPartyDir(), 'webpagereplay')
+_AddDirToPythonPath(util.GetTelemetryThirdPartyDir(), 'websocket-client')
+
+# Install Telemetry global hooks.
+global_hooks.InstallHooks()
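
A minimal sketch of the insertion behaviour _AddDirToPythonPath relies on above: the entry script's directory stays at sys.path[0] and the dependency directories land immediately after it (the path shown is hypothetical):

    import sys

    script_dir = sys.path[0]  # untouched by the insert below
    sys.path.insert(1, '/hypothetical/catapult/catapult_base')
    assert sys.path[0] == script_dir
    assert sys.path[1] == '/hypothetical/catapult/catapult_base'
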
diff --git a/catapult/telemetry/telemetry/android/__init__.py b/catapult/telemetry/telemetry/android/__init__.py
new file mode 100644
index 0000000..6bd5e3a
--- /dev/null
+++ b/catapult/telemetry/telemetry/android/__init__.py
@@ -0,0 +1,6 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.android.android_story import AndroidStory
+from telemetry.android.shared_android_state import SharedAndroidState
diff --git a/catapult/telemetry/telemetry/android/android_story.py b/catapult/telemetry/telemetry/android/android_story.py
new file mode 100644
index 0000000..20f7e93
--- /dev/null
+++ b/catapult/telemetry/telemetry/android/android_story.py
@@ -0,0 +1,28 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.android import shared_android_state
+from telemetry import story
+
+class AndroidStory(story.Story):
+  def __init__(self, start_intent, is_app_ready_predicate=None,
+               name='', labels=None, is_local=False):
+    """Creates a new story for Android app.
+
+    Args:
+      start_intent: See AndroidPlatform.LaunchAndroidApplication.
+      is_app_ready_predicate: See AndroidPlatform.LaunchAndroidApplication.
+      name: See Story.__init__.
+      labels: See Story.__init__.
+      is_local: See Story.__init__.
+    """
+    super(AndroidStory, self).__init__(
+        shared_android_state.SharedAndroidState, name=name, labels=labels,
+        is_local=is_local)
+    self.start_intent = start_intent
+    self.is_app_ready_predicate = is_app_ready_predicate
+
+  def Run(self, shared_state):
+    """Execute the interactions with the applications."""
+    raise NotImplementedError
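AndroidStory.Run is deliberately abstract; concrete stories subclass it and implement the interactions to be measured. A hedged sketch of such a subclass (the class name, story name, and coordinates below are illustrative, not part of Telemetry):

from telemetry.android import android_story


class ExampleScrollStory(android_story.AndroidStory):
  """Hypothetical story: launch an app and scroll its main screen."""

  def __init__(self, start_intent):
    super(ExampleScrollStory, self).__init__(start_intent,
                                             name='example_scroll')

  def Run(self, shared_state):
    # shared_state is a SharedAndroidState; its platform exposes the
    # AndroidActionRunner used below (coordinates are illustrative only).
    runner = shared_state.platform.android_action_runner
    runner.SmoothScrollBy(500, 1000, 'down', 800)
    runner.Wait(1)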
diff --git a/catapult/telemetry/telemetry/android/shared_android_state.py b/catapult/telemetry/telemetry/android/shared_android_state.py
new file mode 100644
index 0000000..bb813bd
--- /dev/null
+++ b/catapult/telemetry/telemetry/android/shared_android_state.py
@@ -0,0 +1,78 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+from telemetry.core import android_platform
+from telemetry.core import platform
+from telemetry.internal.platform import android_device
+from telemetry import story as story_module
+from telemetry.web_perf import timeline_based_measurement
+
+
+class SharedAndroidState(story_module.SharedState):
+  """Manage test state/transitions across multiple android.AndroidStory's.
+
+  WARNING: this class is not ready for public consumption.
+  Email telemetry@chromium.org if you feel like you must use it.
+  """
+
+  def __init__(self, test, finder_options, story_set):
+    """This method is styled on unittest.TestCase.setUpClass.
+
+    Args:
+      test: a web_perf.TimelineBasedMeasurement instance.
+      finder_options: a BrowserFinderOptions instance with command line
+          options.
+      story_set: a story.StorySet instance.
+    """
+    super(SharedAndroidState, self).__init__(test, finder_options, story_set)
+    if not isinstance(
+        test, timeline_based_measurement.TimelineBasedMeasurement):
+      raise ValueError(
+          'SharedAndroidState only accepts TimelineBasedMeasurement tests'
+          ' (not %s).' % test.__class__)
+    self._test = test
+    self._finder_options = finder_options
+    self._android_app = None
+    self._current_story = None
+    device = android_device.GetDevice(finder_options)
+    assert device, 'Android device required.'
+    self._android_platform = platform.GetPlatformForDevice(
+        device, finder_options)
+    assert self._android_platform, 'Unable to create android platform.'
+    assert isinstance(
+        self._android_platform, android_platform.AndroidPlatform)
+
+  @property
+  def app(self):
+    return self._android_app
+
+  @property
+  def platform(self):
+    return self._android_platform
+
+  def WillRunStory(self, story):
+    assert not self._android_app
+    self._current_story = story
+    self._android_app = self._android_platform.LaunchAndroidApplication(
+        story.start_intent, story.is_app_ready_predicate)
+    self._test.WillRunStory(self._android_platform.tracing_controller)
+
+  def CanRunStory(self, story):
+    """This does not apply to android app stories."""
+    return True
+
+  def RunStory(self, results):
+    self._current_story.Run(self)
+    self._test.Measure(self._android_platform.tracing_controller, results)
+
+  def DidRunStory(self, results):
+    self._test.DidRunStory(self._android_platform.tracing_controller)
+    if self._android_app:
+      self._android_app.Close()
+      self._android_app = None
+
+  def TearDownState(self):
+    """Tear down anything created in the __init__ method that is not needed.
+
+    Currently, there is no clean-up needed from SharedAndroidState.__init__.
+    """
+    pass
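For orientation, story_runner drives a SharedState implementation through WillRunStory, CanRunStory, RunStory, DidRunStory and finally TearDownState. A simplified sketch of that loop (the real runner adds retries, results plumbing, and exception handling):

def run_stories(shared_state, stories, results):
  # Simplified driver loop; not the real story_runner.
  try:
    for story in stories:
      shared_state.WillRunStory(story)       # launches the Android app
      if not shared_state.CanRunStory(story):
        continue
      shared_state.RunStory(results)         # story.Run(self) + Measure()
      shared_state.DidRunStory(results)      # closes the app
  finally:
    shared_state.TearDownState()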
diff --git a/catapult/telemetry/telemetry/benchmark.py b/catapult/telemetry/telemetry/benchmark.py
new file mode 100644
index 0000000..44511e7
--- /dev/null
+++ b/catapult/telemetry/telemetry/benchmark.py
@@ -0,0 +1,251 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import optparse
+
+from telemetry import decorators
+from telemetry.internal import story_runner
+from telemetry.internal.util import command_line
+from telemetry.page import page_test
+from telemetry.web_perf import timeline_based_measurement
+
+Disabled = decorators.Disabled
+Enabled = decorators.Enabled
+
+
+class InvalidOptionsError(Exception):
+  """Raised for invalid benchmark options."""
+  pass
+
+
+class BenchmarkMetadata(object):
+  def __init__(self, name, description='', rerun_options=None):
+    self._name = name
+    self._description = description
+    self._rerun_options = rerun_options
+
+  @property
+  def name(self):
+    return self._name
+
+  @property
+  def description(self):
+    return self._description
+
+  @property
+  def rerun_options(self):
+    return self._rerun_options
+
+  def AsDict(self):
+    return {
+      'type': 'telemetry_benchmark',
+      'name': self._name,
+      'description': self._description,
+      'rerun_options': self._rerun_options,
+    }
+
+
+class Benchmark(command_line.Command):
+  """Base class for a Telemetry benchmark.
+
+  A benchmark packages a measurement and a PageSet together.
+  Benchmarks default to using TimelineBasedMeasurement (TBM) unless you
+  override the value of Benchmark.test or the CreatePageTest method.
+
+  New benchmarks should override CreateStorySet.
+  """
+  options = {}
+  page_set = None
+  test = timeline_based_measurement.TimelineBasedMeasurement
+
+  def __init__(self, max_failures=None):
+    """Creates a new Benchmark.
+
+    Args:
+      max_failures: The number of story run failures allowed before bailing
+          out of subsequent story runs. If None, we never bail.
+    """
+    self._max_failures = max_failures
+    self._has_original_tbm_options = (
+        self.CreateTimelineBasedMeasurementOptions.__func__ ==
+        Benchmark.CreateTimelineBasedMeasurementOptions.__func__)
+    has_original_create_page_test = (
+        self.CreatePageTest.__func__ == Benchmark.CreatePageTest.__func__)
+    assert self._has_original_tbm_options or has_original_create_page_test, (
+        'Cannot override both CreatePageTest and '
+        'CreateTimelineBasedMeasurementOptions.')
+
+  # pylint: disable=unused-argument
+  @classmethod
+  def ShouldDisable(cls, possible_browser):
+    """Override this method to disable a benchmark under specific conditions.
+
+    Supports logic too complex for simple Enabled and Disabled decorators.
+    Decorators are still respected when this function returns False.
+    """
+    return False
+
+  def Run(self, finder_options):
+    """Do not override this method."""
+    return story_runner.RunBenchmark(self, finder_options)
+
+  @property
+  def max_failures(self):
+    return self._max_failures
+
+  @classmethod
+  def Name(cls):
+    return '%s.%s' % (cls.__module__.split('.')[-1], cls.__name__)
+
+  @classmethod
+  def ShouldTearDownStateAfterEachStoryRun(cls):
+    """Override this method to tear down state after each story run.
+
+    Tearing down all state after each story run (e.g. clearing profiles,
+    stopping the browser, stopping local servers) means the browser is not
+    reused across stories. This is particularly useful for measuring the
+    browser startup cost as part of each story.
+
+    This should only be used by TimelineBasedMeasurement (TBM) benchmarks,
+    not by PageTest-based benchmarks.
+    """
+    return False
+
+  @classmethod
+  def AddCommandLineArgs(cls, parser):
+    group = optparse.OptionGroup(parser, '%s test options' % cls.Name())
+    cls.AddBenchmarkCommandLineArgs(group)
+
+    if cls.HasTraceRerunDebugOption():
+      group.add_option(
+          '--rerun-with-debug-trace',
+          action='store_true',
+          help='Rerun option that enables more extensive tracing.')
+
+    if group.option_list:
+      parser.add_option_group(group)
+
+  @classmethod
+  def AddBenchmarkCommandLineArgs(cls, group):
+    del group  # unused
+
+  @classmethod
+  def HasTraceRerunDebugOption(cls):
+    return False
+
+  def GetTraceRerunCommands(self):
+    if self.HasTraceRerunDebugOption():
+      return [['Debug Trace', '--rerun-with-debug-trace']]
+    return []
+
+  def SetupTraceRerunOptions(self, browser_options, tbm_options):
+    if self.HasTraceRerunDebugOption():
+      if browser_options.rerun_with_debug_trace:
+        self.SetupBenchmarkDebugTraceRerunOptions(tbm_options)
+      else:
+        self.SetupBenchmarkDefaultTraceRerunOptions(tbm_options)
+
+  def SetupBenchmarkDefaultTraceRerunOptions(self, tbm_options):
+    """Setup tracing categories associated with default trace option."""
+
+  def SetupBenchmarkDebugTraceRerunOptions(self, tbm_options):
+    """Setup tracing categories associated with debug trace option."""
+
+  @classmethod
+  def SetArgumentDefaults(cls, parser):
+    default_values = parser.get_default_values()
+    invalid_options = [
+        o for o in cls.options if not hasattr(default_values, o)]
+    if invalid_options:
+      raise InvalidOptionsError('Invalid benchmark options: %s' %
+                                ', '.join(invalid_options))
+    parser.set_defaults(**cls.options)
+
+  @classmethod
+  def ProcessCommandLineArgs(cls, parser, args):
+    pass
+
+  # pylint: disable=unused-argument
+  @classmethod
+  def ValueCanBeAddedPredicate(cls, value, is_first_result):
+    """Returns whether |value| can be added to the test results.
+
+    Override this method to customize the logic of adding values to test
+    results.
+
+    Args:
+      value: a value.Value instance (except failure.FailureValue,
+        skip.SkipValue or trace.TraceValue which will always be added).
+      is_first_result: True if |value| is the first result for its
+          corresponding story.
+
+    Returns:
+      True if |value| should be added to the test results.
+      Otherwise, it returns False.
+    """
+    return True
+
+  def CustomizeBrowserOptions(self, options):
+    """Add browser options that are required by this benchmark."""
+
+  def GetMetadata(self):
+    return BenchmarkMetadata(
+        self.Name(), self.__doc__, self.GetTraceRerunCommands())
+
+  def CreateTimelineBasedMeasurementOptions(self):
+    """Return the TimelineBasedMeasurementOptions for this Benchmark.
+
+    Override this method to configure a TimelineBasedMeasurement benchmark.
+    Otherwise, override CreatePageTest for PageTest tests. Do not override
+    both methods.
+    """
+    return timeline_based_measurement.Options()
+
+  def CreatePageTest(self, options):  # pylint: disable=unused-argument
+    """Return the PageTest for this Benchmark.
+
+    Override this method for PageTest tests.
+    Otherwise, override CreateTimelineBasedMeasurementOptions to configure
+    TimelineBasedMeasurement tests. Do not override both methods.
+
+    Args:
+      options: a browser_options.BrowserFinderOptions instance
+    Returns:
+      |test()| if |test| is a PageTest class.
+      Otherwise, a TimelineBasedMeasurement instance.
+    """
+    is_page_test = issubclass(self.test, page_test.PageTest)
+    is_tbm = self.test == timeline_based_measurement.TimelineBasedMeasurement
+    if not is_page_test and not is_tbm:
+      raise TypeError('"%s" is not a PageTest or a TimelineBasedMeasurement.' %
+                      self.test.__name__)
+    if is_page_test:
+      assert self._has_original_tbm_options, (
+          'Cannot override CreateTimelineBasedMeasurementOptions '
+          'with a PageTest.')
+      return self.test()  # pylint: disable=no-value-for-parameter
+
+    opts = self.CreateTimelineBasedMeasurementOptions()
+    self.SetupTraceRerunOptions(options, opts)
+    return timeline_based_measurement.TimelineBasedMeasurement(opts)
+
+  def CreateStorySet(self, options):
+    """Creates the instance of StorySet used to run the benchmark.
+
+    Can be overridden by subclasses.
+    """
+    del options  # unused
+    # TODO(aiolos, nednguyen, eakufner): replace class attribute page_set with
+    # story_set.
+    if not self.page_set:
+      raise NotImplementedError('This test has no "page_set" attribute.')
+    return self.page_set()  # pylint: disable=not-callable
+
+
+def AddCommandLineArgs(parser):
+  story_runner.AddCommandLineArgs(parser)
+
+
+def ProcessCommandLineArgs(parser, args):
+  story_runner.ProcessCommandLineArgs(parser, args)
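A minimal sketch of a concrete benchmark built on this class: it keeps the default TimelineBasedMeasurement test and only overrides Name and CreateStorySet. The benchmark name and story are made up for illustration:

from telemetry import benchmark
from telemetry import page
from telemetry import story


class ExampleBenchmark(benchmark.Benchmark):
  """Hypothetical benchmark that keeps the default TBM measurement."""

  @classmethod
  def Name(cls):
    return 'example.blank_page'

  def CreateStorySet(self, options):
    del options  # unused
    story_set = story.StorySet()
    story_set.AddStory(page.Page(url='about:blank'))
    return story_set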
diff --git a/catapult/telemetry/telemetry/benchmark_run_unittest.py b/catapult/telemetry/telemetry/benchmark_run_unittest.py
new file mode 100644
index 0000000..215ee5d
--- /dev/null
+++ b/catapult/telemetry/telemetry/benchmark_run_unittest.py
@@ -0,0 +1,114 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry import benchmark as benchmark_module
+from telemetry import page as page_module
+from telemetry.page import page_test
+from telemetry import story as story_module
+from telemetry.testing import fakes
+import mock
+
+
+# pylint: disable=abstract-method
+class DummyPageTest(page_test.PageTest):
+  def __init__(self):
+    super(DummyPageTest, self).__init__()
+    # Without disabling the above warning, pylint complains that
+    # ValidateAndMeasurePage is abstract; but defining it would make pylint
+    # complain that its definition is overridden here.
+    self.ValidateAndMeasurePage = mock.Mock()
+
+
+# More end-to-end tests of Benchmark, shared_page_state and associated
+# classes using telemetry.testing.fakes, to avoid needing to construct
+# a real browser instance.
+
+class FakePage(page_module.Page):
+  def __init__(self, page_set):
+    super(FakePage, self).__init__(
+      url='http://nonexistentserver.com/nonexistentpage.html',
+      page_set=page_set,
+      shared_page_state_class=fakes.FakeSharedPageState)
+    self.RunNavigateSteps = mock.Mock()
+    self.RunPageInteractions = mock.Mock()
+
+class FakeBenchmark(benchmark_module.Benchmark):
+  def __init__(self, max_failures=None):
+    super(FakeBenchmark, self).__init__(max_failures)
+    self._fake_pages = []
+    self._fake_story_set = story_module.StorySet()
+    self._created_story_set = False
+    self.validator = DummyPageTest()
+
+  def CreatePageTest(self, options):
+    return self.validator
+
+  def GetFakeStorySet(self):
+    return self._fake_story_set
+
+  def AddFakePage(self, page):
+    if self._created_story_set:
+      raise Exception('Can not add any more fake pages')
+    self._fake_pages.append(page)
+
+  def CreateStorySet(self, options):
+    if self._created_story_set:
+      raise Exception('Can only create the story set once per FakeBenchmark')
+    for page in self._fake_pages:
+      self._fake_story_set.AddStory(page)
+    self._created_story_set = True
+    return self._fake_story_set
+
+
+class FailingPage(FakePage):
+  def __init__(self, page_set):
+    super(FailingPage, self).__init__(page_set)
+    self.RunNavigateSteps.side_effect = Exception('Deliberate exception')
+
+
+class BenchmarkRunTest(unittest.TestCase):
+  def setupBenchmark(self):
+    finder_options = fakes.CreateBrowserFinderOptions()
+    finder_options.browser_options.platform = fakes.FakeLinuxPlatform()
+    finder_options.output_formats = ['none']
+    finder_options.suppress_gtest_report = True
+    finder_options.output_dir = None
+    finder_options.upload_bucket = 'public'
+    finder_options.upload_results = False
+    benchmarkclass = FakeBenchmark
+    parser = finder_options.CreateParser()
+    benchmark_module.AddCommandLineArgs(parser)
+    benchmarkclass.AddCommandLineArgs(parser)
+    options, _ = parser.parse_args([])
+    benchmark_module.ProcessCommandLineArgs(parser, options)
+    benchmarkclass.ProcessCommandLineArgs(parser, options)
+    benchmark = benchmarkclass()
+    return benchmark, finder_options
+
+  def testPassingPage(self):
+    benchmark, finder_options = self.setupBenchmark()
+    manager = mock.Mock()
+    page = FakePage(benchmark.GetFakeStorySet())
+    page.RunNavigateSteps = manager.page.RunNavigateSteps
+    page.RunPageInteractions = manager.page.RunPageInteractions
+    benchmark.validator.ValidateAndMeasurePage = (
+      manager.validator.ValidateAndMeasurePage)
+    benchmark.AddFakePage(page)
+    self.assertEqual(benchmark.Run(finder_options), 0,
+                     'Test should run with no errors')
+    expected = [mock.call.page.RunNavigateSteps(mock.ANY),
+                mock.call.page.RunPageInteractions(mock.ANY),
+                mock.call.validator.ValidateAndMeasurePage(
+                  page, mock.ANY, mock.ANY)]
+    self.assertTrue(manager.mock_calls == expected)
+
+
+  def testFailingPage(self):
+    benchmark, finder_options = self.setupBenchmark()
+    page = FailingPage(benchmark.GetFakeStorySet())
+    benchmark.AddFakePage(page)
+    self.assertNotEqual(benchmark.Run(finder_options), 0, 'Test should fail')
+    self.assertFalse(page.RunPageInteractions.called)
diff --git a/catapult/telemetry/telemetry/benchmark_runner.py b/catapult/telemetry/telemetry/benchmark_runner.py
new file mode 100644
index 0000000..641359d
--- /dev/null
+++ b/catapult/telemetry/telemetry/benchmark_runner.py
@@ -0,0 +1,419 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Parses the command line, discovers the appropriate benchmarks, and runs them.
+
+Handles benchmark configuration, but all the logic for
+actually running the benchmark is in Benchmark and PageRunner."""
+
+import argparse
+import hashlib
+import json
+import logging
+import os
+import sys
+
+
+# We need to set the logging format here to make sure that no other module
+# imported by telemetry sets the logging format before this, which would
+# make this call a no-op.
+# (See: https://docs.python.org/2/library/logging.html#logging.basicConfig)
+logging.basicConfig(
+    format='(%(levelname)s) %(asctime)s %(module)s.%(funcName)s:%(lineno)d  '
+           '%(message)s')
+
+
+from telemetry import benchmark
+from telemetry.core import discover
+from telemetry import decorators
+from telemetry.internal.browser import browser_finder
+from telemetry.internal.browser import browser_options
+from telemetry.internal.util import binary_manager
+from telemetry.internal.util import command_line
+from telemetry.internal.util import ps_util
+from telemetry.util import matching
+from telemetry import project_config
+
+
+# TODO(aiolos): Remove this once clients move over to project_config version.
+ProjectConfig = project_config.ProjectConfig
+
+
+def _IsBenchmarkEnabled(benchmark_class, possible_browser):
+  return (issubclass(benchmark_class, benchmark.Benchmark) and
+          not benchmark_class.ShouldDisable(possible_browser) and
+          decorators.IsEnabled(benchmark_class, possible_browser)[0])
+
+
+def PrintBenchmarkList(benchmarks, possible_browser, output_pipe=sys.stdout):
+  """ Print benchmarks that are not filtered in the same order of benchmarks in
+  the |benchmarks| list.
+
+  Args:
+    benchmarks: the list of benchmarks to be printed (in the same order of the
+      list).
+    possible_browser: the possible_browser instance that's used for checking
+      which benchmarks are enabled.
+    output_pipe: the stream in which benchmarks are printed on.
+  """
+  if not benchmarks:
+    print >> output_pipe, 'No benchmarks found!'
+    return
+  b = None  # Needed to stop pylint complaining about an undefined variable.
+  if any(not issubclass(b, benchmark.Benchmark) for b in benchmarks):
+    assert False, '|benchmarks| param contains a non-benchmark class: %s' % b
+
+  # Align the benchmark names to the longest one.
+  format_string = '  %%-%ds %%s' % max(len(b.Name()) for b in benchmarks)
+  disabled_benchmarks = []
+
+  print >> output_pipe, 'Available benchmarks %sare:' % (
+      'for %s ' % possible_browser.browser_type if possible_browser else '')
+
+  # Sort the benchmarks by benchmark name.
+  benchmarks = sorted(benchmarks, key=lambda b: b.Name())
+  for b in benchmarks:
+    if not possible_browser or _IsBenchmarkEnabled(b, possible_browser):
+      print >> output_pipe, format_string % (b.Name(), b.Description())
+    else:
+      disabled_benchmarks.append(b)
+
+  if disabled_benchmarks:
+    print >> output_pipe, (
+        '\nDisabled benchmarks for %s are (force run with -d):' %
+        possible_browser.browser_type)
+    for b in disabled_benchmarks:
+      print >> output_pipe, format_string % (b.Name(), b.Description())
+  print >> output_pipe, (
+      'Pass --browser to list benchmarks for another browser.\n')
+
+
+class Help(command_line.OptparseCommand):
+  """Display help information about a command"""
+
+  usage = '[command]'
+
+  def __init__(self, commands):
+    self._all_commands = commands
+
+  def Run(self, args):
+    if len(args.positional_args) == 1:
+      commands = _MatchingCommands(args.positional_args[0], self._all_commands)
+      if len(commands) == 1:
+        command = commands[0]
+        parser = command.CreateParser()
+        command.AddCommandLineArgs(parser, None)
+        parser.print_help()
+        return 0
+
+    print >> sys.stderr, ('usage: %s [command] [<options>]' % _ScriptName())
+    print >> sys.stderr, 'Available commands are:'
+    for command in self._all_commands:
+      print >> sys.stderr, '  %-10s %s' % (
+          command.Name(), command.Description())
+    print >> sys.stderr, ('"%s help <command>" to see usage information '
+                          'for a specific command.' % _ScriptName())
+    return 0
+
+
+class List(command_line.OptparseCommand):
+  """Lists the available benchmarks"""
+
+  usage = '[benchmark_name] [<options>]'
+
+  @classmethod
+  def CreateParser(cls):
+    options = browser_options.BrowserFinderOptions()
+    parser = options.CreateParser('%%prog %s %s' % (cls.Name(), cls.usage))
+    return parser
+
+  @classmethod
+  def AddCommandLineArgs(cls, parser, _):
+    parser.add_option('-j', '--json-output-file', type='string')
+    parser.add_option('-n', '--num-shards', type='int', default=1)
+
+  @classmethod
+  def ProcessCommandLineArgs(cls, parser, args, environment):
+    if not args.positional_args:
+      args.benchmarks = _Benchmarks(environment)
+    elif len(args.positional_args) == 1:
+      args.benchmarks = _MatchBenchmarkName(args.positional_args[0],
+                                            environment, exact_matches=False)
+    else:
+      parser.error('Must provide at most one benchmark name.')
+
+  def Run(self, args):
+    possible_browser = browser_finder.FindBrowser(args)
+    if args.browser_type in (
+        'release', 'release_x64', 'debug', 'debug_x64', 'canary',
+        'android-chromium', 'android-chrome'):
+      args.browser_type = 'reference'
+      possible_reference_browser = browser_finder.FindBrowser(args)
+    else:
+      possible_reference_browser = None
+    if args.json_output_file:
+      with open(args.json_output_file, 'w') as f:
+        f.write(_GetJsonBenchmarkList(possible_browser,
+                                      possible_reference_browser,
+                                      args.benchmarks, args.num_shards))
+    else:
+      PrintBenchmarkList(args.benchmarks, possible_browser)
+    return 0
+
+
+class Run(command_line.OptparseCommand):
+  """Run one or more benchmarks (default)"""
+
+  usage = 'benchmark_name [page_set] [<options>]'
+
+  @classmethod
+  def CreateParser(cls):
+    options = browser_options.BrowserFinderOptions()
+    parser = options.CreateParser('%%prog %s %s' % (cls.Name(), cls.usage))
+    return parser
+
+  @classmethod
+  def AddCommandLineArgs(cls, parser, environment):
+    benchmark.AddCommandLineArgs(parser)
+
+    # Allow benchmarks to add their own command line options.
+    matching_benchmarks = []
+    for arg in sys.argv[1:]:
+      matching_benchmarks += _MatchBenchmarkName(arg, environment)
+
+    if matching_benchmarks:
+      # TODO(dtu): After move to argparse, add command-line args for all
+      # benchmarks to subparser. Using subparsers will avoid duplicate
+      # arguments.
+      matching_benchmark = matching_benchmarks.pop()
+      matching_benchmark.AddCommandLineArgs(parser)
+      # The benchmark's options override the defaults!
+      matching_benchmark.SetArgumentDefaults(parser)
+
+  @classmethod
+  def ProcessCommandLineArgs(cls, parser, args, environment):
+    all_benchmarks = _Benchmarks(environment)
+    if not args.positional_args:
+      possible_browser = (
+          browser_finder.FindBrowser(args) if args.browser_type else None)
+      PrintBenchmarkList(all_benchmarks, possible_browser)
+      sys.exit(-1)
+
+    input_benchmark_name = args.positional_args[0]
+    matching_benchmarks = _MatchBenchmarkName(input_benchmark_name, environment)
+    if not matching_benchmarks:
+      print >> sys.stderr, 'No benchmark named "%s".' % input_benchmark_name
+      print >> sys.stderr
+      most_likely_matched_benchmarks = matching.GetMostLikelyMatchedObject(
+          all_benchmarks, input_benchmark_name, lambda x: x.Name())
+      if most_likely_matched_benchmarks:
+        print >> sys.stderr, 'Do you mean any of those benchmarks below?'
+        PrintBenchmarkList(most_likely_matched_benchmarks, None, sys.stderr)
+      sys.exit(-1)
+
+    if len(matching_benchmarks) > 1:
+      print >> sys.stderr, ('Multiple benchmarks named "%s".' %
+                            input_benchmark_name)
+      print >> sys.stderr, 'Did you mean one of these?'
+      print >> sys.stderr
+      PrintBenchmarkList(matching_benchmarks, None, sys.stderr)
+      sys.exit(-1)
+
+    benchmark_class = matching_benchmarks.pop()
+    if len(args.positional_args) > 1:
+      parser.error('Too many arguments.')
+
+    assert issubclass(benchmark_class, benchmark.Benchmark), (
+        'Trying to run a non-Benchmark?!')
+
+    benchmark.ProcessCommandLineArgs(parser, args)
+    benchmark_class.ProcessCommandLineArgs(parser, args)
+
+    cls._benchmark = benchmark_class
+
+  def Run(self, args):
+    return min(255, self._benchmark().Run(args))
+
+
+def _ScriptName():
+  return os.path.basename(sys.argv[0])
+
+
+def _MatchingCommands(string, commands):
+  return [command for command in commands
+         if command.Name().startswith(string)]
+
+@decorators.Cache
+def _Benchmarks(environment):
+  benchmarks = []
+  for search_dir in environment.benchmark_dirs:
+    benchmarks += discover.DiscoverClasses(search_dir,
+                                           environment.top_level_dir,
+                                           benchmark.Benchmark,
+                                           index_by_class_name=True).values()
+  return benchmarks
+
+def _MatchBenchmarkName(input_benchmark_name, environment, exact_matches=True):
+  def _Matches(input_string, search_string):
+    if search_string.startswith(input_string):
+      return True
+    for part in search_string.split('.'):
+      if part.startswith(input_string):
+        return True
+    return False
+
+  # Exact matching.
+  if exact_matches:
+    # Don't add aliases to search dict, only allow exact matching for them.
+    if input_benchmark_name in environment.benchmark_aliases:
+      exact_match = environment.benchmark_aliases[input_benchmark_name]
+    else:
+      exact_match = input_benchmark_name
+
+    for benchmark_class in _Benchmarks(environment):
+      if exact_match == benchmark_class.Name():
+        return [benchmark_class]
+    return []
+
+  # Fuzzy matching.
+  return [benchmark_class for benchmark_class in _Benchmarks(environment)
+          if _Matches(input_benchmark_name, benchmark_class.Name())]
+
+
+def GetBenchmarkByName(name, environment):
+  matched = _MatchBenchmarkName(name, environment, exact_matches=True)
+  # With exact_matches, len(matched) is either 0 or 1.
+  if len(matched) == 0:
+    return None
+  return matched[0]
+
+
+def _GetJsonBenchmarkList(possible_browser, possible_reference_browser,
+                          benchmark_classes, num_shards):
+  """Returns a list of all enabled benchmarks in a JSON format expected by
+  buildbots.
+
+  JSON format:
+  { "version": <int>,
+    "steps": {
+      <string>: {
+        "device_affinity": <int>,
+        "cmd": <string>,
+        "perf_dashboard_id": <string>,
+      },
+      ...
+    }
+  }
+  """
+  output = {
+    'version': 1,
+    'steps': {
+    }
+  }
+  for benchmark_class in benchmark_classes:
+    if not _IsBenchmarkEnabled(benchmark_class, possible_browser):
+      continue
+
+    base_name = benchmark_class.Name()
+    base_cmd = [sys.executable, os.path.realpath(sys.argv[0]),
+                '-v', '--output-format=chartjson', '--upload-results',
+                base_name]
+    perf_dashboard_id = base_name
+
+    # Based on the current timings, we shift the result of the hash function to
+    # achieve better load balancing. These shift values should be revised when
+    # necessary. The shift value is calculated such that the total cycle time
+    # is minimized.
+    hash_shift = {
+      2 : 47,  # for old desktop configurations with 2 slaves
+      5 : 56,  # for new desktop configurations with 5 slaves
+      21 : 43  # for Android 3 slaves 7 devices configurations
+    }
+    shift = hash_shift.get(num_shards, 0)
+    base_name_hash = hashlib.sha1(base_name).hexdigest()
+    device_affinity = (int(base_name_hash, 16) >> shift) % num_shards
+
+    output['steps'][base_name] = {
+      'cmd': ' '.join(base_cmd + [
+            '--browser=%s' % possible_browser.browser_type]),
+      'device_affinity': device_affinity,
+      'perf_dashboard_id': perf_dashboard_id,
+    }
+    if (possible_reference_browser and
+        _IsBenchmarkEnabled(benchmark_class, possible_reference_browser)):
+      output['steps'][base_name + '.reference'] = {
+        'cmd': ' '.join(base_cmd + [
+              '--browser=reference', '--output-trace-tag=_ref']),
+        'device_affinity': device_affinity,
+        'perf_dashboard_id': perf_dashboard_id,
+      }
+
+  return json.dumps(output, indent=2, sort_keys=True)
+
+
+def main(environment, extra_commands=None):
+  ps_util.EnableListingStrayProcessesUponExitHook()
+
+  # Get the command name from the command line.
+  if len(sys.argv) > 1 and sys.argv[1] == '--help':
+    sys.argv[1] = 'help'
+
+  command_name = 'run'
+  for arg in sys.argv[1:]:
+    if not arg.startswith('-'):
+      command_name = arg
+      break
+
+  # TODO(eakuefner): Remove this hack after we port to argparse.
+  if command_name == 'help' and len(sys.argv) > 2 and sys.argv[2] == 'run':
+    command_name = 'run'
+    sys.argv[2] = '--help'
+
+  if extra_commands is None:
+    extra_commands = []
+  all_commands = [Help, List, Run] + extra_commands
+
+  # Validate and interpret the command name.
+  commands = _MatchingCommands(command_name, all_commands)
+  if len(commands) > 1:
+    print >> sys.stderr, ('"%s" is not a %s command. Did you mean one of these?'
+                          % (command_name, _ScriptName()))
+    for command in commands:
+      print >> sys.stderr, '  %-10s %s' % (
+          command.Name(), command.Description())
+    return 1
+  if commands:
+    command = commands[0]
+  else:
+    command = Run
+
+  binary_manager.InitDependencyManager(environment.client_config)
+
+  # Parse and run the command.
+  parser = command.CreateParser()
+  command.AddCommandLineArgs(parser, environment)
+
+  # Set the default chrome root variable.
+  parser.set_defaults(chrome_root=environment.default_chrome_root)
+
+
+  if isinstance(parser, argparse.ArgumentParser):
+    commandline_args = sys.argv[1:]
+    options, args = parser.parse_known_args(commandline_args[1:])
+    command.ProcessCommandLineArgs(parser, options, args, environment)
+  else:
+    options, args = parser.parse_args()
+    if commands:
+      args = args[1:]
+    options.positional_args = args
+    command.ProcessCommandLineArgs(parser, options, environment)
+
+  if command == Help:
+    command_instance = command(all_commands)
+  else:
+    command_instance = command()
+  if isinstance(command_instance, command_line.OptparseCommand):
+    return command_instance.Run(options)
+  else:
+    return command_instance.Run(options, args)
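The device_affinity value computed above deterministically assigns each benchmark to a shard: sha1 the benchmark name, shift by a per-configuration constant, then reduce modulo num_shards. A standalone sketch of that calculation with made-up benchmark names:

import hashlib

def device_affinity(benchmark_name, num_shards):
  # Same scheme as _GetJsonBenchmarkList: sha1 of the name, optionally
  # shifted for known shard counts, then reduced modulo num_shards.
  hash_shift = {2: 47, 5: 56, 21: 43}
  shift = hash_shift.get(num_shards, 0)
  name_hash = hashlib.sha1(benchmark_name).hexdigest()
  return (int(name_hash, 16) >> shift) % num_shards

for name in ('smoothness.top_25', 'page_cycler.typical_25'):
  print name, device_affinity(name, 5)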
diff --git a/catapult/telemetry/telemetry/benchmark_runner_unittest.py b/catapult/telemetry/telemetry/benchmark_runner_unittest.py
new file mode 100644
index 0000000..266f87a
--- /dev/null
+++ b/catapult/telemetry/telemetry/benchmark_runner_unittest.py
@@ -0,0 +1,115 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry import benchmark
+from telemetry import benchmark_runner
+from telemetry.testing import stream
+import mock
+
+
+class BenchmarkFoo(benchmark.Benchmark):
+  """ Benchmark Foo for testing."""
+
+  @classmethod
+  def Name(cls):
+    return 'FooBenchmark'
+
+
+class BenchmarkBar(benchmark.Benchmark):
+  """ Benchmark Bar for testing long description line."""
+
+  @classmethod
+  def Name(cls):
+    return 'BarBenchmarkkkkk'
+
+class UnusualBenchmark(benchmark.Benchmark):
+  @classmethod
+  def Name(cls):
+    return 'I have a very unusual name'
+
+
+class BenchmarkRunnerUnittest(unittest.TestCase):
+  def setUp(self):
+    self._stream = stream.TestOutputStream()
+    self._mock_possible_browser = mock.MagicMock()
+    self._mock_possible_browser.browser_type = 'TestBrowser'
+
+  def testPrintBenchmarkListWithNoDisabledBenchmark(self):
+    expected_printed_stream = (
+        'Available benchmarks for TestBrowser are:\n'
+        '  BarBenchmarkkkkk  Benchmark Bar for testing long description line.\n'
+        '  FooBenchmark      Benchmark Foo for testing.\n'
+        'Pass --browser to list benchmarks for another browser.\n\n')
+    with mock.patch('telemetry.benchmark_runner.decorators') as mock_module:
+      mock_module.IsEnabled.return_value = (True, None)
+      benchmark_runner.PrintBenchmarkList(
+        [BenchmarkFoo, BenchmarkBar], self._mock_possible_browser, self._stream)
+      self.assertEquals(expected_printed_stream, self._stream.output_data)
+
+  def testPrintBenchmarkListWithOneDisabledBenchmark(self):
+    expected_printed_stream = (
+        'Available benchmarks for TestBrowser are:\n'
+        '  FooBenchmark      Benchmark Foo for testing.\n'
+        '\n'
+        'Disabled benchmarks for TestBrowser are (force run with -d):\n'
+        '  BarBenchmarkkkkk  Benchmark Bar for testing long description line.\n'
+        'Pass --browser to list benchmarks for another browser.\n\n')
+    with mock.patch('telemetry.benchmark_runner.decorators') as mock_module:
+      def FakeIsEnabled(benchmark_class, _):
+        if benchmark_class is BenchmarkFoo:
+          return True, None
+        else:
+          return False, 'Only supported BenchmarkFoo'
+      mock_module.IsEnabled = FakeIsEnabled
+      benchmark_runner.PrintBenchmarkList(
+        [BenchmarkFoo, BenchmarkBar], self._mock_possible_browser, self._stream)
+      self.assertEquals(expected_printed_stream, self._stream.output_data)
+
+  def testShouldDisable(self):
+    """Ensure that overridden ShouldDisable class methods are respected."""
+    expected_printed_stream = (
+        'Available benchmarks for TestBrowser are:\n'
+        '  BarBenchmarkkkkk  Benchmark Bar for testing long description line.\n'
+        '\n'
+        'Disabled benchmarks for TestBrowser are (force run with -d):\n'
+        '  FooBenchmark      Benchmark Foo for testing.\n'
+        'Pass --browser to list benchmarks for another browser.\n\n')
+    @classmethod
+    def FakeShouldDisable(cls, possible_browser):
+      del possible_browser  # unused
+      return cls is BenchmarkFoo
+    BenchmarkFoo.ShouldDisable = FakeShouldDisable
+    BenchmarkBar.ShouldDisable = FakeShouldDisable
+    benchmark_runner.PrintBenchmarkList(
+      [BenchmarkFoo, BenchmarkBar], self._mock_possible_browser, self._stream)
+    self.assertEquals(expected_printed_stream, self._stream.output_data)
+
+  def testShouldDisableComplex(self):
+    """Ensure that browser-dependent ShouldDisable overrides are respected."""
+    expected_printed_stream = (
+        # Expected output for 'TestBrowser':
+        'Available benchmarks for TestBrowser are:\n'
+        '  FooBenchmark      Benchmark Foo for testing.\n'
+        '\n'
+        'Disabled benchmarks for TestBrowser are (force run with -d):\n'
+        '  BarBenchmarkkkkk  Benchmark Bar for testing long description line.\n'
+        'Pass --browser to list benchmarks for another browser.\n\n'
+        # Expected output for 'MockBrowser':
+        'Available benchmarks for MockBrowser are:\n'
+        '  BarBenchmarkkkkk  Benchmark Bar for testing long description line.\n'
+        '  FooBenchmark      Benchmark Foo for testing.\n'
+        'Pass --browser to list benchmarks for another browser.\n\n')
+    @classmethod
+    def FakeShouldDisable(cls, possible_browser):
+      return (cls is BenchmarkBar and
+              'Mock' not in possible_browser.browser_type)
+    BenchmarkFoo.ShouldDisable = FakeShouldDisable
+    BenchmarkBar.ShouldDisable = FakeShouldDisable
+    benchmark_runner.PrintBenchmarkList(
+      [BenchmarkFoo, BenchmarkBar], self._mock_possible_browser, self._stream)
+    self._mock_possible_browser.browser_type = 'MockBrowser'
+    benchmark_runner.PrintBenchmarkList(
+      [BenchmarkFoo, BenchmarkBar], self._mock_possible_browser, self._stream)
+    self.assertEquals(expected_printed_stream, self._stream.output_data)
diff --git a/catapult/telemetry/telemetry/benchmark_unittest.py b/catapult/telemetry/telemetry/benchmark_unittest.py
new file mode 100644
index 0000000..d26dcce
--- /dev/null
+++ b/catapult/telemetry/telemetry/benchmark_unittest.py
@@ -0,0 +1,161 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import optparse
+import unittest
+
+from telemetry import android
+from telemetry import benchmark
+from telemetry.testing import options_for_unittests
+from telemetry.internal import story_runner
+from telemetry import page
+from telemetry.page import page_test
+from telemetry.page import shared_page_state
+from telemetry import story as story_module
+from telemetry.web_perf import timeline_based_measurement
+
+
+class DummyPageTest(page_test.PageTest):
+  def ValidateAndMeasurePage(self, *_):
+    pass
+
+
+class TestBenchmark(benchmark.Benchmark):
+  def __init__(self, story):
+    super(TestBenchmark, self).__init__()
+    self._story_set = story_module.StorySet()
+    self._story_set.AddStory(story)
+
+  def CreatePageTest(self, _):
+    return DummyPageTest()
+
+  def CreateStorySet(self, _):
+    return self._story_set
+
+
+class BenchmarkTest(unittest.TestCase):
+
+  def testPageTestWithIncompatibleStory(self):
+    b = TestBenchmark(story_module.Story(
+        shared_state_class=shared_page_state.SharedPageState))
+    with self.assertRaisesRegexp(
+        Exception, 'containing only telemetry.page.Page stories'):
+      b.Run(options_for_unittests.GetCopy())
+
+    state_class = story_module.SharedState
+    b = TestBenchmark(story_module.Story(
+        shared_state_class=state_class))
+    with self.assertRaisesRegexp(
+        Exception, 'containing only telemetry.page.Page stories'):
+      b.Run(options_for_unittests.GetCopy())
+
+    b = TestBenchmark(android.AndroidStory(start_intent=None))
+    with self.assertRaisesRegexp(
+        Exception, 'containing only telemetry.page.Page stories'):
+      b.Run(options_for_unittests.GetCopy())
+
+  def testPageTestWithCompatibleStory(self):
+    original_run_fn = story_runner.Run
+    was_run = [False]
+    def RunStub(*arg, **kwargs):
+      del arg, kwargs
+      was_run[0] = True
+    story_runner.Run = RunStub
+
+    try:
+      options = options_for_unittests.GetCopy()
+      options.output_formats = ['none']
+      options.suppress_gtest_report = True
+      parser = optparse.OptionParser()
+      benchmark.AddCommandLineArgs(parser)
+      options.MergeDefaultValues(parser.get_default_values())
+
+      b = TestBenchmark(page.Page(url='about:blank'))
+      b.Run(options)
+    finally:
+      story_runner.Run = original_run_fn
+
+    self.assertTrue(was_run[0])
+
+  def testOverriddenTbmOptionsAndPageTestRaises(self):
+    class FakeTimelineBasedMeasurementOptions(object):
+      pass
+
+    class OverrideBothBenchmark(benchmark.Benchmark):
+      def CreatePageTest(self, _):
+        return DummyPageTest()
+      def CreateTimelineBasedMeasurementOptions(self):
+        return FakeTimelineBasedMeasurementOptions()
+
+    assertion_regex = (
+        'Cannot override both CreatePageTest and '
+        'CreateTimelineBasedMeasurementOptions')
+    with self.assertRaisesRegexp(AssertionError, assertion_regex):
+      OverrideBothBenchmark()
+
+  def testBenchmarkMakesTbmTestByDefault(self):
+    class DefaultTbmBenchmark(benchmark.Benchmark):
+      pass
+
+    self.assertIsInstance(
+        DefaultTbmBenchmark().CreatePageTest(options=None),
+        timeline_based_measurement.TimelineBasedMeasurement)
+
+  def testUnknownTestTypeRaises(self):
+    class UnknownTestType(object):
+      pass
+    class UnknownTestTypeBenchmark(benchmark.Benchmark):
+      test = UnknownTestType
+
+    type_error_regex = (
+        '"UnknownTestType" is not a PageTest or a TimelineBasedMeasurement')
+    with self.assertRaisesRegexp(TypeError, type_error_regex):
+      UnknownTestTypeBenchmark().CreatePageTest(options=None)
+
+  def testOverriddenTbmOptionsAndPageTestTestAttributeRaises(self):
+    class FakeTimelineBasedMeasurementOptions(object):
+      pass
+
+    class OverrideOptionsOnPageTestBenchmark(benchmark.Benchmark):
+      test = DummyPageTest
+      def CreateTimelineBasedMeasurementOptions(self):
+        return FakeTimelineBasedMeasurementOptions()
+
+    assertion_regex = (
+        'Cannot override CreateTimelineBasedMeasurementOptions '
+        'with a PageTest')
+    with self.assertRaisesRegexp(AssertionError, assertion_regex):
+      OverrideOptionsOnPageTestBenchmark().CreatePageTest(options=None)
+
+  def testBenchmarkPredicate(self):
+    class PredicateBenchmark(TestBenchmark):
+      @classmethod
+      def ValueCanBeAddedPredicate(cls, value, is_first_result):
+        return False
+
+    original_run_fn = story_runner.Run
+    validPredicate = [False]
+
+    def RunStub(test, story_set_module, finder_options, results,
+                *args): # pylint: disable=unused-argument
+      predicate = results._value_can_be_added_predicate
+      valid = predicate == PredicateBenchmark.ValueCanBeAddedPredicate
+      validPredicate[0] = valid
+
+    story_runner.Run = RunStub
+
+    try:
+      options = options_for_unittests.GetCopy()
+      options.output_formats = ['none']
+      options.suppress_gtest_report = True
+      parser = optparse.OptionParser()
+      benchmark.AddCommandLineArgs(parser)
+      options.MergeDefaultValues(parser.get_default_values())
+
+      b = PredicateBenchmark(page.Page(url='about:blank'))
+      b.Run(options)
+    finally:
+      story_runner.Run = original_run_fn
+
+    self.assertTrue(validPredicate[0])
diff --git a/catapult/telemetry/telemetry/core/__init__.py b/catapult/telemetry/telemetry/core/__init__.py
new file mode 100644
index 0000000..efcc9c3
--- /dev/null
+++ b/catapult/telemetry/telemetry/core/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/catapult/telemetry/telemetry/core/android_action_runner.py b/catapult/telemetry/telemetry/core/android_action_runner.py
new file mode 100644
index 0000000..6bd2855
--- /dev/null
+++ b/catapult/telemetry/telemetry/core/android_action_runner.py
@@ -0,0 +1,161 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import time
+
+from telemetry.core import util
+
+
+class ActionNotSupported(Exception):
+  pass
+
+
+class AndroidActionRunner(object):
+  """Provides an API for interacting with an android device.
+
+  This makes use of functionality provided by the android input command. None
+  of the gestures here are guaranteed to be performant for telemetry tests and
+  there is no official support for this API.
+
+  TODO(ariblue): Replace this API with a better implementation for interacting
+  with native components.
+  """
+
+  def __init__(self, platform_backend):
+    self._platform_backend = platform_backend
+
+  def SmoothScrollBy(self, left_start_coord, top_start_coord, direction,
+                     scroll_distance):
+    """Perform gesture to scroll down on the android device.
+    """
+    if direction not in ['down', 'up', 'left', 'right']:
+      raise ActionNotSupported('Invalid scroll direction: %s' % direction)
+
+    # Use a duration equal to the scroll distance so the swipe is slow enough
+    # that the page travels exactly the distance we specify.
+    duration = scroll_distance
+
+    # Note that the default behavior is swiping up for scrolling down.
+    if direction == 'down':
+      left_end_coord = left_start_coord
+      top_end_coord = top_start_coord - scroll_distance
+    elif direction == 'up':
+      left_end_coord = left_start_coord
+      top_end_coord = top_start_coord + scroll_distance
+    elif direction == 'right':
+      left_end_coord = left_start_coord - scroll_distance
+      top_end_coord = top_start_coord
+    elif direction == 'left':
+      left_end_coord = left_start_coord + scroll_distance
+      top_end_coord = top_start_coord
+
+    self.InputSwipe(left_start_coord, top_start_coord, left_end_coord,
+                    top_end_coord, duration)
+
+  def Wait(self, seconds):
+    """Wait for the number of seconds specified.
+
+    Args:
+      seconds: The number of seconds to wait.
+    """
+    time.sleep(seconds)
+
+  def InputText(self, string):
+    """Convert the characters of the string into key events and send to device.
+
+    Args:
+      string: The string to send to the device.
+    """
+    self._platform_backend.device.RunShellCommand('input text %s' % string)
+
+  def InputKeyEvent(self, key):
+    """Send a single key input to the device.
+
+    Args:
+      key: A key code number or name that will be sent to the device.
+    """
+    self._platform_backend.device.RunShellCommand('input keyevent %s' % key)
+
+  def InputTap(self, x_coord, y_coord):
+    """Perform a tap input at the given coordinates.
+
+    Args:
+      x_coord: The x coordinate of the tap event.
+      y_coord: The y coordinate of the tap event.
+    """
+    self._platform_backend.device.RunShellCommand('input tap %s %s' % (x_coord,
+                                                                       y_coord))
+
+  def InputSwipe(self, left_start_coord, top_start_coord, left_end_coord,
+                 top_end_coord, duration):
+    """Perform a swipe input.
+
+    Args:
+      left_start_coord: The horizontal starting coordinate of the gesture
+      top_start_coord: The vertical starting coordinate of the gesture
+      left_end_coord: The horizontal ending coordinate of the gesture
+      top_end_coord: The vertical ending coordinate of the gesture
+      duration: The length of time of the swipe in milliseconds
+    """
+    self._platform_backend.device.RunShellCommand(
+        'input swipe %s %s %s %s %s' % (left_start_coord, top_start_coord,
+                                        left_end_coord, top_end_coord,
+                                        duration))
+
+  def InputPress(self):
+    """Perform a press input."""
+    self._platform_backend.device.RunShellCommand('input press')
+
+  def InputRoll(self, dx, dy):
+    """Perform a roll input. This sends a simple zero-pressure move event.
+
+    Args:
+      dx: Change in the x coordinate due to move.
+      dy: Change in the y coordinate due to move.
+    """
+    self._platform_backend.device.RunShellCommand('input roll %s %s' % (dx, dy))
+
+  def TurnScreenOn(self):
+    """If device screen is off, turn screen on.
+    If the screen is already on, log a warning and return immediately.
+
+    Raises:
+      Timeout: If the screen is off and device fails to turn screen on.
+    """
+    self._platform_backend.device.SetScreen(True)
+    util.WaitFor(self._platform_backend.device.IsScreenOn, 5)
+
+  def TurnScreenOff(self):
+    """If device screen is on, turn screen off.
+    If the screen is already off, log a warning and return immediately.
+
+    Raises:
+      Timeout: If the screen is on and device fails to turn screen off.
+    """
+
+    def is_screen_off():
+      return not self._platform_backend.device.IsScreenOn()
+
+    self._platform_backend.device.SetScreen(False)
+    util.WaitFor(is_screen_off, 5)
+
+  def UnlockScreen(self):
+    """If device screen is locked, unlocks it.
+    If the device is not locked, log a warning and return immediately.
+
+    Raises:
+      Timeout: If device fails to unlock screen.
+    """
+
+    def is_screen_unlocked():
+      return not self._platform_backend.IsScreenLocked()
+
+    if self._platform_backend.IsScreenLocked():
+      self._platform_backend.device.RunShellCommand('input keyevent 82')
+    else:
+      logging.warning('Screen not locked when expected.')
+      return
+
+    util.WaitFor(is_screen_unlocked, 5)
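A hedged sketch of driving these input methods directly; it assumes a platform_backend whose .device is a devil DeviceUtils instance, which is what the methods above expect:

from telemetry.core import android_action_runner


def tap_and_type(platform_backend, x, y, text):
  # platform_backend is assumed to be an Android platform backend whose
  # .device attribute is a devil DeviceUtils instance.
  runner = android_action_runner.AndroidActionRunner(platform_backend)
  runner.TurnScreenOn()
  runner.UnlockScreen()
  runner.InputTap(x, y)
  runner.InputText(text)
  runner.InputKeyEvent(66)  # 66 is KEYCODE_ENTER on Android.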
diff --git a/catapult/telemetry/telemetry/core/android_platform.py b/catapult/telemetry/telemetry/core/android_platform.py
new file mode 100644
index 0000000..8ddb9f2
--- /dev/null
+++ b/catapult/telemetry/telemetry/core/android_platform.py
@@ -0,0 +1,57 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.core import android_action_runner
+from telemetry.core import platform
+from telemetry.internal.app import android_app
+from telemetry.internal.backends import android_app_backend
+
+
+class AndroidPlatform(platform.Platform):
+
+  def __init__(self, platform_backend):
+    super(AndroidPlatform, self).__init__(platform_backend)
+    self._android_action_runner = android_action_runner.AndroidActionRunner(
+        platform_backend)
+
+  @property
+  def android_action_runner(self):
+    return self._android_action_runner
+
+  @property
+  def system_ui(self):
+    """Returns an AppUi object to interact with Android's system UI.
+
+    See devil.android.app_ui for the documentation of the API provided.
+    """
+    return self._platform_backend.GetSystemUi()
+
+  def IsSvelte(self):
+    return self._platform_backend.IsSvelte()
+
+  def LaunchAndroidApplication(self,
+                               start_intent,
+                               is_app_ready_predicate=None,
+                               app_has_webviews=True):
+    """Launches an Android application given the intent.
+
+    Args:
+      start_intent: The intent to use to start the app.
+      is_app_ready_predicate: A predicate function to determine
+          whether the app is ready. This is a function that takes an
+          AndroidApp instance and returns a boolean. If not passed in, the
+          app is considered ready as soon as the launch intent completes.
+      app_has_webviews: A boolean indicating whether the app is expected to
+          contain any WebViews. If True, the app will be launched with
+          appropriate webview flags, and the GetWebViews method of the returned
+          object may be used to access them.
+
+    Returns:
+      A reference to the android_app launched.
+    """
+    self._platform_backend.DismissCrashDialogIfNeeded()
+    app_backend = android_app_backend.AndroidAppBackend(
+        self._platform_backend, start_intent, is_app_ready_predicate,
+        app_has_webviews)
+    return android_app.AndroidApp(app_backend, self._platform_backend)
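A sketch of launching an app through this API with a readiness predicate. The package, activity, and the android_platform variable are assumptions for illustration; the Intent comes from devil, which telemetry/__init__.py already puts on sys.path:

# Assumes `android_platform` is an AndroidPlatform obtained via
# platform.GetPlatformForDevice.
from devil.android.sdk import intent

start_intent = intent.Intent(
    package='com.example.app',                # hypothetical package
    activity='com.example.app.MainActivity')  # hypothetical activity

def _app_is_ready(app):
  # Hypothetical readiness check; real predicates typically poll the app's
  # UI or process state before returning True.
  del app  # unused in this sketch
  return True

app = android_platform.LaunchAndroidApplication(
    start_intent, is_app_ready_predicate=_app_is_ready)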
diff --git a/catapult/telemetry/telemetry/core/cros_interface.py b/catapult/telemetry/telemetry/core/cros_interface.py
new file mode 100644
index 0000000..3f1b243
--- /dev/null
+++ b/catapult/telemetry/telemetry/core/cros_interface.py
@@ -0,0 +1,518 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""A wrapper around ssh for common operations on a CrOS-based device"""
+import logging
+import os
+import re
+import shutil
+import stat
+import subprocess
+import tempfile
+
+# Some developers' workflow includes running the Chrome process from
+# /usr/local/... instead of the default location. We have to check for both
+# paths in order to support this workflow.
+_CHROME_PROCESS_REGEX = [re.compile(r'^/opt/google/chrome/chrome '),
+                         re.compile(r'^/usr/local/?.*/chrome/chrome ')]
+
+
+def RunCmd(args, cwd=None, quiet=False):
+  """Opens a subprocess to execute a program and returns its return value.
+
+  Args:
+    args: A string or a sequence of program arguments. The program to execute is
+      the string or the first item in the args sequence.
+    cwd: If not None, the subprocess's current directory will be changed to
+      |cwd| before it's executed.
+
+  Returns:
+    Return code from the command execution.
+  """
+  if not quiet:
+    logging.debug(' '.join(args) + ' ' + (cwd or ''))
+  with open(os.devnull, 'w') as devnull:
+    p = subprocess.Popen(args=args,
+                         cwd=cwd,
+                         stdout=devnull,
+                         stderr=devnull,
+                         stdin=devnull,
+                         shell=False)
+    return p.wait()
+
+
+def GetAllCmdOutput(args, cwd=None, quiet=False):
+  """Open a subprocess to execute a program and returns its output.
+
+  Args:
+    args: A string or a sequence of program arguments. The program to execute is
+      the string or the first item in the args sequence.
+    cwd: If not None, the subprocess's current directory will be changed to
+      |cwd| before it's executed.
+
+  Returns:
+    A (stdout, stderr) tuple with the command's captured output. Both streams
+    are also logged at debug level unless |quiet| is set.
+  """
+  if not quiet:
+    logging.debug(' '.join(args) + ' ' + (cwd or ''))
+  with open(os.devnull, 'w') as devnull:
+    p = subprocess.Popen(args=args,
+                         cwd=cwd,
+                         stdout=subprocess.PIPE,
+                         stderr=subprocess.PIPE,
+                         stdin=devnull)
+    stdout, stderr = p.communicate()
+    if not quiet:
+      logging.debug(' > stdout=[%s], stderr=[%s]', stdout, stderr)
+    return stdout, stderr
+
+
+def HasSSH():
+  try:
+    RunCmd(['ssh'], quiet=True)
+    RunCmd(['scp'], quiet=True)
+    logging.debug("HasSSH()->True")
+    return True
+  except OSError:
+    logging.debug("HasSSH()->False")
+    return False
+
+
+class LoginException(Exception):
+  pass
+
+
+class KeylessLoginRequiredException(LoginException):
+  pass
+
+
+class DNSFailureException(LoginException):
+  pass
+
+
+class CrOSInterface(object):
+
+  def __init__(self, hostname=None, ssh_port=None, ssh_identity=None):
+    self._hostname = hostname
+    self._ssh_port = ssh_port
+
+    # List of ports generated from GetRemotePort() that may not be in use yet.
+    self._reserved_ports = []
+
+    if self.local:
+      return
+
+    self._ssh_identity = None
+    self._ssh_args = ['-o ConnectTimeout=5', '-o StrictHostKeyChecking=no',
+                      '-o KbdInteractiveAuthentication=no',
+                      '-o PreferredAuthentications=publickey',
+                      '-o UserKnownHostsFile=/dev/null', '-o ControlMaster=no']
+
+    if ssh_identity:
+      self._ssh_identity = os.path.abspath(os.path.expanduser(ssh_identity))
+      os.chmod(self._ssh_identity, stat.S_IREAD)
+
+    # Establish master SSH connection using ControlPersist.
+    # Since only one test will be run on a remote host at a time,
+    # the control socket filename can be telemetry@hostname.
+    self._ssh_control_file = '/tmp/' + 'telemetry' + '@' + hostname
+    with open(os.devnull, 'w') as devnull:
+      subprocess.call(
+          self.FormSSHCommandLine(['-M', '-o ControlPersist=yes']),
+          stdin=devnull,
+          stdout=devnull,
+          stderr=devnull)
+
+  def __enter__(self):
+    return self
+
+  def __exit__(self, *args):
+    self.CloseConnection()
+
+  @property
+  def local(self):
+    return not self._hostname
+
+  @property
+  def hostname(self):
+    return self._hostname
+
+  @property
+  def ssh_port(self):
+    return self._ssh_port
+
+  def FormSSHCommandLine(self, args, extra_ssh_args=None):
+    """Constructs a subprocess-suitable command line for `ssh'.
+    """
+    if self.local:
+      # We run the command through the shell locally for consistency with
+      # how commands are run through SSH (crbug.com/239161). This work
+      # around will be unnecessary once we implement a persistent SSH
+      # connection to run remote commands (crbug.com/239607).
+      return ['sh', '-c', " ".join(args)]
+
+    full_args = ['ssh', '-o ForwardX11=no', '-o ForwardX11Trusted=no', '-n',
+                 '-S', self._ssh_control_file] + self._ssh_args
+    if self._ssh_identity is not None:
+      full_args.extend(['-i', self._ssh_identity])
+    if extra_ssh_args:
+      full_args.extend(extra_ssh_args)
+    full_args.append('root@%s' % self._hostname)
+    full_args.append('-p%d' % self._ssh_port)
+    full_args.extend(args)
+    return full_args
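+
+    # Illustrative sketch (hypothetical host and port): for
+    # CrOSInterface(hostname='dut.local', ssh_port=22),
+    # FormSSHCommandLine(['echo', '$USER']) yields roughly
+    #   ['ssh', '-o ForwardX11=no', '-o ForwardX11Trusted=no', '-n',
+    #    '-S', '/tmp/telemetry@dut.local'] + self._ssh_args +
+    #   ['root@dut.local', '-p22', 'echo', '$USER']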
+
+  def _FormSCPCommandLine(self, src, dst, extra_scp_args=None):
+    """Constructs a subprocess-suitable command line for `scp'.
+
+    Note: this function is not designed to work with IPv6 addresses, which need
+    to be enclosed in brackets and have a '-6' flag supplied in order to be
+    properly parsed by `scp'.
+    """
+    assert not self.local, "Cannot use SCP on local target."
+
+    args = ['scp', '-P', str(self._ssh_port)] + self._ssh_args
+    if self._ssh_identity:
+      args.extend(['-i', self._ssh_identity])
+    if extra_scp_args:
+      args.extend(extra_scp_args)
+    args += [src, dst]
+    return args
+
+  def _FormSCPToRemote(self,
+                       source,
+                       remote_dest,
+                       extra_scp_args=None,
+                       user='root'):
+    return self._FormSCPCommandLine(source,
+                                    '%s@%s:%s' % (user, self._hostname,
+                                                  remote_dest),
+                                    extra_scp_args=extra_scp_args)
+
+  def _FormSCPFromRemote(self,
+                         remote_source,
+                         dest,
+                         extra_scp_args=None,
+                         user='root'):
+    return self._FormSCPCommandLine('%s@%s:%s' % (user, self._hostname,
+                                                  remote_source),
+                                    dest,
+                                    extra_scp_args=extra_scp_args)
+
+  def _RemoveSSHWarnings(self, toClean):
+    """Removes specific ssh warning lines from a string.
+
+    Args:
+      toClean: A string that may contain multiple lines.
+
+    Returns:
+      A copy of toClean with all the Warning lines removed.
+    """
+    # Remove the Warning about connecting to a new host for the first time.
+    return re.sub(
+        r'Warning: Permanently added [^\n]* to the list of known hosts.\s\n',
+        '', toClean)
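+
+    # Illustrative sketch (hypothetical host): a removed line looks roughly
+    # like
+    #   Warning: Permanently added '10.0.0.5' (RSA) to the list of known hosts.
+    # Only lines matching the pattern above are stripped; all other stderr
+    # output is preserved.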
+
+  def RunCmdOnDevice(self, args, cwd=None, quiet=False):
+    stdout, stderr = GetAllCmdOutput(
+        self.FormSSHCommandLine(args),
+        cwd,
+        quiet=quiet)
+    # The initial login will add the host to the known hosts file but will also
+    # print a warning to stderr that we need to remove.
+    stderr = self._RemoveSSHWarnings(stderr)
+    return stdout, stderr
+
+  def TryLogin(self):
+    logging.debug('TryLogin()')
+    assert not self.local
+    stdout, stderr = self.RunCmdOnDevice(['echo', '$USER'], quiet=True)
+    if stderr != '':
+      if 'Host key verification failed' in stderr:
+        raise LoginException(('%s host key verification failed. ' +
+                              'SSH to it manually to fix connectivity.') %
+                             self._hostname)
+      if 'Operation timed out' in stderr:
+        raise LoginException('Timed out while logging into %s' % self._hostname)
+      if 'UNPROTECTED PRIVATE KEY FILE!' in stderr:
+        raise LoginException('Permissions for %s are too open. To fix this,\n'
+                             'chmod 600 %s' % (self._ssh_identity,
+                                               self._ssh_identity))
+      if 'Permission denied (publickey,keyboard-interactive)' in stderr:
+        raise KeylessLoginRequiredException('Need to set up ssh auth for %s' %
+                                            self._hostname)
+      if 'Could not resolve hostname' in stderr:
+        raise DNSFailureException('Unable to resolve the hostname for: %s' %
+                                  self._hostname)
+      raise LoginException('While logging into %s, got %s' % (self._hostname,
+                                                              stderr))
+    if stdout != 'root\n':
+      raise LoginException('Logged into %s, expected $USER=root, but got %s.' %
+                           (self._hostname, stdout))
+
+  def FileExistsOnDevice(self, file_name):
+    if self.local:
+      return os.path.exists(file_name)
+
+    stdout, stderr = self.RunCmdOnDevice(
+        [
+            'if', 'test', '-e', file_name, ';', 'then', 'echo', '1', ';', 'fi'
+        ],
+        quiet=True)
+    if stderr != '':
+      if "Connection timed out" in stderr:
+        raise OSError('Machine wasn\'t responding to ssh: %s' % stderr)
+      raise OSError('Unexpected error: %s' % stderr)
+    exists = stdout == '1\n'
+    logging.debug("FileExistsOnDevice(<text>, %s)->%s" % (file_name, exists))
+    return exists
+
+  def PushFile(self, filename, remote_filename):
+    if self.local:
+      args = ['cp', '-r', filename, remote_filename]
+      stdout, stderr = GetAllCmdOutput(args, quiet=True)
+      if stderr != '':
+        raise OSError('No such file or directory %s' % stderr)
+      return
+
+    args = self._FormSCPToRemote(
+        os.path.abspath(filename),
+        remote_filename,
+        extra_scp_args=['-r'])
+
+    stdout, stderr = GetAllCmdOutput(args, quiet=True)
+    stderr = self._RemoveSSHWarnings(stderr)
+    if stderr != '':
+      raise OSError('No such file or directory %s' % stderr)
+
+  def PushContents(self, text, remote_filename):
+    logging.debug("PushContents(<text>, %s)" % remote_filename)
+    with tempfile.NamedTemporaryFile() as f:
+      f.write(text)
+      f.flush()
+      self.PushFile(f.name, remote_filename)
+
+  def GetFile(self, filename, destfile=None):
+    """Copies a local file |filename| to |destfile| on the device.
+
+    Args:
+      filename: The name of the local source file.
+      destfile: The name of the file to copy to, and if it is not specified
+        then it is the basename of the source file.
+
+    """
+    logging.debug("GetFile(%s, %s)" % (filename, destfile))
+    if self.local:
+      if destfile is not None and destfile != filename:
+        shutil.copyfile(filename, destfile)
+      return
+
+    if destfile is None:
+      destfile = os.path.basename(filename)
+    args = self._FormSCPFromRemote(filename, os.path.abspath(destfile))
+
+    stdout, stderr = GetAllCmdOutput(args, quiet=True)
+    stderr = self._RemoveSSHWarnings(stderr)
+    if stderr != '':
+      raise OSError('No such file or directory %s' % stderr)
+
+  def GetFileContents(self, filename):
+    """Get the contents of a file on the device.
+
+    Args:
+      filename: The name of the file on the device.
+
+    Returns:
+      A string containing the contents of the file.
+    """
+    # TODO: handle the self.local case
+    assert not self.local
+    t = tempfile.NamedTemporaryFile()
+    self.GetFile(filename, t.name)
+    with open(t.name, 'r') as f2:
+      res = f2.read()
+    logging.debug("GetFileContents(%s)->%s" % (filename, res))
+    return res
+
+  def ListProcesses(self):
+    """Returns (pid, cmd, ppid, state) of all processes on the device."""
+    stdout, stderr = self.RunCmdOnDevice(
+        [
+            '/bin/ps', '--no-headers', '-A', '-o', 'pid,ppid,args:4096,state'
+        ],
+        quiet=True)
+    assert stderr == '', stderr
+    procs = []
+    for l in stdout.split('\n'):
+      if l == '':
+        continue
+      m = re.match(r'^\s*(\d+)\s+(\d+)\s+(.+)\s+(.+)', l, re.DOTALL)
+      assert m
+      procs.append((int(m.group(1)), m.group(3).rstrip(), int(m.group(2)),
+                    m.group(4)))
+    logging.debug("ListProcesses(<predicate>)->[%i processes]" % len(procs))
+    return procs
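+
+    # Illustrative sketch (hypothetical ps output line):
+    #   ' 612     1 /sbin/session_manager S'
+    # parses into the tuple (612, '/sbin/session_manager', 1, 'S'), i.e.
+    # (pid, cmd, ppid, state).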
+
+  def _GetSessionManagerPid(self, procs):
+    """Returns the pid of the session_manager process, given the list of
+    processes."""
+    for pid, process, _, _ in procs:
+      argv = process.split()
+      if argv and os.path.basename(argv[0]) == 'session_manager':
+        return pid
+    return None
+
+  def GetChromeProcess(self):
+    """Locates the the main chrome browser process.
+
+    Chrome on cros is usually in /opt/google/chrome, but could be in
+    /usr/local/ for developer workflows - debug chrome is too large to fit on
+    rootfs.
+
+    Chrome spawns multiple processes for renderers. pids wrap around after they
+    are exhausted so looking for the smallest pid is not always correct. We
+    locate the session_manager's pid, and look for the chrome process that's an
+    immediate child. This is the main browser process.
+    """
+    procs = self.ListProcesses()
+    session_manager_pid = self._GetSessionManagerPid(procs)
+    if not session_manager_pid:
+      return None
+
+    # Find the chrome process that is the child of the session_manager.
+    for pid, process, ppid, _ in procs:
+      if ppid != session_manager_pid:
+        continue
+      for regex in _CHROME_PROCESS_REGEX:
+        path_match = re.match(regex, process)
+        if path_match is not None:
+          return {'pid': pid, 'path': path_match.group(), 'args': process}
+    return None
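+
+    # Illustrative sketch (hypothetical values): a match returns a dict such as
+    #   {'pid': 1234,
+    #    'path': '/opt/google/chrome/chrome ',
+    #    'args': '/opt/google/chrome/chrome --enable-logging ...'}
+    # where 'path' is the prefix matched by _CHROME_PROCESS_REGEX (including
+    # the trailing space the regex requires).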
+
+  def GetChromePid(self):
+    """Returns pid of main chrome browser process."""
+    result = self.GetChromeProcess()
+    if result and 'pid' in result:
+      return result['pid']
+    return None
+
+  def RmRF(self, filename):
+    logging.debug("rm -rf %s" % filename)
+    self.RunCmdOnDevice(['rm', '-rf', filename], quiet=True)
+
+  def Chown(self, filename):
+    self.RunCmdOnDevice(['chown', '-R', 'chronos:chronos', filename])
+
+  def KillAllMatching(self, predicate):
+    kills = ['kill', '-KILL']
+    for pid, cmd, _, _ in self.ListProcesses():
+      if predicate(cmd):
+        logging.info('Killing %s, pid %d', cmd, pid)
+        kills.append(str(pid))
+    logging.debug("KillAllMatching(<predicate>)->%i" % (len(kills) - 2))
+    if len(kills) > 2:
+      self.RunCmdOnDevice(kills, quiet=True)
+    return len(kills) - 2
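+
+    # Illustrative usage sketch (hypothetical predicate):
+    #   cri.KillAllMatching(lambda cmd: cmd.startswith('/opt/google/chrome'))
+    # sends a single 'kill -KILL' with every matching pid appended and returns
+    # the number of processes that were targeted.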
+
+  def IsServiceRunning(self, service_name):
+    stdout, stderr = self.RunCmdOnDevice(['status', service_name], quiet=True)
+    assert stderr == '', stderr
+    running = 'running, process' in stdout
+    logging.debug("IsServiceRunning(%s)->%s" % (service_name, running))
+    return running
+
+  def GetRemotePort(self):
+    netstat = self.RunCmdOnDevice(['netstat', '-ant'])
+    netstat = netstat[0].split('\n')
+    ports_in_use = []
+
+    for line in netstat[2:]:
+      if not line:
+        continue
+      address_in_use = line.split()[3]
+      port_in_use = address_in_use.split(':')[-1]
+      ports_in_use.append(int(port_in_use))
+
+    ports_in_use.extend(self._reserved_ports)
+
+    new_port = sorted(ports_in_use)[-1] + 1
+    self._reserved_ports.append(new_port)
+
+    return new_port
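+
+    # Illustrative sketch (hypothetical netstat row): from a line such as
+    #   'tcp   0   0 127.0.0.1:8080   0.0.0.0:*   LISTEN'
+    # the local-address column (index 3) yields port 8080 as in use; the value
+    # returned above is one greater than the highest in-use or reserved port.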
+
+  def IsHTTPServerRunningOnPort(self, port):
+    wget_output = self.RunCmdOnDevice(['wget', 'localhost:%i' % (port), '-T1',
+                                       '-t1'])
+
+    if 'Connection refused' in wget_output[1]:
+      return False
+
+    return True
+
+  def FilesystemMountedAt(self, path):
+    """Returns the filesystem mounted at |path|"""
+    df_out, _ = self.RunCmdOnDevice(['/bin/df', path])
+    df_ary = df_out.split('\n')
+    # 3 lines for title, mount info, and empty line.
+    if len(df_ary) == 3:
+      line_ary = df_ary[1].split()
+      if line_ary:
+        return line_ary[0]
+    return None
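+
+    # Illustrative sketch (hypothetical df output): the two lines
+    #   'Filesystem  1K-blocks   Used Available Use% Mounted on'
+    #   '/dev/sda1     1000000  50000    950000   5% /home/chronos'
+    # plus the trailing empty string give len(df_ary) == 3, and the first field
+    # of the mount-info line ('/dev/sda1') is returned.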
+
+  def CryptohomePath(self, user):
+    """Returns the cryptohome mount point for |user|."""
+    stdout, stderr = self.RunCmdOnDevice(['cryptohome-path', 'user', "'%s'" %
+                                          user])
+    if stderr != '':
+      raise OSError('cryptohome-path failed: %s' % stderr)
+    return stdout.rstrip()
+
+  def IsCryptohomeMounted(self, username, is_guest):
+    """Returns True iff |user|'s cryptohome is mounted."""
+    profile_path = self.CryptohomePath(username)
+    mount = self.FilesystemMountedAt(profile_path)
+    mount_prefix = 'guestfs' if is_guest else '/home/.shadow/'
+    return mount and mount.startswith(mount_prefix)
+
+  def TakeScreenShot(self, screenshot_prefix):
+    """Takes a screenshot, useful for debugging failures."""
+    # TODO(achuith): Find a better location for screenshots. Cros autotests
+    # upload everything in /var/log so use /var/log/screenshots for now.
+    SCREENSHOT_DIR = '/var/log/screenshots/'
+    SCREENSHOT_EXT = '.png'
+
+    self.RunCmdOnDevice(['mkdir', '-p', SCREENSHOT_DIR])
+    # A large number of screenshots can increase hardware lab bandwidth
+    # dramatically, so keep this number low. crbug.com/524814.
+    for i in xrange(2):
+      screenshot_file = ('%s%s-%d%s' %
+                         (SCREENSHOT_DIR, screenshot_prefix, i, SCREENSHOT_EXT))
+      if not self.FileExistsOnDevice(screenshot_file):
+        self.RunCmdOnDevice([
+            '/usr/local/autotest/bin/screenshot.py', screenshot_file
+        ])
+        return
+    logging.warning('screenshot directory full.')
+
+  def RestartUI(self, clear_enterprise_policy):
+    logging.info('(Re)starting the ui (logs the user out)')
+    if clear_enterprise_policy:
+      self.RunCmdOnDevice(['stop', 'ui'])
+      self.RmRF('/var/lib/whitelist/*')
+      self.RmRF(r'/home/chronos/Local\ State')
+
+    if self.IsServiceRunning('ui'):
+      self.RunCmdOnDevice(['restart', 'ui'])
+    else:
+      self.RunCmdOnDevice(['start', 'ui'])
+
+  def CloseConnection(self):
+    if not self.local:
+      with open(os.devnull, 'w') as devnull:
+        subprocess.call(
+            self.FormSSHCommandLine(['-O', 'exit', self._hostname]),
+            stdout=devnull,
+            stderr=devnull)
diff --git a/catapult/telemetry/telemetry/core/cros_interface_unittest.py b/catapult/telemetry/telemetry/core/cros_interface_unittest.py
new file mode 100644
index 0000000..298e628
--- /dev/null
+++ b/catapult/telemetry/telemetry/core/cros_interface_unittest.py
@@ -0,0 +1,227 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# TODO(nduca): Rewrite some of these tests to use mocks instead of
+# actually talking to the device. This would improve our coverage quite
+# a bit.
+
+import socket
+import tempfile
+import unittest
+import mock
+
+from telemetry.core import cros_interface
+from telemetry import decorators
+from telemetry.internal import forwarders
+from telemetry.internal.forwarders import cros_forwarder
+from telemetry.testing import options_for_unittests
+
+
+class CrOSInterfaceTest(unittest.TestCase):
+
+  def _GetCRI(self):
+    remote = options_for_unittests.GetCopy().cros_remote
+    remote_ssh_port = options_for_unittests.GetCopy().cros_remote_ssh_port
+    return cros_interface.CrOSInterface(
+        remote, remote_ssh_port,
+        options_for_unittests.GetCopy().cros_ssh_identity)
+
+  @decorators.Enabled('cros-chrome')
+  def testPushContents(self):
+    with self._GetCRI() as cri:
+      cri.RunCmdOnDevice(['rm', '-rf', '/tmp/testPushContents'])
+      cri.PushContents('hello world', '/tmp/testPushContents')
+      contents = cri.GetFileContents('/tmp/testPushContents')
+      self.assertEquals(contents, 'hello world')
+
+  @decorators.Enabled('cros-chrome')
+  def testExists(self):
+    with self._GetCRI() as cri:
+      self.assertTrue(cri.FileExistsOnDevice('/proc/cpuinfo'))
+      self.assertTrue(cri.FileExistsOnDevice('/etc/passwd'))
+      self.assertFalse(cri.FileExistsOnDevice('/etc/sdlfsdjflskfjsflj'))
+
+  @decorators.Enabled('linux')
+  def testExistsLocal(self):
+    with cros_interface.CrOSInterface() as cri:
+      self.assertTrue(cri.FileExistsOnDevice('/proc/cpuinfo'))
+      self.assertTrue(cri.FileExistsOnDevice('/etc/passwd'))
+      self.assertFalse(cri.FileExistsOnDevice('/etc/sdlfsdjflskfjsflj'))
+
+  @decorators.Enabled('cros-chrome')
+  def testGetFileContents(self):  # pylint: disable=no-self-use
+    with self._GetCRI() as cri:
+      hosts = cri.GetFileContents('/etc/lsb-release')
+      self.assertTrue('CHROMEOS' in hosts)
+
+  @decorators.Enabled('cros-chrome')
+  def testGetFileContentsNonExistent(self):
+    with self._GetCRI() as cri:
+      f = tempfile.NamedTemporaryFile()
+      cri.PushContents('testGetFileNonExistent', f.name)
+      cri.RmRF(f.name)
+      self.assertRaises(OSError, lambda: cri.GetFileContents(f.name))
+
+  @decorators.Enabled('cros-chrome')
+  def testGetFile(self):  # pylint: disable=no-self-use
+    with self._GetCRI() as cri:
+      f = tempfile.NamedTemporaryFile()
+      cri.GetFile('/etc/lsb-release', f.name)
+      with open(f.name, 'r') as f2:
+        res = f2.read()
+        self.assertTrue('CHROMEOS' in res)
+
+  @decorators.Enabled('cros-chrome')
+  def testGetFileNonExistent(self):
+    with self._GetCRI() as cri:
+      f = tempfile.NamedTemporaryFile()
+      cri.PushContents('testGetFileNonExistent', f.name)
+      cri.RmRF(f.name)
+      self.assertRaises(OSError, lambda: cri.GetFile(f.name))
+
+  @decorators.Enabled('cros-chrome')
+  def testIsServiceRunning(self):
+    with self._GetCRI() as cri:
+      self.assertTrue(cri.IsServiceRunning('openssh-server'))
+
+  @decorators.Enabled('linux')
+  def testIsServiceRunningLocal(self):
+    with cros_interface.CrOSInterface() as cri:
+      self.assertTrue(cri.IsServiceRunning('dbus'))
+
+  @decorators.Enabled('cros-chrome')
+  def testGetRemotePortAndIsHTTPServerRunningOnPort(self):
+    with self._GetCRI() as cri:
+      # Create local server.
+      sock = socket.socket()
+      sock.bind(('', 0))
+      port = sock.getsockname()[1]
+      sock.listen(0)
+
+      # Get remote port and ensure that it was unused.
+      remote_port = cri.GetRemotePort()
+      self.assertFalse(cri.IsHTTPServerRunningOnPort(remote_port))
+
+      # Forward local server's port to remote device's remote_port.
+      forwarder = cros_forwarder.CrOsForwarderFactory(cri).Create(
+          forwarders.PortPairs(http=forwarders.PortPair(port, remote_port),
+                               https=None,
+                               dns=None))
+
+      # At this point, remote device should be able to connect to local server.
+      self.assertTrue(cri.IsHTTPServerRunningOnPort(remote_port))
+
+      # Next remote port shouldn't be the same as remote_port, since remote_port
+      # is now in use.
+      self.assertTrue(cri.GetRemotePort() != remote_port)
+
+      # Close forwarder and local server ports.
+      forwarder.Close()
+      sock.close()
+
+      # Device should no longer be able to connect to remote_port since it is no
+      # longer in use.
+      self.assertFalse(cri.IsHTTPServerRunningOnPort(remote_port))
+
+  @decorators.Enabled('cros-chrome')
+  def testGetRemotePortReservedPorts(self):
+    with self._GetCRI() as cri:
+      # Should return 2 separate ports even though the first one isn't
+      # technically being used yet.
+      remote_port_1 = cri.GetRemotePort()
+      remote_port_2 = cri.GetRemotePort()
+
+      self.assertTrue(remote_port_1 != remote_port_2)
+
+  @decorators.Enabled('cros-chrome')
+  def testTakeScreenShot(self):
+    with self._GetCRI() as cri:
+
+      def _Cleanup():
+        cri.RmRF('/var/log/screenshots/test-prefix*')
+
+      _Cleanup()
+      cri.TakeScreenShot('test-prefix')
+      self.assertTrue(cri.FileExistsOnDevice(
+          '/var/log/screenshots/test-prefix-0.png'))
+      _Cleanup()
+
+  # TODO(tengs): It would be best if we can filter this test and other tests
+  # that need to be run locally based on the platform of the system browser.
+  @decorators.Enabled('linux')
+  def testEscapeCmdArguments(self):
+    """Commands and their arguments that are executed through the cros
+    interface should follow bash syntax. This test needs to run on remotely
+    and locally on the device to check for consistency.
+    """
+    options = options_for_unittests.GetCopy()
+    with cros_interface.CrOSInterface(options.cros_remote,
+                                      options.cros_remote_ssh_port,
+                                      options.cros_ssh_identity) as cri:
+
+      # Check arguments with no special characters
+      stdout, _ = cri.RunCmdOnDevice(['echo', '--arg1=value1', '--arg2=value2',
+                                      '--arg3="value3"'])
+      assert stdout.strip() == '--arg1=value1 --arg2=value2 --arg3=value3'
+
+      # Check argument with special characters escaped
+      stdout, _ = cri.RunCmdOnDevice(['echo', '--arg=A\\; echo \\"B\\"'])
+      assert stdout.strip() == '--arg=A; echo "B"'
+
+      # Check argument with special characters in quotes
+      stdout, _ = cri.RunCmdOnDevice(['echo', "--arg='$HOME;;$PATH'"])
+      assert stdout.strip() == "--arg=$HOME;;$PATH"
+
+  @decorators.Enabled('cros-chrome', 'linux')
+  @mock.patch.object(cros_interface.CrOSInterface, 'RunCmdOnDevice')
+  def testTryLoginSuccess(self, mock_run_cmd):
+    mock_run_cmd.return_value = ('root\n', '')
+    cri = cros_interface.CrOSInterface(
+        "testhostname", 22, options_for_unittests.GetCopy().cros_ssh_identity)
+    cri.TryLogin()
+    mock_run_cmd.assert_called_once_with(['echo', '$USER'], quiet=True)
+
+  @decorators.Enabled('cros-chrome', 'linux')
+  @mock.patch.object(cros_interface.CrOSInterface, 'RunCmdOnDevice')
+  def testTryLoginStderr(self, mock_run_cmd):
+    cri = cros_interface.CrOSInterface(
+        "testhostname", 22, options_for_unittests.GetCopy().cros_ssh_identity)
+
+    mock_run_cmd.return_value = ('', 'Host key verification failed')
+    self.assertRaises(cros_interface.LoginException, cri.TryLogin)
+    self.assertRaisesRegexp(cros_interface.LoginException,
+                            r'.*host key verification failed..*', cri.TryLogin)
+
+    mock_run_cmd.return_value = ('', 'Operation timed out')
+    self.assertRaisesRegexp(cros_interface.LoginException,
+                            r'Timed out while logging into.*', cri.TryLogin)
+
+    mock_run_cmd.return_value = ('', 'UNPROTECTED PRIVATE KEY FILE!')
+    self.assertRaisesRegexp(cros_interface.LoginException,
+                            r'Permissions for .* are too open. To fix this.*',
+                            cri.TryLogin)
+
+    mock_run_cmd.return_value = (
+        '', 'Permission denied (publickey,keyboard-interactive)')
+    self.assertRaisesRegexp(cros_interface.KeylessLoginRequiredException,
+                            r'Need to set up ssh auth for .*', cri.TryLogin)
+
+    mock_run_cmd.return_value = ('', 'Fallback error case')
+    self.assertRaisesRegexp(cros_interface.LoginException,
+                            r'While logging into .*, got .*', cri.TryLogin)
+
+    mock_run_cmd.return_value = ('', 'Could not resolve hostname')
+    self.assertRaisesRegexp(cros_interface.DNSFailureException,
+                            r'Unable to resolve the hostname for:.*',
+                            cri.TryLogin)
+
+  @decorators.Enabled('cros-chrome', 'linux')
+  @mock.patch.object(cros_interface.CrOSInterface, 'RunCmdOnDevice')
+  def testTryLoginStdout(self, mock_run_cmd):
+    mock_run_cmd.return_value = ('notrooot', '')
+    cri = cros_interface.CrOSInterface(
+        "testhostname", 22, options_for_unittests.GetCopy().cros_ssh_identity)
+    self.assertRaisesRegexp(cros_interface.LoginException,
+                            r'Logged into .*, expected \$USER=root, but got .*',
+                            cri.TryLogin)
diff --git a/catapult/telemetry/telemetry/core/discover.py b/catapult/telemetry/telemetry/core/discover.py
new file mode 100644
index 0000000..8b05a5f
--- /dev/null
+++ b/catapult/telemetry/telemetry/core/discover.py
@@ -0,0 +1,149 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import fnmatch
+import inspect
+import os
+import re
+import sys
+
+from telemetry import decorators
+from telemetry.internal.util import camel_case
+from telemetry.internal.util import classes as classes_module
+
+
+@decorators.Cache
+def DiscoverModules(start_dir, top_level_dir, pattern='*'):
+  """Discover all modules in |start_dir| which match |pattern|.
+
+  Args:
+    start_dir: The directory to recursively search.
+    top_level_dir: The top level of the package, for importing.
+    pattern: Unix shell-style pattern for filtering the filenames to import.
+
+  Returns:
+    list of modules.
+  """
+  # start_dir and top_level_dir must be consistent with each other.
+  start_dir = os.path.realpath(start_dir)
+  top_level_dir = os.path.realpath(top_level_dir)
+
+  modules = []
+  for dir_path, _, filenames in os.walk(start_dir):
+    for filename in filenames:
+      # Filter out unwanted filenames.
+      if filename.startswith('.') or filename.startswith('_'):
+        continue
+      if os.path.splitext(filename)[1] != '.py':
+        continue
+      if not fnmatch.fnmatch(filename, pattern):
+        continue
+
+      # Find the module.
+      module_rel_path = os.path.relpath(
+          os.path.join(dir_path, filename), top_level_dir)
+      module_name = re.sub(r'[/\\]', '.', os.path.splitext(module_rel_path)[0])
+
+      # Import the module.
+      try:
+        # Make sure that top_level_dir is the first path in sys.path, in case
+        # there are naming conflicts in module parts.
+        original_sys_path = sys.path[:]
+        sys.path.insert(0, top_level_dir)
+        module = __import__(module_name, fromlist=[True])
+        modules.append(module)
+      finally:
+        sys.path = original_sys_path
+  return modules
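+
+# Illustrative usage sketch (hypothetical paths):
+#   DiscoverModules('/src/telemetry/telemetry/core', '/src/telemetry',
+#                   pattern='*_unittest.py')
+# imports each matching file as a module named like
+# 'telemetry.core.cros_interface_unittest' and returns the module objects.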
+
+
+# TODO(dtu): Normalize all discoverable classes to have corresponding module
+# and class names, then always index by class name.
+@decorators.Cache
+def DiscoverClasses(start_dir,
+                    top_level_dir,
+                    base_class,
+                    pattern='*',
+                    index_by_class_name=True,
+                    directly_constructable=False):
+  """Discover all classes in |start_dir| which subclass |base_class|.
+
+  Base classes that contain subclasses are ignored by default.
+
+  Args:
+    start_dir: The directory to recursively search.
+    top_level_dir: The top level of the package, for importing.
+    base_class: The base class to search for.
+    pattern: Unix shell-style pattern for filtering the filenames to import.
+    index_by_class_name: If True, use class name converted to
+        lowercase_with_underscores instead of module name in return dict keys.
+    directly_constructable: If True, only return classes that can be
+        constructed without arguments.
+
+  Returns:
+    dict of {module_name: class} or {underscored_class_name: class}
+  """
+  modules = DiscoverModules(start_dir, top_level_dir, pattern)
+  classes = {}
+  for module in modules:
+    new_classes = DiscoverClassesInModule(
+        module, base_class, index_by_class_name, directly_constructable)
+    classes = dict(classes.items() + new_classes.items())
+  return classes
+
+
+@decorators.Cache
+def DiscoverClassesInModule(module,
+                            base_class,
+                            index_by_class_name=False,
+                            directly_constructable=False):
+  """Discover all classes in |module| which subclass |base_class|.
+
+  Base classes that contain subclasses are ignored by default.
+
+  Args:
+    module: The module to search.
+    base_class: The base class to search for.
+    index_by_class_name: If True, use class name converted to
+        lowercase_with_underscores instead of module name in return dict keys.
+    directly_constructable: If True, only return classes that can be
+        constructed without arguments.
+
+  Returns:
+    dict of {module_name: class} or {underscored_class_name: class}
+  """
+  classes = {}
+  for _, obj in inspect.getmembers(module):
+    # Ensure object is a class.
+    if not inspect.isclass(obj):
+      continue
+    # Include only subclasses of base_class.
+    if not issubclass(obj, base_class):
+      continue
+    # Exclude the base_class itself.
+    if obj is base_class:
+      continue
+    # Exclude protected or private classes.
+    if obj.__name__.startswith('_'):
+      continue
+    # Include only the module in which the class is defined.
+    # If a class is imported by another module, exclude those duplicates.
+    if obj.__module__ != module.__name__:
+      continue
+
+    if index_by_class_name:
+      key_name = camel_case.ToUnderscore(obj.__name__)
+    else:
+      key_name = module.__name__.split('.')[-1]
+    if (not directly_constructable or
+        classes_module.IsDirectlyConstructable(obj)):
+      classes[key_name] = obj
+
+  return classes
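+
+# Illustrative sketch: with index_by_class_name=True a class named
+# DummyExceptionImpl1 is keyed as 'dummy_exception_impl1'; with the default
+# (False) the key is the defining module's basename, e.g.
+# 'discover_dummyclass'.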
+
+
+_counter = [0]
+
+
+def _GetUniqueModuleName():
+  _counter[0] += 1
+  return "module_" + str(_counter[0])
diff --git a/catapult/telemetry/telemetry/core/discover_unittest.py b/catapult/telemetry/telemetry/core/discover_unittest.py
new file mode 100644
index 0000000..bb6785c
--- /dev/null
+++ b/catapult/telemetry/telemetry/core/discover_unittest.py
@@ -0,0 +1,109 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import os
+import unittest
+
+from telemetry.core import discover
+from telemetry.core import util
+
+
+class DiscoverTest(unittest.TestCase):
+
+  def setUp(self):
+    self._base_dir = util.GetUnittestDataDir()
+    self._start_dir = os.path.join(self._base_dir, 'discoverable_classes')
+    self._base_class = Exception
+
+  def testDiscoverClassesWithIndexByModuleName(self):
+    classes = discover.DiscoverClasses(self._start_dir,
+                                       self._base_dir,
+                                       self._base_class,
+                                       index_by_class_name=False)
+
+    actual_classes = dict((name, cls.__name__)
+                          for name, cls in classes.iteritems())
+    expected_classes = {
+        'another_discover_dummyclass': 'DummyExceptionWithParameterImpl1',
+        'discover_dummyclass': 'DummyException',
+        'parameter_discover_dummyclass': 'DummyExceptionWithParameterImpl2'
+    }
+    self.assertEqual(actual_classes, expected_classes)
+
+  def testDiscoverDirectlyConstructableClassesWithIndexByClassName(self):
+    classes = discover.DiscoverClasses(self._start_dir,
+                                       self._base_dir,
+                                       self._base_class,
+                                       directly_constructable=True)
+
+    actual_classes = dict((name, cls.__name__)
+                          for name, cls in classes.iteritems())
+    expected_classes = {
+        'dummy_exception': 'DummyException',
+        'dummy_exception_impl1': 'DummyExceptionImpl1',
+        'dummy_exception_impl2': 'DummyExceptionImpl2',
+    }
+    self.assertEqual(actual_classes, expected_classes)
+
+  def testDiscoverClassesWithIndexByClassName(self):
+    classes = discover.DiscoverClasses(self._start_dir, self._base_dir,
+                                       self._base_class)
+
+    actual_classes = dict((name, cls.__name__)
+                          for name, cls in classes.iteritems())
+    expected_classes = {
+        'dummy_exception': 'DummyException',
+        'dummy_exception_impl1': 'DummyExceptionImpl1',
+        'dummy_exception_impl2': 'DummyExceptionImpl2',
+        'dummy_exception_with_parameter_impl1':
+            'DummyExceptionWithParameterImpl1',
+        'dummy_exception_with_parameter_impl2':
+            'DummyExceptionWithParameterImpl2'
+    }
+    self.assertEqual(actual_classes, expected_classes)
+
+  def testDiscoverClassesWithPatternAndIndexByModule(self):
+    classes = discover.DiscoverClasses(self._start_dir,
+                                       self._base_dir,
+                                       self._base_class,
+                                       pattern='another*',
+                                       index_by_class_name=False)
+
+    actual_classes = dict((name, cls.__name__)
+                          for name, cls in classes.iteritems())
+    expected_classes = {
+        'another_discover_dummyclass': 'DummyExceptionWithParameterImpl1'
+    }
+    self.assertEqual(actual_classes, expected_classes)
+
+  def testDiscoverDirectlyConstructableClassesWithPatternAndIndexByClassName(
+      self):
+    classes = discover.DiscoverClasses(self._start_dir,
+                                       self._base_dir,
+                                       self._base_class,
+                                       pattern='another*',
+                                       directly_constructable=True)
+
+    actual_classes = dict((name, cls.__name__)
+                          for name, cls in classes.iteritems())
+    expected_classes = {
+        'dummy_exception_impl1': 'DummyExceptionImpl1',
+        'dummy_exception_impl2': 'DummyExceptionImpl2',
+    }
+    self.assertEqual(actual_classes, expected_classes)
+
+  def testDiscoverClassesWithPatternAndIndexByClassName(self):
+    classes = discover.DiscoverClasses(self._start_dir,
+                                       self._base_dir,
+                                       self._base_class,
+                                       pattern='another*')
+
+    actual_classes = dict((name, cls.__name__)
+                          for name, cls in classes.iteritems())
+    expected_classes = {
+        'dummy_exception_impl1': 'DummyExceptionImpl1',
+        'dummy_exception_impl2': 'DummyExceptionImpl2',
+        'dummy_exception_with_parameter_impl1':
+            'DummyExceptionWithParameterImpl1',
+    }
+    self.assertEqual(actual_classes, expected_classes)
diff --git a/catapult/telemetry/telemetry/core/exceptions.py b/catapult/telemetry/telemetry/core/exceptions.py
new file mode 100644
index 0000000..b16efaa
--- /dev/null
+++ b/catapult/telemetry/telemetry/core/exceptions.py
@@ -0,0 +1,156 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import logging
+import sys
+
+
+class Error(Exception):
+  """Base class for Telemetry exceptions."""
+
+  def __init__(self, msg=''):
+    super(Error, self).__init__(msg)
+    self._debugging_messages = []
+
+  def AddDebuggingMessage(self, msg):
+    """Adds a message to the description of the exception.
+
+    Many Telemetry exceptions arise from failures in another application. These
+    failures are difficult to pinpoint. This method allows Telemetry classes to
+    append useful debugging information to the exception. This method also logs
+    information about the location from where it was called.
+    """
+    frame = sys._getframe(1)
+    line_number = frame.f_lineno
+    file_name = frame.f_code.co_filename
+    function_name = frame.f_code.co_name
+    call_site = '%s:%s %s' % (file_name, line_number, function_name)
+    annotated_message = '(%s) %s' % (call_site, msg)
+
+    self._debugging_messages.append(annotated_message)
+
+  def __str__(self):
+    divider = '\n' + '*' * 80 + '\n'
+    output = super(Error, self).__str__()
+    for message in self._debugging_messages:
+      output += divider
+      output += message
+    return output
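+
+  # Illustrative sketch (hypothetical call site): AddDebuggingMessage('retry
+  # failed') stores a string such as
+  #   '(/path/to/caller.py:42 RunStory) retry failed'
+  # and __str__ appends each stored message after a divider of asterisks.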
+
+
+class PlatformError(Error):
+  """ Represents an exception thrown when constructing platform. """
+
+
+class TimeoutException(Error):
+  """The operation failed to complete because of a timeout.
+
+  It is possible that waiting for a longer period of time would result in a
+  successful operation.
+  """
+  pass
+
+
+class AppCrashException(Error):
+
+  def __init__(self, app=None, msg=''):
+    super(AppCrashException, self).__init__(msg)
+    self._msg = msg
+    self._stack_trace = []
+    self._app_stdout = []
+    if app:
+      try:
+        self._stack_trace = app.GetStackTrace().splitlines()
+      except Exception as err:
+        logging.error('Problem when trying to gather stack trace: %s' % err)
+      try:
+        self._app_stdout = app.GetStandardOutput().splitlines()
+      except Exception as err:
+        logging.error('Problem when trying to gather standard output: %s' % err)
+
+  @property
+  def stack_trace(self):
+    return self._stack_trace
+
+  def __str__(self):
+    divider = '*' * 80
+    debug_messages = []
+    debug_messages.append(super(AppCrashException, self).__str__())
+    debug_messages.append('Stack Trace:')
+    debug_messages.append(divider)
+    debug_messages.extend(('\t%s' % l) for l in self._stack_trace)
+    debug_messages.append(divider)
+    debug_messages.append('Standard output:')
+    debug_messages.append(divider)
+    debug_messages.extend(('\t%s' % l) for l in self._app_stdout)
+    debug_messages.append(divider)
+    return '\n'.join(debug_messages)
+
+
+class DevtoolsTargetCrashException(AppCrashException):
+  """Represents a crash of the current devtools target but not the overall app.
+
+  This can be a tab or a WebView. In this state, the tab/WebView is
+  gone, but the underlying browser is still alive.
+  """
+
+  def __init__(self, app, msg='Devtools target crashed'):
+    super(DevtoolsTargetCrashException, self).__init__(app, msg)
+
+
+class BrowserGoneException(AppCrashException):
+  """Represents a crash of the entire browser.
+
+  In this state, all bets are pretty much off."""
+
+  def __init__(self, app, msg='Browser crashed'):
+    super(BrowserGoneException, self).__init__(app, msg)
+
+
+class BrowserConnectionGoneException(BrowserGoneException):
+  """Represents a browser that still exists but cannot be reached."""
+
+  def __init__(self, app, msg='Browser exists but the connection is gone'):
+    super(BrowserConnectionGoneException, self).__init__(app, msg)
+
+
+class ProcessGoneException(Error):
+  """Represents a process that no longer exists for an unknown reason."""
+
+
+class IntentionalException(Error):
+  """Represent an exception raised by a unittest which is not printed."""
+
+
+class InitializationError(Error):
+
+  def __init__(self, string):
+    super(InitializationError, self).__init__(string)
+
+
+class LoginException(Error):
+  pass
+
+
+class EvaluateException(Error):
+  pass
+
+
+class ProfilingException(Error):
+  pass
+
+
+class PathMissingError(Error):
+  """ Represents an exception thrown when an expected path doesn't exist. """
+
+
+class UnknownPackageError(Error):
+  """ Represents an exception when encountering an unsupported Android APK. """
+
+
+class PackageDetectionError(Error):
+  """ Represents an error when parsing an Android APK's package. """
+
+
+class AndroidDeviceParsingError(Error):
+  """Represents an error when parsing output from an android device"""
diff --git a/catapult/telemetry/telemetry/core/local_server.py b/catapult/telemetry/telemetry/core/local_server.py
new file mode 100644
index 0000000..5c20c45
--- /dev/null
+++ b/catapult/telemetry/telemetry/core/local_server.py
@@ -0,0 +1,224 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# TODO(aiolos): this should be moved to catapult/base after the repo move.
+# It is used by tracing in tvcm/browser_controller.
+import collections
+import json
+import os
+import re
+import subprocess
+import sys
+
+from telemetry.core import util
+from telemetry.internal import forwarders
+
+NamedPort = collections.namedtuple('NamedPort', ['name', 'port'])
+
+
+class LocalServerBackend(object):
+
+  def __init__(self):
+    pass
+
+  def StartAndGetNamedPorts(self, args):
+    """Starts the actual server and obtains any sockets on which it
+    should listen.
+
+    Returns a list of NamedPort on which this backend is listening.
+    """
+    raise NotImplementedError()
+
+  def ServeForever(self):
+    raise NotImplementedError()
+
+
+class LocalServer(object):
+
+  def __init__(self, server_backend_class):
+    assert LocalServerBackend in server_backend_class.__bases__
+    server_module_name = server_backend_class.__module__
+    assert server_module_name in sys.modules, \
+        'The server class\' module must be findable via sys.modules'
+    assert getattr(sys.modules[server_module_name],
+                   server_backend_class.__name__), \
+        'The server class must be gettable from its __module__ by its __name__'
+
+    self._server_backend_class = server_backend_class
+    self._subprocess = None
+    self._devnull = None
+    self._local_server_controller = None
+    self.forwarder = None
+    self.host_ip = None
+
+  def Start(self, local_server_controller):
+    assert self._subprocess is None
+    self._local_server_controller = local_server_controller
+
+    self.host_ip = local_server_controller.host_ip
+
+    server_args = self.GetBackendStartupArgs()
+    server_args_as_json = json.dumps(server_args)
+    server_module_name = self._server_backend_class.__module__
+
+    self._devnull = open(os.devnull, 'w')
+    cmd = [
+        sys.executable,
+        '-m',
+        __name__,
+        'run_backend',
+        server_module_name,
+        self._server_backend_class.__name__,
+        server_args_as_json,
+    ]
+
+    env = os.environ.copy()
+    env['PYTHONPATH'] = os.pathsep.join(sys.path)
+
+    self._subprocess = subprocess.Popen(cmd,
+                                        cwd=util.GetTelemetryDir(),
+                                        env=env,
+                                        stdout=subprocess.PIPE)
+
+    named_ports = self._GetNamedPortsFromBackend()
+    named_port_pair_map = {'http': None, 'https': None, 'dns': None}
+    for name, port in named_ports:
+      assert name in named_port_pair_map, '%s forwarding is unsupported' % name
+      named_port_pair_map[name] = (forwarders.PortPair(
+          port, local_server_controller.GetRemotePort(port)))
+    self.forwarder = local_server_controller.CreateForwarder(
+        forwarders.PortPairs(**named_port_pair_map))
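+
+    # Illustrative sketch (hypothetical backend): for a backend class
+    # FooServerBackend defined in telemetry.core.foo_server, the command built
+    # above is roughly
+    #   <python> -m telemetry.core.local_server run_backend \
+    #       telemetry.core.foo_server FooServerBackend '{"some": "arg"}'
+    # with PYTHONPATH propagated so the child process can import telemetry.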
+
+  def _GetNamedPortsFromBackend(self):
+    named_ports_json = None
+    named_ports_re = re.compile('LocalServerBackend started: (?P<port>.+)')
+    # TODO: This will hang if the subprocess doesn't print the correct output.
+    while self._subprocess.poll() is None:
+      m = named_ports_re.match(self._subprocess.stdout.readline())
+      if m:
+        named_ports_json = m.group('port')
+        break
+
+    if not named_ports_json:
+      raise Exception('Server process died prematurely ' +
+                      'without giving us port pairs.')
+    return [NamedPort(**pair) for pair in json.loads(named_ports_json.lower())]
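+
+    # Illustrative sketch (hypothetical port): the backend prints a line such
+    # as
+    #   LocalServerBackend started: [{"name": "http", "port": 34567}]
+    # which the regex above captures and json.loads turns into
+    # [NamedPort(name='http', port=34567)].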
+
+  @property
+  def is_running(self):
+    return self._subprocess is not None
+
+  def __enter__(self):
+    return self
+
+  def __exit__(self, *args):
+    self.Close()
+
+  def __del__(self):
+    self.Close()
+
+  def Close(self):
+    if self.forwarder:
+      self.forwarder.Close()
+      self.forwarder = None
+    if self._subprocess:
+      # TODO(tonyg): Should this block until it goes away?
+      self._subprocess.kill()
+      self._subprocess = None
+    if self._devnull:
+      self._devnull.close()
+      self._devnull = None
+    if self._local_server_controller:
+      self._local_server_controller.ServerDidClose(self)
+      self._local_server_controller = None
+
+  def GetBackendStartupArgs(self):
+    """Returns whatever arguments are required to start up the backend"""
+    raise NotImplementedError()
+
+
+class LocalServerController(object):
+  """Manages the list of running servers
+
+  This class manages the running servers, but also provides an isolation layer
+  to prevent LocalServer subclasses from accessing the browser backend directly.
+
+  """
+
+  def __init__(self, platform_backend):
+    self._platform_backend = platform_backend
+    self._local_servers_by_class = {}
+    self.host_ip = self._platform_backend.forwarder_factory.host_ip
+
+  def StartServer(self, server):
+    assert not server.is_running, 'Server already started'
+    assert isinstance(server, LocalServer)
+    if server.__class__ in self._local_servers_by_class:
+      raise Exception(
+          'Cannot have two servers of the same class running at once. ' +
+          'Locate the existing one and use it, or call Close() on it.')
+
+    server.Start(self)
+    self._local_servers_by_class[server.__class__] = server
+
+  def GetRunningServer(self, server_class, default_value):
+    return self._local_servers_by_class.get(server_class, default_value)
+
+  @property
+  def local_servers(self):
+    return self._local_servers_by_class.values()
+
+  def Close(self):
+    while len(self._local_servers_by_class):
+      server = self._local_servers_by_class.itervalues().next()
+      try:
+        server.Close()
+      except Exception:
+        import traceback
+        traceback.print_exc()
+
+  def CreateForwarder(self, port_pairs):
+    return self._platform_backend.forwarder_factory.Create(port_pairs)
+
+  def GetRemotePort(self, port):
+    return self._platform_backend.GetRemotePort(port)
+
+  def ServerDidClose(self, server):
+    del self._local_servers_by_class[server.__class__]
+
+
+def _LocalServerBackendMain(args):
+  assert len(args) == 4
+  (cmd, server_module_name, server_backend_class_name,
+   server_args_as_json) = args[:4]
+  assert cmd == 'run_backend'
+  server_module = __import__(server_module_name, fromlist=[True])
+  server_backend_class = getattr(server_module, server_backend_class_name)
+  server = server_backend_class()
+
+  server_args = json.loads(server_args_as_json)
+
+  named_ports = server.StartAndGetNamedPorts(server_args)
+  assert isinstance(named_ports, list)
+  for named_port in named_ports:
+    assert isinstance(named_port, NamedPort)
+
+  # Note: This message is scraped by the parent process'
+  # _GetNamedPortsFromBackend(). Do **not** change it.
+  # pylint: disable=protected-access
+  print 'LocalServerBackend started: %s' % json.dumps([pair._asdict()
+                                                       for pair in named_ports])
+  sys.stdout.flush()
+
+  return server.ServeForever()
+
+
+if __name__ == '__main__':
+  # This trick is needed because local_server.NamedPort is not the
+  # same as sys.modules['__main__'].NamedPort. The module itself is loaded
+  # twice, basically.
+  from telemetry.core import local_server  # pylint: disable=import-self
+  sys.exit(
+      local_server._LocalServerBackendMain(  # pylint: disable=protected-access
+          sys.argv[1:]))
diff --git a/catapult/telemetry/telemetry/core/local_server_unittest.py b/catapult/telemetry/telemetry/core/local_server_unittest.py
new file mode 100644
index 0000000..46ff85c
--- /dev/null
+++ b/catapult/telemetry/telemetry/core/local_server_unittest.py
@@ -0,0 +1,89 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import BaseHTTPServer
+import SimpleHTTPServer
+
+from telemetry import decorators
+from telemetry.core import local_server
+from telemetry.testing import tab_test_case
+
+
+class SimpleLocalServerBackendRequestHandler(
+    SimpleHTTPServer.SimpleHTTPRequestHandler):
+
+  def do_GET(self):
+    msg = """<!DOCTYPE html>
+<html>
+<body>
+hello world
+</body>
+"""
+
+    self.send_response(200)
+    self.send_header('Content-Type', 'text/html')
+    self.send_header('Content-Length', len(msg))
+    self.end_headers()
+    self.wfile.write(msg)
+
+  def log_request(self, code='-', size='-'):
+    pass
+
+
+class SimpleLocalServerBackend(BaseHTTPServer.HTTPServer,
+                               local_server.LocalServerBackend):
+
+  def __init__(self):
+    BaseHTTPServer.HTTPServer.__init__(self, ('127.0.0.1', 0),
+                                       SimpleLocalServerBackendRequestHandler)
+    local_server.LocalServerBackend.__init__(self)
+
+  def StartAndGetNamedPorts(self, args):
+    assert 'hello' in args
+    assert args['hello'] == 'world'
+    return [local_server.NamedPort('http', self.server_address[1])]
+
+  def ServeForever(self):
+    self.serve_forever()
+
+
+class SimpleLocalServer(local_server.LocalServer):
+
+  def __init__(self):
+    super(SimpleLocalServer, self).__init__(SimpleLocalServerBackend)
+
+  def GetBackendStartupArgs(self):
+    return {'hello': 'world'}
+
+  @property
+  def url(self):
+    return self.forwarder.url + '/'
+
+
+class LocalServerUnittest(tab_test_case.TabTestCase):
+
+  @classmethod
+  def setUpClass(cls):
+    super(LocalServerUnittest, cls).setUpClass()
+    cls._server = SimpleLocalServer()
+    cls._platform.StartLocalServer(cls._server)
+
+  @decorators.Disabled('all') # https://crbug.com/570955
+  def testLocalServer(self):
+    self.assertTrue(self._server in self._platform.local_servers)
+    self._tab.Navigate(self._server.url)
+    self._tab.WaitForDocumentReadyStateToBeComplete()
+    body_text = self._tab.EvaluateJavaScript('document.body.textContent')
+    body_text = body_text.strip()
+    self.assertEquals('hello world', body_text)
+
+  @decorators.Disabled('all') # https://crbug.com/570955
+  def testStartingAndRestarting(self):
+    server2 = SimpleLocalServer()
+    self.assertRaises(Exception,
+                      lambda: self._platform.StartLocalServer(server2))
+
+    self._server.Close()
+    self.assertTrue(self._server not in self._platform.local_servers)
+
+    self._platform.StartLocalServer(server2)
diff --git a/catapult/telemetry/telemetry/core/memory_cache_http_server.py b/catapult/telemetry/telemetry/core/memory_cache_http_server.py
new file mode 100644
index 0000000..a346eda
--- /dev/null
+++ b/catapult/telemetry/telemetry/core/memory_cache_http_server.py
@@ -0,0 +1,276 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import BaseHTTPServer
+from collections import namedtuple
+import errno
+import gzip
+import mimetypes
+import os
+import SimpleHTTPServer
+import socket
+import SocketServer
+import StringIO
+import sys
+import urlparse
+
+from telemetry.core import local_server
+
+ByteRange = namedtuple('ByteRange', ['from_byte', 'to_byte'])
+ResourceAndRange = namedtuple('ResourceAndRange', ['resource', 'byte_range'])
+
+
+class MemoryCacheHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
+
+  protocol_version = 'HTTP/1.1'  # override BaseHTTPServer setting
+  wbufsize = -1  # override StreamRequestHandler (a base class) setting
+
+  def handle(self):
+    try:
+      BaseHTTPServer.BaseHTTPRequestHandler.handle(self)
+    except socket.error as e:
+      # Connection reset errors happen all the time due to the browser closing
+      # without terminating the connection properly.  They can be safely
+      # ignored.
+      if e[0] != errno.ECONNRESET:
+        raise
+
+  def do_GET(self):
+    """Serve a GET request."""
+    resource_range = self.SendHead()
+
+    if not resource_range or not resource_range.resource:
+      return
+    response = resource_range.resource['response']
+
+    if not resource_range.byte_range:
+      self.wfile.write(response)
+      return
+
+    start_index = resource_range.byte_range.from_byte
+    end_index = resource_range.byte_range.to_byte
+    self.wfile.write(response[start_index:end_index + 1])
+
+  def do_HEAD(self):
+    """Serve a HEAD request."""
+    self.SendHead()
+
+  def log_error(self, fmt, *args):
+    pass
+
+  def log_request(self, code='-', size='-'):
+    # Don't spam the console unless it is important.
+    pass
+
+  def SendHead(self):
+    path = os.path.realpath(self.translate_path(self.path))
+    if path not in self.server.resource_map:
+      self.send_error(404, 'File not found')
+      return None
+
+    resource = self.server.resource_map[path]
+    total_num_of_bytes = resource['content-length']
+    byte_range = self.GetByteRange(total_num_of_bytes)
+    if byte_range:
+      # request specified a range, so set response code to 206.
+      self.send_response(206)
+      self.send_header('Content-Range', 'bytes %d-%d/%d' %
+                       (byte_range.from_byte, byte_range.to_byte,
+                        total_num_of_bytes))
+      total_num_of_bytes = byte_range.to_byte - byte_range.from_byte + 1
+    else:
+      self.send_response(200)
+
+    self.send_header('Content-Length', str(total_num_of_bytes))
+    self.send_header('Content-Type', resource['content-type'])
+    self.send_header('Last-Modified',
+                     self.date_time_string(resource['last-modified']))
+    if resource['zipped']:
+      self.send_header('Content-Encoding', 'gzip')
+    self.end_headers()
+    return ResourceAndRange(resource, byte_range)
+
+  def GetByteRange(self, total_num_of_bytes):
+    """Parse the header and get the range values specified.
+
+    Args:
+      total_num_of_bytes: Total # of bytes in requested resource,
+      used to calculate upper range limit.
+    Returns:
+      A ByteRange namedtuple object with the requested byte-range values.
+      If no Range is explicitly requested or there is a failure parsing,
+      return None.
+      If the range is specified in the format "N-", return bytes N through the
+      end. Refer to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html for
+      details.
+      If the upper range limit exceeds the total number of bytes, it is clamped
+      to the last byte index.
+    """
+
+    range_header = self.headers.getheader('Range')
+    if range_header is None:
+      return None
+    if not range_header.startswith('bytes='):
+      return None
+
+    # The range header is expected to be a string in this format:
+    # bytes=0-1
+    # Get the upper and lower limits of the specified byte-range.
+    # We've already confirmed that range_header starts with 'bytes='.
+    byte_range_values = range_header[len('bytes='):].split('-')
+    from_byte = 0
+    to_byte = 0
+
+    if len(byte_range_values) == 2:
+      # If the upper bound is missing, serve through the last byte.
+      to_byte = (int(byte_range_values[1]) if byte_range_values[1] else
+                 total_num_of_bytes - 1)
+      # If the lower bound is missing, serve the last 'to_byte' bytes.
+      from_byte = (int(byte_range_values[0]) if byte_range_values[0] else
+                   total_num_of_bytes - to_byte)
+    else:
+      return None
+
+    # Do some validation.
+    if from_byte < 0:
+      return None
+
+    # Make to_byte the end byte by default in edge cases.
+    if to_byte < from_byte or to_byte >= total_num_of_bytes:
+      to_byte = total_num_of_bytes - 1
+
+    return ByteRange(from_byte, to_byte)
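+
+    # Illustrative sketch (assuming total_num_of_bytes=1000):
+    #   'bytes=0-499'  -> ByteRange(0, 499)
+    #   'bytes=500-'   -> ByteRange(500, 999)  # open-ended upper bound
+    #   'bytes=-200'   -> ByteRange(800, 999)  # last 200 bytes
+    #   'bytes=0-5000' -> ByteRange(0, 999)    # clamped to the resource size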
+
+
+class _MemoryCacheHTTPServerImpl(SocketServer.ThreadingMixIn,
+                                 BaseHTTPServer.HTTPServer):
+  # Increase the request queue size. The default value, 5, is set in
+  # SocketServer.TCPServer (the parent of BaseHTTPServer.HTTPServer).
+  # Since we're intercepting many domains through this single server,
+  # it is quite possible to get more than 5 concurrent requests.
+  request_queue_size = 128
+
+  # Don't prevent python from exiting when there is thread activity.
+  daemon_threads = True
+
+  def __init__(self, host_port, handler, paths):
+    BaseHTTPServer.HTTPServer.__init__(self, host_port, handler)
+    self.resource_map = {}
+    for path in paths:
+      if os.path.isdir(path):
+        self.AddDirectoryToResourceMap(path)
+      else:
+        self.AddFileToResourceMap(path)
+
+  def AddDirectoryToResourceMap(self, directory_path):
+    """Loads all files in directory_path into the in-memory resource map."""
+    for root, dirs, files in os.walk(directory_path):
+      # Skip hidden files and folders (like .svn and .git).
+      files = [f for f in files if f[0] != '.']
+      dirs[:] = [d for d in dirs if d[0] != '.']
+
+      for f in files:
+        file_path = os.path.join(root, f)
+        if not os.path.exists(file_path):  # Allow for '.#' files
+          continue
+        self.AddFileToResourceMap(file_path)
+
+  def AddFileToResourceMap(self, file_path):
+    """Loads file_path into the in-memory resource map."""
+    file_path = os.path.realpath(file_path)
+    if file_path in self.resource_map:
+      return
+
+    with open(file_path, 'rb') as fd:
+      response = fd.read()
+      fs = os.fstat(fd.fileno())
+    content_type = mimetypes.guess_type(file_path)[0]
+    zipped = False
+    if content_type in ['text/html', 'text/css', 'application/javascript']:
+      zipped = True
+      sio = StringIO.StringIO()
+      gzf = gzip.GzipFile(fileobj=sio, compresslevel=9, mode='wb')
+      gzf.write(response)
+      gzf.close()
+      response = sio.getvalue()
+      sio.close()
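+    # Keep the (possibly gzip-compressed) payload and its metadata in memory,
+    # keyed by the file's real path, so requests are served without disk I/O.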
+    self.resource_map[file_path] = {
+        'content-type': content_type,
+        'content-length': len(response),
+        'last-modified': fs.st_mtime,
+        'response': response,
+        'zipped': zipped
+    }
+
+    index = 'index.html'
+    if os.path.basename(file_path) == index:
+      dir_path = os.path.dirname(file_path)
+      self.resource_map[dir_path] = self.resource_map[file_path]
+
+
+class MemoryCacheHTTPServerBackend(local_server.LocalServerBackend):
+
+  def __init__(self):
+    super(MemoryCacheHTTPServerBackend, self).__init__()
+    self._httpd = None
+
+  def StartAndGetNamedPorts(self, args):
+    base_dir = args['base_dir']
+    os.chdir(base_dir)
+
+    paths = args['paths']
+    for path in paths:
+      if not os.path.realpath(path).startswith(os.path.realpath(os.getcwd())):
+        print >> sys.stderr, '"%s" is not under the cwd.' % path
+        sys.exit(1)
+
+    server_address = (args['host'], args['port'])
+    MemoryCacheHTTPRequestHandler.protocol_version = 'HTTP/1.1'
+    self._httpd = _MemoryCacheHTTPServerImpl(
+        server_address, MemoryCacheHTTPRequestHandler, paths)
+    return [local_server.NamedPort('http', self._httpd.server_address[1])]
+
+  def ServeForever(self):
+    return self._httpd.serve_forever()
+
+
+class MemoryCacheHTTPServer(local_server.LocalServer):
+
+  def __init__(self, paths):
+    super(MemoryCacheHTTPServer, self).__init__(MemoryCacheHTTPServerBackend)
+    self._base_dir = None
+
+    for path in paths:
+      assert os.path.exists(path), '%s does not exist.' % path
+
+    paths = list(paths)
+    self._paths = paths
+
+    self._paths_as_set = set(map(os.path.realpath, paths))
+
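+    # Illustrative example (assuming '/data' is an existing directory): for
+    # ['/data/a.html', '/data/b.html'] the character-wise common prefix is
+    # '/data/', a directory, so it becomes the base dir; if the prefix were
+    # '/data/a' (not a directory), its dirname '/data' would be used instead.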
+    common_prefix = os.path.commonprefix(paths)
+    if os.path.isdir(common_prefix):
+      self._base_dir = common_prefix
+    else:
+      self._base_dir = os.path.dirname(common_prefix)
+
+  def GetBackendStartupArgs(self):
+    return {'base_dir': self._base_dir,
+            'paths': self._paths,
+            'host': self.host_ip,
+            'port': 0}
+
+  @property
+  def paths(self):
+    return self._paths_as_set
+
+  @property
+  def url(self):
+    return self.forwarder.url
+
+  def UrlOf(self, path):
+    relative_path = os.path.relpath(path, self._base_dir)
+    # Preserve trailing slash or backslash.
+    # It doesn't matter in a file path, but it does matter in a URL.
+    if path.endswith(os.sep) or (os.altsep and path.endswith(os.altsep)):
+      relative_path += '/'
+    return urlparse.urljoin(self.url, relative_path.replace(os.sep, '/'))
diff --git a/catapult/telemetry/telemetry/core/memory_cache_http_server_unittest.py b/catapult/telemetry/telemetry/core/memory_cache_http_server_unittest.py
new file mode 100644
index 0000000..32ea3ca
--- /dev/null
+++ b/catapult/telemetry/telemetry/core/memory_cache_http_server_unittest.py
@@ -0,0 +1,66 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+
+from telemetry.core import util
+from telemetry.testing import tab_test_case
+
+
+class MemoryCacheHTTPServerTest(tab_test_case.TabTestCase):
+
+  def setUp(self):
+    super(MemoryCacheHTTPServerTest, self).setUp()
+    self._test_filename = 'bear.webm'
+    _test_file = os.path.join(util.GetUnittestDataDir(), 'bear.webm')
+    self._test_file_size = os.stat(_test_file).st_size
+
+  def testBasicHostingAndRangeRequests(self):
+    self.Navigate('blank.html')
+    x = self._tab.EvaluateJavaScript('document.body.innerHTML')
+    x = x.strip()
+
+    # Test basic html hosting.
+    self.assertEquals(x, 'Hello world')
+
+    file_size = self._test_file_size
+    last_byte = file_size - 1
+    # Test byte range request: no end byte.
+    self.CheckContentHeaders('0-', '0-%d' % last_byte, file_size)
+
+    # Test byte range request: greater than zero start byte.
+    self.CheckContentHeaders('100-', '100-%d' % last_byte, file_size - 100)
+
+    # Test byte range request: explicit byte range.
+    self.CheckContentHeaders('2-500', '2-500', '499')
+
+    # Test byte range request: no start byte.
+    self.CheckContentHeaders('-228', '%d-%d' % (file_size - 228, last_byte),
+                             '228')
+
+    # Test byte range request: end byte less than start byte.
+    self.CheckContentHeaders('100-5', '100-%d' % last_byte, file_size - 100)
+
+  def CheckContentHeaders(self, content_range_request, content_range_response,
+                          content_length_response):
+    self._tab.ExecuteJavaScript("""
+        var loaded = false;
+        var xmlhttp = new XMLHttpRequest();
+        xmlhttp.onload = function(e) {
+          loaded = true;
+        };
+        // Avoid cached content by appending unique URL param.
+        xmlhttp.open('GET', "%s?t=" + Date.now(), true);
+        xmlhttp.setRequestHeader('Range', 'bytes=%s');
+        xmlhttp.send();
+    """ % (self.UrlOfUnittestFile(self._test_filename), content_range_request))
+    self._tab.WaitForJavaScriptExpression('loaded', 5)
+    content_range = self._tab.EvaluateJavaScript(
+        'xmlhttp.getResponseHeader("Content-Range");')
+    content_range_response = 'bytes %s/%d' % (content_range_response,
+                                              self._test_file_size)
+    self.assertEquals(content_range, content_range_response)
+    content_length = self._tab.EvaluateJavaScript(
+        'xmlhttp.getResponseHeader("Content-Length");')
+    self.assertEquals(content_length, str(content_length_response))
diff --git a/catapult/telemetry/telemetry/core/network_controller.py b/catapult/telemetry/telemetry/core/network_controller.py
new file mode 100644
index 0000000..88244e1
--- /dev/null
+++ b/catapult/telemetry/telemetry/core/network_controller.py
@@ -0,0 +1,30 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+class NetworkController(object):
+  """Control network settings and servers to simulate the Web.
+
+  Network changes include forwarding device ports to host platform ports.
+  Web Page Replay is used to record and replay HTTP/HTTPS responses.
+  """
+
+  def __init__(self, network_controller_backend):
+    self._network_controller_backend = network_controller_backend
+
+  def Open(self, wpr_mode, extra_wpr_args):
+    self._network_controller_backend.Open(wpr_mode, extra_wpr_args)
+
+  @property
+  def is_open(self):
+    return self._network_controller_backend.is_open
+
+  def Close(self):
+    self._network_controller_backend.Close()
+
+  def StartReplay(self, archive_path, make_javascript_deterministic=False):
+    self._network_controller_backend.StartReplay(
+        archive_path, make_javascript_deterministic)
+
+  def StopReplay(self):
+    self._network_controller_backend.StopReplay()
diff --git a/catapult/telemetry/telemetry/core/os_version.py b/catapult/telemetry/telemetry/core/os_version.py
new file mode 100644
index 0000000..bc7bd44
--- /dev/null
+++ b/catapult/telemetry/telemetry/core/os_version.py
@@ -0,0 +1,39 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=protected-access
+
+
+class OSVersion(str):
+  def __new__(cls, friendly_name, sortable_name):
+    version = str.__new__(cls, friendly_name)
+    version._sortable_name = sortable_name
+    return version
+
+  def __lt__(self, other):
+    return self._sortable_name < other._sortable_name
+
+  def __gt__(self, other):
+    return self._sortable_name > other._sortable_name
+
+  def __le__(self, other):
+    return self._sortable_name <= other._sortable_name
+
+  def __ge__(self, other):
+    return self._sortable_name >= other._sortable_name
+
+
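+# OSVersion instances behave as their friendly-name string but compare by
+# sortable_name, e.g. XP < WIN7 < WIN10 and LEOPARD < YOSEMITE.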
+XP = OSVersion('xp', 5.1)
+VISTA = OSVersion('vista', 6.0)
+WIN7 = OSVersion('win7', 6.1)
+WIN8 = OSVersion('win8', 6.2)
+WIN10 = OSVersion('win10', 10)
+
+LEOPARD = OSVersion('leopard', 105)
+SNOWLEOPARD = OSVersion('snowleopard', 106)
+LION = OSVersion('lion', 107)
+MOUNTAINLION = OSVersion('mountainlion', 108)
+MAVERICKS = OSVersion('mavericks', 109)
+YOSEMITE = OSVersion('yosemite', 1010)
+ELCAPITAN = OSVersion('elcapitan', 1011)
diff --git a/catapult/telemetry/telemetry/core/platform.py b/catapult/telemetry/telemetry/core/platform.py
new file mode 100644
index 0000000..9aa4c81
--- /dev/null
+++ b/catapult/telemetry/telemetry/core/platform.py
@@ -0,0 +1,400 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import logging as real_logging
+import os
+import sys
+
+from telemetry.core import discover
+from telemetry.core import local_server
+from telemetry.core import memory_cache_http_server
+from telemetry.core import network_controller
+from telemetry.core import tracing_controller
+from telemetry.core import util
+from telemetry.internal.platform import (platform_backend as
+                                         platform_backend_module)
+
+_host_platform = None
+# Remote platform is a dictionary from device ids to remote platform instances.
+_remote_platforms = {}
+
+
+def _InitHostPlatformIfNeeded():
+  global _host_platform
+  if _host_platform:
+    return
+  backend = None
+  backends = _IterAllPlatformBackendClasses()
+  for platform_backend_class in backends:
+    if platform_backend_class.IsPlatformBackendForHost():
+      backend = platform_backend_class()
+      break
+  if not backend:
+    raise NotImplementedError()
+  _host_platform = Platform(backend)
+
+
+def GetHostPlatform():
+  _InitHostPlatformIfNeeded()
+  return _host_platform
+
+
+def _IterAllPlatformBackendClasses():
+  platform_dir = os.path.dirname(os.path.realpath(
+      platform_backend_module.__file__))
+  return discover.DiscoverClasses(
+      platform_dir, util.GetTelemetryDir(),
+      platform_backend_module.PlatformBackend).itervalues()
+
+
+def GetPlatformForDevice(device, finder_options, logging=real_logging):
+  """ Returns a platform instance for the device.
+    Args:
+      device: a device.Device instance.
+  """
+  if device.guid in _remote_platforms:
+    return _remote_platforms[device.guid]
+  try:
+    for platform_backend_class in _IterAllPlatformBackendClasses():
+      if platform_backend_class.SupportsDevice(device):
+        _remote_platforms[device.guid] = (
+            platform_backend_class.CreatePlatformForDevice(device,
+                                                           finder_options))
+        return _remote_platforms[device.guid]
+    return None
+  except Exception:
+    current_exception = sys.exc_info()
+    logging.error('Failed to create platform instance for %s.', device.name)
+    raise current_exception[0], current_exception[1], current_exception[2]
+
+
+class Platform(object):
+  """The platform that the target browser is running on.
+
+  Provides a limited interface to interact with the platform itself, where
+  possible. It's important to note that platforms may not provide a specific
+  API, so check with IsFooBar() for availability.
+  """
+
+  def __init__(self, platform_backend):
+    self._platform_backend = platform_backend
+    self._platform_backend.InitPlatformBackend()
+    self._platform_backend.SetPlatform(self)
+    self._network_controller = network_controller.NetworkController(
+        self._platform_backend.network_controller_backend)
+    self._tracing_controller = tracing_controller.TracingController(
+        self._platform_backend.tracing_controller_backend)
+    self._local_server_controller = local_server.LocalServerController(
+        self._platform_backend)
+    self._is_monitoring_power = False
+
+  @property
+  def is_host_platform(self):
+    return self == GetHostPlatform()
+
+  @property
+  def network_controller(self):
+    """Control network settings and servers to simulate the Web."""
+    return self._network_controller
+
+  @property
+  def tracing_controller(self):
+    return self._tracing_controller
+
+  def CanMonitorThermalThrottling(self):
+    """Platforms may be able to detect thermal throttling.
+
+    Some fan-less computers go into a reduced performance mode when their heat
+    exceeds a certain threshold. Performance tests in particular should use this
+    API to detect if this has happened and interpret results accordingly.
+    """
+    return self._platform_backend.CanMonitorThermalThrottling()
+
+  def IsThermallyThrottled(self):
+    """Returns True if the device is currently thermally throttled."""
+    return self._platform_backend.IsThermallyThrottled()
+
+  def HasBeenThermallyThrottled(self):
+    """Returns True if the device has been thermally throttled."""
+    return self._platform_backend.HasBeenThermallyThrottled()
+
+  def GetDeviceTypeName(self):
+    """Returns a string description of the Platform device, or None.
+
+    Examples: Nexus 7, Nexus 6, Desktop"""
+    return self._platform_backend.GetDeviceTypeName()
+
+  def GetArchName(self):
+    """Returns a string description of the Platform architecture.
+
+    Examples: x86_64 (posix), AMD64 (win), armeabi-v7a, x86"""
+    return self._platform_backend.GetArchName()
+
+  def GetOSName(self):
+    """Returns a string description of the Platform OS.
+
+    Examples: WIN, MAC, LINUX, CHROMEOS"""
+    return self._platform_backend.GetOSName()
+
+  def GetOSVersionName(self):
+    """Returns a logically sortable, string-like description of the Platform OS
+    version.
+
+    Examples: VISTA, WIN7, LION, MOUNTAINLION"""
+    return self._platform_backend.GetOSVersionName()
+
+  def GetOSVersionNumber(self):
+    """Returns an integer description of the Platform OS major version.
+
+    Examples: On Mac, 13 for Mavericks, 14 for Yosemite."""
+    return self._platform_backend.GetOSVersionNumber()
+
+  def CanFlushIndividualFilesFromSystemCache(self):
+    """Returns true if the disk cache can be flushed for specific files."""
+    return self._platform_backend.CanFlushIndividualFilesFromSystemCache()
+
+  def FlushEntireSystemCache(self):
+    """Flushes the OS's file cache completely.
+
+    This function may require root or administrator access."""
+    return self._platform_backend.FlushEntireSystemCache()
+
+  def FlushSystemCacheForDirectory(self, directory):
+    """Flushes the OS's file cache for the specified directory.
+
+    This function does not require root or administrator access."""
+    return self._platform_backend.FlushSystemCacheForDirectory(directory)
+
+  def FlushDnsCache(self):
+    """Flushes the OS's DNS cache completely.
+
+    This function may require root or administrator access."""
+    return self._platform_backend.FlushDnsCache()
+
+  def LaunchApplication(self,
+                        application,
+                        parameters=None,
+                        elevate_privilege=False):
+    """"Launches the given |application| with a list of |parameters| on the OS.
+
+    Set |elevate_privilege| to launch the application with root or admin rights.
+
+    Returns:
+      A popen style process handle for host platforms.
+    """
+    return self._platform_backend.LaunchApplication(
+        application,
+        parameters,
+        elevate_privilege=elevate_privilege)
+
+  def IsApplicationRunning(self, application):
+    """Returns whether an application is currently running."""
+    return self._platform_backend.IsApplicationRunning(application)
+
+  def CanLaunchApplication(self, application):
+    """Returns whether the platform can launch the given application."""
+    return self._platform_backend.CanLaunchApplication(application)
+
+  def InstallApplication(self, application):
+    """Installs the given application."""
+    return self._platform_backend.InstallApplication(application)
+
+  def CanCaptureVideo(self):
+    """Returns a bool indicating whether the platform supports video capture."""
+    return self._platform_backend.CanCaptureVideo()
+
+  def StartVideoCapture(self, min_bitrate_mbps):
+    """Starts capturing video.
+
+    Outer framing may be included (from the OS, browser window, and webcam).
+
+    Args:
+      min_bitrate_mbps: The minimum capture bitrate in MegaBits Per Second.
+          The platform is free to deliver a higher bitrate if it can do so
+          without increasing overhead.
+
+    Raises:
+      ValueError if the required |min_bitrate_mbps| can't be achieved.
+    """
+    return self._platform_backend.StartVideoCapture(min_bitrate_mbps)
+
+  def StopVideoCapture(self):
+    """Stops capturing video.
+
+    Returns:
+      A telemetry.core.video.Video object.
+    """
+    return self._platform_backend.StopVideoCapture()
+
+  def CanMonitorPower(self):
+    """Returns True iff power can be monitored asynchronously via
+    StartMonitoringPower() and StopMonitoringPower().
+    """
+    return self._platform_backend.CanMonitorPower()
+
+  def CanMeasurePerApplicationPower(self):
+    """Returns True if the power monitor can measure power for the target
+    application in isolation. False if power measurement is for full system
+    energy consumption."""
+    return self._platform_backend.CanMeasurePerApplicationPower()
+
+  def StartMonitoringPower(self, browser):
+    """Starts monitoring power utilization statistics.
+
+    Args:
+      browser: The browser to monitor.
+    """
+    assert self._platform_backend.CanMonitorPower()
+    self._platform_backend.StartMonitoringPower(browser)
+    self._is_monitoring_power = True
+
+  def StopMonitoringPower(self):
+    """Stops monitoring power utilization and returns stats
+
+    Returns:
+      None if power measurement failed for some reason, otherwise a dict of
+      power utilization statistics containing: {
+        # An identifier for the data provider, which allows the precision of
+        # the data to be evaluated. Example values: monsoon, powermetrics, ds2784
+        'identifier': identifier,
+
+        # The instantaneous power (voltage * current) reading in milliwatts at
+        # each sample.
+        'power_samples_mw':  [mw0, mw1, ..., mwN],
+
+        # The full system energy consumption during the sampling period in
+        # milliwatt hours. May be estimated by integrating power samples or may
+        # be exact on supported hardware.
+        'energy_consumption_mwh': mwh,
+
+        # The target application's energy consumption during the sampling period
+        # in milliwatt hours. Should be returned iff
+        # CanMeasurePerApplicationPower() returns True.
+        'application_energy_consumption_mwh': mwh,
+
+        # A platform-specific dictionary of additional details about the
+        # utilization of individual hardware components.
+        'component_utilization': {
+          ...
+        },
+        # Platform-specific data not attributed to any particular hardware
+        # component.
+        'platform_info': {
+
+          # Device-specific onboard temperature sensor.
+          'average_temperature_c': c,
+
+           ...
+        }
+
+      }
+    """
+    ret_val = self._platform_backend.StopMonitoringPower()
+    self._is_monitoring_power = False
+    return ret_val
+
+  def IsMonitoringPower(self):
+    """Returns true if power is currently being monitored, false otherwise."""
+    # TODO(rnephew): Remove when crbug.com/553601 is solved.
+    real_logging.info('IsMonitoringPower: %s', self._is_monitoring_power)
+    return self._is_monitoring_power
+
+  def CanMonitorNetworkData(self):
+    """Returns true if network data can be retrieved, false otherwise."""
+    return self._platform_backend.CanMonitorNetworkData()
+
+  def GetNetworkData(self, browser):
+    """Get current network data.
+    Returns:
+      Tuple of (sent_data, received_data) in kb if data can be found,
+      None otherwise.
+    """
+    assert browser.platform == self
+    return self._platform_backend.GetNetworkData(browser)
+
+  def IsCooperativeShutdownSupported(self):
+    """Indicates whether CooperativelyShutdown, below, is supported.
+    It is not necessary to implement it on all platforms."""
+    return self._platform_backend.IsCooperativeShutdownSupported()
+
+  def CooperativelyShutdown(self, proc, app_name):
+    """Cooperatively shut down the given process from subprocess.Popen.
+
+    Currently this is only implemented on Windows. See
+    crbug.com/424024 for background on why it was added.
+
+    Args:
+      proc: a process object returned from subprocess.Popen.
+      app_name: on Windows, is the prefix of the application's window
+          class name that should be searched for. This helps ensure
+          that only the application's windows are closed.
+
+    Returns True if it is believed the attempt succeeded.
+    """
+    return self._platform_backend.CooperativelyShutdown(proc, app_name)
+
+  def CanTakeScreenshot(self):
+    return self._platform_backend.CanTakeScreenshot()
+
+  # TODO(nednguyen): Implement this on Mac, Linux & Win. (crbug.com/369490)
+  def TakeScreenshot(self, file_path):
+    """ Takes a screenshot of the platform and save to |file_path|.
+
+    Note that this method may not be supported on all platform, so check with
+    CanTakeScreenshot before calling this.
+
+    Args:
+      file_path: Where to save the screenshot to. If the platform is remote,
+        |file_path| is the path on the host platform.
+
+    Returns True if it is believed the attempt succeeded.
+    """
+    return self._platform_backend.TakeScreenshot(file_path)
+
+  def StartLocalServer(self, server):
+    """Starts a LocalServer and associates it with this platform.
+    |server.Close()| should be called manually to close the started server.
+    """
+    self._local_server_controller.StartServer(server)
+
+  @property
+  def http_server(self):
+    return self._local_server_controller.GetRunningServer(
+        memory_cache_http_server.MemoryCacheHTTPServer, None)
+
+  def SetHTTPServerDirectories(self, paths):
+    """Returns True if the HTTP server was started, False otherwise."""
+    if isinstance(paths, basestring):
+      paths = set([paths])
+    paths = set(os.path.realpath(p) for p in paths)
+
+    # If any path is in a subdirectory of another, remove the subdirectory.
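+    # For example, {'/www', '/www/static'} collapses to {'/www'}
+    # (illustrative paths).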
+    duplicates = set()
+    for parent_path in paths:
+      for sub_path in paths:
+        if parent_path == sub_path:
+          continue
+        if os.path.commonprefix((parent_path, sub_path)) == parent_path:
+          duplicates.add(sub_path)
+    paths -= duplicates
+
+    if self.http_server:
+      if paths and self.http_server.paths == paths:
+        return False
+
+      self.http_server.Close()
+
+    if not paths:
+      return False
+
+    server = memory_cache_http_server.MemoryCacheHTTPServer(paths)
+    self.StartLocalServer(server)
+    return True
+
+  def StopAllLocalServers(self):
+    self._local_server_controller.Close()
+
+  @property
+  def local_servers(self):
+    """Returns the currently running local servers."""
+    return self._local_server_controller.local_servers
diff --git a/catapult/telemetry/telemetry/core/platform_unittest.py b/catapult/telemetry/telemetry/core/platform_unittest.py
new file mode 100644
index 0000000..6454d84
--- /dev/null
+++ b/catapult/telemetry/telemetry/core/platform_unittest.py
@@ -0,0 +1,46 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import tempfile
+
+from telemetry import decorators
+from telemetry.core import os_version
+from telemetry.util import image_util
+from telemetry.testing import tab_test_case
+
+
+class PlatformScreenshotTest(tab_test_case.TabTestCase):
+
+  @decorators.Disabled('win')  # crbug.com/570955
+  def testScreenshotSupported(self):
+    if self._platform.GetOSName() == 'android':
+      self.assertTrue(self._platform.CanTakeScreenshot())
+
+  # Run this test in serial to avoid multiple browsers popping up on the screen.
+  @decorators.Isolated
+  @decorators.Disabled('linux')  # crbug.com/563656
+  @decorators.Disabled('win')  # crbug.com/570955
+  def testScreenshot(self):
+    if not self._platform.CanTakeScreenshot():
+      self.skipTest('Platform does not support screenshots, skipping test.')
+    # Skip the test on Mac 10.5, 10.6, 10.7 because png format isn't
+    # recognizable on Mac < 10.8 (crbug.com/369490#c13)
+    if (self._platform.GetOSName() == 'mac' and
+        self._platform.GetOSVersionName() in
+        (os_version.LEOPARD, os_version.SNOWLEOPARD, os_version.LION)):
+      self.skipTest('OS X version %s too old' %
+                    self._platform.GetOSVersionName())
+    tf = tempfile.NamedTemporaryFile(delete=False, suffix='.png')
+    tf.close()
+    try:
+      self.Navigate('screenshot_test.html')
+      self._platform.TakeScreenshot(tf.name)
+      # Assert that screenshot image contains the color of the triangle defined
+      # in screenshot_test.html.
+      img = image_util.FromPngFile(tf.name)
+      screenshot_pixels = image_util.Pixels(img)
+      special_colored_pixel = bytearray([217, 115, 43])
+      self.assertTrue(special_colored_pixel in screenshot_pixels)
+    finally:
+      os.remove(tf.name)
diff --git a/catapult/telemetry/telemetry/core/profiling_controller.py b/catapult/telemetry/telemetry/core/profiling_controller.py
new file mode 100644
index 0000000..0e008a7
--- /dev/null
+++ b/catapult/telemetry/telemetry/core/profiling_controller.py
@@ -0,0 +1,15 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class ProfilingController(object):
+
+  def __init__(self, profiling_controller_backend):
+    self._profiling_controller_backend = profiling_controller_backend
+
+  def Start(self, profiler_name, base_output_file):
+    self._profiling_controller_backend.Start(profiler_name, base_output_file)
+
+  def Stop(self):
+    return self._profiling_controller_backend.Stop()
diff --git a/catapult/telemetry/telemetry/core/tracing_controller.py b/catapult/telemetry/telemetry/core/tracing_controller.py
new file mode 100644
index 0000000..463f958
--- /dev/null
+++ b/catapult/telemetry/telemetry/core/tracing_controller.py
@@ -0,0 +1,76 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.internal.platform import tracing_agent
+
+
+class TracingController(tracing_agent.TracingAgent):
+
+  def __init__(self, tracing_controller_backend):
+    """Provides control of the tracing systems supported by telemetry."""
+    super(TracingController, self).__init__(
+        tracing_controller_backend._platform_backend)
+    self._tracing_controller_backend = tracing_controller_backend
+
+  def StartTracing(self, tracing_config, timeout=10):
+    """Starts tracing.
+
+    The tracing config contains both tracing options and category filters.
+
+    Tracing options specify which tracing systems to activate. The category
+    filter allows fine-tuning of the data collected by the selected tracing
+    systems.
+
+    Some tracers, e.g. chrome tracing, are process-specific and are not
+    guaranteed to be supported. To accommodate such tracers, StartTracing
+    *always* succeeds, even if the tracing systems you requested are not
+    supported.
+
+    If you absolutely require a particular tracer to exist, then check
+    for its support after you have started the process in question. Or, have
+    your code fail gracefully when the data you require is not present in the
+    resulting trace.
+    """
+    self._tracing_controller_backend.StartTracing(tracing_config, timeout)
+
+  def StopTracing(self):
+    """Stops tracing and returns a TraceValue."""
+    return self._tracing_controller_backend.StopTracing()
+
+  def FlushTracing(self):
+    """Flush tracing buffer and continue tracing.
+
+    Warning: This method is a temporary hack to enable multi-tab benchmarks
+    (see https://goo.gl/8Gjstr). Please contact Telemetry owners before using
+    it.
+    """
+    self._tracing_controller_backend.FlushTracing()
+
+  @property
+  def is_tracing_running(self):
+    return self._tracing_controller_backend.is_tracing_running
+
+  def IsChromeTracingSupported(self):
+    """Returns whether chrome tracing is supported."""
+    return self._tracing_controller_backend.IsChromeTracingSupported()
+
+  def StartAgentTracing(self, config, timeout=10):
+    """ Starts agent tracing for tracing controller"""
+    return self._tracing_controller_backend.StartAgentTracing(config, timeout)
+
+  def StopAgentTracing(self, trace_data_builder):
+    """ Stops agent tracing for tracing controller. """
+    return self._tracing_controller_backend.StopAgentTracing(trace_data_builder)
+
+  def SupportsExplicitClockSync(self):
+    return self._tracing_controller_backend.SupportsExplicitClockSync()
+
+  def RecordClockSyncMarker(self, sync_id,
+                            record_controller_clocksync_marker_callback):
+    return self._tracing_controller_backend.RecordClockSyncMarker(
+        sync_id, record_controller_clocksync_marker_callback)
+
+  def ClearStateIfNeeded(self):
+    """Clear tracing state if needed."""
+    self._tracing_controller_backend.ClearStateIfNeeded()
diff --git a/catapult/telemetry/telemetry/core/tracing_controller_unittest.py b/catapult/telemetry/telemetry/core/tracing_controller_unittest.py
new file mode 100644
index 0000000..094d382
--- /dev/null
+++ b/catapult/telemetry/telemetry/core/tracing_controller_unittest.py
@@ -0,0 +1,185 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry import decorators
+from telemetry.core import platform as platform_module
+from telemetry.testing import browser_test_case
+from telemetry.testing import tab_test_case
+from telemetry.timeline import model as model_module
+from telemetry.timeline import tracing_config
+
+
+class TracingControllerTest(tab_test_case.TabTestCase):
+
+  @decorators.Isolated
+  def testModifiedConsoleTime(self):
+    tracing_controller = self._tab.browser.platform.tracing_controller
+    config = tracing_config.TracingConfig()
+    config.enable_chrome_trace = True
+    tracing_controller.StartTracing(config)
+    self.Navigate('blank.html')
+    self.assertEquals(
+        self._tab.EvaluateJavaScript('document.location.pathname;'),
+        '/blank.html')
+
+    self._tab.EvaluateJavaScript("""
+        window.__console_time = console.time;
+        console.time = function() { };
+        """)
+    with self.assertRaisesRegexp(Exception, 'Page stomped on console.time'):
+      tracing_controller.StopTracing()
+
+    # Restore console.time
+    self._tab.EvaluateJavaScript("""
+        console.time = window.__console_time;
+        delete window.__console_time;
+        """)
+
+    # Check that subsequent tests will be able to use tracing normally.
+    self.assertFalse(tracing_controller.is_tracing_running)
+    tracing_controller.StartTracing(config)
+    self.assertTrue(tracing_controller.is_tracing_running)
+    tracing_controller.StopTracing()
+    self.assertFalse(tracing_controller.is_tracing_running)
+
+  @decorators.Isolated
+  def testExceptionRaisedInStopTracing(self):
+    tracing_controller = self._tab.browser.platform.tracing_controller
+    config = tracing_config.TracingConfig()
+    config.enable_chrome_trace = True
+    tracing_controller.StartTracing(config)
+
+    self.Navigate('blank.html')
+    self._tab.EvaluateJavaScript("""
+        window.__console_time = console.time;
+        console.time = function() { };
+        """)
+    with self.assertRaisesRegexp(Exception, 'Page stomped on console.time'):
+      tracing_controller.StopTracing()
+
+    # Tracing is stopped even if there is an exception.
+    self.assertFalse(tracing_controller.is_tracing_running)
+
+  @decorators.Isolated
+  def testGotTrace(self):
+    tracing_controller = self._browser.platform.tracing_controller
+    config = tracing_config.TracingConfig()
+    config.enable_chrome_trace = True
+    tracing_controller.StartTracing(config)
+
+    trace_data = tracing_controller.StopTracing()
+    # Test that trace data is parsable
+    model = model_module.TimelineModel(trace_data)
+    assert len(model.processes) > 0
+
+  @decorators.Isolated
+  def testStartAndStopTraceMultipleTimes(self):
+    tracing_controller = self._browser.platform.tracing_controller
+    config = tracing_config.TracingConfig()
+    config.enable_chrome_trace = True
+    tracing_controller.StartTracing(config)
+    self.assertFalse(tracing_controller.StartTracing(config))
+
+    trace_data = tracing_controller.StopTracing()
+    # Test that trace data is parsable
+    model_module.TimelineModel(trace_data)
+    self.assertFalse(tracing_controller.is_tracing_running)
+    # Calling stop again will raise an exception.
+    self.assertRaises(Exception, tracing_controller.StopTracing)
+
+  @decorators.Isolated
+  def testFlushTracing(self):
+    SUBTRACE_COUNT = 5
+
+    tab = self._browser.tabs[0]
+    def InjectMarker(index):
+      marker = 'test-marker-%d' % index
+      tab.EvaluateJavaScript('console.time("%s");' % marker)
+      tab.EvaluateJavaScript('console.timeEnd("%s");' % marker)
+
+    # Set up the tracing config.
+    tracing_controller = self._browser.platform.tracing_controller
+    config = tracing_config.TracingConfig()
+    config.enable_chrome_trace = True
+
+    # Start tracing and inject a unique marker into the sub-trace.
+    tracing_controller.StartTracing(config)
+    self.assertTrue(tracing_controller.is_tracing_running)
+    InjectMarker(0)
+
+    # Flush tracing |SUBTRACE_COUNT - 1| times and inject a unique marker into
+    # the sub-trace each time.
+    for i in xrange(1, SUBTRACE_COUNT):
+      tracing_controller.FlushTracing()
+      self.assertTrue(tracing_controller.is_tracing_running)
+      InjectMarker(i)
+
+    # Stop tracing.
+    trace_data = tracing_controller.StopTracing()
+    self.assertFalse(tracing_controller.is_tracing_running)
+
+    # Test that trace data is parsable
+    model = model_module.TimelineModel(trace_data)
+
+    # Check that the markers 'test-marker-0', 'flush-tracing', 'test-marker-1',
+    # ..., 'flush-tracing', 'test-marker-|SUBTRACE_COUNT - 1|' are monotonic.
+    custom_markers = [marker for i in xrange(SUBTRACE_COUNT)
+                      for marker in model.FindTimelineMarkers(
+                          'test-marker-%d' % i)]
+    flush_markers = model.FindTimelineMarkers(
+        ['flush-tracing'] * (SUBTRACE_COUNT - 1))
+    markers = [marker for group in zip(custom_markers, flush_markers)
+               for marker in group] + custom_markers[-1:]
+
+    self.assertEquals(len(custom_markers), SUBTRACE_COUNT)
+    self.assertEquals(len(flush_markers), SUBTRACE_COUNT - 1)
+    self.assertEquals(len(markers), 2 * SUBTRACE_COUNT - 1)
+
+    for i in xrange(1, len(markers)):
+      self.assertLess(markers[i - 1].end, markers[i].start)
+
+  def _StartupTracing(self, platform):
+    # Stop browser
+    browser_test_case.teardown_browser()
+
+    # Start tracing
+    self.assertFalse(platform.tracing_controller.is_tracing_running)
+    config = tracing_config.TracingConfig()
+    config.enable_chrome_trace = True
+    platform.tracing_controller.StartTracing(config)
+    self.assertTrue(platform.tracing_controller.is_tracing_running)
+
+    try:
+      # Start browser
+      self.setUpClass()
+      self._browser.tabs[0].Navigate('about:blank')
+      self._browser.tabs[0].WaitForDocumentReadyStateToBeInteractiveOrBetter()
+      self.assertEquals(platform, self._browser.platform)
+
+      # Calling start tracing again will return False
+      self.assertFalse(platform.tracing_controller.StartTracing(config))
+
+      trace_data = platform.tracing_controller.StopTracing()
+      # Test that trace data is parsable
+      model_module.TimelineModel(trace_data)
+      self.assertFalse(platform.tracing_controller.is_tracing_running)
+      # Calling stop tracing again will raise an exception.
+      self.assertRaises(Exception, platform.tracing_controller.StopTracing)
+    finally:
+      if platform.tracing_controller.is_tracing_running:
+        platform.tracing_controller.StopTracing()
+      if self._browser:
+        self._browser.Close()
+        self._browser = None
+
+  @decorators.Enabled('android')
+  @decorators.Isolated
+  def testStartupTracingOnAndroid(self):
+    self._StartupTracing(self._browser.platform)
+
+  # Not enabled on win because of crbug.com/570955
+  @decorators.Enabled('linux', 'mac')
+  @decorators.Isolated
+  def testStartupTracingOnDesktop(self):
+    self._StartupTracing(platform_module.GetHostPlatform())
diff --git a/catapult/telemetry/telemetry/core/util.py b/catapult/telemetry/telemetry/core/util.py
new file mode 100644
index 0000000..9531efa
--- /dev/null
+++ b/catapult/telemetry/telemetry/core/util.py
@@ -0,0 +1,183 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import glob
+import imp
+import inspect
+import logging
+import os
+import socket
+import sys
+import time
+
+from catapult_base import util as catapult_util  # pylint: disable=import-error
+
+from telemetry.core import exceptions
+
+
+IsRunningOnCrosDevice = catapult_util.IsRunningOnCrosDevice
+GetCatapultDir = catapult_util.GetCatapultDir
+
+
+def GetBaseDir():
+  main_module = sys.modules['__main__']
+  if hasattr(main_module, '__file__'):
+    return os.path.dirname(os.path.abspath(main_module.__file__))
+  else:
+    return os.getcwd()
+
+
+def GetCatapultThirdPartyDir():
+  return os.path.normpath(os.path.join(GetCatapultDir(), 'third_party'))
+
+
+def GetTelemetryDir():
+  return os.path.normpath(os.path.join(
+      os.path.abspath(__file__), '..', '..', '..'))
+
+
+def GetTelemetryThirdPartyDir():
+  return os.path.join(GetTelemetryDir(), 'third_party')
+
+
+def GetUnittestDataDir():
+  return os.path.join(GetTelemetryDir(), 'telemetry', 'internal', 'testing')
+
+
+def GetChromiumSrcDir():
+  return os.path.normpath(os.path.join(GetTelemetryDir(), '..', '..', '..'))
+
+
+_counter = [0]
+
+
+def _GetUniqueModuleName():
+  _counter[0] += 1
+  return "page_set_module_" + str(_counter[0])
+
+
+def GetPythonPageSetModule(file_path):
+  return imp.load_source(_GetUniqueModuleName(), file_path)
+
+
+def WaitFor(condition, timeout):
+  """Waits for up to |timeout| secs for the function |condition| to return True.
+
+  The polling interval is (elapsed_time / 10), clamped between 0.1s and 5s.
+
+  Returns:
+    Result of |condition| function (if present).
+  """
+  min_poll_interval = 0.1
+  max_poll_interval = 5
+  output_interval = 300
+
+  def GetConditionString():
+    if condition.__name__ == '<lambda>':
+      try:
+        return inspect.getsource(condition).strip()
+      except IOError:
+        pass
+    return condition.__name__
+
+  start_time = time.time()
+  last_output_time = start_time
+  while True:
+    res = condition()
+    if res:
+      return res
+    now = time.time()
+    elapsed_time = now - start_time
+    last_output_elapsed_time = now - last_output_time
+    if elapsed_time > timeout:
+      raise exceptions.TimeoutException('Timed out while waiting %ds for %s.' %
+                                        (timeout, GetConditionString()))
+    if last_output_elapsed_time > output_interval:
+      logging.info('Continuing to wait %ds for %s. Elapsed: %ds.', timeout,
+                   GetConditionString(), elapsed_time)
+      last_output_time = time.time()
+    poll_interval = min(
+        max(elapsed_time / 10., min_poll_interval), max_poll_interval)
+    time.sleep(poll_interval)
+
+
+class PortKeeper(object):
+  """Port keeper hold an available port on the system.
+
+  Before actually use the port, you must call Release().
+  """
+
+  def __init__(self):
+    self._temp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    self._temp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+    self._temp_socket.bind(('', 0))
+    self._port = self._temp_socket.getsockname()[1]
+
+  @property
+  def port(self):
+    return self._port
+
+  def Release(self):
+    assert self._temp_socket, 'Already released'
+    self._temp_socket.close()
+    self._temp_socket = None
+
+
+def GetUnreservedAvailableLocalPort():
+  """Returns an available port on the system.
+
+  WARNING: This method does not reserve the port it returns, so it may be used
+  by something else before you get to use it. This can lead to flake.
+  """
+  tmp = socket.socket()
+  tmp.bind(('', 0))
+  port = tmp.getsockname()[1]
+  tmp.close()
+
+  return port
+
+
+def GetBuildDirectories(chrome_root=None):
+  """Yields all combination of Chromium build output directories."""
+  # chrome_root can be set to something else via --chrome-root.
+  if not chrome_root:
+    chrome_root = GetChromiumSrcDir()
+
+  # CHROMIUM_OUTPUT_DIR can be set by --chromium-output-directory.
+  output_dir = os.environ.get('CHROMIUM_OUTPUT_DIR')
+  if output_dir:
+    yield os.path.join(chrome_root, output_dir)
+  elif os.path.exists('build.ninja'):
+    yield os.getcwd()
+  else:
+    out_dir = os.environ.get('CHROMIUM_OUT_DIR')
+    if out_dir:
+      build_dirs = [out_dir]
+    else:
+      build_dirs = ['build',
+                    'out',
+                    'xcodebuild']
+
+    build_types = ['Debug', 'Debug_x64', 'Release', 'Release_x64', 'Default']
+
+    for build_dir in build_dirs:
+      for build_type in build_types:
+        yield os.path.join(chrome_root, build_dir, build_type)
+
+
+def GetSequentialFileName(base_name):
+  """Returns the next sequential file name based on |base_name| and the
+  existing files. base_name should not contain a file extension.
+  e.g. if base_name is /tmp/test, and /tmp/test_000.json and
+  /tmp/test_001.mp3 exist, this returns /tmp/test_002. If no other
+  sequential file names exist, this returns /tmp/test_000.
+  """
+  name, ext = os.path.splitext(base_name)
+  assert ext == '', 'base_name cannot contain file extension.'
+  index = 0
+  while True:
+    output_name = '%s_%03d' % (name, index)
+    if not glob.glob(output_name + '.*'):
+      break
+    index = index + 1
+  return output_name
diff --git a/catapult/telemetry/telemetry/core/util_unittest.py b/catapult/telemetry/telemetry/core/util_unittest.py
new file mode 100644
index 0000000..0d8ace3
--- /dev/null
+++ b/catapult/telemetry/telemetry/core/util_unittest.py
@@ -0,0 +1,76 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import os
+import shutil
+import tempfile
+import unittest
+
+from telemetry.core import exceptions
+from telemetry.core import util
+
+
+class TestWait(unittest.TestCase):
+
+  def testNonTimeout(self):
+
+    def test():
+      return True
+
+    util.WaitFor(test, 0.1)
+
+  def testTimeout(self):
+
+    def test():
+      return False
+
+    self.assertRaises(exceptions.TimeoutException,
+                      lambda: util.WaitFor(test, 0.1))
+
+  def testCallable(self):
+    """Test methods and anonymous functions, functions are tested elsewhere."""
+
+    class Test(object):
+
+      def Method(self):
+        return 'test'
+
+    util.WaitFor(Test().Method, 0.1)
+
+    util.WaitFor(lambda: 1, 0.1)
+
+    # Test noncallable condition.
+    self.assertRaises(TypeError, lambda: util.WaitFor('test', 0.1))
+
+  def testReturn(self):
+    self.assertEquals('test', util.WaitFor(lambda: 'test', 0.1))
+
+
+class TestGetSequentialFileName(unittest.TestCase):
+
+  def __init__(self, *args, **kwargs):
+    super(TestGetSequentialFileName, self).__init__(*args, **kwargs)
+    self.test_directory = None
+
+  def setUp(self):
+    self.test_directory = tempfile.mkdtemp()
+
+  def testGetSequentialFileNameNoOtherSequentialFile(self):
+    next_json_test_file_path = util.GetSequentialFileName(os.path.join(
+        self.test_directory, 'test'))
+    self.assertEquals(
+        os.path.join(self.test_directory, 'test_000'), next_json_test_file_path)
+
+  def testGetSequentialFileNameWithOtherSequentialFiles(self):
+    # Create test_000.json, test_001.json, test_002.json in test directory.
+    for i in xrange(3):
+      with open(
+          os.path.join(self.test_directory, 'test_%03d.json' % i), 'w') as _:
+        pass
+    next_json_test_file_path = util.GetSequentialFileName(os.path.join(
+        self.test_directory, 'test'))
+    self.assertEquals(
+        os.path.join(self.test_directory, 'test_003'), next_json_test_file_path)
+
+  def tearDown(self):
+    shutil.rmtree(self.test_directory)
diff --git a/catapult/telemetry/telemetry/decorators.py b/catapult/telemetry/telemetry/decorators.py
new file mode 100644
index 0000000..7559384
--- /dev/null
+++ b/catapult/telemetry/telemetry/decorators.py
@@ -0,0 +1,252 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+# pylint: disable=protected-access
+
+import datetime
+import functools
+import os
+import inspect
+import types
+import warnings
+
+
+def Cache(obj):
+  """Decorator for caching read-only properties.
+
+  Example usage (always returns the same Foo instance):
+    @Cache
+    def CreateFoo():
+      return Foo()
+
+  If CreateFoo() accepts parameters, a separate cached value is maintained
+  for each unique parameter combination.
+
+  Cached methods maintain their cache for the lifetime of the /instance/, while
+  cached functions maintain their cache for the lifetime of the /module/.
+  """
+  @functools.wraps(obj)
+  def Cacher(*args, **kwargs):
+    cacher = args[0] if inspect.getargspec(obj).args[:1] == ['self'] else obj
+    cacher.__cache = cacher.__cache if hasattr(cacher, '__cache') else {}
+    key = str(obj) + str(args) + str(kwargs)
+    if key not in cacher.__cache:
+      cacher.__cache[key] = obj(*args, **kwargs)
+    return cacher.__cache[key]
+  return Cacher
+
+
+class Deprecated(object):
+
+  def __init__(self, year, month, day, extra_guidance=''):
+    self._date_of_support_removal = datetime.date(year, month, day)
+    self._extra_guidance = extra_guidance
+
+  def _DisplayWarningMessage(self, target):
+    target_str = ''
+    if isinstance(target, types.FunctionType):
+      target_str = 'Function %s' % target.__name__
+    else:
+      target_str = 'Class %s' % target.__name__
+    warnings.warn('%s is deprecated. It will no longer be supported on %s. '
+                  'Please remove it or switch to an alternative before '
+                  'that time. %s\n'
+                  % (target_str,
+                     self._date_of_support_removal.strftime('%B %d, %Y'),
+                     self._extra_guidance),
+                  stacklevel=self._ComputeStackLevel())
+
+  def _ComputeStackLevel(self):
+    this_file, _ = os.path.splitext(__file__)
+    frame = inspect.currentframe()
+    i = 0
+    while True:
+      filename = frame.f_code.co_filename
+      if not filename.startswith(this_file):
+        return i
+      frame = frame.f_back
+      i += 1
+
+  def __call__(self, target):
+    if isinstance(target, types.FunctionType):
+      @functools.wraps(target)
+      def wrapper(*args, **kwargs):
+        self._DisplayWarningMessage(target)
+        return target(*args, **kwargs)
+      return wrapper
+    elif inspect.isclass(target):
+      original_ctor = target.__init__
+
+      # We have to handle the case where original_ctor is object.__init__
+      # separately, since object.__init__ does not have __module__ defined,
+      # which causes functools.wraps() to raise an exception.
+      if original_ctor == object.__init__:
+        def new_ctor(*args, **kwargs):
+          self._DisplayWarningMessage(target)
+          return original_ctor(*args, **kwargs)
+      else:
+        @functools.wraps(original_ctor)
+        def new_ctor(*args, **kwargs):
+          self._DisplayWarningMessage(target)
+          return original_ctor(*args, **kwargs)
+
+      target.__init__ = new_ctor
+      return target
+    else:
+      raise TypeError('@Deprecated is only applicable to functions or classes')
+
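+# Illustrative usage of Deprecated (hypothetical date and guidance):
+#   @Deprecated(2016, 12, 1, 'Use the replacement helper instead.')
+#   def OldHelper():
+#     pass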
+
+def Disabled(*args):
+  """Decorator for disabling tests/benchmarks.
+
+  If args are given, the test will be disabled if ANY of the args match the
+  browser type, OS name or OS version:
+    @Disabled('canary')        # Disabled for canary browsers
+    @Disabled('win')           # Disabled on Windows.
+    @Disabled('win', 'linux')  # Disabled on both Windows and Linux.
+    @Disabled('mavericks')     # Disabled on Mac Mavericks (10.9) only.
+    @Disabled('all')  # Unconditionally disabled.
+  """
+  def _Disabled(func):
+    if not hasattr(func, '_disabled_strings'):
+      func._disabled_strings = set()
+    func._disabled_strings.update(disabled_strings)
+    return func
+  assert args, (
+      "@Disabled(...) requires arguments. Use @Disabled('all') if you want to "
+      'unconditionally disable the test.')
+  assert not callable(args[0]), 'Please use @Disabled(..).'
+  disabled_strings = list(args)
+  for disabled_string in disabled_strings:
+    # TODO(tonyg): Validate that these strings are recognized.
+    assert isinstance(disabled_string, str), '@Disabled accepts a list of strs'
+  return _Disabled
+
+
+def Enabled(*args):
+  """Decorator for enabling tests/benchmarks.
+
+  The test will be enabled if ANY of the args match the browser type, OS name
+  or OS version:
+    @Enabled('canary')        # Enabled only for canary browsers
+    @Enabled('win')           # Enabled only on Windows.
+    @Enabled('win', 'linux')  # Enabled only on Windows or Linux.
+    @Enabled('mavericks')     # Enabled only on Mac Mavericks (10.9).
+  """
+  def _Enabled(func):
+    if not hasattr(func, '_enabled_strings'):
+      func._enabled_strings = set()
+    func._enabled_strings.update(enabled_strings)
+    return func
+  assert args, '@Enabled(..) requires arguments'
+  assert not callable(args[0]), 'Please use @Enabled(..).'
+  enabled_strings = list(args)
+  for enabled_string in enabled_strings:
+    # TODO(tonyg): Validate that these strings are recognized.
+    assert isinstance(enabled_string, str), '@Enabled accepts a list of strs'
+  return _Enabled
+
+
+# TODO(dpranke): Remove if we don't need this.
+def Isolated(*args):
+  """Decorator for noting that tests must be run in isolation.
+
+  The test will be run by itself (not concurrently with any other tests)
+  if ANY of the args match the browser type, OS name, or OS version."""
+  def _Isolated(func):
+    if not isinstance(func, types.FunctionType):
+      func._isolated_strings = isolated_strings
+      return func
+    @functools.wraps(func)
+    def wrapper(*args, **kwargs):
+      func(*args, **kwargs)
+    wrapper._isolated_strings = isolated_strings
+    return wrapper
+  if len(args) == 1 and callable(args[0]):
+    isolated_strings = []
+    return _Isolated(args[0])
+  isolated_strings = list(args)
+  for isolated_string in isolated_strings:
+    # TODO(tonyg): Validate that these strings are recognized.
+    assert isinstance(isolated_string, str), 'Isolated accepts a list of strs'
+  return _Isolated
+
+
+# TODO(nednguyen): Remove this and have call site just use ShouldSkip directly.
+def IsEnabled(test, possible_browser):
+  """Returns True iff |test| is enabled given the |possible_browser|.
+
+  Use to respect the @Enabled / @Disabled decorators.
+
+  Args:
+    test: A function or class that may contain _disabled_strings and/or
+          _enabled_strings attributes.
+    possible_browser: A PossibleBrowser to check whether |test| may run against.
+  """
+  should_skip, msg = ShouldSkip(test, possible_browser)
+  return (not should_skip, msg)
+
+
+def ShouldSkip(test, possible_browser):
+  """Returns whether the test should be skipped and the reason for it."""
+  platform_attributes = _PlatformAttributes(possible_browser)
+
+  if hasattr(test, '__name__'):
+    name = test.__name__
+  elif hasattr(test, '__class__'):
+    name = test.__class__.__name__
+  else:
+    name = str(test)
+
+  skip = 'Skipping %s (%s) because' % (name, str(test))
+  running = 'You are running %r.' % platform_attributes
+
+  if hasattr(test, '_disabled_strings'):
+    if 'all' in test._disabled_strings:
+      return (True, '%s it is unconditionally disabled.' % skip)
+    if set(test._disabled_strings) & set(platform_attributes):
+      return (True, '%s it is disabled for %s. %s' %
+                      (skip, ' and '.join(test._disabled_strings), running))
+
+  if hasattr(test, '_enabled_strings'):
+    if 'all' in test._enabled_strings:
+      return False, None  # No arguments to @Enabled means always enable.
+    if not set(test._enabled_strings) & set(platform_attributes):
+      return (True, '%s it is only enabled for %s. %s' %
+                      (skip, ' or '.join(test._enabled_strings), running))
+
+  return False, None
+
+
+def ShouldBeIsolated(test, possible_browser):
+  platform_attributes = _PlatformAttributes(possible_browser)
+  if hasattr(test, '_isolated_strings'):
+    isolated_strings = test._isolated_strings
+    if not isolated_strings:
+      return True # No arguments to @Isolated means always isolate.
+    for isolated_string in isolated_strings:
+      if isolated_string in platform_attributes:
+        return True
+    return False
+  return False
+
+
+def _PlatformAttributes(possible_browser):
+  """Returns a list of platform attribute strings."""
+  attributes = [a.lower() for a in [
+      possible_browser.browser_type,
+      possible_browser.platform.GetOSName(),
+      possible_browser.platform.GetOSVersionName(),
+  ]]
+  if possible_browser.supports_tab_control:
+    attributes.append('has tabs')
+  if 'content-shell' in possible_browser.browser_type:
+    attributes.append('content-shell')
+  if possible_browser.browser_type == 'reference':
+    ref_attributes = []
+    for attribute in attributes:
+      if attribute != 'reference':
+        ref_attributes.append('%s-reference' % attribute)
+    attributes.extend(ref_attributes)
+  return attributes
diff --git a/catapult/telemetry/telemetry/decorators_unittest.py b/catapult/telemetry/telemetry/decorators_unittest.py
new file mode 100644
index 0000000..a65bfb8
--- /dev/null
+++ b/catapult/telemetry/telemetry/decorators_unittest.py
@@ -0,0 +1,350 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+import mock
+
+from telemetry.core import platform
+from telemetry import decorators
+from telemetry.internal.browser import possible_browser
+
+
+class FakeTest(object):
+  def SetEnabledStrings(self, enabled_strings):
+    # pylint: disable=attribute-defined-outside-init
+    self._enabled_strings = enabled_strings
+
+  def SetDisabledStrings(self, disabled_strings):
+    # pylint: disable=attribute-defined-outside-init
+    self._disabled_strings = disabled_strings
+
+
+class TestDisableDecorators(unittest.TestCase):
+
+  def testDisabledStringOnFunction(self):
+    @decorators.Disabled('bar')
+    def Sum():
+      return 1 + 1
+    self.assertEquals({'bar'}, Sum._disabled_strings)
+
+    @decorators.Disabled('bar')
+    @decorators.Disabled('baz')
+    @decorators.Disabled('bart', 'baz')
+    def Product():
+      return 1 * 1
+    self.assertEquals({'bar', 'bart', 'baz'}, Product._disabled_strings)
+
+  def testDisabledStringOnClass(self):
+    @decorators.Disabled('windshield')
+    class Ford(object):
+      pass
+    self.assertEquals({'windshield'}, Ford._disabled_strings)
+
+    @decorators.Disabled('windows', 'Drive')
+    @decorators.Disabled('wheel')
+    @decorators.Disabled('windows')
+    class Honda(object):
+      pass
+    self.assertEquals({'wheel', 'Drive', 'windows'}, Honda._disabled_strings)
+
+  def testDisabledStringOnMethod(self):
+    class Ford(object):
+      @decorators.Disabled('windshield')
+      def Drive(self):
+        pass
+    self.assertEquals({'windshield'}, Ford().Drive._disabled_strings)
+
+    class Honda(object):
+      @decorators.Disabled('windows', 'Drive')
+      @decorators.Disabled('wheel')
+      @decorators.Disabled('windows')
+      def Drive(self):
+        pass
+    self.assertEquals({'wheel', 'Drive', 'windows'},
+                      Honda().Drive._disabled_strings)
+
+class TestEnableDecorators(unittest.TestCase):
+
+  def testEnabledStringOnFunction(self):
+    @decorators.Enabled('minus', 'power')
+    def Sum():
+      return 1 + 1
+    self.assertEquals({'minus', 'power'}, Sum._enabled_strings)
+
+    @decorators.Enabled('dot')
+    @decorators.Enabled('product')
+    @decorators.Enabled('product', 'dot')
+    def Product():
+      return 1 * 1
+    self.assertEquals({'dot', 'product'}, Product._enabled_strings)
+
+  def testEnabledStringOnClass(self):
+    @decorators.Enabled('windshield', 'light')
+    class Ford(object):
+      pass
+    self.assertEquals({'windshield', 'light'}, Ford._enabled_strings)
+
+    @decorators.Enabled('wheel', 'Drive')
+    @decorators.Enabled('wheel')
+    @decorators.Enabled('windows')
+    class Honda(object):
+      pass
+    self.assertEquals({'wheel', 'Drive', 'windows'}, Honda._enabled_strings)
+
+  def testEnabledStringOnMethod(self):
+    class Ford(object):
+      @decorators.Enabled('windshield')
+      def Drive(self):
+        pass
+    self.assertEquals({'windshield'}, Ford().Drive._enabled_strings)
+
+    class Honda(object):
+      @decorators.Enabled('windows', 'Drive')
+      @decorators.Enabled('wheel', 'Drive')
+      @decorators.Enabled('windows')
+      def Drive(self):
+        pass
+    self.assertEquals({'wheel', 'Drive', 'windows'},
+                      Honda().Drive._enabled_strings)
+
+
+class TestShouldSkip(unittest.TestCase):
+
+  def setUp(self):
+    fake_platform = mock.Mock(spec_set=platform.Platform)
+    fake_platform.GetOSName.return_value = 'os_name'
+    fake_platform.GetOSVersionName.return_value = 'os_version_name'
+
+    self.possible_browser = mock.Mock(spec_set=possible_browser.PossibleBrowser)
+    self.possible_browser.browser_type = 'browser_type'
+    self.possible_browser.platform = fake_platform
+    self.possible_browser.supports_tab_control = False
+
+  def testEnabledStrings(self):
+    test = FakeTest()
+
+    # When no enabled_strings is given, everything should be enabled.
+    self.assertFalse(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetEnabledStrings(['os_name'])
+    self.assertFalse(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetEnabledStrings(['another_os_name'])
+    self.assertTrue(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetEnabledStrings(['os_version_name'])
+    self.assertFalse(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetEnabledStrings(['os_name', 'another_os_name'])
+    self.assertFalse(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetEnabledStrings(['another_os_name', 'os_name'])
+    self.assertFalse(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetEnabledStrings(['another_os_name', 'another_os_version_name'])
+    self.assertTrue(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetEnabledStrings(['os_name-reference'])
+    self.assertTrue(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetEnabledStrings(['another_os_name-reference'])
+    self.assertTrue(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetEnabledStrings(['os_version_name-reference'])
+    self.assertTrue(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetEnabledStrings(['os_name-reference', 'another_os_name-reference'])
+    self.assertTrue(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetEnabledStrings(['another_os_name-reference', 'os_name-reference'])
+    self.assertTrue(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetEnabledStrings(['another_os_name-reference',
+                            'another_os_version_name-reference'])
+    self.assertTrue(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+  def testDisabledStrings(self):
+    test = FakeTest()
+
+    # When no disabled_strings is given, nothing should be disabled.
+    self.assertFalse(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetDisabledStrings(['os_name'])
+    self.assertTrue(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetDisabledStrings(['another_os_name'])
+    self.assertFalse(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetDisabledStrings(['os_version_name'])
+    self.assertTrue(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetDisabledStrings(['os_name', 'another_os_name'])
+    self.assertTrue(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetDisabledStrings(['another_os_name', 'os_name'])
+    self.assertTrue(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetDisabledStrings(['another_os_name', 'another_os_version_name'])
+    self.assertFalse(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetDisabledStrings(['reference'])
+    self.assertFalse(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetDisabledStrings(['os_name-reference'])
+    self.assertFalse(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetDisabledStrings(['another_os_name-reference'])
+    self.assertFalse(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetDisabledStrings(['os_version_name-reference'])
+    self.assertFalse(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetDisabledStrings(['os_name-reference', 'another_os_name-reference'])
+    self.assertFalse(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetDisabledStrings(['another_os_name-reference', 'os_name-reference'])
+    self.assertFalse(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetDisabledStrings(['another_os_name-reference',
+                             'another_os_version_name-reference'])
+    self.assertFalse(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+  def testReferenceEnabledStrings(self):
+    self.possible_browser.browser_type = 'reference'
+    test = FakeTest()
+
+    # When no enabled_strings is given, everything should be enabled.
+    self.assertFalse(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetEnabledStrings(['os_name-reference'])
+    self.assertFalse(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetEnabledStrings(['another_os_name-reference'])
+    self.assertTrue(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetEnabledStrings(['os_version_name-reference'])
+    self.assertFalse(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetEnabledStrings(['os_name-reference', 'another_os_name-reference'])
+    self.assertFalse(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetEnabledStrings(['another_os_name-reference', 'os_name-reference'])
+    self.assertFalse(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetEnabledStrings(['another_os_name-reference',
+                            'another_os_version_name-reference'])
+    self.assertTrue(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+  def testReferenceDisabledStrings(self):
+    self.possible_browser.browser_type = 'reference'
+    test = FakeTest()
+
+    # When no disabled_strings is given, nothing should be disabled.
+    self.assertFalse(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetDisabledStrings(['reference'])
+    self.assertTrue(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetDisabledStrings(['os_name-reference'])
+    self.assertTrue(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetDisabledStrings(['another_os_name-reference'])
+    self.assertFalse(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetDisabledStrings(['os_version_name-reference'])
+    self.assertTrue(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetDisabledStrings(['os_name-reference', 'another_os_name-reference'])
+    self.assertTrue(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetDisabledStrings(['another_os_name-reference', 'os_name-reference'])
+    self.assertTrue(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+    test.SetDisabledStrings(['another_os_name-reference',
+                             'another_os_version_name-reference'])
+    self.assertFalse(decorators.ShouldSkip(test, self.possible_browser)[0])
+
+class TestDeprecation(unittest.TestCase):
+
+  @mock.patch('warnings.warn')
+  def testFunctionDeprecation(self, warn_mock):
+    @decorators.Deprecated(2015, 12, 1)
+    def Foo(x):
+      return x
+    Foo(1)
+    warn_mock.assert_called_with(
+        'Function Foo is deprecated. It will no longer be supported on '
+        'December 01, 2015. Please remove it or switch to an alternative '
+        'before that time. \n', stacklevel=4)
+
+  @mock.patch('warnings.warn')
+  def testMethodDeprecated(self, warn_mock):
+
+    class Bar(object):
+      @decorators.Deprecated(2015, 12, 1, 'Testing only.')
+      def Foo(self, x):
+        return x
+
+    Bar().Foo(1)
+    warn_mock.assert_called_with(
+        'Function Foo is deprecated. It will no longer be supported on '
+        'December 01, 2015. Please remove it or switch to an alternative '
+        'before that time. Testing only.\n', stacklevel=4)
+
+  @mock.patch('warnings.warn')
+  def testClassWithoutInitDefinedDeprecated(self, warn_mock):
+    @decorators.Deprecated(2015, 12, 1)
+    class Bar(object):
+      def Foo(self, x):
+        return x
+
+    Bar().Foo(1)
+    warn_mock.assert_called_with(
+        'Class Bar is deprecated. It will no longer be supported on '
+        'December 01, 2015. Please remove it or switch to an alternative '
+        'before that time. \n', stacklevel=4)
+
+  @mock.patch('warnings.warn')
+  def testClassWithInitDefinedDeprecated(self, warn_mock):
+
+    @decorators.Deprecated(2015, 12, 1)
+    class Bar(object):
+      def __init__(self):
+        pass
+      def Foo(self, x):
+        return x
+
+    Bar().Foo(1)
+    warn_mock.assert_called_with(
+        'Class Bar is deprecated. It will no longer be supported on '
+        'December 01, 2015. Please remove it or switch to an alternative '
+        'before that time. \n', stacklevel=4)
+
+  @mock.patch('warnings.warn')
+  def testInheritedClassDeprecated(self, warn_mock):
+    class Ba(object):
+      pass
+
+    @decorators.Deprecated(2015, 12, 1)
+    class Bar(Ba):
+      def Foo(self, x):
+        return x
+
+    class Baz(Bar):
+      pass
+
+    Baz().Foo(1)
+    warn_mock.assert_called_with(
+        'Class Bar is deprecated. It will no longer be supported on '
+        'December 01, 2015. Please remove it or switch to an alternative '
+        'before that time. \n', stacklevel=4)
+
+  def testReturnValue(self):
+    class Bar(object):
+      @decorators.Deprecated(2015, 12, 1, 'Testing only.')
+      def Foo(self, x):
+        return x
+
+    self.assertEquals(5, Bar().Foo(5))
diff --git a/catapult/telemetry/telemetry/internal/__init__.py b/catapult/telemetry/telemetry/internal/__init__.py
new file mode 100644
index 0000000..50b23df
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/catapult/perf_insights/third_party/__init__.py b/catapult/telemetry/telemetry/internal/actions/__init__.py
similarity index 100%
copy from catapult/perf_insights/third_party/__init__.py
copy to catapult/telemetry/telemetry/internal/actions/__init__.py
diff --git a/catapult/telemetry/telemetry/internal/actions/action_runner_unittest.py b/catapult/telemetry/telemetry/internal/actions/action_runner_unittest.py
new file mode 100644
index 0000000..d17c4a7
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/action_runner_unittest.py
@@ -0,0 +1,287 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import unittest
+
+from telemetry.core import exceptions
+from telemetry import decorators
+from telemetry.internal.actions import page_action
+from telemetry.page import action_runner as action_runner_module
+from telemetry.testing import tab_test_case
+import mock
+from telemetry.timeline import model
+from telemetry.timeline import tracing_config
+from telemetry.web_perf import timeline_interaction_record as tir_module
+
+
+class ActionRunnerInteractionTest(tab_test_case.TabTestCase):
+
+  def GetInteractionRecords(self, trace_data):
+    timeline_model = model.TimelineModel(trace_data)
+    renderer_thread = timeline_model.GetRendererThreadFromTabId(self._tab.id)
+    return [
+        tir_module.TimelineInteractionRecord.FromAsyncEvent(e)
+        for e in renderer_thread.async_slices
+        if tir_module.IsTimelineInteractionRecord(e.name)
+        ]
+
+  def VerifyIssuingInteractionRecords(self, **interaction_kwargs):
+    action_runner = action_runner_module.ActionRunner(self._tab,
+                                                      skip_waits=True)
+    self.Navigate('interaction_enabled_page.html')
+    action_runner.Wait(1)
+    config = tracing_config.TracingConfig()
+    config.SetNoOverheadFilter()
+    config.enable_chrome_trace = True
+    self._browser.platform.tracing_controller.StartTracing(config)
+    with action_runner.CreateInteraction('InteractionName',
+                                         **interaction_kwargs):
+      pass
+    trace_data = self._browser.platform.tracing_controller.StopTracing()
+
+    records = self.GetInteractionRecords(trace_data)
+    self.assertEqual(
+        1, len(records),
+        'Failed to issue the interaction record on the tracing timeline.'
+        ' Trace data:\n%s' % repr(trace_data._raw_data))
+    self.assertEqual('InteractionName', records[0].label)
+    for attribute_name in interaction_kwargs:
+      self.assertTrue(getattr(records[0], attribute_name))
+
+  # Test disabled for android: crbug.com/437057
+  # Test disabled for linux: crbug.com/513874
+  @decorators.Disabled('android', 'chromeos', 'linux')
+  @decorators.Disabled('win')  # crbug.com/570955
+  def testIssuingMultipleMeasurementInteractionRecords(self):
+    self.VerifyIssuingInteractionRecords(repeatable=True)
+
+
+class ActionRunnerTest(tab_test_case.TabTestCase):
+  def testExecuteJavaScript(self):
+    action_runner = action_runner_module.ActionRunner(self._tab,
+                                                      skip_waits=True)
+    self.Navigate('blank.html')
+    action_runner.ExecuteJavaScript('var testing = 42;')
+    self.assertEqual(42, self._tab.EvaluateJavaScript('testing'))
+
+  def testWaitForNavigate(self):
+    self.Navigate('page_with_link.html')
+    action_runner = action_runner_module.ActionRunner(self._tab,
+                                                      skip_waits=True)
+    action_runner.ClickElement('#clickme')
+    action_runner.WaitForNavigate()
+
+    self.assertTrue(self._tab.EvaluateJavaScript(
+        'document.readyState == "interactive" || '
+        'document.readyState == "complete"'))
+    self.assertEqual(
+        self._tab.EvaluateJavaScript('document.location.pathname;'),
+        '/blank.html')
+
+  def testWait(self):
+    action_runner = action_runner_module.ActionRunner(self._tab)
+    self.Navigate('blank.html')
+
+    action_runner.ExecuteJavaScript(
+        'window.setTimeout(function() { window.testing = 101; }, 50);')
+    action_runner.Wait(0.1)
+    self.assertEqual(101, self._tab.EvaluateJavaScript('window.testing'))
+
+    action_runner.ExecuteJavaScript(
+        'window.setTimeout(function() { window.testing = 102; }, 100);')
+    action_runner.Wait(0.2)
+    self.assertEqual(102, self._tab.EvaluateJavaScript('window.testing'))
+
+  def testWaitForJavaScriptCondition(self):
+    action_runner = action_runner_module.ActionRunner(self._tab,
+                                                      skip_waits=True)
+    self.Navigate('blank.html')
+
+    action_runner.ExecuteJavaScript('window.testing = 219;')
+    action_runner.WaitForJavaScriptCondition(
+        'window.testing == 219', timeout_in_seconds=0.1)
+    action_runner.ExecuteJavaScript(
+        'window.setTimeout(function() { window.testing = 220; }, 50);')
+    action_runner.WaitForJavaScriptCondition(
+        'window.testing == 220', timeout_in_seconds=0.1)
+    self.assertEqual(220, self._tab.EvaluateJavaScript('window.testing'))
+
+  def testWaitForElement(self):
+    action_runner = action_runner_module.ActionRunner(self._tab,
+                                                      skip_waits=True)
+    self.Navigate('blank.html')
+
+    action_runner.ExecuteJavaScript(
+        '(function() {'
+        '  var el = document.createElement("div");'
+        '  el.id = "test1";'
+        '  el.textContent = "foo";'
+        '  document.body.appendChild(el);'
+        '})()')
+    action_runner.WaitForElement('#test1', timeout_in_seconds=0.1)
+    action_runner.WaitForElement(text='foo', timeout_in_seconds=0.1)
+    action_runner.WaitForElement(
+        element_function='document.getElementById("test1")')
+    action_runner.ExecuteJavaScript(
+        'window.setTimeout(function() {'
+        '  var el = document.createElement("div");'
+        '  el.id = "test2";'
+        '  document.body.appendChild(el);'
+        '}, 50)')
+    action_runner.WaitForElement('#test2', timeout_in_seconds=0.1)
+    action_runner.ExecuteJavaScript(
+        'window.setTimeout(function() {'
+        '  document.getElementById("test2").textContent = "bar";'
+        '}, 50)')
+    action_runner.WaitForElement(text='bar', timeout_in_seconds=0.1)
+    action_runner.ExecuteJavaScript(
+        'window.setTimeout(function() {'
+        '  var el = document.createElement("div");'
+        '  el.id = "test3";'
+        '  document.body.appendChild(el);'
+        '}, 50)')
+    action_runner.WaitForElement(
+        element_function='document.getElementById("test3")')
+
+  def testWaitForElementWithWrongText(self):
+    action_runner = action_runner_module.ActionRunner(self._tab,
+                                                      skip_waits=True)
+    self.Navigate('blank.html')
+
+    action_runner.ExecuteJavaScript(
+        '(function() {'
+        '  var el = document.createElement("div");'
+        '  el.id = "test1";'
+        '  el.textContent = "foo";'
+        '  document.body.appendChild(el);'
+        '})()')
+    action_runner.WaitForElement('#test1', timeout_in_seconds=0.2)
+    def WaitForElement():
+      action_runner.WaitForElement(text='oo', timeout_in_seconds=0.2)
+    self.assertRaises(exceptions.TimeoutException, WaitForElement)
+
+  def testClickElement(self):
+    self.Navigate('page_with_clickables.html')
+    action_runner = action_runner_module.ActionRunner(self._tab,
+                                                      skip_waits=True)
+
+    action_runner.ExecuteJavaScript('valueSettableByTest = 1;')
+    action_runner.ClickElement('#test')
+    self.assertEqual(1, action_runner.EvaluateJavaScript('valueToTest'))
+
+    action_runner.ExecuteJavaScript('valueSettableByTest = 2;')
+    action_runner.ClickElement(text='Click/tap me')
+    self.assertEqual(2, action_runner.EvaluateJavaScript('valueToTest'))
+
+    action_runner.ExecuteJavaScript('valueSettableByTest = 3;')
+    action_runner.ClickElement(
+        element_function='document.body.firstElementChild;')
+    self.assertEqual(3, action_runner.EvaluateJavaScript('valueToTest'))
+
+    def WillFail():
+      action_runner.ClickElement('#notfound')
+    self.assertRaises(exceptions.EvaluateException, WillFail)
+
+  @decorators.Disabled('android', 'debug',  # crbug.com/437068
+                       'chromeos')          # crbug.com/483212
+  def testTapElement(self):
+    self.Navigate('page_with_clickables.html')
+    action_runner = action_runner_module.ActionRunner(self._tab,
+                                                      skip_waits=True)
+
+    action_runner.ExecuteJavaScript('valueSettableByTest = 1;')
+    action_runner.TapElement('#test')
+    self.assertEqual(1, action_runner.EvaluateJavaScript('valueToTest'))
+
+    action_runner.ExecuteJavaScript('valueSettableByTest = 2;')
+    action_runner.TapElement(text='Click/tap me')
+    self.assertEqual(2, action_runner.EvaluateJavaScript('valueToTest'))
+
+    action_runner.ExecuteJavaScript('valueSettableByTest = 3;')
+    action_runner.TapElement(
+        element_function='document.body.firstElementChild')
+    self.assertEqual(3, action_runner.EvaluateJavaScript('valueToTest'))
+
+    def WillFail():
+      action_runner.TapElement('#notfound')
+    self.assertRaises(exceptions.EvaluateException, WillFail)
+
+  @decorators.Disabled('android',   # crbug.com/437065.
+                       'chromeos')  # crbug.com/483212.
+  def testScroll(self):
+    if not page_action.IsGestureSourceTypeSupported(
+        self._tab, 'touch'):
+      return
+
+    self.Navigate('page_with_swipeables.html')
+    action_runner = action_runner_module.ActionRunner(self._tab,
+                                                      skip_waits=True)
+
+    action_runner.ScrollElement(
+        selector='#left-right', direction='right', left_start_ratio=0.9)
+    self.assertTrue(action_runner.EvaluateJavaScript(
+        'document.querySelector("#left-right").scrollLeft') > 75)
+    action_runner.ScrollElement(
+        selector='#top-bottom', direction='down', top_start_ratio=0.9)
+    self.assertTrue(action_runner.EvaluateJavaScript(
+        'document.querySelector("#top-bottom").scrollTop') > 75)
+
+    action_runner.ScrollPage(direction='right', left_start_ratio=0.9,
+                             distance=100)
+    self.assertTrue(action_runner.EvaluateJavaScript(
+        '(document.scrollingElement || document.body).scrollLeft') > 75)
+
+  @decorators.Disabled('android',   # crbug.com/437065.
+                       'chromeos')  # crbug.com/483212.
+  def testSwipe(self):
+    if not page_action.IsGestureSourceTypeSupported(
+        self._tab, 'touch'):
+      return
+
+    self.Navigate('page_with_swipeables.html')
+    action_runner = action_runner_module.ActionRunner(self._tab,
+                                                      skip_waits=True)
+
+    action_runner.SwipeElement(
+        selector='#left-right', direction='left', left_start_ratio=0.9)
+    self.assertTrue(action_runner.EvaluateJavaScript(
+        'document.querySelector("#left-right").scrollLeft') > 75)
+    action_runner.SwipeElement(
+        selector='#top-bottom', direction='up', top_start_ratio=0.9)
+    self.assertTrue(action_runner.EvaluateJavaScript(
+        'document.querySelector("#top-bottom").scrollTop') > 75)
+
+    action_runner.SwipePage(direction='left', left_start_ratio=0.9)
+    self.assertTrue(action_runner.EvaluateJavaScript(
+        '(document.scrollingElement || document.body).scrollLeft') > 75)
+
+
+class InteractionTest(unittest.TestCase):
+
+  def setUp(self):
+    self.mock_action_runner = mock.Mock(action_runner_module.ActionRunner)
+
+  def testIssuingInteractionRecordCommand(self):
+    with action_runner_module.Interaction(
+        self.mock_action_runner, label='ABC', flags=[]):
+      pass
+    expected_calls = [
+        mock.call.ExecuteJavaScript('console.time("Interaction.ABC");'),
+        mock.call.ExecuteJavaScript('console.timeEnd("Interaction.ABC");')]
+    self.assertEqual(expected_calls, self.mock_action_runner.mock_calls)
+
+  def testExceptionRaisedInWithInteraction(self):
+    class FooException(Exception):
+      pass
+    # Test that the Foo exception raised in the with block is propagated to the
+    # caller.
+    with self.assertRaises(FooException):
+      with action_runner_module.Interaction(
+          self.mock_action_runner, label='ABC', flags=[]):
+        raise FooException()
+
+    # Test that console.timeEnd(...) isn't called because an exception was
+    # raised inside the with block.
+    expected_calls = [
+        mock.call.ExecuteJavaScript('console.time("Interaction.ABC");')]
+    self.assertEqual(expected_calls, self.mock_action_runner.mock_calls)
diff --git a/catapult/telemetry/telemetry/internal/actions/drag.js b/catapult/telemetry/telemetry/internal/actions/drag.js
new file mode 100644
index 0000000..cee51ae
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/drag.js
@@ -0,0 +1,70 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file provides the DragAction object, which performs drag on a page
+// using given start and end positions:
+//   1. var action = new __DragAction(callback)
+//   2. action.start(drag_options)
+'use strict';
+
+(function() {
+  function DragGestureOptions(opt_options) {
+      this.element_ = opt_options.element;
+      this.left_start_ratio_ = opt_options.left_start_ratio;
+      this.top_start_ratio_ = opt_options.top_start_ratio;
+      this.left_end_ratio_ = opt_options.left_end_ratio;
+      this.top_end_ratio_ = opt_options.top_end_ratio;
+      this.speed_ = opt_options.speed;
+      this.gesture_source_type_ = opt_options.gesture_source_type;
+  }
+
+  function supportedByBrowser() {
+    return !!(window.chrome &&
+              chrome.gpuBenchmarking &&
+              chrome.gpuBenchmarking.smoothDrag);
+  }
+
+  // This class performs drag action using given start and end positions,
+  // by a single drag gesture.
+  function DragAction(opt_callback) {
+    this.beginMeasuringHook = function() {};
+    this.endMeasuringHook = function() {};
+
+    this.callback_ = opt_callback;
+  }
+
+  DragAction.prototype.start = function(opt_options) {
+    this.options_ = new DragGestureOptions(opt_options);
+    requestAnimationFrame(this.startGesture_.bind(this));
+  };
+
+  DragAction.prototype.startGesture_ = function() {
+    this.beginMeasuringHook();
+
+    var rect = __GestureCommon_GetBoundingVisibleRect(this.options_.element_);
+    var start_left =
+        rect.left + (rect.width * this.options_.left_start_ratio_);
+    var start_top =
+        rect.top + (rect.height * this.options_.top_start_ratio_);
+    var end_left =
+        rect.left + (rect.width * this.options_.left_end_ratio_);
+    var end_top =
+        rect.top + (rect.height * this.options_.top_end_ratio_);
+    chrome.gpuBenchmarking.smoothDrag(
+        start_left, start_top, end_left, end_top,
+        this.onGestureComplete_.bind(this), this.options_.gesture_source_type_,
+        this.options_.speed_);
+  };
+
+  DragAction.prototype.onGestureComplete_ = function() {
+    this.endMeasuringHook();
+
+    // We're done.
+    if (this.callback_)
+      this.callback_();
+  };
+
+  window.__DragAction = DragAction;
+  window.__DragAction_SupportedByBrowser = supportedByBrowser;
+})();
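A worked example (all numbers assumed) of how startGesture_ maps the *_ratio options onto the coordinates passed to chrome.gpuBenchmarking.smoothDrag:

```
# Assumed element rect and ratios; this mirrors the arithmetic in startGesture_.
rect = {'left': 100, 'top': 50, 'width': 200, 'height': 400}
left_start_ratio, top_start_ratio = 0.5, 0.5
left_end_ratio, top_end_ratio = 0.25, 0.25

start_left = rect['left'] + rect['width'] * left_start_ratio   # 200
start_top = rect['top'] + rect['height'] * top_start_ratio     # 250
end_left = rect['left'] + rect['width'] * left_end_ratio       # 150
end_top = rect['top'] + rect['height'] * top_end_ratio         # 150
```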
diff --git a/catapult/telemetry/telemetry/internal/actions/drag.py b/catapult/telemetry/telemetry/internal/actions/drag.py
new file mode 100644
index 0000000..39097a2
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/drag.py
@@ -0,0 +1,107 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A Telemetry page_action that performs the "drag" action on pages.
+
+Action parameters are:
+- selector: If no selector is defined then the action attempts to drag the
+            document element on the page.
+- element_function: JavaScript function (as a string) that returns the element
+                    to drag.
+- text: The element containing this exact text is selected.
+- left_start_ratio: ratio of start point's left coordinate to the element
+                    width.
+- top_start_ratio: ratio of start point's top coordinate to the element height.
+- left_end_ratio: ratio of end point's left coordinate to the element width.
+- top_end_ratio: ratio of end point's top coordinate to the element height.
+- speed_in_pixels_per_second: speed of the drag gesture in pixels per second.
+- use_touch: boolean value to specify if gesture should use touch input or not.
+"""
+
+import os
+
+from telemetry.internal.actions import page_action
+
+
+class DragAction(page_action.PageAction):
+
+  def __init__(self, selector=None, text=None, element_function=None,
+               left_start_ratio=None, top_start_ratio=None, left_end_ratio=None,
+               top_end_ratio=None, speed_in_pixels_per_second=800,
+               use_touch=False,
+               synthetic_gesture_source=page_action.GESTURE_SOURCE_DEFAULT):
+    super(DragAction, self).__init__()
+    self._selector = selector
+    self._text = text
+    self._element_function = element_function
+    self._left_start_ratio = left_start_ratio
+    self._top_start_ratio = top_start_ratio
+    self._left_end_ratio = left_end_ratio
+    self._top_end_ratio = top_end_ratio
+    self._speed = speed_in_pixels_per_second
+    self._use_touch = use_touch
+    self._synthetic_gesture_source = ('chrome.gpuBenchmarking.%s_INPUT' %
+                                      synthetic_gesture_source)
+
+  def WillRunAction(self, tab):
+    for js_file in ['gesture_common.js', 'drag.js']:
+      with open(os.path.join(os.path.dirname(__file__), js_file)) as f:
+        js = f.read()
+        tab.ExecuteJavaScript(js)
+
+    # Fail if browser doesn't support synthetic drag gestures.
+    if not tab.EvaluateJavaScript('window.__DragAction_SupportedByBrowser()'):
+      raise page_action.PageActionNotSupported(
+          'Synthetic drag not supported for this browser')
+
+    # Fail if this action requires touch and we can't send touch events.
+    if self._use_touch:
+      if not page_action.IsGestureSourceTypeSupported(tab, 'touch'):
+        raise page_action.PageActionNotSupported(
+            'Touch drag not supported for this browser')
+
+      if (self._synthetic_gesture_source ==
+          'chrome.gpuBenchmarking.MOUSE_INPUT'):
+        raise page_action.PageActionNotSupported(
+            'Drag requires touch on this page but mouse input was requested')
+
+    done_callback = 'function() { window.__dragActionDone = true; }'
+    tab.ExecuteJavaScript('''
+        window.__dragActionDone = false;
+        window.__dragAction = new __DragAction(%s);'''
+        % done_callback)
+
+  def RunAction(self, tab):
+    if (self._selector is None and self._text is None and
+        self._element_function is None):
+      self._element_function = 'document.body'
+
+    gesture_source_type = 'chrome.gpuBenchmarking.TOUCH_INPUT'
+    if (page_action.IsGestureSourceTypeSupported(tab, 'mouse') and
+        not self._use_touch):
+      gesture_source_type = 'chrome.gpuBenchmarking.MOUSE_INPUT'
+
+    code = '''
+        function(element, info) {
+          if (!element) {
+            throw Error('Cannot find element: ' + info);
+          }
+          window.__dragAction.start({
+            element: element,
+            left_start_ratio: %s,
+            top_start_ratio: %s,
+            left_end_ratio: %s,
+            top_end_ratio: %s,
+            speed: %s,
+            gesture_source_type: %s
+          });
+        }''' % (self._left_start_ratio,
+                self._top_start_ratio,
+                self._left_end_ratio,
+                self._top_end_ratio,
+                self._speed,
+                gesture_source_type)
+    page_action.EvaluateCallbackWithElement(
+        tab, code, selector=self._selector, text=self._text,
+        element_function=self._element_function)
+    tab.WaitForJavaScriptExpression('window.__dragActionDone', 60)
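A minimal usage sketch; the tab argument and the '#drag_div' id are assumptions for illustration. Note that RunAction defaults to touch input and only switches to mouse input when the browser supports mouse gestures and use_touch is False.

```
from telemetry.internal.actions import drag


def RunQuarterDrag(tab):
  """Sketch: drags an element with the assumed id '#drag_div'."""
  action = drag.DragAction(selector='#drag_div',
                           left_start_ratio=0.5, top_start_ratio=0.5,
                           left_end_ratio=0.25, top_end_ratio=0.25,
                           use_touch=False)
  action.WillRunAction(tab)  # injects gesture_common.js/drag.js, checks support
  action.RunAction(tab)      # issues smoothDrag, waits for __dragActionDone
```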
diff --git a/catapult/telemetry/telemetry/internal/actions/drag_unittest.py b/catapult/telemetry/telemetry/internal/actions/drag_unittest.py
new file mode 100644
index 0000000..439ade6
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/drag_unittest.py
@@ -0,0 +1,71 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import logging
+import math
+import os
+
+from telemetry import decorators
+from telemetry.internal.actions import drag
+from telemetry.internal.actions import page_action
+from telemetry.testing import tab_test_case
+
+
+class DragActionTest(tab_test_case.TabTestCase):
+  def CheckWithinRange(self, value, expected, error_ratio):
+    error_range = abs(expected * error_ratio)
+    return abs(value - expected) <= error_range
+
+  @decorators.Disabled('chromeos')  # crbug.com/483212
+  def testDragAction(self):
+    self.Navigate('draggable.html')
+
+    with open(os.path.join(os.path.dirname(__file__),
+                           'gesture_common.js')) as f:
+      js = f.read()
+      self._tab.ExecuteJavaScript(js)
+
+    div_width = self._tab.EvaluateJavaScript(
+        '__GestureCommon_GetBoundingVisibleRect(document.body).width')
+    div_height = self._tab.EvaluateJavaScript(
+        '__GestureCommon_GetBoundingVisibleRect(document.body).height')
+
+    i = drag.DragAction(left_start_ratio=0.5, top_start_ratio=0.5,
+                        left_end_ratio=0.25, top_end_ratio=0.25)
+    try:
+      i.WillRunAction(self._tab)
+    except page_action.PageActionNotSupported:
+      logging.warning('This browser does not support the drag gesture. Please'
+                      ' try updating Chrome.')
+      return
+
+    self._tab.ExecuteJavaScript('''
+        window.__dragAction.beginMeasuringHook = function() {
+            window.__didBeginMeasuring = true;
+        };
+        window.__dragAction.endMeasuringHook = function() {
+            window.__didEndMeasuring = true;
+        };''')
+    i.RunAction(self._tab)
+
+    self.assertTrue(self._tab.EvaluateJavaScript('window.__didBeginMeasuring'))
+    self.assertTrue(self._tab.EvaluateJavaScript('window.__didEndMeasuring'))
+
+    div_position_x = self._tab.EvaluateJavaScript(
+        'document.getElementById("drag_div").offsetLeft')
+    div_position_y = self._tab.EvaluateJavaScript(
+        'document.getElementById("drag_div").offsetTop')
+
+    # 0.25 is the ratio of displacement to the initial size.
+    expected_x = math.floor(div_width * -0.25)
+    expected_y = math.floor(div_height * -0.25)
+    error_ratio = 0.1
+
+    self.assertTrue(
+        self.CheckWithinRange(div_position_x, expected_x, error_ratio),
+        msg="Moved element's left coordinate: %d, expected: %d" %
+        (div_position_x, expected_x))
+    self.assertTrue(
+        self.CheckWithinRange(div_position_y, expected_y, error_ratio),
+        msg="Moved element's top coordinate: %d, expected: %d" %
+        (div_position_y, expected_y))
diff --git a/catapult/telemetry/telemetry/internal/actions/gesture_common.js b/catapult/telemetry/telemetry/internal/actions/gesture_common.js
new file mode 100644
index 0000000..9522f65
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/gesture_common.js
@@ -0,0 +1,65 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file provides common functionality for synthetic gesture actions.
+'use strict';
+
+(function() {
+
+  // Returns the bounding rectangle wrt to the top-most document.
+  function getBoundingRect(el) {
+    var client_rect = el.getBoundingClientRect();
+    var bound = { left: client_rect.left,
+                  top: client_rect.top,
+                  width: client_rect.width,
+                  height: client_rect.height };
+
+    var frame = el.ownerDocument.defaultView.frameElement;
+    while (frame) {
+      var frame_bound = frame.getBoundingClientRect();
+      // This computation doesn't account for more complex CSS transforms on the
+      // frame (e.g. scaling or rotations).
+      bound.left += frame_bound.left;
+      bound.top += frame_bound.top;
+
+      frame = frame.ownerDocument.frameElement;
+    }
+    return bound;
+  }
+
+  function getBoundingVisibleRect(el) {
+    var rect = getBoundingRect(el);
+    if (rect.top < 0) {
+      rect.height += rect.top;
+      rect.top = 0;
+    }
+    if (rect.left < 0) {
+      rect.width += rect.left;
+      rect.left = 0;
+    }
+
+    // TODO(ymalik): Remove the fallback path once the visualViewportHeight and
+    // visualViewportWidth properties roll into stable.
+    var visualViewportHeight = window.innerHeight;
+    var visualViewportWidth = window.innerWidth;
+    if (chrome.gpuBenchmarking.visualViewportHeight) {
+      visualViewportHeight = chrome.gpuBenchmarking.visualViewportHeight();
+    }
+    if (chrome.gpuBenchmarking.visualViewportWidth) {
+      visualViewportWidth = chrome.gpuBenchmarking.visualViewportWidth();
+    }
+    var outsideHeight = (rect.top + rect.height) - visualViewportHeight;
+    var outsideWidth = (rect.left + rect.width) - visualViewportWidth;
+
+    if (outsideHeight > 0) {
+      rect.height -= outsideHeight;
+    }
+    if (outsideWidth > 0) {
+      rect.width -= outsideWidth;
+    }
+    return rect;
+  };
+
+  window.__GestureCommon_GetBoundingVisibleRect = getBoundingVisibleRect;
+})();
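A worked example (assumed numbers) of the clamping that getBoundingVisibleRect applies:

```
# Assumed input: an element rect and a 1000x800 visual viewport.
rect = {'left': -10, 'top': 700, 'width': 300, 'height': 200}
viewport = {'width': 1000, 'height': 800}
# left < 0:  width becomes 300 + (-10) = 290, left is clamped to 0.
# bottom 700 + 200 = 900 exceeds the 800px viewport: height becomes 200 - 100.
clamped = {'left': 0, 'top': 700, 'width': 290, 'height': 100}
```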
diff --git a/catapult/telemetry/telemetry/internal/actions/javascript_click.py b/catapult/telemetry/telemetry/internal/actions/javascript_click.py
new file mode 100644
index 0000000..3a70e00
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/javascript_click.py
@@ -0,0 +1,25 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.internal.actions import page_action
+
+
+class ClickElementAction(page_action.PageAction):
+  def __init__(self, selector=None, text=None, element_function=None):
+    super(ClickElementAction, self).__init__()
+    self.selector = selector
+    self.text = text
+    self.element_function = element_function
+
+  def RunAction(self, tab):
+    code = '''
+        function(element, errorMsg) {
+          if (!element) {
+            throw Error('Cannot find element: ' + errorMsg);
+          }
+          element.click();
+        }'''
+    page_action.EvaluateCallbackWithElement(
+        tab, code, selector=self.selector, text=self.text,
+        element_function=self.element_function)
diff --git a/catapult/telemetry/telemetry/internal/actions/load_media.js b/catapult/telemetry/telemetry/internal/actions/load_media.js
new file mode 100644
index 0000000..06479f9
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/load_media.js
@@ -0,0 +1,30 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function() {
+  function loadMediaAndAwait(selector, event_to_await) {
+    var mediaElements = window.__findMediaElements(selector);
+    for (var i = 0; i < mediaElements.length; i++) {
+      console.log('Listening for ' + event_to_await + ' on element: ' +
+                  mediaElements[i].src);
+      registerListeners(mediaElements[i], event_to_await);
+      loadMediaElement(mediaElements[i]);
+    }
+  }
+
+  function loadMediaElement(element) {
+    if (element instanceof HTMLMediaElement) {
+      element.load();
+    } else {
+      throw new Error('Cannot load non-media elements.');
+    }
+  }
+
+  function registerListeners(element, event_to_await) {
+    window.__registerHTML5ErrorEvents(element);
+    window.__registerHTML5EventCompleted(element, event_to_await);
+  }
+
+  window.__loadMediaAndAwait = loadMediaAndAwait;
+})();
diff --git a/catapult/telemetry/telemetry/internal/actions/load_media.py b/catapult/telemetry/telemetry/internal/actions/load_media.py
new file mode 100644
index 0000000..c746004
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/load_media.py
@@ -0,0 +1,36 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.core import exceptions
+from telemetry.internal.actions import media_action
+from telemetry.internal.actions import page_action
+
+
+class LoadMediaAction(media_action.MediaAction):
+  """ For calling load() on media elements and waiting for an event to fire.
+  """
+
+  def __init__(self, selector=None, timeout_in_seconds=0,
+               event_to_await='canplaythrough'):
+    super(LoadMediaAction, self).__init__()
+    self._selector = selector or ''
+    self._timeout_in_seconds = timeout_in_seconds
+    self._event_to_await = event_to_await
+
+  def WillRunAction(self, tab):
+    """Load the JS code prior to running the action."""
+    super(LoadMediaAction, self).WillRunAction(tab)
+    self.LoadJS(tab, 'load_media.js')
+
+  def RunAction(self, tab):
+    try:
+      tab.ExecuteJavaScript('window.__loadMediaAndAwait("%s", "%s");'
+                            % (self._selector, self._event_to_await))
+      if self._timeout_in_seconds > 0:
+        self.WaitForEvent(tab, self._selector, self._event_to_await,
+                          self._timeout_in_seconds)
+    except exceptions.EvaluateException:
+      raise page_action.PageActionFailed('Failed waiting for event "%s" on '
+                                         'elements with selector = %s.' %
+                                         (self._event_to_await, self._selector))
diff --git a/catapult/telemetry/telemetry/internal/actions/load_media_unittest.py b/catapult/telemetry/telemetry/internal/actions/load_media_unittest.py
new file mode 100644
index 0000000..cfc7d1a
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/load_media_unittest.py
@@ -0,0 +1,63 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.core import exceptions
+from telemetry import decorators
+from telemetry.internal.actions.load_media import LoadMediaAction
+from telemetry.testing import tab_test_case
+
+
+class LoadMediaActionTest(tab_test_case.TabTestCase):
+
+  def setUp(self):
+    tab_test_case.TabTestCase.setUp(self)
+    self.Navigate('video_test.html')
+
+  def eventFired(self, selector, event):
+    return self._tab.EvaluateJavaScript(
+      'window.__hasEventCompleted("%s", "%s");' % (selector, event))
+
+  @decorators.Disabled('linux')  # crbug.com/418577
+  def testAwaitedEventIsConfigurable(self):
+    """It's possible to wait for different events."""
+    action = LoadMediaAction(selector='#video_1', timeout_in_seconds=0.1,
+                             event_to_await='loadedmetadata')
+    action.WillRunAction(self._tab)
+    action.RunAction(self._tab)
+    self.assertTrue(self.eventFired('#video_1', 'loadedmetadata'))
+
+  @decorators.Disabled('linux')  # crbug.com/418577
+  def testLoadWithNoSelector(self):
+    """With no selector the first media element is loaded."""
+    action = LoadMediaAction(timeout_in_seconds=5)
+    action.WillRunAction(self._tab)
+    action.RunAction(self._tab)
+    self.assertTrue(self.eventFired('#video_1', 'canplaythrough'))
+    self.assertFalse(self.eventFired('#audio_1', 'canplaythrough'))
+
+  @decorators.Disabled('linux')  # crbug.com/418577
+  def testLoadWithSelector(self):
+    """Only the element matching the selector is loaded."""
+    action = LoadMediaAction(selector='#audio_1', timeout_in_seconds=5)
+    action.WillRunAction(self._tab)
+    action.RunAction(self._tab)
+    self.assertFalse(self.eventFired('#video_1', 'canplaythrough'))
+    self.assertTrue(self.eventFired('#audio_1', 'canplaythrough'))
+
+  @decorators.Disabled('linux')  # crbug.com/418577
+  def testLoadWithAllSelector(self):
+    """Both elements are loaded with selector='all'."""
+    action = LoadMediaAction(selector='all', timeout_in_seconds=5)
+    action.WillRunAction(self._tab)
+    action.RunAction(self._tab)
+    self.assertTrue(self.eventFired('#video_1', 'canplaythrough'))
+    self.assertTrue(self.eventFired('#audio_1', 'canplaythrough'))
+
+  @decorators.Disabled('linux')  # crbug.com/418577
+  def testLoadRaisesAnExceptionOnTimeout(self):
+    """The load action times out if the event does not fire."""
+    action = LoadMediaAction(selector='#video_1', timeout_in_seconds=0.1,
+                             event_to_await='a_nonexistent_event')
+    action.WillRunAction(self._tab)
+    self.assertRaises(exceptions.TimeoutException, action.RunAction, self._tab)
diff --git a/catapult/telemetry/telemetry/internal/actions/loop.js b/catapult/telemetry/telemetry/internal/actions/loop.js
new file mode 100644
index 0000000..f862784
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/loop.js
@@ -0,0 +1,53 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file performs actions on media elements.
+(function() {
+  function loopMedia(selector, loopCount) {
+    // Loops media playback `loopCount` times.
+    var mediaElements = window.__findMediaElements(selector);
+    for (var i = 0; i < mediaElements.length; i++) {
+      loop(mediaElements[i], loopCount);
+    }
+  }
+
+  function loop(element, loopCount) {
+    if (element instanceof HTMLMediaElement)
+      loopHTML5Element(element, loopCount);
+    else
+      throw new Error('Cannot play non-HTML5 media elements.');
+  }
+
+  function loopHTML5Element(element, loopCount) {
+    window.__registerHTML5ErrorEvents(element);
+    element['loop_completed'] = false;
+    var currentLoop = 0;
+    var onLoop = function(e) {
+      ++currentLoop;
+      if (currentLoop == loopCount) {
+        element.pause();
+        element.removeEventListener('seeked', onLoop);
+        element['loop_completed'] = true;
+        // Dispatch endLoopEvent to mark end of looping.
+        var endLoopEvent = document.createEvent('Event');
+        endLoopEvent.initEvent('endLoop', false, false);
+        element.dispatchEvent(endLoopEvent);
+      }
+    };
+
+    element.addEventListener('seeked', onLoop);
+    element.loop = true;
+
+    // Dispatch willLoopEvent to measure loop time.
+    var willLoopEvent = document.createEvent('Event');
+    willLoopEvent.initEvent('willLoop', false, false);
+    willLoopEvent.loopCount = loopCount;
+    element.dispatchEvent(willLoopEvent);
+    // Reset HTML5 player to start playback from beginning.
+    element.load();
+    element.play();
+  }
+
+  window.__loopMedia = loopMedia;
+})();
diff --git a/catapult/telemetry/telemetry/internal/actions/loop.py b/catapult/telemetry/telemetry/internal/actions/loop.py
new file mode 100644
index 0000000..240f6f9
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/loop.py
@@ -0,0 +1,41 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A Telemetry page_action that loops media playback.
+
+Action parameters are:
+- loop_count: The number of times to loop media.
+- selector: If no selector is defined then the action attempts to loop the first
+            media element on the page. If 'all' then loop all media elements.
+- timeout_in_seconds: Timeout to wait for media to loop. Default is
+                      60 sec x loop_count. 0 means do not wait.
+"""
+
+from telemetry.core import exceptions
+from telemetry.internal.actions import media_action
+from telemetry.internal.actions import page_action
+
+
+class LoopAction(media_action.MediaAction):
+  def __init__(self, loop_count, selector=None, timeout_in_seconds=None):
+    super(LoopAction, self).__init__()
+    self._loop_count = loop_count
+    self._selector = selector if selector else ''
+    self._timeout_in_seconds = (
+        timeout_in_seconds if timeout_in_seconds else 60 * loop_count)
+
+  def WillRunAction(self, tab):
+    """Load the media metrics JS code prior to running the action."""
+    super(LoopAction, self).WillRunAction(tab)
+    self.LoadJS(tab, 'loop.js')
+
+  def RunAction(self, tab):
+    try:
+      tab.ExecuteJavaScript('window.__loopMedia("%s", %i);' %
+                            (self._selector, self._loop_count))
+      if self._timeout_in_seconds > 0:
+        self.WaitForEvent(tab, self._selector, 'loop', self._timeout_in_seconds)
+    except exceptions.EvaluateException:
+      raise page_action.PageActionFailed('Cannot loop media element(s) with '
+                                         'selector = %s.' % self._selector)
diff --git a/catapult/telemetry/telemetry/internal/actions/loop_unittest.py b/catapult/telemetry/telemetry/internal/actions/loop_unittest.py
new file mode 100644
index 0000000..690d1c5
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/loop_unittest.py
@@ -0,0 +1,52 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.core import exceptions
+from telemetry import decorators
+from telemetry.internal.actions import loop
+from telemetry.testing import tab_test_case
+
+AUDIO_1_LOOP_CHECK = 'window.__hasEventCompleted("#audio_1", "loop");'
+VIDEO_1_LOOP_CHECK = 'window.__hasEventCompleted("#video_1", "loop");'
+
+
+class LoopActionTest(tab_test_case.TabTestCase):
+
+  def setUp(self):
+    tab_test_case.TabTestCase.setUp(self)
+    self.Navigate('video_test.html')
+
+  @decorators.Disabled('android', 'linux')  # crbug.com/418577
+  def testLoopWithNoSelector(self):
+    """Tests that with no selector Loop action loops first media element."""
+    action = loop.LoopAction(loop_count=2, selector='#video_1',
+                             timeout_in_seconds=10)
+    action.WillRunAction(self._tab)
+    action.RunAction(self._tab)
+    # Assert that only the first video element has looped.
+    self.assertTrue(self._tab.EvaluateJavaScript(VIDEO_1_LOOP_CHECK))
+    self.assertFalse(self._tab.EvaluateJavaScript(AUDIO_1_LOOP_CHECK))
+
+  @decorators.Disabled('android', 'linux')  # crbug.com/418577
+  def testLoopWithAllSelector(self):
+    """Tests that Loop action loops all video elements with selector='all'."""
+    action = loop.LoopAction(loop_count=2, selector='all',
+                             timeout_in_seconds=10)
+    action.WillRunAction(self._tab)
+    # Neither media element has looped before running the action.
+    self.assertFalse(self._tab.EvaluateJavaScript(VIDEO_1_LOOP_CHECK))
+    self.assertFalse(self._tab.EvaluateJavaScript(AUDIO_1_LOOP_CHECK))
+    action.RunAction(self._tab)
+    # Assert that all media elements have looped.
+    self.assertTrue(self._tab.EvaluateJavaScript(VIDEO_1_LOOP_CHECK))
+    self.assertTrue(self._tab.EvaluateJavaScript(AUDIO_1_LOOP_CHECK))
+
+  @decorators.Disabled('android', 'linux')  # crbug.com/418577
+  def testLoopWaitForLoopTimeout(self):
+    """Tests that wait_for_loop timeout_in_secondss if video does not loop."""
+    action = loop.LoopAction(loop_count=2, selector='#video_1',
+                             timeout_in_seconds=1)
+    action.WillRunAction(self._tab)
+    self.assertFalse(self._tab.EvaluateJavaScript(VIDEO_1_LOOP_CHECK))
+    self.assertRaises(exceptions.TimeoutException, action.RunAction, self._tab)
diff --git a/catapult/telemetry/telemetry/internal/actions/media_action.js b/catapult/telemetry/telemetry/internal/actions/media_action.js
new file mode 100644
index 0000000..e9df62d
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/media_action.js
@@ -0,0 +1,59 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+// This file provides common functions for media actions.
+window.__findMediaElements = function(selector) {
+  // Returns elements matching the selector, otherwise returns the first video
+  // or audio tag element that can be found.
+  // If selector == 'all', returns all media elements.
+  if (selector == 'all') {
+    return document.querySelectorAll('video, audio');
+  } else if (selector) {
+    return document.querySelectorAll(selector);
+  } else {
+    var media = document.getElementsByTagName('video');
+    if (media.length > 0) {
+      return [media[0]];
+    } else {
+      media = document.getElementsByTagName('audio');
+      if (media.length > 0) {
+        return [media[0]];
+      }
+    }
+  }
+  console.error('Could not find any media elements matching: ' + selector);
+  return [];
+};
+
+window.__hasEventCompleted = function(selector, event_name) {
+  // Return true if the event_name fired for media satisfying the selector.
+  var mediaElements = window.__findMediaElements(selector);
+  for (var i = 0; i < mediaElements.length; i++) {
+    if (!mediaElements[i][event_name + '_completed'])
+      return false;
+  }
+  return true;
+};
+
+window.__registerHTML5ErrorEvents = function(element) {
+  // Listens to HTML5 media errors.
+  function onError(e) {
+    window.__error = 'Media error: ' + e.type + ', code:' + e.target.error.code;
+    throw new Error(window.__error);
+  }
+  element.addEventListener('error', onError);
+  element.addEventListener('abort', onError);
+};
+
+window.__registerHTML5EventCompleted = function(element, event_name) {
+  // Logs |event_name| on element when completed.
+  var logEventHappened = function(e) {
+    element[e.type + '_completed'] = true;
+    element.removeEventListener(event_name, logEventHappened);
+  };
+  element.addEventListener(event_name, logEventHappened);
+};
+
+window.__error = null;
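To summarize the selector semantics of __findMediaElements (expected results sketched below; the element ids are assumed):

```
# Assumed page containing <video id="video_1"> and <audio id="audio_1">:
#   selector 'all'        -> [video_1, audio_1]  (every <video>/<audio> element)
#   selector '#audio_1'   -> [audio_1]           (querySelectorAll match)
#   selector '' / omitted -> [video_1]           (first <video>, else first <audio>)
#   no media on the page  -> []                  (an error is logged to the console)
```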
diff --git a/catapult/telemetry/telemetry/internal/actions/media_action.py b/catapult/telemetry/telemetry/internal/actions/media_action.py
new file mode 100644
index 0000000..446f797
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/media_action.py
@@ -0,0 +1,51 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Common media action functions."""
+
+import logging
+import os
+
+from telemetry.core import util
+from telemetry.internal.actions import page_action
+
+
+class MediaAction(page_action.PageAction):
+  def WillRunAction(self, tab):
+    """Loads the common media action JS code prior to running the action."""
+    self.LoadJS(tab, 'media_action.js')
+
+  def RunAction(self, tab):
+    super(MediaAction, self).RunAction(tab)
+
+  def LoadJS(self, tab, js_file_name):
+    """Loads and executes a JS file in the tab."""
+    with open(os.path.join(os.path.dirname(__file__), js_file_name)) as f:
+      js = f.read()
+      tab.ExecuteJavaScript(js)
+
+  def WaitForEvent(self, tab, selector, event_name, timeout_in_seconds):
+    """Halts media action until the selector's event is fired.
+
+    Args:
+      tab: The tab to check for event on.
+      selector: Media element selector.
+      event_name: Name of the event to check if fired or not.
+      timeout_in_seconds: Timeout to check for event, throws an exception if
+          not fired.
+    """
+    util.WaitFor(
+        lambda: self.HasEventCompletedOrError(tab, selector, event_name),
+        timeout=timeout_in_seconds)
+
+  def HasEventCompletedOrError(self, tab, selector, event_name):
+    if tab.EvaluateJavaScript(
+        'window.__hasEventCompleted("%s", "%s");' % (selector, event_name)):
+      return True
+    error = tab.EvaluateJavaScript('window.__error')
+    if error:
+      logging.error('Detected media error while waiting for %s: %s', event_name,
+                    error)
+      return True
+    return False
diff --git a/catapult/telemetry/telemetry/internal/actions/mouse_click.js b/catapult/telemetry/telemetry/internal/actions/mouse_click.js
new file mode 100644
index 0000000..e85239a
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/mouse_click.js
@@ -0,0 +1,40 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+(function() {
+  function MouseClickAction(opt_callback) {
+    this.callback_ = opt_callback;
+  }
+
+  MouseClickAction.prototype.start = function(options) {
+    this.click_(options.element);
+  };
+
+  MouseClickAction.prototype.click_ = function(element) {
+    var triggerMouseEvent = this.triggerMouseEvent_;
+    var callback = this.callback_;
+    triggerMouseEvent(element, 'mouseover');
+    triggerMouseEvent(element, 'mousedown');
+    // ~100ms is typical for a mouse click's elapsed time.
+    window.setTimeout(
+      function() {
+        triggerMouseEvent(element, 'mouseup');
+        triggerMouseEvent(element, 'click', callback);
+      }, 100);
+  };
+
+  MouseClickAction.prototype.triggerMouseEvent_ = function(
+      node, eventType, callback) {
+    var clickEvent = document.createEvent('MouseEvents');
+    clickEvent.initEvent(eventType, true, true);
+    node.dispatchEvent(clickEvent);
+    if (callback) {
+      window.setTimeout(callback, 0);
+    }
+  };
+
+  window.__MouseClickAction = MouseClickAction;
+})();
diff --git a/catapult/telemetry/telemetry/internal/actions/mouse_click.py b/catapult/telemetry/telemetry/internal/actions/mouse_click.py
new file mode 100644
index 0000000..069afe8
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/mouse_click.py
@@ -0,0 +1,43 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import os
+
+from telemetry.internal.actions import page_action
+
+
+def read_js():
+  with open(os.path.join(os.path.dirname(__file__), 'mouse_click.js')) as f:
+    return f.read()
+
+
+class MouseClickAction(page_action.PageAction):
+  _MOUSE_CLICK_JAVASCRIPT = read_js()
+
+  def __init__(self, selector=None):
+    super(MouseClickAction, self).__init__()
+    self._selector = selector
+
+  def WillRunAction(self, tab):
+    """Load the mouse click JS code prior to running the action."""
+    super(MouseClickAction, self).WillRunAction(tab)
+    tab.ExecuteJavaScript(MouseClickAction._MOUSE_CLICK_JAVASCRIPT)
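+    # The callback below flips window.__mouseClickActionDone once the
+    # synthetic click has been dispatched, so RunAction can wait on that flag.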
+    done_callback = 'function() { window.__mouseClickActionDone = true; }'
+    tab.ExecuteJavaScript("""
+        window.__mouseClickActionDone = false;
+        window.__mouseClickAction = new __MouseClickAction(%s);"""
+        % (done_callback))
+
+  def RunAction(self, tab):
+    code = '''
+        function(element, info) {
+          if (!element) {
+            throw Error('Cannot find element: ' + info);
+          }
+          window.__mouseClickAction.start({
+            element: element
+          });
+        }'''
+    page_action.EvaluateCallbackWithElement(
+        tab, code, selector=self._selector)
+    tab.WaitForJavaScriptExpression('window.__mouseClickActionDone', 60)
diff --git a/catapult/telemetry/telemetry/internal/actions/mouse_click_unittest.py b/catapult/telemetry/telemetry/internal/actions/mouse_click_unittest.py
new file mode 100644
index 0000000..6ad06c7
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/mouse_click_unittest.py
@@ -0,0 +1,50 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.core import exceptions
+from telemetry.internal.actions import mouse_click
+from telemetry.testing import tab_test_case
+
+
+class MouseClickActionTest(tab_test_case.TabTestCase):
+
+  def testMouseClickAction(self):
+    self.Navigate('blank.html')
+
+    self._tab.ExecuteJavaScript("""
+        (function() {
+           function createElement(id, textContent) {
+             var el = document.createElement("div");
+             el.id = id;
+             el.textContent = textContent;
+             document.body.appendChild(el);
+           }
+
+           createElement('test-1', 'foo');
+        })();""")
+    i = mouse_click.MouseClickAction(selector='#test-1')
+    i.WillRunAction(self._tab)
+    i.RunAction(self._tab)
+    self.assertTrue(self._tab.EvaluateJavaScript(
+        'window.__mouseClickActionDone'))
+
+  def testMouseClickActionOnNonExistingElement(self):
+    self.Navigate('blank.html')
+
+    self._tab.ExecuteJavaScript("""
+        (function() {
+           function createElement(id, textContent) {
+             var el = document.createElement("div");
+             el.id = id;
+             el.textContent = textContent;
+             document.body.appendChild(el);
+           }
+
+           createElement('test-1', 'foo');
+        })();""")
+    i = mouse_click.MouseClickAction(selector='#test-2')
+    i.WillRunAction(self._tab)
+    def WillFail():
+      i.RunAction(self._tab)
+    self.assertRaises(exceptions.EvaluateException, WillFail)
diff --git a/catapult/telemetry/telemetry/internal/actions/navigate.py b/catapult/telemetry/telemetry/internal/actions/navigate.py
new file mode 100644
index 0000000..353eb66
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/navigate.py
@@ -0,0 +1,29 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import time
+
+from telemetry.internal.actions import page_action
+
+
+class NavigateAction(page_action.PageAction):
+  def __init__(self, url, script_to_evaluate_on_commit=None,
+               timeout_in_seconds=60):
+    super(NavigateAction, self).__init__()
+    assert url, 'Must specify url for navigate action'
+    self._url = url
+    self._script_to_evaluate_on_commit = script_to_evaluate_on_commit
+    self._timeout_in_seconds = timeout_in_seconds
+
+  def RunAction(self, tab):
+    start_time = time.time()
+    tab.Navigate(self._url,
+                 self._script_to_evaluate_on_commit,
+                 self._timeout_in_seconds)
+
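+    # Spend whatever remains of the timeout waiting for the document to
+    # reach an interactive (or better) ready state.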
+    time_left_in_seconds = (start_time + self._timeout_in_seconds
+        - time.time())
+    time_left_in_seconds = max(0, time_left_in_seconds)
+    tab.WaitForDocumentReadyStateToBeInteractiveOrBetter(
+        time_left_in_seconds)
diff --git a/catapult/telemetry/telemetry/internal/actions/navigate_unittest.py b/catapult/telemetry/telemetry/internal/actions/navigate_unittest.py
new file mode 100644
index 0000000..a327d84
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/navigate_unittest.py
@@ -0,0 +1,15 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.internal.actions import navigate
+from telemetry.testing import tab_test_case
+
+
+class NavigateActionTest(tab_test_case.TabTestCase):
+  def testNavigateAction(self):
+    i = navigate.NavigateAction(url=self.UrlOfUnittestFile('blank.html'))
+    i.RunAction(self._tab)
+    self.assertEquals(
+        self._tab.EvaluateJavaScript('document.location.pathname;'),
+        '/blank.html')
diff --git a/catapult/telemetry/telemetry/internal/actions/page_action.py b/catapult/telemetry/telemetry/internal/actions/page_action.py
new file mode 100644
index 0000000..a77d296
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/page_action.py
@@ -0,0 +1,136 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import re
+
+from telemetry import decorators
+
+GESTURE_SOURCE_DEFAULT = 'DEFAULT'
+GESTURE_SOURCE_MOUSE = 'MOUSE'
+GESTURE_SOURCE_TOUCH = 'TOUCH'
+SUPPORTED_GESTURE_SOURCES = (GESTURE_SOURCE_DEFAULT,
+                             GESTURE_SOURCE_MOUSE,
+                             GESTURE_SOURCE_TOUCH)
+
+class PageActionNotSupported(Exception):
+  pass
+
+class PageActionFailed(Exception):
+  pass
+
+
+class PageAction(object):
+  """Represents an action that a user might try to perform to a page."""
+
+  def WillRunAction(self, tab):
+    """Override to do action-specific setup before
+    Test.WillRunAction is called."""
+    pass
+
+  def RunAction(self, tab):
+    raise NotImplementedError()
+
+  def CleanUp(self, tab):
+    pass
+
+def EvaluateCallbackWithElement(
+    tab, callback_js, selector=None, text=None, element_function=None,
+    wait=False, timeout_in_seconds=60):
+  """Evaluates the JavaScript callback with the given element.
+
+  The element may be selected via selector, text, or element_function.
+  Exactly one of these arguments must be specified.
+
+  Returns:
+    The callback's return value, if any. The return value must be
+    convertible to JSON.
+
+  Args:
+    tab: A telemetry.core.Tab object.
+    callback_js: The JavaScript callback to call (as string).
+        The callback receives 2 parameters: the element, and an information
+        string about the method used to retrieve the element.
+        Example: '''
+          function(element, info) {
+            if (!element) {
+              throw Error('Can not find element: ' + info);
+            }
+            element.click()
+          }'''
+    selector: A CSS selector describing the element.
+    text: The element must contain this exact text.
+    element_function: A JavaScript function (as string) that is used
+        to retrieve the element. For example:
+        '(function() { return foo.element; })()'.
+    wait: Whether to wait for the return value to be true.
+    timeout_in_seconds: The timeout for wait (if waiting).
+  """
+  count = 0
+  info_msg = ''
+  if element_function is not None:
+    count = count + 1
+    info_msg = 'using element_function "%s"' % re.escape(element_function)
+  if selector is not None:
+    count = count + 1
+    info_msg = 'using selector "%s"' % _EscapeSelector(selector)
+    element_function = 'document.querySelector(\'%s\')' % _EscapeSelector(
+        selector)
+  if text is not None:
+    count = count + 1
+    info_msg = 'using exact text match "%s"' % re.escape(text)
+    element_function = '''
+        (function() {
+          function _findElement(element, text) {
+            if (element.innerHTML == text) {
+              return element;
+            }
+
+            var childNodes = element.childNodes;
+            for (var i = 0, len = childNodes.length; i < len; ++i) {
+              var found = _findElement(childNodes[i], text);
+              if (found) {
+                return found;
+              }
+            }
+            return null;
+          }
+          return _findElement(document, '%s');
+        })()''' % text
+
+  if count != 1:
+    raise PageActionFailed(
+        'Must specify exactly 1 way to retrieve the element, but %s were '
+        'specified.' % count)
+
+  code = '''
+      (function() {
+        var element = %s;
+        var callback = %s;
+        return callback(element, '%s');
+      })()''' % (element_function, callback_js, info_msg)
+
+  if wait:
+    tab.WaitForJavaScriptExpression(code, timeout_in_seconds)
+    return True
+  else:
+    return tab.EvaluateJavaScript(code)
+
+def _EscapeSelector(selector):
+  return selector.replace('\'', '\\\'')
+
+
+@decorators.Cache
+def IsGestureSourceTypeSupported(tab, gesture_source_type):
+  # TODO(dominikg): remove once support for
+  #                 'chrome.gpuBenchmarking.gestureSourceTypeSupported' has
+  #                 been rolled into reference build.
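+  # When the API is unavailable (older builds), assume every gesture source is
+  # supported except touch on Mac.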
+  if tab.EvaluateJavaScript("""
+      typeof chrome.gpuBenchmarking.gestureSourceTypeSupported ===
+          'undefined'"""):
+    return (tab.browser.platform.GetOSName() != 'mac' or
+            gesture_source_type.lower() != 'touch')
+
+  return tab.EvaluateJavaScript("""
+      chrome.gpuBenchmarking.gestureSourceTypeSupported(
+          chrome.gpuBenchmarking.%s_INPUT)"""
+      % (gesture_source_type.upper()))
diff --git a/catapult/telemetry/telemetry/internal/actions/page_action_unittest.py b/catapult/telemetry/telemetry/internal/actions/page_action_unittest.py
new file mode 100644
index 0000000..33d3a69
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/page_action_unittest.py
@@ -0,0 +1,82 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Tests for page_action."""
+
+from telemetry.internal.actions import page_action
+from telemetry.page import action_runner
+from telemetry.testing import tab_test_case
+
+
+class PageActionTest(tab_test_case.TabTestCase):
+
+  def testEvaluateCallbackWithElement(self):
+    self.Navigate('blank.html')
+    runner = action_runner.ActionRunner(self._tab)
+    runner.ExecuteJavaScript('''
+        (function() {
+           function createElement(id, textContent) {
+             var el = document.createElement("div");
+             el.id = id;
+             el.textContent = textContent;
+             document.body.appendChild(el);
+           }
+
+           createElement('test-1', 'foo');
+           createElement('test-2', 'bar');
+           createElement('test-3', 'baz');
+        })();''')
+    self.assertEqual(
+        'foo',
+        page_action.EvaluateCallbackWithElement(
+            self._tab, 'function(el) { return el.textContent; }',
+            selector='#test-1'))
+    self.assertEqual(
+        'bar',
+        page_action.EvaluateCallbackWithElement(
+            self._tab, 'function(el) { return el.textContent; }',
+            text='bar'))
+    self.assertEqual(
+        'baz',
+        page_action.EvaluateCallbackWithElement(
+            self._tab, 'function(el) { return el.textContent; }',
+            element_function='document.getElementById("test-3")'))
+    self.assertEqual(
+        'baz',
+        page_action.EvaluateCallbackWithElement(
+            self._tab, 'function(el) { return el.textContent; }',
+            element_function='''
+                (function() {
+                  return document.getElementById("test-3");
+                })()'''))
+
+    # Test for when the element is not found.
+    self.assertEqual(
+        None,
+        page_action.EvaluateCallbackWithElement(
+            self._tab, 'function(el) { return el; }',
+            element_function='document.getElementById("test-4")'))
+
+    # Test the info message.
+    self.assertEqual(
+        'using selector "#test-1"',
+        page_action.EvaluateCallbackWithElement(
+            self._tab, 'function(el, info) { return info; }',
+            selector='#test-1'))
+
+  def testEvaluateCallbackWithElementWithConflictingParams(self):
+    def Evaluate1():
+      page_action.EvaluateCallbackWithElement(
+          self._tab, 'function() {}', selector='div', text='foo')
+    self.assertRaises(page_action.PageActionFailed, Evaluate1)
+
+    def Evaluate2():
+      page_action.EvaluateCallbackWithElement(
+          self._tab, 'function() {}', selector='div', element_function='foo')
+    self.assertRaises(page_action.PageActionFailed, Evaluate2)
+
+    def Evaluate3():
+      page_action.EvaluateCallbackWithElement(
+          self._tab, 'function() {}', text='foo', element_function='')
+    self.assertRaises(page_action.PageActionFailed, Evaluate3)
diff --git a/catapult/telemetry/telemetry/internal/actions/pinch.js b/catapult/telemetry/telemetry/internal/actions/pinch.js
new file mode 100644
index 0000000..bdada3a
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/pinch.js
@@ -0,0 +1,75 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file provides the PinchAction object, which zooms into or out of a
+// page by a given scale factor:
+//   1. var action = new __PinchAction(callback)
+//   2. action.start(pinch_options)
+'use strict';
+
+(function() {
+
+  function PinchGestureOptions(opt_options) {
+    if (opt_options) {
+      this.element_ = opt_options.element;
+      this.left_anchor_ratio_ = opt_options.left_anchor_ratio;
+      this.top_anchor_ratio_ = opt_options.top_anchor_ratio;
+      this.scale_factor_ = opt_options.scale_factor;
+      this.speed_ = opt_options.speed;
+    } else {
+      this.element_ = document.body;
+      this.left_anchor_ratio_ = 0.5;
+      this.top_anchor_ratio_ = 0.5;
+      this.scale_factor_ = 2.0;
+      this.speed_ = 800;
+    }
+  }
+
+  function supportedByBrowser() {
+    return !!(window.chrome &&
+              chrome.gpuBenchmarking &&
+              chrome.gpuBenchmarking.pinchBy);
+  }
+
+  // This class zooms into or out of a page by a given scale factor using a
+  // synthetic pinch gesture.
+  function PinchAction(opt_callback) {
+    var self = this;
+
+    this.beginMeasuringHook = function() {};
+    this.endMeasuringHook = function() {};
+
+    this.callback_ = opt_callback;
+  }
+
+  PinchAction.prototype.start = function(opt_options) {
+    this.options_ = new PinchGestureOptions(opt_options);
+
+    requestAnimationFrame(this.startPass_.bind(this));
+  };
+
+  PinchAction.prototype.startPass_ = function() {
+    this.beginMeasuringHook();
+
+    var rect = __GestureCommon_GetBoundingVisibleRect(this.options_.element_);
+    var anchor_left =
+        rect.left + rect.width * this.options_.left_anchor_ratio_;
+    var anchor_top =
+        rect.top + rect.height * this.options_.top_anchor_ratio_;
+    chrome.gpuBenchmarking.pinchBy(this.options_.scale_factor_,
+                                   anchor_left, anchor_top,
+                                   this.onGestureComplete_.bind(this),
+                                   this.options_.speed_);
+  };
+
+  PinchAction.prototype.onGestureComplete_ = function() {
+    this.endMeasuringHook();
+
+    if (this.callback_)
+      this.callback_();
+  };
+
+  window.__PinchAction = PinchAction;
+  window.__PinchAction_SupportedByBrowser = supportedByBrowser;
+})();
diff --git a/catapult/telemetry/telemetry/internal/actions/pinch.py b/catapult/telemetry/telemetry/internal/actions/pinch.py
new file mode 100644
index 0000000..c1d76b6
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/pinch.py
@@ -0,0 +1,74 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import os
+
+from telemetry.internal.actions import page_action
+
+
+class PinchAction(page_action.PageAction):
+  def __init__(self, selector=None, text=None, element_function=None,
+               left_anchor_ratio=0.5, top_anchor_ratio=0.5,
+               scale_factor=None, speed_in_pixels_per_second=800,
+               synthetic_gesture_source=page_action.GESTURE_SOURCE_DEFAULT):
+    super(PinchAction, self).__init__()
+    self._selector = selector
+    self._text = text
+    self._element_function = element_function
+    self._left_anchor_ratio = left_anchor_ratio
+    self._top_anchor_ratio = top_anchor_ratio
+    self._scale_factor = scale_factor
+    self._speed = speed_in_pixels_per_second
+    self._synthetic_gesture_source = ('chrome.gpuBenchmarking.%s_INPUT' %
+                                      synthetic_gesture_source)
+
+    if (self._selector is None and self._text is None and
+        self._element_function is None):
+      self._element_function = 'document.body'
+
+  def WillRunAction(self, tab):
+    for js_file in ['gesture_common.js', 'pinch.js']:
+      with open(os.path.join(os.path.dirname(__file__), js_file)) as f:
+        js = f.read()
+        tab.ExecuteJavaScript(js)
+
+    # Fail if browser doesn't support synthetic pinch gestures.
+    if not tab.EvaluateJavaScript('window.__PinchAction_SupportedByBrowser()'):
+      raise page_action.PageActionNotSupported(
+          'Synthetic pinch not supported for this browser')
+
+    done_callback = 'function() { window.__pinchActionDone = true; }'
+    tab.ExecuteJavaScript("""
+        window.__pinchActionDone = false;
+        window.__pinchAction = new __PinchAction(%s);"""
+        % done_callback)
+
+  @staticmethod
+  def _GetDefaultScaleFactorForPage(tab):
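+    """Returns the factor needed to pinch the page to an overall scale of 3.0.
+
+    The current scale is read as window.outerWidth / window.innerWidth, so the
+    returned value is 3.0 divided by that scale.
+    """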
+    current_scale_factor = tab.EvaluateJavaScript(
+        'window.outerWidth / window.innerWidth')
+    return 3.0 / current_scale_factor
+
+  def RunAction(self, tab):
+    scale_factor = (self._scale_factor if self._scale_factor else
+                    PinchAction._GetDefaultScaleFactorForPage(tab))
+    code = '''
+        function(element, info) {
+          if (!element) {
+            throw Error('Cannot find element: ' + info);
+          }
+          window.__pinchAction.start({
+            element: element,
+            left_anchor_ratio: %s,
+            top_anchor_ratio: %s,
+            scale_factor: %s,
+            speed: %s
+          });
+        }''' % (self._left_anchor_ratio,
+                self._top_anchor_ratio,
+                scale_factor,
+                self._speed)
+    page_action.EvaluateCallbackWithElement(
+        tab, code, selector=self._selector, text=self._text,
+        element_function=self._element_function)
+    tab.WaitForJavaScriptExpression('window.__pinchActionDone', 60)
diff --git a/catapult/telemetry/telemetry/internal/actions/pinch_unittest.py b/catapult/telemetry/telemetry/internal/actions/pinch_unittest.py
new file mode 100644
index 0000000..1c2ae33
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/pinch_unittest.py
@@ -0,0 +1,40 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.internal.actions import page_action
+from telemetry.page import action_runner as action_runner_module
+from telemetry.testing import tab_test_case
+
+
+class PinchActionTest(tab_test_case.TabTestCase):
+  def setUp(self):
+    super(PinchActionTest, self).setUp()
+
+  def testPinchByApiCalledWithCorrectArguments(self):
+    self.Navigate('blank.html')
+    if not page_action.IsGestureSourceTypeSupported(self._tab, 'touch'):
+      return
+
+    action_runner = action_runner_module.ActionRunner(self._tab)
+    action_runner.ExecuteJavaScript('''
+        chrome.gpuBenchmarking.pinchBy = function(
+            scaleFactor, anchorLeft, anchorTop, callback, speed) {
+          window.__test_scaleFactor = scaleFactor;
+          window.__test_anchorLeft = anchorLeft;
+          window.__test_anchorTop = anchorTop;
+          window.__test_callback = callback;
+          window.__test_speed = speed;
+          window.__pinchActionDone = true;
+        };''')
+    action_runner.PinchPage(scale_factor=2)
+    self.assertEqual(
+        2, action_runner.EvaluateJavaScript('window.__test_scaleFactor'))
+    self.assertTrue(
+        action_runner.EvaluateJavaScript('!isNaN(window.__test_anchorLeft)'))
+    self.assertTrue(
+        action_runner.EvaluateJavaScript('!isNaN(window.__test_anchorTop)'))
+    self.assertTrue(
+        action_runner.EvaluateJavaScript('!!window.__test_callback'))
+    self.assertEqual(
+        800, action_runner.EvaluateJavaScript('window.__test_speed'))
diff --git a/catapult/telemetry/telemetry/internal/actions/play.js b/catapult/telemetry/telemetry/internal/actions/play.js
new file mode 100644
index 0000000..c646d78
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/play.js
@@ -0,0 +1,35 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file performs actions on media elements.
+(function() {
+  function playMedia(selector) {
+    // Performs the "Play" action on media satisfying selector.
+    var mediaElements = window.__findMediaElements(selector);
+    for (var i = 0; i < mediaElements.length; i++) {
+      console.log('Playing element: ' + mediaElements[i].src);
+      play(mediaElements[i]);
+    }
+  }
+
+  function play(element) {
+    if (element instanceof HTMLMediaElement)
+      playHTML5Element(element);
+    else
+      throw new Error('Cannot play non-HTML5 media elements.');
+  }
+
+  function playHTML5Element(element) {
+    window.__registerHTML5ErrorEvents(element);
+    window.__registerHTML5EventCompleted(element, 'playing');
+    window.__registerHTML5EventCompleted(element, 'ended');
+
+    var willPlayEvent = document.createEvent('Event');
+    willPlayEvent.initEvent('willPlay', false, false);
+    element.dispatchEvent(willPlayEvent);
+    element.play();
+  }
+
+  window.__playMedia = playMedia;
+})();
diff --git a/catapult/telemetry/telemetry/internal/actions/play.py b/catapult/telemetry/telemetry/internal/actions/play.py
new file mode 100644
index 0000000..dfa0af2
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/play.py
@@ -0,0 +1,48 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A Telemetry page_action that performs the "play" action on media elements.
+
+Media elements can be specified by a selector argument. If no selector is
+defined then the action attempts to play the first video element or audio
+element on the page. A selector can also be 'all' to play all media elements.
+
+Other arguments to use are: playing_event_timeout_in_seconds and
+ended_event_timeout_in_seconds, which force the action to wait until the
+playing and ended events fire, respectively.
+"""
+
+from telemetry.core import exceptions
+from telemetry.internal.actions import media_action
+from telemetry.internal.actions import page_action
+
+
+class PlayAction(media_action.MediaAction):
+  def __init__(self, selector=None,
+               playing_event_timeout_in_seconds=0,
+               ended_event_timeout_in_seconds=0):
+    super(PlayAction, self).__init__()
+    self._selector = selector if selector else ''
+    self._playing_event_timeout_in_seconds = playing_event_timeout_in_seconds
+    self._ended_event_timeout_in_seconds = ended_event_timeout_in_seconds
+
+  def WillRunAction(self, tab):
+    """Load the media metrics JS code prior to running the action."""
+    super(PlayAction, self).WillRunAction(tab)
+    self.LoadJS(tab, 'play.js')
+
+  def RunAction(self, tab):
+    try:
+      tab.ExecuteJavaScript('window.__playMedia("%s");' % self._selector)
+      # Check if we need to wait for 'playing' event to fire.
+      if self._playing_event_timeout_in_seconds > 0:
+        self.WaitForEvent(tab, self._selector, 'playing',
+                          self._playing_event_timeout_in_seconds)
+      # Check if we need to wait for 'ended' event to fire.
+      if self._ended_event_timeout_in_seconds > 0:
+        self.WaitForEvent(tab, self._selector, 'ended',
+                          self._ended_event_timeout_in_seconds)
+    except exceptions.EvaluateException:
+      raise page_action.PageActionFailed('Cannot play media element(s) with '
+                                         'selector = %s.' % self._selector)
diff --git a/catapult/telemetry/telemetry/internal/actions/play_unittest.py b/catapult/telemetry/telemetry/internal/actions/play_unittest.py
new file mode 100644
index 0000000..20bf8d2
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/play_unittest.py
@@ -0,0 +1,110 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.core import exceptions
+from telemetry import decorators
+from telemetry.internal.actions import play
+from telemetry.testing import tab_test_case
+
+AUDIO_1_PLAYING_CHECK = 'window.__hasEventCompleted("#audio_1", "playing");'
+VIDEO_1_PLAYING_CHECK = 'window.__hasEventCompleted("#video_1", "playing");'
+VIDEO_1_ENDED_CHECK = 'window.__hasEventCompleted("#video_1", "ended");'
+
+
+class PlayActionTest(tab_test_case.TabTestCase):
+
+  def setUp(self):
+    tab_test_case.TabTestCase.setUp(self)
+    self.Navigate('video_test.html')
+
+  @decorators.Disabled('android', 'linux')  # crbug.com/418577
+  def testPlayWithNoSelector(self):
+    """Tests that with no selector Play action plays first video element."""
+    action = play.PlayAction(playing_event_timeout_in_seconds=5)
+    action.WillRunAction(self._tab)
+    # Neither media element is playing before running the action.
+    self.assertFalse(self._tab.EvaluateJavaScript(VIDEO_1_PLAYING_CHECK))
+    self.assertFalse(self._tab.EvaluateJavaScript(AUDIO_1_PLAYING_CHECK))
+    action.RunAction(self._tab)
+    # Assert only first video has played.
+    self.assertTrue(self._tab.EvaluateJavaScript(VIDEO_1_PLAYING_CHECK))
+    self.assertFalse(self._tab.EvaluateJavaScript(AUDIO_1_PLAYING_CHECK))
+
+  @decorators.Disabled('android', 'linux')  # crbug.com/418577
+  def testPlayWithVideoSelector(self):
+    """Tests that Play action plays video element matching selector."""
+    action = play.PlayAction(selector='#video_1',
+                             playing_event_timeout_in_seconds=5)
+    action.WillRunAction(self._tab)
+    # Neither media element is playing before running the action.
+    self.assertFalse(self._tab.EvaluateJavaScript(VIDEO_1_PLAYING_CHECK))
+    self.assertFalse(self._tab.EvaluateJavaScript(AUDIO_1_PLAYING_CHECK))
+    action.RunAction(self._tab)
+    # Assert only video matching selector has played.
+    self.assertTrue(self._tab.EvaluateJavaScript(VIDEO_1_PLAYING_CHECK))
+    self.assertFalse(self._tab.EvaluateJavaScript(AUDIO_1_PLAYING_CHECK))
+
+  @decorators.Disabled('android', 'linux')  # crbug.com/418577
+  def testPlayWithAllSelector(self):
+    """Tests that Play action plays all video elements with selector='all'."""
+    action = play.PlayAction(selector='all',
+                             playing_event_timeout_in_seconds=5)
+    action.WillRunAction(self._tab)
+    # Neither media element is playing before running the action.
+    self.assertFalse(self._tab.EvaluateJavaScript(VIDEO_1_PLAYING_CHECK))
+    self.assertFalse(self._tab.EvaluateJavaScript(AUDIO_1_PLAYING_CHECK))
+    action.RunAction(self._tab)
+    # Assert all media elements played.
+    self.assertTrue(self._tab.EvaluateJavaScript(VIDEO_1_PLAYING_CHECK))
+    self.assertTrue(self._tab.EvaluateJavaScript(AUDIO_1_PLAYING_CHECK))
+
+  # http://crbug.com/273887
+  @decorators.Disabled('linux')  # crbug.com/418577
+  def testPlayWaitForPlayTimeout(self):
+    """Tests that wait_for_playing timeouts if video does not play."""
+    action = play.PlayAction(selector='#video_1',
+                             playing_event_timeout_in_seconds=5)
+    action.WillRunAction(self._tab)
+    self._tab.EvaluateJavaScript('document.getElementById("video_1").src = ""')
+    self.assertFalse(self._tab.EvaluateJavaScript(VIDEO_1_PLAYING_CHECK))
+    self.assertRaises(exceptions.TimeoutException, action.RunAction, self._tab)
+
+  @decorators.Disabled('android', 'linux')  # crbug.com/418577
+  def testPlayWaitForEnded(self):
+    """Tests that wait_for_ended waits for video to end."""
+    action = play.PlayAction(selector='#video_1',
+                             ended_event_timeout_in_seconds=5)
+    action.WillRunAction(self._tab)
+    # Assert video not playing before running action.
+    self.assertFalse(self._tab.EvaluateJavaScript(VIDEO_1_PLAYING_CHECK))
+    self.assertFalse(self._tab.EvaluateJavaScript(VIDEO_1_ENDED_CHECK))
+    action.RunAction(self._tab)
+    # Assert video ended.
+    self.assertTrue(self._tab.EvaluateJavaScript(VIDEO_1_ENDED_CHECK))
+
+  @decorators.Disabled('linux')  # crbug.com/418577
+  def testPlayWithoutWaitForEnded(self):
+    """Tests that wait_for_ended waits for video to end."""
+    action = play.PlayAction(selector='#video_1',
+                             ended_event_timeout_in_seconds=0)
+    action.WillRunAction(self._tab)
+    # Assert video not playing before running action.
+    self.assertFalse(self._tab.EvaluateJavaScript(VIDEO_1_PLAYING_CHECK))
+    self.assertFalse(self._tab.EvaluateJavaScript(VIDEO_1_ENDED_CHECK))
+    action.RunAction(self._tab)
+    # Assert video did not end.
+    self.assertFalse(self._tab.EvaluateJavaScript(VIDEO_1_ENDED_CHECK))
+
+  @decorators.Disabled('linux')  # crbug.com/418577
+  def testPlayWaitForEndedTimeout(self):
+    """Tests that action raises exception if timeout is reached."""
+    action = play.PlayAction(selector='#video_1',
+                             ended_event_timeout_in_seconds=0.1)
+    action.WillRunAction(self._tab)
+    # Assert video not playing before running action.
+    self.assertFalse(self._tab.EvaluateJavaScript(VIDEO_1_PLAYING_CHECK))
+    self.assertFalse(self._tab.EvaluateJavaScript(VIDEO_1_ENDED_CHECK))
+    self.assertRaises(exceptions.TimeoutException, action.RunAction, self._tab)
+    # Assert video did not end.
+    self.assertFalse(self._tab.EvaluateJavaScript(VIDEO_1_ENDED_CHECK))
diff --git a/catapult/telemetry/telemetry/internal/actions/repaint_continuously.py b/catapult/telemetry/telemetry/internal/actions/repaint_continuously.py
new file mode 100644
index 0000000..a8c4957
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/repaint_continuously.py
@@ -0,0 +1,35 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import time
+
+from telemetry.core import util
+from telemetry.internal.actions import page_action
+
+
+class RepaintContinuouslyAction(page_action.PageAction):
+  """Continuously repaints the visible content by requesting animation frames
+  until self._seconds have elapsed AND at least three RAFs have been fired.
+  Times out after max(60, self._seconds) if fewer than three RAFs were fired.
+  """
+
+  def __init__(self, seconds):
+    super(RepaintContinuouslyAction, self).__init__()
+    self._seconds = seconds
+
+  def RunAction(self, tab):
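+    # Install a self-rescheduling requestAnimationFrame callback that counts
+    # how many frames have been produced.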
+    tab.ExecuteJavaScript(
+        'window.__rafCount = 0;'
+        'window.__rafFunction = function() {'
+          'window.__rafCount += 1;'
+          'window.webkitRequestAnimationFrame(window.__rafFunction);'
+        '};'
+        'window.webkitRequestAnimationFrame(window.__rafFunction);')
+
+    # Wait until at least self._seconds have elapsed AND at least three RAFs
+    # have been fired. Use a hard time-out of 60 seconds (or self._seconds,
+    # whichever is larger).
+    time.sleep(self._seconds)
+    def HasMinRafs():
+      return tab.EvaluateJavaScript('window.__rafCount;') >= 3
+    util.WaitFor(HasMinRafs, max(60 - self._seconds, 0))
diff --git a/catapult/telemetry/telemetry/internal/actions/repeatable_scroll.py b/catapult/telemetry/telemetry/internal/actions/repeatable_scroll.py
new file mode 100644
index 0000000..79ba4c4
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/repeatable_scroll.py
@@ -0,0 +1,40 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.internal.actions import page_action
+from telemetry.web_perf import timeline_interaction_record
+
+
+class RepeatableScrollAction(page_action.PageAction):
+
+  def __init__(self, x_scroll_distance_ratio=0.0, y_scroll_distance_ratio=0.5,
+               repeat_count=0, repeat_delay_ms=250, timeout=60):
+    super(RepeatableScrollAction, self).__init__()
+    self._x_scroll_distance_ratio = x_scroll_distance_ratio
+    self._y_scroll_distance_ratio = y_scroll_distance_ratio
+    self._repeat_count = repeat_count
+    self._repeat_delay_ms = repeat_delay_ms
+    self._windowsize = []
+    self._timeout = timeout
+
+  def WillRunAction(self, tab):
+    # Get the inner dimensions of the browser window.
+    window_info_js = 'window.innerWidth + "," + window.innerHeight'
+    js_result = tab.EvaluateJavaScript(window_info_js).split(',')
+
+    self._windowsize = [int(js_result[0]), int(js_result[1])]
+
+  def RunAction(self, tab):
+    # Set up a browser-driven repeating scroll. The delay between scrolls
+    # should be unaffected by render thread responsiveness (or lack thereof).
+    tab.SynthesizeScrollGesture(
+        x=int(self._windowsize[0] / 2),
+        y=int(self._windowsize[1] / 2),
+        xDistance=int(self._x_scroll_distance_ratio * self._windowsize[0]),
+        yDistance=int(-self._y_scroll_distance_ratio * self._windowsize[1]),
+        repeatCount=self._repeat_count,
+        repeatDelayMs=self._repeat_delay_ms,
+        interactionMarkerName=timeline_interaction_record.GetJavaScriptMarker(
+            'Gesture_ScrollAction', [timeline_interaction_record.REPEATABLE]),
+        timeout=self._timeout)
diff --git a/catapult/telemetry/telemetry/internal/actions/repeatable_scroll_unittest.py b/catapult/telemetry/telemetry/internal/actions/repeatable_scroll_unittest.py
new file mode 100644
index 0000000..f5341e9
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/repeatable_scroll_unittest.py
@@ -0,0 +1,64 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.internal.actions import repeatable_scroll
+from telemetry.internal.browser import browser_info as browser_info_module
+from telemetry.testing import tab_test_case
+
+
+class RepeatableScrollActionTest(tab_test_case.TabTestCase):
+
+  def setUp(self):
+    tab_test_case.TabTestCase.setUp(self)
+    self.Navigate('blank.html')
+
+    # Make page bigger than window so it's scrollable.
+    self._tab.ExecuteJavaScript('document.body.style.height = '
+                                ' (3 * window.innerHeight + 1) + "px";')
+
+    self.assertEquals(
+        self._tab.EvaluateJavaScript('document.documentElement.scrollTop '
+                                     '|| document.body.scrollTop'), 0)
+
+    self._browser_info = browser_info_module.BrowserInfo(self._tab.browser)
+    self._window_height = int(
+        self._tab.EvaluateJavaScript('window.innerHeight'))
+
+  def testRepeatableScrollActionNoRepeats(self):
+    if not self._browser_info.HasRepeatableSynthesizeScrollGesture():
+      return
+
+    expected_scroll = (self._window_height / 2) - 1
+
+    i = repeatable_scroll.RepeatableScrollAction(y_scroll_distance_ratio=0.5)
+    i.WillRunAction(self._tab)
+
+    i.RunAction(self._tab)
+
+    scroll_position = self._tab.EvaluateJavaScript(
+        '(document.documentElement.scrollTop || document.body.scrollTop)')
+    # We can only expect the final scroll position to be approximately equal.
+    self.assertTrue(abs(scroll_position - expected_scroll) < 20,
+                    msg='scroll_position=%d;expected %d' % (scroll_position,
+                                                            expected_scroll))
+
+  def testRepeatableScrollActionTwoRepeats(self):
+    if not self._browser_info.HasRepeatableSynthesizeScrollGesture():
+      return
+
+    expected_scroll = ((self._window_height / 2) - 1) * 3
+
+    i = repeatable_scroll.RepeatableScrollAction(y_scroll_distance_ratio=0.5,
+                                                 repeat_count=2,
+                                                 repeat_delay_ms=1)
+    i.WillRunAction(self._tab)
+
+    i.RunAction(self._tab)
+
+    scroll_position = self._tab.EvaluateJavaScript(
+        '(document.documentElement.scrollTop || document.body.scrollTop)')
+    # We can only expect the final scroll position to be approximately equal.
+    self.assertTrue(abs(scroll_position - expected_scroll) < 20,
+                    msg='scroll_position=%d;expected %d' % (scroll_position,
+                                                            expected_scroll))
diff --git a/catapult/telemetry/telemetry/internal/actions/scroll.js b/catapult/telemetry/telemetry/internal/actions/scroll.js
new file mode 100644
index 0000000..1a295d0
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/scroll.js
@@ -0,0 +1,147 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file provides the ScrollAction object, which scrolls a page
+// to the bottom or for a specified distance:
+//   1. var action = new __ScrollAction(callback, opt_distance_func)
+//   2. action.start(scroll_options)
+'use strict';
+
+(function() {
+  var MAX_SCROLL_LENGTH_TIME_MS = 6250;
+
+  function ScrollGestureOptions(opt_options) {
+    if (opt_options) {
+      this.element_ = opt_options.element;
+      this.left_start_ratio_ = opt_options.left_start_ratio;
+      this.top_start_ratio_ = opt_options.top_start_ratio;
+      this.direction_ = opt_options.direction;
+      this.speed_ = opt_options.speed;
+      this.gesture_source_type_ = opt_options.gesture_source_type;
+    } else {
+      this.element_ = document.scrollingElement || document.body;
+      this.left_start_ratio_ = 0.5;
+      this.top_start_ratio_ = 0.5;
+      this.direction_ = 'down';
+      this.speed_ = 800;
+      this.gesture_source_type_ = chrome.gpuBenchmarking.DEFAULT_INPUT;
+    }
+  }
+
+  function supportedByBrowser() {
+    return !!(window.chrome &&
+              chrome.gpuBenchmarking &&
+              chrome.gpuBenchmarking.smoothScrollBy);
+  }
+
+  // This class scrolls a page from the top to the bottom once.
+  //
+  // The page is scrolled down by a single scroll gesture.
+  function ScrollAction(opt_callback, opt_distance_func) {
+    var self = this;
+
+    this.beginMeasuringHook = function() {};
+    this.endMeasuringHook = function() {};
+
+    this.callback_ = opt_callback;
+    this.distance_func_ = opt_distance_func;
+  }
+
+  ScrollAction.prototype.getScrollDistanceDown_ = function() {
+    var clientHeight;
+    // clientHeight is "special" for the body element.
+    if (this.element_ == document.body)
+      clientHeight = window.innerHeight;
+    else
+      clientHeight = this.element_.clientHeight;
+
+    return this.element_.scrollHeight -
+           this.element_.scrollTop -
+           clientHeight;
+  };
+
+  ScrollAction.prototype.getScrollDistanceUp_ = function() {
+    return this.element_.scrollTop;
+  };
+
+  ScrollAction.prototype.getScrollDistanceRight_ = function() {
+    var clientWidth;
+    // clientWidth is "special" for the body element.
+    if (this.element_ == document.body)
+      clientWidth = window.innerWidth;
+    else
+      clientWidth = this.element_.clientWidth;
+
+    return this.element_.scrollWidth - this.element_.scrollLeft - clientWidth;
+  };
+
+  ScrollAction.prototype.getScrollDistanceLeft_ = function() {
+    return this.element_.scrollLeft;
+  };
+
+  ScrollAction.prototype.getScrollDistance_ = function() {
+    if (this.distance_func_)
+      return this.distance_func_();
+
+    if (this.options_.direction_ == 'down') {
+      return this.getScrollDistanceDown_();
+    } else if (this.options_.direction_ == 'up') {
+      return this.getScrollDistanceUp_();
+    } else if (this.options_.direction_ == 'right') {
+      return this.getScrollDistanceRight_();
+    } else if (this.options_.direction_ == 'left') {
+      return this.getScrollDistanceLeft_();
+    } else if (this.options_.direction_ == 'upleft') {
+      return Math.min(this.getScrollDistanceUp_(),
+                      this.getScrollDistanceLeft_());
+    } else if (this.options_.direction_ == 'upright') {
+      return Math.min(this.getScrollDistanceUp_(),
+                      this.getScrollDistanceRight_());
+    } else if (this.options_.direction_ == 'downleft') {
+      return Math.min(this.getScrollDistanceDown_(),
+                      this.getScrollDistanceLeft_());
+    } else if (this.options_.direction_ == 'downright') {
+      return Math.min(this.getScrollDistanceDown_(),
+                      this.getScrollDistanceRight_());
+    }
+  };
+
+  ScrollAction.prototype.start = function(opt_options) {
+    this.options_ = new ScrollGestureOptions(opt_options);
+    // Assign this.element_ here rather than in the constructor, because
+    // start() is guaranteed to be called after the document has loaded.
+    this.element_ = this.options_.element_;
+    requestAnimationFrame(this.startGesture_.bind(this));
+  };
+
+  ScrollAction.prototype.startGesture_ = function() {
+    this.beginMeasuringHook();
+
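+    // Cap the distance of a single gesture so it lasts at most
+    // MAX_SCROLL_LENGTH_TIME_MS (6.25 seconds) at the requested speed.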
+    var max_scroll_length_pixels = (MAX_SCROLL_LENGTH_TIME_MS / 1000) *
+        this.options_.speed_;
+    var distance = Math.min(max_scroll_length_pixels,
+                            this.getScrollDistance_());
+
+    var rect = __GestureCommon_GetBoundingVisibleRect(this.options_.element_);
+    var start_left =
+        rect.left + rect.width * this.options_.left_start_ratio_;
+    var start_top =
+        rect.top + rect.height * this.options_.top_start_ratio_;
+    chrome.gpuBenchmarking.smoothScrollBy(
+        distance, this.onGestureComplete_.bind(this), start_left, start_top,
+        this.options_.gesture_source_type_, this.options_.direction_,
+        this.options_.speed_);
+  };
+
+  ScrollAction.prototype.onGestureComplete_ = function() {
+    this.endMeasuringHook();
+
+    // We're done.
+    if (this.callback_)
+      this.callback_();
+  };
+
+  window.__ScrollAction = ScrollAction;
+  window.__ScrollAction_SupportedByBrowser = supportedByBrowser;
+})();
diff --git a/catapult/telemetry/telemetry/internal/actions/scroll.py b/catapult/telemetry/telemetry/internal/actions/scroll.py
new file mode 100644
index 0000000..0cd53ef
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/scroll.py
@@ -0,0 +1,108 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import os
+
+from telemetry.internal.actions import page_action
+
+
+class ScrollAction(page_action.PageAction):
+  # TODO(chrishenry): Ignore attributes, to be deleted when usage in
+  # other repo is cleaned up.
+  def __init__(self, selector=None, text=None, element_function=None,
+               left_start_ratio=0.5, top_start_ratio=0.5, direction='down',
+               distance=None, distance_expr=None,
+               speed_in_pixels_per_second=800, use_touch=False,
+               synthetic_gesture_source=page_action.GESTURE_SOURCE_DEFAULT):
+    super(ScrollAction, self).__init__()
+    if direction not in ('down', 'up', 'left', 'right',
+                         'downleft', 'downright',
+                         'upleft', 'upright'):
+      raise page_action.PageActionNotSupported(
+          'Invalid scroll direction: %s' % direction)
+    self._selector = selector
+    self._text = text
+    self._element_function = element_function
+    self._left_start_ratio = left_start_ratio
+    self._top_start_ratio = top_start_ratio
+    self._direction = direction
+    self._speed = speed_in_pixels_per_second
+    self._use_touch = use_touch
+    self._synthetic_gesture_source = ('chrome.gpuBenchmarking.%s_INPUT' %
+                                      synthetic_gesture_source)
+
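+    # A fixed distance is converted into a JS expression so both cases feed
+    # the same distance function below.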
+    self._distance_func = 'null'
+    if distance:
+      assert not distance_expr
+      distance_expr = str(distance)
+    if distance_expr:
+      self._distance_func = ('function() { return 0 + %s; }' %
+                             distance_expr)
+
+  def WillRunAction(self, tab):
+    if self._direction in ('downleft', 'downright', 'upleft', 'upright'):
+      # Diagonal scrolling support was added in Chrome branch number 2332.
+      branch_num = (
+          tab.browser._browser_backend.devtools_client.GetChromeBranchNumber())
+      if branch_num < 2332:
+        raise ValueError('Diagonal scrolling requires Chrome branch number'
+                         ' 2332 or later. Found branch number %d' %
+                         branch_num)
+    for js_file in ['gesture_common.js', 'scroll.js']:
+      with open(os.path.join(os.path.dirname(__file__), js_file)) as f:
+        js = f.read()
+        tab.ExecuteJavaScript(js)
+
+    # Fail if browser doesn't support synthetic scroll gestures.
+    if not tab.EvaluateJavaScript('window.__ScrollAction_SupportedByBrowser()'):
+      raise page_action.PageActionNotSupported(
+          'Synthetic scroll not supported for this browser')
+
+    # Fail if this action requires touch and we can't send touch events.
+    if self._use_touch:
+      if not page_action.IsGestureSourceTypeSupported(tab, 'touch'):
+        raise page_action.PageActionNotSupported(
+            'Touch scroll not supported for this browser')
+
+      if (self._synthetic_gesture_source ==
+          'chrome.gpuBenchmarking.MOUSE_INPUT'):
+        raise page_action.PageActionNotSupported(
+            'Scroll requires touch on this page but mouse input was requested')
+
+    done_callback = 'function() { window.__scrollActionDone = true; }'
+    tab.ExecuteJavaScript("""
+        window.__scrollActionDone = false;
+        window.__scrollAction = new __ScrollAction(%s, %s);"""
+        % (done_callback, self._distance_func))
+
+  def RunAction(self, tab):
+    if (self._selector is None and self._text is None and
+        self._element_function is None):
+      self._element_function = '(document.scrollingElement || document.body)'
+
+    gesture_source_type = self._synthetic_gesture_source
+    if self._use_touch:
+      gesture_source_type = 'chrome.gpuBenchmarking.TOUCH_INPUT'
+
+    code = '''
+        function(element, info) {
+          if (!element) {
+            throw Error('Cannot find element: ' + info);
+          }
+          window.__scrollAction.start({
+            element: element,
+            left_start_ratio: %s,
+            top_start_ratio: %s,
+            direction: '%s',
+            speed: %s,
+            gesture_source_type: %s
+          });
+        }''' % (self._left_start_ratio,
+                self._top_start_ratio,
+                self._direction,
+                self._speed,
+                gesture_source_type)
+    page_action.EvaluateCallbackWithElement(
+        tab, code, selector=self._selector, text=self._text,
+        element_function=self._element_function)
+    tab.WaitForJavaScriptExpression('window.__scrollActionDone', 60)
diff --git a/catapult/telemetry/telemetry/internal/actions/scroll_bounce.js b/catapult/telemetry/telemetry/internal/actions/scroll_bounce.js
new file mode 100644
index 0000000..b41383c
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/scroll_bounce.js
@@ -0,0 +1,58 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+(function() {
+  function supportedByBrowser() {
+    return !!(window.chrome &&
+              chrome.gpuBenchmarking &&
+              chrome.gpuBenchmarking.scrollBounce);
+  }
+
+  function ScrollBounceAction(opt_callback) {
+    var self = this;
+
+    this.beginMeasuringHook = function() {};
+    this.endMeasuringHook = function() {};
+
+    this.callback_ = opt_callback;
+  }
+
+  ScrollBounceAction.prototype.start = function(options) {
+    this.options_ = options;
+    // Assign this.element_ here rather than in the constructor, because
+    // start() is guaranteed to be called after the document has loaded.
+    this.element_ = this.options_.element;
+    requestAnimationFrame(this.startGesture_.bind(this));
+  };
+
+  ScrollBounceAction.prototype.startGesture_ = function() {
+    this.beginMeasuringHook();
+
+    var rect = __GestureCommon_GetBoundingVisibleRect(this.options_.element);
+    var start_left =
+        rect.left + rect.width * this.options_.left_start_ratio;
+    var start_top =
+        rect.top + rect.height * this.options_.top_start_ratio;
+    chrome.gpuBenchmarking.scrollBounce(this.options_.direction,
+                                        this.options_.distance,
+                                        this.options_.overscroll,
+                                        this.options_.repeat_count,
+                                        this.onGestureComplete_.bind(this),
+                                        start_left, start_top,
+                                        this.options_.speed);
+  };
+
+  ScrollBounceAction.prototype.onGestureComplete_ = function() {
+    this.endMeasuringHook();
+
+    // We're done.
+    if (this.callback_)
+      this.callback_();
+  };
+
+  window.__ScrollBounceAction = ScrollBounceAction;
+  window.__ScrollBounceAction_SupportedByBrowser = supportedByBrowser;
+})();
diff --git a/catapult/telemetry/telemetry/internal/actions/scroll_bounce.py b/catapult/telemetry/telemetry/internal/actions/scroll_bounce.py
new file mode 100644
index 0000000..672351f
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/scroll_bounce.py
@@ -0,0 +1,98 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import os
+
+from telemetry.internal.actions import page_action
+
+
+class ScrollBounceAction(page_action.PageAction):
+  def __init__(self, selector=None, text=None, element_function=None,
+               left_start_ratio=0.5, top_start_ratio=0.5,
+               direction='down', distance=100,
+               overscroll=10, repeat_count=10,
+               speed_in_pixels_per_second=400,
+               synthetic_gesture_source=page_action.GESTURE_SOURCE_DEFAULT):
+    super(ScrollBounceAction, self).__init__()
+    if direction not in ['down', 'up', 'left', 'right']:
+      raise page_action.PageActionNotSupported(
+          'Invalid scroll direction: %s' % direction)
+    self._selector = selector
+    self._text = text
+    self._element_function = element_function
+    self._left_start_ratio = left_start_ratio
+    self._top_start_ratio = top_start_ratio
+    # Should be big enough to do more than just hide the URL bar.
+    self._distance = distance
+    self._direction = direction
+    # This needs to be < height / repeat_count so we don't walk off the screen.
+    # We also probably don't want to spend more than a couple frames in
+    # overscroll since it may mask any synthetic delays.
+    self._overscroll = overscroll
+    # It's the transitions we really want to stress, make this big.
+    self._repeat_count = repeat_count
+    # 7 pixels per frame should be plenty of frames.
+    self._speed = speed_in_pixels_per_second
+    self._synthetic_gesture_source = ('chrome.gpuBenchmarking.%s_INPUT' %
+                                      synthetic_gesture_source)
+
+    if (self._selector is None and self._text is None and
+        self._element_function is None):
+      self._element_function = '(document.scrollingElement || document.body)'
+
+  def WillRunAction(self, tab):
+    for js_file in ['gesture_common.js', 'scroll_bounce.js']:
+      with open(os.path.join(os.path.dirname(__file__), js_file)) as f:
+        js = f.read()
+        tab.ExecuteJavaScript(js)
+
+    # Fail if browser doesn't support synthetic scroll bounce gestures.
+    if not tab.EvaluateJavaScript(
+        'window.__ScrollBounceAction_SupportedByBrowser()'):
+      raise page_action.PageActionNotSupported(
+          'Synthetic scroll bounce not supported for this browser')
+
+    # Fail if we can't send touch events (bouncing is really only
+    # interesting for touch)
+    if not page_action.IsGestureSourceTypeSupported(tab, 'touch'):
+      raise page_action.PageActionNotSupported(
+          'Touch scroll not supported for this browser')
+
+    if (self._synthetic_gesture_source ==
+        'chrome.gpuBenchmarking.MOUSE_INPUT'):
+      raise page_action.PageActionNotSupported(
+          'ScrollBounce page action does not support mouse input')
+
+    done_callback = 'function() { window.__scrollBounceActionDone = true; }'
+    tab.ExecuteJavaScript("""
+        window.__scrollBounceActionDone = false;
+        window.__scrollBounceAction = new __ScrollBounceAction(%s);"""
+        % (done_callback))
+
+  def RunAction(self, tab):
+    code = '''
+        function(element, info) {
+          if (!element) {
+            throw Error('Cannot find element: ' + info);
+          }
+          window.__scrollBounceAction.start({
+            element: element,
+            left_start_ratio: %s,
+            top_start_ratio: %s,
+            direction: '%s',
+            distance: %s,
+            overscroll: %s,
+            repeat_count: %s,
+            speed: %s
+          });
+        }''' % (self._left_start_ratio,
+                self._top_start_ratio,
+                self._direction,
+                self._distance,
+                self._overscroll,
+                self._repeat_count,
+                self._speed)
+    page_action.EvaluateCallbackWithElement(
+        tab, code, selector=self._selector, text=self._text,
+        element_function=self._element_function)
+    tab.WaitForJavaScriptExpression('window.__scrollBounceActionDone', 60)
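For reference, a minimal sketch of driving ScrollBounceAction directly, following the same WillRunAction/RunAction pattern the action unittests in this change use (the tab object is an assumption; it would come from tab_test_case or an equivalent harness):

    from telemetry.internal.actions import scroll_bounce

    action = scroll_bounce.ScrollBounceAction(
        direction='down', distance=100, overscroll=10, repeat_count=10)
    action.WillRunAction(tab)  # injects gesture_common.js and scroll_bounce.js
    action.RunAction(tab)      # runs the gesture, waits for __scrollBounceActionDone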
diff --git a/catapult/telemetry/telemetry/internal/actions/scroll_unittest.py b/catapult/telemetry/telemetry/internal/actions/scroll_unittest.py
new file mode 100644
index 0000000..b708dab
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/scroll_unittest.py
@@ -0,0 +1,123 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+
+from telemetry.internal.actions import scroll
+from telemetry.testing import tab_test_case
+
+class ScrollActionTest(tab_test_case.TabTestCase):
+  def testScrollAction(self):
+    self.Navigate('blank.html')
+
+    # Make page bigger than window so it's scrollable.
+    self._tab.ExecuteJavaScript("""document.body.style.height =
+                              (2 * window.innerHeight + 1) + 'px';""")
+
+    self.assertEquals(
+        self._tab.EvaluateJavaScript("""document.documentElement.scrollTop
+                                   || document.body.scrollTop"""), 0)
+
+    i = scroll.ScrollAction()
+    i.WillRunAction(self._tab)
+
+    self._tab.ExecuteJavaScript("""
+        window.__scrollAction.beginMeasuringHook = function() {
+            window.__didBeginMeasuring = true;
+        };
+        window.__scrollAction.endMeasuringHook = function() {
+            window.__didEndMeasuring = true;
+        };""")
+    i.RunAction(self._tab)
+
+    self.assertTrue(self._tab.EvaluateJavaScript('window.__didBeginMeasuring'))
+    self.assertTrue(self._tab.EvaluateJavaScript('window.__didEndMeasuring'))
+
+    scroll_position = self._tab.EvaluateJavaScript(
+        '(document.documentElement.scrollTop || document.body.scrollTop)')
+    self.assertTrue(scroll_position != 0,
+                    msg='scroll_position=%d;' % (scroll_position))
+
+  def testDiagonalScrollAction(self):
+    # Diagonal scrolling was not supported in the ScrollAction until Chrome
+    # branch number 2332.
+    branch_num = self._tab.browser._browser_backend.devtools_client \
+        .GetChromeBranchNumber()
+    if branch_num < 2332:
+      return
+
+    self.Navigate('blank.html')
+
+    # Make page bigger than window so it's scrollable.
+    self._tab.ExecuteJavaScript("""document.body.style.height =
+                              (2 * window.innerHeight + 1) + 'px';""")
+    self._tab.ExecuteJavaScript("""document.body.style.width =
+                              (2 * window.innerWidth + 1) + 'px';""")
+
+    self.assertEquals(
+        self._tab.EvaluateJavaScript("""document.documentElement.scrollTop
+                                   || document.body.scrollTop"""), 0)
+    self.assertEquals(
+        self._tab.EvaluateJavaScript("""document.documentElement.scrollLeft
+                                   || document.body.scrollLeft"""), 0)
+
+    i = scroll.ScrollAction(direction='downright')
+    i.WillRunAction(self._tab)
+
+    i.RunAction(self._tab)
+
+    viewport_top = self._tab.EvaluateJavaScript(
+        '(document.documentElement.scrollTop || document.body.scrollTop)')
+    self.assertTrue(viewport_top != 0, msg='viewport_top=%d;' % viewport_top)
+
+    viewport_left = self._tab.EvaluateJavaScript(
+        '(document.documentElement.scrollLeft || document.body.scrollLeft)')
+    self.assertTrue(viewport_left != 0, msg='viewport_left=%d;' % viewport_left)
+
+  def testBoundingClientRect(self):
+    self.Navigate('blank.html')
+
+    with open(os.path.join(os.path.dirname(__file__),
+                           'gesture_common.js')) as f:
+      js = f.read()
+      self._tab.ExecuteJavaScript(js)
+
+    # Verify that the rect returned by getBoundingVisibleRect() in scroll.js is
+    # completely contained within the viewport. Scroll events dispatched by the
+    # scrolling API use the center of this rect as their location, and this
+    # location needs to be within the viewport bounds to correctly decide
+    # between main-thread and impl-thread scroll. If the scrollable area were
+    # not clipped to the viewport bounds, then the instance used here (the
+    # scrollable area being more than twice as tall as the viewport) would
+    # result in a scroll location outside of the viewport bounds.
+    self._tab.ExecuteJavaScript("""document.body.style.height =
+                           (3 * window.innerHeight + 1) + 'px';""")
+    self._tab.ExecuteJavaScript("""document.body.style.width =
+                           (3 * window.innerWidth + 1) + 'px';""")
+    self._tab.ExecuteJavaScript(
+        "window.scrollTo(window.innerWidth, window.innerHeight);")
+
+    rect_top = int(self._tab.EvaluateJavaScript(
+        '__GestureCommon_GetBoundingVisibleRect(document.body).top'))
+    rect_height = int(self._tab.EvaluateJavaScript(
+        '__GestureCommon_GetBoundingVisibleRect(document.body).height'))
+    rect_bottom = rect_top + rect_height
+
+    rect_left = int(self._tab.EvaluateJavaScript(
+        '__GestureCommon_GetBoundingVisibleRect(document.body).left'))
+    rect_width = int(self._tab.EvaluateJavaScript(
+        '__GestureCommon_GetBoundingVisibleRect(document.body).width'))
+    rect_right = rect_left + rect_width
+
+    viewport_height = int(self._tab.EvaluateJavaScript('window.innerHeight'))
+    viewport_width = int(self._tab.EvaluateJavaScript('window.innerWidth'))
+
+    self.assertTrue(rect_top >= 0,
+        msg='%s >= %s' % (rect_top, 0))
+    self.assertTrue(rect_left >= 0,
+        msg='%s >= %s' % (rect_left, 0))
+    self.assertTrue(rect_bottom <= viewport_height,
+        msg='%s + %s <= %s' % (rect_top, rect_height, viewport_height))
+    self.assertTrue(rect_right <= viewport_width,
+        msg='%s + %s <= %s' % (rect_left, rect_width, viewport_width))
diff --git a/catapult/telemetry/telemetry/internal/actions/seek.js b/catapult/telemetry/telemetry/internal/actions/seek.js
new file mode 100644
index 0000000..baef30c
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/seek.js
@@ -0,0 +1,57 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file performs actions on media elements.
+(function() {
+  function seekMedia(selector, seekTime, logSeekTime, seekLabel) {
+    // Performs the "Seek" action on media satisfying selector.
+    var mediaElements = window.__findMediaElements(selector);
+    for (var i = 0; i < mediaElements.length; i++) {
+      if (mediaElements[i] instanceof HTMLMediaElement)
+        seekHTML5Element(mediaElements[i], seekTime, logSeekTime, seekLabel);
+      else
+        throw new Error('Cannot seek non-HTML5 media elements.');
+    }
+  }
+
+  function seekHTML5Element(element, seekTime, logSeekTime, seekLabel) {
+    function readyForSeek() {
+      seekHTML5ElementPostLoad(element, seekTime, logSeekTime, seekLabel);
+    }
+    if (element.readyState == element.HAVE_NOTHING) {
+      var onLoadedMetaData = function(e) {
+        element.removeEventListener('loadedmetadata', onLoadedMetaData);
+        readyForSeek();
+      };
+      element.addEventListener('loadedmetadata', onLoadedMetaData);
+      element.load();
+    } else {
+      readyForSeek();
+    }
+  }
+
+  function seekHTML5ElementPostLoad(element, seekTime, logSeekTime, seekLabel) {
+    // Reset seek completion since multiple seeks can run on the same element.
+    element['seeked_completed'] = false;
+    window.__registerHTML5ErrorEvents(element);
+    window.__registerHTML5EventCompleted(element, 'seeked');
+
+    if (logSeekTime) {
+      var willSeekEvent = document.createEvent('Event');
+      willSeekEvent.initEvent('willSeek', false, false);
+      if (seekLabel)
+        willSeekEvent.seekLabel = seekLabel;
+      else
+        willSeekEvent.seekLabel = seekTime;
+      element.dispatchEvent(willSeekEvent);
+    }
+    try {
+      element.currentTime = seekTime;
+    } catch (err) {
+      throw new Error('Cannot seek in network state: ' + element.networkState);
+    }
+  }
+
+  window.__seekMedia = seekMedia;
+})();
diff --git a/catapult/telemetry/telemetry/internal/actions/seek.py b/catapult/telemetry/telemetry/internal/actions/seek.py
new file mode 100644
index 0000000..a89dfef
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/seek.py
@@ -0,0 +1,50 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A Telemetry page_action that performs the "seek" action on media elements.
+
+Action parameters are:
+- seconds: The media time to seek to. Test fails if not provided.
+- selector: If no selector is defined then the action attempts to seek the first
+            media element on the page. If 'all' then seek all media elements.
+- timeout_in_seconds: Maximum waiting time for the "seeked" event
+                      (dispatched when the seeked operation completes)
+                      to be fired.  0 means do not wait.
+- log_time: If true the seek time is recorded, otherwise media
+            measurement will not be aware of the seek action. Used to
+            perform multiple seeks. Default true.
+- label: A suffix string to name the seek perf measurement.
+"""
+
+from telemetry.core import exceptions
+from telemetry.internal.actions import media_action
+from telemetry.internal.actions import page_action
+
+
+class SeekAction(media_action.MediaAction):
+  def __init__(self, seconds, selector=None, timeout_in_seconds=0,
+               log_time=True, label=''):
+    super(SeekAction, self).__init__()
+    self._seconds = seconds
+    self._selector = selector if selector else ''
+    self._timeout_in_seconds = timeout_in_seconds
+    self._log_time = log_time
+    self._label = label
+
+  def WillRunAction(self, tab):
+    """Load the media metrics JS code prior to running the action."""
+    super(SeekAction, self).WillRunAction(tab)
+    self.LoadJS(tab, 'seek.js')
+
+  def RunAction(self, tab):
+    try:
+      tab.ExecuteJavaScript(
+          'window.__seekMedia("%s", "%s", %i, "%s");' %
+          (self._selector, self._seconds, self._log_time, self._label))
+      if self._timeout_in_seconds > 0:
+        self.WaitForEvent(tab, self._selector, 'seeked',
+                          self._timeout_in_seconds)
+    except exceptions.EvaluateException:
+      raise page_action.PageActionFailed('Cannot seek media element(s) with '
+                                         'selector = %s.' % self._selector)
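To make the parameters documented in the module docstring concrete, a minimal sketch of constructing and running a SeekAction (the tab object and the label value are assumptions; the pattern mirrors the unittest that follows):

    from telemetry.internal.actions import seek

    action = seek.SeekAction(seconds=1, selector='#video_1',
                             timeout_in_seconds=5, label='first_seek')
    action.WillRunAction(tab)  # loads the media metrics JS plus seek.js
    action.RunAction(tab)      # seeks, then waits up to 5s for 'seeked'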
diff --git a/catapult/telemetry/telemetry/internal/actions/seek_unittest.py b/catapult/telemetry/telemetry/internal/actions/seek_unittest.py
new file mode 100644
index 0000000..bea3e63
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/seek_unittest.py
@@ -0,0 +1,66 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.core import exceptions
+from telemetry import decorators
+from telemetry.internal.actions import seek
+from telemetry.testing import tab_test_case
+
+AUDIO_1_SEEKED_CHECK = 'window.__hasEventCompleted("#audio_1", "seeked");'
+VIDEO_1_SEEKED_CHECK = 'window.__hasEventCompleted("#video_1", "seeked");'
+
+
+class SeekActionTest(tab_test_case.TabTestCase):
+
+  def setUp(self):
+    tab_test_case.TabTestCase.setUp(self)
+    self.Navigate('video_test.html')
+
+  @decorators.Disabled('linux')  # crbug.com/418577
+  def testSeekWithNoSelector(self):
+    """Tests that with no selector Seek  action seeks first media element."""
+    action = seek.SeekAction(seconds=1, timeout_in_seconds=5)
+    action.WillRunAction(self._tab)
+    action.RunAction(self._tab)
+    # Assert only first video has played.
+    self.assertTrue(self._tab.EvaluateJavaScript(VIDEO_1_SEEKED_CHECK))
+    self.assertFalse(self._tab.EvaluateJavaScript(AUDIO_1_SEEKED_CHECK))
+
+  @decorators.Disabled('linux')  # crbug.com/418577
+  def testSeekWithVideoSelector(self):
+    """Tests that Seek action seeks video element matching selector."""
+    action = seek.SeekAction(seconds=1, selector='#video_1',
+                             timeout_in_seconds=5)
+    action.WillRunAction(self._tab)
+    # Both videos not playing before running action.
+    self.assertFalse(self._tab.EvaluateJavaScript(VIDEO_1_SEEKED_CHECK))
+    self.assertFalse(self._tab.EvaluateJavaScript(AUDIO_1_SEEKED_CHECK))
+    action.RunAction(self._tab)
+    # Assert only video matching selector has played.
+    self.assertTrue(self._tab.EvaluateJavaScript(VIDEO_1_SEEKED_CHECK))
+    self.assertFalse(self._tab.EvaluateJavaScript(AUDIO_1_SEEKED_CHECK))
+
+  @decorators.Disabled('linux')  # crbug.com/418577
+  def testSeekWithAllSelector(self):
+    """Tests that Seek action seeks all video elements with selector='all'."""
+    action = seek.SeekAction(seconds=1, selector='all',
+                             timeout_in_seconds=5)
+    action.WillRunAction(self._tab)
+    # Both videos not playing before running action.
+    self.assertFalse(self._tab.EvaluateJavaScript(VIDEO_1_SEEKED_CHECK))
+    self.assertFalse(self._tab.EvaluateJavaScript(AUDIO_1_SEEKED_CHECK))
+    action.RunAction(self._tab)
+    # Assert all media elements played.
+    self.assertTrue(self._tab.EvaluateJavaScript(VIDEO_1_SEEKED_CHECK))
+    self.assertTrue(self._tab.EvaluateJavaScript(AUDIO_1_SEEKED_CHECK))
+
+  @decorators.Disabled('linux')  # crbug.com/418577
+  def testSeekWaitForSeekTimeout(self):
+    """Tests that wait_for_seeked timeouts if video does not seek."""
+    action = seek.SeekAction(seconds=1, selector='#video_1',
+                             timeout_in_seconds=0.1)
+    action.WillRunAction(self._tab)
+    self._tab.EvaluateJavaScript('document.getElementById("video_1").src = ""')
+    self.assertFalse(self._tab.EvaluateJavaScript(VIDEO_1_SEEKED_CHECK))
+    self.assertRaises(exceptions.TimeoutException, action.RunAction, self._tab)
diff --git a/catapult/telemetry/telemetry/internal/actions/swipe.js b/catapult/telemetry/telemetry/internal/actions/swipe.js
new file mode 100644
index 0000000..cf97d5c
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/swipe.js
@@ -0,0 +1,75 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+(function() {
+  function SwipeGestureOptions(opt_options) {
+    if (opt_options) {
+      this.element_ = opt_options.element;
+      this.left_start_ratio_ = opt_options.left_start_ratio;
+      this.top_start_ratio_ = opt_options.top_start_ratio;
+      this.direction_ = opt_options.direction;
+      this.distance_ = opt_options.distance;
+      this.speed_ = opt_options.speed;
+    } else {
+      this.element_ = document.body;
+      this.left_start_ratio_ = 0.5;
+      this.top_start_ratio_ = 0.5;
+      this.direction_ = 'left';
+      this.distance_ = 0;
+      this.speed_ = 800;
+    }
+  }
+
+  function supportedByBrowser() {
+    return !!(window.chrome &&
+              chrome.gpuBenchmarking &&
+              chrome.gpuBenchmarking.swipe);
+  }
+
+  // This class swipes a page for a specified distance.
+  function SwipeAction(opt_callback) {
+    var self = this;
+
+    this.beginMeasuringHook = function() {};
+    this.endMeasuringHook = function() {};
+
+    this.callback_ = opt_callback;
+  }
+
+  SwipeAction.prototype.start = function(opt_options) {
+    this.options_ = new SwipeGestureOptions(opt_options);
+    // Assign this.element_ here rather than in the constructor, because this
+    // method is only guaranteed to be called after the document is loaded.
+    this.element_ = this.options_.element_;
+    requestAnimationFrame(this.startGesture_.bind(this));
+  };
+
+  SwipeAction.prototype.startGesture_ = function() {
+    this.beginMeasuringHook();
+
+    var rect = __GestureCommon_GetBoundingVisibleRect(this.options_.element_);
+    var start_left =
+        rect.left + rect.width * this.options_.left_start_ratio_;
+    var start_top =
+        rect.top + rect.height * this.options_.top_start_ratio_;
+    chrome.gpuBenchmarking.swipe(this.options_.direction_,
+                                 this.options_.distance_,
+                                 this.onGestureComplete_.bind(this),
+                                 start_left, start_top,
+                                 this.options_.speed_);
+  };
+
+  SwipeAction.prototype.onGestureComplete_ = function() {
+    this.endMeasuringHook();
+
+    // We're done.
+    if (this.callback_)
+      this.callback_();
+  };
+
+  window.__SwipeAction = SwipeAction;
+  window.__SwipeAction_SupportedByBrowser = supportedByBrowser;
+})();
diff --git a/catapult/telemetry/telemetry/internal/actions/swipe.py b/catapult/telemetry/telemetry/internal/actions/swipe.py
new file mode 100644
index 0000000..3376de8
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/swipe.py
@@ -0,0 +1,81 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+
+from telemetry.internal.actions import page_action
+
+
+class SwipeAction(page_action.PageAction):
+  def __init__(self, selector=None, text=None, element_function=None,
+               left_start_ratio=0.5, top_start_ratio=0.5,
+               direction='left', distance=100, speed_in_pixels_per_second=800,
+               synthetic_gesture_source=page_action.GESTURE_SOURCE_DEFAULT):
+    super(SwipeAction, self).__init__()
+    if direction not in ['down', 'up', 'left', 'right']:
+      raise page_action.PageActionNotSupported(
+          'Invalid swipe direction: %s' % direction)
+    self._selector = selector
+    self._text = text
+    self._element_function = element_function
+    self._left_start_ratio = left_start_ratio
+    self._top_start_ratio = top_start_ratio
+    self._direction = direction
+    self._distance = distance
+    self._speed = speed_in_pixels_per_second
+    self._synthetic_gesture_source = ('chrome.gpuBenchmarking.%s_INPUT' %
+                                      synthetic_gesture_source)
+
+  def WillRunAction(self, tab):
+    for js_file in ['gesture_common.js', 'swipe.js']:
+      with open(os.path.join(os.path.dirname(__file__), js_file)) as f:
+        js = f.read()
+        tab.ExecuteJavaScript(js)
+
+    # Fail if browser doesn't support synthetic swipe gestures.
+    if not tab.EvaluateJavaScript('window.__SwipeAction_SupportedByBrowser()'):
+      raise page_action.PageActionNotSupported(
+          'Synthetic swipe not supported for this browser')
+
+    if (self._synthetic_gesture_source ==
+        'chrome.gpuBenchmarking.MOUSE_INPUT'):
+      raise page_action.PageActionNotSupported(
+          'Swipe page action does not support mouse input')
+
+    if not page_action.IsGestureSourceTypeSupported(tab, 'touch'):
+      raise page_action.PageActionNotSupported(
+          'Touch input not supported for this browser')
+
+    done_callback = 'function() { window.__swipeActionDone = true; }'
+    tab.ExecuteJavaScript("""
+        window.__swipeActionDone = false;
+        window.__swipeAction = new __SwipeAction(%s);"""
+        % (done_callback))
+
+  def RunAction(self, tab):
+    if (self._selector is None and self._text is None and
+        self._element_function is None):
+      self._element_function = '(document.scrollingElement || document.body)'
+    code = '''
+        function(element, info) {
+          if (!element) {
+            throw Error('Cannot find element: ' + info);
+          }
+          window.__swipeAction.start({
+            element: element,
+            left_start_ratio: %s,
+            top_start_ratio: %s,
+            direction: '%s',
+            distance: %s,
+            speed: %s
+          });
+        }''' % (self._left_start_ratio,
+                self._top_start_ratio,
+                self._direction,
+                self._distance,
+                self._speed)
+    page_action.EvaluateCallbackWithElement(
+        tab, code, selector=self._selector, text=self._text,
+        element_function=self._element_function)
+    tab.WaitForJavaScriptExpression('window.__swipeActionDone', 60)
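A minimal SwipeAction usage sketch, analogous to the other gesture actions (tab is assumed to be a connected tab on a browser with touch and synthetic-gesture support):

    from telemetry.internal.actions import swipe

    action = swipe.SwipeAction(direction='left', distance=100,
                               speed_in_pixels_per_second=800)
    action.WillRunAction(tab)  # injects the JS and checks gesture support
    action.RunAction(tab)      # swipes, waits for window.__swipeActionDone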
diff --git a/catapult/telemetry/telemetry/internal/actions/tap.js b/catapult/telemetry/telemetry/internal/actions/tap.js
new file mode 100644
index 0000000..8d42247
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/tap.js
@@ -0,0 +1,73 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+(function() {
+
+  function TapGestureOptions(opt_options) {
+    if (opt_options) {
+      this.element_ = opt_options.element;
+      this.left_position_percentage_ = opt_options.left_position_percentage;
+      this.top_position_percentage_ = opt_options.top_position_percentage;
+      this.duration_ms_ = opt_options.duration_ms;
+      this.gesture_source_type_ = opt_options.gesture_source_type;
+    } else {
+      this.element_ = document.body;
+      this.left_position_percentage_ = 0.5;
+      this.top_position_percentage_ = 0.5;
+      this.duration_ms_ = 50;
+      this.gesture_source_type_ = chrome.gpuBenchmarking.DEFAULT_INPUT;
+    }
+  }
+
+  function supportedByBrowser() {
+    return !!(window.chrome &&
+              chrome.gpuBenchmarking &&
+              chrome.gpuBenchmarking.tap);
+  }
+
+  function TapAction(opt_callback) {
+    var self = this;
+
+    this.beginMeasuringHook = function() {};
+    this.endMeasuringHook = function() {};
+
+    this.callback_ = opt_callback;
+  }
+
+  TapAction.prototype.start = function(opt_options) {
+    this.options_ = new TapGestureOptions(opt_options);
+    // Assign this.element_ here rather than in the constructor, because this
+    // method is only guaranteed to be called after the document is loaded.
+    this.element_ = this.options_.element_;
+
+    this.beginMeasuringHook();
+
+    var rect = __GestureCommon_GetBoundingVisibleRect(this.options_.element_);
+    var position_left =
+        rect.left + rect.width * this.options_.left_position_percentage_;
+    var position_top =
+        rect.top + rect.height * this.options_.top_position_percentage_;
+    if (position_left < 0 || position_left >= window.innerWidth ||
+        position_top < 0 || position_top >= window.innerHeight) {
+      throw new Error('Tap position is off-screen');
+    }
+    chrome.gpuBenchmarking.tap(position_left, position_top,
+                               this.onGestureComplete_.bind(this),
+                               this.options_.duration_ms_,
+                               this.options_.gesture_source_type_);
+  };
+
+  TapAction.prototype.onGestureComplete_ = function() {
+    this.endMeasuringHook();
+
+    // We're done.
+    if (this.callback_)
+      this.callback_();
+  };
+
+  window.__TapAction = TapAction;
+  window.__TapAction_SupportedByBrowser = supportedByBrowser;
+})();
diff --git a/catapult/telemetry/telemetry/internal/actions/tap.py b/catapult/telemetry/telemetry/internal/actions/tap.py
new file mode 100644
index 0000000..ba234aa
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/tap.py
@@ -0,0 +1,73 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+
+from telemetry.internal.actions import page_action
+
+
+class TapAction(page_action.PageAction):
+  def __init__(self, selector=None, text=None, element_function=None,
+               left_position_percentage=0.5, top_position_percentage=0.5,
+               duration_ms=50,
+               synthetic_gesture_source=page_action.GESTURE_SOURCE_DEFAULT):
+    super(TapAction, self).__init__()
+    self.selector = selector
+    self.text = text
+    self.element_function = element_function
+    self.left_position_percentage = left_position_percentage
+    self.top_position_percentage = top_position_percentage
+    self.duration_ms = duration_ms
+    self._synthetic_gesture_source = ('chrome.gpuBenchmarking.%s_INPUT' %
+                                      synthetic_gesture_source)
+
+  def WillRunAction(self, tab):
+    for js_file in ['gesture_common.js', 'tap.js']:
+      with open(os.path.join(os.path.dirname(__file__), js_file)) as f:
+        js = f.read()
+        tab.ExecuteJavaScript(js)
+
+    # Fail if browser doesn't support synthetic tap gestures.
+    if not tab.EvaluateJavaScript('window.__TapAction_SupportedByBrowser()'):
+      raise page_action.PageActionNotSupported(
+          'Synthetic tap not supported for this browser')
+
+    done_callback = 'function() { window.__tapActionDone = true; }'
+    tab.ExecuteJavaScript("""
+        window.__tapActionDone = false;
+        window.__tapAction = new __TapAction(%s);"""
+        % (done_callback))
+
+  def HasElementSelector(self):
+    return (self.element_function is not None or self.selector is not None or
+            self.text is not None)
+
+  def RunAction(self, tab):
+    if not self.HasElementSelector():
+      self.element_function = 'document.body'
+
+    tap_cmd = ('''
+        window.__tapAction.start({
+          element: element,
+          left_position_percentage: %s,
+          top_position_percentage: %s,
+          duration_ms: %s,
+          gesture_source_type: %s
+        });'''
+          % (self.left_position_percentage,
+             self.top_position_percentage,
+             self.duration_ms,
+             self._synthetic_gesture_source))
+    code = '''
+        function(element, errorMsg) {
+          if (!element) {
+            throw Error('Cannot find element: ' + errorMsg);
+          }
+          %s;
+        }''' % tap_cmd
+
+    page_action.EvaluateCallbackWithElement(
+        tab, code, selector=self.selector, text=self.text,
+        element_function=self.element_function)
+    tab.WaitForJavaScriptExpression('window.__tapActionDone', 60)
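A minimal TapAction sketch (the tab object and the selector are assumptions):

    from telemetry.internal.actions import tap

    action = tap.TapAction(selector='#submit-button', duration_ms=50)
    action.WillRunAction(tab)  # injects gesture_common.js and tap.js, checks support
    action.RunAction(tab)      # taps the element's center, waits for completion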
diff --git a/catapult/telemetry/telemetry/internal/actions/wait.py b/catapult/telemetry/telemetry/internal/actions/wait.py
new file mode 100644
index 0000000..3413f3c
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/actions/wait.py
@@ -0,0 +1,22 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.internal.actions import page_action
+
+
+class WaitForElementAction(page_action.PageAction):
+  def __init__(self, selector=None, text=None, element_function=None,
+               timeout_in_seconds=60):
+    super(WaitForElementAction, self).__init__()
+    self.selector = selector
+    self.text = text
+    self.element_function = element_function
+    self.timeout_in_seconds = timeout_in_seconds
+
+  def RunAction(self, tab):
+    code = 'function(element) { return element != null; }'
+    page_action.EvaluateCallbackWithElement(
+        tab, code, selector=self.selector, text=self.text,
+        element_function=self.element_function,
+        wait=True, timeout_in_seconds=self.timeout_in_seconds)
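Since WaitForElementAction does not override WillRunAction, RunAction alone drives the wait. A minimal sketch (the tab object and the text value are assumptions):

    from telemetry.internal.actions import wait

    action = wait.WaitForElementAction(text='Next', timeout_in_seconds=30)
    action.RunAction(tab)  # blocks until a matching element exists or times out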
diff --git a/catapult/telemetry/telemetry/internal/app/__init__.py b/catapult/telemetry/telemetry/internal/app/__init__.py
new file mode 100644
index 0000000..af96cc6
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/app/__init__.py
@@ -0,0 +1,41 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class App(object):
+  """ A running application instance that can be controlled in a limited way.
+
+  Be sure to clean up after yourself by calling Close() when you are done with
+  the app. Or better yet:
+    with possible_app.Create(options) as app:
+      ... do all your operations on app here
+  """
+  def __init__(self, app_backend, platform_backend):
+    assert platform_backend.platform is not None
+    self._app_backend = app_backend
+    self._platform_backend = platform_backend
+    self._app_backend.SetApp(self)
+
+  @property
+  def app_type(self):
+    return self._app_backend.app_type
+
+  @property
+  def platform(self):
+    return self._platform_backend.platform
+
+  def __enter__(self):
+    return self
+
+  def __exit__(self, *args):
+    self.Close()
+
+  def Close(self):
+    raise NotImplementedError()
+
+  def GetStandardOutput(self):
+    return self._app_backend.GetStandardOutput()
+
+  def GetStackTrace(self):
+    return self._app_backend.GetStackTrace()
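As the class docstring above indicates, App is a context manager, so Close() runs automatically on exit. A hedged sketch, where possible_app and finder_options are placeholders for a concrete PossibleApp implementation and its options (neither is defined in this change):

    # Create() on a concrete PossibleApp is expected to return an App.
    with possible_app.Create(finder_options) as app:
      app_type = app.app_type    # delegated to the app backend
      platform = app.platform    # delegated to the platform backend
      # ... drive the app here ...
    # __exit__ calls app.Close(), even if the block above raises.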
diff --git a/catapult/telemetry/telemetry/internal/app/android_app.py b/catapult/telemetry/telemetry/internal/app/android_app.py
new file mode 100644
index 0000000..0cee256
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/app/android_app.py
@@ -0,0 +1,42 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.internal import app
+
+
+class AndroidApp(app.App):
+  """A running android app instance that can be controlled in a limited way.
+
+  Be sure to clean up after yourself by calling Close() when you are done with
+  the app. Or better yet:
+    with possible_android_app.Create(options) as android_app:
+      ... do all your operations on android_app here
+  """
+  def __init__(self, app_backend, platform_backend):
+    super(AndroidApp, self).__init__(app_backend=app_backend,
+                                     platform_backend=platform_backend)
+    self._app_backend.Start()
+
+  @property
+  def ui(self):
+    """Returns an AppUi object to interact with the app's UI.
+
+    See devil.android.app_ui for the documentation of the API provided.
+    """
+    return self._app_backend.GetAppUi()
+
+  def Close(self):
+    self._app_backend.Close()
+
+  def GetProcesses(self):
+    """Returns the current set of processes belonging to this app."""
+    return self._app_backend.GetProcesses()
+
+  def GetProcess(self, subprocess_name):
+    """Returns the process with the specified subprocess name."""
+    return self._app_backend.GetProcess(subprocess_name)
+
+  def GetWebViews(self):
+    """Returns the set of all WebViews belonging to all processes of the app."""
+    return self._app_backend.GetWebViews()
diff --git a/catapult/telemetry/telemetry/internal/app/android_app_unittest.py b/catapult/telemetry/telemetry/internal/app/android_app_unittest.py
new file mode 100644
index 0000000..f63b2e0
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/app/android_app_unittest.py
@@ -0,0 +1,55 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import time
+import unittest
+
+from telemetry.core import platform as platform_module
+from telemetry.internal.app import android_app
+from telemetry.internal.backends import android_app_backend
+from telemetry.internal.platform import android_device
+from telemetry.testing import options_for_unittests
+
+from devil.android.sdk import intent
+
+
+class AndroidAppTest(unittest.TestCase):
+  def setUp(self):
+    self._options = options_for_unittests.GetCopy()
+    self._device = android_device.GetDevice(self._options)
+
+  def CreateAndroidApp(self, start_intent):
+    platform = platform_module.GetPlatformForDevice(self._device, self._options)
+    platform_backend = platform._platform_backend
+    app_backend = android_app_backend.AndroidAppBackend(
+        platform_backend, start_intent)
+    return android_app.AndroidApp(app_backend, platform_backend)
+
+  def testWebView(self):
+    if self._device is None:
+      logging.warning('No device found, skipping test.')
+      return
+
+    start_intent = intent.Intent(
+        package='com.google.android.googlequicksearchbox',
+        activity='.SearchActivity',
+        action='com.google.android.googlequicksearchbox.GOOGLE_SEARCH',
+        data=None,
+        extras={'query': 'google'},
+        category=None)
+    search_app = self.CreateAndroidApp(start_intent)
+    search_process = search_app.GetProcess(':search')
+    search_process._UpdateDevToolsClient()
+
+    # TODO(ariblue): Replace the app used in this test with one in which the
+    # setWebContentsDebuggingEnabled method is called on the WebView class.
+    # This will configure webviews for debugging with chrome devtools inspector
+    # and allow us to remove this check.
+    if search_process._devtools_client is None:
+      return
+
+    webview = search_app.GetProcess(':search').GetWebViews().pop()
+    webview.Navigate('https://www.google.com/search?q=flowers')
+    time.sleep(5)
diff --git a/catapult/telemetry/telemetry/internal/app/android_process.py b/catapult/telemetry/telemetry/internal/app/android_process.py
new file mode 100644
index 0000000..3c9a294
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/app/android_process.py
@@ -0,0 +1,56 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.internal.backends.chrome_inspector import devtools_client_backend
+from telemetry.internal.browser import web_contents
+
+try:
+  from devil.android import ports
+except ImportError:
+  ports = None
+
+class WebViewNotFoundException(Exception):
+  pass
+
+class AndroidProcess(object):
+  """Represents a single android process."""
+
+  def __init__(self, app_backend, pid, name):
+    self._app_backend = app_backend
+    self._pid = pid
+    self._name = name
+    self._local_port = ports.AllocateTestServerPort()
+    self._devtools_client = None
+
+  @property
+  def pid(self):
+    return self._pid
+
+  @property
+  def name(self):
+    return self._name
+
+  @property
+  def _remote_devtools_port(self):
+    return 'localabstract:webview_devtools_remote_%s' % str(self.pid)
+
+  def _UpdateDevToolsClient(self):
+    if self._devtools_client is None:
+      self._app_backend.platform_backend.ForwardHostToDevice(
+          self._local_port, self._remote_devtools_port)
+      if devtools_client_backend.IsDevToolsAgentAvailable(
+          self._local_port, self._app_backend):
+        self._devtools_client = devtools_client_backend.DevToolsClientBackend(
+            self._local_port, self._remote_devtools_port, self._app_backend)
+
+  def GetWebViews(self):
+    webviews = []
+    self._UpdateDevToolsClient()
+    if self._devtools_client is not None:
+      devtools_context_map = (
+          self._devtools_client.GetUpdatedInspectableContexts())
+      for context in devtools_context_map.contexts:
+        webviews.append(web_contents.WebContents(
+            devtools_context_map.GetInspectorBackend(context['id'])))
+    return webviews
diff --git a/catapult/telemetry/telemetry/internal/app/possible_app.py b/catapult/telemetry/telemetry/internal/app/possible_app.py
new file mode 100644
index 0000000..9e065dc
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/app/possible_app.py
@@ -0,0 +1,43 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class PossibleApp(object):
+  """A factory class that can be used to create a running instance of app.
+
+  Call Create() to launch the app and begin manipulating it.
+  """
+
+  def __init__(self, app_type, target_os):
+    self._app_type = app_type
+    self._target_os = target_os
+    self._platform = None
+    self._platform_backend = None
+
+  def __repr__(self):
+    return 'PossibleApp(app_type=%s)' % self.app_type
+
+  @property
+  def app_type(self):
+    return self._app_type
+
+  @property
+  def target_os(self):
+    """Target OS, the app will run on."""
+    return self._target_os
+
+  @property
+  def platform(self):
+    self._InitPlatformIfNeeded()
+    return self._platform
+
+  def _InitPlatformIfNeeded(self):
+    raise NotImplementedError()
+
+  def Create(self, finder_options):
+    raise NotImplementedError()
+
+  def SupportsOptions(self, finder_options):
+    """Tests for extension support."""
+    raise NotImplementedError()
diff --git a/catapult/telemetry/telemetry/internal/backends/__init__.py b/catapult/telemetry/telemetry/internal/backends/__init__.py
new file mode 100644
index 0000000..9228df8
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/catapult/telemetry/telemetry/internal/backends/android_app_backend.py b/catapult/telemetry/telemetry/internal/backends/android_app_backend.py
new file mode 100644
index 0000000..ada3eb4
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/android_app_backend.py
@@ -0,0 +1,132 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import re
+
+from telemetry.core import util
+from telemetry.internal.app import android_process
+from telemetry.internal.backends import android_browser_backend_settings
+from telemetry.internal.backends import android_command_line_backend
+from telemetry.internal.backends import app_backend
+
+from devil.android import app_ui
+
+
+class AndroidAppBackend(app_backend.AppBackend):
+
+  def __init__(self, android_platform_backend, start_intent,
+               is_app_ready_predicate=None, app_has_webviews=True):
+    super(AndroidAppBackend, self).__init__(
+        start_intent.package, android_platform_backend)
+    self._default_process_name = start_intent.package
+    self._start_intent = start_intent
+    self._is_app_ready_predicate = is_app_ready_predicate
+    self._is_running = False
+    self._app_has_webviews = app_has_webviews
+    self._existing_processes_by_pid = {}
+    self._app_ui = None
+
+  @property
+  def device(self):
+    return self.platform_backend.device
+
+  def GetAppUi(self):
+    if self._app_ui is None:
+      self._app_ui = app_ui.AppUi(self.device, self._start_intent.package)
+    return self._app_ui
+
+  def _LaunchAndWaitForApplication(self):
+    """Launch the app and wait for it to be ready."""
+    def is_app_ready():
+      return self._is_app_ready_predicate(self.app)
+
+    # When "is_app_ready_predicate" is provided, we use it to wait for the
+    # app to become ready, otherwise "blocking=True" is used as a fall back.
+    # TODO(slamm): check if the wait for "ps" check is really needed, or
+    # whether the "blocking=True" fall back is sufficient.
+    has_ready_predicate = self._is_app_ready_predicate is not None
+    self.device.StartActivity(
+        self._start_intent,
+        blocking=not has_ready_predicate,
+        force_stop=True,  # Ensure app was not running.
+    )
+    if has_ready_predicate:
+      util.WaitFor(is_app_ready, timeout=60)
+
+  def Start(self):
+    """Start an Android app and wait for it to finish launching.
+
+    If the app has webviews, it is launched with the appropriate
+    command line arguments.
+
+    AppStory derivations can customize the wait-for-ready-state to wait
+    for a more specific event if needed.
+    """
+    if self._app_has_webviews:
+      webview_startup_args = self.GetWebviewStartupArgs()
+      backend_settings = (
+          android_browser_backend_settings.WebviewBackendSettings(
+              'android-webview'))
+      with android_command_line_backend.SetUpCommandLineFlags(
+          self.device, backend_settings, webview_startup_args):
+        self._LaunchAndWaitForApplication()
+    else:
+      self._LaunchAndWaitForApplication()
+    self._is_running = True
+
+  def Close(self):
+    self._is_running = False
+    self.platform_backend.KillApplication(self._start_intent.package)
+
+  def IsAppRunning(self):
+    return self._is_running
+
+  def GetStandardOutput(self):
+    raise NotImplementedError
+
+  def GetStackTrace(self):
+    raise NotImplementedError
+
+  def GetProcesses(self, process_filter=None):
+    if process_filter is None:
+      # Match process names of the form: 'process_name[:subprocess]'.
+      process_filter = re.compile(
+          '^%s(:|$)' % re.escape(self._default_process_name)).match
+
+    processes = set()
+    ps_output = self.platform_backend.GetPsOutput(['pid', 'name'])
+    for pid, name in ps_output:
+      if not process_filter(name):
+        continue
+
+      if pid not in self._existing_processes_by_pid:
+        self._existing_processes_by_pid[pid] = android_process.AndroidProcess(
+            self, pid, name)
+      processes.add(self._existing_processes_by_pid[pid])
+    return processes
+
+  def GetProcess(self, subprocess_name):
+    assert subprocess_name.startswith(':')
+    process_name = self._default_process_name + subprocess_name
+    return self.GetProcesses(lambda n: n == process_name).pop()
+
+  def GetWebViews(self):
+    assert self._app_has_webviews
+    webviews = set()
+    for process in self.GetProcesses():
+      webviews.update(process.GetWebViews())
+    return webviews
+
+  def GetWebviewStartupArgs(self):
+    assert self._app_has_webviews
+    args = []
+
+    # Turn on GPU benchmarking extension for all runs. The only side effect of
+    # the extension being on is that render stats are tracked. This is believed
+    # to be effectively free. And, by doing so here, it avoids us having to
+    # programmatically inspect a pageset's actions in order to determine if it
+    # might eventually scroll.
+    args.append('--enable-gpu-benchmarking')
+
+    return args
diff --git a/catapult/telemetry/telemetry/internal/backends/android_app_backend_unittest.py b/catapult/telemetry/telemetry/internal/backends/android_app_backend_unittest.py
new file mode 100644
index 0000000..8140e89
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/android_app_backend_unittest.py
@@ -0,0 +1,38 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import collections
+import mock
+import unittest
+
+from telemetry.internal.backends import android_app_backend
+from devil.android.sdk import intent as intent_module
+
+
+_FakeAndroidProcess = collections.namedtuple(
+    'AndroidProcess', ['app_backend', 'pid', 'name'])
+
+
+class AndroidAppBackendUnittest(unittest.TestCase):
+
+  def setUp(self):
+    self.platform_backend = mock.Mock()
+    self.start_intent = intent_module.Intent(
+        package='com.example.my_app',
+        activity='com.example.my_app.LaunchMyApp')
+    self.app_backend = android_app_backend.AndroidAppBackend(
+        self.platform_backend, self.start_intent)
+
+  @mock.patch('telemetry.internal.backends.android_app_backend'
+              '.android_process.AndroidProcess', _FakeAndroidProcess)
+  def testGetProcesses(self):
+    # Only processes belonging to 'com.example.my_app' should match.
+    self.platform_backend.GetPsOutput.return_value = [
+      ['1111', 'com.example.my_app'],
+      ['2222', 'com.example.my_appointments_helper'],
+      ['3333', 'com.example.my_app:service'],
+      ['4444', 'com.example.some_other_app'],
+      ['5555', 'com_example_my_app'],
+    ]
+    process_pids = set(p.pid for p in self.app_backend.GetProcesses())
+    self.assertEquals(process_pids, set(['1111', '3333']))
diff --git a/catapult/telemetry/telemetry/internal/backends/android_browser_backend_settings.py b/catapult/telemetry/telemetry/internal/backends/android_browser_backend_settings.py
new file mode 100644
index 0000000..95647e8
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/android_browser_backend_settings.py
@@ -0,0 +1,129 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import time
+
+from telemetry.core import exceptions
+
+
+class AndroidBrowserBackendSettings(object):
+
+  def __init__(self, activity, cmdline_file, package, pseudo_exec_name,
+               supports_tab_control):
+    self._activity = activity
+    self._cmdline_file = cmdline_file
+    self._package = package
+    self._pseudo_exec_name = pseudo_exec_name
+    self._supports_tab_control = supports_tab_control
+
+  @property
+  def activity(self):
+    return self._activity
+
+  @property
+  def package(self):
+    return self._package
+
+  @property
+  def pseudo_exec_name(self):
+    return self._pseudo_exec_name
+
+  @property
+  def supports_tab_control(self):
+    return self._supports_tab_control
+
+  def GetCommandLineFile(self, is_user_debug_build):
+    del is_user_debug_build  # unused
+    return self._cmdline_file
+
+  def GetDevtoolsRemotePort(self, device):
+    raise NotImplementedError()
+
+  @property
+  def profile_ignore_list(self):
+    # Don't delete lib, since it is created by the installer.
+    return ['lib']
+
+
+class ChromeBackendSettings(AndroidBrowserBackendSettings):
+  # Stores a default Preferences file, re-used to speed up "--page-repeat".
+  _default_preferences_file = None
+
+  def GetCommandLineFile(self, is_user_debug_build):
+    if is_user_debug_build:
+      return '/data/local/tmp/chrome-command-line'
+    else:
+      return '/data/local/chrome-command-line'
+
+  def __init__(self, package):
+    super(ChromeBackendSettings, self).__init__(
+        activity='com.google.android.apps.chrome.Main',
+        cmdline_file=None,
+        package=package,
+        pseudo_exec_name='chrome',
+        supports_tab_control=True)
+
+  def GetDevtoolsRemotePort(self, device):
+    return 'localabstract:chrome_devtools_remote'
+
+
+class ContentShellBackendSettings(AndroidBrowserBackendSettings):
+  def __init__(self, package):
+    super(ContentShellBackendSettings, self).__init__(
+        activity='org.chromium.content_shell_apk.ContentShellActivity',
+        cmdline_file='/data/local/tmp/content-shell-command-line',
+        package=package,
+        pseudo_exec_name='content_shell',
+        supports_tab_control=False)
+
+  def GetDevtoolsRemotePort(self, device):
+    return 'localabstract:content_shell_devtools_remote'
+
+
+class WebviewBackendSettings(AndroidBrowserBackendSettings):
+  def __init__(self,
+               package,
+               activity='org.chromium.webview_shell.TelemetryActivity',
+               cmdline_file='/data/local/tmp/webview-command-line'):
+    super(WebviewBackendSettings, self).__init__(
+        activity=activity,
+        cmdline_file=cmdline_file,
+        package=package,
+        pseudo_exec_name='webview',
+        supports_tab_control=False)
+
+  def GetDevtoolsRemotePort(self, device):
+    # The DevTools socket name for WebView depends on the activity's PID.
+    retries = 0
+    timeout = 1
+    pid = None
+    while True:
+      pids = device.GetPids(self.package)
+      if not pids or self.package not in pids:
+        time.sleep(timeout)
+        retries += 1
+        timeout *= 2
+        if retries == 4:
+          logging.critical('android_browser_backend: Timeout while waiting for '
+                           'activity %s:%s to come up',
+                           self.package,
+                           self.activity)
+          raise exceptions.BrowserGoneException(self.browser,
+                                                'Timeout waiting for PID.')
+      if len(pids[self.package]) > 1:
+        raise Exception(
+            'At most one instance of process %s expected but found pids: '
+            '%s' % (self.package, pids))
+      pid = pids[self.package][0]
+      break
+    return 'localabstract:webview_devtools_remote_%s' % str(pid)
+
+
+class WebviewShellBackendSettings(WebviewBackendSettings):
+  def __init__(self, package):
+    super(WebviewShellBackendSettings, self).__init__(
+        activity='org.chromium.android_webview.shell.AwShellActivity',
+        cmdline_file='/data/local/tmp/android-webview-command-line',
+        package=package)
diff --git a/catapult/telemetry/telemetry/internal/backends/android_command_line_backend.py b/catapult/telemetry/telemetry/internal/backends/android_command_line_backend.py
new file mode 100644
index 0000000..e1200f9
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/android_command_line_backend.py
@@ -0,0 +1,111 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import pipes
+import sys
+
+from devil.android import device_errors  # pylint: disable=import-error
+
+
+def _QuoteIfNeeded(arg):
+  # Properly escape "key=valueA valueB" to "key='valueA valueB'"
+  # Values without spaces, or that seem to be quoted are left untouched.
+  # This is required so CommandLine.java can parse valueB correctly rather
+  # than as a separate switch.
+  params = arg.split('=', 1)
+  if len(params) != 2:
+    return arg
+  key, values = params
+  if ' ' not in values:
+    return arg
+  if values[0] in '"\'' and values[-1] == values[0]:
+    return arg
+  return '%s=%s' % (key, pipes.quote(values))
+
+
+class SetUpCommandLineFlags(object):
+  """A context manager for setting up the android command line flags.
+
+  This provides a readable way of using the android command line backend class.
+  Example usage:
+
+      with android_command_line_backend.SetUpCommandLineFlags(
+          device, backend_settings, startup_args):
+        # Something to run while the command line flags are set appropriately.
+  """
+  def __init__(self, device, backend_settings, startup_args):
+    self._android_command_line_backend = _AndroidCommandLineBackend(
+        device, backend_settings, startup_args)
+
+  def __enter__(self):
+    self._android_command_line_backend.SetUpCommandLineFlags()
+
+  def __exit__(self, *args):
+    self._android_command_line_backend.RestoreCommandLineFlags()
+
+
+class _AndroidCommandLineBackend(object):
+  """The backend for providing command line flags on android.
+
+  There are command line flags that Chromium accepts in order to enable
+  particular features or modify otherwise default functionality. To set the
+  flags for Chrome on Android, specific files on the device must be updated
+  with the flags to enable. This class provides a wrapper around this
+  functionality.
+  """
+
+  def __init__(self, device, backend_settings, startup_args):
+    self._device = device
+    self._backend_settings = backend_settings
+    self._startup_args = startup_args
+    self._saved_command_line_file_contents = None
+
+  @property
+  def command_line_file(self):
+    return self._backend_settings.GetCommandLineFile(self._device.IsUserBuild())
+
+  def SetUpCommandLineFlags(self):
+    args = [self._backend_settings.pseudo_exec_name]
+    args.extend(self._startup_args)
+    content = ' '.join(_QuoteIfNeeded(arg) for arg in args)
+
+    try:
+      # Save the current command line to restore later, except if it appears to
+      # be a Telemetry-created one. This is to prevent a common bug where
+      # --host-resolver-rules borks people's browsers if something goes wrong
+      # with Telemetry.
+      self._saved_command_line_file_contents = self._ReadFile()
+      if (self._saved_command_line_file_contents and
+          '--host-resolver-rules' in self._saved_command_line_file_contents):
+        self._saved_command_line_file_contents = None
+    except device_errors.CommandFailedError:
+      self._saved_command_line_file_contents = None
+
+    try:
+      self._WriteFile(content)
+    except device_errors.CommandFailedError as exc:
+      logging.critical(exc)
+      logging.critical('Cannot set Chrome command line. '
+                       'Fix this by flashing to a userdebug build.')
+      sys.exit(1)
+
+  def RestoreCommandLineFlags(self):
+    if self._saved_command_line_file_contents is None:
+      self._RemoveFile()
+    else:
+      self._WriteFile(self._saved_command_line_file_contents)
+
+  def _ReadFile(self):
+    if self._device.PathExists(self.command_line_file):
+      return self._device.ReadFile(self.command_line_file, as_root=True)
+    else:
+      return None
+
+  def _WriteFile(self, contents):
+    self._device.WriteFile(self.command_line_file, contents, as_root=True)
+
+  def _RemoveFile(self):
+    self._device.RunShellCommand(['rm', '-f', self.command_line_file],
+                                 as_root=True, check_return=True)
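To make the quoting rule in _QuoteIfNeeded concrete, a few inputs and their results (these mirror the expectations in the unittest that follows):

    from telemetry.internal.backends import android_command_line_backend as aclb

    aclb._QuoteIfNeeded('value')                # 'value' (no '=': unchanged)
    aclb._QuoteIfNeeded('key=valueA')           # no spaces in value: unchanged
    aclb._QuoteIfNeeded("key='valueA valueB'")  # already quoted: unchanged
    aclb._QuoteIfNeeded('key=valueA valueB')    # -> "key='valueA valueB'"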
diff --git a/catapult/telemetry/telemetry/internal/backends/android_command_line_backend_unittest.py b/catapult/telemetry/telemetry/internal/backends/android_command_line_backend_unittest.py
new file mode 100644
index 0000000..7f92519
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/android_command_line_backend_unittest.py
@@ -0,0 +1,103 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import unittest
+
+from telemetry import decorators
+from telemetry.internal.backends import android_command_line_backend
+from telemetry.testing import options_for_unittests
+
+from devil.android import device_utils
+
+
+class _MockBackendSettings(object):
+  pseudo_exec_name = 'chrome'
+
+  def __init__(self, path):
+    self._path = path
+
+  def GetCommandLineFile(self, _):
+    return self._path
+
+
+class AndroidCommandLineBackendTest(unittest.TestCase):
+
+  def _GetDeviceForTest(self):
+    serial = options_for_unittests.GetCopy().device
+    if serial:
+      device = device_utils.DeviceUtils(serial)
+      return device
+    else:
+      devices = device_utils.DeviceUtils.HealthyDevices()
+      if not devices:
+        return None
+      return devices[0]
+
+  def testQuoteIfNeededNoEquals(self):
+    string = 'value'
+    self.assertEqual(string,
+                     android_command_line_backend._QuoteIfNeeded(string))
+
+  def testQuoteIfNeededNoSpaces(self):
+    string = 'key=valueA'
+    self.assertEqual(string,
+                     android_command_line_backend._QuoteIfNeeded(string))
+
+  def testQuoteIfNeededAlreadyQuoted(self):
+    string = "key='valueA valueB'"
+    self.assertEqual(string,
+                     android_command_line_backend._QuoteIfNeeded(string))
+
+  def testQuoteIfNeeded(self):
+    string = 'key=valueA valueB'
+    expected_output = "key='valueA valueB'"
+    self.assertEqual(expected_output,
+                     android_command_line_backend._QuoteIfNeeded(string))
+
+  @decorators.Enabled('android')
+  def testSetUpCommandLineFlagsCmdRestored(self):
+    """Test that a previous command line file is restored.
+
+    Requires a device connected to the host.
+    """
+    device = self._GetDeviceForTest()
+    if not device:
+      logging.warning('Skip the test because we cannot find any healthy device')
+      return
+    cmd_file = '/data/local/tmp/test_cmd2'
+    backend_settings = _MockBackendSettings(cmd_file)
+    startup_args = ['--some', '--test', '--args']
+    try:
+      device.WriteFile(cmd_file, 'chrome --args --to --save')
+      self.assertEqual('chrome --args --to --save',
+                       device.ReadFile(cmd_file).strip())
+      with android_command_line_backend.SetUpCommandLineFlags(
+          device, backend_settings, startup_args):
+        self.assertEqual('chrome --some --test --args',
+                         device.ReadFile(cmd_file).strip())
+      self.assertEqual('chrome --args --to --save',
+                       device.ReadFile(cmd_file).strip())
+    finally:
+      device.RunShellCommand(['rm', '-f', cmd_file], check_return=True)
+
+  @decorators.Enabled('android')
+  def testSetUpCommandLineFlagsCmdRemoved(self):
+    """Test that the command line file is removed if it did not exist before.
+
+    Requires a device connected to the host.
+    """
+    device = self._GetDeviceForTest()
+    if not device:
+      logging.warning('Skipping test: no healthy device found.')
+      return
+    cmd_file = '/data/local/tmp/test_cmd'
+    backend_settings = _MockBackendSettings(cmd_file)
+    startup_args = ['--some', '--test', '--args']
+    device.RunShellCommand(['rm', '-f', cmd_file], check_return=True)
+    with android_command_line_backend.SetUpCommandLineFlags(
+        device, backend_settings, startup_args):
+      self.assertEqual('chrome --some --test --args',
+                       device.ReadFile(cmd_file).strip())
+    self.assertFalse(device.FileExists(cmd_file))
diff --git a/catapult/telemetry/telemetry/internal/backends/app_backend.py b/catapult/telemetry/telemetry/internal/backends/app_backend.py
new file mode 100644
index 0000000..6ad9812
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/app_backend.py
@@ -0,0 +1,48 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class AppBackend(object):
+  def __init__(self, app_type, platform_backend):
+    super(AppBackend, self).__init__()
+    self._app = None
+    self._app_type = app_type
+    self._platform_backend = platform_backend
+
+  def __del__(self):
+    self.Close()
+
+  def SetApp(self, app):
+    self._app = app
+
+  @property
+  def app(self):
+    return self._app
+
+  @property
+  def app_type(self):
+    return self._app_type
+
+  @property
+  def pid(self):
+    raise NotImplementedError
+
+  @property
+  def platform_backend(self):
+    return self._platform_backend
+
+  def Start(self):
+    raise NotImplementedError
+
+  def Close(self):
+    raise NotImplementedError
+
+  def IsAppRunning(self):
+    raise NotImplementedError
+
+  def GetStandardOutput(self):
+    raise NotImplementedError
+
+  def GetStackTrace(self):
+    raise NotImplementedError
diff --git a/catapult/telemetry/telemetry/internal/backends/browser_backend.py b/catapult/telemetry/telemetry/internal/backends/browser_backend.py
new file mode 100644
index 0000000..05388ff
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/browser_backend.py
@@ -0,0 +1,149 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import uuid
+import sys
+
+from catapult_base import cloud_storage  # pylint: disable=import-error
+
+from telemetry import decorators
+from telemetry.internal.backends import app_backend
+from telemetry.internal.browser import web_contents
+from telemetry.internal.platform import profiling_controller_backend
+
+
+class ExtensionsNotSupportedException(Exception):
+  pass
+
+
+class BrowserBackend(app_backend.AppBackend):
+  """A base class for browser backends."""
+
+  def __init__(self, platform_backend, supports_extensions, browser_options,
+               tab_list_backend):
+    assert browser_options.browser_type
+    super(BrowserBackend, self).__init__(
+        browser_options.browser_type, platform_backend)
+    self._supports_extensions = supports_extensions
+    self.browser_options = browser_options
+    self._tab_list_backend_class = tab_list_backend
+    self._profiling_controller_backend = (
+        profiling_controller_backend.ProfilingControllerBackend(
+          platform_backend, self))
+
+  def SetBrowser(self, browser):
+    super(BrowserBackend, self).SetApp(app=browser)
+
+  @property
+  def log_file_path(self):
+    # Specific browser backend is responsible for overriding this properly.
+    raise NotImplementedError
+
+  def UploadLogsToCloudStorage(self):
+    """ Uploading log files produce by this browser instance to cloud storage.
+
+    Check supports_uploading_logs before calling this method.
+    """
+    assert self.supports_uploading_logs
+    remote_path = (self.browser_options.logs_cloud_remote_path or
+                   'log_%s' % uuid.uuid4())
+    cloud_url = cloud_storage.Insert(
+        bucket=self.browser_options.logs_cloud_bucket,
+        remote_path=remote_path,
+        local_path=self.log_file_path)
+    sys.stderr.write('Uploading browser log to %s\n' % cloud_url)
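+
+    # Editor's note -- illustrative usage sketch (not part of the original
+    # change); callers are expected to check supports_uploading_logs first:
+    #
+    #   if browser_backend.supports_uploading_logs:
+    #     browser_backend.UploadLogsToCloudStorage()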
+
+  @property
+  def browser(self):
+    return self.app
+
+  @property
+  def profiling_controller_backend(self):
+    return self._profiling_controller_backend
+
+  @property
+  def browser_type(self):
+    return self.app_type
+
+  @property
+  def supports_uploading_logs(self):
+    # Specific browser backend is responsible for overriding this properly.
+    return False
+
+  @property
+  def supports_extensions(self):
+    """True if this browser backend supports extensions."""
+    return self._supports_extensions
+
+  @property
+  def supports_tab_control(self):
+    raise NotImplementedError()
+
+  @property
+  @decorators.Cache
+  def tab_list_backend(self):
+    return self._tab_list_backend_class(self)
+
+  @property
+  def supports_tracing(self):
+    raise NotImplementedError()
+
+  @property
+  def supports_system_info(self):
+    return False
+
+  def StartTracing(self, trace_options, custom_categories=None,
+                   timeout=web_contents.DEFAULT_WEB_CONTENTS_TIMEOUT):
+    raise NotImplementedError()
+
+  def StopTracing(self, trace_data_builder):
+    raise NotImplementedError()
+
+  def Start(self):
+    raise NotImplementedError()
+
+  def IsBrowserRunning(self):
+    raise NotImplementedError()
+
+  def IsAppRunning(self):
+    return self.IsBrowserRunning()
+
+  def GetStandardOutput(self):
+    raise NotImplementedError()
+
+  def GetStackTrace(self):
+    raise NotImplementedError()
+
+  def GetSystemInfo(self):
+    raise NotImplementedError()
+
+  @property
+  def supports_memory_dumping(self):
+    return False
+
+  def DumpMemory(self, timeout=web_contents.DEFAULT_WEB_CONTENTS_TIMEOUT):
+    raise NotImplementedError()
+
+  @property
+  def supports_overriding_memory_pressure_notifications(self):
+    return False
+
+  def SetMemoryPressureNotificationsSuppressed(
+      self, suppressed, timeout=web_contents.DEFAULT_WEB_CONTENTS_TIMEOUT):
+    raise NotImplementedError()
+
+  def SimulateMemoryPressureNotification(
+      self, pressure_level, timeout=web_contents.DEFAULT_WEB_CONTENTS_TIMEOUT):
+    raise NotImplementedError()
+
+  @property
+  def supports_cpu_metrics(self):
+    raise NotImplementedError()
+
+  @property
+  def supports_memory_metrics(self):
+    raise NotImplementedError()
+
+  @property
+  def supports_power_metrics(self):
+    raise NotImplementedError()
diff --git a/catapult/telemetry/telemetry/internal/backends/browser_backend_unittest.py b/catapult/telemetry/telemetry/internal/backends/browser_backend_unittest.py
new file mode 100644
index 0000000..549de21
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/browser_backend_unittest.py
@@ -0,0 +1,32 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import unittest
+
+from telemetry.internal.backends import browser_backend
+from telemetry.testing import options_for_unittests
+import mock
+
+
+class BrowserBackendLogsUploadingUnittest(unittest.TestCase):
+  def testUploadingToCloudStorage(self):
+    # pylint: disable=abstract-method
+    class FakeBrowserBackend(browser_backend.BrowserBackend):
+      @property
+      def supports_uploading_logs(self):
+        return True
+
+      @property
+      def log_file_path(self):
+        return '/foo/bar'
+
+    options = options_for_unittests.GetCopy()
+    options.browser_options.enable_logging = True
+    options.browser_options.logs_cloud_bucket = 'ABC'
+    options.browser_options.logs_cloud_remote_path = 'def'
+
+    b = FakeBrowserBackend(None, False, options.browser_options, None)
+    with mock.patch('catapult_base.cloud_storage.Insert') as mock_insert:
+      b.UploadLogsToCloudStorage()
+      mock_insert.assert_called_with(
+        bucket='ABC', remote_path='def', local_path='/foo/bar')
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome/__init__.py b/catapult/telemetry/telemetry/internal/backends/chrome/__init__.py
new file mode 100644
index 0000000..9228df8
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome/android_browser_backend.py b/catapult/telemetry/telemetry/internal/backends/chrome/android_browser_backend.py
new file mode 100644
index 0000000..fde39e0
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome/android_browser_backend.py
@@ -0,0 +1,212 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import subprocess
+
+from telemetry.core import exceptions
+from telemetry.internal.platform import android_platform_backend as \
+  android_platform_backend_module
+from telemetry.core import util
+from telemetry.internal.backends import android_command_line_backend
+from telemetry.internal.backends import browser_backend
+from telemetry.internal.backends.chrome import chrome_browser_backend
+from telemetry.internal.browser import user_agent
+
+from devil.android.sdk import intent
+
+
+class AndroidBrowserBackend(chrome_browser_backend.ChromeBrowserBackend):
+  """The backend for controlling a browser instance running on Android."""
+  def __init__(self, android_platform_backend, browser_options,
+               backend_settings, output_profile_path, extensions_to_load):
+    assert isinstance(android_platform_backend,
+                      android_platform_backend_module.AndroidPlatformBackend)
+    super(AndroidBrowserBackend, self).__init__(
+        android_platform_backend,
+        supports_tab_control=backend_settings.supports_tab_control,
+        supports_extensions=False, browser_options=browser_options,
+        output_profile_path=output_profile_path,
+        extensions_to_load=extensions_to_load)
+
+    self._port_keeper = util.PortKeeper()
+    # Use the port held by _port_keeper by default.
+    self._port = self._port_keeper.port
+
+    if len(extensions_to_load) > 0:
+      raise browser_backend.ExtensionsNotSupportedException(
+          'Android browser does not support extensions.')
+
+    # Initialize fields so that a failure during init doesn't break Close().
+    self._backend_settings = backend_settings
+    self._saved_sslflag = ''
+
+    # Kill old browser.
+    self._KillBrowser()
+
+    if self.device.HasRoot() or self.device.NeedsSU():
+      if self.browser_options.profile_dir:
+        self.platform_backend.PushProfile(
+            self._backend_settings.package,
+            self.browser_options.profile_dir)
+      elif not self.browser_options.dont_override_profile:
+        self.platform_backend.RemoveProfile(
+            self._backend_settings.package,
+            self._backend_settings.profile_ignore_list)
+
+    # Set the debug app if needed.
+    self.platform_backend.SetDebugApp(self._backend_settings.package)
+
+  @property
+  def log_file_path(self):
+    return None
+
+  @property
+  def device(self):
+    return self.platform_backend.device
+
+  def _KillBrowser(self):
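+    # Editor's note: on user (non-rooted) builds the browser process cannot
+    # simply be killed, so the application is stopped via the activity
+    # manager instead; on other builds it is killed outright.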
+    if self.device.IsUserBuild():
+      self.platform_backend.StopApplication(self._backend_settings.package)
+    else:
+      self.platform_backend.KillApplication(self._backend_settings.package)
+
+  def Start(self):
+    self.device.RunShellCommand('logcat -c')
+    if self.browser_options.startup_url:
+      url = self.browser_options.startup_url
+    elif self.browser_options.profile_dir:
+      url = None
+    else:
+      # If there are no existing tabs, start with a blank page, since the
+      # default startup with the NTP can lead to race conditions with
+      # Telemetry.
+      url = 'about:blank'
+
+    self.platform_backend.DismissCrashDialogIfNeeded()
+
+    user_agent_dict = user_agent.GetChromeUserAgentDictFromType(
+        self.browser_options.browser_user_agent_type)
+
+    browser_startup_args = self.GetBrowserStartupArgs()
+    with android_command_line_backend.SetUpCommandLineFlags(
+        self.device, self._backend_settings, browser_startup_args):
+      self.device.StartActivity(
+          intent.Intent(package=self._backend_settings.package,
+                        activity=self._backend_settings.activity,
+                        action=None, data=url, category=None,
+                        extras=user_agent_dict),
+          blocking=True)
+
+      # TODO(crbug.com/404771): Move port forwarding to network_controller.
+      remote_devtools_port = self._backend_settings.GetDevtoolsRemotePort(
+          self.device)
+      try:
+        # Release reserved port right before forwarding host to device.
+        self._port_keeper.Release()
+        assert self._port == self._port_keeper.port, (
+          'Android browser backend must use the port held by _port_keeper')
+        self.platform_backend.ForwardHostToDevice(
+            self._port, remote_devtools_port)
+      except Exception:
+        logging.exception('Failed to forward %s to %s.',
+            str(self._port), str(remote_devtools_port))
+        logging.warning('Currently forwarding:')
+        try:
+          for line in self.device.adb.ForwardList().splitlines():
+            logging.warning('  %s', line)
+        except Exception:
+          logging.warning('Exception raised while listing forwarded '
+                          'connections.')
+
+        logging.warning('Host tcp ports in use:')
+        try:
+          for line in subprocess.check_output(['netstat', '-t']).splitlines():
+            logging.warning('  %s', line)
+        except Exception:
+          logging.warning('Exception raised while listing tcp ports.')
+
+        logging.warning('Device unix domain sockets in use:')
+        try:
+          for line in self.device.ReadFile('/proc/net/unix', as_root=True,
+                                           force_pull=True).splitlines():
+            logging.warning('  %s', line)
+        except Exception:
+          logging.warning('Exception raised while listing unix domain sockets.')
+
+        raise
+
+      try:
+        self._WaitForBrowserToComeUp()
+        self._InitDevtoolsClientBackend(remote_devtools_port)
+      except exceptions.BrowserGoneException:
+        logging.critical('Failed to connect to browser.')
+        if not (self.device.HasRoot() or self.device.NeedsSU()):
+          logging.critical(
+            'Resolve this by either: '
+            '(1) Flashing to a userdebug build OR '
+            '(2) Manually enabling web debugging in Chrome at '
+            'Settings > Developer tools > Enable USB Web debugging.')
+        self.Close()
+        raise
+      except:
+        self.Close()
+        raise
+
+  def GetBrowserStartupArgs(self):
+    args = super(AndroidBrowserBackend, self).GetBrowserStartupArgs()
+    args.append('--enable-remote-debugging')
+    args.append('--disable-fre')
+    args.append('--disable-external-intent-requests')
+    return args
+
+  @property
+  def pid(self):
+    pids = self.device.GetPids(self._backend_settings.package)
+    if not pids or self._backend_settings.package not in pids:
+      raise exceptions.BrowserGoneException(self.browser)
+    if len(pids[self._backend_settings.package]) > 1:
+      raise Exception(
+          'At most one instance of process %s expected but found pids: '
+          '%s' % (self._backend_settings.package, pids))
+    return int(pids[self._backend_settings.package][0])
+
+  @property
+  def browser_directory(self):
+    return None
+
+  @property
+  def profile_directory(self):
+    return self._backend_settings.profile_dir
+
+  @property
+  def package(self):
+    return self._backend_settings.package
+
+  @property
+  def activity(self):
+    return self._backend_settings.activity
+
+  def __del__(self):
+    self.Close()
+
+  def Close(self):
+    super(AndroidBrowserBackend, self).Close()
+
+    self._KillBrowser()
+
+    self.platform_backend.StopForwardingHost(self._port)
+
+    if self._output_profile_path:
+      self.platform_backend.PullProfile(
+          self._backend_settings.package, self._output_profile_path)
+
+  def IsBrowserRunning(self):
+    return self.platform_backend.IsAppRunning(self._backend_settings.package)
+
+  def GetStandardOutput(self):
+    return self.platform_backend.GetStandardOutput()
+
+  def GetStackTrace(self):
+    return self.platform_backend.GetStackTrace()
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome/android_browser_finder.py b/catapult/telemetry/telemetry/internal/backends/chrome/android_browser_finder.py
new file mode 100644
index 0000000..60e5d66
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome/android_browser_finder.py
@@ -0,0 +1,258 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Finds android browsers that can be controlled by telemetry."""
+
+import logging
+import os
+import sys
+
+from catapult_base import dependency_util
+from devil.android import apk_helper
+
+from telemetry.core import exceptions
+from telemetry.core import platform
+from telemetry.core import util
+from telemetry import decorators
+from telemetry.internal.backends import android_browser_backend_settings
+from telemetry.internal.backends.chrome import android_browser_backend
+from telemetry.internal.browser import browser
+from telemetry.internal.browser import possible_browser
+from telemetry.internal.platform import android_device
+from telemetry.internal.util import binary_manager
+
+
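+# Maps each supported browser_type to a triple of
+# [package name, backend settings class, locally built apk file name],
+# where the apk name may be None if no locally built apk is expected.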
+CHROME_PACKAGE_NAMES = {
+  'android-content-shell':
+      ['org.chromium.content_shell_apk',
+       android_browser_backend_settings.ContentShellBackendSettings,
+       'ContentShell.apk'],
+  'android-webview':
+      ['org.chromium.webview_shell',
+       android_browser_backend_settings.WebviewBackendSettings,
+       None],
+  'android-webview-shell':
+      ['org.chromium.android_webview.shell',
+       android_browser_backend_settings.WebviewShellBackendSettings,
+       'AndroidWebView.apk'],
+  'android-chromium':
+      ['org.chromium.chrome',
+       android_browser_backend_settings.ChromeBackendSettings,
+       'ChromePublic.apk'],
+  'android-chrome':
+      ['com.google.android.apps.chrome',
+       android_browser_backend_settings.ChromeBackendSettings,
+       'Chrome.apk'],
+  'android-chrome-work':
+      ['com.chrome.work',
+       android_browser_backend_settings.ChromeBackendSettings,
+       None],
+  'android-chrome-beta':
+      ['com.chrome.beta',
+       android_browser_backend_settings.ChromeBackendSettings,
+       None],
+  'android-chrome-dev':
+      ['com.chrome.dev',
+       android_browser_backend_settings.ChromeBackendSettings,
+       None],
+  'android-chrome-canary':
+      ['com.chrome.canary',
+       android_browser_backend_settings.ChromeBackendSettings,
+       None],
+  'android-system-chrome':
+      ['com.android.chrome',
+       android_browser_backend_settings.ChromeBackendSettings,
+       None],
+}
+
+
+class PossibleAndroidBrowser(possible_browser.PossibleBrowser):
+  """A launchable android browser instance."""
+  def __init__(self, browser_type, finder_options, android_platform,
+               backend_settings, apk_name):
+    super(PossibleAndroidBrowser, self).__init__(
+        browser_type, 'android', backend_settings.supports_tab_control)
+    assert browser_type in FindAllBrowserTypes(finder_options), (
+        'Please add %s to android_browser_finder.FindAllBrowserTypes' %
+        browser_type)
+    self._platform = android_platform
+    self._platform_backend = (
+        android_platform._platform_backend)  # pylint: disable=protected-access
+    self._backend_settings = backend_settings
+    self._local_apk = None
+
+    if browser_type == 'exact':
+      if not os.path.exists(apk_name):
+        raise exceptions.PathMissingError(
+            'Unable to find exact apk %s specified by --browser-executable' %
+            apk_name)
+      self._local_apk = apk_name
+    elif browser_type == 'reference':
+      if not os.path.exists(apk_name):
+        raise exceptions.PathMissingError(
+            'Unable to find reference apk at expected location %s.' % apk_name)
+      self._local_apk = apk_name
+    elif apk_name:
+      assert finder_options.chrome_root, (
+          'Must specify Chromium source to use apk_name')
+      chrome_root = finder_options.chrome_root
+      candidate_apks = []
+      for build_path in util.GetBuildDirectories(chrome_root):
+        apk_full_name = os.path.join(build_path, 'apks', apk_name)
+        if os.path.exists(apk_full_name):
+          last_changed = os.path.getmtime(apk_full_name)
+          candidate_apks.append((last_changed, apk_full_name))
+
+      if candidate_apks:
+        # Find the candidate .apk with the latest modification time.
+        newest_apk_path = sorted(candidate_apks)[-1][1]
+        self._local_apk = newest_apk_path
+
+  def __repr__(self):
+    return 'PossibleAndroidBrowser(browser_type=%s)' % self.browser_type
+
+  def _InitPlatformIfNeeded(self):
+    pass
+
+  def Create(self, finder_options):
+    self._InitPlatformIfNeeded()
+    browser_backend = android_browser_backend.AndroidBrowserBackend(
+        self._platform_backend,
+        finder_options.browser_options, self._backend_settings,
+        output_profile_path=finder_options.output_profile_path,
+        extensions_to_load=finder_options.extensions_to_load)
+    try:
+      return browser.Browser(
+          browser_backend, self._platform_backend, self._credentials_path)
+    except Exception:
+      logging.exception('Failure while creating Android browser.')
+      original_exception = sys.exc_info()
+      try:
+        browser_backend.Close()
+      except Exception:
+        logging.exception('Secondary failure while closing browser backend.')
+
+      raise original_exception[0], original_exception[1], original_exception[2]
+
+  def SupportsOptions(self, finder_options):
+    if len(finder_options.extensions_to_load) != 0:
+      return False
+    return True
+
+  def HaveLocalAPK(self):
+    return self._local_apk and os.path.exists(self._local_apk)
+
+  @decorators.Cache
+  def UpdateExecutableIfNeeded(self):
+    if self.HaveLocalAPK():
+      logging.warning('Installing %s on device if needed.', self._local_apk)
+      self.platform.InstallApplication(self._local_apk)
+
+  def last_modification_time(self):
+    if self.HaveLocalAPK():
+      return os.path.getmtime(self._local_apk)
+    return -1
+
+
+def SelectDefaultBrowser(possible_browsers):
+  """Return the newest possible browser."""
+  if not possible_browsers:
+    return None
+  return max(possible_browsers, key=lambda b: b.last_modification_time())
+
+
+def CanFindAvailableBrowsers():
+  return android_device.CanDiscoverDevices()
+
+
+def CanPossiblyHandlePath(target_path):
+  return os.path.splitext(target_path.lower())[1] == '.apk'
+
+
+def FindAllBrowserTypes(options):
+  del options  # unused
+  return CHROME_PACKAGE_NAMES.keys() + ['exact', 'reference']
+
+
+def _FindAllPossibleBrowsers(finder_options, android_platform):
+  """Testable version of FindAllAvailableBrowsers."""
+  if not android_platform:
+    return []
+  possible_browsers = []
+
+  # Add the exact APK if given.
+  if (finder_options.browser_executable and
+      CanPossiblyHandlePath(finder_options.browser_executable)):
+    apk_name = os.path.basename(finder_options.browser_executable)
+    package_info = next((info for info in CHROME_PACKAGE_NAMES.itervalues()
+                         if info[2] == apk_name), None)
+
+    # It is okay if the APK name doesn't match any of the known Chrome
+    # browser APKs, since it may belong to a different browser.
+    if package_info:
+      normalized_path = os.path.expanduser(finder_options.browser_executable)
+      exact_package = apk_helper.GetPackageName(normalized_path)
+      if not exact_package:
+        raise exceptions.PackageDetectionError(
+            'Unable to find package for %s specified by --browser-executable' %
+            normalized_path)
+
+      [package, backend_settings, _] = package_info
+      if package == exact_package:
+        possible_browsers.append(PossibleAndroidBrowser(
+            'exact',
+            finder_options,
+            android_platform,
+            backend_settings(package),
+            normalized_path))
+      else:
+        raise exceptions.UnknownPackageError(
+            '%s specified by --browser-executable has an unknown package: %s' %
+            (normalized_path, exact_package))
+
+  # Add the reference build if found.
+  os_version = dependency_util.GetChromeApkOsVersion(
+      android_platform.GetOSVersionName())
+  arch = android_platform.GetArchName()
+  try:
+    reference_build = binary_manager.FetchPath(
+        'chrome_stable', arch, 'android', os_version)
+  except (binary_manager.NoPathFoundError,
+          binary_manager.CloudStorageError):
+    reference_build = None
+
+  if reference_build and os.path.exists(reference_build):
+    # TODO(aiolos): how do we stably map the android chrome_stable apk to the
+    # correct package name?
+    package, backend_settings, _ = CHROME_PACKAGE_NAMES['android-chrome']
+    possible_browsers.append(PossibleAndroidBrowser(
+        'reference',
+        finder_options,
+        android_platform,
+        backend_settings(package),
+        reference_build))
+
+  # Add any known local versions.
+  for name, package_info in CHROME_PACKAGE_NAMES.iteritems():
+    package, backend_settings, local_apk = package_info
+    b = PossibleAndroidBrowser(name,
+                               finder_options,
+                               android_platform,
+                               backend_settings(package),
+                               local_apk)
+    if b.platform.CanLaunchApplication(package) or b.HaveLocalAPK():
+      possible_browsers.append(b)
+  return possible_browsers
+
+
+def FindAllAvailableBrowsers(finder_options, device):
+  """Finds all the possible browsers on one device.
+
+  The device is either the only device on the host platform,
+  or |finder_options| specifies a particular device.
+  """
+  if not isinstance(device, android_device.AndroidDevice):
+    return []
+  android_platform = platform.GetPlatformForDevice(device, finder_options)
+  return _FindAllPossibleBrowsers(finder_options, android_platform)
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome/android_browser_finder_unittest.py b/catapult/telemetry/telemetry/internal/backends/chrome/android_browser_finder_unittest.py
new file mode 100644
index 0000000..52646ca
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome/android_browser_finder_unittest.py
@@ -0,0 +1,180 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import unittest
+
+import mock
+from pyfakefs import fake_filesystem_unittest
+
+from telemetry.core import android_platform
+from telemetry.internal.backends.chrome import android_browser_finder
+from telemetry.internal.platform import android_platform_backend
+from telemetry.internal.util import binary_manager
+from telemetry.testing import options_for_unittests
+
+
+def FakeFetchPath(dependency, arch, os_name, os_version=None):
+  return os.path.join(
+      'dependency_dir', dependency, '%s_%s_%s.apk' % (
+        os_name, os_version, arch))
+
+
+class AndroidBrowserFinderTest(fake_filesystem_unittest.TestCase):
+  def setUp(self):
+    self.finder_options = options_for_unittests.GetCopy()
+    # Mock out what's needed for testing with exact APKs
+    self.setUpPyfakefs()
+    self._fetch_path_patcher = mock.patch(
+        'telemetry.internal.backends.chrome.android_browser_finder.binary_manager.FetchPath',  # pylint: disable=line-too-long
+        FakeFetchPath)
+    self._fetch_path_mock = self._fetch_path_patcher.start()
+    self._get_package_name_patcher = mock.patch(
+        'devil.android.apk_helper.GetPackageName')
+    self._get_package_name_mock = self._get_package_name_patcher.start()
+    self.fake_platform = mock.Mock(spec=android_platform.AndroidPlatform)
+    self.fake_platform.CanLaunchApplication.return_value = True
+    self.fake_platform._platform_backend = mock.create_autospec(
+        android_platform_backend, spec_set=True)
+    self.fake_platform.GetOSVersionName.return_value = 'L23ds5'
+    self.fake_platform.GetArchName.return_value = 'armeabi-v7a'
+    # The android_browser_finder converts the os version name to 'k' or 'l'
+    self.expected_reference_build = FakeFetchPath(
+        'chrome_stable', 'armeabi-v7a', 'android', 'l')
+
+  def tearDown(self):
+    self.tearDownPyfakefs()
+    self._get_package_name_patcher.stop()
+    self._fetch_path_patcher.stop()
+
+  def testNoPlatformReturnsEmptyList(self):
+    fake_platform = None
+    possible_browsers = android_browser_finder._FindAllPossibleBrowsers(
+        self.finder_options, fake_platform)
+    self.assertEqual([], possible_browsers)
+
+  def testCanLaunchAlwaysTrueReturnsAllExceptExactAndReference(self):
+    if not self.finder_options.chrome_root:
+      self.skipTest('--chrome-root is not specified, skip the test')
+    all_types = set(
+        android_browser_finder.FindAllBrowserTypes(self.finder_options))
+    expected_types = all_types - set(('exact', 'reference'))
+    possible_browsers = android_browser_finder._FindAllPossibleBrowsers(
+        self.finder_options, self.fake_platform)
+    self.assertEqual(
+        expected_types,
+        set([b.browser_type for b in possible_browsers]))
+
+  def testCanLaunchAlwaysTrueReturnsAllExceptExact(self):
+    if not self.finder_options.chrome_root:
+      self.skipTest('--chrome-root is not specified, skip the test')
+    self.fs.CreateFile(self.expected_reference_build)
+    all_types = set(
+        android_browser_finder.FindAllBrowserTypes(self.finder_options))
+    expected_types = all_types - set(('exact',))
+    possible_browsers = android_browser_finder._FindAllPossibleBrowsers(
+        self.finder_options, self.fake_platform)
+    self.assertEqual(
+        expected_types,
+        set([b.browser_type for b in possible_browsers]))
+
+  def testCanLaunchAlwaysTrueWithExactApkReturnsAll(self):
+    if not self.finder_options.chrome_root:
+      self.skipTest('--chrome-root is not specified, skip the test')
+    self.fs.CreateFile(
+        '/foo/ContentShell.apk')
+    self.fs.CreateFile(self.expected_reference_build)
+    self.finder_options.browser_executable = '/foo/ContentShell.apk'
+    self._get_package_name_mock.return_value = 'org.chromium.content_shell_apk'
+
+    expected_types = set(
+        android_browser_finder.FindAllBrowserTypes(self.finder_options))
+    possible_browsers = android_browser_finder._FindAllPossibleBrowsers(
+        self.finder_options, self.fake_platform)
+    self.assertEqual(
+        expected_types,
+        set([b.browser_type for b in possible_browsers]))
+
+  def testErrorWithUnknownExactApk(self):
+    self.fs.CreateFile(
+        '/foo/ContentShell.apk')
+    self.finder_options.browser_executable = '/foo/ContentShell.apk'
+    self._get_package_name_mock.return_value = 'org.unknown.app'
+
+    self.assertRaises(Exception,
+        android_browser_finder._FindAllPossibleBrowsers,
+        self.finder_options, self.fake_platform)
+
+  def testErrorWithNonExistentExactApk(self):
+    self.finder_options.browser_executable = '/foo/ContentShell.apk'
+    self._get_package_name_mock.return_value = 'org.chromium.content_shell_apk'
+
+    self.assertRaises(Exception,
+        android_browser_finder._FindAllPossibleBrowsers,
+        self.finder_options, self.fake_platform)
+
+  def testNoErrorWithUnrecognizedApkName(self):
+    if not self.finder_options.chrome_root:
+      self.skipTest('--chrome-root is not specified, skip the test')
+    self.fs.CreateFile(
+        '/foo/unknown.apk')
+    self.finder_options.browser_executable = '/foo/unknown.apk'
+
+    possible_browsers = android_browser_finder._FindAllPossibleBrowsers(
+        self.finder_options, self.fake_platform)
+    self.assertNotIn('exact', [b.browser_type for b in possible_browsers])
+
+  def testNoErrorWithMissingReferenceBuild(self):
+    if not self.finder_options.chrome_root:
+      self.skipTest('--chrome-root is not specified, skip the test')
+    possible_browsers = android_browser_finder._FindAllPossibleBrowsers(
+      self.finder_options, self.fake_platform)
+    self.assertNotIn('reference', [b.browser_type for b in possible_browsers])
+
+  def testNoErrorWithReferenceBuildCloudStorageError(self):
+    if not self.finder_options.chrome_root:
+      self.skipTest('--chrome-root is not specified, skip the test')
+    with mock.patch(
+        'telemetry.internal.backends.chrome.android_browser_finder.binary_manager.FetchPath',  # pylint: disable=line-too-long
+        side_effect=binary_manager.CloudStorageError):
+      possible_browsers = android_browser_finder._FindAllPossibleBrowsers(
+        self.finder_options, self.fake_platform)
+    self.assertNotIn('reference', [b.browser_type for b in possible_browsers])
+
+  def testNoErrorWithReferenceBuildNoPathFoundError(self):
+    if not self.finder_options.chrome_root:
+      self.skipTest('--chrome-root is not specified, skip the test')
+    self._fetch_path_mock.side_effect = binary_manager.NoPathFoundError
+    possible_browsers = android_browser_finder._FindAllPossibleBrowsers(
+      self.finder_options, self.fake_platform)
+    self.assertNotIn('reference', [b.browser_type for b in possible_browsers])
+
+
+class FakePossibleBrowser(object):
+  def __init__(self, last_modification_time):
+    self._last_modification_time = last_modification_time
+
+  def last_modification_time(self):
+    return self._last_modification_time
+
+
+class SelectDefaultBrowserTest(unittest.TestCase):
+  def testEmptyListGivesNone(self):
+    self.assertIsNone(android_browser_finder.SelectDefaultBrowser([]))
+
+  def testSinglePossibleReturnsSame(self):
+    possible_browsers = [FakePossibleBrowser(last_modification_time=1)]
+    self.assertIs(
+      possible_browsers[0],
+      android_browser_finder.SelectDefaultBrowser(possible_browsers))
+
+  def testListGivesNewest(self):
+    possible_browsers = [
+        FakePossibleBrowser(last_modification_time=2),
+        FakePossibleBrowser(last_modification_time=3),  # newest
+        FakePossibleBrowser(last_modification_time=1),
+        ]
+    self.assertIs(
+      possible_browsers[1],
+      android_browser_finder.SelectDefaultBrowser(possible_browsers))
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome/chrome_browser_backend.py b/catapult/telemetry/telemetry/internal/backends/chrome/chrome_browser_backend.py
new file mode 100644
index 0000000..06acf3a
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome/chrome_browser_backend.py
@@ -0,0 +1,322 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import pprint
+import shlex
+import sys
+
+from telemetry.core import exceptions
+from telemetry.core import util
+from telemetry import decorators
+from telemetry.internal.backends import browser_backend
+from telemetry.internal.backends.chrome import extension_backend
+from telemetry.internal.backends.chrome import system_info_backend
+from telemetry.internal.backends.chrome import tab_list_backend
+from telemetry.internal.backends.chrome_inspector import devtools_client_backend
+from telemetry.internal.browser import user_agent
+from telemetry.internal.browser import web_contents
+from telemetry.testing import options_for_unittests
+
+
+class ChromeBrowserBackend(browser_backend.BrowserBackend):
+  """An abstract class for chrome browser backends. Provides basic functionality
+  once a remote-debugger port has been established."""
+  # It is OK to have abstract methods. pylint: disable=abstract-method
+
+  def __init__(self, platform_backend, supports_tab_control,
+               supports_extensions, browser_options, output_profile_path,
+               extensions_to_load):
+    super(ChromeBrowserBackend, self).__init__(
+        platform_backend=platform_backend,
+        supports_extensions=supports_extensions,
+        browser_options=browser_options,
+        tab_list_backend=tab_list_backend.TabListBackend)
+    self._port = None
+
+    self._supports_tab_control = supports_tab_control
+    self._devtools_client = None
+    self._system_info_backend = None
+
+    self._output_profile_path = output_profile_path
+    self._extensions_to_load = extensions_to_load
+
+    if (self.browser_options.dont_override_profile and
+        not options_for_unittests.AreSet()):
+      sys.stderr.write('Warning: Not overriding profile. This can cause '
+                       'unexpected effects due to profile-specific settings, '
+                       'such as about:flags settings, cookies, and '
+                       'extensions.\n')
+
+  @property
+  def devtools_client(self):
+    return self._devtools_client
+
+  @property
+  @decorators.Cache
+  def extension_backend(self):
+    if not self.supports_extensions:
+      return None
+    return extension_backend.ExtensionBackendDict(self)
+
+  def _ArgsNeedProxyServer(self, args):
+    """Returns True if args for Chrome indicate the need for proxy server."""
+    if '--enable-spdy-proxy-auth' in args:
+      return True
+    return [arg for arg in args if arg.startswith('--proxy-server=')]
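+    # For example (editor's illustration): ['--proxy-server=http=proxy:80']
+    # is truthy, while ['--no-first-run'] yields an empty (falsy) list.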
+
+  def GetBrowserStartupArgs(self):
+    args = []
+    args.extend(self.browser_options.extra_browser_args)
+    args.append('--enable-net-benchmarking')
+    args.append('--metrics-recording-only')
+    args.append('--no-default-browser-check')
+    args.append('--no-first-run')
+
+    # Turn on GPU benchmarking extension for all runs. The only side effect
+    # of the extension being on is that render stats are tracked. This is
+    # believed to be effectively free. Doing so here also avoids having to
+    # programmatically inspect a pageset's actions in order to determine
+    # whether it might eventually scroll.
+    args.append('--enable-gpu-benchmarking')
+
+    # Set --no-proxy-server to work around some XP issues unless
+    # some other flag indicates a proxy is needed.
+    if not self._ArgsNeedProxyServer(args):
+      self.browser_options.no_proxy_server = True
+
+    if self.browser_options.disable_background_networking:
+      args.append('--disable-background-networking')
+    args.extend(self.GetReplayBrowserStartupArgs())
+    args.extend(user_agent.GetChromeUserAgentArgumentFromType(
+        self.browser_options.browser_user_agent_type))
+
+    extensions = [extension.local_path
+                  for extension in self._extensions_to_load
+                  if not extension.is_component]
+    extension_str = ','.join(extensions)
+    if len(extensions) > 0:
+      args.append('--load-extension=%s' % extension_str)
+
+    component_extensions = [extension.local_path
+                            for extension in self._extensions_to_load
+                            if extension.is_component]
+    component_extension_str = ','.join(component_extensions)
+    if len(component_extensions) > 0:
+      args.append('--load-component-extension=%s' % component_extension_str)
+
+    if self.browser_options.no_proxy_server:
+      args.append('--no-proxy-server')
+
+    if self.browser_options.disable_component_extensions_with_background_pages:
+      args.append('--disable-component-extensions-with-background-pages')
+
+    # Disables the start page, as well as other external apps that can
+    # steal focus or make measurements inconsistent.
+    if self.browser_options.disable_default_apps:
+      args.append('--disable-default-apps')
+
+    if self.browser_options.enable_logging:
+      args.append('--enable-logging')
+      args.append('--v=1')
+    return args
+
+  def GetReplayBrowserStartupArgs(self):
+    network_backend = self.platform_backend.network_controller_backend
+    if not network_backend.is_replay_active:
+      return []
+    replay_args = []
+    if not network_backend.is_test_ca_installed:
+      # Ignore certificate errors if the platform backend has not created
+      # and installed a root certificate.
+      replay_args.append('--ignore-certificate-errors')
+    # Force hostnames to resolve to the replay's host_ip.
+    replay_args.append('--host-resolver-rules=MAP * %s,EXCLUDE localhost' %
+                         network_backend.host_ip)
+    # Force the browser to send HTTP/HTTPS requests to fixed ports if they
+    # are not the standard HTTP/HTTPS ports.
+    device_ports = network_backend.wpr_device_ports
+    if device_ports.http != 80:
+      replay_args.append('--testing-fixed-http-port=%s' % device_ports.http)
+    if device_ports.https != 443:
+      replay_args.append('--testing-fixed-https-port=%s' % device_ports.https)
+    return replay_args
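+    # Illustrative example (editor's note, not part of the original change):
+    # with replay active, no test CA installed, host_ip 127.0.0.1, and
+    # device ports http=456/https=567, this returns:
+    #   ['--ignore-certificate-errors',
+    #    '--host-resolver-rules=MAP * 127.0.0.1,EXCLUDE localhost',
+    #    '--testing-fixed-http-port=456',
+    #    '--testing-fixed-https-port=567']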
+
+  def HasBrowserFinishedLaunching(self):
+    assert self._port, 'No DevTools port info available.'
+    return devtools_client_backend.IsDevToolsAgentAvailable(self._port, self)
+
+  def _InitDevtoolsClientBackend(self, remote_devtools_port=None):
+    """ Initiate the devtool client backend which allow browser connection
+    through browser' devtool.
+
+    Args:
+      remote_devtools_port: The remote devtools port, if
+          any. Otherwise assumed to be the same as self._port.
+    """
+    assert not self._devtools_client, (
+        'DevTools client backend cannot be initialized twice')
+    self._devtools_client = devtools_client_backend.DevToolsClientBackend(
+        self._port, remote_devtools_port or self._port, self)
+
+  def _WaitForBrowserToComeUp(self):
+    """ Wait for browser to come up. """
+    try:
+      timeout = self.browser_options.browser_startup_timeout
+      util.WaitFor(self.HasBrowserFinishedLaunching, timeout=timeout)
+    except (exceptions.TimeoutException, exceptions.ProcessGoneException) as e:
+      if not self.IsBrowserRunning():
+        raise exceptions.BrowserGoneException(self.browser, e)
+      raise exceptions.BrowserConnectionGoneException(self.browser, e)
+
+  def _WaitForExtensionsToLoad(self):
+    """ Wait for all extensions to load.
+    Be sure to check whether the browser_backend supports_extensions before
+    calling this method.
+    """
+    assert self._supports_extensions
+    assert self._devtools_client, (
+        'Waiting for extensions requires the DevTools client to be '
+        'initialized first')
+    try:
+      util.WaitFor(self._AllExtensionsLoaded, timeout=60)
+    except exceptions.TimeoutException:
+      logging.error('ExtensionsToLoad: ' +
+          repr([e.extension_id for e in self._extensions_to_load]))
+      logging.error('Extension list: ' +
+          pprint.pformat(self.extension_backend, indent=4))
+      raise
+
+  def _AllExtensionsLoaded(self):
+    # Extension pages are loaded from an about:blank page,
+    # so we need to check that the document URL is the extension
+    # page in addition to the ready state.
+    extension_ready_js = """
+        document.URL.lastIndexOf('chrome-extension://%s/', 0) == 0 &&
+        (document.readyState == 'complete' ||
+         document.readyState == 'interactive')
+    """
+    for e in self._extensions_to_load:
+      try:
+        extension_objects = self.extension_backend[e.extension_id]
+      except KeyError:
+        return False
+      for extension_object in extension_objects:
+        try:
+          res = extension_object.EvaluateJavaScript(
+              extension_ready_js % e.extension_id)
+        except exceptions.EvaluateException:
+          # If the inspected page is not ready, we will get an error
+          # when we evaluate a JS expression, but we can just keep polling
+          # until the page is ready (crbug.com/251913).
+          res = None
+
+        # TODO(tengs): We don't have full support for getting the Chrome
+        # version before launch, so for now we use a generic workaround to
+        # check for an extension binding bug in old versions of Chrome.
+        # See crbug.com/263162 for details.
+        if res and extension_object.EvaluateJavaScript(
+            'chrome.runtime == null'):
+          extension_object.Reload()
+        if not res:
+          return False
+    return True
+
+  @property
+  def browser_directory(self):
+    raise NotImplementedError()
+
+  @property
+  def profile_directory(self):
+    raise NotImplementedError()
+
+  @property
+  def supports_tab_control(self):
+    return self._supports_tab_control
+
+  @property
+  def supports_tracing(self):
+    return True
+
+  def StartTracing(self, trace_options, custom_categories=None,
+                   timeout=web_contents.DEFAULT_WEB_CONTENTS_TIMEOUT):
+    """
+    Args:
+        trace_options: A tracing_options.TracingOptions instance.
+        custom_categories: An optional string containing a comma-separated
+            list of categories that will be traced instead of the default
+            category set. Example: use
+            "webkit,cc,disabled-by-default-cc.debug" to trace only those
+            three event categories.
+    """
+    return self.devtools_client.StartChromeTracing(
+        trace_options, custom_categories, timeout)
+
+  def StopTracing(self, trace_data_builder):
+    self.devtools_client.StopChromeTracing(trace_data_builder)
+
+  def GetProcessName(self, cmd_line):
+    """Returns a user-friendly name for the process of the given |cmd_line|."""
+    if not cmd_line:
+      # TODO(tonyg): Eventually we should make all of these known and add an
+      # assertion.
+      return 'unknown'
+    if 'nacl_helper_bootstrap' in cmd_line:
+      return 'nacl_helper_bootstrap'
+    if ':sandboxed_process' in cmd_line:
+      return 'renderer'
+    if ':privileged_process' in cmd_line:
+      return 'gpu-process'
+    args = shlex.split(cmd_line)
+    types = [arg.split('=')[1] for arg in args if arg.startswith('--type=')]
+    if not types:
+      return 'browser'
+    return types[0]
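+    # Examples (editor's illustration): 'chrome --type=renderer' maps to
+    # 'renderer', 'com.android.chrome:sandboxed_process0' maps to
+    # 'renderer', and a cmd_line with no --type= switch maps to 'browser'.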
+
+  def Close(self):
+    if self._devtools_client:
+      self._devtools_client.Close()
+      self._devtools_client = None
+
+  @property
+  def supports_system_info(self):
+    return self.GetSystemInfo() is not None
+
+  def GetSystemInfo(self):
+    if self._system_info_backend is None:
+      self._system_info_backend = system_info_backend.SystemInfoBackend(
+          self._port)
+    return self._system_info_backend.GetSystemInfo()
+
+  @property
+  def supports_memory_dumping(self):
+    return True
+
+  def DumpMemory(self, timeout=web_contents.DEFAULT_WEB_CONTENTS_TIMEOUT):
+    return self.devtools_client.DumpMemory(timeout)
+
+  @property
+  def supports_overriding_memory_pressure_notifications(self):
+    return True
+
+  def SetMemoryPressureNotificationsSuppressed(
+      self, suppressed, timeout=web_contents.DEFAULT_WEB_CONTENTS_TIMEOUT):
+    self.devtools_client.SetMemoryPressureNotificationsSuppressed(
+        suppressed, timeout)
+
+  def SimulateMemoryPressureNotification(
+      self, pressure_level, timeout=web_contents.DEFAULT_WEB_CONTENTS_TIMEOUT):
+    self.devtools_client.SimulateMemoryPressureNotification(
+        pressure_level, timeout)
+
+  @property
+  def supports_cpu_metrics(self):
+    return True
+
+  @property
+  def supports_memory_metrics(self):
+    return True
+
+  @property
+  def supports_power_metrics(self):
+    return True
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome/chrome_browser_backend_unittest.py b/catapult/telemetry/telemetry/internal/backends/chrome/chrome_browser_backend_unittest.py
new file mode 100644
index 0000000..52ad12c
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome/chrome_browser_backend_unittest.py
@@ -0,0 +1,102 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+import mock
+
+from telemetry.internal import forwarders
+from telemetry.internal.backends.chrome import chrome_browser_backend
+from telemetry.util import wpr_modes
+
+
+class FakePlatformBackend(object):
+  def __init__(self, is_replay_active, wpr_http_device_port,
+               wpr_https_device_port, is_host_platform):
+    self.is_host_platform = is_host_platform
+
+    self.forwarder_factory = mock.Mock()
+
+    self.network_controller_backend = mock.Mock()
+    self.network_controller_backend.is_replay_active = is_replay_active
+    self.network_controller_backend.wpr_device_ports = forwarders.PortSet(
+        http=wpr_http_device_port, https=wpr_https_device_port, dns=None)
+    self.network_controller_backend.host_ip = '127.0.0.1'
+    self.network_controller_backend.is_test_ca_installed = False
+
+
+class FakeBrowserOptions(object):
+  def __init__(self, wpr_mode=wpr_modes.WPR_OFF):
+    self.wpr_mode = wpr_mode
+    self.browser_type = 'chrome'
+    self.dont_override_profile = False
+    self.browser_user_agent_type = 'desktop'
+    self.disable_background_networking = False
+    self.disable_component_extensions_with_background_pages = False
+    self.disable_default_apps = False
+    self.extra_browser_args = []
+    self.no_proxy_server = False
+    self.enable_logging = False
+
+
+class TestChromeBrowserBackend(chrome_browser_backend.ChromeBrowserBackend):
+  # The test does not need to define the abstract methods.
+  # pylint: disable=abstract-method
+
+  def __init__(self, browser_options,
+               wpr_http_device_port=None, wpr_https_device_port=None,
+               is_running_locally=False):
+    super(TestChromeBrowserBackend, self).__init__(
+        platform_backend=FakePlatformBackend(
+            browser_options.wpr_mode != wpr_modes.WPR_OFF,
+            wpr_http_device_port, wpr_https_device_port, is_running_locally),
+        supports_tab_control=False,
+        supports_extensions=False,
+        browser_options=browser_options,
+        output_profile_path=None,
+        extensions_to_load=[])
+
+
+class StartupArgsTest(unittest.TestCase):
+  """Test expected inputs for GetBrowserStartupArgs."""
+
+  def testNoProxyServer(self):
+    browser_options = FakeBrowserOptions()
+    browser_options.no_proxy_server = False
+    browser_options.extra_browser_args = ['--proxy-server=http=inter.net']
+    browser_backend = TestChromeBrowserBackend(browser_options)
+    self.assertNotIn('--no-proxy-server',
+                     browser_backend.GetBrowserStartupArgs())
+
+    browser_options.no_proxy_server = True
+    self.assertIn('--no-proxy-server', browser_backend.GetBrowserStartupArgs())
+
+
+class ReplayStartupArgsTest(unittest.TestCase):
+  """Test expected inputs for GetReplayBrowserStartupArgs."""
+
+  def testReplayOffGivesEmptyArgs(self):
+    browser_options = FakeBrowserOptions()
+    browser_backend = TestChromeBrowserBackend(browser_options)
+    self.assertEqual([], browser_backend.GetReplayBrowserStartupArgs())
+
+  def BasicArgsHelper(self, is_running_locally):
+    browser_options = FakeBrowserOptions(wpr_mode=wpr_modes.WPR_REPLAY)
+    browser_backend = TestChromeBrowserBackend(
+        browser_options,
+        wpr_http_device_port=456,
+        wpr_https_device_port=567,
+        is_running_locally=is_running_locally)
+    expected_args = [
+        '--host-resolver-rules=MAP * 127.0.0.1,EXCLUDE localhost',
+        '--ignore-certificate-errors',
+        '--testing-fixed-http-port=456',
+        '--testing-fixed-https-port=567'
+        ]
+    self.assertEqual(
+        expected_args,
+        sorted(browser_backend.GetReplayBrowserStartupArgs()))
+
+  def testBasicArgs(self):
+    # The result is the same regardless of whether running locally.
+    self.BasicArgsHelper(is_running_locally=True)
+    self.BasicArgsHelper(is_running_locally=False)
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome/chromeos_login_ext/main.html b/catapult/telemetry/telemetry/internal/backends/chrome/chromeos_login_ext/main.html
new file mode 100644
index 0000000..07a643c
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome/chromeos_login_ext/main.html
@@ -0,0 +1,9 @@
+<!doctype html>
+<html>
+  <head>
+    <meta charset="utf-8">
+    <script src="main.js"></script>
+  </head>
+  <body>
+  </body>
+</html>
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome/chromeos_login_ext/main.js b/catapult/telemetry/telemetry/internal/backends/chrome/chromeos_login_ext/main.js
new file mode 100644
index 0000000..7d21eb3
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome/chromeos_login_ext/main.js
@@ -0,0 +1,18 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var PARENT_PAGE = 'chrome://oobe/';
+
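+// Notify the embedding chrome://oobe/ page that the fake GAIA login UI has
+// finished loading.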
+var msg = {
+  'method': 'loginUILoaded'
+};
+window.parent.postMessage(msg, PARENT_PAGE);
+
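+// Then complete the login with a dummy test account so the login flow can
+// proceed without a real GAIA interaction.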
+msg = {
+  'method': 'completeLogin',
+  'email': 'test@test.test',
+  'gaiaId': '12345',
+  'password': ''
+};
+window.parent.postMessage(msg, PARENT_PAGE);
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome/chromeos_login_ext/manifest.json b/catapult/telemetry/telemetry/internal/backends/chrome/chromeos_login_ext/manifest.json
new file mode 100644
index 0000000..ec809c6
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome/chromeos_login_ext/manifest.json
@@ -0,0 +1,16 @@
+{
+  // chrome-extension://mfffpogegjflfpflabcdkioaeobkgjik/
+  "key": "MIGdMA0GCSqGSIb3DQEBAQUAA4GLADCBhwKBgQC4L17nAfeTd6Xhtx96WhQ6DSr8KdHeQmfzgCkieKLCgUkWdwB9G1DCuh0EPMDn1MdtSwUAT7xE36APEzi0X/UpKjOVyX8tCC3aQcLoRAE0aJAvCcGwK7qIaQaczHmHKvPC2lrRdzSoMMTC5esvHX+ZqIBMi123FOL0dGW6OPKzIwIBIw==",
+  "name": "GaiaDummyAuthExtension",
+  "version": "0.0.1",
+  "manifest_version": 2,
+  "content_security_policy": "default-src 'self' blob: filesystem:; script-src 'self' blob: filesystem:; style-src 'self' blob: filesystem: 'unsafe-inline'",
+  "description": "GAIA Dummy Component Extension",
+  "web_accessible_resources": [
+    "main.html",
+    "main.js"
+  ],
+  "permissions": [
+      "chrome://oobe/"
+  ]
+}
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome/cros_browser_backend.py b/catapult/telemetry/telemetry/internal/backends/chrome/cros_browser_backend.py
new file mode 100644
index 0000000..fcad5db
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome/cros_browser_backend.py
@@ -0,0 +1,234 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+
+from telemetry.core import exceptions
+from telemetry.core import util
+from telemetry import decorators
+from telemetry.internal.backends.chrome import chrome_browser_backend
+from telemetry.internal.backends.chrome import misc_web_contents_backend
+from telemetry.internal import forwarders
+
+
+class CrOSBrowserBackend(chrome_browser_backend.ChromeBrowserBackend):
+  def __init__(self, cros_platform_backend, browser_options, cri, is_guest,
+               extensions_to_load):
+    super(CrOSBrowserBackend, self).__init__(
+        cros_platform_backend, supports_tab_control=True,
+        supports_extensions=not is_guest,
+        browser_options=browser_options,
+        output_profile_path=None, extensions_to_load=extensions_to_load)
+    assert browser_options.IsCrosBrowserOptions()
+    # Initialize fields so that an explosion during init doesn't break in Close.
+    self._cri = cri
+    self._is_guest = is_guest
+    self._forwarder = None
+    self._remote_debugging_port = self._cri.GetRemotePort()
+    self._port = self._remote_debugging_port
+
+    # Copy extensions to temp directories on the device.
+    # Note that we also perform this copy locally to ensure that
+    # the owner of the extensions is set to chronos.
+    for e in extensions_to_load:
+      extension_dir = cri.RunCmdOnDevice(
+          ['mktemp', '-d', '/tmp/extension_XXXXX'])[0].rstrip()
+      e.local_path = os.path.join(extension_dir, os.path.basename(e.path))
+      cri.PushFile(e.path, extension_dir)
+      cri.Chown(extension_dir)
+
+    self._cri.RestartUI(self.browser_options.clear_enterprise_policy)
+    util.WaitFor(self.IsBrowserRunning, 20)
+
+    # Delete test user's cryptohome vault (user data directory).
+    if not self.browser_options.dont_override_profile:
+      self._cri.RunCmdOnDevice(['cryptohome', '--action=remove', '--force',
+                                '--user=%s' % self._username])
+
+  @property
+  def log_file_path(self):
+    return None
+
+  def GetBrowserStartupArgs(self):
+    args = super(CrOSBrowserBackend, self).GetBrowserStartupArgs()
+    args.extend([
+            '--enable-smooth-scrolling',
+            '--enable-threaded-compositing',
+            # Allow devtools to connect to chrome.
+            '--remote-debugging-port=%i' % self._remote_debugging_port,
+            # Open a maximized window.
+            '--start-maximized',
+            # Disable system startup sound.
+            '--ash-disable-system-sounds',
+            # Ignore DMServer errors for policy fetches.
+            '--allow-failed-policy-fetch-for-test',
+            # Skip user image selection screen, and post login screens.
+            '--oobe-skip-postlogin',
+            # Debug logging.
+            '--vmodule=*/chromeos/net/*=2,*/chromeos/login/*=2'])
+
+    # Disable GAIA services unless we're using GAIA login, or if there's an
+    # explicit request for it.
+    if (self.browser_options.disable_gaia_services and
+        not self.browser_options.gaia_login):
+      args.append('--disable-gaia-services')
+
+    return args
+
+  @property
+  def pid(self):
+    return self._cri.GetChromePid()
+
+  @property
+  def browser_directory(self):
+    result = self._cri.GetChromeProcess()
+    if result and 'path' in result:
+      return os.path.dirname(result['path'])
+    return None
+
+  @property
+  def profile_directory(self):
+    return '/home/chronos/Default'
+
+  def __del__(self):
+    self.Close()
+
+  def Start(self):
+    # Escape all commas in the startup arguments we pass to Chrome
+    # because dbus-send delimits array elements by commas
+    startup_args = [a.replace(',', '\\,') for a in self.GetBrowserStartupArgs()]
+
+    # Restart Chrome with the login extension and remote debugging.
+    logging.info('Restarting Chrome with flags and login')
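+    # EnableChromeTesting asks the session manager to relaunch Chrome with the
+    # flags assembled above.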
+    args = ['dbus-send', '--system', '--type=method_call',
+            '--dest=org.chromium.SessionManager',
+            '/org/chromium/SessionManager',
+            'org.chromium.SessionManagerInterface.EnableChromeTesting',
+            'boolean:true',
+            'array:string:"%s"' % ','.join(startup_args)]
+    self._cri.RunCmdOnDevice(args)
+
+    if not self._cri.local:
+      # TODO(crbug.com/404771): Move port forwarding to network_controller.
+      self._port = util.GetUnreservedAvailableLocalPort()
+      self._forwarder = self._platform_backend.forwarder_factory.Create(
+          forwarders.PortPairs(
+              http=forwarders.PortPair(self._port, self._remote_debugging_port),
+              https=None,
+              dns=None), use_remote_port_forwarding=False)
+
+    # Wait for oobe.
+    self._WaitForBrowserToComeUp()
+    self._InitDevtoolsClientBackend(
+        remote_devtools_port=self._remote_debugging_port)
+    util.WaitFor(lambda: self.oobe_exists, 10)
+
+    if self.browser_options.auto_login:
+      try:
+        if self._is_guest:
+          pid = self.pid
+          self.oobe.NavigateGuestLogin()
+          # Guest browsing shuts down the current browser and launches an
+          # incognito browser in a separate process, which we need to wait for.
+          util.WaitFor(lambda: pid != self.pid, 10)
+        elif self.browser_options.gaia_login:
+          self.oobe.NavigateGaiaLogin(self._username, self._password)
+        else:
+          self.oobe.NavigateFakeLogin(self._username, self._password,
+              self._gaia_id)
+
+        self._WaitForLogin()
+      except exceptions.TimeoutException:
+        self._cri.TakeScreenShot('login-screen')
+        raise exceptions.LoginException('Timed out going through login screen. '
+                                        + self._GetLoginStatus())
+
+    logging.info('Browser is up!')
+
+  def Close(self):
+    super(CrOSBrowserBackend, self).Close()
+
+    if self._cri:
+      self._cri.RestartUI(False) # Logs out.
+      self._cri.CloseConnection()
+
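+    # Wait for the user's cryptohome vault to be unmounted after logout.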
+    util.WaitFor(lambda: not self._IsCryptohomeMounted(), 180)
+
+    if self._forwarder:
+      self._forwarder.Close()
+      self._forwarder = None
+
+    if self._cri:
+      for e in self._extensions_to_load:
+        self._cri.RmRF(os.path.dirname(e.local_path))
+
+    self._cri = None
+
+  def IsBrowserRunning(self):
+    return bool(self.pid)
+
+  def GetStandardOutput(self):
+    return 'Cannot get standard output on CrOS'
+
+  def GetStackTrace(self):
+    return 'Cannot get stack trace on CrOS'
+
+  @property
+  @decorators.Cache
+  def misc_web_contents_backend(self):
+    """Access to chrome://oobe/login page."""
+    return misc_web_contents_backend.MiscWebContentsBackend(self)
+
+  @property
+  def oobe(self):
+    return self.misc_web_contents_backend.GetOobe()
+
+  @property
+  def oobe_exists(self):
+    return self.misc_web_contents_backend.oobe_exists
+
+  @property
+  def _username(self):
+    return self.browser_options.username
+
+  @property
+  def _password(self):
+    return self.browser_options.password
+
+  @property
+  def _gaia_id(self):
+    return self.browser_options.gaia_id
+
+  def _IsCryptohomeMounted(self):
+    username = '$guest' if self._is_guest else self._username
+    return self._cri.IsCryptohomeMounted(username, self._is_guest)
+
+  def _GetLoginStatus(self):
+    """Returns login status. If logged in, empty string is returned."""
+    status = ''
+    if not self._IsCryptohomeMounted():
+      status += 'Cryptohome not mounted. '
+    if not self.HasBrowserFinishedLaunching():
+      status += 'Browser didn\'t launch. '
+    if self.oobe_exists:
+      status += 'OOBE not dismissed.'
+    return status
+
+  def _IsLoggedIn(self):
+    """Returns True if cryptohome has mounted, the browser is
+    responsive to devtools requests, and the oobe has been dismissed."""
+    return not self._GetLoginStatus()
+
+  def _WaitForLogin(self):
+    # Wait for cryptohome to mount.
+    util.WaitFor(self._IsLoggedIn, 60)
+
+    # For incognito mode, the session manager actually relaunches chrome with
+    # new arguments, so we have to wait for the browser to come up.
+    self._WaitForBrowserToComeUp()
+
+    # Wait for extensions to load.
+    if self._supports_extensions:
+      self._WaitForExtensionsToLoad()
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome/cros_browser_finder.py b/catapult/telemetry/telemetry/internal/backends/chrome/cros_browser_finder.py
new file mode 100644
index 0000000..1e24177
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome/cros_browser_finder.py
@@ -0,0 +1,137 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Finds CrOS browsers that can be controlled by telemetry."""
+
+import logging
+
+from telemetry.core import cros_interface
+from telemetry.core import platform as platform_module
+from telemetry.internal.backends.chrome import cros_browser_backend
+from telemetry.internal.backends.chrome import cros_browser_with_oobe
+from telemetry.internal.browser import browser
+from telemetry.internal.browser import browser_finder_exceptions
+from telemetry.internal.browser import possible_browser
+from telemetry.internal.platform import cros_device
+
+
+class PossibleCrOSBrowser(possible_browser.PossibleBrowser):
+  """A launchable CrOS browser instance."""
+  def __init__(self, browser_type, finder_options, cros_platform, is_guest):
+    super(PossibleCrOSBrowser, self).__init__(browser_type, 'cros', True)
+    assert browser_type in FindAllBrowserTypes(finder_options), (
+        'Please add %s to cros_browser_finder.FindAllBrowserTypes()' %
+         browser_type)
+    self._platform = cros_platform
+    self._platform_backend = (
+        cros_platform._platform_backend)  # pylint: disable=protected-access
+    self._is_guest = is_guest
+
+  def __repr__(self):
+    return 'PossibleCrOSBrowser(browser_type=%s)' % self.browser_type
+
+  def _InitPlatformIfNeeded(self):
+    pass
+
+  def Create(self, finder_options):
+    if finder_options.output_profile_path:
+      raise NotImplementedError(
+          'Profile generation is not yet supported on CrOS.')
+
+    browser_options = finder_options.browser_options
+    browser_backend = cros_browser_backend.CrOSBrowserBackend(
+        self._platform_backend,
+        browser_options, self._platform_backend.cri, self._is_guest,
+        extensions_to_load=finder_options.extensions_to_load)
+    if browser_options.create_browser_with_oobe:
+      return cros_browser_with_oobe.CrOSBrowserWithOOBE(
+          browser_backend,
+          self._platform_backend,
+          self._credentials_path)
+    return browser.Browser(
+        browser_backend, self._platform_backend, self._credentials_path)
+
+  def SupportsOptions(self, finder_options):
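+    # Guest sessions cannot load extensions.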
+    if (len(finder_options.extensions_to_load) != 0) and self._is_guest:
+      return False
+    return True
+
+  def UpdateExecutableIfNeeded(self):
+    pass
+
+def SelectDefaultBrowser(possible_browsers):
+  if cros_device.IsRunningOnCrOS():
+    for b in possible_browsers:
+      if b.browser_type == 'system':
+        return b
+  return None
+
+def CanFindAvailableBrowsers(finder_options):
+  return (cros_device.IsRunningOnCrOS() or
+          finder_options.cros_remote or
+          cros_interface.HasSSH())
+
+def FindAllBrowserTypes(_):
+  return [
+      'cros-chrome',
+      'cros-chrome-guest',
+      'system',
+      'system-guest',
+  ]
+
+def FindAllAvailableBrowsers(finder_options, device):
+  """Finds all available CrOS browsers, locally and remotely."""
+  browsers = []
+  if not isinstance(device, cros_device.CrOSDevice):
+    return browsers
+
+  if cros_device.IsRunningOnCrOS():
+    browsers = [PossibleCrOSBrowser('system', finder_options,
+                                    platform_module.GetHostPlatform(),
+                                    is_guest=False),
+                PossibleCrOSBrowser('system-guest', finder_options,
+                                    platform_module.GetHostPlatform(),
+                                    is_guest=True)]
+
+  # Check ssh
+  try:
+    # Retries required because of DNS issue in the lab documented in
+    # http://crbug.com/484726
+    retries = 0
+    while True:
+      try:
+        platform = platform_module.GetPlatformForDevice(device, finder_options)
+        break
+      except cros_interface.DNSFailureException as ex:
+        logging.warn('DNS Failure: %s', str(ex))
+        retries += 1
+        if retries > 1:
+          raise ex
+  except cros_interface.LoginException as ex:
+    if isinstance(ex, cros_interface.KeylessLoginRequiredException):
+      logging.warn('Could not ssh into %s. Your device must be configured',
+                   finder_options.cros_remote)
+      logging.warn('to allow passwordless login as root.')
+      logging.warn('For a test-build device, pass this to your script:')
+      logging.warn('   --identity $(CHROMITE)/ssh_keys/testing_rsa')
+      logging.warn('')
+      logging.warn('For a developer-mode device, the steps are:')
+      logging.warn(' - Ensure you have an id_rsa.pub (etc) on this computer')
+      logging.warn(' - On the chromebook:')
+      logging.warn('   -  Control-Alt-T; shell; sudo -s')
+      logging.warn('   -  openssh-server start')
+      logging.warn('   -  scp <this machine>:.ssh/id_rsa.pub /tmp/')
+      logging.warn('   -  mkdir /root/.ssh')
+      logging.warn('   -  chmod go-rx /root/.ssh')
+      logging.warn('   -  cat /tmp/id_rsa.pub >> /root/.ssh/authorized_keys')
+      logging.warn('   -  chmod 0600 /root/.ssh/authorized_keys')
+    raise browser_finder_exceptions.BrowserFinderException(str(ex))
+
+  browsers.extend([PossibleCrOSBrowser('cros-chrome', finder_options,
+                                       platform, is_guest=False),
+                   PossibleCrOSBrowser('cros-chrome-guest',
+                                       finder_options, platform,
+                                       is_guest=True)])
+  return browsers
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome/cros_browser_finder_unittest.py b/catapult/telemetry/telemetry/internal/backends/chrome/cros_browser_finder_unittest.py
new file mode 100644
index 0000000..d467819
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome/cros_browser_finder_unittest.py
@@ -0,0 +1,10 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# TODO(nduca): Add basic unit test for cros_browser_finder.
+#
+# Here, we should mock the cros_interface module (assuming its working) and
+# verify that the finder does the right thing. Because the finder delegates most
+# of its work to the CRI, the test code here is going to be comparatively
+# simple.
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome/cros_browser_with_oobe.py b/catapult/telemetry/telemetry/internal/backends/chrome/cros_browser_with_oobe.py
new file mode 100644
index 0000000..127a180
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome/cros_browser_with_oobe.py
@@ -0,0 +1,28 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.internal.backends.chrome import cros_browser_backend
+from telemetry.internal.browser import browser
+
+
+class CrOSBrowserWithOOBE(browser.Browser):
+  """Cros-specific browser."""
+  def __init__(self, backend, platform_backend, credentials_path):
+    assert isinstance(backend, cros_browser_backend.CrOSBrowserBackend)
+    super(CrOSBrowserWithOOBE, self).__init__(
+        backend, platform_backend, credentials_path)
+
+  @property
+  def oobe(self):
+    """The login webui (also serves as ui for screenlock and
+    out-of-box-experience).
+    """
+    return self._browser_backend.oobe
+
+  @property
+  def oobe_exists(self):
+    """True if the login/oobe/screenlock webui exists. This is more lightweight
+    than accessing the oobe property.
+    """
+    return self._browser_backend.oobe_exists
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome/cros_test_case.py b/catapult/telemetry/telemetry/internal/backends/chrome/cros_test_case.py
new file mode 100644
index 0000000..80b5ffb
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome/cros_test_case.py
@@ -0,0 +1,84 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import unittest
+
+from telemetry.core import cros_interface
+from telemetry.core import util
+from telemetry.internal.browser import browser_finder
+from telemetry.internal.browser import extension_to_load
+from telemetry.testing import options_for_unittests
+
+
+class CrOSTestCase(unittest.TestCase):
+  def setUp(self):
+    options = options_for_unittests.GetCopy()
+    self._cri = cros_interface.CrOSInterface(options.cros_remote,
+                                             options.cros_remote_ssh_port,
+                                             options.cros_ssh_identity)
+    self._is_guest = options.browser_type == 'cros-chrome-guest'
+    self._username = options.browser_options.username
+    self._password = options.browser_options.password
+    self._gaia_id = options.browser_options.gaia_id
+    self._load_extension = None
+
+  def _CreateBrowser(self, autotest_ext=False, auto_login=True,
+                     gaia_login=False, username=None, password=None,
+                     gaia_id=None, dont_override_profile=False):
+    """Finds and creates a browser for tests. if autotest_ext is True,
+    also loads the autotest extension"""
+    options = options_for_unittests.GetCopy()
+
+    if autotest_ext:
+      extension_path = os.path.join(util.GetUnittestDataDir(), 'autotest_ext')
+      assert os.path.isdir(extension_path)
+      self._load_extension = extension_to_load.ExtensionToLoad(
+          path=extension_path,
+          browser_type=options.browser_type,
+          is_component=True)
+      options.extensions_to_load = [self._load_extension]
+
+    browser_to_create = browser_finder.FindBrowser(options)
+    self.assertTrue(browser_to_create)
+    browser_options = options.browser_options
+    browser_options.create_browser_with_oobe = True
+    browser_options.auto_login = auto_login
+    browser_options.gaia_login = gaia_login
+    browser_options.dont_override_profile = dont_override_profile
+    if username is not None:
+      browser_options.username = username
+    if password is not None:
+      browser_options.password = password
+    if gaia_id is not None:
+      browser_options.gaia_id = gaia_id
+
+    return browser_to_create.Create(options)
+
+  def _GetAutotestExtension(self, browser):
+    """Returns the autotest extension instance"""
+    extension = browser.extensions[self._load_extension]
+    self.assertTrue(extension)
+    return extension
+
+  def _IsCryptohomeMounted(self):
+    """Returns True if cryptohome is mounted. as determined by the cmd
+    cryptohome --action=is_mounted"""
+    return self._cri.RunCmdOnDevice(
+        ['/usr/sbin/cryptohome', '--action=is_mounted'])[0].strip() == 'true'
+
+  def _GetLoginStatus(self, browser):
+    extension = self._GetAutotestExtension(browser)
+    self.assertTrue(extension.EvaluateJavaScript(
+        "typeof('chrome.autotestPrivate') != 'undefined'"))
+    extension.ExecuteJavaScript('''
+        window.__login_status = null;
+        chrome.autotestPrivate.loginStatus(function(s) {
+          window.__login_status = s;
+        });
+    ''')
+    return util.WaitFor(
+        lambda: extension.EvaluateJavaScript('window.__login_status'), 10)
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome/cros_unittest.py b/catapult/telemetry/telemetry/internal/backends/chrome/cros_unittest.py
new file mode 100644
index 0000000..8b38e88
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome/cros_unittest.py
@@ -0,0 +1,175 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import urllib2
+import os
+
+from telemetry.core import exceptions
+from telemetry.core import util
+from telemetry import decorators
+from telemetry.internal.backends.chrome import cros_test_case
+
+
+class CrOSCryptohomeTest(cros_test_case.CrOSTestCase):
+  @decorators.Enabled('chromeos')
+  def testCryptohome(self):
+    """Verifies cryptohome mount status for regular and guest user and when
+    logged out"""
+    with self._CreateBrowser() as b:
+      self.assertEquals(1, len(b.tabs))
+      self.assertTrue(b.tabs[0].url)
+      self.assertTrue(self._IsCryptohomeMounted())
+
+      # TODO(achuith): Remove dependency on /home/chronos/user.
+      chronos_fs = self._cri.FilesystemMountedAt('/home/chronos/user')
+      self.assertTrue(chronos_fs)
+      if self._is_guest:
+        self.assertEquals(chronos_fs, 'guestfs')
+      else:
+        crypto_fs = self._cri.FilesystemMountedAt(
+            self._cri.CryptohomePath(self._username))
+        self.assertEquals(crypto_fs, chronos_fs)
+
+    self.assertFalse(self._IsCryptohomeMounted())
+    self.assertEquals(self._cri.FilesystemMountedAt('/home/chronos/user'),
+                      '/dev/mapper/encstateful')
+
+
+class CrOSLoginTest(cros_test_case.CrOSTestCase):
+  def _GetCredentials(self):
+    """Read username and password from credentials.txt. The file is a single
+    line of the format username:password"""
+    username = None
+    password = None
+    credentials_file = os.path.join(os.path.dirname(__file__),
+                                    'credentials.txt')
+    if os.path.exists(credentials_file):
+      with open(credentials_file) as f:
+        username, password = f.read().strip().split(':')
+        # Remove dots.
+        username = username.replace('.', '')
+        # Canonicalize.
+        if username.find('@') == -1:
+          username += '@gmail.com'
+    return (username, password)
+
+  @decorators.Enabled('chromeos')
+  def testLoginStatus(self):
+    """Tests autotestPrivate.loginStatus"""
+    if self._is_guest:
+      return
+    with self._CreateBrowser(autotest_ext=True) as b:
+      login_status = self._GetLoginStatus(b)
+      self.assertEquals(type(login_status), dict)
+
+      self.assertEquals(not self._is_guest, login_status['isRegularUser'])
+      self.assertEquals(self._is_guest, login_status['isGuest'])
+      self.assertEquals(login_status['email'], self._username)
+      self.assertFalse(login_status['isScreenLocked'])
+
+  @decorators.Enabled('chromeos')
+  def testLogout(self):
+    """Tests autotestPrivate.logout"""
+    if self._is_guest:
+      return
+    with self._CreateBrowser(autotest_ext=True) as b:
+      extension = self._GetAutotestExtension(b)
+      try:
+        extension.ExecuteJavaScript('chrome.autotestPrivate.logout();')
+      except exceptions.Error:
+        pass
+      util.WaitFor(lambda: not self._IsCryptohomeMounted(), 20)
+
+  @decorators.Disabled('all')
+  def testGaiaLogin(self):
+    """Tests gaia login. Use credentials in credentials.txt if it exists,
+    otherwise use powerloadtest."""
+    if self._is_guest:
+      return
+    username, password = self._GetCredentials()
+    if not username or not password:
+      username = 'powerloadtest@gmail.com'
+      password = urllib2.urlopen(
+          'https://sites.google.com/a/chromium.org/dev/chromium-os/testing/'
+          'power-testing/pltp/pltp').read().rstrip()
+    with self._CreateBrowser(gaia_login=True,
+                             username=username,
+                             password=password):
+      self.assertTrue(util.WaitFor(self._IsCryptohomeMounted, 10))
+
+  @decorators.Enabled('chromeos')
+  def testEnterpriseEnroll(self):
+    """Tests enterprise enrollment. Credentials are expected to be found in a
+    credentials.txt file. The account must be from an enterprise domain and
+    have device enrollment permission. The device must be unowned."""
+    if self._is_guest:
+      return
+
+    username, password = self._GetCredentials()
+    if not username or not password:
+      return
+    # Enroll the device.
+    with self._CreateBrowser(auto_login=False) as browser:
+      browser.oobe.NavigateGaiaLogin(username, password,
+                                     enterprise_enroll=True,
+                                     for_user_triggered_enrollment=True)
+
+    # Check for the existence of the device policy file.
+    self.assertTrue(util.WaitFor(lambda: self._cri.FileExistsOnDevice(
+        '/home/.shadow/install_attributes.pb'), 15))
+
+
+class CrOSScreenLockerTest(cros_test_case.CrOSTestCase):
+  def _IsScreenLocked(self, browser):
+    return self._GetLoginStatus(browser)['isScreenLocked']
+
+  def _LockScreen(self, browser):
+    self.assertFalse(self._IsScreenLocked(browser))
+
+    extension = self._GetAutotestExtension(browser)
+    self.assertTrue(extension.EvaluateJavaScript(
+        "typeof chrome.autotestPrivate.lockScreen == 'function'"))
+    logging.info('Locking screen')
+    extension.ExecuteJavaScript('chrome.autotestPrivate.lockScreen();')
+
+    logging.info('Waiting for the lock screen')
+    def ScreenLocked():
+      return (browser.oobe_exists and
+          browser.oobe.EvaluateJavaScript("typeof Oobe == 'function'") and
+          browser.oobe.EvaluateJavaScript(
+          "typeof Oobe.authenticateForTesting == 'function'"))
+    util.WaitFor(ScreenLocked, 10)
+    self.assertTrue(self._IsScreenLocked(browser))
+
+  def _AttemptUnlockBadPassword(self, browser):
+    logging.info('Trying a bad password')
+    def ErrorBubbleVisible():
+      return not browser.oobe.EvaluateJavaScript('''
+          document.getElementById('bubble').hidden
+      ''')
+    self.assertFalse(ErrorBubbleVisible())
+    browser.oobe.ExecuteJavaScript('''
+        Oobe.authenticateForTesting('%s', 'bad');
+    ''' % self._username)
+    util.WaitFor(ErrorBubbleVisible, 10)
+    self.assertTrue(self._IsScreenLocked(browser))
+
+  def _UnlockScreen(self, browser):
+    logging.info('Unlocking')
+    browser.oobe.ExecuteJavaScript('''
+        Oobe.authenticateForTesting('%s', '%s');
+    ''' % (self._username, self._password))
+    util.WaitFor(lambda: not browser.oobe_exists, 10)
+    self.assertFalse(self._IsScreenLocked(browser))
+
+  @decorators.Disabled('all')
+  def testScreenLock(self):
+    """Tests autotestPrivate.screenLock"""
+    if self._is_guest:
+      return
+    with self._CreateBrowser(autotest_ext=True) as browser:
+      self._LockScreen(browser)
+      self._AttemptUnlockBadPassword(browser)
+      self._UnlockScreen(browser)
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome/crx_id.py b/catapult/telemetry/telemetry/internal/backends/chrome/crx_id.py
new file mode 100644
index 0000000..21d17a2
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome/crx_id.py
@@ -0,0 +1,132 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+""" Read a CRX file and write out the App ID and the Full Hash of the ID.
+See: http://code.google.com/chrome/extensions/crx.html
+and 'http://stackoverflow.com/questions/'
+  + '1882981/google-chrome-alphanumeric-hashes-to-identify-extensions'
+for docs on the format.
+"""
+
+import base64
+import os
+import hashlib
+import json
+
+EXPECTED_CRX_MAGIC_NUM = 'Cr24'
+EXPECTED_CRX_VERSION = 2
+
+def HexToInt(hex_chars):
+  """ Convert bytes like \xab -> 171 """
+  val = 0
+  for i in xrange(len(hex_chars)):
+    val += pow(256, i) * ord(hex_chars[i])
+  return val
+
+def HexToMPDecimal(hex_chars):
+  """ Convert bytes to an MPDecimal string. Example \x00 -> "aa"
+      This gives us the AppID for a chrome extension.
+  """
+  result = ''
+  base = ord('a')
+  for i in xrange(len(hex_chars)):
+    value = ord(hex_chars[i])
+    dig1 = value / 16
+    dig2 = value % 16
+    result += chr(dig1 + base)
+    result += chr(dig2 + base)
+  return result
+
+def HexTo256(hex_chars):
+  """ Convert bytes to pairs of hex digits. E.g., \x00\x11 -> "{0x00, 0x11}"
+      The format is tailored for copy and paste into C code:
+      const uint8 sha256_hash[] = { ... }; """
+  result = []
+  for i in xrange(len(hex_chars)):
+    value = ord(hex_chars[i])
+    dig1 = value / 16
+    dig2 = value % 16
+    result.append('0x' + hex(dig1)[2:] + hex(dig2)[2:])
+  return '{%s}' % ', '.join(result)
+
+def GetPublicKeyPacked(f):
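+  # CRX2 header layout: 'Cr24' magic, uint32 version, uint32 public key
+  # length, uint32 signature length, then the packed public key bytes.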
+  magic_num = f.read(4)
+  if magic_num != EXPECTED_CRX_MAGIC_NUM:
+    raise Exception('Invalid magic number: %s (expecting %s)' %
+                    (magic_num,
+                     EXPECTED_CRX_MAGIC_NUM))
+  version = f.read(4)
+  if HexToInt(version) != EXPECTED_CRX_VERSION:
+    raise Exception('Invalid version number: %s (expecting %s)' %
+                    (version,
+                     EXPECTED_CRX_VERSION))
+  pub_key_len_bytes = HexToInt(f.read(4))
+  f.read(4)  # Skip over the signature length field.
+  return f.read(pub_key_len_bytes)
+
+def GetPublicKeyFromPath(filepath, is_win_path=False):
+  # Normalize the path for windows to have capital drive letters.
+  # We intentionally don't check if sys.platform == 'win32' and just
+  # check if this looks like drive letter so that we can test this
+  # even on posix systems.
+  if (len(filepath) >= 2 and
+      filepath[0].islower() and
+      filepath[1] == ':'):
+    filepath = filepath[0].upper() + filepath[1:]
+
+  # On Windows, filepaths are encoded using UTF-16, little endian byte order,
+  # using "wide characters" that are 16 bits in size. On POSIX systems, the
+  # encoding is generally UTF-8, which has the property of being equivalent to
+  # ASCII when only ASCII characters are in the path.
+  if is_win_path:
+    filepath = filepath.encode('utf-16le')
+
+  return filepath
+
+def GetPublicKeyUnpacked(f, filepath):
+  manifest = json.load(f)
+  if 'key' not in manifest:
+    # Use the path as the public key.
+    # See Extension::GenerateIdForPath in extension.cc
+    return GetPublicKeyFromPath(filepath)
+  else:
+    return base64.standard_b64decode(manifest['key'])
+
+def HasPublicKey(filename):
+  if os.path.isdir(filename):
+    with open(os.path.join(filename, 'manifest.json'), 'rb') as f:
+      manifest = json.load(f)
+      return 'key' in manifest
+  return False
+
+def GetPublicKey(filename, from_file_path, is_win_path=False):
+  if from_file_path:
+    return GetPublicKeyFromPath(
+        filename, is_win_path=is_win_path)
+
+  pub_key = ''
+  if os.path.isdir(filename):
+    # Assume it's an unpacked extension
+    f = open(os.path.join(filename, 'manifest.json'), 'rb')
+    pub_key = GetPublicKeyUnpacked(f, filename)
+    f.close()
+  else:
+    # Assume it's a packed extension.
+    f = open(filename, 'rb')
+    pub_key = GetPublicKeyPacked(f)
+    f.close()
+  return pub_key
+
+def GetCRXHash(filename, from_file_path=False, is_win_path=False):
+  pub_key = GetPublicKey(filename, from_file_path, is_win_path=is_win_path)
+  pub_key_hash = hashlib.sha256(pub_key).digest()
+  return HexTo256(pub_key_hash)
+
+def GetCRXAppID(filename, from_file_path=False, is_win_path=False):
+  pub_key = GetPublicKey(filename, from_file_path, is_win_path=is_win_path)
+  pub_key_hash = hashlib.sha256(pub_key).digest()
+  # AppID is the MPDecimal of only the first 128 bits of the hash.
+  return HexToMPDecimal(pub_key_hash[:128/8])
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome/crx_id_unittest.py b/catapult/telemetry/telemetry/internal/backends/chrome/crx_id_unittest.py
new file mode 100644
index 0000000..9cea905
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome/crx_id_unittest.py
@@ -0,0 +1,78 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import os
+import shutil
+import unittest
+import tempfile
+
+from telemetry.core import util
+from telemetry.internal.backends.chrome import crx_id
+
+
+class CrxIdUnittest(unittest.TestCase):
+  CRX_ID_DIR = util.GetUnittestDataDir()
+  PACKED_CRX = os.path.join(CRX_ID_DIR,
+                            'jebgalgnebhfojomionfpkfelancnnkf.crx')
+
+  PACKED_APP_ID = 'jebgalgnebhfojomionfpkfelancnnkf'
+  PACKED_HASH_BYTES = \
+      '{0x94, 0x16, 0x0b, 0x6d, 0x41, 0x75, 0xe9, 0xec,' \
+      ' 0x8e, 0xd5, 0xfa, 0x54, 0xb0, 0xd2, 0xdd, 0xa5,' \
+      ' 0x6e, 0x05, 0x6b, 0xe8, 0x73, 0x47, 0xf6, 0xc4,' \
+      ' 0x11, 0x9f, 0xbc, 0xb3, 0x09, 0xb3, 0x5b, 0x40}'
+
+  UNPACKED_APP_ID = 'cbcdidchbppangcjoddlpdjlenngjldk'
+  UNPACKED_HASH_BYTES = \
+      '{0x21, 0x23, 0x83, 0x27, 0x1f, 0xf0, 0xd6, 0x29,' \
+      ' 0xe3, 0x3b, 0xf3, 0x9b, 0x4d, 0xd6, 0x9b, 0x3a,' \
+      ' 0xff, 0x7d, 0x6b, 0xc4, 0x78, 0x30, 0x47, 0xa6,' \
+      ' 0x23, 0x12, 0x72, 0x84, 0x9b, 0x9a, 0xf6, 0x3c}'
+
+
+  def testPackedHashAppId(self):
+    """ Test the output generated for a canned, packed CRX. """
+    self.assertEqual(crx_id.GetCRXAppID(self.PACKED_CRX),
+                     self.PACKED_APP_ID)
+    self.assertEqual(crx_id.GetCRXHash(self.PACKED_CRX),
+                     self.PACKED_HASH_BYTES)
+
+
+  def testUnpackedHashAppId(self):
+    """ Test the output generated for a canned, unpacked extension. """
+    unpacked_test_manifest_path = os.path.join(
+        self.CRX_ID_DIR, 'manifest_with_key.json')
+    temp_unpacked_crx = tempfile.mkdtemp()
+    shutil.copy2(unpacked_test_manifest_path,
+                 os.path.join(temp_unpacked_crx, 'manifest.json'))
+    self.assertEqual(crx_id.GetCRXAppID(temp_unpacked_crx),
+                     self.UNPACKED_APP_ID)
+    self.assertEqual(crx_id.GetCRXHash(temp_unpacked_crx),
+                     self.UNPACKED_HASH_BYTES)
+    self.assertTrue(crx_id.HasPublicKey(temp_unpacked_crx))
+    shutil.rmtree(temp_unpacked_crx)
+
+
+  def testFromFilePath(self):
+    """ Test calculation of extension id from file paths. """
+    self.assertEqual(crx_id.GetCRXAppID('/tmp/temp_extension',
+                                        from_file_path=True),
+                     'ajbbicncdkdlchpjplgjaglppbcbmaji')
+
+
+  def testFromWindowsPath(self):
+    self.assertEqual(crx_id.GetCRXAppID(r'D:\Documents\chrome\test_extension',
+                                        from_file_path=True,
+                                        is_win_path=True),
+                     'fegemedmbnhglnecjgbdhekaghkccplm')
+
+    # Test drive letter normalization.
+    kWinPathId = 'aiinlcdagjihibappcdnnhcccdokjlaf'
+    self.assertEqual(crx_id.GetCRXAppID(r'c:\temp_extension',
+                                        from_file_path=True,
+                                        is_win_path=True),
+                     kWinPathId)
+    self.assertEqual(crx_id.GetCRXAppID(r'C:\temp_extension',
+                                        from_file_path=True,
+                                        is_win_path=True),
+                     kWinPathId)
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome/desktop_browser_backend.py b/catapult/telemetry/telemetry/internal/backends/chrome/desktop_browser_backend.py
new file mode 100644
index 0000000..4044ad9
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome/desktop_browser_backend.py
@@ -0,0 +1,615 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import datetime
+import glob
+import heapq
+import logging
+import os
+import os.path
+import random
+import re
+import shutil
+import subprocess
+import sys
+import tempfile
+import time
+
+from catapult_base import cloud_storage  # pylint: disable=import-error
+import dependency_manager  # pylint: disable=import-error
+
+from telemetry.internal.util import binary_manager
+from telemetry.core import exceptions
+from telemetry.core import util
+from telemetry.internal.backends import browser_backend
+from telemetry.internal.backends.chrome import chrome_browser_backend
+from telemetry.internal.util import path
+
+
+def ParseCrashpadDateTime(date_time_str):
+  # Python strptime does not support time zone parsing, strip it.
+  date_time_parts = date_time_str.split()
+  if len(date_time_parts) >= 3:
+    date_time_str = ' '.join(date_time_parts[:2])
+  return datetime.datetime.strptime(date_time_str, '%Y-%m-%d %H:%M:%S')
+
+
+def GetSymbolBinaries(minidump, arch_name, os_name):
+  # Returns binary file where symbols are located.
+  minidump_dump = binary_manager.FetchPath('minidump_dump', arch_name, os_name)
+  assert minidump_dump
+
+  symbol_binaries = []
+
+  minidump_cmd = [minidump_dump, minidump]
+  try:
+    with open(os.devnull, 'wb') as DEVNULL:
+      minidump_output = subprocess.check_output(minidump_cmd, stderr=DEVNULL)
+  except subprocess.CalledProcessError as e:
+    # For some reason minidump_dump always fails despite successful dumping.
+    minidump_output = e.output
+
+  minidump_binary_re = re.compile(r'\W+\(code_file\)\W+=\W\"(.*)\"')
+  for minidump_line in minidump_output.splitlines():
+    line_match = minidump_binary_re.match(minidump_line)
+    if line_match:
+      binary_path = line_match.group(1)
+      if not os.path.isfile(binary_path):
+        continue
+
+      # Filter out system binaries.
+      if (binary_path.startswith('/usr/lib/') or
+          binary_path.startswith('/System/Library/') or
+          binary_path.startswith('/lib/')):
+        continue
+
+      # Filter out other binary file types which have no symbols.
+      if (binary_path.endswith('.pak') or
+          binary_path.endswith('.bin') or
+          binary_path.endswith('.dat')):
+        continue
+
+      symbol_binaries.append(binary_path)
+  return symbol_binaries
+
+
+def GenerateBreakpadSymbols(minidump, arch, os_name, symbols_dir, browser_dir):
+  logging.info('Dumping breakpad symbols.')
+  generate_breakpad_symbols_command = binary_manager.FetchPath(
+      'generate_breakpad_symbols', arch, os_name)
+  if generate_breakpad_symbols_command is None:
+    return
+
+  for binary_path in GetSymbolBinaries(minidump, arch, os_name):
+    cmd = [
+        sys.executable,
+        generate_breakpad_symbols_command,
+        '--binary=%s' % binary_path,
+        '--symbols-dir=%s' % symbols_dir,
+        '--build-dir=%s' % browser_dir,
+        ]
+
+    try:
+      subprocess.check_output(cmd, stderr=open(os.devnull, 'w'))
+    except subprocess.CalledProcessError:
+      logging.warning('Failed to execute "%s"' % ' '.join(cmd))
+      return
+
+
+class DesktopBrowserBackend(chrome_browser_backend.ChromeBrowserBackend):
+  """The backend for controlling a locally-executed browser instance, on Linux,
+  Mac or Windows.
+  """
+  def __init__(self, desktop_platform_backend, browser_options, executable,
+               flash_path, is_content_shell, browser_directory,
+               output_profile_path, extensions_to_load):
+    super(DesktopBrowserBackend, self).__init__(
+        desktop_platform_backend,
+        supports_tab_control=not is_content_shell,
+        supports_extensions=not is_content_shell,
+        browser_options=browser_options,
+        output_profile_path=output_profile_path,
+        extensions_to_load=extensions_to_load)
+
+    # Initialize fields so that an explosion during init doesn't break in Close.
+    self._proc = None
+    self._tmp_profile_dir = None
+    self._tmp_output_file = None
+
+    self._executable = executable
+    if not self._executable:
+      raise Exception('Cannot create browser, no executable found!')
+
+    assert not flash_path or os.path.exists(flash_path)
+    self._flash_path = flash_path
+
+    self._is_content_shell = is_content_shell
+
+    if len(extensions_to_load) > 0 and is_content_shell:
+      raise browser_backend.ExtensionsNotSupportedException(
+          'Content shell does not support extensions.')
+
+    self._browser_directory = browser_directory
+    self._port = None
+    self._tmp_minidump_dir = tempfile.mkdtemp()
+    self._crash_service = None
+    if self.browser_options.enable_logging:
+      self._log_file_path = os.path.join(tempfile.mkdtemp(), 'chrome.log')
+    else:
+      self._log_file_path = None
+
+    self._SetupProfile()
+
+  @property
+  def log_file_path(self):
+    return self._log_file_path
+
+  @property
+  def supports_uploading_logs(self):
+    return (self.browser_options.logs_cloud_bucket and self.log_file_path and
+            os.path.isfile(self.log_file_path))
+
+  def _SetupProfile(self):
+    if not self.browser_options.dont_override_profile:
+      if self._output_profile_path:
+        self._tmp_profile_dir = self._output_profile_path
+      else:
+        self._tmp_profile_dir = tempfile.mkdtemp()
+
+      profile_dir = self.browser_options.profile_dir
+      if profile_dir:
+        assert self._tmp_profile_dir != profile_dir
+        if self._is_content_shell:
+          logging.critical('Profiles cannot be used with content shell')
+          sys.exit(1)
+        logging.info("Using profile directory:'%s'." % profile_dir)
+        shutil.rmtree(self._tmp_profile_dir)
+        shutil.copytree(profile_dir, self._tmp_profile_dir)
+    # No matter whether we're using an existing profile directory or
+    # creating a new one, always delete the well-known file containing
+    # the active DevTools port number.
+    port_file = self._GetDevToolsActivePortPath()
+    if os.path.isfile(port_file):
+      try:
+        os.remove(port_file)
+      except Exception as e:
+        logging.critical('Unable to remove DevToolsActivePort file: %s' % e)
+        sys.exit(1)
+
+  def _GetDevToolsActivePortPath(self):
+    return os.path.join(self.profile_directory, 'DevToolsActivePort')
+
+  def _GetCrashServicePipeName(self):
+    # Ensure a unique pipe name by using the name of the temp dir.
+    pipe = r'\\.\pipe\%s_service' % os.path.basename(self._tmp_minidump_dir)
+    return pipe
+
+  def _StartCrashService(self):
+    os_name = self.browser.platform.GetOSName()
+    if os_name != 'win':
+      return None
+    arch_name = self.browser.platform.GetArchName()
+    command = binary_manager.FetchPath('crash_service', arch_name, os_name)
+    if not command:
+      logging.warning('crash_service.exe not found for %s %s',
+                      arch_name, os_name)
+      return None
+    if not os.path.exists(command):
+      logging.warning('crash_service.exe not found for %s %s',
+                      arch_name, os_name)
+      return None
+
+    try:
+      crash_service = subprocess.Popen([
+          command,
+          '--no-window',
+          '--dumps-dir=%s' % self._tmp_minidump_dir,
+          '--pipe-name=%s' % self._GetCrashServicePipeName()])
+    except Exception:
+      logging.error(
+          'Failed to run %s --no-window --dumps-dir=%s --pipe-name=%s' % (
+            command, self._tmp_minidump_dir, self._GetCrashServicePipeName()))
+      logging.error('Running on platform: %s and arch: %s.', os_name, arch_name)
+      wmic_stdout, _ = subprocess.Popen(
+        ['wmic', 'process', 'get', 'CommandLine,Name,ProcessId,ParentProcessId',
+        '/format:csv'], stdout=subprocess.PIPE).communicate()
+      logging.error('Current running processes:\n%s' % wmic_stdout)
+      raise
+    return crash_service
+
+  def _GetCdbPath(self):
+    possible_paths = (
+        'Debugging Tools For Windows',
+        'Debugging Tools For Windows (x86)',
+        'Debugging Tools For Windows (x64)',
+        os.path.join('Windows Kits', '8.0', 'Debuggers', 'x86'),
+        os.path.join('Windows Kits', '8.0', 'Debuggers', 'x64'),
+        os.path.join('win_toolchain', 'vs2013_files', 'win8sdk', 'Debuggers',
+                     'x86'),
+        os.path.join('win_toolchain', 'vs2013_files', 'win8sdk', 'Debuggers',
+                     'x64'),
+    )
+    for possible_path in possible_paths:
+      app_path = os.path.join(possible_path, 'cdb.exe')
+      app_path = path.FindInstalledWindowsApplication(app_path)
+      if app_path:
+        return app_path
+    return None
+
+  def HasBrowserFinishedLaunching(self):
+    # In addition to the functional check performed by the base class, quickly
+    # check if the browser process is still alive.
+    if not self.IsBrowserRunning():
+      raise exceptions.ProcessGoneException(
+          "Return code: %d" % self._proc.returncode)
+    # Start DevTools on an ephemeral port and wait for the well-known file
+    # containing the port number to exist.
+    port_file = self._GetDevToolsActivePortPath()
+    if not os.path.isfile(port_file):
+      # File isn't ready yet. Return false. Will retry.
+      return False
+    # Attempt to avoid reading the file until it's populated.
+    got_port = False
+    try:
+      if os.stat(port_file).st_size > 0:
+        with open(port_file) as f:
+          port_string = f.read()
+          self._port = int(port_string)
+          logging.info('Discovered ephemeral port %s' % self._port)
+          got_port = True
+    except Exception:
+      # Both stat and open can throw exceptions.
+      pass
+    if not got_port:
+      # File isn't ready yet. Return false. Will retry.
+      return False
+    return super(DesktopBrowserBackend, self).HasBrowserFinishedLaunching()
+
+  def GetBrowserStartupArgs(self):
+    args = super(DesktopBrowserBackend, self).GetBrowserStartupArgs()
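+    # Request an ephemeral port; the browser writes the port it actually picks
+    # to the DevToolsActivePort file read in HasBrowserFinishedLaunching().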
+    self._port = 0
+    logging.info('Requested remote debugging port: %d' % self._port)
+    args.append('--remote-debugging-port=%i' % self._port)
+    args.append('--enable-crash-reporter-for-testing')
+    if not self._is_content_shell:
+      args.append('--window-size=1280,1024')
+      if self._flash_path:
+        args.append('--ppapi-flash-path=%s' % self._flash_path)
+      if not self.browser_options.dont_override_profile:
+        args.append('--user-data-dir=%s' % self._tmp_profile_dir)
+    else:
+      args.append('--data-path=%s' % self._tmp_profile_dir)
+
+    trace_config_file = (self.platform_backend.tracing_controller_backend
+                         .GetChromeTraceConfigFile())
+    if trace_config_file:
+      args.append('--trace-config-file=%s' % trace_config_file)
+    return args
+
+  def Start(self):
+    assert not self._proc, 'Must call Close() before Start()'
+
+    args = [self._executable]
+    args.extend(self.GetBrowserStartupArgs())
+    if self.browser_options.startup_url:
+      args.append(self.browser_options.startup_url)
+    env = os.environ.copy()
+    env['CHROME_HEADLESS'] = '1'  # Don't upload minidumps.
+    env['BREAKPAD_DUMP_LOCATION'] = self._tmp_minidump_dir
+    env['CHROME_BREAKPAD_PIPE_NAME'] = self._GetCrashServicePipeName()
+    if self.browser_options.enable_logging:
+      sys.stderr.write(
+        'Chrome log file will be saved in %s\n' % self.log_file_path)
+      env['CHROME_LOG_FILE'] = self.log_file_path
+    self._crash_service = self._StartCrashService()
+    logging.info('Starting Chrome %s', args)
+    if not self.browser_options.show_stdout:
+      self._tmp_output_file = tempfile.NamedTemporaryFile('w', 0)
+      self._proc = subprocess.Popen(
+          args, stdout=self._tmp_output_file, stderr=subprocess.STDOUT, env=env)
+    else:
+      self._proc = subprocess.Popen(args, env=env)
+
+    try:
+      self._WaitForBrowserToComeUp()
+      # browser is foregrounded by default on Windows and Linux, but not Mac.
+      if self.browser.platform.GetOSName() == 'mac':
+        subprocess.Popen([
+          'osascript', '-e', ('tell application "%s" to activate' %
+                              self._executable)])
+      self._InitDevtoolsClientBackend()
+      if self._supports_extensions:
+        self._WaitForExtensionsToLoad()
+    except:
+      self.Close()
+      raise
+
+  @property
+  def pid(self):
+    if self._proc:
+      return self._proc.pid
+    return None
+
+  @property
+  def browser_directory(self):
+    return self._browser_directory
+
+  @property
+  def profile_directory(self):
+    return self._tmp_profile_dir
+
+  def IsBrowserRunning(self):
+    return self._proc and self._proc.poll() is None
+
+  def GetStandardOutput(self):
+    if not self._tmp_output_file:
+      if self.browser_options.show_stdout:
+        # This can happen in the case that loading the Chrome binary fails.
+        # We print rather than using logging here, because that makes a
+        # recursive call to this function.
+        print >> sys.stderr, "Can't get standard output with --show-stdout"
+      return ''
+    self._tmp_output_file.flush()
+    try:
+      with open(self._tmp_output_file.name) as f:
+        return f.read()
+    except IOError:
+      return ''
+
+  def _GetMostRecentCrashpadMinidump(self):
+    os_name = self.browser.platform.GetOSName()
+    arch_name = self.browser.platform.GetArchName()
+    try:
+      crashpad_database_util = binary_manager.FetchPath(
+          'crashpad_database_util', arch_name, os_name)
+      if not crashpad_database_util:
+        return None
+    except dependency_manager.NoPathFoundError:
+      return None
+
+    report_output = subprocess.check_output([
+        crashpad_database_util, '--database=' + self._tmp_minidump_dir,
+        '--show-pending-reports', '--show-completed-reports',
+        '--show-all-report-info'])
+
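+    # Parse the indented report listing: pair each report's 'Path' with its
+    # 'Creation time' so the most recent report can be selected below.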
+    last_indentation = -1
+    reports_list = []
+    report_dict = {}
+    for report_line in report_output.splitlines():
+      # Report values are grouped together by the same indentation level.
+      current_indentation = 0
+      for report_char in report_line:
+        if not report_char.isspace():
+          break
+        current_indentation += 1
+
+      # Decrease in indentation level indicates a new report is being printed.
+      if current_indentation >= last_indentation:
+        report_key, report_value = report_line.split(':', 1)
+        if report_value:
+          report_dict[report_key.strip()] = report_value.strip()
+      elif report_dict:
+        try:
+          report_time = ParseCrashpadDateTime(report_dict['Creation time'])
+          report_path = report_dict['Path'].strip()
+          reports_list.append((report_time, report_path))
+        except (ValueError, KeyError) as e:
+          logging.warning('Crashpad report expected valid keys'
+                          ' "Path" and "Creation time": %s', e)
+        finally:
+          report_dict = {}
+
+      last_indentation = current_indentation
+
+    # Include the last report.
+    if report_dict:
+      try:
+        report_time = ParseCrashpadDateTime(report_dict['Creation time'])
+        report_path = report_dict['Path'].strip()
+        reports_list.append((report_time, report_path))
+      except (ValueError, KeyError) as e:
+        logging.warning('Crashpad report expected valid keys'
+                        ' "Path" and "Creation time": %s', e)
+
+    if reports_list:
+      _, most_recent_report_path = max(reports_list)
+      return most_recent_report_path
+
+    return None
+
+  def _GetMostRecentMinidump(self):
+    # Crashpad dump layout will be the standard eventually, check it first.
+    most_recent_dump = self._GetMostRecentCrashpadMinidump()
+
+    # Typical breakpad format is simply dump files in a folder.
+    if not most_recent_dump:
+      dumps = glob.glob(os.path.join(self._tmp_minidump_dir, '*.dmp'))
+      if dumps:
+        most_recent_dump = heapq.nlargest(1, dumps, os.path.getmtime)[0]
+
+    # As a sanity check, make sure the crash dump is recent.
+    if (most_recent_dump and
+        os.path.getmtime(most_recent_dump) < (time.time() - (5 * 60))):
+      logging.warning('Crash dump is older than 5 minutes. May not be correct.')
+
+    return most_recent_dump
+
+  def _IsExecutableStripped(self):
+    if self.browser.platform.GetOSName() == 'mac':
+      try:
+        symbols = subprocess.check_output(['/usr/bin/nm', self._executable])
+      except subprocess.CalledProcessError as err:
+        logging.warning('Error when checking whether executable is stripped: %s'
+                        % err.output)
+        # Just assume that binary is stripped to skip breakpad symbol generation
+        # if this check failed.
+        return True
+      num_symbols = len(symbols.splitlines())
+      # We assume that if there are more than 10 symbols the executable is not
+      # stripped.
+      return num_symbols < 10
+    else:
+      return False
+
+  def _GetStackFromMinidump(self, minidump):
+    os_name = self.browser.platform.GetOSName()
+    if os_name == 'win':
+      cdb = self._GetCdbPath()
+      if not cdb:
+        logging.warning('cdb.exe not found.')
+        return None
+      output = subprocess.check_output([cdb, '-y', self._browser_directory,
+                                        '-c', '.ecxr;k30;q', '-z', minidump])
+      # cdb output can start the stack with "ChildEBP", "Child-SP", and possibly
+      # other things we haven't seen yet. If we can't find the start of the
+      # stack, include output from the beginning.
+      stack_start = 0
+      stack_start_match = re.search("^Child(?:EBP|-SP)", output, re.MULTILINE)
+      if stack_start_match:
+        stack_start = stack_start_match.start()
+      stack_end = output.find('quit:')
+      return output[stack_start:stack_end]
+
+    arch_name = self.browser.platform.GetArchName()
+    stackwalk = binary_manager.FetchPath(
+        'minidump_stackwalk', arch_name, os_name)
+    if not stackwalk:
+      logging.warning('minidump_stackwalk binary not found.')
+      return None
+
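+    # Write a stripped copy of the dump that drops any data preceding the
+    # 'MDMP' magic before handing it to minidump_stackwalk.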
+    with open(minidump, 'rb') as infile:
+      minidump += '.stripped'
+      with open(minidump, 'wb') as outfile:
+        outfile.write(''.join(infile.read().partition('MDMP')[1:]))
+
+    symbols_path = os.path.join(self._tmp_minidump_dir, 'symbols')
+
+    symbols = glob.glob(os.path.join(self._browser_directory, '*.breakpad*'))
+    if symbols:
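+      # Each .breakpad file begins with a MODULE line; use its id and binary
+      # name to lay the file out as <binary>/<id>/<binary>.sym under
+      # symbols_path, the layout minidump_stackwalk expects.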
+      for symbol in sorted(symbols, key=os.path.getmtime, reverse=True):
+        if not os.path.isfile(symbol):
+          continue
+        with open(symbol, 'r') as f:
+          fields = f.readline().split()
+          if not fields:
+            continue
+          sha = fields[3]
+          binary = ' '.join(fields[4:])
+        symbol_path = os.path.join(symbols_path, binary, sha)
+        if os.path.exists(symbol_path):
+          continue
+        os.makedirs(symbol_path)
+        shutil.copyfile(symbol, os.path.join(symbol_path, binary + '.sym'))
+    else:
+      # On some platforms generating the symbol table can be very time
+      # consuming, skip it if there's nothing to dump.
+      if self._IsExecutableStripped():
+        logging.info('%s appears to be stripped, skipping symbol dump.' % (
+            self._executable))
+        return
+
+      GenerateBreakpadSymbols(minidump, arch_name, os_name,
+                              symbols_path, self._browser_directory)
+
+    return subprocess.check_output([stackwalk, minidump, symbols_path],
+                                   stderr=open(os.devnull, 'w'))
+
+  def _UploadMinidumpToCloudStorage(self, minidump_path):
+    """ Upload minidump_path to cloud storage and return the cloud storage url.
+    """
+    remote_path = ('minidump-%s-%i.dmp' %
+                   (datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'),
+                    random.randint(0, 1000000)))
+    try:
+      return cloud_storage.Insert(cloud_storage.TELEMETRY_OUTPUT, remote_path,
+                                  minidump_path)
+    except cloud_storage.CloudStorageError as err:
+      logging.error('Cloud storage error while trying to upload dump: %s' %
+                    repr(err))
+      return '<Missing link>'
+
+  def GetStackTrace(self):
+    most_recent_dump = self._GetMostRecentMinidump()
+    if not most_recent_dump:
+      return 'No crash dump found.'
+    logging.info('Minidump found: %s' % most_recent_dump)
+    stack = self._GetStackFromMinidump(most_recent_dump)
+    if not stack:
+      cloud_storage_link = self._UploadMinidumpToCloudStorage(most_recent_dump)
+      return ('Failed to symbolize minidump. Raw stack is uploaded to cloud '
+              'storage: %s.' % cloud_storage_link)
+    return stack
+
+  def __del__(self):
+    self.Close()
+
+  def _TryCooperativeShutdown(self):
+    if self.browser.platform.IsCooperativeShutdownSupported():
+      # Ideally there would be a portable, cooperative shutdown
+      # mechanism for the browser. This seems difficult to do
+      # correctly for all embedders of the content API. The only known
+      # problem with unclean shutdown of the browser process is on
+      # Windows, where suspended child processes frequently leak. For
+      # now, just solve this particular problem. See Issue 424024.
+      if self.browser.platform.CooperativelyShutdown(self._proc, "chrome"):
+        try:
+          util.WaitFor(lambda: not self.IsBrowserRunning(), timeout=5)
+          logging.info('Successfully shut down browser cooperatively')
+        except exceptions.TimeoutException as e:
+          logging.warning('Failed to cooperatively shutdown. ' +
+                          'Proceeding to terminate: ' + str(e))
+
+  def Close(self):
+    super(DesktopBrowserBackend, self).Close()
+
+    # First, try to cooperatively shutdown.
+    if self.IsBrowserRunning():
+      self._TryCooperativeShutdown()
+
+    # Second, try to politely shutdown with SIGTERM.
+    if self.IsBrowserRunning():
+      self._proc.terminate()
+      try:
+        util.WaitFor(lambda: not self.IsBrowserRunning(), timeout=5)
+        self._proc = None
+      except exceptions.TimeoutException:
+        logging.warning('Failed to gracefully shutdown.')
+
+    # Shutdown aggressively if all above failed.
+    if self.IsBrowserRunning():
+      logging.warning('Proceeding to kill the browser.')
+      self._proc.kill()
+    self._proc = None
+
+    if self._crash_service:
+      self._crash_service.kill()
+      self._crash_service = None
+
+    if self._output_profile_path:
+      # If we need the output, then double-check that it exists.
+      if not (self._tmp_profile_dir and os.path.exists(self._tmp_profile_dir)):
+        raise Exception("No profile directory generated by Chrome: '%s'." %
+            self._tmp_profile_dir)
+    else:
+      # If we don't need the profile after the run then cleanup.
+      if self._tmp_profile_dir and os.path.exists(self._tmp_profile_dir):
+        shutil.rmtree(self._tmp_profile_dir, ignore_errors=True)
+        self._tmp_profile_dir = None
+
+    if self._tmp_output_file:
+      self._tmp_output_file.close()
+      self._tmp_output_file = None
+
+    if self._tmp_minidump_dir:
+      shutil.rmtree(self._tmp_minidump_dir, ignore_errors=True)
+      self._tmp_minidump_dir = None
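
For reference, the fields[3] / fields[4:] indexing in _GetStackFromMinidump
above relies on the Breakpad .sym header line having the form
'MODULE <os> <arch> <id> <name>'. A minimal sketch, with a made-up module id:

    # First line of a Breakpad symbol file (module id is made up).
    line = 'MODULE Linux x86_64 A1B2C3D4E5F60718293A4B5C6D7E8F90 chrome'
    fields = line.split()
    sha = fields[3]                 # module id, used as the directory name
    binary = ' '.join(fields[4:])   # binary name, which may contain spaces
    assert (sha, binary) == ('A1B2C3D4E5F60718293A4B5C6D7E8F90', 'chrome')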
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome/desktop_browser_finder.py b/catapult/telemetry/telemetry/internal/backends/chrome/desktop_browser_finder.py
new file mode 100644
index 0000000..0db44f1
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome/desktop_browser_finder.py
@@ -0,0 +1,289 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Finds desktop browsers that can be controlled by telemetry."""
+
+import logging
+import os
+import sys
+
+import dependency_manager  # pylint: disable=import-error
+
+from telemetry.core import exceptions
+from telemetry.core import platform as platform_module
+from telemetry.internal.backends.chrome import desktop_browser_backend
+from telemetry.internal.browser import browser
+from telemetry.internal.browser import possible_browser
+from telemetry.internal.platform import desktop_device
+from telemetry.internal.util import binary_manager
+# This is a workaround for https://goo.gl/1tGNgd
+from telemetry.internal.util import path as path_module
+
+
+class PossibleDesktopBrowser(possible_browser.PossibleBrowser):
+  """A desktop browser that can be controlled."""
+
+  def __init__(self, browser_type, finder_options, executable, flash_path,
+               is_content_shell, browser_directory, is_local_build=False):
+    target_os = sys.platform.lower()
+    super(PossibleDesktopBrowser, self).__init__(
+        browser_type, target_os, not is_content_shell)
+    assert browser_type in FindAllBrowserTypes(finder_options), (
+        'Please add %s to desktop_browser_finder.FindAllBrowserTypes' %
+        browser_type)
+    self._local_executable = executable
+    self._flash_path = flash_path
+    self._is_content_shell = is_content_shell
+    self._browser_directory = browser_directory
+    self.is_local_build = is_local_build
+
+  def __repr__(self):
+    return 'PossibleDesktopBrowser(type=%s, executable=%s, flash=%s)' % (
+        self.browser_type, self._local_executable, self._flash_path)
+
+  def _InitPlatformIfNeeded(self):
+    if self._platform:
+      return
+
+    self._platform = platform_module.GetHostPlatform()
+
+    # pylint: disable=protected-access
+    self._platform_backend = self._platform._platform_backend
+
+  def Create(self, finder_options):
+    if self._flash_path and not os.path.exists(self._flash_path):
+      logging.warning(
+          'Could not find Flash at %s. Continuing without Flash.\n'
+          'To run with Flash, check it out via http://go/read-src-internal',
+          self._flash_path)
+      self._flash_path = None
+
+    self._InitPlatformIfNeeded()
+
+    browser_backend = desktop_browser_backend.DesktopBrowserBackend(
+        self._platform_backend,
+        finder_options.browser_options, self._local_executable,
+        self._flash_path, self._is_content_shell, self._browser_directory,
+        output_profile_path=finder_options.output_profile_path,
+        extensions_to_load=finder_options.extensions_to_load)
+    return browser.Browser(
+        browser_backend, self._platform_backend, self._credentials_path)
+
+  def SupportsOptions(self, finder_options):
+    if (len(finder_options.extensions_to_load) != 0) and self._is_content_shell:
+      return False
+    return True
+
+  def UpdateExecutableIfNeeded(self):
+    pass
+
+  def last_modification_time(self):
+    if os.path.exists(self._local_executable):
+      return os.path.getmtime(self._local_executable)
+    return -1
+
+def SelectDefaultBrowser(possible_browsers):
+  local_builds_by_date = [
+      b for b in sorted(possible_browsers,
+                        key=lambda b: b.last_modification_time())
+      if b.is_local_build]
+  if local_builds_by_date:
+    return local_builds_by_date[-1]
+  return None
+
+def CanFindAvailableBrowsers():
+  return not platform_module.GetHostPlatform().GetOSName() == 'chromeos'
+
+def CanPossiblyHandlePath(target_path):
+  _, extension = os.path.splitext(target_path.lower())
+  if sys.platform == 'darwin' or sys.platform.startswith('linux'):
+    return not extension
+  elif sys.platform.startswith('win'):
+    return extension == '.exe'
+  return False
+
+def FindAllBrowserTypes(_):
+  return [
+      'exact',
+      'reference',
+      'release',
+      'release_x64',
+      'debug',
+      'debug_x64',
+      'default',
+      'stable',
+      'beta',
+      'dev',
+      'canary',
+      'content-shell-debug',
+      'content-shell-debug_x64',
+      'content-shell-release',
+      'content-shell-release_x64',
+      'content-shell-default',
+      'system']
+
+def FindAllAvailableBrowsers(finder_options, device):
+  """Finds all the desktop browsers available on this machine."""
+  if not isinstance(device, desktop_device.DesktopDevice):
+    return []
+
+  browsers = []
+
+  if not CanFindAvailableBrowsers():
+    return []
+
+  has_x11_display = True
+  if (sys.platform.startswith('linux') and
+      os.getenv('DISPLAY') is None):
+    has_x11_display = False
+
+  os_name = platform_module.GetHostPlatform().GetOSName()
+  arch_name = platform_module.GetHostPlatform().GetArchName()
+  try:
+    flash_path = binary_manager.LocalPath('flash', arch_name, os_name)
+  except dependency_manager.NoPathFoundError:
+    flash_path = None
+    logging.warning(
+        'Flash binary for %s_%s not found. Browser will be run '
+        'without Flash.', os_name, arch_name)
+
+  chromium_app_names = []
+  if sys.platform == 'darwin':
+    chromium_app_names.append('Chromium.app/Contents/MacOS/Chromium')
+    chromium_app_names.append('Google Chrome.app/Contents/MacOS/Google Chrome')
+    content_shell_app_name = 'Content Shell.app/Contents/MacOS/Content Shell'
+  elif sys.platform.startswith('linux'):
+    chromium_app_names.append('chrome')
+    content_shell_app_name = 'content_shell'
+  elif sys.platform.startswith('win'):
+    chromium_app_names.append('chrome.exe')
+    content_shell_app_name = 'content_shell.exe'
+  else:
+    raise Exception('Platform not recognized')
+
+  # Add the explicit browser executable if given and we can handle it.
+  if (finder_options.browser_executable and
+      CanPossiblyHandlePath(finder_options.browser_executable)):
+    is_content_shell = finder_options.browser_executable.endswith(
+        content_shell_app_name)
+    is_chrome_or_chromium = len([x for x in chromium_app_names if
+        finder_options.browser_executable.endswith(x)]) != 0
+
+    # It is okay if the executable name doesn't match any of the known Chrome
+    # browser executables, since it may be a different browser.
+    if is_chrome_or_chromium or is_content_shell:
+      normalized_executable = os.path.expanduser(
+          finder_options.browser_executable)
+      if path_module.IsExecutable(normalized_executable):
+        browser_directory = os.path.dirname(finder_options.browser_executable)
+        browsers.append(PossibleDesktopBrowser(
+            'exact', finder_options, normalized_executable, flash_path,
+            is_content_shell,
+            browser_directory))
+      else:
+        raise exceptions.PathMissingError(
+            '%s specified by --browser-executable does not exist or is not '
+            'executable' %
+            normalized_executable)
+
+  def AddIfFound(browser_type, build_path, app_name, content_shell):
+    app = os.path.join(build_path, app_name)
+    if path_module.IsExecutable(app):
+      browsers.append(PossibleDesktopBrowser(
+          browser_type, finder_options, app, flash_path,
+          content_shell, build_path, is_local_build=True))
+      return True
+    return False
+
+  # Add local builds
+  for build_path in path_module.GetBuildDirectories(finder_options.chrome_root):
+    # TODO(agrieve): Extract browser_type from args.gn's is_debug.
+    browser_type = os.path.basename(build_path).lower()
+    for chromium_app_name in chromium_app_names:
+      AddIfFound(browser_type, build_path, chromium_app_name, False)
+    AddIfFound('content-shell-' + browser_type, build_path,
+               content_shell_app_name, True)
+
+  reference_build = None
+  if finder_options.browser_type == 'reference':
+    # Reference builds are only available in a Chromium checkout. We should not
+    # raise an error just because they don't exist.
+    os_name = platform_module.GetHostPlatform().GetOSName()
+    arch_name = platform_module.GetHostPlatform().GetArchName()
+    reference_build = binary_manager.FetchPath(
+        'reference_build', arch_name, os_name)
+
+  # Mac-specific options.
+  if sys.platform == 'darwin':
+    mac_canary_root = '/Applications/Google Chrome Canary.app/'
+    mac_canary = mac_canary_root + 'Contents/MacOS/Google Chrome Canary'
+    mac_system_root = '/Applications/Google Chrome.app'
+    mac_system = mac_system_root + '/Contents/MacOS/Google Chrome'
+    if path_module.IsExecutable(mac_canary):
+      browsers.append(PossibleDesktopBrowser('canary', finder_options,
+                                             mac_canary, None, False,
+                                             mac_canary_root))
+
+    if path_module.IsExecutable(mac_system):
+      browsers.append(PossibleDesktopBrowser('system', finder_options,
+                                             mac_system, None, False,
+                                             mac_system_root))
+
+    if reference_build and path_module.IsExecutable(reference_build):
+      reference_root = os.path.dirname(os.path.dirname(os.path.dirname(
+          reference_build)))
+      browsers.append(PossibleDesktopBrowser('reference', finder_options,
+                                             reference_build, None, False,
+                                             reference_root))
+
+  # Linux specific options.
+  if sys.platform.startswith('linux'):
+    versions = {
+        'system': os.path.split(os.path.realpath('/usr/bin/google-chrome'))[0],
+        'stable': '/opt/google/chrome',
+        'beta': '/opt/google/chrome-beta',
+        'dev': '/opt/google/chrome-unstable'
+    }
+
+    for version, root in versions.iteritems():
+      browser_path = os.path.join(root, 'chrome')
+      if path_module.IsExecutable(browser_path):
+        browsers.append(PossibleDesktopBrowser(version, finder_options,
+                                               browser_path, None, False, root))
+    if reference_build and path_module.IsExecutable(reference_build):
+      reference_root = os.path.dirname(reference_build)
+      browsers.append(PossibleDesktopBrowser('reference', finder_options,
+                                             reference_build, None, False,
+                                             reference_root))
+
+  # Win32-specific options.
+  if sys.platform.startswith('win'):
+    app_paths = [
+        ('system', os.path.join('Google', 'Chrome', 'Application')),
+        ('canary', os.path.join('Google', 'Chrome SxS', 'Application')),
+    ]
+    if reference_build:
+      app_paths.append(
+          ('reference', os.path.dirname(reference_build)))
+
+    for browser_name, app_path in app_paths:
+      for chromium_app_name in chromium_app_names:
+        full_path = path_module.FindInstalledWindowsApplication(
+            os.path.join(app_path, chromium_app_name))
+        if full_path:
+          browsers.append(PossibleDesktopBrowser(
+              browser_name, finder_options, full_path,
+              None, False, os.path.dirname(full_path)))
+
+  has_ozone_platform = False
+  for arg in finder_options.browser_options.extra_browser_args:
+    if "--ozone-platform" in arg:
+      has_ozone_platform = True
+
+  if len(browsers) and not has_x11_display and not has_ozone_platform:
+    logging.warning(
+      'Found (%s), but the DISPLAY environment variable is not set.' %
+      ','.join([b.browser_type for b in browsers]))
+    return []
+
+  return browsers
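
A minimal usage sketch of the finder added above, mirroring the unit tests
that follow (the chrome_root path is a placeholder):

    from telemetry.internal.backends.chrome import desktop_browser_finder
    from telemetry.internal.browser import browser_options
    from telemetry.internal.platform import desktop_device

    options = browser_options.BrowserFinderOptions()
    options.chrome_root = '/path/to/chromium/src'  # placeholder
    browsers = desktop_browser_finder.FindAllAvailableBrowsers(
        options, desktop_device.DesktopDevice())
    # SelectDefaultBrowser prefers the most recently built local browser.
    print [(b.browser_type, b.is_local_build) for b in browsers]
    print desktop_browser_finder.SelectDefaultBrowser(browsers)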
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome/desktop_browser_finder_unittest.py b/catapult/telemetry/telemetry/internal/backends/chrome/desktop_browser_finder_unittest.py
new file mode 100644
index 0000000..a874fe2
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome/desktop_browser_finder_unittest.py
@@ -0,0 +1,305 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import os
+import stat
+import unittest
+
+from pyfakefs import fake_filesystem_unittest
+
+from telemetry.core import platform
+from telemetry.core import util
+from telemetry.internal.backends.chrome import desktop_browser_finder
+from telemetry.internal.browser import browser_options
+from telemetry.internal.platform import desktop_device
+from telemetry.testing import system_stub
+
+
+# This file verifies the logic for finding a browser instance on all platforms
+# at once. It does so by providing stubs for the OS/sys/subprocess primitives
+# that the underlying finding logic usually uses to locate a suitable browser.
+# We prefer this approach to having to run the same test on every platform on
+# which we want this code to work.
+
+class FindTestBase(unittest.TestCase):
+  def setUp(self):
+    self._finder_options = browser_options.BrowserFinderOptions()
+    self._finder_options.chrome_root = '../../../'
+    self._finder_stubs = system_stub.Override(desktop_browser_finder,
+                                              ['os', 'subprocess', 'sys'])
+    self._path_stubs = system_stub.Override(
+        desktop_browser_finder.path_module, ['os', 'sys'])
+    self._catapult_path_stubs = system_stub.Override(
+        desktop_browser_finder.path_module.catapult_util, ['os', 'sys'])
+    self._util_stubs = system_stub.Override(util, ['os', 'sys'])
+    self._browser_finder_stubs = system_stub.Override(desktop_browser_finder,
+                                                      ['os', 'sys'])
+
+  def tearDown(self):
+    self._finder_stubs.Restore()
+    self._path_stubs.Restore()
+    self._catapult_path_stubs.Restore()
+    self._util_stubs.Restore()
+    self._browser_finder_stubs.Restore()
+
+  @property
+  def _files(self):
+    return self._catapult_path_stubs.os.path.files
+
+  def DoFindAll(self):
+    return desktop_browser_finder.FindAllAvailableBrowsers(
+      self._finder_options, desktop_device.DesktopDevice())
+
+  def DoFindAllTypes(self):
+    browsers = self.DoFindAll()
+    return [b.browser_type for b in browsers]
+
+  def CanFindAvailableBrowsers(self):
+    return desktop_browser_finder.CanFindAvailableBrowsers()
+
+
+def has_type(array, browser_type):
+  return len([x for x in array if x.browser_type == browser_type]) != 0
+
+
+class FindSystemTest(FindTestBase):
+  def setUp(self):
+    super(FindSystemTest, self).setUp()
+    self._finder_stubs.sys.platform = 'win32'
+    self._path_stubs.sys.platform = 'win32'
+    self._util_stubs.sys.platform = 'win32'
+    self._browser_finder_stubs.sys.platform = 'win32'
+
+  def testFindProgramFiles(self):
+    if not self.CanFindAvailableBrowsers():
+      return
+
+    self._files.append(
+        'C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe')
+    self._path_stubs.os.program_files = 'C:\\Program Files'
+    self.assertIn('system', self.DoFindAllTypes())
+
+  def testFindProgramFilesX86(self):
+    if not self.CanFindAvailableBrowsers():
+      return
+
+    self._files.append(
+        'C:\\Program Files(x86)\\Google\\Chrome\\Application\\chrome.exe')
+    self._path_stubs.os.program_files_x86 = 'C:\\Program Files(x86)'
+    self.assertIn('system', self.DoFindAllTypes())
+
+  def testFindLocalAppData(self):
+    if not self.CanFindAvailableBrowsers():
+      return
+
+    self._files.append(
+        'C:\\Local App Data\\Google\\Chrome\\Application\\chrome.exe')
+    self._path_stubs.os.local_app_data = 'C:\\Local App Data'
+    self.assertIn('system', self.DoFindAllTypes())
+
+
+class FindLocalBuildsTest(FindTestBase):
+  def setUp(self):
+    super(FindLocalBuildsTest, self).setUp()
+    self._finder_stubs.sys.platform = 'win32'
+    self._path_stubs.sys.platform = 'win32'
+    self._util_stubs.sys.platform = 'win32'
+    self._browser_finder_stubs.sys.platform = 'win32'
+
+  def testFindBuild(self):
+    if not self.CanFindAvailableBrowsers():
+      return
+
+    self._files.append('..\\..\\..\\build\\Release\\chrome.exe')
+    self.assertIn('release', self.DoFindAllTypes())
+
+  def testFindOut(self):
+    if not self.CanFindAvailableBrowsers():
+      return
+
+    self._files.append('..\\..\\..\\out\\Release\\chrome.exe')
+    self.assertIn('release', self.DoFindAllTypes())
+
+  def testFindXcodebuild(self):
+    if not self.CanFindAvailableBrowsers():
+      return
+
+    self._files.append('..\\..\\..\\xcodebuild\\Release\\chrome.exe')
+    self.assertIn('release', self.DoFindAllTypes())
+
+
+class OSXFindTest(FindTestBase):
+  def setUp(self):
+    super(OSXFindTest, self).setUp()
+    self._finder_stubs.sys.platform = 'darwin'
+    self._path_stubs.sys.platform = 'darwin'
+    self._util_stubs.sys.platform = 'darwin'
+    self._browser_finder_stubs.sys.platform = 'darwin'
+    self._files.append('/Applications/Google Chrome Canary.app/'
+                       'Contents/MacOS/Google Chrome Canary')
+    self._files.append('/Applications/Google Chrome.app/' +
+                       'Contents/MacOS/Google Chrome')
+    self._files.append(
+      '../../../out/Release/Chromium.app/Contents/MacOS/Chromium')
+    self._files.append(
+      '../../../out/Debug/Chromium.app/Contents/MacOS/Chromium')
+    self._files.append(
+      '../../../out/Release/Content Shell.app/Contents/MacOS/Content Shell')
+    self._files.append(
+      '../../../out/Debug/Content Shell.app/Contents/MacOS/Content Shell')
+
+  def testFindAll(self):
+    if not self.CanFindAvailableBrowsers():
+      return
+
+    types = self.DoFindAllTypes()
+    self.assertEquals(
+      set(types),
+      set(['debug', 'release',
+           'content-shell-debug', 'content-shell-release',
+           'canary', 'system']))
+
+  def testFindExact(self):
+    if not self.CanFindAvailableBrowsers():
+      return
+
+    self._files.append(
+      '../../../foo1/Chromium.app/Contents/MacOS/Chromium')
+    self._finder_options.browser_executable = (
+        '../../../foo1/Chromium.app/Contents/MacOS/Chromium')
+    types = self.DoFindAllTypes()
+    self.assertTrue('exact' in types)
+
+  def testCannotFindExact(self):
+    if not self.CanFindAvailableBrowsers():
+      return
+
+    self._files.append(
+      '../../../foo1/Chromium.app/Contents/MacOS/Chromium')
+    self._finder_options.browser_executable = (
+        '../../../foo2/Chromium.app/Contents/MacOS/Chromium')
+    self.assertRaises(Exception, self.DoFindAllTypes)
+
+class LinuxFindTest(fake_filesystem_unittest.TestCase):
+
+  def setUp(self):
+    if not platform.GetHostPlatform().GetOSName() == 'linux':
+      self.skipTest('Not running on Linux')
+    self.setUpPyfakefs()
+
+    self._finder_options = browser_options.BrowserFinderOptions()
+    self._finder_options.chrome_root = '/src/'
+
+  def CreateBrowser(self, path):
+    self.fs.CreateFile(path)
+    os.chmod(path, stat.S_IXUSR)
+
+  def DoFindAll(self):
+    return desktop_browser_finder.FindAllAvailableBrowsers(
+        self._finder_options, desktop_device.DesktopDevice())
+
+  def DoFindAllTypes(self):
+    return [b.browser_type for b in self.DoFindAll()]
+
+  def testFindAllWithCheckout(self):
+    for target in ['Release', 'Debug']:
+      for browser in ['chrome', 'content_shell']:
+        self.CreateBrowser('/src/out/%s/%s' % (target, browser))
+
+    self.assertEquals(
+        set(self.DoFindAllTypes()),
+        {'debug', 'release', 'content-shell-debug', 'content-shell-release'})
+
+  def testFindAllFailsIfNotExecutable(self):
+    self.fs.CreateFile('/src/out/Release/chrome')
+
+    self.assertFalse(self.DoFindAllTypes())
+
+  def testFindWithProvidedExecutable(self):
+    self.CreateBrowser('/foo/chrome')
+    self._finder_options.browser_executable = '/foo/chrome'
+    self.assertIn('exact', self.DoFindAllTypes())
+
+  def testFindWithProvidedApk(self):
+    self._finder_options.browser_executable = '/foo/chrome.apk'
+    self.assertNotIn('exact', self.DoFindAllTypes())
+
+  def testNoErrorWithNonChromeExecutableName(self):
+    self.fs.CreateFile('/foo/another_browser')
+    self._finder_options.browser_executable = '/foo/another_browser'
+    self.assertNotIn('exact', self.DoFindAllTypes())
+
+  def testFindAllWithInstalled(self):
+    official_names = ['chrome', 'chrome-beta', 'chrome-unstable']
+
+    for name in official_names:
+      self.CreateBrowser('/opt/google/%s/chrome' % name)
+
+    self.assertEquals(set(self.DoFindAllTypes()), {'stable', 'beta', 'dev'})
+
+  def testFindAllSystem(self):
+    self.CreateBrowser('/opt/google/chrome/chrome')
+    os.symlink('/opt/google/chrome/chrome', '/usr/bin/google-chrome')
+
+    self.assertEquals(set(self.DoFindAllTypes()), {'system', 'stable'})
+
+  def testFindAllSystemIsBeta(self):
+    self.CreateBrowser('/opt/google/chrome/chrome')
+    self.CreateBrowser('/opt/google/chrome-beta/chrome')
+    os.symlink('/opt/google/chrome-beta/chrome', '/usr/bin/google-chrome')
+
+    google_chrome = [browser for browser in self.DoFindAll()
+                     if browser.browser_type == 'system'][0]
+    self.assertEquals('/opt/google/chrome-beta',
+                      google_chrome._browser_directory)
+
+
+class WinFindTest(FindTestBase):
+  def setUp(self):
+    super(WinFindTest, self).setUp()
+
+    self._finder_stubs.sys.platform = 'win32'
+    self._path_stubs.sys.platform = 'win32'
+    self._util_stubs.sys.platform = 'win32'
+    self._browser_finder_stubs.sys.platform = 'win32'
+    self._path_stubs.os.local_app_data = 'c:\\Users\\Someone\\AppData\\Local'
+    self._files.append('c:\\tmp\\chrome.exe')
+    self._files.append('..\\..\\..\\build\\Release\\chrome.exe')
+    self._files.append('..\\..\\..\\build\\Debug\\chrome.exe')
+    self._files.append('..\\..\\..\\build\\Release\\content_shell.exe')
+    self._files.append('..\\..\\..\\build\\Debug\\content_shell.exe')
+    self._files.append(self._path_stubs.os.local_app_data + '\\' +
+                       'Google\\Chrome\\Application\\chrome.exe')
+    self._files.append(self._path_stubs.os.local_app_data + '\\' +
+                       'Google\\Chrome SxS\\Application\\chrome.exe')
+
+  def testFindAllGivenDefaults(self):
+    if not self.CanFindAvailableBrowsers():
+      return
+
+    types = self.DoFindAllTypes()
+    self.assertEquals(set(types),
+                      set(['debug', 'release',
+                           'content-shell-debug', 'content-shell-release',
+                           'system', 'canary']))
+
+  def testFindAllWithExact(self):
+    if not self.CanFindAvailableBrowsers():
+      return
+
+    self._finder_options.browser_executable = 'c:\\tmp\\chrome.exe'
+    types = self.DoFindAllTypes()
+    self.assertEquals(
+        set(types),
+        set(['exact',
+             'debug', 'release',
+             'content-shell-debug', 'content-shell-release',
+             'system', 'canary']))
+
+  def testNoErrorWithUnrecognizedExecutableName(self):
+    if not self.CanFindAvailableBrowsers():
+      return
+
+    self._files.append('c:\\foo\\another_browser.exe')
+    self._finder_options.browser_dir = 'c:\\foo\\another_browser.exe'
+    self.assertNotIn('exact', self.DoFindAllTypes())
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome/extension_backend.py b/catapult/telemetry/telemetry/internal/backends/chrome/extension_backend.py
new file mode 100644
index 0000000..b6abd97
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome/extension_backend.py
@@ -0,0 +1,48 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+
+from telemetry.internal.backends.chrome_inspector import inspector_backend_list
+from telemetry.internal.browser import extension_page
+
+
+class ExtensionBackendList(inspector_backend_list.InspectorBackendList):
+  """A dynamic sequence of extension_page.ExtensionPages."""
+
+  def __init__(self, browser_backend):
+    super(ExtensionBackendList, self).__init__(browser_backend)
+
+  def ShouldIncludeContext(self, context):
+    return context['url'].startswith('chrome-extension://')
+
+  def CreateWrapper(self, inspector_backend):
+    return extension_page.ExtensionPage(inspector_backend)
+
+class ExtensionBackendDict(collections.Mapping):
+  """A dynamic mapping of extension_id to extension_page.ExtensionPages."""
+
+  def __init__(self, browser_backend):
+    self._extension_backend_list = ExtensionBackendList(browser_backend)
+
+  def __getitem__(self, extension_id):
+    extensions = []
+    for context_id in self._extension_backend_list.IterContextIds():
+      if self.ContextIdToExtensionId(context_id) == extension_id:
+        extensions.append(
+            self._extension_backend_list.GetBackendFromContextId(context_id))
+    if not extensions:
+      raise KeyError('Cannot find an extension with id=%s' % extension_id)
+    return extensions
+
+  def __iter__(self):
+    for context_id in self._extension_backend_list.IterContextIds():
+      yield self._extension_backend_list.GetBackendFromContextId(context_id)
+
+  def __len__(self):
+    return len(self._extension_backend_list)
+
+  def ContextIdToExtensionId(self, context_id):
+    context = self._extension_backend_list.GetContextInfo(context_id)
+    return extension_page.UrlToExtensionId(context['url'])
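
ContextIdToExtensionId above assumes that extension_page.UrlToExtensionId
pulls the extension id out of a chrome-extension:// URL; roughly, with a
made-up id:

    url = 'chrome-extension://abcdefghijklmnop/background.html'
    extension_id = url[len('chrome-extension://'):].split('/')[0]
    assert extension_id == 'abcdefghijklmnop'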
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome/ios_browser_backend.py b/catapult/telemetry/telemetry/internal/backends/chrome/ios_browser_backend.py
new file mode 100644
index 0000000..1c193f0
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome/ios_browser_backend.py
@@ -0,0 +1,146 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import contextlib
+import json
+import logging
+import re
+import urllib2
+
+from telemetry.core import exceptions
+from telemetry.core import util
+from telemetry.internal.backends.chrome import chrome_browser_backend
+from telemetry.internal.backends.chrome import system_info_backend
+
+
+class IosBrowserBackend(chrome_browser_backend.ChromeBrowserBackend):
+  _DEBUGGER_URL_BUILDER = 'ws://localhost:%i/devtools/page/%i'
+  _DEBUGGER_URL_REGEX = r'ws://localhost:(\d+)/devtools/page/(\d+)'
+  _DEVICE_LIST_URL = 'http://localhost:9221/json'
+
+  def __init__(self, ios_platform_backend, browser_options):
+    super(IosBrowserBackend, self).__init__(
+        ios_platform_backend,
+        supports_tab_control=False,
+        supports_extensions=False,
+        browser_options=browser_options,
+        output_profile_path=".",
+        extensions_to_load=None)
+    self._webviews = []
+    self._port = None
+    self._page = None
+    self._system_info_backend = None
+    self.UpdateRunningBrowsersInfo()
+
+  def UpdateRunningBrowsersInfo(self):
+    """ Refresh to match current state of the running browser.
+    """
+    device_urls = self.GetDeviceUrls()
+    urls = self.GetWebSocketDebuggerUrls(device_urls)
+    for url in urls:
+      m = re.match(self._DEBUGGER_URL_REGEX, url)
+      if m:
+        self._webviews.append([int(m.group(1)), int(m.group(2))])
+      else:
+        logging.error('Unexpected url format: %s' % url)
+
+    # TODO(baxley): For now, grab first item from |_webviews|. Ideally, we'd
+    # prefer to have the currently displayed tab, or something similar.
+    if self._webviews:
+      self._port = self._webviews[0][0]
+      self._page = self._webviews[0][1]
+
+  def GetDeviceUrls(self):
+    device_urls = []
+    try:
+      with contextlib.closing(
+          urllib2.urlopen(self._DEVICE_LIST_URL)) as device_list:
+        json_urls = device_list.read()
+        device_urls = json.loads(json_urls)
+        if not device_urls:
+          logging.debug('No iOS devices found. Will not try searching for iOS '
+                        'browsers.')
+          return []
+    except urllib2.URLError as e:
+      logging.debug('Error communicating with iOS device.')
+      logging.debug(str(e))
+      return []
+    return device_urls
+
+  def GetWebSocketDebuggerUrls(self, device_urls):
+    """ Get a list of the websocket debugger URLs to communicate with
+        all running UIWebViews.
+    """
+    data = []
+    # Loop through all devices.
+    for d in device_urls:
+      def GetData():
+        try:
+          with contextlib.closing(
+              # pylint: disable=cell-var-from-loop
+              urllib2.urlopen('http://%s/json' % d['url'])) as f:
+            json_result = f.read()
+            data = json.loads(json_result)
+            return data
+        except urllib2.URLError as e:
+          logging.debug('Error communicating with iOS device.')
+          logging.debug(e)
+          return False
+      try:
+        # Retry a few times since it can take a few seconds for this API to be
+        # ready, if ios_webkit_debug_proxy is just launched.
+        data = util.WaitFor(GetData, 5)
+      except exceptions.TimeoutException as e:
+        logging.debug('Timeout retrieving data from iOS device')
+        logging.debug(e)
+        return []
+
+    # Find all running UIWebViews.
+    debug_urls = []
+    for j in data:
+      debug_urls.append(j['webSocketDebuggerUrl'])
+
+    return debug_urls
+
+  def GetSystemInfo(self):
+    if self._system_info_backend is None:
+      self._system_info_backend = system_info_backend.SystemInfoBackend(
+          self._port, self._page)
+    return self._system_info_backend.GetSystemInfo()
+
+  def IsBrowserRunning(self):
+    return bool(self._webviews)
+
+  # TODO(baxley): The following were stubbed out to get the SunSpider benchmark
+  # running. These should be implemented.
+  @property
+  def browser_directory(self):
+    logging.warn('Not implemented')
+    return None
+
+  @property
+  def profile_directory(self):
+    logging.warn('Not implemented')
+    return None
+
+  def Start(self):
+    logging.warn('Not implemented')
+
+  def extension_backend(self):
+    logging.warn('Not implemented')
+    return None
+
+  def GetBrowserStartupArgs(self):
+    logging.warn('Not implemented')
+    return None
+
+  def HasBrowserFinishedLaunching(self):
+    logging.warn('Not implemented')
+    return False
+
+  def GetStandardOutput(self):
+    raise NotImplementedError()
+
+  def GetStackTrace(self):
+    raise NotImplementedError()
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome/ios_browser_finder.py b/catapult/telemetry/telemetry/internal/backends/chrome/ios_browser_finder.py
new file mode 100644
index 0000000..319c2c3
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome/ios_browser_finder.py
@@ -0,0 +1,115 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Finds iOS browsers that can be controlled by telemetry."""
+
+import logging
+import re
+
+from telemetry.core import platform
+from telemetry.internal.backends.chrome import ios_browser_backend
+from telemetry.internal.backends.chrome_inspector import inspector_backend
+from telemetry.internal.browser import browser
+from telemetry.internal.browser import possible_browser
+from telemetry.internal.platform import ios_device
+from telemetry.internal.platform import ios_platform_backend
+
+
+# Each key matches output from ios-webkit-debug-proxy and its value is a
+# readable description of the browser.
+IOS_BROWSERS = {'CriOS': 'ios-chrome', 'Version': 'ios-safari'}
+DEVICE_LIST_URL = 'http://127.0.0.1:9221/json'
+IOS_WEBKIT_DEBUG_PROXY = 'ios_webkit_debug_proxy'
+
+
+class PossibleIOSBrowser(possible_browser.PossibleBrowser):
+
+  """A running iOS browser instance."""
+  def __init__(self, browser_type, _):
+    super(PossibleIOSBrowser, self).__init__(browser_type, 'ios', True)
+
+  # TODO(baxley): Implement the following methods for iOS.
+  def Create(self, finder_options):
+    browser_backend = ios_browser_backend.IosBrowserBackend(
+        self._platform_backend, finder_options.browser_options)
+    return browser.Browser(
+        browser_backend, self._platform_backend, self._credentials_path)
+
+  def SupportsOptions(self, finder_options):
+    #TODO(baxley): Implement me.
+    return True
+
+  def UpdateExecutableIfNeeded(self):
+    #TODO(baxley): Implement me.
+    pass
+
+  def _InitPlatformIfNeeded(self):
+    if self._platform:
+      return
+
+    self._platform_backend = ios_platform_backend.IosPlatformBackend()
+    self._platform = platform.Platform(self._platform_backend)
+
+def SelectDefaultBrowser(_):
+  return None  # TODO(baxley): Implement me.
+
+
+def CanFindAvailableBrowsers():
+  # TODO(baxley): Add support for all platforms possible. Probably Linux,
+  # probably not Windows.
+  return platform.GetHostPlatform().GetOSName() == 'mac'
+
+
+def FindAllBrowserTypes(_):
+  return IOS_BROWSERS.values()
+
+
+def FindAllAvailableBrowsers(finder_options, device):
+  """Find all running iOS browsers on connected devices."""
+  if not isinstance(device, ios_device.IOSDevice):
+    return []
+
+  if not CanFindAvailableBrowsers():
+    return []
+
+  options = finder_options.browser_options
+
+  options.browser_type = 'ios-chrome'
+  host = platform.GetHostPlatform()
+  backend = ios_browser_backend.IosBrowserBackend(host, options)
+  # TODO(baxley): Use idevice to wake up device or log debug statement.
+  if not host.IsApplicationRunning(IOS_WEBKIT_DEBUG_PROXY):
+    host.LaunchApplication(IOS_WEBKIT_DEBUG_PROXY)
+    if not host.IsApplicationRunning(IOS_WEBKIT_DEBUG_PROXY):
+      return []
+
+  device_urls = backend.GetDeviceUrls()
+  if not device_urls:
+    logging.debug('Could not find any devices over %s.'
+                  % IOS_WEBKIT_DEBUG_PROXY)
+    return []
+
+  debug_urls = backend.GetWebSocketDebuggerUrls(device_urls)
+
+  # Get the userAgent for each UIWebView to find the browsers.
+  browser_pattern = (r'\)\s(%s)\/(\d+[\.\d]*)\sMobile'
+                     % '|'.join(IOS_BROWSERS.keys()))
+  browser_types = set()
+  for url in debug_urls:
+    context = {'webSocketDebuggerUrl': url, 'id': 1}
+    try:
+      inspector = inspector_backend.InspectorBackend(
+          backend.app, backend.devtools_client, context)
+      res = inspector.EvaluateJavaScript("navigator.userAgent")
+    finally:
+      inspector.Disconnect()
+    match_browsers = re.search(browser_pattern, res)
+    if match_browsers:
+      browser_types.add(match_browsers.group(1))
+
+  browsers = []
+  for browser_type in browser_types:
+    browsers.append(PossibleIOSBrowser(IOS_BROWSERS[browser_type],
+                                       finder_options))
+  return list(browsers)
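
To illustrate the user-agent matching above, here is what browser_pattern
does with a representative (made-up) iOS Chrome user-agent string:

    import re

    IOS_BROWSERS = {'CriOS': 'ios-chrome', 'Version': 'ios-safari'}
    browser_pattern = (r'\)\s(%s)\/(\d+[\.\d]*)\sMobile'
                       % '|'.join(IOS_BROWSERS.keys()))
    ua = ('Mozilla/5.0 (iPhone; CPU iPhone OS 9_3 like Mac OS X) '
          'AppleWebKit/601.1.46 (KHTML, like Gecko) '
          'CriOS/48.0.2564.87 Mobile/13E233 Safari/601.1.46')
    match = re.search(browser_pattern, ua)
    assert match and IOS_BROWSERS[match.group(1)] == 'ios-chrome'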
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome/ios_browser_finder_unittest.py b/catapult/telemetry/telemetry/internal/backends/chrome/ios_browser_finder_unittest.py
new file mode 100644
index 0000000..b5b6756
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome/ios_browser_finder_unittest.py
@@ -0,0 +1,26 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import unittest
+
+from telemetry import decorators
+from telemetry.internal.backends.chrome import ios_browser_finder
+from telemetry.internal.browser import browser_options
+from telemetry.internal.platform import ios_device
+
+
+class IosBrowserFinderUnitTest(unittest.TestCase):
+  # TODO(baxley): Currently the tests require a device with Chrome running.
+  # This should be stubbed out so it runs on any system, with no device
+  # dependencies.
+  @decorators.Enabled('ios')
+  def testFindIosChrome(self):
+    finder_options = browser_options.BrowserFinderOptions()
+    browsers = ios_browser_finder.FindAllAvailableBrowsers(
+      finder_options, ios_device.IOSDevice())
+    self.assertTrue(browsers)
+    for browser in browsers:
+      self.assertEqual('ios-chrome', browser.browser_type)
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome/misc_web_contents_backend.py b/catapult/telemetry/telemetry/internal/backends/chrome/misc_web_contents_backend.py
new file mode 100644
index 0000000..c2babbd
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome/misc_web_contents_backend.py
@@ -0,0 +1,36 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.core import exceptions
+from telemetry.internal.backends.chrome import oobe
+from telemetry.internal.backends.chrome_inspector import inspector_backend_list
+
+
+class MiscWebContentsBackend(inspector_backend_list.InspectorBackendList):
+  """A dynamic sequence of web contents not related to tabs and extensions.
+
+  Provides access to the chrome://oobe/login page.
+  """
+
+  def __init__(self, browser_backend):
+    super(MiscWebContentsBackend, self).__init__(browser_backend)
+
+  @property
+  def oobe_exists(self):
+    """Lightweight property to determine if the oobe webui is visible."""
+    try:
+      return bool(len(self))
+    except exceptions.Error:
+      return False
+
+  def GetOobe(self):
+    if not len(self):
+      return None
+    return self[0]
+
+  def ShouldIncludeContext(self, context):
+    return context.get('url').startswith('chrome://oobe')
+
+  def CreateWrapper(self, inspector_backend):
+    return oobe.Oobe(inspector_backend)
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome/oobe.py b/catapult/telemetry/telemetry/internal/backends/chrome/oobe.py
new file mode 100644
index 0000000..381ed64
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome/oobe.py
@@ -0,0 +1,117 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from functools import partial
+import logging
+
+from telemetry.core import exceptions
+from telemetry.core import util
+from telemetry.internal.browser import web_contents
+
+
+class Oobe(web_contents.WebContents):
+  def __init__(self, inspector_backend):
+    super(Oobe, self).__init__(inspector_backend)
+
+  def _GaiaIFrameContext(self):
+    max_context_id = self.EnableAllContexts()
+    logging.debug('%d contexts in Gaia page' % max_context_id)
+    for gaia_iframe_context in range(max_context_id + 1):
+      try:
+        if self.EvaluateJavaScriptInContext(
+            "document.readyState == 'complete' && "
+            "document.getElementById('Email') != null",
+            gaia_iframe_context):
+          return gaia_iframe_context
+      except exceptions.EvaluateException:
+        pass
+    return None
+
+  def _GaiaWebviewContext(self):
+    webview_contexts = self.GetWebviewContexts()
+    if webview_contexts:
+      return webview_contexts[0]
+    return None
+
+  def _ExecuteOobeApi(self, api, *args):
+    logging.info('Invoking %s' % api)
+    self.WaitForJavaScriptExpression("typeof Oobe == 'function'", 20)
+
+    if self.EvaluateJavaScript("typeof %s == 'undefined'" % api):
+      raise exceptions.LoginException('%s js api missing' % api)
+
+    js = api + '(' + ("'%s'," * len(args)).rstrip(',') + ');'
+    self.ExecuteJavaScript(js % args)
+
+  def NavigateGuestLogin(self):
+    """Logs in as guest."""
+    self._ExecuteOobeApi('Oobe.guestLoginForTesting')
+
+  def NavigateFakeLogin(self, username, password, gaia_id):
+    """Fake user login."""
+    self._ExecuteOobeApi('Oobe.loginForTesting', username, password, gaia_id)
+
+  def NavigateGaiaLogin(self, username, password,
+                        enterprise_enroll=False,
+                        for_user_triggered_enrollment=False):
+    """Logs in using the GAIA webview or IFrame, whichever is
+    present. |enterprise_enroll| allows for enterprise enrollment.
+    |for_user_triggered_enrollment| should be False for remora enrollment."""
+    self._ExecuteOobeApi('Oobe.skipToLoginForTesting')
+    if for_user_triggered_enrollment:
+      self._ExecuteOobeApi('Oobe.switchToEnterpriseEnrollmentForTesting')
+
+    self._NavigateGaiaLogin(username, password, enterprise_enroll)
+
+    if enterprise_enroll:
+      self.WaitForJavaScriptExpression('Oobe.isEnrollmentSuccessfulForTest()',
+                                       30)
+      self._ExecuteOobeApi('Oobe.enterpriseEnrollmentDone')
+
+  def _NavigateGaiaLogin(self, username, password, enterprise_enroll):
+    """Invokes NavigateIFrameLogin or NavigateWebViewLogin as appropriate."""
+    def _GetGaiaFunction():
+      if self._GaiaWebviewContext() is not None:
+        return partial(Oobe._NavigateWebViewLogin,
+                       wait_for_close=not enterprise_enroll)
+      elif self._GaiaIFrameContext() is not None:
+        return partial(Oobe._NavigateIFrameLogin,
+                       add_user_for_testing=not enterprise_enroll)
+      return None
+    util.WaitFor(_GetGaiaFunction, 20)(self, username, password)
+
+  def _NavigateIFrameLogin(self, username, password, add_user_for_testing):
+    """Logs into the IFrame-based GAIA screen"""
+    gaia_iframe_context = util.WaitFor(self._GaiaIFrameContext, timeout=30)
+
+    if add_user_for_testing:
+      self._ExecuteOobeApi('Oobe.showAddUserForTesting')
+    self.ExecuteJavaScriptInContext("""
+        document.getElementById('Email').value='%s';
+        document.getElementById('Passwd').value='%s';
+        document.getElementById('signIn').click();"""
+            % (username, password),
+        gaia_iframe_context)
+
+  def _NavigateWebViewLogin(self, username, password, wait_for_close):
+    """Logs into the webview-based GAIA screen"""
+    self._NavigateWebViewEntry('identifierId', username, 'identifierNext')
+    self._NavigateWebViewEntry('password', password, 'next')
+    if wait_for_close:
+      util.WaitFor(lambda: not self._GaiaWebviewContext(), 20)
+
+  def _NavigateWebViewEntry(self, field, value, nextField):
+    self._WaitForField(field)
+    self._WaitForField(nextField)
+    gaia_webview_context = self._GaiaWebviewContext()
+    gaia_webview_context.EvaluateJavaScript("""
+       document.getElementById('%s').value='%s';
+       document.getElementById('%s').click()"""
+           % (field, value, nextField))
+
+  def _WaitForField(self, field_id):
+    gaia_webview_context = util.WaitFor(self._GaiaWebviewContext, 5)
+    util.WaitFor(gaia_webview_context.HasReachedQuiescence, 20)
+    gaia_webview_context.WaitForJavaScriptExpression(
+        "document.getElementById('%s') != null" % field_id, 20)
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome/system_info_backend.py b/catapult/telemetry/telemetry/internal/backends/chrome/system_info_backend.py
new file mode 100644
index 0000000..72dbd3b
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome/system_info_backend.py
@@ -0,0 +1,30 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.internal.backends.chrome_inspector import inspector_websocket
+from telemetry.internal.platform import system_info
+from telemetry.internal.util import camel_case
+
+
+class SystemInfoBackend(object):
+  def __init__(self, devtools_port, devtools_page=None):
+    self._port = devtools_port
+    self._page = devtools_page
+
+  def GetSystemInfo(self, timeout=10):
+    req = {'method': 'SystemInfo.getInfo'}
+    websocket = inspector_websocket.InspectorWebsocket()
+    try:
+      if self._page:
+        websocket.Connect('ws://127.0.0.1:%i/devtools/page/%i' %
+                          (self._port, self._page))
+      else:
+        websocket.Connect('ws://127.0.0.1:%i/devtools/browser' % self._port)
+      res = websocket.SyncRequest(req, timeout)
+    finally:
+      websocket.Disconnect()
+    if 'error' in res:
+      return None
+    return system_info.SystemInfo.FromDict(
+        camel_case.ToUnderscore(res['result']))
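
A minimal sketch of how SystemInfoBackend above can be used against a locally
running Chrome started with --remote-debugging-port=9222 (the port is a
placeholder):

    from telemetry.internal.backends.chrome import system_info_backend

    backend = system_info_backend.SystemInfoBackend(9222)
    info = backend.GetSystemInfo(timeout=10)
    print info  # None if the DevTools request returned an error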
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome/tab_list_backend.py b/catapult/telemetry/telemetry/internal/backends/chrome/tab_list_backend.py
new file mode 100644
index 0000000..61c75ff
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome/tab_list_backend.py
@@ -0,0 +1,111 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+
+from telemetry.core import exceptions
+from telemetry.core import util
+from telemetry.internal.backends.chrome_inspector import inspector_backend_list
+from telemetry.internal.browser import tab
+
+
+class TabUnexpectedResponseException(exceptions.DevtoolsTargetCrashException):
+  pass
+
+
+class TabListBackend(inspector_backend_list.InspectorBackendList):
+  """A dynamic sequence of tab.Tabs in UI order."""
+
+  def __init__(self, browser_backend):
+    super(TabListBackend, self).__init__(browser_backend)
+
+  def New(self, timeout):
+    """Makes a new tab.
+
+    Returns:
+      A Tab object.
+
+    Raises:
+      devtools_http.DevToolsClientConnectionError
+    """
+    if not self._browser_backend.supports_tab_control:
+      raise NotImplementedError("Browser doesn't support tab control.")
+    response = self._browser_backend.devtools_client.RequestNewTab(timeout)
+    try:
+      response = json.loads(response)
+      context_id = response['id']
+    except (KeyError, ValueError):
+      raise TabUnexpectedResponseException(
+          app=self._browser_backend.browser,
+          msg='Received response: %s' % response)
+    return self.GetBackendFromContextId(context_id)
+
+  def CloseTab(self, tab_id, timeout=300):
+    """Closes the tab with the given debugger_url.
+
+    Raises:
+      devtools_http.DevToolsClientConnectionError
+      devtools_client_backend.TabNotFoundError
+      TabUnexpectedResponseException
+      exceptions.TimeoutException
+    """
+    assert self._browser_backend.supports_tab_control
+    # TODO(dtu): crbug.com/160946, allow closing the last tab on some platforms.
+    # For now, just create a new tab before closing the last tab.
+    if len(self) <= 1:
+      self.New(timeout)
+
+    response = self._browser_backend.devtools_client.CloseTab(tab_id, timeout)
+
+    if response != 'Target is closing':
+      raise TabUnexpectedResponseException(
+          app=self._browser_backend.browser,
+          msg='Received response: %s' % response)
+
+    util.WaitFor(lambda: tab_id not in self.IterContextIds(), timeout=5)
+
+  def ActivateTab(self, tab_id, timeout=30):
+    """Activates the tab with the given debugger_url.
+
+    Raises:
+      devtools_http.DevToolsClientConnectionError
+      devtools_client_backend.TabNotFoundError
+      TabUnexpectedResponseException
+    """
+    assert self._browser_backend.supports_tab_control
+
+    response = self._browser_backend.devtools_client.ActivateTab(tab_id,
+                                                                 timeout)
+
+    if response != 'Target activated':
+      raise TabUnexpectedResponseException(
+          app=self._browser_backend.browser,
+          msg='Received response: %s' % response)
+
+  def Get(self, index, ret):
+    """Returns self[index] if it exists, or ret if index is out of bounds."""
+    if len(self) <= index:
+      return ret
+    return self[index]
+
+  def ShouldIncludeContext(self, context):
+    if 'type' in context:
+      return (context['type'] == 'page' or
+              context['url'] == 'chrome://media-router/')
+    # TODO: For compatibility with Chrome before r177683.
+    # This check is not completely correct; see crbug.com/190592.
+    return not context['url'].startswith('chrome-extension://')
+
+  def CreateWrapper(self, inspector_backend):
+    return tab.Tab(inspector_backend, self, self._browser_backend.browser)
+
+  def _HandleDevToolsConnectionError(self, error):
+    if not self._browser_backend.IsAppRunning():
+      error.AddDebuggingMessage('The browser is not running. It probably '
+                                'crashed.')
+    elif not self._browser_backend.HasBrowserFinishedLaunching():
+      error.AddDebuggingMessage('The browser exists but cannot be reached.')
+    else:
+      error.AddDebuggingMessage('The browser exists and can be reached. '
+                                'The devtools target probably crashed.')
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome/tab_list_backend_unittest.py b/catapult/telemetry/telemetry/internal/backends/chrome/tab_list_backend_unittest.py
new file mode 100644
index 0000000..b05b9cd
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome/tab_list_backend_unittest.py
@@ -0,0 +1,49 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.core import exceptions
+from telemetry import decorators
+from telemetry.testing import tab_test_case
+
+
+class TabListBackendTest(tab_test_case.TabTestCase):
+  @decorators.Enabled('has tabs')
+  def testNewTab(self):
+    tabs = set(tab.id for tab in self.tabs)
+    for _ in xrange(10):
+      new_tab_id = self.tabs.New().id
+      self.assertNotIn(new_tab_id, tabs)
+      tabs.add(new_tab_id)
+      new_tabs = set(tab.id for tab in self.tabs)
+      self.assertEqual(tabs, new_tabs)
+
+  @decorators.Enabled('has tabs')
+  def testTabIdMatchesContextId(self):
+    # Ensure that there are two tabs.
+    while len(self.tabs) < 2:
+      self.tabs.New()
+
+    # Check that the tab.id matches context_id.
+    tabs = []
+    for context_id in self.tabs._tab_list_backend.IterContextIds():
+      tab = self.tabs.GetTabById(context_id)
+      self.assertEquals(tab.id, context_id)
+      tabs.append(self.tabs.GetTabById(context_id))
+
+  @decorators.Enabled('has tabs')
+  def testTabIdStableAfterTabCrash(self):
+    # Ensure that there are two tabs.
+    while len(self.tabs) < 2:
+      self.tabs.New()
+
+    tabs = [t for t in self.tabs]
+
+    # Crash the first tab.
+    self.assertRaises(exceptions.DevtoolsTargetCrashException,
+        lambda: tabs[0].Navigate('chrome://crash'))
+
+    # Fetching the second tab by id should still work. Fetching the first tab
+    # should raise an exception.
+    self.assertEquals(tabs[1], self.tabs.GetTabById(tabs[1].id))
+    self.assertRaises(KeyError, lambda: self.tabs.GetTabById(tabs[0].id))
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome_inspector/__init__.py b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/__init__.py
new file mode 100644
index 0000000..83c375c
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/__init__.py
@@ -0,0 +1,6 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""This package contains classes and methods for controlling the chrome
+devtool.
+"""
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome_inspector/devtools_client_backend.py b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/devtools_client_backend.py
new file mode 100644
index 0000000..123629e
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/devtools_client_backend.py
@@ -0,0 +1,475 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import re
+import socket
+import sys
+
+from telemetry.core import exceptions
+from telemetry import decorators
+from telemetry.internal.backends import browser_backend
+from telemetry.internal.backends.chrome_inspector import devtools_http
+from telemetry.internal.backends.chrome_inspector import inspector_backend
+from telemetry.internal.backends.chrome_inspector import inspector_websocket
+from telemetry.internal.backends.chrome_inspector import memory_backend
+from telemetry.internal.backends.chrome_inspector import tracing_backend
+from telemetry.internal.backends.chrome_inspector import websocket
+from telemetry.internal.platform.tracing_agent import chrome_tracing_agent
+from telemetry.internal.platform.tracing_agent import (
+    chrome_tracing_devtools_manager)
+from telemetry.timeline import trace_data as trace_data_module
+
+
+BROWSER_INSPECTOR_WEBSOCKET_URL = 'ws://127.0.0.1:%i/devtools/browser'
+
+
+class TabNotFoundError(exceptions.Error):
+  pass
+
+
+def IsDevToolsAgentAvailable(port, app_backend):
+  """Returns True if a DevTools agent is available on the given port."""
+  if (isinstance(app_backend, browser_backend.BrowserBackend) and
+      app_backend.supports_tracing):
+    inspector_websocket_instance = inspector_websocket.InspectorWebsocket()
+    try:
+      if not _IsInspectorWebsocketAvailable(inspector_websocket_instance, port):
+        return False
+    finally:
+      inspector_websocket_instance.Disconnect()
+
+  devtools_http_instance = devtools_http.DevToolsHttp(port)
+  try:
+    return _IsDevToolsAgentAvailable(devtools_http_instance)
+  finally:
+    devtools_http_instance.Disconnect()
+
+
+def _IsInspectorWebsocketAvailable(inspector_websocket_instance, port):
+  try:
+    inspector_websocket_instance.Connect(BROWSER_INSPECTOR_WEBSOCKET_URL % port)
+  except websocket.WebSocketException:
+    return False
+  except socket.error:
+    return False
+  except Exception as e:
+    sys.stderr.write('Unidentified exception while checking if websocket is '
+                     'available on port %i. Exception message: %s\n' %
+                     (port, e.message))
+    return False
+  else:
+    return True
+
+
+# TODO(nednguyen): Find a more reliable way to check whether the DevTools
+# agent is still alive.
+def _IsDevToolsAgentAvailable(devtools_http_instance):
+  try:
+    devtools_http_instance.Request('')
+  except devtools_http.DevToolsClientConnectionError:
+    return False
+  else:
+    return True
+
+
+class DevToolsClientBackend(object):
+  """An object that communicates with Chrome's devtools.
+
+  This class owns a map of InspectorBackends. It is responsible for creating
+  them and destroying them.
+  """
+  def __init__(self, devtools_port, remote_devtools_port, app_backend):
+    """Creates a new DevToolsClientBackend.
+
+    A DevTools agent must exist on the given devtools_port.
+
+    Args:
+      devtools_port: The port to use to connect to DevTools agent.
+      remote_devtools_port: In some cases (e.g., when the app runs on an
+          Android device), devtools_port is the forwarded port on the
+          host platform. We also need to know the remote_devtools_port
+          so that we can uniquely identify the DevTools agent.
+      app_backend: For the app that contains the DevTools agent.
+    """
+    self._devtools_port = devtools_port
+    self._remote_devtools_port = remote_devtools_port
+    self._devtools_http = devtools_http.DevToolsHttp(devtools_port)
+    self._browser_inspector_websocket = None
+    self._tracing_backend = None
+    self._memory_backend = None
+    self._app_backend = app_backend
+    self._devtools_context_map_backend = _DevToolsContextMapBackend(
+        self._app_backend, self)
+
+    if not self.supports_tracing:
+      return
+    chrome_tracing_devtools_manager.RegisterDevToolsClient(
+        self, self._app_backend.platform_backend)
+
+    # Telemetry has started Chrome tracing if there is trace config, so start
+    # tracing on this newly created devtools client if needed.
+    trace_config = (self._app_backend.platform_backend
+                    .tracing_controller_backend.GetChromeTraceConfig())
+    if not trace_config:
+      self._CreateTracingBackendIfNeeded(is_tracing_running=False)
+      return
+
+    if self.support_startup_tracing:
+      self._CreateTracingBackendIfNeeded(is_tracing_running=True)
+      return
+
+    self._CreateTracingBackendIfNeeded(is_tracing_running=False)
+    self.StartChromeTracing(
+        trace_config=trace_config,
+        custom_categories=trace_config.tracing_category_filter.filter_string)
+
+  @property
+  def remote_port(self):
+    return self._remote_devtools_port
+
+  @property
+  def supports_tracing(self):
+    if not isinstance(self._app_backend, browser_backend.BrowserBackend):
+      return False
+    return self._app_backend.supports_tracing
+
+  @property
+  def supports_overriding_memory_pressure_notifications(self):
+    if not isinstance(self._app_backend, browser_backend.BrowserBackend):
+      return False
+    return self._app_backend.supports_overriding_memory_pressure_notifications
+
+  @property
+  def is_tracing_running(self):
+    if not self.supports_tracing:
+      return False
+    if not self._tracing_backend:
+      return False
+    return self._tracing_backend.is_tracing_running
+
+  @property
+  def support_startup_tracing(self):
+    # Startup tracing with --trace-config-file flag was not supported until
+    # Chromium branch number 2512 (see crrev.com/1309243004 and
+    # crrev.com/1353583002).
+    if not chrome_tracing_agent.ChromeTracingAgent.IsStartupTracingSupported(
+        self._app_backend.platform_backend):
+      return False
+    # TODO(zhenw): Remove this once stable Chrome and reference browser have
+    # passed 2512.
+    return self.GetChromeBranchNumber() >= 2512
+
+  def IsAlive(self):
+    """Whether the DevTools server is available and connectable."""
+    return (self._devtools_http and
+        _IsDevToolsAgentAvailable(self._devtools_http))
+
+  def Close(self):
+    if self._tracing_backend:
+      self._tracing_backend.Close()
+      self._tracing_backend = None
+    if self._memory_backend:
+      self._memory_backend.Close()
+      self._memory_backend = None
+
+    if self._devtools_context_map_backend:
+      self._devtools_context_map_backend.Clear()
+
+    # Close the browser inspector socket last (in case the backend needs to
+    # interact with it before closing).
+    if self._browser_inspector_websocket:
+      self._browser_inspector_websocket.Disconnect()
+      self._browser_inspector_websocket = None
+
+    assert self._devtools_http
+    self._devtools_http.Disconnect()
+    self._devtools_http = None
+
+  @decorators.Cache
+  def GetChromeBranchNumber(self):
+    # Detect version information.
+    resp = self._devtools_http.RequestJson('version')
+    if 'Protocol-Version' in resp:
+      if 'Browser' in resp:
+        branch_number_match = re.search(r'Chrome/\d+\.\d+\.(\d+)\.\d+',
+                                        resp['Browser'])
+      else:
+        branch_number_match = re.search(
+            r'Chrome/\d+\.\d+\.(\d+)\.\d+ (Mobile )?Safari',
+            resp['User-Agent'])
+
+      if branch_number_match:
+        branch_number = int(branch_number_match.group(1))
+        if branch_number:
+          return branch_number
+
+    # Branch number can't be determined, so fail any branch number checks.
+    return 0
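+
+  # Worked example (comment only): for a /json/version response whose
+  # 'Browser' field is 'Chrome/45.0.2454.85', the regex above captures the
+  # third version component, so GetChromeBranchNumber() would return 2454.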
+
+  def _ListInspectableContexts(self):
+    return self._devtools_http.RequestJson('')
+
+  def RequestNewTab(self, timeout):
+    """Creates a new tab.
+
+    Returns:
+      A JSON string as returned by DevTools. Example:
+      {
+        "description": "",
+        "devtoolsFrontendUrl":
+            "/devtools/inspector.html?ws=host:port/devtools/page/id-string",
+        "id": "id-string",
+        "title": "Page Title",
+        "type": "page",
+        "url": "url",
+        "webSocketDebuggerUrl": "ws://host:port/devtools/page/id-string"
+      }
+
+    Raises:
+      devtools_http.DevToolsClientConnectionError
+    """
+    return self._devtools_http.Request('new', timeout=timeout)
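+
+  # Hedged usage sketch (comment only, not in the original): the returned JSON
+  # string can be parsed by the caller, e.g. to find the new tab's websocket
+  # debugger URL. 'client' is a hypothetical DevToolsClientBackend instance.
+  #
+  #   import json
+  #   tab_info = json.loads(client.RequestNewTab(timeout=30))
+  #   ws_url = tab_info['webSocketDebuggerUrl']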
+
+  def CloseTab(self, tab_id, timeout):
+    """Closes the tab with the given id.
+
+    Raises:
+      devtools_http.DevToolsClientConnectionError
+      TabNotFoundError
+    """
+    try:
+      return self._devtools_http.Request('close/%s' % tab_id,
+                                         timeout=timeout)
+    except devtools_http.DevToolsClientUrlError:
+      error = TabNotFoundError(
+          'Unable to close tab, tab id not found: %s' % tab_id)
+      raise error, None, sys.exc_info()[2]
+
+  def ActivateTab(self, tab_id, timeout):
+    """Activates the tab with the given id.
+
+    Raises:
+      devtools_http.DevToolsClientConnectionError
+      TabNotFoundError
+    """
+    try:
+      return self._devtools_http.Request('activate/%s' % tab_id,
+                                         timeout=timeout)
+    except devtools_http.DevToolsClientUrlError:
+      error = TabNotFoundError(
+          'Unable to activate tab, tab id not found: %s' % tab_id)
+      raise error, None, sys.exc_info()[2]
+
+  def GetUrl(self, tab_id):
+    """Returns the URL of the tab with |tab_id|, as reported by devtools.
+
+    Raises:
+      devtools_http.DevToolsClientConnectionError
+    """
+    for c in self._ListInspectableContexts():
+      if c['id'] == tab_id:
+        return c['url']
+    return None
+
+  def IsInspectable(self, tab_id):
+    """Whether the tab with |tab_id| is inspectable, as reported by devtools.
+
+    Raises:
+      devtools_http.DevToolsClientConnectionError
+    """
+    contexts = self._ListInspectableContexts()
+    return tab_id in [c['id'] for c in contexts]
+
+  def GetUpdatedInspectableContexts(self):
+    """Returns an updated instance of _DevToolsContextMapBackend."""
+    contexts = self._ListInspectableContexts()
+    self._devtools_context_map_backend._Update(contexts)
+    return self._devtools_context_map_backend
+
+  def _CreateTracingBackendIfNeeded(self, is_tracing_running=False):
+    assert self.supports_tracing
+    if not self._tracing_backend:
+      self._CreateAndConnectBrowserInspectorWebsocketIfNeeded()
+      self._tracing_backend = tracing_backend.TracingBackend(
+          self._browser_inspector_websocket, is_tracing_running)
+
+  def _CreateMemoryBackendIfNeeded(self):
+    assert self.supports_overriding_memory_pressure_notifications
+    if not self._memory_backend:
+      self._CreateAndConnectBrowserInspectorWebsocketIfNeeded()
+      self._memory_backend = memory_backend.MemoryBackend(
+          self._browser_inspector_websocket)
+
+  def _CreateAndConnectBrowserInspectorWebsocketIfNeeded(self):
+    if not self._browser_inspector_websocket:
+      self._browser_inspector_websocket = (
+          inspector_websocket.InspectorWebsocket())
+      self._browser_inspector_websocket.Connect(
+          BROWSER_INSPECTOR_WEBSOCKET_URL % self._devtools_port)
+
+  def IsChromeTracingSupported(self):
+    if not self.supports_tracing:
+      return False
+    self._CreateTracingBackendIfNeeded()
+    return self._tracing_backend.IsTracingSupported()
+
+  def StartChromeTracing(
+      self, trace_config, custom_categories=None, timeout=10):
+    """
+    Args:
+        trace_config: An tracing_config.TracingConfig instance.
+        custom_categories: An optional string containing a list of
+                         comma separated categories that will be traced
+                         instead of the default category set.  Example: use
+                         "webkit,cc,disabled-by-default-cc.debug" to trace only
+                         those three event categories.
+    """
+    assert trace_config and trace_config.enable_chrome_trace
+    self._CreateTracingBackendIfNeeded()
+    return self._tracing_backend.StartTracing(
+        trace_config, custom_categories, timeout)
+
+  def StopChromeTracing(self, trace_data_builder, timeout=30):
+    assert self.is_tracing_running
+    try:
+      context_map = self.GetUpdatedInspectableContexts()
+      for context in context_map.contexts:
+        if context['type'] not in ['iframe', 'page', 'webview']:
+          continue
+        context_id = context['id']
+        backend = context_map.GetInspectorBackend(context_id)
+        success = backend.EvaluateJavaScript(
+            "console.time('" + backend.id + "');" +
+            "console.timeEnd('" + backend.id + "');" +
+            "console.time.toString().indexOf('[native code]') != -1;")
+        if not success:
+          raise Exception('Page stomped on console.time')
+        trace_data_builder.AddEventsTo(
+            trace_data_module.TAB_ID_PART, [backend.id])
+    finally:
+      self._tracing_backend.StopTracing(trace_data_builder, timeout)
+
+  def DumpMemory(self, timeout=30):
+    """Dumps memory.
+
+    Returns:
+      GUID of the generated dump if successful, None otherwise.
+
+    Raises:
+      TracingTimeoutException: If more than |timeout| seconds have passed
+          since the last time any data was received.
+      TracingUnrecoverableException: If there is a websocket error.
+      TracingUnexpectedResponseException: If the response contains an error
+          or does not contain the expected result.
+    """
+    self._CreateTracingBackendIfNeeded()
+    return self._tracing_backend.DumpMemory(timeout)
+
+  def SetMemoryPressureNotificationsSuppressed(self, suppressed, timeout=30):
+    """Enable/disable suppressing memory pressure notifications.
+
+    Args:
+      suppressed: If true, memory pressure notifications will be suppressed.
+      timeout: The timeout in seconds.
+
+    Raises:
+      MemoryTimeoutException: If more than |timeout| seconds have passed
+          since the last time any data was received.
+      MemoryUnrecoverableException: If there is a websocket error.
+      MemoryUnexpectedResponseException: If the response contains an error
+          or does not contain the expected result.
+    """
+    self._CreateMemoryBackendIfNeeded()
+    return self._memory_backend.SetMemoryPressureNotificationsSuppressed(
+        suppressed, timeout)
+
+  def SimulateMemoryPressureNotification(self, pressure_level, timeout=30):
+    """Simulate a memory pressure notification.
+
+    Args:
+      pressure_level: The memory pressure level of the notification
+          ('moderate' or 'critical').
+      timeout: The timeout in seconds.
+
+    Raises:
+      MemoryTimeoutException: If more than |timeout| seconds have passed
+          since the last time any data was received.
+      MemoryUnrecoverableException: If there is a websocket error.
+      MemoryUnexpectedResponseException: If the response contains an error
+          or does not contain the expected result.
+    """
+    self._CreateMemoryBackendIfNeeded()
+    return self._memory_backend.SimulateMemoryPressureNotification(
+        pressure_level, timeout)
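+
+  # Hedged usage sketch (comment only): a typical memory-pressure test flow
+  # combining the two methods above; 'client' is a hypothetical
+  # DevToolsClientBackend instance.
+  #
+  #   client.SetMemoryPressureNotificationsSuppressed(True)
+  #   client.SimulateMemoryPressureNotification('critical')
+  #   client.SetMemoryPressureNotificationsSuppressed(False)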
+
+
+class _DevToolsContextMapBackend(object):
+  def __init__(self, app_backend, devtools_client):
+    self._app_backend = app_backend
+    self._devtools_client = devtools_client
+    self._contexts = None
+    self._inspector_backends_dict = {}
+
+  @property
+  def contexts(self):
+    """The most up to date contexts data.
+
+    Returned in the order returned by devtools agent."""
+    return self._contexts
+
+  def GetContextInfo(self, context_id):
+    for context in self._contexts:
+      if context['id'] == context_id:
+        return context
+    raise KeyError('Cannot find a context with id=%s' % context_id)
+
+  def GetInspectorBackend(self, context_id):
+    """Gets an InspectorBackend instance for the given context_id.
+
+    This lazily creates InspectorBackend for the context_id if it does
+    not exist yet. Otherwise, it will return the cached instance."""
+    if context_id in self._inspector_backends_dict:
+      return self._inspector_backends_dict[context_id]
+
+    for context in self._contexts:
+      if context['id'] == context_id:
+        new_backend = inspector_backend.InspectorBackend(
+            self._app_backend.app, self._devtools_client, context)
+        self._inspector_backends_dict[context_id] = new_backend
+        return new_backend
+
+    raise KeyError('Cannot find a context with id=%s' % context_id)
+
+  def _Update(self, contexts):
+    # Remove InspectorBackend that is not in the current inspectable
+    # contexts list.
+    context_ids = [context['id'] for context in contexts]
+    for context_id in self._inspector_backends_dict.keys():
+      if context_id not in context_ids:
+        backend = self._inspector_backends_dict[context_id]
+        backend.Disconnect()
+        del self._inspector_backends_dict[context_id]
+
+    valid_contexts = []
+    for context in contexts:
+      # If the context does not have webSocketDebuggerUrl, skip it.
+      # If an InspectorBackend is already created for the tab,
+      # webSocketDebuggerUrl will be missing, and this is expected.
+      context_id = context['id']
+      if context_id not in self._inspector_backends_dict:
+        if 'webSocketDebuggerUrl' not in context:
+          logging.debug('webSocketDebuggerUrl missing, removing %s'
+                        % context_id)
+          continue
+      valid_contexts.append(context)
+    self._contexts = valid_contexts
+
+  def Clear(self):
+    for backend in self._inspector_backends_dict.values():
+      backend.Disconnect()
+    self._inspector_backends_dict = {}
+    self._contexts = None
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome_inspector/devtools_client_backend_unittest.py b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/devtools_client_backend_unittest.py
new file mode 100644
index 0000000..7efe1fd
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/devtools_client_backend_unittest.py
@@ -0,0 +1,92 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry import decorators
+from telemetry.testing import browser_test_case
+from telemetry.timeline import model
+from telemetry.timeline import trace_data
+from telemetry.timeline import tracing_config
+
+
+class DevToolsClientBackendTest(browser_test_case.BrowserTestCase):
+  @property
+  def _browser_backend(self):
+    return self._browser._browser_backend
+
+  @property
+  def _devtools_client(self):
+    return self._browser_backend.devtools_client
+
+  def testGetChromeBranchNumber(self):
+    branch_num = self._devtools_client.GetChromeBranchNumber()
+    self.assertIsInstance(branch_num, int)
+    self.assertGreater(branch_num, 0)
+
+  def testIsAlive(self):
+    self.assertTrue(self._devtools_client.IsAlive())
+
+  @decorators.Enabled('has tabs')
+  @decorators.Disabled('chromeos')  # crbug.com/483212
+  def testGetUpdatedInspectableContexts(self):
+    self._browser.tabs.New()
+    c1 = self._devtools_client.GetUpdatedInspectableContexts()
+    self.assertEqual(len(c1.contexts), 2)
+    backends1 = [c1.GetInspectorBackend(c['id']) for c in c1.contexts]
+    tabs1 = list(self._browser.tabs)
+
+    c2 = self._devtools_client.GetUpdatedInspectableContexts()
+    self.assertEqual(len(c2.contexts), 2)
+    backends2 = [c2.GetInspectorBackend(c['id']) for c in c2.contexts]
+    tabs2 = list(self._browser.tabs)
+    self.assertEqual(backends2, backends1)
+    self.assertEqual(tabs2, tabs1)
+
+    self._browser.tabs.New()
+    c3 = self._devtools_client.GetUpdatedInspectableContexts()
+    self.assertEqual(len(c3.contexts), 3)
+    backends3 = [c3.GetInspectorBackend(c['id']) for c in c3.contexts]
+    tabs3 = list(self._browser.tabs)
+    self.assertEqual(backends3[1], backends1[0])
+    self.assertEqual(backends3[2], backends1[1])
+    self.assertEqual(tabs3[0], tabs1[0])
+    self.assertEqual(tabs3[1], tabs1[1])
+
+    self._browser.tabs[1].Close()
+    c4 = self._devtools_client.GetUpdatedInspectableContexts()
+    self.assertEqual(len(c4.contexts), 2)
+    backends4 = [c4.GetInspectorBackend(c['id']) for c in c4.contexts]
+    tabs4 = list(self._browser.tabs)
+    self.assertEqual(backends4[0], backends3[0])
+    self.assertEqual(backends4[1], backends3[1])
+    self.assertEqual(tabs4[0], tabs3[0])
+    self.assertEqual(tabs4[1], tabs3[2])
+
+  @decorators.Disabled('chromeos')  # crbug.com/483212
+  def testGetUpdatedInspectableContextsUpdateContextsData(self):
+    c1 = self._devtools_client.GetUpdatedInspectableContexts()
+    self.assertEqual(len(c1.contexts), 1)
+    self.assertEqual(c1.contexts[0]['url'], 'about:blank')
+
+    context_id = c1.contexts[0]['id']
+    backend = c1.GetInspectorBackend(context_id)
+    backend.Navigate(self.UrlOfUnittestFile('blank.html'), None, 10)
+    c2 = self._devtools_client.GetUpdatedInspectableContexts()
+    self.assertEqual(len(c2.contexts), 1)
+    self.assertTrue('blank.html' in c2.contexts[0]['url'])
+    self.assertEqual(c2.GetInspectorBackend(context_id), backend)
+
+  def testTracing(self):
+    devtools_client = self._devtools_client
+    if not devtools_client.IsChromeTracingSupported():
+      self.skipTest('Browser does not support tracing, skipping test.')
+
+    # Start Chrome tracing.
+    config = tracing_config.TracingConfig()
+    config.enable_chrome_trace = True
+    devtools_client.StartChromeTracing(config)
+
+    # Stop Chrome tracing and check that the resulting data is valid.
+    builder = trace_data.TraceDataBuilder()
+    devtools_client.StopChromeTracing(builder)
+    model.TimelineModel(builder.AsData())
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome_inspector/devtools_http.py b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/devtools_http.py
new file mode 100644
index 0000000..fecd768
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/devtools_http.py
@@ -0,0 +1,107 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import errno
+import httplib
+import json
+import socket
+import sys
+
+from telemetry.core import exceptions
+
+
+class DevToolsClientConnectionError(exceptions.Error):
+  pass
+
+
+class DevToolsClientUrlError(DevToolsClientConnectionError):
+  pass
+
+
+class DevToolsHttp(object):
+  """A helper class to send and parse DevTools HTTP requests.
+
+  This class maintains a persistent http connection to Chrome devtools.
+  Ideally, owners of instances of this class should call Disconnect() before
+  disposing of the instance. Otherwise, the connection will not be closed until
+  the instance is garbage collected.
+  """
+
+  def __init__(self, devtools_port):
+    self._devtools_port = devtools_port
+    self._conn = None
+
+  def __del__(self):
+    self.Disconnect()
+
+  def _Connect(self, timeout):
+    """Attempts to establish a connection to Chrome devtools."""
+    assert not self._conn
+    try:
+      host_port = '127.0.0.1:%i' % self._devtools_port
+      self._conn = httplib.HTTPConnection(host_port, timeout=timeout)
+    except (socket.error, httplib.HTTPException) as e:
+      raise DevToolsClientConnectionError, (e,), sys.exc_info()[2]
+
+  def Disconnect(self):
+    """Closes the HTTP connection."""
+    if not self._conn:
+      return
+
+    try:
+      self._conn.close()
+    except (socket.error, httplib.HTTPException) as e:
+      raise DevToolsClientConnectionError, (e,), sys.exc_info()[2]
+    finally:
+      self._conn = None
+
+  def Request(self, path, timeout=30):
+    """Sends a request to Chrome devtools.
+
+    This method lazily creates an HTTP connection, if one does not already
+    exist.
+
+    Args:
+      path: The DevTools URL path, without the /json/ prefix.
+      timeout: Timeout defaults to 30 seconds.
+
+    Raises:
+      DevToolsClientConnectionError: If the connection fails.
+    """
+    assert timeout
+
+    if not self._conn:
+      self._Connect(timeout)
+
+    endpoint = '/json'
+    if path:
+      endpoint += '/' + path
+    if self._conn.sock:
+      self._conn.sock.settimeout(timeout)
+    else:
+      self._conn.timeout = timeout
+
+    try:
+      # By default, httplib avoids going through the default system proxy.
+      self._conn.request('GET', endpoint)
+      response = self._conn.getresponse()
+      return response.read()
+    except (socket.error, httplib.HTTPException) as e:
+      self.Disconnect()
+      if isinstance(e, socket.error) and e.errno == errno.ECONNREFUSED:
+        raise DevToolsClientUrlError, (e,), sys.exc_info()[2]
+      raise DevToolsClientConnectionError, (e,), sys.exc_info()[2]
+
+  def RequestJson(self, path, timeout=30):
+    """Sends a request and parse the response as JSON.
+
+    Args:
+      path: The DevTools URL path, without the /json/ prefix.
+      timeout: Timeout defaults to 30 seconds.
+
+    Raises:
+      DevToolsClientConnectionError: If the connection fails.
+      ValueError: If the response is not a valid JSON.
+    """
+    return json.loads(self.Request(path, timeout))
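+
+
+# Hedged usage sketch (comment only, not part of the original change): how a
+# caller might use DevToolsHttp directly against a locally forwarded DevTools
+# port; 9222 is a hypothetical port number.
+#
+#   client = DevToolsHttp(9222)
+#   try:
+#     version_info = client.RequestJson('version')
+#   finally:
+#     client.Disconnect()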
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome_inspector/devtools_http_unittest.py b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/devtools_http_unittest.py
new file mode 100644
index 0000000..ff397ff
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/devtools_http_unittest.py
@@ -0,0 +1,19 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.internal.backends.chrome_inspector import devtools_http
+
+
+class DevToolsHttpTest(unittest.TestCase):
+
+  def testUrlError(self):
+    with self.assertRaises(devtools_http.DevToolsClientUrlError):
+      devtools_http.DevToolsHttp(1000).Request('')
+
+  def testSocketError(self):
+    with self.assertRaises(devtools_http.DevToolsClientConnectionError) as e:
+      devtools_http.DevToolsHttp(1000).Request('')
+      self.assertNotIsInstance(e, devtools_http.DevToolsClientUrlError)
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_backend.py b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_backend.py
new file mode 100644
index 0000000..29ddf87
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_backend.py
@@ -0,0 +1,346 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import functools
+import logging
+import os
+import socket
+import sys
+
+from telemetry.core import exceptions
+from telemetry import decorators
+from telemetry.internal.backends.chrome_inspector import devtools_http
+from telemetry.internal.backends.chrome_inspector import inspector_console
+from telemetry.internal.backends.chrome_inspector import inspector_memory
+from telemetry.internal.backends.chrome_inspector import inspector_page
+from telemetry.internal.backends.chrome_inspector import inspector_runtime
+from telemetry.internal.backends.chrome_inspector import inspector_websocket
+from telemetry.internal.backends.chrome_inspector import websocket
+
+
+def _HandleInspectorWebSocketExceptions(func):
+  """Decorator for converting inspector_websocket exceptions.
+
+  When an inspector_websocket exception is thrown in the original function,
+  this decorator converts it into a telemetry exception and adds debugging
+  information.
+  """
+  @functools.wraps(func)
+  def inner(inspector_backend, *args, **kwargs):
+    try:
+      return func(inspector_backend, *args, **kwargs)
+    except (socket.error, websocket.WebSocketException,
+            inspector_websocket.WebSocketDisconnected) as e:
+      inspector_backend._ConvertExceptionFromInspectorWebsocket(e)
+
+  return inner
+
+
+class InspectorBackend(object):
+  """Class for communicating with a devtools client.
+
+  The owner of an instance of this class is responsible for calling
+  Disconnect() before disposing of the instance.
+  """
+  def __init__(self, app, devtools_client, context, timeout=60):
+    self._websocket = inspector_websocket.InspectorWebsocket()
+    self._websocket.RegisterDomain(
+        'Inspector', self._HandleInspectorDomainNotification)
+
+    self._app = app
+    self._devtools_client = devtools_client
+    # Be careful when using the context object: its data may be outdated,
+    # because it is never updated once the InspectorBackend is created.
+    # Consider an updating strategy for this. (For an example of the
+    # subtlety, see the logic for the self.url property.)
+    self._context = context
+
+    logging.debug('InspectorBackend._Connect() to %s', self.debugger_url)
+    try:
+      self._websocket.Connect(self.debugger_url)
+      self._console = inspector_console.InspectorConsole(self._websocket)
+      self._memory = inspector_memory.InspectorMemory(self._websocket)
+      self._page = inspector_page.InspectorPage(
+          self._websocket, timeout=timeout)
+      self._runtime = inspector_runtime.InspectorRuntime(self._websocket)
+    except (websocket.WebSocketException, exceptions.TimeoutException) as e:
+      self._ConvertExceptionFromInspectorWebsocket(e)
+
+  def Disconnect(self):
+    """Disconnects the inspector websocket.
+
+    This method intentionally leaves the self._websocket object around, so that
+    future calls to it will fail with a relevant error.
+    """
+    if self._websocket:
+      self._websocket.Disconnect()
+
+  def __del__(self):
+    self.Disconnect()
+
+  @property
+  def app(self):
+    return self._app
+
+  @property
+  def url(self):
+    """Returns the URL of the tab, as reported by devtools.
+
+    Raises:
+      devtools_http.DevToolsClientConnectionError
+    """
+    return self._devtools_client.GetUrl(self.id)
+
+  @property
+  def id(self):
+    return self._context['id']
+
+  @property
+  def debugger_url(self):
+    return self._context['webSocketDebuggerUrl']
+
+  def GetWebviewInspectorBackends(self):
+    """Returns a list of InspectorBackend instances associated with webviews.
+
+    Raises:
+      devtools_http.DevToolsClientConnectionError
+    """
+    inspector_backends = []
+    devtools_context_map = self._devtools_client.GetUpdatedInspectableContexts()
+    for context in devtools_context_map.contexts:
+      if context['type'] == 'webview':
+        inspector_backends.append(
+            devtools_context_map.GetInspectorBackend(context['id']))
+    return inspector_backends
+
+  def IsInspectable(self):
+    """Whether the tab is inspectable, as reported by devtools."""
+    try:
+      return self._devtools_client.IsInspectable(self.id)
+    except devtools_http.DevToolsClientConnectionError:
+      return False
+
+  # Public methods implemented in JavaScript.
+
+  @property
+  @decorators.Cache
+  def screenshot_supported(self):
+    if (self.app.platform.GetOSName() == 'linux' and (
+        os.getenv('DISPLAY') not in [':0', ':0.0'])):
+      # Displays other than 0 mean we are likely running in something like
+      # xvfb where screenshotting doesn't work.
+      return False
+    return True
+
+  @_HandleInspectorWebSocketExceptions
+  def Screenshot(self, timeout):
+    assert self.screenshot_supported, 'Browser does not support screenshotting'
+    return self._page.CaptureScreenshot(timeout)
+
+  # Memory public methods.
+
+  @_HandleInspectorWebSocketExceptions
+  def GetDOMStats(self, timeout):
+    """Gets memory stats from the DOM.
+
+    Raises:
+      inspector_memory.InspectorMemoryException
+      exceptions.TimeoutException
+      exceptions.DevtoolsTargetCrashException
+    """
+    dom_counters = self._memory.GetDOMCounters(timeout)
+    return {
+      'document_count': dom_counters['documents'],
+      'node_count': dom_counters['nodes'],
+      'event_listener_count': dom_counters['jsEventListeners']
+    }
+
+  # Page public methods.
+
+  @_HandleInspectorWebSocketExceptions
+  def WaitForNavigate(self, timeout):
+    self._page.WaitForNavigate(timeout)
+
+  @_HandleInspectorWebSocketExceptions
+  def Navigate(self, url, script_to_evaluate_on_commit, timeout):
+    self._page.Navigate(url, script_to_evaluate_on_commit, timeout)
+
+  @_HandleInspectorWebSocketExceptions
+  def GetCookieByName(self, name, timeout):
+    return self._page.GetCookieByName(name, timeout)
+
+  # Console public methods.
+
+  @_HandleInspectorWebSocketExceptions
+  def GetCurrentConsoleOutputBuffer(self, timeout=10):
+    return self._console.GetCurrentConsoleOutputBuffer(timeout)
+
+  # Runtime public methods.
+
+  @_HandleInspectorWebSocketExceptions
+  def ExecuteJavaScript(self, expr, context_id=None, timeout=60):
+    """Executes a javascript expression without returning the result.
+
+    Raises:
+      exceptions.EvaluateException
+      exceptions.WebSocketDisconnected
+      exceptions.TimeoutException
+      exceptions.DevtoolsTargetCrashException
+    """
+    self._runtime.Execute(expr, context_id, timeout)
+
+  @_HandleInspectorWebSocketExceptions
+  def EvaluateJavaScript(self, expr, context_id=None, timeout=60):
+    """Evaluates a javascript expression and returns the result.
+
+    Raises:
+      exceptions.EvaluateException
+      exceptions.WebSocketDisconnected
+      exceptions.TimeoutException
+      exceptions.DevtoolsTargetCrashException
+    """
+    return self._runtime.Evaluate(expr, context_id, timeout)
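+
+  # Hedged sketch (comment only): EvaluateJavaScript returns the value of the
+  # expression, e.g. a hypothetical backend.EvaluateJavaScript('1 + 1') yields
+  # the Python integer 2, while ExecuteJavaScript above discards the result.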
+
+  @_HandleInspectorWebSocketExceptions
+  def EnableAllContexts(self):
+    """Allows access to iframes.
+
+    Raises:
+      exceptions.WebSocketDisconnected
+      exceptions.TimeoutException
+      exceptions.DevtoolsTargetCrashException
+    """
+    return self._runtime.EnableAllContexts()
+
+  @_HandleInspectorWebSocketExceptions
+  def SynthesizeScrollGesture(self, x=100, y=800, xDistance=0, yDistance=-500,
+                              xOverscroll=None, yOverscroll=None,
+                              preventFling=True, speed=None,
+                              gestureSourceType=None, repeatCount=None,
+                              repeatDelayMs=None, interactionMarkerName=None,
+                              timeout=60):
+    """Runs an inspector command that causes a repeatable browser driven scroll.
+
+    Args:
+      x: X coordinate of the start of the gesture in CSS pixels.
+      y: Y coordinate of the start of the gesture in CSS pixels.
+      xDistance: Distance to scroll along the X axis (positive to scroll left).
+      yDistance: Distance to scroll along the Y axis (positive to scroll up).
+      xOverscroll: Number of additional pixels to scroll back along the X axis.
+      yOverscroll: Number of additional pixels to scroll back along the Y axis.
+      preventFling: Prevents a fling gesture.
+      speed: Swipe speed in pixels per second.
+      gestureSourceType: Which type of input events to be generated.
+      repeatCount: Number of additional repeats beyond the first scroll.
+      repeatDelayMs: Number of milliseconds delay between each repeat.
+      interactionMarkerName: The name of the interaction markers to generate.
+
+    Raises:
+      exceptions.TimeoutException
+      exceptions.DevtoolsTargetCrashException
+    """
+    params = {
+        'x': x,
+        'y': y,
+        'xDistance': xDistance,
+        'yDistance': yDistance,
+        'preventFling': preventFling,
+    }
+
+    if xOverscroll is not None:
+      params['xOverscroll'] = xOverscroll
+
+    if yOverscroll is not None:
+      params['yOverscroll'] = yOverscroll
+
+    if speed is not None:
+      params['speed'] = speed
+
+    if repeatCount is not None:
+      params['repeatCount'] = repeatCount
+
+    if gestureSourceType is not None:
+      params['gestureSourceType'] = gestureSourceType
+
+    if repeatDelayMs is not None:
+      params['repeatDelayMs'] = repeatDelayMs
+
+    if interactionMarkerName is not None:
+      params['interactionMarkerName'] = interactionMarkerName
+
+    scroll_command = {
+      'method': 'Input.synthesizeScrollGesture',
+      'params': params
+    }
+    return self._runtime.RunInspectorCommand(scroll_command, timeout)
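+
+  # Hedged usage sketch (comment only): perform a 500-pixel vertical scroll
+  # gesture starting at (100, 800), relying on the defaults above; 'backend'
+  # is a hypothetical InspectorBackend instance.
+  #
+  #   backend.SynthesizeScrollGesture(x=100, y=800, yDistance=-500)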
+
+  # Methods used internally by other backends.
+
+  def _HandleInspectorDomainNotification(self, res):
+    if (res['method'] == 'Inspector.detached' and
+        res.get('params', {}).get('reason', '') == 'replaced_with_devtools'):
+      self._WaitForInspectorToGoAway()
+      return
+    if res['method'] == 'Inspector.targetCrashed':
+      exception = exceptions.DevtoolsTargetCrashException(self.app)
+      self._AddDebuggingInformation(exception)
+      raise exception
+
+  def _WaitForInspectorToGoAway(self):
+    self._websocket.Disconnect()
+    raw_input('The connection to Chrome was lost to the inspector UI.\n'
+              'Please close the inspector and press enter to resume the '
+              'Telemetry run...')
+    raise exceptions.DevtoolsTargetCrashException(
+        self.app, 'DevTools connection with the browser was interrupted due to '
+        'the opening of an inspector.')
+
+  def _ConvertExceptionFromInspectorWebsocket(self, error):
+    """Converts an Exception from inspector_websocket.
+
+    This method always raises a Telemetry exception. It appends debugging
+    information. The exact exception raised depends on |error|.
+
+    Args:
+      error: An instance of socket.error or websocket.WebSocketException.
+    Raises:
+      exceptions.TimeoutException: A timeout occurred.
+      exceptions.DevtoolsTargetCrashException: On any other error, the most
+        likely explanation is that the devtools target crashed.
+    """
+    if isinstance(error, websocket.WebSocketTimeoutException):
+      new_error = exceptions.TimeoutException()
+      new_error.AddDebuggingMessage(exceptions.AppCrashException(
+          self.app, 'The app has probably crashed:\n'))
+    else:
+      new_error = exceptions.DevtoolsTargetCrashException(self.app)
+
+    original_error_msg = 'Original exception:\n' + str(error)
+    new_error.AddDebuggingMessage(original_error_msg)
+    self._AddDebuggingInformation(new_error)
+
+    raise new_error, None, sys.exc_info()[2]
+
+  def _AddDebuggingInformation(self, error):
+    """Adds debugging information to error.
+
+    Args:
+      error: An instance of exceptions.Error.
+    """
+    if self.IsInspectable():
+      msg = (
+          'Received a socket error in the browser connection and the tab '
+          'still exists. The operation probably timed out.'
+      )
+    else:
+      msg = (
+          'Received a socket error in the browser connection and the tab no '
+          'longer exists. The tab probably crashed.'
+      )
+    error.AddDebuggingMessage(msg)
+    error.AddDebuggingMessage('Debugger url: %s' % self.debugger_url)
+
+  @_HandleInspectorWebSocketExceptions
+  def CollectGarbage(self):
+    self._page.CollectGarbage()
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_backend_list.py b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_backend_list.py
new file mode 100644
index 0000000..7e1f8fb
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_backend_list.py
@@ -0,0 +1,124 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+from telemetry.core import exceptions
+
+
+def DebuggerUrlToId(debugger_url):
+  return debugger_url.split('/')[-1]
+
+
+class InspectorBackendList(collections.Sequence):
+  """A dynamic sequence of active InspectorBackends."""
+
+  def __init__(self, browser_backend):
+    """Constructor.
+
+    Args:
+      browser_backend: The BrowserBackend instance to query for
+          InspectorBackends.
+    """
+    self._browser_backend = browser_backend
+    self._devtools_context_map_backend = None
+    # A list of filtered contexts.
+    self._filtered_context_ids = []
+    # A cache of inspector backends, by context ID.
+    self._wrapper_dict = {}
+
+  @property
+  def _devtools_client(self):
+    return self._browser_backend.devtools_client
+
+  @property
+  def app(self):
+    return self._browser_backend.app
+
+  def GetContextInfo(self, context_id):
+    return self._devtools_context_map_backend.GetContextInfo(context_id)
+
+  def ShouldIncludeContext(self, _):
+    """Override this method to control which contexts are included."""
+    return True
+
+  def CreateWrapper(self, inspector_backend_instance):
+    """Override to return the wrapper API over InspectorBackend.
+
+    The wrapper API is the public interface for InspectorBackend. It
+    may expose whatever methods are desired on top of that backend.
+    """
+    raise NotImplementedError
+
+  # TODO(nednguyen): Remove this method and turn inspector_backend_list API to
+  # dictionary-like API (crbug.com/398467)
+  def __getitem__(self, index):
+    self._Update()
+    if index >= len(self._filtered_context_ids):
+      raise exceptions.DevtoolsTargetCrashException(
+          self.app,
+          'Web content with index %s may have crashed. '
+          'filtered_context_ids = %s' % (
+              index, repr(self._filtered_context_ids)))
+    context_id = self._filtered_context_ids[index]
+    return self.GetBackendFromContextId(context_id)
+
+  def GetTabById(self, identifier):
+    self._Update()
+    return self.GetBackendFromContextId(identifier)
+
+  def GetBackendFromContextId(self, context_id):
+    self._Update()
+    if context_id not in self._wrapper_dict:
+      try:
+        backend = self._devtools_context_map_backend.GetInspectorBackend(
+            context_id)
+      except exceptions.Error as e:
+        self._HandleDevToolsConnectionError(e)
+        raise e
+      # Propagate KeyError from GetInspectorBackend call.
+
+      wrapper = self.CreateWrapper(backend)
+      self._wrapper_dict[context_id] = wrapper
+    return self._wrapper_dict[context_id]
+
+  def IterContextIds(self):
+    self._Update()
+    return iter(self._filtered_context_ids)
+
+  def __iter__(self):
+    self._Update()
+    for context_id in self._filtered_context_ids:
+      yield self.GetTabById(context_id)
+
+  def __len__(self):
+    self._Update()
+    return len(self._filtered_context_ids)
+
+  def _Update(self):
+    backends_map = self._devtools_client.GetUpdatedInspectableContexts()
+    self._devtools_context_map_backend = backends_map
+
+    # Clear context ids that do not appear in the inspectable contexts.
+    context_ids = [context['id'] for context in backends_map.contexts]
+    self._filtered_context_ids = [context_id
+                                  for context_id in self._filtered_context_ids
+                                  if context_id in context_ids]
+    # Add new context ids.
+    for context in backends_map.contexts:
+      if (context['id'] not in self._filtered_context_ids and
+          self.ShouldIncludeContext(context)):
+        self._filtered_context_ids.append(context['id'])
+
+    # Clean up any backends for contexts that have gone away.
+    for context_id in self._wrapper_dict.keys():
+      if context_id not in self._filtered_context_ids:
+        del self._wrapper_dict[context_id]
+
+  def _HandleDevToolsConnectionError(self, error):
+    """Called when handling errors in connecting to the DevTools websocket.
+
+    This can be overwritten by sub-classes to add more debugging information to
+    errors.
+    """
+    pass
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_console.py b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_console.py
new file mode 100644
index 0000000..0d7b5e5
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_console.py
@@ -0,0 +1,53 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import StringIO
+
+from telemetry.internal.backends.chrome_inspector import websocket
+
+
+class InspectorConsole(object):
+  def __init__(self, inspector_websocket):
+    self._inspector_websocket = inspector_websocket
+    self._inspector_websocket.RegisterDomain('Console', self._OnNotification)
+    self._message_output_stream = None
+    self._last_message = None
+    self._console_enabled = False
+
+  def _OnNotification(self, msg):
+    if msg['method'] == 'Console.messageAdded':
+      assert self._message_output_stream
+      if msg['params']['message']['url'] == 'chrome://newtab/':
+        return
+      self._last_message = '(%s) %s:%i: %s' % (
+        msg['params']['message']['level'],
+        msg['params']['message']['url'],
+        msg['params']['message']['line'],
+        msg['params']['message']['text'])
+      self._message_output_stream.write(
+        '%s\n' % self._last_message)
+
+    elif msg['method'] == 'Console.messageRepeatCountUpdated':
+      if self._message_output_stream:
+        self._message_output_stream.write(
+          '%s\n' % self._last_message)
+
+  def GetCurrentConsoleOutputBuffer(self, timeout=10):
+    self._message_output_stream = StringIO.StringIO()
+    self._EnableConsoleOutputStream()
+    try:
+      self._inspector_websocket.DispatchNotifications(timeout)
+      return self._message_output_stream.getvalue()
+    except websocket.WebSocketTimeoutException:
+      return self._message_output_stream.getvalue()
+    finally:
+      self._DisableConsoleOutputStream()
+      self._message_output_stream.close()
+      self._message_output_stream = None
+
+  def _EnableConsoleOutputStream(self):
+    self._inspector_websocket.SyncRequest({'method': 'Console.enable'})
+
+  def _DisableConsoleOutputStream(self):
+    self._inspector_websocket.SyncRequest({'method': 'Console.disable'})
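+
+
+# Illustrative note (comment only): each captured console line is formatted by
+# _OnNotification above as '(<level>) <url>:<line>: <text>', for example:
+#
+#   (log) http://127.0.0.1:8000/page_that_logs_to_console.html:9: Hello, world
+#
+# The host/port part depends on the local test server and is hypothetical here.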
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_console_unittest.py b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_console_unittest.py
new file mode 100644
index 0000000..c3d08b9
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_console_unittest.py
@@ -0,0 +1,29 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import re
+
+from telemetry.core import util
+from telemetry.testing import tab_test_case
+
+
+class TabConsoleTest(tab_test_case.TabTestCase):
+  def testConsoleOutputStream(self):
+    self.Navigate('page_that_logs_to_console.html')
+
+    initial = self._tab.EvaluateJavaScript('window.__logCount')
+    def GotLog():
+      current = self._tab.EvaluateJavaScript('window.__logCount')
+      return current > initial
+    util.WaitFor(GotLog, 5)
+
+    console_output = (
+        self._tab._inspector_backend.GetCurrentConsoleOutputBuffer())
+    lines = [l for l in console_output.split('\n') if len(l)]
+
+    self.assertTrue(len(lines) >= 1)
+    for line in lines:
+      prefix = 'http://(.+)/page_that_logs_to_console.html:9'
+      expected_line = r'\(log\) %s: Hello, world' % prefix
+      self.assertTrue(re.match(expected_line, line))
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_memory.py b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_memory.py
new file mode 100644
index 0000000..cd4ddf0
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_memory.py
@@ -0,0 +1,53 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import json
+
+from telemetry.core import exceptions
+
+
+class InspectorMemoryException(exceptions.Error):
+  pass
+
+
+class InspectorMemory(object):
+  """Communicates with the remote inspector's Memory domain."""
+
+  def __init__(self, inspector_websocket):
+    self._inspector_websocket = inspector_websocket
+    self._inspector_websocket.RegisterDomain('Memory', self._OnNotification)
+
+  def _OnNotification(self, msg):
+    pass
+
+  def GetDOMCounters(self, timeout):
+    """Retrieves DOM element counts.
+
+    Args:
+      timeout: The number of seconds to wait for the inspector backend to
+          service the request before timing out.
+
+    Returns:
+      A dictionary containing the counts associated with "nodes", "documents",
+      and "jsEventListeners".
+    Raises:
+      InspectorMemoryException
+      websocket.WebSocketException
+      socket.error
+      exceptions.WebSocketDisconnected
+    """
+    res = self._inspector_websocket.SyncRequest({
+      'method': 'Memory.getDOMCounters'
+    }, timeout)
+    if ('result' not in res or
+        'nodes' not in res['result'] or
+        'documents' not in res['result'] or
+        'jsEventListeners' not in res['result']):
+      raise InspectorMemoryException(
+          'Inspector returned unexpected result for Memory.getDOMCounters:\n' +
+          json.dumps(res, indent=2))
+    return {
+        'nodes': res['result']['nodes'],
+        'documents': res['result']['documents'],
+        'jsEventListeners': res['result']['jsEventListeners']
+    }
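+
+
+# Illustrative note (comment only): the returned dictionary has the shape
+# {'nodes': <int>, 'documents': <int>, 'jsEventListeners': <int>}; for the
+# dom_counter_sample.html test page, the accompanying unit test expects
+# 14 nodes, 1 document and 2 event listeners.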
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_memory_unittest.py b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_memory_unittest.py
new file mode 100644
index 0000000..9b7f5e0
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_memory_unittest.py
@@ -0,0 +1,36 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry import decorators
+from telemetry.testing import tab_test_case
+
+
+class InspectorMemoryTest(tab_test_case.TabTestCase):
+
+  @decorators.Enabled('has tabs')
+  def testGetDOMStats(self):
+    # Due to an issue with CrOS, we create a new tab here rather than
+    # using the existing tab to get a consistent starting page on all platforms.
+    self._tab = self._browser.tabs.New()
+
+    self.Navigate('dom_counter_sample.html')
+
+    self._tab.ExecuteJavaScript('gc();')
+
+    # Document_count > 1 indicates that WebCore::Document loaded in Chrome
+    # is leaking! The baseline should exactly match the numbers on:
+    # internal/testing/dom_counter_sample.html
+    # Please contact kouhei@, hajimehoshi@ when rebaselining.
+    counts = self._tab.dom_stats
+    self.assertEqual(counts['document_count'], 1,
+        'Document leak is detected! '
+        'The previous document is likely retained unexpectedly.')
+    self.assertEqual(counts['node_count'], 14,
+        'Node leak is detected!')
+    self.assertEqual(counts['event_listener_count'], 2,
+        'EventListener leak is detected!')
+
+  @classmethod
+  def CustomizeBrowserOptions(cls, options):
+    options.AppendExtraBrowserArgs('--js-flags=--expose-gc')
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_page.py b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_page.py
new file mode 100644
index 0000000..2c3b087
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_page.py
@@ -0,0 +1,156 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import time
+
+from telemetry.util import image_util
+
+
+class InspectorPage(object):
+  """Class that controls a page connected by an inspector_websocket.
+
+  This class provides utility methods for controlling a page connected by an
+  inspector_websocket. It does not perform any exception handling. All
+  inspector_websocket exceptions must be handled by the caller.
+  """
+  def __init__(self, inspector_websocket, timeout=60):
+    self._inspector_websocket = inspector_websocket
+    self._inspector_websocket.RegisterDomain('Page', self._OnNotification)
+
+    self._navigation_pending = False
+    self._navigation_url = ''  # Support for legacy backends.
+    self._navigation_frame_id = ''
+    self._navigated_frame_ids = None  # Holds frame ids while navigating.
+    self._script_to_evaluate_on_commit = None
+    # Turn on notifications. We need them to get the Page.frameNavigated event.
+    self._EnablePageNotifications(timeout=timeout)
+
+  def _OnNotification(self, msg):
+    if msg['method'] == 'Page.frameNavigated':
+      url = msg['params']['frame']['url']
+      if self._navigated_frame_ids is not None:
+        frame_id = msg['params']['frame']['id']
+        if self._navigation_frame_id == frame_id:
+          self._navigation_frame_id = ''
+          self._navigated_frame_ids = None
+          self._navigation_pending = False
+        else:
+          self._navigated_frame_ids.add(frame_id)
+      elif self._navigation_url == url:
+        # TODO(tonyg): Remove this when Chrome 38 goes stable.
+        self._navigation_url = ''
+        self._navigation_pending = False
+      elif (url != 'chrome://newtab/' and url != 'about:blank'
+            and 'parentId' not in msg['params']['frame']):
+        # Marks the navigation as complete and unblocks the
+        # WaitForNavigate call.
+        self._navigation_pending = False
+
+  def _SetScriptToEvaluateOnCommit(self, source):
+    existing_source = (self._script_to_evaluate_on_commit and
+                       self._script_to_evaluate_on_commit['source'])
+    if source == existing_source:
+      return
+    if existing_source:
+      request = {
+          'method': 'Page.removeScriptToEvaluateOnLoad',
+          'params': {
+              'identifier': self._script_to_evaluate_on_commit['id'],
+              }
+          }
+      self._inspector_websocket.SyncRequest(request)
+      self._script_to_evaluate_on_commit = None
+    if source:
+      request = {
+          'method': 'Page.addScriptToEvaluateOnLoad',
+          'params': {
+              'scriptSource': source,
+              }
+          }
+      res = self._inspector_websocket.SyncRequest(request)
+      self._script_to_evaluate_on_commit = {
+          'id': res['result']['identifier'],
+          'source': source
+          }
+
+  def _EnablePageNotifications(self, timeout=60):
+    request = {
+        'method': 'Page.enable'
+        }
+    res = self._inspector_websocket.SyncRequest(request, timeout)
+    assert len(res['result'].keys()) == 0
+
+  def WaitForNavigate(self, timeout=60):
+    """Waits for the navigation to complete.
+
+    The current page is expected to be navigating. This function returns
+    when the navigation is complete or when the timeout has been exceeded.
+    """
+    start_time = time.time()
+    remaining_time = timeout
+    self._navigation_pending = True
+    while self._navigation_pending and remaining_time > 0:
+      remaining_time = max(timeout - (time.time() - start_time), 0.0)
+      self._inspector_websocket.DispatchNotifications(remaining_time)
+
+  def Navigate(self, url, script_to_evaluate_on_commit=None, timeout=60):
+    """Navigates to |url|.
+
+    If |script_to_evaluate_on_commit| is given, the script source string will be
+    evaluated when the navigation is committed. This is after the context of
+    the page exists, but before any script on the page itself has executed.
+    """
+
+    self._SetScriptToEvaluateOnCommit(script_to_evaluate_on_commit)
+    request = {
+        'method': 'Page.navigate',
+        'params': {
+            'url': url,
+            }
+        }
+    self._navigated_frame_ids = set()
+    res = self._inspector_websocket.SyncRequest(request, timeout)
+    if 'frameId' in res['result']:
+      # Modern backends are returning frameId from Page.navigate.
+      # Use it here to unblock upon precise navigation.
+      frame_id = res['result']['frameId']
+      if self._navigated_frame_ids and frame_id in self._navigated_frame_ids:
+        self._navigated_frame_ids = None
+        return
+      self._navigation_frame_id = frame_id
+    else:
+      # TODO(tonyg): Remove this when Chrome 38 goes stable.
+      self._navigated_frame_ids = None
+      self._navigation_url = url
+    self.WaitForNavigate(timeout)
+
+  def GetCookieByName(self, name, timeout=60):
+    """Returns the value of the cookie by the given |name|."""
+    request = {
+        'method': 'Page.getCookies'
+        }
+    res = self._inspector_websocket.SyncRequest(request, timeout)
+    cookies = res['result']['cookies']
+    for cookie in cookies:
+      if cookie['name'] == name:
+        return cookie['value']
+    return None
+
+  def CaptureScreenshot(self, timeout=60):
+    request = {
+        'method': 'Page.captureScreenshot'
+        }
+    # "Google API are missing..." infobar might cause a viewport resize
+    # which invalidates screenshot request. See crbug.com/459820.
+    for _ in range(2):
+      res = self._inspector_websocket.SyncRequest(request, timeout)
+      if res and ('result' in res) and ('data' in res['result']):
+        return image_util.FromBase64Png(res['result']['data'])
+    return None
+
+  def CollectGarbage(self, timeout=60):
+    request = {
+        'method': 'HeapProfiler.collectGarbage'
+        }
+    res = self._inspector_websocket.SyncRequest(request, timeout)
+    assert 'result' in res
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_page_unittest.py b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_page_unittest.py
new file mode 100644
index 0000000..935b8df
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_page_unittest.py
@@ -0,0 +1,46 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry import decorators
+from telemetry.testing import tab_test_case
+from telemetry.util import image_util
+
+
+class InspectorPageTest(tab_test_case.TabTestCase):
+  def testPageNavigateToNormalUrl(self):
+    self.Navigate('blank.html')
+
+  def testCustomActionToNavigate(self):
+    self.Navigate('page_with_link.html')
+    self.assertEquals(
+        self._tab.EvaluateJavaScript('document.location.pathname;'),
+        '/page_with_link.html')
+
+    self._tab.ExecuteJavaScript('document.getElementById("clickme").click();')
+    self._tab.WaitForNavigate()
+
+    self.assertEquals(
+        self._tab.EvaluateJavaScript('document.location.pathname;'),
+        '/blank.html')
+
+  def testGetCookieByName(self):
+    self.Navigate('blank.html')
+    self._tab.ExecuteJavaScript('document.cookie="foo=bar"')
+    self.assertEquals(self._tab.GetCookieByName('foo'), 'bar')
+
+  def testScriptToEvaluateOnCommit(self):
+    self.Navigate('blank.html',
+                  script_to_evaluate_on_commit='var foo = "bar";')
+    self._tab.WaitForDocumentReadyStateToBeComplete()
+    self.assertEquals(self._tab.EvaluateJavaScript('foo'), 'bar')
+
+  @decorators.Disabled('chromeos')  # crbug.com/483212
+  def testCaptureScreenshot(self):
+    if not self._tab.screenshot_supported:
+      return
+    self.Navigate('green_rect.html')
+    res = image_util.Pixels(self._tab.Screenshot())
+    self.assertEquals(0x00, res[0])
+    self.assertEquals(0xFF, res[1])
+    self.assertEquals(0x00, res[2])
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_runtime.py b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_runtime.py
new file mode 100644
index 0000000..b3c250e
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_runtime.py
@@ -0,0 +1,80 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+from telemetry.core import exceptions
+
+
+class InspectorRuntime(object):
+  def __init__(self, inspector_websocket):
+    self._inspector_websocket = inspector_websocket
+    self._inspector_websocket.RegisterDomain('Runtime', self._OnNotification)
+    self._contexts_enabled = False
+    self._max_context_id = None
+
+  def _OnNotification(self, msg):
+    if (self._contexts_enabled and
+        msg['method'] == 'Runtime.executionContextCreated'):
+      self._max_context_id = max(self._max_context_id,
+                                 msg['params']['context']['id'])
+
+  def Execute(self, expr, context_id, timeout):
+    self.Evaluate(expr + '; 0;', context_id, timeout)
+
+  def Evaluate(self, expr, context_id, timeout):
+    """Evaluates a javascript expression and returns the result.
+
+    |context_id| can refer to an iframe. The main page has context_id=1, the
+    first iframe context_id=2, etc.
+
+    Raises:
+      exceptions.EvaluateException
+      exceptions.WebSocketDisconnected
+      websocket.WebSocketException
+      socket.error
+    """
+    request = {
+      'method': 'Runtime.evaluate',
+      'params': {
+        'expression': expr,
+        'returnByValue': True
+        }
+      }
+    if context_id is not None:
+      self.EnableAllContexts()
+      request['params']['contextId'] = context_id
+    res = self._inspector_websocket.SyncRequest(request, timeout)
+    if 'error' in res:
+      raise exceptions.EvaluateException(res['error']['message'])
+
+    if 'wasThrown' in res['result'] and res['result']['wasThrown']:
+      # TODO(nduca): propagate stacks from javascript up to the python
+      # exception.
+      raise exceptions.EvaluateException(res['result']['result']['description'])
+    if res['result']['result']['type'] == 'undefined':
+      return None
+    return res['result']['result']['value']
+
+  def EnableAllContexts(self):
+    """Allow access to iframes.
+
+    Raises:
+      exceptions.WebSocketDisconnected
+      websocket.WebSocketException
+      socket.error
+    """
+    if not self._contexts_enabled:
+      self._contexts_enabled = True
+      self._inspector_websocket.SyncRequest({'method': 'Runtime.enable'},
+                                            timeout=30)
+    return self._max_context_id
+
+  def RunInspectorCommand(self, command, timeout):
+    """Runs an inspector command.
+
+    Raises:
+      exceptions.WebSocketDisconnected
+      websocket.WebSocketException
+      socket.error
+    """
+    res = self._inspector_websocket.SyncRequest(command, timeout)
+    return res
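
Evaluate above folds four outcomes of a Runtime.evaluate reply into Python semantics: a protocol error, a thrown JavaScript exception, an undefined result, and a value returned by value. A standalone sketch of that decision logic, with a local stand-in for telemetry's EvaluateException and illustrative response dicts:

    class EvaluateException(Exception):
      # Stand-in for telemetry.core.exceptions.EvaluateException.
      pass

    def interpret_evaluate_response(res):
      # Same branches as InspectorRuntime.Evaluate.
      if 'error' in res:
        raise EvaluateException(res['error']['message'])
      if res['result'].get('wasThrown'):
        raise EvaluateException(res['result']['result']['description'])
      if res['result']['result']['type'] == 'undefined':
        return None
      return res['result']['result']['value']

    # Illustrative reply for the expression '1+1'.
    assert interpret_evaluate_response(
        {'result': {'result': {'type': 'number', 'value': 2}}}) == 2
    assert interpret_evaluate_response(
        {'result': {'result': {'type': 'undefined'}}}) is None
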
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_runtime_unittest.py b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_runtime_unittest.py
new file mode 100644
index 0000000..7b90d06
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_runtime_unittest.py
@@ -0,0 +1,82 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.core import exceptions
+from telemetry.core import util
+from telemetry import decorators
+from telemetry.testing import tab_test_case
+
+
+class InspectorRuntimeTest(tab_test_case.TabTestCase):
+  def testRuntimeEvaluateSimple(self):
+    res = self._tab.EvaluateJavaScript('1+1')
+    assert res == 2
+
+  def testRuntimeEvaluateThatFails(self):
+    self.assertRaises(exceptions.EvaluateException,
+                      lambda: self._tab.EvaluateJavaScript('fsdfsdfsf'))
+
+  def testRuntimeEvaluateOfSomethingThatCantJSONize(self):
+
+    def test():
+      self._tab.EvaluateJavaScript("""
+        var cur = {};
+        var root = {next: cur};
+        for (var i = 0; i < 1000; i++) {
+          next = {};
+          cur.next = next;
+          cur = next;
+        }
+        root;""")
+    self.assertRaises(exceptions.EvaluateException, test)
+
+  def testRuntimeExecuteOfSomethingThatCantJSONize(self):
+    self._tab.ExecuteJavaScript('window')
+
+  @decorators.Disabled('chromeos')  # crbug.com/483212
+  def testIFrame(self):
+    starting_contexts = self._tab.EnableAllContexts()
+
+    self.Navigate('host.html')
+
+    # Access host page.
+    test_defined_js = "typeof(testVar) != 'undefined'"
+    self._tab.WaitForJavaScriptExpression(test_defined_js, timeout=10)
+
+    expected_contexts = 4 + starting_contexts
+
+    util.WaitFor(lambda: self._tab.EnableAllContexts() == expected_contexts,
+                 timeout=10)
+
+    self.assertEquals(self._tab.EvaluateJavaScript('testVar'), 'host')
+
+    def TestVarReady(context_id):
+      """Returns True if the context and testVar are both ready."""
+      try:
+        return self._tab.EvaluateJavaScriptInContext(test_defined_js,
+                                                     context_id)
+      except exceptions.EvaluateException:
+        # This happens when the context is not ready.
+        return False
+
+    def TestVar(context_id):
+      """Waits for testVar and the context to be ready, then returns the value
+      of testVar."""
+      util.WaitFor(lambda: TestVarReady(context_id), timeout=10)
+      return self._tab.EvaluateJavaScriptInContext('testVar', context_id)
+
+    # Access parent page using EvaluateJavaScriptInContext.
+    self.assertEquals(TestVar(context_id=starting_contexts+1), 'host')
+
+    # Access the iframes without guarantees on which order they loaded.
+    iframe1 = TestVar(context_id=starting_contexts+2)
+    iframe2 = TestVar(context_id=starting_contexts+3)
+    iframe3 = TestVar(context_id=starting_contexts+4)
+    self.assertEqual(set([iframe1, iframe2, iframe3]),
+                     set(['iframe1', 'iframe2', 'iframe3']))
+
+    # Accessing a non-existent iframe throws an exception.
+    self.assertRaises(exceptions.EvaluateException,
+        lambda: self._tab.EvaluateJavaScriptInContext(
+          '1+1', context_id=starting_contexts+5))
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_websocket.py b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_websocket.py
new file mode 100644
index 0000000..03237e6
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_websocket.py
@@ -0,0 +1,184 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import errno
+import json
+import logging
+import socket
+import time
+
+from telemetry.core import exceptions
+from telemetry.internal.backends.chrome_inspector import websocket
+
+class WebSocketDisconnected(exceptions.Error):
+  """An attempt was made to use a web socket after it had been disconnected."""
+  pass
+
+
+class InspectorWebsocket(object):
+
+  # See http://www.jsonrpc.org/specification#error_object.
+  METHOD_NOT_FOUND_CODE = -32601
+
+  def __init__(self):
+    """Create a websocket handler for communicating with Inspectors."""
+    self._socket = None
+    self._cur_socket_timeout = 0
+    self._next_request_id = 0
+    self._domain_handlers = {}
+    self._pending_callbacks = dict()
+
+  def RegisterDomain(self, domain_name, notification_handler):
+    """Registers a given domain for handling notification methods.
+
+    For example, given inspector_backend:
+       def OnConsoleNotification(msg):
+          if msg['method'] == 'Console.messageAdded':
+             print msg['params']['message']
+       inspector_backend.RegisterDomain('Console', OnConsoleNotification)
+
+    Args:
+      domain_name: The devtools domain name. E.g., 'Tracing', 'Memory', 'Page'.
+      notification_handler: Handler for devtools notification. Will be
+          called if a devtools notification with matching domain is received
+          via DispatchNotifications. The handler accepts a single parameter:
+          the JSON object representing the notification.
+    """
+    assert domain_name not in self._domain_handlers
+    self._domain_handlers[domain_name] = notification_handler
+
+  def UnregisterDomain(self, domain_name):
+    """Unregisters a previously registered domain."""
+    assert domain_name in self._domain_handlers
+    del self._domain_handlers[domain_name]
+
+  def Connect(self, url, timeout=10):
+    """Connects the websocket.
+
+    Raises:
+      websocket.WebSocketException
+      socket.error
+    """
+    assert not self._socket
+    self._socket = websocket.create_connection(url, timeout=timeout)
+    self._cur_socket_timeout = 0
+    self._next_request_id = 0
+
+  def Disconnect(self):
+    """Disconnects the inspector websocket.
+
+    Raises:
+      websocket.WebSocketException
+      socket.error
+    """
+    if self._socket:
+      self._socket.close()
+      self._socket = None
+
+  def SendAndIgnoreResponse(self, req):
+    """Sends a request without waiting for a response.
+
+    Raises:
+      websocket.WebSocketException: Error from websocket library.
+      socket.error: Error from websocket library.
+      exceptions.WebSocketDisconnected: The socket was disconnected.
+    """
+    self._SendRequest(req)
+
+  def _SendRequest(self, req):
+    if not self._socket:
+      raise WebSocketDisconnected()
+    req['id'] = self._next_request_id
+    self._next_request_id += 1
+    data = json.dumps(req)
+    self._socket.send(data)
+    if logging.getLogger().isEnabledFor(logging.DEBUG):
+      logging.debug('sent [%s]', json.dumps(req, indent=2, sort_keys=True))
+
+  def SyncRequest(self, req, timeout=10):
+    """Sends a request and waits for a response.
+
+    Raises:
+      websocket.WebSocketException: Error from websocket library.
+      socket.error: Error from websocket library.
+      exceptions.WebSocketDisconnected: The socket was disconnected.
+    """
+    self._SendRequest(req)
+
+    while True:
+      res = self._Receive(timeout)
+      if 'id' in res and res['id'] == req['id']:
+        return res
+
+  def AsyncRequest(self, req, callback):
+    """Sends an async request and returns immediately.
+
+    Response will be handled in the |callback| later when DispatchNotifications
+    is invoked.
+
+    Args:
+      callback: a function that takes inspector's response as the argument.
+    """
+    self._SendRequest(req)
+    self._pending_callbacks[req['id']] = callback
+
+  def DispatchNotifications(self, timeout=10):
+    """Waits for responses from the websocket, dispatching them as necessary.
+
+    Raises:
+      websocket.WebSocketException: Error from websocket library.
+      socket.error: Error from websocket library.
+      exceptions.WebSocketDisconnected: The socket was disconnected.
+    """
+    self._Receive(timeout)
+
+  def _SetTimeout(self, timeout):
+    if self._cur_socket_timeout != timeout:
+      self._socket.settimeout(timeout)
+      self._cur_socket_timeout = timeout
+
+  def _Receive(self, timeout=10):
+    if not self._socket:
+      raise WebSocketDisconnected()
+
+    self._SetTimeout(timeout)
+
+    while True:
+      try:
+        data = self._socket.recv()
+      except socket.error, e:
+        if e.errno == errno.EAGAIN:
+          # Resource is temporarily unavailable. Try again.
+          # See https://code.google.com/p/chromium/issues/detail?id=545853#c3
+          # for more details.
+          time.sleep(0.1)
+        else:
+          raise
+      else:
+        break
+
+    result = json.loads(data)
+    if logging.getLogger().isEnabledFor(logging.DEBUG):
+      logging.debug(
+          'got [%s]', json.dumps(result, indent=2, sort_keys=True))
+    if 'method' in result:
+      self._HandleNotification(result)
+    elif 'id' in result:
+      self._HandleAsyncResponse(result)
+    return result
+
+  def _HandleNotification(self, result):
+    mname = result['method']
+    dot_pos = mname.find('.')
+    domain_name = mname[:dot_pos]
+    if not domain_name in self._domain_handlers:
+      logging.warn('Unhandled inspector message: %s', result)
+      return
+
+    self._domain_handlers[domain_name](result)
+
+  def _HandleAsyncResponse(self, result):
+    callback = self._pending_callbacks.pop(result['id'], None)
+    if callback:
+      callback(result)
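
A minimal usage sketch of InspectorWebsocket, assuming a DevTools page websocket URL is already known (the URL below is a placeholder, and Console.enable is only an example request):

    from telemetry.internal.backends.chrome_inspector import inspector_websocket

    def on_console_notification(msg):
      # Invoked from DispatchNotifications for any Console.* notification.
      print(msg['method'])

    inspector = inspector_websocket.InspectorWebsocket()
    inspector.RegisterDomain('Console', on_console_notification)
    inspector.Connect('ws://127.0.0.1:9222/devtools/page/1')  # placeholder URL
    inspector.SyncRequest({'method': 'Console.enable'}, timeout=10)
    inspector.DispatchNotifications(timeout=10)
    inspector.Disconnect()
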
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_websocket_unittest.py b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_websocket_unittest.py
new file mode 100644
index 0000000..89cfd66
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/inspector_websocket_unittest.py
@@ -0,0 +1,185 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import errno
+import socket
+import unittest
+
+from telemetry import decorators
+from telemetry.internal.backends.chrome_inspector import inspector_websocket
+from telemetry.internal.backends.chrome_inspector import websocket
+from telemetry.testing import simple_mock
+
+
+class FakeSocket(object):
+  """A fake websocket that allows test to send random data."""
+  def __init__(self, mock_timer):
+    self._mock_timer = mock_timer
+    self._responses = []
+    self._timeout = None
+
+  def AddResponse(self, response, time):
+    if self._responses:
+      assert self._responses[-1][1] < time, (
+          'Current response is scheduled earlier than previous response.')
+    self._responses.append((response, time))
+
+  def send(self, data):
+    pass
+
+  def recv(self):
+    if not self._responses:
+      raise Exception('No more recorded responses.')
+
+    response, time = self._responses.pop(0)
+    current_time = self._mock_timer.time()
+    if self._timeout is not None and time - current_time > self._timeout:
+      self._mock_timer.SetTime(current_time + self._timeout + 1)
+      raise websocket.WebSocketTimeoutException()
+
+    self._mock_timer.SetTime(time)
+    if isinstance(response, Exception):
+      raise response
+    return response
+
+  def settimeout(self, timeout):
+    self._timeout = timeout
+
+
+def _DoNothingHandler(elapsed_time):
+  del elapsed_time  # unused
+
+
+class InspectorWebsocketUnittest(unittest.TestCase):
+
+  def setUp(self):
+    self._mock_timer = simple_mock.MockTimer()
+
+  def tearDown(self):
+    self._mock_timer.Restore()
+
+  @decorators.Disabled('chromeos', 'mac')  # crbug.com/483212, crbug.com/498950
+  def testDispatchNotification(self):
+    inspector = inspector_websocket.InspectorWebsocket()
+    fake_socket = FakeSocket(self._mock_timer)
+    # pylint: disable=protected-access
+    inspector._socket = fake_socket
+
+    results = []
+    def OnTestEvent(result):
+      results.append(result)
+
+    inspector.RegisterDomain('Test', OnTestEvent)
+    fake_socket.AddResponse('{"method": "Test.foo"}', 5)
+    inspector.DispatchNotifications()
+    self.assertEqual(1, len(results))
+    self.assertEqual('Test.foo', results[0]['method'])
+
+  @decorators.Disabled('chromeos')  # crbug.com/483212
+  def testDispatchNotificationTimedOut(self):
+    inspector = inspector_websocket.InspectorWebsocket()
+    fake_socket = FakeSocket(self._mock_timer)
+    # pylint: disable=protected-access
+    inspector._socket = fake_socket
+
+    results = []
+    def OnTestEvent(result):
+      results.append(result)
+
+    inspector.RegisterDomain('Test', OnTestEvent)
+    fake_socket.AddResponse('{"method": "Test.foo"}', 11)
+    with self.assertRaises(
+        websocket.WebSocketTimeoutException):
+      inspector.DispatchNotifications(timeout=10)
+    self.assertEqual(0, len(results))
+
+  @decorators.Disabled('chromeos')  # crbug.com/483212
+  def testUnregisterDomain(self):
+    inspector = inspector_websocket.InspectorWebsocket()
+    fake_socket = FakeSocket(self._mock_timer)
+    # pylint: disable=protected-access
+    inspector._socket = fake_socket
+
+    results = []
+    def OnTestEvent(result):
+      results.append(result)
+
+    inspector.RegisterDomain('Test', OnTestEvent)
+    inspector.RegisterDomain('Test2', OnTestEvent)
+    inspector.UnregisterDomain('Test')
+
+    fake_socket.AddResponse('{"method": "Test.foo"}', 5)
+    fake_socket.AddResponse('{"method": "Test2.foo"}', 10)
+
+    inspector.DispatchNotifications()
+    self.assertEqual(0, len(results))
+
+    inspector.DispatchNotifications()
+    self.assertEqual(1, len(results))
+    self.assertEqual('Test2.foo', results[0]['method'])
+
+  @decorators.Disabled('chromeos')  # crbug.com/483212
+  def testUnregisterDomainWithUnregisteredDomain(self):
+    inspector = inspector_websocket.InspectorWebsocket()
+    with self.assertRaises(AssertionError):
+      inspector.UnregisterDomain('Test')
+
+  def testAsyncRequest(self):
+    inspector = inspector_websocket.InspectorWebsocket()
+    fake_socket = FakeSocket(self._mock_timer)
+    # pylint: disable=protected-access
+    inspector._socket = fake_socket
+    response_count = [0]
+
+    def callback0(response):
+      response_count[0] += 1
+      self.assertEqual(2, response_count[0])
+      self.assertEqual('response1', response['result']['data'])
+
+    def callback1(response):
+      response_count[0] += 1
+      self.assertEqual(1, response_count[0])
+      self.assertEqual('response2', response['result']['data'])
+
+    request1 = {'method': 'Test.foo'}
+    inspector.AsyncRequest(request1, callback0)
+    request2 = {'method': 'Test.foo'}
+    inspector.AsyncRequest(request2, callback1)
+    fake_socket.AddResponse('{"id": 5555555, "result": {}}', 1)
+    inspector.DispatchNotifications()
+    self.assertEqual(0, response_count[0])
+    fake_socket.AddResponse(
+        '{"id": %d, "result": {"data": "response2"}}' % request2['id'], 1)
+    fake_socket.AddResponse(
+        '{"id": %d, "result": {"data": "response1"}}' % request1['id'], 2)
+    inspector.DispatchNotifications()
+    inspector.DispatchNotifications()
+    self.assertEqual(2, response_count[0])
+    fake_socket.AddResponse('{"id": 6666666, "result": {}}', 1)
+    inspector.DispatchNotifications()
+    self.assertEqual(2, response_count[0])
+
+  def testEAGAIN(self):
+    inspector = inspector_websocket.InspectorWebsocket()
+    fake_socket = FakeSocket(self._mock_timer)
+    # pylint: disable=protected-access
+    inspector._socket = fake_socket
+
+    error = socket.error(errno.EAGAIN, "error string")
+    fake_socket.AddResponse(error, 4)
+    fake_socket.AddResponse('{"asdf": "qwer"}', 5)
+
+    result = inspector._Receive()
+    self.assertEqual(result, {"asdf" : "qwer"})
+
+  def testSocketErrorOtherThanEAGAIN(self):
+    inspector = inspector_websocket.InspectorWebsocket()
+    fake_socket = FakeSocket(self._mock_timer)
+    # pylint: disable=protected-access
+    inspector._socket = fake_socket
+
+    error = socket.error(errno.EPIPE, "error string")
+    fake_socket.AddResponse(error, 4)
+
+    self.assertRaises(socket.error, inspector._Receive)
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome_inspector/memory_backend.py b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/memory_backend.py
new file mode 100644
index 0000000..430fe73
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/memory_backend.py
@@ -0,0 +1,95 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import logging
+import socket
+import traceback
+
+from telemetry.internal.backends.chrome_inspector import inspector_websocket
+from telemetry.internal.backends.chrome_inspector import websocket
+
+
+class MemoryTimeoutException(Exception):
+  pass
+
+
+class MemoryUnrecoverableException(Exception):
+  pass
+
+
+class MemoryUnexpectedResponseException(Exception):
+  pass
+
+
+class MemoryBackend(object):
+
+  def __init__(self, inspector_socket):
+    self._inspector_websocket = inspector_socket
+
+  def SetMemoryPressureNotificationsSuppressed(self, suppressed, timeout=30):
+    """Enable/disable suppressing memory pressure notifications.
+
+    Args:
+      suppressed: If true, memory pressure notifications will be suppressed.
+      timeout: The timeout in seconds.
+
+    Raises:
+      MemoryTimeoutException: If more than |timeout| seconds have passed
+      since the last time any data was received.
+      MemoryUnrecoverableException: If there is a websocket error.
+      MemoryUnexpectedResponseException: If the response contains an error
+      or does not contain the expected result.
+    """
+    self._SendMemoryRequest('setPressureNotificationsSuppressed',
+                            {'suppressed': suppressed}, timeout)
+
+  def SimulateMemoryPressureNotification(self, pressure_level, timeout=30):
+    """Simulate a memory pressure notification.
+
+    Args:
+      pressure_level: The memory pressure level of the notification ('moderate'
+          or 'critical').
+      timeout: The timeout in seconds.
+
+    Raises:
+      MemoryTimeoutException: If more than |timeout| seconds have passed
+      since the last time any data was received.
+      MemoryUnrecoverableException: If there is a websocket error.
+      MemoryUnexpectedResponseException: If the response contains an error
+      or does not contain the expected result.
+    """
+    self._SendMemoryRequest('simulatePressureNotification',
+                            {'level': pressure_level}, timeout)
+
+  def _SendMemoryRequest(self, command, params, timeout):
+    method = 'Memory.%s' % command
+    request = {
+      'method': method,
+      'params': params
+    }
+    try:
+      response = self._inspector_websocket.SyncRequest(request, timeout)
+    except websocket.WebSocketTimeoutException:
+      raise MemoryTimeoutException(
+          'Exception raised while sending a %s request:\n%s' %
+              (method, traceback.format_exc()))
+    except (socket.error, websocket.WebSocketException,
+            inspector_websocket.WebSocketDisconnected):
+      raise MemoryUnrecoverableException(
+          'Exception raised while sending a %s request:\n%s' %
+              (method, traceback.format_exc()))
+
+    if 'error' in response:
+      code = response['error']['code']
+      if code == inspector_websocket.InspectorWebsocket.METHOD_NOT_FOUND_CODE:
+        logging.warning(
+            '%s DevTools method not supported by the browser' % method)
+      else:
+        raise MemoryUnexpectedResponseException(
+            'Inspector returned unexpected response for %s:\n%s' %
+                (method, json.dumps(response, indent=2)))
+
+  def Close(self):
+    self._inspector_websocket = None
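
_SendMemoryRequest tolerates browsers without the Memory domain by treating the JSON-RPC method-not-found code as a warning rather than an error. A sketch of that check in isolation (the response dicts are illustrative):

    METHOD_NOT_FOUND_CODE = -32601  # same constant as InspectorWebsocket

    def response_is_acceptable(response):
      # Mirrors _SendMemoryRequest: a missing method is logged and ignored,
      # any other error raises MemoryUnexpectedResponseException.
      if 'error' not in response:
        return True
      return response['error']['code'] == METHOD_NOT_FOUND_CODE

    assert response_is_acceptable({'result': {}})
    assert response_is_acceptable({'error': {'code': -32601}})
    assert not response_is_acceptable({'error': {'code': -32602}})
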
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome_inspector/memory_backend_unittest.py b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/memory_backend_unittest.py
new file mode 100644
index 0000000..5829d1d
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/memory_backend_unittest.py
@@ -0,0 +1,152 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import mock
+import unittest
+
+
+from telemetry.internal.backends.chrome_inspector import inspector_websocket
+from telemetry.internal.backends.chrome_inspector import memory_backend
+from telemetry.testing import fakes
+from telemetry.testing import simple_mock
+from telemetry.testing import tab_test_case
+
+
+class MemoryBackendTest(tab_test_case.TabTestCase):
+
+  def setUp(self):
+    super(MemoryBackendTest, self).setUp()
+    if not self._browser.supports_overriding_memory_pressure_notifications:
+      self.skipTest('Browser does not support overriding memory pressure '
+                    'notification signals, skipping test.')
+
+  def testSetMemoryPressureNotificationsSuppressed(self):
+    def PerformCheck(suppressed):
+      # Check that the method sends the correct DevTools request.
+      with mock.patch.object(inspector_websocket.InspectorWebsocket,
+                             'SyncRequest') as mock_method:
+        self._browser.SetMemoryPressureNotificationsSuppressed(suppressed)
+        self.assertEqual(1, mock_method.call_count)
+        request = mock_method.call_args[0][0]
+        self.assertEqual('Memory.setPressureNotificationsSuppressed',
+                         request['method'])
+        self.assertEqual(suppressed, request['params']['suppressed'])
+
+      # Check that the request and the response from the browser are handled
+      # properly.
+      self._browser.SetMemoryPressureNotificationsSuppressed(suppressed)
+
+    PerformCheck(True)
+    PerformCheck(False)
+
+  def testSimulateMemoryPressureNotification(self):
+    def PerformCheck(pressure_level):
+      # Check that the method sends the correct DevTools request.
+      with mock.patch.object(inspector_websocket.InspectorWebsocket,
+                             'SyncRequest') as mock_method:
+        self._browser.SimulateMemoryPressureNotification(pressure_level)
+        self.assertEqual(1, mock_method.call_count)
+        request = mock_method.call_args[0][0]
+        self.assertEqual('Memory.simulatePressureNotification',
+                         request['method'])
+        self.assertEqual(pressure_level, request['params']['level'])
+
+      # Check that the request and the response from the browser are handled
+      # properly.
+      self._browser.SimulateMemoryPressureNotification(pressure_level)
+
+    PerformCheck('moderate')
+    PerformCheck('critical')
+
+
+class MemoryBackendUnitTest(unittest.TestCase):
+
+  def setUp(self):
+    self._mock_timer = simple_mock.MockTimer()
+    self._inspector_socket = fakes.FakeInspectorWebsocket(self._mock_timer)
+
+  def tearDown(self):
+    self._mock_timer.Restore()
+
+  def testSetMemoryPressureNotificationsSuppressedSuccess(self):
+    response_handler = mock.Mock(return_value={'result': {}})
+    self._inspector_socket.AddResponseHandler(
+        'Memory.setPressureNotificationsSuppressed', response_handler)
+    backend = memory_backend.MemoryBackend(self._inspector_socket)
+
+    backend.SetMemoryPressureNotificationsSuppressed(True)
+    self.assertEqual(1, response_handler.call_count)
+    self.assertTrue(response_handler.call_args[0][0]['params']['suppressed'])
+
+    backend.SetMemoryPressureNotificationsSuppressed(False)
+    self.assertEqual(2, response_handler.call_count)
+    self.assertFalse(response_handler.call_args[0][0]['params']['suppressed'])
+
+  def testSetMemoryPressureNotificationsSuppressedFailure(self):
+    response_handler = mock.Mock()
+    backend = memory_backend.MemoryBackend(self._inspector_socket)
+    self._inspector_socket.AddResponseHandler(
+        'Memory.setPressureNotificationsSuppressed', response_handler)
+
+    # If the DevTools method is missing, the backend should fail silently.
+    response_handler.return_value = {
+      'result': {},
+      'error': {
+        'code': -32601  # Method does not exist.
+      }
+    }
+    backend.SetMemoryPressureNotificationsSuppressed(True)
+    self.assertEqual(1, response_handler.call_count)
+
+    # All other errors should raise an exception.
+    response_handler.return_value = {
+      'result': {},
+      'error': {
+        'code': -32602  # Invalid method params.
+      }
+    }
+    self.assertRaises(memory_backend.MemoryUnexpectedResponseException,
+                      backend.SetMemoryPressureNotificationsSuppressed, True)
+
+  def testSimulateMemoryPressureNotificationSuccess(self):
+    response_handler = mock.Mock(return_value={'result': {}})
+    self._inspector_socket.AddResponseHandler(
+        'Memory.simulatePressureNotification', response_handler)
+    backend = memory_backend.MemoryBackend(self._inspector_socket)
+
+    backend.SimulateMemoryPressureNotification('critical')
+    self.assertEqual(1, response_handler.call_count)
+    self.assertEqual('critical',
+                     response_handler.call_args[0][0]['params']['level'])
+
+    backend.SimulateMemoryPressureNotification('moderate')
+    self.assertEqual(2, response_handler.call_count)
+    self.assertEqual('moderate',
+                     response_handler.call_args[0][0]['params']['level'])
+
+  def testSimulateMemoryPressureNotificationFailure(self):
+    response_handler = mock.Mock()
+    backend = memory_backend.MemoryBackend(self._inspector_socket)
+    self._inspector_socket.AddResponseHandler(
+        'Memory.simulatePressureNotification', response_handler)
+
+    # If the DevTools method is missing, the backend should fail silently.
+    response_handler.return_value = {
+      'result': {},
+      'error': {
+        'code': -32601  # Method does not exist.
+      }
+    }
+    backend.SimulateMemoryPressureNotification('critical')
+    self.assertEqual(1, response_handler.call_count)
+
+    # All other errors should raise an exception.
+    response_handler.return_value = {
+      'result': {},
+      'error': {
+        'code': -32602  # Invalid method params.
+      }
+    }
+    self.assertRaises(memory_backend.MemoryUnexpectedResponseException,
+                      backend.SimulateMemoryPressureNotification, 'critical')
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome_inspector/tracing_backend.py b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/tracing_backend.py
new file mode 100644
index 0000000..85a3cc9
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/tracing_backend.py
@@ -0,0 +1,253 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import logging
+import socket
+import time
+import traceback
+
+from telemetry import decorators
+from telemetry.internal.backends.chrome_inspector import inspector_websocket
+from telemetry.internal.backends.chrome_inspector import websocket
+from telemetry.timeline import trace_data as trace_data_module
+
+
+class TracingUnsupportedException(Exception):
+  pass
+
+
+class TracingTimeoutException(Exception):
+  pass
+
+
+class TracingUnrecoverableException(Exception):
+  pass
+
+
+class TracingHasNotRunException(Exception):
+  pass
+
+
+class TracingUnexpectedResponseException(Exception):
+  pass
+
+
+class _DevToolsStreamReader(object):
+  def __init__(self, inspector_socket, stream_handle):
+    self._inspector_websocket = inspector_socket
+    self._handle = stream_handle
+    self._callback = None
+    self._data = None
+
+  def Read(self, callback):
+    # Do not allow the instance of this class to be reused, as
+    # we only read data sequentially at the moment, so a stream
+    # can only be read once.
+    assert not self._callback
+    self._data = []
+    self._callback = callback
+    self._ReadChunkFromStream()
+    # The below is not a typo -- queue one extra read ahead to avoid latency.
+    self._ReadChunkFromStream()
+
+  def _ReadChunkFromStream(self):
+    # Limit max block size to avoid fragmenting memory in sock.recv(),
+    # (see https://github.com/liris/websocket-client/issues/163 for details)
+    req = {'method': 'IO.read', 'params': {
+        'handle': self._handle, 'size': 32768}}
+    self._inspector_websocket.AsyncRequest(req, self._GotChunkFromStream)
+
+  def _GotChunkFromStream(self, response):
+    # Quietly discard responses from reads queued ahead after EOF.
+    if self._data is None:
+      return
+    if 'error' in response:
+      raise TracingUnrecoverableException(
+          'Reading trace failed: %s' % response['error']['message'])
+    result = response['result']
+    self._data.append(result['data'])
+    if not result.get('eof', False):
+      self._ReadChunkFromStream()
+      return
+    req = {'method': 'IO.close', 'params': {'handle': self._handle}}
+    self._inspector_websocket.SendAndIgnoreResponse(req)
+    trace_string = ''.join(self._data)
+    self._data = None
+    self._callback(trace_string)
+
+
+class TracingBackend(object):
+
+  _TRACING_DOMAIN = 'Tracing'
+
+  def __init__(self, inspector_socket, is_tracing_running=False):
+    self._inspector_websocket = inspector_socket
+    self._inspector_websocket.RegisterDomain(
+        self._TRACING_DOMAIN, self._NotificationHandler)
+    self._trace_events = []
+    self._is_tracing_running = is_tracing_running
+    self._has_received_all_tracing_data = False
+
+  @property
+  def is_tracing_running(self):
+    return self._is_tracing_running
+
+  def StartTracing(self, trace_options, custom_categories=None, timeout=10):
+    """When first called, starts tracing, and returns True.
+
+    If called during tracing, tracing is unchanged, and it returns False.
+    """
+    if self.is_tracing_running:
+      return False
+    # Reset collected tracing data from previous tracing calls.
+    self._trace_events = []
+
+    if not self.IsTracingSupported():
+      raise TracingUnsupportedException(
+          'Chrome tracing not supported for this app.')
+
+    req = {
+      'method': 'Tracing.start',
+      'params': {
+        'options': trace_options.GetTraceOptionsStringForChromeDevtool(),
+        'transferMode': 'ReturnAsStream'
+      }
+    }
+    if custom_categories:
+      req['params']['categories'] = custom_categories
+    logging.info('Start Tracing Request: %s', repr(req))
+    response = self._inspector_websocket.SyncRequest(req, timeout)
+
+    if 'error' in response:
+      raise TracingUnexpectedResponseException(
+          'Inspector returned unexpected response for '
+          'Tracing.start:\n' + json.dumps(response, indent=2))
+
+    self._is_tracing_running = True
+    return True
+
+  def StopTracing(self, trace_data_builder, timeout=30):
+    """Stops tracing and pushes results to the supplied TraceDataBuilder.
+
+    If this is called after tracing has been stopped, trace data from the last
+    tracing run is pushed.
+    """
+    if not self.is_tracing_running:
+      if not self._trace_events:
+        raise TracingHasNotRunException()
+    else:
+      req = {'method': 'Tracing.end'}
+      self._inspector_websocket.SendAndIgnoreResponse(req)
+      # After Tracing.end, the browser sends asynchronous notifications
+      # containing trace data until Tracing.tracingComplete is sent, which
+      # means there are no more trace buffers pending flush.
+      self._CollectTracingData(timeout)
+    self._is_tracing_running = False
+    trace_data_builder.AddEventsTo(
+      trace_data_module.CHROME_TRACE_PART, self._trace_events)
+
+  def DumpMemory(self, timeout=30):
+    """Dumps memory.
+
+    Returns:
+      GUID of the generated dump if successful, None otherwise.
+
+    Raises:
+      TracingTimeoutException: If more than |timeout| seconds have passed
+      since the last time any data was received.
+      TracingUnrecoverableException: If there is a websocket error.
+      TracingUnexpectedResponseException: If the response contains an error
+      or does not contain the expected result.
+    """
+    request = {
+      'method': 'Tracing.requestMemoryDump'
+    }
+    try:
+      response = self._inspector_websocket.SyncRequest(request, timeout)
+    except websocket.WebSocketTimeoutException:
+      raise TracingTimeoutException(
+          'Exception raised while sending a Tracing.requestMemoryDump '
+          'request:\n' + traceback.format_exc())
+    except (socket.error, websocket.WebSocketException,
+            inspector_websocket.WebSocketDisconnected):
+      raise TracingUnrecoverableException(
+          'Exception raised while sending a Tracing.requestMemoryDump '
+          'request:\n' + traceback.format_exc())
+
+
+    if ('error' in response or
+        'result' not in response or
+        'success' not in response['result'] or
+        'dumpGuid' not in response['result']):
+      raise TracingUnexpectedResponseException(
+          'Inspector returned unexpected response for '
+          'Tracing.requestMemoryDump:\n' + json.dumps(response, indent=2))
+
+    result = response['result']
+    return result['dumpGuid'] if result['success'] else None
+
+  def _CollectTracingData(self, timeout):
+    """Collects tracing data. Assumes that Tracing.end has already been sent.
+
+    Args:
+      timeout: The timeout in seconds.
+
+    Raises:
+      TracingTimeoutException: If more than |timeout| seconds have passed
+      since the last time any data was received.
+      TracingUnrecoverableException: If there is a websocket error.
+    """
+    self._has_received_all_tracing_data = False
+    start_time = time.time()
+    while True:
+      try:
+        self._inspector_websocket.DispatchNotifications(timeout)
+        start_time = time.time()
+      except websocket.WebSocketTimeoutException:
+        pass
+      except (socket.error, websocket.WebSocketException):
+        raise TracingUnrecoverableException(
+            'Exception raised while collecting tracing data:\n' +
+                traceback.format_exc())
+
+      if self._has_received_all_tracing_data:
+        break
+
+      elapsed_time = time.time() - start_time
+      if elapsed_time > timeout:
+        raise TracingTimeoutException(
+            'Only received partial trace data due to timeout after %s seconds. '
+            'If the trace data is big, you may want to increase the timeout '
+            'amount.' % elapsed_time)
+
+  def _NotificationHandler(self, res):
+    if 'Tracing.dataCollected' == res.get('method'):
+      value = res.get('params', {}).get('value')
+      self._trace_events.extend(value)
+    elif 'Tracing.tracingComplete' == res.get('method'):
+      stream_handle = res.get('params', {}).get('stream')
+      if not stream_handle:
+        self._has_received_all_tracing_data = True
+        return
+
+      if self._trace_events:
+        raise TracingUnexpectedResponseException(
+            'Got both dataCollected events and a stream from server')
+      reader = _DevToolsStreamReader(self._inspector_websocket, stream_handle)
+      reader.Read(self._ReceivedAllTraceDataFromStream)
+
+  def _ReceivedAllTraceDataFromStream(self, data):
+    self._trace_events = json.loads(data)
+    self._has_received_all_tracing_data = True
+
+  def Close(self):
+    self._inspector_websocket.UnregisterDomain(self._TRACING_DOMAIN)
+    self._inspector_websocket = None
+
+  @decorators.Cache
+  def IsTracingSupported(self):
+    req = {'method': 'Tracing.hasCompleted'}
+    res = self._inspector_websocket.SyncRequest(req)
+    return not res.get('response')
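
The notification handler above either accumulates Tracing.dataCollected batches or, on Tracing.tracingComplete, switches to reading a stream. The accumulation path can be sketched standalone (the event payloads below are illustrative):

    def collect_events(notifications):
      # Mirrors the Tracing.dataCollected branch of _NotificationHandler:
      # each notification carries a list of trace events in params.value.
      trace_events = []
      complete = False
      for res in notifications:
        if res.get('method') == 'Tracing.dataCollected':
          trace_events.extend(res.get('params', {}).get('value', []))
        elif res.get('method') == 'Tracing.tracingComplete':
          complete = True
      return trace_events, complete

    events, complete = collect_events([
        {'method': 'Tracing.dataCollected', 'params': {'value': [{'ph': 'B'}]}},
        {'method': 'Tracing.dataCollected', 'params': {'value': [{'ph': 'E'}]}},
        {'method': 'Tracing.tracingComplete', 'params': {}},
    ])
    assert len(events) == 2 and complete
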
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome_inspector/tracing_backend_unittest.py b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/tracing_backend_unittest.py
new file mode 100644
index 0000000..79708c4
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/tracing_backend_unittest.py
@@ -0,0 +1,222 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import timeit
+import unittest
+
+from telemetry import decorators
+from telemetry.internal.backends.chrome_inspector import tracing_backend
+from telemetry.internal.backends.chrome_inspector.tracing_backend import _DevToolsStreamReader
+from telemetry.testing import fakes
+from telemetry.testing import simple_mock
+from telemetry.testing import tab_test_case
+from telemetry.timeline import model as model_module
+from telemetry.timeline import tracing_config
+
+
+class TracingBackendTest(tab_test_case.TabTestCase):
+
+  # Number of consecutively requested memory dumps.
+  _REQUESTED_DUMP_COUNT = 3
+
+  @classmethod
+  def CustomizeBrowserOptions(cls, options):
+    options.AppendExtraBrowserArgs([
+        # Memory maps currently cannot be retrieved on sandboxed processes.
+        # See crbug.com/461788.
+        '--no-sandbox',
+
+        # Workaround to disable periodic memory dumps. See crbug.com/513692.
+        '--enable-memory-benchmarking'
+    ])
+
+  def setUp(self):
+    super(TracingBackendTest, self).setUp()
+    self._tracing_controller = self._browser.platform.tracing_controller
+    if not self._tracing_controller.IsChromeTracingSupported():
+      self.skipTest('Browser does not support tracing, skipping test.')
+    if not self._browser.supports_memory_dumping:
+      self.skipTest('Browser does not support memory dumping, skipping test.')
+
+  @decorators.Disabled('win')  # crbug.com/570955
+  def testDumpMemorySuccess(self):
+    # Check that dumping memory before tracing starts raises an exception.
+    self.assertRaises(Exception, self._browser.DumpMemory)
+
+    # Start tracing with memory dumps enabled.
+    config = tracing_config.TracingConfig()
+    config.tracing_category_filter.AddDisabledByDefault(
+        'disabled-by-default-memory-infra')
+    config.enable_chrome_trace = True
+    self._tracing_controller.StartTracing(config)
+
+    # Request several memory dumps in a row and test that they were all
+    # successfully created with unique IDs.
+    expected_dump_ids = []
+    for _ in xrange(self._REQUESTED_DUMP_COUNT):
+      dump_id = self._browser.DumpMemory()
+      self.assertIsNotNone(dump_id)
+      self.assertNotIn(dump_id, expected_dump_ids)
+      expected_dump_ids.append(dump_id)
+
+    trace_data = self._tracing_controller.StopTracing()
+
+    # Check that dumping memory after tracing stopped raises an exception.
+    self.assertRaises(Exception, self._browser.DumpMemory)
+
+    # Test that trace data is parsable.
+    model = model_module.TimelineModel(trace_data)
+    self.assertGreater(len(model.processes), 0)
+
+    # Test that the resulting model contains the requested memory dumps in the
+    # correct order (and nothing more).
+    actual_dump_ids = [d.dump_id for d in model.IterGlobalMemoryDumps()]
+    self.assertEqual(actual_dump_ids, expected_dump_ids)
+
+  @decorators.Disabled('win')  # crbug.com/570955
+  def testDumpMemoryFailure(self):
+    # Check that dumping memory before tracing starts raises an exception.
+    self.assertRaises(Exception, self._browser.DumpMemory)
+
+    # Start tracing with memory dumps disabled.
+    config = tracing_config.TracingConfig()
+    config.enable_chrome_trace = True
+    self._tracing_controller.StartTracing(config)
+
+    # Check that the method returns None if the dump was not successful.
+    self.assertIsNone(self._browser.DumpMemory())
+
+    trace_data = self._tracing_controller.StopTracing()
+
+    # Check that dumping memory after tracing stopped raises an exception.
+    self.assertRaises(Exception, self._browser.DumpMemory)
+
+    # Test that trace data is parsable.
+    model = model_module.TimelineModel(trace_data)
+    self.assertGreater(len(model.processes), 0)
+
+    # Test that the resulting model contains no memory dumps.
+    self.assertEqual(len(list(model.IterGlobalMemoryDumps())), 0)
+
+
+class TracingBackendUnitTest(unittest.TestCase):
+
+  def setUp(self):
+    self._mock_timer = simple_mock.MockTimer(tracing_backend)
+    self._inspector_socket = fakes.FakeInspectorWebsocket(self._mock_timer)
+
+  def tearDown(self):
+    self._mock_timer.Restore()
+
+  def testCollectTracingDataTimeout(self):
+    self._inspector_socket.AddEvent(
+        'Tracing.dataCollected', {'value': [{'ph': 'B'}]}, 9)
+    self._inspector_socket.AddEvent(
+        'Tracing.dataCollected', {'value': [{'ph': 'E'}]}, 19)
+    self._inspector_socket.AddEvent('Tracing.tracingComplete', {}, 35)
+    backend = tracing_backend.TracingBackend(self._inspector_socket)
+
+    # The third response is 16 seconds after the second response, so we expect
+    # a TracingTimeoutException.
+    with self.assertRaises(tracing_backend.TracingTimeoutException):
+      backend._CollectTracingData(10)
+    self.assertEqual(2, len(backend._trace_events))
+    self.assertFalse(backend._has_received_all_tracing_data)
+
+  def testCollectTracingDataNoTimeout(self):
+    self._inspector_socket.AddEvent(
+        'Tracing.dataCollected', {'value': [{'ph': 'B'}]}, 9)
+    self._inspector_socket.AddEvent(
+        'Tracing.dataCollected', {'value': [{'ph': 'E'}]}, 14)
+    self._inspector_socket.AddEvent('Tracing.tracingComplete', {}, 19)
+    backend = tracing_backend.TracingBackend(self._inspector_socket)
+
+    backend._CollectTracingData(10)
+    self.assertEqual(2, len(backend._trace_events))
+    self.assertTrue(backend._has_received_all_tracing_data)
+
+  def testCollectTracingDataFromStream(self):
+    self._inspector_socket.AddEvent(
+        'Tracing.tracingComplete', {'stream': '42'}, 1)
+    self._inspector_socket.AddAsyncResponse(
+        'IO.read', {'data': '[{},{},{'}, 2)
+    self._inspector_socket.AddAsyncResponse(
+        'IO.read', {'data': '},{},{}]', 'eof': True}, 3)
+    backend = tracing_backend.TracingBackend(self._inspector_socket)
+
+    backend._CollectTracingData(10)
+    self.assertEqual(5, len(backend._trace_events))
+    self.assertTrue(backend._has_received_all_tracing_data)
+
+  def testDumpMemorySuccess(self):
+    self._inspector_socket.AddResponseHandler(
+        'Tracing.requestMemoryDump',
+        lambda req: {'result': {'success': True, 'dumpGuid': '42abc'}})
+    backend = tracing_backend.TracingBackend(self._inspector_socket)
+
+    self.assertEqual(backend.DumpMemory(), '42abc')
+
+  def testDumpMemoryFailure(self):
+    self._inspector_socket.AddResponseHandler(
+        'Tracing.requestMemoryDump',
+        lambda req: {'result': {'success': False, 'dumpGuid': '42abc'}})
+    backend = tracing_backend.TracingBackend(self._inspector_socket)
+
+    self.assertIsNone(backend.DumpMemory())
+
+  def testStartTracingFailure(self):
+    self._inspector_socket.AddResponseHandler(
+        'Tracing.start',
+        lambda req: {'error': {'message': 'Tracing is already started'}})
+    self._inspector_socket.AddResponseHandler(
+        'Tracing.hasCompleted', lambda req: {})
+    backend = tracing_backend.TracingBackend(self._inspector_socket)
+    self.assertRaisesRegexp(
+        tracing_backend.TracingUnexpectedResponseException,
+        'Tracing is already started',
+        backend.StartTracing, tracing_config.TracingConfig())
+
+
+class DevToolsStreamPerformanceTest(unittest.TestCase):
+  def setUp(self):
+    self._mock_timer = simple_mock.MockTimer(tracing_backend)
+    self._inspector_socket = fakes.FakeInspectorWebsocket(self._mock_timer)
+
+  def _MeasureReadTime(self, count):
+    mock_time = self._mock_timer.time() + 1
+    payload = ','.join(['{}'] * 5000)
+    self._inspector_socket.AddAsyncResponse('IO.read', {'data': '[' + payload},
+                                            mock_time)
+    startClock = timeit.default_timer()
+
+    done = {'done': False}
+    def mark_done(data):
+      del data  # unused
+      done['done'] = True
+
+    reader = _DevToolsStreamReader(self._inspector_socket, 'dummy')
+    reader.Read(mark_done)
+    while not done['done']:
+      mock_time += 1
+      if count > 0:
+        self._inspector_socket.AddAsyncResponse('IO.read', {'data': payload},
+            mock_time)
+      elif count == 0:
+        self._inspector_socket.AddAsyncResponse('IO.read',
+            {'data': payload + ']', 'eof': True}, mock_time)
+      count -= 1
+      self._inspector_socket.DispatchNotifications(10)
+    return timeit.default_timer() - startClock
+
+  def testReadTime(self):
+    n1 = 1000
+    while True:
+      t1 = self._MeasureReadTime(n1)
+      if t1 > 0.01:
+        break
+      n1 *= 5
+    t2 = self._MeasureReadTime(n1 * 10)
+    # Time is an illusion, CPU time doubly so; allow a great deal of tolerance.
+    toleranceFactor = 5
+    self.assertLess(t2, t1 * 10 * toleranceFactor)
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome_inspector/websocket.py b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/websocket.py
new file mode 100644
index 0000000..2195cb6
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/websocket.py
@@ -0,0 +1,25 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+from __future__ import absolute_import
+
+import socket
+
+# pylint: disable=unused-import
+from websocket import create_connection as _create_connection
+from websocket import WebSocketConnectionClosedException
+from websocket import WebSocketException
+from websocket import WebSocketTimeoutException
+
+
+def create_connection(*args, **kwargs):
+  sockopt = kwargs.get('sockopt', [])
+
+  # By default, we set SO_REUSEADDR on all websockets used by Telemetry.
+  # This prevents spurious address in use errors on Windows.
+  #
+  # TODO(tonyg): We may want to set SO_NODELAY here as well.
+  sockopt.append((socket.SOL_SOCKET, socket.SO_REUSEADDR, 1))
+
+  kwargs['sockopt'] = sockopt
+  return _create_connection(*args, **kwargs)
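
The wrapper merges caller-supplied sockopt entries with the SO_REUSEADDR default, as the unit test below also verifies. A sketch of a call with an extra TCP_NODELAY option, assuming a reachable websocket endpoint (the URL is a placeholder):

    import socket

    from telemetry.internal.backends.chrome_inspector import websocket

    ws = websocket.create_connection(
        'ws://127.0.0.1:9222/devtools/browser',  # placeholder endpoint
        sockopt=[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)])
    try:
      # SO_REUSEADDR comes from the wrapper; TCP_NODELAY from the caller.
      assert ws.sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) != 0
      assert ws.sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY) != 0
    finally:
      ws.close()
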
diff --git a/catapult/telemetry/telemetry/internal/backends/chrome_inspector/websocket_unittest.py b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/websocket_unittest.py
new file mode 100644
index 0000000..426043e
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/chrome_inspector/websocket_unittest.py
@@ -0,0 +1,61 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import base64
+import BaseHTTPServer
+import hashlib
+import socket
+import threading
+import unittest
+
+from telemetry.internal.backends.chrome_inspector import websocket
+
+
+# Minimal handler for a local websocket server.
+class _FakeWebSocketHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+  def do_GET(self):
+    key = self.headers.getheader('Sec-WebSocket-Key')
+
+    value = key + '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
+    hashed = base64.encodestring(hashlib.sha1(value).digest()).strip().lower()
+
+    self.send_response(101)
+
+    self.send_header('Sec-Websocket-Accept', hashed)
+    self.send_header('upgrade', 'websocket')
+    self.send_header('connection', 'upgrade')
+    self.end_headers()
+
+    self.wfile.flush()
+
+
+class TestWebSocket(unittest.TestCase):
+  def testExports(self):
+    self.assertNotEqual(websocket.create_connection, None)
+    self.assertNotEqual(websocket.WebSocketException, None)
+    self.assertNotEqual(websocket.WebSocketTimeoutException, None)
+
+  def testSockOpts(self):
+    httpd = BaseHTTPServer.HTTPServer(('127.0.0.1', 0), _FakeWebSocketHandler)
+    ws_url = 'ws://127.0.0.1:%d' % httpd.server_port
+
+    threading.Thread(target=httpd.handle_request).start()
+    ws = websocket.create_connection(ws_url)
+    try:
+      self.assertNotEquals(
+          ws.sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR), 0)
+    finally:
+      ws.close()
+
+    threading.Thread(target=httpd.handle_request).start()
+    ws = websocket.create_connection(
+        ws_url,
+        sockopt=[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)])
+    try:
+      self.assertNotEquals(
+          ws.sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR), 0)
+      self.assertNotEquals(
+          ws.sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY), 0)
+    finally:
+      ws.close()
diff --git a/catapult/telemetry/telemetry/internal/backends/codepen_credentials_backend.py b/catapult/telemetry/telemetry/internal/backends/codepen_credentials_backend.py
new file mode 100644
index 0000000..6cd1a57
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/codepen_credentials_backend.py
@@ -0,0 +1,41 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.internal.backends import form_based_credentials_backend
+
+
+class CodePenCredentialsBackend(
+    form_based_credentials_backend.FormBasedCredentialsBackend):
+
+  @property
+  def logged_in_javascript(self):
+    """Evaluates to true iff already logged in."""
+    return 'document.querySelector(".login-area") === null'
+
+  @property
+  def credentials_type(self):
+    return 'codepen'
+
+  @property
+  def url(self):
+    return 'https://codepen.io/login'
+
+  @property
+  def login_form_id(self):
+    return 'login-login-form'
+
+  @property
+  def login_button_javascript(self):
+    return """
+        LoginSettings.timeOnPageStartTime = 0;
+        document.getElementById("log-in-button").click();
+        """
+
+  @property
+  def login_input_id(self):
+    return 'login-email-field'
+
+  @property
+  def password_input_id(self):
+    return 'login-password-field_'
diff --git a/catapult/telemetry/telemetry/internal/backends/codepen_credentials_backend_unittest.py b/catapult/telemetry/telemetry/internal/backends/codepen_credentials_backend_unittest.py
new file mode 100644
index 0000000..2dc8ce7
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/codepen_credentials_backend_unittest.py
@@ -0,0 +1,19 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+from telemetry.internal.backends import codepen_credentials_backend
+from telemetry.internal.backends \
+      import form_based_credentials_backend_unittest_base
+
+
+class TestCodePenCredentialsBackend(
+    form_based_credentials_backend_unittest_base.
+    FormBasedCredentialsBackendUnitTestBase):
+  def setUp(self):
+    self._credentials_type = 'codepen'
+
+  def testLoginUsingMock(self):
+    backend = codepen_credentials_backend.CodePenCredentialsBackend()
+    self._LoginUsingMock(backend, backend.url, backend.login_input_id,
+                         backend.password_input_id, backend.login_form_id,
+                         backend.logged_in_javascript)
diff --git a/catapult/telemetry/telemetry/internal/backends/facebook_credentials_backend.py b/catapult/telemetry/telemetry/internal/backends/facebook_credentials_backend.py
new file mode 100644
index 0000000..b1a25dc
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/facebook_credentials_backend.py
@@ -0,0 +1,46 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.internal.backends import form_based_credentials_backend
+
+
+class FacebookCredentialsBackend(
+    form_based_credentials_backend.FormBasedCredentialsBackend):
+
+  @property
+  def logged_in_javascript(self):
+    """Evaluates to true iff already logged in."""
+    return ('document.getElementById("fbNotificationsList")!== null || '
+            'document.getElementById("m_home_notice")!== null')
+
+  @property
+  def credentials_type(self):
+    return 'facebook'
+
+  @property
+  def url(self):
+    return 'http://www.facebook.com/'
+
+  @property
+  def login_form_id(self):
+    return 'login_form'
+
+  @property
+  def login_input_id(self):
+    return 'email'
+
+  @property
+  def password_input_id(self):
+    return 'pass'
+
+class FacebookCredentialsBackend2(FacebookCredentialsBackend):
+  """ Facebook credential backend for https client. """
+
+  @property
+  def credentials_type(self):
+    return 'facebook2'
+
+  @property
+  def url(self):
+    return 'https://www.facebook.com/'
diff --git a/catapult/telemetry/telemetry/internal/backends/facebook_credentials_backend_unittest.py b/catapult/telemetry/telemetry/internal/backends/facebook_credentials_backend_unittest.py
new file mode 100644
index 0000000..bbcd34a
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/facebook_credentials_backend_unittest.py
@@ -0,0 +1,19 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+from telemetry.internal.backends import facebook_credentials_backend
+from telemetry.internal.backends \
+       import form_based_credentials_backend_unittest_base
+
+
+class TestFacebookCredentialsBackend(
+    form_based_credentials_backend_unittest_base.
+    FormBasedCredentialsBackendUnitTestBase):
+  def setUp(self):
+    self._credentials_type = 'facebook'
+
+  def testLoginUsingMock(self):
+    backend = facebook_credentials_backend.FacebookCredentialsBackend()
+    self._LoginUsingMock(backend, backend.url, backend.login_input_id,
+                         backend.password_input_id, backend.login_form_id,
+                         backend.logged_in_javascript)
diff --git a/catapult/telemetry/telemetry/internal/backends/form_based_credentials_backend.py b/catapult/telemetry/telemetry/internal/backends/form_based_credentials_backend.py
new file mode 100644
index 0000000..63c2f9b
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/form_based_credentials_backend.py
@@ -0,0 +1,119 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import logging
+
+from telemetry.core import exceptions
+
+
+class FormBasedCredentialsBackend(object):
+  def __init__(self):
+    self._logged_in = False
+
+  def IsAlreadyLoggedIn(self, tab):
+    return tab.EvaluateJavaScript(self.logged_in_javascript)
+
+  @property
+  def credentials_type(self):
+    raise NotImplementedError()
+
+  @property
+  def url(self):
+    raise NotImplementedError()
+
+  @property
+  def login_form_id(self):
+    raise NotImplementedError()
+
+  @property
+  def login_button_javascript(self):
+    """Some sites have custom JS to log in."""
+    return None
+
+  @property
+  def login_input_id(self):
+    raise NotImplementedError()
+
+  @property
+  def password_input_id(self):
+    raise NotImplementedError()
+
+  @property
+  def logged_in_javascript(self):
+    """Evaluates to true iff already logged in."""
+    raise NotImplementedError()
+
+  def IsLoggedIn(self):
+    return self._logged_in
+
+  def _ResetLoggedInState(self):
+    """Makes the backend think we're not logged in even though we are.
+    Should only be used in unit tests to simulate --dont-override-profile.
+    """
+    self._logged_in = False
+
+  def _WaitForLoginState(self, action_runner):
+    """Waits until it can detect either the login form, or already logged in."""
+    condition = '(document.querySelector("#%s") !== null) || (%s)' % (
+        self.login_form_id, self.logged_in_javascript)
+    action_runner.WaitForJavaScriptCondition(condition, 60)
+
+  def _SubmitLoginFormAndWait(self, action_runner, tab, username, password):
+    """Submits the login form and waits for the navigation."""
+    tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()
+    email_js = 'document.querySelector("#%s #%s").value = "%s"; ' % (
+        self.login_form_id, self.login_input_id, username)
+    password_js = 'document.querySelector("#%s #%s").value = "%s"; ' % (
+        self.login_form_id, self.password_input_id, password)
+    tab.ExecuteJavaScript(email_js)
+    tab.ExecuteJavaScript(password_js)
+    if self.login_button_javascript:
+      tab.ExecuteJavaScript(self.login_button_javascript)
+    else:
+      tab.ExecuteJavaScript(
+          'document.getElementById("%s").submit();' % self.login_form_id)
+    # Wait for the navigation triggered by the submit to complete.
+    action_runner.WaitForNavigate()
+
+  def LoginNeeded(self, tab, action_runner, config):
+    """Logs in to a test account.
+
+    Raises:
+      RuntimeError: if credential information could not be obtained.
+    """
+    if self._logged_in:
+      return True
+
+    if 'username' not in config or 'password' not in config:
+      message = ('Credentials for "%s" must include username and password.' %
+                 self.credentials_type)
+      raise RuntimeError(message)
+
+    logging.debug('Logging into %s account...' % self.credentials_type)
+
+    if 'url' in config:
+      url = config['url']
+    else:
+      url = self.url
+
+    try:
+      logging.info('Loading %s...', url)
+      tab.Navigate(url)
+      self._WaitForLoginState(action_runner)
+
+      if self.IsAlreadyLoggedIn(tab):
+        self._logged_in = True
+        return True
+
+      self._SubmitLoginFormAndWait(
+          action_runner, tab, config['username'], config['password'])
+
+      self._logged_in = True
+      return True
+    except exceptions.TimeoutException:
+      logging.warning('Timed out while loading: %s', url)
+      return False
+
+  def LoginNoLongerNeeded(self, tab): # pylint: disable=unused-argument
+    assert self._logged_in
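FormBasedCredentialsBackend defers every site-specific detail (URL, form and input ids, logged-in check) to subclasses, as the concrete backends in this change do. A minimal sketch of such a subclass for a hypothetical site, with made-up element ids, might look like this:

# Illustrative only: 'examplesite', its URL and its element ids are
# hypothetical and not part of this change.
from telemetry.internal.backends import form_based_credentials_backend


class ExampleSiteCredentialsBackend(
    form_based_credentials_backend.FormBasedCredentialsBackend):

  @property
  def credentials_type(self):
    return 'examplesite'

  @property
  def url(self):
    return 'https://www.example.com/login'

  @property
  def login_form_id(self):
    return 'login-form'

  @property
  def login_input_id(self):
    return 'username'

  @property
  def password_input_id(self):
    return 'password'

  @property
  def logged_in_javascript(self):
    # Evaluates to true iff the logged-in UI is present.
    return 'document.getElementById("account-menu") !== null'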
diff --git a/catapult/telemetry/telemetry/internal/backends/form_based_credentials_backend_unittest_base.py b/catapult/telemetry/telemetry/internal/backends/form_based_credentials_backend_unittest_base.py
new file mode 100644
index 0000000..8752c06
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/form_based_credentials_backend_unittest_base.py
@@ -0,0 +1,54 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.testing import simple_mock
+
+_ = simple_mock.DONT_CARE
+
+
+class FormBasedCredentialsBackendUnitTestBase(unittest.TestCase):
+  def setUp(self):
+    self._credentials_type = None
+
+  def testLoginUsingMock(self):
+    raise NotImplementedError()
+
+  def _LoginUsingMock(self, backend, login_page_url, email_element_id,
+                      password_element_id, form_element_id,
+                      already_logged_in_js): # pylint: disable=no-self-use
+    tab = simple_mock.MockObject()
+    ar = simple_mock.MockObject()
+
+    config = {'username': 'blah',
+              'password': 'blargh'}
+
+    tab.ExpectCall('Navigate', login_page_url)
+    tab.ExpectCall('EvaluateJavaScript', already_logged_in_js).WillReturn(False)
+    tab.ExpectCall('WaitForDocumentReadyStateToBeInteractiveOrBetter')
+
+    ar.ExpectCall('WaitForJavaScriptCondition',
+                  '(document.querySelector("#%s") !== null) || (%s)' % (
+                      form_element_id, already_logged_in_js), 60)
+    ar.ExpectCall('WaitForNavigate')
+
+    def VerifyEmail(js):
+      assert email_element_id in js
+      assert 'blah' in js
+    tab.ExpectCall('ExecuteJavaScript', _).WhenCalled(VerifyEmail)
+
+    def VerifyPw(js):
+      assert password_element_id in js
+      assert 'blargh' in js
+    tab.ExpectCall('ExecuteJavaScript', _).WhenCalled(VerifyPw)
+
+    def VerifySubmit(js):
+      assert '.submit' in js or '.click' in js
+    tab.ExpectCall('ExecuteJavaScript', _).WhenCalled(VerifySubmit)
+
+    # Checking for form still up.
+    tab.ExpectCall('EvaluateJavaScript', _).WillReturn(False)
+
+    backend.LoginNeeded(tab, ar, config)
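The mock wiring above uses telemetry.testing.simple_mock: ExpectCall registers an expected call in order, DONT_CARE matches any argument, WillReturn fixes the return value, and WhenCalled runs a checker on the actual argument. A tiny standalone sketch of that pattern, with placeholder script strings, could read:

# Sketch of the simple_mock expectation pattern used above; the script strings
# are placeholders, not part of this change.
from telemetry.testing import simple_mock

_ = simple_mock.DONT_CARE

tab = simple_mock.MockObject()
# Expect an exact call and fix its return value.
tab.ExpectCall('EvaluateJavaScript', 'window.__ready').WillReturn(True)
# Expect a call with any argument and inspect it when it happens.
tab.ExpectCall('ExecuteJavaScript', _).WhenCalled(
    lambda js: None)  # assertions on the script text would go here

# Code under test would then make the calls in the expected order:
assert tab.EvaluateJavaScript('window.__ready')
tab.ExecuteJavaScript('document.forms[0].submit();')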
diff --git a/catapult/telemetry/telemetry/internal/backends/google_credentials_backend.py b/catapult/telemetry/telemetry/internal/backends/google_credentials_backend.py
new file mode 100644
index 0000000..0c43182
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/google_credentials_backend.py
@@ -0,0 +1,46 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.internal.backends import form_based_credentials_backend
+
+
+class GoogleCredentialsBackend(
+    form_based_credentials_backend.FormBasedCredentialsBackend):
+
+  @property
+  def logged_in_javascript(self):
+    """Evaluates to true iff already logged in."""
+    return 'document.getElementById("gb")!== null'
+
+  @property
+  def credentials_type(self):
+    return 'google'
+
+  @property
+  def url(self):
+    # pylint: disable=line-too-long
+    # WPR doesn't support having 2 responses for the same URL (with/without
+    # session cookie), so after login behaviour differs with/without wpr.
+    # Sign-in URL is specified directly to overcome this.
+    return 'https://accounts.google.com/ServiceLogin?continue=https%3A%2F%2Faccounts.google.com%2FManageAccount'
+
+  @property
+  def login_form_id(self):
+    return 'gaia_loginform'
+
+  @property
+  def login_input_id(self):
+    return 'Email'
+
+  @property
+  def password_input_id(self):
+    return 'Passwd'
+
+
+class GoogleCredentialsBackend2(GoogleCredentialsBackend):
+  """ Google credential backend for google2 credential. """
+
+  @property
+  def credentials_type(self):
+    return 'google2'
diff --git a/catapult/telemetry/telemetry/internal/backends/google_credentials_backend_unittest.py b/catapult/telemetry/telemetry/internal/backends/google_credentials_backend_unittest.py
new file mode 100644
index 0000000..b30c051
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/backends/google_credentials_backend_unittest.py
@@ -0,0 +1,19 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+from telemetry.internal.backends import \
+       form_based_credentials_backend_unittest_base
+from telemetry.internal.backends import google_credentials_backend
+
+
+class TestGoogleCredentialsBackend(
+    form_based_credentials_backend_unittest_base.
+    FormBasedCredentialsBackendUnitTestBase):
+  def setUp(self):
+    self._credentials_type = 'google'
+
+  def testLoginUsingMock(self):
+    backend = google_credentials_backend.GoogleCredentialsBackend()
+    self._LoginUsingMock(backend, backend.url, backend.login_input_id,
+                         backend.password_input_id, backend.login_form_id,
+                         backend.logged_in_javascript)
diff --git a/catapult/telemetry/telemetry/internal/bin/README.md b/catapult/telemetry/telemetry/internal/bin/README.md
new file mode 100644
index 0000000..eb42425
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/bin/README.md
@@ -0,0 +1,8 @@
+<!-- Copyright 2015 The Chromium Authors. All rights reserved.
+     Use of this source code is governed by a BSD-style license that can be
+     found in the LICENSE file.
+-->
+Do not check files into this folder.
+____________________________________
+Files are downloaded to this folder from cloud storage by the BinaryManager,
+and any local files may be overwritten without warning.
diff --git a/catapult/telemetry/telemetry/internal/binary_dependencies.json b/catapult/telemetry/telemetry/internal/binary_dependencies.json
new file mode 100644
index 0000000..1a1c9ff
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/binary_dependencies.json
@@ -0,0 +1,329 @@
+{
+  "config_type": "BaseConfig",
+  "dependencies": {
+    "avconv": {
+      "cloud_storage_base_folder": "binary_dependencies",
+      "cloud_storage_bucket": "chromium-telemetry",
+      "file_info": {
+        "linux_x86_64": {
+          "cloud_storage_hash": "03896ec9bdc9f1d6fa388f893d8f62f454e7e707",
+          "download_path": "bin/linux/x86_64/avconv"
+        }
+      }
+    },
+    "bitmaptools": {
+      "cloud_storage_base_folder": "binary_dependencies",
+      "cloud_storage_bucket": "chromium-telemetry",
+      "file_info": {
+        "linux_x86_64": {
+          "cloud_storage_hash": "661ce936b3276f7ec3d687ab62be05b96d796f21",
+          "download_path": "bin/linux/x86_64/bitmaptools"
+        },
+        "mac_x86_64": {
+          "cloud_storage_hash": "c7b1bfc6399dc683058e88dac1ef0f877edea74b",
+          "download_path": "bin/mac/x86_64/bitmaptools"
+        },
+        "win_AMD64": {
+          "cloud_storage_hash": "ac4fee89a51662b9d920bce443c19b9b2929b198",
+          "download_path": "bin/win/AMD64/bitmaptools.exe"
+        },
+        "win_x86": {
+          "cloud_storage_hash": "ac4fee89a51662b9d920bce443c19b9b2929b198",
+          "download_path": "bin/win/x86/bitmaptools.exe"
+        }
+      }
+    },
+    "clear_system_cache": {
+      "cloud_storage_base_folder": "binary_dependencies",
+      "cloud_storage_bucket": "chromium-telemetry",
+      "file_info": {
+        "linux_x86_64": {
+          "cloud_storage_hash": "20739db88864685f6a0be66879e34a83813352cc",
+          "download_path": "bin/linux/x86_64/clear_system_cache"
+        },
+        "mac_x86_64": {
+          "cloud_storage_hash": "0f0ef42a9223592fa234ee5d248f6216fee2b677",
+          "download_path": "bin/mac/x86_64/clear_system_cache"
+        },
+        "win_AMD64": {
+          "cloud_storage_hash": "afe4fc71151f3aa176bc7137f0f6c9396bc18f3b",
+          "download_path": "bin/win/AMD64/clear_system_cache.exe"
+        }
+      }
+    },
+    "crash_service": {
+      "cloud_storage_base_folder": "binary_dependencies",
+      "cloud_storage_bucket": "chromium-telemetry",
+      "file_info": {
+        "win_AMD64": {
+          "cloud_storage_hash": "4a0961e972895f4af3b7cfab959c5bfd4de7174b",
+          "download_path": "bin/win/AMD64/crash_service.exe"
+        }
+      }
+    },
+    "determine_if_keychain_entry_is_decryptable": {
+      "cloud_storage_base_folder": "binary_dependencies",
+      "cloud_storage_bucket": "chromium-telemetry",
+      "file_info": {
+        "mac_x86_64": {
+          "cloud_storage_hash": "5daabb8e5d485a99efc9139634a8242fa60a25e7",
+          "download_path": "bin/mac/x86_64/determine_if_keychain_entry_is_decryptable"
+        }
+      }
+    },
+    "determine_if_keychain_is_locked": {
+      "cloud_storage_base_folder": "binary_dependencies",
+      "cloud_storage_bucket": "chromium-telemetry",
+      "file_info": {
+        "mac_x86_64": {
+          "cloud_storage_hash": "13a57efae9a680ac0f160b3567e02e81f4ac493c",
+          "download_path": "bin/mac/x86_64/determine_if_keychain_is_locked"
+        }
+      }
+    },
+    "device_forwarder": {
+      "cloud_storage_base_folder": "binary_dependencies",
+      "cloud_storage_bucket": "chromium-telemetry",
+      "file_info": {
+        "android_arm64-v8a": {
+          "cloud_storage_hash": "d10c0ddaa8586b20449e951216bee852fa0f8850",
+          "download_path": "bin/android/arm64-v8a/device_forwarder"
+        },
+        "android_armeabi-v7a": {
+          "cloud_storage_hash": "66c3685e8f1ad29f2ed6cb5ea9188df7a42299c4",
+          "download_path": "bin/android/armeabi-v7a/device_forwarder"
+        }
+      }
+    },
+    "file_poller": {
+      "cloud_storage_base_folder": "binary_dependencies",
+      "cloud_storage_bucket": "chromium-telemetry",
+      "file_info": {
+        "android_arm64-v8a": {
+          "cloud_storage_hash": "fd5b417f78c7f7d9192a98967058709ded1d399d",
+          "download_path": "bin/android/arm64-v8a/file_poller"
+        },
+        "android_armeabi-v7a": {
+          "cloud_storage_hash": "cf5c8fe920378ce30d057e76591d57f63fd31c1a",
+          "download_path": "bin/android/armeabi-v7a/file_poller"
+        }
+      }
+    },
+    "gdb": {
+      "cloud_storage_base_folder": "binary_dependencies",
+      "cloud_storage_bucket": "chromium-telemetry",
+      "file_info": {
+        "android_arm": {
+          "cloud_storage_hash": "9231584d135fb76bb7075d96ce387679de7ada7b",
+          "download_path": "bin/android/arm/arm-linux-androideabi-gdb"
+        },
+        "android_x64": {
+          "cloud_storage_hash": "09177be2fed00b44df0e777932828425440b23b3",
+          "download_path": "bin/android/x64/x86_64-linux-androideabi-gdb"
+        },
+        "android_x86": {
+          "cloud_storage_hash": "bcf02af039713a48b69b89bd7f0f9c81ed8183a4",
+          "download_path": "bin/android/x86/i686-linux-androideabi-gdb"
+        }
+      }
+    },
+    "host_forwarder": {
+      "cloud_storage_base_folder": "binary_dependencies",
+      "cloud_storage_bucket": "chromium-telemetry",
+      "file_info": {
+        "linux_x86_64": {
+          "cloud_storage_hash": "e2c2c955105c61ff5ab7e23d71019617894fb7ec",
+          "download_path": "bin/linux/x86_64/host_forwarder"
+        }
+      }
+    },
+    "hprof-conv": {
+      "cloud_storage_base_folder": "binary_dependencies",
+      "cloud_storage_bucket": "chromium-telemetry",
+      "file_info": {
+        "linux_x86_64": {
+          "cloud_storage_hash": "0b14eeee2e2a51cc94e361295379f69ee6f7cf8f",
+          "download_path": "../bin/linux/x86_64/hprof-conv"
+        }
+      }
+    },
+    "ipfw": {
+      "cloud_storage_base_folder": "binary_dependencies",
+      "cloud_storage_bucket": "chromium-telemetry",
+      "file_info": {
+        "linux_x86_64": {
+          "cloud_storage_hash": "0c96ed5a618083100cb2269e9c0400ef73708351",
+          "download_path": "bin/linux/x86_64/ipfw"
+        }
+      }
+    },
+    "ipfw_mod": {
+      "cloud_storage_base_folder": "binary_dependencies",
+      "cloud_storage_bucket": "chromium-telemetry",
+      "file_info": {
+        "linux_x86_64": {
+          "cloud_storage_hash": "91d3336cec855d4ed4aafadf7f9d496f3c09b07c",
+          "download_path": "bin/linux/x86_64/ipfw_mod.ko"
+        }
+      }
+    },
+    "ippet": {
+      "cloud_storage_base_folder": "binary_dependencies",
+      "cloud_storage_bucket": "chromium-telemetry",
+      "file_info": {
+        "win_AMD64": {
+          "cloud_storage_hash": "3022bd478f6a29b7a0f8335b9d8747c00e61977b",
+          "download_path": "bin/win/AMD64/ippet.zip"
+        }
+      }
+    },
+    "md5sum_bin": {
+      "cloud_storage_base_folder": "binary_dependencies",
+      "cloud_storage_bucket": "chromium-telemetry",
+      "file_info": {
+        "android_arm64-v8a": {
+          "cloud_storage_hash": "fca070d85c5c732bc6fb098fc061f53474148dc0",
+          "download_path": "bin/android/arm64-v8a/md5sum_bin"
+        },
+        "android_armeabi-v7a": {
+          "cloud_storage_hash": "293c093c4487b2f1cdff6ed8c93b0214e45c4e7c",
+          "download_path": "bin/android/armeabi-v7a/md5sum_bin"
+        }
+      }
+    },
+    "md5sum_bin_host": {
+      "cloud_storage_base_folder": "binary_dependencies",
+      "cloud_storage_bucket": "chromium-telemetry",
+      "file_info": {
+        "linux_x86_64": {
+          "cloud_storage_hash": "1f5060df87c9332d423bf25a31ffcd4c6c0ef1bf",
+          "download_path": "bin/linux/x86_64/md5sum_bin_host"
+        },
+        "mac_x86_64": {
+          "cloud_storage_hash": "b068e3ba81a1fb7dbfe121edd2dac0c3942eb7e6",
+          "download_path": "bin/mac/x86_64/md5sum_bin_host"
+        }
+      }
+    },
+    "memtrack_helper": {
+      "cloud_storage_base_folder": "binary_dependencies",
+      "cloud_storage_bucket": "chromium-telemetry",
+      "file_info": {
+        "android_arm64-v8a": {
+          "cloud_storage_hash": "4f49c360f969128e12d98d4cf530717946c5ffcf",
+          "download_path": "bin/android/arm64-v8a/memtrack_helper"
+        },
+        "android_armeabi-v7a": {
+          "cloud_storage_hash": "e96693a5ba39c47c6c8b9fd36694ac89616a780a",
+          "download_path": "bin/android/armeabi-v7a/memtrack_helper"
+        }
+      }
+    },
+    "minidump_dump": {
+      "cloud_storage_base_folder": "binary_dependencies",
+      "cloud_storage_bucket": "chromium-telemetry",
+      "file_info": {
+        "linux_x86_64": {
+          "cloud_storage_hash": "7e3711b77837f7851f0be5022ecf086f82225809",
+          "download_path": "bin/linux/x86_64/minidump_dump"
+        },
+        "mac_x86_64": {
+          "cloud_storage_hash": "c39bd7a3b9fa6279893b2d759045699d79ce4dcb",
+          "download_path": "bin/mac/x86_64/minidump_dump"
+        }
+      }
+    },
+    "minidump_stackwalk": {
+      "cloud_storage_base_folder": "binary_dependencies",
+      "cloud_storage_bucket": "chromium-telemetry",
+      "file_info": {
+        "linux_x86_64": {
+          "cloud_storage_hash": "a200a58176ababbdbc69021d4163954f60be7ab5",
+          "download_path": "bin/linux/x86_64/minidump_stackwalk"
+        },
+        "mac_x86_64": {
+          "cloud_storage_hash": "f6243ea3cc4da9a8ad17d16bfa4101e9a50c8ffc",
+          "download_path": "bin/mac/x86_64/minidump_stackwalk"
+        }
+      }
+    },
+    "perf": {
+      "cloud_storage_base_folder": "binary_dependencies",
+      "cloud_storage_bucket": "chromium-telemetry",
+      "file_info": {
+        "android_arm64-v8a": {
+          "cloud_storage_hash": "a43abae56791cf7de2b6f0fcbb0f06035b9e4b07",
+          "download_path": "bin/android/arm64-v8a/perf"
+        },
+        "android_armeabi-v7a": {
+          "cloud_storage_hash": "c0173517d500728a8755eefcc1ed26ba31fce13b",
+          "download_path": "bin/android/armeabi-v7a/perf"
+        }
+      }
+    },
+    "perfhost": {
+      "cloud_storage_base_folder": "binary_dependencies",
+      "cloud_storage_bucket": "chromium-telemetry",
+      "file_info": {
+        "linux_x86_64": {
+          "cloud_storage_hash": "ddb9b3508fe3e8b056071fc7c3d069d13635d371",
+          "download_path": "bin/linux/x86_64/perfhost"
+        }
+      }
+    },
+    "perfhost_precise": {
+      "cloud_storage_base_folder": "binary_dependencies",
+      "cloud_storage_bucket": "chromium-telemetry",
+      "file_info": {
+        "linux_x86_64": {
+          "cloud_storage_hash": "ddb9b3508fe3e8b056071fc7c3d069d13635d371",
+          "download_path": "bin/linux/x86_64/perfhost_precise"
+        }
+      }
+    },
+    "perfhost_trusty": {
+      "cloud_storage_base_folder": "binary_dependencies",
+      "cloud_storage_bucket": "chromium-telemetry",
+      "file_info": {
+        "linux_x86_64": {
+          "cloud_storage_hash": "cd0980a3a0b119bc4764bbbac53429e1d9013e70",
+          "download_path": "bin/linux/x86_64/perfhost_trusty"
+        }
+      }
+    },
+    "purge_ashmem": {
+      "cloud_storage_base_folder": "binary_dependencies",
+      "cloud_storage_bucket": "chromium-telemetry",
+      "file_info": {
+        "android_arm64-v8a": {
+          "cloud_storage_hash": "fb1ef95e54208564418494b016ac525594370db8",
+          "download_path": "bin/android/arm64-v8a/purge_ashmem"
+        },
+        "android_armeabi-v7a": {
+          "cloud_storage_hash": "9c1a2aad6e92b1f407428a972c8acf036c039287",
+          "download_path": "bin/android/armeabi-v7a/purge_ashmem"
+        }
+      }
+    },
+    "tcpdump": {
+      "cloud_storage_base_folder": "binary_dependencies",
+      "cloud_storage_bucket": "chromium-telemetry",
+      "file_info": {
+        "android_armeabi-v7a": {
+          "cloud_storage_hash": "6cf2d56024208a08ceaf3a04ea98d5a362396fca",
+          "download_path": "bin/android/armeabi-v7a/tcpdump"
+        }
+      }
+    },
+    "winring0": {
+      "cloud_storage_base_folder": "binary_dependencies",
+      "cloud_storage_bucket": "chromium-telemetry",
+      "file_info": {
+        "win_AMD64": {
+          "cloud_storage_hash": "978305805fb52d838b3b4b09e9327da81f6973f7",
+          "download_path": "bin/win/AMD64/winring0.zip"
+        }
+      }
+    }
+  }
+}
\ No newline at end of file
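Every dependency above follows the same shape: a cloud-storage bucket and base folder, then per-platform file_info entries keyed like linux_x86_64 or android_armeabi-v7a, each with a cloud_storage_hash and a download_path. A small sketch of reading that structure directly (the helper is illustrative, not part of telemetry's API):

# Sketch: resolve the download path for one dependency/platform pair from the
# binary_dependencies.json added above. LookupDownloadPath is illustrative.
import json


def LookupDownloadPath(config_path, dependency, platform):
  with open(config_path) as f:
    config = json.load(f)
  file_info = config['dependencies'][dependency]['file_info']
  return file_info[platform]['download_path']


# e.g. LookupDownloadPath('telemetry/internal/binary_dependencies.json',
#                         'bitmaptools', 'linux_x86_64')
# returns 'bin/linux/x86_64/bitmaptools'.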
diff --git a/catapult/telemetry/telemetry/internal/browser/__init__.py b/catapult/telemetry/telemetry/internal/browser/__init__.py
new file mode 100644
index 0000000..50b23df
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/browser/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/catapult/telemetry/telemetry/internal/browser/browser.py b/catapult/telemetry/telemetry/internal/browser/browser.py
new file mode 100644
index 0000000..8d65d82
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/browser/browser.py
@@ -0,0 +1,288 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import sys
+
+from catapult_base import cloud_storage  # pylint: disable=import-error
+
+from telemetry.core import exceptions
+from telemetry.core import profiling_controller
+from telemetry import decorators
+from telemetry.internal import app
+from telemetry.internal.backends import browser_backend
+from telemetry.internal.browser import browser_credentials
+from telemetry.internal.browser import extension_dict
+from telemetry.internal.browser import tab_list
+from telemetry.internal.browser import web_contents
+from telemetry.internal.util import exception_formatter
+
+
+class Browser(app.App):
+  """A running browser instance that can be controlled in a limited way.
+
+  To create a browser instance, use browser_finder.FindBrowser.
+
+  Be sure to clean up after yourself by calling Close() when you are done with
+  the browser. Or better yet:
+    browser_to_create = FindBrowser(options)
+    with browser_to_create.Create(options) as browser:
+      ... do all your operations on browser here
+  """
+  def __init__(self, backend, platform_backend, credentials_path):
+    super(Browser, self).__init__(app_backend=backend,
+                                  platform_backend=platform_backend)
+    try:
+      self._browser_backend = backend
+      self._platform_backend = platform_backend
+      self._tabs = tab_list.TabList(backend.tab_list_backend)
+      self.credentials = browser_credentials.BrowserCredentials()
+      self.credentials.credentials_path = credentials_path
+      self._platform_backend.DidCreateBrowser(self, self._browser_backend)
+      browser_options = self._browser_backend.browser_options
+      self.platform.FlushDnsCache()
+      if browser_options.clear_sytem_cache_for_browser_and_profile_on_start:
+        if self.platform.CanFlushIndividualFilesFromSystemCache():
+          self.platform.FlushSystemCacheForDirectory(
+              self._browser_backend.profile_directory)
+          self.platform.FlushSystemCacheForDirectory(
+              self._browser_backend.browser_directory)
+        else:
+          self.platform.FlushEntireSystemCache()
+
+      self._browser_backend.SetBrowser(self)
+      self._browser_backend.Start()
+      self._platform_backend.DidStartBrowser(self, self._browser_backend)
+      self._profiling_controller = profiling_controller.ProfilingController(
+          self._browser_backend.profiling_controller_backend)
+    except Exception:
+      exc_info = sys.exc_info()
+      logging.exception('Failure while starting browser backend.')
+      try:
+        self._platform_backend.WillCloseBrowser(self, self._browser_backend)
+      except Exception:
+        exception_formatter.PrintFormattedException(
+            msg='Exception raised while closing platform backend')
+      raise exc_info[0], exc_info[1], exc_info[2]
+
+  @property
+  def profiling_controller(self):
+    return self._profiling_controller
+
+  @property
+  def browser_type(self):
+    return self.app_type
+
+  @property
+  def supports_extensions(self):
+    return self._browser_backend.supports_extensions
+
+  @property
+  def supports_tab_control(self):
+    return self._browser_backend.supports_tab_control
+
+  @property
+  def tabs(self):
+    return self._tabs
+
+  @property
+  def foreground_tab(self):
+    for i in xrange(len(self._tabs)):
+      # The foreground tab is the first (only) one that isn't hidden.
+      # This only works through luck on Android, due to crbug.com/322544
+      # which means that tabs that have never been in the foreground return
+      # document.hidden as false; however in current code the Android foreground
+      # tab is always tab 0, which will be the first one that isn't hidden
+      if self._tabs[i].EvaluateJavaScript('!document.hidden'):
+        return self._tabs[i]
+    raise Exception("No foreground tab found")
+
+  @property
+  @decorators.Cache
+  def extensions(self):
+    if not self.supports_extensions:
+      raise browser_backend.ExtensionsNotSupportedException(
+          'Extensions not supported')
+    return extension_dict.ExtensionDict(self._browser_backend.extension_backend)
+
+  def _GetStatsCommon(self, pid_stats_function):
+    browser_pid = self._browser_backend.pid
+    result = {
+        'Browser': dict(pid_stats_function(browser_pid), **{'ProcessCount': 1}),
+        'Renderer': {'ProcessCount': 0},
+        'Gpu': {'ProcessCount': 0},
+        'Other': {'ProcessCount': 0}
+    }
+    process_count = 1
+    for child_pid in self._platform_backend.GetChildPids(browser_pid):
+      try:
+        child_cmd_line = self._platform_backend.GetCommandLine(child_pid)
+        child_stats = pid_stats_function(child_pid)
+      except exceptions.ProcessGoneException:
+        # It is perfectly fine for a process to have gone away between calling
+        # GetChildPids() and then further examining it.
+        continue
+      child_process_name = self._browser_backend.GetProcessName(child_cmd_line)
+      process_name_type_key_map = {'gpu-process': 'Gpu', 'renderer': 'Renderer'}
+      if child_process_name in process_name_type_key_map:
+        child_process_type_key = process_name_type_key_map[child_process_name]
+      else:
+        # TODO: identify other process types (zygote, plugin, etc), instead of
+        # lumping them in a single category.
+        child_process_type_key = 'Other'
+      result[child_process_type_key]['ProcessCount'] += 1
+      for k, v in child_stats.iteritems():
+        if k in result[child_process_type_key]:
+          result[child_process_type_key][k] += v
+        else:
+          result[child_process_type_key][k] = v
+      process_count += 1
+    for v in result.itervalues():
+      if v['ProcessCount'] > 1:
+        for k in v.keys():
+          if k.endswith('Peak'):
+            del v[k]
+      del v['ProcessCount']
+    result['ProcessCount'] = process_count
+    return result
+
+  @property
+  def memory_stats(self):
+    """Returns a dict of memory statistics for the browser:
+    { 'Browser': {
+        'VM': R,
+        'VMPeak': S,
+        'WorkingSetSize': T,
+        'WorkingSetSizePeak': U,
+        'ProportionalSetSize': V,
+        'PrivateDirty': W
+      },
+      'Gpu': {
+        'VM': R,
+        'VMPeak': S,
+        'WorkingSetSize': T,
+        'WorkingSetSizePeak': U,
+        'ProportionalSetSize': V,
+        'PrivateDirty': W
+      },
+      'Renderer': {
+        'VM': R,
+        'VMPeak': S,
+        'WorkingSetSize': T,
+        'WorkingSetSizePeak': U,
+        'ProportionalSetSize': V,
+        'PrivateDirty': W
+      },
+      'SystemCommitCharge': X,
+      'SystemTotalPhysicalMemory': Y,
+      'ProcessCount': Z,
+    }
+    Any of the above keys may be missing on a per-platform basis.
+    """
+    self._platform_backend.PurgeUnpinnedMemory()
+    result = self._GetStatsCommon(self._platform_backend.GetMemoryStats)
+    commit_charge = self._platform_backend.GetSystemCommitCharge()
+    if commit_charge:
+      result['SystemCommitCharge'] = commit_charge
+    total = self._platform_backend.GetSystemTotalPhysicalMemory()
+    if total:
+      result['SystemTotalPhysicalMemory'] = total
+    return result
+
+  @property
+  def cpu_stats(self):
+    """Returns a dict of cpu statistics for the system.
+    { 'Browser': {
+        'CpuProcessTime': S,
+        'TotalTime': T
+      },
+      'Gpu': {
+        'CpuProcessTime': S,
+        'TotalTime': T
+      },
+      'Renderer': {
+        'CpuProcessTime': S,
+        'TotalTime': T
+      }
+    }
+    Any of the above keys may be missing on a per-platform basis.
+    """
+    result = self._GetStatsCommon(self._platform_backend.GetCpuStats)
+    del result['ProcessCount']
+
+    # We want a single time value, not the sum for all processes.
+    cpu_timestamp = self._platform_backend.GetCpuTimestamp()
+    for process_type in result:
+      # Skip any process_types that are empty
+      if not len(result[process_type]):
+        continue
+      result[process_type].update(cpu_timestamp)
+    return result
+
+  def Close(self):
+    """Closes this browser."""
+    try:
+      if self._browser_backend.IsBrowserRunning():
+        self._platform_backend.WillCloseBrowser(self, self._browser_backend)
+
+      self._browser_backend.profiling_controller_backend.WillCloseBrowser()
+      if self._browser_backend.supports_uploading_logs:
+        try:
+          self._browser_backend.UploadLogsToCloudStorage()
+        except cloud_storage.CloudStorageError as e:
+          logging.error('Cannot upload browser log: %s' % str(e))
+    finally:
+      self._browser_backend.Close()
+      self.credentials = None
+
+  def GetStandardOutput(self):
+    return self._browser_backend.GetStandardOutput()
+
+  def GetStackTrace(self):
+    return self._browser_backend.GetStackTrace()
+
+  @property
+  def supports_system_info(self):
+    return self._browser_backend.supports_system_info
+
+  def GetSystemInfo(self):
+    """Returns low-level information about the system, if available.
+
+       See the documentation of the SystemInfo class for more details."""
+    return self._browser_backend.GetSystemInfo()
+
+  @property
+  def supports_memory_dumping(self):
+    return self._browser_backend.supports_memory_dumping
+
+  def DumpMemory(self, timeout=web_contents.DEFAULT_WEB_CONTENTS_TIMEOUT):
+    return self._browser_backend.DumpMemory(timeout)
+
+  @property
+  def supports_overriding_memory_pressure_notifications(self):
+    return (
+        self._browser_backend.supports_overriding_memory_pressure_notifications)
+
+  def SetMemoryPressureNotificationsSuppressed(
+      self, suppressed, timeout=web_contents.DEFAULT_WEB_CONTENTS_TIMEOUT):
+    self._browser_backend.SetMemoryPressureNotificationsSuppressed(
+        suppressed, timeout)
+
+  def SimulateMemoryPressureNotification(
+      self, pressure_level, timeout=web_contents.DEFAULT_WEB_CONTENTS_TIMEOUT):
+    self._browser_backend.SimulateMemoryPressureNotification(
+        pressure_level, timeout)
+
+  @property
+  def supports_cpu_metrics(self):
+    return self._browser_backend.supports_cpu_metrics
+
+  @property
+  def supports_memory_metrics(self):
+    return self._browser_backend.supports_memory_metrics
+
+  @property
+  def supports_power_metrics(self):
+    return self._browser_backend.supports_power_metrics
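The Browser docstring above recommends creating instances through browser_finder.FindBrowser and using the result as a context manager. A short usage sketch along those lines, assuming finder_options is a BrowserFinderOptions that has already been populated (e.g. via its CreateParser()/parse_args() flow):

# Lifecycle sketch based on the Browser docstring; 'finder_options' is assumed
# to be an already-configured BrowserFinderOptions instance.
from telemetry.internal.browser import browser_finder

possible_browser = browser_finder.FindBrowser(finder_options)
with possible_browser.Create(finder_options) as browser:
  tab = browser.tabs[0]
  tab.Navigate('http://www.example.com/')
  print browser.cpu_stats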
diff --git a/catapult/telemetry/telemetry/internal/browser/browser_credentials.py b/catapult/telemetry/telemetry/internal/browser/browser_credentials.py
new file mode 100644
index 0000000..54b8fd1
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/browser/browser_credentials.py
@@ -0,0 +1,150 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import logging
+import os
+
+from telemetry.core import util
+from telemetry.internal.backends import codepen_credentials_backend
+from telemetry.internal.backends import facebook_credentials_backend
+from telemetry.internal.backends import google_credentials_backend
+from telemetry.testing import options_for_unittests
+
+
+class CredentialsError(Exception):
+  """Error that can be thrown when logging in."""
+
+
+class BrowserCredentials(object):
+  def __init__(self, backends=None):
+    self._credentials = {}
+    self._credentials_path = None
+    self._extra_credentials = {}
+
+    if backends is None:
+      backends = [
+        codepen_credentials_backend.CodePenCredentialsBackend(),
+        facebook_credentials_backend.FacebookCredentialsBackend(),
+        facebook_credentials_backend.FacebookCredentialsBackend2(),
+        google_credentials_backend.GoogleCredentialsBackend(),
+        google_credentials_backend.GoogleCredentialsBackend2()]
+
+    self._backends = {}
+    for backend in backends:
+      self._backends[backend.credentials_type] = backend
+
+  def AddBackend(self, backend):
+    assert backend.credentials_type not in self._backends
+    self._backends[backend.credentials_type] = backend
+
+  def IsLoggedIn(self, credentials_type):
+    if credentials_type not in self._backends:
+      raise CredentialsError(
+          'Unrecognized credentials type: %s' % credentials_type)
+    if credentials_type not in self._credentials:
+      return False
+    return self._backends[credentials_type].IsLoggedIn()
+
+  def CanLogin(self, credentials_type):
+    if credentials_type not in self._backends:
+      raise CredentialsError(
+          'Unrecognized credentials type: %s' % credentials_type)
+    return credentials_type in self._credentials
+
+  def LoginNeeded(self, tab, credentials_type):
+    if credentials_type not in self._backends:
+      raise CredentialsError(
+          'Unrecognized credentials type: %s' % credentials_type)
+    if credentials_type not in self._credentials:
+      return False
+    from telemetry.page import action_runner
+    runner = action_runner.ActionRunner(tab)
+    return self._backends[credentials_type].LoginNeeded(
+      tab, runner, self._credentials[credentials_type])
+
+  def LoginNoLongerNeeded(self, tab, credentials_type):
+    assert credentials_type in self._backends
+    self._backends[credentials_type].LoginNoLongerNeeded(tab)
+
+  @property
+  def credentials_path(self):
+    return self._credentials_path
+
+  @credentials_path.setter
+  def credentials_path(self, credentials_path):
+    self._credentials_path = credentials_path
+    self._RebuildCredentials()
+
+  def Add(self, credentials_type, data):
+    if credentials_type not in self._extra_credentials:
+      self._extra_credentials[credentials_type] = {}
+    for k, v in data.items():
+      assert k not in self._extra_credentials[credentials_type]
+      self._extra_credentials[credentials_type][k] = v
+    self._RebuildCredentials()
+
+  def _ResetLoggedInState(self):
+    """Makes the backends think we're not logged in even though we are.
+    Should only be used in unit tests to simulate --dont-override-profile.
+    """
+    for backend in self._backends.keys():
+      # pylint: disable=protected-access
+      self._backends[backend]._ResetLoggedInState()
+
+  def _RebuildCredentials(self):
+    credentials = {}
+    if self._credentials_path == None:
+      pass
+    elif os.path.exists(self._credentials_path):
+      with open(self._credentials_path, 'r') as f:
+        credentials = json.loads(f.read())
+
+    # TODO(nduca): use system keychain, if possible.
+    homedir_credentials_path = os.path.expanduser('~/.telemetry-credentials')
+    homedir_credentials = {}
+
+    if (not options_for_unittests.GetCopy() and
+        os.path.exists(homedir_credentials_path)):
+      logging.info("Found ~/.telemetry-credentials. Its contents will be used "
+                   "when no other credentials can be found.")
+      with open(homedir_credentials_path, 'r') as f:
+        homedir_credentials = json.loads(f.read())
+
+    self._credentials = {}
+    all_keys = set(credentials.keys()).union(
+      homedir_credentials.keys()).union(
+      self._extra_credentials.keys())
+
+    for k in all_keys:
+      if k in credentials:
+        self._credentials[k] = credentials[k]
+      if k in homedir_credentials:
+        logging.info("Will use ~/.telemetry-credentials for %s logins." % k)
+        self._credentials[k] = homedir_credentials[k]
+      if k in self._extra_credentials:
+        self._credentials[k] = self._extra_credentials[k]
+
+  def WarnIfMissingCredentials(self, page):
+    if page.credentials and not self.CanLogin(page.credentials):
+      files_to_tweak = []
+      if page.credentials_path:
+        files_to_tweak.append(page.credentials_path)
+      files_to_tweak.append('~/.telemetry-credentials')
+
+      example_credentials_file = os.path.join(
+          util.GetTelemetryDir(), 'examples', 'credentials_example.json')
+
+      logging.warning("""
+        Credentials for %s were not found. Page %s will not be tested.
+
+        To fix this, either follow the instructions to authenticate to gsutil
+        here:
+        http://www.chromium.org/developers/telemetry/upload_to_cloud_storage,
+
+        or add your own credentials to:
+            %s
+        An example credentials file you can copy from is here:
+            %s\n""" % (page.credentials, page, ' or '.join(files_to_tweak),
+                       example_credentials_file))
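Besides the file at credentials_path and ~/.telemetry-credentials, credentials can be injected programmatically through Add. A brief sketch with placeholder values:

# Sketch only: placeholder credentials for the 'google' backend registered by
# the default backend list in BrowserCredentials.__init__.
from telemetry.internal.browser import browser_credentials

creds = browser_credentials.BrowserCredentials()
creds.Add('google', {'username': 'example', 'password': 'placeholder'})
assert creds.CanLogin('google')
# With a live tab, creds.LoginNeeded(tab, 'google') would then drive the
# GoogleCredentialsBackend added in this change.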
diff --git a/catapult/telemetry/telemetry/internal/browser/browser_credentials_unittest.py b/catapult/telemetry/telemetry/internal/browser/browser_credentials_unittest.py
new file mode 100644
index 0000000..b172554
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/browser/browser_credentials_unittest.py
@@ -0,0 +1,72 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import os
+import tempfile
+import unittest
+
+from telemetry.internal.browser import browser_credentials
+
+
+SIMPLE_CREDENTIALS_STRING = """
+{
+  "google": {
+    "username": "example",
+    "password": "asdf"
+  }
+}
+"""
+
+
+class BackendStub(object):
+  def __init__(self, credentials_type):
+    self.login_needed_called = None
+    self.login_no_longer_needed_called = None
+    self.credentials_type = credentials_type
+
+  def LoginNeeded(self, tab, _, config):
+    self.login_needed_called = (tab, config)
+    return True
+
+  def LoginNoLongerNeeded(self, tab):
+    self.login_no_longer_needed_called = (tab, )
+
+
+class TestBrowserCredentials(unittest.TestCase):
+  def testCredentialsInfrastructure(self):
+    google_backend = BackendStub("google")
+    othersite_backend = BackendStub("othersite")
+    browser_cred = browser_credentials.BrowserCredentials(
+      [google_backend,
+       othersite_backend])
+    try:
+      with tempfile.NamedTemporaryFile(delete=False) as f:
+        f.write(SIMPLE_CREDENTIALS_STRING)
+
+      browser_cred.credentials_path = f.name
+
+      # Should be true because it has a password and a backend.
+      self.assertTrue(browser_cred.CanLogin('google'))
+
+      # Should be false because it has no password.
+      self.assertFalse(browser_cred.CanLogin('othersite'))
+
+      # Should fail because it has no backend.
+      self.assertRaises(
+        Exception,
+        lambda: browser_cred.CanLogin('foobar'))
+
+      tab = {}
+      ret = browser_cred.LoginNeeded(tab, 'google')
+      self.assertTrue(ret)
+      self.assertTrue(google_backend.login_needed_called is not None)
+      self.assertEqual(tab, google_backend.login_needed_called[0])
+      self.assertEqual("example",
+                       google_backend.login_needed_called[1]["username"])
+      self.assertEqual("asdf",
+                       google_backend.login_needed_called[1]["password"])
+
+      browser_cred.LoginNoLongerNeeded(tab, 'google')
+      self.assertTrue(google_backend.login_no_longer_needed_called is not None)
+      self.assertEqual(tab, google_backend.login_no_longer_needed_called[0])
+    finally:
+      os.remove(f.name)
diff --git a/catapult/telemetry/telemetry/internal/browser/browser_finder.py b/catapult/telemetry/telemetry/internal/browser/browser_finder.py
new file mode 100644
index 0000000..92e5acc
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/browser/browser_finder.py
@@ -0,0 +1,178 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Finds browsers that can be controlled by telemetry."""
+
+import logging
+import operator
+
+from telemetry import decorators
+from telemetry.internal.backends.chrome import android_browser_finder
+from telemetry.internal.backends.chrome import cros_browser_finder
+from telemetry.internal.backends.chrome import desktop_browser_finder
+from telemetry.internal.backends.chrome import ios_browser_finder
+from telemetry.internal.browser import browser_finder_exceptions
+from telemetry.internal.platform import device_finder
+
+BROWSER_FINDERS = [
+  desktop_browser_finder,
+  android_browser_finder,
+  cros_browser_finder,
+  ios_browser_finder,
+  ]
+
+
+def FindAllBrowserTypes(options):
+  return reduce(operator.add,
+                [bf.FindAllBrowserTypes(options) for bf in BROWSER_FINDERS])
+
+
+@decorators.Cache
+def FindBrowser(options):
+  """Finds the best PossibleBrowser object given a BrowserOptions object.
+
+  Args:
+    options: A BrowserOptions object.
+
+  Returns:
+    A PossibleBrowser object.
+
+  Raises:
+    BrowserFinderException: Options improperly set, or an error occurred.
+  """
+  if options.__class__.__name__ == '_FakeBrowserFinderOptions':
+    return options.fake_possible_browser
+  if options.browser_type == 'exact' and options.browser_executable == None:
+    raise browser_finder_exceptions.BrowserFinderException(
+        '--browser=exact requires --browser-executable to be set.')
+  if options.browser_type != 'exact' and options.browser_executable != None:
+    raise browser_finder_exceptions.BrowserFinderException(
+        '--browser-executable requires --browser=exact.')
+
+  if options.browser_type == 'cros-chrome' and options.cros_remote == None:
+    raise browser_finder_exceptions.BrowserFinderException(
+        'browser_type=cros-chrome requires cros_remote be set.')
+  if (options.browser_type != 'cros-chrome' and
+      options.browser_type != 'cros-chrome-guest' and
+      options.cros_remote != None):
+    raise browser_finder_exceptions.BrowserFinderException(
+        '--remote requires --browser=cros-chrome or cros-chrome-guest.')
+
+  devices = device_finder.GetDevicesMatchingOptions(options)
+  browsers = []
+  default_browsers = []
+  for device in devices:
+    for finder in BROWSER_FINDERS:
+      if (options.browser_type and options.browser_type != 'any' and
+          options.browser_type not in finder.FindAllBrowserTypes(options)):
+        continue
+      curr_browsers = finder.FindAllAvailableBrowsers(options, device)
+      new_default_browser = finder.SelectDefaultBrowser(curr_browsers)
+      if new_default_browser:
+        default_browsers.append(new_default_browser)
+      browsers.extend(curr_browsers)
+
+  if options.browser_type == None:
+    if default_browsers:
+      default_browser = sorted(default_browsers,
+                               key=lambda b: b.last_modification_time())[-1]
+
+      logging.warning('--browser omitted. Using most recent local build: %s' %
+                      default_browser.browser_type)
+      default_browser.UpdateExecutableIfNeeded()
+      return default_browser
+
+    if len(browsers) == 1:
+      logging.warning('--browser omitted. Using only available browser: %s' %
+                      browsers[0].browser_type)
+      browsers[0].UpdateExecutableIfNeeded()
+      return browsers[0]
+
+    raise browser_finder_exceptions.BrowserTypeRequiredException(
+        '--browser must be specified. Available browsers:\n%s' %
+        '\n'.join(sorted(set([b.browser_type for b in browsers]))))
+
+  if options.browser_type == 'any':
+    types = FindAllBrowserTypes(options)
+    def CompareBrowsersOnTypePriority(x, y):
+      x_idx = types.index(x.browser_type)
+      y_idx = types.index(y.browser_type)
+      return x_idx - y_idx
+    browsers.sort(CompareBrowsersOnTypePriority)
+    if len(browsers) >= 1:
+      browsers[0].UpdateExecutableIfNeeded()
+      return browsers[0]
+    else:
+      return None
+
+  matching_browsers = [b for b in browsers
+      if b.browser_type == options.browser_type and b.SupportsOptions(options)]
+
+  chosen_browser = None
+  if len(matching_browsers) == 1:
+    chosen_browser = matching_browsers[0]
+  elif len(matching_browsers) > 1:
+    logging.warning('Multiple browsers of the same type found: %s' % (
+                    repr(matching_browsers)))
+    chosen_browser = sorted(matching_browsers,
+                            key=lambda b: b.last_modification_time())[-1]
+
+  if chosen_browser:
+    logging.info('Chose browser: %s' % (repr(chosen_browser)))
+    chosen_browser.UpdateExecutableIfNeeded()
+
+  return chosen_browser
+
+
+@decorators.Cache
+def GetAllAvailableBrowsers(options, device):
+  """Returns a list of available browsers on the device.
+
+  Args:
+    options: A BrowserOptions object.
+    device: The target device, which can be None.
+
+  Returns:
+    A list of PossibleBrowser instances.
+
+  Raises:
+    BrowserFinderException: Options are improperly set, or an error occurred.
+  """
+  if not device:
+    return []
+  possible_browsers = []
+  for browser_finder in BROWSER_FINDERS:
+    possible_browsers.extend(
+      browser_finder.FindAllAvailableBrowsers(options, device))
+  return possible_browsers
+
+
+@decorators.Cache
+def GetAllAvailableBrowserTypes(options):
+  """Returns a list of available browser types.
+
+  Args:
+    options: A BrowserOptions object.
+
+  Returns:
+    A list of browser type strings.
+
+  Raises:
+    BrowserFinderException: Options are improperly set, or an error occurred.
+  """
+  devices = device_finder.GetDevicesMatchingOptions(options)
+  possible_browsers = []
+  for device in devices:
+    possible_browsers.extend(GetAllAvailableBrowsers(options, device))
+  type_list = set([browser.browser_type for browser in possible_browsers])
+  # The reference build should be available for mac, linux and win, but the
+  # desktop browser finder won't return it in the list of browsers.
+  for browser in possible_browsers:
+    if (browser.target_os == 'darwin' or browser.target_os.startswith('linux')
+        or browser.target_os.startswith('win')):
+      type_list.add('reference')
+      break
+  type_list = list(type_list)
+  type_list.sort()
+  return type_list
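FindBrowser resolves --browser=any by the priority order of BROWSER_FINDERS and falls back to the most recently built local browser when --browser is omitted. A sketch of listing the type strings those finders report for a given options object (finder_options assumed configured, as in the earlier sketch):

# Sketch: enumerate the browser type strings known to the registered finders.
# 'finder_options' is assumed to be a configured BrowserFinderOptions.
from telemetry.internal.browser import browser_finder

types = browser_finder.FindAllBrowserTypes(finder_options)
for browser_type in sorted(set(types)):
  print browser_type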
diff --git a/catapult/telemetry/telemetry/internal/browser/browser_finder_exceptions.py b/catapult/telemetry/telemetry/internal/browser/browser_finder_exceptions.py
new file mode 100644
index 0000000..76fbdcf
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/browser/browser_finder_exceptions.py
@@ -0,0 +1,11 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class BrowserTypeRequiredException(Exception):
+  pass
+
+
+class BrowserFinderException(Exception):
+  pass
diff --git a/catapult/telemetry/telemetry/internal/browser/browser_info.py b/catapult/telemetry/telemetry/internal/browser/browser_info.py
new file mode 100644
index 0000000..5db7150
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/browser/browser_info.py
@@ -0,0 +1,70 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+_check_webgl_supported_script = """
+(function () {
+  var c = document.createElement('canvas');
+  var gl = c.getContext('webgl', { failIfMajorPerformanceCaveat: true });
+  if (gl == null) {
+    gl = c.getContext('experimental-webgl',
+        { failIfMajorPerformanceCaveat: true });
+    if (gl == null) {
+      return false;
+    }
+  }
+  return true;
+})();
+"""
+
+
+class BrowserInfo(object):
+  """A wrapper around a Browser object that exposes information about it."""
+  def __init__(self, browser):
+    self._browser = browser
+
+  def HasWebGLSupport(self):
+    result = False
+    # If no tab is open, open one, evaluate _check_webgl_supported_script in
+    # it, and close it again.
+    if len(self._browser.tabs) == 0 and self._browser.supports_tab_control:
+      self._browser.tabs.New()
+      tab = self._browser.tabs[0]
+      result = tab.EvaluateJavaScript(_check_webgl_supported_script)
+      tab.Close()
+    elif len(self._browser.tabs) > 0:
+      tab = self._browser.tabs[0]
+      result = tab.EvaluateJavaScript(_check_webgl_supported_script)
+    return result
+
+  def HasFlingGestureSupport(self):
+    # Synthetic fling gestures weren't properly tracked by telemetry until
+    # Chromium branch number 2339 (see crrev.com/1003023002).
+    # TODO(jdduke): Resolve lack of branch number support for content_shell
+    # targets, see crbug.com/470273.
+    branch_num = (
+        self._browser._browser_backend.devtools_client.GetChromeBranchNumber())
+    return branch_num >= 2339
+
+  def HasDiagonalScrollingSupport(self):
+    # Diagonal scrolling was not supported in the ScrollAction until
+    # Chromium branch number 2332
+    branch_num = (
+        self._browser._browser_backend.devtools_client.GetChromeBranchNumber())
+    return branch_num >= 2332
+
+  def HasRepeatableSynthesizeScrollGesture(self):
+    # Repeatable SynthesizeScrollGesture scrolling was not supported until
+    # Chromium branch number 2480
+    branch_num = (
+        self._browser._browser_backend.devtools_client.GetChromeBranchNumber())
+    return branch_num >= 2480
+
+  @property
+  def browser_type(self):
+    return self._browser.browser_type
+
+  @property
+  def browser(self):
+    return self._browser
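BrowserInfo simply wraps an existing Browser and answers capability questions about it. A quick sketch, with the browser instance assumed to come from browser_finder as above:

# Sketch: query capabilities through BrowserInfo; 'browser' is assumed to be a
# live Browser instance obtained elsewhere.
from telemetry.internal.browser import browser_info

info = browser_info.BrowserInfo(browser)
if info.HasWebGLSupport() and info.HasRepeatableSynthesizeScrollGesture():
  pass  # WebGL- and gesture-dependent test paths could be enabled here.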
diff --git a/catapult/telemetry/telemetry/internal/browser/browser_options.py b/catapult/telemetry/telemetry/internal/browser/browser_options.py
new file mode 100644
index 0000000..034116d
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/browser/browser_options.py
@@ -0,0 +1,428 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import copy
+import logging
+import optparse
+import os
+import shlex
+import socket
+import sys
+
+from catapult_base import cloud_storage  # pylint: disable=import-error
+
+from telemetry.core import platform
+from telemetry.core import util
+from telemetry.internal.browser import browser_finder
+from telemetry.internal.browser import browser_finder_exceptions
+from telemetry.internal.browser import profile_types
+from telemetry.internal.platform import device_finder
+from telemetry.internal.platform.profiler import profiler_finder
+from telemetry.internal.util import binary_manager
+from telemetry.util import wpr_modes
+
+
+class BrowserFinderOptions(optparse.Values):
+  """Options to be used for discovering a browser."""
+
+  def __init__(self, browser_type=None):
+    optparse.Values.__init__(self)
+
+    self.browser_type = browser_type
+    self.browser_executable = None
+    self.chrome_root = None  # Path to src/
+    self.chromium_output_dir = None  # E.g.: out/Debug
+    self.device = None
+    self.cros_ssh_identity = None
+
+    self.extensions_to_load = []
+
+    # If set, copy the generated profile to this path on exit.
+    self.output_profile_path = None
+
+    self.cros_remote = None
+
+    self.profiler = None
+    self.verbosity = 0
+
+    self.browser_options = BrowserOptions()
+    self.output_file = None
+
+    self.android_blacklist_file = None
+    self.no_performance_mode = False
+
+  def __repr__(self):
+    return str(sorted(self.__dict__.items()))
+
+  def Copy(self):
+    return copy.deepcopy(self)
+
+  def CreateParser(self, *args, **kwargs):
+    parser = optparse.OptionParser(*args, **kwargs)
+
+    # Selection group
+    group = optparse.OptionGroup(parser, 'Which browser to use')
+    group.add_option('--browser',
+        dest='browser_type',
+        default=None,
+        help='Browser type to run, '
+             'in order of priority. Supported values: list,%s' %
+             ','.join(browser_finder.FindAllBrowserTypes(self)))
+    group.add_option('--browser-executable',
+        dest='browser_executable',
+        help='The exact browser to run.')
+    group.add_option('--chrome-root',
+        dest='chrome_root',
+        help='Where to look for chrome builds. '
+             'Defaults to searching parent dirs.')
+    group.add_option('--chromium-output-directory',
+        dest='chromium_output_dir',
+        help='Where to look for build artifacts. '
+             'Can also be specified by setting environment variable '
+             'CHROMIUM_OUTPUT_DIR.')
+    group.add_option('--device',
+        dest='device',
+        help='The device ID to use. '
+             'If not specified, only 0 or 1 connected devices are supported. '
+             'If specified as "android", all available Android devices are '
+             'used.')
+    group.add_option(
+        '--remote',
+        dest='cros_remote',
+        help='The hostname of a remote ChromeOS device to use.')
+    group.add_option(
+        '--remote-ssh-port',
+        type=int,
+        default=socket.getservbyname('ssh'),
+        dest='cros_remote_ssh_port',
+        help='The SSH port of the remote ChromeOS device (requires --remote).')
+    identity = None
+    testing_rsa = os.path.join(
+        util.GetTelemetryThirdPartyDir(), 'chromite', 'ssh_keys', 'testing_rsa')
+    if os.path.exists(testing_rsa):
+      identity = testing_rsa
+    group.add_option('--identity',
+        dest='cros_ssh_identity',
+        default=identity,
+        help='The identity file to use when ssh\'ing into the ChromeOS device')
+    parser.add_option_group(group)
+
+    # Debugging options
+    group = optparse.OptionGroup(parser, 'When things go wrong')
+    profiler_choices = profiler_finder.GetAllAvailableProfilers()
+    group.add_option(
+        '--profiler', default=None, type='choice',
+        choices=profiler_choices,
+        help='Record profiling data using this tool. Supported values: %s. '
+             '(Notice: this flag cannot be used for Timeline Based Measurement '
+             'benchmarks.)' % ', '.join(profiler_choices))
+    group.add_option(
+        '-v', '--verbose', action='count', dest='verbosity',
+        help='Increase verbosity level (repeat as needed)')
+    group.add_option('--print-bootstrap-deps',
+                     action='store_true',
+                     help='Output bootstrap deps list.')
+    parser.add_option_group(group)
+
+    # Platform options
+    group = optparse.OptionGroup(parser, 'Platform options')
+    group.add_option('--no-performance-mode', action='store_true',
+        help='Some platforms run on "full performance mode" where the '
+        'test is executed at maximum CPU speed in order to minimize noise '
+        '(especially important for dashboards / continuous builds). '
+        'This option prevents Telemetry from tweaking such platform settings.')
+    group.add_option('--android-blacklist-file',
+                     help='Device blacklist JSON file.')
+    parser.add_option_group(group)
+
+    # Browser options.
+    self.browser_options.AddCommandLineArgs(parser)
+
+    real_parse = parser.parse_args
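+    # Wrap parse_args so that parsed values are stored on this options object;
+    # parser defaults are copied onto self first but never clobber values that
+    # were already set before parsing.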
+    def ParseArgs(args=None):
+      defaults = parser.get_default_values()
+      for k, v in defaults.__dict__.items():
+        if k in self.__dict__ and self.__dict__[k] is not None:
+          continue
+        self.__dict__[k] = v
+      ret = real_parse(args, self) # pylint: disable=E1121
+
+      if self.verbosity >= 2:
+        logging.getLogger().setLevel(logging.DEBUG)
+      elif self.verbosity:
+        logging.getLogger().setLevel(logging.INFO)
+      else:
+        logging.getLogger().setLevel(logging.WARNING)
+
+      if self.chromium_output_dir:
+        os.environ['CHROMIUM_OUTPUT_DIR'] = self.chromium_output_dir
+
+      if self.device == 'list':
+        if binary_manager.NeedsInit():
+          binary_manager.InitDependencyManager(None)
+        devices = device_finder.GetDevicesMatchingOptions(self)
+        print 'Available devices:'
+        for device in devices:
+          print ' ', device.name
+        sys.exit(0)
+
+      if self.browser_executable and not self.browser_type:
+        self.browser_type = 'exact'
+      if self.browser_type == 'list':
+        if binary_manager.NeedsInit():
+          binary_manager.InitDependencyManager(None)
+        devices = device_finder.GetDevicesMatchingOptions(self)
+        if not devices:
+          sys.exit(0)
+        browser_types = {}
+        for device in devices:
+          try:
+            possible_browsers = browser_finder.GetAllAvailableBrowsers(self,
+                                                                       device)
+            browser_types[device.name] = sorted(
+              [browser.browser_type for browser in possible_browsers])
+          except browser_finder_exceptions.BrowserFinderException as ex:
+            print >> sys.stderr, 'ERROR: ', ex
+            sys.exit(1)
+        print 'Available browsers:'
+        if len(browser_types) == 0:
+          print '  No devices were found.'
+        for device_name in sorted(browser_types.keys()):
+          print '  ', device_name
+          for browser_type in browser_types[device_name]:
+            print '    ', browser_type
+        sys.exit(0)
+
+      # Parse browser options.
+      self.browser_options.UpdateFromParseResults(self)
+
+      return ret
+    parser.parse_args = ParseArgs
+    return parser
+
+  def AppendExtraBrowserArgs(self, args):
+    self.browser_options.AppendExtraBrowserArgs(args)
+
+  def MergeDefaultValues(self, defaults):
+    for k, v in defaults.__dict__.items():
+      self.ensure_value(k, v)
+
+class BrowserOptions(object):
+  """Options to be used for launching a browser."""
+  def __init__(self):
+    self.browser_type = None
+    self.show_stdout = False
+
+    # When set to True, the browser will use the default profile.  Telemetry
+    # will not provide an alternate profile directory.
+    self.dont_override_profile = False
+    self.profile_dir = None
+    self.profile_type = None
+    self._extra_browser_args = set()
+    self.extra_wpr_args = []
+    self.wpr_mode = wpr_modes.WPR_OFF
+    self.full_performance_mode = True
+
+    # The amount of time Telemetry should wait for the browser to start.
+    # This property is not exposed as a command line option.
+    self._browser_startup_timeout = 60
+
+    self.disable_background_networking = True
+    self.no_proxy_server = False
+    self.browser_user_agent_type = None
+
+    self.clear_sytem_cache_for_browser_and_profile_on_start = False
+    self.startup_url = 'about:blank'
+
+    # Background pages of built-in component extensions can interfere with
+    # performance measurements.
+    self.disable_component_extensions_with_background_pages = True
+    # Disable default apps.
+    self.disable_default_apps = True
+
+    self.enable_logging = False
+    # The cloud storage bucket and path to which log data produced by the
+    # browser is uploaded.
+    # If logs_cloud_remote_path is None, a random remote path is generated
+    # every time the log data is uploaded.
+    self.logs_cloud_bucket = cloud_storage.TELEMETRY_OUTPUT
+    self.logs_cloud_remote_path = None
+
+    # TODO(danduong): Find a way to store target_os here instead of
+    # finder_options.
+    self._finder_options = None
+
+    # Whether to take a screenshot of a failed page and include it in
+    # Telemetry's profiling results.
+    self.take_screenshot_for_failed_page = False
+
+  def __repr__(self):
+    # This works around the infinite loop caused by the introduction of a
+    # circular reference with _finder_options.
+    obj = self.__dict__.copy()
+    del obj['_finder_options']
+    return str(sorted(obj.items()))
+
+  def IsCrosBrowserOptions(self):
+    return False
+
+  @classmethod
+  def AddCommandLineArgs(cls, parser):
+
+    ############################################################################
+    # Please do not add any more options here without first discussing with    #
+    # a telemetry owner. This is not the right place for platform-specific     #
+    # options.                                                                 #
+    ############################################################################
+
+    group = optparse.OptionGroup(parser, 'Browser options')
+    profile_choices = profile_types.GetProfileTypes()
+    group.add_option('--profile-type',
+        dest='profile_type',
+        type='choice',
+        default='clean',
+        choices=profile_choices,
+        help=('The user profile to use. A clean profile is used by default. '
+              'Supported values: ' + ', '.join(profile_choices)))
+    group.add_option('--profile-dir',
+        dest='profile_dir',
+        help='Profile directory to launch the browser with. '
+             'A clean profile is used by default.')
+    group.add_option('--extra-browser-args',
+        dest='extra_browser_args_as_string',
+        help='Additional arguments to pass to the browser when it starts')
+    group.add_option('--extra-wpr-args',
+        dest='extra_wpr_args_as_string',
+        help=('Additional arguments to pass to Web Page Replay. '
+              'See third_party/webpagereplay/replay.py for usage.'))
+    group.add_option('--show-stdout',
+        action='store_true',
+        help='When possible, will display the stdout of the process')
+    group.add_option('--enable-browser-logging',
+        dest='enable_logging',
+        action='store_true',
+        help=('Enable browser logging. The log file is saved in a temp '
+              "directory. Note that enabling this flag affects the browser's "
+              'performance.'))
+    parser.add_option_group(group)
+
+    group = optparse.OptionGroup(parser, 'Compatibility options')
+    group.add_option('--gtest_output',
+        help='Ignored argument for compatibility with runtest.py harness')
+    parser.add_option_group(group)
+
+  def UpdateFromParseResults(self, finder_options):
+    """Copies our options from finder_options"""
+    browser_options_list = [
+        'extra_browser_args_as_string',
+        'extra_wpr_args_as_string',
+        'enable_logging',
+        'profile_dir',
+        'profile_type',
+        'show_stdout',
+        ]
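+    # Move any of these options that were parsed onto this BrowserOptions
+    # instance, and strip them from finder_options so they are only applied
+    # here.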
+    for o in browser_options_list:
+      a = getattr(finder_options, o, None)
+      if a is not None:
+        setattr(self, o, a)
+        delattr(finder_options, o)
+
+    self.browser_type = finder_options.browser_type
+    self._finder_options = finder_options
+
+    if hasattr(self, 'extra_browser_args_as_string'):
+      tmp = shlex.split(
+        self.extra_browser_args_as_string)
+      self.AppendExtraBrowserArgs(tmp)
+      delattr(self, 'extra_browser_args_as_string')
+    if hasattr(self, 'extra_wpr_args_as_string'):
+      tmp = shlex.split(
+        self.extra_wpr_args_as_string)
+      self.extra_wpr_args.extend(tmp)
+      delattr(self, 'extra_wpr_args_as_string')
+    if self.profile_type == 'default':
+      self.dont_override_profile = True
+
+    if self.profile_dir and self.profile_type != 'clean':
+      logging.critical(
+          "It's illegal to specify both --profile-type and --profile-dir.\n"
+          "For more information see: http://goo.gl/ngdGD5")
+      sys.exit(1)
+
+    if self.profile_dir and not os.path.isdir(self.profile_dir):
+      logging.critical(
+          "Directory specified by --profile-dir (%s) doesn't exist "
+          "or isn't a directory.\n"
+          "For more information see: http://goo.gl/ngdGD5" % self.profile_dir)
+      sys.exit(1)
+
+    if not self.profile_dir:
+      self.profile_dir = profile_types.GetProfileDir(self.profile_type)
+
+    # This deferred import is necessary because browser_options is imported in
+    # telemetry/telemetry/__init__.py.
+    finder_options.browser_options = CreateChromeBrowserOptions(self)
+
+  @property
+  def finder_options(self):
+    return self._finder_options
+
+  @property
+  def extra_browser_args(self):
+    return self._extra_browser_args
+
+  @property
+  def browser_startup_timeout(self):
+    return self._browser_startup_timeout
+
+  @browser_startup_timeout.setter
+  def browser_startup_timeout(self, value):
+    self._browser_startup_timeout = value
+
+  def AppendExtraBrowserArgs(self, args):
+    if isinstance(args, list):
+      self._extra_browser_args.update(args)
+    else:
+      self._extra_browser_args.add(args)
+
+
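+# Returns ChromeOS-specific options when the host platform is ChromeOS or the
+# browser type starts with 'cros'; otherwise br_options is returned unchanged.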
+def CreateChromeBrowserOptions(br_options):
+  browser_type = br_options.browser_type
+
+  if (platform.GetHostPlatform().GetOSName() == 'chromeos' or
+      (browser_type and browser_type.startswith('cros'))):
+    return CrosBrowserOptions(br_options)
+
+  return br_options
+
+
+class ChromeBrowserOptions(BrowserOptions):
+  """Chrome-specific browser options."""
+
+  def __init__(self, br_options):
+    super(ChromeBrowserOptions, self).__init__()
+    # Copy to self.
+    self.__dict__.update(br_options.__dict__)
+
+
+class CrosBrowserOptions(ChromeBrowserOptions):
+  """ChromeOS-specific browser options."""
+
+  def __init__(self, br_options):
+    super(CrosBrowserOptions, self).__init__(br_options)
+    # Create a browser with oobe property.
+    self.create_browser_with_oobe = False
+    # Clear enterprise policy before logging in.
+    self.clear_enterprise_policy = True
+    # Disable GAIA/enterprise services.
+    self.disable_gaia_services = True
+
+    self.auto_login = True
+    self.gaia_login = False
+    self.username = 'test@test.test'
+    self.password = ''
+    self.gaia_id = '12345'
+
+  def IsCrosBrowserOptions(self):
+    return True
diff --git a/catapult/telemetry/telemetry/internal/browser/browser_options_unittest.py b/catapult/telemetry/telemetry/internal/browser/browser_options_unittest.py
new file mode 100644
index 0000000..08791a5
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/browser/browser_options_unittest.py
@@ -0,0 +1,111 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import optparse
+import os
+import unittest
+
+from telemetry.internal.browser import browser_options
+
+
+class BrowserOptionsTest(unittest.TestCase):
+  def testDefaults(self):
+    options = browser_options.BrowserFinderOptions()
+    parser = options.CreateParser()
+    parser.add_option('-x', action='store', default=3)
+    parser.parse_args(['--browser', 'any'])
+    self.assertEquals(options.x, 3) # pylint: disable=no-member
+
+  def testDefaultsPlusOverride(self):
+    options = browser_options.BrowserFinderOptions()
+    parser = options.CreateParser()
+    parser.add_option('-x', action='store', default=3)
+    parser.parse_args(['--browser', 'any', '-x', 10])
+    self.assertEquals(options.x, 10) # pylint: disable=no-member
+
+  def testDefaultsDontClobberPresetValue(self):
+    options = browser_options.BrowserFinderOptions()
+    setattr(options, 'x', 7)
+    parser = options.CreateParser()
+    parser.add_option('-x', action='store', default=3)
+    parser.parse_args(['--browser', 'any'])
+    self.assertEquals(options.x, 7) # pylint: disable=no-member
+
+  def testCount0(self):
+    options = browser_options.BrowserFinderOptions()
+    parser = options.CreateParser()
+    parser.add_option('-x', action='count', dest='v')
+    parser.parse_args(['--browser', 'any'])
+    self.assertEquals(options.v, None) # pylint: disable=no-member
+
+  def testCount2(self):
+    options = browser_options.BrowserFinderOptions()
+    parser = options.CreateParser()
+    parser.add_option('-x', action='count', dest='v')
+    parser.parse_args(['--browser', 'any', '-xx'])
+    self.assertEquals(options.v, 2) # pylint: disable=no-member
+
+  def testOptparseMutabilityWhenSpecified(self):
+    options = browser_options.BrowserFinderOptions()
+    parser = options.CreateParser()
+    parser.add_option('-x', dest='verbosity', action='store_true')
+    options_ret, _ = parser.parse_args(['--browser', 'any', '-x'])
+    self.assertEquals(options_ret, options)
+    self.assertTrue(options.verbosity)
+
+  def testOptparseMutabilityWhenNotSpecified(self):
+    options = browser_options.BrowserFinderOptions()
+
+    parser = options.CreateParser()
+    parser.add_option('-x', dest='verbosity', action='store_true')
+    options_ret, _ = parser.parse_args(['--browser', 'any'])
+    self.assertEquals(options_ret, options)
+    self.assertFalse(options.verbosity)
+
+  def testProfileDirDefault(self):
+    options = browser_options.BrowserFinderOptions()
+    parser = options.CreateParser()
+    parser.parse_args(['--browser', 'any'])
+    self.assertEquals(options.browser_options.profile_dir, None)
+
+  def testProfileDir(self):
+    options = browser_options.BrowserFinderOptions()
+    parser = options.CreateParser()
+    # Need to use a directory that exists.
+    current_dir = os.path.dirname(__file__)
+    parser.parse_args(['--browser', 'any', '--profile-dir', current_dir])
+    self.assertEquals(options.browser_options.profile_dir, current_dir)
+
+  def testExtraBrowserArgs(self):
+    options = browser_options.BrowserFinderOptions()
+    parser = options.CreateParser()
+    parser.parse_args(['--extra-browser-args=--foo --bar'])
+
+    self.assertEquals(options.browser_options.extra_browser_args,
+                      set(['--foo', '--bar']))
+
+  def testMergeDefaultValues(self):
+    options = browser_options.BrowserFinderOptions()
+    options.already_true = True
+    options.already_false = False
+    options.override_to_true = False
+    options.override_to_false = True
+
+    parser = optparse.OptionParser()
+    parser.add_option('--already_true', action='store_true')
+    parser.add_option('--already_false', action='store_true')
+    parser.add_option('--unset', action='store_true')
+    parser.add_option('--default_true', action='store_true', default=True)
+    parser.add_option('--default_false', action='store_true', default=False)
+    parser.add_option('--override_to_true', action='store_true', default=False)
+    parser.add_option('--override_to_false', action='store_true', default=True)
+
+    options.MergeDefaultValues(parser.get_default_values())
+
+    self.assertTrue(options.already_true)
+    self.assertFalse(options.already_false)
+    self.assertTrue(options.unset is None)
+    self.assertTrue(options.default_true)
+    self.assertFalse(options.default_false)
+    self.assertFalse(options.override_to_true)
+    self.assertTrue(options.override_to_false)
diff --git a/catapult/telemetry/telemetry/internal/browser/browser_unittest.py b/catapult/telemetry/telemetry/internal/browser/browser_unittest.py
new file mode 100644
index 0000000..d692681
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/browser/browser_unittest.py
@@ -0,0 +1,275 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import shutil
+import tempfile
+import unittest
+
+from telemetry.core import util
+from telemetry import decorators
+from telemetry.internal.browser import browser as browser_module
+from telemetry.internal.browser import browser_finder
+from telemetry.internal.platform import gpu_device
+from telemetry.internal.platform import gpu_info
+from telemetry.internal.platform import system_info
+from telemetry.internal.util import path
+from telemetry.testing import browser_test_case
+from telemetry.testing import options_for_unittests
+from telemetry.timeline import tracing_config
+
+import mock
+
+
+class IntentionalException(Exception):
+  pass
+
+
+class BrowserTest(browser_test_case.BrowserTestCase):
+  def testBrowserCreation(self):
+    self.assertEquals(1, len(self._browser.tabs))
+
+    # Different browsers boot up to different things.
+    assert self._browser.tabs[0].url
+
+  @decorators.Enabled('has tabs')
+  def testNewCloseTab(self):
+    existing_tab = self._browser.tabs[0]
+    self.assertEquals(1, len(self._browser.tabs))
+    existing_tab_url = existing_tab.url
+
+    new_tab = self._browser.tabs.New()
+    self.assertEquals(2, len(self._browser.tabs))
+    self.assertEquals(existing_tab.url, existing_tab_url)
+    self.assertEquals(new_tab.url, 'about:blank')
+
+    new_tab.Close()
+    self.assertEquals(1, len(self._browser.tabs))
+    self.assertEquals(existing_tab.url, existing_tab_url)
+
+  def testMultipleTabCalls(self):
+    self._browser.tabs[0].Navigate(self.UrlOfUnittestFile('blank.html'))
+    self._browser.tabs[0].WaitForDocumentReadyStateToBeInteractiveOrBetter()
+
+  def testTabCallByReference(self):
+    tab = self._browser.tabs[0]
+    tab.Navigate(self.UrlOfUnittestFile('blank.html'))
+    self._browser.tabs[0].WaitForDocumentReadyStateToBeInteractiveOrBetter()
+
+  @decorators.Enabled('has tabs')
+  @decorators.Disabled('win')  # crbug.com/321527
+  def testCloseReferencedTab(self):
+    self._browser.tabs.New()
+    tab = self._browser.tabs[0]
+    tab.Navigate(self.UrlOfUnittestFile('blank.html'))
+    tab.Close()
+    self.assertEquals(1, len(self._browser.tabs))
+
+  @decorators.Enabled('has tabs')
+  def testForegroundTab(self):
+    # Should be only one tab at this stage, so that must be the foreground tab
+    original_tab = self._browser.tabs[0]
+    self.assertEqual(self._browser.foreground_tab, original_tab)
+    new_tab = self._browser.tabs.New()
+    # New tab should be the foreground tab.
+    self.assertEqual(self._browser.foreground_tab, new_tab)
+    # Make sure that activating the background tab makes it the foreground tab
+    original_tab.Activate()
+    self.assertEqual(self._browser.foreground_tab, original_tab)
+    # Closing the current foreground tab should switch the foreground tab to the
+    # other tab
+    original_tab.Close()
+    self.assertEqual(self._browser.foreground_tab, new_tab)
+
+  def testGetSystemInfo(self):
+    if not self._browser.supports_system_info:
+      logging.warning(
+          'Browser does not support getting system info, skipping test.')
+      return
+
+    info = self._browser.GetSystemInfo()
+
+    self.assertTrue(isinstance(info, system_info.SystemInfo))
+    self.assertTrue(hasattr(info, 'model_name'))
+    self.assertTrue(hasattr(info, 'gpu'))
+    self.assertTrue(isinstance(info.gpu, gpu_info.GPUInfo))
+    self.assertTrue(hasattr(info.gpu, 'devices'))
+    self.assertTrue(len(info.gpu.devices) > 0)
+    for g in info.gpu.devices:
+      self.assertTrue(isinstance(g, gpu_device.GPUDevice))
+
+  def testGetSystemInfoNotCachedObject(self):
+    if not self._browser.supports_system_info:
+      logging.warning(
+          'Browser does not support getting system info, skipping test.')
+      return
+
+    info_a = self._browser.GetSystemInfo()
+    info_b = self._browser.GetSystemInfo()
+    self.assertFalse(info_a is info_b)
+
+  def testGetSystemTotalMemory(self):
+    self.assertTrue(self._browser.memory_stats['SystemTotalPhysicalMemory'] > 0)
+
+  @decorators.Disabled('win')  # crbug.com/570955.
+  def testIsTracingRunning(self):
+    tracing_controller = self._browser.platform.tracing_controller
+    if not tracing_controller.IsChromeTracingSupported():
+      return
+    self.assertFalse(tracing_controller.is_tracing_running)
+    config = tracing_config.TracingConfig()
+    config.enable_chrome_trace = True
+    tracing_controller.StartTracing(config)
+    self.assertTrue(tracing_controller.is_tracing_running)
+    tracing_controller.StopTracing()
+    self.assertFalse(tracing_controller.is_tracing_running)
+
+
+class CommandLineBrowserTest(browser_test_case.BrowserTestCase):
+  @classmethod
+  def CustomizeBrowserOptions(cls, options):
+    options.AppendExtraBrowserArgs('--user-agent=telemetry')
+
+  def testCommandLineOverriding(self):
+    # This test starts the browser with --user-agent=telemetry and verifies
+    # that the user agent is set accordingly.
+    t = self._browser.tabs[0]
+    t.Navigate(self.UrlOfUnittestFile('blank.html'))
+    t.WaitForDocumentReadyStateToBeInteractiveOrBetter()
+    self.assertEquals(t.EvaluateJavaScript('navigator.userAgent'),
+                      'telemetry')
+
+class DirtyProfileBrowserTest(browser_test_case.BrowserTestCase):
+  @classmethod
+  def CustomizeBrowserOptions(cls, options):
+    options.profile_type = 'small_profile'
+
+  @decorators.Disabled('chromeos')  # crbug.com/243912
+  def testDirtyProfileCreation(self):
+    self.assertEquals(1, len(self._browser.tabs))
+
+
+class BrowserLoggingTest(browser_test_case.BrowserTestCase):
+  @classmethod
+  def CustomizeBrowserOptions(cls, options):
+    options.enable_logging = True
+
+  @decorators.Disabled('chromeos', 'android')
+  def testLogFileExist(self):
+    self.assertTrue(
+       os.path.isfile(self._browser._browser_backend.log_file_path))
+
+
+def _GenerateBrowserProfile(number_of_tabs):
+  """ Generate a browser profile which browser had |number_of_tabs| number of
+  tabs opened before it was closed.
+      Returns:
+        profile_dir: the directory of profile.
+  """
+  profile_dir = tempfile.mkdtemp()
+  options = options_for_unittests.GetCopy()
+  options.output_profile_path = profile_dir
+  browser_to_create = browser_finder.FindBrowser(options)
+  with browser_to_create.Create(options) as browser:
+    browser.platform.SetHTTPServerDirectories(path.GetUnittestDataDir())
+    blank_file_path = os.path.join(path.GetUnittestDataDir(), 'blank.html')
+    blank_url = browser.platform.http_server.UrlOf(blank_file_path)
+    browser.foreground_tab.Navigate(blank_url)
+    browser.foreground_tab.WaitForDocumentReadyStateToBeComplete()
+    for _ in xrange(number_of_tabs - 1):
+      tab = browser.tabs.New()
+      tab.Navigate(blank_url)
+      tab.WaitForDocumentReadyStateToBeComplete()
+  return profile_dir
+
+
+class BrowserCreationTest(unittest.TestCase):
+  def setUp(self):
+    self.mock_browser_backend = mock.MagicMock()
+    self.mock_platform_backend = mock.MagicMock()
+
+  def testCleanedUpCalledWhenExceptionRaisedInBrowserCreation(self):
+    self.mock_platform_backend.platform.FlushDnsCache.side_effect = (
+        IntentionalException('Boom!'))
+    with self.assertRaises(IntentionalException):
+      browser_module.Browser(
+         self.mock_browser_backend, self.mock_platform_backend,
+         credentials_path=None)
+    self.assertTrue(self.mock_platform_backend.WillCloseBrowser.called)
+
+  def testOriginalExceptionNotSwallow(self):
+    self.mock_platform_backend.platform.FlushDnsCache.side_effect = (
+        IntentionalException('Boom!'))
+    self.mock_platform_backend.WillCloseBrowser.side_effect = (
+        IntentionalException('Cannot close browser!'))
+    with self.assertRaises(IntentionalException) as context:
+      browser_module.Browser(
+         self.mock_browser_backend, self.mock_platform_backend,
+         credentials_path=None)
+    self.assertIn('Boom!', context.exception.message)
+
+
+class BrowserRestoreSessionTest(unittest.TestCase):
+
+  @classmethod
+  def setUpClass(cls):
+    cls._number_of_tabs = 4
+    cls._profile_dir = _GenerateBrowserProfile(cls._number_of_tabs)
+    cls._options = options_for_unittests.GetCopy()
+    cls._options.browser_options.AppendExtraBrowserArgs(
+        ['--restore-last-session'])
+    cls._options.browser_options.profile_dir = cls._profile_dir
+    cls._browser_to_create = browser_finder.FindBrowser(cls._options)
+
+  @decorators.Enabled('has tabs')
+  @decorators.Disabled('chromeos', 'win', 'mac')
+  # TODO(nednguyen): Enable this test on the Windows platform.
+  def testRestoreBrowserWithMultipleTabs(self):
+    with self._browser_to_create.Create(self._options) as browser:
+      # The number of tabs will be self._number_of_tabs + 1 as it includes the
+      # old tabs and a new blank tab.
+      expected_number_of_tabs = self._number_of_tabs + 1
+      try:
+        util.WaitFor(lambda: len(browser.tabs) == expected_number_of_tabs, 10)
+      except:
+        logging.error('Number of tabs is %s' % len(browser.tabs))
+        raise
+      self.assertEquals(expected_number_of_tabs, len(browser.tabs))
+
+  @classmethod
+  def tearDownClass(cls):
+    shutil.rmtree(cls._profile_dir)
+
+
+class ReferenceBrowserTest(unittest.TestCase):
+
+  @decorators.Enabled('win', 'mac', 'linux')
+  def testBasicBrowserActions(self):
+    options = options_for_unittests.GetCopy()
+    options.browser_type = 'reference'
+    browser_to_create = browser_finder.FindBrowser(options)
+    self.assertIsNotNone(browser_to_create)
+    with browser_to_create.Create(options) as ref_browser:
+      tab = ref_browser.tabs.New()
+      tab.Navigate('about:blank')
+      self.assertEquals(2, tab.EvaluateJavaScript('1 + 1'))
+
+
+class TestBrowserOperationDoNotLeakTempFiles(unittest.TestCase):
+
+  @decorators.Enabled('win', 'mac', 'linux')
+  @decorators.Isolated
+  def testBrowserNotLeakingTempFiles(self):
+    options = options_for_unittests.GetCopy()
+    browser_to_create = browser_finder.FindBrowser(options)
+    self.assertIsNotNone(browser_to_create)
+    before_browser_run_temp_dir_content = os.listdir(tempfile.tempdir)
+    with browser_to_create.Create(options) as browser:
+      tab = browser.tabs.New()
+      tab.Navigate('about:blank')
+      self.assertEquals(2, tab.EvaluateJavaScript('1 + 1'))
+    after_browser_run_temp_dir_content = os.listdir(tempfile.tempdir)
+    self.assertEqual(before_browser_run_temp_dir_content,
+                     after_browser_run_temp_dir_content)
diff --git a/catapult/telemetry/telemetry/internal/browser/extension_dict.py b/catapult/telemetry/telemetry/internal/browser/extension_dict.py
new file mode 100644
index 0000000..95742b5
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/browser/extension_dict.py
@@ -0,0 +1,33 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.internal.browser import extension_to_load
+
+
+class ExtensionDict(object):
+  """Dictionary of ExtensionPage instances, with extension_id as key."""
+
+  def __init__(self, extension_backend):
+    self._extension_backend = extension_backend
+
+  def __getitem__(self, load_extension):
+    """Given an ExtensionToLoad instance, returns the corresponding
+    ExtensionPage instance."""
+    if not isinstance(load_extension, extension_to_load.ExtensionToLoad):
+      raise TypeError("Input param must be of type ExtensionToLoad")
+    return self.GetByExtensionId(load_extension.extension_id)[0]
+
+  def __contains__(self, load_extension):
+    """Checks if this ExtensionToLoad instance has been loaded"""
+    if not isinstance(load_extension, extension_to_load.ExtensionToLoad):
+      raise TypeError("Input param must be of type ExtensionToLoad")
+    return load_extension.extension_id in self._extension_backend
+
+  def keys(self):
+    return self._extension_backend.keys()
+
+  def GetByExtensionId(self, extension_id):
+    """Returns a list of extensions given an extension id. This is useful for
+    connecting to built-in apps and component extensions."""
+    return self._extension_backend[extension_id]
diff --git a/catapult/telemetry/telemetry/internal/browser/extension_page.py b/catapult/telemetry/telemetry/internal/browser/extension_page.py
new file mode 100644
index 0000000..350de6c
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/browser/extension_page.py
@@ -0,0 +1,27 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import re
+
+from telemetry.internal.browser import web_contents
+
+
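+# For example (illustrative):
+#   UrlToExtensionId('chrome-extension://abcdefgh/background.html') -> 'abcdefgh'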
+def UrlToExtensionId(url):
+  return re.match(r"(chrome-extension://)([^/]+)", url).group(2)
+
+
+class ExtensionPage(web_contents.WebContents):
+  """Represents an extension page in the browser"""
+
+  def __init__(self, inspector_backend):
+    super(ExtensionPage, self).__init__(inspector_backend)
+    self.url = inspector_backend.url
+    self.extension_id = UrlToExtensionId(self.url)
+
+  def Reload(self):
+    """Reloading an extension page is used as a workaround for an extension
+    binding bug for old versions of Chrome (crbug.com/263162). After Navigate
+    returns, we are guaranteed that the inspected page is in the correct state.
+    """
+    self._inspector_backend.Navigate(self.url, None, 10)
diff --git a/catapult/telemetry/telemetry/internal/browser/extension_to_load.py b/catapult/telemetry/telemetry/internal/browser/extension_to_load.py
new file mode 100644
index 0000000..e40e375
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/browser/extension_to_load.py
@@ -0,0 +1,66 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import os
+
+from telemetry.internal.backends.chrome import crx_id
+
+
+class ExtensionPathNonExistentException(Exception):
+  pass
+
+class MissingPublicKeyException(Exception):
+  pass
+
+class ExtensionToLoad(object):
+  def __init__(self, path, browser_type, is_component=False):
+    if not os.path.isdir(path):
+      raise ExtensionPathNonExistentException(
+          'Extension path not a directory %s' % path)
+    self._path = path
+    self._local_path = path
+    self._is_component = is_component
+    if is_component and not crx_id.HasPublicKey(path):
+      raise MissingPublicKeyException(
+         'Component extension %s must have a public key' % path)
+
+    # It is possible that we are running telemetry on Windows targeting
+    # a remote CrOS or Android device. In this case, we need the
+    # browser_type argument to determine how we should encode
+    # the extension path.
+    self._is_win = (os.name == 'nt'
+        and not (browser_type.startswith('android')
+                 or browser_type.startswith('cros')))
+
+  @property
+  def extension_id(self):
+    """Unique extension id of this extension."""
+    if crx_id.HasPublicKey(self._path):
+      # Calculate extension id from the public key.
+      return crx_id.GetCRXAppID(os.path.realpath(self._path))
+    else:
+      # Calculate extension id based on the path on the device.
+      return crx_id.GetCRXAppID(
+          os.path.realpath(self._local_path),
+          from_file_path=True,
+          is_win_path=self._is_win)
+
+  @property
+  def path(self):
+    """Path to extension source directory."""
+    return self._path
+
+  @property
+  def local_path(self):
+    """Path to extension destination directory, for remote instances of
+    chrome"""
+    return self._local_path
+
+  @local_path.setter
+  def local_path(self, local_path):
+    self._local_path = local_path
+
+  @property
+  def is_component(self):
+    """Whether this extension should be loaded as a component extension."""
+    return self._is_component
diff --git a/catapult/telemetry/telemetry/internal/browser/extension_unittest.py b/catapult/telemetry/telemetry/internal/browser/extension_unittest.py
new file mode 100644
index 0000000..44fea28
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/browser/extension_unittest.py
@@ -0,0 +1,210 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import shutil
+import tempfile
+import unittest
+
+from telemetry.core import util
+from telemetry import decorators
+from telemetry.internal.browser import browser_finder
+from telemetry.internal.browser import extension_to_load
+from telemetry.testing import options_for_unittests
+
+
+class ExtensionTest(unittest.TestCase):
+  def setUp(self):
+    self._browser = None
+    self._extension = None
+    self._extension_id = None
+
+  def CreateBrowserWithExtension(self, ext_path):
+    extension_path = os.path.join(util.GetUnittestDataDir(), ext_path)
+    options = options_for_unittests.GetCopy()
+    load_extension = extension_to_load.ExtensionToLoad(
+        extension_path, options.browser_type)
+    options.extensions_to_load = [load_extension]
+    browser_to_create = browser_finder.FindBrowser(options)
+
+    if not browser_to_create:
+      # May not find a browser that supports extensions.
+      return False
+    self._browser = browser_to_create.Create(options)
+    self._extension = self._browser.extensions[load_extension]
+    self._extension_id = load_extension.extension_id
+    self.assertTrue(self._extension)
+    return True
+
+  def tearDown(self):
+    if self._browser:
+      self._browser.Close()
+
+  def testExtensionBasic(self):
+    """Test ExtensionPage's ExecuteJavaScript and EvaluateJavaScript."""
+    if not self.CreateBrowserWithExtension('simple_extension'):
+      logging.warning('Did not find a browser that supports extensions, '
+                      'skipping test.')
+      return
+    self.assertTrue(
+        self._extension.EvaluateJavaScript('chrome.runtime != null'))
+    self._extension.ExecuteJavaScript('setTestVar("abcdef")')
+    self.assertEquals('abcdef',
+                      self._extension.EvaluateJavaScript('_testVar'))
+
+  def testExtensionGetByExtensionId(self):
+    """Test GetByExtensionId for a simple extension with a background page."""
+    if not self.CreateBrowserWithExtension('simple_extension'):
+      logging.warning('Did not find a browser that supports extensions, '
+                      'skipping test.')
+      return
+    ext = self._browser.extensions.GetByExtensionId(self._extension_id)
+    self.assertEqual(1, len(ext))
+    self.assertEqual(ext[0], self._extension)
+    self.assertTrue(
+        ext[0].EvaluateJavaScript('chrome.runtime != null'))
+
+  @decorators.Disabled('mac')
+  @decorators.Disabled('win')  # crbug.com/570955
+  def testWebApp(self):
+    """Tests GetByExtensionId for a web app with multiple pages."""
+    if not self.CreateBrowserWithExtension('simple_app'):
+      logging.warning('Did not find a browser that supports extensions, '
+                      'skipping test.')
+      return
+    extensions = self._browser.extensions.GetByExtensionId(self._extension_id)
+    extension_urls = set([ext.EvaluateJavaScript('location.href;')
+                          for ext in extensions])
+    expected_urls = set(['chrome-extension://' + self._extension_id + '/' + path
+                         for path in ['main.html', 'second.html',
+                                      '_generated_background_page.html']])
+
+    self.assertEqual(expected_urls, extension_urls)
+
+class NonExistentExtensionTest(unittest.TestCase):
+  def testNonExistentExtensionPath(self):
+    """Test that a non-existent extension path will raise an exception."""
+    extension_path = os.path.join(util.GetUnittestDataDir(), 'foo')
+    options = options_for_unittests.GetCopy()
+    self.assertRaises(extension_to_load.ExtensionPathNonExistentException,
+                      lambda: extension_to_load.ExtensionToLoad(
+                          extension_path, options.browser_type))
+
+  def testExtensionNotLoaded(self):
+    """Querying an extension that was not loaded will return None"""
+    extension_path = os.path.join(util.GetUnittestDataDir(), 'simple_extension')
+    options = options_for_unittests.GetCopy()
+    load_extension = extension_to_load.ExtensionToLoad(
+        extension_path, options.browser_type)
+    browser_to_create = browser_finder.FindBrowser(options)
+    with browser_to_create.Create(options) as b:
+      if b.supports_extensions:
+        self.assertRaises(KeyError, lambda: b.extensions[load_extension])
+
+class MultipleExtensionTest(unittest.TestCase):
+  def setUp(self):
+    """ Copy the manifest and background.js files of simple_extension to a
+    number of temporary directories to load as extensions"""
+    self._extension_dirs = [tempfile.mkdtemp() for _ in range(3)]
+    src_extension_dir = os.path.join(
+        util.GetUnittestDataDir(), 'simple_extension')
+    manifest_path = os.path.join(src_extension_dir, 'manifest.json')
+    script_path = os.path.join(src_extension_dir, 'background.js')
+    for d in self._extension_dirs:
+      shutil.copy(manifest_path, d)
+      shutil.copy(script_path, d)
+    options = options_for_unittests.GetCopy()
+    self._extensions_to_load = [extension_to_load.ExtensionToLoad(
+                                    d, options.browser_type)
+                                for d in self._extension_dirs]
+    options.extensions_to_load = self._extensions_to_load
+    browser_to_create = browser_finder.FindBrowser(options)
+    self._browser = None
+    # May not find a browser that supports extensions.
+    if browser_to_create:
+      self._browser = browser_to_create.Create(options)
+
+  def tearDown(self):
+    if self._browser:
+      self._browser.Close()
+    for d in self._extension_dirs:
+      shutil.rmtree(d)
+
+  def testMultipleExtensions(self):
+    if not self._browser:
+      logging.warning('Did not find a browser that supports extensions, '
+                      'skipping test.')
+      return
+
+    # Test contains.
+    loaded_extensions = [e for e in self._extensions_to_load
+                         if e in self._browser.extensions]
+    self.assertEqual(len(loaded_extensions), len(self._extensions_to_load))
+    for load_extension in self._extensions_to_load:
+      extension = self._browser.extensions[load_extension]
+      assert extension
+      self.assertTrue(
+          extension.EvaluateJavaScript('chrome.runtime != null'))
+      extension.ExecuteJavaScript('setTestVar("abcdef")')
+      self.assertEquals('abcdef', extension.EvaluateJavaScript('_testVar'))
+
+
+class ComponentExtensionTest(unittest.TestCase):
+  def testComponentExtensionBasic(self):
+    extension_path = os.path.join(
+        util.GetUnittestDataDir(), 'component_extension')
+    options = options_for_unittests.GetCopy()
+    load_extension = extension_to_load.ExtensionToLoad(
+        extension_path, options.browser_type, is_component=True)
+
+    options.extensions_to_load = [load_extension]
+    browser_to_create = browser_finder.FindBrowser(options)
+    if not browser_to_create:
+      logging.warning('Did not find a browser that supports extensions, '
+                      'skipping test.')
+      return
+
+    with browser_to_create.Create(options) as b:
+      extension = b.extensions[load_extension]
+      self.assertTrue(
+          extension.EvaluateJavaScript('chrome.runtime != null'))
+      extension.ExecuteJavaScript('setTestVar("abcdef")')
+      self.assertEquals('abcdef', extension.EvaluateJavaScript('_testVar'))
+
+  def testComponentExtensionNoPublicKey(self):
+    # simple_extension does not have a public key.
+    extension_path = os.path.join(util.GetUnittestDataDir(), 'simple_extension')
+    options = options_for_unittests.GetCopy()
+    self.assertRaises(extension_to_load.MissingPublicKeyException,
+                      lambda: extension_to_load.ExtensionToLoad(
+                          extension_path,
+                          browser_type=options.browser_type,
+                          is_component=True))
+
+
+class WebviewInExtensionTest(ExtensionTest):
+
+  # Flaky on windows, hits an exception: http://crbug.com/508325
+  @decorators.Disabled('win', 'linux')
+  def testWebviewInExtension(self):
+    """Tests GetWebviewContext() for a web app containing <webview> element."""
+    if not self.CreateBrowserWithExtension('webview_app'):
+      logging.warning('Did not find a browser that supports extensions, '
+                      'skipping test.')
+      return
+
+    self._browser.extensions.GetByExtensionId(self._extension_id)
+    webview_contexts = self._extension.GetWebviewContexts()
+    self.assertEquals(1, len(webview_contexts))
+    webview_context = webview_contexts[0]
+    webview_context.WaitForDocumentReadyStateToBeComplete()
+    # Check that the context has the right url from the <webview> element.
+    self.assertTrue(webview_context.GetUrl().startswith('data:'))
+    # Check |test_input_id| element is accessible from the webview context.
+    self.assertTrue(webview_context.EvaluateJavaScript(
+                    'document.getElementById("test_input_id") != null'))
+    # Check that |test_input_id| is not accessible from outside webview context
+    self.assertFalse(self._extension.EvaluateJavaScript(
+                    'document.getElementById("test_input_id") != null'))
diff --git a/catapult/telemetry/telemetry/internal/browser/network_quiescence.js b/catapult/telemetry/telemetry/internal/browser/network_quiescence.js
new file mode 100644
index 0000000..f317d6c
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/browser/network_quiescence.js
@@ -0,0 +1,107 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+/**
+ * @fileoverview This file provides a JavaScript helper function that
+ * determines when network quiescence has been reached based on the time since
+ * the last resource was received.
+ */
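+// Illustrative usage from the Python side (a sketch, not part of this file):
+//   tab.WaitForJavaScriptExpression(
+//       'window.__telemetry_testHasReachedNetworkQuiescence()', timeout)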
+(function() {
+
+  // Make executing this code idempotent.
+  if (window.__telemetry_testHasReachedNetworkQuiescence) {
+    return;
+  }
+
+  // Some benchmarks patch window.performance to make it deterministic.
+  // Save the original performance object before it is patched.
+  var real_performance = window.performance;
+
+  // Set the Resource Timing interface functions that will be used below
+  // to use whatever version is available currently regardless of vendor
+  // prefix.
+  real_performance.clearResourceTimings =
+      (real_performance.clearResourceTimings     ||
+       real_performance.mozClearResourceTimings  ||
+       real_performance.msClearResourceTimings   ||
+       real_performance.oClearResourceTimings    ||
+       real_performance.webkitClearResourceTimings);
+
+  real_performance.getEntriesByType =
+      (real_performance.getEntriesByType     ||
+       real_performance.mozGetEntriesByType  ||
+       real_performance.msGetEntriesByType   ||
+       real_performance.oGetEntriesByType    ||
+       real_performance.webkitGetEntriesByType);
+
+  // This variable will be available to the function below, and it will
+  // persist across different function calls. It stores the last
+  // entry in the list of PerformanceResourceTiming objects returned by
+  // real_performance.getEntriesByType('resource').
+  //
+  // The reason for doing it this way is because the buffer for
+  // PerformanceResourceTiming objects has a limit, and once it's full,
+  // new entries are not added. We're only interested in the last entry,
+  // so we can clear new entries when they're added.
+  var lastEntry = null;
+
+  // True when no resource has been loaded from the network for
+  // |QUIESCENCE_TIMEOUT_MS| milliseconds. This value is sticky.
+  var hasReachedQuiesence = false;
+
+  // Time to wait before declaring network quiescence in milliseconds.
+  var QUIESCENCE_TIMEOUT_MS = 2000;
+
+  /**
+   * This method uses the Resource Timing interface, which is described at
+   * http://www.w3.org/TR/resource-timing/. It determines whether the time
+   * since loading any resources such as images and script files (including
+   * resources requested via XMLHttpRequest) has exceeded a threshold defined
+   * by |QUIESCENCE_TIMEOUT_MS|.
+   *
+   * @return {boolean} True if the time since the load event, or since the
+   *   last resource was received after the load event, exceeds the
+   *   aforementioned threshold. This state is sticky, so once this function
+   *   returns true for a given page, it will always return true.
+   */
+  window.__telemetry_testHasReachedNetworkQuiescence = function() {
+    if (hasReachedQuiesence) {
+      return true;
+    }
+
+    if (window.document.readyState !== 'complete') {
+      return false;
+    }
+
+    var resourceTimings = real_performance.getEntriesByType('resource');
+    if (resourceTimings.length > 0) {
+      lastEntry = resourceTimings.pop();
+      real_performance.clearResourceTimings();
+    }
+
+    // The times for performance.now() and in the PerformanceResourceTiming
+    // objects are all in milliseconds since performance.timing.navigationStart,
+    // so we must also get load time in the same terms.
+    var timing = real_performance.timing;
+    var loadTime = timing.loadEventEnd - timing.navigationStart;
+    var lastResponseTimeMs = 0;
+
+    // If there have been no resource timing entries, or the last entry was
+    // before the load event, then use the time since the load event.
+    if (!lastEntry || lastEntry.responseEnd < loadTime) {
+      lastResponseTimeMs = real_performance.now() - loadTime;
+    } else {
+      lastResponseTimeMs = real_performance.now() - lastEntry.responseEnd;
+    }
+
+    if (lastResponseTimeMs >= QUIESCENCE_TIMEOUT_MS) {
+      hasReachedQuiesence = true;
+    }
+
+    return hasReachedQuiesence;
+  };
+
+})();
diff --git a/catapult/telemetry/telemetry/internal/browser/possible_browser.py b/catapult/telemetry/telemetry/internal/browser/possible_browser.py
new file mode 100644
index 0000000..cf73b10
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/browser/possible_browser.py
@@ -0,0 +1,54 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.internal.app import possible_app
+
+
+class PossibleBrowser(possible_app.PossibleApp):
+  """A browser that can be controlled.
+
+  Call Create() to launch the browser and begin manipulating it.
+  """
+
+  def __init__(self, browser_type, target_os, supports_tab_control):
+    super(PossibleBrowser, self).__init__(app_type=browser_type,
+                                          target_os=target_os)
+    self._supports_tab_control = supports_tab_control
+    self._credentials_path = None
+
+  def __repr__(self):
+    return 'PossibleBrowser(app_type=%s)' % self.app_type
+
+  @property
+  def browser_type(self):
+    return self.app_type
+
+  @property
+  def supports_tab_control(self):
+    return self._supports_tab_control
+
+  def _InitPlatformIfNeeded(self):
+    raise NotImplementedError()
+
+  def Create(self, finder_options):
+    raise NotImplementedError()
+
+  def SupportsOptions(self, finder_options):
+    """Tests for extension support."""
+    raise NotImplementedError()
+
+  def IsRemote(self):
+    return False
+
+  def RunRemote(self):
+    pass
+
+  def UpdateExecutableIfNeeded(self):
+    pass
+
+  def last_modification_time(self):
+    return -1
+
+  def SetCredentialsPath(self, credentials_path):
+    self._credentials_path = credentials_path
diff --git a/catapult/telemetry/telemetry/internal/browser/profile_types.py b/catapult/telemetry/telemetry/internal/browser/profile_types.py
new file mode 100644
index 0000000..73a0408
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/browser/profile_types.py
@@ -0,0 +1,33 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+
+from telemetry.core import util
+
+BASE_PROFILE_TYPES = ['clean', 'default']
+
+PROFILE_TYPE_MAPPING = {
+  'typical_user': 'content_scripts1',
+  'power_user': 'extension_webrequest',
+}
+
+def GetProfileTypes():
+  """Returns a list of all command line options that can be specified for
+  profile type."""
+  return BASE_PROFILE_TYPES + PROFILE_TYPE_MAPPING.keys()
+
+def GetProfileDir(profile_type):
+  """Given a |profile_type| (as returned by GetProfileTypes()), return the
+  directory to use for that profile or None if the profile doesn't need a
+  profile directory (e.g. using the browser default profile).
+  """
+  if profile_type in BASE_PROFILE_TYPES:
+    return None
+
+  path = os.path.join(
+      util.GetTelemetryDir(), 'telemetry', 'internal', 'browser_profiles')
+
+  assert os.path.exists(path)
+  return path
diff --git a/catapult/telemetry/telemetry/internal/browser/profile_types_unittest.py b/catapult/telemetry/telemetry/internal/browser/profile_types_unittest.py
new file mode 100644
index 0000000..fd99c1a
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/browser/profile_types_unittest.py
@@ -0,0 +1,17 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import unittest
+
+from telemetry.internal.browser import profile_types
+
+
+class ProfileTypesTest(unittest.TestCase):
+  def testGetProfileTypes(self):
+    types = profile_types.GetProfileTypes()
+
+    self.assertTrue('clean' in types)
+    self.assertTrue(len(types) > 0)
+
+  def testGetProfileDir(self):
+    self.assertFalse(profile_types.GetProfileDir('typical_user') is None)
diff --git a/catapult/telemetry/telemetry/internal/browser/tab.py b/catapult/telemetry/telemetry/internal/browser/tab.py
new file mode 100644
index 0000000..9c1f79f
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/browser/tab.py
@@ -0,0 +1,259 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.internal.browser import web_contents
+from telemetry.internal.image_processing import video
+
+DEFAULT_TAB_TIMEOUT = 60
+
+
+class Tab(web_contents.WebContents):
+  """Represents a tab in the browser
+
+  The important parts of the Tab object are in the runtime and page objects.
+  E.g.:
+      # Navigates the tab to a given url.
+      tab.Navigate('http://www.google.com/')
+
+      # Evaluates 1+1 in the tab's JavaScript context.
+      tab.EvaluateJavaScript('1+1')
+  """
+  def __init__(self, inspector_backend, tab_list_backend, browser):
+    super(Tab, self).__init__(inspector_backend)
+    self._tab_list_backend = tab_list_backend
+    self._browser = browser
+
+  @property
+  def browser(self):
+    """The browser in which this tab resides."""
+    return self._browser
+
+  @property
+  def url(self):
+    """Returns the URL of the tab, as reported by devtools.
+
+    Raises:
+      devtools_http.DevToolsClientConnectionError
+    """
+    return self._inspector_backend.url
+
+  @property
+  def dom_stats(self):
+    """A dictionary populated with measured DOM statistics.
+
+    Currently this dictionary contains:
+    {
+      'document_count': integer,
+      'node_count': integer,
+      'event_listener_count': integer
+    }
+
+    Raises:
+      inspector_memory.InspectorMemoryException
+      exceptions.TimeoutException
+      exceptions.DevtoolsTargetCrashException
+    """
+    dom_counters = self._inspector_backend.GetDOMStats(
+        timeout=DEFAULT_TAB_TIMEOUT)
+    assert (len(dom_counters) == 3 and
+            all([x in dom_counters for x in ['document_count', 'node_count',
+                                             'event_listener_count']]))
+    return dom_counters
+
+  def Activate(self):
+    """Brings this tab to the foreground asynchronously.
+
+    Not all browsers or browser versions support this method.
+    Be sure to check browser.supports_tab_control.
+
+    Please note: this is asynchronous. There is a delay between this call
+    and the page's documentVisibilityState becoming 'visible', and yet more
+    delay until the actual tab is visible to the user. None of these delays
+    are included in this call.
+
+    Raises:
+      devtools_http.DevToolsClientConnectionError
+      devtools_client_backend.TabNotFoundError
+      tab_list_backend.TabUnexpectedResponseException
+    """
+    self._tab_list_backend.ActivateTab(self.id)
+
+  def Close(self):
+    """Closes this tab.
+
+    Not all browsers or browser versions support this method.
+    Be sure to check browser.supports_tab_control.
+
+    Raises:
+      devtools_http.DevToolsClientConnectionError
+      devtools_client_backend.TabNotFoundError
+      tab_list_backend.TabUnexpectedResponseException
+      exceptions.TimeoutException
+    """
+    self._tab_list_backend.CloseTab(self.id)
+
+  @property
+  def screenshot_supported(self):
+    """True if the browser instance is capable of capturing screenshots."""
+    return self._inspector_backend.screenshot_supported
+
+  def Screenshot(self, timeout=DEFAULT_TAB_TIMEOUT):
+    """Capture a screenshot of the tab's contents.
+
+    Returns:
+      A telemetry.core.Bitmap.
+    Raises:
+      exceptions.WebSocketDisconnected
+      exceptions.TimeoutException
+      exceptions.DevtoolsTargetCrashException
+    """
+    return self._inspector_backend.Screenshot(timeout)
+
+  @property
+  def video_capture_supported(self):
+    """True if the browser instance is capable of capturing video."""
+    return self.browser.platform.CanCaptureVideo()
+
+  def Highlight(self, color):
+    """Synchronously highlights entire tab contents with the given RgbaColor.
+
+    TODO(tonyg): It is possible that the z-index hack here might not work for
+    all pages. If this happens, DevTools also provides a method for this.
+
+    Raises:
+      exceptions.EvaluateException
+      exceptions.WebSocketDisconnected
+      exceptions.TimeoutException
+      exceptions.DevtoolsTargetCrashException
+    """
+    self.ExecuteJavaScript("""
+      (function() {
+        var screen = document.createElement('div');
+        screen.style.background = 'rgba(%d, %d, %d, %d)';
+        screen.style.position = 'fixed';
+        screen.style.top = '0';
+        screen.style.left = '0';
+        screen.style.width = '100%%';
+        screen.style.height = '100%%';
+        screen.style.zIndex = '2147483638';
+        document.body.appendChild(screen);
+        requestAnimationFrame(function() {
+          requestAnimationFrame(function() {
+            window.__telemetry_screen_%d = screen;
+          });
+        });
+      })();
+    """ % (color.r, color.g, color.b, color.a, int(color)))
+    self.WaitForJavaScriptExpression(
+        '!!window.__telemetry_screen_%d' % int(color), 5)
+
+  def ClearHighlight(self, color):
+    """Clears a highlight of the given bitmap.RgbaColor.
+
+    Raises:
+      exceptions.EvaluateException
+      exceptions.WebSocketDisconnected
+      exceptions.TimeoutException
+      exceptions.DevtoolsTargetCrashException
+    """
+    self.ExecuteJavaScript("""
+      (function() {
+        document.body.removeChild(window.__telemetry_screen_%d);
+        requestAnimationFrame(function() {
+          requestAnimationFrame(function() {
+            window.__telemetry_screen_%d = null;
+            console.time('__ClearHighlight.video_capture_start');
+            console.timeEnd('__ClearHighlight.video_capture_start');
+          });
+        });
+      })();
+    """ % (int(color), int(color)))
+    self.WaitForJavaScriptExpression(
+        '!window.__telemetry_screen_%d' % int(color), 5)
+
+  def StartVideoCapture(self, min_bitrate_mbps,
+                        highlight_bitmap=video.HIGHLIGHT_ORANGE_FRAME):
+    """Starts capturing video of the tab's contents.
+
+    This works by flashing the entire tab contents to an arbitrary color and
+    then starting video recording. When the frames are processed, we can look
+    for that flash as the content bounds.
+
+    Args:
+      min_bitrate_mbps: The minimum capture bitrate in megabits per second.
+          The platform is free to deliver a higher bitrate if it can do so
+          without increasing overhead.
+
+    Raises:
+      exceptions.EvaluateException
+      exceptions.WebSocketDisconnected
+      exceptions.TimeoutException
+      exceptions.DevtoolsTargetCrashException
+      ValueError: If the required |min_bitrate_mbps| can't be achieved.
+    """
+    self.Highlight(highlight_bitmap)
+    self.browser.platform.StartVideoCapture(min_bitrate_mbps)
+    self.ClearHighlight(highlight_bitmap)
+
+  @property
+  def is_video_capture_running(self):
+    return self.browser.platform.is_video_capture_running
+
+  def StopVideoCapture(self):
+    """Stops recording video of the tab's contents.
+
+    This looks for the initial color flash in the first frame to establish the
+    tab content boundaries and then omits all frames displaying the flash.
+
+    Returns:
+      A telemetry.core.Video object of the captured tab contents.
+    """
+    return self.browser.platform.StopVideoCapture()
+
+  def GetCookieByName(self, name, timeout=DEFAULT_TAB_TIMEOUT):
+    """Returns the value of the cookie by the given |name|.
+
+    Raises:
+      exceptions.WebSocketDisconnected
+      exceptions.TimeoutException
+      exceptions.DevtoolsTargetCrashException
+    """
+    return self._inspector_backend.GetCookieByName(name, timeout)
+
+  def CollectGarbage(self):
+    """Forces a garbage collection.
+
+    Raises:
+      exceptions.WebSocketDisconnected
+      exceptions.TimeoutException
+      exceptions.DevtoolsTargetCrashException
+    """
+    self._inspector_backend.CollectGarbage()
+
+  def ClearCache(self, force):
+    """Clears the browser's networking related disk, memory and other caches.
+
+    Args:
+      force: Iff true, navigates to about:blank which destroys the previous
+          renderer, ensuring that even "live" resources in the memory cache are
+          cleared.
+
+    Raises:
+      exceptions.EvaluateException
+      exceptions.WebSocketDisconnected
+      exceptions.TimeoutException
+      exceptions.DevtoolsTargetCrashException
+      errors.DeviceUnresponsiveError
+    """
+    self.browser.platform.FlushDnsCache()
+    self.ExecuteJavaScript("""
+        if (window.chrome && chrome.benchmarking &&
+            chrome.benchmarking.clearCache) {
+          chrome.benchmarking.clearCache();
+          chrome.benchmarking.clearPredictorCache();
+          chrome.benchmarking.clearHostResolverCache();
+        }
+    """)
+    if force:
+      self.Navigate('about:blank')
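+
+
+# A minimal usage sketch, assuming `tab` is a live Tab obtained elsewhere
+# (e.g. browser.tabs[0]) and that the platform supports video capture:
+#
+#   if tab.video_capture_supported:
+#     tab.StartVideoCapture(min_bitrate_mbps=2)   # flashes the highlight frame
+#     tab.Navigate('http://example.com')          # the workload being recorded
+#     tab.WaitForDocumentReadyStateToBeComplete()
+#     recording = tab.StopVideoCapture()          # Video object; frames showing
+#                                                 # the flash are omitted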
diff --git a/catapult/telemetry/telemetry/internal/browser/tab_list.py b/catapult/telemetry/telemetry/internal/browser/tab_list.py
new file mode 100644
index 0000000..99bbaae
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/browser/tab_list.py
@@ -0,0 +1,23 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+class TabList(object):
+  def __init__(self, tab_list_backend):
+    self._tab_list_backend = tab_list_backend
+
+  def New(self, timeout=300):
+    return self._tab_list_backend.New(timeout)
+
+  def __iter__(self):
+    return self._tab_list_backend.__iter__()
+
+  def __len__(self):
+    return self._tab_list_backend.__len__()
+
+  def __getitem__(self, index):
+    return self._tab_list_backend.__getitem__(index)
+
+  def GetTabById(self, identifier):
+    """The identifier of a tab can be accessed with tab.id."""
+    return self._tab_list_backend.GetTabById(identifier)
+
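+# A minimal usage sketch, assuming `browser` is a running Browser whose
+# `tabs` property exposes this TabList:
+#
+#   tab = browser.tabs.New()                    # open a new tab
+#   same_tab = browser.tabs.GetTabById(tab.id)  # look it up again by id
+#   num_open = len(browser.tabs)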
diff --git a/catapult/telemetry/telemetry/internal/browser/tab_unittest.py b/catapult/telemetry/telemetry/internal/browser/tab_unittest.py
new file mode 100644
index 0000000..f5faa6f
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/browser/tab_unittest.py
@@ -0,0 +1,249 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import tempfile
+import time
+
+from telemetry.core import exceptions
+from telemetry.core import util
+from telemetry import decorators
+from telemetry.internal.image_processing import video
+from telemetry.page import action_runner
+from telemetry.testing import tab_test_case
+from telemetry.timeline import model
+from telemetry.timeline import tracing_config
+from telemetry.util import image_util
+from telemetry.util import rgba_color
+
+
+def _IsDocumentVisible(tab):
+  return not tab.EvaluateJavaScript('document.hidden || document.webkitHidden')
+
+
+class FakePlatformBackend(object):
+  def __init__(self):
+    self.platform = FakePlatform()
+
+  def DidStartBrowser(self, _, _2):
+    pass
+
+  def WillCloseBrowser(self, _, _2):
+    pass
+
+
+class FakePlatform(object):
+  def __init__(self):
+    self._is_video_capture_running = False
+
+  #pylint: disable=unused-argument
+  def StartVideoCapture(self, min_bitrate_mbps):
+    self._is_video_capture_running = True
+
+  def StopVideoCapture(self):
+    self._is_video_capture_running = False
+    return video.Video(tempfile.NamedTemporaryFile())
+
+  @property
+  def is_video_capture_running(self):
+    return self._is_video_capture_running
+
+
+class TabTest(tab_test_case.TabTestCase):
+  def testNavigateAndWaitForCompleteState(self):
+    self._tab.Navigate(self.UrlOfUnittestFile('blank.html'))
+    self._tab.WaitForDocumentReadyStateToBeComplete()
+
+  def testNavigateAndWaitForInteractiveState(self):
+    self._tab.Navigate(self.UrlOfUnittestFile('blank.html'))
+    self._tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()
+
+  def testTabBrowserIsRightBrowser(self):
+    self.assertEquals(self._tab.browser, self._browser)
+
+  def testRendererCrash(self):
+    self.assertRaises(exceptions.DevtoolsTargetCrashException,
+                      lambda: self._tab.Navigate('chrome://crash',
+                                                 timeout=30))
+
+  def testTimeoutExceptionIncludeConsoleMessage(self):
+    self._tab.EvaluateJavaScript("""
+        window.__set_timeout_called = false;
+        function buggyReference() {
+          window.__set_timeout_called = true;
+          if (window.__one.not_defined === undefined)
+             window.__one = 1;
+        }
+        setTimeout(buggyReference, 200);""")
+    self._tab.WaitForJavaScriptExpression(
+        'window.__set_timeout_called === true', 5)
+    with self.assertRaises(exceptions.TimeoutException) as context:
+      self._tab.WaitForJavaScriptExpression(
+          'window.__one === 1', 1)
+    self.assertIn(
+        ("(error) :5: Uncaught TypeError: Cannot read property 'not_defined' "
+         'of undefined\n'),
+        context.exception.message)
+
+  @decorators.Enabled('has tabs')
+  def testActivateTab(self):
+    util.WaitFor(lambda: _IsDocumentVisible(self._tab), timeout=5)
+    new_tab = self._browser.tabs.New()
+    new_tab.Navigate('about:blank')
+    util.WaitFor(lambda: _IsDocumentVisible(new_tab), timeout=5)
+    self.assertFalse(_IsDocumentVisible(self._tab))
+    self._tab.Activate()
+    util.WaitFor(lambda: _IsDocumentVisible(self._tab), timeout=5)
+    self.assertFalse(_IsDocumentVisible(new_tab))
+
+  def testTabUrl(self):
+    self.assertEquals(self._tab.url, 'about:blank')
+    url = self.UrlOfUnittestFile('blank.html')
+    self._tab.Navigate(url)
+    self.assertEquals(self._tab.url, url)
+
+  #pylint: disable=protected-access
+  def testIsVideoCaptureRunning(self):
+    original_platform_backend = self._tab.browser._platform_backend
+    try:
+      self._tab.browser._platform_backend = FakePlatformBackend()
+      self.assertFalse(self._tab.is_video_capture_running)
+      self._tab.StartVideoCapture(min_bitrate_mbps=2)
+      self.assertTrue(self._tab.is_video_capture_running)
+      self.assertIsNotNone(self._tab.StopVideoCapture())
+      self.assertFalse(self._tab.is_video_capture_running)
+    finally:
+      self._tab.browser._platform_backend = original_platform_backend
+
+  # Test failing on android: http://crbug.com/437057
+  # and mac: http://crbug.com/468675
+  @decorators.Disabled('android', 'chromeos', 'mac')
+  @decorators.Disabled('win')  # crbug.com/570955
+  def testHighlight(self):
+    self.assertEquals(self._tab.url, 'about:blank')
+    config = tracing_config.TracingConfig()
+    config.SetNoOverheadFilter()
+    config.enable_chrome_trace = True
+    self._browser.platform.tracing_controller.StartTracing(config)
+    self._tab.Highlight(rgba_color.WEB_PAGE_TEST_ORANGE)
+    self._tab.ClearHighlight(rgba_color.WEB_PAGE_TEST_ORANGE)
+    trace_data = self._browser.platform.tracing_controller.StopTracing()
+    timeline_model = model.TimelineModel(trace_data)
+    renderer_thread = timeline_model.GetRendererThreadFromTabId(
+        self._tab.id)
+    found_video_start_event = False
+    for event in renderer_thread.async_slices:
+      if event.name == '__ClearHighlight.video_capture_start':
+        found_video_start_event = True
+        break
+    self.assertTrue(found_video_start_event)
+
+  @decorators.Enabled('has tabs')
+  @decorators.Disabled('mac', 'linux')  # crbug.com/499207.
+  @decorators.Disabled('win')  # crbug.com/570955.
+  def testGetRendererThreadFromTabId(self):
+    self.assertEquals(self._tab.url, 'about:blank')
+    # Create 3 tabs. The third tab is closed before we call
+    # tracing_controller.StartTracing.
+    first_tab = self._tab
+    second_tab = self._browser.tabs.New()
+    second_tab.Navigate('about:blank')
+    second_tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()
+    third_tab = self._browser.tabs.New()
+    third_tab.Navigate('about:blank')
+    third_tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()
+    third_tab.Close()
+    config = tracing_config.TracingConfig()
+    config.SetNoOverheadFilter()
+    config.enable_chrome_trace = True
+    self._browser.platform.tracing_controller.StartTracing(config)
+    first_tab.ExecuteJavaScript('console.time("first-tab-marker");')
+    first_tab.ExecuteJavaScript('console.timeEnd("first-tab-marker");')
+    second_tab.ExecuteJavaScript('console.time("second-tab-marker");')
+    second_tab.ExecuteJavaScript('console.timeEnd("second-tab-marker");')
+    trace_data = self._browser.platform.tracing_controller.StopTracing()
+    timeline_model = model.TimelineModel(trace_data)
+
+    # Assert that the renderer_thread of the first tab contains
+    # 'first-tab-marker'.
+    renderer_thread = timeline_model.GetRendererThreadFromTabId(
+        first_tab.id)
+    first_tab_markers = list(
+        renderer_thread.IterAllSlicesOfName('first-tab-marker'))
+    self.assertEquals(1, len(first_tab_markers))
+
+    # Close second tab and assert that the renderer_thread of the second tab
+    # contains 'second-tab-marker'.
+    second_tab.Close()
+    renderer_thread = timeline_model.GetRendererThreadFromTabId(
+        second_tab.id)
+    second_tab_markers = list(
+        renderer_thread.IterAllSlicesOfName('second-tab-marker'))
+    self.assertEquals(1, len(second_tab_markers))
+
+    # The third tab was closed before we started tracing, so there is no
+    # renderer_thread corresponding to it in the trace.
+    self.assertIs(None, timeline_model.GetRendererThreadFromTabId(third_tab.id))
+
+  @decorators.Disabled('android') # https://crbug.com/463933
+  def testTabIsAlive(self):
+    self.assertEquals(self._tab.url, 'about:blank')
+    self.assertTrue(self._tab.IsAlive())
+
+    self._tab.Navigate(self.UrlOfUnittestFile('blank.html'))
+    self.assertTrue(self._tab.IsAlive())
+
+    self.assertRaises(exceptions.DevtoolsTargetCrashException,
+        lambda: self._tab.Navigate(self.UrlOfUnittestFile('chrome://crash')))
+    self.assertFalse(self._tab.IsAlive())
+
+
+class GpuTabTest(tab_test_case.TabTestCase):
+  @classmethod
+  def CustomizeBrowserOptions(cls, options):
+    options.AppendExtraBrowserArgs('--enable-gpu-benchmarking')
+
+  # Test flaky on mac: crbug.com/358664, chromeos: crbug.com/483212.
+  @decorators.Disabled('android', 'mac', 'chromeos')
+  def testScreenshot(self):
+    if not self._tab.screenshot_supported:
+      logging.warning('Browser does not support screenshots, skipping test.')
+      return
+
+    self.Navigate('green_rect.html')
+    pixel_ratio = self._tab.EvaluateJavaScript('window.devicePixelRatio || 1')
+
+    screenshot = self._tab.Screenshot(5)
+    assert screenshot is not None
+    image_util.GetPixelColor(
+        screenshot, 0 * pixel_ratio, 0 * pixel_ratio).AssertIsRGB(
+            0, 255, 0, tolerance=2)
+    image_util.GetPixelColor(
+        screenshot, 31 * pixel_ratio, 31 * pixel_ratio).AssertIsRGB(
+            0, 255, 0, tolerance=2)
+    image_util.GetPixelColor(
+        screenshot, 32 * pixel_ratio, 32 * pixel_ratio).AssertIsRGB(
+            255, 255, 255, tolerance=2)
+
+
+class MediaRouterDialogTabTest(tab_test_case.TabTestCase):
+  @classmethod
+  def CustomizeBrowserOptions(cls, options):
+    options.AppendExtraBrowserArgs('--media-router=1')
+
+  # There is no media router dialog on android/chromeos, it is a desktop-only
+  # feature.
+  @decorators.Disabled('android', 'chromeos')
+  def testMediaRouterDialog(self):
+    self._tab.Navigate(self.UrlOfUnittestFile('cast.html'))
+    self._tab.WaitForDocumentReadyStateToBeComplete()
+    runner = action_runner.ActionRunner(self._tab)
+    runner.TapElement(selector='#start_session_button')
+    # Wait for media router dialog
+    start_time = time.time()
+    while (time.time() - start_time < 5 and
+           len(self.tabs) != 2):
+      time.sleep(1)
+    self.assertEquals(len(self.tabs), 2)
+    self.assertEquals(self.tabs[1].url, 'chrome://media-router/')
diff --git a/catapult/telemetry/telemetry/internal/browser/user_agent.py b/catapult/telemetry/telemetry/internal/browser/user_agent.py
new file mode 100644
index 0000000..9c160c9
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/browser/user_agent.py
@@ -0,0 +1,37 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+UA_TYPE_MAPPING = {
+  'desktop':
+      'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) '
+      'AppleWebKit/537.36 (KHTML, like Gecko) '
+      'Chrome/40.0.2194.2 Safari/537.36',
+  'mobile':
+      'Mozilla/5.0 (Linux; Android 4.0.4; Galaxy Nexus Build/IMM76B) '
+      'AppleWebKit/535.36 (KHTML, like Gecko) Chrome/40.0.2194.2 Mobile '
+      'Safari/535.36',
+  'tablet':
+      'Mozilla/5.0 (Linux; Android 4.0.4; Galaxy Nexus 7 Build/IMM76B) '
+      'AppleWebKit/535.36 (KHTML, like Gecko) Chrome/40.0.2194.2 '
+      'Safari/535.36',
+  'tablet_10_inch':
+      'Mozilla/5.0 (Linux; Android 4.0.4; Galaxy Nexus 10 Build/IMM76B) '
+      'AppleWebKit/535.36 (KHTML, like Gecko) Chrome/40.0.2194.2 '
+      'Safari/535.36',
+}
+
+
+def GetChromeUserAgentArgumentFromType(user_agent_type):
+  """Returns a chrome user agent based on a user agent type.
+  This is derived from:
+  https://developers.google.com/chrome/mobile/docs/user-agent
+  """
+  if user_agent_type:
+    return ['--user-agent=%s' % UA_TYPE_MAPPING[user_agent_type]]
+  return []
+
+def GetChromeUserAgentDictFromType(user_agent_type):
+  if user_agent_type:
+    return {'userAgent': UA_TYPE_MAPPING[user_agent_type]}
+  return {}
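+
+
+# A minimal sketch of how the helpers above are typically consumed, assuming
+# telemetry is importable; 'mobile' maps to the Galaxy Nexus UA string in
+# UA_TYPE_MAPPING:
+#
+#   from telemetry.internal.browser import user_agent
+#   args = user_agent.GetChromeUserAgentArgumentFromType('mobile')
+#   # -> ['--user-agent=Mozilla/5.0 (Linux; Android 4.0.4; ...']
+#   caps = user_agent.GetChromeUserAgentDictFromType('mobile')
+#   # -> {'userAgent': 'Mozilla/5.0 (Linux; Android 4.0.4; ...'}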
diff --git a/catapult/telemetry/telemetry/internal/browser/user_agent_unittest.py b/catapult/telemetry/telemetry/internal/browser/user_agent_unittest.py
new file mode 100644
index 0000000..19c5d68
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/browser/user_agent_unittest.py
@@ -0,0 +1,38 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry import decorators
+from telemetry.internal.browser import user_agent
+from telemetry.testing import tab_test_case
+
+
+class MobileUserAgentTest(tab_test_case.TabTestCase):
+  @classmethod
+  def CustomizeBrowserOptions(cls, options):
+    options.browser_user_agent_type = 'mobile'
+
+  @decorators.Disabled('chromeos')  # crbug.com/483212
+  def testUserAgent(self):
+    ua = self._tab.EvaluateJavaScript('window.navigator.userAgent')
+    self.assertEquals(ua, user_agent.UA_TYPE_MAPPING['mobile'])
+
+class TabletUserAgentTest(tab_test_case.TabTestCase):
+  @classmethod
+  def CustomizeBrowserOptions(cls, options):
+    options.browser_user_agent_type = 'tablet'
+
+  @decorators.Disabled('chromeos')  # crbug.com/483212
+  def testUserAgent(self):
+    ua = self._tab.EvaluateJavaScript('window.navigator.userAgent')
+    self.assertEquals(ua, user_agent.UA_TYPE_MAPPING['tablet'])
+
+class DesktopUserAgentTest(tab_test_case.TabTestCase):
+  @classmethod
+  def CustomizeBrowserOptions(cls, options):
+    options.browser_user_agent_type = 'desktop'
+
+  @decorators.Disabled('chromeos')  # crbug.com/483212
+  def testUserAgent(self):
+    ua = self._tab.EvaluateJavaScript('window.navigator.userAgent')
+    self.assertEquals(ua, user_agent.UA_TYPE_MAPPING['desktop'])
diff --git a/catapult/telemetry/telemetry/internal/browser/web_contents.py b/catapult/telemetry/telemetry/internal/browser/web_contents.py
new file mode 100644
index 0000000..b6135db
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/browser/web_contents.py
@@ -0,0 +1,284 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+
+from telemetry.core import exceptions
+from telemetry.core import util
+
+DEFAULT_WEB_CONTENTS_TIMEOUT = 90
+
+# TODO(achuith, dtu, nduca): Add unit tests specifically for WebContents,
+# independent of Tab.
+class WebContents(object):
+  """Represents web contents in the browser"""
+  def __init__(self, inspector_backend):
+    self._inspector_backend = inspector_backend
+
+    with open(os.path.join(os.path.dirname(__file__),
+        'network_quiescence.js')) as f:
+      self._quiescence_js = f.read()
+
+  @property
+  def id(self):
+    """Return the unique id string for this tab object."""
+    return self._inspector_backend.id
+
+  def GetUrl(self):
+    """Returns the URL to which the WebContents is connected.
+
+    Raises:
+      exceptions.Error: If there is an error in inspector backend connection.
+    """
+    return self._inspector_backend.url
+
+  def GetWebviewContexts(self):
+    """Returns a list of webview contexts within the current inspector backend.
+
+    Returns:
+      A list of WebContents objects representing the webview contexts.
+
+    Raises:
+      exceptions.Error: If there is an error in inspector backend connection.
+    """
+    webviews = []
+    inspector_backends = self._inspector_backend.GetWebviewInspectorBackends()
+    for inspector_backend in inspector_backends:
+      webviews.append(WebContents(inspector_backend))
+    return webviews
+
+  def WaitForDocumentReadyStateToBeComplete(self,
+      timeout=DEFAULT_WEB_CONTENTS_TIMEOUT):
+    """Waits for the document to finish loading.
+
+    Raises:
+      exceptions.Error: See WaitForJavaScriptExpression() for a detailed list
+      of possible exceptions.
+    """
+
+    self.WaitForJavaScriptExpression(
+        'document.readyState == "complete"', timeout)
+
+  def WaitForDocumentReadyStateToBeInteractiveOrBetter(self,
+      timeout=DEFAULT_WEB_CONTENTS_TIMEOUT):
+    """Waits for the document to be interactive.
+
+    Raises:
+      exceptions.Error: See WaitForJavaScriptExpression() for a detailed list
+      of possible exceptions.
+    """
+    self.WaitForJavaScriptExpression(
+        'document.readyState == "interactive" || '
+        'document.readyState == "complete"', timeout)
+
+  def WaitForJavaScriptExpression(self, expr, timeout):
+    """Waits for the given JavaScript expression to be True.
+
+    This method is robust against any single evaluation of |expr| timing out.
+
+    Args:
+      expr: The expression to evaluate.
+      timeout: The number of seconds to wait for the expression to be True.
+
+    Raises:
+      exceptions.TimeoutException: On a timeout.
+      exceptions.Error: See EvaluateJavaScript() for a detailed list of
+      possible exceptions.
+    """
+    def IsJavaScriptExpressionTrue():
+      try:
+        return bool(self.EvaluateJavaScript(expr))
+      except exceptions.TimeoutException:
+        # If the main thread is busy for longer than Evaluate's timeout, we
+        # may time out here early. Instead, we want to wait for the full
+        # timeout of this method.
+        return False
+    try:
+      util.WaitFor(IsJavaScriptExpressionTrue, timeout)
+    except exceptions.TimeoutException as e:
+      # Try to make timeouts a little more actionable by dumping console output.
+      debug_message = None
+      try:
+        debug_message = (
+            'Console output:\n%s' %
+            self._inspector_backend.GetCurrentConsoleOutputBuffer())
+      except Exception as e:
+        debug_message = (
+            'Exception thrown when trying to capture console output: %s' %
+            repr(e))
+      raise exceptions.TimeoutException(
+          e.message + '\n' + debug_message)
+
+  def HasReachedQuiescence(self):
+    """Determine whether the page has reached quiescence after loading.
+
+    Returns:
+      True if 2 seconds have passed since the last resource was received,
+      False otherwise.
+    Raises:
+      exceptions.Error: See EvaluateJavaScript() for a detailed list of
+      possible exceptions.
+    """
+
+    # Inclusion of the script that provides
+    # window.__telemetry_testHasReachedNetworkQuiescence() is idempotent.
+    # It is injected on every call because WebContents does not track page
+    # loads, so the script must be evaluated anew for each newly loaded page.
+    has_reached_quiescence = (
+        self.EvaluateJavaScript(self._quiescence_js +
+            "window.__telemetry_testHasReachedNetworkQuiescence()"))
+    return has_reached_quiescence
+
+  def ExecuteJavaScript(self, statement, timeout=DEFAULT_WEB_CONTENTS_TIMEOUT):
+    """Executes statement in JavaScript. Does not return the result.
+
+    If the statement failed to evaluate, EvaluateException will be raised.
+
+    Raises:
+      exceptions.Error: See ExecuteJavaScriptInContext() for a detailed list of
+      possible exceptions.
+    """
+    return self.ExecuteJavaScriptInContext(
+        statement, context_id=None, timeout=timeout)
+
+  def EvaluateJavaScript(self, expr, timeout=DEFAULT_WEB_CONTENTS_TIMEOUT):
+    """Evalutes expr in JavaScript and returns the JSONized result.
+
+    Consider using ExecuteJavaScript for cases where the result of the
+    expression is not needed.
+
+    If evaluation throws in JavaScript, a Python EvaluateException will
+    be raised.
+
+    If the result of the evaluation cannot be JSONized, then an
+    EvaluationException will be raised.
+
+    Raises:
+      exceptions.Error: See EvaluateJavaScriptInContext() for a detailed list
+      of possible exceptions.
+    """
+    return self.EvaluateJavaScriptInContext(
+        expr, context_id=None, timeout=timeout)
+
+  def ExecuteJavaScriptInContext(self, expr, context_id,
+                                 timeout=DEFAULT_WEB_CONTENTS_TIMEOUT):
+    """Similar to ExecuteJavaScript, except context_id can refer to an iframe.
+    The main page has context_id=1, the first iframe context_id=2, etc.
+
+    Raises:
+      exceptions.EvaluateException
+      exceptions.WebSocketDisconnected
+      exceptions.TimeoutException
+      exceptions.DevtoolsTargetCrashException
+    """
+    return self._inspector_backend.ExecuteJavaScript(
+        expr, context_id=context_id, timeout=timeout)
+
+  def EvaluateJavaScriptInContext(self, expr, context_id,
+                                  timeout=DEFAULT_WEB_CONTENTS_TIMEOUT):
+    """Similar to ExecuteJavaScript, except context_id can refer to an iframe.
+    The main page has context_id=1, the first iframe context_id=2, etc.
+
+    Raises:
+      exceptions.EvaluateException
+      exceptions.WebSocketDisconnected
+      exceptions.TimeoutException
+      exceptions.DevtoolsTargetCrashException
+    """
+    return self._inspector_backend.EvaluateJavaScript(
+        expr, context_id=context_id, timeout=timeout)
+
+  def EnableAllContexts(self):
+    """Enable all contexts in a page. Returns the number of available contexts.
+
+    Raises:
+      exceptions.WebSocketDisconnected
+      exceptions.TimeoutException
+      exceptions.DevtoolsTargetCrashException
+    """
+    return self._inspector_backend.EnableAllContexts()
+
+  def WaitForNavigate(self, timeout=DEFAULT_WEB_CONTENTS_TIMEOUT):
+    """Waits for the navigation to complete.
+
+    The current page is expected to be in a navigation.
+    This function returns when the navigation is complete or when
+    the timeout has been exceeded.
+
+    Raises:
+      exceptions.TimeoutException
+      exceptions.DevtoolsTargetCrashException
+    """
+    self._inspector_backend.WaitForNavigate(timeout)
+
+  def Navigate(self, url, script_to_evaluate_on_commit=None,
+               timeout=DEFAULT_WEB_CONTENTS_TIMEOUT):
+    """Navigates to url.
+
+    If |script_to_evaluate_on_commit| is given, the script source string will be
+    evaluated when the navigation is committed. This is after the context of
+    the page exists, but before any script on the page itself has executed.
+
+    Raises:
+      exceptions.TimeoutException
+      exceptions.DevtoolsTargetCrashException
+    """
+    self._inspector_backend.Navigate(url, script_to_evaluate_on_commit, timeout)
+
+  def IsAlive(self):
+    """Whether the WebContents is still operating normally.
+
+    Since a WebContents operates asynchronously, this method does not guarantee
+    that the WebContents will still be alive at any point in the future.
+
+    Returns:
+      A boolean indicating whether the WebContents is operating normally.
+    """
+    return self._inspector_backend.IsInspectable()
+
+  def CloseConnections(self):
+    """Closes all TCP sockets held open by the browser.
+
+    Raises:
+      exceptions.DevtoolsTargetCrashException if the tab is not alive.
+    """
+    if not self.IsAlive():
+      raise exceptions.DevtoolsTargetCrashException
+    self.ExecuteJavaScript('window.chrome && chrome.benchmarking &&'
+                           'chrome.benchmarking.closeConnections()')
+
+  def SynthesizeScrollGesture(self, x=100, y=800, xDistance=0, yDistance=-500,
+                              xOverscroll=None, yOverscroll=None,
+                              preventFling=True, speed=None,
+                              gestureSourceType=None, repeatCount=None,
+                              repeatDelayMs=None, interactionMarkerName=None,
+                              timeout=60):
+    """Runs an inspector command that causes a repeatable browser driven scroll.
+
+    Args:
+      x: X coordinate of the start of the gesture in CSS pixels.
+      y: Y coordinate of the start of the gesture in CSS pixels.
+      xDistance: Distance to scroll along the X axis (positive to scroll left).
+      yDistance: Distance to scroll along the Y axis (positive to scroll up).
+      xOverscroll: Number of additional pixels to scroll back along the X axis.
+      yOverscroll: Number of additional pixels to scroll back along the Y axis.
+      preventFling: Prevents a fling gesture.
+      speed: Swipe speed in pixels per second.
+      gestureSourceType: Which type of input events to be generated.
+      repeatCount: Number of additional repeats beyond the first scroll.
+      repeatDelayMs: Number of milliseconds delay between each repeat.
+      interactionMarkerName: The name of the interaction markers to generate.
+
+    Raises:
+      exceptions.TimeoutException
+      exceptions.DevtoolsTargetCrashException
+    """
+    return self._inspector_backend.SynthesizeScrollGesture(
+        x=x, y=y, xDistance=xDistance, yDistance=yDistance,
+        xOverscroll=xOverscroll, yOverscroll=yOverscroll,
+        preventFling=preventFling, speed=speed,
+        gestureSourceType=gestureSourceType, repeatCount=repeatCount,
+        repeatDelayMs=repeatDelayMs,
+        interactionMarkerName=interactionMarkerName,
+        timeout=timeout)
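+
+
+# A minimal usage sketch, assuming `contents` is a WebContents backed by a
+# live inspector connection:
+#
+#   contents.Navigate('http://example.com')
+#   contents.WaitForDocumentReadyStateToBeComplete()
+#   title = contents.EvaluateJavaScript('document.title')
+#   contents.ExecuteJavaScript(
+#       'window.__done = false;'
+#       'setTimeout(function() { window.__done = true; }, 100);')
+#   contents.WaitForJavaScriptExpression('window.__done === true', 30)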
diff --git a/catapult/telemetry/telemetry/internal/browser_profiles/content_scripts1/Default/Extensions/behllobkkfkfnphdnhnkndlbkcpglgmj/1.0.0.0/manifest.json b/catapult/telemetry/telemetry/internal/browser_profiles/content_scripts1/Default/Extensions/behllobkkfkfnphdnhnkndlbkcpglgmj/1.0.0.0/manifest.json
new file mode 100644
index 0000000..56fdb6c
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/browser_profiles/content_scripts1/Default/Extensions/behllobkkfkfnphdnhnkndlbkcpglgmj/1.0.0.0/manifest.json
@@ -0,0 +1,8 @@
+{
+  "key": "MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDuUZGKCDbff6IRaxa4Pue7PPkxwPaNhGT3JEqppEsNWFjM80imEdqMbf3lrWqEfaHgaNku7nlpwPO1mu3/4Hr+XdNa5MhfnOnuPee4hyTLwOs3Vzz81wpbdzUxZSi2OmqMyI5oTaBYICfNHLwcuc65N5dbt6WKGeKgTpp4v7j7zwIDAQAB",
+  "version": "1.0.0.0",
+  "name": "1 content script",
+  "content_scripts": [
+    { "matches": ["file://*"], "js": ["script.js"] }
+  ]
+}
diff --git a/catapult/telemetry/telemetry/internal/browser_profiles/content_scripts1/Default/Extensions/behllobkkfkfnphdnhnkndlbkcpglgmj/1.0.0.0/script.js b/catapult/telemetry/telemetry/internal/browser_profiles/content_scripts1/Default/Extensions/behllobkkfkfnphdnhnkndlbkcpglgmj/1.0.0.0/script.js
new file mode 100644
index 0000000..afc1cf8
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/browser_profiles/content_scripts1/Default/Extensions/behllobkkfkfnphdnhnkndlbkcpglgmj/1.0.0.0/script.js
@@ -0,0 +1,8 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Useless script just to test injection.
+var x = 1;
+var y = 2;
+var z = 3;
diff --git a/catapult/telemetry/telemetry/internal/browser_profiles/content_scripts1/Default/Preferences b/catapult/telemetry/telemetry/internal/browser_profiles/content_scripts1/Default/Preferences
new file mode 100644
index 0000000..142e7ff
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/browser_profiles/content_scripts1/Default/Preferences
@@ -0,0 +1,42 @@
+{
+   "download": {
+      "directory_upgrade": true,
+      "extensions_to_open": ""
+   },
+   "extensions": {
+      "autoupdate": {
+         "next_check": "12897640036342487"
+      },
+      "settings": {
+         "behllobkkfkfnphdnhnkndlbkcpglgmj": {
+            "active_permissions": {
+               "scriptable_host": [ "file:///*" ]
+            },
+            "granted_permissions": {
+               "scriptable_host": [ "file:///*" ]
+            },
+            "location": 1,
+            "manifest":
+            {
+              "key": "MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDuUZGKCDbff6IRaxa4Pue7PPkxwPaNhGT3JEqppEsNWFjM80imEdqMbf3lrWqEfaHgaNku7nlpwPO1mu3/4Hr+XdNa5MhfnOnuPee4hyTLwOs3Vzz81wpbdzUxZSi2OmqMyI5oTaBYICfNHLwcuc65N5dbt6WKGeKgTpp4v7j7zwIDAQAB",
+              "version": "1.0.0.0",
+              "name": "1 content script",
+              "content_scripts": [
+                { "matches": ["file://*"], "js": ["script.js"] }
+              ]
+            },
+            "path": "behllobkkfkfnphdnhnkndlbkcpglgmj/1.0.0.0",
+            "state": 1
+         }
+      }
+   },
+   "profile": {
+      "exited_cleanly": true,
+      "id": "not-signed-in",
+      "name": "",
+      "nickname": ""
+   },
+   "session": {
+      "startup_urls": [  ]
+   }
+}
diff --git a/catapult/telemetry/telemetry/internal/browser_profiles/extension_webrequest/Default/Extensions/behllobkkfkfnphdnhnkndlbkcpglgmj/1.0.0.0/background.html b/catapult/telemetry/telemetry/internal/browser_profiles/extension_webrequest/Default/Extensions/behllobkkfkfnphdnhnkndlbkcpglgmj/1.0.0.0/background.html
new file mode 100644
index 0000000..d3f9988
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/browser_profiles/extension_webrequest/Default/Extensions/behllobkkfkfnphdnhnkndlbkcpglgmj/1.0.0.0/background.html
@@ -0,0 +1,10 @@
+<script>
+var onBefore = chrome.webRequest.onBeforeRequest;
+onBefore.addListener(function(info) {
+  return {"cancel": false};
+}, {urls: ["http://*/*"]}, ['blocking']);
+var onBefore = chrome.webRequest.onBeforeRequest;
+onBefore.addListener(function(info) {
+  return {"cancel": false};
+}, {urls: ["file://*/*"]}, ['blocking']);
+</script>
diff --git a/catapult/telemetry/telemetry/internal/browser_profiles/extension_webrequest/Default/Extensions/behllobkkfkfnphdnhnkndlbkcpglgmj/1.0.0.0/manifest.json b/catapult/telemetry/telemetry/internal/browser_profiles/extension_webrequest/Default/Extensions/behllobkkfkfnphdnhnkndlbkcpglgmj/1.0.0.0/manifest.json
new file mode 100644
index 0000000..835bc44
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/browser_profiles/extension_webrequest/Default/Extensions/behllobkkfkfnphdnhnkndlbkcpglgmj/1.0.0.0/manifest.json
@@ -0,0 +1,9 @@
+{
+  "key": "MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDuUZGKCDbff6IRaxa4Pue7PPkxwPaNhGT3JEqppEsNWFjM80imEdqMbf3lrWqEfaHgaNku7nlpwPO1mu3/4Hr+XdNa5MhfnOnuPee4hyTLwOs3Vzz81wpbdzUxZSi2OmqMyI5oTaBYICfNHLwcuc65N5dbt6WKGeKgTpp4v7j7zwIDAQAB",
+  "version": "1.0.0.0",
+  "name": "Webrequest",
+  "permissions": ["experimental", "webRequest"],
+  "background": {
+    "page": "background.html"
+  }
+}
diff --git a/catapult/telemetry/telemetry/internal/browser_profiles/extension_webrequest/Default/Preferences b/catapult/telemetry/telemetry/internal/browser_profiles/extension_webrequest/Default/Preferences
new file mode 100644
index 0000000..e93588e
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/browser_profiles/extension_webrequest/Default/Preferences
@@ -0,0 +1,77 @@
+{
+   "browser": {
+      "window_placement": {
+         "bottom": 1150,
+         "left": 10,
+         "maximized": false,
+         "right": 955,
+         "top": 10,
+         "work_area_bottom": 1160,
+         "work_area_left": 0,
+         "work_area_right": 1920,
+         "work_area_top": 0
+      }
+   },
+   "countryid_at_install": 21843,
+   "dns_prefetching": {
+      "host_referral_list": [ 2 ],
+      "startup_list": [ 1 ]
+   },
+   "download": {
+      "directory_upgrade": true,
+      "extensions_to_open": ""
+   },
+   "extensions": {
+      "autoupdate": {
+         "next_check": "12943417068538765"
+      },
+      "chrome_url_overrides": {
+         "bookmarks": [ "chrome-extension://eemcgdkfndhakfknompkggombfjjjeno/main.html" ]
+      },
+      "settings": {
+         "behllobkkfkfnphdnhnkndlbkcpglgmj": {
+            "granted_permissions": {
+               "api": [ "experimental" ],
+               "full": false
+            },
+            "install_time": "12943416715915765",
+            "location": 1,
+            "manifest": {
+               "background": {
+                 "page": "background.html"
+               },
+               "key": "MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDuUZGKCDbff6IRaxa4Pue7PPkxwPaNhGT3JEqppEsNWFjM80imEdqMbf3lrWqEfaHgaNku7nlpwPO1mu3/4Hr+XdNa5MhfnOnuPee4hyTLwOs3Vzz81wpbdzUxZSi2OmqMyI5oTaBYICfNHLwcuc65N5dbt6WKGeKgTpp4v7j7zwIDAQAB",
+               "name": "Webrequest",
+               "permissions": [ "experimental", "webRequest" ],
+               "version": "1.0.0.0"
+            },
+            "path": "behllobkkfkfnphdnhnkndlbkcpglgmj/1.0.0.0",
+            "state": 1
+         }
+      }
+   },
+   "google": {
+      "services": {
+         "username": ""
+      }
+   },
+   "ntp": {
+      "pref_version": 3,
+      "promo_resource_cache_update": "1298943121.229765"
+   },
+   "plugins": {
+      "enabled_internal_pdf3": true
+   },
+   "profile": {
+      "content_settings": {
+         "pref_version": 1
+      },
+      "exited_cleanly": true,
+      "id": "not-signed-in",
+      "name": "",
+      "nickname": ""
+   },
+   "tabs": {
+      "use_vertical_tabs": false
+   }
+}
diff --git a/catapult/telemetry/telemetry/internal/forwarders/__init__.py b/catapult/telemetry/telemetry/internal/forwarders/__init__.py
new file mode 100644
index 0000000..796ab7f
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/forwarders/__init__.py
@@ -0,0 +1,78 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+
+
+PortPair = collections.namedtuple('PortPair', ['local_port', 'remote_port'])
+PortSet = collections.namedtuple('PortSet', ['http', 'https', 'dns'])
+
+class PortPairs(collections.namedtuple('PortPairs', ['http', 'https', 'dns'])):
+  __slots__ = ()
+
+  @classmethod
+  def Zip(cls, local_ports, remote_ports):
+    """Zip a pair of PortSet's into a single PortPairs object."""
+    with_dns = local_ports.dns is not None and remote_ports.dns is not None
+    return cls(
+      PortPair(local_ports.http, remote_ports.http),
+      PortPair(local_ports.https, remote_ports.https),
+      PortPair(local_ports.dns, remote_ports.dns) if with_dns else None)
+
+  @property
+  def local_ports(self):
+    """Return a tuple of local ports only."""
+    return PortSet(*[p.local_port if p is not None else None for p in self])
+
+  @property
+  def remote_ports(self):
+    """Return a tuple of remote ports only."""
+    return PortSet(*[p.remote_port if p is not None else None for p in self])
+
+
+class ForwarderFactory(object):
+
+  def Create(self, port_pairs):
+    """Creates a forwarder that maps remote (device) <-> local (host) ports.
+
+    Args:
+      port_pairs: A PortPairs instance that consists of a PortPair mapping
+          for each protocol. http is required. https and dns may be None.
+    """
+    raise NotImplementedError()
+
+  @property
+  def host_ip(self):
+    return '127.0.0.1'
+
+
+class Forwarder(object):
+
+  def __init__(self, port_pairs):
+    assert port_pairs.http, 'HTTP port mapping is required.'
+    self._port_pairs = PortPairs(*[
+        PortPair(p.local_port, p.remote_port or p.local_port)
+        if p else None for p in port_pairs])
+    self._forwarding = True
+
+  @property
+  def host_port(self):
+    return self._port_pairs.http.remote_port
+
+  @property
+  def host_ip(self):
+    return '127.0.0.1'
+
+  @property
+  def port_pairs(self):
+    return self._port_pairs
+
+  @property
+  def url(self):
+    assert self.host_ip and self.host_port
+    return 'http://%s:%i' % (self.host_ip, self.host_port)
+
+  def Close(self):
+    self._port_pairs = None
+    self._forwarding = False
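+
+
+# A minimal sketch of PortPairs.Zip with assumed port values; dns stays None
+# because the local side has no dns port:
+#
+#   local = PortSet(http=80, https=443, dns=None)
+#   remote = PortSet(http=8080, https=8443, dns=53)
+#   pairs = PortPairs.Zip(local, remote)
+#   pairs.http          # PortPair(local_port=80, remote_port=8080)
+#   pairs.dns           # None
+#   pairs.local_ports   # PortSet(http=80, https=443, dns=None)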
diff --git a/catapult/telemetry/telemetry/internal/forwarders/android_forwarder.py b/catapult/telemetry/telemetry/internal/forwarders/android_forwarder.py
new file mode 100644
index 0000000..1d581dc
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/forwarders/android_forwarder.py
@@ -0,0 +1,75 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import atexit
+import logging
+import subprocess
+
+from telemetry.internal import forwarders
+
+try:
+  from devil.android import forwarder
+except ImportError:
+  forwarder = None
+
+
+class AndroidForwarderFactory(forwarders.ForwarderFactory):
+
+  def __init__(self, device):
+    super(AndroidForwarderFactory, self).__init__()
+    self._device = device
+
+  def Create(self, port_pairs):
+    try:
+      return AndroidForwarder(self._device, port_pairs)
+    except Exception:
+      try:
+        logging.warning('Failed to create forwarder. '
+                        'Currently forwarded connections:')
+        for line in self._device.adb.ForwardList().splitlines():
+          logging.warning('  %s', line)
+      except Exception:
+        logging.warning('Exception raised while listing forwarded connections.')
+
+      logging.warning('Device tcp sockets in use:')
+      try:
+        for line in self._device.ReadFile('/proc/net/tcp', as_root=True,
+                                          force_pull=True).splitlines():
+          logging.warning('  %s', line)
+      except Exception:
+        logging.warning('Exception raised while listing tcp sockets.')
+
+      logging.warning('Alive webpagereplay instances:')
+      try:
+        for line in subprocess.check_output(['ps', '-ef']).splitlines():
+          if 'webpagereplay' in line:
+            logging.warning('  %s', line)
+      except Exception:
+        logging.warning('Exception raised while listing WPR instances.')
+
+      raise
+
+
+class AndroidForwarder(forwarders.Forwarder):
+
+  def __init__(self, device, port_pairs):
+    super(AndroidForwarder, self).__init__(port_pairs)
+    self._device = device
+    forwarder.Forwarder.Map([(p.remote_port, p.local_port)
+                             for p in port_pairs if p], self._device)
+    self._port_pairs = forwarders.PortPairs(*[
+        forwarders.PortPair(
+            p.local_port,
+            forwarder.Forwarder.DevicePortForHostPort(p.local_port))
+        if p else None for p in port_pairs])
+    atexit.register(self.Close)
+    # TODO(tonyg): Verify that each port can connect to host.
+
+  def Close(self):
+    if self._forwarding:
+      for port_pair in self._port_pairs:
+        if port_pair:
+          forwarder.Forwarder.UnmapDevicePort(
+              port_pair.remote_port, self._device)
+      super(AndroidForwarder, self).Close()
diff --git a/catapult/telemetry/telemetry/internal/forwarders/cros_forwarder.py b/catapult/telemetry/telemetry/internal/forwarders/cros_forwarder.py
new file mode 100644
index 0000000..d8a9ab3
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/forwarders/cros_forwarder.py
@@ -0,0 +1,61 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import subprocess
+
+from telemetry.core import util
+from telemetry.internal import forwarders
+from telemetry.internal.forwarders import do_nothing_forwarder
+
+
+class CrOsForwarderFactory(forwarders.ForwarderFactory):
+
+  def __init__(self, cri):
+    super(CrOsForwarderFactory, self).__init__()
+    self._cri = cri
+
+  # pylint: disable=arguments-differ
+  def Create(self, port_pairs, use_remote_port_forwarding=True):
+    if self._cri.local:
+      return do_nothing_forwarder.DoNothingForwarder(port_pairs)
+    return CrOsSshForwarder(self._cri, use_remote_port_forwarding, port_pairs)
+
+
+class CrOsSshForwarder(forwarders.Forwarder):
+
+  def __init__(self, cri, use_remote_port_forwarding, port_pairs):
+    super(CrOsSshForwarder, self).__init__(port_pairs)
+    self._cri = cri
+    self._proc = None
+    forwarding_args = self._ForwardingArgs(
+        use_remote_port_forwarding, self.host_ip, port_pairs)
+    self._proc = subprocess.Popen(
+        self._cri.FormSSHCommandLine(['sleep', '999999999'], forwarding_args),
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        stdin=subprocess.PIPE,
+        shell=False)
+    util.WaitFor(
+        lambda: self._cri.IsHTTPServerRunningOnPort(self.host_port), 60)
+    logging.debug('Server started on %s:%d', self.host_ip, self.host_port)
+
+  # pylint: disable=unused-argument
+  @staticmethod
+  def _ForwardingArgs(use_remote_port_forwarding, host_ip, port_pairs):
+    if use_remote_port_forwarding:
+      arg_format = '-R{pp.remote_port}:{host_ip}:{pp.local_port}'
+    else:
+      arg_format = '-L{pp.local_port}:{host_ip}:{pp.remote_port}'
+    return [arg_format.format(**locals()) for pp in port_pairs if pp]
+
+  @property
+  def host_port(self):
+    return self._port_pairs.http.remote_port
+
+  def Close(self):
+    if self._proc:
+      self._proc.kill()
+      self._proc = None
+    super(CrOsSshForwarder, self).Close()
diff --git a/catapult/telemetry/telemetry/internal/forwarders/cros_forwarder_unittest.py b/catapult/telemetry/telemetry/internal/forwarders/cros_forwarder_unittest.py
new file mode 100644
index 0000000..5689296
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/forwarders/cros_forwarder_unittest.py
@@ -0,0 +1,31 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.internal import forwarders
+from telemetry.internal.forwarders import cros_forwarder
+
+# pylint: disable=protected-access
+class ForwardingArgsTest(unittest.TestCase):
+  port_pairs = forwarders.PortPairs(
+      http=forwarders.PortPair(111, 222),
+      https=forwarders.PortPair(333, 444),
+      dns=None)
+
+  def testForwardingArgsReverse(self):
+    forwarding_args = cros_forwarder.CrOsSshForwarder._ForwardingArgs(
+        use_remote_port_forwarding=True, host_ip='5.5.5.5',
+        port_pairs=self.port_pairs)
+    self.assertEqual(
+        ['-R222:5.5.5.5:111', '-R444:5.5.5.5:333'],
+        forwarding_args)
+
+  def testForwardingArgs(self):
+    forwarding_args = cros_forwarder.CrOsSshForwarder._ForwardingArgs(
+        use_remote_port_forwarding=False, host_ip='2.2.2.2',
+        port_pairs=self.port_pairs)
+    self.assertEqual(
+        ['-L111:2.2.2.2:222', '-L333:2.2.2.2:444'],
+        forwarding_args)
diff --git a/catapult/telemetry/telemetry/internal/forwarders/do_nothing_forwarder.py b/catapult/telemetry/telemetry/internal/forwarders/do_nothing_forwarder.py
new file mode 100644
index 0000000..1e08ff7
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/forwarders/do_nothing_forwarder.py
@@ -0,0 +1,77 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import contextlib
+import logging
+import socket
+
+from telemetry.core import exceptions
+from telemetry.core import util
+from telemetry.internal import forwarders
+
+
+class Error(Exception):
+  """Base class for exceptions in this module."""
+  pass
+
+
+class PortsMismatchError(Error):
+  """Raised when local and remote ports are not equal."""
+  pass
+
+
+class ConnectionError(Error):
+  """Raised when unable to connect to local TCP ports."""
+  pass
+
+
+class DoNothingForwarderFactory(forwarders.ForwarderFactory):
+
+  def Create(self, port_pairs):
+    return DoNothingForwarder(port_pairs)
+
+
+class DoNothingForwarder(forwarders.Forwarder):
+  """Check that no forwarding is needed for the given port pairs.
+
+  The local and remote ports must be equal. Otherwise, the "do nothing"
+  forwarder does not make sense. (Raises PortsMismatchError.)
+
+  Also, check that all TCP ports support connections.  (Raises ConnectionError.)
+  """
+
+  def __init__(self, port_pairs):
+    super(DoNothingForwarder, self).__init__(port_pairs)
+    self._CheckPortPairs()
+
+  def _CheckPortPairs(self):
+    # namedtuple._asdict() is a public method. The method starts with an
+    # underscore to avoid conflicts with attribute names.
+    # pylint: disable=protected-access
+    for protocol, port_pair in self._port_pairs._asdict().items():
+      if not port_pair:
+        continue
+      local_port, remote_port = port_pair
+      if local_port != remote_port:
+        raise PortsMismatchError('Local port forwarding is not supported')
+      if protocol == 'dns':
+        logging.debug('Connection test SKIPPED for DNS: %s:%d',
+                      self.host_ip, local_port)
+        continue
+      try:
+        self._WaitForConnectionEstablished(
+            (self.host_ip, local_port), timeout=10)
+        logging.debug(
+            'Connection test succeeded for %s: %s:%d',
+            protocol.upper(), self.host_ip, local_port)
+      except exceptions.TimeoutException:
+        raise ConnectionError(
+            'Unable to connect to %s address: %s:%d' %
+            (protocol.upper(), self.host_ip, local_port))
+
+  def _WaitForConnectionEstablished(self, address, timeout):
+    def CanConnect():
+      with contextlib.closing(socket.socket()) as s:
+        return s.connect_ex(address) == 0
+    util.WaitFor(CanConnect, timeout)
diff --git a/catapult/telemetry/telemetry/internal/forwarders/do_nothing_forwarder_unittest.py b/catapult/telemetry/telemetry/internal/forwarders/do_nothing_forwarder_unittest.py
new file mode 100644
index 0000000..6477161
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/forwarders/do_nothing_forwarder_unittest.py
@@ -0,0 +1,71 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.core import exceptions
+from telemetry.internal import forwarders
+from telemetry.internal.forwarders import do_nothing_forwarder
+
+
+class TestDoNothingForwarder(do_nothing_forwarder.DoNothingForwarder):
+  """Override _WaitForConnect to avoid actual socket connection."""
+
+  def __init__(self, port_pairs):
+    self.connected_addresses = []
+    super(TestDoNothingForwarder, self).__init__(port_pairs)
+
+  def _WaitForConnectionEstablished(self, address, timeout):
+    self.connected_addresses.append(address)
+
+
+class TestErrorDoNothingForwarder(do_nothing_forwarder.DoNothingForwarder):
+  """Simulate a connection error."""
+
+  def _WaitForConnectionEstablished(self, address, timeout):
+    raise exceptions.TimeoutException
+
+
+class CheckPortPairsTest(unittest.TestCase):
+  def testChecksOnlyHttpHttps(self):
+    port_pairs = forwarders.PortPairs(
+        http=forwarders.PortPair(80, 80),
+        https=forwarders.PortPair(443, 443),
+        dns=forwarders.PortPair(53, 53))
+    f = TestDoNothingForwarder(port_pairs)
+    expected_connected_addresses = [
+        ('127.0.0.1', 80),
+        ('127.0.0.1', 443),
+        # Port 53 is skipped because it is UDP and does not support connections.
+        ]
+    self.assertEqual(expected_connected_addresses, f.connected_addresses)
+
+  def testNoDnsStillChecksHttpHttps(self):
+    port_pairs = forwarders.PortPairs(
+        http=forwarders.PortPair(5566, 5566),
+        https=forwarders.PortPair(7788, 7788),
+        dns=None)
+    f = TestDoNothingForwarder(port_pairs)
+    expected_connected_addresses = [
+        ('127.0.0.1', 5566),
+        ('127.0.0.1', 7788),
+        ]
+    self.assertEqual(expected_connected_addresses, f.connected_addresses)
+
+  def testPortMismatchRaisesPortsMismatchError(self):
+    # The do_nothing_forward cannot forward from one port to another.
+    port_pairs = forwarders.PortPairs(
+        http=forwarders.PortPair(80, 80),
+        https=forwarders.PortPair(8443, 443),
+        dns=None)
+    with self.assertRaises(do_nothing_forwarder.PortsMismatchError):
+      TestDoNothingForwarder(port_pairs)
+
+  def testConnectionTimeoutRaisesConnectionError(self):
+    port_pairs = forwarders.PortPairs(
+        http=forwarders.PortPair(80, 80),
+        https=forwarders.PortPair(8443, 443),
+        dns=None)
+    with self.assertRaises(do_nothing_forwarder.ConnectionError):
+      TestErrorDoNothingForwarder(port_pairs)
diff --git a/catapult/telemetry/telemetry/internal/image_processing/__init__.py b/catapult/telemetry/telemetry/internal/image_processing/__init__.py
new file mode 100644
index 0000000..4d6aabb
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/image_processing/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/catapult/telemetry/telemetry/internal/image_processing/_bitmap.py b/catapult/telemetry/telemetry/internal/image_processing/_bitmap.py
new file mode 100644
index 0000000..2cac013
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/image_processing/_bitmap.py
@@ -0,0 +1,233 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Bitmap is a basic wrapper for image pixels. It includes some basic processing
+tools: crop, find bounding box of a color and compute histogram of color values.
+"""
+
+import array
+import cStringIO
+import struct
+import subprocess
+
+from telemetry.internal.util import binary_manager
+from telemetry.core import platform
+from telemetry.util import color_histogram
+from telemetry.util import rgba_color
+
+import png
+
+
+class _BitmapTools(object):
+  """Wraps a child process of bitmaptools and allows for one command."""
+  CROP_PIXELS = 0
+  HISTOGRAM = 1
+  BOUNDING_BOX = 2
+
+  def __init__(self, dimensions, pixels):
+    binary = binary_manager.FetchPath(
+        'bitmaptools',
+        platform.GetHostPlatform().GetArchName(),
+        platform.GetHostPlatform().GetOSName())
+    assert binary, 'You must build bitmaptools first!'
+
+    self._popen = subprocess.Popen([binary],
+                                   stdin=subprocess.PIPE,
+                                   stdout=subprocess.PIPE,
+                                   stderr=subprocess.PIPE)
+
+    # dimensions are: bpp, width, height, boxleft, boxtop, boxwidth, boxheight
+    packed_dims = struct.pack('iiiiiii', *dimensions)
+    self._popen.stdin.write(packed_dims)
+    # If we got a list of ints, we need to convert it into a byte buffer.
+    if type(pixels) is not bytearray:
+      pixels = bytearray(pixels)
+    self._popen.stdin.write(pixels)
+
+  def _RunCommand(self, *command):
+    assert not self._popen.stdin.closed, (
+      'Exactly one command allowed per instance of tools.')
+    packed_command = struct.pack('i' * len(command), *command)
+    self._popen.stdin.write(packed_command)
+    self._popen.stdin.close()
+    length_packed = self._popen.stdout.read(struct.calcsize('i'))
+    if not length_packed:
+      raise Exception(self._popen.stderr.read())
+    length = struct.unpack('i', length_packed)[0]
+    return self._popen.stdout.read(length)
+
+  def CropPixels(self):
+    return self._RunCommand(_BitmapTools.CROP_PIXELS)
+
+  def Histogram(self, ignore_color, tolerance):
+    ignore_color_int = -1 if ignore_color is None else int(ignore_color)
+    response = self._RunCommand(_BitmapTools.HISTOGRAM,
+                                ignore_color_int, tolerance)
+    out = array.array('i')
+    out.fromstring(response)
+    assert len(out) == 768, (
+        'The ColorHistogram has the wrong number of buckets: %s' % len(out))
+    return color_histogram.ColorHistogram(out[:256], out[256:512], out[512:],
+                                    ignore_color)
+
+  def BoundingBox(self, color, tolerance):
+    response = self._RunCommand(_BitmapTools.BOUNDING_BOX, int(color),
+                                tolerance)
+    unpacked = struct.unpack('iiiii', response)
+    box, count = unpacked[:4], unpacked[-1]
+    if box[2] < 0 or box[3] < 0:
+      box = None
+    return box, count
+
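+# A sketch of the wire protocol used by _BitmapTools above, with assumed
+# example values: the parent writes seven packed int32 header fields
+# (bpp, width, height, boxleft, boxtop, boxwidth, boxheight), then the raw
+# pixel bytes, then the packed command words; the child replies with an
+# int32 length prefix followed by that many payload bytes.
+#
+#   header = struct.pack('iiiiiii', 4, 64, 64, 0, 0, 64, 64)
+#   command = struct.pack('iii', _BitmapTools.HISTOGRAM, -1, 0)
+#   # response: struct.unpack('i', first_4_bytes)[0] gives the payload length.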
+
+class Bitmap(object):
+  """Utilities for parsing and inspecting a bitmap."""
+
+  def __init__(self, bpp, width, height, pixels, metadata=None):
+    assert bpp in [3, 4], 'Invalid bytes per pixel'
+    assert width > 0, 'Invalid width'
+    assert height > 0, 'Invalid height'
+    assert pixels, 'Must specify pixels'
+    assert bpp * width * height == len(pixels), 'Dimensions and pixels mismatch'
+
+    self._bpp = bpp
+    self._width = width
+    self._height = height
+    self._pixels = pixels
+    self._metadata = metadata or {}
+    self._crop_box = None
+
+  @property
+  def bpp(self):
+    return self._bpp
+
+  @property
+  def width(self):
+    return self._crop_box[2] if self._crop_box else self._width
+
+  @property
+  def height(self):
+    return self._crop_box[3] if self._crop_box else self._height
+
+  def _PrepareTools(self):
+    """Prepares an instance of _BitmapTools which allows exactly one command.
+    """
+    crop_box = self._crop_box or (0, 0, self._width, self._height)
+    return _BitmapTools((self._bpp, self._width, self._height) + crop_box,
+                        self._pixels)
+
+  @property
+  def pixels(self):
+    if self._crop_box:
+      self._pixels = self._PrepareTools().CropPixels()
+      # pylint: disable=unpacking-non-sequence
+      _, _, self._width, self._height = self._crop_box
+      self._crop_box = None
+    if type(self._pixels) is not bytearray:
+      self._pixels = bytearray(self._pixels)
+    return self._pixels
+
+  @property
+  def metadata(self):
+    self._metadata['size'] = (self.width, self.height)
+    self._metadata['alpha'] = self.bpp == 4
+    self._metadata['bitdepth'] = 8
+    return self._metadata
+
+  def GetPixelColor(self, x, y):
+    pixels = self.pixels
+    base = self._bpp * (y * self._width + x)
+    if self._bpp == 4:
+      return rgba_color.RgbaColor(pixels[base + 0], pixels[base + 1],
+                                  pixels[base + 2], pixels[base + 3])
+    return rgba_color.RgbaColor(pixels[base + 0], pixels[base + 1],
+                                pixels[base + 2])
+
+  @staticmethod
+  def FromPng(png_data):
+    width, height, pixels, meta = png.Reader(bytes=png_data).read_flat()
+    return Bitmap(4 if meta['alpha'] else 3, width, height, pixels, meta)
+
+  @staticmethod
+  def FromPngFile(path):
+    with open(path, "rb") as f:
+      return Bitmap.FromPng(f.read())
+
+  def WritePngFile(self, path):
+    with open(path, "wb") as f:
+      png.Writer(**self.metadata).write_array(f, self.pixels)
+
+  def IsEqual(self, other, tolerance=0):
+    # Dimensions must be equal
+    if self.width != other.width or self.height != other.height:
+      return False
+
+    # Loop over each pixel and test for equality
+    if tolerance or self.bpp != other.bpp:
+      for y in range(self.height):
+        for x in range(self.width):
+          c0 = self.GetPixelColor(x, y)
+          c1 = other.GetPixelColor(x, y)
+          if not c0.IsEqual(c1, tolerance):
+            return False
+    else:
+      return self.pixels == other.pixels
+
+    return True
+
+  def Diff(self, other):
+    # Output dimensions will be the maximum of the two input dimensions
+    out_width = max(self.width, other.width)
+    out_height = max(self.height, other.height)
+
+    diff = [[0 for x in xrange(out_width * 3)] for x in xrange(out_height)]
+
+    # Loop over each pixel and write out the difference
+    for y in range(out_height):
+      for x in range(out_width):
+        if x < self.width and y < self.height:
+          c0 = self.GetPixelColor(x, y)
+        else:
+          c0 = rgba_color.RgbaColor(0, 0, 0, 0)
+
+        if x < other.width and y < other.height:
+          c1 = other.GetPixelColor(x, y)
+        else:
+          c1 = rgba_color.RgbaColor(0, 0, 0, 0)
+
+        offset = x * 3
+        diff[y][offset] = abs(c0.r - c1.r)
+        diff[y][offset+1] = abs(c0.g - c1.g)
+        diff[y][offset+2] = abs(c0.b - c1.b)
+
+    # This particular method can only save to a file, so the result will be
+    # written into an in-memory buffer and read back into a Bitmap
+    diff_img = png.from_array(diff, mode='RGB')
+    output = cStringIO.StringIO()
+    try:
+      diff_img.save(output)
+      diff = Bitmap.FromPng(output.getvalue())
+    finally:
+      output.close()
+
+    return diff
+
+  def GetBoundingBox(self, color, tolerance=0):
+    return self._PrepareTools().BoundingBox(color, tolerance)
+
+  def Crop(self, left, top, width, height):
+    cur_box = self._crop_box or (0, 0, self._width, self._height)
+    cur_left, cur_top, cur_width, cur_height = cur_box
+
+    if (left < 0 or top < 0 or
+        (left + width) > cur_width or
+        (top + height) > cur_height):
+      raise ValueError('Invalid dimensions')
+
+    self._crop_box = cur_left + left, cur_top + top, width, height
+    return self
+
+  def ColorHistogram(self, ignore_color=None, tolerance=0):
+    return self._PrepareTools().Histogram(ignore_color, tolerance)
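For illustration, a minimal usage sketch of the Bitmap wrapper above. The PNG file name and the 0xRRGGBB color value are assumptions, and the bitmaptools helper (see bitmaptools.cc below) must already be built, since cropping, bounding-box and histogram calls shell out to it:

  from telemetry.internal.image_processing import _bitmap

  bmp = _bitmap.Bitmap.FromPngFile('screenshot.png')  # Assumed input file.
  # Crop is recorded lazily and applied on first pixel access; assumes the
  # source image is at least 330x250 pixels.
  bmp.Crop(10, 10, 320, 240)
  # The 0xRRGGBB int form matches the decoding in bitmaptools.cc
  # (color >> 16, color >> 8, color).
  box, count = bmp.GetBoundingBox(0xffffff, tolerance=8)
  hist = bmp.ColorHistogram(ignore_color=0xffffff, tolerance=8)
  bmp.WritePngFile('cropped.png')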
diff --git a/catapult/telemetry/telemetry/internal/image_processing/bitmaptools.cc b/catapult/telemetry/telemetry/internal/image_processing/bitmaptools.cc
new file mode 100644
index 0000000..7a335da
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/image_processing/bitmaptools.cc
@@ -0,0 +1,264 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#if defined(WIN32)
+#include <fcntl.h>
+#include <io.h>
+#endif
+
+enum Commands {
+  CROP_PIXELS = 0,
+  HISTOGRAM = 1,
+  BOUNDING_BOX = 2
+};
+
+bool ReadInt(int* out) {
+  return fread(out, sizeof(*out), 1, stdin) == 1;
+}
+
+void WriteResponse(void* data, int size) {
+  fwrite(&size, sizeof(size), 1, stdout);
+  fwrite(data, size, 1, stdout);
+  fflush(stdout);
+}
+
+struct Box {
+  Box() : left(), top(), right(), bottom() {}
+
+  // Expected input is:
+  // left, top, width, height
+  bool Read() {
+    int width;
+    int height;
+    if (!(ReadInt(&left) && ReadInt(&top) &&
+          ReadInt(&width) && ReadInt(&height))) {
+      fprintf(stderr, "Could not parse Box.\n");
+      return false;
+    }
+    if (left < 0 || top < 0 || width < 0 || height < 0) {
+      fprintf(stderr, "Box dimensions must be non-negative.\n");
+      return false;
+    }
+    right = left + width;
+    bottom = top + height;
+    return true;
+  }
+
+  void Union(int x, int y) {
+    if (left > x) left = x;
+    if (right <= x) right = x + 1;
+    if (top > y) top = y;
+    if (bottom <= y) bottom = y + 1;
+  }
+
+  int width() const { return right - left; }
+  int height() const { return bottom - top; }
+
+  int left;
+  int top;
+  int right;
+  int bottom;
+};
+
+
+// Represents a bitmap buffer with a crop box.
+struct Bitmap {
+  Bitmap() : pixels(NULL) {}
+
+  ~Bitmap() {
+    if (pixels)
+      delete[] pixels;
+  }
+
+  // Expected input is:
+  // bpp, width, height, box, pixels
+  bool Read() {
+    int bpp;
+    int width;
+    int height;
+    if (!(ReadInt(&bpp) && ReadInt(&width) && ReadInt(&height))) {
+      fprintf(stderr, "Could not parse Bitmap initializer.\n");
+      return false;
+    }
+    if (bpp <= 0 || width <= 0 || height <= 0) {
+      fprintf(stderr, "Dimensions must be positive.\n");
+      return false;
+    }
+
+    int size = width * height * bpp;
+
+    row_stride = width * bpp;
+    pixel_stride = bpp;
+    total_size = size;
+    row_size = row_stride;
+
+    if (!box.Read()) {
+      fprintf(stderr, "Expected crop box argument not found.\n");
+      return false;
+    }
+
+    if (box.bottom * row_stride > total_size ||
+        box.right * pixel_stride > row_size) {
+      fprintf(stderr, "Crop box overflows the bitmap.\n");
+      return false;
+    }
+
+    pixels = new unsigned char[size];
+    if (fread(pixels, sizeof(pixels[0]), size, stdin) <
+        static_cast<size_t>(size)) {
+      fprintf(stderr, "Not enough pixels found,\n");
+      return false;
+    }
+
+    total_size = (box.bottom - box.top) * row_stride;
+    row_size = (box.right - box.left) * pixel_stride;
+    data = pixels + box.top * row_stride + box.left * pixel_stride;
+    return true;
+  }
+
+  void WriteCroppedPixels() const {
+    int out_size = row_size * box.height();
+    unsigned char* out = new unsigned char[out_size];
+    unsigned char* dst = out;
+    for (const unsigned char* row = data;
+        row < data + total_size;
+        row += row_stride, dst += row_size) {
+      // No change in pixel_stride, so we can copy whole rows.
+      memcpy(dst, row, row_size);
+    }
+
+    WriteResponse(out, out_size);
+    delete[] out;
+  }
+
+  unsigned char* pixels;
+  Box box;
+  // Points at the top-left pixel in |pixels|.
+  const unsigned char* data;
+  // These counts are in bytes.
+  int row_stride;
+  int pixel_stride;
+  int total_size;
+  int row_size;
+};
+
+
+static inline
+bool PixelsEqual(const unsigned char* pixel1, const unsigned char* pixel2,
+                 int tolerance) {
+  // Note: this works for both RGB and RGBA. Alpha channel is ignored.
+  return (abs(pixel1[0] - pixel2[0]) <= tolerance) &&
+         (abs(pixel1[1] - pixel2[1]) <= tolerance) &&
+         (abs(pixel1[2] - pixel2[2]) <= tolerance);
+}
+
+
+static inline
+bool PixelsEqual(const unsigned char* pixel, int color, int tolerance) {
+  unsigned char pixel2[3] = { color >> 16, color >> 8, color };
+  return PixelsEqual(pixel, pixel2, tolerance);
+}
+
+
+static
+bool Histogram(const Bitmap& bmp) {
+  int ignore_color;
+  int tolerance;
+  if (!(ReadInt(&ignore_color) && ReadInt(&tolerance))) {
+    fprintf(stderr, "Could not parse HISTOGRAM command.\n");
+    return false;
+  }
+
+  const int kLength = 3 * 256;
+  int counts[kLength] = {};
+
+  for (const unsigned char* row = bmp.data; row < bmp.data + bmp.total_size;
+       row += bmp.row_stride) {
+    for (const unsigned char* pixel = row; pixel < row + bmp.row_size;
+       pixel += bmp.pixel_stride) {
+      if (ignore_color >= 0 && PixelsEqual(pixel, ignore_color, tolerance))
+        continue;
+      ++(counts[256 * 0 + pixel[0]]);
+      ++(counts[256 * 1 + pixel[1]]);
+      ++(counts[256 * 2 + pixel[2]]);
+    }
+  }
+
+  WriteResponse(counts, sizeof(counts));
+  return true;
+}
+
+
+static
+bool BoundingBox(const Bitmap& bmp) {
+  int color;
+  int tolerance;
+  if (!(ReadInt(&color) && ReadInt(&tolerance))) {
+    fprintf(stderr, "Could not parse BOUNDING_BOX command.\n");
+    return false;
+  }
+
+  Box box;
+  box.left = bmp.total_size;
+  box.top = bmp.total_size;
+  box.right = 0;
+  box.bottom = 0;
+
+  int count = 0;
+  int y = 0;
+  for (const unsigned char* row = bmp.data; row < bmp.data + bmp.total_size;
+       row += bmp.row_stride, ++y) {
+    int x = 0;
+    for (const unsigned char* pixel = row; pixel < row + bmp.row_size;
+         pixel += bmp.pixel_stride, ++x) {
+      if (!PixelsEqual(pixel, color, tolerance))
+        continue;
+      box.Union(x, y);
+      ++count;
+    }
+  }
+
+  int response[] = { box.left, box.top, box.width(), box.height(), count };
+  WriteResponse(response, sizeof(response));
+  return true;
+}
+
+
+int main() {
+  Bitmap bmp;
+  int command;
+
+#if defined(WIN32)
+  _setmode(_fileno(stdin), _O_BINARY);
+  _setmode(_fileno(stdout), _O_BINARY);
+#else
+  static_cast<void>(freopen(NULL, "rb", stdin));
+  static_cast<void>(freopen(NULL, "wb", stdout));
+#endif
+
+  if (!bmp.Read()) return -1;
+  if (!ReadInt(&command)) {
+    fprintf(stderr, "Expected command.\n");
+    return -1;
+  }
+  switch (command) {
+    case CROP_PIXELS:
+      bmp.WriteCroppedPixels();
+      break;
+    case BOUNDING_BOX:
+      if (!BoundingBox(bmp)) return -1;
+      break;
+    case HISTOGRAM:
+      if (!Histogram(bmp)) return -1;
+      break;
+    default:
+      fprintf(stderr, "Unrecognized command\n");
+      return -1;
+  }
+  return 0;
+}
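The stdin/stdout protocol implemented above is the one _bitmap.py's _BitmapTools speaks: a seven-int header (bpp, width, height, crop box), the raw pixels, then a command with its int arguments, answered by a length-prefixed payload. A minimal sketch of driving a locally built binary directly; the './bitmaptools' path and the 2x2 image are assumptions:

  import struct
  import subprocess

  width, height, bpp = 2, 2, 3
  pixels = bytearray([255, 255, 255] * (width * height))  # 2x2 white RGB.
  p = subprocess.Popen(['./bitmaptools'], stdin=subprocess.PIPE,
                       stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  # Header: bpp, width, height, crop box (left, top, width, height).
  p.stdin.write(struct.pack('iiiiiii', bpp, width, height,
                            0, 0, width, height))
  p.stdin.write(pixels)
  # HISTOGRAM (1) with ignore_color = -1 (none) and tolerance = 0.
  p.stdin.write(struct.pack('iii', 1, -1, 0))
  p.stdin.close()
  int_size = struct.calcsize('i')
  length = struct.unpack('i', p.stdout.read(int_size))[0]
  counts = struct.unpack('%di' % (length // int_size), p.stdout.read(length))
  # counts has 768 entries: 256 buckets each for the R, G and B channels.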
diff --git a/catapult/telemetry/telemetry/internal/image_processing/cv_util.py b/catapult/telemetry/telemetry/internal/image_processing/cv_util.py
new file mode 100644
index 0000000..356c0d6
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/image_processing/cv_util.py
@@ -0,0 +1,90 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This module provides implementations of common computer Vision operations."""
+
+from __future__ import division
+from telemetry.internal.util import external_modules
+
+np = external_modules.ImportRequiredModule('numpy')
+
+
+def AreLinesOrthogonal(line1, line2, tolerance):
+  """Returns true if lines are within tolerance radians of being orthogonal."""
+  # Map each line onto an angle between 0 and 180.
+  theta1 = np.arctan2(np.float(line1[1] - line1[3]),
+                      np.float(line1[0] - line1[2]))
+  theta2 = np.arctan2(np.float(line2[1] - line2[3]),
+                      np.float(line2[0] - line2[2]))
+  angle2 = abs(theta2 - theta1)
+  if angle2 >= np.pi:
+    angle2 -= np.pi
+  # If the difference between the angles is more than pi/2 - tolerance, the
+  # lines are not orthogonal.
+  return not abs(angle2 - (np.pi / 2.0)) > tolerance
+
+
+def FindLineIntersection(line1, line2):
+  """If the line segments intersect, returns True and their intersection.
+  Otherwise, returns False and the intersection of the line segments if they
+  were to be extended."""
+  # Compute g, and h, the factor by which each line must be extended to
+  # exactly touch the other line. If both are between 0 and 1, then the lines
+  # currently intersect. We use h to compute their intersection.
+  line1p1 = line1[:2]
+  line1p0 = line1[2:]
+  line2p1 = line2[:2]
+  line2p0 = line2[2:]
+  E = np.subtract(line1p1, line1p0)
+  F = np.subtract(line2p1, line2p0)
+  Pe = np.asfarray((-E[1], E[0]))
+  Pf = np.asfarray((-F[1], F[0]))
+  h = np.dot(np.subtract(line1p0, line2p0), Pe)
+  h = np.divide(h, np.dot(F, Pe))
+  g = np.dot(np.subtract(line2p0, line1p0), Pf)
+  g = np.divide(g, np.dot(E, Pf))
+  intersection = np.add(line2p0, np.dot(F, h))
+  intersect = (h >= -0.000001 and h <= 1.000001 and
+               g >= -0.000001 and g <= 1.000001)
+  return intersect, intersection
+
+
+def ExtendLines(lines, length):
+  """Extends lines in an array to a given length, maintaining the center
+  point. Does not necessarily maintain point order."""
+  half_length = length / 2.0
+  angles = np.arctan2(lines[:, 1] - lines[:, 3], lines[:, 0] - lines[:, 2])
+  xoffsets = half_length * np.cos(angles)
+  yoffsets = half_length * np.sin(angles)
+  centerx = (lines[:, 0] + lines[:, 2]) / 2.0
+  centery = (lines[:, 1] + lines[:, 3]) / 2.0
+  lines[:, 0] = centerx - xoffsets
+  lines[:, 2] = centerx + xoffsets
+  lines[:, 1] = centery - yoffsets
+  lines[:, 3] = centery + yoffsets
+  return lines
+
+
+def IsPointApproxOnLine(point, line, tolerance=1):
+  """Approximates distance between point and line for small distances using
+  the determinant and checks whether it's within the tolerance. Tolerance is
+  an approximate distance in pixels, precision decreases with distance."""
+  xd = line[0] - line[2]
+  yd = line[1] - line[3]
+  det = ((xd) * (point[1] - line[3])) - ((yd) * (point[0] - line[2]))
+  tolerance = float(tolerance) * (abs(xd) + abs(yd))
+  return abs(det) * 2.0 <= tolerance
+
+
+def SqDistances(points1, points2):
+  """Computes the square of the distance between two sets of points, or a
+  set of points and a point."""
+  d = np.square(points1 - points2)
+  return d[:, 0] + d[:, 1]
+
+
+def SqDistance(point1, point2):
+  """Computes the square of the distance between two points."""
+  d = np.square(point1 - point2)
+  return d[0] + d[1]
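A short sketch of the geometry helpers above on two axis-aligned segments; the coordinates are illustrative, and the two segments are perpendicular and cross at (5, 0):

  import numpy as np
  from telemetry.internal.image_processing import cv_util

  horizontal = np.asfarray((0, 0, 10, 0))   # Lines are (x1, y1, x2, y2).
  vertical = np.asfarray((5, -5, 5, 5))
  assert cv_util.AreLinesOrthogonal(horizontal, vertical, tolerance=0.01)
  crosses, point = cv_util.FindLineIntersection(horizontal, vertical)
  # crosses is True and point is approximately [5, 0].
  assert crosses and cv_util.IsPointApproxOnLine(point, horizontal)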
diff --git a/catapult/telemetry/telemetry/internal/image_processing/cv_util_unittest.py b/catapult/telemetry/telemetry/internal/image_processing/cv_util_unittest.py
new file mode 100644
index 0000000..d5d5cd7
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/image_processing/cv_util_unittest.py
@@ -0,0 +1,114 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.internal.util import external_modules
+
+try:
+  np = external_modules.ImportRequiredModule('numpy')
+except ImportError:
+  pass
+else:
+  class CVUtilTest(unittest.TestCase):
+    def __init__(self, *args, **kwargs):
+      super(CVUtilTest, self).__init__(*args, **kwargs)
+      # Import modules with dependencies that may not be present in the test
+      # setup so that importing this unit test doesn't cause the test runner
+      # to raise an exception.
+      from telemetry.internal.image_processing import cv_util
+      self.cv_util = cv_util
+
+    def testAreLinesOrthogonalish(self):
+      l1 = np.asfarray((0, 0, 1, 0))
+      l2 = np.asfarray((0, 0, 0, 1))
+      self.assertTrue(self.cv_util.AreLinesOrthogonal(l1, l2, 0))
+      self.assertTrue(self.cv_util.AreLinesOrthogonal(l2, l1, 0))
+      self.assertFalse(self.cv_util.AreLinesOrthogonal(l1, l1,
+                                                       np.pi / 2 - 1e-10))
+      self.assertFalse(self.cv_util.AreLinesOrthogonal(l2, l2,
+                                                       np.pi / 2 - 1e-10))
+      self.assertTrue(self.cv_util.AreLinesOrthogonal(l1, l1, np.pi / 2))
+      self.assertTrue(self.cv_util.AreLinesOrthogonal(l2, l2, np.pi / 2))
+
+      l3 = np.asfarray((0, 0, 1, 1))
+      l4 = np.asfarray((1, 1, 0, 0))
+      self.assertFalse(self.cv_util.AreLinesOrthogonal(l3, l4,
+                                                       np.pi / 2 - 1e-10))
+      self.assertTrue(self.cv_util.AreLinesOrthogonal(l3, l1, np.pi / 4))
+
+      l5 = np.asfarray((0, 1, 1, 0))
+      self.assertTrue(self.cv_util.AreLinesOrthogonal(l3, l5, 0))
+
+    def testFindLineIntersection(self):
+      l1 = np.asfarray((1, 1, 2, 1))
+      l2 = np.asfarray((1, 1, 1, 2))
+      ret, p = self.cv_util.FindLineIntersection(l1, l2)
+      self.assertTrue(ret)
+      self.assertTrue(np.array_equal(p, np.array([1, 1])))
+      l3 = np.asfarray((1.1, 1, 2, 1))
+      ret, p = self.cv_util.FindLineIntersection(l2, l3)
+      self.assertFalse(ret)
+      self.assertTrue(np.array_equal(p, np.array([1, 1])))
+      l4 = np.asfarray((2, 1, 1, 1))
+      l5 = np.asfarray((1, 2, 1, 1))
+      ret, p = self.cv_util.FindLineIntersection(l4, l5)
+      self.assertTrue(ret)
+      self.assertTrue(np.array_equal(p, np.array([1, 1])))
+      l6 = np.asfarray((1, 1, 0, 0))
+      l7 = np.asfarray((0, 1, 1, 0))
+      ret, p = self.cv_util.FindLineIntersection(l7, l6)
+      self.assertTrue(ret)
+      self.assertTrue(np.array_equal(p, np.array([0.5, 0.5])))
+      l8 = np.asfarray((0, 0, 0, 1))
+      l9 = np.asfarray((1, 0, 1, 1))
+      ret, p = self.cv_util.FindLineIntersection(l8, l9)
+      self.assertFalse(ret)
+      self.assertTrue(np.isnan(p[0]))
+
+    def testExtendLines(self):
+      l1 = (-1, 0, 1, 0)
+      l2 = (0, -1, 0, 1)
+      l3 = (4, 4, 6, 6)
+      l4 = (1, 1, 1, 1)
+      lines = self.cv_util.ExtendLines(np.asfarray([l1, l2, l3, l4],
+                                                   dtype=np.float64), 10)
+      lines = np.around(lines, 10)
+      expected0 = ((5.0, 0.0, -5.0, 0.0))
+      self.assertAlmostEqual(np.sum(np.abs(np.subtract(lines[0], expected0))),
+                             0.0, 7)
+      expected1 = ((0.0, 5.0, 0.0, -5.0))
+      self.assertAlmostEqual(np.sum(np.abs(np.subtract(lines[1], expected1))),
+                             0.0, 7)
+
+      off = np.divide(np.sqrt(50), 2, dtype=np.float64)
+      expected2 = ((5 + off, 5 + off, 5 - off, 5 - off))
+      self.assertAlmostEqual(np.sum(np.abs(np.subtract(lines[2], expected2))),
+                             0.0, 7)
+      expected3 = ((-4, 1, 6, 1))
+      self.assertAlmostEqual(np.sum(np.abs(np.subtract(lines[3], expected3))),
+                             0.0, 7)
+
+    def testIsPointApproxOnLine(self):
+      p1 = np.asfarray((-1, -1))
+      l1 = np.asfarray((0, 0, 100, 100))
+      p2 = np.asfarray((1, 2))
+      p3 = np.asfarray((2, 1))
+      p4 = np.asfarray((3, 1))
+      self.assertTrue(self.cv_util.IsPointApproxOnLine(p1, l1, 1 + 1e-7))
+      self.assertTrue(self.cv_util.IsPointApproxOnLine(p2, l1, 1 + 1e-7))
+      self.assertTrue(self.cv_util.IsPointApproxOnLine(p3, l1, 1 + 1e-7))
+      self.assertFalse(self.cv_util.IsPointApproxOnLine(p4, l1, 1 + 1e-7))
+
+    def testSqDistances(self):
+      p1 = np.array([[0, 2], [0, 3]])
+      p2 = np.array([2, 0])
+      dists = self.cv_util.SqDistances(p1, p2)
+      self.assertEqual(dists[0], 8)
+      self.assertEqual(dists[1], 13)
+
+    def testSqDistance(self):
+      p1 = np.array([0, 2])
+      p2 = np.array([2, 0])
+      self.assertEqual(self.cv_util.SqDistance(p1, p2), 8)
diff --git a/catapult/telemetry/telemetry/internal/image_processing/fake_frame_generator.py b/catapult/telemetry/telemetry/internal/image_processing/fake_frame_generator.py
new file mode 100644
index 0000000..cd77541
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/image_processing/fake_frame_generator.py
@@ -0,0 +1,62 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.internal.image_processing import frame_generator
+from telemetry.internal.util import external_modules
+
+np = external_modules.ImportRequiredModule('numpy')
+
+
+class FakeFrameGenerator(frame_generator.FrameGenerator):
+  """ Fakes a Frame Generator, for testing.
+
+  Attributes:
+    _frame_index: A frame read counter.
+    _timestamps: A generator of timestamps to return, or None.
+    _timestamp: The current timestamp.
+    _dimensions: The dimensions to return.
+    _channels: The number of color channels to return in the generated frames.
+    _frames: The number of frames to return before fake EOF."""
+  def __init__(self, frames=1e16, dimensions=(320, 240), channels=3,
+               timestamps=(x for x in iter(int, 1))):
+    """ Initializes the FakeFrameGenerator object.
+
+    Args:
+      frames: int, The number of frames to return before fake EOF.
+      dimensions: (int, int), The dimensions to return.
+      timestamps: generator, A generator of timestamps to return. The default
+          value is an infinite 0 generator.
+      channels: int, The number of color channels to return in the generated
+          frames, 1 for greyscale, 3 for RGB."""
+    self._dimensions = dimensions
+    self._timestamps = timestamps
+    self._timestamp = 0
+    self._frame_index = -1
+    self._channels = channels
+    self._frames = frames
+
+    super(FakeFrameGenerator, self).__init__()
+
+  # OVERRIDE
+  def _CreateGenerator(self):
+    while self._frame_index < self._frames - 1:
+      self._frame_index += 1
+      self._timestamp = next(self._timestamps)
+      yield np.zeros((self._dimensions[0], self._dimensions[1],
+                      self._channels), np.uint8)
+
+  # OVERRIDE
+  @property
+  def CurrentTimestamp(self):
+    return self._timestamp
+
+  # OVERRIDE
+  @property
+  def CurrentFrameNumber(self):
+    return self._frame_index
+
+  # OVERRIDE
+  @property
+  def Dimensions(self):
+    return self._dimensions
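A minimal sketch of driving the fake generator above, e.g. when exercising frame-consuming code without a video file; the frame count and timestamps are illustrative:

  from telemetry.internal.image_processing import fake_frame_generator

  fg = fake_frame_generator.FakeFrameGenerator(
      frames=3, dimensions=(320, 240), channels=3,
      timestamps=iter([0.0, 33.3, 66.6]))
  shapes = [frame.shape for frame in fg.Generator]
  # shapes == [(320, 240, 3)] * 3; afterwards fg.CurrentFrameNumber is 2
  # and fg.CurrentTimestamp is 66.6.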
diff --git a/catapult/telemetry/telemetry/internal/image_processing/frame_generator.py b/catapult/telemetry/telemetry/internal/image_processing/frame_generator.py
new file mode 100644
index 0000000..b701418
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/image_processing/frame_generator.py
@@ -0,0 +1,62 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import abc
+
+
+class FrameReadError(Exception):
+  pass
+
+
+class FrameGenerator(object):
+  """ Defines an interface for reading input frames.
+
+  Attributes:
+    _generator: A reference to the created generator.
+  """
+  __metaclass__ = abc.ABCMeta
+
+  def __init__(self):
+    """ Initializes the FrameGenerator object. """
+    self._generator = self._CreateGenerator()
+
+  @abc.abstractmethod
+  def _CreateGenerator(self):
+    """ Creates a new generator.
+
+    Implemented in derived classes.
+
+    Raises:
+      FrameReadError: An error occurred in reading the frame.
+    """
+    raise NotImplementedError
+
+  @property
+  def Generator(self):
+    """ Returns:
+          A reference to the created generator.
+    """
+    return self._generator
+
+  @abc.abstractproperty
+  def CurrentTimestamp(self):
+    """ Returns:
+          float, The timestamp of the current frame in milliseconds.
+    """
+    raise NotImplementedError
+
+  @abc.abstractproperty
+  def CurrentFrameNumber(self):
+    """ Returns:
+          int, The frame index of the current frame.
+    """
+    raise NotImplementedError
+
+  @abc.abstractproperty
+  def Dimensions(self):
+    """ Returns:
+          (int, int), The dimensions (width, height) of the frame sequence.
+          This value should be constant across frames.
+    """
+    raise NotImplementedError
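To illustrate the interface above, a hypothetical in-memory implementation (not part of this change) that serves pre-decoded numpy frames and synthesizes timestamps from an assumed frame rate:

  from telemetry.internal.image_processing import frame_generator


  class ListFrameGenerator(frame_generator.FrameGenerator):
    """Hypothetical generator yielding frames from an in-memory list."""

    def __init__(self, frames, fps=60.0):
      self._frames = frames        # List of numpy images (height, width, 3).
      self._index = -1
      self._ms_per_frame = 1000.0 / fps
      super(ListFrameGenerator, self).__init__()

    def _CreateGenerator(self):
      for self._index, frame in enumerate(self._frames):
        yield frame

    @property
    def CurrentTimestamp(self):
      return self._index * self._ms_per_frame

    @property
    def CurrentFrameNumber(self):
      return self._index

    @property
    def Dimensions(self):
      return (self._frames[0].shape[1], self._frames[0].shape[0])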
diff --git a/catapult/telemetry/telemetry/internal/image_processing/image_util_bitmap_impl.py b/catapult/telemetry/telemetry/internal/image_processing/image_util_bitmap_impl.py
new file mode 100644
index 0000000..18e4554
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/image_processing/image_util_bitmap_impl.py
@@ -0,0 +1,50 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from __future__ import division
+
+from telemetry.internal.image_processing import _bitmap
+
+
+def Channels(bitmap):
+  return bitmap.bpp
+
+def Width(bitmap):
+  return bitmap.width
+
+def Height(bitmap):
+  return bitmap.height
+
+def Pixels(bitmap):
+  return bitmap.pixels
+
+def GetPixelColor(bitmap, x, y):
+  return bitmap.GetPixelColor(x, y)
+
+def WritePngFile(bitmap, path):
+  bitmap.WritePngFile(path)
+
+def FromRGBPixels(width, height, pixels, bpp):
+  return _bitmap.Bitmap(bpp, width, height, pixels)
+
+def FromPng(png_data):
+  return _bitmap.Bitmap.FromPng(png_data)
+
+def FromPngFile(path):
+  return _bitmap.Bitmap.FromPngFile(path)
+
+def AreEqual(bitmap1, bitmap2, tolerance, _):
+  return bitmap1.IsEqual(bitmap2, tolerance)
+
+def Diff(bitmap1, bitmap2):
+  return bitmap1.Diff(bitmap2)
+
+def GetBoundingBox(bitmap, color, tolerance):
+  return bitmap.GetBoundingBox(color, tolerance)
+
+def Crop(bitmap, left, top, width, height):
+  return bitmap.Crop(left, top, width, height)
+
+def GetColorHistogram(bitmap, ignore_color, tolerance):
+  return bitmap.ColorHistogram(ignore_color, tolerance)
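These thin wrappers expose _bitmap.Bitmap through the same function-style surface as the numpy implementation below, so callers can be switched between backends. A short sketch; the file name is an assumption:

  from telemetry.internal.image_processing import image_util_bitmap_impl as impl

  bitmap = impl.FromPngFile('screenshot.png')   # Assumed input file.
  assert impl.Channels(bitmap) in (3, 4)
  top_left = impl.GetPixelColor(bitmap, 0, 0)   # An rgba_color.RgbaColor.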
diff --git a/catapult/telemetry/telemetry/internal/image_processing/image_util_numpy_impl.py b/catapult/telemetry/telemetry/internal/image_processing/image_util_numpy_impl.py
new file mode 100644
index 0000000..d0da21d
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/image_processing/image_util_numpy_impl.py
@@ -0,0 +1,182 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from __future__ import division
+
+from telemetry.internal.util import external_modules
+from telemetry.util import color_histogram
+from telemetry.util import rgba_color
+import png
+
+cv2 = external_modules.ImportOptionalModule('cv2')
+np = external_modules.ImportRequiredModule('numpy')
+
+
+def Channels(image):
+  return image.shape[2]
+
+def Width(image):
+  return image.shape[1]
+
+def Height(image):
+  return image.shape[0]
+
+def Pixels(image):
+  return bytearray(np.uint8(image[:, :, ::-1]).flat)  # Convert from bgr to rgb.
+
+def GetPixelColor(image, x, y):
+  bgr = image[y][x]
+  return rgba_color.RgbaColor(bgr[2], bgr[1], bgr[0])
+
+def WritePngFile(image, path):
+  if cv2 is not None:
+    cv2.imwrite(path, image)
+  else:
+    with open(path, "wb") as f:
+      metadata = {}
+      metadata['size'] = (Width(image), Height(image))
+      metadata['alpha'] = False
+      metadata['bitdepth'] = 8
+      img = image[:, :, ::-1]
+      pixels = img.reshape(-1).tolist()
+      png.Writer(**metadata).write_array(f, pixels)
+
+def FromRGBPixels(width, height, pixels, bpp):
+  img = np.array(pixels, order='F', dtype=np.uint8)
+  img.resize((height, width, bpp))
+  if bpp == 4:
+    img = img[:, :, :3]  # Drop alpha.
+  return img[:, :, ::-1]  # Convert from rgb to bgr.
+
+def FromPngFile(path):
+  if cv2 is not None:
+    img = cv2.imread(path, cv2.CV_LOAD_IMAGE_COLOR)
+    if img is None:
+      raise ValueError('Image at path {0} could not be read'.format(path))
+    return img
+  else:
+    with open(path, "rb") as f:
+      return FromPng(f.read())
+
+def FromPng(png_data):
+  if cv2 is not None:
+    file_bytes = np.asarray(bytearray(png_data), dtype=np.uint8)
+    return cv2.imdecode(file_bytes, cv2.CV_LOAD_IMAGE_COLOR)
+  else:
+    width, height, pixels, meta = png.Reader(bytes=png_data).read_flat()
+    return FromRGBPixels(width, height, pixels, 4 if meta['alpha'] else 3)
+
+def _SimpleDiff(image1, image2):
+  if cv2 is not None:
+    return cv2.absdiff(image1, image2)
+  else:
+    amax = np.maximum(image1, image2)
+    amin = np.minimum(image1, image2)
+    return amax - amin
+
+def AreEqual(image1, image2, tolerance, likely_equal):
+  if image1.shape != image2.shape:
+    return False
+  self_image = image1
+  other_image = image2
+  if tolerance:
+    if likely_equal:
+      return np.amax(_SimpleDiff(image1, image2)) <= tolerance
+    else:
+      for row in xrange(Height(image1)):
+        if np.amax(_SimpleDiff(image1[row], image2[row])) > tolerance:
+          return False
+      return True
+  else:
+    if likely_equal:
+      return (self_image == other_image).all()
+    else:
+      for row in xrange(Height(image1)):
+        if not (self_image[row] == other_image[row]).all():
+          return False
+      return True
+
+def Diff(image1, image2):
+  self_image = image1
+  other_image = image2
+  if image1.shape[2] != image2.shape[2]:
+    raise ValueError('Cannot diff images of differing bit depth')
+  if image1.shape[:2] != image2.shape[:2]:
+    width = max(Width(image1), Width(image2))
+    height = max(Height(image1), Height(image2))
+    self_image = np.zeros((height, width, image1.shape[2]), np.uint8)
+    other_image = np.zeros((height, width, image1.shape[2]), np.uint8)
+    self_image[0:Height(image1), 0:Width(image1)] = image1
+    other_image[0:Height(image2), 0:Width(image2)] = image2
+  return _SimpleDiff(self_image, other_image)
+
+def GetBoundingBox(image, color, tolerance):
+  if cv2 is not None:
+    color = np.array([color.b, color.g, color.r])
+    img = cv2.inRange(image, np.subtract(color[0:3], tolerance),
+                      np.add(color[0:3], tolerance))
+    count = cv2.countNonZero(img)
+    if count == 0:
+      return None, 0
+    contours, _ = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
+    contour = np.concatenate(contours)
+    return cv2.boundingRect(contour), count
+  else:
+    if tolerance:
+      color = np.array([color.b, color.g, color.r])
+      colorm = color - tolerance
+      colorp = color + tolerance
+      b = image[:, :, 0]
+      g = image[:, :, 1]
+      r = image[:, :, 2]
+      w = np.where(((b >= colorm[0]) & (b <= colorp[0]) &
+                    (g >= colorm[1]) & (g <= colorp[1]) &
+                    (r >= colorm[2]) & (r <= colorp[2])))
+    else:
+      w = np.where((image[:, :, 0] == color.b) &
+                   (image[:, :, 1] == color.g) &
+                   (image[:, :, 2] == color.r))
+    if len(w[0]) == 0:
+      return None, 0
+    # np.where returns indices in row-major order, so the column indices in
+    # w[1] are not sorted; use explicit min/max for the horizontal extent.
+    left, right = np.min(w[1]), np.max(w[1])
+    top, bottom = np.min(w[0]), np.max(w[0])
+    return (left, top, right - left + 1, bottom - top + 1), len(w[0])
+
+def Crop(image, left, top, width, height):
+  img_height, img_width = image.shape[:2]
+  if (left < 0 or top < 0 or
+      (left + width) > img_width or
+      (top + height) > img_height):
+    raise ValueError('Invalid dimensions')
+  return image[top:top + height, left:left + width]
+
+def GetColorHistogram(image, ignore_color, tolerance):
+  if cv2 is not None:
+    mask = None
+    if ignore_color is not None:
+      color = np.array([ignore_color.b, ignore_color.g, ignore_color.r])
+      mask = ~cv2.inRange(image, np.subtract(color, tolerance),
+                          np.add(color, tolerance))
+
+    flatten = np.ndarray.flatten
+    hist_b = flatten(cv2.calcHist([image], [0], mask, [256], [0, 256]))
+    hist_g = flatten(cv2.calcHist([image], [1], mask, [256], [0, 256]))
+    hist_r = flatten(cv2.calcHist([image], [2], mask, [256], [0, 256]))
+  else:
+    filtered = image.reshape(-1, 3)
+    if ignore_color is not None:
+      color = np.array([ignore_color.b, ignore_color.g, ignore_color.r])
+      colorm = np.array(color) - tolerance
+      colorp = np.array(color) + tolerance
+      in_range = ((filtered[:, 0] < colorm[0]) | (filtered[:, 0] > colorp[0]) |
+                  (filtered[:, 1] < colorm[1]) | (filtered[:, 1] > colorp[1]) |
+                  (filtered[:, 2] < colorm[2]) | (filtered[:, 2] > colorp[2]))
+      filtered = np.compress(in_range, filtered, axis=0)
+    if len(filtered[:, 0]) == 0:
+      return color_histogram.ColorHistogram(
+          np.zeros((256)), np.zeros((256)), np.zeros((256)), ignore_color)
+    hist_b = np.bincount(filtered[:, 0], minlength=256)
+    hist_g = np.bincount(filtered[:, 1], minlength=256)
+    hist_r = np.bincount(filtered[:, 2], minlength=256)
+
+  return color_histogram.ColorHistogram(hist_r, hist_g, hist_b, ignore_color)
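A short sketch of the numpy-backed functions above. Images here are numpy arrays in BGR channel order, which is why GetPixelColor reverses the channels; the 4x4 test image is illustrative:

  import numpy as np
  from telemetry.internal.image_processing import image_util_numpy_impl as impl
  from telemetry.util import rgba_color

  image = np.zeros((4, 4, 3), np.uint8)   # 4x4 black image, BGR order.
  image[1][2] = [0, 0, 255]               # One pure-red pixel at x=2, y=1.
  assert impl.GetPixelColor(image, 2, 1).r == 255
  box, count = impl.GetBoundingBox(image, rgba_color.RgbaColor(255, 0, 0), 0)
  # Expect box == (2, 1, 1, 1) and count == 1.
  hist = impl.GetColorHistogram(image, None, 0)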
diff --git a/catapult/telemetry/telemetry/internal/image_processing/screen_finder.py b/catapult/telemetry/telemetry/internal/image_processing/screen_finder.py
new file mode 100755
index 0000000..932d6df
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/image_processing/screen_finder.py
@@ -0,0 +1,857 @@
+#!/usr/bin/env python
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# This script attempts to detect the region of a camera's field of view that
+# contains the screen of the device we are testing.
+#
+# Usage: ./screen_finder.py path_to_video 0 0 --verbose
+
+from __future__ import division
+
+import copy
+import logging
+import os
+import sys
+
+if __name__ == '__main__':
+  sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
+
+from telemetry.internal.image_processing import cv_util
+from telemetry.internal.image_processing import frame_generator as \
+    frame_generator_module
+from telemetry.internal.image_processing import video_file_frame_generator
+from telemetry.internal.util import external_modules
+
+np = external_modules.ImportRequiredModule('numpy')
+cv2 = external_modules.ImportRequiredModule('cv2')
+
+
+class ScreenFinder(object):
+  """Finds and extracts device screens from video.
+
+  Sample Usage:
+    sf = ScreenFinder(frame_generator)
+    while sf.HasNext():
+      screen, corners = sf.GetNext()
+
+  Attributes:
+    _lost_corners: Each index represents whether or not we lost track of that
+        corner on the previous frame. Ordered by [top-right, top-left,
+        bottom-left, bottom-right]
+    _frame: An unmodified copy of the frame we're currently processing.
+    _frame_debug: A copy of the frame we're currently processing, may be
+        modified at any time, used for debugging.
+    _frame_grey: A greyscale copy of the frame we're currently processing.
+    _frame_edges: A Canny Edge detected copy of the frame we're currently
+        processing.
+    _screen_size: The size of device screen in the video when first detected.
+    _avg_corners: Exponentially weighted average of the previous corner
+        locations.
+    _prev_corners: The location of the corners in the previous frame.
+    _lost_corner_frames: A counter of the number of successive frames in which
+        we've lost a corner location.
+    _border: See |border| above.
+    _min_line_length: The minimum length a line must be before we consider it
+        a possible screen edge.
+    _frame_generator: See |frame_generator| above.
+    _width, _height: The width and height of the frame.
+    _anglesp5, _anglesm5: The angles for each point we look at in the grid
+        when computing brightness, constant across frames."""
+
+  class ScreenNotFoundError(Exception):
+    pass
+
+  # Square of the distance a corner can travel in pixels between frames
+  MAX_INTERFRAME_MOTION = 25
+  # The minimum width line that may be considered a screen edge.
+  MIN_SCREEN_WIDTH = 40
+  # Number of frames with lost corners before we ignore MAX_INTERFRAME_MOTION
+  RESET_AFTER_N_BAD_FRAMES = 2
+  # The weight applied to the new screen location when exponentially averaging
+  # screen location.
+  # TODO(mthiesse): This should be framerate dependent, for lower framerates
+  # this value should approach 1. For higher framerates, this value should
+  # approach 0. The current 0.5 value works well in testing with 240 FPS.
+  CORNER_AVERAGE_WEIGHT = 0.5
+
+  # TODO(mthiesse): Investigate how to select the constants used here. In very
+  # bright videos, twice as bright may be too high, and the minimum of 60 may
+  # be too low.
+  # The factor by which a quadrant at an intersection must be brighter than
+  # the other quadrants to be considered a screen corner.
+  MIN_RELATIVE_BRIGHTNESS_FACTOR = 1.5
+  # The minimum average brightness required of an intersection quadrant to
+  # be considered a screen corner (on a scale of 0-255).
+  MIN_CORNER_ABSOLUTE_BRIGHTNESS = 60
+
+  # Low and high hysteresis parameters to be passed to the Canny edge
+  # detection algorithm.
+  CANNY_HYSTERESIS_THRESH_LOW = 300
+  CANNY_HYSTERESIS_THRESH_HIGH = 500
+
+  SMALL_ANGLE = 5 / 180 * np.pi  # 5 degrees in radians
+
+  DEBUG = False
+
+  def __init__(self, frame_generator, border=5):
+    """Initializes the ScreenFinder object.
+
+    Args:
+      frame_generator: FrameGenerator, An initialized Video Frame Generator.
+      border: int, number of pixels of border to be kept when cropping the
+          detected screen.
+
+    Raises:
+      FrameReadError: The frame generator may output a read error during
+          initialization."""
+    assert isinstance(frame_generator, frame_generator_module.FrameGenerator)
+    self._lost_corners = [False, False, False, False]
+    self._frame_debug = None
+    self._frame = None
+    self._frame_grey = None
+    self._frame_edges = None
+    self._screen_size = None
+    self._avg_corners = None
+    self._prev_corners = None
+    self._lost_corner_frames = 0
+    self._border = border
+    self._min_line_length = self.MIN_SCREEN_WIDTH
+    self._frame_generator = frame_generator
+    self._anglesp5 = None
+    self._anglesm5 = None
+
+    if not self._InitNextFrame():
+      logging.warn('Not enough frames in video feed!')
+      return
+
+    self._height, self._width = self._frame.shape[:2]
+
+  def _InitNextFrame(self):
+    """Called after processing each frame, reads in the next frame to ensure
+    HasNext() is accurate."""
+    self._frame_debug = None
+    self._frame = None
+    self._frame_grey = None
+    self._frame_edges = None
+    try:
+      frame = next(self._frame_generator.Generator)
+    except StopIteration:
+      return False
+    self._frame = frame
+    self._frame_debug = copy.copy(frame)
+    self._frame_grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+    self._frame_edges = cv2.Canny(self._frame_grey,
+                                  self.CANNY_HYSTERESIS_THRESH_LOW,
+                                  self.CANNY_HYSTERESIS_THRESH_HIGH)
+    return True
+
+  def HasNext(self):
+    """True if there are more frames available to process. """
+    return self._frame is not None
+
+  def GetNext(self):
+    """Gets the next screen image.
+
+    Returns:
+      A numpy matrix containing the screen surrounded by the number of border
+      pixels specified in initialization, and the location of the detected
+      screen corners in the current frame, if a screen is found. The returned
+      screen is guaranteed to be the same size at each frame.
+      'None' and 'None' if no screen was found on the current frame.
+
+    Raises:
+      FrameReadError: An error occurred in the FrameGenerator.
+      RuntimeError: This method was called when no frames were available."""
+    if self._frame is None:
+      raise RuntimeError('No more frames available.')
+
+    logging.info('Processing frame: %d',
+                 self._frame_generator.CurrentFrameNumber)
+
+    # Finds straight lines in the image.
+    hlines = cv2.HoughLinesP(self._frame_edges, 1, np.pi / 180, 60,
+                             minLineLength=self._min_line_length,
+                             maxLineGap=100)
+
+    # Extends these straight lines to be long enough to ensure the screen edge
+    # lines intersect.
+    lines = cv_util.ExtendLines(np.float32(hlines[0]), 10000) \
+        if hlines is not None else []
+
+    # Find intersections in the lines; these are likely to be screen corners.
+    intersections = self._FindIntersections(lines)
+    if len(intersections[:, 0]) > 0:
+      points = np.vstack(intersections[:, 0].flat)
+      if (self._prev_corners is not None and len(points) >= 4 and
+          not self._HasMovedFast(points, self._prev_corners)):
+        corners = self._prev_corners
+        missing_corners = 0
+      else:
+        # Extract the corners from all intersections.
+        corners, missing_corners = self._FindCorners(
+            intersections, self._frame_grey)
+    else:
+      corners = np.empty((4, 2), np.float32)
+      corners[:] = np.nan
+      missing_corners = 4
+
+    screen = None
+    found_screen = True
+    final_corners = None
+    try:
+      # Handle the cases where we have missing corners.
+      screen_corners = self._NewScreenLocation(
+          corners, missing_corners, intersections)
+
+      final_corners = self._SmoothCorners(screen_corners)
+
+      # Create a perspective transform from our corners.
+      transform, w, h = self._GetTransform(final_corners, self._border)
+
+      # Apply the perspective transform to get our output.
+      screen = cv2.warpPerspective(
+          self._frame, transform, (int(w + 0.5), int(h + 0.5)))
+
+      self._prev_corners = final_corners
+
+    except self.ScreenNotFoundError as e:
+      found_screen = False
+      logging.info(e)
+
+    if self.DEBUG:
+      self._Debug(lines, corners, final_corners, screen)
+
+    self._InitNextFrame()
+    if found_screen:
+      return screen, self._prev_corners
+    return None, None
+
+  def _FindIntersections(self, lines):
+    """Finds intersections in a set of lines.
+
+    Filters pairs of lines that are less than 45 degrees apart. Filtering
+    these pairs helps dramatically reduce the number of points we have to
+    process, as these points could not represent screen corners anyway.
+
+    Returns:
+      The intersections, represented as a tuple of (point, line, line) of the
+      points and the lines that intersect there of all lines in the array that
+      are more than 45 degrees apart."""
+    intersections = np.empty((0, 3), np.float32)
+    for i in xrange(0, len(lines)):
+      for j in xrange(i + 1, len(lines)):
+        # Filter lines that are less than 45 (or greater than 135) degrees
+        # apart.
+        if not cv_util.AreLinesOrthogonal(lines[i], lines[j], (np.pi / 4.0)):
+          continue
+        ret, point = cv_util.FindLineIntersection(lines[i], lines[j])
+        point = np.float32(point)
+        if not ret:
+          continue
+        # If we know where the previous corners are, we can also filter
+        # intersections that are too far away from the previous corners to be
+        # where the screen has moved.
+        if self._prev_corners is not None and \
+           self._lost_corner_frames <= self.RESET_AFTER_N_BAD_FRAMES and \
+           not self._PointIsCloseToPreviousCorners(point):
+          continue
+        intersections = np.vstack((intersections,
+                                   np.array((point, lines[i], lines[j]))))
+    return intersections
+
+  def _PointIsCloseToPreviousCorners(self, point):
+    """True if the point is close to the previous corners."""
+    max_dist = self.MAX_INTERFRAME_MOTION
+    if cv_util.SqDistance(self._prev_corners[0], point) <= max_dist or \
+       cv_util.SqDistance(self._prev_corners[1], point) <= max_dist or \
+       cv_util.SqDistance(self._prev_corners[2], point) <= max_dist or \
+       cv_util.SqDistance(self._prev_corners[3], point) <= max_dist:
+      return True
+    return False
+
+  def _HasMovedFast(self, corners, prev_corners):
+    min_dist = np.zeros(4, np.float32)
+    for i in xrange(4):
+      dist = np.min(cv_util.SqDistances(corners, prev_corners[i]))
+      min_dist[i] = dist
+    # 3 corners can move up to one pixel before we consider the screen to have
+    # moved. TODO(mthiesse): Should this be relaxed? Resolution dependent?
+    if np.sum(min_dist) < 3:
+      return False
+    return True
+
+  class CornerData(object):
+
+    def __init__(self, corner_index, corner_location, brightness_score, line1,
+                 line2):
+      self.corner_index = corner_index
+      self.corner_location = corner_location
+      self.brightness_score = brightness_score
+      self.line1 = line1
+      self.line2 = line2
+
+    def __gt__(self, corner_data2):
+      return self.corner_index > corner_data2.corner_index
+
+    def __repr__(self):
+      return ('\nCorner index: ' + str(self.corner_index) +
+              ',\nCorner location: ' + str(self.corner_location) +
+              ',\nBrightness score: ' + str(self.brightness_score) +
+              ',\nline1: ' + str(self.line1) + ',\nline2: ' + str(self.line2))
+
+  def _FindCorners(self, intersections, grey_frame):
+    """Finds the screen corners in the image.
+
+    Given the set of intersections in the image, finds the intersections most
+    likely to be corners.
+
+    Args:
+      intersections: The array of intersections in the image.
+      grey_frame: The greyscale frame we're processing.
+
+    Returns:
+      An array of length 4 containing the positions of the corners, or nan for
+      each index where a corner could not be found, and a count of the number
+      of missing corners.
+      The corners are ordered as follows:
+        1 | 0
+        -----
+        2 | 3
+      Ex. if 3 corners of a square of width 2 centered at the origin are
+      found, the output would look like:
+          '[[1, 1], [np.nan, np.nan], [-1, -1], [1, -1]], 1'"""
+    filtered = []
+    corners = np.empty((0, 2), np.float32)
+    for corner_pos, score, point, line1, line2 in \
+        self._LooksLikeCorner(intersections, grey_frame):
+      if self.DEBUG:
+        center = (int(point[0] + 0.5), int(point[1] + 0.5))
+        cv2.circle(self._frame_debug, center, 5, (0, 255, 0), 1)
+      point.resize(1, 2)
+      corners = np.append(corners, point, axis=0)
+      point.resize(2,)
+      corner_data = self.CornerData(corner_pos, point, score, line1, line2)
+      filtered.append(corner_data)
+
+    # De-duplicate corners because we may have found many false positives, or
+    # near-misses.
+    self._DeDupCorners(filtered, corners)
+
+    # Strip everything but the corner location.
+    filtered_corners = np.array(
+        [corner_data.corner_location for corner_data in filtered])
+    corner_indices = [corner_data.corner_index for corner_data in filtered]
+
+    # If we have found a corner to replace a lost corner, we want to check
+    # that the corner is not erroneous by ensuring it makes a rectangle with
+    # the 3 known good corners.
+    if len(filtered) == 4:
+      for i in xrange(4):
+        point_info = (filtered[i].corner_location,
+                      filtered[i].line1,
+                      filtered[i].line2)
+        if (self._lost_corners[i] and
+            not self._PointConnectsToCorners(filtered_corners, point_info)):
+          filtered_corners = np.delete(filtered_corners, i, 0)
+          corner_indices = np.delete(corner_indices, i, 0)
+          break
+
+    # Ensure corners are sorted properly, inserting nans for missing corners.
+    sorted_corners = np.empty((4, 2), np.float32)
+    sorted_corners[:] = np.nan
+    for i in xrange(len(filtered_corners)):
+      sorted_corners[corner_indices[i]] = filtered_corners[i]
+
+    # From this point on, our corners arrays are guaranteed to have 4
+    # elements, though some may be nan.
+
+    # Filter corners that have moved too far from the previous corner if we
+    # are not resetting known corner information.
+    reset_corners = (
+        (self._lost_corner_frames > self.RESET_AFTER_N_BAD_FRAMES)
+        and len(filtered_corners) == 4)
+    if self._prev_corners is not None and not reset_corners:
+      sqdists = cv_util.SqDistances(self._prev_corners, sorted_corners)
+      for i in xrange(4):
+        if np.isnan(sorted_corners[i][0]):
+          continue
+        if sqdists[i] > self.MAX_INTERFRAME_MOTION:
+          sorted_corners[i] = np.nan
+
+    real_corners = self._FindExactCorners(sorted_corners)
+    missing_corners = np.count_nonzero(np.isnan(real_corners)) / 2
+    return real_corners, missing_corners
+
+  def _LooksLikeCorner(self, intersections, grey_frame):
+    """Finds any intersections of lines that look like a screen corner.
+
+    Args:
+      intersections: The numpy array of points, and the lines that intersect
+          at the given point.
+      grey_frame: The greyscale frame we're processing.
+
+    Returns:
+      An array of: The corner location (0-3), the relative brightness score
+      (to be used to de-duplicate corners later), the point, and the lines
+      that make up the intersection, for all intersections that look like a
+      corner."""
+    points = np.vstack(intersections[:, 0].flat)
+    lines1 = np.vstack(intersections[:, 1].flat)
+    lines2 = np.vstack(intersections[:, 2].flat)
+    # Map the image to four quadrants defined as the regions between each of
+    # the lines that make up the intersection.
+    line1a1 = np.pi - np.arctan2(lines1[:, 1] - points[:, 1],
+                                 lines1[:, 0] - points[:, 0])
+    line1a2 = np.pi - np.arctan2(lines1[:, 3] - points[:, 1],
+                                 lines1[:, 2] - points[:, 0])
+    line2a1 = np.pi - np.arctan2(lines2[:, 1] - points[:, 1],
+                                 lines2[:, 0] - points[:, 0])
+    line2a2 = np.pi - np.arctan2(lines2[:, 3] - points[:, 1],
+                                 lines2[:, 2] - points[:, 0])
+    line1a1 = line1a1.reshape(-1, 1)
+    line1a2 = line1a2.reshape(-1, 1)
+    line2a1 = line2a1.reshape(-1, 1)
+    line2a2 = line2a2.reshape(-1, 1)
+
+    line_angles = np.concatenate((line1a1, line1a2, line2a1, line2a2), axis=1)
+    np.ndarray.sort(line_angles)
+
+    # TODO(mthiesse): Investigate whether these should scale with image or
+    # screen size. My intuition is that these don't scale with image size,
+    # though they may be affected by image quality and how blurry the corners
+    # are. See stackoverflow.com/q/7765810/ for inspiration.
+    avg_range = 8.0
+    num_points = 7
+
+    points_m_avg = points - avg_range
+    points_p_avg = points + avg_range
+    # Exclude points near frame boundaries.
+    include = np.where((points_m_avg[:, 0] > 0) & (points_m_avg[:, 1] > 0) &
+                       (points_p_avg[:, 0] < self._width) &
+                       (points_p_avg[:, 1] < self._height))
+    line_angles = line_angles[include]
+    points = points[include]
+    lines1 = lines1[include]
+    lines2 = lines2[include]
+    points_m_avg = points_m_avg[include]
+    points_p_avg = points_p_avg[include]
+    # Perform a 2-d linspace to generate the x, y ranges for each
+    # intersection.
+    arr1 = points_m_avg[:, 0].reshape(-1, 1)
+    arr2 = points_p_avg[:, 0].reshape(-1, 1)
+    lin = np.linspace(0, 1, num_points)
+    x_range = arr1 + (arr2 - arr1) * lin
+    arr1 = points_m_avg[:, 1].reshape(-1, 1)
+    arr2 = points_p_avg[:, 1].reshape(-1, 1)
+    y_range = arr1 + (arr2 - arr1) * lin
+
+    # The angles for each point we look at in the grid when computing
+    # brightness are constant across frames, so we can generate them once.
+    if self._anglesp5 is None:
+      ind = np.transpose([np.tile(x_range[0], num_points),
+                          np.repeat(y_range[0], num_points)])
+      vectors = ind - points[0]
+      angles = np.arctan2(vectors[:, 1], vectors[:, 0]) + np.pi
+      self._anglesp5 = angles + self.SMALL_ANGLE
+      self._anglesm5 = angles - self.SMALL_ANGLE
+    results = []
+    for i in xrange(len(y_range)):
+      # Generate our filters for which points belong to which quadrant.
+      one = np.where((self._anglesp5 <= line_angles[i, 1]) &
+                     (self._anglesm5 >= line_angles[i, 0]))
+      two = np.where((self._anglesp5 <= line_angles[i, 2]) &
+                     (self._anglesm5 >= line_angles[i, 1]))
+      thr = np.where((self._anglesp5 <= line_angles[i, 3]) &
+                     (self._anglesm5 >= line_angles[i, 2]))
+      fou = np.where((self._anglesp5 <= line_angles[i, 0]) |
+                     (self._anglesm5 >= line_angles[i, 3]))
+      # Take the cartesian product of our x and y ranges to get the full list
+      # of pixels to look at.
+      ind = np.transpose([np.tile(x_range[i], num_points),
+                          np.repeat(y_range[i], num_points)])
+
+      # Filter the full list by which indices belong to which quadrant, and
+      # convert to integers so we can index with them.
+      one_i = np.int32(np.rint(ind[one[0]]))
+      two_i = np.int32(np.rint(ind[two[0]]))
+      thr_i = np.int32(np.rint(ind[thr[0]]))
+      fou_i = np.int32(np.rint(ind[fou[0]]))
+
+      # Average the brightness of the pixels that belong to each quadrant.
+      q_1 = np.average(grey_frame[one_i[:, 1], one_i[:, 0]])
+      q_2 = np.average(grey_frame[two_i[:, 1], two_i[:, 0]])
+      q_3 = np.average(grey_frame[thr_i[:, 1], thr_i[:, 0]])
+      q_4 = np.average(grey_frame[fou_i[:, 1], fou_i[:, 0]])
+
+      avg_intensity = [(q_4, 0), (q_1, 1), (q_2, 2), (q_3, 3)]
+      # Sort by intensity.
+      avg_intensity.sort(reverse=True)
+
+      # Treat the point as a corner if one quadrant is at least twice as
+      # bright as the next brightest quadrant, with a minimum brightness
+      # requirement.
+      tau = (2.0 * np.pi)
+      min_factor = self.MIN_RELATIVE_BRIGHTNESS_FACTOR
+      min_brightness = self.MIN_CORNER_ABSOLUTE_BRIGHTNESS
+      if avg_intensity[0][0] > avg_intensity[1][0] * min_factor and \
+         avg_intensity[0][0] > min_brightness:
+        bright_corner = avg_intensity[0][1]
+        if bright_corner == 0:
+          angle = np.pi - (line_angles[i, 0] + line_angles[i, 3]) / 2.0
+          if angle < 0:
+            angle = angle + tau
+        else:
+          angle = tau - (line_angles[i, bright_corner] +
+                         line_angles[i, bright_corner - 1]) / 2.0
+        score = avg_intensity[0][0] - avg_intensity[1][0]
+        # TODO(mthiesse): int(angle / (pi / 2.0)) will break if the screen is
+        # rotated through 45 degrees. Probably many other things will break as
+        # well, movement of corners from one quadrant to another hasn't been
+        # tested. We should support this eventually, but this is unlikely to
+        # cause issues for any test setups.
+        results.append((int(angle / (np.pi / 2.0)), score, points[i],
+                        lines1[i], lines2[i]))
+    return results
+
+  def _DeDupCorners(self, corner_data, corners):
+    """De-duplicate corners based on corner_index.
+
+    For each set of points representing a corner: If one point is part of the
+    rectangle and the other is not, filter the other one. If both or none are
+    part of the rectangle, filter based on score (highest relative brightness
+    of a quadrant). The reason we allow for neither to be part of the
+    rectangle is that we may not have found all four corners of the
+    rectangle, and in degenerate cases like this it's better to find 3 likely
+    corners than none.
+
+    Modifies corner_data directly.
+
+    Args:
+      corner_data: CornerData for each potential corner in the frame.
+      corners: List of all potential corners in the frame."""
+    # TODO(mthiesse): Ensure that the corners form a sensible rectangle. For
+    # example, it is currently possible (but unlikely) to detect a 'screen'
+    # where the bottom-left corner is above the top-left corner, while the
+    # bottom-right corner is below the top-right corner.
+
+    # Sort by corner_index to make de-duping easier.
+    corner_data.sort()
+
+    # De-dup corners.
+    c_old = None
+    for i in xrange(len(corner_data) - 1, 0, -1):
+      if corner_data[i].corner_index != corner_data[i - 1].corner_index:
+        c_old = None
+        continue
+      if c_old is None:
+        point_info = (corner_data[i].corner_location,
+                      corner_data[i].line1,
+                      corner_data[i].line2)
+        c_old = self._PointConnectsToCorners(corners, point_info, 2)
+      point_info_new = (corner_data[i - 1].corner_location,
+                        corner_data[i - 1].line1,
+                        corner_data[i - 1].line2)
+      c_new = self._PointConnectsToCorners(corners, point_info_new, 2)
+      if (not (c_old or c_new)) or (c_old and c_new):
+        if (corner_data[i].brightness_score <
+            corner_data[i - 1].brightness_score):
+          del corner_data[i]
+          c_old = c_new
+        else:
+          del corner_data[i - 1]
+      elif c_old:
+        del corner_data[i - 1]
+      else:
+        del corner_data[i]
+        c_old = c_new
+
+  def _PointConnectsToCorners(self, corners, point_info, tolerance=1):
+    """Checks if the lines of an intersection intersect with corners.
+
+    This is useful to check if the point is part of a rectangle specified by
+    |corners|.
+
+    Args:
+      corners: Corners that (hopefully) make up a rectangle.
+      point_info: A tuple of (point, line, line) representing an intersection
+          of two lines.
+      tolerance: The tolerance (approximately in pixels) of the distance
+          between the corners and the lines for detecting if the point is on
+          the line.
+
+    Returns:
+      True if both lines forming the intersection at the point also connect
+      it to other corners, False otherwise."""
+    line1_connected = False
+    line2_connected = False
+    point, line1, line2 = point_info
+    for corner in corners:
+      if corner is None:
+        continue
+
+      # Filter out points that are too close to one another to be different
+      # corners.
+      sqdist = cv_util.SqDistance(corner, point)
+      if sqdist < self.MIN_SCREEN_WIDTH * self.MIN_SCREEN_WIDTH:
+        continue
+
+      line1_connected = line1_connected or \
+          cv_util.IsPointApproxOnLine(corner, line1, tolerance)
+      line2_connected = line2_connected or \
+          cv_util.IsPointApproxOnLine(corner, line2, tolerance)
+    if line1_connected and line2_connected:
+      return True
+    return False
+
+  def _FindExactCorners(self, sorted_corners):
+    """Attempts to find more accurate corner locations.
+
+    Args:
+      sorted_corners: The four screen corners, sorted by corner_index.
+
+    Returns:
+      A list of 4 probably more accurate corners, still sorted."""
+    real_corners = np.empty((4, 2), np.float32)
+    # Count missing corners, and search in a small area around our
+    # intersections representing corners to see if we can find a more exact
+    # corner, as the position of the intersections is noisy and not always
+    # perfectly accurate.
+    for i in xrange(4):
+      corner = sorted_corners[i]
+      if np.isnan(corner[0]):
+        real_corners[i] = np.nan
+        continue
+
+      # Almost unbelievably, in edge cases with floating point error, the
+      # width/height of the cropped corner image may be 2 or 4. This is fine
+      # though, as long as the width and height of the cropped corner are not
+      # hard-coded anywhere.
+      corner_image = self._frame_edges[corner[1] - 1:corner[1] + 2,
+                                       corner[0] - 1:corner[0] + 2]
+      ret, p = self._FindExactCorner(i <= 1, i == 1 or i == 2, corner_image)
+      if ret:
+        if self.DEBUG:
+          self._frame_edges[corner[1] - 1 + p[1]][corner[0] - 1 + p[0]] = 128
+        real_corners[i] = corner - 1 + p
+      else:
+        real_corners[i] = corner
+    return real_corners
+
+  def _FindExactCorner(self, top, left, img):
+    """Tries to finds the exact corner location for a given corner.
+
+    Searches for the top-most or bottom-most, left-most or right-most lit
+    pixel in an edge-detected image, which should represent, with pixel
+    precision, as accurate a corner location as possible. (Up-sampling with
+    cubic spline interpolation could perhaps achieve sub-pixel precision.)
+
+    TODO(mthiesse): This algorithm could be improved by including a larger
+    region to search in, but would have to be made smarter about which lit
+    pixels are on the detected screen edge and which are not, as it's
+    currently extremely easy to fool by things like notification icons in
+    screen corners.
+
+    Args:
+      top: boolean, whether or not we're looking for a top corner.
+      left: boolean, whether or not we're looking for a left corner.
+      img: A small cropping of the edge detected image in which to search.
+
+    Returns:
+      True and the location if a better corner location is found,
+      False otherwise."""
+    h, w = img.shape[:2]
+    cy = 0
+    starting_x = w - 1 if left else 0
+    cx = starting_x
+    if top:
+      y_range = xrange(h - 1, -1, -1)
+    else:
+      y_range = xrange(0, h, 1)
+    if left:
+      x_range = xrange(w - 1, -1, -1)
+    else:
+      x_range = xrange(0, w, 1)
+    for y in y_range:
+      for x in x_range:
+        if img[y][x] == 255:
+          cy = y
+          if (left and x <= cx) or (not left and x >= cx):
+            cx = x
+    if cx == starting_x and cy == 0 and img[0][starting_x] != 255:
+      return False, (0, 0)
+    return True, (cx, cy)
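+    # Example: searching a 3x3 crop for the top-left corner (top=True,
+    # left=True) with lit pixels at (x, y) == (1, 0) and (0, 1) ends with
+    # cy == 0 (the top-most lit row) and cx == 0 (the left-most lit column),
+    # so (0, 0) is returned as the refined corner location.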
+
+  def _NewScreenLocation(self, new_corners, missing_corners, intersections):
+    """Computes the new screen location with best effort.
+
+    Creates the final list of corners that represents the best effort attempt
+    to find the new screen location. Handles degenerate cases where 3 or fewer
+    new corners are present, using previous corner and intersection data.
+
+    Args:
+      new_corners: The corners found by our search for corners.
+      missing_corners: The count of how many corners we're missing.
+      intersections: The intersections of straight lines found in the current
+          frame.
+
+    Returns:
+      An array of 4 corners, hopefully representing the screen.
+
+    Raises:
+      ScreenNotFoundError: Finding the screen location was not possible."""
+    screen_corners = copy.copy(new_corners)
+    if missing_corners == 0:
+      self._lost_corner_frames = 0
+      self._lost_corners = [False, False, False, False]
+      return screen_corners
+    if self._prev_corners is None:
+      raise self.ScreenNotFoundError(
+          'Could not locate screen on frame %d' %
+          self._frame_generator.CurrentFrameNumber)
+
+    self._lost_corner_frames += 1
+    if missing_corners > 1:
+      logging.info('Unable to properly detect screen corners, making '
+                   'potentially false assumptions on frame %d',
+                   self._frame_generator.CurrentFrameNumber)
+    # Replace missing new_corners with either nearest intersection to previous
+    # corner, or previous corner if no intersections are found.
+    for i in xrange(0, 4):
+      if not np.isnan(new_corners[i][0]):
+        self._lost_corners[i] = False
+        continue
+      self._lost_corners[i] = True
+      min_dist = self.MAX_INTERFRAME_MOTION
+      min_corner = None
+
+      for isection in intersections:
+        dist = cv_util.SqDistance(isection[0], self._prev_corners[i])
+        if dist >= min_dist:
+          continue
+        if missing_corners == 1:
+          # We know in this case that we have 3 corners present, meaning
+          # all 4 screen lines, and therefore intersections near screen
+          # corners present, so our new corner must connect to these
+          # other corners.
+          if not self._PointConnectsToCorners(new_corners, isection, 3):
+            continue
+        min_corner = isection[0]
+        min_dist = dist
+      screen_corners[i] = min_corner if min_corner is not None else \
+          self._prev_corners[i]
+
+    return screen_corners
+
+  def _SmoothCorners(self, corners):
+    """Smoothes the motion of corners, reduces noise.
+
+    Smoothes the motion of corners by computing an exponentially weighted
+    moving average of corner positions over time.
+
+    Args:
+      corners: The corners of the detected screen.
+
+    Returns:
+      The final corner positions."""
+    if self._avg_corners is None:
+      self._avg_corners = np.asfarray(corners, np.float32)
+    for i in xrange(0, 4):
+      # Keep an exponential moving average of the corner location to reduce
+      # noise.
+      new_contrib = np.multiply(self.CORNER_AVERAGE_WEIGHT, corners[i])
+      old_contrib = np.multiply(1 - self.CORNER_AVERAGE_WEIGHT,
+                                self._avg_corners[i])
+      self._avg_corners[i] = np.add(new_contrib, old_contrib)
+
+    return self._avg_corners
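+    # Worked example of the smoothing above (the unit tests assume
+    # CORNER_AVERAGE_WEIGHT == 0.5): corners of [10, 10] seed the average at
+    # [10, 10]; a following frame at [0, 0] gives
+    # 0.5 * [0, 0] + 0.5 * [10, 10] == [5, 5], and another frame at [0, 0]
+    # gives 0.5 * [0, 0] + 0.5 * [5, 5] == [2.5, 2.5].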
+
+  def _GetTransform(self, corners, border):
+    """Gets the perspective transform of the screen.
+
+    Args:
+      corners: The corners of the detected screen.
+      border: The number of pixels of border to crop along with the screen.
+
+    Returns:
+      A perspective transform and the width and height of the target
+      transform.
+
+    Raises:
+      ScreenNotFoundError: Something went wrong in detecting the screen."""
+    if self._screen_size is None:
+      w = np.sqrt(cv_util.SqDistance(corners[1], corners[0]))
+      h = np.sqrt(cv_util.SqDistance(corners[1], corners[2]))
+      if w < 1 or h < 1:
+        raise self.ScreenNotFoundError(
+            'Screen detected improperly (bad corners)')
+      if min(w, h) < self.MIN_SCREEN_WIDTH:
+        raise self.ScreenNotFoundError('Detected screen was too small.')
+
+      self._screen_size = (w, h)
+      # Extend min line length, if we can, to reduce the number of extraneous
+      # lines the line finder finds.
+      self._min_line_length = max(self._min_line_length, min(w, h) / 1.75)
+    w = self._screen_size[0]
+    h = self._screen_size[1]
+
+    target = np.zeros((4, 2), np.float32)
+    width = w + border
+    height = h + border
+    target[0] = np.asfarray((width, border))
+    target[1] = np.asfarray((border, border))
+    target[2] = np.asfarray((border, height))
+    target[3] = np.asfarray((width, height))
+    transform_w = width + border
+    transform_h = height + border
+    transform = cv2.getPerspectiveTransform(corners, target)
+    return transform, transform_w, transform_h
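+    # Minimal usage sketch (assumed caller code, not part of this method):
+    # the returned transform is typically applied with cv2.warpPerspective to
+    # crop and straighten the detected screen, e.g.:
+    #   transform, w, h = self._GetTransform(corners, border)
+    #   screen = cv2.warpPerspective(frame, transform, (int(w), int(h)))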
+
+  def _Debug(self, lines, corners, final_corners, screen):
+    for line in lines:
+      intline = ((int(line[0]), int(line[1])),
+                 (int(line[2]), int(line[3])))
+      cv2.line(self._frame_debug, intline[0], intline[1], (0, 0, 255), 1)
+    i = 0
+    for corner in corners:
+      if not np.isnan(corner[0]):
+        cv2.putText(
+            self._frame_debug, str(i), (int(corner[0]), int(corner[1])),
+            cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 255, 0), 1, cv2.CV_AA)
+        i += 1
+    if final_corners is not None:
+      for corner in final_corners:
+        cv2.circle(self._frame_debug,
+                   (int(corner[0]), int(corner[1])), 5, (255, 0, 255), 1)
+    cv2.imshow('original', self._frame)
+    cv2.imshow('debug', self._frame_debug)
+    if screen is not None:
+      cv2.imshow('screen', screen)
+    cv2.waitKey()
+
+# Support for running this module as a standalone script.
+# TODO(mthiesse): To be replaced with a better standalone script.
+# Ex: ./screen_finder.py path_to_video 0 5 --verbose
+
+
+def main():
+  start_frame = int(sys.argv[2]) if len(sys.argv) >= 3 else 0
+  vf = video_file_frame_generator.VideoFileFrameGenerator(sys.argv[1],
+                                                          start_frame)
+  if len(sys.argv) >= 4:
+    sf = ScreenFinder(vf, int(sys.argv[3]))
+  else:
+    sf = ScreenFinder(vf)
+  # TODO(mthiesse): Use argument parser to improve command line parsing.
+  if len(sys.argv) > 4 and sys.argv[4] == '--verbose':
+    logging.basicConfig(format='%(message)s', level=logging.INFO)
+  else:
+    logging.basicConfig(format='%(message)s', level=logging.WARN)
+  while sf.HasNext():
+    sf.GetNext()
+
+if __name__ == '__main__':
+  main()
diff --git a/catapult/telemetry/telemetry/internal/image_processing/screen_finder_unittest.py b/catapult/telemetry/telemetry/internal/image_processing/screen_finder_unittest.py
new file mode 100644
index 0000000..313b496
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/image_processing/screen_finder_unittest.py
@@ -0,0 +1,368 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import copy
+import math
+import os
+import unittest
+
+from telemetry.core import util
+from telemetry.internal.util import external_modules
+
+try:
+  np = external_modules.ImportRequiredModule('numpy')
+  cv2 = external_modules.ImportRequiredModule('cv2')
+except (ImportError, NotImplementedError) as err:
+  pass
+else:
+  # pylint: disable=protected-access
+  class ScreenFinderTest(unittest.TestCase):
+    def __init__(self, *args, **kwargs):
+      super(ScreenFinderTest, self).__init__(*args, **kwargs)
+      # Import modules with dependencies that may not be present in the test
+      # setup so that importing this unit test doesn't cause the test runner
+      # to raise an exception.
+      from telemetry.internal.image_processing import fake_frame_generator
+      from telemetry.internal.image_processing import screen_finder
+      from telemetry.internal.image_processing import video_file_frame_generator
+      self.FakeFrameGenerator = fake_frame_generator.FakeFrameGenerator
+      self.VideoFileFrameGenerator = \
+          video_file_frame_generator.VideoFileFrameGenerator
+      self.ScreenFinder = screen_finder.ScreenFinder
+
+    def _GetScreenFinder(self, video_filename):
+      if not video_filename:
+        fg = self.FakeFrameGenerator()
+      else:
+        vid = os.path.join(util.GetUnittestDataDir(), video_filename)
+        fg = self.VideoFileFrameGenerator(vid)
+      return self.ScreenFinder(fg)
+
+    def testBasicFunctionality(self):
+      def CheckCorners(corners, expected):
+        for i in xrange(len(corners)):
+          for j in xrange(len(corners[i])):
+            self.assertAlmostEqual(corners[i][j], expected[i][j], delta=1.1)
+      expected = [[314, 60], [168, 58], [162, 274], [311, 276]]
+      sf = self._GetScreenFinder('screen_3_frames.mov')
+      self.assertTrue(sf.HasNext())
+      screen, corners = sf.GetNext()
+      CheckCorners(corners, expected)
+      self.assertIsNotNone(screen)
+      height, width = screen.shape[:2]
+      self.assertAlmostEqual(height, 226, delta=2)
+      self.assertAlmostEqual(width, 156, delta=2)
+      self.assertTrue(sf.HasNext())
+      screen, corners = sf.GetNext()
+      CheckCorners(corners, expected)
+      self.assertIsNotNone(screen)
+      height1, width1 = screen.shape[:2]
+      self.assertEqual(width, width1)
+      self.assertEqual(height, height1)
+      self.assertTrue(sf.HasNext())
+      screen, corners = sf.GetNext()
+      CheckCorners(corners, expected)
+      self.assertIsNotNone(screen)
+      height2, width2 = screen.shape[:2]
+      self.assertEqual(width, width2)
+      self.assertEqual(height, height2)
+      self.assertFalse(sf.HasNext())
+      error = ''
+      try:
+        sf.GetNext()
+      except RuntimeError as e:
+        error = str(e)
+      self.assertEqual(error, 'No more frames available.')
+
+    def testHasMovedFast(self):
+      sf = self._GetScreenFinder(None)
+      prev_corners = np.asfarray(([1000, 1000], [0, 1000], [0, 0], [1000, 0]))
+      self.assertFalse(sf._HasMovedFast(prev_corners, prev_corners))
+      not_moved = copy.deepcopy(prev_corners)
+      not_moved[0][1] += 1
+      not_moved[1][1] += 1
+      not_moved[3][0] += 0.9
+      self.assertFalse(sf._HasMovedFast(not_moved, prev_corners))
+      moved = copy.deepcopy(prev_corners)
+      moved[0][1] += math.sqrt(0.5)
+      moved[0][0] += math.sqrt(0.5)
+      moved[1][1] += 2.1
+      self.assertTrue(sf._HasMovedFast(moved, prev_corners))
+
+    def testPointConnectsToCorners(self):
+      sf = self._GetScreenFinder(None)
+      line1 = np.asfarray(((0, 0, 1, 0)))
+      line2 = np.asfarray(((0, 0, 0, 1)))
+      point = np.asfarray((0, 0))
+      point_info = (point, line1, line2)
+      corners = np.asfarray(((1, 0), (0, 1)))
+      self.assertFalse(sf._PointConnectsToCorners(corners, point_info, 1))
+      corners = np.append(corners, (100, 1))
+      corners = np.append(corners, (1, 100))
+      corners = corners.reshape(-1, 2)
+      self.assertTrue(sf._PointConnectsToCorners(corners, point_info, 2))
+      self.assertFalse(sf._PointConnectsToCorners(corners, point_info, 0.5))
+      corners = np.append(corners, (100, 0))
+      corners = np.append(corners, (0, 100))
+      corners = corners.reshape(-1, 2)
+      self.assertTrue(sf._PointConnectsToCorners(corners, point_info, 0))
+
+    def testFindIntersections(self):
+      def _BuildResult(point, line1, line2):
+        return [point, np.asfarray(line1).tolist(), np.asfarray(line2).tolist()]
+
+      def _IntersectionResultsToList(results):
+        result_list = []
+        for result in results:
+          point, line1, line2 = result
+          p = np.round(point).tolist()
+          l1 = np.round(line1).tolist()
+          l2 = np.round(line2).tolist()
+          result_list.append([p, l1, l2])
+        return result_list
+
+      sf = self._GetScreenFinder(None)
+      expected = []
+      lines = []
+      # Box with corners at (0, 0), (1000, 0), (0, 1000), (1000, 1000)
+      lines.append(np.asfarray(((0, 1001, 0, -1))))
+      lines.append(np.asfarray(((-1, 0, 1001, 0))))
+      lines.append(np.asfarray(((1000, 1001, 1000, -1))))
+      lines.append(np.asfarray(((-1, 1000, 1001, 1000))))
+      expected.append(_BuildResult([0, 0], lines[0], lines[1]))
+      expected.append(_BuildResult([0, 1000], lines[0], lines[3]))
+      expected.append(_BuildResult([1000, 0], lines[1], lines[2]))
+      expected.append(_BuildResult([1000, 1000], lines[2], lines[3]))
+
+      # crosses 2 lines at 45 degrees.
+      lines.append(np.asfarray(((0, 500, 500, 0))))
+      expected.append(_BuildResult([0, 500], lines[0], lines[4]))
+      expected.append(_BuildResult([500, 0], lines[1], lines[4]))
+
+      # crosses 1 line at > 45 degrees, 1 line at < 45 degrees.
+      lines.append(np.asfarray(((0, 400, 600, 0))))
+      expected.append(_BuildResult([0, 400], lines[0], lines[5]))
+
+      # Test without previous corner data, all intersections should be found.
+      results = sf._FindIntersections(lines)
+      result_list = _IntersectionResultsToList(results)
+
+      for e in expected:
+        self.assertIn(e, result_list)
+      self.assertEqual(len(expected), len(result_list))
+
+      # Now introduce previous corners, but also reset conditions. No
+      # intersections should be lost.
+      corners = ((1000, 1000), (0, 1000), (0, 0), (1000, 0))
+      sf._prev_corners = np.asfarray(corners, np.float32)
+      sf._lost_corner_frames = sf.RESET_AFTER_N_BAD_FRAMES + 1
+      results = sf._FindIntersections(lines)
+      result_list = _IntersectionResultsToList(results)
+
+      for e in expected:
+        self.assertIn(e, result_list)
+      self.assertEqual(len(expected), len(result_list))
+
+      # Remove reset conditions, so intersections not near corners will be lost.
+      sf._lost_corner_frames = sf.RESET_AFTER_N_BAD_FRAMES
+      # First 4 intersections are the ones at the old corner locations.
+      expected = expected[0:4]
+      results = sf._FindIntersections(lines)
+      result_list = _IntersectionResultsToList(results)
+
+      for e in expected:
+        self.assertIn(e, result_list)
+      self.assertEqual(len(expected), len(result_list))
+
+    def testPointIsCloseToPreviousCorners(self):
+      sf = self._GetScreenFinder(None)
+      corners = ((1000, 1000), (0, 1000), (0, 0), (1000, 0))
+      sf._prev_corners = np.asfarray(corners, np.float32)
+      dist = math.sqrt(sf.MAX_INTERFRAME_MOTION)
+      sidedist1 = math.sqrt(sf.MAX_INTERFRAME_MOTION) / math.sqrt(2) - (1e-13)
+      sidedist2 = math.sqrt(sf.MAX_INTERFRAME_MOTION) / math.sqrt(2) + (1e-13)
+      point1 = (corners[3][0] + dist, corners[3][1])
+      self.assertTrue(sf._PointIsCloseToPreviousCorners(point1))
+      point2 = (corners[3][0] + sidedist1, corners[3][1] + sidedist1)
+      self.assertTrue(sf._PointIsCloseToPreviousCorners(point2))
+      point3 = (corners[1][0] + sidedist2, corners[1][1] + sidedist2)
+      self.assertFalse(sf._PointIsCloseToPreviousCorners(point3))
+
+    def testLooksLikeCorner(self):
+      # TODO: Probably easier to just do end-to-end tests.
+      pass
+
+    def testCornerData(self):
+      cd = self.ScreenFinder.CornerData('a', 'b', 'c', 'd', 'e')
+      self.assertEqual(cd.corner_index, 'a')
+      self.assertEqual(cd.corner_location, 'b')
+      self.assertEqual(cd.brightness_score, 'c')
+      self.assertEqual(cd.line1, 'd')
+      self.assertEqual(cd.line2, 'e')
+      cd_list = []
+      cd_list.append(self.ScreenFinder.CornerData(0, None, None, None, None))
+      cd_list.append(self.ScreenFinder.CornerData(3, None, None, None, None))
+      cd_list.append(self.ScreenFinder.CornerData(1, None, None, None, None))
+      cd_list.append(self.ScreenFinder.CornerData(2, None, None, None, None))
+      cd_list.sort()
+      for i in range(len(cd_list)):
+        self.assertEqual(i, cd_list[i].corner_index)
+
+    def testFindCorners(self):
+      # TODO: Probably easier to just do end-to-end tests.
+      pass
+
+    def testDeDupCorners(self):
+      sf = self._GetScreenFinder(None)
+      data = []
+      lines = []
+      lines.append(np.asfarray((0, 1001, 0, -1)))
+      lines.append(np.asfarray((-1, 0, 1001, 0)))
+      lines.append(np.asfarray((1000, 1001, 1000, -1)))
+      lines.append(np.asfarray((-1, 1000, 1001, 1000)))
+      lines.append(np.asfarray((0, 10, 10, 0)))
+      lines.append(np.asfarray((-1, 1001, 1001, 1001)))
+      corners = np.asfarray(((1000, 1000), (0, 1000), (0, 0),
+                             (1000, 0), (0, 10), (10, 0), (1000, 1001)))
+      data.append(self.ScreenFinder.CornerData(2, corners[2], 100,
+                                               lines[0], lines[1]))
+      data.append(self.ScreenFinder.CornerData(1, corners[1], 100,
+                                               lines[0], lines[3]))
+      data.append(self.ScreenFinder.CornerData(3, corners[3], 100,
+                                               lines[1], lines[2]))
+      data.append(self.ScreenFinder.CornerData(0, corners[0], 100,
+                                               lines[2], lines[3]))
+      data.append(self.ScreenFinder.CornerData(2, corners[4], 120,
+                                               lines[0], lines[4]))
+      data.append(self.ScreenFinder.CornerData(2, corners[5], 110,
+                                               lines[1], lines[4]))
+      data.append(self.ScreenFinder.CornerData(0, corners[6], 110,
+                                               lines[2], lines[5]))
+      dedup = copy.copy(data)
+      # Tests 2 non-duplicate corners, 1 corner with connected and unconnected
+      # corners, and 1 corner with two connected corners.
+      sf._DeDupCorners(dedup, corners)
+      self.assertEqual(len(dedup), 4)
+      self.assertIn(data[0], dedup)
+      self.assertIn(data[1], dedup)
+      self.assertIn(data[2], dedup)
+      self.assertIn(data[6], dedup)
+
+      # Same test, but this time the corner with connected and unconnected
+      # corners now only contains unconnected corners.
+      del data[0]
+      corners = np.delete(corners, 2, axis=0)
+      dedup2 = copy.copy(data)
+      sf._DeDupCorners(dedup2, corners)
+      self.assertEqual(len(dedup2), 4)
+      self.assertIn(data[3], dedup2)
+      self.assertIn(data[0], dedup2)
+      self.assertIn(data[1], dedup2)
+      self.assertIn(data[5], dedup2)
+
+    def testFindExactCorners(self):
+      sf = self._GetScreenFinder(None)
+      img = np.zeros((3, 3), np.uint8)
+      img[1][0] = 255
+      img[0][1] = 255
+      img[1][2] = 255
+      img[2][1] = 255
+      sf._frame_edges = img
+      corners = np.asfarray([(1, 1), (1, 1), (1, 1), (1, 1)])
+      expected = np.asfarray([(2, 0), (0, 0), (0, 2), (2, 2)])
+      ret = sf._FindExactCorners(corners)
+      np.testing.assert_equal(ret, expected)
+      img2 = np.zeros((3, 3), np.uint8)
+      img2[1][0] = 255
+      img2[1][1] = 255
+      img2[2][2] = 255
+      img2[2][1] = 255
+      sf._frame_edges = img2
+      expected2 = [(2, 1), (0, 1), (0, 2), (2, 2)]
+      ret2 = sf._FindExactCorners(corners)
+      np.testing.assert_equal(ret2, expected2)
+
+    def testSmoothCorners(self):
+      sf = self._GetScreenFinder(None)
+      corners = [[10, 10], [10, 10], [10, 10], [10, 10]]
+      ret = sf._SmoothCorners(corners).tolist()
+      self.assertListEqual(ret, corners)
+      corners = [[0, 0], [0, 0], [0, 0], [0, 0]]
+      expected = [[5, 5], [5, 5], [5, 5], [5, 5]]
+      ret = sf._SmoothCorners(corners).tolist()
+      self.assertListEqual(ret, expected)
+      expected = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
+      ret = sf._SmoothCorners(corners).tolist()
+      self.assertListEqual(ret, expected)
+
+    def testGetTransform(self):
+      sf = self._GetScreenFinder(None)
+      corners = np.array([[100, 1000], [0, 1000], [0, 0], [100, 0]], np.float32)
+      transform, w, h = sf._GetTransform(corners, 1)
+      transform = np.round(transform, 2)
+      expected = [[1., 0., 1.], [-0., -1., 1001.], [0., -0., 1.]]
+      self.assertListEqual(transform.tolist(), expected)
+      self.assertEqual(w, 102)
+      self.assertEqual(h, 1002)
+
+      corners = np.array([(200, 2000), (0, 2000), (0, 0), (200, 0)], np.float32)
+      transform, w, h = sf._GetTransform(corners, 5)
+      transform = np.round(transform, 2)
+      expected = [[0.5, 0.0, 5.0], [-0.0, -0.5, 1005.0], [-0.0, 0.0, 1.0]]
+      self.assertListEqual(transform.tolist(), expected)
+      self.assertEqual(w, 110)
+      self.assertEqual(h, 1010)
+
+    def testNewScreenLocation(self):
+      sf = self._GetScreenFinder(None)
+      corners_2 = np.asfarray([[np.nan, np.nan], [0, 1000], [np.nan, np.nan],
+                               [1000, 0]])
+      corners_3 = np.asfarray([[1000, 1000], [0, 1000], [np.nan, np.nan],
+                               [1000, 0]])
+      corners_4 = np.asfarray([[1000, 1000], [0, 1000], [0, 0], [1000, 0]])
+      lines = []
+      # Box with corners at (0, 0), (1000, 0), (0, 1000), (1000, 1000)
+      lines.append(np.asfarray(((0, 1001, 0, -1))))
+      lines.append(np.asfarray(((-1, 0, 1001, 0))))
+      lines.append(np.asfarray(((1000, 1001, 1000, -1))))
+      lines.append(np.asfarray(((-1, 1000, 1001, 1000))))
+      # Additional intersections near a corner.
+      lines.append(np.asfarray(((0, 3, 7, 0))))
+      lines.append(np.asfarray(((0, 4, 6, 0))))
+      intersections = sf._FindIntersections(lines)
+      failed = False
+      try:
+        sf._NewScreenLocation(corners_3, 1, intersections)
+      except self.ScreenFinder.ScreenNotFoundError:
+        failed = True
+      self.assertTrue(failed)
+
+      sf._lost_corner_frames = 10
+      sf._lost_corners = [True, True, True, True]
+      ret = sf._NewScreenLocation(corners_4, 0, intersections)
+      np.testing.assert_equal(ret, corners_4)
+      self.assertListEqual(sf._lost_corners, [False, False, False, False])
+      self.assertEqual(sf._lost_corner_frames, 0)
+
+      sf._prev_corners = corners_4
+      ret = sf._NewScreenLocation(corners_3, 1, intersections)
+      ret = np.round(ret)
+      np.testing.assert_equal(ret, corners_4)
+      self.assertListEqual(sf._lost_corners, [False, False, True, False])
+      self.assertEqual(sf._lost_corner_frames, 1)
+
+      sf._prev_corners = np.asfarray([(1000, 1000), (0, 1000),
+                                      (0, 3), (1000, 0)])
+      ret = sf._NewScreenLocation(corners_3, 1, intersections)
+      ret = np.round(ret)
+      np.testing.assert_equal(ret, corners_4)
+      self.assertListEqual(sf._lost_corners, [False, False, True, False])
+      self.assertEqual(sf._lost_corner_frames, 2)
+
+      ret = sf._NewScreenLocation(corners_2, 2, intersections)
+      ret = np.round(ret)
+      expected = [[1000, 1000], [0, 1000], [0, 3], [1000, 0]]
+      np.testing.assert_equal(ret, expected)
+      self.assertListEqual(sf._lost_corners, [True, False, True, False])
+      self.assertEqual(sf._lost_corner_frames, 3)
diff --git a/catapult/telemetry/telemetry/internal/image_processing/video.py b/catapult/telemetry/telemetry/internal/image_processing/video.py
new file mode 100644
index 0000000..43f2f28
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/image_processing/video.py
@@ -0,0 +1,172 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import subprocess
+
+from catapult_base import cloud_storage  # pylint: disable=import-error
+
+from telemetry.core import platform
+from telemetry.util import image_util
+from telemetry.util import rgba_color
+
+HIGHLIGHT_ORANGE_FRAME = rgba_color.WEB_PAGE_TEST_ORANGE
+
+class BoundingBoxNotFoundException(Exception):
+  pass
+
+
+class Video(object):
+  """Utilities for storing and interacting with the video capture."""
+
+  def __init__(self, video_file_obj):
+    assert video_file_obj.delete
+    assert not video_file_obj.close_called
+    self._video_file_obj = video_file_obj
+    self._tab_contents_bounding_box = None
+
+  def UploadToCloudStorage(self, bucket, target_path):
+    """Uploads video file to cloud storage.
+
+    Args:
+      bucket: The cloud storage bucket to upload to.
+      target_path: Path indicating where to store the file in cloud storage.
+    """
+    cloud_storage.Insert(bucket, target_path, self._video_file_obj.name)
+
+  def GetVideoFrameIter(self):
+    """Returns the iteration for processing the video capture.
+
+    This looks for the initial color flash in the first frame to establish the
+    tab content boundaries and then omits all frames displaying the flash.
+
+    Yields:
+      (time_ms, image) tuples representing each video keyframe. Only the first
+      frame in a run of sequential duplicate bitmaps is typically included.
+        time_ms is milliseconds since navigationStart.
+        image may be a telemetry.core.Bitmap, or a numpy array depending on
+        whether numpy is installed.
+    """
+    frame_generator = self._FramesFromMp4(self._video_file_obj.name)
+
+    # Flip through frames until we find the initial tab contents flash.
+    content_box = None
+    for _, bmp in frame_generator:
+      content_box = self._FindHighlightBoundingBox(
+          bmp, HIGHLIGHT_ORANGE_FRAME)
+      if content_box:
+        break
+
+    if not content_box:
+      raise BoundingBoxNotFoundException(
+          'Failed to identify tab contents in video capture.')
+
+    # Flip through frames until the flash goes away and emit that as frame 0.
+    timestamp = 0
+    for timestamp, bmp in frame_generator:
+      if not self._FindHighlightBoundingBox(bmp, HIGHLIGHT_ORANGE_FRAME):
+        yield 0, image_util.Crop(bmp, *content_box)
+        break
+
+    start_time = timestamp
+    for timestamp, bmp in frame_generator:
+      yield timestamp - start_time, image_util.Crop(bmp, *content_box)
+
+  def _FindHighlightBoundingBox(self, bmp, color, bounds_tolerance=8,
+                                color_tolerance=8):
+    """Returns the bounding box of the content highlight of the given color.
+
+    Raises:
+      BoundingBoxNotFoundException if the highlight could not be found.
+    """
+    content_box, pixel_count = image_util.GetBoundingBox(bmp, color,
+        tolerance=color_tolerance)
+
+    if not content_box:
+      return None
+
+    # We assume arbitrarily that tabs are all larger than 200x200. If this
+    # fails, either that assumption has changed or something is awry with
+    # our bounding box calculation.
+    if content_box[2] < 200 or content_box[3] < 200:
+      raise BoundingBoxNotFoundException('Unexpectedly small tab contents.')
+
+    # TODO(tonyg): Can this threshold be increased?
+    if pixel_count < 0.9 * content_box[2] * content_box[3]:
+      raise BoundingBoxNotFoundException(
+          'Low count of pixels in tab contents matching expected color.')
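+    # For example (illustrative numbers only): a 400x300 content box needs at
+    # least 0.9 * 400 * 300 == 108000 pixels matching the highlight color to
+    # pass this check.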
+
+    # Since we allow some fuzziness in bounding box finding, we want to make
+    # sure that the bounds are always stable across a run. So we cache the
+    # first box, whatever it may be.
+    #
+    # This relies on the assumption that since Telemetry doesn't know how to
+    # resize the window, we should always get the same content box for a tab.
+    # If this assumption changes, this caching needs to be reworked.
+    if not self._tab_contents_bounding_box:
+      self._tab_contents_bounding_box = content_box
+
+    # Verify that there is only minor variation in the bounding box. If it's
+    # just a few pixels, we can assume it's due to compression artifacts.
+    for x, y in zip(self._tab_contents_bounding_box, content_box):
+      if abs(x - y) > bounds_tolerance:
+        # If this fails, it means that either the above assumption has changed
+        # or something is awry with our bounding box calculation.
+        raise BoundingBoxNotFoundException(
+            'Unexpected change in tab contents box.')
+
+    return self._tab_contents_bounding_box
+
+  def _FramesFromMp4(self, mp4_file):
+    host_platform = platform.GetHostPlatform()
+    if not host_platform.CanLaunchApplication('avconv'):
+      host_platform.InstallApplication('avconv')
+
+    def GetDimensions(video):
+      proc = subprocess.Popen(['avconv', '-i', video], stderr=subprocess.PIPE)
+      dimensions = None
+      output = ''
+      for line in proc.stderr.readlines():
+        output += line
+        if 'Video:' in line:
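+          # For instance (illustrative formatting, not an exact avconv
+          # quote), a line like 'Stream #0.0: Video: h264, yuv420p, 432x320,
+          # ...' yields ' 432x320' at index 2, which parses to [432, 320].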
+          dimensions = line.split(',')[2]
+          dimensions = map(int, dimensions.split()[0].split('x'))
+          break
+      proc.communicate()
+      assert dimensions, ('Failed to determine video dimensions. output=%s' %
+                          output)
+      return dimensions
+
+    def GetFrameTimestampMs(stderr):
+      """Returns the frame timestamp in integer milliseconds from the dump log.
+
+      The expected line format is:
+      '  dts=1.715  pts=1.715\n'
+
+      We have to be careful to only read a single timestamp per call to avoid
+      deadlock because avconv interleaves its writes to stdout and stderr.
+      """
+      while True:
+        line = ''
+        next_char = ''
+        while next_char != '\n':
+          next_char = stderr.read(1)
+          line += next_char
+        if 'pts=' in line:
+          return int(1000 * float(line.split('=')[-1]))
+
+    dimensions = GetDimensions(mp4_file)
+    frame_length = dimensions[0] * dimensions[1] * 3
+    frame_data = bytearray(frame_length)
+
+    # Use rawvideo so that we don't need any external library to parse frames.
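+    # With rgb24 output each frame is exactly width * height * 3 bytes; for a
+    # 432x320 capture, for example, that is 432 * 320 * 3 == 414720 bytes.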
+    proc = subprocess.Popen(['avconv', '-i', mp4_file, '-vcodec',
+                             'rawvideo', '-pix_fmt', 'rgb24', '-dump',
+                             '-loglevel', 'debug', '-f', 'rawvideo', '-'],
+                            stderr=subprocess.PIPE, stdout=subprocess.PIPE)
+    while True:
+      num_read = proc.stdout.readinto(frame_data)
+      if not num_read:
+        raise StopIteration
+      assert num_read == len(frame_data), 'Unexpected frame size: %d' % num_read
+      yield (GetFrameTimestampMs(proc.stderr),
+             image_util.FromRGBPixels(dimensions[0], dimensions[1], frame_data))
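+
+# A minimal usage sketch for this module (hypothetical caller code; the
+# bucket and target path below are placeholders): video_file_obj must be a
+# named temporary file object satisfying the assertions in Video.__init__.
+#
+#   video_obj = Video(video_file_obj)
+#   video_obj.UploadToCloudStorage('some-bucket', 'traces/capture.mp4')
+#   for time_ms, image in video_obj.GetVideoFrameIter():
+#     pass  # time_ms is milliseconds since navigationStart.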
diff --git a/catapult/telemetry/telemetry/internal/image_processing/video_file_frame_generator.py b/catapult/telemetry/telemetry/internal/image_processing/video_file_frame_generator.py
new file mode 100644
index 0000000..dc209b4
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/image_processing/video_file_frame_generator.py
@@ -0,0 +1,95 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.internal.image_processing import frame_generator
+from telemetry.internal.util import external_modules
+
+cv2 = external_modules.ImportRequiredModule('cv2')
+
+
+class VideoFileFrameGenerator(frame_generator.FrameGenerator):
+  """Provides a Frame Generator for a video file.
+
+  Sample Usage:
+    generator = VideoFileFrameGenerator(sys.argv[1]).GetGenerator()
+    for frame in generator:
+      # Do something
+
+  Attributes:
+    _capture: The OpenCV video capture.
+    _frame_count: The number of frames in the video capture.
+    _frame_index: The frame number of the current frame.
+    _timestamp: The timestamp of the current frame.
+    _dimensions: The dimensions of the video capture."""
+  def __init__(self, video_filename, start_frame_index=0):
+    """Initializes the VideoFileFrameGenerator object.
+
+    Args:
+      video_filename: str, The path to the video file.
+      start_frame_index: int, The number of frames to skip at the start of the
+          file.
+
+    Raises:
+      FrameReadError: A read error occurred during initialization."""
+    self._capture = cv2.VideoCapture(video_filename)
+    self._frame_count = int(self._capture.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
+    self._frame_index = -1
+    self._timestamp = 0
+    width = self._capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)
+    height = self._capture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)
+    self._dimensions = (int(width), int(height))
+    if self._frame_count <= start_frame_index:
+      raise frame_generator.FrameReadError('Not enough frames in capture.')
+    while self._frame_index < start_frame_index - 1:
+      self._ReadFrame(True)
+
+    super(VideoFileFrameGenerator, self).__init__()
+
+  def _ReadFrame(self, skip_decode=False):
+    """Reads the next frame, updates attributes.
+
+    Args:
+      skip_decode: Whether or not to skip decoding. Useful for seeking.
+
+    Returns:
+      The frame if not EOF, 'None' if EOF.
+
+    Raises:
+      FrameReadError: Unexpectedly failed to read a frame from the capture."""
+    if self._frame_index >= self._frame_count - 1:
+      return None
+    self._timestamp = self._capture.get(cv2.cv.CV_CAP_PROP_POS_MSEC)
+    if skip_decode:
+      ret = self._capture.grab()
+      frame = None
+    else:
+      ret, frame = self._capture.read()
+    if not ret:
+      raise frame_generator.FrameReadError(
+          'Failed to read frame from capture.')
+    self._frame_index += 1
+    return frame
+
+  # OVERRIDE
+  def _CreateGenerator(self):
+    while True:
+      frame = self._ReadFrame()
+      if frame is None:
+        break
+      yield frame
+
+  # OVERRIDE
+  @property
+  def CurrentTimestamp(self):
+    return self._timestamp
+
+  # OVERRIDE
+  @property
+  def CurrentFrameNumber(self):
+    return self._frame_index
+
+  # OVERRIDE
+  @property
+  def Dimensions(self):
+    return self._dimensions
diff --git a/catapult/telemetry/telemetry/internal/image_processing/video_file_frame_generator_unittest.py b/catapult/telemetry/telemetry/internal/image_processing/video_file_frame_generator_unittest.py
new file mode 100644
index 0000000..72632a2
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/image_processing/video_file_frame_generator_unittest.py
@@ -0,0 +1,86 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import unittest
+
+from telemetry.core import util
+from telemetry.internal.image_processing import frame_generator
+from telemetry.internal.util import external_modules
+
+try:
+  cv2 = external_modules.ImportRequiredModule('cv2')
+except (ImportError, NotImplementedError):
+  pass
+else:
+  class VideoFileFrameGeneratorTest(unittest.TestCase):
+    def __init__(self, *args, **kwargs):
+      super(VideoFileFrameGeneratorTest, self).__init__(*args, **kwargs)
+      # Import modules with dependencies that may not be present in the test
+      # setup so that importing this unit test doesn't cause the test runner
+      # to raise an exception.
+      from telemetry.internal.image_processing import video_file_frame_generator
+      self.VideoFileFrameGenerator = \
+          video_file_frame_generator.VideoFileFrameGenerator
+
+    def testVideoFileFrameGeneratorSuccess(self):
+      vid = os.path.join(util.GetUnittestDataDir(), 'screen_3_frames.mov')
+      fg = self.VideoFileFrameGenerator(vid)
+      timestamps = [0, 33.367, 66.733]
+      self.assertTrue(isinstance(fg, frame_generator.FrameGenerator))
+
+      self.assertEqual(fg.CurrentFrameNumber, -1)
+      self.assertAlmostEqual(fg.CurrentTimestamp, 0, 3)
+      self.assertEqual(fg.Dimensions, (432, 320))
+      generator = fg.Generator
+      i = 0
+      for frame in generator:
+        self.assertEqual(fg.CurrentFrameNumber, i)
+        self.assertAlmostEqual(fg.CurrentTimestamp, timestamps[i], 3)
+        self.assertEqual(fg.Dimensions, (432, 320))
+        self.assertEqual(frame.shape[:2], (320, 432))
+        i += 1
+      self.assertEqual(i, 3)
+      try:
+        next(generator)
+        stopped = False
+      except StopIteration:
+        stopped = True
+      self.assertTrue(stopped)
+      try:
+        next(fg.Generator)
+        stopped = False
+      except StopIteration:
+        stopped = True
+      self.assertTrue(stopped)
+
+    def testVideoFileFrameGeneratorSkipFrames(self):
+      vid = os.path.join(util.GetUnittestDataDir(), 'screen_3_frames.mov')
+      fg = self.VideoFileFrameGenerator(vid, 2)
+      self.assertEqual(fg.CurrentFrameNumber, 1)
+      self.assertAlmostEqual(fg.CurrentTimestamp, 33.367, 3)
+      self.assertEqual(fg.Dimensions, (432, 320))
+      next(fg.Generator)
+      try:
+        next(fg.Generator)
+        stopped = False
+      except StopIteration:
+        stopped = True
+      self.assertTrue(stopped)
+
+    def testVideoFileFrameGeneratorFailure(self):
+      vid = os.path.join(util.GetUnittestDataDir(), 'screen_3_frames.mov')
+      try:
+        self.VideoFileFrameGenerator(vid, 4)
+        fail = False
+      except frame_generator.FrameReadError:
+        fail = True
+      self.assertTrue(fail)
+
+      try:
+        self.VideoFileFrameGenerator('not_a_file', 0)
+        fail = False
+      except frame_generator.FrameReadError:
+        fail = True
+      self.assertTrue(fail)
diff --git a/catapult/telemetry/telemetry/internal/image_processing/video_unittest.py b/catapult/telemetry/telemetry/internal/image_processing/video_unittest.py
new file mode 100644
index 0000000..0f83ff5
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/image_processing/video_unittest.py
@@ -0,0 +1,50 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import unittest
+
+from telemetry.core import platform
+from telemetry.core import util
+from telemetry import decorators
+from telemetry.internal.image_processing import video
+from telemetry.util import image_util
+
+
+class VideoTest(unittest.TestCase):
+
+  @decorators.Disabled('all')
+  def testFramesFromMp4(self):
+    host_platform = platform.GetHostPlatform()
+
+    try:
+      host_platform.InstallApplication('avconv')
+    finally:
+      if not host_platform.CanLaunchApplication('avconv'):
+        logging.warning('Test not supported on this platform')
+        return  # pylint: disable=lost-exception
+
+    vid = os.path.join(util.GetUnittestDataDir(), 'vid.mp4')
+    expected_timestamps = [
+      0,
+      763,
+      783,
+      940,
+      1715,
+      1732,
+      1842,
+      1926,
+      ]
+
+    video_obj = video.Video(vid)
+
+    # Calling _FramesFromMp4 should return all frames.
+    # pylint: disable=protected-access
+    for i, timestamp_bitmap in enumerate(video_obj._FramesFromMp4(vid)):
+      timestamp, bmp = timestamp_bitmap
+      self.assertEquals(timestamp, expected_timestamps[i])
+      expected_bitmap = image_util.FromPngFile(os.path.join(
+          util.GetUnittestDataDir(), 'frame%d.png' % i))
+      self.assertTrue(image_util.AreEqual(expected_bitmap, bmp))
diff --git a/catapult/telemetry/telemetry/internal/platform/__init__.py b/catapult/telemetry/telemetry/internal/platform/__init__.py
new file mode 100644
index 0000000..50b23df
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/catapult/telemetry/telemetry/internal/platform/android_device.py b/catapult/telemetry/telemetry/internal/platform/android_device.py
new file mode 100644
index 0000000..bcddb60
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/android_device.py
@@ -0,0 +1,199 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import logging
+import os
+import re
+import subprocess
+
+from telemetry.core import util
+from telemetry.internal.platform import cros_device
+from telemetry.internal.platform import device
+from telemetry.internal.platform.profiler import monsoon
+
+from devil.android import device_blacklist
+from devil.android import device_errors
+from devil.android import device_utils
+from devil.android.sdk import adb_wrapper
+
+
+class AndroidDevice(device.Device):
+  """ Class represents information for connecting to an android device.
+
+  Attributes:
+    device_id: the device's serial string created by adb to uniquely
+      identify an emulator/device instance. This string can be found by running
+      'adb devices' command
+    enable_performance_mode: when this is set to True, android platform will be
+    set to high performance mode after browser is started.
+  """
+  def __init__(self, device_id, enable_performance_mode=True):
+    super(AndroidDevice, self).__init__(
+        name='Android device %s' % device_id, guid=device_id)
+    self._device_id = device_id
+    self._enable_performance_mode = enable_performance_mode
+
+  @classmethod
+  def GetAllConnectedDevices(cls, blacklist):
+    device_serials = GetDeviceSerials(blacklist)
+    return [cls(s) for s in device_serials]
+
+  @property
+  def device_id(self):
+    return self._device_id
+
+  @property
+  def enable_performance_mode(self):
+    return self._enable_performance_mode
+
+
+def _ListSerialsOfHealthyOnlineDevices(blacklist):
+  return [d.adb.GetDeviceSerial()
+          for d in device_utils.DeviceUtils.HealthyDevices(blacklist)
+          if d.IsOnline()]
+
+
+def GetDeviceSerials(blacklist):
+  """Return the list of device serials of healthy devices.
+
+  If a preferred device has been set with ANDROID_SERIAL, it will be first in
+  the returned list. The arguments specify what devices to include in the list.
+  """
+
+  device_serials = _ListSerialsOfHealthyOnlineDevices(blacklist)
+
+  # The monsoon provides power for the device, so for devices with no
+  # real battery, we need to turn them on after the monsoon enables voltage
+  # output to the device.
+  if not device_serials:
+    try:
+      m = monsoon.Monsoon(wait=False)
+      m.SetUsbPassthrough(1)
+      m.SetVoltage(3.8)
+      m.SetMaxCurrent(8)
+      logging.warn("""
+Monsoon power monitor detected, but no Android devices.
+
+The Monsoon's power output has been enabled. Please now ensure that:
+
+  1. The Monsoon's front and back USB are connected to the host.
+  2. The device is connected to the Monsoon's main and USB channels.
+  3. The device is turned on.
+
+Waiting for device...
+""")
+      util.WaitFor(lambda: _ListSerialsOfHealthyOnlineDevices(blacklist), 600)
+      device_serials = _ListSerialsOfHealthyOnlineDevices(blacklist)
+    except IOError:
+      return []
+
+  preferred_device = os.environ.get('ANDROID_SERIAL')
+  if preferred_device in device_serials:
+    logging.warn(
+        'ANDROID_SERIAL is defined. Put %s first in the '
+        'discovered devices list.' % preferred_device)
+    device_serials.remove(preferred_device)
+    device_serials.insert(0, preferred_device)
+  return device_serials
+
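+# Example of the ANDROID_SERIAL handling above: with healthy serials
+# ['0123456789ab', 'ba9876543210'] (hypothetical values) and
+# ANDROID_SERIAL=ba9876543210 set in the environment, the returned list is
+# ['ba9876543210', '0123456789ab'].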
+
+def GetDevice(finder_options):
+  """Return a Platform instance for the device specified by |finder_options|."""
+  if not CanDiscoverDevices():
+    logging.info(
+        'No adb command found. Will not try searching for Android browsers.')
+    return None
+
+  if finder_options.android_blacklist_file:
+    blacklist = device_blacklist.Blacklist(
+        finder_options.android_blacklist_file)
+  else:
+    blacklist = None
+
+  if (finder_options.device
+      and finder_options.device in GetDeviceSerials(blacklist)):
+    return AndroidDevice(
+        finder_options.device,
+        enable_performance_mode=not finder_options.no_performance_mode)
+
+  devices = AndroidDevice.GetAllConnectedDevices(blacklist)
+  if len(devices) == 0:
+    logging.warn('No android devices found.')
+    return None
+  if len(devices) > 1:
+    logging.warn(
+        'Multiple devices attached. Please specify one of the following:\n' +
+        '\n'.join(['  --device=%s' % d.device_id for d in devices]))
+    return None
+  return devices[0]
+
+
+def _HasValidAdb():
+  """Returns true if adb is present.
+
+  Note that this currently will return True even if the adb that's present
+  cannot run on this system.
+  """
+  if os.name != 'posix' or cros_device.IsRunningOnCrOS():
+    return False
+
+  try:
+    adb_path = adb_wrapper.AdbWrapper.GetAdbPath()
+  except device_errors.NoAdbError:
+    return False
+
+  if os.path.isabs(adb_path) and not os.path.exists(adb_path):
+    return False
+
+  return True
+
+
+def CanDiscoverDevices():
+  """Returns true if devices are discoverable via adb."""
+  if not _HasValidAdb():
+    return False
+
+  try:
+    with open(os.devnull, 'w') as devnull:
+      adb_process = subprocess.Popen(
+          ['adb', 'devices'], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+          stdin=devnull)
+      stdout = adb_process.communicate()[0]
+    if re.search(re.escape('????????????\tno permissions'), stdout) != None:
+      logging.warn('adb devices gave a permissions error. '
+                   'Consider running adb as root:')
+      logging.warn('  adb kill-server')
+      logging.warn('  sudo `which adb` devices\n\n')
+    return True
+  except OSError:
+    pass
+  try:
+    adb_path = adb_wrapper.AdbWrapper.GetAdbPath()
+    os.environ['PATH'] = os.pathsep.join(
+        [os.path.dirname(adb_path), os.environ['PATH']])
+    device_utils.DeviceUtils.HealthyDevices(None)
+    return True
+  except (device_errors.CommandFailedError, device_errors.CommandTimeoutError,
+          device_errors.NoAdbError, OSError):
+    return False
+
+
+def FindAllAvailableDevices(options):
+  """Returns a list of available devices.
+  """
+  devices = []
+  try:
+    if CanDiscoverDevices():
+      blacklist = None
+      if options.android_blacklist_file:
+        blacklist = device_blacklist.Blacklist(options.android_blacklist_file)
+      devices = AndroidDevice.GetAllConnectedDevices(blacklist)
+  finally:
+    if not devices and _HasValidAdb():
+      try:
+        adb_wrapper.AdbWrapper.KillServer()
+      except device_errors.NoAdbError as e:
+        logging.warning(
+            'adb reported as present, but NoAdbError thrown: %s', str(e))
+
+  return devices
diff --git a/catapult/telemetry/telemetry/internal/platform/android_device_unittest.py b/catapult/telemetry/telemetry/internal/platform/android_device_unittest.py
new file mode 100644
index 0000000..85eaadb
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/android_device_unittest.py
@@ -0,0 +1,147 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry import decorators
+from telemetry.internal.browser import browser_options
+from telemetry.internal.platform import android_device
+from telemetry.testing import system_stub
+import mock
+
+from devil.android import device_utils
+from devil.android import device_blacklist
+
+
+class _BaseAndroidDeviceTest(unittest.TestCase):
+  def setUp(self):
+    def check_blacklist_arg(blacklist):
+      self.assertTrue(blacklist is None
+                      or isinstance(blacklist, device_blacklist.Blacklist))
+      return mock.DEFAULT
+
+    self._healthy_device_patcher = mock.patch(
+        'devil.android.device_utils.DeviceUtils.HealthyDevices')
+    self._healthy_device_mock = self._healthy_device_patcher.start()
+    self._healthy_device_mock.side_effect = check_blacklist_arg
+    self._android_device_stub = system_stub.Override(
+        android_device, ['subprocess', 'logging'])
+
+  def _GetMockDeviceUtils(self, device_serial, is_online=True):
+    device = device_utils.DeviceUtils(device_serial)
+    device.IsOnline = mock.MagicMock(return_value=is_online)
+    return device
+
+  def tearDown(self):
+    self._healthy_device_patcher.stop()
+    self._android_device_stub.Restore()
+
+
+class AndroidDeviceTest(_BaseAndroidDeviceTest):
+  @decorators.Enabled('android')
+  def testGetAllAttachedAndroidDevices(self):
+    self._healthy_device_mock.return_value = [
+        self._GetMockDeviceUtils('01'),
+        self._GetMockDeviceUtils('07', is_online=False),
+        self._GetMockDeviceUtils('02'),
+        self._GetMockDeviceUtils('03', is_online=False)]
+    self.assertEquals(
+        set(['01', '02']),
+        set(device.device_id for device in
+            android_device.AndroidDevice.GetAllConnectedDevices(None)))
+
+  @decorators.Enabled('android')
+  def testNoAdbReturnsNone(self):
+    finder_options = browser_options.BrowserFinderOptions()
+    with (
+        mock.patch('os.path.isabs', return_value=True)), (
+        mock.patch('os.path.exists', return_value=False)):
+      self.assertEquals([], self._android_device_stub.logging.warnings)
+      self.assertIsNone(android_device.GetDevice(finder_options))
+
+  @decorators.Enabled('android')
+  def testAdbNoDevicesReturnsNone(self):
+    finder_options = browser_options.BrowserFinderOptions()
+    with mock.patch('os.path.isabs', return_value=False):
+      self._healthy_device_mock.return_value = []
+      self.assertEquals([], self._android_device_stub.logging.warnings)
+      self.assertIsNone(android_device.GetDevice(finder_options))
+
+  @decorators.Enabled('android')
+  def testAdbTwoDevicesReturnsNone(self):
+    finder_options = browser_options.BrowserFinderOptions()
+    with mock.patch('os.path.isabs', return_value=False):
+      self._healthy_device_mock.return_value = [
+          self._GetMockDeviceUtils('015d14fec128220c'),
+          self._GetMockDeviceUtils('015d14fec128220d')]
+      device = android_device.GetDevice(finder_options)
+      self.assertEquals([
+          'Multiple devices attached. Please specify one of the following:\n'
+          '  --device=015d14fec128220c\n'
+          '  --device=015d14fec128220d'],
+          self._android_device_stub.logging.warnings)
+      self.assertIsNone(device)
+
+  @decorators.Enabled('android')
+  def testAdbPickOneDeviceReturnsDeviceInstance(self):
+    finder_options = browser_options.BrowserFinderOptions()
+    finder_options.device = '555d14fecddddddd'  # pick one
+    with mock.patch('os.path.isabs', return_value=False):
+      self._healthy_device_mock.return_value = [
+          self._GetMockDeviceUtils('015d14fec128220c'),
+          self._GetMockDeviceUtils('555d14fecddddddd')]
+      device = android_device.GetDevice(finder_options)
+      self.assertEquals([], self._android_device_stub.logging.warnings)
+      self.assertEquals('555d14fecddddddd', device.device_id)
+
+  @decorators.Enabled('android')
+  def testAdbOneDeviceReturnsDeviceInstance(self):
+    finder_options = browser_options.BrowserFinderOptions()
+    with mock.patch('os.path.isabs', return_value=False):
+      self._healthy_device_mock.return_value = [
+          self._GetMockDeviceUtils('015d14fec128220c')]
+      device = android_device.GetDevice(finder_options)
+      self.assertEquals([], self._android_device_stub.logging.warnings)
+      self.assertEquals('015d14fec128220c', device.device_id)
+
+
+class FindAllAvailableDevicesTest(_BaseAndroidDeviceTest):
+  @decorators.Enabled('android')
+  def testAdbNoDeviceReturnsEmptyList(self):
+    finder_options = browser_options.BrowserFinderOptions()
+    with mock.patch('os.path.isabs', return_value=False):
+      self._healthy_device_mock.return_value = []
+      devices = android_device.FindAllAvailableDevices(finder_options)
+      self.assertEquals([], self._android_device_stub.logging.warnings)
+      self.assertIsNotNone(devices)
+      self.assertEquals(len(devices), 0)
+
+  @decorators.Enabled('android')
+  def testAdbOneDeviceReturnsListWithOneDeviceInstance(self):
+    finder_options = browser_options.BrowserFinderOptions()
+    with mock.patch('os.path.isabs', return_value=False):
+      self._healthy_device_mock.return_value = [
+          self._GetMockDeviceUtils('015d14fec128220c')]
+      devices = android_device.FindAllAvailableDevices(finder_options)
+      self.assertEquals([], self._android_device_stub.logging.warnings)
+      self.assertIsNotNone(devices)
+      self.assertEquals(len(devices), 1)
+      self.assertEquals('015d14fec128220c', devices[0].device_id)
+
+  @decorators.Enabled('android')
+  def testAdbMultipleDevicesReturnsListWithAllDeviceInstances(self):
+    finder_options = browser_options.BrowserFinderOptions()
+    with mock.patch('os.path.isabs', return_value=False):
+      self._healthy_device_mock.return_value = [
+          self._GetMockDeviceUtils('015d14fec128220c'),
+          self._GetMockDeviceUtils('this0should0not0show', is_online=False),
+          self._GetMockDeviceUtils('015d14fec128220d'),
+          self._GetMockDeviceUtils('015d14fec128220e')]
+      devices = android_device.FindAllAvailableDevices(finder_options)
+      self.assertEquals([], self._android_device_stub.logging.warnings)
+      self.assertIsNotNone(devices)
+      self.assertEquals(len(devices), 3)
+      self.assertEquals(devices[0].guid, '015d14fec128220c')
+      self.assertEquals(devices[1].guid, '015d14fec128220d')
+      self.assertEquals(devices[2].guid, '015d14fec128220e')
diff --git a/catapult/telemetry/telemetry/internal/platform/android_platform_backend.py b/catapult/telemetry/telemetry/internal/platform/android_platform_backend.py
new file mode 100644
index 0000000..05611f9
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/android_platform_backend.py
@@ -0,0 +1,751 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import re
+import subprocess
+import tempfile
+
+from telemetry.core import android_platform
+from telemetry.core import exceptions
+from telemetry.core import util
+from telemetry import decorators
+from telemetry.internal.forwarders import android_forwarder
+from telemetry.internal.image_processing import video
+from telemetry.internal.platform import android_device
+from telemetry.internal.platform import linux_based_platform_backend
+from telemetry.internal.platform.power_monitor import android_dumpsys_power_monitor
+from telemetry.internal.platform.power_monitor import android_fuelgauge_power_monitor
+from telemetry.internal.platform.power_monitor import android_temperature_monitor
+from telemetry.internal.platform.power_monitor import monsoon_power_monitor
+from telemetry.internal.platform.power_monitor import (
+  android_power_monitor_controller)
+from telemetry.internal.platform.power_monitor import sysfs_power_monitor
+from telemetry.internal.platform.profiler import android_prebuilt_profiler_helper
+from telemetry.internal.util import external_modules
+
+psutil = external_modules.ImportOptionalModule('psutil')
+import adb_install_cert
+
+from devil.android import app_ui
+from devil.android import battery_utils
+from devil.android import device_errors
+from devil.android import device_utils
+from devil.android.perf import cache_control
+from devil.android.perf import perf_control
+from devil.android.perf import thermal_throttle
+from devil.android.sdk import version_codes
+from devil.android.tools import video_recorder
+
+try:
+  from devil.android.perf import surface_stats_collector
+except Exception:
+  surface_stats_collector = None
+
+
+_DEVICE_COPY_SCRIPT_FILE = os.path.abspath(os.path.join(
+    os.path.dirname(__file__), 'efficient_android_directory_copy.sh'))
+_DEVICE_COPY_SCRIPT_LOCATION = (
+    '/data/local/tmp/efficient_android_directory_copy.sh')
+
+# TODO(nednguyen): Remove this method and update the client config to point to
+# the correct binary instead.
+def _FindLocallyBuiltPath(binary_name):
+  """Finds the most recently built |binary_name|."""
+  command = None
+  command_mtime = 0
+  required_mode = os.X_OK
+  if binary_name.endswith('.apk'):
+    required_mode = os.R_OK
+  for build_path in util.GetBuildDirectories():
+    candidate = os.path.join(build_path, binary_name)
+    if os.path.isfile(candidate) and os.access(candidate, required_mode):
+      candidate_mtime = os.stat(candidate).st_mtime
+      if candidate_mtime > command_mtime:
+        command = candidate
+        command_mtime = candidate_mtime
+  return command
+
+
+class AndroidPlatformBackend(
+    linux_based_platform_backend.LinuxBasedPlatformBackend):
+  def __init__(self, device):
+    assert device, (
+        'AndroidPlatformBackend can only be initialized from remote device')
+    super(AndroidPlatformBackend, self).__init__(device)
+    self._device = device_utils.DeviceUtils(device.device_id)
+    # Try to root the device if possible.
+    if not self._device.HasRoot():
+      try:
+        self._device.EnableRoot()
+      except device_errors.CommandFailedError:
+        logging.warning('Unable to root %s', str(self._device))
+    self._battery = battery_utils.BatteryUtils(self._device)
+    self._enable_performance_mode = device.enable_performance_mode
+    self._surface_stats_collector = None
+    self._perf_tests_setup = perf_control.PerfControl(self._device)
+    self._thermal_throttle = thermal_throttle.ThermalThrottle(self._device)
+    self._raw_display_frame_rate_measurements = []
+    try:
+      self._can_access_protected_file_contents = (
+          self._device.HasRoot() or self._device.NeedsSU())
+    except Exception:
+      logging.exception('New exception caused by DeviceUtils conversion')
+      raise
+    self._device_copy_script = None
+    self._power_monitor = (
+      android_power_monitor_controller.AndroidPowerMonitorController([
+        android_temperature_monitor.AndroidTemperatureMonitor(self._device),
+        monsoon_power_monitor.MonsoonPowerMonitor(self._device, self),
+        android_dumpsys_power_monitor.DumpsysPowerMonitor(
+          self._battery, self),
+        sysfs_power_monitor.SysfsPowerMonitor(self, standalone=True),
+        android_fuelgauge_power_monitor.FuelGaugePowerMonitor(
+            self._battery),
+    ], self._battery))
+    self._video_recorder = None
+    self._installed_applications = None
+
+    self._device_cert_util = None
+    self._system_ui = None
+
+    _FixPossibleAdbInstability()
+
+  @property
+  def log_file_path(self):
+    return None
+
+  @classmethod
+  def SupportsDevice(cls, device):
+    return isinstance(device, android_device.AndroidDevice)
+
+  @classmethod
+  def CreatePlatformForDevice(cls, device, finder_options):
+    assert cls.SupportsDevice(device)
+    platform_backend = AndroidPlatformBackend(device)
+    return android_platform.AndroidPlatform(platform_backend)
+
+  @property
+  def forwarder_factory(self):
+    if not self._forwarder_factory:
+      self._forwarder_factory = android_forwarder.AndroidForwarderFactory(
+          self._device)
+
+    return self._forwarder_factory
+
+  @property
+  def device(self):
+    return self._device
+
+  def GetSystemUi(self):
+    if self._system_ui is None:
+      self._system_ui = app_ui.AppUi(self.device, 'com.android.systemui')
+    return self._system_ui
+
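+  # A 'svelte' ro.build.description marks Android's low-memory ("Svelte")
+  # build configuration.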
+  def IsSvelte(self):
+    description = self._device.GetProp('ro.build.description', cache=True)
+    if description is not None:
+      return 'svelte' in description
+    else:
+      return False
+
+  def IsDisplayTracingSupported(self):
+    return bool(self.GetOSVersionName() >= 'J')
+
+  def StartDisplayTracing(self):
+    assert not self._surface_stats_collector
+    # Clear any leftover data from previous timed out tests
+    self._raw_display_frame_rate_measurements = []
+    self._surface_stats_collector = \
+        surface_stats_collector.SurfaceStatsCollector(self._device)
+    self._surface_stats_collector.Start()
+
+  def StopDisplayTracing(self):
+    if not self._surface_stats_collector:
+      return
+
+    try:
+      refresh_period, timestamps = self._surface_stats_collector.Stop()
+      pid = self._surface_stats_collector.GetSurfaceFlingerPid()
+    finally:
+      self._surface_stats_collector = None
+    # TODO(sullivan): should this code be inline, or live elsewhere?
+    events = []
+    for ts in timestamps:
+      events.append({
+        'cat': 'SurfaceFlinger',
+        'name': 'vsync_before',
+        'ts': ts,
+        'pid': pid,
+        'tid': pid,
+        'args': {'data': {
+          'frame_count': 1,
+          'refresh_period': refresh_period,
+        }}
+      })
+    return events
+
+  def CanTakeScreenshot(self):
+    return True
+
+  def TakeScreenshot(self, file_path):
+    return bool(self._device.TakeScreenshot(host_path=file_path))
+
+  def SetFullPerformanceModeEnabled(self, enabled):
+    if not self._enable_performance_mode:
+      logging.warning('CPU governor will not be set!')
+      return
+    if enabled:
+      self._perf_tests_setup.SetHighPerfMode()
+    else:
+      self._perf_tests_setup.SetDefaultPerfMode()
+
+  def CanMonitorThermalThrottling(self):
+    return True
+
+  def IsThermallyThrottled(self):
+    return self._thermal_throttle.IsThrottled()
+
+  def HasBeenThermallyThrottled(self):
+    return self._thermal_throttle.HasBeenThrottled()
+
+  def GetCpuStats(self, pid):
+    if not self._can_access_protected_file_contents:
+      logging.warning('CPU stats cannot be retrieved on non-rooted device.')
+      return {}
+    return super(AndroidPlatformBackend, self).GetCpuStats(pid)
+
+  def GetCpuTimestamp(self):
+    if not self._can_access_protected_file_contents:
+      logging.warning('CPU timestamp cannot be retrieved on non-rooted device.')
+      return {}
+    return super(AndroidPlatformBackend, self).GetCpuTimestamp()
+
+  def SetGraphicsMemoryTrackingEnabled(self, enabled):
+    if not enabled:
+      self.KillApplication('memtrack_helper')
+      return
+
+    if not android_prebuilt_profiler_helper.InstallOnDevice(
+        self._device, 'memtrack_helper'):
+      raise Exception('Error installing memtrack_helper.')
+    try:
+      cmd = android_prebuilt_profiler_helper.GetDevicePath('memtrack_helper')
+      cmd += ' -d'
+      self._device.RunShellCommand(cmd, as_root=True, check_return=True)
+    except Exception:
+      logging.exception('New exception caused by DeviceUtils conversion')
+      raise
+
+  def PurgeUnpinnedMemory(self):
+    """Purges the unpinned ashmem memory for the whole system.
+
+    This can be used to make memory measurements more stable. Requires root.
+    """
+    if not self._can_access_protected_file_contents:
+      logging.warning('Cannot run purge_ashmem. Requires a rooted device.')
+      return
+
+    if not android_prebuilt_profiler_helper.InstallOnDevice(
+        self._device, 'purge_ashmem'):
+      raise Exception('Error installing purge_ashmem.')
+    try:
+      output = self._device.RunShellCommand(
+          android_prebuilt_profiler_helper.GetDevicePath('purge_ashmem'))
+    except Exception:
+      logging.exception('New exception caused by DeviceUtils conversion')
+      raise
+    for l in output:
+      logging.info(l)
+
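+  # DeviceUtils.GetMemoryUsageForPid reports sizes in kilobytes (the
+  # underlying /proc values are kB), hence the * 1024 conversion to bytes.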
+  def GetMemoryStats(self, pid):
+    memory_usage = self._device.GetMemoryUsageForPid(pid)
+    if not memory_usage:
+      return {}
+    return {'ProportionalSetSize': memory_usage['Pss'] * 1024,
+            'SharedDirty': memory_usage['Shared_Dirty'] * 1024,
+            'PrivateDirty': memory_usage['Private_Dirty'] * 1024,
+            'VMPeak': memory_usage['VmHWM'] * 1024}
+
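+  # Android names an app's helper processes by suffixing the package name,
+  # e.g. 'com.android.chrome:sandboxed_process0', so "children" are found by
+  # name prefix rather than by ppid (the package name is illustrative).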
+  def GetChildPids(self, pid):
+    child_pids = []
+    ps = self.GetPsOutput(['pid', 'name'])
+    for curr_pid, curr_name in ps:
+      if int(curr_pid) == pid:
+        name = curr_name
+        for curr_pid, curr_name in ps:
+          if curr_name.startswith(name) and curr_name != name:
+            child_pids.append(int(curr_pid))
+        break
+    return child_pids
+
+  @decorators.Cache
+  def GetCommandLine(self, pid):
+    ps = self.GetPsOutput(['pid', 'name'], pid)
+    if not ps:
+      raise exceptions.ProcessGoneException()
+    return ps[0][1]
+
+  @decorators.Cache
+  def GetArchName(self):
+    return self._device.GetABI()
+
+  def GetOSName(self):
+    return 'android'
+
+  def GetDeviceTypeName(self):
+    return self._device.product_model
+
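+  # The first letter of ro.build.id encodes the Android release (e.g. a KitKat
+  # build id such as 'KTU84P' starts with 'K'), which is what the 'J'/'K'
+  # comparisons in IsDisplayTracingSupported and CanCaptureVideo rely on.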
+  @decorators.Cache
+  def GetOSVersionName(self):
+    return self._device.GetProp('ro.build.id')[0]
+
+  def CanFlushIndividualFilesFromSystemCache(self):
+    return False
+
+  def FlushEntireSystemCache(self):
+    cache = cache_control.CacheControl(self._device)
+    cache.DropRamCaches()
+
+  def FlushSystemCacheForDirectory(self, directory):
+    raise NotImplementedError()
+
+  def FlushDnsCache(self):
+    self._device.RunShellCommand('ndc resolver flushdefaultif', as_root=True)
+
+  def StopApplication(self, application):
+    """Stop the given |application|.
+
+    Args:
+       application: The full package name string of the application to stop.
+    """
+    self._device.ForceStop(application)
+
+  def KillApplication(self, application):
+    """Kill the given |application|.
+
+    Might be used instead of ForceStop for efficiency reasons.
+
+    Args:
+      application: The full package name string of the application to kill.
+    """
+    assert isinstance(application, basestring)
+    self._device.KillAll(application, blocking=True, quiet=True)
+
+  def LaunchApplication(
+      self, application, parameters=None, elevate_privilege=False):
+    """Launches the given |application| with a list of |parameters| on the OS.
+
+    Args:
+      application: The full package name string of the application to launch.
+      parameters: A list of parameters to be passed to the ActivityManager.
+      elevate_privilege: Currently unimplemented on Android.
+    """
+    if elevate_privilege:
+      raise NotImplementedError("elevate_privilege isn't supported on android.")
+    if not parameters:
+      parameters = ''
+    result_lines = self._device.RunShellCommand('am start %s %s' %
+                                                (parameters, application))
+    for line in result_lines:
+      if line.startswith('Error: '):
+        raise ValueError('Failed to start "%s" with error\n  %s' %
+                         (application, line))
+
+  def IsApplicationRunning(self, application):
+    return len(self._device.GetPids(application)) > 0
+
+  def CanLaunchApplication(self, application):
+    if not self._installed_applications:
+      self._installed_applications = self._device.RunShellCommand(
+          'pm list packages')
+    return 'package:' + application in self._installed_applications
+
+  def InstallApplication(self, application):
+    self._installed_applications = None
+    self._device.Install(application)
+
+  @decorators.Cache
+  def CanCaptureVideo(self):
+    return self.GetOSVersionName() >= 'K'
+
+  def StartVideoCapture(self, min_bitrate_mbps):
+    """Starts the video capture at specified bitrate."""
+    min_bitrate_mbps = max(min_bitrate_mbps, 0.1)
+    if min_bitrate_mbps > 100:
+      raise ValueError('Android video capture cannot capture at %dmbps. '
+                       'Max capture rate is 100mbps.' % min_bitrate_mbps)
+    if self.is_video_capture_running:
+      self._video_recorder.Stop()
+    self._video_recorder = video_recorder.VideoRecorder(
+        self._device, megabits_per_second=min_bitrate_mbps)
+    self._video_recorder.Start(timeout=5)
+
+  @property
+  def is_video_capture_running(self):
+    return self._video_recorder is not None
+
+  def StopVideoCapture(self):
+    assert self.is_video_capture_running, 'Must start video capture first'
+    self._video_recorder.Stop()
+    video_file_obj = tempfile.NamedTemporaryFile()
+    self._video_recorder.Pull(video_file_obj.name)
+    self._video_recorder = None
+
+    return video.Video(video_file_obj)
+
+  def CanMonitorPower(self):
+    return self._power_monitor.CanMonitorPower()
+
+  def StartMonitoringPower(self, browser):
+    self._power_monitor.StartMonitoringPower(browser)
+
+  def StopMonitoringPower(self):
+    return self._power_monitor.StopMonitoringPower()
+
+  def CanMonitorNetworkData(self):
+    return self._device.build_version_sdk >= version_codes.LOLLIPOP
+
+  def GetNetworkData(self, browser):
+    return self._battery.GetNetworkData(browser._browser_backend.package)
+
+  def PathExists(self, device_path, timeout=None, retries=None):
+    """ Return whether the given path exists on the device.
+    This method is the same as
+    devil.android.device_utils.DeviceUtils.PathExists.
+    """
+    return self._device.PathExists(
+        device_path, timeout=timeout, retries=retries)
+
+  def GetFileContents(self, fname):
+    if not self._can_access_protected_file_contents:
+      logging.warning('%s cannot be retrieved on non-rooted device.' % fname)
+      return ''
+    return self._device.ReadFile(fname, as_root=True)
+
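+  # A typical Android 'ps' row looks roughly like
+  # 'u0_a23 7702 167 1083312 108004 ffffffff 00000000 S com.android.chrome',
+  # so the pid is the second column and the process name is the last one
+  # (the values above are illustrative).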
+  def GetPsOutput(self, columns, pid=None):
+    assert columns == ['pid', 'name'] or columns == ['pid'], \
+        'Only know how to return pid and name. Requested: %s' % columns
+    command = 'ps'
+    if pid:
+      command += ' -p %d' % pid
+    ps = self._device.RunShellCommand(command, large_output=True)[1:]
+    output = []
+    for line in ps:
+      data = line.split()
+      curr_pid = data[1]
+      curr_name = data[-1]
+      if columns == ['pid', 'name']:
+        output.append([curr_pid, curr_name])
+      else:
+        output.append([curr_pid])
+    return output
+
+  def RunCommand(self, command):
+    return '\n'.join(self._device.RunShellCommand(command))
+
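+  # A minimal worked example with made-up sysfs values:
+  #   ParseCStateSample({'cpu0': 'C0\nC1\n100\n200\n300\n400\n1000'})
+  # yields {'cpu0': {'WFI': 100, 'C1': 200, 'C0': 999999700}}: the final value
+  # is the sample time in seconds (scaled to microseconds), the Nexus 10 style
+  # 'C0' entry is renamed WFI, and each state's time is subtracted from 'C0'.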
+  @staticmethod
+  def ParseCStateSample(sample):
+    sample_stats = {}
+    for cpu in sample:
+      values = sample[cpu].splitlines()
+      # Each state has three values after excluding the time value.
+      num_states = (len(values) - 1) / 3
+      names = values[:num_states]
+      times = values[num_states:2 * num_states]
+      cstates = {'C0': int(values[-1]) * 10 ** 6}
+      for i, state in enumerate(names):
+        if state == 'C0':
+          # The Exynos cpuidle driver for the Nexus 10 uses the name 'C0' for
+          # its WFI state.
+          # TODO(tmandel): We should verify that no other Android device
+          # actually reports time in C0 causing this to report active time as
+          # idle time.
+          state = 'WFI'
+        cstates[state] = int(times[i])
+        cstates['C0'] -= int(times[i])
+      sample_stats[cpu] = cstates
+    return sample_stats
+
+  def SetRelaxSslCheck(self, value):
+    old_flag = self._device.GetProp('socket.relaxsslcheck')
+    self._device.SetProp('socket.relaxsslcheck', value)
+    return old_flag
+
+  def ForwardHostToDevice(self, host_port, device_port):
+    self._device.adb.Forward('tcp:%d' % host_port, device_port)
+
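+  # 'adb forward --list' prints one '<serial> tcp:<host_port> tcp:<device_port>'
+  # entry per forwarded port (the format may vary across adb versions), which
+  # is what the positional parsing below assumes.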
+  def StopForwardingHost(self, host_port):
+    for line in self._device.adb.ForwardList().strip().splitlines():
+      line = line.split(' ')
+      if line[0] == self._device and line[1] == 'tcp:%s' % host_port:
+        self._device.adb.ForwardRemove('tcp:%d' % host_port)
+        break
+    else:
+      logging.warning('Port %s not found in adb forward --list for device %s',
+                      host_port, self._device)
+
+  def DismissCrashDialogIfNeeded(self):
+    """Dismiss any error dialogs.
+
+    Limit the number of attempts in case we hit an error loop or repeatedly
+    fail to dismiss the dialog.
+    """
+    for _ in xrange(10):
+      if not self._device.DismissCrashDialogIfNeeded():
+        break
+
+  def IsAppRunning(self, process_name):
+    """Determine if the given process is running.
+
+    Args:
+      process_name: The full package name string of the process.
+    """
+    return bool(self._device.GetPids(process_name))
+
+  @property
+  def supports_test_ca(self):
+    # TODO(nednguyen): figure out how to install certificate on Android M
+    # crbug.com/593152
+    return self._device.build_version_sdk <= version_codes.LOLLIPOP_MR1
+
+  def InstallTestCa(self, ca_cert_path):
+    """Install a randomly generated root CA on the android device.
+
+    This allows transparent HTTPS testing with the WPR server without needing
+    to tweak the application's network stack.
+
+    Note: If this method fails with any exception, then RemoveTestCa will be
+    automatically called by the network_controller_backend.
+    """
+    if self._device_cert_util is not None:
+      logging.warning('Test certificate authority is already installed.')
+      return
+    self._device_cert_util = adb_install_cert.AndroidCertInstaller(
+        self._device.adb.GetDeviceSerial(), None, ca_cert_path)
+    self._device_cert_util.install_cert(overwrite_cert=True)
+
+  def RemoveTestCa(self):
+    """Remove root CA from device installed by InstallTestCa.
+
+    Note: Any exceptions raised by this method will be logged but dismissed by
+    the network_controller_backend.
+    """
+    if self._device_cert_util is not None:
+      try:
+        self._device_cert_util.remove_cert()
+      finally:
+        self._device_cert_util = None
+
+  def PushProfile(self, package, new_profile_dir):
+    """Replace application profile with files found on host machine.
+
+    Pushing the profile is slow, so we don't want to do it every time.
+    Avoid this by pushing to a safe location using PushChangedFiles, and
+    then copying into the correct location on each test run.
+
+    Args:
+      package: The full package name string of the application for which the
+        profile is to be updated.
+      new_profile_dir: Location where profile to be pushed is stored on the
+        host machine.
+    """
+    (profile_parent, profile_base) = os.path.split(new_profile_dir)
+    # If the path ends with a '/' python split will return an empty string for
+    # the base name; so we now need to get the base name from the directory.
+    if not profile_base:
+      profile_base = os.path.basename(profile_parent)
+
+    saved_profile_location = '/sdcard/profile/%s' % profile_base
+    self._device.PushChangedFiles([(new_profile_dir, saved_profile_location)])
+
+    profile_dir = self._GetProfileDir(package)
+    try:
+      self._EfficientDeviceDirectoryCopy(
+          saved_profile_location, profile_dir)
+    except Exception:
+      logging.exception('New exception caused by DeviceUtils conversion')
+      raise
+    dumpsys = self._device.RunShellCommand('dumpsys package %s' % package)
+    id_line = next(line for line in dumpsys if 'userId=' in line)
+    uid = re.search(r'\d+', id_line).group()
+    files = self._device.RunShellCommand(
+        'ls "%s"' % profile_dir, as_root=True)
+    files.remove('lib')
+    paths = ['%s%s' % (profile_dir, f) for f in files]
+    for path in paths:
+      extended_path = '%s %s/* %s/*/* %s/*/*/*' % (path, path, path, path)
+      self._device.RunShellCommand(
+          'chown %s.%s %s' % (uid, uid, extended_path))
+
+  def _EfficientDeviceDirectoryCopy(self, source, dest):
+    if not self._device_copy_script:
+      self._device.adb.Push(
+          _DEVICE_COPY_SCRIPT_FILE,
+          _DEVICE_COPY_SCRIPT_LOCATION)
+      self._device_copy_script = _DEVICE_COPY_SCRIPT_FILE
+    self._device.RunShellCommand(
+        ['sh', self._device_copy_script, source, dest])
+
+  def RemoveProfile(self, package, ignore_list):
+    """Delete application profile on device.
+
+    Args:
+      package: The full package name string of the application for which the
+        profile is to be deleted.
+      ignore_list: List of files to keep.
+    """
+    profile_dir = self._GetProfileDir(package)
+    files = self._device.RunShellCommand(
+        'ls "%s"' % profile_dir, as_root=True)
+    paths = ['"%s%s"' % (profile_dir, f) for f in files
+             if f not in ignore_list]
+    self._device.RunShellCommand('rm -r %s' % ' '.join(paths), as_root=True)
+
+  def PullProfile(self, package, output_profile_path):
+    """Copy application profile from device to host machine.
+
+    Args:
+      package: The full package name string of the application for which the
+        profile is to be copied.
+      output_profile_path: Location on the host machine where the profile is
+        to be stored.
+    """
+    profile_dir = self._GetProfileDir(package)
+    logging.info("Pulling profile directory from device: '%s'->'%s'.",
+                 profile_dir, output_profile_path)
+    # To minimize bandwidth it might be good to look at whether all the data
+    # pulled down is really needed e.g. .pak files.
+    if not os.path.exists(output_profile_path):
+      os.makedirs(output_profile_path)
+    try:
+      files = self._device.RunShellCommand(['ls', profile_dir])
+    except Exception:
+      logging.exception('New exception caused by DeviceUtils conversion')
+      raise
+    for f in files:
+      # Don't pull lib, since it is created by the installer.
+      if f != 'lib':
+        source = '%s%s' % (profile_dir, f)
+        dest = os.path.join(output_profile_path, f)
+        try:
+          self._device.PullFile(source, dest, timeout=240)
+        except device_errors.CommandFailedError:
+          logging.exception('Failed to pull %s to %s', source, dest)
+
+  def _GetProfileDir(self, package):
+    """Returns the on-device location where the application profile is stored
+    based on Android convention.
+
+    Args:
+      package: The full package name string of the application.
+    """
+    return '/data/data/%s/' % package
+
+  def SetDebugApp(self, package):
+    """Set application to debugging.
+
+    Args:
+      package: The full package name string of the application.
+    """
+    if self._device.IsUserBuild():
+      logging.debug('User build device, setting debug app')
+      self._device.RunShellCommand('am set-debug-app --persistent %s' % package)
+
+  def GetLogCat(self, number_of_lines=500):
+    """Returns most recent lines of logcat dump.
+
+    Args:
+      number_of_lines: Number of lines of log to return.
+    """
+    return '\n'.join(self._device.RunShellCommand(
+        'logcat -d -t %d' % number_of_lines))
+
+  def GetStandardOutput(self):
+    return None
+
+  def GetStackTrace(self):
+    """Returns stack trace.
+
+    The stack trace consists of raw logcat dump, logcat dump with symbols,
+    and stack info from tombstone files.
+    """
+    def Decorate(title, content):
+      return "%s\n%s\n%s\n" % (title, content, '*' * 80)
+    # Get the last lines of logcat (large enough to contain stacktrace)
+    logcat = self.GetLogCat()
+    ret = Decorate('Logcat', logcat)
+    stack = os.path.join(util.GetChromiumSrcDir(), 'third_party',
+                         'android_platform', 'development', 'scripts', 'stack')
+    # Try to symbolize logcat.
+    if os.path.exists(stack):
+      cmd = [stack]
+      cmd.append('--arch=%s' % self.GetArchName())
+      p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+      ret += Decorate('Stack from Logcat', p.communicate(input=logcat)[0])
+
+    # Try to get tombstones.
+    tombstones = os.path.join(util.GetChromiumSrcDir(), 'build', 'android',
+                              'tombstones.py')
+    if os.path.exists(tombstones):
+      ret += Decorate('Tombstones',
+                      subprocess.Popen([tombstones, '-w', '--device',
+                                        self._device.adb.GetDeviceSerial()],
+                                       stdout=subprocess.PIPE).communicate()[0])
+    return ret
+
+  def IsScreenOn(self):
+    """Determines if device screen is on."""
+    return self._device.IsScreenOn()
+
+  @staticmethod
+  def _IsScreenLocked(input_methods):
+    """Parser method for IsScreenLocked()
+
+    Args:
+      input_methods: Output from dumpsys input_methods
+
+    Returns:
+      boolean: True if the screen is locked, False otherwise.
+
+    Raises:
+      ValueError: An unknown value is found for the screen lock state.
+      AndroidDeviceParsingError: Error in detecting screen state.
+
+    """
+    for line in input_methods:
+      if 'mHasBeenInactive' in line:
+        for pair in line.strip().split(' '):
+          key, value = pair.split('=', 1)
+          if key == 'mHasBeenInactive':
+            if value == 'true':
+              return True
+            elif value == 'false':
+              return False
+            else:
+              raise ValueError('Unknown value for %s: %s' % (key, value))
+    raise exceptions.AndroidDeviceParsingError(str(input_methods))
+
+  def IsScreenLocked(self):
+    """Determines if device screen is locked."""
+    input_methods = self._device.RunShellCommand('dumpsys input_method',
+                                                 check_return=True)
+    return self._IsScreenLocked(input_methods)
+
+def _FixPossibleAdbInstability():
+  """Host side workaround for crbug.com/268450 (adb instability).
+
+  The adb server has a race which is mitigated by binding to a single core.
+  """
+  if not psutil:
+    return
+  for process in psutil.process_iter():
+    try:
+      if psutil.version_info >= (2, 0):
+        if 'adb' in process.name():
+          process.cpu_affinity([0])
+      else:
+        if 'adb' in process.name:
+          process.set_cpu_affinity([0])
+    except (psutil.NoSuchProcess, psutil.AccessDenied):
+      logging.warn('Failed to set adb process CPU affinity')
diff --git a/catapult/telemetry/telemetry/internal/platform/android_platform_backend_unittest.py b/catapult/telemetry/telemetry/internal/platform/android_platform_backend_unittest.py
new file mode 100644
index 0000000..b3d48ce
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/android_platform_backend_unittest.py
@@ -0,0 +1,212 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry import decorators
+from telemetry.internal.platform import android_device
+from telemetry.internal.platform import android_platform_backend
+from telemetry.testing import system_stub
+import mock
+
+from devil.android import battery_utils
+from devil.android import device_utils
+
+class AndroidPlatformBackendTest(unittest.TestCase):
+  def setUp(self):
+    self._stubs = system_stub.Override(
+        android_platform_backend,
+        ['perf_control', 'thermal_throttle'])
+
+    # Skip _FixPossibleAdbInstability by setting psutil to None.
+    self._actual_ps_util = android_platform_backend.psutil
+    android_platform_backend.psutil = None
+    self.battery_patcher = mock.patch.object(battery_utils, 'BatteryUtils')
+    self.battery_patcher.start()
+
+    def get_prop(name, cache=None):
+      del cache  # unused
+      return {'ro.product.cpu.abi': 'armeabi-v7a'}.get(name)
+
+    self.device_patcher = mock.patch.multiple(
+        device_utils.DeviceUtils,
+        HasRoot=mock.MagicMock(return_value=True),
+        GetProp=mock.MagicMock(side_effect=get_prop))
+    self.device_patcher.start()
+
+  def tearDown(self):
+    self._stubs.Restore()
+    android_platform_backend.psutil = self._actual_ps_util
+    self.battery_patcher.stop()
+    self.device_patcher.stop()
+
+  @decorators.Disabled('chromeos')
+  def testIsSvelte(self):
+    with mock.patch('devil.android.device_utils.DeviceUtils.GetProp',
+                    return_value='svelte'):
+      backend = android_platform_backend.AndroidPlatformBackend(
+          android_device.AndroidDevice('12345'))
+      self.assertTrue(backend.IsSvelte())
+
+  @decorators.Disabled('chromeos')
+  def testIsNotSvelte(self):
+    with mock.patch('devil.android.device_utils.DeviceUtils.GetProp',
+                    return_value='foo'):
+      backend = android_platform_backend.AndroidPlatformBackend(
+          android_device.AndroidDevice('12345'))
+      self.assertFalse(backend.IsSvelte())
+
+  @decorators.Disabled('chromeos')
+  def testGetCpuStats(self):
+    proc_stat_content = (
+        '7702 (.android.chrome) S 167 167 0 0 -1 1077936448 '
+        '3247 0 0 0 4 1 0 0 20 0 9 0 5603962 337379328 5867 '
+        '4294967295 1074458624 1074463824 3197495984 3197494152 '
+        '1074767676 0 4612 0 38136 4294967295 0 0 17 0 0 0 0 0 0 '
+        '1074470376 1074470912 1102155776\n')
+    with mock.patch('devil.android.device_utils.DeviceUtils.ReadFile',
+                    return_value=proc_stat_content):
+      backend = android_platform_backend.AndroidPlatformBackend(
+          android_device.AndroidDevice('12345'))
+      cpu_stats = backend.GetCpuStats('7702')
+      self.assertEquals(cpu_stats, {'CpuProcessTime': 0.05})
+
+  @decorators.Disabled('chromeos')
+  def testGetCpuStatsInvalidPID(self):
+    # Mock an empty /proc/pid/stat.
+    with mock.patch('devil.android.device_utils.DeviceUtils.ReadFile',
+                    return_value=''):
+      backend = android_platform_backend.AndroidPlatformBackend(
+          android_device.AndroidDevice('1234'))
+      cpu_stats = backend.GetCpuStats('7702')
+      self.assertEquals(cpu_stats, {})
+
+  def testAndroidParseCpuStates(self):
+    cstate = {
+      'cpu0': 'C0\nC1\n103203424\n5342040\n300\n500\n1403232500',
+      'cpu1': 'C0\n124361858\n300\n1403232500'
+    }
+    expected_cstate = {
+      'cpu0': {
+        'WFI': 103203424,
+        'C0': 1403232391454536,
+        'C1': 5342040
+      },
+      'cpu1': {
+        'WFI': 124361858,
+        'C0': 1403232375638142
+      }
+    }
+    # Use mock start and end times so that the test can calculate C0.
+    result = android_platform_backend.AndroidPlatformBackend.ParseCStateSample(
+        cstate)
+    for cpu in result:
+      for state in result[cpu]:
+        self.assertAlmostEqual(result[cpu][state], expected_cstate[cpu][state])
+
+  def testInstallTestCaSuccess(self):
+    backend = android_platform_backend.AndroidPlatformBackend(
+        android_device.AndroidDevice('success'))
+    with mock.patch('adb_install_cert.AndroidCertInstaller'):
+      backend.InstallTestCa('testca.pem')
+      self.assertIsNotNone(backend._device_cert_util)
+
+      backend.RemoveTestCa()
+      self.assertIsNone(backend._device_cert_util)
+
+  def testIsScreenLockedTrue(self):
+    test_input = ['a=b', 'mHasBeenInactive=true']
+    backend = android_platform_backend.AndroidPlatformBackend(
+        android_device.AndroidDevice('success'))
+    self.assertTrue(backend._IsScreenLocked(test_input))
+
+  def testIsScreenLockedFalse(self):
+    test_input = ['a=b', 'mHasBeenInactive=false']
+    backend = android_platform_backend.AndroidPlatformBackend(
+        android_device.AndroidDevice('success'))
+    self.assertFalse(backend._IsScreenLocked(test_input))
+
+
+class AndroidPlatformBackendPsutilTest(unittest.TestCase):
+
+  class psutil_1_0(object):
+    version_info = (1, 0)
+    def __init__(self):
+      self.set_cpu_affinity_args = []
+    class Process(object):
+      def __init__(self, parent):
+        self._parent = parent
+        self.name = 'adb'
+      def set_cpu_affinity(self, cpus):
+        self._parent.set_cpu_affinity_args.append(cpus)
+    def process_iter(self):
+      return [self.Process(self)]
+
+  class psutil_2_0(object):
+    version_info = (2, 0)
+    def __init__(self):
+      self.set_cpu_affinity_args = []
+    class Process(object):
+      def __init__(self, parent):
+        self._parent = parent
+        self.set_cpu_affinity_args = []
+      def name(self):
+        return 'adb'
+      def cpu_affinity(self, cpus=None):
+        self._parent.set_cpu_affinity_args.append(cpus)
+    def process_iter(self):
+      return [self.Process(self)]
+
+  def setUp(self):
+    self._stubs = system_stub.Override(
+        android_platform_backend,
+        ['perf_control'])
+    self.battery_patcher = mock.patch.object(battery_utils, 'BatteryUtils')
+    self.battery_patcher.start()
+    self._actual_ps_util = android_platform_backend.psutil
+
+    def get_prop(name, cache=None):
+      del cache  # unused
+      return {'ro.product.cpu.abi': 'armeabi-v7a'}.get(name)
+
+    self.device_patcher = mock.patch.multiple(
+        device_utils.DeviceUtils,
+        FileExists=mock.MagicMock(return_value=False),
+        GetProp=mock.MagicMock(side_effect=get_prop),
+        HasRoot=mock.MagicMock(return_value=True))
+    self.device_patcher.start()
+
+  def tearDown(self):
+    self._stubs.Restore()
+    android_platform_backend.psutil = self._actual_ps_util
+    self.battery_patcher.stop()
+    self.device_patcher.stop()
+
+  @decorators.Disabled('chromeos')
+  def testPsutil1(self):
+    psutil = self.psutil_1_0()
+    android_platform_backend.psutil = psutil
+
+    # Mock an empty /proc/pid/stat.
+    with mock.patch('devil.android.device_utils.DeviceUtils.ReadFile',
+                    return_value=''):
+      backend = android_platform_backend.AndroidPlatformBackend(
+          android_device.AndroidDevice('1234'))
+      cpu_stats = backend.GetCpuStats('7702')
+      self.assertEquals({}, cpu_stats)
+      self.assertEquals([[0]], psutil.set_cpu_affinity_args)
+
+  @decorators.Disabled('chromeos')
+  def testPsutil2(self):
+    psutil = self.psutil_2_0()
+    android_platform_backend.psutil = psutil
+
+    # Mock an empty /proc/pid/stat.
+    with mock.patch('devil.android.device_utils.DeviceUtils.ReadFile',
+                    return_value=''):
+      backend = android_platform_backend.AndroidPlatformBackend(
+          android_device.AndroidDevice('1234'))
+      cpu_stats = backend.GetCpuStats('7702')
+      self.assertEquals({}, cpu_stats)
+      self.assertEquals([[0]], psutil.set_cpu_affinity_args)
diff --git a/catapult/telemetry/telemetry/internal/platform/cros_device.py b/catapult/telemetry/telemetry/internal/platform/cros_device.py
new file mode 100644
index 0000000..f27ebd1
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/cros_device.py
@@ -0,0 +1,54 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import logging
+
+from telemetry.core import cros_interface
+from telemetry.core import platform
+from telemetry.internal.platform import device
+
+
+class CrOSDevice(device.Device):
+  def __init__(self, host_name, ssh_port, ssh_identity, is_local):
+    super(CrOSDevice, self).__init__(
+        name='ChromeOs with host %s' % (host_name or 'localhost'),
+        guid='cros:%s' % (host_name or 'localhost'))
+    self._host_name = host_name
+    self._ssh_port = ssh_port
+    self._ssh_identity = ssh_identity
+    self._is_local = is_local
+
+  @classmethod
+  def GetAllConnectedDevices(cls, blacklist):
+    return []
+
+  @property
+  def host_name(self):
+    return self._host_name
+
+  @property
+  def ssh_port(self):
+    return self._ssh_port
+
+  @property
+  def ssh_identity(self):
+    return self._ssh_identity
+
+  @property
+  def is_local(self):
+    return self._is_local
+
+
+def IsRunningOnCrOS():
+  return platform.GetHostPlatform().GetOSName() == 'chromeos'
+
+
+def FindAllAvailableDevices(options):
+  """Returns a list of available device types."""
+  use_ssh = options.cros_remote and cros_interface.HasSSH()
+  if not use_ssh and not IsRunningOnCrOS():
+    logging.debug('No --remote specified, and not running on ChromeOs.')
+    return []
+
+  return [CrOSDevice(options.cros_remote, options.cros_remote_ssh_port,
+                     options.cros_ssh_identity, not use_ssh)]
diff --git a/catapult/telemetry/telemetry/internal/platform/cros_platform_backend.py b/catapult/telemetry/telemetry/internal/platform/cros_platform_backend.py
new file mode 100644
index 0000000..d20bcbc
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/cros_platform_backend.py
@@ -0,0 +1,164 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+
+from telemetry.core import cros_interface
+from telemetry.core import platform
+from telemetry.core import util
+from telemetry.internal import forwarders
+from telemetry.internal.forwarders import cros_forwarder
+from telemetry.internal.platform import cros_device
+from telemetry.internal.platform import linux_based_platform_backend
+from telemetry.internal.platform.power_monitor import cros_power_monitor
+from telemetry.internal.util import ps_util
+
+
+class CrosPlatformBackend(
+    linux_based_platform_backend.LinuxBasedPlatformBackend):
+  def __init__(self, device=None):
+    super(CrosPlatformBackend, self).__init__(device)
+    if device and not device.is_local:
+      self._cri = cros_interface.CrOSInterface(
+          device.host_name, device.ssh_port, device.ssh_identity)
+      self._cri.TryLogin()
+    else:
+      self._cri = cros_interface.CrOSInterface()
+    self._powermonitor = cros_power_monitor.CrosPowerMonitor(self)
+
+  @classmethod
+  def IsPlatformBackendForHost(cls):
+    return util.IsRunningOnCrosDevice()
+
+  @classmethod
+  def SupportsDevice(cls, device):
+    return isinstance(device, cros_device.CrOSDevice)
+
+  @classmethod
+  def CreatePlatformForDevice(cls, device, finder_options):
+    assert cls.SupportsDevice(device)
+    return platform.Platform(CrosPlatformBackend(device))
+
+  @property
+  def cri(self):
+    return self._cri
+
+  @property
+  def forwarder_factory(self):
+    if not self._forwarder_factory:
+      self._forwarder_factory = cros_forwarder.CrOsForwarderFactory(self._cri)
+    return self._forwarder_factory
+
+  def GetRemotePort(self, port):
+    if self._cri.local:
+      return port
+    return self._cri.GetRemotePort()
+
+  def GetWprPortPairs(self):
+    """Return suitable port pairs to be used for web page replay."""
+    default_local_ports = super(CrosPlatformBackend, self).GetWprPortPairs(
+        ).local_ports
+    return forwarders.PortPairs.Zip(
+        default_local_ports,
+        forwarders.PortSet(
+          http=self.GetRemotePort(default_local_ports.http),
+          https=self.GetRemotePort(default_local_ports.https),
+          dns=None))
+
+  def IsThermallyThrottled(self):
+    raise NotImplementedError()
+
+  def HasBeenThermallyThrottled(self):
+    raise NotImplementedError()
+
+  def RunCommand(self, args):
+    if not isinstance(args, list):
+      args = [args]
+    stdout, stderr = self._cri.RunCmdOnDevice(args)
+    if stderr:
+      raise IOError('Failed to run: cmd = %s, stderr = %s' %
+                    (str(args), stderr))
+    return stdout
+
+  def GetFileContents(self, filename):
+    try:
+      return self.RunCommand(['cat', filename])
+    except AssertionError:
+      return ''
+
+  def GetPsOutput(self, columns, pid=None):
+    return ps_util.GetPsOutputWithPlatformBackend(self, columns, pid)
+
+  @staticmethod
+  def ParseCStateSample(sample):
+    sample_stats = {}
+    for cpu in sample:
+      values = sample[cpu].splitlines()
+      # There are three values per state after excluding the single time value.
+      num_states = (len(values) - 1) / 3
+      names = values[:num_states]
+      times = values[num_states:2 * num_states]
+      latencies = values[2 * num_states:]
+      # The last line in the sample contains the time.
+      cstates = {'C0': int(values[-1]) * 10 ** 6}
+      for i, state in enumerate(names):
+        if names[i] == 'POLL' and not int(latencies[i]):
+          # C0 state. Kernel stats aren't right, so calculate by
+          # subtracting all other states from total time (using epoch
+          # timer since we calculate differences in the end anyway).
+          # NOTE: Only x86 lists C0 under cpuidle, ARM does not.
+          continue
+        cstates['C0'] -= int(times[i])
+        if names[i] == '<null>':
+          # Kernel race condition that can happen while a new C-state gets
+          # added (e.g. AC->battery). Don't know the 'name' of the state
+          # yet, but its 'time' would be 0 anyway.
+          continue
+        cstates[state] = int(times[i])
+      sample_stats[cpu] = cstates
+    return sample_stats
+
+  def GetOSName(self):
+    return 'chromeos'
+
+  def GetOSVersionName(self):
+    return ''  # TODO: Implement this.
+
+  def GetChildPids(self, pid):
+    """Returns a list of child pids of |pid|."""
+    all_process_info = self._cri.ListProcesses()
+    processes = [(curr_pid, curr_ppid, curr_state)
+                 for curr_pid, _, curr_ppid, curr_state in all_process_info]
+    return ps_util.GetChildPids(processes, pid)
+
+  def GetCommandLine(self, pid):
+    procs = self._cri.ListProcesses()
+    return next((proc[1] for proc in procs if proc[0] == pid), None)
+
+  def CanFlushIndividualFilesFromSystemCache(self):
+    return True
+
+  def FlushEntireSystemCache(self):
+    raise NotImplementedError()
+
+  def FlushSystemCacheForDirectory(self, directory):
+    flush_command = (
+        '/usr/local/telemetry/src/src/out/Release/clear_system_cache')
+    self.RunCommand(['chmod', '+x', flush_command])
+    self.RunCommand([flush_command, '--recurse', directory])
+
+  def CanMonitorPower(self):
+    return self._powermonitor.CanMonitorPower()
+
+  def StartMonitoringPower(self, browser):
+    self._powermonitor.StartMonitoringPower(browser)
+
+  def StopMonitoringPower(self):
+    return self._powermonitor.StopMonitoringPower()
+
+  def PathExists(self, path, timeout=None, retries=None):
+    if timeout or retries:
+      logging.warning(
+          'PathExists: params timeout and retries are not supported on CrOS.')
+    return self._cri.FileExistsOnDevice(path)
diff --git a/catapult/telemetry/telemetry/internal/platform/cros_platform_backend_unittest.py b/catapult/telemetry/telemetry/internal/platform/cros_platform_backend_unittest.py
new file mode 100644
index 0000000..d6c8ac7
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/cros_platform_backend_unittest.py
@@ -0,0 +1,38 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.internal.platform import cros_platform_backend
+
+
+class CrosPlatformBackendTest(unittest.TestCase):
+  initial_cstate = {
+    'cpu0': 'POLL\nC1\nC2\nC3\n0\n138356189\n102416540\n'
+            '17158209182\n0\n1\n500\n1000\n1403211341',
+    'cpu1': 'POLL\nC1\nC2\nC3\n0\n107318149\n81786238\n'
+            '17348563431\n0\n1\n500\n1000\n1403211341'
+  }
+  expected_cstate = {
+    'cpu0': {
+      'C0': 1403193942018089,
+      'C1': 138356189,
+      'C2': 102416540,
+      'C3': 17158209182
+    },
+    'cpu1': {
+      'C0': 1403193803332182,
+      'C1': 107318149,
+      'C2': 81786238,
+      'C3': 17348563431
+    }
+  }
+  def testCrosParseCpuStates(self):
+    # Use mock start and end times so that the test can calculate C0.
+    results = cros_platform_backend.CrosPlatformBackend.ParseCStateSample(
+        self.initial_cstate)
+    for cpu in results:
+      for state in results[cpu]:
+        self.assertAlmostEqual(results[cpu][state],
+                               self.expected_cstate[cpu][state])
diff --git a/catapult/telemetry/telemetry/internal/platform/desktop_device.py b/catapult/telemetry/telemetry/internal/platform/desktop_device.py
new file mode 100644
index 0000000..0a9f0fa
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/desktop_device.py
@@ -0,0 +1,24 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.core import platform
+from telemetry.internal.platform import device
+
+
+class DesktopDevice(device.Device):
+  def __init__(self):
+    super(DesktopDevice, self).__init__(name='desktop', guid='desktop')
+
+  @classmethod
+  def GetAllConnectedDevices(cls, blacklist):
+    return []
+
+
+def FindAllAvailableDevices(_):
+  """Returns a list of available devices.
+  """
+  # If the host platform is Chrome OS, the device is handled by the CrOS
+  # finder instead, so no desktop device is returned.
+  if platform.GetHostPlatform().GetOSName() == 'chromeos':
+    return []
+  return [DesktopDevice()]
diff --git a/catapult/telemetry/telemetry/internal/platform/desktop_platform_backend.py b/catapult/telemetry/telemetry/internal/platform/desktop_platform_backend.py
new file mode 100644
index 0000000..510b534
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/desktop_platform_backend.py
@@ -0,0 +1,27 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import subprocess
+
+from telemetry.internal.util import binary_manager
+from telemetry.internal.platform import platform_backend
+
+
+class DesktopPlatformBackend(platform_backend.PlatformBackend):
+
+  # This is an abstract class. It is OK to have abstract methods.
+  # pylint: disable=abstract-method
+
+  def FlushSystemCacheForDirectory(self, directory):
+    assert directory and os.path.exists(directory), \
+        'Target directory %s must exist' % directory
+    flush_command = binary_manager.FetchPath(
+        'clear_system_cache', self.GetArchName(), self.GetOSName())
+    assert flush_command, 'You must build clear_system_cache first'
+
+    subprocess.check_call([flush_command, '--recurse', directory])
+
+  def GetDeviceTypeName(self):
+    return 'Desktop'
diff --git a/catapult/telemetry/telemetry/internal/platform/device.py b/catapult/telemetry/telemetry/internal/platform/device.py
new file mode 100644
index 0000000..34ca721
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/device.py
@@ -0,0 +1,32 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class Device(object):
+  """ A base class of devices.
+  A device instance contains all the necessary information for constructing
+  a platform backend object for remote platforms.
+
+  Attributes:
+    name: A device name string in human-understandable term.
+    guid: A unique id of the device. Subclass of device must specify this
+      id properly so that device objects to a same actual device must have same
+      guid.
+    """
+
+  def __init__(self, name, guid):
+    self._name = name
+    self._guid = guid
+
+  @property
+  def name(self):
+    return self._name
+
+  @property
+  def guid(self):
+    return self._guid
+
+  @classmethod
+  def GetAllConnectedDevices(cls, blacklist):
+    raise NotImplementedError()
diff --git a/catapult/telemetry/telemetry/internal/platform/device_finder.py b/catapult/telemetry/telemetry/internal/platform/device_finder.py
new file mode 100644
index 0000000..20d9552
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/device_finder.py
@@ -0,0 +1,40 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Finds devices that can be controlled by telemetry."""
+
+from telemetry.internal.platform import android_device
+from telemetry.internal.platform import cros_device
+from telemetry.internal.platform import desktop_device
+from telemetry.internal.platform import ios_device
+
+DEVICES = [
+  android_device,
+  cros_device,
+  desktop_device,
+  ios_device,
+]
+
+
+def _GetAllAvailableDevices(options):
+  """Returns a list of all available devices."""
+  devices = []
+  for device in DEVICES:
+    devices.extend(device.FindAllAvailableDevices(options))
+  return devices
+
+
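+# A minimal usage sketch for GetDevicesMatchingOptions below (the options
+# object is hypothetical; in practice it is a
+# browser_options.BrowserFinderOptions() instance):
+#
+#   options.device = 'android'
+#   devices = GetDevicesMatchingOptions(options)  # Android devices only.
+#
+# With options.device unset or equal to 'list', all available devices are
+# returned; any other value is matched against device guids.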
+def GetDevicesMatchingOptions(options):
+  """Returns a list of devices matching the options."""
+  devices = []
+  if not options.device or options.device == 'list':
+    devices = _GetAllAvailableDevices(options)
+  elif options.device == 'android':
+    devices = android_device.FindAllAvailableDevices(options)
+  else:
+    devices = _GetAllAvailableDevices(options)
+    devices = [d for d in devices if d.guid == options.device]
+
+  devices.sort(key=lambda device: device.name)
+  return devices
diff --git a/catapult/telemetry/telemetry/internal/platform/efficient_android_directory_copy.sh b/catapult/telemetry/telemetry/internal/platform/efficient_android_directory_copy.sh
new file mode 100755
index 0000000..7021109
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/efficient_android_directory_copy.sh
@@ -0,0 +1,78 @@
+#!/system/bin/sh
+
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Android shell script to make the destination directory identical with the
+# source directory, without doing unnecessary copies. This assumes that the
+# destination directory was originally a copy of the source directory, and
+# has since been modified.
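+#
+# Usage (as invoked by AndroidPlatformBackend; the paths are illustrative):
+#   sh efficient_android_directory_copy.sh /sdcard/profile/foo /data/data/pkg/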
+
+source=$1
+dest=$2
+echo copying $source to $dest
+
+delete_extra() {
+  # Don't delete symbolic links, since doing so deletes the vital lib link.
+  if [ ! -L "$1" ]
+  then
+    if [ ! -e "$source/$1" ]
+    then
+      echo rm -rf "$dest/$1"
+      rm -rf "$dest/$1"
+    elif [ -d "$1" ]
+    then
+      for f in "$1"/*
+      do
+       delete_extra "$f"
+      done
+    fi
+  fi
+}
+
+copy_if_older() {
+  if [ -d "$1" ] && [ -e "$dest/$1" ]
+  then
+    if [ ! -e "$dest/$1" ]
+    then
+      echo cp -a "$1" "$dest/$1"
+      cp -a "$1" "$dest/$1"
+    else
+      for f in "$1"/*
+      do
+        copy_if_older "$f"
+      done
+    fi
+  elif [ ! -e "$dest/$1" ] || [ "$1" -ot "$dest/$1" ] || [ "$1" -nt "$dest/$1" ]
+  then
+    # dates are different, so either the destination or the source has changed.
+    echo cp -a "$1" "$dest/$1"
+    cp -a "$1" "$dest/$1"
+  fi
+}
+
+if [ -e "$dest" ]
+then
+  echo cd "$dest"
+  cd "$dest"
+  for f in ./*
+  do
+    if [ -e "$f" ]
+    then
+      delete_extra "$f"
+    fi
+  done
+else
+  echo mkdir "$dest"
+  mkdir "$dest"
+fi
+echo cd "$source"
+cd "$source"
+for f in ./*
+do
+  if [ -e "$f" ]
+  then
+    copy_if_older "$f"
+  fi
+done
diff --git a/catapult/telemetry/telemetry/internal/platform/gpu_device.py b/catapult/telemetry/telemetry/internal/platform/gpu_device.py
new file mode 100644
index 0000000..926f927
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/gpu_device.py
@@ -0,0 +1,84 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+class GPUDevice(object):
+  """Provides information about an individual GPU device.
+
+     On platforms which support them, the vendor_id and device_id are
+     PCI IDs. On other platforms, the vendor_string and device_string
+     are platform-dependent strings.
+  """
+
+  _VENDOR_ID_MAP = {
+    0x1002: 'ATI',
+    0x8086: 'Intel',
+    0x10de: 'Nvidia',
+    }
+
+  def __init__(self, vendor_id, device_id, vendor_string, device_string):
+    self._vendor_id = vendor_id
+    self._device_id = device_id
+    self._vendor_string = vendor_string
+    self._device_string = device_string
+
+  def __str__(self):
+    vendor = 'VENDOR = 0x%x' % self._vendor_id
+    vendor_string = self._vendor_string
+    if not vendor_string and self._vendor_id in self._VENDOR_ID_MAP:
+      vendor_string = self._VENDOR_ID_MAP[self._vendor_id]
+    if vendor_string:
+      vendor += ' (%s)' % vendor_string
+    device = 'DEVICE = 0x%x' % self._device_id
+    if self._device_string:
+      device += ' (%s)' % self._device_string
+    return '%s, %s' % (vendor, device)
+
+  @classmethod
+  def FromDict(cls, attrs):
+    """Constructs a GPUDevice from a dictionary. Requires the
+       following attributes to be present in the dictionary:
+
+         vendor_id
+         device_id
+         vendor_string
+         device_string
+
+       Raises an exception if any attributes are missing.
+    """
+    return cls(attrs['vendor_id'], attrs['device_id'],
+               attrs['vendor_string'], attrs['device_string'])
+
+  @property
+  def vendor_id(self):
+    """The GPU vendor's PCI ID as a number, or 0 if not available.
+
+       Most desktop machines supply this information rather than the
+       vendor and device strings."""
+    return self._vendor_id
+
+  @property
+  def device_id(self):
+    """The GPU device's PCI ID as a number, or 0 if not available.
+
+       Most desktop machines supply this information rather than the
+       vendor and device strings."""
+    return self._device_id
+
+  @property
+  def vendor_string(self):
+    """The GPU vendor's name as a string, or the empty string if not
+       available.
+
+       Most mobile devices supply this information rather than the PCI
+       IDs."""
+    return self._vendor_string
+
+  @property
+  def device_string(self):
+    """The GPU device's name as a string, or the empty string if not
+       available.
+
+       Most mobile devices supply this information rather than the PCI
+       IDs."""
+    return self._device_string
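+
+# Illustrative usage only (a sketch; the PCI IDs below are hypothetical):
+#   device = GPUDevice.FromDict({'vendor_id': 0x10de, 'device_id': 0x11c0,
+#                                'vendor_string': '', 'device_string': ''})
+#   str(device)  # -> 'VENDOR = 0x10de (Nvidia), DEVICE = 0x11c0'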
diff --git a/catapult/telemetry/telemetry/internal/platform/gpu_device_unittest.py b/catapult/telemetry/telemetry/internal/platform/gpu_device_unittest.py
new file mode 100644
index 0000000..d4ba384
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/gpu_device_unittest.py
@@ -0,0 +1,46 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import unittest
+
+from telemetry.internal.platform import gpu_device
+
+
+class TestGPUDevice(unittest.TestCase):
+
+  def testConstruction(self):
+    device = gpu_device.GPUDevice(1000, 2000, 'test_vendor', 'test_device')
+    self.assertEquals(device.vendor_id, 1000)
+    self.assertEquals(device.device_id, 2000)
+    self.assertEquals(device.vendor_string, 'test_vendor')
+    self.assertEquals(device.device_string, 'test_device')
+
+  def testFromDict(self):
+    dictionary = {'vendor_id': 3000,
+                  'device_id': 4000,
+                  'vendor_string': 'test_vendor_2',
+                  'device_string': 'test_device_2'}
+    device = gpu_device.GPUDevice.FromDict(dictionary)
+    self.assertEquals(device.vendor_id, 3000)
+    self.assertEquals(device.device_id, 4000)
+    self.assertEquals(device.vendor_string, 'test_vendor_2')
+    self.assertEquals(device.device_string, 'test_device_2')
+
+  def testMissingAttrsFromDict(self):
+    data = {
+        'vendor_id': 1,
+        'device_id': 2,
+        'vendor_string': 'a',
+        'device_string': 'b'
+    }
+
+    for k in data:
+      data_copy = data.copy()
+      del data_copy[k]
+      try:
+        gpu_device.GPUDevice.FromDict(data_copy)
+        self.fail('Should raise exception if attribute "%s" is missing' % k)
+      except AssertionError:
+        raise
+      except KeyError:
+        pass
diff --git a/catapult/telemetry/telemetry/internal/platform/gpu_info.py b/catapult/telemetry/telemetry/internal/platform/gpu_info.py
new file mode 100644
index 0000000..40a327b
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/gpu_info.py
@@ -0,0 +1,67 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.internal.platform import gpu_device
+
+
+class GPUInfo(object):
+  """Provides information about the GPUs on the system."""
+
+  def __init__(self, device_array, aux_attributes,
+               feature_status, driver_bug_workarounds):
+    if device_array is None:
+      raise Exception('Missing required "devices" property')
+    if len(device_array) == 0:
+      raise Exception('Missing at least one GPU in device_array')
+
+    self._devices = [gpu_device.GPUDevice.FromDict(d) for d in device_array]
+    self._aux_attributes = aux_attributes
+    self._feature_status = feature_status
+    self._driver_bug_workarounds = driver_bug_workarounds
+
+  @classmethod
+  def FromDict(cls, attrs):
+    """Constructs a GPUInfo from a dictionary of attributes.
+
+    Attributes currently required to be present in the dictionary:
+      devices (array of dictionaries, each of which contains
+          GPUDevice's required attributes)
+    """
+    return cls(attrs['devices'], attrs.get('aux_attributes'),
+               attrs.get('feature_status'),
+               attrs.get('driver_bug_workarounds'))
+
+  @property
+  def devices(self):
+    """An array of GPUDevices. Element 0 is the primary GPU on the system."""
+    return self._devices
+
+  @property
+  def aux_attributes(self):
+    """Returns a dictionary of auxiliary, optional, attributes.
+
+    On the Chrome browser, for example, this dictionary contains:
+      optimus (boolean)
+      amd_switchable (boolean)
+      lenovo_dcute (boolean)
+      driver_vendor (string)
+      driver_version (string)
+      driver_date (string)
+      gl_version_string (string)
+      gl_vendor (string)
+      gl_renderer (string)
+      gl_extensions (string)
+      display_link_version (string)
+    """
+    return self._aux_attributes
+
+  @property
+  def feature_status(self):
+    """Returns an optional dictionary of graphics features and their status."""
+    return self._feature_status
+
+  @property
+  def driver_bug_workarounds(self):
+    """Returns an optional array of driver bug workarounds."""
+    return self._driver_bug_workarounds
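+
+# Illustrative usage only (a sketch; the attribute values are hypothetical):
+#   info = GPUInfo.FromDict({
+#       'devices': [{'vendor_id': 0x8086, 'device_id': 0x0166,
+#                    'vendor_string': '', 'device_string': ''}]})
+#   info.devices[0].vendor_id  # -> 0x8086
+#   info.aux_attributes        # -> None, since the optional keys were omitted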
diff --git a/catapult/telemetry/telemetry/internal/platform/gpu_info_unittest.py b/catapult/telemetry/telemetry/internal/platform/gpu_info_unittest.py
new file mode 100644
index 0000000..bef95d3
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/gpu_info_unittest.py
@@ -0,0 +1,84 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import unittest
+
+from telemetry.internal.platform import gpu_device
+from telemetry.internal.platform import gpu_info
+
+
+class TestGPUInfo(unittest.TestCase):
+
+  def testConstruction(self):
+    data = {
+        'devices': [
+            {'vendor_id': 1000, 'device_id': 2000,
+             'vendor_string': 'a', 'device_string': 'b'},
+            {'vendor_id': 3000, 'device_id': 4000,
+             'vendor_string': 'k', 'device_string': 'l'}
+        ],
+        'aux_attributes': {
+            'optimus': False,
+            'amd_switchable': False,
+            'lenovo_dcute': False,
+            'driver_vendor': 'c',
+            'driver_version': 'd',
+            'driver_date': 'e',
+            'gl_version_string': 'g',
+            'gl_vendor': 'h',
+            'gl_renderer': 'i',
+            'gl_extensions': 'j',
+        }
+    }
+    info = gpu_info.GPUInfo.FromDict(data)
+    self.assertTrue(len(info.devices) == 2)
+    self.assertTrue(isinstance(info.devices[0], gpu_device.GPUDevice))
+    self.assertEquals(info.devices[0].vendor_id, 1000)
+    self.assertEquals(info.devices[0].device_id, 2000)
+    self.assertEquals(info.devices[0].vendor_string, 'a')
+    self.assertEquals(info.devices[0].device_string, 'b')
+    self.assertTrue(isinstance(info.devices[1], gpu_device.GPUDevice))
+    self.assertEquals(info.devices[1].vendor_id, 3000)
+    self.assertEquals(info.devices[1].device_id, 4000)
+    self.assertEquals(info.devices[1].vendor_string, 'k')
+    self.assertEquals(info.devices[1].device_string, 'l')
+    self.assertEquals(info.aux_attributes['optimus'], False)
+    self.assertEquals(info.aux_attributes['amd_switchable'], False)
+    self.assertEquals(info.aux_attributes['lenovo_dcute'], False)
+    self.assertEquals(info.aux_attributes['driver_vendor'], 'c')
+    self.assertEquals(info.aux_attributes['driver_version'], 'd')
+    self.assertEquals(info.aux_attributes['driver_date'], 'e')
+    self.assertEquals(info.aux_attributes['gl_version_string'], 'g')
+    self.assertEquals(info.aux_attributes['gl_vendor'], 'h')
+    self.assertEquals(info.aux_attributes['gl_renderer'], 'i')
+    self.assertEquals(info.aux_attributes['gl_extensions'], 'j')
+
+  def testMissingAttrsFromDict(self):
+    data = {
+        'devices': [{'vendor_id': 1000, 'device_id': 2000,
+                     'vendor_string': 'a', 'device_string': 'b'}]
+    }
+
+    for k in data:
+      data_copy = data.copy()
+      del data_copy[k]
+      try:
+        gpu_info.GPUInfo.FromDict(data_copy)
+        self.fail('Should raise exception if attribute "%s" is missing' % k)
+      except AssertionError:
+        raise
+      except KeyError:
+        pass
+
+  def testMissingDevices(self):
+    data = {
+        'devices': []
+    }
+
+    try:
+      gpu_info.GPUInfo.FromDict(data)
+      self.fail('Should raise exception if devices array is empty')
+    except AssertionError:
+      raise
+    except Exception:
+      pass
diff --git a/catapult/telemetry/telemetry/internal/platform/ios_device.py b/catapult/telemetry/telemetry/internal/platform/ios_device.py
new file mode 100644
index 0000000..fac0280
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/ios_device.py
@@ -0,0 +1,72 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import logging
+import re
+import subprocess
+
+from telemetry.core import platform
+from telemetry.internal.platform import device
+
+
+IOSSIM_BUILD_DIRECTORIES = [
+    'Debug-iphonesimulator',
+    'Profile-iphonesimulator',
+    'Release-iphonesimulator'
+]
+
+class IOSDevice(device.Device):
+  def __init__(self):
+    super(IOSDevice, self).__init__(name='ios', guid='ios')
+
+  @classmethod
+  def GetAllConnectedDevices(cls, blacklist):
+    return []
+
+
+def _IsIosDeviceAttached():
+  devices = subprocess.check_output('system_profiler SPUSBDataType', shell=True)
+  for line in devices.split('\n'):
+    if line and re.match(r'\s*(iPod|iPhone|iPad):', line):
+      return True
+  return False
+
+def _IsIosSimulatorAvailable(chrome_root):
+  """Determines whether an iOS simulator is present in the local checkout.
+
+  Assumes the iOS simulator (iossim) and Chromium have already been built.
+
+  Returns:
+    True if at least one simulator is found, otherwise False.
+  """
+  for build_dir in IOSSIM_BUILD_DIRECTORIES:
+    iossim_path = os.path.join(
+        chrome_root, 'out', build_dir, 'iossim')
+    chromium_path = os.path.join(
+        chrome_root, 'out', build_dir, 'Chromium.app')
+
+    # If the iOS simulator and Chromium app are present, return True
+    if os.path.exists(iossim_path) and os.path.exists(chromium_path):
+      return True
+
+  return False
+
+def FindAllAvailableDevices(options):
+  """Returns a list of available devices.
+  """
+  # TODO(baxley): Add support for all platforms possible. Probably Linux,
+  # probably not Windows.
+  if platform.GetHostPlatform().GetOSName() != 'mac':
+    return []
+
+  if options.chrome_root is None:
+    logging.warning('--chrome-root is not specified, skip iOS simulator tests.')
+    return []
+
+  if (not _IsIosDeviceAttached() and not
+      _IsIosSimulatorAvailable(options.chrome_root)):
+    return []
+
+  return [IOSDevice()]
diff --git a/catapult/telemetry/telemetry/internal/platform/ios_platform_backend.py b/catapult/telemetry/telemetry/internal/platform/ios_platform_backend.py
new file mode 100644
index 0000000..1221422
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/ios_platform_backend.py
@@ -0,0 +1,62 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+
+from telemetry.internal.platform import posix_platform_backend
+
+#TODO(baxley): Put in real values.
+class IosPlatformBackend(posix_platform_backend.PosixPlatformBackend):
+  def __init__(self):
+    super(IosPlatformBackend, self).__init__()
+
+  def GetOSName(self):
+    # TODO(baxley): Get value from ideviceinfo.
+    logging.warn('Not implemented')
+    return 'ios'
+
+  def GetOSVersionName(self):
+    # TODO(baxley): Get value from ideviceinfo.
+    logging.warn('Not implemented')
+    return '7.1'
+
+  def SetFullPerformanceModeEnabled(self, enabled):
+    logging.warn('Not implemented')
+    return
+
+  def FlushDnsCache(self):
+    logging.warn('Not implemented')
+    return
+
+  def CanMonitorThermalThrottling(self):
+    logging.warn('Not implemented')
+    return False
+
+  def CanMonitorPower(self):
+    logging.warn('Not implemented')
+    return False
+
+  def StartMonitoringPower(self, browser):
+    raise NotImplementedError()
+
+  def StopMonitoringPower(self):
+    raise NotImplementedError()
+
+  def FlushEntireSystemCache(self):
+    raise NotImplementedError()
+
+  def HasBeenThermallyThrottled(self):
+    raise NotImplementedError()
+
+  def StopVideoCapture(self):
+    raise NotImplementedError()
+
+  def IsThermallyThrottled(self):
+    raise NotImplementedError()
+
+  def GetSystemTotalPhysicalMemory(self):
+    raise NotImplementedError()
+
+  def InstallApplication(self, application):
+    raise NotImplementedError()
diff --git a/catapult/telemetry/telemetry/internal/platform/linux_based_platform_backend.py b/catapult/telemetry/telemetry/internal/platform/linux_based_platform_backend.py
new file mode 100644
index 0000000..5a65d8c
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/linux_based_platform_backend.py
@@ -0,0 +1,171 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+try:
+  import resource  # pylint: disable=import-error
+except ImportError:
+  resource = None  # Not available on all platforms
+
+import re
+
+from telemetry.core import exceptions
+from telemetry import decorators
+from telemetry.internal.platform import platform_backend
+
+
+class LinuxBasedPlatformBackend(platform_backend.PlatformBackend):
+
+  """Abstract platform containing functionality shared by all Linux based OSes.
+
+  This includes Android and ChromeOS.
+
+  Subclasses must implement RunCommand, GetFileContents, GetPsOutput, and
+  ParseCStateSample."""
+
+  # Get the commit charge in kB.
+  def GetSystemCommitCharge(self):
+    meminfo_contents = self.GetFileContents('/proc/meminfo')
+    meminfo = self._GetProcFileDict(meminfo_contents)
+    if not meminfo:
+      return None
+    return (self._ConvertToKb(meminfo['MemTotal'])
+            - self._ConvertToKb(meminfo['MemFree'])
+            - self._ConvertToKb(meminfo['Buffers'])
+            - self._ConvertToKb(meminfo['Cached']))
+
+  @decorators.Cache
+  def GetSystemTotalPhysicalMemory(self):
+    meminfo_contents = self.GetFileContents('/proc/meminfo')
+    meminfo = self._GetProcFileDict(meminfo_contents)
+    if not meminfo:
+      return None
+    return self._ConvertToBytes(meminfo['MemTotal'])
+
+  def GetCpuStats(self, pid):
+    results = {}
+    stats = self._GetProcFileForPid(pid, 'stat')
+    if not stats:
+      return results
+    stats = stats.split()
+    utime = float(stats[13])
+    stime = float(stats[14])
+    cpu_process_jiffies = utime + stime
+    clock_ticks = self.GetClockTicks()
+    results.update({'CpuProcessTime': cpu_process_jiffies / clock_ticks})
+    return results
+
+  def GetCpuTimestamp(self):
+    total_jiffies = self._GetProcJiffies()
+    clock_ticks = self.GetClockTicks()
+    return {'TotalTime': total_jiffies / clock_ticks}
+
+  def GetMemoryStats(self, pid):
+    status_contents = self._GetProcFileForPid(pid, 'status')
+    stats = self._GetProcFileForPid(pid, 'stat').split()
+    status = self._GetProcFileDict(status_contents)
+    if not status or not stats or 'Z' in status['State']:
+      return {}
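+    # Per proc(5), fields 22 and 23 of the split /proc/<pid>/stat line are
+    # vsize (virtual memory size in bytes) and rss (resident set size in
+    # pages), respectively.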
+    vm = int(stats[22])
+    vm_peak = (self._ConvertToBytes(status['VmPeak'])
+               if 'VmPeak' in status else vm)
+    wss = int(stats[23]) * resource.getpagesize()
+    wss_peak = (self._ConvertToBytes(status['VmHWM'])
+                if 'VmHWM' in status else wss)
+
+    private_dirty_bytes = 0
+    for line in self._GetProcFileForPid(pid, 'smaps').splitlines():
+      if line.startswith('Private_Dirty:'):
+        private_dirty_bytes += self._ConvertToBytes(line.split(':')[1].strip())
+
+    return {'VM': vm,
+            'VMPeak': vm_peak,
+            'PrivateDirty': private_dirty_bytes,
+            'WorkingSetSize': wss,
+            'WorkingSetSizePeak': wss_peak}
+
+  @decorators.Cache
+  def GetClockTicks(self):
+    """Returns the number of clock ticks per second.
+
+    The proper way is to call os.sysconf('SC_CLK_TCK') but that is not easy to
+    do on Android/CrOS. In practice, nearly all Linux machines have a USER_HZ
+    of 100, so just return that.
+    """
+    return 100
+
+  def GetFileContents(self, filename):
+    raise NotImplementedError()
+
+  def GetPsOutput(self, columns, pid=None):
+    raise NotImplementedError()
+
+  def RunCommand(self, cmd):
+    """Runs the specified command.
+
+    Args:
+        cmd: A list of program arguments or the path string of the program.
+    Returns:
+        A string whose content is the output of the command.
+    """
+    raise NotImplementedError()
+
+  @staticmethod
+  def ParseCStateSample(sample):
+    """Parse a single c-state residency sample.
+
+    Args:
+        sample: A sample of c-state residency times to be parsed. Organized as
+            a dictionary mapping CPU name to a string containing all c-state
+            names, the times in each state, the latency of each state, and the
+            time at which the sample was taken all separated by newlines.
+            Ex: {'cpu0': 'C0\nC1\n5000\n2000\n20\n30\n1406673171'}
+
+    Returns:
+        Dictionary associating a c-state with a time.
+    """
+    raise NotImplementedError()
+
+  def _IsPidAlive(self, pid):
+    assert pid, 'pid is required'
+    return bool(self.GetPsOutput(['pid'], pid) == [str(pid)])
+
+  def _GetProcFileForPid(self, pid, filename):
+    try:
+      return self.GetFileContents('/proc/%s/%s' % (pid, filename))
+    except IOError:
+      if not self._IsPidAlive(pid):
+        raise exceptions.ProcessGoneException()
+      raise
+
+  def _ConvertToKb(self, value):
+    return int(value.replace('kB', ''))
+
+  def _ConvertToBytes(self, value):
+    return self._ConvertToKb(value) * 1024
+
+  def _GetProcFileDict(self, contents):
+    retval = {}
+    for line in contents.splitlines():
+      key, value = line.split(':')
+      retval[key.strip()] = value.strip()
+    return retval
+
+  def _GetProcJiffies(self):
+    """Parse '/proc/timer_list' output and returns the first jiffies attribute.
+
+    Multi-CPU machines will have multiple 'jiffies:' lines, all of which will be
+    essentially the same.  Return the first one."""
+    jiffies_timer_lines = self.RunCommand(
+        ['grep', 'jiffies', '/proc/timer_list'])
+    if not jiffies_timer_lines:
+      raise Exception('Unable to find jiffies from /proc/timer_list')
+    jiffies_timer_list = jiffies_timer_lines.splitlines()
+    # Each line should look something like 'jiffies: 4315883489'.
+    for line in jiffies_timer_list:
+      match = re.match(r'\s*jiffies\s*:\s*(\d+)', line)
+      if match:
+        value = match.group(1)
+        return float(value)
+    raise Exception('Unable to parse jiffies attribute: %s' %
+                    repr(jiffies_timer_lines))
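+
+# Illustrative arithmetic for GetCpuStats (hypothetical /proc/<pid>/stat
+# values): with utime=1500 and stime=700 jiffies and the assumed USER_HZ of
+# 100, CpuProcessTime = (1500 + 700) / 100 = 22.0 seconds of CPU time.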
diff --git a/catapult/telemetry/telemetry/internal/platform/linux_based_platform_backend_unittest.py b/catapult/telemetry/telemetry/internal/platform/linux_based_platform_backend_unittest.py
new file mode 100644
index 0000000..b7bc8cf
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/linux_based_platform_backend_unittest.py
@@ -0,0 +1,121 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import unittest
+
+from telemetry.core import util
+from telemetry.internal.platform import linux_based_platform_backend
+import mock
+
+
+class TestBackend(linux_based_platform_backend.LinuxBasedPlatformBackend):
+
+  # pylint: disable=abstract-method
+
+  def __init__(self):
+    super(TestBackend, self).__init__()
+    self._mock_files = {}
+
+  def SetMockFile(self, filename, output):
+    self._mock_files[filename] = output
+
+  def GetFileContents(self, filename):
+    return self._mock_files[filename]
+
+  def GetClockTicks(self):
+    return 41
+
+
+class LinuxBasedPlatformBackendTest(unittest.TestCase):
+
+  def SetMockFileInBackend(self, backend, real_file, mock_file):
+    with open(os.path.join(util.GetUnittestDataDir(), real_file)) as f:
+      backend.SetMockFile(mock_file, f.read())
+
+  def testGetSystemCommitCharge(self):
+    if not linux_based_platform_backend.resource:
+      logging.warning('Test not supported')
+      return
+
+    backend = TestBackend()
+    self.SetMockFileInBackend(backend, 'proc_meminfo', '/proc/meminfo')
+    result = backend.GetSystemCommitCharge()
+    # 25252140 == MemTotal - MemFree - Buffers - Cached (in kB)
+    self.assertEquals(result, 25252140)
+
+  def testGetSystemTotalPhysicalMemory(self):
+    if not linux_based_platform_backend.resource:
+      logging.warning('Test not supported')
+      return
+
+    backend = TestBackend()
+    self.SetMockFileInBackend(backend, 'proc_meminfo', '/proc/meminfo')
+    result = backend.GetSystemTotalPhysicalMemory()
+    # 67479191552 == MemTotal * 1024
+    self.assertEquals(result, 67479191552)
+
+  def testGetCpuStatsBasic(self):
+    if not linux_based_platform_backend.resource:
+      logging.warning('Test not supported')
+      return
+
+    backend = TestBackend()
+    self.SetMockFileInBackend(backend, 'stat', '/proc/1/stat')
+    result = backend.GetCpuStats(1)
+    self.assertEquals(result, {'CpuProcessTime': 22.0})
+
+  def testGetCpuTimestampBasic(self):
+    if not linux_based_platform_backend.resource:
+      logging.warning('Test not supported')
+      return
+    jiffies_grep_string = """
+    jiffies
+jiffies  a1111
+    .last_jiffies   : 4307239958
+    .next_jiffies   : 4307239968
+    jiffies: 10505463300
+    jiffies: 10505463333
+    """
+    with mock.patch.object(
+        linux_based_platform_backend.LinuxBasedPlatformBackend,
+        'RunCommand', return_value=jiffies_grep_string) as mock_method:
+      backend = linux_based_platform_backend.LinuxBasedPlatformBackend()
+      result = backend.GetCpuTimestamp()
+      self.assertEquals(result, {'TotalTime': 105054633.0})
+    mock_method.assert_called_once_with(
+        ['grep', 'jiffies', '/proc/timer_list'])
+
+  def testGetMemoryStatsBasic(self):
+    if not linux_based_platform_backend.resource:
+      logging.warning('Test not supported')
+      return
+
+    backend = TestBackend()
+    self.SetMockFileInBackend(backend, 'stat', '/proc/1/stat')
+    self.SetMockFileInBackend(backend, 'status', '/proc/1/status')
+    self.SetMockFileInBackend(backend, 'smaps', '/proc/1/smaps')
+    result = backend.GetMemoryStats(1)
+    self.assertEquals(result, {'PrivateDirty': 5324800,
+                               'VM': 1025978368,
+                               'VMPeak': 1050099712,
+                               'WorkingSetSize': 84000768,
+                               'WorkingSetSizePeak': 144547840})
+
+  def testGetMemoryStatsNoHWM(self):
+    if not linux_based_platform_backend.resource:
+      logging.warning('Test not supported')
+      return
+
+    backend = TestBackend()
+    self.SetMockFileInBackend(backend, 'stat', '/proc/1/stat')
+    self.SetMockFileInBackend(backend, 'status_nohwm', '/proc/1/status')
+    self.SetMockFileInBackend(backend, 'smaps', '/proc/1/smaps')
+    result = backend.GetMemoryStats(1)
+    self.assertEquals(result, {'PrivateDirty': 5324800,
+                               'VM': 1025978368,
+                               'VMPeak': 1025978368,
+                               'WorkingSetSize': 84000768,
+                               'WorkingSetSizePeak': 84000768})
diff --git a/catapult/telemetry/telemetry/internal/platform/linux_platform_backend.py b/catapult/telemetry/telemetry/internal/platform/linux_platform_backend.py
new file mode 100644
index 0000000..9ec293a
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/linux_platform_backend.py
@@ -0,0 +1,172 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import platform
+import subprocess
+import sys
+
+from catapult_base import cloud_storage  # pylint: disable=import-error
+
+from telemetry.internal.util import binary_manager
+from telemetry.core import os_version
+from telemetry.core import util
+from telemetry import decorators
+from telemetry.internal.platform import linux_based_platform_backend
+from telemetry.internal.platform import posix_platform_backend
+from telemetry.internal.platform.power_monitor import msr_power_monitor
+
+
+_POSSIBLE_PERFHOST_APPLICATIONS = [
+  'perfhost_precise',
+  'perfhost_trusty',
+]
+
+
+class LinuxPlatformBackend(
+    posix_platform_backend.PosixPlatformBackend,
+    linux_based_platform_backend.LinuxBasedPlatformBackend):
+  def __init__(self):
+    super(LinuxPlatformBackend, self).__init__()
+    self._power_monitor = msr_power_monitor.MsrPowerMonitorLinux(self)
+
+  @classmethod
+  def IsPlatformBackendForHost(cls):
+    return sys.platform.startswith('linux') and not util.IsRunningOnCrosDevice()
+
+  def IsThermallyThrottled(self):
+    raise NotImplementedError()
+
+  def HasBeenThermallyThrottled(self):
+    raise NotImplementedError()
+
+  @decorators.Cache
+  def GetArchName(self):
+    return platform.machine()
+
+  def GetOSName(self):
+    return 'linux'
+
+  @decorators.Cache
+  def GetOSVersionName(self):
+    if not os.path.exists('/etc/lsb-release'):
+      raise NotImplementedError('Unknown Linux OS version')
+
+    codename = None
+    version = None
+    for line in self.GetFileContents('/etc/lsb-release').splitlines():
+      key, _, value = line.partition('=')
+      if key == 'DISTRIB_CODENAME':
+        codename = value.strip()
+      elif key == 'DISTRIB_RELEASE':
+        try:
+          version = float(value)
+        except ValueError:
+          version = 0
+      if codename and version:
+        break
+    return os_version.OSVersion(codename, version)
+
+  def CanFlushIndividualFilesFromSystemCache(self):
+    return True
+
+  def FlushEntireSystemCache(self):
+    p = subprocess.Popen(['/sbin/sysctl', '-w', 'vm.drop_caches=3'])
+    p.wait()
+    assert p.returncode == 0, 'Failed to flush system cache'
+
+  def CanLaunchApplication(self, application):
+    if application == 'ipfw' and not self._IsIpfwKernelModuleInstalled():
+      return False
+    return super(LinuxPlatformBackend, self).CanLaunchApplication(application)
+
+  def InstallApplication(self, application):
+    if application == 'ipfw':
+      self._InstallIpfw()
+    elif application == 'avconv':
+      self._InstallBinary(application, fallback_package='libav-tools')
+    elif application in _POSSIBLE_PERFHOST_APPLICATIONS:
+      self._InstallBinary(application)
+    else:
+      raise NotImplementedError(
+          'Please teach Telemetry how to install ' + application)
+
+  def CanMonitorPower(self):
+    return self._power_monitor.CanMonitorPower()
+
+  def CanMeasurePerApplicationPower(self):
+    return self._power_monitor.CanMeasurePerApplicationPower()
+
+  def StartMonitoringPower(self, browser):
+    self._power_monitor.StartMonitoringPower(browser)
+
+  def StopMonitoringPower(self):
+    return self._power_monitor.StopMonitoringPower()
+
+  def ReadMsr(self, msr_number, start=0, length=64):
+    cmd = ['rdmsr', '-d', str(msr_number)]
+    (out, err) = subprocess.Popen(cmd,
+                                  stdout=subprocess.PIPE,
+                                  stderr=subprocess.PIPE).communicate()
+    if err:
+      raise OSError(err)
+    try:
+      result = int(out)
+    except ValueError:
+      raise OSError('Cannot interpret rdmsr output: %s' % out)
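+    # Extract |length| bits starting at bit |start|; e.g. start=8, length=8
+    # keeps bits 8..15: (result >> 8) & 0xff.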
+    return result >> start & ((1 << length) - 1)
+
+  def _IsIpfwKernelModuleInstalled(self):
+    return 'ipfw_mod' in subprocess.Popen(
+        ['lsmod'], stdout=subprocess.PIPE).communicate()[0]
+
+  def _InstallIpfw(self):
+    ipfw_bin = binary_manager.FindPath(
+        'ipfw', self.GetArchName(), self.GetOSName())
+    ipfw_mod = binary_manager.FindPath(
+        'ipfw_mod.ko', self.GetArchName(), self.GetOSName())
+
+    try:
+      changed = cloud_storage.GetIfChanged(
+          ipfw_bin, cloud_storage.INTERNAL_BUCKET)
+      changed |= cloud_storage.GetIfChanged(
+          ipfw_mod, cloud_storage.INTERNAL_BUCKET)
+    except cloud_storage.CloudStorageError, e:
+      logging.error(str(e))
+      logging.error('You may proceed by manually building and installing '
+                    'dummynet for your kernel. See: '
+                    'http://info.iet.unipi.it/~luigi/dummynet/')
+      sys.exit(1)
+
+    if changed or not self.CanLaunchApplication('ipfw'):
+      if not self._IsIpfwKernelModuleInstalled():
+        subprocess.check_call(['/usr/bin/sudo', 'insmod', ipfw_mod])
+      os.chmod(ipfw_bin, 0755)
+      subprocess.check_call(
+          ['/usr/bin/sudo', 'cp', ipfw_bin, '/usr/local/sbin'])
+
+    assert self.CanLaunchApplication('ipfw'), 'Failed to install ipfw. ' \
+        'ipfw provided binaries are not supported for linux kernel < 3.13. ' \
+        'You may proceed by manually building and installing dummynet for ' \
+        'your kernel. See: http://info.iet.unipi.it/~luigi/dummynet/'
+
+  def _InstallBinary(self, bin_name, fallback_package=None):
+    bin_path = binary_manager.FetchPath(
+        bin_name, self.GetArchName(), self.GetOSName())
+    if not bin_path:
+      raise Exception('Could not find the binary package %s' % bin_name)
+    os.environ['PATH'] += os.pathsep + os.path.dirname(bin_path)
+
+    try:
+      cloud_storage.GetIfChanged(bin_path, cloud_storage.INTERNAL_BUCKET)
+      os.chmod(bin_path, 0755)
+    except cloud_storage.CloudStorageError, e:
+      logging.error(str(e))
+      if fallback_package:
+        raise Exception('You may proceed by manually installing %s via:\n'
+                        'sudo apt-get install %s' %
+                        (bin_name, fallback_package))
+
+    assert self.CanLaunchApplication(bin_name), 'Failed to install ' + bin_name
diff --git a/catapult/telemetry/telemetry/internal/platform/linux_platform_backend_unittest.py b/catapult/telemetry/telemetry/internal/platform/linux_platform_backend_unittest.py
new file mode 100644
index 0000000..052d59f
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/linux_platform_backend_unittest.py
@@ -0,0 +1,41 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import unittest
+
+from telemetry import decorators
+from telemetry.core import util
+from telemetry.internal.platform import linux_platform_backend
+import mock
+
+
+class LinuxPlatformBackendTest(unittest.TestCase):
+  @decorators.Enabled('linux')
+  def testGetOSVersionNameSaucy(self):
+    path = os.path.join(util.GetUnittestDataDir(), 'ubuntu-saucy-lsb-release')
+    with open(path) as f:
+      ubuntu_saucy_lsb_release_content = f.read()
+
+    with mock.patch.object(
+        linux_platform_backend.LinuxPlatformBackend, 'GetFileContents',
+        return_value=ubuntu_saucy_lsb_release_content) as mock_method:
+      backend = linux_platform_backend.LinuxPlatformBackend()
+      self.assertEqual(backend.GetOSVersionName(), 'saucy')
+      mock_method.assert_called_once_with('/etc/lsb-release')
+
+  @decorators.Enabled('linux')
+  def testGetOSVersionNameArch(self):
+    path = os.path.join(util.GetUnittestDataDir(), 'arch-lsb-release')
+    with open(path) as f:
+      arch_lsb_release_content = f.read()
+
+    with mock.patch.object(
+        linux_platform_backend.LinuxPlatformBackend, 'GetFileContents',
+        return_value=arch_lsb_release_content) as mock_method:
+      backend = linux_platform_backend.LinuxPlatformBackend()
+      # A distribution may not have a codename or a release number. We just
+      # check that GetOSVersionName doesn't raise an exception.
+      backend.GetOSVersionName()
+      mock_method.assert_called_once_with('/etc/lsb-release')
diff --git a/catapult/telemetry/telemetry/internal/platform/mac_platform_backend.py b/catapult/telemetry/telemetry/internal/platform/mac_platform_backend.py
new file mode 100644
index 0000000..fa76469
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/mac_platform_backend.py
@@ -0,0 +1,180 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import ctypes
+import ctypes.util
+import os
+import platform
+import subprocess
+import sys
+import time
+
+from telemetry.core import os_version as os_version_module
+from telemetry import decorators
+from telemetry.internal.platform import posix_platform_backend
+from telemetry.internal.platform.power_monitor import powermetrics_power_monitor
+from telemetry.util import process_statistic_timeline_data
+
+try:
+  import resource  # pylint: disable=import-error
+except ImportError:
+  resource = None  # Not available on all platforms
+
+
+
+class MacPlatformBackend(posix_platform_backend.PosixPlatformBackend):
+  def __init__(self):
+    super(MacPlatformBackend, self).__init__()
+    self.libproc = None
+    self._power_monitor = powermetrics_power_monitor.PowerMetricsPowerMonitor(
+        self)
+
+  @classmethod
+  def IsPlatformBackendForHost(cls):
+    return sys.platform == 'darwin'
+
+  def IsThermallyThrottled(self):
+    raise NotImplementedError()
+
+  def HasBeenThermallyThrottled(self):
+    raise NotImplementedError()
+
+  def _GetIdleWakeupCount(self, pid):
+    top_output = self._GetTopOutput(pid, ['idlew'])
+
+    # Sometimes top won't return anything here, just ignore such cases -
+    # crbug.com/354812 .
+    if top_output[-2] != 'IDLEW':
+      return process_statistic_timeline_data.IdleWakeupTimelineData(pid, 0)
+    # Numbers reported by top may have a '+' appended.
+    wakeup_count = int(top_output[-1].strip('+ '))
+    return process_statistic_timeline_data.IdleWakeupTimelineData(pid,
+        wakeup_count)
+
+  def GetCpuStats(self, pid):
+    """Returns a dict of cpu statistics for the process represented by |pid|."""
+    class ProcTaskInfo(ctypes.Structure):
+      """Struct for proc_pidinfo() call."""
+      _fields_ = [("pti_virtual_size", ctypes.c_uint64),
+                  ("pti_resident_size", ctypes.c_uint64),
+                  ("pti_total_user", ctypes.c_uint64),
+                  ("pti_total_system", ctypes.c_uint64),
+                  ("pti_threads_user", ctypes.c_uint64),
+                  ("pti_threads_system", ctypes.c_uint64),
+                  ("pti_policy", ctypes.c_int32),
+                  ("pti_faults", ctypes.c_int32),
+                  ("pti_pageins", ctypes.c_int32),
+                  ("pti_cow_faults", ctypes.c_int32),
+                  ("pti_messages_sent", ctypes.c_int32),
+                  ("pti_messages_received", ctypes.c_int32),
+                  ("pti_syscalls_mach", ctypes.c_int32),
+                  ("pti_syscalls_unix", ctypes.c_int32),
+                  ("pti_csw", ctypes.c_int32),
+                  ("pti_threadnum", ctypes.c_int32),
+                  ("pti_numrunning", ctypes.c_int32),
+                  ("pti_priority", ctypes.c_int32)]
+      PROC_PIDTASKINFO = 4
+      def __init__(self):
+        self.size = ctypes.sizeof(self)
+        super(ProcTaskInfo, self).__init__()  # pylint: disable=bad-super-call
+
+    proc_info = ProcTaskInfo()
+    if not self.libproc:
+      self.libproc = ctypes.CDLL(ctypes.util.find_library('libproc'))
+    self.libproc.proc_pidinfo(pid, proc_info.PROC_PIDTASKINFO, 0,
+                              ctypes.byref(proc_info), proc_info.size)
+
+    # Convert nanoseconds to seconds.
+    cpu_time = (proc_info.pti_total_user / 1000000000.0 +
+                proc_info.pti_total_system / 1000000000.0)
+    results = {'CpuProcessTime': cpu_time,
+               'ContextSwitches': proc_info.pti_csw}
+
+    # top only reports idle wakeup count starting from OS X 10.9.
+    if self.GetOSVersionName() >= os_version_module.MAVERICKS:
+      results.update({'IdleWakeupCount': self._GetIdleWakeupCount(pid)})
+    return results
+
+  def GetCpuTimestamp(self):
+    """Return current timestamp in seconds."""
+    return {'TotalTime': time.time()}
+
+  def GetSystemCommitCharge(self):
+    vm_stat = self.RunCommand(['vm_stat'])
+    for stat in vm_stat.splitlines():
+      key, value = stat.split(':')
+      if key == 'Pages active':
+        pages_active = int(value.strip()[:-1])  # Strip trailing '.'
+        return pages_active * resource.getpagesize() / 1024
+    return 0
+
+  @decorators.Cache
+  def GetSystemTotalPhysicalMemory(self):
+    return int(self.RunCommand(['sysctl', '-n', 'hw.memsize']))
+
+  def PurgeUnpinnedMemory(self):
+    # TODO(pliard): Implement this.
+    pass
+
+  def GetMemoryStats(self, pid):
+    rss_vsz = self.GetPsOutput(['rss', 'vsz'], pid)
+    if rss_vsz:
+      rss, vsz = rss_vsz[0].split()
+      return {'VM': 1024 * int(vsz),
+              'WorkingSetSize': 1024 * int(rss)}
+    return {}
+
+  @decorators.Cache
+  def GetArchName(self):
+    return platform.machine()
+
+  def GetOSName(self):
+    return 'mac'
+
+  @decorators.Cache
+  def GetOSVersionName(self):
+    os_version = os.uname()[2]
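+    # os.uname()[2] is the Darwin kernel release, e.g. '13.4.0' on OS X 10.9
+    # (Mavericks).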
+
+    if os_version.startswith('9.'):
+      return os_version_module.LEOPARD
+    if os_version.startswith('10.'):
+      return os_version_module.SNOWLEOPARD
+    if os_version.startswith('11.'):
+      return os_version_module.LION
+    if os_version.startswith('12.'):
+      return os_version_module.MOUNTAINLION
+    if os_version.startswith('13.'):
+      return os_version_module.MAVERICKS
+    if os_version.startswith('14.'):
+      return os_version_module.YOSEMITE
+    if os_version.startswith('15.'):
+      return os_version_module.ELCAPITAN
+
+    raise NotImplementedError('Unknown mac version %s.' % os_version)
+
+  def CanTakeScreenshot(self):
+    return True
+
+  def TakeScreenshot(self, file_path):
+    return subprocess.call(['screencapture', file_path])
+
+  def CanFlushIndividualFilesFromSystemCache(self):
+    return False
+
+  def FlushEntireSystemCache(self):
+    mavericks_or_later = self.GetOSVersionName() >= os_version_module.MAVERICKS
+    p = self.LaunchApplication('purge', elevate_privilege=mavericks_or_later)
+    p.communicate()
+    assert p.returncode == 0, 'Failed to flush system cache'
+
+  def CanMonitorPower(self):
+    return self._power_monitor.CanMonitorPower()
+
+  def CanMeasurePerApplicationPower(self):
+    return self._power_monitor.CanMeasurePerApplicationPower()
+
+  def StartMonitoringPower(self, browser):
+    self._power_monitor.StartMonitoringPower(browser)
+
+  def StopMonitoringPower(self):
+    return self._power_monitor.StopMonitoringPower()
diff --git a/catapult/telemetry/telemetry/internal/platform/mac_platform_backend_unittest.py b/catapult/telemetry/telemetry/internal/platform/mac_platform_backend_unittest.py
new file mode 100644
index 0000000..c4195dd
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/mac_platform_backend_unittest.py
@@ -0,0 +1,35 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import unittest
+
+from telemetry.core import platform as platform_module
+from telemetry.core import os_version
+from telemetry import decorators
+
+
+class MacPlatformBackendTest(unittest.TestCase):
+  def testVersionComparison(self):
+    self.assertGreater(os_version.YOSEMITE, os_version.MAVERICKS)
+    self.assertGreater(os_version.MAVERICKS, os_version.SNOWLEOPARD)
+    self.assertGreater(os_version.LION, os_version.LEOPARD)
+    self.assertEqual(os_version.YOSEMITE, 'yosemite')
+    self.assertEqual(os_version.MAVERICKS, 'mavericks')
+    self.assertEqual('%s2' % os_version.MAVERICKS, 'mavericks2')
+    self.assertEqual(''.join([os_version.MAVERICKS, '2']),
+                     'mavericks2')
+    self.assertEqual(os_version.LION.upper(), 'LION')
+
+  @decorators.Enabled('mac')
+  def testGetCPUStats(self):
+    platform = platform_module.GetHostPlatform()
+
+    backend = platform._platform_backend # pylint: disable=protected-access
+
+    cpu_stats = backend.GetCpuStats(os.getpid())
+    self.assertGreater(cpu_stats['CpuProcessTime'], 0)
+    self.assertTrue(cpu_stats.has_key('ContextSwitches'))
+    if backend.GetOSVersionName() >= os_version.MAVERICKS:
+      self.assertTrue(cpu_stats.has_key('IdleWakeupCount'))
diff --git a/catapult/telemetry/telemetry/internal/platform/msr_server_win.py b/catapult/telemetry/telemetry/internal/platform/msr_server_win.py
new file mode 100644
index 0000000..087407b
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/msr_server_win.py
@@ -0,0 +1,114 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A server that serves MSR values over TCP. Takes a port as its sole parameter.
+
+The reference client for this server is msr_power_monitor.MsrPowerMonitor.
+
+Must be run as Administrator. We use TCP instead of named pipes or another IPC
+to avoid dealing with the pipe security mechanisms. We take the port as a
+parameter instead of choosing one, because it's hard to communicate the port
+number across integrity levels.
+
+Requires WinRing0 to be installed in the Python directory.
+msr_power_monitor.MsrPowerMonitor does this if needed.
+"""
+
+import argparse
+import ctypes
+import os
+import SocketServer
+import struct
+import sys
+try:
+  import win32api  # pylint: disable=import-error
+  import win32file  # pylint: disable=import-error
+except ImportError:
+  win32api = None
+  win32file = None
+
+
+WINRING0_STATUS_MESSAGES = (
+    'No error',
+    'Unsupported platform',
+    'Driver not loaded. You may need to run as Administrator',
+    'Driver not found',
+    'Driver unloaded by other process',
+    'Driver not loaded because of executing on Network Drive',
+    'Unknown error',
+)
+
+
+# The DLL initialization is global, so put it in a global variable.
+_winring0 = None
+
+
+class WinRing0Error(OSError):
+  pass
+
+
+def _WinRing0Path():
+  python_is_64_bit = sys.maxsize > 2 ** 32
+  dll_file_name = 'WinRing0x64.dll' if python_is_64_bit else 'WinRing0.dll'
+  return os.path.join(os.path.dirname(sys.executable), dll_file_name)
+
+
+def _Initialize():
+  global _winring0
+  if not _winring0:
+    winring0 = ctypes.WinDLL(_WinRing0Path())
+    if not winring0.InitializeOls():
+      winring0_status = winring0.GetDllStatus()
+      raise WinRing0Error(winring0_status,
+                          'Unable to initialize WinRing0: %s' %
+                          WINRING0_STATUS_MESSAGES[winring0_status])
+    _winring0 = winring0
+
+
+def _Deinitialize():
+  global _winring0
+  if _winring0:
+    _winring0.DeinitializeOls()
+    _winring0 = None
+
+
+def _ReadMsr(msr_number):
+  low = ctypes.c_uint()
+  high = ctypes.c_uint()
+  _winring0.Rdmsr(ctypes.c_uint(msr_number),
+                  ctypes.byref(low), ctypes.byref(high))
+  return high.value << 32 | low.value
+
+
+class MsrRequestHandler(SocketServer.StreamRequestHandler):
+  def handle(self):
+    msr_number = struct.unpack('I', self.rfile.read(4))[0]
+    self.wfile.write(struct.pack('Q', _ReadMsr(msr_number)))
+
+
+def main():
+  parser = argparse.ArgumentParser()
+  parser.add_argument('pipe_name', type=str)
+  args = parser.parse_args()
+
+  _Initialize()
+  try:
+    SocketServer.TCPServer.allow_reuse_address = True
+    server_address = ('127.0.0.1', 0)
+    server = SocketServer.TCPServer(server_address, MsrRequestHandler)
+    handle = win32file.CreateFile(args.pipe_name,
+                                  win32file.GENERIC_WRITE,
+                                  0, None,
+                                  win32file.OPEN_EXISTING,
+                                  0, None)
+    _, port = server.server_address
+    win32file.WriteFile(handle, str(port))
+    win32api.CloseHandle(handle)
+    server.serve_forever()
+  finally:
+    _Deinitialize()
+
+
+if __name__ == '__main__':
+  main()
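+
+# Illustrative client exchange (a sketch; assumes the server is running and its
+# TCP port has been read back from the named pipe):
+#   import socket, struct
+#   sock = socket.create_connection(('127.0.0.1', port))
+#   sock.sendall(struct.pack('I', 0x611))        # e.g. MSR_PKG_ENERGY_STATUS
+#   value = struct.unpack('Q', sock.recv(8))[0]  # 64-bit MSR value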
diff --git a/catapult/telemetry/telemetry/internal/platform/network_controller_backend.py b/catapult/telemetry/telemetry/internal/platform/network_controller_backend.py
new file mode 100644
index 0000000..7ab269e
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/network_controller_backend.py
@@ -0,0 +1,248 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import shutil
+import tempfile
+
+from telemetry.internal import forwarders
+from telemetry.internal.util import webpagereplay
+from telemetry.util import wpr_modes
+
+import certutils
+import platformsettings
+
+
+class ArchiveDoesNotExistError(Exception):
+  """Raised when the archive path does not exist for replay mode."""
+  pass
+
+
+class ReplayAndBrowserPortsError(Exception):
+  """Raised an existing browser would get different remote replay ports."""
+  pass
+
+
+class NetworkControllerBackend(object):
+  """Control network settings and servers to simulate the Web.
+
+  Network changes include forwarding device ports to host platform ports.
+  Web Page Replay is used to record and replay HTTP/HTTPS responses.
+  """
+
+  def __init__(self, platform_backend):
+    self._platform_backend = platform_backend
+    self._wpr_mode = None
+    self._extra_wpr_args = None
+    self._wpr_port_pairs = None
+    self._archive_path = None
+    self._make_javascript_deterministic = None
+    self._forwarder = None
+    self._wpr_ca_cert_path = None
+    self._wpr_server = None
+
+  @property
+  def is_open(self):
+    return self._wpr_mode is not None
+
+  @property
+  def is_replay_active(self):
+    return self._forwarder is not None
+
+  @property
+  def host_ip(self):
+    return self._platform_backend.forwarder_factory.host_ip
+
+  @property
+  def wpr_device_ports(self):
+    try:
+      return self._forwarder.port_pairs.remote_ports
+    except AttributeError:
+      return None
+
+  @property
+  def is_test_ca_installed(self):
+    return self._wpr_ca_cert_path is not None
+
+  def Open(self, wpr_mode, extra_wpr_args):
+    """Configure and prepare target platform for network control.
+
+    This may, e.g., install test certificates and perform any needed setup
+    on the target platform.
+
+    After network interactions are over, clients should call the Close method.
+
+    Args:
+      wpr_mode: a mode for web page replay; available modes are
+          wpr_modes.WPR_OFF, wpr_modes.WPR_APPEND, wpr_modes.WPR_REPLAY, or
+          wpr_modes.WPR_RECORD.
+      extra_wpr_args: a list of extra arguments for web page replay.
+    """
+    assert not self.is_open, 'Network controller is already open'
+    self._wpr_mode = wpr_mode
+    self._extra_wpr_args = extra_wpr_args
+    self._wpr_port_pairs = self._platform_backend.GetWprPortPairs()
+    self._InstallTestCa()
+
+  def Close(self):
+    """Undo changes in the target platform used for network control.
+
+    Implicitly stops replay if currently active.
+    """
+    self.StopReplay()
+    self._RemoveTestCa()
+    self._make_javascript_deterministic = None
+    self._archive_path = None
+    self._wpr_port_pairs = None
+    self._extra_wpr_args = None
+    self._wpr_mode = None
+
+  def _InstallTestCa(self):
+    if not self._platform_backend.supports_test_ca:
+      return
+    assert not self.is_test_ca_installed, 'Test CA is already installed'
+    if certutils.openssl_import_error:
+      logging.warning(
+          'The OpenSSL module is unavailable. '
+          'Browsers may fall back to ignoring certificate errors.')
+      return
+    if not platformsettings.HasSniSupport():
+      logging.warning(
+          'Web Page Replay requires SNI support (pyOpenSSL 0.13 or greater) '
+          'to generate certificates from a test CA. '
+          'Browsers may fall back to ignoring certificate errors.')
+      return
+    self._wpr_ca_cert_path = os.path.join(tempfile.mkdtemp(), 'testca.pem')
+    try:
+      certutils.write_dummy_ca_cert(*certutils.generate_dummy_ca_cert(),
+                                    cert_path=self._wpr_ca_cert_path)
+      self._platform_backend.InstallTestCa(self._wpr_ca_cert_path)
+      logging.info('Test certificate authority installed on target platform.')
+    except Exception:
+      logging.exception(
+          'Failed to install test certificate authority on target platform. '
+          'Browsers may fall back to ignoring certificate errors.')
+      self._RemoveTestCa()
+
+  def _RemoveTestCa(self):
+    if not self.is_test_ca_installed:
+      return
+    try:
+      self._platform_backend.RemoveTestCa()
+    except Exception:
+      # Best effort cleanup - show the error and continue.
+      logging.exception(
+          'Error trying to remove certificate authority from target platform.')
+    try:
+      shutil.rmtree(os.path.dirname(self._wpr_ca_cert_path), ignore_errors=True)
+    finally:
+      self._wpr_ca_cert_path = None
+
+  def StartReplay(self, archive_path, make_javascript_deterministic=False):
+    """Start web page replay from a given replay archive.
+
+    Starts as needed, and reuses if possible, the replay server on the host and
+    a forwarder from the host to the target platform.
+
+    Implementation details
+    ----------------------
+
+    The local host is where Telemetry is run. The remote host is where
+    the target application is run. The local and remote hosts may be
+    the same (e.g., testing a desktop browser) or different (e.g., testing
+    an android browser).
+
+    A replay server is started on the local host using the local ports, while
+    a forwarder ties the local to the remote ports.
+
+    Both local and remote ports may be zero. In that case they are determined
+    by the replay server and the forwarder respectively. Setting dns to None
+    disables DNS traffic.
+
+    Args:
+      archive_path: a path to a specific WPR archive.
+      make_javascript_deterministic: True if replay should inject a script
+          to make JavaScript behave deterministically (e.g., override Date()).
+    """
+    assert self.is_open, 'Network controller is not open'
+    if self._wpr_mode == wpr_modes.WPR_OFF:
+      return
+    if not archive_path:
+      # TODO(slamm, tonyg): Ideally, replay mode should be stopped when there is
+      # no archive path. However, if the replay server already started, and
+      # a file URL is tested with the
+      # telemetry.core.local_server.LocalServerController, then the
+      # replay server forwards requests to it. (Chrome is configured to use
+      # fixed ports for all HTTP/HTTPS requests.)
+      return
+    if (self._wpr_mode == wpr_modes.WPR_REPLAY and
+        not os.path.exists(archive_path)):
+      raise ArchiveDoesNotExistError(
+          'Archive path does not exist: %s' % archive_path)
+    if (self._wpr_server is not None and
+        self._archive_path == archive_path and
+        self._make_javascript_deterministic == make_javascript_deterministic):
+      return  # We may reuse the existing replay server.
+
+    self._archive_path = archive_path
+    self._make_javascript_deterministic = make_javascript_deterministic
+    local_ports = self._StartReplayServer()
+    self._StartForwarder(local_ports)
+
+  def StopReplay(self):
+    """Stop web page replay.
+
+    Stops both the replay server and the forwarder if currently active.
+    """
+    if self._forwarder:
+      self._forwarder.Close()
+      self._forwarder = None
+    self._StopReplayServer()
+
+  def _StartReplayServer(self):
+    """Start the replay server and return the started local_ports."""
+    self._StopReplayServer()  # In case it was already running.
+    local_ports = self._wpr_port_pairs.local_ports
+    self._wpr_server = webpagereplay.ReplayServer(
+        self._archive_path,
+        self.host_ip,
+        local_ports.http,
+        local_ports.https,
+        local_ports.dns,
+        self._ReplayCommandLineArgs())
+    return self._wpr_server.StartServer()
+
+  def _StopReplayServer(self):
+    """Stop the replay server only."""
+    if self._wpr_server:
+      self._wpr_server.StopServer()
+      self._wpr_server = None
+
+  def _ReplayCommandLineArgs(self):
+    wpr_args = list(self._extra_wpr_args)
+    if self._wpr_mode == wpr_modes.WPR_APPEND:
+      wpr_args.append('--append')
+    elif self._wpr_mode == wpr_modes.WPR_RECORD:
+      wpr_args.append('--record')
+    if not self._make_javascript_deterministic:
+      wpr_args.append('--inject_scripts=')
+    if self._wpr_ca_cert_path:
+      wpr_args.extend([
+          '--should_generate_certs',
+          '--https_root_ca_cert_path=%s' % self._wpr_ca_cert_path])
+    return wpr_args
+
+  def _StartForwarder(self, local_ports):
+    """Start a forwarder from local_ports to the set WPR remote_ports."""
+    if self._forwarder is not None:
+      if local_ports == self._forwarder.port_pairs.local_ports:
+        return  # Safe to reuse existing forwarder.
+      self._forwarder.Close()
+    self._forwarder = self._platform_backend.forwarder_factory.Create(
+        forwarders.PortPairs.Zip(local_ports,
+                                 self._wpr_port_pairs.remote_ports))
+    # Override port pairs with values after defaults have been resolved;
+    # we should use the same set of ports when restarting replay.
+    self._wpr_port_pairs = self._forwarder.port_pairs
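+
+# Illustrative lifecycle (a sketch; |platform_backend| and the archive path are
+# assumptions, not values used elsewhere in this module):
+#   backend = NetworkControllerBackend(platform_backend)
+#   backend.Open(wpr_modes.WPR_REPLAY, extra_wpr_args=[])
+#   backend.StartReplay('some-archive.wpr', make_javascript_deterministic=True)
+#   ...  # exercise the browser against the replayed pages
+#   backend.Close()  # implicitly stops replay and removes the test CA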
diff --git a/catapult/telemetry/telemetry/internal/platform/network_controller_backend_unittest.py b/catapult/telemetry/telemetry/internal/platform/network_controller_backend_unittest.py
new file mode 100644
index 0000000..4b58966
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/network_controller_backend_unittest.py
@@ -0,0 +1,386 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import mock
+import unittest
+
+from telemetry.internal import forwarders
+from telemetry.internal.platform import network_controller_backend
+from telemetry.util import wpr_modes
+
+
+DEFAULT_PORTS = forwarders.PortSet(http=1111, https=2222, dns=3333)
+FORWARDER_HOST_IP = '123.321.123.321'
+EXPECTED_WPR_CA_CERT_PATH = os.path.join('[tempdir]', 'testca.pem')
+
+
+class FakePlatformBackend(object):
+  def __init__(self):
+    self.forwarder_factory = FakeForwarderFactory()
+    self.supports_test_ca = True
+    self.is_test_ca_installed = False
+    self.faulty_cert_installer = False
+    self.wpr_port_pairs = None
+    # Normally test using all default ports.
+    self.SetWprPortPairs(http=(0, 0), https=(0, 0), dns=(0, 0))
+
+  def SetWprPortPairs(self, http, https, dns):
+    self.wpr_port_pairs = forwarders.PortPairs(
+        forwarders.PortPair(*http),
+        forwarders.PortPair(*https),
+        forwarders.PortPair(*dns) if dns is not None else None)
+
+  def GetWprPortPairs(self):
+    return self.wpr_port_pairs
+
+  def InstallTestCa(self, ca_cert_path):
+    del ca_cert_path  # Unused argument.
+    self.is_test_ca_installed = True
+    # Exception is raised after setting the "installed" value to confirm that
+    # cleanup code is being called in case of errors.
+    if self.faulty_cert_installer:
+      raise Exception('Cert install failed!')
+
+  def RemoveTestCa(self):
+    self.is_test_ca_installed = False
+
+
+class FakeForwarderFactory(object):
+  def __init__(self):
+    self.host_ip = FORWARDER_HOST_IP
+
+  def Create(self, port_pairs):
+    return forwarders.Forwarder(port_pairs)
+
+
+class FakeReplayServer(object):
+  DEFAULT_PORTS = NotImplemented  # Will be assigned during test setUp.
+
+  def __init__(self, archive_path, host_ip, http_port, https_port, dns_port,
+               replay_args):
+    self.archive_path = archive_path
+    self.host_ip = host_ip
+    self.ports = forwarders.PortSet(
+        http_port or self.DEFAULT_PORTS.http,
+        https_port or self.DEFAULT_PORTS.https,
+        dns_port or self.DEFAULT_PORTS.dns if dns_port is not None else None)
+    self.replay_args = replay_args
+    self.is_running = False
+
+  def StartServer(self):
+    assert not self.is_running
+    self.is_running = True
+    return self.ports
+
+  def StopServer(self):
+    assert self.is_running
+    self.is_running = False
+
+
+class TestNetworkControllerBackend(
+    network_controller_backend.NetworkControllerBackend):
+  """Expose some private properties for testing purposes."""
+
+  @property
+  def wpr_ca_cert_path(self):
+    return self._wpr_ca_cert_path
+
+  @property
+  def replay_server(self):
+    return self._wpr_server
+
+  @property
+  def forwarder(self):
+    return self._forwarder
+
+  @property
+  def platform_backend(self):
+    return self._platform_backend
+
+
+class NetworkControllerBackendTest(unittest.TestCase):
+  def Patch(self, *args, **kwargs):
+    """Patch an object for the duration of a test, and return its mock."""
+    patcher = mock.patch(*args, **kwargs)
+    mock_object = patcher.start()
+    self.addCleanup(patcher.stop)
+    return mock_object
+
+  def PatchImportedModule(self, name):
+    """Shorthand to patch a module imported by network_controller_backend."""
+    return self.Patch(
+        'telemetry.internal.platform.network_controller_backend.%s' % name)
+
+  def setUp(self):
+    # Always use our FakeReplayServer.
+    FakeReplayServer.DEFAULT_PORTS = DEFAULT_PORTS  # Use global defaults.
+    self.Patch(
+        'telemetry.internal.util.webpagereplay.ReplayServer', FakeReplayServer)
+
+    # Pretend that only a predefined set of files exists.
+    def fake_path_exists(filename):
+      return filename in ['some-archive.wpr', 'another-archive.wpr']
+
+    self.Patch('os.path.exists', side_effect=fake_path_exists)
+
+    # Mock some imported modules.
+    mock_certutils = self.PatchImportedModule('certutils')
+    mock_certutils.openssl_import_error = None
+    mock_certutils.generate_dummy_ca_cert.return_value = ('-', '-')
+
+    mock_platformsettings = self.PatchImportedModule('platformsettings')
+    mock_platformsettings.HasSniSupport.return_value = True
+
+    mock_tempfile = self.PatchImportedModule('tempfile')
+    mock_tempfile.mkdtemp.return_value = '[tempdir]'
+
+    self.PatchImportedModule('shutil')
+
+    self.network_controller_backend = TestNetworkControllerBackend(
+        FakePlatformBackend())
+
+  def testOpenCloseController(self):
+    b = self.network_controller_backend
+    self.assertFalse(b.is_open)
+    b.Open(wpr_modes.WPR_REPLAY, ['--some-arg']) # Also installs test CA.
+    self.assertTrue(b.is_open)
+    self.assertTrue(b.is_test_ca_installed)
+    self.assertTrue(b.platform_backend.is_test_ca_installed)
+    b.Close() # Also removes test CA.
+    self.assertFalse(b.is_open)
+    self.assertFalse(b.is_test_ca_installed)
+    self.assertFalse(b.platform_backend.is_test_ca_installed)
+    b.Close()  # It's fine to close a closed controller.
+    self.assertFalse(b.is_open)
+
+  def testOpeningOpenControllerRaises(self):
+    b = self.network_controller_backend
+    b.Open(wpr_modes.WPR_REPLAY, ['--some-arg'])
+    with self.assertRaises(AssertionError):
+      b.Open(wpr_modes.WPR_REPLAY, ['--some-arg'])
+
+  def testInstallTestCaFailure(self):
+    b = self.network_controller_backend
+    b.platform_backend.faulty_cert_installer = True
+    b.Open(wpr_modes.WPR_REPLAY, ['--some-arg']) # Try to install test CA.
+
+    # Test CA is not installed, but the controller is otherwise open and safe
+    # to use.
+    self.assertTrue(b.is_open)
+    self.assertFalse(b.is_test_ca_installed)
+    self.assertFalse(b.platform_backend.is_test_ca_installed)
+    b.StartReplay('some-archive.wpr')
+    self.assertTrue(b.is_replay_active)
+
+    b.Close() # No test CA to remove.
+    self.assertFalse(b.is_open)
+    self.assertFalse(b.is_test_ca_installed)
+    self.assertFalse(b.platform_backend.is_test_ca_installed)
+
+  def testStartStopReplay(self):
+    b = self.network_controller_backend
+    b.Open(wpr_modes.WPR_REPLAY, ['--some-arg'])
+    self.assertFalse(b.is_replay_active)
+
+    b.StartReplay('some-archive.wpr')
+    self.assertTrue(b.is_replay_active)
+    self.assertTrue(b.replay_server.is_running)
+    self.assertIsNotNone(b.forwarder.port_pairs)
+
+    old_replay_server = b.replay_server
+    old_forwarder = b.forwarder
+    b.StopReplay()
+    self.assertFalse(b.is_replay_active)
+    self.assertFalse(old_replay_server.is_running)
+    self.assertIsNone(old_forwarder.port_pairs)
+    self.assertTrue(b.is_open)  # Controller is still open.
+
+    b.Close()
+    self.assertFalse(b.is_open)
+
+  def testClosingControllerAlsoStopsReplay(self):
+    b = self.network_controller_backend
+    b.Open(wpr_modes.WPR_REPLAY, ['--some-arg'])
+    b.StartReplay('some-archive.wpr')
+    self.assertTrue(b.is_replay_active)
+    self.assertTrue(b.replay_server.is_running)
+    self.assertIsNotNone(b.forwarder.port_pairs)
+
+    old_replay_server = b.replay_server
+    old_forwarder = b.forwarder
+    b.Close()
+    self.assertFalse(b.is_replay_active)
+    self.assertFalse(old_replay_server.is_running)
+    self.assertIsNone(old_forwarder.port_pairs)
+    self.assertFalse(b.is_open)
+
+  def testReplayOnClosedControllerRaises(self):
+    b = self.network_controller_backend
+    self.assertFalse(b.is_open)
+    with self.assertRaises(AssertionError):
+      b.StartReplay('some-archive.wpr')
+
+  def testReplayWithSameArgsReuseServer(self):
+    b = self.network_controller_backend
+    b.Open(wpr_modes.WPR_REPLAY, ['--some-arg'])
+
+    b.StartReplay('some-archive.wpr')
+    self.assertTrue(b.is_replay_active)
+    self.assertTrue(b.replay_server.is_running)
+
+    old_replay_server = b.replay_server
+    b.StartReplay('some-archive.wpr')
+    self.assertTrue(b.is_replay_active)
+    self.assertIs(b.replay_server, old_replay_server)
+    self.assertTrue(b.replay_server.is_running)
+
+  def testReplayWithDifferentArgsUseDifferentServer(self):
+    b = self.network_controller_backend
+    b.Open(wpr_modes.WPR_REPLAY, ['--some-arg'])
+
+    b.StartReplay('some-archive.wpr')
+    self.assertTrue(b.is_replay_active)
+    self.assertTrue(b.replay_server.is_running)
+
+    old_replay_server = b.replay_server
+    b.StartReplay('another-archive.wpr')
+    self.assertTrue(b.is_replay_active)
+    self.assertIsNot(b.replay_server, old_replay_server)
+    self.assertTrue(b.replay_server.is_running)
+    self.assertFalse(old_replay_server.is_running)
+
+  def testReplayWithoutArchivePathDoesNotStopReplay(self):
+    b = self.network_controller_backend
+    b.Open(wpr_modes.WPR_REPLAY, ['--some-arg'])
+
+    b.StartReplay('some-archive.wpr')
+    self.assertTrue(b.is_replay_active)
+    self.assertTrue(b.replay_server.is_running)
+    old_replay_server = b.replay_server
+
+    b.StartReplay(None)
+    self.assertTrue(b.is_replay_active)
+    self.assertIs(b.replay_server, old_replay_server)
+    self.assertTrue(b.replay_server.is_running)
+    self.assertEqual(b.replay_server.archive_path, 'some-archive.wpr')
+
+  def testModeOffDoesNotCreateReplayServer(self):
+    b = self.network_controller_backend
+    b.Open(wpr_modes.WPR_OFF, ['--some-arg'])
+    b.StartReplay('may-or-may-not-exist.wpr')
+    self.assertFalse(b.is_replay_active)
+    self.assertIsNone(b.replay_server)
+    self.assertIsNone(b.forwarder)
+
+  def testBadArchivePathRaises(self):
+    b = self.network_controller_backend
+    b.Open(wpr_modes.WPR_REPLAY, ['--some-arg'])
+    with self.assertRaises(network_controller_backend.ArchiveDoesNotExistError):
+      b.StartReplay('does-not-exist.wpr')
+
+  def testBadArchivePathOnRecordIsOkay(self):
+    b = self.network_controller_backend
+    b.Open(wpr_modes.WPR_RECORD, ['--some-arg'])
+    b.StartReplay('does-not-exist-yet.wpr')  # Does not raise.
+    self.assertTrue(b.is_replay_active)
+
+  def testReplayServerSettings(self):
+    b = self.network_controller_backend
+    b.Open(wpr_modes.WPR_RECORD, ['--some-arg'])
+    b.StartReplay('some-archive.wpr')
+
+    # Externally visible properties
+    self.assertTrue(b.is_replay_active)
+    self.assertEqual(b.host_ip, FORWARDER_HOST_IP)
+    self.assertEqual(b.wpr_device_ports, DEFAULT_PORTS)
+
+    # Private replay server settings.
+    self.assertTrue(b.replay_server.is_running)
+    self.assertEqual(b.replay_server.archive_path, 'some-archive.wpr')
+    self.assertEqual(b.replay_server.host_ip, FORWARDER_HOST_IP)
+    self.assertEqual(b.replay_server.replay_args, [
+        '--some-arg', '--record', '--inject_scripts=',
+        '--should_generate_certs',
+        '--https_root_ca_cert_path=%s' % EXPECTED_WPR_CA_CERT_PATH])
+
+  def testReplayServerOffSettings(self):
+    b = self.network_controller_backend
+    b.platform_backend.wpr_ca_cert_path = 'CERT_FILE'
+    b.Open(wpr_modes.WPR_OFF, ['--some-arg'])
+    b.StartReplay('some-archive.wpr')
+
+    self.assertFalse(b.is_replay_active)
+    self.assertEqual(b.host_ip, FORWARDER_HOST_IP)
+    self.assertEqual(b.wpr_device_ports, None)
+    self.assertIsNone(b.replay_server)
+
+  def testUseDefaultPorts(self):
+    b = self.network_controller_backend
+    b.Open(wpr_modes.WPR_REPLAY, ['--some-arg'])
+    b.StartReplay('some-archive.wpr')
+    self.assertEqual(b.replay_server.ports, DEFAULT_PORTS)
+    self.assertEqual(b.wpr_device_ports, DEFAULT_PORTS)
+
+    # Invariant
+    self.assertEqual(b.forwarder.port_pairs.local_ports, b.replay_server.ports)
+    self.assertEqual(b.forwarder.port_pairs.remote_ports, b.wpr_device_ports)
+
+  def testUseDefaultLocalPorts(self):
+    b = self.network_controller_backend
+    b.platform_backend.SetWprPortPairs(
+        http=(0, 8888), https=(0, 4444), dns=(0, 2222))
+    b.Open(wpr_modes.WPR_REPLAY, ['--some-arg'])
+    b.StartReplay('some-archive.wpr')
+    self.assertEqual(b.replay_server.ports, DEFAULT_PORTS)
+    self.assertEqual(b.wpr_device_ports, forwarders.PortSet(8888, 4444, 2222))
+
+    # Invariant
+    self.assertEqual(b.forwarder.port_pairs.local_ports, b.replay_server.ports)
+    self.assertEqual(b.forwarder.port_pairs.remote_ports, b.wpr_device_ports)
+
+  def testUseSpecificPorts(self):
+    b = self.network_controller_backend
+    b.platform_backend.SetWprPortPairs(
+        http=(88, 8888), https=(44, 4444), dns=None)
+    b.Open(wpr_modes.WPR_REPLAY, ['--some-arg'])
+    b.StartReplay('some-archive.wpr')
+    self.assertEqual(b.replay_server.ports, forwarders.PortSet(88, 44, None))
+    self.assertEqual(b.wpr_device_ports, forwarders.PortSet(8888, 4444, None))
+
+    # Invariant
+    self.assertEqual(b.forwarder.port_pairs.local_ports, b.replay_server.ports)
+    self.assertEqual(b.forwarder.port_pairs.remote_ports, b.wpr_device_ports)
+
+  def testRestartReplayShouldReusePorts(self):
+    FakeReplayServer.DEFAULT_PORTS = forwarders.PortSet(123, 456, 789)
+    b = self.network_controller_backend
+    b.Open(wpr_modes.WPR_REPLAY, ['--some-arg'])
+    b.StartReplay('some-archive.wpr')
+    self.assertEqual(b.wpr_device_ports, forwarders.PortSet(123, 456, 789))
+
+    # If replay restarts, the factory may use a different set of default ports.
+    FakeReplayServer.DEFAULT_PORTS = forwarders.PortSet(987, 654, 321)
+    b.StartReplay('another-archive.wpr')
+
+    # However, the same ports must be used, because apps/browsers may already
+    # be configured to use the old set of ports.
+    self.assertEqual(b.wpr_device_ports, forwarders.PortSet(123, 456, 789))
+
+  def testNewControllerSessionMayUseDifferentPorts(self):
+    FakeReplayServer.DEFAULT_PORTS = forwarders.PortSet(123, 456, 789)
+    b = self.network_controller_backend
+    b.Open(wpr_modes.WPR_REPLAY, ['--some-arg'])
+    b.StartReplay('some-archive.wpr')
+    self.assertEqual(b.wpr_device_ports, forwarders.PortSet(123, 456, 789))
+    b.Close()
+
+    # If replay restarts, the factory may use a different set of default ports.
+    FakeReplayServer.DEFAULT_PORTS = forwarders.PortSet(987, 654, 321)
+    b.Open(wpr_modes.WPR_REPLAY, ['--some-arg'])
+    b.StartReplay('some-archive.wpr')
+
+    # This time the network controller session was closed between replays,
+    # so it's fine to use a different set of ports.
+    self.assertEqual(b.wpr_device_ports, forwarders.PortSet(987, 654, 321))
diff --git a/catapult/telemetry/telemetry/internal/platform/platform_backend.py b/catapult/telemetry/telemetry/internal/platform/platform_backend.py
new file mode 100644
index 0000000..3b769bc
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/platform_backend.py
@@ -0,0 +1,295 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import weakref
+
+from telemetry.internal import forwarders
+from telemetry.internal.forwarders import do_nothing_forwarder
+from telemetry.internal.platform import network_controller_backend
+from telemetry.internal.platform import tracing_controller_backend
+
+
+# pylint: disable=unused-argument
+
+class PlatformBackend(object):
+
+  def __init__(self, device=None):
+    """ Initalize an instance of PlatformBackend from a device optionally.
+      Call sites need to use SupportsDevice before intialization to check
+      whether this platform backend supports the device.
+      If device is None, this constructor returns the host platform backend
+      which telemetry is running on.
+
+      Args:
+        device: an instance of telemetry.core.platform.device.Device.
+    """
+    if device and not self.SupportsDevice(device):
+      raise ValueError('Unsupported device: %s' % device.name)
+    self._platform = None
+    self._running_browser_backends = weakref.WeakSet()
+    self._network_controller_backend = None
+    self._tracing_controller_backend = None
+    self._forwarder_factory = None
+
+  def InitPlatformBackend(self):
+    self._network_controller_backend = (
+        network_controller_backend.NetworkControllerBackend(self))
+    self._tracing_controller_backend = (
+        tracing_controller_backend.TracingControllerBackend(self))
+
+  @classmethod
+  def IsPlatformBackendForHost(cls):
+    """ Returns whether this platform backend is the platform backend to be used
+    for the host device which telemetry is running on. """
+    return False
+
+  @classmethod
+  def SupportsDevice(cls, device):
+    """ Returns whether this platform backend supports intialization from the
+    device. """
+    return False
+
+  @classmethod
+  def CreatePlatformForDevice(cls, device, finder_options):
+    raise NotImplementedError
+
+  def SetPlatform(self, platform):
+    assert self._platform is None
+    self._platform = platform
+
+  @property
+  def platform(self):
+    return self._platform
+
+  @property
+  def is_host_platform(self):
+    return self._platform.is_host_platform
+
+  @property
+  def running_browser_backends(self):
+    return list(self._running_browser_backends)
+
+  @property
+  def network_controller_backend(self):
+    return self._network_controller_backend
+
+  @property
+  def tracing_controller_backend(self):
+    return self._tracing_controller_backend
+
+  @property
+  def forwarder_factory(self):
+    if not self._forwarder_factory:
+      self._forwarder_factory = do_nothing_forwarder.DoNothingForwarderFactory()
+    return self._forwarder_factory
+
+  def GetRemotePort(self, port):
+    return port
+
+  def DidCreateBrowser(self, browser, browser_backend):
+    browser_options = browser_backend.browser_options
+    self.SetFullPerformanceModeEnabled(browser_options.full_performance_mode)
+
+  def DidStartBrowser(self, browser, browser_backend):
+    assert browser not in self._running_browser_backends
+    self._running_browser_backends.add(browser_backend)
+
+  def WillCloseBrowser(self, browser, browser_backend):
+    is_last_browser = len(self._running_browser_backends) <= 1
+    if is_last_browser:
+      self.SetFullPerformanceModeEnabled(False)
+
+    self._running_browser_backends.discard(browser_backend)
+
+  def GetWprPortPairs(self):
+    """Return suitable port pairs to be used for web page replay."""
+    return forwarders.PortPairs(
+        http=forwarders.PortPair(0, 0),
+        https=forwarders.PortPair(0, 0),
+        dns=None)
+
+  def IsDisplayTracingSupported(self):
+    return False
+
+  def StartDisplayTracing(self):
+    """Start gathering a trace with frame timestamps close to physical
+    display."""
+    raise NotImplementedError()
+
+  def StopDisplayTracing(self):
+    """Stop gathering a trace with frame timestamps close to physical display.
+
+    Returns raw tracing events that contain the timestamps of the physical
+    display.
+    """
+    raise NotImplementedError()
+
+  def SetFullPerformanceModeEnabled(self, enabled):
+    pass
+
+  def CanMonitorThermalThrottling(self):
+    return False
+
+  def IsThermallyThrottled(self):
+    raise NotImplementedError()
+
+  def HasBeenThermallyThrottled(self):
+    raise NotImplementedError()
+
+  def GetSystemCommitCharge(self):
+    raise NotImplementedError()
+
+  def GetSystemTotalPhysicalMemory(self):
+    raise NotImplementedError()
+
+  def GetCpuStats(self, pid):
+    return {}
+
+  def GetCpuTimestamp(self):
+    return {}
+
+  def PurgeUnpinnedMemory(self):
+    pass
+
+  def GetMemoryStats(self, pid):
+    return {}
+
+  def GetChildPids(self, pid):
+    raise NotImplementedError()
+
+  def GetCommandLine(self, pid):
+    raise NotImplementedError()
+
+  def GetDeviceTypeName(self):
+    raise NotImplementedError()
+
+  def GetArchName(self):
+    raise NotImplementedError()
+
+  def GetOSName(self):
+    raise NotImplementedError()
+
+  def GetOSVersionName(self):
+    raise NotImplementedError()
+
+  def CanFlushIndividualFilesFromSystemCache(self):
+    raise NotImplementedError()
+
+  def FlushEntireSystemCache(self):
+    raise NotImplementedError()
+
+  def FlushSystemCacheForDirectory(self, directory):
+    raise NotImplementedError()
+
+  def FlushDnsCache(self):
+    pass
+
+  def LaunchApplication(
+      self, application, parameters=None, elevate_privilege=False):
+    raise NotImplementedError()
+
+  def IsApplicationRunning(self, application):
+    raise NotImplementedError()
+
+  def CanLaunchApplication(self, application):
+    return False
+
+  def InstallApplication(self, application):
+    raise NotImplementedError()
+
+  def CanCaptureVideo(self):
+    return False
+
+  def StartVideoCapture(self, min_bitrate_mbps):
+    raise NotImplementedError()
+
+  @property
+  def is_video_capture_running(self):
+    return False
+
+  def StopVideoCapture(self):
+    raise NotImplementedError()
+
+  def CanMonitorPower(self):
+    return False
+
+  def CanMeasurePerApplicationPower(self):
+    return False
+
+  def StartMonitoringPower(self, browser):
+    raise NotImplementedError()
+
+  def StopMonitoringPower(self):
+    raise NotImplementedError()
+
+  def CanMonitorNetworkData(self):
+    return False
+
+  def GetNetworkData(self, browser):
+    raise NotImplementedError()
+
+  def ReadMsr(self, msr_number, start=0, length=64):
+    """Read a CPU model-specific register (MSR).
+
+    Which MSRs are available depends on the CPU model.
+    On systems with multiple CPUs, this function may run on any CPU.
+
+    Args:
+      msr_number: The number of the register to read.
+      start: The least significant bit to read, zero-indexed.
+          (Said another way, the number of bits to right-shift the MSR value.)
+      length: The number of bits to read. MSRs are 64 bits, even on 32-bit CPUs.
+    """
+    raise NotImplementedError()
+
+  @property
+  def supports_test_ca(self):
+    """Indicates whether the platform supports installing test CA."""
+    return False
+
+  def InstallTestCa(self, ca_cert_path):
+    """Install a test CA on the platform."""
+    raise NotImplementedError()
+
+  def RemoveTestCa(self):
+    """Remove a previously installed test CA from the platform."""
+    raise NotImplementedError()
+
+  def CanTakeScreenshot(self):
+    return False
+
+  def TakeScreenshot(self, file_path):
+    raise NotImplementedError()
+
+  def IsCooperativeShutdownSupported(self):
+    """Indicates whether CooperativelyShutdown, below, is supported.
+    It is not necessary to implement it on all platforms."""
+    return False
+
+  def CooperativelyShutdown(self, proc, app_name):
+    """Cooperatively shut down the given process from subprocess.Popen.
+
+    Currently this is only implemented on Windows. See
+    crbug.com/424024 for background on why it was added.
+
+    Args:
+      proc: a process object returned from subprocess.Popen.
+      app_name: on Windows, is the prefix of the application's window
+          class name that should be searched for. This helps ensure
+          that only the application's windows are closed.
+
+    Returns True if it is believed the attempt succeeded.
+    """
+    raise NotImplementedError()
+
+  def PathExists(self, path, timeout=None, retries=None):
+    """Tests whether the given path exists on the target platform.
+    Args:
+      path: the path to check.
+      timeout: the timeout for the check.
+      retries: the number of retries.
+    Returns:
+      Whether the path exists on the target platform.
+    """
+    raise NotImplementedError()
diff --git a/catapult/telemetry/telemetry/internal/platform/platform_backend_unittest.py b/catapult/telemetry/telemetry/internal/platform/platform_backend_unittest.py
new file mode 100644
index 0000000..b2e9e98
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/platform_backend_unittest.py
@@ -0,0 +1,38 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import time
+import unittest
+
+from telemetry.core import platform as platform_module
+from telemetry import decorators
+
+
+class PlatformBackendTest(unittest.TestCase):
+  @decorators.Disabled('mac',       # crbug.com/440666
+                       'vista',     # crbug.com/479337
+                       'chromeos')  # crbug.com/483212
+  def testPowerMonitoringSync(self):
+    # Tests that the act of monitoring power doesn't blow up.
+    platform = platform_module.GetHostPlatform()
+    can_monitor_power = platform.CanMonitorPower()
+    self.assertIsInstance(can_monitor_power, bool)
+    if not can_monitor_power:
+      logging.warning('Test not supported on this platform.')
+      return
+
+    browser_mock = lambda: None
+    # Android needs to access the package of the monitored app.
+    if platform.GetOSName() == 'android':
+      # pylint: disable=protected-access
+      browser_mock._browser_backend = lambda: None
+      # Monitor the launcher, which is always present.
+      browser_mock._browser_backend.package = 'com.android.launcher'
+
+    platform.StartMonitoringPower(browser_mock)
+    time.sleep(0.001)
+    output = platform.StopMonitoringPower()
+    self.assertTrue(output.has_key('energy_consumption_mwh'))
+    self.assertTrue(output.has_key('identifier'))
diff --git a/catapult/telemetry/telemetry/internal/platform/posix_platform_backend.py b/catapult/telemetry/telemetry/internal/platform/posix_platform_backend.py
new file mode 100644
index 0000000..d20dd1f
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/posix_platform_backend.py
@@ -0,0 +1,153 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import distutils.spawn
+import logging
+import os
+import re
+import stat
+import subprocess
+import sys
+
+from telemetry.internal.platform import desktop_platform_backend
+from telemetry.internal.util import ps_util
+
+
+def _BinaryExistsInSudoersFiles(path, sudoers_file_contents):
+  """Returns True if the binary in |path| features in the sudoers file.
+  """
+  for line in sudoers_file_contents.splitlines():
+    if re.match(r'\s*\(.+\) NOPASSWD: %s(\s\S+)*$' % re.escape(path), line):
+      return True
+  return False
+
+
+def _CanRunElevatedWithSudo(path):
+  """Returns True if the binary at |path| appears in the sudoers file.
+  If this function returns true then the binary at |path| can be run via sudo
+  without prompting for a password.
+  """
+  sudoers = subprocess.check_output(['/usr/bin/sudo', '-l'])
+  return _BinaryExistsInSudoersFiles(path, sudoers)
+
+
+class PosixPlatformBackend(desktop_platform_backend.DesktopPlatformBackend):
+
+  # This is an abstract class. It is OK to have abstract methods.
+  # pylint: disable=abstract-method
+
+  def RunCommand(self, args):
+    return subprocess.Popen(args, stdout=subprocess.PIPE).communicate()[0]
+
+  def GetFileContents(self, path):
+    with open(path, 'r') as f:
+      return f.read()
+
+  def GetPsOutput(self, columns, pid=None):
+    """Returns output of the 'ps' command as a list of lines.
+    Subclasses should override this function.
+
+    Args:
+      columns: A list of required columns, e.g., ['pid', 'pss'].
+      pid: If not None, returns only the information of the process
+         with the pid.
+    """
+    return ps_util.GetPsOutputWithPlatformBackend(self, columns, pid)
+
+  def _GetTopOutput(self, pid, columns):
+    """Returns output of the 'top' command as a list of lines.
+
+    Args:
+      pid: pid of process to examine.
+      columns: A list of required columns, e.g., ['idlew', 'vsize'].
+    """
+    args = ['top']
+    args.extend(['-pid', str(pid), '-l', '1', '-s', '0', '-stats',
+        ','.join(columns)])
+    return self.RunCommand(args).splitlines()
+
+  def GetChildPids(self, pid):
+    """Returns a list of child pids of |pid|."""
+    ps_output = self.GetPsOutput(['pid', 'ppid', 'state'])
+    ps_line_re = re.compile(
+        r'\s*(?P<pid>\d+)\s*(?P<ppid>\d+)\s*(?P<state>\S*)\s*')
+    processes = []
+    for pid_ppid_state in ps_output:
+      m = ps_line_re.match(pid_ppid_state)
+      assert m, 'Did not understand ps output: %s' % pid_ppid_state
+      processes.append((m.group('pid'), m.group('ppid'), m.group('state')))
+    return ps_util.GetChildPids(processes, pid)
+
+  def GetCommandLine(self, pid):
+    command = self.GetPsOutput(['command'], pid)
+    return command[0] if command else None
+
+  def CanLaunchApplication(self, application):
+    return bool(distutils.spawn.find_executable(application))
+
+  def IsApplicationRunning(self, application):
+    ps_output = self.GetPsOutput(['command'])
+    application_re = re.compile(
+        r'(.*%s|^)%s(\s|$)' % (os.path.sep, application))
+    return any(application_re.match(cmd) for cmd in ps_output)
+
+  def LaunchApplication(
+      self, application, parameters=None, elevate_privilege=False):
+    assert application, 'Must specify application to launch'
+
+    if os.path.sep not in application:
+      application = distutils.spawn.find_executable(application)
+      assert application, 'Failed to find application in path'
+
+    args = [application]
+
+    if parameters:
+      assert isinstance(parameters, list), 'parameters must be a list'
+      args += parameters
+
+    def IsElevated():
+      """ Returns True if the current process is elevated via sudo i.e. running
+      sudo will not prompt for a password. Returns False if not authenticated
+      via sudo or if telemetry is run on a non-interactive TTY."""
+      # `sudo -v` will always fail if run from a non-interactive TTY.
+      p = subprocess.Popen(
+          ['/usr/bin/sudo', '-nv'], stdin=subprocess.PIPE,
+          stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+      stdout = p.communicate()[0]
+      # Some versions of sudo set the returncode based on whether sudo requires
+      # a password currently. Other versions return output when password is
+      # required and no output when the user is already authenticated.
+      return not p.returncode and not stdout
+
+    def IsSetUID(path):
+      """Returns True if the binary at |path| has the setuid bit set."""
+      return (os.stat(path).st_mode & stat.S_ISUID) == stat.S_ISUID
+
+    if elevate_privilege and not IsSetUID(application):
+      args = ['/usr/bin/sudo'] + args
+      if not _CanRunElevatedWithSudo(application) and not IsElevated():
+        if not sys.stdout.isatty():
+          # Without an interactive terminal (or a configured 'askpass', but
+          # that is rarely relevant), there's no way to prompt the user for
+          # sudo. Fail with a helpful error message. For more information, see:
+          #   https://code.google.com/p/chromium/issues/detail?id=426720
+          text = ('Telemetry needs to run %s with elevated privileges, but the '
+                 'setuid bit is not set and there is no interactive terminal '
+                 'for a prompt. Please ask an administrator to set the setuid '
+                 'bit on this executable and ensure that it is owned by a user '
+                 'with the necessary privileges. Aborting.' % application)
+          print text
+          raise Exception(text)
+        # Else, there is a tty that can be used for a useful interactive prompt.
+        print ('Telemetry needs to run %s under sudo. Please authenticate.' %
+               application)
+        # Synchronously authenticate.
+        subprocess.check_call(['/usr/bin/sudo', '-v'])
+
+    stderror_destination = subprocess.PIPE
+    if logging.getLogger().isEnabledFor(logging.DEBUG):
+      stderror_destination = None
+
+    return subprocess.Popen(
+        args, stdout=subprocess.PIPE, stderr=stderror_destination)
diff --git a/catapult/telemetry/telemetry/internal/platform/posix_platform_backend_unittest.py b/catapult/telemetry/telemetry/internal/platform/posix_platform_backend_unittest.py
new file mode 100644
index 0000000..e139980
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/posix_platform_backend_unittest.py
@@ -0,0 +1,87 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import os
+import sys
+import unittest
+
+from telemetry.core import platform as platform_module
+from telemetry import decorators
+from telemetry.internal.platform import posix_platform_backend
+
+
+class TestBackend(posix_platform_backend.PosixPlatformBackend):
+
+  # pylint: disable=abstract-method
+
+  def __init__(self):
+    super(TestBackend, self).__init__()
+    self._mock_ps_output = None
+
+  def SetMockPsOutput(self, output):
+    self._mock_ps_output = output
+
+  def GetPsOutput(self, columns, pid=None):
+    return self._mock_ps_output
+
+
+class PosixPlatformBackendTest(unittest.TestCase):
+
+  def testGetChildPidsWithGrandChildren(self):
+    backend = TestBackend()
+    backend.SetMockPsOutput(['1 0 S', '2 1 R', '3 2 S', '4 1 R', '5 4 R'])
+    result = backend.GetChildPids(1)
+    self.assertEquals(set(result), set([2, 3, 4, 5]))
+
+  def testGetChildPidsWithNoSuchPid(self):
+    backend = TestBackend()
+    backend.SetMockPsOutput(['1 0 S', '2 1 R', '3 2 S', '4 1 R', '5 4 R'])
+    result = backend.GetChildPids(6)
+    self.assertEquals(set(result), set())
+
+  def testGetChildPidsWithZombieChildren(self):
+    backend = TestBackend()
+    backend.SetMockPsOutput(['1 0 S', '2 1 R', '3 2 S', '4 1 R', '5 4 Z'])
+    result = backend.GetChildPids(1)
+    self.assertEquals(set(result), set([2, 3, 4]))
+
+  def testGetChildPidsWithMissingState(self):
+    backend = TestBackend()
+    backend.SetMockPsOutput(['  1 0 S  ', '  2 1', '3 2 '])
+    result = backend.GetChildPids(1)
+    self.assertEquals(set(result), set([2, 3]))
+
+  def testSudoersFileParsing(self):
+    binary_path = '/usr/bin/pkill'
+    self.assertFalse(
+        posix_platform_backend._BinaryExistsInSudoersFiles(binary_path, ''))
+    self.assertFalse(
+        posix_platform_backend._BinaryExistsInSudoersFiles(
+            binary_path, '    (ALL) ALL'))
+    self.assertFalse(
+        posix_platform_backend._BinaryExistsInSudoersFiles(
+            binary_path, '     (root) NOPASSWD: /usr/bin/pkill_DUMMY'))
+    self.assertFalse(
+        posix_platform_backend._BinaryExistsInSudoersFiles(
+            binary_path, '     (root) NOPASSWD: pkill'))
+
+
+    self.assertTrue(
+        posix_platform_backend._BinaryExistsInSudoersFiles(
+            binary_path, '(root) NOPASSWD: /usr/bin/pkill'))
+    self.assertTrue(
+        posix_platform_backend._BinaryExistsInSudoersFiles(
+            binary_path, '     (root) NOPASSWD: /usr/bin/pkill'))
+    self.assertTrue(
+        posix_platform_backend._BinaryExistsInSudoersFiles(
+            binary_path, '     (root) NOPASSWD: /usr/bin/pkill arg1 arg2'))
+
+  @decorators.Enabled('linux', 'mac')
+  def testIsApplicationRunning(self):
+    platform = platform_module.GetHostPlatform()
+
+    self.assertFalse(platform.IsApplicationRunning('This_Is_A_Bad___App__Name'))
+    sys_exe = os.path.basename(sys.executable)
+    self.assertTrue(platform.IsApplicationRunning(sys_exe))
+    self.assertFalse(
+        platform.IsApplicationRunning('%s append_bad_after_space' % sys_exe))
diff --git a/catapult/telemetry/telemetry/internal/platform/power_monitor/__init__.py b/catapult/telemetry/telemetry/internal/platform/power_monitor/__init__.py
new file mode 100644
index 0000000..4c3cf45
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/power_monitor/__init__.py
@@ -0,0 +1,50 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+
+from telemetry.core import exceptions
+
+
+class PowerMonitor(object):
+  """A power profiler.
+
+  Provides an interface to register power consumption during a test.
+  """
+  def __init__(self):
+    self._monitoring = False
+
+  def CanMonitorPower(self):
+    """Returns True iff power can be monitored asynchronously via
+    StartMonitoringPower() and StopMonitoringPower().
+    """
+    return False
+
+  def CanMeasurePerApplicationPower(self):
+    """Returns True if the power monitor can measure power for the target
+    application in isolation. False if power measurement is for full system
+    energy consumption."""
+    return False
+
+  def _CheckStart(self):
+    assert not self._monitoring, "Already monitoring power."
+    self._monitoring = True
+
+  def _CheckStop(self):
+    assert self._monitoring, "Not monitoring power."
+    self._monitoring = False
+
+  def StartMonitoringPower(self, browser):
+    """Starts monitoring power utilization statistics.
+
+    See Platform#StartMonitoringPower for the arguments format.
+    """
+    raise NotImplementedError()
+
+  def StopMonitoringPower(self):
+    """Stops monitoring power utilization and returns collects stats
+
+    See Platform#StopMonitoringPower for the return format.
+    """
+    raise NotImplementedError()
diff --git a/catapult/telemetry/telemetry/internal/platform/power_monitor/android_dumpsys_power_monitor.py b/catapult/telemetry/telemetry/internal/platform/power_monitor/android_dumpsys_power_monitor.py
new file mode 100644
index 0000000..e7980f4
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/power_monitor/android_dumpsys_power_monitor.py
@@ -0,0 +1,67 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import csv
+import logging
+
+from telemetry.internal.platform.power_monitor import android_power_monitor_base
+
+class DumpsysPowerMonitor(android_power_monitor_base.AndroidPowerMonitorBase):
+  """PowerMonitor that relies on the dumpsys batterystats to monitor the power
+  consumption of a single Android application. This measurement uses a
+  heuristic and reflects the same information end-users see in the battery
+  application.
+  Available on Android L and higher releases.
+  """
+  def __init__(self, battery, platform_backend):
+    """Constructor.
+
+    Args:
+        battery: A BatteryUtil instance.
+        platform_backend: A LinuxBasedPlatformBackend instance.
+    """
+    super(DumpsysPowerMonitor, self).__init__()
+    self._battery = battery
+    self._browser = None
+    self._platform = platform_backend
+
+  def CanMonitorPower(self):
+    result = self._platform.RunCommand('dumpsys batterystats -c')
+    DUMP_VERSION_INDEX = 0
+    # Dumpsys power data is present in dumpsys versions 8 and 9
+    # which are found on L+ devices.
+    return (csv.reader(result).next()[DUMP_VERSION_INDEX] in ['8', '9'])
+
+  def StartMonitoringPower(self, browser):
+    self._CheckStart()
+    assert browser
+    self._browser = browser
+    # Note: charging over USB is expected to be disabled while monitoring
+    # (e.g. by AndroidPowerMonitorController), because the device only
+    # collects information about power usage when it is not charging.
+
+  def StopMonitoringPower(self):
+    self._CheckStop()
+    assert self._browser
+    package = self._browser._browser_backend.package
+    self._browser = None
+
+    voltage = self._ParseVoltage(self._battery.GetBatteryInfo().get('voltage'))
+    power_data = self._battery.GetPowerData()
+    power_results = self.ProcessPowerData(power_data, voltage, package)
+    self._LogPowerAnomalies(power_results, package)
+    return power_results
+
+  @staticmethod
+  def ProcessPowerData(power_data, voltage, package):
+    package_power_data = power_data['per_package'].get(package)
+    if not package_power_data:
+      logging.warning('No power data for %s in dumpsys output.' % package)
+      package_power = 0
+    else:
+      package_power = sum(package_power_data['data'])
+
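+    # dumpsys reports charge in mAh; multiplying by the battery voltage (V)
+    # yields energy in mWh (e.g. 23.9 mAh * 4.0 V = 95.6 mWh, as exercised in
+    # android_dumpsys_power_monitor_unittest).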
+    return {'identifier': 'dumpsys',
+            'power_samples_mw': [],
+            'energy_consumption_mwh': power_data['system_total'] * voltage,
+            'application_energy_consumption_mwh': package_power * voltage}
diff --git a/catapult/telemetry/telemetry/internal/platform/power_monitor/android_dumpsys_power_monitor_unittest.py b/catapult/telemetry/telemetry/internal/platform/power_monitor/android_dumpsys_power_monitor_unittest.py
new file mode 100644
index 0000000..e38ebc5
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/power_monitor/android_dumpsys_power_monitor_unittest.py
@@ -0,0 +1,92 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.internal.platform.power_monitor import android_dumpsys_power_monitor
+from telemetry.internal.platform.power_monitor import pm_mock
+
+
+_PACKAGE = 'com.google.android.apps.chrome'
+
+_TYPICAL_POWER_DATA = {
+      'system_total': 2000.0,
+      'per_package': {
+        _PACKAGE: {'data': [23.9], 'uid': '12345'}
+      }
+    }
+
+_TYPICAL_POWER_DATA_MULTISAMPLE = {
+      'system_total': 2000.0,
+      'per_package': {
+        _PACKAGE: {'data': [23.9, 26.1], 'uid': '12345'}
+      }
+    }
+
+
+class DumpsysPowerMonitorMonitorTest(unittest.TestCase):
+
+  def testApplicationEnergyConsumption(self):
+    results = (
+        android_dumpsys_power_monitor.DumpsysPowerMonitor.ProcessPowerData(
+            _TYPICAL_POWER_DATA, 4.0, _PACKAGE))
+    self.assertEqual(results['identifier'], 'dumpsys')
+    self.assertAlmostEqual(results['application_energy_consumption_mwh'], 95.6)
+
+  def testSystemEnergyConsumption(self):
+    power_data = {
+      'system_total': 2000.0,
+      'per_package': {}
+    }
+    results = (
+        android_dumpsys_power_monitor.DumpsysPowerMonitor.ProcessPowerData(
+            power_data, 4.0, 'some.package'))
+    self.assertEqual(results['identifier'], 'dumpsys')
+    self.assertEqual(results['application_energy_consumption_mwh'], 0)
+    self.assertEqual(results['energy_consumption_mwh'], 8000.0)
+
+  def testMonitorCycle(self):
+    browser = pm_mock.MockBrowser(_PACKAGE)
+    battery = pm_mock.MockBattery(_TYPICAL_POWER_DATA_MULTISAMPLE, voltage=5.0)
+    backend = pm_mock.MockPlatformBackend()
+    pm = android_dumpsys_power_monitor.DumpsysPowerMonitor(battery, backend)
+    pm.StartMonitoringPower(browser)
+    result = pm.StopMonitoringPower()
+    self.assertEqual(result['identifier'], 'dumpsys')
+    self.assertEqual(result['power_samples_mw'], [])
+    self.assertAlmostEqual(result['application_energy_consumption_mwh'], 250.0)
+    self.assertAlmostEqual(result['energy_consumption_mwh'], 10000.0)
+
+  def testDoubleStop(self):
+    browser = pm_mock.MockBrowser(_PACKAGE)
+    battery = pm_mock.MockBattery(_TYPICAL_POWER_DATA_MULTISAMPLE, voltage=5.0)
+    backend = pm_mock.MockPlatformBackend()
+    pm = android_dumpsys_power_monitor.DumpsysPowerMonitor(battery, backend)
+    pm.StartMonitoringPower(browser)
+    pm.StopMonitoringPower()
+    with self.assertRaises(AssertionError):
+      pm.StopMonitoringPower()
+
+  def testDoubleStart(self):
+    browser = pm_mock.MockBrowser(_PACKAGE)
+    battery = pm_mock.MockBattery(_TYPICAL_POWER_DATA_MULTISAMPLE, voltage=5.0)
+    backend = pm_mock.MockPlatformBackend()
+    pm = android_dumpsys_power_monitor.DumpsysPowerMonitor(battery, backend)
+    pm.StartMonitoringPower(browser)
+    with self.assertRaises(AssertionError):
+      pm.StartMonitoringPower(browser)
+
+  def testBatteryChargingState(self):
+    browser = pm_mock.MockBrowser(_PACKAGE)
+    battery = pm_mock.MockBattery(_TYPICAL_POWER_DATA_MULTISAMPLE, voltage=5.0)
+    backend = pm_mock.MockPlatformBackend()
+    pm = android_dumpsys_power_monitor.DumpsysPowerMonitor(battery, backend)
+    self.assertEqual(battery.GetCharging(), True)
+    pm.StartMonitoringPower(browser)
+    self.assertEqual(battery.GetCharging(), True)
+    pm.StopMonitoringPower()
+    self.assertEqual(battery.GetCharging(), True)
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/catapult/telemetry/telemetry/internal/platform/power_monitor/android_fuelgauge_power_monitor.py b/catapult/telemetry/telemetry/internal/platform/power_monitor/android_fuelgauge_power_monitor.py
new file mode 100644
index 0000000..7bd421b
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/power_monitor/android_fuelgauge_power_monitor.py
@@ -0,0 +1,42 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.internal.platform.power_monitor import android_power_monitor_base
+
+
+class FuelGaugePowerMonitor(android_power_monitor_base.AndroidPowerMonitorBase):
+  """PowerMonitor that relies on the fuel gauge chips to monitor the power
+  consumption of an Android device.
+  """
+  def __init__(self, battery):
+    """Constructor.
+
+    Args:
+        battery: A BatteryUtil instance.
+    """
+    super(FuelGaugePowerMonitor, self).__init__()
+    self._battery = battery
+    self._starting_fuel_gauge = None
+
+  def CanMonitorPower(self):
+    return self._battery.SupportsFuelGauge()
+
+  def StartMonitoringPower(self, browser):
+    self._CheckStart()
+    self._starting_fuel_gauge = self._battery.GetFuelGaugeChargeCounter()
+
+  def StopMonitoringPower(self):
+    self._CheckStop()
+    # Convert from nAh to mAh.
+    fuel_gauge_delta = (
+        float((self._starting_fuel_gauge) -
+        self._battery.GetFuelGaugeChargeCounter()) / 1000000)
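+    # fuel_gauge_delta (mAh) times the battery voltage (V) then gives the
+    # energy consumed in mWh (see ProcessPowerData below).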
+    voltage = self._ParseVoltage(self._battery.GetBatteryInfo().get('voltage'))
+    return self.ProcessPowerData(voltage, fuel_gauge_delta)
+
+  @staticmethod
+  def ProcessPowerData(voltage, fuel_gauge_delta):
+    return {'identifier': 'fuel_gauge',
+            'fuel_gauge_energy_consumption_mwh': fuel_gauge_delta * voltage}
diff --git a/catapult/telemetry/telemetry/internal/platform/power_monitor/android_fuelgauge_power_monitor_unittest.py b/catapult/telemetry/telemetry/internal/platform/power_monitor/android_fuelgauge_power_monitor_unittest.py
new file mode 100644
index 0000000..6bce9c3
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/power_monitor/android_fuelgauge_power_monitor_unittest.py
@@ -0,0 +1,48 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.internal.platform.power_monitor import (
+    android_fuelgauge_power_monitor)
+from telemetry.internal.platform.power_monitor import pm_mock
+
+
+class FuelGaugePowerMonitorMonitorTest(unittest.TestCase):
+
+  def testEnergyConsumption(self):
+    fuel_gauge_delta = 100
+    results = (
+        android_fuelgauge_power_monitor.FuelGaugePowerMonitor.ProcessPowerData(
+            4.0, fuel_gauge_delta))
+    self.assertEqual(results['identifier'], 'fuel_gauge')
+    self.assertEqual(
+        results.get('fuel_gauge_energy_consumption_mwh'), 400)
+
+  def testMonitorCycle(self):
+    battery = pm_mock.MockBattery(None, voltage=5.0, fuelgauge=[5.e6, 3.e6])
+    pm = android_fuelgauge_power_monitor.FuelGaugePowerMonitor(battery)
+    pm.StartMonitoringPower(None)
+    results = pm.StopMonitoringPower()
+    self.assertEqual(results['identifier'], 'fuel_gauge')
+    self.assertAlmostEqual(results['fuel_gauge_energy_consumption_mwh'], 10)
+
+  def testDoubleStop(self):
+    battery = pm_mock.MockBattery(None, voltage=5.0, fuelgauge=[5.e6, 3.e6])
+    pm = android_fuelgauge_power_monitor.FuelGaugePowerMonitor(battery)
+    pm.StartMonitoringPower(None)
+    pm.StopMonitoringPower()
+    with self.assertRaises(AssertionError):
+      pm.StopMonitoringPower()
+
+  def testDoubleStart(self):
+    battery = pm_mock.MockBattery(None, voltage=5.0, fuelgauge=[5.e6, 3.e6])
+    pm = android_fuelgauge_power_monitor.FuelGaugePowerMonitor(battery)
+    pm.StartMonitoringPower(None)
+    with self.assertRaises(AssertionError):
+      pm.StartMonitoringPower(None)
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/catapult/telemetry/telemetry/internal/platform/power_monitor/android_power_monitor_base.py b/catapult/telemetry/telemetry/internal/platform/power_monitor/android_power_monitor_base.py
new file mode 100644
index 0000000..afcc87d
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/power_monitor/android_power_monitor_base.py
@@ -0,0 +1,36 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+
+from telemetry.internal.platform import power_monitor
+
+
+class AndroidPowerMonitorBase(power_monitor.PowerMonitor):
+
+  # Abstract class.
+  # pylint: disable=abstract-method
+
+  def _ParseVoltage(self, millivolts):
+    # Parse voltage information.
+    # If voltage is None, use 4.0 as default.
+    # Otherwise, convert millivolts to volts.
+    if millivolts is None:
+      # Converting at a nominal voltage of 4.0V, as those values are obtained by
+      # a heuristic, and 4.0V is the voltage we set when using a monsoon device.
+      voltage = 4.0
+      logging.warning('Unable to get device voltage. Using %s.', voltage)
+    else:
+      voltage = float(millivolts) / 1000
+      logging.info('Device voltage at %s', voltage)
+    return voltage
+
+  def _LogPowerAnomalies(self, power_data, package):
+    # Log anomalies in power data.
+    if power_data['energy_consumption_mwh'] == 0:
+      logging.warning('Power data is returning 0 for system total usage. %s'
+                      % (power_data))
+      if power_data['application_energy_consumption_mwh'] == 0:
+        logging.warning('Power data is returning 0 usage for %s. %s'
+                        % (package, power_data))
diff --git a/catapult/telemetry/telemetry/internal/platform/power_monitor/android_power_monitor_controller.py b/catapult/telemetry/telemetry/internal/platform/power_monitor/android_power_monitor_controller.py
new file mode 100644
index 0000000..dcd2d29
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/power_monitor/android_power_monitor_controller.py
@@ -0,0 +1,90 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import atexit
+import logging
+
+from telemetry.internal.platform.power_monitor import android_power_monitor_base
+
+def _ReenableChargingIfNeeded(battery):
+  if not battery.GetCharging():
+    battery.SetCharging(True)
+
+class AndroidPowerMonitorController(
+    android_power_monitor_base.AndroidPowerMonitorBase):
+  """
+  PowerMonitor that acts as a facade for a list of PowerMonitor objects,
+  starting every monitor that can monitor power and merging their results.
+  """
+  def __init__(self, power_monitors, battery):
+    super(AndroidPowerMonitorController, self).__init__()
+    self._candidate_power_monitors = power_monitors
+    self._active_monitors = []
+    self._battery = battery
+    atexit.register(_ReenableChargingIfNeeded, self._battery)
+
+  def CanMonitorPower(self):
+    return any(m.CanMonitorPower() for m in self._candidate_power_monitors)
+
+  def StartMonitoringPower(self, browser):
+    # TODO(rnephew): re-add assert when crbug.com/553601 is solved and
+    # StopMonitoringPower is called in the correct place.
+    if self._active_monitors:
+      logging.warning('StopMonitoringPower() not called when expected. Last '
+                      'results are likely not reported.')
+      self.StopMonitoringPower()
+    self._CheckStart()
+    self._ChargingOff(self._battery)
+    self._active_monitors = (
+        [m for m in self._candidate_power_monitors if m.CanMonitorPower()])
+    assert self._active_monitors, 'No available monitor.'
+    for monitor in self._active_monitors:
+      monitor.StartMonitoringPower(browser)
+
+  @staticmethod
+  def _MergePowerResults(combined_results, monitor_results):
+    """
+    Merges monitor_results into combined_results, keeping the monitor_results
+    values when there are merge conflicts.
+    """
+    def _CheckDuplicateKeys(dict_one, dict_two, ignore_list=None):
+      ignore_list = ignore_list or []
+      for key in dict_one:
+        if key in dict_two and key not in ignore_list:
+          logging.warning('Found multiple instances of %s in power monitor '
+                          'entries. Using newest one.', key)
+    # Sub level power entries.
+    for part in ['platform_info', 'component_utilization']:
+      if part in monitor_results:
+        _CheckDuplicateKeys(combined_results[part], monitor_results[part])
+        combined_results[part].update(monitor_results[part])
+
+    # Top level power entries.
+    platform_info = combined_results['platform_info'].copy()
+    comp_utilization = combined_results['component_utilization'].copy()
+    _CheckDuplicateKeys(
+        combined_results, monitor_results,
+        ['identifier', 'platform_info', 'component_utilization'])
+    combined_results.update(monitor_results)
+    combined_results['platform_info'] = platform_info
+    combined_results['component_utilization'] = comp_utilization
+
+  def StopMonitoringPower(self):
+    self._CheckStop()
+    self._ChargingOn(self._battery)
+    try:
+      results = {'platform_info': {}, 'component_utilization': {}}
+      for monitor in self._active_monitors:
+        self._MergePowerResults(results, monitor.StopMonitoringPower())
+      return results
+    finally:
+      self._active_monitors = []
+
+  def _ChargingOff(self, battery):
+    battery.SetCharging(False)
+
+  def _ChargingOn(self, battery):
+    if battery.GetCharging():
+      logging.warning('Charging re-enabled during test. '
+                      'Results may be inaccurate.')
+    battery.SetCharging(True)
diff --git a/catapult/telemetry/telemetry/internal/platform/power_monitor/android_power_monitor_controller_unittest.py b/catapult/telemetry/telemetry/internal/platform/power_monitor/android_power_monitor_controller_unittest.py
new file mode 100644
index 0000000..a77c3cc
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/power_monitor/android_power_monitor_controller_unittest.py
@@ -0,0 +1,89 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.internal.platform import power_monitor as power_monitor
+from telemetry.internal.platform.power_monitor import (
+  android_power_monitor_controller)
+import mock
+from devil.android import battery_utils
+
+# pylint: disable=import-error, unused-argument
+
+
+class AndroidPowerMonitorControllerTest(unittest.TestCase):
+  @mock.patch.object(battery_utils, 'BatteryUtils')
+  def testComposition(self, _):
+
+    class P1(power_monitor.PowerMonitor):
+      def StartMonitoringPower(self, browser):
+        raise NotImplementedError()
+      def StopMonitoringPower(self):
+        raise NotImplementedError()
+
+    class P2(power_monitor.PowerMonitor):
+      def __init__(self, value):
+        super(P2, self).__init__()
+        self._value = {'P2': value}
+      def CanMonitorPower(self):
+        return True
+      def StartMonitoringPower(self, browser):
+        pass
+      def StopMonitoringPower(self):
+        return self._value
+
+    class P3(power_monitor.PowerMonitor):
+      def __init__(self, value):
+        super(P3, self).__init__()
+        self._value = {'P3': value}
+      def CanMonitorPower(self):
+        return True
+      def StartMonitoringPower(self, browser):
+        pass
+      def StopMonitoringPower(self):
+        return self._value
+
+    battery = battery_utils.BatteryUtils(None)
+    controller = android_power_monitor_controller.AndroidPowerMonitorController(
+        [P1(), P2(1), P3(2)], battery)
+    self.assertEqual(controller.CanMonitorPower(), True)
+    controller.StartMonitoringPower(None)
+    controller_returns = controller.StopMonitoringPower()
+    self.assertEqual(controller_returns['P2'], 1)
+    self.assertEqual(controller_returns['P3'], 2)
+
+  @mock.patch.object(battery_utils, 'BatteryUtils')
+  def testReenableChargingIfNeeded(self, mock_battery):
+    battery = battery_utils.BatteryUtils(None)
+    battery.GetCharging.return_value = False
+    android_power_monitor_controller._ReenableChargingIfNeeded(battery)
+
+  def testMergePowerResultsOneEmpty(self):
+    dict_one = {'platform_info': {}, 'component_utilization': {}}
+    dict_two = {'test': 1, 'component_utilization': {'test': 2}}
+    results = {
+        'platform_info': {},
+        'component_utilization': {'test': 2},
+        'test': 1
+    }
+    (android_power_monitor_controller.AndroidPowerMonitorController.
+     _MergePowerResults(dict_one, dict_two))
+    self.assertDictEqual(dict_one, results)
+
+  def testMergePowerResultsSameEntry(self):
+    dict_one = {
+        'test': 1,
+        'component_utilization': {'test': 2},
+        'platform_info': {'test2': 'a'}
+    }
+    dict_two = {'test': 3, 'platform_info': {'test': 4}}
+    results = {
+        'test' : 3,
+        'component_utilization': {'test': 2},
+        'platform_info': {'test': 4, 'test2': 'a'}
+    }
+    (android_power_monitor_controller.AndroidPowerMonitorController.
+     _MergePowerResults(dict_one, dict_two))
+    self.assertDictEqual(dict_one, results)
diff --git a/catapult/telemetry/telemetry/internal/platform/power_monitor/android_temperature_monitor.py b/catapult/telemetry/telemetry/internal/platform/power_monitor/android_temperature_monitor.py
new file mode 100644
index 0000000..87d7c37
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/power_monitor/android_temperature_monitor.py
@@ -0,0 +1,52 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+
+from telemetry.internal.platform import power_monitor
+
+try:
+  from devil.android import device_errors  # pylint: disable=import-error
+except ImportError:
+  device_errors = None
+
+
+_TEMPERATURE_FILE = '/sys/class/thermal/thermal_zone0/temp'
+
+
+class AndroidTemperatureMonitor(power_monitor.PowerMonitor):
+  """
+  Returns temperature results in power monitor dictionary format.
+  """
+  def __init__(self, device):
+    super(AndroidTemperatureMonitor, self).__init__()
+    self._device = device
+
+  def CanMonitorPower(self):
+    return self._GetBoardTemperatureCelsius() is not None
+
+  def StartMonitoringPower(self, browser):
+    # Don't call _CheckStart() because this is temperature, not power;
+    # therefore, StartMonitoringPower and StopMonitoringPower
+    # do not need to be paired.
+    pass
+
+  def StopMonitoringPower(self):
+    avg_temp = self._GetBoardTemperatureCelsius()
+    if avg_temp is None:
+      return {'identifier': 'android_temperature_monitor'}
+    else:
+      return {'identifier': 'android_temperature_monitor',
+              'platform_info': {'average_temperature_c': avg_temp}}
+
+  def _GetBoardTemperatureCelsius(self):
+    try:
+      contents = self._device.ReadFile(_TEMPERATURE_FILE)
+      return float(contents) if contents else None
+    except ValueError:
+      logging.warning('String returned by device.ReadFile(_TEMPERATURE_FILE) '
+                      'is in an invalid format.')
+      return None
+    except device_errors.CommandFailedError:
+      return None
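
AndroidTemperatureMonitor only needs an object with a ReadFile method, so its return shape is easy to see with a stub device. A minimal sketch, assuming a telemetry checkout is importable; FakeDevice is a hypothetical stand-in for devil's DeviceUtils:

```
# Illustrative stub device; assumes telemetry is on PYTHONPATH.
from telemetry.internal.platform.power_monitor import android_temperature_monitor

class FakeDevice(object):
  def ReadFile(self, path):
    return '31'  # pretend the sysfs node reports 31 degrees Celsius

monitor = android_temperature_monitor.AndroidTemperatureMonitor(FakeDevice())
if monitor.CanMonitorPower():
  monitor.StartMonitoringPower(None)
  print(monitor.StopMonitoringPower())
  # {'identifier': 'android_temperature_monitor',
  #  'platform_info': {'average_temperature_c': 31.0}}
```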
diff --git a/catapult/telemetry/telemetry/internal/platform/power_monitor/android_temperature_monitor_unittest.py b/catapult/telemetry/telemetry/internal/platform/power_monitor/android_temperature_monitor_unittest.py
new file mode 100644
index 0000000..882d819
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/power_monitor/android_temperature_monitor_unittest.py
@@ -0,0 +1,50 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.internal.platform.power_monitor import android_temperature_monitor
+from telemetry.testing import simple_mock
+
+_ = simple_mock.DONT_CARE
+
+
+class AndroidTemperatureMonitorTest(unittest.TestCase):
+
+  def testPowerMonitoringResultsWereUpdated(self):
+    mock_device_utils = simple_mock.MockObject()
+    mock_device_utils.ExpectCall('ReadFile', _).WillReturn('0')
+    mock_device_utils.ExpectCall('ReadFile', _).WillReturn('24')
+
+    monitor = android_temperature_monitor.AndroidTemperatureMonitor(
+        mock_device_utils)
+    self.assertTrue(monitor.CanMonitorPower())
+    monitor.StartMonitoringPower(None)
+    measurements = monitor.StopMonitoringPower()
+    expected_return = {
+        'identifier': 'android_temperature_monitor',
+        'platform_info': {'average_temperature_c': 24.0}
+    }
+    self.assertDictEqual(expected_return, measurements)
+
+  def testSysfsReadFailed(self):
+    mock_device_utils = simple_mock.MockObject()
+    mock_device_utils.ExpectCall('ReadFile', _).WillReturn('24')
+    mock_device_utils.ExpectCall('ReadFile', _).WillReturn(None)
+
+    monitor = android_temperature_monitor.AndroidTemperatureMonitor(
+        mock_device_utils)
+    self.assertTrue(monitor.CanMonitorPower())
+    monitor.StartMonitoringPower(None)
+    measurements = monitor.StopMonitoringPower()
+    self.assertTrue('identifier' in measurements)
+    self.assertTrue('platform_info' not in measurements)
+
+  def testSysfsReadFailedCanMonitor(self):
+    mock_device_utils = simple_mock.MockObject()
+    mock_device_utils.ExpectCall('ReadFile', _).WillReturn(None)
+
+    monitor = android_temperature_monitor.AndroidTemperatureMonitor(
+        mock_device_utils)
+    self.assertFalse(monitor.CanMonitorPower())
diff --git a/catapult/telemetry/telemetry/internal/platform/power_monitor/cros_power_monitor.py b/catapult/telemetry/telemetry/internal/platform/power_monitor/cros_power_monitor.py
new file mode 100644
index 0000000..7f922bd
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/power_monitor/cros_power_monitor.py
@@ -0,0 +1,164 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+import logging
+import re
+
+from telemetry import decorators
+from telemetry.internal.platform.power_monitor import sysfs_power_monitor
+
+
+class CrosPowerMonitor(sysfs_power_monitor.SysfsPowerMonitor):
+  """PowerMonitor that relies on 'dump_power_status' to monitor power
+  consumption of a single ChromeOS application.
+  """
+  def __init__(self, platform_backend):
+    """Constructor.
+
+    Args:
+        platform_backend: A LinuxBasedPlatformBackend object.
+
+    Attributes:
+        _initial_power: The result of 'dump_power_status' before the test.
+        _start_time: The epoch time at which the test starts executing.
+    """
+    super(CrosPowerMonitor, self).__init__(platform_backend)
+    self._initial_power = None
+    self._start_time = None
+
+  @decorators.Cache
+  def CanMonitorPower(self):
+    return super(CrosPowerMonitor, self).CanMonitorPower()
+
+  def StartMonitoringPower(self, browser):
+    super(CrosPowerMonitor, self).StartMonitoringPower(browser)
+    if self._IsOnBatteryPower():
+      sample = self._platform.RunCommand(['dump_power_status;', 'date', '+%s'])
+      self._initial_power, self._start_time = CrosPowerMonitor.SplitSample(
+          sample)
+    else:
+      logging.warning('Device not on battery power during power monitoring. '
+                      'Results may be incorrect.')
+
+  def StopMonitoringPower(self):
+    # Don't need to call self._CheckStop here; it's called by the superclass
+    cpu_stats = super(CrosPowerMonitor, self).StopMonitoringPower()
+    power_stats = {}
+    if self._IsOnBatteryPower():
+      sample = self._platform.RunCommand(['dump_power_status;', 'date', '+%s'])
+      final_power, end_time = CrosPowerMonitor.SplitSample(sample)
+      # The length of the test is used to measure energy consumption.
+      length_h = (end_time - self._start_time) / 3600.0
+      power_stats = CrosPowerMonitor.ParsePower(self._initial_power,
+                                                final_power, length_h)
+    else:
+      logging.warning('Device not on battery power during power monitoring. '
+                      'Results may be incorrect.')
+    return CrosPowerMonitor.CombineResults(cpu_stats, power_stats)
+
+  @staticmethod
+  def SplitSample(sample):
+    """Splits a power and time sample into the two separate values.
+
+    Args:
+        sample: The result of calling 'dump_power_status; date +%s' on the
+            device.
+
+    Returns:
+        A tuple of power sample and epoch time of the sample.
+    """
+    sample = sample.strip()
+    index = sample.rfind('\n')
+    power = sample[:index]
+    time = sample[index + 1:]
+    return power, int(time)
+
+  @staticmethod
+  def IsOnBatteryPower(status, board):
+    """Determines if the devices is being charged.
+
+    Args:
+        status: The parsed result of 'dump_power_status'
+        board: The name of the board running the test.
+
+    Returns:
+        True if the device is on battery power; False otherwise.
+    """
+    on_battery = status['line_power_connected'] == '0'
+    # Butterfly can incorrectly report AC online for some time after unplug.
+    # Check battery discharge state to confirm.
+    if board == 'butterfly':
+      on_battery |= status['battery_discharging'] == '1'
+    return on_battery
+
+  def _IsOnBatteryPower(self):
+    """Determines if the device is being charged.
+
+    Returns:
+        True if the device is on battery power; False otherwise.
+    """
+    status = CrosPowerMonitor.ParsePowerStatus(
+        self._platform.RunCommand(['dump_power_status']))
+    board_data = self._platform.RunCommand(['cat', '/etc/lsb-release'])
+    board = re.search('BOARD=(.*)', board_data).group(1)
+    return CrosPowerMonitor.IsOnBatteryPower(status, board)
+
+  @staticmethod
+  def ParsePowerStatus(sample):
+    """Parses 'dump_power_status' command output.
+
+    Args:
+        sample: The output of 'dump_power_status'
+
+    Returns:
+        Dictionary containing all fields from 'dump_power_status'
+    """
+    rv = collections.defaultdict(dict)
+    for ln in sample.splitlines():
+      words = ln.split()
+      assert len(words) == 2
+      rv[words[0]] = words[1]
+    return dict(rv)
+
+  @staticmethod
+  def ParsePower(initial_stats, final_stats, length_h):
+    """Parse output of 'dump_power_status'
+
+    Args:
+        initial_stats: The output of 'dump_power_status' before the test.
+        final_stats: The output of 'dump_power_status' after the test.
+        length_h: The length of the test in hours.
+
+    Returns:
+        Dictionary in the format returned by StopMonitoringPower().
+    """
+    initial = CrosPowerMonitor.ParsePowerStatus(initial_stats)
+    final = CrosPowerMonitor.ParsePowerStatus(final_stats)
+    # The charge value reported by 'dump_power_status' is not precise enough to
+    # give meaningful results across shorter tests, so average energy rate and
+    # the length of the test are used.
+    initial_power_mw = float(initial['battery_energy_rate']) * 10 ** 3
+    final_power_mw = float(final['battery_energy_rate']) * 10 ** 3
+    average_power_mw = (initial_power_mw + final_power_mw) / 2.0
+
+    # Duplicating CrOS battery fields where applicable.
+    def CopyFinalState(field, key):
+      """Copy fields from battery final state."""
+      if field in final:
+        battery[key] = float(final[field])
+
+    battery = {}
+    CopyFinalState('battery_charge_full', 'charge_full')
+    CopyFinalState('battery_charge_full_design', 'charge_full_design')
+    CopyFinalState('battery_charge', 'charge_now')
+    CopyFinalState('battery_current', 'current_now')
+    CopyFinalState('battery_energy', 'energy')
+    CopyFinalState('battery_energy_rate', 'energy_rate')
+    CopyFinalState('battery_voltage', 'voltage_now')
+
+    return {'identifier': 'dump_power_status',
+            'power_samples_mw': [initial_power_mw, final_power_mw],
+            'energy_consumption_mwh': average_power_mw * length_h,
+            'component_utilization': {'battery': battery}}
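
The energy figure ParsePower reports is simply the average of the two battery_energy_rate readings (in watts), scaled to milliwatts and multiplied by the test length in hours. A worked sketch of that arithmetic, using the readings from the unit test that follows (12.78 W and 12.80 W over 0.2 h):

```
# Worked example of ParsePower's energy arithmetic (figures from the test
# below: 12.78 W and 12.80 W over 0.2 h).
initial_power_mw = 12.78 * 10 ** 3            # 12780.0 mW
final_power_mw = 12.80 * 10 ** 3              # 12800.0 mW
average_power_mw = (initial_power_mw + final_power_mw) / 2.0
energy_consumption_mwh = average_power_mw * 0.2
assert abs(energy_consumption_mwh - 2558.0) < 1e-6
```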
diff --git a/catapult/telemetry/telemetry/internal/platform/power_monitor/cros_power_monitor_unittest.py b/catapult/telemetry/telemetry/internal/platform/power_monitor/cros_power_monitor_unittest.py
new file mode 100644
index 0000000..2d4945e
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/power_monitor/cros_power_monitor_unittest.py
@@ -0,0 +1,227 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.internal.platform.power_monitor import cros_power_monitor
+
+
+class CrosPowerMonitorMonitorTest(unittest.TestCase):
+  initial_power = ('''line_power_connected 0
+battery_present 1
+battery_percent 70.20
+battery_charge 2.83
+battery_charge_full 4.03
+battery_charge_full_design 4.03
+battery_current 1.08
+battery_energy 31.83
+battery_energy_rate 12.78
+battery_voltage 11.82
+battery_discharging 1''')
+  final_power = ('''line_power_connected 0
+battery_present 1
+battery_percent 70.20
+battery_charge 2.83
+battery_charge_full 4.03
+battery_charge_full_design 4.03
+battery_current 1.08
+battery_energy 31.83
+battery_energy_rate 12.80
+battery_voltage 12.24
+battery_discharging 1''')
+  incomplete_final_power = ('''line_power_connected 0
+battery_present 1
+battery_percent 70.20
+battery_charge 2.83
+battery_charge_full 4.03
+battery_charge_full_design 4.03
+battery_energy_rate 12.80
+battery_discharging 1''')
+  expected_power = {
+    'energy_consumption_mwh': 2558.0,
+    'power_samples_mw': [12780.0, 12800.0],
+    'component_utilization': {
+      'battery': {
+        'charge_full': 4.03,
+        'charge_full_design': 4.03,
+        'charge_now': 2.83,
+        'current_now': 1.08,
+        'energy': 31.83,
+        'energy_rate': 12.80,
+        'voltage_now': 12.24
+      }
+    }
+  }
+  expected_incomplete_power = {
+    'energy_consumption_mwh': 2558.0,
+    'power_samples_mw': [12780.0, 12800.0],
+    'component_utilization': {
+      'battery': {
+        'charge_full': 4.03,
+        'charge_full_design': 4.03,
+        'charge_now': 2.83,
+        'energy_rate': 12.80,
+      }
+    }
+  }
+  expected_cpu = {
+    'whole_package': {
+      'frequency_percent': {
+        1700000000: 3.29254111574526,
+        1600000000: 0.0,
+        1500000000: 0.0,
+        1400000000: 0.15926805099535601,
+        1300000000: 0.47124116307273645,
+        1200000000: 0.818756100807525,
+        1100000000: 1.099381692400982,
+        1000000000: 2.5942528544384302,
+        900000000: 5.68661122326737,
+        800000000: 3.850545467654628,
+        700000000: 2.409691872245393,
+        600000000: 1.4693702487650486,
+        500000000: 2.4623575553879373,
+        400000000: 2.672038150383057,
+        300000000: 3.415770495015825,
+        200000000: 69.59817400982045
+      },
+      'cstate_residency_percent': {
+        'C0': 83.67623835616438535,
+        'C1': 0.2698609589041096,
+        'C2': 0.2780191780821918,
+        'C3': 15.77588150684931505
+      }
+    },
+    'cpu0': {
+      'frequency_percent': {
+        1700000000: 4.113700564971752,
+        1600000000: 0.0,
+        1500000000: 0.0,
+        1400000000: 0.1765536723163842,
+        1300000000: 0.4943502824858757,
+        1200000000: 0.7944915254237288,
+        1100000000: 1.2226341807909604,
+        1000000000: 3.0632062146892656,
+        900000000: 5.680614406779661,
+        800000000: 3.6679025423728815,
+        700000000: 2.379060734463277,
+        600000000: 1.4124293785310735,
+        500000000: 2.599752824858757,
+        400000000: 3.0102401129943503,
+        300000000: 3.650247175141243,
+        200000000: 67.73481638418079
+      },
+      'cstate_residency_percent': {
+        'C0': 76.76226164383562,
+        'C1': 0.3189164383561644,
+        'C2': 0.4544301369863014,
+        'C3': 22.4643917808219178
+      }
+    },
+    'cpu1': {
+      'frequency_percent': {
+        1700000000: 2.4713816665187682,
+        1600000000: 0.0,
+        1500000000: 0.0,
+        1400000000: 0.1419824296743278,
+        1300000000: 0.44813204365959713,
+        1200000000: 0.8430206761913214,
+        1100000000: 0.9761292040110037,
+        1000000000: 2.1252994941875945,
+        900000000: 5.69260803975508,
+        800000000: 4.033188392936374,
+        700000000: 2.4403230100275093,
+        600000000: 1.526311118999024,
+        500000000: 2.3249622859171177,
+        400000000: 2.3338361877717633,
+        300000000: 3.1812938148904073,
+        200000000: 71.46153163546012
+      },
+      'cstate_residency_percent': {
+        'C0': 90.5902150684931507,
+        'C1': 0.2208054794520548,
+        'C2': 0.1016082191780822,
+        'C3': 9.0873712328767123
+      }
+    }
+  }
+
+  def _assertPowerEqual(self, results, expected):
+    battery = results['component_utilization']['battery']
+    expected_battery = expected['component_utilization']['battery']
+    self.assertItemsEqual(battery.keys(), expected_battery.keys())
+    for value in battery:
+      self.assertAlmostEqual(battery[value], expected_battery[value])
+
+    self.assertAlmostEqual(results['energy_consumption_mwh'],
+                           expected['energy_consumption_mwh'])
+    self.assertAlmostEqual(results['power_samples_mw'][0],
+                           expected['power_samples_mw'][0])
+    self.assertAlmostEqual(results['power_samples_mw'][1],
+                           expected['power_samples_mw'][1])
+
+  def testParsePower(self):
+    results = cros_power_monitor.CrosPowerMonitor.ParsePower(
+        self.initial_power, self.final_power, 0.2)
+    self._assertPowerEqual(results, self.expected_power)
+
+  def testParseIncompletePowerState(self):
+    """Test the case where dump_power_status only outputs partial fields.
+
+    CrosPowerMonitor hard-codes the expected fields from dump_power_status;
+    this test ensures that parsing succeeds when some of those fields do not
+    exist. It's mainly for backward compatibility.
+    """
+    results = cros_power_monitor.CrosPowerMonitor.ParsePower(
+        self.initial_power, self.incomplete_final_power, 0.2)
+    self._assertPowerEqual(results, self.expected_incomplete_power)
+
+
+  def testSplitSample(self):
+    sample = self.initial_power + '\n1408739546\n'
+    power, time = cros_power_monitor.CrosPowerMonitor.SplitSample(sample)
+    self.assertEqual(power, self.initial_power)
+    self.assertEqual(time, 1408739546)
+
+  def testCombineResults(self):
+    result = cros_power_monitor.CrosPowerMonitor.CombineResults(
+        self.expected_cpu, self.expected_power)
+    comp_util = result['component_utilization']
+    # Test power values.
+    self.assertEqual(result['energy_consumption_mwh'],
+                     self.expected_power['energy_consumption_mwh'])
+    self.assertEqual(result['power_samples_mw'],
+                     self.expected_power['power_samples_mw'])
+    self.assertEqual(comp_util['battery'],
+                     self.expected_power['component_utilization']['battery'])
+    # Test frequency values.
+    self.assertDictEqual(
+        comp_util['whole_package']['frequency_percent'],
+        self.expected_cpu['whole_package']['frequency_percent'])
+    self.assertDictEqual(
+        comp_util['cpu0']['frequency_percent'],
+        self.expected_cpu['cpu0']['frequency_percent'])
+    self.assertDictEqual(
+        comp_util['cpu1']['frequency_percent'],
+        self.expected_cpu['cpu1']['frequency_percent'])
+    # Test c-state residency values.
+    self.assertDictEqual(
+        comp_util['whole_package']['cstate_residency_percent'],
+        self.expected_cpu['whole_package']['cstate_residency_percent'])
+    self.assertDictEqual(
+        comp_util['cpu0']['cstate_residency_percent'],
+        self.expected_cpu['cpu0']['cstate_residency_percent'])
+    self.assertDictEqual(
+        comp_util['cpu1']['cstate_residency_percent'],
+        self.expected_cpu['cpu1']['cstate_residency_percent'])
+
+  def testCanMonitorPower(self):
+    # TODO(tmandel): Add a test here where the device cannot monitor power.
+    initial_status = cros_power_monitor.CrosPowerMonitor.ParsePowerStatus(
+        self.initial_power)
+    final_status = cros_power_monitor.CrosPowerMonitor.ParsePowerStatus(
+        self.final_power)
+    self.assertTrue(cros_power_monitor.CrosPowerMonitor.IsOnBatteryPower(
+        initial_status, 'peppy'))
+    self.assertTrue(cros_power_monitor.CrosPowerMonitor.IsOnBatteryPower(
+        final_status, 'butterfly'))
diff --git a/catapult/telemetry/telemetry/internal/platform/power_monitor/monsoon_power_monitor.py b/catapult/telemetry/telemetry/internal/platform/power_monitor/monsoon_power_monitor.py
new file mode 100644
index 0000000..abbd9f4
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/power_monitor/monsoon_power_monitor.py
@@ -0,0 +1,120 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import logging
+import multiprocessing
+import tempfile
+import time
+
+from telemetry.core import exceptions
+from telemetry.internal.platform.power_monitor import android_power_monitor_base
+from telemetry.internal.platform.profiler import monsoon
+
+
+def _MonitorPower(device, is_collecting, output):
+  """Monitoring process
+     Args:
+       device: A profiler.monsoon object to collect samples from.
+       is_collecting: the event to synchronize on.
+       output: opened file to write the samples.
+  """
+  with output:
+    samples = []
+    start_time = None
+    end_time = None
+    try:
+      device.StartDataCollection()
+      is_collecting.set()
+      # The first sample also calibrates the computation.
+      device.CollectData()
+      start_time = time.time()
+      while is_collecting.is_set():
+        new_data = device.CollectData()
+        assert new_data, 'Unable to collect data from device'
+        samples += new_data
+      end_time = time.time()
+    finally:
+      device.StopDataCollection()
+    result = {
+      'duration_s': end_time - start_time,
+      'samples': samples
+    }
+    json.dump(result, output)
+
+
+class MonsoonPowerMonitor(android_power_monitor_base.AndroidPowerMonitorBase):
+  def __init__(self, _, platform_backend):
+    super(MonsoonPowerMonitor, self).__init__()
+    self._powermonitor_process = None
+    self._powermonitor_output_file = None
+    self._is_collecting = None
+    self._monsoon = None
+    self._platform = platform_backend
+    try:
+      self._monsoon = monsoon.Monsoon(wait=False)
+      # Nominal Li-ion voltage is 3.7V, but it puts out 4.2V at max capacity.
+      # Use 4.0V to simulate a "~80%" charged battery. Google "li-ion voltage
+      # curve". This is true only for a single cell. (Most smartphones, some
+      # tablets.)
+      self._monsoon.SetVoltage(4.0)
+    except EnvironmentError:
+      self._monsoon = None
+
+  def CanMonitorPower(self):
+    return self._monsoon is not None
+
+  def StartMonitoringPower(self, browser):
+    self._CheckStart()
+    self._powermonitor_output_file = tempfile.TemporaryFile()
+    self._is_collecting = multiprocessing.Event()
+    self._powermonitor_process = multiprocessing.Process(
+        target=_MonitorPower,
+        args=(self._monsoon,
+              self._is_collecting,
+              self._powermonitor_output_file))
+    # Ensure child is not left behind: parent kills daemonic children on exit.
+    self._powermonitor_process.daemon = True
+    self._powermonitor_process.start()
+    if not self._is_collecting.wait(timeout=0.5):
+      self._powermonitor_process.terminate()
+      raise exceptions.ProfilingException('Failed to start data collection.')
+
+  def StopMonitoringPower(self):
+    self._CheckStop()
+    try:
+      # Signal the monitoring process to stop collecting, then join it.
+      self._is_collecting.clear()
+      self._powermonitor_process.join()
+      with self._powermonitor_output_file:
+        self._powermonitor_output_file.seek(0)
+        powermonitor_output = self._powermonitor_output_file.read()
+      assert powermonitor_output, 'PowerMonitor produced no output'
+      return MonsoonPowerMonitor.ParseSamplingOutput(powermonitor_output)
+    finally:
+      self._powermonitor_output_file = None
+      self._powermonitor_process = None
+      self._is_collecting = None
+
+  @staticmethod
+  def ParseSamplingOutput(powermonitor_output):
+    """Parse the output of of the samples collector process.
+
+    Returns:
+        Dictionary in the format returned by StopMonitoringPower().
+    """
+    result = json.loads(powermonitor_output)
+    if result['samples']:
+      timedelta_h = (result['duration_s'] / len(result['samples'])) / 3600.0
+      power_samples = [current_a * voltage_v * 10**3
+                       for (current_a, voltage_v) in result['samples']]
+      total_energy_consumption_mwh = sum(power_samples) * timedelta_h
+    else:
+      logging.warning('Sample information not available.')
+      power_samples = []
+      total_energy_consumption_mwh = 0
+
+    return {'identifier': 'monsoon',
+            'power_samples_mw': power_samples,
+            'monsoon_energy_consumption_mwh': total_energy_consumption_mwh}
diff --git a/catapult/telemetry/telemetry/internal/platform/power_monitor/monsoon_power_monitor_unittest.py b/catapult/telemetry/telemetry/internal/platform/power_monitor/monsoon_power_monitor_unittest.py
new file mode 100644
index 0000000..f2f35c3
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/power_monitor/monsoon_power_monitor_unittest.py
@@ -0,0 +1,21 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import unittest
+
+from telemetry.internal.platform.power_monitor import monsoon_power_monitor
+
+
+class MonsoonPowerMonitorTest(unittest.TestCase):
+
+  def testEnergyConsumption(self):
+    data = {
+        'duration_s': 3600.0,
+        'samples': [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0), (4.0, 4.0)]
+    }
+    results = monsoon_power_monitor.MonsoonPowerMonitor.ParseSamplingOutput(
+        json.dumps(data))
+    self.assertEqual(results['power_samples_mw'], [1000, 4000, 9000, 16000])
+    self.assertEqual(results['monsoon_energy_consumption_mwh'], 7500)
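
The expected values in testEnergyConsumption follow directly from ParseSamplingOutput: each (current, voltage) sample becomes a power sample in milliwatts, and each sample is assumed to cover an equal slice of the total duration:

```
# Reproducing the numbers asserted above.
samples = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0), (4.0, 4.0)]  # (amps, volts)
duration_s = 3600.0

power_samples_mw = [i * v * 10 ** 3 for (i, v) in samples]
assert power_samples_mw == [1000.0, 4000.0, 9000.0, 16000.0]

timedelta_h = (duration_s / len(samples)) / 3600.0   # 0.25 h per sample
energy_mwh = sum(power_samples_mw) * timedelta_h
assert energy_mwh == 7500.0
```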
diff --git a/catapult/telemetry/telemetry/internal/platform/power_monitor/msr_power_monitor.py b/catapult/telemetry/telemetry/internal/platform/power_monitor/msr_power_monitor.py
new file mode 100644
index 0000000..5cf3d16
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/power_monitor/msr_power_monitor.py
@@ -0,0 +1,143 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import platform
+import re
+
+from telemetry import decorators
+from telemetry.internal.platform import power_monitor
+
+
+MSR_RAPL_POWER_UNIT = 0x606
+MSR_PKG_ENERGY_STATUS = 0x611  # Whole package
+MSR_PP0_ENERGY_STATUS = 0x639  # Core
+MSR_PP1_ENERGY_STATUS = 0x641  # Uncore
+MSR_DRAM_ENERGY_STATUS = 0x619
+IA32_PACKAGE_THERM_STATUS = 0x1b1
+IA32_TEMPERATURE_TARGET = 0x1a2
+
+
+def _JoulesToMilliwattHours(value_joules):
+  return value_joules * 1000 / 3600.
+
+
+def _IsSandyBridgeOrLater(vendor, family, model):
+  # Model numbers from:
+  # https://software.intel.com/en-us/articles/intel-architecture-and-processor-identification-with-cpuid-model-and-family-numbers
+  # http://www.speedtraq.com
+  return ('Intel' in vendor and family == 6 and
+          (model in (0x2A, 0x2D) or model >= 0x30))
+
+
+class MsrPowerMonitor(power_monitor.PowerMonitor):
+  def __init__(self, backend):
+    super(MsrPowerMonitor, self).__init__()
+    self._backend = backend
+    self._start_energy_j = None
+    self._start_temp_c = None
+
+  def CanMonitorPower(self):
+    raise NotImplementedError()
+
+  def StartMonitoringPower(self, browser):
+    self._CheckStart()
+    self._start_energy_j = self._PackageEnergyJoules()
+    self._start_temp_c = self._TemperatureCelsius()
+
+  def StopMonitoringPower(self):
+    self._CheckStop()
+    energy_consumption_j = self._PackageEnergyJoules() - self._start_energy_j
+    average_temp_c = (self._TemperatureCelsius() + self._start_temp_c) / 2.
+    if energy_consumption_j < 0:  # Correct overflow.
+      # The energy portion of the MSR is 4 bytes.
+      energy_consumption_j += 2 ** 32 * self._EnergyMultiplier()
+
+    self._start_energy_j = None
+    self._start_temp_c = None
+
+    return {
+        'identifier': 'msr',
+        'energy_consumption_mwh': _JoulesToMilliwattHours(energy_consumption_j),
+        'platform_info': {
+            'average_temperature_c': average_temp_c,
+        },
+    }
+
+  @decorators.Cache
+  def _EnergyMultiplier(self):
+    return 0.5 ** self._backend.ReadMsr(MSR_RAPL_POWER_UNIT, 8, 5)
+
+  def _PackageEnergyJoules(self):
+    return (self._backend.ReadMsr(MSR_PKG_ENERGY_STATUS, 0, 32) *
+            self._EnergyMultiplier())
+
+  def _TemperatureCelsius(self):
+    tcc_activation_temp = self._backend.ReadMsr(IA32_TEMPERATURE_TARGET, 16, 7)
+    if tcc_activation_temp <= 0:
+      tcc_activation_temp = 105
+    package_temp_headroom = self._backend.ReadMsr(
+        IA32_PACKAGE_THERM_STATUS, 16, 7)
+    return tcc_activation_temp - package_temp_headroom
+
+  def _CheckMSRs(self):
+    try:
+      if self._PackageEnergyJoules() <= 0:
+        logging.info('Cannot monitor power: no energy readings.')
+        return False
+
+      if self._TemperatureCelsius() <= 0:
+        logging.info('Cannot monitor power: no temperature readings.')
+        return False
+    except OSError as e:
+      logging.info('Cannot monitor power: %s' % e)
+      return False
+    return True
+
+
+class MsrPowerMonitorLinux(MsrPowerMonitor):
+  def CanMonitorPower(self):
+    vendor = None
+    family = None
+    model = None
+    cpuinfo = open('/proc/cpuinfo').read().splitlines()
+    for line in cpuinfo:
+      if vendor and family and model:
+        break
+      if line.startswith('vendor_id'):
+        vendor = line.split('\t')[1]
+      elif line.startswith('cpu family'):
+        family = int(line.split(' ')[2])
+      elif line.startswith('model\t\t'):
+        model = int(line.split(' ')[1])
+    if not _IsSandyBridgeOrLater(vendor, family, model):
+      logging.info('Cannot monitor power: pre-Sandy Bridge CPU.')
+      return False
+
+    if not self._CheckMSRs():
+      logging.info('Try running tools/telemetry/build/linux_setup_msr.py.')
+      return False
+
+    return True
+
+
+class MsrPowerMonitorWin(MsrPowerMonitor):
+  def CanMonitorPower(self):
+    family, model = map(int, re.match('.+ Family ([0-9]+) Model ([0-9]+)',
+                        platform.processor()).groups())
+    if not _IsSandyBridgeOrLater(platform.processor(), family, model):
+      logging.info('Cannot monitor power: pre-Sandy Bridge CPU.')
+      return False
+
+    try:
+      return self._CheckMSRs()
+    finally:
+      # Since _CheckMSRs() starts the MSR server on the Windows platform, we
+      # must close it after checking to avoid leaking the MSR server process.
+      self._backend.CloseMsrServer()
+
+  def StopMonitoringPower(self):
+    power_statistics = super(MsrPowerMonitorWin, self).StopMonitoringPower()
+    self._backend.CloseMsrServer()
+    return power_statistics
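
The MSR math above boils down to scaling a 32-bit energy counter by 0.5 raised to the RAPL energy-status-units field, correcting for wraparound, and converting joules to milliwatt-hours. A sketch with made-up register values (the counter readings and units field below are illustrative, not real hardware reads; joules_to_mwh mirrors _JoulesToMilliwattHours):

```
# Sketch of the RAPL energy arithmetic; all register values are made up.
def joules_to_mwh(value_joules):
  return value_joules * 1000 / 3600.

esu_field = 14                         # bits 12:8 of MSR_RAPL_POWER_UNIT
energy_multiplier = 0.5 ** esu_field   # joules per counter tick

start_ticks = 4294000000               # MSR_PKG_ENERGY_STATUS before the test
stop_ticks = 1500000                   # counter wrapped around during the test

energy_j = (stop_ticks - start_ticks) * energy_multiplier
if energy_j < 0:                       # the counter is only 4 bytes wide
  energy_j += 2 ** 32 * energy_multiplier

print(joules_to_mwh(energy_j))         # ~67 mWh for these example values
```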
diff --git a/catapult/telemetry/telemetry/internal/platform/power_monitor/msr_power_monitor_unittest.py b/catapult/telemetry/telemetry/internal/platform/power_monitor/msr_power_monitor_unittest.py
new file mode 100644
index 0000000..5734308
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/power_monitor/msr_power_monitor_unittest.py
@@ -0,0 +1,29 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import time
+import unittest
+
+from telemetry import decorators
+from telemetry.internal.platform.power_monitor import msr_power_monitor
+from telemetry.internal.platform import win_platform_backend
+
+
+class MsrPowerMonitorTest(unittest.TestCase):
+  @decorators.Enabled('xp', 'win7', 'win8')  # http://crbug.com/479337
+  def testMsrRuns(self):
+    platform_backend = win_platform_backend.WinPlatformBackend()
+    power_monitor = msr_power_monitor.MsrPowerMonitorWin(platform_backend)
+    if not power_monitor.CanMonitorPower():
+      logging.warning('Test not supported on this platform.')
+      return
+
+    power_monitor.StartMonitoringPower(None)
+    time.sleep(0.01)
+    statistics = power_monitor.StopMonitoringPower()
+
+    self.assertEqual(statistics['identifier'], 'msr')
+    self.assertIn('energy_consumption_mwh', statistics)
+    self.assertGreater(statistics['energy_consumption_mwh'], 0)
diff --git a/catapult/telemetry/telemetry/internal/platform/power_monitor/pm_mock.py b/catapult/telemetry/telemetry/internal/platform/power_monitor/pm_mock.py
new file mode 100644
index 0000000..d9bbab5
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/power_monitor/pm_mock.py
@@ -0,0 +1,61 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+class MockBrowserBackend(object):
+  def __init__(self, package):
+    self.package = package
+
+class MockBrowser(object):
+  def __init__(self, package):
+    self._browser_backend = MockBrowserBackend(package)
+
+class MockBattery(object):
+  def __init__(self,
+               power_results,
+               starts_charging=True,
+               voltage=4.0,
+               fuelgauge=None):
+    # |voltage| is in volts; GetBatteryInfo() reports it in millivolts.
+    self._power_results = power_results
+    self._charging = starts_charging
+    self._voltage = voltage
+    self._fuelgauge = fuelgauge if fuelgauge else []
+    self._fuel_idx = 0
+
+  def SupportsFuelGauge(self):
+    return len(self._fuelgauge) > 0
+
+  def GetFuelGaugeChargeCounter(self):
+    try:
+      x = self._fuelgauge[self._fuel_idx]
+      self._fuel_idx += 1
+      return x
+    except IndexError:
+      assert False, "Too many GetFuelGaugeChargeCounter() calls."
+
+  def GetCharging(self):
+    return self._charging
+
+  def SetCharging(self, charging):
+    if charging:
+      assert not self._charging, "Mock battery already charging."
+      self._charging = True
+    else:
+      assert self._charging, "Mock battery already not charging."
+      self._charging = False
+
+  def GetPowerData(self):
+    return self._power_results
+
+  def GetBatteryInfo(self):
+    # the voltage returned by GetBatteryInfo() is in millivolts
+    return {'voltage': int(self._voltage*1000)}
+
+class MockPlatformBackend(object):
+  def __init__(self, command_dict=None):
+    self._cdict = (command_dict if command_dict else {})
+
+  def RunCommand(self, command):
+    assert command in self._cdict, "Mock platform error: Unexpected command."
+    return self._cdict[command]
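
These mocks have no dependencies of their own; a short illustrative driver shows how a test might exercise them (the command string and fuel-gauge values below are arbitrary examples, not values used by any real test):

```
# Purely illustrative use of the mocks above; values are arbitrary.
from telemetry.internal.platform.power_monitor import pm_mock

battery = pm_mock.MockBattery({'power': [1.0, 2.0]}, starts_charging=True,
                              voltage=4.0, fuelgauge=[3000, 2900])
battery.SetCharging(False)                        # "unplug" the device
start = battery.GetFuelGaugeChargeCounter()       # 3000
end = battery.GetFuelGaugeChargeCounter()         # 2900
assert start - end == 100
assert battery.GetBatteryInfo() == {'voltage': 4000}
battery.SetCharging(True)

platform = pm_mock.MockPlatformBackend({'dumpsys batterystats -c': 'csv'})
assert platform.RunCommand('dumpsys batterystats -c') == 'csv'
```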
diff --git a/catapult/telemetry/telemetry/internal/platform/power_monitor/powermetrics_power_monitor.py b/catapult/telemetry/telemetry/internal/platform/power_monitor/powermetrics_power_monitor.py
new file mode 100644
index 0000000..aa86876
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/power_monitor/powermetrics_power_monitor.py
@@ -0,0 +1,281 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+import logging
+import os
+import plistlib
+import shutil
+import tempfile
+import xml.parsers.expat
+
+from telemetry.core import os_version
+from telemetry.core import util
+from telemetry import decorators
+from telemetry.internal.platform import power_monitor
+
+
+# TODO: Rename this class (it seems to be used only by the Mac backend).
+class PowerMetricsPowerMonitor(power_monitor.PowerMonitor):
+
+  def __init__(self, backend):
+    super(PowerMetricsPowerMonitor, self).__init__()
+    self._powermetrics_process = None
+    self._backend = backend
+    self._output_filename = None
+    self._output_directory = None
+
+  @property
+  def binary_path(self):
+    return '/usr/bin/powermetrics'
+
+  def StartMonitoringPower(self, browser):
+    self._CheckStart()
+    # Empirically powermetrics creates an empty output file immediately upon
+    # starting.  We detect file creation as a signal that measurement has
+    # started.  In order to avoid various race conditions in tempfile creation
+    # we create a temp directory and have powermetrics create its output
+    # there rather than, say, creating a tempfile, deleting it and reusing its
+    # name.
+    self._output_directory = tempfile.mkdtemp()
+    self._output_filename = os.path.join(self._output_directory,
+                                         'powermetrics.output')
+    args = ['-f', 'plist',
+            '-u', self._output_filename,
+            '-i0',
+            '--show-usage-summary']
+    self._powermetrics_process = self._backend.LaunchApplication(
+        self.binary_path, args, elevate_privilege=True)
+
+    # Block until output file is written to ensure this function call is
+    # synchronous in respect to powermetrics starting.
+    def _OutputFileExists():
+      return os.path.isfile(self._output_filename)
+    util.WaitFor(_OutputFileExists, 1)
+
+  @decorators.Cache
+  def CanMonitorPower(self):
+    mavericks_or_later = (
+        self._backend.GetOSVersionName() >= os_version.MAVERICKS)
+    binary_path = self.binary_path
+    return mavericks_or_later and self._backend.CanLaunchApplication(
+        binary_path)
+
+  @staticmethod
+  def _ParsePlistString(plist_string):
+    """Wrapper to parse a plist from a string and catch any errors.
+
+    Sometimes powermetrics will exit in the middle of writing its output;
+    empirically it seems that it always writes at least one sample in its
+    entirety, so we can safely ignore any errors in its output.
+
+    Returns:
+        Parser output on successful parse, None on parse error.
+    """
+    try:
+      return plistlib.readPlistFromString(plist_string)
+    except xml.parsers.expat.ExpatError:
+      return None
+
+  @staticmethod
+  def ParsePowerMetricsOutput(powermetrics_output):
+    """Parse output of powermetrics command line utility.
+
+    Returns:
+        Dictionary in the format returned by StopMonitoringPower(), or an
+        empty dict if |powermetrics_output| is empty - crbug.com/353250.
+    """
+    if len(powermetrics_output) == 0:
+      logging.warning('powermetrics produced zero length output')
+      return {}
+
+    # Container to collect samples for running averages.
+    # out_path - list containing the key path in the output dictionary.
+    # src_path - list containing the key path to get the data from in
+    #    powermetrics' output.
+    def ConstructMetric(out_path, src_path):
+      RunningAverage = collections.namedtuple('RunningAverage', [
+          'out_path', 'src_path', 'samples'])
+      return RunningAverage(out_path, src_path, [])
+
+    # List of RunningAverage objects specifying metrics we want to aggregate.
+    metrics = [
+        ConstructMetric(
+            ['platform_info', 'average_frequency_hz'],
+            ['processor', 'freq_hz']),
+        ConstructMetric(
+            ['platform_info', 'idle_percent'],
+            ['processor', 'packages', 0, 'c_state_ratio'])]
+
+    def DataWithMetricKeyPath(metric, powermetrics_output):
+      """Retrieve the sample from powermetrics' output for a given metric.
+
+      Args:
+          metric: The RunningAverage object we want to collect a new sample for.
+          powermetrics_output: Dictionary containing powermetrics output.
+
+      Returns:
+          The sample corresponding to |metric|'s keypath."""
+      # Get actual data corresponding to key path.
+      out_data = powermetrics_output
+      for k in metric.src_path:
+        out_data = out_data[k]
+
+      assert type(out_data) in [int, float], (
+          'Was expecting a number: %s (%s)' % (type(out_data), out_data))
+      return float(out_data)
+
+    sample_durations = []
+    total_energy_consumption_mwh = 0
+    # powermetrics outputs multiple plists separated by null terminators.
+    raw_plists = powermetrics_output.split('\0')
+    raw_plists = [x for x in raw_plists if len(x) > 0]
+    assert len(raw_plists) == 1
+
+    # -------- Examine contents of first plist for systems specs. --------
+    plist = PowerMetricsPowerMonitor._ParsePlistString(raw_plists[0])
+    if not plist:
+      logging.warning('powermetrics produced invalid output, output length: '
+                      '%d', len(powermetrics_output))
+      return {}
+
+    # Powermetrics doesn't record power usage when running on a VM.
+    hw_model = plist.get('hw_model')
+    if hw_model and hw_model.startswith('VMware'):
+      return {}
+
+    if 'GPU' in plist:
+      metrics.extend([
+          ConstructMetric(
+              ['component_utilization', 'gpu', 'average_frequency_hz'],
+              ['GPU', 0, 'freq_hz']),
+          ConstructMetric(
+              ['component_utilization', 'gpu', 'idle_percent'],
+              ['GPU', 0, 'c_state_ratio'])])
+
+    # There's no way of knowing ahead of time how many cpus and packages the
+    # current system has. Iterate over cores and cpus - construct metrics for
+    # each one.
+    if 'processor' in plist:
+      core_dict = plist['processor']['packages'][0]['cores']
+      num_cores = len(core_dict)
+      cpu_num = 0
+      for core_idx in xrange(num_cores):
+        num_cpus = len(core_dict[core_idx]['cpus'])
+        base_src_path = ['processor', 'packages', 0, 'cores', core_idx]
+        for cpu_idx in xrange(num_cpus):
+          base_out_path = ['component_utilization', 'cpu%d' % cpu_num]
+          # C State ratio is per-package, component CPUs of that package may
+          # have different frequencies.
+          metrics.append(ConstructMetric(
+              base_out_path + ['average_frequency_hz'],
+              base_src_path + ['cpus', cpu_idx, 'freq_hz']))
+          metrics.append(ConstructMetric(
+              base_out_path + ['idle_percent'],
+              base_src_path + ['c_state_ratio']))
+          cpu_num += 1
+
+    # -------- Parse Data Out of Plists --------
+    plist = PowerMetricsPowerMonitor._ParsePlistString(raw_plists[0])
+    if not plist:
+      logging.error('Error parsing plist.')
+      return {}
+
+    # Duration of this sample.
+    sample_duration_ms = int(plist['elapsed_ns']) / 10 ** 6
+    sample_durations.append(sample_duration_ms)
+
+    if 'processor' not in plist:
+      logging.error("'processor' field not found in plist.")
+      return {}
+    processor = plist['processor']
+
+    total_energy_consumption_mwh = (
+        (float(processor.get('package_joules', 0)) / 3600.) * 10 ** 3)
+
+    for m in metrics:
+      try:
+        m.samples.append(DataWithMetricKeyPath(m, plist))
+      except KeyError:
+        # Old CPUs don't have c-states, so if data is missing, just ignore it.
+        logging.info('Field missing from powermetrics output: %s', m.src_path)
+        continue
+
+    # -------- Collect and Process Data --------
+    out_dict = {}
+    out_dict['identifier'] = 'powermetrics'
+    out_dict['energy_consumption_mwh'] = total_energy_consumption_mwh
+
+    def StoreMetricAverage(metric, sample_durations, out):
+      """Calculate average value of samples in a metric and store in output
+         path as specified by metric.
+
+      Args:
+          metric: A RunningAverage object containing samples to average.
+          sample_durations: A list which parallels the samples list containing
+              the time slice for each sample.
+          out: The output dict; the average is stored in the location
+              specified by metric.out_path.
+      """
+      if len(metric.samples) == 0:
+        return
+
+      assert len(metric.samples) == len(sample_durations)
+      avg = 0
+      for i in xrange(len(metric.samples)):
+        avg += metric.samples[i] * sample_durations[i]
+      avg /= sum(sample_durations)
+
+      # Store data in output, creating empty dictionaries as we go.
+      for k in metric.out_path[:-1]:
+        if not out.has_key(k):
+          out[k] = {}
+        out = out[k]
+      out[metric.out_path[-1]] = avg
+
+    for m in metrics:
+      StoreMetricAverage(m, sample_durations, out_dict)
+    return out_dict
+
+  def _KillPowerMetricsProcess(self):
+    """Kill a running powermetrics process."""
+    try:
+      if self._powermetrics_process.poll() is None:
+        self._powermetrics_process.terminate()
+    except OSError as e:
+      logging.warning(
+          'Error when trying to terminate powermetrics process: %s', repr(e))
+      if self._powermetrics_process.poll() is None:
+        # terminate() can fail when powermetrics lacks the setuid bit.
+        self._backend.LaunchApplication(
+          '/usr/bin/pkill',
+          ['-SIGTERM', os.path.basename(self.binary_path)],
+          elevate_privilege=True)
+
+  def StopMonitoringPower(self):
+    self._CheckStop()
+    # Tell powermetrics to take an immediate sample.
+    try:
+      self._KillPowerMetricsProcess()
+      (power_stdout, power_stderr) = self._powermetrics_process.communicate()
+      returncode = self._powermetrics_process.returncode
+      assert returncode in [0, -15], (
+          """powermetrics error
+          return code=%d
+          stdout=(%s)
+          stderr=(%s)""" % (returncode, power_stdout, power_stderr))
+
+      with open(self._output_filename, 'rb') as output_file:
+        powermetrics_output = output_file.read()
+      return PowerMetricsPowerMonitor.ParsePowerMetricsOutput(
+          powermetrics_output)
+    except Exception as e:
+      logging.warning(
+          'Error when trying to collect power monitoring data: %s', repr(e))
+      return PowerMetricsPowerMonitor.ParsePowerMetricsOutput('')
+    finally:
+      shutil.rmtree(self._output_directory)
+      self._output_directory = None
+      self._output_filename = None
+      self._powermetrics_process = None
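
StoreMetricAverage weights each sample by the duration of its slice before averaging; with a single plist (as asserted above) this degenerates to the sample itself, but the general formula is worth spelling out. The sample values and slice lengths below are illustrative:

```
# Duration-weighted average as computed by StoreMetricAverage; the sample
# values and slice lengths here are illustrative.
samples = [1.2e9, 1.4e9, 2.0e9]       # e.g. average_frequency_hz per slice
sample_durations = [100, 100, 200]    # slice lengths in ms

weighted_sum = sum(s * d for s, d in zip(samples, sample_durations))
average = weighted_sum / sum(sample_durations)
assert average == 1.65e9
```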
diff --git a/catapult/telemetry/telemetry/internal/platform/power_monitor/powermetrics_power_monitor_unittest.py b/catapult/telemetry/telemetry/internal/platform/power_monitor/powermetrics_power_monitor_unittest.py
new file mode 100644
index 0000000..1c5a65c
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/power_monitor/powermetrics_power_monitor_unittest.py
@@ -0,0 +1,76 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import unittest
+
+from telemetry.core import os_version
+from telemetry.core import util
+from telemetry import decorators
+from telemetry.internal.platform import mac_platform_backend
+from telemetry.internal.platform.power_monitor import powermetrics_power_monitor
+
+
+def _parsePowerMetricsDataFromTestFile(output_file):
+  test_data_path = os.path.join(util.GetUnittestDataDir(), output_file)
+  with open(test_data_path, 'r') as f:
+    process_output = f.read()
+  return (powermetrics_power_monitor.PowerMetricsPowerMonitor.
+      ParsePowerMetricsOutput(process_output))
+
+
+class PowerMetricsPowerMonitorTest(unittest.TestCase):
+  @decorators.Enabled('mac')
+  def testCanMonitorPowerUsage(self):
+    backend = mac_platform_backend.MacPlatformBackend()
+    power_monitor = powermetrics_power_monitor.PowerMetricsPowerMonitor(backend)
+    mavericks_or_later = (
+        backend.GetOSVersionName() >= os_version.MAVERICKS)
+    # Should always be able to monitor power usage on OS Version >= 10.9 .
+    self.assertEqual(power_monitor.CanMonitorPower(), mavericks_or_later,
+        "Error checking powermetrics availability: '%s'" % '|'.join(os.uname()))
+
+  @decorators.Enabled('mac')
+  def testParseEmptyPowerMetricsOutput(self):
+    # Important to handle zero-length powermetrics output - crbug.com/353250.
+    self.assertFalse(powermetrics_power_monitor.PowerMetricsPowerMonitor.
+        ParsePowerMetricsOutput(''))
+
+  @decorators.Enabled('mac')
+  def testParsePowerMetricsOutputFromVM(self):
+    # Don't fail when running on VM - crbug.com/423688.
+    self.assertEquals({},
+        _parsePowerMetricsDataFromTestFile('powermetrics_vmware.output'))
+
+  @decorators.Enabled('mac')
+  def testParsePowerMetricsOutput(self):
+    power_monitor = powermetrics_power_monitor.PowerMetricsPowerMonitor(
+        mac_platform_backend.MacPlatformBackend())
+    if not power_monitor.CanMonitorPower():
+      logging.warning('Test not supported on this platform.')
+      return
+
+    # Not supported on Mac at this time.
+    self.assertFalse(power_monitor.CanMeasurePerApplicationPower())
+
+    # Supported hardware reports power samples and energy consumption.
+    result = _parsePowerMetricsDataFromTestFile('powermetrics_output.output')
+
+    self.assertTrue(result['energy_consumption_mwh'] > 0)
+
+    # Verify platform info exists in output.
+    self.assertTrue(result['platform_info']['average_frequency_hz'] > 0)
+    self.assertTrue(result['platform_info']['idle_percent'] > 0)
+
+    # Verify that all component entries exist in output.
+    component_utilization = result['component_utilization']
+    for k in ['gpu'] + ['cpu%d' % x for x in range(8)]:
+      self.assertTrue(component_utilization[k]['average_frequency_hz'] > 0)
+      self.assertTrue(component_utilization[k]['idle_percent'] > 0)
+
+    # Unsupported hardware doesn't.
+    result = _parsePowerMetricsDataFromTestFile(
+        'powermetrics_output_unsupported_hardware.output')
+    self.assertNotIn('energy_consumption_mwh', result)
diff --git a/catapult/telemetry/telemetry/internal/platform/power_monitor/sysfs_power_monitor.py b/catapult/telemetry/telemetry/internal/platform/power_monitor/sysfs_power_monitor.py
new file mode 100644
index 0000000..85e5d30
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/power_monitor/sysfs_power_monitor.py
@@ -0,0 +1,230 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+import logging
+import os
+import re
+
+from telemetry.internal.platform import power_monitor
+from telemetry import decorators
+
+
+CPU_PATH = '/sys/devices/system/cpu/'
+
+
+class SysfsPowerMonitor(power_monitor.PowerMonitor):
+  """PowerMonitor that relies on sysfs to monitor CPU statistics on several
+  different platforms.
+  """
+  # TODO(rnephew): crbug.com/513453
+  # Convert all platforms to use standalone power monitors.
+  def __init__(self, linux_based_platform_backend, standalone=False):
+    """Constructor.
+
+    Args:
+        linux_based_platform_backend: A LinuxBasedPlatformBackend object.
+        standalone: If it is not wrapping another monitor, set to True.
+
+    Attributes:
+        _cpus: A list of the CPUs on the target device.
+        _end_time: The time the test stopped monitoring power.
+        _final_cstate: The c-state residency times after the test.
+        _final_freq: The CPU frequency times after the test.
+        _initial_cstate: The c-state residency times before the test.
+        _initial_freq: The CPU frequency times before the test.
+        _platform: A LinuxBasedPlatformBackend object associated with the
+            target platform.
+        _start_time: The time the test started monitoring power.
+    """
+    super(SysfsPowerMonitor, self).__init__()
+    self._cpus = None
+    self._final_cstate = None
+    self._final_freq = None
+    self._initial_cstate = None
+    self._initial_freq = None
+    self._platform = linux_based_platform_backend
+    self._standalone = standalone
+
+  @decorators.Cache
+  def CanMonitorPower(self):
+    return bool(self._platform.RunCommand(
+        'if [ -e %s ]; then echo true; fi' % CPU_PATH))
+
+  def StartMonitoringPower(self, browser):
+    del browser  # unused
+    self._CheckStart()
+    if self.CanMonitorPower():
+      self._cpus = filter(  # pylint: disable=deprecated-lambda
+          lambda x: re.match(r'^cpu[0-9]+', x),
+          self._platform.RunCommand('ls %s' % CPU_PATH).split())
+      self._initial_freq = self.GetCpuFreq()
+      self._initial_cstate = self.GetCpuState()
+
+  def StopMonitoringPower(self):
+    self._CheckStop()
+    try:
+      out = {}
+      if SysfsPowerMonitor.CanMonitorPower(self):
+        self._final_freq = self.GetCpuFreq()
+        self._final_cstate = self.GetCpuState()
+        frequencies = SysfsPowerMonitor.ComputeCpuStats(
+            SysfsPowerMonitor.ParseFreqSample(self._initial_freq),
+            SysfsPowerMonitor.ParseFreqSample(self._final_freq))
+        cstates = SysfsPowerMonitor.ComputeCpuStats(
+            self._platform.ParseCStateSample(self._initial_cstate),
+            self._platform.ParseCStateSample(self._final_cstate))
+        for cpu in frequencies:
+          out[cpu] = {'frequency_percent': frequencies.get(cpu)}
+          out[cpu]['cstate_residency_percent'] = cstates.get(cpu)
+      if self._standalone:
+        return self.CombineResults(out, {})
+      return out
+    finally:
+      self._initial_cstate = None
+      self._initial_freq = None
+
+  def GetCpuState(self):
+    """Retrieve CPU c-state residency times from the device.
+
+    Returns:
+        Dictionary containing c-state residency times for each CPU.
+    """
+    stats = {}
+    for cpu in self._cpus:
+      cpu_idle_path = os.path.join(CPU_PATH, cpu, 'cpuidle')
+      if not self._platform.PathExists(cpu_idle_path):
+        logging.warning(
+            'Cannot read cpu c-state residency times for %s: %s does not exist'
+            % (cpu, cpu_idle_path))
+        continue
+      cpu_state_path = os.path.join(cpu_idle_path, 'state*')
+      output = self._platform.RunCommand(
+          'cat %s %s %s; date +%%s' % (
+              os.path.join(cpu_state_path, 'name'),
+              os.path.join(cpu_state_path, 'time'),
+              os.path.join(cpu_state_path, 'latency')))
+      stats[cpu] = re.sub('\n\n+', '\n', output)
+    return stats
+
+  def GetCpuFreq(self):
+    """Retrieve CPU frequency times from the device.
+
+    Returns:
+        Dictionary containing frequency times for each CPU.
+    """
+    stats = {}
+    for cpu in self._cpus:
+      cpu_freq_path = os.path.join(
+          CPU_PATH, cpu, 'cpufreq/stats/time_in_state')
+      if not self._platform.PathExists(cpu_freq_path):
+        logging.warning(
+            'Cannot read cpu frequency times for %s due to %s not existing'
+            % (cpu, cpu_freq_path))
+        stats[cpu] = None
+        continue
+      try:
+        stats[cpu] = self._platform.GetFileContents(cpu_freq_path)
+      except Exception as e:
+        logging.warning(
+            'Cannot read cpu frequency times in %s due to error: %s' %
+            (cpu_freq_path, e.message))
+        stats[cpu] = None
+    return stats
+
+  @staticmethod
+  def ParseFreqSample(sample):
+    """Parse a single frequency sample.
+
+    Args:
+        sample: The single sample of frequency data to be parsed.
+
+    Returns:
+        A dictionary associating a frequency with a time.
+    """
+    sample_stats = {}
+    for cpu in sample:
+      frequencies = {}
+      if sample[cpu] is None:
+        sample_stats[cpu] = None
+        continue
+      for line in sample[cpu].splitlines():
+        pair = line.split()
+        freq = int(pair[0]) * 10 ** 3
+        timeunits = int(pair[1])
+        if freq in frequencies:
+          frequencies[freq] += timeunits
+        else:
+          frequencies[freq] = timeunits
+      sample_stats[cpu] = frequencies
+    return sample_stats
+
+  @staticmethod
+  def ComputeCpuStats(initial, final):
+    """Parse the CPU c-state and frequency values saved during monitoring.
+
+    Args:
+        initial: The parsed dictionary of initial statistics to be converted
+        into percentages.
+        final: The parsed dictionary of final statistics to be converted
+        into percentages.
+
+    Returns:
+        Dictionary containing percentages for each CPU as well as an average
+        across all CPUs.
+    """
+    cpu_stats = {}
+    # Each core might have different states or frequencies, so keep track of
+    # the total time in a state or frequency and how many cores report a time.
+    cumulative_times = collections.defaultdict(lambda: (0, 0))
+    for cpu in initial:
+      current_cpu = {}
+      total = 0
+      if not initial[cpu] or not final[cpu]:
+        cpu_stats[cpu] = collections.defaultdict(int)
+        continue
+      for state in initial[cpu]:
+        current_cpu[state] = final[cpu][state] - initial[cpu][state]
+        total += current_cpu[state]
+      if total == 0:
+        # Somehow it's possible for initial and final to have the same sum,
+        # but a different distribution, making total == 0. crbug.com/426430
+        cpu_stats[cpu] = collections.defaultdict(int)
+        continue
+      for state in current_cpu:
+        current_cpu[state] /= (float(total) / 100.0)
+        # Calculate the average c-state residency across all CPUs.
+        time, count = cumulative_times[state]
+        cumulative_times[state] = (time + current_cpu[state], count + 1)
+      cpu_stats[cpu] = current_cpu
+    average = {}
+    for state in cumulative_times:
+      time, count = cumulative_times[state]
+      average[state] = time / float(count)
+    cpu_stats['platform_info'] = average
+    return cpu_stats
+
+  @staticmethod
+  def CombineResults(cpu_stats, power_stats):
+    """Add frequency and c-state residency data to the power data.
+
+    Args:
+        cpu_stats: Dictionary containing CPU statistics.
+        power_stats: Dictionary containing power statistics.
+
+    Returns:
+        Dictionary in the format returned by StopMonitoringPower.
+    """
+    if not cpu_stats:
+      return power_stats
+    if 'component_utilization' not in power_stats:
+      power_stats['component_utilization'] = {}
+    if 'platform_info' in cpu_stats:
+      if 'platform_info' not in power_stats:
+        power_stats['platform_info'] = {}
+      power_stats['platform_info'].update(cpu_stats['platform_info'])
+      del cpu_stats['platform_info']
+    for cpu in cpu_stats:
+      power_stats['component_utilization'][cpu] = cpu_stats[cpu]
+    return power_stats
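
ComputeCpuStats turns the deltas of the per-state time_in_state counters into percentages of the total delta. A worked sketch restricted to two of cpu0's states from the unit test that follows (so the percentages differ from the full-test expectations, which use all sixteen states):

```
# Worked example of the percentage math in ComputeCpuStats, using two of
# cpu0's time_in_state entries from the test below.
initial = {1700000000: 6227, 200000000: 179}
final = {1700000000: 7159, 200000000: 15525}

deltas = dict((state, final[state] - initial[state]) for state in initial)
total = sum(deltas.values())                       # 932 + 15346 = 16278
percents = dict((state, deltas[state] / (float(total) / 100.0))
                for state in deltas)
# 1.7 GHz ran for ~5.7% of the interval, 200 MHz for ~94.3%.
assert abs(sum(percents.values()) - 100.0) < 1e-9
```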
diff --git a/catapult/telemetry/telemetry/internal/platform/power_monitor/sysfs_power_monitor_unittest.py b/catapult/telemetry/telemetry/internal/platform/power_monitor/sysfs_power_monitor_unittest.py
new file mode 100644
index 0000000..4389590
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/power_monitor/sysfs_power_monitor_unittest.py
@@ -0,0 +1,285 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.internal.platform import android_platform_backend
+from telemetry.internal.platform.power_monitor import sysfs_power_monitor
+
+
+class SysfsPowerMonitorMonitorTest(unittest.TestCase):
+  initial_freq = {
+    'cpu0': '1700000 6227\n1600000 0\n1500000 0\n1400000 28\n1300000 22\n'
+            '1200000 14\n1100000 19\n1000000 22\n900000 14\n800000 20\n'
+            '700000 15\n600000 23\n500000 23\n400000 9\n300000 28\n200000 179',
+    'cpu1': '1700000 11491\n1600000 0\n1500000 0\n1400000 248\n1300000 1166\n'
+            '1200000 2082\n1100000 2943\n1000000 6560\n900000 12517\n'
+            '800000 8690\n700000 5105\n600000 3800\n500000 5131\n400000 5479\n'
+            '300000 7571\n200000 133618',
+    'cpu2': '1700000 1131',
+    'cpu3': '1700000 1131'
+  }
+  final_freq = {
+    'cpu0': '1700000 7159\n1600000 0\n1500000 0\n1400000 68\n1300000 134\n'
+            '1200000 194\n1100000 296\n1000000 716\n900000 1301\n800000 851\n'
+            '700000 554\n600000 343\n500000 612\n400000 691\n300000 855\n'
+            '200000 15525',
+    'cpu1': '1700000 12048\n1600000 0\n1500000 0\n1400000 280\n1300000 1267\n'
+            '1200000 2272\n1100000 3163\n1000000 7039\n900000 13800\n'
+            '800000 9599\n700000 5655\n600000 4144\n500000 5655\n400000 6005\n'
+            '300000 8288\n200000 149724',
+    'cpu2': None,
+    'cpu3': ''
+  }
+  expected_initial_freq = {
+    'cpu0': {
+      1700000000: 6227,
+      1600000000: 0,
+      1500000000: 0,
+      1400000000: 28,
+      1300000000: 22,
+      1200000000: 14,
+      1100000000: 19,
+      1000000000: 22,
+      900000000: 14,
+      800000000: 20,
+      700000000: 15,
+      600000000: 23,
+      500000000: 23,
+      400000000: 9,
+      300000000: 28,
+      200000000: 179
+    },
+    'cpu1': {
+      1700000000: 11491,
+      1600000000: 0,
+      1500000000: 0,
+      1400000000: 248,
+      1300000000: 1166,
+      1200000000: 2082,
+      1100000000: 2943,
+      1000000000: 6560,
+      900000000: 12517,
+      800000000: 8690,
+      700000000: 5105,
+      600000000: 3800,
+      500000000: 5131,
+      400000000: 5479,
+      300000000: 7571,
+      200000000: 133618
+    },
+    'cpu2': {
+      1700000000: 1131
+    },
+    'cpu3': {
+      1700000000: 1131
+    }
+  }
+  expected_final_freq = {
+    'cpu0': {
+      1700000000: 7159,
+      1600000000: 0,
+      1500000000: 0,
+      1400000000: 68,
+      1300000000: 134,
+      1200000000: 194,
+      1100000000: 296,
+      1000000000: 716,
+      900000000: 1301,
+      800000000: 851,
+      700000000: 554,
+      600000000: 343,
+      500000000: 612,
+      400000000: 691,
+      300000000: 855,
+      200000000: 15525
+    },
+    'cpu1': {
+      1700000000: 12048,
+      1600000000: 0,
+      1500000000: 0,
+      1400000000: 280,
+      1300000000: 1267,
+      1200000000: 2272,
+      1100000000: 3163,
+      1000000000: 7039,
+      900000000: 13800,
+      800000000: 9599,
+      700000000: 5655,
+      600000000: 4144,
+      500000000: 5655,
+      400000000: 6005,
+      300000000: 8288,
+      200000000: 149724
+    },
+    'cpu2': None,
+    'cpu3': {}
+  }
+  expected_freq_percents = {
+    'platform_info': {
+      1700000000: 3.29254111574526,
+      1600000000: 0.0,
+      1500000000: 0.0,
+      1400000000: 0.15926805099535601,
+      1300000000: 0.47124116307273645,
+      1200000000: 0.818756100807525,
+      1100000000: 1.099381692400982,
+      1000000000: 2.5942528544384302,
+      900000000: 5.68661122326737,
+      800000000: 3.850545467654628,
+      700000000: 2.409691872245393,
+      600000000: 1.4693702487650486,
+      500000000: 2.4623575553879373,
+      400000000: 2.672038150383057,
+      300000000: 3.415770495015825,
+      200000000: 69.59817400982045
+    },
+    'cpu0': {
+      1700000000: 4.113700564971752,
+      1600000000: 0.0,
+      1500000000: 0.0,
+      1400000000: 0.1765536723163842,
+      1300000000: 0.4943502824858757,
+      1200000000: 0.7944915254237288,
+      1100000000: 1.2226341807909604,
+      1000000000: 3.0632062146892656,
+      900000000: 5.680614406779661,
+      800000000: 3.6679025423728815,
+      700000000: 2.379060734463277,
+      600000000: 1.4124293785310735,
+      500000000: 2.599752824858757,
+      400000000: 3.0102401129943503,
+      300000000: 3.650247175141243,
+      200000000: 67.73481638418079
+    },
+    'cpu1': {
+      1700000000: 2.4713816665187682,
+      1600000000: 0.0,
+      1500000000: 0.0,
+      1400000000: 0.1419824296743278,
+      1300000000: 0.44813204365959713,
+      1200000000: 0.8430206761913214,
+      1100000000: 0.9761292040110037,
+      1000000000: 2.1252994941875945,
+      900000000: 5.69260803975508,
+      800000000: 4.033188392936374,
+      700000000: 2.4403230100275093,
+      600000000: 1.526311118999024,
+      500000000: 2.3249622859171177,
+      400000000: 2.3338361877717633,
+      300000000: 3.1812938148904073,
+      200000000: 71.46153163546012
+    },
+    'cpu2': {
+      1700000000: 0.0,
+    },
+    'cpu3': {
+      1700000000: 0.0,
+    }
+  }
+
+  def testParseCpuFreq(self):
+    initial = sysfs_power_monitor.SysfsPowerMonitor.ParseFreqSample(
+        self.initial_freq)
+    final = sysfs_power_monitor.SysfsPowerMonitor.ParseFreqSample(
+        self.final_freq)
+    self.assertDictEqual(initial, self.expected_initial_freq)
+    self.assertDictEqual(final, self.expected_final_freq)
+
+  def testComputeCpuStats(self):
+    results = sysfs_power_monitor.SysfsPowerMonitor.ComputeCpuStats(
+        self.expected_initial_freq, self.expected_final_freq)
+    for cpu in self.expected_freq_percents:
+      for freq in results[cpu]:
+        self.assertAlmostEqual(results[cpu][freq],
+                               self.expected_freq_percents[cpu][freq])
+
+  def testComputeCpuStatsWithMissingData(self):
+    results = sysfs_power_monitor.SysfsPowerMonitor.ComputeCpuStats(
+        {'cpu1': {}}, {'cpu1': {}})
+    self.assertEqual(results['cpu1'][12345], 0)
+
+    results = sysfs_power_monitor.SysfsPowerMonitor.ComputeCpuStats(
+        {'cpu1': {123: 0}}, {'cpu1': {123: 0}})
+    self.assertEqual(results['cpu1'][123], 0)
+
+    results = sysfs_power_monitor.SysfsPowerMonitor.ComputeCpuStats(
+        {'cpu1': {123: 456}}, {'cpu1': {123: 456}})
+    self.assertEqual(results['cpu1'][123], 0)
+
+  def testComputeCpuStatsWithNumberChange(self):
+    results = sysfs_power_monitor.SysfsPowerMonitor.ComputeCpuStats(
+        {'cpu1': {'C0': 10, 'WFI': 20}},
+        {'cpu1': {'C0': 20, 'WFI': 10}})
+    self.assertEqual(results['cpu1']['C0'], 0)
+    self.assertEqual(results['cpu1']['WFI'], 0)
+
+  def testGetCpuStateForAndroidDevices(self):
+    class PlatformStub(object):
+      def __init__(self, run_command_return_value):
+        self._run_command_return_value = run_command_return_value
+      def RunCommand(self, cmd):
+        del cmd  # unused
+        return self._run_command_return_value
+      def PathExists(self, path):
+        return 'cpu0' in path or 'cpu1' in path
+
+    cpu_state_from_samsung_note3 = (
+        "C0\n\nC1\n\nC2\n\nC3\n\n"
+        "53658520886\n1809072\n7073\n1722554\n"
+        "1\n35\n300\n500\n"
+        "1412949256\n")
+    expected_cstate_dict = {
+      'C0': 1412895593940415,
+      'C1': 1809072,
+      'C2': 7073,
+      'C3': 1722554,
+      'WFI': 53658520886
+    }
+    cpus = ["cpu%d" % cpu for cpu in range(4)]
+    expected_result = dict(zip(cpus, [expected_cstate_dict]*2))
+
+    sysfsmon = sysfs_power_monitor.SysfsPowerMonitor(
+      PlatformStub(cpu_state_from_samsung_note3))
+    # pylint: disable=protected-access
+    sysfsmon._cpus = cpus
+    cstate = sysfsmon.GetCpuState()
+    result = android_platform_backend.AndroidPlatformBackend.ParseCStateSample(
+        cstate)
+    self.assertDictEqual(expected_result, result)
+
+  def testStandAlone(self):
+    class PlatformStub(object):
+      def __init__(self, run_command_return_value):
+        self._run_command_return_value = run_command_return_value
+      def RunCommand(self, cmd):
+        del cmd  # unused
+        return self._run_command_return_value
+      def PathExists(self, path):
+        del path  # unused
+        return True
+
+    cpu_state_from_samsung_note3 = (
+        "C0\n\nC1\n\nC2\n\nC3\n\n"
+        "53658520886\n1809072\n7073\n1722554\n"
+        "1\n35\n300\n500\n"
+        "1412949256\n")
+    expected_cstate_dict = {
+        'C0': 1412895593940415,
+        'C1': 1809072,
+        'C2': 7073,
+        'C3': 1722554,
+        'WFI': 53658520886
+    }
+    cpus = ["cpu%d" % cpu for cpu in range(2)]
+    expected_result = dict(zip(cpus, [expected_cstate_dict]*len(cpus)))
+
+    sysfsmon = sysfs_power_monitor.SysfsPowerMonitor(
+        PlatformStub(cpu_state_from_samsung_note3), standalone=True)
+    # pylint: disable=protected-access
+    sysfsmon._cpus = cpus
+    cstate = sysfsmon.GetCpuState()
+    result = android_platform_backend.AndroidPlatformBackend.ParseCStateSample(
+        cstate)
+    self.assertDictEqual(expected_result, result)
diff --git a/catapult/telemetry/telemetry/internal/platform/profiler/__init__.py b/catapult/telemetry/telemetry/internal/platform/profiler/__init__.py
new file mode 100644
index 0000000..4f035a3
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/profiler/__init__.py
@@ -0,0 +1,68 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+
+from telemetry.core import exceptions
+
+class Profiler(object):
+  """A sampling profiler provided by the platform.
+
+  A profiler is started in its constructor and should gather data until
+  CollectProfile() is called. Its life cycle is normally tied to a single
+  page, i.e., multiple profilers will be created for a page set.
+  WillCloseBrowser() is called right before the browser is closed to allow
+  any further cleanup.
+  """
+
+  def __init__(self, browser_backend, platform_backend, output_path, state):
+    self._browser_backend = browser_backend
+    self._platform_backend = platform_backend
+    self._output_path = output_path
+    self._state = state
+
+  @classmethod
+  def name(cls):
+    """User-friendly name of this profiler."""
+    raise NotImplementedError()
+
+  @classmethod
+  def is_supported(cls, browser_type):
+    """True iff this profiler is currently supported by the platform."""
+    raise NotImplementedError()
+
+  @classmethod
+  def CustomizeBrowserOptions(cls, browser_type, options):
+    """Override to customize the Browser's options before it is created."""
+    pass
+
+  @classmethod
+  def WillCloseBrowser(cls, browser_backend, platform_backend):
+    """Called before the browser is stopped."""
+    pass
+
+  def _GetProcessOutputFileMap(self):
+    """Returns a dict with pid: output_file."""
+    all_pids = ([self._browser_backend.pid] +
+                self._platform_backend.GetChildPids(self._browser_backend.pid))
+
+    process_name_counts = collections.defaultdict(int)
+    process_output_file_map = {}
+    for pid in all_pids:
+      try:
+        cmd_line = self._platform_backend.GetCommandLine(pid)
+        process_name = self._browser_backend.GetProcessName(cmd_line)
+        output_file = '%s.%s%s' % (self._output_path, process_name,
+                                   process_name_counts[process_name])
+        process_name_counts[process_name] += 1
+        process_output_file_map[pid] = output_file
+      except exceptions.ProcessGoneException:
+        # Ignore processes that disappeared since calling GetChildPids().
+        continue
+    return process_output_file_map
+
+  def CollectProfile(self):
+    """Collect the profile from the profiler."""
+    raise NotImplementedError()
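A brief sketch, not part of the patch, of what a concrete subclass of the base class above looks like; `NullProfiler` is a hypothetical name used only for illustration:

```python
from telemetry.internal.platform import profiler


class NullProfiler(profiler.Profiler):
  """Example profiler that gathers nothing."""

  @classmethod
  def name(cls):
    return 'null'

  @classmethod
  def is_supported(cls, browser_type):
    del browser_type  # unused
    return True

  def CollectProfile(self):
    # Real profilers typically write one output file per browser process,
    # using self._GetProcessOutputFileMap() to pick the file names, and
    # return the list of files they produced.
    return []
```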
diff --git a/catapult/telemetry/telemetry/internal/platform/profiler/android_prebuilt_profiler_helper.py b/catapult/telemetry/telemetry/internal/platform/profiler/android_prebuilt_profiler_helper.py
new file mode 100644
index 0000000..83adc59
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/profiler/android_prebuilt_profiler_helper.py
@@ -0,0 +1,32 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Android-specific, installs pre-built profilers."""
+
+import logging
+import os
+
+from telemetry.internal.util import binary_manager
+from telemetry import decorators
+
+_DEVICE_PROFILER_DIR = '/data/local/tmp/profilers/'
+
+
+def GetDevicePath(profiler_binary):
+  return os.path.join(_DEVICE_PROFILER_DIR, os.path.basename(profiler_binary))
+
+
+@decorators.Cache
+def InstallOnDevice(device, profiler_binary):
+  arch_name = device.GetABI()
+  host_path = binary_manager.FetchPath(profiler_binary, arch_name, 'android')
+  if not host_path:
+    logging.error('Profiler binary "%s" not found. Could not be installed.',
+                  profiler_binary)
+    return False
+
+  device_binary_path = GetDevicePath(profiler_binary)
+  device.PushChangedFiles([(host_path, device_binary_path)])
+  device.RunShellCommand('chmod 777 ' + device_binary_path)
+  return True
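A hedged usage sketch, assuming `device` is a devil `DeviceUtils` instance (as the helper's calls to `GetABI`, `PushChangedFiles` and `RunShellCommand` suggest); `_InstallPerf` is a hypothetical wrapper:

```python
from telemetry.internal.platform.profiler import (
    android_prebuilt_profiler_helper)


def _InstallPerf(device):
  """Installs the prebuilt 'perf' binary and returns its on-device path."""
  if android_prebuilt_profiler_helper.InstallOnDevice(device, 'perf'):
    return android_prebuilt_profiler_helper.GetDevicePath('perf')
  return None
```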
diff --git a/catapult/telemetry/telemetry/internal/platform/profiler/android_profiling_helper.py b/catapult/telemetry/telemetry/internal/platform/profiler/android_profiling_helper.py
new file mode 100644
index 0000000..6677442
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/profiler/android_profiling_helper.py
@@ -0,0 +1,315 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import glob
+import hashlib
+import logging
+import os
+import platform
+import re
+import shutil
+import subprocess
+
+from telemetry.internal.util import binary_manager
+from telemetry.core import platform as telemetry_platform
+from telemetry.core import util
+from telemetry import decorators
+from telemetry.internal.platform.profiler import android_prebuilt_profiler_helper
+
+from devil.android import md5sum  # pylint: disable=import-error
+
+
+try:
+  import sqlite3
+except ImportError:
+  sqlite3 = None
+
+
+_TEXT_SECTION = '.text'
+
+
+def _ElfMachineId(elf_file):
+  headers = subprocess.check_output(['readelf', '-h', elf_file])
+  return re.match(r'.*Machine:\s+(\w+)', headers, re.DOTALL).group(1)
+
+
+def _ElfSectionAsString(elf_file, section):
+  return subprocess.check_output(['readelf', '-p', section, elf_file])
+
+
+def _ElfSectionMd5Sum(elf_file, section):
+  result = subprocess.check_output(
+      'readelf -p%s "%s" | md5sum' % (section, elf_file), shell=True)
+  return result.split(' ', 1)[0]
+
+
+def _FindMatchingUnstrippedLibraryOnHost(device, lib):
+  lib_base = os.path.basename(lib)
+
+  device_md5 = device.RunShellCommand('md5 "%s"' % lib, as_root=True)[0]
+  device_md5 = device_md5.split(' ', 1)[0]
+
+  def FindMatchingStrippedLibrary(out_path):
+    # First find a matching stripped library on the host. This avoids the need
+    # to pull the stripped library from the device, which can take tens of
+    # seconds.
+    host_lib_pattern = os.path.join(out_path, '*_apk', 'libs', '*', lib_base)
+    for stripped_host_lib in glob.glob(host_lib_pattern):
+      with open(stripped_host_lib) as f:
+        host_md5 = hashlib.md5(f.read()).hexdigest()
+        if host_md5 == device_md5:
+          return stripped_host_lib
+    return None
+
+  out_path = None
+  stripped_host_lib = None
+  for out_path in util.GetBuildDirectories():
+    stripped_host_lib = FindMatchingStrippedLibrary(out_path)
+    if stripped_host_lib:
+      break
+
+  if not stripped_host_lib:
+    return None
+
+  # The corresponding unstripped library will be under out/Release/lib.
+  unstripped_host_lib = os.path.join(out_path, 'lib', lib_base)
+
+  # Make sure the unstripped library matches the stripped one. We do this
+  # by comparing the hashes of text sections in both libraries. This isn't an
+  # exact guarantee, but should still give reasonable confidence that the
+  # libraries are compatible.
+  # TODO(skyostil): Check .note.gnu.build-id instead once we're using
+  # --build-id=sha1.
+  # pylint: disable=undefined-loop-variable
+  if (_ElfSectionMd5Sum(unstripped_host_lib, _TEXT_SECTION) !=
+      _ElfSectionMd5Sum(stripped_host_lib, _TEXT_SECTION)):
+    return None
+  return unstripped_host_lib
+
+
+@decorators.Cache
+def GetPerfhostName():
+  return 'perfhost_' + telemetry_platform.GetHostPlatform().GetOSVersionName()
+
+
+# Ignored directories for libraries that aren't useful for symbolization.
+_IGNORED_LIB_PATHS = [
+  '/data/dalvik-cache',
+  '/tmp'
+]
+
+
+def GetRequiredLibrariesForPerfProfile(profile_file):
+  """Returns the set of libraries necessary to symbolize a given perf profile.
+
+  Args:
+    profile_file: Path to perf profile to analyse.
+
+  Returns:
+    A set of required library file names.
+  """
+  with open(os.devnull, 'w') as dev_null:
+    perfhost_path = binary_manager.FetchPath(
+        GetPerfhostName(), 'x86_64', 'linux')
+    perf = subprocess.Popen([perfhost_path, 'script', '-i', profile_file],
+                             stdout=dev_null, stderr=subprocess.PIPE)
+    _, output = perf.communicate()
+  missing_lib_re = re.compile(
+      ('^Failed to open (.*), continuing without symbols|'
+       '^(.*[.]so).*not found, continuing without symbols'))
+  libs = set()
+  for line in output.split('\n'):
+    lib = missing_lib_re.match(line)
+    if lib:
+      lib = lib.group(1) or lib.group(2)
+      path = os.path.dirname(lib)
+      if (any(path.startswith(ignored_path)
+              for ignored_path in _IGNORED_LIB_PATHS)
+          or path == '/' or not path):
+        continue
+      libs.add(lib)
+  return libs
+
+
+def GetRequiredLibrariesForVTuneProfile(profile_file):
+  """Returns the set of libraries necessary to symbolize a given VTune profile.
+
+  Args:
+    profile_file: Path to VTune profile to analyse.
+
+  Returns:
+    A set of required library file names.
+  """
+  db_file = os.path.join(profile_file, 'sqlite-db', 'dicer.db')
+  conn = sqlite3.connect(db_file)
+
+  try:
+    # The 'dd_module_file' table lists all libraries on the device. Only the
+    # ones with 'bin_located_path' are needed for the profile.
+    query = 'SELECT bin_path, bin_located_path FROM dd_module_file'
+    return set(row[0] for row in conn.cursor().execute(query) if row[1])
+  finally:
+    conn.close()
+
+
+def _FileMetadataMatches(filea, fileb):
+  """Check if the metadata of two files matches."""
+  assert os.path.exists(filea)
+  if not os.path.exists(fileb):
+    return False
+
+  fields_to_compare = [
+      'st_ctime', 'st_gid', 'st_mode', 'st_mtime', 'st_size', 'st_uid']
+
+  filea_stat = os.stat(filea)
+  fileb_stat = os.stat(fileb)
+  for field in fields_to_compare:
+    # shutil.copy2 doesn't get ctime/mtime identical when the file system
+    # provides sub-second accuracy.
+    if int(getattr(filea_stat, field)) != int(getattr(fileb_stat, field)):
+      return False
+  return True
+
+
+def CreateSymFs(device, symfs_dir, libraries, use_symlinks=True):
+  """Creates a symfs directory to be used for symbolizing profiles.
+
+  Prepares a set of files ("symfs") to be used with profilers such as perf for
+  converting binary addresses into human readable function names.
+
+  Args:
+    device: DeviceUtils instance identifying the target device.
+    symfs_dir: Path where the symfs should be created.
+    libraries: Set of library file names that should be included in the symfs.
+    use_symlinks: If True, link instead of copy unstripped libraries into the
+      symfs. This will speed up the operation, but the resulting symfs will no
+      longer be valid if the linked files are modified, e.g., by rebuilding.
+
+  Returns:
+    The absolute path to the kernel symbols within the created symfs.
+  """
+  logging.info('Building symfs into %s.' % symfs_dir)
+
+  for lib in libraries:
+    device_dir = os.path.dirname(lib)
+    output_dir = os.path.join(symfs_dir, device_dir[1:])
+    if not os.path.exists(output_dir):
+      os.makedirs(output_dir)
+    output_lib = os.path.join(output_dir, os.path.basename(lib))
+
+    if lib.startswith('/data/app'):
+      # If this is our own library instead of a system one, look for a matching
+      # unstripped library under the out directory.
+      unstripped_host_lib = _FindMatchingUnstrippedLibraryOnHost(device, lib)
+      if not unstripped_host_lib:
+        logging.warning('Could not find symbols for %s.' % lib)
+        logging.warning('Is the correct output directory selected '
+                        '(CHROMIUM_OUT_DIR)? Did you install the APK after '
+                        'building?')
+        continue
+      if use_symlinks:
+        if os.path.lexists(output_lib):
+          os.remove(output_lib)
+        os.symlink(os.path.abspath(unstripped_host_lib), output_lib)
+      # Copy the unstripped library only if it has been changed to avoid the
+      # delay.
+      elif not _FileMetadataMatches(unstripped_host_lib, output_lib):
+        logging.info('Copying %s to %s' % (unstripped_host_lib, output_lib))
+        shutil.copy2(unstripped_host_lib, output_lib)
+    else:
+      # Otherwise save a copy of the stripped system library under the symfs so
+      # the profiler can at least use the public symbols of that library. To
+      # speed things up, only pull files that don't match copies we already
+      # have in the symfs.
+      if not os.path.exists(output_lib):
+        pull = True
+      else:
+        host_md5sums = md5sum.CalculateHostMd5Sums([output_lib])
+        try:
+          device_md5sums = md5sum.CalculateDeviceMd5Sums([lib], device)
+        except:
+          logging.exception('New exception caused by DeviceUtils conversion')
+          raise
+
+        pull = True
+        if host_md5sums and device_md5sums and output_lib in host_md5sums \
+          and lib in device_md5sums:
+          pull = host_md5sums[output_lib] != device_md5sums[lib]
+
+      if pull:
+        logging.info('Pulling %s to %s', lib, output_lib)
+        device.PullFile(lib, output_lib)
+
+  # Also pull a copy of the kernel symbols.
+  output_kallsyms = os.path.join(symfs_dir, 'kallsyms')
+  if not os.path.exists(output_kallsyms):
+    device.PullFile('/proc/kallsyms', output_kallsyms)
+  return output_kallsyms
+
+
+def PrepareDeviceForPerf(device):
+  """Set up a device for running perf.
+
+  Args:
+    device: DeviceUtils instance identifying the target device.
+
+  Returns:
+    The path to the installed perf binary on the device.
+  """
+  android_prebuilt_profiler_helper.InstallOnDevice(device, 'perf')
+  # Make sure kernel pointers are not hidden.
+  device.WriteFile('/proc/sys/kernel/kptr_restrict', '0', as_root=True)
+  return android_prebuilt_profiler_helper.GetDevicePath('perf')
+
+
+def GetToolchainBinaryPath(library_file, binary_name):
+  """Return the path to an Android toolchain binary on the host.
+
+  Args:
+    library_file: ELF library which is used to identify the used ABI,
+        architecture and toolchain.
+    binary_name: Binary to search for, e.g., 'objdump'
+  Returns:
+    Full path to binary or None if the binary was not found.
+  """
+  # Mapping from ELF machine identifiers to GNU toolchain names.
+  toolchain_configs = {
+    'x86': 'i686-linux-android',
+    'MIPS': 'mipsel-linux-android',
+    'ARM': 'arm-linux-androideabi',
+    'x86-64': 'x86_64-linux-android',
+    'AArch64': 'aarch64-linux-android',
+  }
+  toolchain_config = toolchain_configs[_ElfMachineId(library_file)]
+  host_os = platform.uname()[0].lower()
+  host_machine = platform.uname()[4]
+
+  elf_comment = _ElfSectionAsString(library_file, '.comment')
+  toolchain_version = re.match(r'.*GCC: \(GNU\) ([\w.]+)',
+                               elf_comment, re.DOTALL)
+  if not toolchain_version:
+    return None
+  toolchain_version = toolchain_version.group(1)
+  toolchain_version = toolchain_version.replace('.x', '')
+
+  toolchain_path = os.path.abspath(os.path.join(
+      util.GetChromiumSrcDir(), 'third_party', 'android_tools', 'ndk',
+      'toolchains', '%s-%s' % (toolchain_config, toolchain_version)))
+  if not os.path.exists(toolchain_path):
+    logging.warning(
+        'Unable to find toolchain binary %s: toolchain not found at %s',
+        binary_name, toolchain_path)
+    return None
+
+  path = os.path.join(
+      toolchain_path, 'prebuilt', '%s-%s' % (host_os, host_machine), 'bin',
+      '%s-%s' % (toolchain_config, binary_name))
+  if not os.path.exists(path):
+    logging.warning(
+        'Unable to find toolchain binary %s: binary not found at %s',
+        binary_name, path)
+    return None
+
+  return path
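A sketch of the typical symbolization flow these helpers support, under the assumption that `device` is a devil `DeviceUtils` instance and `profile_file` is a perf data file pulled from the device; `_BuildSymFsForProfile` is a hypothetical wrapper:

```python
import tempfile

from telemetry.internal.platform.profiler import android_profiling_helper


def _BuildSymFsForProfile(device, profile_file):
  """Builds a symfs with every library the given perf profile refers to."""
  libs = android_profiling_helper.GetRequiredLibrariesForPerfProfile(
      profile_file)
  symfs_dir = tempfile.mkdtemp()
  kallsyms = android_profiling_helper.CreateSymFs(
      device, symfs_dir, libs, use_symlinks=True)
  # A perf report invocation can then point at the symfs and kernel symbols,
  # e.g. via the --symfs and --kallsyms options.
  return symfs_dir, kallsyms
```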
diff --git a/catapult/telemetry/telemetry/internal/platform/profiler/android_profiling_helper_unittest.py b/catapult/telemetry/telemetry/internal/platform/profiler/android_profiling_helper_unittest.py
new file mode 100644
index 0000000..d178aac
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/profiler/android_profiling_helper_unittest.py
@@ -0,0 +1,211 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import pickle
+import re
+import shutil
+import tempfile
+import time
+import unittest
+
+from telemetry.core import util
+from telemetry import decorators
+from telemetry.internal.platform.profiler import android_profiling_helper
+from telemetry.testing import simple_mock
+from telemetry.testing import tab_test_case
+
+
+def _GetLibrariesMappedIntoProcesses(device, pids):
+  libs = set()
+  for pid in pids:
+    maps_file = '/proc/%d/maps' % pid
+    maps = device.ReadFile(maps_file, as_root=True).splitlines()
+    for map_line in maps:
+      lib = re.match(r'.*\s(/.*[.]so)$', map_line)
+      if lib:
+        libs.add(lib.group(1))
+  return libs
+
+
+class TestFileMetadataMatches(unittest.TestCase):
+  def setUp(self):
+    self.tempdir = tempfile.mkdtemp()
+    self.filename_a = os.path.join(self.tempdir, 'filea')
+    self.filename_b = os.path.join(self.tempdir, 'fileb')
+
+    with open(self.filename_a, 'w') as f:
+      f.write('testing')
+
+  def tearDown(self):
+    shutil.rmtree(self.tempdir)
+
+  def testDoesntMatchNonExistant(self):
+    self.assertFalse(
+        android_profiling_helper._FileMetadataMatches(
+            self.filename_a, self.filename_b))
+
+  def testDoesntMatchJustExistence(self):
+    with open(self.filename_b, 'w') as f:
+      f.write('blah')
+
+    self.assertFalse(
+        android_profiling_helper._FileMetadataMatches(
+            self.filename_a, self.filename_b))
+
+  def testDoesntMatchCopy(self):
+    # This test can run so fast that the file system doesn't have enough
+    # accuracy to differentiate between the copy and initial file times.
+    # Hence we need to guarantee a delay here.
+    time.sleep(3)
+    shutil.copy(self.filename_a, self.filename_b)
+    self.assertFalse(
+        android_profiling_helper._FileMetadataMatches(
+            self.filename_a, self.filename_b))
+
+  def testMatchesAfterCopy2(self):
+    shutil.copy2(self.filename_a, self.filename_b)
+    self.assertTrue(
+        android_profiling_helper._FileMetadataMatches(
+            self.filename_a, self.filename_b))
+
+  def testDoesntMatchAfterCopy2ThenModify(self):
+    shutil.copy2(self.filename_a, self.filename_b)
+
+    filea = open(self.filename_a, 'w')
+    filea.write('moar testing!')
+    filea.close()
+
+    self.assertFalse(
+        android_profiling_helper._FileMetadataMatches(
+            self.filename_a, self.filename_b))
+
+  def testDoesntMatchAfterCopy2ThenModifyStats(self):
+    shutil.copy2(self.filename_a, self.filename_b)
+    os.utime(self.filename_a, (20, 20))
+    self.assertFalse(
+        android_profiling_helper._FileMetadataMatches(
+            self.filename_a, self.filename_b))
+
+  def testMatchesAfterCopyStatWithDifferentContent(self):
+    fileb = open(self.filename_b, 'w')
+    fileb.write('blahing')
+    fileb.close()
+
+    shutil.copystat(self.filename_a, self.filename_b)
+
+    self.assertTrue(
+        android_profiling_helper._FileMetadataMatches(
+            self.filename_a, self.filename_b))
+
+
+class TestAndroidProfilingHelper(unittest.TestCase):
+
+  @decorators.Enabled('linux')
+  def testGetRequiredLibrariesForPerfProfile(self):
+    perf_output = os.path.join(
+        util.GetUnittestDataDir(), 'sample_perf_report_output.txt')
+    with open(perf_output) as f:
+      perf_output = f.read()
+
+    mock_popen = simple_mock.MockObject()
+    mock_popen.ExpectCall('communicate').WillReturn([None, perf_output])
+
+    mock_subprocess = simple_mock.MockObject()
+    mock_subprocess.ExpectCall(
+        'Popen').WithArgs(simple_mock.DONT_CARE).WillReturn(mock_popen)
+    mock_subprocess.SetAttribute('PIPE', simple_mock.MockObject())
+
+    real_subprocess = android_profiling_helper.subprocess
+    android_profiling_helper.subprocess = mock_subprocess
+    try:
+      libs = android_profiling_helper.GetRequiredLibrariesForPerfProfile('foo')
+      self.assertEqual(libs, set([
+          '/data/app-lib/com.google.android.apps.chrome-2/libchrome.2016.0.so',
+          '/system/lib/libart.so',
+          '/system/lib/libc.so',
+          '/system/lib/libm.so']))
+    finally:
+      android_profiling_helper.subprocess = real_subprocess
+
+  @decorators.Enabled('android')
+  def testGetRequiredLibrariesForVTuneProfile(self):
+    vtune_db_output = os.path.join(
+        util.GetUnittestDataDir(), 'sample_vtune_db_output')
+    with open(vtune_db_output, 'rb') as f:
+      vtune_db_output = pickle.load(f)
+
+    mock_cursor = simple_mock.MockObject()
+    mock_cursor.ExpectCall(
+        'execute').WithArgs(simple_mock.DONT_CARE).WillReturn(vtune_db_output)
+
+    mock_conn = simple_mock.MockObject()
+    mock_conn.ExpectCall('cursor').WillReturn(mock_cursor)
+    mock_conn.ExpectCall('close')
+
+    mock_sqlite3 = simple_mock.MockObject()
+    mock_sqlite3.ExpectCall(
+        'connect').WithArgs(simple_mock.DONT_CARE).WillReturn(mock_conn)
+
+    real_sqlite3 = android_profiling_helper.sqlite3
+    android_profiling_helper.sqlite3 = mock_sqlite3
+    try:
+      libs = android_profiling_helper.GetRequiredLibrariesForVTuneProfile('foo')
+      self.assertEqual(libs, set([
+          '/data/app-lib/com.google.android.apps.chrome-1/libchrome.2019.0.so',
+          '/system/lib/libdvm.so',
+          '/system/lib/libc.so',
+          '/system/lib/libm.so']))
+    finally:
+      android_profiling_helper.sqlite3 = real_sqlite3
+
+
+class TestAndroidProfilingHelperTabTestCase(tab_test_case.TabTestCase):
+
+  def setUp(self):
+    super(TestAndroidProfilingHelperTabTestCase, self).setUp()
+    # pylint: disable=protected-access
+    browser_backend = self._browser._browser_backend
+    self._device = browser_backend.device()
+
+  @decorators.Enabled('android')
+  def testCreateSymFs(self):
+    # pylint: disable=protected-access
+    browser_pid = self._browser._browser_backend.pid
+    pids = ([browser_pid] +
+        self._browser._platform_backend.GetChildPids(browser_pid))
+    libs = _GetLibrariesMappedIntoProcesses(self._device, pids)
+    assert libs
+
+    symfs_dir = tempfile.mkdtemp()
+    try:
+      kallsyms = android_profiling_helper.CreateSymFs(self._device, symfs_dir,
+                                                      libs)
+
+      # Check that we have kernel symbols.
+      assert os.path.exists(kallsyms)
+
+      is_unstripped = re.compile(r'^/data/app(-lib)?/.*\.so$')
+      has_unstripped = False
+
+      # Check that all requested libraries are present.
+      for lib in libs:
+        has_unstripped = has_unstripped or is_unstripped.match(lib)
+        assert os.path.exists(os.path.join(symfs_dir, lib[1:])), \
+            '%s not found in symfs' % lib
+
+      # Make sure we found at least one unstripped library.
+      assert has_unstripped
+    finally:
+      shutil.rmtree(symfs_dir)
+
+  # Test fails: crbug.com/437081
+  # @decorators.Enabled('android')
+  @decorators.Disabled('all')
+  def testGetToolchainBinaryPath(self):
+    with tempfile.NamedTemporaryFile() as libc:
+      self._device.PullFile('/system/lib/libc.so', libc.name)
+      path = android_profiling_helper.GetToolchainBinaryPath(libc.name,
+                                                             'objdump')
+      assert path and os.path.exists(path)
diff --git a/catapult/telemetry/telemetry/internal/platform/profiler/android_screen_recorder_profiler.py b/catapult/telemetry/telemetry/internal/platform/profiler/android_screen_recorder_profiler.py
new file mode 100644
index 0000000..c1d4031
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/profiler/android_screen_recorder_profiler.py
@@ -0,0 +1,46 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# devil may not be available if we're not in an Android checkout.
+try:
+  from devil.android.tools import video_recorder
+except ImportError:
+  video_recorder = None
+
+from telemetry.internal.platform import profiler
+from telemetry.internal.backends.chrome import android_browser_finder
+
+
+_VIDEO_MEGABITS_PER_SECOND = 4
+
+
+class AndroidScreenRecordingProfiler(profiler.Profiler):
+  """Captures a screen recording on Android."""
+
+  def __init__(self, browser_backend, platform_backend, output_path, state):
+    super(AndroidScreenRecordingProfiler, self).__init__(
+        browser_backend, platform_backend, output_path, state)
+    self._output_path = output_path + '.mp4'
+    self._recorder = video_recorder.VideoRecorder(
+        browser_backend.device,
+        megabits_per_second=_VIDEO_MEGABITS_PER_SECOND)
+    self._recorder.Start()
+
+  @classmethod
+  def name(cls):
+    return 'android-screen-recorder'
+
+  @classmethod
+  def is_supported(cls, browser_type):
+    if browser_type == 'any':
+      return android_browser_finder.CanFindAvailableBrowsers()
+    return browser_type.startswith('android')
+
+  def CollectProfile(self):
+    self._recorder.Stop()
+    self._recorder.Pull(self._output_path)
+
+    print 'Screen recording saved as %s' % self._output_path
+    print 'To view, open in Chrome or a video player'
+    return [self._output_path]
diff --git a/catapult/telemetry/telemetry/internal/platform/profiler/android_screen_recorder_profiler_unittest.py b/catapult/telemetry/telemetry/internal/platform/profiler/android_screen_recorder_profiler_unittest.py
new file mode 100644
index 0000000..8e6632b
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/profiler/android_screen_recorder_profiler_unittest.py
@@ -0,0 +1,30 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import shutil
+import tempfile
+
+from telemetry import decorators
+from telemetry.internal.platform.profiler import (
+    android_screen_recorder_profiler)
+from telemetry.testing import tab_test_case
+
+
+class AndroidScreenRecorderProfilerTest(tab_test_case.TabTestCase):
+  @decorators.Enabled('android')
+  def testRecording(self):
+    out_dir = tempfile.mkdtemp()
+    try:
+      # pylint: disable=protected-access
+      profiler = (
+          android_screen_recorder_profiler.AndroidScreenRecordingProfiler(
+              self._browser._browser_backend,
+              self._browser._platform_backend,
+              os.path.join(out_dir, 'android_screen_recording'),
+              {}))
+      result = profiler.CollectProfile()[0]
+      self.assertTrue(os.path.exists(result))
+    finally:
+      shutil.rmtree(out_dir)
diff --git a/catapult/telemetry/telemetry/internal/platform/profiler/android_systrace_profiler.py b/catapult/telemetry/telemetry/internal/platform/profiler/android_systrace_profiler.py
new file mode 100644
index 0000000..a8523b9
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/profiler/android_systrace_profiler.py
@@ -0,0 +1,78 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import StringIO
+import subprocess
+import zipfile
+
+from telemetry.core import util
+from telemetry.internal.backends.chrome import android_browser_finder
+from telemetry.internal.platform import profiler
+from telemetry.timeline import trace_data as trace_data_module
+from telemetry.timeline import tracing_config
+
+_SYSTRACE_CATEGORIES = [
+    'gfx',
+    'input',
+    'view',
+    'sched',
+    'freq',
+]
+
+class AndroidSystraceProfiler(profiler.Profiler):
+  """Collects a Systrace on Android."""
+
+  def __init__(self, browser_backend, platform_backend, output_path, state,
+               device=None):
+    super(AndroidSystraceProfiler, self).__init__(
+        browser_backend, platform_backend, output_path, state)
+    assert self._browser_backend.supports_tracing
+    self._output_path = output_path + '-trace.zip'
+    self._systrace_output_path = output_path + '.systrace'
+
+    # Use telemetry's own tracing backend instead of the combined mode in
+    # adb_profile_chrome because some benchmarks also do tracing of their own
+    # and the two methods conflict.
+    config = tracing_config.TracingConfig()
+    config.enable_chrome_trace = True
+    self._browser_backend.StartTracing(config, timeout=10)
+    command = ['python', os.path.join(util.GetCatapultDir(), 'systrace', 'bin',
+                                      'adb_profile_chrome'),
+               '--categories', '', '--continuous', '--output',
+               self._systrace_output_path, '--json', '--systrace',
+               ','.join(_SYSTRACE_CATEGORIES)]
+    if device:
+      command.extend(['--device', device])
+    self._profiler = subprocess.Popen(command, stdin=subprocess.PIPE,
+                                      stdout=subprocess.PIPE)
+
+  @classmethod
+  def name(cls):
+    return 'android-systrace'
+
+  @classmethod
+  def is_supported(cls, browser_type):
+    if browser_type == 'any':
+      return android_browser_finder.CanFindAvailableBrowsers()
+    return browser_type.startswith('android')
+
+  def CollectProfile(self):
+    self._profiler.communicate(input='\n')
+    trace_result_builder = trace_data_module.TraceDataBuilder()
+    self._browser_backend.StopTracing(trace_result_builder)
+    trace_result = trace_result_builder.AsData()
+
+    trace_file = StringIO.StringIO()
+    trace_result.Serialize(trace_file)
+
+    # Merge the chrome and systraces into a zip file.
+    with zipfile.ZipFile(self._output_path, 'w', zipfile.ZIP_DEFLATED) as z:
+      z.writestr('trace.json', trace_file.getvalue())
+      z.write(self._systrace_output_path, 'systrace')
+      os.unlink(self._systrace_output_path)
+
+    print 'Systrace saved as %s' % self._output_path
+    print 'To view, open in chrome://tracing'
+    return [self._output_path]
diff --git a/catapult/telemetry/telemetry/internal/platform/profiler/android_systrace_profiler_unittest.py b/catapult/telemetry/telemetry/internal/platform/profiler/android_systrace_profiler_unittest.py
new file mode 100644
index 0000000..f94232e
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/profiler/android_systrace_profiler_unittest.py
@@ -0,0 +1,32 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import os
+import shutil
+import tempfile
+import zipfile
+
+from telemetry import decorators
+from telemetry.internal.platform.profiler import android_systrace_profiler
+from telemetry.testing import tab_test_case
+
+
+class TestAndroidSystraceProfiler(tab_test_case.TabTestCase):
+  @decorators.Enabled('android')
+  def testSystraceProfiler(self):
+    try:
+      out_dir = tempfile.mkdtemp()
+      # pylint: disable=protected-access
+      profiler = android_systrace_profiler.AndroidSystraceProfiler(
+          self._browser._browser_backend,
+          self._browser._platform_backend,
+          os.path.join(out_dir, 'systrace'),
+          {},
+          self._device)
+      result = profiler.CollectProfile()[0]
+      self.assertTrue(zipfile.is_zipfile(result))
+      with zipfile.ZipFile(result) as z:
+        self.assertEquals(len(z.namelist()), 2)
+        self.assertIn('sched_wakeup', z.open('systrace').read())
+    finally:
+      shutil.rmtree(out_dir)
diff --git a/catapult/telemetry/telemetry/internal/platform/profiler/android_traceview_profiler.py b/catapult/telemetry/telemetry/internal/platform/profiler/android_traceview_profiler.py
new file mode 100644
index 0000000..3471df4
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/profiler/android_traceview_profiler.py
@@ -0,0 +1,78 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+
+from telemetry.core import util
+from telemetry.internal.backends.chrome import android_browser_finder
+from telemetry.internal.platform import profiler
+
+try:
+  from devil.android import device_errors  # pylint: disable=import-error
+except ImportError:
+  device_errors = None
+
+
+class AndroidTraceviewProfiler(profiler.Profiler):
+  """Collects a Traceview on Android."""
+
+  _DEFAULT_DEVICE_DIR = '/data/local/tmp/traceview'
+
+  def __init__(self, browser_backend, platform_backend, output_path, state):
+    super(AndroidTraceviewProfiler, self).__init__(
+        browser_backend, platform_backend, output_path, state)
+
+    if self._browser_backend.device.FileExists(self._DEFAULT_DEVICE_DIR):
+      self._browser_backend.device.RunShellCommand(
+          'rm ' + os.path.join(self._DEFAULT_DEVICE_DIR, '*'))
+    else:
+      self._browser_backend.device.RunShellCommand(
+          'mkdir -p ' + self._DEFAULT_DEVICE_DIR)
+      self._browser_backend.device.RunShellCommand(
+          'chmod 777 ' + self._DEFAULT_DEVICE_DIR)
+
+    self._trace_files = []
+    for pid in self._GetProcessOutputFileMap().iterkeys():
+      device_dump_file = '%s/%s.trace' % (self._DEFAULT_DEVICE_DIR, pid)
+      self._trace_files.append((pid, device_dump_file))
+      self._browser_backend.device.RunShellCommand('am profile %s start %s' %
+                                                   (pid, device_dump_file))
+
+
+  @classmethod
+  def name(cls):
+    return 'android-traceview'
+
+  @classmethod
+  def is_supported(cls, browser_type):
+    if browser_type == 'any':
+      return android_browser_finder.CanFindAvailableBrowsers()
+    return browser_type.startswith('android')
+
+  def CollectProfile(self):
+    output_files = []
+    for pid, trace_file in self._trace_files:
+      self._browser_backend.device.RunShellCommand('am profile %s stop' % pid)
+      # pylint: disable=cell-var-from-loop
+      util.WaitFor(lambda: self._FileSize(trace_file) > 0, timeout=10)
+      output_files.append(trace_file)
+    try:
+      self._browser_backend.device.PullFile(
+          self._DEFAULT_DEVICE_DIR, self._output_path)
+    except:
+      logging.exception('New exception caused by DeviceUtils conversion')
+      raise
+    self._browser_backend.device.RunShellCommand(
+        'rm ' + os.path.join(self._DEFAULT_DEVICE_DIR, '*'))
+    print 'Traceview profiles available in ', self._output_path
+    print 'Use third_party/android_tools/sdk/tools/monitor '
+    print 'then use "File->Open File" to visualize them.'
+    return output_files
+
+  def _FileSize(self, file_name):
+    try:
+      return self._browser_backend.device.Stat(file_name).st_size
+    except device_errors.CommandFailedError:
+      return 0
diff --git a/catapult/telemetry/telemetry/internal/platform/profiler/iprofiler_profiler.py b/catapult/telemetry/telemetry/internal/platform/profiler/iprofiler_profiler.py
new file mode 100644
index 0000000..e7137f8
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/profiler/iprofiler_profiler.py
@@ -0,0 +1,97 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import signal
+import sys
+
+from telemetry.core import exceptions
+from telemetry.core import util
+from telemetry.internal.platform import profiler
+
+try:
+  import pexpect  # pylint: disable=import-error
+except ImportError:
+  pass
+
+
+class _SingleProcessIprofilerProfiler(object):
+  """An internal class for using iprofiler for a given process."""
+  def __init__(self, pid, output_path):
+    self._output_path = output_path
+    output_dir = os.path.dirname(self._output_path)
+    output_file = os.path.basename(self._output_path)
+    self._proc = pexpect.spawn(
+        'iprofiler', ['-timeprofiler', '-T', '300', '-a', str(pid),
+                      '-d', output_dir, '-o', output_file],
+        timeout=300)
+    while True:
+      if self._proc.getecho():
+        output = self._proc.readline().strip()
+        if not output:
+          continue
+        if 'iprofiler: Profiling process' in output:
+          break
+        print output
+      self._proc.interact(escape_character='\x0d')
+      if 'Failed to authorize rights' in output:
+        raise exceptions.ProfilingException(
+            'Failed to authorize rights for iprofiler\n')
+      if 'iprofiler error' in output:
+        raise exceptions.ProfilingException(
+            'Failed to start iprofiler for process %s\n' %
+            self._output_path.split('.')[1])
+      self._proc.write('\x0d')
+      print
+      def Echo():
+        return self._proc.getecho()
+      util.WaitFor(Echo, timeout=5)
+
+  def CollectProfile(self):
+    self._proc.kill(signal.SIGINT)
+    try:
+      self._proc.wait()
+    except pexpect.ExceptionPexpect:
+      pass
+    finally:
+      self._proc = None
+
+    print 'To view the profile, run:'
+    print '  open -a Instruments %s.dtps' % self._output_path
+    return self._output_path
+
+
+class IprofilerProfiler(profiler.Profiler):
+
+  def __init__(self, browser_backend, platform_backend, output_path, state):
+    super(IprofilerProfiler, self).__init__(
+        browser_backend, platform_backend, output_path, state)
+    process_output_file_map = self._GetProcessOutputFileMap()
+    self._process_profilers = []
+    for pid, output_file in process_output_file_map.iteritems():
+      if '.utility' in output_file:
+        # The utility process may not have been started by Telemetry.
+        # So we won't have permission to profile it.
+        continue
+      self._process_profilers.append(
+          _SingleProcessIprofilerProfiler(pid, output_file))
+
+  @classmethod
+  def name(cls):
+    return 'iprofiler'
+
+  @classmethod
+  def is_supported(cls, browser_type):
+    if sys.platform != 'darwin':
+      return False
+    if browser_type == 'any':
+      return True
+    return (not browser_type.startswith('android') and
+            not browser_type.startswith('cros'))
+
+  def CollectProfile(self):
+    output_files = []
+    for single_process in self._process_profilers:
+      output_files.append(single_process.CollectProfile())
+    return output_files
diff --git a/catapult/telemetry/telemetry/internal/platform/profiler/java_heap_profiler.py b/catapult/telemetry/telemetry/internal/platform/profiler/java_heap_profiler.py
new file mode 100644
index 0000000..5ca082a
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/profiler/java_heap_profiler.py
@@ -0,0 +1,97 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import subprocess
+import threading
+
+from telemetry.core import platform
+from telemetry.core import util
+from telemetry.internal.backends.chrome import android_browser_finder
+from telemetry.internal.platform import profiler
+from telemetry.internal.util import binary_manager
+
+try:
+  from devil.android import device_errors  # pylint: disable=import-error
+except ImportError:
+  device_errors = None
+
+
+class JavaHeapProfiler(profiler.Profiler):
+  """Android-specific, trigger and fetch java heap dumps."""
+
+  _DEFAULT_DEVICE_DIR = '/data/local/tmp/javaheap'
+  # TODO(bulach): expose this as a command line option somehow.
+  _DEFAULT_INTERVAL = 20
+
+  def __init__(self, browser_backend, platform_backend, output_path, state):
+    super(JavaHeapProfiler, self).__init__(
+        browser_backend, platform_backend, output_path, state)
+    self._run_count = 1
+
+    self._DumpJavaHeap(False)
+
+    self._timer = threading.Timer(self._DEFAULT_INTERVAL, self._OnTimer)
+    self._timer.start()
+
+  @classmethod
+  def name(cls):
+    return 'java-heap'
+
+  @classmethod
+  def is_supported(cls, browser_type):
+    if browser_type == 'any':
+      return android_browser_finder.CanFindAvailableBrowsers()
+    return browser_type.startswith('android')
+
+  def CollectProfile(self):
+    self._timer.cancel()
+    self._DumpJavaHeap(True)
+    try:
+      self._browser_backend.device.PullFile(
+          self._DEFAULT_DEVICE_DIR, self._output_path)
+    except:
+      logging.exception('New exception caused by DeviceUtils conversion')
+      raise
+    self._browser_backend.device.RunShellCommand(
+        'rm ' + os.path.join(self._DEFAULT_DEVICE_DIR, '*'))
+    output_files = []
+    for f in os.listdir(self._output_path):
+      if os.path.splitext(f)[1] == '.aprof':
+        input_file = os.path.join(self._output_path, f)
+        output_file = input_file.replace('.aprof', '.hprof')
+        hprof_conv = binary_manager.FetchPath(
+            'hprof-conv',
+            platform.GetHostPlatform().GetArchName(),
+            platform.GetHostPlatform().GetOSName())
+        subprocess.call([hprof_conv, input_file, output_file])
+        output_files.append(output_file)
+    return output_files
+
+  def _OnTimer(self):
+    self._DumpJavaHeap(False)
+
+  def _DumpJavaHeap(self, wait_for_completion):
+    if not self._browser_backend.device.FileExists(
+        self._DEFAULT_DEVICE_DIR):
+      self._browser_backend.device.RunShellCommand(
+          'mkdir -p ' + self._DEFAULT_DEVICE_DIR)
+      self._browser_backend.device.RunShellCommand(
+          'chmod 777 ' + self._DEFAULT_DEVICE_DIR)
+
+    device_dump_file = None
+    for pid in self._GetProcessOutputFileMap().iterkeys():
+      device_dump_file = '%s/%s.%s.aprof' % (self._DEFAULT_DEVICE_DIR, pid,
+                                             self._run_count)
+      self._browser_backend.device.RunShellCommand('am dumpheap %s %s' %
+                                                   (pid, device_dump_file))
+    if device_dump_file and wait_for_completion:
+      util.WaitFor(lambda: self._FileSize(device_dump_file) > 0, timeout=2)
+    self._run_count += 1
+
+  def _FileSize(self, file_name):
+    try:
+      return self._browser_backend.device.Stat(file_name).st_size
+    except device_errors.CommandFailedError:
+      return 0
diff --git a/catapult/telemetry/telemetry/internal/platform/profiler/monsoon.py b/catapult/telemetry/telemetry/internal/platform/profiler/monsoon.py
new file mode 100644
index 0000000..72425f7
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/profiler/monsoon.py
@@ -0,0 +1,292 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Interface for a USB-connected Monsoon power meter.
+
+http://msoon.com/LabEquipment/PowerMonitor/
+Currently Unix-only. Relies on fcntl, /dev, and /tmp.
+"""
+
+import collections
+import logging
+import os
+import select
+import struct
+import time
+
+import serial  # pylint: disable=import-error,no-name-in-module
+import serial.tools.list_ports  # pylint: disable=import-error,no-name-in-module
+
+
+Power = collections.namedtuple('Power', ['amps', 'volts'])
+
+
+class Monsoon(object):
+  """Provides a simple class to use the power meter.
+
+  mon = monsoon.Monsoon()
+  mon.SetVoltage(3.7)
+  mon.StartDataCollection()
+  mydata = []
+  while len(mydata) < 1000:
+    mydata.extend(mon.CollectData())
+  mon.StopDataCollection()
+  """
+
+  def __init__(self, device=None, serialno=None, wait=True):
+    """Establish a connection to a Monsoon.
+
+    By default, opens the first available port, waiting if none are ready.
+    A particular port can be specified with 'device', or a particular Monsoon
+    can be specified with 'serialno' (using the number printed on its back).
+    With wait=False, IOError is thrown if a device is not immediately available.
+    """
+    assert float(serial.VERSION) >= 2.7, \
+     'Monsoon requires pyserial v2.7 or later. You have %s' % serial.VERSION
+
+    self._coarse_ref = self._fine_ref = self._coarse_zero = self._fine_zero = 0
+    self._coarse_scale = self._fine_scale = 0
+    self._last_seq = 0
+    self._voltage_multiplier = None
+
+    if device:
+      self.ser = serial.Serial(device, timeout=1)
+      return
+
+    while 1:
+      for (port, desc, _) in serial.tools.list_ports.comports():
+        if not desc.lower().startswith('mobile device power monitor'):
+          continue
+        tmpname = '/tmp/monsoon.%s.%s' % (os.uname()[0], os.path.basename(port))
+        self._tempfile = open(tmpname, 'w')
+        try:  # Use a lockfile to ensure exclusive access.
+          # Put the import in here to avoid doing it on unsupported platforms.
+          import fcntl  # pylint: disable=import-error
+          fcntl.lockf(self._tempfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
+        except IOError:
+          logging.error('device %s is in use', port)
+          continue
+
+        try:  # Try to open the device.
+          self.ser = serial.Serial(port, timeout=1)
+          self.StopDataCollection()  # Just in case.
+          self._FlushInput()  # Discard stale input.
+          status = self.GetStatus()
+        except IOError, e:
+          logging.error('error opening device %s: %s', port, e)
+          continue
+
+        if not status:
+          logging.error('no response from device %s', port)
+        elif serialno and status['serialNumber'] != serialno:
+          logging.error('device %s is #%d', port, status['serialNumber'])
+        else:
+          if status['hardwareRevision'] == 1:
+            self._voltage_multiplier = 62.5 / 10**6
+          else:
+            self._voltage_multiplier = 125.0 / 10**6
+          return
+
+      self._tempfile = None
+      if not wait:
+        raise IOError('No device found')
+      logging.info('waiting for device...')
+      time.sleep(1)
+
+  def GetStatus(self):
+    """Requests and waits for status.  Returns status dictionary."""
+
+    # status packet format
+    STATUS_FORMAT = '>BBBhhhHhhhHBBBxBbHBHHHHBbbHHBBBbbbbbbbbbBH'
+    STATUS_FIELDS = [
+        'packetType', 'firmwareVersion', 'protocolVersion',
+        'mainFineCurrent', 'usbFineCurrent', 'auxFineCurrent', 'voltage1',
+        'mainCoarseCurrent', 'usbCoarseCurrent', 'auxCoarseCurrent', 'voltage2',
+        'outputVoltageSetting', 'temperature', 'status', 'leds',
+        'mainFineResistor', 'serialNumber', 'sampleRate',
+        'dacCalLow', 'dacCalHigh',
+        'powerUpCurrentLimit', 'runTimeCurrentLimit', 'powerUpTime',
+        'usbFineResistor', 'auxFineResistor',
+        'initialUsbVoltage', 'initialAuxVoltage',
+        'hardwareRevision', 'temperatureLimit', 'usbPassthroughMode',
+        'mainCoarseResistor', 'usbCoarseResistor', 'auxCoarseResistor',
+        'defMainFineResistor', 'defUsbFineResistor', 'defAuxFineResistor',
+        'defMainCoarseResistor', 'defUsbCoarseResistor', 'defAuxCoarseResistor',
+        'eventCode', 'eventData',
+    ]
+
+    self._SendStruct('BBB', 0x01, 0x00, 0x00)
+    while 1:  # Keep reading, discarding non-status packets.
+      data = self._ReadPacket()
+      if not data:
+        return None
+      if len(data) != struct.calcsize(STATUS_FORMAT) or data[0] != '\x10':
+        logging.debug('wanted status, dropped type=0x%02x, len=%d',
+                      ord(data[0]), len(data))
+        continue
+
+      status = dict(zip(STATUS_FIELDS, struct.unpack(STATUS_FORMAT, data)))
+      assert status['packetType'] == 0x10
+      for k in status.keys():
+        if k.endswith('VoltageSetting'):
+          status[k] = 2.0 + status[k] * 0.01
+        elif k.endswith('FineCurrent'):
+          pass  # Needs calibration data.
+        elif k.endswith('CoarseCurrent'):
+          pass  # Needs calibration data.
+        elif k.startswith('voltage') or k.endswith('Voltage'):
+          status[k] = status[k] * 0.000125
+        elif k.endswith('Resistor'):
+          status[k] = 0.05 + status[k] * 0.0001
+          if k.startswith('aux') or k.startswith('defAux'):
+            status[k] += 0.05
+        elif k.endswith('CurrentLimit'):
+          status[k] = 8 * (1023 - status[k]) / 1023.0
+      return status
+
+
+  def SetVoltage(self, v):
+    """Set the output voltage, 0 to disable."""
+    if v == 0:
+      self._SendStruct('BBB', 0x01, 0x01, 0x00)
+    else:
+      self._SendStruct('BBB', 0x01, 0x01, int((v - 2.0) * 100))
+
+  def SetStartupCurrent(self, a):
+    """Set the max startup output current. the unit of |a| : Amperes """
+    assert a >= 0 and a <= 8
+
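+    # The limit is a 10-bit value, stored inverted: 0 A maps to 1023
+    # and 8 A to 0, mirroring the 'CurrentLimit' decoding in GetStatus().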
+    val = 1023 - int((a/8.0)*1023)
+    self._SendStruct('BBB', 0x01, 0x08, val & 0xff)
+    self._SendStruct('BBB', 0x01, 0x09, val >> 8)
+
+  def SetMaxCurrent(self, a):
+    """Set the max output current. the unit of |a| : Amperes """
+    assert a >= 0 and a <= 8
+
+    val = 1023 - int((a/8.0)*1023)
+    self._SendStruct('BBB', 0x01, 0x0a, val & 0xff)
+    self._SendStruct('BBB', 0x01, 0x0b, val >> 8)
+
+  def SetUsbPassthrough(self, val):
+    """Set the USB passthrough mode: 0 = off, 1 = on,  2 = auto."""
+    self._SendStruct('BBB', 0x01, 0x10, val)
+
+
+  def StartDataCollection(self):
+    """Tell the device to start collecting and sending measurement data."""
+    self._SendStruct('BBB', 0x01, 0x1b, 0x01)  # Mystery command.
+    self._SendStruct('BBBBBBB', 0x02, 0xff, 0xff, 0xff, 0xff, 0x03, 0xe8)
+
+
+  def StopDataCollection(self):
+    """Tell the device to stop collecting measurement data."""
+    self._SendStruct('BB', 0x03, 0x00)  # Stop.
+
+
+  def CollectData(self):
+    """Return some current samples.  Call StartDataCollection() first."""
+    while 1:  # Loop until we get data or a timeout.
+      data = self._ReadPacket()
+      if not data:
+        return None
+      if len(data) < 4 + 8 + 1 or data[0] < '\x20' or data[0] > '\x2F':
+        logging.debug('wanted data, dropped type=0x%02x, len=%d',
+            ord(data[0]), len(data))
+        continue
+
+      seq, packet_type, x, _ = struct.unpack('BBBB', data[:4])
+      data = [struct.unpack(">hhhh", data[x:x+8])
+              for x in range(4, len(data) - 8, 8)]
+
+      if self._last_seq and seq & 0xF != (self._last_seq + 1) & 0xF:
+        logging.info('data sequence skipped, lost packet?')
+      self._last_seq = seq
+
+      if packet_type == 0:
+        if not self._coarse_scale or not self._fine_scale:
+          logging.info('waiting for calibration, dropped data packet')
+          continue
+
+        out = []
+        for main, usb, _, voltage in data:
+          main_voltage_v = self._voltage_multiplier * (voltage & ~3)
+          sample = 0.0
+          if main & 1:
+            sample += ((main & ~1) - self._coarse_zero) * self._coarse_scale
+          else:
+            sample += (main - self._fine_zero) * self._fine_scale
+          if usb & 1:
+            sample += ((usb & ~1) - self._coarse_zero) * self._coarse_scale
+          else:
+            sample += (usb - self._fine_zero) * self._fine_scale
+          out.append(Power(sample, main_voltage_v))
+        return out
+
+      elif packet_type == 1:
+        self._fine_zero = data[0][0]
+        self._coarse_zero = data[1][0]
+
+      elif packet_type == 2:
+        self._fine_ref = data[0][0]
+        self._coarse_ref = data[1][0]
+
+      else:
+        logging.debug('discarding data packet type=0x%02x', packet_type)
+        continue
+
+      if self._coarse_ref != self._coarse_zero:
+        self._coarse_scale = 2.88 / (self._coarse_ref - self._coarse_zero)
+      if self._fine_ref != self._fine_zero:
+        self._fine_scale = 0.0332 / (self._fine_ref - self._fine_zero)
+
+
+  def _SendStruct(self, fmt, *args):
+    """Pack a struct (without length or checksum) and send it."""
+    data = struct.pack(fmt, *args)
+    data_len = len(data) + 1
+    checksum = (data_len + sum(struct.unpack('B' * len(data), data))) % 256
+    out = struct.pack('B', data_len) + data + struct.pack('B', checksum)
+    self.ser.write(out)
+
+
+  def _ReadPacket(self):
+    """Read a single data record as a string (without length or checksum)."""
+    len_char = self.ser.read(1)
+    if not len_char:
+      logging.error('timeout reading from serial port')
+      return None
+
+    data_len = ord(len_char)
+    if not data_len:
+      return ''
+
+    result = self.ser.read(data_len)
+    if len(result) != data_len:
+      return None
+    body = result[:-1]
+    checksum = (data_len + sum(struct.unpack('B' * len(body), body))) % 256
+    if result[-1] != struct.pack('B', checksum):
+      logging.error('invalid checksum from serial port')
+      return None
+    return result[:-1]
+
+  def _FlushInput(self):
+    """Flush all read data until no more available."""
+    self.ser.flush()
+    flushed = 0
+    while True:
+      ready_r, _, ready_x = select.select([self.ser], [], [self.ser], 0)
+      if len(ready_x) > 0:
+        logging.error('exception from serial port')
+        return None
+      elif len(ready_r) > 0:
+        flushed += 1
+        self.ser.read(1)  # This may cause underlying buffering.
+        self.ser.flush()  # Flush the underlying buffer too.
+      else:
+        break
+    if flushed > 0:
+      logging.debug('dropped >%d bytes', flushed)
diff --git a/catapult/telemetry/telemetry/internal/platform/profiler/monsoon_profiler.py b/catapult/telemetry/telemetry/internal/platform/profiler/monsoon_profiler.py
new file mode 100644
index 0000000..e8712e6
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/profiler/monsoon_profiler.py
@@ -0,0 +1,97 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Profiler using data collected from a Monsoon power meter.
+
+http://msoon.com/LabEquipment/PowerMonitor/
+Data collected is a namedtuple of (amps, volts), at 5000 samples/second.
+Output graph plots power in watts over time in seconds.
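+Sample i therefore corresponds to time i / 5000.0 seconds, and its
+instantaneous power in watts is amps * volts (see _CollectData below).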
+"""
+
+import csv
+import multiprocessing
+
+from telemetry.core import exceptions
+from telemetry.internal.platform import profiler
+from telemetry.internal.platform.profiler import monsoon
+from telemetry.util import statistics
+
+
+def _CollectData(output_path, is_collecting):
+  mon = monsoon.Monsoon(wait=False)
+  # Note: Telemetry requires the device to be connected by USB, but that
+  # puts it in charging mode. This increases the power consumption.
+  mon.SetUsbPassthrough(1)
+  # Nominal Li-ion voltage is 3.7V, but it puts out 4.2V at max capacity. Use
+  # 4.0V to simulate a "~80%" charged battery. Google "li-ion voltage curve".
+  # This is true only for a single cell. (Most smartphones, some tablets.)
+  mon.SetVoltage(4.0)
+
+  samples = []
+  try:
+    mon.StartDataCollection()
+    # Do one CollectData() call to let the Monsoon finish setting up, which
+    # takes about 0.3 seconds, and only signal that we've started after that.
+    mon.CollectData()
+    is_collecting.set()
+    while is_collecting.is_set():
+      samples += mon.CollectData()
+  finally:
+    mon.StopDataCollection()
+
+  # Add x-axis labels.
+  plot_data = [(i / 5000., sample.amps * sample.volts)
+               for i, sample in enumerate(samples)]
+
+  # Print data in csv.
+  with open(output_path, 'w') as output_file:
+    output_writer = csv.writer(output_file)
+    output_writer.writerows(plot_data)
+    output_file.flush()
+
+  power_samples = [s.amps * s.volts for s in samples]
+
+  print 'Monsoon profile power readings in watts:'
+  print '  Total    = %f' % statistics.TrapezoidalRule(power_samples, 1/5000.)
+  print ('  Average  = %f' % statistics.ArithmeticMean(power_samples) +
+         '+-%f' % statistics.StandardDeviation(power_samples))
+  print '  Peak     = %f' % max(power_samples)
+  print '  Duration = %f' % (len(power_samples) / 5000.)
+
+  print 'To view the Monsoon profile, run:'
+  print ('  echo "set datafile separator \',\'; plot \'%s\' with lines" | '
+      'gnuplot --persist' % output_path)
+
+
+class MonsoonProfiler(profiler.Profiler):
+  def __init__(self, browser_backend, platform_backend, output_path, state):
+    super(MonsoonProfiler, self).__init__(
+        browser_backend, platform_backend, output_path, state)
+    # We collect the data in a separate process, so we can continuously
+    # read the samples from the USB port while running the test.
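+    # The Event doubles as a start/stop flag: the child sets it once the
+    # Monsoon is ready, and CollectProfile() clears it to tell the child to
+    # stop sampling.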
+    self._is_collecting = multiprocessing.Event()
+    self._collector = multiprocessing.Process(
+        target=_CollectData, args=(output_path, self._is_collecting))
+    self._collector.start()
+    if not self._is_collecting.wait(timeout=0.5):
+      self._collector.terminate()
+      raise exceptions.ProfilingException('Failed to start data collection.')
+
+  @classmethod
+  def name(cls):
+    return 'monsoon'
+
+  @classmethod
+  def is_supported(cls, browser_type):
+    try:
+      monsoon.Monsoon(wait=False)
+    except EnvironmentError:
+      return False
+    else:
+      return True
+
+  def CollectProfile(self):
+    self._is_collecting.clear()
+    self._collector.join()
+    return [self._output_path]
diff --git a/catapult/telemetry/telemetry/internal/platform/profiler/netlog_profiler.py b/catapult/telemetry/telemetry/internal/platform/profiler/netlog_profiler.py
new file mode 100644
index 0000000..1e27a6b
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/profiler/netlog_profiler.py
@@ -0,0 +1,51 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import tempfile
+
+from telemetry.internal.platform import profiler
+
+
+class NetLogProfiler(profiler.Profiler):
+
+  _NET_LOG_ARG = '--log-net-log='
+
+  @classmethod
+  def name(cls):
+    return 'netlog'
+
+  @classmethod
+  def is_supported(cls, browser_type):
+    return not browser_type.startswith('cros')
+
+  @classmethod
+  def CustomizeBrowserOptions(cls, browser_type, options):
+    if browser_type.startswith('android'):
+      dump_file = '/sdcard/net-internals-profile.json'
+    else:
+      dump_file = tempfile.mkstemp()[1]
+    options.AppendExtraBrowserArgs([cls._NET_LOG_ARG + dump_file])
+
+  def CollectProfile(self):
+    # Find output filename from browser argument.
+    for i in self._browser_backend.browser_options.extra_browser_args:
+      if i.startswith(self._NET_LOG_ARG):
+        output_file = i[len(self._NET_LOG_ARG):]
+    assert output_file
+    # On Android pull the output file to the host.
+    if self._platform_backend.GetOSName() == 'android':
+      host_output_file = '%s.json' % self._output_path
+      try:
+        self._browser_backend.device.PullFile(
+            output_file, host_output_file)
+      except:
+        logging.exception('New exception caused by DeviceUtils conversion')
+        raise
+      # Clean the device
+      self._browser_backend.device.RunShellCommand('rm %s' % output_file)
+      output_file = host_output_file
+    print 'Net-internals log saved as %s' % output_file
+    print 'To view, open in chrome://net-internals'
+    return [output_file]
diff --git a/catapult/telemetry/telemetry/internal/platform/profiler/oomkiller_profiler.py b/catapult/telemetry/telemetry/internal/platform/profiler/oomkiller_profiler.py
new file mode 100644
index 0000000..4aa750f
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/profiler/oomkiller_profiler.py
@@ -0,0 +1,78 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+
+from telemetry.internal.util import binary_manager
+from telemetry.internal.backends.chrome import android_browser_finder
+from telemetry.internal.platform import profiler
+
+from devil.android.sdk import intent  # pylint: disable=import-error
+
+class UnableToFindApplicationException(Exception):
+  """Exception when unable to find a launched application"""
+
+  def __init__(self, application):
+    super(UnableToFindApplicationException, self).__init__()
+    self.application = application
+
+  def __str__(self):
+    return repr(self.application)
+
+
+class OOMKillerProfiler(profiler.Profiler):
+  """Android-specific, Launch the music application and check it is still alive
+  at the end of the run."""
+
+  def __init__(self, browser_backend, platform_backend, output_path, state):
+    super(OOMKillerProfiler, self).__init__(
+        browser_backend, platform_backend, output_path, state)
+    if not 'mem_consumer_launched' in state:
+      state['mem_consumer_launched'] = True
+      arch_name = self._browser_backend.device.GetABI()
+      mem_consumer_path = binary_manager.FetchPath(
+          os.path.join('apks', 'MemConsumer.apk'), arch_name, 'android')
+      assert mem_consumer_path, ('Could not find memconsumer app. Please build '
+                                 'memconsumer target.')
+      if not self._platform_backend.CanLaunchApplication(
+          'org.chromium.memconsumer'):
+        self._platform_backend.InstallApplication(mem_consumer_path)
+      self._browser_backend.device.GoHome()
+      self._platform_backend.LaunchApplication(
+          'org.chromium.memconsumer/.MemConsumer',
+          '--ei memory 20')
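+      # '--ei memory 20' passes an integer intent extra; presumably it tells
+      # MemConsumer how much memory to hold on to.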
+      # Bring the browser to the foreground after launching the mem consumer
+      self._browser_backend.device.StartActivity(
+          intent.Intent(package=browser_backend.package,
+                        activity=browser_backend.activity),
+          blocking=True)
+
+  @classmethod
+  def name(cls):
+    return 'oomkiller'
+
+  @classmethod
+  def is_supported(cls, browser_type):
+    if browser_type == 'any':
+      return android_browser_finder.CanFindAvailableBrowsers()
+    return browser_type.startswith('android')
+
+  @classmethod
+  def WillCloseBrowser(cls, browser_backend, platform_backend):
+    browser_backend.device.ForceStop('org.chromium.memconsumer')
+
+  def CollectProfile(self):
+    missing_applications = self._MissingApplications()
+    if not missing_applications:
+      return []
+    raise UnableToFindApplicationException(', '.join(missing_applications))
+
+  def _MissingApplications(self):
+    # TODO(qsr): Add com.android.launcher to the list, when the reason why the
+    # launcher is often killed is understood.
+    must_have_apps = [
+        'org.chromium.memconsumer',
+    ]
+    return [app for app in must_have_apps if
+            not self._platform_backend.IsApplicationRunning(app)]
diff --git a/catapult/telemetry/telemetry/internal/platform/profiler/perf_profiler.py b/catapult/telemetry/telemetry/internal/platform/profiler/perf_profiler.py
new file mode 100644
index 0000000..9218ae8
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/profiler/perf_profiler.py
@@ -0,0 +1,257 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import re
+import signal
+import subprocess
+import sys
+import tempfile
+
+from devil.android import device_errors  # pylint: disable=import-error
+
+from telemetry.internal.util import binary_manager
+from telemetry.core import platform
+from telemetry.internal.platform import profiler
+from telemetry.internal.platform.profiler import android_profiling_helper
+
+from devil.android.perf import perf_control  # pylint: disable=import-error
+
+
+_PERF_OPTIONS = [
+    # Sample across all processes and CPUs so that the current CPU gets
+    # recorded in each sample.
+    '--all-cpus',
+    # In perf 3.13 --call-graph requires an argument, so use the -g short-hand
+    # which does not.
+    '-g',
+    # Record raw samples to get CPU information.
+    '--raw-samples',
+    # Increase sampling frequency for better coverage.
+    '--freq', '2000',
+]
+
+_PERF_OPTIONS_ANDROID = [
+    # Increase priority to avoid dropping samples. Requires root.
+    '--realtime', '80',
+]
+
+
+def _NicePath(path):
+  rel_path = os.path.relpath(path, os.curdir)
+  return rel_path if len(rel_path) < len(path) else path
+
+
+def _PrepareHostForPerf():
+  kptr_file = '/proc/sys/kernel/kptr_restrict'
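+  # When kptr_restrict is non-zero, kernel symbol addresses in /proc/kallsyms
+  # are hidden, which keeps perf from resolving kernel symbols.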
+  with open(kptr_file) as f:
+    if f.read().strip() != '0':
+      logging.warning('Making kernel symbols unrestricted. You might have to '
+          'enter your password for "sudo".')
+      with tempfile.NamedTemporaryFile() as zero:
+        zero.write('0')
+        zero.flush()
+        subprocess.call(['/usr/bin/sudo', 'cp', zero.name, kptr_file])
+
+
+def _InstallPerfHost():
+  perfhost_name = android_profiling_helper.GetPerfhostName()
+  host = platform.GetHostPlatform()
+  if not host.CanLaunchApplication(perfhost_name):
+    host.InstallApplication(perfhost_name)
+  return binary_manager.FetchPath(perfhost_name, 'x86_64', 'linux')
+
+
+class _SingleProcessPerfProfiler(object):
+  """An internal class for using perf for a given process.
+
+  On android, this profiler uses pre-built binaries from AOSP.
+  See more details in prebuilt/android/README.txt.
+  """
+  def __init__(self, pid, output_file, browser_backend, platform_backend,
+               perf_binary, perfhost_binary):
+    self._pid = pid
+    self._browser_backend = browser_backend
+    self._platform_backend = platform_backend
+    self._output_file = output_file
+    self._tmp_output_file = tempfile.NamedTemporaryFile('w', 0)
+    self._is_android = platform_backend.GetOSName() == 'android'
+    self._perf_binary = perf_binary
+    self._perfhost_binary = perfhost_binary
+    cmd_prefix = []
+    perf_args = ['record', '--pid', str(pid)]
+    if self._is_android:
+      cmd_prefix = ['adb', '-s', browser_backend.device.adb.GetDeviceSerial(),
+                   'shell', perf_binary]
+      perf_args += _PERF_OPTIONS_ANDROID
+      output_file = os.path.join('/sdcard', 'perf_profiles',
+                                 os.path.basename(output_file))
+      self._device_output_file = output_file
+      browser_backend.device.RunShellCommand(
+          'mkdir -p ' + os.path.dirname(self._device_output_file))
+      browser_backend.device.RunShellCommand(
+          'rm -f ' + self._device_output_file)
+    else:
+      cmd_prefix = [perf_binary]
+    perf_args += ['--output', output_file] + _PERF_OPTIONS
+    self._proc = subprocess.Popen(cmd_prefix + perf_args,
+        stdout=self._tmp_output_file, stderr=subprocess.STDOUT)
+
+  def CollectProfile(self):
+    if ('renderer' in self._output_file and
+        not self._is_android and
+        not self._platform_backend.GetCommandLine(self._pid)):
+      logging.warning('Renderer was swapped out during profiling. '
+                      'To collect a full profile rerun with '
+                      '"--extra-browser-args=--single-process"')
+    if self._is_android:
+      device = self._browser_backend.device
+      try:
+        binary_name = os.path.basename(self._perf_binary)
+        device.KillAll(binary_name, signum=signal.SIGINT, blocking=True,
+                       quiet=True)
+      except device_errors.CommandFailedError:
+        logging.warning('The perf process could not be killed on the device.')
+    self._proc.send_signal(signal.SIGINT)
+    exit_code = self._proc.wait()
+    try:
+      if exit_code == 128:
+        raise Exception(
+            """perf failed with exit code 128.
+Try rerunning this script under sudo or setting
+/proc/sys/kernel/perf_event_paranoid to "-1".\nOutput:\n%s""" %
+            self._GetStdOut())
+      elif exit_code not in (0, -2):
+        raise Exception(
+            'perf failed with exit code %d. Output:\n%s' % (exit_code,
+                                                            self._GetStdOut()))
+    finally:
+      self._tmp_output_file.close()
+    cmd = '%s report -n -i %s' % (_NicePath(self._perfhost_binary),
+                                  self._output_file)
+    if self._is_android:
+      device = self._browser_backend.device
+      try:
+        device.PullFile(self._device_output_file, self._output_file)
+      except:
+        logging.exception('New exception caused by DeviceUtils conversion')
+        raise
+      required_libs = \
+          android_profiling_helper.GetRequiredLibrariesForPerfProfile(
+              self._output_file)
+      symfs_root = os.path.dirname(self._output_file)
+      kallsyms = android_profiling_helper.CreateSymFs(device,
+                                                      symfs_root,
+                                                      required_libs,
+                                                      use_symlinks=True)
+      cmd += ' --symfs %s --kallsyms %s' % (symfs_root, kallsyms)
+      for lib in required_libs:
+        lib = os.path.join(symfs_root, lib[1:])
+        if not os.path.exists(lib):
+          continue
+        objdump_path = android_profiling_helper.GetToolchainBinaryPath(
+            lib, 'objdump')
+        if objdump_path:
+          cmd += ' --objdump %s' % _NicePath(objdump_path)
+          break
+
+    print 'To view the profile, run:'
+    print ' ', cmd
+    return self._output_file
+
+  def _GetStdOut(self):
+    self._tmp_output_file.flush()
+    try:
+      with open(self._tmp_output_file.name) as f:
+        return f.read()
+    except IOError:
+      return ''
+
+
+class PerfProfiler(profiler.Profiler):
+
+  def __init__(self, browser_backend, platform_backend, output_path, state):
+    super(PerfProfiler, self).__init__(
+        browser_backend, platform_backend, output_path, state)
+    process_output_file_map = self._GetProcessOutputFileMap()
+    self._process_profilers = []
+    self._perf_control = None
+
+    perf_binary = perfhost_binary = _InstallPerfHost()
+    try:
+      if platform_backend.GetOSName() == 'android':
+        device = browser_backend.device
+        perf_binary = android_profiling_helper.PrepareDeviceForPerf(device)
+        self._perf_control = perf_control.PerfControl(device)
+        self._perf_control.SetPerfProfilingMode()
+      else:
+        _PrepareHostForPerf()
+
+      for pid, output_file in process_output_file_map.iteritems():
+        if 'zygote' in output_file:
+          continue
+        self._process_profilers.append(
+            _SingleProcessPerfProfiler(
+                pid, output_file, browser_backend, platform_backend,
+                perf_binary, perfhost_binary))
+    except:
+      if self._perf_control:
+        self._perf_control.SetDefaultPerfMode()
+      raise
+
+  @classmethod
+  def name(cls):
+    return 'perf'
+
+  @classmethod
+  def is_supported(cls, browser_type):
+    if sys.platform != 'linux2':
+      return False
+    if platform.GetHostPlatform().GetOSName() == 'chromeos':
+      return False
+    return True
+
+  @classmethod
+  def CustomizeBrowserOptions(cls, browser_type, options):
+    options.AppendExtraBrowserArgs([
+        '--no-sandbox',
+        '--allow-sandbox-debugging',
+    ])
+
+  def CollectProfile(self):
+    if self._perf_control:
+      self._perf_control.SetDefaultPerfMode()
+    output_files = []
+    for single_process in self._process_profilers:
+      output_files.append(single_process.CollectProfile())
+    return output_files
+
+  @classmethod
+  def GetTopSamples(cls, file_name, number):
+    """Parses the perf generated profile in |file_name| and returns a
+    {function: period} dict of the |number| hottest functions.
+    """
+    assert os.path.exists(file_name)
+    with open(os.devnull, 'w') as devnull:
+      _InstallPerfHost()
+      report = subprocess.Popen(
+          [android_profiling_helper.GetPerfhostName(),
+           'report', '--show-total-period', '-U', '-t', '^', '-i', file_name],
+          stdout=subprocess.PIPE, stderr=devnull).communicate()[0]
+    period_by_function = {}
+    for line in report.split('\n'):
+      if not line or line.startswith('#'):
+        continue
+      fields = line.split('^')
+      if len(fields) != 5:
+        continue
+      period = int(fields[1])
+      function = fields[4].partition(' ')[2]
+      function = re.sub('<.*>', '', function)  # Strip template params.
+      function = re.sub('[(].*[)]', '', function)  # Strip function params.
+      period_by_function[function] = period
+      if len(period_by_function) == number:
+        break
+    return period_by_function
diff --git a/catapult/telemetry/telemetry/internal/platform/profiler/perf_profiler_unittest.py b/catapult/telemetry/telemetry/internal/platform/profiler/perf_profiler_unittest.py
new file mode 100644
index 0000000..76f0d3d
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/profiler/perf_profiler_unittest.py
@@ -0,0 +1,51 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import logging
+import os
+import unittest
+
+from telemetry.core import util
+from telemetry.internal.platform.profiler import perf_profiler
+from telemetry.testing import options_for_unittests
+from telemetry.testing import simple_mock
+
+
+class TestPerfProfiler(unittest.TestCase):
+  def testPerfProfiler(self):
+    options = options_for_unittests.GetCopy()
+    if not perf_profiler.PerfProfiler.is_supported(options.browser_type):
+      logging.warning('PerfProfiler is not supported. Skipping test')
+      return
+
+    profile_file = os.path.join(
+        util.GetUnittestDataDir(), 'perf_report_output.txt')
+    with open(profile_file) as f:
+      perf_report_output = f.read()
+
+    mock_popen = simple_mock.MockObject()
+    mock_popen.ExpectCall('communicate').WillReturn([perf_report_output])
+
+    mock_subprocess = simple_mock.MockObject()
+    mock_subprocess.ExpectCall(
+        'Popen').WithArgs(simple_mock.DONT_CARE).WillReturn(mock_popen)
+    mock_subprocess.SetAttribute('PIPE', simple_mock.MockObject())
+
+    real_subprocess = perf_profiler.subprocess
+    perf_profiler.subprocess = mock_subprocess
+    try:
+      self.assertEqual(
+          perf_profiler.PerfProfiler.GetTopSamples(profile_file, 10),
+          {'v8::internal::StaticMarkingVisitor::MarkMapContents': 63615201,
+           'v8::internal::RelocIterator::next': 38271931,
+           'v8::internal::LAllocator::MeetConstraintsBetween': 42913933,
+           'v8::internal::FlexibleBodyVisitor::Visit': 31909537,
+           'v8::internal::LiveRange::CreateAssignedOperand': 42913933,
+           'void v8::internal::RelocInfo::Visit': 96878864,
+           'WebCore::HTMLTokenizer::nextToken': 48240439,
+           'v8::internal::Scanner::ScanIdentifierOrKeyword': 46054550,
+           'sk_memset32_SSE2': 45121317,
+           'v8::internal::HeapObject::Size': 39786862
+           })
+    finally:
+      perf_profiler.subprocess = real_subprocess
diff --git a/catapult/telemetry/telemetry/internal/platform/profiler/profiler_finder.py b/catapult/telemetry/telemetry/internal/platform/profiler/profiler_finder.py
new file mode 100644
index 0000000..33c31e5
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/profiler/profiler_finder.py
@@ -0,0 +1,28 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+
+from telemetry.core import discover
+from telemetry.internal.platform import profiler
+from telemetry.core import util
+
+
+def _DiscoverProfilers():
+  profiler_dir = os.path.dirname(__file__)
+  return discover.DiscoverClasses(profiler_dir, util.GetTelemetryDir(),
+                                  profiler.Profiler,
+                                  index_by_class_name=True).values()
+
+
+def FindProfiler(name):
+  for p in _DiscoverProfilers():
+    if p.name() == name:
+      return p
+  return None
+
+
+def GetAllAvailableProfilers():
+  return sorted([p.name() for p in _DiscoverProfilers()
+                 if p.is_supported(browser_type='any')])
diff --git a/catapult/telemetry/telemetry/internal/platform/profiler/sample_profiler.py b/catapult/telemetry/telemetry/internal/platform/profiler/sample_profiler.py
new file mode 100644
index 0000000..63001b5
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/profiler/sample_profiler.py
@@ -0,0 +1,90 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import signal
+import subprocess
+import sys
+import tempfile
+
+from telemetry.core import exceptions
+from telemetry.core import util
+from telemetry.internal.platform import profiler
+
+
+class _SingleProcessSampleProfiler(object):
+  """An internal class for using iprofiler for a given process."""
+  def __init__(self, pid, output_path):
+    self._output_path = output_path
+    self._tmp_output_file = tempfile.NamedTemporaryFile('w', 0)
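+    # 'sample' is the OS X command-line sampling profiler; '-mayDie' lets it
+    # still produce a report if the target process exits, and '-file' sets
+    # the output path.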
+    self._proc = subprocess.Popen(
+        ['sample', str(pid), '-mayDie', '-file', self._output_path],
+        stdout=self._tmp_output_file, stderr=subprocess.STDOUT)
+    def IsStarted():
+      stdout = self._GetStdOut()
+      if 'sample cannot examine process' in stdout:
+        raise exceptions.ProfilingException(
+            'Failed to start sample for process %s\n' %
+            self._output_path.split('.')[1])
+      return 'Sampling process' in stdout
+    util.WaitFor(IsStarted, 120)
+
+  def CollectProfile(self):
+    self._proc.send_signal(signal.SIGINT)
+    exit_code = self._proc.wait()
+    try:
+      if exit_code:
+        raise Exception(
+            'sample failed with exit code %d. Output:\n%s' % (
+            exit_code, self._GetStdOut()))
+    finally:
+      self._proc = None
+      self._tmp_output_file.close()
+
+    print 'To view the profile, run:'
+    print '  open -a TextEdit %s' % self._output_path
+
+    return self._output_path
+
+  def _GetStdOut(self):
+    self._tmp_output_file.flush()
+    try:
+      with open(self._tmp_output_file.name) as f:
+        return f.read()
+    except IOError:
+      return ''
+
+
+class SampleProfiler(profiler.Profiler):
+
+  def __init__(self, browser_backend, platform_backend, output_path, state):
+    super(SampleProfiler, self).__init__(
+        browser_backend, platform_backend, output_path, state)
+    process_output_file_map = self._GetProcessOutputFileMap()
+    self._process_profilers = []
+    for pid, output_file in process_output_file_map.iteritems():
+      if '.utility' in output_file:
+        # The utility process may not have been started by Telemetry,
+        # so we won't have permission to profile it.
+        continue
+      self._process_profilers.append(
+          _SingleProcessSampleProfiler(pid, output_file))
+
+  @classmethod
+  def name(cls):
+    return 'sample'
+
+  @classmethod
+  def is_supported(cls, browser_type):
+    if sys.platform != 'darwin':
+      return False
+    if browser_type == 'any':
+      return True
+    return (not browser_type.startswith('android') and
+            not browser_type.startswith('cros'))
+
+  def CollectProfile(self):
+    output_paths = []
+    for single_process in self._process_profilers:
+      output_paths.append(single_process.CollectProfile())
+    return output_paths
diff --git a/catapult/telemetry/telemetry/internal/platform/profiler/strace_profiler.py b/catapult/telemetry/telemetry/internal/platform/profiler/strace_profiler.py
new file mode 100644
index 0000000..5b9fb6a
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/profiler/strace_profiler.py
@@ -0,0 +1,255 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import logging
+import re
+import signal
+import subprocess
+import sys
+import tempfile
+
+from telemetry.internal.platform import profiler
+from telemetry.timeline import model
+from telemetry.timeline import trace_data as trace_data_module
+
+# Parses one line of strace output, for example:
+# 6052  1311456063.159722 read(8, "\1\0\0\0\0\0\0\0", 8) = 8 <0.000022>
+_STRACE_LINE_RE = re.compile(
+    r'^(?P<tid>\d+)\s+'
+    r'(?P<ts>\d+)'
+    r'(?P<micro>.\d+)\s+'
+    r'(?P<func>.*?)'
+    r'[(](?P<args>.*?)[)]\s+=\s+'
+    r'(?P<ret>.*?)\s+'
+    r'<(?P<dur>[\d.]+)>$')
+
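+# Calls interrupted by a context switch are split across two lines, which are
+# stitched back together in _StraceToChromeTrace(). Illustrative examples
+# (format assumed from the regexes below):
+#   6052  1311456063.159722 read(8,  <unfinished ...>
+#   6052  1311456063.159744 <... read resumed> "\1\0", 8) = 8 <0.000022>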
+_UNFINISHED_LINE_RE = re.compile(
+    r'^(?P<tid>\d+)\s+'
+    r'(?P<line>.*?)'
+    r'<unfinished ...>$')
+
+_RESUMED_LINE_RE = re.compile(
+    r'^(?P<tid>\d+)\s+'
+    r'(?P<ts>\d+)'
+    r'(?P<micro>.\d+)\s+'
+    r'<[.][.][.]\s(?P<func>.*?)\sresumed>'
+    r'(?P<line>.*?)$')
+
+_KILLED_LINE_RE = re.compile(
+    r'^(?P<tid>\d+)\s+'
+    r'(?P<ts>\d+)'
+    r'(?P<micro>.\d+)\s+'
+    r'[+][+][+] killed by SIGKILL [+][+][+]$')
+
+
+def _StraceToChromeTrace(pid, infile):
+  """Returns chrometrace json format for |infile| strace output."""
+  # Map of fd:file_name for open file descriptors. Useful for displaying
+  # file name instead of the descriptor number.
+  fd_map = {}
+
+  # Map of tid:interrupted_call for the interrupted call on each thread. It is
+  # possible to context switch during a system call. In this case we must
+  # match up the lines.
+  interrupted_call_map = {}
+
+  out = []
+  with open(infile, 'r') as f:
+    for line in f.readlines():
+      # Ignore kill lines for now.
+      m = _KILLED_LINE_RE.match(line)
+      if m:
+        continue
+
+      # If this line is interrupted, then remember it and continue.
+      m = _UNFINISHED_LINE_RE.match(line)
+      if m:
+        assert m.group('tid') not in interrupted_call_map
+        interrupted_call_map[m.group('tid')] = line
+        continue
+
+      # If this is a resume of a previous line, stitch it together.
+      interrupted = False
+      m = _RESUMED_LINE_RE.match(line)
+      if m:
+        interrupted = True
+        assert m.group('tid') in interrupted_call_map
+        line = interrupted_call_map[m.group('tid')].replace(
+            '<unfinished ...>', m.group('line'))
+        del interrupted_call_map[m.group('tid')]
+
+      # At this point we can do a normal match.
+      m = _STRACE_LINE_RE.match(line)
+      if not m:
+        if ('exit' not in line and
+            'Profiling timer expired' not in line and
+            '<unavailable>' not in line):
+          logging.warn('Failed to parse line: %s' % line)
+        continue
+
+      ts_begin = int(1000000 * (int(m.group('ts')) + float(m.group('micro'))))
+      ts_end = ts_begin + int(1000000 * float(m.group('dur')))
+      tid = int(m.group('tid'))
+      function_name = unicode(m.group('func'), errors='ignore')
+      function_args = unicode(m.group('args'), errors='ignore')
+      ret = unicode(m.group('ret'), errors='ignore')
+      cat = 'strace'
+
+      possible_fd_arg = None
+      first_arg = function_args.split(',')[0]
+      if first_arg and first_arg.strip().isdigit():
+        possible_fd_arg = first_arg.strip()
+
+      if function_name == 'open' and ret.isdigit():
+        # 1918  1311606151.649379 open("/foo/bar.so", O_RDONLY) = 7 <0.000088>
+        fd_map[ret] = first_arg
+
+      args = {
+          'args': function_args,
+          'ret': ret,
+          }
+      if interrupted:
+        args['interrupted'] = True
+      if possible_fd_arg and possible_fd_arg in fd_map:
+        args['fd%s' % first_arg] = fd_map[possible_fd_arg]
+
+      out.append({
+          'cat': cat,
+          'pid': pid,
+          'tid': tid,
+          'ts': ts_begin,
+          'ph': 'B',  # Begin
+          'name': function_name,
+          })
+      out.append({
+          'cat': cat,
+          'pid': pid,
+          'tid': tid,
+          'ts': ts_end,
+          'ph': 'E',  # End
+          'name': function_name,
+          'args': args,
+          })
+
+  return out
+
+
+def _GenerateTraceMetadata(timeline_model):
+  out = []
+  for process in timeline_model.processes:
+    out.append({
+        'name': 'process_name',
+        'ph': 'M',  # Metadata
+        'pid': process,
+        'args': {
+          'name': timeline_model.processes[process].name
+          }
+        })
+    for thread in timeline_model.processes[process].threads:
+      out.append({
+          'name': 'thread_name',
+          'ph': 'M',  # Metadata
+          'pid': process,
+          'tid': thread,
+          'args': {
+            'name': timeline_model.processes[process].threads[thread].name
+            }
+          })
+  return out
+
+
+class _SingleProcessStraceProfiler(object):
+  """An internal class for using perf for a given process."""
+  def __init__(self, pid, output_file, platform_backend):
+    self._pid = pid
+    self._platform_backend = platform_backend
+    self._output_file = output_file
+    self._tmp_output_file = tempfile.NamedTemporaryFile('w', 0)
+    self._proc = subprocess.Popen(
+        ['strace', '-ttt', '-f', '-T', '-p', str(pid), '-o', output_file],
+        stdout=self._tmp_output_file, stderr=subprocess.STDOUT)
+
+  def CollectProfile(self):
+    if ('renderer' in self._output_file and
+        not self._platform_backend.GetCommandLine(self._pid)):
+      logging.warning('Renderer was swapped out during profiling. '
+                      'To collect a full profile rerun with '
+                      '"--extra-browser-args=--single-process"')
+    self._proc.send_signal(signal.SIGINT)
+    exit_code = self._proc.wait()
+    try:
+      if exit_code:
+        raise Exception('strace failed with exit code %d. Output:\n%s' % (
+                        exit_code, self._GetStdOut()))
+    finally:
+      self._tmp_output_file.close()
+
+    return _StraceToChromeTrace(self._pid, self._output_file)
+
+  def _GetStdOut(self):
+    self._tmp_output_file.flush()
+    try:
+      with open(self._tmp_output_file.name) as f:
+        return f.read()
+    except IOError:
+      return ''
+
+
+class StraceProfiler(profiler.Profiler):
+
+  def __init__(self, browser_backend, platform_backend, output_path, state):
+    super(StraceProfiler, self).__init__(
+        browser_backend, platform_backend, output_path, state)
+    assert self._browser_backend.supports_tracing
+    self._browser_backend.browser.StartTracing(None, timeout=10)
+    process_output_file_map = self._GetProcessOutputFileMap()
+    self._process_profilers = []
+    self._output_file = output_path + '.json'
+    for pid, output_file in process_output_file_map.iteritems():
+      if 'zygote' in output_file:
+        continue
+      self._process_profilers.append(
+          _SingleProcessStraceProfiler(pid, output_file, platform_backend))
+
+  @classmethod
+  def name(cls):
+    return 'strace'
+
+  @classmethod
+  def is_supported(cls, browser_type):
+    if sys.platform != 'linux2':
+      return False
+    # TODO(tonyg): This should be supported on android and cros.
+    if (browser_type.startswith('android') or
+       browser_type.startswith('cros')):
+      return False
+    return True
+
+  @classmethod
+  def CustomizeBrowserOptions(cls, browser_type, options):
+    options.AppendExtraBrowserArgs([
+        '--no-sandbox',
+        '--allow-sandbox-debugging'
+    ])
+
+  def CollectProfile(self):
+    print 'Processing trace...'
+
+    out_json = []
+
+    for single_process in self._process_profilers:
+      out_json.extend(single_process.CollectProfile())
+
+    trace_data_builder = trace_data_module.TraceDataBuilder()
+    self._browser_backend.browser.StopTracing(trace_data_builder)
+    timeline_model = model.TimelineModel(trace_data_builder.AsData())
+    out_json.extend(_GenerateTraceMetadata(timeline_model))
+
+    with open(self._output_file, 'w') as f:
+      f.write(json.dumps(out_json, separators=(',', ':')))
+
+    print 'Trace saved as %s' % self._output_file
+    print 'To view, open in chrome://tracing'
+    return [self._output_file]
diff --git a/catapult/telemetry/telemetry/internal/platform/profiler/tcmalloc_heap_profiler.py b/catapult/telemetry/telemetry/internal/platform/profiler/tcmalloc_heap_profiler.py
new file mode 100644
index 0000000..a11ae4c
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/profiler/tcmalloc_heap_profiler.py
@@ -0,0 +1,148 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import sys
+
+from telemetry.internal.backends.chrome import android_browser_finder
+from telemetry.internal.platform import profiler
+
+# Environment variables to (android properties, default value) mapping.
+_ENV_VARIABLES = {
+  'HEAP_PROFILE_TIME_INTERVAL': ('heapprof.time_interval', 20),
+  'HEAP_PROFILE_MMAP': ('heapprof.mmap', 1),
+  'DEEP_HEAP_PROFILE': ('heapprof.deep_heap_profile', 1),
+}
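+# On Android the corresponding heapprof.* system properties are set on the
+# device (see _SetDeviceProperties); on Linux the environment variables
+# themselves must be exported before the browser starts (see
+# _CheckEnvironmentVariables).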
+
+
+class _TCMallocHeapProfilerAndroid(object):
+  """An internal class to set android properties and fetch dumps from device."""
+
+  _DEFAULT_DEVICE_DIR = '/data/local/tmp/heap'
+
+  def __init__(self, browser_backend, output_path):
+    self._browser_backend = browser_backend
+    self._output_path = output_path
+
+    _ENV_VARIABLES['HEAPPROFILE'] = ('heapprof',
+        os.path.join(self._DEFAULT_DEVICE_DIR, 'dmprof'))
+
+    self._SetDeviceProperties(_ENV_VARIABLES)
+
+  def _SetDeviceProperties(self, properties):
+    device_configured = False
+    # This profiler requires adb root to set properties.
+    try:
+      self._browser_backend.device.EnableRoot()
+    except:
+      logging.exception('New exception caused by DeviceUtils conversion')
+      raise
+    for values in properties.itervalues():
+      device_property = self._browser_backend.device.GetProp(values[0])
+      if not device_property or not device_property.strip():
+        self._browser_backend.device.SetProp(values[0], values[1])
+        device_configured = True
+    if not self._browser_backend.device.FileExists(
+        self._DEFAULT_DEVICE_DIR):
+      self._browser_backend.device.RunShellCommand(
+          'mkdir -p ' + self._DEFAULT_DEVICE_DIR)
+      self._browser_backend.device.RunShellCommand(
+          'chmod 777 ' + self._DEFAULT_DEVICE_DIR)
+      device_configured = True
+    if device_configured:
+      raise Exception('Device required special config, run again.')
+
+  def CollectProfile(self):
+    try:
+      self._browser_backend.device.PullFile(
+          self._DEFAULT_DEVICE_DIR, self._output_path)
+    except:
+      logging.exception('New exception caused by DeviceUtils conversion')
+      raise
+    self._browser_backend.device.RunShellCommand(
+        'rm ' + os.path.join(self._DEFAULT_DEVICE_DIR, '*'))
+    if os.path.exists(self._output_path):
+      logging.info('TCMalloc dumps pulled to %s', self._output_path)
+      with file(os.path.join(self._output_path,
+                             'browser.pid'), 'w') as pid_file:
+        pid_file.write(str(self._browser_backend.pid).rjust(5, '0'))
+    return [self._output_path]
+
+
+class _TCMallocHeapProfilerLinux(object):
+  """An internal class to set environment variables and fetch dumps."""
+
+  _DEFAULT_DIR = '/tmp/tcmalloc/'
+
+  def __init__(self, browser_backend):
+    self._browser_backend = browser_backend
+    _ENV_VARIABLES['HEAPPROFILE'] = ('heapprof', self._DEFAULT_DIR + 'dmprof')
+    self._CheckEnvironmentVariables(_ENV_VARIABLES)
+
+  def _CheckEnvironmentVariables(self, env_vars):
+    msg = ''
+    for key, values in env_vars.iteritems():
+      if key not in os.environ:
+        msg += '%s=%s ' % (key, str(values[1]))
+    if msg:
+      raise Exception('Need environment variables, try again with:\n %s' % msg)
+    if not os.path.exists(os.environ['HEAPPROFILE']):
+      os.makedirs(os.environ['HEAPPROFILE'])
+    assert os.path.isdir(os.environ['HEAPPROFILE']), 'HEAPPROFILE is not a dir'
+
+  def CollectProfile(self):
+    with file(os.path.join(os.path.dirname(os.environ['HEAPPROFILE']),
+                           'browser.pid'), 'w') as pid_file:
+      pid_file.write(str(self._browser_backend.pid))
+    print 'TCMalloc dumps available at', os.environ['HEAPPROFILE']
+    return [os.environ['HEAPPROFILE']]
+
+
+class TCMallocHeapProfiler(profiler.Profiler):
+  """A Factory to instantiate the platform-specific profiler."""
+  def __init__(self, browser_backend, platform_backend, output_path, state):
+    super(TCMallocHeapProfiler, self).__init__(
+        browser_backend, platform_backend, output_path, state)
+    if platform_backend.GetOSName() == 'android':
+      self._platform_profiler = _TCMallocHeapProfilerAndroid(
+          browser_backend, output_path)
+    else:
+      self._platform_profiler = _TCMallocHeapProfilerLinux(browser_backend)
+
+  @classmethod
+  def name(cls):
+    return 'tcmalloc-heap'
+
+  @classmethod
+  def is_supported(cls, browser_type):
+    if browser_type.startswith('cros'):
+      return False
+    if sys.platform.startswith('linux'):
+      return True
+    if browser_type == 'any':
+      return android_browser_finder.CanFindAvailableBrowsers()
+    return browser_type.startswith('android')
+
+  @classmethod
+  def CustomizeBrowserOptions(cls, browser_type, options):
+    options.AppendExtraBrowserArgs('--no-sandbox')
+    options.AppendExtraBrowserArgs('--enable-memory-benchmarking')
+
+  @classmethod
+  def WillCloseBrowser(cls, browser_backend, platform_backend):
+    # The tcmalloc_heap_profiler dumps files at regular
+    # intervals (~20 secs).
+    # This is a minor optimization to ensure it'll dump the last file when
+    # the test completes.
+    for i in xrange(len(browser_backend.browser.tabs)):
+      browser_backend.browser.tabs[i].ExecuteJavaScript("""
+        if (chrome && chrome.memoryBenchmarking) {
+          chrome.memoryBenchmarking.heapProfilerDump('renderer', 'final');
+          chrome.memoryBenchmarking.heapProfilerDump('browser', 'final');
+        }
+      """)
+
+  def CollectProfile(self):
+    return self._platform_profiler.CollectProfile()
diff --git a/catapult/telemetry/telemetry/internal/platform/profiler/tcpdump_profiler.py b/catapult/telemetry/telemetry/internal/platform/profiler/tcpdump_profiler.py
new file mode 100644
index 0000000..b9039b4
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/profiler/tcpdump_profiler.py
@@ -0,0 +1,130 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import signal
+import subprocess
+import sys
+import tempfile
+
+from telemetry.internal.platform import profiler
+from telemetry.internal.platform.profiler import android_prebuilt_profiler_helper
+
+_TCP_DUMP_BASE_OPTS = ['-i', 'any', '-p', '-s', '0', '-w']
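+# -i any: capture on all interfaces; -p: do not switch to promiscuous mode;
+# -s 0: capture full packets; -w: write raw packets to the file that follows.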
+
+
+class _TCPDumpProfilerAndroid(object):
+  """An internal class to collect TCP dumps on android.
+
+  This profiler uses pre-built binaries from AOSP.
+  See more details in prebuilt/android/README.txt.
+  """
+
+  _DEVICE_DUMP_FILE = '/sdcard/tcpdump_profiles/capture.pcap'
+
+  def __init__(self, device, output_path):
+    self._device = device
+    self._output_path = output_path
+    self._device.RunShellCommand('mkdir -p ' +
+                                 os.path.dirname(self._DEVICE_DUMP_FILE))
+    self._proc = subprocess.Popen(
+        ['adb', '-s', self._device.adb.GetDeviceSerial(),
+         'shell', android_prebuilt_profiler_helper.GetDevicePath('tcpdump')] +
+         _TCP_DUMP_BASE_OPTS +
+         [self._DEVICE_DUMP_FILE])
+
+  def CollectProfile(self):
+    tcpdump_pid = self._device.GetPids('tcpdump')
+    if not tcpdump_pid or 'tcpdump' not in tcpdump_pid:
+      raise Exception('Unable to find TCPDump. Check your device is rooted '
+          'and tcpdump is installed at ' +
+          android_prebuilt_profiler_helper.GetDevicePath('tcpdump'))
+    if len(tcpdump_pid['tcpdump']) > 1:
+      raise Exception(
+          'At most one instance of process tcpdump expected but found pids: '
+          '%s' % tcpdump_pid)
+    tcpdump_pid = int(tcpdump_pid['tcpdump'][0])
+    self._device.RunShellCommand('kill -term %d' % tcpdump_pid)
+    self._proc.terminate()
+    host_dump = os.path.join(self._output_path,
+                             os.path.basename(self._DEVICE_DUMP_FILE))
+    try:
+      self._device.PullFile(self._DEVICE_DUMP_FILE, host_dump)
+    except:
+      logging.exception('New exception caused by DeviceUtils conversion')
+      raise
+    print 'TCP dump available at: %s ' % host_dump
+    print 'Use Wireshark to open it.'
+    return host_dump
+
+
+class _TCPDumpProfilerLinux(object):
+  """An internal class to collect TCP dumps on linux desktop."""
+
+  _DUMP_FILE = 'capture.pcap'
+
+  def __init__(self, output_path):
+    if not os.path.exists(output_path):
+      os.makedirs(output_path)
+    self._dump_file = os.path.join(output_path, self._DUMP_FILE)
+    self._tmp_output_file = tempfile.NamedTemporaryFile('w', 0)
+    try:
+      self._proc = subprocess.Popen(
+          ['tcpdump'] + _TCP_DUMP_BASE_OPTS + [self._dump_file],
+          stdout=self._tmp_output_file, stderr=subprocess.STDOUT)
+    except OSError as e:
+      raise Exception('Unable to execute TCPDump, please check your '
+          'installation. ' + str(e))
+
+  def CollectProfile(self):
+    self._proc.send_signal(signal.SIGINT)
+    exit_code = self._proc.wait()
+    try:
+      if exit_code:
+        raise Exception(
+            'tcpdump failed with exit code %d. Output:\n%s' %
+            (exit_code, self._GetStdOut()))
+    finally:
+      self._tmp_output_file.close()
+    print 'TCP dump available at: ', self._dump_file
+    print 'Use Wireshark to open it.'
+    return self._dump_file
+
+  def _GetStdOut(self):
+    self._tmp_output_file.flush()
+    try:
+      with open(self._tmp_output_file.name) as f:
+        return f.read()
+    except IOError:
+      return ''
+
+
+class TCPDumpProfiler(profiler.Profiler):
+  """A Factory to instantiate the platform-specific profiler."""
+  def __init__(self, browser_backend, platform_backend, output_path, state):
+    super(TCPDumpProfiler, self).__init__(
+        browser_backend, platform_backend, output_path, state)
+    if platform_backend.GetOSName() == 'android':
+      android_prebuilt_profiler_helper.InstallOnDevice(
+          browser_backend.device, 'tcpdump')
+      self._platform_profiler = _TCPDumpProfilerAndroid(
+          browser_backend.device, output_path)
+    else:
+      self._platform_profiler = _TCPDumpProfilerLinux(output_path)
+
+  @classmethod
+  def name(cls):
+    return 'tcpdump'
+
+  @classmethod
+  def is_supported(cls, browser_type):
+    if browser_type.startswith('cros'):
+      return False
+    if sys.platform.startswith('linux'):
+      return True
+    return browser_type.startswith('android')
+
+  def CollectProfile(self):
+    return self._platform_profiler.CollectProfile()
diff --git a/catapult/telemetry/telemetry/internal/platform/profiler/trace_profiler.py b/catapult/telemetry/telemetry/internal/platform/profiler/trace_profiler.py
new file mode 100644
index 0000000..3e5d0e8
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/profiler/trace_profiler.py
@@ -0,0 +1,80 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import StringIO
+import zipfile
+
+from telemetry.internal.platform import profiler
+from telemetry.timeline import trace_data as trace_data_module
+from telemetry.timeline import tracing_config
+
+
+class TraceProfiler(profiler.Profiler):
+
+  def __init__(self, browser_backend, platform_backend, output_path, state,
+               categories=None):
+    super(TraceProfiler, self).__init__(
+        browser_backend, platform_backend, output_path, state)
+    assert self._browser_backend.supports_tracing
+    # We always want flow events when tracing via telemetry.
+    categories_with_flow = 'disabled-by-default-toplevel.flow'
+    if categories:
+      categories_with_flow += ',%s' % categories
+    config = tracing_config.TracingConfig()
+    config.enable_chrome_trace = True
+    self._browser_backend.StartTracing(
+        config, categories_with_flow, timeout=10)
+
+  @classmethod
+  def name(cls):
+    return 'trace'
+
+  @classmethod
+  def is_supported(cls, browser_type):
+    return True
+
+  def CollectProfile(self):
+    print 'Processing trace...'
+
+    trace_result_builder = trace_data_module.TraceDataBuilder()
+    self._browser_backend.StopTracing(trace_result_builder)
+    trace_result = trace_result_builder.AsData()
+
+    trace_file = '%s.zip' % self._output_path
+
+    with zipfile.ZipFile(trace_file, 'w', zipfile.ZIP_DEFLATED) as z:
+      trace_data = StringIO.StringIO()
+      trace_result.Serialize(trace_data)
+      trace_name = '%s.json' % os.path.basename(self._output_path)
+      z.writestr(trace_name, trace_data.getvalue())
+
+    print 'Trace saved as %s' % trace_file
+    print 'To view, open in chrome://tracing'
+
+    return [trace_file]
+
+
+class TraceDetailedProfiler(TraceProfiler):
+
+  def __init__(self, browser_backend, platform_backend, output_path, state):
+    super(TraceDetailedProfiler, self).__init__(
+        browser_backend, platform_backend, output_path, state,
+        categories='disabled-by-default-cc.debug*')
+
+  @classmethod
+  def name(cls):
+    return 'trace-detailed'
+
+
+class TraceAllProfiler(TraceProfiler):
+
+  def __init__(self, browser_backend, platform_backend, output_path, state):
+    super(TraceAllProfiler, self).__init__(
+        browser_backend, platform_backend, output_path, state,
+        categories='disabled-by-default-*')
+
+  @classmethod
+  def name(cls):
+    return 'trace-all'
diff --git a/catapult/telemetry/telemetry/internal/platform/profiler/trace_profiler_unittest.py b/catapult/telemetry/telemetry/internal/platform/profiler/trace_profiler_unittest.py
new file mode 100644
index 0000000..7d844cf
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/profiler/trace_profiler_unittest.py
@@ -0,0 +1,33 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import json
+import os
+import shutil
+import tempfile
+import zipfile
+
+from telemetry import decorators
+from telemetry.internal.platform.profiler import trace_profiler
+from telemetry.testing import tab_test_case
+
+
+class TestTraceProfiler(tab_test_case.TabTestCase):
+
+  @decorators.Disabled('win')  # crbug.com/570955
+  def testTraceProfiler(self):
+    try:
+      out_dir = tempfile.mkdtemp()
+      profiler = trace_profiler.TraceProfiler(
+          self._browser._browser_backend,
+          self._browser._platform_backend,
+          os.path.join(out_dir, 'trace'),
+          {})
+      result = profiler.CollectProfile()[0]
+      self.assertTrue(zipfile.is_zipfile(result))
+      with zipfile.ZipFile(result) as z:
+        self.assertEquals(len(z.namelist()), 1)
+        with z.open(z.namelist()[0]) as f:
+          json.load(f)
+    finally:
+      shutil.rmtree(out_dir)
diff --git a/catapult/telemetry/telemetry/internal/platform/profiler/v8_profiler.py b/catapult/telemetry/telemetry/internal/platform/profiler/v8_profiler.py
new file mode 100644
index 0000000..b40ad19
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/profiler/v8_profiler.py
@@ -0,0 +1,54 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import re
+import tempfile
+
+from telemetry.internal.platform import profiler
+
+
+class V8Profiler(profiler.Profiler):
+
+  _V8_ARG = '--js-flags=--logfile=%s --prof --log-timer-events'
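+  # --logfile sets the V8 log path, --prof enables the sampling profiler, and
+  # --log-timer-events adds timer events to the log.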
+
+  @classmethod
+  def name(cls):
+    return 'v8'
+
+  @classmethod
+  def is_supported(cls, browser_type):
+    return not browser_type.startswith('cros')
+
+  @classmethod
+  def CustomizeBrowserOptions(cls, browser_type, options):
+    if browser_type.startswith('android'):
+      dump_file = '/data/local/tmp/v8-profile.log'
+    else:
+      dump_file = tempfile.mkstemp()[1]
+    options.AppendExtraBrowserArgs([cls._V8_ARG % dump_file, '--no-sandbox'])
+
+  def CollectProfile(self):
+    # Find output filename from browser argument.
+    for i in self._browser_backend.browser_options.extra_browser_args:
+      match = re.match(self._V8_ARG % r'(\S+)', i)
+      if match:
+        output_file = match.groups(0)[0]
+    assert output_file
+    # On Android pull the output file to the host.
+    if self._platform_backend.GetOSName() == 'android':
+      host_output_file = '%s.log' % self._output_path
+      try:
+        self._browser_backend.device.PullFile(
+            output_file, host_output_file)
+      except:
+        logging.exception('New exception caused by DeviceUtils conversion')
+        raise
+      # Clean the device
+      self._browser_backend.device.RunShellCommand('rm %s' % output_file)
+      output_file = host_output_file
+    print 'V8 profile saved as %s' % output_file
+    print 'To view, open in ' \
+          'http://v8.googlecode.com/svn/trunk/tools/tick-processor.html'
+    return [output_file]
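A small standalone sketch of the trick used in CollectProfile above: the same _V8_ARG template is formatted with a real path when building the browser arguments, and with a regex group when recovering that path later (the path below is just an example value):

import re

V8_ARG = '--js-flags=--logfile=%s --prof --log-timer-events'
browser_arg = V8_ARG % '/tmp/v8-profile.log'
match = re.match(V8_ARG % r'(\S+)', browser_arg)
# The regex group captures everything up to the next space, i.e. the log path.
assert match.group(1) == '/tmp/v8-profile.log'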
diff --git a/catapult/telemetry/telemetry/internal/platform/profiler/vtune_profiler.py b/catapult/telemetry/telemetry/internal/platform/profiler/vtune_profiler.py
new file mode 100644
index 0000000..d9a2fac
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/profiler/vtune_profiler.py
@@ -0,0 +1,155 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import subprocess
+import sys
+import tempfile
+
+from telemetry.core import exceptions
+from telemetry.internal.platform import profiler
+from telemetry.internal.platform.profiler import android_profiling_helper
+
+
+class _SingleProcessVTuneProfiler(object):
+  """An internal class for using vtune for a given process."""
+  def __init__(self, pid, output_file, browser_backend, platform_backend):
+    self._pid = pid
+    self._browser_backend = browser_backend
+    self._platform_backend = platform_backend
+    self._output_file = output_file
+    self._tmp_output_file = tempfile.NamedTemporaryFile('w', 0)
+    cmd = ['amplxe-cl', '-collect', 'hotspots',
+           '-target-pid', str(pid), '-r', self._output_file]
+    self._is_android = platform_backend.GetOSName() == 'android'
+    if self._is_android:
+      cmd += ['-target-system', 'android']
+
+    self._proc = subprocess.Popen(
+        cmd, stdout=self._tmp_output_file, stderr=subprocess.STDOUT)
+
+  def CollectProfile(self):
+    if 'renderer' in self._output_file:
+      try:
+        self._platform_backend.GetCommandLine(self._pid)
+      except exceptions.ProcessGoneException:
+        logging.warning('Renderer was swapped out during profiling. '
+                        'To collect a full profile rerun with '
+                        '"--extra-browser-args=--single-process"')
+    subprocess.call(['amplxe-cl', '-command', 'stop', '-r', self._output_file])
+
+    exit_code = self._proc.wait()
+    try:
+      # 1: amplxe: Error: Cannot find a running process with the specified ID.
+      #    Provide a valid PID.
+      if exit_code not in (0, 1):
+        raise Exception(
+            'amplxe-cl failed with exit code %d. Output:\n%s' % (exit_code,
+            self._GetStdOut()))
+    finally:
+      self._tmp_output_file.close()
+
+    if exit_code:
+      # The renderer process was swapped out. Now that we made sure VTune has
+      # stopped, return without further processing the invalid profile.
+      return self._output_file
+
+    if self._is_android:
+      required_libs = \
+          android_profiling_helper.GetRequiredLibrariesForVTuneProfile(
+              self._output_file)
+
+      device = self._browser_backend.device
+      symfs_root = os.path.dirname(self._output_file)
+      android_profiling_helper.CreateSymFs(device,
+                                           symfs_root,
+                                           required_libs,
+                                           use_symlinks=True)
+      logging.info('Resolving symbols in profile.')
+      subprocess.call(['amplxe-cl', '-finalize', '-r', self._output_file,
+                       '-search-dir', symfs_root])
+
+    print 'To view the profile, run:'
+    print '  amplxe-gui %s' % self._output_file
+
+    return self._output_file
+
+  def _GetStdOut(self):
+    self._tmp_output_file.flush()
+    try:
+      with open(self._tmp_output_file.name) as f:
+        return f.read()
+    except IOError:
+      return ''
+
+
+class VTuneProfiler(profiler.Profiler):
+
+  def __init__(self, browser_backend, platform_backend, output_path, state):
+    super(VTuneProfiler, self).__init__(
+        browser_backend, platform_backend, output_path, state)
+    process_output_file_map = self._GetProcessOutputFileMap()
+    self._process_profilers = []
+
+    has_renderer = False
+    for pid, output_file in process_output_file_map.iteritems():
+      if 'renderer' in output_file:
+        has_renderer = True
+        break
+
+    for pid, output_file in process_output_file_map.iteritems():
+      if has_renderer:
+        if not 'renderer' in output_file:
+          continue
+      elif not 'browser0' in output_file:
+        continue
+
+      self._process_profilers.append(
+          _SingleProcessVTuneProfiler(pid, output_file, browser_backend,
+                                      platform_backend))
+
+  @classmethod
+  def name(cls):
+    return 'vtune'
+
+  @classmethod
+  def is_supported(cls, browser_type):
+    if sys.platform != 'linux2':
+      return False
+    if browser_type.startswith('cros'):
+      return False
+    try:
+      proc = subprocess.Popen(['amplxe-cl', '-version'],
+                              stderr=subprocess.STDOUT,
+                              stdout=subprocess.PIPE)
+      proc.communicate()
+      if proc.returncode != 0:
+        return False
+
+      if browser_type.startswith('android'):
+        # VTune checks if 'su' is available on the device.
+        proc = subprocess.Popen(['adb', 'shell', 'su', '-c', 'id'],
+                                stderr=subprocess.STDOUT,
+                                stdout=subprocess.PIPE)
+        return 'not found' not in proc.communicate()[0]
+
+      return True
+    except OSError:
+      return False
+
+  @classmethod
+  def CustomizeBrowserOptions(cls, browser_type, options):
+    options.AppendExtraBrowserArgs([
+        '--no-sandbox',
+        '--allow-sandbox-debugging',
+    ])
+
+  def CollectProfile(self):
+    print 'Processing profile, this will take a few minutes...'
+
+    output_files = []
+    for single_process in self._process_profilers:
+      output_files.append(single_process.CollectProfile())
+    return output_files
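The VTuneProfiler constructor above profiles only renderer processes when any renderer is present, and falls back to the 'browser0' process otherwise. A standalone illustration of that selection logic, using a made-up pid-to-output-file map:

process_output_file_map = {
    101: 'prof.browser0',
    202: 'prof.renderer0',
    303: 'prof.gpu-process0',
}
has_renderer = any('renderer' in f for f in process_output_file_map.values())
selected = {}
for pid, output_file in process_output_file_map.items():
  if has_renderer:
    if 'renderer' not in output_file:
      continue
  elif 'browser0' not in output_file:
    continue
  selected[pid] = output_file
# With a renderer present only pid 202 is profiled; without one, only the
# 'browser0' entry would be kept.
assert selected == {202: 'prof.renderer0'}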
diff --git a/catapult/telemetry/telemetry/internal/platform/profiler/vtune_profiler_unittest.py b/catapult/telemetry/telemetry/internal/platform/profiler/vtune_profiler_unittest.py
new file mode 100644
index 0000000..d763d77
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/profiler/vtune_profiler_unittest.py
@@ -0,0 +1,118 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+import unittest
+
+from telemetry import decorators
+from telemetry.internal.platform.profiler import vtune_profiler
+from telemetry.testing import options_for_unittests
+from telemetry.testing import simple_mock
+from telemetry.testing import tab_test_case
+
+
+class MockPopen(object):
+  def __init__(self, returncode, stdout=None, stderr=None):
+    self.returncode = returncode
+    self.stdout = stdout
+    self.stderr = stderr
+
+  def communicate(self):
+    return (self.stdout, self.stderr)
+
+  def wait(self):
+    return self.returncode
+
+
+class MockSubprocess(object):
+  def __init__(self):
+    self.PIPE = simple_mock.MockObject()
+    self.STDOUT = simple_mock.MockObject()
+    self._num_collect_calls = 0
+    self._num_stop_calls = 0
+
+  @property
+  def num_collect_calls(self):
+    return self._num_collect_calls
+
+  @property
+  def num_stop_calls(self):
+    return self._num_stop_calls
+
+  def Popen(self, cmd, **_):
+    self._AnalyzeCommand(cmd)
+    return MockPopen(0)
+
+  def call(self, cmd):
+    self._AnalyzeCommand(cmd)
+
+  def _AnalyzeCommand(self, cmd):
+    if MockSubprocess._IsCollectCommand(cmd):
+      self._num_collect_calls += 1
+    elif MockSubprocess._IsStopCommand(cmd):
+      self._num_stop_calls += 1
+
+  @staticmethod
+  def _IsCollectCommand(cmd):
+    return '-collect' in cmd
+
+  @staticmethod
+  def _IsStopCommand(cmd):
+    try:
+      cmd_idx = cmd.index('-command') + 1
+      return cmd_idx < len(cmd) and cmd[cmd_idx] == 'stop'
+    except ValueError:
+      return False
+
+
+class TestVTuneProfiler(unittest.TestCase):
+
+  def testVTuneProfilerIsSupported(self):
+    options = options_for_unittests.GetCopy()
+
+    mock_subprocess = simple_mock.MockObject()
+    mock_subprocess.ExpectCall(
+        'Popen').WithArgs(simple_mock.DONT_CARE).WillReturn(MockPopen(0))
+    mock_subprocess.SetAttribute('PIPE', simple_mock.MockObject())
+    mock_subprocess.SetAttribute('STDOUT', simple_mock.MockObject())
+
+    real_subprocess = vtune_profiler.subprocess
+    vtune_profiler.subprocess = mock_subprocess
+
+    if options.browser_type.startswith('android'):
+      # On Android we're querying if 'su' is available.
+      mock_subprocess.ExpectCall('Popen').WithArgs(
+          simple_mock.DONT_CARE).WillReturn(MockPopen(0, 'su', None))
+
+    try:
+      self.assertTrue(
+          vtune_profiler.VTuneProfiler.is_supported(options.browser_type) or
+          sys.platform != 'linux2' or
+          options.browser_type.startswith('cros'))
+    finally:
+      vtune_profiler.subprocess = real_subprocess
+
+
+class TestVTuneProfilerTabTestCase(tab_test_case.TabTestCase):
+
+  # This test is only meant to be run if VTune is installed locally. Please
+  # run it locally if you are modifying related code, but it's disabled on the
+  # bots because they don't have VTune. See crbug.com/437085
+  @decorators.Disabled('all')
+  def testVTuneProfiler(self):
+    mock_subprocess = MockSubprocess()
+    real_subprocess = vtune_profiler.subprocess
+    vtune_profiler.subprocess = mock_subprocess
+
+    try:
+      # pylint: disable=protected-access
+      profiler = vtune_profiler.VTuneProfiler(self._browser._browser_backend,
+                                              self._browser._platform_backend,
+                                              'tmp',
+                                              {})
+      profiler.CollectProfile()
+      self.assertEqual(mock_subprocess.num_collect_calls,
+                       mock_subprocess.num_stop_calls)
+    finally:
+      vtune_profiler.subprocess = real_subprocess
diff --git a/catapult/telemetry/telemetry/internal/platform/profiler/win_pgo_profiler.py b/catapult/telemetry/telemetry/internal/platform/profiler/win_pgo_profiler.py
new file mode 100644
index 0000000..163e9e6
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/profiler/win_pgo_profiler.py
@@ -0,0 +1,95 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import glob
+import os
+import subprocess
+import sys
+
+from telemetry.internal.platform import profiler
+
+_PGOSWEEP_EXECUTABLE = 'pgosweep.exe'
+
+
+class WinPGOProfiler(profiler.Profiler):
+  """A profiler that run the Visual Studio PGO utility 'pgosweep.exe' before
+  terminating a browser or a renderer process.
+  """
+
+  def __init__(self, browser_backend, platform_backend, output_path, state):
+    super(WinPGOProfiler, self).__init__(
+        browser_backend, platform_backend, output_path, state)
+
+    pgosweep_is_in_path = False
+    for entry in os.environ['PATH'].split(os.pathsep):
+      if os.path.exists(os.path.join(entry, _PGOSWEEP_EXECUTABLE)):
+        pgosweep_is_in_path = True
+        break
+    if not pgosweep_is_in_path:
+      raise IOError(2, '%s isn\'t in the current path, run vcvarsall.bat to fix'
+           ' this.' % _PGOSWEEP_EXECUTABLE)
+
+    self._browser_dir = browser_backend.browser_directory
+    self._chrome_child_pgc_counter = self._GetNextProfileIndex('chrome_child')
+
+  def _GetNextProfileIndex(self, dll_name):
+    """Scan the directory containing the DLL |dll_name| to find the next index
+    to use for the profile data files.
+
+    Args:
+      dll_name: The name of the DLL for which we want to get the next index
+          to use.
+    """
+    max_index = 0
+    pgc_files = glob.glob(os.path.join(self._browser_dir,
+                                       '%s!*.pgc' % dll_name))
+    for pgc_file in pgc_files:
+      max_index = max(max_index,
+          int(os.path.splitext(os.path.split(pgc_file)[1])[0].split('!')[1]))
+    return max_index + 1
+
+  def _RunPGOSweep(self, pid, dll_name, index):
+    """Run the pgosweep utility to gather the profile data of a given process.
+
+    Args:
+      pid: The PID of the process we're interested in.
+      dll_name: The name of the DLL for which we want the profile data.
+      index: The index to use for the profile data file.
+
+    Returns the name of the profile data file.
+    """
+    pgc_filename = '%s\\%s!%d.pgc' % (self._browser_dir, dll_name, index)
+    subprocess.Popen([_PGOSWEEP_EXECUTABLE,
+                      '/pid:%d' % pid,
+                      '%s.dll' % dll_name,
+                      pgc_filename]
+                    ).wait()
+    return pgc_filename
+
+  @classmethod
+  def name(cls):
+    return 'win_pgo_profiler'
+
+  @classmethod
+  def is_supported(cls, browser_type):
+    # This profiler only makes sense when doing a Windows build with Visual
+    # Studio (minimum supported version is 2013 Update 2).
+    return sys.platform.startswith('win')
+
+  @classmethod
+  def CustomizeBrowserOptions(cls, browser_type, options):
+    # The sandbox needs to be disabled if we want to be able to gather the
+    # profile data.
+    options.AppendExtraBrowserArgs('--no-sandbox')
+
+  def CollectProfile(self):
+    """Collect the profile data for the current processes."""
+    output_files = []
+    for pid, output_file in self._GetProcessOutputFileMap().iteritems():
+      if 'renderer' in output_file:
+        output_files.append(self._RunPGOSweep(pid,
+                                              'chrome_child',
+                                              self._chrome_child_pgc_counter))
+        self._chrome_child_pgc_counter += 1
+    return output_files
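The pgc file naming convention shared by _GetNextProfileIndex and _RunPGOSweep is '<dll_name>!<index>.pgc'. A standalone sketch of the index extraction, with made-up file names:

import os

pgc_files = ['chrome_child!1.pgc', 'chrome_child!3.pgc', 'chrome_child!2.pgc']
max_index = 0
for pgc_file in pgc_files:
  # Strip the directory and extension, then take the part after '!'.
  max_index = max(
      max_index,
      int(os.path.splitext(os.path.split(pgc_file)[1])[0].split('!')[1]))
# The next profile data file would be named 'chrome_child!4.pgc'.
assert max_index + 1 == 4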
diff --git a/catapult/telemetry/telemetry/internal/platform/profiling_controller_backend.py b/catapult/telemetry/telemetry/internal/platform/profiling_controller_backend.py
new file mode 100644
index 0000000..744bc28
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/profiling_controller_backend.py
@@ -0,0 +1,47 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+from telemetry.internal.platform.profiler import profiler_finder
+
+
+class ProfilingControllerBackend(object):
+  def __init__(self, platform_backend, browser_backend):
+    self._platform_backend = platform_backend
+    self._browser_backend = browser_backend
+    self._active_profilers = []
+    self._profilers_states = {}
+
+  def Start(self, profiler_name, base_output_file):
+    """Starts profiling using |profiler_name|. Results are saved to
+    |base_output_file|.<process_name>."""
+    assert not self._active_profilers, 'Already profiling. Must stop first.'
+
+    profiler_class = profiler_finder.FindProfiler(profiler_name)
+
+    if not profiler_class.is_supported(self._browser_backend.browser_type):
+      raise Exception('The %s profiler is not '
+                      'supported on this platform.' % profiler_name)
+
+    if not profiler_class in self._profilers_states:
+      self._profilers_states[profiler_class] = {}
+
+    self._active_profilers.append(
+        profiler_class(self._browser_backend, self._platform_backend,
+            base_output_file, self._profilers_states[profiler_class]))
+
+  def Stop(self):
+    """Stops all active profilers and saves their results.
+
+    Returns:
+      A list of filenames produced by the profiler.
+    """
+    output_files = []
+    for profiler in self._active_profilers:
+      output_files.extend(profiler.CollectProfile())
+    self._active_profilers = []
+    return output_files
+
+  def WillCloseBrowser(self):
+    for profiler_class in self._profilers_states:
+      profiler_class.WillCloseBrowser(
+        self._browser_backend, self._platform_backend)
diff --git a/catapult/telemetry/telemetry/internal/platform/system_info.py b/catapult/telemetry/telemetry/internal/platform/system_info.py
new file mode 100644
index 0000000..8cf6d17
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/system_info.py
@@ -0,0 +1,41 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+from telemetry.internal.platform import gpu_info
+
+
+class SystemInfo(object):
+  """Provides low-level system information."""
+
+  def __init__(self, model_name, gpu_dict):
+    if (model_name is None) or (gpu_dict is None):
+      raise Exception("Missing model_name or gpu_dict argument")
+    self._model_name = model_name
+    self._gpu = gpu_info.GPUInfo.FromDict(gpu_dict)
+
+  @classmethod
+  def FromDict(cls, attrs):
+    """Constructs a SystemInfo from a dictionary of attributes.
+       Attributes currently required to be present in the dictionary:
+
+         model_name (string): a platform-dependent string
+           describing the model of machine, or the empty string if not
+           supported.
+         gpu (object containing GPUInfo's required attributes)
+    """
+    return cls(attrs["model_name"], attrs["gpu"])
+
+  @property
+  def model_name(self):
+    """A string describing the machine model.
+
+       This is a highly platform-dependent value and not currently
+       specified for any machine type aside from Macs. On Mac OS, this
+       is the model identifier, reformatted slightly; for example,
+       'MacBookPro 10.1'."""
+    return self._model_name
+
+  @property
+  def gpu(self):
+    """A GPUInfo object describing the graphics processor(s) on the system."""
+    return self._gpu
diff --git a/catapult/telemetry/telemetry/internal/platform/system_info_unittest.py b/catapult/telemetry/telemetry/internal/platform/system_info_unittest.py
new file mode 100644
index 0000000..d4c341e
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/system_info_unittest.py
@@ -0,0 +1,68 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import unittest
+
+from telemetry.internal.platform import gpu_device
+from telemetry.internal.platform import gpu_info
+from telemetry.internal.platform import system_info
+
+
+class TestSystemInfo(unittest.TestCase):
+
+  def testConstruction(self):
+    data = {
+        'model_name': 'MacBookPro 10.1',
+        'gpu': {
+            'devices': [
+                {'vendor_id': 1000, 'device_id': 2000,
+                 'vendor_string': 'a', 'device_string': 'b'},
+            ]
+        }
+    }
+    info = system_info.SystemInfo.FromDict(data)
+    self.assertTrue(isinstance(info, system_info.SystemInfo))
+    self.assertTrue(isinstance(info.gpu, gpu_info.GPUInfo))
+    self.assertEquals(info.model_name, 'MacBookPro 10.1')
+    self.assertTrue(len(info.gpu.devices) == 1)
+    self.assertTrue(isinstance(info.gpu.devices[0], gpu_device.GPUDevice))
+    self.assertEquals(info.gpu.devices[0].vendor_id, 1000)
+    self.assertEquals(info.gpu.devices[0].device_id, 2000)
+    self.assertEquals(info.gpu.devices[0].vendor_string, 'a')
+    self.assertEquals(info.gpu.devices[0].device_string, 'b')
+
+  def testEmptyModelName(self):
+    data = {
+        'model_name': '',
+        'gpu': {
+            'devices': [
+                {'vendor_id': 1000, 'device_id': 2000,
+                 'vendor_string': 'a', 'device_string': 'b'},
+            ]
+        }
+    }
+    try:
+      info = system_info.SystemInfo.FromDict(data)
+      self.assertEquals(info.model_name, '')
+    except AssertionError:
+      raise
+    except Exception:
+      self.fail('Should not raise exception for empty model_name string')
+
+  def testMissingAttrsFromDict(self):
+    data = {
+        'model_name': 'MacBookPro 10.1',
+        'devices': [{'vendor_id': 1000, 'device_id': 2000,
+                     'vendor_string': 'a', 'device_string': 'b'}]
+    }
+
+    for k in data:
+      data_copy = data.copy()
+      del data_copy[k]
+      try:
+        system_info.SystemInfo.FromDict(data_copy)
+        self.fail('Should raise exception if attribute "%s" is missing' % k)
+      except AssertionError:
+        raise
+      except KeyError:
+        pass
diff --git a/catapult/telemetry/telemetry/internal/platform/tracing_agent/__init__.py b/catapult/telemetry/telemetry/internal/platform/tracing_agent/__init__.py
new file mode 100644
index 0000000..58c0b17
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/tracing_agent/__init__.py
@@ -0,0 +1,81 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class TracingAgent(object):
+  """A tracing agent provided by the platform.
+
+  A tracing agent can gather data with Start() until Stop().
+  Before constructing a TracingAgent, first check whether it's supported on
+  the platform with the IsSupported method.
+
+  NOTE: All subclasses of TracingAgent must not change the constructor's
+  parameters so the agents can be dynamically constructed in
+  tracing_controller_backend.
+
+  """
+
+  def __init__(self, platform_backend):
+    self._platform_backend = platform_backend
+
+  @classmethod
+  def IsSupported(cls, platform_backend):
+    del platform_backend  # unused
+    return False
+
+  def StartAgentTracing(self, config, timeout):
+    """ Override to add tracing agent's custom logic to start tracing.
+
+    Depending on trace_options and category_filter, the tracing agent may
+    choose whether or not to start tracing.
+
+    Args:
+      config: tracing_config instance that contains trace_option and
+        category_filter
+        trace_options: an instance of tracing_options.TracingOptions that
+          control which core tracing systems should be enabled.
+        category_filter: an instance of
+          tracing_category_filter.TracingCategoryFilter
+      timeout: number of seconds that this tracing agent should keep trying
+        to start tracing before timing out.
+
+    Returns:
+      True if tracing agent started successfully.
+    """
+    raise NotImplementedError
+
+  def StopAgentTracing(self, trace_data_builder):
+    """ Override to add tracing agent's custom logic to stop tracing.
+
+    StopAgentTracing() should guarantee that tracing is stopped, even if
+    exceptions are raised.
+    """
+    raise NotImplementedError
+
+  def SupportsFlushingAgentTracing(self):
+    """ Override to indicate support of flushing tracing. """
+    return False
+
+  def FlushAgentTracing(self, config, timeout, trace_data_builder):
+    """ Override to add tracing agent's custom logic to flush tracing. """
+    del config, timeout, trace_data_builder  # unused
+    raise NotImplementedError
+
+  def SupportsExplicitClockSync(self):
+    """ Override to indicate support of explicit clock syncing. """
+    return False
+
+  def RecordClockSyncMarker(self, sync_id,
+                            record_controller_clocksync_marker_callback):
+    """ Override to record clock sync marker.
+
+    Only override this if the agent supports explicit clock syncing.
+    Args:
+      sync_id: Unique id for the sync event.
+      record_controller_clocksync_marker_callback: Function that takes sync_id
+        and a timestamp as argument.
+    """
+    del sync_id # unused
+    del record_controller_clocksync_marker_callback # unused
+    raise NotImplementedError
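For reference, a minimal hypothetical subclass showing the surface a concrete agent has to implement; it keeps the constructor signature unchanged, as the note in the class docstring requires, and is not part of this patch:

from telemetry.internal.platform import tracing_agent

class NoopTracingAgent(tracing_agent.TracingAgent):
  """Hypothetical agent that collects nothing; illustrates the interface."""

  @classmethod
  def IsSupported(cls, platform_backend):
    del platform_backend  # unused
    return True

  def StartAgentTracing(self, config, timeout):
    del config, timeout  # unused
    # A real agent would start collecting data here and return True on success.
    return False

  def StopAgentTracing(self, trace_data_builder):
    del trace_data_builder  # unused
    # A real agent would add its collected events to trace_data_builder here.

Note that agents are only picked up automatically if they live in this tracing_agent directory, per the discovery code in tracing_controller_backend further below.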
diff --git a/catapult/telemetry/telemetry/internal/platform/tracing_agent/chrome_tracing_agent.py b/catapult/telemetry/telemetry/internal/platform/tracing_agent/chrome_tracing_agent.py
new file mode 100644
index 0000000..e05ff2a
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/tracing_agent/chrome_tracing_agent.py
@@ -0,0 +1,221 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import atexit
+import logging
+import os
+import shutil
+import stat
+import sys
+import tempfile
+import traceback
+
+from telemetry.internal.platform import tracing_agent
+from telemetry.internal.platform.tracing_agent import (
+    chrome_tracing_devtools_manager)
+
+_DESKTOP_OS_NAMES = ['linux', 'mac', 'win']
+_STARTUP_TRACING_OS_NAMES = _DESKTOP_OS_NAMES + ['android']
+
+# The trace config file path should be the same as specified in
+# src/components/tracing/trace_config_file.[h|cc]
+_CHROME_TRACE_CONFIG_DIR_ANDROID = '/data/local/'
+_CHROME_TRACE_CONFIG_FILE_NAME = 'chrome-trace-config.json'
+
+
+def ClearStarupTracingStateIfNeeded(platform_backend):
+  # Trace config file has fixed path on Android and temporary path on desktop.
+  if platform_backend.GetOSName() == 'android':
+    trace_config_file = os.path.join(_CHROME_TRACE_CONFIG_DIR_ANDROID,
+                                     _CHROME_TRACE_CONFIG_FILE_NAME)
+    platform_backend.device.RunShellCommand(
+        ['rm', '-f', trace_config_file], check_return=True, as_root=True)
+
+
+class ChromeTracingStartedError(Exception):
+  pass
+
+
+class ChromeTracingStoppedError(Exception):
+  pass
+
+
+class ChromeTracingAgent(tracing_agent.TracingAgent):
+  def __init__(self, platform_backend):
+    super(ChromeTracingAgent, self).__init__(platform_backend)
+    self._trace_config = None
+    self._trace_config_file = None
+
+  @property
+  def trace_config(self):
+    # Trace config is also used to check if Chrome tracing is running or not.
+    return self._trace_config
+
+  @property
+  def trace_config_file(self):
+    return self._trace_config_file
+
+  @classmethod
+  def IsStartupTracingSupported(cls, platform_backend):
+    if platform_backend.GetOSName() in _STARTUP_TRACING_OS_NAMES:
+      return True
+    else:
+      return False
+
+  @classmethod
+  def IsSupported(cls, platform_backend):
+    if cls.IsStartupTracingSupported(platform_backend):
+      return True
+    else:
+      return chrome_tracing_devtools_manager.IsSupported(platform_backend)
+
+  def _StartStartupTracing(self, config):
+    if not self.IsStartupTracingSupported(self._platform_backend):
+      return False
+    self._CreateTraceConfigFile(config)
+    return True
+
+  def _StartDevToolsTracing(self, config, timeout):
+    if not chrome_tracing_devtools_manager.IsSupported(self._platform_backend):
+      return False
+    devtools_clients = (chrome_tracing_devtools_manager
+        .GetActiveDevToolsClients(self._platform_backend))
+    if not devtools_clients:
+      return False
+    for client in devtools_clients:
+      if client.is_tracing_running:
+        raise ChromeTracingStartedError(
+            'Tracing is already running on devtools at port %s on platform '
+            'backend %s.' % (client.remote_port, self._platform_backend))
+      client.StartChromeTracing(
+          config, config.tracing_category_filter.filter_string, timeout)
+    return True
+
+  def StartAgentTracing(self, config, timeout):
+    if not config.enable_chrome_trace:
+      return False
+
+    if self._trace_config:
+      raise ChromeTracingStartedError(
+          'Tracing is already running on platform backend %s.'
+          % self._platform_backend)
+
+    if (config.enable_android_graphics_memtrack and
+        self._platform_backend.GetOSName() == 'android'):
+      self._platform_backend.SetGraphicsMemoryTrackingEnabled(True)
+
+    # The Chrome tracing agent needs to start tracing for Chrome browsers that
+    # are not yet started, and for the ones that already are. For the former,
+    # we first set up the trace_config_file, which allows browsers that start
+    # after this point to use it to enable tracing upon browser startup. For
+    # the latter, we invoke the start tracing command through DevTools for
+    # browsers that are already started and tracked by chrome_tracing_devtools_manager.
+    started_startup_tracing = self._StartStartupTracing(config)
+    started_devtools_tracing = self._StartDevToolsTracing(config, timeout)
+    if started_startup_tracing or started_devtools_tracing:
+      self._trace_config = config
+      return True
+    return False
+
+  def StopAgentTracing(self, trace_data_builder):
+    if not self._trace_config:
+      raise ChromeTracingStoppedError(
+          'Tracing is not running on platform backend %s.'
+          % self._platform_backend)
+
+    if self.IsStartupTracingSupported(self._platform_backend):
+      self._RemoveTraceConfigFile()
+
+    # We get all DevTools clients, including the stale ones, so that an
+    # exception is raised if any client is stale, because a stale client means
+    # we could potentially lose trace data.
+    devtools_clients = (chrome_tracing_devtools_manager
+        .GetDevToolsClients(self._platform_backend))
+    raised_execption_messages = []
+    for client in devtools_clients:
+      try:
+        client.StopChromeTracing(trace_data_builder)
+      except Exception:
+        raised_execption_messages.append(
+          'Error when trying to stop Chrome tracing on devtools at port %s:\n%s'
+          % (client.remote_port,
+             ''.join(traceback.format_exception(*sys.exc_info()))))
+
+    if (self._trace_config.enable_android_graphics_memtrack and
+        self._platform_backend.GetOSName() == 'android'):
+      self._platform_backend.SetGraphicsMemoryTrackingEnabled(False)
+
+    self._trace_config = None
+    if raised_execption_messages:
+      raise ChromeTracingStoppedError(
+          'Exceptions raised when trying to stop Chrome devtool tracing:\n' +
+          '\n'.join(raised_execption_messages))
+
+  def _CreateTraceConfigFileString(self, config):
+    # See src/components/tracing/trace_config_file.h for the format
+    trace_config_str = config.GetChromeTraceConfigJsonString()
+    return '{"trace_config":' + trace_config_str + '}'
+
+  def _CreateTraceConfigFile(self, config):
+    assert not self._trace_config_file
+    if self._platform_backend.GetOSName() == 'android':
+      self._trace_config_file = os.path.join(_CHROME_TRACE_CONFIG_DIR_ANDROID,
+                                             _CHROME_TRACE_CONFIG_FILE_NAME)
+      self._platform_backend.device.WriteFile(self._trace_config_file,
+          self._CreateTraceConfigFileString(config), as_root=True)
+      # The config file has a fixed path on Android, so we need to ensure it
+      # is always cleaned up.
+      atexit.register(self._RemoveTraceConfigFile)
+    elif self._platform_backend.GetOSName() in _DESKTOP_OS_NAMES:
+      self._trace_config_file = os.path.join(tempfile.mkdtemp(),
+                                             _CHROME_TRACE_CONFIG_FILE_NAME)
+      with open(self._trace_config_file, 'w') as f:
+        trace_config_string = self._CreateTraceConfigFileString(config)
+        logging.info('Trace config file string: %s', trace_config_string)
+        f.write(trace_config_string)
+      os.chmod(self._trace_config_file,
+               os.stat(self._trace_config_file).st_mode | stat.S_IROTH)
+    else:
+      raise NotImplementedError
+
+  def _RemoveTraceConfigFile(self):
+    if not self._trace_config_file:
+      return
+    if self._platform_backend.GetOSName() == 'android':
+      self._platform_backend.device.RunShellCommand(
+          ['rm', '-f', self._trace_config_file], check_return=True,
+          as_root=True)
+    elif self._platform_backend.GetOSName() in _DESKTOP_OS_NAMES:
+      if os.path.exists(self._trace_config_file):
+        os.remove(self._trace_config_file)
+      shutil.rmtree(os.path.dirname(self._trace_config_file))
+    else:
+      raise NotImplementedError
+    self._trace_config_file = None
+
+  def SupportsFlushingAgentTracing(self):
+    return True
+
+  def FlushAgentTracing(self, config, timeout, trace_data_builder):
+    if not self._trace_config:
+      raise ChromeTracingStoppedError(
+          'Tracing is not running on platform backend %s.'
+          % self._platform_backend)
+
+    for backend in self._IterInspectorBackends():
+      backend.EvaluateJavaScript("console.time('flush-tracing');")
+
+    self.StopAgentTracing(trace_data_builder)
+    self.StartAgentTracing(config, timeout)
+
+    for backend in self._IterInspectorBackends():
+      backend.EvaluateJavaScript("console.timeEnd('flush-tracing');")
+
+  def _IterInspectorBackends(self):
+    for client in chrome_tracing_devtools_manager.GetDevToolsClients(
+        self._platform_backend):
+      context_map = client.GetUpdatedInspectableContexts()
+      for context in context_map.contexts:
+        if context['type'] in ['iframe', 'page', 'webview']:
+          yield context_map.GetInspectorBackend(context['id'])
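The file written by _CreateTraceConfigFile wraps Chrome's trace config JSON in a single top-level "trace_config" key. A standalone sketch of that framing (the payload below is an illustrative example, not necessarily what GetChromeTraceConfigJsonString emits):

import json

trace_config_str = json.dumps({'record_mode': 'record-as-much-as-possible'})
config_file_contents = '{"trace_config":' + trace_config_str + '}'
parsed = json.loads(config_file_contents)
assert parsed['trace_config']['record_mode'] == 'record-as-much-as-possible'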
diff --git a/catapult/telemetry/telemetry/internal/platform/tracing_agent/chrome_tracing_agent_unittest.py b/catapult/telemetry/telemetry/internal/platform/tracing_agent/chrome_tracing_agent_unittest.py
new file mode 100644
index 0000000..3e73ec0
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/tracing_agent/chrome_tracing_agent_unittest.py
@@ -0,0 +1,336 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import os
+import platform
+import stat
+import unittest
+
+from telemetry import decorators
+from telemetry.internal.platform.tracing_agent import chrome_tracing_agent
+from telemetry.internal.platform.tracing_agent import (
+    chrome_tracing_devtools_manager)
+from telemetry.timeline import tracing_config
+
+from devil.android import device_utils
+
+
+class FakeTracingControllerBackend(object):
+  def __init__(self):
+    self.is_tracing_running = False
+
+
+class FakePlatformBackend(object):
+  def __init__(self):
+    self.tracing_controller_backend = FakeTracingControllerBackend()
+
+  def GetOSName(self):
+    return ''
+
+class FakeAndroidPlatformBackend(FakePlatformBackend):
+  def __init__(self):
+    super(FakeAndroidPlatformBackend, self).__init__()
+    devices = device_utils.DeviceUtils.HealthyDevices(None)
+    self.device = devices[0]
+
+  def GetOSName(self):
+    return 'android'
+
+class FakeDesktopPlatformBackend(FakePlatformBackend):
+  def GetOSName(self):
+    system = platform.system()
+    if system == 'Linux':
+      return 'linux'
+    if system == 'Darwin':
+      return 'mac'
+    if system == 'Windows':
+      return 'win'
+
+
+class FakeContextMap(object):
+  def __init__(self, contexts):
+    self.contexts = contexts
+
+
+class FakeDevtoolsClient(object):
+  def __init__(self, remote_port):
+    self.is_alive = True
+    self.is_tracing_running = False
+    self.remote_port = remote_port
+    self.will_raise_exception_in_stop_tracing = False
+
+  def IsAlive(self):
+    return self.is_alive
+
+  def StartChromeTracing(self, trace_options, filter_string, timeout=10):
+    del trace_options, filter_string, timeout  # unused
+    self.is_tracing_running = True
+
+  def StopChromeTracing(self, trace_data_builder):
+    del trace_data_builder  # unused
+    self.is_tracing_running = False
+    if self.will_raise_exception_in_stop_tracing:
+      raise Exception
+
+  def IsChromeTracingSupported(self):
+    return True
+
+  def GetUpdatedInspectableContexts(self):
+    return FakeContextMap([])
+
+
+class ChromeTracingAgentTest(unittest.TestCase):
+  def setUp(self):
+    self.platform1 = FakePlatformBackend()
+    self.platform2 = FakePlatformBackend()
+    self.platform3 = FakePlatformBackend()
+
+  def StartTracing(self, platform_backend, enable_chrome_trace=True):
+    assert chrome_tracing_agent.ChromeTracingAgent.IsSupported(platform_backend)
+    agent = chrome_tracing_agent.ChromeTracingAgent(platform_backend)
+    config = tracing_config.TracingConfig()
+    config.tracing_category_filter.AddIncludedCategory('foo')
+    config.enable_chrome_trace = enable_chrome_trace
+    agent._platform_backend.tracing_controller_backend.is_tracing_running = True
+    agent._test_config = config
+    agent.StartAgentTracing(config, 10)
+    return agent
+
+  def FlushTracing(self, agent):
+    agent.FlushAgentTracing(agent._test_config, 10, None)
+
+  def StopTracing(self, agent):
+    agent._platform_backend.tracing_controller_backend.is_tracing_running = (
+        False)
+    agent.StopAgentTracing(None)
+
+  def testRegisterDevtoolsClient(self):
+    chrome_tracing_devtools_manager.RegisterDevToolsClient(
+        FakeDevtoolsClient(1), self.platform1)
+    chrome_tracing_devtools_manager.RegisterDevToolsClient(
+        FakeDevtoolsClient(2), self.platform1)
+    chrome_tracing_devtools_manager.RegisterDevToolsClient(
+        FakeDevtoolsClient(3), self.platform1)
+
+    tracing_agent_of_platform1 = self.StartTracing(self.platform1)
+
+    chrome_tracing_devtools_manager.RegisterDevToolsClient(
+        FakeDevtoolsClient(4), self.platform1)
+    chrome_tracing_devtools_manager.RegisterDevToolsClient(
+        FakeDevtoolsClient(5), self.platform2)
+
+    self.StopTracing(tracing_agent_of_platform1)
+    chrome_tracing_devtools_manager.RegisterDevToolsClient(
+        FakeDevtoolsClient(6), self.platform1)
+
+  def testIsSupportWithoutStartupTracingSupport(self):
+    self.assertFalse(
+        chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform1))
+    self.assertFalse(
+        chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform2))
+    self.assertFalse(
+        chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform3))
+
+    devtool1 = FakeDevtoolsClient(1)
+    devtool2 = FakeDevtoolsClient(2)
+    chrome_tracing_devtools_manager.RegisterDevToolsClient(
+        devtool1, self.platform1)
+    chrome_tracing_devtools_manager.RegisterDevToolsClient(
+        devtool2, self.platform2)
+    devtool2.is_alive = False
+
+    # Chrome tracing is only supported on platform 1 since only platform 1 has
+    # an alive devtool.
+    self.assertTrue(
+        chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform1))
+    self.assertFalse(
+        chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform2))
+    self.assertFalse(
+        chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform3))
+
+  @decorators.Enabled('linux', 'mac', 'win')
+  def testIsSupportOnDesktopPlatform(self):
+    # Chrome tracing is always supported on desktop platforms because of startup
+    # tracing.
+    desktop_platform = FakeDesktopPlatformBackend()
+    self.assertTrue(
+        chrome_tracing_agent.ChromeTracingAgent.IsSupported(desktop_platform))
+
+    devtool = FakeDevtoolsClient(1)
+    chrome_tracing_devtools_manager.RegisterDevToolsClient(
+        devtool, desktop_platform)
+    self.assertTrue(
+        chrome_tracing_agent.ChromeTracingAgent.IsSupported(desktop_platform))
+
+  def testStartAndStopTracing(self):
+    devtool1 = FakeDevtoolsClient(1)
+    devtool2 = FakeDevtoolsClient(2)
+    devtool3 = FakeDevtoolsClient(3)
+    devtool4 = FakeDevtoolsClient(2)
+    # Register devtools 1, 2, 3 on platform1 and devtool 4 on platform 2
+    chrome_tracing_devtools_manager.RegisterDevToolsClient(
+        devtool1, self.platform1)
+    chrome_tracing_devtools_manager.RegisterDevToolsClient(
+        devtool2, self.platform1)
+    chrome_tracing_devtools_manager.RegisterDevToolsClient(
+        devtool3, self.platform1)
+    chrome_tracing_devtools_manager.RegisterDevToolsClient(
+        devtool4, self.platform2)
+    devtool2.is_alive = False
+
+    tracing_agent1 = self.StartTracing(self.platform1)
+    with self.assertRaises(chrome_tracing_agent.ChromeTracingStartedError):
+      self.StartTracing(self.platform1)
+
+    self.assertTrue(devtool1.is_tracing_running)
+    self.assertFalse(devtool2.is_tracing_running)
+    self.assertTrue(devtool3.is_tracing_running)
+    # Devtool 4 shouldn't have tracing started although it has the same remote
+    # port as devtool 2
+    self.assertFalse(devtool4.is_tracing_running)
+
+    self.StopTracing(tracing_agent1)
+    self.assertFalse(devtool1.is_tracing_running)
+    self.assertFalse(devtool2.is_tracing_running)
+    self.assertFalse(devtool3.is_tracing_running)
+    self.assertFalse(devtool4.is_tracing_running)
+    # Test that it should be ok to start & stop tracing on platform1 again.
+    tracing_agent1 = self.StartTracing(self.platform1)
+    self.StopTracing(tracing_agent1)
+
+    tracing_agent2 = self.StartTracing(self.platform2)
+    self.assertTrue(devtool4.is_tracing_running)
+    self.StopTracing(tracing_agent2)
+    self.assertFalse(devtool4.is_tracing_running)
+
+  def testFlushTracing(self):
+    devtool1 = FakeDevtoolsClient(1)
+    devtool2 = FakeDevtoolsClient(2)
+    devtool3 = FakeDevtoolsClient(3)
+    devtool4 = FakeDevtoolsClient(2)
+
+    # Register devtools 1, 2, 3 on platform1 and devtool 4 on platform 2.
+    chrome_tracing_devtools_manager.RegisterDevToolsClient(
+        devtool1, self.platform1)
+    chrome_tracing_devtools_manager.RegisterDevToolsClient(
+        devtool2, self.platform1)
+    chrome_tracing_devtools_manager.RegisterDevToolsClient(
+        devtool3, self.platform1)
+    chrome_tracing_devtools_manager.RegisterDevToolsClient(
+        devtool4, self.platform2)
+    devtool2.is_alive = False
+
+    tracing_agent1 = self.StartTracing(self.platform1)
+
+    self.assertTrue(devtool1.is_tracing_running)
+    self.assertFalse(devtool2.is_tracing_running)
+    self.assertTrue(devtool3.is_tracing_running)
+    # Devtool 4 shouldn't have tracing started although it has the same remote
+    # port as devtool 2.
+    self.assertFalse(devtool4.is_tracing_running)
+
+    for _ in xrange(5):
+      self.FlushTracing(tracing_agent1)
+      self.assertTrue(devtool1.is_tracing_running)
+      self.assertFalse(devtool2.is_tracing_running)
+      self.assertTrue(devtool3.is_tracing_running)
+      self.assertFalse(devtool4.is_tracing_running)
+
+    self.StopTracing(tracing_agent1)
+    self.assertFalse(devtool1.is_tracing_running)
+    self.assertFalse(devtool2.is_tracing_running)
+    self.assertFalse(devtool3.is_tracing_running)
+    self.assertFalse(devtool4.is_tracing_running)
+
+    # Test that it is ok to start, flush & stop tracing on platform1 again.
+    tracing_agent1 = self.StartTracing(self.platform1)
+    self.FlushTracing(tracing_agent1)
+    self.StopTracing(tracing_agent1)
+
+    tracing_agent2 = self.StartTracing(self.platform2)
+    self.assertTrue(devtool4.is_tracing_running)
+    self.FlushTracing(tracing_agent2)
+    self.assertTrue(devtool4.is_tracing_running)
+    self.StopTracing(tracing_agent2)
+    self.assertFalse(devtool4.is_tracing_running)
+
+  def testExceptionRaisedInStopTracing(self):
+    devtool1 = FakeDevtoolsClient(1)
+    devtool2 = FakeDevtoolsClient(2)
+    # Register devtools 1, 2 on platform 1
+    chrome_tracing_devtools_manager.RegisterDevToolsClient(
+        devtool1, self.platform1)
+    chrome_tracing_devtools_manager.RegisterDevToolsClient(
+        devtool2, self.platform1)
+    tracing_agent1 = self.StartTracing(self.platform1)
+
+    self.assertTrue(devtool1.is_tracing_running)
+    self.assertTrue(devtool2.is_tracing_running)
+
+    devtool1.will_raise_exception_in_stop_tracing = True
+    with self.assertRaises(chrome_tracing_agent.ChromeTracingStoppedError):
+      self.StopTracing(tracing_agent1)
+    # Tracing is stopped on both devtools clients even if an exception occurs.
+    self.assertIsNone(tracing_agent1.trace_config)
+    self.assertFalse(devtool1.is_tracing_running)
+    self.assertFalse(devtool2.is_tracing_running)
+
+    devtool1.is_alive = False
+    devtool2.is_alive = False
+    # Registering devtools 3 on platform 1 should not raise any exception.
+    devtool3 = FakeDevtoolsClient(3)
+    chrome_tracing_devtools_manager.RegisterDevToolsClient(
+        devtool3, self.platform1)
+
+    # Start & Stop tracing on platform 1 should work just fine.
+    tracing_agent2 = self.StartTracing(self.platform1)
+    self.StopTracing(tracing_agent2)
+
+  @decorators.Enabled('android')
+  def testCreateAndRemoveTraceConfigFileOnAndroid(self):
+    platform_backend = FakeAndroidPlatformBackend()
+    agent = chrome_tracing_agent.ChromeTracingAgent(platform_backend)
+    self.assertIsNone(agent.trace_config_file)
+
+    config = tracing_config.TracingConfig()
+    agent._CreateTraceConfigFile(config)
+    self.assertIsNotNone(agent.trace_config_file)
+    self.assertTrue(platform_backend.device.PathExists(agent.trace_config_file))
+    config_file_str = platform_backend.device.ReadFile(agent.trace_config_file,
+                                                       as_root=True)
+    self.assertEqual(agent._CreateTraceConfigFileString(config),
+                     config_file_str.strip())
+
+    config_file_path = agent.trace_config_file
+    agent._RemoveTraceConfigFile()
+    self.assertFalse(platform_backend.device.PathExists(config_file_path))
+    self.assertIsNone(agent.trace_config_file)
+    # robust to multiple file removal
+    agent._RemoveTraceConfigFile()
+    self.assertFalse(platform_backend.device.PathExists(config_file_path))
+    self.assertIsNone(agent.trace_config_file)
+
+  @decorators.Enabled('linux', 'mac', 'win')
+  def testCreateAndRemoveTraceConfigFileOnDesktop(self):
+    platform_backend = FakeDesktopPlatformBackend()
+    agent = chrome_tracing_agent.ChromeTracingAgent(platform_backend)
+    self.assertIsNone(agent.trace_config_file)
+
+    config = tracing_config.TracingConfig()
+    agent._CreateTraceConfigFile(config)
+    self.assertIsNotNone(agent.trace_config_file)
+    self.assertTrue(os.path.exists(agent.trace_config_file))
+    self.assertTrue(os.stat(agent.trace_config_file).st_mode & stat.S_IROTH)
+    with open(agent.trace_config_file, 'r') as f:
+      config_file_str = f.read()
+      self.assertEqual(agent._CreateTraceConfigFileString(config),
+                       config_file_str.strip())
+
+    config_file_path = agent.trace_config_file
+    agent._RemoveTraceConfigFile()
+    self.assertFalse(os.path.exists(config_file_path))
+    self.assertIsNone(agent.trace_config_file)
+    # robust to multiple file removal
+    agent._RemoveTraceConfigFile()
+    self.assertFalse(os.path.exists(config_file_path))
+    self.assertIsNone(agent.trace_config_file)
diff --git a/catapult/telemetry/telemetry/internal/platform/tracing_agent/chrome_tracing_devtools_manager.py b/catapult/telemetry/telemetry/internal/platform/tracing_agent/chrome_tracing_devtools_manager.py
new file mode 100644
index 0000000..ea2bde2
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/tracing_agent/chrome_tracing_devtools_manager.py
@@ -0,0 +1,58 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# A singleton map from platform backends to maps from uniquely-identifying
+# remote ports (which may be the same as local ports) to DevToolsClientBackend
+# instances. There is no guarantee that the devtools agents are still alive.
+_platform_backends_to_devtools_clients_maps = {}
+
+
+def _RemoveStaleDevToolsClient(platform_backend):
+  """Removes DevTools clients that are no longer connectable."""
+  devtools_clients_map = _platform_backends_to_devtools_clients_maps.get(
+      platform_backend, {})
+  devtools_clients_map = {
+      port: client
+      for port, client in devtools_clients_map.iteritems()
+      if client.IsAlive()
+      }
+  _platform_backends_to_devtools_clients_maps[platform_backend] = (
+      devtools_clients_map)
+
+
+def RegisterDevToolsClient(devtools_client_backend, platform_backend):
+  """Register DevTools client
+
+  This should only be called from DevToolsClientBackend when it is initialized.
+  """
+  remote_port = str(devtools_client_backend.remote_port)
+  if platform_backend not in _platform_backends_to_devtools_clients_maps:
+    _platform_backends_to_devtools_clients_maps[platform_backend] = {}
+  devtools_clients_map = (
+    _platform_backends_to_devtools_clients_maps[platform_backend])
+  devtools_clients_map[remote_port] = devtools_client_backend
+
+
+def IsSupported(platform_backend):
+  _RemoveStaleDevToolsClient(platform_backend)
+  devtools_clients_map = _platform_backends_to_devtools_clients_maps.get(
+      platform_backend, {})
+  for _, devtools_client in devtools_clients_map.iteritems():
+    if devtools_client.IsChromeTracingSupported():
+      return True
+  return False
+
+
+def GetDevToolsClients(platform_backend):
+  """Get DevTools clients including the ones that are no longer connectable."""
+  devtools_clients_map = _platform_backends_to_devtools_clients_maps.get(
+      platform_backend, {})
+  if not devtools_clients_map:
+    return []
+  return devtools_clients_map.values()
+
+def GetActiveDevToolsClients(platform_backend):
+  """Get DevTools clients that are still connectable."""
+  _RemoveStaleDevToolsClient(platform_backend)
+  return GetDevToolsClients(platform_backend)
diff --git a/catapult/telemetry/telemetry/internal/platform/tracing_agent/display_tracing_agent.py b/catapult/telemetry/telemetry/internal/platform/tracing_agent/display_tracing_agent.py
new file mode 100644
index 0000000..074026f
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/tracing_agent/display_tracing_agent.py
@@ -0,0 +1,26 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.internal.platform import tracing_agent
+from telemetry.timeline import trace_data
+
+
+class DisplayTracingAgent(tracing_agent.TracingAgent):
+  def __init__(self, platform_backend):
+    super(DisplayTracingAgent, self).__init__(platform_backend)
+
+  @classmethod
+  def IsSupported(cls, platform_backend):
+    return platform_backend.IsDisplayTracingSupported()
+
+  def StartAgentTracing(self, config, timeout):
+    del timeout  # unused
+    if config.enable_platform_display_trace:
+      self._platform_backend.StartDisplayTracing()
+      return True
+
+  def StopAgentTracing(self, trace_data_builder):
+    surface_flinger_trace_data = self._platform_backend.StopDisplayTracing()
+    trace_data_builder.AddEventsTo(
+          trace_data.SURFACE_FLINGER_PART, surface_flinger_trace_data)
diff --git a/catapult/telemetry/telemetry/internal/platform/tracing_agent/display_tracing_agent_unittest.py b/catapult/telemetry/telemetry/internal/platform/tracing_agent/display_tracing_agent_unittest.py
new file mode 100644
index 0000000..eb8720e
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/tracing_agent/display_tracing_agent_unittest.py
@@ -0,0 +1,64 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import mock
+import unittest
+
+from telemetry.internal.platform import android_platform_backend
+from telemetry.internal.platform.tracing_agent import display_tracing_agent
+from telemetry.timeline import tracing_config
+
+# pylint: disable=super-init-not-called, abstract-method, unused-argument
+class FakeAndroidPlatformBackend(
+    android_platform_backend.AndroidPlatformBackend):
+  def __init__(self):
+    self._device = 0
+    self._raw_display_frame_rate_measurements = []
+    self._surface_stats_collector = None
+
+  @property
+  def surface_stats_collector(self):
+    return self._surface_stats_collector
+
+  def IsDisplayTracingSupported(self):
+    return True
+
+
+class DisplayTracingAgentTest(unittest.TestCase):
+  def setUp(self):
+    self._config = tracing_config.TracingConfig()
+    self._config.enable_platform_display_trace = True
+    self._platform_backend = FakeAndroidPlatformBackend()
+    self._agent = display_tracing_agent.DisplayTracingAgent(
+        self._platform_backend)
+
+  @mock.patch(
+      'devil.android.perf.surface_stats_collector.SurfaceStatsCollector')
+  def testStartAndStopTracing(self, MockSurfaceStatsCollector):
+    self._agent.StartAgentTracing(self._config, 10)
+    # Second start tracing will raise error.
+    with self.assertRaises(AssertionError):
+      self._agent.StartAgentTracing(self._config, 10)
+    self._platform_backend.surface_stats_collector.Stop.return_value = (0, [])
+    self._agent.StopAgentTracing(mock.MagicMock())
+
+    # Can start and stop tracing multiple times.
+    self._agent.StartAgentTracing(self._config, 10)
+    self._platform_backend.surface_stats_collector.Stop.return_value = (0, [])
+    self._agent.StopAgentTracing(mock.MagicMock())
+
+  @mock.patch(
+      'devil.android.perf.surface_stats_collector.SurfaceStatsCollector')
+  def testExceptionRaisedInStopTracing(self, MockSurfaceStatsCollector):
+    self._agent.StartAgentTracing(self._config, 10)
+    self._platform_backend.surface_stats_collector.Stop.side_effect = Exception(
+        'Raise error when stopping tracing.')
+    with self.assertRaises(Exception):
+      self._agent.StopAgentTracing(mock.MagicMock())
+
+    # Tracing is stopped even if there is an exception, and the agent can
+    # start tracing again.
+    self._agent.StartAgentTracing(self._config, 10)
+    self._platform_backend.surface_stats_collector.Stop.side_effect = None
+    self._platform_backend.surface_stats_collector.Stop.return_value = (0, [])
+    self._agent.StopAgentTracing(mock.MagicMock())
diff --git a/catapult/telemetry/telemetry/internal/platform/tracing_controller_backend.py b/catapult/telemetry/telemetry/internal/platform/tracing_controller_backend.py
new file mode 100644
index 0000000..c4abe60
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/tracing_controller_backend.py
@@ -0,0 +1,239 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import ast
+import atexit
+import contextlib
+import gc
+import logging
+import os
+import sys
+import tempfile
+import traceback
+import uuid
+
+from py_trace_event import trace_event
+from telemetry.core import discover
+from telemetry.core import util
+from telemetry.internal.platform import tracing_agent
+from telemetry.internal.platform.tracing_agent import chrome_tracing_agent
+from telemetry.timeline import trace_data as trace_data_module
+from telemetry.timeline import tracing_config
+
+
+def _IterAllTracingAgentClasses():
+  tracing_agent_dir = os.path.join(
+      os.path.dirname(os.path.realpath(__file__)), 'tracing_agent')
+  return discover.DiscoverClasses(
+      tracing_agent_dir, util.GetTelemetryDir(),
+      tracing_agent.TracingAgent).itervalues()
+
+
+class TracingControllerStoppedError(Exception):
+  pass
+
+
+class _TracingState(object):
+
+  def __init__(self, config, timeout):
+    self._builder = trace_data_module.TraceDataBuilder()
+    self._config = config
+    self._timeout = timeout
+
+  @property
+  def builder(self):
+    return self._builder
+
+  @property
+  def config(self):
+    return self._config
+
+  @property
+  def timeout(self):
+    return self._timeout
+
+
+class TracingControllerBackend(object):
+  def __init__(self, platform_backend):
+    self._platform_backend = platform_backend
+    self._current_state = None
+    self._supported_agents_classes = [
+        agent_classes for agent_classes in _IterAllTracingAgentClasses() if
+        agent_classes.IsSupported(platform_backend)]
+    self._active_agents_instances = []
+    self._trace_log = None
+    self._is_tracing_controllable = True
+
+  def StartTracing(self, config, timeout):
+    if self.is_tracing_running:
+      return False
+
+    assert isinstance(config, tracing_config.TracingConfig)
+    assert len(self._active_agents_instances) == 0
+
+    self._current_state = _TracingState(config, timeout)
+    # Hack: the chrome tracing agent may depend only on the number of live
+    # chrome devtools processes rather than on the platform (when startup
+    # tracing is not supported), so add it to the list of supported agents
+    # here if it is not already present.
+    if (chrome_tracing_agent.ChromeTracingAgent.IsSupported(
+        self._platform_backend) and
+        not chrome_tracing_agent.ChromeTracingAgent in
+        self._supported_agents_classes):
+      self._supported_agents_classes.append(
+          chrome_tracing_agent.ChromeTracingAgent)
+
+    self.StartAgentTracing(config, timeout)
+    for agent_class in self._supported_agents_classes:
+      agent = agent_class(self._platform_backend)
+      if agent.StartAgentTracing(config, timeout):
+        self._active_agents_instances.append(agent)
+    return True
+
+  def _GenerateClockSyncId(self):
+    return str(uuid.uuid4())
+
+  @contextlib.contextmanager
+  def _DisableGarbageCollection(self):
+    try:
+      gc.disable()
+      yield
+    finally:
+      gc.enable()
+
+  def StopTracing(self):
+    assert self.is_tracing_running, 'Can only stop tracing when tracing is on.'
+    self._IssueClockSyncMarker()
+    builder = self._current_state.builder
+
+    raised_exception_messages = []
+    for agent in self._active_agents_instances + [self]:
+      try:
+        agent.StopAgentTracing(builder)
+      except Exception: # pylint: disable=broad-except
+        raised_exception_messages.append(
+            ''.join(traceback.format_exception(*sys.exc_info())))
+
+    self._active_agents_instances = []
+    self._current_state = None
+
+    if raised_exception_messages:
+      raise TracingControllerStoppedError(
+          'Exceptions raised when trying to stop tracing:\n' +
+          '\n'.join(raised_exception_messages))
+
+    return builder.AsData()
+
+  def FlushTracing(self):
+    assert self.is_tracing_running, 'Can only flush tracing when tracing is on.'
+    self._IssueClockSyncMarker()
+
+    raised_exception_messages = []
+    # Flushing the controller's pytrace is not supported.
+    for agent in self._active_agents_instances:
+      try:
+        if agent.SupportsFlushingAgentTracing():
+          agent.FlushAgentTracing(self._current_state.config,
+                                  self._current_state.timeout,
+                                  self._current_state.builder)
+      except Exception: # pylint: disable=broad-except
+        raised_exception_messages.append(
+            ''.join(traceback.format_exception(*sys.exc_info())))
+
+    if raised_exception_messages:
+      raise TracingControllerStoppedError(
+          'Exceptions raised when trying to flush tracing:\n' +
+          '\n'.join(raised_exception_messages))
+
+  def StartAgentTracing(self, config, timeout):
+    self._is_tracing_controllable = self._IsTracingControllable()
+    if not self._is_tracing_controllable:
+      return False
+
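+    # The controller itself acts as a tracing agent: telemetry's own events
+    # are recorded via py_trace_event into a temporary file and read back in
+    # StopAgentTracing.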
+    tf = tempfile.NamedTemporaryFile(delete=False)
+    self._trace_log = tf.name
+    tf.close()
+    del config # unused
+    del timeout # unused
+    assert not trace_event.trace_is_enabled(), 'Tracing already running.'
+    trace_event.trace_enable(self._trace_log)
+    assert trace_event.trace_is_enabled(), 'Tracing didn\'t enable properly.'
+    return True
+
+  def StopAgentTracing(self, trace_data_builder):
+    if not self._is_tracing_controllable:
+      return
+    assert trace_event.trace_is_enabled(), 'Tracing not running'
+    trace_event.trace_disable()
+    assert not trace_event.trace_is_enabled(), (
+        'Tracing didn\'t disable properly.')
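+    # py_trace_event leaves the log as an unterminated list literal, so the
+    # closing bracket is appended before parsing.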
+    with open(self._trace_log, 'r') as fp:
+      data = ast.literal_eval(fp.read() + ']')
+    trace_data_builder.AddEventsTo(trace_data_module.TELEMETRY_PART, data)
+    try:
+      os.remove(self._trace_log)
+      self._trace_log = None
+    except OSError:
+      logging.exception('Error when deleting %s, will try again at exit.',
+                        self._trace_log)
+      def DeleteAtExit(path):
+        os.remove(path)
+      atexit.register(DeleteAtExit, self._trace_log)
+    self._trace_log = None
+
+  def SupportsExplicitClockSync(self):
+    return True
+
+  def _RecordIssuerClockSyncMarker(self, sync_id, issue_ts):
+    """ Record clock sync event.
+
+    Args:
+      sync_id: Unqiue id for sync event.
+      issue_ts: timestamp before issuing clocksync to agent.
+    """
+    if self._is_tracing_controllable:
+      trace_event.clock_sync(sync_id, issue_ts=issue_ts)
+
+  def _IssueClockSyncMarker(self):
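+    # Garbage collection is disabled while issuing the markers, presumably so
+    # that a collection pause cannot skew the clock sync timestamps.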
+    with self._DisableGarbageCollection():
+      for agent in self._active_agents_instances:
+        if agent.SupportsExplicitClockSync():
+          sync_id = self._GenerateClockSyncId()
+          agent.RecordClockSyncMarker(sync_id,
+                                      self._RecordIssuerClockSyncMarker)
+
+  def IsChromeTracingSupported(self):
+    return chrome_tracing_agent.ChromeTracingAgent.IsSupported(
+        self._platform_backend)
+
+  @property
+  def is_tracing_running(self):
+    return self._current_state is not None
+
+  def _GetActiveChromeTracingAgent(self):
+    if not self.is_tracing_running:
+      return None
+    if not self._current_state.config.enable_chrome_trace:
+      return None
+    for agent in self._active_agents_instances:
+      if isinstance(agent, chrome_tracing_agent.ChromeTracingAgent):
+        return agent
+    return None
+
+  def GetChromeTraceConfig(self):
+    agent = self._GetActiveChromeTracingAgent()
+    if agent:
+      return agent.trace_config
+    return None
+
+  def GetChromeTraceConfigFile(self):
+    agent = self._GetActiveChromeTracingAgent()
+    if agent:
+      return agent.trace_config_file
+    return None
+
+  def _IsTracingControllable(self):
+    return trace_event.is_tracing_controllable()
+
+  def ClearStateIfNeeded(self):
+    chrome_tracing_agent.ClearStarupTracingStateIfNeeded(self._platform_backend)
diff --git a/catapult/telemetry/telemetry/internal/platform/tracing_controller_backend_unittest.py b/catapult/telemetry/telemetry/internal/platform/tracing_controller_backend_unittest.py
new file mode 100644
index 0000000..0bf898e
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/tracing_controller_backend_unittest.py
@@ -0,0 +1,335 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import gc
+import logging
+import unittest
+
+from telemetry import decorators
+from telemetry.internal.platform import linux_based_platform_backend
+from telemetry.internal.platform import tracing_agent
+from telemetry.internal.platform import tracing_controller_backend
+from telemetry.timeline import tracing_config
+from telemetry.timeline import trace_data
+
+
+class PlatformBackend(linux_based_platform_backend.LinuxBasedPlatformBackend):
+  # pylint: disable=abstract-method
+  def __init__(self):
+    super(PlatformBackend, self).__init__()
+    self._mock_files = {}
+
+  def GetOSName(self):
+    return 'android'
+
+
+class FakeTracingAgentBase(tracing_agent.TracingAgent):
+  def __init__(self, platform, start=True, clock_sync=True):
+    super(FakeTracingAgentBase, self).__init__(platform)
+    self._start = start
+    self._clock_sync = clock_sync
+    self._sync_seen = False
+
+  def StartAgentTracing(self, config, timeout):
+    return self._start
+
+  def StopAgentTracing(self, trace_data_builder):
+    pass
+
+  def SupportsExplicitClockSync(self):
+    return self._clock_sync
+
+  def RecordClockSyncMarker(self, sync_id, callback):
+    if not self._clock_sync:
+      raise NotImplementedError
+    self._sync_seen = True
+    callback(sync_id, 1)
+
+
+class FakeTracingAgentStartAndClockSync(FakeTracingAgentBase):
+  def __init__(self, platform):
+    super(FakeTracingAgentStartAndClockSync, self).__init__(platform,
+                                                            start=True,
+                                                            clock_sync=True)
+
+
+class FakeTracingAgentStartAndNoClockSync(FakeTracingAgentBase):
+  def __init__(self, platform):
+    super(FakeTracingAgentStartAndNoClockSync, self).__init__(platform,
+                                                            start=True,
+                                                            clock_sync=False)
+
+
+class FakeTracingAgentNoStartAndNoClockSync(FakeTracingAgentBase):
+  def __init__(self, platform):
+    super(FakeTracingAgentNoStartAndNoClockSync, self).__init__(platform,
+                                                            start=False,
+                                                            clock_sync=False)
+
+
+class FakeTracingAgentNoStartAndClockSync(FakeTracingAgentBase):
+  def __init__(self, platform):
+    super(FakeTracingAgentNoStartAndClockSync, self).__init__(platform,
+                                                              start=False,
+                                                              clock_sync=True)
+
+
+class TracingControllerBackendTest(unittest.TestCase):
+  def _getControllerLogAsList(self, data):
+    return data.GetEventsFor(trace_data.TELEMETRY_PART)
+
+  def _getSyncCount(self, data):
+    return len([entry for entry in self._getControllerLogAsList(data)
+                if entry.get('name') == 'clock_sync'])
+
+  def setUp(self):
+    self.platform = PlatformBackend()
+    self.controller = (
+        tracing_controller_backend.TracingControllerBackend(self.platform))
+    self.controller._supported_agents_classes = [FakeTracingAgentBase]
+    self.config = tracing_config.TracingConfig()
+    self.controller_log = self.controller._trace_log
+
+  def tearDown(self):
+    if self.controller.is_tracing_running:
+      self.controller.StopTracing()
+
+  @decorators.Isolated
+  def testStartTracing(self):
+    self.assertFalse(self.controller.is_tracing_running)
+    self.assertTrue(self.controller.StartTracing(self.config, 30))
+    self.assertTrue(self.controller.is_tracing_running)
+
+  @decorators.Isolated
+  def testDoubleStartTracing(self):
+    self.assertFalse(self.controller.is_tracing_running)
+    self.assertTrue(self.controller.StartTracing(self.config, 30))
+    self.assertTrue(self.controller.is_tracing_running)
+    self.assertFalse(self.controller.StartTracing(self.config, 30))
+
+  @decorators.Isolated
+  def testStopTracingNotStarted(self):
+    with self.assertRaises(AssertionError):
+      self.controller.StopTracing()
+
+  @decorators.Isolated
+  def testStopTracing(self):
+    self.assertFalse(self.controller.is_tracing_running)
+    self.assertTrue(self.controller.StartTracing(self.config, 30))
+    self.assertTrue(self.controller.is_tracing_running)
+    data = self.controller.StopTracing()
+    self.assertEqual(self._getSyncCount(data), 1)
+    self.assertFalse(self.controller.is_tracing_running)
+    self.assertEqual(self.controller._trace_log, None)
+
+  @decorators.Isolated
+  def testDoubleStopTracing(self):
+    self.assertFalse(self.controller.is_tracing_running)
+    self.assertTrue(self.controller.StartTracing(self.config, 30))
+    self.assertTrue(self.controller.is_tracing_running)
+    self.controller.StopTracing()
+    self.assertFalse(self.controller.is_tracing_running)
+    with self.assertRaises(AssertionError):
+      self.controller.StopTracing()
+
+  @decorators.Isolated
+  def testMultipleStartStop(self):
+    self.assertFalse(self.controller.is_tracing_running)
+    self.assertTrue(self.controller.StartTracing(self.config, 30))
+    self.assertTrue(self.controller.is_tracing_running)
+    data = self.controller.StopTracing()
+    self.assertEqual(self._getSyncCount(data), 1)
+    sync_event_one = [x for x in self._getControllerLogAsList(data)
+                      if x.get('name') == 'clock_sync'][0]
+    self.assertFalse(self.controller.is_tracing_running)
+    self.assertEqual(self.controller._trace_log, None)
+    # Run 2
+    self.assertTrue(self.controller.StartTracing(self.config, 30))
+    self.assertTrue(self.controller.is_tracing_running)
+    data = self.controller.StopTracing()
+    self.assertEqual(self._getSyncCount(data), 1)
+    sync_event_two = [x for x in self._getControllerLogAsList(data)
+                      if x.get('name') == 'clock_sync'][0]
+    self.assertFalse(self.controller.is_tracing_running)
+    self.assertEqual(self.controller._trace_log, None)
+    # Test difference between events
+    self.assertNotEqual(sync_event_one, sync_event_two)
+
+  @decorators.Isolated
+  def testFlush(self):
+    self.assertFalse(self.controller.is_tracing_running)
+    self.assertIsNone(self.controller._current_state)
+
+    # Start tracing.
+    self.assertTrue(self.controller.StartTracing(self.config, 30))
+    self.assertTrue(self.controller.is_tracing_running)
+    self.assertIs(self.controller._current_state.config, self.config)
+    self.assertEqual(self.controller._current_state.timeout, 30)
+    self.assertIsNotNone(self.controller._current_state.builder)
+
+    # Flush tracing several times.
+    for _ in xrange(5):
+      self.controller.FlushTracing()
+      self.assertTrue(self.controller.is_tracing_running)
+      self.assertIs(self.controller._current_state.config, self.config)
+      self.assertEqual(self.controller._current_state.timeout, 30)
+      self.assertIsNotNone(self.controller._current_state.builder)
+
+    # Stop tracing.
+    data = self.controller.StopTracing()
+    self.assertFalse(self.controller.is_tracing_running)
+    self.assertIsNone(self.controller._current_state)
+
+    self.assertEqual(self._getSyncCount(data), 6)
+
+  @decorators.Isolated
+  def testNoWorkingAgents(self):
+    self.controller._supported_agents_classes = [
+        FakeTracingAgentNoStartAndNoClockSync
+    ]
+    self.assertFalse(self.controller.is_tracing_running)
+    self.assertTrue(self.controller.StartTracing(self.config, 30))
+    self.assertTrue(self.controller.is_tracing_running)
+    self.assertEquals(self.controller._active_agents_instances, [])
+    data = self.controller.StopTracing()
+    self.assertEqual(self._getSyncCount(data), 0)
+    self.assertFalse(self.controller.is_tracing_running)
+
+  @decorators.Isolated
+  def testNoClockSyncSupport(self):
+    self.controller._supported_agents_classes = [
+        FakeTracingAgentStartAndNoClockSync,
+        FakeTracingAgentNoStartAndNoClockSync,
+    ]
+    self.assertFalse(self.controller.is_tracing_running)
+    self.assertTrue(self.controller.StartTracing(self.config, 30))
+    self.assertTrue(self.controller.is_tracing_running)
+    data = self.controller.StopTracing()
+    self.assertFalse(self.controller.is_tracing_running)
+    self.assertEquals(self._getSyncCount(data), 0)
+
+  @decorators.Isolated
+  def testClockSyncSupport(self):
+    self.controller._supported_agents_classes = [
+        FakeTracingAgentStartAndClockSync,
+        FakeTracingAgentStartAndClockSync,
+        FakeTracingAgentStartAndNoClockSync,
+        FakeTracingAgentNoStartAndClockSync,
+        FakeTracingAgentNoStartAndNoClockSync
+    ]
+    self.assertFalse(self.controller.is_tracing_running)
+    self.assertTrue(self.controller.StartTracing(self.config, 30))
+    self.assertTrue(self.controller.is_tracing_running)
+    self.assertEquals(len(self.controller._active_agents_instances), 3)
+    # No sync event before running StopTracing().
+    data = self.controller.StopTracing()
+    self.assertFalse(self.controller.is_tracing_running)
+    self.assertEquals(self._getSyncCount(data), 2)
+
+  @decorators.Isolated
+  def testMultipleAgents(self):
+    self.controller._supported_agents_classes = [
+        FakeTracingAgentStartAndClockSync,
+        FakeTracingAgentStartAndClockSync,
+        FakeTracingAgentNoStartAndClockSync,
+        FakeTracingAgentNoStartAndClockSync,
+        FakeTracingAgentNoStartAndNoClockSync,
+        FakeTracingAgentNoStartAndNoClockSync,
+        FakeTracingAgentStartAndNoClockSync,
+        FakeTracingAgentStartAndNoClockSync
+    ]
+    self.assertFalse(self.controller.is_tracing_running)
+    self.assertTrue(self.controller.StartTracing(self.config, 30))
+    self.assertTrue(self.controller.is_tracing_running)
+    self.assertEquals(len(self.controller._active_agents_instances), 4)
+    data = self.controller.StopTracing()
+    self.assertFalse(self.controller.is_tracing_running)
+    self.assertEquals(self._getSyncCount(data), 2)
+
+  @decorators.Isolated
+  def testGenerateRandomSyncId(self):
+    ids = []
+    for _ in xrange(1000):
+      i = self.controller._GenerateClockSyncId()
+      self.assertFalse(i in ids)
+      ids.append(i)
+
+  @decorators.Isolated
+  def testRecordIssuerClockSyncMarker(self):
+    sync_id = 'test_id'
+    ts = 1
+    self.controller._supported_agents_classes = [
+        FakeTracingAgentNoStartAndNoClockSync,
+        FakeTracingAgentStartAndNoClockSync
+    ]
+    self.assertTrue(self.controller.StartTracing(self.config, 30))
+    self.controller._RecordIssuerClockSyncMarker(sync_id, ts)
+    data = self.controller.StopTracing()
+    self.assertFalse(self.controller.is_tracing_running)
+    self.assertEquals(self._getSyncCount(data), 1)
+    log = self._getControllerLogAsList(data)
+    for entry in log:
+      if entry.get('name') == 'clock_sync':
+        self.assertEqual(entry['args']['sync_id'], sync_id)
+        self.assertEqual(entry['args']['issue_ts'], 1)
+
+  @decorators.Isolated
+  def testIssueClockSyncMarker_normalUse(self):
+    self.controller._supported_agents_classes = [
+        FakeTracingAgentStartAndClockSync,
+        FakeTracingAgentStartAndClockSync,
+        FakeTracingAgentNoStartAndClockSync,
+        FakeTracingAgentNoStartAndClockSync,
+        FakeTracingAgentNoStartAndNoClockSync,
+        FakeTracingAgentNoStartAndNoClockSync,
+        FakeTracingAgentStartAndNoClockSync,
+        FakeTracingAgentStartAndNoClockSync
+    ]
+    self.assertFalse(self.controller.is_tracing_running)
+    self.assertTrue(self.controller.StartTracing(self.config, 30))
+    self.assertTrue(self.controller.is_tracing_running)
+    self.assertEquals(len(self.controller._active_agents_instances), 4)
+    self.controller._IssueClockSyncMarker()
+    data = self.controller.StopTracing()
+    self.assertFalse(self.controller.is_tracing_running)
+    self.assertEquals(self._getSyncCount(data), 4)
+
+  @decorators.Isolated
+  def testIssueClockSyncMarker_tracingNotControllable(self):
+    self.controller._supported_agents_classes = [
+        FakeTracingAgentStartAndClockSync,
+        FakeTracingAgentStartAndClockSync,
+        FakeTracingAgentNoStartAndClockSync,
+        FakeTracingAgentNoStartAndClockSync,
+        FakeTracingAgentNoStartAndNoClockSync,
+        FakeTracingAgentNoStartAndNoClockSync,
+        FakeTracingAgentStartAndNoClockSync,
+        FakeTracingAgentStartAndNoClockSync
+    ]
+    original_controllable = self.controller._IsTracingControllable
+    self.controller._IsTracingControllable = lambda: False
+    try:
+      self.assertFalse(self.controller.is_tracing_running)
+      self.assertTrue(self.controller.StartTracing(self.config, 30))
+      self.assertTrue(self.controller.is_tracing_running)
+      self.assertEquals(len(self.controller._active_agents_instances), 4)
+      self.controller._IssueClockSyncMarker()
+      data = self.controller.StopTracing()
+      self.assertFalse(self.controller.is_tracing_running)
+      self.assertEquals(self._getSyncCount(data), 0)
+    finally:
+      self.controller._IsTracingControllable = original_controllable
+
+  @decorators.Isolated
+  def testDisableGarbageCollection(self):
+    self.assertTrue(gc.isenabled())
+    with self.controller._DisableGarbageCollection():
+      self.assertFalse(gc.isenabled())
+    self.assertTrue(gc.isenabled())
+
+
+if __name__ == '__main__':
+  logging.getLogger().setLevel(logging.DEBUG)
+  unittest.main(verbosity=2)
+
diff --git a/catapult/telemetry/telemetry/internal/platform/win_platform_backend.py b/catapult/telemetry/telemetry/internal/platform/win_platform_backend.py
new file mode 100644
index 0000000..d436892
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/platform/win_platform_backend.py
@@ -0,0 +1,420 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import atexit
+import collections
+import contextlib
+import ctypes
+import logging
+import os
+import platform
+import re
+import socket
+import struct
+import subprocess
+import sys
+import time
+import zipfile
+
+from catapult_base import cloud_storage  # pylint: disable=import-error
+
+from telemetry.core import exceptions
+from telemetry.core import os_version as os_version_module
+from telemetry import decorators
+from telemetry.internal.platform import desktop_platform_backend
+from telemetry.internal.platform.power_monitor import msr_power_monitor
+from telemetry.internal.util import path
+
+try:
+  import pywintypes  # pylint: disable=import-error
+  import win32api  # pylint: disable=import-error
+  from win32com.shell import shell  # pylint: disable=no-name-in-module
+  from win32com.shell import shellcon  # pylint: disable=no-name-in-module
+  import win32con  # pylint: disable=import-error
+  import win32file  # pylint: disable=import-error
+  import win32gui  # pylint: disable=import-error
+  import win32pipe  # pylint: disable=import-error
+  import win32process  # pylint: disable=import-error
+  import win32security  # pylint: disable=import-error
+except ImportError:
+  pywintypes = None
+  shell = None
+  shellcon = None
+  win32api = None
+  win32con = None
+  win32file = None
+  win32gui = None
+  win32pipe = None
+  win32process = None
+  win32security = None
+
+
+def _InstallWinRing0():
+  """WinRing0 is used for reading MSRs."""
+  executable_dir = os.path.dirname(sys.executable)
+
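+  # The DLL bitness follows the Python interpreter, while the kernel driver
+  # bitness follows the OS.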
+  python_is_64_bit = sys.maxsize > 2 ** 32
+  dll_file_name = 'WinRing0x64.dll' if python_is_64_bit else 'WinRing0.dll'
+  dll_path = os.path.join(executable_dir, dll_file_name)
+
+  os_is_64_bit = platform.machine().endswith('64')
+  driver_file_name = 'WinRing0x64.sys' if os_is_64_bit else 'WinRing0.sys'
+  driver_path = os.path.join(executable_dir, driver_file_name)
+
+  # Check for WinRing0 and download if needed.
+  if not (os.path.exists(dll_path) and os.path.exists(driver_path)):
+    win_binary_dir = os.path.join(
+        path.GetTelemetryDir(), 'bin', 'win', 'AMD64')
+    zip_path = os.path.join(win_binary_dir, 'winring0.zip')
+    cloud_storage.GetIfChanged(zip_path, bucket=cloud_storage.PUBLIC_BUCKET)
+    try:
+      with zipfile.ZipFile(zip_path, 'r') as zip_file:
+        error_message = (
+            'Failed to extract %s into %s. If python claims that '
+            'the zip file is locked, this may be a lie. The problem may be '
+            'that python does not have write permissions to the destination '
+            'directory.'
+        )
+        # Install DLL.
+        if not os.path.exists(dll_path):
+          try:
+            zip_file.extract(dll_file_name, executable_dir)
+          except:
+            logging.error(error_message % (dll_file_name, executable_dir))
+            raise
+
+        # Install kernel driver.
+        if not os.path.exists(driver_path):
+          try:
+            zip_file.extract(driver_file_name, executable_dir)
+          except:
+            logging.error(error_message % (driver_file_name, executable_dir))
+            raise
+    finally:
+      os.remove(zip_path)
+
+
+def TerminateProcess(process_handle):
+  if not process_handle:
+    return
+  if win32process.GetExitCodeProcess(process_handle) == win32con.STILL_ACTIVE:
+    win32process.TerminateProcess(process_handle, 0)
+  process_handle.close()
+
+
+class WinPlatformBackend(desktop_platform_backend.DesktopPlatformBackend):
+  def __init__(self):
+    super(WinPlatformBackend, self).__init__()
+    self._msr_server_handle = None
+    self._msr_server_port = None
+    self._power_monitor = msr_power_monitor.MsrPowerMonitorWin(self)
+
+  @classmethod
+  def IsPlatformBackendForHost(cls):
+    return sys.platform == 'win32'
+
+  def __del__(self):
+    self.close()
+
+  def close(self):
+    self.CloseMsrServer()
+
+  def CloseMsrServer(self):
+    if not self._msr_server_handle:
+      return
+
+    TerminateProcess(self._msr_server_handle)
+    self._msr_server_handle = None
+    self._msr_server_port = None
+
+  def IsThermallyThrottled(self):
+    raise NotImplementedError()
+
+  def HasBeenThermallyThrottled(self):
+    raise NotImplementedError()
+
+  def GetSystemCommitCharge(self):
+    performance_info = self._GetPerformanceInfo()
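+    # CommitTotal is a page count; multiplying by PageSize (bytes) and
+    # dividing by 1024 gives kilobytes.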
+    return performance_info.CommitTotal * performance_info.PageSize / 1024
+
+  @decorators.Cache
+  def GetSystemTotalPhysicalMemory(self):
+    performance_info = self._GetPerformanceInfo()
+    return performance_info.PhysicalTotal * performance_info.PageSize / 1024
+
+  def GetCpuStats(self, pid):
+    cpu_info = self._GetWin32ProcessInfo(win32process.GetProcessTimes, pid)
+    # Convert 100 nanosecond units to seconds
+    cpu_time = (cpu_info['UserTime'] / 1e7 +
+                cpu_info['KernelTime'] / 1e7)
+    return {'CpuProcessTime': cpu_time}
+
+  def GetCpuTimestamp(self):
+    """Return current timestamp in seconds."""
+    return {'TotalTime': time.time()}
+
+  def GetMemoryStats(self, pid):
+    memory_info = self._GetWin32ProcessInfo(
+        win32process.GetProcessMemoryInfo, pid)
+    return {'VM': memory_info['PagefileUsage'],
+            'VMPeak': memory_info['PeakPagefileUsage'],
+            'WorkingSetSize': memory_info['WorkingSetSize'],
+            'WorkingSetSizePeak': memory_info['PeakWorkingSetSize']}
+
+  def KillProcess(self, pid, kill_process_tree=False):
+    # os.kill on Windows is only available in Python 2.7+, so use taskkill.
+    cmd = ['taskkill', '/F', '/PID', str(pid)]
+    if kill_process_tree:
+      cmd.append('/T')
+    subprocess.Popen(cmd, stdout=subprocess.PIPE,
+                     stderr=subprocess.STDOUT).communicate()
+
+  def GetSystemProcessInfo(self):
+    # [3:] To skip 2 blank lines and header.
+    lines = subprocess.Popen(
+        ['wmic', 'process', 'get',
+         'CommandLine,CreationDate,Name,ParentProcessId,ProcessId',
+         '/format:csv'],
+        stdout=subprocess.PIPE).communicate()[0].splitlines()[3:]
+    process_info = []
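+    # Fields are taken from the right-hand side because CommandLine may
+    # itself contain commas.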
+    for line in lines:
+      if not line:
+        continue
+      parts = line.split(',')
+      pi = {}
+      pi['ProcessId'] = int(parts[-1])
+      pi['ParentProcessId'] = int(parts[-2])
+      pi['Name'] = parts[-3]
+      creation_date = None
+      if parts[-4]:
+        creation_date = float(re.split('[+-]', parts[-4])[0])
+      pi['CreationDate'] = creation_date
+      pi['CommandLine'] = ','.join(parts[1:-4])
+      process_info.append(pi)
+    return process_info
+
+  def GetChildPids(self, pid):
+    """Retunds a list of child pids of |pid|."""
+    ppid_map = collections.defaultdict(list)
+    creation_map = {}
+    for pi in self.GetSystemProcessInfo():
+      ppid_map[pi['ParentProcessId']].append(pi['ProcessId'])
+      if pi['CreationDate']:
+        creation_map[pi['ProcessId']] = pi['CreationDate']
+
+    def _InnerGetChildPids(pid):
+      if not pid or pid not in ppid_map:
+        return []
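+      # Only count children created at or after the parent, which guards
+      # against pids that have been reused by unrelated processes.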
+      ret = [p for p in ppid_map[pid] if creation_map[p] >= creation_map[pid]]
+      for child in ret:
+        if child == pid:
+          continue
+        ret.extend(_InnerGetChildPids(child))
+      return ret
+
+    return _InnerGetChildPids(pid)
+
+  def GetCommandLine(self, pid):
+    for pi in self.GetSystemProcessInfo():
+      if pid == pi['ProcessId']:
+        return pi['CommandLine']
+    raise exceptions.ProcessGoneException()
+
+  @decorators.Cache
+  def GetArchName(self):
+    return platform.machine()
+
+  def GetOSName(self):
+    return 'win'
+
+  @decorators.Cache
+  def GetOSVersionName(self):
+    os_version = platform.uname()[3]
+
+    if os_version.startswith('5.1.'):
+      return os_version_module.XP
+    if os_version.startswith('6.0.'):
+      return os_version_module.VISTA
+    if os_version.startswith('6.1.'):
+      return os_version_module.WIN7
+    if os_version.startswith('6.2.'):
+      return os_version_module.WIN8
+    if os_version.startswith('10.'):
+      return os_version_module.WIN10
+
+    raise NotImplementedError('Unknown win version %s.' % os_version)
+
+  def CanFlushIndividualFilesFromSystemCache(self):
+    return True
+
+  def _GetWin32ProcessInfo(self, func, pid):
+    mask = (win32con.PROCESS_QUERY_INFORMATION |
+            win32con.PROCESS_VM_READ)
+    handle = None
+    try:
+      handle = win32api.OpenProcess(mask, False, pid)
+      return func(handle)
+    except pywintypes.error, e:
+      errcode = e[0]
+      if errcode == 87:
+        raise exceptions.ProcessGoneException()
+      raise
+    finally:
+      if handle:
+        win32api.CloseHandle(handle)
+
+  def _GetPerformanceInfo(self):
+    class PerformanceInfo(ctypes.Structure):
+      """Struct for GetPerformanceInfo() call
+      http://msdn.microsoft.com/en-us/library/ms683210
+      """
+      _fields_ = [('size', ctypes.c_ulong),
+                  ('CommitTotal', ctypes.c_size_t),
+                  ('CommitLimit', ctypes.c_size_t),
+                  ('CommitPeak', ctypes.c_size_t),
+                  ('PhysicalTotal', ctypes.c_size_t),
+                  ('PhysicalAvailable', ctypes.c_size_t),
+                  ('SystemCache', ctypes.c_size_t),
+                  ('KernelTotal', ctypes.c_size_t),
+                  ('KernelPaged', ctypes.c_size_t),
+                  ('KernelNonpaged', ctypes.c_size_t),
+                  ('PageSize', ctypes.c_size_t),
+                  ('HandleCount', ctypes.c_ulong),
+                  ('ProcessCount', ctypes.c_ulong),
+                  ('ThreadCount', ctypes.c_ulong)]
+
+      def __init__(self):
+        self.size = ctypes.sizeof(self)
+        # pylint: disable=bad-super-call
+        super(PerformanceInfo, self).__init__()
+
+    performance_info = PerformanceInfo()
+    ctypes.windll.psapi.GetPerformanceInfo(
+        ctypes.byref(performance_info), performance_info.size)
+    return performance_info
+
+  def IsCurrentProcessElevated(self):
+    if self.GetOSVersionName() < os_version_module.VISTA:
+      # TOKEN_QUERY is not defined before Vista. All processes are elevated.
+      return True
+
+    handle = win32process.GetCurrentProcess()
+    with contextlib.closing(
+        win32security.OpenProcessToken(handle, win32con.TOKEN_QUERY)) as token:
+      return bool(win32security.GetTokenInformation(
+          token, win32security.TokenElevation))
+
+  def LaunchApplication(
+      self, application, parameters=None, elevate_privilege=False):
+    """Launch an application. Returns a PyHANDLE object."""
+
+    parameters = ' '.join(parameters) if parameters else ''
+    if elevate_privilege and not self.IsCurrentProcessElevated():
+      # Use ShellExecuteEx() instead of subprocess.Popen()/CreateProcess() to
+      # elevate privileges. A new console will be created if the new process has
+      # different permissions than this process.
+      proc_info = shell.ShellExecuteEx(
+          fMask=shellcon.SEE_MASK_NOCLOSEPROCESS | shellcon.SEE_MASK_NO_CONSOLE,
+          lpVerb='runas' if elevate_privilege else '',
+          lpFile=application,
+          lpParameters=parameters,
+          nShow=win32con.SW_HIDE)
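+      # ShellExecuteEx signals success with an hInstApp value greater than 32
+      # (a legacy ShellExecute convention).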
+      if proc_info['hInstApp'] <= 32:
+        raise Exception('Unable to launch %s' % application)
+      return proc_info['hProcess']
+    else:
+      handle, _, _, _ = win32process.CreateProcess(
+          None, application + ' ' + parameters, None, None, False,
+          win32process.CREATE_NO_WINDOW, None, None, win32process.STARTUPINFO())
+      return handle
+
+  def CanMonitorPower(self):
+    return self._power_monitor.CanMonitorPower()
+
+  def CanMeasurePerApplicationPower(self):
+    return self._power_monitor.CanMeasurePerApplicationPower()
+
+  def StartMonitoringPower(self, browser):
+    self._power_monitor.StartMonitoringPower(browser)
+
+  def StopMonitoringPower(self):
+    return self._power_monitor.StopMonitoringPower()
+
+  def _StartMsrServerIfNeeded(self):
+    if self._msr_server_handle:
+      return
+
+    _InstallWinRing0()
+
+    pipe_name = r"\\.\pipe\msr_server_pipe_{}".format(os.getpid())
+    # Open a named pipe to receive the MSR server's port number.
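+    # The pipe is created with a single instance, 32-byte buffers, and a
+    # 300 ms default timeout.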
+    pipe = win32pipe.CreateNamedPipe(
+        pipe_name,
+        win32pipe.PIPE_ACCESS_INBOUND,
+        win32pipe.PIPE_TYPE_MESSAGE | win32pipe.PIPE_WAIT,
+        1, 32, 32, 300, None)
+    parameters = (
+        os.path.join(os.path.dirname(__file__), 'msr_server_win.py'),
+        pipe_name,
+    )
+    self._msr_server_handle = self.LaunchApplication(
+        sys.executable, parameters, elevate_privilege=True)
+    if pipe != win32file.INVALID_HANDLE_VALUE:
+      if win32pipe.ConnectNamedPipe(pipe, None) == 0:
+        self._msr_server_port = int(win32file.ReadFile(pipe, 32)[1])
+      win32api.CloseHandle(pipe)
+    # Wait for server to start.
+    try:
+      socket.create_connection(('127.0.0.1', self._msr_server_port), 5).close()
+    except socket.error:
+      self.CloseMsrServer()
+    atexit.register(TerminateProcess, self._msr_server_handle)
+
+  def ReadMsr(self, msr_number, start=0, length=64):
+    self._StartMsrServerIfNeeded()
+    if not self._msr_server_handle:
+      raise OSError('Unable to start MSR server.')
+
+    sock = socket.create_connection(('127.0.0.1', self._msr_server_port), 5)
+    try:
+      sock.sendall(struct.pack('I', msr_number))
+      response = sock.recv(8)
+    finally:
+      sock.close()
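+    # The reply is the full 64-bit register value; extract |length| bits
+    # starting at bit |start|.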
+    return struct.unpack('Q', response)[0] >> start & ((1 << length) - 1)
+
+  def IsCooperativeShutdownSupported(self):
+    return True
+
+  def CooperativelyShutdown(self, proc, app_name):
+    pid = proc.pid
+
+    # http://timgolden.me.uk/python/win32_how_do_i/
+    #   find-the-window-for-my-subprocess.html
+    #
+    # It seems that intermittently this code manages to find windows
+    # that don't belong to Chrome -- for example, the cmd.exe window
+    # running slave.bat on the tryservers. Try to be careful about
+    # finding only Chrome's windows. This works for both the browser
+    # and content_shell.
+    #
+    # It seems safest to send the WM_CLOSE messages after discovering
+    # all of the sub-process's windows.
+    def find_chrome_windows(hwnd, hwnds):
+      _, win_pid = win32process.GetWindowThreadProcessId(hwnd)
+      if (pid == win_pid and
+          win32gui.IsWindowVisible(hwnd) and
+          win32gui.IsWindowEnabled(hwnd) and
+          win32gui.GetClassName(hwnd).lower().startswith(app_name)):
+        hwnds.append(hwnd)
+      return True
+    hwnds = []
+    win32gui.EnumWindows(find_chrome_windows, hwnds)
+    if hwnds:
+      for hwnd in hwnds:
+        win32gui.SendMessage(hwnd, win32con.WM_CLOSE, 0, 0)
+      return True
+    else:
+      logging.info('Did not find any windows owned by target process')
+    return False
diff --git a/catapult/telemetry/telemetry/internal/results/__init__.py b/catapult/telemetry/telemetry/internal/results/__init__.py
new file mode 100644
index 0000000..172bd9e
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/results/__init__.py
@@ -0,0 +1,8 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""
+The PageTestResults hierarchy provides a way of representing the results of
+running the test or measurement on pages.
+"""
+
diff --git a/catapult/telemetry/telemetry/internal/results/base_test_results_unittest.py b/catapult/telemetry/telemetry/internal/results/base_test_results_unittest.py
new file mode 100644
index 0000000..84e0438
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/results/base_test_results_unittest.py
@@ -0,0 +1,53 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import sys
+import unittest
+
+from telemetry.core import exceptions
+
+
+class BaseTestResultsUnittest(unittest.TestCase):
+
+  def CreateException(self):
+    try:
+      raise exceptions.IntentionalException
+    except Exception:
+      return sys.exc_info()
+
+  def assertEquals(self, ex, res):
+    # This helps diagnose result mismatches.
+    if ex != res and isinstance(ex, list):
+      def CleanList(l):
+        res = []
+        for x in l:
+          x = x.split('\n')
+          res.extend(x)
+        return res
+      ex = CleanList(ex)
+      res = CleanList(res)
+      max_len = max(len(ex), len(res))
+      max_width = max([len(x) for x in ex + res])
+      max_width = max(10, max_width)
+      print 'Lists differ!'
+      print '%*s | %*s' % (max_width, 'expected', max_width, 'result')
+      for i in range(max_len):
+        if i < len(ex):
+          e = ex[i]
+        else:
+          e = ''
+        if i < len(res):
+          r = res[i]
+        else:
+          r = ''
+        if e != r:
+          sep = '*'
+        else:
+          sep = '|'
+        print '%*s %s %*s' % (max_width, e, sep, max_width, r)
+      print ''
+    if ex != res and isinstance(ex, str) and isinstance(res, str):
+      print 'Strings differ!'
+      print 'expected:\n%s' % repr(ex)
+      print 'result:\n%s\n' % repr(res)
+    super(BaseTestResultsUnittest, self).assertEquals(ex, res)
diff --git a/catapult/telemetry/telemetry/internal/results/buildbot_output_formatter.py b/catapult/telemetry/telemetry/internal/results/buildbot_output_formatter.py
new file mode 100644
index 0000000..4a48d1b
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/results/buildbot_output_formatter.py
@@ -0,0 +1,120 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry import decorators
+from telemetry.internal.results import output_formatter
+from telemetry.util import perf_tests_helper
+from telemetry import value as value_module
+from telemetry.value import summary as summary_module
+
+
+@decorators.Deprecated(2016, 2, 29, 'Chart JSON is a supported alternative. '
+                       'See https://goo.gl/8daFav .')
+class BuildbotOutputFormatter(output_formatter.OutputFormatter):
+  def __init__(self, output_stream, trace_tag=''):
+    super(BuildbotOutputFormatter, self).__init__(output_stream)
+    self._trace_tag = trace_tag
+
+  def _PrintPerfResult(self, measurement, trace, v, units,
+                       result_type='default'):
+    output = perf_tests_helper.PrintPerfResult(
+        measurement, trace, v, units, result_type, print_to_stdout=False)
+    self.output_stream.write(output + '\n')
+    self.output_stream.flush()
+
+  def Format(self, page_test_results):
+    """Print summary data in a format expected by buildbot for perf dashboards.
+
+    If any failed pages exist, only output individual page results, and do
+    not output any average data.
+    """
+    had_failures = len(page_test_results.failures) > 0
+
+    # Print out the list of unique pages.
+    perf_tests_helper.PrintPages(
+        [page.display_name for page in page_test_results.pages_that_succeeded])
+    summary = summary_module.Summary(page_test_results.all_page_specific_values)
+    for value in summary.interleaved_computed_per_page_values_and_summaries:
+      if value.page:
+        self._PrintComputedPerPageValue(value)
+      else:
+        self._PrintComputedSummaryValue(value, had_failures)
+    self._PrintOverallResults(page_test_results)
+
+  def _PrintComputedPerPageValue(self, value):
+    # We don't print per-page values when there is a trace tag.
+    if self._trace_tag:
+      return
+
+    # Actually print the result.
+    buildbot_value = value.GetBuildbotValue()
+    buildbot_data_type = value.GetBuildbotDataType(
+        output_context=value_module.PER_PAGE_RESULT_OUTPUT_CONTEXT)
+    if buildbot_value is None or buildbot_data_type is None:
+      return
+
+    buildbot_measurement_name, buildbot_trace_name = (
+        value.GetChartAndTraceNameForPerPageResult())
+    if value.tir_label:
+      buildbot_measurement_name = '%s-%s' % (value.tir_label,
+                                             buildbot_measurement_name)
+    self._PrintPerfResult(buildbot_measurement_name,
+                          buildbot_trace_name,
+                          buildbot_value, value.units, buildbot_data_type)
+
+  def _PrintComputedSummaryValue(self, value, had_failures):
+    # If there were any page errors, we typically will print nothing.
+    #
+    # Note: this branch is structured less-densely to improve legibility.
+    if had_failures:
+      return
+
+    buildbot_value = value.GetBuildbotValue()
+    buildbot_data_type = value.GetBuildbotDataType(
+        output_context=value_module.COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT)
+    if buildbot_value is None or buildbot_data_type is None:
+      return
+
+    buildbot_measurement_name, buildbot_trace_name = (
+        value.GetChartAndTraceNameForComputedSummaryResult(
+            self._trace_tag))
+    if value.tir_label:
+      buildbot_measurement_name = '%s-%s' % (value.tir_label,
+                                             buildbot_measurement_name)
+      buildbot_trace_name = '%s-%s' % (value.tir_label,
+                                       buildbot_trace_name)
+    self._PrintPerfResult(buildbot_measurement_name,
+                          buildbot_trace_name,
+                          buildbot_value, value.units, buildbot_data_type)
+
+  def _PrintOverallResults(self, page_test_results):
+    # If there were no failed pages, output the overall results (results not
+    # associated with a page).
+    had_failures = len(page_test_results.failures) > 0
+    if not had_failures:
+      for value in page_test_results.all_summary_values:
+        buildbot_value = value.GetBuildbotValue()
+        buildbot_data_type = value.GetBuildbotDataType(
+            output_context=value_module.SUMMARY_RESULT_OUTPUT_CONTEXT)
+        buildbot_measurement_name, buildbot_trace_name = (
+            value.GetChartAndTraceNameForComputedSummaryResult(
+                self._trace_tag))
+        self._PrintPerfResult(
+            buildbot_measurement_name,
+            buildbot_trace_name,
+            buildbot_value,
+            value.units,
+            buildbot_data_type)
+
+    # Print the number of failed and errored pages.
+    self._PrintPerfResult('telemetry_page_measurement_results', 'num_failed',
+                          [len(page_test_results.failures)], 'count',
+                          'unimportant')
+
+    # TODO(chrishenry): Remove this in a separate patch to reduce the risk
+    # of rolling back due to buildbot breakage.
+    # Also fix src/tools/bisect-perf-regression_test.py when this is
+    # removed.
+    self._PrintPerfResult('telemetry_page_measurement_results', 'num_errored',
+                          [0], 'count', 'unimportant')
diff --git a/catapult/telemetry/telemetry/internal/results/buildbot_output_formatter_unittest.py b/catapult/telemetry/telemetry/internal/results/buildbot_output_formatter_unittest.py
new file mode 100644
index 0000000..0187d41
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/results/buildbot_output_formatter_unittest.py
@@ -0,0 +1,42 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+
+from telemetry.internal.results import base_test_results_unittest
+from telemetry.internal.results import buildbot_output_formatter
+from telemetry.internal.results import page_test_results
+from telemetry import page as page_module
+from telemetry import story as story_module
+from telemetry.testing import stream
+from telemetry.value import scalar
+
+class BuildbotOutputFormatterUnittest(
+    base_test_results_unittest.BaseTestResultsUnittest):
+  def setUp(self):
+    self._test_output_stream = stream.TestOutputStream()
+
+  def testTirLabelOutput(self):
+    story_set = story_module.StorySet(base_dir=os.path.dirname(__file__))
+    story_set.AddStory(page_module.Page('http://www.foo.com/', story_set,
+                       story_set.base_dir))
+
+
+    results = page_test_results.PageTestResults()
+    results.WillRunPage(story_set.stories[0])
+    results.AddValue(scalar.ScalarValue(story_set.stories[0], 'a', 'ms', 42,
+                                        tir_label='bar'))
+    results.DidRunPage(story_set.stories[0])
+
+    formatter = buildbot_output_formatter.BuildbotOutputFormatter(
+        self._test_output_stream)
+    formatter.Format(results)
+
+    expected = ('RESULT bar-a: http___www.foo.com_= 42 ms\n'
+                '*RESULT bar-a: bar-a= 42 ms\n'
+                'RESULT telemetry_page_measurement_results: num_failed= 0 '+
+                    'count\n'
+                'RESULT telemetry_page_measurement_results: num_errored= 0 '+
+                    'count\n')
+    self.assertEquals(expected, self._test_output_stream.output_data)
diff --git a/catapult/telemetry/telemetry/internal/results/chart_json_output_formatter.py b/catapult/telemetry/telemetry/internal/results/chart_json_output_formatter.py
new file mode 100644
index 0000000..a3855ee
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/results/chart_json_output_formatter.py
@@ -0,0 +1,83 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+import itertools
+import json
+
+from telemetry.internal.results import output_formatter
+from telemetry.value import summary as summary_module
+
+def ResultsAsChartDict(benchmark_metadata, page_specific_values,
+                       summary_values):
+  """Produces a dict for serialization to Chart JSON format from raw values.
+
+  Chart JSON is a transformation of the basic Telemetry JSON format that
+  removes the page map, summarizes the raw values, and organizes the results
+  by chart and trace name. This function takes the key pieces of data needed to
+  perform this transformation (namely, lists of values and a benchmark metadata
+  object) and processes them into a dict which can be serialized using the json
+  module.
+
+  Design doc for schema: http://goo.gl/kOtf1Y
+
+  Args:
+    page_specific_values: list of page-specific values
+    summary_values: list of summary values
+    benchmark_metadata: a benchmark.BenchmarkMetadata object
+
+  Returns:
+    A Chart JSON dict corresponding to the given data.
+  """
+  summary = summary_module.Summary(page_specific_values)
+  values = itertools.chain(
+      summary.interleaved_computed_per_page_values_and_summaries,
+      summary_values)
+  charts = collections.defaultdict(dict)
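+  # The resulting structure is charts[chart_name][trace_name] -> value dict;
+  # e.g. charts['warm_times']['http://www.foo.com/'] (names illustrative).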
+
+  for value in values:
+    if value.page:
+      chart_name, trace_name = (value.GetChartAndTraceNameForPerPageResult())
+    else:
+      chart_name, trace_name = (
+          value.GetChartAndTraceNameForComputedSummaryResult(None))
+      if chart_name == trace_name:
+        trace_name = 'summary'
+
+    if value.tir_label:
+      chart_name = value.tir_label + '@@' + chart_name
+
+    # This intentionally overwrites the trace if it already exists because this
+    # is expected of output from the buildbots currently.
+    # See: crbug.com/413393
+    charts[chart_name][trace_name] = value.AsDict()
+
+  result_dict = {
+    'format_version': '0.1',
+    'next_version': '0.2',
+    # TODO(sullivan): benchmark_name, benchmark_description, and
+    # trace_rerun_options should be removed when incrementing format_version
+    # to 0.2.
+    'benchmark_name': benchmark_metadata.name,
+    'benchmark_description': benchmark_metadata.description,
+    'trace_rerun_options': benchmark_metadata.rerun_options,
+    'benchmark_metadata': benchmark_metadata.AsDict(),
+    'charts': charts,
+  }
+
+  return result_dict
+
+# TODO(eakuefner): Transition this to translate Telemetry JSON.
+class ChartJsonOutputFormatter(output_formatter.OutputFormatter):
+  def __init__(self, output_stream, benchmark_metadata):
+    super(ChartJsonOutputFormatter, self).__init__(output_stream)
+    self._benchmark_metadata = benchmark_metadata
+
+  def Format(self, page_test_results):
+    json.dump(ResultsAsChartDict(
+        self._benchmark_metadata,
+        page_test_results.all_page_specific_values,
+        page_test_results.all_summary_values),
+              self.output_stream, indent=2)
+    self.output_stream.write('\n')
diff --git a/catapult/telemetry/telemetry/internal/results/chart_json_output_formatter_unittest.py b/catapult/telemetry/telemetry/internal/results/chart_json_output_formatter_unittest.py
new file mode 100644
index 0000000..90035ab
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/results/chart_json_output_formatter_unittest.py
@@ -0,0 +1,199 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import os
+import StringIO
+import unittest
+
+from telemetry import benchmark
+from telemetry import story
+from telemetry.internal.results import chart_json_output_formatter
+from telemetry.internal.results import page_test_results
+from telemetry import page as page_module
+from telemetry.value import improvement_direction
+from telemetry.value import list_of_scalar_values
+from telemetry.value import scalar
+
+
+def _MakeStorySet():
+  ps = story.StorySet(base_dir=os.path.dirname(__file__))
+  ps.AddStory(page_module.Page('http://www.foo.com/', ps, ps.base_dir))
+  ps.AddStory(page_module.Page('http://www.bar.com/', ps, ps.base_dir))
+  return ps
+
+class ChartJsonTest(unittest.TestCase):
+  def setUp(self):
+    self._output = StringIO.StringIO()
+    self._story_set = _MakeStorySet()
+    self._benchmark_metadata = benchmark.BenchmarkMetadata(
+        'benchmark_name', 'benchmark_description')
+    self._formatter = chart_json_output_formatter.ChartJsonOutputFormatter(
+        self._output, self._benchmark_metadata)
+
+  def testOutputAndParse(self):
+    results = page_test_results.PageTestResults()
+
+    self._output.truncate(0)
+
+    results.WillRunPage(self._story_set[0])
+    v0 = scalar.ScalarValue(results.current_page, 'foo', 'seconds', 3,
+                            improvement_direction=improvement_direction.DOWN)
+    results.AddValue(v0)
+    results.DidRunPage(self._story_set[0])
+
+    self._formatter.Format(results)
+    d = json.loads(self._output.getvalue())
+    self.assertIn('foo', d['charts'])
+
+  def testAsChartDictSerializable(self):
+    v0 = scalar.ScalarValue(self._story_set[0], 'foo', 'seconds', 3,
+                            improvement_direction=improvement_direction.DOWN)
+    page_specific_values = [v0]
+    summary_values = []
+
+    d = chart_json_output_formatter.ResultsAsChartDict(
+        self._benchmark_metadata,
+        page_specific_values,
+        summary_values)
+    json.dumps(d)
+
+  def testAsChartDictBaseKeys(self):
+    page_specific_values = []
+    summary_values = []
+
+    d = chart_json_output_formatter.ResultsAsChartDict(
+        self._benchmark_metadata,
+        page_specific_values,
+        summary_values)
+
+    self.assertEquals(d['format_version'], '0.1')
+    self.assertEquals(d['next_version'], '0.2')
+    self.assertEquals(d['benchmark_metadata']['name'], 'benchmark_name')
+    self.assertEquals(d['benchmark_metadata']['description'],
+                      'benchmark_description')
+    self.assertEquals(d['benchmark_metadata']['type'], 'telemetry_benchmark')
+
+  def testAsChartDictNoDescription(self):
+    page_specific_values = []
+    summary_values = []
+
+    d = chart_json_output_formatter.ResultsAsChartDict(
+        benchmark.BenchmarkMetadata('benchmark_name', ''),
+        page_specific_values,
+        summary_values)
+
+    self.assertEquals('', d['benchmark_metadata']['description'])
+
+  def testAsChartDictPageSpecificValuesSamePageWithInteractionRecord(self):
+    v0 = scalar.ScalarValue(self._story_set[0], 'foo', 'seconds', 3,
+                            improvement_direction=improvement_direction.DOWN,
+                            tir_label='MyIR')
+    v1 = scalar.ScalarValue(self._story_set[0], 'foo', 'seconds', 4,
+                            improvement_direction=improvement_direction.DOWN,
+                            tir_label='MyIR')
+    page_specific_values = [v0, v1]
+    summary_values = []
+
+    d = chart_json_output_formatter.ResultsAsChartDict(
+        self._benchmark_metadata,
+        page_specific_values,
+        summary_values)
+
+    self.assertTrue('MyIR@@foo' in d['charts'])
+    self.assertTrue('http://www.foo.com/' in d['charts']['MyIR@@foo'])
+
+  def testAsChartDictPageSpecificValuesSamePageWithoutInteractionRecord(self):
+    v0 = scalar.ScalarValue(self._story_set[0], 'foo', 'seconds', 3,
+                            improvement_direction=improvement_direction.DOWN)
+    v1 = scalar.ScalarValue(self._story_set[0], 'foo', 'seconds', 4,
+                            improvement_direction=improvement_direction.DOWN)
+    page_specific_values = [v0, v1]
+    summary_values = []
+
+    d = chart_json_output_formatter.ResultsAsChartDict(
+        self._benchmark_metadata,
+        page_specific_values,
+        summary_values)
+
+    self.assertTrue('foo' in d['charts'])
+    self.assertTrue('http://www.foo.com/' in d['charts']['foo'])
+
+  def testAsChartDictPageSpecificValuesAndComputedSummaryWithTraceName(self):
+    v0 = scalar.ScalarValue(self._story_set[0], 'foo.bar', 'seconds', 3,
+                            improvement_direction=improvement_direction.DOWN)
+    v1 = scalar.ScalarValue(self._story_set[1], 'foo.bar', 'seconds', 4,
+                            improvement_direction=improvement_direction.DOWN)
+    page_specific_values = [v0, v1]
+    summary_values = []
+
+    d = chart_json_output_formatter.ResultsAsChartDict(
+        self._benchmark_metadata,
+        page_specific_values,
+        summary_values)
+
+    self.assertTrue('foo' in d['charts'])
+    self.assertTrue('http://www.foo.com/' in d['charts']['foo'])
+    self.assertTrue('http://www.bar.com/' in d['charts']['foo'])
+    self.assertTrue('bar' in d['charts']['foo'])
+
+  def testAsChartDictPageSpecificValuesAndComputedSummaryWithoutTraceName(self):
+    v0 = scalar.ScalarValue(self._story_set[0], 'foo', 'seconds', 3,
+                            improvement_direction=improvement_direction.DOWN)
+    v1 = scalar.ScalarValue(self._story_set[1], 'foo', 'seconds', 4,
+                            improvement_direction=improvement_direction.DOWN)
+    page_specific_values = [v0, v1]
+    summary_values = []
+
+    d = chart_json_output_formatter.ResultsAsChartDict(
+        self._benchmark_metadata,
+        page_specific_values,
+        summary_values)
+
+    self.assertTrue('foo' in d['charts'])
+    self.assertTrue('http://www.foo.com/' in d['charts']['foo'])
+    self.assertTrue('http://www.bar.com/' in d['charts']['foo'])
+    self.assertTrue('summary' in d['charts']['foo'])
+
+  def testAsChartDictSummaryValueWithTraceName(self):
+    v0 = list_of_scalar_values.ListOfScalarValues(
+        None, 'foo.bar', 'seconds', [3, 4],
+        improvement_direction=improvement_direction.DOWN)
+    page_specific_values = []
+    summary_values = [v0]
+
+    d = chart_json_output_formatter.ResultsAsChartDict(
+        self._benchmark_metadata,
+        page_specific_values,
+        summary_values)
+
+    self.assertTrue('bar' in d['charts']['foo'])
+
+  def testAsChartDictSummaryValueWithoutTraceName(self):
+    v0 = list_of_scalar_values.ListOfScalarValues(
+        None, 'foo', 'seconds', [3, 4],
+        improvement_direction=improvement_direction.DOWN)
+    page_specific_values = []
+    summary_values = [v0]
+
+    d = chart_json_output_formatter.ResultsAsChartDict(
+        self._benchmark_metadata,
+        page_specific_values,
+        summary_values)
+
+    self.assertTrue('summary' in d['charts']['foo'])
+
+  def testAsChartDictValueSmokeTest(self):
+    v0 = list_of_scalar_values.ListOfScalarValues(
+        None, 'foo.bar', 'seconds', [3, 4],
+        improvement_direction=improvement_direction.DOWN)
+    page_specific_values = []
+    summary_values = [v0]
+
+    d = chart_json_output_formatter.ResultsAsChartDict(
+        self._benchmark_metadata,
+        page_specific_values,
+        summary_values)
+
+    self.assertEquals(d['charts']['foo']['bar']['values'], [3, 4])
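The tests above illustrate how ResultsAsChartDict groups values: chart keys are metric names (prefixed with the interaction record label and '@@' when one is present), and each chart holds one trace per page plus a computed 'summary' trace. A minimal sketch of that shape, with illustrative values only (the real dict carries full value metadata such as units and improvement direction):

# Illustrative only: the rough shape asserted by the tests above.
chart_dict_sketch = {
    'charts': {
        'foo': {
            'http://www.foo.com/': {'type': 'scalar', 'value': 3},
            'http://www.bar.com/': {'type': 'scalar', 'value': 4},
            'summary': {'type': 'list_of_scalar_values', 'values': [3, 4]},
        },
    },
}
assert 'summary' in chart_dict_sketch['charts']['foo']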
diff --git a/catapult/telemetry/telemetry/internal/results/csv_pivot_table_output_formatter.py b/catapult/telemetry/telemetry/internal/results/csv_pivot_table_output_formatter.py
new file mode 100644
index 0000000..de91b79
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/results/csv_pivot_table_output_formatter.py
@@ -0,0 +1,61 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import csv
+
+from telemetry.internal.results import output_formatter
+from telemetry.value import scalar
+
+
+class CsvPivotTableOutputFormatter(output_formatter.OutputFormatter):
+  """Output the results as CSV suitable for reading into a spreadsheet.
+
+  This will write a header row, and one row for each value. Each value row
+  contains the value and unit, identifies the value (story_set, page, name), and
+  (optionally) data from --output-trace-tag. This format matches what
+  spreadsheet programs expect as input for a "pivot table".
+
+  A trace tag (--output-trace-tag) can be used to tag each value, to allow
+  easy combination of the resulting CSVs from several runs.
+  If the trace_tag contains a comma, it will be written as several
+  comma-separated values.
+
+  This class only processes scalar values.
+  """
+
+  FIELDS = ['story_set', 'page', 'name', 'value', 'units', 'run_index']
+
+  def __init__(self, output_stream, trace_tag=''):
+    super(CsvPivotTableOutputFormatter, self).__init__(output_stream)
+    self._trace_tag = trace_tag
+
+  def Format(self, page_test_results):
+    csv_writer = csv.writer(self.output_stream)
+
+    # Split the trace tag on commas; each piece becomes its own column.
+    tag_values = self._trace_tag.split(',')
+    tag_values = [x for x in tag_values if x]  # Drop empty entries.
+    tag_headers = ['trace_tag_%d' % i for i in range(len(tag_values))]
+
+    # Write header.
+    csv_writer.writerow(self.FIELDS + tag_headers)
+
+    # Write all values. Each row contains a value + page-level metadata.
+    for run in page_test_results.all_page_runs:
+      run_index = page_test_results.all_page_runs.index(run)
+      page_dict = {
+          'page': run.story.display_name,
+          'story_set': run.story.page_set.Name(),
+          'run_index': run_index,
+      }
+      for value in run.values:
+        if isinstance(value, scalar.ScalarValue):
+          value_dict = {
+            'name': value.name,
+            'value': value.value,
+            'units': value.units,
+          }
+          value_dict.update(page_dict.items())
+          csv_writer.writerow(
+              [value_dict[field] for field in self.FIELDS] + tag_values)
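For reference, a rough sketch of the rows this formatter emits for a single page run with one scalar value and --output-trace-tag=date,option (the strings mirror the unit tests in the next file):

# Illustrative only: header row and one value row, in FIELDS order with the
# trace-tag columns appended at the end.
header = 'story_set,page,name,value,units,run_index,trace_tag_0,trace_tag_1'
row = 'story_set,http://www.foo.com/,foo,3,seconds,0,date,option'
assert header.count(',') == row.count(',')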
diff --git a/catapult/telemetry/telemetry/internal/results/csv_pivot_table_output_formatter_unittest.py b/catapult/telemetry/telemetry/internal/results/csv_pivot_table_output_formatter_unittest.py
new file mode 100644
index 0000000..9df2f39
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/results/csv_pivot_table_output_formatter_unittest.py
@@ -0,0 +1,113 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import os
+import StringIO
+import unittest
+
+from telemetry import story
+from telemetry.internal.results import csv_pivot_table_output_formatter
+from telemetry.internal.results import page_test_results
+from telemetry import page as page_module
+from telemetry.value import improvement_direction
+from telemetry.value import scalar
+
+
+def _MakeStorySet():
+  story_set = story.StorySet(base_dir=os.path.dirname(__file__))
+  story_set.AddStory(
+      page_module.Page('http://www.foo.com/', story_set, story_set.base_dir))
+  story_set.AddStory(
+      page_module.Page('http://www.bar.com/', story_set, story_set.base_dir))
+  return story_set
+
+
+class CsvPivotTableOutputFormatterTest(unittest.TestCase):
+
+  # The line separator used by the CSV formatter.
+  _LINE_SEPARATOR = '\r\n'
+
+  def setUp(self):
+    self._output = StringIO.StringIO()
+    self._story_set = _MakeStorySet()
+    self._results = page_test_results.PageTestResults()
+    self._formatter = None
+    self.MakeFormatter()
+
+  def MakeFormatter(self, trace_tag=''):
+    self._formatter = (
+        csv_pivot_table_output_formatter.CsvPivotTableOutputFormatter(
+            self._output, trace_tag))
+
+  def SimulateBenchmarkRun(self, dict_of_values):
+    """Simulate one run of a benchmark, using the supplied values.
+
+    Args:
+      dict_of_values: a dictionary mapping each Page instance to the list
+          of Values recorded for it.
+    """
+    for page, values in dict_of_values.iteritems():
+      self._results.WillRunPage(page)
+      for v in values:
+        v.page = page
+        self._results.AddValue(v)
+      self._results.DidRunPage(page)
+
+  def Format(self):
+    self._formatter.Format(self._results)
+    return self._output.getvalue()
+
+  def testSimple(self):
+    # Test a simple benchmark with only one value:
+    self.SimulateBenchmarkRun({
+        self._story_set[0]: [scalar.ScalarValue(
+            None, 'foo', 'seconds', 3,
+            improvement_direction=improvement_direction.DOWN)]})
+    expected = self._LINE_SEPARATOR.join([
+        'story_set,page,name,value,units,run_index',
+        'story_set,http://www.foo.com/,foo,3,seconds,0',
+        ''])
+
+    self.assertEqual(expected, self.Format())
+
+  def testMultiplePagesAndValues(self):
+    self.SimulateBenchmarkRun({
+        self._story_set[0]: [
+            scalar.ScalarValue(
+              None, 'foo', 'seconds', 4,
+              improvement_direction=improvement_direction.DOWN)],
+        self._story_set[1]: [
+            scalar.ScalarValue(
+                None, 'foo', 'seconds', 3.4,
+                improvement_direction=improvement_direction.DOWN),
+            scalar.ScalarValue(
+                None, 'bar', 'km', 10,
+                improvement_direction=improvement_direction.DOWN),
+            scalar.ScalarValue(
+                None, 'baz', 'count', 5,
+                improvement_direction=improvement_direction.DOWN)]})
+
+    # Parse CSV output into list of lists.
+    csv_string = self.Format()
+    lines = csv_string.split(self._LINE_SEPARATOR)
+    values = [s.split(',') for s in lines[1:-1]]
+
+    self.assertEquals(len(values), 4)  # We expect 4 values in total.
+    self.assertEquals(len(set((v[1] for v in values))), 2)  # 2 pages.
+    self.assertEquals(len(set((v[2] for v in values))), 3)  # 3 value names.
+
+  def testTraceTag(self):
+    self.MakeFormatter(trace_tag='date,option')
+    self.SimulateBenchmarkRun({
+        self._story_set[0]: [
+            scalar.ScalarValue(
+                None, 'foo', 'seconds', 3,
+                improvement_direction=improvement_direction.DOWN),
+            scalar.ScalarValue(
+                None, 'bar', 'tons', 5,
+                improvement_direction=improvement_direction.DOWN)]})
+    output = self.Format().split(self._LINE_SEPARATOR)
+
+    self.assertTrue(output[0].endswith(',trace_tag_0,trace_tag_1'))
+    for line in output[1:-1]:
+      self.assertTrue(line.endswith(',date,option'))
diff --git a/catapult/telemetry/telemetry/internal/results/gtest_progress_reporter.py b/catapult/telemetry/telemetry/internal/results/gtest_progress_reporter.py
new file mode 100644
index 0000000..c704633
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/results/gtest_progress_reporter.py
@@ -0,0 +1,96 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import time
+
+from telemetry.internal.results import progress_reporter
+from telemetry.value import failure
+from telemetry.value import skip
+
+
+class GTestProgressReporter(progress_reporter.ProgressReporter):
+  """A progress reporter that outputs the progress report in gtest style.
+
+  Be careful each print should only handle one string. Otherwise, the output
+  might be interrupted by Chrome logging, and the output interpretation might
+  be incorrect. For example:
+      print >> self._output_stream, "[ OK ]", testname
+  should be written as
+      print >> self._output_stream, "[ OK ] %s" % testname
+  """
+
+  def __init__(self, output_stream, output_skipped_tests_summary=False):
+    super(GTestProgressReporter, self).__init__()
+    self._output_stream = output_stream
+    self._timestamp = None
+    self._output_skipped_tests_summary = output_skipped_tests_summary
+
+  def _GetMs(self):
+    assert self._timestamp is not None, 'Did not call WillRunPage.'
+    return (time.time() - self._timestamp) * 1000
+
+  def DidAddValue(self, value):
+    super(GTestProgressReporter, self).DidAddValue(value)
+    if isinstance(value, failure.FailureValue):
+      print >> self._output_stream, failure.GetStringFromExcInfo(
+          value.exc_info)
+      self._output_stream.flush()
+    elif isinstance(value, skip.SkipValue):
+      print >> self._output_stream, '===== SKIPPING TEST %s: %s =====' % (
+          value.page.display_name, value.reason)
+    # TODO(chrishenry): Consider outputting metric values as well. For
+    # example, it can replace BuildbotOutputFormatter in
+    # --output-format=html, which we used only so that users can grep
+    # the results without opening results.html.
+
+  def WillRunPage(self, page_test_results):
+    super(GTestProgressReporter, self).WillRunPage(page_test_results)
+    print >> self._output_stream, '[ RUN      ] %s' % (
+        page_test_results.current_page.display_name)
+    self._output_stream.flush()
+    self._timestamp = time.time()
+
+  def DidRunPage(self, page_test_results):
+    super(GTestProgressReporter, self).DidRunPage(page_test_results)
+    page = page_test_results.current_page
+    if page_test_results.current_page_run.failed:
+      print >> self._output_stream, '[  FAILED  ] %s (%0.f ms)' % (
+          page.display_name, self._GetMs())
+    else:
+      print >> self._output_stream, '[       OK ] %s (%0.f ms)' % (
+          page.display_name, self._GetMs())
+    self._output_stream.flush()
+
+  def DidFinishAllTests(self, page_test_results):
+    super(GTestProgressReporter, self).DidFinishAllTests(page_test_results)
+    successful_runs = []
+    failed_runs = []
+    for run in page_test_results.all_page_runs:
+      if run.failed:
+        failed_runs.append(run)
+      else:
+        successful_runs.append(run)
+
+    unit = 'test' if len(successful_runs) == 1 else 'tests'
+    print >> self._output_stream, '[  PASSED  ] %d %s.' % (
+        (len(successful_runs), unit))
+    if len(failed_runs) > 0:
+      unit = 'test' if len(failed_runs) == 1 else 'tests'
+      print >> self._output_stream, '[  FAILED  ] %d %s, listed below:' % (
+          (len(page_test_results.failures), unit))
+      for failed_run in failed_runs:
+        print >> self._output_stream, '[  FAILED  ]  %s' % (
+            failed_run.story.display_name)
+      print >> self._output_stream
+      count = len(failed_runs)
+      unit = 'TEST' if count == 1 else 'TESTS'
+      print >> self._output_stream, '%d FAILED %s' % (count, unit)
+    print >> self._output_stream
+
+    if self._output_skipped_tests_summary:
+      if len(page_test_results.skipped_values) > 0:
+        print >> self._output_stream, 'Skipped pages:\n%s\n' % ('\n'.join(
+            v.page.display_name for v in page_test_results.skipped_values))
+
+    self._output_stream.flush()
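A small sketch of how this reporter is typically attached to a results object (mirroring the unit tests in the next file); sys.stdout stands in for any stream with write() and flush():

import sys

from telemetry.internal.results import gtest_progress_reporter
from telemetry.internal.results import page_test_results

reporter = gtest_progress_reporter.GTestProgressReporter(
    sys.stdout, output_skipped_tests_summary=True)
results = page_test_results.PageTestResults(progress_reporter=reporter)
# WillRunPage()/AddValue()/DidRunPage() calls happen per story here, then:
results.PrintSummary()  # prints the closing [  PASSED  ]/[  FAILED  ] lines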
diff --git a/catapult/telemetry/telemetry/internal/results/gtest_progress_reporter_unittest.py b/catapult/telemetry/telemetry/internal/results/gtest_progress_reporter_unittest.py
new file mode 100644
index 0000000..7f936f9
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/results/gtest_progress_reporter_unittest.py
@@ -0,0 +1,189 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import traceback
+
+from telemetry import story
+from telemetry.internal.results import base_test_results_unittest
+from telemetry.internal.results import gtest_progress_reporter
+from telemetry.internal.results import page_test_results
+from telemetry import page as page_module
+from telemetry.testing import simple_mock
+from telemetry.testing import stream
+from telemetry.value import failure
+from telemetry.value import skip
+
+
+def _MakeStorySet():
+  story_set = story.StorySet(base_dir=os.path.dirname(__file__))
+  story_set.AddStory(
+      page_module.Page('http://www.foo.com/', story_set, story_set.base_dir))
+  story_set.AddStory(
+      page_module.Page('http://www.bar.com/', story_set, story_set.base_dir))
+  story_set.AddStory(
+      page_module.Page('http://www.baz.com/', story_set, story_set.base_dir))
+  story_set.AddStory(
+      page_module.Page('http://www.roz.com/', story_set, story_set.base_dir))
+  return story_set
+
+
+class GTestProgressReporterTest(
+    base_test_results_unittest.BaseTestResultsUnittest):
+
+  def setUp(self):
+    super(GTestProgressReporterTest, self).setUp()
+    self._mock_timer = simple_mock.MockTimer(gtest_progress_reporter)
+
+    self._output_stream = stream.TestOutputStream()
+    self._reporter = gtest_progress_reporter.GTestProgressReporter(
+        self._output_stream)
+
+  def tearDown(self):
+    self._mock_timer.Restore()
+
+  def testSingleSuccessPage(self):
+    test_story_set = _MakeStorySet()
+
+    results = page_test_results.PageTestResults(
+        progress_reporter=self._reporter)
+    results.WillRunPage(test_story_set.stories[0])
+    self._mock_timer.SetTime(0.007)
+    results.DidRunPage(test_story_set.stories[0])
+
+    results.PrintSummary()
+    expected = ('[ RUN      ] http://www.foo.com/\n'
+                '[       OK ] http://www.foo.com/ (7 ms)\n'
+                '[  PASSED  ] 1 test.\n\n')
+    self.assertEquals(expected, ''.join(self._output_stream.output_data))
+
+  def testSingleFailedPage(self):
+    test_story_set = _MakeStorySet()
+
+    results = page_test_results.PageTestResults(
+        progress_reporter=self._reporter)
+    results.WillRunPage(test_story_set.stories[0])
+    exc_info = self.CreateException()
+    results.AddValue(failure.FailureValue(test_story_set.stories[0], exc_info))
+    results.DidRunPage(test_story_set.stories[0])
+
+    results.PrintSummary()
+    exception_trace = ''.join(traceback.format_exception(*exc_info))
+    expected = ('[ RUN      ] http://www.foo.com/\n'
+                '%s\n'
+                '[  FAILED  ] http://www.foo.com/ (0 ms)\n'
+                '[  PASSED  ] 0 tests.\n'
+                '[  FAILED  ] 1 test, listed below:\n'
+                '[  FAILED  ]  http://www.foo.com/\n\n'
+                '1 FAILED TEST\n\n' % exception_trace)
+    self.assertEquals(expected, ''.join(self._output_stream.output_data))
+
+  def testSingleSkippedPage(self):
+    test_story_set = _MakeStorySet()
+    results = page_test_results.PageTestResults(
+        progress_reporter=self._reporter)
+    results.WillRunPage(test_story_set.stories[0])
+    self._mock_timer.SetTime(0.007)
+    results.AddValue(skip.SkipValue(test_story_set.stories[0],
+        'Page skipped for testing reason'))
+    results.DidRunPage(test_story_set.stories[0])
+
+    results.PrintSummary()
+    expected = ('[ RUN      ] http://www.foo.com/\n'
+                '===== SKIPPING TEST http://www.foo.com/:'
+                ' Page skipped for testing reason =====\n'
+                '[       OK ] http://www.foo.com/ (7 ms)\n'
+                '[  PASSED  ] 1 test.\n\n')
+    self.assertEquals(expected, ''.join(self._output_stream.output_data))
+
+  def testPassAndFailedPages(self):
+    test_story_set = _MakeStorySet()
+    results = page_test_results.PageTestResults(
+        progress_reporter=self._reporter)
+    exc_info = self.CreateException()
+
+    results.WillRunPage(test_story_set.stories[0])
+    self._mock_timer.SetTime(0.007)
+    results.DidRunPage(test_story_set.stories[0])
+
+    results.WillRunPage(test_story_set.stories[1])
+    self._mock_timer.SetTime(0.009)
+    results.AddValue(failure.FailureValue(test_story_set.stories[1], exc_info))
+    results.DidRunPage(test_story_set.stories[1])
+
+    results.WillRunPage(test_story_set.stories[2])
+    self._mock_timer.SetTime(0.015)
+    results.AddValue(failure.FailureValue(test_story_set.stories[2], exc_info))
+    results.DidRunPage(test_story_set.stories[2])
+
+    results.WillRunPage(test_story_set.stories[3])
+    self._mock_timer.SetTime(0.020)
+    results.DidRunPage(test_story_set.stories[3])
+
+    results.PrintSummary()
+    exception_trace = ''.join(traceback.format_exception(*exc_info))
+    expected = ('[ RUN      ] http://www.foo.com/\n'
+                '[       OK ] http://www.foo.com/ (7 ms)\n'
+                '[ RUN      ] http://www.bar.com/\n'
+                '%s\n'
+                '[  FAILED  ] http://www.bar.com/ (2 ms)\n'
+                '[ RUN      ] http://www.baz.com/\n'
+                '%s\n'
+                '[  FAILED  ] http://www.baz.com/ (6 ms)\n'
+                '[ RUN      ] http://www.roz.com/\n'
+                '[       OK ] http://www.roz.com/ (5 ms)\n'
+                '[  PASSED  ] 2 tests.\n'
+                '[  FAILED  ] 2 tests, listed below:\n'
+                '[  FAILED  ]  http://www.bar.com/\n'
+                '[  FAILED  ]  http://www.baz.com/\n\n'
+                '2 FAILED TESTS\n\n' % (exception_trace, exception_trace))
+    self.assertEquals(expected, ''.join(self._output_stream.output_data))
+
+  def testStreamingResults(self):
+    test_story_set = _MakeStorySet()
+    results = page_test_results.PageTestResults(
+        progress_reporter=self._reporter)
+    exc_info = self.CreateException()
+
+    results.WillRunPage(test_story_set.stories[0])
+    self._mock_timer.SetTime(0.007)
+    results.DidRunPage(test_story_set.stories[0])
+    expected = ('[ RUN      ] http://www.foo.com/\n'
+                '[       OK ] http://www.foo.com/ (7 ms)\n')
+    self.assertEquals(expected, ''.join(self._output_stream.output_data))
+
+    results.WillRunPage(test_story_set.stories[1])
+    self._mock_timer.SetTime(0.009)
+    exception_trace = ''.join(traceback.format_exception(*exc_info))
+    results.AddValue(failure.FailureValue(test_story_set.stories[1], exc_info))
+    results.DidRunPage(test_story_set.stories[1])
+    expected = ('[ RUN      ] http://www.foo.com/\n'
+                '[       OK ] http://www.foo.com/ (7 ms)\n'
+                '[ RUN      ] http://www.bar.com/\n'
+                '%s\n'
+                '[  FAILED  ] http://www.bar.com/ (2 ms)\n' % exception_trace)
+    self.assertEquals(expected, ''.join(self._output_stream.output_data))
+
+  def testOutputSkipInformation(self):
+    test_story_set = _MakeStorySet()
+    self._reporter = gtest_progress_reporter.GTestProgressReporter(
+        self._output_stream, output_skipped_tests_summary=True)
+    results = page_test_results.PageTestResults(
+        progress_reporter=self._reporter)
+    results.WillRunPage(test_story_set.stories[0])
+    self._mock_timer.SetTime(0.007)
+    results.AddValue(skip.SkipValue(test_story_set.stories[0],
+        'Page skipped for testing reason'))
+    results.DidRunPage(test_story_set.stories[0])
+
+    results.PrintSummary()
+    expected = ('[ RUN      ] http://www.foo.com/\n'
+                '===== SKIPPING TEST http://www.foo.com/:'
+                ' Page skipped for testing reason =====\n'
+                '[       OK ] http://www.foo.com/ (7 ms)\n'
+                '[  PASSED  ] 1 test.\n'
+                '\n'
+                'Skipped pages:\n'
+                'http://www.foo.com/\n'
+                '\n')
+    self.assertEquals(expected, ''.join(self._output_stream.output_data))
diff --git a/catapult/telemetry/telemetry/internal/results/html_output_formatter.py b/catapult/telemetry/telemetry/internal/results/html_output_formatter.py
new file mode 100644
index 0000000..46d264c
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/results/html_output_formatter.py
@@ -0,0 +1,190 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import datetime
+import json
+import logging
+import os
+import re
+
+from catapult_base import cloud_storage  # pylint: disable=import-error
+
+from telemetry.core import util
+from telemetry.internal.results import chart_json_output_formatter
+from telemetry.internal.results import output_formatter
+from telemetry import value as value_module
+from telemetry.value import list_of_scalar_values
+
+
+_TEMPLATE_HTML_PATH = os.path.join(
+    util.GetTelemetryDir(), 'support', 'html_output', 'results-template.html')
+_JS_PLUGINS = [os.path.join('flot', 'jquery.flot.min.js'),
+               os.path.join('WebKit', 'PerformanceTests', 'resources',
+                            'jquery.tablesorter.min.js'),
+               os.path.join('WebKit', 'PerformanceTests', 'resources',
+                            'statistics.js')]
+_UNIT_JSON = os.path.join(
+    util.GetTelemetryDir(), 'telemetry', 'value', 'unit-info.json')
+
+
+def _DatetimeInEs5CompatibleFormat(dt):
+  return dt.strftime('%Y-%m-%dT%H:%M:%S.%f')
+
+
+def _ShortDatetimeInEs5CompatibleFormat(dt):
+  return dt.strftime('%Y-%m-%d %H:%M:%S')
+
+
+# TODO(eakuefner): rewrite template to use Telemetry JSON directly
+class HtmlOutputFormatter(output_formatter.OutputFormatter):
+  def __init__(self, output_stream, metadata, reset_results, upload_results,
+      browser_type, results_label=None):
+    super(HtmlOutputFormatter, self).__init__(output_stream)
+    self._metadata = metadata
+    self._reset_results = reset_results
+    self._upload_results = upload_results
+    self._build_time = self._GetBuildTime()
+    self._existing_results = self._ReadExistingResults(output_stream)
+    if results_label:
+      self._results_label = results_label
+    else:
+      self._results_label = '%s (%s)' % (
+          metadata.name, _ShortDatetimeInEs5CompatibleFormat(self._build_time))
+    self._result = {
+        'buildTime': _DatetimeInEs5CompatibleFormat(self._build_time),
+        'label': self._results_label,
+        'platform': browser_type,
+        'tests': {}
+        }
+
+  def _GetBuildTime(self):
+    return datetime.datetime.utcnow()
+
+  def _GetHtmlTemplate(self):
+    with open(_TEMPLATE_HTML_PATH) as f:
+      return f.read()
+
+  def _GetPlugins(self):
+    plugins = ''
+    for p in _JS_PLUGINS:
+      with open(os.path.join(util.GetTelemetryThirdPartyDir(), p)) as f:
+        plugins += f.read()
+    return plugins
+
+  def _GetUnitJson(self):
+    with open(_UNIT_JSON) as f:
+      return f.read()
+
+  def _ReadExistingResults(self, output_stream):
+    results_html = output_stream.read()
+    if self._reset_results or not results_html:
+      return []
+    m = re.search(
+        '^<script id="results-json" type="application/json">(.*?)</script>$',
+        results_html, re.MULTILINE | re.DOTALL)
+    if not m:
+      logging.warn('Failed to extract previous results from HTML output')
+      return []
+    return json.loads(m.group(1))[:512]
+
+  def _SaveResults(self, results):
+    self._output_stream.seek(0)
+    self._output_stream.write(results)
+    self._output_stream.truncate()
+
+  def _PrintPerfResult(self, measurement, trace, values, units,
+                       result_type='default', std=None):
+    metric_name = measurement
+    if trace != measurement:
+      metric_name += '.' + trace
+    self._result['tests'].setdefault(self._test_name, {})
+    self._result['tests'][self._test_name].setdefault('metrics', {})
+    metric_data = {
+        'current': values,
+        'units': units,
+        'important': result_type == 'default'
+        }
+    if std is not None:
+      metric_data['std'] = std
+    self._result['tests'][self._test_name]['metrics'][metric_name] = metric_data
+
+  def _TranslateChartJson(self, chart_json_dict):
+    dummy_dict = dict()
+
+    for chart_name, traces in chart_json_dict['charts'].iteritems():
+      for trace_name, value_dict in traces.iteritems():
+        # TODO(eakuefner): refactor summarization so we don't have to jump
+        # through hoops like this.
+        if 'page_id' in value_dict:
+          del value_dict['page_id']
+          result_type = 'nondefault'
+        else:
+          result_type = 'default'
+
+        # Note: we explicitly ignore TraceValues because Buildbot did.
+        if value_dict['type'] == 'trace':
+          continue
+        value = value_module.Value.FromDict(value_dict, dummy_dict)
+
+        perf_value = value.GetBuildbotValue()
+
+        if '@@' in chart_name:
+          chart_name_to_print = '%s-%s' % tuple(chart_name.split('@@'))
+        else:
+          chart_name_to_print = str(chart_name)
+
+        if trace_name == 'summary':
+          trace_name = chart_name_to_print
+
+        std = None
+        if isinstance(value, list_of_scalar_values.ListOfScalarValues):
+          std = value.std
+
+        self._PrintPerfResult(chart_name_to_print, trace_name, perf_value,
+                              value.units, result_type, std)
+
+  @property
+  def _test_name(self):
+    return self._metadata.name
+
+  def GetResults(self):
+    return self._result
+
+  def GetCombinedResults(self):
+    all_results = list(self._existing_results)
+    all_results.append(self.GetResults())
+    return all_results
+
+  def Format(self, page_test_results):
+    chart_json_dict = chart_json_output_formatter.ResultsAsChartDict(
+        self._metadata, page_test_results.all_page_specific_values,
+        page_test_results.all_summary_values)
+
+    self._TranslateChartJson(chart_json_dict)
+    self._PrintPerfResult('telemetry_page_measurement_results', 'num_failed',
+                          [len(page_test_results.failures)], 'count',
+                          'unimportant')
+
+    html = self._GetHtmlTemplate()
+    html = html.replace('%json_results%', json.dumps(self.GetCombinedResults()))
+    html = html.replace('%json_units%', self._GetUnitJson())
+    html = html.replace('%plugins%', self._GetPlugins())
+    self._SaveResults(html)
+
+    if self._upload_results:
+      file_path = os.path.abspath(self._output_stream.name)
+      file_name = 'html-results/results-%s' % datetime.datetime.now().strftime(
+          '%Y-%m-%d_%H-%M-%S')
+      try:
+        cloud_storage.Insert(cloud_storage.PUBLIC_BUCKET, file_name, file_path)
+        print
+        print ('View online at '
+               'http://storage.googleapis.com/chromium-telemetry/%s'
+               % file_name)
+      except cloud_storage.PermissionError as e:
+        logging.error('Cannot upload profiling files to cloud storage due to '
+                      ' permission error: %s' % e.message)
+    print
+    print 'View result at file://%s' % os.path.abspath(
+        self._output_stream.name)
diff --git a/catapult/telemetry/telemetry/internal/results/html_output_formatter_unittest.py b/catapult/telemetry/telemetry/internal/results/html_output_formatter_unittest.py
new file mode 100644
index 0000000..c738e49
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/results/html_output_formatter_unittest.py
@@ -0,0 +1,265 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import datetime
+import os
+import StringIO
+import unittest
+
+from telemetry import benchmark
+from telemetry import story
+from telemetry.internal.results import html_output_formatter
+from telemetry.internal.results import page_test_results
+from telemetry import page as page_module
+from telemetry.value import improvement_direction
+from telemetry.value import scalar
+
+
+def _MakeStorySet():
+  story_set = story.StorySet(base_dir=os.path.dirname(__file__))
+  story_set.AddStory(
+      page_module.Page('http://www.foo.com/', story_set, story_set.base_dir))
+  story_set.AddStory(
+      page_module.Page('http://www.bar.com/', story_set, story_set.base_dir))
+  story_set.AddStory(
+      page_module.Page('http://www.baz.com/', story_set, story_set.base_dir))
+  return story_set
+
+
+class DeterministicHtmlOutputFormatter(
+    html_output_formatter.HtmlOutputFormatter):
+  def _GetBuildTime(self):
+    return datetime.datetime(1998, 9, 4, 13, 0, 0, 7777)
+
+  def _GetRevision(self):
+    return 'revision'
+
+class FakeMetadataForTest(benchmark.BenchmarkMetadata):
+  def __init__(self):
+    super(FakeMetadataForTest, self).__init__('test_name')
+
+# Wrap string IO with a .name property so that it behaves more like a file.
+class StringIOFile(StringIO.StringIO):
+  name = 'fake_output_file'
+
+
+class HtmlOutputFormatterTest(unittest.TestCase):
+
+  def setUp(self):
+    self.maxDiff = 100000
+
+  def test_basic_summary(self):
+    test_story_set = _MakeStorySet()
+    output_file = StringIOFile()
+
+    # Run the first time and verify the results are written to the HTML file.
+    results = page_test_results.PageTestResults()
+    results.WillRunPage(test_story_set.stories[0])
+    results.AddValue(scalar.ScalarValue(
+        test_story_set.stories[0], 'a', 'seconds', 3,
+        improvement_direction=improvement_direction.DOWN))
+    results.DidRunPage(test_story_set.stories[0])
+
+    results.WillRunPage(test_story_set.stories[1])
+    results.AddValue(scalar.ScalarValue(
+        test_story_set.stories[1], 'a', 'seconds', 7,
+        improvement_direction=improvement_direction.DOWN))
+    results.DidRunPage(test_story_set.stories[1])
+
+    formatter = DeterministicHtmlOutputFormatter(
+        output_file, FakeMetadataForTest(), False, False, 'browser_type')
+    formatter.Format(results)
+    expected = {
+      "platform": "browser_type",
+      "buildTime": "1998-09-04T13:00:00.007777",
+      "label": 'test_name (1998-09-04 13:00:00)',
+      "tests": {
+        "test_name": {
+          "metrics": {
+            "a": {
+              "current": [3, 7],
+              "std": 2.8284271247461903,
+              "units": "seconds",
+              "important": True
+            },
+            "telemetry_page_measurement_results.num_failed": {
+              "current": [0],
+              "units": "count",
+              "important": False
+            },
+            "a.http://www.bar.com/": {
+              "current": [7],
+              "std": 0.0,
+              "units": "seconds",
+              "important": False
+            },
+            "a.http://www.foo.com/": {
+              "current": [3],
+              "std": 0.0,
+              "units": "seconds",
+              "important": False
+            }
+          }
+        }
+      },
+    }
+    self.assertEquals(expected, formatter.GetResults())
+
+    # Run the second time and verify the results are appended to the HTML file.
+    output_file.seek(0)
+    results = page_test_results.PageTestResults()
+    results.WillRunPage(test_story_set.stories[0])
+    results.AddValue(scalar.ScalarValue(
+        test_story_set.stories[0], 'a', 'seconds', 4,
+        improvement_direction=improvement_direction.DOWN))
+    results.DidRunPage(test_story_set.stories[0])
+
+    results.WillRunPage(test_story_set.stories[1])
+    results.AddValue(scalar.ScalarValue(
+        test_story_set.stories[1], 'a', 'seconds', 8,
+        improvement_direction=improvement_direction.DOWN))
+    results.DidRunPage(test_story_set.stories[1])
+
+    formatter = DeterministicHtmlOutputFormatter(
+        output_file, FakeMetadataForTest(), False, False, 'browser_type')
+    formatter.Format(results)
+    expected = [
+      {
+        "platform": "browser_type",
+        "buildTime": "1998-09-04T13:00:00.007777",
+        "label": 'test_name (1998-09-04 13:00:00)',
+        "tests": {
+          "test_name": {
+            "metrics": {
+              "a": {
+                "current": [3, 7],
+                "units": "seconds",
+                "std": 2.8284271247461903,
+                "important": True
+              },
+              "telemetry_page_measurement_results.num_failed": {
+                "current": [0],
+                "units": "count",
+                "important": False
+              },
+              "a.http://www.bar.com/": {
+                "current": [7],
+                "std": 0.0,
+                "units": "seconds",
+                "important": False
+              },
+              "a.http://www.foo.com/": {
+                "current": [3],
+                "std": 0.0,
+                "units": "seconds",
+                "important": False
+              }
+            }
+          }
+        },
+      },
+      {
+        "platform": "browser_type",
+        "buildTime": "1998-09-04T13:00:00.007777",
+        "label": 'test_name (1998-09-04 13:00:00)',
+        "tests": {
+          "test_name": {
+            "metrics": {
+              "a": {
+                "current": [4, 8],
+                'std': 2.8284271247461903,
+                "units": "seconds",
+                "important": True
+              },
+              "telemetry_page_measurement_results.num_failed": {
+                "current": [0],
+                "units": "count",
+                "important": False,
+              },
+              "a.http://www.bar.com/": {
+                "current": [8],
+                "std": 0.0,
+                "units": "seconds",
+                "important": False
+              },
+              "a.http://www.foo.com/": {
+                "current": [4],
+                "std": 0.0,
+                "units": "seconds",
+                "important": False
+              }
+            }
+          }
+        },
+      }]
+    self.assertEquals(expected, formatter.GetCombinedResults())
+    last_output_len = len(output_file.getvalue())
+
+    # Now reset the results and verify the old ones are gone.
+    output_file.seek(0)
+    results = page_test_results.PageTestResults()
+    results.WillRunPage(test_story_set.stories[0])
+    results.AddValue(scalar.ScalarValue(
+        test_story_set.stories[0], 'a', 'seconds', 5,
+        improvement_direction=improvement_direction.DOWN))
+    results.DidRunPage(test_story_set.stories[0])
+
+    results.WillRunPage(test_story_set.stories[1])
+    results.AddValue(scalar.ScalarValue(
+        test_story_set.stories[1], 'a', 'seconds', 9,
+        improvement_direction=improvement_direction.DOWN))
+    results.AddValue(scalar.ScalarValue(
+        test_story_set.stories[1], 'b', 'seconds', 20, tir_label='foo'))
+    results.DidRunPage(test_story_set.stories[1])
+
+    formatter = DeterministicHtmlOutputFormatter(
+       output_file, FakeMetadataForTest(), True, False, 'browser_type')
+    formatter.Format(results)
+    expected = [{
+      "platform": "browser_type",
+      "buildTime": "1998-09-04T13:00:00.007777",
+      "label": 'test_name (1998-09-04 13:00:00)',
+      "tests": {
+        "test_name": {
+          "metrics": {
+            "a": {
+              "current": [5, 9],
+              'std': 2.8284271247461903,
+              "units": "seconds",
+              "important": True
+            },
+            "telemetry_page_measurement_results.num_failed": {
+              "current": [0],
+              "units": "count",
+              "important": False
+            },
+            "a.http://www.bar.com/": {
+              "current": [9],
+              "std": 0.0,
+              "units": "seconds",
+              "important": False
+            },
+            "a.http://www.foo.com/": {
+              "current": [5],
+              "std": 0.0,
+              "units": "seconds",
+              "important": False
+            },
+            "foo-b.http://www.bar.com/": {
+              "current": [20],
+              "std": 0.0,
+              "units": "seconds",
+              "important": False
+            },
+            "foo-b": {
+              "current": [20],
+              "std": 0.0,
+              "units": "seconds",
+              "important": True
+            }
+          }
+        }
+      },
+    }]
+    self.assertEquals(expected, formatter.GetCombinedResults())
+    self.assertTrue(len(output_file.getvalue()) < last_output_len)
diff --git a/catapult/telemetry/telemetry/internal/results/json_output_formatter.py b/catapult/telemetry/telemetry/internal/results/json_output_formatter.py
new file mode 100644
index 0000000..81dd35f
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/results/json_output_formatter.py
@@ -0,0 +1,59 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+
+from telemetry.internal.results import output_formatter
+
+
+def ResultsAsDict(page_test_results, benchmark_metadata):
+  """Takes PageTestResults to a dict serializable to JSON.
+
+  To serialize results as JSON we first convert them to a dict that can be
+  serialized by the json module. It also requires a benchmark_metadat object
+  for metadata to be integrated into the results (currently the benchmark
+  name). This function will also output trace files if they exist.
+
+  Args:
+    page_test_results: a PageTestResults object
+    benchmark_metadata: a benchmark.BenchmarkMetadata object
+  """
+  result_dict = {
+    'format_version': '0.2',
+    'next_version': '0.3',
+    # TODO(sullivan): benchmark_name should be removed when updating
+    # format_version to 0.3.
+    'benchmark_name': benchmark_metadata.name,
+    'benchmark_metadata': benchmark_metadata.AsDict(),
+    'summary_values': [v.AsDict() for v in
+                       page_test_results.all_summary_values],
+    'per_page_values': [v.AsDict() for v in
+                        page_test_results.all_page_specific_values],
+    'pages': {p.id: p.AsDict() for p in _GetAllPages(page_test_results)}
+  }
+  if page_test_results.serialized_trace_file_ids_to_paths:
+    result_dict['files'] = page_test_results.serialized_trace_file_ids_to_paths
+  return result_dict
+
+
+def _GetAllPages(page_test_results):
+  pages = set(page_run.story for page_run in
+              page_test_results.all_page_runs)
+  return pages
+
+
+class JsonOutputFormatter(output_formatter.OutputFormatter):
+  def __init__(self, output_stream, benchmark_metadata):
+    super(JsonOutputFormatter, self).__init__(output_stream)
+    self._benchmark_metadata = benchmark_metadata
+
+  @property
+  def benchmark_metadata(self):
+    return self._benchmark_metadata
+
+  def Format(self, page_test_results):
+    json.dump(
+        ResultsAsDict(page_test_results, self.benchmark_metadata),
+        self.output_stream, indent=2)
+    self.output_stream.write('\n')
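For orientation, a sketch of the top-level dict returned by ResultsAsDict; the nested value dicts are abbreviated here, and a 'files' key (mapping trace file ids to paths) appears only when traces were serialized:

import json

# Illustrative only: the keys match ResultsAsDict() above, the nested
# contents are placeholders.
example = {
    'format_version': '0.2',
    'next_version': '0.3',
    'benchmark_name': 'benchmark_name',
    'benchmark_metadata': {'name': 'benchmark_name'},
    'summary_values': [],
    'per_page_values': [{'name': 'foo', 'units': 'seconds'}],
    'pages': {1: {'url': 'http://www.foo.com/'}},
}
print json.dumps(example, indent=2)  # Python 2 syntax, as in the code above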
diff --git a/catapult/telemetry/telemetry/internal/results/json_output_formatter_unittest.py b/catapult/telemetry/telemetry/internal/results/json_output_formatter_unittest.py
new file mode 100644
index 0000000..46cdd58
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/results/json_output_formatter_unittest.py
@@ -0,0 +1,138 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import json
+import os
+import shutil
+import StringIO
+import tempfile
+import unittest
+
+from telemetry import story
+from telemetry import benchmark
+from telemetry.internal.results import json_output_formatter
+from telemetry.internal.results import page_test_results
+from telemetry import page as page_module
+from telemetry.timeline import trace_data
+from telemetry.value import improvement_direction
+from telemetry.value import scalar
+from telemetry.value import trace
+
+
+def _MakeStorySet():
+  story_set = story.StorySet(base_dir=os.path.dirname(__file__))
+  story_set.AddStory(
+      page_module.Page('http://www.foo.com/', story_set, story_set.base_dir))
+  story_set.AddStory(
+      page_module.Page('http://www.bar.com/', story_set, story_set.base_dir))
+  return story_set
+
+def _HasPage(pages, page):
+  return pages.get(page.id, None) is not None
+
+def _HasValueNamed(values, name):
+  return len([x for x in values if x['name'] == name]) == 1
+
+class JsonOutputFormatterTest(unittest.TestCase):
+  def setUp(self):
+    self._output = StringIO.StringIO()
+    self._story_set = _MakeStorySet()
+    self._formatter = json_output_formatter.JsonOutputFormatter(
+        self._output,
+        benchmark.BenchmarkMetadata('benchmark_name'))
+
+  def testOutputAndParse(self):
+    results = page_test_results.PageTestResults()
+
+    self._output.truncate(0)
+
+    results.WillRunPage(self._story_set[0])
+    v0 = scalar.ScalarValue(results.current_page, 'foo', 'seconds', 3,
+                            improvement_direction=improvement_direction.DOWN)
+    results.AddValue(v0)
+    results.DidRunPage(self._story_set[0])
+
+    self._formatter.Format(results)
+    json.loads(self._output.getvalue())
+
+  def testAsDictBaseKeys(self):
+    results = page_test_results.PageTestResults()
+    d = json_output_formatter.ResultsAsDict(results,
+        self._formatter.benchmark_metadata)
+
+    self.assertEquals(d['format_version'], '0.2')
+    self.assertEquals(d['next_version'], '0.3')
+    self.assertEquals(d['benchmark_metadata']['name'], 'benchmark_name')
+
+  def testAsDictWithOnePage(self):
+    results = page_test_results.PageTestResults()
+    results.WillRunPage(self._story_set[0])
+    v0 = scalar.ScalarValue(results.current_page, 'foo', 'seconds', 3,
+                            improvement_direction=improvement_direction.DOWN)
+    results.AddValue(v0)
+    results.DidRunPage(self._story_set[0])
+
+    d = json_output_formatter.ResultsAsDict(results,
+        self._formatter.benchmark_metadata)
+
+    self.assertTrue(_HasPage(d['pages'], self._story_set[0]))
+    self.assertTrue(_HasValueNamed(d['per_page_values'], 'foo'))
+
+  def testAsDictWithTraceValue(self):
+    tempdir = tempfile.mkdtemp()
+    try:
+      results = page_test_results.PageTestResults()
+      results.WillRunPage(self._story_set[0])
+      v0 = trace.TraceValue(
+          results.current_page,
+          trace_data.TraceData({'event': 'test'}))
+      results.AddValue(v0)
+      results.DidRunPage(self._story_set[0])
+      results._SerializeTracesToDirPath(tempdir)
+      d = json_output_formatter.ResultsAsDict(results,
+          self._formatter.benchmark_metadata)
+
+      self.assertTrue(_HasPage(d['pages'], self._story_set[0]))
+      self.assertTrue(_HasValueNamed(d['per_page_values'], 'trace'))
+      self.assertEquals(len(d['files']), 1)
+      output_trace_path = d['files'].values()[0]
+      self.assertTrue(output_trace_path.startswith(tempdir))
+      self.assertTrue(os.path.exists(output_trace_path))
+    finally:
+      shutil.rmtree(tempdir)
+
+
+
+  def testAsDictWithTwoPages(self):
+    results = page_test_results.PageTestResults()
+    results.WillRunPage(self._story_set[0])
+    v0 = scalar.ScalarValue(results.current_page, 'foo', 'seconds', 3,
+                            improvement_direction=improvement_direction.DOWN)
+    results.AddValue(v0)
+    results.DidRunPage(self._story_set[0])
+
+    results.WillRunPage(self._story_set[1])
+    v1 = scalar.ScalarValue(results.current_page, 'bar', 'seconds', 4,
+                            improvement_direction=improvement_direction.DOWN)
+    results.AddValue(v1)
+    results.DidRunPage(self._story_set[1])
+
+    d = json_output_formatter.ResultsAsDict(results,
+        self._formatter.benchmark_metadata)
+
+    self.assertTrue(_HasPage(d['pages'], self._story_set[0]))
+    self.assertTrue(_HasPage(d['pages'], self._story_set[1]))
+    self.assertTrue(_HasValueNamed(d['per_page_values'], 'foo'))
+    self.assertTrue(_HasValueNamed(d['per_page_values'], 'bar'))
+
+  def testAsDictWithSummaryValueOnly(self):
+    results = page_test_results.PageTestResults()
+    v = scalar.ScalarValue(None, 'baz', 'seconds', 5,
+                           improvement_direction=improvement_direction.DOWN)
+    results.AddSummaryValue(v)
+
+    d = json_output_formatter.ResultsAsDict(results,
+        self._formatter.benchmark_metadata)
+
+    self.assertFalse(d['pages'])
+    self.assertTrue(_HasValueNamed(d['summary_values'], 'baz'))
diff --git a/catapult/telemetry/telemetry/internal/results/output_formatter.py b/catapult/telemetry/telemetry/internal/results/output_formatter.py
new file mode 100644
index 0000000..6226232
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/results/output_formatter.py
@@ -0,0 +1,37 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class OutputFormatter(object):
+  """A formatter for PageTestResults.
+
+  An OutputFormatter takes PageTestResults, formats the results
+  (telemetry.value.Value instances), and outputs the formatted results
+  to the given output stream.
+
+  For example, CsvOutputFormatter produces results in CSV format."""
+
+  def __init__(self, output_stream):
+    """Constructs a new formatter that writes to the output_stream.
+
+    Args:
+      output_stream: The stream to write the formatted output to.
+    """
+    self._output_stream = output_stream
+
+  def Format(self, page_test_results):
+    """Formats the given PageTestResults into the output stream.
+
+    This will be called once at the end of a benchmark.
+
+    Args:
+      page_test_results: A PageTestResults object containing all results
+         from the current benchmark run.
+    """
+    raise NotImplementedError()
+
+  @property
+  def output_stream(self):
+    return self._output_stream
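As a sketch of the contract, a hypothetical formatter subclass that writes one line per page run (the class name and output format here are made up for illustration):

from telemetry.internal.results import output_formatter


class LinePerRunOutputFormatter(output_formatter.OutputFormatter):
  """Hypothetical formatter: writes one OK/FAIL line per page run."""

  def Format(self, page_test_results):
    for run in page_test_results.all_page_runs:
      status = 'FAIL' if run.failed else 'OK'
      self.output_stream.write('%s %s\n' % (status, run.story.display_name))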
diff --git a/catapult/telemetry/telemetry/internal/results/page_test_results.py b/catapult/telemetry/telemetry/internal/results/page_test_results.py
new file mode 100644
index 0000000..d168125
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/results/page_test_results.py
@@ -0,0 +1,269 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+import copy
+import datetime
+import logging
+import random
+import sys
+import traceback
+
+from catapult_base import cloud_storage  # pylint: disable=import-error
+
+from telemetry.internal.results import json_output_formatter
+from telemetry.internal.results import progress_reporter as reporter_module
+from telemetry.internal.results import story_run
+from telemetry import value as value_module
+from telemetry.value import failure
+from telemetry.value import skip
+from telemetry.value import trace
+
+
+class PageTestResults(object):
+  def __init__(self, output_formatters=None,
+               progress_reporter=None, trace_tag='', output_dir=None,
+               value_can_be_added_predicate=lambda v, is_first: True):
+    """
+    Args:
+      output_formatters: A list of output formatters. The output
+          formatters are typically used to format the test results, such
+          as CsvPivotTableOutputFormatter, which outputs the test results as CSV.
+      progress_reporter: An instance of progress_reporter.ProgressReporter,
+          to be used to output test status/results progressively.
+      trace_tag: A string to append to the buildbot trace name. Currently only
+          used for buildbot.
+      output_dir: A string specifying the directory in which to store test
+          artifacts, e.g. traces, videos, etc.
+      value_can_be_added_predicate: A function that takes two arguments:
+          a value.Value instance (except failure.FailureValue, skip.SkipValue
+          or trace.TraceValue) and a boolean (True when the value is part of
+          the first result for the story). It returns True if the value
+          can be added to the test results and False otherwise.
+    """
+    # TODO(chrishenry): Figure out if trace_tag is still necessary.
+
+    super(PageTestResults, self).__init__()
+    self._progress_reporter = (
+        progress_reporter if progress_reporter is not None
+        else reporter_module.ProgressReporter())
+    self._output_formatters = (
+        output_formatters if output_formatters is not None else [])
+    self._trace_tag = trace_tag
+    self._output_dir = output_dir
+    self._value_can_be_added_predicate = value_can_be_added_predicate
+
+    self._current_page_run = None
+    self._all_page_runs = []
+    self._all_stories = set()
+    self._representative_value_for_each_value_name = {}
+    self._all_summary_values = []
+    self._serialized_trace_file_ids_to_paths = {}
+    self._pages_to_profiling_files = collections.defaultdict(list)
+    self._pages_to_profiling_files_cloud_url = collections.defaultdict(list)
+
+  def __copy__(self):
+    cls = self.__class__
+    result = cls.__new__(cls)
+    for k, v in self.__dict__.items():
+      if isinstance(v, collections.Container):
+        v = copy.copy(v)
+      setattr(result, k, v)
+    return result
+
+  @property
+  def pages_to_profiling_files(self):
+    return self._pages_to_profiling_files
+
+  @property
+  def serialized_trace_file_ids_to_paths(self):
+    return self._serialized_trace_file_ids_to_paths
+
+  @property
+  def pages_to_profiling_files_cloud_url(self):
+    return self._pages_to_profiling_files_cloud_url
+
+  @property
+  def all_page_specific_values(self):
+    values = []
+    for run in self._all_page_runs:
+      values += run.values
+    if self._current_page_run:
+      values += self._current_page_run.values
+    return values
+
+  @property
+  def all_summary_values(self):
+    return self._all_summary_values
+
+  @property
+  def current_page(self):
+    assert self._current_page_run, 'Not currently running test.'
+    return self._current_page_run.story
+
+  @property
+  def current_page_run(self):
+    assert self._current_page_run, 'Not currently running test.'
+    return self._current_page_run
+
+  @property
+  def all_page_runs(self):
+    return self._all_page_runs
+
+  @property
+  def pages_that_succeeded(self):
+    """Returns the set of pages that succeeded."""
+    pages = set(run.story for run in self.all_page_runs)
+    pages.difference_update(self.pages_that_failed)
+    return pages
+
+  @property
+  def pages_that_failed(self):
+    """Returns the set of failed pages."""
+    failed_pages = set()
+    for run in self.all_page_runs:
+      if run.failed:
+        failed_pages.add(run.story)
+    return failed_pages
+
+  @property
+  def failures(self):
+    values = self.all_page_specific_values
+    return [v for v in values if isinstance(v, failure.FailureValue)]
+
+  @property
+  def skipped_values(self):
+    values = self.all_page_specific_values
+    return [v for v in values if isinstance(v, skip.SkipValue)]
+
+  def _GetStringFromExcInfo(self, err):
+    return ''.join(traceback.format_exception(*err))
+
+  def CleanUp(self):
+    """Clean up any TraceValues contained within this results object."""
+    for run in self._all_page_runs:
+      for v in run.values:
+        if isinstance(v, trace.TraceValue):
+          v.CleanUp()
+          run.values.remove(v)
+
+  def __enter__(self):
+    return self
+
+  def __exit__(self, _, __, ___):
+    self.CleanUp()
+
+  def WillRunPage(self, page):
+    assert not self._current_page_run, 'Did not call DidRunPage.'
+    self._current_page_run = story_run.StoryRun(page)
+    self._progress_reporter.WillRunPage(self)
+
+  def DidRunPage(self, page):  # pylint: disable=unused-argument
+    """
+    Args:
+      page: The current page under test.
+    """
+    assert self._current_page_run, 'Did not call WillRunPage.'
+    self._progress_reporter.DidRunPage(self)
+    self._all_page_runs.append(self._current_page_run)
+    self._all_stories.add(self._current_page_run.story)
+    self._current_page_run = None
+
+  def AddValue(self, value):
+    assert self._current_page_run, 'Not currently running test.'
+    self._ValidateValue(value)
+    is_first_result = (
+      self._current_page_run.story not in self._all_stories)
+    if not (isinstance(value, skip.SkipValue) or
+            isinstance(value, failure.FailureValue) or
+            isinstance(value, trace.TraceValue) or
+            self._value_can_be_added_predicate(value, is_first_result)):
+      return
+    # TODO(eakuefner/chrishenry): Add only one skip per pagerun assert here
+    self._current_page_run.AddValue(value)
+    self._progress_reporter.DidAddValue(value)
+
+  def AddProfilingFile(self, page, file_handle):
+    self._pages_to_profiling_files[page].append(file_handle)
+
+  def AddSummaryValue(self, value):
+    assert value.page is None
+    self._ValidateValue(value)
+    self._all_summary_values.append(value)
+
+  def _ValidateValue(self, value):
+    assert isinstance(value, value_module.Value)
+    if value.name not in self._representative_value_for_each_value_name:
+      self._representative_value_for_each_value_name[value.name] = value
+    representative_value = self._representative_value_for_each_value_name[
+        value.name]
+    assert value.IsMergableWith(representative_value)
+
+  def PrintSummary(self):
+    self._progress_reporter.DidFinishAllTests(self)
+
+    # Only serialize the trace if output_format is json.
+    if (self._output_dir and
+        any(isinstance(o, json_output_formatter.JsonOutputFormatter)
+            for o in self._output_formatters)):
+      self._SerializeTracesToDirPath(self._output_dir)
+    for output_formatter in self._output_formatters:
+      output_formatter.Format(self)
+
+  def FindValues(self, predicate):
+    """Finds all values matching the specified predicate.
+
+    Args:
+      predicate: A function that takes a Value and returns a bool.
+    Returns:
+      A list of values matching |predicate|.
+    """
+    values = []
+    for value in self.all_page_specific_values:
+      if predicate(value):
+        values.append(value)
+    return values
+
+  def FindPageSpecificValuesForPage(self, page, value_name):
+    return self.FindValues(lambda v: v.page == page and v.name == value_name)
+
+  def FindAllPageSpecificValuesNamed(self, value_name):
+    return self.FindValues(lambda v: v.name == value_name)
+
+  def FindAllPageSpecificValuesFromIRNamed(self, tir_label, value_name):
+    return self.FindValues(lambda v: v.name == value_name
+                           and v.tir_label == tir_label)
+
+  def FindAllTraceValues(self):
+    return self.FindValues(lambda v: isinstance(v, trace.TraceValue))
+
+  def _SerializeTracesToDirPath(self, dir_path):
+    """ Serialize all trace values to files in dir_path and return a list of
+    file handles to those files. """
+    for value in self.FindAllTraceValues():
+      fh = value.Serialize(dir_path)
+      self._serialized_trace_file_ids_to_paths[fh.id] = fh.GetAbsPath()
+
+  def UploadTraceFilesToCloud(self, bucket):
+    for value in self.FindAllTraceValues():
+      value.UploadToCloud(bucket)
+
+  def UploadProfilingFilesToCloud(self, bucket):
+    for page, file_handle_list in self._pages_to_profiling_files.iteritems():
+      for file_handle in file_handle_list:
+        remote_path = ('profiler-file-id_%s-%s%-d%s' % (
+            file_handle.id,
+            datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'),
+            random.randint(1, 100000),
+            file_handle.extension))
+        try:
+          cloud_url = cloud_storage.Insert(
+              bucket, remote_path, file_handle.GetAbsPath())
+          sys.stderr.write(
+              'View generated profiler files online at %s for page %s\n' %
+              (cloud_url, page.display_name))
+          self._pages_to_profiling_files_cloud_url[page].append(cloud_url)
+        except cloud_storage.PermissionError as e:
+          logging.error('Cannot upload profiling files to cloud storage due to '
+                        ' permission error: %s' % e.message)
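A short usage sketch tying the pieces of this module together; the predicate below is hypothetical and simply drops values from repeat runs of the same story:

from telemetry.internal.results import page_test_results


def _keep_first_run_values(value, is_first_result):  # hypothetical predicate
  del value  # Only the first-run flag matters for this sketch.
  return is_first_result


results = page_test_results.PageTestResults(
    value_can_be_added_predicate=_keep_first_run_values)
# WillRunPage()/AddValue()/DidRunPage() calls go here per story; afterwards
# values can be queried, for example:
seconds_values = results.FindValues(lambda v: v.units == 'seconds')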
diff --git a/catapult/telemetry/telemetry/internal/results/page_test_results_unittest.py b/catapult/telemetry/telemetry/internal/results/page_test_results_unittest.py
new file mode 100644
index 0000000..b5b543d
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/results/page_test_results_unittest.py
@@ -0,0 +1,427 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import unittest
+
+from telemetry import story
+from telemetry.internal.results import base_test_results_unittest
+from telemetry.internal.results import page_test_results
+from telemetry import page as page_module
+from telemetry.timeline import trace_data
+from telemetry.value import failure
+from telemetry.value import histogram
+from telemetry.value import improvement_direction
+from telemetry.value import scalar
+from telemetry.value import skip
+from telemetry.value import trace
+
+class PageTestResultsTest(base_test_results_unittest.BaseTestResultsUnittest):
+  def setUp(self):
+    story_set = story.StorySet(base_dir=os.path.dirname(__file__))
+    story_set.AddStory(page_module.Page("http://www.bar.com/", story_set, story_set.base_dir))
+    story_set.AddStory(page_module.Page("http://www.baz.com/", story_set, story_set.base_dir))
+    story_set.AddStory(page_module.Page("http://www.foo.com/", story_set, story_set.base_dir))
+    self.story_set = story_set
+
+  @property
+  def pages(self):
+    return self.story_set.stories
+
+  def testFailures(self):
+    results = page_test_results.PageTestResults()
+    results.WillRunPage(self.pages[0])
+    results.AddValue(
+        failure.FailureValue(self.pages[0], self.CreateException()))
+    results.DidRunPage(self.pages[0])
+
+    results.WillRunPage(self.pages[1])
+    results.DidRunPage(self.pages[1])
+
+    self.assertEqual(set([self.pages[0]]), results.pages_that_failed)
+    self.assertEqual(set([self.pages[1]]), results.pages_that_succeeded)
+
+    self.assertEqual(2, len(results.all_page_runs))
+    self.assertTrue(results.all_page_runs[0].failed)
+    self.assertTrue(results.all_page_runs[1].ok)
+
+  def testSkips(self):
+    results = page_test_results.PageTestResults()
+    results.WillRunPage(self.pages[0])
+    results.AddValue(skip.SkipValue(self.pages[0], 'testing reason'))
+    results.DidRunPage(self.pages[0])
+
+    results.WillRunPage(self.pages[1])
+    results.DidRunPage(self.pages[1])
+
+    self.assertTrue(results.all_page_runs[0].skipped)
+    self.assertEqual(self.pages[0], results.all_page_runs[0].story)
+    self.assertEqual(set([self.pages[0], self.pages[1]]),
+                     results.pages_that_succeeded)
+
+    self.assertEqual(2, len(results.all_page_runs))
+    self.assertTrue(results.all_page_runs[0].skipped)
+    self.assertTrue(results.all_page_runs[1].ok)
+
+  def testBasic(self):
+    results = page_test_results.PageTestResults()
+    results.WillRunPage(self.pages[0])
+    results.AddValue(scalar.ScalarValue(
+        self.pages[0], 'a', 'seconds', 3,
+        improvement_direction=improvement_direction.UP))
+    results.DidRunPage(self.pages[0])
+
+    results.WillRunPage(self.pages[1])
+    results.AddValue(scalar.ScalarValue(
+        self.pages[1], 'a', 'seconds', 3,
+        improvement_direction=improvement_direction.UP))
+    results.DidRunPage(self.pages[1])
+
+    results.PrintSummary()
+
+    values = results.FindPageSpecificValuesForPage(self.pages[0], 'a')
+    self.assertEquals(1, len(values))
+    v = values[0]
+    self.assertEquals(v.name, 'a')
+    self.assertEquals(v.page, self.pages[0])
+
+    values = results.FindAllPageSpecificValuesNamed('a')
+    assert len(values) == 2
+
+  def testUrlIsInvalidValue(self):
+    results = page_test_results.PageTestResults()
+    results.WillRunPage(self.pages[0])
+    self.assertRaises(
+      AssertionError,
+      lambda: results.AddValue(scalar.ScalarValue(
+          self.pages[0], 'url', 'string', 'foo',
+          improvement_direction=improvement_direction.UP)))
+
+  def testAddSummaryValueWithPageSpecified(self):
+    results = page_test_results.PageTestResults()
+    results.WillRunPage(self.pages[0])
+    self.assertRaises(
+      AssertionError,
+      lambda: results.AddSummaryValue(scalar.ScalarValue(
+          self.pages[0], 'a', 'units', 3,
+          improvement_direction=improvement_direction.UP)))
+
+  def testUnitChange(self):
+    results = page_test_results.PageTestResults()
+    results.WillRunPage(self.pages[0])
+    results.AddValue(scalar.ScalarValue(
+        self.pages[0], 'a', 'seconds', 3,
+        improvement_direction=improvement_direction.UP))
+    results.DidRunPage(self.pages[0])
+
+    results.WillRunPage(self.pages[1])
+    self.assertRaises(
+      AssertionError,
+      lambda: results.AddValue(scalar.ScalarValue(
+          self.pages[1], 'a', 'foobgrobbers', 3,
+          improvement_direction=improvement_direction.UP)))
+
+  def testTypeChange(self):
+    results = page_test_results.PageTestResults()
+    results.WillRunPage(self.pages[0])
+    results.AddValue(scalar.ScalarValue(
+        self.pages[0], 'a', 'seconds', 3,
+        improvement_direction=improvement_direction.UP))
+    results.DidRunPage(self.pages[0])
+
+    results.WillRunPage(self.pages[1])
+    self.assertRaises(
+      AssertionError,
+      lambda: results.AddValue(histogram.HistogramValue(
+          self.pages[1], 'a', 'seconds',
+          raw_value_json='{"buckets": [{"low": 1, "high": 2, "count": 1}]}',
+          improvement_direction=improvement_direction.UP)))
+
+  def testGetPagesThatSucceededAllPagesFail(self):
+    results = page_test_results.PageTestResults()
+    results.WillRunPage(self.pages[0])
+    results.AddValue(scalar.ScalarValue(
+        self.pages[0], 'a', 'seconds', 3,
+        improvement_direction=improvement_direction.UP))
+    results.AddValue(failure.FailureValue.FromMessage(self.pages[0], 'message'))
+    results.DidRunPage(self.pages[0])
+
+    results.WillRunPage(self.pages[1])
+    results.AddValue(scalar.ScalarValue(
+        self.pages[1], 'a', 'seconds', 7,
+        improvement_direction=improvement_direction.UP))
+    results.AddValue(failure.FailureValue.FromMessage(self.pages[1], 'message'))
+    results.DidRunPage(self.pages[1])
+
+    results.PrintSummary()
+    self.assertEquals(0, len(results.pages_that_succeeded))
+
+  def testGetSuccessfulPageValuesMergedNoFailures(self):
+    results = page_test_results.PageTestResults()
+    results.WillRunPage(self.pages[0])
+    results.AddValue(scalar.ScalarValue(
+        self.pages[0], 'a', 'seconds', 3,
+        improvement_direction=improvement_direction.UP))
+    self.assertEquals(1, len(results.all_page_specific_values))
+    results.DidRunPage(self.pages[0])
+
+  def testGetAllValuesForSuccessfulPages(self):
+    results = page_test_results.PageTestResults()
+    results.WillRunPage(self.pages[0])
+    value1 = scalar.ScalarValue(
+        self.pages[0], 'a', 'seconds', 3,
+        improvement_direction=improvement_direction.UP)
+    results.AddValue(value1)
+    results.DidRunPage(self.pages[0])
+
+    results.WillRunPage(self.pages[1])
+    value2 = scalar.ScalarValue(
+        self.pages[1], 'a', 'seconds', 3,
+        improvement_direction=improvement_direction.UP)
+    results.AddValue(value2)
+    results.DidRunPage(self.pages[1])
+
+    results.WillRunPage(self.pages[2])
+    value3 = scalar.ScalarValue(
+        self.pages[2], 'a', 'seconds', 3,
+        improvement_direction=improvement_direction.UP)
+    results.AddValue(value3)
+    results.DidRunPage(self.pages[2])
+
+    self.assertEquals(
+        [value1, value2, value3], results.all_page_specific_values)
+
+  def testGetAllValuesForSuccessfulPagesOnePageFails(self):
+    results = page_test_results.PageTestResults()
+    results.WillRunPage(self.pages[0])
+    value1 = scalar.ScalarValue(
+        self.pages[0], 'a', 'seconds', 3,
+        improvement_direction=improvement_direction.UP)
+    results.AddValue(value1)
+    results.DidRunPage(self.pages[0])
+
+    results.WillRunPage(self.pages[1])
+    value2 = failure.FailureValue.FromMessage(self.pages[1], 'Failure')
+    results.AddValue(value2)
+    results.DidRunPage(self.pages[1])
+
+    results.WillRunPage(self.pages[2])
+    value3 = scalar.ScalarValue(
+        self.pages[2], 'a', 'seconds', 3,
+        improvement_direction=improvement_direction.UP)
+    results.AddValue(value3)
+    results.DidRunPage(self.pages[2])
+
+    self.assertEquals(
+        [value1, value2, value3], results.all_page_specific_values)
+
+  def testFindValues(self):
+    results = page_test_results.PageTestResults()
+    results.WillRunPage(self.pages[0])
+    v0 = scalar.ScalarValue(
+        self.pages[0], 'a', 'seconds', 3,
+        improvement_direction=improvement_direction.UP)
+    results.AddValue(v0)
+    v1 = scalar.ScalarValue(
+        self.pages[0], 'a', 'seconds', 4,
+        improvement_direction=improvement_direction.UP)
+    results.AddValue(v1)
+    results.DidRunPage(self.pages[1])
+
+    values = results.FindValues(lambda v: v.value == 3)
+    self.assertEquals([v0], values)
+
+  def testValueWithTIRLabel(self):
+    results = page_test_results.PageTestResults()
+    results.WillRunPage(self.pages[0])
+    v0 = scalar.ScalarValue(
+        self.pages[0], 'a', 'seconds', 3, tir_label='foo',
+        improvement_direction=improvement_direction.UP)
+    results.AddValue(v0)
+    v1 = scalar.ScalarValue(
+        self.pages[0], 'a', 'seconds', 3, tir_label='bar',
+        improvement_direction=improvement_direction.UP)
+    results.AddValue(v1)
+    results.DidRunPage(self.pages[0])
+
+    values = results.FindAllPageSpecificValuesFromIRNamed('foo', 'a')
+    self.assertEquals([v0], values)
+
+  def testTraceValue(self):
+    results = page_test_results.PageTestResults()
+    results.WillRunPage(self.pages[0])
+    results.AddValue(trace.TraceValue(None, trace_data.TraceData({'test' : 1})))
+    results.DidRunPage(self.pages[0])
+
+    results.WillRunPage(self.pages[1])
+    results.AddValue(trace.TraceValue(None, trace_data.TraceData({'test' : 2})))
+    results.DidRunPage(self.pages[1])
+
+    results.PrintSummary()
+
+    values = results.FindAllTraceValues()
+    self.assertEquals(2, len(values))
+
+  def testCleanUpCleansUpTraceValues(self):
+    results = page_test_results.PageTestResults()
+    v0 = trace.TraceValue(None, trace_data.TraceData({'test': 1}))
+    v1 = trace.TraceValue(None, trace_data.TraceData({'test': 2}))
+
+    results.WillRunPage(self.pages[0])
+    results.AddValue(v0)
+    results.DidRunPage(self.pages[0])
+
+    results.WillRunPage(self.pages[1])
+    results.AddValue(v1)
+    results.DidRunPage(self.pages[1])
+
+    results.CleanUp()
+    self.assertTrue(v0.cleaned_up)
+    self.assertTrue(v1.cleaned_up)
+
+  def testNoTracesLeftAfterCleanUp(self):
+    results = page_test_results.PageTestResults()
+    v0 = trace.TraceValue(None, trace_data.TraceData({'test': 1}))
+    v1 = trace.TraceValue(None, trace_data.TraceData({'test': 2}))
+
+    results.WillRunPage(self.pages[0])
+    results.AddValue(v0)
+    results.DidRunPage(self.pages[0])
+
+    results.WillRunPage(self.pages[1])
+    results.AddValue(v1)
+    results.DidRunPage(self.pages[1])
+
+    results.CleanUp()
+    self.assertFalse(results.FindAllTraceValues())
+
+
+class PageTestResultsFilterTest(unittest.TestCase):
+  def setUp(self):
+    story_set = story.StorySet(base_dir=os.path.dirname(__file__))
+    story_set.AddStory(
+        page_module.Page('http://www.foo.com/', story_set, story_set.base_dir))
+    story_set.AddStory(
+        page_module.Page('http://www.bar.com/', story_set, story_set.base_dir))
+    self.story_set = story_set
+
+  @property
+  def pages(self):
+    return self.story_set.stories
+
+  def testFilterValue(self):
+    def AcceptValueNamed_a(value, _):
+      return value.name == 'a'
+    results = page_test_results.PageTestResults(
+        value_can_be_added_predicate=AcceptValueNamed_a)
+    results.WillRunPage(self.pages[0])
+    results.AddValue(scalar.ScalarValue(
+        self.pages[0], 'a', 'seconds', 3,
+        improvement_direction=improvement_direction.UP))
+    results.AddValue(scalar.ScalarValue(
+        self.pages[0], 'b', 'seconds', 3,
+        improvement_direction=improvement_direction.UP))
+    results.DidRunPage(self.pages[0])
+
+    results.WillRunPage(self.pages[1])
+    results.AddValue(scalar.ScalarValue(
+        self.pages[1], 'a', 'seconds', 3,
+        improvement_direction=improvement_direction.UP))
+    results.AddValue(scalar.ScalarValue(
+        self.pages[1], 'd', 'seconds', 3,
+        improvement_direction=improvement_direction.UP))
+    results.DidRunPage(self.pages[1])
+    results.PrintSummary()
+    self.assertEquals(
+        [('a', 'http://www.foo.com/'), ('a', 'http://www.bar.com/')],
+        [(v.name, v.page.url) for v in results.all_page_specific_values])
+
+  def testFilterIsFirstResult(self):
+    def AcceptSecondValues(_, is_first_result):
+      return not is_first_result
+    results = page_test_results.PageTestResults(
+        value_can_be_added_predicate=AcceptSecondValues)
+
+    # First results (filtered out)
+    results.WillRunPage(self.pages[0])
+    results.AddValue(scalar.ScalarValue(
+        self.pages[0], 'a', 'seconds', 7,
+        improvement_direction=improvement_direction.UP))
+    results.AddValue(scalar.ScalarValue(
+        self.pages[0], 'b', 'seconds', 8,
+        improvement_direction=improvement_direction.UP))
+    results.DidRunPage(self.pages[0])
+    results.WillRunPage(self.pages[1])
+    results.AddValue(scalar.ScalarValue(
+        self.pages[1], 'a', 'seconds', 5,
+        improvement_direction=improvement_direction.UP))
+    results.AddValue(scalar.ScalarValue(
+        self.pages[1], 'd', 'seconds', 6,
+        improvement_direction=improvement_direction.UP))
+    results.DidRunPage(self.pages[1])
+
+    # Second results
+    results.WillRunPage(self.pages[0])
+    results.AddValue(scalar.ScalarValue(
+        self.pages[0], 'a', 'seconds', 3,
+        improvement_direction=improvement_direction.UP))
+    results.AddValue(scalar.ScalarValue(
+        self.pages[0], 'b', 'seconds', 4,
+        improvement_direction=improvement_direction.UP))
+    results.DidRunPage(self.pages[0])
+    results.WillRunPage(self.pages[1])
+    results.AddValue(scalar.ScalarValue(
+        self.pages[1], 'a', 'seconds', 1,
+        improvement_direction=improvement_direction.UP))
+    results.AddValue(scalar.ScalarValue(
+        self.pages[1], 'd', 'seconds', 2,
+        improvement_direction=improvement_direction.UP))
+    results.DidRunPage(self.pages[1])
+    results.PrintSummary()
+    expected_values = [
+        ('a', 'http://www.foo.com/', 3),
+        ('b', 'http://www.foo.com/', 4),
+        ('a', 'http://www.bar.com/', 1),
+        ('d', 'http://www.bar.com/', 2)]
+    actual_values = [(v.name, v.page.url, v.value)
+                     for v in results.all_page_specific_values]
+    self.assertEquals(expected_values, actual_values)
+
+  def testFailureValueCannotBeFiltered(self):
+    def AcceptValueNamed_a(value, _):
+      return value.name == 'a'
+    results = page_test_results.PageTestResults(
+        value_can_be_added_predicate=AcceptValueNamed_a)
+    results.WillRunPage(self.pages[0])
+    results.AddValue(scalar.ScalarValue(
+        self.pages[0], 'b', 'seconds', 8,
+        improvement_direction=improvement_direction.UP))
+    failure_value = failure.FailureValue.FromMessage(self.pages[0], 'failure')
+    results.AddValue(failure_value)
+    results.DidRunPage(self.pages[0])
+    results.PrintSummary()
+
+    # Although the predicate says to only accept values named 'a', the failure
+    # value is added anyway.
+    self.assertEquals(len(results.all_page_specific_values), 1)
+    self.assertIn(failure_value, results.all_page_specific_values)
+
+  def testSkipValueCannotBeFiltered(self):
+    def AcceptValueNamed_a(value, _):
+      return value.name == 'a'
+    results = page_test_results.PageTestResults(
+        value_can_be_added_predicate=AcceptValueNamed_a)
+    results.WillRunPage(self.pages[0])
+    skip_value = skip.SkipValue(self.pages[0], 'skip for testing')
+    results.AddValue(scalar.ScalarValue(
+        self.pages[0], 'b', 'seconds', 8,
+        improvement_direction=improvement_direction.UP))
+    results.AddValue(skip_value)
+    results.DidRunPage(self.pages[0])
+    results.PrintSummary()
+
+    # Although the predicate says to only accept values named 'a', the skip
+    # value is added anyway.
+    self.assertEquals(len(results.all_page_specific_values), 1)
+    self.assertIn(skip_value, results.all_page_specific_values)
diff --git a/catapult/telemetry/telemetry/internal/results/progress_reporter.py b/catapult/telemetry/telemetry/internal/results/progress_reporter.py
new file mode 100644
index 0000000..3eea999
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/results/progress_reporter.py
@@ -0,0 +1,27 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class ProgressReporter(object):
+  """A class that reports progress of a benchmark.
+
+  The reporter produces output whenever a significant event happens
+  during the progress of a benchmark, including (but not limited to)
+  when a page run starts, when a page run finishes, and when a failure
+  occurs during a page run.
+
+  The default implementation outputs nothing.
+  """
+
+  def DidAddValue(self, value):
+    pass
+
+  def WillRunPage(self, page_test_results):
+    pass
+
+  def DidRunPage(self, page_test_results):
+    pass
+
+  def DidFinishAllTests(self, page_test_results):
+    pass
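Because the base class is intentionally silent, a subclass only needs to override the hooks it cares about. The sketch below is illustrative only; GTestProgressReporter in this same package is the real implementation used by default:

    import logging

    from telemetry.internal.results import progress_reporter


    class LoggingProgressReporter(progress_reporter.ProgressReporter):
      """Minimal sketch of a reporter that logs instead of staying quiet."""

      def DidAddValue(self, value):
        logging.info('value added: %s (%s)', value.name, value.units)

      def DidRunPage(self, page_test_results):
        logging.info('finished story %d of this run',
                     len(page_test_results.all_page_runs))

      def DidFinishAllTests(self, page_test_results):
        logging.info('%d stories succeeded, %d failure values recorded',
                     len(page_test_results.pages_that_succeeded),
                     len(page_test_results.failures))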
diff --git a/catapult/telemetry/telemetry/internal/results/results_options.py b/catapult/telemetry/telemetry/internal/results/results_options.py
new file mode 100644
index 0000000..e4386f6
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/results/results_options.py
@@ -0,0 +1,171 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import optparse
+import os
+import sys
+
+from catapult_base import cloud_storage  # pylint: disable=import-error
+
+from telemetry.core import util
+from telemetry.internal.results import buildbot_output_formatter
+from telemetry.internal.results import chart_json_output_formatter
+from telemetry.internal.results import csv_pivot_table_output_formatter
+from telemetry.internal.results import gtest_progress_reporter
+from telemetry.internal.results import html_output_formatter
+from telemetry.internal.results import json_output_formatter
+from telemetry.internal.results import page_test_results
+from telemetry.internal.results import progress_reporter
+
+# Allowed output formats. The default is the first item in the list.
+_OUTPUT_FORMAT_CHOICES = ('html', 'buildbot', 'gtest', 'json',
+    'chartjson', 'csv-pivot-table', 'none')
+
+
+# Filenames to use for given output formats.
+_OUTPUT_FILENAME_LOOKUP = {
+    'html': 'results.html',
+    'json': 'results.json',
+    'chartjson': 'results-chart.json',
+    'csv-pivot-table': 'results-pivot-table.csv'
+}
+
+
+def AddResultsOptions(parser):
+  group = optparse.OptionGroup(parser, 'Results options')
+  group.add_option('--output-format', action='append', dest='output_formats',
+                    choices=_OUTPUT_FORMAT_CHOICES, default=[],
+                    help='Output format. Defaults to "%%default". '
+                    'Can be %s.' % ', '.join(_OUTPUT_FORMAT_CHOICES))
+  group.add_option('-o', '--output',
+                    dest='output_file',
+                    default=None,
+                    help='Redirects output to a file. Defaults to stdout.')
+  group.add_option('--output-dir', default=util.GetBaseDir(),
+                    help='Where to save output data after the run.')
+  group.add_option('--output-trace-tag',
+                    default='',
+                    help='Append a tag to the key of each result trace. Use '
+                    'with html, buildbot, csv-pivot-table output formats.')
+  group.add_option('--reset-results', action='store_true',
+                    help='Delete all stored results.')
+  group.add_option('--upload-results', action='store_true',
+                    help='Upload the results to cloud storage.')
+  group.add_option('--upload-bucket', default='output',
+                    choices=cloud_storage.BUCKET_ALIAS_NAMES,
+                    help='Storage bucket to use for the uploaded results. ' +
+                    'Defaults to output bucket. Supported values are: ' +
+                    ', '.join(cloud_storage.BUCKET_ALIAS_NAMES) + '.')
+  group.add_option('--results-label',
+                    default=None,
+                    help='Optional label to use for the results of a run.')
+  group.add_option('--suppress_gtest_report',
+                   default=False,
+                   help='Whether to suppress GTest progress report.')
+  parser.add_option_group(group)
+
+
+def ProcessCommandLineArgs(parser, args):
+  # TODO(ariblue): Delete this flag entirely at some future date, when the
+  # existence of such a flag has been long forgotten.
+  if args.output_file:
+    parser.error('This flag is deprecated. Please use --output-dir instead.')
+
+  try:
+    os.makedirs(args.output_dir)
+  except OSError:
+    # Do nothing if the output directory already exists. Existing files will
+    # get overwritten.
+    pass
+
+  args.output_dir = os.path.expanduser(args.output_dir)
+
+
+def _GetOutputStream(output_format, output_dir):
+  assert output_format in _OUTPUT_FORMAT_CHOICES, 'Must specify a valid format.'
+  assert output_format not in ('gtest', 'none'), (
+      'Cannot set stream for \'gtest\' or \'none\' output formats.')
+
+  if output_format == 'buildbot':
+    return sys.stdout
+
+  assert output_format in _OUTPUT_FILENAME_LOOKUP, (
+      'No known filename for the \'%s\' output format' % output_format)
+  output_file = os.path.join(output_dir, _OUTPUT_FILENAME_LOOKUP[output_format])
+
+  # TODO(eakuefner): Factor this hack out after we rewrite HTMLOutputFormatter.
+  if output_format == 'html':
+    open(output_file, 'a').close() # Create file if it doesn't exist.
+    return open(output_file, 'r+')
+  else:
+    return open(output_file, 'w+')
+
+
+def _GetProgressReporter(output_skipped_tests_summary, suppress_gtest_report):
+  if suppress_gtest_report:
+    return progress_reporter.ProgressReporter()
+
+  return gtest_progress_reporter.GTestProgressReporter(
+      sys.stdout, output_skipped_tests_summary=output_skipped_tests_summary)
+
+
+def CreateResults(benchmark_metadata, options,
+                  value_can_be_added_predicate=lambda v, is_first: True):
+  """
+  Args:
+    options: Contains the options specified in AddResultsOptions.
+  """
+  if not options.output_formats:
+    options.output_formats = [_OUTPUT_FORMAT_CHOICES[0]]
+
+  output_formatters = []
+  for output_format in options.output_formats:
+    if output_format == 'none' or output_format == "gtest":
+      continue
+
+    output_stream = _GetOutputStream(output_format, options.output_dir)
+    if output_format == 'csv-pivot-table':
+      output_formatters.append(
+          csv_pivot_table_output_formatter.CsvPivotTableOutputFormatter(
+              output_stream, trace_tag=options.output_trace_tag))
+    elif output_format == 'buildbot':
+      output_formatters.append(
+          buildbot_output_formatter.BuildbotOutputFormatter(
+              output_stream, trace_tag=options.output_trace_tag))
+    elif output_format == 'html':
+      # TODO(chrishenry): We show buildbot output so that users can grep
+      # through the results easily without needing to open the html
+      # file.  Another option for this is to output the results directly
+      # in gtest-style results (via some sort of progress reporter),
+      # as we plan to enable gtest-style output for all output formatters.
+      output_formatters.append(
+          buildbot_output_formatter.BuildbotOutputFormatter(
+              sys.stdout, trace_tag=options.output_trace_tag))
+      output_formatters.append(html_output_formatter.HtmlOutputFormatter(
+          output_stream, benchmark_metadata, options.reset_results,
+          options.upload_results, options.browser_type,
+          options.results_label))
+    elif output_format == 'json':
+      output_formatters.append(json_output_formatter.JsonOutputFormatter(
+          output_stream, benchmark_metadata))
+    elif output_format == 'chartjson':
+      output_formatters.append(
+          chart_json_output_formatter.ChartJsonOutputFormatter(
+              output_stream, benchmark_metadata))
+    else:
+      # Should never be reached. The parser enforces the choices.
+      raise Exception('Invalid --output-format "%s". Valid choices are: %s'
+                      % (output_format, ', '.join(_OUTPUT_FORMAT_CHOICES)))
+
+  # TODO(chrishenry): This is here to not change the output of
+  # gtest. Let's try enabling skipped tests summary for gtest test
+  # results too (in a separate patch), and see if we break anything.
+  output_skipped_tests_summary = 'gtest' in options.output_formats
+
+  reporter = _GetProgressReporter(output_skipped_tests_summary,
+                                  options.suppress_gtest_report)
+  return page_test_results.PageTestResults(
+      output_formatters=output_formatters, progress_reporter=reporter,
+      output_dir=options.output_dir,
+      value_can_be_added_predicate=value_can_be_added_predicate)
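For orientation, here is a hedged sketch of how a harness might wire these helpers together; the output directory is a made-up example, and BenchmarkMetadata taking a single name argument is inferred from the unit tests later in this patch:

    import optparse

    from telemetry import benchmark
    from telemetry.internal.results import results_options

    parser = optparse.OptionParser()
    results_options.AddResultsOptions(parser)
    options, _ = parser.parse_args(['--output-format', 'json',
                                    '--output-dir', '/tmp/telemetry-results'])
    results_options.ProcessCommandLineArgs(parser, options)

    metadata = benchmark.BenchmarkMetadata('example_benchmark')
    results = results_options.CreateResults(metadata, options)

With 'json' selected, CreateResults opens results.json in the output directory, attaches a JsonOutputFormatter, and uses the default gtest-style progress reporter on stdout.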
diff --git a/catapult/telemetry/telemetry/internal/results/story_run.py b/catapult/telemetry/telemetry/internal/results/story_run.py
new file mode 100644
index 0000000..078d0fc
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/results/story_run.py
@@ -0,0 +1,51 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.value import failure
+from telemetry.value import skip
+
+
+class StoryRun(object):
+  def __init__(self, story):
+    self._story = story
+    self._values = []
+
+  def AddValue(self, value):
+    self._values.append(value)
+
+  @property
+  def story(self):
+    return self._story
+
+  @property
+  def values(self):
+    """The values that correspond to this story run."""
+    return self._values
+
+  @property
+  def ok(self):
+    """Whether the current run is still ok.
+
+    To be precise: returns true if there is neither FailureValue nor
+    SkipValue in self.values.
+    """
+    return not self.skipped and not self.failed
+
+  @property
+  def skipped(self):
+    """Whether the current run is being skipped.
+
+    To be precise: returns true if there is any SkipValue in self.values.
+    """
+    return any(isinstance(v, skip.SkipValue) for v in self.values)
+
+  @property
+  def failed(self):
+    """Whether the current run failed.
+
+    To be precise: returns true if there is a FailureValue but no
+    SkipValue in self.values.
+    """
+    return not self.skipped and any(
+        isinstance(v, failure.FailureValue) for v in self.values)
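One subtlety of the properties above is that a SkipValue takes precedence over a FailureValue in the same run. The fragment below sketches that precedence; some_story is a placeholder for any telemetry Story instance, and the unit tests that follow exercise the same behaviour:

    from telemetry.internal.results import story_run
    from telemetry.value import failure
    from telemetry.value import skip

    # `some_story` stands in for any telemetry Story instance.
    run = story_run.StoryRun(some_story)
    run.AddValue(failure.FailureValue.FromMessage(some_story, 'boom'))
    assert run.failed and not run.skipped and not run.ok

    run.AddValue(skip.SkipValue(some_story, 'flaky on this configuration'))
    assert run.skipped and not run.failed and not run.ok  # Skip wins.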
diff --git a/catapult/telemetry/telemetry/internal/results/story_run_unittest.py b/catapult/telemetry/telemetry/internal/results/story_run_unittest.py
new file mode 100644
index 0000000..bb93eed
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/results/story_run_unittest.py
@@ -0,0 +1,80 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.internal.results import story_run
+from telemetry.story import shared_state
+from telemetry.story import story_set
+from telemetry import story as story_module
+from telemetry.value import failure
+from telemetry.value import improvement_direction
+from telemetry.value import scalar
+from telemetry.value import skip
+
+
+# pylint: disable=abstract-method
+class SharedStateBar(shared_state.SharedState):
+  pass
+
+class StoryFoo(story_module.Story):
+  def __init__(self, name='', labels=None):
+    super(StoryFoo, self).__init__(
+        SharedStateBar, name, labels)
+
+class StoryRunTest(unittest.TestCase):
+  def setUp(self):
+    self.story_set = story_set.StorySet()
+    self.story_set.AddStory(StoryFoo())
+
+  @property
+  def stories(self):
+    return self.story_set.stories
+
+  def testStoryRunFailed(self):
+    run = story_run.StoryRun(self.stories[0])
+    run.AddValue(failure.FailureValue.FromMessage(self.stories[0], 'test'))
+    self.assertFalse(run.ok)
+    self.assertTrue(run.failed)
+    self.assertFalse(run.skipped)
+
+    run = story_run.StoryRun(self.stories[0])
+    run.AddValue(scalar.ScalarValue(
+        self.stories[0], 'a', 's', 1,
+        improvement_direction=improvement_direction.UP))
+    run.AddValue(failure.FailureValue.FromMessage(self.stories[0], 'test'))
+    self.assertFalse(run.ok)
+    self.assertTrue(run.failed)
+    self.assertFalse(run.skipped)
+
+  def testStoryRunSkipped(self):
+    run = story_run.StoryRun(self.stories[0])
+    run.AddValue(failure.FailureValue.FromMessage(self.stories[0], 'test'))
+    run.AddValue(skip.SkipValue(self.stories[0], 'test'))
+    self.assertFalse(run.ok)
+    self.assertFalse(run.failed)
+    self.assertTrue(run.skipped)
+
+    run = story_run.StoryRun(self.stories[0])
+    run.AddValue(scalar.ScalarValue(
+        self.stories[0], 'a', 's', 1,
+        improvement_direction=improvement_direction.UP))
+    run.AddValue(skip.SkipValue(self.stories[0], 'test'))
+    self.assertFalse(run.ok)
+    self.assertFalse(run.failed)
+    self.assertTrue(run.skipped)
+
+  def testStoryRunSucceeded(self):
+    run = story_run.StoryRun(self.stories[0])
+    self.assertTrue(run.ok)
+    self.assertFalse(run.failed)
+    self.assertFalse(run.skipped)
+
+    run = story_run.StoryRun(self.stories[0])
+    run.AddValue(scalar.ScalarValue(
+        self.stories[0], 'a', 's', 1,
+        improvement_direction=improvement_direction.UP))
+    self.assertTrue(run.ok)
+    self.assertFalse(run.failed)
+    self.assertFalse(run.skipped)
diff --git a/catapult/telemetry/telemetry/internal/story_runner.py b/catapult/telemetry/telemetry/internal/story_runner.py
new file mode 100644
index 0000000..f975ca3
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/story_runner.py
@@ -0,0 +1,410 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import optparse
+import os
+import sys
+import time
+
+from catapult_base import cloud_storage  # pylint: disable=import-error
+
+from telemetry.core import exceptions
+from telemetry.internal.actions import page_action
+from telemetry.internal.browser import browser_finder
+from telemetry.internal.results import results_options
+from telemetry.internal.util import exception_formatter
+from telemetry import page
+from telemetry.page import page_test
+from telemetry import story as story_module
+from telemetry.util import wpr_modes
+from telemetry.value import failure
+from telemetry.value import skip
+from telemetry.web_perf import story_test
+
+
+class ArchiveError(Exception):
+  pass
+
+
+def AddCommandLineArgs(parser):
+  story_module.StoryFilter.AddCommandLineArgs(parser)
+  results_options.AddResultsOptions(parser)
+
+  # Page set options
+  group = optparse.OptionGroup(parser, 'Page set repeat options')
+  group.add_option('--page-repeat', default=1, type='int',
+                   help='Number of times to repeat each individual page '
+                   'before proceeding with the next page in the pageset.')
+  group.add_option('--pageset-repeat', default=1, type='int',
+                   help='Number of times to repeat the entire pageset.')
+  group.add_option('--max-failures', default=None, type='int',
+                   help='Maximum number of test failures before aborting '
+                   'the run. Defaults to the number specified by the '
+                   'PageTest.')
+  parser.add_option_group(group)
+
+  # WPR options
+  group = optparse.OptionGroup(parser, 'Web Page Replay options')
+  group.add_option('--use-live-sites',
+      dest='use_live_sites', action='store_true',
+      help='Run against live sites and ignore the Web Page Replay archives.')
+  parser.add_option_group(group)
+
+  parser.add_option('-d', '--also-run-disabled-tests',
+                    dest='run_disabled_tests',
+                    action='store_true', default=False,
+                    help='Ignore @Disabled and @Enabled restrictions.')
+
+def ProcessCommandLineArgs(parser, args):
+  story_module.StoryFilter.ProcessCommandLineArgs(parser, args)
+  results_options.ProcessCommandLineArgs(parser, args)
+
+  # Page set options
+  if args.page_repeat < 1:
+    parser.error('--page-repeat must be a positive integer.')
+  if args.pageset_repeat < 1:
+    parser.error('--pageset-repeat must be a positive integer.')
+
+
+def _RunStoryAndProcessErrorIfNeeded(story, results, state, test):
+  def ProcessError():
+    results.AddValue(failure.FailureValue(story, sys.exc_info()))
+  try:
+    if isinstance(test, story_test.StoryTest):
+      test.WillRunStory(state.platform)
+    state.WillRunStory(story)
+    if not state.CanRunStory(story):
+      results.AddValue(skip.SkipValue(
+          story,
+          'Skipped because story is not supported '
+          '(SharedState.CanRunStory() returns False).'))
+      return
+    state.RunStory(results)
+    if isinstance(test, story_test.StoryTest):
+      test.Measure(state.platform, results)
+  except (page_test.Failure, exceptions.TimeoutException,
+          exceptions.LoginException, exceptions.ProfilingException):
+    ProcessError()
+  except exceptions.Error:
+    ProcessError()
+    raise
+  except page_action.PageActionNotSupported as e:
+    results.AddValue(
+        skip.SkipValue(story, 'Unsupported page action: %s' % e))
+  except Exception:
+    results.AddValue(
+        failure.FailureValue(
+            story, sys.exc_info(), 'Unhandlable exception raised.'))
+    raise
+  finally:
+    has_existing_exception = (sys.exc_info() != (None, None, None))
+    try:
+      state.DidRunStory(results)
+      # If state.DidRunStory raises an exception, things are messed up badly
+      # and we do not need to run test.DidRunStory at that point.
+      if isinstance(test, story_test.StoryTest):
+        test.DidRunStory(state.platform)
+      else:
+        test.DidRunPage(state.platform)
+    except Exception:
+      if not has_existing_exception:
+        raise
+      # Print current exception and propagate existing exception.
+      exception_formatter.PrintFormattedException(
+          msg='Exception raised when cleaning story run: ')
+
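The helper above records recoverable failures (page_test.Failure, timeouts, login and profiling errors) and swallows them, turns unsupported page actions into skips, and re-raises exceptions.Error after recording it. The re-raise exists so the caller can reset the shared state and retry; Run() further down does essentially the following:

    try:
      _RunStoryAndProcessErrorIfNeeded(story, results, state, test)
    except exceptions.Error:
      # The failure was already recorded as a FailureValue; tearing the shared
      # state down lets the next iteration construct a fresh one and retry.
      state.TearDownState()
      state = None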
+
+class StoryGroup(object):
+  def __init__(self, shared_state_class):
+    self._shared_state_class = shared_state_class
+    self._stories = []
+
+  @property
+  def shared_state_class(self):
+    return self._shared_state_class
+
+  @property
+  def stories(self):
+    return self._stories
+
+  def AddStory(self, story):
+    assert (story.shared_state_class is
+            self._shared_state_class)
+    self._stories.append(story)
+
+
+def StoriesGroupedByStateClass(story_set, allow_multiple_groups):
+  """ Returns a list of story groups which each contains stories with
+  the same shared_state_class.
+
+  Example:
+    Assume A1, A2, A3 are stories with same shared story class, and
+    similar for B1, B2.
+    If their orders in story set is A1 A2 B1 B2 A3, then the grouping will
+    be [A1 A2] [B1 B2] [A3].
+
+  It's purposefully done this way to make sure that order of
+  stories are the same of that defined in story_set. It's recommended that
+  stories with the same states should be arranged next to each others in
+  story sets to reduce the overhead of setting up & tearing down the
+  shared story state.
+  """
+  story_groups = []
+  story_groups.append(
+      StoryGroup(story_set[0].shared_state_class))
+  for story in story_set:
+    if (story.shared_state_class is not
+        story_groups[-1].shared_state_class):
+      if not allow_multiple_groups:
+        raise ValueError('This StorySet is only allowed to have one '
+                         'SharedState but contains the following '
+                         'SharedState classes: %s, %s.\n Either '
+                         'remove the extra SharedStates or override '
+                         'allow_mixed_story_states.' % (
+                         story_groups[-1].shared_state_class,
+                         story.shared_state_class))
+      story_groups.append(
+          StoryGroup(story.shared_state_class))
+    story_groups[-1].AddStory(story)
+  return story_groups
+
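A small, self-contained demonstration of the grouping rule described in the docstring above; FooState, BarState and FakeStory are hypothetical stand-ins that only provide the shared_state_class attribute the function actually inspects:

    from telemetry.internal import story_runner


    class FooState(object):
      pass


    class BarState(object):
      pass


    class FakeStory(object):
      # Stand-in exposing the single attribute the grouping code reads.
      def __init__(self, shared_state_class, name):
        self.shared_state_class = shared_state_class
        self.name = name


    stories = [FakeStory(FooState, 'A1'), FakeStory(FooState, 'A2'),
               FakeStory(BarState, 'B1'), FakeStory(BarState, 'B2'),
               FakeStory(FooState, 'A3')]
    groups = story_runner.StoriesGroupedByStateClass(stories, True)
    print([[s.name for s in group.stories] for group in groups])
    # Prints: [['A1', 'A2'], ['B1', 'B2'], ['A3']]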
+
+def Run(test, story_set, finder_options, results, max_failures=None,
+        should_tear_down_state_after_each_story_run=False):
+  """Runs a given test against a given page_set with the given options.
+
+  Stops execution on unexpected exceptions such as KeyboardInterrupt.
+  Certain exceptions are "white listed": the story runner records them as
+  failures and continues running the remaining stories.
+  """
+  # Filter page set based on options.
+  stories = filter(story_module.StoryFilter.IsSelected, story_set)
+
+  if (not finder_options.use_live_sites and story_set.bucket and
+      finder_options.browser_options.wpr_mode != wpr_modes.WPR_RECORD):
+    serving_dirs = story_set.serving_dirs
+    for directory in serving_dirs:
+      cloud_storage.GetFilesInDirectoryIfChanged(directory,
+                                                 story_set.bucket)
+    if not _UpdateAndCheckArchives(
+        story_set.archive_data_file, story_set.wpr_archive_info,
+        stories):
+      return
+
+  if not stories:
+    return
+
+  # Effective max failures gives priority to command-line flag value.
+  effective_max_failures = finder_options.max_failures
+  if effective_max_failures is None:
+    effective_max_failures = max_failures
+
+  story_groups = StoriesGroupedByStateClass(
+      stories,
+      story_set.allow_mixed_story_states)
+
+  for group in story_groups:
+    state = None
+    try:
+      for _ in xrange(finder_options.pageset_repeat):
+        for story in group.stories:
+          for _ in xrange(finder_options.page_repeat):
+            if not state:
+              # Construct shared state by using a copy of finder_options. Shared
+              # state may update the finder_options. If we tear down the shared
+              # state after this story run, we want to construct the shared
+              # state for the next story from the original finder_options.
+              state = group.shared_state_class(
+                  test, finder_options.Copy(), story_set)
+            results.WillRunPage(story)
+            try:
+              _WaitForThermalThrottlingIfNeeded(state.platform)
+              _RunStoryAndProcessErrorIfNeeded(story, results, state, test)
+            except exceptions.Error:
+              # Catch all Telemetry errors to give the story a chance to retry.
+              # The retry is enabled by tearing down the state and creating
+              # a new state instance in the next iteration.
+              try:
+                # If TearDownState raises, do not catch the exception.
+                # (The Error was saved as a failure value.)
+                state.TearDownState()
+              finally:
+                # Later finally-blocks use state, so ensure it is cleared.
+                state = None
+            finally:
+              has_existing_exception = sys.exc_info() != (None, None, None)
+              try:
+                if state:
+                  _CheckThermalThrottling(state.platform)
+                results.DidRunPage(story)
+              except Exception:
+                if not has_existing_exception:
+                  raise
+                # Print current exception and propagate existing exception.
+                exception_formatter.PrintFormattedException(
+                    msg='Exception from result processing:')
+              if state and should_tear_down_state_after_each_story_run:
+                state.TearDownState()
+                state = None
+          if (effective_max_failures is not None and
+              len(results.failures) > effective_max_failures):
+            logging.error('Too many failures. Aborting.')
+            return
+    finally:
+      if state:
+        has_existing_exception = sys.exc_info() != (None, None, None)
+        try:
+          state.TearDownState()
+        except Exception:
+          if not has_existing_exception:
+            raise
+          # Print current exception and propagate existing exception.
+          exception_formatter.PrintFormattedException(
+              msg='Exception from TearDownState:')
+
+
+def RunBenchmark(benchmark, finder_options):
+  """Run this test with the given options.
+
+  Returns:
+    The number of failure values (up to 254) or 255 if there is an uncaught
+    exception.
+  """
+  benchmark.CustomizeBrowserOptions(finder_options.browser_options)
+
+  possible_browser = browser_finder.FindBrowser(finder_options)
+  if possible_browser and benchmark.ShouldDisable(possible_browser):
+    logging.warning('%s is disabled on the selected browser', benchmark.Name())
+    if finder_options.run_disabled_tests:
+      logging.warning(
+          'Running benchmark anyway due to: --also-run-disabled-tests')
+    else:
+      logging.warning(
+          'Try --also-run-disabled-tests to force the benchmark to run.')
+      return 1
+
+  pt = benchmark.CreatePageTest(finder_options)
+  pt.__name__ = benchmark.__class__.__name__
+
+  if hasattr(benchmark, '_disabled_strings'):
+    # pylint: disable=protected-access
+    pt._disabled_strings = benchmark._disabled_strings
+  if hasattr(benchmark, '_enabled_strings'):
+    # pylint: disable=protected-access
+    pt._enabled_strings = benchmark._enabled_strings
+
+  stories = benchmark.CreateStorySet(finder_options)
+  if isinstance(pt, page_test.PageTest):
+    if any(not isinstance(p, page.Page) for p in stories.stories):
+      raise Exception(
+          'PageTest must be used with StorySet containing only '
+          'telemetry.page.Page stories.')
+
+  benchmark_metadata = benchmark.GetMetadata()
+  with results_options.CreateResults(
+      benchmark_metadata, finder_options,
+      benchmark.ValueCanBeAddedPredicate) as results:
+    try:
+      Run(pt, stories, finder_options, results, benchmark.max_failures,
+          benchmark.ShouldTearDownStateAfterEachStoryRun())
+      return_code = min(254, len(results.failures))
+    except Exception:
+      exception_formatter.PrintFormattedException()
+      return_code = 255
+
+    try:
+      bucket = cloud_storage.BUCKET_ALIASES[finder_options.upload_bucket]
+      if finder_options.upload_results:
+        results.UploadTraceFilesToCloud(bucket)
+        results.UploadProfilingFilesToCloud(bucket)
+    finally:
+      results.PrintSummary()
+  return return_code
+
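A hedged note on consuming the return value documented above: my_benchmark and finder_options below stand in for a configured Benchmark and its parsed finder options, and the messages are illustrative exit-status plumbing only:

    import sys

    from telemetry.internal import story_runner

    # `my_benchmark` and `finder_options` are assumed to be a telemetry
    # Benchmark instance and its parsed browser finder options.
    return_code = story_runner.RunBenchmark(my_benchmark, finder_options)
    if return_code == 255:
      sys.stderr.write('Uncaught exception while running the benchmark.\n')
    elif return_code:
      # Note: a value of 1 is also returned when the benchmark is disabled on
      # the selected browser, so small values are ambiguous.
      sys.stderr.write('%d failure value(s) recorded (capped at 254).\n'
                       % return_code)
    sys.exit(return_code)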
+
+def _UpdateAndCheckArchives(archive_data_file, wpr_archive_info,
+                            filtered_stories):
+  """Verifies that all stories are local or have WPR archives.
+
+  Logs errors and raises ArchiveError if any archives are missing.
+  """
+  # Report any problems with the entire story set.
+  if any(not story.is_local for story in filtered_stories):
+    if not archive_data_file:
+      logging.error('The story set is missing an "archive_data_file" '
+                    'property.\nTo run from live sites pass the flag '
+                    '--use-live-sites.\nTo create an archive file add an '
+                    'archive_data_file property to the story set and then '
+                    'run record_wpr.')
+      raise ArchiveError('No archive data file.')
+    if not wpr_archive_info:
+      logging.error('The archive info file is missing.\n'
+                    'To fix this, either add svn-internal to your '
+                    '.gclient using http://goto/read-src-internal, '
+                    'or create a new archive using record_wpr.')
+      raise ArchiveError('No archive info file.')
+    wpr_archive_info.DownloadArchivesIfNeeded()
+
+  # Report any problems with individual story.
+  stories_missing_archive_path = []
+  stories_missing_archive_data = []
+  for story in filtered_stories:
+    if not story.is_local:
+      archive_path = wpr_archive_info.WprFilePathForStory(story)
+      if not archive_path:
+        stories_missing_archive_path.append(story)
+      elif not os.path.isfile(archive_path):
+        stories_missing_archive_data.append(story)
+  if stories_missing_archive_path:
+    logging.error(
+        'The story set archives for some stories do not exist.\n'
+        'To fix this, record those stories using record_wpr.\n'
+        'To ignore this warning and run against live sites, '
+        'pass the flag --use-live-sites.')
+    logging.error(
+        'stories without archives: %s',
+        ', '.join(story.display_name
+                  for story in stories_missing_archive_path))
+  if stories_missing_archive_data:
+    logging.error(
+        'The story set archives for some stories are missing.\n'
+        'Someone forgot to check them in, uploaded them to the '
+        'wrong cloud storage bucket, or they were deleted.\n'
+        'To fix this, record those stories using record_wpr.\n'
+        'To ignore this warning and run against live sites, '
+        'pass the flag --use-live-sites.')
+    logging.error(
+        'stories missing archives: %s',
+        ', '.join(story.display_name
+                  for story in stories_missing_archive_data))
+  if stories_missing_archive_path or stories_missing_archive_data:
+    raise ArchiveError('Archive file is missing stories.')
+  # Only run valid stories if no problems with the story set or
+  # individual stories.
+  return True
+
+
+def _WaitForThermalThrottlingIfNeeded(platform):
+  if not platform.CanMonitorThermalThrottling():
+    return
+  thermal_throttling_retry = 0
+  while (platform.IsThermallyThrottled() and
+         thermal_throttling_retry < 3):
+    logging.warning('Thermally throttled, waiting (%d)...',
+                    thermal_throttling_retry)
+    thermal_throttling_retry += 1
+    time.sleep(thermal_throttling_retry * 2)
+
+  if thermal_throttling_retry and platform.IsThermallyThrottled():
+    logging.warning('Device is thermally throttled before running '
+                    'performance tests, results will vary.')
+
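For context on the back-off above: the helper name and retry cap in this sketch are illustrative, but the arithmetic matches the loop, which sleeps at most 2 + 4 + 6 = 12 seconds before warning and moving on:

    def _MaxThrottleWaitSeconds(max_retries=3):
      # Mirrors the loop above: it sleeps retry * 2 seconds for retry = 1..3.
      return sum(retry * 2 for retry in range(1, max_retries + 1))


    assert _MaxThrottleWaitSeconds() == 12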
+
+def _CheckThermalThrottling(platform):
+  if not platform.CanMonitorThermalThrottling():
+    return
+  if platform.HasBeenThermallyThrottled():
+    logging.warning('Device has been thermally throttled during '
+                    'performance tests, results will vary.')
diff --git a/catapult/telemetry/telemetry/internal/story_runner_unittest.py b/catapult/telemetry/telemetry/internal/story_runner_unittest.py
new file mode 100644
index 0000000..a6cd6cf
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/story_runner_unittest.py
@@ -0,0 +1,724 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import StringIO
+import sys
+import unittest
+
+from catapult_base import cloud_storage  # pylint: disable=import-error
+
+from telemetry import benchmark
+from telemetry.core import exceptions
+from telemetry.core import util
+from telemetry import decorators
+from telemetry.internal.results import results_options
+from telemetry.internal import story_runner
+from telemetry.internal.util import exception_formatter as ex_formatter_module
+from telemetry.page import page as page_module
+from telemetry.page import page_test
+from telemetry import story as story_module
+from telemetry.testing import options_for_unittests
+from telemetry.testing import system_stub
+import mock
+from telemetry.value import improvement_direction
+from telemetry.value import list_of_scalar_values
+from telemetry.value import scalar
+from telemetry.value import summary as summary_module
+from telemetry.web_perf import story_test
+from telemetry.web_perf import timeline_based_measurement
+from telemetry.wpr import archive_info
+
+# This linter complains if we define classes nested inside functions.
+# pylint: disable=bad-super-call
+
+
+class FakePlatform(object):
+  def CanMonitorThermalThrottling(self):
+    return False
+
+
+class TestSharedState(story_module.SharedState):
+
+  _platform = FakePlatform()
+
+  @classmethod
+  def SetTestPlatform(cls, platform):
+    cls._platform = platform
+
+  def __init__(self, test, options, story_set):
+    super(TestSharedState, self).__init__(
+        test, options, story_set)
+    self._test = test
+    self._current_story = None
+
+  @property
+  def platform(self):
+    return self._platform
+
+  def WillRunStory(self, story):
+    self._current_story = story
+
+  def CanRunStory(self, story):
+    return True
+
+  def RunStory(self, results):
+    raise NotImplementedError
+
+  def DidRunStory(self, results):
+    pass
+
+  def TearDownState(self):
+    pass
+
+
+class TestSharedPageState(TestSharedState):
+  def RunStory(self, results):
+    self._test.RunPage(self._current_story, None, results)
+
+
+class FooStoryState(TestSharedPageState):
+  pass
+
+
+class BarStoryState(TestSharedPageState):
+  pass
+
+
+class DummyTest(page_test.PageTest):
+  def RunPage(self, *_):
+    pass
+
+  def ValidateAndMeasurePage(self, page, tab, results):
+    pass
+
+
+class EmptyMetadataForTest(benchmark.BenchmarkMetadata):
+  def __init__(self):
+    super(EmptyMetadataForTest, self).__init__('')
+
+
+class DummyLocalStory(story_module.Story):
+  def __init__(self, shared_state_class, name=''):
+    super(DummyLocalStory, self).__init__(
+        shared_state_class, name=name)
+
+  def Run(self, shared_state):
+    pass
+
+  @property
+  def is_local(self):
+    return True
+
+class MixedStateStorySet(story_module.StorySet):
+  @property
+  def allow_mixed_story_states(self):
+    return True
+
+def SetupStorySet(allow_multiple_story_states, story_state_list):
+  if allow_multiple_story_states:
+    story_set = MixedStateStorySet()
+  else:
+    story_set = story_module.StorySet()
+  for story_state in story_state_list:
+    story_set.AddStory(DummyLocalStory(story_state))
+  return story_set
+
+def _GetOptionForUnittest():
+  options = options_for_unittests.GetCopy()
+  options.output_formats = ['none']
+  options.suppress_gtest_report = False
+  parser = options.CreateParser()
+  story_runner.AddCommandLineArgs(parser)
+  options.MergeDefaultValues(parser.get_default_values())
+  story_runner.ProcessCommandLineArgs(parser, options)
+  return options
+
+
+class FakeExceptionFormatterModule(object):
+  @staticmethod
+  def PrintFormattedException(
+      exception_class=None, exception=None, tb=None, msg=None):
+    pass
+
+
+def GetNumberOfSuccessfulPageRuns(results):
+  return len([run for run in results.all_page_runs if run.ok or run.skipped])
+
+
+class TestOnlyException(Exception):
+  pass
+
+
+class StoryRunnerTest(unittest.TestCase):
+
+  def setUp(self):
+    self.fake_stdout = StringIO.StringIO()
+    self.actual_stdout = sys.stdout
+    sys.stdout = self.fake_stdout
+    self.options = _GetOptionForUnittest()
+    self.results = results_options.CreateResults(
+        EmptyMetadataForTest(), self.options)
+    self._story_runner_logging_stub = None
+
+  def SuppressExceptionFormatting(self):
+    """Fake out exception formatter to avoid spamming the unittest stdout."""
+    story_runner.exception_formatter = FakeExceptionFormatterModule
+    self._story_runner_logging_stub = system_stub.Override(
+      story_runner, ['logging'])
+
+  def RestoreExceptionFormatter(self):
+    story_runner.exception_formatter = ex_formatter_module
+    if self._story_runner_logging_stub:
+      self._story_runner_logging_stub.Restore()
+      self._story_runner_logging_stub = None
+
+  def tearDown(self):
+    sys.stdout = self.actual_stdout
+    self.RestoreExceptionFormatter()
+
+  def testStoriesGroupedByStateClass(self):
+    foo_states = [FooStoryState, FooStoryState, FooStoryState,
+                  FooStoryState, FooStoryState]
+    mixed_states = [FooStoryState, FooStoryState, FooStoryState,
+                    BarStoryState, FooStoryState]
+    # StorySet's are only allowed to have one SharedState.
+    story_set = SetupStorySet(False, foo_states)
+    story_groups = (
+        story_runner.StoriesGroupedByStateClass(
+            story_set, False))
+    self.assertEqual(len(story_groups), 1)
+    story_set = SetupStorySet(False, mixed_states)
+    self.assertRaises(
+        ValueError,
+        story_runner.StoriesGroupedByStateClass,
+        story_set, False)
+    # BaseStorySets are allowed to have multiple SharedStates.
+    mixed_story_set = SetupStorySet(True, mixed_states)
+    story_groups = (
+        story_runner.StoriesGroupedByStateClass(
+            mixed_story_set, True))
+    self.assertEqual(len(story_groups), 3)
+    self.assertEqual(story_groups[0].shared_state_class,
+                     FooStoryState)
+    self.assertEqual(story_groups[1].shared_state_class,
+                     BarStoryState)
+    self.assertEqual(story_groups[2].shared_state_class,
+                     FooStoryState)
+
+  def RunStoryTest(self, s, expected_successes):
+    test = DummyTest()
+    story_runner.Run(
+        test, s, self.options, self.results)
+    self.assertEquals(0, len(self.results.failures))
+    self.assertEquals(expected_successes,
+                      GetNumberOfSuccessfulPageRuns(self.results))
+
+  def testStoryTest(self):
+    all_foo = [FooStoryState, FooStoryState, FooStoryState]
+    one_bar = [FooStoryState, FooStoryState, BarStoryState]
+    story_set = SetupStorySet(True, one_bar)
+    self.RunStoryTest(story_set, 3)
+    story_set = SetupStorySet(True, all_foo)
+    self.RunStoryTest(story_set, 6)
+    story_set = SetupStorySet(False, all_foo)
+    self.RunStoryTest(story_set, 9)
+    story_set = SetupStorySet(False, one_bar)
+    test = DummyTest()
+    self.assertRaises(ValueError, story_runner.Run, test, story_set,
+                      self.options, self.results)
+
+  def testSuccessfulTimelineBasedMeasurementTest(self):
+    """Check that PageTest is not required for story_runner.Run.
+
+    Any PageTest related calls or attributes need to only be called
+    for PageTest tests.
+    """
+    class TestSharedTbmState(TestSharedState):
+      def RunStory(self, results):
+        pass
+
+    TEST_WILL_RUN_STORY = 'test.WillRunStory'
+    TEST_MEASURE = 'test.Measure'
+    TEST_DID_RUN_STORY = 'test.DidRunStory'
+
+    EXPECTED_CALLS_IN_ORDER = [TEST_WILL_RUN_STORY,
+                               TEST_MEASURE,
+                               TEST_DID_RUN_STORY]
+
+    test = timeline_based_measurement.TimelineBasedMeasurement(
+        timeline_based_measurement.Options())
+
+    manager = mock.MagicMock()
+    test.WillRunStory = mock.MagicMock()
+    test.Measure = mock.MagicMock()
+    test.DidRunStory = mock.MagicMock()
+    manager.attach_mock(test.WillRunStory, TEST_WILL_RUN_STORY)
+    manager.attach_mock(test.Measure, TEST_MEASURE)
+    manager.attach_mock(test.DidRunStory, TEST_DID_RUN_STORY)
+
+    story_set = story_module.StorySet()
+    story_set.AddStory(DummyLocalStory(TestSharedTbmState))
+    story_set.AddStory(DummyLocalStory(TestSharedTbmState))
+    story_set.AddStory(DummyLocalStory(TestSharedTbmState))
+    story_runner.Run(
+        test, story_set, self.options, self.results)
+    self.assertEquals(0, len(self.results.failures))
+    self.assertEquals(3, GetNumberOfSuccessfulPageRuns(self.results))
+
+    self.assertEquals(3*EXPECTED_CALLS_IN_ORDER,
+                      [call[0] for call in manager.mock_calls])
+
+  def testCallOrderBetweenStoryTestAndSharedState(self):
+    """Check that the call order between StoryTest and SharedState is correct.
+    """
+    TEST_WILL_RUN_STORY = 'test.WillRunStory'
+    TEST_MEASURE = 'test.Measure'
+    TEST_DID_RUN_STORY = 'test.DidRunStory'
+    STATE_WILL_RUN_STORY = 'state.WillRunStory'
+    STATE_RUN_STORY = 'state.RunStory'
+    STATE_DID_RUN_STORY = 'state.DidRunStory'
+
+    EXPECTED_CALLS_IN_ORDER = [TEST_WILL_RUN_STORY,
+                               STATE_WILL_RUN_STORY,
+                               STATE_RUN_STORY,
+                               TEST_MEASURE,
+                               STATE_DID_RUN_STORY,
+                               TEST_DID_RUN_STORY]
+
+    class TestStoryTest(story_test.StoryTest):
+      def WillRunStory(self, platform):
+        pass
+
+      def Measure(self, platform, results):
+        pass
+
+      def DidRunStory(self, platform):
+        pass
+
+    class TestSharedStateForStoryTest(TestSharedState):
+      def RunStory(self, results):
+        pass
+
+    @mock.patch.object(TestStoryTest, 'WillRunStory')
+    @mock.patch.object(TestStoryTest, 'Measure')
+    @mock.patch.object(TestStoryTest, 'DidRunStory')
+    @mock.patch.object(TestSharedStateForStoryTest, 'WillRunStory')
+    @mock.patch.object(TestSharedStateForStoryTest, 'RunStory')
+    @mock.patch.object(TestSharedStateForStoryTest, 'DidRunStory')
+    def GetCallsInOrder(state_DidRunStory, state_RunStory, state_WillRunStory,
+                        test_DidRunStory, test_Measure, test_WillRunStory):
+      manager = mock.MagicMock()
+      manager.attach_mock(test_WillRunStory, TEST_WILL_RUN_STORY)
+      manager.attach_mock(test_Measure, TEST_MEASURE)
+      manager.attach_mock(test_DidRunStory, TEST_DID_RUN_STORY)
+      manager.attach_mock(state_WillRunStory, STATE_WILL_RUN_STORY)
+      manager.attach_mock(state_RunStory, STATE_RUN_STORY)
+      manager.attach_mock(state_DidRunStory, STATE_DID_RUN_STORY)
+
+      test = TestStoryTest()
+      story_set = story_module.StorySet()
+      story_set.AddStory(DummyLocalStory(TestSharedStateForStoryTest))
+      story_runner.Run(test, story_set, self.options, self.results)
+      return [call[0] for call in manager.mock_calls]
+
+    calls_in_order = GetCallsInOrder() # pylint: disable=no-value-for-parameter
+    self.assertEquals(EXPECTED_CALLS_IN_ORDER, calls_in_order)
+
+  def testTearDownStateAfterEachStoryRun(self):
+    class TestSharedStateForTearDown(TestSharedState):
+      num_of_tear_downs = 0
+
+      def RunStory(self, results):
+        pass
+
+      def TearDownState(self):
+        TestSharedStateForTearDown.num_of_tear_downs += 1
+
+    story_set = story_module.StorySet()
+    story_set.AddStory(DummyLocalStory(TestSharedStateForTearDown))
+    story_set.AddStory(DummyLocalStory(TestSharedStateForTearDown))
+    story_set.AddStory(DummyLocalStory(TestSharedStateForTearDown))
+
+    TestSharedStateForTearDown.num_of_tear_downs = 0
+    story_runner.Run(mock.MagicMock(), story_set, self.options, self.results)
+    self.assertEquals(TestSharedStateForTearDown.num_of_tear_downs, 1)
+
+    TestSharedStateForTearDown.num_of_tear_downs = 0
+    story_runner.Run(mock.MagicMock(), story_set, self.options, self.results,
+                     should_tear_down_state_after_each_story_run=True)
+    self.assertEquals(TestSharedStateForTearDown.num_of_tear_downs, 3)
+
+  def testTearDownIsCalledOnceForEachStoryGroupWithPageSetRepeat(self):
+    self.options.pageset_repeat = 3
+    fooz_init_call_counter = [0]
+    fooz_tear_down_call_counter = [0]
+    barz_init_call_counter = [0]
+    barz_tear_down_call_counter = [0]
+    class FoozStoryState(FooStoryState):
+      def __init__(self, test, options, storyz):
+        super(FoozStoryState, self).__init__(
+          test, options, storyz)
+        fooz_init_call_counter[0] += 1
+      def TearDownState(self):
+        fooz_tear_down_call_counter[0] += 1
+
+    class BarzStoryState(BarStoryState):
+      def __init__(self, test, options, storyz):
+        super(BarzStoryState, self).__init__(
+          test, options, storyz)
+        barz_init_call_counter[0] += 1
+      def TearDownState(self):
+        barz_tear_down_call_counter[0] += 1
+    def AssertAndCleanUpFoo():
+      self.assertEquals(1, fooz_init_call_counter[0])
+      self.assertEquals(1, fooz_tear_down_call_counter[0])
+      fooz_init_call_counter[0] = 0
+      fooz_tear_down_call_counter[0] = 0
+
+    story_set1_list = [FoozStoryState, FoozStoryState, FoozStoryState,
+                       BarzStoryState, BarzStoryState]
+    story_set1 = SetupStorySet(True, story_set1_list)
+    self.RunStoryTest(story_set1, 15)
+    AssertAndCleanUpFoo()
+    self.assertEquals(1, barz_init_call_counter[0])
+    self.assertEquals(1, barz_tear_down_call_counter[0])
+    barz_init_call_counter[0] = 0
+    barz_tear_down_call_counter[0] = 0
+
+    story_set2_list = [FoozStoryState, FoozStoryState, FoozStoryState,
+                       FoozStoryState]
+    story_set2 = SetupStorySet(False, story_set2_list)
+    self.RunStoryTest(story_set2, 27)
+    AssertAndCleanUpFoo()
+    self.assertEquals(0, barz_init_call_counter[0])
+    self.assertEquals(0, barz_tear_down_call_counter[0])
+
+  def testAppCrashExceptionCausesFailureValue(self):
+    self.SuppressExceptionFormatting()
+    story_set = story_module.StorySet()
+    class SharedStoryThatCausesAppCrash(TestSharedPageState):
+      def WillRunStory(self, story):
+        raise exceptions.AppCrashException(msg='App Foo crashes')
+
+    story_set.AddStory(DummyLocalStory(
+          SharedStoryThatCausesAppCrash))
+    story_runner.Run(
+        DummyTest(), story_set, self.options, self.results)
+    self.assertEquals(1, len(self.results.failures))
+    self.assertEquals(0, GetNumberOfSuccessfulPageRuns(self.results))
+    self.assertIn('App Foo crashes', self.fake_stdout.getvalue())
+
+  def testExceptionRaisedInSharedStateTearDown(self):
+    self.SuppressExceptionFormatting()
+    story_set = story_module.StorySet()
+    class SharedStoryThatCausesAppCrash(TestSharedPageState):
+      def TearDownState(self):
+        raise TestOnlyException()
+
+    story_set.AddStory(DummyLocalStory(
+          SharedStoryThatCausesAppCrash))
+    with self.assertRaises(TestOnlyException):
+      story_runner.Run(
+          DummyTest(), story_set, self.options, self.results)
+
+  def testUnknownExceptionIsFatal(self):
+    self.SuppressExceptionFormatting()
+    story_set = story_module.StorySet()
+
+    class UnknownException(Exception):
+      pass
+
+    # This erroneous test is set up to raise an exception on the 2nd story
+    # run.
+    class Test(page_test.PageTest):
+      def __init__(self, *args):
+        super(Test, self).__init__(*args)
+        self.run_count = 0
+
+      def RunPage(self, *_):
+        old_run_count = self.run_count
+        self.run_count += 1
+        if old_run_count == 1:
+          raise UnknownException('FooBarzException')
+
+      def ValidateAndMeasurePage(self, page, tab, results):
+        pass
+
+    s1 = DummyLocalStory(TestSharedPageState)
+    s2 = DummyLocalStory(TestSharedPageState)
+    story_set.AddStory(s1)
+    story_set.AddStory(s2)
+    test = Test()
+    with self.assertRaises(UnknownException):
+      story_runner.Run(
+          test, story_set, self.options, self.results)
+    self.assertEqual(set([s2]), self.results.pages_that_failed)
+    self.assertEqual(set([s1]), self.results.pages_that_succeeded)
+    self.assertIn('FooBarzException', self.fake_stdout.getvalue())
+
+  def testRaiseBrowserGoneExceptionFromRunPage(self):
+    self.SuppressExceptionFormatting()
+    story_set = story_module.StorySet()
+
+    class Test(page_test.PageTest):
+      def __init__(self, *args):
+        super(Test, self).__init__(*args)
+        self.run_count = 0
+
+      def RunPage(self, *_):
+        old_run_count = self.run_count
+        self.run_count += 1
+        if old_run_count == 0:
+          raise exceptions.BrowserGoneException(
+              None, 'i am a browser crash message')
+
+      def ValidateAndMeasurePage(self, page, tab, results):
+        pass
+
+    story_set.AddStory(DummyLocalStory(TestSharedPageState))
+    story_set.AddStory(DummyLocalStory(TestSharedPageState))
+    test = Test()
+    story_runner.Run(
+        test, story_set, self.options, self.results)
+    self.assertEquals(2, test.run_count)
+    self.assertEquals(1, len(self.results.failures))
+    self.assertEquals(1, GetNumberOfSuccessfulPageRuns(self.results))
+
+  def testAppCrashThenRaiseInTearDownFatal(self):
+    self.SuppressExceptionFormatting()
+    story_set = story_module.StorySet()
+
+    unit_test_events = []  # track what was called when
+    class DidRunTestError(Exception):
+      pass
+
+    class TestTearDownSharedState(TestSharedPageState):
+      def TearDownState(self):
+        unit_test_events.append('tear-down-state')
+        raise DidRunTestError
+
+
+    class Test(page_test.PageTest):
+      def __init__(self, *args):
+        super(Test, self).__init__(*args)
+        self.run_count = 0
+
+      def RunPage(self, *_):
+        old_run_count = self.run_count
+        self.run_count += 1
+        if old_run_count == 0:
+          unit_test_events.append('app-crash')
+          raise exceptions.AppCrashException
+
+      def ValidateAndMeasurePage(self, page, tab, results):
+        pass
+
+    story_set.AddStory(DummyLocalStory(TestTearDownSharedState))
+    story_set.AddStory(DummyLocalStory(TestTearDownSharedState))
+    test = Test()
+
+    with self.assertRaises(DidRunTestError):
+      story_runner.Run(
+          test, story_set, self.options, self.results)
+    self.assertEqual(['app-crash', 'tear-down-state'], unit_test_events)
+    # The AppCrashException gets added as a failure.
+    self.assertEquals(1, len(self.results.failures))
+
+  def testPagesetRepeat(self):
+    story_set = story_module.StorySet()
+
+    # TODO(eakuefner): Factor this out after flattening page ref in Value
+    blank_story = DummyLocalStory(TestSharedPageState, name='blank')
+    green_story = DummyLocalStory(TestSharedPageState, name='green')
+    story_set.AddStory(blank_story)
+    story_set.AddStory(green_story)
+
+    class Measurement(page_test.PageTest):
+      i = 0
+      def RunPage(self, page, _, results):
+        self.i += 1
+        results.AddValue(scalar.ScalarValue(
+            page, 'metric', 'unit', self.i,
+            improvement_direction=improvement_direction.UP))
+
+      def ValidateAndMeasurePage(self, page, tab, results):
+        pass
+
+    self.options.page_repeat = 1
+    self.options.pageset_repeat = 2
+    self.options.output_formats = []
+    results = results_options.CreateResults(
+      EmptyMetadataForTest(), self.options)
+    story_runner.Run(
+        Measurement(), story_set, self.options, results)
+    summary = summary_module.Summary(results.all_page_specific_values)
+    values = summary.interleaved_computed_per_page_values_and_summaries
+
+    blank_value = list_of_scalar_values.ListOfScalarValues(
+        blank_story, 'metric', 'unit', [1, 3],
+        improvement_direction=improvement_direction.UP)
+    green_value = list_of_scalar_values.ListOfScalarValues(
+        green_story, 'metric', 'unit', [2, 4],
+        improvement_direction=improvement_direction.UP)
+    merged_value = list_of_scalar_values.ListOfScalarValues(
+        None, 'metric', 'unit', [1, 2, 3, 4],
+        improvement_direction=improvement_direction.UP)
+
+    self.assertEquals(4, GetNumberOfSuccessfulPageRuns(results))
+    self.assertEquals(0, len(results.failures))
+    self.assertEquals(3, len(values))
+    self.assertIn(blank_value, values)
+    self.assertIn(green_value, values)
+    self.assertIn(merged_value, values)
+
+  @decorators.Disabled('chromeos')  # crbug.com/483212
+  def testUpdateAndCheckArchives(self):
+    usr_stub = system_stub.Override(story_runner, ['cloud_storage'])
+    wpr_stub = system_stub.Override(archive_info, ['cloud_storage'])
+    archive_data_dir = os.path.join(
+        util.GetTelemetryDir(),
+        'telemetry', 'internal', 'testing', 'archive_files')
+    try:
+      story_set = story_module.StorySet()
+      story_set.AddStory(page_module.Page(
+          'http://www.testurl.com', story_set, story_set.base_dir))
+      # Page set missing archive_data_file.
+      self.assertRaises(
+          story_runner.ArchiveError,
+          story_runner._UpdateAndCheckArchives,
+          story_set.archive_data_file,
+          story_set.wpr_archive_info,
+          story_set.stories)
+
+      story_set = story_module.StorySet(
+          archive_data_file='missing_archive_data_file.json')
+      story_set.AddStory(page_module.Page(
+          'http://www.testurl.com', story_set, story_set.base_dir))
+      # Page set whose archive_data_file points at a missing json file.
+      self.assertRaises(
+          story_runner.ArchiveError,
+          story_runner._UpdateAndCheckArchives,
+          story_set.archive_data_file,
+          story_set.wpr_archive_info,
+          story_set.stories)
+
+      story_set = story_module.StorySet(
+          archive_data_file=os.path.join(archive_data_dir, 'test.json'),
+          cloud_storage_bucket=cloud_storage.PUBLIC_BUCKET)
+      story_set.AddStory(page_module.Page(
+          'http://www.testurl.com', story_set, story_set.base_dir))
+      # Page set with valid archive_data_file.
+      self.assertTrue(story_runner._UpdateAndCheckArchives(
+            story_set.archive_data_file, story_set.wpr_archive_info,
+            story_set.stories))
+      story_set.AddStory(page_module.Page(
+          'http://www.google.com', story_set, story_set.base_dir))
+      # Page set with an archive_data_file which exists but is missing a page.
+      self.assertRaises(
+          story_runner.ArchiveError,
+          story_runner._UpdateAndCheckArchives,
+          story_set.archive_data_file,
+          story_set.wpr_archive_info,
+          story_set.stories)
+
+      story_set = story_module.StorySet(
+          archive_data_file=
+              os.path.join(archive_data_dir, 'test_missing_wpr_file.json'),
+          cloud_storage_bucket=cloud_storage.PUBLIC_BUCKET)
+      story_set.AddStory(page_module.Page(
+          'http://www.testurl.com', story_set, story_set.base_dir))
+      story_set.AddStory(page_module.Page(
+          'http://www.google.com', story_set, story_set.base_dir))
+      # Page set with an archive_data_file which exists and contains all pages
+      # but fails to find a wpr file.
+      self.assertRaises(
+          story_runner.ArchiveError,
+          story_runner._UpdateAndCheckArchives,
+          story_set.archive_data_file,
+          story_set.wpr_archive_info,
+          story_set.stories)
+    finally:
+      usr_stub.Restore()
+      wpr_stub.Restore()
+
+
+  def _testMaxFailuresOptionIsRespectedAndOverridable(
+      self, num_failing_stories, runner_max_failures, options_max_failures,
+      expected_num_failures):
+    class SimpleSharedState(story_module.SharedState):
+      _fake_platform = FakePlatform()
+      _current_story = None
+
+      @property
+      def platform(self):
+        return self._fake_platform
+
+      def WillRunStory(self, story):
+        self._current_story = story
+
+      def RunStory(self, results):
+        self._current_story.Run(self)
+
+      def DidRunStory(self, results):
+        pass
+
+      def CanRunStory(self, story):
+        return True
+
+      def TearDownState(self):
+        pass
+
+    class FailingStory(story_module.Story):
+      def __init__(self):
+        super(FailingStory, self).__init__(
+            shared_state_class=SimpleSharedState,
+            is_local=True)
+        self.was_run = False
+
+      def Run(self, shared_state):
+        self.was_run = True
+        raise page_test.Failure
+
+    self.SuppressExceptionFormatting()
+
+    story_set = story_module.StorySet()
+    for _ in range(num_failing_stories):
+      story_set.AddStory(FailingStory())
+
+    options = _GetOptionForUnittest()
+    options.output_formats = ['none']
+    options.suppress_gtest_report = True
+    if options_max_failures:
+      options.max_failures = options_max_failures
+
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    story_runner.Run(
+        DummyTest(), story_set, options,
+        results, max_failures=runner_max_failures)
+    self.assertEquals(0, GetNumberOfSuccessfulPageRuns(results))
+    self.assertEquals(expected_num_failures, len(results.failures))
+    for ii, story in enumerate(story_set.stories):
+      self.assertEqual(story.was_run, ii < expected_num_failures)
+
+  def testMaxFailuresNotSpecified(self):
+    self._testMaxFailuresOptionIsRespectedAndOverridable(
+        num_failing_stories=5, runner_max_failures=None,
+        options_max_failures=None, expected_num_failures=5)
+
+  def testMaxFailuresSpecifiedToRun(self):
+    # Runs up to max_failures+1 failing tests before stopping, since the
+    # tests run after max_failures failures have been encountered may all
+    # be passing.
+    self._testMaxFailuresOptionIsRespectedAndOverridable(
+        num_failing_stories=5, runner_max_failures=3,
+        options_max_failures=None, expected_num_failures=4)
+
+  def testMaxFailuresOption(self):
+    # Runs up to max_failures+1 failing tests before stopping, since the
+    # tests run after max_failures failures have been encountered may all
+    # be passing.
+    self._testMaxFailuresOptionIsRespectedAndOverridable(
+        num_failing_stories=5, runner_max_failures=3,
+        options_max_failures=1, expected_num_failures=2)
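+
+  # Editorial note (illustrative sketch, not part of the upstream change):
+  # the expected_num_failures values in the three tests above follow from the
+  # stopping rule described in their comments -- the runner only stops once
+  # the failure count *exceeds* max_failures, and options.max_failures (when
+  # set) overrides the value passed to story_runner.Run. A rough model of the
+  # arithmetic, assuming every story in the set fails:
+  #
+  #   def expected_failures(num_failing_stories, max_failures):
+  #     if max_failures is None:
+  #       return num_failing_stories
+  #     return min(num_failing_stories, max_failures + 1)
+  #
+  #   expected_failures(5, None) == 5  # testMaxFailuresNotSpecified
+  #   expected_failures(5, 3) == 4     # testMaxFailuresSpecifiedToRun
+  #   expected_failures(5, 1) == 2     # testMaxFailuresOption (option wins)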
diff --git a/catapult/telemetry/telemetry/internal/testing/.gitignore b/catapult/telemetry/telemetry/internal/testing/.gitignore
new file mode 100644
index 0000000..4842ffe
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/.gitignore
@@ -0,0 +1 @@
+internal
\ No newline at end of file
diff --git a/catapult/perf_insights/third_party/__init__.py b/catapult/telemetry/telemetry/internal/testing/__init__.py
similarity index 100%
copy from catapult/perf_insights/third_party/__init__.py
copy to catapult/telemetry/telemetry/internal/testing/__init__.py
diff --git a/catapult/telemetry/telemetry/internal/testing/animated_page.html b/catapult/telemetry/telemetry/internal/testing/animated_page.html
new file mode 100644
index 0000000..3eb9502
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/animated_page.html
@@ -0,0 +1,21 @@
+<!doctype html>
+<html>
+  <head>
+    <style type="text/css">
+    @-webkit-keyframes rotating {
+      from{
+        -webkit-transform: rotate(0deg);
+      }
+      to{
+        -webkit-transform: rotate(360deg);
+      }
+    }
+    .rotating {
+      -webkit-animation: rotating 2s linear infinite;
+    }
+    </style>
+  </head>
+  <body>
+    <img src="image.png" class="rotating">
+  </body>
+</html>
diff --git a/catapult/telemetry/telemetry/internal/testing/arch-lsb-release b/catapult/telemetry/telemetry/internal/testing/arch-lsb-release
new file mode 100644
index 0000000..1e02092
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/arch-lsb-release
@@ -0,0 +1,4 @@
+LSB_VERSION=1.4
+DISTRIB_ID=Arch
+DISTRIB_RELEASE=rolling
+DISTRIB_DESCRIPTION="Arch Linux"
diff --git a/catapult/telemetry/telemetry/internal/testing/archive_files/test.json b/catapult/telemetry/telemetry/internal/testing/archive_files/test.json
new file mode 100644
index 0000000..c007ad7
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/archive_files/test.json
@@ -0,0 +1,8 @@
+{
+    "description": "Describes the Web Page Replay archives for a page set. Don't edit by hand! Use record_wpr for updating.",
+    "archives": {
+        "test_000.wpr": [
+            "http://www.testurl.com"
+        ]
+    }
+}
diff --git a/catapult/telemetry/telemetry/internal/testing/archive_files/test_000.wpr b/catapult/telemetry/telemetry/internal/testing/archive_files/test_000.wpr
new file mode 100644
index 0000000..32d459f
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/archive_files/test_000.wpr
@@ -0,0 +1 @@
+File needed for story_runner_unittest testCheckArchives.
diff --git a/catapult/telemetry/telemetry/internal/testing/archive_files/test_000.wpr.sha1 b/catapult/telemetry/telemetry/internal/testing/archive_files/test_000.wpr.sha1
new file mode 100644
index 0000000..e27e86d
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/archive_files/test_000.wpr.sha1
@@ -0,0 +1 @@
+6bd6f4b08b44abc206f68471c0444df156d0fd1a
\ No newline at end of file
diff --git a/catapult/telemetry/telemetry/internal/testing/archive_files/test_missing_wpr_file.json b/catapult/telemetry/telemetry/internal/testing/archive_files/test_missing_wpr_file.json
new file mode 100644
index 0000000..0f4673d
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/archive_files/test_missing_wpr_file.json
@@ -0,0 +1,11 @@
+{
+    "description": "Describes the Web Page Replay archives for a page set. Don't edit by hand! Use record_wpr for updating.",
+    "archives": {
+        "test_000.wpr": [
+            "http://www.testurl.com"
+        ],
+        "test_missing_wpr_file.wpr": [
+            "http://www.google.com"
+        ]
+    }
+}
diff --git a/catapult/telemetry/telemetry/internal/testing/archive_files/test_page_set.py b/catapult/telemetry/telemetry/internal/testing/archive_files/test_page_set.py
new file mode 100644
index 0000000..ec53709
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/archive_files/test_page_set.py
@@ -0,0 +1,36 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry import story
+from telemetry.page import page
+from telemetry.internal.testing.pages.external_page import ExternalPage
+
+
+class InternalPage(page.Page):
+  def __init__(self, story_set):
+    super(InternalPage, self).__init__('file://bar.html', story=story_set)
+
+class TestPageSet(story.StorySet):
+  """A pageset for testing purpose"""
+
+  def __init__(self):
+    super(TestPageSet, self).__init__(
+      archive_data_file='data/archive_files/test.json',
+      credentials_path='data/credential',
+      user_agent_type='desktop',
+      bucket=story.PUBLIC_BUCKET)
+
+    # Top Google property; a Google tab is often open.
+    class Google(page.Page):
+      def __init__(self, story_set):
+        # pylint: disable=bad-super-call
+        super(Google, self).__init__('https://www.google.com',
+                                     page_set=story_set)
+
+      def RunGetActionRunner(self, action_runner):
+        return action_runner
+
+    self.AddStory(Google(self))
+    self.AddStory(InternalPage(self))
+    self.AddStory(ExternalPage(self))
diff --git a/catapult/telemetry/telemetry/internal/testing/archive_files/test_simple_one_page_set.py b/catapult/telemetry/telemetry/internal/testing/archive_files/test_simple_one_page_set.py
new file mode 100644
index 0000000..4fb1198
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/archive_files/test_simple_one_page_set.py
@@ -0,0 +1,12 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry import story
+
+
+class TestSimpleOnePageSet(story.StorySet):
+  def __init__(self):
+    super(TestSimpleOnePageSet, self).__init__(
+      archive_data_file='data/archive_files/test.json',
+      credentials_path='data/credential')
diff --git a/catapult/telemetry/telemetry/internal/testing/archive_files/test_simple_two_page_set.py b/catapult/telemetry/telemetry/internal/testing/archive_files/test_simple_two_page_set.py
new file mode 100644
index 0000000..c17c5f3
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/archive_files/test_simple_two_page_set.py
@@ -0,0 +1,12 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry import story
+
+
+class TestSimpleTwoPageSet(story.StorySet):
+  def __init__(self):
+    super(TestSimpleTwoPageSet, self).__init__(
+      archive_data_file='data/archive_files/test.json',
+      credentials_path='data/credential')
diff --git a/catapult/telemetry/telemetry/internal/testing/autotest_ext/background.js b/catapult/telemetry/telemetry/internal/testing/autotest_ext/background.js
new file mode 100644
index 0000000..1513ac0
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/autotest_ext/background.js
@@ -0,0 +1,4 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
diff --git a/catapult/telemetry/telemetry/internal/testing/autotest_ext/manifest.json b/catapult/telemetry/telemetry/internal/testing/autotest_ext/manifest.json
new file mode 100644
index 0000000..1eb454e
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/autotest_ext/manifest.json
@@ -0,0 +1,13 @@
+{
+  "key": "MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDuUZGKCDbff6IRaxa4Pue7PPkxwPaNhGT3JEqppEsNWFjM80imEdqMbf3lrWqEfaHgaNku7nlpwPO1mu3/4Hr+XdNa5MhfnOnuPee4hyTLwOs3Vzz81wpbdzUxZSi2OmqMyI5oTaBYICfNHLwcuc65N5dbt6WKGeKgTpp4v7j7zwIDAQAB",
+  "description": "Telemetry ChromeOS Autotest component extension",
+  "name": "Telemetry ChromeOS AutoTest Component Extension",
+  "background": {
+    "scripts": ["background.js"]
+  },
+  "manifest_version": 2,
+  "version": "0.1",
+  "permissions" : [
+    "autotestPrivate"
+  ]
+}
diff --git a/catapult/telemetry/telemetry/internal/testing/bear.webm b/catapult/telemetry/telemetry/internal/testing/bear.webm
new file mode 100644
index 0000000..a1b4150
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/bear.webm
Binary files differ
diff --git a/catapult/telemetry/telemetry/internal/testing/blank.html b/catapult/telemetry/telemetry/internal/testing/blank.html
new file mode 100644
index 0000000..8d0ce09
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/blank.html
@@ -0,0 +1,8 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+</head>
+<body>
+Hello world
+</body>
+</html>
diff --git a/catapult/telemetry/telemetry/internal/testing/blink_style.html b/catapult/telemetry/telemetry/internal/testing/blink_style.html
new file mode 100644
index 0000000..213020c
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/blink_style.html
@@ -0,0 +1,15 @@
+<!DOCTYPE html>
+<body>
+Test Page for Blink Style measurement.
+<script>
+var css = '';
+for (var i = 0; i < 1000; i++) {
+  css += 'div { background: green }\n';
+}
+var style = document.createElement('style');
+style.textContent = css;
+document.head.appendChild(style);
+for (var i = 0; i < 1000; i++) {
+  document.body.appendChild(document.createElement('div'));
+}
+</script>
diff --git a/catapult/telemetry/telemetry/internal/testing/cast.html b/catapult/telemetry/telemetry/internal/testing/cast.html
new file mode 100644
index 0000000..80314a3
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/cast.html
@@ -0,0 +1,23 @@
+<!DOCTYPE HTML>
+<html xmlns="http://www.w3.org/1999/xhtml">
+  <head>
+    <title>Media Router Dialog Test</title>
+    <script type="text/javascript">
+      'use strict';
+      var startSessionPromise = null;
+      var presentationUrl = 'http://www.google.com/#__testprovider__=true';
+      var startSessionRequest = new PresentationRequest(presentationUrl);
+      window.navigator.presentation.defaultRequest = startSessionRequest;
+
+      function startSession() {
+        startSessionPromise = startSessionRequest.start();
+        console.log('start session');
+      }
+    </script>
+  </head>
+  <body>
+    <button id="start_session_button" onclick="startSession()">
+      Start session
+    </button>
+  </body>
+</html>
diff --git a/catapult/telemetry/telemetry/internal/testing/component_extension/background.js b/catapult/telemetry/telemetry/internal/testing/component_extension/background.js
new file mode 100644
index 0000000..303ef9f
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/component_extension/background.js
@@ -0,0 +1,8 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var _testVar;
+function setTestVar(x) {
+  _testVar = x;
+}
diff --git a/catapult/telemetry/telemetry/internal/testing/component_extension/manifest.json b/catapult/telemetry/telemetry/internal/testing/component_extension/manifest.json
new file mode 100644
index 0000000..da1b970
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/component_extension/manifest.json
@@ -0,0 +1,10 @@
+{
+  "key": "MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDuUZGKCDbff6IRaxa4Pue7PPkxwPaNhGT3JEqppEsNWFjM80imEdqMbf3lrWqEfaHgaNku7nlpwPO1mu3/4Hr+XdNa5MhfnOnuPee4hyTLwOs3Vzz81wpbdzUxZSi2OmqMyI5oTaBYICfNHLwcuc65N5dbt6WKGeKgTpp4v7j7zwIDAQAB",
+  "description": "Simple test component extension which has just a background script",
+  "name": "Simple Telemetry Test Component Extension",
+  "background": {
+    "scripts": ["background.js"]
+  },
+  "manifest_version": 2,
+  "version": "0.1"
+}
diff --git a/catapult/telemetry/telemetry/internal/testing/create_many_objects.html b/catapult/telemetry/telemetry/internal/testing/create_many_objects.html
new file mode 100644
index 0000000..4893563
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/create_many_objects.html
@@ -0,0 +1,20 @@
+<!DOCTYPE html>
+<script>
+var maxObjects = 100000;
+
+var TestRunner = function() {
+  this.isDone = false;
+};
+
+var testRunner = null;
+window.onload = function () {
+  testRunner = new TestRunner();
+
+  // Creating a lot of objects can trigger a Blink GC.
+  for (var i = 0; i < maxObjects; i++) {
+    new TextDecoder();
+  }
+
+  testRunner.isDone = true;
+}
+</script>
diff --git a/catapult/telemetry/telemetry/internal/testing/dependency_test_dir/dog/dog/__init__.py b/catapult/telemetry/telemetry/internal/testing/dependency_test_dir/dog/dog/__init__.py
new file mode 100644
index 0000000..50b23df
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/dependency_test_dir/dog/dog/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/catapult/telemetry/telemetry/internal/testing/dependency_test_dir/dog/dog/dog_object.py b/catapult/telemetry/telemetry/internal/testing/dependency_test_dir/dog/dog/dog_object.py
new file mode 100644
index 0000000..dfba650
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/dependency_test_dir/dog/dog/dog_object.py
@@ -0,0 +1,15 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+
+sys.path.append(os.path.join(
+  os.path.dirname(__file__), '..', '..', 'other_animals', 'cat'))
+
+from cat import cat_object  # pylint: disable=import-error
+
+class Dog(object):
+  def CreateEnemy(self):
+    return cat_object.Cat()
diff --git a/catapult/telemetry/telemetry/internal/testing/dependency_test_dir/other_animals/cat/cat/__init__.py b/catapult/telemetry/telemetry/internal/testing/dependency_test_dir/other_animals/cat/cat/__init__.py
new file mode 100644
index 0000000..50b23df
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/dependency_test_dir/other_animals/cat/cat/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/catapult/telemetry/telemetry/internal/testing/dependency_test_dir/other_animals/cat/cat/cat_object.py b/catapult/telemetry/telemetry/internal/testing/dependency_test_dir/other_animals/cat/cat/cat_object.py
new file mode 100644
index 0000000..41be7ba
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/dependency_test_dir/other_animals/cat/cat/cat_object.py
@@ -0,0 +1,7 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+class Cat(object):
+  def Run(self):
+    print 'Meow'
diff --git a/catapult/telemetry/telemetry/internal/testing/dependency_test_dir/other_animals/moose/moose/__init__.py b/catapult/telemetry/telemetry/internal/testing/dependency_test_dir/other_animals/moose/moose/__init__.py
new file mode 100644
index 0000000..50b23df
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/dependency_test_dir/other_animals/moose/moose/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/catapult/telemetry/telemetry/internal/testing/dependency_test_dir/other_animals/moose/moose/horn/__init__.py b/catapult/telemetry/telemetry/internal/testing/dependency_test_dir/other_animals/moose/moose/horn/__init__.py
new file mode 100644
index 0000000..50b23df
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/dependency_test_dir/other_animals/moose/moose/horn/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/catapult/telemetry/telemetry/internal/testing/dependency_test_dir/other_animals/moose/moose/horn/horn_object.py b/catapult/telemetry/telemetry/internal/testing/dependency_test_dir/other_animals/moose/moose/horn/horn_object.py
new file mode 100644
index 0000000..1a883ef
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/dependency_test_dir/other_animals/moose/moose/horn/horn_object.py
@@ -0,0 +1,7 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+class Horn(object):
+  def IsBig(self):
+    return True
diff --git a/catapult/telemetry/telemetry/internal/testing/dependency_test_dir/other_animals/moose/moose/moose_object.py b/catapult/telemetry/telemetry/internal/testing/dependency_test_dir/other_animals/moose/moose/moose_object.py
new file mode 100644
index 0000000..1e87ea4
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/dependency_test_dir/other_animals/moose/moose/moose_object.py
@@ -0,0 +1,13 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from horn import horn_object  # pylint: disable=relative-import
+
+class Moose(object):
+  def __init__(self):
+    self._horn = horn_object.Horn()
+
+  def Run(self):
+    if self._horn.IsBig():
+      print 'I need to drop my horn! It is big!'
diff --git a/catapult/telemetry/telemetry/internal/testing/discoverable_classes/__init__.py b/catapult/telemetry/telemetry/internal/testing/discoverable_classes/__init__.py
new file mode 100644
index 0000000..9228df8
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/discoverable_classes/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/catapult/telemetry/telemetry/internal/testing/discoverable_classes/another_discover_dummyclass.py b/catapult/telemetry/telemetry/internal/testing/discoverable_classes/another_discover_dummyclass.py
new file mode 100644
index 0000000..88581be
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/discoverable_classes/another_discover_dummyclass.py
@@ -0,0 +1,33 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""More dummy exception subclasses used by core/discover.py's unit tests."""
+
+# Import the class instead of the module explicitly so that
+# inspect.getmembers() returns two Exception subclasses in this file.
+# Suppress complaints about being unable to import the class.  The directory
+# path is added at runtime by the telemetry test runner.
+#pylint: disable=import-error
+from telemetry.internal.testing.discoverable_classes import discover_dummyclass
+
+
+class _PrivateDummyException(discover_dummyclass.DummyException):
+  def __init__(self):
+    super(_PrivateDummyException, self).__init__()
+
+
+class DummyExceptionImpl1(_PrivateDummyException):
+  def __init__(self):
+    super(DummyExceptionImpl1, self).__init__()
+
+
+class DummyExceptionImpl2(_PrivateDummyException):
+  def __init__(self):
+    super(DummyExceptionImpl2, self).__init__()
+
+
+class DummyExceptionWithParameterImpl1(_PrivateDummyException):
+  def __init__(self, parameter):
+    super(DummyExceptionWithParameterImpl1, self).__init__()
+    del parameter
diff --git a/catapult/telemetry/telemetry/internal/testing/discoverable_classes/discover_dummyclass.py b/catapult/telemetry/telemetry/internal/testing/discoverable_classes/discover_dummyclass.py
new file mode 100644
index 0000000..15dcb35
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/discoverable_classes/discover_dummyclass.py
@@ -0,0 +1,9 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A dummy exception subclass used by core/discover.py's unit tests."""
+
+class DummyException(Exception):
+  def __init__(self):
+    super(DummyException, self).__init__()
diff --git a/catapult/telemetry/telemetry/internal/testing/discoverable_classes/parameter_discover_dummyclass.py b/catapult/telemetry/telemetry/internal/testing/discoverable_classes/parameter_discover_dummyclass.py
new file mode 100644
index 0000000..3360fbd
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/discoverable_classes/parameter_discover_dummyclass.py
@@ -0,0 +1,11 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A dummy exception subclass used by core/discover.py's unit tests."""
+from telemetry.internal.testing.discoverable_classes import discover_dummyclass
+
+class DummyExceptionWithParameterImpl2(discover_dummyclass.DummyException):
+  def __init__(self, parameter1, parameter2):
+    super(DummyExceptionWithParameterImpl2, self).__init__()
+    del parameter1, parameter2
diff --git a/catapult/telemetry/telemetry/internal/testing/dom_counter_sample.html b/catapult/telemetry/telemetry/internal/testing/dom_counter_sample.html
new file mode 100644
index 0000000..483d1e0
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/dom_counter_sample.html
@@ -0,0 +1,12 @@
+<html>
+  <title>DOM counter unit test page</title>
+  <body>
+    <h1 onclick="">Webpage to be used for DOM counter unit testing.</h1>
+    <p onclick="">
+      This webpage is meant to be used by a unit test that checks DOM element
+      counters.  The test expects there to be exactly 1 document, 14 nodes,
+      and 2 event listeners.  Beware, modifying the HTML of this page may
+      change the element count, causing the unit test to fail!
+    </p>
+  </body>
+</html>
diff --git a/catapult/telemetry/telemetry/internal/testing/draggable.html b/catapult/telemetry/telemetry/internal/testing/draggable.html
new file mode 100644
index 0000000..8af37ba
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/draggable.html
@@ -0,0 +1,52 @@
+<html>
+  <head>
+    <style>
+      body { margin: 0; }
+      #drag_div {
+        width: 100%;
+        height: 100%;
+        background: blue;
+        position: relative;
+      }
+    </style>
+    <script>
+    offsetX = 0;
+    offsetY = 0;
+
+    function drag(event) {
+      d = document.getElementById('drag_div');
+      offsetX = event.clientX - d.offsetLeft;
+      offsetY = event.clientY - d.offsetTop;
+    }
+
+    function drop(event) {
+      d = document.getElementById('drag_div');
+      d.style.left = event.clientX - offsetX;
+      d.style.top = event.clientY - offsetY;
+    }
+
+    function touchStart(event) {
+      d = document.getElementById('drag_div');
+      offsetX = event.touches[0].clientX - d.offsetLeft;
+      offsetY = event.touches[0].clientY - d.offsetTop;
+    }
+
+    function touchEnd(event) {
+      d = document.getElementById('drag_div');
+      d.style.left = event.changedTouches[0].clientX - offsetX;
+      d.style.top = event.changedTouches[0].clientY - offsetY;
+    }
+
+    </script>
+  </head>
+
+  <body id ="body">
+    <div id="drag_div"> </div>
+  </body>
+  <script>
+  document.getElementById('body').addEventListener('mouseup', drop);
+  document.getElementById('body').addEventListener('touchend', touchEnd);
+  document.getElementById('drag_div').addEventListener('mousedown', drag);
+  document.getElementById('drag_div').addEventListener('touchstart', touchStart);
+  </script>
+</html>
diff --git a/catapult/telemetry/telemetry/internal/testing/favicon.ico b/catapult/telemetry/telemetry/internal/testing/favicon.ico
new file mode 100644
index 0000000..0069d5d
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/favicon.ico
Binary files differ
diff --git a/catapult/telemetry/telemetry/internal/testing/frame0.png b/catapult/telemetry/telemetry/internal/testing/frame0.png
new file mode 100644
index 0000000..956b62c
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/frame0.png
Binary files differ
diff --git a/catapult/telemetry/telemetry/internal/testing/frame1.png b/catapult/telemetry/telemetry/internal/testing/frame1.png
new file mode 100644
index 0000000..8f59394
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/frame1.png
Binary files differ
diff --git a/catapult/telemetry/telemetry/internal/testing/frame2.png b/catapult/telemetry/telemetry/internal/testing/frame2.png
new file mode 100644
index 0000000..f0bf178
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/frame2.png
Binary files differ
diff --git a/catapult/telemetry/telemetry/internal/testing/frame3.png b/catapult/telemetry/telemetry/internal/testing/frame3.png
new file mode 100644
index 0000000..f36b453
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/frame3.png
Binary files differ
diff --git a/catapult/telemetry/telemetry/internal/testing/frame4.png b/catapult/telemetry/telemetry/internal/testing/frame4.png
new file mode 100644
index 0000000..ef9d38a
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/frame4.png
Binary files differ
diff --git a/catapult/telemetry/telemetry/internal/testing/frame5.png b/catapult/telemetry/telemetry/internal/testing/frame5.png
new file mode 100644
index 0000000..ef1926d
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/frame5.png
Binary files differ
diff --git a/catapult/telemetry/telemetry/internal/testing/frame6.png b/catapult/telemetry/telemetry/internal/testing/frame6.png
new file mode 100644
index 0000000..0a3028f
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/frame6.png
Binary files differ
diff --git a/catapult/telemetry/telemetry/internal/testing/frame7.png b/catapult/telemetry/telemetry/internal/testing/frame7.png
new file mode 100644
index 0000000..9b6b6e5
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/frame7.png
Binary files differ
diff --git a/catapult/telemetry/telemetry/internal/testing/green_rect.html b/catapult/telemetry/telemetry/internal/testing/green_rect.html
new file mode 100644
index 0000000..478c755
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/green_rect.html
@@ -0,0 +1,19 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <style>
+  html, body {
+    margin: 0;
+    padding: 0;
+  }
+  #green {
+    width: 32px;
+    height: 32px;
+    background-color: rgb(0, 255, 0);
+  }
+  </style>
+</head>
+<body>
+  <div id="green"></div>
+</body>
+</html>
\ No newline at end of file
diff --git a/catapult/telemetry/telemetry/internal/testing/host.html b/catapult/telemetry/telemetry/internal/testing/host.html
new file mode 100644
index 0000000..46cfb06
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/host.html
@@ -0,0 +1,12 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+<script type="text/javascript">var testVar = "host";</script>
+</head>
+<body>
+This is the host page.
+<br>
+<iframe src="iframe1.html"></iframe>
+<iframe src="iframe3.html"></iframe>
+</body>
+</html>
diff --git a/catapult/telemetry/telemetry/internal/testing/iframe1.html b/catapult/telemetry/telemetry/internal/testing/iframe1.html
new file mode 100644
index 0000000..972b8fb
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/iframe1.html
@@ -0,0 +1,11 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+<script type="text/javascript">var testVar="iframe1";</script>
+</head>
+<body>
+This is IFrame 1.
+<br>
+<iframe src="iframe2.html"></iframe>
+</body>
+</html>
diff --git a/catapult/telemetry/telemetry/internal/testing/iframe2.html b/catapult/telemetry/telemetry/internal/testing/iframe2.html
new file mode 100644
index 0000000..778f8a0
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/iframe2.html
@@ -0,0 +1,9 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+<script type="text/javascript">var testVar="iframe2";</script>
+</head>
+<body>
+This is IFrame 2.
+</body>
+</html>
diff --git a/catapult/telemetry/telemetry/internal/testing/iframe3.html b/catapult/telemetry/telemetry/internal/testing/iframe3.html
new file mode 100644
index 0000000..f992152
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/iframe3.html
@@ -0,0 +1,9 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+<script type="text/javascript">var testVar="iframe3";</script>
+</head>
+<body>
+This is IFrame 3.
+</body>
+</html>
diff --git a/catapult/telemetry/telemetry/internal/testing/image.png b/catapult/telemetry/telemetry/internal/testing/image.png
new file mode 100644
index 0000000..82c2870
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/image.png
Binary files differ
diff --git a/catapult/telemetry/telemetry/internal/testing/image_decoding.html b/catapult/telemetry/telemetry/internal/testing/image_decoding.html
new file mode 100644
index 0000000..59c1f0d
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/image_decoding.html
@@ -0,0 +1,8 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+</head>
+<body>
+  <img src="image.png">
+</body>
+</html>
diff --git a/catapult/telemetry/telemetry/internal/testing/interaction_enabled_page.html b/catapult/telemetry/telemetry/internal/testing/interaction_enabled_page.html
new file mode 100644
index 0000000..761f912
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/interaction_enabled_page.html
@@ -0,0 +1,96 @@
+<!doctype html>
+<html>
+  <head>
+    <meta name="viewport" content="user-scalable:no">
+    <style type="text/css">
+      body { height: 1500px; }
+      #center {
+         position: fixed;
+         left: 40%;
+         width: 50%;
+         height: 250px;
+         top: 25%;
+         background-color: grey;
+         -webkit-transform: scale(0.25, 0.25);
+         -webkit-transition: -webkit-transform 1s;
+      }
+
+      #drawer {
+         position: fixed;
+         top: 0;
+         left: 0;
+         height: 100%;
+         width: 120px;
+         background-color: red;
+         -webkit-transform: translate3d(-1000px, 0, 0);
+         -webkit-transition: -webkit-transform 1s;
+      }
+
+    </style>
+    <script>
+    'use strict';
+    window.animationDone = false;
+    function makeAnimation() {
+      var centerEl = document.querySelector('#center');
+      centerEl.style.webkitTransform = 'scale(1.0, 1.0)';
+      console.time('Interaction.CenterAnimation');
+      centerEl.addEventListener('transitionend', function() {
+        console.timeEnd('Interaction.CenterAnimation');
+        var drawerEl = document.querySelector('#drawer');
+        drawerEl.style.webkitTransform = 'translate3D(0, 0, 0)';
+        console.time('Interaction.DrawerAnimation');
+        drawerEl.addEventListener('transitionend', function() {
+          console.timeEnd('Interaction.DrawerAnimation');
+          window.animationDone = true;
+        });
+      });
+    }
+    </script>
+
+    <script>
+    'use strict';
+    var jankMs = 100;
+    var slowMs = 200;
+    window.jankScriptDone = false;
+    window.slowScriptDone = false;
+    function waitMs(ms) {
+      var startTime = window.performance.now();
+      var currTime = startTime;
+      while (currTime - startTime < ms) {
+        currTime = window.performance.now();
+      }
+    }
+    function makeJank() {
+      console.time('Interaction.JankThreadJSRun');
+      waitMs(jankMs);
+      console.timeEnd('Interaction.JankThreadJSRun');
+      window.jankScriptDone = true;
+    }
+    function makeSlow() {
+      console.time('Interaction.SlowThreadJsRun');
+      waitMs(slowMs);
+      console.timeEnd('Interaction.SlowThreadJsRun');
+      window.slowScriptDone = true;
+    }
+    </script>
+
+  </head>
+  <body>
+    <div id="center">
+      This is something in the middle.
+    </div>
+    <div id="drawer">
+      This is a drawer.
+    </div>
+    <button type="button" id="animating-button" onclick="makeAnimation()">
+      Click or tap this to trigger an animation.
+    </div>
+    <button type="button" id="jank-button" onclick="makeJank()">
+      Click or tap this to make jank of 100ms (approximately).
+    </div>
+    <button type="button" id="slow-button" onclick="makeSlow()">
+      Click or tap this to make wait 200ms (approximately).
+    </div>
+
+  </body>
+</html>
diff --git a/catapult/telemetry/telemetry/internal/testing/jebgalgnebhfojomionfpkfelancnnkf.crx b/catapult/telemetry/telemetry/internal/testing/jebgalgnebhfojomionfpkfelancnnkf.crx
new file mode 100644
index 0000000..4fb2a3b
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/jebgalgnebhfojomionfpkfelancnnkf.crx
Binary files differ
diff --git a/catapult/telemetry/telemetry/internal/testing/manifest_with_key.json b/catapult/telemetry/telemetry/internal/testing/manifest_with_key.json
new file mode 100644
index 0000000..4b41dd4
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/manifest_with_key.json
@@ -0,0 +1,7 @@
+{
+  "name": "unpacked extension",
+  "description": "this is an unpacked extension with a key in it",
+  "version": "1",
+  "manifest_version": 2,
+  "key": "MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCoq+cJMuoIaoL2hx//QoIeHnNkXLAEu3IJGcLpM95qbmw9VnAplFI0tpSv4IpuJ1DPPsdsEMhONu1mPhK9xd3BHCtzqXRfRsnx/uOap4NTcUimxiUH3uuX9xkCNWO8EihdV0atnrKROhhnyIxmhgKmKfAYLheOrSGSXP0A4SqaBQIDAQAB"
+}
diff --git a/catapult/telemetry/telemetry/internal/testing/non_scrollable_page.html b/catapult/telemetry/telemetry/internal/testing/non_scrollable_page.html
new file mode 100644
index 0000000..5770408
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/non_scrollable_page.html
@@ -0,0 +1,10 @@
+<!doctype html>
+<html>
+  <head>
+    <style type="text/css">
+    </style>
+  </head>
+  <body>
+    Hello, world.
+  </body>
+</html>
diff --git a/catapult/perf_insights/third_party/__init__.py b/catapult/telemetry/telemetry/internal/testing/page_sets/__init__.py
similarity index 100%
copy from catapult/perf_insights/third_party/__init__.py
copy to catapult/telemetry/telemetry/internal/testing/page_sets/__init__.py
diff --git a/catapult/telemetry/telemetry/internal/testing/page_sets/data/.gitignore b/catapult/telemetry/telemetry/internal/testing/page_sets/data/.gitignore
new file mode 100644
index 0000000..2f9e5f9
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/page_sets/data/.gitignore
@@ -0,0 +1 @@
+*.wpr
diff --git a/catapult/telemetry/telemetry/internal/testing/page_sets/data/example_domain.json b/catapult/telemetry/telemetry/internal/testing/page_sets/data/example_domain.json
new file mode 100644
index 0000000..20dec47
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/page_sets/data/example_domain.json
@@ -0,0 +1,9 @@
+{
+    "description": "Describes the Web Page Replay archives for a user story set. Don't edit by hand! Use record_wpr for updating.", 
+    "archives": {
+        "example_domain_001.wpr": [
+            "http://www.example.com",
+            "https://www.example.com"
+        ]
+    }
+}
\ No newline at end of file
diff --git a/catapult/telemetry/telemetry/internal/testing/page_sets/data/example_domain_001.wpr.sha1 b/catapult/telemetry/telemetry/internal/testing/page_sets/data/example_domain_001.wpr.sha1
new file mode 100644
index 0000000..fdfac39
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/page_sets/data/example_domain_001.wpr.sha1
@@ -0,0 +1 @@
+5e49b8152e40b5df427a8e73062045ddde2edcb8
\ No newline at end of file
diff --git a/catapult/telemetry/telemetry/internal/testing/page_sets/example_domain.py b/catapult/telemetry/telemetry/internal/testing/page_sets/example_domain.py
new file mode 100644
index 0000000..c56e0ae
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/page_sets/example_domain.py
@@ -0,0 +1,16 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry import story
+from telemetry.page import page
+
+
+class ExampleDomainPageSet(story.StorySet):
+  def __init__(self):
+    super(ExampleDomainPageSet, self).__init__(
+      archive_data_file='data/example_domain.json',
+      cloud_storage_bucket=story.PUBLIC_BUCKET)
+
+    self.AddStory(page.Page('http://www.example.com', self))
+    self.AddStory(page.Page('https://www.example.com', self))
diff --git a/catapult/telemetry/telemetry/internal/testing/page_that_logs_to_console.html b/catapult/telemetry/telemetry/internal/testing/page_that_logs_to_console.html
new file mode 100644
index 0000000..373eebc
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/page_that_logs_to_console.html
@@ -0,0 +1,14 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+</head>
+<body>
+<script>
+  window.__logCount = 0;
+  setInterval(function() {
+    console.log("Hello, world")
+    window.__logCount += 1
+  }, 100);
+</script>
+</body>
+</html>
diff --git a/catapult/telemetry/telemetry/internal/testing/page_with_clickables.html b/catapult/telemetry/telemetry/internal/testing/page_with_clickables.html
new file mode 100644
index 0000000..a1bfc07
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/page_with_clickables.html
@@ -0,0 +1,16 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+</head>
+<body>
+<div id="test">Click/tap me</div>
+<script>
+var el = document.getElementById('test');
+var valueSettableByTest = 0;
+var valueToTest = 0;
+el.addEventListener('click', function() {
+  valueToTest = valueSettableByTest;
+});
+</script>
+</body>
+</html>
diff --git a/catapult/telemetry/telemetry/internal/testing/page_with_link.html b/catapult/telemetry/telemetry/internal/testing/page_with_link.html
new file mode 100644
index 0000000..dc3c008
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/page_with_link.html
@@ -0,0 +1,8 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+</head>
+<body>
+<a id="clickme" href="blank.html">Click me</a>
+</body>
+</html>
diff --git a/catapult/telemetry/telemetry/internal/testing/page_with_swipeables.html b/catapult/telemetry/telemetry/internal/testing/page_with_swipeables.html
new file mode 100644
index 0000000..bd209dc
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/page_with_swipeables.html
@@ -0,0 +1,52 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+<style>
+.swipeable {
+  overflow: scroll;
+  height: 200px;
+  width: 200px;
+}
+.left-right-item,
+.top-bottom-item {
+  height: 100px;
+  width: 100px;
+}
+.left-right-item {
+  display: inline-block;
+}
+.red-bg {
+  background-color: red;
+}
+.blue-bg {
+  background-color: blue;
+}
+.tall-and-wide {
+  background-color: grey;
+  height: 5000px;
+  width: 5000px;
+}
+</style>
+</head>
+<body>
+
+<div id="left-right" class="swipeable">
+  <div style="width: 1000px;">
+    <div class="left-right-item red-bg">Test</div>
+    <div class="left-right-item blue-bg">Test</div>
+    <div class="left-right-item red-bg">Test</div>
+    <div class="left-right-item blue-bg">Test</div>
+  </div>
+</div>
+
+<div id="top-bottom" class="swipeable">
+  <div class="top-bottom-item red-bg">Test</div>
+  <div class="top-bottom-item blue-bg">Test</div>
+  <div class="top-bottom-item red-bg">Test</div>
+  <div class="top-bottom-item blue-bg">Test</div>
+</div>
+
+<div class="tall-and-wide"></div>
+
+</body>
+</html>
diff --git a/catapult/perf_insights/third_party/__init__.py b/catapult/telemetry/telemetry/internal/testing/pages/__init__.py
similarity index 100%
copy from catapult/perf_insights/third_party/__init__.py
copy to catapult/telemetry/telemetry/internal/testing/pages/__init__.py
diff --git a/catapult/telemetry/telemetry/internal/testing/pages/external_page.py b/catapult/telemetry/telemetry/internal/testing/pages/external_page.py
new file mode 100755
index 0000000..c0a1cf3
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/pages/external_page.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.page.page import Page
+
+
+class ExternalPage(Page):
+  def __init__(self, ps):
+    super(ExternalPage, self).__init__('file://foo.html', page_set=ps)
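
For reference, ExternalPage lives in its own module so tests can check that pages defined outside a story set file are discovered and constructed correctly. A minimal sketch of wiring it into a story set, assuming telemetry's story.StorySet API (illustrative, not part of this diff):

# Illustrative only: add the externally defined page to a story set.
from telemetry import story
from telemetry.internal.testing.pages.external_page import ExternalPage

story_set = story.StorySet()
story_set.AddStory(ExternalPage(story_set))
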
diff --git a/catapult/telemetry/telemetry/internal/testing/perf_report_output.txt b/catapult/telemetry/telemetry/internal/testing/perf_report_output.txt
new file mode 100644
index 0000000..d9e9c44
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/perf_report_output.txt
@@ -0,0 +1,2497 @@
+# ========
+# captured on: Thu Aug 15 09:40:36 2013
+# hostname : tonyg-linux.mtv.corp.google.com
+# os release : 3.2.5-gg1236
+# perf version : 3.2.5
+# arch : x86_64
+# nrcpus online : 32
+# nrcpus avail : 32
+# cpudesc : Intel(R) Xeon(R) CPU E5-2690 0 @ 2.90GHz
+# cpuid : GenuineIntel,6,45,7
+# total memory : 65904780 kB
+# cmdline : /usr/bin/perf_3.2.5-gg1236 record --call-graph --pid 20916 --output /tmp/tmprt1Qz7/http___www_techcrunch_com__001.renderer0 
+# event : name = cycles, type = 0, config = 0x0, config1 = 0x0, config2 = 0x0, excl_usr = 0, excl_kern = 0, id = { 40116, 40117, 40118, 40119, 40120, 40121, 40122, 40123, 40124 }
+# HEADER_CPU_TOPOLOGY info available, use -I to display
+# HEADER_NUMA_TOPOLOGY info available, use -I to display
+# ========
+#
+# Events: 825  cycles
+#
+# Overhead^Period^Command^Shared Object^Symbol
+3.22^96878864^HTMLParserThrea^chrome               ^[.] void v8::internal::RelocInfo::Visit<v8::internal::MarkCompactMarkingVisitor>(v8::internal::Heap*)
+            |
+            --- void v8::internal::RelocInfo::Visit<v8::internal::MarkCompactMarkingVisitor>(v8::internal::Heap*)
+
+2.11^63615201^HTMLParserThrea^chrome               ^[.] v8::internal::StaticMarkingVisitor<v8::internal::MarkCompactMarkingVisitor>::MarkMapContents(v8::internal::Heap*, v8::internal::Map*)
+            |
+            --- v8::internal::StaticMarkingVisitor<v8::internal::MarkCompactMarkingVisitor>::MarkMapContents(v8::internal::Heap*, v8::internal::Map*)
+
+1.60^48240439^HTMLParserThrea^chrome               ^[.] WebCore::HTMLTokenizer::nextToken(WebCore::SegmentedString&, WebCore::HTMLToken&)
+            |
+            --- WebCore::HTMLTokenizer::nextToken(WebCore::SegmentedString&, WebCore::HTMLToken&)
+               |          
+                --100.00%-- 0x3b2abdf74074
+
+1.53^46054550^HTMLParserThrea^chrome               ^[.] v8::internal::Scanner::ScanIdentifierOrKeyword()
+            |
+            --- v8::internal::Scanner::ScanIdentifierOrKeyword()
+               |          
+               |--33.87%-- 0x3b2abe3e3700
+               |          
+               |--33.85%-- 0x3b2abdf52240
+               |          
+                --32.28%-- 0x3b2abd766000
+
+1.50^45121317^HTMLParserThrea^chrome               ^[.] sk_memset32_SSE2(unsigned int*, unsigned int, int)
+            |
+            --- sk_memset32_SSE2(unsigned int*, unsigned int, int)
+
+1.43^42913933^HTMLParserThrea^chrome               ^[.] v8::internal::LiveRange::CreateAssignedOperand(v8::internal::Zone*)
+            |
+            --- v8::internal::LiveRange::CreateAssignedOperand(v8::internal::Zone*)
+
+1.43^42913933^HTMLParserThrea^chrome               ^[.] v8::internal::LAllocator::MeetConstraintsBetween(v8::internal::LInstruction*, v8::internal::LInstruction*, int)
+            |
+            --- v8::internal::LAllocator::MeetConstraintsBetween(v8::internal::LInstruction*, v8::internal::LInstruction*, int)
+
+1.32^39786862^HTMLParserThrea^chrome               ^[.] v8::internal::HeapObject::Size()
+            |
+            --- v8::internal::HeapObject::Size()
+               |          
+                --100.00%-- 0x1d00000015
+
+1.27^38271931^HTMLParserThrea^chrome               ^[.] v8::internal::RelocIterator::next()
+            |
+            --- v8::internal::RelocIterator::next()
+               |          
+               |--85.80%-- 0x228100000001
+               |          
+                --14.20%-- 0x7fff30e90200
+
+1.06^31909537^HTMLParserThrea^chrome               ^[.] v8::internal::FlexibleBodyVisitor<v8::internal::MarkCompactMarkingVisitor, v8::internal::FixedArray::BodyDescriptor, void>::Visit(v8::internal::Map*, v8::internal::HeapObject*)
+            |
+            --- v8::internal::FlexibleBodyVisitor<v8::internal::MarkCompactMarkingVisitor, v8::internal::FixedArray::BodyDescriptor, void>::Visit(v8::internal::Map*, v8::internal::HeapObject*)
+
+0.98^29461075^HTMLParserThrea^chrome               ^[.] operator new(unsigned long)
+            |
+            --- operator new(unsigned long)
+
+0.88^26612604^HTMLParserThrea^chrome               ^[.] v8::internal::InnerPointerToCodeCache::GcSafeFindCodeForInnerPointer(unsigned char*)
+            |
+            --- v8::internal::InnerPointerToCodeCache::GcSafeFindCodeForInnerPointer(unsigned char*)
+
+0.83^25019326^HTMLParserThrea^chrome               ^[.] v8::internal::Scanner::Scan()
+            |
+            --- v8::internal::Scanner::Scan()
+               |          
+               |--31.35%-- 0x7fff30e9065000
+               |          
+               |--25.38%-- 0x7fff30e915a000
+               |          
+               |--22.29%-- 0x7fff30e9081000
+               |          
+                --20.99%-- 0x7fff30e91900
+                          0x900000001
+
+0.80^24225440^HTMLParserThrea^chrome               ^[.] WebCore::TokenPreloadScanner::scan(WebCore::CompactHTMLToken const&, WebCore::SegmentedString const&, WTF::Vector<WTF::OwnPtr<WebCore::PreloadRequest>, 0ul>&)
+            |
+            --- WebCore::TokenPreloadScanner::scan(WebCore::CompactHTMLToken const&, WebCore::SegmentedString const&, WTF::Vector<WTF::OwnPtr<WebCore::PreloadRequest>, 0ul>&)
+
+0.73^22115352^HTMLParserThrea^chrome               ^[.] v8::internal::DescriptorArray::Append(v8::internal::Descriptor*)
+            |
+            --- v8::internal::DescriptorArray::Append(v8::internal::Descriptor*)
+
+0.73^21947849^HTMLParserThrea^chrome               ^[.] v8::internal::HRepresentationChangesPhase::InsertRepresentationChangesForValue(v8::internal::HValue*)
+            |
+            --- v8::internal::HRepresentationChangesPhase::InsertRepresentationChangesForValue(v8::internal::HValue*)
+
+0.72^21805774^HTMLParserThrea^chrome               ^[.] SkBlitLCD16OpaqueRow_SSE2(unsigned int*, unsigned short const*, unsigned int, int, unsigned int)
+            |
+            --- SkBlitLCD16OpaqueRow_SSE2(unsigned int*, unsigned short const*, unsigned int, int, unsigned int)
+
+0.71^21252690^HTMLParserThrea^chrome               ^[.] int v8::internal::Search<(v8::internal::SearchMode)1, v8::internal::DescriptorArray>(v8::internal::DescriptorArray*, v8::internal::Name*, int)
+            |
+            --- int v8::internal::Search<(v8::internal::SearchMode)1, v8::internal::DescriptorArray>(v8::internal::DescriptorArray*, v8::internal::Name*, int)
+
+0.63^18864914^HTMLParserThrea^chrome               ^[.] v8::internal::HashTable<v8::internal::StringTableShape, v8::internal::HashTableKey*>::FindEntry(v8::internal::Isolate*, v8::internal::HashTableKey*)
+            |
+            --- v8::internal::HashTable<v8::internal::StringTableShape, v8::internal::HashTableKey*>::FindEntry(v8::internal::Isolate*, v8::internal::HashTableKey*)
+               |          
+                --100.00%-- 0x3b2abdff2b80
+
+0.56^16917055^HTMLParserThrea^chrome               ^[.] WebCore::LayoutUnit::LayoutUnit(int)
+            |
+            --- WebCore::LayoutUnit::LayoutUnit(int)
+               |          
+                --100.00%-- (nil)
+
+0.54^16308133^HTMLParserThrea^chrome               ^[.] event_base_loop
+            |
+            --- event_base_loop
+                0x7f7a0b959fc0
+                epoll_init
+                0x7265727265666572
+
+0.51^15412114^HTMLParserThrea^chrome               ^[.] tc_malloc
+            |
+            --- tc_malloc
+
+0.51^15239999^HTMLParserThrea^chrome               ^[.] v8::internal::Scanner::Next()
+            |
+            --- v8::internal::Scanner::Next()
+               |          
+               |--23.51%-- 0x7fff30e9054a
+               |          
+               |--23.51%-- 0x7fff30e906ca
+               |          
+               |--23.38%-- (nil)
+               |          
+               |--15.20%-- 0x7fff30e9081000
+               |          
+                --14.39%-- 0x7fff30e9048c
+
+0.47^14231099^HTMLParserThrea^chrome               ^[.] v8::internal::Heap::DoScavenge(v8::internal::ObjectVisitor*, unsigned char*)
+            |
+            --- v8::internal::Heap::DoScavenge(v8::internal::ObjectVisitor*, unsigned char*)
+
+0.47^14229607^HTMLParserThrea^chrome               ^[.] bool WebCore::SelectorChecker::checkOne<WebCore::DOMSiblingTraversalStrategy>(WebCore::SelectorChecker::SelectorCheckingContext const&, WebCore::DOMSiblingTraversalStrategy const&) const
+            |
+            --- bool WebCore::SelectorChecker::checkOne<WebCore::DOMSiblingTraversalStrategy>(WebCore::SelectorChecker::SelectorCheckingContext const&, WebCore::DOMSiblingTraversalStrategy const&) const
+               |          
+               |--25.11%-- 0x1e83176cd048
+               |          
+               |--25.08%-- 0x7f7a0b9980e000
+               |          
+               |--24.93%-- 0x7f7a00000000
+               |          
+                --24.88%-- 0x7f7a0b997c9000
+
+0.46^13955450^HTMLParserThrea^chrome               ^[.] tc_realloc
+            |
+            --- tc_realloc
+
+0.46^13941187^HTMLParserThrea^chrome               ^[.] WebCore::minimumValueForLength(WebCore::Length const&, WebCore::LayoutUnit, WebCore::RenderView*, bool)
+            |
+            --- WebCore::minimumValueForLength(WebCore::Length const&, WebCore::LayoutUnit, WebCore::RenderView*, bool)
+
+0.46^13834654^HTMLParserThrea^chrome               ^[.] v8::internal::JSReceiver::LocalLookup(v8::internal::Name*, v8::internal::LookupResult*, bool)
+            |
+            --- v8::internal::JSReceiver::LocalLookup(v8::internal::Name*, v8::internal::LookupResult*, bool)
+               |          
+               |--33.05%-- 0x7fff30e91860
+               |          
+               |--26.80%-- (nil)
+               |          
+               |--20.72%-- 0x7fff30e91a10
+               |          
+                --19.43%-- 0x7fff30e90860
+
+0.44^13202625^HTMLParserThrea^chrome               ^[.] v8::internal::Expression::Expression(v8::internal::Isolate*)
+            |
+            --- v8::internal::Expression::Expression(v8::internal::Isolate*)
+               |          
+               |--35.68%-- 0x7fff30e9198000
+               |          
+               |--32.51%-- 0x7f7a00000000
+               |          
+                --31.81%-- (nil)
+
+0.44^13149418^HTMLParserThrea^chrome               ^[.] v8::internal::String::IsOneByteEqualTo(v8::internal::Vector<unsigned char const>)
+            |
+            --- v8::internal::String::IsOneByteEqualTo(v8::internal::Vector<unsigned char const>)
+
+0.43^13061272^HTMLParserThrea^chrome               ^[.] event_active
+            |
+            --- event_active
+                0x40
+
+0.43^12899765^HTMLParserThrea^chrome               ^[.] v8::internal::JSObject::LocalLookupRealNamedProperty(v8::internal::Name*, v8::internal::LookupResult*)
+            |
+            --- v8::internal::JSObject::LocalLookupRealNamedProperty(v8::internal::Name*, v8::internal::LookupResult*)
+               |          
+               |--79.39%-- (nil)
+               |          
+                --20.61%-- 0x7fff30e90ab0
+
+0.42^12696531^HTMLParserThrea^chrome               ^[.] v8::internal::IsIdentifier(v8::internal::UnicodeCache*, v8::internal::Name*)
+            |
+            --- v8::internal::IsIdentifier(v8::internal::UnicodeCache*, v8::internal::Name*)
+
+0.41^12442842^HTMLParserThrea^chrome               ^[.] v8::internal::PointersUpdatingVisitor::VisitPointers(v8::internal::Object**, v8::internal::Object**)
+            |
+            --- v8::internal::PointersUpdatingVisitor::VisitPointers(v8::internal::Object**, v8::internal::Object**)
+
+0.41^12212739^HTMLParserThrea^chrome               ^[.] v8::internal::StubCompiler::CheckPrototypes(v8::internal::Handle<v8::internal::JSObject>, v8::internal::Register, v8::internal::Handle<v8::internal::JSObject>, v8::internal::Register, v8::internal::Register, v8::internal::Register, v8::internal::Handle<v8::internal::Name>, int, v8::internal::Label*, v8::internal::PrototypeCheckType)
+            |
+            --- v8::internal::StubCompiler::CheckPrototypes(v8::internal::Handle<v8::internal::JSObject>, v8::internal::Register, v8::internal::Handle<v8::internal::JSObject>, v8::internal::Register, v8::internal::Register, v8::internal::Register, v8::internal::Handle<v8::internal::Name>, int, v8::internal::Label*, v8::internal::PrototypeCheckType)
+               |          
+                --100.00%-- 0x29f988404121
+
+0.40^11929902^HTMLParserThrea^chrome               ^[.] _ZN2v88internal12BinarySearchILNS0_10SearchModeE1ENS0_15DescriptorArrayEEEiPT0_PNS0_4NameEiii.constprop.530
+            |
+            --- _ZN2v88internal12BinarySearchILNS0_10SearchModeE1ENS0_15DescriptorArrayEEEiPT0_PNS0_4NameEiii.constprop.530
+
+0.36^10844486^HTMLParserThrea^chrome               ^[.] content::VideoCaptureMessageFilter::OnMessageReceived(IPC::Message const&)
+            |
+            --- content::VideoCaptureMessageFilter::OnMessageReceived(IPC::Message const&)
+                0xfffffffc0000000e
+                IPC::ChannelProxy::Context::OnChannelConnected(int)
+                0x6c894cfb8948d824
+
+0.36^10745270^HTMLParserThrea^chrome               ^[.] v8::internal::StaticMarkingVisitor<v8::internal::MarkCompactMarkingVisitor>::VisitJSFunctionStrongCode(v8::internal::Heap*, v8::internal::HeapObject*)
+            |
+            --- v8::internal::StaticMarkingVisitor<v8::internal::MarkCompactMarkingVisitor>::VisitJSFunctionStrongCode(v8::internal::Heap*, v8::internal::HeapObject*)
+
+0.36^10717680^HTMLParserThrea^chrome               ^[.] v8::internal::TemplateHashMapImpl<v8::internal::ZoneAllocationPolicy>::Lookup(void*, unsigned int, bool, v8::internal::ZoneAllocationPolicy)
+            |
+            --- v8::internal::TemplateHashMapImpl<v8::internal::ZoneAllocationPolicy>::Lookup(void*, unsigned int, bool, v8::internal::ZoneAllocationPolicy)
+
+0.36^10710349^HTMLParserThrea^chrome               ^[.] v8::internal::Code::CopyFrom(v8::internal::CodeDesc const&)
+            |
+            --- v8::internal::Code::CopyFrom(v8::internal::CodeDesc const&)
+
+0.35^10674297^HTMLParserThrea^chrome               ^[.] v8::internal::Parser::ParseBinaryExpression(int, bool, bool*)
+            |
+            --- v8::internal::Parser::ParseBinaryExpression(int, bool, bool*)
+               |          
+               |--66.59%-- (nil)
+               |          
+                --33.41%-- 0x7f7a0b98e010
+
+0.35^10650066^HTMLParserThrea^chrome               ^[.] WebCore::SelectorFilter::pushParentStackFrame(WebCore::Element*)
+            |
+            --- WebCore::SelectorFilter::pushParentStackFrame(WebCore::Element*)
+
+0.35^10614251^HTMLParserThrea^libc-2.15.so         ^[.] __memcpy_ssse3_back
+            |
+            --- __memcpy_ssse3_back
+
+0.35^10611142^HTMLParserThrea^chrome               ^[.] v8::internal::GlobalHandles::ComputeObjectGroupsAndImplicitReferences()
+            |
+            --- v8::internal::GlobalHandles::ComputeObjectGroupsAndImplicitReferences()
+
+0.35^10527946^HTMLParserThrea^chrome               ^[.] tcmalloc::FL_PopRange(void**, int, void**, void**)
+            |
+            --- tcmalloc::FL_PopRange(void**, int, void**, void**)
+
+0.35^10514181^HTMLParserThrea^libpthread-2.15.so   ^[.] pthread_getspecific
+            |
+            --- pthread_getspecific
+
+0.35^10497704^HTMLParserThrea^chrome               ^[.] v8::internal::IC::PostPatching(unsigned char*, v8::internal::Code*, v8::internal::Code*)
+            |
+            --- v8::internal::IC::PostPatching(unsigned char*, v8::internal::Code*, v8::internal::Code*)
+
+0.35^10452932^HTMLParserThrea^chrome               ^[.] _ZN2v88internal12StringHasher13AddCharactersIhEEvPKT_i.constprop.518
+            |
+            --- _ZN2v88internal12StringHasher13AddCharactersIhEEvPKT_i.constprop.518
+
+0.35^10426463^HTMLParserThrea^chrome               ^[.] _ZN2v88internalL13LookupForReadENS0_6HandleINS0_6ObjectEEENS1_INS0_6StringEEEPNS0_12LookupResultE.constprop.171
+            |
+            --- _ZN2v88internalL13LookupForReadENS0_6HandleINS0_6ObjectEEENS1_INS0_6StringEEEPNS0_12LookupResultE.constprop.171
+               |          
+               |--65.70%-- (nil)
+               |          
+                --34.30%-- 0x7fff30e91e38
+
+0.34^10228287^HTMLParserThrea^chrome               ^[.] WTF::Unicode::convertLatin1ToUTF8(unsigned char const**, unsigned char const*, char**, char*)
+            |
+            --- WTF::Unicode::convertLatin1ToUTF8(unsigned char const**, unsigned char const*, char**, char*)
+               |          
+               |--34.63%-- 0x3b2abdeedb48
+               |          
+               |--32.97%-- webkit_glue::(anonymous namespace)::HeaderFlattener::visitHeader(WebKit::WebString const&, WebKit::WebString const&)
+               |          0x3b2abdfe9230
+               |          
+                --32.40%-- void WTF::StringBuilder::reallocateBuffer<unsigned char>(unsigned int)
+
+0.34^10139628^HTMLParserThrea^chrome               ^[.] base::subtle::RefCountedThreadSafeBase::Release() const
+            |
+            --- base::subtle::RefCountedThreadSafeBase::Release() const
+               |          
+               |--63.45%-- base::internal::Invoker<1, base::internal::BindState<base::internal::RunnableAdapter<void (base::BaseTimerTaskInternal::*)()>, void ()(base::BaseTimerTaskInternal*), void ()(base::internal::OwnedWrapper<base::BaseTimerTaskInternal>)>, void ()(base::BaseTimerTaskInternal*)>::Run(base::internal::BindStateBase*)
+               |          
+                --36.55%-- 0x7f7a0b8ff200
+
+0.33^9932889^HTMLParserThrea^chrome               ^[.] void WebCore::StyleResolver::applyProperties<(WebCore::StyleResolver::StyleApplicationPass)1>(WebCore::StyleResolverState&, WebCore::StylePropertySet const*, WebCore::StyleRule*, bool, bool, WebCore::PropertyWhitelistType)
+            |
+            --- void WebCore::StyleResolver::applyProperties<(WebCore::StyleResolver::StyleApplicationPass)1>(WebCore::StyleResolverState&, WebCore::StylePropertySet const*, WebCore::StyleRule*, bool, bool, WebCore::PropertyWhitelistType)
+
+0.33^9928749^HTMLParserThrea^chrome               ^[.] v8::internal::CodeStub::GetCode(v8::internal::Isolate*)
+            |
+            --- v8::internal::CodeStub::GetCode(v8::internal::Isolate*)
+
+0.33^9868335^HTMLParserThrea^chrome               ^[.] v8::internal::Heap::AllocateSharedFunctionInfo(v8::internal::Object*)
+            |
+            --- v8::internal::Heap::AllocateSharedFunctionInfo(v8::internal::Object*)
+
+0.33^9807337^HTMLParserThrea^chrome               ^[.] v8::internal::Zone::New(int)
+            |
+            --- v8::internal::Zone::New(int)
+               |          
+                --100.00%-- 0x7fff30e91d48
+
+0.32^9579492^HTMLParserThrea^chrome               ^[.] v8::internal::Object::GetProperty(v8::internal::Object*, v8::internal::LookupResult*, v8::internal::Name*, PropertyAttributes*)
+            |
+            --- v8::internal::Object::GetProperty(v8::internal::Object*, v8::internal::LookupResult*, v8::internal::Name*, PropertyAttributes*)
+               |          
+               |--58.83%-- 0x2281c1418f41
+               |          
+                --41.17%-- (nil)
+
+0.32^9578890^HTMLParserThrea^chrome               ^[.] _ZN2v88internal15DescriptorArray3SetEiPNS0_10DescriptorERKNS1_16WhitenessWitnessE.isra.252
+            |
+            --- _ZN2v88internal15DescriptorArray3SetEiPNS0_10DescriptorERKNS1_16WhitenessWitnessE.isra.252
+
+0.31^9412432^HTMLParserThrea^chrome               ^[.] WebCore::ScrollView::unscaledVisibleContentSize(WebCore::ScrollableArea::VisibleContentRectIncludesScrollbars) const
+            |
+            --- WebCore::ScrollView::unscaledVisibleContentSize(WebCore::ScrollableArea::VisibleContentRectIncludesScrollbars) const
+
+0.31^9384389^HTMLParserThrea^chrome               ^[.] v8::internal::CallICBase::ReceiverToObjectIfRequired(v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>)
+            |
+            --- v8::internal::CallICBase::ReceiverToObjectIfRequired(v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>)
+               |          
+               |--37.40%-- 0x2281c141ab41
+               |          
+               |--35.85%-- 0x2281c1419541
+               |          
+                --26.75%-- 0x2281c141a941
+
+0.31^9309826^HTMLParserThrea^chrome               ^[.] v8::internal::Factory::NewFunctionFromSharedFunctionInfo(v8::internal::Handle<v8::internal::SharedFunctionInfo>, v8::internal::Handle<v8::internal::Context>, v8::internal::PretenureFlag)
+            |
+            --- v8::internal::Factory::NewFunctionFromSharedFunctionInfo(v8::internal::Handle<v8::internal::SharedFunctionInfo>, v8::internal::Handle<v8::internal::Context>, v8::internal::PretenureFlag)
+
+0.31^9238181^HTMLParserThrea^chrome               ^[.] v8::internal::Parser::ParseAssignmentExpression(bool, bool*)
+            |
+            --- v8::internal::Parser::ParseAssignmentExpression(bool, bool*)
+               |          
+               |--73.43%-- (nil)
+               |          
+                --26.57%-- 0x7fff30e9136000
+
+0.30^9076438^HTMLParserThrea^chrome               ^[.] WebCore::RenderObject* WebCore::bidiNextShared<WebCore::BidiResolver<WebCore::InlineIterator, WebCore::BidiRun> >(WebCore::RenderObject*, WebCore::RenderObject*, WebCore::BidiResolver<WebCore::InlineIterator, WebCore::BidiRun>*, WebCore::EmptyInlineBehavior, bool*)
+            |
+            --- WebCore::RenderObject* WebCore::bidiNextShared<WebCore::BidiResolver<WebCore::InlineIterator, WebCore::BidiRun> >(WebCore::RenderObject*, WebCore::RenderObject*, WebCore::BidiResolver<WebCore::InlineIterator, WebCore::BidiRun>*, WebCore::EmptyInlineBehavior, bool*)
+               |          
+                --100.00%-- 0x3b2abd841928
+
+0.30^8941888^HTMLParserThrea^chrome               ^[.] v8::internal::Heap::AllocateStruct(v8::internal::InstanceType)
+            |
+            --- v8::internal::Heap::AllocateStruct(v8::internal::InstanceType)
+
+0.29^8602723^HTMLParserThrea^chrome               ^[.] v8::internal::LoadIC::ComputeLoadHandler(v8::internal::LookupResult*, v8::internal::Handle<v8::internal::JSObject>, v8::internal::Handle<v8::internal::String>)
+            |
+            --- v8::internal::LoadIC::ComputeLoadHandler(v8::internal::LookupResult*, v8::internal::Handle<v8::internal::JSObject>, v8::internal::Handle<v8::internal::String>)
+               |          
+                --100.00%-- 0x7fff30e90648
+
+0.28^8470525^HTMLParserThrea^chrome               ^[.] void v8::internal::String::WriteToFlat<unsigned char>(v8::internal::String*, unsigned char*, int, int)
+            |
+            --- void v8::internal::String::WriteToFlat<unsigned char>(v8::internal::String*, unsigned char*, int, int)
+
+0.28^8366209^HTMLParserThrea^chrome               ^[.] WTF::StringImpl::hashSlowCase() const
+            |
+            --- WTF::StringImpl::hashSlowCase() const
+               |          
+               |--36.45%-- 0x7f7a11ebab98
+               |          0x7f7a0b9bd000
+               |          
+               |--33.90%-- 0xfffe02a2
+               |          0x1e83176903e8
+               |          
+                --29.65%-- 0x3b2abd1fa100
+
+0.27^8156738^HTMLParserThrea^chrome               ^[.] v8::internal::Runtime_ParallelRecompile(int, v8::internal::Object**, v8::internal::Isolate*)
+            |
+            --- v8::internal::Runtime_ParallelRecompile(int, v8::internal::Object**, v8::internal::Isolate*)
+                0x2281c142b871
+                0x2281c1f35545
+                0x2281c1f31dba
+                0x2281c140e854
+                0x2281c142b65e
+                0x2281c1417d97
+                v8::internal::Invoke(bool, v8::internal::Handle<v8::internal::JSFunction>, v8::internal::Handle<v8::internal::Object>, int, v8::internal::Handle<v8::internal::Object>*, bool*)
+
+0.27^8131676^HTMLParserThrea^chrome               ^[.] v8::internal::FixedArray::set(int, v8::internal::Object*)
+            |
+            --- v8::internal::FixedArray::set(int, v8::internal::Object*)
+
+0.27^8075110^HTMLParserThrea^chrome               ^[.] v8::internal::Heap::AllocateRaw(int, v8::internal::AllocationSpace, v8::internal::AllocationSpace)
+            |
+            --- v8::internal::Heap::AllocateRaw(int, v8::internal::AllocationSpace, v8::internal::AllocationSpace)
+
+0.26^7972021^HTMLParserThrea^chrome               ^[.] WebCore::MarkupAccumulator::appendCharactersReplacingEntities(WTF::StringBuilder&, WTF::String const&, unsigned int, unsigned int, WebCore::EntityMask)
+            |
+            --- WebCore::MarkupAccumulator::appendCharactersReplacingEntities(WTF::StringBuilder&, WTF::String const&, unsigned int, unsigned int, WebCore::EntityMask)
+
+0.25^7610316^HTMLParserThrea^chrome               ^[.] _ZN12_GLOBAL__N_121do_free_with_callbackEPvPFvS0_E.constprop.42
+            |
+            --- _ZN12_GLOBAL__N_121do_free_with_callbackEPvPFvS0_E.constprop.42
+
+0.25^7488722^HTMLParserThrea^chrome               ^[.] WebCore::NodeListV8Internal::indexedPropertyGetterCallback(unsigned int, v8::PropertyCallbackInfo<v8::Value> const&)
+            |
+            --- WebCore::NodeListV8Internal::indexedPropertyGetterCallback(unsigned int, v8::PropertyCallbackInfo<v8::Value> const&)
+                0x7f7a0b900000
+               |          
+                --100.00%-- 0x7f7a0b900000
+
+0.25^7420533^HTMLParserThrea^chrome               ^[.] webkit_glue::WebThreadBase::TaskObserverAdapter::DidProcessTask(base::PendingTask const&)
+            |
+            --- webkit_glue::WebThreadBase::TaskObserverAdapter::DidProcessTask(base::PendingTask const&)
+                0x7f7a0b8ff200
+
+0.24^7201464^HTMLParserThrea^chrome               ^[.] WebCore::RenderBox::computeRectForRepaint(WebCore::RenderLayerModelObject const*, WebCore::LayoutRect&, bool) const
+            |
+            --- WebCore::RenderBox::computeRectForRepaint(WebCore::RenderLayerModelObject const*, WebCore::LayoutRect&, bool) const
+               |          
+               |--50.28%-- 0x124000002bc0
+               |          
+                --49.72%-- 0x194000003e80
+
+0.24^7186236^HTMLParserThrea^chrome               ^[.] v8::internal::StoreBuffer::IteratePointersInStoreBuffer(void (*)(v8::internal::HeapObject**, v8::internal::HeapObject*), bool)
+            |
+            --- v8::internal::StoreBuffer::IteratePointersInStoreBuffer(void (*)(v8::internal::HeapObject**, v8::internal::HeapObject*), bool)
+
+0.24^7180913^HTMLParserThrea^chrome               ^[.] v8::internal::StaticMarkingVisitor<v8::internal::MarkCompactMarkingVisitor>::VisitJSFunction(v8::internal::Map*, v8::internal::HeapObject*)
+            |
+            --- v8::internal::StaticMarkingVisitor<v8::internal::MarkCompactMarkingVisitor>::VisitJSFunction(v8::internal::Map*, v8::internal::HeapObject*)
+
+0.24^7180572^HTMLParserThrea^chrome               ^[.] v8::internal::MarkCompactCollector::ClearNonLivePrototypeTransitions(v8::internal::Map*)
+            |
+            --- v8::internal::MarkCompactCollector::ClearNonLivePrototypeTransitions(v8::internal::Map*)
+
+0.24^7178618^HTMLParserThrea^chrome               ^[.] v8::internal::MarkCompactCollector::SweepSpace(v8::internal::PagedSpace*, v8::internal::MarkCompactCollector::SweeperType)
+            |
+            --- v8::internal::MarkCompactCollector::SweepSpace(v8::internal::PagedSpace*, v8::internal::MarkCompactCollector::SweeperType)
+
+0.24^7154254^HTMLParserThrea^chrome               ^[.] WTF::HashTableConstIterator<WebCore::RenderObject const*, WebCore::RenderObject const*, WTF::IdentityExtractor, WTF::PtrHash<WebCore::RenderObject const*>, WTF::HashTraits<WebCore::RenderObject const*>, WTF::HashTraits<WebCore::RenderObject const*> > WTF::HashTable<WebCore::RenderObject const*, WebCore::RenderObject const*, WTF::IdentityExtractor, WTF::PtrHash<WebCore::RenderObject const*>, WTF::HashTraits<WebCore::RenderObject const*>, WTF::HashTraits
+            |
+            --- WTF::HashTableConstIterator<WebCore::RenderObject const*, WebCore::RenderObject const*, WTF::IdentityExtractor, WTF::PtrHash<WebCore::RenderObject const*>, WTF::HashTraits<WebCore::RenderObject const*>, WTF::HashTraits<WebCore::RenderObject const*> > WTF::HashTable<WebCore::RenderObject const*, WebCore::RenderObject const*, WTF::IdentityExtractor, WTF::PtrHash<WebCore::RenderObject const*>, WTF::HashTraits<WebCore::RenderObject const*>, WTF::HashTraits<WebCore::RenderObject const*> >::find<WTF::IdentityHashTranslator<WTF::PtrHash<WebCore::RenderObject const*> >, WebCore::RenderObject const*>(WebCore::RenderObject const* const&) const
+
+0.24^7150261^HTMLParserThrea^chrome               ^[.] void url_parse::(anonymous namespace)::DoParseAuthority<char>(char const*, url_parse::Component const&, url_parse::Component*, url_parse::Component*, url_parse::Component*, url_parse::Component*)
+            |
+            --- void url_parse::(anonymous namespace)::DoParseAuthority<char>(char const*, url_parse::Component const&, url_parse::Component*, url_parse::Component*, url_parse::Component*, url_parse::Component*)
+               |          
+                --100.00%-- 0xffffffff00000000
+
+0.24^7147589^HTMLParserThrea^chrome               ^[.] v8::internal::Heap::AllocateFixedArray(int)
+            |
+            --- v8::internal::Heap::AllocateFixedArray(int)
+
+0.24^7145279^HTMLParserThrea^chrome               ^[.] v8::internal::Scope::MustAllocate(v8::internal::Variable*)
+            |
+            --- v8::internal::Scope::MustAllocate(v8::internal::Variable*)
+
+0.24^7140300^HTMLParserThrea^chrome               ^[.] WebCore::RenderBox::fillAvailableMeasure(WebCore::LayoutUnit, WebCore::LayoutUnit&, WebCore::LayoutUnit&) const
+            |
+            --- WebCore::RenderBox::fillAvailableMeasure(WebCore::LayoutUnit, WebCore::LayoutUnit&, WebCore::LayoutUnit&) const
+
+0.24^7139300^HTMLParserThrea^chrome               ^[.] WebCore::RenderBlock::computeInlinePreferredLogicalWidths(WebCore::LayoutUnit&, WebCore::LayoutUnit&)
+            |
+            --- WebCore::RenderBlock::computeInlinePreferredLogicalWidths(WebCore::LayoutUnit&, WebCore::LayoutUnit&)
+
+0.24^7134579^HTMLParserThrea^chrome               ^[.] v8::internal::Scanner::ScanString()
+            |
+            --- v8::internal::Scanner::ScanString()
+
+0.24^7132161^HTMLParserThrea^chrome               ^[.] WebCore::ElementRuleCollector::collectRuleIfMatches(WebCore::RuleData const&, WebCore::MatchRequest const&, WebCore::RuleRange&)
+            |
+            --- WebCore::ElementRuleCollector::collectRuleIfMatches(WebCore::RuleData const&, WebCore::MatchRequest const&, WebCore::RuleRange&)
+
+0.24^7127556^HTMLParserThrea^chrome               ^[.] v8::internal::TemplateHashMapImpl<v8::internal::FreeStoreAllocationPolicy>::Lookup(void*, unsigned int, bool, v8::internal::FreeStoreAllocationPolicy)
+            |
+            --- v8::internal::TemplateHashMapImpl<v8::internal::FreeStoreAllocationPolicy>::Lookup(void*, unsigned int, bool, v8::internal::FreeStoreAllocationPolicy)
+
+0.24^7124874^HTMLParserThrea^chrome               ^[.] url_canon::RemoveURLWhitespace(char const*, int, url_canon::CanonOutputT<char>*, int*)
+            |
+            --- url_canon::RemoveURLWhitespace(char const*, int, url_canon::CanonOutputT<char>*, int*)
+
+0.24^7120126^HTMLParserThrea^chrome               ^[.] v8::internal::RelocInfoWriter::Write(v8::internal::RelocInfo const*)
+            |
+            --- v8::internal::RelocInfoWriter::Write(v8::internal::RelocInfo const*)
+
+0.24^7115866^HTMLParserThrea^chrome               ^[.] v8::internal::Parser::ParseMemberWithNewPrefixesExpression(v8::internal::PositionStack*, bool*)
+            |
+            --- v8::internal::Parser::ParseMemberWithNewPrefixesExpression(v8::internal::PositionStack*, bool*)
+               |          
+               |--50.15%-- 0x7fff30e9065000
+               |          
+                --49.85%-- 0x7fff30e914d000
+
+0.24^7113138^HTMLParserThrea^chrome               ^[.] v8::internal::JSObject::AddFastPropertyUsingMap(v8::internal::Map*, v8::internal::Name*, v8::internal::Object*, int, v8::internal::Representation)
+            |
+            --- v8::internal::JSObject::AddFastPropertyUsingMap(v8::internal::Map*, v8::internal::Name*, v8::internal::Object*, int, v8::internal::Representation)
+
+0.24^7106943^HTMLParserThrea^chrome               ^[.] v8::internal::StoreBuffer::Compact()
+            |
+            --- v8::internal::StoreBuffer::Compact()
+
+0.24^7104154^HTMLParserThrea^chrome               ^[.] WebCore::VisitedLinkState::determineLinkStateSlowCase(WebCore::Element*)
+            |
+            --- WebCore::VisitedLinkState::determineLinkStateSlowCase(WebCore::Element*)
+
+0.24^7089266^HTMLParserThrea^chrome               ^[.] WebCore::selectorIdentifierHash(WebCore::CSSSelector const*)
+            |
+            --- WebCore::selectorIdentifierHash(WebCore::CSSSelector const*)
+
+0.24^7086923^HTMLParserThrea^chrome               ^[.] v8::internal::InnerPointerToCodeCache::GetCacheEntry(unsigned char*)
+            |
+            --- v8::internal::InnerPointerToCodeCache::GetCacheEntry(unsigned char*)
+               |          
+                --100.00%-- 0x7fff30e912c0
+                          0xc6a3430a8a9
+
+0.24^7081815^HTMLParserThrea^chrome               ^[.] v8::internal::Parser::ParseUnaryExpression(bool*)
+            |
+            --- v8::internal::Parser::ParseUnaryExpression(bool*)
+                (nil)
+
+0.24^7079188^HTMLParserThrea^chrome               ^[.] WebCore::SelectorFilter::fastRejectSelector(WebCore::CSSSelector const*) const
+            |
+            --- WebCore::SelectorFilter::fastRejectSelector(WebCore::CSSSelector const*) const
+
+0.24^7077714^HTMLParserThrea^chrome               ^[.] v8::internal::CallICBase::LoadFunction(v8::internal::InlineCacheState, int, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::String>)
+            |
+            --- v8::internal::CallICBase::LoadFunction(v8::internal::InlineCacheState, int, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::String>)
+               |          
+               |--50.54%-- 0x7fff30e90d30
+               |          
+                --49.46%-- 0x7fff30e90270
+
+0.23^7065769^HTMLParserThrea^chrome               ^[.] WebCore::RenderBoxModelObject::computedCSSPadding(WebCore::Length) const
+            |
+            --- WebCore::RenderBoxModelObject::computedCSSPadding(WebCore::Length) const
+
+0.23^7059576^HTMLParserThrea^chrome               ^[.] v8::internal::Deserializer::ReadChunk(v8::internal::Object**, v8::internal::Object**, int, unsigned char*)
+            |
+            --- v8::internal::Deserializer::ReadChunk(v8::internal::Object**, v8::internal::Object**, int, unsigned char*)
+                0x7f7a0b900000
+
+0.23^7058229^HTMLParserThrea^chrome               ^[.] visitedlink::VisitedLinkCommon::IsVisited(unsigned long) const
+            |
+            --- visitedlink::VisitedLinkCommon::IsVisited(unsigned long) const
+
+0.23^7051436^HTMLParserThrea^chrome               ^[.] v8::internal::AllocateFixedArrayWithFiller(v8::internal::Heap*, int, v8::internal::PretenureFlag, v8::internal::Object*)
+            |
+            --- v8::internal::AllocateFixedArrayWithFiller(v8::internal::Heap*, int, v8::internal::PretenureFlag, v8::internal::Object*)
+
+0.23^7043481^HTMLParserThrea^chrome               ^[.] v8::internal::FuncNameInferrer::PushLiteralName(v8::internal::Handle<v8::internal::String>)
+            |
+            --- v8::internal::FuncNameInferrer::PushLiteralName(v8::internal::Handle<v8::internal::String>)
+
+0.23^7023232^HTMLParserThrea^chrome               ^[.] v8::internal::VariableProxy::VariableProxy(v8::internal::Isolate*, v8::internal::Handle<v8::internal::String>, bool, v8::internal::Interface*, int)
+            |
+            --- v8::internal::VariableProxy::VariableProxy(v8::internal::Isolate*, v8::internal::Handle<v8::internal::String>, bool, v8::internal::Interface*, int)
+               |          
+                --100.00%-- 0x7fff30e90ed8
+
+0.23^7022335^HTMLParserThrea^chrome               ^[.] WebCore::V8CSSStyleDeclaration::createWrapper(WTF::PassRefPtr<WebCore::CSSStyleDeclaration>, v8::Handle<v8::Object>, v8::Isolate*)
+            |
+            --- WebCore::V8CSSStyleDeclaration::createWrapper(WTF::PassRefPtr<WebCore::CSSStyleDeclaration>, v8::Handle<v8::Object>, v8::Isolate*)
+                0x7f7a0b93a3c0
+
+0.23^7015773^HTMLParserThrea^chrome               ^[.] WebCore::RenderBlock::LineBreaker::nextSegmentBreak(WebCore::BidiResolver<WebCore::InlineIterator, WebCore::BidiRun>&, WebCore::LineInfo&, WebCore::RenderBlock::RenderTextInfo&, WebCore::RenderBlock::FloatingObject*, unsigned int, WTF::Vector<WebCore::WordMeasurement, 64ul>&)
+            |
+            --- WebCore::RenderBlock::LineBreaker::nextSegmentBreak(WebCore::BidiResolver<WebCore::InlineIterator, WebCore::BidiRun>&, WebCore::LineInfo&, WebCore::RenderBlock::RenderTextInfo&, WebCore::RenderBlock::FloatingObject*, unsigned int, WTF::Vector<WebCore::WordMeasurement, 64ul>&)
+
+0.23^7009841^HTMLParserThrea^chrome               ^[.] v8::internal::CodeCache::LookupDefaultCache(v8::internal::Name*, unsigned int)
+            |
+            --- v8::internal::CodeCache::LookupDefaultCache(v8::internal::Name*, unsigned int)
+
+0.23^6940000^HTMLParserThrea^chrome               ^[.] v8::internal::PositionsRecorder::RecordPosition(int)
+            |
+            --- v8::internal::PositionsRecorder::RecordPosition(int)
+
+0.23^6866416^HTMLParserThrea^chrome               ^[.] _ZN2v88internal6Parser25ParseVariableDeclarationsENS1_26VariableDeclarationContextEPNS1_29VariableDeclarationPropertiesEPNS0_8ZoneListINS0_6HandleINS0_6StringEEEEEPS8_Pb.constprop.454
+            |
+            --- _ZN2v88internal6Parser25ParseVariableDeclarationsENS1_26VariableDeclarationContextEPNS1_29VariableDeclarationPropertiesEPNS0_8ZoneListINS0_6HandleINS0_6StringEEEEEPS8_Pb.constprop.454
+
+0.23^6858851^HTMLParserThrea^chrome               ^[.] S32A_Opaque_BlitRow32_SSE2(unsigned int*, unsigned int const*, int, unsigned int)
+            |
+            --- S32A_Opaque_BlitRow32_SSE2(unsigned int*, unsigned int const*, int, unsigned int)
+
+0.23^6846415^HTMLParserThrea^chrome               ^[.] v8::internal::String::SlowEquals(v8::internal::String*)
+            |
+            --- v8::internal::String::SlowEquals(v8::internal::String*)
+
+0.22^6771375^HTMLParserThrea^chrome               ^[.] v8::internal::Parser::Expect(v8::internal::Token::Value, bool*)
+            |
+            --- v8::internal::Parser::Expect(v8::internal::Token::Value, bool*)
+               |          
+               |--52.15%-- 0x7fff30e905d002
+               |          
+                --47.85%-- (nil)
+
+0.22^6645010^HTMLParserThrea^chrome               ^[.] v8::internal::DeoptimizerData::FindDeoptimizingCode(unsigned char*)
+            |
+            --- v8::internal::DeoptimizerData::FindDeoptimizingCode(unsigned char*)
+
+0.22^6627879^HTMLParserThrea^chrome               ^[.] v8::internal::StoreIC::Store(v8::internal::InlineCacheState, v8::internal::StrictModeFlag, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::String>, v8::internal::Handle<v8::internal::Object>, v8::internal::JSReceiver::StoreFromKeyed)
+            |
+            --- v8::internal::StoreIC::Store(v8::internal::InlineCacheState, v8::internal::StrictModeFlag, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::String>, v8::internal::Handle<v8::internal::Object>, v8::internal::JSReceiver::StoreFromKeyed)
+               |          
+               |--53.63%-- 0xc6a34268609
+               |          
+                --46.37%-- 0xc6a34ea2a81
+
+0.22^6600293^HTMLParserThrea^chrome               ^[.] WTF::HashTableAddResult<WTF::HashTableIterator<WTF::AtomicString, WTF::AtomicString, WTF::IdentityExtractor, WTF::AtomicStringHash, WTF::HashTraits<WTF::AtomicString>, WTF::HashTraits<WTF::AtomicString> > > WTF::HashTable<WTF::AtomicString, WTF::AtomicString, WTF::IdentityExtractor, WTF::AtomicStringHash, WTF::HashTraits<WTF::AtomicString>, WTF::HashTraits<WTF::AtomicString> >::add<WTF::IdentityHashTranslator<WTF::AtomicStringHash>, WTF::AtomicString, 
+            |
+            --- WTF::HashTableAddResult<WTF::HashTableIterator<WTF::AtomicString, WTF::AtomicString, WTF::IdentityExtractor, WTF::AtomicStringHash, WTF::HashTraits<WTF::AtomicString>, WTF::HashTraits<WTF::AtomicString> > > WTF::HashTable<WTF::AtomicString, WTF::AtomicString, WTF::IdentityExtractor, WTF::AtomicStringHash, WTF::HashTraits<WTF::AtomicString>, WTF::HashTraits<WTF::AtomicString> >::add<WTF::IdentityHashTranslator<WTF::AtomicStringHash>, WTF::AtomicString, WTF::AtomicString>(WTF::AtomicString const&, WTF::AtomicString const&)
+
+0.22^6596875^HTMLParserThrea^chrome               ^[.] v8::internal::Assembler::bind(v8::internal::Label*)
+            |
+            --- v8::internal::Assembler::bind(v8::internal::Label*)
+
+0.22^6594480^HTMLParserThrea^chrome               ^[.] WebCore::InspectorInstrumentation::instrumentingAgentsForPage(WebCore::Page*)
+            |
+            --- WebCore::InspectorInstrumentation::instrumentingAgentsForPage(WebCore::Page*)
+
+0.22^6579973^HTMLParserThrea^chrome               ^[.] v8::internal::IC::PatchCache(v8::internal::InlineCacheState, v8::internal::StrictModeFlag, v8::internal::Handle<v8::internal::JSObject>, v8::internal::Handle<v8::internal::String>, v8::internal::Handle<v8::internal::Code>)
+            |
+            --- v8::internal::IC::PatchCache(v8::internal::InlineCacheState, v8::internal::StrictModeFlag, v8::internal::Handle<v8::internal::JSObject>, v8::internal::Handle<v8::internal::String>, v8::internal::Handle<v8::internal::Code>)
+               |          
+               |--54.27%-- 0x29f988404121
+               |          
+                --45.73%-- 0xc6a3487f2f1
+
+0.22^6576754^HTMLParserThrea^chrome               ^[.] v8::internal::Assembler::movq(v8::internal::Register, v8::internal::Handle<v8::internal::Object>, v8::internal::RelocInfo::Mode)
+            |
+            --- v8::internal::Assembler::movq(v8::internal::Register, v8::internal::Handle<v8::internal::Object>, v8::internal::RelocInfo::Mode)
+
+0.22^6564875^HTMLParserThrea^libpthread-2.15.so   ^[.] pthread_mutex_lock
+            |
+            --- pthread_mutex_lock
+
+0.22^6559671^HTMLParserThrea^chrome               ^[.] v8::internal::VariableProxy::node_type() const
+            |
+            --- v8::internal::VariableProxy::node_type() const
+
+0.22^6543901^HTMLParserThrea^chrome               ^[.] v8::internal::Runtime_NewClosure(int, v8::internal::Object**, v8::internal::Isolate*)
+            |
+            --- v8::internal::Runtime_NewClosure(int, v8::internal::Object**, v8::internal::Isolate*)
+               |          
+                --100.00%-- 0x2281c1ab1279
+                          0x2281c142b664
+                          0x2281c1417d97
+                          v8::internal::Invoke(bool, v8::internal::Handle<v8::internal::JSFunction>, v8::internal::Handle<v8::internal::Object>, int, v8::internal::Handle<v8::internal::Object>*, bool*)
+
+0.22^6487316^HTMLParserThrea^chrome               ^[.] v8::internal::Heap::AllocateConsString(v8::internal::String*, v8::internal::String*)
+            |
+            --- v8::internal::Heap::AllocateConsString(v8::internal::String*, v8::internal::String*)
+
+0.21^6470256^HTMLParserThrea^chrome               ^[.] v8::internal::StringTable::LookupOneByteString(v8::internal::Vector<unsigned char const>, v8::internal::Object**)
+            |
+            --- v8::internal::StringTable::LookupOneByteString(v8::internal::Vector<unsigned char const>, v8::internal::Object**)
+
+0.21^6461476^HTMLParserThrea^chrome               ^[.] v8::internal::JSObject::SetLocalPropertyIgnoreAttributes(v8::internal::Name*, v8::internal::Object*, PropertyAttributes, v8::internal::Object::ValueType)
+            |
+            --- v8::internal::JSObject::SetLocalPropertyIgnoreAttributes(v8::internal::Name*, v8::internal::Object*, PropertyAttributes, v8::internal::Object::ValueType)
+
+0.21^6410633^HTMLParserThrea^chrome               ^[.] v8::internal::BaseLoadStoreStubCompiler::CompilePolymorphicIC(v8::internal::List<v8::internal::Handle<v8::internal::Map>, v8::internal::FreeStoreAllocationPolicy>*, v8::internal::List<v8::internal::Handle<v8::internal::Code>, v8::internal::FreeStoreAllocationPolicy>*, v8::internal::Handle<v8::internal::Name>, v8::internal::Code::StubType, v8::internal::IcCheckType)
+            |
+            --- v8::internal::BaseLoadStoreStubCompiler::CompilePolymorphicIC(v8::internal::List<v8::internal::Handle<v8::internal::Map>, v8::internal::FreeStoreAllocationPolicy>*, v8::internal::List<v8::internal::Handle<v8::internal::Code>, v8::internal::FreeStoreAllocationPolicy>*, v8::internal::Handle<v8::internal::Name>, v8::internal::Code::StubType, v8::internal::IcCheckType)
+                0x7f7a0b900000
+
+0.21^6354296^HTMLParserThrea^chrome               ^[.] v8::internal::Parser::ParseExpressionOrLabelledStatement(v8::internal::ZoneList<v8::internal::Handle<v8::internal::String> >*, bool*)
+            |
+            --- v8::internal::Parser::ParseExpressionOrLabelledStatement(v8::internal::ZoneList<v8::internal::Handle<v8::internal::String> >*, bool*)
+               |          
+               |--55.30%-- 0x7fff30e9145000
+               |          
+                --44.70%-- 0x7fff30e906e000
+
+0.21^6319435^HTMLParserThrea^chrome               ^[.] _ZN2v88internal14NameDictionary9FindEntryEPNS0_4NameE.part.385
+            |
+            --- _ZN2v88internal14NameDictionary9FindEntryEPNS0_4NameE.part.385
+               |          
+                --100.00%-- (nil)
+
+0.21^6243685^HTMLParserThrea^chrome               ^[.] WebCore::Font::glyphDataAndPageForCharacter(int, bool, WebCore::FontDataVariant) const
+            |
+            --- WebCore::Font::glyphDataAndPageForCharacter(int, bool, WebCore::FontDataVariant) const
+                0x7fff30e8f0e0
+
+0.20^6145083^HTMLParserThrea^chrome               ^[.] v8::internal::FullCodeGenerator::AccumulatorValueContext::Plug(v8::internal::Register) const
+            |
+            --- v8::internal::FullCodeGenerator::AccumulatorValueContext::Plug(v8::internal::Register) const
+
+0.20^6137312^HTMLParserThrea^chrome               ^[.] v8::internal::JSObject::SetPropertyForResult(v8::internal::LookupResult*, v8::internal::Name*, v8::internal::Object*, PropertyAttributes, v8::internal::StrictModeFlag, v8::internal::JSReceiver::StoreFromKeyed)
+            |
+            --- v8::internal::JSObject::SetPropertyForResult(v8::internal::LookupResult*, v8::internal::Name*, v8::internal::Object*, PropertyAttributes, v8::internal::StrictModeFlag, v8::internal::JSReceiver::StoreFromKeyed)
+
+0.20^6135468^HTMLParserThrea^chrome               ^[.] v8::internal::Factory::NewNumber(double, v8::internal::PretenureFlag)
+            |
+            --- v8::internal::Factory::NewNumber(double, v8::internal::PretenureFlag)
+               |          
+                --100.00%-- 0x7fff30e9160000
+
+0.20^6051127^HTMLParserThrea^chrome               ^[.] WebCore::WidthIterator::glyphDataForCharacter(int, bool, int, unsigned int&)
+            |
+            --- WebCore::WidthIterator::glyphDataForCharacter(int, bool, int, unsigned int&)
+               |          
+               |--58.44%-- 0x7fff30e8cfa0
+               |          
+                --41.56%-- 0x7fff30e8f550
+
+0.20^6000454^HTMLParserThrea^chrome               ^[.] v8::internal::CodeCache::UpdateDefaultCache(v8::internal::Name*, v8::internal::Code*)
+            |
+            --- v8::internal::CodeCache::UpdateDefaultCache(v8::internal::Name*, v8::internal::Code*)
+               |          
+                --100.00%-- 0xc6a355788f1
+
+0.20^5976974^HTMLParserThrea^chrome               ^[.] WebCore::RenderObject::containingBlock() const
+            |
+            --- WebCore::RenderObject::containingBlock() const
+
+0.20^5926952^HTMLParserThrea^chrome               ^[.] v8::internal::GlobalHandles::Create(v8::internal::Object*)
+            |
+            --- v8::internal::GlobalHandles::Create(v8::internal::Object*)
+
+0.20^5880116^HTMLParserThrea^chrome               ^[.] v8::internal::Parser::ParseLeftHandSideExpression(bool*)
+            |
+            --- v8::internal::Parser::ParseLeftHandSideExpression(bool*)
+               |          
+                --100.00%-- 0x7fff30e9151000
+
+0.19^5834557^HTMLParserThrea^chrome               ^[.] WebCore::CachedRawResource::willSendRequest(WebCore::ResourceRequest&, WebCore::ResourceResponse const&)
+            |
+            --- WebCore::CachedRawResource::willSendRequest(WebCore::ResourceRequest&, WebCore::ResourceResponse const&)
+
+0.18^5527800^HTMLParserThrea^chrome               ^[.] WebCore::RenderBlock::paint(WebCore::PaintInfo&, WebCore::LayoutPoint const&)
+            |
+            --- WebCore::RenderBlock::paint(WebCore::PaintInfo&, WebCore::LayoutPoint const&)
+
+0.18^5461500^HTMLParserThrea^chrome               ^[.] WebCore::Document::updateStyleIfNeeded()
+            |
+            --- WebCore::Document::updateStyleIfNeeded()
+
+0.18^5428693^HTMLParserThrea^libc-2.15.so         ^[.] __memset_sse2
+            |
+            --- __memset_sse2
+
+0.18^5423805^HTMLParserThrea^chrome               ^[.] WebCore::FrameView::paintContents(WebCore::GraphicsContext*, WebCore::IntRect const&)
+            |
+            --- WebCore::FrameView::paintContents(WebCore::GraphicsContext*, WebCore::IntRect const&)
+               |          
+               |--61.15%-- 0x5a000002d8
+               |          
+                --38.85%-- 0x2e00000092
+
+0.18^5363793^HTMLParserThrea^chrome               ^[.] WebCore::HTTPHeaderMap::get(WTF::AtomicString const&) const
+            |
+            --- WebCore::HTTPHeaderMap::get(WTF::AtomicString const&) const
+
+0.18^5306454^HTMLParserThrea^chrome               ^[.] v8::internal::Scope::Scope(v8::internal::Scope*, v8::internal::ScopeType, v8::internal::Zone*)
+            |
+            --- v8::internal::Scope::Scope(v8::internal::Scope*, v8::internal::ScopeType, v8::internal::Zone*)
+
+0.18^5274362^HTMLParserThrea^chrome               ^[.] v8::internal::Assembler::bind_to(v8::internal::Label*, int)
+            |
+            --- v8::internal::Assembler::bind_to(v8::internal::Label*, int)
+               |          
+                --100.00%-- 0x3b2abd7c3000
+
+0.18^5270432^HTMLParserThrea^chrome               ^[.] SkDraw::drawPosText(char const*, unsigned long, float const*, float, int, SkPaint const&) const
+            |
+            --- SkDraw::drawPosText(char const*, unsigned long, float const*, float, int, SkPaint const&) const
+                (nil)
+               |          
+               |--55.20%-- 0x4f005200260003
+               |          
+                --44.80%-- 0x52004a00440003
+
+0.17^5196475^HTMLParserThrea^chrome               ^[.] WTF::equalNonNull(WTF::StringImpl const*, WTF::StringImpl const*)
+            |
+            --- WTF::equalNonNull(WTF::StringImpl const*, WTF::StringImpl const*)
+                0x7f7a0b975100
+
+0.17^5103842^HTMLParserThrea^chrome               ^[.] WebCore::RenderLayer::convertToLayerCoords(WebCore::RenderLayer const*, WebCore::LayoutPoint&) const
+            |
+            --- WebCore::RenderLayer::convertToLayerCoords(WebCore::RenderLayer const*, WebCore::LayoutPoint&) const
+
+0.17^4967343^HTMLParserThrea^chrome               ^[.] _ZN2v88internal4ListINS0_16FuncNameInferrer4NameENS0_20ZoneAllocationPolicyEE3AddERKS3_S4_.constprop.51
+            |
+            --- _ZN2v88internal4ListINS0_16FuncNameInferrer4NameENS0_20ZoneAllocationPolicyEE3AddERKS3_S4_.constprop.51
+
+0.16^4900762^HTMLParserThrea^libc-2.15.so         ^[.] __memcmp_sse4_1
+            |
+            --- __memcmp_sse4_1
+               |          
+                --100.00%-- 0x7f7a0b975100
+
+0.15^4610114^HTMLParserThrea^chrome               ^[.] v8::internal::JSObject::GetPropertyWithCallback(v8::internal::Object*, v8::internal::Object*, v8::internal::Name*)
+            |
+            --- v8::internal::JSObject::GetPropertyWithCallback(v8::internal::Object*, v8::internal::Object*, v8::internal::Name*)
+
+0.15^4535632^HTMLParserThrea^chrome               ^[.] v8::HandleScope::HandleScope()
+            |
+            --- v8::HandleScope::HandleScope()
+
+0.15^4419717^HTMLParserThrea^chrome               ^[.] WebCore::MarkupAccumulator::appendStartTag(WebCore::Node*, WTF::HashMap<WTF::AtomicStringImpl*, WTF::AtomicStringImpl*, WTF::PtrHash<WTF::AtomicStringImpl*>, WTF::HashTraits<WTF::AtomicStringImpl*>, WTF::HashTraits<WTF::AtomicStringImpl*> >*)
+            |
+            --- WebCore::MarkupAccumulator::appendStartTag(WebCore::Node*, WTF::HashMap<WTF::AtomicStringImpl*, WTF::AtomicStringImpl*, WTF::PtrHash<WTF::AtomicStringImpl*>, WTF::HashTraits<WTF::AtomicStringImpl*>, WTF::HashTraits<WTF::AtomicStringImpl*> >*)
+
+0.14^4269939^HTMLParserThrea^chrome               ^[.] cssyyparse(WebCore::CSSParser*)
+            |
+            --- cssyyparse(WebCore::CSSParser*)
+               |          
+               |--56.10%-- content::PepperGraphics2DHost::Paint(SkCanvas*, gfx::Rect const&, gfx::Rect const&)
+               |          
+                --43.90%-- 0x1b800000b600
+
+0.14^4189741^HTMLParserThrea^chrome               ^[.] v8::internal::HandleScope::Extend(v8::internal::Isolate*)
+            |
+            --- v8::internal::HandleScope::Extend(v8::internal::Isolate*)
+
+0.13^3989323^HTMLParserThrea^chrome               ^[.] WebCore::RenderBox::overflowRectForPaintRejection() const
+            |
+            --- WebCore::RenderBox::overflowRectForPaintRejection() const
+
+0.12^3691476^HTMLParserThrea^libstdc++.so.6.0.16  ^[.] std::_Rb_tree_increment(std::_Rb_tree_node_base*)
+            |
+            --- std::_Rb_tree_increment(std::_Rb_tree_node_base*)
+
+0.12^3619871^HTMLParserThrea^chrome               ^[.] WebCore::ChildNodeInsertionNotifier::notifyDescendantInsertedIntoTree(WebCore::ContainerNode*)
+            |
+            --- WebCore::ChildNodeInsertionNotifier::notifyDescendantInsertedIntoTree(WebCore::ContainerNode*)
+                (nil)
+
+0.12^3610233^HTMLParserThrea^chrome               ^[.] WebCore::RenderBox::addLayoutOverflow(WebCore::LayoutRect const&)
+            |
+            --- WebCore::RenderBox::addLayoutOverflow(WebCore::LayoutRect const&)
+
+0.12^3602214^HTMLParserThrea^chrome               ^[.] v8::internal::RelocIterator::RelocIterator(v8::internal::Code*, int)
+            |
+            --- v8::internal::RelocIterator::RelocIterator(v8::internal::Code*, int)
+
+0.12^3592533^HTMLParserThrea^chrome               ^[.] v8::internal::MarkCompactCollector::MigrateObject(unsigned char*, unsigned char*, int, v8::internal::AllocationSpace)
+            |
+            --- v8::internal::MarkCompactCollector::MigrateObject(unsigned char*, unsigned char*, int, v8::internal::AllocationSpace)
+
+0.12^3591487^HTMLParserThrea^chrome               ^[.] v8::internal::StringTableCleaner::VisitPointers(v8::internal::Object**, v8::internal::Object**)
+            |
+            --- v8::internal::StringTableCleaner::VisitPointers(v8::internal::Object**, v8::internal::Object**)
+
+0.12^3590966^HTMLParserThrea^chrome               ^[.] v8::internal::MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage(v8::internal::NewSpace*, v8::internal::NewSpacePage*)
+            |
+            --- v8::internal::MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage(v8::internal::NewSpace*, v8::internal::NewSpacePage*)
+
+0.12^3590884^HTMLParserThrea^chrome               ^[.] v8::internal::MarkCompactCollector::ClearNonLiveReferences()
+            |
+            --- v8::internal::MarkCompactCollector::ClearNonLiveReferences()
+
+0.12^3590478^HTMLParserThrea^chrome               ^[.] void std::__introsort_loop<v8::internal::ObjectGroupConnection*, long>(v8::internal::ObjectGroupConnection*, v8::internal::ObjectGroupConnection*, long)
+            |
+            --- void std::__introsort_loop<v8::internal::ObjectGroupConnection*, long>(v8::internal::ObjectGroupConnection*, v8::internal::ObjectGroupConnection*, long)
+
+0.12^3590200^HTMLParserThrea^chrome               ^[.] v8::internal::StaticMarkingVisitor<v8::internal::MarkCompactMarkingVisitor>::VisitPropertyCell(v8::internal::Map*, v8::internal::HeapObject*)
+            |
+            --- v8::internal::StaticMarkingVisitor<v8::internal::MarkCompactMarkingVisitor>::VisitPropertyCell(v8::internal::Map*, v8::internal::HeapObject*)
+
+0.12^3589094^HTMLParserThrea^chrome               ^[.] long v8::internal::MarkCompactCollector::SweepConservatively<(v8::internal::MarkCompactCollector::SweepingParallelism)0>(v8::internal::PagedSpace*, v8::internal::FreeList*, v8::internal::Page*)
+            |
+            --- long v8::internal::MarkCompactCollector::SweepConservatively<(v8::internal::MarkCompactCollector::SweepingParallelism)0>(v8::internal::PagedSpace*, v8::internal::FreeList*, v8::internal::Page*)
+
+0.12^3589023^HTMLParserThrea^chrome               ^[.] WebCore::RenderStyle::~RenderStyle()
+            |
+            --- WebCore::RenderStyle::~RenderStyle()
+
+0.12^3588259^HTMLParserThrea^chrome               ^[.] v8::internal::FlexibleBodyVisitor<v8::internal::MarkCompactMarkingVisitor, v8::internal::JSObject::BodyDescriptor, void>::Visit(v8::internal::Map*, v8::internal::HeapObject*)
+            |
+            --- v8::internal::FlexibleBodyVisitor<v8::internal::MarkCompactMarkingVisitor, v8::internal::JSObject::BodyDescriptor, void>::Visit(v8::internal::Map*, v8::internal::HeapObject*)
+
+0.12^3588121^HTMLParserThrea^chrome               ^[.] v8::internal::GlobalHandles::IterateWeakRoots(v8::internal::ObjectVisitor*)
+            |
+            --- v8::internal::GlobalHandles::IterateWeakRoots(v8::internal::ObjectVisitor*)
+
+0.12^3587651^HTMLParserThrea^chrome               ^[.] v8::internal::StaticMarkingVisitor<v8::internal::MarkCompactMarkingVisitor>::MarkTransitionArray(v8::internal::Heap*, v8::internal::TransitionArray*)
+            |
+            --- v8::internal::StaticMarkingVisitor<v8::internal::MarkCompactMarkingVisitor>::MarkTransitionArray(v8::internal::Heap*, v8::internal::TransitionArray*)
+
+0.12^3585248^HTMLParserThrea^chrome               ^[.] v8::internal::SequentialStringKey<unsigned char>::Hash()
+            |
+            --- v8::internal::SequentialStringKey<unsigned char>::Hash()
+
+0.12^3584278^HTMLParserThrea^chrome               ^[.] v8::internal::ChoiceNode::Emit(v8::internal::RegExpCompiler*, v8::internal::Trace*)
+            |
+            --- v8::internal::ChoiceNode::Emit(v8::internal::RegExpCompiler*, v8::internal::Trace*)
+                0x3b2abd5271d8
+
+0.12^3583348^HTMLParserThrea^chrome               ^[.] v8::internal::LoadIC::UpdateMonomorphicIC(v8::internal::Handle<v8::internal::JSObject>, v8::internal::Handle<v8::internal::Code>, v8::internal::Handle<v8::internal::String>, v8::internal::StrictModeFlag)
+            |
+            --- v8::internal::LoadIC::UpdateMonomorphicIC(v8::internal::Handle<v8::internal::JSObject>, v8::internal::Handle<v8::internal::Code>, v8::internal::Handle<v8::internal::String>, v8::internal::StrictModeFlag)
+                0xc6a34dd2791
+
+0.12^3582808^HTMLParserThrea^chrome               ^[.] v8::internal::Parser::ParseIfStatement(v8::internal::ZoneList<v8::internal::Handle<v8::internal::String> >*, bool*)
+            |
+            --- v8::internal::Parser::ParseIfStatement(v8::internal::ZoneList<v8::internal::Handle<v8::internal::String> >*, bool*)
+                0x3b2abdf3f010
+
+0.12^3582142^HTMLParserThrea^chrome               ^[.] WebCore::RenderBlock::addOverflowFromInlineChildren()
+            |
+            --- WebCore::RenderBlock::addOverflowFromInlineChildren()
+
+0.12^3581759^HTMLParserThrea^chrome               ^[.] _ZN7WebCore10RenderText29computePreferredLogicalWidthsEfRN3WTF7HashSetIPKNS_14SimpleFontDataENS1_7PtrHashIS5_EENS1_10HashTraitsIS5_EEEERNS_13GlyphOverflowE.part.184
+            |
+            --- _ZN7WebCore10RenderText29computePreferredLogicalWidthsEfRN3WTF7HashSetIPKNS_14SimpleFontDataENS1_7PtrHashIS5_EENS1_10HashTraitsIS5_EEEERNS_13GlyphOverflowE.part.184
+
+0.12^3581038^HTMLParserThrea^chrome               ^[.] WTF::String WebCore::v8StringToWebCoreString<WTF::String>(v8::Handle<v8::String>, WebCore::ExternalMode)
+            |
+            --- WTF::String WebCore::v8StringToWebCoreString<WTF::String>(v8::Handle<v8::String>, WebCore::ExternalMode)
+                0xc6a34d43fe9
+
+0.12^3581023^HTMLParserThrea^chrome               ^[.] WebCore::RenderInline::marginLeft() const
+            |
+            --- WebCore::RenderInline::marginLeft() const
+
+0.12^3580522^HTMLParserThrea^chrome               ^[.] WTF::Vector<WebCore::GraphicsContext::DeferredSaveState, 0ul>::expandCapacity(unsigned long)
+            |
+            --- WTF::Vector<WebCore::GraphicsContext::DeferredSaveState, 0ul>::expandCapacity(unsigned long)
+                0x100017718028
+
+0.12^3580338^HTMLParserThrea^chrome               ^[.] v8::internal::Runtime_MaterializeRegExpLiteral(int, v8::internal::Object**, v8::internal::Isolate*)
+            |
+            --- v8::internal::Runtime_MaterializeRegExpLiteral(int, v8::internal::Object**, v8::internal::Isolate*)
+
+0.12^3579940^HTMLParserThrea^chrome               ^[.] v8::internal::FunctionLiteral::AllowsLazyCompilation()
+            |
+            --- v8::internal::FunctionLiteral::AllowsLazyCompilation()
+
+0.12^3579555^HTMLParserThrea^chrome               ^[.] WebCore::XMLHttpRequest::isAllowedHTTPMethod(WTF::String const&)
+            |
+            --- WebCore::XMLHttpRequest::isAllowedHTTPMethod(WTF::String const&)
+                0x7f7a0b900000
+
+0.12^3579477^HTMLParserThrea^chrome               ^[.] ppapi::PpapiGlobals::Get()
+            |
+            --- ppapi::PpapiGlobals::Get()
+
+0.12^3579477^HTMLParserThrea^chrome               ^[.] v8::internal::StaticVisitorBase::GetVisitorId(int, int)
+            |
+            --- v8::internal::StaticVisitorBase::GetVisitorId(int, int)
+
+0.12^3578703^HTMLParserThrea^chrome               ^[.] v8::internal::LoadIC::ComputePolymorphicIC(v8::internal::List<v8::internal::Handle<v8::internal::Map>, v8::internal::FreeStoreAllocationPolicy>*, v8::internal::List<v8::internal::Handle<v8::internal::Code>, v8::internal::FreeStoreAllocationPolicy>*, int, v8::internal::Handle<v8::internal::Name>, v8::internal::StrictModeFlag)
+            |
+            --- v8::internal::LoadIC::ComputePolymorphicIC(v8::internal::List<v8::internal::Handle<v8::internal::Map>, v8::internal::FreeStoreAllocationPolicy>*, v8::internal::List<v8::internal::Handle<v8::internal::Code>, v8::internal::FreeStoreAllocationPolicy>*, int, v8::internal::Handle<v8::internal::Name>, v8::internal::StrictModeFlag)
+                0xc6a354cfa21
+
+0.12^3578592^HTMLParserThrea^chrome               ^[.] v8::internal::Parser::BuildObjectLiteralConstantProperties(v8::internal::ZoneList<v8::internal::ObjectLiteralProperty*>*, v8::internal::Handle<v8::internal::FixedArray>, bool*, bool*, int*, bool*)
+            |
+            --- v8::internal::Parser::BuildObjectLiteralConstantProperties(v8::internal::ZoneList<v8::internal::ObjectLiteralProperty*>*, v8::internal::Handle<v8::internal::FixedArray>, bool*, bool*, int*, bool*)
+
+0.12^3578004^HTMLParserThrea^chrome               ^[.] v8::internal::StubCache::ComputePolymorphicLoadIC(v8::internal::List<v8::internal::Handle<v8::internal::Map>, v8::internal::FreeStoreAllocationPolicy>*, v8::internal::List<v8::internal::Handle<v8::internal::Code>, v8::internal::FreeStoreAllocationPolicy>*, int, v8::internal::Handle<v8::internal::Name>)
+            |
+            --- v8::internal::StubCache::ComputePolymorphicLoadIC(v8::internal::List<v8::internal::Handle<v8::internal::Map>, v8::internal::FreeStoreAllocationPolicy>*, v8::internal::List<v8::internal::Handle<v8::internal::Code>, v8::internal::FreeStoreAllocationPolicy>*, int, v8::internal::Handle<v8::internal::Name>)
+                0xc6a3444c2a9
+
+0.12^3577719^HTMLParserThrea^chrome               ^[.] v8::internal::BoyerMooreLookahead::FindBestInterval(int, int, int*, int*)
+            |
+            --- v8::internal::BoyerMooreLookahead::FindBestInterval(int, int, int*, int*)
+
+0.12^3577405^HTMLParserThrea^chrome               ^[.] WebCore::RenderBoxModelObject::continuation() const
+            |
+            --- WebCore::RenderBoxModelObject::continuation() const
+
+0.12^3576942^HTMLParserThrea^chrome               ^[.] WebCore::RenderBlock::hasPercentHeightDescendant(WebCore::RenderBox*)
+            |
+            --- WebCore::RenderBlock::hasPercentHeightDescendant(WebCore::RenderBox*)
+
+0.12^3575468^HTMLParserThrea^chrome               ^[.] v8::internal::VariableProxy* v8::internal::Scope::NewUnresolved<v8::internal::AstConstructionVisitor>(v8::internal::AstNodeFactory<v8::internal::AstConstructionVisitor>*, v8::internal::Handle<v8::internal::String>, v8::internal::Interface*, int)
+            |
+            --- v8::internal::VariableProxy* v8::internal::Scope::NewUnresolved<v8::internal::AstConstructionVisitor>(v8::internal::AstNodeFactory<v8::internal::AstConstructionVisitor>*, v8::internal::Handle<v8::internal::String>, v8::internal::Interface*, int)
+                0x7fff30e91928
+
+0.12^3575428^HTMLParserThrea^chrome               ^[.] WebCore::BidiResolver<WebCore::InlineIterator, WebCore::BidiRun>::createBidiRunsForLine(WebCore::InlineIterator const&, WebCore::VisualDirectionOverride, bool)
+            |
+            --- WebCore::BidiResolver<WebCore::InlineIterator, WebCore::BidiRun>::createBidiRunsForLine(WebCore::InlineIterator const&, WebCore::VisualDirectionOverride, bool)
+                0x3b2abd843120
+
+0.12^3574896^HTMLParserThrea^chrome               ^[.] v8::internal::FixedArray::set(int, v8::internal::Object*, v8::internal::WriteBarrierMode)
+            |
+            --- v8::internal::FixedArray::set(int, v8::internal::Object*, v8::internal::WriteBarrierMode)
+
+0.12^3574498^HTMLParserThrea^chrome               ^[.] v8::internal::String::Equals(v8::internal::String*)
+            |
+            --- v8::internal::String::Equals(v8::internal::String*)
+
+0.12^3574405^HTMLParserThrea^chrome               ^[.] v8::internal::FullCodeGenerator::ExpressionContext::IsTest() const
+            |
+            --- v8::internal::FullCodeGenerator::ExpressionContext::IsTest() const
+
+0.12^3574327^HTMLParserThrea^chrome               ^[.] v8::internal::Scope::ResolveVariable(v8::internal::CompilationInfo*, v8::internal::VariableProxy*, v8::internal::AstNodeFactory<v8::internal::AstNullVisitor>*)
+            |
+            --- v8::internal::Scope::ResolveVariable(v8::internal::CompilationInfo*, v8::internal::VariableProxy*, v8::internal::AstNodeFactory<v8::internal::AstNullVisitor>*)
+
+0.12^3573789^HTMLParserThrea^chrome               ^[.] v8::internal::Heap::AllocateMap(v8::internal::InstanceType, int, v8::internal::ElementsKind)
+            |
+            --- v8::internal::Heap::AllocateMap(v8::internal::InstanceType, int, v8::internal::ElementsKind)
+
+0.12^3573631^HTMLParserThrea^chrome               ^[.] v8::internal::Runtime::CreateArrayLiteralBoilerplate(v8::internal::Isolate*, v8::internal::Handle<v8::internal::FixedArray>, v8::internal::Handle<v8::internal::FixedArray>)
+            |
+            --- v8::internal::Runtime::CreateArrayLiteralBoilerplate(v8::internal::Isolate*, v8::internal::Handle<v8::internal::FixedArray>, v8::internal::Handle<v8::internal::FixedArray>)
+
+0.12^3573214^HTMLParserThrea^chrome               ^[.] WTF::HashTableIterator<WebCore::CachedResource*, WTF::KeyValuePair<WebCore::CachedResource*, WTF::RefPtr<WebCore::ResourceTimingInfo> >, WTF::KeyValuePairKeyExtractor<WTF::KeyValuePair<WebCore::CachedResource*, WTF::RefPtr<WebCore::ResourceTimingInfo> > >, WTF::PtrHash<WebCore::CachedResource*>, WTF::HashMapValueTraits<WTF::HashTraits<WebCore::CachedResource*>, WTF::HashTraits<WTF::RefPtr<WebCore::ResourceTimingInfo> > >, WTF::HashTraits<WebCore::Cache
+            |
+            --- WTF::HashTableIterator<WebCore::CachedResource*, WTF::KeyValuePair<WebCore::CachedResource*, WTF::RefPtr<WebCore::ResourceTimingInfo> >, WTF::KeyValuePairKeyExtractor<WTF::KeyValuePair<WebCore::CachedResource*, WTF::RefPtr<WebCore::ResourceTimingInfo> > >, WTF::PtrHash<WebCore::CachedResource*>, WTF::HashMapValueTraits<WTF::HashTraits<WebCore::CachedResource*>, WTF::HashTraits<WTF::RefPtr<WebCore::ResourceTimingInfo> > >, WTF::HashTraits<WebCore::CachedResource*> > WTF::HashTable<WebCore::CachedResource*, WTF::KeyValuePair<WebCore::CachedResource*, WTF::RefPtr<WebCore::ResourceTimingInfo> >, WTF::KeyValuePairKeyExtractor<WTF::KeyValuePair<WebCore::CachedResource*, WTF::RefPtr<WebCore::ResourceTimingInfo> > >, WTF::PtrHash<WebCore::CachedResource*>, WTF::HashMapValueTraits<WTF::HashTraits<WebCore::CachedResource*>, WTF::HashTraits<WTF::RefPtr<WebCore::ResourceTimingInfo> > >, WTF::HashTraits<WebCore::CachedResource*> >::find<WTF::IdentityHashTranslator<WTF::PtrHash<WebCore::CachedResource*> >, WebCore::CachedResource*>(WebCore::CachedResource* const&)
+
+0.12^3573010^HTMLParserThrea^chrome               ^[.] Pickle::Resize(unsigned long)
+            |
+            --- Pickle::Resize(unsigned long)
+
+0.12^3572646^HTMLParserThrea^chrome               ^[.] WebCore::RenderBlock::floatingObjects() const
+            |
+            --- WebCore::RenderBlock::floatingObjects() const
+
+0.12^3572547^HTMLParserThrea^chrome               ^[.] WebCore::LiveNodeListBase::item(unsigned int) const
+            |
+            --- WebCore::LiveNodeListBase::item(unsigned int) const
+
+0.12^3572456^HTMLParserThrea^chrome               ^[.] WebCore::RenderBox::computeLogicalHeight(WebCore::LayoutUnit, WebCore::LayoutUnit, WebCore::RenderBox::LogicalExtentComputedValues&) const
+            |
+            --- WebCore::RenderBox::computeLogicalHeight(WebCore::LayoutUnit, WebCore::LayoutUnit, WebCore::RenderBox::LogicalExtentComputedValues&) const
+                (nil)
+
+0.12^3572406^HTMLParserThrea^chrome               ^[.] WebCore::ScriptController::callFunction(v8::Handle<v8::Function>, v8::Handle<v8::Object>, int, v8::Handle<v8::Value>*)
+            |
+            --- WebCore::ScriptController::callFunction(v8::Handle<v8::Function>, v8::Handle<v8::Object>, int, v8::Handle<v8::Value>*)
+
+0.12^3571839^HTMLParserThrea^chrome               ^[.] WTF::HashTableAddResult<WTF::HashTableIterator<WTF::AtomicStringImpl*, WTF::KeyValuePair<WTF::AtomicStringImpl*, WTF::OwnPtr<WTF::LinkedStack<WebCore::RuleData> > >, WTF::KeyValuePairKeyExtractor<WTF::KeyValuePair<WTF::AtomicStringImpl*, WTF::OwnPtr<WTF::LinkedStack<WebCore::RuleData> > > >, WTF::PtrHash<WTF::AtomicStringImpl*>, WTF::HashMapValueTraits<WTF::HashTraits<WTF::AtomicStringImpl*>, WTF::HashTraits<WTF::OwnPtr<WTF::LinkedStack<WebCore::RuleDa
+            |
+            --- WTF::HashTableAddResult<WTF::HashTableIterator<WTF::AtomicStringImpl*, WTF::KeyValuePair<WTF::AtomicStringImpl*, WTF::OwnPtr<WTF::LinkedStack<WebCore::RuleData> > >, WTF::KeyValuePairKeyExtractor<WTF::KeyValuePair<WTF::AtomicStringImpl*, WTF::OwnPtr<WTF::LinkedStack<WebCore::RuleData> > > >, WTF::PtrHash<WTF::AtomicStringImpl*>, WTF::HashMapValueTraits<WTF::HashTraits<WTF::AtomicStringImpl*>, WTF::HashTraits<WTF::OwnPtr<WTF::LinkedStack<WebCore::RuleData> > > >, WTF::HashTraits<WTF::AtomicStringImpl*> > > WTF::HashTable<WTF::AtomicStringImpl*, WTF::KeyValuePair<WTF::AtomicStringImpl*, WTF::OwnPtr<WTF::LinkedStack<WebCore::RuleData> > >, WTF::KeyValuePairKeyExtractor<WTF::KeyValuePair<WTF::AtomicStringImpl*, WTF::OwnPtr<WTF::LinkedStack<WebCore::RuleData> > > >, WTF::PtrHash<WTF::AtomicStringImpl*>, WTF::HashMapValueTraits<WTF::HashTraits<WTF::AtomicStringImpl*>, WTF::HashTraits<WTF::OwnPtr<WTF::LinkedStack<WebCore::RuleData> > > >, WTF::HashTraits<WTF::AtomicStringImpl*> >::add<WTF::HashMapTranslator<WTF::HashMapValueTraits<WTF::HashTraits<WTF::AtomicStringImpl*>, WTF::HashTraits<WTF::OwnPtr<WTF::LinkedStack<WebCore::RuleData> > > >, WTF::PtrHash<WTF::AtomicStringImpl*> >, WTF::AtomicStringImpl*, WTF::PassOwnPtr<WTF::LinkedStack<WebCore::RuleData> > >(WTF::AtomicStringImpl* const&, WTF::PassOwnPtr<WTF::LinkedStack<WebCore::RuleData> > const&)
+
+0.12^3571626^HTMLParserThrea^chrome               ^[.] v8::internal::Factory::ObjectLiteralMapFromCache(v8::internal::Handle<v8::internal::Context>, v8::internal::Handle<v8::internal::FixedArray>)
+            |
+            --- v8::internal::Factory::ObjectLiteralMapFromCache(v8::internal::Handle<v8::internal::Context>, v8::internal::Handle<v8::internal::FixedArray>)
+
+0.12^3571455^HTMLParserThrea^chrome               ^[.] WebCore::RenderView::intervalArena()
+            |
+            --- WebCore::RenderView::intervalArena()
+
+0.12^3571267^HTMLParserThrea^chrome               ^[.] v8::internal::VarAndOrder::Compare(v8::internal::VarAndOrder const*, v8::internal::VarAndOrder const*)
+            |
+            --- v8::internal::VarAndOrder::Compare(v8::internal::VarAndOrder const*, v8::internal::VarAndOrder const*)
+
+0.12^3570914^HTMLParserThrea^chrome               ^[.] bool WebCore::shouldInvalidateNodeListCachesForAttr<2u>(unsigned int const*, WebCore::QualifiedName const&)
+            |
+            --- bool WebCore::shouldInvalidateNodeListCachesForAttr<2u>(unsigned int const*, WebCore::QualifiedName const&)
+
+0.12^3570523^HTMLParserThrea^chrome               ^[.] v8::internal::CodeCacheHashTableKey::IsMatch(v8::internal::Object*)
+            |
+            --- v8::internal::CodeCacheHashTableKey::IsMatch(v8::internal::Object*)
+
+0.12^3570308^HTMLParserThrea^chrome               ^[.] WebCore::RenderBox::hasRelativeDimensions() const
+            |
+            --- WebCore::RenderBox::hasRelativeDimensions() const
+
+0.12^3570109^HTMLParserThrea^chrome               ^[.] WTF::HashMap<WTF::AtomicStringImpl*, WTF::OwnPtr<WebCore::RuleData>, WTF::PtrHash<WTF::AtomicStringImpl*>, WTF::HashTraits<WTF::AtomicStringImpl*>, WTF::HashTraits<WTF::OwnPtr<WebCore::RuleData> > >::get(WTF::AtomicStringImpl* const&) const
+            |
+            --- WTF::HashMap<WTF::AtomicStringImpl*, WTF::OwnPtr<WebCore::RuleData>, WTF::PtrHash<WTF::AtomicStringImpl*>, WTF::HashTraits<WTF::AtomicStringImpl*>, WTF::HashTraits<WTF::OwnPtr<WebCore::RuleData> > >::get(WTF::AtomicStringImpl* const&) const
+                void WTF::Vector<WebCore::StyleRule*, 64ul>::appendSlowCase<WebCore::StyleRule*>(WebCore::StyleRule* const&)
+
+0.12^3569976^HTMLParserThrea^chrome               ^[.] v8::internal::IC::address() const
+            |
+            --- v8::internal::IC::address() const
+
+0.12^3569735^HTMLParserThrea^chrome               ^[.] v8::internal::StringTable::LookupUtf8String(v8::internal::Vector<char const>, v8::internal::Object**)
+            |
+            --- v8::internal::StringTable::LookupUtf8String(v8::internal::Vector<char const>, v8::internal::Object**)
+
+0.12^3569690^HTMLParserThrea^chrome               ^[.] WebCore::RenderBlock::updateLogicalWidthForAlignment(WebCore::ETextAlign const&, WebCore::BidiRun*, float&, float&, float&, int)
+            |
+            --- WebCore::RenderBlock::updateLogicalWidthForAlignment(WebCore::ETextAlign const&, WebCore::BidiRun*, float&, float&, float&, int)
+
+0.12^3569632^HTMLParserThrea^chrome               ^[.] content::GetContentClient()
+            |
+            --- content::GetContentClient()
+                WebCore::Node::invalidateNodeListCachesInAncestors(WebCore::QualifiedName const*, WebCore::Element*)
+
+0.12^3569389^HTMLParserThrea^chrome               ^[.] v8::internal::Scanner::SkipSingleLineComment()
+            |
+            --- v8::internal::Scanner::SkipSingleLineComment()
+
+0.12^3568302^HTMLParserThrea^chrome               ^[.] v8::internal::PagedSpace::AllocateRaw(int)
+            |
+            --- v8::internal::PagedSpace::AllocateRaw(int)
+                0x5d00001000
+
+0.12^3567675^HTMLParserThrea^chrome               ^[.] v8::internal::HashTable<v8::internal::NameDictionaryShape, v8::internal::Name*>::FindEntry(v8::internal::Isolate*, v8::internal::Name*)
+            |
+            --- v8::internal::HashTable<v8::internal::NameDictionaryShape, v8::internal::Name*>::FindEntry(v8::internal::Isolate*, v8::internal::Name*)
+
+0.12^3567487^HTMLParserThrea^chrome               ^[.] WebCore::ImageLoader::updateFromElement()
+            |
+            --- WebCore::ImageLoader::updateFromElement()
+
+0.12^3567473^HTMLParserThrea^chrome               ^[.] WebCore::RenderObject::localToContainerQuad(WebCore::FloatQuad const&, WebCore::RenderLayerModelObject const*, unsigned int, bool*) const
+            |
+            --- WebCore::RenderObject::localToContainerQuad(WebCore::FloatQuad const&, WebCore::RenderLayerModelObject const*, unsigned int, bool*) const
+
+0.12^3567274^HTMLParserThrea^chrome               ^[.] webCoreInitializeScriptWrappableForInterface(WebCore::DocumentFragment*)
+            |
+            --- webCoreInitializeScriptWrappableForInterface(WebCore::DocumentFragment*)
+
+0.12^3567005^HTMLParserThrea^chrome               ^[.] WebCore::Frame::settings() const
+            |
+            --- WebCore::Frame::settings() const
+
+0.12^3566636^HTMLParserThrea^chrome               ^[.] v8::internal::Parser::ParseIdentifier(bool*)
+            |
+            --- v8::internal::Parser::ParseIdentifier(bool*)
+                0x7fff30e90bf000
+
+0.12^3566359^HTMLParserThrea^chrome               ^[.] WebCore::CachedImage::didAddClient(WebCore::CachedResourceClient*)
+            |
+            --- WebCore::CachedImage::didAddClient(WebCore::CachedResourceClient*)
+
+0.12^3566119^HTMLParserThrea^chrome               ^[.] WebCore::RenderStyle::diff(WebCore::RenderStyle const*, unsigned int&) const
+            |
+            --- WebCore::RenderStyle::diff(WebCore::RenderStyle const*, unsigned int&) const
+
+0.12^3565713^HTMLParserThrea^chrome               ^[.] WebCore::LiveNodeListBase::invalidateCache() const
+            |
+            --- WebCore::LiveNodeListBase::invalidateCache() const
+
+0.12^3565698^HTMLParserThrea^chrome               ^[.] WebCore::SelectorChecker::SelectorChecker(WebCore::Document*, WebCore::SelectorChecker::Mode)
+            |
+            --- WebCore::SelectorChecker::SelectorChecker(WebCore::Document*, WebCore::SelectorChecker::Mode)
+
+0.12^3565455^HTMLParserThrea^chrome               ^[.] WebCore::HTMLTreeBuilder::processEndTag(WebCore::AtomicHTMLToken*)
+            |
+            --- WebCore::HTMLTreeBuilder::processEndTag(WebCore::AtomicHTMLToken*)
+                0x7f7a11e52eb0
+
+0.12^3565182^HTMLParserThrea^chrome               ^[.] v8::internal::Assembler::arithmetic_op_32(unsigned char, v8::internal::Register, v8::internal::Register)
+            |
+            --- v8::internal::Assembler::arithmetic_op_32(unsigned char, v8::internal::Register, v8::internal::Register)
+
+0.12^3565074^HTMLParserThrea^chrome               ^[.] v8::internal::TranslationBuffer::Add(int, v8::internal::Zone*)
+            |
+            --- v8::internal::TranslationBuffer::Add(int, v8::internal::Zone*)
+
+0.12^3565037^HTMLParserThrea^chrome               ^[.] WebCore::Node::parentOrShadowHostElement() const
+            |
+            --- WebCore::Node::parentOrShadowHostElement() const
+
+0.12^3564789^HTMLParserThrea^chrome               ^[.] void v8::internal::FlexibleBodyVisitor<v8::internal::MarkCompactMarkingVisitor, v8::internal::StructBodyDescriptor, void>::VisitSpecialized<56>(v8::internal::Map*, v8::internal::HeapObject*)
+            |
+            --- void v8::internal::FlexibleBodyVisitor<v8::internal::MarkCompactMarkingVisitor, v8::internal::StructBodyDescriptor, void>::VisitSpecialized<56>(v8::internal::Map*, v8::internal::HeapObject*)
+
+0.12^3564677^HTMLParserThrea^chrome               ^[.] v8::internal::Map::RawCopy(int)
+            |
+            --- v8::internal::Map::RawCopy(int)
+
+0.12^3563819^HTMLParserThrea^chrome               ^[.] _ZN2v88internal8LCodeGen16WriteTranslationEPNS0_12LEnvironmentEPNS0_11TranslationE.constprop.384
+            |
+            --- _ZN2v88internal8LCodeGen16WriteTranslationEPNS0_12LEnvironmentEPNS0_11TranslationE.constprop.384
+
+0.12^3563122^HTMLParserThrea^chrome               ^[.] WebCore::MatchedProperties::~MatchedProperties()
+            |
+            --- WebCore::MatchedProperties::~MatchedProperties()
+                0x3b2abdb056b8
+                0x1e83176feba8
+
+0.12^3562575^HTMLParserThrea^chrome               ^[.] v8::internal::FullCodeGenerator::MakeCode(v8::internal::CompilationInfo*)
+            |
+            --- v8::internal::FullCodeGenerator::MakeCode(v8::internal::CompilationInfo*)
+
+0.12^3561995^HTMLParserThrea^chrome               ^[.] v8::internal::BaseLoadStoreStubCompiler::GetICCode(v8::internal::Code::Kind, v8::internal::Code::StubType, v8::internal::Handle<v8::internal::Name>, v8::internal::InlineCacheState)
+            |
+            --- v8::internal::BaseLoadStoreStubCompiler::GetICCode(v8::internal::Code::Kind, v8::internal::Code::StubType, v8::internal::Handle<v8::internal::Name>, v8::internal::InlineCacheState)
+                0x9d2ae798179
+
+0.12^3561893^HTMLParserThrea^chrome               ^[.] int v8::internal::FlexibleBodyVisitor<v8::internal::NewSpaceScavenger, v8::internal::JSObject::BodyDescriptor, int>::VisitSpecialized<24>(v8::internal::Map*, v8::internal::HeapObject*)
+            |
+            --- int v8::internal::FlexibleBodyVisitor<v8::internal::NewSpaceScavenger, v8::internal::JSObject::BodyDescriptor, int>::VisitSpecialized<24>(v8::internal::Map*, v8::internal::HeapObject*)
+
+0.12^3561621^HTMLParserThrea^chrome               ^[.] WTF::AtomicString::addSlowCase(WTF::StringImpl*)
+            |
+            --- WTF::AtomicString::addSlowCase(WTF::StringImpl*)
+
+0.12^3561375^HTMLParserThrea^chrome               ^[.] WebCore::StyleResolver::styleForElement(WebCore::Element*, WebCore::RenderStyle*, WebCore::StyleSharingBehavior, WebCore::RuleMatchingBehavior, WebCore::RenderRegion*)
+            |
+            --- WebCore::StyleResolver::styleForElement(WebCore::Element*, WebCore::RenderStyle*, WebCore::StyleSharingBehavior, WebCore::RuleMatchingBehavior, WebCore::RenderRegion*)
+
+0.12^3561200^HTMLParserThrea^chrome               ^[.] v8::internal::Builtins::LoadIC_PreMonomorphic()
+            |
+            --- v8::internal::Builtins::LoadIC_PreMonomorphic()
+                0x29f9884600a9
+
+0.12^3561181^HTMLParserThrea^chrome               ^[.] WebCore::FloatPoint::FloatPoint(WebCore::LayoutPoint const&)
+            |
+            --- WebCore::FloatPoint::FloatPoint(WebCore::LayoutPoint const&)
+                WebCore::RenderBox::clippedOverflowRectForRepaint(WebCore::RenderLayerModelObject const*) const
+                WebCore::RenderBox::contentWidth() const
+
+0.12^3561117^HTMLParserThrea^chrome               ^[.] v8::internal::LoadStubCompiler::registers()
+            |
+            --- v8::internal::LoadStubCompiler::registers()
+                0xc6a34d0ef91
+
+0.12^3561066^HTMLParserThrea^chrome               ^[.] WebCore::BorderData::BorderData()
+            |
+            --- WebCore::BorderData::BorderData()
+
+0.12^3560598^HTMLParserThrea^chrome               ^[.] WebCore::HTMLElement::parseAttribute(WebCore::QualifiedName const&, WTF::AtomicString const&)
+            |
+            --- WebCore::HTMLElement::parseAttribute(WebCore::QualifiedName const&, WTF::AtomicString const&)
+
+0.12^3560313^HTMLParserThrea^chrome               ^[.] WebCore::RenderBlock::createLineBoxesFromBidiRuns(WebCore::BidiRunList<WebCore::BidiRun>&, WebCore::InlineIterator const&, WebCore::LineInfo&, WebCore::VerticalPositionCache&, WebCore::BidiRun*, WTF::Vector<WebCore::WordMeasurement, 64ul>&)
+            |
+            --- WebCore::RenderBlock::createLineBoxesFromBidiRuns(WebCore::BidiRunList<WebCore::BidiRun>&, WebCore::InlineIterator const&, WebCore::LineInfo&, WebCore::VerticalPositionCache&, WebCore::BidiRun*, WTF::Vector<WebCore::WordMeasurement, 64ul>&)
+                0x3b2abd54deb0
+
+0.12^3560066^HTMLParserThrea^chrome               ^[.] void WebCore::StringCache::setReturnValueFromString<v8::PropertyCallbackInfo<v8::Value> >(v8::PropertyCallbackInfo<v8::Value> const&, WTF::StringImpl*, v8::Isolate*)
+            |
+            --- void WebCore::StringCache::setReturnValueFromString<v8::PropertyCallbackInfo<v8::Value> >(v8::PropertyCallbackInfo<v8::Value> const&, WTF::StringImpl*, v8::Isolate*)
+                0xc6a35a2a0c9
+                v8::internal::JSReceiver::LocalLookup(v8::internal::Name*, v8::internal::LookupResult*, bool)
+
+0.12^3560037^HTMLParserThrea^chrome               ^[.] v8::internal::StaticMarkingVisitor<v8::internal::MarkCompactMarkingVisitor>::VisitSharedFunctionInfo(v8::internal::Map*, v8::internal::HeapObject*)
+            |
+            --- v8::internal::StaticMarkingVisitor<v8::internal::MarkCompactMarkingVisitor>::VisitSharedFunctionInfo(v8::internal::Map*, v8::internal::HeapObject*)
+
+0.12^3559792^HTMLParserThrea^chrome               ^[.] WebCore::ComposedShadowTreeWalker::traverseParent(WebCore::Node const*, WebCore::NodeRenderingTraversal::ParentDetails*) const
+            |
+            --- WebCore::ComposedShadowTreeWalker::traverseParent(WebCore::Node const*, WebCore::NodeRenderingTraversal::ParentDetails*) const
+
+0.12^3559628^HTMLParserThrea^chrome               ^[.] WebCore::Element::rebuildPresentationAttributeStyle()
+            |
+            --- WebCore::Element::rebuildPresentationAttributeStyle()
+                0x1e83176beb48
+
+0.12^3558753^HTMLParserThrea^chrome               ^[.] v8::internal::LoadStubCompiler::JitEvent(v8::internal::Handle<v8::internal::Name>, v8::internal::Handle<v8::internal::Code>)
+            |
+            --- v8::internal::LoadStubCompiler::JitEvent(v8::internal::Handle<v8::internal::Name>, v8::internal::Handle<v8::internal::Code>)
+                0xc6a354902f1
+
+0.12^3557965^HTMLParserThrea^chrome               ^[.] v8::internal::Assembler::lea(v8::internal::Register, v8::internal::Operand const&)
+            |
+            --- v8::internal::Assembler::lea(v8::internal::Register, v8::internal::Operand const&)
+
+0.12^3557486^HTMLParserThrea^chrome               ^[.] WebCore::RenderBlock::updateShapeInsideInfoAfterStyleChange(WebCore::ShapeValue const*, WebCore::ShapeValue const*)
+            |
+            --- WebCore::RenderBlock::updateShapeInsideInfoAfterStyleChange(WebCore::ShapeValue const*, WebCore::ShapeValue const*)
+
+0.12^3556781^HTMLParserThrea^libc-2.15.so         ^[.] __strlen_sse2_pminub
+            |
+            --- __strlen_sse2_pminub
+
+0.12^3556662^HTMLParserThrea^chrome               ^[.] WebCore::ElementRuleCollector::collectMatchingRulesForList(WebCore::RuleData const*, WebCore::MatchRequest const&, WebCore::RuleRange&)
+            |
+            --- WebCore::ElementRuleCollector::collectMatchingRulesForList(WebCore::RuleData const*, WebCore::MatchRequest const&, WebCore::RuleRange&)
+
+0.12^3556305^HTMLParserThrea^chrome               ^[.] v8::internal::Heap::Scavenge()
+            |
+            --- v8::internal::Heap::Scavenge()
+
+0.12^3556183^HTMLParserThrea^chrome               ^[.] WebCore::RenderBlock::linkToEndLineIfNeeded(WebCore::LineLayoutState&)
+            |
+            --- WebCore::RenderBlock::linkToEndLineIfNeeded(WebCore::LineLayoutState&)
+
+0.12^3555975^HTMLParserThrea^chrome               ^[.] WebCore::SharedStyleFinder::locateCousinList(WebCore::Element*, unsigned int&) const
+            |
+            --- WebCore::SharedStyleFinder::locateCousinList(WebCore::Element*, unsigned int&) const
+                0x176da48800000000
+
+0.12^3555563^HTMLParserThrea^chrome               ^[.] v8::internal::FullCodeGenerator::PrepareForBailoutForId(v8::internal::BailoutId, v8::internal::FullCodeGenerator::State)
+            |
+            --- v8::internal::FullCodeGenerator::PrepareForBailoutForId(v8::internal::BailoutId, v8::internal::FullCodeGenerator::State)
+
+0.12^3555363^HTMLParserThrea^chrome               ^[.] v8::internal::PropertyCallbackArguments::Call(v8::Handle<v8::Value> (*)(v8::Local<v8::String>, v8::AccessorInfo const&), v8::Local<v8::String>)
+            |
+            --- v8::internal::PropertyCallbackArguments::Call(v8::Handle<v8::Value> (*)(v8::Local<v8::String>, v8::AccessorInfo const&), v8::Local<v8::String>)
+                0x7f7a0b900000
+
+0.12^3554978^HTMLParserThrea^chrome               ^[.] v8::internal::Object::Lookup(v8::internal::Name*, v8::internal::LookupResult*)
+            |
+            --- v8::internal::Object::Lookup(v8::internal::Name*, v8::internal::LookupResult*)
+
+0.12^3554874^HTMLParserThrea^chrome               ^[.] v8::internal::Heap::AllocateRawOneByteString(int, v8::internal::PretenureFlag)
+            |
+            --- v8::internal::Heap::AllocateRawOneByteString(int, v8::internal::PretenureFlag)
+
+0.12^3554475^HTMLParserThrea^chrome               ^[.] WebCore::RenderBlock::styleWillChange(WebCore::StyleDifference, WebCore::RenderStyle const*)
+            |
+            --- WebCore::RenderBlock::styleWillChange(WebCore::StyleDifference, WebCore::RenderStyle const*)
+
+0.12^3553383^HTMLParserThrea^chrome               ^[.] base::Time::Now()
+            |
+            --- base::Time::Now()
+
+0.12^3553107^HTMLParserThrea^chrome               ^[.] WebCore::ElementRuleCollector::~ElementRuleCollector()
+            |
+            --- WebCore::ElementRuleCollector::~ElementRuleCollector()
+
+0.12^3550744^HTMLParserThrea^chrome               ^[.] net::HttpResponseHeaders::FindHeader(unsigned long, base::BasicStringPiece<std::string> const&) const
+            |
+            --- net::HttpResponseHeaders::FindHeader(unsigned long, base::BasicStringPiece<std::string> const&) const
+
+0.12^3550703^HTMLParserThrea^chrome               ^[.] WebCore::NodeRenderingContext::createRendererForElementIfNeeded()
+            |
+            --- WebCore::NodeRenderingContext::createRendererForElementIfNeeded()
+
+0.12^3550144^HTMLParserThrea^chrome               ^[.] v8::internal::Heap::ScavengeObject(v8::internal::HeapObject**, v8::internal::HeapObject*)
+            |
+            --- v8::internal::Heap::ScavengeObject(v8::internal::HeapObject**, v8::internal::HeapObject*)
+
+0.12^3550090^HTMLParserThrea^chrome               ^[.] WebCore::RenderLayerModelObject::styleDidChange(WebCore::StyleDifference, WebCore::RenderStyle const*)
+            |
+            --- WebCore::RenderLayerModelObject::styleDidChange(WebCore::StyleDifference, WebCore::RenderStyle const*)
+
+0.12^3549513^HTMLParserThrea^chrome               ^[.] std::_Rb_tree<extensions::ChromeV8Context*, extensions::ChromeV8Context*, std::_Identity<extensions::ChromeV8Context*>, std::less<extensions::ChromeV8Context*>, std::allocator<extensions::ChromeV8Context*> >::_M_insert_unique(extensions::ChromeV8Context* const&)
+            |
+            --- std::_Rb_tree<extensions::ChromeV8Context*, extensions::ChromeV8Context*, std::_Identity<extensions::ChromeV8Context*>, std::less<extensions::ChromeV8Context*>, std::allocator<extensions::ChromeV8Context*> >::_M_insert_unique(extensions::ChromeV8Context* const&)
+
+0.12^3549422^HTMLParserThrea^chrome               ^[.] WebCore::RenderBlock::clearFloats()
+            |
+            --- WebCore::RenderBlock::clearFloats()
+
+0.12^3548487^HTMLParserThrea^chrome               ^[.] WebCore::RenderReplaced::computeAspectRatioInformationForRenderBox(WebCore::RenderBox*, WebCore::FloatSize&, double&, bool&) const
+            |
+            --- WebCore::RenderReplaced::computeAspectRatioInformationForRenderBox(WebCore::RenderBox*, WebCore::FloatSize&, double&, bool&) const
+                0x3b2abdce1ed8
+
+0.12^3545724^HTMLParserThrea^chrome               ^[.] WebCore::SVGRenderSupport::styleChanged(WebCore::RenderObject*)
+            |
+            --- WebCore::SVGRenderSupport::styleChanged(WebCore::RenderObject*)
+
+0.12^3545370^HTMLParserThrea^chrome               ^[.] v8::internal::Scope::AllocateNonParameterLocals()
+            |
+            --- v8::internal::Scope::AllocateNonParameterLocals()
+                0x700000007
+
+0.12^3545160^HTMLParserThrea^chrome               ^[.] WebCore::Element::hasFlagsSetDuringStylingOfChildren() const
+            |
+            --- WebCore::Element::hasFlagsSetDuringStylingOfChildren() const
+
+0.12^3543621^HTMLParserThrea^chrome               ^[.] WebCore::RenderBox::visualOverflowRectForPropagation(WebCore::RenderStyle*) const
+            |
+            --- WebCore::RenderBox::visualOverflowRectForPropagation(WebCore::RenderStyle*) const
+
+0.12^3542672^HTMLParserThrea^libm-2.15.so         ^[.] __ceilf_sse41
+            |
+            --- __ceilf_sse41
+
+0.12^3542332^HTMLParserThrea^chrome               ^[.] v8::internal::CallStubCompiler::CompileHandlerFrontend(v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::JSObject>, v8::internal::Handle<v8::internal::Name>, v8::internal::CheckType, v8::internal::Label*)
+            |
+            --- v8::internal::CallStubCompiler::CompileHandlerFrontend(v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::JSObject>, v8::internal::Handle<v8::internal::Name>, v8::internal::CheckType, v8::internal::Label*)
+                0x7f7a11d17f90
+
+0.12^3542050^HTMLParserThrea^chrome               ^[.] v8::internal::MarkCompactCollector::RecordRelocSlot(v8::internal::RelocInfo*, v8::internal::Object*)
+            |
+            --- v8::internal::MarkCompactCollector::RecordRelocSlot(v8::internal::RelocInfo*, v8::internal::Object*)
+
+0.12^3540984^HTMLParserThrea^chrome               ^[.] v8::internal::RegExpMacroAssemblerX64::CheckSpecialCharacterClass(unsigned short, v8::internal::Label*)
+            |
+            --- v8::internal::RegExpMacroAssemblerX64::CheckSpecialCharacterClass(unsigned short, v8::internal::Label*)
+
+0.12^3537796^HTMLParserThrea^chrome               ^[.] SkBitmapProcState::chooseProcs(SkMatrix const&, SkPaint const&)
+            |
+            --- SkBitmapProcState::chooseProcs(SkMatrix const&, SkPaint const&)
+                0x3f80000041400000
+
+0.12^3537246^HTMLParserThrea^chrome               ^[.] v8::internal::Heap::CreateCode(v8::internal::CodeDesc const&, unsigned int, v8::internal::Handle<v8::internal::Object>, bool, bool)
+            |
+            --- v8::internal::Heap::CreateCode(v8::internal::CodeDesc const&, unsigned int, v8::internal::Handle<v8::internal::Object>, bool, bool)
+                0x8f00001000
+
+0.12^3535475^HTMLParserThrea^chrome               ^[.] WebCore::Element::getAttribute(WebCore::QualifiedName const&) const
+            |
+            --- WebCore::Element::getAttribute(WebCore::QualifiedName const&) const
+
+0.12^3534788^HTMLParserThrea^chrome               ^[.] v8::internal::ToBooleanIC_Miss(int, v8::internal::Object**, v8::internal::Isolate*)
+            |
+            --- v8::internal::ToBooleanIC_Miss(int, v8::internal::Object**, v8::internal::Isolate*)
+
+0.12^3534268^HTMLParserThrea^chrome               ^[.] WebCore::FormElementKey::deref() const
+            |
+            --- WebCore::FormElementKey::deref() const
+
+0.12^3533584^HTMLParserThrea^chrome               ^[.] std::basic_string<unsigned short, base::string16_char_traits, std::allocator<unsigned short> >::assign(std::basic_string<unsigned short, base::string16_char_traits, std::allocator<unsigned short> > const&)
+            |
+            --- std::basic_string<unsigned short, base::string16_char_traits, std::allocator<unsigned short> >::assign(std::basic_string<unsigned short, base::string16_char_traits, std::allocator<unsigned short> > const&)
+                0x33d2a411c441
+
+0.12^3533503^HTMLParserThrea^chrome               ^[.] void v8::internal::ScavengingVisitor<(v8::internal::MarksHandling)1, (v8::internal::LoggingAndProfiling)1>::EvacuateObject<(v8::internal::ScavengingVisitor<(v8::internal::MarksHandling)1, (v8::internal::LoggingAndProfiling)1>::ObjectContents)1, (v8::internal::ScavengingVisitor<(v8::internal::MarksHandling)1, (v8::internal::LoggingAndProfiling)1>::SizeRestriction)0, 8>(v8::internal::Map*, v8::internal::HeapObject**, v8::internal::HeapObject*, int)
+            |
+            --- void v8::internal::ScavengingVisitor<(v8::internal::MarksHandling)1, (v8::internal::LoggingAndProfiling)1>::EvacuateObject<(v8::internal::ScavengingVisitor<(v8::internal::MarksHandling)1, (v8::internal::LoggingAndProfiling)1>::ObjectContents)1, (v8::internal::ScavengingVisitor<(v8::internal::MarksHandling)1, (v8::internal::LoggingAndProfiling)1>::SizeRestriction)0, 8>(v8::internal::Map*, v8::internal::HeapObject**, v8::internal::HeapObject*, int)
+
+0.12^3532801^HTMLParserThrea^chrome               ^[.] WebCore::HTMLConstructionSite::indexOfFirstUnopenFormattingElement(unsigned int&) const
+            |
+            --- WebCore::HTMLConstructionSite::indexOfFirstUnopenFormattingElement(unsigned int&) const
+
+0.12^3531175^HTMLParserThrea^chrome               ^[.] v8::internal::StubCache::ComputeLoadNonexistent(v8::internal::Handle<v8::internal::Name>, v8::internal::Handle<v8::internal::JSObject>)
+            |
+            --- v8::internal::StubCache::ComputeLoadNonexistent(v8::internal::Handle<v8::internal::Name>, v8::internal::Handle<v8::internal::JSObject>)
+
+0.12^3529077^HTMLParserThrea^chrome               ^[.] WTF::currentTime()
+            |
+            --- WTF::currentTime()
+
+0.12^3527952^HTMLParserThrea^chrome               ^[.] void WebCore::SelectorDataList::execute<false>(WebCore::Node*, WTF::Vector<WTF::RefPtr<WebCore::Node>, 0ul>&) const
+            |
+            --- void WebCore::SelectorDataList::execute<false>(WebCore::Node*, WTF::Vector<WTF::RefPtr<WebCore::Node>, 0ul>&) const
+
+0.12^3527820^HTMLParserThrea^chrome               ^[.] v8::preparser::PreParser::ParseStatement(bool*)
+            |
+            --- v8::preparser::PreParser::ParseStatement(bool*)
+                0x3b2abd78300000
+
+0.12^3526909^HTMLParserThrea^chrome               ^[.] v8::internal::MacroAssembler::LoadSmiConstant(v8::internal::Register, v8::internal::Smi*)
+            |
+            --- v8::internal::MacroAssembler::LoadSmiConstant(v8::internal::Register, v8::internal::Smi*)
+
+0.12^3526173^HTMLParserThrea^chrome               ^[.] v8::internal::MacroAssembler::CheckMap(v8::internal::Register, v8::internal::Handle<v8::internal::Map>, v8::internal::Label*, v8::internal::SmiCheckType)
+            |
+            --- v8::internal::MacroAssembler::CheckMap(v8::internal::Register, v8::internal::Handle<v8::internal::Map>, v8::internal::Label*, v8::internal::SmiCheckType)
+
+0.12^3525548^HTMLParserThrea^chrome               ^[.] v8::internal::String::SlowTryFlatten(v8::internal::PretenureFlag)
+            |
+            --- v8::internal::String::SlowTryFlatten(v8::internal::PretenureFlag)
+
+0.12^3525251^HTMLParserThrea^chrome               ^[.] WebCore::RuleSet::addChildRules(WTF::Vector<WTF::RefPtr<WebCore::StyleRuleBase>, 0ul> const&, WebCore::MediaQueryEvaluator const&, WebCore::StyleResolver*, WebCore::ContainerNode const*, bool, WebCore::AddRuleFlags)
+            |
+            --- WebCore::RuleSet::addChildRules(WTF::Vector<WTF::RefPtr<WebCore::StyleRuleBase>, 0ul> const&, WebCore::MediaQueryEvaluator const&, WebCore::StyleResolver*, WebCore::ContainerNode const*, bool, WebCore::AddRuleFlags)
+
+0.12^3524914^HTMLParserThrea^chrome               ^[.] v8::internal::PositionsRecorder::WriteRecordedPositions()
+            |
+            --- v8::internal::PositionsRecorder::WriteRecordedPositions()
+
+0.12^3524273^HTMLParserThrea^chrome               ^[.] WebCore::FrameLoader::checkLoadComplete()
+            |
+            --- WebCore::FrameLoader::checkLoadComplete()
+
+0.12^3523358^HTMLParserThrea^chrome               ^[.] v8::internal::ScavengingVisitor<(v8::internal::MarksHandling)1, (v8::internal::LoggingAndProfiling)1>::EvacuateFixedArray(v8::internal::Map*, v8::internal::HeapObject**, v8::internal::HeapObject*)
+            |
+            --- v8::internal::ScavengingVisitor<(v8::internal::MarksHandling)1, (v8::internal::LoggingAndProfiling)1>::EvacuateFixedArray(v8::internal::Map*, v8::internal::HeapObject**, v8::internal::HeapObject*)
+
+0.12^3522817^HTMLParserThrea^chrome               ^[.] WebCore::HTMLFrameElementBase::attach(WebCore::Node::AttachContext const&)
+            |
+            --- WebCore::HTMLFrameElementBase::attach(WebCore::Node::AttachContext const&)
+
+0.12^3522399^HTMLParserThrea^chrome               ^[.] v8::internal::Runtime::SetObjectProperty(v8::internal::Isolate*, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>, PropertyAttributes, v8::internal::StrictModeFlag)
+            |
+            --- v8::internal::Runtime::SetObjectProperty(v8::internal::Isolate*, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>, PropertyAttributes, v8::internal::StrictModeFlag)
+                0x2281c143442a
+                0x2281c143180c
+                0x2281c14310e9
+                0x2281c140e854
+                0x2281c14317b0
+                0x2281c14310e9
+                0x2281c140e854
+                0x2281c14317b0
+                0x2281c14310e9
+                0x2281c140e854
+                0x2281c142b65e
+                0x2281c1417d97
+                v8::internal::Invoke(bool, v8::internal::Handle<v8::internal::JSFunction>, v8::internal::Handle<v8::internal::Object>, int, v8::internal::Handle<v8::internal::Object>*, bool*)
+
+0.12^3521835^HTMLParserThrea^chrome               ^[.] v8::Object::SetAlignedPointerInInternalField(int, void*)
+            |
+            --- v8::Object::SetAlignedPointerInInternalField(int, void*)
+
+0.12^3519670^HTMLParserThrea^chrome               ^[.] v8::internal::HDeadCodeEliminationPhase::RemoveDeadInstructions()
+            |
+            --- v8::internal::HDeadCodeEliminationPhase::RemoveDeadInstructions()
+                0x7fff30e8edb0
+
+0.12^3518719^HTMLParserThrea^chrome               ^[.] v8::internal::LCodeGen::Comment(char const*, ...)
+            |
+            --- v8::internal::LCodeGen::Comment(char const*, ...)
+
+0.12^3518131^HTMLParserThrea^chrome               ^[.] v8::internal::JSObject::HasRealNamedProperty(v8::internal::Isolate*, v8::internal::Name*)
+            |
+            --- v8::internal::JSObject::HasRealNamedProperty(v8::internal::Isolate*, v8::internal::Name*)
+
+0.12^3516705^HTMLParserThrea^chrome               ^[.] v8::internal::Runtime_GetTemplateField(int, v8::internal::Object**, v8::internal::Isolate*)
+            |
+            --- v8::internal::Runtime_GetTemplateField(int, v8::internal::Object**, v8::internal::Isolate*)
+                0x2281c1431084
+                0x2281c140e854
+                0x2281c142b65e
+                0x2281c1417d97
+                v8::internal::Invoke(bool, v8::internal::Handle<v8::internal::JSFunction>, v8::internal::Handle<v8::internal::Object>, int, v8::internal::Handle<v8::internal::Object>*, bool*)
+
+0.12^3515242^HTMLParserThrea^chrome               ^[.] void WebCore::StyleResolver::applyProperties<(WebCore::StyleResolver::StyleApplicationPass)0>(WebCore::StyleResolverState&, WebCore::StylePropertySet const*, WebCore::StyleRule*, bool, bool, WebCore::PropertyWhitelistType)
+            |
+            --- void WebCore::StyleResolver::applyProperties<(WebCore::StyleResolver::StyleApplicationPass)0>(WebCore::StyleResolverState&, WebCore::StylePropertySet const*, WebCore::StyleRule*, bool, bool, WebCore::PropertyWhitelistType)
+
+0.12^3514809^HTMLParserThrea^chrome               ^[.] url_util::LowerCaseEqualsASCII(char const*, char const*, char const*)
+            |
+            --- url_util::LowerCaseEqualsASCII(char const*, char const*, char const*)
+
+0.12^3514002^HTMLParserThrea^chrome               ^[.] v8::internal::ScavengeVisitor::VisitPointer(v8::internal::Object**)
+            |
+            --- v8::internal::ScavengeVisitor::VisitPointer(v8::internal::Object**)
+
+0.12^3513688^HTMLParserThrea^chrome               ^[.] _ZN2v88internal15DescriptorArray6AppendEPNS0_10DescriptorERKNS1_16WhitenessWitnessE.constprop.537
+            |
+            --- _ZN2v88internal15DescriptorArray6AppendEPNS0_10DescriptorERKNS1_16WhitenessWitnessE.constprop.537
+
+0.12^3512883^HTMLParserThrea^chrome               ^[.] v8::internal::LoadIC_Miss(int, v8::internal::Object**, v8::internal::Isolate*)
+            |
+            --- v8::internal::LoadIC_Miss(int, v8::internal::Object**, v8::internal::Isolate*)
+                0x7fff30e92040
+
+0.12^3511238^HTMLParserThrea^chrome               ^[.] WebCore::V8ScriptRunner::precompileScript(v8::Handle<v8::String>, WebCore::CachedScript*)
+            |
+            --- WebCore::V8ScriptRunner::precompileScript(v8::Handle<v8::String>, WebCore::CachedScript*)
+
+0.12^3506824^HTMLParserThrea^chrome               ^[.] v8::internal::Factory::NewAllocationSite()
+            |
+            --- v8::internal::Factory::NewAllocationSite()
+
+0.12^3504680^HTMLParserThrea^chrome               ^[.] WebCore::InputTypeNames::text()
+            |
+            --- WebCore::InputTypeNames::text()
+
+0.12^3503829^HTMLParserThrea^chrome               ^[.] v8::internal::FastElementsAccessor<v8::internal::FastPackedObjectElementsAccessor, v8::internal::ElementsKindTraits<(v8::internal::ElementsKind)2>, 8>::SetLengthWithoutNormalize(v8::internal::FixedArrayBase*, v8::internal::JSArray*, v8::internal::Object*, unsigned int)
+            |
+            --- v8::internal::FastElementsAccessor<v8::internal::FastPackedObjectElementsAccessor, v8::internal::ElementsKindTraits<(v8::internal::ElementsKind)2>, 8>::SetLengthWithoutNormalize(v8::internal::FixedArrayBase*, v8::internal::JSArray*, v8::internal::Object*, unsigned int)
+
+0.12^3500368^HTMLParserThrea^chrome               ^[.] WTF::Vector<WTF::String, 0ul>::expandCapacity(unsigned long)
+            |
+            --- WTF::Vector<WTF::String, 0ul>::expandCapacity(unsigned long)
+                0x1000
+
+0.12^3497657^HTMLParserThrea^chrome               ^[.] WebCore::RenderObject::adjustStyleDifference(WebCore::StyleDifference, unsigned int) const
+            |
+            --- WebCore::RenderObject::adjustStyleDifference(WebCore::StyleDifference, unsigned int) const
+
+0.12^3494287^HTMLParserThrea^chrome               ^[.] v8::internal::IC::Clear(unsigned char*)
+            |
+            --- v8::internal::IC::Clear(unsigned char*)
+
+0.12^3492300^HTMLParserThrea^chrome               ^[.] ppapi::proxy::InterfaceProxy::Send(IPC::Message*)
+            |
+            --- ppapi::proxy::InterfaceProxy::Send(IPC::Message*)
+
+0.12^3492214^HTMLParserThrea^chrome               ^[.] v8::internal::Assembler::Assembler(v8::internal::Isolate*, void*, int)
+            |
+            --- v8::internal::Assembler::Assembler(v8::internal::Isolate*, void*, int)
+                0xc6a346f4711
+
+0.12^3489812^HTMLParserThrea^chrome               ^[.] WebCore::SelectorCheckerFastPath::matchesRightmostSelector(WebCore::SelectorChecker::VisitedMatchType) const
+            |
+            --- WebCore::SelectorCheckerFastPath::matchesRightmostSelector(WebCore::SelectorChecker::VisitedMatchType) const
+
+0.12^3488486^HTMLParserThrea^chrome               ^[.] tc_free
+            |
+            --- tc_free
+                0x100000000
+                0x29f988404121
+
+0.12^3488229^HTMLParserThrea^chrome               ^[.] WebCore::RenderObject::updateFillImages(WebCore::FillLayer const*, WebCore::FillLayer const*)
+            |
+            --- WebCore::RenderObject::updateFillImages(WebCore::FillLayer const*, WebCore::FillLayer const*)
+
+0.12^3486534^HTMLParserThrea^chrome               ^[.] v8::internal::Parser::ParsePostfixExpression(bool*)
+            |
+            --- v8::internal::Parser::ParsePostfixExpression(bool*)
+                0x7fff30e905d002
+
+0.12^3484804^HTMLParserThrea^chrome               ^[.] v8::internal::SharedStoreIC_ExtendStorage(int, v8::internal::Object**, v8::internal::Isolate*)
+            |
+            --- v8::internal::SharedStoreIC_ExtendStorage(int, v8::internal::Object**, v8::internal::Isolate*)
+
+0.12^3484146^HTMLParserThrea^chrome               ^[.] WebCore::RenderCounter::rendererSubtreeAttached(WebCore::RenderObject*)
+            |
+            --- WebCore::RenderCounter::rendererSubtreeAttached(WebCore::RenderObject*)
+
+0.12^3483547^HTMLParserThrea^chrome               ^[.] WebCore::LiveNodeListBase::itemBeforeOrAfterCachedItem(unsigned int, WebCore::ContainerNode*) const
+            |
+            --- WebCore::LiveNodeListBase::itemBeforeOrAfterCachedItem(unsigned int, WebCore::ContainerNode*) const
+
+0.12^3482072^HTMLParserThrea^chrome               ^[.] v8::internal::CPU::FlushICache(void*, unsigned long)
+            |
+            --- v8::internal::CPU::FlushICache(void*, unsigned long)
+
+0.12^3481935^HTMLParserThrea^chrome               ^[.] WebCore::StyleResolver::matchAuthorRules(WebCore::Element*, WebCore::ElementRuleCollector&, bool)
+            |
+            --- WebCore::StyleResolver::matchAuthorRules(WebCore::Element*, WebCore::ElementRuleCollector&, bool)
+                0x1e83176abb08
+
+0.12^3481470^HTMLParserThrea^chrome               ^[.] v8::internal::StackFrame::GetCallerState(v8::internal::StackFrame::State*) const
+            |
+            --- v8::internal::StackFrame::GetCallerState(v8::internal::StackFrame::State*) const
+                0x7fff30e8f698
+                0x9d2ae7aa1e1
+
+0.12^3479937^HTMLParserThrea^chrome               ^[.] WTF::AtomicString::add(unsigned char const*)
+            |
+            --- WTF::AtomicString::add(unsigned char const*)
+
+0.12^3476115^HTMLParserThrea^chrome               ^[.] WebCore::StyleAdjuster::adjustRenderStyle(WebCore::RenderStyle*, WebCore::RenderStyle*, WebCore::Element*)
+            |
+            --- WebCore::StyleAdjuster::adjustRenderStyle(WebCore::RenderStyle*, WebCore::RenderStyle*, WebCore::Element*)
+
+0.12^3474363^HTMLParserThrea^chrome               ^[.] WebCore::CSSValue::deref()
+            |
+            --- WebCore::CSSValue::deref()
+
+0.12^3473710^HTMLParserThrea^chrome               ^[.] v8::internal::Assembler::movq(v8::internal::Register, long, v8::internal::RelocInfo::Mode)
+            |
+            --- v8::internal::Assembler::movq(v8::internal::Register, long, v8::internal::RelocInfo::Mode)
+
+0.12^3471996^HTMLParserThrea^chrome               ^[.] v8::internal::RegExpEngine::Compile(v8::internal::RegExpCompileData*, bool, bool, bool, v8::internal::Handle<v8::internal::String>, v8::internal::Handle<v8::internal::String>, bool, v8::internal::Zone*)
+            |
+            --- v8::internal::RegExpEngine::Compile(v8::internal::RegExpCompileData*, bool, bool, bool, v8::internal::Handle<v8::internal::String>, v8::internal::Handle<v8::internal::String>, bool, v8::internal::Zone*)
+                0xc6a3439fca1
+
+0.12^3468177^HTMLParserThrea^chrome               ^[.] v8::internal::Context::global_proxy()
+            |
+            --- v8::internal::Context::global_proxy()
+
+0.12^3467384^HTMLParserThrea^chrome               ^[.] WTF::Vector<std::pair<void (*)(WebCore::Node*), WTF::RefPtr<WebCore::Node> >, 0ul>::shrinkCapacity(unsigned long)
+            |
+            --- WTF::Vector<std::pair<void (*)(WebCore::Node*), WTF::RefPtr<WebCore::Node> >, 0ul>::shrinkCapacity(unsigned long)
+
+0.12^3466196^HTMLParserThrea^chrome               ^[.] v8::internal::Parser::GetSymbol()
+            |
+            --- v8::internal::Parser::GetSymbol()
+                0x7fff30e9182000
+
+0.12^3462208^HTMLParserThrea^chrome               ^[.] v8::internal::JSReceiver::SetProperty(v8::internal::Name*, v8::internal::Object*, PropertyAttributes, v8::internal::StrictModeFlag, v8::internal::JSReceiver::StoreFromKeyed)
+            |
+            --- v8::internal::JSReceiver::SetProperty(v8::internal::Name*, v8::internal::Object*, PropertyAttributes, v8::internal::StrictModeFlag, v8::internal::JSReceiver::StoreFromKeyed)
+                0x7fff30e91950
+
+0.12^3461655^HTMLParserThrea^chrome               ^[.] WebCore::RenderLayer::shouldBeNormalFlowOnlyIgnoringCompositedScrolling() const
+            |
+            --- WebCore::RenderLayer::shouldBeNormalFlowOnlyIgnoringCompositedScrolling() const
+
+0.11^3453572^HTMLParserThrea^chrome               ^[.] v8::internal::HBasicBlock::HBasicBlock(v8::internal::HGraph*)
+            |
+            --- v8::internal::HBasicBlock::HBasicBlock(v8::internal::HGraph*)
+
+0.11^3450775^HTMLParserThrea^chrome               ^[.] WebCore::RenderObject::invalidateContainerPreferredLogicalWidths()
+            |
+            --- WebCore::RenderObject::invalidateContainerPreferredLogicalWidths()
+
+0.11^3446446^HTMLParserThrea^chrome               ^[.] WebCore::LayoutUnit::round() const
+            |
+            --- WebCore::LayoutUnit::round() const
+
+0.11^3445539^HTMLParserThrea^chrome               ^[.] v8::internal::Runtime_RegExpExec(int, v8::internal::Object**, v8::internal::Isolate*)
+            |
+            --- v8::internal::Runtime_RegExpExec(int, v8::internal::Object**, v8::internal::Isolate*)
+                0xc6a3540bd09
+
+0.11^3445347^HTMLParserThrea^chrome               ^[.] v8::internal::BaseLoadStubCompiler::GenerateLoadCallback(v8::internal::Register, v8::internal::Handle<v8::internal::ExecutableAccessorInfo>)
+            |
+            --- v8::internal::BaseLoadStubCompiler::GenerateLoadCallback(v8::internal::Register, v8::internal::Handle<v8::internal::ExecutableAccessorInfo>)
+
+0.11^3444801^HTMLParserThrea^chrome               ^[.] v8::internal::BufferedUtf16CharacterStream::ReadBlock()
+            |
+            --- v8::internal::BufferedUtf16CharacterStream::ReadBlock()
+                0x7fff30e91798
+
+0.11^3436515^HTMLParserThrea^chrome               ^[.] WTF::Vector<unsigned short, 256ul>::shrinkCapacity(unsigned long)
+            |
+            --- WTF::Vector<unsigned short, 256ul>::shrinkCapacity(unsigned long)
+
+0.11^3435352^HTMLParserThrea^chrome               ^[.] v8::internal::Parser::ParseFunctionLiteral(v8::internal::Handle<v8::internal::String>, bool, bool, int, v8::internal::FunctionLiteral::FunctionType, bool*)
+            |
+            --- v8::internal::Parser::ParseFunctionLiteral(v8::internal::Handle<v8::internal::String>, bool, bool, int, v8::internal::FunctionLiteral::FunctionType, bool*)
+                0x7fff30e903a000
+
+0.11^3434720^HTMLParserThrea^chrome               ^[.] WebCore::ImageSource::setData(WebCore::SharedBuffer*, bool)
+            |
+            --- WebCore::ImageSource::setData(WebCore::SharedBuffer*, bool)
+
+0.11^3432682^HTMLParserThrea^chrome               ^[.] v8::internal::StandardFrame::ComputeCallerState(v8::internal::StackFrame::State*) const
+            |
+            --- v8::internal::StandardFrame::ComputeCallerState(v8::internal::StackFrame::State*) const
+                0x7fff30e90df8
+                0xc6a346b4fe9
+
+0.11^3432008^HTMLParserThrea^chrome               ^[.] v8::internal::VariableProxy::Accept(v8::internal::AstVisitor*)
+            |
+            --- v8::internal::VariableProxy::Accept(v8::internal::AstVisitor*)
+
+0.11^3420656^HTMLParserThrea^chrome               ^[.] SkBlitter::Choose(SkBitmap const&, SkMatrix const&, SkPaint const&, void*, unsigned long)
+            |
+            --- SkBlitter::Choose(SkBitmap const&, SkMatrix const&, SkPaint const&, void*, unsigned long)
+
+0.11^3420637^HTMLParserThrea^chrome               ^[.] v8::internal::Map::UpdateCodeCache(v8::internal::Name*, v8::internal::Code*)
+            |
+            --- v8::internal::Map::UpdateCodeCache(v8::internal::Name*, v8::internal::Code*)
+
+0.11^3419913^HTMLParserThrea^chrome               ^[.] WebCore::Element::childrenChanged(bool, WebCore::Node*, WebCore::Node*, int)
+            |
+            --- WebCore::Element::childrenChanged(bool, WebCore::Node*, WebCore::Node*, int)
+
+0.11^3413310^HTMLParserThrea^chrome               ^[.] v8::internal::Scope::ResolveVariablesRecursively(v8::internal::CompilationInfo*, v8::internal::AstNodeFactory<v8::internal::AstNullVisitor>*)
+            |
+            --- v8::internal::Scope::ResolveVariablesRecursively(v8::internal::CompilationInfo*, v8::internal::AstNodeFactory<v8::internal::AstNullVisitor>*)
+
+0.11^3413062^HTMLParserThrea^chrome               ^[.] void WTF::Vector<WebCore::MatchedProperties, 0ul>::append<WebCore::MatchedProperties>(WebCore::MatchedProperties const*, unsigned long)
+            |
+            --- void WTF::Vector<WebCore::MatchedProperties, 0ul>::append<WebCore::MatchedProperties>(WebCore::MatchedProperties const*, unsigned long)
+
+0.11^3409803^HTMLParserThrea^chrome               ^[.] v8::internal::Assembler::movq(v8::internal::Register, v8::internal::Operand const&)
+            |
+            --- v8::internal::Assembler::movq(v8::internal::Register, v8::internal::Operand const&)
+
+0.11^3406785^HTMLParserThrea^chrome               ^[.] v8::internal::HashTable<v8::internal::MapCacheShape, v8::internal::HashTableKey*>::FindEntry(v8::internal::Isolate*, v8::internal::HashTableKey*)
+            |
+            --- v8::internal::HashTable<v8::internal::MapCacheShape, v8::internal::HashTableKey*>::FindEntry(v8::internal::Isolate*, v8::internal::HashTableKey*)
+
+0.11^3404799^HTMLParserThrea^chrome               ^[.] WebCore::RenderLineBoxList::removeLineBox(WebCore::InlineFlowBox*)
+            |
+            --- WebCore::RenderLineBoxList::removeLineBox(WebCore::InlineFlowBox*)
+
+0.11^3398149^HTMLParserThrea^chrome               ^[.] WebCore::RenderLayer::clippingRootForPainting() const
+            |
+            --- WebCore::RenderLayer::clippingRootForPainting() const
+                0x2bc00000a000
+
+0.11^3390095^HTMLParserThrea^chrome               ^[.] v8::internal::FullCodeGenerator::VarOperand(v8::internal::Variable*, v8::internal::Register)
+            |
+            --- v8::internal::FullCodeGenerator::VarOperand(v8::internal::Variable*, v8::internal::Register)
+                0x7fff30e91980
+
+0.11^3388424^HTMLParserThrea^chrome               ^[.] WebCore::DOMTimer::fired()
+            |
+            --- WebCore::DOMTimer::fired()
+
+0.11^3378612^HTMLParserThrea^chrome               ^[.] v8::internal::JSObject::SetFastElementsCapacityAndLength(int, int, v8::internal::JSObject::SetFastElementsCapacitySmiMode)
+            |
+            --- v8::internal::JSObject::SetFastElementsCapacityAndLength(int, int, v8::internal::JSObject::SetFastElementsCapacitySmiMode)
+
+0.11^3374619^HTMLParserThrea^chrome               ^[.] v8::internal::Code::MakeCodeAgeSequenceYoung(unsigned char*)
+            |
+            --- v8::internal::Code::MakeCodeAgeSequenceYoung(unsigned char*)
+                0x2281c140e854
+                0x2281c1e636e1
+                0x2281c1e52c8f
+                0x2281c142b664
+                0x2281c1417d97
+                v8::internal::Invoke(bool, v8::internal::Handle<v8::internal::JSFunction>, v8::internal::Handle<v8::internal::Object>, int, v8::internal::Handle<v8::internal::Object>*, bool*)
+
+0.11^3373628^HTMLParserThrea^chrome               ^[.] ucase_fold_46
+            |
+            --- ucase_fold_46
+
+0.11^3372159^HTMLParserThrea^chrome               ^[.] WebCore::LayoutState::LayoutState(WebCore::LayoutState*, WebCore::RenderBox*, WebCore::LayoutSize const&, WebCore::LayoutUnit, bool, WebCore::ColumnInfo*)
+            |
+            --- WebCore::LayoutState::LayoutState(WebCore::LayoutState*, WebCore::RenderBox*, WebCore::LayoutSize const&, WebCore::LayoutUnit, bool, WebCore::ColumnInfo*)
+
+0.11^3365891^HTMLParserThrea^chrome               ^[.] v8::internal::HOptimizedGraphBuilder::BuildGraph()
+            |
+            --- v8::internal::HOptimizedGraphBuilder::BuildGraph()
+
+0.11^3359190^HTMLParserThrea^chrome               ^[.] v8::internal::Scope::Initialize()
+            |
+            --- v8::internal::Scope::Initialize()
+
+0.11^3356725^HTMLParserThrea^chrome               ^[.] WebCore::RenderLayer::updateScrollbarsAfterLayout()
+            |
+            --- WebCore::RenderLayer::updateScrollbarsAfterLayout()
+
+0.11^3356459^HTMLParserThrea^chrome               ^[.] _ZN7WebCoreplEiRKNS_10LayoutUnitE.constprop.860
+            |
+            --- _ZN7WebCoreplEiRKNS_10LayoutUnitE.constprop.860
+
+0.11^3355399^HTMLParserThrea^chrome               ^[.] v8::internal::ExitFrame::GetCallerStackPointer() const
+            |
+            --- v8::internal::ExitFrame::GetCallerStackPointer() const
+                (nil)
+
+0.11^3351779^HTMLParserThrea^chrome               ^[.] WebCore::RenderBlock::layoutRunsAndFloatsInRange(WebCore::LineLayoutState&, WebCore::BidiResolver<WebCore::InlineIterator, WebCore::BidiRun>&, WebCore::InlineIterator const&, WebCore::BidiStatus const&, unsigned int)
+            |
+            --- WebCore::RenderBlock::layoutRunsAndFloatsInRange(WebCore::LineLayoutState&, WebCore::BidiResolver<WebCore::InlineIterator, WebCore::BidiRun>&, WebCore::InlineIterator const&, WebCore::BidiStatus const&, unsigned int)
+                0x3b2abd866648
+
+0.11^3344999^HTMLParserThrea^chrome               ^[.] v8::internal::CompilationCacheRegExp::Lookup(v8::internal::Handle<v8::internal::String>, v8::internal::JSRegExp::Flags)
+            |
+            --- v8::internal::CompilationCacheRegExp::Lookup(v8::internal::Handle<v8::internal::String>, v8::internal::JSRegExp::Flags)
+
+0.11^3344476^HTMLParserThrea^chrome               ^[.] tracked_objects::DeathData::RecordDeath(int, int, int)
+            |
+            --- tracked_objects::DeathData::RecordDeath(int, int, int)
+
+0.11^3343357^HTMLParserThrea^chrome               ^[.] void WTF::Vector<WebCore::SelectorFilter::ParentStackFrame, 0ul>::appendSlowCase<WebCore::SelectorFilter::ParentStackFrame>(WebCore::SelectorFilter::ParentStackFrame const&)
+            |
+            --- void WTF::Vector<WebCore::SelectorFilter::ParentStackFrame, 0ul>::appendSlowCase<WebCore::SelectorFilter::ParentStackFrame>(WebCore::SelectorFilter::ParentStackFrame const&)
+
+0.11^3341336^HTMLParserThrea^chrome               ^[.] v8::internal::FullCodeGenerator::VisitObjectLiteral(v8::internal::ObjectLiteral*)
+            |
+            --- v8::internal::FullCodeGenerator::VisitObjectLiteral(v8::internal::ObjectLiteral*)
+
+0.11^3340676^HTMLParserThrea^chrome               ^[.] v8::internal::Assignment::Accept(v8::internal::AstVisitor*)
+            |
+            --- v8::internal::Assignment::Accept(v8::internal::AstVisitor*)
+
+0.11^3336841^HTMLParserThrea^chrome               ^[.] v8::internal::MacroAssembler::Set(v8::internal::Register, long)
+            |
+            --- v8::internal::MacroAssembler::Set(v8::internal::Register, long)
+                0x3b2abd624000
+
+0.11^3335871^HTMLParserThrea^chrome               ^[.] v8::internal::Scanner::ScanRegExpPattern(bool)
+            |
+            --- v8::internal::Scanner::ScanRegExpPattern(bool)
+                0x10e8a1901
+
+0.11^3313926^HTMLParserThrea^chrome               ^[.] v8::internal::ToBooleanStub::Types::UpdateStatus(v8::internal::Handle<v8::internal::Object>)
+            |
+            --- v8::internal::ToBooleanStub::Types::UpdateStatus(v8::internal::Handle<v8::internal::Object>)
+                0x2281c14140c1
+
+0.11^3300775^HTMLParserThrea^chrome               ^[.] WebCore::SVGRenderSupport::setRendererHasSVGShadow(WebCore::RenderObject*, bool)
+            |
+            --- WebCore::SVGRenderSupport::setRendererHasSVGShadow(WebCore::RenderObject*, bool)
+
+0.11^3292871^HTMLParserThrea^chrome               ^[.] base::debug::TraceLog::GetInstance()
+            |
+            --- base::debug::TraceLog::GetInstance()
+
+0.11^3289620^HTMLParserThrea^chrome               ^[.] WebCore::FrameView::pagination() const
+            |
+            --- WebCore::FrameView::pagination() const
+                (nil)
+                0x3b2abdc16c30
+
+0.11^3274771^HTMLParserThrea^chrome               ^[.] v8::internal::Factory::NewMap(v8::internal::InstanceType, int, v8::internal::ElementsKind)
+            |
+            --- v8::internal::Factory::NewMap(v8::internal::InstanceType, int, v8::internal::ElementsKind)
+
+0.11^3268228^HTMLParserThrea^chrome               ^[.] WebCore::RenderBlock::updateBlockChildDirtyBitsBeforeLayout(bool, WebCore::RenderBox*)
+            |
+            --- WebCore::RenderBlock::updateBlockChildDirtyBitsBeforeLayout(bool, WebCore::RenderBox*)
+
+0.11^3264333^HTMLParserThrea^chrome               ^[.] v8::internal::CreateObjectLiteralBoilerplate(v8::internal::Isolate*, v8::internal::Handle<v8::internal::FixedArray>, v8::internal::Handle<v8::internal::FixedArray>, bool, bool)
+            |
+            --- v8::internal::CreateObjectLiteralBoilerplate(v8::internal::Isolate*, v8::internal::Handle<v8::internal::FixedArray>, v8::internal::Handle<v8::internal::FixedArray>, bool, bool)
+
+0.11^3263293^HTMLParserThrea^chrome               ^[.] content::RenderViewObserver::DidClearWindowObject(WebKit::WebFrame*)
+            |
+            --- content::RenderViewObserver::DidClearWindowObject(WebKit::WebFrame*)
+
+0.11^3263262^HTMLParserThrea^chrome               ^[.] WebCore::V8HiddenPropertyName::sleepFunction()
+            |
+            --- WebCore::V8HiddenPropertyName::sleepFunction()
+                0x5bf5d67bd1
+
+0.11^3253742^HTMLParserThrea^chrome               ^[.] WebCore::RenderLayer::boundingBox(WebCore::RenderLayer const*, unsigned int, WebCore::LayoutPoint const*) const
+            |
+            --- WebCore::RenderLayer::boundingBox(WebCore::RenderLayer const*, unsigned int, WebCore::LayoutPoint const*) const
+                (nil)
+
+0.11^3250656^HTMLParserThrea^chrome               ^[.] v8::internal::Runtime_StringCharCodeAt(int, v8::internal::Object**, v8::internal::Isolate*)
+            |
+            --- v8::internal::Runtime_StringCharCodeAt(int, v8::internal::Object**, v8::internal::Isolate*)
+                0x2281c1449ea3
+                0x2281c14497a7
+                0x2281c1425b83
+                0x2281c141b848
+                0x2281c1443ab6
+                0x2281c14480ae
+                0x2281c144938f
+                0x2281c142b664
+                0x2281c1417d97
+                v8::internal::Invoke(bool, v8::internal::Handle<v8::internal::JSFunction>, v8::internal::Handle<v8::internal::Object>, int, v8::internal::Handle<v8::internal::Object>*, bool*)
+
+0.11^3240988^HTMLParserThrea^chrome               ^[.] WTF::equalIgnoringCaseNonNull(WTF::StringImpl const*, WTF::StringImpl const*)
+            |
+            --- WTF::equalIgnoringCaseNonNull(WTF::StringImpl const*, WTF::StringImpl const*)
+
+0.11^3237736^HTMLParserThrea^chrome               ^[.] v8::internal::Assembler::call(v8::internal::Handle<v8::internal::Code>, v8::internal::RelocInfo::Mode, v8::internal::TypeFeedbackId)
+            |
+            --- v8::internal::Assembler::call(v8::internal::Handle<v8::internal::Code>, v8::internal::RelocInfo::Mode, v8::internal::TypeFeedbackId)
+
+0.11^3228754^HTMLParserThrea^libpthread-2.15.so   ^[.] pthread_mutex_unlock
+            |
+            --- pthread_mutex_unlock
+
+0.11^3225835^HTMLParserThrea^chrome               ^[.] WebCore::RenderObject::repaintUsingContainer(WebCore::RenderLayerModelObject const*, WebCore::IntRect const&) const
+            |
+            --- WebCore::RenderObject::repaintUsingContainer(WebCore::RenderLayerModelObject const*, WebCore::IntRect const&) const
+
+0.11^3219559^HTMLParserThrea^chrome               ^[.] icu_46::RuleBasedBreakIterator::handleNext(icu_46::RBBIStateTable const*)
+            |
+            --- icu_46::RuleBasedBreakIterator::handleNext(icu_46::RBBIStateTable const*)
+
+0.11^3215902^HTMLParserThrea^chrome               ^[.] v8::internal::Scanner::Scanner(v8::internal::UnicodeCache*)
+            |
+            --- v8::internal::Scanner::Scanner(v8::internal::UnicodeCache*)
+                0x7f7a0b900000
+
+0.11^3202188^HTMLParserThrea^chrome               ^[.] v8::Object::FindInstanceInPrototypeChain(v8::Handle<v8::FunctionTemplate>)
+            |
+            --- v8::Object::FindInstanceInPrototypeChain(v8::Handle<v8::FunctionTemplate>)
+
+0.11^3202083^HTMLParserThrea^chrome               ^[.] WebCore::Element::attach(WebCore::Node::AttachContext const&)
+            |
+            --- WebCore::Element::attach(WebCore::Node::AttachContext const&)
+
+0.11^3200673^HTMLParserThrea^chrome               ^[.] v8::internal::CompareIC::NewInputState(v8::internal::CompareIC::State, v8::internal::Handle<v8::internal::Object>)
+            |
+            --- v8::internal::CompareIC::NewInputState(v8::internal::CompareIC::State, v8::internal::Handle<v8::internal::Object>)
+
+0.11^3176018^HTMLParserThrea^chrome               ^[.] WebCore::Element::recalcStyle(WebCore::Node::StyleChange)
+            |
+            --- WebCore::Element::recalcStyle(WebCore::Node::StyleChange)
+
+0.11^3170916^HTMLParserThrea^chrome               ^[.] WebCore::MajorGCWrapperVisitor::VisitPersistentHandle(v8::Persistent<v8::Value>*, unsigned short)
+            |
+            --- WebCore::MajorGCWrapperVisitor::VisitPersistentHandle(v8::Persistent<v8::Value>*, unsigned short)
+                0x80e894c5308
+
+0.10^3137671^HTMLParserThrea^chrome               ^[.] v8::internal::DependentCode::DeoptimizeDependentCodeGroup(v8::internal::Isolate*, v8::internal::DependentCode::DependencyGroup)
+            |
+            --- v8::internal::DependentCode::DeoptimizeDependentCodeGroup(v8::internal::Isolate*, v8::internal::DependentCode::DependencyGroup)
+
+0.10^3136480^HTMLParserThrea^chrome               ^[.] WebCore::DOMWindow::document() const
+            |
+            --- WebCore::DOMWindow::document() const
+
+0.10^3136403^HTMLParserThrea^chrome               ^[.] _ZNK3WTF7HashMapIPN7WebCore12RenderObjectENS_6RefPtrINS1_18CompositeAnimationEEENS_7PtrHashIS3_EENS_10HashTraitsIS3_EENS9_IS6_EEE3getERKS3_.isra.185
+            |
+            --- _ZNK3WTF7HashMapIPN7WebCore12RenderObjectENS_6RefPtrINS1_18CompositeAnimationEEENS_7PtrHashIS3_EENS_10HashTraitsIS3_EENS9_IS6_EEE3getERKS3_.isra.185
+
+0.10^3126165^HTMLParserThrea^chrome               ^[.] v8::internal::VariableProxy::BindTo(v8::internal::Variable*)
+            |
+            --- v8::internal::VariableProxy::BindTo(v8::internal::Variable*)
+                0x3b2abdf3f010
+
+0.10^3124159^HTMLParserThrea^chrome               ^[.] WebCore::InlineTextBox::logicalOverflowRect() const
+            |
+            --- WebCore::InlineTextBox::logicalOverflowRect() const
+                0xaf00000000
+                0x3b2abdda8680
+
+0.10^3121836^HTMLParserThrea^chrome               ^[.] v8::internal::JSObject::SetPropertyViaPrototypes(v8::internal::Name*, v8::internal::Object*, PropertyAttributes, v8::internal::StrictModeFlag, bool*)
+            |
+            --- v8::internal::JSObject::SetPropertyViaPrototypes(v8::internal::Name*, v8::internal::Object*, PropertyAttributes, v8::internal::StrictModeFlag, bool*)
+
+0.10^3107295^HTMLParserThrea^chrome               ^[.] WebCore::ElementStyleResources::ElementStyleResources()
+            |
+            --- WebCore::ElementStyleResources::ElementStyleResources()
+
+0.10^3103474^HTMLParserThrea^chrome               ^[.] void WTF::deleteAllValues<true, WebCore::RenderBlock::FloatingObject*, WTF::HashTable<WTF::ListHashSetNode<WebCore::RenderBlock::FloatingObject*, 4ul>*, WTF::ListHashSetNode<WebCore::RenderBlock::FloatingObject*, 4ul>*, WTF::IdentityExtractor, WTF::ListHashSetNodeHashFunctions<WebCore::RenderBlock::FloatingObjectHashFunctions>, WTF::HashTraits<WTF::ListHashSetNode<WebCore::RenderBlock::FloatingObject*, 4ul>*>, WTF::HashTraits<WTF::ListHashSetNode<WebCo
+            |
+            --- void WTF::deleteAllValues<true, WebCore::RenderBlock::FloatingObject*, WTF::HashTable<WTF::ListHashSetNode<WebCore::RenderBlock::FloatingObject*, 4ul>*, WTF::ListHashSetNode<WebCore::RenderBlock::FloatingObject*, 4ul>*, WTF::IdentityExtractor, WTF::ListHashSetNodeHashFunctions<WebCore::RenderBlock::FloatingObjectHashFunctions>, WTF::HashTraits<WTF::ListHashSetNode<WebCore::RenderBlock::FloatingObject*, 4ul>*>, WTF::HashTraits<WTF::ListHashSetNode<WebCore::RenderBlock::FloatingObject*, 4ul>*> > const>(WTF::HashTable<WTF::ListHashSetNode<WebCore::RenderBlock::FloatingObject*, 4ul>*, WTF::ListHashSetNode<WebCore::RenderBlock::FloatingObject*, 4ul>*, WTF::IdentityExtractor, WTF::ListHashSetNodeHashFunctions<WebCore::RenderBlock::FloatingObjectHashFunctions>, WTF::HashTraits<WTF::ListHashSetNode<WebCore::RenderBlock::FloatingObject*, 4ul>*>, WTF::HashTraits<WTF::ListHashSetNode<WebCore::RenderBlock::FloatingObject*, 4ul>*> > const&)
+
+0.10^3103474^HTMLParserThrea^chrome               ^[.] base::subtle::RefCountedBase::Release() const
+            |
+            --- base::subtle::RefCountedBase::Release() const
+
+0.10^3092884^HTMLParserThrea^chrome               ^[.] WebCore::RenderView::usesCompositing() const
+            |
+            --- WebCore::RenderView::usesCompositing() const
+
+0.10^3090079^HTMLParserThrea^chrome               ^[.] webCoreInitializeScriptWrappableForInterface(WebCore::Element*)
+            |
+            --- webCoreInitializeScriptWrappableForInterface(WebCore::Element*)
+
+0.10^3084985^HTMLParserThrea^chrome               ^[.] WebCore::ScopedStyleTree::pushStyleCache(WebCore::ContainerNode const*, WebCore::ContainerNode const*)
+            |
+            --- WebCore::ScopedStyleTree::pushStyleCache(WebCore::ContainerNode const*, WebCore::ContainerNode const*)
+
+0.10^3075629^HTMLParserThrea^chrome               ^[.] v8::internal::HydrogenCodeStub::MinorKey()
+            |
+            --- v8::internal::HydrogenCodeStub::MinorKey()
+
+0.10^3075535^HTMLParserThrea^librt-2.15.so        ^[.] clock_gettime
+            |
+            --- clock_gettime
+
+0.10^3066941^HTMLParserThrea^chrome               ^[.] v8::internal::Assembler::jmp(v8::internal::Handle<v8::internal::Code>, v8::internal::RelocInfo::Mode)
+            |
+            --- v8::internal::Assembler::jmp(v8::internal::Handle<v8::internal::Code>, v8::internal::RelocInfo::Mode)
+
+0.10^3064555^HTMLParserThrea^chrome               ^[.] WebCore::RenderStyle::getRoundedBorderFor(WebCore::LayoutRect const&, WebCore::RenderView*, bool, bool) const
+            |
+            --- WebCore::RenderStyle::getRoundedBorderFor(WebCore::LayoutRect const&, WebCore::RenderView*, bool, bool) const
+
+0.10^3056872^HTMLParserThrea^chrome               ^[.] WebCore::LayoutRect::intersect(WebCore::LayoutRect const&)
+            |
+            --- WebCore::LayoutRect::intersect(WebCore::LayoutRect const&)
+                (nil)
+
+0.10^3056623^HTMLParserThrea^chrome               ^[.] v8::internal::CodeCacheHashTable::Put(v8::internal::Name*, v8::internal::Code*)
+            |
+            --- v8::internal::CodeCacheHashTable::Put(v8::internal::Name*, v8::internal::Code*)
+
+0.10^3054623^HTMLParserThrea^chrome               ^[.] v8::internal::ActionNode::GetQuickCheckDetails(v8::internal::QuickCheckDetails*, v8::internal::RegExpCompiler*, int, bool)
+            |
+            --- v8::internal::ActionNode::GetQuickCheckDetails(v8::internal::QuickCheckDetails*, v8::internal::RegExpCompiler*, int, bool)
+                0x3b2a00000002
+
+0.10^3051571^HTMLParserThrea^chrome               ^[.] _ZN2v88internal12PropertyCell17SetValueInferTypeEPNS0_6ObjectENS0_16WriteBarrierModeE.part.326
+            |
+            --- _ZN2v88internal12PropertyCell17SetValueInferTypeEPNS0_6ObjectENS0_16WriteBarrierModeE.part.326
+
+0.10^3047011^HTMLParserThrea^chrome               ^[.] WebCore::RenderBoxModelObject::paintFillLayerExtended(WebCore::PaintInfo const&, WebCore::Color const&, WebCore::FillLayer const*, WebCore::LayoutRect const&, WebCore::BackgroundBleedAvoidance, WebCore::InlineFlowBox*, WebCore::LayoutSize const&, WebCore::CompositeOperator, WebCore::RenderObject*)
+            |
+            --- WebCore::RenderBoxModelObject::paintFillLayerExtended(WebCore::PaintInfo const&, WebCore::Color const&, WebCore::FillLayer const*, WebCore::LayoutRect const&, WebCore::BackgroundBleedAvoidance, WebCore::InlineFlowBox*, WebCore::LayoutSize const&, WebCore::CompositeOperator, WebCore::RenderObject*)
+
+0.10^3047011^HTMLParserThrea^chrome               ^[.] WebCore::Timer<WebCore::EventSender<WebCore::ImageLoader> >::fired()
+            |
+            --- WebCore::Timer<WebCore::EventSender<WebCore::ImageLoader> >::fired()
+
+0.10^3042405^HTMLParserThrea^chrome               ^[.] SkGlyphCache::getGlyphIDMetrics(unsigned short)
+            |
+            --- SkGlyphCache::getGlyphIDMetrics(unsigned short)
+
+0.10^3034275^HTMLParserThrea^chrome               ^[.] WebCore::StyleBuilder::applyProperty(WebCore::CSSPropertyID, WebCore::StyleResolver*, WebCore::StyleResolverState&, WebCore::CSSValue*, bool, bool)
+            |
+            --- WebCore::StyleBuilder::applyProperty(WebCore::CSSPropertyID, WebCore::StyleResolver*, WebCore::StyleResolverState&, WebCore::CSSValue*, bool, bool)
+                0x1e83176a8568
+
+0.10^3027837^HTMLParserThrea^chrome               ^[.] v8::preparser::PreParser::ParseBinaryExpression(int, bool, bool*)
+            |
+            --- v8::preparser::PreParser::ParseBinaryExpression(int, bool, bool*)
+                0x7f7a0b96214000
+
+0.10^3027190^HTMLParserThrea^chrome               ^[.] v8::internal::CallOptimization::GetPrototypeDepthOfExpectedType(v8::internal::Handle<v8::internal::JSObject>, v8::internal::Handle<v8::internal::JSObject>) const
+            |
+            --- v8::internal::CallOptimization::GetPrototypeDepthOfExpectedType(v8::internal::Handle<v8::internal::JSObject>, v8::internal::Handle<v8::internal::JSObject>) const
+
+0.10^3025376^HTMLParserThrea^chrome               ^[.] v8::Function::Call(v8::Handle<v8::Object>, int, v8::Handle<v8::Value>*)
+            |
+            --- v8::Function::Call(v8::Handle<v8::Object>, int, v8::Handle<v8::Value>*)
+
+0.10^3019937^HTMLParserThrea^chrome               ^[.] net::HttpResponseHeaders::EnumerateHeader(void**, base::BasicStringPiece<std::string> const&, std::string*) const
+            |
+            --- net::HttpResponseHeaders::EnumerateHeader(void**, base::BasicStringPiece<std::string> const&, std::string*) const
+                0x7fff30e91800
+
+0.10^3017488^HTMLParserThrea^chrome               ^[.] v8::internal::Parser::ParseLazy()
+            |
+            --- v8::internal::Parser::ParseLazy()
+
+0.10^3014944^HTMLParserThrea^chrome               ^[.] WebCore::MarkupAccumulator::appendAttribute(WTF::StringBuilder&, WebCore::Element*, WebCore::Attribute const&, WTF::HashMap<WTF::AtomicStringImpl*, WTF::AtomicStringImpl*, WTF::PtrHash<WTF::AtomicStringImpl*>, WTF::HashTraits<WTF::AtomicStringImpl*>, WTF::HashTraits<WTF::AtomicStringImpl*> >*)
+            |
+            --- WebCore::MarkupAccumulator::appendAttribute(WTF::StringBuilder&, WebCore::Element*, WebCore::Attribute const&, WTF::HashMap<WTF::AtomicStringImpl*, WTF::AtomicStringImpl*, WTF::PtrHash<WTF::AtomicStringImpl*>, WTF::HashTraits<WTF::AtomicStringImpl*>, WTF::HashTraits<WTF::AtomicStringImpl*> >*)
+
+0.10^3005563^HTMLParserThrea^chrome               ^[.] tcmalloc::PageHeap::Carve(tcmalloc::Span*, unsigned long)
+            |
+            --- tcmalloc::PageHeap::Carve(tcmalloc::Span*, unsigned long)
+
+0.10^3002471^HTMLParserThrea^chrome               ^[.] WebCore::RenderLayer::paintOutlineForFragments(WTF::Vector<WebCore::LayerFragment, 1ul> const&, WebCore::GraphicsContext*, WebCore::RenderLayer::LayerPaintingInfo const&, unsigned int, WebCore::RenderObject*)
+            |
+            --- WebCore::RenderLayer::paintOutlineForFragments(WTF::Vector<WebCore::LayerFragment, 1ul> const&, WebCore::GraphicsContext*, WebCore::RenderLayer::LayerPaintingInfo const&, unsigned int, WebCore::RenderObject*)
+
+0.10^2988466^HTMLParserThrea^chrome               ^[.] v8::internal::CompareIC::TargetState(v8::internal::CompareIC::State, v8::internal::CompareIC::State, v8::internal::CompareIC::State, bool, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>)
+            |
+            --- v8::internal::CompareIC::TargetState(v8::internal::CompareIC::State, v8::internal::CompareIC::State, v8::internal::CompareIC::State, bool, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::Object>)
+
+0.10^2985776^HTMLParserThrea^chrome               ^[.] v8::internal::CodeStub::UseSpecialCache()
+            |
+            --- v8::internal::CodeStub::UseSpecialCache()
+
+0.10^2983793^HTMLParserThrea^chrome               ^[.] v8::internal::Genesis::TransferNamedProperties(v8::internal::Handle<v8::internal::JSObject>, v8::internal::Handle<v8::internal::JSObject>)
+            |
+            --- v8::internal::Genesis::TransferNamedProperties(v8::internal::Handle<v8::internal::JSObject>, v8::internal::Handle<v8::internal::JSObject>)
+
+0.10^2978233^HTMLParserThrea^chrome               ^[.] v8::internal::ReturnStatement::Accept(v8::internal::AstVisitor*)
+            |
+            --- v8::internal::ReturnStatement::Accept(v8::internal::AstVisitor*)
+
+0.10^2961361^HTMLParserThrea^chrome               ^[.] WebCore::HTMLInputElement::parseAttribute(WebCore::QualifiedName const&, WTF::AtomicString const&)
+            |
+            --- WebCore::HTMLInputElement::parseAttribute(WebCore::QualifiedName const&, WTF::AtomicString const&)
+                WebCore::V8PerIsolateData::hasInstance(WebCore::WrapperTypeInfo*, v8::Handle<v8::Value>, WebCore::WrapperWorldType)
+
+0.10^2950771^HTMLParserThrea^chrome               ^[.] webkit::ppapi::PluginInstance::Paint(SkCanvas*, gfx::Rect const&, gfx::Rect const&)
+            |
+            --- webkit::ppapi::PluginInstance::Paint(SkCanvas*, gfx::Rect const&, gfx::Rect const&)
+
+0.10^2941407^HTMLParserThrea^chrome               ^[.] WebCore::DocumentStyleSheetCollection::combineCSSFeatureFlags(WebCore::RuleFeatureSet const&)
+            |
+            --- WebCore::DocumentStyleSheetCollection::combineCSSFeatureFlags(WebCore::RuleFeatureSet const&)
+
+0.10^2927492^HTMLParserThrea^chrome               ^[.] SkCanvas::~SkCanvas()
+            |
+            --- SkCanvas::~SkCanvas()
+
+0.10^2924222^HTMLParserThrea^chrome               ^[.] WTF::HashTableAddResult<WTF::HashTableIterator<unsigned long, WTF::KeyValuePair<unsigned long, WTF::OwnPtr<WebCore::ProgressItem> >, WTF::KeyValuePairKeyExtractor<WTF::KeyValuePair<unsigned long, WTF::OwnPtr<WebCore::ProgressItem> > >, WTF::IntHash<unsigned long>, WTF::HashMapValueTraits<WTF::HashTraits<unsigned long>, WTF::HashTraits<WTF::OwnPtr<WebCore::ProgressItem> > >, WTF::HashTraits<unsigned long> > > WTF::HashTable<unsigned long, WTF::KeyValueP
+            |
+            --- WTF::HashTableAddResult<WTF::HashTableIterator<unsigned long, WTF::KeyValuePair<unsigned long, WTF::OwnPtr<WebCore::ProgressItem> >, WTF::KeyValuePairKeyExtractor<WTF::KeyValuePair<unsigned long, WTF::OwnPtr<WebCore::ProgressItem> > >, WTF::IntHash<unsigned long>, WTF::HashMapValueTraits<WTF::HashTraits<unsigned long>, WTF::HashTraits<WTF::OwnPtr<WebCore::ProgressItem> > >, WTF::HashTraits<unsigned long> > > WTF::HashTable<unsigned long, WTF::KeyValuePair<unsigned long, WTF::OwnPtr<WebCore::ProgressItem> >, WTF::KeyValuePairKeyExtractor<WTF::KeyValuePair<unsigned long, WTF::OwnPtr<WebCore::ProgressItem> > >, WTF::IntHash<unsigned long>, WTF::HashMapValueTraits<WTF::HashTraits<unsigned long>, WTF::HashTraits<WTF::OwnPtr<WebCore::ProgressItem> > >, WTF::HashTraits<unsigned long> >::add<WTF::HashMapTranslator<WTF::HashMapValueTraits<WTF::HashTraits<unsigned long>, WTF::HashTraits<WTF::OwnPtr<WebCore::ProgressItem> > >, WTF::IntHash<unsigned long> >, unsigned long, WTF::PassOwnPtr<WebCore::ProgressItem> >(unsigned long const&, WTF::PassOwnPtr<WebCore::ProgressItem> const&)
+
+0.10^2912173^HTMLParserThrea^chrome               ^[.] v8::internal::Genesis::CreateNewGlobals(v8::Handle<v8::ObjectTemplate>, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::GlobalObject>*)
+            |
+            --- v8::internal::Genesis::CreateNewGlobals(v8::Handle<v8::ObjectTemplate>, v8::internal::Handle<v8::internal::Object>, v8::internal::Handle<v8::internal::GlobalObject>*)
+
+0.10^2898436^HTMLParserThrea^chrome               ^[.] v8::internal::Runtime_FunctionSetName(int, v8::internal::Object**, v8::internal::Isolate*)
+            |
+            --- v8::internal::Runtime_FunctionSetName(int, v8::internal::Object**, v8::internal::Isolate*)
+                0x2281c14315f1
+                0x2281c14310e9
+                0x2281c1434402
+                0x2281c142b664
+                0x2281c1417d97
+                v8::internal::Invoke(bool, v8::internal::Handle<v8::internal::JSFunction>, v8::internal::Handle<v8::internal::Object>, int, v8::internal::Handle<v8::internal::Object>*, bool*)
+
+0.10^2892794^HTMLParserThrea^chrome               ^[.] v8::internal::Heap::AllocateUninitializedFixedArray(int)
+            |
+            --- v8::internal::Heap::AllocateUninitializedFixedArray(int)
+
+0.10^2891663^HTMLParserThrea^chrome               ^[.] content::RenderViewImpl::didCommitProvisionalLoad(WebKit::WebFrame*, bool)
+            |
+            --- content::RenderViewImpl::didCommitProvisionalLoad(WebKit::WebFrame*, bool)
+
+0.10^2867709^HTMLParserThrea^chrome               ^[.] v8::internal::Object::GetPropertyWithReceiver(v8::internal::Object*, v8::internal::Name*, PropertyAttributes*)
+            |
+            --- v8::internal::Object::GetPropertyWithReceiver(v8::internal::Object*, v8::internal::Name*, PropertyAttributes*)
+
+0.09^2852873^HTMLParserThrea^chrome               ^[.] WebCore::RenderLineBoxList::deleteLineBoxes(WebCore::RenderArena*)
+            |
+            --- WebCore::RenderLineBoxList::deleteLineBoxes(WebCore::RenderArena*)
+
+0.09^2848135^HTMLParserThrea^chrome               ^[.] base::internal::IncomingTaskQueue::PostPendingTask(base::PendingTask*)
+            |
+            --- base::internal::IncomingTaskQueue::PostPendingTask(base::PendingTask*)
+
+0.09^2845960^HTMLParserThrea^chrome               ^[.] WebCore::V8HTMLEmbedElement::namedPropertyGetterCustom(v8::Local<v8::String>, v8::PropertyCallbackInfo<v8::Value> const&)
+            |
+            --- WebCore::V8HTMLEmbedElement::namedPropertyGetterCustom(v8::Local<v8::String>, v8::PropertyCallbackInfo<v8::Value> const&)
+                0x7f7a0b900000
+
+0.09^2831855^HTMLParserThrea^chrome               ^[.] WebCore::RenderLayer::paintLayerContents(WebCore::GraphicsContext*, WebCore::RenderLayer::LayerPaintingInfo const&, unsigned int)
+            |
+            --- WebCore::RenderLayer::paintLayerContents(WebCore::GraphicsContext*, WebCore::RenderLayer::LayerPaintingInfo const&, unsigned int)
+                (nil)
+
+0.09^2830116^HTMLParserThrea^chrome               ^[.] v8::internal::RegExpParser::Advance()
+            |
+            --- v8::internal::RegExpParser::Advance()
+                0x3b2abdce1010
+
+0.09^2802116^HTMLParserThrea^chrome               ^[.] WebCore::RenderBlock::logicalLeftFloatOffsetForLine(WebCore::LayoutUnit, WebCore::LayoutUnit, WebCore::LayoutUnit*, WebCore::LayoutUnit, WebCore::RenderBlock::ShapeOutsideFloatOffsetMode) const
+            |
+            --- WebCore::RenderBlock::logicalLeftFloatOffsetForLine(WebCore::LayoutUnit, WebCore::LayoutUnit, WebCore::LayoutUnit*, WebCore::LayoutUnit, WebCore::RenderBlock::ShapeOutsideFloatOffsetMode) const
+
+0.09^2792893^HTMLParserThrea^chrome               ^[.] v8::internal::HashTable<v8::internal::UnseededNumberDictionaryShape, unsigned int>::FindEntry(v8::internal::Isolate*, unsigned int)
+            |
+            --- v8::internal::HashTable<v8::internal::UnseededNumberDictionaryShape, unsigned int>::FindEntry(v8::internal::Isolate*, unsigned int)
+
+0.09^2758347^HTMLParserThrea^chrome               ^[.] v8::internal::ElementsAccessorBase<v8::internal::DictionaryElementsAccessor, v8::internal::ElementsKindTraits<(v8::internal::ElementsKind)6> >::Get(v8::internal::Object*, v8::internal::JSObject*, unsigned int, v8::internal::FixedArrayBase*)
+            |
+            --- v8::internal::ElementsAccessorBase<v8::internal::DictionaryElementsAccessor, v8::internal::ElementsKindTraits<(v8::internal::ElementsKind)6> >::Get(v8::internal::Object*, v8::internal::JSObject*, unsigned int, v8::internal::FixedArrayBase*)
+
+0.09^2738185^HTMLParserThrea^chrome               ^[.] v8::internal::LoadFieldStub::MajorKey()
+            |
+            --- v8::internal::LoadFieldStub::MajorKey()
+
+0.09^2719813^HTMLParserThrea^chrome               ^[.] _ZN2v88internal9Execution4CallENS0_6HandleINS0_6ObjectEEES4_iPS4_Pbb.constprop.84
+            |
+            --- _ZN2v88internal9Execution4CallENS0_6HandleINS0_6ObjectEEES4_iPS4_Pbb.constprop.84
+
+0.09^2709340^HTMLParserThrea^chrome               ^[.] operator delete(void*)
+            |
+            --- operator delete(void*)
+
+0.09^2697911^HTMLParserThrea^chrome               ^[.] v8::internal::Scope::AllocateVariablesRecursively()
+            |
+            --- v8::internal::Scope::AllocateVariablesRecursively()
+
+0.09^2685980^HTMLParserThrea^chrome               ^[.] IPC::SyncChannel::SendWithTimeout(IPC::Message*, int)
+            |
+            --- IPC::SyncChannel::SendWithTimeout(IPC::Message*, int)
+
+0.09^2673092^HTMLParserThrea^chrome               ^[.] v8::internal::StubCache::ComputeCallPreMonomorphic(int, v8::internal::Code::Kind, int)
+            |
+            --- v8::internal::StubCache::ComputeCallPreMonomorphic(int, v8::internal::Code::Kind, int)
+
+0.09^2660139^HTMLParserThrea^chrome               ^[.] v8::internal::HashTable<v8::internal::CodeCacheHashTableShape, v8::internal::HashTableKey*>::FindEntry(v8::internal::HashTableKey*)
+            |
+            --- v8::internal::HashTable<v8::internal::CodeCacheHashTableShape, v8::internal::HashTableKey*>::FindEntry(v8::internal::HashTableKey*)
+
+0.09^2639411^HTMLParserThrea^chrome               ^[.] base::subtle::RefCountedThreadSafeBase::HasOneRef() const
+            |
+            --- base::subtle::RefCountedThreadSafeBase::HasOneRef() const
+                0x3b2abd1f8b20
+
+0.09^2639411^HTMLParserThrea^chrome               ^[.] PickleIterator::ReadLong(long*)
+            |
+            --- PickleIterator::ReadLong(long*)
+                0x2e40c81e951be0
+
+0.09^2632999^HTMLParserThrea^chrome               ^[.] IPC::SyncChannel::SyncContext::DispatchMessages()
+            |
+            --- IPC::SyncChannel::SyncContext::DispatchMessages()
+
+0.09^2600329^HTMLParserThrea^chrome               ^[.] v8::internal::Operand::Operand(v8::internal::Register, int)
+            |
+            --- v8::internal::Operand::Operand(v8::internal::Register, int)
+                0x3b2abd628000
+
+0.09^2577224^HTMLParserThrea^chrome               ^[.] v8::internal::CompareIC::ComputeCondition(v8::internal::Token::Value)
+            |
+            --- v8::internal::CompareIC::ComputeCondition(v8::internal::Token::Value)
+
+0.09^2566312^HTMLParserThrea^chrome               ^[.] v8::internal::Rewriter::Rewrite(v8::internal::CompilationInfo*)
+            |
+            --- v8::internal::Rewriter::Rewrite(v8::internal::CompilationInfo*)
+                0x7f7a0b900000
+
+0.08^2546113^HTMLParserThrea^chrome               ^[.] WebCore::RenderBoxModelObject::borderLeft() const
+            |
+            --- WebCore::RenderBoxModelObject::borderLeft() const
+
+0.08^2538991^HTMLParserThrea^chrome               ^[.] v8::internal::Handle<v8::internal::String> v8::internal::URIEscape::Escape<unsigned char>(v8::internal::Isolate*, v8::internal::Handle<v8::internal::String>)
+            |
+            --- v8::internal::Handle<v8::internal::String> v8::internal::URIEscape::Escape<unsigned char>(v8::internal::Isolate*, v8::internal::Handle<v8::internal::String>)
+
+0.08^2527291^HTMLParserThrea^chrome               ^[.] WTF::HashTableAddResult<WTF::HashTableIterator<WTF::StringImpl*, WTF::StringImpl*, WTF::IdentityExtractor, WTF::StringHash, WTF::HashTraits<WTF::StringImpl*>, WTF::HashTraits<WTF::StringImpl*> > > WTF::HashTable<WTF::StringImpl*, WTF::StringImpl*, WTF::IdentityExtractor, WTF::StringHash, WTF::HashTraits<WTF::StringImpl*>, WTF::HashTraits<WTF::StringImpl*> >::add<WTF::IdentityHashTranslator<WTF::StringHash>, WTF::StringImpl*, WTF::StringImpl*>(WTF::Stri
+            |
+            --- WTF::HashTableAddResult<WTF::HashTableIterator<WTF::StringImpl*, WTF::StringImpl*, WTF::IdentityExtractor, WTF::StringHash, WTF::HashTraits<WTF::StringImpl*>, WTF::HashTraits<WTF::StringImpl*> > > WTF::HashTable<WTF::StringImpl*, WTF::StringImpl*, WTF::IdentityExtractor, WTF::StringHash, WTF::HashTraits<WTF::StringImpl*>, WTF::HashTraits<WTF::StringImpl*> >::add<WTF::IdentityHashTranslator<WTF::StringHash>, WTF::StringImpl*, WTF::StringImpl*>(WTF::StringImpl* const&, WTF::StringImpl* const&)
+                0x7fff30e91098
+
+0.08^2505118^HTMLParserThrea^chrome               ^[.] v8::internal::AssertionNode::Emit(v8::internal::RegExpCompiler*, v8::internal::Trace*)
+            |
+            --- v8::internal::AssertionNode::Emit(v8::internal::RegExpCompiler*, v8::internal::Trace*)
+                0xa
+
+0.08^2499686^HTMLParserThrea^chrome               ^[.] base::SampleVector::GetBucketIndex(int) const
+            |
+            --- base::SampleVector::GetBucketIndex(int) const
+
+0.08^2493442^HTMLParserThrea^chrome               ^[.] v8::internal::RegExpImpl::SetLastMatchInfo(v8::internal::Handle<v8::internal::JSArray>, v8::internal::Handle<v8::internal::String>, int, int*)
+            |
+            --- v8::internal::RegExpImpl::SetLastMatchInfo(v8::internal::Handle<v8::internal::JSArray>, v8::internal::Handle<v8::internal::String>, int, int*)
+
+0.08^2449547^HTMLParserThrea^chrome               ^[.] content::(anonymous namespace)::RendererMessageLoopObserver::DidProcessTask(base::PendingTask const&)
+            |
+            --- content::(anonymous namespace)::RendererMessageLoopObserver::DidProcessTask(base::PendingTask const&)
+                0x7f7a0b8ff200
+
+0.08^2382741^HTMLParserThrea^chrome               ^[.] WTF::HashTableAddResult<WTF::HashTableIterator<WebCore::CachedResource*, WTF::KeyValuePair<WebCore::CachedResource*, WTF::RefPtr<WebCore::ResourceTimingInfo> >, WTF::KeyValuePairKeyExtractor<WTF::KeyValuePair<WebCore::CachedResource*, WTF::RefPtr<WebCore::ResourceTimingInfo> > >, WTF::PtrHash<WebCore::CachedResource*>, WTF::HashMapValueTraits<WTF::HashTraits<WebCore::CachedResource*>, WTF::HashTraits<WTF::RefPtr<WebCore::ResourceTimingInfo> > >, WTF::H
+            |
+            --- WTF::HashTableAddResult<WTF::HashTableIterator<WebCore::CachedResource*, WTF::KeyValuePair<WebCore::CachedResource*, WTF::RefPtr<WebCore::ResourceTimingInfo> >, WTF::KeyValuePairKeyExtractor<WTF::KeyValuePair<WebCore::CachedResource*, WTF::RefPtr<WebCore::ResourceTimingInfo> > >, WTF::PtrHash<WebCore::CachedResource*>, WTF::HashMapValueTraits<WTF::HashTraits<WebCore::CachedResource*>, WTF::HashTraits<WTF::RefPtr<WebCore::ResourceTimingInfo> > >, WTF::HashTraits<WebCore::CachedResource*> > > WTF::HashTable<WebCore::CachedResource*, WTF::KeyValuePair<WebCore::CachedResource*, WTF::RefPtr<WebCore::ResourceTimingInfo> >, WTF::KeyValuePairKeyExtractor<WTF::KeyValuePair<WebCore::CachedResource*, WTF::RefPtr<WebCore::ResourceTimingInfo> > >, WTF::PtrHash<WebCore::CachedResource*>, WTF::HashMapValueTraits<WTF::HashTraits<WebCore::CachedResource*>, WTF::HashTraits<WTF::RefPtr<WebCore::ResourceTimingInfo> > >, WTF::HashTraits<WebCore::CachedResource*> >::add<WTF::HashMapTranslator<WTF::HashMapValueTraits<WTF::HashTraits<WebCore::CachedResource*>, WTF::HashTraits<WTF::RefPtr<WebCore::ResourceTimingInfo> > >, WTF::PtrHash<WebCore::CachedResource*> >, WebCore::CachedResource*, WTF::PassRefPtr<WebCore::ResourceTimingInfo> >(WebCore::CachedResource* const&, WTF::PassRefPtr<WebCore::ResourceTimingInfo> const&)
+
+0.08^2347799^HTMLParserThrea^chrome               ^[.] v8::internal::Isolate::MayNamedAccess(v8::internal::JSObject*, v8::internal::Object*, v8::AccessType)
+            |
+            --- v8::internal::Isolate::MayNamedAccess(v8::internal::JSObject*, v8::internal::Object*, v8::AccessType)
+
+0.08^2337746^HTMLParserThrea^chrome               ^[.] v8::internal::Heap::NumberFromDouble(double, v8::internal::PretenureFlag)
+            |
+            --- v8::internal::Heap::NumberFromDouble(double, v8::internal::PretenureFlag)
+
+0.08^2334937^HTMLParserThrea^chrome               ^[.] WebCore::LayoutRect::intersects(WebCore::LayoutRect const&) const
+            |
+            --- WebCore::LayoutRect::intersects(WebCore::LayoutRect const&) const
+
+0.08^2315633^HTMLParserThrea^chrome               ^[.] _ZN7content14RenderViewImpl15willSendRequestEPN6WebKit8WebFrameEjRNS1_13WebURLRequestERKNS1_14WebURLResponseE.part.953
+            |
+            --- _ZN7content14RenderViewImpl15willSendRequestEPN6WebKit8WebFrameEjRNS1_13WebURLRequestERKNS1_14WebURLResponseE.part.953
+                0x7f7a11c1f320
+                0x7fff30e90fd0
+
+0.08^2309876^HTMLParserThrea^libstdc++.so.6.0.16  ^[.] std::string::find_first_not_of(char const*, unsigned long, unsigned long) const
+            |
+            --- std::string::find_first_not_of(char const*, unsigned long, unsigned long) const
+                (nil)
+
+0.08^2271635^HTMLParserThrea^chrome               ^[.] base::TaskQueue::Swap(base::TaskQueue*)
+            |
+            --- base::TaskQueue::Swap(base::TaskQueue*)
+
+0.08^2270859^HTMLParserThrea^chrome               ^[.] v8::internal::Map::ShareDescriptor(v8::internal::DescriptorArray*, v8::internal::Descriptor*)
+            |
+            --- v8::internal::Map::ShareDescriptor(v8::internal::DescriptorArray*, v8::internal::Descriptor*)
+
+0.08^2267153^HTMLParserThrea^chrome               ^[.] v8::internal::Map::EnsureDescriptorSlack(v8::internal::Handle<v8::internal::Map>, int)
+            |
+            --- v8::internal::Map::EnsureDescriptorSlack(v8::internal::Handle<v8::internal::Map>, int)
+
+0.07^2254719^HTMLParserThrea^chrome               ^[.] v8::internal::Map::IndexInCodeCache(v8::internal::Object*, v8::internal::Code*)
+            |
+            --- v8::internal::Map::IndexInCodeCache(v8::internal::Object*, v8::internal::Code*)
+
+0.07^2218397^HTMLParserThrea^chrome               ^[.] WebCore::RenderBlock::paintContents(WebCore::PaintInfo&, WebCore::LayoutPoint const&)
+            |
+            --- WebCore::RenderBlock::paintContents(WebCore::PaintInfo&, WebCore::LayoutPoint const&)
+
+0.07^2215989^HTMLParserThrea^chrome               ^[.] SkRegion::freeRuns()
+            |
+            --- SkRegion::freeRuns()
+
+0.07^2213591^HTMLParserThrea^libpthread-2.15.so   ^[.] pthread_cond_destroy@@GLIBC_2.3.2
+            |
+            --- pthread_cond_destroy@@GLIBC_2.3.2
+
+0.07^2185498^HTMLParserThrea^chrome               ^[.] WebCore::RenderLayer::paintForegroundForFragmentsWithPhase(WebCore::PaintPhase, WTF::Vector<WebCore::LayerFragment, 1ul> const&, WebCore::GraphicsContext*, WebCore::RenderLayer::LayerPaintingInfo const&, unsigned int, WebCore::RenderObject*)
+            |
+            --- WebCore::RenderLayer::paintForegroundForFragmentsWithPhase(WebCore::PaintPhase, WTF::Vector<WebCore::LayerFragment, 1ul> const&, WebCore::GraphicsContext*, WebCore::RenderLayer::LayerPaintingInfo const&, unsigned int, WebCore::RenderObject*)
+                (nil)
+
+0.07^2181719^HTMLParserThrea^chrome               ^[.] v8::internal::JSFunction::CompileLazy(v8::internal::Handle<v8::internal::JSFunction>, v8::internal::ClearExceptionFlag)
+            |
+            --- v8::internal::JSFunction::CompileLazy(v8::internal::Handle<v8::internal::JSFunction>, v8::internal::ClearExceptionFlag)
+
+0.07^2181709^HTMLParserThrea^chrome               ^[.] base::subtle::RefCountedThreadSafeBase::AddRef() const
+            |
+            --- base::subtle::RefCountedThreadSafeBase::AddRef() const
+
+0.07^2162161^HTMLParserThrea^chrome               ^[.] v8::internal::LChunk::MarkEmptyBlocks()
+            |
+            --- v8::internal::LChunk::MarkEmptyBlocks()
+
+0.07^2154798^HTMLParserThrea^chrome               ^[.] v8::preparser::PreParser::ParsePostfixExpression(bool*)
+            |
+            --- v8::preparser::PreParser::ParsePostfixExpression(bool*)
+
+0.07^2153747^HTMLParserThrea^chrome               ^[.] WebCore::deviceScaleFactor(WebCore::Frame*)
+            |
+            --- WebCore::deviceScaleFactor(WebCore::Frame*)
+                (nil)
+
+0.07^2111797^HTMLParserThrea^chrome               ^[.] v8::preparser::PreParser::ParseConditionalExpression(bool, bool*)
+            |
+            --- v8::preparser::PreParser::ParseConditionalExpression(bool, bool*)
+                0x3b2abe08368000
+
+0.07^2107366^HTMLParserThrea^chrome               ^[.] ppapi::thunk::subtle::EnterBase::EnterBase(int)
+            |
+            --- ppapi::thunk::subtle::EnterBase::EnterBase(int)
+
+0.07^2061079^HTMLParserThrea^chrome               ^[.] v8::internal::FullCodeGenerator::EmitBackEdgeTable()
+            |
+            --- v8::internal::FullCodeGenerator::EmitBackEdgeTable()
+
+0.07^2055678^HTMLParserThrea^chrome               ^[.] WebCore::MarkupAccumulator::appendStartMarkup(WTF::StringBuilder&, WebCore::Node const*, WTF::HashMap<WTF::AtomicStringImpl*, WTF::AtomicStringImpl*, WTF::PtrHash<WTF::AtomicStringImpl*>, WTF::HashTraits<WTF::AtomicStringImpl*>, WTF::HashTraits<WTF::AtomicStringImpl*> >*)
+            |
+            --- WebCore::MarkupAccumulator::appendStartMarkup(WTF::StringBuilder&, WebCore::Node const*, WTF::HashMap<WTF::AtomicStringImpl*, WTF::AtomicStringImpl*, WTF::PtrHash<WTF::AtomicStringImpl*>, WTF::HashTraits<WTF::AtomicStringImpl*>, WTF::HashTraits<WTF::AtomicStringImpl*> >*)
+                (nil)
+
+0.07^2053845^HTMLParserThrea^chrome               ^[.] WebCore::RenderBox::flipForWritingMode(WebCore::LayoutRect&) const
+            |
+            --- WebCore::RenderBox::flipForWritingMode(WebCore::LayoutRect&) const
+
+0.07^2043240^HTMLParserThrea^chrome               ^[.] net::HttpUtil::HeadersIterator::GetNext()
+            |
+            --- net::HttpUtil::HeadersIterator::GetNext()
+                0x7f7a0689c4d8
+
+0.07^2038325^HTMLParserThrea^chrome               ^[.] WTF::HashTable<WTF::AtomicString, WTF::KeyValuePair<WTF::AtomicString, WTF::AtomicString>, WTF::KeyValuePairKeyExtractor<WTF::KeyValuePair<WTF::AtomicString, WTF::AtomicString> >, WTF::CaseFoldingHash, WTF::HashMapValueTraits<WTF::HashTraits<WTF::AtomicString>, WTF::HashTraits<WTF::AtomicString> >, WTF::HashTraits<WTF::AtomicString> >::rehash(int)
+            |
+            --- WTF::HashTable<WTF::AtomicString, WTF::KeyValuePair<WTF::AtomicString, WTF::AtomicString>, WTF::KeyValuePairKeyExtractor<WTF::KeyValuePair<WTF::AtomicString, WTF::AtomicString> >, WTF::CaseFoldingHash, WTF::HashMapValueTraits<WTF::HashTraits<WTF::AtomicString>, WTF::HashTraits<WTF::AtomicString> >, WTF::HashTraits<WTF::AtomicString> >::rehash(int)
+
+0.07^2038325^HTMLParserThrea^chrome               ^[.] WebCore::KURL::isValid() const
+            |
+            --- WebCore::KURL::isValid() const
+
+0.07^2030555^HTMLParserThrea^chrome               ^[.] v8::internal::Scope::LocalLookup(v8::internal::Handle<v8::internal::String>)
+            |
+            --- v8::internal::Scope::LocalLookup(v8::internal::Handle<v8::internal::String>)
+
+0.07^2017287^HTMLParserThrea^chrome               ^[.] WebKit::WebFrameImpl::document() const
+            |
+            --- WebKit::WebFrameImpl::document() const
+
+0.07^1991309^HTMLParserThrea^chrome               ^[.] v8::internal::Assembler::int3()
+            |
+            --- v8::internal::Assembler::int3()
+
+0.07^1989976^HTMLParserThrea^chrome               ^[.] __gnu_cxx::hashtable<std::pair<int const, std::pair<ppapi::Resource*, int> >, int, __gnu_cxx::hash<int>, std::_Select1st<std::pair<int const, std::pair<ppapi::Resource*, int> > >, std::equal_to<int>, std::allocator<std::pair<ppapi::Resource*, int> > >::find_or_insert(std::pair<int const, std::pair<ppapi::Resource*, int> > const&)
+            |
+            --- __gnu_cxx::hashtable<std::pair<int const, std::pair<ppapi::Resource*, int> >, int, __gnu_cxx::hash<int>, std::_Select1st<std::pair<int const, std::pair<ppapi::Resource*, int> > >, std::equal_to<int>, std::allocator<std::pair<ppapi::Resource*, int> > >::find_or_insert(std::pair<int const, std::pair<ppapi::Resource*, int> > const&)
+                (nil)
+
+0.07^1989976^HTMLParserThrea^chrome               ^[.] content::PaintAggregator::InvalidateRect(gfx::Rect const&)
+            |
+            --- content::PaintAggregator::InvalidateRect(gfx::Rect const&)
+
+0.07^1983969^HTMLParserThrea^chrome               ^[.] tracked_objects::ThreadData::TallyADeath(tracked_objects::Births const&, int, int)
+            |
+            --- tracked_objects::ThreadData::TallyADeath(tracked_objects::Births const&, int, int)
+
+0.07^1983969^HTMLParserThrea^chrome               ^[.] WebCore::HTMLDocumentParser::pumpTokenizer(WebCore::HTMLDocumentParser::SynchronousMode)
+            |
+            --- WebCore::HTMLDocumentParser::pumpTokenizer(WebCore::HTMLDocumentParser::SynchronousMode)
+
+0.07^1964943^HTMLParserThrea^chrome               ^[.] v8::internal::NormalizedMapCache::Get(v8::internal::JSObject*, v8::internal::PropertyNormalizationMode)
+            |
+            --- v8::internal::NormalizedMapCache::Get(v8::internal::JSObject*, v8::internal::PropertyNormalizationMode)
+
+0.07^1960139^HTMLParserThrea^chrome               ^[.] WebCore::RenderBox::visualOverflowRect() const
+            |
+            --- WebCore::RenderBox::visualOverflowRect() const
+
+0.06^1953892^HTMLParserThrea^chrome               ^[.] base::MessagePumpDefault::ScheduleWork()
+            |
+            --- base::MessagePumpDefault::ScheduleWork()
+
+0.06^1922033^HTMLParserThrea^chrome               ^[.] v8::preparser::PreParser::ParseIdentifier(bool*)
+            |
+            --- v8::preparser::PreParser::ParseIdentifier(bool*)
+                0x3b2abe08368000
+
+0.06^1902957^HTMLParserThrea^chrome               ^[.] base::MessageLoop::ScheduleWork(bool)
+            |
+            --- base::MessageLoop::ScheduleWork(bool)
+
+0.06^1857157^HTMLParserThrea^chrome               ^[.] v8::internal::FullCodeGenerator::DoTest(v8::internal::Expression*, v8::internal::Label*, v8::internal::Label*, v8::internal::Label*)
+            |
+            --- v8::internal::FullCodeGenerator::DoTest(v8::internal::Expression*, v8::internal::Label*, v8::internal::Label*, v8::internal::Label*)
+                0x7fff30e91bc0
+
+0.06^1758928^HTMLParserThrea^chrome               ^[.] WebCore::RenderLayer::ancestorStackingContainer() const
+            |
+            --- WebCore::RenderLayer::ancestorStackingContainer() const
+
+0.06^1708834^HTMLParserThrea^libcairo.so.2.11000.2^[.] _cairo_matrix_is_identity
+            |
+            --- _cairo_matrix_is_identity
+
+0.06^1707254^HTMLParserThrea^chrome               ^[.] WebCore::RenderLineBoxList::anyLineIntersectsRect(WebCore::RenderBoxModelObject*, WebCore::LayoutRect const&, WebCore::LayoutPoint const&, WebCore::LayoutUnit) const
+            |
+            --- WebCore::RenderLineBoxList::anyLineIntersectsRect(WebCore::RenderBoxModelObject*, WebCore::LayoutRect const&, WebCore::LayoutPoint const&, WebCore::LayoutUnit) const
+
+0.06^1695178^HTMLParserThrea^chrome               ^[.] base::internal::CallbackBase::CallbackBase(base::internal::BindStateBase*)
+            |
+            --- base::internal::CallbackBase::CallbackBase(base::internal::BindStateBase*)
+
+0.06^1682330^HTMLParserThrea^chrome               ^[.] WebCore::RenderBox::clippedOverflowRectForRepaint(WebCore::RenderLayerModelObject const*) const
+            |
+            --- WebCore::RenderBox::clippedOverflowRectForRepaint(WebCore::RenderLayerModelObject const*) const
+
+0.06^1665533^HTMLParserThrea^chrome               ^[.] v8::internal::Zone::DeleteAll()
+            |
+            --- v8::internal::Zone::DeleteAll()
+
+0.05^1641632^HTMLParserThrea^chrome               ^[.] WebCore::Color::blend(WebCore::Color const&) const
+            |
+            --- WebCore::Color::blend(WebCore::Color const&) const
+
+0.05^1611521^HTMLParserThrea^chrome               ^[.] WebKit::currentTimeFunction()
+            |
+            --- WebKit::currentTimeFunction()
+
+0.05^1539536^HTMLParserThrea^chrome               ^[.] WebCore::RenderLayer::collectFragments(WTF::Vector<WebCore::LayerFragment, 1ul>&, WebCore::RenderLayer const*, WebCore::RenderRegion*, WebCore::LayoutRect const&, WebCore::ClipRectsType, WebCore::OverlayScrollbarSizeRelevancy, WebCore::ShouldRespectOverflowClip, WebCore::LayoutPoint const*, WebCore::LayoutRect const*)
+            |
+            --- WebCore::RenderLayer::collectFragments(WTF::Vector<WebCore::LayerFragment, 1ul>&, WebCore::RenderLayer const*, WebCore::RenderRegion*, WebCore::LayoutRect const&, WebCore::ClipRectsType, WebCore::OverlayScrollbarSizeRelevancy, WebCore::ShouldRespectOverflowClip, WebCore::LayoutPoint const*, WebCore::LayoutRect const*)
+
+0.05^1523423^HTMLParserThrea^chrome               ^[.] v8::internal::StubCache::ComputeLoadNormal(v8::internal::Handle<v8::internal::Name>, v8::internal::Handle<v8::internal::JSObject>)
+            |
+            --- v8::internal::StubCache::ComputeLoadNormal(v8::internal::Handle<v8::internal::Name>, v8::internal::Handle<v8::internal::JSObject>)
+                (nil)
+
+0.05^1521966^HTMLParserThrea^chrome               ^[.] void url_canon::(anonymous namespace)::DoHost<char, unsigned char>(char const*, url_parse::Component const&, url_canon::CanonOutputT<char>*, url_canon::CanonHostInfo*)
+            |
+            --- void url_canon::(anonymous namespace)::DoHost<char, unsigned char>(char const*, url_parse::Component const&, url_canon::CanonOutputT<char>*, url_canon::CanonHostInfo*)
+                0xffffffff00000000
+
+0.05^1443413^HTMLParserThrea^chrome               ^[.] WTF::HashTableAddResult<WTF::HashTableIterator<WTF::ListHashSetNode<WebCore::CachedResource*, 256ul>*, WTF::ListHashSetNode<WebCore::CachedResource*, 256ul>*, WTF::IdentityExtractor, WTF::ListHashSetNodeHashFunctions<WTF::PtrHash<WebCore::CachedResource*> >, WTF::HashTraits<WTF::ListHashSetNode<WebCore::CachedResource*, 256ul>*>, WTF::HashTraits<WTF::ListHashSetNode<WebCore::CachedResource*, 256ul>*> > > WTF::HashTable<WTF::ListHashSetNode<WebCore::Cac
+            |
+            --- WTF::HashTableAddResult<WTF::HashTableIterator<WTF::ListHashSetNode<WebCore::CachedResource*, 256ul>*, WTF::ListHashSetNode<WebCore::CachedResource*, 256ul>*, WTF::IdentityExtractor, WTF::ListHashSetNodeHashFunctions<WTF::PtrHash<WebCore::CachedResource*> >, WTF::HashTraits<WTF::ListHashSetNode<WebCore::CachedResource*, 256ul>*>, WTF::HashTraits<WTF::ListHashSetNode<WebCore::CachedResource*, 256ul>*> > > WTF::HashTable<WTF::ListHashSetNode<WebCore::CachedResource*, 256ul>*, WTF::ListHashSetNode<WebCore::CachedResource*, 256ul>*, WTF::IdentityExtractor, WTF::ListHashSetNodeHashFunctions<WTF::PtrHash<WebCore::CachedResource*> >, WTF::HashTraits<WTF::ListHashSetNode<WebCore::CachedResource*, 256ul>*>, WTF::HashTraits<WTF::ListHashSetNode<WebCore::CachedResource*, 256ul>*> >::add<WTF::ListHashSetTranslator<WTF::PtrHash<WebCore::CachedResource*> >, WebCore::CachedResource*, WTF::ListHashSetNodeAllocator<WebCore::CachedResource*, 256ul>*>(WebCore::CachedResource* const&, WTF::ListHashSetNodeAllocator<WebCore::CachedResource*, 256ul>* const&)
+                (nil)
+
+0.05^1380694^HTMLParserThrea^chrome               ^[.] v8::internal::HBasicBlock::Finish(v8::internal::HControlInstruction*)
+            |
+            --- v8::internal::HBasicBlock::Finish(v8::internal::HControlInstruction*)
+
+0.04^1244009^HTMLParserThrea^chrome               ^[.] WebCore::Node::isWebVTTElement() const
+            |
+            --- WebCore::Node::isWebVTTElement() const
+                0x7fff30e91700
+
+
+
+#
+# (For a higher level overview, try: perf report --sort comm,dso)
+#
diff --git a/catapult/telemetry/telemetry/internal/testing/powermetrics_output.output b/catapult/telemetry/telemetry/internal/testing/powermetrics_output.output
new file mode 100644
index 0000000..d952096
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/powermetrics_output.output
@@ -0,0 +1,3375 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+	<key>GPU</key>
+	<array>
+		<dict>
+			<key>freq_hz</key>
+			<real>1</real>
+			<key>c_state_ns</key>
+			<integer>4295987562</integer>
+			<key>c_state_ratio</key>
+			<real>1</real>
+			<key>GPU number</key>
+			<integer>0</integer>
+			<key>misc counters</key>
+			<array>
+				<dict>
+					<key>GPU Busy</key>
+					<real>0</real>
+				</dict>
+				<dict>
+					<key>FB Test Case</key>
+					<real>0</real>
+				</dict>
+			</array>
+			<key>c_states</key>
+			<array>
+				<dict>
+					<key>used_ns</key>
+					<integer>8737642</integer>
+					<key>used_ratio</key>
+					<real>0.00203391</real>
+				</dict>
+				<dict>
+					<key>used_ns</key>
+					<integer>4287249920</integer>
+					<key>used_ratio</key>
+					<real>0.997969</real>
+				</dict>
+			</array>
+			<key>p_states</key>
+			<array>
+				<dict>
+					<key>frequency</key>
+					<string>1300000000</string>
+					<key>used_ns</key>
+					<integer>0</integer>
+					<key>used_ratio</key>
+					<real>0</real>
+				</dict>
+				<dict>
+					<key>frequency</key>
+					<string>1250000000</string>
+					<key>used_ns</key>
+					<integer>0</integer>
+					<key>used_ratio</key>
+					<real>0</real>
+				</dict>
+				<dict>
+					<key>frequency</key>
+					<string>1200000000</string>
+					<key>used_ns</key>
+					<integer>0</integer>
+					<key>used_ratio</key>
+					<real>0</real>
+				</dict>
+				<dict>
+					<key>frequency</key>
+					<string>1150000000</string>
+					<key>used_ns</key>
+					<integer>0</integer>
+					<key>used_ratio</key>
+					<real>0</real>
+				</dict>
+				<dict>
+					<key>frequency</key>
+					<string>1100000000</string>
+					<key>used_ns</key>
+					<integer>0</integer>
+					<key>used_ratio</key>
+					<real>0</real>
+				</dict>
+				<dict>
+					<key>frequency</key>
+					<string>1050000000</string>
+					<key>used_ns</key>
+					<integer>0</integer>
+					<key>used_ratio</key>
+					<real>0</real>
+				</dict>
+				<dict>
+					<key>frequency</key>
+					<string>1000000000</string>
+					<key>used_ns</key>
+					<integer>0</integer>
+					<key>used_ratio</key>
+					<real>0</real>
+				</dict>
+				<dict>
+					<key>frequency</key>
+					<string>950000000</string>
+					<key>used_ns</key>
+					<integer>0</integer>
+					<key>used_ratio</key>
+					<real>0</real>
+				</dict>
+				<dict>
+					<key>frequency</key>
+					<string>900000000</string>
+					<key>used_ns</key>
+					<integer>0</integer>
+					<key>used_ratio</key>
+					<real>0</real>
+				</dict>
+				<dict>
+					<key>frequency</key>
+					<string>850000000</string>
+					<key>used_ns</key>
+					<integer>0</integer>
+					<key>used_ratio</key>
+					<real>0</real>
+				</dict>
+				<dict>
+					<key>frequency</key>
+					<string>800000000</string>
+					<key>used_ns</key>
+					<integer>0</integer>
+					<key>used_ratio</key>
+					<real>0</real>
+				</dict>
+				<dict>
+					<key>frequency</key>
+					<string>750000000</string>
+					<key>used_ns</key>
+					<integer>0</integer>
+					<key>used_ratio</key>
+					<real>0</real>
+				</dict>
+				<dict>
+					<key>frequency</key>
+					<string>700000000</string>
+					<key>used_ns</key>
+					<integer>0</integer>
+					<key>used_ratio</key>
+					<real>0</real>
+				</dict>
+				<dict>
+					<key>frequency</key>
+					<string>650000000</string>
+					<key>used_ns</key>
+					<integer>0</integer>
+					<key>used_ratio</key>
+					<real>0</real>
+				</dict>
+				<dict>
+					<key>frequency</key>
+					<string>600000000</string>
+					<key>used_ns</key>
+					<integer>0</integer>
+					<key>used_ratio</key>
+					<real>0</real>
+				</dict>
+				<dict>
+					<key>frequency</key>
+					<string>550000000</string>
+					<key>used_ns</key>
+					<integer>0</integer>
+					<key>used_ratio</key>
+					<real>0</real>
+				</dict>
+				<dict>
+					<key>frequency</key>
+					<string>500000000</string>
+					<key>used_ns</key>
+					<integer>0</integer>
+					<key>used_ratio</key>
+					<real>0</real>
+				</dict>
+				<dict>
+					<key>frequency</key>
+					<string>450000000</string>
+					<key>used_ns</key>
+					<integer>0</integer>
+					<key>used_ratio</key>
+					<real>0</real>
+				</dict>
+				<dict>
+					<key>frequency</key>
+					<string>400000000</string>
+					<key>used_ns</key>
+					<integer>0</integer>
+					<key>used_ratio</key>
+					<real>0</real>
+				</dict>
+				<dict>
+					<key>frequency</key>
+					<string>350000000</string>
+					<key>used_ns</key>
+					<integer>0</integer>
+					<key>used_ratio</key>
+					<real>0</real>
+				</dict>
+				<dict>
+					<key>frequency</key>
+					<string>300000000</string>
+					<key>used_ns</key>
+					<integer>0</integer>
+					<key>used_ratio</key>
+					<real>0</real>
+				</dict>
+				<dict>
+					<key>frequency</key>
+					<string>250000000</string>
+					<key>used_ns</key>
+					<integer>0</integer>
+					<key>used_ratio</key>
+					<real>0</real>
+				</dict>
+				<dict>
+					<key>frequency</key>
+					<string>200000000</string>
+					<key>used_ns</key>
+					<integer>0</integer>
+					<key>used_ratio</key>
+					<real>0</real>
+				</dict>
+			</array>
+			<key>freq_ratio</key>
+			<real>0</real>
+			<key>name</key>
+			<string>IntelIG</string>
+		</dict>
+	</array>
+	<key>timestamp</key>
+	<date>2014-06-13T17:07:20Z</date>
+	<key>tasks</key>
+	<array>
+		<dict>
+			<key>idle_wakeups</key>
+			<integer>592</integer>
+			<key>cputime_ms_per_s</key>
+			<real>23.4812</real>
+			<key>cputime_userland_ratio</key>
+			<real>0</real>
+			<key>intr_wakeups</key>
+			<integer>687</integer>
+			<key>cputime_ms_per_s_reliable</key>
+			<true/>
+			<key>idle_wakeups_per_s</key>
+			<real>137.889</real>
+			<key>started_ns</key>
+			<integer>0</integer>
+			<key>cputime_ns</key>
+			<integer>100812002</integer>
+			<key>timer_wakeups</key>
+			<array>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>3.72673</real>
+					<key>interval_ns</key>
+					<integer>2000000</integer>
+					<key>wakeups</key>
+					<integer>16</integer>
+				</dict>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>5000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+			</array>
+			<key>intr_wakeups_per_s</key>
+			<real>160.016</real>
+			<key>pid</key>
+			<integer>0</integer>
+			<key>name</key>
+			<string>kernel_task</string>
+			<key>started_ns_reliable</key>
+			<false/>
+		</dict>
+		<dict>
+			<key>idle_wakeups</key>
+			<integer>141</integer>
+			<key>cputime_ms_per_s</key>
+			<real>24.2995</real>
+			<key>cputime_userland_ratio</key>
+			<real>0.6389860000000001</real>
+			<key>intr_wakeups</key>
+			<integer>145</integer>
+			<key>cputime_ms_per_s_reliable</key>
+			<true/>
+			<key>idle_wakeups_per_s</key>
+			<real>32.8418</real>
+			<key>started_ns</key>
+			<integer>1610501000</integer>
+			<key>cputime_ns</key>
+			<integer>104325487</integer>
+			<key>timer_wakeups</key>
+			<array>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>17.469</real>
+					<key>interval_ns</key>
+					<integer>2000000</integer>
+					<key>wakeups</key>
+					<integer>75</integer>
+				</dict>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>12.1119</real>
+					<key>interval_ns</key>
+					<integer>5000000</integer>
+					<key>wakeups</key>
+					<integer>52</integer>
+				</dict>
+			</array>
+			<key>intr_wakeups_per_s</key>
+			<real>33.7735</real>
+			<key>pid</key>
+			<integer>113</integer>
+			<key>name</key>
+			<string>WindowServer</string>
+			<key>started_ns_reliable</key>
+			<false/>
+		</dict>
+		<dict>
+			<key>idle_wakeups</key>
+			<integer>40</integer>
+			<key>cputime_ms_per_s</key>
+			<real>21.3695</real>
+			<key>cputime_userland_ratio</key>
+			<real>0.970651</real>
+			<key>intr_wakeups</key>
+			<integer>44</integer>
+			<key>cputime_ms_per_s_reliable</key>
+			<true/>
+			<key>idle_wakeups_per_s</key>
+			<real>9.31682</real>
+			<key>started_ns</key>
+			<integer>7367983585000</integer>
+			<key>cputime_ns</key>
+			<integer>91746051</integer>
+			<key>timer_wakeups</key>
+			<array>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>2000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>10.2485</real>
+					<key>interval_ns</key>
+					<integer>5000000</integer>
+					<key>wakeups</key>
+					<integer>44</integer>
+				</dict>
+			</array>
+			<key>intr_wakeups_per_s</key>
+			<real>10.2485</real>
+			<key>pid</key>
+			<integer>12745</integer>
+			<key>name</key>
+			<string>Python</string>
+			<key>started_ns_reliable</key>
+			<false/>
+		</dict>
+		<dict>
+			<key>idle_wakeups</key>
+			<integer>20</integer>
+			<key>cputime_ms_per_s</key>
+			<real>18.167</real>
+			<key>cputime_userland_ratio</key>
+			<real>0.914095</real>
+			<key>intr_wakeups</key>
+			<integer>22</integer>
+			<key>cputime_ms_per_s_reliable</key>
+			<true/>
+			<key>idle_wakeups_per_s</key>
+			<real>4.65841</real>
+			<key>started_ns</key>
+			<integer>43264491000</integer>
+			<key>cputime_ns</key>
+			<integer>77996425</integer>
+			<key>timer_wakeups</key>
+			<array>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>2000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0.23292</real>
+					<key>interval_ns</key>
+					<integer>5000000</integer>
+					<key>wakeups</key>
+					<integer>1</integer>
+				</dict>
+			</array>
+			<key>intr_wakeups_per_s</key>
+			<real>5.12425</real>
+			<key>pid</key>
+			<integer>1318</integer>
+			<key>name</key>
+			<string>Google Chrome He</string>
+			<key>started_ns_reliable</key>
+			<false/>
+		</dict>
+		<dict>
+			<key>idle_wakeups</key>
+			<integer>117</integer>
+			<key>cputime_ms_per_s</key>
+			<real>10.6337</real>
+			<key>cputime_userland_ratio</key>
+			<real>0.8333199999999999</real>
+			<key>intr_wakeups</key>
+			<integer>125</integer>
+			<key>cputime_ms_per_s_reliable</key>
+			<true/>
+			<key>idle_wakeups_per_s</key>
+			<real>27.2517</real>
+			<key>started_ns</key>
+			<integer>10444809000</integer>
+			<key>cputime_ns</key>
+			<integer>45653817</integer>
+			<key>timer_wakeups</key>
+			<array>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>2000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>5000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+			</array>
+			<key>intr_wakeups_per_s</key>
+			<real>29.1151</real>
+			<key>pid</key>
+			<integer>583</integer>
+			<key>name</key>
+			<string>Terminal</string>
+			<key>started_ns_reliable</key>
+			<false/>
+		</dict>
+		<dict>
+			<key>idle_wakeups</key>
+			<integer>65</integer>
+			<key>cputime_ms_per_s</key>
+			<real>11.7345</real>
+			<key>cputime_userland_ratio</key>
+			<real>0.88018</real>
+			<key>intr_wakeups</key>
+			<integer>75</integer>
+			<key>cputime_ms_per_s_reliable</key>
+			<true/>
+			<key>idle_wakeups_per_s</key>
+			<real>15.1398</real>
+			<key>started_ns</key>
+			<integer>7911274078000</integer>
+			<key>cputime_ns</key>
+			<integer>50379825</integer>
+			<key>timer_wakeups</key>
+			<array>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>17.469</real>
+					<key>interval_ns</key>
+					<integer>2000000</integer>
+					<key>wakeups</key>
+					<integer>75</integer>
+				</dict>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>5000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+			</array>
+			<key>intr_wakeups_per_s</key>
+			<real>17.469</real>
+			<key>pid</key>
+			<integer>13560</integer>
+			<key>name</key>
+			<string>Python</string>
+			<key>started_ns_reliable</key>
+			<false/>
+		</dict>
+		<dict>
+			<key>idle_wakeups</key>
+			<integer>47</integer>
+			<key>cputime_ms_per_s</key>
+			<real>8.64067</real>
+			<key>cputime_userland_ratio</key>
+			<real>0.584309</real>
+			<key>intr_wakeups</key>
+			<integer>50</integer>
+			<key>cputime_ms_per_s_reliable</key>
+			<true/>
+			<key>idle_wakeups_per_s</key>
+			<real>10.9473</real>
+			<key>started_ns</key>
+			<integer>10437471000</integer>
+			<key>cputime_ns</key>
+			<integer>37097106</integer>
+			<key>timer_wakeups</key>
+			<array>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0.931682</real>
+					<key>interval_ns</key>
+					<integer>2000000</integer>
+					<key>wakeups</key>
+					<integer>4</integer>
+				</dict>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>5000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+			</array>
+			<key>intr_wakeups_per_s</key>
+			<real>11.646</real>
+			<key>pid</key>
+			<integer>582</integer>
+			<key>name</key>
+			<string>Google Chrome</string>
+			<key>started_ns_reliable</key>
+			<false/>
+		</dict>
+		<dict>
+			<key>idle_wakeups</key>
+			<integer>14</integer>
+			<key>cputime_ms_per_s</key>
+			<real>7.9918</real>
+			<key>cputime_userland_ratio</key>
+			<real>0.919737</real>
+			<key>intr_wakeups</key>
+			<integer>17</integer>
+			<key>cputime_ms_per_s_reliable</key>
+			<true/>
+			<key>idle_wakeups_per_s</key>
+			<real>3.26089</real>
+			<key>started_ns</key>
+			<integer>10465929000</integer>
+			<key>cputime_ns</key>
+			<integer>34311297</integer>
+			<key>timer_wakeups</key>
+			<array>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>2000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>5000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+			</array>
+			<key>intr_wakeups_per_s</key>
+			<real>3.95965</real>
+			<key>pid</key>
+			<integer>587</integer>
+			<key>name</key>
+			<string>SystemUIServer</string>
+			<key>started_ns_reliable</key>
+			<false/>
+		</dict>
+		<dict>
+			<key>idle_wakeups</key>
+			<integer>6</integer>
+			<key>cputime_ms_per_s</key>
+			<real>6.42884</real>
+			<key>cputime_userland_ratio</key>
+			<real>0.582192</real>
+			<key>intr_wakeups</key>
+			<integer>7</integer>
+			<key>cputime_ms_per_s_reliable</key>
+			<true/>
+			<key>idle_wakeups_per_s</key>
+			<real>1.39752</real>
+			<key>started_ns</key>
+			<integer>639336000</integer>
+			<key>cputime_ns</key>
+			<integer>27601023</integer>
+			<key>timer_wakeups</key>
+			<array>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>2000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>5000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+			</array>
+			<key>intr_wakeups_per_s</key>
+			<real>1.63044</real>
+			<key>pid</key>
+			<integer>25</integer>
+			<key>name</key>
+			<string>syslogd</string>
+			<key>started_ns_reliable</key>
+			<false/>
+		</dict>
+		<dict>
+			<key>idle_wakeups</key>
+			<integer>45</integer>
+			<key>cputime_ms_per_s</key>
+			<real>0.681478</real>
+			<key>cputime_userland_ratio</key>
+			<real>0.365057</real>
+			<key>intr_wakeups</key>
+			<integer>49</integer>
+			<key>cputime_ms_per_s_reliable</key>
+			<true/>
+			<key>idle_wakeups_per_s</key>
+			<real>10.4814</real>
+			<key>started_ns</key>
+			<integer>49189572000</integer>
+			<key>cputime_ns</key>
+			<integer>2925797</integer>
+			<key>timer_wakeups</key>
+			<array>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>2000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>5000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+			</array>
+			<key>intr_wakeups_per_s</key>
+			<real>11.4131</real>
+			<key>pid</key>
+			<integer>1551</integer>
+			<key>name</key>
+			<string>GoogleTalkPlugin</string>
+			<key>started_ns_reliable</key>
+			<false/>
+		</dict>
+		<dict>
+			<key>idle_wakeups</key>
+			<integer>12</integer>
+			<key>cputime_ms_per_s</key>
+			<real>0.399903</real>
+			<key>cputime_userland_ratio</key>
+			<real>0.489519</real>
+			<key>intr_wakeups</key>
+			<integer>15</integer>
+			<key>cputime_ms_per_s_reliable</key>
+			<true/>
+			<key>idle_wakeups_per_s</key>
+			<real>2.79504</real>
+			<key>started_ns</key>
+			<integer>1881153000</integer>
+			<key>cputime_ns</key>
+			<integer>1716908</integer>
+			<key>timer_wakeups</key>
+			<array>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>2000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>5000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+			</array>
+			<key>intr_wakeups_per_s</key>
+			<real>3.49381</real>
+			<key>pid</key>
+			<integer>145</integer>
+			<key>name</key>
+			<string>gagent</string>
+			<key>started_ns_reliable</key>
+			<false/>
+		</dict>
+		<dict>
+			<key>idle_wakeups</key>
+			<integer>5</integer>
+			<key>cputime_ms_per_s</key>
+			<real>0.475033</real>
+			<key>cputime_userland_ratio</key>
+			<real>0.725976</real>
+			<key>intr_wakeups</key>
+			<integer>6</integer>
+			<key>cputime_ms_per_s_reliable</key>
+			<true/>
+			<key>idle_wakeups_per_s</key>
+			<real>1.1646</real>
+			<key>started_ns</key>
+			<integer>8483361342000</integer>
+			<key>cputime_ns</key>
+			<integer>2039463</integer>
+			<key>timer_wakeups</key>
+			<array>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>2000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>5000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+			</array>
+			<key>intr_wakeups_per_s</key>
+			<real>1.39752</real>
+			<key>pid</key>
+			<integer>14091</integer>
+			<key>name</key>
+			<string>Google Chrome He</string>
+			<key>started_ns_reliable</key>
+			<false/>
+		</dict>
+		<dict>
+			<key>idle_wakeups</key>
+			<integer>0</integer>
+			<key>cputime_ms_per_s</key>
+			<real>0.679799</real>
+			<key>cputime_userland_ratio</key>
+			<real>0.256151</real>
+			<key>intr_wakeups</key>
+			<integer>0</integer>
+			<key>cputime_ms_per_s_reliable</key>
+			<true/>
+			<key>idle_wakeups_per_s</key>
+			<real>0</real>
+			<key>started_ns</key>
+			<integer>8917493171000</integer>
+			<key>cputime_ns</key>
+			<integer>2918588</integer>
+			<key>timer_wakeups</key>
+			<array>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>2000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>5000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+			</array>
+			<key>intr_wakeups_per_s</key>
+			<real>0</real>
+			<key>pid</key>
+			<integer>14209</integer>
+			<key>name</key>
+			<string>powermetrics</string>
+			<key>started_ns_reliable</key>
+			<false/>
+		</dict>
+		<dict>
+			<key>idle_wakeups</key>
+			<integer>2</integer>
+			<key>cputime_ms_per_s</key>
+			<real>0.211558</real>
+			<key>cputime_userland_ratio</key>
+			<real>0.6101799999999999</real>
+			<key>intr_wakeups</key>
+			<integer>2</integer>
+			<key>cputime_ms_per_s_reliable</key>
+			<true/>
+			<key>idle_wakeups_per_s</key>
+			<real>0.465841</real>
+			<key>started_ns</key>
+			<integer>1493715000</integer>
+			<key>cputime_ns</key>
+			<integer>908286</integer>
+			<key>timer_wakeups</key>
+			<array>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>2000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>5000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+			</array>
+			<key>intr_wakeups_per_s</key>
+			<real>0.465841</real>
+			<key>pid</key>
+			<integer>95</integer>
+			<key>name</key>
+			<string>Python</string>
+			<key>started_ns_reliable</key>
+			<false/>
+		</dict>
+		<dict>
+			<key>idle_wakeups</key>
+			<integer>2</integer>
+			<key>cputime_ms_per_s</key>
+			<real>0.181734</real>
+			<key>cputime_userland_ratio</key>
+			<real>0.677046</real>
+			<key>intr_wakeups</key>
+			<integer>3</integer>
+			<key>cputime_ms_per_s_reliable</key>
+			<true/>
+			<key>idle_wakeups_per_s</key>
+			<real>0.465841</real>
+			<key>started_ns</key>
+			<integer>12546437000</integer>
+			<key>cputime_ns</key>
+			<integer>780239</integer>
+			<key>timer_wakeups</key>
+			<array>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>2000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>5000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+			</array>
+			<key>intr_wakeups_per_s</key>
+			<real>0.698761</real>
+			<key>pid</key>
+			<integer>695</integer>
+			<key>name</key>
+			<string>Google Chrome He</string>
+			<key>started_ns_reliable</key>
+			<false/>
+		</dict>
+		<dict>
+			<key>idle_wakeups</key>
+			<integer>3</integer>
+			<key>cputime_ms_per_s</key>
+			<real>0.117981</real>
+			<key>cputime_userland_ratio</key>
+			<real>0.252391</real>
+			<key>intr_wakeups</key>
+			<integer>4</integer>
+			<key>cputime_ms_per_s_reliable</key>
+			<true/>
+			<key>idle_wakeups_per_s</key>
+			<real>0.698761</real>
+			<key>started_ns</key>
+			<integer>1676947000</integer>
+			<key>cputime_ns</key>
+			<integer>506528</integer>
+			<key>timer_wakeups</key>
+			<array>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>2000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>5000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+			</array>
+			<key>intr_wakeups_per_s</key>
+			<real>0.931682</real>
+			<key>pid</key>
+			<integer>132</integer>
+			<key>name</key>
+			<string>pacemaker</string>
+			<key>started_ns_reliable</key>
+			<false/>
+		</dict>
+		<dict>
+			<key>idle_wakeups</key>
+			<integer>2</integer>
+			<key>cputime_ms_per_s</key>
+			<real>0.125953</real>
+			<key>cputime_userland_ratio</key>
+			<real>0.55472</real>
+			<key>intr_wakeups</key>
+			<integer>2</integer>
+			<key>cputime_ms_per_s_reliable</key>
+			<true/>
+			<key>idle_wakeups_per_s</key>
+			<real>0.465841</real>
+			<key>started_ns</key>
+			<integer>11624896000</integer>
+			<key>cputime_ns</key>
+			<integer>540756</integer>
+			<key>timer_wakeups</key>
+			<array>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>2000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>5000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+			</array>
+			<key>intr_wakeups_per_s</key>
+			<real>0.465841</real>
+			<key>pid</key>
+			<integer>654</integer>
+			<key>name</key>
+			<string>Google Chrome He</string>
+			<key>started_ns_reliable</key>
+			<false/>
+		</dict>
+		<dict>
+			<key>idle_wakeups</key>
+			<integer>2</integer>
+			<key>cputime_ms_per_s</key>
+			<real>0.108103</real>
+			<key>cputime_userland_ratio</key>
+			<real>0.648833</real>
+			<key>intr_wakeups</key>
+			<integer>2</integer>
+			<key>cputime_ms_per_s_reliable</key>
+			<true/>
+			<key>idle_wakeups_per_s</key>
+			<real>0.465841</real>
+			<key>started_ns</key>
+			<integer>6838183543000</integer>
+			<key>cputime_ns</key>
+			<integer>464121</integer>
+			<key>timer_wakeups</key>
+			<array>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>2000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>5000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+			</array>
+			<key>intr_wakeups_per_s</key>
+			<real>0.465841</real>
+			<key>pid</key>
+			<integer>12260</integer>
+			<key>name</key>
+			<string>Google Chrome He</string>
+			<key>started_ns_reliable</key>
+			<false/>
+		</dict>
+		<dict>
+			<key>idle_wakeups</key>
+			<integer>1</integer>
+			<key>cputime_ms_per_s</key>
+			<real>0.120981</real>
+			<key>cputime_userland_ratio</key>
+			<real>0.682463</real>
+			<key>intr_wakeups</key>
+			<integer>3</integer>
+			<key>cputime_ms_per_s_reliable</key>
+			<true/>
+			<key>idle_wakeups_per_s</key>
+			<real>0.23292</real>
+			<key>started_ns</key>
+			<integer>1491205000</integer>
+			<key>cputime_ns</key>
+			<integer>519408</integer>
+			<key>timer_wakeups</key>
+			<array>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>2000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>5000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+			</array>
+			<key>intr_wakeups_per_s</key>
+			<real>0.698761</real>
+			<key>pid</key>
+			<integer>79</integer>
+			<key>name</key>
+			<string>fseventsd</string>
+			<key>started_ns_reliable</key>
+			<false/>
+		</dict>
+		<dict>
+			<key>idle_wakeups</key>
+			<integer>2</integer>
+			<key>cputime_ms_per_s</key>
+			<real>0.0486729</real>
+			<key>cputime_userland_ratio</key>
+			<real>0.254202</real>
+			<key>intr_wakeups</key>
+			<integer>2</integer>
+			<key>cputime_ms_per_s_reliable</key>
+			<true/>
+			<key>idle_wakeups_per_s</key>
+			<real>0.465841</real>
+			<key>started_ns</key>
+			<integer>1622879000</integer>
+			<key>cputime_ns</key>
+			<integer>208968</integer>
+			<key>timer_wakeups</key>
+			<array>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>2000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>5000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+			</array>
+			<key>intr_wakeups_per_s</key>
+			<real>0.465841</real>
+			<key>pid</key>
+			<integer>114</integer>
+			<key>name</key>
+			<string>ocspd</string>
+			<key>started_ns_reliable</key>
+			<false/>
+		</dict>
+		<dict>
+			<key>idle_wakeups</key>
+			<integer>1</integer>
+			<key>cputime_ms_per_s</key>
+			<real>0.08862200000000001</real>
+			<key>cputime_userland_ratio</key>
+			<real>0.635134</real>
+			<key>intr_wakeups</key>
+			<integer>2</integer>
+			<key>cputime_ms_per_s_reliable</key>
+			<true/>
+			<key>idle_wakeups_per_s</key>
+			<real>0.23292</real>
+			<key>started_ns</key>
+			<integer>12714103000</integer>
+			<key>cputime_ns</key>
+			<integer>380482</integer>
+			<key>timer_wakeups</key>
+			<array>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>2000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>5000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+			</array>
+			<key>intr_wakeups_per_s</key>
+			<real>0.465841</real>
+			<key>pid</key>
+			<integer>698</integer>
+			<key>name</key>
+			<string>Google Chrome He</string>
+			<key>started_ns_reliable</key>
+			<false/>
+		</dict>
+		<dict>
+			<key>idle_wakeups</key>
+			<integer>2</integer>
+			<key>cputime_ms_per_s</key>
+			<real>0.0410855</real>
+			<key>cputime_userland_ratio</key>
+			<real>0.189299</real>
+			<key>intr_wakeups</key>
+			<integer>2</integer>
+			<key>cputime_ms_per_s_reliable</key>
+			<true/>
+			<key>idle_wakeups_per_s</key>
+			<real>0.465841</real>
+			<key>started_ns</key>
+			<integer>1492092000</integer>
+			<key>cputime_ns</key>
+			<integer>176393</integer>
+			<key>timer_wakeups</key>
+			<array>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>2000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>5000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+			</array>
+			<key>intr_wakeups_per_s</key>
+			<real>0.465841</real>
+			<key>pid</key>
+			<integer>85</integer>
+			<key>name</key>
+			<string>launchservicesd</string>
+			<key>started_ns_reliable</key>
+			<false/>
+		</dict>
+		<dict>
+			<key>idle_wakeups</key>
+			<integer>0</integer>
+			<key>cputime_ms_per_s</key>
+			<real>0.110853</real>
+			<key>cputime_userland_ratio</key>
+			<real>0.279103</real>
+			<key>intr_wakeups</key>
+			<integer>0</integer>
+			<key>cputime_ms_per_s_reliable</key>
+			<true/>
+			<key>idle_wakeups_per_s</key>
+			<real>0</real>
+			<key>started_ns</key>
+			<integer>624120000</integer>
+			<key>cputime_ns</key>
+			<integer>475925</integer>
+			<key>timer_wakeups</key>
+			<array>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>2000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>5000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+			</array>
+			<key>intr_wakeups_per_s</key>
+			<real>0</real>
+			<key>pid</key>
+			<integer>20</integer>
+			<key>name</key>
+			<string>notifyd</string>
+			<key>started_ns_reliable</key>
+			<false/>
+		</dict>
+		<dict>
+			<key>idle_wakeups</key>
+			<integer>1</integer>
+			<key>cputime_ms_per_s</key>
+			<real>0.0619692</real>
+			<key>cputime_userland_ratio</key>
+			<real>0.401646</real>
+			<key>intr_wakeups</key>
+			<integer>2</integer>
+			<key>cputime_ms_per_s_reliable</key>
+			<true/>
+			<key>idle_wakeups_per_s</key>
+			<real>0.23292</real>
+			<key>started_ns</key>
+			<integer>1489680000</integer>
+			<key>cputime_ns</key>
+			<integer>266053</integer>
+			<key>timer_wakeups</key>
+			<array>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>2000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>5000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+			</array>
+			<key>intr_wakeups_per_s</key>
+			<real>0.465841</real>
+			<key>pid</key>
+			<integer>69</integer>
+			<key>name</key>
+			<string>mds</string>
+			<key>started_ns_reliable</key>
+			<false/>
+		</dict>
+		<dict>
+			<key>idle_wakeups</key>
+			<integer>1</integer>
+			<key>cputime_ms_per_s</key>
+			<real>0.0550766</real>
+			<key>cputime_userland_ratio</key>
+			<real>0.362267</real>
+			<key>intr_wakeups</key>
+			<integer>1</integer>
+			<key>cputime_ms_per_s_reliable</key>
+			<true/>
+			<key>idle_wakeups_per_s</key>
+			<real>0.23292</real>
+			<key>started_ns</key>
+			<integer>2992466000</integer>
+			<key>cputime_ns</key>
+			<integer>236461</integer>
+			<key>timer_wakeups</key>
+			<array>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>2000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>5000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+			</array>
+			<key>intr_wakeups_per_s</key>
+			<real>0.23292</real>
+			<key>pid</key>
+			<integer>202</integer>
+			<key>name</key>
+			<string>CVMServer</string>
+			<key>started_ns_reliable</key>
+			<false/>
+		</dict>
+		<dict>
+			<key>idle_wakeups</key>
+			<integer>0</integer>
+			<key>cputime_ms_per_s</key>
+			<real>0.0873813</real>
+			<key>cputime_userland_ratio</key>
+			<real>0.842692</real>
+			<key>intr_wakeups</key>
+			<integer>0</integer>
+			<key>cputime_ms_per_s_reliable</key>
+			<true/>
+			<key>idle_wakeups_per_s</key>
+			<real>0</real>
+			<key>started_ns</key>
+			<integer>7393764015000</integer>
+			<key>cputime_ns</key>
+			<integer>375155</integer>
+			<key>timer_wakeups</key>
+			<array>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>2000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>5000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+			</array>
+			<key>intr_wakeups_per_s</key>
+			<real>0</real>
+			<key>pid</key>
+			<integer>13253</integer>
+			<key>name</key>
+			<string>syslog</string>
+			<key>started_ns_reliable</key>
+			<false/>
+		</dict>
+		<dict>
+			<key>idle_wakeups</key>
+			<integer>1</integer>
+			<key>cputime_ms_per_s</key>
+			<real>0.010856</real>
+			<key>cputime_userland_ratio</key>
+			<real>0.187307</real>
+			<key>intr_wakeups</key>
+			<integer>1</integer>
+			<key>cputime_ms_per_s_reliable</key>
+			<true/>
+			<key>idle_wakeups_per_s</key>
+			<real>0.23292</real>
+			<key>started_ns</key>
+			<integer>10349417000</integer>
+			<key>cputime_ns</key>
+			<integer>46608</integer>
+			<key>timer_wakeups</key>
+			<array>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>2000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>5000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+			</array>
+			<key>intr_wakeups_per_s</key>
+			<real>0.23292</real>
+			<key>pid</key>
+			<integer>573</integer>
+			<key>name</key>
+			<string>cfprefsd</string>
+			<key>started_ns_reliable</key>
+			<false/>
+		</dict>
+		<dict>
+			<key>idle_wakeups</key>
+			<integer>1</integer>
+			<key>cputime_ms_per_s</key>
+			<real>0.00967575</real>
+			<key>cputime_userland_ratio</key>
+			<real>0.216677</real>
+			<key>intr_wakeups</key>
+			<integer>1</integer>
+			<key>cputime_ms_per_s_reliable</key>
+			<true/>
+			<key>idle_wakeups_per_s</key>
+			<real>0.23292</real>
+			<key>started_ns</key>
+			<integer>865104000</integer>
+			<key>cputime_ns</key>
+			<integer>41541</integer>
+			<key>timer_wakeups</key>
+			<array>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>2000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>5000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+			</array>
+			<key>intr_wakeups_per_s</key>
+			<real>0.23292</real>
+			<key>pid</key>
+			<integer>30</integer>
+			<key>name</key>
+			<string>cfprefsd</string>
+			<key>started_ns_reliable</key>
+			<false/>
+		</dict>
+		<dict>
+			<key>idle_wakeups</key>
+			<integer>0</integer>
+			<key>cputime_ms_per_s</key>
+			<real>0.039699</real>
+			<key>cputime_userland_ratio</key>
+			<real>0.240431</real>
+			<key>intr_wakeups</key>
+			<integer>0</integer>
+			<key>cputime_ms_per_s_reliable</key>
+			<true/>
+			<key>idle_wakeups_per_s</key>
+			<real>0</real>
+			<key>started_ns</key>
+			<integer>603081000</integer>
+			<key>cputime_ns</key>
+			<integer>170440</integer>
+			<key>timer_wakeups</key>
+			<array>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>2000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>5000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+			</array>
+			<key>intr_wakeups_per_s</key>
+			<real>0</real>
+			<key>pid</key>
+			<integer>17</integer>
+			<key>name</key>
+			<string>UserEventAgent</string>
+			<key>started_ns_reliable</key>
+			<false/>
+		</dict>
+		<dict>
+			<key>idle_wakeups</key>
+			<integer>0</integer>
+			<key>cputime_ms_per_s</key>
+			<real>0.0260172</real>
+			<key>cputime_userland_ratio</key>
+			<real>0.260627</real>
+			<key>intr_wakeups</key>
+			<integer>0</integer>
+			<key>cputime_ms_per_s_reliable</key>
+			<true/>
+			<key>idle_wakeups_per_s</key>
+			<real>0</real>
+			<key>started_ns</key>
+			<integer>10467715000</integer>
+			<key>cputime_ns</key>
+			<integer>111700</integer>
+			<key>timer_wakeups</key>
+			<array>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>2000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+				<dict>
+					<key>wakeups_per_s</key>
+					<real>0</real>
+					<key>interval_ns</key>
+					<integer>5000000</integer>
+					<key>wakeups</key>
+					<integer>0</integer>
+				</dict>
+			</array>
+			<key>intr_wakeups_per_s</key>
+			<real>0</real>
+			<key>pid</key>
+			<integer>588</integer>
+			<key>name</key>
+			<string>Finder</string>
+			<key>started_ns_reliable</key>
+			<false/>
+		</dict>
+	</array>
+	<key>hw_model</key>
+	<string>MacBookPro11,3</string>
+	<key>interrupts</key>
+	<array>
+		<dict>
+			<key>cpu</key>
+			<integer>0</integer>
+			<key>vectors</key>
+			<array>
+				<dict>
+					<key>vector</key>
+					<integer>116</integer>
+					<key>events</key>
+					<integer>167</integer>
+					<key>events_per_s</key>
+					<real>38.8753</real>
+					<key>name</key>
+					<string>XHC1</string>
+				</dict>
+				<dict>
+					<key>vector</key>
+					<integer>119</integer>
+					<key>events</key>
+					<integer>463</integer>
+					<key>events_per_s</key>
+					<real>107.78</real>
+					<key>name</key>
+					<string>GFX0</string>
+				</dict>
+				<dict>
+					<key>vector</key>
+					<integer>122</integer>
+					<key>events</key>
+					<integer>65</integer>
+					<key>events_per_s</key>
+					<real>15.1311</real>
+					<key>name</key>
+					<string>ARPT</string>
+				</dict>
+				<dict>
+					<key>vector</key>
+					<integer>221</integer>
+					<key>events</key>
+					<integer>635</integer>
+					<key>events_per_s</key>
+					<real>147.819</real>
+					<key>name</key>
+					<string>TMR</string>
+				</dict>
+				<dict>
+					<key>vector</key>
+					<integer>222</integer>
+					<key>events</key>
+					<integer>12</integer>
+					<key>events_per_s</key>
+					<real>2.79343</real>
+					<key>name</key>
+					<string>IPI</string>
+				</dict>
+			</array>
+		</dict>
+		<dict>
+			<key>cpu</key>
+			<integer>1</integer>
+			<key>vectors</key>
+			<array>
+				<dict>
+					<key>vector</key>
+					<integer>221</integer>
+					<key>events</key>
+					<integer>3</integer>
+					<key>events_per_s</key>
+					<real>0.698358</real>
+					<key>name</key>
+					<string>TMR</string>
+				</dict>
+				<dict>
+					<key>vector</key>
+					<integer>222</integer>
+					<key>events</key>
+					<integer>3</integer>
+					<key>events_per_s</key>
+					<real>0.698358</real>
+					<key>name</key>
+					<string>IPI</string>
+				</dict>
+			</array>
+		</dict>
+		<dict>
+			<key>cpu</key>
+			<integer>2</integer>
+			<key>vectors</key>
+			<array>
+				<dict>
+					<key>vector</key>
+					<integer>221</integer>
+					<key>events</key>
+					<integer>92</integer>
+					<key>events_per_s</key>
+					<real>21.4163</real>
+					<key>name</key>
+					<string>TMR</string>
+				</dict>
+				<dict>
+					<key>vector</key>
+					<integer>222</integer>
+					<key>events</key>
+					<integer>16</integer>
+					<key>events_per_s</key>
+					<real>3.72458</real>
+					<key>name</key>
+					<string>IPI</string>
+				</dict>
+			</array>
+		</dict>
+		<dict>
+			<key>cpu</key>
+			<integer>3</integer>
+			<key>vectors</key>
+			<array>
+				<dict>
+					<key>vector</key>
+					<integer>222</integer>
+					<key>events</key>
+					<integer>3</integer>
+					<key>events_per_s</key>
+					<real>0.698358</real>
+					<key>name</key>
+					<string>IPI</string>
+				</dict>
+			</array>
+		</dict>
+		<dict>
+			<key>cpu</key>
+			<integer>4</integer>
+			<key>vectors</key>
+			<array>
+				<dict>
+					<key>vector</key>
+					<integer>221</integer>
+					<key>events</key>
+					<integer>131</integer>
+					<key>events_per_s</key>
+					<real>30.495</real>
+					<key>name</key>
+					<string>TMR</string>
+				</dict>
+				<dict>
+					<key>vector</key>
+					<integer>222</integer>
+					<key>events</key>
+					<integer>20</integer>
+					<key>events_per_s</key>
+					<real>4.65572</real>
+					<key>name</key>
+					<string>IPI</string>
+				</dict>
+			</array>
+		</dict>
+		<dict>
+			<key>cpu</key>
+			<integer>5</integer>
+			<key>vectors</key>
+			<array>
+				<dict>
+					<key>vector</key>
+					<integer>221</integer>
+					<key>events</key>
+					<integer>2</integer>
+					<key>events_per_s</key>
+					<real>0.465572</real>
+					<key>name</key>
+					<string>TMR</string>
+				</dict>
+				<dict>
+					<key>vector</key>
+					<integer>222</integer>
+					<key>events</key>
+					<integer>3</integer>
+					<key>events_per_s</key>
+					<real>0.698358</real>
+					<key>name</key>
+					<string>IPI</string>
+				</dict>
+			</array>
+		</dict>
+		<dict>
+			<key>cpu</key>
+			<integer>6</integer>
+			<key>vectors</key>
+			<array>
+				<dict>
+					<key>vector</key>
+					<integer>221</integer>
+					<key>events</key>
+					<integer>111</integer>
+					<key>events_per_s</key>
+					<real>25.8393</real>
+					<key>name</key>
+					<string>TMR</string>
+				</dict>
+				<dict>
+					<key>vector</key>
+					<integer>222</integer>
+					<key>events</key>
+					<integer>15</integer>
+					<key>events_per_s</key>
+					<real>3.49179</real>
+					<key>name</key>
+					<string>IPI</string>
+				</dict>
+			</array>
+		</dict>
+		<dict>
+			<key>cpu</key>
+			<integer>7</integer>
+			<key>vectors</key>
+			<array>
+				<dict>
+					<key>vector</key>
+					<integer>222</integer>
+					<key>events</key>
+					<integer>2</integer>
+					<key>events_per_s</key>
+					<real>0.465572</real>
+					<key>name</key>
+					<string>IPI</string>
+				</dict>
+			</array>
+		</dict>
+	</array>
+	<key>backlight</key>
+	<dict>
+		<key>min</key>
+		<integer>0</integer>
+		<key>value</key>
+		<integer>687</integer>
+		<key>max</key>
+		<integer>1024</integer>
+	</dict>
+	<key>kern_osversion</key>
+	<string>13D65</string>
+	<key>all_tasks</key>
+	<dict>
+		<key>idle_wakeups</key>
+		<integer>1125</integer>
+		<key>cputime_ms_per_s</key>
+		<real>136.35</real>
+		<key>cputime_userland_ratio</key>
+		<real>0.65911</real>
+		<key>intr_wakeups</key>
+		<integer>1269</integer>
+		<key>cputime_ms_per_s_reliable</key>
+		<true/>
+		<key>idle_wakeups_per_s</key>
+		<real>261.884</real>
+		<key>started_ns</key>
+		<integer>0</integer>
+		<key>cputime_ns</key>
+		<integer>585732853</integer>
+		<key>timer_wakeups</key>
+		<array>
+			<dict>
+				<key>wakeups_per_s</key>
+				<real>39.5736</real>
+				<key>interval_ns</key>
+				<integer>2000000</integer>
+				<key>wakeups</key>
+				<integer>170</integer>
+			</dict>
+			<dict>
+				<key>wakeups_per_s</key>
+				<real>22.5803</real>
+				<key>interval_ns</key>
+				<integer>5000000</integer>
+				<key>wakeups</key>
+				<integer>97</integer>
+			</dict>
+		</array>
+		<key>intr_wakeups_per_s</key>
+		<real>295.406</real>
+		<key>pid</key>
+		<integer>-2</integer>
+		<key>name</key>
+		<string>ALL_TASKS</string>
+		<key>started_ns_reliable</key>
+		<false/>
+	</dict>
+	<key>disk</key>
+	<dict>
+		<key>rbytes_diff</key>
+		<integer>0</integer>
+		<key>rops_per_s</key>
+		<real>0</real>
+		<key>rops_diff</key>
+		<integer>0</integer>
+		<key>wops_diff</key>
+		<integer>0</integer>
+		<key>wops_per_s</key>
+		<real>0</real>
+		<key>wbytes_per_s</key>
+		<real>0</real>
+		<key>rbytes_per_s</key>
+		<real>0</real>
+		<key>wbytes_diff</key>
+		<integer>0</integer>
+	</dict>
+	<key>processor</key>
+	<dict>
+		<key>freq_hz</key>
+		<real>1668120000</real>
+		<key>package_joules</key>
+		<real>13.0469</real>
+		<key>packages</key>
+		<array>
+			<dict>
+				<key>c_state_ns</key>
+				<integer>3723350796</integer>
+				<key>c_state_ratio</key>
+				<real>0.866745</real>
+				<key>package</key>
+				<integer>0</integer>
+				<key>c_states</key>
+				<array>
+					<dict>
+						<key>used_ns</key>
+						<integer>191313723</integer>
+						<key>used_ratio</key>
+						<real>0.0445352</real>
+						<key>name</key>
+						<string>C2</string>
+					</dict>
+					<dict>
+						<key>used_ns</key>
+						<integer>743617321</integer>
+						<key>used_ratio</key>
+						<real>0.173104</real>
+						<key>name</key>
+						<string>C3</string>
+					</dict>
+					<dict>
+						<key>used_ns</key>
+						<integer>2788419751</integer>
+						<key>used_ratio</key>
+						<real>0.649106</real>
+						<key>name</key>
+						<string>C6</string>
+					</dict>
+					<dict>
+						<key>used_ns</key>
+						<integer>0</integer>
+						<key>used_ratio</key>
+						<real>0</real>
+						<key>name</key>
+						<string>C7</string>
+					</dict>
+				</array>
+				<key>cores</key>
+				<array>
+					<dict>
+						<key>c_state_ns</key>
+						<integer>3942072317</integer>
+						<key>c_state_ratio</key>
+						<real>0.91766</real>
+						<key>core</key>
+						<integer>0</integer>
+						<key>c_states</key>
+						<array>
+							<dict>
+								<key>used_ns</key>
+								<integer>9035527</integer>
+								<key>used_ratio</key>
+								<real>0.00210335</real>
+								<key>name</key>
+								<string>C3</string>
+							</dict>
+							<dict>
+								<key>used_ns</key>
+								<integer>0</integer>
+								<key>used_ratio</key>
+								<real>0</real>
+								<key>name</key>
+								<string>C6</string>
+							</dict>
+							<dict>
+								<key>used_ns</key>
+								<integer>3933036790</integer>
+								<key>used_ratio</key>
+								<real>0.915557</real>
+								<key>name</key>
+								<string>C7</string>
+							</dict>
+						</array>
+						<key>cpus</key>
+						<array>
+							<dict>
+								<key>duty_cycles</key>
+								<array>
+									<dict>
+										<key>active_count</key>
+										<integer>139</integer>
+										<key>interval_ns</key>
+										<integer>16000</integer>
+										<key>active_per_s</key>
+										<real>32.3573</real>
+										<key>idle_count</key>
+										<integer>25</integer>
+										<key>idle_per_s</key>
+										<real>32.3573</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>16</integer>
+										<key>interval_ns</key>
+										<integer>32000</integer>
+										<key>active_per_s</key>
+										<real>3.72458</real>
+										<key>idle_count</key>
+										<integer>47</integer>
+										<key>idle_per_s</key>
+										<real>3.72458</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>59</integer>
+										<key>interval_ns</key>
+										<integer>64000</integer>
+										<key>active_per_s</key>
+										<real>13.7344</real>
+										<key>idle_count</key>
+										<integer>75</integer>
+										<key>idle_per_s</key>
+										<real>13.7344</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>263</integer>
+										<key>interval_ns</key>
+										<integer>128000</integer>
+										<key>active_per_s</key>
+										<real>61.2228</real>
+										<key>idle_count</key>
+										<integer>171</integer>
+										<key>idle_per_s</key>
+										<real>61.2228</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>356</integer>
+										<key>interval_ns</key>
+										<integer>256000</integer>
+										<key>active_per_s</key>
+										<real>82.8719</real>
+										<key>idle_count</key>
+										<integer>63</integer>
+										<key>idle_per_s</key>
+										<real>82.8719</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>266</integer>
+										<key>interval_ns</key>
+										<integer>512000</integer>
+										<key>active_per_s</key>
+										<real>61.9211</real>
+										<key>idle_count</key>
+										<integer>89</integer>
+										<key>idle_per_s</key>
+										<real>61.9211</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>78</integer>
+										<key>interval_ns</key>
+										<integer>1024000</integer>
+										<key>active_per_s</key>
+										<real>18.1573</real>
+										<key>idle_count</key>
+										<integer>154</integer>
+										<key>idle_per_s</key>
+										<real>18.1573</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>10</integer>
+										<key>interval_ns</key>
+										<integer>2048000</integer>
+										<key>active_per_s</key>
+										<real>2.32786</real>
+										<key>idle_count</key>
+										<integer>130</integer>
+										<key>idle_per_s</key>
+										<real>2.32786</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>4</integer>
+										<key>interval_ns</key>
+										<integer>4096000</integer>
+										<key>active_per_s</key>
+										<real>0.931145</real>
+										<key>idle_count</key>
+										<integer>140</integer>
+										<key>idle_per_s</key>
+										<real>0.931145</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>7</integer>
+										<key>interval_ns</key>
+										<integer>8192000</integer>
+										<key>active_per_s</key>
+										<real>1.6295</real>
+										<key>idle_count</key>
+										<integer>134</integer>
+										<key>idle_per_s</key>
+										<real>1.6295</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>16384000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>119</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>32768000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>50</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+								</array>
+								<key>cpu</key>
+								<integer>0</integer>
+								<key>freq_hz</key>
+								<real>1478510000</real>
+								<key>freq_ratio</key>
+								<real>0.569975</real>
+							</dict>
+							<dict>
+								<key>duty_cycles</key>
+								<array>
+									<dict>
+										<key>active_count</key>
+										<integer>11</integer>
+										<key>interval_ns</key>
+										<integer>16000</integer>
+										<key>active_per_s</key>
+										<real>2.56065</real>
+										<key>idle_count</key>
+										<integer>1</integer>
+										<key>idle_per_s</key>
+										<real>2.56065</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>5</integer>
+										<key>interval_ns</key>
+										<integer>32000</integer>
+										<key>active_per_s</key>
+										<real>1.16393</real>
+										<key>idle_count</key>
+										<integer>0</integer>
+										<key>idle_per_s</key>
+										<real>1.16393</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>4</integer>
+										<key>interval_ns</key>
+										<integer>64000</integer>
+										<key>active_per_s</key>
+										<real>0.931145</real>
+										<key>idle_count</key>
+										<integer>2</integer>
+										<key>idle_per_s</key>
+										<real>0.931145</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>3</integer>
+										<key>interval_ns</key>
+										<integer>128000</integer>
+										<key>active_per_s</key>
+										<real>0.698358</real>
+										<key>idle_count</key>
+										<integer>1</integer>
+										<key>idle_per_s</key>
+										<real>0.698358</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>4</integer>
+										<key>interval_ns</key>
+										<integer>256000</integer>
+										<key>active_per_s</key>
+										<real>0.931145</real>
+										<key>idle_count</key>
+										<integer>1</integer>
+										<key>idle_per_s</key>
+										<real>0.931145</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>512000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>0</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>1024000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>1</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>1</integer>
+										<key>interval_ns</key>
+										<integer>2048000</integer>
+										<key>active_per_s</key>
+										<real>0.232786</real>
+										<key>idle_count</key>
+										<integer>1</integer>
+										<key>idle_per_s</key>
+										<real>0.232786</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>4096000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>1</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>8192000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>2</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>16384000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>1</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>32768000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>2</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+								</array>
+								<key>cpu</key>
+								<integer>1</integer>
+								<key>freq_hz</key>
+								<real>1604120000</real>
+								<key>freq_ratio</key>
+								<real>0.6183959999999999</real>
+							</dict>
+						</array>
+					</dict>
+					<dict>
+						<key>c_state_ns</key>
+						<integer>4165689565</integer>
+						<key>c_state_ratio</key>
+						<real>0.969715</real>
+						<key>core</key>
+						<integer>1</integer>
+						<key>c_states</key>
+						<array>
+							<dict>
+								<key>used_ns</key>
+								<integer>3710669</integer>
+								<key>used_ratio</key>
+								<real>0.000863793</real>
+								<key>name</key>
+								<string>C3</string>
+							</dict>
+							<dict>
+								<key>used_ns</key>
+								<integer>0</integer>
+								<key>used_ratio</key>
+								<real>0</real>
+								<key>name</key>
+								<string>C6</string>
+							</dict>
+							<dict>
+								<key>used_ns</key>
+								<integer>4161978895</integer>
+								<key>used_ratio</key>
+								<real>0.968851</real>
+								<key>name</key>
+								<string>C7</string>
+							</dict>
+						</array>
+						<key>cpus</key>
+						<array>
+							<dict>
+								<key>duty_cycles</key>
+								<array>
+									<dict>
+										<key>active_count</key>
+										<integer>325</integer>
+										<key>interval_ns</key>
+										<integer>16000</integer>
+										<key>active_per_s</key>
+										<real>75.6555</real>
+										<key>idle_count</key>
+										<integer>28</integer>
+										<key>idle_per_s</key>
+										<real>75.6555</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>19</integer>
+										<key>interval_ns</key>
+										<integer>32000</integer>
+										<key>active_per_s</key>
+										<real>4.42294</real>
+										<key>idle_count</key>
+										<integer>61</integer>
+										<key>idle_per_s</key>
+										<real>4.42294</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>58</integer>
+										<key>interval_ns</key>
+										<integer>64000</integer>
+										<key>active_per_s</key>
+										<real>13.5016</real>
+										<key>idle_count</key>
+										<integer>104</integer>
+										<key>idle_per_s</key>
+										<real>13.5016</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>84</integer>
+										<key>interval_ns</key>
+										<integer>128000</integer>
+										<key>active_per_s</key>
+										<real>19.554</real>
+										<key>idle_count</key>
+										<integer>87</integer>
+										<key>idle_per_s</key>
+										<real>19.554</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>29</integer>
+										<key>interval_ns</key>
+										<integer>256000</integer>
+										<key>active_per_s</key>
+										<real>6.7508</real>
+										<key>idle_count</key>
+										<integer>36</integer>
+										<key>idle_per_s</key>
+										<real>6.7508</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>18</integer>
+										<key>interval_ns</key>
+										<integer>512000</integer>
+										<key>active_per_s</key>
+										<real>4.19015</real>
+										<key>idle_count</key>
+										<integer>24</integer>
+										<key>idle_per_s</key>
+										<real>4.19015</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>39</integer>
+										<key>interval_ns</key>
+										<integer>1024000</integer>
+										<key>active_per_s</key>
+										<real>9.078659999999999</real>
+										<key>idle_count</key>
+										<integer>19</integer>
+										<key>idle_per_s</key>
+										<real>9.078659999999999</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>1</integer>
+										<key>interval_ns</key>
+										<integer>2048000</integer>
+										<key>active_per_s</key>
+										<real>0.232786</real>
+										<key>idle_count</key>
+										<integer>20</integer>
+										<key>idle_per_s</key>
+										<real>0.232786</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>9</integer>
+										<key>interval_ns</key>
+										<integer>4096000</integer>
+										<key>active_per_s</key>
+										<real>2.09508</real>
+										<key>idle_count</key>
+										<integer>31</integer>
+										<key>idle_per_s</key>
+										<real>2.09508</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>1</integer>
+										<key>interval_ns</key>
+										<integer>8192000</integer>
+										<key>active_per_s</key>
+										<real>0.232786</real>
+										<key>idle_count</key>
+										<integer>37</integer>
+										<key>idle_per_s</key>
+										<real>0.232786</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>16384000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>44</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>1</integer>
+										<key>interval_ns</key>
+										<integer>32768000</integer>
+										<key>active_per_s</key>
+										<real>0.232786</real>
+										<key>idle_count</key>
+										<integer>62</integer>
+										<key>idle_per_s</key>
+										<real>0.232786</real>
+									</dict>
+								</array>
+								<key>cpu</key>
+								<integer>2</integer>
+								<key>freq_hz</key>
+								<real>2073780000</real>
+								<key>freq_ratio</key>
+								<real>0.799453</real>
+							</dict>
+							<dict>
+								<key>duty_cycles</key>
+								<array>
+									<dict>
+										<key>active_count</key>
+										<integer>9</integer>
+										<key>interval_ns</key>
+										<integer>16000</integer>
+										<key>active_per_s</key>
+										<real>2.09508</real>
+										<key>idle_count</key>
+										<integer>0</integer>
+										<key>idle_per_s</key>
+										<real>2.09508</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>3</integer>
+										<key>interval_ns</key>
+										<integer>32000</integer>
+										<key>active_per_s</key>
+										<real>0.698358</real>
+										<key>idle_count</key>
+										<integer>2</integer>
+										<key>idle_per_s</key>
+										<real>0.698358</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>1</integer>
+										<key>interval_ns</key>
+										<integer>64000</integer>
+										<key>active_per_s</key>
+										<real>0.232786</real>
+										<key>idle_count</key>
+										<integer>0</integer>
+										<key>idle_per_s</key>
+										<real>0.232786</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>1</integer>
+										<key>interval_ns</key>
+										<integer>128000</integer>
+										<key>active_per_s</key>
+										<real>0.232786</real>
+										<key>idle_count</key>
+										<integer>1</integer>
+										<key>idle_per_s</key>
+										<real>0.232786</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>3</integer>
+										<key>interval_ns</key>
+										<integer>256000</integer>
+										<key>active_per_s</key>
+										<real>0.698358</real>
+										<key>idle_count</key>
+										<integer>1</integer>
+										<key>idle_per_s</key>
+										<real>0.698358</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>1</integer>
+										<key>interval_ns</key>
+										<integer>512000</integer>
+										<key>active_per_s</key>
+										<real>0.232786</real>
+										<key>idle_count</key>
+										<integer>1</integer>
+										<key>idle_per_s</key>
+										<real>0.232786</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>1024000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>2</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>2048000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>0</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>4096000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>2</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>8192000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>0</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>16384000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>0</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>32768000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>1</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+								</array>
+								<key>cpu</key>
+								<integer>3</integer>
+								<key>freq_hz</key>
+								<real>1989580000</real>
+								<key>freq_ratio</key>
+								<real>0.766994</real>
+							</dict>
+						</array>
+					</dict>
+					<dict>
+						<key>c_state_ns</key>
+						<integer>4141777109</integer>
+						<key>c_state_ratio</key>
+						<real>0.964148</real>
+						<key>core</key>
+						<integer>2</integer>
+						<key>c_states</key>
+						<array>
+							<dict>
+								<key>used_ns</key>
+								<integer>884933</integer>
+								<key>used_ratio</key>
+								<real>0.000206</real>
+								<key>name</key>
+								<string>C3</string>
+							</dict>
+							<dict>
+								<key>used_ns</key>
+								<integer>0</integer>
+								<key>used_ratio</key>
+								<real>0</real>
+								<key>name</key>
+								<string>C6</string>
+							</dict>
+							<dict>
+								<key>used_ns</key>
+								<integer>4140892176</integer>
+								<key>used_ratio</key>
+								<real>0.963942</real>
+								<key>name</key>
+								<string>C7</string>
+							</dict>
+						</array>
+						<key>cpus</key>
+						<array>
+							<dict>
+								<key>duty_cycles</key>
+								<array>
+									<dict>
+										<key>active_count</key>
+										<integer>396</integer>
+										<key>interval_ns</key>
+										<integer>16000</integer>
+										<key>active_per_s</key>
+										<real>92.1833</real>
+										<key>idle_count</key>
+										<integer>26</integer>
+										<key>idle_per_s</key>
+										<real>92.1833</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>28</integer>
+										<key>interval_ns</key>
+										<integer>32000</integer>
+										<key>active_per_s</key>
+										<real>6.51801</real>
+										<key>idle_count</key>
+										<integer>58</integer>
+										<key>idle_per_s</key>
+										<real>6.51801</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>69</integer>
+										<key>interval_ns</key>
+										<integer>64000</integer>
+										<key>active_per_s</key>
+										<real>16.0622</real>
+										<key>idle_count</key>
+										<integer>124</integer>
+										<key>idle_per_s</key>
+										<real>16.0622</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>97</integer>
+										<key>interval_ns</key>
+										<integer>128000</integer>
+										<key>active_per_s</key>
+										<real>22.5803</real>
+										<key>idle_count</key>
+										<integer>115</integer>
+										<key>idle_per_s</key>
+										<real>22.5803</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>37</integer>
+										<key>interval_ns</key>
+										<integer>256000</integer>
+										<key>active_per_s</key>
+										<real>8.61309</real>
+										<key>idle_count</key>
+										<integer>36</integer>
+										<key>idle_per_s</key>
+										<real>8.61309</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>34</integer>
+										<key>interval_ns</key>
+										<integer>512000</integer>
+										<key>active_per_s</key>
+										<real>7.91473</real>
+										<key>idle_count</key>
+										<integer>34</integer>
+										<key>idle_per_s</key>
+										<real>7.91473</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>32</integer>
+										<key>interval_ns</key>
+										<integer>1024000</integer>
+										<key>active_per_s</key>
+										<real>7.44916</real>
+										<key>idle_count</key>
+										<integer>18</integer>
+										<key>idle_per_s</key>
+										<real>7.44916</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>7</integer>
+										<key>interval_ns</key>
+										<integer>2048000</integer>
+										<key>active_per_s</key>
+										<real>1.6295</real>
+										<key>idle_count</key>
+										<integer>40</integer>
+										<key>idle_per_s</key>
+										<real>1.6295</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>17</integer>
+										<key>interval_ns</key>
+										<integer>4096000</integer>
+										<key>active_per_s</key>
+										<real>3.95736</real>
+										<key>idle_count</key>
+										<integer>40</integer>
+										<key>idle_per_s</key>
+										<real>3.95736</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>3</integer>
+										<key>interval_ns</key>
+										<integer>8192000</integer>
+										<key>active_per_s</key>
+										<real>0.698358</real>
+										<key>idle_count</key>
+										<integer>64</integer>
+										<key>idle_per_s</key>
+										<real>0.698358</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>16384000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>72</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>32768000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>75</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+								</array>
+								<key>cpu</key>
+								<integer>4</integer>
+								<key>freq_hz</key>
+								<real>1768190000</real>
+								<key>freq_ratio</key>
+								<real>0.6816449999999999</real>
+							</dict>
+							<dict>
+								<key>duty_cycles</key>
+								<array>
+									<dict>
+										<key>active_count</key>
+										<integer>16</integer>
+										<key>interval_ns</key>
+										<integer>16000</integer>
+										<key>active_per_s</key>
+										<real>3.72458</real>
+										<key>idle_count</key>
+										<integer>0</integer>
+										<key>idle_per_s</key>
+										<real>3.72458</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>3</integer>
+										<key>interval_ns</key>
+										<integer>32000</integer>
+										<key>active_per_s</key>
+										<real>0.698358</real>
+										<key>idle_count</key>
+										<integer>0</integer>
+										<key>idle_per_s</key>
+										<real>0.698358</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>1</integer>
+										<key>interval_ns</key>
+										<integer>64000</integer>
+										<key>active_per_s</key>
+										<real>0.232786</real>
+										<key>idle_count</key>
+										<integer>0</integer>
+										<key>idle_per_s</key>
+										<real>0.232786</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>1</integer>
+										<key>interval_ns</key>
+										<integer>128000</integer>
+										<key>active_per_s</key>
+										<real>0.232786</real>
+										<key>idle_count</key>
+										<integer>2</integer>
+										<key>idle_per_s</key>
+										<real>0.232786</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>3</integer>
+										<key>interval_ns</key>
+										<integer>256000</integer>
+										<key>active_per_s</key>
+										<real>0.698358</real>
+										<key>idle_count</key>
+										<integer>2</integer>
+										<key>idle_per_s</key>
+										<real>0.698358</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>512000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>1</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>1024000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>1</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>1</integer>
+										<key>interval_ns</key>
+										<integer>2048000</integer>
+										<key>active_per_s</key>
+										<real>0.232786</real>
+										<key>idle_count</key>
+										<integer>0</integer>
+										<key>idle_per_s</key>
+										<real>0.232786</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>4096000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>2</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>8192000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>1</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>16384000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>0</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>32768000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>0</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+								</array>
+								<key>cpu</key>
+								<integer>5</integer>
+								<key>freq_hz</key>
+								<real>2250530000</real>
+								<key>freq_ratio</key>
+								<real>0.867594</real>
+							</dict>
+						</array>
+					</dict>
+					<dict>
+						<key>c_state_ns</key>
+						<integer>4177380735</integer>
+						<key>c_state_ratio</key>
+						<real>0.972436</real>
+						<key>core</key>
+						<integer>3</integer>
+						<key>c_states</key>
+						<array>
+							<dict>
+								<key>used_ns</key>
+								<integer>9674041</integer>
+								<key>used_ratio</key>
+								<real>0.00225198</real>
+								<key>name</key>
+								<string>C3</string>
+							</dict>
+							<dict>
+								<key>used_ns</key>
+								<integer>0</integer>
+								<key>used_ratio</key>
+								<real>0</real>
+								<key>name</key>
+								<string>C6</string>
+							</dict>
+							<dict>
+								<key>used_ns</key>
+								<integer>4167706693</integer>
+								<key>used_ratio</key>
+								<real>0.970184</real>
+								<key>name</key>
+								<string>C7</string>
+							</dict>
+						</array>
+						<key>cpus</key>
+						<array>
+							<dict>
+								<key>duty_cycles</key>
+								<array>
+									<dict>
+										<key>active_count</key>
+										<integer>378</integer>
+										<key>interval_ns</key>
+										<integer>16000</integer>
+										<key>active_per_s</key>
+										<real>87.9932</real>
+										<key>idle_count</key>
+										<integer>29</integer>
+										<key>idle_per_s</key>
+										<real>87.9932</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>28</integer>
+										<key>interval_ns</key>
+										<integer>32000</integer>
+										<key>active_per_s</key>
+										<real>6.51801</real>
+										<key>idle_count</key>
+										<integer>51</integer>
+										<key>idle_per_s</key>
+										<real>6.51801</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>62</integer>
+										<key>interval_ns</key>
+										<integer>64000</integer>
+										<key>active_per_s</key>
+										<real>14.4327</real>
+										<key>idle_count</key>
+										<integer>119</integer>
+										<key>idle_per_s</key>
+										<real>14.4327</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>84</integer>
+										<key>interval_ns</key>
+										<integer>128000</integer>
+										<key>active_per_s</key>
+										<real>19.554</real>
+										<key>idle_count</key>
+										<integer>103</integer>
+										<key>idle_per_s</key>
+										<real>19.554</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>32</integer>
+										<key>interval_ns</key>
+										<integer>256000</integer>
+										<key>active_per_s</key>
+										<real>7.44916</real>
+										<key>idle_count</key>
+										<integer>41</integer>
+										<key>idle_per_s</key>
+										<real>7.44916</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>27</integer>
+										<key>interval_ns</key>
+										<integer>512000</integer>
+										<key>active_per_s</key>
+										<real>6.28523</real>
+										<key>idle_count</key>
+										<integer>23</integer>
+										<key>idle_per_s</key>
+										<real>6.28523</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>29</integer>
+										<key>interval_ns</key>
+										<integer>1024000</integer>
+										<key>active_per_s</key>
+										<real>6.7508</real>
+										<key>idle_count</key>
+										<integer>17</integer>
+										<key>idle_per_s</key>
+										<real>6.7508</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>10</integer>
+										<key>interval_ns</key>
+										<integer>2048000</integer>
+										<key>active_per_s</key>
+										<real>2.32786</real>
+										<key>idle_count</key>
+										<integer>30</integer>
+										<key>idle_per_s</key>
+										<real>2.32786</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>12</integer>
+										<key>interval_ns</key>
+										<integer>4096000</integer>
+										<key>active_per_s</key>
+										<real>2.79343</real>
+										<key>idle_count</key>
+										<integer>32</integer>
+										<key>idle_per_s</key>
+										<real>2.79343</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>1</integer>
+										<key>interval_ns</key>
+										<integer>8192000</integer>
+										<key>active_per_s</key>
+										<real>0.232786</real>
+										<key>idle_count</key>
+										<integer>73</integer>
+										<key>idle_per_s</key>
+										<real>0.232786</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>16384000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>51</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>32768000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>72</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+								</array>
+								<key>cpu</key>
+								<integer>6</integer>
+								<key>freq_hz</key>
+								<real>1635700000</real>
+								<key>freq_ratio</key>
+								<real>0.6305730000000001</real>
+							</dict>
+							<dict>
+								<key>duty_cycles</key>
+								<array>
+									<dict>
+										<key>active_count</key>
+										<integer>15</integer>
+										<key>interval_ns</key>
+										<integer>16000</integer>
+										<key>active_per_s</key>
+										<real>3.49179</real>
+										<key>idle_count</key>
+										<integer>1</integer>
+										<key>idle_per_s</key>
+										<real>3.49179</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>3</integer>
+										<key>interval_ns</key>
+										<integer>32000</integer>
+										<key>active_per_s</key>
+										<real>0.698358</real>
+										<key>idle_count</key>
+										<integer>1</integer>
+										<key>idle_per_s</key>
+										<real>0.698358</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>2</integer>
+										<key>interval_ns</key>
+										<integer>64000</integer>
+										<key>active_per_s</key>
+										<real>0.465572</real>
+										<key>idle_count</key>
+										<integer>0</integer>
+										<key>idle_per_s</key>
+										<real>0.465572</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>1</integer>
+										<key>interval_ns</key>
+										<integer>128000</integer>
+										<key>active_per_s</key>
+										<real>0.232786</real>
+										<key>idle_count</key>
+										<integer>0</integer>
+										<key>idle_per_s</key>
+										<real>0.232786</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>1</integer>
+										<key>interval_ns</key>
+										<integer>256000</integer>
+										<key>active_per_s</key>
+										<real>0.232786</real>
+										<key>idle_count</key>
+										<integer>1</integer>
+										<key>idle_per_s</key>
+										<real>0.232786</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>512000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>1</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>1024000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>0</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>2048000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>0</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>4096000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>2</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>8192000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>1</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>16384000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>0</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+									<dict>
+										<key>active_count</key>
+										<integer>0</integer>
+										<key>interval_ns</key>
+										<integer>32768000</integer>
+										<key>active_per_s</key>
+										<real>0</real>
+										<key>idle_count</key>
+										<integer>0</integer>
+										<key>idle_per_s</key>
+										<real>0</real>
+									</dict>
+								</array>
+								<key>cpu</key>
+								<integer>7</integer>
+								<key>freq_hz</key>
+								<real>1818650000</real>
+								<key>freq_ratio</key>
+								<real>0.701099</real>
+							</dict>
+						</array>
+					</dict>
+				</array>
+			</dict>
+		</array>
+		<key>package_watts</key>
+		<real>3.03713</real>
+		<key>llc_flushed_ratio</key>
+		<real>0.850534</real>
+		<key>freq_ratio</key>
+		<real>0.643069</real>
+	</dict>
+	<key>network</key>
+	<dict>
+		<key>obytes</key>
+		<integer>258</integer>
+		<key>obyte_rate</key>
+		<real>60.0588</real>
+		<key>opacket_rate</key>
+		<real>0.698358</real>
+		<key>ipackets</key>
+		<integer>8</integer>
+		<key>ibytes</key>
+		<integer>2449</integer>
+		<key>ibyte_rate</key>
+		<real>570.093</real>
+		<key>ipacket_rate</key>
+		<real>1.86229</real>
+		<key>opackets</key>
+		<integer>3</integer>
+	</dict>
+	<key>elapsed_ns</key>
+	<integer>4295788088</integer>
+	<key>is_delta</key>
+	<false/>
+</dict>
+</plist>
diff --git a/catapult/telemetry/telemetry/internal/testing/powermetrics_output_unsupported_hardware.output b/catapult/telemetry/telemetry/internal/testing/powermetrics_output_unsupported_hardware.output
new file mode 100644
index 0000000..ce03f93
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/powermetrics_output_unsupported_hardware.output
@@ -0,0 +1,505 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+<key>is_delta</key><true/>
+<key>elapsed_ns</key><integer>57758028</integer>
+<key>hw_model</key><string>MacBookPro5,4</string>
+<key>kern_osversion</key><string>13B42</string>
+<key>timestamp</key><date>2014-01-26T03:58:59Z</date>
+<key>tasks</key>
+<array>
+<dict>
+<key>pid</key><integer>6555</integer>
+<key>name</key><string>Google Chrome He</string>
+<key>started_ns</key><integer>284136698238000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>35244306</integer>
+<key>cputime_ms_per_s</key><real>610.206</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.889658</real>
+<key>intr_wakeups</key><integer>3</integer>
+<key>intr_wakeups_per_s</key><real>51.9408</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>6546</integer>
+<key>name</key><string>Google Chrome He</string>
+<key>started_ns</key><integer>284134682369000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>32548397</integer>
+<key>cputime_ms_per_s</key><real>563.53</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.966536</real>
+<key>intr_wakeups</key><integer>0</integer>
+<key>intr_wakeups_per_s</key><real>0</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>6544</integer>
+<key>name</key><string>Google Chrome Ca</string>
+<key>started_ns</key><integer>284134369378000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>8773988</integer>
+<key>cputime_ms_per_s</key><real>151.909</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.78863</real>
+<key>intr_wakeups</key><integer>3</integer>
+<key>intr_wakeups_per_s</key><real>51.9408</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>6543</integer>
+<key>name</key><string>Python</string>
+<key>started_ns</key><integer>284133951261000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>5664752</integer>
+<key>cputime_ms_per_s</key><real>98.0773</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.813448</real>
+<key>intr_wakeups</key><integer>0</integer>
+<key>intr_wakeups_per_s</key><real>0</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>6556</integer>
+<key>name</key><string>powermetrics</string>
+<key>started_ns</key><integer>284137559665000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>5653312</integer>
+<key>cputime_ms_per_s</key><real>97.8792</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.275642</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>17.3136</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>4658</integer>
+<key>name</key><string>powermetrics</string>
+<key>started_ns</key><integer>283169551318000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>4295690</integer>
+<key>cputime_ms_per_s</key><real>74.3739</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.255386</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>17.3136</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>923</integer>
+<key>name</key><string>Google Chrome</string>
+<key>started_ns</key><integer>159793249000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>3993669</integer>
+<key>cputime_ms_per_s</key><real>69.1448</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.885849</real>
+<key>intr_wakeups</key><integer>2</integer>
+<key>intr_wakeups_per_s</key><real>34.6272</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>939</integer>
+<key>name</key><string>coreaudiod</string>
+<key>started_ns</key><integer>160547555000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>3831594</integer>
+<key>cputime_ms_per_s</key><real>66.3387</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.111444</real>
+<key>intr_wakeups</key><integer>20</integer>
+<key>intr_wakeups_per_s</key><real>346.272</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>20</integer>
+  <key>wakeups_per_s</key><real>346.272</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>171</integer>
+<key>name</key><string>WindowServer</string>
+<key>started_ns</key><integer>80236634000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>2596609</integer>
+<key>cputime_ms_per_s</key><real>44.9567</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.7211</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>17.3136</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>92729</integer>
+<key>name</key><string>Google Chrome He</string>
+<key>started_ns</key><integer>280934551284000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>2189391</integer>
+<key>cputime_ms_per_s</key><real>37.9063</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.859925</real>
+<key>intr_wakeups</key><integer>9</integer>
+<key>intr_wakeups_per_s</key><real>155.822</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>0</integer>
+<key>name</key><string>kernel_task</string>
+<key>started_ns</key><integer>5162903000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>1223535</integer>
+<key>cputime_ms_per_s</key><real>21.1838</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0</real>
+<key>intr_wakeups</key><integer>19</integer>
+<key>intr_wakeups_per_s</key><real>328.959</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>60</integer>
+<key>name</key><string>mds</string>
+<key>started_ns</key><integer>71568427000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>33490</integer>
+<key>cputime_ms_per_s</key><real>0.579833</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.718901</real>
+<key>intr_wakeups</key><integer>0</integer>
+<key>intr_wakeups_per_s</key><real>0</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>14230</integer>
+<key>name</key><string>CIJScannerRegist</string>
+<key>started_ns</key><integer>17913578888000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>28198</integer>
+<key>cputime_ms_per_s</key><real>0.488209</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.271189</real>
+<key>intr_wakeups</key><integer>2</integer>
+<key>intr_wakeups_per_s</key><real>34.6272</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>92766</integer>
+<key>name</key><string>GoogleTalkPlugin</string>
+<key>started_ns</key><integer>281459670930000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>22858</integer>
+<key>cputime_ms_per_s</key><real>0.395755</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.442558</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>17.3136</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+</array>
+<key>all_tasks</key>
+<dict>
+<key>pid</key><integer>-2</integer>
+<key>name</key><string>ALL_TASKS</string>
+<key>started_ns</key><integer>0</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>106099789</integer>
+<key>cputime_ms_per_s</key><real>1836.97</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.798862</real>
+<key>intr_wakeups</key><integer>62</integer>
+<key>intr_wakeups_per_s</key><real>1073.44</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>20</integer>
+  <key>wakeups_per_s</key><real>346.272</real>
+</dict>
+</array>
+</dict>
+<key>backlight</key>
+<dict>
+<key>value</key><integer>412</integer>
+<key>min</key><integer>0</integer>
+<key>max</key><integer>1024</integer>
+</dict>
+<key>network</key>
+<dict>
+<key>opackets</key><integer>24</integer>
+<key>opacket_rate</key><real>415.527</real>
+<key>ipackets</key><integer>24</integer>
+<key>ipacket_rate</key><real>415.527</real>
+<key>obytes</key><integer>7244</integer>
+<key>obyte_rate</key><real>125420</real>
+<key>ibytes</key><integer>7244</integer>
+<key>ibyte_rate</key><real>125420</real>
+</dict>
+<key>disk</key>
+<dict>
+<key>rops_diff</key><integer>0</integer>
+<key>rops_per_s</key><real>0</real>
+<key>wops_diff</key><integer>0</integer>
+<key>wops_per_s</key><real>0</real>
+<key>rbytes_diff</key><integer>0</integer>
+<key>rbytes_per_s</key><real>0</real>
+<key>wbytes_diff</key><integer>0</integer>
+<key>wbytes_per_s</key><real>0</real>
+</dict>
+<key>interrupts</key>
+<array>
+<dict>
+<key>cpu</key><integer>0</integer>
+<key>vectors</key>
+<array>
+<dict>
+<key>name</key><string>HPET</string>
+<key>vector</key><integer>66</integer>
+<key>events</key><integer>2</integer>
+<key>events_per_s</key><real>34.6272</real>
+</dict>
+<dict>
+<key>name</key><string>IGPU</string>
+<key>vector</key><integer>146</integer>
+<key>events</key><integer>2</integer>
+<key>events_per_s</key><real>34.6272</real>
+</dict>
+<dict>
+<key>name</key><string>ARPT</string>
+<key>vector</key><integer>148</integer>
+<key>events</key><integer>2</integer>
+<key>events_per_s</key><real>34.6272</real>
+</dict>
+<dict>
+<key>name</key><string>TMR</string>
+<key>vector</key><integer>221</integer>
+<key>events</key><integer>43</integer>
+<key>events_per_s</key><real>744.485</real>
+</dict>
+<dict>
+<key>name</key><string>IPI</string>
+<key>vector</key><integer>222</integer>
+<key>events</key><integer>47</integer>
+<key>events_per_s</key><real>813.74</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>cpu</key><integer>1</integer>
+<key>vectors</key>
+<array>
+<dict>
+<key>name</key><string>TMR</string>
+<key>vector</key><integer>221</integer>
+<key>events</key><integer>15</integer>
+<key>events_per_s</key><real>259.704</real>
+</dict>
+<dict>
+<key>name</key><string>IPI</string>
+<key>vector</key><integer>222</integer>
+<key>events</key><integer>56</integer>
+<key>events_per_s</key><real>969.562</real>
+</dict>
+</array>
+</dict>
+</array>
+</dict>
+</plist>
diff --git a/catapult/telemetry/telemetry/internal/testing/powermetrics_vmware.output b/catapult/telemetry/telemetry/internal/testing/powermetrics_vmware.output
new file mode 100644
index 0000000..8cf3b8d
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/powermetrics_vmware.output
@@ -0,0 +1,2889 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+<key>is_delta</key><false/>
+<key>elapsed_ns</key><integer>2791294949</integer>
+<key>hw_model</key><string>VMware7,1</string>
+<key>kern_osversion</key><string>13F34</string>
+<key>timestamp</key><date>2014-12-09T13:48:10Z</date>
+<key>tasks</key>
+<array>
+<dict>
+<key>pid</key><integer>8283</integer>
+<key>name</key><string>Python</string>
+<key>started_ns</key><integer>258047690000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>2785827455</integer>
+<key>cputime_ms_per_s</key><real>998.698</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.907007</real>
+<key>intr_wakeups</key><integer>0</integer>
+<key>intr_wakeups_per_s</key><real>0</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>0</integer>
+<key>name</key><string>kernel_task</string>
+<key>started_ns</key><integer>252926000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>22377453</integer>
+<key>cputime_ms_per_s</key><real>8.02215</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0</real>
+<key>intr_wakeups</key><integer>393</integer>
+<key>intr_wakeups_per_s</key><real>140.888</real>
+<key>idle_wakeups</key><integer>142</integer>
+<key>idle_wakeups_per_s</key><real>50.9059</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>124</integer>
+<key>name</key><string>grr</string>
+<key>started_ns</key><integer>8507377000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>2568839</integer>
+<key>cputime_ms_per_s</key><real>0.92091</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.718355</real>
+<key>intr_wakeups</key><integer>67</integer>
+<key>intr_wakeups_per_s</key><real>24.019</real>
+<key>idle_wakeups</key><integer>52</integer>
+<key>idle_wakeups_per_s</key><real>18.6416</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>2</integer>
+  <key>wakeups_per_s</key><real>0.716985</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>1</integer>
+  <key>wakeups_per_s</key><real>0.358493</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>116</integer>
+<key>name</key><string>vmware-tools-dae</string>
+<key>started_ns</key><integer>8411762000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>3273185</integer>
+<key>cputime_ms_per_s</key><real>1.17341</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.449496</real>
+<key>intr_wakeups</key><integer>31</integer>
+<key>intr_wakeups_per_s</key><real>11.1133</real>
+<key>idle_wakeups</key><integer>22</integer>
+<key>idle_wakeups_per_s</key><real>7.88684</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>267</integer>
+<key>name</key><string>vmware-tools-dae</string>
+<key>started_ns</key><integer>73620941000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>3680193</integer>
+<key>cputime_ms_per_s</key><real>1.31932</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.437701</real>
+<key>intr_wakeups</key><integer>32</integer>
+<key>intr_wakeups_per_s</key><real>11.4718</real>
+<key>idle_wakeups</key><integer>9</integer>
+<key>idle_wakeups_per_s</key><real>3.22643</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>90</integer>
+<key>name</key><string>mds</string>
+<key>started_ns</key><integer>8404018000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>4231481</integer>
+<key>cputime_ms_per_s</key><real>1.51695</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.422196</real>
+<key>intr_wakeups</key><integer>12</integer>
+<key>intr_wakeups_per_s</key><real>4.30191</real>
+<key>idle_wakeups</key><integer>1</integer>
+<key>idle_wakeups_per_s</key><real>0.358493</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>47</integer>
+<key>name</key><string>fseventsd</string>
+<key>started_ns</key><integer>5848136000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>3609311</integer>
+<key>cputime_ms_per_s</key><real>1.29391</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.425773</real>
+<key>intr_wakeups</key><integer>16</integer>
+<key>intr_wakeups_per_s</key><real>5.73588</real>
+<key>idle_wakeups</key><integer>1</integer>
+<key>idle_wakeups_per_s</key><real>0.358493</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>69</integer>
+<key>name</key><string>netbiosd</string>
+<key>started_ns</key><integer>8370786000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>2217146</integer>
+<key>cputime_ms_per_s</key><real>0.79483</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.319186</real>
+<key>intr_wakeups</key><integer>0</integer>
+<key>intr_wakeups_per_s</key><real>0</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>8289</integer>
+<key>name</key><string>powermetrics</string>
+<key>started_ns</key><integer>266634178000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>1857249</integer>
+<key>cputime_ms_per_s</key><real>0.66581</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.244225</real>
+<key>intr_wakeups</key><integer>0</integer>
+<key>intr_wakeups_per_s</key><real>0</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>230</integer>
+<key>name</key><string>Finder</string>
+<key>started_ns</key><integer>72342540000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>1460335</integer>
+<key>cputime_ms_per_s</key><real>0.523519</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.403851</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>50</integer>
+<key>name</key><string>mDNSResponder</string>
+<key>started_ns</key><integer>5850938000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>1156791</integer>
+<key>cputime_ms_per_s</key><real>0.414701</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.499696</real>
+<key>intr_wakeups</key><integer>8</integer>
+<key>intr_wakeups_per_s</key><real>2.86794</real>
+<key>idle_wakeups</key><integer>1</integer>
+<key>idle_wakeups_per_s</key><real>0.358493</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>128</integer>
+<key>name</key><string>nrpe</string>
+<key>started_ns</key><integer>8596070000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>158225</integer>
+<key>cputime_ms_per_s</key><real>0.0567225</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.148194</real>
+<key>intr_wakeups</key><integer>5</integer>
+<key>intr_wakeups_per_s</key><real>1.79246</real>
+<key>idle_wakeups</key><integer>4</integer>
+<key>idle_wakeups_per_s</key><real>1.43397</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>26</integer>
+<key>name</key><string>cfprefsd</string>
+<key>started_ns</key><integer>5283770000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>357267</integer>
+<key>cputime_ms_per_s</key><real>0.128078</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.16081</real>
+<key>intr_wakeups</key><integer>3</integer>
+<key>intr_wakeups_per_s</key><real>1.07548</real>
+<key>idle_wakeups</key><integer>3</integer>
+<key>idle_wakeups_per_s</key><real>1.07548</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>72</integer>
+<key>name</key><string>snmpd</string>
+<key>started_ns</key><integer>8399046000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>254431</integer>
+<key>cputime_ms_per_s</key><real>0.0912116</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.289497</real>
+<key>intr_wakeups</key><integer>2</integer>
+<key>intr_wakeups_per_s</key><real>0.716985</real>
+<key>idle_wakeups</key><integer>1</integer>
+<key>idle_wakeups_per_s</key><real>0.358493</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>87</integer>
+<key>name</key><string>pacemaker</string>
+<key>started_ns</key><integer>8403252000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>234206</integer>
+<key>cputime_ms_per_s</key><real>0.0839611</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.310556</real>
+<key>intr_wakeups</key><integer>3</integer>
+<key>intr_wakeups_per_s</key><real>1.07548</real>
+<key>idle_wakeups</key><integer>1</integer>
+<key>idle_wakeups_per_s</key><real>0.358493</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>95</integer>
+<key>name</key><string>locationd</string>
+<key>started_ns</key><integer>8405499000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>161023</integer>
+<key>cputime_ms_per_s</key><real>0.0577255</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.196748</real>
+<key>intr_wakeups</key><integer>2</integer>
+<key>intr_wakeups_per_s</key><real>0.716985</real>
+<key>idle_wakeups</key><integer>1</integer>
+<key>idle_wakeups_per_s</key><real>0.358493</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>73</integer>
+<key>name</key><string>httpd</string>
+<key>started_ns</key><integer>8399290000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>65001</integer>
+<key>cputime_ms_per_s</key><real>0.0233024</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.298288</real>
+<key>intr_wakeups</key><integer>3</integer>
+<key>intr_wakeups_per_s</key><real>1.07548</real>
+<key>idle_wakeups</key><integer>1</integer>
+<key>idle_wakeups_per_s</key><real>0.358493</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>117</integer>
+<key>name</key><string>ruby</string>
+<key>started_ns</key><integer>8412025000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>250926</integer>
+<key>cputime_ms_per_s</key><real>0.0899551</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.789161</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>14</integer>
+<key>name</key><string>kextd</string>
+<key>started_ns</key><integer>3391443000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>239039</integer>
+<key>cputime_ms_per_s</key><real>0.0856937</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.502717</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>20</integer>
+<key>name</key><string>configd</string>
+<key>started_ns</key><integer>3427308000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>209575</integer>
+<key>cputime_ms_per_s</key><real>0.0751311</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.266702</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>93</integer>
+<key>name</key><string>loginwindow</string>
+<key>started_ns</key><integer>8404920000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>209443</integer>
+<key>cputime_ms_per_s</key><real>0.0750838</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.213891</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>272</integer>
+<key>name</key><string>com.apple.ShareK</string>
+<key>started_ns</key><integer>73682045000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>190765</integer>
+<key>cputime_ms_per_s</key><real>0.0683878</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.183666</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>8248</integer>
+<key>name</key><string>sshd</string>
+<key>started_ns</key><integer>203501449000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>186285</integer>
+<key>cputime_ms_per_s</key><real>0.0667818</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.258147</real>
+<key>intr_wakeups</key><integer>0</integer>
+<key>intr_wakeups_per_s</key><real>0</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>205</integer>
+<key>name</key><string>UserEventAgent</string>
+<key>started_ns</key><integer>12002515000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>166112</integer>
+<key>cputime_ms_per_s</key><real>0.0595499</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.258067</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>260</integer>
+<key>name</key><string>identityservices</string>
+<key>started_ns</key><integer>73617898000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>162945</integer>
+<key>cputime_ms_per_s</key><real>0.0584146</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.153935</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>17</integer>
+<key>name</key><string>notifyd</string>
+<key>started_ns</key><integer>3409006000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>155892</integer>
+<key>cputime_ms_per_s</key><real>0.0558861</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.244599</real>
+<key>intr_wakeups</key><integer>2</integer>
+<key>intr_wakeups_per_s</key><real>0.716985</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>245</integer>
+<key>name</key><string>AirPlayUIAgent</string>
+<key>started_ns</key><integer>72643963000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>143964</integer>
+<key>cputime_ms_per_s</key><real>0.05161</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.296227</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>249</integer>
+<key>name</key><string>NotificationCent</string>
+<key>started_ns</key><integer>73038119000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>138477</integer>
+<key>cputime_ms_per_s</key><real>0.049643</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.175401</real>
+<key>intr_wakeups</key><integer>2</integer>
+<key>intr_wakeups_per_s</key><real>0.716985</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>19</integer>
+<key>name</key><string>diskarbitrationd</string>
+<key>started_ns</key><integer>3425697000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>133407</integer>
+<key>cputime_ms_per_s</key><real>0.0478254</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.293141</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>112</integer>
+<key>name</key><string>apsd</string>
+<key>started_ns</key><integer>8410568000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>133265</integer>
+<key>cputime_ms_per_s</key><real>0.0477745</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.188587</real>
+<key>intr_wakeups</key><integer>2</integer>
+<key>intr_wakeups_per_s</key><real>0.716985</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>254</integer>
+<key>name</key><string>ReportPanic</string>
+<key>started_ns</key><integer>73615217000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>128367</integer>
+<key>cputime_ms_per_s</key><real>0.0460186</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.208917</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>227</integer>
+<key>name</key><string>Dock</string>
+<key>started_ns</key><integer>72337119000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>126905</integer>
+<key>cputime_ms_per_s</key><real>0.0454945</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.271258</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>18</integer>
+<key>name</key><string>powerd</string>
+<key>started_ns</key><integer>3420877000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>125879</integer>
+<key>cputime_ms_per_s</key><real>0.0451267</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.211783</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>250</integer>
+<key>name</key><string>com.apple.dock.e</string>
+<key>started_ns</key><integer>73506698000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>122322</integer>
+<key>cputime_ms_per_s</key><real>0.0438515</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.187816</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>22</integer>
+<key>name</key><string>syslogd</string>
+<key>started_ns</key><integer>3438979000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>121278</integer>
+<key>cputime_ms_per_s</key><real>0.0434773</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.240629</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>111</integer>
+<key>name</key><string>autofsd</string>
+<key>started_ns</key><integer>8410315000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>117402</integer>
+<key>cputime_ms_per_s</key><real>0.0420877</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.186803</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>99</integer>
+<key>name</key><string>hidd</string>
+<key>started_ns</key><integer>8406630000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>116557</integer>
+<key>cputime_ms_per_s</key><real>0.0417848</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.206037</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>248</integer>
+<key>name</key><string>ARDAgent</string>
+<key>started_ns</key><integer>72729660000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>116430</integer>
+<key>cputime_ms_per_s</key><real>0.0417393</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.216465</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>80</integer>
+<key>name</key><string>stackshot</string>
+<key>started_ns</key><integer>8401291000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>115387</integer>
+<key>cputime_ms_per_s</key><real>0.0413654</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.276002</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>139</integer>
+<key>name</key><string>rpcsvchost</string>
+<key>started_ns</key><integer>8709470000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>112551</integer>
+<key>cputime_ms_per_s</key><real>0.0403487</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.273076</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>130</integer>
+<key>name</key><string>WindowServer</string>
+<key>started_ns</key><integer>8647186000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>110836</integer>
+<key>cputime_ms_per_s</key><real>0.0397339</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.239417</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>522</integer>
+<key>name</key><string>suhelperd</string>
+<key>started_ns</key><integer>83826131000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>106108</integer>
+<key>cputime_ms_per_s</key><real>0.0380389</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.180627</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>114</integer>
+<key>name</key><string>airportd</string>
+<key>started_ns</key><integer>8411228000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>105857</integer>
+<key>cputime_ms_per_s</key><real>0.0379489</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.192524</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>261</integer>
+<key>name</key><string>helpd</string>
+<key>started_ns</key><integer>73618414000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>105836</integer>
+<key>cputime_ms_per_s</key><real>0.0379414</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.278535</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>520</integer>
+<key>name</key><string>softwareupdated</string>
+<key>started_ns</key><integer>83764815000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>105122</integer>
+<key>cputime_ms_per_s</key><real>0.0376855</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.205019</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>259</integer>
+<key>name</key><string>imagent</string>
+<key>started_ns</key><integer>73617455000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>103551</integer>
+<key>cputime_ms_per_s</key><real>0.0371223</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.18247</real>
+<key>intr_wakeups</key><integer>2</integer>
+<key>intr_wakeups_per_s</key><real>0.716985</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>211</integer>
+<key>name</key><string>fontd</string>
+<key>started_ns</key><integer>12043121000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>101409</integer>
+<key>cputime_ms_per_s</key><real>0.0363544</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.240639</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>77</integer>
+<key>name</key><string>usbmuxd</string>
+<key>started_ns</key><integer>8400477000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>99736</integer>
+<key>cputime_ms_per_s</key><real>0.0357546</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.236936</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>115</integer>
+<key>name</key><string>daemondo</string>
+<key>started_ns</key><integer>8411524000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>92755</integer>
+<key>cputime_ms_per_s</key><real>0.033252</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.290475</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>231</integer>
+<key>name</key><string>coreaudiod</string>
+<key>started_ns</key><integer>72346216000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>89762</integer>
+<key>cputime_ms_per_s</key><real>0.032179</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.28248</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>49</integer>
+<key>name</key><string>networkd</string>
+<key>started_ns</key><integer>5850133000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>89467</integer>
+<key>cputime_ms_per_s</key><real>0.0320733</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.232935</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>160</integer>
+<key>name</key><string>ocspd</string>
+<key>started_ns</key><integer>10495124000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>87848</integer>
+<key>cputime_ms_per_s</key><real>0.0314929</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.239163</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>23</integer>
+<key>name</key><string>opendirectoryd</string>
+<key>started_ns</key><integer>3453740000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>87217</integer>
+<key>cputime_ms_per_s</key><real>0.0312666</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.202231</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>113</integer>
+<key>name</key><string>aosnotifyd</string>
+<key>started_ns</key><integer>8410931000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>86484</integer>
+<key>cputime_ms_per_s</key><real>0.0310039</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.280919</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>273</integer>
+<key>name</key><string>secd</string>
+<key>started_ns</key><integer>73714612000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>86365</integer>
+<key>cputime_ms_per_s</key><real>0.0309612</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.303746</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>218</integer>
+<key>name</key><string>CalendarAgent</string>
+<key>started_ns</key><integer>12364928000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>86062</integer>
+<key>cputime_ms_per_s</key><real>0.0308526</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.260138</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>251</integer>
+<key>name</key><string>WiFiKeychainProx</string>
+<key>started_ns</key><integer>73614011000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>82732</integer>
+<key>cputime_ms_per_s</key><real>0.0296588</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.207731</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>8288</integer>
+<key>name</key><string>sudo</string>
+<key>started_ns</key><integer>266624234000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>78962</integer>
+<key>cputime_ms_per_s</key><real>0.0283073</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.223145</real>
+<key>intr_wakeups</key><integer>0</integer>
+<key>intr_wakeups_per_s</key><real>0</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>237</integer>
+<key>name</key><string>ubd</string>
+<key>started_ns</key><integer>72509873000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>76577</integer>
+<key>cputime_ms_per_s</key><real>0.0274523</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.324732</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>97</integer>
+<key>name</key><string>kdc</string>
+<key>started_ns</key><integer>8406021000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>70459</integer>
+<key>cputime_ms_per_s</key><real>0.025259</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.264281</real>
+<key>intr_wakeups</key><integer>1</integer>
+<key>intr_wakeups_per_s</key><real>0.358493</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>pid</key><integer>638</integer>
+<key>name</key><string>Python</string>
+<key>started_ns</key><integer>104013927000</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>67435</integer>
+<key>cputime_ms_per_s</key><real>0.0241749</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.235634</real>
+<key>intr_wakeups</key><integer>0</integer>
+<key>intr_wakeups_per_s</key><real>0</real>
+<key>idle_wakeups</key><integer>0</integer>
+<key>idle_wakeups_per_s</key><real>0</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>0</integer>
+  <key>wakeups_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+</array>
+<key>all_tasks</key>
+warning: time went backwards by 1418132621000000000 ns
+<dict>
+<key>pid</key><integer>-2</integer>
+<key>name</key><string>ALL_TASKS</string>
+<key>started_ns</key><integer>0</integer>
+<key>started_ns_reliable</key><false/>
+<key>cputime_ns</key><integer>2838933344</integer>
+<key>cputime_ms_per_s</key><real>1017.07</real>
+<key>cputime_ms_per_s_reliable</key><true/>
+<key>cputime_userland_ratio</key><real>0.894385</real>
+<key>intr_wakeups</key><integer>623</integer>
+<key>intr_wakeups_per_s</key><real>223.194</real>
+<key>idle_wakeups</key><integer>239</integer>
+<key>idle_wakeups_per_s</key><real>85.6233</real>
+<key>timer_wakeups</key>
+<array>
+<dict>
+  <key>interval_ns</key><integer>2000000</integer>
+  <key>wakeups</key><integer>2</integer>
+  <key>wakeups_per_s</key><real>0.716513</real>
+</dict>
+<dict>
+  <key>interval_ns</key><integer>5000000</integer>
+  <key>wakeups</key><integer>1</integer>
+  <key>wakeups_per_s</key><real>0.358257</real>
+</dict>
+</array>
+</dict>
+<key>network</key>
+<dict>
+<key>opackets</key><integer>2</integer>
+<key>opacket_rate</key><real>0.716513</real>
+<key>ipackets</key><integer>41</integer>
+<key>ipacket_rate</key><real>14.6885</real>
+<key>obytes</key><integer>168</integer>
+<key>obyte_rate</key><real>60.1871</real>
+<key>ibytes</key><integer>6304</integer>
+<key>ibyte_rate</key><real>2258.45</real>
+</dict>
+<key>disk</key>
+<dict>
+<key>rops_diff</key><integer>1</integer>
+<key>rops_per_s</key><real>0.358257</real>
+<key>wops_diff</key><integer>251</integer>
+<key>wops_per_s</key><real>89.9224</real>
+<key>rbytes_diff</key><integer>4096</integer>
+<key>rbytes_per_s</key><real>1467.42</real>
+<key>wbytes_diff</key><integer>65906688</integer>
+<key>wbytes_per_s</key><real>2.36115e+07</real>
+</dict>
+<key>interrupts</key>
+<array>
+<dict>
+<key>cpu</key><integer>0</integer>
+<key>vectors</key>
+<array>
+<dict>
+<key>name</key><string>CHN1</string>
+<key>vector</key><integer>79</integer>
+<key>events</key><integer>9</integer>
+<key>events_per_s</key><real>3.22431</real>
+</dict>
+<dict>
+<key>name</key><string>scsi</string>
+<key>vector</key><integer>81</integer>
+<key>events</key><integer>239</integer>
+<key>events_per_s</key><real>85.6233</real>
+</dict>
+<dict>
+<key>name</key><string>S2F0</string>
+<key>vector</key><integer>83</integer>
+<key>events</key><integer>34</integer>
+<key>events_per_s</key><real>12.1807</real>
+</dict>
+<dict>
+<key>name</key><string>TMR</string>
+<key>vector</key><integer>221</integer>
+<key>events</key><integer>108</integer>
+<key>events_per_s</key><real>38.6917</real>
+</dict>
+<dict>
+<key>name</key><string>IPI</string>
+<key>vector</key><integer>222</integer>
+<key>events</key><integer>92</integer>
+<key>events_per_s</key><real>32.9596</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>cpu</key><integer>1</integer>
+<key>vectors</key>
+<array>
+<dict>
+<key>name</key><string>TMR</string>
+<key>vector</key><integer>221</integer>
+<key>events</key><integer>126</integer>
+<key>events_per_s</key><real>45.1403</real>
+</dict>
+<dict>
+<key>name</key><string>IPI</string>
+<key>vector</key><integer>222</integer>
+<key>events</key><integer>183</integer>
+<key>events_per_s</key><real>65.561</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>cpu</key><integer>2</integer>
+<key>vectors</key>
+<array>
+<dict>
+<key>name</key><string>TMR</string>
+<key>vector</key><integer>221</integer>
+<key>events</key><integer>41</integer>
+<key>events_per_s</key><real>14.6885</real>
+</dict>
+<dict>
+<key>name</key><string>IPI</string>
+<key>vector</key><integer>222</integer>
+<key>events</key><integer>152</integer>
+<key>events_per_s</key><real>54.455</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>cpu</key><integer>3</integer>
+<key>vectors</key>
+<array>
+<dict>
+<key>name</key><string>TMR</string>
+<key>vector</key><integer>221</integer>
+<key>events</key><integer>97</integer>
+<key>events_per_s</key><real>34.7509</real>
+</dict>
+<dict>
+<key>name</key><string>IPI</string>
+<key>vector</key><integer>222</integer>
+<key>events</key><integer>72</integer>
+<key>events_per_s</key><real>25.7945</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>cpu</key><integer>4</integer>
+<key>vectors</key>
+<array>
+<dict>
+<key>name</key><string>TMR</string>
+<key>vector</key><integer>221</integer>
+<key>events</key><integer>10</integer>
+<key>events_per_s</key><real>3.58257</real>
+</dict>
+<dict>
+<key>name</key><string>IPI</string>
+<key>vector</key><integer>222</integer>
+<key>events</key><integer>104</integer>
+<key>events_per_s</key><real>37.2587</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>cpu</key><integer>5</integer>
+<key>vectors</key>
+<array>
+<dict>
+<key>name</key><string>TMR</string>
+<key>vector</key><integer>221</integer>
+<key>events</key><integer>94</integer>
+<key>events_per_s</key><real>33.6761</real>
+</dict>
+<dict>
+<key>name</key><string>IPI</string>
+<key>vector</key><integer>222</integer>
+<key>events</key><integer>140</integer>
+<key>events_per_s</key><real>50.1559</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>cpu</key><integer>6</integer>
+<key>vectors</key>
+<array>
+<dict>
+<key>name</key><string>TMR</string>
+<key>vector</key><integer>221</integer>
+<key>events</key><integer>53</integer>
+<key>events_per_s</key><real>18.9876</real>
+</dict>
+<dict>
+<key>name</key><string>IPI</string>
+<key>vector</key><integer>222</integer>
+<key>events</key><integer>150</integer>
+<key>events_per_s</key><real>53.7385</real>
+</dict>
+</array>
+</dict>
+<dict>
+<key>cpu</key><integer>7</integer>
+<key>vectors</key>
+<array>
+<dict>
+<key>name</key><string>TMR</string>
+<key>vector</key><integer>221</integer>
+<key>events</key><integer>1</integer>
+<key>events_per_s</key><real>0.358257</real>
+</dict>
+<dict>
+<key>name</key><string>IPI</string>
+<key>vector</key><integer>222</integer>
+<key>events</key><integer>28</integer>
+<key>events_per_s</key><real>10.0312</real>
+</dict>
+</array>
+</dict>
+</array>
+<key>processor</key>
+<dict>
+<key>llc_flushed_ratio</key><real>0</real>
+<key>packages</key>
+<array>
+<dict>
+<key>package</key><integer>0</integer>
+<key>c_state_ns</key><integer>0</integer>
+<key>c_state_ratio</key><real>0</real>
+<key>c_states</key>
+<array>
+<dict>
+<key>name</key><string>C2</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+<dict>
+<key>name</key><string>C3</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+<dict>
+<key>name</key><string>C6</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+<dict>
+<key>name</key><string>C7</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+</array>
+<key>cores</key>
+<array>
+<dict>
+<key>core</key><integer>0</integer>
+<key>c_state_ns</key><integer>0</integer>
+<key>c_state_ratio</key><real>0</real>
+<key>c_states</key>
+<array>
+<dict>
+<key>name</key><string>C3</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+<dict>
+<key>name</key><string>C6</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+<dict>
+<key>name</key><string>C7</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+</array>
+<key>cpus</key>
+<array>
+<dict>
+<key>cpu</key><integer>0</integer>
+<key>duty_cycles</key>
+<array>
+<dict>
+<key>interval_ns</key><integer>16000</integer>
+<key>active_count</key><integer>162</integer>
+<key>active_per_s</key><real>58.0376</real>
+<key>idle_count</key><integer>4</integer>
+<key>idle_per_s</key><real>58.0376</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>32000</integer>
+<key>active_count</key><integer>6</integer>
+<key>active_per_s</key><real>2.14954</real>
+<key>idle_count</key><integer>15</integer>
+<key>idle_per_s</key><real>2.14954</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>64000</integer>
+<key>active_count</key><integer>28</integer>
+<key>active_per_s</key><real>10.0312</real>
+<key>idle_count</key><integer>104</integer>
+<key>idle_per_s</key><real>10.0312</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>128000</integer>
+<key>active_count</key><integer>62</integer>
+<key>active_per_s</key><real>22.2119</real>
+<key>idle_count</key><integer>8</integer>
+<key>idle_per_s</key><real>22.2119</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>256000</integer>
+<key>active_count</key><integer>23</integer>
+<key>active_per_s</key><real>8.2399</real>
+<key>idle_count</key><integer>5</integer>
+<key>idle_per_s</key><real>8.2399</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>512000</integer>
+<key>active_count</key><integer>2</integer>
+<key>active_per_s</key><real>0.716513</real>
+<key>idle_count</key><integer>4</integer>
+<key>idle_per_s</key><real>0.716513</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>1024000</integer>
+<key>active_count</key><integer>1</integer>
+<key>active_per_s</key><real>0.358257</real>
+<key>idle_count</key><integer>3</integer>
+<key>idle_per_s</key><real>0.358257</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>2048000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>19</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>4096000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>20</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>8192000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>31</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>16384000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>29</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>32768000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>26</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+</array>
+</dict>
+<dict>
+<key>core</key><integer>1</integer>
+<key>c_state_ns</key><integer>0</integer>
+<key>c_state_ratio</key><real>0</real>
+<key>c_states</key>
+<array>
+<dict>
+<key>name</key><string>C3</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+<dict>
+<key>name</key><string>C6</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+<dict>
+<key>name</key><string>C7</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+</array>
+<key>cpus</key>
+<array>
+<dict>
+<key>cpu</key><integer>1</integer>
+<key>duty_cycles</key>
+<array>
+<dict>
+<key>interval_ns</key><integer>16000</integer>
+<key>active_count</key><integer>98</integer>
+<key>active_per_s</key><real>35.1092</real>
+<key>idle_count</key><integer>3</integer>
+<key>idle_per_s</key><real>35.1092</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>32000</integer>
+<key>active_count</key><integer>24</integer>
+<key>active_per_s</key><real>8.59816</real>
+<key>idle_count</key><integer>21</integer>
+<key>idle_per_s</key><real>8.59816</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>64000</integer>
+<key>active_count</key><integer>51</integer>
+<key>active_per_s</key><real>18.2711</real>
+<key>idle_count</key><integer>43</integer>
+<key>idle_per_s</key><real>18.2711</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>128000</integer>
+<key>active_count</key><integer>34</integer>
+<key>active_per_s</key><real>12.1807</real>
+<key>idle_count</key><integer>12</integer>
+<key>idle_per_s</key><real>12.1807</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>256000</integer>
+<key>active_count</key><integer>13</integer>
+<key>active_per_s</key><real>4.65734</real>
+<key>idle_count</key><integer>6</integer>
+<key>idle_per_s</key><real>4.65734</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>512000</integer>
+<key>active_count</key><integer>3</integer>
+<key>active_per_s</key><real>1.07477</real>
+<key>idle_count</key><integer>5</integer>
+<key>idle_per_s</key><real>1.07477</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>1024000</integer>
+<key>active_count</key><integer>2</integer>
+<key>active_per_s</key><real>0.716513</real>
+<key>idle_count</key><integer>4</integer>
+<key>idle_per_s</key><real>0.716513</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>2048000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>19</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>4096000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>21</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>8192000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>25</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>16384000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>25</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>32768000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>29</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+</array>
+</dict>
+<dict>
+<key>core</key><integer>2</integer>
+<key>c_state_ns</key><integer>0</integer>
+<key>c_state_ratio</key><real>0</real>
+<key>c_states</key>
+<array>
+<dict>
+<key>name</key><string>C3</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+<dict>
+<key>name</key><string>C6</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+<dict>
+<key>name</key><string>C7</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+</array>
+<key>cpus</key>
+<array>
+<dict>
+<key>cpu</key><integer>2</integer>
+<key>duty_cycles</key>
+<array>
+<dict>
+<key>interval_ns</key><integer>16000</integer>
+<key>active_count</key><integer>79</integer>
+<key>active_per_s</key><real>28.3023</real>
+<key>idle_count</key><integer>5</integer>
+<key>idle_per_s</key><real>28.3023</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>32000</integer>
+<key>active_count</key><integer>25</integer>
+<key>active_per_s</key><real>8.95642</real>
+<key>idle_count</key><integer>29</integer>
+<key>idle_per_s</key><real>8.95642</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>64000</integer>
+<key>active_count</key><integer>39</integer>
+<key>active_per_s</key><real>13.972</real>
+<key>idle_count</key><integer>17</integer>
+<key>idle_per_s</key><real>13.972</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>128000</integer>
+<key>active_count</key><integer>26</integer>
+<key>active_per_s</key><real>9.31467</real>
+<key>idle_count</key><integer>6</integer>
+<key>idle_per_s</key><real>9.31467</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>256000</integer>
+<key>active_count</key><integer>11</integer>
+<key>active_per_s</key><real>3.94082</real>
+<key>idle_count</key><integer>7</integer>
+<key>idle_per_s</key><real>3.94082</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>512000</integer>
+<key>active_count</key><integer>1</integer>
+<key>active_per_s</key><real>0.358257</real>
+<key>idle_count</key><integer>2</integer>
+<key>idle_per_s</key><real>0.358257</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>1024000</integer>
+<key>active_count</key><integer>1</integer>
+<key>active_per_s</key><real>0.358257</real>
+<key>idle_count</key><integer>1</integer>
+<key>idle_per_s</key><real>0.358257</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>2048000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>9</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>4096000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>12</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>8192000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>14</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>16384000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>26</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>32768000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>26</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+</array>
+</dict>
+<dict>
+<key>core</key><integer>3</integer>
+<key>c_state_ns</key><integer>0</integer>
+<key>c_state_ratio</key><real>0</real>
+<key>c_states</key>
+<array>
+<dict>
+<key>name</key><string>C3</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+<dict>
+<key>name</key><string>C6</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+<dict>
+<key>name</key><string>C7</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+</array>
+<key>cpus</key>
+<array>
+<dict>
+<key>cpu</key><integer>3</integer>
+<key>duty_cycles</key>
+<array>
+<dict>
+<key>interval_ns</key><integer>16000</integer>
+<key>active_count</key><integer>37</integer>
+<key>active_per_s</key><real>13.2555</real>
+<key>idle_count</key><integer>3</integer>
+<key>idle_per_s</key><real>13.2555</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>32000</integer>
+<key>active_count</key><integer>9</integer>
+<key>active_per_s</key><real>3.22431</real>
+<key>idle_count</key><integer>12</integer>
+<key>idle_per_s</key><real>3.22431</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>64000</integer>
+<key>active_count</key><integer>15</integer>
+<key>active_per_s</key><real>5.37385</real>
+<key>idle_count</key><integer>14</integer>
+<key>idle_per_s</key><real>5.37385</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>128000</integer>
+<key>active_count</key><integer>11</integer>
+<key>active_per_s</key><real>3.94082</real>
+<key>idle_count</key><integer>2</integer>
+<key>idle_per_s</key><real>3.94082</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>256000</integer>
+<key>active_count</key><integer>5</integer>
+<key>active_per_s</key><real>1.79128</real>
+<key>idle_count</key><integer>2</integer>
+<key>idle_per_s</key><real>1.79128</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>512000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>1</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>1024000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>0</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>2048000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>1</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>4096000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>1</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>8192000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>4</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>16384000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>10</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>32768000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>11</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+</array>
+</dict>
+</array>
+</dict>
+<dict>
+<key>package</key><integer>1</integer>
+<key>c_state_ns</key><integer>0</integer>
+<key>c_state_ratio</key><real>0</real>
+<key>c_states</key>
+<array>
+<dict>
+<key>name</key><string>C2</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+<dict>
+<key>name</key><string>C3</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+<dict>
+<key>name</key><string>C6</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+<dict>
+<key>name</key><string>C7</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+<dict>
+<key>name</key><string>C8</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+<dict>
+<key>name</key><string>C9</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+<dict>
+<key>name</key><string>C10</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+</array>
+<key>cores</key>
+<array>
+<dict>
+<key>core</key><integer>4</integer>
+<key>c_state_ns</key><integer>0</integer>
+<key>c_state_ratio</key><real>0</real>
+<key>c_states</key>
+<array>
+<dict>
+<key>name</key><string>C3</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+<dict>
+<key>name</key><string>C6</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+<dict>
+<key>name</key><string>C7</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+</array>
+<key>cpus</key>
+<array>
+<dict>
+<key>cpu</key><integer>4</integer>
+<key>duty_cycles</key>
+<array>
+<dict>
+<key>interval_ns</key><integer>16000</integer>
+<key>active_count</key><integer>23</integer>
+<key>active_per_s</key><real>8.2399</real>
+<key>idle_count</key><integer>4</integer>
+<key>idle_per_s</key><real>8.2399</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>32000</integer>
+<key>active_count</key><integer>10</integer>
+<key>active_per_s</key><real>3.58257</real>
+<key>idle_count</key><integer>21</integer>
+<key>idle_per_s</key><real>3.58257</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>64000</integer>
+<key>active_count</key><integer>30</integer>
+<key>active_per_s</key><real>10.7477</real>
+<key>idle_count</key><integer>13</integer>
+<key>idle_per_s</key><real>10.7477</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>128000</integer>
+<key>active_count</key><integer>22</integer>
+<key>active_per_s</key><real>7.88165</real>
+<key>idle_count</key><integer>5</integer>
+<key>idle_per_s</key><real>7.88165</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>256000</integer>
+<key>active_count</key><integer>9</integer>
+<key>active_per_s</key><real>3.22431</real>
+<key>idle_count</key><integer>3</integer>
+<key>idle_per_s</key><real>3.22431</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>512000</integer>
+<key>active_count</key><integer>1</integer>
+<key>active_per_s</key><real>0.358257</real>
+<key>idle_count</key><integer>0</integer>
+<key>idle_per_s</key><real>0.358257</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>1024000</integer>
+<key>active_count</key><integer>1</integer>
+<key>active_per_s</key><real>0.358257</real>
+<key>idle_count</key><integer>2</integer>
+<key>idle_per_s</key><real>0.358257</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>2048000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>8</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>4096000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>6</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>8192000</integer>
+<key>active_count</key><integer>1</integer>
+<key>active_per_s</key><real>0.358257</real>
+<key>idle_count</key><integer>11</integer>
+<key>idle_per_s</key><real>0.358257</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>16384000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>3</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>32768000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>5</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+</array>
+</dict>
+<dict>
+<key>core</key><integer>5</integer>
+<key>c_state_ns</key><integer>0</integer>
+<key>c_state_ratio</key><real>0</real>
+<key>c_states</key>
+<array>
+<dict>
+<key>name</key><string>C3</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+<dict>
+<key>name</key><string>C6</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+<dict>
+<key>name</key><string>C7</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+</array>
+<key>cpus</key>
+<array>
+<dict>
+<key>cpu</key><integer>5</integer>
+<key>duty_cycles</key>
+<array>
+<dict>
+<key>interval_ns</key><integer>16000</integer>
+<key>active_count</key><integer>97</integer>
+<key>active_per_s</key><real>34.7509</real>
+<key>idle_count</key><integer>3</integer>
+<key>idle_per_s</key><real>34.7509</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>32000</integer>
+<key>active_count</key><integer>20</integer>
+<key>active_per_s</key><real>7.16513</real>
+<key>idle_count</key><integer>60</integer>
+<key>idle_per_s</key><real>7.16513</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>64000</integer>
+<key>active_count</key><integer>18</integer>
+<key>active_per_s</key><real>6.44862</real>
+<key>idle_count</key><integer>38</integer>
+<key>idle_per_s</key><real>6.44862</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>128000</integer>
+<key>active_count</key><integer>9</integer>
+<key>active_per_s</key><real>3.22431</real>
+<key>idle_count</key><integer>1</integer>
+<key>idle_per_s</key><real>3.22431</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>256000</integer>
+<key>active_count</key><integer>2</integer>
+<key>active_per_s</key><real>0.716513</real>
+<key>idle_count</key><integer>1</integer>
+<key>idle_per_s</key><real>0.716513</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>512000</integer>
+<key>active_count</key><integer>1</integer>
+<key>active_per_s</key><real>0.358257</real>
+<key>idle_count</key><integer>1</integer>
+<key>idle_per_s</key><real>0.358257</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>1024000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>0</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>2048000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>4</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>4096000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>4</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>8192000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>6</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>16384000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>2</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>32768000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>4</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+</array>
+</dict>
+<dict>
+<key>core</key><integer>6</integer>
+<key>c_state_ns</key><integer>0</integer>
+<key>c_state_ratio</key><real>0</real>
+<key>c_states</key>
+<array>
+<dict>
+<key>name</key><string>C3</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+<dict>
+<key>name</key><string>C6</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+<dict>
+<key>name</key><string>C7</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+</array>
+<key>cpus</key>
+<array>
+<dict>
+<key>cpu</key><integer>6</integer>
+<key>duty_cycles</key>
+<array>
+<dict>
+<key>interval_ns</key><integer>16000</integer>
+<key>active_count</key><integer>78</integer>
+<key>active_per_s</key><real>27.944</real>
+<key>idle_count</key><integer>6</integer>
+<key>idle_per_s</key><real>27.944</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>32000</integer>
+<key>active_count</key><integer>40</integer>
+<key>active_per_s</key><real>14.3303</real>
+<key>idle_count</key><integer>29</integer>
+<key>idle_per_s</key><real>14.3303</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>64000</integer>
+<key>active_count</key><integer>31</integer>
+<key>active_per_s</key><real>11.106</real>
+<key>idle_count</key><integer>36</integer>
+<key>idle_per_s</key><real>11.106</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>128000</integer>
+<key>active_count</key><integer>31</integer>
+<key>active_per_s</key><real>11.106</real>
+<key>idle_count</key><integer>7</integer>
+<key>idle_per_s</key><real>11.106</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>256000</integer>
+<key>active_count</key><integer>2</integer>
+<key>active_per_s</key><real>0.716513</real>
+<key>idle_count</key><integer>2</integer>
+<key>idle_per_s</key><real>0.716513</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>512000</integer>
+<key>active_count</key><integer>1</integer>
+<key>active_per_s</key><real>0.358257</real>
+<key>idle_count</key><integer>1</integer>
+<key>idle_per_s</key><real>0.358257</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>1024000</integer>
+<key>active_count</key><integer>1</integer>
+<key>active_per_s</key><real>0.358257</real>
+<key>idle_count</key><integer>0</integer>
+<key>idle_per_s</key><real>0.358257</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>2048000</integer>
+<key>active_count</key><integer>1</integer>
+<key>active_per_s</key><real>0.358257</real>
+<key>idle_count</key><integer>6</integer>
+<key>idle_per_s</key><real>0.358257</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>4096000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>10</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>8192000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>9</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>16384000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>21</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>32768000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>18</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+</array>
+</dict>
+<dict>
+<key>core</key><integer>7</integer>
+<key>c_state_ns</key><integer>0</integer>
+<key>c_state_ratio</key><real>0</real>
+<key>c_states</key>
+<array>
+<dict>
+<key>name</key><string>C3</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+<dict>
+<key>name</key><string>C6</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+<dict>
+<key>name</key><string>C7</string>
+<key>used_ns</key><integer>0</integer>
+<key>used_ratio</key><real>0</real>
+</dict>
+</array>
+<key>cpus</key>
+<array>
+<dict>
+<key>cpu</key><integer>7</integer>
+<key>duty_cycles</key>
+<array>
+<dict>
+<key>interval_ns</key><integer>16000</integer>
+<key>active_count</key><integer>11</integer>
+<key>active_per_s</key><real>3.94082</real>
+<key>idle_count</key><integer>0</integer>
+<key>idle_per_s</key><real>3.94082</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>32000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>2</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>64000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>2</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>128000</integer>
+<key>active_count</key><integer>2</integer>
+<key>active_per_s</key><real>0.716513</real>
+<key>idle_count</key><integer>0</integer>
+<key>idle_per_s</key><real>0.716513</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>256000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>0</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>512000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>0</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>1024000</integer>
+<key>active_count</key><integer>1</integer>
+<key>active_per_s</key><real>0.358257</real>
+<key>idle_count</key><integer>1</integer>
+<key>idle_per_s</key><real>0.358257</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>2048000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>1</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>4096000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>1</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>8192000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>0</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>16384000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>0</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+<dict>
+<key>interval_ns</key><integer>32768000</integer>
+<key>active_count</key><integer>0</integer>
+<key>active_per_s</key><real>0</real>
+<key>idle_count</key><integer>1</integer>
+<key>idle_per_s</key><real>0</real>
+</dict>
+</array>
+</dict>
+</array>
+</dict>
+</array>
+</dict>
+</array>
+</dict>
+</dict>
+</plist>
\ No newline at end of file
diff --git a/catapult/telemetry/telemetry/internal/testing/proc_meminfo b/catapult/telemetry/telemetry/internal/testing/proc_meminfo
new file mode 100644
index 0000000..600101a
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/proc_meminfo
@@ -0,0 +1,43 @@
+MemTotal:       65897648 kB
+MemFree:         1084568 kB
+Buffers:         1221540 kB
+Cached:         38339400 kB
+SwapCached:       119564 kB
+Active:         38582600 kB
+Inactive:       20341624 kB
+Active(anon):   17113656 kB
+Inactive(anon):  2566996 kB
+Active(file):   21468944 kB
+Inactive(file): 17774628 kB
+Unevictable:       27280 kB
+Mlocked:           27280 kB
+SwapTotal:       1048572 kB
+SwapFree:              8 kB
+Dirty:               452 kB
+Writeback:             0 kB
+AnonPages:      19271176 kB
+Mapped:           724456 kB
+Shmem:            290412 kB
+Slab:            5047568 kB
+SReclaimable:    4773516 kB
+SUnreclaim:       274052 kB
+KernelStack:       21824 kB
+PageTables:       133852 kB
+NFS_Unstable:          0 kB
+Bounce:                0 kB
+WritebackTmp:          0 kB
+CommitLimit:    33997396 kB
+Committed_AS:   36218868 kB
+VmallocTotal:   34359738367 kB
+VmallocUsed:      338372 kB
+VmallocChunk:   34325659540 kB
+HardwareCorrupted:     0 kB
+AnonHugePages:   9410560 kB
+HugePages_Total:       0
+HugePages_Free:        0
+HugePages_Rsvd:        0
+HugePages_Surp:        0
+Hugepagesize:       2048 kB
+DirectMap4k:     6369212 kB
+DirectMap2M:    58564608 kB
+DirectMap1G:     2097152 kB
diff --git a/catapult/telemetry/telemetry/internal/testing/sample_perf_report_output.txt b/catapult/telemetry/telemetry/internal/testing/sample_perf_report_output.txt
new file mode 100644
index 0000000..2f7dd93
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/sample_perf_report_output.txt
@@ -0,0 +1,10 @@
+Failed to open /data/app-lib/com.google.android.apps.chrome-2/libchrome.2016.0.so, continuing without symbols
+Failed to open /data/dalvik-cache/arm/data@app@com.google.android.apps.chrome-2.apk@classes.dex, continuing without symbols
+No kallsyms or vmlinux with build-id 3b63ca692cbb756f837744e061d02a790bf637d5 was found
+[kernel.kallsyms] with build id 3b63ca692cbb756f837744e061d02a790bf637d5 not found, continuing without symbols
+Failed to open /system/lib/libc.so, continuing without symbols
+Failed to open /system/lib/libm.so, continuing without symbols
+Failed to open /tmp/perf-18246.map, continuing without symbols
+Failed to open /system/lib/libart.so, continuing without symbols
+Failed to open /init, continuing without symbols
+Failed to open [vectors], continuing without symbols
diff --git a/catapult/telemetry/telemetry/internal/testing/sample_vtune_db_output b/catapult/telemetry/telemetry/internal/testing/sample_vtune_db_output
new file mode 100644
index 0000000..71c6eaa
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/sample_vtune_db_output
@@ -0,0 +1,316 @@
+(lp0
+(V/data/app-lib/com.google.android.apps.chrome-1/libchrome.2019.0.so
+p1
+V/tmp/amplxe-tmp-dominikg/modules.android/libchrome.2019.0.so/00a9b3533743e080ab94e760bc891ab8/libchrome.2019.0.so
+p2
+tp3
+a(V/system/bin/app_process
+p4
+Ntp5
+a(V/system/bin/linker
+p6
+Ntp7
+a(V/system/lib/libcutils.so
+p8
+Ntp9
+a(V/system/lib/liblog.so
+p10
+Ntp11
+a(V/system/lib/libc.so
+p12
+V/tmp/amplxe-tmp-dominikg/modules.android/libc.so/abc82d050be774f342adeb7e91fe5a0a/libc.so
+p13
+tp14
+a(V/system/lib/libstdc++.so
+p15
+Ntp16
+a(V/system/lib/libm.so
+p17
+V/tmp/amplxe-tmp-dominikg/modules.android/libm.so/55c87bc955f6c7bf84cce8522fb1e88b/libm.so
+p18
+tp19
+a(V/system/lib/libutils.so
+p20
+Ntp21
+a(V/system/lib/libcorkscrew.so
+p22
+Ntp23
+a(V/system/lib/libgccdemangle.so
+p24
+Ntp25
+a(V/system/lib/libz.so
+p26
+Ntp27
+a(V/system/lib/libbinder.so
+p28
+Ntp29
+a(V/system/lib/libandroid_runtime.so
+p30
+Ntp31
+a(V/system/lib/libandroidfw.so
+p32
+Ntp33
+a(V/system/lib/libskia.so
+p34
+Ntp35
+a(V/system/lib/libemoji.so
+p36
+Ntp37
+a(V/system/lib/libjpeg.so
+p38
+Ntp39
+a(V/system/lib/libexpat.so
+p40
+Ntp41
+a(V/system/lib/libEGL.so
+p42
+Ntp43
+a(V/system/lib/libGLES_trace.so
+p44
+Ntp45
+a(V/system/lib/libstlport.so
+p46
+Ntp47
+a(V/system/lib/libGLESv2.so
+p48
+Ntp49
+a(V/system/lib/libnativehelper.so
+p50
+Ntp51
+a(V/system/lib/libnetutils.so
+p52
+Ntp53
+a(V/system/lib/libui.so
+p54
+Ntp55
+a(V/system/lib/libhardware.so
+p56
+Ntp57
+a(V/system/lib/libsync.so
+p58
+Ntp59
+a(V/system/lib/libgui.so
+p60
+Ntp61
+a(V/system/lib/libcamera_client.so
+p62
+Ntp63
+a(V/system/lib/libcamera_metadata.so
+p64
+Ntp65
+a(V/system/lib/libsqlite.so
+p66
+Ntp67
+a(V/system/lib/libicuuc.so
+p68
+Ntp69
+a(V/system/lib/libgabi++.so
+p70
+Ntp71
+a(V/system/lib/libicui18n.so
+p72
+Ntp73
+a(V/system/lib/libdvm.so
+p74
+V/tmp/amplxe-tmp-dominikg/modules.android/libdvm.so/8f6c9167e20b377dc90646a1bac0e201/libdvm.so
+p75
+tp76
+a(V/system/lib/libselinux.so
+p77
+Ntp78
+a(V/system/lib/libGLESv1_CM.so
+p79
+Ntp80
+a(V/system/lib/libETC1.so
+p81
+Ntp82
+a(V/system/lib/libhardware_legacy.so
+p83
+Ntp84
+a(V/system/lib/libwpa_client.so
+p85
+Ntp86
+a(V/system/lib/libsonivox.so
+p87
+Ntp88
+a(V/system/lib/libcrypto.so
+p89
+Ntp90
+a(V/system/lib/libssl.so
+p91
+Ntp92
+a(V/system/lib/libmedia.so
+p93
+Ntp94
+a(V/system/lib/libstagefright_foundation.so
+p95
+Ntp96
+a(V/system/lib/libaudioutils.so
+p97
+Ntp98
+a(V/system/lib/libspeexresampler.so
+p99
+Ntp100
+a(V/system/lib/libaudioresample.so
+p101
+Ntp102
+a(V/system/lib/libusbhost.so
+p103
+Ntp104
+a(V/system/lib/libharfbuzz_ng.so
+p105
+Ntp106
+a(V/system/lib/libhwui.so
+p107
+Ntp108
+a(V/system/lib/libRS.so
+p109
+Ntp110
+a(V/system/lib/libbcc.so
+p111
+Ntp112
+a(V/system/lib/libbcinfo.so
+p113
+Ntp114
+a(V/system/lib/libLLVM.so
+p115
+Ntp116
+a(V/system/lib/libRScpp.so
+p117
+Ntp118
+a(V/system/lib/libjavacore.so
+p119
+Ntp120
+a(V/system/lib/libdrmframework_jni.so
+p121
+Ntp122
+a(V/system/lib/libdrmframework.so
+p123
+Ntp124
+a(V/system/lib/libstagefright_yuv.so
+p125
+Ntp126
+a(V/system/lib/libdrm.so
+p127
+Ntp128
+a(V/system/lib/libmedia_jni.so
+p129
+Ntp130
+a(V/system/lib/libstagefright_omx.so
+p131
+Ntp132
+a(V/system/lib/libvorbisidec.so
+p133
+Ntp134
+a(V/system/lib/libva.so
+p135
+Ntp136
+a(V/system/lib/libva-android.so
+p137
+Ntp138
+a(V/system/lib/libva-tpi.so
+p139
+Ntp140
+a(V/system/lib/libva_videodecoder.so
+p141
+Ntp142
+a(V/system/lib/libmixvbp.so
+p143
+Ntp144
+a(V/system/lib/libasfparser.so
+p145
+Ntp146
+a(V/system/lib/libstagefright_enc_common.so
+p147
+Ntp148
+a(V/system/lib/libstagefright_avc_common.so
+p149
+Ntp150
+a(V/system/lib/libmultidisplay.so
+p151
+Ntp152
+a(V/system/lib/libsepdrm.so
+p153
+Ntp154
+a(V/system/lib/libmtp.so
+p155
+Ntp156
+a(V/system/lib/libexif.so
+p157
+Ntp158
+a(V/system/lib/libstagefright_amrnb_common.so
+p159
+Ntp160
+a(V/system/lib/libexif_jni.so
+p161
+Ntp162
+a(V/system/lib/libsoundpool.so
+p163
+Ntp164
+a(V/system/lib/libvideoeditor_jni.so
+p165
+Ntp166
+a(V/system/lib/libaudioflinger.so
+p167
+Ntp168
+a(V/system/lib/libcommon_time_client.so
+p169
+Ntp170
+a(V/system/lib/libnbaio.so
+p171
+Ntp172
+a(V/system/lib/libeffects.so
+p173
+Ntp174
+a(V/system/lib/libpowermanager.so
+p175
+Ntp176
+a(V/system/lib/libvideoeditor_core.so
+p177
+Ntp178
+a(V/system/lib/libvideoeditor_osal.so
+p179
+Ntp180
+a(V/system/lib/libvideoeditor_videofilters.so
+p181
+Ntp182
+a(V/system/lib/libvideoeditorplayer.so
+p183
+Ntp184
+a(V/system/lib/libsharedbuffer.so
+p185
+Ntp186
+a(V/system/lib/libva_videoencoder.so
+p187
+Ntp188
+a(V/system/lib/libintelmetadatabuffer.so
+p189
+Ntp190
+a(V/system/lib/librs_jni.so
+p191
+Ntp192
+a(V/system/lib/libandroid.so
+p193
+Ntp194
+a(V/system/lib/libharfbuzz.so
+p195
+Ntp196
+a(V/system/lib/libstagefright.so
+p197
+Ntp198
+a(V/system/lib/libwebcore.so
+p199
+Ntp200
+a(V/system/lib/libchromium_net.so
+p201
+Ntp202
+a(V/data/app-lib/com.google.android.apps.chrome-1/libchromium_android_linker.so
+p203
+Ntp204
+a(V/system/lib/libjnigraphics.so
+p205
+Ntp206
+a(V/dev/ashmem/dalvik-jit-code-cache (deleted)
+p207
+Ntp208
+a.
\ No newline at end of file
diff --git a/catapult/telemetry/telemetry/internal/testing/screen_3_frames.mov b/catapult/telemetry/telemetry/internal/testing/screen_3_frames.mov
new file mode 100644
index 0000000..7a90f07
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/screen_3_frames.mov
Binary files differ
diff --git a/catapult/telemetry/telemetry/internal/testing/screenshot_test.html b/catapult/telemetry/telemetry/internal/testing/screenshot_test.html
new file mode 100644
index 0000000..239e9a7
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/screenshot_test.html
@@ -0,0 +1,19 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <style>
+  html, body {
+    margin: 0;
+    padding: 0;
+  }
+  #colorful {
+    width: 32px;
+    height: 32px;
+    background-color: rgb(217, 115, 43);
+  }
+  </style>
+</head>
+<body>
+  <div id="colorful"></div>
+</body>
+</html>
diff --git a/catapult/telemetry/telemetry/internal/testing/scrollable_page.html b/catapult/telemetry/telemetry/internal/testing/scrollable_page.html
new file mode 100644
index 0000000..c50175b
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/scrollable_page.html
@@ -0,0 +1,33 @@
+<!doctype html>
+<html>
+  <head>
+    <style type="text/css">
+      body { height: 200vh; }
+    </style>
+  </head>
+  <body>
+    <!--
+    The info below is used only by the smoothness unittest. Use the URL below
+    to get diagnostic info when this page fails a test because the scroll bar
+    did not show up.
+    scrollable_page.html?show_scroll_diagnosis_info
+    -->
+    <div id="info"></div>
+    <script>
+      var txt = "<h3>Screen info (Used for diagnosis):</h3>"
+                + "<p>Total width/height: " + screen.width + "*" + screen.height
+                + "</p><p>Available width/height: " + screen.availWidth + "*" 
+                + screen.availHeight + "</p><p>Color depth: "
+                + screen.colorDepth + "</p><p>Color resolution: "
+                + screen.pixelDepth + "</p><p>Body scrollable height: "
+                + document.body.scrollHeight + "</p><p>Body offset height: "
+                + document.body.offsetHeight + "</p><p>Body client height: "
+                + document.body.clientHeight + "</p><p>Window inner height: "
+                + window.innerHeight + "</p><p>Window device Pixel Ratio: "
+                + window.devicePixelRatio + "</p>";
+      if (window.location.search.substr(1) == "show_scroll_diagnosis_info") {
+          document.getElementById("info").innerHTML = txt;
+      }
+    </script>
+  </body>
+</html>
diff --git a/catapult/telemetry/telemetry/internal/testing/simple_app/background.js b/catapult/telemetry/telemetry/internal/testing/simple_app/background.js
new file mode 100644
index 0000000..01412a3
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/simple_app/background.js
@@ -0,0 +1,6 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+chrome.app.window.create('main.html');
+chrome.app.window.create('second.html');
diff --git a/catapult/telemetry/telemetry/internal/testing/simple_app/main.html b/catapult/telemetry/telemetry/internal/testing/simple_app/main.html
new file mode 100644
index 0000000..bb67633
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/simple_app/main.html
@@ -0,0 +1,5 @@
+<html>
+<body>
+This is the simple telemetry webapp main page.
+</body>
+</html>
diff --git a/catapult/telemetry/telemetry/internal/testing/simple_app/manifest.json b/catapult/telemetry/telemetry/internal/testing/simple_app/manifest.json
new file mode 100644
index 0000000..5275205
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/simple_app/manifest.json
@@ -0,0 +1,11 @@
+{
+  "description": "Simple Telemetry Test WebApp",
+  "name": "Simple Telemetry Test WebApp",
+  "app": {
+    "background": {
+      "scripts": ["background.js"]
+    }
+  },
+  "manifest_version": 2,
+  "version": "1.0"
+}
diff --git a/catapult/telemetry/telemetry/internal/testing/simple_app/second.html b/catapult/telemetry/telemetry/internal/testing/simple_app/second.html
new file mode 100644
index 0000000..debc8fc
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/simple_app/second.html
@@ -0,0 +1,5 @@
+<html>
+<body>
+This is the simple telemetry webapp secondary page.
+</body>
+</html>
diff --git a/catapult/telemetry/telemetry/internal/testing/simple_extension/background.js b/catapult/telemetry/telemetry/internal/testing/simple_extension/background.js
new file mode 100644
index 0000000..303ef9f
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/simple_extension/background.js
@@ -0,0 +1,8 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var _testVar;
+function setTestVar(x) {
+  _testVar = x;
+}
diff --git a/catapult/telemetry/telemetry/internal/testing/simple_extension/manifest.json b/catapult/telemetry/telemetry/internal/testing/simple_extension/manifest.json
new file mode 100644
index 0000000..d4bb6a3
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/simple_extension/manifest.json
@@ -0,0 +1,9 @@
+{
+  "description": "Simple test extension which has just a background script",
+  "name": "Simple Telemetry Test Extension",
+  "background": {
+    "scripts": ["background.js"]
+  },
+  "manifest_version": 2,
+  "version": "0.1"
+}
diff --git a/catapult/telemetry/telemetry/internal/testing/smaps b/catapult/telemetry/telemetry/internal/testing/smaps
new file mode 100644
index 0000000..7902e52
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/smaps
@@ -0,0 +1,1065 @@
+00400000-004a2000 r-xp 00000000 08:01 2883606                            /bin/zsh4
+Size:                648 kB
+Rss:                 584 kB
+Pss:                  30 kB
+Shared_Clean:        584 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:          584 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+006a1000-006a2000 r--p 000a1000 08:01 2883606                            /bin/zsh4
+Size:                  4 kB
+Rss:                   4 kB
+Pss:                   4 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         4 kB
+Referenced:            4 kB
+Anonymous:             4 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+006a2000-006a8000 rw-p 000a2000 08:01 2883606                            /bin/zsh4
+Size:                 24 kB
+Rss:                  24 kB
+Pss:                  24 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:        24 kB
+Referenced:           24 kB
+Anonymous:            24 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+006a8000-006bc000 rw-p 00000000 00:00 0 
+Size:                 80 kB
+Rss:                  44 kB
+Pss:                  44 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:        44 kB
+Referenced:           44 kB
+Anonymous:            44 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+0137c000-0168d000 rw-p 00000000 00:00 0                                  [heap]
+Size:               3140 kB
+Rss:                3116 kB
+Pss:                3116 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:      3116 kB
+Referenced:         3116 kB
+Anonymous:          3116 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff557c58000-7ff557c68000 r-xp 00000000 08:01 7082269                    /usr/lib/zsh/4.3.17/zsh/computil.so
+Size:                 64 kB
+Rss:                  52 kB
+Pss:                   4 kB
+Shared_Clean:         52 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:           52 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff557c68000-7ff557e67000 ---p 00010000 08:01 7082269                    /usr/lib/zsh/4.3.17/zsh/computil.so
+Size:               2044 kB
+Rss:                   0 kB
+Pss:                   0 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:            0 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff557e67000-7ff557e68000 r--p 0000f000 08:01 7082269                    /usr/lib/zsh/4.3.17/zsh/computil.so
+Size:                  4 kB
+Rss:                   4 kB
+Pss:                   4 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         4 kB
+Referenced:            4 kB
+Anonymous:             4 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff557e68000-7ff557e69000 rw-p 00010000 08:01 7082269                    /usr/lib/zsh/4.3.17/zsh/computil.so
+Size:                  4 kB
+Rss:                   4 kB
+Pss:                   4 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         4 kB
+Referenced:            4 kB
+Anonymous:             4 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff557e69000-7ff557e72000 r-xp 00000000 08:01 7082280                    /usr/lib/zsh/4.3.17/zsh/parameter.so
+Size:                 36 kB
+Rss:                  28 kB
+Pss:                   2 kB
+Shared_Clean:         28 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:           28 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff557e72000-7ff558071000 ---p 00009000 08:01 7082280                    /usr/lib/zsh/4.3.17/zsh/parameter.so
+Size:               2044 kB
+Rss:                   0 kB
+Pss:                   0 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:            0 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff558071000-7ff558072000 r--p 00008000 08:01 7082280                    /usr/lib/zsh/4.3.17/zsh/parameter.so
+Size:                  4 kB
+Rss:                   4 kB
+Pss:                   4 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         4 kB
+Referenced:            4 kB
+Anonymous:             4 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff558072000-7ff558073000 rw-p 00009000 08:01 7082280                    /usr/lib/zsh/4.3.17/zsh/parameter.so
+Size:                  4 kB
+Rss:                   4 kB
+Pss:                   4 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         4 kB
+Referenced:            4 kB
+Anonymous:             4 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff558073000-7ff55807a000 r-xp 00000000 08:01 7082272                    /usr/lib/zsh/4.3.17/zsh/zutil.so
+Size:                 28 kB
+Rss:                  24 kB
+Pss:                   1 kB
+Shared_Clean:         24 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:           24 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55807a000-7ff558279000 ---p 00007000 08:01 7082272                    /usr/lib/zsh/4.3.17/zsh/zutil.so
+Size:               2044 kB
+Rss:                   0 kB
+Pss:                   0 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:            0 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff558279000-7ff55827a000 r--p 00006000 08:01 7082272                    /usr/lib/zsh/4.3.17/zsh/zutil.so
+Size:                  4 kB
+Rss:                   4 kB
+Pss:                   4 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         4 kB
+Referenced:            4 kB
+Anonymous:             4 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55827a000-7ff55827b000 rw-p 00007000 08:01 7082272                    /usr/lib/zsh/4.3.17/zsh/zutil.so
+Size:                  4 kB
+Rss:                   4 kB
+Pss:                   4 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         4 kB
+Referenced:            4 kB
+Anonymous:             4 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55827b000-7ff55829d000 r-xp 00000000 08:01 7082291                    /usr/lib/zsh/4.3.17/zsh/complete.so
+Size:                136 kB
+Rss:                 128 kB
+Pss:                   8 kB
+Shared_Clean:        128 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:          128 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55829d000-7ff55849c000 ---p 00022000 08:01 7082291                    /usr/lib/zsh/4.3.17/zsh/complete.so
+Size:               2044 kB
+Rss:                   0 kB
+Pss:                   0 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:            0 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55849c000-7ff55849d000 r--p 00021000 08:01 7082291                    /usr/lib/zsh/4.3.17/zsh/complete.so
+Size:                  4 kB
+Rss:                   4 kB
+Pss:                   4 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         4 kB
+Referenced:            4 kB
+Anonymous:             4 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55849d000-7ff55849e000 rw-p 00022000 08:01 7082291                    /usr/lib/zsh/4.3.17/zsh/complete.so
+Size:                  4 kB
+Rss:                   4 kB
+Pss:                   4 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         4 kB
+Referenced:            4 kB
+Anonymous:             4 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55849e000-7ff5584dd000 r-xp 00000000 08:01 7082277                    /usr/lib/zsh/4.3.17/zsh/zle.so
+Size:                252 kB
+Rss:                 224 kB
+Pss:                  13 kB
+Shared_Clean:        224 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:          224 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff5584dd000-7ff5586dd000 ---p 0003f000 08:01 7082277                    /usr/lib/zsh/4.3.17/zsh/zle.so
+Size:               2048 kB
+Rss:                   0 kB
+Pss:                   0 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:            0 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff5586dd000-7ff5586de000 r--p 0003f000 08:01 7082277                    /usr/lib/zsh/4.3.17/zsh/zle.so
+Size:                  4 kB
+Rss:                   4 kB
+Pss:                   4 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         4 kB
+Referenced:            4 kB
+Anonymous:             4 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff5586de000-7ff5586e5000 rw-p 00040000 08:01 7082277                    /usr/lib/zsh/4.3.17/zsh/zle.so
+Size:                 28 kB
+Rss:                  28 kB
+Pss:                  28 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:        28 kB
+Referenced:           28 kB
+Anonymous:            28 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff5586e5000-7ff5586e7000 r-xp 00000000 08:01 7082293                    /usr/lib/zsh/4.3.17/zsh/terminfo.so
+Size:                  8 kB
+Rss:                   8 kB
+Pss:                   0 kB
+Shared_Clean:          8 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:            8 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff5586e7000-7ff5588e6000 ---p 00002000 08:01 7082293                    /usr/lib/zsh/4.3.17/zsh/terminfo.so
+Size:               2044 kB
+Rss:                   0 kB
+Pss:                   0 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:            0 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff5588e6000-7ff5588e7000 r--p 00001000 08:01 7082293                    /usr/lib/zsh/4.3.17/zsh/terminfo.so
+Size:                  4 kB
+Rss:                   4 kB
+Pss:                   4 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         4 kB
+Referenced:            4 kB
+Anonymous:             4 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff5588e7000-7ff5588e8000 rw-p 00002000 08:01 7082293                    /usr/lib/zsh/4.3.17/zsh/terminfo.so
+Size:                  4 kB
+Rss:                   4 kB
+Pss:                   4 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         4 kB
+Referenced:            4 kB
+Anonymous:             4 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff5588e8000-7ff5588ec000 r-xp 00000000 08:01 6688948                    /usr/lib/libnss_cache.so.2.0 (deleted)
+Size:                 16 kB
+Rss:                  12 kB
+Pss:                   0 kB
+Shared_Clean:         12 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:           12 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff5588ec000-7ff558aeb000 ---p 00004000 08:01 6688948                    /usr/lib/libnss_cache.so.2.0 (deleted)
+Size:               2044 kB
+Rss:                   0 kB
+Pss:                   0 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:            0 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff558aeb000-7ff558aec000 r--p 00003000 08:01 6688948                    /usr/lib/libnss_cache.so.2.0 (deleted)
+Size:                  4 kB
+Rss:                   4 kB
+Pss:                   4 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         4 kB
+Referenced:            4 kB
+Anonymous:             4 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff558aec000-7ff558aed000 rw-p 00004000 08:01 6688948                    /usr/lib/libnss_cache.so.2.0 (deleted)
+Size:                  4 kB
+Rss:                   4 kB
+Pss:                   4 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         4 kB
+Referenced:            4 kB
+Anonymous:             4 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff558aed000-7ff558af9000 r-xp 00000000 08:01 41681577                   /lib/x86_64-linux-gnu/libnss_files-2.15.so
+Size:                 48 kB
+Rss:                  16 kB
+Pss:                   0 kB
+Shared_Clean:         16 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:           16 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff558af9000-7ff558cf8000 ---p 0000c000 08:01 41681577                   /lib/x86_64-linux-gnu/libnss_files-2.15.so
+Size:               2044 kB
+Rss:                   0 kB
+Pss:                   0 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:            0 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff558cf8000-7ff558cf9000 r--p 0000b000 08:01 41681577                   /lib/x86_64-linux-gnu/libnss_files-2.15.so
+Size:                  4 kB
+Rss:                   4 kB
+Pss:                   4 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         4 kB
+Referenced:            4 kB
+Anonymous:             4 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff558cf9000-7ff558cfa000 rw-p 0000c000 08:01 41681577                   /lib/x86_64-linux-gnu/libnss_files-2.15.so
+Size:                  4 kB
+Rss:                   4 kB
+Pss:                   4 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         4 kB
+Referenced:            4 kB
+Anonymous:             4 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff558d1e000-7ff559be1000 r--p 00000000 08:01 7209368                    /usr/lib/locale/locale-archive
+Size:              15116 kB
+Rss:                  76 kB
+Pss:                  10 kB
+Shared_Clean:         68 kB
+Shared_Dirty:          0 kB
+Private_Clean:         8 kB
+Private_Dirty:         0 kB
+Referenced:           76 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff559be1000-7ff559d96000 r-xp 00000000 08:01 41681428                   /lib/x86_64-linux-gnu/libc-2.15.so
+Size:               1748 kB
+Rss:                 696 kB
+Pss:                   7 kB
+Shared_Clean:        696 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:          696 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff559d96000-7ff559f96000 ---p 001b5000 08:01 41681428                   /lib/x86_64-linux-gnu/libc-2.15.so
+Size:               2048 kB
+Rss:                   0 kB
+Pss:                   0 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:            0 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff559f96000-7ff559f9a000 r--p 001b5000 08:01 41681428                   /lib/x86_64-linux-gnu/libc-2.15.so
+Size:                 16 kB
+Rss:                  16 kB
+Pss:                  16 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:        16 kB
+Referenced:           16 kB
+Anonymous:            16 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff559f9a000-7ff559f9c000 rw-p 001b9000 08:01 41681428                   /lib/x86_64-linux-gnu/libc-2.15.so
+Size:                  8 kB
+Rss:                   8 kB
+Pss:                   8 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         8 kB
+Referenced:            8 kB
+Anonymous:             8 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff559f9c000-7ff559fa1000 rw-p 00000000 00:00 0 
+Size:                 20 kB
+Rss:                  20 kB
+Pss:                  20 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:        20 kB
+Referenced:           20 kB
+Anonymous:            20 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff559fa1000-7ff55a09c000 r-xp 00000000 08:01 41681674                   /lib/x86_64-linux-gnu/libm-2.15.so
+Size:               1004 kB
+Rss:                  64 kB
+Pss:                   1 kB
+Shared_Clean:         64 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:           64 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55a09c000-7ff55a29b000 ---p 000fb000 08:01 41681674                   /lib/x86_64-linux-gnu/libm-2.15.so
+Size:               2044 kB
+Rss:                   0 kB
+Pss:                   0 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:            0 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55a29b000-7ff55a29c000 r--p 000fa000 08:01 41681674                   /lib/x86_64-linux-gnu/libm-2.15.so
+Size:                  4 kB
+Rss:                   4 kB
+Pss:                   4 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         4 kB
+Referenced:            4 kB
+Anonymous:             4 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55a29c000-7ff55a29d000 rw-p 000fb000 08:01 41681674                   /lib/x86_64-linux-gnu/libm-2.15.so
+Size:                  4 kB
+Rss:                   4 kB
+Pss:                   4 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         4 kB
+Referenced:            4 kB
+Anonymous:             4 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55a29d000-7ff55a2bf000 r-xp 00000000 08:01 41681144                   /lib/x86_64-linux-gnu/libtinfo.so.5.9
+Size:                136 kB
+Rss:                 124 kB
+Pss:                   3 kB
+Shared_Clean:        124 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:          124 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55a2bf000-7ff55a4bf000 ---p 00022000 08:01 41681144                   /lib/x86_64-linux-gnu/libtinfo.so.5.9
+Size:               2048 kB
+Rss:                   0 kB
+Pss:                   0 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:            0 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55a4bf000-7ff55a4c3000 r--p 00022000 08:01 41681144                   /lib/x86_64-linux-gnu/libtinfo.so.5.9
+Size:                 16 kB
+Rss:                  16 kB
+Pss:                  16 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:        16 kB
+Referenced:           16 kB
+Anonymous:            16 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55a4c3000-7ff55a4c4000 rw-p 00026000 08:01 41681144                   /lib/x86_64-linux-gnu/libtinfo.so.5.9
+Size:                  4 kB
+Rss:                   4 kB
+Pss:                   4 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         4 kB
+Referenced:            4 kB
+Anonymous:             4 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55a4c4000-7ff55a4c6000 r-xp 00000000 08:01 41681743                   /lib/x86_64-linux-gnu/libdl-2.15.so
+Size:                  8 kB
+Rss:                   8 kB
+Pss:                   0 kB
+Shared_Clean:          8 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:            8 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55a4c6000-7ff55a6c6000 ---p 00002000 08:01 41681743                   /lib/x86_64-linux-gnu/libdl-2.15.so
+Size:               2048 kB
+Rss:                   0 kB
+Pss:                   0 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:            0 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55a6c6000-7ff55a6c7000 r--p 00002000 08:01 41681743                   /lib/x86_64-linux-gnu/libdl-2.15.so
+Size:                  4 kB
+Rss:                   4 kB
+Pss:                   4 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         4 kB
+Referenced:            4 kB
+Anonymous:             4 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55a6c7000-7ff55a6c8000 rw-p 00003000 08:01 41681743                   /lib/x86_64-linux-gnu/libdl-2.15.so
+Size:                  4 kB
+Rss:                   4 kB
+Pss:                   4 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         4 kB
+Referenced:            4 kB
+Anonymous:             4 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55a6c8000-7ff55a6cc000 r-xp 00000000 08:01 41681169                   /lib/x86_64-linux-gnu/libcap.so.2.22
+Size:                 16 kB
+Rss:                  12 kB
+Pss:                   0 kB
+Shared_Clean:         12 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:           12 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55a6cc000-7ff55a8cb000 ---p 00004000 08:01 41681169                   /lib/x86_64-linux-gnu/libcap.so.2.22
+Size:               2044 kB
+Rss:                   0 kB
+Pss:                   0 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:            0 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55a8cb000-7ff55a8cc000 r--p 00003000 08:01 41681169                   /lib/x86_64-linux-gnu/libcap.so.2.22
+Size:                  4 kB
+Rss:                   4 kB
+Pss:                   4 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         4 kB
+Referenced:            4 kB
+Anonymous:             4 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55a8cc000-7ff55a8cd000 rw-p 00004000 08:01 41681169                   /lib/x86_64-linux-gnu/libcap.so.2.22
+Size:                  4 kB
+Rss:                   4 kB
+Pss:                   4 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         4 kB
+Referenced:            4 kB
+Anonymous:             4 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55a8cd000-7ff55a8ef000 r-xp 00000000 08:01 41681675                   /lib/x86_64-linux-gnu/ld-2.15.so
+Size:                136 kB
+Rss:                 120 kB
+Pss:                   1 kB
+Shared_Clean:        120 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:          120 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55a930000-7ff55aac9000 rw-p 00000000 00:00 0 
+Size:               1636 kB
+Rss:                1636 kB
+Pss:                1636 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:      1636 kB
+Referenced:         1636 kB
+Anonymous:          1636 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55aacc000-7ff55aad8000 rw-p 00000000 00:00 0 
+Size:                 48 kB
+Rss:                  48 kB
+Pss:                  48 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:        48 kB
+Referenced:           48 kB
+Anonymous:            48 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55aad9000-7ff55aae1000 rw-p 00000000 00:00 0 
+Size:                 32 kB
+Rss:                  32 kB
+Pss:                  32 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:        32 kB
+Referenced:           32 kB
+Anonymous:            32 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55aae2000-7ff55aae6000 rw-p 00000000 00:00 0 
+Size:                 16 kB
+Rss:                  16 kB
+Pss:                  16 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:        16 kB
+Referenced:           16 kB
+Anonymous:            16 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55aae6000-7ff55aaed000 r--s 00000000 08:01 6826621                    /usr/lib/x86_64-linux-gnu/gconv/gconv-modules.cache
+Size:                 28 kB
+Rss:                  24 kB
+Pss:                   0 kB
+Shared_Clean:         24 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:           24 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55aaed000-7ff55aaef000 rw-p 00000000 00:00 0 
+Size:                  8 kB
+Rss:                   8 kB
+Pss:                   8 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         8 kB
+Referenced:            8 kB
+Anonymous:             8 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55aaef000-7ff55aaf0000 r--p 00022000 08:01 41681675                   /lib/x86_64-linux-gnu/ld-2.15.so
+Size:                  4 kB
+Rss:                   4 kB
+Pss:                   4 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         4 kB
+Referenced:            4 kB
+Anonymous:             4 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7ff55aaf0000-7ff55aaf2000 rw-p 00023000 08:01 41681675                   /lib/x86_64-linux-gnu/ld-2.15.so
+Size:                  8 kB
+Rss:                   8 kB
+Pss:                   8 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         8 kB
+Referenced:            8 kB
+Anonymous:             8 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7fff76a3d000-7fff76a5e000 rw-p 00000000 00:00 0                          [stack]
+Size:                136 kB
+Rss:                  84 kB
+Pss:                  84 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:        84 kB
+Referenced:           84 kB
+Anonymous:            84 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+7fff76bbb000-7fff76bbc000 r-xp 00000000 00:00 0                          [vdso]
+Size:                  4 kB
+Rss:                   4 kB
+Pss:                   0 kB
+Shared_Clean:          4 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:            4 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
+ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0                  [vsyscall]
+Size:                  4 kB
+Rss:                   0 kB
+Pss:                   0 kB
+Shared_Clean:          0 kB
+Shared_Dirty:          0 kB
+Private_Clean:         0 kB
+Private_Dirty:         0 kB
+Referenced:            0 kB
+Anonymous:             0 kB
+AnonHugePages:         0 kB
+Swap:                  0 kB
+KernelPageSize:        4 kB
+MMUPageSize:           4 kB
+Locked:                0 kB
diff --git a/catapult/telemetry/telemetry/internal/testing/stat b/catapult/telemetry/telemetry/internal/testing/stat
new file mode 100644
index 0000000..a2c20d1
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/stat
@@ -0,0 +1 @@
+12911 (chrome) S 16468 4631 4631 0 -1 4202560 145133 0 0 0 831 71 0 0 20 0 4 0 62513410 1025978368 20508 18446744073709551615 1 1 0 0 0 0 0 67112962 1073808616 18446744073709551615 0 0 17 23 0 0 0 0 0
diff --git a/catapult/telemetry/telemetry/internal/testing/status b/catapult/telemetry/telemetry/internal/testing/status
new file mode 100644
index 0000000..cb3d4f6
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/status
@@ -0,0 +1,39 @@
+Name:	chrome
+State:	S (sleeping)
+Tgid:	12911
+Pid:	12911
+PPid:	16468
+TracerPid:	0
+Uid:	20989	20989	20989	20989
+Gid:	5000	5000	5000	5000
+FDSize:	64
+Groups:	4 20 24 25 44 46 104 128 499 5000 5001 5762 5825 66187 66609 66975 68604 74787 74990 75209 75279 76551 76613 76701 76830 77056 77281 78255 79910 79982 80665 80824 
+VmPeak:	 1025488 kB
+VmSize:	 1001932 kB
+VmLck:	       0 kB
+VmPin:	       0 kB
+VmHWM:	  141160 kB
+VmRSS:	   82032 kB
+VmData:	  660664 kB
+VmStk:	     136 kB
+VmExe:	   91844 kB
+VmLib:	   52088 kB
+VmPTE:	     852 kB
+VmSwap:	       0 kB
+Threads:	4
+SigQ:	0/514714
+SigPnd:	0000000000000000
+ShdPnd:	0000000000000000
+SigBlk:	0000000000000000
+SigIgn:	0000000004001002
+SigCgt:	00000001c00104e8
+CapInh:	0000000000000000
+CapPrm:	0000000000000000
+CapEff:	0000000000000000
+CapBnd:	ffffffffffffffff
+Cpus_allowed:	ffffffff
+Cpus_allowed_list:	0-31
+Mems_allowed:	00000000,00000003
+Mems_allowed_list:	0-1
+voluntary_ctxt_switches:	3061
+nonvoluntary_ctxt_switches:	730
diff --git a/catapult/telemetry/telemetry/internal/testing/status_nohwm b/catapult/telemetry/telemetry/internal/testing/status_nohwm
new file mode 100644
index 0000000..5d94aa4
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/status_nohwm
@@ -0,0 +1,37 @@
+Name:	chrome
+State:	S (sleeping)
+Tgid:	12911
+Pid:	12911
+PPid:	16468
+TracerPid:	0
+Uid:	20989	20989	20989	20989
+Gid:	5000	5000	5000	5000
+FDSize:	64
+Groups:	4 20 24 25 44 46 104 128 499 5000 5001 5762 5825 66187 66609 66975 68604 74787 74990 75209 75279 76551 76613 76701 76830 77056 77281 78255 79910 79982 80665 80824 
+VmSize:	 1001932 kB
+VmLck:	       0 kB
+VmPin:	       0 kB
+VmRSS:	   82032 kB
+VmData:	  660664 kB
+VmStk:	     136 kB
+VmExe:	   91844 kB
+VmLib:	   52088 kB
+VmPTE:	     852 kB
+VmSwap:	       0 kB
+Threads:	4
+SigQ:	0/514714
+SigPnd:	0000000000000000
+ShdPnd:	0000000000000000
+SigBlk:	0000000000000000
+SigIgn:	0000000004001002
+SigCgt:	00000001c00104e8
+CapInh:	0000000000000000
+CapPrm:	0000000000000000
+CapEff:	0000000000000000
+CapBnd:	ffffffffffffffff
+Cpus_allowed:	ffffffff
+Cpus_allowed_list:	0-31
+Mems_allowed:	00000000,00000003
+Mems_allowed_list:	0-1
+voluntary_ctxt_switches:	3063
+nonvoluntary_ctxt_switches:	730
diff --git a/catapult/telemetry/telemetry/internal/testing/system_stub_test_module.py b/catapult/telemetry/telemetry/internal/testing/system_stub_test_module.py
new file mode 100644
index 0000000..97e3ef3
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/system_stub_test_module.py
@@ -0,0 +1,7 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+class SystemStubTest(object):
+  @staticmethod
+  def TestOpen(file_path):
+    return open(file_path)
diff --git a/catapult/telemetry/telemetry/internal/testing/test_png.png b/catapult/telemetry/telemetry/internal/testing/test_png.png
new file mode 100644
index 0000000..3aaf03b
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/test_png.png
Binary files differ
diff --git a/catapult/telemetry/telemetry/internal/testing/test_png_2.png b/catapult/telemetry/telemetry/internal/testing/test_png_2.png
new file mode 100644
index 0000000..f44a4a6
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/test_png_2.png
Binary files differ
diff --git a/catapult/telemetry/telemetry/internal/testing/ubuntu-saucy-lsb-release b/catapult/telemetry/telemetry/internal/testing/ubuntu-saucy-lsb-release
new file mode 100644
index 0000000..382258e
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/ubuntu-saucy-lsb-release
@@ -0,0 +1,4 @@
+DISTRIB_ID=Ubuntu
+DISTRIB_RELEASE=13.10
+DISTRIB_CODENAME=saucy
+DISTRIB_DESCRIPTION="Ubuntu 13.10"
diff --git a/catapult/telemetry/telemetry/internal/testing/vid.mp4 b/catapult/telemetry/telemetry/internal/testing/vid.mp4
new file mode 100644
index 0000000..117ff0c
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/vid.mp4
Binary files differ
diff --git a/catapult/telemetry/telemetry/internal/testing/video_test.html b/catapult/telemetry/telemetry/internal/testing/video_test.html
new file mode 100644
index 0000000..ce8529c
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/video_test.html
@@ -0,0 +1,7 @@
+<!DOCTYPE html>
+<html>
+  <body>
+    <video id="video_1" src="bear.webm" controls></video>
+    <audio id="audio_1" src="bear.webm" controls></audio>
+  </body>
+</html>
diff --git a/catapult/telemetry/telemetry/internal/testing/webview_app/background.js b/catapult/telemetry/telemetry/internal/testing/webview_app/background.js
new file mode 100644
index 0000000..5f284a8
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/webview_app/background.js
@@ -0,0 +1,5 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+chrome.app.window.create('main.html');
diff --git a/catapult/telemetry/telemetry/internal/testing/webview_app/main.html b/catapult/telemetry/telemetry/internal/testing/webview_app/main.html
new file mode 100644
index 0000000..0294246
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/webview_app/main.html
@@ -0,0 +1,7 @@
+<html>
+<body>
+This is the simple telemetry webapp main page with a &lt;webview&gt; element.
+<webview id="foo" src="data:text/html;charset=utf-8,<html><body><input id='test_input_id' type='text'></body></html>" style="width:640px; height:480px" autosize="on" minwidth="576" minheight="432">
+</webview>
+</body>
+</html>
diff --git a/catapult/telemetry/telemetry/internal/testing/webview_app/manifest.json b/catapult/telemetry/telemetry/internal/testing/webview_app/manifest.json
new file mode 100644
index 0000000..b24438e
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/testing/webview_app/manifest.json
@@ -0,0 +1,14 @@
+{
+  "description": "Simple Telemetry Test WebApp Containing <webview> Element",
+  "name": "Simple <webview> Telemetry Test WebApp",
+  "app": {
+    "background": {
+      "scripts": ["background.js"]
+    }
+  },
+  "permissions": [
+    "webview"
+  ],
+  "manifest_version": 2,
+  "version": "1.0"
+}
diff --git a/catapult/telemetry/telemetry/internal/util/__init__.py b/catapult/telemetry/telemetry/internal/util/__init__.py
new file mode 100644
index 0000000..50b23df
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/util/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/catapult/telemetry/telemetry/internal/util/binary_manager.py b/catapult/telemetry/telemetry/internal/util/binary_manager.py
new file mode 100644
index 0000000..6af0c97
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/util/binary_manager.py
@@ -0,0 +1,67 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+
+from catapult_base import binary_manager
+from dependency_manager import base_config
+from dependency_manager import exceptions as dependency_manager_exceptions
+from devil import devil_env
+
+from telemetry.core import exceptions
+from telemetry.core import util
+
+
+TELEMETRY_PROJECT_CONFIG = os.path.join(
+    util.GetTelemetryDir(), 'telemetry', 'internal', 'binary_dependencies.json')
+
+
+CHROME_BINARY_CONFIG = os.path.join(util.GetCatapultDir(), 'catapult_base',
+                                    'catapult_base', 'chrome_binaries.json')
+
+
+NoPathFoundError = dependency_manager_exceptions.NoPathFoundError
+CloudStorageError = dependency_manager_exceptions.CloudStorageError
+
+
+_binary_manager = None
+
+
+def NeedsInit():
+  return not _binary_manager
+
+
+def InitDependencyManager(environment_config):
+  global _binary_manager
+  if _binary_manager:
+    raise exceptions.InitializationError(
+        'Trying to re-initialize the binary manager with config %s'
+        % environment_config)
+  configs = [base_config.BaseConfig(TELEMETRY_PROJECT_CONFIG),
+             base_config.BaseConfig(CHROME_BINARY_CONFIG)]
+  if environment_config:
+    configs.insert(0, base_config.BaseConfig(environment_config))
+  _binary_manager = binary_manager.BinaryManager(configs)
+
+  devil_env.config.Initialize()
+
+
+def FetchPath(binary_name, arch, os_name, os_version=None):
+  """ Return a path to the appropriate executable for <binary_name>, downloading
+      from cloud storage if needed, or None if it cannot be found.
+  """
+  if _binary_manager is None:
+    raise exceptions.InitializationError(
+        'Called FetchPath with uninitialized binary manager.')
+  return _binary_manager.FetchPath(binary_name, arch, os_name, os_version)
+
+
+def LocalPath(binary_name, arch, os_name, os_version=None):
+  """ Return a local path to the given binary name, or None if an executable
+      cannot be found. Will not download the executable.
+      """
+  if _binary_manager is None:
+    raise exceptions.InitializationError(
+        'Called LocalPath with uninitialized binary manager.')
+  return _binary_manager.LocalPath(binary_name, arch, os_name, os_version)
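For orientation, a minimal usage sketch of the module added above, assuming a full catapult checkout so that the configs referenced by TELEMETRY_PROJECT_CONFIG and CHROME_BINARY_CONFIG exist on disk; the dependency name 'minidump_stackwalk' is only an illustrative placeholder:

    # Usage sketch only; 'minidump_stackwalk' is a placeholder dependency name.
    from telemetry.internal.util import binary_manager

    if binary_manager.NeedsInit():
      # None means "use only the built-in Telemetry and Chrome binary configs";
      # pass a path to an extra dependency_manager config to prepend one.
      binary_manager.InitDependencyManager(None)

    # Downloads from cloud storage if needed; per the docstring, returns None
    # when no path can be found for this platform.
    path = binary_manager.FetchPath('minidump_stackwalk', 'x86_64', 'linux')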
diff --git a/catapult/telemetry/telemetry/internal/util/binary_manager_unittest.py b/catapult/telemetry/telemetry/internal/util/binary_manager_unittest.py
new file mode 100644
index 0000000..2c7f79d
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/util/binary_manager_unittest.py
@@ -0,0 +1,112 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import unittest
+
+from telemetry.core import exceptions
+from telemetry.internal.util import binary_manager
+import mock
+
+
+class BinaryManagerTest(unittest.TestCase):
+  def setUp(self):
+    # We need to preserve the real initialized dependency_manager.
+    self.actual_binary_manager = binary_manager._binary_manager
+    binary_manager._binary_manager = None
+
+  def tearDown(self):
+    binary_manager._binary_manager = self.actual_binary_manager
+
+  @mock.patch(
+      'telemetry.internal.util.binary_manager.binary_manager.BinaryManager') # pylint: disable=line-too-long
+  @mock.patch(
+      'telemetry.internal.util.binary_manager.base_config.BaseConfig')
+  def testInitializationNoEnvironmentConfig(
+      self, base_config_mock, binary_manager_mock):
+    base_config_mock.side_effect = ['base_config_object1',
+                                    'base_config_object2',
+                                    'base_config_object3']
+    binary_manager.InitDependencyManager(None)
+    base_config_mock.assert_has_calls([
+        mock.call.base_config.BaseConfig(
+            binary_manager.TELEMETRY_PROJECT_CONFIG),
+        mock.call.base_config.BaseConfig(
+            binary_manager.CHROME_BINARY_CONFIG)])
+    self.assertEqual(2, base_config_mock.call_count)
+    binary_manager_mock.assert_called_once_with(['base_config_object1',
+                                              'base_config_object2'])
+
+  @mock.patch(
+      'telemetry.internal.util.binary_manager.binary_manager.BinaryManager') # pylint: disable=line-too-long
+  @mock.patch(
+      'telemetry.internal.util.binary_manager.base_config.BaseConfig')
+  def testInitializationWithEnvironmentConfig(
+      self, base_config_mock, binary_manager_mock):
+    base_config_mock.side_effect = ['base_config_object1',  # TELEMETRY_PROJECT
+                                    'base_config_object2',  # CHROME_BINARY
+                                    'base_config_object3']  # ENVIRONMENT
+    environment_config = os.path.join('some', 'config', 'path')
+    binary_manager.InitDependencyManager(environment_config)
+    expected_calls = [mock.call(binary_manager.TELEMETRY_PROJECT_CONFIG),
+                      mock.call(binary_manager.CHROME_BINARY_CONFIG),
+                      mock.call(environment_config)]
+    self.assertEqual(expected_calls, base_config_mock.call_args_list)
+    # Make sure the environment config is passed first.
+    binary_manager_mock.assert_called_once_with(
+        ['base_config_object3', 'base_config_object1', 'base_config_object2'])
+
+  def testReinitialization(self):
+    binary_manager.InitDependencyManager(None)
+    self.assertRaises(exceptions.InitializationError,
+                      binary_manager.InitDependencyManager, None)
+
+  @mock.patch(
+      'telemetry.internal.util.binary_manager.binary_manager.BinaryManager') # pylint: disable=line-too-long
+  @mock.patch(
+      'telemetry.internal.util.binary_manager.base_config.BaseConfig')
+  def testFetchPathInitialized(self, base_config_mock, binary_manager_mock):
+    base_config_mock.return_value = 'base_config_object'
+    expected = [mock.call.binary_manager.BinaryManager(
+                   ['base_config_object']),
+                mock.call.binary_manager.BinaryManager().FetchPath(
+                    'dep', 'plat_arch')]
+    binary_manager.InitDependencyManager(None)
+    binary_manager.FetchPath('dep', 'plat', 'arch')
+    binary_manager_mock.assert_call_args(expected)
+    base_config_mock.assert_has_calls([
+        mock.call.base_config.BaseConfig(
+            binary_manager.TELEMETRY_PROJECT_CONFIG),
+        mock.call.base_config.BaseConfig(
+            binary_manager.CHROME_BINARY_CONFIG)])
+    self.assertEqual(2, base_config_mock.call_count)
+
+  def testFetchPathUninitialized(self):
+    self.assertRaises(exceptions.InitializationError,
+                      binary_manager.FetchPath, 'dep', 'plat', 'arch')
+
+  @mock.patch(
+      'telemetry.internal.util.binary_manager.binary_manager.BinaryManager') # pylint: disable=line-too-long
+  @mock.patch(
+      'telemetry.internal.util.binary_manager.base_config.BaseConfig')
+  def testLocalPathInitialized(self, base_config_mock, binary_manager_mock):
+    base_config_mock.return_value = 'base_config_object'
+    expected = [mock.call.binary_manager.BinaryManager(
+                   ['base_config_object']),
+                mock.call.binary_manager.BinaryManager().LocalPath(
+                    'dep', 'plat_arch')]
+    binary_manager.InitDependencyManager(None)
+    binary_manager.LocalPath('dep', 'plat', 'arch')
+    binary_manager_mock.assert_call_args(expected)
+    base_config_mock.assert_has_calls([
+        mock.call.base_config.BaseConfig(
+            binary_manager.TELEMETRY_PROJECT_CONFIG),
+        mock.call.base_config.BaseConfig(
+            binary_manager.CHROME_BINARY_CONFIG)])
+    self.assertEqual(2, base_config_mock.call_count)
+
+  def testLocalPathUninitialized(self):
+    self.assertRaises(exceptions.InitializationError,
+                      binary_manager.LocalPath, 'dep', 'plat', 'arch')
+
diff --git a/catapult/telemetry/telemetry/internal/util/camel_case.py b/catapult/telemetry/telemetry/internal/util/camel_case.py
new file mode 100644
index 0000000..9a76890
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/util/camel_case.py
@@ -0,0 +1,30 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import re
+
+
+def ToUnderscore(obj):
+  """Converts a string, list, or dict from camelCase to lower_with_underscores.
+
+  Descends recursively into lists and dicts, converting all dict keys.
+  Returns a newly allocated object of the same structure as the input.
+  """
+  if isinstance(obj, basestring):
+    return re.sub('(?!^)([A-Z]+)', r'_\1', obj).lower()
+
+  elif isinstance(obj, list):
+    return [ToUnderscore(item) for item in obj]
+
+  elif isinstance(obj, dict):
+    output = {}
+    for k, v in obj.iteritems():
+      if isinstance(v, list) or isinstance(v, dict):
+        output[ToUnderscore(k)] = ToUnderscore(v)
+      else:
+        output[ToUnderscore(k)] = v
+    return output
+
+  else:
+    return obj
diff --git a/catapult/telemetry/telemetry/internal/util/camel_case_unittest.py b/catapult/telemetry/telemetry/internal/util/camel_case_unittest.py
new file mode 100644
index 0000000..9ba4d3a
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/util/camel_case_unittest.py
@@ -0,0 +1,50 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.internal.util import camel_case
+
+
+class CamelCaseTest(unittest.TestCase):
+
+  def testString(self):
+    self.assertEqual(camel_case.ToUnderscore('camelCase'), 'camel_case')
+    self.assertEqual(camel_case.ToUnderscore('CamelCase'), 'camel_case')
+    self.assertEqual(camel_case.ToUnderscore('Camel2Case'), 'camel2_case')
+    self.assertEqual(camel_case.ToUnderscore('Camel2Case2'), 'camel2_case2')
+    self.assertEqual(camel_case.ToUnderscore('2012Q3'), '2012_q3')
+
+  def testList(self):
+    camel_case_list = ['CamelCase', ['NestedList']]
+    underscore_list = ['camel_case', ['nested_list']]
+    self.assertEqual(camel_case.ToUnderscore(camel_case_list), underscore_list)
+
+  def testDict(self):
+    camel_case_dict = {
+        'gpu': {
+            'vendorId': 1000,
+            'deviceId': 2000,
+            'vendorString': 'aString',
+            'deviceString': 'bString'},
+        'secondaryGpus': [
+            {'vendorId': 3000, 'deviceId': 4000,
+             'vendorString': 'k', 'deviceString': 'l'}
+        ]
+    }
+    underscore_dict = {
+        'gpu': {
+            'vendor_id': 1000,
+            'device_id': 2000,
+            'vendor_string': 'aString',
+            'device_string': 'bString'},
+        'secondary_gpus': [
+            {'vendor_id': 3000, 'device_id': 4000,
+             'vendor_string': 'k', 'device_string': 'l'}
+        ]
+    }
+    self.assertEqual(camel_case.ToUnderscore(camel_case_dict), underscore_dict)
+
+  def testOther(self):
+    self.assertEqual(camel_case.ToUnderscore(self), self)
diff --git a/catapult/telemetry/telemetry/internal/util/classes.py b/catapult/telemetry/telemetry/internal/util/classes.py
new file mode 100644
index 0000000..0f90a06
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/util/classes.py
@@ -0,0 +1,22 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import inspect
+
+
+def IsDirectlyConstructable(cls):
+  """Returns True if instance of |cls| can be construct without arguments."""
+  assert inspect.isclass(cls)
+  if not hasattr(cls, '__init__'):
+    # Case |class A: pass|.
+    return True
+  if cls.__init__ is object.__init__:
+    # Case |class A(object): pass|.
+    return True
+  # Case |class A(object):| with |__init__| other than |object.__init__|.
+  args, _, _, defaults = inspect.getargspec(cls.__init__)
+  if defaults is None:
+    defaults = ()
+  # Return True if |self| is the only argument without a default.
+  return len(args) == len(defaults) + 1
diff --git a/catapult/telemetry/telemetry/internal/util/classes_unittest.py b/catapult/telemetry/telemetry/internal/util/classes_unittest.py
new file mode 100644
index 0000000..dc7ac51
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/util/classes_unittest.py
@@ -0,0 +1,45 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.internal.util import classes
+
+
+class ClassWithoutInitDefOne: # pylint: disable=old-style-class, no-init
+  pass
+
+
+class ClassWithoutInitDefTwo(object):
+  pass
+
+
+class ClassWhoseInitOnlyHasSelf(object):
+  def __init__(self):
+    pass
+
+
+class ClassWhoseInitWithDefaultArguments(object):
+  def __init__(self, dog=1, cat=None, cow=None, fud='a'):
+    pass
+
+
+class ClassWhoseInitWithDefaultArgumentsAndNonDefaultArguments(object):
+  def __init__(self, x, dog=1, cat=None, fish=None, fud='a'):
+    pass
+
+
+class ClassesUnitTest(unittest.TestCase):
+
+  def testIsDirectlyConstructableReturnsTrue(self):
+    self.assertTrue(classes.IsDirectlyConstructable(ClassWithoutInitDefOne))
+    self.assertTrue(classes.IsDirectlyConstructable(ClassWithoutInitDefTwo))
+    self.assertTrue(classes.IsDirectlyConstructable(ClassWhoseInitOnlyHasSelf))
+    self.assertTrue(
+        classes.IsDirectlyConstructable(ClassWhoseInitWithDefaultArguments))
+
+  def testIsDirectlyConstructableReturnsFalse(self):
+    self.assertFalse(
+        classes.IsDirectlyConstructable(
+            ClassWhoseInitWithDefaultArgumentsAndNonDefaultArguments))
diff --git a/catapult/telemetry/telemetry/internal/util/command_line.py b/catapult/telemetry/telemetry/internal/util/command_line.py
new file mode 100644
index 0000000..2a0d603
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/util/command_line.py
@@ -0,0 +1,121 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import optparse
+
+from telemetry.internal.util import camel_case
+
+
+class ArgumentHandlerMixIn(object):
+  """A structured way to handle command-line arguments.
+
+  In AddCommandLineArgs, add command-line arguments.
+  In ProcessCommandLineArgs, validate them and store them in a private class
+  variable. This way, each class encapsulates its own arguments, without needing
+  to pass an arguments object around everywhere.
+  """
+
+  @classmethod
+  def AddCommandLineArgs(cls, parser):
+    """Override to accept custom command-line arguments."""
+
+  @classmethod
+  def ProcessCommandLineArgs(cls, parser, args):
+    """Override to process command-line arguments.
+
+    We pass in parser so we can call parser.error()."""
+
+
+class Command(ArgumentHandlerMixIn):
+  """An abstraction for things that run from the command-line."""
+
+  @classmethod
+  def Name(cls):
+    return camel_case.ToUnderscore(cls.__name__)
+
+  @classmethod
+  def Description(cls):
+    if cls.__doc__:
+      return cls.__doc__.splitlines()[0]
+    else:
+      return ''
+
+  def Run(self, args):
+    raise NotImplementedError()
+
+  @classmethod
+  def main(cls, args=None):
+    """Main method to run this command as a standalone script."""
+    parser = argparse.ArgumentParser()
+    cls.AddCommandLineArgs(parser)
+    args = parser.parse_args(args=args)
+    cls.ProcessCommandLineArgs(parser, args)
+    return min(cls().Run(args), 255)
+
+
+# TODO: Convert everything to argparse.
+class OptparseCommand(Command):
+  usage = ''
+
+  @classmethod
+  def CreateParser(cls):
+    return optparse.OptionParser('%%prog %s %s' % (cls.Name(), cls.usage),
+                                 description=cls.Description())
+
+  @classmethod
+  def AddCommandLineArgs(cls, parser, environment):
+    # pylint: disable=arguments-differ
+    pass
+
+  @classmethod
+  def ProcessCommandLineArgs(cls, parser, args, environment):
+    # pylint: disable=arguments-differ
+    pass
+
+  def Run(self, args):
+    raise NotImplementedError()
+
+  @classmethod
+  def main(cls, args=None):
+    """Main method to run this command as a standalone script."""
+    parser = cls.CreateParser()
+    cls.AddCommandLineArgs(parser, None)
+    options, args = parser.parse_args(args=args)
+    options.positional_args = args
+    cls.ProcessCommandLineArgs(parser, options, None)
+    return min(cls().Run(options), 255)
+
+
+class SubcommandCommand(Command):
+  """Combines Commands into one big command with sub-commands.
+
+  E.g. "svn checkout", "svn update", and "svn commit" are separate sub-commands.
+
+  Example usage:
+    class MyCommand(command_line.SubcommandCommand):
+      commands = (Help, List, Run)
+
+    if __name__ == '__main__':
+      sys.exit(MyCommand.main())
+  """
+
+  commands = ()
+
+  @classmethod
+  def AddCommandLineArgs(cls, parser):
+    subparsers = parser.add_subparsers()
+
+    for command in cls.commands:
+      subparser = subparsers.add_parser(
+          command.Name(), help=command.Description())
+      subparser.set_defaults(command=command)
+      command.AddCommandLineArgs(subparser)
+
+  @classmethod
+  def ProcessCommandLineArgs(cls, parser, args):
+    args.command.ProcessCommandLineArgs(parser, args)
+
+  def Run(self, args):
+    return args.command().Run(args)
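To illustrate how the classes above fit together, here is a hypothetical command wired into SubcommandCommand; the command name and its --json flag are invented for the example:

    # Hypothetical example built on Command/SubcommandCommand (argparse path).
    import sys

    from telemetry.internal.util import command_line


    class ListDevices(command_line.Command):
      """Lists connected devices."""

      @classmethod
      def AddCommandLineArgs(cls, parser):
        parser.add_argument('--json', action='store_true',
                            help='Emit machine-readable output.')

      def Run(self, args):
        # Command.Name() derives 'list_devices' from the class name.
        print('running %s (json=%s)' % (self.Name(), args.json))
        return 0


    class Main(command_line.SubcommandCommand):
      commands = (ListDevices,)


    if __name__ == '__main__':
      sys.exit(Main.main())  # e.g. `prog list_devices --json`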
diff --git a/catapult/telemetry/telemetry/internal/util/exception_formatter.py b/catapult/telemetry/telemetry/internal/util/exception_formatter.py
new file mode 100644
index 0000000..5c3f86e
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/util/exception_formatter.py
@@ -0,0 +1,107 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Print prettier and more detailed exceptions."""
+
+import logging
+import math
+import os
+import sys
+import traceback
+
+from telemetry.core import exceptions
+from telemetry.core import util
+
+
+def PrintFormattedException(exception_class=None, exception=None, tb=None,
+                            msg=None):
+  logging.info('Try printing formatted exception: %s %s %s' %
+               (exception_class, exception, tb))
+  assert bool(exception_class) == bool(exception) == bool(tb), (
+      'Must specify all or none of exception_class, exception, and tb')
+
+  if not exception_class:
+    exception_class, exception, tb = sys.exc_info()
+
+  if exception_class == exceptions.IntentionalException:
+    return
+
+  def _GetFinalFrame(tb_level):
+    while tb_level.tb_next:
+      tb_level = tb_level.tb_next
+    return tb_level.tb_frame
+
+  processed_tb = traceback.extract_tb(tb)
+  frame = _GetFinalFrame(tb)
+  exception_list = traceback.format_exception_only(exception_class, exception)
+  exception_string = '\n'.join(l.strip() for l in exception_list)
+
+  if msg:
+    print >> sys.stderr
+    print >> sys.stderr, msg
+
+  _PrintFormattedTrace(processed_tb, frame, exception_string)
+
+
+def PrintFormattedFrame(frame, exception_string=None):
+  _PrintFormattedTrace(traceback.extract_stack(frame), frame, exception_string)
+
+
+def _PrintFormattedTrace(processed_tb, frame, exception_string=None):
+  """Prints an Exception in a more useful format than the default.
+
+  TODO(tonyg): Consider further enhancements. For instance:
+    - Report stacks to maintainers like depot_tools does.
+    - Add a debug flag to automatically start pdb upon exception.
+  """
+  print >> sys.stderr
+
+  # Format the traceback.
+  base_dir = os.path.abspath(util.GetChromiumSrcDir())
+  print >> sys.stderr, 'Traceback (most recent call last):'
+  for filename, line, function, text in processed_tb:
+    filename = os.path.abspath(filename)
+    if filename.startswith(base_dir):
+      filename = filename[len(base_dir)+1:]
+    print >> sys.stderr, '  %s at %s:%d' % (function, filename, line)
+    print >> sys.stderr, '    %s' % text
+
+  # Format the exception.
+  if exception_string:
+    print >> sys.stderr, exception_string
+
+  # Format the locals.
+  local_variables = [(variable, value) for variable, value in
+                     frame.f_locals.iteritems() if variable != 'self']
+  print >> sys.stderr
+  print >> sys.stderr, 'Locals:'
+  if local_variables:
+    longest_variable = max(len(v) for v, _ in local_variables)
+    for variable, value in sorted(local_variables):
+      value = repr(value)
+      possibly_truncated_value = _AbbreviateMiddleOfString(value, ' ... ', 1024)
+      truncation_indication = ''
+      if len(possibly_truncated_value) != len(value):
+        truncation_indication = ' (truncated)'
+      print >> sys.stderr, '  %s: %s%s' % (variable.ljust(longest_variable + 1),
+                                           possibly_truncated_value,
+                                           truncation_indication)
+  else:
+    print >> sys.stderr, '  No locals!'
+
+  print >> sys.stderr
+  sys.stderr.flush()
+
+
+def _AbbreviateMiddleOfString(target, middle, max_length):
+  if max_length < 0:
+    raise ValueError('max_length must be non-negative')
+  if len(middle) > max_length:
+    raise ValueError('middle must not be greater than max_length')
+
+  if len(target) <= max_length:
+    return target
+  half_length = (max_length - len(middle)) / 2.
+  return (target[:int(math.floor(half_length))] + middle +
+          target[-int(math.ceil(half_length)):])
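A short, hedged sketch of the typical call site for the formatter above (it assumes a Telemetry checkout, since the module resolves paths via telemetry.core.util at import time):

    # Illustrative call site; with no arguments, sys.exc_info() supplies the
    # exception class, instance and traceback.
    from telemetry.internal.util import exception_formatter

    def _RunStep():
      raise RuntimeError('something went wrong')

    try:
      _RunStep()
    except Exception:
      exception_formatter.PrintFormattedException(msg='Step failed:')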
diff --git a/catapult/telemetry/telemetry/internal/util/external_modules.py b/catapult/telemetry/telemetry/internal/util/external_modules.py
new file mode 100644
index 0000000..92faf59
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/util/external_modules.py
@@ -0,0 +1,56 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import importlib
+
+from distutils import version
+
+MODULES = {
+  'cv2': (version.StrictVersion('2.4.8'), version.StrictVersion('3.0.0')),
+  'numpy': (version.StrictVersion('1.6.1'), None),
+  'psutil': (version.StrictVersion('0.5.0'), None),
+}
+
+def ImportRequiredModule(module):
+  """Tries to import the desired module.
+
+  Returns:
+    The module on success, raises error on failure.
+  Raises:
+    ImportError: The import failed."""
+  versions = MODULES.get(module)
+  if versions is None:
+    raise NotImplementedError('Please teach telemetry about module %s.' %
+                              module)
+  min_version, max_version = versions
+
+  module = importlib.import_module(module)
+  try:
+    if ((min_version is not None and
+            version.StrictVersion(module.__version__) < min_version) or
+        (max_version is not None and
+            version.StrictVersion(module.__version__) >= max_version)):
+      raise ImportError(('Incorrect {0} version found, expected {1} <= version '
+                         '< {2}, found version {3}').format(
+          module, min_version, max_version, module.__version__))
+  except ValueError as e:
+    # This error is raised when a module returns an incorrectly formatted
+    # version string, e.g. '$build 1456a'.
+    if 'invalid version number' in str(e):
+      raise ImportError(('Incorrectly formatted {0} version found, expected '
+                         '{1} <= version < {2}, found version {3}').format(
+          module, min_version, max_version, module.__version__))
+    else:
+      raise
+  return module
+
+def ImportOptionalModule(module):
+  """Tries to import the desired module.
+
+  Returns:
+    The module if successful, None if not."""
+  try:
+    return ImportRequiredModule(module)
+  except ImportError:
+    return None
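A hedged usage sketch of the helpers above, assuming telemetry is on sys.path; whether numpy is actually importable on the host is not guaranteed:

from telemetry.internal.util import external_modules

# Returns None if the module is missing or outside the supported version range.
np = external_modules.ImportOptionalModule('numpy')
if np is None:
  print 'numpy unavailable; skipping numpy-based analysis'
else:
  print 'using numpy %s' % np.__version__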
diff --git a/catapult/telemetry/telemetry/internal/util/file_handle.py b/catapult/telemetry/telemetry/internal/util/file_handle.py
new file mode 100644
index 0000000..8133b0d
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/util/file_handle.py
@@ -0,0 +1,73 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+
+
+_next_file_id = 0
+
+
+class FileHandle(object):
+  def __init__(self, temp_file=None, absolute_path=None):
+    """Constructs a FileHandle object.
+
+    This constructor should not be called directly; prefer the module-level
+    FromTempFile and FromFilePath functions.
+
+    Args:
+      temp_file: An instance of a temporary file object.
+      absolute_path: An absolute path to a file; exactly one of temp_file or
+        absolute_path must be given.
+    """
+    # Exactly one of absolute_path or temp_file must be specified.
+    assert (absolute_path is None) != (temp_file is None)
+    self._temp_file = temp_file
+    self._absolute_path = absolute_path
+
+    global _next_file_id
+    self._id = _next_file_id
+    _next_file_id += 1
+
+  @property
+  def id(self):
+    return self._id
+
+  @property
+  def extension(self):
+    return os.path.splitext(self.GetAbsPath())[1]
+
+  def GetAbsPath(self):
+    """Returns the path to the pointed-to file relative to the given start path.
+
+    Args:
+      start: A string representing a starting path.
+    Returns:
+      A string giving the relative path from path to this file.
+    """
+    if self._temp_file:
+      self._temp_file.close()
+      return self._temp_file.name
+    else:
+      return self._absolute_path
+
+
+def FromTempFile(temp_file):
+  """Constructs a FileHandle pointing to a temporary file.
+
+  Returns:
+    A FileHandle referring to a named temporary file.
+  """
+  return FileHandle(temp_file)
+
+
+def FromFilePath(path):
+  """Constructs a FileHandle from an absolute file path.
+
+  Args:
+    path: A string giving the absolute path to a file.
+  Returns:
+    A FileHandle referring to the file at the specified path.
+  """
+  return FileHandle(None, os.path.abspath(path))
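A small usage sketch, assuming telemetry is importable; the temporary file here is purely illustrative:

import tempfile

from telemetry.internal.util import file_handle

tf = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
handle = file_handle.FromTempFile(tf)
# GetAbsPath() closes the underlying temp file and returns its path.
print handle.id, handle.extension, handle.GetAbsPath()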
diff --git a/catapult/telemetry/telemetry/internal/util/file_handle_unittest.py b/catapult/telemetry/telemetry/internal/util/file_handle_unittest.py
new file mode 100644
index 0000000..28c4265
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/util/file_handle_unittest.py
@@ -0,0 +1,29 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import tempfile
+import unittest
+
+from telemetry.internal.util import file_handle
+
+
+class FileHandleUnittest(unittest.TestCase):
+
+  def setUp(self):
+    self.temp_file_txt = tempfile.NamedTemporaryFile(
+        suffix='.txt', delete=False)
+    self.abs_path_html = tempfile.NamedTemporaryFile(
+        suffix='.html', delete=False).name
+
+  def tearDown(self):
+    os.remove(self.abs_path_html)
+
+  def testCreatingFileHandle(self):
+    fh1 = file_handle.FromTempFile(self.temp_file_txt)
+    self.assertEquals(fh1.extension, '.txt')
+
+    fh2 = file_handle.FromFilePath(self.abs_path_html)
+    self.assertEquals(fh2.extension, '.html')
+    self.assertNotEquals(fh1.id, fh2.id)
diff --git a/catapult/telemetry/telemetry/internal/util/global_hooks.py b/catapult/telemetry/telemetry/internal/util/global_hooks.py
new file mode 100644
index 0000000..d343575
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/util/global_hooks.py
@@ -0,0 +1,40 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Hooks that apply globally to all scripts that import or use Telemetry."""
+import signal
+import sys
+
+from telemetry.internal.util import exception_formatter
+
+
+def InstallHooks():
+  InstallUnhandledExceptionFormatter()
+  InstallStackDumpOnSigusr1()
+  InstallTerminationHook()
+
+def InstallUnhandledExceptionFormatter():
+  """Print prettier exceptions that also contain the stack frame's locals."""
+  sys.excepthook = exception_formatter.PrintFormattedException
+
+
+def InstallStackDumpOnSigusr1():
+  """Catch SIGUSR1 and print a stack trace."""
+  # Windows doesn't define SIGUSR1.
+  if not hasattr(signal, 'SIGUSR1'):
+    return
+
+  def PrintDiagnostics(_, stack_frame):
+    exception_string = 'SIGUSR1 received, printed stack trace'
+    exception_formatter.PrintFormattedFrame(stack_frame, exception_string)
+  signal.signal(signal.SIGUSR1, PrintDiagnostics)
+
+
+def InstallTerminationHook():
+  """Catch SIGTERM, print a stack trace, and exit."""
+  def PrintStackAndExit(sig, stack_frame):
+    exception_string = 'Received signal %s, exiting' % sig
+    exception_formatter.PrintFormattedFrame(stack_frame, exception_string)
+    sys.exit(-1)
+  signal.signal(signal.SIGTERM, PrintStackAndExit)
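A minimal sketch of how the hooks behave once installed, assuming a POSIX host (on Windows, SIGUSR1 is unavailable and the stack-dump hook is simply skipped):

import os
import signal

from telemetry.internal.util import global_hooks

global_hooks.InstallHooks()
if hasattr(signal, 'SIGUSR1'):
  # Triggers the SIGUSR1 handler, which prints the current stack to stderr.
  os.kill(os.getpid(), signal.SIGUSR1)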
diff --git a/catapult/telemetry/telemetry/internal/util/path.py b/catapult/telemetry/telemetry/internal/util/path.py
new file mode 100644
index 0000000..3f454c0
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/util/path.py
@@ -0,0 +1,62 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+
+from telemetry.core import util
+from catapult_base import util as catapult_util  # pylint: disable=import-error
+
+# TODO(aiolos): Move these functions to catapult_base or here.
+GetBaseDir = util.GetBaseDir
+GetTelemetryDir = util.GetTelemetryDir
+GetUnittestDataDir = util.GetUnittestDataDir
+GetChromiumSrcDir = util.GetChromiumSrcDir
+GetBuildDirectories = util.GetBuildDirectories
+
+IsExecutable = catapult_util.IsExecutable
+
+
+def FindInstalledWindowsApplication(application_path):
+  """Search common Windows installation directories for an application.
+
+  Args:
+    application_path: Path to the application, relative to the installation
+        location.
+  Returns:
+    A string representing the full path, or None if not found.
+  """
+  search_paths = [os.getenv('PROGRAMFILES(X86)'),
+                  os.getenv('PROGRAMFILES'),
+                  os.getenv('LOCALAPPDATA')]
+  search_paths += os.getenv('PATH', '').split(os.pathsep)
+
+  for search_path in search_paths:
+    if not search_path:
+      continue
+    path = os.path.join(search_path, application_path)
+    if IsExecutable(path):
+      return path
+
+  return None
+
+
+def IsSubpath(subpath, superpath):
+  """Returns True iff subpath is or is in superpath."""
+  subpath = os.path.realpath(subpath)
+  superpath = os.path.realpath(superpath)
+
+  while len(subpath) >= len(superpath):
+    if subpath == superpath:
+      return True
+    subpath = os.path.split(subpath)[0]
+  return False
+
+
+def ListFiles(base_directory, should_include_dir=lambda _: True,
+              should_include_file=lambda _: True):
+  matching_files = []
+  for root, dirs, files in os.walk(base_directory):
+    dirs[:] = [dir_name for dir_name in dirs if should_include_dir(dir_name)]
+    matching_files += [os.path.join(root, file_name)
+                       for file_name in files if should_include_file(file_name)]
+  return sorted(matching_files)
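An illustrative use of ListFiles with both filters, assuming telemetry is importable; the directory and filter choices are hypothetical:

from telemetry.internal.util import path

python_files = path.ListFiles(
    '.',
    should_include_dir=lambda d: not d.startswith('.'),
    should_include_file=lambda f: f.endswith('.py'))
print '\n'.join(python_files)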
diff --git a/catapult/telemetry/telemetry/internal/util/path_set.py b/catapult/telemetry/telemetry/internal/util/path_set.py
new file mode 100644
index 0000000..0b46783
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/util/path_set.py
@@ -0,0 +1,44 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+import os
+
+
+class PathSet(collections.MutableSet):
+  """A set of paths.
+
+  All mutation methods accept both directories and individual files, but the
+  iterator yields the individual files. All paths are automatically normalized.
+  """
+  def __init__(self, iterable=None):
+    self._paths = set()
+    if iterable:
+      self |= iterable
+
+  def __contains__(self, path):
+    return os.path.realpath(path) in self._paths
+
+  def __iter__(self):
+    return iter(self._paths)
+
+  def __len__(self):
+    return len(self._paths)
+
+  def add(self, path):
+    path = os.path.realpath(path)
+    if os.path.isfile(path):
+      self._paths.add(path)
+    for root, _, files in os.walk(path):
+      for basename in files:
+        file_path = os.path.join(root, basename)
+        if os.path.isfile(file_path):
+          self._paths.add(file_path)
+
+  def discard(self, path):
+    path = os.path.realpath(path)
+    self._paths.discard(path)
+    for root, _, files in os.walk(path):
+      for basename in files:
+        self._paths.discard(os.path.join(root, basename))
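A short sketch of the set semantics described in the class docstring above (the paths are hypothetical):

from telemetry.internal.util import path_set

ps = path_set.PathSet(['some_directory'])   # adds every file under the tree
ps.add('notes.txt')                          # individual files work as well
print len(ps), 'notes.txt' in ps             # membership uses realpath()
ps.discard('some_directory')                 # removes that tree's files again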
diff --git a/catapult/telemetry/telemetry/internal/util/path_set_unittest.py b/catapult/telemetry/telemetry/internal/util/path_set_unittest.py
new file mode 100755
index 0000000..6cc0ec1
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/util/path_set_unittest.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import unittest
+
+from telemetry.internal.util import path_set
+
+
+class PathSetTest(unittest.TestCase):
+  def testCreate(self):
+    ps = path_set.PathSet()
+    self.assertEqual(len(ps), 0)  # Check __len__.
+    self.assertFalse(__file__ in ps)
+    for _ in ps:  # Check __iter__.
+      self.fail('New set is not empty.')
+
+    ps = path_set.PathSet([__file__])
+    self.assertEqual(len(ps), 1)
+    self.assertTrue(__file__ in ps)
+    self.assertEqual(ps.pop(), os.path.realpath(__file__))
+
+  def testAdd(self):
+    ps = path_set.PathSet()
+    ps.add(__file__)
+    self.assertEqual(len(ps), 1)
+    self.assertTrue(__file__ in ps)
+    self.assertEqual(ps.pop(), os.path.realpath(__file__))
+
+  def testDiscard(self):
+    ps = path_set.PathSet([__file__])
+    ps.discard(__file__)
+    self.assertEqual(len(ps), 0)
+    self.assertFalse(__file__ in ps)
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/catapult/telemetry/telemetry/internal/util/path_unittest.py b/catapult/telemetry/telemetry/internal/util/path_unittest.py
new file mode 100644
index 0000000..17f87e4
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/util/path_unittest.py
@@ -0,0 +1,21 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+import unittest
+
+from telemetry import decorators
+from telemetry.internal.util import path
+
+
+class PathTest(unittest.TestCase):
+  def testIsExecutable(self):
+    self.assertFalse(path.IsExecutable('nonexistent_file'))
+    self.assertTrue(path.IsExecutable(sys.executable))
+
+  @decorators.Enabled('win')
+  def testFindInstalledWindowsApplication(self):
+    self.assertTrue(path.FindInstalledWindowsApplication(os.path.join(
+        'Internet Explorer', 'iexplore.exe')))
diff --git a/catapult/telemetry/telemetry/internal/util/ps_util.py b/catapult/telemetry/telemetry/internal/util/ps_util.py
new file mode 100644
index 0000000..8042e70
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/util/ps_util.py
@@ -0,0 +1,91 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import atexit
+import inspect
+import logging
+import os
+
+from collections import defaultdict
+
+
+def GetChildPids(processes, pid):
+  """Returns all child processes of |pid| from the given |processes| list.
+
+  Args:
+    processes: A tuple of (pid, ppid, state) as generated by ps.
+    pid: The pid for which to get children.
+
+  Returns:
+    A list of child pids.
+  """
+  child_dict = defaultdict(list)
+  for curr_pid, curr_ppid, state in processes:
+    if 'Z' in state:
+      continue  # Ignore zombie processes
+    child_dict[int(curr_ppid)].append(int(curr_pid))
+  queue = [pid]
+  child_ids = []
+  while queue:
+    parent = queue.pop()
+    if parent in child_dict:
+      children = child_dict[parent]
+      queue.extend(children)
+      child_ids.extend(children)
+  return child_ids
+
+
+def GetPsOutputWithPlatformBackend(platform_backend, columns, pid):
+  """Returns output of the 'ps' command as a list of lines.
+
+  Args:
+    platform_backend: The platform backend (LinuxBasedPlatformBackend or
+        PosixPlatformBackend).
+    columns: A list of required columns, e.g., ['pid', 'pss'].
+    pid: If not None, returns only the information of the process with the pid.
+  """
+  args = ['ps']
+  args.extend(['-p', str(pid)] if pid is not None else ['-e'])
+  for c in columns:
+    args.extend(['-o', c + '='])
+  return platform_backend.RunCommand(args).splitlines()
+
+
+def EnableListingStrayProcessesUponExitHook():
+  def _ListAllSubprocesses():
+    try:
+      import psutil
+    except ImportError:
+      logging.warning(
+          'psutil is not installed on the system. Not listing possible '
+          'leaked processes. To install psutil, see: '
+          'https://pypi.python.org/pypi/psutil')
+      return
+    telemetry_pid = os.getpid()
+    parent = psutil.Process(telemetry_pid)
+    if hasattr(parent, 'children'):
+      children = parent.children(recursive=True)
+    else:  # Some old versions of psutil use get_children instead of children.
+      children = parent.get_children()
+    if children:
+      leak_processes_info = []
+      for p in children:
+        if inspect.ismethod(p.name):
+          name = p.name()
+        else:  # Process.name is a property in old versions of psutil.
+          name = p.name
+        process_info = '%s (%s)' % (name, p.pid)
+        try:
+          if inspect.ismethod(p.cmdline):
+            cmdline = p.cmdline()
+          else:
+            cmdline = p.cmdline
+          process_info += ' - %s' % cmdline
+        except Exception as e:
+          logging.warning(str(e))
+        leak_processes_info.append(process_info)
+      logging.error('Telemetry leaks these processes: %s',
+                    ', '.join(leak_processes_info))
+
+  atexit.register(_ListAllSubprocesses)
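GetChildPids only needs (pid, ppid, state) rows, so it can be exercised with a synthetic process table; the values below are made up:

from telemetry.internal.util import ps_util

# 12 is a zombie ('Z') and is therefore ignored when building the child map.
processes = [(10, 1, 'S'), (11, 10, 'S'), (12, 11, 'Z'), (13, 10, 'R')]
print ps_util.GetChildPids(processes, 10)   # -> [11, 13]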
diff --git a/catapult/telemetry/telemetry/internal/util/webpagereplay.py b/catapult/telemetry/telemetry/internal/util/webpagereplay.py
new file mode 100644
index 0000000..355900b
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/util/webpagereplay.py
@@ -0,0 +1,328 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Start and stop Web Page Replay."""
+
+import atexit
+import logging
+import os
+import re
+import signal
+import subprocess
+import sys
+import tempfile
+import urllib
+
+from telemetry.core import exceptions
+from telemetry.core import util
+from telemetry.internal import forwarders
+
+_REPLAY_DIR = os.path.join(
+    util.GetTelemetryThirdPartyDir(), 'webpagereplay')
+
+
+class ReplayError(Exception):
+  """Catch-all exception for the module."""
+  pass
+
+
+class ReplayNotFoundError(ReplayError):
+  def __init__(self, label, path):
+    super(ReplayNotFoundError, self).__init__()
+    self.args = (label, path)
+
+  def __str__(self):
+    label, path = self.args
+    return 'Path does not exist for %s: %s' % (label, path)
+
+
+class ReplayNotStartedError(ReplayError):
+  pass
+
+
+class ReplayServer(object):
+  """Start and Stop Web Page Replay.
+
+  Web Page Replay is a proxy that can record and "replay" web pages with
+  simulated network characteristics -- without having to edit the pages
+  by hand. With WPR, tests can use "real" web content, and catch
+  performance issues that may result from introducing network delays and
+  bandwidth throttling.
+
+  Example:
+     with ReplayServer(archive_path):
+       self.NavigateToURL(start_url)
+       self.WaitUntil(...)
+  """
+
+  def __init__(self, archive_path, replay_host, http_port, https_port, dns_port,
+               replay_options):
+    """Initialize ReplayServer.
+
+    Args:
+      archive_path: a path to a specific WPR archive (required).
+      replay_host: the hostname to serve traffic.
+      http_port: an integer port on which to serve HTTP traffic. May be zero
+          to let the OS choose an available port.
+      https_port: an integer port on which to serve HTTPS traffic. May be zero
+          to let the OS choose an available port.
+      dns_port: an integer port on which to serve DNS traffic. May be zero
+          to let the OS choose an available port. If None, DNS forwarding is
+          disabled.
+      replay_options: an iterable of option strings to forward to replay.py.
+    """
+    self.archive_path = archive_path
+    self._replay_host = replay_host
+    self._use_dns_server = dns_port is not None
+    self._started_ports = {}  # a dict such as {'http': 80, 'https': 443}
+
+    # A temporary path for storing stdout & stderr of the webpagereplay
+    # subprocess.
+    self._temp_log_file_path = None
+
+    replay_py = os.path.join(_REPLAY_DIR, 'replay.py')
+    self._cmd_line = self._GetCommandLine(
+        replay_py, self._replay_host, http_port, https_port, dns_port,
+        replay_options, archive_path)
+
+    if '--record' in replay_options:
+      self._CheckPath('archive directory', os.path.dirname(self.archive_path))
+    elif not os.path.exists(self.archive_path):
+      self._CheckPath('archive file', self.archive_path)
+    self._CheckPath('replay script', replay_py)
+
+    self.replay_process = None
+
+  @staticmethod
+  def _GetCommandLine(replay_py, host_ip, http_port, https_port, dns_port,
+                      replay_options, archive_path):
+    """Set WPR command-line options. Can be overridden if needed."""
+    cmd_line = [sys.executable, replay_py]
+    cmd_line.extend([
+        '--host=%s' % host_ip,
+        '--port=%s' % http_port,
+        '--ssl_port=%s' % https_port
+        ])
+    if dns_port is not None:
+      # Note that if --host is not '127.0.0.1', Replay will override the local
+      # DNS nameserver settings to point to the replay-started DNS server.
+      cmd_line.append('--dns_port=%s' % dns_port)
+    else:
+      cmd_line.append('--no-dns_forwarding')
+    cmd_line.extend([
+        '--use_closest_match',
+        '--log_level=warning'
+        ])
+    cmd_line.extend(replay_options)
+    cmd_line.append(archive_path)
+    return cmd_line
+
+  def _CheckPath(self, label, path):
+    if not os.path.exists(path):
+      raise ReplayNotFoundError(label, path)
+
+  def _OpenLogFile(self):
+    """Opens the log file for writing."""
+    log_dir = os.path.dirname(self._temp_log_file_path)
+    if not os.path.exists(log_dir):
+      os.makedirs(log_dir)
+    return open(self._temp_log_file_path, 'w')
+
+  def _LogLines(self):
+    """Yields the log lines."""
+    if not os.path.isfile(self._temp_log_file_path):
+      return
+    with open(self._temp_log_file_path) as f:
+      for line in f:
+        yield line
+
+  def _IsStarted(self):
+    """Returns true if the server is up and running."""
+    if self.replay_process.poll() is not None:
+      # The process terminated.
+      return False
+
+    def HasIncompleteStartedPorts():
+      return ('http' not in self._started_ports or
+              'https' not in self._started_ports or
+              (self._use_dns_server and 'dns' not in self._started_ports))
+    if HasIncompleteStartedPorts():
+      self._started_ports = self._ParseLogFilePorts(self._LogLines())
+    if HasIncompleteStartedPorts():
+      return False
+    try:
+      # HTTPS may require SNI (which urllib does not speak), so only check
+      # that HTTP responds.
+      return 200 == self._UrlOpen('web-page-replay-generate-200').getcode()
+    except IOError:
+      return False
+
+  @staticmethod
+  def _ParseLogFilePorts(log_lines):
+    """Returns the ports on which replay listens as reported in its log file.
+
+    Only matches HTTP, HTTPS, and DNS. One call may return only some
+    of the ports depending on what has been written to the log file.
+
+    Example log lines:
+        2014-09-03 17:04:27,978 WARNING HTTP server started on 127.0.0.1:51673
+        2014-09-03 17:04:27,978 WARNING HTTPS server started on 127.0.0.1:35270
+
+    Returns:
+      a dict with ports available in log_lines. For example,
+         {}  # no ports found
+         {'http': 1234, 'https': 2345, 'dns': 3456}
+    """
+    ports = {}
+    port_re = re.compile(
+        r'.*?(?P<protocol>HTTP|HTTPS|DNS)'
+        r' server started on '
+        r'(?P<host>[^:]*):'
+        r'(?P<port>\d+)')
+    for line in log_lines:
+      m = port_re.match(line.strip())
+      if m:
+        protocol = m.group('protocol').lower()
+        ports[protocol] = int(m.group('port'))
+    return ports
+
+  def StartServer(self):
+    """Start Web Page Replay and verify that it started.
+
+    Returns:
+      A forwarders.PortSet(http, https, dns) tuple, with dns None if unused.
+    Raises:
+      ReplayNotStartedError: if Replay start-up fails.
+    """
+    is_posix = sys.platform.startswith('linux') or sys.platform == 'darwin'
+    logging.debug('Starting Web-Page-Replay: %s', self._cmd_line)
+    self._CreateTempLogFilePath()
+    with self._OpenLogFile() as log_fh:
+      self.replay_process = subprocess.Popen(
+          self._cmd_line, stdout=log_fh, stderr=subprocess.STDOUT,
+          preexec_fn=(_ResetInterruptHandler if is_posix else None))
+    try:
+      util.WaitFor(self._IsStarted, 30)
+      atexit.register(self.StopServer)
+      return forwarders.PortSet(
+          self._started_ports['http'],
+          self._started_ports['https'],
+          self._started_ports.get('dns'),  # None if unused
+          )
+    except exceptions.TimeoutException:
+      raise ReplayNotStartedError(
+          'Web Page Replay failed to start. Log output:\n%s' %
+          ''.join(self._LogLines()))
+
+  def StopServer(self):
+    """Stop Web Page Replay."""
+    if self._IsStarted():
+      try:
+        self._StopReplayProcess()
+      finally:
+        # TODO(rnephew): Upload logs to google storage. crbug.com/525787
+        self._CleanUpTempLogFilePath()
+    else:
+      logging.warning('Attempting to stop WPR server that is not running.')
+
+  def _StopReplayProcess(self):
+    if not self.replay_process:
+      return
+
+    logging.debug('Trying to stop Web-Page-Replay gracefully')
+    try:
+      if self._started_ports:
+        self._UrlOpen('web-page-replay-command-exit').close()
+    except IOError:
+      # IOError is possible because the server might exit without response.
+      pass
+
+    try:
+      util.WaitFor(lambda: self.replay_process.poll() is not None, 10)
+    except exceptions.TimeoutException:
+      try:
+        # Use a SIGINT so that it can do graceful cleanup.
+        self.replay_process.send_signal(signal.SIGINT)
+      except:  # pylint: disable=bare-except
+        # On Windows, we are left with no other option than terminate().
+        is_primary_nameserver_changed_by_replay = (
+            self._use_dns_server and self._replay_host == '127.0.0.1')
+        if is_primary_nameserver_changed_by_replay:
+          # Replay changes the DNS nameserver configuration so that DNS
+          # requests are resolved by replay's own DNS server. It resolves
+          # all DNS requests to its own IP address so that it can serve the
+          # HTTP and HTTPS requests.
+          # If the replay host is not '127.0.0.1', then replay skips the
+          # nameserver change because it assumes a different mechanism
+          # will be used to route DNS requests to replay's DNS server.
+          logging.warning(
+              'Unable to stop Web-Page-Replay gracefully.\n'
+              'Replay changed the DNS nameserver configuration to make replay '
+              'the primary nameserver. That might not be restored!')
+        try:
+          self.replay_process.terminate()
+        except:  # pylint: disable=bare-except
+          pass
+      self.replay_process.wait()
+
+  def _CreateTempLogFilePath(self):
+    assert self._temp_log_file_path is None
+    handle, self._temp_log_file_path = tempfile.mkstemp()
+    os.close(handle)
+
+  def _CleanUpTempLogFilePath(self):
+    assert self._temp_log_file_path
+    if logging.getLogger('').isEnabledFor(logging.INFO):
+      with open(self._temp_log_file_path, 'r') as f:
+        wpr_log_content = '\n'.join([
+            '************************** WPR LOG *****************************',
+            f.read(),
+            '************************** END OF WPR LOG **********************'])
+      logging.debug(wpr_log_content)
+    os.remove(self._temp_log_file_path)
+    self._temp_log_file_path = None
+
+  def __enter__(self):
+    """Add support for with-statement."""
+    self.StartServer()
+    return self
+
+  def __exit__(self, unused_exc_type, unused_exc_val, unused_exc_tb):
+    """Add support for with-statement."""
+    self.StopServer()
+
+  def _UrlOpen(self, url_path, protocol='http'):
+    """Open a Replay URL.
+
+    For matching requests in the archive, Replay relies on the "Host:" header.
+    For Replay command URLs, the "Host:" header is not needed.
+
+    Args:
+      url_path: WPR server request path.
+      protocol: 'http' or 'https'
+    Returns:
+      a file-like object from urllib.urlopen
+    """
+    url = '%s://%s:%s/%s' % (
+        protocol, self._replay_host, self._started_ports[protocol], url_path)
+    return urllib.urlopen(url, proxies={})
+
+def _ResetInterruptHandler():
+  """Reset the interrupt handler back to the default.
+
+  The replay process is stopped gracefully by making an HTTP request
+  ('web-page-replay-command-exit'). The graceful exit is important for
+  restoring the DNS configuration. If the HTTP request fails, the fallback
+  is to send SIGINT to the process.
+
+  On POSIX systems, running this function before starting replay fixes a
+  bug that shows up when Telemetry is run as a background command from a
+  script. https://crbug.com/254572.
+
+  Background: Signal masks on Linux are inherited from parent
+  processes. If anything invoking us accidentally masks SIGINT
+  (e.g. by putting a process in the background from a shell script),
+  sending a SIGINT to the child will fail to terminate it.
+  """
+  signal.signal(signal.SIGINT, signal.SIG_DFL)
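A hedged start/stop sketch matching the ReplayServer class docstring above; the archive path is a placeholder, and port 0 lets the OS pick free ports:

from telemetry.internal.util import webpagereplay

# '--record' mode only requires the archive's directory to exist; without it,
# the archive file itself must already exist.
server = webpagereplay.ReplayServer(
    archive_path='/tmp/example.wpr',   # placeholder path
    replay_host='127.0.0.1',
    http_port=0, https_port=0,         # 0 lets the OS choose the ports
    dns_port=None,                     # None disables DNS forwarding
    replay_options=['--record'])
with server:                           # StartServer()/StopServer() via with
  pass                                 # drive the browser against WPR here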
diff --git a/catapult/telemetry/telemetry/internal/util/webpagereplay_unittest.py b/catapult/telemetry/telemetry/internal/util/webpagereplay_unittest.py
new file mode 100644
index 0000000..518e0d2
--- /dev/null
+++ b/catapult/telemetry/telemetry/internal/util/webpagereplay_unittest.py
@@ -0,0 +1,68 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+import unittest
+
+from telemetry.internal.util import webpagereplay
+
+
+# pylint: disable=protected-access
+class CreateCommandTest(unittest.TestCase):
+  def testHasDnsGivesDnsPort(self):
+    expected_cmd_line = [
+        sys.executable, 'replay.py', '--host=127.0.0.1',
+        '--port=2', '--ssl_port=1', '--dns_port=0',
+        '--use_closest_match', '--log_level=warning', '--extra_arg', 'foo.wpr']
+    cmd_line = webpagereplay.ReplayServer._GetCommandLine(
+        'replay.py', '127.0.0.1', 2, 1, 0, ['--extra_arg'], 'foo.wpr')
+    self.assertEqual(expected_cmd_line, cmd_line)
+
+  def testNoDnsGivesNoDnsForwarding(self):
+    expected_cmd_line = [
+        sys.executable, 'replay.py', '--host=127.0.0.1',
+        '--port=8080', '--ssl_port=8443', '--no-dns_forwarding',
+        '--use_closest_match', '--log_level=warning', 'bar.wpr']
+    cmd_line = webpagereplay.ReplayServer._GetCommandLine(
+        'replay.py', '127.0.0.1', 8080, 8443, None, [], 'bar.wpr')
+    self.assertEqual(expected_cmd_line, cmd_line)
+
+
+# pylint: disable=protected-access
+class ParseLogFilePortsTest(unittest.TestCase):
+  def testEmptyLinesGivesEmptyDict(self):
+    log_lines = iter([])
+    self.assertEqual(
+      {},
+      webpagereplay.ReplayServer._ParseLogFilePorts(log_lines))
+
+  def testSingleMatchGivesSingleElementDict(self):
+    log_lines = iter([
+        'extra stuff',
+        '2014-09-27 17:04:27,11 WARNING HTTP server started on 127.0.0.1:5167',
+        'extra stuff',
+        ])
+    self.assertEqual(
+        {'http': 5167},
+        webpagereplay.ReplayServer._ParseLogFilePorts(log_lines))
+
+  def testUnknownProtocolSkipped(self):
+    log_lines = iter([
+        '2014-09-27 17:04:27,11 WARNING FOO server started on 127.0.0.1:1111',
+        '2014-09-27 17:04:27,12 WARNING HTTP server started on 127.0.0.1:5167',
+        ])
+    self.assertEqual(
+        {'http': 5167},
+        webpagereplay.ReplayServer._ParseLogFilePorts(log_lines))
+
+  def testTypicalLogLinesGiveFullDict(self):
+    log_lines = iter([
+        'extra',
+        '2014-09-27 17:04:27,11 WARNING DNS server started on 127.0.0.1:2345',
+        '2014-09-27 17:04:27,12 WARNING HTTP server started on 127.0.0.1:3456',
+        '2014-09-27 17:04:27,13 WARNING HTTPS server started on 127.0.0.1:4567',
+        ])
+    self.assertEqual(
+        {'dns': 2345, 'http': 3456, 'https': 4567},
+        webpagereplay.ReplayServer._ParseLogFilePorts(log_lines))
diff --git a/catapult/telemetry/telemetry/page/__init__.py b/catapult/telemetry/telemetry/page/__init__.py
new file mode 100644
index 0000000..b814086
--- /dev/null
+++ b/catapult/telemetry/telemetry/page/__init__.py
@@ -0,0 +1,208 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import inspect
+import logging
+import os
+import urlparse
+
+from catapult_base import cloud_storage  # pylint: disable=import-error
+
+from telemetry import story
+from telemetry.page import shared_page_state
+from telemetry.page import action_runner as action_runner_module
+
+
+class Page(story.Story):
+
+  def __init__(self, url, page_set=None, base_dir=None, name='',
+               credentials_path=None,
+               credentials_bucket=cloud_storage.PUBLIC_BUCKET, labels=None,
+               startup_url='', make_javascript_deterministic=True,
+               shared_page_state_class=shared_page_state.SharedPageState):
+    self._url = url
+
+    super(Page, self).__init__(
+        shared_page_state_class, name=name, labels=labels,
+        is_local=self._scheme in ['file', 'chrome', 'about'],
+        make_javascript_deterministic=make_javascript_deterministic)
+
+    self._page_set = page_set
+    # Default value of base_dir is the directory of the file that defines the
+    # class of this page instance.
+    if base_dir is None:
+      base_dir = os.path.dirname(inspect.getfile(self.__class__))
+    self._base_dir = base_dir
+    self._name = name
+    if credentials_path:
+      credentials_path = os.path.join(self._base_dir, credentials_path)
+      cloud_storage.GetIfChanged(credentials_path, credentials_bucket)
+      if not os.path.exists(credentials_path):
+        logging.error('Invalid credentials path: %s' % credentials_path)
+        credentials_path = None
+    self._credentials_path = credentials_path
+
+    # Whether to collect garbage on the page before navigating & performing
+    # page actions.
+    self._collect_garbage_before_run = True
+
+    # These attributes can be set dynamically by the page.
+    self.synthetic_delays = dict()
+    self._startup_url = startup_url
+    self.credentials = None
+    self.skip_waits = False
+    self.script_to_evaluate_on_commit = None
+    self._SchemeErrorCheck()
+
+  @property
+  def credentials_path(self):
+    return self._credentials_path
+
+  @property
+  def startup_url(self):
+    return self._startup_url
+
+  def _SchemeErrorCheck(self):
+    if not self._scheme:
+      raise ValueError('Must prepend the URL with scheme (e.g. file://)')
+
+    if self.startup_url:
+      startup_url_scheme = urlparse.urlparse(self.startup_url).scheme
+      if not startup_url_scheme:
+        raise ValueError('Must prepend the URL with scheme (e.g. http://)')
+      if startup_url_scheme == 'file':
+        raise ValueError('startup_url with local file scheme is not supported')
+
+  def Run(self, shared_state):
+    current_tab = shared_state.current_tab
+    # Collect garbage from previous run several times to make the results more
+    # stable if needed.
+    if self._collect_garbage_before_run:
+      for _ in xrange(0, 5):
+        current_tab.CollectGarbage()
+    shared_state.page_test.WillNavigateToPage(self, current_tab)
+    shared_state.page_test.RunNavigateSteps(self, current_tab)
+    shared_state.page_test.DidNavigateToPage(self, current_tab)
+    action_runner = action_runner_module.ActionRunner(
+        current_tab, skip_waits=self.skip_waits)
+    self.RunPageInteractions(action_runner)
+
+  def RunNavigateSteps(self, action_runner):
+    url = self.file_path_url_with_scheme if self.is_file else self.url
+    action_runner.Navigate(
+        url, script_to_evaluate_on_commit=self.script_to_evaluate_on_commit)
+
+  def RunPageInteractions(self, action_runner):
+    """Override this to define custom interactions with the page.
+    e.g:
+      def RunPageInteractions(self, action_runner):
+        action_runner.ScrollPage()
+        action_runner.TapElement(text='Next')
+    """
+    pass
+
+  def AsDict(self):
+    """Converts a page object to a dict suitable for JSON output."""
+    d = {
+        'id': self._id,
+        'url': self._url,
+    }
+    if self._name:
+      d['name'] = self._name
+    return d
+
+  @property
+  def story_set(self):
+    return self._page_set
+
+  # TODO(nednguyen, aiolos): deprecate this property.
+  @property
+  def page_set(self):
+    return self._page_set
+
+  @property
+  def url(self):
+    return self._url
+
+  def GetSyntheticDelayCategories(self):
+    result = []
+    for delay, options in self.synthetic_delays.items():
+      options = '%f;%s' % (options.get('target_duration', 0),
+                           options.get('mode', 'static'))
+      result.append('DELAY(%s;%s)' % (delay, options))
+    return result
+
+  def __lt__(self, other):
+    return self.url < other.url
+
+  def __cmp__(self, other):
+    x = cmp(self.name, other.name)
+    if x != 0:
+      return x
+    return cmp(self.url, other.url)
+
+  def __str__(self):
+    return self.url
+
+  def AddCustomizeBrowserOptions(self, options):
+    """ Inherit page overrides this to add customized browser options."""
+    pass
+
+  @property
+  def _scheme(self):
+    return urlparse.urlparse(self.url).scheme
+
+  @property
+  def is_file(self):
+    """Returns True iff this URL points to a file."""
+    return self._scheme == 'file'
+
+  @property
+  def file_path(self):
+    """Returns the path of the file, stripping the scheme and query string."""
+    assert self.is_file
+    # Because ? is a valid character in a filename,
+    # we have to treat the URL as a non-file by removing the scheme.
+    parsed_url = urlparse.urlparse(self.url[7:])
+    return os.path.normpath(os.path.join(
+        self._base_dir, parsed_url.netloc + parsed_url.path))
+
+  @property
+  def base_dir(self):
+    return self._base_dir
+
+  @property
+  def file_path_url(self):
+    """Returns the file path, including the params, query, and fragment."""
+    assert self.is_file
+    file_path_url = os.path.normpath(
+        os.path.join(self._base_dir, self.url[7:]))
+    # Preserve trailing slash or backslash.
+    # It doesn't matter in a file path, but it does matter in a URL.
+    if self.url.endswith('/'):
+      file_path_url += os.sep
+    return file_path_url
+
+  @property
+  def file_path_url_with_scheme(self):
+    return 'file://' + self.file_path_url
+
+  @property
+  def serving_dir(self):
+    if not self.is_file:
+      return None
+    file_path = os.path.realpath(self.file_path)
+    if os.path.isdir(file_path):
+      return file_path
+    else:
+      return os.path.dirname(file_path)
+
+  @property
+  def display_name(self):
+    if self.name:
+      return self.name
+    if not self.is_file:
+      return self.url
+    all_urls = [p.url.rstrip('/') for p in self.page_set if p.is_file]
+    common_prefix = os.path.dirname(os.path.commonprefix(all_urls))
+    return self.url[len(common_prefix):].strip('/')
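Page subclasses typically override RunPageInteractions, as its docstring above suggests. A minimal hypothetical sketch, assuming telemetry is importable (the class name and interaction label are illustrative):

from telemetry import page as page_module


class ExampleScrollPage(page_module.Page):  # hypothetical benchmark page
  def RunPageInteractions(self, action_runner):
    # Wrap the gesture in a 'Gesture_Scroll' interaction record.
    with action_runner.CreateGestureInteraction('Scroll'):
      action_runner.ScrollPage()

# Instantiated as e.g. ExampleScrollPage('http://example.com', page_set=stories)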
diff --git a/catapult/telemetry/telemetry/page/action_runner.py b/catapult/telemetry/telemetry/page/action_runner.py
new file mode 100644
index 0000000..aa11bdd
--- /dev/null
+++ b/catapult/telemetry/telemetry/page/action_runner.py
@@ -0,0 +1,726 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import time
+import urlparse
+
+from telemetry.internal.actions.drag import DragAction
+from telemetry.internal.actions.javascript_click import ClickElementAction
+from telemetry.internal.actions.load_media import LoadMediaAction
+from telemetry.internal.actions.loop import LoopAction
+from telemetry.internal.actions.mouse_click import MouseClickAction
+from telemetry.internal.actions.navigate import NavigateAction
+from telemetry.internal.actions.page_action import GESTURE_SOURCE_DEFAULT
+from telemetry.internal.actions.page_action import SUPPORTED_GESTURE_SOURCES
+from telemetry.internal.actions.pinch import PinchAction
+from telemetry.internal.actions.play import PlayAction
+from telemetry.internal.actions.repaint_continuously import (
+    RepaintContinuouslyAction)
+from telemetry.internal.actions.repeatable_scroll import RepeatableScrollAction
+from telemetry.internal.actions.scroll import ScrollAction
+from telemetry.internal.actions.scroll_bounce import ScrollBounceAction
+from telemetry.internal.actions.seek import SeekAction
+from telemetry.internal.actions.swipe import SwipeAction
+from telemetry.internal.actions.tap import TapAction
+from telemetry.internal.actions.wait import WaitForElementAction
+from telemetry.web_perf import timeline_interaction_record
+
+
+class ActionRunner(object):
+
+  def __init__(self, tab, skip_waits=False):
+    self._tab = tab
+    self._skip_waits = skip_waits
+
+  @property
+  def tab(self):
+    """Returns the tab on which actions are performed."""
+    return self._tab
+
+  def _RunAction(self, action):
+    action.WillRunAction(self._tab)
+    action.RunAction(self._tab)
+
+  def CreateInteraction(self, label, repeatable=False):
+    """ Create an action.Interaction object that issues interaction record.
+
+    An interaction record is a labeled time period containing
+    interaction that developers care about. Each set of metrics
+    specified in flags will be calculated for this time period.
+
+    To mark the start of the interaction record, call the Begin() method on the
+    returned object. To mark the finish, call the End() method on it. Or better
+    yet, use the with statement to create an
+    interaction record that covers the actions in the with block.
+
+    e.g:
+      with action_runner.CreateInteraction('Animation-1'):
+        action_runner.TapElement(...)
+        action_runner.WaitForJavaScriptCondition(...)
+
+    Args:
+      label: A label for this particular interaction. This can be any
+          user-defined string, but must not contain '/'.
+      repeatable: Whether other interactions may use the same logical name
+          as this interaction. All interactions with the same logical name must
+          have the same flags.
+
+    Returns:
+      An instance of action_runner.Interaction
+    """
+    flags = []
+    if repeatable:
+      flags.append(timeline_interaction_record.REPEATABLE)
+
+    return Interaction(self._tab, label, flags)
+
+  def CreateGestureInteraction(self, label, repeatable=False):
+    """ Create an action.Interaction object that issues gesture-based
+    interaction record.
+
+    This is similar to a normal interaction record, but it will
+    auto-narrow the interaction time period to only include the
+    synthetic gesture event output by Chrome. This is typically used to
+    reduce noise in gesture-based analysis (e.g., analysis for a
+    swipe/scroll).
+
+    The interaction record label will be prepended with 'Gesture_'.
+
+    e.g:
+      with action_runner.CreateGestureInteraction('Scroll-1'):
+        action_runner.ScrollPage()
+
+    Args:
+      label: A label for this particular interaction. This can be any
+          user-defined string, but must not contain '/'.
+      repeatable: Whether other interactions may use the same logical name
+          as this interaction. All interactions with the same logical name must
+          have the same flags.
+
+    Returns:
+      An instance of action_runner.Interaction
+    """
+    return self.CreateInteraction('Gesture_' + label, repeatable)
+
+  def Navigate(self, url, script_to_evaluate_on_commit=None,
+               timeout_in_seconds=60):
+    """Navigates to |url|.
+
+    If |script_to_evaluate_on_commit| is given, the script source string will be
+    evaluated when the navigation is committed. This is after the context of
+    the page exists, but before any script on the page itself has executed.
+    """
+    if urlparse.urlparse(url).scheme == 'file':
+      url = self._tab.browser.platform.http_server.UrlOf(url[7:])
+
+    self._RunAction(NavigateAction(
+        url=url,
+        script_to_evaluate_on_commit=script_to_evaluate_on_commit,
+        timeout_in_seconds=timeout_in_seconds))
+
+  def WaitForNavigate(self, timeout_in_seconds_seconds=60):
+    start_time = time.time()
+    self._tab.WaitForNavigate(timeout_in_seconds_seconds)
+
+    time_left_in_seconds = (start_time + timeout_in_seconds_seconds
+                            - time.time())
+    time_left_in_seconds = max(0, time_left_in_seconds)
+    self._tab.WaitForDocumentReadyStateToBeInteractiveOrBetter(
+        time_left_in_seconds)
+
+  def ReloadPage(self):
+    """Reloads the page."""
+    self._tab.ExecuteJavaScript('window.location.reload()')
+    self._tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()
+
+  def ExecuteJavaScript(self, statement):
+    """Executes a given JavaScript expression. Does not return the result.
+
+    Example: runner.ExecuteJavaScript('var foo = 1;');
+
+    Args:
+      statement: The statement to execute (provided as string).
+
+    Raises:
+      EvaluationException: The statement failed to execute.
+    """
+    self._tab.ExecuteJavaScript(statement)
+
+  def EvaluateJavaScript(self, expression):
+    """Returns the evaluation result of the given JavaScript expression.
+
+    The evaluation results must be convertible to JSON. If the result
+    is not needed, use ExecuteJavaScript instead.
+
+    Example: href = runner.EvaluateJavaScript('document.location.href')
+
+    Args:
+      expression: The expression to evaluate (provided as string).
+
+    Raises:
+      EvaluationException: The expression failed to execute
+          or the evaluation result can not be JSON-ized.
+    """
+    return self._tab.EvaluateJavaScript(expression)
+
+  def Wait(self, seconds):
+    """Wait for the number of seconds specified.
+
+    Args:
+      seconds: The number of seconds to wait.
+    """
+    if not self._skip_waits:
+      time.sleep(seconds)
+
+  def WaitForJavaScriptCondition(self, condition, timeout_in_seconds=60):
+    """Wait for a JavaScript condition to become true.
+
+    Example: runner.WaitForJavaScriptCondition('window.foo == 10');
+
+    Args:
+      condition: The JavaScript condition (as string).
+      timeout_in_seconds: The timeout in seconds (default to 60).
+    """
+    self._tab.WaitForJavaScriptExpression(condition, timeout_in_seconds)
+
+  def WaitForElement(self, selector=None, text=None, element_function=None,
+                     timeout_in_seconds=60):
+    """Wait for an element to appear in the document.
+
+    The element may be selected via selector, text, or element_function.
+    Exactly one of these arguments must be specified.
+
+    Args:
+      selector: A CSS selector describing the element.
+      text: The element must contain this exact text.
+      element_function: A JavaScript function (as string) that is used
+          to retrieve the element. For example:
+          '(function() { return foo.element; })()'.
+      timeout_in_seconds: The timeout in seconds (default to 60).
+    """
+    self._RunAction(WaitForElementAction(
+        selector=selector, text=text, element_function=element_function,
+        timeout_in_seconds=timeout_in_seconds))
+
+  def TapElement(self, selector=None, text=None, element_function=None):
+    """Tap an element.
+
+    The element may be selected via selector, text, or element_function.
+    Exactly one of these arguments must be specified.
+
+    Args:
+      selector: A CSS selector describing the element.
+      text: The element must contain this exact text.
+      element_function: A JavaScript function (as string) that is used
+          to retrieve the element. For example:
+          '(function() { return foo.element; })()'.
+    """
+    self._RunAction(TapAction(
+        selector=selector, text=text, element_function=element_function))
+
+  def ClickElement(self, selector=None, text=None, element_function=None):
+    """Click an element.
+
+    The element may be selected via selector, text, or element_function.
+    Exactly one of these arguments must be specified.
+
+    Args:
+      selector: A CSS selector describing the element.
+      text: The element must contain this exact text.
+      element_function: A JavaScript function (as string) that is used
+          to retrieve the element. For example:
+          '(function() { return foo.element; })()'.
+    """
+    self._RunAction(ClickElementAction(
+        selector=selector, text=text, element_function=element_function))
+
+  def DragPage(self, left_start_ratio, top_start_ratio, left_end_ratio,
+               top_end_ratio, speed_in_pixels_per_second=800, use_touch=False,
+               selector=None, text=None, element_function=None):
+    """Perform a drag gesture on the page.
+
+    You should specify a start and an end point in ratios of page width and
+    height (see drag.js for full implementation).
+
+    Args:
+      left_start_ratio: The horizontal starting coordinate of the
+          gesture, as a ratio of the visible bounding rectangle for
+          document.body.
+      top_start_ratio: The vertical starting coordinate of the
+          gesture, as a ratio of the visible bounding rectangle for
+          document.body.
+      left_end_ratio: The horizontal ending coordinate of the
+          gesture, as a ratio of the visible bounding rectangle for
+          document.body.
+      top_end_ratio: The vertical ending coordinate of the
+          gesture, as a ratio of the visible bounding rectangle for
+          document.body.
+      speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
+      use_touch: Whether dragging should be done with touch input.
+    """
+    self._RunAction(DragAction(
+        left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
+        left_end_ratio=left_end_ratio, top_end_ratio=top_end_ratio,
+        speed_in_pixels_per_second=speed_in_pixels_per_second,
+        use_touch=use_touch, selector=selector, text=text,
+        element_function=element_function))
+
+  def PinchPage(self, left_anchor_ratio=0.5, top_anchor_ratio=0.5,
+                scale_factor=None, speed_in_pixels_per_second=800):
+    """Perform the pinch gesture on the page.
+
+    It computes the pinch gesture automatically based on the anchor
+    coordinate and the scale factor. The scale factor is the ratio of
+    of the final span and the initial span of the gesture.
+
+    Args:
+      left_anchor_ratio: The horizontal pinch anchor coordinate of the
+          gesture, as a ratio of the visible bounding rectangle for
+          document.body.
+      top_anchor_ratio: The vertical pinch anchor coordinate of the
+          gesture, as a ratio of the visible bounding rectangle for
+          document.body.
+      scale_factor: The ratio of the final span to the initial span.
+          The default scale factor is
+          3.0 / (window.outerWidth/window.innerWidth).
+      speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
+    """
+    self._RunAction(PinchAction(
+        left_anchor_ratio=left_anchor_ratio, top_anchor_ratio=top_anchor_ratio,
+        scale_factor=scale_factor,
+        speed_in_pixels_per_second=speed_in_pixels_per_second))
+
+  def PinchElement(self, selector=None, text=None, element_function=None,
+                   left_anchor_ratio=0.5, top_anchor_ratio=0.5,
+                   scale_factor=None, speed_in_pixels_per_second=800):
+    """Perform the pinch gesture on an element.
+
+    It computes the pinch gesture automatically based on the anchor
+    coordinate and the scale factor. The scale factor is the ratio of
+    the final span to the initial span of the gesture.
+
+    Args:
+      selector: A CSS selector describing the element.
+      text: The element must contain this exact text.
+      element_function: A JavaScript function (as string) that is used
+          to retrieve the element. For example:
+          'function() { return foo.element; }'.
+      left_anchor_ratio: The horizontal pinch anchor coordinate of the
+          gesture, as a ratio of the visible bounding rectangle for
+          the element.
+      top_anchor_ratio: The vertical pinch anchor coordinate of the
+          gesture, as a ratio of the visible bounding rectangle for
+          the element.
+      scale_factor: The ratio of the final span to the initial span.
+          The default scale factor is
+          3.0 / (window.outerWidth/window.innerWidth).
+      speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
+    """
+    self._RunAction(PinchAction(
+        selector=selector, text=text, element_function=element_function,
+        left_anchor_ratio=left_anchor_ratio, top_anchor_ratio=top_anchor_ratio,
+        scale_factor=scale_factor,
+        speed_in_pixels_per_second=speed_in_pixels_per_second))
+
+  def ScrollPage(self, left_start_ratio=0.5, top_start_ratio=0.5,
+                 direction='down', distance=None, distance_expr=None,
+                 speed_in_pixels_per_second=800, use_touch=False,
+                 synthetic_gesture_source=GESTURE_SOURCE_DEFAULT):
+    """Perform scroll gesture on the page.
+
+    You may specify distance or distance_expr, but not both. If
+    neither is specified, the default scroll distance is variable
+    depending on direction (see scroll.js for full implementation).
+
+    Args:
+      left_start_ratio: The horizontal starting coordinate of the
+          gesture, as a ratio of the visible bounding rectangle for
+          document.body.
+      top_start_ratio: The vertical starting coordinate of the
+          gesture, as a ratio of the visible bounding rectangle for
+          document.body.
+      direction: The direction of scroll, either 'left', 'right',
+          'up', 'down', 'upleft', 'upright', 'downleft', or 'downright'
+      distance: The distance to scroll (in pixel).
+      distance_expr: A JavaScript expression (as string) that can be
+          evaluated to compute scroll distance. Example:
+          'window.scrollTop' or '(function() { return crazyMath(); })()'.
+      speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
+      use_touch: Whether scrolling should be done with touch input.
+      synthetic_gesture_source: the source input device type for the
+          synthetic gesture: 'DEFAULT', 'TOUCH' or 'MOUSE'.
+    """
+    assert synthetic_gesture_source in SUPPORTED_GESTURE_SOURCES
+    self._RunAction(ScrollAction(
+        left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
+        direction=direction, distance=distance, distance_expr=distance_expr,
+        speed_in_pixels_per_second=speed_in_pixels_per_second,
+        use_touch=use_touch, synthetic_gesture_source=synthetic_gesture_source))
+
+  def RepeatableBrowserDrivenScroll(self, x_scroll_distance_ratio=0.0,
+                                    y_scroll_distance_ratio=0.5,
+                                    repeat_count=0,
+                                    repeat_delay_ms=250,
+                                    timeout=60):
+    """Perform a browser driven repeatable scroll gesture.
+
+    The scroll gesture is driven from the browser; this is useful because the
+    main thread often isn't responsive but the browser process usually is, so
+    the delay between the scroll gestures should be consistent.
+
+    Args:
+      x_scroll_distance_ratio: The horizontal length of the scroll as a fraction
+          of the screen width.
+      y_scroll_distance_ratio: The vertical length of the scroll as a fraction
+          of the screen height.
+      repeat_count: The number of additional times to repeat the gesture.
+      repeat_delay_ms: The delay in milliseconds between each scroll gesture.
+    """
+    self._RunAction(RepeatableScrollAction(
+        x_scroll_distance_ratio=x_scroll_distance_ratio,
+        y_scroll_distance_ratio=y_scroll_distance_ratio,
+        repeat_count=repeat_count,
+        repeat_delay_ms=repeat_delay_ms, timeout=timeout))
+
+  def ScrollElement(self, selector=None, text=None, element_function=None,
+                    left_start_ratio=0.5, top_start_ratio=0.5,
+                    direction='down', distance=None, distance_expr=None,
+                    speed_in_pixels_per_second=800, use_touch=False,
+                    synthetic_gesture_source=GESTURE_SOURCE_DEFAULT):
+    """Perform scroll gesture on the element.
+
+    The element may be selected via selector, text, or element_function.
+    Exactly one of these arguments must be specified.
+
+    You may specify distance or distance_expr, but not both. If
+    neither is specified, the default scroll distance is variable
+    depending on direction (see scroll.js for full implementation).
+
+    Args:
+      selector: A CSS selector describing the element.
+      text: The element must contain this exact text.
+      element_function: A JavaScript function (as string) that is used
+          to retrieve the element. For example:
+          'function() { return foo.element; }'.
+      left_start_ratio: The horizontal starting coordinate of the
+          gesture, as a ratio of the visible bounding rectangle for
+          the element.
+      top_start_ratio: The vertical starting coordinate of the
+          gesture, as a ratio of the visible bounding rectangle for
+          the element.
+      direction: The direction of scroll, either 'left', 'right',
+          'up', 'down', 'upleft', 'upright', 'downleft', or 'downright'.
+      distance: The distance to scroll (in pixels).
+      distance_expr: A JavaScript expression (as string) that can be
+          evaluated to compute scroll distance. Example:
+          'window.scrollTop' or '(function() { return crazyMath(); })()'.
+      speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
+      use_touch: Whether scrolling should be done with touch input.
+      synthetic_gesture_source: The source input device type for the
+          synthetic gesture: 'DEFAULT', 'TOUCH' or 'MOUSE'.
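+
+    Example (an illustrative sketch; '#scrollable-section' is a hypothetical
+    selector):
+      action_runner.ScrollElement(
+          selector='#scrollable-section', direction='down', distance=500,
+          use_touch=True)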
+    """
+    assert synthetic_gesture_source in SUPPORTED_GESTURE_SOURCES
+    self._RunAction(ScrollAction(
+        selector=selector, text=text, element_function=element_function,
+        left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
+        direction=direction, distance=distance, distance_expr=distance_expr,
+        speed_in_pixels_per_second=speed_in_pixels_per_second,
+        use_touch=use_touch, synthetic_gesture_source=synthetic_gesture_source))
+
+  def ScrollBouncePage(self, left_start_ratio=0.5, top_start_ratio=0.5,
+                       direction='down', distance=100,
+                       overscroll=10, repeat_count=10,
+                       speed_in_pixels_per_second=400):
+    """Perform scroll bounce gesture on the page.
+
+    This gesture scrolls the page by the number of pixels specified in
+    distance, in the given direction, followed by a scroll by
+    (distance + overscroll) pixels in the opposite direction.
+    The above gesture is repeated repeat_count times.
+
+    Args:
+      left_start_ratio: The horizontal starting coordinate of the
+          gesture, as a ratio of the visible bounding rectangle for
+          document.body.
+      top_start_ratio: The vertical starting coordinate of the
+          gesture, as a ratio of the visible bounding rectangle for
+          document.body.
+      direction: The direction of scroll, either 'left', 'right',
+          'up', 'down', 'upleft', 'upright', 'downleft', or 'downright'.
+      distance: The distance to scroll (in pixels).
+      overscroll: The number of additional pixels to scroll back, in
+          addition to the given distance.
+      repeat_count: The number of times to repeat the full gesture.
+      speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
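+
+    Example (an illustrative sketch; the argument values are arbitrary):
+      action_runner.ScrollBouncePage(
+          direction='down', distance=200, overscroll=20, repeat_count=5)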
+    """
+    self._RunAction(ScrollBounceAction(
+        left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
+        direction=direction, distance=distance,
+        overscroll=overscroll, repeat_count=repeat_count,
+        speed_in_pixels_per_second=speed_in_pixels_per_second))
+
+  def ScrollBounceElement(
+      self, selector=None, text=None, element_function=None,
+      left_start_ratio=0.5, top_start_ratio=0.5,
+      direction='down', distance=100,
+      overscroll=10, repeat_count=10,
+      speed_in_pixels_per_second=400):
+    """Perform scroll bounce gesture on the element.
+
+    This gesture scrolls the element by the number of pixels specified in
+    distance, in the given direction, followed by a scroll by
+    (distance + overscroll) pixels in the opposite direction.
+    The above gesture is repeated repeat_count times.
+
+    Args:
+      selector: A CSS selector describing the element.
+      text: The element must contain this exact text.
+      element_function: A JavaScript function (as string) that is used
+          to retrieve the element. For example:
+          'function() { return foo.element; }'.
+      left_start_ratio: The horizontal starting coordinate of the
+          gesture, as a ratio of the visible bounding rectangle for
+          document.body.
+      top_start_ratio: The vertical starting coordinate of the
+          gesture, as a ratio of the visible bounding rectangle for
+          document.body.
+      direction: The direction of scroll, either 'left', 'right',
+          'up', 'down', 'upleft', 'upright', 'downleft', or 'downright'.
+      distance: The distance to scroll (in pixels).
+      overscroll: The number of additional pixels to scroll back, in
+          addition to the given distance.
+      repeat_count: The number of times to repeat the full gesture.
+      speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
+    """
+    self._RunAction(ScrollBounceAction(
+        selector=selector, text=text, element_function=element_function,
+        left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
+        direction=direction, distance=distance,
+        overscroll=overscroll, repeat_count=repeat_count,
+        speed_in_pixels_per_second=speed_in_pixels_per_second))
+
+  def MouseClick(self, selector=None):
+    """Mouse click the given element.
+
+    Args:
+      selector: A CSS selector describing the element.
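+
+    Example (an illustrative sketch; '#submit-button' is a hypothetical
+    selector):
+      action_runner.MouseClick(selector='#submit-button')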
+    """
+    self._RunAction(MouseClickAction(selector=selector))
+
+  def SwipePage(self, left_start_ratio=0.5, top_start_ratio=0.5,
+                direction='left', distance=100, speed_in_pixels_per_second=800):
+    """Perform swipe gesture on the page.
+
+    Args:
+      left_start_ratio: The horizontal starting coordinate of the
+          gesture, as a ratio of the visible bounding rectangle for
+          document.body.
+      top_start_ratio: The vertical starting coordinate of the
+          gesture, as a ratio of the visible bounding rectangle for
+          document.body.
+      direction: The direction of swipe, either 'left', 'right',
+          'up', or 'down'.
+      distance: The distance to swipe (in pixels).
+      speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
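+
+    Example (an illustrative sketch; the argument values are arbitrary):
+      action_runner.SwipePage(direction='left', distance=200)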
+    """
+    self._RunAction(SwipeAction(
+        left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
+        direction=direction, distance=distance,
+        speed_in_pixels_per_second=speed_in_pixels_per_second))
+
+  def SwipeElement(self, selector=None, text=None, element_function=None,
+                   left_start_ratio=0.5, top_start_ratio=0.5,
+                   direction='left', distance=100,
+                   speed_in_pixels_per_second=800):
+    """Perform swipe gesture on the element.
+
+    The element may be selected via selector, text, or element_function.
+    Exactly one of these arguments must be specified.
+
+    Args:
+      selector: A CSS selector describing the element.
+      text: The element must contain this exact text.
+      element_function: A JavaScript function (as string) that is used
+          to retrieve the element. For example:
+          'function() { return foo.element; }'.
+      left_start_ratio: The horizontal starting coordinate of the
+          gesture, as a ratio of the visible bounding rectangle for
+          the element.
+      top_start_ratio: The vertical starting coordinate of the
+          gesture, as a ratio of the visible bounding rectangle for
+          the element.
+      direction: The direction of swipe, either 'left', 'right',
+          'up', or 'down'.
+      distance: The distance to swipe (in pixels).
+      speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
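+
+    Example (an illustrative sketch; the element text 'Next' is hypothetical):
+      action_runner.SwipeElement(
+          text='Next', direction='left', distance=150,
+          speed_in_pixels_per_second=400)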
+    """
+    self._RunAction(SwipeAction(
+        selector=selector, text=text, element_function=element_function,
+        left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
+        direction=direction, distance=distance,
+        speed_in_pixels_per_second=speed_in_pixels_per_second))
+
+  def LoadMedia(self, selector=None, event_timeout_in_seconds=0,
+                event_to_await='canplaythrough'):
+    """Invokes load() on media elements and awaits an event.
+
+    Args:
+      selector: A CSS selector describing the element. If none is
+          specified, load the first media element on the page. If the
+          selector matches more than 1 media element, all of them will
+          be loaded.
+      event_timeout_in_seconds: Maximum waiting time for the event to be fired.
+          0 means do not wait.
+      event_to_await: Which event to await. For example: 'canplaythrough' or
+          'loadedmetadata'.
+
+    Raises:
+      TimeoutException: If the maximum waiting time is exceeded.
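+
+    Example (an illustrative sketch; the selector 'video' is hypothetical):
+      action_runner.LoadMedia(
+          selector='video', event_timeout_in_seconds=10,
+          event_to_await='loadedmetadata')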
+    """
+    self._RunAction(LoadMediaAction(
+        selector=selector, timeout_in_seconds=event_timeout_in_seconds,
+        event_to_await=event_to_await))
+
+  def PlayMedia(self, selector=None,
+                playing_event_timeout_in_seconds=0,
+                ended_event_timeout_in_seconds=0):
+    """Invokes the "play" action on media elements (such as video).
+
+    Args:
+      selector: A CSS selector describing the element. If none is
+          specified, play the first media element on the page. If the
+          selector matches more than 1 media element, all of them will
+          be played.
+      playing_event_timeout_in_seconds: Maximum waiting time for the "playing"
+          event (dispatched when the media begins to play) to be fired.
+          0 means do not wait.
+      ended_event_timeout_in_seconds: Maximum waiting time for the "ended"
+          event (dispatched when playback completes) to be fired.
+          0 means do not wait.
+
+    Raises:
+      TimeoutException: If the maximum waiting time is exceeded.
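+
+    Example (an illustrative sketch; the selector 'video' is hypothetical):
+      action_runner.PlayMedia(
+          selector='video', playing_event_timeout_in_seconds=10)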
+    """
+    self._RunAction(PlayAction(
+        selector=selector,
+        playing_event_timeout_in_seconds=playing_event_timeout_in_seconds,
+        ended_event_timeout_in_seconds=ended_event_timeout_in_seconds))
+
+  def SeekMedia(self, seconds, selector=None, timeout_in_seconds=0,
+                log_time=True, label=''):
+    """Performs a seek action on media elements (such as video).
+
+    Args:
+      seconds: The media time to seek to.
+      selector: A CSS selector describing the element. If none is
+          specified, seek the first media element on the page. If the
+          selector matches more than 1 media element, all of them will
+          be seeked.
+      timeout_in_seconds: Maximum waiting time for the "seeked" event
+          (dispatched when the seeked operation completes) to be
+          fired.  0 means do not wait.
+      log_time: Whether to log the seek time for the perf
+          measurement. Useful when performing multiple seek.
+      label: A suffix string to name the seek perf measurement.
+
+    Raises:
+      TimeoutException: If the maximum waiting time is exceeded.
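+
+    Example (an illustrative sketch; the argument values are arbitrary):
+      action_runner.SeekMedia(
+          seconds=30, selector='video', timeout_in_seconds=5,
+          label='seek_to_30s')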
+    """
+    self._RunAction(SeekAction(
+        seconds=seconds, selector=selector,
+        timeout_in_seconds=timeout_in_seconds,
+        log_time=log_time, label=label))
+
+  def LoopMedia(self, loop_count, selector=None, timeout_in_seconds=None):
+    """Loops a media playback.
+
+    Args:
+      loop_count: The number of times to loop the playback.
+      selector: A CSS selector describing the element. If none is
+          specified, loop the first media element on the page. If the
+          selector matches more than 1 media element, all of them will
+          be looped.
+      timeout_in_seconds: Maximum waiting time for the looped playback to
+          complete. 0 means do not wait. None (the default) means to
+          wait loop_count * 60 seconds.
+
+    Raises:
+      TimeoutException: If the maximum waiting time is exceeded.
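+
+    Example (an illustrative sketch; the argument values are arbitrary):
+      action_runner.LoopMedia(loop_count=2, selector='video')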
+    """
+    self._RunAction(LoopAction(
+        loop_count=loop_count, selector=selector,
+        timeout_in_seconds=timeout_in_seconds))
+
+  def ForceGarbageCollection(self):
+    """Forces JavaScript garbage collection on the page."""
+    self._tab.CollectGarbage()
+
+  def SimulateMemoryPressureNotification(self, pressure_level):
+    """Simulate memory pressure notification.
+
+    Args:
+      pressure_level: 'moderate' or 'critical'.
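+
+    Example (an illustrative sketch):
+      action_runner.SimulateMemoryPressureNotification('critical')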
+    """
+    self._tab.browser.SimulateMemoryPressureNotification(pressure_level)
+
+  def PauseInteractive(self):
+    """Pause the page execution and wait for terminal interaction.
+
+    This is typically used for debugging. You can use this to pause
+    the page execution and inspect the browser state before
+    continuing.
+    """
+    raw_input("Interacting... Press Enter to continue.")
+
+  def RepaintContinuously(self, seconds):
+    """Continuously repaints the visible content.
+
+    It does this by requesting animation frames until the given number
+    of seconds has elapsed AND at least three RAFs have been
+    fired. Times out after max(60, seconds) seconds if fewer than three
+    RAFs were fired."""
+    self._RunAction(RepaintContinuouslyAction(
+        seconds=0 if self._skip_waits else seconds))
+
+
+class Interaction(object):
+
+  def __init__(self, action_runner, label, flags):
+    assert action_runner
+    assert label
+    assert isinstance(flags, list)
+
+    self._action_runner = action_runner
+    self._label = label
+    self._flags = flags
+    self._started = False
+
+  def __enter__(self):
+    self.Begin()
+    return self
+
+  def __exit__(self, exc_type, exc_value, traceback):
+    if exc_value is None:
+      self.End()
+    else:
+      logging.warning(
+          'Exception was raised in the with statement block; the end of '
+          'the interaction record is not marked.')
+
+  def Begin(self):
+    assert not self._started
+    self._started = True
+    self._action_runner.ExecuteJavaScript(
+        'console.time("%s");' %
+        timeline_interaction_record.GetJavaScriptMarker(
+        self._label, self._flags))
+
+  def End(self):
+    assert self._started
+    self._started = False
+    self._action_runner.ExecuteJavaScript(
+        'console.timeEnd("%s");' %
+        timeline_interaction_record.GetJavaScriptMarker(
+        self._label, self._flags))
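+
+
+# Illustrative usage of Interaction as a context manager (a sketch; the label
+# 'Gesture_ScrollAction' and the empty flags list are hypothetical values):
+#
+#   with Interaction(action_runner, label='Gesture_ScrollAction', flags=[]):
+#     action_runner.ScrollElement(selector='#content', direction='down',
+#                                 distance=500)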
diff --git a/catapult/telemetry/telemetry/page/page.py b/catapult/telemetry/telemetry/page/page.py
new file mode 100644
index 0000000..b838894
--- /dev/null
+++ b/catapult/telemetry/telemetry/page/page.py
@@ -0,0 +1,8 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# TODO(eakuefner): Refactor references to Page and kill this hack.
+from telemetry import page
+
+Page = page.Page
diff --git a/catapult/telemetry/telemetry/page/page_run_end_to_end_unittest.py b/catapult/telemetry/telemetry/page/page_run_end_to_end_unittest.py
new file mode 100644
index 0000000..2150bc2
--- /dev/null
+++ b/catapult/telemetry/telemetry/page/page_run_end_to_end_unittest.py
@@ -0,0 +1,738 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import re
+import shutil
+import sys
+import StringIO
+import tempfile
+import unittest
+
+from telemetry import benchmark
+from telemetry import story
+from telemetry.core import exceptions
+from telemetry.core import util
+from telemetry import decorators
+from telemetry.internal.browser import browser_finder
+from telemetry.internal.browser import user_agent
+from telemetry.internal.results import results_options
+from telemetry.internal import story_runner
+from telemetry.internal.testing.page_sets import example_domain
+from telemetry.internal.util import exception_formatter
+from telemetry.page import page as page_module
+from telemetry.page import page_test
+from telemetry.page import shared_page_state
+from telemetry.util import image_util
+from telemetry.testing import fakes
+from telemetry.testing import options_for_unittests
+from telemetry.testing import system_stub
+
+
+SIMPLE_CREDENTIALS_STRING = """
+{
+  "test": {
+    "username": "example",
+    "password": "asdf"
+  }
+}
+"""
+
+
+class DummyTest(page_test.PageTest):
+
+  def ValidateAndMeasurePage(self, *_):
+    pass
+
+
+def SetUpStoryRunnerArguments(options):
+  parser = options.CreateParser()
+  story_runner.AddCommandLineArgs(parser)
+  options.MergeDefaultValues(parser.get_default_values())
+  story_runner.ProcessCommandLineArgs(parser, options)
+
+
+class EmptyMetadataForTest(benchmark.BenchmarkMetadata):
+
+  def __init__(self):
+    super(EmptyMetadataForTest, self).__init__('')
+
+
+class StubCredentialsBackend(object):
+
+  def __init__(self, login_return_value):
+    self.did_get_login = False
+    self.did_get_login_no_longer_needed = False
+    self.login_return_value = login_return_value
+
+  @property
+  def credentials_type(self):
+    return 'test'
+
+  def LoginNeeded(self, *_):
+    self.did_get_login = True
+    return self.login_return_value
+
+  def LoginNoLongerNeeded(self, _):
+    self.did_get_login_no_longer_needed = True
+
+
+def GetSuccessfulPageRuns(results):
+  return [run for run in results.all_page_runs if run.ok or run.skipped]
+
+
+def CaptureStderr(func, output_buffer):
+  def wrapper(*args, **kwargs):
+    original_stderr, sys.stderr = sys.stderr, output_buffer
+    try:
+      return func(*args, **kwargs)
+    finally:
+      sys.stderr = original_stderr
+  return wrapper
+
+
+# TODO: remove test cases that use real browsers and replace with a
+# story_runner or shared_page_state unittest that tests the same logic.
+class ActualPageRunEndToEndTests(unittest.TestCase):
+  # TODO(nduca): Move the basic "test failed, test succeeded" tests from
+  # page_test_unittest to here.
+
+  def setUp(self):
+    self._story_runner_logging_stub = None
+    self._formatted_exception_buffer = StringIO.StringIO()
+    self._original_formatter = exception_formatter.PrintFormattedException
+
+  def tearDown(self):
+    self.RestoreExceptionFormatter()
+
+  def CaptureFormattedException(self):
+    exception_formatter.PrintFormattedException = CaptureStderr(
+        exception_formatter.PrintFormattedException,
+        self._formatted_exception_buffer)
+    self._story_runner_logging_stub = system_stub.Override(
+        story_runner, ['logging'])
+
+  @property
+  def formatted_exception(self):
+    return self._formatted_exception_buffer.getvalue()
+
+  def RestoreExceptionFormatter(self):
+    exception_formatter.PrintFormattedException = self._original_formatter
+    if self._story_runner_logging_stub:
+      self._story_runner_logging_stub.Restore()
+      self._story_runner_logging_stub = None
+
+  def assertFormattedExceptionIsEmpty(self):
+    self.longMessage = False
+    self.assertEquals(
+        '', self.formatted_exception,
+        msg='Expected empty formatted exception: actual=%s' % '\n   > '.join(
+            self.formatted_exception.split('\n')))
+
+  def assertFormattedExceptionOnlyHas(self, expected_exception_name):
+    self.longMessage = True
+    actual_exception_names = re.findall(r'^Traceback.*?^(\w+)',
+                                        self.formatted_exception,
+                                        re.DOTALL | re.MULTILINE)
+    self.assertEquals([expected_exception_name], actual_exception_names,
+                      msg='Full formatted exception: %s' % '\n   > '.join(
+                          self.formatted_exception.split('\n')))
+
+  def testRaiseBrowserGoneExceptionFromRestartBrowserBeforeEachPage(self):
+    self.CaptureFormattedException()
+    story_set = story.StorySet()
+    story_set.AddStory(page_module.Page(
+        'file://blank.html', story_set, base_dir=util.GetUnittestDataDir()))
+    story_set.AddStory(page_module.Page(
+        'file://blank.html', story_set, base_dir=util.GetUnittestDataDir()))
+    story_set.AddStory(page_module.Page(
+        'file://blank.html', story_set, base_dir=util.GetUnittestDataDir()))
+
+    class Test(page_test.PageTest):
+
+      def __init__(self, *args):
+        super(Test, self).__init__(
+            *args, needs_browser_restart_after_each_page=True)
+        self.run_count = 0
+
+      def RestartBrowserBeforeEachPage(self):
+        # This will only be called twice with 3 pages.
+        old_run_count = self.run_count
+        self.run_count += 1
+        if old_run_count == 1:
+          raise exceptions.BrowserGoneException(None)
+        return self._needs_browser_restart_after_each_page
+
+      def ValidateAndMeasurePage(self, page, tab, results):
+        pass
+
+    options = options_for_unittests.GetCopy()
+    options.output_formats = ['none']
+    options.suppress_gtest_report = True
+    test = Test()
+    SetUpStoryRunnerArguments(options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    story_runner.Run(test, story_set, options, results)
+    self.assertEquals(2, test.run_count)
+    self.assertEquals(2, len(GetSuccessfulPageRuns(results)))
+    self.assertEquals(1, len(results.failures))
+    self.assertFormattedExceptionIsEmpty()
+
+  def testNeedsBrowserRestartAfterEachPage(self):
+    self.CaptureFormattedException()
+    story_set = story.StorySet()
+    story_set.AddStory(page_module.Page(
+        'file://blank.html', story_set, base_dir=util.GetUnittestDataDir()))
+    story_set.AddStory(page_module.Page(
+        'file://blank.html', story_set, base_dir=util.GetUnittestDataDir()))
+
+    class Test(page_test.PageTest):
+
+      def __init__(self, *args, **kwargs):
+        super(Test, self).__init__(*args, **kwargs)
+        self.browser_starts = 0
+
+      def DidStartBrowser(self, *args):
+        super(Test, self).DidStartBrowser(*args)
+        self.browser_starts += 1
+
+      def ValidateAndMeasurePage(self, page, tab, results):
+        pass
+
+    options = options_for_unittests.GetCopy()
+    options.output_formats = ['none']
+    options.suppress_gtest_report = True
+    test = Test(needs_browser_restart_after_each_page=True)
+    SetUpStoryRunnerArguments(options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    story_runner.Run(test, story_set, options, results)
+    self.assertEquals(2, len(GetSuccessfulPageRuns(results)))
+    self.assertEquals(2, test.browser_starts)
+    self.assertFormattedExceptionIsEmpty()
+
+  def testCredentialsWhenLoginFails(self):
+    self.CaptureFormattedException()
+    credentials_backend = StubCredentialsBackend(login_return_value=False)
+    did_run = self.runCredentialsTest(credentials_backend)
+    assert credentials_backend.did_get_login
+    assert not credentials_backend.did_get_login_no_longer_needed
+    assert not did_run
+    self.assertFormattedExceptionIsEmpty()
+
+  def testCredentialsWhenLoginSucceeds(self):
+    credentials_backend = StubCredentialsBackend(login_return_value=True)
+    did_run = self.runCredentialsTest(credentials_backend)
+    assert credentials_backend.did_get_login
+    assert credentials_backend.did_get_login_no_longer_needed
+    assert did_run
+
+  def runCredentialsTest(self, credentials_backend):
+    story_set = story.StorySet()
+    did_run = [False]
+
+    try:
+      with tempfile.NamedTemporaryFile(delete=False) as f:
+        page = page_module.Page(
+            'file://blank.html', story_set, base_dir=util.GetUnittestDataDir(),
+            credentials_path=f.name)
+        page.credentials = "test"
+        story_set.AddStory(page)
+
+        f.write(SIMPLE_CREDENTIALS_STRING)
+
+      class TestThatInstallsCredentialsBackend(page_test.PageTest):
+
+        def __init__(self, credentials_backend):
+          super(TestThatInstallsCredentialsBackend, self).__init__()
+          self._credentials_backend = credentials_backend
+
+        def DidStartBrowser(self, browser):
+          browser.credentials.AddBackend(self._credentials_backend)
+
+        def ValidateAndMeasurePage(self, *_):
+          did_run[0] = True
+
+      test = TestThatInstallsCredentialsBackend(credentials_backend)
+      options = options_for_unittests.GetCopy()
+      options.output_formats = ['none']
+      options.suppress_gtest_report = True
+      SetUpStoryRunnerArguments(options)
+      results = results_options.CreateResults(EmptyMetadataForTest(), options)
+      story_runner.Run(test, story_set, options, results)
+    finally:
+      os.remove(f.name)
+
+    return did_run[0]
+
+  @decorators.Disabled('chromeos')  # crbug.com/483212
+  def testUserAgent(self):
+    story_set = story.StorySet()
+    page = page_module.Page(
+        'file://blank.html', story_set, base_dir=util.GetUnittestDataDir(),
+        shared_page_state_class=shared_page_state.SharedTabletPageState)
+    story_set.AddStory(page)
+
+    class TestUserAgent(page_test.PageTest):
+      def ValidateAndMeasurePage(self, page, tab, results):
+        del page, results  # unused
+        actual_user_agent = tab.EvaluateJavaScript('window.navigator.userAgent')
+        expected_user_agent = user_agent.UA_TYPE_MAPPING['tablet']
+        assert actual_user_agent.strip() == expected_user_agent
+
+        # This is so we can check later that the test actually made it into this
+        # function. Previously it was timing out before even getting here, which
+        # should fail, but since it skipped all the asserts, it slipped by.
+        self.hasRun = True  # pylint: disable=attribute-defined-outside-init
+
+    test = TestUserAgent()
+    options = options_for_unittests.GetCopy()
+    options.output_formats = ['none']
+    options.suppress_gtest_report = True
+    SetUpStoryRunnerArguments(options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    story_runner.Run(test, story_set, options, results)
+
+    self.assertTrue(hasattr(test, 'hasRun') and test.hasRun)
+
+  # Ensure that story_runner forces exactly 1 tab before running a page.
+  @decorators.Enabled('has tabs')
+  def testOneTab(self):
+    story_set = story.StorySet()
+    page = page_module.Page(
+        'file://blank.html', story_set, base_dir=util.GetUnittestDataDir())
+    story_set.AddStory(page)
+
+    class TestOneTab(page_test.PageTest):
+
+      def DidStartBrowser(self, browser):
+        browser.tabs.New()
+
+      def ValidateAndMeasurePage(self, page, tab, results):
+        del page, results  # unused
+        assert len(tab.browser.tabs) == 1
+
+    test = TestOneTab()
+    options = options_for_unittests.GetCopy()
+    options.output_formats = ['none']
+    options.suppress_gtest_report = True
+    SetUpStoryRunnerArguments(options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    story_runner.Run(test, story_set, options, results)
+
+  # Ensure that story_runner allows >1 tab for multi-tab test.
+  @decorators.Enabled('has tabs')
+  def testMultipleTabsOkayForMultiTabTest(self):
+    story_set = story.StorySet()
+    page = page_module.Page(
+        'file://blank.html', story_set, base_dir=util.GetUnittestDataDir())
+    story_set.AddStory(page)
+
+    class TestMultiTabs(page_test.PageTest):
+      def TabForPage(self, page, browser):
+        del page  # unused
+        return browser.tabs.New()
+
+      def ValidateAndMeasurePage(self, page, tab, results):
+        del page, results  # unused
+        assert len(tab.browser.tabs) == 2
+
+    test = TestMultiTabs()
+    options = options_for_unittests.GetCopy()
+    options.output_formats = ['none']
+    options.suppress_gtest_report = True
+    SetUpStoryRunnerArguments(options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    story_runner.Run(test, story_set, options, results)
+
+  # Ensure that story_runner allows the test to customize the browser
+  # before it launches.
+  def testBrowserBeforeLaunch(self):
+    story_set = story.StorySet()
+    page = page_module.Page(
+        'file://blank.html', story_set, base_dir=util.GetUnittestDataDir())
+    story_set.AddStory(page)
+
+    class TestBeforeLaunch(page_test.PageTest):
+
+      def __init__(self):
+        super(TestBeforeLaunch, self).__init__()
+        self._did_call_will_start = False
+        self._did_call_did_start = False
+
+      def WillStartBrowser(self, platform):
+        self._did_call_will_start = True
+        # TODO(simonjam): Test that the profile is available.
+
+      def DidStartBrowser(self, browser):
+        assert self._did_call_will_start
+        self._did_call_did_start = True
+
+      def ValidateAndMeasurePage(self, *_):
+        assert self._did_call_did_start
+
+    test = TestBeforeLaunch()
+    options = options_for_unittests.GetCopy()
+    options.output_formats = ['none']
+    options.suppress_gtest_report = True
+    SetUpStoryRunnerArguments(options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    story_runner.Run(test, story_set, options, results)
+
+  def testRunPageWithStartupUrl(self):
+    num_times_browser_closed = [0]
+
+    class TestSharedState(shared_page_state.SharedPageState):
+
+      def _StopBrowser(self):
+        super(TestSharedState, self)._StopBrowser()
+        num_times_browser_closed[0] += 1
+
+    story_set = story.StorySet()
+    page = page_module.Page(
+        'file://blank.html', story_set, base_dir=util.GetUnittestDataDir(),
+        startup_url='about:blank', shared_page_state_class=TestSharedState)
+    story_set.AddStory(page)
+
+    class Measurement(page_test.PageTest):
+
+      def __init__(self):
+        super(Measurement, self).__init__()
+
+      def ValidateAndMeasurePage(self, page, tab, results):
+        del page, tab, results  # not used
+
+    options = options_for_unittests.GetCopy()
+    options.page_repeat = 2
+    options.output_formats = ['none']
+    options.suppress_gtest_report = True
+    if not browser_finder.FindBrowser(options):
+      return
+    test = Measurement()
+    SetUpStoryRunnerArguments(options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    story_runner.Run(test, story_set, options, results)
+    self.assertEquals('about:blank', options.browser_options.startup_url)
+    # _StopBrowser should be called 2 times:
+    # 1. browser restarts after page 1 run
+    # 2. in the TearDownState after all the pages have run.
+    self.assertEquals(num_times_browser_closed[0], 2)
+
+  # Ensure that story_runner calls cleanUp when a page run fails.
+  def testCleanUpPage(self):
+    story_set = story.StorySet()
+    page = page_module.Page(
+        'file://blank.html', story_set, base_dir=util.GetUnittestDataDir())
+    story_set.AddStory(page)
+
+    class Test(page_test.PageTest):
+
+      def __init__(self):
+        super(Test, self).__init__()
+        self.did_call_clean_up = False
+
+      def ValidateAndMeasurePage(self, *_):
+        raise page_test.Failure
+
+      def DidRunPage(self, platform):
+        del platform  # unused
+        self.did_call_clean_up = True
+
+    test = Test()
+    options = options_for_unittests.GetCopy()
+    options.output_formats = ['none']
+    options.suppress_gtest_report = True
+    SetUpStoryRunnerArguments(options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    story_runner.Run(test, story_set, options, results)
+    assert test.did_call_clean_up
+
+  # Ensure the test is skipped if the shared state cannot run on the browser.
+  def testSharedPageStateCannotRunOnBrowser(self):
+    story_set = story.StorySet()
+
+    class UnrunnableSharedState(shared_page_state.SharedPageState):
+      def CanRunOnBrowser(self, browser_info, page):
+        del browser_info, page  # unused
+        return False
+
+      def ValidateAndMeasurePage(self, _):
+        pass
+
+    story_set.AddStory(page_module.Page(
+        url='file://blank.html', page_set=story_set,
+        base_dir=util.GetUnittestDataDir(),
+        shared_page_state_class=UnrunnableSharedState))
+
+    class Test(page_test.PageTest):
+
+      def __init__(self, *args, **kwargs):
+        super(Test, self).__init__(*args, **kwargs)
+        self.will_navigate_to_page_called = False
+
+      def ValidateAndMeasurePage(self, *args):
+        del args  # unused
+        raise Exception('Exception should not be thrown')
+
+      def WillNavigateToPage(self, page, tab):
+        del page, tab  # unused
+        self.will_navigate_to_page_called = True
+
+    test = Test()
+    options = options_for_unittests.GetCopy()
+    options.output_formats = ['none']
+    options.suppress_gtest_report = True
+    SetUpStoryRunnerArguments(options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    story_runner.Run(test, story_set, options, results)
+    self.assertFalse(test.will_navigate_to_page_called)
+    self.assertEquals(1, len(GetSuccessfulPageRuns(results)))
+    self.assertEquals(1, len(results.skipped_values))
+    self.assertEquals(0, len(results.failures))
+
+  def testRunPageWithProfilingFlag(self):
+    story_set = story.StorySet()
+    story_set.AddStory(page_module.Page(
+        'file://blank.html', story_set, base_dir=util.GetUnittestDataDir()))
+
+    class Measurement(page_test.PageTest):
+
+      def ValidateAndMeasurePage(self, page, tab, results):
+        pass
+
+    options = options_for_unittests.GetCopy()
+    options.output_formats = ['none']
+    options.suppress_gtest_report = True
+    options.reset_results = None
+    options.upload_results = None
+    options.results_label = None
+    options.output_dir = tempfile.mkdtemp()
+    options.profiler = 'trace'
+    try:
+      SetUpStoryRunnerArguments(options)
+      results = results_options.CreateResults(EmptyMetadataForTest(), options)
+      story_runner.Run(Measurement(), story_set, options, results)
+      self.assertEquals(1, len(GetSuccessfulPageRuns(results)))
+      self.assertEquals(0, len(results.failures))
+      self.assertEquals(0, len(results.all_page_specific_values))
+      self.assertTrue(os.path.isfile(
+          os.path.join(options.output_dir, 'blank_html.zip')))
+    finally:
+      shutil.rmtree(options.output_dir)
+
+  def _RunPageTestThatRaisesAppCrashException(self, test, max_failures):
+    class TestPage(page_module.Page):
+
+      def RunNavigateSteps(self, _):
+        raise exceptions.AppCrashException
+
+    story_set = story.StorySet()
+    for _ in range(5):
+      story_set.AddStory(
+          TestPage('file://blank.html', story_set,
+                   base_dir=util.GetUnittestDataDir()))
+    options = options_for_unittests.GetCopy()
+    options.output_formats = ['none']
+    options.suppress_gtest_report = True
+    SetUpStoryRunnerArguments(options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    story_runner.Run(test, story_set, options, results,
+                     max_failures=max_failures)
+    return results
+
+  def testSingleTabMeansCrashWillCauseFailureValue(self):
+    self.CaptureFormattedException()
+
+    class SingleTabTest(page_test.PageTest):
+      # Test is not multi-tab because it does not override TabForPage.
+
+      def ValidateAndMeasurePage(self, *_):
+        pass
+
+    test = SingleTabTest()
+    results = self._RunPageTestThatRaisesAppCrashException(
+        test, max_failures=1)
+    self.assertEquals([], GetSuccessfulPageRuns(results))
+    self.assertEquals(2, len(results.failures))  # max_failures + 1
+    self.assertFormattedExceptionIsEmpty()
+
+  @decorators.Enabled('has tabs')
+  def testMultipleTabsMeansCrashRaises(self):
+    self.CaptureFormattedException()
+
+    class MultipleTabsTest(page_test.PageTest):
+      # Test *is* multi-tab because it overrides TabForPage.
+
+      def TabForPage(self, page, browser):
+        return browser.tabs.New()
+
+      def ValidateAndMeasurePage(self, *_):
+        pass
+
+    test = MultipleTabsTest()
+    with self.assertRaises(page_test.MultiTabTestAppCrashError):
+      self._RunPageTestThatRaisesAppCrashException(test, max_failures=1)
+    self.assertFormattedExceptionOnlyHas('AppCrashException')
+
+  def testWebPageReplay(self):
+    story_set = example_domain.ExampleDomainPageSet()
+    body = []
+
+    class TestWpr(page_test.PageTest):
+      def ValidateAndMeasurePage(self, page, tab, results):
+        del page, results  # unused
+        body.append(tab.EvaluateJavaScript('document.body.innerText'))
+
+      def DidRunPage(self, platform):
+        # Force the replay server to restart between pages; this verifies that
+        # the restart mechanism works.
+        platform.network_controller.StopReplay()
+
+    test = TestWpr()
+    options = options_for_unittests.GetCopy()
+    options.output_formats = ['none']
+    options.suppress_gtest_report = True
+    SetUpStoryRunnerArguments(options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+
+    story_runner.Run(test, story_set, options, results)
+
+    self.longMessage = True
+    self.assertIn('Example Domain', body[0],
+                  msg='URL: %s' % story_set.stories[0].url)
+    self.assertIn('Example Domain', body[1],
+                  msg='URL: %s' % story_set.stories[1].url)
+
+    self.assertEquals(2, len(GetSuccessfulPageRuns(results)))
+    self.assertEquals(0, len(results.failures))
+
+  def testScreenShotTakenForFailedPage(self):
+    self.CaptureFormattedException()
+    platform_screenshot_supported = [False]
+    tab_screenshot_supported = [False]
+    chrome_version_screen_shot = [None]
+
+    class FailingTestPage(page_module.Page):
+
+      def RunNavigateSteps(self, action_runner):
+        action_runner.Navigate(self._url)
+        platform_screenshot_supported[0] = (
+            action_runner.tab.browser.platform.CanTakeScreenshot)
+        tab_screenshot_supported[0] = action_runner.tab.screenshot_supported
+        if not platform_screenshot_supported[0] and tab_screenshot_supported[0]:
+          chrome_version_screen_shot[0] = action_runner.tab.Screenshot()
+        raise exceptions.AppCrashException
+
+    story_set = story.StorySet()
+    story_set.AddStory(page_module.Page('file://blank.html', story_set))
+    failing_page = FailingTestPage('chrome://version', story_set)
+    story_set.AddStory(failing_page)
+    options = options_for_unittests.GetCopy()
+    options.output_formats = ['none']
+    options.browser_options.take_screenshot_for_failed_page = True
+    options.suppress_gtest_report = True
+    SetUpStoryRunnerArguments(options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    story_runner.Run(DummyTest(), story_set, options, results,
+                     max_failures=2)
+    self.assertEquals(1, len(results.failures))
+    if not platform_screenshot_supported[0] and tab_screenshot_supported[0]:
+      self.assertEquals(1, len(results.pages_to_profiling_files))
+      self.assertIn(failing_page,
+                    results.pages_to_profiling_files)
+      screenshot_file_path = (
+          results.pages_to_profiling_files[failing_page][0].GetAbsPath())
+      try:
+        actual_screenshot = image_util.FromPngFile(screenshot_file_path)
+        self.assertEquals(image_util.Pixels(chrome_version_screen_shot[0]),
+                          image_util.Pixels(actual_screenshot))
+      finally:  # Must clean up the screenshot file if it exists.
+        os.remove(screenshot_file_path)
+
+  def testNoProfilingFilesCreatedForPageByDefault(self):
+    self.CaptureFormattedException()
+
+    class FailingTestPage(page_module.Page):
+
+      def RunNavigateSteps(self, action_runner):
+        action_runner.Navigate(self._url)
+        raise exceptions.AppCrashException
+
+    story_set = story.StorySet()
+    story_set.AddStory(page_module.Page('file://blank.html', story_set))
+    failing_page = FailingTestPage('chrome://version', story_set)
+    story_set.AddStory(failing_page)
+    options = options_for_unittests.GetCopy()
+    options.output_formats = ['none']
+    options.suppress_gtest_report = True
+    SetUpStoryRunnerArguments(options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    story_runner.Run(DummyTest(), story_set, options, results,
+                     max_failures=2)
+    self.assertEquals(1, len(results.failures))
+    self.assertEquals(0, len(results.pages_to_profiling_files))
+
+
+class FakePageRunEndToEndTests(unittest.TestCase):
+
+  def setUp(self):
+    self.options = fakes.CreateBrowserFinderOptions()
+    self.options.output_formats = ['none']
+    self.options.suppress_gtest_report = True
+    SetUpStoryRunnerArguments(self.options)
+
+  def testNoScreenShotTakenForFailedPageDueToNoSupport(self):
+    self.options.browser_options.take_screenshot_for_failed_page = True
+
+    class FailingTestPage(page_module.Page):
+
+      def RunNavigateSteps(self, action_runner):
+        raise exceptions.AppCrashException
+
+    story_set = story.StorySet()
+    story_set.AddStory(page_module.Page('file://blank.html', story_set))
+    failing_page = FailingTestPage('chrome://version', story_set)
+    story_set.AddStory(failing_page)
+    results = results_options.CreateResults(
+        EmptyMetadataForTest(), self.options)
+    story_runner.Run(DummyTest(), story_set, self.options, results,
+                     max_failures=2)
+    self.assertEquals(1, len(results.failures))
+    self.assertEquals(0, len(results.pages_to_profiling_files))
+
+  def testScreenShotTakenForFailedPageOnSupportedPlatform(self):
+    fake_platform = self.options.fake_possible_browser.returned_browser.platform
+    expected_png_base64 = """
+ iVBORw0KGgoAAAANSUhEUgAAAAIAAAACCAIAAAD91
+ JpzAAAAFklEQVR4Xg3EAQ0AAABAMP1LY3YI7l8l6A
+ T8tgwbJAAAAABJRU5ErkJggg==
+"""
+    fake_platform.screenshot_png_data = expected_png_base64
+    self.options.browser_options.take_screenshot_for_failed_page = True
+
+    class FailingTestPage(page_module.Page):
+
+      def RunNavigateSteps(self, action_runner):
+        raise exceptions.AppCrashException
+    story_set = story.StorySet()
+    story_set.AddStory(page_module.Page('file://blank.html', story_set))
+    failing_page = FailingTestPage('chrome://version', story_set)
+    story_set.AddStory(failing_page)
+
+    results = results_options.CreateResults(
+        EmptyMetadataForTest(), self.options)
+    story_runner.Run(DummyTest(), story_set, self.options, results,
+                     max_failures=2)
+    self.assertEquals(1, len(results.failures))
+    self.assertEquals(1, len(results.pages_to_profiling_files))
+    self.assertIn(failing_page,
+                  results.pages_to_profiling_files)
+    screenshot_file_path = (
+        results.pages_to_profiling_files[failing_page][0].GetAbsPath())
+    try:
+      actual_screenshot_img = image_util.FromPngFile(screenshot_file_path)
+      self.assertTrue(image_util.AreEqual(
+                      image_util.FromBase64Png(expected_png_base64),
+                      actual_screenshot_img))
+    finally:  # Must clean up the screenshot file if it exists.
+      os.remove(screenshot_file_path)
diff --git a/catapult/telemetry/telemetry/page/page_test.py b/catapult/telemetry/telemetry/page/page_test.py
new file mode 100644
index 0000000..bb2e5ce
--- /dev/null
+++ b/catapult/telemetry/telemetry/page/page_test.py
@@ -0,0 +1,182 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+
+from telemetry.core import exceptions
+from telemetry.page import action_runner as action_runner_module
+
+# Export story_test.Failure to this page_test module
+from telemetry.web_perf.story_test import Failure
+
+
+class TestNotSupportedOnPlatformError(Exception):
+  """PageTest Exception raised when a required feature is unavailable.
+
+  The feature required to run the test could be part of the platform,
+  hardware configuration, or browser.
+  """
+
+
+class MultiTabTestAppCrashError(Exception):
+  """PageTest Exception raised after browser or tab crash for multi-tab tests.
+
+  Used to abort the test rather than try to recover from an unknown state.
+  """
+
+
+class MeasurementFailure(Failure):
+  """PageTest Exception raised when an undesired but designed-for problem."""
+
+
+class PageTest(object):
+  """A class styled on unittest.TestCase for creating page-specific tests.
+
+  Tests should override ValidateAndMeasurePage to perform test
+  validation and page measurement as necessary.
+
+     class BodyChildElementMeasurement(PageTest):
+       def ValidateAndMeasurePage(self, page, tab, results):
+         body_child_count = tab.EvaluateJavaScript(
+             'document.body.children.length')
+         results.AddValue(scalar.ScalarValue(
+             page, 'body_children', 'count', body_child_count))
+  """
+
+  def __init__(self,
+               needs_browser_restart_after_each_page=False,
+               clear_cache_before_each_run=False):
+    super(PageTest, self).__init__()
+
+    self.options = None
+    self._needs_browser_restart_after_each_page = (
+        needs_browser_restart_after_each_page)
+    self._clear_cache_before_each_run = clear_cache_before_each_run
+    self._close_tabs_before_run = True
+
+  @property
+  def is_multi_tab_test(self):
+    """Returns True if the test opens multiple tabs.
+
+    If the test overrides TabForPage, it is deemed a multi-tab test.
+    Multi-tab tests do not retry after tab or browser crashes, whereas
+    single-tab tests do. That is because the state of multi-tab tests
+    (e.g., how many tabs are open, etc.) is unknown after crashes.
+    """
+    return self.TabForPage.__func__ is not PageTest.TabForPage.__func__
+
+  @property
+  def clear_cache_before_each_run(self):
+    """When set to True, the browser's disk and memory cache will be cleared
+    before each run."""
+    return self._clear_cache_before_each_run
+
+  @property
+  def close_tabs_before_run(self):
+    """When set to True, all tabs are closed before running the test for the
+    first time."""
+    return self._close_tabs_before_run
+
+  @close_tabs_before_run.setter
+  def close_tabs_before_run(self, close_tabs):
+    self._close_tabs_before_run = close_tabs
+
+  def RestartBrowserBeforeEachPage(self):
+    """ Should the browser be restarted for the page?
+
+    This returns true if the test needs to unconditionally restart the
+    browser for each page. It may be called before the browser is started.
+    """
+    return self._needs_browser_restart_after_each_page
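+
+  # Illustrative sketch of a test that restarts the browser between pages
+  # (the RestartEveryPageTest class and its body are hypothetical):
+  #
+  #   class RestartEveryPageTest(PageTest):
+  #     def __init__(self):
+  #       super(RestartEveryPageTest, self).__init__(
+  #           needs_browser_restart_after_each_page=True)
+  #
+  #     def ValidateAndMeasurePage(self, page, tab, results):
+  #       pass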
+
+  def StopBrowserAfterPage(self, browser, page):
+    """Should the browser be stopped after the page is run?
+
+    This is called after a page is run to decide whether the browser needs to
+    be stopped to clean up its state. If it is stopped, then it will be
+    restarted to run the next page.
+
+    A test that overrides this can look at both the page and the browser to
+    decide whether it needs to stop the browser.
+    """
+    del browser, page  # unused
+    return False
+
+  def CustomizeBrowserOptions(self, options):
+    """Override to add test-specific options to the BrowserOptions object"""
+
+  def WillStartBrowser(self, platform):
+    """Override to manipulate the browser environment before it launches."""
+
+  def DidStartBrowser(self, browser):
+    """Override to customize the browser right after it has launched."""
+
+  def SetOptions(self, options):
+    """Sets the BrowserFinderOptions instance to use."""
+    self.options = options
+
+  def WillNavigateToPage(self, page, tab):
+    """Override to do operations before the page is navigated, notably Telemetry
+    will already have performed the following operations on the browser before
+    calling this function:
+    * Ensure only one tab is open.
+    * Call WaitForDocumentReadyStateToComplete on the tab."""
+
+  def DidNavigateToPage(self, page, tab):
+    """Override to do operations right after the page is navigated and after
+    all waiting for completion has occurred."""
+
+  def DidRunPage(self, platform):
+    """Called after the test run method was run, even if it failed."""
+
+  def TabForPage(self, page, browser):   # pylint: disable=unused-argument
+    """Override to select a different tab for the page.  For instance, to
+    create a new tab for every page, return browser.tabs.New()."""
+    try:
+      return browser.tabs[0]
+    # The tab may have gone away in some cases, so we create a new tab and retry
+    # (See crbug.com/496280)
+    except exceptions.DevtoolsTargetCrashException as e:
+      logging.error('Tab may have crashed: %s' % str(e))
+      browser.tabs.New()
+      # See comment in shared_page_state.WillRunStory for why this waiting
+      # is needed.
+      browser.tabs[0].WaitForDocumentReadyStateToBeComplete()
+      return browser.tabs[0]
+
+  def ValidateAndMeasurePage(self, page, tab, results):
+    """Override to check test assertions and perform measurement.
+
+    When adding measurement results, call results.AddValue(...) for
+    each result. Raise an exception or add a failure.FailureValue on
+    failure. page_test.py also provides several base exception classes
+    to use.
+
+    Prefer metric value names that follow Python variable naming style,
+    e.g. metric_name. The name 'url' must not be used.
+
+    Putting it all together:
+      def ValidateAndMeasurePage(self, page, tab, results):
+        res = tab.EvaluateJavaScript('2+2')
+        if res != 4:
+          raise Exception('Oh, wow.')
+        results.AddValue(scalar.ScalarValue(
+            page, 'two_plus_two', 'count', res))
+
+    Args:
+      page: A telemetry.page.Page instance.
+      tab: A telemetry.core.Tab instance.
+      results: A telemetry.results.PageTestResults instance.
+    """
+    raise NotImplementedError
+
+  # Deprecated: do not use this hook. (crbug.com/470147)
+  def RunNavigateSteps(self, page, tab):
+    """Navigates the tab to the page URL attribute.
+
+    Runs the 'navigate_steps' page attribute as a compound action.
+    """
+    action_runner = action_runner_module.ActionRunner(
+        tab, skip_waits=page.skip_waits)
+    page.RunNavigateSteps(action_runner)
diff --git a/catapult/telemetry/telemetry/page/page_test_unittest.py b/catapult/telemetry/telemetry/page/page_test_unittest.py
new file mode 100644
index 0000000..ad4e554
--- /dev/null
+++ b/catapult/telemetry/telemetry/page/page_test_unittest.py
@@ -0,0 +1,196 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import os
+import unittest
+
+from telemetry import decorators
+from telemetry import story
+from telemetry.page import page as page_module
+from telemetry.page import page_test
+from telemetry.testing import options_for_unittests
+from telemetry.testing import page_test_test_case
+from telemetry.util import wpr_modes
+from telemetry.wpr import archive_info
+
+
+class PageTestThatFails(page_test.PageTest):
+
+  def ValidateAndMeasurePage(self, page, tab, results):
+    raise page_test.Failure
+
+
+class PageTestForBlank(page_test.PageTest):
+
+  def ValidateAndMeasurePage(self, page, tab, results):
+    contents = tab.EvaluateJavaScript('document.body.textContent')
+    if contents.strip() != 'Hello world':
+      raise page_test.MeasurementFailure(
+          'Page contents were: ' + contents)
+
+
+class PageTestForReplay(page_test.PageTest):
+
+  def ValidateAndMeasurePage(self, page, tab, results):
+    # Web Page Replay returns '404 Not Found' if a page is not in the archive.
+    contents = tab.EvaluateJavaScript('document.body.textContent')
+    if '404 Not Found' in contents.strip():
+      raise page_test.MeasurementFailure('Page not in archive.')
+
+
+class PageTestQueryParams(page_test.PageTest):
+
+  def ValidateAndMeasurePage(self, page, tab, results):
+    query = tab.EvaluateJavaScript('window.location.search')
+    expected = '?foo=1'
+    if query.strip() != expected:
+      raise page_test.MeasurementFailure(
+          'query was %s, not %s.' % (query, expected))
+
+
+class PageTestWithAction(page_test.PageTest):
+
+  def __init__(self):
+    super(PageTestWithAction, self).__init__()
+
+  def ValidateAndMeasurePage(self, page, tab, results):
+    pass
+
+
+class PageWithAction(page_module.Page):
+
+  def __init__(self, url, story_set):
+    super(PageWithAction, self).__init__(url, story_set, story_set.base_dir)
+    self.run_test_action_called = False
+
+  def RunPageInteractions(self, _):
+    self.run_test_action_called = True
+
+
+class PageTestUnitTest(page_test_test_case.PageTestTestCase):
+
+  def setUp(self):
+    self._options = options_for_unittests.GetCopy()
+    self._options.browser_options.wpr_mode = wpr_modes.WPR_OFF
+
+  def testGotToBlank(self):
+    story_set = self.CreateStorySetFromFileInUnittestDataDir('blank.html')
+    measurement = PageTestForBlank()
+    all_results = self.RunMeasurement(
+        measurement, story_set, options=self._options)
+    self.assertEquals(0, len(all_results.failures))
+
+  def testGotQueryParams(self):
+    story_set = self.CreateStorySetFromFileInUnittestDataDir(
+        'blank.html?foo=1')
+    measurement = PageTestQueryParams()
+    all_results = self.RunMeasurement(
+        measurement, story_set, options=self._options)
+    self.assertEquals(0, len(all_results.failures))
+
+  def testFailure(self):
+    story_set = self.CreateStorySetFromFileInUnittestDataDir('blank.html')
+    measurement = PageTestThatFails()
+    all_results = self.RunMeasurement(
+        measurement, story_set, options=self._options)
+    self.assertEquals(1, len(all_results.failures))
+
+  # This test is disabled because it runs against live sites, and needs to be
+  # fixed. crbug.com/179038
+  @decorators.Disabled('all')
+  def testRecordAndReplay(self):
+    test_archive = '/tmp/google.wpr'
+    google_url = 'http://www.google.com/'
+    foo_url = 'http://www.foo.com/'
+    archive_info_template = ("""
+{
+"archives": {
+  "%s": ["%s"]
+}
+}
+""")
+    try:
+      story_set = story.StorySet.PageSet()
+      measurement = PageTestForReplay()
+
+      # First record an archive with only www.google.com.
+      self._options.browser_options.wpr_mode = wpr_modes.WPR_RECORD
+
+      story_set._wpr_archive_info = archive_info.WprArchiveInfo(
+          '', json.loads(archive_info_template % (test_archive, google_url)),
+          story_set.bucket)
+      story_set.pages = [page_module.Page(google_url, story_set)]
+      all_results = self.RunMeasurement(
+          measurement, story_set, options=self._options)
+      self.assertEquals(0, len(all_results.failures))
+
+      # Now replay it and verify that google.com is found but foo.com is not.
+      self._options.browser_options.wpr_mode = wpr_modes.WPR_REPLAY
+
+      story_set._wpr_archive_info = archive_info.WprArchiveInfo(
+          '', json.loads(archive_info_template % (test_archive, foo_url)),
+          story_set.bucket)
+      story_set.pages = [page_module.Page(foo_url, story_set)]
+      all_results = self.RunMeasurement(
+          measurement, story_set, options=self._options)
+      self.assertEquals(1, len(all_results.failures))
+
+      story_set._wpr_archive_info = archive_info.WprArchiveInfo(
+          '', json.loads(archive_info_template % (test_archive, google_url)),
+          story_set.bucket)
+      story_set.pages = [page_module.Page(google_url, story_set)]
+      all_results = self.RunMeasurement(
+          measurement, story_set, options=self._options)
+      self.assertEquals(0, len(all_results.failures))
+
+      self.assertTrue(os.path.isfile(test_archive))
+
+    finally:
+      if os.path.isfile(test_archive):
+        os.remove(test_archive)
+
+  def testRunActions(self):
+    story_set = self.CreateEmptyPageSet()
+    page = PageWithAction('file://blank.html', story_set)
+    story_set.AddStory(page)
+    measurement = PageTestWithAction()
+    self.RunMeasurement(measurement, story_set, options=self._options)
+    self.assertTrue(page.run_test_action_called)
+
+
+class MultiTabPageTestUnitTest(unittest.TestCase):
+
+  def testNoTabForPageReturnsFalse(self):
+    class PageTestWithoutTabForPage(page_test.PageTest):
+
+      def ValidateAndMeasurePage(self, *_):
+        pass
+    test = PageTestWithoutTabForPage()
+    self.assertFalse(test.is_multi_tab_test)
+
+  def testHasTabForPageReturnsTrue(self):
+    class PageTestWithTabForPage(page_test.PageTest):
+
+      def ValidateAndMeasurePage(self, *_):
+        pass
+
+      def TabForPage(self, *_):
+        pass
+    test = PageTestWithTabForPage()
+    self.assertTrue(test.is_multi_tab_test)
+
+  def testHasTabForPageInAncestor(self):
+    class PageTestWithTabForPage(page_test.PageTest):
+
+      def ValidateAndMeasurePage(self, *_):
+        pass
+
+      def TabForPage(self, *_):
+        pass
+
+    class PageTestWithTabForPageInParent(PageTestWithTabForPage):
+      pass
+    test = PageTestWithTabForPageInParent()
+    self.assertTrue(test.is_multi_tab_test)
diff --git a/catapult/telemetry/telemetry/page/page_unittest.py b/catapult/telemetry/telemetry/page/page_unittest.py
new file mode 100644
index 0000000..7620465
--- /dev/null
+++ b/catapult/telemetry/telemetry/page/page_unittest.py
@@ -0,0 +1,216 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import unittest
+
+from telemetry import story
+from telemetry.page import page
+
+import mock
+
+
+class TestPage(unittest.TestCase):
+
+  def assertPathEqual(self, path1, path2):
+    self.assertEqual(os.path.normpath(path1), os.path.normpath(path2))
+
+  def testFilePathRelative(self):
+    apage = page.Page('file://somedir/otherdir/file.html',
+                      None, base_dir='basedir')
+    self.assertPathEqual(apage.file_path, 'basedir/somedir/otherdir/file.html')
+
+  def testFilePathAbsolute(self):
+    apage = page.Page('file:///somedir/otherdir/file.html',
+                      None, base_dir='basedir')
+    self.assertPathEqual(apage.file_path, '/somedir/otherdir/file.html')
+
+  def testFilePathQueryString(self):
+    apage = page.Page('file://somedir/otherdir/file.html?key=val',
+                      None, base_dir='basedir')
+    self.assertPathEqual(apage.file_path, 'basedir/somedir/otherdir/file.html')
+
+  def testFilePathUrlQueryString(self):
+    apage = page.Page('file://somedir/file.html?key=val',
+                      None, base_dir='basedir')
+    self.assertPathEqual(apage.file_path_url,
+                         'basedir/somedir/file.html?key=val')
+
+  def testFilePathUrlTrailingSeparator(self):
+    apage = page.Page('file://somedir/otherdir/',
+                      None, base_dir='basedir')
+    self.assertPathEqual(apage.file_path_url, 'basedir/somedir/otherdir/')
+    self.assertTrue(apage.file_path_url.endswith(os.sep) or
+                    (os.altsep and apage.file_path_url.endswith(os.altsep)))
+
+  def testSort(self):
+    story_set = story.StorySet(base_dir=os.path.dirname(__file__))
+    story_set.AddStory(
+        page.Page('http://www.foo.com/', story_set, story_set.base_dir))
+    story_set.AddStory(
+        page.Page('http://www.bar.com/', story_set, story_set.base_dir))
+
+    pages = sorted([story_set.stories[0], story_set.stories[1]])
+    self.assertEquals([story_set.stories[1], story_set.stories[0]],
+                      pages)
+
+  def testGetUrlBaseDirAndFileForUrlBaseDir(self):
+    base_dir = os.path.dirname(__file__)
+    file_path = os.path.join(
+        os.path.dirname(base_dir), 'otherdir', 'file.html')
+    story_set = story.StorySet(base_dir=base_dir,
+                               serving_dirs=[os.path.join('..', 'somedir', '')])
+    story_set.AddStory(
+        page.Page('file://../otherdir/file.html', story_set,
+                  story_set.base_dir))
+    self.assertPathEqual(story_set[0].file_path, file_path)
+
+  def testDisplayUrlForHttp(self):
+    story_set = story.StorySet(base_dir=os.path.dirname(__file__))
+    story_set.AddStory(
+        page.Page('http://www.foo.com/', story_set, story_set.base_dir))
+    story_set.AddStory(
+        page.Page('http://www.bar.com/', story_set, story_set.base_dir))
+
+    self.assertEquals(story_set[0].display_name, 'http://www.foo.com/')
+    self.assertEquals(story_set[1].display_name, 'http://www.bar.com/')
+
+  def testDisplayUrlForHttps(self):
+    story_set = story.StorySet(base_dir=os.path.dirname(__file__))
+    story_set.AddStory(
+        page.Page('http://www.foo.com/', story_set, story_set.base_dir))
+    story_set.AddStory(
+        page.Page('https://www.bar.com/', story_set, story_set.base_dir))
+
+    self.assertEquals(story_set[0].display_name, 'http://www.foo.com/')
+    self.assertEquals(story_set[1].display_name, 'https://www.bar.com/')
+
+  def testDisplayUrlForFile(self):
+    story_set = story.StorySet(base_dir=os.path.dirname(__file__))
+    story_set.AddStory(page.Page(
+        'file://../../otherdir/foo.html', story_set, story_set.base_dir))
+    story_set.AddStory(page.Page(
+        'file://../../otherdir/bar.html', story_set, story_set.base_dir))
+
+    self.assertEquals(story_set[0].display_name, 'foo.html')
+    self.assertEquals(story_set[1].display_name, 'bar.html')
+
+  def testDisplayUrlForFilesDifferingBySuffix(self):
+    story_set = story.StorySet(base_dir=os.path.dirname(__file__))
+    story_set.AddStory(page.Page(
+        'file://../../otherdir/foo.html', story_set, story_set.base_dir))
+    story_set.AddStory(page.Page(
+        'file://../../otherdir/foo1.html', story_set, story_set.base_dir))
+
+    self.assertEquals(story_set[0].display_name, 'foo.html')
+    self.assertEquals(story_set[1].display_name, 'foo1.html')
+
+  def testDisplayUrlForFileOfDifferentPaths(self):
+    story_set = story.StorySet(base_dir=os.path.dirname(__file__))
+    story_set.AddStory(
+        page.Page(
+            'file://../../somedir/foo.html', story_set, story_set.base_dir))
+    story_set.AddStory(page.Page(
+        'file://../../otherdir/bar.html', story_set, story_set.base_dir))
+
+    self.assertEquals(story_set[0].display_name, 'somedir/foo.html')
+    self.assertEquals(story_set[1].display_name, 'otherdir/bar.html')
+
+  def testDisplayUrlForFileDirectories(self):
+    story_set = story.StorySet(base_dir=os.path.dirname(__file__))
+    story_set.AddStory(
+        page.Page('file://../../otherdir/foo', story_set, story_set.base_dir))
+    story_set.AddStory(
+        page.Page('file://../../otherdir/bar', story_set, story_set.base_dir))
+
+    self.assertEquals(story_set[0].display_name, 'foo')
+    self.assertEquals(story_set[1].display_name, 'bar')
+
+  def testDisplayUrlForSingleFile(self):
+    story_set = story.StorySet(base_dir=os.path.dirname(__file__))
+    story_set.AddStory(page.Page(
+        'file://../../otherdir/foo.html', story_set, story_set.base_dir))
+
+    self.assertEquals(story_set[0].display_name, 'foo.html')
+
+  def testDisplayUrlForSingleDirectory(self):
+    story_set = story.StorySet(base_dir=os.path.dirname(__file__))
+    story_set.AddStory(
+        page.Page('file://../../otherdir/foo', story_set, story_set.base_dir))
+
+    self.assertEquals(story_set[0].display_name, 'foo')
+
+  def testPagesHaveDifferentIds(self):
+    p0 = page.Page("http://example.com")
+    p1 = page.Page("http://example.com")
+    self.assertNotEqual(p0.id, p1.id)
+
+  def testNamelessPageAsDict(self):
+    nameless_dict = page.Page('http://example.com/').AsDict()
+    self.assertIn('id', nameless_dict)
+    del nameless_dict['id']
+    self.assertEquals({
+                      'url': 'http://example.com/',
+                      }, nameless_dict)
+
+  def testNamedPageAsDict(self):
+    named_dict = page.Page('http://example.com/', name='Example').AsDict()
+    self.assertIn('id', named_dict)
+    del named_dict['id']
+    self.assertEquals({
+                      'url': 'http://example.com/',
+                      'name': 'Example'
+                      }, named_dict)
+
+  def testIsLocal(self):
+    p = page.Page('file://foo.html')
+    self.assertTrue(p.is_local)
+
+    p = page.Page('chrome://extensions')
+    self.assertTrue(p.is_local)
+
+    p = page.Page('about:blank')
+    self.assertTrue(p.is_local)
+
+    p = page.Page('http://foo.com')
+    self.assertFalse(p.is_local)
+
+
+class TestPageRun(unittest.TestCase):
+
+  def testFiveGarbageCollectionCallsByDefault(self):
+    mock_shared_state = mock.Mock()
+    p = page.Page('file://foo.html')
+    p.Run(mock_shared_state)
+    expected = [mock.call.current_tab.CollectGarbage(),
+                mock.call.current_tab.CollectGarbage(),
+                mock.call.current_tab.CollectGarbage(),
+                mock.call.current_tab.CollectGarbage(),
+                mock.call.current_tab.CollectGarbage(),
+                mock.call.page_test.WillNavigateToPage(
+                p, mock_shared_state.current_tab),
+                mock.call.page_test.RunNavigateSteps(
+                p, mock_shared_state.current_tab),
+                mock.call.page_test.DidNavigateToPage(
+                p, mock_shared_state.current_tab)]
+    self.assertEquals(mock_shared_state.mock_calls, expected)
+
+  def testNoGarbageCollectionCalls(self):
+    mock_shared_state = mock.Mock()
+
+    class NonGarbageCollectPage(page.Page):
+
+      def __init__(self, url):
+        super(NonGarbageCollectPage, self).__init__(url)
+        self._collect_garbage_before_run = False
+
+    p = NonGarbageCollectPage('file://foo.html')
+    p.Run(mock_shared_state)
+    expected = [mock.call.page_test.WillNavigateToPage(
+                p, mock_shared_state.current_tab),
+                mock.call.page_test.RunNavigateSteps(
+                p, mock_shared_state.current_tab),
+                mock.call.page_test.DidNavigateToPage(
+                p, mock_shared_state.current_tab)]
+    self.assertEquals(mock_shared_state.mock_calls, expected)
diff --git a/catapult/telemetry/telemetry/page/shared_page_state.py b/catapult/telemetry/telemetry/page/shared_page_state.py
new file mode 100644
index 0000000..454a087
--- /dev/null
+++ b/catapult/telemetry/telemetry/page/shared_page_state.py
@@ -0,0 +1,532 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import shutil
+import sys
+import tempfile
+import zipfile
+
+from catapult_base import cloud_storage  # pylint: disable=import-error
+
+from telemetry.core import exceptions
+from telemetry.core import util
+from telemetry import decorators
+from telemetry.internal.browser import browser_finder
+from telemetry.internal.browser import browser_finder_exceptions
+from telemetry.internal.browser import browser_info as browser_info_module
+from telemetry.internal.platform.profiler import profiler_finder
+from telemetry.internal.util import exception_formatter
+from telemetry.internal.util import file_handle
+from telemetry.page import page_test
+from telemetry import story
+from telemetry.util import image_util
+from telemetry.util import wpr_modes
+from telemetry.web_perf import timeline_based_measurement
+
+
+def _PrepareFinderOptions(finder_options, test, device_type):
+  browser_options = finder_options.browser_options
+  # Set up user agent.
+  browser_options.browser_user_agent_type = device_type
+
+  test.CustomizeBrowserOptions(finder_options.browser_options)
+  if finder_options.profiler:
+    profiler_class = profiler_finder.FindProfiler(finder_options.profiler)
+    profiler_class.CustomizeBrowserOptions(browser_options.browser_type,
+                                           finder_options)
+
+
+class SharedPageState(story.SharedState):
+  """
+  This class contains all specific logic necessary to run a Chrome browser
+  benchmark.
+  """
+
+  _device_type = None
+
+  def __init__(self, test, finder_options, story_set):
+    super(SharedPageState, self).__init__(test, finder_options, story_set)
+    if isinstance(test, timeline_based_measurement.TimelineBasedMeasurement):
+      assert not finder_options.profiler, (
+          'This is a Timeline Based Measurement benchmark. You cannot run it '
+          'with the --profiler flag. If you need trace data, tracing is always '
+          'enabled in Timeline Based Measurement benchmarks and you can get '
+          'the trace data by using --output-format=json.')
+      # This is to avoid the cyclic-import caused by timeline_based_page_test.
+      from telemetry.web_perf import timeline_based_page_test
+      self._test = timeline_based_page_test.TimelineBasedPageTest(test)
+    else:
+      self._test = test
+    device_type = self._device_type
+    # TODO(aiolos, nednguyen): Remove this logic of pulling out user_agent_type
+    # from story_set once all page_sets are converted to story_sets
+    # (crbug.com/439512).
+
+    def _IsPageSetInstance(s):
+      # This is needed to avoid importing telemetry.page.page_set which will
+      # cause cyclic import.
+      return 'PageSet' == s.__class__.__name__ or 'PageSet' in (
+          list(c.__name__ for c in s.__class__.__bases__))
+    if not device_type and _IsPageSetInstance(story_set):
+      device_type = story_set.user_agent_type
+    _PrepareFinderOptions(finder_options, self._test, device_type)
+    self._browser = None
+    self._finder_options = finder_options
+    self._possible_browser = self._GetPossibleBrowser(
+        self._test, finder_options)
+
+    self._first_browser = True
+    self._did_login_for_current_page = False
+    self._current_page = None
+    self._current_tab = None
+    self._migrated_profile = None
+
+    self._pregenerated_profile_archive_dir = None
+    self._test.SetOptions(self._finder_options)
+
+    # TODO(crbug/404771): Move network controller options out of
+    # browser_options and into finder_options.
+    browser_options = self._finder_options.browser_options
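+    # --use-live-sites takes precedence and turns WPR off entirely; otherwise
+    # an explicit record request wins and replay is the default.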
+    if self._finder_options.use_live_sites:
+      wpr_mode = wpr_modes.WPR_OFF
+    elif browser_options.wpr_mode == wpr_modes.WPR_RECORD:
+      wpr_mode = wpr_modes.WPR_RECORD
+    else:
+      wpr_mode = wpr_modes.WPR_REPLAY
+
+    self.platform.network_controller.Open(wpr_mode,
+                                          browser_options.extra_wpr_args)
+
+  @property
+  def browser(self):
+    return self._browser
+
+  def _FindBrowser(self, finder_options):
+    possible_browser = browser_finder.FindBrowser(finder_options)
+    if not possible_browser:
+      raise browser_finder_exceptions.BrowserFinderException(
+          'No browser found.\n\nAvailable browsers:\n%s\n' %
+          '\n'.join(browser_finder.GetAllAvailableBrowserTypes(finder_options)))
+    return possible_browser
+
+  def _GetPossibleBrowser(self, test, finder_options):
+    """Return a possible_browser with the given options for |test|. """
+    possible_browser = self._FindBrowser(finder_options)
+    finder_options.browser_options.browser_type = (
+        possible_browser.browser_type)
+
+    enabled, msg = decorators.IsEnabled(test, possible_browser)
+    if not enabled and not finder_options.run_disabled_tests:
+      logging.warning(msg)
+      logging.warning('You are trying to run a disabled test.')
+      logging.warning(
+          'Pass --also-run-disabled-tests to squelch this message.')
+      sys.exit(0)
+
+    if possible_browser.IsRemote():
+      possible_browser.RunRemote()
+      sys.exit(0)
+    return possible_browser
+
+  def _TryCaptureScreenShot(self, page, tab, results):
+    try:
+      # TODO(nednguyen): once all platforms support taking screenshot,
+      # remove the tab checking logic and consider moving this to story_runner.
+      # (crbug.com/369490)
+      if tab.browser.platform.CanTakeScreenshot():
+        tf = tempfile.NamedTemporaryFile(delete=False, suffix='.png')
+        tf.close()
+        tab.browser.platform.TakeScreenshot(tf.name)
+        results.AddProfilingFile(page, file_handle.FromTempFile(tf))
+      elif tab.IsAlive() and tab.screenshot_supported:
+        tf = tempfile.NamedTemporaryFile(delete=False, suffix='.png')
+        tf.close()
+        image = tab.Screenshot()
+        image_util.WritePngFile(image, tf.name)
+        results.AddProfilingFile(page, file_handle.FromTempFile(tf))
+      else:
+        logging.warning(
+            'Either the tab has crashed or the browser does not support '
+            'taking tab screenshots. Skipping screenshot on failure.')
+    except Exception as e:
+      logging.warning('Exception when trying to capture screenshot: %s',
+                      repr(e))
+
+  def DidRunStory(self, results):
+    if self._finder_options.profiler:
+      self._StopProfiling(results)
+    # We might hang while trying to close the connection, and need to guarantee
+    # the page will get cleaned up to avoid future tests failing in weird ways.
+    try:
+      if self._current_tab and self._current_tab.IsAlive():
+        self._current_tab.CloseConnections()
+    except Exception:
+      if self._current_tab:
+        self._current_tab.Close()
+    finally:
+      if self._current_page.credentials and self._did_login_for_current_page:
+        self.browser.credentials.LoginNoLongerNeeded(
+            self._current_tab, self._current_page.credentials)
+      if self._test.StopBrowserAfterPage(self.browser, self._current_page):
+        self._StopBrowser()
+      self._current_page = None
+      self._current_tab = None
+
+  @property
+  def platform(self):
+    return self._possible_browser.platform
+
+  def _StartBrowser(self, page):
+    assert self._browser is None
+    self._possible_browser.SetCredentialsPath(page.credentials_path)
+
+    self._test.WillStartBrowser(self.platform)
+    if page.startup_url:
+      self._finder_options.browser_options.startup_url = page.startup_url
+    self._browser = self._possible_browser.Create(self._finder_options)
+    self._test.DidStartBrowser(self.browser)
+
+    if self._first_browser:
+      self._first_browser = False
+      self.browser.credentials.WarnIfMissingCredentials(page)
+      logging.info('OS: %s %s',
+                   self.platform.GetOSName(),
+                   self.platform.GetOSVersionName())
+      if self.browser.supports_system_info:
+        system_info = self.browser.GetSystemInfo()
+        if system_info.model_name:
+          logging.info('Model: %s', system_info.model_name)
+        if system_info.gpu:
+          for i, device in enumerate(system_info.gpu.devices):
+            logging.info('GPU device %d: %s', i, device)
+          if system_info.gpu.aux_attributes:
+            logging.info('GPU Attributes:')
+            for k, v in sorted(system_info.gpu.aux_attributes.iteritems()):
+              logging.info('  %-20s: %s', k, v)
+          if system_info.gpu.feature_status:
+            logging.info('Feature Status:')
+            for k, v in sorted(system_info.gpu.feature_status.iteritems()):
+              logging.info('  %-20s: %s', k, v)
+          if system_info.gpu.driver_bug_workarounds:
+            logging.info('Driver Bug Workarounds:')
+            for workaround in system_info.gpu.driver_bug_workarounds:
+              logging.info('  %s', workaround)
+        else:
+          logging.info('No GPU devices')
+      else:
+        logging.warning('System info not supported')
+
+  def WillRunStory(self, page):
+    if not self.platform.tracing_controller.is_tracing_running:
+      # For TimelineBasedMeasurement benchmarks, tracing has already started.
+      # For PageTest benchmarks, tracing has not yet started. We need to make
+      # sure no tracing state is left before starting the browser for PageTest
+      # benchmarks.
+      self.platform.tracing_controller.ClearStateIfNeeded()
+
+    if self._ShouldDownloadPregeneratedProfileArchive():
+      self._DownloadPregeneratedProfileArchive()
+
+      if self._ShouldMigrateProfile():
+        self._MigratePregeneratedProfile()
+
+    page_set = page.page_set
+    self._current_page = page
+    if self._browser and (self._test.RestartBrowserBeforeEachPage()
+                          or page.startup_url):
+      assert not self.platform.tracing_controller.is_tracing_running, (
+          'Should not restart browser when tracing is already running. For '
+          'TimelineBasedMeasurement (TBM) benchmarks, you should not use '
+          'startup_url. Use benchmark.ShouldTearDownStateAfterEachStoryRun '
+          'instead.')
+      self._StopBrowser()
+    started_browser = not self.browser
+
+    archive_path = page_set.WprFilePathForStory(page)
+    # TODO(nednguyen, perezju): Ideally we should just let the network
+    # controller raise an exception when the archive_path is not found.
+    if archive_path is not None and not os.path.isfile(archive_path):
+      logging.warning('WPR archive missing: %s', archive_path)
+      archive_path = None
+    self.platform.network_controller.StartReplay(
+        archive_path, page.make_javascript_deterministic)
+
+    if self.browser:
+      # Set new credential path for browser.
+      self.browser.credentials.credentials_path = page.credentials_path
+    else:
+      self._StartBrowser(page)
+    if self.browser.supports_tab_control and self._test.close_tabs_before_run:
+      # Create a tab if there's none.
+      if len(self.browser.tabs) == 0:
+        self.browser.tabs.New()
+
+      # Ensure only one tab is open, unless the test is a multi-tab test.
+      if not self._test.is_multi_tab_test:
+        while len(self.browser.tabs) > 1:
+          self.browser.tabs[-1].Close()
+
+      # Must wait for tab to commit otherwise it can commit after the next
+      # navigation has begun and RenderFrameHostManager::DidNavigateMainFrame()
+      # will cancel the next navigation because it's pending. This manifests as
+      # the first navigation in a PageSet freezing indefinitely because the
+      # navigation was silently canceled when |self.browser.tabs[0]| was
+      # committed. Only do this when we just started the browser, otherwise
+      # there are cases where previous pages in a PageSet never complete
+      # loading so we'll wait forever.
+      if started_browser:
+        self.browser.tabs[0].WaitForDocumentReadyStateToBeComplete()
+
+    # Start profiling if needed.
+    if self._finder_options.profiler:
+      self._StartProfiling(self._current_page)
+
+  def CanRunStory(self, page):
+    return self.CanRunOnBrowser(browser_info_module.BrowserInfo(self.browser),
+                                page)
+
+  def CanRunOnBrowser(self, browser_info,
+                      page):  # pylint: disable=unused-argument
+    """Override this to return whether the browser brought up by this state
+    instance is suitable for running the given page.
+
+    Args:
+      browser_info: an instance of telemetry.core.browser_info.BrowserInfo
+      page: an instance of telemetry.page.Page
+    """
+    del browser_info, page  # unused
+    return True
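+    # Illustrative sketch (not upstream code): a subclass could gate a page on
+    # a browser capability, e.g. assuming BrowserInfo exposes a
+    # HasWebGLSupport() helper:
+    #
+    #   class WebGLSharedPageState(SharedPageState):
+    #     def CanRunOnBrowser(self, browser_info, page):
+    #       return browser_info.HasWebGLSupport()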
+
+  def _PreparePage(self):
+    self._current_tab = self._test.TabForPage(self._current_page, self.browser)
+    if self._current_page.is_file:
+      self.platform.SetHTTPServerDirectories(
+          self._current_page.page_set.serving_dirs |
+          set([self._current_page.serving_dir]))
+
+    if self._current_page.credentials:
+      if not self.browser.credentials.LoginNeeded(
+          self._current_tab, self._current_page.credentials):
+        raise page_test.Failure(
+            'Login as ' + self._current_page.credentials + ' failed')
+      self._did_login_for_current_page = True
+
+    if self._test.clear_cache_before_each_run:
+      self._current_tab.ClearCache(force=True)
+
+  @property
+  def current_page(self):
+    return self._current_page
+
+  @property
+  def current_tab(self):
+    return self._current_tab
+
+  @property
+  def page_test(self):
+    return self._test
+
+  def RunStory(self, results):
+    try:
+      self._PreparePage()
+      self._current_page.Run(self)
+      self._test.ValidateAndMeasurePage(
+          self._current_page, self._current_tab, results)
+    except exceptions.Error:
+      if self._finder_options.browser_options.take_screenshot_for_failed_page:
+        self._TryCaptureScreenShot(self._current_page, self._current_tab,
+                                   results)
+      if self._test.is_multi_tab_test:
+        # Avoid trying to recover from an unknown multi-tab state.
+        exception_formatter.PrintFormattedException(
+            msg='Telemetry Error during multi tab test:')
+        raise page_test.MultiTabTestAppCrashError
+      raise
+    except Exception:
+      if self._finder_options.browser_options.take_screenshot_for_failed_page:
+        self._TryCaptureScreenShot(self._current_page, self._current_tab,
+                                   results)
+      raise
+
+  def TearDownState(self):
+    if self._migrated_profile:
+      shutil.rmtree(self._migrated_profile)
+      self._migrated_profile = None
+
+    self._StopBrowser()
+    self.platform.StopAllLocalServers()
+    self.platform.network_controller.Close()
+
+  def _StopBrowser(self):
+    if self._browser:
+      self._browser.Close()
+      self._browser = None
+
+  def _StartProfiling(self, page):
+    output_file = os.path.join(self._finder_options.output_dir,
+                               page.file_safe_name)
+    is_repeating = (self._finder_options.page_repeat != 1 or
+                    self._finder_options.pageset_repeat != 1)
+    if is_repeating:
+      output_file = util.GetSequentialFileName(output_file)
+    self.browser.profiling_controller.Start(
+        self._finder_options.profiler, output_file)
+
+  def _StopProfiling(self, results):
+    if self.browser:
+      profiler_files = self.browser.profiling_controller.Stop()
+      for f in profiler_files:
+        if os.path.isfile(f):
+          results.AddProfilingFile(self._current_page,
+                                   file_handle.FromFilePath(f))
+
+  def _ShouldMigrateProfile(self):
+    return not self._migrated_profile
+
+  def _MigrateProfile(self, finder_options, found_browser,
+                      initial_profile, final_profile):
+    """Migrates a profile to be compatible with a newer version of Chrome.
+
+    Launching Chrome with the old profile will perform the migration.
+    """
+    # Save the current input and output profiles.
+    saved_input_profile = finder_options.browser_options.profile_dir
+    saved_output_profile = finder_options.output_profile_path
+
+    # Set the input and output profiles.
+    finder_options.browser_options.profile_dir = initial_profile
+    finder_options.output_profile_path = final_profile
+
+    # Launch the browser, then close it.
+    browser = found_browser.Create(finder_options)
+    browser.Close()
+
+    # Load the saved input and output profiles.
+    finder_options.browser_options.profile_dir = saved_input_profile
+    finder_options.output_profile_path = saved_output_profile
+
+  def _MigratePregeneratedProfile(self):
+    """Migrates the pre-generated profile by launching Chrome with it.
+
+    On success, updates self._migrated_profile and
+    self._finder_options.browser_options.profile_dir with the directory of the
+    migrated profile.
+    """
+    self._migrated_profile = tempfile.mkdtemp()
+    logging.info("Starting migration of pre-generated profile to %s",
+                 self._migrated_profile)
+    pregenerated_profile = self._finder_options.browser_options.profile_dir
+
+    possible_browser = self._FindBrowser(self._finder_options)
+    self._MigrateProfile(self._finder_options, possible_browser,
+                         pregenerated_profile, self._migrated_profile)
+    self._finder_options.browser_options.profile_dir = self._migrated_profile
+    logging.info("Finished migration of pre-generated profile to %s",
+                 self._migrated_profile)
+
+  def GetPregeneratedProfileArchiveDir(self):
+    return self._pregenerated_profile_archive_dir
+
+  def SetPregeneratedProfileArchiveDir(self, archive_path):
+    """
+    Benchmarks can set a pre-generated profile archive to indicate that when
+    Chrome is launched, it should have a --user-data-dir set to the
+    pre-generated profile, rather than to an empty profile.
+
+    If the benchmark is invoked with the option --profile-dir=<dir>, that
+    option overrides this value.
+    """
+    self._pregenerated_profile_archive_dir = archive_path
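+    # Illustrative usage (hypothetical benchmark code; the path is a
+    # placeholder):
+    #
+    #   shared_state.SetPregeneratedProfileArchiveDir(
+    #       'gen/profiles/large_profile.zip')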
+
+  def _ShouldDownloadPregeneratedProfileArchive(self):
+    """Whether to download a pre-generated profile archive."""
+    # There is no pre-generated profile archive.
+    if not self.GetPregeneratedProfileArchiveDir():
+      return False
+
+    # If profile dir is specified on command line, use that instead.
+    if self._finder_options.browser_options.profile_dir:
+      logging.warning("Profile directory specified on command line: %s, this"
+                      "overrides the benchmark's default profile directory.",
+                      self._finder_options.browser_options.profile_dir)
+      return False
+
+    # If the browser is remote, a local download has no effect.
+    if self._possible_browser.IsRemote():
+      return False
+
+    return True
+
+  def _DownloadPregeneratedProfileArchive(self):
+    """Download and extract the profile directory archive if one exists.
+
+    On success, updates self._finder_options.browser_options.profile_dir with
+    the directory of the extracted profile.
+    """
+    # Download profile directory from cloud storage.
+    generated_profile_archive_path = self.GetPregeneratedProfileArchiveDir()
+
+    try:
+      cloud_storage.GetIfChanged(generated_profile_archive_path,
+                                 cloud_storage.PUBLIC_BUCKET)
+    except (cloud_storage.CredentialsError,
+            cloud_storage.PermissionError) as e:
+      if os.path.exists(generated_profile_archive_path):
+        # If the profile directory archive exists, assume the user has their
+        # own local copy and simply warn.
+        logging.warning('Could not download Profile archive: %s',
+                        generated_profile_archive_path)
+      else:
+        # If the profile directory archive doesn't exist, this is fatal.
+        logging.error('Cannot run without the required profile archive: %s. '
+                      'If you believe you have credentials, follow the '
+                      'instructions below.',
+                      generated_profile_archive_path)
+        logging.error(str(e))
+        sys.exit(-1)
+
+    # Check to make sure the zip file exists.
+    if not os.path.isfile(generated_profile_archive_path):
+      raise Exception("Profile directory archive not downloaded: ",
+                      generated_profile_archive_path)
+
+    # The location to extract the profile into.
+    extracted_profile_dir_path = (
+        os.path.splitext(generated_profile_archive_path)[0])
+
+    # Unzip profile directory.
+    with zipfile.ZipFile(generated_profile_archive_path) as f:
+      try:
+        f.extractall(os.path.dirname(generated_profile_archive_path))
+      except Exception as e:
+        # Cleanup any leftovers from unzipping.
+        if os.path.exists(extracted_profile_dir_path):
+          shutil.rmtree(extracted_profile_dir_path)
+        logging.error("Error extracting profile directory zip file: %s", e)
+        sys.exit(-1)
+
+    # Run with freshly extracted profile directory.
+    logging.info("Using profile archive directory: %s",
+                 extracted_profile_dir_path)
+    self._finder_options.browser_options.profile_dir = (
+        extracted_profile_dir_path)
+
+
+class SharedMobilePageState(SharedPageState):
+  _device_type = 'mobile'
+
+
+class SharedDesktopPageState(SharedPageState):
+  _device_type = 'desktop'
+
+
+class SharedTabletPageState(SharedPageState):
+  _device_type = 'tablet'
+
+
+class Shared10InchTabletPageState(SharedPageState):
+  _device_type = 'tablet_10_inch'
diff --git a/catapult/telemetry/telemetry/page/shared_page_state_unittest.py b/catapult/telemetry/telemetry/page/shared_page_state_unittest.py
new file mode 100644
index 0000000..bff235d
--- /dev/null
+++ b/catapult/telemetry/telemetry/page/shared_page_state_unittest.py
@@ -0,0 +1,104 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.internal import story_runner
+from telemetry.page import page
+from telemetry.page import page_test
+from telemetry.page import shared_page_state
+from telemetry import story as story_module
+from telemetry.testing import fakes
+from telemetry.util import wpr_modes
+
+
+def SetUpPageRunnerArguments(options):
+  parser = options.CreateParser()
+  story_runner.AddCommandLineArgs(parser)
+  options.MergeDefaultValues(parser.get_default_values())
+  story_runner.ProcessCommandLineArgs(parser, options)
+
+
+class DummyTest(page_test.PageTest):
+
+  def ValidateAndMeasurePage(self, *_):
+    pass
+
+
+class SharedPageStateTests(unittest.TestCase):
+
+  def setUp(self):
+    self.options = fakes.CreateBrowserFinderOptions()
+    self.options.use_live_sites = False
+    self.options.output_formats = ['none']
+    self.options.suppress_gtest_report = True
+
+  def testUseLiveSitesFlagSet(self):
+    self.options.use_live_sites = True
+    run_state = shared_page_state.SharedPageState(
+        DummyTest(), self.options, story_module.StorySet())
+    self.assertTrue(run_state.platform.network_controller.is_open)
+    self.assertEquals(run_state.platform.network_controller.wpr_mode,
+                      wpr_modes.WPR_OFF)
+
+  def testUseLiveSitesFlagUnset(self):
+    run_state = shared_page_state.SharedPageState(
+        DummyTest(), self.options, story_module.StorySet())
+    self.assertTrue(run_state.platform.network_controller.is_open)
+    self.assertEquals(run_state.platform.network_controller.wpr_mode,
+                      wpr_modes.WPR_REPLAY)
+
+  def testConstructorCallsSetOptions(self):
+    test = DummyTest()
+    shared_page_state.SharedPageState(
+        test, self.options, story_module.StorySet())
+    self.assertEqual(test.options, self.options)
+
+  def assertUserAgentSetCorrectly(
+      self, shared_page_state_class, expected_user_agent):
+    story = page.Page(
+        'http://www.google.com',
+        shared_page_state_class=shared_page_state_class)
+    test = DummyTest()
+    story_set = story_module.StorySet()
+    story_set.AddStory(story)
+    story.shared_state_class(test, self.options, story_set)
+    browser_options = self.options.browser_options
+    actual_user_agent = browser_options.browser_user_agent_type
+    self.assertEqual(expected_user_agent, actual_user_agent)
+
+  def testPageStatesUserAgentType(self):
+    self.assertUserAgentSetCorrectly(
+        shared_page_state.SharedMobilePageState, 'mobile')
+    self.assertUserAgentSetCorrectly(
+        shared_page_state.SharedDesktopPageState, 'desktop')
+    self.assertUserAgentSetCorrectly(
+        shared_page_state.SharedTabletPageState, 'tablet')
+    self.assertUserAgentSetCorrectly(
+        shared_page_state.Shared10InchTabletPageState, 'tablet_10_inch')
+    self.assertUserAgentSetCorrectly(
+        shared_page_state.SharedPageState, None)
+
+  def testBrowserStartupURLSetCorrectly(self):
+    story_set = story_module.StorySet()
+    google_page = page.Page(
+        'http://www.google.com',
+        startup_url='http://www.google.com', page_set=story_set)
+    example_page = page.Page(
+        'https://www.example.com',
+        startup_url='https://www.example.com', page_set=story_set)
+    gmail_page = page.Page(
+        'https://www.gmail.com',
+        startup_url='https://www.gmail.com', page_set=story_set)
+
+    for p in (google_page, example_page, gmail_page):
+      story_set.AddStory(p)
+
+    shared_state = shared_page_state.SharedPageState(
+        DummyTest(), self.options, story_set)
+
+    for p in (google_page, example_page, gmail_page):
+      shared_state.WillRunStory(p)
+      self.assertEquals(
+          p.startup_url, self.options.browser_options.startup_url)
diff --git a/catapult/telemetry/telemetry/project_config.py b/catapult/telemetry/telemetry/project_config.py
new file mode 100644
index 0000000..a391e81
--- /dev/null
+++ b/catapult/telemetry/telemetry/project_config.py
@@ -0,0 +1,47 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class ProjectConfig(object):
+  """Contains information about the benchmark runtime environment.
+
+  Attributes:
+    top_level_dir: A dir that contains benchmark, page test, and/or story
+        set dirs and associated artifacts.
+    benchmark_dirs: A list of dirs containing benchmarks.
+    benchmark_aliases: A dict of name:alias string pairs to be matched against
+        exactly during benchmark selection.
+    client_config: A path to a ProjectDependencies json file.
+    default_chrome_root: A path to chromium source directory. Many telemetry
+      features depend on chromium source tree's presence and those won't work
+      in case this is not specified.
+  """
+  def __init__(self, top_level_dir, benchmark_dirs=None,
+               benchmark_aliases=None, client_config=None,
+               default_chrome_root=None):
+    self._top_level_dir = top_level_dir
+    self._benchmark_dirs = benchmark_dirs or []
+    self._benchmark_aliases = benchmark_aliases or dict()
+    self._client_config = client_config or ''
+    self._default_chrome_root = default_chrome_root
+
+  @property
+  def top_level_dir(self):
+    return self._top_level_dir
+
+  @property
+  def benchmark_dirs(self):
+    return self._benchmark_dirs
+
+  @property
+  def benchmark_aliases(self):
+    return self._benchmark_aliases
+
+  @property
+  def client_config(self):
+    return self._client_config
+
+  @property
+  def default_chrome_root(self):
+    return self._default_chrome_root
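+
+# Illustrative usage (all paths and names below are placeholders):
+#
+#   config = ProjectConfig(
+#       top_level_dir='/path/to/project',
+#       benchmark_dirs=['/path/to/project/benchmarks'],
+#       benchmark_aliases={'speed': 'real_benchmark_name'})
+#   config.benchmark_dirs  # -> ['/path/to/project/benchmarks']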
diff --git a/catapult/telemetry/telemetry/record_wpr.py b/catapult/telemetry/telemetry/record_wpr.py
new file mode 100644
index 0000000..d46975b
--- /dev/null
+++ b/catapult/telemetry/telemetry/record_wpr.py
@@ -0,0 +1,283 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import logging
+import sys
+
+from telemetry import benchmark
+from telemetry import story
+from telemetry.core import discover
+from telemetry.core import util
+from telemetry.internal.browser import browser_options
+from telemetry.internal.results import results_options
+from telemetry.internal import story_runner
+from telemetry.internal.util import binary_manager
+from telemetry.page import page_test
+from telemetry.util import matching
+from telemetry.util import wpr_modes
+from telemetry.web_perf import timeline_based_measurement
+from telemetry.web_perf import timeline_based_page_test
+
+
+class RecorderPageTest(page_test.PageTest):
+  def __init__(self):
+    super(RecorderPageTest, self).__init__()
+    self.page_test = None
+
+  def CustomizeBrowserOptions(self, options):
+    if self.page_test:
+      self.page_test.CustomizeBrowserOptions(options)
+
+  def WillStartBrowser(self, browser):
+    if self.page_test:
+      self.page_test.WillStartBrowser(browser)
+
+  def DidStartBrowser(self, browser):
+    if self.page_test:
+      self.page_test.DidStartBrowser(browser)
+
+  def WillNavigateToPage(self, page, tab):
+    """Override to ensure all resources are fetched from network."""
+    tab.ClearCache(force=False)
+    if self.page_test:
+      self.page_test.WillNavigateToPage(page, tab)
+
+  def DidNavigateToPage(self, page, tab):
+    if self.page_test:
+      self.page_test.DidNavigateToPage(page, tab)
+    tab.WaitForDocumentReadyStateToBeComplete()
+    util.WaitFor(tab.HasReachedQuiescence, 30)
+
+  def CleanUpAfterPage(self, page, tab):
+    if self.page_test:
+      self.page_test.CleanUpAfterPage(page, tab)
+
+  def ValidateAndMeasurePage(self, page, tab, results):
+    if self.page_test:
+      self.page_test.ValidateAndMeasurePage(page, tab, results)
+
+  def RunNavigateSteps(self, page, tab):
+    if self.page_test:
+      self.page_test.RunNavigateSteps(page, tab)
+    else:
+      super(RecorderPageTest, self).RunNavigateSteps(page, tab)
+
+
+def _GetSubclasses(base_dir, cls):
+  """Returns all subclasses of |cls| in |base_dir|.
+
+  Args:
+    cls: a class
+
+  Returns:
+    dict of {underscored_class_name: benchmark class}
+  """
+  return discover.DiscoverClasses(base_dir, base_dir, cls,
+                                  index_by_class_name=True)
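+  # Per the docstring above, a class named MockBenchmark found under
+  # |base_dir| would be keyed by its underscored name, e.g.
+  # {'mock_benchmark': MockBenchmark}.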
+
+
+def _MaybeGetInstanceOfClass(target, base_dir, cls):
+  if isinstance(target, cls):
+    return target
+  classes = _GetSubclasses(base_dir, cls)
+  return classes[target]() if target in classes else None
+
+
+def _PrintAllImpl(all_items, item_name, output_stream):
+  output_stream.write('Available %s\' names with descriptions:\n' % item_name)
+  keys = sorted(all_items.keys())
+  key_description = [(k, all_items[k].Description()) for k in keys]
+  _PrintPairs(key_description, output_stream)
+  output_stream.write('\n')
+
+
+def _PrintAllBenchmarks(base_dir, output_stream):
+  # TODO: reuse the logic of finding supported benchmarks in benchmark_runner.py
+  # so this only prints out benchmarks that are supported by the recording
+  # platform.
+  _PrintAllImpl(_GetSubclasses(base_dir, benchmark.Benchmark), 'benchmarks',
+                output_stream)
+
+
+def _PrintAllStories(base_dir, output_stream):
+  # TODO: actually print all stories once record_wpr support general
+  # stories recording.
+  _PrintAllImpl(_GetSubclasses(base_dir, story.StorySet), 'story sets',
+                output_stream)
+
+
+def _PrintPairs(pairs, output_stream, prefix=''):
+  """Prints a list of string pairs with alignment."""
+  first_column_length = max(len(a) for a, _ in pairs)
+  format_string = '%s%%-%ds  %%s\n' % (prefix, first_column_length)
+  for a, b in pairs:
+    output_stream.write(format_string % (a, b.strip()))
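+  # For example, _PrintPairs([('foo', 'A test'), ('barbaz', 'Another')], out)
+  # left-justifies each name to the width of the longest name ('barbaz'),
+  # then writes two spaces and the stripped description.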
+
+
+class WprRecorder(object):
+
+  def __init__(self, base_dir, target, args=None):
+    self._base_dir = base_dir
+    self._record_page_test = RecorderPageTest()
+    self._options = self._CreateOptions()
+
+    self._benchmark = _MaybeGetInstanceOfClass(target, base_dir,
+                                               benchmark.Benchmark)
+    self._parser = self._options.CreateParser(usage='See %prog --help')
+    self._AddCommandLineArgs()
+    self._ParseArgs(args)
+    self._ProcessCommandLineArgs()
+    if self._benchmark is not None:
+      test = self._benchmark.CreatePageTest(self.options)
+      if isinstance(test, timeline_based_measurement.TimelineBasedMeasurement):
+        test = timeline_based_page_test.TimelineBasedPageTest(test)
+      # This must be called after the command line args are added.
+      self._record_page_test.page_test = test
+
+    self._page_set_base_dir = (
+        self._options.page_set_base_dir if self._options.page_set_base_dir
+        else self._base_dir)
+    self._story_set = self._GetStorySet(target)
+
+  @property
+  def options(self):
+    return self._options
+
+  def _CreateOptions(self):
+    options = browser_options.BrowserFinderOptions()
+    options.browser_options.wpr_mode = wpr_modes.WPR_RECORD
+    options.browser_options.no_proxy_server = True
+    return options
+
+  def CreateResults(self):
+    if self._benchmark is not None:
+      benchmark_metadata = self._benchmark.GetMetadata()
+    else:
+      benchmark_metadata = benchmark.BenchmarkMetadata('record_wpr')
+
+    return results_options.CreateResults(benchmark_metadata, self._options)
+
+  def _AddCommandLineArgs(self):
+    self._parser.add_option('--page-set-base-dir', action='store',
+                            type='string')
+    story_runner.AddCommandLineArgs(self._parser)
+    if self._benchmark is not None:
+      self._benchmark.AddCommandLineArgs(self._parser)
+      self._benchmark.SetArgumentDefaults(self._parser)
+    self._parser.add_option('--upload', action='store_true')
+    self._SetArgumentDefaults()
+
+  def _SetArgumentDefaults(self):
+    self._parser.set_defaults(**{'output_formats': ['none']})
+
+  def _ParseArgs(self, args=None):
+    args_to_parse = sys.argv[1:] if args is None else args
+    self._parser.parse_args(args_to_parse)
+
+  def _ProcessCommandLineArgs(self):
+    story_runner.ProcessCommandLineArgs(self._parser, self._options)
+
+    if self._options.use_live_sites:
+      self._parser.error("Can't --use-live-sites while recording")
+
+    if self._benchmark is not None:
+      self._benchmark.ProcessCommandLineArgs(self._parser, self._options)
+
+  def _GetStorySet(self, target):
+    if self._benchmark is not None:
+      return self._benchmark.CreateStorySet(self._options)
+    story_set = _MaybeGetInstanceOfClass(target, self._page_set_base_dir,
+                                         story.StorySet)
+    if story_set is None:
+      sys.stderr.write('Target %s is neither benchmark nor story set.\n'
+                       % target)
+      if not self._HintMostLikelyBenchmarksStories(target):
+        sys.stderr.write(
+            'Found no similar benchmark or story. Please use '
+            '--list-benchmarks or --list-stories to list candidates.\n')
+        self._parser.print_usage()
+      sys.exit(1)
+    return story_set
+
+  def _HintMostLikelyBenchmarksStories(self, target):
+    def _Impl(all_items, category_name):
+      candidates = matching.GetMostLikelyMatchedObject(
+          all_items.iteritems(), target, name_func=lambda kv: kv[1].Name())
+      if candidates:
+        sys.stderr.write('\nDo you mean any of the %s below?\n' %
+                         category_name)
+        _PrintPairs([(k, v.Description()) for k, v in candidates], sys.stderr)
+        return True
+      return False
+
+    has_benchmark_hint = _Impl(
+        _GetSubclasses(self._base_dir, benchmark.Benchmark), 'benchmarks')
+    has_story_hint = _Impl(
+        _GetSubclasses(self._base_dir, story.StorySet), 'stories')
+    return has_benchmark_hint or has_story_hint
+
+  def Record(self, results):
+    assert self._story_set.wpr_archive_info, (
+      'Pageset archive_data_file path must be specified.')
+    self._story_set.wpr_archive_info.AddNewTemporaryRecording()
+    self._record_page_test.CustomizeBrowserOptions(self._options)
+    story_runner.Run(self._record_page_test, self._story_set,
+        self._options, results)
+
+  def HandleResults(self, results, upload_to_cloud_storage):
+    if results.failures or results.skipped_values:
+      logging.warning('Some pages failed and/or were skipped. The recording '
+                      'has not been updated for these pages.')
+    results.PrintSummary()
+    self._story_set.wpr_archive_info.AddRecordedStories(
+        results.pages_that_succeeded,
+        upload_to_cloud_storage)
+
+
+def Main(environment):
+
+  parser = argparse.ArgumentParser(
+      usage='Record a benchmark or a story (page set).')
+  parser.add_argument(
+      'benchmark',
+      help=('benchmark name. This argument is optional. If both benchmark name '
+            'and story name are specified, this takes precedence as the '
+            'target of the recording.'),
+      nargs='?')
+  parser.add_argument('--story', help='story (page set) name')
+  parser.add_argument('--list-stories', dest='list_stories',
+                      action='store_true', help='list all story names.')
+  parser.add_argument('--list-benchmarks', dest='list_benchmarks',
+                      action='store_true', help='list all benchmark names.')
+  parser.add_argument('--upload', action='store_true',
+                      help='upload to cloud storage.')
+  args, extra_args = parser.parse_known_args()
+
+  if args.list_benchmarks or args.list_stories:
+    if args.list_benchmarks:
+      _PrintAllBenchmarks(environment.top_level_dir, sys.stderr)
+    if args.list_stories:
+      _PrintAllStories(environment.top_level_dir, sys.stderr)
+    return 0
+
+  target = args.benchmark or args.story
+
+  if not target:
+    sys.stderr.write('Please specify a target (benchmark or story). Please '
+                     'refer to the usage below.\n\n')
+    parser.print_help()
+    return 0
+
+  binary_manager.InitDependencyManager(environment.client_config)
+
+  # TODO(nednguyen): update WprRecorder so that it handles the difference
+  # between recording a benchmark vs recording a story better based on
+  # the distinction between args.benchmark & args.story
+  wpr_recorder = WprRecorder(environment.top_level_dir, target, extra_args)
+  results = wpr_recorder.CreateResults()
+  wpr_recorder.Record(results)
+  wpr_recorder.HandleResults(results, args.upload)
+  return min(255, len(results.failures))
diff --git a/catapult/telemetry/telemetry/record_wpr_unittest.py b/catapult/telemetry/telemetry/record_wpr_unittest.py
new file mode 100644
index 0000000..9760cb0
--- /dev/null
+++ b/catapult/telemetry/telemetry/record_wpr_unittest.py
@@ -0,0 +1,238 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+
+from telemetry import benchmark
+from telemetry import story
+from telemetry.core import util
+from telemetry import decorators
+from telemetry.page import page as page_module
+from telemetry.page import page_test
+from telemetry import record_wpr
+from telemetry.testing import options_for_unittests
+from telemetry.testing import tab_test_case
+from telemetry.util import wpr_modes
+
+
+class MockPage(page_module.Page):
+  def __init__(self, story_set, url):
+    super(MockPage, self).__init__(url=url,
+                                   page_set=story_set,
+                                   base_dir=util.GetUnittestDataDir())
+    self.func_calls = []
+
+  def RunNavigateSteps(self, action_runner):
+    self.func_calls.append('RunNavigateSteps')
+    super(MockPage, self).RunNavigateSteps(action_runner)
+
+  def RunPageInteractions(self, _):
+    self.func_calls.append('RunPageInteractions')
+
+  def RunSmoothness(self, _):
+    self.func_calls.append('RunSmoothness')
+
+class MockStorySet(story.StorySet):
+  def __init__(self, url=''):
+    super(MockStorySet, self).__init__(
+        archive_data_file='data/archive_files/test.json')
+    self.AddStory(MockPage(self, url))
+
+
+class MockPageTest(page_test.PageTest):
+  def __init__(self):
+    super(MockPageTest, self).__init__()
+    self._action_name_to_run = "RunPageInteractions"
+    self.func_calls = []
+
+  def CustomizeBrowserOptions(self, options):
+    self.func_calls.append('CustomizeBrowserOptions')
+
+  def WillNavigateToPage(self, page, tab):
+    self.func_calls.append('WillNavigateToPage')
+
+  def DidNavigateToPage(self, page, tab):
+    self.func_calls.append('DidNavigateToPage')
+
+  def ValidateAndMeasurePage(self, page, tab, results):
+    self.func_calls.append('ValidateAndMeasurePage')
+
+  def WillStartBrowser(self, platform):
+    self.func_calls.append('WillStartBrowser')
+
+  def DidStartBrowser(self, browser):
+    self.func_calls.append('DidStartBrowser')
+
+class MockBenchmark(benchmark.Benchmark):
+  test = MockPageTest
+
+  def __init__(self):
+    super(MockBenchmark, self).__init__()
+    self.mock_story_set = None
+
+  @classmethod
+  def AddBenchmarkCommandLineArgs(cls, group):
+    group.add_option('', '--mock-benchmark-url', action='store', type='string')
+
+  def CreateStorySet(self, options):
+    kwargs = {}
+    if options.mock_benchmark_url:
+      kwargs['url'] = options.mock_benchmark_url
+    self.mock_story_set = MockStorySet(**kwargs)
+    return self.mock_story_set
+
+
+class MockTimelineBasedMeasurementBenchmark(benchmark.Benchmark):
+
+  def __init__(self):
+    super(MockTimelineBasedMeasurementBenchmark, self).__init__()
+    self.mock_story_set = None
+
+  @classmethod
+  def AddBenchmarkCommandLineArgs(cls, group):
+    group.add_option('', '--mock-benchmark-url', action='store', type='string')
+
+  def CreateStorySet(self, options):
+    kwargs = {}
+    if options.mock_benchmark_url:
+      kwargs['url'] = options.mock_benchmark_url
+    self.mock_story_set = MockStorySet(**kwargs)
+    return self.mock_story_set
+
+
+class RecordWprUnitTests(tab_test_case.TabTestCase):
+
+  _base_dir = util.GetUnittestDataDir()
+  _test_data_dir = os.path.join(util.GetUnittestDataDir(), 'page_tests')
+
+  @classmethod
+  def setUpClass(cls):
+    sys.path.extend([cls._base_dir, cls._test_data_dir])
+    super(RecordWprUnitTests, cls).setUpClass()
+    cls._url = cls.UrlOfUnittestFile('blank.html')
+    cls._test_options = options_for_unittests.GetCopy()
+
+  # When the RecorderPageTest is created from a PageSet, we do not have a
+  # PageTest to use. In this case, we will record every available action.
+  def testRunPage_AllActions(self):
+    record_page_test = record_wpr.RecorderPageTest()
+    page = MockPage(story_set=MockStorySet(url=self._url), url=self._url)
+
+    record_page_test.RunNavigateSteps(page, self._tab)
+    self.assertTrue('RunNavigateSteps' in page.func_calls)
+
+  # When the RecorderPageTest is created from a Benchmark, the benchmark will
+  # have a PageTest, specified by its test attribute.
+  def testRunPage_OnlyRunBenchmarkAction(self):
+    record_page_test = record_wpr.RecorderPageTest()
+    record_page_test.page_test = MockBenchmark().test()
+    page = MockPage(story_set=MockStorySet(url=self._url), url=self._url)
+    record_page_test.ValidateAndMeasurePage(page, self._tab, results=None)
+
+  def testRunPage_CallBenchmarksPageTestsFunctions(self):
+    record_page_test = record_wpr.RecorderPageTest()
+    record_page_test.page_test = MockBenchmark().test()
+    page = MockPage(story_set=MockStorySet(url=self._url), url=self._url)
+    record_page_test.ValidateAndMeasurePage(page, self._tab, results=None)
+    self.assertEqual(1, len(record_page_test.page_test.func_calls))
+    self.assertEqual('ValidateAndMeasurePage',
+                     record_page_test.page_test.func_calls[0])
+
+  def GetBrowserDeviceFlags(self):
+    flags = ['--browser', self._browser.browser_type,
+             '--remote', self._test_options.cros_remote,
+             '--device', self._device]
+    if self._test_options.chrome_root:
+      flags += ['--chrome-root', self._test_options.chrome_root]
+    return flags
+
+  @decorators.Disabled('chromeos') # crbug.com/404868.
+  def testWprRecorderWithPageSet(self):
+    flags = self.GetBrowserDeviceFlags()
+    mock_story_set = MockStorySet(url=self._url)
+    wpr_recorder = record_wpr.WprRecorder(self._test_data_dir,
+                                          mock_story_set, flags)
+    results = wpr_recorder.CreateResults()
+    wpr_recorder.Record(results)
+    self.assertEqual(set(mock_story_set.stories), results.pages_that_succeeded)
+
+  def testWprRecorderWithBenchmark(self):
+    flags = self.GetBrowserDeviceFlags()
+    flags.extend(['--mock-benchmark-url', self._url])
+    mock_benchmark = MockBenchmark()
+    wpr_recorder = record_wpr.WprRecorder(self._test_data_dir, mock_benchmark,
+                                          flags)
+    results = wpr_recorder.CreateResults()
+    wpr_recorder.Record(results)
+    self.assertEqual(set(mock_benchmark.mock_story_set.stories),
+                     results.pages_that_succeeded)
+
+  def testWprRecorderWithTimelineBasedMeasurementBenchmark(self):
+    flags = self.GetBrowserDeviceFlags()
+    flags.extend(['--mock-benchmark-url', self._url])
+    mock_benchmark = MockTimelineBasedMeasurementBenchmark()
+    wpr_recorder = record_wpr.WprRecorder(self._test_data_dir, mock_benchmark,
+                                          flags)
+    results = wpr_recorder.CreateResults()
+    wpr_recorder.Record(results)
+    self.assertEqual(set(mock_benchmark.mock_story_set.stories),
+                     results.pages_that_succeeded)
+
+  def testPageSetBaseDirFlag(self):
+    flags = self.GetBrowserDeviceFlags()
+    flags.extend(['--page-set-base-dir', self._test_data_dir,
+                  '--mock-benchmark-url', self._url])
+    mock_benchmark = MockBenchmark()
+    wpr_recorder = record_wpr.WprRecorder(
+        'non-existent-dummy-dir', mock_benchmark, flags)
+    results = wpr_recorder.CreateResults()
+    wpr_recorder.Record(results)
+    self.assertEqual(set(mock_benchmark.mock_story_set.stories),
+                     results.pages_that_succeeded)
+
+  def testCommandLineFlags(self):
+    flags = [
+        '--page-repeat', '2',
+        '--mock-benchmark-url', self._url,
+        '--upload',
+    ]
+    wpr_recorder = record_wpr.WprRecorder(self._test_data_dir, MockBenchmark(),
+                                          flags)
+    # page_runner command-line args
+    self.assertEquals(2, wpr_recorder.options.page_repeat)
+    # benchmark command-line args
+    self.assertEquals(self._url, wpr_recorder.options.mock_benchmark_url)
+    # record_wpr command-line arg to upload to cloud-storage.
+    self.assertTrue(wpr_recorder.options.upload)
+    # invalid command-line args
+    self.assertFalse(hasattr(wpr_recorder.options, 'not_a_real_option'))
+
+  def testRecordingEnabled(self):
+    flags = ['--mock-benchmark-url', self._url]
+    wpr_recorder = record_wpr.WprRecorder(self._test_data_dir, MockBenchmark(),
+                                          flags)
+    self.assertEqual(wpr_modes.WPR_RECORD,
+                     wpr_recorder.options.browser_options.wpr_mode)
+
+  # When RecorderPageTest's CustomizeBrowserOptions, WillStartBrowser, or
+  # DidStartBrowser method is called, it forwards the call to the PageTest.
+  def testRecorderPageTest_BrowserMethods(self):
+    flags = ['--mock-benchmark-url', self._url]
+    record_page_test = record_wpr.RecorderPageTest()
+    record_page_test.page_test = MockBenchmark().test()
+    wpr_recorder = record_wpr.WprRecorder(self._test_data_dir, MockBenchmark(),
+                                          flags)
+    record_page_test.CustomizeBrowserOptions(wpr_recorder.options)
+    record_page_test.WillStartBrowser(self._tab.browser.platform)
+    record_page_test.DidStartBrowser(self._tab.browser)
+    self.assertTrue(
+        'CustomizeBrowserOptions' in record_page_test.page_test.func_calls)
+    self.assertTrue('WillStartBrowser' in record_page_test.page_test.func_calls)
+    self.assertTrue('DidStartBrowser' in record_page_test.page_test.func_calls)
+
+  def testUseLiveSitesUnsupported(self):
+    flags = ['--use-live-sites']
+    with self.assertRaises(SystemExit):
+      record_wpr.WprRecorder(self._test_data_dir, MockBenchmark(), flags)
diff --git a/catapult/telemetry/telemetry/story/__init__.py b/catapult/telemetry/telemetry/story/__init__.py
new file mode 100644
index 0000000..1a898db
--- /dev/null
+++ b/catapult/telemetry/telemetry/story/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from catapult_base import cloud_storage  # pylint: disable=import-error
+
+from telemetry.story.shared_state import SharedState
+from telemetry.story.story import Story
+from telemetry.story.story_filter import StoryFilter
+from telemetry.story.story_set import StorySet
+
+
+PUBLIC_BUCKET = cloud_storage.PUBLIC_BUCKET
+PARTNER_BUCKET = cloud_storage.PARTNER_BUCKET
+INTERNAL_BUCKET = cloud_storage.INTERNAL_BUCKET
diff --git a/catapult/telemetry/telemetry/story/shared_state.py b/catapult/telemetry/telemetry/story/shared_state.py
new file mode 100644
index 0000000..0b360d3
--- /dev/null
+++ b/catapult/telemetry/telemetry/story/shared_state.py
@@ -0,0 +1,68 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class SharedState(object):
+  """A class that manages the test state across multiple stories.
+  It's styled on unittest.TestCase for handling test setup & teardown logic.
+
+  """
+
+  def __init__(self, test, options, story_set):
+    """ This method is styled on unittest.TestCase.setUpClass.
+    Override to do any action before running stories that
+    share this same state.
+    Args:
+      test: a page_test.PageTest or story_test.StoryTest instance.
+      options: a BrowserFinderOptions instance that contains command line
+        options.
+      story_set: a story.StorySet instance.
+    """
+    pass
+
+  @property
+  def platform(self):
+    """ Override to return the platform which stories that share this same
+    state will be run on.
+    """
+    raise NotImplementedError()
+
+  def WillRunStory(self, story):
+    """ Override to do any action before running each one of all stories
+    that share this same state.
+    This method is styled on unittest.TestCase.setUp.
+    """
+    raise NotImplementedError()
+
+  def DidRunStory(self, results):
+    """ Override to do any action after running each of all stories that
+    share this same state.
+    This method is styled on unittest.TestCase.tearDown.
+    """
+    raise NotImplementedError()
+
+  def CanRunStory(self, story):
+    """Indicate whether the story can be run in the current configuration.
+    This is called after WillRunStory and before RunStory. Return True
+    if the story should be run, and False if it should be skipped.
+    Most subclasses will probably want to override this to always
+    return True.
+    Args:
+      story: a story.Story instance.
+    """
+    raise NotImplementedError()
+
+  def RunStory(self, results):
+    """ Override to do any action before running each one of all stories
+    that share this same state.
+    This method is styled on unittest.TestCase.run.
+    """
+    raise NotImplementedError()
+
+  def TearDownState(self):
+    """ Override to do any action after running multiple stories that
+    share this same state.
+    This method is styled on unittest.TestCase.tearDownClass.
+    """
+    raise NotImplementedError()
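+
+
+# A minimal sketch of a concrete SharedState, included only to illustrate the
+# intended call order of the hooks above (one-time setup, per-story setup,
+# story execution, per-story teardown, one-time teardown). The platform is a
+# placeholder; real subclasses such as
+# telemetry.page.shared_page_state.SharedPageState manage an actual browser
+# and platform.
+class _ExampleSharedState(SharedState):
+
+  def __init__(self, test, options, story_set):
+    super(_ExampleSharedState, self).__init__(test, options, story_set)
+    self._current_story = None
+    self._platform = None  # Placeholder; a real subclass supplies a platform.
+
+  @property
+  def platform(self):
+    return self._platform
+
+  def WillRunStory(self, story):
+    # Per-story setup, e.g. launching the application the story exercises.
+    self._current_story = story
+
+  def CanRunStory(self, story):
+    del story  # unused
+    return True
+
+  def RunStory(self, results):
+    # Delegate to the story itself, handing it this shared state.
+    del results  # unused
+    self._current_story.Run(self)
+
+  def DidRunStory(self, results):
+    # Per-story teardown.
+    del results  # unused
+    self._current_story = None
+
+  def TearDownState(self):
+    # One-time teardown after all stories sharing this state have run.
+    pass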
diff --git a/catapult/telemetry/telemetry/story/story.py b/catapult/telemetry/telemetry/story/story.py
new file mode 100644
index 0000000..760661e
--- /dev/null
+++ b/catapult/telemetry/telemetry/story/story.py
@@ -0,0 +1,119 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import re
+
+from telemetry.story import shared_state as shared_state_module
+
+_next_story_id = 0
+
+
+class Story(object):
+  """A class styled on unittest.TestCase for creating story tests.
+
+  Tests should override Run to start the application (if needed) and perform
+  actions on it. To share state between different tests, one can define a
+  shared_state_class whose hooks are called before, after, and in between
+  multiple story runs.
+
+  Args:
+    shared_state_class: subclass of telemetry.story.shared_state.SharedState.
+    name: string name of this story that can be used for identifying this story
+        in results output.
+    labels: A list or set of string labels that are used for filtering. See
+        story.story_filter for more information.
+    is_local: If True, the story does not require network.
+  """
+
+  def __init__(self, shared_state_class, name='', labels=None,
+               is_local=False, make_javascript_deterministic=True):
+    """
+    Args:
+      make_javascript_deterministic: Whether JavaScript executed on the page
+          is made deterministic across multiple runs. This only takes effect
+          when the web content is served via Web Page Replay. It does not
+          affect stories with no web content or whose HTTP MIME type is not
+          text/html. See also the _InjectScripts method in
+          third_party/webpagereplay/httpclient.py.
+    """
+    assert issubclass(shared_state_class,
+                      shared_state_module.SharedState)
+    self._shared_state_class = shared_state_class
+    self._name = name
+    global _next_story_id
+    self._id = _next_story_id
+    _next_story_id += 1
+    if labels is None:
+      labels = set([])
+    elif isinstance(labels, list):
+      labels = set(labels)
+    else:
+      assert isinstance(labels, set)
+    self._labels = labels
+    self._is_local = is_local
+    self._make_javascript_deterministic = make_javascript_deterministic
+
+  def Run(self, shared_state):
+    """Execute the interactions with the applications and/or platforms."""
+    raise NotImplementedError
+
+  @property
+  def labels(self):
+    return self._labels
+
+  @property
+  def shared_state_class(self):
+    return self._shared_state_class
+
+  @property
+  def id(self):
+    return self._id
+
+  @property
+  def name(self):
+    return self._name
+
+  def AsDict(self):
+    """Converts a story object to a dict suitable for JSON output."""
+    d = {
+      'id': self._id,
+    }
+    if self._name:
+      d['name'] = self._name
+    return d
+
+  @property
+  def file_safe_name(self):
+    """A version of display_name that's safe to use as a filename.
+
+    The default implementation replaces any character that is not
+    alphanumeric with an underscore; subclasses may override it with a more
+    specific implementation.
+    """
+    # Replace every character that is not alphanumeric with an underscore.
+    return re.sub('[^a-zA-Z0-9]', '_', self.display_name)
+
+  @property
+  def display_name(self):
+    if self.name:
+      return self.name
+    else:
+      return self.__class__.__name__
+
+  @property
+  def is_local(self):
+    """Returns True iff this story does not require network."""
+    return self._is_local
+
+  @property
+  def serving_dir(self):
+    """Returns the absolute path to a directory with hash files to data that
+       should be updated from cloud storage, or None if no files need to be
+       updated.
+    """
+    return None
+
+  @property
+  def make_javascript_deterministic(self):
+    return self._make_javascript_deterministic
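+
+
+# A minimal sketch of a Story subclass, illustrating the pattern described in
+# the class docstring: pick a SharedState subclass and override Run. The
+# SharedState subclass is taken as a constructor argument here, and the body
+# of Run is a placeholder rather than a real interaction.
+class _ExampleStory(Story):
+
+  def __init__(self, shared_state_class, name='example'):
+    super(_ExampleStory, self).__init__(
+        shared_state_class, name=name, labels=['example'], is_local=True)
+
+  def Run(self, shared_state):
+    # A real story would use shared_state (e.g. its platform or browser) to
+    # drive the application: navigate somewhere, perform actions, and so on.
+    del shared_state  # unused in this sketch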
diff --git a/catapult/telemetry/telemetry/story/story_filter.py b/catapult/telemetry/telemetry/story/story_filter.py
new file mode 100644
index 0000000..10551b2
--- /dev/null
+++ b/catapult/telemetry/telemetry/story/story_filter.py
@@ -0,0 +1,81 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import optparse
+import re
+
+from telemetry.internal.util import command_line
+
+
+class _StoryMatcher(object):
+  def __init__(self, pattern):
+    self._regex = None
+    self.has_compile_error = False
+    if pattern:
+      try:
+        self._regex = re.compile(pattern)
+      except re.error:
+        self.has_compile_error = True
+
+  def __nonzero__(self):
+    return self._regex is not None
+
+  def HasMatch(self, story):
+    return self and bool(
+        self._regex.search(story.display_name) or
+        (story.name and self._regex.search(story.name)))
+
+
+class _StoryLabelMatcher(object):
+  def __init__(self, labels_str):
+    self._labels = labels_str.split(',') if labels_str else None
+
+  def __nonzero__(self):
+    return self._labels is not None
+
+  def HasLabelIn(self, story):
+    return self and bool(story.labels.intersection(self._labels))
+
+
+class StoryFilter(command_line.ArgumentHandlerMixIn):
+  """Filters stories in the story set based on command-line flags."""
+
+  @classmethod
+  def AddCommandLineArgs(cls, parser):
+    group = optparse.OptionGroup(parser, 'User story filtering options')
+    group.add_option('--story-filter',
+        help='Use only stories whose names match the given filter regexp.')
+    group.add_option('--story-filter-exclude',
+        help='Exclude stories whose names match the given filter regexp.')
+    group.add_option('--story-label-filter',
+        help='Use only stories that have any of these labels.')
+    group.add_option('--story-label-filter-exclude',
+        help='Exclude stories that have any of these labels.')
+    parser.add_option_group(group)
+
+  @classmethod
+  def ProcessCommandLineArgs(cls, parser, args):
+    cls._include_regex = _StoryMatcher(args.story_filter)
+    cls._exclude_regex = _StoryMatcher(args.story_filter_exclude)
+    cls._include_labels = _StoryLabelMatcher(args.story_label_filter)
+    cls._exclude_labels = _StoryLabelMatcher(args.story_label_filter_exclude)
+
+    if cls._include_regex.has_compile_error:
+      raise parser.error('--story-filter: Invalid regex.')
+    if cls._exclude_regex.has_compile_error:
+      raise parser.error('--story-filter-exclude: Invalid regex.')
+
+  @classmethod
+  def IsSelected(cls, story):
+    # Exclude filters take priority.
+    if cls._exclude_labels.HasLabelIn(story):
+      return False
+    if cls._exclude_regex.HasMatch(story):
+      return False
+
+    if cls._include_labels and not cls._include_labels.HasLabelIn(story):
+      return False
+    if cls._include_regex and not cls._include_regex.HasMatch(story):
+      return False
+    return True
diff --git a/catapult/telemetry/telemetry/story/story_filter_unittest.py b/catapult/telemetry/telemetry/story/story_filter_unittest.py
new file mode 100644
index 0000000..2530849
--- /dev/null
+++ b/catapult/telemetry/telemetry/story/story_filter_unittest.py
@@ -0,0 +1,94 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry import story
+from telemetry.page import page
+from telemetry.story import story_filter as story_filter_module
+
+
+class FilterTest(unittest.TestCase):
+
+  def setUp(self):
+    story_set = story.StorySet()
+    self.p1 = page.Page(
+      url='file://your/smile/widen.html', page_set=story_set,
+      name='MayYour.smile_widen', labels=['label1', 'label2'])
+    self.p2 = page.Page(
+      url='file://share_a/smile/too.html', page_set=story_set,
+      name='ShareA.smiles_too', labels=['label1'])
+    self.p3 = page.Page(
+      url='file://share_a/smile/too.html', page_set=story_set,
+      labels=['label2'])
+    self.pages = [self.p1, self.p2, self.p3]
+
+  @staticmethod
+  def ProcessCommandLineArgs(parser=None, **kwargs):
+    class Options(object):
+      def __init__(
+          self, story_filter=None, story_filter_exclude=None,
+          story_label_filter=None, story_label_filter_exclude=None):
+        self.story_filter = story_filter
+        self.story_filter_exclude = story_filter_exclude
+        self.story_label_filter = story_label_filter
+        self.story_label_filter_exclude = story_label_filter_exclude
+    story_filter_module.StoryFilter.ProcessCommandLineArgs(
+        parser, Options(**kwargs))
+
+  def PageSelections(self):
+    return [story_filter_module.StoryFilter.IsSelected(p) for p in self.pages]
+
+  def testNoFilterMatchesAll(self):
+    self.ProcessCommandLineArgs()
+    self.assertEquals([True, True, True], self.PageSelections())
+
+  def testBadRegexCallsParserError(self):
+    class MockParserException(Exception):
+      pass
+    class MockParser(object):
+      def error(self, _):
+        raise MockParserException
+    with self.assertRaises(MockParserException):
+      self.ProcessCommandLineArgs(parser=MockParser(), story_filter='+')
+
+  def testUniqueSubstring(self):
+    self.ProcessCommandLineArgs(story_filter='smile_widen')
+    self.assertEquals([True, False, False], self.PageSelections())
+
+  def testSharedSubstring(self):
+    self.ProcessCommandLineArgs(story_filter='smile')
+    self.assertEquals([True, True, True], self.PageSelections())
+
+  def testNoMatch(self):
+    self.ProcessCommandLineArgs(story_filter='frown')
+    self.assertEquals([False, False, False], self.PageSelections())
+
+  def testExclude(self):
+    self.ProcessCommandLineArgs(story_filter_exclude='ShareA')
+    self.assertEquals([True, False, True], self.PageSelections())
+
+  def testExcludeTakesPriority(self):
+    self.ProcessCommandLineArgs(
+        story_filter='smile',
+        story_filter_exclude='wide')
+    self.assertEquals([False, True, True], self.PageSelections())
+
+  def testNoNameMatchesDisplayName(self):
+    self.ProcessCommandLineArgs(story_filter='share_a/smile')
+    self.assertEquals([False, False, True], self.PageSelections())
+
+  def testNoLabelMatch(self):
+    self.ProcessCommandLineArgs(story_label_filter='labelX')
+    self.assertEquals([False, False, False], self.PageSelections())
+
+  def testLabelsAllMatch(self):
+    self.ProcessCommandLineArgs(story_label_filter='label1,label2')
+    self.assertEquals([True, True, True], self.PageSelections())
+
+  def testExcludeLabelTakesPriority(self):
+    self.ProcessCommandLineArgs(
+        story_label_filter='label1',
+        story_label_filter_exclude='label2')
+    self.assertEquals([False, True, False], self.PageSelections())
diff --git a/catapult/telemetry/telemetry/story/story_set.py b/catapult/telemetry/telemetry/story/story_set.py
new file mode 100644
index 0000000..667cbd0
--- /dev/null
+++ b/catapult/telemetry/telemetry/story/story_set.py
@@ -0,0 +1,155 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import inspect
+import os
+
+from telemetry.story import story as story_module
+from telemetry.wpr import archive_info
+
+
+class StorySet(object):
+  """A collection of stories.
+
+  A typical usage of StorySet would be to subclass it and then call
+  AddStory for each Story.
+  """
+
+  def __init__(self, archive_data_file='', cloud_storage_bucket=None,
+               base_dir=None, serving_dirs=None):
+    """Creates a new StorySet.
+
+    Args:
+      archive_data_file: The path to Web Page Replay's archive data, relative
+          to self.base_dir.
+      cloud_storage_bucket: The cloud storage bucket used to download
+          Web Page Replay's archive data. Valid values are: None,
+          story.PUBLIC_BUCKET, story.PARTNER_BUCKET, or story.INTERNAL_BUCKET
+          (re-exported by telemetry.story from catapult_base.cloud_storage).
+      base_dir: The directory used to resolve archive_data_file and
+          serving_dirs. Defaults to the directory containing the StorySet
+          subclass.
+      serving_dirs: A set of paths, relative to self.base_dir, to directories
+          containing hash files for non-wpr archive data stored in cloud
+          storage.
+    """
+    self.stories = []
+    self._archive_data_file = archive_data_file
+    self._wpr_archive_info = None
+    archive_info.AssertValidCloudStorageBucket(cloud_storage_bucket)
+    self._cloud_storage_bucket = cloud_storage_bucket
+    if base_dir:
+      if not os.path.isdir(base_dir):
+        raise ValueError('Invalid directory path of base_dir: %s' % base_dir)
+      self._base_dir = base_dir
+    else:
+      self._base_dir = os.path.dirname(inspect.getfile(self.__class__))
+    # Convert any relative serving_dirs to absolute paths.
+    self._serving_dirs = set(os.path.realpath(os.path.join(self.base_dir, d))
+                             for d in serving_dirs or [])
+
+  @property
+  def allow_mixed_story_states(self):
+    """True iff Stories are allowed to have different StoryState classes.
+
+    There are no checks in place for determining if SharedStates are
+    being assigned correctly to all Stories in a given StorySet. The
+    majority of test cases should not need the ability to have multiple
+    SharedStates, which usually implies you should be writing multiple
+    benchmarks instead. We provide errors to avoid accidentally assigning
+    or defaulting to the wrong SharedState.
+    Override at your own risk. Here be dragons.
+    """
+    return False
+
+  @property
+  def file_path(self):
+    return inspect.getfile(self.__class__).replace('.pyc', '.py')
+
+  @property
+  def base_dir(self):
+    """The base directory to resolve archive_data_file.
+
+    This defaults to the directory containing the StorySet instance's class.
+    """
+    return self._base_dir
+
+  @property
+  def serving_dirs(self):
+    all_serving_dirs = self._serving_dirs.copy()
+    for story in self.stories:
+      if story.serving_dir:
+        all_serving_dirs.add(story.serving_dir)
+    return all_serving_dirs
+
+  @property
+  def archive_data_file(self):
+    return self._archive_data_file
+
+  @property
+  def bucket(self):
+    return self._cloud_storage_bucket
+
+  @property
+  def wpr_archive_info(self):
+    """Lazily constructs wpr_archive_info if it's not set and returns it."""
+    if self.archive_data_file and not self._wpr_archive_info:
+      self._wpr_archive_info = archive_info.WprArchiveInfo.FromFile(
+          os.path.join(self.base_dir, self.archive_data_file), self.bucket)
+    return self._wpr_archive_info
+
+  def AddStory(self, story):
+    assert isinstance(story, story_module.Story)
+    self.stories.append(story)
+
+  def RemoveStory(self, story):
+    """Removes a Story.
+
+    Allows the stories to be filtered.
+    """
+    self.stories.remove(story)
+
+  @classmethod
+  def Name(cls):
+    """Returns the string name of this StorySet.
+    Note that this should be a classmethod so the benchmark_runner script can
+    match the story class with its name specified in the run command:
+    'Run <User story test name> <User story class name>'
+    """
+    return cls.__module__.split('.')[-1]
+
+  @classmethod
+  def Description(cls):
+    """Return a string explaining in human-understandable terms what this
+    story represents.
+    Note that this should be a classmethod so the benchmark_runner script can
+    display stories' names along with their descriptions in the list command.
+    """
+    if cls.__doc__:
+      return cls.__doc__.splitlines()[0]
+    else:
+      return ''
+
+  def WprFilePathForStory(self, story):
+    """Convenient function to retrieve WPR archive file path.
+
+    Args:
+      story: The Story to look up.
+
+    Returns:
+      The WPR archive file path for the given Story, if found.
+      Otherwise, None.
+    """
+    if not self.wpr_archive_info:
+      return None
+    return self.wpr_archive_info.WprFilePathForStory(story)
+
+  def __iter__(self):
+    return self.stories.__iter__()
+
+  def __len__(self):
+    return len(self.stories)
+
+  def __getitem__(self, key):
+    return self.stories[key]
+
+  def __setitem__(self, key, value):
+    self.stories[key] = value
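+
+
+# A minimal sketch of the "typical usage" mentioned in the StorySet docstring:
+# subclass StorySet and call AddStory for each Story in __init__. The story
+# and shared-state classes below are throwaway placeholders defined only to
+# keep the sketch self-contained; real story sets add concrete Story
+# subclasses instead.
+from telemetry.story import shared_state as _shared_state_module
+
+
+class _SketchSharedState(_shared_state_module.SharedState):
+  pass
+
+
+class _SketchStory(story_module.Story):
+  def Run(self, shared_state):
+    pass
+
+
+class _SketchStorySet(StorySet):
+  def __init__(self):
+    super(_SketchStorySet, self).__init__(cloud_storage_bucket=None)
+    for name in ('first_example', 'second_example'):
+      self.AddStory(_SketchStory(_SketchSharedState, name=name))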
diff --git a/catapult/telemetry/telemetry/story/story_set_unittest.py b/catapult/telemetry/telemetry/story/story_set_unittest.py
new file mode 100644
index 0000000..63e6247
--- /dev/null
+++ b/catapult/telemetry/telemetry/story/story_set_unittest.py
@@ -0,0 +1,80 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import unittest
+
+from telemetry import story
+
+
+# pylint: disable=abstract-method
+class SharedStateBar(story.SharedState):
+  pass
+
+
+class StoryFoo(story.Story):
+  def __init__(self, name='', labels=None):
+    super(StoryFoo, self).__init__(
+        SharedStateBar, name, labels)
+
+
+class StorySetFoo(story.StorySet):
+  """ StorySetFoo is a story set created for testing purpose. """
+  pass
+
+
+class StorySetTest(unittest.TestCase):
+
+  def testStorySetTestName(self):
+    self.assertEquals('story_set_unittest', StorySetFoo.Name())
+
+  def testStorySetTestDescription(self):
+    self.assertEquals(
+        ' StorySetFoo is a story set created for testing purposes. ',
+        StorySetFoo.Description())
+
+  def testBaseDir(self):
+    story_set = StorySetFoo()
+    base_dir = story_set.base_dir
+    self.assertTrue(os.path.isdir(base_dir))
+    self.assertEqual(base_dir, os.path.dirname(__file__))
+
+  def testFilePath(self):
+    story_set = StorySetFoo()
+    self.assertEqual(os.path.abspath(__file__).replace('.pyc', '.py'),
+                     story_set.file_path)
+
+  def testCloudBucket(self):
+    blank_story_set = story.StorySet()
+    self.assertEqual(blank_story_set.bucket, None)
+
+    public_story_set = story.StorySet(
+        cloud_storage_bucket=story.PUBLIC_BUCKET)
+    self.assertEqual(public_story_set.bucket, story.PUBLIC_BUCKET)
+
+    partner_story_set = story.StorySet(
+        cloud_storage_bucket=story.PARTNER_BUCKET)
+    self.assertEqual(partner_story_set.bucket, story.PARTNER_BUCKET)
+
+    internal_story_set = story.StorySet(
+        cloud_storage_bucket=story.INTERNAL_BUCKET)
+    self.assertEqual(internal_story_set.bucket, story.INTERNAL_BUCKET)
+
+    with self.assertRaises(ValueError):
+      story.StorySet(cloud_storage_bucket='garbage_bucket')
+
+  def testRemoveWithEmptySetRaises(self):
+    story_set = story.StorySet()
+    foo_story = StoryFoo()
+    with self.assertRaises(ValueError):
+      story_set.RemoveStory(foo_story)
+
+  def testBasicAddRemove(self):
+    story_set = story.StorySet()
+    foo_story = StoryFoo()
+    story_set.AddStory(foo_story)
+    self.assertEqual([foo_story], story_set.stories)
+
+    story_set.RemoveStory(foo_story)
+    self.assertEqual([], story_set.stories)
diff --git a/catapult/telemetry/telemetry/story/story_unittest.py b/catapult/telemetry/telemetry/story/story_unittest.py
new file mode 100644
index 0000000..7fdead9
--- /dev/null
+++ b/catapult/telemetry/telemetry/story/story_unittest.py
@@ -0,0 +1,60 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry import story
+from telemetry.story import shared_state
+
+
+# pylint: disable=abstract-method
+class SharedStateBar(shared_state.SharedState):
+  pass
+
+
+class StoryFoo(story.Story):
+  def __init__(self, name='', labels=None):
+    super(StoryFoo, self).__init__(
+        SharedStateBar, name, labels)
+
+
+class StoryTest(unittest.TestCase):
+  def testStoriesHaveDifferentIds(self):
+    s0 = story.Story(SharedStateBar, 'foo')
+    s1 = story.Story(SharedStateBar, 'bar')
+    self.assertNotEqual(s0.id, s1.id)
+
+  def testNamelessStoryDisplayName(self):
+    s = StoryFoo()
+    self.assertEquals('StoryFoo', s.display_name)
+
+  def testNamedStoryDisplayName(self):
+    s = StoryFoo('Bar')
+    self.assertEquals('Bar', s.display_name)
+
+  def testStoryFileSafeName(self):
+    s = StoryFoo('Foo Bar:Baz~0')
+    self.assertEquals('Foo_Bar_Baz_0', s.file_safe_name)
+
+  def testNamelessStoryAsDict(self):
+    s = story.Story(SharedStateBar)
+    s_dict = s.AsDict()
+    self.assertEquals(s_dict['id'], s.id)
+    self.assertNotIn('name', s_dict)
+
+  def testNamedStoryAsDict(self):
+    s = story.Story(SharedStateBar, 'Foo')
+    s_dict = s.AsDict()
+    self.assertEquals(s_dict['id'], s.id)
+    self.assertEquals('Foo', s_dict['name'])
+
+  def testMakeJavaScriptDeterministic(self):
+    s = story.Story(SharedStateBar)
+    self.assertTrue(s.make_javascript_deterministic)
+
+    s = story.Story(SharedStateBar, make_javascript_deterministic=False)
+    self.assertFalse(s.make_javascript_deterministic)
+
+    s = story.Story(SharedStateBar, make_javascript_deterministic=True)
+    self.assertTrue(s.make_javascript_deterministic)
diff --git a/catapult/telemetry/telemetry/testing/__init__.py b/catapult/telemetry/telemetry/testing/__init__.py
new file mode 100644
index 0000000..4d6aabb
--- /dev/null
+++ b/catapult/telemetry/telemetry/testing/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/catapult/telemetry/telemetry/testing/browser_test_case.py b/catapult/telemetry/telemetry/testing/browser_test_case.py
new file mode 100644
index 0000000..6845ae1
--- /dev/null
+++ b/catapult/telemetry/telemetry/testing/browser_test_case.py
@@ -0,0 +1,69 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import unittest
+
+from telemetry.internal.browser import browser_finder
+from telemetry.internal.util import path
+from telemetry.testing import options_for_unittests
+
+current_browser_options = None
+current_browser = None
+
+
+def teardown_browser():
+  global current_browser
+  global current_browser_options
+
+  if current_browser:
+    current_browser.Close()
+  current_browser = None
+  current_browser_options = None
+
+
+class BrowserTestCase(unittest.TestCase):
+  @classmethod
+  def setUpClass(cls):
+    cls._platform = None
+    global current_browser
+    global current_browser_options
+
+    options = options_for_unittests.GetCopy()
+
+    cls.CustomizeBrowserOptions(options.browser_options)
+    if not current_browser or (current_browser_options !=
+                               options.browser_options):
+      if current_browser:
+        teardown_browser()
+
+      browser_to_create = browser_finder.FindBrowser(options)
+      if not browser_to_create:
+        raise Exception('No browser found, cannot continue test.')
+
+      try:
+        current_browser = browser_to_create.Create(options)
+        current_browser_options = options.browser_options
+      except:
+        cls.tearDownClass()
+        raise
+    cls._browser = current_browser
+    cls._platform = current_browser.platform
+    cls._device = options.device
+
+  @classmethod
+  def tearDownClass(cls):
+    if cls._platform:
+      cls._platform.StopAllLocalServers()
+
+  @classmethod
+  def CustomizeBrowserOptions(cls, options):
+    """Override to add test-specific options to the BrowserOptions object"""
+    pass
+
+  @classmethod
+  def UrlOfUnittestFile(cls, filename):
+    cls._platform.SetHTTPServerDirectories(path.GetUnittestDataDir())
+    file_path = os.path.join(path.GetUnittestDataDir(), filename)
+    return cls._platform.http_server.UrlOf(file_path)
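+
+
+# A minimal sketch of how a test might build on BrowserTestCase: override
+# CustomizeBrowserOptions to add flags and use the shared cls._browser that
+# setUpClass creates. The extra browser flag and the unittest data file name
+# are illustrative assumptions, not requirements of this class.
+class _ExampleBrowserTest(BrowserTestCase):
+
+  @classmethod
+  def CustomizeBrowserOptions(cls, options):
+    options.AppendExtraBrowserArgs('--enable-logging')
+
+  def testCanNavigateToUnittestFile(self):
+    tab = self._browser.tabs.New()
+    tab.Navigate(self.UrlOfUnittestFile('blank.html'))
+    tab.WaitForDocumentReadyStateToBeComplete()
+    self.assertIn('blank.html', tab.url)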
diff --git a/catapult/telemetry/telemetry/testing/decorators_unittest.py b/catapult/telemetry/telemetry/testing/decorators_unittest.py
new file mode 100644
index 0000000..f27d36c
--- /dev/null
+++ b/catapult/telemetry/telemetry/testing/decorators_unittest.py
@@ -0,0 +1,58 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry import decorators
+
+_counter = 0
+
+
+class Foo(object):
+  @decorators.Cache
+  def GetCountCached(self, _):
+    global _counter
+    _counter = _counter + 1
+    return _counter
+
+
+def CreateFooUncached(_):
+  return Foo()
+
+
+@decorators.Cache
+def CreateFooCached(_):
+  return Foo()
+
+
+class DecoratorsUnitTest(unittest.TestCase):
+  # pylint: disable=blacklisted-name
+
+  def testCacheDecorator(self):
+    self.assertNotEquals(CreateFooUncached(1), CreateFooUncached(2))
+    self.assertNotEquals(CreateFooCached(1), CreateFooCached(2))
+
+    self.assertNotEquals(CreateFooUncached(1), CreateFooUncached(1))
+    self.assertEquals(CreateFooCached(1), CreateFooCached(1))
+
+  def testCacheableMemberCachesOnlyForSameArgs(self):
+    foo = Foo()
+    value_of_one = foo.GetCountCached(1)
+
+    self.assertEquals(value_of_one, foo.GetCountCached(1))
+    self.assertNotEquals(value_of_one, foo.GetCountCached(2))
+
+  def testCacheableMemberHasSeparateCachesForSiblingInstances(self):
+    foo = Foo()
+    sibling_foo = Foo()
+
+    self.assertNotEquals(foo.GetCountCached(1), sibling_foo.GetCountCached(1))
+
+  def testCacheableMemberHasSeparateCachesForNextGenerationInstances(self):
+    foo = Foo()
+    last_generation_count = foo.GetCountCached(1)
+    foo = None
+    foo = Foo()
+
+    self.assertNotEquals(last_generation_count, foo.GetCountCached(1))
diff --git a/catapult/telemetry/telemetry/testing/disabled_cases.py b/catapult/telemetry/telemetry/testing/disabled_cases.py
new file mode 100644
index 0000000..bb4641a
--- /dev/null
+++ b/catapult/telemetry/telemetry/testing/disabled_cases.py
@@ -0,0 +1,63 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry import decorators
+
+
+# These are not real unittests.
+# They are merely to test our Enable/Disable annotations.
+class DisabledCases(unittest.TestCase):
+
+  def testAllEnabled(self):
+    pass
+
+  @decorators.Disabled('all')
+  def testAllDisabled(self):
+    pass
+
+  @decorators.Enabled('mavericks')
+  def testMavericksOnly(self):
+    pass
+
+  @decorators.Disabled('mavericks')
+  def testNoMavericks(self):
+    pass
+
+  @decorators.Enabled('mac')
+  def testMacOnly(self):
+    pass
+
+  @decorators.Disabled('mac')
+  def testNoMac(self):
+    pass
+
+  @decorators.Enabled('chromeos')
+  def testChromeOSOnly(self):
+    pass
+
+  @decorators.Disabled('chromeos')
+  def testNoChromeOS(self):
+    pass
+
+  @decorators.Enabled('win', 'linux')
+  def testWinOrLinuxOnly(self):
+    pass
+
+  @decorators.Disabled('win', 'linux')
+  def testNoWinLinux(self):
+    pass
+
+  @decorators.Enabled('system')
+  def testSystemOnly(self):
+    pass
+
+  @decorators.Disabled('system')
+  def testNoSystem(self):
+    pass
+
+  @decorators.Enabled('has tabs')
+  def testHasTabs(self):
+    pass
diff --git a/catapult/telemetry/telemetry/testing/fakes/__init__.py b/catapult/telemetry/telemetry/testing/fakes/__init__.py
new file mode 100644
index 0000000..41dd87a
--- /dev/null
+++ b/catapult/telemetry/telemetry/testing/fakes/__init__.py
@@ -0,0 +1,459 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Provides fakes for several of Telemetry's internal objects.
+
+These allow code like story_runner and Benchmark to be run and tested
+without compiling or starting a browser. Class names prepended with an
+underscore are intended to be implementation details, and should not
+be subclassed; however, some, like _FakeBrowser, have public APIs that
+may need to be called in tests.
+"""
+
+from telemetry.internal.backends.chrome_inspector import websocket
+from telemetry.internal.browser import browser_options
+from telemetry.internal.platform import system_info
+from telemetry.page import shared_page_state
+from telemetry.util import image_util
+from telemetry.testing.internal import fake_gpu_info
+
+
+# Classes and functions which are intended to be part of the public
+# fakes API.
+
+class FakePlatform(object):
+  def __init__(self):
+    self._network_controller = None
+    self._tracing_controller = None
+
+  @property
+  def is_host_platform(self):
+    raise NotImplementedError
+
+  @property
+  def network_controller(self):
+    if self._network_controller is None:
+      self._network_controller = _FakeNetworkController()
+    return self._network_controller
+
+  @property
+  def tracing_controller(self):
+    if self._tracing_controller is None:
+      self._tracing_controller = _FakeTracingController()
+    return self._tracing_controller
+
+  def CanMonitorThermalThrottling(self):
+    return False
+
+  def IsThermallyThrottled(self):
+    return False
+
+  def HasBeenThermallyThrottled(self):
+    return False
+
+  def GetDeviceTypeName(self):
+    raise NotImplementedError
+
+  def GetArchName(self):
+    raise NotImplementedError
+
+  def GetOSName(self):
+    raise NotImplementedError
+
+  def GetOSVersionName(self):
+    raise NotImplementedError
+
+  def StopAllLocalServers(self):
+    pass
+
+
+class FakeLinuxPlatform(FakePlatform):
+  def __init__(self):
+    super(FakeLinuxPlatform, self).__init__()
+    self.screenshot_png_data = None
+    self.http_server_directories = []
+    self.http_server = FakeHTTPServer()
+
+  @property
+  def is_host_platform(self):
+    return True
+
+  def GetDeviceTypeName(self):
+    return 'Desktop'
+
+  def GetArchName(self):
+    return 'x86_64'
+
+  def GetOSName(self):
+    return 'linux'
+
+  def GetOSVersionName(self):
+    return 'trusty'
+
+  def CanTakeScreenshot(self):
+    return bool(self.screenshot_png_data)
+
+  def TakeScreenshot(self, file_path):
+    if not self.CanTakeScreenshot():
+      raise NotImplementedError
+    img = image_util.FromBase64Png(self.screenshot_png_data)
+    image_util.WritePngFile(img, file_path)
+    return True
+
+  def SetHTTPServerDirectories(self, paths):
+    self.http_server_directories.append(paths)
+
+
+class FakeHTTPServer(object):
+  def UrlOf(self, url):
+    del url  # unused
+    return 'file:///foo'
+
+
+class FakePossibleBrowser(object):
+  def __init__(self):
+    self._returned_browser = _FakeBrowser(FakeLinuxPlatform())
+    self.browser_type = 'linux'
+    self.supports_tab_control = False
+    self.is_remote = False
+
+  @property
+  def returned_browser(self):
+    """The browser object that will be returned through later API calls."""
+    return self._returned_browser
+
+  def Create(self, finder_options):
+    del finder_options  # unused
+    return self.returned_browser
+
+  @property
+  def platform(self):
+    """The platform object from the returned browser.
+
+    To change this or set it up, change the returned browser's
+    platform.
+    """
+    return self.returned_browser.platform
+
+  def IsRemote(self):
+    return self.is_remote
+
+  def SetCredentialsPath(self, _):
+    pass
+
+
+class FakeSharedPageState(shared_page_state.SharedPageState):
+  def __init__(self, test, finder_options, story_set):
+    super(FakeSharedPageState, self).__init__(test, finder_options, story_set)
+
+  def _GetPossibleBrowser(self, test, finder_options):
+    p = FakePossibleBrowser()
+    self.ConfigurePossibleBrowser(p)
+    return p
+
+  def ConfigurePossibleBrowser(self, possible_browser):
+    """Override this to configure the PossibleBrowser.
+
+    Can make changes to the browser's configuration here via e.g.:
+       possible_browser.returned_browser.returned_system_info = ...
+    """
+    pass
+
+  def DidRunStory(self, results):
+    # TODO(kbr): add a test which throws an exception from DidRunStory
+    # to verify the fix from https://crrev.com/86984d5fc56ce00e7b37ebe .
+    super(FakeSharedPageState, self).DidRunStory(results)
+
+
+class FakeSystemInfo(system_info.SystemInfo):
+  def __init__(self, model_name='', gpu_dict=None):
+    if gpu_dict is None:
+      gpu_dict = fake_gpu_info.FAKE_GPU_INFO
+    super(FakeSystemInfo, self).__init__(model_name, gpu_dict)
+
+
+class _FakeBrowserFinderOptions(browser_options.BrowserFinderOptions):
+  def __init__(self, *args, **kwargs):
+    browser_options.BrowserFinderOptions.__init__(self, *args, **kwargs)
+    self.fake_possible_browser = FakePossibleBrowser()
+
+
+def CreateBrowserFinderOptions(browser_type=None):
+  """Creates fake browser finder options for discovering a browser."""
+  return _FakeBrowserFinderOptions(browser_type=browser_type)
+
+
+# Internal classes. Note that end users may still need to both call
+# and mock out methods of these classes, but they should not be
+# subclassed.
+
+class _FakeBrowser(object):
+  def __init__(self, platform):
+    self._tabs = _FakeTabList(self)
+    self._returned_system_info = FakeSystemInfo()
+    self._platform = platform
+    self._browser_type = 'release'
+
+  @property
+  def platform(self):
+    return self._platform
+
+  @platform.setter
+  def platform(self, incoming):
+    """Allows overriding of the fake browser's platform object."""
+    assert isinstance(incoming, FakePlatform)
+    self._platform = incoming
+
+  @property
+  def returned_system_info(self):
+    """The object which will be returned from calls to GetSystemInfo."""
+    return self._returned_system_info
+
+  @returned_system_info.setter
+  def returned_system_info(self, incoming):
+    """Allows overriding of the returned SystemInfo object.
+
+    Incoming argument must be an instance of FakeSystemInfo."""
+    assert isinstance(incoming, FakeSystemInfo)
+    self._returned_system_info = incoming
+
+  @property
+  def browser_type(self):
+    """The browser_type this browser claims to be ('debug', 'release', etc.)"""
+    return self._browser_type
+
+  @browser_type.setter
+  def browser_type(self, incoming):
+    """Allows setting of the browser_type."""
+    self._browser_type = incoming
+
+  @property
+  def credentials(self):
+    return _FakeCredentials()
+
+  def Close(self):
+    pass
+
+  @property
+  def supports_system_info(self):
+    return True
+
+  def GetSystemInfo(self):
+    return self.returned_system_info
+
+  @property
+  def supports_tab_control(self):
+    return True
+
+  @property
+  def tabs(self):
+    return self._tabs
+
+
+class _FakeCredentials(object):
+  def WarnIfMissingCredentials(self, _):
+    pass
+
+
+class _FakeTracingController(object):
+  def __init__(self):
+    self._is_tracing = False
+
+  def StartTracing(self, tracing_config, timeout=10):
+    self._is_tracing = True
+    del tracing_config
+    del timeout
+
+  def StopTracing(self):
+    self._is_tracing = False
+
+  @property
+  def is_tracing_running(self):
+    return self._is_tracing
+
+  def ClearStateIfNeeded(self):
+    pass
+
+
+class _FakeNetworkController(object):
+  def __init__(self):
+    self.wpr_mode = None
+    self.extra_wpr_args = None
+    self.is_replay_active = False
+    self.is_open = False
+
+  def Open(self, wpr_mode, extra_wpr_args):
+    self.wpr_mode = wpr_mode
+    self.extra_wpr_args = extra_wpr_args
+    self.is_open = True
+
+  def Close(self):
+    self.wpr_mode = None
+    self.extra_wpr_args = None
+    self.is_replay_active = False
+    self.is_open = False
+
+  def StartReplay(self, archive_path, make_javascript_deterministic=False):
+    del make_javascript_deterministic  # Unused.
+    assert self.is_open
+    self.is_replay_active = archive_path is not None
+
+  def StopReplay(self):
+    self.is_replay_active = False
+
+
+class _FakeTab(object):
+  def __init__(self, browser, tab_id):
+    self._browser = browser
+    self._tab_id = str(tab_id)
+    self._collect_garbage_count = 0
+    self.test_png = None
+
+  @property
+  def collect_garbage_count(self):
+    return self._collect_garbage_count
+
+  @property
+  def id(self):
+    return self._tab_id
+
+  @property
+  def browser(self):
+    return self._browser
+
+  def WaitForDocumentReadyStateToBeComplete(self, timeout=0):
+    pass
+
+  def Navigate(self, url, script_to_evaluate_on_commit=None,
+               timeout=0):
+    pass
+
+  def WaitForDocumentReadyStateToBeInteractiveOrBetter(self, timeout=0):
+    pass
+
+  def IsAlive(self):
+    return True
+
+  def CloseConnections(self):
+    pass
+
+  def CollectGarbage(self):
+    self._collect_garbage_count += 1
+
+  def Close(self):
+    pass
+
+  @property
+  def screenshot_supported(self):
+    return self.test_png is not None
+
+  def Screenshot(self):
+    assert self.screenshot_supported, 'Screenshot is not supported'
+    return image_util.FromBase64Png(self.test_png)
+
+
+class _FakeTabList(object):
+  _current_tab_id = 0
+
+  def __init__(self, browser):
+    self._tabs = []
+    self._browser = browser
+
+  def New(self, timeout=300):
+    del timeout  # unused
+    type(self)._current_tab_id += 1
+    t = _FakeTab(self._browser, type(self)._current_tab_id)
+    self._tabs.append(t)
+    return t
+
+  def __iter__(self):
+    return self._tabs.__iter__()
+
+  def __len__(self):
+    return len(self._tabs)
+
+  def __getitem__(self, index):
+    return self._tabs[index]
+
+  def GetTabById(self, identifier):
+    """The identifier of a tab can be accessed with tab.id."""
+    for tab in self._tabs:
+      if tab.id == identifier:
+        return tab
+    return None
+
+
+class FakeInspectorWebsocket(object):
+  """A fake InspectorWebsocket.
+
+  A fake that allows tests to send pregenerated data. Normal
+  InspectorWebsockets allow for any number of domain handlers. This fake only
+  allows up to 1 domain handler, and assumes that the domain of the response
+  always matches that of the handler.
+  """
+
+  _NOTIFICATION_EVENT = 1
+  _NOTIFICATION_CALLBACK = 2
+
+  def __init__(self, mock_timer):
+    self._mock_timer = mock_timer
+    self._notifications = []
+    self._response_handlers = {}
+    self._pending_callbacks = {}
+    self._handler = None
+
+  def RegisterDomain(self, _, handler):
+    self._handler = handler
+
+  def AddEvent(self, method, params, time):
+    if self._notifications:
+      assert self._notifications[-1][1] < time, (
+          'Current response is scheduled earlier than previous response.')
+    response = {'method': method, 'params': params}
+    self._notifications.append((response, time, self._NOTIFICATION_EVENT))
+
+  def AddAsyncResponse(self, method, result, time):
+    if self._notifications:
+      assert self._notifications[-1][1] < time, (
+          'Current response is scheduled earlier than previous response.')
+    response = {'method': method, 'result': result}
+    self._notifications.append((response, time, self._NOTIFICATION_CALLBACK))
+
+  def AddResponseHandler(self, method, handler):
+    self._response_handlers[method] = handler
+
+  def SyncRequest(self, request, *args, **kwargs):
+    del args, kwargs  # unused
+    handler = self._response_handlers[request['method']]
+    return handler(request) if handler else None
+
+  def AsyncRequest(self, request, callback):
+    self._pending_callbacks.setdefault(request['method'], []).append(callback)
+
+  def SendAndIgnoreResponse(self, request):
+    pass
+
+  def Connect(self, _):
+    pass
+
+  def DispatchNotifications(self, timeout):
+    current_time = self._mock_timer.time()
+    if not self._notifications:
+      self._mock_timer.SetTime(current_time + timeout + 1)
+      raise websocket.WebSocketTimeoutException()
+
+    response, time, kind = self._notifications[0]
+    if time - current_time > timeout:
+      self._mock_timer.SetTime(current_time + timeout + 1)
+      raise websocket.WebSocketTimeoutException()
+
+    self._notifications.pop(0)
+    self._mock_timer.SetTime(time + 1)
+    if kind == self._NOTIFICATION_EVENT:
+      self._handler(response)
+    elif kind == self._NOTIFICATION_CALLBACK:
+      callback = self._pending_callbacks.get(response['method']).pop(0)
+      callback(response)
+    else:
+      raise Exception('Unexpected response type')
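+
+
+# A minimal sketch of the override pattern suggested by
+# FakeSharedPageState.ConfigurePossibleBrowser above: a test-specific subclass
+# can swap in its own SystemInfo (or platform) on the returned fake browser
+# before it is handed to the code under test, typically by passing this class
+# as a page's shared_page_state_class. The model name is an arbitrary
+# placeholder.
+class _ExampleFakeSharedPageState(FakeSharedPageState):
+
+  def ConfigurePossibleBrowser(self, possible_browser):
+    possible_browser.returned_browser.returned_system_info = FakeSystemInfo(
+        model_name='placeholder-model')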
diff --git a/catapult/telemetry/telemetry/testing/gtest_progress_reporter.py b/catapult/telemetry/telemetry/testing/gtest_progress_reporter.py
new file mode 100644
index 0000000..68fd878
--- /dev/null
+++ b/catapult/telemetry/telemetry/testing/gtest_progress_reporter.py
@@ -0,0 +1,87 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import time
+import unittest
+
+from telemetry.internal.util import exception_formatter
+from telemetry.testing import progress_reporter
+
+
+def _FormatTestName(test):
+  chunks = test.id().split('.')[2:]
+  return '.'.join(chunks)
+
+
+class GTestProgressReporter(progress_reporter.ProgressReporter):
+  def __init__(self, output_stream):
+    super(GTestProgressReporter, self).__init__(output_stream)
+    self._suite_start_time = None
+    self._test_start_time = None
+
+  def _Print(self, *args):
+    print >> self._output_stream, ' '.join(map(str, args))
+    self._output_stream.flush()
+
+  def _TestTimeMs(self):
+    return (time.time() - self._test_start_time) * 1000
+
+  def StartTest(self, test):
+    self._Print('[ RUN      ]', _FormatTestName(test))
+    self._test_start_time = time.time()
+
+  def StartTestSuite(self, suite):
+    contains_test_suites = any(isinstance(test, unittest.TestSuite)
+                               for test in suite)
+    if not contains_test_suites:
+      test_count = len([test for test in suite])
+      unit = 'test' if test_count == 1 else 'tests'
+      self._Print('[----------]', test_count, unit)
+      self._suite_start_time = time.time()
+
+  def StopTestSuite(self, suite):
+    contains_test_suites = any(isinstance(test, unittest.TestSuite)
+                               for test in suite)
+    if not contains_test_suites:
+      test_count = len([test for test in suite])
+      unit = 'test' if test_count == 1 else 'tests'
+      elapsed_ms = (time.time() - self._suite_start_time) * 1000
+      self._Print('[----------]', test_count, unit,
+                  '(%d ms total)' % elapsed_ms)
+      self._Print()
+
+  def StopTestRun(self, result):
+    unit = 'test' if len(result.successes) == 1 else 'tests'
+    self._Print('[  PASSED  ]', len(result.successes), '%s.' % unit)
+    if result.errors or result.failures:
+      all_errors = result.errors[:]
+      all_errors.extend(result.failures)
+      unit = 'test' if len(all_errors) == 1 else 'tests'
+      self._Print('[  FAILED  ]', len(all_errors), '%s, listed below:' % unit)
+      for test, _ in all_errors:
+        self._Print('[  FAILED  ] ', _FormatTestName(test))
+    if not result.wasSuccessful():
+      self._Print()
+      count = len(result.errors) + len(result.failures)
+      unit = 'TEST' if count == 1 else 'TESTS'
+      self._Print(count, 'FAILED', unit)
+    self._Print()
+
+  def Error(self, test, err):
+    self.Failure(test, err)
+
+  def Failure(self, test, err):
+    exception_formatter.PrintFormattedException(*err)
+    test_name = _FormatTestName(test)
+    self._Print('[  FAILED  ]', test_name, '(%0.f ms)' % self._TestTimeMs())
+
+  def Success(self, test):
+    test_name = _FormatTestName(test)
+    self._Print('[       OK ]', test_name, '(%0.f ms)' % self._TestTimeMs())
+
+  def Skip(self, test, reason):
+    test_name = _FormatTestName(test)
+    logging.warning('===== SKIPPING TEST %s: %s =====', test_name, reason)
+    self.Success(test)
diff --git a/catapult/telemetry/telemetry/testing/gtest_progress_reporter_unittest.py b/catapult/telemetry/telemetry/testing/gtest_progress_reporter_unittest.py
new file mode 100644
index 0000000..a6bdcb2
--- /dev/null
+++ b/catapult/telemetry/telemetry/testing/gtest_progress_reporter_unittest.py
@@ -0,0 +1,111 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+import unittest
+
+from telemetry.core import exceptions
+from telemetry.testing import gtest_progress_reporter
+from telemetry.testing import simple_mock
+from telemetry.testing import stream
+
+
+try:
+  raise exceptions.IntentionalException()
+except exceptions.IntentionalException:
+  INTENTIONAL_EXCEPTION = sys.exc_info()
+
+
+class TestFoo(unittest.TestCase):
+  # The test method intentionally doesn't have the 'test' prefix. This is so
+  # that the run_test script won't run this test.
+  def runTezt(self):
+    pass
+
+
+class TestResultWithSuccesses(unittest.TestResult):
+  def __init__(self):
+    super(TestResultWithSuccesses, self).__init__()
+    self.successes = []
+
+  def addSuccess(self, test):
+    super(TestResultWithSuccesses, self).addSuccess(test)
+    self.successes.append(test)
+
+
+class GTestProgressReporterTest(unittest.TestCase):
+  def setUp(self):
+    super(GTestProgressReporterTest, self).setUp()
+    self._stream = stream.TestOutputStream()
+    self._formatter = gtest_progress_reporter.GTestProgressReporter(
+        self._stream)
+
+    self._mock_timer = simple_mock.MockTimer(gtest_progress_reporter)
+
+  def tearDown(self):
+    self._mock_timer.Restore()
+
+  def testTestSuiteWithWrapperSuite(self):
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.TestSuite())
+    self._formatter.StartTestSuite(suite)
+    self._formatter.StopTestSuite(suite)
+
+    self.assertEqual(self._stream.output_data, '')
+
+  def testTestSuiteWithTestCase(self):
+    suite = unittest.TestSuite()
+    suite.addTest(TestFoo(methodName='runTezt'))
+    self._formatter.StartTestSuite(suite)
+    self._mock_timer.SetTime(0.042)
+    self._formatter.StopTestSuite(suite)
+
+    expected = ('[----------] 1 test\n'
+                '[----------] 1 test (42 ms total)\n\n')
+    self.assertEqual(self._stream.output_data, expected)
+
+  def testCaseFailure(self):
+    test = TestFoo(methodName='runTezt')
+    self._formatter.StartTest(test)
+    self._mock_timer.SetTime(0.042)
+    self._formatter.Failure(test, INTENTIONAL_EXCEPTION)
+
+    expected = (
+        '[ RUN      ] gtest_progress_reporter_unittest.TestFoo.runTezt\n'
+        '[  FAILED  ] gtest_progress_reporter_unittest.TestFoo.runTezt '
+        '(42 ms)\n')
+    self.assertEqual(self._stream.output_data, expected)
+
+  def testCaseSuccess(self):
+    test = TestFoo(methodName='runTezt')
+    self._formatter.StartTest(test)
+    self._mock_timer.SetTime(0.042)
+    self._formatter.Success(test)
+
+    expected = (
+        '[ RUN      ] gtest_progress_reporter_unittest.TestFoo.runTezt\n'
+        '[       OK ] gtest_progress_reporter_unittest.TestFoo.runTezt '
+        '(42 ms)\n')
+    self.assertEqual(self._stream.output_data, expected)
+
+  def testStopTestRun(self):
+    result = TestResultWithSuccesses()
+    self._formatter.StopTestRun(result)
+
+    expected = '[  PASSED  ] 0 tests.\n\n'
+    self.assertEqual(self._stream.output_data, expected)
+
+  def testStopTestRunWithFailureAndSuccess(self):
+    test = TestFoo(methodName='runTezt')
+    result = TestResultWithSuccesses()
+    result.addSuccess(test)
+    result.addFailure(test, INTENTIONAL_EXCEPTION)
+    self._formatter.StopTestRun(result)
+
+    expected = (
+        '[  PASSED  ] 1 test.\n'
+        '[  FAILED  ] 1 test, listed below:\n'
+        '[  FAILED  ]  gtest_progress_reporter_unittest.TestFoo.runTezt\n\n'
+        '1 FAILED TEST\n\n')
+    self.assertEqual(self._stream.output_data, expected)
diff --git a/catapult/telemetry/telemetry/testing/internal/__init__.py b/catapult/telemetry/telemetry/testing/internal/__init__.py
new file mode 100644
index 0000000..50b23df
--- /dev/null
+++ b/catapult/telemetry/telemetry/testing/internal/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/catapult/telemetry/telemetry/testing/internal/fake_gpu_info.py b/catapult/telemetry/telemetry/testing/internal/fake_gpu_info.py
new file mode 100644
index 0000000..b28b40e
--- /dev/null
+++ b/catapult/telemetry/telemetry/testing/internal/fake_gpu_info.py
@@ -0,0 +1,242 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This dictionary of GPU information was captured from a run of
+# Telemetry on a Linux workstation with NVIDIA GPU. It helps test
+# telemetry.internal.platform's GPUInfo class, and specifically the
+# attributes it expects to find in the dictionary; if the code changes
+# in an incompatible way, tests using this fake GPU info will begin
+# failing, indicating this fake data must be updated.
+#
+# To regenerate it, import pdb in
+# telemetry/internal/platform/gpu_info.py and add a call to
+# pdb.set_trace() in GPUInfo.FromDict before the return statement.
+# Print the attrs dictionary in the debugger and copy/paste the result
+# on the right-hand side of this assignment. Then run:
+#
+#   pyformat [this file name] | sed -e "s/'/'/g"
+#
+# and put the output into this file.
+
+FAKE_GPU_INFO = {
+    'feature_status':
+        {
+            'flash_stage3d': 'enabled',
+            'gpu_compositing': 'enabled',
+            'video_decode': 'unavailable_software',
+            'flash_3d': 'enabled',
+            'webgl': 'enabled',
+            'video_encode': 'enabled',
+            'multiple_raster_threads': 'enabled_on',
+            '2d_canvas': 'unavailable_software',
+            'rasterization': 'disabled_software',
+            'flash_stage3d_baseline': 'enabled'
+        },
+    'aux_attributes':
+        {
+            'optimus': False,
+            'sandboxed': True,
+            'basic_info_state': 1,
+            'adapter_luid': 0.0,
+            'driver_version': '331.79',
+            'direct_rendering': True,
+            'amd_switchable': False,
+            'context_info_state': 1,
+            'process_crash_count': 0,
+            'pixel_shader_version': '4.40',
+            'gl_ws_version': '1.4',
+            'lenovo_dcute': False,
+            'can_lose_context': False,
+            'driver_vendor': 'NVIDIA',
+            'max_msaa_samples': '64',
+            'software_rendering': False,
+            'gl_version': '4.4.0 NVIDIA 331.79',
+            'gl_ws_vendor': 'NVIDIA Corporation',
+            'vertex_shader_version': '4.40',
+            'initialization_time': 1.284043,
+            'gl_reset_notification_strategy': 33362,
+            'gl_ws_extensions':
+                'GLX_EXT_visual_info GLX_EXT_visual_rating GLX_SGIX_fbconfig '
+                'GLX_SGIX_pbuffer GLX_SGI_video_sync GLX_SGI_swap_control '
+                'GLX_EXT_swap_control GLX_EXT_swap_control_tear '
+                'GLX_EXT_texture_from_pixmap GLX_EXT_buffer_age '
+                'GLX_ARB_create_context GLX_ARB_create_context_profile '
+                'GLX_EXT_create_context_es_profile '
+                'GLX_EXT_create_context_es2_profile '
+                'GLX_ARB_create_context_robustness GLX_ARB_multisample '
+                'GLX_NV_float_buffer GLX_ARB_fbconfig_float GLX_NV_swap_group'
+                ' GLX_EXT_framebuffer_sRGB GLX_NV_multisample_coverage '
+                'GLX_NV_copy_image GLX_NV_video_capture ',
+            'gl_renderer': 'Quadro 600/PCIe/SSE2',
+            'driver_date': '',
+            'gl_vendor': 'NVIDIA Corporation',
+            'gl_extensions':
+                'GL_AMD_multi_draw_indirect GL_ARB_arrays_of_arrays '
+                'GL_ARB_base_instance GL_ARB_blend_func_extended '
+                'GL_ARB_buffer_storage GL_ARB_clear_buffer_object '
+                'GL_ARB_clear_texture GL_ARB_color_buffer_float '
+                'GL_ARB_compatibility GL_ARB_compressed_texture_pixel_storage'
+                ' GL_ARB_conservative_depth GL_ARB_compute_shader '
+                'GL_ARB_compute_variable_group_size GL_ARB_copy_buffer '
+                'GL_ARB_copy_image GL_ARB_debug_output '
+                'GL_ARB_depth_buffer_float GL_ARB_depth_clamp '
+                'GL_ARB_depth_texture GL_ARB_draw_buffers '
+                'GL_ARB_draw_buffers_blend GL_ARB_draw_indirect '
+                'GL_ARB_draw_elements_base_vertex GL_ARB_draw_instanced '
+                'GL_ARB_enhanced_layouts GL_ARB_ES2_compatibility '
+                'GL_ARB_ES3_compatibility GL_ARB_explicit_attrib_location '
+                'GL_ARB_explicit_uniform_location '
+                'GL_ARB_fragment_coord_conventions '
+                'GL_ARB_fragment_layer_viewport GL_ARB_fragment_program '
+                'GL_ARB_fragment_program_shadow GL_ARB_fragment_shader '
+                'GL_ARB_framebuffer_no_attachments GL_ARB_framebuffer_object '
+                'GL_ARB_framebuffer_sRGB GL_ARB_geometry_shader4 '
+                'GL_ARB_get_program_binary GL_ARB_gpu_shader5 '
+                'GL_ARB_gpu_shader_fp64 GL_ARB_half_float_pixel '
+                'GL_ARB_half_float_vertex GL_ARB_imaging '
+                'GL_ARB_indirect_parameters GL_ARB_instanced_arrays '
+                'GL_ARB_internalformat_query GL_ARB_internalformat_query2 '
+                'GL_ARB_invalidate_subdata GL_ARB_map_buffer_alignment '
+                'GL_ARB_map_buffer_range GL_ARB_multi_bind '
+                'GL_ARB_multi_draw_indirect GL_ARB_multisample '
+                'GL_ARB_multitexture GL_ARB_occlusion_query '
+                'GL_ARB_occlusion_query2 GL_ARB_pixel_buffer_object '
+                'GL_ARB_point_parameters GL_ARB_point_sprite '
+                'GL_ARB_program_interface_query GL_ARB_provoking_vertex '
+                'GL_ARB_robust_buffer_access_behavior GL_ARB_robustness '
+                'GL_ARB_sample_shading GL_ARB_sampler_objects '
+                'GL_ARB_seamless_cube_map GL_ARB_separate_shader_objects '
+                'GL_ARB_shader_atomic_counters GL_ARB_shader_bit_encoding '
+                'GL_ARB_shader_draw_parameters GL_ARB_shader_group_vote '
+                'GL_ARB_shader_image_load_store GL_ARB_shader_image_size '
+                'GL_ARB_shader_objects GL_ARB_shader_precision '
+                'GL_ARB_query_buffer_object '
+                'GL_ARB_shader_storage_buffer_object GL_ARB_shader_subroutine'
+                ' GL_ARB_shader_texture_lod GL_ARB_shading_language_100 '
+                'GL_ARB_shading_language_420pack '
+                'GL_ARB_shading_language_include '
+                'GL_ARB_shading_language_packing GL_ARB_shadow '
+                'GL_ARB_stencil_texturing GL_ARB_sync '
+                'GL_ARB_tessellation_shader GL_ARB_texture_border_clamp '
+                'GL_ARB_texture_buffer_object '
+                'GL_ARB_texture_buffer_object_rgb32 '
+                'GL_ARB_texture_buffer_range GL_ARB_texture_compression '
+                'GL_ARB_texture_compression_bptc '
+                'GL_ARB_texture_compression_rgtc GL_ARB_texture_cube_map '
+                'GL_ARB_texture_cube_map_array GL_ARB_texture_env_add '
+                'GL_ARB_texture_env_combine GL_ARB_texture_env_crossbar '
+                'GL_ARB_texture_env_dot3 GL_ARB_texture_float '
+                'GL_ARB_texture_gather GL_ARB_texture_mirror_clamp_to_edge '
+                'GL_ARB_texture_mirrored_repeat GL_ARB_texture_multisample '
+                'GL_ARB_texture_non_power_of_two GL_ARB_texture_query_levels '
+                'GL_ARB_texture_query_lod GL_ARB_texture_rectangle '
+                'GL_ARB_texture_rg GL_ARB_texture_rgb10_a2ui '
+                'GL_ARB_texture_stencil8 GL_ARB_texture_storage '
+                'GL_ARB_texture_storage_multisample GL_ARB_texture_swizzle '
+                'GL_ARB_texture_view GL_ARB_timer_query '
+                'GL_ARB_transform_feedback2 GL_ARB_transform_feedback3 '
+                'GL_ARB_transform_feedback_instanced GL_ARB_transpose_matrix '
+                'GL_ARB_uniform_buffer_object GL_ARB_vertex_array_bgra '
+                'GL_ARB_vertex_array_object GL_ARB_vertex_attrib_64bit '
+                'GL_ARB_vertex_attrib_binding GL_ARB_vertex_buffer_object '
+                'GL_ARB_vertex_program GL_ARB_vertex_shader '
+                'GL_ARB_vertex_type_10f_11f_11f_rev '
+                'GL_ARB_vertex_type_2_10_10_10_rev GL_ARB_viewport_array '
+                'GL_ARB_window_pos GL_ATI_draw_buffers GL_ATI_texture_float '
+                'GL_ATI_texture_mirror_once GL_S3_s3tc GL_EXT_texture_env_add'
+                ' GL_EXT_abgr GL_EXT_bgra GL_EXT_bindable_uniform '
+                'GL_EXT_blend_color GL_EXT_blend_equation_separate '
+                'GL_EXT_blend_func_separate GL_EXT_blend_minmax '
+                'GL_EXT_blend_subtract GL_EXT_compiled_vertex_array '
+                'GL_EXT_Cg_shader GL_EXT_depth_bounds_test '
+                'GL_EXT_direct_state_access GL_EXT_draw_buffers2 '
+                'GL_EXT_draw_instanced GL_EXT_draw_range_elements '
+                'GL_EXT_fog_coord GL_EXT_framebuffer_blit '
+                'GL_EXT_framebuffer_multisample '
+                'GL_EXTX_framebuffer_mixed_formats '
+                'GL_EXT_framebuffer_multisample_blit_scaled '
+                'GL_EXT_framebuffer_object GL_EXT_framebuffer_sRGB '
+                'GL_EXT_geometry_shader4 GL_EXT_gpu_program_parameters '
+                'GL_EXT_gpu_shader4 GL_EXT_multi_draw_arrays '
+                'GL_EXT_packed_depth_stencil GL_EXT_packed_float '
+                'GL_EXT_packed_pixels GL_EXT_pixel_buffer_object '
+                'GL_EXT_point_parameters GL_EXT_provoking_vertex '
+                'GL_EXT_rescale_normal GL_EXT_secondary_color '
+                'GL_EXT_separate_shader_objects '
+                'GL_EXT_separate_specular_color '
+                'GL_EXT_shader_image_load_store GL_EXT_shadow_funcs '
+                'GL_EXT_stencil_two_side GL_EXT_stencil_wrap GL_EXT_texture3D'
+                ' GL_EXT_texture_array GL_EXT_texture_buffer_object '
+                'GL_EXT_texture_compression_dxt1 '
+                'GL_EXT_texture_compression_latc '
+                'GL_EXT_texture_compression_rgtc '
+                'GL_EXT_texture_compression_s3tc GL_EXT_texture_cube_map '
+                'GL_EXT_texture_edge_clamp GL_EXT_texture_env_combine '
+                'GL_EXT_texture_env_dot3 GL_EXT_texture_filter_anisotropic '
+                'GL_EXT_texture_integer GL_EXT_texture_lod '
+                'GL_EXT_texture_lod_bias GL_EXT_texture_mirror_clamp '
+                'GL_EXT_texture_object GL_EXT_texture_shared_exponent '
+                'GL_EXT_texture_sRGB GL_EXT_texture_sRGB_decode '
+                'GL_EXT_texture_storage GL_EXT_texture_swizzle '
+                'GL_EXT_timer_query GL_EXT_transform_feedback2 '
+                'GL_EXT_vertex_array GL_EXT_vertex_array_bgra '
+                'GL_EXT_vertex_attrib_64bit GL_EXT_x11_sync_object '
+                'GL_EXT_import_sync_object GL_IBM_rasterpos_clip '
+                'GL_IBM_texture_mirrored_repeat GL_KHR_debug '
+                'GL_KTX_buffer_region GL_NV_bindless_multi_draw_indirect '
+                'GL_NV_blend_equation_advanced GL_NV_blend_square '
+                'GL_NV_compute_program5 GL_NV_conditional_render '
+                'GL_NV_copy_depth_to_color GL_NV_copy_image '
+                'GL_NV_depth_buffer_float GL_NV_depth_clamp '
+                'GL_NV_draw_texture GL_NV_ES1_1_compatibility '
+                'GL_NV_explicit_multisample GL_NV_fence GL_NV_float_buffer '
+                'GL_NV_fog_distance GL_NV_fragment_program '
+                'GL_NV_fragment_program_option GL_NV_fragment_program2 '
+                'GL_NV_framebuffer_multisample_coverage '
+                'GL_NV_geometry_shader4 GL_NV_gpu_program4 '
+                'GL_NV_gpu_program4_1 GL_NV_gpu_program5 '
+                'GL_NV_gpu_program5_mem_extended GL_NV_gpu_program_fp64 '
+                'GL_NV_gpu_shader5 GL_NV_half_float GL_NV_light_max_exponent '
+                'GL_NV_multisample_coverage GL_NV_multisample_filter_hint '
+                'GL_NV_occlusion_query GL_NV_packed_depth_stencil '
+                'GL_NV_parameter_buffer_object GL_NV_parameter_buffer_object2'
+                ' GL_NV_path_rendering GL_NV_pixel_data_range '
+                'GL_NV_point_sprite GL_NV_primitive_restart '
+                'GL_NV_register_combiners GL_NV_register_combiners2 '
+                'GL_NV_shader_atomic_counters GL_NV_shader_atomic_float '
+                'GL_NV_shader_buffer_load GL_NV_shader_storage_buffer_object '
+                'GL_ARB_sparse_texture GL_NV_texgen_reflection '
+                'GL_NV_texture_barrier GL_NV_texture_compression_vtc '
+                'GL_NV_texture_env_combine4 GL_NV_texture_expand_normal '
+                'GL_NV_texture_multisample GL_NV_texture_rectangle '
+                'GL_NV_texture_shader GL_NV_texture_shader2 '
+                'GL_NV_texture_shader3 GL_NV_transform_feedback '
+                'GL_NV_transform_feedback2 GL_NV_vdpau_interop '
+                'GL_NV_vertex_array_range GL_NV_vertex_array_range2 '
+                'GL_NV_vertex_attrib_integer_64bit '
+                'GL_NV_vertex_buffer_unified_memory GL_NV_vertex_program '
+                'GL_NV_vertex_program1_1 GL_NV_vertex_program2 '
+                'GL_NV_vertex_program2_option GL_NV_vertex_program3 '
+                'GL_NVX_conditional_render GL_NVX_gpu_memory_info '
+                'GL_SGIS_generate_mipmap GL_SGIS_texture_lod '
+                'GL_SGIX_depth_texture GL_SGIX_shadow GL_SUN_slice_accum '
+        },
+    'devices':
+        [
+            {
+                'device_string': '',
+                'vendor_id': 4318.0,
+                'device_id': 3576.0,
+                'vendor_string': ''
+            }],
+    'driver_bug_workarounds':
+        ['clear_uniforms_before_first_program_use',
+         'disable_gl_path_rendering',
+         'init_gl_position_in_vertex_shader',
+         'init_vertex_attributes',
+         'remove_pow_with_constant_exponent',
+         'scalarize_vec_and_mat_constructor_args',
+         'use_current_program_after_successful_link',
+         'use_virtualized_gl_contexts']
+}
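+
+# A minimal sketch of how tests might consume this fake data, assuming the
+# GPUInfo.FromDict entry point referenced in the comment at the top of this
+# file:
+#
+#   from telemetry.internal.platform import gpu_info
+#   info = gpu_info.GPUInfo.FromDict(FAKE_GPU_INFO)
+#
+# If GPUInfo.FromDict starts expecting attributes this dictionary lacks, the
+# construction above fails, which is exactly the signal described above.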
diff --git a/catapult/telemetry/telemetry/testing/options_for_unittests.py b/catapult/telemetry/telemetry/testing/options_for_unittests.py
new file mode 100644
index 0000000..1c47fbf
--- /dev/null
+++ b/catapult/telemetry/telemetry/testing/options_for_unittests.py
@@ -0,0 +1,32 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This module provides the global variable options_for_unittests.
+
+This is set to a BrowserOptions object by the test harness, or None
+if unit tests are not running.
+
+This allows multiple unit tests to use a specific
+browser, in the face of multiple options."""
+
+
+_options = []
+
+
+def Push(options):
+  _options.append(options)
+
+
+def Pop():
+  return _options.pop()
+
+
+def GetCopy():
+  if not AreSet():
+    return None
+  return _options[-1].Copy()
+
+
+def AreSet():
+  return bool(_options)
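+
+
+# A minimal usage sketch (caller names hypothetical): the test harness pushes
+# the parsed options before running tests and pops them afterwards, while
+# individual tests grab an isolated copy via GetCopy():
+#
+#   options_for_unittests.Push(finder_options)
+#   try:
+#     options = options_for_unittests.GetCopy()  # a copy of finder_options
+#     ...  # run tests that need the options
+#   finally:
+#     options_for_unittests.Pop()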
diff --git a/catapult/telemetry/telemetry/testing/page_test_test_case.py b/catapult/telemetry/telemetry/testing/page_test_test_case.py
new file mode 100644
index 0000000..ef8c372
--- /dev/null
+++ b/catapult/telemetry/telemetry/testing/page_test_test_case.py
@@ -0,0 +1,106 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Provide a TestCase base class for PageTest subclasses' unittests."""
+
+import unittest
+
+from telemetry import benchmark
+from telemetry import story
+from telemetry.core import exceptions
+from telemetry.core import util
+from telemetry.internal.results import results_options
+from telemetry.internal import story_runner
+from telemetry.page import page as page_module
+from telemetry.page import page_test
+from telemetry.testing import options_for_unittests
+
+
+class BasicTestPage(page_module.Page):
+  def __init__(self, url, story_set, base_dir):
+    super(BasicTestPage, self).__init__(url, story_set, base_dir)
+
+  def RunPageInteractions(self, action_runner):
+    with action_runner.CreateGestureInteraction('ScrollAction'):
+      action_runner.ScrollPage()
+
+
+class EmptyMetadataForTest(benchmark.BenchmarkMetadata):
+  def __init__(self):
+    super(EmptyMetadataForTest, self).__init__('')
+
+
+class PageTestTestCase(unittest.TestCase):
+  """A base class to simplify writing unit tests for PageTest subclasses."""
+
+  def CreateStorySetFromFileInUnittestDataDir(self, test_filename):
+    ps = self.CreateEmptyPageSet()
+    page = BasicTestPage('file://' + test_filename, ps, base_dir=ps.base_dir)
+    ps.AddStory(page)
+    return ps
+
+  def CreateEmptyPageSet(self):
+    base_dir = util.GetUnittestDataDir()
+    ps = story.StorySet(base_dir=base_dir)
+    return ps
+
+  def RunMeasurement(self, measurement, ps, options=None):
+    """Runs a measurement against a pageset, returning the rows it outputs."""
+    if options is None:
+      options = options_for_unittests.GetCopy()
+    assert options
+    temp_parser = options.CreateParser()
+    story_runner.AddCommandLineArgs(temp_parser)
+    defaults = temp_parser.get_default_values()
+    for k, v in defaults.__dict__.items():
+      if hasattr(options, k):
+        continue
+      setattr(options, k, v)
+
+    if isinstance(measurement, page_test.PageTest):
+      measurement.CustomizeBrowserOptions(options.browser_options)
+    options.output_file = None
+    options.output_formats = ['none']
+    options.suppress_gtest_report = True
+    options.output_trace_tag = None
+    story_runner.ProcessCommandLineArgs(temp_parser, options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    story_runner.Run(measurement, ps, options, results)
+    return results
+
+  def TestTracingCleanedUp(self, measurement_class, options=None):
+    ps = self.CreateStorySetFromFileInUnittestDataDir('blank.html')
+    start_tracing_called = [False]
+    stop_tracing_called = [False]
+
+    class BuggyMeasurement(measurement_class):
+      def __init__(self, *args, **kwargs):
+        measurement_class.__init__(self, *args, **kwargs)
+
+      # Inject fake tracing methods into the tracing_controller.
+      def TabForPage(self, page, browser):
+        ActualStartTracing = browser.platform.tracing_controller.StartTracing
+        def FakeStartTracing(*args, **kwargs):
+          ActualStartTracing(*args, **kwargs)
+          start_tracing_called[0] = True
+          raise exceptions.IntentionalException
+        browser.platform.tracing_controller.StartTracing = FakeStartTracing
+
+        ActualStopTracing = browser.platform.tracing_controller.StopTracing
+        def FakeStopTracing(*args, **kwargs):
+          result = ActualStopTracing(*args, **kwargs)
+          stop_tracing_called[0] = True
+          return result
+        browser.platform.tracing_controller.StopTracing = FakeStopTracing
+
+        return measurement_class.TabForPage(self, page, browser)
+
+    measurement = BuggyMeasurement()
+    try:
+      self.RunMeasurement(measurement, ps, options=options)
+    except page_test.TestNotSupportedOnPlatformError:
+      pass
+    if start_tracing_called[0]:
+      self.assertTrue(stop_tracing_called[0])
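+
+
+# A minimal subclass sketch (MyMeasurement is hypothetical): unit tests for a
+# PageTest subclass can reuse the helpers above, for example:
+#
+#   class MyMeasurementUnitTest(PageTestTestCase):
+#     def testBlankPage(self):
+#       ps = self.CreateStorySetFromFileInUnittestDataDir('blank.html')
+#       results = self.RunMeasurement(MyMeasurement(), ps)
+#       # ... assert on `results` here ...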
diff --git a/catapult/telemetry/telemetry/testing/progress_reporter.py b/catapult/telemetry/telemetry/testing/progress_reporter.py
new file mode 100644
index 0000000..036192b
--- /dev/null
+++ b/catapult/telemetry/telemetry/testing/progress_reporter.py
@@ -0,0 +1,131 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+import sys
+
+from telemetry.internal.util import path
+from telemetry.testing import options_for_unittests
+
+
+class ProgressReporter(object):
+  def __init__(self, output_stream):
+    self._output_stream = output_stream
+
+  def StartTest(self, test):
+    pass
+
+  def StartTestSuite(self, suite):
+    pass
+
+  def StartTestRun(self):
+    pass
+
+  def StopTest(self, test):
+    pass
+
+  def StopTestSuite(self, suite):
+    pass
+
+  def StopTestRun(self, result):
+    pass
+
+  def Error(self, test, err):
+    pass
+
+  def Failure(self, test, err):
+    pass
+
+  def Success(self, test):
+    pass
+
+  def Skip(self, test, reason):
+    pass
+
+
+class TestSuite(unittest.TestSuite):
+  """TestSuite that can delegate start and stop calls to a TestResult object."""
+  def run(self, result):  # pylint: disable=arguments-differ
+    if hasattr(result, 'startTestSuite'):
+      result.startTestSuite(self)
+    result = super(TestSuite, self).run(result)
+    if hasattr(result, 'stopTestSuite'):
+      result.stopTestSuite(self)
+    return result
+
+
+class TestRunner(object):
+  def run(self, test, progress_reporters, repeat_count, args):
+    sys.path.append(path.GetUnittestDataDir())
+    result = TestResult(progress_reporters)
+    result.startTestRun()
+    try:
+      options_for_unittests.Push(args)
+      for _ in xrange(repeat_count):
+        test(result)
+    finally:
+      options_for_unittests.Pop()
+      result.stopTestRun()
+
+    return result
+
+
+class TestResult(unittest.TestResult):
+  def __init__(self, progress_reporters):
+    super(TestResult, self).__init__()
+    self.successes = []
+    self._progress_reporters = progress_reporters
+
+  @property
+  def failures_and_errors(self):
+    return self.failures + self.errors
+
+  def startTest(self, test):
+    super(TestResult, self).startTest(test)
+    for progress_reporter in self._progress_reporters:
+      progress_reporter.StartTest(test)
+
+  def startTestSuite(self, suite):
+    for progress_reporter in self._progress_reporters:
+      progress_reporter.StartTestSuite(suite)
+
+  def startTestRun(self):
+    super(TestResult, self).startTestRun()
+    for progress_reporter in self._progress_reporters:
+      progress_reporter.StartTestRun()
+
+  def stopTest(self, test):
+    super(TestResult, self).stopTest(test)
+    for progress_reporter in self._progress_reporters:
+      progress_reporter.StopTest(test)
+
+  def stopTestSuite(self, suite):
+    for progress_reporter in self._progress_reporters:
+      progress_reporter.StopTestSuite(suite)
+
+  def stopTestRun(self):
+    super(TestResult, self).stopTestRun()
+    for progress_reporter in self._progress_reporters:
+      progress_reporter.StopTestRun(self)
+
+  def addError(self, test, err):
+    super(TestResult, self).addError(test, err)
+    for progress_reporter in self._progress_reporters:
+      progress_reporter.Error(test, err)
+
+  def addFailure(self, test, err):
+    super(TestResult, self).addFailure(test, err)
+    for progress_reporter in self._progress_reporters:
+      progress_reporter.Failure(test, err)
+
+  def addSuccess(self, test):
+    super(TestResult, self).addSuccess(test)
+    self.successes.append(test)
+    for progress_reporter in self._progress_reporters:
+      progress_reporter.Success(test)
+
+  def addSkip(self, test, reason):
+    super(TestResult, self).addSkip(test, reason)
+    for progress_reporter in self._progress_reporters:
+      progress_reporter.Skip(test, reason)
diff --git a/catapult/telemetry/telemetry/testing/progress_reporter_unittest.py b/catapult/telemetry/telemetry/testing/progress_reporter_unittest.py
new file mode 100644
index 0000000..2a60882
--- /dev/null
+++ b/catapult/telemetry/telemetry/testing/progress_reporter_unittest.py
@@ -0,0 +1,54 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.testing import progress_reporter
+
+
+class TestFoo(unittest.TestCase):
+  # These methods intentionally lack the 'test' prefix so that the run_tests
+  # script won't run them directly; they are added explicitly below.
+  def RunPassingTest(self):
+    pass
+
+  def RunFailingTest(self):
+    self.fail('expected failure')
+
+
+class LoggingProgressReporter(object):
+  def __init__(self):
+    self._call_log = []
+
+  @property
+  def call_log(self):
+    return tuple(self._call_log)
+
+  def __getattr__(self, name):
+    def wrapper(*_):
+      self._call_log.append(name)
+    return wrapper
+
+
+class ProgressReporterTest(unittest.TestCase):
+  def testTestRunner(self):
+    suite = progress_reporter.TestSuite()
+    suite.addTest(TestFoo(methodName='RunPassingTest'))
+    suite.addTest(TestFoo(methodName='RunFailingTest'))
+
+    reporter = LoggingProgressReporter()
+    runner = progress_reporter.TestRunner()
+    progress_reporters = (reporter,)
+    result = runner.run(suite, progress_reporters, 1, None)
+
+    self.assertEqual(len(result.successes), 1)
+    self.assertEqual(len(result.failures), 1)
+    self.assertEqual(len(result.failures_and_errors), 1)
+    expected = (
+        'StartTestRun', 'StartTestSuite',
+        'StartTest', 'Success', 'StopTest',
+        'StartTest', 'Failure', 'StopTest',
+        'StopTestSuite', 'StopTestRun',
+    )
+    self.assertEqual(reporter.call_log, expected)
diff --git a/catapult/telemetry/telemetry/testing/run_chromeos_tests.py b/catapult/telemetry/telemetry/testing/run_chromeos_tests.py
new file mode 100644
index 0000000..2313321
--- /dev/null
+++ b/catapult/telemetry/telemetry/testing/run_chromeos_tests.py
@@ -0,0 +1,60 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import logging
+
+from telemetry.testing import run_tests
+
+
+def RunChromeOSTests(browser_type, tests_to_run):
+  """ Run ChromeOS tests.
+  Args:
+    |browser_type|: string specifies which browser type to use.
+    |tests_to_run|: a list of tuples (top_level_dir, unit_tests), whereas
+      |top_level_dir| specifies the top level directory for running tests, and
+      |unit_tests| is a list of string test names to run.
+  """
+  stream = _LoggingOutputStream()
+  error_string = ''
+
+  for (top_level_dir, unit_tests) in tests_to_run:
+    logging.info('Running unit tests in %s with browser_type "%s".' %
+                 (top_level_dir, browser_type))
+
+    ret = _RunOneSetOfTests(browser_type, top_level_dir, unit_tests, stream)
+    if ret:
+      error_string += 'The unit tests of %s failed.\n' % top_level_dir
+  return error_string
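+
+# A hypothetical invocation (browser type, path and test name are
+# placeholders) illustrating the tests_to_run format described above:
+#
+#   RunChromeOSTests('cros-chrome', [
+#       ('/path/to/telemetry', ['telemetry.core.some_unittest']),
+#   ])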
+
+
+def _RunOneSetOfTests(browser_type, top_level_dir, tests, stream):
+  args = ['--browser', browser_type,
+          '--top-level-dir', top_level_dir,
+          '--jobs', '1',
+          '--disable-logging-config'] + tests
+  return run_tests.RunTestsCommand.main(args, stream=stream)
+
+
+class _LoggingOutputStream(object):
+
+  def __init__(self):
+    self._buffer = []
+
+  def write(self, s):
+    """Buffer a string write. Log it when we encounter a newline."""
+    if '\n' in s:
+      segments = s.split('\n')
+      segments[0] = ''.join(self._buffer + [segments[0]])
+      log_level = logging.getLogger().getEffectiveLevel()
+      try:  # TODO(dtu): We need this because of crbug.com/394571
+        logging.getLogger().setLevel(logging.INFO)
+        for line in segments[:-1]:
+          logging.info(line)
+      finally:
+        logging.getLogger().setLevel(log_level)
+      self._buffer = [segments[-1]]
+    else:
+      self._buffer.append(s)
+
+  def flush(self):
+    pass
diff --git a/catapult/telemetry/telemetry/testing/run_tests.py b/catapult/telemetry/telemetry/testing/run_tests.py
new file mode 100644
index 0000000..c0e5034
--- /dev/null
+++ b/catapult/telemetry/telemetry/testing/run_tests.py
@@ -0,0 +1,257 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import logging
+import sys
+
+from telemetry.core import util
+from telemetry.core import platform as platform_module
+from telemetry import decorators
+from telemetry.internal.browser import browser_finder
+from telemetry.internal.browser import browser_finder_exceptions
+from telemetry.internal.browser import browser_options
+from telemetry.internal.platform import android_device
+from telemetry.internal.util import binary_manager
+from telemetry.internal.util import command_line
+from telemetry.internal.util import ps_util
+from telemetry.testing import browser_test_case
+from telemetry.testing import options_for_unittests
+
+from catapult_base import xvfb
+
+import typ
+
+
+class RunTestsCommand(command_line.OptparseCommand):
+  """Run unit tests"""
+
+  usage = '[test_name ...] [<options>]'
+  xvfb_process = None
+
+  def __init__(self):
+    super(RunTestsCommand, self).__init__()
+    self.stream = sys.stdout
+
+  @classmethod
+  def CreateParser(cls):
+    options = browser_options.BrowserFinderOptions()
+    options.browser_type = 'any'
+    parser = options.CreateParser('%%prog %s' % cls.usage)
+    return parser
+
+  @classmethod
+  def AddCommandLineArgs(cls, parser, _):
+    parser.add_option('--start-xvfb', action='store_true',
+                      default=False, help='Start Xvfb display if needed.')
+    parser.add_option('--repeat-count', type='int', default=1,
+                      help='Repeats each test the provided number of times.')
+    parser.add_option('--no-browser', action='store_true', default=False,
+                      help='Don\'t require an actual browser to run the tests.')
+    parser.add_option('-d', '--also-run-disabled-tests',
+                      dest='run_disabled_tests',
+                      action='store_true', default=False,
+                      help='Ignore @Disabled and @Enabled restrictions.')
+    parser.add_option('--exact-test-filter', action='store_true', default=False,
+                      help='Treat test filter as exact matches (default is '
+                           'substring matches).')
+    parser.add_option('--client-config', dest='client_config', default=None)
+    parser.add_option('--disable-logging-config', action='store_true',
+                      default=False,
+                      help='Do not configure logging (it is configured by '
+                           'default).')
+
+    typ.ArgumentParser.add_option_group(parser,
+                                        "Options for running the tests",
+                                        running=True,
+                                        skip=['-d', '-v', '--verbose'])
+    typ.ArgumentParser.add_option_group(parser,
+                                        "Options for reporting the results",
+                                        reporting=True)
+
+  @classmethod
+  def ProcessCommandLineArgs(cls, parser, args, _):
+    # We retry failures by default unless we're running a list of tests
+    # explicitly.
+    if not args.retry_limit and not args.positional_args:
+      args.retry_limit = 3
+
+    if args.no_browser:
+      return
+
+    if args.start_xvfb and xvfb.ShouldStartXvfb():
+      cls.xvfb_process = xvfb.StartXvfb()
+    try:
+      possible_browser = browser_finder.FindBrowser(args)
+    except browser_finder_exceptions.BrowserFinderException, ex:
+      parser.error(ex)
+
+    if not possible_browser:
+      parser.error('No browser found of type %s. Cannot run tests.\n'
+                   'Re-run with --browser=list to see '
+                   'available browser types.' % args.browser_type)
+
+  @classmethod
+  def main(cls, args=None, stream=None):  # pylint: disable=arguments-differ
+    # We override the superclass so that we can hook in the 'stream' arg.
+    parser = cls.CreateParser()
+    cls.AddCommandLineArgs(parser, None)
+    options, positional_args = parser.parse_args(args)
+    options.positional_args = positional_args
+
+    try:
+      # Must initialize the DependencyManager before calling
+      # browser_finder.FindBrowser(args)
+      binary_manager.InitDependencyManager(options.client_config)
+      cls.ProcessCommandLineArgs(parser, options, None)
+
+      obj = cls()
+      if stream is not None:
+        obj.stream = stream
+      return obj.Run(options)
+    finally:
+      if cls.xvfb_process:
+        cls.xvfb_process.kill()
+
+  def Run(self, args):
+    runner = typ.Runner()
+    if self.stream:
+      runner.host.stdout = self.stream
+
+    if args.no_browser:
+      possible_browser = None
+      platform = platform_module.GetHostPlatform()
+    else:
+      possible_browser = browser_finder.FindBrowser(args)
+      platform = possible_browser.platform
+
+    # Telemetry seems to overload the system if we run one test per core,
+    # so we scale things back a fair amount. Many of the telemetry tests
+    # are long-running, so there's a limit to how much parallelism we
+    # can effectively use for now anyway.
+    #
+    # It should be possible to handle multiple devices if we adjust the
+    # browser_finder code properly, but for now we only handle one on ChromeOS.
+    if platform.GetOSName() == 'chromeos':
+      runner.args.jobs = 1
+    elif platform.GetOSName() == 'android':
+      android_devs = android_device.FindAllAvailableDevices(args)
+      runner.args.jobs = len(android_devs)
+      if runner.args.jobs == 0:
+        raise RuntimeError("No Android device found")
+      print 'Running tests with %d Android device(s).' % runner.args.jobs
+    elif platform.GetOSVersionName() == 'xp':
+      # For an undiagnosed reason, XP falls over with more parallelism.
+      # See crbug.com/388256
+      runner.args.jobs = max(int(args.jobs) // 4, 1)
+    else:
+      runner.args.jobs = max(int(args.jobs) // 2, 1)
+
+    runner.args.metadata = args.metadata
+    runner.args.passthrough = args.passthrough
+    runner.args.path = args.path
+    runner.args.retry_limit = args.retry_limit
+    runner.args.test_results_server = args.test_results_server
+    runner.args.test_type = args.test_type
+    runner.args.top_level_dir = args.top_level_dir
+    runner.args.write_full_results_to = args.write_full_results_to
+    runner.args.write_trace_to = args.write_trace_to
+    runner.args.list_only = args.list_only
+
+    runner.args.path.append(util.GetUnittestDataDir())
+
+    # Always print this info out to ease debugging.
+    runner.args.timing = True
+    runner.args.verbose = 3
+
+    runner.classifier = GetClassifier(args, possible_browser)
+    runner.context = args
+    runner.setup_fn = _SetUpProcess
+    runner.teardown_fn = _TearDownProcess
+    runner.win_multiprocessing = typ.WinMultiprocessing.importable
+    try:
+      ret, _, _ = runner.run()
+    except KeyboardInterrupt:
+      print >> sys.stderr, "interrupted, exiting"
+      ret = 130
+    return ret
+
+
+def GetClassifier(args, possible_browser):
+
+  def ClassifyTestWithoutBrowser(test_set, test):
+    name = test.id()
+    if (not args.positional_args
+        or _MatchesSelectedTest(name, args.positional_args,
+                                  args.exact_test_filter)):
+      # TODO(telemetry-team): Make sure that all telemetry unittest that invokes
+      # actual browser are subclasses of browser_test_case.BrowserTestCase
+      # (crbug.com/537428)
+      if issubclass(test.__class__, browser_test_case.BrowserTestCase):
+        test_set.tests_to_skip.append(typ.TestInput(
+            name, msg='Skip the test because it requires a browser.'))
+      else:
+        test_set.parallel_tests.append(typ.TestInput(name))
+
+  def ClassifyTestWithBrowser(test_set, test):
+    name = test.id()
+    if (not args.positional_args
+        or _MatchesSelectedTest(name, args.positional_args,
+                                args.exact_test_filter)):
+      assert hasattr(test, '_testMethodName')
+      method = getattr(
+          test, test._testMethodName)  # pylint: disable=protected-access
+      should_skip, reason = decorators.ShouldSkip(method, possible_browser)
+      if should_skip and not args.run_disabled_tests:
+        test_set.tests_to_skip.append(typ.TestInput(name, msg=reason))
+      elif decorators.ShouldBeIsolated(method, possible_browser):
+        test_set.isolated_tests.append(typ.TestInput(name))
+      else:
+        test_set.parallel_tests.append(typ.TestInput(name))
+
+  if possible_browser:
+    return ClassifyTestWithBrowser
+  else:
+    return ClassifyTestWithoutBrowser
+
+
+def _MatchesSelectedTest(name, selected_tests, selected_tests_are_exact):
+  if not selected_tests:
+    return False
+  if selected_tests_are_exact:
+    return name in selected_tests
+  else:
+    return any(test in name for test in selected_tests)
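+
+# For illustration (test ids hypothetical): with selected_tests set to
+# ['foo.FooTest.testBar'], exact filtering only accepts that full id, while
+# substring filtering also accepts ids such as 'foo.FooTest.testBarBaz',
+# since the selected string occurs inside them.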
+
+
+def _SetUpProcess(child, context): # pylint: disable=unused-argument
+  ps_util.EnableListingStrayProcessesUponExitHook()
+  if binary_manager.NeedsInit():
+    # Typ doesn't keep the DependencyManager initialization in the child
+    # processes.
+    binary_manager.InitDependencyManager(context.client_config)
+  args = context
+  # Reset the handlers in case some other part of telemetry has already set
+  # them; otherwise the basicConfig call below would have no effect.
+  if not args.disable_logging_config:
+    logging.getLogger().handlers = []
+    logging.basicConfig(
+        level=logging.INFO,
+        format='(%(levelname)s) %(asctime)s %(module)s.%(funcName)s:%(lineno)d'
+              '  %(message)s')
+  if args.device and args.device == 'android':
+    android_devices = android_device.FindAllAvailableDevices(args)
+    if not android_devices:
+      raise RuntimeError("No Android device found")
+    android_devices.sort(key=lambda device: device.name)
+    args.device = android_devices[child.worker_num-1].guid
+  options_for_unittests.Push(args)
+
+
+def _TearDownProcess(child, context): # pylint: disable=unused-argument
+  # It's safe to call teardown_browser even if we did not start any browser
+  # in any of the tests.
+  browser_test_case.teardown_browser()
+  options_for_unittests.Pop()
+
+
+if __name__ == '__main__':
+  ret_code = RunTestsCommand.main()
+  sys.exit(ret_code)
diff --git a/catapult/telemetry/telemetry/testing/run_tests_unittest.py b/catapult/telemetry/telemetry/testing/run_tests_unittest.py
new file mode 100644
index 0000000..3feb88e
--- /dev/null
+++ b/catapult/telemetry/telemetry/testing/run_tests_unittest.py
@@ -0,0 +1,106 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.core import util
+from telemetry.testing import run_tests
+
+
+class MockArgs(object):
+  def __init__(self):
+    self.positional_args = []
+    self.exact_test_filter = True
+    self.run_disabled_tests = False
+
+
+class MockPossibleBrowser(object):
+  def __init__(self, browser_type, os_name, os_version_name,
+               supports_tab_control):
+    self.browser_type = browser_type
+    self.platform = MockPlatform(os_name, os_version_name)
+    self.supports_tab_control = supports_tab_control
+
+
+class MockPlatform(object):
+  def __init__(self, os_name, os_version_name):
+    self.os_name = os_name
+    self.os_version_name = os_version_name
+
+  def GetOSName(self):
+    return self.os_name
+
+  def GetOSVersionName(self):
+    return self.os_version_name
+
+
+class RunTestsUnitTest(unittest.TestCase):
+
+  def _GetEnabledTests(self, browser_type, os_name, os_version_name,
+                       supports_tab_control):
+
+    runner = run_tests.typ.Runner()
+    host = runner.host
+    runner.top_level_dir = util.GetTelemetryDir()
+    runner.args.tests = [host.join(util.GetTelemetryDir(),
+        'telemetry', 'testing', 'disabled_cases.py')]
+    possible_browser = MockPossibleBrowser(
+        browser_type, os_name, os_version_name, supports_tab_control)
+    runner.classifier = run_tests.GetClassifier(MockArgs(), possible_browser)
+    _, test_set = runner.find_tests(runner.args)
+    return set(test.name.split('.')[-1] for test in test_set.parallel_tests)
+
+  def testSystemMacMavericks(self):
+    self.assertEquals(
+        set(['testAllEnabled',
+             'testMacOnly',
+             'testMavericksOnly',
+             'testNoChromeOS',
+             'testNoWinLinux',
+             'testSystemOnly',
+             'testHasTabs']),
+        self._GetEnabledTests('system', 'mac', 'mavericks', True))
+
+  def testSystemMacLion(self):
+    self.assertEquals(
+        set(['testAllEnabled',
+             'testMacOnly',
+             'testNoChromeOS',
+             'testNoMavericks',
+             'testNoWinLinux',
+             'testSystemOnly',
+             'testHasTabs']),
+        self._GetEnabledTests('system', 'mac', 'lion', True))
+
+  def testCrosGuestChromeOS(self):
+    self.assertEquals(
+        set(['testAllEnabled',
+             'testChromeOSOnly',
+             'testNoMac',
+             'testNoMavericks',
+             'testNoSystem',
+             'testNoWinLinux',
+             'testHasTabs']),
+        self._GetEnabledTests('cros-guest', 'chromeos', '', True))
+
+  def testCanaryWindowsWin7(self):
+    self.assertEquals(
+        set(['testAllEnabled',
+             'testNoChromeOS',
+             'testNoMac',
+             'testNoMavericks',
+             'testNoSystem',
+             'testWinOrLinuxOnly',
+             'testHasTabs']),
+        self._GetEnabledTests('canary', 'win', 'win7', True))
+
+  def testDoesntHaveTabs(self):
+    self.assertEquals(
+        set(['testAllEnabled',
+             'testNoChromeOS',
+             'testNoMac',
+             'testNoMavericks',
+             'testNoSystem',
+             'testWinOrLinuxOnly']),
+        self._GetEnabledTests('canary', 'win', 'win7', False))
diff --git a/catapult/telemetry/telemetry/testing/simple_mock.py b/catapult/telemetry/telemetry/testing/simple_mock.py
new file mode 100644
index 0000000..4b0391f
--- /dev/null
+++ b/catapult/telemetry/telemetry/testing/simple_mock.py
@@ -0,0 +1,132 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""A very very simple mock object harness."""
+from types import ModuleType
+
+DONT_CARE = ''
+
+class MockFunctionCall(object):
+  def __init__(self, name):
+    self.name = name
+    self.args = tuple()
+    self.return_value = None
+    self.when_called_handlers = []
+
+  def WithArgs(self, *args):
+    self.args = args
+    return self
+
+  def WillReturn(self, value):
+    self.return_value = value
+    return self
+
+  def WhenCalled(self, handler):
+    self.when_called_handlers.append(handler)
+
+  def VerifyEquals(self, got):
+    if self.name != got.name:
+      raise Exception('Self %s, got %s' % (repr(self), repr(got)))
+    if len(self.args) != len(got.args):
+      raise Exception('Self %s, got %s' % (repr(self), repr(got)))
+    for i in range(len(self.args)):
+      self_a = self.args[i]
+      got_a = got.args[i]
+      if self_a == DONT_CARE:
+        continue
+      if self_a != got_a:
+        raise Exception('Self %s, got %s' % (repr(self), repr(got)))
+
+  def __repr__(self):
+    def arg_to_text(a):
+      if a == DONT_CARE:
+        return '_'
+      return repr(a)
+    args_text = ', '.join([arg_to_text(a) for a in self.args])
+    if self.return_value in (None, DONT_CARE):
+      return '%s(%s)' % (self.name, args_text)
+    return '%s(%s)->%s' % (self.name, args_text, repr(self.return_value))
+
+class MockTrace(object):
+  def __init__(self):
+    self.expected_calls = []
+    self.next_call_index = 0
+
+class MockObject(object):
+  def __init__(self, parent_mock=None):
+    if parent_mock:
+      self._trace = parent_mock._trace # pylint: disable=protected-access
+    else:
+      self._trace = MockTrace()
+
+  def __setattr__(self, name, value):
+    if (not hasattr(self, '_trace') or
+        hasattr(value, 'is_hook')):
+      object.__setattr__(self, name, value)
+      return
+    assert isinstance(value, MockObject)
+    object.__setattr__(self, name, value)
+
+  def SetAttribute(self, name, value):
+    setattr(self, name, value)
+
+  def ExpectCall(self, func_name, *args):
+    assert self._trace.next_call_index == 0
+    if not hasattr(self, func_name):
+      self._install_hook(func_name)
+
+    call = MockFunctionCall(func_name)
+    self._trace.expected_calls.append(call)
+    call.WithArgs(*args)
+    return call
+
+  def _install_hook(self, func_name):
+    def handler(*args, **_):
+      got_call = MockFunctionCall(
+        func_name).WithArgs(*args).WillReturn(DONT_CARE)
+      if self._trace.next_call_index >= len(self._trace.expected_calls):
+        raise Exception(
+          'Call to %s was not expected, at end of programmed trace.' %
+          repr(got_call))
+      expected_call = self._trace.expected_calls[
+        self._trace.next_call_index]
+      expected_call.VerifyEquals(got_call)
+      self._trace.next_call_index += 1
+      for h in expected_call.when_called_handlers:
+        h(*args)
+      return expected_call.return_value
+    handler.is_hook = True
+    setattr(self, func_name, handler)
+
+
+class MockTimer(object):
+  """ A mock timer to fake out the timing for a module.
+    Args:
+      module: module to fake out the time
+  """
+  def __init__(self, module=None):
+    self._elapsed_time = 0
+    self._module = module
+    self._actual_time = None
+    if module:
+      assert isinstance(module, ModuleType)
+      self._actual_time = module.time
+      self._module.time = self
+
+  def sleep(self, time):
+    self._elapsed_time += time
+
+  def time(self):
+    return self._elapsed_time
+
+  def SetTime(self, time):
+    self._elapsed_time = time
+
+  def __del__(self):
+    self.Restore()
+
+  def Restore(self):
+    if self._module:
+      self._module.time = self._actual_time
+      self._module = None
+      self._actual_time = None
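+
+
+# A minimal MockTimer usage sketch (module and method names hypothetical): a
+# module that does `import time` and calls time.time()/time.sleep() can be
+# pointed at the fake clock for a test:
+#
+#   timer = MockTimer(module_under_test)  # replaces module_under_test.time
+#   module_under_test.DoSomethingThatSleeps()
+#   assert timer.time() > 0               # sleep() advanced the fake clock
+#   timer.Restore()                       # put the real time module back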
diff --git a/catapult/telemetry/telemetry/testing/simple_mock_unittest.py b/catapult/telemetry/telemetry/testing/simple_mock_unittest.py
new file mode 100644
index 0000000..220302a
--- /dev/null
+++ b/catapult/telemetry/telemetry/testing/simple_mock_unittest.py
@@ -0,0 +1,82 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import unittest
+
+from telemetry.testing import simple_mock
+
+_ = simple_mock.DONT_CARE
+
+class SimpleMockUnitTest(unittest.TestCase):
+  def testBasic(self):
+    mock = simple_mock.MockObject()
+    mock.ExpectCall('foo')
+
+    mock.foo()
+
+  def testReturn(self):
+    mock = simple_mock.MockObject()
+    mock.ExpectCall('foo').WillReturn(7)
+
+    ret = mock.foo()
+    self.assertEquals(ret, 7)
+
+  def testArgs(self):
+    mock = simple_mock.MockObject()
+    mock.ExpectCall('foo').WithArgs(3, 4)
+
+    mock.foo(3, 4)
+
+  def testArgs2(self):
+    mock = simple_mock.MockObject()
+    mock.ExpectCall('foo', 3, 4)
+
+    mock.foo(3, 4)
+
+  def testArgsMismatch(self):
+    mock = simple_mock.MockObject()
+    mock.ExpectCall('foo').WithArgs(3, 4)
+
+    self.assertRaises(Exception,
+                      lambda: mock.foo(4, 4))
+
+
+  def testArgsDontCare(self):
+    mock = simple_mock.MockObject()
+    mock.ExpectCall('foo').WithArgs(_, 4)
+
+    mock.foo(4, 4)
+
+  def testOnCall(self):
+    mock = simple_mock.MockObject()
+
+    handler_called = []
+    def Handler(arg0):
+      assert arg0 == 7
+      handler_called.append(True)
+    mock.ExpectCall('baz', 7).WhenCalled(Handler)
+
+    mock.baz(7)
+    self.assertTrue(len(handler_called) > 0)
+
+
+  def testSubObject(self):
+    mock = simple_mock.MockObject()
+    mock.bar = simple_mock.MockObject(mock)
+
+    mock.ExpectCall('foo').WithArgs(_, 4)
+    mock.bar.ExpectCall('baz')
+
+    mock.foo(0, 4)
+    mock.bar.baz()
+
+  def testSubObjectMismatch(self):
+    mock = simple_mock.MockObject()
+    mock.bar = simple_mock.MockObject(mock)
+
+    mock.ExpectCall('foo').WithArgs(_, 4)
+    mock.bar.ExpectCall('baz')
+
+    self.assertRaises(
+      Exception,
+      lambda: mock.bar.baz()) # pylint: disable=unnecessary-lambda
diff --git a/catapult/telemetry/telemetry/testing/story_set_smoke_test.py b/catapult/telemetry/telemetry/testing/story_set_smoke_test.py
new file mode 100644
index 0000000..78f200c
--- /dev/null
+++ b/catapult/telemetry/telemetry/testing/story_set_smoke_test.py
@@ -0,0 +1,143 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import unittest
+
+from telemetry.core import discover
+from telemetry.internal.browser import browser_credentials
+from telemetry import page
+from telemetry import story as story_module
+from telemetry.wpr import archive_info
+
+
+class StorySetSmokeTest(unittest.TestCase):
+
+  def setUp(self):
+    # Make sure the added failure message is appended to the default failure
+    # message.
+    self.longMessage = True
+
+  def GetAllStorySetClasses(self, story_sets_dir, top_level_dir):
+    # We can't test page sets that aren't directly constructible since we
+    # don't know what arguments to put for the constructor.
+    return discover.DiscoverClasses(story_sets_dir, top_level_dir,
+                                    story_module.StorySet,
+                                    directly_constructable=True).values()
+
+  def CheckArchive(self, story_set):
+    """Verify that all URLs of pages in story_set have an associated archive."""
+    # TODO: Eventually these should be fatal.
+    if not story_set.archive_data_file:
+      logging.warning('Skipping %s: no archive data file', story_set.file_path)
+      return
+
+    logging.info('Testing %s', story_set.file_path)
+
+    archive_data_file_path = os.path.join(story_set.base_dir,
+                                          story_set.archive_data_file)
+    self.assertTrue(os.path.exists(archive_data_file_path),
+                    msg='Archive data file not found for %s' %
+                    story_set.file_path)
+
+    wpr_archive_info = archive_info.WprArchiveInfo.FromFile(
+        archive_data_file_path, story_set.bucket)
+    for story in story_set.stories:
+      if isinstance(story, page.Page) and story.url.startswith('http'):
+        self.assertTrue(wpr_archive_info.WprFilePathForStory(story),
+                        msg='No archive found for %s in %s' % (
+                            story.url, story_set.archive_data_file))
+
+  def CheckCredentials(self, story_set):
+    """Verify that all pages in story_set use proper credentials"""
+    for story in story_set.stories:
+      if not isinstance(story, page.Page):
+        continue
+      credentials = browser_credentials.BrowserCredentials()
+      if story.credentials_path:
+        credentials.credentials_path = (
+            os.path.join(story.base_dir, story.credentials_path))
+      fail_message = ('page %s of %s has invalid credentials %s' %
+                      (story.url, story_set.file_path, story.credentials))
+      if story.credentials:
+        try:
+          self.assertTrue(credentials.CanLogin(story.credentials), fail_message)
+        except browser_credentials.CredentialsError:
+          self.fail(fail_message)
+
+  def CheckAttributes(self, story_set):
+    """Verify that story_set and its stories base attributes have the right
+       types.
+    """
+    self.CheckAttributesOfStorySetBasicAttributes(story_set)
+    for story in story_set.stories:
+      self.CheckAttributesOfStoryBasicAttributes(story)
+
+  def CheckAttributesOfStorySetBasicAttributes(self, story_set):
+    if story_set.base_dir is not None:
+      self.assertTrue(
+          isinstance(story_set.base_dir, str),
+          msg='story_set\'s base_dir must have type string')
+
+    self.assertTrue(
+        isinstance(story_set.archive_data_file, str),
+        msg='story_set\'s archive_data_file path must have type string')
+
+  def CheckAttributesOfStoryBasicAttributes(self, story):
+    self.assertTrue(not hasattr(story, 'disabled'))
+    self.assertTrue(
+       isinstance(story.name, str),
+       msg='story %s \'s name field must have type string' % story.display_name)
+    self.assertTrue(
+       isinstance(story.labels, set),
+       msg='story %s \'s labels field must have type set' % story.display_name)
+    for l in story.labels:
+      self.assertTrue(
+         isinstance(l, str),
+         msg='label %s in story %s \'s labels must have type string'
+         % (str(l), story.display_name))
+    if not isinstance(story, page.Page):
+      return
+    self.assertTrue(
+       # We use basestring instead of str because story's URL can be string of
+       # unicode.
+       isinstance(story.url, basestring),
+       msg='page %s \'s url must have type string' % story.display_name)
+    self.assertTrue(
+        isinstance(story.startup_url, str),
+        msg=('page %s \'s startup_url field must have type string'
+            % story.display_name))
+    self.assertIsInstance(
+        story.make_javascript_deterministic, bool,
+        msg='page %s \'s make_javascript_deterministic must have type bool'
+            % story.display_name)
+
+  def CheckSharedStates(self, story_set):
+    if not story_set.allow_mixed_story_states:
+      shared_state_class = (
+          story_set.stories[0].shared_state_class)
+      for story in story_set:
+        self.assertIs(
+            shared_state_class,
+            story.shared_state_class,
+            msg='story %s\'s shared_state_class field is different '
+            'from other story\'s shared_state_class whereas '
+            'story set %s disallows having mixed states' %
+            (story, story_set))
+
+  def RunSmokeTest(self, story_sets_dir, top_level_dir):
+    """Run smoke test on all story sets in story_sets_dir.
+
+    Subclass of StorySetSmokeTest is supposed to call this in some test
+    method to run smoke test.
+    """
+    story_sets = self.GetAllStorySetClasses(story_sets_dir, top_level_dir)
+    for story_set_class in story_sets:
+      story_set = story_set_class()
+      logging.info('Testing %s', story_set.file_path)
+      self.CheckArchive(story_set)
+      self.CheckCredentials(story_set)
+      self.CheckAttributes(story_set)
+      self.CheckSharedStates(story_set)
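+
+
+# A minimal subclass sketch (names hypothetical), following the RunSmokeTest
+# docstring above:
+#
+#   class MyStorySetsSmokeTest(StorySetSmokeTest):
+#     def testSmoke(self):
+#       self.RunSmokeTest(my_story_sets_dir, my_top_level_dir)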
diff --git a/catapult/telemetry/telemetry/testing/stream.py b/catapult/telemetry/telemetry/testing/stream.py
new file mode 100644
index 0000000..4d97ab4
--- /dev/null
+++ b/catapult/telemetry/telemetry/testing/stream.py
@@ -0,0 +1,18 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class TestOutputStream(object):
+  def __init__(self):
+    self._output_data = []
+
+  @property
+  def output_data(self):
+    return ''.join(self._output_data)
+
+  def write(self, data):
+    self._output_data.append(data)
+
+  def flush(self):
+    pass
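+
+
+# A TestOutputStream can be handed to code that expects a file-like object;
+# for example, run_tests.RunTestsCommand.main() in this directory accepts a
+# `stream` argument, so a test can capture its output here and assert on
+# `output_data` afterwards.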
diff --git a/catapult/telemetry/telemetry/testing/system_stub.py b/catapult/telemetry/telemetry/testing/system_stub.py
new file mode 100644
index 0000000..42d7791
--- /dev/null
+++ b/catapult/telemetry/telemetry/testing/system_stub.py
@@ -0,0 +1,474 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Provides stubs for os, sys and subprocess for testing
+
+This test allows one to test code that itself uses os, sys, and subprocess.
+"""
+
+import ntpath
+import os
+import posixpath
+import re
+import shlex
+import sys
+
+
+class Override(object):
+  def __init__(self, base_module, module_list):
+    stubs = {'cloud_storage': CloudStorageModuleStub,
+             'open': OpenFunctionStub,
+             'os': OsModuleStub,
+             'perf_control': PerfControlModuleStub,
+             'raw_input': RawInputFunctionStub,
+             'subprocess': SubprocessModuleStub,
+             'sys': SysModuleStub,
+             'thermal_throttle': ThermalThrottleModuleStub,
+             'logging': LoggingStub,
+    }
+    self.adb_commands = None
+    self.os = None
+    self.subprocess = None
+    self.sys = None
+
+    self._base_module = base_module
+    self._overrides = {}
+
+    for module_name in module_list:
+      self._overrides[module_name] = getattr(base_module, module_name, None)
+      setattr(self, module_name, stubs[module_name]())
+      setattr(base_module, module_name, getattr(self, module_name))
+
+    if self.os and self.sys:
+      self.os.path.sys = self.sys
+
+  def __del__(self):
+    assert not len(self._overrides)
+
+  def Restore(self):
+    for module_name, original_module in self._overrides.iteritems():
+      if original_module is None:
+        # This will happen when we override built-in functions, like open.
+        # If we don't delete the attribute, we will shadow the built-in
+        # function with an attribute set to None.
+        delattr(self._base_module, module_name)
+      else:
+        setattr(self._base_module, module_name, original_module)
+    self._overrides = {}
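+
+# A typical usage sketch for Override (module name hypothetical):
+#
+#   stubs = Override(module_under_test, ['os', 'sys'])
+#   try:
+#     ...  # exercise code in module_under_test that touches os and sys
+#   finally:
+#     stubs.Restore()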
+
+
+class AdbDevice(object):
+
+  def __init__(self):
+    self.has_root = False
+    self.needs_su = False
+    self.shell_command_handlers = {}
+    self.mock_content = []
+    self.system_properties = {}
+    if self.system_properties.get('ro.product.cpu.abi') is None:
+      self.system_properties['ro.product.cpu.abi'] = 'armeabi-v7a'
+
+  def HasRoot(self):
+    return self.has_root
+
+  def NeedsSU(self):
+    return self.needs_su
+
+  def RunShellCommand(self, args, **kwargs):
+    del kwargs  # unused
+    if isinstance(args, basestring):
+      args = shlex.split(args)
+    handler = self.shell_command_handlers[args[0]]
+    return handler(args)
+
+  def FileExists(self, _):
+    return False
+
+  def ReadFile(self, device_path, as_root=False):
+    del device_path, as_root  # unused
+    return self.mock_content
+
+  def GetProp(self, property_name):
+    return self.system_properties[property_name]
+
+  def SetProp(self, property_name, property_value):
+    self.system_properties[property_name] = property_value
+
+
+class CloudStorageModuleStub(object):
+  PUBLIC_BUCKET = 'chromium-telemetry'
+  PARTNER_BUCKET = 'chrome-partner-telemetry'
+  INTERNAL_BUCKET = 'chrome-telemetry'
+  BUCKET_ALIASES = {
+    'public': PUBLIC_BUCKET,
+    'partner': PARTNER_BUCKET,
+    'internal': INTERNAL_BUCKET,
+  }
+
+  # These are used to test for CloudStorage errors.
+  INTERNAL_PERMISSION = 2
+  PARTNER_PERMISSION = 1
+  PUBLIC_PERMISSION = 0
+  # Not logged in.
+  CREDENTIALS_ERROR_PERMISSION = -1
+
+  class NotFoundError(Exception):
+    pass
+
+  class CloudStorageError(Exception):
+    pass
+
+  class PermissionError(CloudStorageError):
+    pass
+
+  class CredentialsError(CloudStorageError):
+    pass
+
+  def __init__(self):
+    self.default_remote_paths = {CloudStorageModuleStub.INTERNAL_BUCKET:{},
+                                 CloudStorageModuleStub.PARTNER_BUCKET:{},
+                                 CloudStorageModuleStub.PUBLIC_BUCKET:{}}
+    self.remote_paths = self.default_remote_paths
+    self.local_file_hashes = {}
+    self.local_hash_files = {}
+    self.permission_level = CloudStorageModuleStub.INTERNAL_PERMISSION
+    self.downloaded_files = []
+
+  def SetPermissionLevelForTesting(self, permission_level):
+    self.permission_level = permission_level
+
+  def CheckPermissionLevelForBucket(self, bucket):
+    if bucket == CloudStorageModuleStub.PUBLIC_BUCKET:
+      return
+    elif (self.permission_level ==
+          CloudStorageModuleStub.CREDENTIALS_ERROR_PERMISSION):
+      raise CloudStorageModuleStub.CredentialsError()
+    elif bucket == CloudStorageModuleStub.PARTNER_BUCKET:
+      if self.permission_level < CloudStorageModuleStub.PARTNER_PERMISSION:
+        raise CloudStorageModuleStub.PermissionError()
+    elif bucket == CloudStorageModuleStub.INTERNAL_BUCKET:
+      if self.permission_level < CloudStorageModuleStub.INTERNAL_PERMISSION:
+        raise CloudStorageModuleStub.PermissionError()
+    elif bucket not in self.remote_paths:
+      raise CloudStorageModuleStub.NotFoundError()
+
+  def SetRemotePathsForTesting(self, remote_path_dict=None):
+    if not remote_path_dict:
+      self.remote_paths = self.default_remote_paths
+      return
+    self.remote_paths = remote_path_dict
+
+  def GetRemotePathsForTesting(self):
+    if not self.remote_paths:
+      self.remote_paths = self.default_remote_paths
+    return self.remote_paths
+
+  # Set a dictionary of data files and their "calculated" hashes.
+  def SetCalculatedHashesForTesting(self, calculated_hash_dictionary):
+    self.local_file_hashes = calculated_hash_dictionary
+
+  def GetLocalDataFiles(self):
+    return self.local_file_hashes.keys()
+
+  # Set a dictionary of hash files and the hashes they should contain.
+  def SetHashFileContentsForTesting(self, hash_file_dictionary):
+    self.local_hash_files = hash_file_dictionary
+
+  def GetLocalHashFiles(self):
+    return self.local_hash_files.keys()
+
+  def ChangeRemoteHashForTesting(self, bucket, remote_path, new_hash):
+    self.remote_paths[bucket][remote_path] = new_hash
+
+  def List(self, bucket):
+    if not bucket or not bucket in self.remote_paths:
+      bucket_error = ('Incorrect bucket specified, correct buckets:' +
+                      str(self.remote_paths))
+      raise CloudStorageModuleStub.CloudStorageError(bucket_error)
+    CloudStorageModuleStub.CheckPermissionLevelForBucket(self, bucket)
+    return list(self.remote_paths[bucket].keys())
+
+  def Exists(self, bucket, remote_path):
+    CloudStorageModuleStub.CheckPermissionLevelForBucket(self, bucket)
+    return remote_path in self.remote_paths[bucket]
+
+  def Insert(self, bucket, remote_path, local_path):
+    CloudStorageModuleStub.CheckPermissionLevelForBucket(self, bucket)
+    if not local_path in self.GetLocalDataFiles():
+      file_path_error = 'Local file path does not exist'
+      raise CloudStorageModuleStub.CloudStorageError(file_path_error)
+    self.remote_paths[bucket][remote_path] = (
+      CloudStorageModuleStub.CalculateHash(self, local_path))
+    return remote_path
+
+  def GetHelper(self, bucket, remote_path, local_path, only_if_changed):
+    CloudStorageModuleStub.CheckPermissionLevelForBucket(self, bucket)
+    if not remote_path in self.remote_paths[bucket]:
+      if only_if_changed:
+        return False
+      raise CloudStorageModuleStub.NotFoundError('Remote file does not exist.')
+    remote_hash = self.remote_paths[bucket][remote_path]
+    local_hash = self.local_file_hashes[local_path]
+    if only_if_changed and remote_hash == local_hash:
+      return False
+    self.downloaded_files.append(remote_path)
+    self.local_file_hashes[local_path] = remote_hash
+    self.local_hash_files[local_path + '.sha1'] = remote_hash
+    return remote_hash
+
+  def Get(self, bucket, remote_path, local_path):
+    return CloudStorageModuleStub.GetHelper(self, bucket, remote_path,
+                                            local_path, False)
+
+  def GetIfChanged(self, local_path, bucket=None):
+    remote_path = os.path.basename(local_path)
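+    # If a bucket is given, only check that bucket; otherwise fall back to
+    # the public, partner and internal buckets in that order and return the
+    # first changed hash found.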
+    if bucket:
+      return CloudStorageModuleStub.GetHelper(self, bucket, remote_path,
+                                              local_path, True)
+    result = CloudStorageModuleStub.GetHelper(
+        self, self.PUBLIC_BUCKET, remote_path, local_path, True)
+    if not result:
+      result = CloudStorageModuleStub.GetHelper(
+          self, self.PARTNER_BUCKET, remote_path, local_path, True)
+    if not result:
+      result = CloudStorageModuleStub.GetHelper(
+          self, self.INTERNAL_BUCKET, remote_path, local_path, True)
+    return result
+
+  def GetFilesInDirectoryIfChanged(self, directory, bucket):
+    if os.path.dirname(directory) == directory: # If in the root dir.
+      raise ValueError('Trying to serve root directory from HTTP server.')
+    for dirpath, _, filenames in os.walk(directory):
+      for filename in filenames:
+        path, extension = os.path.splitext(
+            os.path.join(dirpath, filename))
+        if extension != '.sha1':
+          continue
+        self.GetIfChanged(path, bucket)
+
+  def CalculateHash(self, file_path):
+    return self.local_file_hashes[file_path]
+
+  def ReadHash(self, hash_path):
+    return self.local_hash_files[hash_path]
+
+
+class LoggingStub(object):
+  def __init__(self):
+    self.warnings = []
+    self.errors = []
+
+  def info(self, msg, *args):
+    pass
+
+  def error(self, msg, *args):
+    self.errors.append(msg % args)
+
+  def warning(self, msg, *args):
+    self.warnings.append(msg % args)
+
+  def warn(self, msg, *args):
+    self.warning(msg, *args)
+
+
+class OpenFunctionStub(object):
+  class FileStub(object):
+    def __init__(self, data):
+      self._data = data
+
+    def __enter__(self):
+      return self
+
+    def __exit__(self, *args):
+      pass
+
+    def read(self, size=None):
+      if size:
+        return self._data[:size]
+      else:
+        return self._data
+
+    def write(self, data):
+      self._data.write(data)
+
+    def close(self):
+      pass
+
+  def __init__(self):
+    self.files = {}
+
+  def __call__(self, name, *args, **kwargs):
+    return OpenFunctionStub.FileStub(self.files[name])
+
+
+class OsModuleStub(object):
+  class OsEnvironModuleStub(object):
+    def get(self, _):
+      return None
+
+  class OsPathModuleStub(object):
+    def __init__(self, sys_module):
+      self.sys = sys_module
+      self.files = []
+      self.dirs = []
+
+    def exists(self, path):
+      return path in self.files
+
+    def isfile(self, path):
+      return path in self.files
+
+    def isdir(self, path):
+      return path in self.dirs
+
+    def join(self, *paths):
+      def IsAbsolutePath(path):
+        if self.sys.platform.startswith('win'):
+          return re.match('[a-zA-Z]:\\\\', path)
+        else:
+          return path.startswith('/')
+
+      # Per Python specification, if any component is an absolute path,
+      # discard previous components.
+      for index, path in reversed(list(enumerate(paths))):
+        if IsAbsolutePath(path):
+          paths = paths[index:]
+          break
+
+      if self.sys.platform.startswith('win'):
+        tmp = os.path.join(*paths)
+        return tmp.replace('/', '\\')
+      else:
+        tmp = os.path.join(*paths)
+        return tmp.replace('\\', '/')
+
+    def basename(self, path):
+      if self.sys.platform.startswith('win'):
+        return ntpath.basename(path)
+      else:
+        return posixpath.basename(path)
+
+    @staticmethod
+    def abspath(path):
+      return os.path.abspath(path)
+
+    @staticmethod
+    def expanduser(path):
+      return os.path.expanduser(path)
+
+    @staticmethod
+    def dirname(path):
+      return os.path.dirname(path)
+
+    @staticmethod
+    def realpath(path):
+      return os.path.realpath(path)
+
+    @staticmethod
+    def split(path):
+      return os.path.split(path)
+
+    @staticmethod
+    def splitext(path):
+      return os.path.splitext(path)
+
+    @staticmethod
+    def splitdrive(path):
+      return os.path.splitdrive(path)
+
+  X_OK = os.X_OK
+
+  sep = os.sep
+  pathsep = os.pathsep
+
+  def __init__(self, sys_module=sys):
+    self.path = OsModuleStub.OsPathModuleStub(sys_module)
+    self.environ = OsModuleStub.OsEnvironModuleStub()
+    self.display = ':0'
+    self.local_app_data = None
+    self.sys_path = None
+    self.program_files = None
+    self.program_files_x86 = None
+    self.devnull = os.devnull
+    self._directory = {}
+
+  def access(self, path, _):
+    return path in self.path.files
+
+  def getenv(self, name, value=None):
+    if name == 'DISPLAY':
+      env = self.display
+    elif name == 'LOCALAPPDATA':
+      env = self.local_app_data
+    elif name == 'PATH':
+      env = self.sys_path
+    elif name == 'PROGRAMFILES':
+      env = self.program_files
+    elif name == 'PROGRAMFILES(X86)':
+      env = self.program_files_x86
+    else:
+      raise NotImplementedError('Unsupported getenv')
+    return env if env else value
+
+  def chdir(self, path):
+    pass
+
+  def walk(self, top):
+    for dir_name in self._directory:
+      yield top, dir_name, self._directory[dir_name]
+
+
+class PerfControlModuleStub(object):
+  class PerfControlStub(object):
+    def __init__(self, adb):
+      pass
+
+  def __init__(self):
+    self.PerfControl = PerfControlModuleStub.PerfControlStub
+
+
+class RawInputFunctionStub(object):
+  def __init__(self):
+    self.input = ''
+
+  def __call__(self, name, *args, **kwargs):
+    return self.input
+
+
+class SubprocessModuleStub(object):
+  class PopenStub(object):
+    def __init__(self):
+      self.communicate_result = ('', '')
+      self.returncode_result = 0
+
+    def __call__(self, args, **kwargs):
+      return self
+
+    def communicate(self):
+      return self.communicate_result
+
+    @property
+    def returncode(self):
+      return self.returncode_result
+
+  def __init__(self):
+    self.Popen = SubprocessModuleStub.PopenStub()
+    self.PIPE = None
+
+  def call(self, *args, **kwargs):
+    pass
+
+
+class SysModuleStub(object):
+  def __init__(self):
+    self.platform = ''
+
+
+class ThermalThrottleModuleStub(object):
+  class ThermalThrottleStub(object):
+    def __init__(self, adb):
+      pass
+
+  def __init__(self):
+    self.ThermalThrottle = ThermalThrottleModuleStub.ThermalThrottleStub
diff --git a/catapult/telemetry/telemetry/testing/system_stub_unittest.py b/catapult/telemetry/telemetry/testing/system_stub_unittest.py
new file mode 100644
index 0000000..5a23ed4
--- /dev/null
+++ b/catapult/telemetry/telemetry/testing/system_stub_unittest.py
@@ -0,0 +1,251 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import unittest
+
+PERF_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+from telemetry.testing import system_stub
+from telemetry.internal.testing import system_stub_test_module
+
+class CloudStorageTest(unittest.TestCase):
+  SUCCESS_FILE_HASH = 'success'.zfill(40)
+  PUBLIC_FILE_HASH = 'public'.zfill(40)
+  PARTNER_FILE_HASH = 'partner'.zfill(40)
+  INTERNAL_FILE_HASH = 'internal'.zfill(40)
+  UPDATED_HASH = 'updated'.zfill(40)
+
+  def setUp(self):
+    self.cloud_storage = system_stub.CloudStorageModuleStub()
+
+    # Files in Cloud Storage.
+    self.remote_files = ['preset_public_file.wpr',
+                         'preset_partner_file.wpr',
+                         'preset_internal_file.wpr']
+    self.remote_paths = {
+      self.cloud_storage.PUBLIC_BUCKET:
+        {'preset_public_file.wpr':CloudStorageTest.PUBLIC_FILE_HASH},
+      self.cloud_storage.PARTNER_BUCKET:
+        {'preset_partner_file.wpr':CloudStorageTest.PARTNER_FILE_HASH},
+      self.cloud_storage.INTERNAL_BUCKET:
+        {'preset_internal_file.wpr':CloudStorageTest.INTERNAL_FILE_HASH}}
+
+    # Local data files and hashes.
+    self.data_files = [
+        os.path.join(os.path.sep, 'path', 'to', 'success.wpr'),
+        os.path.join(os.path.sep, 'path', 'to', 'wrong_hash.wpr'),
+        os.path.join(os.path.sep, 'path', 'to', 'preset_public_file.wpr'),
+        os.path.join(os.path.sep, 'path', 'to', 'preset_partner_file.wpr'),
+        os.path.join(os.path.sep, 'path', 'to', 'preset_internal_file.wpr')]
+    self.local_file_hashes = {
+        os.path.join(os.path.sep, 'path', 'to', 'success.wpr'):
+            CloudStorageTest.SUCCESS_FILE_HASH,
+        os.path.join(os.path.sep, 'path', 'to', 'wrong_hash.wpr'):
+            CloudStorageTest.SUCCESS_FILE_HASH,
+        os.path.join(os.path.sep, 'path', 'to', 'preset_public_file.wpr'):
+            CloudStorageTest.PUBLIC_FILE_HASH,
+        os.path.join(os.path.sep, 'path', 'to', 'preset_partner_file.wpr'):
+            CloudStorageTest.PARTNER_FILE_HASH,
+        os.path.join(os.path.sep, 'path', 'to', 'preset_internal_file.wpr'):
+            CloudStorageTest.INTERNAL_FILE_HASH,
+    }
+    self.cloud_storage.SetCalculatedHashesForTesting(self.local_file_hashes)
+    # Local hash files and their contents.
+    local_hash_files = {
+        os.path.join(os.path.sep, 'path', 'to', 'success.wpr.sha1'):
+            CloudStorageTest.SUCCESS_FILE_HASH,
+        os.path.join(os.path.sep, 'path', 'to', 'wrong_hash.wpr.sha1'):
+            'wronghash'.zfill(40),
+        os.path.join(os.path.sep, 'path', 'to', 'preset_public_file.wpr.sha1'):
+            CloudStorageTest.PUBLIC_FILE_HASH,
+        os.path.join(os.path.sep, 'path', 'to', 'preset_partner_file.wpr.sha1'):
+            CloudStorageTest.PARTNER_FILE_HASH,
+        os.path.join(os.path.sep, 'path', 'to',
+                     'preset_internal_file.wpr.sha1'):
+            CloudStorageTest.INTERNAL_FILE_HASH,
+    }
+    self.cloud_storage.SetHashFileContentsForTesting(local_hash_files)
+
+  def testSetup(self):
+    self.assertEqual(self.local_file_hashes,
+                     self.cloud_storage.local_file_hashes)
+    self.assertEqual(set(self.data_files),
+                     set(self.cloud_storage.GetLocalDataFiles()))
+    self.assertEqual(self.cloud_storage.default_remote_paths,
+                     self.cloud_storage.GetRemotePathsForTesting())
+    self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
+    self.assertEqual(self.remote_paths,
+                     self.cloud_storage.GetRemotePathsForTesting())
+
+  def testExistsEmptyCloudStorage(self):
+    # Test empty remote files dictionary.
+    self.assertFalse(self.cloud_storage.Exists(self.cloud_storage.PUBLIC_BUCKET,
+                                               'preset_public_file.wpr'))
+    self.assertFalse(self.cloud_storage.Exists(
+        self.cloud_storage.PARTNER_BUCKET, 'preset_partner_file.wpr'))
+    self.assertFalse(self.cloud_storage.Exists(
+        self.cloud_storage.INTERNAL_BUCKET, 'preset_internal_file.wpr'))
+
+  def testExistsNonEmptyCloudStorage(self):
+    # Test non-empty remote files dictionary.
+    self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
+    self.assertTrue(self.cloud_storage.Exists(
+        self.cloud_storage.PUBLIC_BUCKET, 'preset_public_file.wpr'))
+    self.assertTrue(self.cloud_storage.Exists(
+        self.cloud_storage.PARTNER_BUCKET, 'preset_partner_file.wpr'))
+    self.assertTrue(self.cloud_storage.Exists(
+        self.cloud_storage.INTERNAL_BUCKET, 'preset_internal_file.wpr'))
+    self.assertFalse(self.cloud_storage.Exists(
+        self.cloud_storage.PUBLIC_BUCKET, 'fake_file'))
+    self.assertFalse(self.cloud_storage.Exists(
+        self.cloud_storage.PARTNER_BUCKET, 'fake_file'))
+    self.assertFalse(self.cloud_storage.Exists(
+        self.cloud_storage.INTERNAL_BUCKET, 'fake_file'))
+    # Reset state.
+    self.cloud_storage.SetRemotePathsForTesting()
+
+  def testNonEmptyInsertAndExistsPublic(self):
+    # Test non-empty remote files dictionary.
+    self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
+    self.assertFalse(self.cloud_storage.Exists(self.cloud_storage.PUBLIC_BUCKET,
+                                               'success.wpr'))
+    self.cloud_storage.Insert(
+        self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
+        os.path.join(os.path.sep, 'path', 'to', 'success.wpr'))
+    self.assertTrue(self.cloud_storage.Exists(
+        self.cloud_storage.PUBLIC_BUCKET, 'success.wpr'))
+    # Reset state.
+    self.cloud_storage.SetRemotePathsForTesting()
+
+  def testEmptyInsertAndExistsPublic(self):
+    # Test empty remote files dictionary.
+    self.assertFalse(self.cloud_storage.Exists(
+        self.cloud_storage.PUBLIC_BUCKET, 'success.wpr'))
+    self.cloud_storage.Insert(
+        self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
+        os.path.join(os.path.sep, 'path', 'to', 'success.wpr'))
+    self.assertTrue(self.cloud_storage.Exists(
+        self.cloud_storage.PUBLIC_BUCKET, 'success.wpr'))
+
+  def testEmptyInsertAndGet(self):
+    self.assertRaises(self.cloud_storage.NotFoundError, self.cloud_storage.Get,
+                      self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
+                      os.path.join(os.path.sep, 'path', 'to', 'success.wpr'))
+    self.cloud_storage.Insert(self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
+                              os.path.join(os.path.sep, 'path', 'to',
+                                           'success.wpr'))
+    self.assertTrue(self.cloud_storage.Exists(
+        self.cloud_storage.PUBLIC_BUCKET, 'success.wpr'))
+    self.assertEqual(CloudStorageTest.SUCCESS_FILE_HASH, self.cloud_storage.Get(
+        self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
+        os.path.join(os.path.sep, 'path', 'to', 'success.wpr')))
+
+  def testNonEmptyInsertAndGet(self):
+    self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
+    self.assertRaises(self.cloud_storage.NotFoundError, self.cloud_storage.Get,
+                      self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
+                      os.path.join(os.path.sep, 'path', 'to', 'success.wpr'))
+    self.cloud_storage.Insert(self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
+                              os.path.join(os.path.sep, 'path', 'to',
+                                           'success.wpr'))
+    self.assertTrue(self.cloud_storage.Exists(self.cloud_storage.PUBLIC_BUCKET,
+                                              'success.wpr'))
+    self.assertEqual(
+        CloudStorageTest.SUCCESS_FILE_HASH, self.cloud_storage.Get(
+            self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
+            os.path.join(os.path.sep, 'path', 'to', 'success.wpr')))
+    # Reset state.
+    self.cloud_storage.SetRemotePathsForTesting()
+
+  def testGetIfChanged(self):
+    self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
+    self.assertRaises(
+        self.cloud_storage.NotFoundError, self.cloud_storage.Get,
+        self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
+        os.path.join(os.path.sep, 'path', 'to', 'success.wpr'))
+    self.assertFalse(self.cloud_storage.GetIfChanged(
+        os.path.join(os.path.sep, 'path', 'to', 'preset_public_file.wpr'),
+        self.cloud_storage.PUBLIC_BUCKET))
+    self.cloud_storage.ChangeRemoteHashForTesting(
+        self.cloud_storage.PUBLIC_BUCKET, 'preset_public_file.wpr',
+        CloudStorageTest.UPDATED_HASH)
+    self.assertTrue(self.cloud_storage.GetIfChanged(
+        os.path.join(os.path.sep, 'path', 'to', 'preset_public_file.wpr'),
+        self.cloud_storage.PUBLIC_BUCKET))
+    self.assertFalse(self.cloud_storage.GetIfChanged(
+        os.path.join(os.path.sep, 'path', 'to', 'preset_public_file.wpr'),
+        self.cloud_storage.PUBLIC_BUCKET))
+    # Reset state.
+    self.cloud_storage.SetRemotePathsForTesting()
+
+  def testList(self):
+    self.assertEqual([],
+                     self.cloud_storage.List(self.cloud_storage.PUBLIC_BUCKET))
+    self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
+    self.assertEqual(['preset_public_file.wpr'],
+                     self.cloud_storage.List(self.cloud_storage.PUBLIC_BUCKET))
+    # Reset state.
+    self.cloud_storage.SetRemotePathsForTesting()
+
+  def testPermissionError(self):
+    self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
+    self.cloud_storage.SetPermissionLevelForTesting(
+        self.cloud_storage.PUBLIC_PERMISSION)
+    self.assertRaises(
+        self.cloud_storage.PermissionError, self.cloud_storage.Get,
+        self.cloud_storage.INTERNAL_BUCKET, 'preset_internal_file.wpr',
+        os.path.join(os.path.sep, 'path', 'to', 'preset_internal_file.wpr'))
+    self.assertRaises(
+        self.cloud_storage.PermissionError, self.cloud_storage.GetIfChanged,
+        os.path.join(os.path.sep, 'path', 'to', 'preset_internal_file.wpr'),
+        self.cloud_storage.INTERNAL_BUCKET)
+    self.assertRaises(
+        self.cloud_storage.PermissionError, self.cloud_storage.List,
+        self.cloud_storage.INTERNAL_BUCKET)
+    self.assertRaises(
+        self.cloud_storage.PermissionError, self.cloud_storage.Exists,
+        self.cloud_storage.INTERNAL_BUCKET, 'preset_internal_file.wpr')
+    self.assertRaises(
+        self.cloud_storage.PermissionError, self.cloud_storage.Insert,
+        self.cloud_storage.INTERNAL_BUCKET, 'success.wpr',
+        os.path.join(os.path.sep, 'path', 'to', 'success.wpr'))
+    # Reset state.
+    self.cloud_storage.SetRemotePathsForTesting()
+
+  def testCredentialsError(self):
+    self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
+    self.cloud_storage.SetPermissionLevelForTesting(
+        self.cloud_storage.CREDENTIALS_ERROR_PERMISSION)
+    self.assertRaises(
+        self.cloud_storage.CredentialsError, self.cloud_storage.Get,
+        self.cloud_storage.INTERNAL_BUCKET, 'preset_internal_file.wpr',
+        os.path.join(os.path.sep, 'path', 'to', 'preset_internal_file.wpr'))
+    self.assertRaises(
+        self.cloud_storage.CredentialsError, self.cloud_storage.GetIfChanged,
+        os.path.join(os.path.sep, 'path', 'to', 'preset_internal_file.wpr'),
+        self.cloud_storage.INTERNAL_BUCKET)
+    self.assertRaises(
+        self.cloud_storage.CredentialsError, self.cloud_storage.List,
+        self.cloud_storage.INTERNAL_BUCKET)
+    self.assertRaises(
+        self.cloud_storage.CredentialsError, self.cloud_storage.Exists,
+        self.cloud_storage.INTERNAL_BUCKET, 'preset_internal_file.wpr')
+    self.assertRaises(
+        self.cloud_storage.CredentialsError, self.cloud_storage.Insert,
+        self.cloud_storage.INTERNAL_BUCKET, 'success.wpr',
+        os.path.join(os.path.sep, 'path', 'to', 'success.wpr'))
+    # Reset state.
+    self.cloud_storage.SetRemotePathsForTesting()
+
+  def testOpenRestoresCorrectly(self):
+    file_path = os.path.realpath(__file__)
+    stubs = system_stub.Override(system_stub_test_module, ['open'])
+    stubs.open.files = {file_path:'contents'}
+    f = system_stub_test_module.SystemStubTest.TestOpen(file_path)
+    self.assertEqual(type(f), system_stub.OpenFunctionStub.FileStub)
+    stubs.open.files = {}
+    stubs.Restore()
+    # This will throw an error if the open stub wasn't restored correctly.
+    f = system_stub_test_module.SystemStubTest.TestOpen(file_path)
+    self.assertEqual(type(f), file)
diff --git a/catapult/telemetry/telemetry/testing/tab_test_case.py b/catapult/telemetry/telemetry/testing/tab_test_case.py
new file mode 100644
index 0000000..2fc493b
--- /dev/null
+++ b/catapult/telemetry/telemetry/testing/tab_test_case.py
@@ -0,0 +1,48 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.core import exceptions
+from telemetry.testing import browser_test_case
+
+
+class TabTestCase(browser_test_case.BrowserTestCase):
+  def __init__(self, *args):
+    super(TabTestCase, self).__init__(*args)
+    self._tab = None
+
+  def setUp(self):
+    super(TabTestCase, self).setUp()
+
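+    # Leave the browser with exactly one open tab; if tab control is not
+    # supported or tab manipulation times out, restart the browser instead.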
+    if self._browser.supports_tab_control:
+      try:
+        while len(self._browser.tabs) < 1:
+          self._browser.tabs.New()
+        while len(self._browser.tabs) > 1:
+          self._browser.tabs[0].Close()
+        self._tab = self._browser.tabs[0]
+      except exceptions.TimeoutException:
+        self._RestartBrowser()
+    else:
+      self._RestartBrowser()
+    self._tab.Navigate('about:blank')
+    self._tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()
+
+  def Navigate(self, filename, script_to_evaluate_on_commit=None):
+    """Navigates |tab| to |filename| in the unittest data directory.
+
+    Also sets up the http server to point to the unittest data directory.
+    """
+    url = self.UrlOfUnittestFile(filename)
+    self._tab.Navigate(url, script_to_evaluate_on_commit)
+    self._tab.WaitForDocumentReadyStateToBeComplete()
+
+  def _RestartBrowser(self):
+    if not self._browser.tabs:
+      self.tearDownClass()
+      self.setUpClass()
+    self._tab = self._browser.tabs[0]
+
+  @property
+  def tabs(self):
+    return self._browser.tabs
diff --git a/catapult/telemetry/telemetry/testing/test_page_test_results.py b/catapult/telemetry/telemetry/testing/test_page_test_results.py
new file mode 100644
index 0000000..3770c72
--- /dev/null
+++ b/catapult/telemetry/telemetry/testing/test_page_test_results.py
@@ -0,0 +1,40 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.internal.results import page_test_results
+from telemetry.page import page as page_module
+from telemetry.value import list_of_scalar_values
+from telemetry.value import scalar
+
+
+class TestPageTestResults(
+    page_test_results.PageTestResults):
+  def __init__(self, test):
+    super(TestPageTestResults, self).__init__()
+    self.test = test
+    page = page_module.Page("http://www.google.com", {})
+    self.WillRunPage(page)
+
+  def GetPageSpecificValueNamed(self, name):
+    values = [value for value in self.all_page_specific_values
+         if value.name == name]
+    assert len(values) == 1, 'Could not find value named %s' % name
+    return values[0]
+
+  def AssertHasPageSpecificScalarValue(self, name, units, expected_value):
+    value = self.GetPageSpecificValueNamed(name)
+    self.test.assertEquals(units, value.units)
+    self.test.assertTrue(isinstance(value, scalar.ScalarValue))
+    self.test.assertEquals(expected_value, value.value)
+
+  def AssertHasPageSpecificListOfScalarValues(self, name, units,
+                                              expected_values):
+    value = self.GetPageSpecificValueNamed(name)
+    self.test.assertEquals(units, value.units)
+    self.test.assertTrue(
+        isinstance(value, list_of_scalar_values.ListOfScalarValues))
+    self.test.assertItemsEqual(expected_values, value.values)
+
+  def __str__(self):
+    return '\n'.join([repr(x) for x in self.all_page_specific_values])
diff --git a/catapult/telemetry/telemetry/testing/unittest_runner.py b/catapult/telemetry/telemetry/testing/unittest_runner.py
new file mode 100644
index 0000000..b63fa04
--- /dev/null
+++ b/catapult/telemetry/telemetry/testing/unittest_runner.py
@@ -0,0 +1,39 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import subprocess
+import sys
+
+from telemetry.core import util
+
+
+def Run(project_config, no_browser=False):
+  args = sys.argv[1:]
+  assert '--top-level-dir' not in args, (
+      'Top level directory for running tests should be specified through '
+      'the instance of telemetry.project_config.ProjectConfig.')
+  assert '--client-config' not in args, (
+      'Client config file to be used for telemetry should be specified through '
+      'the instance of telemetry.project_config.ProjectConfig.')
+  assert project_config.top_level_dir, 'Must specify top level dir for project'
+  args.extend(['--top-level-dir', project_config.top_level_dir])
+  if project_config.client_config:
+    args.extend(['--client-config', project_config.client_config])
+  if no_browser and not '--no-browser' in args:
+    args.extend(['--no-browser'])
+
+  if project_config.default_chrome_root and not '--chrome-root' in args:
+    args.extend(['--chrome-root', project_config.default_chrome_root])
+
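+  # The tests run in a separate process; add the telemetry directory to its
+  # PYTHONPATH so run_tests.py can import telemetry.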
+  env = os.environ.copy()
+  telemetry_dir = util.GetTelemetryDir()
+  if 'PYTHONPATH' in env:
+    env['PYTHONPATH'] = os.pathsep.join([env['PYTHONPATH'], telemetry_dir])
+  else:
+    env['PYTHONPATH'] = telemetry_dir
+
+  path_to_run_tests = os.path.join(os.path.abspath(os.path.dirname(__file__)),
+                                   'run_tests.py')
+  return subprocess.call([sys.executable, path_to_run_tests] + args, env=env)
diff --git a/catapult/telemetry/telemetry/timeline/OWNERS b/catapult/telemetry/telemetry/timeline/OWNERS
new file mode 100644
index 0000000..d12cc95
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/OWNERS
@@ -0,0 +1 @@
+tengs@chromium.org
diff --git a/catapult/perf_insights/third_party/__init__.py b/catapult/telemetry/telemetry/timeline/__init__.py
similarity index 100%
copy from catapult/perf_insights/third_party/__init__.py
copy to catapult/telemetry/telemetry/timeline/__init__.py
diff --git a/catapult/telemetry/telemetry/timeline/async_slice.py b/catapult/telemetry/telemetry/timeline/async_slice.py
new file mode 100644
index 0000000..2a5068f
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/async_slice.py
@@ -0,0 +1,31 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import telemetry.timeline.event as event
+
+
+class AsyncSlice(event.TimelineEvent):
+  """An AsyncSlice represents an interval of time during which an
+  asynchronous operation is in progress. An AsyncSlice consumes no CPU time
+  itself and so is only associated with Threads at its start and end point.
+  """
+  def __init__(self, category, name, timestamp, args=None,
+               duration=0, start_thread=None, end_thread=None,
+               thread_start=None, thread_duration=None):
+    super(AsyncSlice, self).__init__(
+        category, name, timestamp, duration, thread_start, thread_duration,
+        args)
+    self.parent_slice = None
+    self.start_thread = start_thread
+    self.end_thread = end_thread
+    self.sub_slices = []
+    self.id = None
+
+  def AddSubSlice(self, sub_slice):
+    assert sub_slice.parent_slice == self
+    self.sub_slices.append(sub_slice)
+
+  def IterEventsInThisContainerRecrusively(self):
+    for sub_slice in self.sub_slices:
+      yield sub_slice
diff --git a/catapult/telemetry/telemetry/timeline/bounds.py b/catapult/telemetry/telemetry/timeline/bounds.py
new file mode 100644
index 0000000..dd7a4ef
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/bounds.py
@@ -0,0 +1,114 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+class Bounds(object):
+  """Represents a min-max bounds."""
+  def __init__(self):
+    self.is_empty_ = True
+    self.min_ = None
+    self.max_ = None
+
+  @staticmethod
+  def CreateFromEvent(event):
+    bounds = Bounds()
+    bounds.AddEvent(event)
+    return bounds
+
+  def __repr__(self):
+    if self.is_empty_:
+      return "Bounds()"
+    else:
+      return "Bounds(min=%s,max=%s)" % (self.min_, self.max_)
+
+  @property
+  def is_empty(self):
+    return self.is_empty_
+
+  @property
+  def min(self):
+    if self.is_empty_:
+      return None
+    return self.min_
+
+  @property
+  def max(self):
+    if self.is_empty_:
+      return None
+    return self.max_
+
+  @property
+  def bounds(self):
+    if self.is_empty_:
+      return None
+    return self.max_ - self.min_
+
+  @property
+  def center(self):
+    return (self.min_ + self.max_) * 0.5
+
+  def Contains(self, other):
+    if self.is_empty or other.is_empty:
+      return False
+    return self.min <= other.min and self.max >= other.max
+
+  def ContainsInterval(self, start, end):
+    return self.min <= start and self.max >= end
+
+  def Intersects(self, other):
+    if self.is_empty or other.is_empty:
+      return False
+    return not (other.max < self.min or other.min > self.max)
+
+  def Reset(self):
+    self.is_empty_ = True
+    self.min_ = None
+    self.max_ = None
+
+  def AddBounds(self, bounds):
+    if bounds.is_empty:
+      return
+    self.AddValue(bounds.min_)
+    self.AddValue(bounds.max_)
+
+  def AddValue(self, value):
+    if self.is_empty_:
+      self.max_ = value
+      self.min_ = value
+      self.is_empty_ = False
+      return
+
+    self.max_ = max(self.max_, value)
+    self.min_ = min(self.min_, value)
+
+  def AddEvent(self, event):
+    self.AddValue(event.start)
+    self.AddValue(event.start + event.duration)
+
+  @staticmethod
+  def CompareByMinTimes(a, b):
+    if not a.is_empty and not b.is_empty:
+      return a.min_ - b.min_
+
+    if a.is_empty and not b.is_empty:
+      return -1
+
+    if not a.is_empty and b.is_empty:
+      return 1
+
+    return 0
+
+  @staticmethod
+  def GetOverlapBetweenBounds(first_bounds, second_bounds):
+    """Compute the overlap duration between first_bounds and second_bounds."""
+    return Bounds.GetOverlap(first_bounds.min_, first_bounds.max_,
+                             second_bounds.min_, second_bounds.max_)
+
+  @staticmethod
+  def GetOverlap(first_bounds_min, first_bounds_max,
+                 second_bounds_min, second_bounds_max):
+    assert first_bounds_min <= first_bounds_max
+    assert second_bounds_min <= second_bounds_max
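+    # e.g. GetOverlap(10, 30, 20, 40) == 10, the length of the shared
+    # interval [20, 30].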
+    overlapped_range_start = max(first_bounds_min, second_bounds_min)
+    overlapped_range_end = min(first_bounds_max, second_bounds_max)
+    return max(overlapped_range_end - overlapped_range_start, 0)
diff --git a/catapult/telemetry/telemetry/timeline/bounds_unittest.py b/catapult/telemetry/telemetry/timeline/bounds_unittest.py
new file mode 100644
index 0000000..0b06124
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/bounds_unittest.py
@@ -0,0 +1,20 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import unittest
+
+from telemetry.timeline import bounds
+
+
+class BoundsTests(unittest.TestCase):
+
+  def testGetOverlap(self):
+    # Non overlap cases.
+    self.assertEquals(0, bounds.Bounds.GetOverlap(10, 20, 30, 40))
+    self.assertEquals(0, bounds.Bounds.GetOverlap(30, 40, 10, 20))
+    # Overlap cases.
+    self.assertEquals(10, bounds.Bounds.GetOverlap(10, 30, 20, 40))
+    self.assertEquals(10, bounds.Bounds.GetOverlap(20, 40, 10, 30))
+    # Inclusive cases.
+    self.assertEquals(10, bounds.Bounds.GetOverlap(10, 40, 20, 30))
+    self.assertEquals(10, bounds.Bounds.GetOverlap(20, 30, 10, 40))
diff --git a/catapult/telemetry/telemetry/timeline/counter.py b/catapult/telemetry/telemetry/timeline/counter.py
new file mode 100644
index 0000000..63aacc6
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/counter.py
@@ -0,0 +1,112 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import telemetry.timeline.event_container as event_container
+
+
+# Doesn't inherit from TimelineEvent because it's only a temporary wrapper of
+# a counter sample into an event. During stable operation, the samples are
+# stored in a dense array of values rather than in the long form used by an
+# Event.
+class CounterSample(object):
+  def __init__(self, counter, sample_index):
+    self._counter = counter
+    self._sample_index = sample_index
+
+  @property
+  def category(self):
+    return self._counter.category
+
+  @property
+  def name(self):
+    return self._counter.full_name
+
+  @property
+  def value(self):
+    return self._counter.samples[self._sample_index]
+
+  @property
+  def start(self):
+    return self._counter.timestamps[self._sample_index]
+
+  @start.setter
+  def start(self, start):
+    self._counter.timestamps[self._sample_index] = start
+
+  @property
+  def duration(self):
+    return 0
+
+  @property
+  def end(self):
+    return self.start
+
+  @property
+  def thread_start(self):
+    return None
+
+  @property
+  def thread_duration(self):
+    return None
+
+  @property
+  def thread_end(self):
+    return None
+
+
+class Counter(event_container.TimelineEventContainer):
+  """ Stores all the samples for a given counter.
+  """
+  def __init__(self, parent, category, name):
+    super(Counter, self).__init__(name, parent)
+    self.category = category
+    self.full_name = category + '.' + name
+    self.samples = []
+    self.timestamps = []
+    self.series_names = []
+    self.totals = []
+    self.max_total = 0
+
+  def IterChildContainers(self):
+    return
+    yield # pylint: disable=unreachable
+
+  def IterEventsInThisContainer(self, event_type_predicate, event_predicate):
+    if not event_type_predicate(CounterSample) or not self.timestamps:
+      return
+
+    # Pass event_predicate a reused CounterSample instance to avoid
+    # creating a ton of garbage for rejected samples.
+    test_sample = CounterSample(self, 0)
+    for i in xrange(len(self.timestamps)):
+      test_sample._sample_index = i  # pylint: disable=protected-access
+      if event_predicate(test_sample):
+        yield CounterSample(self, i)
+
+  @property
+  def num_series(self):
+    return len(self.series_names)
+
+  @property
+  def num_samples(self):
+    return len(self.timestamps)
+
+  def FinalizeImport(self):
+    if self.num_series * self.num_samples != len(self.samples):
+      raise ValueError(
+          'Length of samples must be a multiple of length of timestamps.')
+
+    self.totals = []
+    self.max_total = 0
+    if not len(self.samples):
+      return
+
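+    # self.samples is laid out row-major: the value for sample i of series j
+    # lives at index i * num_series + j. The inner loop appends the running
+    # partial sums to self.totals; the largest per-sample total becomes
+    # self.max_total.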
+    max_total = None
+    for i in xrange(self.num_samples):
+      total = 0
+      for j in xrange(self.num_series):
+        total += self.samples[i * self.num_series + j]
+        self.totals.append(total)
+      if max_total is None or total > max_total:
+        max_total = total
+    self.max_total = max_total
diff --git a/catapult/telemetry/telemetry/timeline/counter_unittest.py b/catapult/telemetry/telemetry/timeline/counter_unittest.py
new file mode 100644
index 0000000..a97d4ac
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/counter_unittest.py
@@ -0,0 +1,64 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import types
+import unittest
+
+from telemetry.timeline import counter as counter_module
+
+
+class FakeProcess(object):
+  pass
+
+
+class CounterIterEventsInThisContainerTest(unittest.TestCase):
+
+  def setUp(self):
+    parent = FakeProcess()
+    self.counter = counter_module.Counter(parent, 'cat', 'name')
+
+  def assertIsEmptyIterator(self, itr):
+    self.assertIsInstance(itr, types.GeneratorType)
+    self.assertRaises(StopIteration, itr.next)
+
+  def testEmptyTimestamps(self):
+    self.assertIsEmptyIterator(self.counter.IterEventsInThisContainer(
+        event_type_predicate=lambda x: True,
+        event_predicate=lambda x: True))
+
+  def testEventTypeMismatch(self):
+    self.counter.timestamps = [111, 222]
+    self.assertIsEmptyIterator(self.counter.IterEventsInThisContainer(
+        event_type_predicate=lambda x: False,
+        event_predicate=lambda x: True))
+
+  def testNoEventMatch(self):
+    self.counter.timestamps = [111, 222]
+    self.assertIsEmptyIterator(self.counter.IterEventsInThisContainer(
+        event_type_predicate=lambda x: True,
+        event_predicate=lambda x: False))
+
+  def testAllMatch(self):
+    self.counter.timestamps = [111, 222]
+    self.counter.samples = [100, 200]
+    events = self.counter.IterEventsInThisContainer(
+        event_type_predicate=lambda x: True,
+        event_predicate=lambda x: True)
+    self.assertIsInstance(events, types.GeneratorType)
+    eventlist = list(events)
+    self.assertEqual([111, 222], [s.start for s in eventlist])
+    self.assertEqual(['cat.name', 'cat.name'], [s.name for s in eventlist])
+    self.assertEqual([100, 200], [s.value for s in eventlist])
+
+  def testPartialMatch(self):
+    self.counter.timestamps = [111, 222]
+    self.counter.samples = [100, 200]
+    events = self.counter.IterEventsInThisContainer(
+        event_type_predicate=lambda x: True,
+        event_predicate=lambda x: x.start > 200)
+    self.assertIsInstance(events, types.GeneratorType)
+    eventlist = list(events)
+    self.assertEqual([222], [s.start for s in eventlist])
+    self.assertEqual(['cat.name'], [s.name for s in eventlist])
+    self.assertEqual([200], [s.value for s in eventlist])
diff --git a/catapult/telemetry/telemetry/timeline/event.py b/catapult/telemetry/telemetry/timeline/event.py
new file mode 100644
index 0000000..996cba9
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/event.py
@@ -0,0 +1,56 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+class TimelineEvent(object):
+  """Represents a timeline event.
+
+  thread_start, thread_duration and thread_end are the start time, duration
+  and end time of this event as measured by the thread-specific CPU clock
+  (ticking when the thread is actually scheduled). Thread time is optional
+  on trace events and the corresponding attributes in TimelineEvent will be
+  set to None (not 0) if not present. Users of this class need to properly
+  handle this case.
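+
+  For example, TimelineEvent('cat', 'foo', start=0, duration=10,
+  thread_start=2, thread_duration=4) has end == 10 and thread_end == 6;
+  without the thread_* arguments, thread_end is None.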
+  """
+  def __init__(self, category, name, start, duration, thread_start=None,
+               thread_duration=None, args=None):
+    self.category = category
+    self.name = name
+    self.start = start
+    self.duration = duration
+    self.thread_start = thread_start
+    self.thread_duration = thread_duration
+    self.args = args
+
+  @property
+  def end(self):
+    return self.start + self.duration
+
+  @property
+  def has_thread_timestamps(self):
+    return self.thread_start is not None and self.thread_duration is not None
+
+  @property
+  def thread_end(self):
+    """Thread-specific CPU time when this event ended.
+
+    May be None if the trace event didn't have thread time data.
+    """
+    if self.thread_start is None or self.thread_duration is None:
+      return None
+    return self.thread_start + self.thread_duration
+
+  def __repr__(self):
+    if self.args:
+      args_str = ', ' + repr(self.args)
+    else:
+      args_str = ''
+
+    return ("TimelineEvent(name='%s', start=%f, duration=%s, " +
+            "thread_start=%s, thread_duration=%s%s)") % (
+                self.name,
+                self.start,
+                self.duration,
+                self.thread_start,
+                self.thread_duration,
+                args_str)
diff --git a/catapult/telemetry/telemetry/timeline/event_container.py b/catapult/telemetry/telemetry/timeline/event_container.py
new file mode 100644
index 0000000..4d41e68
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/event_container.py
@@ -0,0 +1,144 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.timeline import async_slice as async_slice_module
+from telemetry.timeline import flow_event as flow_event_module
+from telemetry.timeline import slice as slice_module
+
+
+class TimelineEventContainer(object):
+  """Represents a container for events.
+
+  """
+  def __init__(self, name, parent):
+    self.parent = parent
+    self.name = name
+
+  @staticmethod
+  def IsAsyncSlice(t):
+    return t == async_slice_module.AsyncSlice
+
+  # Basic functions that subclasses of TimelineEventContainer should implement
+  # in order to expose their events. New methods should be added to this part of
+  # the code only when absolutely certain they're needed.
+
+  def IterChildContainers(self):
+    raise NotImplementedError()
+
+  def IterEventsInThisContainer(self, event_type_predicate, event_predicate):
+    """Iterates all the TimelineEvents in this container.
+
+    Only events with a type matching event_type_predicate AND matching
+    event_predicate will be yielded.
+
+    event_type_predicate is given an actual type object, e.g.:
+        event_type_predicate(slice_module.Slice)
+
+    event_predicate is given actual events:
+        event_predicate(thread.slices[7])
+
+    DO NOT ASSUME that the event_type_predicate will be called for every event
+    found. The relative calling order of the two is left up to the implementer
+    of the method.
+
+    """
+    del event_type_predicate, event_predicate  # unused
+    return
+    yield # pylint: disable=unreachable
+
+
+  def IterAllEvents(self,
+                    recursive=True,
+                    event_type_predicate=lambda t: True,
+                    event_predicate=lambda e: True):
+    """Iterates all events in this container, pre-filtered by two predicates.
+
+    Only events with a type matching event_type_predicate AND matching
+    event_predicate will be yielded.
+
+    event_type_predicate is given an actual type object, e.g.:
+        event_type_predicate(slice_module.Slice)
+
+    event_predicate is given actual events:
+        event_predicate(thread.slices[7])
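+
+    For example, to iterate only Slice events named 'Paint':
+        container.IterAllEvents(
+            event_type_predicate=lambda t: t == slice_module.Slice,
+            event_predicate=lambda e: e.name == 'Paint')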
+    """
+    if not recursive:
+      for e in self.IterEventsInThisContainer(
+          event_type_predicate, event_predicate):
+        yield e
+      return
+
+    # TODO(nduca): Write this as a proper iterator instead of one that creates a
+    # list and then iterates it.
+    containers = []
+    def GetContainersRecursive(container):
+      containers.append(container)
+      for container in container.IterChildContainers():
+        GetContainersRecursive(container)
+    GetContainersRecursive(self)
+
+    # Actually create the iterator.
+    for c in containers:
+      for e in c.IterEventsInThisContainer(event_type_predicate,
+                                           event_predicate):
+        yield e
+
+  # Helper functions for finding common kinds of events. Must always take an
+  # optional recursive parameter and be implemented in terms of IterAllEvents.
+  def IterAllEventsOfName(self, name, recursive=True):
+    return self.IterAllEvents(
+      recursive=recursive,
+      event_type_predicate=lambda t: True,
+      event_predicate=lambda e: e.name == name)
+
+  def IterAllSlices(self, recursive=True):
+    return self.IterAllEvents(
+      recursive=recursive,
+      event_type_predicate=lambda t: t == slice_module.Slice)
+
+  def IterAllSlicesInRange(self, start, end, recursive=True):
+    return self.IterAllEvents(
+      recursive=recursive,
+      event_type_predicate=lambda t: t == slice_module.Slice,
+      event_predicate=lambda s: s.start >= start and s.end <= end)
+
+  def IterAllSlicesOfName(self, name, recursive=True):
+    return self.IterAllEvents(
+      recursive=recursive,
+      event_type_predicate=lambda t: t == slice_module.Slice,
+      event_predicate=lambda e: e.name == name)
+
+  def IterAllToplevelSlicesOfName(self, name, recursive=True):
+    return self.IterAllEvents(
+      recursive=recursive,
+      event_type_predicate=lambda t: t == slice_module.Slice,
+      event_predicate=lambda e: e.name == name and e.parent_slice == None)
+
+  def IterAllAsyncSlicesOfName(self, name, recursive=True):
+    return self.IterAllEvents(
+      recursive=recursive,
+      event_type_predicate=self.IsAsyncSlice,
+      event_predicate=lambda e: e.name == name)
+
+  def IterAllAsyncSlicesStartsWithName(self, name, recursive=True):
+    return self.IterAllEvents(
+      recursive=recursive,
+      event_type_predicate=self.IsAsyncSlice,
+      event_predicate=lambda e: e.name.startswith(name))
+
+  def IterAllFlowEvents(self, recursive=True):
+    return self.IterAllEvents(
+      recursive=recursive,
+      event_type_predicate=lambda t: t == flow_event_module.FlowEvent)
+
+  # List versions. These should always be simple expressions that list() on
+  # an underlying iter method.
+  def GetAllEvents(self, recursive=True):
+    return list(self.IterAllEvents(recursive=recursive))
+
+  def GetAllEventsOfName(self, name, recursive=True):
+    return list(self.IterAllEventsOfName(name, recursive))
+
+  def GetAllToplevelSlicesOfName(self, name, recursive=True):
+    return list(self.IterAllToplevelSlicesOfName(name, recursive))
diff --git a/catapult/telemetry/telemetry/timeline/event_unittest.py b/catapult/telemetry/telemetry/timeline/event_unittest.py
new file mode 100644
index 0000000..380fea4
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/event_unittest.py
@@ -0,0 +1,24 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.timeline import event
+
+
+class TimelineEventTest(unittest.TestCase):
+  def testHasThreadTimestamps(self):
+    # No thread_start and no thread_duration
+    event_1 = event.TimelineEvent('test', 'foo', 0, 10)
+    # Has thread_start but no thread_duration
+    event_2 = event.TimelineEvent('test', 'foo', 0, 10, 2)
+    # Has thread_duration but no thread_start
+    event_3 = event.TimelineEvent('test', 'foo', 0, 10, None, 4)
+    # Has thread_start and thread_duration
+    event_4 = event.TimelineEvent('test', 'foo', 0, 10, 2, 4)
+
+    self.assertFalse(event_1.has_thread_timestamps)
+    self.assertFalse(event_2.has_thread_timestamps)
+    self.assertFalse(event_3.has_thread_timestamps)
+    self.assertTrue(event_4.has_thread_timestamps)
diff --git a/catapult/telemetry/telemetry/timeline/flow_event.py b/catapult/telemetry/telemetry/timeline/flow_event.py
new file mode 100644
index 0000000..8a39215
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/flow_event.py
@@ -0,0 +1,15 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import telemetry.timeline.event as event
+
+
+class FlowEvent(event.TimelineEvent):
+  """A FlowEvent represents an interval of time plus parameters associated
+  with that interval.
+  """
+  def __init__(self, category, event_id, name, start, args=None):
+    super(FlowEvent, self).__init__(
+        category, name, start, duration=0, args=args)
+    self.event_id = event_id
diff --git a/catapult/telemetry/telemetry/timeline/importer.py b/catapult/telemetry/telemetry/timeline/importer.py
new file mode 100644
index 0000000..5c355a9
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/importer.py
@@ -0,0 +1,24 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class TimelineImporter(object):
+  """Reads TraceData and populates timeline model with what it finds."""
+  def __init__(self, model, trace_data, import_order):
+    self._model = model
+    self._trace_data = trace_data
+    self.import_order = import_order
+
+  @staticmethod
+  def GetSupportedPart():
+    raise NotImplementedError
+
+  def ImportEvents(self):
+    """Processes the event data in the wrapper and creates and adds
+    new timeline events to the model."""
+    raise NotImplementedError
+
+  def FinalizeImport(self):
+    """Called after all other importers for the model are run."""
+    raise NotImplementedError
diff --git a/catapult/telemetry/telemetry/timeline/inspector_importer.py b/catapult/telemetry/telemetry/timeline/inspector_importer.py
new file mode 100644
index 0000000..f25c9a1
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/inspector_importer.py
@@ -0,0 +1,73 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Imports event data obtained from the inspector's timeline."""
+
+from telemetry.timeline import importer
+import telemetry.timeline.slice as tracing_slice
+import telemetry.timeline.thread as timeline_thread
+from telemetry.timeline import trace_data as trace_data_module
+
+
+class InspectorTimelineImporter(importer.TimelineImporter):
+  def __init__(self, model, trace_data):
+    super(InspectorTimelineImporter, self).__init__(model,
+                                                    trace_data,
+                                                    import_order=1)
+    self._events = trace_data.GetEventsFor(
+      trace_data_module.INSPECTOR_TRACE_PART)
+
+  @staticmethod
+  def GetSupportedPart():
+    return trace_data_module.INSPECTOR_TRACE_PART
+
+  def ImportEvents(self):
+    render_process = self._model.GetOrCreateProcess(0)
+    for raw_event in self._events:
+      thread = render_process.GetOrCreateThread(raw_event.get('thread', 0))
+      InspectorTimelineImporter.AddRawEventToThreadRecursive(thread, raw_event)
+
+  def FinalizeImport(self):
+    pass
+
+  @staticmethod
+  def AddRawEventToThreadRecursive(thread, raw_inspector_event):
+    pending_slice = None
+    if ('startTime' in raw_inspector_event and
+        'type' in raw_inspector_event):
+      args = {}
+      for x in raw_inspector_event:
+        if x in ('startTime', 'endTime', 'children'):
+          continue
+        args[x] = raw_inspector_event[x]
+      if len(args) == 0:
+        args = None
+      start_time = raw_inspector_event['startTime']
+      end_time = raw_inspector_event.get('endTime', start_time)
+
+      pending_slice = tracing_slice.Slice(
+        thread, 'inspector',
+        raw_inspector_event['type'],
+        start_time,
+        thread_timestamp=None,
+        args=args)
+
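+    # Import any child events first; the enclosing slice is pushed onto the
+    # thread afterwards, once its duration has been set.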
+    for child in raw_inspector_event.get('children', []):
+      InspectorTimelineImporter.AddRawEventToThreadRecursive(
+          thread, child)
+
+    if pending_slice:
+      pending_slice.duration = end_time - pending_slice.start
+      thread.PushSlice(pending_slice)
+
+  @staticmethod
+  def RawEventToTimelineEvent(raw_inspector_event):
+    """Converts raw_inspector_event to TimelineEvent."""
+    thread = timeline_thread.Thread(None, 0)
+    InspectorTimelineImporter.AddRawEventToThreadRecursive(
+        thread, raw_inspector_event)
+    thread.FinalizeImport()
+    assert len(thread.toplevel_slices) <= 1
+    if len(thread.toplevel_slices) == 0:
+      return None
+    return thread.toplevel_slices[0]
diff --git a/catapult/telemetry/telemetry/timeline/inspector_importer_unittest.py b/catapult/telemetry/telemetry/timeline/inspector_importer_unittest.py
new file mode 100644
index 0000000..fc379d1
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/inspector_importer_unittest.py
@@ -0,0 +1,149 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import unittest
+
+from telemetry.timeline import inspector_importer
+from telemetry.timeline import model
+from telemetry.timeline import trace_data
+
+_BACKGROUND_MESSAGE = {
+  'data': {},
+  'type': 'BeginFrame',
+  'thread': '2',
+  'startTime': 1352783525921.824}
+
+_SAMPLE_MESSAGE = {
+  'children': [
+    {'data': {},
+     'startTime': 1352783525921.823,
+     'type': 'BeginFrame',
+     'usedHeapSize': 1870736},
+    {'children': [],
+     'data': {'height': 723,
+              'width': 1272,
+              'x': 0,
+              'y': 0},
+     'endTime': 1352783525921.8992,
+     'frameId': '10.2',
+     'startTime': 1352783525921.8281,
+     'type': 'Layout',
+     'usedHeapSize': 1870736},
+    {'children': [
+        {'children': [],
+         'data': {'imageType': 'PNG'},
+         'endTime': 1352783525927.7939,
+         'startTime': 1352783525922.4241,
+         'type': 'DecodeImage',
+         'usedHeapSize': 1870736}
+        ],
+     'data': {'height': 432,
+              'width': 1272,
+              'x': 0,
+              'y': 8},
+     'endTime': 1352783525927.9822,
+     'frameId': '10.2',
+     'startTime': 1352783525921.9292,
+     'type': 'Paint',
+     'usedHeapSize': 1870736}
+    ],
+  'data': {},
+  'endTime': 1352783525928.041,
+  'startTime': 1352783525921.8049,
+  'type': 'Program'}
+
+class InspectorEventParsingTest(unittest.TestCase):
+  def testParsingWithSampleData(self):
+    root_event = (inspector_importer.InspectorTimelineImporter
+        .RawEventToTimelineEvent(_SAMPLE_MESSAGE))
+    self.assertTrue(root_event)
+    decode_image_event = [
+      child for child in root_event.IterEventsInThisContainerRecrusively()
+      if child.name == 'DecodeImage'][0]
+    self.assertEquals(decode_image_event.args['data']['imageType'], 'PNG')
+    self.assertTrue(decode_image_event.duration > 0)
+
+  def testParsingWithSimpleData(self):
+    raw_event = {'type': 'Foo',
+                 'startTime': 1,
+                 'endTime': 3,
+                 'children': []}
+    event = (inspector_importer.InspectorTimelineImporter
+        .RawEventToTimelineEvent(raw_event))
+    self.assertEquals('Foo', event.name)
+    self.assertEquals(1, event.start)
+    self.assertEquals(3, event.end)
+    self.assertEquals(2, event.duration)
+    self.assertEquals([], event.sub_slices)
+
+  def testParsingWithArgs(self):
+    raw_event = {'type': 'Foo',
+                 'startTime': 1,
+                 'endTime': 3,
+                 'foo': 7,
+                 'bar': {'x': 1}}
+    event = (inspector_importer.InspectorTimelineImporter
+        .RawEventToTimelineEvent(raw_event))
+    self.assertEquals('Foo', event.name)
+    self.assertEquals(1, event.start)
+    self.assertEquals(3, event.end)
+    self.assertEquals(2, event.duration)
+    self.assertEquals([], event.sub_slices)
+    self.assertEquals(7, event.args['foo'])
+    self.assertEquals(1, event.args['bar']['x'])
+
+  def testEventsWithNoStartTimeAreDropped(self):
+    raw_event = {'type': 'Foo',
+                 'endTime': 1,
+                 'children': []}
+    event = (inspector_importer.InspectorTimelineImporter.
+        RawEventToTimelineEvent(raw_event))
+    self.assertEquals(None, event)
+
+  def testEventsWithNoEndTimeAreOk(self):
+    raw_event = {'type': 'Foo',
+                 'startTime': 1,
+                 'children': []}
+    event = (inspector_importer.InspectorTimelineImporter.
+        RawEventToTimelineEvent(raw_event))
+    self.assertEquals(1, event.start)
+    self.assertEquals(1, event.end)
+
+  def testOutOfOrderData(self):
+    builder = trace_data.TraceDataBuilder()
+    builder.AddEventsTo(
+      trace_data.INSPECTOR_TRACE_PART, [{
+      'startTime': 5295.004, 'endTime': 5305.004,
+      'data': {}, 'type': 'Program',
+      'children': [
+        {'startTime': 5295.004, 'data': {'id': 0}, 'type': 'BeginFrame', },
+        {'startTime': 4492.973, 'endTime': 4493.086, 'data': {'rootNode': -3},
+         'type': 'PaintSetup'},
+        {'startTime': 5298.004, 'endTime': 5301.004, 'type': 'Paint',
+         'frameId': '53228.1',
+         'data': {'rootNode': -3, 'clip': [0, 0, 1018, 0, 1018, 764, 0, 764],
+                  'layerId': 10}, 'children': []},
+        {'startTime': 5301.004, 'endTime': 5305.004, 'data': {},
+         'type': 'CompositeLayers', 'children': []},
+        {'startTime': 5305.004, 'data': {}, 'type': 'MarkFirstPaint'}
+    ]}])
+    model.TimelineModel(builder.AsData(), shift_world_to_zero=False)
+
+class InspectorImporterTest(unittest.TestCase):
+  def testImport(self):
+    builder = trace_data.TraceDataBuilder()
+    builder.AddEventsTo(trace_data.INSPECTOR_TRACE_PART,
+                        [_BACKGROUND_MESSAGE, _SAMPLE_MESSAGE])
+    m = model.TimelineModel(builder.AsData(), shift_world_to_zero=False)
+    self.assertEquals(1, len(m.processes))
+    process = m.processes.values()[0]
+    threads = process.threads
+    self.assertEquals(2, len(threads))
+    renderer_thread = threads[0]
+    self.assertEquals(1, len(renderer_thread.toplevel_slices))
+    self.assertEquals('Program',
+                      renderer_thread.toplevel_slices[0].name)
+    second_thread = threads['2']
+    self.assertEquals(1, len(second_thread.toplevel_slices))
+    self.assertEquals('BeginFrame',
+                      second_thread.toplevel_slices[0].name)
diff --git a/catapult/telemetry/telemetry/timeline/memory_dump_event.py b/catapult/telemetry/telemetry/timeline/memory_dump_event.py
new file mode 100644
index 0000000..e8c0af2
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/memory_dump_event.py
@@ -0,0 +1,343 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import posixpath
+import re
+
+from telemetry.timeline import event as timeline_event
+
+
+class MmapCategory(object):
+  _DEFAULT_CATEGORY = None
+
+  def __init__(self, name, file_pattern, children=None):
+    """A (sub)category for classifying memory maps.
+
+    Args:
+      name: A string to identify the category.
+      file_pattern: A regex pattern, the category will aggregate memory usage
+          for all mapped files matching this pattern.
+      children: A list of MmapCategory objects, used to sub-categorize memory
+          usage.
+    """
+    self.name = name
+    self._file_pattern = re.compile(file_pattern) if file_pattern else None
+    self._children = list(children) if children else None
+
+  @classmethod
+  def DefaultCategory(cls):
+    """An implicit 'Others' match-all category with no children."""
+    if cls._DEFAULT_CATEGORY is None:
+      cls._DEFAULT_CATEGORY = cls('Others', None)
+    return cls._DEFAULT_CATEGORY
+
+  def Match(self, mapped_file):
+    """Test whether a mapped file matches this category."""
+    return (self._file_pattern is None
+            or bool(self._file_pattern.search(mapped_file)))
+
+  def GetMatchingChild(self, mapped_file):
+    """Get the first matching sub-category for a given mapped file.
+
+    Returns None if the category has no children, or the DefaultCategory if
+    it does have children but none of them match.
+    """
+    if not self._children:
+      return None
+    for child in self._children:
+      if child.Match(mapped_file):
+        return child
+    return type(self).DefaultCategory()
+
+
+ROOT_CATEGORY = MmapCategory('/', None, [
+  MmapCategory('Android', r'^\/dev\/ashmem(?!\/libc malloc)', [
+    MmapCategory('Java runtime', r'^\/dev\/ashmem\/dalvik-', [
+      MmapCategory('Spaces', r'\/dalvik-(alloc|main|large'
+                             r' object|non moving|zygote) space', [
+        MmapCategory('Normal', r'\/dalvik-(alloc|main)'),
+        MmapCategory('Large', r'\/dalvik-large object'),
+        MmapCategory('Zygote', r'\/dalvik-zygote'),
+        MmapCategory('Non-moving', r'\/dalvik-non moving')
+      ]),
+      MmapCategory('Linear Alloc', r'\/dalvik-LinearAlloc'),
+      MmapCategory('Indirect Reference Table', r'\/dalvik-indirect.ref'),
+      MmapCategory('Cache', r'\/dalvik-jit-code-cache'),
+      MmapCategory('Accounting', None)
+    ]),
+    MmapCategory('Cursor', r'\/CursorWindow'),
+    MmapCategory('Ashmem', None)
+  ]),
+  MmapCategory('Native heap',
+               r'^((\[heap\])|(\[anon:)|(\/dev\/ashmem\/libc malloc)|$)'),
+  MmapCategory('Stack', r'^\[stack'),
+  MmapCategory('Files',
+               r'\.((((so)|(jar)|(apk)|(ttf)|(odex)|(oat)|(art))$)|(dex))', [
+    MmapCategory('so', r'\.so$'),
+    MmapCategory('jar', r'\.jar$'),
+    MmapCategory('apk', r'\.apk$'),
+    MmapCategory('ttf', r'\.ttf$'),
+    MmapCategory('dex', r'\.((dex)|(odex$))'),
+    MmapCategory('oat', r'\.oat$'),
+    MmapCategory('art', r'\.art$'),
+  ]),
+  MmapCategory('Devices', r'(^\/dev\/)|(anon_inode:dmabuf)', [
+    MmapCategory('GPU', r'\/((nv)|(mali)|(kgsl))'),
+    MmapCategory('DMA', r'anon_inode:dmabuf'),
+  ]),
+  MmapCategory('Discounted tracing overhead',
+               r'\[discounted tracing overhead\]')
+])
+
+
+# Map long descriptive attribute names, as understood by MemoryBucket.GetValue,
+# to the short keys used by events in raw json traces.
+BUCKET_ATTRS = {
+  'proportional_resident': 'pss',
+  'private_dirty_resident': 'pd',
+  'private_clean_resident': 'pc',
+  'shared_dirty_resident': 'sd',
+  'shared_clean_resident': 'sc',
+  'swapped': 'sw'}
+
+
+# Map of {memory_key: (category_path, discount_tracing), ...}.
+# When discount_tracing is True, we have to discount the resident_size of the
+# tracing allocator to get the correct value for that key.
+MMAPS_METRICS = {
+  'mmaps_overall_pss': ('/.proportional_resident', True),
+  'mmaps_private_dirty' : ('/.private_dirty_resident', True),
+  'mmaps_java_heap': ('/Android/Java runtime/Spaces.proportional_resident',
+                      False),
+  'mmaps_ashmem': ('/Android/Ashmem.proportional_resident', False),
+  'mmaps_native_heap': ('/Native heap.proportional_resident', True)}
+
+
+class MemoryBucket(object):
+  """Simple object to hold and aggregate memory values."""
+  def __init__(self):
+    self._bucket = dict.fromkeys(BUCKET_ATTRS.iterkeys(), 0)
+
+  def __repr__(self):
+    values = ', '.join('%s=%d' % (src_key, self._bucket[dst_key])
+                       for dst_key, src_key
+                       in sorted(BUCKET_ATTRS.iteritems()))
+    return '%s[%s]' % (type(self).__name__, values)
+
+  def AddRegion(self, byte_stats):
+    for dst_key, src_key in BUCKET_ATTRS.iteritems():
+      self._bucket[dst_key] += int(byte_stats.get(src_key, '0'), 16)
+
+  def GetValue(self, name):
+    return self._bucket[name]
+
+
+class ProcessMemoryDumpEvent(timeline_event.TimelineEvent):
+  """A memory dump event belonging to a single timeline.Process object.
+
+  It's a subclass of telemetry's TimelineEvent so it can be included in
+  the stream of events contained in timeline.model objects, and have its
+  timing correlated with that of other events in the model.
+
+  Args:
+    process: The Process object associated with the memory dump.
+    dump_events: A list of dump events of the process with the same dump id.
+
+  Properties:
+    dump_id: A string to identify events belonging to the same global dump.
+    process: The timeline.Process object that owns this memory dump event.
+    has_mmaps: True if the memory dump has mmaps information. If False then
+        GetMemoryUsage will report all zeros.
+  """
+  def __init__(self, process, dump_events):
+    assert dump_events
+
+    start_time = min(event['ts'] for event in dump_events) / 1000.0
+    duration = max(event['ts'] for event in dump_events) / 1000.0 - start_time
+    super(ProcessMemoryDumpEvent, self).__init__('memory', 'memory_dump',
+                                                 start_time, duration)
+
+    self.process = process
+    self.dump_id = dump_events[0]['id']
+
+    allocator_dumps = {}
+    vm_regions = []
+    for event in dump_events:
+      assert (event['ph'] == 'v' and self.process.pid == event['pid'] and
+              self.dump_id == event['id'])
+      try:
+        allocator_dumps.update(event['args']['dumps']['allocators'])
+      except KeyError:
+        pass  # It's ok if any of those keys are not present.
+      try:
+        value = event['args']['dumps']['process_mmaps']['vm_regions']
+        assert not vm_regions
+        vm_regions = value
+      except KeyError:
+        pass  # It's ok if any of those keys are not present.
+
+    self._allocators = {}
+    parent_path = ''
+    parent_has_size = False
+    for allocator_name, size_values in sorted(allocator_dumps.iteritems()):
+      if ((allocator_name.startswith(parent_path) and parent_has_size) or
+          allocator_name.startswith('global/')):
+        continue
+      parent_path = allocator_name + '/'
+      parent_has_size = 'size' in size_values['attrs']
+      name_parts = allocator_name.split('/')
+      allocator_name = name_parts[0]
+      # For 'gpu/android_memtrack/*' we want to keep track of individual
+      # components. E.g. 'gpu/android_memtrack/gl' will be stored as
+      # 'android_memtrack_gl' in the allocators dict.
+      if (len(name_parts) == 3 and allocator_name == 'gpu' and
+          name_parts[1] == 'android_memtrack'):
+        allocator_name = '_'.join(name_parts[1:3])
+      allocator = self._allocators.setdefault(allocator_name, {})
+      for size_key, size_value in size_values['attrs'].iteritems():
+        if size_value['units'] == 'bytes':
+          allocator[size_key] = (allocator.get(size_key, 0)
+                                 + int(size_value['value'], 16))
+    # We need to discount the tracing allocator's size from the malloc size.
+    try:
+      self._allocators['malloc']['size'] -= self._allocators['tracing']['size']
+    except KeyError:
+      pass  # It's ok if any of those keys are not present.
+
+    self.has_mmaps = bool(vm_regions)
+    self._buckets = {}
+    for vm_region in vm_regions:
+      self._AddRegion(vm_region)
+
+  @property
+  def process_name(self):
+    return self.process.name
+
+  def _AddRegion(self, vm_region):
+    path = ''
+    category = ROOT_CATEGORY
+    while category:
+      path = posixpath.join(path, category.name)
+      self.GetMemoryBucket(path).AddRegion(vm_region['bs'])
+      mapped_file = vm_region['mf']
+      category = category.GetMatchingChild(mapped_file)
+
+  def __repr__(self):
+    values = ['pid=%d' % self.process.pid]
+    for key, value in sorted(self.GetMemoryUsage().iteritems()):
+      values.append('%s=%d' % (key, value))
+    values = ', '.join(values)
+    return '%s[%s]' % (type(self).__name__, values)
+
+  def GetMemoryBucket(self, path):
+    """Return the MemoryBucket associated with a category path.
+
+    An empty bucket will be created if the path does not already exist.
+
+    path: A string with a path in the classification tree, e.g.
+        '/Android/Java runtime/Cache'. Note: no trailing slash, except for
+        the root path '/'.
+    """
+    if not path in self._buckets:
+      self._buckets[path] = MemoryBucket()
+    return self._buckets[path]
+
+  def GetMemoryValue(self, category_path, discount_tracing=False):
+    """Return a specific value from within a MemoryBucket.
+
+    category_path: A string composed of a path in the classification tree,
+        followed by a '.', followed by a specific bucket value, e.g.
+        '/Android/Java runtime/Cache.private_dirty_resident'.
+    discount_tracing: A boolean indicating whether the returned value should
+        be discounted by the resident size of the tracing allocator.
+    """
+    path, name = category_path.rsplit('.', 1)
+    value = self.GetMemoryBucket(path).GetValue(name)
+    if discount_tracing and 'tracing' in self._allocators:
+      value -= self._allocators['tracing'].get('resident_size', 0)
+    return value
+
+  def GetMemoryUsage(self):
+    """Get a dictionary with the memory usage of this process."""
+    usage = {}
+    for name, values in self._allocators.iteritems():
+      # If you wish to track more attributes here, make sure they are correctly
+      # calculated by the ProcessMemoryDumpEvent constructor. All dumps whose
+      # parent has a "size" attribute are ignored to avoid double counting, so
+      # the other attributes are totals over top-level dumps only.
+      if 'size' in values:
+        usage['allocator_%s' % name] = values['size']
+      if 'allocated_objects_size' in values:
+        usage['allocated_objects_%s' % name] = values['allocated_objects_size']
+      if 'memtrack_pss' in values:
+        usage[name] = values['memtrack_pss']
+    if self.has_mmaps:
+      usage.update((key, self.GetMemoryValue(*value))
+                   for key, value in MMAPS_METRICS.iteritems())
+    return usage
+
+
+class GlobalMemoryDump(object):
+  """Object to aggregate individual process dumps with the same dump id.
+
+  Args:
+    process_dumps: A sequence of ProcessMemoryDumpEvent objects, all sharing
+        the same global dump id.
+
+  Attributes:
+    dump_id: A string identifying this dump.
+    has_mmaps: True if the memory dump has mmaps information. If False then
+        GetMemoryUsage will report all zeros.
+  """
+  def __init__(self, process_dumps):
+    assert process_dumps
+    # Keep dumps sorted in chronological order.
+    self._process_dumps = sorted(process_dumps, key=lambda dump: dump.start)
+
+    # All process dump events should have the same dump id.
+    dump_ids = set(dump.dump_id for dump in self._process_dumps)
+    assert len(dump_ids) == 1
+    self.dump_id = dump_ids.pop()
+
+    # Either all processes have mmaps or none of them do.
+    have_mmaps = set(dump.has_mmaps for dump in self._process_dumps)
+    assert len(have_mmaps) == 1
+    self.has_mmaps = have_mmaps.pop()
+
+  @property
+  def start(self):
+    return self._process_dumps[0].start
+
+  @property
+  def end(self):
+    return max(dump.end for dump in self._process_dumps)
+
+  @property
+  def duration(self):
+    return self.end - self.start
+
+  @property
+  def pids(self):
+    return set(d.process.pid for d in self._process_dumps)
+
+  def IterProcessMemoryDumps(self):
+    return iter(self._process_dumps)
+
+  def CountProcessMemoryDumps(self):
+    return len(self._process_dumps)
+
+  def __repr__(self):
+    values = ['id=%s' % self.dump_id]
+    for key, value in sorted(self.GetMemoryUsage().iteritems()):
+      values.append('%s=%d' % (key, value))
+    values = ', '.join(values)
+    return '%s[%s]' % (type(self).__name__, values)
+
+  def GetMemoryUsage(self):
+    """Get the aggregated memory usage over all processes in this dump."""
+    result = {}
+    for dump in self._process_dumps:
+      for key, value in dump.GetMemoryUsage().iteritems():
+        result[key] = result.get(key, 0) + value
+    return result
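A minimal sketch of how the MmapCategory tree above gets used (not part of the
patch; ClassifyMappedFile is a hypothetical helper and it assumes
telemetry.timeline.memory_dump_event is importable): a mapped file is walked
from ROOT_CATEGORY through GetMatchingChild until no child matches, and the
resulting path plus a '.attribute' suffix, e.g.
'/Android/Ashmem.proportional_resident', is what GetMemoryValue consumes.

import posixpath

from telemetry.timeline import memory_dump_event

def ClassifyMappedFile(mapped_file):
  # Walk the category tree exactly as ProcessMemoryDumpEvent._AddRegion does,
  # returning the deepest matching category path.
  path = ''
  category = memory_dump_event.ROOT_CATEGORY
  while category:
    path = posixpath.join(path, category.name)
    category = category.GetMatchingChild(mapped_file)
  return path

# ClassifyMappedFile('/dev/ashmem/dalvik-jit-code-cache') returns
# '/Android/Java runtime/Cache'; a file matching no explicit category ends up
# under the implicit '/Others'.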
diff --git a/catapult/telemetry/telemetry/timeline/memory_dump_event_unittest.py b/catapult/telemetry/telemetry/timeline/memory_dump_event_unittest.py
new file mode 100644
index 0000000..966f9ad
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/memory_dump_event_unittest.py
@@ -0,0 +1,278 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.timeline import memory_dump_event
+import mock
+
+
+def MakeRawMemoryDumpEvent(dump_id='123456ABCDEF', pid=1234, start=0,
+                           mmaps=None, allocators=None):
+
+  def vm_region(mapped_file, byte_stats):
+    return {
+      'mf': mapped_file,
+      'bs': {k: hex(v) for k, v in byte_stats.iteritems()}}
+
+  def attrs(sizes):
+    return {'attrs': {k: {'value': hex(v), 'units': 'bytes'}
+                      for k, v in sizes.iteritems()}}
+
+  if allocators is None:
+    allocators = {}
+
+  event = {'ph': 'v', 'id': dump_id, 'pid': pid, 'ts': start * 1000,
+           'args': {'dumps': {'allocators': {
+               name: attrs(sizes) for name, sizes in allocators.iteritems()}}}}
+  if mmaps:
+    event['args']['dumps']['process_mmaps'] = {
+      'vm_regions': [vm_region(mapped_file, byte_stats)
+                     for mapped_file, byte_stats in mmaps.iteritems()]}
+
+  return event
+
+
+def TestProcessDumpEvent(dump_id='123456ABCDEF', pid=1234, start=0, mmaps=None,
+                         allocators=None):
+  event = MakeRawMemoryDumpEvent(dump_id, pid, start, mmaps=mmaps,
+                                allocators=allocators)
+  process = mock.Mock()
+  process.pid = event['pid']
+  return memory_dump_event.ProcessMemoryDumpEvent(process, [event])
+
+
+class ProcessMemoryDumpEventUnitTest(unittest.TestCase):
+
+  def testProcessMemoryDump_allocators(self):
+    process = mock.Mock()
+    process.pid = 1234
+    events = [
+      MakeRawMemoryDumpEvent(
+        pid=process.pid, allocators={
+          'v8': {'size': 10, 'allocated_objects_size': 5},
+          'v8/allocated_objects': {'size': 4},
+          'skia': {'not_size': 10,
+                   'allocated_objects_size': 5},
+          'skia/cache1': {'size': 24}
+        }
+      ),
+      MakeRawMemoryDumpEvent(
+        pid=process.pid, allocators={
+          'skia/cache2': {'not_size': 20},
+          'skia/cache2/obj1': {'size': 8},
+          'skia/cache2/obj2': {'size': 9},
+          'skia_different/obj': {'size': 30},
+          'skia_different/obj/not_counted': {'size': 26},
+          'global/0xdead': {'size': 26}
+        }
+      )
+    ]
+    memory_dump = memory_dump_event.ProcessMemoryDumpEvent(process, events)
+
+    EXPECTED_ALLOCATORS = {
+      'skia': {
+        'allocated_objects_size': 5,
+        'not_size': 30,
+        'size': 41
+      },
+      'v8': {
+        'allocated_objects_size': 5,
+        'size': 10
+      },
+      'skia_different': {'size': 30}
+    }
+
+    self.assertEquals(memory_dump._allocators, EXPECTED_ALLOCATORS)
+
+  def testProcessMemoryDump_mmaps(self):
+    ALL = [2 ** x for x in range(8)]
+    (JAVA_SPACES, JAVA_CACHE, ASHMEM, NATIVE_1, NATIVE_2, STACK, FILES_APK,
+     DEVICE_GPU) = ALL
+
+    memory_dump = TestProcessDumpEvent(mmaps={
+      '/dev/ashmem/dalvik-space-foo': {'pss': JAVA_SPACES},
+      '/dev/ashmem/dalvik-jit-code-cache': {'pss': JAVA_CACHE},
+      '/dev/ashmem/other-random-stuff': {'pss': ASHMEM},
+      '[heap] bar': {'pss': NATIVE_1},
+      '': {'pss': NATIVE_2},
+      '[stack thingy]': {'pss': STACK},
+      'my_little_app.apk': {'pss': FILES_APK},
+      '/dev/mali': {'pss': DEVICE_GPU}
+    })
+
+    EXPECTED = {
+      '/': sum(ALL),
+      '/Android/Java runtime': JAVA_SPACES + JAVA_CACHE,
+      '/Android/Ashmem': ASHMEM,
+      '/Android': JAVA_SPACES + JAVA_CACHE + ASHMEM,
+      '/Native heap': NATIVE_1 + NATIVE_2,
+      '/Stack': STACK,
+      '/Files/apk': FILES_APK,
+      '/Devices': DEVICE_GPU}
+
+    self.assertTrue(memory_dump.has_mmaps)
+    for path, value in EXPECTED.iteritems():
+      self.assertEquals(
+          value,
+          memory_dump.GetMemoryBucket(path).GetValue('proportional_resident'))
+
+  def testProcessMemoryDump_composability(self):
+    java_spaces = 100
+    process = mock.Mock()
+    process.pid = 1234
+    allocators = {'v8': {'size': 10}}
+    mmaps = {'/dev/ashmem/dalvik-space-foo': {'pss': java_spaces}}
+
+    events = [MakeRawMemoryDumpEvent(pid=process.pid, allocators=allocators),
+              MakeRawMemoryDumpEvent(pid=process.pid, mmaps=mmaps)]
+    memory_dump = memory_dump_event.ProcessMemoryDumpEvent(process, events)
+
+    self.assertEquals(memory_dump._allocators, allocators)
+
+    EXPECTED_MMAPS = {
+      '/': java_spaces,
+      '/Android/Java runtime': java_spaces,
+      '/Android': java_spaces,
+    }
+
+    self.assertTrue(memory_dump.has_mmaps)
+    for path, value in EXPECTED_MMAPS.iteritems():
+      self.assertEquals(value,
+          memory_dump.GetMemoryBucket(path).GetValue('proportional_resident'))
+
+
+class MemoryDumpEventUnitTest(unittest.TestCase):
+  def testRepr(self):
+    process_dump1 = TestProcessDumpEvent(
+        mmaps={'/dev/ashmem/other-ashmem': {'pss': 5}},
+        allocators={'v8': {'size': 10, 'allocated_objects_size' : 5}})
+    process_dump2 = TestProcessDumpEvent(
+        mmaps={'/dev/ashmem/libc malloc': {'pss': 42, 'pd': 27}},
+        allocators={'v8': {'size': 20, 'allocated_objects_size' : 10},
+                    'oilpan': {'size': 40}})
+    global_dump = memory_dump_event.GlobalMemoryDump(
+        [process_dump1, process_dump2])
+
+    self.assertEquals(
+        repr(process_dump1),
+        'ProcessMemoryDumpEvent[pid=1234, allocated_objects_v8=5,'
+        ' allocator_v8=10, mmaps_ashmem=5, mmaps_java_heap=0,'
+        ' mmaps_native_heap=0, mmaps_overall_pss=5, mmaps_private_dirty=0]')
+    self.assertEquals(
+        repr(process_dump2),
+        'ProcessMemoryDumpEvent[pid=1234, allocated_objects_v8=10,'
+        ' allocator_oilpan=40, allocator_v8=20, mmaps_ashmem=0,'
+        ' mmaps_java_heap=0, mmaps_native_heap=42, mmaps_overall_pss=42,'
+        ' mmaps_private_dirty=27]')
+    self.assertEquals(
+        repr(global_dump),
+        'GlobalMemoryDump[id=123456ABCDEF, allocated_objects_v8=15,'
+        ' allocator_oilpan=40, allocator_v8=30, mmaps_ashmem=5,'
+        ' mmaps_java_heap=0, mmaps_native_heap=42, mmaps_overall_pss=47,'
+        ' mmaps_private_dirty=27]')
+
+  def testDumpEventsTiming(self):
+    process = mock.Mock()
+    process.pid = 1
+    composable_dump = memory_dump_event.ProcessMemoryDumpEvent(
+        process,
+        [
+          MakeRawMemoryDumpEvent(pid=process.pid, start=8),
+          MakeRawMemoryDumpEvent(pid=process.pid, start=16),
+          MakeRawMemoryDumpEvent(pid=process.pid, start=10)
+        ])
+    self.assertAlmostEquals(8.0, composable_dump.start)
+    self.assertAlmostEquals(16.0, composable_dump.end)
+
+    memory_dump = memory_dump_event.GlobalMemoryDump([
+        composable_dump,
+        TestProcessDumpEvent(pid=3, start=8),
+        TestProcessDumpEvent(pid=2, start=13),
+        TestProcessDumpEvent(pid=4, start=7)])
+
+    self.assertFalse(memory_dump.has_mmaps)
+    self.assertEquals(4, len(list(memory_dump.IterProcessMemoryDumps())))
+    self.assertItemsEqual([1, 2, 3, 4], memory_dump.pids)
+    self.assertAlmostEquals(7.0, memory_dump.start)
+    self.assertAlmostEquals(16.0, memory_dump.end)
+    self.assertAlmostEquals(9.0, memory_dump.duration)
+
+  def testGetMemoryUsage(self):
+    ALL = [2 ** x for x in range(7)]
+    (JAVA_HEAP_1, JAVA_HEAP_2, ASHMEM_1, ASHMEM_2, NATIVE,
+     DIRTY_1, DIRTY_2) = ALL
+
+    memory_dump = memory_dump_event.GlobalMemoryDump([
+        TestProcessDumpEvent(pid=1, mmaps={
+            '/dev/ashmem/dalvik-alloc space': {'pss': JAVA_HEAP_1}}),
+        TestProcessDumpEvent(pid=2, mmaps={
+            '/dev/ashmem/other-ashmem': {'pss': ASHMEM_1, 'pd': DIRTY_1}}),
+        TestProcessDumpEvent(pid=3, mmaps={
+            '[heap] native': {'pss': NATIVE, 'pd': DIRTY_2},
+            '/dev/ashmem/dalvik-zygote space': {'pss': JAVA_HEAP_2}}),
+        TestProcessDumpEvent(pid=4, mmaps={
+            '/dev/ashmem/other-ashmem': {'pss': ASHMEM_2}})])
+
+    self.assertTrue(memory_dump.has_mmaps)
+    self.assertItemsEqual([1, 2, 3, 4], memory_dump.pids)
+    self.assertEquals({'mmaps_overall_pss': sum(ALL[:5]),
+                       'mmaps_private_dirty': DIRTY_1 + DIRTY_2,
+                       'mmaps_java_heap': JAVA_HEAP_1 + JAVA_HEAP_2,
+                       'mmaps_ashmem': ASHMEM_1 + ASHMEM_2,
+                       'mmaps_native_heap': NATIVE},
+                      memory_dump.GetMemoryUsage())
+
+  def testGetMemoryUsageWithAllocators(self):
+    process_dump1 = TestProcessDumpEvent(
+        mmaps={'/dev/ashmem/other-ashmem': {'pss': 5}},
+        allocators={'v8': {'size': 10, 'allocated_objects_size' : 5}})
+    process_dump2 = TestProcessDumpEvent(
+        mmaps={'/dev/ashmem/other-ashmem': {'pss': 5}},
+        allocators={'v8': {'size': 20, 'allocated_objects_size' : 10}})
+    memory_dump = memory_dump_event.GlobalMemoryDump(
+        [process_dump1, process_dump2])
+    self.assertEquals({'mmaps_overall_pss': 10,
+                       'mmaps_private_dirty': 0,
+                       'mmaps_java_heap': 0,
+                       'mmaps_ashmem': 10,
+                       'mmaps_native_heap': 0,
+                       'allocator_v8': 30,
+                       'allocated_objects_v8': 15},
+                      memory_dump.GetMemoryUsage())
+
+  def testGetMemoryUsageWithAndroidMemtrack(self):
+    GL1, EGL1, GL2, EGL2 = [2 ** x for x in range(4)]
+    process_dump1 = TestProcessDumpEvent(
+        allocators={'gpu/android_memtrack/gl': {'memtrack_pss' : GL1},
+                    'gpu/android_memtrack/graphics': {'memtrack_pss': EGL1}})
+    process_dump2 = TestProcessDumpEvent(
+        allocators={'gpu/android_memtrack/gl': {'memtrack_pss' : GL2},
+                    'gpu/android_memtrack/graphics': {'memtrack_pss': EGL2}})
+    memory_dump = memory_dump_event.GlobalMemoryDump(
+        [process_dump1, process_dump2])
+    self.assertEquals({'android_memtrack_gl': GL1 + GL2,
+                       'android_memtrack_graphics': EGL1 + EGL2},
+                      memory_dump.GetMemoryUsage())
+
+  def testGetMemoryUsageDiscountsTracing(self):
+    ALL = [2 ** x for x in range(5)]
+    (HEAP, DIRTY, MALLOC, TRACING_1, TRACING_2) = ALL
+
+    memory_dump = memory_dump_event.GlobalMemoryDump([
+        TestProcessDumpEvent(
+            mmaps={'/dev/ashmem/libc malloc': {'pss': HEAP + TRACING_2,
+                                               'pd': DIRTY + TRACING_2}},
+            allocators={
+                'tracing': {'size': TRACING_1, 'resident_size': TRACING_2},
+                'malloc': {'size': MALLOC + TRACING_1}})])
+
+    self.assertEquals({'mmaps_overall_pss': HEAP,
+                       'mmaps_private_dirty': DIRTY,
+                       'mmaps_java_heap': 0,
+                       'mmaps_ashmem': 0,
+                       'mmaps_native_heap': HEAP,
+                       'allocator_tracing': TRACING_1,
+                       'allocator_malloc': MALLOC},
+                      memory_dump.GetMemoryUsage())
diff --git a/catapult/telemetry/telemetry/timeline/model.py b/catapult/telemetry/telemetry/timeline/model.py
new file mode 100644
index 0000000..c1e71a8
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/model.py
@@ -0,0 +1,280 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""A container for timeline-based events and traces and can handle importing
+raw event data from different sources. This model closely resembles that in the
+trace_viewer project:
+https://code.google.com/p/trace-viewer/
+"""
+
+import logging
+from operator import attrgetter
+
+from telemetry.timeline import async_slice as async_slice_module
+from telemetry.timeline import bounds
+from telemetry.timeline import event_container
+from telemetry.timeline import inspector_importer
+from telemetry.timeline import process as process_module
+from telemetry.timeline import slice as slice_module
+from telemetry.timeline import surface_flinger_importer
+from telemetry.timeline import tab_id_importer
+from telemetry.timeline import trace_data as trace_data_module
+from telemetry.timeline import trace_event_importer
+
+# Register importers for data
+
+_IMPORTERS = [
+    inspector_importer.InspectorTimelineImporter,
+    tab_id_importer.TabIdImporter,
+    trace_event_importer.TraceEventTimelineImporter,
+    surface_flinger_importer.SurfaceFlingerTimelineImporter
+]
+
+
+class MarkerMismatchError(Exception):
+  def __init__(self):
+    super(MarkerMismatchError, self).__init__(
+        'Number or order of timeline markers does not match provided labels')
+
+
+class MarkerOverlapError(Exception):
+  def __init__(self):
+    super(MarkerOverlapError, self).__init__(
+        'Overlapping timeline markers found')
+
+
+def IsSliceOrAsyncSlice(t):
+  if t == async_slice_module.AsyncSlice:
+    return True
+  return t == slice_module.Slice
+
+
+class TimelineModel(event_container.TimelineEventContainer):
+  def __init__(self, trace_data=None, shift_world_to_zero=True):
+    """ Initializes a TimelineModel.
+
+    Args:
+        trace_data: trace_data.TraceData containing events to import
+        shift_world_to_zero: If true, the events will be shifted such that the
+            first event starts at time 0.
+    """
+    super(TimelineModel, self).__init__(name='TimelineModel', parent=None)
+    self._bounds = bounds.Bounds()
+    self._thread_time_bounds = {}
+    self._processes = {}
+    self._browser_process = None
+    self._gpu_process = None
+    self._surface_flinger_process = None
+    self._frozen = False
+    self._tab_ids_to_renderer_threads_map = {}
+    self.import_errors = []
+    self.metadata = []
+    self.flow_events = []
+    self._global_memory_dumps = None
+    if trace_data is not None:
+      self.ImportTraces(trace_data, shift_world_to_zero=shift_world_to_zero)
+
+  def SetGlobalMemoryDumps(self, global_memory_dumps):
+    """Populates the model with a sequence of GlobalMemoryDump objects."""
+    assert not self._frozen and self._global_memory_dumps is None
+    # Keep dumps sorted in chronological order.
+    self._global_memory_dumps = tuple(sorted(global_memory_dumps,
+                                             key=lambda dump: dump.start))
+
+  def IterGlobalMemoryDumps(self):
+    """Iterate over the memory dump events of this model."""
+    return iter(self._global_memory_dumps or [])
+
+  def IterChildContainers(self):
+    for process in self._processes.itervalues():
+      yield process
+
+  def GetAllProcesses(self):
+    return self._processes.values()
+
+  def GetAllThreads(self):
+    threads = []
+    for process in self._processes.values():
+      threads.extend(process.threads.values())
+    return threads
+
+  @property
+  def bounds(self):
+    return self._bounds
+
+  @property
+  def processes(self):
+    return self._processes
+
+  @property
+  def browser_process(self):
+    return self._browser_process
+
+  @browser_process.setter
+  def browser_process(self, browser_process):
+    self._browser_process = browser_process
+
+  @property
+  def gpu_process(self):
+    return self._gpu_process
+
+  @gpu_process.setter
+  def gpu_process(self, gpu_process):
+    self._gpu_process = gpu_process
+
+  @property
+  def surface_flinger_process(self):
+    return self._surface_flinger_process
+
+  @surface_flinger_process.setter
+  def surface_flinger_process(self, surface_flinger_process):
+    self._surface_flinger_process = surface_flinger_process
+
+  def AddMappingFromTabIdToRendererThread(self, tab_id, renderer_thread):
+    if self._frozen:
+      raise Exception('Cannot add mapping from tab id to renderer thread once '
+                      'trace is imported')
+    self._tab_ids_to_renderer_threads_map[tab_id] = renderer_thread
+
+  def ImportTraces(self, trace_data, shift_world_to_zero=True):
+    """Populates the model with the provided trace data.
+
+    trace_data must be an instance of TraceData.
+
+    Passing shift_world_to_zero=True causes the events to be shifted such that
+    the first event starts at time 0.
+    """
+    if self._frozen:
+      raise Exception("Cannot add events once trace is imported")
+    assert isinstance(trace_data, trace_data_module.TraceData)
+
+    importers = self._CreateImporters(trace_data)
+
+    for importer in importers:
+      # TODO: catch exceptions here and add them to the error list.
+      importer.ImportEvents()
+    for record in trace_data.metadata_records:
+      self.metadata.append(record)
+    self.FinalizeImport(shift_world_to_zero, importers)
+
+  def FinalizeImport(self, shift_world_to_zero=False, importers=None):
+    if importers == None:
+      importers = []
+    self.UpdateBounds()
+    if not self.bounds.is_empty:
+      for process in self._processes.itervalues():
+        process.AutoCloseOpenSlices(self.bounds.max,
+                                    self._thread_time_bounds)
+
+    for importer in importers:
+      importer.FinalizeImport()
+
+    for process in self.processes.itervalues():
+      process.FinalizeImport()
+
+    if shift_world_to_zero:
+      self.ShiftWorldToZero()
+    self.UpdateBounds()
+
+    # Import has been finalized, so freeze the model to prevent the timeline
+    # from being modified further.
+    self._frozen = True
+
+  def ShiftWorldToZero(self):
+    self.UpdateBounds()
+    if self._bounds.is_empty:
+      return
+    shift_amount = self._bounds.min
+    for event in self.IterAllEvents():
+      event.start -= shift_amount
+
+  def UpdateBounds(self):
+    self._bounds.Reset()
+    for event in self.IterAllEvents():
+      self._bounds.AddValue(event.start)
+      self._bounds.AddValue(event.end)
+
+    self._thread_time_bounds = {}
+    for thread in self.GetAllThreads():
+      self._thread_time_bounds[thread] = bounds.Bounds()
+      for event in thread.IterEventsInThisContainer(
+          event_type_predicate=lambda t: True,
+          event_predicate=lambda e: True):
+        if event.thread_start != None:
+          self._thread_time_bounds[thread].AddValue(event.thread_start)
+        if event.thread_end != None:
+          self._thread_time_bounds[thread].AddValue(event.thread_end)
+
+  def GetOrCreateProcess(self, pid):
+    if pid not in self._processes:
+      assert not self._frozen
+      self._processes[pid] = process_module.Process(self, pid)
+    return self._processes[pid]
+
+  def FindTimelineMarkers(self, timeline_marker_names):
+    """Find the timeline events with the given names.
+
+    If the number and order of events found does not match the names,
+    raise an error.
+    """
+    # Make sure names are in a list and remove all None names
+    if not isinstance(timeline_marker_names, list):
+      timeline_marker_names = [timeline_marker_names]
+    names = [x for x in timeline_marker_names if x is not None]
+
+    # Gather all events that match the names and sort them.
+    events = []
+    name_set = set()
+    for name in names:
+      name_set.add(name)
+
+    def IsEventNeeded(event):
+      if event.parent_slice != None:
+        return
+      return event.name in name_set
+
+    events = list(self.IterAllEvents(
+      recursive=True,
+      event_type_predicate=IsSliceOrAsyncSlice,
+      event_predicate=IsEventNeeded))
+    events.sort(key=attrgetter('start'))
+
+    # Check if the number and order of events matches the provided names,
+    # and that the events don't overlap.
+    if len(events) != len(names):
+      raise MarkerMismatchError()
+    for (i, event) in enumerate(events):
+      if event.name != names[i]:
+        raise MarkerMismatchError()
+    for i in xrange(0, len(events)):
+      for j in xrange(i+1, len(events)):
+        if events[j].start < events[i].start + events[i].duration:
+          raise MarkerOverlapError()
+
+    return events
+
+  def GetRendererProcessFromTabId(self, tab_id):
+    renderer_thread = self.GetRendererThreadFromTabId(tab_id)
+    if renderer_thread:
+      return renderer_thread.parent
+    return None
+
+  def GetRendererThreadFromTabId(self, tab_id):
+    return self._tab_ids_to_renderer_threads_map.get(tab_id, None)
+
+  def _CreateImporters(self, trace_data):
+    def FindImporterClassForPart(part):
+      for importer_class in _IMPORTERS:
+        if importer_class.GetSupportedPart() == part:
+          return importer_class
+
+    importers = []
+    for part in trace_data.active_parts:
+      importer_class = FindImporterClassForPart(part)
+      if not importer_class:
+        logging.warning('No importer found for %s' % repr(part))
+      else:
+        importers.append(importer_class(self, trace_data))
+        importers.sort(key=lambda k: k.import_order)
+
+    return importers
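A minimal usage sketch for the model API above, not part of the patch; the
event dicts follow the same Chrome trace format used by the unit tests in this
patch, and 'my_marker' plus the pid/tid/timestamp values are made up for
illustration.

from telemetry.timeline import model as model_module
from telemetry.timeline import trace_data

builder = trace_data.TraceDataBuilder()
builder.AddEventsTo(trace_data.CHROME_TRACE_PART, [
    # A single top-level slice named 'my_marker'.
    {'name': 'my_marker', 'args': {}, 'pid': 1, 'tid': 1, 'ts': 10,
     'cat': 'foo', 'ph': 'B'},
    {'name': 'my_marker', 'args': {}, 'pid': 1, 'tid': 1, 'ts': 20,
     'cat': 'foo', 'ph': 'E'},
])
m = model_module.TimelineModel(builder.AsData())

# FindTimelineMarkers accepts a single name or a list of names; it raises
# MarkerMismatchError if the matching top-level slices do not line up with
# the requested names, and MarkerOverlapError if they overlap.
marker = m.FindTimelineMarkers('my_marker')[0]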
diff --git a/catapult/telemetry/telemetry/timeline/model_unittest.py b/catapult/telemetry/telemetry/timeline/model_unittest.py
new file mode 100644
index 0000000..34ac85b
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/model_unittest.py
@@ -0,0 +1,24 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.timeline import model as model_module
+from telemetry.timeline import trace_data
+
+
+class TimelineModelUnittest(unittest.TestCase):
+  def testEmptyImport(self):
+    model_module.TimelineModel(trace_data.TraceData())
+
+  def testBrowserProcess(self):
+    builder = trace_data.TraceDataBuilder()
+    builder.AddEventsTo(trace_data.CHROME_TRACE_PART, [
+      {"name": "process_name", "args": {"name": "Browser"},
+       "pid": 5, "ph": "M"},
+      {"name": "thread_name", "args": {"name": "CrBrowserMain"},
+       "pid": 5, "tid": 32578, "ph": "M"}
+    ])
+    model = model_module.TimelineModel(builder.AsData())
+    self.assertEquals(5, model.browser_process.pid)
diff --git a/catapult/telemetry/telemetry/timeline/process.py b/catapult/telemetry/telemetry/timeline/process.py
new file mode 100644
index 0000000..8accaa5
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/process.py
@@ -0,0 +1,100 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import telemetry.timeline.counter as tracing_counter
+import telemetry.timeline.event as event_module
+import telemetry.timeline.event_container as event_container
+import telemetry.timeline.thread as tracing_thread
+from telemetry.timeline import memory_dump_event
+
+
+class Process(event_container.TimelineEventContainer):
+  """The Process represents a single userland process in the trace.
+  """
+  def __init__(self, parent, pid):
+    super(Process, self).__init__('process %s' % pid, parent)
+    self.pid = pid
+    self.labels = None
+    self._threads = {}
+    self._counters = {}
+    self._trace_buffer_overflow_event = None
+    self._memory_dump_events = {}
+
+  @property
+  def trace_buffer_did_overflow(self):
+    return self._trace_buffer_overflow_event is not None
+
+  @property
+  def trace_buffer_overflow_event(self):
+    return self._trace_buffer_overflow_event
+
+  @property
+  def threads(self):
+    return self._threads
+
+  @property
+  def counters(self):
+    return self._counters
+
+  def IterChildContainers(self):
+    for thread in self._threads.itervalues():
+      yield thread
+    for counter in self._counters.itervalues():
+      yield counter
+
+  def IterEventsInThisContainer(self, event_type_predicate, event_predicate):
+    if (self.trace_buffer_did_overflow and
+        event_type_predicate(event_module.TimelineEvent) and
+        event_predicate(self._trace_buffer_overflow_event)):
+      yield self._trace_buffer_overflow_event
+    if (self._memory_dump_events and
+        event_type_predicate(memory_dump_event.ProcessMemoryDumpEvent)):
+      for memory_dump in self._memory_dump_events.itervalues():
+        if event_predicate(memory_dump):
+          yield memory_dump
+
+  def GetOrCreateThread(self, tid):
+    thread = self.threads.get(tid, None)
+    if thread:
+      return thread
+    thread = tracing_thread.Thread(self, tid)
+    self._threads[tid] = thread
+    return thread
+
+  def GetCounter(self, category, name):
+    counter_id = category + '.' + name
+    if counter_id in self.counters:
+      return self.counters[counter_id]
+    raise ValueError(
+        'Counter %s not found in process with id %s.' % (counter_id,
+                                                         self.pid))
+
+  def GetOrCreateCounter(self, category, name):
+    try:
+      return self.GetCounter(category, name)
+    except ValueError:
+      ctr = tracing_counter.Counter(self, category, name)
+      self._counters[ctr.full_name] = ctr
+      return ctr
+
+  def AutoCloseOpenSlices(self, max_timestamp, thread_time_bounds):
+    for thread in self._threads.itervalues():
+      thread.AutoCloseOpenSlices(max_timestamp, thread_time_bounds[thread].max)
+
+  def SetTraceBufferOverflowTimestamp(self, timestamp):
+    # TODO: use instant event for trace_buffer_overflow_event
+    self._trace_buffer_overflow_event = event_module.TimelineEvent(
+        "TraceBufferInfo", "trace_buffer_overflowed", timestamp, 0)
+
+  def AddMemoryDumpEvent(self, memory_dump):
+    """Add a ProcessMemoryDumpEvent to this process."""
+    if memory_dump.dump_id in self._memory_dump_events:
+      raise ValueError('Duplicate memory dump id %s in process with id %s.' % (
+          memory_dump.dump_id, self.pid))
+    self._memory_dump_events[memory_dump.dump_id] = memory_dump
+
+  def FinalizeImport(self):
+    for thread in self._threads.itervalues():
+      thread.FinalizeImport()
+    for counter in self._counters.itervalues():
+      counter.FinalizeImport()
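A short sketch of how the predicate-based IterEventsInThisContainer API above
can be driven by a caller, not part of the patch; GetProcessMemoryDumps is a
hypothetical helper and |process| is assumed to be a Process already populated
by an importer.

from telemetry.timeline import memory_dump_event

def GetProcessMemoryDumps(process):
  # |process| is assumed to be a telemetry.timeline.process.Process whose
  # memory dumps were added via AddMemoryDumpEvent during import.
  is_memory_dump = lambda t: t is memory_dump_event.ProcessMemoryDumpEvent
  return list(process.IterEventsInThisContainer(
      event_type_predicate=is_memory_dump,
      event_predicate=lambda e: True))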
diff --git a/catapult/telemetry/telemetry/timeline/sample.py b/catapult/telemetry/telemetry/timeline/sample.py
new file mode 100644
index 0000000..806f60f
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/sample.py
@@ -0,0 +1,20 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import telemetry.timeline.event as timeline_event
+
+
+class Sample(timeline_event.TimelineEvent):
+  """A Sample represents a sample taken at an instant in time
+  plus parameters associated with that sample.
+
+  NOTE: The Sample class implements the same interface as
+  Slice. These must be kept in sync.
+
+  All time units are stored in milliseconds.
+  """
+  def __init__(self, parent_thread, category, name, timestamp, args=None):
+    super(Sample, self).__init__(
+        category, name, timestamp, 0, args=args)
+    self.parent_thread = parent_thread
diff --git a/catapult/telemetry/telemetry/timeline/slice.py b/catapult/telemetry/telemetry/timeline/slice.py
new file mode 100644
index 0000000..3a39a80
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/slice.py
@@ -0,0 +1,78 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import telemetry.timeline.event as timeline_event
+
+
+class Slice(timeline_event.TimelineEvent):
+  """A Slice represents an interval of time plus parameters associated
+  with that interval.
+
+  NOTE: The Sample class implements the same interface as
+  Slice. These must be kept in sync.
+
+  All time units are stored in milliseconds.
+  """
+  def __init__(self, parent_thread, category, name, timestamp, duration=0,
+               thread_timestamp=None, thread_duration=None, args=None):
+    super(Slice, self).__init__(
+        category, name, timestamp, duration, thread_timestamp, thread_duration,
+        args)
+    self.parent_thread = parent_thread
+    self.parent_slice = None
+    self.sub_slices = []
+    self.did_not_finish = False
+
+  def AddSubSlice(self, sub_slice):
+    assert sub_slice.parent_slice == self
+    self.sub_slices.append(sub_slice)
+
+  def IterEventsInThisContainerRecrusively(self, stack=None):
+    # This looks awkward, but it lets us create only a single iterator instead
+    # of having to create one iterator for every subslice found.
+    if stack == None:
+      stack = []
+    else:
+      assert len(stack) == 0
+    stack.extend(reversed(self.sub_slices))
+    while len(stack):
+      s = stack.pop()
+      yield s
+      stack.extend(reversed(s.sub_slices))
+
+  @property
+  def self_time(self):
+    """Time spent in this function less any time spent in child events."""
+    child_total = sum(
+      [e.duration for e in self.sub_slices])
+    return self.duration - child_total
+
+  @property
+  def self_thread_time(self):
+    """Thread (scheduled) time spent in this function less any thread time spent
+    in child events. Returns None if the slice or any of its children does not
+    have a thread_duration value.
+    """
+    if not self.thread_duration:
+      return None
+
+    child_total = 0
+    for e in self.sub_slices:
+      if e.thread_duration == None:
+        return None
+      child_total += e.thread_duration
+
+    return self.thread_duration - child_total
+
+  def _GetSubSlicesRecursive(self):
+    for sub_slice in self.sub_slices:
+      for s in sub_slice.GetAllSubSlices():
+        yield s
+      yield sub_slice
+
+  def GetAllSubSlices(self):
+    return list(self._GetSubSlicesRecursive())
+
+  def GetAllSubSlicesOfName(self, name):
+    return [e for e in self.GetAllSubSlices() if e.name == name]
diff --git a/catapult/telemetry/telemetry/timeline/slice_unittest.py b/catapult/telemetry/telemetry/timeline/slice_unittest.py
new file mode 100644
index 0000000..67106c3
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/slice_unittest.py
@@ -0,0 +1,35 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.timeline.slice import Slice
+
+
+class SliceTest(unittest.TestCase):
+  def testChildrenLogic(self):
+    # [      top          ]
+    #   [ a  ]    [  b  ]
+    #    [x]
+    top = Slice(None, 'cat', 'top', 0, duration=10, thread_timestamp=0,
+                thread_duration=5)
+    a = Slice(None, 'cat', 'a', 1, duration=2, thread_timestamp=0.5,
+              thread_duration=1)
+    x = Slice(None, 'cat', 'x', 1.5, duration=0.25, thread_timestamp=0.75,
+              thread_duration=0.125)
+    b = Slice(None, 'cat', 'b', 5, duration=2, thread_timestamp=None,
+              thread_duration=None)
+    top.sub_slices.extend([a, b])
+    a.sub_slices.append(x)
+
+    all_children = list(top.IterEventsInThisContainerRecrusively())
+    self.assertEquals([a, x, b], all_children)
+
+    self.assertEquals(x.self_time, 0.25)
+    self.assertEquals(a.self_time, 1.75) # 2 - 0.25
+    self.assertEquals(top.self_time, 6) # 10 - 2 - 2
+
+    self.assertEquals(x.self_thread_time, 0.125)
+    self.assertEquals(a.self_thread_time, 0.875) # 1 - 0.125
+    self.assertEquals(top.self_thread_time, None) # b has no thread time
diff --git a/catapult/telemetry/telemetry/timeline/surface_flinger_importer.py b/catapult/telemetry/telemetry/timeline/surface_flinger_importer.py
new file mode 100644
index 0000000..bc4e72d
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/surface_flinger_importer.py
@@ -0,0 +1,36 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.timeline import importer
+from telemetry.timeline import trace_data as trace_data_module
+
+class SurfaceFlingerTimelineImporter(importer.TimelineImporter):
+  def __init__(self, model, trace_data):
+    super(SurfaceFlingerTimelineImporter, self).__init__(
+        model, trace_data, import_order=2)
+    self._events = trace_data.GetEventsFor(
+        trace_data_module.SURFACE_FLINGER_PART)
+    self._surface_flinger_process = None
+
+  @staticmethod
+  def GetSupportedPart():
+    return trace_data_module.SURFACE_FLINGER_PART
+
+  def ImportEvents(self):
+    for event in self._events:
+      self._surface_flinger_process = self._model.GetOrCreateProcess(
+          event['pid'])
+      self._surface_flinger_process.name = 'SurfaceFlinger'
+      thread = self._surface_flinger_process.GetOrCreateThread(event['tid'])
+      thread.BeginSlice(event['cat'],
+                        event['name'],
+                        event['ts'],
+                        args=event.get('args'))
+      thread.EndSlice(event['ts'])
+
+  def FinalizeImport(self):
+    """Called by the Model after all other importers have imported their
+    events."""
+    self._model.UpdateBounds()
+    self._model.surface_flinger_process = self._surface_flinger_process
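For reference, the raw event fields read by ImportEvents above are 'pid',
'tid', 'cat', 'name', 'ts' and, optionally, 'args'. A hypothetical end-to-end
sketch, not part of the patch, with made-up pid/tid/name/timestamp values:

from telemetry.timeline import model as model_module
from telemetry.timeline import trace_data

builder = trace_data.TraceDataBuilder()
builder.AddEventsTo(trace_data.SURFACE_FLINGER_PART, [
    # One zero-duration surface flinger slice.
    {'pid': 42, 'tid': 42, 'cat': 'SurfaceFlinger', 'name': 'vsync',
     'ts': 100.0, 'args': {}},
])
m = model_module.TimelineModel(builder.AsData())
# The importer names the process and exposes it on the model.
assert m.surface_flinger_process.name == 'SurfaceFlinger'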
diff --git a/catapult/telemetry/telemetry/timeline/tab_id_importer.py b/catapult/telemetry/telemetry/timeline/tab_id_importer.py
new file mode 100644
index 0000000..6edbe90
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/tab_id_importer.py
@@ -0,0 +1,63 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+
+from telemetry.timeline import importer
+from telemetry.timeline import trace_data as trace_data_module
+
+class TraceBufferOverflowException(Exception):
+  pass
+
+
+class TabIdImporter(importer.TimelineImporter):
+  def __init__(self, model, trace_data):
+    # Needs to run after all other importers so overflow events have been
+    # created on the model.
+    super(TabIdImporter, self).__init__(
+        model,
+        trace_data,
+        import_order=999)
+    self._trace_data = trace_data
+
+  @staticmethod
+  def GetSupportedPart():
+    return trace_data_module.TAB_ID_PART
+
+  def ImportEvents(self):
+    pass
+
+  def FinalizeImport(self):
+    self._CheckTraceBufferOverflow()
+    self._CreateTabIdsToThreadsMap()
+
+  def _CheckTraceBufferOverflow(self):
+    # Since _CreateTabIdsToThreadsMap() relies on markers in the timeline
+    # tracing data, it may not work if trace events were dropped due to a
+    # trace buffer overflow.
+    for process in self._model.GetAllProcesses():
+      if process.trace_buffer_did_overflow:
+        raise TraceBufferOverflowException(
+            'Trace buffer of process with pid=%d overflowed at timestamp %d. '
+            'Raw trace data:\n%s' %
+            (process.pid, process.trace_buffer_overflow_event.start,
+             repr(self._trace_data)))
+
+  def _CreateTabIdsToThreadsMap(self):
+    tab_id_events = self._trace_data.GetEventsFor(
+        trace_data_module.TAB_ID_PART)
+
+    for tab_id in tab_id_events:
+      try:
+        timeline_markers = self._model.FindTimelineMarkers(tab_id)
+      # If no timeline marker whose name equals |tab_id| can be found, it's
+      # non-fatal.
+      except Exception:
+        logging.warning('Cannot find timeline marker for tab with id=%s' %
+                        tab_id)
+        continue
+      assert len(timeline_markers) == 1
+      assert timeline_markers[0].start_thread == timeline_markers[0].end_thread
+      self._model.AddMappingFromTabIdToRendererThread(
+          tab_id, timeline_markers[0].start_thread)
diff --git a/catapult/telemetry/telemetry/timeline/tab_id_importer_unittest.py b/catapult/telemetry/telemetry/timeline/tab_id_importer_unittest.py
new file mode 100644
index 0000000..bdb6da2
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/tab_id_importer_unittest.py
@@ -0,0 +1,71 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.timeline import model as timeline_model
+from telemetry.timeline import tab_id_importer
+from telemetry.timeline import trace_data as trace_data_module
+
+class TabIdImporterUnitTest(unittest.TestCase):
+  def testImportOverflowedTrace(self):
+    builder = trace_data_module.TraceDataBuilder()
+    builder.AddEventsTo(trace_data_module.CHROME_TRACE_PART, [
+      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 7, 'cat': 'foo',
+       'tid': 1, 'ph': 'B'},
+      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 8, 'cat': 'foo',
+       'tid': 1, 'ph': 'E'},
+      {'name': 'b', 'args': {}, 'pid': 2, 'ts': 9, 'cat': 'foo',
+       'tid': 2, 'ph': 'B'},
+      {'name': 'b', 'args': {}, 'pid': 2, 'ts': 10, 'cat': 'foo',
+       'tid': 2, 'ph': 'E'},
+      {'name': 'trace_buffer_overflowed',
+       'args': {'overflowed_at_ts': 12},
+       'pid': 2, 'ts': 0, 'tid': 2, 'ph': 'M'}
+    ])
+    builder.AddEventsTo(
+        trace_data_module.TAB_ID_PART, ['tab-id-1', 'tab-id-2'])
+
+    with self.assertRaises(tab_id_importer.TraceBufferOverflowException) \
+        as context:
+      timeline_model.TimelineModel(builder.AsData())
+    self.assertTrue(
+        'Trace buffer of process with pid=2 overflowed at timestamp 12' in
+        context.exception.message)
+
+  def testTraceEventsWithTabIdsMarkers(self):
+    builder = trace_data_module.TraceDataBuilder()
+    builder.AddEventsTo(trace_data_module.CHROME_TRACE_PART, [
+      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 20, 'tts': 10, 'cat': 'foo',
+       'tid': 1, 'ph': 'B'},
+      # tab-id-1
+      {'name': 'tab-id-1', 'args': {}, 'pid': 1, 'ts': 25, 'cat': 'foo',
+       'tid': 1, 'ph': 'S', 'id': 72},
+      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 30, 'tts': 20, 'cat': 'foo',
+       'tid': 1, 'ph': 'E'},
+      {'name': 'tab-id-1', 'args': {}, 'pid': 1, 'ts': 35, 'cat': 'foo',
+       'tid': 1, 'ph': 'F', 'id': 72},
+      # tab-id-2
+      {'name': 'tab-id-2', 'args': {}, 'pid': 1, 'ts': 25, 'cat': 'foo',
+       'tid': 2, 'ph': 'S', 'id': 72},
+      {'name': 'tab-id-2', 'args': {}, 'pid': 1, 'ts': 26, 'cat': 'foo',
+       'tid': 2, 'ph': 'F', 'id': 72},
+    ])
+    builder.AddEventsTo(
+        trace_data_module.TAB_ID_PART, ['tab-id-1', 'tab-id-2'])
+
+    m = timeline_model.TimelineModel(builder.AsData())
+    processes = m.GetAllProcesses()
+    self.assertEqual(1, len(processes))
+    self.assertIs(processes[0], m.GetRendererProcessFromTabId('tab-id-1'))
+    self.assertIs(processes[0], m.GetRendererProcessFromTabId('tab-id-2'))
+
+    p = processes[0]
+    self.assertEqual(2, len(p.threads))
+    self.assertIs(p.threads[1], m.GetRendererThreadFromTabId('tab-id-1'))
+    self.assertIs(p.threads[2], m.GetRendererThreadFromTabId('tab-id-2'))
diff --git a/catapult/telemetry/telemetry/timeline/thread.py b/catapult/telemetry/telemetry/timeline/thread.py
new file mode 100644
index 0000000..5810ae3
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/thread.py
@@ -0,0 +1,268 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import telemetry.timeline.async_slice as async_slice_module
+import telemetry.timeline.event_container as event_container
+import telemetry.timeline.flow_event as flow_event_module
+import telemetry.timeline.sample as sample_module
+import telemetry.timeline.slice as slice_module
+
+
+class Thread(event_container.TimelineEventContainer):
+  """A Thread stores all the trace events collected for a particular
+  thread. We organize the synchronous slices on a thread by "subrows," where
+  subrow 0 has all the root slices, subrow 1 those nested 1 deep, and so on.
+  The asynchronous slices are stored in an AsyncSliceGroup object.
+  """
+  def __init__(self, process, tid):
+    super(Thread, self).__init__('thread %s' % tid, parent=process)
+    self.tid = tid
+    self._async_slices = []
+    self._flow_events = []
+    self._samples = []
+    self._toplevel_slices = []
+    self._all_slices = []
+
+    # State only valid during import.
+    self._open_slices = []
+    self._newly_added_slices = []
+
+  @property
+  def toplevel_slices(self):
+    return self._toplevel_slices
+
+  @property
+  def all_slices(self):
+    return self._all_slices
+
+  @property
+  def samples(self):
+    return self._samples
+
+  @property
+  def async_slices(self):
+    return self._async_slices
+
+  @property
+  def open_slice_count(self):
+    return len(self._open_slices)
+
+  def IterChildContainers(self):
+    return
+    yield # pylint: disable=unreachable
+
+  def IterEventsInThisContainer(self, event_type_predicate, event_predicate):
+    if event_type_predicate(slice_module.Slice):
+      for s in self._newly_added_slices:
+        if event_predicate(s):
+          yield s
+      for s in self._all_slices:
+        if event_predicate(s):
+          yield s
+
+    if event_type_predicate(async_slice_module.AsyncSlice):
+      for async_slice in self._async_slices:
+        if event_predicate(async_slice):
+          yield async_slice
+        for sub_slice in async_slice.IterEventsInThisContainerRecrusively():
+          if event_predicate(sub_slice):
+            yield sub_slice
+
+    if event_type_predicate(flow_event_module.FlowEvent):
+      for flow_event in self._flow_events:
+        if event_predicate(flow_event):
+          yield flow_event
+
+    if event_type_predicate(sample_module.Sample):
+      for sample in self._samples:
+        if event_predicate(sample):
+          yield sample
+
+  def AddSample(self, category, name, timestamp, args=None):
+    if len(self._samples) and timestamp < self._samples[-1].start:
+      raise ValueError(
+          'Samples must be added in increasing timestamp order')
+    sample = sample_module.Sample(self,
+        category, name, timestamp, args=args)
+    self._samples.append(sample)
+
+  def AddAsyncSlice(self, async_slice):
+    self._async_slices.append(async_slice)
+
+  def AddFlowEvent(self, flow_event):
+    self._flow_events.append(flow_event)
+
+  def BeginSlice(self, category, name, timestamp, thread_timestamp=None,
+                 args=None):
+    """Opens a new slice for the thread.
+    Calls to beginSlice and endSlice must be made with
+    non-monotonically-decreasing timestamps.
+
+    * category: Category to which the slice belongs.
+    * name: Name of the slice to add.
+    * timestamp: The timetsamp of the slice, in milliseconds.
+    * thread_timestamp: Thread specific clock (scheduled) timestamp of the
+                        slice, in milliseconds.
+    * args: Arguments associated with
+
+    Returns newly opened slice
+    """
+    if len(self._open_slices) > 0 and timestamp < self._open_slices[-1].start:
+      raise ValueError(
+          'Slices must be added in increasing timestamp order')
+    new_slice = slice_module.Slice(self, category, name, timestamp,
+                                    thread_timestamp=thread_timestamp,
+                                    args=args)
+    self._open_slices.append(new_slice)
+    new_slice.did_not_finish = True
+    self.PushSlice(new_slice)
+    return new_slice
+
+  def EndSlice(self, end_timestamp, end_thread_timestamp=None):
+    """ Ends the last begun slice in this group and pushes it onto the slice
+    array.
+
+    * end_timestamp: Timestamp when the slice ended in milliseconds
+    * end_thread_timestamp: Timestamp when the scheduled time of the slice ended
+                            in milliseconds
+
+    returns completed slice.
+    """
+    if not len(self._open_slices):
+      raise ValueError(
+          'EndSlice called without an open slice')
+    curr_slice = self._open_slices.pop()
+    if end_timestamp < curr_slice.start:
+      raise ValueError(
+          'Slice %s end time is before its start.' % curr_slice.name)
+    curr_slice.duration = end_timestamp - curr_slice.start
+    # On Windows, it is possible to have a value for |end_thread_timestamp|
+    # but not for |curr_slice.thread_start|, because it takes some time to
+    # initialize the thread time timer.
+    if curr_slice.thread_start != None and end_thread_timestamp != None:
+      curr_slice.thread_duration = (end_thread_timestamp -
+                                    curr_slice.thread_start)
+    curr_slice.did_not_finish = False
+    return curr_slice
+
+  def PushCompleteSlice(self, category, name, timestamp, duration,
+                        thread_timestamp, thread_duration, args=None):
+    new_slice = slice_module.Slice(self, category, name, timestamp,
+                                   thread_timestamp=thread_timestamp,
+                                   args=args)
+    if duration == None:
+      new_slice.did_not_finish = True
+    else:
+      new_slice.duration = duration
+      new_slice.thread_duration = thread_duration
+    self.PushSlice(new_slice)
+    return new_slice
+
+  def PushMarkSlice(self, category, name, timestamp, thread_timestamp,
+        args=None):
+    new_slice = slice_module.Slice(self, category, name, timestamp,
+                                   thread_timestamp=thread_timestamp,
+                                   args=args)
+    self.PushSlice(new_slice)
+    return new_slice
+
+  def PushSlice(self, new_slice):
+    self._newly_added_slices.append(new_slice)
+    return new_slice
+
+  def AutoCloseOpenSlices(self, max_timestamp, max_thread_timestamp):
+    for s in self._newly_added_slices:
+      if s.did_not_finish:
+        s.duration = max_timestamp - s.start
+        assert s.duration >= 0
+        if s.thread_start != None:
+          s.thread_duration = max_thread_timestamp - s.thread_start
+          assert s.thread_duration >= 0
+    self._open_slices = []
+
+  def IsTimestampValidForBeginOrEnd(self, timestamp):
+    if not len(self._open_slices):
+      return True
+    return timestamp >= self._open_slices[-1].start
+
+  def FinalizeImport(self):
+    self._BuildSliceSubRows()
+
+  def _BuildSliceSubRows(self):
+    """This function works by walking through slices by start time.
+
+     The basic idea here is to insert each slice as deep into the subrow
+     list as it can go such that every subslice is fully contained by its
+     parent slice.
+
+     Visually, if we start with this:
+      0:  [    a       ]
+      1:    [  b  ]
+      2:    [c][d]
+
+     To place this slice:
+                   [e]
+     We first check row 2's last item, [d]. [e] won't fit into [d] (they don't
+     even intersect). So we go to row 1. That gives us [b], and [e] won't fit
+     into that either. So, we go to row 0 and its last slice, [a]. That can
+     completely contain [e], so that means we should add [e] as a subslice
+     of [a]. That puts it on row 1, yielding:
+      0:  [    a       ]
+      1:    [  b  ][e]
+      2:    [c][d]
+
+     If we then get this slice:
+                          [f]
+     We do the same deepest-to-shallowest walk of the subrows trying to fit
+     it. This time, it doesn't fit in any open slice. So, we simply append
+     it to row 0 (a root slice):
+      0:  [    a       ]  [f]
+      1:    [  b  ][e]
+    """
+    def CompareSlices(s1, s2):
+      if s1.start == s2.start:
+        # Break ties by having the slice with the greatest
+        # end timestamp come first.
+        return cmp(s2.end, s1.end)
+      return cmp(s1.start, s2.start)
+
+    assert len(self._toplevel_slices) == 0
+    assert len(self._all_slices) == 0
+    if not len(self._newly_added_slices):
+      return
+
+    self._all_slices.extend(self._newly_added_slices)
+
+    sorted_slices = sorted(self._newly_added_slices, cmp=CompareSlices)
+    root_slice = sorted_slices[0]
+    self._toplevel_slices.append(root_slice)
+    for s in sorted_slices[1:]:
+      if not self._AddSliceIfBounds(root_slice, s):
+        root_slice = s
+        self._toplevel_slices.append(root_slice)
+    self._newly_added_slices = []
+
+
+  def _AddSliceIfBounds(self, root, child):
+    """Adds a child slice to a root slice its proper row.
+    Return False if the child slice is not in the bounds
+    of the root slice.
+
+    Because we know that the start time of child is >= the start time
+    of all other slices seen so far, we can just check the last slice
+    of each row for bounding.
+    """
+    # The source trace data is in microseconds but we store it as milliseconds
+    # in floating-point. Since we can't represent micros as millis perfectly,
+    # two end=start+duration combos that should be the same will be slightly
+    # different. Round back to micros to ensure equality below.
+    child_end_micros = round(child.end * 1000)
+    root_end_micros = round(root.end * 1000)
+    if child.start >= root.start and child_end_micros <= root_end_micros:
+      if len(root.sub_slices) > 0:
+        if self._AddSliceIfBounds(root.sub_slices[-1], child):
+          return True
+      child.parent_slice = root
+      root.AddSubSlice(child)
+      return True
+    return False
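
The subrow machinery above is easiest to see end to end: BeginSlice/EndSlice calls with non-decreasing timestamps are nested into subrows by _BuildSliceSubRows when the model is finalized. A minimal sketch of that flow, using only APIs that appear in this patch (names and timestamps are illustrative):

from telemetry.timeline import model as model_module

model = model_module.TimelineModel()
thread = model.GetOrCreateProcess(1).GetOrCreateThread(2)

# 'outer' spans 10-20 ms and fully contains 'inner' (12-14 ms).
thread.BeginSlice('cat', 'outer', 10)
thread.BeginSlice('cat', 'inner', 12)
thread.EndSlice(14)
thread.EndSlice(20)

model.FinalizeImport(shift_world_to_zero=False)

# _BuildSliceSubRows leaves 'outer' on subrow 0 and nests 'inner' under it.
assert [s.name for s in thread.toplevel_slices] == ['outer']
assert [s.name for s in thread.toplevel_slices[0].sub_slices] == ['inner']
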
diff --git a/catapult/telemetry/telemetry/timeline/thread_unittest.py b/catapult/telemetry/telemetry/timeline/thread_unittest.py
new file mode 100644
index 0000000..081a100
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/thread_unittest.py
@@ -0,0 +1,32 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import unittest
+
+from telemetry.timeline import model as model_module
+
+
+class ThreadUnittest(unittest.TestCase):
+
+  def testIterAllSlicesInRange(self):
+    model = model_module.TimelineModel()
+    renderer_main = model.GetOrCreateProcess(1).GetOrCreateThread(2)
+    #    [       X     ] [   Y    ] [   U   ]
+    #        [   Z   ]     [ T ]
+    #      |                           |
+    #    start                        end
+    renderer_main.BeginSlice('cat1', 'X', 10)
+    renderer_main.BeginSlice('cat1', 'Z', 20)
+    renderer_main.EndSlice(30)
+    renderer_main.EndSlice(40)
+    renderer_main.BeginSlice('cat1', 'Y', 50)
+    renderer_main.BeginSlice('cat1', 'T', 52)
+    renderer_main.EndSlice(55)
+    renderer_main.EndSlice(60)
+    renderer_main.BeginSlice('cat1', 'U', 60)
+    renderer_main.EndSlice(70)
+
+    model.FinalizeImport(shift_world_to_zero=False)
+    slice_names = set(s.name for s in
+                      renderer_main.IterAllSlicesInRange(start=12, end=65))
+    self.assertEqual(slice_names, {'Z', 'Y', 'T'})
diff --git a/catapult/telemetry/telemetry/timeline/trace_data.py b/catapult/telemetry/telemetry/timeline/trace_data.py
new file mode 100644
index 0000000..cf6585b
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/trace_data.py
@@ -0,0 +1,184 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+
+class NonSerializableTraceData(Exception):
+  """Raised when raw trace data cannot be serialized to TraceData."""
+  pass
+
+
+def _ValidateRawData(raw):
+  try:
+    json.dumps(raw)
+  except TypeError as e:
+    raise NonSerializableTraceData('TraceData is not serializable: %s' % e)
+  except ValueError as e:
+    raise NonSerializableTraceData('TraceData is not serializable: %s' % e)
+
+
+class TraceDataPart(object):
+  """TraceData can have a variety of events.
+
+  These are called "parts" and are accessed by the following fixed field names.
+  """
+  def __init__(self, raw_field_name):
+    self._raw_field_name = raw_field_name
+
+  def __repr__(self):
+    return 'TraceDataPart("%s")' % self._raw_field_name
+
+  @property
+  def raw_field_name(self):
+    return self._raw_field_name
+
+
+CHROME_TRACE_PART = TraceDataPart('traceEvents')
+INSPECTOR_TRACE_PART = TraceDataPart('inspectorTimelineEvents')
+SURFACE_FLINGER_PART = TraceDataPart('surfaceFlinger')
+TAB_ID_PART = TraceDataPart('tabIds')
+TELEMETRY_PART = TraceDataPart('telemetry')
+
+ALL_TRACE_PARTS = {CHROME_TRACE_PART,
+                   INSPECTOR_TRACE_PART,
+                   SURFACE_FLINGER_PART,
+                   TAB_ID_PART,
+                   TELEMETRY_PART}
+
+
+def _HasEventsFor(part, raw):
+  assert isinstance(part, TraceDataPart)
+  if part.raw_field_name not in raw:
+    return False
+  return len(raw[part.raw_field_name]) > 0
+
+
+class TraceData(object):
+  """Validates, parses, and serializes raw data.
+
+  NOTE: raw data must only include primitive objects!
+  By design, TraceData must contain only data that is BOTH json-serializable
+  to a file, AND restorable once again from that file into TraceData without
+  assistance from other classes.
+
+  Raw data can be one of three standard trace_event formats:
+  1. Trace container format: a json-parseable dict.
+  2. A json-parseable array: assumed to be chrome trace data.
+  3. A json-parseable array missing the final ']': assumed to be chrome trace
+     data.
+  """
+  def __init__(self, raw_data=None):
+    """Creates TraceData from the given data."""
+    self._raw_data = {}
+    self._events_are_safely_mutable = False
+    if not raw_data:
+      return
+    _ValidateRawData(raw_data)
+
+    if isinstance(raw_data, basestring):
+      if raw_data.startswith('[') and not raw_data.endswith(']'):
+        if raw_data.endswith(','):
+          raw_data = raw_data[:-1]
+        raw_data += ']'
+      json_data = json.loads(raw_data)
+      # The parsed data isn't shared with anyone else, so we mark this value
+      # as safely mutable.
+      self._events_are_safely_mutable = True
+    else:
+      json_data = raw_data
+
+    if isinstance(json_data, dict):
+      self._raw_data = json_data
+    elif isinstance(json_data, list):
+      if len(json_data) == 0:
+        self._raw_data = {}
+        return
+      self._raw_data = {CHROME_TRACE_PART.raw_field_name: json_data}
+    else:
+      raise Exception('Unrecognized data format.')
+
+  def _SetFromBuilder(self, d):
+    self._raw_data = d
+    self._events_are_safely_mutable = True
+
+  @property
+  def events_are_safely_mutable(self):
+    """Returns true if the events in this value are completely sealed.
+
+    Some importers want to take complex fields out of the TraceData and add
+    them to the model, changing them subtly as they do so. If the TraceData
+    was constructed with data that is shared with something outside the trace
+    data, for instance a test harness, then this mutation is unexpected. But,
+    if the values are sealed, then mutating the events is a lot faster.
+
+    We know if events are sealed if the value came from a string, or if the
+    value came from a TraceDataBuilder.
+    """
+    return self._events_are_safely_mutable
+
+  @property
+  def active_parts(self):
+    return {p for p in ALL_TRACE_PARTS if p.raw_field_name in self._raw_data}
+
+  @property
+  def metadata_records(self):
+    part_field_names = {p.raw_field_name for p in ALL_TRACE_PARTS}
+    for k, v in self._raw_data.iteritems():
+      if k in part_field_names:
+        continue
+      yield {
+        'name': k,
+        'value': v
+      }
+
+  def HasEventsFor(self, part):
+    return _HasEventsFor(part, self._raw_data)
+
+  def GetEventsFor(self, part):
+    if not self.HasEventsFor(part):
+      return []
+    assert isinstance(part, TraceDataPart)
+    return self._raw_data[part.raw_field_name]
+
+  def Serialize(self, f, gzip_result=False):
+    """Serializes the trace result to a file-like object.
+
+    Always writes in the trace container format.
+    """
+    assert not gzip_result, 'Not implemented'
+    json.dump(self._raw_data, f)
+
+
+class TraceDataBuilder(object):
+  """TraceDataBuilder helps build up a trace from multiple trace agents.
+
+  TraceData is supposed to be immutable, but it is useful during recording to
+  have a mutable version. That is TraceDataBuilder.
+  """
+  def __init__(self):
+    self._raw_data = {}
+
+  def AsData(self):
+    if self._raw_data == None:
+      raise Exception('Can only AsData once')
+
+    data = TraceData()
+    data._SetFromBuilder(self._raw_data)
+    self._raw_data = None
+    return data
+
+  def AddEventsTo(self, part, events):
+    """Note: this won't work when called from multiple browsers.
+
+    Each browser's trace_event_impl zeros its timestamps when it writes them
+    out and doesn't write a timebase that can be used to re-sync them.
+    """
+    assert isinstance(part, TraceDataPart)
+    assert isinstance(events, list)
+    if self._raw_data == None:
+      raise Exception('Already called AsData() on this builder.')
+
+    self._raw_data.setdefault(part.raw_field_name, []).extend(events)
+
+  def HasEventsFor(self, part):
+    return _HasEventsFor(part, self._raw_data)
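
As the TraceData docstring above notes, three raw-data shapes are accepted, and all of them end up keyed under the chrome trace part. A short sketch (event payloads are illustrative):

from telemetry.timeline import trace_data

# 1. Trace container format: a dict keyed by part field names.
d1 = trace_data.TraceData({'traceEvents': [{'ph': 'B'}]})
# 2. A bare list is assumed to be chrome trace events.
d2 = trace_data.TraceData([{'ph': 'B'}])
# 3. A JSON string missing its closing ']' (a truncated trace) is repaired.
d3 = trace_data.TraceData('[{"ph": "B"},')

for d in (d1, d2, d3):
  assert d.HasEventsFor(trace_data.CHROME_TRACE_PART)
  assert len(d.GetEventsFor(trace_data.CHROME_TRACE_PART)) == 1
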
diff --git a/catapult/telemetry/telemetry/timeline/trace_data_unittest.py b/catapult/telemetry/telemetry/timeline/trace_data_unittest.py
new file mode 100644
index 0000000..e0ac642
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/trace_data_unittest.py
@@ -0,0 +1,78 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import cStringIO
+import json
+import unittest
+
+from telemetry.timeline import trace_data
+
+class TraceDataTest(unittest.TestCase):
+  def testSerialize(self):
+    ri = trace_data.TraceData({'traceEvents': [1, 2, 3]})
+    f = cStringIO.StringIO()
+    ri.Serialize(f)
+    d = f.getvalue()
+
+    self.assertIn('traceEvents', d)
+    self.assertIn('[1, 2, 3]', d)
+
+    json.loads(d)
+
+  def testValidateWithNonPrimitiveRaises(self):
+    with self.assertRaises(trace_data.NonSerializableTraceData):
+      trace_data.TraceData({'hello': TraceDataTest})
+
+  def testValidateWithCircularReferenceRaises(self):
+    a = []
+    d = {'foo': a}
+    a.append(d)
+    with self.assertRaises(trace_data.NonSerializableTraceData):
+      trace_data.TraceData(d)
+
+  def testEmptyArrayValue(self):
+    # We can import empty lists and empty string.
+    d = trace_data.TraceData([])
+    self.assertFalse(d.HasEventsFor(trace_data.CHROME_TRACE_PART))
+
+  def testEmptyStringValue(self):
+    d = trace_data.TraceData('')
+    self.assertFalse(d.HasEventsFor(trace_data.CHROME_TRACE_PART))
+
+  def testListForm(self):
+    d = trace_data.TraceData([{'ph': 'B'}])
+    self.assertTrue(d.HasEventsFor(trace_data.CHROME_TRACE_PART))
+    self.assertEquals(1, len(d.GetEventsFor(trace_data.CHROME_TRACE_PART)))
+
+  def testStringForm(self):
+    d = trace_data.TraceData('[{"ph": "B"}]')
+    self.assertTrue(d.HasEventsFor(trace_data.CHROME_TRACE_PART))
+    self.assertEquals(1, len(d.GetEventsFor(trace_data.CHROME_TRACE_PART)))
+
+  def testStringForm2(self):
+    d = trace_data.TraceData('{"inspectorTimelineEvents": [1]}')
+    self.assertTrue(d.HasEventsFor(trace_data.INSPECTOR_TRACE_PART))
+    self.assertEquals(1, len(d.GetEventsFor(trace_data.INSPECTOR_TRACE_PART)))
+
+  def testCorrectlyMalformedStringForm(self):
+    d = trace_data.TraceData("""[
+      {"ph": "B"}""")
+    self.assertTrue(d.HasEventsFor(trace_data.CHROME_TRACE_PART))
+
+  def testCorrectlyMalformedStringForm2(self):
+    d = trace_data.TraceData("""[
+      {"ph": "B"},""")
+    self.assertTrue(d.HasEventsFor(trace_data.CHROME_TRACE_PART))
+
+class TraceDataBuilderTest(unittest.TestCase):
+  def testBasicChrome(self):
+    builder = trace_data.TraceDataBuilder()
+    builder.AddEventsTo(trace_data.CHROME_TRACE_PART, [1, 2, 3])
+    builder.AddEventsTo(trace_data.TAB_ID_PART, ['tab-7'])
+
+    d = builder.AsData()
+    self.assertTrue(d.HasEventsFor(trace_data.CHROME_TRACE_PART))
+    self.assertTrue(d.HasEventsFor(trace_data.TAB_ID_PART))
+
+    self.assertRaises(Exception, builder.AsData)
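
One piece of trace_data.py the tests above do not exercise is metadata_records: any top-level key that is not a known part is surfaced as a metadata record. A sketch of that behavior (the 'clockDomain' key is just an illustrative non-part field):

from telemetry.timeline import trace_data

d = trace_data.TraceData({
    'traceEvents': [{'ph': 'B'}],
    'clockDomain': 'LINUX_CLOCK_MONOTONIC',  # illustrative non-part key
})
assert d.active_parts == {trace_data.CHROME_TRACE_PART}
assert list(d.metadata_records) == [
    {'name': 'clockDomain', 'value': 'LINUX_CLOCK_MONOTONIC'}]
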
diff --git a/catapult/telemetry/telemetry/timeline/trace_event_importer.py b/catapult/telemetry/telemetry/timeline/trace_event_importer.py
new file mode 100644
index 0000000..2113f08
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/trace_event_importer.py
@@ -0,0 +1,448 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""TraceEventImporter imports TraceEvent-formatted data
+into the provided model.
+This is a port of the trace event importer from
+https://code.google.com/p/trace-viewer/
+"""
+
+import collections
+import copy
+
+import telemetry.timeline.async_slice as tracing_async_slice
+import telemetry.timeline.flow_event as tracing_flow_event
+from telemetry.timeline import importer
+from telemetry.timeline import memory_dump_event
+from telemetry.timeline import trace_data as trace_data_module
+
+
+class TraceEventTimelineImporter(importer.TimelineImporter):
+  def __init__(self, model, trace_data):
+    super(TraceEventTimelineImporter, self).__init__(
+        model, trace_data, import_order=1)
+    assert isinstance(trace_data, trace_data_module.TraceData)
+    self._trace_data = trace_data
+
+    self._all_async_events = []
+    self._all_object_events = []
+    self._all_flow_events = []
+    self._all_memory_dumps_by_dump_id = collections.defaultdict(list)
+
+    self._events = trace_data.GetEventsFor(trace_data_module.CHROME_TRACE_PART)
+
+  @staticmethod
+  def GetSupportedPart():
+    return trace_data_module.CHROME_TRACE_PART
+
+  def _GetOrCreateProcess(self, pid):
+    return self._model.GetOrCreateProcess(pid)
+
+  def _DeepCopyIfNeeded(self, obj):
+    if self._trace_data.events_are_safely_mutable:
+      return obj
+    return copy.deepcopy(obj)
+
+  def _ProcessAsyncEvent(self, event):
+    """Helper to process an 'async finish' event, which will close an
+    open slice.
+    """
+    thread = (self._GetOrCreateProcess(event['pid'])
+        .GetOrCreateThread(event['tid']))
+    self._all_async_events.append({
+        'event': event,
+        'thread': thread})
+
+  def _ProcessCounterEvent(self, event):
+    """Helper that creates and adds samples to a Counter object based on
+    'C' phase events.
+    """
+    if 'id' in event:
+      ctr_name = event['name'] + '[' + str(event['id']) + ']'
+    else:
+      ctr_name = event['name']
+
+    ctr = (self._GetOrCreateProcess(event['pid'])
+        .GetOrCreateCounter(event['cat'], ctr_name))
+    # Initialize the counter's series fields if needed.
+    if len(ctr.series_names) == 0:
+      # TODO: implement counter object
+      for series_name in event['args']:
+        ctr.series_names.append(series_name)
+      if len(ctr.series_names) == 0:
+        self._model.import_errors.append('Expected counter ' + event['name'] +
+            ' to have at least one argument to use as a value.')
+        # Drop the counter.
+        del ctr.parent.counters[ctr.full_name]
+        return
+
+    # Add the sample values.
+    ctr.timestamps.append(event['ts'] / 1000.0)
+    for series_name in ctr.series_names:
+      if series_name not in event['args']:
+        ctr.samples.append(0)
+        continue
+      ctr.samples.append(event['args'][series_name])
+
+  def _ProcessObjectEvent(self, event):
+    thread = (self._GetOrCreateProcess(event['pid'])
+      .GetOrCreateThread(event['tid']))
+    self._all_object_events.append({
+        'event': event,
+        'thread': thread})
+
+  def _ProcessDurationEvent(self, event):
+    thread = (self._GetOrCreateProcess(event['pid'])
+      .GetOrCreateThread(event['tid']))
+    if not thread.IsTimestampValidForBeginOrEnd(event['ts'] / 1000.0):
+      self._model.import_errors.append(
+          'Timestamps are moving backward.')
+      return
+
+    if event['ph'] == 'B':
+      thread.BeginSlice(event['cat'],
+                        event['name'],
+                        event['ts'] / 1000.0,
+                        event['tts'] / 1000.0 if 'tts' in event else None,
+                        event['args'])
+    elif event['ph'] == 'E':
+      thread = (self._GetOrCreateProcess(event['pid'])
+        .GetOrCreateThread(event['tid']))
+      if not thread.IsTimestampValidForBeginOrEnd(event['ts'] / 1000.0):
+        self._model.import_errors.append(
+            'Timestamps are moving backward.')
+        return
+      if not thread.open_slice_count:
+        self._model.import_errors.append(
+            'E phase event without a matching B phase event.')
+        return
+
+      new_slice = thread.EndSlice(
+          event['ts'] / 1000.0,
+          event['tts'] / 1000.0 if 'tts' in event else None)
+      for arg_name, arg_value in event.get('args', {}).iteritems():
+        if arg_name in new_slice.args:
+          self._model.import_errors.append(
+              'Both the B and E phases of ' + new_slice.name +
+              ' provided values for argument ' + arg_name + '. ' +
+              'The value of the E phase event will be used.')
+        new_slice.args[arg_name] = arg_value
+
+  def _ProcessCompleteEvent(self, event):
+    thread = (self._GetOrCreateProcess(event['pid'])
+        .GetOrCreateThread(event['tid']))
+    thread.PushCompleteSlice(
+        event['cat'],
+        event['name'],
+        event['ts'] / 1000.0,
+        event['dur'] / 1000.0 if 'dur' in event else None,
+        event['tts'] / 1000.0 if 'tts' in event else None,
+        event['tdur'] / 1000.0 if 'tdur' in event else None,
+        event['args'])
+
+  def _ProcessMarkEvent(self, event):
+    thread = (self._GetOrCreateProcess(event['pid'])
+        .GetOrCreateThread(event['tid']))
+    thread.PushMarkSlice(
+        event['cat'],
+        event['name'],
+        event['ts'] / 1000.0,
+        event['tts'] / 1000.0 if 'tts' in event else None,
+        event['args'] if 'args' in event else None)
+
+  def _ProcessMetadataEvent(self, event):
+    if event['name'] == 'thread_name':
+      thread = (self._GetOrCreateProcess(event['pid'])
+          .GetOrCreateThread(event['tid']))
+      thread.name = event['args']['name']
+    elif event['name'] == 'process_name':
+      process = self._GetOrCreateProcess(event['pid'])
+      process.name = event['args']['name']
+    elif event['name'] == 'process_labels':
+      process = self._GetOrCreateProcess(event['pid'])
+      process.labels = event['args']['labels']
+    elif event['name'] == 'trace_buffer_overflowed':
+      process = self._GetOrCreateProcess(event['pid'])
+      process.SetTraceBufferOverflowTimestamp(event['args']['overflowed_at_ts'])
+    else:
+      self._model.import_errors.append(
+          'Unrecognized metadata name: ' + event['name'])
+
+  def _ProcessInstantEvent(self, event):
+    # Treat an Instant event as a zero-duration slice (begin and end at the
+    # same timestamp).
+    thread = (self._GetOrCreateProcess(event['pid'])
+      .GetOrCreateThread(event['tid']))
+    thread.BeginSlice(event['cat'],
+                      event['name'],
+                      event['ts'] / 1000.0,
+                      args=event.get('args'))
+    thread.EndSlice(event['ts'] / 1000.0)
+
+  def _ProcessSampleEvent(self, event):
+    thread = (self._GetOrCreateProcess(event['pid'])
+        .GetOrCreateThread(event['tid']))
+    thread.AddSample(event['cat'],
+                     event['name'],
+                     event['ts'] / 1000.0,
+                     event.get('args'))
+
+  def _ProcessFlowEvent(self, event):
+    thread = (self._GetOrCreateProcess(event['pid'])
+        .GetOrCreateThread(event['tid']))
+    self._all_flow_events.append({
+        'event': event,
+        'thread': thread})
+
+  def _ProcessMemoryDumpEvents(self, events):
+    # Dictionary to order dumps by id and process.
+    global_dumps = {}
+    for event in events:
+      global_dump = global_dumps.setdefault(event['id'], {})
+      dump_events = global_dump.setdefault(event['pid'], [])
+      dump_events.append(event)
+    for dump_id, global_dump in global_dumps.iteritems():
+      for pid, dump_events in global_dump.iteritems():
+        process = self._GetOrCreateProcess(pid)
+        memory_dump = memory_dump_event.ProcessMemoryDumpEvent(process,
+                                                               dump_events)
+        process.AddMemoryDumpEvent(memory_dump)
+        self._all_memory_dumps_by_dump_id[dump_id].append(memory_dump)
+
+  def ImportEvents(self):
+    """Walks through the events_ list and outputs the structures discovered to
+    model_.
+    """
+    memory_dump_events = []
+    for event in self._events:
+      phase = event.get('ph', None)
+      if phase == 'B' or phase == 'E':
+        self._ProcessDurationEvent(event)
+      elif phase == 'X':
+        self._ProcessCompleteEvent(event)
+      # Note, S, F, T are deprecated and replaced by 'b' and 'e'. For
+      # backwards compatibility continue to support them here.
+      elif phase == 'S' or phase == 'F' or phase == 'T':
+        self._ProcessAsyncEvent(event)
+      elif phase == 'b' or phase == 'e':
+        self._ProcessAsyncEvent(event)
+      # Note, I is historic. The instant event marker got changed, but we
+      # want to support loading old trace files so we have both I and i.
+      elif phase == 'I' or phase == 'i':
+        self._ProcessInstantEvent(event)
+      elif phase == 'P':
+        self._ProcessSampleEvent(event)
+      elif phase == 'C':
+        self._ProcessCounterEvent(event)
+      elif phase == 'M':
+        self._ProcessMetadataEvent(event)
+      elif phase == 'N' or phase == 'D' or phase == 'O':
+        self._ProcessObjectEvent(event)
+      elif phase == 's' or phase == 't' or phase == 'f':
+        self._ProcessFlowEvent(event)
+      elif phase == 'v':
+        memory_dump_events.append(event)
+      elif phase == 'R':
+        self._ProcessMarkEvent(event)
+      else:
+        self._model.import_errors.append('Unrecognized event phase: ' +
+            phase + ' (' + event['name'] + ')')
+
+    # Memory dumps of a process with the same dump id need to be merged before
+    # processing. So, memory dump events are processed all at once.
+    self._ProcessMemoryDumpEvents(memory_dump_events)
+    return self._model
+
+  def FinalizeImport(self):
+    """Called by the Model after all other importers have imported their
+    events."""
+    self._model.UpdateBounds()
+
+    # We need to reupdate the bounds in case the minimum start time changes
+    self._model.UpdateBounds()
+    self._CreateAsyncSlices()
+    self._CreateFlowSlices()
+    self._SetBrowserProcess()
+    self._SetGpuProcess()
+    self._CreateExplicitObjects()
+    self._CreateImplicitObjects()
+    self._CreateMemoryDumps()
+
+  def _CreateAsyncSlices(self):
+    if len(self._all_async_events) == 0:
+      return
+
+    self._all_async_events.sort(key=lambda x: x['event']['ts'])
+
+    async_event_states_by_name_then_id = {}
+
+    all_async_events = self._all_async_events
+    for async_event_state in all_async_events:
+      event = async_event_state['event']
+      name = event.get('name', None)
+      if name is None:
+        self._model.import_errors.append(
+            'Async events (ph: b, e, S, T or F) require a name parameter.')
+        continue
+
+      event_id = event.get('id')
+      if event_id is None:
+        self._model.import_errors.append(
+            'Async events (ph: b, e, S, T or F) require an id parameter.')
+        continue
+
+      # TODO(simonjam): Add a synchronous tick on the appropriate thread.
+
+      if event['ph'] == 'S' or event['ph'] == 'b':
+        if not name in async_event_states_by_name_then_id:
+          async_event_states_by_name_then_id[name] = {}
+        if event_id in async_event_states_by_name_then_id[name]:
+          self._model.import_errors.append(
+              'At %d, a slice of the same id %s was already open.' % (
+                  event['ts'], event_id))
+          continue
+
+        async_event_states_by_name_then_id[name][event_id] = []
+        async_event_states_by_name_then_id[name][event_id].append(
+            async_event_state)
+      else:
+        if name not in async_event_states_by_name_then_id:
+          self._model.import_errors.append(
+              'At %d, no slice named %s was open.' % (event['ts'], name,))
+          continue
+        if event_id not in async_event_states_by_name_then_id[name]:
+          self._model.import_errors.append(
+              'At %d, no slice named %s with id=%s was open.' % (
+                  event['ts'], name, event_id))
+          continue
+        events = async_event_states_by_name_then_id[name][event_id]
+        events.append(async_event_state)
+
+        if event['ph'] == 'F' or event['ph'] == 'e':
+          # Create a slice from start to end.
+          async_slice = tracing_async_slice.AsyncSlice(
+              events[0]['event']['cat'],
+              name,
+              events[0]['event']['ts'] / 1000.0)
+
+          async_slice.duration = ((event['ts'] / 1000.0)
+              - (events[0]['event']['ts'] / 1000.0))
+
+          async_slice.start_thread = events[0]['thread']
+          async_slice.end_thread = async_event_state['thread']
+          if async_slice.start_thread == async_slice.end_thread:
+            if 'tts' in event and 'tts' in events[0]['event']:
+              async_slice.thread_start = events[0]['event']['tts'] / 1000.0
+              async_slice.thread_duration = ((event['tts'] / 1000.0)
+                  - (events[0]['event']['tts'] / 1000.0))
+          async_slice.id = event_id
+          async_slice.args = events[0]['event']['args']
+
+          # Create sub_slices for each step.
+          for j in xrange(1, len(events)):
+            sub_name = name
+            if events[j - 1]['event']['ph'] == 'T':
+              sub_name = name + ':' + events[j - 1]['event']['args']['step']
+            sub_slice = tracing_async_slice.AsyncSlice(
+                events[0]['event']['cat'],
+                sub_name,
+                events[j - 1]['event']['ts'] / 1000.0)
+            sub_slice.parent_slice = async_slice
+
+            sub_slice.duration = ((events[j]['event']['ts'] / 1000.0)
+                - (events[j - 1]['event']['ts'] / 1000.0))
+
+            sub_slice.start_thread = events[j - 1]['thread']
+            sub_slice.end_thread = events[j]['thread']
+            if sub_slice.start_thread == sub_slice.end_thread:
+              if 'tts' in events[j]['event'] and \
+                  'tts' in events[j - 1]['event']:
+                sub_slice.thread_duration = \
+                    ((events[j]['event']['tts'] / 1000.0)
+                        - (events[j - 1]['event']['tts'] / 1000.0))
+
+            sub_slice.id = event_id
+            sub_slice.args = events[j - 1]['event']['args']
+
+            async_slice.AddSubSlice(sub_slice)
+
+          # The args for the finish event go in the last sub_slice.
+          last_slice = async_slice.sub_slices[-1]
+          for arg_name, arg_value in event['args'].iteritems():
+            last_slice.args[arg_name] = arg_value
+
+          # Add |async_slice| to the start-thread's async_slices.
+          async_slice.start_thread.AddAsyncSlice(async_slice)
+          del async_event_states_by_name_then_id[name][event_id]
+
+  def _CreateExplicitObjects(self):
+    # TODO(tengs): Implement object instance parsing
+    pass
+
+  def _CreateImplicitObjects(self):
+    # TODO(tengs): Implement object instance parsing
+    pass
+
+  def _CreateFlowSlices(self):
+    if len(self._all_flow_events) == 0:
+      return
+
+    self._all_flow_events.sort(key=lambda x: x['event']['ts'])
+
+    flow_id_to_event = {}
+    for data in self._all_flow_events:
+      event = data['event']
+      thread = data['thread']
+      if 'name' not in event:
+        self._model.import_errors.append(
+          'Flow events (ph: s, t or f) require a name parameter.')
+        continue
+      if 'id' not in event:
+        self._model.import_errors.append(
+          'Flow events (ph: s, t or f) require an id parameter.')
+        continue
+
+      flow_event = tracing_flow_event.FlowEvent(
+          event['cat'],
+          event['id'],
+          event['name'],
+          event['ts'] / 1000.0,
+          event['args'])
+      thread.AddFlowEvent(flow_event)
+
+      if event['ph'] == 's':
+        if event['id'] in flow_id_to_event:
+          self._model.import_errors.append(
+              'event id %s already seen when encountering start of '
+              'flow event.' % event['id'])
+          continue
+        flow_id_to_event[event['id']] = flow_event
+      elif event['ph'] == 't' or event['ph'] == 'f':
+        if not event['id'] in flow_id_to_event:
+          self._model.import_errors.append(
+            'Found flow phase %s for id: %s but no flow start found.' % (
+                event['ph'], event['id']))
+          continue
+        flow_position = flow_id_to_event[event['id']]
+        self._model.flow_events.append([flow_position, flow_event])
+
+        if event['ph'] == 'f':
+          del flow_id_to_event[event['id']]
+        else:
+          # Make this event the next start event in this flow.
+          flow_id_to_event[event['id']] = flow_event
+
+  def _CreateMemoryDumps(self):
+    self._model.SetGlobalMemoryDumps(
+        memory_dump_event.GlobalMemoryDump(events)
+        for events in self._all_memory_dumps_by_dump_id.itervalues())
+
+  def _SetBrowserProcess(self):
+    for thread in self._model.GetAllThreads():
+      if thread.name == 'CrBrowserMain':
+        self._model.browser_process = thread.parent
+
+  def _SetGpuProcess(self):
+    for thread in self._model.GetAllThreads():
+      if thread.name == 'CrGpuMain':
+        self._model.gpu_process = thread.parent
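
Tying the importer to the builder: 'M' metadata events name threads (and, via _SetBrowserProcess, identify the browser process), while B/E pairs become slices with millisecond durations. A compact sketch using only calls shown in this patch (event payloads are illustrative; raw 'ts' values are in microseconds):

from telemetry.timeline import model as timeline_model
from telemetry.timeline import trace_data as trace_data_module

builder = trace_data_module.TraceDataBuilder()
builder.AddEventsTo(trace_data_module.CHROME_TRACE_PART, [
    {'name': 'thread_name', 'args': {'name': 'CrBrowserMain'},
     'pid': 1, 'ts': 0, 'tid': 1, 'ph': 'M'},
    {'name': 'work', 'args': {}, 'pid': 1, 'ts': 1000, 'cat': 'foo',
     'tid': 1, 'ph': 'B'},
    {'name': 'work', 'args': {}, 'pid': 1, 'ts': 3000, 'cat': 'foo',
     'tid': 1, 'ph': 'E'},
])

m = timeline_model.TimelineModel(builder.AsData())
thread = m.GetAllProcesses()[0].threads[1]
assert thread.name == 'CrBrowserMain'
assert m.browser_process is thread.parent  # set by _SetBrowserProcess
work = thread.all_slices[0]
assert work.name == 'work'
assert abs(work.duration - 2.0) < 1e-6  # 2000 us -> 2.0 ms
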
diff --git a/catapult/telemetry/telemetry/timeline/trace_event_importer_unittest.py b/catapult/telemetry/telemetry/timeline/trace_event_importer_unittest.py
new file mode 100644
index 0000000..049ba00
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/trace_event_importer_unittest.py
@@ -0,0 +1,1134 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=too-many-lines
+
+import unittest
+
+import telemetry.timeline.counter as tracing_counter
+import telemetry.timeline.model as timeline_model
+from telemetry.timeline import trace_data as trace_data_module
+
+
+def FindEventNamed(events, name):
+  for event in events:
+    if event.name == name:
+      return event
+  raise ValueError('No event found with name %s' % name)
+
+class TraceEventTimelineImporterTest(unittest.TestCase):
+
+  def testBasicSingleThreadNonnestedParsing(self):
+    events = [
+      {'name': 'a', 'args': {}, 'pid': 52, 'ts': 520, 'tts': 280, 'cat': 'foo',
+       'tid': 53, 'ph': 'B'},
+      {'name': 'a', 'args': {}, 'pid': 52, 'ts': 560, 'tts': 310, 'cat': 'foo',
+       'tid': 53, 'ph': 'E'},
+      {'name': 'b', 'args': {}, 'pid': 52, 'ts': 629, 'tts': 356, 'cat': 'bar',
+       'tid': 53, 'ph': 'B'},
+      {'name': 'b', 'args': {}, 'pid': 52, 'ts': 631, 'tts': 357, 'cat': 'bar',
+       'tid': 53, 'ph': 'E'},
+      {'name': 'c', 'args': {}, 'pid': 52, 'ts': 633, 'cat': 'baz',
+       'tid': 53, 'ph': 'B'},
+      {'name': 'c', 'args': {}, 'pid': 52, 'ts': 637, 'cat': 'baz',
+       'tid': 53, 'ph': 'E'}
+    ]
+
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data)
+    processes = m.GetAllProcesses()
+    self.assertEqual(1, len(processes))
+    p = processes[0]
+    self.assertEqual(52, p.pid)
+
+    self.assertEqual(1, len(p.threads))
+    t = p.threads[53]
+    self.assertEqual(3, len(t.all_slices))
+    self.assertEqual(53, t.tid)
+    slice_event = t.all_slices[0]
+    self.assertEqual('a', slice_event.name)
+    self.assertEqual('foo', slice_event.category)
+    self.assertAlmostEqual(0, slice_event.start)
+    self.assertAlmostEqual((560 - 520) / 1000.0, slice_event.duration)
+    self.assertAlmostEqual((560 - 520) / 1000.0, slice_event.end)
+    self.assertAlmostEqual(280 / 1000.0, slice_event.thread_start)
+    self.assertAlmostEqual((310 - 280) / 1000.0, slice_event.thread_duration)
+    self.assertAlmostEqual(310 / 1000.0, slice_event.thread_end)
+    self.assertEqual(0, len(slice_event.sub_slices))
+
+    slice_event = t.all_slices[1]
+    self.assertEqual('b', slice_event.name)
+    self.assertEqual('bar', slice_event.category)
+    self.assertAlmostEqual((629 - 520) / 1000.0, slice_event.start)
+    self.assertAlmostEqual((631 - 629) / 1000.0, slice_event.duration)
+    self.assertAlmostEqual((631 - 520) / 1000.0, slice_event.end)
+    self.assertAlmostEqual(356 / 1000.0, slice_event.thread_start)
+    self.assertAlmostEqual((357 - 356) / 1000.0, slice_event.thread_duration)
+    self.assertAlmostEqual(357 / 1000.0, slice_event.thread_end)
+    self.assertEqual(0, len(slice_event.sub_slices))
+
+    slice_event = t.all_slices[2]
+    self.assertEqual('c', slice_event.name)
+    self.assertEqual('baz', slice_event.category)
+    self.assertAlmostEqual((633 - 520) / 1000.0, slice_event.start)
+    self.assertAlmostEqual((637 - 633) / 1000.0, slice_event.duration)
+    self.assertEqual(None, slice_event.thread_start)
+    self.assertEqual(None, slice_event.thread_duration)
+    self.assertEqual(None, slice_event.thread_end)
+    self.assertEqual(0, len(slice_event.sub_slices))
+
+
+  def testArgumentDupeCreatesNonFailingImportError(self):
+    events = [
+      {'name': 'a', 'args': {'x': 1}, 'pid': 1, 'ts': 520, 'cat': 'foo',
+       'tid': 1, 'ph': 'B'},
+      {'name': 'a', 'args': {'x': 2}, 'pid': 1, 'ts': 560, 'cat': 'foo',
+       'tid': 1, 'ph': 'E'}
+    ]
+
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data)
+    processes = m.GetAllProcesses()
+    t = processes[0].threads[1]
+    slice_a = FindEventNamed(t.all_slices, 'a')
+
+    self.assertEqual(2, slice_a.args['x'])
+    self.assertEqual(1, len(m.import_errors))
+
+  def testCategoryBeginEndMismatchPrefersBegin(self):
+    events = [
+      {'name': 'a', 'args': {}, 'pid': 52, 'ts': 520, 'cat': 'foo',
+       'tid': 53, 'ph': 'B'},
+      {'name': 'a', 'args': {}, 'pid': 52, 'ts': 560, 'cat': 'bar',
+       'tid': 53, 'ph': 'E'}
+    ]
+
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data)
+    processes = m.GetAllProcesses()
+    self.assertEqual(1, len(processes))
+    p = processes[0]
+    self.assertEqual(52, p.pid)
+
+    self.assertEqual(1, len(p.threads))
+    t = p.threads[53]
+    self.assertEqual(1, len(t.all_slices))
+    self.assertEqual(53, t.tid)
+    slice_event = t.all_slices[0]
+    self.assertEqual('a', slice_event.name)
+    self.assertEqual('foo', slice_event.category)
+
+  def testNestedParsing(self):
+    events = [
+      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 1, 'tts': 2, 'cat': 'foo',
+       'tid': 1, 'ph': 'B'},
+      {'name': 'b', 'args': {}, 'pid': 1, 'ts': 3, 'tts': 3, 'cat': 'bar',
+       'tid': 1, 'ph': 'B'},
+      {'name': 'b', 'args': {}, 'pid': 1, 'ts': 5, 'tts': 4, 'cat': 'bar',
+       'tid': 1, 'ph': 'E'},
+      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 7, 'tts': 5, 'cat': 'foo',
+       'tid': 1, 'ph': 'E'}
+    ]
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data, shift_world_to_zero=False)
+    t = m.GetAllProcesses()[0].threads[1]
+
+    slice_a = FindEventNamed(t.all_slices, 'a')
+    slice_b = FindEventNamed(t.all_slices, 'b')
+
+    self.assertEqual('a', slice_a.name)
+    self.assertEqual('foo', slice_a.category)
+    self.assertAlmostEqual(0.001, slice_a.start)
+    self.assertAlmostEqual(0.006, slice_a.duration)
+    self.assertAlmostEqual(0.002, slice_a.thread_start)
+    self.assertAlmostEqual(0.003, slice_a.thread_duration)
+
+    self.assertEqual('b', slice_b.name)
+    self.assertEqual('bar', slice_b.category)
+    self.assertAlmostEqual(0.003, slice_b.start)
+    self.assertAlmostEqual(0.002, slice_b.duration)
+    self.assertAlmostEqual(0.003, slice_b.thread_start)
+    self.assertAlmostEqual(0.001, slice_b.thread_duration)
+
+  def testAutoclosing(self):
+    events = [
+      # Slices that don't finish.
+      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 1, 'tts': 1, 'cat': 'foo',
+       'tid': 1, 'ph': 'B'},
+      {'name': 'b', 'args': {}, 'pid': 1, 'ts': 2, 'tts': 2, 'cat': 'foo',
+       'tid': 2, 'ph': 'B'},
+
+      # Slices on thread 1 and 2 that do finish to give an 'end time' to make
+      # autoclosing work.
+      {'name': 'c', 'args': {}, 'pid': 1, 'ts': 2, 'tts': 1.5, 'cat': 'bar',
+       'tid': 1, 'ph': 'B'},
+      {'name': 'c', 'args': {}, 'pid': 1, 'ts': 4, 'tts': 3, 'cat': 'bar',
+       'tid': 1, 'ph': 'E'},
+      {'name': 'd', 'args': {}, 'pid': 1, 'ts': 3, 'tts': 2.5, 'cat': 'bar',
+       'tid': 2, 'ph': 'B'},
+      {'name': 'd', 'args': {}, 'pid': 1, 'ts': 7, 'tts': 5, 'cat': 'bar',
+       'tid': 2, 'ph': 'E'}
+    ]
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data)
+    p = m.GetAllProcesses()[0]
+    t1 = p.threads[1]
+    slice_event = FindEventNamed(t1.all_slices, 'a')
+    self.assertEqual('a', slice_event.name)
+    self.assertEqual('foo', slice_event.category)
+    self.assertTrue(slice_event.did_not_finish)
+    self.assertAlmostEqual(0, slice_event.start)
+    self.assertAlmostEqual((7 - 1) / 1000.0, slice_event.duration)
+    self.assertAlmostEqual(1 / 1000.0, slice_event.thread_start)
+    self.assertAlmostEqual((3 - 1) / 1000.0, slice_event.thread_duration)
+
+    t2 = p.threads[2]
+    slice_event = FindEventNamed(t2.all_slices, 'b')
+    self.assertEqual('b', slice_event.name)
+    self.assertEqual('foo', slice_event.category)
+    self.assertTrue(slice_event.did_not_finish)
+    self.assertAlmostEqual((2 - 1) / 1000.0, slice_event.start)
+    self.assertAlmostEqual((7 - 2) / 1000.0, slice_event.duration)
+    self.assertAlmostEqual(2 / 1000.0, slice_event.thread_start)
+    self.assertAlmostEqual((5 - 2) / 1000.0, slice_event.thread_duration)
+
+  def testAutoclosingLoneBegin(self):
+    events = [
+      # Slice that doesn't finish.
+      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 1, 'tts': 1, 'cat': 'foo',
+       'tid': 1, 'ph': 'B'}
+    ]
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data)
+    p = m.GetAllProcesses()[0]
+    t = p.threads[1]
+    slice_event = t.all_slices[0]
+    self.assertEqual('a', slice_event.name)
+    self.assertEqual('foo', slice_event.category)
+    self.assertTrue(slice_event.did_not_finish)
+    self.assertAlmostEqual(0, slice_event.start)
+    self.assertAlmostEqual(0, slice_event.duration)
+    self.assertAlmostEqual(1 / 1000.0, slice_event.thread_start)
+    self.assertAlmostEqual(0, slice_event.thread_duration)
+
+  def testAutoclosingWithSubTasks(self):
+    events = [
+      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 1, 'cat': 'foo',
+       'tid': 1, 'ph': 'B'},
+      {'name': 'b1', 'args': {}, 'pid': 1, 'ts': 2, 'cat': 'foo',
+       'tid': 1, 'ph': 'B'},
+      {'name': 'b1', 'args': {}, 'pid': 1, 'ts': 3, 'cat': 'foo',
+       'tid': 1, 'ph': 'E'},
+      {'name': 'b2', 'args': {}, 'pid': 1, 'ts': 3, 'cat': 'foo',
+       'tid': 1, 'ph': 'B'}
+    ]
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data, shift_world_to_zero=False)
+    t = m.GetAllProcesses()[0].threads[1]
+
+    slice_a = FindEventNamed(t.all_slices, 'a')
+    slice_b1 = FindEventNamed(t.all_slices, 'b1')
+    slice_b2 = FindEventNamed(t.all_slices, 'b2')
+
+    self.assertAlmostEqual(0.003, slice_a.end)
+    self.assertAlmostEqual(0.003, slice_b1.end)
+    self.assertAlmostEqual(0.003, slice_b2.end)
+
+  def testAutoclosingWithEventsOutsideBounds(self):
+    events = [
+      # Slice that begins before min and ends after max of the other threads.
+      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 0, 'tts': 0, 'cat': 'foo',
+       'tid': 1, 'ph': 'B'},
+      {'name': 'b', 'args': {}, 'pid': 1, 'ts': 6, 'tts': 3, 'cat': 'foo',
+       'tid': 1, 'ph': 'B'},
+
+      # Slice that does finish to give an 'end time' to establish a basis
+      {'name': 'c', 'args': {}, 'pid': 1, 'ts': 2, 'tts': 1, 'cat': 'bar',
+       'tid': 2, 'ph': 'B'},
+      {'name': 'c', 'args': {}, 'pid': 1, 'ts': 4, 'tts': 2, 'cat': 'bar',
+       'tid': 2, 'ph': 'E'}
+    ]
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data, shift_world_to_zero=False)
+    p = m.GetAllProcesses()[0]
+    t1 = p.threads[1]
+    t1_thread_time_bounds = (
+        m._thread_time_bounds[t1]) # pylint: disable=protected-access
+    self.assertAlmostEqual(0.000, t1_thread_time_bounds.min)
+    self.assertAlmostEqual(0.003, t1_thread_time_bounds.max)
+    self.assertEqual(2, len(t1.all_slices))
+
+    slice_event = FindEventNamed(t1.all_slices, 'a')
+    self.assertEqual('a', slice_event.name)
+    self.assertEqual('foo', slice_event.category)
+    self.assertAlmostEqual(0, slice_event.start)
+    self.assertAlmostEqual(0.006, slice_event.duration)
+    self.assertAlmostEqual(0, slice_event.thread_start)
+    self.assertAlmostEqual(0.003, slice_event.thread_duration)
+
+    t2 = p.threads[2]
+    t2_thread_time_bounds = (
+        m._thread_time_bounds[t2]) # pylint: disable=protected-access
+    self.assertAlmostEqual(0.001, t2_thread_time_bounds.min)
+    self.assertAlmostEqual(0.002, t2_thread_time_bounds.max)
+    slice2 = FindEventNamed(t2.all_slices, 'c')
+    self.assertEqual('c', slice2.name)
+    self.assertEqual('bar', slice2.category)
+    self.assertAlmostEqual(0.002, slice2.start)
+    self.assertAlmostEqual(0.002, slice2.duration)
+    self.assertAlmostEqual(0.001, slice2.thread_start)
+    self.assertAlmostEqual(0.001, slice2.thread_duration)
+
+    self.assertAlmostEqual(0.000, m.bounds.min)
+    self.assertAlmostEqual(0.006, m.bounds.max)
+
+  def testNestedAutoclosing(self):
+    events = [
+      # Tasks that don't finish.
+      {'name': 'a1', 'args': {}, 'pid': 1, 'ts': 1, 'cat': 'foo',
+       'tid': 1, 'ph': 'B'},
+      {'name': 'a2', 'args': {}, 'pid': 1, 'ts': 1.5, 'cat': 'foo',
+       'tid': 1, 'ph': 'B'},
+
+      # Slice that does finish to give an 'end time' to make autoclosing work.
+      {'name': 'b', 'args': {}, 'pid': 1, 'ts': 1, 'cat': 'foo',
+       'tid': 2, 'ph': 'B'},
+      {'name': 'b', 'args': {}, 'pid': 1, 'ts': 2, 'cat': 'foo',
+       'tid': 2, 'ph': 'E'}
+    ]
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data, shift_world_to_zero=False)
+    t1 = m.GetAllProcesses()[0].threads[1]
+    t2 = m.GetAllProcesses()[0].threads[2]
+
+    slice_a1 = FindEventNamed(t1.all_slices, 'a1')
+    slice_a2 = FindEventNamed(t1.all_slices, 'a2')
+    FindEventNamed(t2.all_slices, 'b')
+
+    self.assertAlmostEqual(0.002, slice_a1.end)
+    self.assertAlmostEqual(0.002, slice_a2.end)
+
+  def testMultipleThreadParsing(self):
+    events = [
+      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 2, 'tts': 1, 'cat': 'foo',
+       'tid': 1, 'ph': 'B'},
+      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 4, 'tts': 2, 'cat': 'foo',
+       'tid': 1, 'ph': 'E'},
+      {'name': 'b', 'args': {}, 'pid': 1, 'ts': 6, 'tts': 3, 'cat': 'bar',
+       'tid': 2, 'ph': 'B'},
+      {'name': 'b', 'args': {}, 'pid': 1, 'ts': 8, 'tts': 4, 'cat': 'bar',
+       'tid': 2, 'ph': 'E'}
+    ]
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data)
+    processes = m.GetAllProcesses()
+    self.assertEqual(1, len(processes))
+    p = processes[0]
+
+    self.assertEqual(2, len(p.threads))
+
+    # Check thread 1.
+    t = p.threads[1]
+    self.assertAlmostEqual(1, len(t.all_slices))
+    self.assertAlmostEqual(1, t.tid)
+
+    slice_event = t.all_slices[0]
+    self.assertEqual('a', slice_event.name)
+    self.assertEqual('foo', slice_event.category)
+    self.assertAlmostEqual(0, slice_event.start)
+    self.assertAlmostEqual((4 - 2) / 1000.0, slice_event.duration)
+    self.assertAlmostEqual(1 / 1000.0, slice_event.thread_start)
+    self.assertAlmostEqual((2 - 1) / 1000.0, slice_event.thread_duration)
+
+    # Check thread 2.
+    t = p.threads[2]
+    self.assertAlmostEqual(1, len(t.all_slices))
+    self.assertAlmostEqual(2, t.tid)
+
+    slice_event = t.all_slices[0]
+    self.assertEqual('b', slice_event.name)
+    self.assertEqual('bar', slice_event.category)
+    self.assertAlmostEqual((6 - 2) / 1000.0, slice_event.start)
+    self.assertAlmostEqual((8 - 6) / 1000.0, slice_event.duration)
+    self.assertAlmostEqual(3 / 1000.0, slice_event.thread_start)
+    self.assertAlmostEqual((4 - 3) / 1000.0, slice_event.thread_duration)
+
+  def testMultiplePidParsing(self):
+    events = [
+      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 2, 'tts': 1, 'cat': 'foo',
+       'tid': 1, 'ph': 'B'},
+      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 4, 'tts': 2, 'cat': 'foo',
+       'tid': 1, 'ph': 'E'},
+      {'name': 'b', 'args': {}, 'pid': 2, 'ts': 6, 'tts': 3, 'cat': 'bar',
+       'tid': 2, 'ph': 'B'},
+      {'name': 'b', 'args': {}, 'pid': 2, 'ts': 8, 'tts': 4, 'cat': 'bar',
+       'tid': 2, 'ph': 'E'}
+    ]
+
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data)
+    processes = m.GetAllProcesses()
+    self.assertEqual(2, len(processes))
+
+    p = processes[0]
+    self.assertEqual(1, p.pid)
+    self.assertEqual(1, len(p.threads))
+
+    # Check process 1 thread 1.
+    t = p.threads[1]
+    self.assertEqual(1, len(t.all_slices))
+    self.assertEqual(1, t.tid)
+
+    slice_event = t.all_slices[0]
+    self.assertEqual('a', slice_event.name)
+    self.assertEqual('foo', slice_event.category)
+    self.assertAlmostEqual(0, slice_event.start)
+    self.assertAlmostEqual((4 - 2) / 1000.0, slice_event.duration)
+    self.assertAlmostEqual(1 / 1000.0, slice_event.thread_start)
+    self.assertAlmostEqual((2 - 1) / 1000.0, slice_event.thread_duration)
+
+    # Check process 2 thread 2.
+    # TODO: will this be in deterministic order?
+    p = processes[1]
+    self.assertEqual(2, p.pid)
+    self.assertEqual(1, len(p.threads))
+    t = p.threads[2]
+    self.assertEqual(1, len(t.all_slices))
+    self.assertEqual(2, t.tid)
+
+    slice_event = t.all_slices[0]
+    self.assertEqual('b', slice_event.name)
+    self.assertEqual('bar', slice_event.category)
+    self.assertAlmostEqual((6 - 2) / 1000.0, slice_event.start)
+    self.assertAlmostEqual((8 - 6) / 1000.0, slice_event.duration)
+    self.assertAlmostEqual(3 / 1000.0, slice_event.thread_start)
+    self.assertAlmostEqual((4 - 3) / 1000.0, slice_event.thread_duration)
+
+    # Check getAllThreads.
+    self.assertEqual([processes[0].threads[1],
+                      processes[1].threads[2]],
+                      m.GetAllThreads())
+
+  def testThreadNames(self):
+    events = [
+      {'name': 'thread_name', 'args': {'name': 'Thread 1'},
+        'pid': 1, 'ts': 0, 'tid': 1, 'ph': 'M'},
+      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 1, 'cat': 'foo',
+       'tid': 1, 'ph': 'B'},
+      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 2, 'cat': 'foo',
+       'tid': 1, 'ph': 'E'},
+      {'name': 'b', 'args': {}, 'pid': 2, 'ts': 3, 'cat': 'foo',
+       'tid': 2, 'ph': 'B'},
+      {'name': 'b', 'args': {}, 'pid': 2, 'ts': 4, 'cat': 'foo',
+       'tid': 2, 'ph': 'E'},
+      {'name': 'thread_name', 'args': {'name': 'Thread 2'},
+        'pid': 2, 'ts': 0, 'tid': 2, 'ph': 'M'}
+    ]
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data)
+    processes = m.GetAllProcesses()
+    self.assertEqual('Thread 1', processes[0].threads[1].name)
+    self.assertEqual('Thread 2', processes[1].threads[2].name)
+
+  def testParsingWhenEndComesFirst(self):
+    events = [
+      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 1, 'tts': 1, 'cat': 'foo',
+       'tid': 1, 'ph': 'E'},
+      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 4, 'tts': 4, 'cat': 'foo',
+       'tid': 1, 'ph': 'B'},
+      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 5, 'tts': 5, 'cat': 'foo',
+       'tid': 1, 'ph': 'E'}
+    ]
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data, shift_world_to_zero=False)
+    p = m.GetAllProcesses()[0]
+    t = p.threads[1]
+    self.assertEqual(1, len(t.all_slices))
+    self.assertEqual('a', t.all_slices[0].name)
+    self.assertEqual('foo', t.all_slices[0].category)
+    self.assertEqual(0.004, t.all_slices[0].start)
+    self.assertEqual(0.001, t.all_slices[0].duration)
+    self.assertEqual(0.004, t.all_slices[0].thread_start)
+    self.assertEqual(0.001, t.all_slices[0].thread_duration)
+    self.assertEqual(1, len(m.import_errors))
+
+  def testImmediateParsing(self):
+    events = [
+      # Need to include immediates inside a task so the timeline
+      # recentering/zeroing doesn't clobber their timestamp.
+      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 2, 'tts': 1, 'cat': 'foo',
+       'tid': 1, 'ph': 'B'},
+      {'name': 'immediate', 'args': {}, 'pid': 1, 'ts': 4, 'cat': 'bar',
+       'tid': 1, 'ph': 'I'},
+      {'name': 'slower', 'args': {}, 'pid': 1, 'ts': 8, 'cat': 'baz',
+       'tid': 1, 'ph': 'i'},
+      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 8, 'tts': 4, 'cat': 'foo',
+       'tid': 1, 'ph': 'E'}
+    ]
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data, shift_world_to_zero=False)
+    p = m.GetAllProcesses()[0]
+    t = p.threads[1]
+    self.assertEqual(3, len(t.all_slices))
+
+    i = m.GetAllEventsOfName('immediate')[0]
+    self.assertEqual('immediate', i.name)
+    self.assertEqual('bar', i.category)
+    self.assertAlmostEqual(0.004, i.start)
+    self.assertAlmostEqual(0, i.duration)
+
+    slower = m.GetAllEventsOfName('slower')[0]
+    self.assertEqual('slower', slower.name)
+    self.assertEqual('baz', slower.category)
+    self.assertAlmostEqual(0.008, slower.start)
+    self.assertAlmostEqual(0, slower.duration)
+
+    a = m.GetAllEventsOfName('a')[0]
+    self.assertEqual('a', a.name)
+    self.assertEqual('foo', a.category)
+    self.assertAlmostEqual(0.002, a.start)
+    self.assertAlmostEqual(0.006, a.duration)
+    self.assertAlmostEqual(0.001, a.thread_start)
+    self.assertAlmostEqual(0.003, a.thread_duration)
+
+  def testSimpleCounter(self):
+    events = [
+      {'name': 'ctr', 'args': {'value': 0}, 'pid': 1, 'ts': 0, 'cat': 'foo',
+       'tid': 1, 'ph': 'C'},
+      {'name': 'ctr', 'args': {'value': 10}, 'pid': 1, 'ts': 10, 'cat': 'foo',
+       'tid': 1, 'ph': 'C'},
+      {'name': 'ctr', 'args': {'value': 0}, 'pid': 1, 'ts': 20, 'cat': 'foo',
+       'tid': 1, 'ph': 'C'}
+    ]
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data)
+    p = m.GetAllProcesses()[0]
+    ctr = p.counters['foo.ctr']
+
+    self.assertEqual('ctr', ctr.name)
+    self.assertEqual('foo', ctr.category)
+    self.assertEqual(3, ctr.num_samples)
+    self.assertEqual(1, ctr.num_series)
+
+    self.assertEqual(['value'], ctr.series_names)
+    self.assertEqual([0, 0.01, 0.02], ctr.timestamps)
+    self.assertEqual([0, 10, 0], ctr.samples)
+    self.assertEqual([0, 10, 0], ctr.totals)
+    self.assertEqual(10, ctr.max_total)
+
+  def testInstanceCounter(self):
+    events = [
+      {'name': 'ctr', 'args': {'value': 0}, 'pid': 1, 'ts': 0, 'cat': 'foo',
+       'tid': 1,
+       'ph': 'C', 'id': 0},
+      {'name': 'ctr', 'args': {'value': 10}, 'pid': 1, 'ts': 10, 'cat': 'foo',
+       'tid': 1,
+       'ph': 'C', 'id': 0},
+      {'name': 'ctr', 'args': {'value': 10}, 'pid': 1, 'ts': 10, 'cat': 'foo',
+       'tid': 1,
+       'ph': 'C', 'id': 1},
+      {'name': 'ctr', 'args': {'value': 20}, 'pid': 1, 'ts': 15, 'cat': 'foo',
+       'tid': 1,
+       'ph': 'C', 'id': 1},
+      {'name': 'ctr', 'args': {'value': 30}, 'pid': 1, 'ts': 18, 'cat': 'foo',
+       'tid': 1,
+       'ph': 'C', 'id': 1},
+      {'name': 'ctr', 'args': {'value': 40}, 'pid': 1, 'ts': 20, 'cat': 'bar',
+       'tid': 1,
+       'ph': 'C', 'id': 2}
+    ]
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data)
+    p = m.GetAllProcesses()[0]
+    ctr = p.counters['foo.ctr[0]']
+    self.assertEqual('ctr[0]', ctr.name)
+    self.assertEqual('foo', ctr.category)
+    self.assertEqual(2, ctr.num_samples)
+    self.assertEqual(1, ctr.num_series)
+    self.assertEqual([0, 0.01], ctr.timestamps)
+    self.assertEqual([0, 10], ctr.samples)
+
+    ctr = m.GetAllProcesses()[0].counters['foo.ctr[1]']
+    self.assertEqual('ctr[1]', ctr.name)
+    self.assertEqual('foo', ctr.category)
+    self.assertEqual(3, ctr.num_samples)
+    self.assertEqual(1, ctr.num_series)
+    self.assertEqual([0.01, 0.015, 0.018], ctr.timestamps)
+    self.assertEqual([10, 20, 30], ctr.samples)
+
+    ctr = m.GetAllProcesses()[0].counters['bar.ctr[2]']
+    self.assertEqual('ctr[2]', ctr.name)
+    self.assertEqual('bar', ctr.category)
+    self.assertEqual(1, ctr.num_samples)
+    self.assertEqual(1, ctr.num_series)
+    self.assertEqual([0.02], ctr.timestamps)
+    self.assertEqual([40], ctr.samples)
+
+  def testMultiCounterUpdateBounds(self):
+    ctr = tracing_counter.Counter(None, 'testBasicCounter',
+        'testBasicCounter')
+    ctr.series_names = ['value1', 'value2']
+    ctr.timestamps = [0, 1, 2, 3, 4, 5, 6, 7]
+    ctr.samples = [0, 0,
+                   1, 0,
+                   1, 1,
+                   2, 1.1,
+                   3, 0,
+                   1, 7,
+                   3, 0,
+                   3.1, 0.5]
+    ctr.FinalizeImport()
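+    # Judging by the expected values asserted below, FinalizeImport computes a
+    # running per-timestamp sum across the two series (so the second entry of
+    # each pair is that timestamp's total), and max_total is the largest of
+    # those per-timestamp totals.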
+    self.assertEqual(8, ctr.max_total)
+    self.assertEqual([0, 0,
+                       1, 1,
+                       1, 2,
+                       2, 3.1,
+                       3, 3,
+                       1, 8,
+                       3, 3,
+                       3.1, 3.6], ctr.totals)
+
+  def testMultiCounter(self):
+    events = [
+      {'name': 'ctr', 'args': {'value1': 0, 'value2': 7}, 'pid': 1, 'ts': 0,
+       'cat': 'foo', 'tid': 1, 'ph': 'C'},
+      {'name': 'ctr', 'args': {'value1': 10, 'value2': 4}, 'pid': 1, 'ts': 10,
+       'cat': 'foo', 'tid': 1, 'ph': 'C'},
+      {'name': 'ctr', 'args': {'value1': 0, 'value2': 1}, 'pid': 1, 'ts': 20,
+       'cat': 'foo', 'tid': 1, 'ph': 'C'}
+    ]
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data)
+    p = m.GetAllProcesses()[0]
+    ctr = p.counters['foo.ctr']
+    self.assertEqual('ctr', ctr.name)
+
+    self.assertEqual('ctr', ctr.name)
+    self.assertEqual('foo', ctr.category)
+    self.assertEqual(3, ctr.num_samples)
+    self.assertEqual(2, ctr.num_series)
+
+    self.assertEqual(sorted(['value1', 'value2']), sorted(ctr.series_names))
+    self.assertEqual(sorted([0, 0.01, 0.02]), sorted(ctr.timestamps))
+    self.assertEqual(sorted([0, 7, 10, 4, 0, 1]), sorted(ctr.samples))
+    # We can't check ctr.totals here because it can change depending on
+    # the order in which the series names are added.
+    self.assertEqual(14, ctr.max_total)
+
+  def testStartFinishOneSliceOneThread(self):
+    events = [
+      # Time is intentionally out of order.
+      {'name': 'a', 'args': {}, 'pid': 52, 'ts': 560, 'cat': 'cat',
+       'tid': 53,
+         'ph': 'F', 'id': 72},
+      {'name': 'a', 'pid': 52, 'ts': 524, 'cat': 'cat',
+       'tid': 53,
+         'ph': 'S', 'id': 72, 'args': {'foo': 'bar'}}
+    ]
+
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data)
+
+    events = list(m.IterAllEvents())
+    self.assertEqual(2, len(events))
+
+    processes = m.GetAllProcesses()
+    t = processes[0].threads[53]
+    slices = t.async_slices
+    self.assertEqual(1, len(slices))
+    self.assertEqual('a', slices[0].name)
+    self.assertEqual('cat', slices[0].category)
+    self.assertEqual(72, slices[0].id)
+    self.assertEqual('bar', slices[0].args['foo'])
+    self.assertEqual(0, slices[0].start)
+    self.assertAlmostEqual((60 - 24) / 1000.0, slices[0].duration)
+    self.assertEqual(t, slices[0].start_thread)
+    self.assertEqual(t, slices[0].end_thread)
+
+  def testEndArgsAddedToSlice(self):
+    events = [
+      {'name': 'a', 'args': {'x': 1}, 'pid': 52, 'ts': 520, 'cat': 'foo',
+       'tid': 53, 'ph': 'B'},
+      {'name': 'a', 'args': {'y': 2}, 'pid': 52, 'ts': 560, 'cat': 'foo',
+       'tid': 53, 'ph': 'E'}
+    ]
+
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data)
+    processes = m.GetAllProcesses()
+    self.assertEqual(1, len(processes))
+    p = processes[0]
+
+    self.assertEqual(1, len(p.threads))
+    t = p.threads[53]
+    self.assertEqual(1, len(t.all_slices))
+    self.assertEqual(53, t.tid)
+    slice_event = t.all_slices[0]
+    self.assertEqual('a', slice_event.name)
+    self.assertEqual('foo', slice_event.category)
+    self.assertEqual(0, slice_event.start)
+    self.assertEqual(1, slice_event.args['x'])
+    self.assertEqual(2, slice_event.args['y'])
+
+  def testEndArgOverwritesOriginalArgValueIfDuplicated(self):
+    events = [
+      {'name': 'b', 'args': {'z': 3}, 'pid': 52, 'ts': 629, 'cat': 'foo',
+       'tid': 53, 'ph': 'B'},
+      {'name': 'b', 'args': {'z': 4}, 'pid': 52, 'ts': 631, 'cat': 'foo',
+       'tid': 53, 'ph': 'E'}
+    ]
+
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data)
+    processes = m.GetAllProcesses()
+    self.assertEqual(1, len(processes))
+    p = processes[0]
+
+    self.assertEqual(1, len(p.threads))
+    t = p.threads[53]
+    slice_event = t.all_slices[0]
+    self.assertEqual('b', slice_event.name)
+    self.assertEqual('foo', slice_event.category)
+    self.assertEqual(0, slice_event.start)
+    self.assertEqual(4, slice_event.args['z'])
+
+  def testSliceHierarchy(self):
+    """The slice hierarchy should look something like this:
+           [            a            ]
+              [      b      ]  [ d ]
+              [ c ]     [ e ]
+    """
+    events = [
+      {'name': 'a', 'args': {}, 'pid': 52, 'ts': 100, 'cat': 'foo',
+       'tid': 53, 'ph': 'B'},
+      {'name': 'a', 'args': {}, 'pid': 52, 'ts': 200, 'cat': 'foo',
+       'tid': 53, 'ph': 'E'},
+      {'name': 'b', 'args': {}, 'pid': 52, 'ts': 125, 'cat': 'foo',
+       'tid': 53, 'ph': 'B'},
+      {'name': 'b', 'args': {}, 'pid': 52, 'ts': 165, 'cat': 'foo',
+       'tid': 53, 'ph': 'E'},
+      {'name': 'c', 'args': {}, 'pid': 52, 'ts': 125, 'cat': 'foo',
+       'tid': 53, 'ph': 'B'},
+      {'name': 'c', 'args': {}, 'pid': 52, 'ts': 135, 'cat': 'foo',
+       'tid': 53, 'ph': 'E'},
+      {'name': 'd', 'args': {}, 'pid': 52, 'ts': 175, 'cat': 'foo',
+       'tid': 53, 'ph': 'B'},
+      {'name': 'd', 'args': {}, 'pid': 52, 'ts': 190, 'cat': 'foo',
+       'tid': 53, 'ph': 'E'},
+      {'name': 'e', 'args': {}, 'pid': 52, 'ts': 155, 'cat': 'foo',
+       'tid': 53, 'ph': 'B'},
+      {'name': 'e', 'args': {}, 'pid': 52, 'ts': 165, 'cat': 'foo',
+       'tid': 53, 'ph': 'E'}
+    ]
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data, shift_world_to_zero=False)
+    processes = m.GetAllProcesses()
+    self.assertEqual(1, len(processes))
+    p = processes[0]
+
+    self.assertEqual(1, len(p.threads))
+    t = p.threads[53]
+
+    slice_a = t.all_slices[0]
+    self.assertEqual(4, len(slice_a.GetAllSubSlices()))
+    self.assertEqual('a', slice_a.name)
+    self.assertEqual(100 / 1000.0, slice_a.start)
+    self.assertEqual(200 / 1000.0, slice_a.end)
+    self.assertEqual(2, len(slice_a.sub_slices))
+
+    slice_b = slice_a.sub_slices[0]
+    self.assertEqual('b', slice_b.name)
+    self.assertEqual(2, len(slice_b.sub_slices))
+    self.assertEqual('c', slice_b.sub_slices[0].name)
+    self.assertEqual('e', slice_b.sub_slices[1].name)
+
+    slice_d = slice_a.sub_slices[1]
+    self.assertEqual('d', slice_d.name)
+    self.assertEqual(0, len(slice_d.sub_slices))
+
+  def testAsyncEndArgAddedToSlice(self):
+    events = [
+      # Time is intentionally out of order.
+      {'name': 'c', 'args': {'y': 2}, 'pid': 52, 'ts': 560, 'cat': 'foo',
+       'tid': 53,
+         'ph': 'F', 'id': 72},
+      {'name': 'c', 'args': {'x': 1}, 'pid': 52, 'ts': 524, 'cat': 'foo',
+       'tid': 53,
+         'ph': 'S', 'id': 72}
+    ]
+
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data)
+    t = m.GetAllProcesses()[0].threads[53]
+    self.assertEqual(1, len(t.async_slices))
+    parent_slice = t.async_slices[0]
+    self.assertEqual('c', parent_slice.name)
+    self.assertEqual('foo', parent_slice.category)
+
+    self.assertEqual(1, len(parent_slice.sub_slices))
+    sub_slice = parent_slice.sub_slices[0]
+    self.assertEqual(1, sub_slice.args['x'])
+    self.assertEqual(2, sub_slice.args['y'])
+
+  def testAsyncEndArgOverwritesOriginalArgValueIfDuplicated(self):
+    events = [
+      # Time is intentionally out of order.
+      {'name': 'd', 'args': {'z': 4}, 'pid': 52, 'ts': 560, 'cat': 'foo',
+       'tid': 53,
+         'ph': 'F', 'id': 72},
+      {'name': 'd', 'args': {'z': 3}, 'pid': 52, 'ts': 524, 'cat': 'foo',
+       'tid': 53,
+         'ph': 'S', 'id': 72}
+    ]
+
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data)
+    t = m.GetAllProcesses()[0].threads[53]
+    self.assertEqual(1, len(t.async_slices))
+    parent_slice = t.async_slices[0]
+    self.assertEqual('d', parent_slice.name)
+    self.assertEqual('foo', parent_slice.category)
+
+    self.assertEqual(1, len(parent_slice.sub_slices))
+    sub_slice = parent_slice.sub_slices[0]
+    self.assertEqual(4, sub_slice.args['z'])
+
+  def testAsyncStepsInOneThread(self):
+    events = [
+      # Time is intentionally out of order.
+      {'name': 'a', 'args': {'z': 3}, 'pid': 52, 'ts': 560, 'cat': 'foo',
+       'tid': 53, 'ph': 'F', 'id': 72, 'tts': 25},
+      {'name': 'a', 'args': {'step': 's1', 'y': 2}, 'pid': 52, 'ts': 548,
+       'cat': 'foo', 'tid': 53, 'ph': 'T', 'id': 72, 'tts': 20},
+      {'name': 'a', 'args': {'x': 1}, 'pid': 52, 'ts': 524, 'cat': 'foo',
+       'tid': 53, 'ph': 'S', 'id': 72, 'tts': 17}
+    ]
+
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data)
+    t = m.GetAllProcesses()[0].threads[53]
+    self.assertEqual(1, len(t.async_slices))
+    parent_slice = t.async_slices[0]
+    self.assertEqual('a', parent_slice.name)
+    self.assertEqual('foo', parent_slice.category)
+    self.assertEqual(0, parent_slice.start)
+    self.assertAlmostEqual(17/1000.0, parent_slice.thread_start)
+    self.assertAlmostEqual(25/1000.0, parent_slice.thread_end)
+
+    self.assertEqual(2, len(parent_slice.sub_slices))
+    sub_slice = parent_slice.sub_slices[0]
+    self.assertEqual('a', sub_slice.name)
+    self.assertEqual('foo', sub_slice.category)
+    self.assertAlmostEqual(0, sub_slice.start)
+    self.assertAlmostEqual((548 - 524) / 1000.0, sub_slice.duration)
+    self.assertAlmostEqual((20 - 17) / 1000.0, sub_slice.thread_duration)
+    self.assertEqual(1, sub_slice.args['x'])
+
+    sub_slice = parent_slice.sub_slices[1]
+    self.assertEqual('a:s1', sub_slice.name)
+    self.assertEqual('foo', sub_slice.category)
+    self.assertAlmostEqual((548 - 524) / 1000.0, sub_slice.start)
+    self.assertAlmostEqual((560 - 548) / 1000.0, sub_slice.duration)
+    self.assertAlmostEqual((25 - 20) / 1000.0, sub_slice.thread_duration)
+    self.assertEqual(2, sub_slice.args['y'])
+    self.assertEqual(3, sub_slice.args['z'])
+
+  def testAsyncStepsMissingStart(self):
+    events = [
+      # Time is intentionally out of order.
+      {'name': 'a', 'args': {'z': 3}, 'pid': 52, 'ts': 560, 'cat': 'foo',
+       'tid': 53, 'ph': 'F', 'id': 72},
+      {'name': 'a', 'args': {'step': 's1', 'y': 2}, 'pid': 52, 'ts': 548,
+       'cat': 'foo', 'tid': 53, 'ph': 'T', 'id': 72}
+    ]
+
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data)
+    t = m.GetAllProcesses()[0].threads[53]
+    self.assertTrue(t is not None)
+
+  def testAsyncStepsMissingFinish(self):
+    events = [
+      # Time is intentionally out of order.
+      {'name': 'a', 'args': {'step': 's1', 'y': 2}, 'pid': 52, 'ts': 548,
+       'cat': 'foo', 'tid': 53, 'ph': 'T', 'id': 72},
+      {'name': 'a', 'args': {'z': 3}, 'pid': 52, 'ts': 560, 'cat': 'foo',
+       'tid': 53, 'ph': 'S', 'id': 72}
+    ]
+
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data)
+    t = m.GetAllProcesses()[0].threads[53]
+    self.assertTrue(t is not None)
+
+  def testImportSamples(self):
+    events = [
+      {'name': 'a', 'args': {}, 'pid': 52, 'ts': 548, 'cat': 'test',
+       'tid': 53, 'ph': 'P'},
+      {'name': 'b', 'args': {}, 'pid': 52, 'ts': 548, 'cat': 'test',
+       'tid': 53, 'ph': 'P'},
+      {'name': 'c', 'args': {}, 'pid': 52, 'ts': 558, 'cat': 'test',
+       'tid': 53, 'ph': 'P'}
+    ]
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data)
+    p = m.GetAllProcesses()[0]
+    t = p.threads[53]
+    self.assertEqual(3, len(t.samples))
+    self.assertEqual(0.0, t.samples[0].start)
+    self.assertEqual(0.0, t.samples[1].start)
+    self.assertAlmostEqual(0.01, t.samples[2].start)
+    self.assertEqual('a', t.samples[0].name)
+    self.assertEqual('b', t.samples[1].name)
+    self.assertEqual('c', t.samples[2].name)
+    self.assertEqual(0, len(m.import_errors))
+
+  def testImportSamplesMissingArgs(self):
+    events = [
+      {'name': 'a', 'pid': 52, 'ts': 548, 'cat': 'test',
+       'tid': 53, 'ph': 'P'},
+      {'name': 'b', 'pid': 52, 'ts': 548, 'cat': 'test',
+       'tid': 53, 'ph': 'P'},
+      {'name': 'c', 'pid': 52, 'ts': 549, 'cat': 'test',
+       'tid': 53, 'ph': 'P'}
+    ]
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data)
+    p = m.GetAllProcesses()[0]
+    t = p.threads[53]
+    self.assertEqual(3, len(t.samples))
+    self.assertEqual(0, len(m.import_errors))
+
+  def testImportCompleteEvent(self):
+    events = [
+      {'name': 'a', 'args': {}, 'pid': 52, 'ts': 629, 'tts': 538, 'dur': 1,
+       'tdur': 1, 'cat': 'baz', 'tid': 53, 'ph': 'X'},
+      {'name': 'b', 'args': {}, 'pid': 52, 'ts': 730, 'tts': 620, 'dur': 20,
+       'tdur': 14, 'cat': 'foo', 'tid': 53, 'ph': 'X'},
+      {'name': 'c', 'args': {}, 'pid': 52, 'ts': 740, 'tts': 625, 'cat': 'baz',
+       'tid': 53, 'ph': 'X'},
+    ]
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data)
+    p = m.GetAllProcesses()[0]
+    t = p.threads[53]
+    self.assertEqual(3, len(t.all_slices))
+
+    slice_event = t.all_slices[0]
+    self.assertEqual('a', slice_event.name)
+    self.assertAlmostEqual(0.0, slice_event.start)
+    self.assertAlmostEqual(1 / 1000.0, slice_event.duration)
+    self.assertAlmostEqual(538 / 1000.0, slice_event.thread_start)
+    self.assertAlmostEqual(1 / 1000.0, slice_event.thread_duration)
+    self.assertFalse(slice_event.did_not_finish)
+    self.assertEqual(0, len(slice_event.sub_slices))
+
+    slice_event = t.all_slices[1]
+    self.assertEqual('b', slice_event.name)
+    self.assertAlmostEqual((730 - 629) / 1000.0, slice_event.start)
+    self.assertAlmostEqual(20 / 1000.0, slice_event.duration)
+    self.assertAlmostEqual(620 / 1000.0, slice_event.thread_start)
+    self.assertAlmostEqual(14 / 1000.0, slice_event.thread_duration)
+    self.assertFalse(slice_event.did_not_finish)
+    self.assertEqual(1, len(slice_event.sub_slices))
+    self.assertEqual(t.all_slices[2], slice_event.sub_slices[0])
+
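+    # 'c' has no 'dur'/'tdur', so it is flagged did_not_finish; judging by the
+    # expected values below, its duration is extended to the end of the
+    # enclosing slice 'b' (750 us wall clock, 634 us thread time).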
+    slice_event = t.all_slices[2]
+    self.assertEqual('c', slice_event.name)
+    self.assertAlmostEqual((740 - 629) / 1000.0, slice_event.start)
+    self.assertAlmostEqual(10 / 1000.0, slice_event.duration)
+    self.assertAlmostEqual(625 / 1000.0, slice_event.thread_start)
+    self.assertAlmostEqual(9 / 1000.0, slice_event.thread_duration)
+    self.assertTrue(slice_event.did_not_finish)
+    self.assertEqual(0, len(slice_event.sub_slices))
+
+  def testImportMarkEvent(self):
+    events = [
+      {'name': 'a', 'pid': 52, 'ts': 629, 'cat': 'baz', 'tid': 53, 'ph': 'R'},
+      {'name': 'b', 'pid': 52, 'ts': 730, 'cat': 'foo', 'tid': 53, 'ph': 'R'},
+      {'name': 'c', 'pid': 52, 'ts': 740, 'cat': 'baz', 'tid': 53, 'ph': 'R'},
+    ]
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data)
+    p = m.GetAllProcesses()[0]
+    t = p.threads[53]
+    self.assertEqual(3, len(t.all_slices))
+
+    slice_event = t.all_slices[0]
+    self.assertEqual('a', slice_event.name)
+    self.assertEqual('baz', slice_event.category)
+    self.assertAlmostEqual(0.0, slice_event.start)
+    self.assertFalse(slice_event.did_not_finish)
+    self.assertEqual(0, len(slice_event.sub_slices))
+
+    slice_event = t.all_slices[1]
+    self.assertEqual('b', slice_event.name)
+    self.assertEqual('foo', slice_event.category)
+    self.assertAlmostEqual((730 - 629) / 1000.0, slice_event.start)
+    self.assertFalse(slice_event.did_not_finish)
+    self.assertEqual(0, len(slice_event.sub_slices))
+
+    slice_event = t.all_slices[2]
+    self.assertEqual('c', slice_event.name)
+    self.assertEqual('baz', slice_event.category)
+    self.assertAlmostEqual((740 - 629) / 1000.0, slice_event.start)
+    self.assertFalse(slice_event.did_not_finish)
+    self.assertEqual(0, len(slice_event.sub_slices))
+
+  def testImportFlowEvent(self):
+    events = [
+      {'name': 'a', 'cat': 'foo', 'id': 72, 'pid': 52, 'tid': 53, 'ts': 548,
+       'ph': 's', 'args': {}},
+      {'name': 'a', 'cat': 'foo', 'id': 72, 'pid': 52, 'tid': 53, 'ts': 560,
+       'ph': 't', 'args': {}},
+      {'name': 'a', 'cat': 'foo', 'id': 72, 'pid': 52, 'tid': 53, 'ts': 580,
+       'ph': 'f', 'args': {}},
+    ]
+
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data)
+    p = m.GetAllProcesses()[0]
+    t = p.threads[53]
+    self.assertTrue(t is not None)
+    self.assertEqual(2, len(m.flow_events))
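+    # flow_events pairs each event with the next event in the same flow, so
+    # the three phases above (s -> t -> f) yield two pairs.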
+
+    start = m.flow_events[0][0]
+    step = m.flow_events[0][1]
+    finish = m.flow_events[1][1]
+
+    self.assertEqual('a', start.name)
+    self.assertEqual('foo', start.category)
+    self.assertEqual(72, start.event_id)
+    self.assertEqual(0, start.start)
+    self.assertEqual(0, start.duration)
+
+    self.assertEqual(start.name, step.name)
+    self.assertEqual(start.category, step.category)
+    self.assertEqual(start.event_id, step.event_id)
+    self.assertAlmostEqual(12 / 1000.0, step.start)
+    self.assertEqual(0, step.duration)
+
+    self.assertEqual(start.name, finish.name)
+    self.assertEqual(start.category, finish.category)
+    self.assertEqual(start.event_id, finish.event_id)
+    self.assertAlmostEqual((20 + 12) / 1000.0, finish.start)
+    self.assertEqual(0, finish.duration)
+
+  def testImportOutOfOrderFlowEvent(self):
+    events = [
+      {'name': 'a', 'cat': 'foo', 'id': 72, 'pid': 52, 'tid': 53, 'ts': 548,
+       'ph': 's', 'args': {}},
+      {'name': 'b', 'cat': 'foo', 'id': 73, 'pid': 52, 'tid': 53, 'ts': 148,
+       'ph': 's', 'args': {}},
+      {'name': 'b', 'cat': 'foo', 'id': 73, 'pid': 52, 'tid': 53, 'ts': 570,
+       'ph': 'f', 'args': {}},
+      {'name': 'a', 'cat': 'foo', 'id': 72, 'pid': 52, 'tid': 53, 'ts': 560,
+       'ph': 't', 'args': {}},
+      {'name': 'a', 'cat': 'foo', 'id': 72, 'pid': 52, 'tid': 53, 'ts': 580,
+       'ph': 'f', 'args': {}},
+    ]
+
+    expected = [[0.4, 0.412], [0.0, 0.422], [0.412, 0.432]]
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data)
+    self.assertEqual(3, len(m.flow_events))
+
+    for i in range(len(expected)):
+      self.assertAlmostEqual(expected[i][0], m.flow_events[i][0].start)
+      self.assertAlmostEqual(expected[i][1], m.flow_events[i][1].start)
+
+  def testImportErroneousFlowEvent(self):
+    events = [
+      {'name': 'a', 'cat': 'foo', 'id': 70, 'pid': 52, 'tid': 53, 'ts': 548,
+       'ph': 's', 'args': {}},
+      {'name': 'a2', 'cat': 'foo', 'id': 70, 'pid': 52, 'tid': 53, 'ts': 550,
+       'ph': 's', 'args': {}},
+      {'name': 'b', 'cat': 'foo', 'id': 73, 'pid': 52, 'tid': 53, 'ts': 570,
+       'ph': 'f', 'args': {}},
+      {'name': 'a', 'cat': 'foo', 'id': 72, 'pid': 52, 'tid': 53, 'ts': 560,
+       'ph': 't', 'args': {}},
+    ]
+
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data)
+    self.assertEqual(0, len(m.flow_events))
+
+  def testImportMemoryDumpEvents(self):
+    events = [
+      {'name': 'a', 'cat': 'b', 'ph': 'v', 'pid': 52, 'ts': 123,
+       'id': '1234ABCD'},
+      {'name': 'a', 'cat': 'b', 'ph': 'v', 'pid': 54, 'ts': 134,
+       'id': '1234ABCD'},
+      {'name': 'a', 'cat': 'b', 'ph': 'v', 'pid': 52, 'ts': 144,
+       'id': '1234ABCD'},
+      {'name': 'a', 'cat': 'b', 'ph': 'v', 'pid': 52, 'ts': 245,
+       'id': '1234ABDF'},
+      {'name': 'a', 'cat': 'b', 'ph': 'v', 'pid': 54, 'ts': 256,
+       'id': '1234ABDF'},
+      {'name': 'a', 'cat': 'b', 'ph': 'v', 'pid': 52, 'ts': 233,
+       'id': '1234ABDF'},
+    ]
+
+    expected_processes = set([52, 54])
+    expected_results = [['1234ABCD', 0, 21], ['1234ABDF', 110, 23]]
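+    # Each global dump groups the 'v' events that share an id; judging by the
+    # expected values, its start is the earliest such event (after shifting
+    # the trace to start at zero) and its duration runs to the latest one.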
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data)
+    assert set(p.pid for p in m.GetAllProcesses()) == expected_processes
+
+    memory_dumps = list(m.IterGlobalMemoryDumps())
+    self.assertEqual(len(expected_results), len(memory_dumps))
+    for memory_dump, test_values in zip(memory_dumps, expected_results):
+      assert len(list(memory_dump.IterProcessMemoryDumps())) == len(
+          expected_processes)
+      dump_id, start, duration = test_values
+      self.assertEqual(dump_id, memory_dump.dump_id)
+      self.assertAlmostEqual(start / 1000.0, memory_dump.start)
+      self.assertAlmostEqual(duration / 1000.0, memory_dump.duration)
+
+  def testImportOutOfOrderMemoryDumpEvents(self):
+    events = [
+      {'name': 'a', 'cat': 'b', 'ph': 'v', 'pid': 52, 'ts': 245,
+       'id': '1234ABDF'},
+      {'name': 'a', 'cat': 'b', 'ph': 'v', 'pid': 54, 'ts': 134,
+       'id': '1234ABCD'},
+      {'name': 'a', 'cat': 'b', 'ph': 'v', 'pid': 54, 'ts': 256,
+       'id': '1234ABDF'},
+      {'name': 'a', 'cat': 'b', 'ph': 'v', 'pid': 52, 'ts': 123,
+       'id': '1234ABCD'},
+    ]
+
+    expected = [['1234ABCD', 0, 11], ['1234ABDF', 122, 11]]
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data)
+    memory_dumps = list(m.IterGlobalMemoryDumps())
+    self.assertEqual(len(expected), len(memory_dumps))
+    for memory_dump, test_values in zip(memory_dumps, expected):
+      dump_id, start, duration = test_values
+      self.assertEqual(dump_id, memory_dump.dump_id)
+      self.assertAlmostEqual(start / 1000.0, memory_dump.start)
+      self.assertAlmostEqual(duration / 1000.0, memory_dump.duration)
+
+  def testMetadataImport(self):
+    events = [
+      {'cat': '__metadata', 'pid': 14689, 'tid': 14740, 'ts': 245,
+       'ph': 'M', 'name': 'process_name', 'args': {'name': 'Browser'}},
+      {'cat': '__metadata', 'pid': 23828, 'tid': 23828, 'ts': 0,
+       'ph': 'M', 'name': 'process_labels',
+       'args': {'labels': 'huge image - Google Search'}}
+    ]
+
+    expected = [
+      [None, 'Browser'],
+      ['huge image - Google Search', 'process 23828']
+    ]
+    trace_data = trace_data_module.TraceData(events)
+    m = timeline_model.TimelineModel(trace_data)
+    processes = m.GetAllProcesses()
+
+    self.assertEqual(len(processes), len(expected))
+    for process, test_values in zip(processes, expected):
+      process_labels, process_name = test_values
+      self.assertEqual(process_labels, process.labels)
+      self.assertEqual(process_name, process.name)
diff --git a/catapult/telemetry/telemetry/timeline/tracing_category_filter.py b/catapult/telemetry/telemetry/timeline/tracing_category_filter.py
new file mode 100644
index 0000000..c417d13
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/tracing_category_filter.py
@@ -0,0 +1,202 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import re
+
+
+def CreateNoOverheadFilter():
+  """Returns a filter with the least overhead possible.
+
+  This contains no sub-traces of thread tasks, so it's only useful for
+  capturing the cpu-time spent on threads (as well as needed benchmark
+  traces).
+
+  FIXME: Remove webkit.console when blink.console lands in chromium and
+  the ref builds are updated. crbug.com/386847
+  """
+  categories = [
+    "toplevel",
+    "benchmark",
+    "webkit.console",
+    "blink.console",
+    "trace_event_overhead"
+  ]
+  return TracingCategoryFilter(filter_string=','.join(categories))
+
+
+def CreateMinimalOverheadFilter():
+  """Returns a filter with the best-effort amount of overhead."""
+  return TracingCategoryFilter(filter_string='')
+
+
+def CreateDebugOverheadFilter():
+  """Returns a filter with as many traces enabled as is useful."""
+  return TracingCategoryFilter(filter_string='*,disabled-by-default-cc.debug')
+
+
+_delay_re = re.compile(r'DELAY[(][A-Za-z0-9._;]+[)]')
+
+
+class TracingCategoryFilter(object):
+  """A set of included and excluded categories that should be traced.
+
+  The TracingCategoryFilter allows fine-tuning of what data is traced. Basic
+  choice of which tracers to use is done by TracingOptions.
+
+  Providing filter_string=None gives the default category filter, which leaves
+  what to trace up to the individual trace systems.
+  """
+  def __init__(self, filter_string=None):
+    self._included_categories = set()
+    self._excluded_categories = set()
+    self._disabled_by_default_categories = set()
+    self._synthetic_delays = set()
+    self.contains_wildcards = False
+    self.AddFilterString(filter_string)
+
+  def AddFilterString(self, filter_string):
+    if filter_string is None:
+      return
+
+    if '*' in filter_string or '?' in filter_string:
+      self.contains_wildcards = True
+
+    filter_set = set([cf.strip() for cf in filter_string.split(',')])
+    for category in filter_set:
+      if category == '':
+        continue
+
+      if _delay_re.match(category):
+        self._synthetic_delays.add(category)
+        continue
+
+      if category[0] == '-':
+        assert not category[1:] in self._included_categories
+        self._excluded_categories.add(category[1:])
+        continue
+
+      if category.startswith('disabled-by-default-'):
+        self._disabled_by_default_categories.add(category)
+        continue
+
+      assert not category in self._excluded_categories
+      self._included_categories.add(category)
+
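+  # For illustration (the category names here are made up), parsing the string
+  #   'cc,-webkit,disabled-by-default-gpu.debug,DELAY(gpu.SwapBuffers;16)'
+  # with the rules in AddFilterString above yields:
+  #   included_categories            == set(['cc'])
+  #   excluded_categories            == set(['webkit'])
+  #   disabled_by_default_categories == set(['disabled-by-default-gpu.debug'])
+  #   synthetic_delays               == set(['DELAY(gpu.SwapBuffers;16)'])
+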
+  @property
+  def included_categories(self):
+    return self._included_categories
+
+  @property
+  def excluded_categories(self):
+    return self._excluded_categories
+
+  @property
+  def disabled_by_default_categories(self):
+    return self._disabled_by_default_categories
+
+  @property
+  def synthetic_delays(self):
+    return self._synthetic_delays
+
+  @property
+  def filter_string(self):
+    return self._GetFilterString(stable_output=False)
+
+  @property
+  def stable_filter_string(self):
+    return self._GetFilterString(stable_output=True)
+
+  def _GetFilterString(self, stable_output):
+    # Note: This outputs fields in an order that intentionally matches
+    # trace_event_impl's CategoryFilter string order.
+    lists = []
+    lists.append(self._included_categories)
+    lists.append(self._disabled_by_default_categories)
+    lists.append(['-%s' % x for x in self._excluded_categories])
+    lists.append(self._synthetic_delays)
+    categories = []
+    for l in lists:
+      if stable_output:
+        l = list(l)
+        l.sort()
+      categories.extend(l)
+    return ','.join(categories)
+
+  def GetDictForChromeTracing(self):
+    INCLUDED_CATEGORIES_PARAM = 'included_categories'
+    EXCLUDED_CATEGORIES_PARAM = 'excluded_categories'
+    SYNTHETIC_DELAYS_PARAM = 'synthetic_delays'
+
+    result = {}
+    if self._included_categories or self._disabled_by_default_categories:
+      result[INCLUDED_CATEGORIES_PARAM] = list(
+        self._included_categories | self._disabled_by_default_categories)
+    if self._excluded_categories:
+      result[EXCLUDED_CATEGORIES_PARAM] = list(self._excluded_categories)
+    if self._synthetic_delays:
+      result[SYNTHETIC_DELAYS_PARAM] = list(self._synthetic_delays)
+    return result
+
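+  # For example, a filter built from 'x,-y,disabled-by-default-z,DELAY(7;foo)'
+  # produces (modulo list ordering, since the categories are stored in sets):
+  #   {'included_categories': ['x', 'disabled-by-default-z'],
+  #    'excluded_categories': ['y'],
+  #    'synthetic_delays': ['DELAY(7;foo)']}
+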
+  def AddDisabledByDefault(self, category):
+    assert category.startswith('disabled-by-default-')
+    self._disabled_by_default_categories.add(category)
+
+  def AddIncludedCategory(self, category_glob):
+    """Explicitly enables anything matching category_glob."""
+    assert not category_glob.startswith('disabled-by-default-')
+    assert not category_glob in self._excluded_categories
+    self._included_categories.add(category_glob)
+
+  def AddExcludedCategory(self, category_glob):
+    """Explicitly disables anything matching category_glob."""
+    assert not category_glob.startswith('disabled-by-default-')
+    assert not category_glob in self._included_categories
+    self._excluded_categories.add(category_glob)
+
+  def AddSyntheticDelay(self, delay):
+    assert _delay_re.match(delay)
+    self._synthetic_delays.add(delay)
+
+  def IsSubset(self, other):
+    """ Determine if filter A (self) is a subset of filter B (other).
+        Returns True if A is a subset of B, False if A is not a subset of B,
+        and None if we can't tell for sure.
+    """
+    # We don't handle filters with wildcards in this test.
+    if self.contains_wildcards or other.contains_wildcards:
+      return None
+
+    # Disabled categories get into a trace if and only if they are contained in
+    # the 'disabled' set. Return False if A's disabled set is not a subset of
+    # B's disabled set.
+    if not self.disabled_by_default_categories <= \
+       other.disabled_by_default_categories:
+      return False
+
+    # If A defines more or different synthetic delays than B, then A is not a
+    # subset.
+    if not self.synthetic_delays <= other.synthetic_delays:
+      return False
+
+    if self.included_categories and other.included_categories:
+      # A and B have explicit include lists. If A includes something that B
+      # doesn't, return False.
+      if not self.included_categories <= other.included_categories:
+        return False
+    elif self.included_categories:
+      # Only A has an explicit include list. If A includes something that B
+      # excludes, return False.
+      if self.included_categories.intersection(other.excluded_categories):
+        return False
+    elif other.included_categories:
+      # Only B has an explicit include list. We don't know which categories are
+      # contained in the default list, so return None.
+      return None
+    else:
+      # Neither filter has an explicit include list. If B excludes categories
+      # that A doesn't exclude, return False.
+      if not other.excluded_categories <= self.excluded_categories:
+        return False
+
+    return True
diff --git a/catapult/telemetry/telemetry/timeline/tracing_category_filter_unittest.py b/catapult/telemetry/telemetry/timeline/tracing_category_filter_unittest.py
new file mode 100644
index 0000000..5569afa
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/tracing_category_filter_unittest.py
@@ -0,0 +1,157 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.timeline import tracing_category_filter
+
+
+class TracingCategoryFilterTests(unittest.TestCase):
+  def CheckCategoryFilters(self, cf):
+    self.assertEquals(set(['x']), set(cf.included_categories))
+    self.assertEquals(set(['y']), set(cf.excluded_categories))
+    self.assertEquals(set(['disabled-by-default-z']),
+        set(cf.disabled_by_default_categories))
+    self.assertEquals(set(['DELAY(7;foo)']), set(cf.synthetic_delays))
+
+    self.assertTrue('x' in cf.filter_string)
+    self.assertEquals(
+        'x,disabled-by-default-z,-y,DELAY(7;foo)',
+        cf.stable_filter_string)
+
+  def testBasic(self):
+    cf = tracing_category_filter.TracingCategoryFilter(
+        'x,-y,disabled-by-default-z,DELAY(7;foo)')
+    self.CheckCategoryFilters(cf)
+
+  def testBasicWithSpace(self):
+    cf = tracing_category_filter.TracingCategoryFilter(
+        ' x ,\n-y\t,disabled-by-default-z ,DELAY(7;foo)')
+    self.CheckCategoryFilters(cf)
+
+
+class CategoryFilterTest(unittest.TestCase):
+  def testAddIncludedCategory(self):
+    a = tracing_category_filter.TracingCategoryFilter()
+    a.AddIncludedCategory('foo')
+    a.AddIncludedCategory('bar')
+    a.AddIncludedCategory('foo')
+    self.assertEquals(a.stable_filter_string, 'bar,foo')
+
+  def testAddExcludedCategory(self):
+    a = tracing_category_filter.TracingCategoryFilter()
+    a.AddExcludedCategory('foo')
+    a.AddExcludedCategory('bar')
+    a.AddExcludedCategory('foo')
+    self.assertEquals(a.stable_filter_string, '-bar,-foo')
+
+  def testIncludeAndExcludeCategoryRaisesAssertion(self):
+    a = tracing_category_filter.TracingCategoryFilter()
+    a.AddIncludedCategory('foo')
+    self.assertRaises(AssertionError, a.AddExcludedCategory, 'foo')
+
+    a = tracing_category_filter.TracingCategoryFilter()
+    a.AddExcludedCategory('foo')
+    self.assertRaises(AssertionError, a.AddIncludedCategory, 'foo')
+
+    self.assertRaises(AssertionError,
+                      tracing_category_filter.TracingCategoryFilter, 'foo,-foo')
+
+    self.assertRaises(AssertionError,
+                      tracing_category_filter.TracingCategoryFilter, '-foo,foo')
+
+  def testIsSubset(self):
+    b = tracing_category_filter.TracingCategoryFilter()
+    a = tracing_category_filter.TracingCategoryFilter()
+    self.assertEquals(a.IsSubset(b), True)
+
+    b = tracing_category_filter.TracingCategoryFilter()
+    a = tracing_category_filter.TracingCategoryFilter("test1,test2")
+    self.assertEquals(a.IsSubset(b), True)
+
+    b = tracing_category_filter.TracingCategoryFilter()
+    a = tracing_category_filter.TracingCategoryFilter("-test1,-test2")
+    self.assertEquals(a.IsSubset(b), True)
+
+    b = tracing_category_filter.TracingCategoryFilter("test1,test2")
+    a = tracing_category_filter.TracingCategoryFilter()
+    self.assertEquals(a.IsSubset(b), None)
+
+    b = tracing_category_filter.TracingCategoryFilter()
+    a = tracing_category_filter.TracingCategoryFilter("test*")
+    self.assertEquals(a.IsSubset(b), None)
+
+    b = tracing_category_filter.TracingCategoryFilter("test?")
+    a = tracing_category_filter.TracingCategoryFilter()
+    self.assertEquals(a.IsSubset(b), None)
+
+    b = tracing_category_filter.TracingCategoryFilter("test1")
+    a = tracing_category_filter.TracingCategoryFilter("test1,test2")
+    self.assertEquals(a.IsSubset(b), False)
+
+    b = tracing_category_filter.TracingCategoryFilter("-test1")
+    a = tracing_category_filter.TracingCategoryFilter("test1")
+    self.assertEquals(a.IsSubset(b), False)
+
+    b = tracing_category_filter.TracingCategoryFilter("test1,test2")
+    a = tracing_category_filter.TracingCategoryFilter("test2,test1")
+    self.assertEquals(a.IsSubset(b), True)
+
+    b = tracing_category_filter.TracingCategoryFilter("-test1,-test2")
+    a = tracing_category_filter.TracingCategoryFilter("-test2")
+    self.assertEquals(a.IsSubset(b), False)
+
+    b = tracing_category_filter.TracingCategoryFilter(
+        "disabled-by-default-test1")
+    a = tracing_category_filter.TracingCategoryFilter(
+        "disabled-by-default-test1,disabled-by-default-test2")
+    self.assertEquals(a.IsSubset(b), False)
+
+    b = tracing_category_filter.TracingCategoryFilter(
+        "disabled-by-default-test1")
+    a = tracing_category_filter.TracingCategoryFilter(
+        "disabled-by-default-test2")
+    self.assertEquals(a.IsSubset(b), False)
+
+  def testIsSubsetWithSyntheticDelays(self):
+    b = tracing_category_filter.TracingCategoryFilter("DELAY(foo;0.016)")
+    a = tracing_category_filter.TracingCategoryFilter("DELAY(foo;0.016)")
+    self.assertEquals(a.IsSubset(b), True)
+
+    b = tracing_category_filter.TracingCategoryFilter("DELAY(foo;0.016)")
+    a = tracing_category_filter.TracingCategoryFilter()
+    self.assertEquals(a.IsSubset(b), True)
+
+    b = tracing_category_filter.TracingCategoryFilter()
+    a = tracing_category_filter.TracingCategoryFilter("DELAY(foo;0.016)")
+    self.assertEquals(a.IsSubset(b), False)
+
+    b = tracing_category_filter.TracingCategoryFilter("DELAY(foo;0.016)")
+    a = tracing_category_filter.TracingCategoryFilter("DELAY(foo;0.032)")
+    self.assertEquals(a.IsSubset(b), False)
+
+    b = tracing_category_filter.TracingCategoryFilter(
+        "DELAY(foo;0.016;static)")
+    a = tracing_category_filter.TracingCategoryFilter(
+        "DELAY(foo;0.016;oneshot)")
+    self.assertEquals(a.IsSubset(b), False)
+
+    b = tracing_category_filter.TracingCategoryFilter(
+        "DELAY(foo;0.016),DELAY(bar;0.1)")
+    a = tracing_category_filter.TracingCategoryFilter(
+        "DELAY(bar;0.1),DELAY(foo;0.016)")
+    self.assertEquals(a.IsSubset(b), True)
+
+    b = tracing_category_filter.TracingCategoryFilter(
+        "DELAY(foo;0.016),DELAY(bar;0.1)")
+    a = tracing_category_filter.TracingCategoryFilter(
+        "DELAY(bar;0.1)")
+    self.assertEquals(a.IsSubset(b), True)
+
+    b = tracing_category_filter.TracingCategoryFilter(
+        "DELAY(foo;0.016),DELAY(bar;0.1)")
+    a = tracing_category_filter.TracingCategoryFilter(
+        "DELAY(foo;0.032),DELAY(bar;0.1)")
+    self.assertEquals(a.IsSubset(b), False)
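+
+  # Illustrative addition (not part of the original suite): exercises
+  # GetDictForChromeTracing() as implemented in tracing_category_filter.py.
+  # Set comparison is used for the included categories because they are kept
+  # in unordered sets.
+  def testGetDictForChromeTracing(self):
+    cf = tracing_category_filter.TracingCategoryFilter(
+        'x,-y,disabled-by-default-z,DELAY(7;foo)')
+    result = cf.GetDictForChromeTracing()
+    self.assertEquals(set(['x', 'disabled-by-default-z']),
+                      set(result['included_categories']))
+    self.assertEquals(['y'], result['excluded_categories'])
+    self.assertEquals(['DELAY(7;foo)'], result['synthetic_delays'])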
diff --git a/catapult/telemetry/telemetry/timeline/tracing_config.py b/catapult/telemetry/telemetry/timeline/tracing_config.py
new file mode 100644
index 0000000..365f71d
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/tracing_config.py
@@ -0,0 +1,181 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+
+from telemetry.timeline import tracing_category_filter
+
+ECHO_TO_CONSOLE = 'trace-to-console'
+ENABLE_SYSTRACE = 'enable-systrace'
+RECORD_AS_MUCH_AS_POSSIBLE = 'record-as-much-as-possible'
+RECORD_CONTINUOUSLY = 'record-continuously'
+RECORD_UNTIL_FULL = 'record-until-full'
+
+# Map telemetry's tracing record_mode to the DevTools API string.
+# (The keys happen to be the same as the values.)
+RECORD_MODE_MAP = {
+  RECORD_UNTIL_FULL: 'record-until-full',
+  RECORD_CONTINUOUSLY: 'record-continuously',
+  RECORD_AS_MUCH_AS_POSSIBLE: 'record-as-much-as-possible',
+  ECHO_TO_CONSOLE: 'trace-to-console'
+}
+
+
+class MemoryDumpConfig(object):
+  """Stores the triggers for memory dumps in tracing config."""
+  def __init__(self):
+    self._triggers = []
+
+  def AddTrigger(self, mode, periodic_interval_ms):
+    """Adds a new trigger to config.
+
+    Args:
+      mode: Memory dump level of detail string. Valid values are "light"
+          and "detailed".
+      periodic_interval_ms: Dump time period in milliseconds.
+    """
+    assert mode in ['light', 'detailed']
+    assert periodic_interval_ms > 0
+    self._triggers.append({'mode': mode,
+                           'periodic_interval_ms': periodic_interval_ms})
+
+  def GetDictForChromeTracing(self):
+    """Returns the dump config as dictionary for chrome tracing."""
+    # An empty trigger list would mean no periodic memory dumps.
+    return {'memory_dump_config': {'triggers': self._triggers}}
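+  # For example, after AddTrigger('light', 250) the returned dictionary is:
+  #   {'memory_dump_config':
+  #       {'triggers': [{'mode': 'light', 'periodic_interval_ms': 250}]}}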
+
+
+class TracingConfig(object):
+  """Tracing config is the configuration for Chrome tracing.
+
+  This produces the trace config JSON string for Chrome tracing. For the details
+  about the JSON string format, see base/trace_event/trace_config.h.
+
+  Contains tracing options:
+  Tracing options control which core tracing systems should be enabled.
+
+  This simply turns on those systems. If those systems have additional options,
+  e.g. what to trace, then they are typically configured by adding
+  categories to the TracingCategoryFilter.
+
+  Options:
+      enable_chrome_trace: a boolean that specifies whether to enable
+          chrome tracing.
+      enable_platform_display_trace: a boolean that specifies whether to
+          enable platform display tracing.
+      enable_android_graphics_memtrack: a boolean that specifies whether
+          to enable the memtrack_helper daemon to track graphics memory on
+          Android (see goo.gl/4Y30p9). Doesn't have any effects on other OSs.
+
+      The following ones are specific to chrome tracing. See
+      base/trace_event/trace_config.h for more information.
+          record_mode: can be any mode in RECORD_MODE_MAP. This corresponds to
+                       record modes in chrome.
+          enable_systrace: a boolean that specifies whether to enable systrace.
+
+  """
+
+  def __init__(self):
+    # Trace options.
+    self.enable_chrome_trace = False
+    self.enable_platform_display_trace = False
+    self.enable_android_graphics_memtrack = False
+    self._record_mode = RECORD_AS_MUCH_AS_POSSIBLE
+    self._enable_systrace = False
+    # Tracing category filter.
+    self._tracing_category_filter = (
+        tracing_category_filter.TracingCategoryFilter())
+    self._memory_dump_config = None
+
+  @property
+  def tracing_category_filter(self):
+    return self._tracing_category_filter
+
+  def GetChromeTraceConfigJsonString(self):
+    result = {}
+    result.update(self.GetDictForChromeTracing())
+    result.update(self._tracing_category_filter.GetDictForChromeTracing())
+    if self._memory_dump_config:
+      result.update(self._memory_dump_config.GetDictForChromeTracing())
+    return json.dumps(result, sort_keys=True)
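+  # With the defaults above this returns
+  #   '{"record_mode": "record-as-much-as-possible"}'
+  # and additional keys appear once systrace, a non-empty category filter or
+  # a memory dump config are set.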
+
+  def SetNoOverheadFilter(self):
+    """Sets a filter with the least overhead possible.
+
+    This contains no sub-traces of thread tasks, so it's only useful for
+    capturing the cpu-time spent on threads (as well as needed benchmark
+    traces).
+
+    FIXME: Remove webkit.console when blink.console lands in chromium and
+    the ref builds are updated. crbug.com/386847
+    """
+    categories = [
+      "toplevel",
+      "benchmark",
+      "webkit.console",
+      "blink.console",
+      "trace_event_overhead"
+    ]
+    self._tracing_category_filter = (
+        tracing_category_filter.TracingCategoryFilter(
+            filter_string=','.join(categories)))
+
+  def SetMinimalOverheadFilter(self):
+    self._tracing_category_filter = (
+        tracing_category_filter.TracingCategoryFilter(filter_string=''))
+
+  def SetDebugOverheadFilter(self):
+    self._tracing_category_filter = (
+        tracing_category_filter.TracingCategoryFilter(
+            filter_string='*,disabled-by-default-cc.debug'))
+
+  def SetTracingCategoryFilter(self, cf):
+    if isinstance(cf, tracing_category_filter.TracingCategoryFilter):
+      self._tracing_category_filter = cf
+    else:
+      raise TypeError(
+          'Must pass SetTracingCategoryFilter a TracingCategoryFilter instance')
+
+  def SetMemoryDumpConfig(self, dump_config):
+    if isinstance(dump_config, MemoryDumpConfig):
+      self._memory_dump_config = dump_config
+    else:
+      raise TypeError(
+          'Must pass SetMemoryDumpConfig a MemoryDumpConfig instance')
+
+  # Trace Options
+  @property
+  def record_mode(self):
+    return self._record_mode
+
+  @record_mode.setter
+  def record_mode(self, value):
+    assert value in RECORD_MODE_MAP
+    self._record_mode = value
+
+  @property
+  def enable_systrace(self):
+    return self._enable_systrace
+
+  @enable_systrace.setter
+  def enable_systrace(self, value):
+    self._enable_systrace = value
+
+  def GetTraceOptionsStringForChromeDevtool(self):
+    """Map Chrome tracing options in Telemetry to the DevTools API string."""
+    result = [RECORD_MODE_MAP[self._record_mode]]
+    if self._enable_systrace:
+      result.append(ENABLE_SYSTRACE)
+    return ','.join(result)
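+  # For example, with record_mode set to RECORD_UNTIL_FULL and enable_systrace
+  # set to True this returns 'record-until-full,enable-systrace'.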
+
+  def GetDictForChromeTracing(self):
+    RECORD_MODE_PARAM = 'record_mode'
+    ENABLE_SYSTRACE_PARAM = 'enable_systrace'
+
+    result = {}
+    result[RECORD_MODE_PARAM] = (
+        RECORD_MODE_MAP[self._record_mode])
+    if self._enable_systrace:
+      result[ENABLE_SYSTRACE_PARAM] = True
+    return result
diff --git a/catapult/telemetry/telemetry/timeline/tracing_config_unittest.py b/catapult/telemetry/telemetry/timeline/tracing_config_unittest.py
new file mode 100644
index 0000000..06578bc
--- /dev/null
+++ b/catapult/telemetry/telemetry/timeline/tracing_config_unittest.py
@@ -0,0 +1,64 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.timeline import tracing_category_filter
+from telemetry.timeline import tracing_config
+
+
+class TracingConfigTests(unittest.TestCase):
+  def testDefault(self):
+    config = tracing_config.TracingConfig()
+    config_string = config.GetChromeTraceConfigJsonString()
+    self.assertEquals(
+        '{'
+          '"record_mode": "record-as-much-as-possible"'
+        '}',
+        config_string)
+
+  def testBasic(self):
+    category_filter = tracing_category_filter.TracingCategoryFilter(
+        'x,-y,disabled-by-default-z,DELAY(7;foo)')
+    config = tracing_config.TracingConfig()
+    config.SetTracingCategoryFilter(category_filter)
+    config.enable_systrace = True
+    config.record_mode = tracing_config.RECORD_UNTIL_FULL
+    config_string = config.GetChromeTraceConfigJsonString()
+    self.assertEquals(
+        '{'
+          '"enable_systrace": true, '
+          '"excluded_categories": ["y"], '
+          '"included_categories": ["x", "disabled-by-default-z"], '
+          '"record_mode": "record-until-full", '
+          '"synthetic_delays": ["DELAY(7;foo)"]'
+        '}',
+        config_string)
+
+  def testMemoryDumpConfigFormat(self):
+    config = tracing_config.TracingConfig()
+    dump_config = tracing_config.MemoryDumpConfig()
+    config.SetMemoryDumpConfig(dump_config)
+    self.assertEquals(
+        '{'
+          '"memory_dump_config": {"triggers": []}, '
+          '"record_mode": "record-as-much-as-possible"'
+        '}',
+        config.GetChromeTraceConfigJsonString())
+
+    dump_config.AddTrigger("light", 250)
+    dump_config.AddTrigger("detailed", 2000)
+    config.SetMemoryDumpConfig(dump_config)
+    self.assertEquals(
+        '{'
+          '"memory_dump_config": '
+            '{'
+              '"triggers": ['
+                '{"mode": "light", "periodic_interval_ms": 250}, '
+                '{"mode": "detailed", "periodic_interval_ms": 2000}'
+              ']'
+            '}, '
+          '"record_mode": "record-as-much-as-possible"'
+        '}',
+        config.GetChromeTraceConfigJsonString())
diff --git a/catapult/telemetry/telemetry/util/__init__.py b/catapult/telemetry/telemetry/util/__init__.py
new file mode 100644
index 0000000..08483cc
--- /dev/null
+++ b/catapult/telemetry/telemetry/util/__init__.py
@@ -0,0 +1,4 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""A library for bootstrapping Telemetry performance testing."""
diff --git a/catapult/telemetry/telemetry/util/color_histogram.py b/catapult/telemetry/telemetry/util/color_histogram.py
new file mode 100644
index 0000000..76bb4b9
--- /dev/null
+++ b/catapult/telemetry/telemetry/util/color_histogram.py
@@ -0,0 +1,67 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Color Histograms and implementations of functions operating on them."""
+
+from __future__ import division
+
+import collections
+
+from telemetry.internal.util import external_modules
+
+np = external_modules.ImportOptionalModule('numpy')
+
+
+def HistogramDistance(hist1, hist2, default_color=None):
+  """Earth mover's distance.
+  http://en.wikipedia.org/wiki/Earth_mover's_distance"""
+  if len(hist1) != len(hist2):
+    raise ValueError('Trying to compare histograms '
+                     'of different sizes, %s != %s' % (len(hist1), len(hist2)))
+  if len(hist1) == 0:
+    return 0
+
+  sum_func = np.sum if np is not None else sum
+
+  n1 = sum_func(hist1)
+  n2 = sum_func(hist2)
+  if (n1 == 0 or n2 == 0) and default_color is None:
+    raise ValueError('Histogram has no data and no default color.')
+  if n1 == 0:
+    hist1[default_color] = 1
+    n1 = 1
+  if n2 == 0:
+    hist2[default_color] = 1
+    n2 = 1
+
+  if np is not None:
+    remainder = np.multiply(hist1, n2) - np.multiply(hist2, n1)
+    cumsum = np.cumsum(remainder)
+    total = np.sum(np.abs(cumsum))
+  else:
+    total = 0
+    remainder = 0
+    for value1, value2 in zip(hist1, hist2):
+      remainder += value1 * n2 - value2 * n1
+      total += abs(remainder)
+    assert remainder == 0, (
+        '%s pixel(s) left over after computing histogram distance.'
+        % abs(remainder))
+  return abs(float(total) / n1 / n2)
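+# Worked example: HistogramDistance([0, 0, 1, 0, 0], [0, 0, 0, 0, 7]) cross-
+# normalizes the counts (n1=1, n2=7), giving per-bin remainders
+# [0, 0, 7, 0, -7]; the cumulative remainders [0, 0, 7, 7, 0] sum (in absolute
+# value) to 14, and 14 / (n1 * n2) yields a distance of 2, i.e. the unit of
+# mass has to travel two bins.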
+
+
+class ColorHistogram(
+    collections.namedtuple('ColorHistogram', ['r', 'g', 'b', 'default_color'])):
+  # pylint: disable=no-init
+  # pylint: disable=super-on-old-class
+
+  def __new__(cls, r, g, b, default_color=None):
+    return super(ColorHistogram, cls).__new__(cls, r, g, b, default_color)
+
+  def Distance(self, other):
+    total = 0
+    for i in xrange(3):
+      default_color = self[3][i] if self[3] is not None else None
+      total += HistogramDistance(self[i], other[i], default_color)
+    return total
diff --git a/catapult/telemetry/telemetry/util/color_histogram_unittest.py b/catapult/telemetry/telemetry/util/color_histogram_unittest.py
new file mode 100644
index 0000000..a853d53
--- /dev/null
+++ b/catapult/telemetry/telemetry/util/color_histogram_unittest.py
@@ -0,0 +1,117 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.util import color_histogram
+from telemetry.util import image_util
+from telemetry.util import rgba_color
+
+class HistogramDistanceTest(unittest.TestCase):
+  def testNoData(self):
+    hist1 = []
+    hist2 = []
+    self.assertEqual(color_histogram.HistogramDistance(hist1, hist2), 0)
+
+    hist1 = [0, 0, 0]
+    hist2 = [0, 0, 0]
+    self.assertRaises(
+        ValueError, lambda: color_histogram.HistogramDistance(hist1, hist2))
+
+  def testWrongSizes(self):
+    hist1 = [1]
+    hist2 = [1, 0]
+    self.assertRaises(
+        ValueError, lambda: color_histogram.HistogramDistance(hist1, hist2))
+
+  def testNoDistance(self):
+    hist1 = [2, 4, 1, 8, 0, 0]
+    hist2 = [2, 4, 1, 8, 0, 0]
+    self.assertEqual(color_histogram.HistogramDistance(hist1, hist2), 0)
+
+  def testNormalizeCounts(self):
+    hist1 = [0, 0, 1, 0, 0]
+    hist2 = [0, 0, 0, 0, 7]
+    self.assertEqual(color_histogram.HistogramDistance(hist1, hist2), 2)
+    self.assertEqual(color_histogram.HistogramDistance(hist2, hist1), 2)
+
+  def testDistance(self):
+    hist1 = [2, 0, 1, 3, 4]
+    hist2 = [3, 1, 2, 4, 0]
+    self.assertEqual(color_histogram.HistogramDistance(hist1, hist2), 1)
+    self.assertEqual(color_histogram.HistogramDistance(hist2, hist1), 1)
+
+    hist1 = [0, 1, 3, 1]
+    hist2 = [2, 2, 1, 0]
+    self.assertEqual(color_histogram.HistogramDistance(hist1, hist2), 1.2)
+    self.assertEqual(color_histogram.HistogramDistance(hist2, hist1), 1.2)
+
+
+class HistogramTest(unittest.TestCase):
+  def testHistogram(self):
+    pixels = [1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3,
+              1, 2, 3, 8, 7, 6, 5, 4, 6, 1, 2, 3,
+              1, 2, 3, 8, 7, 6, 5, 4, 6, 1, 2, 3]
+    bmp = image_util.FromRGBPixels(4, 3, pixels)
+    bmp = image_util.Crop(bmp, 1, 1, 2, 2)
+
+    hist = image_util.GetColorHistogram(bmp)
+    for i in xrange(3):
+      self.assertEquals(sum(hist[i]),
+                        image_util.Width(bmp) * image_util.Height(bmp))
+    self.assertEquals(hist.r[1], 0)
+    self.assertEquals(hist.r[5], 2)
+    self.assertEquals(hist.r[8], 2)
+    self.assertEquals(hist.g[2], 0)
+    self.assertEquals(hist.g[4], 2)
+    self.assertEquals(hist.g[7], 2)
+    self.assertEquals(hist.b[3], 0)
+    self.assertEquals(hist.b[6], 4)
+
+  def testHistogramIgnoreColor(self):
+    pixels = [1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3,
+              1, 2, 3, 8, 7, 6, 5, 4, 6, 1, 2, 3,
+              1, 2, 3, 8, 7, 6, 5, 4, 6, 1, 2, 3]
+    bmp = image_util.FromRGBPixels(4, 3, pixels)
+
+    hist = image_util.GetColorHistogram(
+        bmp, ignore_color=rgba_color.RgbaColor(1, 2, 3))
+    self.assertEquals(hist.r[1], 0)
+    self.assertEquals(hist.r[5], 2)
+    self.assertEquals(hist.r[8], 2)
+    self.assertEquals(hist.g[2], 0)
+    self.assertEquals(hist.g[4], 2)
+    self.assertEquals(hist.g[7], 2)
+    self.assertEquals(hist.b[3], 0)
+    self.assertEquals(hist.b[6], 4)
+
+  def testHistogramIgnoreColorTolerance(self):
+    pixels = [1, 2, 3, 4, 5, 6,
+              7, 8, 9, 8, 7, 6]
+    bmp = image_util.FromRGBPixels(2, 2, pixels)
+
+    hist = image_util.GetColorHistogram(
+        bmp, ignore_color=rgba_color.RgbaColor(0, 1, 2), tolerance=1)
+    self.assertEquals(hist.r[1], 0)
+    self.assertEquals(hist.r[4], 1)
+    self.assertEquals(hist.r[7], 1)
+    self.assertEquals(hist.r[8], 1)
+    self.assertEquals(hist.g[2], 0)
+    self.assertEquals(hist.g[5], 1)
+    self.assertEquals(hist.g[7], 1)
+    self.assertEquals(hist.g[8], 1)
+    self.assertEquals(hist.b[3], 0)
+    self.assertEquals(hist.b[6], 2)
+    self.assertEquals(hist.b[9], 1)
+
+  def testHistogramDistanceIgnoreColor(self):
+    pixels = [1, 2, 3, 1, 2, 3,
+              1, 2, 3, 1, 2, 3]
+    bmp = image_util.FromRGBPixels(2, 2, pixels)
+
+    hist1 = image_util.GetColorHistogram(
+        bmp, ignore_color=rgba_color.RgbaColor(1, 2, 3))
+    hist2 = image_util.GetColorHistogram(bmp)
+
+    self.assertEquals(hist1.Distance(hist2), 0)
diff --git a/catapult/telemetry/telemetry/util/command_line.py b/catapult/telemetry/telemetry/util/command_line.py
new file mode 100644
index 0000000..faf870f
--- /dev/null
+++ b/catapult/telemetry/telemetry/util/command_line.py
@@ -0,0 +1,39 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+
+from telemetry.internal.util import command_line
+
+
+class ArgParseCommand(command_line.Command):
+  usage = ''
+
+  @classmethod
+  def CreateParser(cls):
+    return argparse.ArgumentParser('%%prog %s %s' % (cls.Name(), cls.usage),
+                                   description=cls.Description())
+
+  @classmethod
+  def AddCommandLineArgs(cls, parser, environment):
+    # pylint: disable=arguments-differ
+    pass
+
+  @classmethod
+  def ProcessCommandLineArgs(cls, parser, options, extra_args, environment):
+    # pylint: disable=arguments-differ
+    pass
+
+  def Run(self, options, extra_args=None):
+    # pylint: disable=arguments-differ
+    raise NotImplementedError()
+
+  @classmethod
+  def main(cls, args=None):
+    """Main method to run this command as a standalone script."""
+    parser = cls.CreateParser()
+    cls.AddCommandLineArgs(parser, None)
+    options, extra_args = parser.parse_known_args(args=args)
+    cls.ProcessCommandLineArgs(parser, options, extra_args, None)
+    return min(cls().Run(options, extra_args), 255)
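+
+# Minimal usage sketch (hypothetical; HelloCommand is not part of this
+# module). It assumes Name() and Description() come from the
+# telemetry.internal.util.command_line.Command base class and that sys is
+# imported:
+#
+#   class HelloCommand(ArgParseCommand):
+#     """Prints a greeting."""
+#
+#     @classmethod
+#     def AddCommandLineArgs(cls, parser, environment):
+#       parser.add_argument('--name', default='world')
+#
+#     def Run(self, options, extra_args=None):
+#       print 'Hello, %s!' % options.name
+#       return 0
+#
+#   if __name__ == '__main__':
+#     sys.exit(HelloCommand.main())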
diff --git a/catapult/telemetry/telemetry/util/image_util.py b/catapult/telemetry/telemetry/util/image_util.py
new file mode 100644
index 0000000..809dc57
--- /dev/null
+++ b/catapult/telemetry/telemetry/util/image_util.py
@@ -0,0 +1,121 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Provides implementations of basic image processing functions.
+
+Implements basic image processing functions, such as reading/writing images,
+cropping, finding the bounding box of a color and diffing images.
+
+When numpy is present, image_util_numpy_impl is used for the implementation of
+this interface. The old bitmap implementation (image_util_bitmap_impl) is used
+as a fallback when numpy is not present."""
+
+import base64
+
+from telemetry.internal.util import external_modules
+
+np = external_modules.ImportOptionalModule('numpy')
+
+if np is None:
+  from telemetry.internal.image_processing import image_util_bitmap_impl
+  impl = image_util_bitmap_impl
+else:
+  from telemetry.internal.image_processing import image_util_numpy_impl
+  impl = image_util_numpy_impl
+
+
+def Channels(image):
+  """Number of color channels in the image."""
+  return impl.Channels(image)
+
+def Width(image):
+  """Width of the image."""
+  return impl.Width(image)
+
+def Height(image):
+  """Height of the image."""
+  return impl.Height(image)
+
+def Pixels(image):
+  """Flat RGB pixel array of the image."""
+  return impl.Pixels(image)
+
+def GetPixelColor(image, x, y):
+  """Returns a RgbaColor for the pixel at (x, y)."""
+  return impl.GetPixelColor(image, x, y)
+
+def WritePngFile(image, path):
+  """Write an image to a PNG file.
+
+  Args:
+    image: an image object.
+    path: The path to the PNG file. Must end in 'png' or an
+          AssertionError will be raised."""
+  assert path.endswith('png')
+  return impl.WritePngFile(image, path)
+
+def FromRGBPixels(width, height, pixels, bpp=3):
+  """Create an image from an array of rgb pixels.
+
+  Ignores alpha channel if present.
+
+  Args:
+    width, height: int, the width and height of the image.
+    pixels: The flat array of pixels in the form of [r,g,b[,a],r,g,b[,a],...]
+    bpp: 3 for RGB, 4 for RGBA."""
+  return impl.FromRGBPixels(width, height, pixels, bpp)
+
+def FromPng(png_data):
+  """Create an image from raw PNG data."""
+  return impl.FromPng(png_data)
+
+def FromPngFile(path):
+  """Create an image from a PNG file.
+
+  Args:
+    path: The path to the PNG file."""
+  return impl.FromPngFile(path)
+
+def FromBase64Png(base64_png):
+  """Create an image from raw PNG data encoded in base64."""
+  return FromPng(base64.b64decode(base64_png))
+
+def AreEqual(image1, image2, tolerance=0, likely_equal=True):
+  """Determines whether two images are identical within a given tolerance.
+  Setting likely_equal to False enables short-circuit equality testing, which
+  is about 2-3x slower for equal images, but can be image height times faster
+  if the images are not equal."""
+  return impl.AreEqual(image1, image2, tolerance, likely_equal)
+
+def Diff(image1, image2):
+  """Returns a new image that represents the difference between this image
+  and another image."""
+  return impl.Diff(image1, image2)
+
+def GetBoundingBox(image, color, tolerance=0):
+  """Finds the minimum box surrounding all occurrences of bgr |color|.
+
+  Ignores the alpha channel.
+
+  Args:
+    color: RgbaColor, bounding box color.
+    tolerance: int, per-channel tolerance for the bounding box color.
+
+  Returns:
+    (top, left, width, height), match_count"""
+  return impl.GetBoundingBox(image, color, tolerance)
+
+def Crop(image, left, top, width, height):
+  """Crops the current image down to the specified box."""
+  return impl.Crop(image, left, top, width, height)
+
+def GetColorHistogram(image, ignore_color=None, tolerance=0):
+  """Computes a histogram of the pixel colors in this image.
+  Args:
+    ignore_color: An RgbaColor to exclude from the bucket counts.
+    tolerance: A tolerance for the ignore_color.
+
+  Returns:
+    A ColorHistogram namedtuple with 256 integers in each field: r, g, and b."""
+  return impl.GetColorHistogram(image, ignore_color, tolerance)
diff --git a/catapult/telemetry/telemetry/util/image_util_unittest.py b/catapult/telemetry/telemetry/util/image_util_unittest.py
new file mode 100644
index 0000000..4cb10e6
--- /dev/null
+++ b/catapult/telemetry/telemetry/util/image_util_unittest.py
@@ -0,0 +1,128 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import tempfile
+import unittest
+
+from telemetry.core import util
+from telemetry.util import image_util
+from telemetry.util import rgba_color
+
+# This is a simple base64 encoded 2x2 PNG which contains, in order, a single
+# Red, Yellow, Blue, and Green pixel.
+test_png = """
+ iVBORw0KGgoAAAANSUhEUgAAAAIAAAACCAIAAAD91
+ JpzAAAAFklEQVR4Xg3EAQ0AAABAMP1LY3YI7l8l6A
+ T8tgwbJAAAAABJRU5ErkJggg==
+"""
+test_png_path = os.path.join(util.GetUnittestDataDir(), 'test_png.png')
+test_png_2_path = os.path.join(util.GetUnittestDataDir(), 'test_png_2.png')
+
+class ImageUtilTest(unittest.TestCase):
+  def testReadFromBase64Png(self):
+    bmp = image_util.FromBase64Png(test_png)
+
+    self.assertEquals(2, image_util.Width(bmp))
+    self.assertEquals(2, image_util.Height(bmp))
+
+    image_util.GetPixelColor(bmp, 0, 0).AssertIsRGB(255, 0, 0)
+    image_util.GetPixelColor(bmp, 1, 1).AssertIsRGB(0, 255, 0)
+    image_util.GetPixelColor(bmp, 0, 1).AssertIsRGB(0, 0, 255)
+    image_util.GetPixelColor(bmp, 1, 0).AssertIsRGB(255, 255, 0)
+
+  def testReadFromPngFile(self):
+    file_bmp = image_util.FromPngFile(test_png_path)
+
+    self.assertEquals(2, image_util.Width(file_bmp))
+    self.assertEquals(2, image_util.Height(file_bmp))
+
+    image_util.GetPixelColor(file_bmp, 0, 0).AssertIsRGB(255, 0, 0)
+    image_util.GetPixelColor(file_bmp, 1, 1).AssertIsRGB(0, 255, 0)
+    image_util.GetPixelColor(file_bmp, 0, 1).AssertIsRGB(0, 0, 255)
+    image_util.GetPixelColor(file_bmp, 1, 0).AssertIsRGB(255, 255, 0)
+
+  def testWritePngToPngFile(self):
+    orig = image_util.FromPngFile(test_png_path)
+    temp_file = tempfile.NamedTemporaryFile(suffix='.png').name
+    image_util.WritePngFile(orig, temp_file)
+    new_file = image_util.FromPngFile(temp_file)
+    self.assertTrue(image_util.AreEqual(orig, new_file, likely_equal=True))
+
+  def testWritePngWithoutPngSuffixThrows(self):
+    orig = image_util.FromPngFile(test_png_path)
+    temp_file = tempfile.NamedTemporaryFile().name
+    self.assertRaises(AssertionError, image_util.WritePngFile,
+                      orig, temp_file)
+
+  def testWriteCroppedBmpToPngFile(self):
+    pixels = [255, 0, 0, 255, 255, 0, 0, 0, 0,
+              255, 255, 0, 0, 255, 0, 0, 0, 0]
+    orig = image_util.FromRGBPixels(3, 2, pixels)
+    orig = image_util.Crop(orig, 0, 0, 2, 2)
+    temp_file = tempfile.NamedTemporaryFile(suffix='.png').name
+    image_util.WritePngFile(orig, temp_file)
+    new_file = image_util.FromPngFile(temp_file)
+    self.assertTrue(image_util.AreEqual(orig, new_file, likely_equal=True))
+
+  def testIsEqual(self):
+    bmp = image_util.FromBase64Png(test_png)
+    file_bmp = image_util.FromPngFile(test_png_path)
+    self.assertTrue(image_util.AreEqual(bmp, file_bmp, likely_equal=True))
+
+  def testDiff(self):
+    file_bmp = image_util.FromPngFile(test_png_path)
+    file_bmp_2 = image_util.FromPngFile(test_png_2_path)
+
+    diff_bmp = image_util.Diff(file_bmp, file_bmp)
+
+    self.assertEquals(2, image_util.Width(diff_bmp))
+    self.assertEquals(2, image_util.Height(diff_bmp))
+
+    image_util.GetPixelColor(diff_bmp, 0, 0).AssertIsRGB(0, 0, 0)
+    image_util.GetPixelColor(diff_bmp, 1, 1).AssertIsRGB(0, 0, 0)
+    image_util.GetPixelColor(diff_bmp, 0, 1).AssertIsRGB(0, 0, 0)
+    image_util.GetPixelColor(diff_bmp, 1, 0).AssertIsRGB(0, 0, 0)
+
+    diff_bmp = image_util.Diff(file_bmp, file_bmp_2)
+
+    self.assertEquals(3, image_util.Width(diff_bmp))
+    self.assertEquals(3, image_util.Height(diff_bmp))
+
+    image_util.GetPixelColor(diff_bmp, 0, 0).AssertIsRGB(0, 255, 255)
+    image_util.GetPixelColor(diff_bmp, 1, 1).AssertIsRGB(255, 0, 255)
+    image_util.GetPixelColor(diff_bmp, 0, 1).AssertIsRGB(255, 255, 0)
+    image_util.GetPixelColor(diff_bmp, 1, 0).AssertIsRGB(0, 0, 255)
+
+    image_util.GetPixelColor(diff_bmp, 0, 2).AssertIsRGB(255, 255, 255)
+    image_util.GetPixelColor(diff_bmp, 1, 2).AssertIsRGB(255, 255, 255)
+    image_util.GetPixelColor(diff_bmp, 2, 0).AssertIsRGB(255, 255, 255)
+    image_util.GetPixelColor(diff_bmp, 2, 1).AssertIsRGB(255, 255, 255)
+    image_util.GetPixelColor(diff_bmp, 2, 2).AssertIsRGB(255, 255, 255)
+
+  def testGetBoundingBox(self):
+    pixels = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+              0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0,
+              0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+    bmp = image_util.FromRGBPixels(4, 3, pixels)
+    box, count = image_util.GetBoundingBox(bmp, rgba_color.RgbaColor(1, 0, 0))
+    self.assertEquals(box, (1, 1, 2, 1))
+    self.assertEquals(count, 2)
+
+    box, count = image_util.GetBoundingBox(bmp, rgba_color.RgbaColor(0, 1, 0))
+    self.assertEquals(box, None)
+    self.assertEquals(count, 0)
+
+  def testCrop(self):
+    pixels = [0, 0, 0, 1, 0, 0, 2, 0, 0, 3, 0, 0,
+              0, 1, 0, 1, 1, 0, 2, 1, 0, 3, 1, 0,
+              0, 2, 0, 1, 2, 0, 2, 2, 0, 3, 2, 0]
+    bmp = image_util.FromRGBPixels(4, 3, pixels)
+    bmp = image_util.Crop(bmp, 1, 2, 2, 1)
+
+    self.assertEquals(2, image_util.Width(bmp))
+    self.assertEquals(1, image_util.Height(bmp))
+    image_util.GetPixelColor(bmp, 0, 0).AssertIsRGB(1, 2, 0)
+    image_util.GetPixelColor(bmp, 1, 0).AssertIsRGB(2, 2, 0)
+    self.assertEquals(image_util.Pixels(bmp), bytearray([1, 2, 0, 2, 2, 0]))
diff --git a/catapult/telemetry/telemetry/util/mac/README b/catapult/telemetry/telemetry/util/mac/README
new file mode 100644
index 0000000..0c5538b
--- /dev/null
+++ b/catapult/telemetry/telemetry/util/mac/README
@@ -0,0 +1,2 @@
+This directory contains files needed to run Chrome browser Telemetry tests on
+OSX.
diff --git a/catapult/telemetry/telemetry/util/mac/__init__.py b/catapult/telemetry/telemetry/util/mac/__init__.py
new file mode 100644
index 0000000..4d6aabb
--- /dev/null
+++ b/catapult/telemetry/telemetry/util/mac/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/catapult/telemetry/telemetry/util/mac/determine_if_keychain_entry_is_decryptable.c b/catapult/telemetry/telemetry/util/mac/determine_if_keychain_entry_is_decryptable.c
new file mode 100644
index 0000000..5facb61
--- /dev/null
+++ b/catapult/telemetry/telemetry/util/mac/determine_if_keychain_entry_is_decryptable.c
@@ -0,0 +1,94 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This program determines whether a specific entry in the default OSX Keychain
+// is decryptable by all applications without a user prompt.
+//
+// This program uses APIs only available on OSX 10.7+.
+//
+// Input format:
+//  determine_if_keychain_entry_is_decryptable [service name] [account name]
+//
+// Return values:
+//   0 - The entry doesn't exist, or the ACLs are correct.
+//   1 - The ACLs are incorrect.
+//   >=2 - Unexpected error.
+//
+// To compile, run: "clang -framework Security -framework CoreFoundation
+//                   -o determine_if_keychain_entry_is_decryptable
+//                   determine_if_keychain_entry_is_decryptable.c"
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <Security/Security.h>
+#include <string.h>
+
+int main(int argc, char* argv[]) {
+  // There must be exactly 2 arguments to the program.
+  if (argc != 3)
+    return 2;
+
+  const char* service_name = argv[1];
+  const char* account_name = argv[2];
+  SecKeychainItemRef item;
+  OSStatus status = SecKeychainFindGenericPassword(NULL, strlen(service_name),
+      service_name, strlen(account_name), account_name, NULL, NULL, &item);
+
+  // There is no keychain item.
+  if (status == errSecItemNotFound)
+    return 0;
+
+  // Unexpected error.
+  if (status != errSecSuccess)
+    return 3;
+
+  SecAccessRef access;
+  status = SecKeychainItemCopyAccess(item, &access);
+
+  // Unexpected error. |access| is not valid when the copy fails, so only
+  // release the item.
+  if (status != errSecSuccess) {
+    CFRelease(item);
+    return 4;
+  }
+
+  CFArrayRef acl_list =
+      SecAccessCopyMatchingACLList(access, kSecACLAuthorizationDecrypt);
+
+  for (CFIndex i = 0; i < CFArrayGetCount(acl_list); ++i) {
+    SecACLRef acl = (SecACLRef)CFArrayGetValueAtIndex(acl_list, i);
+
+    CFArrayRef application_list;
+    CFStringRef description;
+    SecKeychainPromptSelector prompt_selector;
+    status = SecACLCopyContents(acl, &application_list, &description,
+                                &prompt_selector);
+
+    // Unexpected error.
+    if (status != errSecSuccess) {
+      CFRelease(acl_list);
+      CFRelease(access);
+      CFRelease(item);
+      return 5;
+    }
+
+    // Check whether this acl gives decryption access to all applications.
+    bool found_correct_acl = (application_list == NULL);
+    CFRelease(description);
+    if (application_list)
+      CFRelease(application_list);
+
+    if (found_correct_acl) {
+      CFRelease(acl_list);
+      CFRelease(access);
+      CFRelease(item);
+      return 0;
+    }
+  }
+
+  // No acl was found that gave decryption access to all applications.
+  CFRelease(acl_list);
+  CFRelease(access);
+  CFRelease(item);
+  return 1;
+}
diff --git a/catapult/telemetry/telemetry/util/mac/determine_if_keychain_is_locked.c b/catapult/telemetry/telemetry/util/mac/determine_if_keychain_is_locked.c
new file mode 100644
index 0000000..ddf0aff
--- /dev/null
+++ b/catapult/telemetry/telemetry/util/mac/determine_if_keychain_is_locked.c
@@ -0,0 +1,25 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This program determines whether the default OSX Keychain is unlocked without
+// causing a user interaction prompt.
+// Return values:
+//   0 - The default keychain is unlocked.
+//   1 - The default keychain is locked.
+//   2 - Unexpected error.
+//
+// To compile, run: "clang -framework Security
+//                   -o determine_if_keychain_is_locked
+//                   determine_if_keychain_is_locked.c"
+
+#include <Security/Security.h>
+
+int main() {
+  SecKeychainStatus keychain_status;
+  OSStatus os_status = SecKeychainGetStatus(NULL, &keychain_status);
+  if (os_status != errSecSuccess)
+    return 2;
+
+  return (keychain_status & kSecUnlockStateStatus) ? 0 : 1;
+}
diff --git a/catapult/telemetry/telemetry/util/mac/keychain_helper.py b/catapult/telemetry/telemetry/util/mac/keychain_helper.py
new file mode 100644
index 0000000..40219ea
--- /dev/null
+++ b/catapult/telemetry/telemetry/util/mac/keychain_helper.py
@@ -0,0 +1,65 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import subprocess
+
+from telemetry.internal.util import binary_manager
+from telemetry.core import platform
+from telemetry.core import os_version
+
+def _PathForExecutable(executable_name):
+  """Fetches the executable from cloud storage, and returns its path."""
+  arch_name = platform.GetHostPlatform().GetArchName()
+  return binary_manager.FetchPath(executable_name, arch_name, 'mac')
+
+def IsKeychainLocked():
+  """
+  Returns True if the keychain is locked, or if there is an error determining
+  the keychain state.
+  """
+  path = _PathForExecutable('determine_if_keychain_is_locked')
+
+  child = subprocess.Popen(path, stdout=subprocess.PIPE)
+  child.communicate()
+  return child.returncode != 0
+
+def DoesKeychainHaveTimeout():
+  """
+  Returns True if the keychain will lock itself after a period of time.
+
+  This method will trigger a blocking, modal dialog if the keychain is
+  locked.
+  """
+  command = ("/usr/bin/security", "show-keychain-info")
+  child = subprocess.Popen(command, stderr=subprocess.PIPE)
+  stderr = child.communicate()[1]
+  return "no-timeout" not in stderr
+
+def _IsKeychainConfiguredForBots(service_name, account_name):
+  """
+  Returns True if the keychain entry associated with |service_name| and
+  |account_name| is correctly configured for running telemetry tests on bots.
+
+  This method will trigger a blocking, modal dialog if the keychain is
+  locked.
+  """
+  # The executable requires OSX 10.7+ APIs.
+  if (platform.GetHostPlatform().GetOSVersionName() <
+      os_version.LION):
+    return False
+
+  path = _PathForExecutable('determine_if_keychain_entry_is_decryptable')
+
+  command = (path, service_name, account_name)
+  child = subprocess.Popen(command)
+  child.communicate()
+  return child.returncode == 0
+
+def IsKeychainConfiguredForBotsWithChrome():
+  return _IsKeychainConfiguredForBots("Chrome Safe Storage",
+      "Chrome")
+
+def IsKeychainConfiguredForBotsWithChromium():
+  return _IsKeychainConfiguredForBots("Chromium Safe Storage",
+      "Chromium")
diff --git a/catapult/telemetry/telemetry/util/matching.py b/catapult/telemetry/telemetry/util/matching.py
new file mode 100644
index 0000000..1ecd182
--- /dev/null
+++ b/catapult/telemetry/telemetry/util/matching.py
@@ -0,0 +1,27 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import difflib
+
+
+def GetMostLikelyMatchedObject(objects, target_name,
+                               name_func=lambda x: x,
+                               matched_score_threshold=0.4):
+  """Matches objects whose names are most likely matched with target.
+
+  Args:
+    objects: list of objects to match.
+    target_name: name to match.
+    name_func: function to get the name of an object. Defaults to identity.
+    matched_score_threshold: threshold of likelihood to match.
+
+  Returns:
+    A list of matching objects, sorted by descending match score.
+  """
+  def MatchScore(obj):
+    return difflib.SequenceMatcher(
+        isjunk=None, a=name_func(obj), b=target_name).ratio()
+  object_score = [(o, MatchScore(o)) for o in objects]
+  result = [x for x in object_score if x[1] > matched_score_threshold]
+  return [x[0] for x in sorted(result, key=lambda r: r[1], reverse=True)]
diff --git a/catapult/telemetry/telemetry/util/matching_unittest.py b/catapult/telemetry/telemetry/util/matching_unittest.py
new file mode 100644
index 0000000..a43c044
--- /dev/null
+++ b/catapult/telemetry/telemetry/util/matching_unittest.py
@@ -0,0 +1,47 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.util import matching
+
+
+class BenchmarkFoo(object):
+  """ Benchmark Foo for testing."""
+  @classmethod
+  def Name(cls):
+    return 'FooBenchmark'
+
+
+class BenchmarkBar(object):
+  """ Benchmark Bar for testing long description line."""
+  @classmethod
+  def Name(cls):
+    return 'BarBenchmarkkkkk'
+
+
+class UnusualBenchmark(object):
+  @classmethod
+  def Name(cls):
+    return 'I have a very unusual name'
+
+
+class CommandLineUnittest(unittest.TestCase):
+  def testGetMostLikelyMatchedObject(self):
+    # Test moved from telemetry/benchmark_runner_unittest.py
+    all_benchmarks = [BenchmarkFoo, BenchmarkBar, UnusualBenchmark]
+    self.assertEquals(
+        [BenchmarkFoo, BenchmarkBar],
+        matching.GetMostLikelyMatchedObject(
+            all_benchmarks, 'BenchmarkFooz', name_func=lambda x: x.Name()))
+
+    self.assertEquals(
+        [BenchmarkBar, BenchmarkFoo],
+        matching.GetMostLikelyMatchedObject(
+            all_benchmarks, 'BarBenchmark', name_func=lambda x: x.Name()))
+
+    self.assertEquals(
+        [UnusualBenchmark],
+        matching.GetMostLikelyMatchedObject(
+            all_benchmarks, 'unusual', name_func=lambda x: x.Name()))
diff --git a/catapult/telemetry/telemetry/util/perf_result_data_type.py b/catapult/telemetry/telemetry/util/perf_result_data_type.py
new file mode 100644
index 0000000..dbaf794
--- /dev/null
+++ b/catapult/telemetry/telemetry/util/perf_result_data_type.py
@@ -0,0 +1,20 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+DEFAULT = 'default'
+UNIMPORTANT = 'unimportant'
+HISTOGRAM = 'histogram'
+UNIMPORTANT_HISTOGRAM = 'unimportant-histogram'
+INFORMATIONAL = 'informational'
+
+ALL_TYPES = [DEFAULT, UNIMPORTANT, HISTOGRAM, UNIMPORTANT_HISTOGRAM,
+             INFORMATIONAL]
+
+
+def IsValidType(datatype):
+  return datatype in ALL_TYPES
+
+
+def IsHistogram(datatype):
+  return datatype == HISTOGRAM or datatype == UNIMPORTANT_HISTOGRAM
diff --git a/catapult/telemetry/telemetry/util/perf_tests_helper.py b/catapult/telemetry/telemetry/util/perf_tests_helper.py
new file mode 100644
index 0000000..aaf51be
--- /dev/null
+++ b/catapult/telemetry/telemetry/util/perf_tests_helper.py
@@ -0,0 +1,14 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+from telemetry.util import perf_tests_results_helper
+
+
+FlattenList = \
+    perf_tests_results_helper.FlattenList
+GeomMeanAndStdDevFromHistogram = \
+    perf_tests_results_helper.GeomMeanAndStdDevFromHistogram
+PrintPerfResult = \
+    perf_tests_results_helper.PrintPerfResult
+PrintPages = \
+    perf_tests_results_helper.PrintPages
diff --git a/catapult/telemetry/telemetry/util/perf_tests_results_helper.py b/catapult/telemetry/telemetry/util/perf_tests_results_helper.py
new file mode 100644
index 0000000..72ccfb0
--- /dev/null
+++ b/catapult/telemetry/telemetry/util/perf_tests_results_helper.py
@@ -0,0 +1,165 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import re
+import sys
+
+import json
+import math
+
+from telemetry.util import perf_result_data_type
+
+
+# Mapping from result type to test output
+RESULT_TYPES = {perf_result_data_type.UNIMPORTANT: 'RESULT ',
+                perf_result_data_type.DEFAULT: '*RESULT ',
+                perf_result_data_type.INFORMATIONAL: '',
+                perf_result_data_type.UNIMPORTANT_HISTOGRAM: 'HISTOGRAM ',
+                perf_result_data_type.HISTOGRAM: '*HISTOGRAM '}
+
+
+def _EscapePerfResult(s):
+  """Escapes |s| for use in a perf result."""
+  return re.sub(r'[\:|=/#&,]', '_', s)
+
+
+def FlattenList(values):
+  """Returns a simple list without sub-lists."""
+  ret = []
+  for entry in values:
+    if isinstance(entry, list):
+      ret.extend(FlattenList(entry))
+    else:
+      ret.append(entry)
+  return ret
+
+
+def GeomMeanAndStdDevFromHistogram(histogram_json):
+  histogram = json.loads(histogram_json)
+  # Handle empty histograms gracefully.
+  if not 'buckets' in histogram:
+    return 0.0, 0.0
+  count = 0
+  sum_of_logs = 0
+  for bucket in histogram['buckets']:
+    if 'high' in bucket:
+      bucket['mean'] = (bucket['low'] + bucket['high']) / 2.0
+    else:
+      bucket['mean'] = bucket['low']
+    if bucket['mean'] > 0:
+      sum_of_logs += math.log(bucket['mean']) * bucket['count']
+      count += bucket['count']
+
+  if count == 0:
+    return 0.0, 0.0
+
+  sum_of_squares = 0
+  geom_mean = math.exp(sum_of_logs / count)
+  for bucket in histogram['buckets']:
+    if bucket['mean'] > 0:
+      sum_of_squares += (bucket['mean'] - geom_mean) ** 2 * bucket['count']
+  return geom_mean, math.sqrt(sum_of_squares / count)
+
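+# Worked example (illustrative): for the histogram JSON
+#   '{"buckets": [{"low": 1, "high": 3, "count": 2}, {"low": 3, "count": 1}]}'
+# the bucket means are 2.0 and 3.0, so the geometric mean is
+# exp((2*ln(2) + ln(3)) / 3), roughly 2.29, and the reported standard
+# deviation is roughly 0.47.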
+
+def _ValueToString(v):
+  # Special case for floats so we don't print using scientific notation.
+  if isinstance(v, float):
+    return '%f' % v
+  else:
+    return str(v)
+
+
+def _MeanAndStdDevFromList(values):
+  avg = None
+  sd = None
+  if len(values) > 1:
+    try:
+      value = '[%s]' % ','.join([_ValueToString(v) for v in values])
+      avg = sum([float(v) for v in values]) / len(values)
+      sqdiffs = [(float(v) - avg) ** 2 for v in values]
+      variance = sum(sqdiffs) / (len(values) - 1)
+      sd = math.sqrt(variance)
+    except ValueError:
+      value = ', '.join(values)
+  else:
+    value = values[0]
+  return value, avg, sd
+
+
+def PrintPages(page_list):
+  """Prints list of pages to stdout in the format required by perf tests."""
+  print 'Pages: [%s]' % ','.join([_EscapePerfResult(p) for p in page_list])
+
+
+def PrintPerfResult(measurement, trace, values, units,
+                    result_type=perf_result_data_type.DEFAULT,
+                    print_to_stdout=True):
+  """Prints numerical data to stdout in the format required by perf tests.
+
+  The string args may be empty but they must not contain any colons (:) or
+  equals signs (=).
+  This is parsed by the buildbot using:
+  http://src.chromium.org/viewvc/chrome/trunk/tools/build/scripts/slave/process_log_utils.py
+
+  Args:
+    measurement: A description of the quantity being measured, e.g. "vm_peak".
+        On the dashboard, this maps to a particular graph. Mandatory.
+    trace: A description of the particular data point, e.g. "reference".
+        On the dashboard, this maps to a particular "line" in the graph.
+        Mandatory.
+    values: A list of numeric measured values. An N-dimensional list will be
+        flattened and treated as a simple list.
+    units: A description of the units of measure, e.g. "bytes".
+    result_type: Accepts values of perf_result_data_type.ALL_TYPES.
+    print_to_stdout: If True, also prints the formatted output to stdout.
+        The output is always returned to the caller.
+
+  Returns:
+    String of the formatted perf result.
+  """
+  assert perf_result_data_type.IsValidType(result_type), \
+         'result type: %s is invalid' % result_type
+
+  trace_name = _EscapePerfResult(trace)
+
+  if (result_type == perf_result_data_type.UNIMPORTANT or
+      result_type == perf_result_data_type.DEFAULT or
+      result_type == perf_result_data_type.INFORMATIONAL):
+    assert isinstance(values, list)
+    assert '/' not in measurement
+    flattened_values = FlattenList(values)
+    assert len(flattened_values)
+    value, avg, sd = _MeanAndStdDevFromList(flattened_values)
+    output = '%s%s: %s%s%s %s' % (
+        RESULT_TYPES[result_type],
+        _EscapePerfResult(measurement),
+        trace_name,
+        # Do not show the equal sign if the trace is empty. This usually
+        # happens when the measurement name alone is clear enough to describe
+        # the result.
+        '= ' if trace_name else '',
+        value,
+        units)
+  else:
+    assert perf_result_data_type.IsHistogram(result_type)
+    assert isinstance(values, list)
+    # The histograms can only be printed individually, there's no computation
+    # across different histograms.
+    assert len(values) == 1
+    value = values[0]
+    output = '%s%s: %s= %s %s' % (
+        RESULT_TYPES[result_type],
+        _EscapePerfResult(measurement),
+        trace_name,
+        value,
+        units)
+    avg, sd = GeomMeanAndStdDevFromHistogram(value)
+
+  if avg:
+    output += '\nAvg %s: %f%s' % (measurement, avg, units)
+  if sd:
+    output += '\nSd  %s: %f%s' % (measurement, sd, units)
+  if print_to_stdout:
+    print output
+    sys.stdout.flush()
+  return output
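+
+# Output sketch (approximate): PrintPerfResult('vm_peak', 'reference',
+# [1024, 2048], 'bytes') prints and returns something like
+#   *RESULT vm_peak: reference= [1024,2048] bytes
+#   Avg vm_peak: 1536.000000bytes
+#   Sd  vm_peak: 724.077344bytes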
diff --git a/catapult/telemetry/telemetry/util/process_statistic_timeline_data.py b/catapult/telemetry/telemetry/util/process_statistic_timeline_data.py
new file mode 100644
index 0000000..589e8d3
--- /dev/null
+++ b/catapult/telemetry/telemetry/util/process_statistic_timeline_data.py
@@ -0,0 +1,58 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class ProcessStatisticTimelineData(object):
+  """Holds value of a stat for one or more processes.
+
+  This object can hold a value for more than one pid by adding another
+  object."""
+
+  def __init__(self, pid, value):
+    super(ProcessStatisticTimelineData, self).__init__()
+    assert value >= 0
+    self._value_by_pid = {pid: value}
+
+  def __sub__(self, other):
+    """The results of subtraction is an object holding only the pids contained
+    in |self|.
+
+    The motivation is that some processes may have died between two consecutive
+    measurements. The desired behavior is to only make calculations based on
+    the processes that are alive at the end of the second measurement."""
+    # pylint: disable=protected-access
+    ret = self.__class__(0, 0)
+    my_dict = self._value_by_pid
+
+    ret._value_by_pid = (
+        {k: my_dict[k] - other._value_by_pid.get(k, 0) for
+            k in my_dict.keys()})
+    return ret
+
+  def __add__(self, other):
+    """The result contains pids from both |self| and |other|, if duplicate
+    pids are found between objects, an error will occur. """
+    # pylint: disable=protected-access
+    intersecting_pids = (set(self._value_by_pid.keys()) &
+        set(other._value_by_pid.keys()))
+    assert len(intersecting_pids) == 0
+
+    ret = self.__class__(0, 0)
+    ret._value_by_pid = {}
+    ret._value_by_pid.update(self._value_by_pid)
+    ret._value_by_pid.update(other._value_by_pid)
+    return ret
+
+  @property
+  def value_by_pid(self):
+    return self._value_by_pid
+
+  def total_sum(self):
+    """Returns the sum of all values contained by this object. """
+    return sum(self._value_by_pid.values())
+
+
+class IdleWakeupTimelineData(ProcessStatisticTimelineData):
+  """A ProcessStatisticTimelineData to hold idle wakeups."""
+  pass
diff --git a/catapult/telemetry/telemetry/util/process_statistic_timeline_data_unittest.py b/catapult/telemetry/telemetry/util/process_statistic_timeline_data_unittest.py
new file mode 100644
index 0000000..98f0346
--- /dev/null
+++ b/catapult/telemetry/telemetry/util/process_statistic_timeline_data_unittest.py
@@ -0,0 +1,47 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.util import process_statistic_timeline_data
+
+
+class ProcessStatisticTimelineDataTest(unittest.TestCase):
+
+  def testProcessStatisticValueMath(self):
+    pid1 = 1
+    pid2 = 2
+
+    a = process_statistic_timeline_data.ProcessStatisticTimelineData(pid1, 5)
+    b = process_statistic_timeline_data.ProcessStatisticTimelineData(pid2, 1)
+    c = process_statistic_timeline_data.ProcessStatisticTimelineData(pid1, 1)
+
+    # Test addition.
+    addition_result = (a + b).value_by_pid
+    self.assertEquals(5, addition_result[pid1])
+    self.assertEquals(1, addition_result[pid2])
+    self.assertEquals(2, len(addition_result.keys()))
+
+    # Test subtraction.
+    subtraction_result = ((a + b) - c).value_by_pid
+    self.assertEquals(4, subtraction_result[pid1])
+    self.assertEquals(1, subtraction_result[pid2])
+    self.assertEquals(2, len(subtraction_result.keys()))
+
+    # Test subtraction with a pid that exists only in rhs.
+    subtraction_results1 = (a - (b + c)).value_by_pid
+    self.assertEquals(4, subtraction_results1[pid1])
+    self.assertEquals(1, len(subtraction_results1.keys()))
+
+    # Test calculation of total sum.
+    self.assertEquals(6, (a + b).total_sum())
+
+  def testProcessStatisticValueSummary(self):
+    pid1 = 1
+    pid2 = 2
+
+    a = process_statistic_timeline_data.ProcessStatisticTimelineData(pid1, 1)
+    b = process_statistic_timeline_data.ProcessStatisticTimelineData(pid2, 99)
+    c = a + b
+    self.assertEquals(100, c.total_sum())
diff --git a/catapult/telemetry/telemetry/util/rgba_color.py b/catapult/telemetry/telemetry/util/rgba_color.py
new file mode 100644
index 0000000..84e0235
--- /dev/null
+++ b/catapult/telemetry/telemetry/util/rgba_color.py
@@ -0,0 +1,33 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+
+class RgbaColor(collections.namedtuple('RgbaColor', ['r', 'g', 'b', 'a'])):
+  """Encapsulates an RGBA color retrieved from an image."""
+  def __new__(cls, r, g, b, a=255):
+    return super(RgbaColor, cls).__new__(cls, r, g, b, a)
+
+  def __int__(self):
+    return (self.r << 16) | (self.g << 8) | self.b
+
+  def IsEqual(self, expected_color, tolerance=0):
+    """Verifies that the color is within a given tolerance of
+    the expected color."""
+    r_diff = abs(self.r - expected_color.r)
+    g_diff = abs(self.g - expected_color.g)
+    b_diff = abs(self.b - expected_color.b)
+    a_diff = abs(self.a - expected_color.a)
+    return (r_diff <= tolerance and g_diff <= tolerance
+        and b_diff <= tolerance and a_diff <= tolerance)
+
+  def AssertIsRGB(self, r, g, b, tolerance=0):
+    assert self.IsEqual(RgbaColor(r, g, b), tolerance)
+
+  def AssertIsRGBA(self, r, g, b, a, tolerance=0):
+    assert self.IsEqual(RgbaColor(r, g, b, a), tolerance)
+
+
+WEB_PAGE_TEST_ORANGE = RgbaColor(222, 100, 13)
+WHITE = RgbaColor(255, 255, 255)
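+
+# Tolerance sketch: RgbaColor(10, 20, 30).IsEqual(RgbaColor(12, 18, 30),
+# tolerance=2) is True because every per-channel difference (2, 2, 0, 0) is
+# within the tolerance, while tolerance=1 would make it False.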
diff --git a/catapult/telemetry/telemetry/util/statistics.py b/catapult/telemetry/telemetry/util/statistics.py
new file mode 100644
index 0000000..381c6b4
--- /dev/null
+++ b/catapult/telemetry/telemetry/util/statistics.py
@@ -0,0 +1,346 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A collection of statistical utility functions to be used by metrics."""
+
+import math
+
+
+def Clamp(value, low=0.0, high=1.0):
+  """Clamp a value between some low and high value."""
+  return min(max(value, low), high)
+
+
+def NormalizeSamples(samples):
+  """Sorts the samples, and map them linearly to the range [0,1].
+
+  They're mapped such that for the N samples, the first sample is 0.5/N and the
+  last sample is (N-0.5)/N.
+
+  Background: The discrepancy of the sample set i/(N-1); i=0, ..., N-1 is 2/N,
+  twice the discrepancy of the sample set (i+1/2)/N; i=0, ..., N-1. In our case
+  we don't want to distinguish between these two cases, as our original domain
+  is not bounded (it is for Monte Carlo integration, where discrepancy was
+  first used).
+  """
+  if not samples:
+    return samples, 1.0
+  samples = sorted(samples)
+  low = min(samples)
+  high = max(samples)
+  new_low = 0.5 / len(samples)
+  new_high = (len(samples)-0.5) / len(samples)
+  if high-low == 0.0:
+    return [0.5] * len(samples), 1.0
+  scale = (new_high - new_low) / (high - low)
+  for i in xrange(0, len(samples)):
+    samples[i] = float(samples[i] - low) * scale + new_low
+  return samples, scale
+
+
+def Discrepancy(samples, location_count=None):
+  """Computes the discrepancy of a set of 1D samples from the interval [0,1].
+
+  The samples must be sorted. We define the discrepancy of an empty set
+  of samples to be zero.
+
+  http://en.wikipedia.org/wiki/Low-discrepancy_sequence
+  http://mathworld.wolfram.com/Discrepancy.html
+  """
+  if not samples:
+    return 0.0
+
+  max_local_discrepancy = 0
+  inv_sample_count = 1.0 / len(samples)
+  locations = []
+  # For each location, stores the number of samples less than that location.
+  count_less = []
+  # For each location, stores the number of samples less than or equal to that
+  # location.
+  count_less_equal = []
+
+  if location_count:
+    # Generate list of equally spaced locations.
+    sample_index = 0
+    for i in xrange(0, int(location_count)):
+      location = float(i) / (location_count-1)
+      locations.append(location)
+      while sample_index < len(samples) and samples[sample_index] < location:
+        sample_index += 1
+      count_less.append(sample_index)
+      while sample_index < len(samples) and samples[sample_index] <= location:
+        sample_index += 1
+      count_less_equal.append(sample_index)
+  else:
+    # Populate locations with sample positions. Append 0 and 1 if necessary.
+    if samples[0] > 0.0:
+      locations.append(0.0)
+      count_less.append(0)
+      count_less_equal.append(0)
+    for i in xrange(0, len(samples)):
+      locations.append(samples[i])
+      count_less.append(i)
+      count_less_equal.append(i+1)
+    if samples[-1] < 1.0:
+      locations.append(1.0)
+      count_less.append(len(samples))
+      count_less_equal.append(len(samples))
+
+  # Compute discrepancy as max(overshoot, -undershoot), where
+  # overshoot = max(count_closed(i, j)/N - length(i, j)) for all i < j,
+  # undershoot = min(count_open(i, j)/N - length(i, j)) for all i < j,
+  # N = len(samples),
+  # count_closed(i, j) is the number of points between i and j including ends,
+  # count_open(i, j) is the number of points between i and j excluding ends,
+  # length(i, j) is locations[i] - locations[j].
+
+  # The following algorithm is modification of Kadane's algorithm,
+  # see https://en.wikipedia.org/wiki/Maximum_subarray_problem.
+
+  # The maximum of (count_closed(k, i-1)/N - length(k, i-1)) for any k < i-1.
+  max_diff = 0
+  # The minimum of (count_open(k, i-1)/N - length(k, i-1)) for any k < i-1.
+  min_diff = 0
+  for i in xrange(1, len(locations)):
+    length = locations[i] - locations[i - 1]
+    count_closed = count_less_equal[i] - count_less[i - 1]
+    count_open = count_less[i] - count_less_equal[i - 1]
+    # Number of points that are added if we extend a closed range that
+    # ends at location (i-1).
+    count_closed_increment = count_less_equal[i] - count_less_equal[i - 1]
+    # Number of points that are added if we extend an open range that
+    # ends at location (i-1).
+    count_open_increment = count_less[i] - count_less[i - 1]
+
+    # Either extend the previous optimal range or start a new one.
+    max_diff = max(
+        float(count_closed_increment) * inv_sample_count - length + max_diff,
+        float(count_closed) * inv_sample_count - length)
+    min_diff = min(
+        float(count_open_increment) * inv_sample_count - length + min_diff,
+        float(count_open) * inv_sample_count - length)
+
+    max_local_discrepancy = max(max_diff, -min_diff, max_local_discrepancy)
+  return max_local_discrepancy
+
+
+def TimestampsDiscrepancy(timestamps, absolute=True,
+                          location_count=None):
+  """A discrepancy based metric for measuring timestamp jank.
+
+  TimestampsDiscrepancy quantifies the largest area of jank observed in a series
+  of timestamps.  Note that this is different from metrics based on the
+  max_time_interval. For example, the time stamp series A = [0,1,2,3,5,6] and
+  B = [0,1,2,3,5,7] have the same max_time_interval = 2, but
+  Discrepancy(B) > Discrepancy(A).
+
+  Two variants of discrepancy can be computed:
+
+  Relative discrepancy follows the original definition of discrepancy. It
+  characterizes the largest area of jank relative to the duration of the
+  entire time stamp series. We normalize the raw results,
+  because the best case discrepancy for a set of N samples is 1/N (for
+  equally spaced samples), and we want our metric to report 0.0 in that
+  case.
+
+  Absolute discrepancy also characterizes the largest area of jank, but its
+  value wouldn't change (except for imprecision due to a low
+  |location_count|) if additional 'good' intervals were added to an
+  existing list of time stamps. Its range is [0,inf] and the unit is
+  milliseconds.
+
+  The time stamp series C = [0,2,3,4] and D = [0,2,3,4,5] have the same
+  absolute discrepancy, but D has lower relative discrepancy than C.
+
+  |timestamps| may be a list of lists S = [S_1, S_2, ..., S_N], where each
+  S_i is a time stamp series. In that case, the discrepancy D(S) is:
+  D(S) = max(D(S_1), D(S_2), ..., D(S_N))
+  """
+  if not timestamps:
+    return 0.0
+
+  if isinstance(timestamps[0], list):
+    range_discrepancies = [TimestampsDiscrepancy(r) for r in timestamps]
+    return max(range_discrepancies)
+
+  samples, sample_scale = NormalizeSamples(timestamps)
+  discrepancy = Discrepancy(samples, location_count)
+  inv_sample_count = 1.0 / len(samples)
+  if absolute:
+    # Compute absolute discrepancy
+    discrepancy /= sample_scale
+  else:
+    # Compute relative discrepancy
+    discrepancy = Clamp((discrepancy-inv_sample_count) / (1.0-inv_sample_count))
+  return discrepancy
+
+
+def DurationsDiscrepancy(durations, absolute=True,
+                         location_count=None):
+  """A discrepancy based metric for measuring duration jank.
+
+  DurationsDiscrepancy computes a jank metric which measures how irregular a
+  given sequence of intervals is. In order to minimize jank, each duration
+  should be equally long. This is similar to how timestamp jank works,
+  and we therefore reuse the timestamp discrepancy function above to compute a
+  similar duration discrepancy number.
+
+  Because timestamp discrepancy is defined in terms of timestamps, we first
+  convert the list of durations to monotonically increasing timestamps.
+
+  Args:
+    durations: List of interval lengths in milliseconds.
+    absolute: See TimestampsDiscrepancy.
+    location_count: See TimestampsDiscrepancy.
+  """
+  if not durations:
+    return 0.0
+
+  timestamps = reduce(lambda x, y: x + [x[-1] + y], durations, [0])
+  return TimestampsDiscrepancy(timestamps, absolute, location_count)
+
+
+def ArithmeticMean(data):
+  """Calculates arithmetic mean.
+
+  Args:
+    data: A list of samples.
+
+  Returns:
+    The arithmetic mean value, or 0 if the list is empty.
+  """
+  numerator_total = Total(data)
+  denominator_total = Total(len(data))
+  return DivideIfPossibleOrZero(numerator_total, denominator_total)
+
+
+def StandardDeviation(data):
+  """Calculates the standard deviation.
+
+  Args:
+    data: A list of samples.
+
+  Returns:
+    The standard deviation of the samples provided.
+  """
+  if len(data) == 1:
+    return 0.0
+
+  mean = ArithmeticMean(data)
+  variances = [float(x) - mean for x in data]
+  variances = [x * x for x in variances]
+  std_dev = math.sqrt(ArithmeticMean(variances))
+
+  return std_dev
+
+
+def TrapezoidalRule(data, dx):
+  """ Calculate the integral according to the trapezoidal rule
+
+  TrapezoidalRule approximates the definite integral of f from a to b by
+  the composite trapezoidal rule, using n subintervals.
+  http://en.wikipedia.org/wiki/Trapezoidal_rule#Uniform_grid
+
+  Args:
+    data: A list of samples
+    dx: The uniform distance along the x axis between any two samples
+
+  Returns:
+    The area under the curve defined by the samples and the uniform distance
+    according to the trapezoidal rule.
+  """
+
+  n = len(data) - 1
+  s = data[0] + data[n]
+
+  if n == 0:
+    return 0.0
+
+  for i in range(1, n):
+    s += 2 * data[i]
+
+  return s * dx / 2.0
+
+
+def Total(data):
+  """Returns the float value of a number or the sum of a list."""
+  if type(data) == float:
+    total = data
+  elif type(data) == int:
+    total = float(data)
+  elif type(data) == list:
+    total = float(sum(data))
+  else:
+    raise TypeError
+  return total
+
+
+def DivideIfPossibleOrZero(numerator, denominator):
+  """Returns the quotient, or zero if the denominator is zero."""
+  return (float(numerator) / float(denominator)) if denominator else 0
+
+
+def GeneralizedMean(values, exponent):
+  """See http://en.wikipedia.org/wiki/Generalized_mean"""
+  if not values:
+    return 0.0
+  sum_of_powers = 0.0
+  for v in values:
+    sum_of_powers += v ** exponent
+  return (sum_of_powers / len(values)) ** (1.0/exponent)
+
+
+def Median(values):
+  """Gets the median of a list of values."""
+  return Percentile(values, 50)
+
+
+def Percentile(values, percentile):
+  """Calculates the value below which a given percentage of values fall.
+
+  For example, if 17% of the values are less than 5.0, then 5.0 is the 17th
+  percentile for this set of values. When the percentage doesn't exactly
+  match a rank in the list of values, the percentile is computed using linear
+  interpolation between closest ranks.
+
+  Args:
+    values: A list of numerical values.
+    percentile: A number between 0 and 100.
+
+  Returns:
+    The Nth percentile for the list of values, where N is the given percentage.
+  """
+  if not values:
+    return 0.0
+  sorted_values = sorted(values)
+  n = len(values)
+  percentile /= 100.0
+  if percentile <= 0.5 / n:
+    return sorted_values[0]
+  elif percentile >= (n - 0.5) / n:
+    return sorted_values[-1]
+  else:
+    floor_index = int(math.floor(n * percentile - 0.5))
+    floor_value = sorted_values[floor_index]
+    ceil_value = sorted_values[floor_index+1]
+    alpha = n * percentile - 0.5 - floor_index
+    return floor_value + alpha * (ceil_value - floor_value)
+
+
+def GeometricMean(values):
+  """Compute a rounded geometric mean from an array of values."""
+  if not values:
+    return None
+  # To avoid infinite value errors, make sure no value is less than 0.001.
+  new_values = []
+  for value in values:
+    if value > 0.001:
+      new_values.append(value)
+    else:
+      new_values.append(0.001)
+  # Compute the sum of the log of the values.
+  log_sum = sum(map(math.log, new_values))
+  # Raise e to that sum over the number of values.
+  mean = math.pow(math.e, (log_sum / len(new_values)))
+  # Return the rounded mean.
+  return int(round(mean))
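+
+# Quick sketch of the clamping and rounding behavior: GeometricMean([1, 10,
+# 100]) returns 10 and GeometricMean([]) returns None. Any value at or below
+# 0.001 is clamped to 0.001 before the logs are taken, so GeometricMean(
+# [0, 1000]) is computed over [0.001, 1000] and returns 1.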
diff --git a/catapult/telemetry/telemetry/util/statistics_unittest.py b/catapult/telemetry/telemetry/util/statistics_unittest.py
new file mode 100644
index 0000000..8468a16
--- /dev/null
+++ b/catapult/telemetry/telemetry/util/statistics_unittest.py
@@ -0,0 +1,219 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import math
+import random
+import unittest
+
+from telemetry.util import statistics
+
+
+def Relax(samples, iterations=10):
+  """Lloyd relaxation in 1D.
+
+  Keeps the position of the first and last sample.
+  """
+  for _ in xrange(0, iterations):
+    voronoi_boundaries = []
+    for i in xrange(1, len(samples)):
+      voronoi_boundaries.append((samples[i] + samples[i-1]) * 0.5)
+
+    relaxed_samples = []
+    relaxed_samples.append(samples[0])
+    for i in xrange(1, len(samples)-1):
+      relaxed_samples.append(
+          (voronoi_boundaries[i-1] + voronoi_boundaries[i]) * 0.5)
+    relaxed_samples.append(samples[-1])
+    samples = relaxed_samples
+  return samples
+
+def CreateRandomSamples(num_samples):
+  samples = []
+  position = 0.0
+  samples.append(position)
+  for _ in xrange(1, num_samples):
+    position += random.random()
+    samples.append(position)
+  return samples
+
+class StatisticsUnitTest(unittest.TestCase):
+
+  def testNormalizeSamples(self):
+    samples = []
+    normalized_samples, scale = statistics.NormalizeSamples(samples)
+    self.assertEquals(normalized_samples, [])
+    self.assertEquals(scale, 1.0)
+
+    samples = [0.0, 0.0]
+    normalized_samples, scale = statistics.NormalizeSamples(samples)
+    self.assertEquals(normalized_samples, [0.5, 0.5])
+    self.assertEquals(scale, 1.0)
+
+    samples = [0.0, 1.0/3.0, 2.0/3.0, 1.0]
+    normalized_samples, scale = statistics.NormalizeSamples(samples)
+    self.assertEquals(normalized_samples, [1.0/8.0, 3.0/8.0, 5.0/8.0, 7.0/8.0])
+    self.assertEquals(scale, 0.75)
+
+    samples = [1.0/8.0, 3.0/8.0, 5.0/8.0, 7.0/8.0]
+    normalized_samples, scale = statistics.NormalizeSamples(samples)
+    self.assertEquals(normalized_samples, samples)
+    self.assertEquals(scale, 1.0)
+
+  def testDiscrepancyRandom(self):
+    """Tests NormalizeSamples and Discrepancy with random samples.
+
+    Generates 10 sets of 10 random samples, computes the discrepancy,
+    relaxes the samples using Lloyd's algorithm in 1D, and computes the
+    discrepancy of the relaxed samples. Discrepancy of the relaxed samples
+    must be less than or equal to the discrepancy of the original samples.
+    """
+    random.seed(1234567)
+    for _ in xrange(0, 10):
+      samples = CreateRandomSamples(10)
+      samples = statistics.NormalizeSamples(samples)[0]
+      d = statistics.Discrepancy(samples)
+      relaxed_samples = Relax(samples)
+      d_relaxed = statistics.Discrepancy(relaxed_samples)
+      self.assertTrue(d_relaxed <= d)
+
+  def testDiscrepancyAnalytic(self):
+    """Computes discrepancy for sample sets with known statistics."""
+    samples = []
+    d = statistics.Discrepancy(samples)
+    self.assertEquals(d, 0.0)
+
+    samples = [0.5]
+    d = statistics.Discrepancy(samples)
+    self.assertEquals(d, 0.5)
+
+    samples = [0.0, 1.0]
+    d = statistics.Discrepancy(samples)
+    self.assertEquals(d, 1.0)
+
+    samples = [0.5, 0.5, 0.5]
+    d = statistics.Discrepancy(samples)
+    self.assertEquals(d, 1.0)
+
+    samples = [1.0/8.0, 3.0/8.0, 5.0/8.0, 7.0/8.0]
+    d = statistics.Discrepancy(samples)
+    self.assertEquals(d, 0.25)
+
+    samples = [1.0/8.0, 5.0/8.0, 5.0/8.0, 7.0/8.0]
+    d = statistics.Discrepancy(samples)
+    self.assertEquals(d, 0.5)
+
+    samples = [1.0/8.0, 3.0/8.0, 5.0/8.0, 5.0/8.0, 7.0/8.0]
+    d = statistics.Discrepancy(samples)
+    self.assertEquals(d, 0.4)
+
+    samples = [0.0, 1.0/3.0, 2.0/3.0, 1.0]
+    d = statistics.Discrepancy(samples)
+    self.assertEquals(d, 0.5)
+
+    samples = statistics.NormalizeSamples(samples)[0]
+    d = statistics.Discrepancy(samples)
+    self.assertEquals(d, 0.25)
+
+  def testTimestampsDiscrepancy(self):
+    time_stamps = []
+    d_abs = statistics.TimestampsDiscrepancy(time_stamps, True)
+    self.assertEquals(d_abs, 0.0)
+
+    time_stamps = [4]
+    d_abs = statistics.TimestampsDiscrepancy(time_stamps, True)
+    self.assertEquals(d_abs, 0.5)
+
+    time_stamps_a = [0, 1, 2, 3, 5, 6]
+    time_stamps_b = [0, 1, 2, 3, 5, 7]
+    time_stamps_c = [0, 2, 3, 4]
+    time_stamps_d = [0, 2, 3, 4, 5]
+
+    d_abs_a = statistics.TimestampsDiscrepancy(time_stamps_a, True)
+    d_abs_b = statistics.TimestampsDiscrepancy(time_stamps_b, True)
+    d_abs_c = statistics.TimestampsDiscrepancy(time_stamps_c, True)
+    d_abs_d = statistics.TimestampsDiscrepancy(time_stamps_d, True)
+    d_rel_a = statistics.TimestampsDiscrepancy(time_stamps_a, False)
+    d_rel_b = statistics.TimestampsDiscrepancy(time_stamps_b, False)
+    d_rel_c = statistics.TimestampsDiscrepancy(time_stamps_c, False)
+    d_rel_d = statistics.TimestampsDiscrepancy(time_stamps_d, False)
+
+    self.assertTrue(d_abs_a < d_abs_b)
+    self.assertTrue(d_rel_a < d_rel_b)
+    self.assertTrue(d_rel_d < d_rel_c)
+    self.assertAlmostEquals(d_abs_d, d_abs_c)
+
+  def testDiscrepancyMultipleRanges(self):
+    samples = [[0.0, 1.2, 2.3, 3.3], [6.3, 7.5, 8.4], [4.2, 5.4, 5.9]]
+    d_0 = statistics.TimestampsDiscrepancy(samples[0])
+    d_1 = statistics.TimestampsDiscrepancy(samples[1])
+    d_2 = statistics.TimestampsDiscrepancy(samples[2])
+    d = statistics.TimestampsDiscrepancy(samples)
+    self.assertEquals(d, max(d_0, d_1, d_2))
+
+  def testApproximateDiscrepancy(self):
+    """Tests approimate discrepancy implementation by comparing to exact
+    solution.
+    """
+    random.seed(1234567)
+    for _ in xrange(0, 5):
+      samples = CreateRandomSamples(10)
+      samples = statistics.NormalizeSamples(samples)[0]
+      d = statistics.Discrepancy(samples)
+      d_approx = statistics.Discrepancy(samples, 500)
+      self.assertEquals(round(d, 2), round(d_approx, 2))
+
+  def testPercentile(self):
+    # The 50th percentile is the median value.
+    self.assertEquals(3, statistics.Percentile([4, 5, 1, 3, 2], 50))
+    self.assertEquals(2.5, statistics.Percentile([5, 1, 3, 2], 50))
+    # When the list of values is empty, 0 is returned.
+    self.assertEquals(0, statistics.Percentile([], 50))
+    # When the given percentage is very low, the lowest value is given.
+    self.assertEquals(1, statistics.Percentile([2, 1, 5, 4, 3], 5))
+    # When the given percentage is very high, the highest value is given.
+    self.assertEquals(5, statistics.Percentile([5, 2, 4, 1, 3], 95))
+    # Linear interpolation between closest ranks is used. Using the example
+    # from <http://en.wikipedia.org/wiki/Percentile>:
+    self.assertEquals(27.5, statistics.Percentile([15, 20, 35, 40, 50], 40))
+
+  def testArithmeticMean(self):
+    # The ArithmeticMean function computes the simple average.
+    self.assertAlmostEquals(40/3.0, statistics.ArithmeticMean([10, 10, 20]))
+    self.assertAlmostEquals(15.0, statistics.ArithmeticMean([10, 20]))
+    # If the 'count' is zero, then zero is returned.
+    self.assertEquals(0, statistics.ArithmeticMean([]))
+
+  def testDurationsDiscrepancy(self):
+    durations = []
+    d = statistics.DurationsDiscrepancy(durations)
+    self.assertEquals(d, 0.0)
+
+    durations = [4]
+    d = statistics.DurationsDiscrepancy(durations)
+    self.assertEquals(d, 4.0)
+
+    durations_a = [1, 1, 1, 1, 1]
+    durations_b = [1, 1, 2, 1, 1]
+    durations_c = [1, 2, 1, 2, 1]
+
+    d_a = statistics.DurationsDiscrepancy(durations_a)
+    d_b = statistics.DurationsDiscrepancy(durations_b)
+    d_c = statistics.DurationsDiscrepancy(durations_c)
+
+    self.assertTrue(d_a < d_b < d_c)
+
+  def testStandardDeviation(self):
+    self.assertAlmostEquals(math.sqrt(2/3.0),
+                            statistics.StandardDeviation([1, 2, 3]))
+    self.assertEquals(0, statistics.StandardDeviation([1]))
+    self.assertEquals(0, statistics.StandardDeviation([]))
+
+  def testTrapezoidalRule(self):
+    self.assertEquals(4, statistics.TrapezoidalRule([1, 2, 3], 1))
+    self.assertEquals(2, statistics.TrapezoidalRule([1, 2, 3], .5))
+    self.assertEquals(0, statistics.TrapezoidalRule([1, 2, 3], 0))
+    self.assertEquals(-4, statistics.TrapezoidalRule([1, 2, 3], -1))
+    self.assertEquals(3, statistics.TrapezoidalRule([-1, 2, 3], 1))
+    self.assertEquals(0, statistics.TrapezoidalRule([1], 1))
+    self.assertEquals(0, statistics.TrapezoidalRule([0], 1))
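The percentile expectations in testPercentile above are consistent with the
C = 1/2 variant of linear interpolation between closest ranks (rank = n*p - 0.5
over the sorted values). A minimal sketch of that variant, shown only for
illustration and not necessarily the exact statistics.Percentile
implementation:

import math

def percentile_c_half(values, percentile):
  # Linear interpolation between closest ranks, C = 1/2 variant.
  if not values:
    return 0.0
  values = sorted(values)
  n = len(values)
  p = percentile / 100.0
  if p <= 0.5 / n:
    return values[0]
  if p >= (n - 0.5) / n:
    return values[-1]
  rank = n * p - 0.5              # fractional index into the sorted list
  low = int(math.floor(rank))
  return values[low] + (rank - low) * (values[low + 1] - values[low])

# Reproduces the expectations above, e.g.:
#   percentile_c_half([15, 20, 35, 40, 50], 40) == 27.5
#   percentile_c_half([5, 1, 3, 2], 50) == 2.5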
diff --git a/catapult/telemetry/telemetry/util/wpr_modes.py b/catapult/telemetry/telemetry/util/wpr_modes.py
new file mode 100644
index 0000000..262a4fb
--- /dev/null
+++ b/catapult/telemetry/telemetry/util/wpr_modes.py
@@ -0,0 +1,7 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+WPR_APPEND = 'wpr-append'
+WPR_OFF = 'wpr-off'
+WPR_RECORD = 'wpr-record'
+WPR_REPLAY = 'wpr-replay'
diff --git a/catapult/telemetry/telemetry/value/__init__.py b/catapult/telemetry/telemetry/value/__init__.py
new file mode 100644
index 0000000..dbd7085
--- /dev/null
+++ b/catapult/telemetry/telemetry/value/__init__.py
@@ -0,0 +1,352 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""
+The Value hierarchy provides a way of representing the values measurements
+produce such that they can be merged across runs, grouped by page, and output
+to different targets.
+
+The core Value concept provides the basic functionality:
+- association with a page, may be none
+- naming and units
+- importance tracking [whether a value will show up on a waterfall or output
+  file by default]
+- other metadata, such as a description of what was measured
+- default conversion to scalar and string
+- merging properties
+
+A page may actually run a few times during a single telemetry session.
+Downstream consumers of test results typically want to group these runs
+together, then compute summary statistics across runs. Value provides the
+Merge* family of methods for this kind of aggregation.
+"""
+import os
+
+from telemetry.core import discover
+from telemetry.core import util
+
+# When combining a pair of Values together, it is sometimes ambiguous whether
+# the values should be concatenated, or one should be picked as representative.
+# The possible merging policies are listed here.
+CONCATENATE = 'concatenate'
+PICK_FIRST = 'pick-first'
+
+# When converting a Value to its buildbot equivalent, the context in which the
+# value is being interpreted actually affects the conversion. This is insane,
+# but there you have it. There are three contexts in which Values are converted
+# for use by buildbot, represented by these output-intent values.
+PER_PAGE_RESULT_OUTPUT_CONTEXT = 'per-page-result-output-context'
+COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT = 'merged-pages-result-output-context'
+SUMMARY_RESULT_OUTPUT_CONTEXT = 'summary-result-output-context'
+
+class Value(object):
+  """An abstract value produced by a telemetry page test.
+  """
+  def __init__(self, page, name, units, important, description,
+               tir_label, grouping_keys):
+    """A generic Value object.
+
+    Args:
+      page: A Page object, may be given as None to indicate that the value
+          represents results for multiple pages.
+      name: A value name string, may contain a dot. Values from the same test
+          with the same prefix before the dot may be considered to belong to
+          the same chart.
+      units: A units string.
+      important: Whether the value is "important". Causes the value to appear
+          by default in downstream UIs.
+      description: A string explaining in human-understandable terms what this
+          value represents.
+      tir_label: The string label of the TimelineInteractionRecord with
+          which this value is associated.
+      grouping_keys: A dict that maps grouping key names to grouping keys.
+    """
+    # TODO(eakuefner): Check story here after migration (crbug.com/442036)
+    if not isinstance(name, basestring):
+      raise ValueError('name field of Value must be string.')
+    if not isinstance(units, basestring):
+      raise ValueError('units field of Value must be string.')
+    if not isinstance(important, bool):
+      raise ValueError('important field of Value must be bool.')
+    if not ((description is None) or isinstance(description, basestring)):
+      raise ValueError('description field of Value must be absent or a '
+                       'string.')
+    if not ((tir_label is None) or
+            isinstance(tir_label, basestring)):
+      raise ValueError('tir_label field of Value must be absent or a '
+                       'string.')
+    if not ((grouping_keys is None) or isinstance(grouping_keys, dict)):
+      raise ValueError('grouping_keys field of Value must be absent or a '
+                       'dict.')
+
+    if grouping_keys is None:
+      grouping_keys = {}
+
+    self.page = page
+    self.name = name
+    self.units = units
+    self.important = important
+    self.description = description
+    self.tir_label = tir_label
+    self.grouping_keys = grouping_keys
+
+  def __eq__(self, other):
+    return hash(self) == hash(other)
+
+  def __hash__(self):
+    return hash(str(self))
+
+  def IsMergableWith(self, that):
+    return (self.units == that.units and
+            type(self) == type(that) and
+            self.important == that.important)
+
+  @classmethod
+  def MergeLikeValuesFromSamePage(cls, values):
+    """Combines the provided list of values into a single compound value.
+
+    When a page runs multiple times, it may produce multiple values. This
+    function is given the same-named values across the multiple runs, and has
+    the responsibility of producing a single result.
+
+    It must return a single Value. If merging does not make sense, the
+    implementation must pick a representative value from one of the runs.
+
+    For instance, it may be given
+        [ScalarValue(page, 'a', 1), ScalarValue(page, 'a', 2)]
+    and it might produce
+        ListOfScalarValues(page, 'a', [1, 2])
+    """
+    raise NotImplementedError()
+
+  @classmethod
+  def MergeLikeValuesFromDifferentPages(cls, values):
+    """Combines the provided values into a single compound value.
+
+    When a full pageset runs, a single value_name will usually end up getting
+    collected for multiple pages. For instance, we may end up with
+       [ScalarValue(page1, 'a',  1),
+        ScalarValue(page2, 'a',  2)]
+
+    This function takes in the values of the same name, but across multiple
+    pages, and produces a single summary result value. In this instance, it
+    could produce a ScalarValue(None, 'a', 1.5) to indicate averaging, or even
+    ListOfScalarValues(None, 'a', [1, 2]) if concatenated output was desired.
+
+    Some results are so specific to a page that they make no sense when
+    aggregated across pages. If merging values of this type across pages is
+    nonsensical, this method may return None.
+    """
+    raise NotImplementedError()
+
+  def _IsImportantGivenOutputIntent(self, output_context):
+    if output_context == PER_PAGE_RESULT_OUTPUT_CONTEXT:
+      return False
+    elif output_context == COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT:
+      return self.important
+    elif output_context == SUMMARY_RESULT_OUTPUT_CONTEXT:
+      return self.important
+
+  def GetBuildbotDataType(self, output_context):
+    """Returns the buildbot's equivalent data_type.
+
+    This should be one of the values accepted by perf_tests_results_helper.py.
+    """
+    raise NotImplementedError()
+
+  def GetBuildbotValue(self):
+    """Returns the buildbot's equivalent value."""
+    raise NotImplementedError()
+
+  def GetChartAndTraceNameForPerPageResult(self):
+    chart_name, _ = _ConvertValueNameToChartAndTraceName(self.name)
+    trace_name = self.page.display_name
+    return chart_name, trace_name
+
+  @property
+  def name_suffix(self):
+    """Returns the string after a . in the name, or the full name otherwise."""
+    if '.' in self.name:
+      return self.name.split('.', 1)[1]
+    else:
+      return self.name
+
+  def GetChartAndTraceNameForComputedSummaryResult(
+      self, trace_tag):
+    chart_name, trace_name = (
+        _ConvertValueNameToChartAndTraceName(self.name))
+    if trace_tag:
+      return chart_name, trace_name + trace_tag
+    else:
+      return chart_name, trace_name
+
+  def GetRepresentativeNumber(self):
+    """Gets a single scalar value that best-represents this value.
+
+    Returns None if not possible.
+    """
+    raise NotImplementedError()
+
+  def GetRepresentativeString(self):
+    """Gets a string value that best-represents this value.
+
+    Returns None if not possible.
+    """
+    raise NotImplementedError()
+
+  @staticmethod
+  def GetJSONTypeName():
+    """Gets the typename for serialization to JSON using AsDict."""
+    raise NotImplementedError()
+
+  def AsDict(self):
+    """Pre-serializes a value to a dict for output as JSON."""
+    return self._AsDictImpl()
+
+  def _AsDictImpl(self):
+    d = {
+      'name': self.name,
+      'type': self.GetJSONTypeName(),
+      'units': self.units,
+      'important': self.important
+    }
+
+    if self.description:
+      d['description'] = self.description
+
+    if self.tir_label:
+      d['tir_label'] = self.tir_label
+
+    if self.page:
+      d['page_id'] = self.page.id
+
+    if self.grouping_keys:
+      d['grouping_keys'] = self.grouping_keys
+
+    return d
+
+  def AsDictWithoutBaseClassEntries(self):
+    full_dict = self.AsDict()
+    base_dict_keys = set(self._AsDictImpl().keys())
+
+    # Extracts only entries added by the subclass.
+    return dict([(k, v) for (k, v) in full_dict.iteritems()
+                  if k not in base_dict_keys])
+
+  @staticmethod
+  def FromDict(value_dict, page_dict):
+    """Produces a value from a value dict and a page dict.
+
+    Value dicts are produced by serialization to JSON, and must be accompanied
+    by a dict mapping page IDs to pages, also produced by serialization, in
+    order to be completely deserialized. If deserializing multiple values, use
+    ListOfValuesFromListOfDicts instead.
+
+    value_dict: a dictionary produced by AsDict() on a value subclass.
+    page_dict: a dictionary mapping IDs to page objects.
+    """
+    return Value.ListOfValuesFromListOfDicts([value_dict], page_dict)[0]
+
+  @staticmethod
+  def ListOfValuesFromListOfDicts(value_dicts, page_dict):
+    """Takes a list of value dicts to values.
+
+    Given a list of value dicts produced by AsDict, this method
+    deserializes the dicts given a dict mapping page IDs to pages.
+    It discovers the available Value classes once for the whole list, so it
+    is more efficient than calling FromDict on each dict; FromDict is meant
+    for one-offs.
+
+    value_dicts: a list of value dicts produced by AsDict() on a value
+        subclass.
+    page_dict: a dictionary mapping IDs to page objects.
+    """
+    value_dir = os.path.dirname(__file__)
+    value_classes = discover.DiscoverClasses(
+        value_dir, util.GetTelemetryDir(),
+        Value, index_by_class_name=True)
+
+    value_json_types = dict((value_classes[x].GetJSONTypeName(), x) for x in
+        value_classes)
+
+    values = []
+    for value_dict in value_dicts:
+      value_class = value_classes[value_json_types[value_dict['type']]]
+      assert 'FromDict' in value_class.__dict__, \
+             'Subclass doesn\'t override FromDict'
+      values.append(value_class.FromDict(value_dict, page_dict))
+
+    return values
+
+  @staticmethod
+  def GetConstructorKwArgs(value_dict, page_dict):
+    """Produces constructor arguments from a value dict and a page dict.
+
+    Takes a dict parsed from JSON and an index of pages and recovers the
+    keyword arguments to be passed to the constructor for deserializing the
+    dict.
+
+    value_dict: a dictionary produced by AsDict() on a value subclass.
+    page_dict: a dictionary mapping IDs to page objects.
+    """
+    d = {
+      'name': value_dict['name'],
+      'units': value_dict['units']
+    }
+
+    description = value_dict.get('description', None)
+    if description:
+      d['description'] = description
+    else:
+      d['description'] = None
+
+    page_id = value_dict.get('page_id', None)
+    if page_id is not None:
+      d['page'] = page_dict[int(page_id)]
+    else:
+      d['page'] = None
+
+    d['important'] = False
+
+    tir_label = value_dict.get('tir_label', None)
+    if tir_label:
+      d['tir_label'] = tir_label
+    else:
+      d['tir_label'] = None
+
+    grouping_keys = value_dict.get('grouping_keys', None)
+    if grouping_keys:
+      d['grouping_keys'] = grouping_keys
+    else:
+      d['grouping_keys'] = None
+
+    return d
+
+def ValueNameFromTraceAndChartName(trace_name, chart_name=None):
+  """Mangles a trace name plus optional chart name into a standard string.
+
+  A value might just be a bareword name, e.g. numPixels. In that case, its
+  chart may be None.
+
+  But, a value might also be intended for display with other values, in which
+  case the chart name indicates that grouping. So, you might have
+  screen.numPixels, screen.resolution, where chartName='screen'.
+  """
+  assert trace_name != 'url', 'The name url cannot be used'
+  if chart_name:
+    return '%s.%s' % (chart_name, trace_name)
+  else:
+    assert '.' not in trace_name, ('Trace names cannot contain "." with an '
+        'empty chart_name since this is used to delimit chart_name.trace_name.')
+    return trace_name
+
+def _ConvertValueNameToChartAndTraceName(value_name):
+  """Converts a value_name into the equivalent chart-trace name pair.
+
+  Buildbot represents values by the measurement name and an optional trace name,
+  whereas telemetry represents values with a chart_name.trace_name convention,
+  where chart_name is optional. This convention is also used by chart_json.
+
+  This converts from the telemetry convention to the buildbot convention,
+  returning a 2-tuple (measurement_name, trace_name).
+  """
+  if '.' in value_name:
+    return value_name.split('.', 1)
+  else:
+    return value_name, value_name
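A small usage sketch for the helpers above: deserializing a value dict via
Value.FromDict (using the 'histogram' type added later in this patch) and
composing a value name with the chart_name.trace_name convention. The concrete
names and numbers are only illustrative.

from telemetry import value as value_module

# A value dict as produced by AsDict() on a subclass; there is no 'page_id'
# key, so an empty page_dict suffices.
value_dict = {
    'type': 'histogram',
    'name': 'x',
    'units': 'counts',
    'buckets': [{'low': 1, 'high': 2, 'count': 1}],
}
v = value_module.Value.FromDict(value_dict, {})

# Mangling a trace name into a chart yields 'screen.numPixels'.
name = value_module.ValueNameFromTraceAndChartName('numPixels', 'screen')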
diff --git a/catapult/telemetry/telemetry/value/failure.py b/catapult/telemetry/telemetry/value/failure.py
new file mode 100644
index 0000000..27086f4
--- /dev/null
+++ b/catapult/telemetry/telemetry/value/failure.py
@@ -0,0 +1,102 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+import traceback
+
+from telemetry import value as value_module
+
+
+class FailureValue(value_module.Value):
+
+  def __init__(self, page, exc_info, description=None, tir_label=None,
+               grouping_keys=None):
+    """A value representing a failure when running the page.
+
+    Args:
+      page: The page where this failure occurs.
+      exc_info: The exception info (sys.exc_info()) corresponding to
+          this failure.
+    """
+    exc_type = exc_info[0].__name__
+    super(FailureValue, self).__init__(page, exc_type, '', True, description,
+                                       tir_label, grouping_keys)
+    self._exc_info = exc_info
+
+  @classmethod
+  def FromMessage(cls, page, message):
+    """Creates a failure value for a given string message.
+
+    Args:
+      page: The page where this failure occurs.
+      message: A string message describing the failure.
+    """
+    exc_info = cls._GetExcInfoFromMessage(message)
+    return FailureValue(page, exc_info)
+
+  @staticmethod
+  def _GetExcInfoFromMessage(message):
+    try:
+      raise Exception(message)
+    except Exception:
+      return sys.exc_info()
+
+  def __repr__(self):
+    if self.page:
+      page_name = self.page.display_name
+    else:
+      page_name = 'None'
+    return 'FailureValue(%s, %s)' % (
+        page_name, GetStringFromExcInfo(self._exc_info))
+
+  @property
+  def exc_info(self):
+    return self._exc_info
+
+  def GetBuildbotDataType(self, output_context):
+    return None
+
+  def GetBuildbotValue(self):
+    return None
+
+  def GetChartAndTraceNameForPerPageResult(self):
+    return None
+
+  def GetRepresentativeNumber(self):
+    return None
+
+  def GetRepresentativeString(self):
+    return None
+
+  @staticmethod
+  def GetJSONTypeName():
+    return 'failure'
+
+  def AsDict(self):
+    d = super(FailureValue, self).AsDict()
+    d['value'] = GetStringFromExcInfo(self.exc_info)
+    return d
+
+  @staticmethod
+  def FromDict(value_dict, page_dict):
+    kwargs = value_module.Value.GetConstructorKwArgs(value_dict, page_dict)
+    del kwargs['name']
+    del kwargs['units']
+    if 'important' in kwargs:
+      del kwargs['important']
+    kwargs['exc_info'] = FailureValue._GetExcInfoFromMessage(
+        value_dict['value'])
+
+    return FailureValue(**kwargs)
+
+  @classmethod
+  def MergeLikeValuesFromSamePage(cls, values):
+    assert False, 'Should not be called.'
+
+  @classmethod
+  def MergeLikeValuesFromDifferentPages(cls, values):
+    assert False, 'Should not be called.'
+
+def GetStringFromExcInfo(exc_info):
+  return ''.join(traceback.format_exception(*exc_info))
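A brief sketch of the two ways a FailureValue is typically created: from a
plain message and from a caught exception. Passing None for page here simply
means the failure is not tied to a specific page object in this sketch.

import sys

from telemetry.value import failure

# From a message: an exception is raised and caught internally so exc_info
# carries a real traceback.
v = failure.FailureValue.FromMessage(None, 'renderer crashed')
assert v.name == 'Exception'
print(failure.GetStringFromExcInfo(v.exc_info))  # formatted traceback

# From an exception caught in test code:
try:
  raise NotImplementedError('not wired up yet')
except NotImplementedError:
  v2 = failure.FailureValue(None, sys.exc_info())
assert v2.name == 'NotImplementedError'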
diff --git a/catapult/telemetry/telemetry/value/failure_unittest.py b/catapult/telemetry/telemetry/value/failure_unittest.py
new file mode 100644
index 0000000..9a60d65
--- /dev/null
+++ b/catapult/telemetry/telemetry/value/failure_unittest.py
@@ -0,0 +1,72 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+import traceback
+import unittest
+
+from telemetry import story
+from telemetry import page as page_module
+from telemetry import value
+from telemetry.value import failure
+
+
+class TestBase(unittest.TestCase):
+  def setUp(self):
+    self.story_set = story.StorySet(base_dir=os.path.dirname(__file__))
+    self.story_set.AddStory(page_module.Page(
+        'http://www.bar.com/', self.story_set, self.story_set.base_dir))
+
+  @property
+  def pages(self):
+    return self.story_set.stories
+
+class ValueTest(TestBase):
+  def testRepr(self):
+    v = failure.FailureValue.FromMessage(self.pages[0], 'Failure')
+
+    exc_info_str = failure.GetStringFromExcInfo(v.exc_info)
+    expected = 'FailureValue(http://www.bar.com/, %s)' % exc_info_str
+
+    self.assertEquals(expected, str(v))
+
+  def testName(self):
+    v0 = failure.FailureValue.FromMessage(self.pages[0], 'Failure')
+    self.assertEquals('Exception', v0.name)
+    try:
+      raise NotImplementedError()
+    except Exception:
+      v1 = failure.FailureValue(self.pages[0], sys.exc_info())
+    self.assertEquals('NotImplementedError', v1.name)
+
+  def testBuildbotAndRepresentativeValue(self):
+    v = failure.FailureValue.FromMessage(self.pages[0], 'Failure')
+    self.assertIsNone(v.GetBuildbotValue())
+    self.assertIsNone(v.GetBuildbotDataType(
+        value.COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT))
+    self.assertIsNone(v.GetChartAndTraceNameForPerPageResult())
+    self.assertIsNone(v.GetRepresentativeNumber())
+    self.assertIsNone(v.GetRepresentativeString())
+
+  def testAsDict(self):
+    v = failure.FailureValue.FromMessage(self.pages[0], 'Failure')
+    d = v.AsDictWithoutBaseClassEntries()
+    self.assertTrue(d['value'].find('Exception: Failure') > -1)
+
+  def testFromDict(self):
+    try:
+      raise Exception('test')
+    except Exception:
+      exc_info = sys.exc_info()
+    d = {
+      'type': 'failure',
+      'name': exc_info[0].__name__,
+      'units': '',
+      'value': ''.join(traceback.format_exception(*exc_info))
+    }
+    v = value.Value.FromDict(d, {})
+
+    self.assertTrue(isinstance(v, failure.FailureValue))
+    self.assertEquals(v.name, 'Exception')
diff --git a/catapult/telemetry/telemetry/value/histogram.py b/catapult/telemetry/telemetry/value/histogram.py
new file mode 100644
index 0000000..b6d859b
--- /dev/null
+++ b/catapult/telemetry/telemetry/value/histogram.py
@@ -0,0 +1,138 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import json
+
+from telemetry.util import perf_tests_helper
+from telemetry import value as value_module
+from telemetry.value import histogram_util
+from telemetry.value import summarizable
+
+
+class HistogramValueBucket(object):
+  def __init__(self, low, high, count=0):
+    self.low = low
+    self.high = high
+    self.count = count
+
+  def AsDict(self):
+    return {
+      'low': self.low,
+      'high': self.high,
+      'count': self.count
+    }
+
+  def ToJSONString(self):
+    return '{%s}' % ', '.join([
+      '"low": %i' % self.low,
+      '"high": %i' % self.high,
+      '"count": %i' % self.count])
+
+class HistogramValue(summarizable.SummarizableValue):
+  def __init__(self, page, name, units,
+               raw_value=None, raw_value_json=None, important=True,
+               description=None, tir_label=None, improvement_direction=None,
+               grouping_keys=None):
+    super(HistogramValue, self).__init__(page, name, units, important,
+                                         description, tir_label,
+                                         improvement_direction, grouping_keys)
+    if raw_value_json:
+      assert raw_value == None, \
+             'Don\'t specify both raw_value and raw_value_json'
+      raw_value = json.loads(raw_value_json)
+    if raw_value:
+      self.buckets = []
+      for bucket in histogram_util.GetHistogramBucketsFromRawValue(raw_value):
+        self.buckets.append(HistogramValueBucket(
+          low=bucket['low'],
+          high=bucket['high'],
+          count=bucket['count']))
+    else:
+      self.buckets = []
+
+  def __repr__(self):
+    if self.page:
+      page_name = self.page.display_name
+    else:
+      page_name = 'None'
+    return ('HistogramValue(%s, %s, %s, raw_json_string=%s, '
+            'important=%s, description=%s, tir_label=%s, '
+            'improvement_direction=%s, grouping_keys=%s)') % (
+                page_name,
+                self.name, self.units,
+                self.ToJSONString(),
+                self.important,
+                self.description,
+                self.tir_label,
+                self.improvement_direction,
+                self.grouping_keys)
+
+  def GetBuildbotDataType(self, output_context):
+    if self._IsImportantGivenOutputIntent(output_context):
+      return 'histogram'
+    return 'unimportant-histogram'
+
+  def GetBuildbotValue(self):
+    # More buildbot insanity: perf_tests_results_helper requires the histogram
+    # to be an array of size one.
+    return [self.ToJSONString()]
+
+  def ToJSONString(self):
+    # This has to hand-JSONify the histogram to ensure the order of keys
+    # produced is stable across different systems.
+    #
+    # This is done because the buildbot unittests are string equality
+    # assertions. Thus, tests that contain histograms require stable
+    # stringification of the histogram.
+    #
+    # Sigh, buildbot, Y U gotta be that way.
+    return '{"buckets": [%s]}' % (
+      ', '.join([b.ToJSONString() for b in self.buckets]))
+
+  def GetRepresentativeNumber(self):
+    (mean, _) = perf_tests_helper.GeomMeanAndStdDevFromHistogram(
+        self.ToJSONString())
+    return mean
+
+  def GetRepresentativeString(self):
+    return self.GetBuildbotValue()
+
+  @staticmethod
+  def GetJSONTypeName():
+    return 'histogram'
+
+  def AsDict(self):
+    d = super(HistogramValue, self).AsDict()
+    d['buckets'] = [b.AsDict() for b in self.buckets]
+    return d
+
+  @staticmethod
+  def FromDict(value_dict, page_dict):
+    kwargs = value_module.Value.GetConstructorKwArgs(value_dict, page_dict)
+    kwargs['raw_value'] = value_dict
+
+    if 'improvement_direction' in value_dict:
+      kwargs['improvement_direction'] = value_dict['improvement_direction']
+
+    return HistogramValue(**kwargs)
+
+  @classmethod
+  def MergeLikeValuesFromSamePage(cls, values):
+    assert len(values) > 0
+    v0 = values[0]
+    return HistogramValue(
+        v0.page, v0.name, v0.units,
+        raw_value_json=histogram_util.AddHistograms(
+            [v.ToJSONString() for v in values]),
+        description=v0.description,
+        important=v0.important, tir_label=v0.tir_label,
+        improvement_direction=v0.improvement_direction,
+        grouping_keys=v0.grouping_keys)
+
+  @classmethod
+  def MergeLikeValuesFromDifferentPages(cls, values):
+    # Histograms cannot be merged across pages, at least for now. It should be
+    # theoretically possible, just requires more work. Instead, return None.
+    # This signals to the merging code that the data is unmergable and it will
+    # cope accordingly.
+    return None
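A minimal usage sketch of HistogramValue constructed from a raw JSON string;
the value name, units, and bucket contents are made up for illustration. As
noted in the comments above, ToJSONString pins the key order and
GetBuildbotValue wraps the result in a single-element list.

from telemetry.value import histogram as histogram_module
from telemetry.value import improvement_direction

hist = histogram_module.HistogramValue(
    None, 'tab_load_time', 'ms',
    raw_value_json='{"buckets": [{"low": 1, "high": 2, "count": 1}]}',
    improvement_direction=improvement_direction.DOWN)

hist.ToJSONString()      # '{"buckets": [{"low": 1, "high": 2, "count": 1}]}'
hist.GetBuildbotValue()  # the same string wrapped in a one-element list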
diff --git a/catapult/telemetry/telemetry/value/histogram_unittest.py b/catapult/telemetry/telemetry/value/histogram_unittest.py
new file mode 100644
index 0000000..3e8e662
--- /dev/null
+++ b/catapult/telemetry/telemetry/value/histogram_unittest.py
@@ -0,0 +1,137 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import os
+import unittest
+
+from telemetry import story
+from telemetry import page as page_module
+from telemetry import value
+from telemetry.value import histogram as histogram_module
+from telemetry.value import improvement_direction
+
+
+class TestBase(unittest.TestCase):
+  def setUp(self):
+    story_set = story.StorySet(base_dir=os.path.dirname(__file__))
+    story_set.AddStory(
+        page_module.Page("http://www.bar.com/", story_set, story_set.base_dir))
+    story_set.AddStory(
+        page_module.Page("http://www.baz.com/", story_set, story_set.base_dir))
+    story_set.AddStory(
+        page_module.Page("http://www.foo.com/", story_set, story_set.base_dir))
+    self.story_set = story_set
+
+  @property
+  def pages(self):
+    return self.story_set.stories
+
+class ValueTest(TestBase):
+  def testRepr(self):
+    page = self.pages[0]
+    v = histogram_module.HistogramValue(
+            page, 'x', 'counts',
+            raw_value_json='{"buckets": [{"low": 1, "high": 2, "count": 1}]}',
+            important=True, description='desc', tir_label='my_ir',
+            improvement_direction=improvement_direction.UP)
+    expected = ('HistogramValue(http://www.bar.com/, x, counts, '
+                'raw_json_string={"buckets": [{"low": 1, "high": 2, "count": '
+                '1}]}, important=True, description=desc, tir_label=my_ir, '
+                'improvement_direction=up, grouping_keys={})')
+
+    self.assertEquals(expected, str(v))
+
+  def testHistogramBasic(self):
+    page0 = self.pages[0]
+    histogram = histogram_module.HistogramValue(
+        page0, 'x', 'counts',
+        raw_value_json='{"buckets": [{"low": 1, "high": 2, "count": 1}]}',
+        important=False, improvement_direction=improvement_direction.UP)
+    self.assertEquals(
+      ['{"buckets": [{"low": 1, "high": 2, "count": 1}]}'],
+      histogram.GetBuildbotValue())
+    self.assertEquals(1.5,
+                      histogram.GetRepresentativeNumber())
+    self.assertEquals(
+      ['{"buckets": [{"low": 1, "high": 2, "count": 1}]}'],
+      histogram.GetBuildbotValue())
+
+    self.assertEquals(
+        'unimportant-histogram',
+        histogram.GetBuildbotDataType(value.SUMMARY_RESULT_OUTPUT_CONTEXT))
+    histogram.important = True
+    self.assertEquals(
+        'histogram',
+        histogram.GetBuildbotDataType(value.SUMMARY_RESULT_OUTPUT_CONTEXT))
+
+  def testBucketAsDict(self):
+    bucket = histogram_module.HistogramValueBucket(33, 45, 78)
+    d = bucket.AsDict()
+
+    self.assertEquals(d, {
+          'low': 33,
+          'high': 45,
+          'count': 78
+        })
+
+  def testAsDict(self):
+    histogram = histogram_module.HistogramValue(
+        None, 'x', 'counts',
+        raw_value_json='{"buckets": [{"low": 1, "high": 2, "count": 1}]}',
+        important=False, improvement_direction=improvement_direction.DOWN)
+    d = histogram.AsDictWithoutBaseClassEntries()
+
+    self.assertEquals(['buckets'], d.keys())
+    self.assertTrue(isinstance(d['buckets'], list))
+    self.assertEquals(len(d['buckets']), 1)
+
+  def testFromDict(self):
+    d = {
+      'type': 'histogram',
+      'name': 'x',
+      'units': 'counts',
+      'buckets': [{'low': 1, 'high': 2, 'count': 1}],
+      'improvement_direction': 'down',
+    }
+    v = value.Value.FromDict(d, {})
+
+    self.assertTrue(isinstance(v, histogram_module.HistogramValue))
+    self.assertEquals(
+      ['{"buckets": [{"low": 1, "high": 2, "count": 1}]}'],
+      v.GetBuildbotValue())
+    self.assertEquals(improvement_direction.DOWN, v.improvement_direction)
+
+  def testFromDictWithoutImprovementDirection(self):
+    d = {
+      'type': 'histogram',
+      'name': 'x',
+      'units': 'counts',
+      'buckets': [{'low': 1, 'high': 2, 'count': 1}],
+    }
+    v = value.Value.FromDict(d, {})
+
+    self.assertTrue(isinstance(v, histogram_module.HistogramValue))
+    self.assertIsNone(v.improvement_direction)
+
+  def testMergeLikeValuesFromSamePage(self):
+    d1 = {
+      'type': 'histogram',
+      'name': 'x',
+      'units': 'counts',
+      'description': 'histogram-based metric',
+      'buckets': [{'low': 1, 'high': 3, 'count': 1}],
+    }
+
+    d2 = {
+      'type': 'histogram',
+      'name': 'x',
+      'units': 'counts',
+      'description': 'histogram-based metric',
+      'buckets': [{'low': 2, 'high': 4, 'count': 1}],
+    }
+
+    v0, v1 = value.Value.FromDict(d1, {}), value.Value.FromDict(d2, {})
+
+    vM = histogram_module.HistogramValue.MergeLikeValuesFromSamePage([v0, v1])
+    self.assertTrue(isinstance(vM, histogram_module.HistogramValue))
+    self.assertEquals('histogram-based metric', vM.description)
diff --git a/catapult/telemetry/telemetry/value/histogram_util.py b/catapult/telemetry/telemetry/value/histogram_util.py
new file mode 100644
index 0000000..2a67586
--- /dev/null
+++ b/catapult/telemetry/telemetry/value/histogram_util.py
@@ -0,0 +1,140 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This is a helper module to get and manipulate histogram data.
+
+The histogram data is the same data as is visible from "chrome://histograms".
+More information can be found at: chromium/src/base/metrics/histogram.h
+"""
+
+import collections
+import json
+import logging
+
+from telemetry.core import exceptions
+
+BROWSER_HISTOGRAM = 'browser_histogram'
+RENDERER_HISTOGRAM = 'renderer_histogram'
+
+
+def GetHistogramBucketsFromJson(histogram_json):
+  return GetHistogramBucketsFromRawValue(json.loads(histogram_json))
+
+
+def GetHistogramBucketsFromRawValue(raw_value):
+  buckets = raw_value.get('buckets', [])
+  if buckets:
+    # If there are values greater than the maximum allowable for the histogram,
+    # the highest bucket will have a 'low': maxvalue entry in the dict but no
+    # 'high' entry. Code often assumes the 'high' value will always be present,
+    # and uses it to get bucket mean. So default it to the same value as low.
+    buckets[-1].setdefault('high', buckets[-1]['low'])
+  return buckets
+
+
+def CustomizeBrowserOptions(options):
+  """Allows histogram collection."""
+  options.AppendExtraBrowserArgs(['--enable-stats-collection-bindings'])
+
+
+def SubtractHistogram(histogram_json, start_histogram_json):
+  """Subtracts a previous histogram from a histogram.
+
+  Both parameters and the returned result are json serializations.
+  """
+  start_histogram = json.loads(start_histogram_json)
+  start_histogram_buckets = GetHistogramBucketsFromRawValue(start_histogram)
+  # It's ok if the start histogram is empty (we had no data, maybe even no
+  # histogram at all, at the start of the test).
+  if not start_histogram_buckets:
+    return histogram_json
+
+  histogram = json.loads(histogram_json)
+  if ('pid' in start_histogram and 'pid' in histogram
+      and start_histogram['pid'] != histogram['pid']):
+    raise Exception(
+        'Trying to compare histograms from different processes (%d and %d)'
+        % (start_histogram['pid'], histogram['pid']))
+
+  start_histogram_bucket_counts = dict()
+  for b in start_histogram_buckets:
+    start_histogram_bucket_counts[b['low']] = b['count']
+
+  new_buckets = []
+  for b in GetHistogramBucketsFromRawValue(histogram):
+    new_bucket = b
+    low = b['low']
+    if low in start_histogram_bucket_counts:
+      new_bucket['count'] = b['count'] - start_histogram_bucket_counts[low]
+      if new_bucket['count'] < 0:
+        logging.error('Histogram subtraction error, starting histogram most '
+                      'probably invalid.')
+    if new_bucket['count']:
+      new_buckets.append(new_bucket)
+  histogram['buckets'] = new_buckets
+  histogram['count'] -= start_histogram['count']
+
+  return json.dumps(histogram)
+
+
+def AddHistograms(histogram_jsons):
+  """Adds histograms together. Used for aggregating data.
+
+  The parameter is a list of json serializations and the returned result is a
+  json serialization too.
+
+  Note that the histograms to be added together are typically from different
+  processes.
+  """
+
+  buckets = collections.defaultdict(int)
+  for histogram_json in histogram_jsons:
+    for b in GetHistogramBucketsFromJson(histogram_json):
+      key = (b['low'], b['high'])
+      buckets[key] += b['count']
+
+  buckets = [{'low': key[0], 'high': key[1], 'count': value}
+      for key, value in buckets.iteritems()]
+  buckets.sort(key=lambda h: h['low'])
+
+  result_histogram = {}
+  result_histogram['buckets'] = buckets
+  return json.dumps(result_histogram)
+
+
+def GetHistogram(histogram_type, histogram_name, tab):
+  """Get a json serialization of a histogram."""
+  assert histogram_type in [BROWSER_HISTOGRAM, RENDERER_HISTOGRAM]
+  function = 'getHistogram'
+  if histogram_type == BROWSER_HISTOGRAM:
+    function = 'getBrowserHistogram'
+  try:
+    histogram_json = tab.EvaluateJavaScript(
+        'statsCollectionController.%s("%s")' %
+        (function, histogram_name))
+  except exceptions.EvaluateException:
+    # Sometimes JavaScript flakily fails to execute: http://crbug.com/508431
+    histogram_json = None
+  if histogram_json:
+    return histogram_json
+  return None
+
+
+def GetHistogramCount(histogram_type, histogram_name, tab):
+  """Get the count of events for the given histograms."""
+  histogram_json = GetHistogram(histogram_type, histogram_name, tab)
+  histogram = json.loads(histogram_json)
+  if 'count' in histogram:
+    return histogram['count']
+  else:
+    return 0
+
+def GetHistogramSum(histogram_type, histogram_name, tab):
+  """Get the sum of events for the given histograms."""
+  histogram_json = GetHistogram(histogram_type, histogram_name, tab)
+  histogram = json.loads(histogram_json)
+  if 'sum' in histogram:
+    return histogram['sum']
+  else:
+    return 0
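A short sketch of the aggregation helpers above on in-memory JSON histograms;
the bucket numbers are invented for illustration.

import json

from telemetry.value import histogram_util

histogram_a = """{"count": 3, "buckets": [
    {"low": 1, "high": 2, "count": 1},
    {"low": 2, "high": 3, "count": 2}]}"""
histogram_b = """{"count": 5, "buckets": [
    {"low": 2, "high": 3, "count": 5}]}"""

# AddHistograms sums counts bucket-by-bucket, e.g. across processes:
merged = json.loads(histogram_util.AddHistograms([histogram_a, histogram_b]))
# merged['buckets'] == [{'low': 1, 'high': 2, 'count': 1},
#                       {'low': 2, 'high': 3, 'count': 7}]

# A trailing bucket with no 'high' key gets 'high' defaulted to its 'low':
buckets = histogram_util.GetHistogramBucketsFromRawValue(
    {'buckets': [{'low': 10, 'high': 12, 'count': 4}, {'low': 13, 'count': 1}]})
# buckets[-1] == {'low': 13, 'high': 13, 'count': 1}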
diff --git a/catapult/telemetry/telemetry/value/histogram_util_unittest.py b/catapult/telemetry/telemetry/value/histogram_util_unittest.py
new file mode 100644
index 0000000..d20caca
--- /dev/null
+++ b/catapult/telemetry/telemetry/value/histogram_util_unittest.py
@@ -0,0 +1,80 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import unittest
+
+from telemetry.value import histogram_util
+
+class TestHistogram(unittest.TestCase):
+  def testSubtractHistogram(self):
+    baseline_histogram = """{"count": 3, "buckets": [
+        {"low": 1, "high": 2, "count": 1},
+        {"low": 2, "high": 3, "count": 2}]}"""
+
+    later_histogram = """{"count": 14, "buckets": [
+        {"low": 1, "high": 2, "count": 1},
+        {"low": 2, "high": 3, "count": 3},
+        {"low": 3, "high": 4, "count": 10}]}"""
+
+    new_histogram = json.loads(
+        histogram_util.SubtractHistogram(later_histogram, baseline_histogram))
+    new_buckets = dict()
+    for b in new_histogram['buckets']:
+      new_buckets[b['low']] = b['count']
+    self.assertFalse(1 in new_buckets)
+    self.assertEquals(1, new_buckets[2])
+    self.assertEquals(10, new_buckets[3])
+
+
+  def testAddHistograms(self):
+    histograms = []
+    histograms.append("""{"count": 3, "buckets": [
+        {"low": 1, "high": 2, "count": 1},
+        {"low": 2, "high": 3, "count": 2}]}""")
+
+    histograms.append("""{"count": 20, "buckets": [
+        {"low": 2, "high": 3, "count": 10},
+        {"low": 3, "high": 4, "count": 10}]}""")
+
+    histograms.append("""{"count": 15, "buckets": [
+        {"low": 1, "high": 2, "count": 4},
+        {"low": 3, "high": 4, "count": 11}]}""")
+
+    new_histogram = json.loads(
+        histogram_util.AddHistograms(histograms))
+    new_buckets = dict()
+    for b in new_histogram['buckets']:
+      new_buckets[b['low']] = b['count']
+    self.assertEquals(5, new_buckets[1])
+    self.assertEquals(12, new_buckets[2])
+    self.assertEquals(21, new_buckets[3])
+
+
+  def testGetHistogramBucketsFromRawValue_Max(self):
+    raw_value = {'buckets': [
+      {'count': 4, 'low': 10, 'high': 15,},
+      {'count': 6, 'low': 16, 'high': 18,},
+      {'count': 8, 'low': 19},
+    ]}
+    buckets = histogram_util.GetHistogramBucketsFromRawValue(raw_value)
+    self.assertEquals([
+      {'count': 4, 'low': 10, 'high': 15,},
+      {'count': 6, 'low': 16, 'high': 18,},
+      {'count': 8, 'low': 19, 'high': 19},],
+      buckets)
+
+
+  def testGetHistogramBucketsFromJson(self):
+    json_value = json.dumps({'buckets': [
+      {'count': 4, 'low': 10, 'high': 15,},
+      {'count': 6, 'low': 16, 'high': 18,},
+      {'count': 8, 'low': 19, 'high': 25},
+    ]})
+    buckets = histogram_util.GetHistogramBucketsFromJson(json_value)
+    self.assertEquals([
+      {'count': 4, 'low': 10, 'high': 15,},
+      {'count': 6, 'low': 16, 'high': 18,},
+      {'count': 8, 'low': 19, 'high': 25},],
+      buckets)
diff --git a/catapult/telemetry/telemetry/value/improvement_direction.py b/catapult/telemetry/telemetry/value/improvement_direction.py
new file mode 100644
index 0000000..aca75ca
--- /dev/null
+++ b/catapult/telemetry/telemetry/value/improvement_direction.py
@@ -0,0 +1,9 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+UP = 'up'
+DOWN = 'down'
+
+def IsValid(improvement_direction):
+  return improvement_direction in [UP, DOWN]
diff --git a/catapult/telemetry/telemetry/value/list_of_scalar_values.py b/catapult/telemetry/telemetry/value/list_of_scalar_values.py
new file mode 100644
index 0000000..8b34f63
--- /dev/null
+++ b/catapult/telemetry/telemetry/value/list_of_scalar_values.py
@@ -0,0 +1,229 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import numbers
+import math
+
+from telemetry import value as value_module
+from telemetry.value import none_values
+from telemetry.value import summarizable
+
+
+def Variance(sample):
+  """ Compute the population variance.
+
+    Args:
+      sample: a list of numbers.
+  """
+  k = len(sample) - 1  # Bessel correction
+  if k <= 0:
+    return 0
+  m = _Mean(sample)
+  return sum((x - m)**2 for x in sample)/k
+
+
+def StandardDeviation(sample):
+  """ Compute standard deviation for a list of numbers.
+
+    Args:
+      sample: a list of numbers.
+  """
+  return math.sqrt(Variance(sample))
+
+
+def PooledStandardDeviation(list_of_samples, list_of_variances=None):
+  """ Compute standard deviation for a list of samples.
+
+  See: https://en.wikipedia.org/wiki/Pooled_variance for the formula.
+
+  Args:
+    list_of_samples: a list of lists, each is a list of numbers.
+    list_of_variances: a list of numbers, the i-th element is the variance of
+      the i-th sample in list_of_samples. If this is None, we use
+      Variance(sample) to get the variance of the i-th sample.
+  """
+  pooled_variance = 0.0
+  total_degrees_of_freedom = 0
+  for i in xrange(len(list_of_samples)):
+    l = list_of_samples[i]
+    k = len(l) - 1  # Bessel correction
+    if k <= 0:
+      continue
+    variance = list_of_variances[i] if list_of_variances else Variance(l)
+    pooled_variance += k * variance
+    total_degrees_of_freedom += k
+  if total_degrees_of_freedom:
+    return (pooled_variance/total_degrees_of_freedom) ** 0.5
+  return 0
+
+
+def _Mean(values):
+  return float(sum(values)) / len(values) if len(values) > 0 else 0.0
+
+
+class ListOfScalarValues(summarizable.SummarizableValue):
+  """ ListOfScalarValues represents a list of numbers.
+
+  By default, std is the standard deviation of all numbers in the list. Std can
+  also be specified in the constructor if the numbers are not from the same
+  population.
+  """
+  def __init__(self, page, name, units, values,
+               important=True, description=None,
+               tir_label=None, none_value_reason=None,
+               std=None, same_page_merge_policy=value_module.CONCATENATE,
+               improvement_direction=None, grouping_keys=None):
+    super(ListOfScalarValues, self).__init__(page, name, units, important,
+                                             description, tir_label,
+                                             improvement_direction,
+                                             grouping_keys)
+    if values is not None:
+      assert isinstance(values, list)
+      assert len(values) > 0
+      assert all(isinstance(v, numbers.Number) for v in values)
+      assert std is None or isinstance(std, numbers.Number)
+    else:
+      assert std is None
+    none_values.ValidateNoneValueReason(values, none_value_reason)
+    self.values = values
+    self.none_value_reason = none_value_reason
+    self.same_page_merge_policy = same_page_merge_policy
+    if values is not None and std is None:
+      std = StandardDeviation(values)
+    assert std is None or std >= 0, (
+        'standard deviation cannot be negative: %s' % std)
+    self._std = std
+
+  @property
+  def std(self):
+    return self._std
+
+  @property
+  def variance(self):
+    return self._std ** 2
+
+  def __repr__(self):
+    if self.page:
+      page_name = self.page.display_name
+    else:
+      page_name = 'None'
+    if self.same_page_merge_policy == value_module.CONCATENATE:
+      merge_policy = 'CONCATENATE'
+    else:
+      merge_policy = 'PICK_FIRST'
+    return ('ListOfScalarValues(%s, %s, %s, %s, '
+            'important=%s, description=%s, tir_label=%s, std=%s, '
+            'same_page_merge_policy=%s, improvement_direction=%s, '
+            'grouping_keys=%s)') % (
+                page_name,
+                self.name,
+                self.units,
+                repr(self.values),
+                self.important,
+                self.description,
+                self.tir_label,
+                self.std,
+                merge_policy,
+                self.improvement_direction,
+                self.grouping_keys)
+
+  def GetBuildbotDataType(self, output_context):
+    if self._IsImportantGivenOutputIntent(output_context):
+      return 'default'
+    return 'unimportant'
+
+  def GetBuildbotValue(self):
+    return self.values
+
+  def GetRepresentativeNumber(self):
+    return _Mean(self.values)
+
+  def GetRepresentativeString(self):
+    return repr(self.values)
+
+  def IsMergableWith(self, that):
+    return (super(ListOfScalarValues, self).IsMergableWith(that) and
+            self.same_page_merge_policy == that.same_page_merge_policy)
+
+  @staticmethod
+  def GetJSONTypeName():
+    return 'list_of_scalar_values'
+
+  def AsDict(self):
+    d = super(ListOfScalarValues, self).AsDict()
+    d['values'] = self.values
+    d['std'] = self.std
+
+    if self.none_value_reason is not None:
+      d['none_value_reason'] = self.none_value_reason
+
+    return d
+
+  @staticmethod
+  def FromDict(value_dict, page_dict):
+    kwargs = value_module.Value.GetConstructorKwArgs(value_dict, page_dict)
+    kwargs['values'] = value_dict['values']
+    kwargs['std'] = value_dict['std']
+
+    if 'improvement_direction' in value_dict:
+      kwargs['improvement_direction'] = value_dict['improvement_direction']
+    if 'none_value_reason' in value_dict:
+      kwargs['none_value_reason'] = value_dict['none_value_reason']
+
+    return ListOfScalarValues(**kwargs)
+
+  @classmethod
+  def MergeLikeValuesFromSamePage(cls, values):
+    assert len(values) > 0
+    v0 = values[0]
+
+    if v0.same_page_merge_policy == value_module.PICK_FIRST:
+      return ListOfScalarValues(
+          v0.page, v0.name, v0.units,
+          values[0].values,
+          important=v0.important,
+          same_page_merge_policy=v0.same_page_merge_policy,
+          none_value_reason=v0.none_value_reason,
+          improvement_direction=v0.improvement_direction,
+          grouping_keys=v0.grouping_keys)
+
+    assert v0.same_page_merge_policy == value_module.CONCATENATE
+    return cls._MergeLikeValues(values, v0.page, v0.name, v0.tir_label,
+                                v0.grouping_keys)
+
+  @classmethod
+  def MergeLikeValuesFromDifferentPages(cls, values):
+    assert len(values) > 0
+    v0 = values[0]
+    return cls._MergeLikeValues(values, None, v0.name, v0.tir_label,
+                                v0.grouping_keys)
+
+  @classmethod
+  def _MergeLikeValues(cls, values, page, name, tir_label, grouping_keys):
+    v0 = values[0]
+    merged_values = []
+    list_of_samples = []
+    none_value_reason = None
+    pooled_std = None
+    for v in values:
+      if v.values is None:
+        merged_values = None
+        none_value_reason = none_values.MERGE_FAILURE_REASON
+        break
+      merged_values.extend(v.values)
+      list_of_samples.append(v.values)
+    if merged_values:
+      pooled_std = PooledStandardDeviation(
+          list_of_samples, list_of_variances=[v.variance for v in values])
+    return ListOfScalarValues(
+        page, name, v0.units,
+        merged_values,
+        important=v0.important,
+        description=v0.description,
+        tir_label=tir_label,
+        same_page_merge_policy=v0.same_page_merge_policy,
+        std=pooled_std,
+        none_value_reason=none_value_reason,
+        improvement_direction=v0.improvement_direction,
+        grouping_keys=grouping_keys)
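A worked sketch of the pooled standard deviation used when concatenating runs;
the sample values are the same ones the unit tests below use, so the result
can be cross-checked there.

from telemetry.value import list_of_scalar_values

run1 = [10, 9, 9, 7]           # sample variance 19/12, 3 degrees of freedom
run2 = [300, 302, 303, 304]    # sample variance 35/12, 3 degrees of freedom

# Pooled variance weights each run's variance by its degrees of freedom:
#   ((19/12) * 3 + (35/12) * 3) / (3 + 3) = 2.25, so the pooled std is 1.5.
pooled_std = list_of_scalar_values.PooledStandardDeviation([run1, run2])
assert abs(pooled_std - 1.5) < 1e-9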
diff --git a/catapult/telemetry/telemetry/value/list_of_scalar_values_unittest.py b/catapult/telemetry/telemetry/value/list_of_scalar_values_unittest.py
new file mode 100644
index 0000000..e87f268
--- /dev/null
+++ b/catapult/telemetry/telemetry/value/list_of_scalar_values_unittest.py
@@ -0,0 +1,293 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import os
+import unittest
+
+from telemetry import story
+from telemetry import page as page_module
+from telemetry import value
+from telemetry.value import improvement_direction
+from telemetry.value import list_of_scalar_values
+from telemetry.value import none_values
+
+
+class StatisticComputationTest(unittest.TestCase):
+  def testVariance(self):
+    self.assertAlmostEqual(
+        list_of_scalar_values.Variance([]), 0)
+    self.assertAlmostEqual(
+        list_of_scalar_values.Variance([3]), 0)
+    self.assertAlmostEqual(
+        list_of_scalar_values.Variance([600, 470, 170, 430, 300]), 27130)
+
+  def testStandardDeviation(self):
+    self.assertAlmostEqual(
+        list_of_scalar_values.StandardDeviation([]), 0)
+    self.assertAlmostEqual(
+        list_of_scalar_values.StandardDeviation([1]), 0)
+    self.assertAlmostEqual(
+        list_of_scalar_values.StandardDeviation([600, 470, 170, 430, 300]),
+        164.71186, places=4)
+
+  def testPooledVariance(self):
+    self.assertAlmostEqual(
+        list_of_scalar_values.PooledStandardDeviation([[], [], []]), 0)
+    self.assertAlmostEqual(
+        list_of_scalar_values.PooledStandardDeviation([[1], [], [3], []]), 0)
+    self.assertAlmostEqual(
+        list_of_scalar_values.PooledStandardDeviation([[1], [2], [3], [4]]), 0)
+    self.assertAlmostEqual(list_of_scalar_values.PooledStandardDeviation(
+        [[600, 470, 170, 430, 300],           # variance = 27130, std = 164.7
+        [4000, 4020, 4230],                   # variance = 16233, std = 127.41
+        [260, 700, 800, 900, 0, 120, 150]]),  # variance = 136348, std = 369.2
+        282.7060,  # SQRT((27130*4 + 16233*2 + 136348*6)/12)
+        places=4)
+    self.assertAlmostEqual(list_of_scalar_values.PooledStandardDeviation(
+        [[600, 470, 170, 430, 300],
+         [4000, 4020, 4230],
+         [260, 700, 800, 900, 0, 120, 150]],
+        list_of_variances=[100000, 200000, 300000]),
+        465.47466,  # SQRT((100000*4 + 200000*2 + 300000*6)/12)
+        places=4)
+
+
+class TestBase(unittest.TestCase):
+  def setUp(self):
+    story_set = story.StorySet(base_dir=os.path.dirname(__file__))
+    story_set.AddStory(
+        page_module.Page('http://www.bar.com/', story_set, story_set.base_dir))
+    story_set.AddStory(
+        page_module.Page('http://www.baz.com/', story_set, story_set.base_dir))
+    story_set.AddStory(
+        page_module.Page('http://www.foo.com/', story_set, story_set.base_dir))
+    self.story_set = story_set
+
+  @property
+  def pages(self):
+    return self.story_set.stories
+
+class ValueTest(TestBase):
+  def testRepr(self):
+    page = self.pages[0]
+    v = list_of_scalar_values.ListOfScalarValues(
+        page, 'x', 'unit', [10, 9, 9, 7], important=True, description='desc',
+        tir_label='my_ir', std=42, same_page_merge_policy=value.CONCATENATE,
+        improvement_direction=improvement_direction.DOWN)
+
+    expected = ('ListOfScalarValues(http://www.bar.com/, x, unit, '
+                '[10, 9, 9, 7], important=True, description=desc, '
+                'tir_label=my_ir, std=42, '
+                'same_page_merge_policy=CONCATENATE, '
+                'improvement_direction=down, grouping_keys={})')
+
+    self.assertEquals(expected, str(v))
+
+  def testListSamePageMergingWithSamePageConcatenatePolicy(self):
+    page0 = self.pages[0]
+    v0 = list_of_scalar_values.ListOfScalarValues(
+        page0, 'x', 'unit',
+        [10, 9, 9, 7], same_page_merge_policy=value.CONCATENATE,
+        description='list-based metric',
+        improvement_direction=improvement_direction.DOWN)
+    v1 = list_of_scalar_values.ListOfScalarValues(
+        page0, 'x', 'unit',
+        [300, 302, 303, 304], same_page_merge_policy=value.CONCATENATE,
+        description='list-based metric',
+        improvement_direction=improvement_direction.DOWN)
+    self.assertTrue(v1.IsMergableWith(v0))
+
+    vM = (list_of_scalar_values.ListOfScalarValues.
+          MergeLikeValuesFromSamePage([v0, v1]))
+    self.assertEquals(page0, vM.page)
+    self.assertEquals('x', vM.name)
+    self.assertEquals('unit', vM.units)
+    self.assertEquals(value.CONCATENATE, vM.same_page_merge_policy)
+    self.assertEquals(True, vM.important)
+    self.assertEquals([10, 9, 9, 7, 300, 302, 303, 304], vM.values)
+    # SQRT((19/12 * 3 + 35/12 * 3)/6) = 1.5
+    self.assertAlmostEqual(1.5, vM.std)
+    self.assertEquals('list-based metric', vM.description)
+    self.assertEquals(improvement_direction.DOWN, vM.improvement_direction)
+
+  def testListSamePageMergingWithPickFirstPolicy(self):
+    page0 = self.pages[0]
+    v0 = list_of_scalar_values.ListOfScalarValues(
+        page0, 'x', 'unit',
+        [1, 2], same_page_merge_policy=value.PICK_FIRST,
+        improvement_direction=improvement_direction.UP)
+    v1 = list_of_scalar_values.ListOfScalarValues(
+        page0, 'x', 'unit',
+        [3, 4], same_page_merge_policy=value.PICK_FIRST,
+        improvement_direction=improvement_direction.UP)
+    self.assertTrue(v1.IsMergableWith(v0))
+
+    vM = (list_of_scalar_values.ListOfScalarValues.
+          MergeLikeValuesFromSamePage([v0, v1]))
+    self.assertEquals(page0, vM.page)
+    self.assertEquals('x', vM.name)
+    self.assertEquals('unit', vM.units)
+    self.assertEquals(value.PICK_FIRST, vM.same_page_merge_policy)
+    self.assertEquals(True, vM.important)
+    self.assertEquals([1, 2], vM.values)
+    self.assertEquals(improvement_direction.UP, vM.improvement_direction)
+
+  def testListDifferentPageMerging(self):
+    page0 = self.pages[0]
+    page1 = self.pages[1]
+    v0 = list_of_scalar_values.ListOfScalarValues(
+        page0, 'x', 'unit',
+        [1, 2], same_page_merge_policy=value.CONCATENATE,
+        improvement_direction=improvement_direction.DOWN)
+    v1 = list_of_scalar_values.ListOfScalarValues(
+        page1, 'x', 'unit',
+        [3, 4], same_page_merge_policy=value.CONCATENATE,
+        improvement_direction=improvement_direction.DOWN)
+    self.assertTrue(v1.IsMergableWith(v0))
+
+    vM = (list_of_scalar_values.ListOfScalarValues.
+          MergeLikeValuesFromDifferentPages([v0, v1]))
+    self.assertEquals(None, vM.page)
+    self.assertEquals('x', vM.name)
+    self.assertEquals('unit', vM.units)
+    self.assertEquals(value.CONCATENATE, vM.same_page_merge_policy)
+    self.assertEquals(True, vM.important)
+    self.assertEquals([1, 2, 3, 4], vM.values)
+    self.assertEquals(improvement_direction.DOWN, vM.improvement_direction)
+
+  def testListWithNoneValueMerging(self):
+    page0 = self.pages[0]
+    v0 = list_of_scalar_values.ListOfScalarValues(
+        page0, 'x', 'unit',
+        [1, 2], same_page_merge_policy=value.CONCATENATE,
+        improvement_direction=improvement_direction.UP)
+    v1 = list_of_scalar_values.ListOfScalarValues(
+        page0, 'x', 'unit',
+        None, same_page_merge_policy=value.CONCATENATE, none_value_reason='n',
+        improvement_direction=improvement_direction.UP)
+    self.assertTrue(v1.IsMergableWith(v0))
+
+    vM = (list_of_scalar_values.ListOfScalarValues.
+          MergeLikeValuesFromSamePage([v0, v1]))
+    self.assertEquals(None, vM.values)
+    self.assertEquals(none_values.MERGE_FAILURE_REASON,
+                      vM.none_value_reason)
+    self.assertEquals(improvement_direction.UP, vM.improvement_direction)
+
+  def testListWithNoneValueMustHaveNoneReason(self):
+    page0 = self.pages[0]
+    self.assertRaises(none_values.NoneValueMissingReason,
+                      lambda: list_of_scalar_values.ListOfScalarValues(
+                          page0, 'x', 'unit', None,
+                          improvement_direction=improvement_direction.DOWN))
+
+  def testListWithNoneReasonMustHaveNoneValue(self):
+    page0 = self.pages[0]
+    self.assertRaises(none_values.ValueMustHaveNoneValue,
+                      lambda: list_of_scalar_values.ListOfScalarValues(
+                          page0, 'x', 'unit', [1, 2],
+                          none_value_reason='n',
+                          improvement_direction=improvement_direction.UP))
+
+  def testAsDict(self):
+    v = list_of_scalar_values.ListOfScalarValues(
+        None, 'x', 'unit', [1, 2],
+        same_page_merge_policy=value.PICK_FIRST, important=False,
+        improvement_direction=improvement_direction.DOWN)
+    d = v.AsDictWithoutBaseClassEntries()
+
+    self.assertEquals(d['values'], [1, 2])
+    self.assertAlmostEqual(d['std'], 0.7071, places=4)
+
+  def testMergedValueAsDict(self):
+    page0 = self.pages[0]
+    v0 = list_of_scalar_values.ListOfScalarValues(
+        page0, 'x', 'unit',
+        [10, 9, 9, 7], same_page_merge_policy=value.CONCATENATE,
+        improvement_direction=improvement_direction.DOWN)
+    v1 = list_of_scalar_values.ListOfScalarValues(
+        page0, 'x', 'unit',
+        [300, 302, 303, 304], same_page_merge_policy=value.CONCATENATE,
+        improvement_direction=improvement_direction.DOWN)
+    self.assertTrue(v1.IsMergableWith(v0))
+
+    vM = (list_of_scalar_values.ListOfScalarValues.
+          MergeLikeValuesFromSamePage([v0, v1]))
+    d = vM.AsDict()
+    self.assertEquals(d['values'], [10, 9, 9, 7, 300, 302, 303, 304])
+    # SQRT((19/12 * 3 + 35/12 * 3)/6)
+    self.assertAlmostEqual(d['std'], 1.5)
+
+
+  def testNoneValueAsDict(self):
+    v = list_of_scalar_values.ListOfScalarValues(
+        None, 'x', 'unit', None, same_page_merge_policy=value.PICK_FIRST,
+        important=False, none_value_reason='n',
+        improvement_direction=improvement_direction.UP)
+    d = v.AsDictWithoutBaseClassEntries()
+
+    self.assertEquals(d, {
+          'values': None,
+          'none_value_reason': 'n',
+          'std': None
+        })
+
+  def testFromDictInts(self):
+    d = {
+      'type': 'list_of_scalar_values',
+      'name': 'x',
+      'units': 'unit',
+      'values': [1, 2],
+      'std': 0.7071,
+      'improvement_direction': improvement_direction.DOWN
+    }
+    v = value.Value.FromDict(d, {})
+
+    self.assertTrue(isinstance(v, list_of_scalar_values.ListOfScalarValues))
+    self.assertEquals(v.values, [1, 2])
+    self.assertEquals(v.std, 0.7071)
+    self.assertEquals(improvement_direction.DOWN, v.improvement_direction)
+
+  def testFromDictFloats(self):
+    d = {
+      'type': 'list_of_scalar_values',
+      'name': 'x',
+      'units': 'unit',
+      'values': [1.3, 2.7, 4.5, 2.1, 3.4],
+      'std': 0.901,
+      'improvement_direction': improvement_direction.UP
+    }
+    v = value.Value.FromDict(d, {})
+
+    self.assertTrue(isinstance(v, list_of_scalar_values.ListOfScalarValues))
+    self.assertEquals(v.values, [1.3, 2.7, 4.5, 2.1, 3.4])
+    self.assertEquals(v.std, 0.901)
+
+  def testFromDictWithoutImprovementDirection(self):
+    d = {
+      'type': 'list_of_scalar_values',
+      'name': 'x',
+      'units': 'unit',
+      'values': [1, 2],
+      'std': 0.7071,
+    }
+    v = value.Value.FromDict(d, {})
+
+    self.assertTrue(isinstance(v, list_of_scalar_values.ListOfScalarValues))
+    self.assertIsNone(v.improvement_direction)
+
+  def testFromDictNoneValue(self):
+    d = {
+      'type': 'list_of_scalar_values',
+      'name': 'x',
+      'units': 'unit',
+      'values': None,
+      'std': None,
+      'none_value_reason': 'n',
+      'improvement_direction': improvement_direction.DOWN
+    }
+    v = value.Value.FromDict(d, {})
+
+    self.assertTrue(isinstance(v, list_of_scalar_values.ListOfScalarValues))
+    self.assertEquals(v.values, None)
+    self.assertEquals(v.none_value_reason, 'n')
diff --git a/catapult/telemetry/telemetry/value/merge_values.py b/catapult/telemetry/telemetry/value/merge_values.py
new file mode 100644
index 0000000..0eb3302
--- /dev/null
+++ b/catapult/telemetry/telemetry/value/merge_values.py
@@ -0,0 +1,144 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.value import failure
+from telemetry.value import skip
+
+
+# TODO(eakuefner): Get rid of this as part of crbug.com/525688
+def DefaultKeyFunc(value):
+  """Keys values in a standard way for grouping in merging and summary.
+
+  Merging and summarization can be parameterized by a function that groups
+  values into equivalence classes. Any function that returns a comparable
+  object can be used as a key_func, but merge_values and summary both use this
+  function by default, so the default grouping can change as Telemetry does.
+
+  Args:
+    value: A Telemetry Value instance
+
+  Returns:
+    A comparable object used to group values.
+  """
+  # Grouping keys is a dict but key funcs need to be hashable, so we transform
+  # it to a sorted tuple of tuples here.
+  grouping_keys_sorted = tuple(sorted(value.grouping_keys.iteritems()))
+  return ((('value_name', value.name), ('ue_stable_id', value.tir_label)) +
+          grouping_keys_sorted)
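Why the grouping_keys dict is flattened into a sorted tuple: the key returned here is used for dictionary-based grouping, and dicts are unhashable. A tiny sketch with plain Python data (not a real Telemetry Value) showing the idiom:

grouping_keys = {'soup': 'nuts', 'a': 'b'}
key = ((('value_name', 'x'), ('ue_stable_id', 'foo')) +
       tuple(sorted(grouping_keys.items())))
# key == (('value_name', 'x'), ('ue_stable_id', 'foo'), ('a', 'b'),
#         ('soup', 'nuts')), and being a tuple it is hashable:
groups = {}
groups.setdefault(key, []).append('some value')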
+
+
+def MergeLikeValuesFromSamePage(all_values, key_func=DefaultKeyFunc):
+  """Merges values that measure the same thing on the same page.
+
+  A page may end up being measured multiple times, meaning that we may end up
+  with something like this:
+       ScalarValue(page1, 'x', 1, 'foo')
+       ScalarValue(page2, 'x', 4, 'bar')
+       ScalarValue(page1, 'x', 2, 'foo')
+       ScalarValue(page2, 'x', 5, 'baz')
+
+  This function will produce:
+       ListOfScalarValues(page1, 'x', [1, 2], 'foo')
+       ListOfScalarValues(page2, 'x', [4], 'bar')
+       ListOfScalarValues(page2, 'x', [5], 'baz')
+
+  The workhorse of this code is Value.MergeLikeValuesFromSamePage.
+
+  This requires (but does not check) that the values passed in with the same
+  grouping key pass the Value.IsMergableWith test. If this is not obeyed, the
+  results will be undefined.
+  """
+  return _MergeLikeValuesCommon(
+      all_values,
+      lambda x: (x.page, key_func(x)),
+      lambda v0, merge_group: v0.MergeLikeValuesFromSamePage(merge_group))
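A minimal usage sketch, mirroring the setup used in merge_values_unittest.py further down in this change (URLs and value names are placeholders):

import os

from telemetry import story
from telemetry import page as page_module
from telemetry.value import merge_values
from telemetry.value import scalar

story_set = story.StorySet(base_dir=os.path.dirname(__file__))
page0 = page_module.Page('http://www.bar.com/', story_set, story_set.base_dir)
page1 = page_module.Page('http://www.baz.com/', story_set, story_set.base_dir)
story_set.AddStory(page0)
story_set.AddStory(page1)

all_values = [scalar.ScalarValue(page0, 'x', 'units', 1),
              scalar.ScalarValue(page1, 'x', 'units', 4),
              scalar.ScalarValue(page0, 'x', 'units', 2)]
merged = merge_values.MergeLikeValuesFromSamePage(all_values)
# One ListOfScalarValues per page: [1, 2] for page0 and [4] for page1.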
+
+
+def MergeLikeValuesFromDifferentPages(all_values, key_func=DefaultKeyFunc):
+  """Merges values that measure the same thing on different pages.
+
+  After using MergeLikeValuesFromSamePage, one still ends up with values from
+  different pages:
+       ScalarValue(page1, 'x', 1, 'foo')
+       ScalarValue(page1, 'y', 30, 'bar')
+       ScalarValue(page2, 'x', 2, 'foo')
+       ScalarValue(page2, 'y', 40, 'baz')
+
+  This function will group values with the same name and tir_label together:
+       ListOfScalarValues(None, 'x', [1, 2], 'foo')
+       ListOfScalarValues(None, 'y', [30], 'bar')
+       ListOfScalarValues(None, 'y', [40], 'baz')
+
+  The workhorse of this code is Value.MergeLikeValuesFromDifferentPages.
+
+  Not all values that go into this function will come out: not every value can
+  be merged across pages. Values whose MergeLikeValuesFromDifferentPages returns
+  None will be omitted from the results.
+
+  This requires (but does not check) that the values passed in with the same
+  name pass the Value.IsMergableWith test. If this is not obeyed, the results
+  will be undefined.
+  """
+  return _MergeLikeValuesCommon(
+      all_values,
+      key_func,
+      lambda v0, merge_group: v0.MergeLikeValuesFromDifferentPages(merge_group))
+
+
+def _MergeLikeValuesCommon(all_values, key_func, merge_func):
+  """Groups all_values by key_func then applies merge_func to the groups.
+
+  This takes the all_values list and groups each item in it using the key
+  provided by key_func. This produces groups of values with like keys. These
+  are then handed to merge_func to produce a merged value. If merge_func
+  returns a non-None value, it is added to the list of returned values.
+  """
+  # When merging, we want to merge values in a consistent order, e.g. so that
+  # Scalar(1), Scalar(2) predictably produces ListOfScalarValues([1,2]) rather
+  # than 2,1.
+  #
+  # To do this, grouping is performed using a dictionary, but as new groups
+  # are found, the order in which they were found is also noted (see
+  # GroupStably below).
+  #
+  # Merging is then performed on groups in group-creation-order. This ensures
+  # that the returned array is in a stable order, group by group.
+  #
+  # Within a group, the order is stable because grouping preserves input order.
+  all_values = list(all_values)
+  merge_groups = GroupStably(all_values, key_func)
+
+  res = []
+  for merge_group in merge_groups:
+    v0 = merge_group[0]
+    vM = merge_func(v0, merge_group)
+    if vM:
+      res.append(vM)
+  return res
+
+def GroupStably(all_values, key_func):
+  """Groups an array by key_func, with the groups returned in a stable order.
+
+  Returns a list of groups.
+  """
+  all_values = list(all_values)
+
+  merge_groups = {}
+  merge_groups_in_creation_order = []
+  for value in all_values:
+    # TODO(chrishenry): This is temporary. When we figure out the
+    # right summarization strategy for page runs with failures/skips, we
+    # should use that instead.
+    should_skip_value = (isinstance(value, failure.FailureValue) or
+                         isinstance(value, skip.SkipValue))
+
+    if should_skip_value:
+      continue
+
+    key = key_func(value)
+    if key not in merge_groups:
+      merge_groups[key] = []
+      merge_groups_in_creation_order.append(merge_groups[key])
+    merge_groups[key].append(value)
+  return merge_groups_in_creation_order
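As a toy illustration of the ordering guarantee (plain strings rather than Telemetry values; GroupStably accepts them since it only filters out FailureValue and SkipValue instances):

from telemetry.value import merge_values

words = ['apple', 'avocado', 'banana', 'apricot', 'blueberry']
groups = merge_values.GroupStably(words, key_func=lambda w: w[0])
# Groups appear in first-seen order and members keep their input order:
# [['apple', 'avocado', 'apricot'], ['banana', 'blueberry']]
print(groups)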
diff --git a/catapult/telemetry/telemetry/value/merge_values_unittest.py b/catapult/telemetry/telemetry/value/merge_values_unittest.py
new file mode 100644
index 0000000..b326946
--- /dev/null
+++ b/catapult/telemetry/telemetry/value/merge_values_unittest.py
@@ -0,0 +1,255 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import os
+import unittest
+
+from telemetry import story
+from telemetry import page as page_module
+from telemetry.value import improvement_direction
+from telemetry.value import list_of_scalar_values
+from telemetry.value import merge_values
+from telemetry.value import scalar
+
+
+class TestBase(unittest.TestCase):
+  def setUp(self):
+    story_set = story.StorySet(base_dir=os.path.dirname(__file__))
+    story_set.AddStory(
+        page_module.Page('http://www.bar.com/', story_set, story_set.base_dir))
+    story_set.AddStory(
+        page_module.Page('http://www.baz.com/', story_set, story_set.base_dir))
+    story_set.AddStory(
+        page_module.Page('http://www.foo.com/', story_set, story_set.base_dir))
+    self.story_set = story_set
+
+  @property
+  def pages(self):
+    return self.story_set.stories
+
+class MergeValueTest(TestBase):
+  def testDefaultKeyFunc(self):
+    page0 = self.pages[0]
+
+    value = scalar.ScalarValue(
+        page0, 'x', 'units', 1,
+        improvement_direction=improvement_direction.UP,
+        tir_label='foo', grouping_keys={'soup': 'nuts', 'a': 'b'})
+
+    self.assertEquals((('value_name', 'x'), ('ue_stable_id', 'foo'),
+                      ('a', 'b'), ('soup', 'nuts')),
+                      merge_values.DefaultKeyFunc(value))
+
+  def testSamePageMergeBasic(self):
+    page0 = self.pages[0]
+    page1 = self.pages[1]
+
+    all_values = [scalar.ScalarValue(
+                      page0, 'x', 'units', 1,
+                      improvement_direction=improvement_direction.UP),
+                  scalar.ScalarValue(
+                      page1, 'x', 'units', 4,
+                      improvement_direction=improvement_direction.UP),
+                  scalar.ScalarValue(
+                      page0, 'x', 'units', 2,
+                      improvement_direction=improvement_direction.UP),
+                  scalar.ScalarValue(
+                      page1, 'x', 'units', 5,
+                      improvement_direction=improvement_direction.UP)]
+
+    merged_values = merge_values.MergeLikeValuesFromSamePage(all_values)
+    # Sort the results so that their order is predictable for the subsequent
+    # assertions.
+    merged_values.sort(key=lambda x: x.page.url)
+
+    self.assertEquals(2, len(merged_values))
+
+    self.assertEquals((page0, 'x'),
+                      (merged_values[0].page, merged_values[0].name))
+    self.assertEquals([1, 2], merged_values[0].values)
+
+    self.assertEquals((page1, 'x'),
+                      (merged_values[1].page, merged_values[1].name))
+    self.assertEquals([4, 5], merged_values[1].values)
+
+  def testSamePageMergeNonstandardKeyFunc(self):
+    page0 = self.pages[0]
+    page1 = self.pages[1]
+
+    all_values = [scalar.ScalarValue(
+                      page0, 'x', 'units', 1,
+                      improvement_direction=improvement_direction.UP),
+                  scalar.ScalarValue(
+                      page1, 'x', 'units', 4,
+                      improvement_direction=improvement_direction.UP),
+                  scalar.ScalarValue(
+                      page0, 'y', 'units', 2,
+                      improvement_direction=improvement_direction.UP),
+                  scalar.ScalarValue(
+                      page1, 'y', 'units', 5,
+                      improvement_direction=improvement_direction.UP)]
+
+    merged_values = merge_values.MergeLikeValuesFromSamePage(
+      all_values, key_func=lambda v: v.page.display_name)
+    # Sort the results so that their order is predictable for the subsequent
+    # assertions.
+    merged_values.sort(key=lambda x: x.page.url)
+
+    self.assertEquals(2, len(merged_values))
+    self.assertEquals([1, 2], merged_values[0].values)
+    self.assertEquals([4, 5], merged_values[1].values)
+
+  def testSamePageMergeOneValue(self):
+    page0 = self.pages[0]
+
+    all_values = [scalar.ScalarValue(
+                      page0, 'x', 'units', 1,
+                      improvement_direction=improvement_direction.DOWN)]
+
+    # Sort the results so that their order is predictable for the subsequent
+    # assertions.
+    merged_values = merge_values.MergeLikeValuesFromSamePage(all_values)
+    self.assertEquals(1, len(merged_values))
+    self.assertEquals(all_values[0].name, merged_values[0].name)
+    self.assertEquals(all_values[0].units, merged_values[0].units)
+
+  def testSamePageMergeWithInteractionRecord(self):
+    page0 = self.pages[0]
+
+    all_values = [scalar.ScalarValue(
+                      page0, 'foo-x', 'units', 1, tir_label='foo',
+                      improvement_direction=improvement_direction.UP),
+                  scalar.ScalarValue(
+                      page0, 'foo-x', 'units', 4, tir_label='foo',
+                      improvement_direction=improvement_direction.UP)]
+
+    merged_values = merge_values.MergeLikeValuesFromSamePage(all_values)
+    self.assertEquals(1, len(merged_values))
+    self.assertEquals('foo', merged_values[0].tir_label)
+
+  def testSamePageMergeWithTwoInteractionRecords(self):
+    page0 = self.pages[0]
+
+    all_values = [scalar.ScalarValue(page0, 'x', 'units', 1, tir_label='foo'),
+                  scalar.ScalarValue(page0, 'x', 'units', 4, tir_label='bar')]
+
+    merged_values = merge_values.MergeLikeValuesFromSamePage(all_values)
+    self.assertEquals(2, len(merged_values))
+    self.assertEquals('foo', merged_values[0].tir_label)
+    self.assertEquals('bar', merged_values[1].tir_label)
+
+  def testSamePageMergeWithSameGroupingKey(self):
+    page0 = self.pages[0]
+    all_values = [scalar.ScalarValue(page0, 'x', 'units', 1,
+                                     grouping_keys={'foo': 'bar'}),
+                  scalar.ScalarValue(page0, 'x', 'units', 4,
+                                     grouping_keys={'foo': 'bar'})]
+
+    merged_values = merge_values.MergeLikeValuesFromSamePage(all_values)
+    self.assertEquals(1, len(merged_values))
+    self.assertEquals({'foo': 'bar'}, merged_values[0].grouping_keys)
+
+  def testSamePageMergeWithDifferentGroupingKey(self):
+    page0 = self.pages[0]
+    all_values = [scalar.ScalarValue(page0, 'x', 'units', 1,
+                                     grouping_keys={'foo': 'bar'}),
+                  scalar.ScalarValue(page0, 'x', 'units', 4,
+                                     grouping_keys={'foo': 'qux'})]
+
+    merged_values = merge_values.MergeLikeValuesFromSamePage(all_values)
+    self.assertEquals(2, len(merged_values))
+    self.assertEquals({'foo': 'bar'}, merged_values[0].grouping_keys)
+    self.assertEquals({'foo': 'qux'}, merged_values[1].grouping_keys)
+
+  def testDifferentPageMergeBasic(self):
+    page0 = self.pages[0]
+    page1 = self.pages[1]
+
+    all_values = [scalar.ScalarValue(
+                      page0, 'x', 'units', 1,
+                      improvement_direction=improvement_direction.UP),
+                  scalar.ScalarValue(
+                      page1, 'x', 'units', 2,
+                      improvement_direction=improvement_direction.UP),
+                  scalar.ScalarValue(
+                      page0, 'y', 'units', 10,
+                      improvement_direction=improvement_direction.UP),
+                  scalar.ScalarValue(
+                      page1, 'y', 'units', 20,
+                      improvement_direction=improvement_direction.UP)]
+
+    merged_values = merge_values.MergeLikeValuesFromDifferentPages(all_values)
+    merged_values.sort(key=lambda x: x.name)
+    self.assertEquals(2, len(merged_values))
+
+    self.assertEquals((None, 'x'),
+                      (merged_values[0].page, merged_values[0].name))
+    self.assertEquals([1, 2], merged_values[0].values)
+
+    self.assertEquals((None, 'y'),
+                      (merged_values[1].page, merged_values[1].name))
+    self.assertEquals([10, 20], merged_values[1].values)
+
+  def testDifferentPageMergeNonstandardKeyFunc(self):
+    page0 = self.pages[0]
+    page1 = self.pages[1]
+
+    all_values = [scalar.ScalarValue(
+                      page0, 'x', 'units', 1,
+                      improvement_direction=improvement_direction.UP),
+                  scalar.ScalarValue(
+                      page1, 'x', 'units', 2,
+                      improvement_direction=improvement_direction.UP),
+                  scalar.ScalarValue(
+                      page0, 'y', 'units', 10,
+                      improvement_direction=improvement_direction.UP),
+                  scalar.ScalarValue(
+                      page1, 'y', 'units', 20,
+                      improvement_direction=improvement_direction.UP)]
+
+    merged_values = merge_values.MergeLikeValuesFromDifferentPages(
+      all_values, key_func=lambda v: True)
+
+    self.assertEquals(1, len(merged_values))
+    self.assertEquals([1, 2, 10, 20], merged_values[0].values)
+
+  def testDifferentPageMergeSingleValueStillMerges(self):
+    page0 = self.pages[0]
+
+    all_values = [scalar.ScalarValue(
+                      page0, 'x', 'units', 1,
+                      improvement_direction=improvement_direction.DOWN)]
+
+    # Sort the results so that their order is predictable for the subsequent
+    # assertions.
+    merged_values = merge_values.MergeLikeValuesFromDifferentPages(all_values)
+    self.assertEquals(1, len(merged_values))
+
+    self.assertEquals((None, 'x'),
+                      (merged_values[0].page, merged_values[0].name))
+    self.assertTrue(
+        isinstance(merged_values[0], list_of_scalar_values.ListOfScalarValues))
+    self.assertEquals([1], merged_values[0].values)
+
+  def testDifferentPageMergeWithInteractionRecord(self):
+    page0 = self.pages[0]
+    page1 = self.pages[1]
+
+    v0 = scalar.ScalarValue(page0, 'x', 'units', 1, tir_label='foo')
+    v1 = scalar.ScalarValue(page0, 'y', 'units', 30, tir_label='bar')
+    v2 = scalar.ScalarValue(page1, 'x', 'units', 2, tir_label='foo')
+    v3 = scalar.ScalarValue(page1, 'y', 'units', 40, tir_label='baz')
+
+    all_values = [v0, v1, v2, v3]
+
+    merged_x = list_of_scalar_values.ListOfScalarValues(
+      None, 'x', 'units', [1, 2], tir_label='foo')
+    merged_y_bar = list_of_scalar_values.ListOfScalarValues(
+      None, 'y', 'units', [30], tir_label='bar')
+    merged_y_baz = list_of_scalar_values.ListOfScalarValues(
+      None, 'y', 'units', [40], tir_label='baz')
+
+    merged_values = merge_values.MergeLikeValuesFromDifferentPages(all_values)
+    merged_values.sort(key=lambda x: x.tir_label)
+
+    self.assertEquals([merged_y_bar, merged_y_baz, merged_x], merged_values)
diff --git a/catapult/telemetry/telemetry/value/none_values.py b/catapult/telemetry/telemetry/value/none_values.py
new file mode 100644
index 0000000..7e2759a
--- /dev/null
+++ b/catapult/telemetry/telemetry/value/none_values.py
@@ -0,0 +1,24 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+MERGE_FAILURE_REASON = (
+    'Merging values containing a None value results in a None value.')
+
+class NoneValueMissingReason(Exception):
+  pass
+
+class ValueMustHaveNoneValue(Exception):
+  pass
+
+def ValidateNoneValueReason(value, none_value_reason):
+  """Ensures that the none_value_reason is appropriate for the given value.
+
+  There is a logical equality between having a value of None and having a
+  reason for being None. That is to say, value is None if and only if
+  none_value_reason is a string.
+  """
+  if value is None and not isinstance(none_value_reason, basestring):
+    raise NoneValueMissingReason()
+  if value is not None and none_value_reason is not None:
+    raise ValueMustHaveNoneValue()
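A quick sketch of the contract this helper enforces; the two failure modes mirror the unit tests elsewhere in this change:

from telemetry.value import none_values

none_values.ValidateNoneValueReason(None, 'trace was empty')  # OK: None + reason
none_values.ValidateNoneValueReason(42, None)                 # OK: value, no reason

try:
  none_values.ValidateNoneValueReason(None, None)
except none_values.NoneValueMissingReason:
  pass  # a None value without a reason is rejected

try:
  none_values.ValidateNoneValueReason(42, 'reason')
except none_values.ValueMustHaveNoneValue:
  pass  # a reason on a non-None value is rejected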
diff --git a/catapult/telemetry/telemetry/value/scalar.py b/catapult/telemetry/telemetry/value/scalar.py
new file mode 100644
index 0000000..2789b81
--- /dev/null
+++ b/catapult/telemetry/telemetry/value/scalar.py
@@ -0,0 +1,118 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import numbers
+
+from telemetry import value as value_module
+from telemetry.value import list_of_scalar_values
+from telemetry.value import none_values
+from telemetry.value import summarizable
+
+
+class ScalarValue(summarizable.SummarizableValue):
+  def __init__(self, page, name, units, value, important=True,
+               description=None, tir_label=None,
+               none_value_reason=None, improvement_direction=None,
+               grouping_keys=None):
+    """A single value (float or integer) result from a test.
+
+    A test that counts the number of DOM elements in a page might produce a
+    scalar value:
+       ScalarValue(page, 'num_dom_elements', 'count', num_elements)
+    """
+    super(ScalarValue, self).__init__(page, name, units, important, description,
+                                      tir_label, improvement_direction,
+                                      grouping_keys)
+    assert value is None or isinstance(value, numbers.Number)
+    none_values.ValidateNoneValueReason(value, none_value_reason)
+    self.value = value
+    self.none_value_reason = none_value_reason
+
+  def __repr__(self):
+    if self.page:
+      page_name = self.page.display_name
+    else:
+      page_name = 'None'
+    return ('ScalarValue(%s, %s, %s, %s, important=%s, description=%s, '
+            'tir_label=%s, improvement_direction=%s, grouping_keys=%s)') % (
+                page_name,
+                self.name,
+                self.units,
+                self.value,
+                self.important,
+                self.description,
+                self.tir_label,
+                self.improvement_direction,
+                self.grouping_keys)
+
+  def GetBuildbotDataType(self, output_context):
+    if self._IsImportantGivenOutputIntent(output_context):
+      return 'default'
+    return 'unimportant'
+
+  def GetBuildbotValue(self):
+    # Buildbot's print_perf_results method likes to get lists for all values,
+    # even when they are scalar, so list-ize the return value.
+    return [self.value]
+
+  def GetRepresentativeNumber(self):
+    return self.value
+
+  def GetRepresentativeString(self):
+    return str(self.value)
+
+  @staticmethod
+  def GetJSONTypeName():
+    return 'scalar'
+
+  def AsDict(self):
+    d = super(ScalarValue, self).AsDict()
+    d['value'] = self.value
+
+    if self.none_value_reason is not None:
+      d['none_value_reason'] = self.none_value_reason
+
+    return d
+
+  @staticmethod
+  def FromDict(value_dict, page_dict):
+    kwargs = value_module.Value.GetConstructorKwArgs(value_dict, page_dict)
+    kwargs['value'] = value_dict['value']
+
+    if 'improvement_direction' in value_dict:
+      kwargs['improvement_direction'] = value_dict['improvement_direction']
+    if 'none_value_reason' in value_dict:
+      kwargs['none_value_reason'] = value_dict['none_value_reason']
+
+    return ScalarValue(**kwargs)
+
+  @classmethod
+  def MergeLikeValuesFromSamePage(cls, values):
+    assert len(values) > 0
+    v0 = values[0]
+    return cls._MergeLikeValues(values, v0.page, v0.name, v0.tir_label,
+                                v0.grouping_keys)
+
+  @classmethod
+  def MergeLikeValuesFromDifferentPages(cls, values):
+    assert len(values) > 0
+    v0 = values[0]
+    return cls._MergeLikeValues(values, None, v0.name, v0.tir_label,
+                                v0.grouping_keys)
+
+  @classmethod
+  def _MergeLikeValues(cls, values, page, name, tir_label, grouping_keys):
+    v0 = values[0]
+    merged_value = [v.value for v in values]
+    none_value_reason = None
+    if None in merged_value:
+      merged_value = None
+      none_value_reason = none_values.MERGE_FAILURE_REASON
+    return list_of_scalar_values.ListOfScalarValues(
+        page, name, v0.units, merged_value, important=v0.important,
+        description=v0.description,
+        tir_label=tir_label,
+        none_value_reason=none_value_reason,
+        improvement_direction=v0.improvement_direction,
+        grouping_keys=grouping_keys)
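The None-propagation rule in _MergeLikeValues means that a single None sample turns the whole merged list into None, with MERGE_FAILURE_REASON attached. A short sketch (same story/page setup conventions as scalar_unittest.py below):

import os

from telemetry import story
from telemetry import page as page_module
from telemetry.value import none_values
from telemetry.value import scalar

story_set = story.StorySet(base_dir=os.path.dirname(__file__))
page0 = page_module.Page('http://www.bar.com/', story_set, story_set.base_dir)
story_set.AddStory(page0)

v0 = scalar.ScalarValue(page0, 'x', 'unit', 1)
v1 = scalar.ScalarValue(page0, 'x', 'unit', None, none_value_reason='n')
merged = scalar.ScalarValue.MergeLikeValuesFromSamePage([v0, v1])
print(merged.values)                                                  # None
print(merged.none_value_reason == none_values.MERGE_FAILURE_REASON)   # True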
diff --git a/catapult/telemetry/telemetry/value/scalar_unittest.py b/catapult/telemetry/telemetry/value/scalar_unittest.py
new file mode 100644
index 0000000..135a335
--- /dev/null
+++ b/catapult/telemetry/telemetry/value/scalar_unittest.py
@@ -0,0 +1,200 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import os
+import unittest
+
+from telemetry import story
+from telemetry import page as page_module
+from telemetry import value
+from telemetry.value import improvement_direction
+from telemetry.value import none_values
+from telemetry.value import scalar
+
+
+class TestBase(unittest.TestCase):
+  def setUp(self):
+    story_set = story.StorySet(base_dir=os.path.dirname(__file__))
+    story_set.AddStory(
+        page_module.Page('http://www.bar.com/', story_set, story_set.base_dir))
+    story_set.AddStory(
+        page_module.Page('http://www.baz.com/', story_set, story_set.base_dir))
+    story_set.AddStory(
+        page_module.Page('http://www.foo.com/', story_set, story_set.base_dir))
+    self.story_set = story_set
+
+  @property
+  def pages(self):
+    return self.story_set.stories
+
+class ValueTest(TestBase):
+  def testRepr(self):
+    page0 = self.pages[0]
+    v = scalar.ScalarValue(page0, 'x', 'unit', 3, important=True,
+                           description='desc', tir_label='my_ir',
+                           improvement_direction=improvement_direction.DOWN)
+
+    expected = ('ScalarValue(http://www.bar.com/, x, unit, 3, important=True, '
+                'description=desc, tir_label=my_ir, '
+                'improvement_direction=down, grouping_keys={})')
+
+    self.assertEquals(expected, str(v))
+
+  def testBuildbotValueType(self):
+    page0 = self.pages[0]
+    v = scalar.ScalarValue(page0, 'x', 'unit', 3, important=True,
+                           improvement_direction=improvement_direction.DOWN)
+    self.assertEquals('default', v.GetBuildbotDataType(
+        value.COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT))
+    self.assertEquals([3], v.GetBuildbotValue())
+    self.assertEquals(('x', page0.display_name),
+                      v.GetChartAndTraceNameForPerPageResult())
+
+    v = scalar.ScalarValue(page0, 'x', 'unit', 3, important=False,
+                           improvement_direction=improvement_direction.DOWN)
+    self.assertEquals(
+        'unimportant',
+        v.GetBuildbotDataType(value.COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT))
+
+  def testScalarSamePageMerging(self):
+    page0 = self.pages[0]
+    v0 = scalar.ScalarValue(page0, 'x', 'unit', 1,
+                            description='important metric',
+                            improvement_direction=improvement_direction.UP)
+    v1 = scalar.ScalarValue(page0, 'x', 'unit', 2,
+                            description='important metric',
+                            improvement_direction=improvement_direction.UP)
+    self.assertTrue(v1.IsMergableWith(v0))
+
+    vM = scalar.ScalarValue.MergeLikeValuesFromSamePage([v0, v1])
+    self.assertEquals(page0, vM.page)
+    self.assertEquals('x', vM.name)
+    self.assertEquals('unit', vM.units)
+    self.assertEquals('important metric', vM.description)
+    self.assertEquals(True, vM.important)
+    self.assertEquals([1, 2], vM.values)
+    self.assertEquals(improvement_direction.UP, vM.improvement_direction)
+
+  def testScalarDifferentPageMerging(self):
+    page0 = self.pages[0]
+    page1 = self.pages[1]
+    v0 = scalar.ScalarValue(page0, 'x', 'unit', 1,
+                            description='important metric',
+                            improvement_direction=improvement_direction.UP)
+    v1 = scalar.ScalarValue(page1, 'x', 'unit', 2,
+                            description='important metric',
+                            improvement_direction=improvement_direction.UP)
+
+    vM = scalar.ScalarValue.MergeLikeValuesFromDifferentPages([v0, v1])
+    self.assertEquals(None, vM.page)
+    self.assertEquals('x', vM.name)
+    self.assertEquals('unit', vM.units)
+    self.assertEquals('important metric', vM.description)
+    self.assertEquals(True, vM.important)
+    self.assertEquals([1, 2], vM.values)
+    self.assertEquals(improvement_direction.UP, vM.improvement_direction)
+
+  def testScalarWithNoneValueMerging(self):
+    page0 = self.pages[0]
+    v0 = scalar.ScalarValue(
+        page0, 'x', 'unit', 1, improvement_direction=improvement_direction.DOWN)
+    v1 = scalar.ScalarValue(page0, 'x', 'unit', None, none_value_reason='n',
+                            improvement_direction=improvement_direction.DOWN)
+    self.assertTrue(v1.IsMergableWith(v0))
+
+    vM = scalar.ScalarValue.MergeLikeValuesFromSamePage([v0, v1])
+    self.assertEquals(None, vM.values)
+    self.assertEquals(none_values.MERGE_FAILURE_REASON,
+                      vM.none_value_reason)
+
+  def testScalarWithNoneValueMustHaveNoneReason(self):
+    page0 = self.pages[0]
+    self.assertRaises(none_values.NoneValueMissingReason,
+                      lambda: scalar.ScalarValue(
+                          page0, 'x', 'unit', None,
+                          improvement_direction=improvement_direction.UP))
+
+  def testScalarWithNoneReasonMustHaveNoneValue(self):
+    page0 = self.pages[0]
+    self.assertRaises(none_values.ValueMustHaveNoneValue,
+                      lambda: scalar.ScalarValue(
+                          page0, 'x', 'unit', 1, none_value_reason='n',
+                          improvement_direction=improvement_direction.UP))
+
+  def testAsDict(self):
+    v = scalar.ScalarValue(None, 'x', 'unit', 42, important=False,
+                           improvement_direction=improvement_direction.DOWN)
+    d = v.AsDictWithoutBaseClassEntries()
+
+    self.assertEquals(d, {
+          'value': 42
+        })
+
+  def testNoneValueAsDict(self):
+    v = scalar.ScalarValue(None, 'x', 'unit', None, important=False,
+                           none_value_reason='n',
+                           improvement_direction=improvement_direction.DOWN)
+    d = v.AsDictWithoutBaseClassEntries()
+
+    self.assertEquals(d, {
+          'value': None,
+          'none_value_reason': 'n'
+        })
+
+  def testFromDictInt(self):
+    d = {
+      'type': 'scalar',
+      'name': 'x',
+      'units': 'unit',
+      'value': 42,
+      'improvement_direction': improvement_direction.DOWN,
+    }
+
+    v = value.Value.FromDict(d, {})
+
+    self.assertTrue(isinstance(v, scalar.ScalarValue))
+    self.assertEquals(v.value, 42)
+    self.assertEquals(v.improvement_direction, improvement_direction.DOWN)
+
+  def testFromDictFloat(self):
+    d = {
+      'type': 'scalar',
+      'name': 'x',
+      'units': 'unit',
+      'value': 42.4,
+      'improvement_direction': improvement_direction.UP,
+    }
+
+    v = value.Value.FromDict(d, {})
+
+    self.assertTrue(isinstance(v, scalar.ScalarValue))
+    self.assertEquals(v.value, 42.4)
+
+  def testFromDictWithoutImprovementDirection(self):
+    d = {
+      'type': 'scalar',
+      'name': 'x',
+      'units': 'unit',
+      'value': 42,
+    }
+
+    v = value.Value.FromDict(d, {})
+
+    self.assertTrue(isinstance(v, scalar.ScalarValue))
+    self.assertIsNone(v.improvement_direction)
+
+  def testFromDictNoneValue(self):
+    d = {
+      'type': 'scalar',
+      'name': 'x',
+      'units': 'unit',
+      'value': None,
+      'none_value_reason': 'n',
+      'improvement_direction': improvement_direction.UP,
+    }
+
+    v = value.Value.FromDict(d, {})
+
+    self.assertTrue(isinstance(v, scalar.ScalarValue))
+    self.assertEquals(v.value, None)
+    self.assertEquals(v.none_value_reason, 'n')
diff --git a/catapult/telemetry/telemetry/value/skip.py b/catapult/telemetry/telemetry/value/skip.py
new file mode 100644
index 0000000..1dd0108
--- /dev/null
+++ b/catapult/telemetry/telemetry/value/skip.py
@@ -0,0 +1,75 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry import value as value_module
+
+
+class SkipValue(value_module.Value):
+
+  def __init__(self, page, reason, description=None):
+    """A value representing a skipped page.
+
+    Args:
+      page: The skipped page object.
+      reason: The string reason the page was skipped.
+    """
+    super(SkipValue, self).__init__(page, 'skip', '', True, description, None,
+                                    None)
+    self._reason = reason
+
+  def __repr__(self):
+    page_name = self.page.display_name
+    return 'SkipValue(%s, %s, description=%s)' % (page_name, self._reason,
+                                                  self.description)
+
+  @property
+  def reason(self):
+    return self._reason
+
+  def GetBuildbotDataType(self, output_context):
+    return None
+
+  def GetBuildbotValue(self):
+    return None
+
+  def GetChartAndTraceNameForPerPageResult(self):
+    return None
+
+  def GetRepresentativeNumber(self):
+    return None
+
+  def GetRepresentativeString(self):
+    return None
+
+  @staticmethod
+  def GetJSONTypeName():
+    return 'skip'
+
+  def AsDict(self):
+    d = super(SkipValue, self).AsDict()
+    d['reason'] = self._reason
+    return d
+
+  @staticmethod
+  def FromDict(value_dict, page_dict):
+    kwargs = value_module.Value.GetConstructorKwArgs(value_dict, page_dict)
+    del kwargs['name']
+    del kwargs['units']
+    if 'important' in kwargs:
+      del kwargs['important']
+    kwargs['reason'] = value_dict['reason']
+    if 'tir_label' in kwargs:
+      del kwargs['tir_label']
+    if 'grouping_keys' in kwargs:
+      del kwargs['grouping_keys']
+
+    return SkipValue(**kwargs)
+
+  @classmethod
+  def MergeLikeValuesFromSamePage(cls, values):
+    assert False, 'Should not be called.'
+
+  @classmethod
+  def MergeLikeValuesFromDifferentPages(cls, values):
+    assert False, 'Should not be called.'
diff --git a/catapult/telemetry/telemetry/value/skip_unittest.py b/catapult/telemetry/telemetry/value/skip_unittest.py
new file mode 100644
index 0000000..9b11579
--- /dev/null
+++ b/catapult/telemetry/telemetry/value/skip_unittest.py
@@ -0,0 +1,59 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import unittest
+
+from telemetry import story
+from telemetry import page as page_module
+from telemetry import value
+from telemetry.value import skip
+
+
+class TestBase(unittest.TestCase):
+  def setUp(self):
+    story_set = story.StorySet(base_dir=os.path.dirname(__file__))
+    story_set.AddStory(
+        page_module.Page('http://www.bar.com/', story_set, story_set.base_dir))
+    self.story_set = story_set
+
+  @property
+  def pages(self):
+    return self.story_set.stories
+
+class ValueTest(TestBase):
+  def testRepr(self):
+    v = skip.SkipValue(self.pages[0], 'page skipped for testing reason',
+                       description='desc')
+
+    expected = ('SkipValue(http://www.bar.com/, '
+                'page skipped for testing reason, '
+                'description=desc)')
+
+    self.assertEquals(expected, str(v))
+
+  def testBuildbotAndRepresentativeValue(self):
+    v = skip.SkipValue(self.pages[0], 'page skipped for testing reason')
+    self.assertIsNone(v.GetBuildbotValue())
+    self.assertIsNone(v.GetBuildbotDataType(
+        value.COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT))
+    self.assertIsNone(v.GetChartAndTraceNameForPerPageResult())
+    self.assertIsNone(v.GetRepresentativeNumber())
+    self.assertIsNone(v.GetRepresentativeString())
+
+  def testAsDict(self):
+    v = skip.SkipValue(self.pages[0], 'page skipped for testing reason')
+    d = v.AsDictWithoutBaseClassEntries()
+    self.assertEquals(d['reason'], 'page skipped for testing reason')
+
+  def testFromDict(self):
+    d = {
+      'type': 'skip',
+      'name': 'skip',
+      'units': '',
+      'reason': 'page skipped for testing reason'
+    }
+    v = value.Value.FromDict(d, {})
+    self.assertTrue(isinstance(v, skip.SkipValue))
+    self.assertEquals(v.reason, 'page skipped for testing reason')
diff --git a/catapult/telemetry/telemetry/value/summarizable.py b/catapult/telemetry/telemetry/value/summarizable.py
new file mode 100644
index 0000000..9cfcf46
--- /dev/null
+++ b/catapult/telemetry/telemetry/value/summarizable.py
@@ -0,0 +1,72 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry import value as value_module
+from telemetry.value import (improvement_direction
+                             as improvement_direction_module)
+
+
+class SummarizableValue(value_module.Value):
+  def __init__(self, page, name, units, important, description, tir_label,
+               improvement_direction, grouping_keys):
+    """A summarizable value result from a test."""
+    super(SummarizableValue, self).__init__(
+        page, name, units, important, description, tir_label, grouping_keys)
+# TODO(eakuefner): uncomment this assert after Telemetry clients are fixed.
+# Note: Telemetry unittests satisfy this assert.
+#    assert improvement_direction_module.IsValid(improvement_direction)
+    self._improvement_direction = improvement_direction
+
+  @property
+  def improvement_direction(self):
+    return self._improvement_direction
+
+  def AsDict(self):
+    d = super(SummarizableValue, self).AsDict()
+    if improvement_direction_module.IsValid(self.improvement_direction):
+      d['improvement_direction'] = self.improvement_direction
+    return d
+
+  @staticmethod
+  def GetJSONTypeName():
+    return 'summarizable'
+
+  def AsDictWithoutBaseClassEntries(self):
+    d = super(SummarizableValue, self).AsDictWithoutBaseClassEntries()
+    if 'improvement_direction' in d:
+      del d['improvement_direction']
+    return d
+
+  def GetBuildbotDataType(self, output_context):
+    """Returns the buildbot's equivalent data_type.
+
+    This should be one of the values accepted by perf_tests_results_helper.py.
+    """
+    raise NotImplementedError()
+
+  def GetBuildbotValue(self):
+    """Returns the buildbot's equivalent value."""
+    raise NotImplementedError()
+
+  @classmethod
+  def MergeLikeValuesFromSamePage(cls, values):
+    raise NotImplementedError()
+
+  @classmethod
+  def MergeLikeValuesFromDifferentPages(cls, values):
+    raise NotImplementedError()
+
+  def GetRepresentativeNumber(self):
+    """Gets a single scalar value that best-represents this value.
+
+    Returns None if not possible.
+    """
+    raise NotImplementedError()
+
+  def GetRepresentativeString(self):
+    """Gets a string value that best-represents this value.
+
+    Returns None if not possible.
+    """
+    raise NotImplementedError()
diff --git a/catapult/telemetry/telemetry/value/summarizable_unittest.py b/catapult/telemetry/telemetry/value/summarizable_unittest.py
new file mode 100644
index 0000000..54a94b2
--- /dev/null
+++ b/catapult/telemetry/telemetry/value/summarizable_unittest.py
@@ -0,0 +1,30 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import unittest
+
+from telemetry.value import summarizable
+
+class SummarizableTest(unittest.TestCase):
+
+  def testAsDictWithoutImprovementDirection(self):
+    value = summarizable.SummarizableValue(
+        None, 'foo', 'bars', important=False, description='desc',
+        tir_label=None, improvement_direction=None, grouping_keys=None)
+
+    self.assertNotIn('improvement_direction', value.AsDict())
+
+  def testAsDictWithoutBaseClassEntries(self):
+    value = summarizable.SummarizableValue(
+        None, 'foo', 'bars', important=False, description='desc',
+        tir_label=None, improvement_direction=None, grouping_keys=None)
+
+    self.assertFalse(value.AsDictWithoutBaseClassEntries())
+
+  def testAsDictWithInvalidImprovementDirection(self):
+    # TODO(eakuefner): Remove this test when we check I.D. in constructor
+    value = summarizable.SummarizableValue(
+        None, 'foo', 'bars', important=False, description='desc',
+        tir_label=None, improvement_direction='baz', grouping_keys=None)
+
+    self.assertNotIn('improvement_direction', value.AsDict())
diff --git a/catapult/telemetry/telemetry/value/summary.py b/catapult/telemetry/telemetry/value/summary.py
new file mode 100644
index 0000000..5dde5c9
--- /dev/null
+++ b/catapult/telemetry/telemetry/value/summary.py
@@ -0,0 +1,150 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from collections import defaultdict
+
+from telemetry.value import failure
+from telemetry.value import merge_values
+from telemetry.value import skip
+
+
+class Summary(object):
+  """Computes summary values from the per-page-run values produced by a test.
+
+  Some telemetry benchmarks repeat a number of times in order to get a
+  reliable measurement. The test does not have to handle merging of these
+  runs: the summarizer does it for you.
+
+  For instance, if two pages run 3 times and 1 time respectively:
+      ScalarValue(page1, 'foo', units='ms', 1)
+      ScalarValue(page1, 'foo', units='ms', 1)
+      ScalarValue(page1, 'foo', units='ms', 1)
+      ScalarValue(page2, 'foo', units='ms', 2)
+
+  Then the summarizer will produce two sets of values. First,
+  computed_per_page_values:
+      [
+         ListOfScalarValues(page1, 'foo', units='ms', [1,1,1]),
+         ListOfScalarValues(page2, 'foo', units='ms', [2])
+      ]
+
+  In addition, it will produce a summary value:
+      [
+         ListOfScalarValues(page=None, 'foo', units='ms', [1,1,1,2])
+      ]
+
+  """
+  def __init__(self, all_page_specific_values,
+               key_func=merge_values.DefaultKeyFunc):
+    had_failures = any(isinstance(v, failure.FailureValue) for v in
+        all_page_specific_values)
+    self.had_failures = had_failures
+    self._computed_per_page_values = []
+    self._computed_summary_values = []
+    self._interleaved_computed_per_page_values_and_summaries = []
+    self._key_func = key_func
+    self._ComputePerPageValues(all_page_specific_values)
+
+  @property
+  def computed_per_page_values(self):
+    return self._computed_per_page_values
+
+  @property
+  def computed_summary_values(self):
+    return self._computed_summary_values
+
+  @property
+  def interleaved_computed_per_page_values_and_summaries(self):
+    """Returns the computed per page values and summary values interleaved.
+
+    All the results for a given name are printed together. First per page
+    values, then summary values.
+
+    """
+    return self._interleaved_computed_per_page_values_and_summaries
+
+  def _ComputePerPageValues(self, all_page_specific_values):
+    all_successful_page_values = [
+        v for v in all_page_specific_values if not (isinstance(
+            v, failure.FailureValue) or isinstance(v, skip.SkipValue))]
+
+    # We will later need to determine how many values were originally created
+    # for each value name, to apply a workaround meant to clean up the printf
+    # output.
+    num_successful_pages_for_key = defaultdict(int)
+    for v in all_successful_page_values:
+      num_successful_pages_for_key[self._key_func(v)] += 1
+
+    # By here, due to page repeat options, all_successful_page_values may
+    # contain values of the same name not only from multiple pages, but also
+    # repeated from the same page. So even if, for instance, only one page
+    # ran, it may have run twice, producing two 'x' values.
+    #
+    # So, get rid of the repeated pages by merging.
+    merged_page_values = merge_values.MergeLikeValuesFromSamePage(
+        all_successful_page_values, self._key_func)
+
+    # Now we have a bunch of values, but there is only one value_name per page.
+    # Suppose page1 and page2 ran, producing values x and y. We want to print
+    #    x for page1
+    #    x for page2
+    #    x for page1, page2 combined
+    #
+    #    y for page1
+    #    y for page2
+    #    y for page1, page2 combined
+    #
+    # We already have the x values in the values array. But, we will need
+    # them indexable by summary key.
+    #
+    # The following dict maps summary key -> list of per-page values that
+    # share that key.
+    per_page_values_by_key = defaultdict(list)
+    for value in merged_page_values:
+      per_page_values_by_key[self._key_func(value)].append(value)
+
+    # We already have the x values in the values array. But, we also need
+    # the values merged across the pages. And, we will need them indexed by
+    # summary key so that we can find them when printing out value names in
+    # alphabetical order.
+    merged_pages_value_by_key = {}
+    if not self.had_failures:
+      for value in merge_values.MergeLikeValuesFromDifferentPages(
+          all_successful_page_values, self._key_func):
+        value_key = self._key_func(value)
+        assert value_key not in merged_pages_value_by_key
+        merged_pages_value_by_key[value_key] = value
+
+    keys = sorted(set([self._key_func(v) for v in merged_page_values]))
+
+    # Time to walk through the values by key, printing first the page-specific
+    # values and then the merged_site value.
+    for key in keys:
+      per_page_values = per_page_values_by_key.get(key, [])
+
+      # Sort the values by their URL.
+      sorted_per_page_values = list(per_page_values)
+      sorted_per_page_values.sort(
+          key=lambda per_page_values: per_page_values.page.display_name)
+
+      # Output the page-specific results.
+      num_successful_pages_for_this_key = (
+          num_successful_pages_for_key[key])
+      for per_page_value in sorted_per_page_values:
+        self._ComputePerPageValue(per_page_value,
+                                  num_successful_pages_for_this_key)
+
+      # Output the combined values.
+      merged_pages_value = merged_pages_value_by_key.get(key, None)
+      if merged_pages_value:
+        self._computed_summary_values.append(merged_pages_value)
+        self._interleaved_computed_per_page_values_and_summaries.append(
+            merged_pages_value)
+
+  def _ComputePerPageValue(
+      self, value, num_successful_pages_for_this_value_name):
+    if num_successful_pages_for_this_value_name >= 1:
+      # Save the result.
+      self._computed_per_page_values.append(value)
+      self._interleaved_computed_per_page_values_and_summaries.append(value)
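A minimal usage sketch, following the pattern of summary_unittest.py below: one page measured twice produces a per-page aggregate and a cross-page summary, interleaved.

import os

from telemetry import story
from telemetry import page as page_module
from telemetry.internal.results import page_test_results
from telemetry.value import improvement_direction
from telemetry.value import scalar
from telemetry.value import summary as summary_module

story_set = story.StorySet(base_dir=os.path.dirname(__file__))
page0 = page_module.Page('http://www.bar.com/', story_set, story_set.base_dir)
story_set.AddStory(page0)

results = page_test_results.PageTestResults()
for sample in (3, 4):
  results.WillRunPage(page0)
  results.AddValue(scalar.ScalarValue(
      page0, 'a', 'seconds', sample,
      improvement_direction=improvement_direction.UP))
  results.DidRunPage(page0)

summary = summary_module.Summary(results.all_page_specific_values)
# Per-page ListOfScalarValues [3, 4] for page0, then the summary [3, 4].
for value in summary.interleaved_computed_per_page_values_and_summaries:
  print(value)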
diff --git a/catapult/telemetry/telemetry/value/summary_unittest.py b/catapult/telemetry/telemetry/value/summary_unittest.py
new file mode 100644
index 0000000..75ba4a3
--- /dev/null
+++ b/catapult/telemetry/telemetry/value/summary_unittest.py
@@ -0,0 +1,425 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import unittest
+
+from telemetry import story
+from telemetry.internal.results import page_test_results
+from telemetry import page as page_module
+from telemetry.value import failure
+from telemetry.value import histogram
+from telemetry.value import improvement_direction
+from telemetry.value import list_of_scalar_values
+from telemetry.value import scalar
+from telemetry.value import summary as summary_module
+
+
+class TestBase(unittest.TestCase):
+  def setUp(self):
+    story_set = story.StorySet(base_dir=os.path.dirname(__file__))
+    story_set.AddStory(
+        page_module.Page('http://www.bar.com/', story_set, story_set.base_dir))
+    story_set.AddStory(
+        page_module.Page('http://www.baz.com/', story_set, story_set.base_dir))
+    story_set.AddStory(
+        page_module.Page('http://www.foo.com/', story_set, story_set.base_dir))
+    self.story_set = story_set
+
+  @property
+  def pages(self):
+    return self.story_set.stories
+
+
+class SummaryTest(TestBase):
+  def testBasicSummary(self):
+    page0 = self.pages[0]
+    page1 = self.pages[1]
+
+    results = page_test_results.PageTestResults()
+
+    results.WillRunPage(page0)
+    v0 = scalar.ScalarValue(page0, 'a', 'seconds', 3,
+                            improvement_direction=improvement_direction.UP)
+    results.AddValue(v0)
+    results.DidRunPage(page0)
+
+    results.WillRunPage(page1)
+    v1 = scalar.ScalarValue(page1, 'a', 'seconds', 7,
+                            improvement_direction=improvement_direction.UP)
+    results.AddValue(v1)
+    results.DidRunPage(page1)
+
+    summary = summary_module.Summary(results.all_page_specific_values)
+    values = summary.interleaved_computed_per_page_values_and_summaries
+
+    v0_list = list_of_scalar_values.ListOfScalarValues(
+        page0, 'a', 'seconds', [3],
+        improvement_direction=improvement_direction.UP)
+    v1_list = list_of_scalar_values.ListOfScalarValues(
+        page1, 'a', 'seconds', [7],
+        improvement_direction=improvement_direction.UP)
+    merged_value = list_of_scalar_values.ListOfScalarValues(
+        None, 'a', 'seconds', [3, 7],
+        improvement_direction=improvement_direction.UP)
+
+    self.assertEquals(3, len(values))
+    self.assertIn(v0_list, values)
+    self.assertIn(v1_list, values)
+    self.assertIn(merged_value, values)
+
+  def testBasicSummaryWithOnlyOnePage(self):
+    page0 = self.pages[0]
+
+    results = page_test_results.PageTestResults()
+
+    results.WillRunPage(page0)
+    v0 = scalar.ScalarValue(page0, 'a', 'seconds', 3,
+                            improvement_direction=improvement_direction.UP)
+    results.AddValue(v0)
+    results.DidRunPage(page0)
+
+    summary = summary_module.Summary(results.all_page_specific_values)
+    values = summary.interleaved_computed_per_page_values_and_summaries
+
+    v0_list = list_of_scalar_values.ListOfScalarValues(
+        page0, 'a', 'seconds', [3],
+        improvement_direction=improvement_direction.UP)
+    merged_list = list_of_scalar_values.ListOfScalarValues(
+        None, 'a', 'seconds', [3],
+        improvement_direction=improvement_direction.UP)
+
+    self.assertEquals(2, len(values))
+    self.assertIn(v0_list, values)
+    self.assertIn(merged_list, values)
+
+  def testBasicSummaryNonuniformResults(self):
+    page0 = self.pages[0]
+    page1 = self.pages[1]
+    page2 = self.pages[2]
+
+    results = page_test_results.PageTestResults()
+    results.WillRunPage(page0)
+    v0 = scalar.ScalarValue(page0, 'a', 'seconds', 3,
+                            improvement_direction=improvement_direction.UP)
+    results.AddValue(v0)
+    v1 = scalar.ScalarValue(page0, 'b', 'seconds', 10,
+                            improvement_direction=improvement_direction.UP)
+    results.AddValue(v1)
+    results.DidRunPage(page0)
+
+    results.WillRunPage(page1)
+    v2 = scalar.ScalarValue(page1, 'a', 'seconds', 3,
+                            improvement_direction=improvement_direction.UP)
+    results.AddValue(v2)
+    v3 = scalar.ScalarValue(page1, 'b', 'seconds', 10,
+                            improvement_direction=improvement_direction.UP)
+    results.AddValue(v3)
+    results.DidRunPage(page1)
+
+    results.WillRunPage(page2)
+    v4 = scalar.ScalarValue(page2, 'a', 'seconds', 7,
+                            improvement_direction=improvement_direction.UP)
+    results.AddValue(v4)
+    # Note, page[2] does not report a 'b' metric.
+    results.DidRunPage(page2)
+
+    summary = summary_module.Summary(results.all_page_specific_values)
+    values = summary.interleaved_computed_per_page_values_and_summaries
+
+    v0_list = list_of_scalar_values.ListOfScalarValues(
+        page0, 'a', 'seconds', [3],
+        improvement_direction=improvement_direction.UP)
+    v1_list = list_of_scalar_values.ListOfScalarValues(
+        page0, 'b', 'seconds', [10],
+        improvement_direction=improvement_direction.UP)
+    v2_list = list_of_scalar_values.ListOfScalarValues(
+        page1, 'a', 'seconds', [3],
+        improvement_direction=improvement_direction.UP)
+    v3_list = list_of_scalar_values.ListOfScalarValues(
+        page1, 'b', 'seconds', [10],
+        improvement_direction=improvement_direction.UP)
+    v4_list = list_of_scalar_values.ListOfScalarValues(
+        page2, 'a', 'seconds', [7],
+        improvement_direction=improvement_direction.UP)
+
+    a_summary = list_of_scalar_values.ListOfScalarValues(
+        None, 'a', 'seconds', [3, 3, 7],
+        improvement_direction=improvement_direction.UP)
+    b_summary = list_of_scalar_values.ListOfScalarValues(
+        None, 'b', 'seconds', [10, 10],
+        improvement_direction=improvement_direction.UP)
+
+    self.assertEquals(7, len(values))
+    self.assertIn(v0_list, values)
+    self.assertIn(v1_list, values)
+    self.assertIn(v2_list, values)
+    self.assertIn(v3_list, values)
+    self.assertIn(v4_list, values)
+    self.assertIn(a_summary, values)
+    self.assertIn(b_summary, values)
+
+  def testBasicSummaryPassAndFailPage(self):
+    """If a page failed, only print summary for individual pages."""
+    page0 = self.pages[0]
+    page1 = self.pages[1]
+
+    results = page_test_results.PageTestResults()
+    results.WillRunPage(page0)
+    v0 = scalar.ScalarValue(page0, 'a', 'seconds', 3,
+                            improvement_direction=improvement_direction.UP)
+    results.AddValue(v0)
+    v1 = failure.FailureValue.FromMessage(page0, 'message')
+    results.AddValue(v1)
+    results.DidRunPage(page0)
+
+    results.WillRunPage(page1)
+    v2 = scalar.ScalarValue(page1, 'a', 'seconds', 7,
+                            improvement_direction=improvement_direction.UP)
+    results.AddValue(v2)
+    results.DidRunPage(page1)
+
+    summary = summary_module.Summary(results.all_page_specific_values)
+    values = summary.interleaved_computed_per_page_values_and_summaries
+
+    v0_list = list_of_scalar_values.ListOfScalarValues(
+        page0, 'a', 'seconds', [3],
+        improvement_direction=improvement_direction.UP)
+    v2_list = list_of_scalar_values.ListOfScalarValues(
+        page1, 'a', 'seconds', [7],
+        improvement_direction=improvement_direction.UP)
+
+    self.assertEquals(2, len(values))
+    self.assertIn(v0_list, values)
+    self.assertIn(v2_list, values)
+
+  def testRepeatedPagesetOneIterationOnePageFails(self):
+    """Page fails on one iteration, no averaged results should print."""
+    page0 = self.pages[0]
+    page1 = self.pages[1]
+
+    results = page_test_results.PageTestResults()
+    results.WillRunPage(page0)
+    v0 = scalar.ScalarValue(page0, 'a', 'seconds', 3,
+                            improvement_direction=improvement_direction.UP)
+    results.AddValue(v0)
+    results.DidRunPage(page0)
+
+    results.WillRunPage(page1)
+    v1 = scalar.ScalarValue(page1, 'a', 'seconds', 7,
+                            improvement_direction=improvement_direction.UP)
+    results.AddValue(v1)
+    v2 = failure.FailureValue.FromMessage(page1, 'message')
+    results.AddValue(v2)
+    results.DidRunPage(page1)
+
+    results.WillRunPage(page0)
+    v3 = scalar.ScalarValue(page0, 'a', 'seconds', 4,
+                            improvement_direction=improvement_direction.UP)
+    results.AddValue(v3)
+    results.DidRunPage(page0)
+
+    results.WillRunPage(page1)
+    v4 = scalar.ScalarValue(page1, 'a', 'seconds', 8,
+                            improvement_direction=improvement_direction.UP)
+    results.AddValue(v4)
+    results.DidRunPage(page1)
+
+    summary = summary_module.Summary(results.all_page_specific_values)
+    values = summary.interleaved_computed_per_page_values_and_summaries
+
+    page0_aggregated = list_of_scalar_values.ListOfScalarValues(
+        page0, 'a', 'seconds', [3, 4],
+        improvement_direction=improvement_direction.UP)
+    page1_aggregated = list_of_scalar_values.ListOfScalarValues(
+        page1, 'a', 'seconds', [7, 8],
+        improvement_direction=improvement_direction.UP)
+
+    self.assertEquals(2, len(values))
+    self.assertIn(page0_aggregated, values)
+    self.assertIn(page1_aggregated, values)
+
+  def testRepeatedPages(self):
+    page0 = self.pages[0]
+    page1 = self.pages[1]
+
+    results = page_test_results.PageTestResults()
+    results.WillRunPage(page0)
+    v0 = scalar.ScalarValue(page0, 'a', 'seconds', 3,
+                            improvement_direction=improvement_direction.UP)
+    results.AddValue(v0)
+    results.DidRunPage(page0)
+
+    results.WillRunPage(page0)
+    v2 = scalar.ScalarValue(page0, 'a', 'seconds', 4,
+                            improvement_direction=improvement_direction.UP)
+    results.AddValue(v2)
+    results.DidRunPage(page0)
+
+    results.WillRunPage(page1)
+    v1 = scalar.ScalarValue(page1, 'a', 'seconds', 7,
+                            improvement_direction=improvement_direction.UP)
+    results.AddValue(v1)
+    results.DidRunPage(page1)
+
+    results.WillRunPage(page1)
+    v3 = scalar.ScalarValue(page1, 'a', 'seconds', 8,
+                            improvement_direction=improvement_direction.UP)
+    results.AddValue(v3)
+    results.DidRunPage(page1)
+
+    summary = summary_module.Summary(results.all_page_specific_values)
+    values = summary.interleaved_computed_per_page_values_and_summaries
+
+    page0_aggregated = list_of_scalar_values.ListOfScalarValues(
+        page0, 'a', 'seconds', [3, 4],
+        improvement_direction=improvement_direction.UP)
+    page1_aggregated = list_of_scalar_values.ListOfScalarValues(
+        page1, 'a', 'seconds', [7, 8],
+        improvement_direction=improvement_direction.UP)
+    a_summary = list_of_scalar_values.ListOfScalarValues(
+        None, 'a', 'seconds', [3, 4, 7, 8],
+        improvement_direction=improvement_direction.UP)
+
+    self.assertEquals(3, len(values))
+    self.assertIn(page0_aggregated, values)
+    self.assertIn(page1_aggregated, values)
+    self.assertIn(a_summary, values)
+
+  def testPageRunsTwice(self):
+    page0 = self.pages[0]
+
+    results = page_test_results.PageTestResults()
+
+    results.WillRunPage(page0)
+    v0 = scalar.ScalarValue(page0, 'b', 'seconds', 2,
+                            improvement_direction=improvement_direction.UP)
+    results.AddValue(v0)
+    results.DidRunPage(page0)
+
+    results.WillRunPage(page0)
+    v1 = scalar.ScalarValue(page0, 'b', 'seconds', 3,
+                            improvement_direction=improvement_direction.UP)
+    results.AddValue(v1)
+    results.DidRunPage(page0)
+
+    summary = summary_module.Summary(results.all_page_specific_values)
+    values = summary.interleaved_computed_per_page_values_and_summaries
+
+    page0_aggregated = list_of_scalar_values.ListOfScalarValues(
+        page0, 'b', 'seconds', [2, 3],
+        improvement_direction=improvement_direction.UP)
+    b_summary = list_of_scalar_values.ListOfScalarValues(
+        None, 'b', 'seconds', [2, 3],
+        improvement_direction=improvement_direction.UP)
+
+    self.assertEquals(2, len(values))
+    self.assertIn(page0_aggregated, values)
+    self.assertIn(b_summary, values)
+
+  def testListValue(self):
+    page0 = self.pages[0]
+    page1 = self.pages[1]
+
+    results = page_test_results.PageTestResults()
+
+    results.WillRunPage(page0)
+    v0 = list_of_scalar_values.ListOfScalarValues(
+        page0, 'b', 'seconds', [2, 2],
+        improvement_direction=improvement_direction.UP)
+    results.AddValue(v0)
+    results.DidRunPage(page0)
+
+    results.WillRunPage(page1)
+    v1 = list_of_scalar_values.ListOfScalarValues(
+        page1, 'b', 'seconds', [3, 3],
+        improvement_direction=improvement_direction.UP)
+    results.AddValue(v1)
+    results.DidRunPage(page1)
+
+    summary = summary_module.Summary(results.all_page_specific_values)
+    values = summary.interleaved_computed_per_page_values_and_summaries
+
+    b_summary = list_of_scalar_values.ListOfScalarValues(
+        None, 'b', 'seconds', [2, 2, 3, 3], std=0.0,
+        improvement_direction=improvement_direction.UP)
+
+    self.assertEquals(3, len(values))
+    self.assertIn(v0, values)
+    self.assertIn(v1, values)
+    self.assertIn(b_summary, values)
+
+  def testHistogram(self):
+    page0 = self.pages[0]
+    page1 = self.pages[1]
+
+    results = page_test_results.PageTestResults()
+    results.WillRunPage(page0)
+    v0 = histogram.HistogramValue(
+        page0, 'a', 'units',
+        raw_value_json='{"buckets": [{"low": 1, "high": 2, "count": 1}]}',
+        important=False, improvement_direction=improvement_direction.UP)
+    results.AddValue(v0)
+    results.DidRunPage(page0)
+
+    results.WillRunPage(page1)
+    v1 = histogram.HistogramValue(
+        page1, 'a', 'units',
+        raw_value_json='{"buckets": [{"low": 2, "high": 3, "count": 1}]}',
+        important=False, improvement_direction=improvement_direction.UP)
+    results.AddValue(v1)
+    results.DidRunPage(page1)
+
+    summary = summary_module.Summary(results.all_page_specific_values)
+    values = summary.interleaved_computed_per_page_values_and_summaries
+
+    self.assertEquals(2, len(values))
+    self.assertIn(v0, values)
+    self.assertIn(v1, values)
+
+  def testSummaryUsesKeyFunc(self):
+    page0 = self.pages[0]
+    page1 = self.pages[1]
+
+    results = page_test_results.PageTestResults()
+
+    results.WillRunPage(page0)
+    v0 = scalar.ScalarValue(page0, 'a', 'seconds', 20,
+                            improvement_direction=improvement_direction.UP)
+    results.AddValue(v0)
+
+    v1 = scalar.ScalarValue(page0, 'b', 'seconds', 42,
+                            improvement_direction=improvement_direction.UP)
+    results.AddValue(v1)
+    results.DidRunPage(page0)
+
+    results.WillRunPage(page1)
+    v2 = scalar.ScalarValue(page1, 'a', 'seconds', 20,
+                            improvement_direction=improvement_direction.UP)
+    results.AddValue(v2)
+
+    v3 = scalar.ScalarValue(page1, 'b', 'seconds', 42,
+                            improvement_direction=improvement_direction.UP)
+    results.AddValue(v3)
+    results.DidRunPage(page1)
+
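+    # With a key_func that maps every value to the same key, the 'a' and 'b'
+    # values are merged together, both per page and across pages.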
+    summary = summary_module.Summary(
+        results.all_page_specific_values,
+        key_func=lambda v: True)
+    values = summary.interleaved_computed_per_page_values_and_summaries
+
+    v0_list = list_of_scalar_values.ListOfScalarValues(
+        page0, 'a', 'seconds', [20, 42],
+        improvement_direction=improvement_direction.UP)
+    v2_list = list_of_scalar_values.ListOfScalarValues(
+        page1, 'a', 'seconds', [20, 42],
+        improvement_direction=improvement_direction.UP)
+    merged_value = list_of_scalar_values.ListOfScalarValues(
+        None, 'a', 'seconds', [20, 42, 20, 42],
+        improvement_direction=improvement_direction.UP)
+
+    self.assertEquals(3, len(values))
+    self.assertIn(v0_list, values)
+    self.assertIn(v2_list, values)
+    self.assertIn(merged_value, values)
diff --git a/catapult/telemetry/telemetry/value/trace.py b/catapult/telemetry/telemetry/value/trace.py
new file mode 100644
index 0000000..818e15d
--- /dev/null
+++ b/catapult/telemetry/telemetry/value/trace.py
@@ -0,0 +1,151 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import datetime
+import logging
+import os
+import random
+import shutil
+import StringIO
+import sys
+import tempfile
+
+from catapult_base import cloud_storage  # pylint: disable=import-error
+
+from telemetry.internal.util import file_handle
+from telemetry.timeline import trace_data as trace_data_module
+from telemetry import value as value_module
+
+from tracing_build import trace2html
+
+
+class TraceValue(value_module.Value):
+  def __init__(self, page, trace_data, important=False, description=None):
+    """A value that contains a TraceData object and knows how to
+    output it.
+
+    Adding TraceValues and outputting as JSON will produce a directory named
+    trace_files full of HTML files. Outputting as chart JSON will also produce
+    an index, files.html, that links to each of these files.
+    """
+    super(TraceValue, self).__init__(
+        page, name='trace', units='', important=important,
+        description=description, tir_label=None, grouping_keys=None)
+    self._temp_file = self._GetTempFileHandle(trace_data)
+    self._cloud_url = None
+    self._serialized_file_handle = None
+
+  def _GetTempFileHandle(self, trace_data):
+    if self.page:
+      title = self.page.display_name
+    else:
+      title = ''
+    content = StringIO.StringIO()
+    trace2html.WriteHTMLForTraceDataToFile(
+        [trace_data.GetEventsFor(trace_data_module.CHROME_TRACE_PART)],
+        title,
+        content)
+    tf = tempfile.NamedTemporaryFile(delete=False, suffix='.html')
+    tf.write(content.getvalue().encode('utf-8'))
+    tf.close()
+    return file_handle.FromTempFile(tf)
+
+  def __repr__(self):
+    if self.page:
+      page_name = self.page.display_name
+    else:
+      page_name = 'None'
+    return 'TraceValue(%s, %s)' % (page_name, self.name)
+
+  def CleanUp(self):
+    """Cleans up tempfile after it is no longer needed.
+
+    A cleaned up TraceValue cannot be used for further operations. CleanUp()
+    may be called more than once without error.
+    """
+    if self._temp_file is None:
+      return
+    os.remove(self._temp_file.GetAbsPath())
+    self._temp_file = None
+
+  def __enter__(self):
+    return self
+
+  def __exit__(self, _, __, ___):
+    self.CleanUp()
+
+  @property
+  def cleaned_up(self):
+    return self._temp_file is None
+
+  @property
+  def filename(self):
+    return self._temp_file.GetAbsPath()
+
+  def GetBuildbotDataType(self, output_context):
+    return None
+
+  def GetBuildbotValue(self):
+    return None
+
+  def GetRepresentativeNumber(self):
+    return None
+
+  def GetRepresentativeString(self):
+    return None
+
+  @staticmethod
+  def GetJSONTypeName():
+    return 'trace'
+
+  @classmethod
+  def MergeLikeValuesFromSamePage(cls, values):
+    assert len(values) > 0
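+    # Repeated traces from the same page are not combined; only the first
+    # recorded trace is kept.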
+    return values[0]
+
+  @classmethod
+  def MergeLikeValuesFromDifferentPages(cls, values):
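+    # Traces from different pages are never merged into a summary value.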
+    return None
+
+  def AsDict(self):
+    if self._temp_file is None:
+      raise ValueError('Tried to serialize TraceValue without tempfile.')
+    d = super(TraceValue, self).AsDict()
+    if self._serialized_file_handle:
+      d['file_id'] = self._serialized_file_handle.id
+    if self._cloud_url:
+      d['cloud_url'] = self._cloud_url
+    return d
+
+  def Serialize(self, dir_path):
+    if self._temp_file is None:
+      raise ValueError('Tried to serialize nonexistent trace.')
+    file_name = str(self._temp_file.id) + self._temp_file.extension
+    file_path = os.path.abspath(os.path.join(dir_path, file_name))
+    shutil.copy(self._temp_file.GetAbsPath(), file_path)
+    self._serialized_file_handle = file_handle.FromFilePath(file_path)
+    return self._serialized_file_handle
+
+  def UploadToCloud(self, bucket):
+    if self._temp_file is None:
+      raise ValueError('Tried to upload nonexistent trace to Cloud Storage.')
+    try:
+      if self._serialized_file_handle:
+        fh = self._serialized_file_handle
+      else:
+        fh = self._temp_file
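+      # Name the remote file after the local file id plus a timestamp and a
+      # random suffix, presumably to keep repeated uploads from colliding.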
+      remote_path = ('trace-file-id_%s-%s-%d%s' % (
+          fh.id,
+          datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'),
+          random.randint(1, 100000),
+          fh.extension))
+      self._cloud_url = cloud_storage.Insert(
+          bucket, remote_path, fh.GetAbsPath())
+      sys.stderr.write(
+          'View generated trace files online at %s for page %s\n' %
+          (self._cloud_url, self.page.url if self.page else 'unknown'))
+      return self._cloud_url
+    except cloud_storage.PermissionError as e:
+      logging.error('Cannot upload trace files to cloud storage due to '
+                    'permission error: %s' % e.message)
diff --git a/catapult/telemetry/telemetry/value/trace_unittest.py b/catapult/telemetry/telemetry/value/trace_unittest.py
new file mode 100644
index 0000000..d348146
--- /dev/null
+++ b/catapult/telemetry/telemetry/value/trace_unittest.py
@@ -0,0 +1,129 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import shutil
+import tempfile
+import unittest
+
+from telemetry import story
+from telemetry import page as page_module
+from telemetry.testing import system_stub
+from telemetry.timeline import trace_data
+from telemetry.value import trace
+
+
+class TestBase(unittest.TestCase):
+
+  def setUp(self):
+    story_set = story.StorySet(base_dir=os.path.dirname(__file__))
+    story_set.AddStory(
+        page_module.Page('http://www.bar.com/', story_set, story_set.base_dir))
+    story_set.AddStory(
+        page_module.Page('http://www.baz.com/', story_set, story_set.base_dir))
+    story_set.AddStory(
+        page_module.Page('http://www.foo.com/', story_set, story_set.base_dir))
+    self.story_set = story_set
+
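+    # Stub out the cloud_storage module used by trace.py so the tests below
+    # never talk to real cloud storage.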
+    self._cloud_storage_stub = system_stub.Override(trace, ['cloud_storage'])
+
+  def tearDown(self):
+    if self._cloud_storage_stub:
+      self._cloud_storage_stub.Restore()
+      self._cloud_storage_stub = None
+
+  @property
+  def pages(self):
+    return self.story_set.stories
+
+
+class TestSet(object):
+  """ A test set that represents a set that contains any key. """
+
+  def __contains__(self, key):
+    return True
+
+
+class TestDefaultDict(object):
+  """ A test default dict that represents a dictionary that contains any key
+  with value |default_value|. """
+
+  def __init__(self, default_value):
+    self._default_value = default_value
+    self._test_set = TestSet()
+
+  def __contains__(self, key):
+    return key in self._test_set
+
+  def __getitem__(self, key):
+    return self._default_value
+
+  def keys(self):
+    return self._test_set
+
+
+class ValueTest(TestBase):
+  def testRepr(self):
+    v = trace.TraceValue(self.pages[0], trace_data.TraceData({'test': 1}),
+                         important=True, description='desc')
+
+    self.assertEquals('TraceValue(http://www.bar.com/, trace)', str(v))
+
+  def testAsDictWhenTraceSerializedAndUploaded(self):
+    tempdir = tempfile.mkdtemp()
+    try:
+      v = trace.TraceValue(None, trace_data.TraceData({'test': 1}))
+      fh = v.Serialize(tempdir)
+      trace.cloud_storage.SetCalculatedHashesForTesting(
+          {fh.GetAbsPath(): 123})
+      bucket = trace.cloud_storage.PUBLIC_BUCKET
+      cloud_url = v.UploadToCloud(bucket)
+      d = v.AsDict()
+      self.assertEqual(d['file_id'], fh.id)
+      self.assertEqual(d['cloud_url'], cloud_url)
+    finally:
+      shutil.rmtree(tempdir)
+
+  def testAsDictWhenTraceIsNotSerializedAndUploaded(self):
+    test_temp_file = tempfile.NamedTemporaryFile(delete=False)
+    try:
+      v = trace.TraceValue(None, trace_data.TraceData({'test': 1}))
+      trace.cloud_storage.SetCalculatedHashesForTesting(
+          TestDefaultDict(123))
+      bucket = trace.cloud_storage.PUBLIC_BUCKET
+      cloud_url = v.UploadToCloud(bucket)
+      d = v.AsDict()
+      self.assertEqual(d['cloud_url'], cloud_url)
+    finally:
+      if os.path.exists(test_temp_file.name):
+        test_temp_file.close()
+        os.remove(test_temp_file.name)
+
+
+def _IsEmptyDir(path):
+  return os.path.exists(path) and not os.listdir(path)
+
+
+class NoLeakedTempfilesTests(TestBase):
+
+  def setUp(self):
+    super(NoLeakedTempfilesTests, self).setUp()
+    self.temp_test_dir = tempfile.mkdtemp()
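+    # Redirect the tempfile module used by trace.py into a private directory
+    # so these tests can verify that no temporary files are leaked.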
+    self.actual_tempdir = trace.tempfile.tempdir
+    trace.tempfile.tempdir = self.temp_test_dir
+
+  def testNoLeakedTempFileOnImplicitCleanUp(self):
+    with trace.TraceValue(None, trace_data.TraceData({'test': 1})):
+      pass
+    self.assertTrue(_IsEmptyDir(self.temp_test_dir))
+
+  def testNoLeakedTempFileWhenUploadingTrace(self):
+    v = trace.TraceValue(None, trace_data.TraceData({'test': 1}))
+    v.CleanUp()
+    self.assertTrue(_IsEmptyDir(self.temp_test_dir))
+
+  def tearDown(self):
+    super(NoLeakedTempfilesTests, self).tearDown()
+    shutil.rmtree(self.temp_test_dir)
+    trace.tempfile.tempdir = self.actual_tempdir
diff --git a/catapult/telemetry/telemetry/value/translate_common_values.py b/catapult/telemetry/telemetry/value/translate_common_values.py
new file mode 100644
index 0000000..b9bdfe0
--- /dev/null
+++ b/catapult/telemetry/telemetry/value/translate_common_values.py
@@ -0,0 +1,35 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+from telemetry.value import failure
+from telemetry.value import improvement_direction
+from telemetry.value import scalar
+
+
+def TranslateMreFailure(mre_failure, page):
+  return failure.FailureValue.FromMessage(page, mre_failure.stack)
+
+
+def TranslateScalarValue(scalar_value, page):
+  assert (scalar_value['type'] == 'numeric' and
+          scalar_value['numeric']['type'] == 'scalar')
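+  # The incoming dict is expected to look roughly like this (hypothetical
+  # values):
+  #   {'type': 'numeric',
+  #    'numeric': {'type': 'scalar', 'value': 42, 'unit': 'ms_smallerIsBetter'},
+  #    'grouping_keys': {'name': 'foo'}}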
+  scalar_value['value'] = scalar_value['numeric']['value']
+
+  name = scalar_value['grouping_keys']['name']
+
+  unit_parts = scalar_value['numeric']['unit'].split('_')
+  if len(unit_parts) != 2:
+    raise ValueError('Must specify improvement direction for value ' + name)
+
+  scalar_value['units'] = unit_parts[0]
+
+  if unit_parts[1] == 'biggerIsBetter':
+    scalar_value['improvement_direction'] = improvement_direction.UP
+  else:
+    assert unit_parts[1] == 'smallerIsBetter'
+    scalar_value['improvement_direction'] = improvement_direction.DOWN
+
+  scalar_value['page_id'] = page.id
+  scalar_value['name'] = name
+  del scalar_value['grouping_keys']['name']
+  return scalar.ScalarValue.FromDict(scalar_value, {page.id: page})
diff --git a/catapult/telemetry/telemetry/value/translate_common_values_unittest.py b/catapult/telemetry/telemetry/value/translate_common_values_unittest.py
new file mode 100644
index 0000000..2cabe09
--- /dev/null
+++ b/catapult/telemetry/telemetry/value/translate_common_values_unittest.py
@@ -0,0 +1,35 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+import os
+
+from perf_insights import function_handle
+from perf_insights.mre import failure
+from perf_insights.mre import job as job_module
+
+from telemetry import page
+from telemetry import story
+from telemetry.value import translate_common_values
+
+
+def _SingleFileFunctionHandle(filename, function_name, guid):
+  return function_handle.FunctionHandle(
+      modules_to_load=[function_handle.ModuleToLoad(filename=filename)],
+      function_name=function_name, guid=guid)
+
+
+class TranslateCommonValuesTest(unittest.TestCase):
+  def testTranslateMreFailure(self):
+    map_function_handle = _SingleFileFunctionHandle('foo.html', 'Foo', '2')
+    reduce_function_handle = _SingleFileFunctionHandle('bar.html', 'Bar', '3')
+    job = job_module.Job(map_function_handle, reduce_function_handle, '1')
+
+    story_set = story.StorySet(base_dir=os.path.dirname(__file__))
+    p = page.Page('http://www.foo.com/', story_set, story_set.base_dir)
+
+    f = failure.Failure(job, 'foo', '/a.json', 'MyFailure', 'failure', 'stack')
+    fv = translate_common_values.TranslateMreFailure(f, p)
+
+    self.assertIn('stack', str(fv))
diff --git a/catapult/telemetry/telemetry/value/unit-info.json b/catapult/telemetry/telemetry/value/unit-info.json
new file mode 100644
index 0000000..6a6c931
--- /dev/null
+++ b/catapult/telemetry/telemetry/value/unit-info.json
@@ -0,0 +1,221 @@
+{
+  "_description" : "This file contains info about our performance test units, used by the perf dashboard (http://chromeperf.appspot.com).",
+
+  "%": {
+    "improvement_direction": "down",
+    "why": "Percent CPU usage. Used by cpu metric."
+  },
+  "bit/s": {
+    "improvement_direction": "down",
+    "why": "Bitrate."
+  },
+  "bytes/s": {
+    "improvement_direction": "down",
+    "why": "Bitrate."
+  },
+  "bytes": {
+    "improvement_direction": "down"
+  },
+  "chars/s": {
+    "improvement_direction": "up"
+  },
+  "Celsius": {
+    "improvement_direction": "down",
+    "why": "Colder machines are faster."
+  },
+  "commit_count": {
+    "improvement_direction": "up",
+    "why": "layer_tree_host_perftest"
+  },
+  "count": {
+    "improvement_direction": "down",
+    "why": "Processes"
+  },
+  "coverage%": {
+    "improvement_direction": "up",
+    "why": "Used in alloy-perf-test/cts%/passed."
+  },
+  "dB": {
+    "improvement_direction": "up",
+    "why": "Decibels peak signal-to-noise ratio. Used by WebRTC quality tests."
+  },
+  "files": {
+    "improvement_direction": "down",
+    "why": "Static initializers"
+  },
+  "fps": {
+    "improvement_direction": "up",
+    "why": "The faster the better. Used by scirra benchmark."
+  },
+  "frames": {
+    "improvement_direction": "down",
+    "why": "Dropped frames."
+  },
+  "frames-per-second": {
+    "improvement_direction": "up",
+    "why": "Synonym for fps."
+  },
+  "frame_count": {
+    "improvement_direction": "up",
+    "why": "layer_tree_host_perftest"
+  },
+  "frame_time": {
+    "improvement_direction": "down"
+  },
+  "garbage_collections": {
+    "improvement_direction": "down",
+    "why": "Number of GCs needed to collect an object. Less is better."
+  },
+  "Hz": {
+    "improvement_direction": "up",
+    "why": "Higher frequencies are faster."
+  },
+  "janks": {
+    "improvement_direction": "down",
+    "why": "Fewer janks is better."
+  },
+  "kb": {
+    "improvement_direction": "down",
+    "why": "Synonym for KB, used in memory and io metrics."
+  },
+  "available_kB": {
+    "improvement_direction": "up",
+    "why": "kB of memory available. More memory available is better."
+  },
+  "KB": {
+    "improvement_direction": "down",
+    "why": "KB of memory usage. Less memory usage is better. Used in endure."
+  },
+  "lines": {
+    "improvement_direction": "up",
+    "why": "Coverage. More test coverage is better."
+  },
+  "load": {
+    "improvement_direction": "down"
+  },
+  "MB": {
+    "improvement_direction": "down"
+  },
+  "mips": {
+    "improvement_direction": "up",
+    "why": "More instructions processed per time unit."
+  },
+  "mpixels_sec": {
+    "improvement_direction": "up",
+    "why": "More pixels processed per time unit."
+  },
+  "mtexel_sec": {
+    "improvement_direction": "up",
+    "why": "More texels processed per time unit."
+  },
+  "mtri_sec": {
+    "improvement_direction": "up",
+    "why": "More triangles processed per time unit."
+  },
+  "mvtx_sec": {
+    "improvement_direction": "up",
+    "why": "More vertices processed per time unit."
+  },
+  "ms": {
+    "improvement_direction": "down",
+    "why": "Used in many Telemetry measurements. Fewer ms of time means faster."
+  },
+  "ms/1000 elements": {
+    "improvement_direction": "down"
+  },
+  "milliseconds": {
+    "improvement_direction": "down",
+    "why": "Synonym for ms."
+  },
+  "milliseconds-per-frame": {
+    "improvement_direction": "down"
+  },
+  "minutes": {
+    "improvement_direction": "down",
+    "why": "Used for NaCl build time."
+  },
+  "mWh": {
+    "improvement_direction": "down",
+    "why": "Fewer milliwatt-hours means less energy consumed."
+  },
+  "objects (bigger is better)": {
+    "improvement_direction": "up",
+    "why": "Used in spaceport benchmark."
+  },
+  "ObjectsAt30FPS": {
+    "improvement_direction": "up"
+  },
+  "packets": {
+    "improvement_direction": "down",
+    "why": "Monitors how many packets we use to accomplish something."
+  },
+  "percent": {
+    "improvement_direction": "down",
+    "why": "Synonym for %, used in memory metric for percent fragmentation."
+  },
+  "points": {
+    "improvement_direction": "up",
+    "why": "Synonym for score, used in ChromeOS touchpad tests."
+  },
+  "ports": {
+    "improvement_direction": "down"
+  },
+  "reduction%": {
+    "improvement_direction": "up",
+    "why": "Used in draw_property measurement to indicate relative improvement."
+  },
+  "relocs": {
+    "improvement_direction": "down"
+  },
+  "runs/ms": {
+    "improvement_direction": "up",
+    "why": "Higher runs/ms implies faster execution."
+  },
+  "runs/s": {
+    "improvement_direction": "up",
+    "why": "Used in dromaeo. Higher runs/s implies faster execution."
+  },
+  "runs_per_s": {
+    "improvement_direction": "up",
+    "why": "Synonym for runs/s, used in dromaeo data sent by cros bots."
+  },
+  "runs_per_second": {
+    "improvement_direction": "up",
+    "why": "Synonym for runs/s."
+  },
+  "score": {
+    "improvement_direction": "up",
+    "why": "Used in a variety of benchmarks where a higher score is better."
+  },
+  "score_(bigger_is_better)": {
+    "improvement_direction": "up",
+    "why": "Synonym for score."
+  },
+  "score (bigger is better)": {
+    "improvement_direction": "up",
+    "why": "Synonym for score, used in jsgamebench and dom_perf."
+  },
+  "sec": {
+    "improvement_direction": "down"
+  },
+  "seconds": {
+    "improvement_direction": "down"
+  },
+  "tasks": {
+    "improvement_direction": "down"
+  },
+  "tokens/s": {
+    "improvement_direction": "up"
+  },
+  "us": {
+    "improvement_direction": "down"
+  },
+  "vsyncs": {
+    "improvement_direction": "down",
+    "why": "Used in smoothness benchmarks. Number of vsyncs to generate a frame, never < 1.0"
+  },
+  "idle%": {
+    "improvement_direction": "up",
+    "why": "Percentage of work done in idle time."
+  }
+}
diff --git a/catapult/telemetry/telemetry/value/value_unittest.py b/catapult/telemetry/telemetry/value/value_unittest.py
new file mode 100644
index 0000000..9cf9a4c
--- /dev/null
+++ b/catapult/telemetry/telemetry/value/value_unittest.py
@@ -0,0 +1,324 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import os
+import unittest
+
+from telemetry import story
+from telemetry import page as page_module
+from telemetry import value
+
+
+class TestBase(unittest.TestCase):
+  def setUp(self):
+    story_set = story.StorySet(base_dir=os.path.dirname(__file__))
+    story_set.AddStory(
+        page_module.Page("http://www.bar.com/", story_set, story_set.base_dir))
+    story_set.AddStory(
+        page_module.Page("http://www.baz.com/", story_set, story_set.base_dir))
+    story_set.AddStory(
+        page_module.Page("http://www.foo.com/", story_set, story_set.base_dir))
+    self.story_set = story_set
+
+  @property
+  def pages(self):
+    return self.story_set.stories
+
+class ValueForTest(value.Value):
+  @classmethod
+  def MergeLikeValuesFromSamePage(cls, values):
+    pass
+
+  @classmethod
+  def MergeLikeValuesFromDifferentPages(cls, values):
+    pass
+
+  def GetBuildbotDataType(self, output_context):
+    pass
+
+  def GetBuildbotValue(self):
+    pass
+
+  def GetChartAndTraceNameForComputedSummaryResult(
+      self, trace_tag):
+    pass
+
+  def GetRepresentativeNumber(self):
+    pass
+
+  def GetRepresentativeString(self):
+    pass
+
+  @staticmethod
+  def GetJSONTypeName():
+    pass
+
+class ValueForAsDictTest(ValueForTest):
+  @staticmethod
+  def GetJSONTypeName():
+    return 'baz'
+
+class ValueForFromDictTest(ValueForTest):
+  @staticmethod
+  def FromDict(value_dict, page_dict):
+    kwargs = value.Value.GetConstructorKwArgs(value_dict, page_dict)
+    return ValueForFromDictTest(**kwargs)
+
+  @staticmethod
+  def GetJSONTypeName():
+    return 'value_for_from_dict_test'
+
+class ValueTest(TestBase):
+  def testCompat(self):
+    page0 = self.pages[0]
+    page1 = self.pages[0]
+
+    a = value.Value(page0, 'x', 'unit', important=False, description=None,
+                    tir_label='foo', grouping_keys=None)
+    b = value.Value(page1, 'x', 'unit', important=False, description=None,
+                    tir_label='foo', grouping_keys=None)
+    self.assertTrue(b.IsMergableWith(a))
+
+    a = value.Value(page0, 'x', 'unit', important=False, description=None,
+                    tir_label='foo', grouping_keys=None)
+    b = value.Value(page0, 'x', 'unit', important=False, description=None,
+                    tir_label='bar', grouping_keys=None)
+    self.assertTrue(b.IsMergableWith(a))
+
+  def testIncompat(self):
+    page0 = self.pages[0]
+
+    a = value.Value(page0, 'x', 'unit', important=False, description=None,
+                    tir_label=None, grouping_keys=None)
+    b = value.Value(page0, 'x', 'incompatUnit', important=False,
+                    tir_label=None, description=None, grouping_keys=None)
+    self.assertFalse(b.IsMergableWith(a))
+
+    a = value.Value(page0, 'x', 'unit', important=False, description=None,
+                    tir_label=None, grouping_keys=None)
+    b = value.Value(page0, 'x', 'unit', important=True, description=None,
+                    tir_label=None, grouping_keys=None)
+    self.assertFalse(b.IsMergableWith(a))
+
+    a = value.Value(page0, 'x', 'unit', important=False, description=None,
+                    tir_label=None, grouping_keys=None)
+    b = ValueForTest(page0, 'x', 'unit', important=True, description=None,
+                     tir_label=None, grouping_keys=None)
+    self.assertFalse(b.IsMergableWith(a))
+
+  def testNameMustBeString(self):
+    with self.assertRaises(ValueError):
+      value.Value(None, 42, 'unit', important=False, description=None,
+                  tir_label=None, grouping_keys=None)
+
+  def testUnitsMustBeString(self):
+    with self.assertRaises(ValueError):
+      value.Value(None, 'x', 42, important=False, description=None,
+                  tir_label=None, grouping_keys=None)
+
+  def testImportantMustBeBool(self):
+    with self.assertRaises(ValueError):
+      value.Value(None, 'x', 'unit', important='foo', description=None,
+                  tir_label=None, grouping_keys=None)
+
+  def testDescriptionMustBeStringOrNone(self):
+    with self.assertRaises(ValueError):
+      value.Value(None, 'x', 'unit', important=False, description=42,
+                  tir_label=None, grouping_keys=None)
+
+  def testInteractionRecordMustBeStringOrNone(self):
+    with self.assertRaises(ValueError):
+      value.Value(None, 'x', 'unit', important=False, description=None,
+                  tir_label=42, grouping_keys=None)
+
+  def testGroupingKeysMustBeDictOrNone(self):
+    with self.assertRaises(ValueError):
+      value.Value(None, 'x', 'unit', important=False, description=None,
+                  tir_label=None, grouping_keys='foo')
+
+  def testAsDictBaseKeys(self):
+    v = ValueForAsDictTest(None, 'x', 'unit', important=True, description=None,
+                           tir_label='bar', grouping_keys={'foo': 'baz'})
+    d = v.AsDict()
+
+    self.assertEquals(d, {
+          'name': 'x',
+          'type': 'baz',
+          'units': 'unit',
+          'important': True,
+          'tir_label': 'bar',
+          'grouping_keys': {'foo': 'baz'}
+        })
+
+  def testAsDictWithPage(self):
+    page0 = self.pages[0]
+
+    v = ValueForAsDictTest(page0, 'x', 'unit', important=False,
+                           description=None, tir_label=None, grouping_keys=None)
+    d = v.AsDict()
+
+    self.assertIn('page_id', d)
+
+  def testAsDictWithoutPage(self):
+    v = ValueForAsDictTest(None, 'x', 'unit', important=False, description=None,
+                           tir_label=None, grouping_keys=None)
+    d = v.AsDict()
+
+    self.assertNotIn('page_id', d)
+
+  def testAsDictWithDescription(self):
+    v = ValueForAsDictTest(None, 'x', 'unit', important=False,
+                           description='Some description.',
+                           tir_label=None, grouping_keys=None)
+    d = v.AsDict()
+    self.assertEqual('Some description.', d['description'])
+
+  def testAsDictWithoutDescription(self):
+    v = ValueForAsDictTest(None, 'x', 'unit', important=False, description=None,
+                           tir_label=None, grouping_keys=None)
+    self.assertNotIn('description', v.AsDict())
+
+  def testAsDictWithInteractionRecord(self):
+    v = ValueForAsDictTest(None, 'x', 'unit', important=False,
+                           description='Some description.',
+                           tir_label='foo', grouping_keys=None)
+    d = v.AsDict()
+    self.assertEqual('foo', d['tir_label'])
+
+  def testAsDictWithoutInteractionRecord(self):
+    v = ValueForAsDictTest(None, 'x', 'unit', important=False, description=None,
+                           tir_label=None, grouping_keys=None)
+    self.assertNotIn('tir_label', v.AsDict())
+
+  def testFromDictBaseKeys(self):
+    d = {
+      'type': 'value_for_from_dict_test',
+      'name': 'x',
+      'units': 'unit'
+    }
+
+    v = value.Value.FromDict(d, None)
+    self.assertEquals(v.name, 'x')
+    self.assertTrue(isinstance(v, ValueForFromDictTest))
+    self.assertEquals(v.units, 'unit')
+
+  def testFromDictWithPage(self):
+    page0 = self.pages[0]
+    page_dict = {page0.id: page0}
+
+    d = {
+      'type': 'value_for_from_dict_test',
+      'name': 'x',
+      'units': 'unit',
+      'page_id': page0.id
+    }
+
+    v = value.Value.FromDict(d, page_dict)
+
+    self.assertEquals(v.page.id, page0.id)
+
+  def testFromDictWithPageId0(self):
+    page_dict = {0: 'foo'}
+
+    d = {
+      'type': 'value_for_from_dict_test',
+      'name': 'x',
+      'units': 'unit',
+      'page_id': 0
+    }
+
+    v = value.Value.FromDict(d, page_dict)
+
+    self.assertEquals(v.page, 'foo')
+
+  def testFromDictWithoutPage(self):
+    d = {
+      'type': 'value_for_from_dict_test',
+      'name': 'x',
+      'units': 'unit'
+    }
+
+    v = value.Value.FromDict(d, {})
+
+    self.assertEquals(v.page, None)
+
+  def testFromDictWithDescription(self):
+    d = {
+          'type': 'value_for_from_dict_test',
+          'name': 'x',
+          'units': 'unit',
+          'description': 'foo'
+        }
+
+    v = value.Value.FromDict(d, {})
+    self.assertEquals(v.description, 'foo')
+
+  def testFromDictWithoutDescription(self):
+    d = {
+          'type': 'value_for_from_dict_test',
+          'name': 'x',
+          'units': 'unit'
+        }
+
+    v = value.Value.FromDict(d, {})
+    self.assertEquals(v.description, None)
+
+  def testFromDictWithInteractionRecord(self):
+    d = {
+          'type': 'value_for_from_dict_test',
+          'name': 'x',
+          'units': 'unit',
+          'description': 'foo',
+          'tir_label': 'bar'
+        }
+
+    v = value.Value.FromDict(d, {})
+    self.assertEquals(v.tir_label, 'bar')
+
+  def testFromDictWithoutInteractionRecord(self):
+    d = {
+          'type': 'value_for_from_dict_test',
+          'name': 'x',
+          'units': 'unit'
+        }
+
+    v = value.Value.FromDict(d, {})
+    self.assertEquals(v.tir_label, None)
+
+  def testFromDictWithGroupingKeys(self):
+    d = {
+          'type': 'value_for_from_dict_test',
+          'name': 'x',
+          'units': 'unit',
+          'description': 'foo',
+          'tir_label': 'bar',
+          'grouping_keys': {'foo': 'bar'}
+        }
+
+    v = value.Value.FromDict(d, {})
+    self.assertEquals(v.grouping_keys, {'foo': 'bar'})
+
+  def testFromDictWithoutGroupingKeys(self):
+    d = {
+          'type': 'value_for_from_dict_test',
+          'name': 'x',
+          'units': 'unit'
+        }
+
+    v = value.Value.FromDict(d, {})
+    self.assertEquals(v.grouping_keys, {})
+
+  def testListOfValuesFromListOfDicts(self):
+    d0 = {
+          'type': 'value_for_from_dict_test',
+          'name': 'x',
+          'units': 'unit'
+        }
+    d1 = {
+          'type': 'value_for_from_dict_test',
+          'name': 'y',
+          'units': 'unit'
+        }
+    vs = value.Value.ListOfValuesFromListOfDicts([d0, d1], {})
+    self.assertEquals(vs[0].name, 'x')
+    self.assertEquals(vs[1].name, 'y')
diff --git a/catapult/telemetry/telemetry/web_perf/__init__.py b/catapult/telemetry/telemetry/web_perf/__init__.py
new file mode 100644
index 0000000..648af8e
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/__init__.py
@@ -0,0 +1,7 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""
+The web_perf module provides utilities and measurements for benchmarking the
+performance of web apps.
+"""
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/__init__.py b/catapult/telemetry/telemetry/web_perf/metrics/__init__.py
new file mode 100644
index 0000000..89406a1
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/__init__.py
@@ -0,0 +1,6 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""
+The web_perf.metrics module provides metrics for analyzing web performance.
+"""
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/blob_timeline.py b/catapult/telemetry/telemetry/web_perf/metrics/blob_timeline.py
new file mode 100644
index 0000000..db81f69
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/blob_timeline.py
@@ -0,0 +1,117 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.value import improvement_direction
+from telemetry.value import list_of_scalar_values
+from telemetry.web_perf.metrics import timeline_based_metric
+
+
+WRITE_EVENT_NAME = 'Registry::RegisterBlob'
+READ_EVENT_NAME = 'BlobRequest'
+
+
+class BlobTimelineMetric(timeline_based_metric.TimelineBasedMetric):
+  """BlobTimelineMetric reports timing information about blob storage.
+
+  The following metrics are added to the results:
+    * blob write times (blob-writes)
+    * blob read times (blob-reads)
+  """
+
+  def __init__(self):
+    super(BlobTimelineMetric, self).__init__()
+
+  @staticmethod
+  def IsWriteEvent(event):
+    return event.name == WRITE_EVENT_NAME
+
+  @staticmethod
+  def IsReadEvent(event):
+    return event.name == READ_EVENT_NAME
+
+  @staticmethod
+  def IsEventInInteraction(event, interaction):
+    return interaction.start <= event.start <= interaction.end
+
+  @staticmethod
+  def ThreadDurationIfPresent(event):
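+    # Prefer thread (CPU) time when the platform reports it; otherwise fall
+    # back to the wall-clock duration.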
+    if event.thread_duration:
+      return event.thread_duration
+    else:
+      return event.duration
+
+  def AddResults(self, model, renderer_thread, interactions, results):
+    assert interactions
+
+    write_events = []
+    read_events = []
+    for event in model.IterAllEvents(
+        event_predicate=lambda e: self.IsWriteEvent(e) or self.IsReadEvent(e)):
+      if self.IsReadEvent(event):
+        read_events.append(event)
+      else:
+        write_events.append(event)
+
+    # To keep mocking simple, only these private methods are unit tested.
+    self._AddWriteResultsInternal(write_events, interactions, results)
+    self._AddReadResultsInternal(read_events, interactions, results)
+
+  def _AddWriteResultsInternal(self, events, interactions, results):
+    writes = []
+    for event in events:
+      if (self.IsWriteEvent(event) and
+          any(self.IsEventInInteraction(event, interaction)
+              for interaction in interactions)):
+        writes.append(self.ThreadDurationIfPresent(event))
+    if writes:
+      results.AddValue(list_of_scalar_values.ListOfScalarValues(
+          page=results.current_page,
+          tir_label=interactions[0].label,
+          name='blob-writes',
+          units='ms',
+          values=writes,
+          description='List of durations of blob writes.',
+          improvement_direction=improvement_direction.DOWN))
+    else:
+      results.AddValue(list_of_scalar_values.ListOfScalarValues(
+          page=results.current_page,
+          tir_label=interactions[0].label,
+          name='blob-writes',
+          units='ms',
+          values=None,
+          none_value_reason='No blob write events found for this interaction.',
+          improvement_direction=improvement_direction.DOWN))
+
+  def _AddReadResultsInternal(self, events, interactions, results):
+    reads = dict()
+    for event in events:
+      if (not self.IsReadEvent(event) or
+          not any(self.IsEventInInteraction(event, interaction)
+                 for interaction in interactions)):
+        continue
+      # Every blob has a unique UUID.  To get the total time for reading
+      # a blob, we add up the times of all events with the same blob UUID.
+      uuid = event.args['uuid']
+      if uuid not in reads:
+        reads[uuid] = 0
+      reads[uuid] += self.ThreadDurationIfPresent(event)
+
+    if reads:
+      results.AddValue(list_of_scalar_values.ListOfScalarValues(
+          page=results.current_page,
+          tir_label=interactions[0].label,
+          name='blob-reads',
+          units='ms',
+          values=reads.values(),
+          description='List of read times for blobs.',
+          improvement_direction=improvement_direction.DOWN))
+    else:
+      results.AddValue(list_of_scalar_values.ListOfScalarValues(
+          page=results.current_page,
+          tir_label=interactions[0].label,
+          name='blob-reads',
+          units='ms',
+          values=None,
+          none_value_reason='No blob read events found for this interaction.',
+          improvement_direction=improvement_direction.DOWN))
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/blob_timeline_unittest.py b/catapult/telemetry/telemetry/web_perf/metrics/blob_timeline_unittest.py
new file mode 100644
index 0000000..7f2efb7
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/blob_timeline_unittest.py
@@ -0,0 +1,124 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from collections import namedtuple
+from telemetry.internal.results import page_test_results
+from telemetry.page import page
+from telemetry.web_perf.metrics import blob_timeline
+from telemetry.web_perf import timeline_interaction_record
+
+
+FakeEvent = namedtuple('Event', 'name, start, end, thread_duration, args')
+Interaction = timeline_interaction_record.TimelineInteractionRecord
+TEST_INTERACTION_LABEL = 'Action_TestInteraction'
+WRITE_EVENT_NAME = 'Registry::RegisterBlob'
+READ_EVENT_NAME = 'BlobRequest'
+
+
+def GetBlobMetrics(events, interactions):
+  results = page_test_results.PageTestResults()
+  test_page = page.Page('file://blank.html')
+  results.WillRunPage(test_page)
+  blob_timeline.BlobTimelineMetric()._AddWriteResultsInternal(
+      events, interactions, results)  # pylint:disable=protected-access
+  blob_timeline.BlobTimelineMetric()._AddReadResultsInternal(
+      events, interactions, results)  # pylint:disable=protected-access
+  return_dict = dict((value.name, value.values) for value in
+                     results.current_page_run.values)
+  results.DidRunPage(test_page)
+  return return_dict
+
+def FakeWriteEvent(start, end, thread_duration=None):
+  if not thread_duration:
+    thread_duration = end - start
+  return FakeEvent(blob_timeline.WRITE_EVENT_NAME,
+                   start, end, thread_duration, {'uuid':'fakeuuid'})
+
+def FakeReadEvent(start, end, uuid, thread_duration=None):
+  if not thread_duration:
+    thread_duration = end - start
+  return FakeEvent(blob_timeline.READ_EVENT_NAME,
+                   start, end, thread_duration, {'uuid': uuid})
+
+def TestInteraction(start, end):
+  return Interaction(TEST_INTERACTION_LABEL, start, end)
+
+
+class BlobTimelineMetricUnitTest(unittest.TestCase):
+  def testWriteMetric(self):
+    events = [FakeWriteEvent(0, 1),
+              FakeWriteEvent(9, 11),
+              FakeWriteEvent(10, 13),
+              FakeWriteEvent(20, 24),
+              FakeWriteEvent(21, 26),
+              FakeWriteEvent(29, 35),
+              FakeWriteEvent(30, 37),
+              FakeWriteEvent(40, 48),
+              FakeWriteEvent(41, 50),
+              FakeEvent('something', 10, 13, 3, {}),
+              FakeEvent('FrameView::something', 20, 24, 4, {}),
+              FakeEvent('SomeThing::performLayout', 30, 37, 7, {}),
+              FakeEvent('something else', 40, 48, 8, {})]
+    interactions = [TestInteraction(10, 20),
+                    TestInteraction(30, 40)]
+
+    # The first event starts before the first interaction, so it is ignored.
+    # The second event starts before the first interaction, so it is ignored.
+    # The third event starts during the first interaction, and its duration is
+    # 13 - 10 = 3.
+    # The fourth event starts during the first interaction, and its duration is
+    # 24 - 20 = 4.
+    # The fifth event starts between the two interactions, so it is ignored.
+    # The sixth event starts between the two interactions, so it is ignored.
+    # The seventh event starts during the second interaction, and its duration
+    # is 37 - 30 = 7.
+    # The eighth event starts during the second interaction and its duration is
+    # 48 - 40 = 8.
+    # The ninth event starts after the last interaction, so it is ignored.
+    # The rest of the events are not blob write events, so they are ignored.
+    self.assertEqual({'blob-reads': None, 'blob-writes': [3, 4, 7, 8]},
+        GetBlobMetrics(events, interactions))
+
+  def testReadMetric(self):
+    events = [FakeReadEvent(0, 1, 'a'),
+              FakeReadEvent(9, 11, 'a'),
+              FakeReadEvent(10, 13, 'b', 1), # counts
+              FakeReadEvent(15, 18, 'b'),    # counts
+              FakeReadEvent(21, 26, 'b'),
+              FakeReadEvent(29, 35, 'c'),
+              FakeReadEvent(31, 32, 'e'),    # counts
+              FakeReadEvent(34, 36, 'e', 1), # counts
+              FakeReadEvent(32, 37, 'd'),    # counts
+              FakeEvent('something', 10, 13, 3, {}),
+              FakeEvent('something else', 40, 48, 8, {})]
+    interactions = [TestInteraction(10, 20),
+                    TestInteraction(30, 40)]
+
+    # Events outside the interaction intervals are ignored, and read times are
+    # summed per blob UUID:
+    #   'b': 1 + 3 = 4
+    #   'e': 1 + 1 = 2
+    #   'd': 5
+    self.assertEqual({'blob-reads': [4, 2, 5], 'blob-writes': None},
+        GetBlobMetrics(events, interactions))
+
+  def testReadAndWriteMetrics(self):
+    events = [FakeReadEvent(0, 1, 'a'),
+              FakeReadEvent(9, 11, 'a'),
+              FakeReadEvent(10, 13, 'b'),     # counts
+              FakeWriteEvent(15, 18),         # counts
+              FakeReadEvent(21, 26, 'c'),
+              FakeReadEvent(29, 35, 'd'),
+              FakeWriteEvent(31, 34, 1), # counts
+              FakeReadEvent(32, 33, 'e'),     # counts
+              FakeReadEvent(34, 35, 'e'),     # counts
+              FakeEvent('something', 31, 33, 2, {})]
+    interactions = [TestInteraction(10, 20),
+                    TestInteraction(30, 35)]
+
+    # As above, only events inside the interactions count: reads are summed
+    # per UUID ('b' = 3, 'e' = 1 + 1 = 2) and the counted writes are 3 and 1.
+    self.assertEqual({'blob-reads': [3, 2], 'blob-writes': [3, 1]},
+      GetBlobMetrics(events, interactions))
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/gpu_timeline.py b/catapult/telemetry/telemetry/web_perf/metrics/gpu_timeline.py
new file mode 100644
index 0000000..ff362be
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/gpu_timeline.py
@@ -0,0 +1,240 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import collections
+import math
+import sys
+
+from telemetry.timeline import model as model_module
+from telemetry.value import improvement_direction
+from telemetry.value import list_of_scalar_values
+from telemetry.value import scalar
+from telemetry.web_perf.metrics import timeline_based_metric
+
+TOPLEVEL_GL_CATEGORY = 'gpu_toplevel'
+TOPLEVEL_SERVICE_CATEGORY = 'disabled-by-default-gpu.service'
+TOPLEVEL_DEVICE_CATEGORY = 'disabled-by-default-gpu.device'
+
+SERVICE_FRAME_END_MARKER = (TOPLEVEL_SERVICE_CATEGORY, 'SwapBuffer')
+DEVICE_FRAME_END_MARKER = (TOPLEVEL_DEVICE_CATEGORY, 'SwapBuffer')
+
+TRACKED_GL_CONTEXT_NAME = {'RenderCompositor': 'render_compositor',
+                           'BrowserCompositor': 'browser_compositor',
+                           'Compositor': 'browser_compositor'}
+
+
+def _CalculateFrameTimes(events_per_frame, event_data_func):
+  """Given a list of events per frame and a function to extract event time data,
+     returns a list of frame times."""
+  times_per_frame = []
+  for event_list in events_per_frame:
+    event_times = [event_data_func(event) for event in event_list]
+    times_per_frame.append(sum(event_times))
+  return times_per_frame
+
+
+def _CPUFrameTimes(events_per_frame):
+  """Given a list of events per frame, returns a list of CPU frame times."""
+  # CPU event frames are calculated using the event thread duration.
+  # Some platforms do not support thread_duration, convert those to 0.
+  return _CalculateFrameTimes(events_per_frame,
+                              lambda event: event.thread_duration or 0)
+
+
+def _GPUFrameTimes(events_per_frame):
+  """Given a list of events per frame, returns a list of GPU frame times."""
+  # GPU event frames are asynchronous slices which use the event duration.
+  return _CalculateFrameTimes(events_per_frame,
+                              lambda event: event.duration)
+
+
+def TimelineName(name, source_type, value_type):
+  """Constructs the standard name given in the timeline.
+
+  Args:
+    name: The name of the timeline, for example "total" or "render_compositor".
+    source_type: One of "cpu", "gpu" or None. None is only used for total times.
+    value_type: The type of value, for example "mean" or "stddev".
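+
+  For example, TimelineName('total', 'cpu', 'mean') returns
+  'total_mean_cpu_time' and TimelineName('swap', None, 'max') returns
+  'swap_max_time'.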
+  """
+  if source_type:
+    return '%s_%s_%s_time' % (name, value_type, source_type)
+  else:
+    return '%s_%s_time' % (name, value_type)
+
+
+class GPUTimelineMetric(timeline_based_metric.TimelineBasedMetric):
+  """Computes GPU based metrics."""
+
+  def __init__(self):
+    super(GPUTimelineMetric, self).__init__()
+
+  def AddResults(self, model, _, interaction_records, results):
+    self.VerifyNonOverlappedRecords(interaction_records)
+    service_times = self._CalculateGPUTimelineData(model)
+    for value_item, durations in service_times.iteritems():
+      count = len(durations)
+      avg = 0.0
+      stddev = 0.0
+      maximum = 0.0
+      if count:
+        avg = sum(durations) / count
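+        # Population standard deviation of the per-frame durations.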
+        stddev = math.sqrt(sum((d - avg) ** 2 for d in durations) / count)
+        maximum = max(durations)
+
+      name, src = value_item
+
+      if src:
+        frame_times_name = '%s_%s_frame_times' % (name, src)
+      else:
+        frame_times_name = '%s_frame_times' % (name)
+
+      if durations:
+        results.AddValue(list_of_scalar_values.ListOfScalarValues(
+            results.current_page, frame_times_name, 'ms', durations,
+            tir_label=interaction_records[0].label,
+            improvement_direction=improvement_direction.DOWN))
+
+      results.AddValue(scalar.ScalarValue(
+          results.current_page, TimelineName(name, src, 'max'), 'ms', maximum,
+          tir_label=interaction_records[0].label,
+          improvement_direction=improvement_direction.DOWN))
+      results.AddValue(scalar.ScalarValue(
+          results.current_page, TimelineName(name, src, 'mean'), 'ms', avg,
+          tir_label=interaction_records[0].label,
+          improvement_direction=improvement_direction.DOWN))
+      results.AddValue(scalar.ScalarValue(
+          results.current_page, TimelineName(name, src, 'stddev'), 'ms', stddev,
+          tir_label=interaction_records[0].label,
+          improvement_direction=improvement_direction.DOWN))
+
+  def _CalculateGPUTimelineData(self, model):
+    """Uses the model and calculates the times for various values for each
+       frame. The return value will be a dictionary of the following format:
+         {
+           (EVENT_NAME1, SRC1_TYPE): [FRAME0_TIME, FRAME1_TIME...etc.],
+           (EVENT_NAME2, SRC2_TYPE): [FRAME0_TIME, FRAME1_TIME...etc.],
+         }
+
+       Events:
+         swap - The time in milliseconds between each swap marker.
+         total - The amount of time spent in the renderer thread.
+         TRACKED_NAMES: Using the TRACKED_GL_CONTEXT_NAME dict, we
+                        include the traces per frame for the
+                        tracked name.
+       Source Types:
+         None - This will only be valid for the "swap" event.
+         cpu - For an event, the "cpu" source type signifies time spent on the
+               gpu thread using the CPU. This uses the "gpu.service" markers.
+         gpu - For an event, the "gpu" source type signifies time spent on the
+               gpu thread using the GPU. This uses the "gpu.device" markers.
+    """
+    all_service_events = []
+    current_service_frame_end = sys.maxint
+    current_service_events = []
+
+    all_device_events = []
+    current_device_frame_end = sys.maxint
+    current_device_events = []
+
+    tracked_events = {}
+    tracked_events.update(
+        dict([((value, 'cpu'), [])
+              for value in TRACKED_GL_CONTEXT_NAME.itervalues()]))
+    tracked_events.update(
+        dict([((value, 'gpu'), [])
+              for value in TRACKED_GL_CONTEXT_NAME.itervalues()]))
+
+    # These will track traces within the current frame.
+    current_tracked_service_events = collections.defaultdict(list)
+    current_tracked_device_events = collections.defaultdict(list)
+
+    event_iter = model.IterAllEvents(
+        event_type_predicate=model_module.IsSliceOrAsyncSlice)
+    for event in event_iter:
+      # Look for frame end markers
+      if (event.category, event.name) == SERVICE_FRAME_END_MARKER:
+        current_service_frame_end = event.end
+      elif (event.category, event.name) == DEVICE_FRAME_END_MARKER:
+        current_device_frame_end = event.end
+
+      # Track all other toplevel gl category markers
+      elif event.args.get('gl_category', None) == TOPLEVEL_GL_CATEGORY:
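+        # Context names may carry a trailing '-<suffix>' (presumably an
+        # instance id); strip it so the TRACKED_GL_CONTEXT_NAME lookup matches.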
+        base_name = event.name
+        dash_index = base_name.rfind('-')
+        if dash_index != -1:
+          base_name = base_name[:dash_index]
+        tracked_name = TRACKED_GL_CONTEXT_NAME.get(base_name, None)
+
+        if event.category == TOPLEVEL_SERVICE_CATEGORY:
+          # Check if frame has ended.
+          if event.start >= current_service_frame_end:
+            if current_service_events:
+              all_service_events.append(current_service_events)
+              for value in TRACKED_GL_CONTEXT_NAME.itervalues():
+                tracked_events[(value, 'cpu')].append(
+                    current_tracked_service_events[value])
+            current_service_events = []
+            current_service_frame_end = sys.maxint
+            current_tracked_service_events.clear()
+
+          current_service_events.append(event)
+          if tracked_name:
+            current_tracked_service_events[tracked_name].append(event)
+
+        elif event.category == TOPLEVEL_DEVICE_CATEGORY:
+          # Check if frame has ended.
+          if event.start >= current_device_frame_end:
+            if current_device_events:
+              all_device_events.append(current_device_events)
+              for value in TRACKED_GL_CONTEXT_NAME.itervalues():
+                tracked_events[(value, 'gpu')].append(
+                    current_tracked_device_events[value])
+            current_device_events = []
+            current_device_frame_end = sys.maxint
+            current_tracked_device_events.clear()
+
+          current_device_events.append(event)
+          if tracked_name:
+            current_tracked_device_events[tracked_name].append(event)
+
+    # Append Data for Last Frame.
+    if current_service_events:
+      all_service_events.append(current_service_events)
+      for value in TRACKED_GL_CONTEXT_NAME.itervalues():
+        tracked_events[(value, 'cpu')].append(
+            current_tracked_service_events[value])
+    if current_device_events:
+      all_device_events.append(current_device_events)
+      for value in TRACKED_GL_CONTEXT_NAME.itervalues():
+        tracked_events[(value, 'gpu')].append(
+            current_tracked_device_events[value])
+
+    # Calculate the per-frame (swap to swap) times on the service (CPU) side.
+    frame_times = []
+    if all_service_events:
+      prev_frame_end = all_service_events[0][0].start
+      for event_list in all_service_events:
+        last_service_event_in_frame = event_list[-1]
+        frame_times.append(last_service_event_in_frame.end - prev_frame_end)
+        prev_frame_end = last_service_event_in_frame.end
+
+    # Create the timeline data dictionary for service side traces.
+    total_frame_value = ('swap', None)
+    cpu_frame_value = ('total', 'cpu')
+    gpu_frame_value = ('total', 'gpu')
+    timeline_data = {}
+    timeline_data[total_frame_value] = frame_times
+    timeline_data[cpu_frame_value] = _CPUFrameTimes(all_service_events)
+    for value in TRACKED_GL_CONTEXT_NAME.itervalues():
+      cpu_value = (value, 'cpu')
+      timeline_data[cpu_value] = _CPUFrameTimes(tracked_events[cpu_value])
+
+    # Add in GPU-side traces if supported (i.e. device traces exist).
+    if all_device_events:
+      timeline_data[gpu_frame_value] = _GPUFrameTimes(all_device_events)
+      for value in TRACKED_GL_CONTEXT_NAME.itervalues():
+        gpu_value = (value, 'gpu')
+        tracked_gpu_event = tracked_events[gpu_value]
+        timeline_data[gpu_value] = _GPUFrameTimes(tracked_gpu_event)
+
+    return timeline_data
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/gpu_timeline_unittest.py b/catapult/telemetry/telemetry/web_perf/metrics/gpu_timeline_unittest.py
new file mode 100644
index 0000000..2af5b10
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/gpu_timeline_unittest.py
@@ -0,0 +1,313 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.testing import test_page_test_results
+from telemetry.timeline import async_slice as async_slice_module
+from telemetry.timeline import model as model_module
+from telemetry.timeline import slice as slice_module
+from telemetry.web_perf.metrics import gpu_timeline
+from telemetry.web_perf import timeline_interaction_record as tir_module
+
+SERVICE_FRAME_END_CATEGORY, SERVICE_FRAME_END_NAME = \
+    gpu_timeline.SERVICE_FRAME_END_MARKER
+
+DEVICE_FRAME_END_CATEGORY, DEVICE_FRAME_END_NAME = \
+    gpu_timeline.DEVICE_FRAME_END_MARKER
+
+INTERACTION_RECORDS = [tir_module.TimelineInteractionRecord("test-record",
+                                                            0,
+                                                            float('inf'))]
+
+
+def _CreateGPUSlices(parent_thread, name, start_time, duration, offset=0):
+  args = {'gl_category': gpu_timeline.TOPLEVEL_GL_CATEGORY}
+  return (slice_module.Slice(parent_thread,
+                             gpu_timeline.TOPLEVEL_SERVICE_CATEGORY,
+                             name, start_time,
+                             args=args,
+                             duration=duration,
+                             thread_duration=duration),
+          async_slice_module.AsyncSlice(gpu_timeline.TOPLEVEL_DEVICE_CATEGORY,
+                             name, start_time + offset,
+                             args=args,
+                             duration=duration))
+
+def _CreateFrameEndSlices(parent_thread, start_time, duration, offset=0):
+  args = {'gl_category': gpu_timeline.TOPLEVEL_GL_CATEGORY}
+  return (slice_module.Slice(parent_thread,
+                             SERVICE_FRAME_END_CATEGORY,
+                             SERVICE_FRAME_END_NAME,
+                             start_time,
+                             args=args,
+                             duration=duration,
+                             thread_duration=duration),
+          async_slice_module.AsyncSlice(DEVICE_FRAME_END_CATEGORY,
+                             DEVICE_FRAME_END_NAME,
+                             start_time + offset,
+                             args=args,
+                             duration=duration))
+
+
+def _AddSliceToThread(parent_thread, slice_item):
+  if isinstance(slice_item, slice_module.Slice):
+    parent_thread.PushSlice(slice_item)
+  elif isinstance(slice_item, async_slice_module.AsyncSlice):
+    parent_thread.AddAsyncSlice(slice_item)
+  else:
+    assert False, "Invalid Slice Item Type: %s" % type(slice_item)
+
+
+class GPUTimelineTest(unittest.TestCase):
+  def GetResults(self, metric, model, renderer_thread, interaction_records):
+    results = test_page_test_results.TestPageTestResults(self)
+    metric.AddResults(model, renderer_thread, interaction_records, results)
+    return results
+
+  def testExpectedResults(self):
+    """Test that a simple trace outputs all expected results."""
+    model = model_module.TimelineModel()
+    test_thread = model.GetOrCreateProcess(1).GetOrCreateThread(2)
+    for slice_item in _CreateGPUSlices(test_thread, 'test_item', 100, 10):
+      _AddSliceToThread(test_thread, slice_item)
+    model.FinalizeImport()
+
+    metric = gpu_timeline.GPUTimelineMetric()
+    results = self.GetResults(metric, model=model, renderer_thread=test_thread,
+                              interaction_records=INTERACTION_RECORDS)
+
+    for name, src_type in (('swap', None), ('total', 'cpu'), ('total', 'gpu')):
+      results.AssertHasPageSpecificScalarValue(
+          gpu_timeline.TimelineName(name, src_type, 'max'), 'ms', 10)
+      results.AssertHasPageSpecificScalarValue(
+          gpu_timeline.TimelineName(name, src_type, 'mean'), 'ms', 10)
+      results.AssertHasPageSpecificScalarValue(
+          gpu_timeline.TimelineName(name, src_type, 'stddev'), 'ms', 0)
+
+    for tracked_name in gpu_timeline.TRACKED_GL_CONTEXT_NAME.values():
+      for source_type in ('cpu', 'gpu'):
+        results.AssertHasPageSpecificScalarValue(
+            gpu_timeline.TimelineName(tracked_name, source_type, 'max'),
+                                      'ms', 0)
+        results.AssertHasPageSpecificScalarValue(
+            gpu_timeline.TimelineName(tracked_name, source_type, 'mean'),
+                                      'ms', 0)
+        results.AssertHasPageSpecificScalarValue(
+            gpu_timeline.TimelineName(tracked_name, source_type, 'stddev'),
+                                      'ms', 0)
+
+  def testNoDeviceTraceResults(self):
+    """Test expected results when missing device traces."""
+    model = model_module.TimelineModel()
+    test_thread = model.GetOrCreateProcess(1).GetOrCreateThread(2)
+    service_slice, _ = _CreateGPUSlices(test_thread, 'test_item', 100, 10)
+    _AddSliceToThread(test_thread, service_slice)
+    model.FinalizeImport()
+
+    metric = gpu_timeline.GPUTimelineMetric()
+    results = self.GetResults(metric, model=model, renderer_thread=test_thread,
+                              interaction_records=INTERACTION_RECORDS)
+
+    for name, source_type in (('swap', None), ('total', 'cpu')):
+      results.AssertHasPageSpecificScalarValue(
+          gpu_timeline.TimelineName(name, source_type, 'max'), 'ms', 10)
+      results.AssertHasPageSpecificScalarValue(
+          gpu_timeline.TimelineName(name, source_type, 'mean'), 'ms', 10)
+      results.AssertHasPageSpecificScalarValue(
+          gpu_timeline.TimelineName(name, source_type, 'stddev'), 'ms', 0)
+
+    self.assertRaises(AssertionError, results.GetPageSpecificValueNamed,
+                      gpu_timeline.TimelineName('total', 'gpu', 'max'))
+    self.assertRaises(AssertionError, results.GetPageSpecificValueNamed,
+                      gpu_timeline.TimelineName('total', 'gpu', 'mean'))
+    self.assertRaises(AssertionError, results.GetPageSpecificValueNamed,
+                      gpu_timeline.TimelineName('total', 'gpu', 'stddev'))
+
+    for name in gpu_timeline.TRACKED_GL_CONTEXT_NAME.values():
+      results.AssertHasPageSpecificScalarValue(
+          gpu_timeline.TimelineName(name, 'cpu', 'max'), 'ms', 0)
+      results.AssertHasPageSpecificScalarValue(
+          gpu_timeline.TimelineName(name, 'cpu', 'mean'), 'ms', 0)
+      results.AssertHasPageSpecificScalarValue(
+          gpu_timeline.TimelineName(name, 'cpu', 'stddev'), 'ms', 0)
+
+      self.assertRaises(AssertionError, results.GetPageSpecificValueNamed,
+                        gpu_timeline.TimelineName(name, 'gpu', 'max'))
+      self.assertRaises(AssertionError, results.GetPageSpecificValueNamed,
+                        gpu_timeline.TimelineName(name, 'gpu', 'mean'))
+      self.assertRaises(AssertionError, results.GetPageSpecificValueNamed,
+                        gpu_timeline.TimelineName(name, 'gpu', 'stddev'))
+
+  def testFrameSeparation(self):
+    """Test frames are correctly calculated using the frame end marker."""
+    model = model_module.TimelineModel()
+    test_thread = model.GetOrCreateProcess(1).GetOrCreateThread(2)
+
+    # First frame is 10 ms.
+    for slice_item in _CreateGPUSlices(test_thread, 'test_item', 100, 10):
+      _AddSliceToThread(test_thread, slice_item)
+
+    # Mark frame end.
+    for slice_item in _CreateFrameEndSlices(test_thread, 105, 5):
+      _AddSliceToThread(test_thread, slice_item)
+
+    # Second frame is 20 ms.
+    for slice_item in _CreateGPUSlices(test_thread, 'test_item', 110, 20):
+      _AddSliceToThread(test_thread, slice_item)
+
+    model.FinalizeImport()
+
+    metric = gpu_timeline.GPUTimelineMetric()
+    results = self.GetResults(metric, model=model, renderer_thread=test_thread,
+                              interaction_records=INTERACTION_RECORDS)
+
+    for name, source_type in (('swap', None),
+                              ('total', 'cpu'),
+                              ('total', 'gpu')):
+      results.AssertHasPageSpecificScalarValue(
+          gpu_timeline.TimelineName(name, source_type, 'max'), 'ms', 20)
+      results.AssertHasPageSpecificScalarValue(
+          gpu_timeline.TimelineName(name, source_type, 'mean'), 'ms', 15)
+      results.AssertHasPageSpecificScalarValue(
+          gpu_timeline.TimelineName(name, source_type, 'stddev'), 'ms', 5)
+
+  def testFrameSeparationBeforeMarker(self):
+    """Test frames are correctly calculated using the frame end marker."""
+    model = model_module.TimelineModel()
+    test_thread = model.GetOrCreateProcess(1).GetOrCreateThread(2)
+
+    # Mark frame end.
+    for slice_item in _CreateFrameEndSlices(test_thread, 105, 5):
+      _AddSliceToThread(test_thread, slice_item)
+
+    # First frame is 10 ms.
+    for slice_item in _CreateGPUSlices(test_thread, 'test_item', 100, 10):
+      _AddSliceToThread(test_thread, slice_item)
+
+    # Second frame is 20 ms.
+    for slice_item in _CreateGPUSlices(test_thread, 'test_item', 110, 20):
+      _AddSliceToThread(test_thread, slice_item)
+
+    model.FinalizeImport()
+
+    metric = gpu_timeline.GPUTimelineMetric()
+    results = self.GetResults(metric, model=model, renderer_thread=test_thread,
+                              interaction_records=INTERACTION_RECORDS)
+
+    for name, src_type in (('swap', None), ('total', 'cpu'), ('total', 'gpu')):
+      results.AssertHasPageSpecificScalarValue(
+          gpu_timeline.TimelineName(name, src_type, 'max'), 'ms', 20)
+      results.AssertHasPageSpecificScalarValue(
+          gpu_timeline.TimelineName(name, src_type, 'mean'), 'ms', 15)
+      results.AssertHasPageSpecificScalarValue(
+          gpu_timeline.TimelineName(name, src_type, 'stddev'), 'ms', 5)
+
+  def testTrackedNameTraces(self):
+    """Be sure tracked names are being recorded correctly."""
+    self.assertGreater(len(gpu_timeline.TRACKED_GL_CONTEXT_NAME), 0)
+
+    marker, result = gpu_timeline.TRACKED_GL_CONTEXT_NAME.iteritems().next()
+
+    model = model_module.TimelineModel()
+    test_thread = model.GetOrCreateProcess(1).GetOrCreateThread(2)
+    for slice_item in _CreateGPUSlices(test_thread, marker, 100, 10):
+      _AddSliceToThread(test_thread, slice_item)
+    model.FinalizeImport()
+
+    metric = gpu_timeline.GPUTimelineMetric()
+    results = self.GetResults(metric, model=model, renderer_thread=test_thread,
+                              interaction_records=INTERACTION_RECORDS)
+
+    for source_type in ('cpu', 'gpu'):
+      results.AssertHasPageSpecificScalarValue(
+          gpu_timeline.TimelineName(result, source_type, 'max'),
+          'ms', 10)
+      results.AssertHasPageSpecificScalarValue(
+          gpu_timeline.TimelineName(result, source_type, 'mean'),
+          'ms', 10)
+      results.AssertHasPageSpecificScalarValue(
+          gpu_timeline.TimelineName(result, source_type, 'stddev'),
+          'ms', 0)
+
+  def testTrackedNameWithContextIDTraces(self):
+    """Be sure tracked names with context IDs are recorded correctly."""
+    self.assertGreater(len(gpu_timeline.TRACKED_GL_CONTEXT_NAME), 0)
+
+    marker, result = gpu_timeline.TRACKED_GL_CONTEXT_NAME.iteritems().next()
+    context_id = '-0x1234'
+
+    model = model_module.TimelineModel()
+    test_thread = model.GetOrCreateProcess(1).GetOrCreateThread(2)
+    for slice_item in _CreateGPUSlices(test_thread, marker + context_id,
+                                       100, 10):
+      _AddSliceToThread(test_thread, slice_item)
+    model.FinalizeImport()
+
+    metric = gpu_timeline.GPUTimelineMetric()
+    results = self.GetResults(metric, model=model, renderer_thread=test_thread,
+                              interaction_records=INTERACTION_RECORDS)
+
+    for source_type in ('cpu', 'gpu'):
+      results.AssertHasPageSpecificScalarValue(
+          gpu_timeline.TimelineName(result, source_type, 'max'),
+          'ms', 10)
+      results.AssertHasPageSpecificScalarValue(
+          gpu_timeline.TimelineName(result, source_type, 'mean'),
+          'ms', 10)
+      results.AssertHasPageSpecificScalarValue(
+          gpu_timeline.TimelineName(result, source_type, 'stddev'),
+          'ms', 0)
+
+  def testOutOfOrderDeviceTraces(self):
+    """Out-of-order device traces are still matched to the correct services."""
+    self.assertGreaterEqual(len(gpu_timeline.TRACKED_GL_CONTEXT_NAME), 2)
+
+    tracked_names_iter = gpu_timeline.TRACKED_GL_CONTEXT_NAME.iteritems()
+    marker1_name, result1_name = tracked_names_iter.next()
+    result2_name = result1_name
+    while result2_name == result1_name:
+      marker2_name, result2_name = tracked_names_iter.next()
+
+    model = model_module.TimelineModel()
+    test_thread = model.GetOrCreateProcess(1).GetOrCreateThread(2)
+
+    # marker1 lasts for 10 ms.
+    service_item1, device_item1 = _CreateGPUSlices(test_thread, marker1_name,
+                                                   100, 10)
+    # marker2 lasts for 20 ms.
+    service_item2, device_item2 = _CreateGPUSlices(test_thread, marker2_name,
+                                                   200, 20)
+
+    # Append out of order
+    _AddSliceToThread(test_thread, service_item1)
+    _AddSliceToThread(test_thread, service_item2)
+    _AddSliceToThread(test_thread, device_item2)
+    _AddSliceToThread(test_thread, device_item1)
+
+    model.FinalizeImport()
+
+    metric = gpu_timeline.GPUTimelineMetric()
+    results = self.GetResults(metric, model=model, renderer_thread=test_thread,
+                              interaction_records=INTERACTION_RECORDS)
+
+    for source_type in ('cpu', 'gpu'):
+      results.AssertHasPageSpecificScalarValue(
+          gpu_timeline.TimelineName(result1_name, source_type, 'max'),
+          'ms', 10)
+      results.AssertHasPageSpecificScalarValue(
+          gpu_timeline.TimelineName(result1_name, source_type, 'mean'),
+          'ms', 10)
+      results.AssertHasPageSpecificScalarValue(
+          gpu_timeline.TimelineName(result1_name, source_type, 'stddev'),
+          'ms', 0)
+      results.AssertHasPageSpecificScalarValue(
+          gpu_timeline.TimelineName(result2_name, source_type, 'max'),
+          'ms', 20)
+      results.AssertHasPageSpecificScalarValue(
+          gpu_timeline.TimelineName(result2_name, source_type, 'mean'),
+          'ms', 20)
+      results.AssertHasPageSpecificScalarValue(
+          gpu_timeline.TimelineName(result2_name, source_type, 'stddev'),
+          'ms', 0)
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/indexeddb_timeline.py b/catapult/telemetry/telemetry/web_perf/metrics/indexeddb_timeline.py
new file mode 100644
index 0000000..1dc8382
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/indexeddb_timeline.py
@@ -0,0 +1,80 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+from telemetry.web_perf.metrics import timeline_based_metric
+from telemetry.web_perf.metrics.trace_event_stats import TraceEventStats
+from telemetry.web_perf.metrics.trace_event_stats import TraceEventStatsInput
+
+
+class IndexedDBTimelineMetric(timeline_based_metric.TimelineBasedMetric):
+  """Metrics for IndexedDB operations.
+  """
+
+  def __init__(self):
+    super(IndexedDBTimelineMetric, self).__init__()
+    self._stats = TraceEventStats()
+
+    self._stats.AddInput(TraceEventStatsInput(
+      event_category='IndexedDB',
+      event_name='IndexedDBDatabase::GetOperation',
+      metric_name='idb-gets',
+      metric_description='The duration of all "get" ops in IndexedDB',
+      units='ms',
+      process_name='Browser'))
+
+    self._stats.AddInput(TraceEventStatsInput(
+      event_category='IndexedDB',
+      event_name='IndexedDBDatabase::PutOperation',
+      metric_name='idb-puts',
+      metric_description='The duration of all "put" ops in IndexedDB',
+      units='ms',
+      process_name='Browser'))
+
+    self._stats.AddInput(TraceEventStatsInput(
+      event_category='IndexedDB',
+      event_name='IndexedDBFactoryImpl::Open',
+      metric_name='idb-opens',
+      metric_description='The duration of all "open" ops in IndexedDB',
+      units='ms',
+      process_name='Browser'))
+
+    self._stats.AddInput(TraceEventStatsInput(
+      event_category='IndexedDB',
+      event_name='IndexedDBTransaction::Commit',
+      metric_name='idb-transaction-commits',
+      metric_description=('The duration of all "commit" ops of ' +
+                               'transactions in IndexedDB.'),
+      units='ms',
+      process_name='Browser'))
+
+    self._stats.AddInput(TraceEventStatsInput(
+      event_category='IndexedDB',
+      event_name='IndexedDBFactoryImpl::DeleteDatabase',
+      metric_name='idb-database-deletes',
+      metric_description=('The duration of all "delete" ops of ' +
+                               'IndexedDB databases.'),
+      units='ms',
+      process_name='Browser'))
+
+    self._stats.AddInput(TraceEventStatsInput(
+      event_category='IndexedDB',
+      event_name='IndexedDBDatabase::OpenCursorOperation',
+      metric_name='idb-cursor-opens',
+      metric_description=('The duration of all "open" ops of ' +
+                               'IndexedDB cursors.'),
+      units='ms',
+      process_name='Browser'))
+
+    self._stats.AddInput(TraceEventStatsInput(
+      event_category='IndexedDB',
+      event_name='IndexedDBCursor::CursorIterationOperation',
+      metric_name='idb-cursor-iterations',
+      metric_description=('The duration of all "iteration" ops of ' +
+                               'IndexedDB cursors.'),
+      units='ms',
+      process_name='Browser'))
+
+  def AddResults(self, model, renderer_process, interactions, results):
+    self._stats.AddResults(model, renderer_process, interactions, results)
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/jitter_timeline.py b/catapult/telemetry/telemetry/web_perf/metrics/jitter_timeline.py
new file mode 100644
index 0000000..cc1879d
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/jitter_timeline.py
@@ -0,0 +1,51 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.value import improvement_direction
+from telemetry.value import list_of_scalar_values
+from telemetry.web_perf.metrics import timeline_based_metric
+
+
+JITTER_EVENT_NAME = 'jitter'
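+# Each 'jitter' trace event is expected to carry the per-frame jitter amount
+# in its args['value'] field; see _AddJitterResultsInternal below.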
+
+
+class JitterTimelineMetric(timeline_based_metric.TimelineBasedMetric):
+  """JitterTimelineMetric reports jitter in composited layers.
+
+  This jitter is due to the main thread attempting to fix the position of a
+  scrolling composited layer. 'jitter-amount' is the metric added to the
+  results.
+  """
+
+  def __init__(self):
+    super(JitterTimelineMetric, self).__init__()
+
+  @staticmethod
+  def IsJitterEvent(event):
+    return event.name == JITTER_EVENT_NAME
+
+  def AddResults(self, model, renderer_thread, interactions, results):
+    assert interactions
+
+    jitter_events = []
+    for event in model.IterAllEvents(
+        event_predicate=self.IsJitterEvent):
+      jitter_events.append(event)
+
+    self._AddJitterResultsInternal(jitter_events, interactions, results)
+
+  def _AddJitterResultsInternal(self, events, interactions, results):
+    jitters = []
+    for event in events:
+      if timeline_based_metric.IsEventInInteractions(event, interactions):
+        jitters.append(event.args['value'])
+    if jitters:
+      results.AddValue(list_of_scalar_values.ListOfScalarValues(
+          page=results.current_page,
+          tir_label=interactions[0].label,
+          name='jitter-amount',
+          units='score',
+          values=jitters,
+          description='Jitter each frame',
+          improvement_direction=improvement_direction.DOWN))
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/jitter_timeline_unittest.py b/catapult/telemetry/telemetry/web_perf/metrics/jitter_timeline_unittest.py
new file mode 100644
index 0000000..d2a93dc
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/jitter_timeline_unittest.py
@@ -0,0 +1,50 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from collections import namedtuple
+from telemetry.internal.results import page_test_results
+from telemetry.page import page
+from telemetry.web_perf.metrics import jitter_timeline
+from telemetry.web_perf import timeline_interaction_record
+
+
+FakeEvent = namedtuple('Event', 'name, start, end, thread_duration, args')
+Interaction = timeline_interaction_record.TimelineInteractionRecord
+TEST_INTERACTION_LABEL = 'Action_TestInteraction'
+JITTER_EVENT_NAME = 'jitter'
+
+def GetJitterMetrics(events, interactions):
+  results = page_test_results.PageTestResults()
+  test_page = page.Page('file://blank.html')
+  results.WillRunPage(test_page)
+  jitter_timeline.JitterTimelineMetric()._AddJitterResultsInternal(
+      events, interactions, results)
+  return_dict = dict((value.name, value.values) for value in
+                     results.current_page_run.values)
+  results.DidRunPage(test_page)
+  return return_dict
+
+def FakeJitterEvent(start, end, value, thread_duration=None):
+  if not thread_duration:
+    thread_duration = end - start
+  return FakeEvent(jitter_timeline.JITTER_EVENT_NAME,
+          start, end, thread_duration, {'value':value})
+
+def TestInteraction(start, end):
+  return Interaction(TEST_INTERACTION_LABEL, start, end)
+
+
+class JitterTimelineMetricUnitTest(unittest.TestCase):
+  def testJitterMetric(self):
+    events = [FakeJitterEvent(0, 1, 10),
+              FakeJitterEvent(5, 10, 5),
+              FakeJitterEvent(15, 34, 45)]
+    interactions = [TestInteraction(4, 14)]
+    # The first and the last event do not start during the interaction, so
+    # they are ignored. The second event starts during the interaction, and its
+    # value is 5.
+    self.assertEqual({'jitter-amount': [5]},
+        GetJitterMetrics(events, interactions))
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/layout.py b/catapult/telemetry/telemetry/web_perf/metrics/layout.py
new file mode 100644
index 0000000..fb1c5f2
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/layout.py
@@ -0,0 +1,22 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.web_perf.metrics import single_event
+
+EVENT_NAME = 'FrameView::performLayout'
+METRIC_NAME = 'layout'
+
+class LayoutMetric(single_event._SingleEventMetric):
+  """Reports directly durations of FrameView::performLayout events.
+
+  """Directly reports the durations of FrameView::performLayout events.
+            start during user interaction.
+
+  Layout happens no more than once per frame, so per-frame-ness is implied.
+  """
+
+  def __init__(self):
+    super(LayoutMetric, self).__init__(EVENT_NAME, METRIC_NAME,
+        metric_description=('List of durations of layouts that were caused by '
+                            'and start during interactions'))
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/mainthread_jank_stats.py b/catapult/telemetry/telemetry/web_perf/metrics/mainthread_jank_stats.py
new file mode 100644
index 0000000..66a86a7
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/mainthread_jank_stats.py
@@ -0,0 +1,91 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+# A top-level slice of a main thread can cause the webapp to behave
+# unresponsively if its thread duration is greater than or equal to
+# USER_PERCEIVABLE_DELAY_THRESHOLD_MS. Human eyes can perceive delays as low as
+# 100ms, but since we use thread time instead of wall time, we reduce the
+# threshold further to 50ms to leave room for other OS activities.
+USER_PERCEIVABLE_DELAY_THRESHOLD_MS = 50
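+# For example (hypothetical numbers): a toplevel slice overlapping an
+# interaction record for 60ms of thread time counts towards
+# sum_big_top_slices_thread_time below, whereas a 40ms overlap only updates
+# biggest_top_slice_thread_time.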
+
+
+class _MainthreadJankStat(object):
+  """A small wrapper class for storing mainthread jank stats computed for a
+  single record.
+  """
+
+  def __init__(self):
+    self.sum_big_top_slices_thread_time = 0
+    self.biggest_top_slice_thread_time = 0
+
+
+def _ComputeMainthreadJankStatsForRecord(renderer_thread, record):
+  """Computes the mainthread jank stat on a record range.
+
+  Returns:
+      An instance of _MainthreadJankStat, which has:
+
+      sum_big_top_slices_thread_time is the total thread duration of all top
+      slices whose thread time ranges overlapped with (thread_start, thread_end)
+      and whose overlapped thread duration is greater than or equal to
+      USER_PERCEIVABLE_DELAY_THRESHOLD_MS.
+
+      biggest_top_slice_thread_time is the biggest thread duration of all
+      top slices whose thread time ranges overlapped with
+      (thread_start, thread_end).
+
+      Note: the thread duration of each slice is computed from its overlap
+      with (thread_start, thread_end).
+  """
+  stat = _MainthreadJankStat()
+  for s in renderer_thread.toplevel_slices:
+    jank_thread_duration = record.GetOverlappedThreadTimeForSlice(s)
+    stat.biggest_top_slice_thread_time = max(
+        stat.biggest_top_slice_thread_time, jank_thread_duration)
+    if jank_thread_duration >= USER_PERCEIVABLE_DELAY_THRESHOLD_MS:
+      stat.sum_big_top_slices_thread_time += jank_thread_duration
+  return stat
+
+
+class MainthreadJankStats(object):
+  """
+    Utility class for extracting main thread jank statistics from the timeline
+    (or other logging facilities), and providing them in a common format to
+    classes that compute benchmark metrics from this data.
+
+      total_big_jank_thread_time is the total thread duration of all top
+      slices whose thread time ranges overlapped with any thread time ranges of
+      the records, and whose overlapped thread duration is greater than or
+      equal to USER_PERCEIVABLE_DELAY_THRESHOLD_MS.
+
+      biggest_jank_thread_time is the biggest thread duration of all
+      top slices whose thread time ranges overlapped with any of records' thread
+      time ranges.
+  """
+
+  def __init__(self, renderer_thread, interaction_records):
+    self._renderer_thread = renderer_thread
+    self._interaction_records = interaction_records
+    self._total_big_jank_thread_time = 0
+    self._biggest_jank_thread_time = 0
+    self._ComputeMainthreadJankStats()
+
+  @property
+  def total_big_jank_thread_time(self):
+    return self._total_big_jank_thread_time
+
+  @property
+  def biggest_jank_thread_time(self):
+    return self._biggest_jank_thread_time
+
+  def _ComputeMainthreadJankStats(self):
+    for record in self._interaction_records:
+      record_jank_stat = _ComputeMainthreadJankStatsForRecord(
+          self._renderer_thread, record)
+      self._total_big_jank_thread_time += (
+          record_jank_stat.sum_big_top_slices_thread_time)
+      self._biggest_jank_thread_time = (
+          max(self._biggest_jank_thread_time,
+              record_jank_stat.biggest_top_slice_thread_time))
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/mainthread_jank_stats_unittest.py b/catapult/telemetry/telemetry/web_perf/metrics/mainthread_jank_stats_unittest.py
new file mode 100644
index 0000000..093d686
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/mainthread_jank_stats_unittest.py
@@ -0,0 +1,118 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.timeline import async_slice
+from telemetry.timeline import model as model_module
+from telemetry.web_perf.metrics import mainthread_jank_stats
+from telemetry.web_perf import timeline_interaction_record as tir_module
+
+
+class MainthreadJankTests(unittest.TestCase):
+
+  def CreateTestRecord(self, name, start, end, thread_start, thread_end,
+                       parent_thread):
+    s = async_slice.AsyncSlice(
+        'cat', 'Interaction.%s' % name,
+        timestamp=start, duration=end - start, start_thread=parent_thread,
+        end_thread=parent_thread, thread_start=thread_start,
+        thread_duration=thread_end - thread_start)
+    return tir_module.TimelineInteractionRecord.FromAsyncEvent(s)
+
+  def testComputeMainthreadJankStatsForRecord(self):
+    # The slice hierarchy should look something like this:
+    # [  MessageLoop::RunTask   ] [MessageLoop::RunTask][ MessageLoop::RunTask ]
+    #                                 [ foo ]                  [ bar ]
+    #            |                                                |
+    #          200ms                                            800ms
+    #       (thread_start)                                   (thread_end)
+    #
+    # Note: all timings mentioned here and in comments below are thread time.
+
+    model = model_module.TimelineModel()
+    renderer_main = model.GetOrCreateProcess(1).GetOrCreateThread(2)
+    renderer_main.name = 'CrRendererMain'
+
+    #   [     MessageLoop::RunTask             ]
+    # 100ms                                   300ms
+    renderer_main.BeginSlice('toplevel', 'MessageLoop::RunTask', 112, 100)
+    renderer_main.EndSlice(240, 300)
+
+    #   [     MessageLoop::RunTask             ]
+    # 450ms     [   foo  ]                    475 ms
+    #         460ms    470ms
+    renderer_main.BeginSlice('toplevel', 'MessageLoop::RunTask', 462, 450)
+    renderer_main.BeginSlice('otherlevel', 'foo', 468, 460)
+    renderer_main.EndSlice(475, 470)
+    renderer_main.EndSlice(620, 475)
+
+    #   [     MessageLoop::RunTask             ]
+    #  620ms     [   bar  ]                   900ms
+    #         750ms    850ms
+    renderer_main.BeginSlice('toplevel', 'MessageLoop::RunTask', 652, 620)
+    renderer_main.BeginSlice('otherlevel', 'bar', 785, 750)
+    renderer_main.EndSlice(875, 850)
+    renderer_main.EndSlice(1040, 900)
+
+    model.FinalizeImport(shift_world_to_zero=False)
+
+    # Make a record that starts at 200ms and ends at 800ms in thread time
+    record = self.CreateTestRecord('test', 100, 700, 200, 800, renderer_main)
+    # pylint: disable=protected-access
+    stat = mainthread_jank_stats._ComputeMainthreadJankStatsForRecord(
+        renderer_main, record)
+
+    # The overlap of the record's thread time range (200ms -> 800ms)
+    # with the first top slice (100ms -> 300ms) is 300 - 200 = 100ms,
+    # with the second slice (450ms -> 475ms) is 475 - 450 = 25ms,
+    # with the third slice (620ms -> 900ms) is 800 - 620 = 180ms.
+    #
+    # Hence we have 2 big top slices whose overlapped duration is >= 50ms,
+    # the biggest top slice is 180ms, and the total big top slice's thread time
+    # is 100 + 180 = 280ms.
+    self.assertEquals(180, stat.biggest_top_slice_thread_time)
+    self.assertEquals(280, stat.sum_big_top_slices_thread_time)
+
+  def testMainthreadJankStats(self):
+    # [ MessageLoop::RunTask]  [MessageLoop::RunTask]  [MessageLoop::RunTask]
+    # 10                   100 120                 400 450                750
+    #     [  record_1  ]       [  record_2  ]   [            record_3        ]
+    #     40          70      120          200  220                         900
+
+    model = model_module.TimelineModel()
+    renderer_main = model.GetOrCreateProcess(1).GetOrCreateThread(2)
+    renderer_main.name = 'CrRendererMain'
+
+    #   [     MessageLoop::RunTask  ]
+    #   10ms                       100ms
+    renderer_main.BeginSlice('toplevel', 'MessageLoop::RunTask', 12, 10)
+    renderer_main.EndSlice(120, 100)
+
+    #   [     MessageLoop::RunTask  ]
+    #   120ms                      400ms
+    renderer_main.BeginSlice('toplevel', 'MessageLoop::RunTask', 115, 120)
+    renderer_main.EndSlice(410, 400)
+
+    #   [     MessageLoop::RunTask  ]
+    #  450ms                       750ms
+    renderer_main.BeginSlice('toplevel', 'MessageLoop::RunTask', 477, 450)
+    renderer_main.EndSlice(772, 750)
+
+    model.FinalizeImport(shift_world_to_zero=False)
+
+    test_records = [
+        self.CreateTestRecord('record_1', 10, 80, 40, 70, renderer_main),
+        self.CreateTestRecord('record_2', 100, 210, 120, 200, renderer_main),
+        self.CreateTestRecord('record_3', 215, 920, 220, 900, renderer_main)
+    ]
+
+    stats = mainthread_jank_stats.MainthreadJankStats(
+        renderer_main, test_records)
+    # Main thread janks covered by records' ranges are:
+    # Record 1: (40ms -> 70ms)
+    # Record 2: (120ms -> 200ms)
+    # Record 3: (220ms -> 400ms), (450ms -> 750ms)
+    self.assertEquals(560, stats.total_big_jank_thread_time)
+    self.assertEquals(300, stats.biggest_jank_thread_time)
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/memory_timeline.py b/catapult/telemetry/telemetry/web_perf/metrics/memory_timeline.py
new file mode 100644
index 0000000..6c62813
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/memory_timeline.py
@@ -0,0 +1,94 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+
+from telemetry.timeline import memory_dump_event
+from telemetry.value import improvement_direction
+from telemetry.value import list_of_scalar_values
+from telemetry.web_perf.metrics import timeline_based_metric
+
+
+DEFAULT_METRICS = memory_dump_event.MMAPS_METRICS.keys()
+
+
+class MemoryTimelineMetric(timeline_based_metric.TimelineBasedMetric):
+  """MemoryTimelineMetric reports summary stats from memory dump events."""
+
+  def AddResults(self, model, renderer_thread, interactions, results):
+    # Note: This method will be called by TimelineBasedMeasurement once for
+    # each thread x interaction_label combination; where |interactions| is
+    # a list of all interactions sharing the same label that occurred in the
+    # given |renderer_thread|.
+
+    def ContainedIn(dump, interaction):
+      return interaction.start < dump.start and dump.end < interaction.end
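+      # For example (hypothetical): a dump at t=5 is contained in an
+      # interaction spanning (1, 10), but a dump ending exactly at t=10 is
+      # not, since both comparisons are strict.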
+
+    def OccursDuringInteractions(dump):
+      return (
+          # Dump must contain the renderer process that requested it,
+          renderer_thread.parent.pid in dump.pids and
+          # ... and fall within the span of an interaction record.
+          any(ContainedIn(dump, interaction) for interaction in interactions))
+
+    def ReportResultsForProcess(memory_dumps, process_name):
+      if not memory_dumps:
+        metric_values = dict.fromkeys(DEFAULT_METRICS)
+        num_processes = None
+        none_reason = 'No memory dumps with mmaps found within interactions'
+      else:
+        metric_values = collections.defaultdict(list)
+        num_processes = []
+        for dump in memory_dumps:
+          for metric, value in dump.GetMemoryUsage().iteritems():
+            metric_values[metric].append(value)
+          num_processes.append(dump.CountProcessMemoryDumps())
+        none_reason = None
+      for metric, values in metric_values.iteritems():
+        results.AddValue(list_of_scalar_values.ListOfScalarValues(
+            page=results.current_page,
+            name='memory_%s_%s' % (metric, process_name),
+            units='bytes',
+            tir_label=interactions[0].label,
+            values=values,
+            none_value_reason=none_reason,
+            improvement_direction=improvement_direction.DOWN))
+      results.AddValue(list_of_scalar_values.ListOfScalarValues(
+            page=results.current_page,
+            name='process_count_%s' % process_name,
+            units='count',
+            tir_label=interactions[0].label,
+            values=num_processes,
+            none_value_reason=none_reason,
+            improvement_direction=improvement_direction.DOWN))
+
+    memory_dumps = filter(OccursDuringInteractions,
+                          model.IterGlobalMemoryDumps())
+
+    # Either all dumps should contain memory maps (Android, Linux), or none
+    # of them (Windows, Mac).
+    assert len(set(dump.has_mmaps for dump in memory_dumps)) <= 1
+
+    ReportResultsForProcess(memory_dumps, 'total')
+
+    memory_dumps_by_process_name = collections.defaultdict(list)
+    for memory_dump in memory_dumps:
+      # Split this global memory_dump into individual process dumps, and then
+      # group them by their process names.
+      process_dumps_by_name = collections.defaultdict(list)
+      for process_dump in memory_dump.IterProcessMemoryDumps():
+        process_name = process_dump.process_name.lower().replace(' ', '_')
+        process_dumps_by_name[process_name].append(process_dump)
+
+      # Merge process dumps that have the same process name into a new
+      # global dump. Note: this is slightly abusing GlobalMemoryDump so that
+      # we can call dump.GetMemoryUsage() on the created dump objects to obtain
+      # the memory usage aggregated per type. This should no longer be needed
+      # after moving to TBMv2. See: http://crbug.com/581716
+      for process_name, process_dumps in process_dumps_by_name.iteritems():
+        memory_dumps_by_process_name[process_name].append(
+            memory_dump_event.GlobalMemoryDump(process_dumps))
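+      # For example (hypothetical): two renderer process dumps within the same
+      # global dump end up merged into a single 'renderer' GlobalMemoryDump
+      # whose GetMemoryUsage() aggregates the per-metric values of both.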
+
+    for process_name, memory_dumps in memory_dumps_by_process_name.iteritems():
+      ReportResultsForProcess(memory_dumps, process_name)
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/memory_timeline_unittest.py b/catapult/telemetry/telemetry/web_perf/metrics/memory_timeline_unittest.py
new file mode 100644
index 0000000..2badfa8
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/memory_timeline_unittest.py
@@ -0,0 +1,202 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+import unittest
+
+from telemetry.internal.results import page_test_results
+from telemetry.page import page
+import mock
+from telemetry.timeline import memory_dump_event
+from telemetry.web_perf.metrics import memory_timeline
+from telemetry.web_perf import timeline_interaction_record
+
+
+def MockProcessDumpEvent(dump_id, name, start, memory_usage, pid=1234):
+  process_dump = mock.Mock()
+  process_dump.dump_id = dump_id
+  process_dump.process.pid = pid
+  process_dump.process_name = name
+  process_dump.start = start
+  process_dump.end = start
+  if memory_usage is None:
+    memory_usage = {}
+  elif not isinstance(memory_usage, dict):
+    memory_usage = dict.fromkeys(memory_timeline.DEFAULT_METRICS, memory_usage)
+  process_dump.has_mmaps = any(metric in memory_usage for metric
+                               in memory_timeline.DEFAULT_METRICS)
+  process_dump.GetMemoryUsage = mock.Mock(return_value=memory_usage)
+  return process_dump
+
+
+def MockTimelineModel(process_dumps):
+  dumps_by_id = collections.defaultdict(list)
+  for process_dump in process_dumps:
+    dumps_by_id[process_dump.dump_id].append(process_dump)
+
+  global_dumps = sorted((memory_dump_event.GlobalMemoryDump(dumps)
+                         for dumps in dumps_by_id.itervalues()),
+                        key=lambda dump: dump.start)
+
+  mock_model = mock.Mock()
+  mock_model.IterGlobalMemoryDumps = mock.Mock(return_value=global_dumps)
+  return mock_model
+
+
+def TestInteraction(start, end):
+  return timeline_interaction_record.TimelineInteractionRecord(
+      'Action_TestInteraction', start, end)
+
+
+class MemoryTimelineMetricUnitTest(unittest.TestCase):
+  def getResultsDict(self, model, interactions, renderer_pid=1234):
+    def strip_prefix(key):
+      if key.startswith('memory_'):
+        return key[len('memory_'):]
+      elif key.startswith('process_count_'):
+        return key
+      else:
+        self.fail('Unexpected key: %r' % key)
+
+    mock_thread = mock.Mock()
+    mock_thread.parent.pid = renderer_pid
+    results = page_test_results.PageTestResults()
+    test_page = page.Page('http://google.com')
+    results.WillRunPage(test_page)
+    metric = memory_timeline.MemoryTimelineMetric()
+    metric.AddResults(model, mock_thread, interactions, results)
+    result_dict = {strip_prefix(v.name): v.values
+                   for v in results.current_page_run.values}
+    results.DidRunPage(test_page)
+    return result_dict
+
+  def getOverallPssTotal(self, model, interactions, renderer_pid=1234):
+    results = self.getResultsDict(
+        model, interactions, renderer_pid=renderer_pid)
+    self.assertTrue('mmaps_overall_pss_total' in results)
+    return results['mmaps_overall_pss_total']
+
+  def testSingleMemoryDump(self):
+    model = MockTimelineModel([
+        MockProcessDumpEvent('dump1', 'browser', 2, 123)])
+    interactions = [TestInteraction(1, 10)]
+    self.assertEqual([123], self.getOverallPssTotal(model, interactions))
+
+  def testMultipleMemoryDumps(self):
+    model = MockTimelineModel([
+        MockProcessDumpEvent('dump1', 'browser', 2, 123),
+        MockProcessDumpEvent('dump2', 'browser', 5, 456)])
+    interactions = [TestInteraction(1, 10)]
+    self.assertEqual([123, 456], self.getOverallPssTotal(model, interactions))
+
+  def testMultipleInteractions(self):
+    model = MockTimelineModel([
+        MockProcessDumpEvent('dump1', 'browser', 2, 123),
+        MockProcessDumpEvent('dump2', 'browser', 5, 456),
+        MockProcessDumpEvent('dump3', 'browser', 13, 789)])
+    interactions = [TestInteraction(1, 10),
+                    TestInteraction(12, 15)]
+    self.assertEqual([123, 456, 789],
+                      self.getOverallPssTotal(model, interactions))
+
+  def testDumpsOutsideInteractionsAreFilteredOut(self):
+    model = MockTimelineModel([
+        MockProcessDumpEvent('dump1', 'browser', 1, 111),
+        MockProcessDumpEvent('dump2', 'browser', 5, 123),
+        MockProcessDumpEvent('dump3', 'browser', 11, 456),
+        MockProcessDumpEvent('dump4', 'browser', 13, 555),
+        MockProcessDumpEvent('dump5', 'browser', 17, 789)])
+    interactions = [TestInteraction(3, 10),
+                    TestInteraction(12, 15)]
+    self.assertEqual([123, 555], self.getOverallPssTotal(model, interactions))
+
+  def testDumpsFromOtherBrowserAreFilteredOut(self):
+    model = MockTimelineModel([
+        MockProcessDumpEvent('dump1', 'browser', 5, 1, pid=1111),
+        MockProcessDumpEvent('dump1', 'renderer', 4, 2, pid=2222),
+        MockProcessDumpEvent('dump2', 'browser', 14, 4, pid=1111),
+        MockProcessDumpEvent('dump2', 'renderer', 13, 8, pid=2222),
+        MockProcessDumpEvent('dump3', 'browser', 4, 16, pid=3333)])
+    interactions = [TestInteraction(3, 10),
+                    TestInteraction(12, 15)]
+    self.assertEqual(
+        [3, 12],
+        self.getOverallPssTotal(model, interactions, renderer_pid=2222))
+
+  def testDumpsWithNoMemoryMaps(self):
+    model = MockTimelineModel([
+        MockProcessDumpEvent('dump1', 'browser', 2, {'blink': 123}),
+        MockProcessDumpEvent('dump2', 'browser', 5, {'blink': 456})])
+    interactions = [TestInteraction(1, 10)]
+    self.assertEqual(
+        self.getResultsDict(model, interactions),
+        {
+          'blink_total': [123, 456],
+          'blink_browser': [123, 456],
+          'process_count_total': [1, 1],
+          'process_count_browser': [1, 1]
+        })
+
+  def testDumpsWithSomeMemoryMaps(self):
+    model = MockTimelineModel([
+        MockProcessDumpEvent('dump1', 'browser', 2, 123),
+        MockProcessDumpEvent('dump2', 'browser', 5, None)])
+    interactions = [TestInteraction(1, 10)]
+    self.assertRaises(AssertionError, self.getResultsDict, model, interactions)
+
+  def testReturnsNoneWhenAllDumpsAreFilteredOut(self):
+    model = MockTimelineModel([
+        MockProcessDumpEvent('dump1', 'bowser', 0, 123),
+        MockProcessDumpEvent('dump2', 'browser', 11, 789)])
+    interactions = [TestInteraction(1, 10)]
+    self.assertEqual(None, self.getOverallPssTotal(model, interactions))
+
+  def testResultsBrokenDownByProcess(self):
+    metrics = memory_timeline.DEFAULT_METRICS
+    stats1 = {metric: value for value, metric in enumerate(metrics)}
+    stats2 = {metric: value for value, metric in enumerate(reversed(metrics))}
+    total = len(metrics) - 1
+
+    expected = {
+      'process_count_browser': [1],
+      'process_count_gpu_process': [1],
+      'process_count_total': [2],
+    }
+    expected.update(('%s_browser' % metric, [value])
+                    for metric, value in stats1.iteritems())
+    expected.update(('%s_gpu_process' % metric, [value])
+                    for metric, value in stats2.iteritems())
+    expected.update(('%s_total' % metric, [total]) for metric in metrics)
+
+    model = MockTimelineModel([
+        MockProcessDumpEvent('dump1', 'browser', 2, stats1),
+        MockProcessDumpEvent('dump1', 'GPU Process', 5, stats2)])
+    interactions = [TestInteraction(1, 10)]
+    self.assertEqual(expected, self.getResultsDict(model, interactions))
+
+  def testResultsBrokenDownByProcessWithMultipleRenderers(self):
+    metrics = memory_timeline.DEFAULT_METRICS
+    total = len(metrics) - 1
+    stats1 = {metric: value for value, metric in enumerate(metrics)}
+    stats2 = {metric: value for value, metric in enumerate(reversed(metrics))}
+    stats3 = {metric: total for metric in metrics}
+
+    expected = {
+      'process_count_renderer': [2],
+      'process_count_browser': [1],
+      'process_count_total': [3],
+    }
+    for metric in metrics:
+      expected.update([
+        ('%s_renderer' % metric, [total]),
+        ('%s_browser' % metric, [total]),
+        ('%s_total' % metric, [2 * total]),
+      ])
+
+    model = MockTimelineModel([
+        MockProcessDumpEvent('dump1', 'renderer', 3, stats1),
+        MockProcessDumpEvent('dump1', 'renderer', 4, stats2),
+        MockProcessDumpEvent('dump1', 'browser', 5, stats3)])
+    interactions = [TestInteraction(1, 10)]
+    self.assertEqual(expected, self.getResultsDict(model, interactions))
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/rendering_frame.py b/catapult/telemetry/telemetry/web_perf/metrics/rendering_frame.py
new file mode 100644
index 0000000..48f8546
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/rendering_frame.py
@@ -0,0 +1,86 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from collections import defaultdict
+
+from telemetry.timeline import bounds
+from telemetry.timeline import slice as slice_module
+
+
+class MissingData(Exception):
+  pass
+
+
+class NoBeginFrameIdException(Exception):
+  pass
+
+
+class RenderingFrame(object):
+  """Object with information about the triggering of a BeginMainFrame event."""
+  send_begin_frame_event = 'ThreadProxy::ScheduledActionSendBeginMainFrame'
+  begin_main_frame_event = 'ThreadProxy::BeginMainFrame'
+
+  def __init__(self, events):
+    all_send_begin_frame_events = [e for e in events
+                                   if e.name == self.send_begin_frame_event]
+    if len(all_send_begin_frame_events) != 1:
+      raise MissingData('There must be exactly one %s event.' %
+                        self.send_begin_frame_event)
+
+    all_begin_main_frame_events = [e for e in events
+                                   if e.name == self.begin_main_frame_event]
+    if not all_begin_main_frame_events:
+      raise MissingData('There must be at least one %s event.' %
+                        self.begin_main_frame_event)
+    all_begin_main_frame_events.sort(key=lambda e: e.start)
+
+    self._send_begin_frame = all_send_begin_frame_events[0]
+    self._begin_main_frame = all_begin_main_frame_events[-1]
+
+    self._bounds = bounds.Bounds()
+    self._bounds.AddEvent(self._begin_main_frame)
+    self._bounds.AddEvent(self._send_begin_frame)
+
+  @staticmethod
+  def IsEventUseful(event):
+    return event.name in [RenderingFrame.send_begin_frame_event,
+                          RenderingFrame.begin_main_frame_event]
+
+  @property
+  def bounds(self):
+    return self._bounds
+
+  @property
+  def queueing_duration(self):
+    return self._begin_main_frame.start - self._send_begin_frame.start
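+    # For example (hypothetical timestamps): with a send event at ts=10 and
+    # BeginMainFrame events at ts=20, 30 and 40, the latest BeginMainFrame is
+    # used, so queueing_duration == 30.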
+
+
+def GetFrameEventsInsideRange(renderer_process, timeline_range):
+  """Returns RenderingFrames for all relevant events in the timeline_range."""
+  # First filter all events from the renderer_process and turn them into a
+  # dictionary of the form:
+  #   {0: [send_begin_frame, begin_main_frame, begin_main_frame],
+  #    1: [begin_main_frame, send_begin_frame],
+  #    2: [send_begin_frame, begin_main_frame]}
+  begin_frame_events_by_id = defaultdict(list)
+  for event in renderer_process.IterAllEvents(
+      event_type_predicate=lambda t: t == slice_module.Slice,
+      event_predicate=RenderingFrame.IsEventUseful):
+    begin_frame_id = event.args.get('begin_frame_id', None)
+    if begin_frame_id is None:
+      raise NoBeginFrameIdException('Event is missing a begin_frame_id.')
+    begin_frame_events_by_id[begin_frame_id].append(event)
+
+  # Now, create RenderingFrames for events wherever possible.
+  frames = []
+  for events in begin_frame_events_by_id.values():
+    try:
+      frame = RenderingFrame(events)
+      if frame.bounds.Intersects(timeline_range):
+        frames.append(frame)
+    except MissingData:
+      continue
+  frames.sort(key=lambda frame: frame.bounds.min)
+
+  return frames
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/rendering_frame_unittest.py b/catapult/telemetry/telemetry/web_perf/metrics/rendering_frame_unittest.py
new file mode 100644
index 0000000..95f6d93
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/rendering_frame_unittest.py
@@ -0,0 +1,163 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+import telemetry.timeline.bounds as timeline_bounds
+from telemetry.timeline import model
+import telemetry.timeline.slice as tracing_slice
+from telemetry.web_perf.metrics. \
+    rendering_frame import GetFrameEventsInsideRange
+from telemetry.web_perf.metrics.rendering_frame import MissingData
+from telemetry.web_perf.metrics.rendering_frame import RenderingFrame
+
+
+class RenderingFrameTestData(object):
+
+  def __init__(self):
+    self._begin_frame_id = 0
+    self._events = []
+    self._renderer_process = model.TimelineModel().GetOrCreateProcess(pid=1)
+    self._main_thread = self._renderer_process.GetOrCreateThread(tid=11)
+    self._compositor_thread = self._renderer_process.GetOrCreateThread(tid=12)
+
+  @property
+  def events(self):
+    return self._events
+
+  @property
+  def renderer_process(self):
+    return self._renderer_process
+
+  def AddSendEvent(self, ts=0, duration=1):
+    self._begin_frame_id += 1
+    event = self._CreateEvent(
+        RenderingFrame.send_begin_frame_event, ts, duration)
+    self._compositor_thread.PushSlice(event)
+
+  def AddBeginMainFrameEvent(self, ts=0, duration=1):
+    event = self._CreateEvent(
+        RenderingFrame.begin_main_frame_event, ts, duration)
+    self._main_thread.PushSlice(event)
+
+  def FinalizeImport(self):
+    self._renderer_process.FinalizeImport()
+
+  def _CreateEvent(self, event_name, ts, duration):
+    event = tracing_slice.Slice(None, 'cc,benchmark', event_name, ts,
+        duration=duration, args={'begin_frame_id': self._begin_frame_id})
+    self._events.append(event)
+    return event
+
+
+def GenerateTimelineRange(start=0, end=100):
+  timeline_range = timeline_bounds.Bounds()
+  timeline_range.AddValue(start)
+  timeline_range.AddValue(end)
+  return timeline_range
+
+
+class RenderingFrameUnitTest(unittest.TestCase):
+
+  def testRenderingFrame(self):
+    d = RenderingFrameTestData()
+    d.AddSendEvent(ts=10)
+    d.AddBeginMainFrameEvent(ts=20)
+    d.FinalizeImport()
+
+    frame = RenderingFrame(d.events)
+    self.assertEquals(10, frame.queueing_duration)
+
+  def testRenderingFrameMissingSendBeginFrameEvents(self):
+    d = RenderingFrameTestData()
+    d.AddBeginMainFrameEvent(ts=10)
+    d.FinalizeImport()
+
+    self.assertRaises(MissingData, RenderingFrame, d.events)
+
+  def testRenderingFrameDuplicateSendBeginFrameEvents(self):
+    d = RenderingFrameTestData()
+    d.AddSendEvent(ts=10)
+    d.AddBeginMainFrameEvent(ts=20)
+    d.AddSendEvent(ts=30)
+    d.FinalizeImport()
+
+    self.assertRaises(MissingData, RenderingFrame, d.events)
+
+  def testRenderingFrameMissingBeginMainFrameEvents(self):
+    d = RenderingFrameTestData()
+    d.AddSendEvent(ts=10)
+    d.FinalizeImport()
+
+    self.assertRaises(MissingData, RenderingFrame, d.events)
+
+  def testRenderingFrameDuplicateBeginMainFrameEvents(self):
+    d = RenderingFrameTestData()
+    d.AddSendEvent(ts=10)
+    d.AddBeginMainFrameEvent(ts=20)
+    d.AddBeginMainFrameEvent(ts=30)
+    d.AddBeginMainFrameEvent(ts=40)
+    d.FinalizeImport()
+
+    frame = RenderingFrame(d.events)
+    self.assertEquals(30, frame.queueing_duration)
+
+  def testFrameEventMissingBeginFrameId(self):
+    timeline = model.TimelineModel()
+    process = timeline.GetOrCreateProcess(pid=1)
+    main_thread = process.GetOrCreateThread(tid=11)
+    timeline_range = timeline_bounds.Bounds()
+
+    # Create an event without the begin_frame_id argument
+    event = tracing_slice.Slice(
+        None, 'cc,benchmark', RenderingFrame.begin_main_frame_event, 0)
+    main_thread.PushSlice(event)
+    process.FinalizeImport()
+    self.assertRaises(Exception, GetFrameEventsInsideRange, process,
+                      timeline_range)
+
+  def testGetFrameEventsInsideRange(self):
+    """Test a basic sequence, with expected frame queueing delays A and B.
+
+                 |----A----|    |--B--|
+         Main:        [1]  [1]        [2]
+
+    Compositor:  [1]            [2]
+    """
+    d = RenderingFrameTestData()
+    d.AddSendEvent(ts=10)
+    d.AddBeginMainFrameEvent(ts=20)
+    d.AddBeginMainFrameEvent(ts=30)
+    d.AddSendEvent(ts=40)
+    d.AddBeginMainFrameEvent(ts=50)
+    d.FinalizeImport()
+
+    timeline_range = GenerateTimelineRange()
+    frame_events = GetFrameEventsInsideRange(d.renderer_process, timeline_range)
+
+    self.assertEquals(2, len(frame_events))
+    self.assertEquals(20, frame_events[0].queueing_duration)
+    self.assertEquals(10, frame_events[1].queueing_duration)
+
+  def testFrameEventsMissingDataNotIncluded(self):
+    """Test a sequence missing an initial SendBeginFrame.
+
+    Only one frame should be returned, with expected frame queueing delay A.
+                           |--A--|
+          Main:  [0]  [0]        [2]
+
+    Compositor:            [2]
+    """
+    d = RenderingFrameTestData()
+    d.AddBeginMainFrameEvent(ts=20)
+    d.AddBeginMainFrameEvent(ts=30)
+    d.AddSendEvent(ts=40)
+    d.AddBeginMainFrameEvent(ts=50)
+    d.FinalizeImport()
+
+    timeline_range = GenerateTimelineRange()
+    frame_events = GetFrameEventsInsideRange(d.renderer_process, timeline_range)
+
+    self.assertEquals(1, len(frame_events))
+    self.assertEquals(10, frame_events[0].queueing_duration)
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/rendering_stats.py b/catapult/telemetry/telemetry/web_perf/metrics/rendering_stats.py
new file mode 100644
index 0000000..65bdbee
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/rendering_stats.py
@@ -0,0 +1,296 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import itertools
+
+from operator import attrgetter
+
+from telemetry.web_perf.metrics import rendering_frame
+
+# These are LatencyInfo component names indicating the various components
+# that the input event has travelled through.
+# This is when the input event first reaches chrome.
+UI_COMP_NAME = 'INPUT_EVENT_LATENCY_UI_COMPONENT'
+# This is when the input event was originally created by OS.
+ORIGINAL_COMP_NAME = 'INPUT_EVENT_LATENCY_ORIGINAL_COMPONENT'
+# This is when the input event was sent from browser to renderer.
+BEGIN_COMP_NAME = 'INPUT_EVENT_LATENCY_BEGIN_RWH_COMPONENT'
+# This is when an input event is turned into a scroll update.
+BEGIN_SCROLL_UPDATE_COMP_NAME = (
+    'LATENCY_BEGIN_SCROLL_LISTENER_UPDATE_MAIN_COMPONENT')
+# This is when a scroll update is forwarded to the main thread.
+FORWARD_SCROLL_UPDATE_COMP_NAME = (
+    'INPUT_EVENT_LATENCY_FORWARD_SCROLL_UPDATE_TO_MAIN_COMPONENT')
+# This is when the input event has reached swap buffer.
+END_COMP_NAME = 'INPUT_EVENT_GPU_SWAP_BUFFER_COMPONENT'
+
+# Name for a main thread scroll update latency event.
+MAIN_THREAD_SCROLL_UPDATE_EVENT_NAME = 'Latency::ScrollUpdate'
+# Name for a gesture scroll update latency event.
+GESTURE_SCROLL_UPDATE_EVENT_NAME = 'InputLatency::GestureScrollUpdate'
+
+# These are keys used in the 'data' field dictionary located in
+# BenchmarkInstrumentation::ImplThreadRenderingStats.
+VISIBLE_CONTENT_DATA = 'visible_content_area'
+APPROXIMATED_VISIBLE_CONTENT_DATA = 'approximated_visible_content_area'
+CHECKERBOARDED_VISIBLE_CONTENT_DATA = 'checkerboarded_visible_content_area'
+# These are keys used in the 'errors' field dictionary located in
+# RenderingStats in this file.
+APPROXIMATED_PIXEL_ERROR = 'approximated_pixel_percentages'
+CHECKERBOARDED_PIXEL_ERROR = 'checkerboarded_pixel_percentages'
+
+
+def GetLatencyEvents(process, timeline_range):
+  """Get LatencyInfo trace events from the process's trace buffer that are
+     within the timeline_range.
+
+  Input events dump their LatencyInfo into the trace buffer as async trace
+  events whose names start with "InputLatency"; non-input events use names
+  starting with "Latency". Each trace event has a member 'data' containing
+  its latency history.
+  """
+  latency_events = []
+  if not process:
+    return latency_events
+  for event in itertools.chain(
+      process.IterAllAsyncSlicesStartsWithName('InputLatency'),
+      process.IterAllAsyncSlicesStartsWithName('Latency')):
+    if event.start >= timeline_range.min and event.end <= timeline_range.max:
+      for ss in event.sub_slices:
+        if 'data' in ss.args:
+          latency_events.append(ss)
+  return latency_events
+
+
+def ComputeEventLatencies(input_events):
+  """ Compute input event latencies.
+
+  Input event latency is the time from when the input event is created to
+  when the resulting page update is swap buffered.
+  Input events on different platforms use different LatencyInfo components to
+  record their creation timestamps. We go through the following component list
+  to find the creation timestamp:
+  1. INPUT_EVENT_LATENCY_ORIGINAL_COMPONENT -- when event is created in OS
+  2. INPUT_EVENT_LATENCY_UI_COMPONENT -- when event reaches Chrome
+  3. INPUT_EVENT_LATENCY_BEGIN_RWH_COMPONENT -- when event reaches RenderWidget
+
+  If the latency starts with a
+  LATENCY_BEGIN_SCROLL_LISTENER_UPDATE_MAIN_COMPONENT component, then it is
+  classified as a scroll update instead of a normal input latency measure.
+
+  Returns:
+    A list sorted by increasing start time of latencies which are tuples of
+    (input_event_name, latency_in_ms).
+  """
+  input_event_latencies = []
+  for event in input_events:
+    data = event.args['data']
+    if END_COMP_NAME in data:
+      end_time = data[END_COMP_NAME]['time']
+      if ORIGINAL_COMP_NAME in data:
+        start_time = data[ORIGINAL_COMP_NAME]['time']
+      elif UI_COMP_NAME in data:
+        start_time = data[UI_COMP_NAME]['time']
+      elif BEGIN_COMP_NAME in data:
+        start_time = data[BEGIN_COMP_NAME]['time']
+      elif BEGIN_SCROLL_UPDATE_COMP_NAME in data:
+        start_time = data[BEGIN_SCROLL_UPDATE_COMP_NAME]['time']
+      else:
+        raise ValueError('LatencyInfo has no begin component')
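+      # Component timestamps appear to be recorded in microseconds (the unit
+      # tests scale millisecond values by 1000), so the division by 1000.0
+      # below yields latency in milliseconds.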
+      latency = (end_time - start_time) / 1000.0
+      input_event_latencies.append((start_time, event.name, latency))
+
+  input_event_latencies.sort()
+  return [(name, latency) for _, name, latency in input_event_latencies]
+
+
+def HasRenderingStats(process):
+  """ Returns True if the process contains at least one
+      BenchmarkInstrumentation::*RenderingStats event with a frame.
+  """
+  if not process:
+    return False
+  for event in process.IterAllSlicesOfName(
+      'BenchmarkInstrumentation::DisplayRenderingStats'):
+    if 'data' in event.args and event.args['data']['frame_count'] == 1:
+      return True
+  for event in process.IterAllSlicesOfName(
+      'BenchmarkInstrumentation::ImplThreadRenderingStats'):
+    if 'data' in event.args and event.args['data']['frame_count'] == 1:
+      return True
+  return False
+
+def GetTimestampEventName(process):
+  """ Returns the name of the events used to count frame timestamps. """
+  if process.name == 'SurfaceFlinger':
+    return 'vsync_before'
+
+  event_name = 'BenchmarkInstrumentation::DisplayRenderingStats'
+  for event in process.IterAllSlicesOfName(event_name):
+    if 'data' in event.args and event.args['data']['frame_count'] == 1:
+      return event_name
+
+  return 'BenchmarkInstrumentation::ImplThreadRenderingStats'
+
+class RenderingStats(object):
+  def __init__(self, renderer_process, browser_process, surface_flinger_process,
+               timeline_ranges):
+    """
+    Utility class for extracting rendering statistics from the timeline (or
+    other logging facilities), and providing them in a common format to classes
+    that compute benchmark metrics from this data.
+
+    Stats are lists of lists of numbers. The outer list stores one list per
+    timeline range.
+
+    All *_time values are measured in milliseconds.
+    """
+    assert len(timeline_ranges) > 0
+    self.refresh_period = None
+
+    # Find the top level process with rendering stats (browser or renderer).
+    if surface_flinger_process:
+      timestamp_process = surface_flinger_process
+      self._GetRefreshPeriodFromSurfaceFlingerProcess(surface_flinger_process)
+    elif HasRenderingStats(browser_process):
+      timestamp_process = browser_process
+    else:
+      timestamp_process = renderer_process
+
+    timestamp_event_name = GetTimestampEventName(timestamp_process)
+
+    # A lookup from list names below to any errors or exceptions encountered
+    # in attempting to generate that list.
+    self.errors = {}
+
+    self.frame_timestamps = []
+    self.frame_times = []
+    self.approximated_pixel_percentages = []
+    self.checkerboarded_pixel_percentages = []
+    # End-to-end latency for an input event - from when the input event is
+    # generated to when the resulting page update is swap buffered.
+    self.input_event_latency = []
+    self.frame_queueing_durations = []
+    # Latency from when a scroll update is sent to the main thread until the
+    # resulting frame is swapped.
+    self.main_thread_scroll_latency = []
+    # Latency for a GestureScrollUpdate input event.
+    self.gesture_scroll_update_latency = []
+
+    for timeline_range in timeline_ranges:
+      self.frame_timestamps.append([])
+      self.frame_times.append([])
+      self.approximated_pixel_percentages.append([])
+      self.checkerboarded_pixel_percentages.append([])
+      self.input_event_latency.append([])
+      self.main_thread_scroll_latency.append([])
+      self.gesture_scroll_update_latency.append([])
+
+      if timeline_range.is_empty:
+        continue
+      self._InitFrameTimestampsFromTimeline(
+          timestamp_process, timestamp_event_name, timeline_range)
+      self._InitImplThreadRenderingStatsFromTimeline(
+          renderer_process, timeline_range)
+      self._InitInputLatencyStatsFromTimeline(
+          browser_process, renderer_process, timeline_range)
+      self._InitFrameQueueingDurationsFromTimeline(
+          renderer_process, timeline_range)
+
+  def _GetRefreshPeriodFromSurfaceFlingerProcess(self, surface_flinger_process):
+    for event in surface_flinger_process.IterAllEventsOfName('vsync_before'):
+      self.refresh_period = event.args['data']['refresh_period']
+      return
+
+  def _InitInputLatencyStatsFromTimeline(
+      self, browser_process, renderer_process, timeline_range):
+    latency_events = GetLatencyEvents(browser_process, timeline_range)
+    # Plugin input event's latency slice is generated in renderer process.
+    latency_events.extend(GetLatencyEvents(renderer_process, timeline_range))
+    event_latencies = ComputeEventLatencies(latency_events)
+    # Don't include scroll updates in the overall input latency measurement,
+    # because scroll updates can take much more time to process than other
+    # input events and would therefore add noise to overall latency numbers.
+    self.input_event_latency[-1] = [
+        latency for name, latency in event_latencies
+        if name != MAIN_THREAD_SCROLL_UPDATE_EVENT_NAME]
+    self.main_thread_scroll_latency[-1] = [
+        latency for name, latency in event_latencies
+        if name == MAIN_THREAD_SCROLL_UPDATE_EVENT_NAME]
+    self.gesture_scroll_update_latency[-1] = [
+        latency for name, latency in event_latencies
+        if name == GESTURE_SCROLL_UPDATE_EVENT_NAME]
+
+  def _GatherEvents(self, event_name, process, timeline_range):
+    events = []
+    for event in process.IterAllSlicesOfName(event_name):
+      if event.start >= timeline_range.min and event.end <= timeline_range.max:
+        if 'data' not in event.args:
+          continue
+        events.append(event)
+    events.sort(key=attrgetter('start'))
+    return events
+
+  def _AddFrameTimestamp(self, event):
+    frame_count = event.args['data']['frame_count']
+    if frame_count > 1:
+      raise ValueError('trace contains multi-frame render stats')
+    if frame_count == 1:
+      self.frame_timestamps[-1].append(
+          event.start)
+      if len(self.frame_timestamps[-1]) >= 2:
+        self.frame_times[-1].append(
+            self.frame_timestamps[-1][-1] - self.frame_timestamps[-1][-2])
+
+  def _InitFrameTimestampsFromTimeline(
+      self, process, timestamp_event_name, timeline_range):
+    for event in self._GatherEvents(
+        timestamp_event_name, process, timeline_range):
+      self._AddFrameTimestamp(event)
+
+  def _InitImplThreadRenderingStatsFromTimeline(self, process, timeline_range):
+    event_name = 'BenchmarkInstrumentation::ImplThreadRenderingStats'
+    for event in self._GatherEvents(event_name, process, timeline_range):
+      data = event.args['data']
+      if VISIBLE_CONTENT_DATA not in data:
+        self.errors[APPROXIMATED_PIXEL_ERROR] = (
+          'Calculating approximated_pixel_percentages not possible because '
+          'visible_content_area was missing.')
+        self.errors[CHECKERBOARDED_PIXEL_ERROR] = (
+          'Calculating checkerboarded_pixel_percentages not possible because '
+          'visible_content_area was missing.')
+        return
+      visible_content_area = data[VISIBLE_CONTENT_DATA]
+      if visible_content_area == 0:
+        self.errors[APPROXIMATED_PIXEL_ERROR] = (
+          'Calculating approximated_pixel_percentages would have caused '
+          'a divide-by-zero')
+        self.errors[CHECKERBOARDED_PIXEL_ERROR] = (
+          'Calculating checkerboarded_pixel_percentages would have caused '
+          'a divide-by-zero')
+        return
+      if APPROXIMATED_VISIBLE_CONTENT_DATA in data:
+        self.approximated_pixel_percentages[-1].append(
+          round(float(data[APPROXIMATED_VISIBLE_CONTENT_DATA]) /
+                float(data[VISIBLE_CONTENT_DATA]) * 100.0, 3))
+      else:
+        self.errors[APPROXIMATED_PIXEL_ERROR] = (
+          'approximated_pixel_percentages was not recorded')
+      if CHECKERBOARDED_VISIBLE_CONTENT_DATA in data:
+        self.checkerboarded_pixel_percentages[-1].append(
+          round(float(data[CHECKERBOARDED_VISIBLE_CONTENT_DATA]) /
+                float(data[VISIBLE_CONTENT_DATA]) * 100.0, 3))
+      else:
+        self.errors[CHECKERBOARDED_PIXEL_ERROR] = (
+          'checkerboarded_pixel_percentages was not recorded')
+
+  def _InitFrameQueueingDurationsFromTimeline(self, process, timeline_range):
+    try:
+      events = rendering_frame.GetFrameEventsInsideRange(process,
+                                                         timeline_range)
+      new_frame_queueing_durations = [e.queueing_duration for e in events]
+      self.frame_queueing_durations.append(new_frame_queueing_durations)
+    except rendering_frame.NoBeginFrameIdException:
+      self.errors['frame_queueing_durations'] = (
+          'Current chrome version does not support the queueing delay metric.')
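
For reference, the begin-component lookup in ComputeEventLatencies above reduces
to taking the first creation timestamp available in a fixed priority order and
converting the microsecond delta to milliseconds (the real function also falls
back to the scroll-update begin component). A minimal standalone sketch of that
idea on hand-built dictionaries, with all values invented:

# Sketch only: mirrors the begin-component priority order used above.
ORIGINAL = 'INPUT_EVENT_LATENCY_ORIGINAL_COMPONENT'
UI = 'INPUT_EVENT_LATENCY_UI_COMPONENT'
BEGIN = 'INPUT_EVENT_LATENCY_BEGIN_RWH_COMPONENT'
END = 'INPUT_EVENT_GPU_SWAP_BUFFER_COMPONENT'

def _BeginTime(data):
  # The first component present, in priority order, supplies the creation time.
  for comp in (ORIGINAL, UI, BEGIN):
    if comp in data:
      return data[comp]['time']
  raise ValueError('LatencyInfo has no begin component')

fake_data = {UI: {'time': 1500.0}, END: {'time': 17500.0}}  # microseconds
latency_ms = (fake_data[END]['time'] - _BeginTime(fake_data)) / 1000.0
assert latency_ms == 16.0
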
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/rendering_stats_unittest.py b/catapult/telemetry/telemetry/web_perf/metrics/rendering_stats_unittest.py
new file mode 100644
index 0000000..0609bbd
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/rendering_stats_unittest.py
@@ -0,0 +1,543 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import random
+import unittest
+
+from telemetry.timeline import async_slice
+from telemetry.timeline import bounds
+from telemetry.timeline import model
+from telemetry.util import perf_tests_helper
+from telemetry.util import statistics
+from telemetry.web_perf.metrics import rendering_stats
+
+
+class MockTimer(object):
+  """A mock timer class which can generate random durations.
+
+  An instance of this class is used as a global timer to generate random
+  durations for stats and consistent timestamps for all mock trace events.
+  The unit of time is milliseconds.
+  """
+
+  def __init__(self):
+    self.milliseconds = 0
+
+  def Advance(self, low=0.1, high=1):
+    delta = random.uniform(low, high)
+    self.milliseconds += delta
+    return delta
+
+  def AdvanceAndGet(self, low=0.1, high=1):
+    self.Advance(low, high)
+    return self.milliseconds
+
+
+class ReferenceRenderingStats(object):
+  """ Stores expected data for comparison with actual RenderingStats """
+
+  def __init__(self):
+    self.frame_timestamps = []
+    self.frame_times = []
+    self.approximated_pixel_percentages = []
+    self.checkerboarded_pixel_percentages = []
+
+  def AppendNewRange(self):
+    self.frame_timestamps.append([])
+    self.frame_times.append([])
+    self.approximated_pixel_percentages.append([])
+    self.checkerboarded_pixel_percentages.append([])
+
+
+class ReferenceInputLatencyStats(object):
+  """ Stores expected data for comparison with actual input latency stats """
+
+  def __init__(self):
+    self.input_event_latency = []
+    self.input_event = []
+
+
+def AddSurfaceFlingerStats(mock_timer, thread, first_frame,
+                           ref_stats=None):
+  """ Adds a random surface flinger stats event.
+
+  thread: The timeline model thread to which the event will be added.
+  first_frame: Is this the first frame within the bounds of an action?
+  ref_stats: A ReferenceRenderingStats object to record expected values.
+  """
+  # Create random data and a timestamp for impl thread rendering stats.
+  data = {'frame_count': 1,
+          'refresh_period': 16.6666}
+  timestamp = mock_timer.AdvanceAndGet()
+
+  # Add a slice with the event data to the given thread.
+  thread.PushCompleteSlice(
+      'SurfaceFlinger', 'vsync_before',
+      timestamp, duration=0.0, thread_timestamp=None, thread_duration=None,
+      args={'data': data})
+
+  if not ref_stats:
+    return
+
+  # Add timestamp only if a frame was output
+  if data['frame_count'] == 1:
+    if not first_frame:
+      # Add frame_time if this is not the first frame within the bounds of an
+      # action.
+      prev_timestamp = ref_stats.frame_timestamps[-1][-1]
+      ref_stats.frame_times[-1].append(timestamp - prev_timestamp)
+    ref_stats.frame_timestamps[-1].append(timestamp)
+
+
+def AddDisplayRenderingStats(mock_timer, thread, first_frame,
+                             ref_stats=None):
+  """ Adds a random display rendering stats event.
+
+  thread: The timeline model thread to which the event will be added.
+  first_frame: Is this the first frame within the bounds of an action?
+  ref_stats: A ReferenceRenderingStats object to record expected values.
+  """
+  # Create random data and a timestamp for main thread rendering stats.
+  data = {'frame_count': 1}
+  timestamp = mock_timer.AdvanceAndGet()
+
+  # Add a slice with the event data to the given thread.
+  thread.PushCompleteSlice(
+      'benchmark', 'BenchmarkInstrumentation::DisplayRenderingStats',
+      timestamp, duration=0.0, thread_timestamp=None, thread_duration=None,
+      args={'data': data})
+
+  if not ref_stats:
+    return
+
+  # Add timestamp only if a frame was output
+  if not first_frame:
+    # Add frame_time if this is not the first frame within the bounds of an
+    # action.
+    prev_timestamp = ref_stats.frame_timestamps[-1][-1]
+    ref_stats.frame_times[-1].append(timestamp - prev_timestamp)
+  ref_stats.frame_timestamps[-1].append(timestamp)
+
+
+def AddImplThreadRenderingStats(mock_timer, thread, first_frame,
+                                ref_stats=None):
+  """ Adds a random impl thread rendering stats event.
+
+  thread: The timeline model thread to which the event will be added.
+  first_frame: Is this the first frame within the bounds of an action?
+  ref_stats: A ReferenceRenderingStats object to record expected values.
+  """
+  # Create random data and a timestamp for impl thread rendering stats.
+  data = {'frame_count': 1,
+          'visible_content_area': random.uniform(0, 100),
+          'approximated_visible_content_area': random.uniform(0, 5),
+          'checkerboarded_visible_content_area': random.uniform(0, 5)}
+  timestamp = mock_timer.AdvanceAndGet()
+
+  # Add a slice with the event data to the given thread.
+  thread.PushCompleteSlice(
+      'benchmark', 'BenchmarkInstrumentation::ImplThreadRenderingStats',
+      timestamp, duration=0.0, thread_timestamp=None, thread_duration=None,
+      args={'data': data})
+
+  if not ref_stats:
+    return
+
+  # Add timestamp only if a frame was output
+  if data['frame_count'] == 1:
+    if not first_frame:
+      # Add frame_time if this is not the first frame within the bounds of an
+      # action.
+      prev_timestamp = ref_stats.frame_timestamps[-1][-1]
+      ref_stats.frame_times[-1].append(timestamp - prev_timestamp)
+    ref_stats.frame_timestamps[-1].append(timestamp)
+
+  ref_stats.approximated_pixel_percentages[-1].append(
+      round(statistics.DivideIfPossibleOrZero(
+          data['approximated_visible_content_area'],
+          data['visible_content_area']) * 100.0, 3))
+
+  ref_stats.checkerboarded_pixel_percentages[-1].append(
+      round(statistics.DivideIfPossibleOrZero(
+          data['checkerboarded_visible_content_area'],
+          data['visible_content_area']) * 100.0, 3))
+
+def AddInputLatencyStats(mock_timer, start_thread, end_thread,
+                         ref_latency_stats=None):
+  """ Adds a random input latency stats event.
+
+  start_thread: The start thread on which the async slice is added.
+  end_thread: The end thread on which the async slice is ended.
+  ref_latency_stats: A ReferenceInputLatencyStats object for expected values.
+  """
+
+  original_comp_time = mock_timer.AdvanceAndGet(2, 4) * 1000.0
+  ui_comp_time = mock_timer.AdvanceAndGet(2, 4) * 1000.0
+  begin_comp_time = mock_timer.AdvanceAndGet(2, 4) * 1000.0
+  forward_comp_time = mock_timer.AdvanceAndGet(2, 4) * 1000.0
+  end_comp_time = mock_timer.AdvanceAndGet(10, 20) * 1000.0
+
+  data = {rendering_stats.ORIGINAL_COMP_NAME: {'time': original_comp_time},
+          rendering_stats.UI_COMP_NAME: {'time': ui_comp_time},
+          rendering_stats.BEGIN_COMP_NAME: {'time': begin_comp_time},
+          rendering_stats.END_COMP_NAME: {'time': end_comp_time}}
+
+  timestamp = mock_timer.AdvanceAndGet(2, 4)
+
+  tracing_async_slice = async_slice.AsyncSlice(
+      'benchmark', 'InputLatency', timestamp)
+
+  async_sub_slice = async_slice.AsyncSlice(
+      'benchmark', rendering_stats.GESTURE_SCROLL_UPDATE_EVENT_NAME, timestamp)
+  async_sub_slice.args = {'data': data}
+  async_sub_slice.parent_slice = tracing_async_slice
+  async_sub_slice.start_thread = start_thread
+  async_sub_slice.end_thread = end_thread
+
+  tracing_async_slice.sub_slices.append(async_sub_slice)
+  tracing_async_slice.start_thread = start_thread
+  tracing_async_slice.end_thread = end_thread
+  start_thread.AddAsyncSlice(tracing_async_slice)
+
+  # Add scroll update latency info.
+  scroll_update_data = {
+      rendering_stats.BEGIN_SCROLL_UPDATE_COMP_NAME: {'time': begin_comp_time},
+      rendering_stats.FORWARD_SCROLL_UPDATE_COMP_NAME:
+          {'time': forward_comp_time},
+      rendering_stats.END_COMP_NAME: {'time': end_comp_time}
+  }
+
+  scroll_async_slice = async_slice.AsyncSlice(
+      'benchmark', 'InputLatency', timestamp)
+
+  scroll_async_sub_slice = async_slice.AsyncSlice(
+      'benchmark', rendering_stats.MAIN_THREAD_SCROLL_UPDATE_EVENT_NAME,
+      timestamp)
+  scroll_async_sub_slice.args = {'data': scroll_update_data}
+  scroll_async_sub_slice.parent_slice = scroll_async_slice
+  scroll_async_sub_slice.start_thread = start_thread
+  scroll_async_sub_slice.end_thread = end_thread
+
+  scroll_async_slice.sub_slices.append(scroll_async_sub_slice)
+  scroll_async_slice.start_thread = start_thread
+  scroll_async_slice.end_thread = end_thread
+  start_thread.AddAsyncSlice(scroll_async_slice)
+
+  # Also add some dummy frame statistics so we can feed the resulting timeline
+  # to RenderingStats.
+  AddImplThreadRenderingStats(mock_timer, end_thread, False)
+
+  if not ref_latency_stats:
+    return
+
+  ref_latency_stats.input_event.append(async_sub_slice)
+  ref_latency_stats.input_event.append(scroll_async_sub_slice)
+  ref_latency_stats.input_event_latency.append((
+      rendering_stats.GESTURE_SCROLL_UPDATE_EVENT_NAME,
+      (data[rendering_stats.END_COMP_NAME]['time'] -
+       data[rendering_stats.ORIGINAL_COMP_NAME]['time']) / 1000.0))
+  scroll_update_time = (
+      scroll_update_data[rendering_stats.END_COMP_NAME]['time'] -
+      scroll_update_data[rendering_stats.BEGIN_SCROLL_UPDATE_COMP_NAME]['time'])
+  ref_latency_stats.input_event_latency.append((
+      rendering_stats.MAIN_THREAD_SCROLL_UPDATE_EVENT_NAME,
+      scroll_update_time / 1000.0))
+
+
+class RenderingStatsUnitTest(unittest.TestCase):
+
+  def testHasRenderingStats(self):
+    timeline = model.TimelineModel()
+    timer = MockTimer()
+
+    # A process without rendering stats
+    process_without_stats = timeline.GetOrCreateProcess(pid=1)
+    thread_without_stats = process_without_stats.GetOrCreateThread(tid=11)
+    process_without_stats.FinalizeImport()
+    self.assertFalse(rendering_stats.HasRenderingStats(thread_without_stats))
+
+    # A process with rendering stats, but no frames in them
+    process_without_frames = timeline.GetOrCreateProcess(pid=2)
+    thread_without_frames = process_without_frames.GetOrCreateThread(tid=21)
+    process_without_frames.FinalizeImport()
+    self.assertFalse(rendering_stats.HasRenderingStats(thread_without_frames))
+
+    # A process with rendering stats and frames in them
+    process_with_frames = timeline.GetOrCreateProcess(pid=3)
+    thread_with_frames = process_with_frames.GetOrCreateThread(tid=31)
+    AddImplThreadRenderingStats(timer, thread_with_frames, True, None)
+    process_with_frames.FinalizeImport()
+    self.assertTrue(rendering_stats.HasRenderingStats(thread_with_frames))
+
+  def testBothSurfaceFlingerAndDisplayStats(self):
+    timeline = model.TimelineModel()
+    timer = MockTimer()
+
+    ref_stats = ReferenceRenderingStats()
+    ref_stats.AppendNewRange()
+    surface_flinger = timeline.GetOrCreateProcess(pid=4)
+    surface_flinger.name = 'SurfaceFlinger'
+    surface_flinger_thread = surface_flinger.GetOrCreateThread(tid=41)
+    renderer = timeline.GetOrCreateProcess(pid=2)
+    browser = timeline.GetOrCreateProcess(pid=3)
+    browser_main = browser.GetOrCreateThread(tid=31)
+    browser_main.BeginSlice('webkit.console', 'ActionA',
+                            timer.AdvanceAndGet(2, 4), '')
+
+    # Create SurfaceFlinger stats and display rendering stats.
+    for i in xrange(0, 10):
+      first = (i == 0)
+      AddSurfaceFlingerStats(timer, surface_flinger_thread, first, ref_stats)
+      timer.Advance(2, 4)
+
+    for i in xrange(0, 10):
+      first = (i == 0)
+      AddDisplayRenderingStats(timer, browser_main, first, None)
+      timer.Advance(5, 10)
+
+    browser_main.EndSlice(timer.AdvanceAndGet())
+    timer.Advance(2, 4)
+
+    browser.FinalizeImport()
+    renderer.FinalizeImport()
+    timeline_markers = timeline.FindTimelineMarkers(['ActionA'])
+    timeline_ranges = [bounds.Bounds.CreateFromEvent(marker)
+                       for marker in timeline_markers]
+    stats = rendering_stats.RenderingStats(
+        renderer, browser, surface_flinger, timeline_ranges)
+
+    # Compare rendering stats to reference - Only SurfaceFlinger stats should
+    # count
+    self.assertEquals(stats.frame_timestamps, ref_stats.frame_timestamps)
+    self.assertEquals(stats.frame_times, ref_stats.frame_times)
+
+  def testBothDisplayAndImplStats(self):
+    timeline = model.TimelineModel()
+    timer = MockTimer()
+
+    ref_stats = ReferenceRenderingStats()
+    ref_stats.AppendNewRange()
+    renderer = timeline.GetOrCreateProcess(pid=2)
+    browser = timeline.GetOrCreateProcess(pid=3)
+    browser_main = browser.GetOrCreateThread(tid=31)
+    browser_main.BeginSlice('webkit.console', 'ActionA',
+                            timer.AdvanceAndGet(2, 4), '')
+
+    # Create main, impl, and display rendering stats.
+    for i in xrange(0, 10):
+      first = (i == 0)
+      AddImplThreadRenderingStats(timer, browser_main, first, None)
+      timer.Advance(2, 4)
+
+    for i in xrange(0, 10):
+      first = (i == 0)
+      AddDisplayRenderingStats(timer, browser_main, first, ref_stats)
+      timer.Advance(5, 10)
+
+    browser_main.EndSlice(timer.AdvanceAndGet())
+    timer.Advance(2, 4)
+
+    browser.FinalizeImport()
+    renderer.FinalizeImport()
+    timeline_markers = timeline.FindTimelineMarkers(['ActionA'])
+    timeline_ranges = [bounds.Bounds.CreateFromEvent(marker)
+                       for marker in timeline_markers]
+    stats = rendering_stats.RenderingStats(
+        renderer, browser, None, timeline_ranges)
+
+    # Compare rendering stats to reference - Only display stats should count
+    self.assertEquals(stats.frame_timestamps, ref_stats.frame_timestamps)
+    self.assertEquals(stats.frame_times, ref_stats.frame_times)
+
+  def testRangeWithoutFrames(self):
+    timer = MockTimer()
+    timeline = model.TimelineModel()
+
+    # Create a renderer process, with a main thread and impl thread.
+    renderer = timeline.GetOrCreateProcess(pid=2)
+    renderer_main = renderer.GetOrCreateThread(tid=21)
+    renderer_compositor = renderer.GetOrCreateThread(tid=22)
+
+    # Create 10 main and impl rendering stats events for Action A.
+    renderer_main.BeginSlice('webkit.console', 'ActionA',
+                             timer.AdvanceAndGet(2, 4), '')
+    for i in xrange(0, 10):
+      first = (i == 0)
+      AddImplThreadRenderingStats(timer, renderer_compositor, first, None)
+    renderer_main.EndSlice(timer.AdvanceAndGet(2, 4))
+    timer.Advance(2, 4)
+
+    # Create 5 main and impl rendering stats events not within any action.
+    for i in xrange(0, 5):
+      first = (i == 0)
+      AddImplThreadRenderingStats(timer, renderer_compositor, first, None)
+
+    # Create Action B without any frames. This should trigger
+    # NotEnoughFramesError when the RenderingStats object is created.
+    renderer_main.BeginSlice('webkit.console', 'ActionB',
+                             timer.AdvanceAndGet(2, 4), '')
+    renderer_main.EndSlice(timer.AdvanceAndGet(2, 4))
+
+    renderer.FinalizeImport()
+
+    timeline_markers = timeline.FindTimelineMarkers(['ActionA', 'ActionB'])
+    timeline_ranges = [bounds.Bounds.CreateFromEvent(marker)
+                       for marker in timeline_markers]
+
+    stats = rendering_stats.RenderingStats(
+        renderer, None, None, timeline_ranges)
+    self.assertEquals(0, len(stats.frame_timestamps[1]))
+
+  def testFromTimeline(self):
+    timeline = model.TimelineModel()
+
+    # Create a browser process and a renderer process, and a main thread and
+    # impl thread for each.
+    browser = timeline.GetOrCreateProcess(pid=1)
+    browser_compositor = browser.GetOrCreateThread(tid=12)
+    renderer = timeline.GetOrCreateProcess(pid=2)
+    renderer_main = renderer.GetOrCreateThread(tid=21)
+    renderer_compositor = renderer.GetOrCreateThread(tid=22)
+
+    timer = MockTimer()
+    renderer_ref_stats = ReferenceRenderingStats()
+    browser_ref_stats = ReferenceRenderingStats()
+
+    # Create 10 main and impl rendering stats events for Action A.
+    renderer_main.BeginSlice('webkit.console', 'ActionA',
+                             timer.AdvanceAndGet(2, 4), '')
+    renderer_ref_stats.AppendNewRange()
+    browser_ref_stats.AppendNewRange()
+    for i in xrange(0, 10):
+      first = (i == 0)
+      AddImplThreadRenderingStats(
+          timer, renderer_compositor, first, renderer_ref_stats)
+      AddImplThreadRenderingStats(
+          timer, browser_compositor, first, browser_ref_stats)
+    renderer_main.EndSlice(timer.AdvanceAndGet(2, 4))
+
+    # Create 5 main and impl rendering stats events not within any action.
+    for i in xrange(0, 5):
+      first = (i == 0)
+      AddImplThreadRenderingStats(timer, renderer_compositor, first, None)
+      AddImplThreadRenderingStats(timer, browser_compositor, first, None)
+
+    # Create 10 main and impl rendering stats events for Action B.
+    renderer_main.BeginSlice('webkit.console', 'ActionB',
+                             timer.AdvanceAndGet(2, 4), '')
+    renderer_ref_stats.AppendNewRange()
+    browser_ref_stats.AppendNewRange()
+    for i in xrange(0, 10):
+      first = (i == 0)
+      AddImplThreadRenderingStats(
+          timer, renderer_compositor, first, renderer_ref_stats)
+      AddImplThreadRenderingStats(
+          timer, browser_compositor, first, browser_ref_stats)
+    renderer_main.EndSlice(timer.AdvanceAndGet(2, 4))
+
+    # Create 10 main and impl rendering stats events for Action A.
+    renderer_main.BeginSlice('webkit.console', 'ActionA',
+                             timer.AdvanceAndGet(2, 4), '')
+    renderer_ref_stats.AppendNewRange()
+    browser_ref_stats.AppendNewRange()
+    for i in xrange(0, 10):
+      first = (i == 0)
+      AddImplThreadRenderingStats(
+          timer, renderer_compositor, first, renderer_ref_stats)
+      AddImplThreadRenderingStats(
+          timer, browser_compositor, first, browser_ref_stats)
+    renderer_main.EndSlice(timer.AdvanceAndGet(2, 4))
+    timer.Advance(2, 4)
+
+    browser.FinalizeImport()
+    renderer.FinalizeImport()
+
+    timeline_markers = timeline.FindTimelineMarkers(
+        ['ActionA', 'ActionB', 'ActionA'])
+    timeline_ranges = [bounds.Bounds.CreateFromEvent(marker)
+                       for marker in timeline_markers]
+    stats = rendering_stats.RenderingStats(
+        renderer, browser, None, timeline_ranges)
+
+    # Compare rendering stats to reference.
+    self.assertEquals(stats.frame_timestamps,
+                      browser_ref_stats.frame_timestamps)
+    self.assertEquals(stats.frame_times, browser_ref_stats.frame_times)
+    self.assertEquals(stats.approximated_pixel_percentages,
+                      renderer_ref_stats.approximated_pixel_percentages)
+    self.assertEquals(stats.checkerboarded_pixel_percentages,
+                      renderer_ref_stats.checkerboarded_pixel_percentages)
+
+  def testInputLatencyFromTimeline(self):
+    timeline = model.TimelineModel()
+
+    # Create a browser process and a renderer process.
+    browser = timeline.GetOrCreateProcess(pid=1)
+    browser_main = browser.GetOrCreateThread(tid=11)
+    renderer = timeline.GetOrCreateProcess(pid=2)
+    renderer_main = renderer.GetOrCreateThread(tid=21)
+
+    timer = MockTimer()
+    ref_latency = ReferenceInputLatencyStats()
+
+    # Create 10 input latency stats events for Action A.
+    renderer_main.BeginSlice('webkit.console', 'ActionA',
+                             timer.AdvanceAndGet(2, 4), '')
+    for _ in xrange(0, 10):
+      AddInputLatencyStats(timer, browser_main, renderer_main, ref_latency)
+    renderer_main.EndSlice(timer.AdvanceAndGet(2, 4))
+
+    # Create 5 input latency stats events not within any action.
+    timer.Advance(2, 4)
+    for _ in xrange(0, 5):
+      AddInputLatencyStats(timer, browser_main, renderer_main, None)
+
+    # Create 10 input latency stats events for Action B.
+    renderer_main.BeginSlice('webkit.console', 'ActionB',
+                             timer.AdvanceAndGet(2, 4), '')
+    for _ in xrange(0, 10):
+      AddInputLatencyStats(timer, browser_main, renderer_main, ref_latency)
+    renderer_main.EndSlice(timer.AdvanceAndGet(2, 4))
+
+    # Create 10 input latency stats events for Action A.
+    renderer_main.BeginSlice('webkit.console', 'ActionA',
+                             timer.AdvanceAndGet(2, 4), '')
+    for _ in xrange(0, 10):
+      AddInputLatencyStats(timer, browser_main, renderer_main, ref_latency)
+    renderer_main.EndSlice(timer.AdvanceAndGet(2, 4))
+
+    browser.FinalizeImport()
+    renderer.FinalizeImport()
+
+    latency_events = []
+
+    timeline_markers = timeline.FindTimelineMarkers(
+        ['ActionA', 'ActionB', 'ActionA'])
+    timeline_ranges = [bounds.Bounds.CreateFromEvent(marker)
+                       for marker in timeline_markers]
+    for timeline_range in timeline_ranges:
+      if timeline_range.is_empty:
+        continue
+      latency_events.extend(rendering_stats.GetLatencyEvents(
+          browser, timeline_range))
+
+    self.assertEquals(latency_events, ref_latency.input_event)
+    event_latency_result = rendering_stats.ComputeEventLatencies(latency_events)
+    self.assertEquals(event_latency_result,
+                      ref_latency.input_event_latency)
+
+    stats = rendering_stats.RenderingStats(
+        renderer, browser, None, timeline_ranges)
+    self.assertEquals(
+        perf_tests_helper.FlattenList(stats.input_event_latency),
+        [latency for name, latency in ref_latency.input_event_latency
+         if name != rendering_stats.MAIN_THREAD_SCROLL_UPDATE_EVENT_NAME])
+    self.assertEquals(
+        perf_tests_helper.FlattenList(stats.main_thread_scroll_latency),
+        [latency for name, latency in ref_latency.input_event_latency
+         if name == rendering_stats.MAIN_THREAD_SCROLL_UPDATE_EVENT_NAME])
+    self.assertEquals(
+        perf_tests_helper.FlattenList(stats.gesture_scroll_update_latency),
+        [latency for name, latency in ref_latency.input_event_latency
+         if name == rendering_stats.GESTURE_SCROLL_UPDATE_EVENT_NAME])
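
Both RenderingStats and the reference stats in this test compute the same pixel
percentages: the approximated (or checkerboarded) visible content area divided
by the total visible content area, times 100, rounded to three decimals. A
standalone sketch of that arithmetic on invented numbers (the zero guard stands
in for the divide-by-zero handling; it is not statistics.DivideIfPossibleOrZero
itself):

# Sketch only: pixel-percentage arithmetic on invented content areas.
def _PixelPercentage(part_area, visible_area):
  if visible_area == 0:
    # RenderingStats records an error for this case instead of a value.
    return 0.0
  return round(float(part_area) / float(visible_area) * 100.0, 3)

assert _PixelPercentage(5.0, 100.0) == 5.0
assert _PixelPercentage(1.234, 90.0) == 1.371
assert _PixelPercentage(3.0, 0.0) == 0.0
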
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/responsiveness_metric.py b/catapult/telemetry/telemetry/web_perf/metrics/responsiveness_metric.py
new file mode 100644
index 0000000..8dde951
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/responsiveness_metric.py
@@ -0,0 +1,55 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import logging
+
+from telemetry.value import improvement_direction
+from telemetry.value import scalar
+from telemetry.web_perf.metrics import mainthread_jank_stats
+from telemetry.web_perf.metrics import timeline_based_metric
+from telemetry.web_perf import timeline_interaction_record as tir_module
+
+
+class ResponsivenessMetric(timeline_based_metric.TimelineBasedMetric):
+  """Computes metrics that measure respsonsiveness on the record ranges.
+
+      total_big_jank_thread_time is the total thread duration of all top
+      slices whose thread time ranges overlapped with any thread time ranges of
+      the records and the overlapped thread duration is greater than or equal
+      USER_PERCEIVABLE_DELAY_THRESHOLD_MS.
+
+      biggest_jank_thread_time is the biggest thread duration of all
+      top slices whose thread time ranges overlapped with any of records' thread
+      time ranges.
+
+     All *_time values are measured in milliseconds.
+  """
+
+  def __init__(self):
+    super(ResponsivenessMetric, self).__init__()
+
+  def AddResults(self, _, renderer_thread, interaction_records, results):
+    self.VerifyNonOverlappedRecords(interaction_records)
+    try:
+      jank_stats = mainthread_jank_stats.MainthreadJankStats(
+          renderer_thread, interaction_records)
+    # TODO(nednguyen): maybe fall back to use wall-time for computing the
+    # metrics.
+    except tir_module.NoThreadTimeDataException as e:
+      # TODO(nednguyen): Report the warning with the page_results system.
+      logging.warning(
+          'Main thread jank metrics cannot be computed for records %s since '
+          'trace does not contain thread time data. %s',
+          repr(interaction_records), repr(e))
+      return
+
+    results.AddValue(scalar.ScalarValue(
+        results.current_page, 'responsive-total_big_jank_thread_time', 'ms',
+        jank_stats.total_big_jank_thread_time,
+        tir_label=interaction_records[0].label,
+        improvement_direction=improvement_direction.DOWN))
+    results.AddValue(scalar.ScalarValue(
+        results.current_page, 'responsive-biggest_jank_thread_time', 'ms',
+        jank_stats.biggest_jank_thread_time,
+        tir_label=interaction_records[0].label,
+        improvement_direction=improvement_direction.DOWN))
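
MainthreadJankStats is defined elsewhere, but read literally the docstring above
reduces to a threshold sum and a maximum over the overlapped thread durations of
top slices. A rough standalone sketch of those two definitions on invented
numbers (the threshold value is assumed for illustration; this is not the real
implementation):

# Sketch only: invented overlapped thread durations (ms) of top slices.
USER_PERCEIVABLE_DELAY_THRESHOLD_MS = 100  # assumed value for illustration
overlapped_durations = [30, 150, 90, 240]

total_big_jank_thread_time = sum(
    d for d in overlapped_durations
    if d >= USER_PERCEIVABLE_DELAY_THRESHOLD_MS)
biggest_jank_thread_time = max(overlapped_durations)

assert total_big_jank_thread_time == 390
assert biggest_jank_thread_time == 240
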
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/single_event.py b/catapult/telemetry/telemetry/web_perf/metrics/single_event.py
new file mode 100644
index 0000000..5d510c3
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/single_event.py
@@ -0,0 +1,46 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.value import improvement_direction
+from telemetry.value import list_of_scalar_values
+from telemetry.web_perf.metrics import timeline_based_metric
+
+
+class _SingleEventMetric(timeline_based_metric.TimelineBasedMetric):
+  """Reports directly durations of specific trace events that start during the
+  user interaction.
+  """
+
+  def __init__(self, trace_event_name, metric_name, metric_description=None):
+    super(_SingleEventMetric, self).__init__()
+    self._TRACE_EVENT_NAME = trace_event_name
+    self._metric_name = metric_name
+    self._metric_description = metric_description
+
+  def AddResults(self, model, renderer_thread, interactions, results):
+    del model  # unused
+    assert interactions
+    self._AddResultsInternal(renderer_thread.parent.IterAllSlices(),
+                             interactions, results)
+
+  def _AddResultsInternal(self, events, interactions, results):
+    events_found = []
+    for event in events:
+      if (event.name == self._TRACE_EVENT_NAME) and any(
+              interaction.start <= event.start <= interaction.end
+              for interaction in interactions):
+        if event.has_thread_timestamps:
+          events_found.append(event.thread_duration)
+        else:
+          events_found.append(event.duration)
+    if not events_found:
+      return
+    results.AddValue(list_of_scalar_values.ListOfScalarValues(
+      page=results.current_page,
+      tir_label=interactions[0].label,
+      name=self._metric_name,
+      units='ms',
+      values=events_found,
+      description=self._metric_description,
+      improvement_direction=improvement_direction.DOWN))
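
The intended usage pattern for _SingleEventMetric is a thin subclass that pins
the trace event name and the reported metric name; the unit test that follows
builds exactly such a subclass. A quick sketch of that pattern (the class name
and description below are hypothetical):

from telemetry.web_perf.metrics import single_event

class LayoutMetric(single_event._SingleEventMetric):  # hypothetical example
  def __init__(self):
    super(LayoutMetric, self).__init__(
        'FrameView::performLayout', 'layout',
        metric_description='Durations of FrameView::performLayout events '
                           'that start during the interaction.')
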
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/single_event_unittest.py b/catapult/telemetry/telemetry/web_perf/metrics/single_event_unittest.py
new file mode 100644
index 0000000..f96ef9f
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/single_event_unittest.py
@@ -0,0 +1,72 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from collections import namedtuple
+from telemetry.internal.results import page_test_results
+from telemetry.page import page
+from telemetry.web_perf.metrics import single_event
+from telemetry.web_perf import timeline_interaction_record
+
+TRACE_EVENT_NAME = 'FrameView::performLayout'
+METRIC_NAME = 'layout'
+FakeEventTuple = namedtuple(
+    'Event',
+    'start, end, name, duration, thread_duration, has_thread_timestamps')
+Interaction = timeline_interaction_record.TimelineInteractionRecord
+
+class SingleEventTestMetric(single_event._SingleEventMetric):
+  def __init__(self):
+    super(SingleEventTestMetric, self).__init__(TRACE_EVENT_NAME, METRIC_NAME)
+
+def GetSingleEventMetrics(events, interactions):
+  results = page_test_results.PageTestResults()
+  results.WillRunPage(page.Page('file://blank.html'))
+  SingleEventTestMetric()._AddResultsInternal(events, interactions, results)
+  return dict((value.name, value.values) for value in
+              results.current_page_run.values)
+
+def FakeEvent(start, end, name=TRACE_EVENT_NAME):
+  dur = end - start
+  return FakeEventTuple(start, end, name, dur, dur, True)
+
+
+class SingleEventMetricUnitTest(unittest.TestCase):
+  def testSingleEventMetric(self):
+    events = [FakeEvent(0, 1),
+              FakeEvent(9, 11),
+              FakeEventTuple(10, 13, TRACE_EVENT_NAME, 3, 0, False),
+              FakeEvent(20, 24),
+              FakeEvent(21, 26),
+              FakeEvent(29, 35),
+              FakeEvent(30, 37),
+              FakeEvent(40, 48),
+              FakeEvent(41, 50),
+              FakeEvent(10, 13, name='something'),
+              FakeEvent(20, 24, name='FrameView::something'),
+              FakeEvent(30, 37, name='SomeThing::performLayout'),
+              FakeEvent(40, 48, name='something else')]
+    interactions = [Interaction('interaction', 10, 20),
+                    Interaction('interaction', 30, 40)]
+
+    self.assertFalse(GetSingleEventMetrics(events, []))
+    self.assertFalse(GetSingleEventMetrics([], interactions))
+
+    # The first event starts before the first interaction, so it is ignored.
+    # The second event starts before the first interaction, so it is ignored.
+    # The third event starts during the first interaction, and its duration is
+    # 13 - 10 = 3.
+    # The fourth event starts during the first interaction, and its duration is
+    # 24 - 20 = 4.
+    # The fifth event starts between the two interactions, so it is ignored.
+    # The sixth event starts between the two interactions, so it is ignored.
+    # The seventh event starts during the second interaction, and its duration
+    # is 37 - 30 = 7.
+    # The eighth event starts during the second interaction, and its duration is
+    # 48 - 40 = 8.
+    # The ninth event starts after the last interaction, so it is ignored.
+    # The rest of the events have the wrong name, so they are ignored.
+    self.assertEqual({METRIC_NAME: [3, 4, 7, 8]}, GetSingleEventMetrics(
+        events, interactions))
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/smoothness.py b/catapult/telemetry/telemetry/web_perf/metrics/smoothness.py
new file mode 100644
index 0000000..fee30ff
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/smoothness.py
@@ -0,0 +1,349 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+
+from telemetry.util import perf_tests_helper
+from telemetry.util import statistics
+from telemetry.value import improvement_direction
+from telemetry.value import list_of_scalar_values
+from telemetry.value import scalar
+from telemetry.web_perf.metrics import rendering_stats
+from telemetry.web_perf.metrics import timeline_based_metric
+
+
+NOT_ENOUGH_FRAMES_MESSAGE = (
+  'Not enough frames for smoothness metrics (at least two are required).\n'
+  'Issues that have caused this in the past:\n'
+  '- Browser bugs that prevent the page from redrawing\n'
+  '- Bugs in the synthetic gesture code\n'
+  '- Page and benchmark out of sync (e.g. clicked element was renamed)\n'
+  '- Pages that render extremely slowly\n'
+  '- Pages that can\'t be scrolled')
+
+
+class SmoothnessMetric(timeline_based_metric.TimelineBasedMetric):
+  """Computes metrics that measure smoothness of animations over given ranges.
+
+  Animations are typically considered smooth if the frame rates are close to
+  60 frames per second (fps) and uniformly distributed over the sequence. To
+  determine if a timeline range contains a smooth animation, we update the
+  results object with several representative metrics:
+
+    frame_times: A list of raw frame times
+    mean_frame_time: The arithmetic mean of frame times
+    percentage_smooth: Percentage of frames that were hitting 60 FPS.
+    frame_time_discrepancy: The absolute discrepancy of frame timestamps
+    mean_pixels_approximated: The mean percentage of pixels approximated
+    queueing_durations: The queueing delay between compositor & main threads
+
+  Note that if any of the interaction records provided to AddResults has fewer
+  than two frames, we will return telemetry values of None for each of the
+  smoothness metrics. Similarly, older browsers without support for
+  tracking the BeginMainFrame events will report a ListOfScalarValues with a
+  None value for the queueing duration metric.
+  """
+
+  def __init__(self):
+    super(SmoothnessMetric, self).__init__()
+
+  def AddResults(self, model, renderer_thread, interaction_records, results):
+    self.VerifyNonOverlappedRecords(interaction_records)
+    renderer_process = renderer_thread.parent
+    stats = rendering_stats.RenderingStats(
+      renderer_process, model.browser_process, model.surface_flinger_process,
+      [r.GetBounds() for r in interaction_records])
+    has_surface_flinger_stats = model.surface_flinger_process is not None
+    self._PopulateResultsFromStats(results, stats, has_surface_flinger_stats)
+
+  def _PopulateResultsFromStats(self, results, stats,
+                                has_surface_flinger_stats):
+    page = results.current_page
+    values = [
+        self._ComputeQueueingDuration(page, stats),
+        self._ComputeFrameTimeDiscrepancy(page, stats),
+        self._ComputeMeanPixelsApproximated(page, stats),
+        self._ComputeMeanPixelsCheckerboarded(page, stats)
+    ]
+    values += self._ComputeLatencyMetric(page, stats, 'input_event_latency',
+                                         stats.input_event_latency)
+    values += self._ComputeLatencyMetric(page, stats,
+                                         'main_thread_scroll_latency',
+                                         stats.main_thread_scroll_latency)
+    values.append(self._ComputeFirstGestureScrollUpdateLatencies(page, stats))
+    values += self._ComputeFrameTimeMetric(page, stats)
+    if has_surface_flinger_stats:
+      values += self._ComputeSurfaceFlingerMetric(page, stats)
+
+    for v in values:
+      results.AddValue(v)
+
+  def _HasEnoughFrames(self, list_of_frame_timestamp_lists):
+    """Whether we have collected at least two frames in every timestamp list."""
+    return all(len(s) >= 2 for s in list_of_frame_timestamp_lists)
+
+  @staticmethod
+  def _GetNormalizedDeltas(data, refresh_period, min_normalized_delta=None):
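+    # Worked example (illustrative): data=[0, 16, 50] with refresh_period=16.0
+    # yields deltas=[16, 34] and normalized deltas=[1.0, 2.125]; with
+    # min_normalized_delta=0.5 both deltas survive the filter.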
+    deltas = [t2 - t1 for t1, t2 in zip(data, data[1:])]
+    if min_normalized_delta is not None:
+      deltas = [d for d in deltas
+                if d / refresh_period >= min_normalized_delta]
+    return (deltas, [delta / refresh_period for delta in deltas])
+
+  @staticmethod
+  def _JoinTimestampRanges(frame_timestamps):
+    """Joins ranges of timestamps, adjusting timestamps to remove deltas
+    between the start of a range and the end of the prior range.
+    """
+    timestamps = []
+    for timestamp_range in frame_timestamps:
+      if len(timestamps) == 0:
+        timestamps.extend(timestamp_range)
+      else:
+        for i in range(1, len(timestamp_range)):
+          timestamps.append(timestamps[-1] +
+              timestamp_range[i] - timestamp_range[i-1])
+    return timestamps
+
+  def _ComputeSurfaceFlingerMetric(self, page, stats):
+    jank_count = None
+    avg_surface_fps = None
+    max_frame_delay = None
+    frame_lengths = None
+    none_value_reason = None
+    if self._HasEnoughFrames(stats.frame_timestamps):
+      timestamps = self._JoinTimestampRanges(stats.frame_timestamps)
+      frame_count = len(timestamps)
+      milliseconds = timestamps[-1] - timestamps[0]
+      min_normalized_frame_length = 0.5
+
+      frame_lengths, normalized_frame_lengths = \
+          self._GetNormalizedDeltas(timestamps, stats.refresh_period,
+                                    min_normalized_frame_length)
+      if len(frame_lengths) < frame_count - 1:
+        logging.warning('Skipping frame lengths that are too short.')
+        frame_count = len(frame_lengths) + 1
+      if len(frame_lengths) == 0:
+        raise Exception('No valid frame lengths found.')
+      _, normalized_changes = \
+          self._GetNormalizedDeltas(frame_lengths, stats.refresh_period)
+      jankiness = [max(0, round(change)) for change in normalized_changes]
+      pause_threshold = 20
+      jank_count = sum(1 for change in jankiness
+                       if change > 0 and change < pause_threshold)
+      avg_surface_fps = int(round((frame_count - 1) * 1000.0 / milliseconds))
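+      # E.g. (illustrative) 61 timestamps spanning 1000 ms give
+      # (61 - 1) * 1000.0 / 1000 = 60 fps.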
+      max_frame_delay = round(max(normalized_frame_lengths))
+      frame_lengths = normalized_frame_lengths
+    else:
+      none_value_reason = NOT_ENOUGH_FRAMES_MESSAGE
+
+    return (
+        scalar.ScalarValue(
+            page, 'avg_surface_fps', 'fps', avg_surface_fps,
+            description='Average frames per second as measured by the '
+                        'platform\'s SurfaceFlinger.',
+            none_value_reason=none_value_reason,
+            improvement_direction=improvement_direction.UP),
+        scalar.ScalarValue(
+            page, 'jank_count', 'janks', jank_count,
+            description='Number of changes in frame rate as measured by the '
+                        'platform\'s SurfaceFlinger.',
+            none_value_reason=none_value_reason,
+            improvement_direction=improvement_direction.DOWN),
+        scalar.ScalarValue(
+            page, 'max_frame_delay', 'vsyncs', max_frame_delay,
+            description='Largest frame time as measured by the platform\'s '
+                        'SurfaceFlinger.',
+            none_value_reason=none_value_reason,
+            improvement_direction=improvement_direction.DOWN),
+        list_of_scalar_values.ListOfScalarValues(
+            page, 'frame_lengths', 'vsyncs', frame_lengths,
+            description='Frame time in vsyncs as measured by the platform\'s '
+                        'SurfaceFlinger.',
+            none_value_reason=none_value_reason,
+            improvement_direction=improvement_direction.DOWN)
+    )
+
+  def _ComputeLatencyMetric(self, page, stats, name, list_of_latency_lists):
+    """Returns Values for the mean and discrepancy for given latency stats."""
+    mean_latency = None
+    latency_discrepancy = None
+    none_value_reason = None
+    if self._HasEnoughFrames(stats.frame_timestamps):
+      latency_list = perf_tests_helper.FlattenList(list_of_latency_lists)
+      if len(latency_list) == 0:
+        return ()
+      mean_latency = round(statistics.ArithmeticMean(latency_list), 3)
+      latency_discrepancy = (
+          round(statistics.DurationsDiscrepancy(latency_list), 4))
+    else:
+      none_value_reason = NOT_ENOUGH_FRAMES_MESSAGE
+    return (
+      scalar.ScalarValue(
+          page, 'mean_%s' % name, 'ms', mean_latency,
+          description='Arithmetic mean of the raw %s values' % name,
+          none_value_reason=none_value_reason,
+          improvement_direction=improvement_direction.DOWN),
+      scalar.ScalarValue(
+          page, '%s_discrepancy' % name, 'ms', latency_discrepancy,
+          description='Discrepancy of the raw %s values' % name,
+          none_value_reason=none_value_reason,
+          improvement_direction=improvement_direction.DOWN)
+    )
+
+  def _ComputeFirstGestureScrollUpdateLatencies(self, page, stats):
+    """Returns a ListOfScalarValuesValues of gesture scroll update latencies.
+
+    Returns a Value for the first gesture scroll update latency for each
+    interaction record in |stats|.
+    """
+    none_value_reason = None
+    first_gesture_scroll_update_latencies = [round(latencies[0], 4)
+        for latencies in stats.gesture_scroll_update_latency
+        if len(latencies)]
+    if (not self._HasEnoughFrames(stats.frame_timestamps) or
+        not first_gesture_scroll_update_latencies):
+      first_gesture_scroll_update_latencies = None
+      none_value_reason = NOT_ENOUGH_FRAMES_MESSAGE
+    return list_of_scalar_values.ListOfScalarValues(
+        page, 'first_gesture_scroll_update_latency', 'ms',
+        first_gesture_scroll_update_latencies,
+        description='First gesture scroll update latency measures the time it '
+                    'takes to process the very first gesture scroll update '
+                    'input event. The first scroll gesture can often get '
+                    'delayed by work related to page loading.',
+        none_value_reason=none_value_reason,
+        improvement_direction=improvement_direction.DOWN)
+
+  def _ComputeQueueingDuration(self, page, stats):
+    """Returns a Value for the frame queueing durations."""
+    queueing_durations = None
+    none_value_reason = None
+    if 'frame_queueing_durations' in stats.errors:
+      none_value_reason = stats.errors['frame_queueing_durations']
+    elif self._HasEnoughFrames(stats.frame_timestamps):
+      queueing_durations = perf_tests_helper.FlattenList(
+          stats.frame_queueing_durations)
+      if len(queueing_durations) == 0:
+        queueing_durations = None
+        none_value_reason = 'No frame queueing durations recorded.'
+    else:
+      none_value_reason = NOT_ENOUGH_FRAMES_MESSAGE
+    return list_of_scalar_values.ListOfScalarValues(
+        page, 'queueing_durations', 'ms', queueing_durations,
+        description='The frame queueing duration quantifies how out of sync '
+                    'the compositor and renderer threads are. It is the amount '
+                    'of wall time that elapses between a '
+                    'ScheduledActionSendBeginMainFrame event in the compositor '
+                    'thread and the corresponding BeginMainFrame event in the '
+                    'main thread.',
+        none_value_reason=none_value_reason,
+        improvement_direction=improvement_direction.DOWN)
+
+  def _ComputeFrameTimeMetric(self, page, stats):
+    """Returns Values for the frame time metrics.
+
+    This includes the raw and mean frame times, as well as the percentage of
+    frames that were hitting 60 fps.
+    """
+    frame_times = None
+    mean_frame_time = None
+    percentage_smooth = None
+    none_value_reason = None
+    if self._HasEnoughFrames(stats.frame_timestamps):
+      frame_times = perf_tests_helper.FlattenList(stats.frame_times)
+      mean_frame_time = round(statistics.ArithmeticMean(frame_times), 3)
+      # We use 17ms as a somewhat looser threshold, instead of 1000.0/60.0.
+      smooth_threshold = 17.0
+      smooth_count = sum(1 for t in frame_times if t < smooth_threshold)
+      percentage_smooth = float(smooth_count) / len(frame_times) * 100.0
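+      # E.g. (illustrative) frame_times=[14, 18, 16, 16] gives smooth_count=3
+      # and percentage_smooth=75.0.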
+    else:
+      none_value_reason = NOT_ENOUGH_FRAMES_MESSAGE
+    return (
+        list_of_scalar_values.ListOfScalarValues(
+            page, 'frame_times', 'ms', frame_times,
+            description='List of raw frame times, helpful to understand the '
+                        'other metrics.',
+            none_value_reason=none_value_reason,
+            improvement_direction=improvement_direction.DOWN),
+        scalar.ScalarValue(
+            page, 'mean_frame_time', 'ms', mean_frame_time,
+            description='Arithmetic mean of frame times.',
+            none_value_reason=none_value_reason,
+            improvement_direction=improvement_direction.DOWN),
+        scalar.ScalarValue(
+            page, 'percentage_smooth', 'score', percentage_smooth,
+            description='Percentage of frames that were hitting 60 fps.',
+            none_value_reason=none_value_reason,
+            improvement_direction=improvement_direction.UP)
+    )
+
+  def _ComputeFrameTimeDiscrepancy(self, page, stats):
+    """Returns a Value for the absolute discrepancy of frame time stamps."""
+
+    frame_discrepancy = None
+    none_value_reason = None
+    if self._HasEnoughFrames(stats.frame_timestamps):
+      frame_discrepancy = round(statistics.TimestampsDiscrepancy(
+          stats.frame_timestamps), 4)
+    else:
+      none_value_reason = NOT_ENOUGH_FRAMES_MESSAGE
+    return scalar.ScalarValue(
+        page, 'frame_time_discrepancy', 'ms', frame_discrepancy,
+        description='Absolute discrepancy of frame time stamps, where '
+                    'discrepancy is a measure of irregularity. It quantifies '
+                    'the worst jank. For a single pause, discrepancy '
+                    'corresponds to the length of this pause in milliseconds. '
+                    'Consecutive pauses increase the discrepancy. This metric '
+                    'is important because even if the mean and 95th '
+                    'percentile are good, one long pause in the middle of an '
+                    'interaction is still bad.',
+        none_value_reason=none_value_reason,
+        improvement_direction=improvement_direction.DOWN)
+
+  def _ComputeMeanPixelsApproximated(self, page, stats):
+    """Returns a Value for the mean percentage of pixels approximated.
+
+    This looks at tiles which are missing or of low or non-ideal resolution.
+    """
+    mean_pixels_approximated = None
+    none_value_reason = None
+    if self._HasEnoughFrames(stats.frame_timestamps):
+      mean_pixels_approximated = round(statistics.ArithmeticMean(
+          perf_tests_helper.FlattenList(
+              stats.approximated_pixel_percentages)), 3)
+    else:
+      none_value_reason = NOT_ENOUGH_FRAMES_MESSAGE
+    return scalar.ScalarValue(
+        page, 'mean_pixels_approximated', 'percent', mean_pixels_approximated,
+        description='Percentage of pixels that were approximated '
+                    '(checkerboarding, low-resolution tiles, etc.).',
+        none_value_reason=none_value_reason,
+        improvement_direction=improvement_direction.DOWN)
+
+  def _ComputeMeanPixelsCheckerboarded(self, page, stats):
+    """Returns a Value for the mean percentage of pixels checkerboarded.
+
+    This looks at tiles which are only missing.
+    It does not take into consideration tiles which are of low or
+    non-ideal resolution.
+    """
+    mean_pixels_checkerboarded = None
+    none_value_reason = None
+    if self._HasEnoughFrames(stats.frame_timestamps):
+      if rendering_stats.CHECKERBOARDED_PIXEL_ERROR in stats.errors:
+        none_value_reason = stats.errors[
+            rendering_stats.CHECKERBOARDED_PIXEL_ERROR]
+      else:
+        mean_pixels_checkerboarded = round(statistics.ArithmeticMean(
+            perf_tests_helper.FlattenList(
+                stats.checkerboarded_pixel_percentages)), 3)
+    else:
+      none_value_reason = NOT_ENOUGH_FRAMES_MESSAGE
+    return scalar.ScalarValue(
+        page, 'mean_pixels_checkerboarded', 'percent',
+        mean_pixels_checkerboarded,
+        description='Percentage of pixels that were checkerboarded.',
+        none_value_reason=none_value_reason,
+        improvement_direction=improvement_direction.DOWN)
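For reference, here is a minimal standalone sketch of the frame-time arithmetic used by _ComputeFrameTimeMetric above, operating on a hypothetical nested list of frame times; the real metric additionally checks _HasEnoughFrames and wraps its results in Telemetry value objects.

# Minimal sketch of the frame-time arithmetic, assuming plain nested lists.
def summarize_frame_times(frame_times_per_interaction, smooth_threshold=17.0):
  # Flatten the per-interaction lists (what perf_tests_helper.FlattenList does).
  frame_times = [t for sub in frame_times_per_interaction for t in sub]
  mean_frame_time = round(float(sum(frame_times)) / len(frame_times), 3)
  # A frame counts as smooth if it stays under the (looser) 17 ms threshold.
  smooth_count = sum(1 for t in frame_times if t < smooth_threshold)
  percentage_smooth = float(smooth_count) / len(frame_times) * 100.0
  return mean_frame_time, percentage_smooth

# Matches the expectations in the unit test below: (30.0, 20.0).
print(summarize_frame_times([[10, 20], [30, 40, 50]]))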
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/smoothness_unittest.py b/catapult/telemetry/telemetry/web_perf/metrics/smoothness_unittest.py
new file mode 100644
index 0000000..c87f7c2
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/smoothness_unittest.py
@@ -0,0 +1,285 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.internal.results import page_test_results
+from telemetry.page import page as page_module
+from telemetry.web_perf.metrics import rendering_stats
+from telemetry.web_perf.metrics import smoothness
+
+
+class _MockRenderingStats(object):
+
+  stats = ['refresh_period', 'frame_timestamps', 'frame_times', 'paint_times',
+           'painted_pixel_counts', 'record_times',
+           'recorded_pixel_counts', 'approximated_pixel_percentages',
+           'checkerboarded_pixel_percentages', 'input_event_latency',
+           'frame_queueing_durations', 'main_thread_scroll_latency',
+           'gesture_scroll_update_latency']
+
+  def __init__(self, **kwargs):
+    self.errors = {}
+    for stat in self.stats:
+      value = kwargs[stat] if stat in kwargs else None
+      setattr(self, stat, value)
+
+
+#pylint: disable=protected-access
+class SmoothnessMetricUnitTest(unittest.TestCase):
+
+  def setUp(self):
+    self.metric = smoothness.SmoothnessMetric()
+    self.page = page_module.Page('file://blank.html')
+    self.good_timestamps = [[10, 20], [30, 40, 50]]
+    self.not_enough_frames_timestamps = [[10], [20, 30, 40]]
+
+  def testPopulateResultsFromStats(self):
+    stats = _MockRenderingStats()
+    for stat in _MockRenderingStats.stats:
+      # Just set fake data for all of the relevant arrays of stats typically
+      # found in a RenderingStats object.
+      setattr(stats, stat, [[10, 20], [30, 40, 50]])
+    results = page_test_results.PageTestResults()
+    results.WillRunPage(self.page)
+    self.metric._PopulateResultsFromStats(results, stats, False)
+    current_page_run = results.current_page_run
+    self.assertTrue(current_page_run.ok)
+    expected_values_count = 12
+    self.assertEquals(expected_values_count, len(current_page_run.values))
+
+  def testHasEnoughFrames(self):
+    # This list will pass since every sub-array has at least 2 frames.
+    has_enough_frames = self.metric._HasEnoughFrames(self.good_timestamps)
+    self.assertTrue(has_enough_frames)
+
+  def testHasEnoughFramesWithNotEnoughFrames(self):
+    # This list will fail since the first sub-array only has a single frame.
+    has_enough_frames = self.metric._HasEnoughFrames(
+        self.not_enough_frames_timestamps)
+    self.assertFalse(has_enough_frames)
+
+  def testComputeSurfaceFlingerMetricNoJank(self):
+    stats = _MockRenderingStats(refresh_period=10,
+                                frame_timestamps=[[10, 20], [130, 140, 150]],
+                                frame_times=[[10], [10, 10]])
+    avg_surface_fps, jank_count, max_frame_delay, frame_lengths = (
+        self.metric._ComputeSurfaceFlingerMetric(self.page, stats))
+    self.assertEquals([1, 1, 1], frame_lengths.values)
+    self.assertEquals(1, max_frame_delay.value)
+    self.assertEquals(0, jank_count.value)
+    self.assertEquals(100, avg_surface_fps.value)
+
+  def testComputeSurfaceFlingerMetricJank(self):
+    stats = _MockRenderingStats(
+        refresh_period=10,
+        frame_timestamps=[[10, 20, 50], [130, 140, 150, 170, 180]],
+        frame_times=[[10, 30], [10, 10, 20, 10]])
+    avg_surface_fps, jank_count, max_frame_delay, frame_lengths = (
+        self.metric._ComputeSurfaceFlingerMetric(self.page, stats))
+    self.assertEquals([1, 3, 1, 1, 2, 1], frame_lengths.values)
+    self.assertEquals(3, max_frame_delay.value)
+    self.assertEquals(2, jank_count.value)
+    self.assertEquals(67, avg_surface_fps.value)
+
+  def testComputeFrameTimeMetricWithNotEnoughFrames(self):
+    stats = _MockRenderingStats(
+        refresh_period=10,
+        frame_timestamps=self.not_enough_frames_timestamps,
+        frame_times=[[10, 20], [30, 40, 50]])
+    avg_surface_fps, jank_count, max_frame_delay, frame_lengths = (
+        self.metric._ComputeSurfaceFlingerMetric(self.page, stats))
+    self.assertEquals(None, avg_surface_fps.value)
+    self.assertEquals(smoothness.NOT_ENOUGH_FRAMES_MESSAGE,
+                      avg_surface_fps.none_value_reason)
+    self.assertEquals(None, jank_count.value)
+    self.assertEquals(smoothness.NOT_ENOUGH_FRAMES_MESSAGE,
+                      jank_count.none_value_reason)
+    self.assertEquals(None, max_frame_delay.value)
+    self.assertEquals(smoothness.NOT_ENOUGH_FRAMES_MESSAGE,
+                      max_frame_delay.none_value_reason)
+    self.assertEquals(None, frame_lengths.values)
+    self.assertEquals(smoothness.NOT_ENOUGH_FRAMES_MESSAGE,
+                      frame_lengths.none_value_reason)
+
+  def testComputeLatencyMetric(self):
+    stats = _MockRenderingStats(frame_timestamps=self.good_timestamps,
+                               input_event_latency=[[10, 20], [30, 40, 50]])
+    # pylint: disable=unbalanced-tuple-unpacking
+    mean_value, discrepancy_value = self.metric._ComputeLatencyMetric(
+        self.page, stats, 'input_event_latency', stats.input_event_latency)
+    self.assertEquals(30, mean_value.value)
+    self.assertEquals(60, discrepancy_value.value)
+
+  def testComputeLatencyMetricWithMissingData(self):
+    stats = _MockRenderingStats(frame_timestamps=self.good_timestamps,
+                               input_event_latency=[[], []])
+    value = self.metric._ComputeLatencyMetric(
+        self.page, stats, 'input_event_latency', stats.input_event_latency)
+    self.assertEquals((), value)
+
+  def testComputeLatencyMetricWithNotEnoughFrames(self):
+    stats = _MockRenderingStats(
+        frame_timestamps=self.not_enough_frames_timestamps,
+        input_event_latency=[[], []])
+    # pylint: disable=unbalanced-tuple-unpacking
+    mean_value, discrepancy_value = self.metric._ComputeLatencyMetric(
+        self.page, stats, 'input_event_latency', stats.input_event_latency)
+    self.assertEquals(None, mean_value.value)
+    self.assertEquals(smoothness.NOT_ENOUGH_FRAMES_MESSAGE,
+                      mean_value.none_value_reason)
+    self.assertEquals(None, discrepancy_value.value)
+    self.assertEquals(smoothness.NOT_ENOUGH_FRAMES_MESSAGE,
+                      discrepancy_value.none_value_reason)
+
+  def testComputeGestureScrollUpdateLatencies(self):
+    stats = _MockRenderingStats(
+        frame_timestamps=self.good_timestamps,
+        gesture_scroll_update_latency=[[10, 20], [30, 40, 50]])
+    gesture_value = self.metric._ComputeFirstGestureScrollUpdateLatencies(
+        self.page, stats)
+    self.assertEquals([10, 30], gesture_value.values)
+
+  def testComputeGestureScrollUpdateLatenciesWithMissingData(self):
+    stats = _MockRenderingStats(
+        frame_timestamps=self.good_timestamps,
+        gesture_scroll_update_latency=[[], []])
+    value = self.metric._ComputeFirstGestureScrollUpdateLatencies(
+        self.page, stats)
+    self.assertEquals(None, value.values)
+
+  def testComputeGestureScrollUpdateLatenciesWithNotEnoughFrames(self):
+    stats = _MockRenderingStats(
+        frame_timestamps=self.not_enough_frames_timestamps,
+        gesture_scroll_update_latency=[[10, 20], [30, 40, 50]])
+    gesture_value = self.metric._ComputeFirstGestureScrollUpdateLatencies(
+        self.page, stats)
+    self.assertEquals(None, gesture_value.values)
+    self.assertEquals(smoothness.NOT_ENOUGH_FRAMES_MESSAGE,
+                      gesture_value.none_value_reason)
+
+  def testComputeQueueingDuration(self):
+    stats = _MockRenderingStats(frame_timestamps=self.good_timestamps,
+                               frame_queueing_durations=[[10, 20], [30, 40]])
+    list_of_scalar_values = self.metric._ComputeQueueingDuration(self.page,
+                                                                stats)
+    self.assertEquals([10, 20, 30, 40], list_of_scalar_values.values)
+
+  def testComputeQueueingDurationWithMissingData(self):
+    stats = _MockRenderingStats(frame_timestamps=self.good_timestamps,
+                               frame_queueing_durations=[[], []])
+    list_of_scalar_values = self.metric._ComputeQueueingDuration(
+        self.page, stats)
+    self.assertEquals(None, list_of_scalar_values.values)
+    self.assertEquals('No frame queueing durations recorded.',
+                      list_of_scalar_values.none_value_reason)
+
+  def testComputeQueueingDurationWithMissingDataAndErrorValue(self):
+    stats = _MockRenderingStats(frame_timestamps=self.good_timestamps,
+                               frame_queueing_durations=[[], []])
+    stats.errors['frame_queueing_durations'] = (
+        'Current chrome version does not support the queueing delay metric.')
+    list_of_scalar_values = self.metric._ComputeQueueingDuration(
+        self.page, stats)
+    self.assertEquals(None, list_of_scalar_values.values)
+    self.assertEquals(
+        'Current chrome version does not support the queueing delay metric.',
+        list_of_scalar_values.none_value_reason)
+
+  def testComputeQueueingDurationWithNotEnoughFrames(self):
+    stats = _MockRenderingStats(
+        frame_timestamps=self.not_enough_frames_timestamps,
+        frame_queueing_durations=[[10, 20], [30, 40, 50]])
+    list_of_scalar_values = self.metric._ComputeQueueingDuration(self.page,
+                                                                stats)
+    self.assertEquals(None, list_of_scalar_values.values)
+    self.assertEquals(smoothness.NOT_ENOUGH_FRAMES_MESSAGE,
+                      list_of_scalar_values.none_value_reason)
+
+  def testComputeFrameTimeMetric(self):
+    stats = _MockRenderingStats(frame_timestamps=self.good_timestamps,
+                               frame_times=[[10, 20], [30, 40, 50]])
+    frame_times_value, mean_frame_time_value, percentage_smooth_value = (
+        self.metric._ComputeFrameTimeMetric(self.page, stats))
+    self.assertEquals([10, 20, 30, 40, 50], frame_times_value.values)
+    self.assertEquals(30, mean_frame_time_value.value)
+    self.assertEquals(20, percentage_smooth_value.value)
+
+  def testComputeFrameTimeMetricWithNotEnoughFrames2(self):
+    stats = _MockRenderingStats(
+        frame_timestamps=self.not_enough_frames_timestamps,
+        frame_times=[[10, 20], [30, 40, 50]])
+    frame_times_value, mean_frame_time_value, percentage_smooth_value = (
+        self.metric._ComputeFrameTimeMetric(self.page, stats))
+    self.assertEquals(None, frame_times_value.values)
+    self.assertEquals(smoothness.NOT_ENOUGH_FRAMES_MESSAGE,
+                      frame_times_value.none_value_reason)
+    self.assertEquals(None, mean_frame_time_value.value)
+    self.assertEquals(smoothness.NOT_ENOUGH_FRAMES_MESSAGE,
+                      mean_frame_time_value.none_value_reason)
+    self.assertEquals(None, percentage_smooth_value.value)
+    self.assertEquals(smoothness.NOT_ENOUGH_FRAMES_MESSAGE,
+                      percentage_smooth_value.none_value_reason)
+
+  def testComputeFrameTimeDiscrepancy(self):
+    stats = _MockRenderingStats(frame_timestamps=self.good_timestamps)
+    frame_time_discrepancy_value = self.metric._ComputeFrameTimeDiscrepancy(
+        self.page, stats)
+    self.assertEquals(10, frame_time_discrepancy_value.value)
+
+  def testComputeFrameTimeDiscrepancyWithNotEnoughFrames(self):
+    stats = _MockRenderingStats(
+        frame_timestamps=self.not_enough_frames_timestamps)
+    frame_time_discrepancy_value = self.metric._ComputeFrameTimeDiscrepancy(
+        self.page, stats)
+    self.assertEquals(None, frame_time_discrepancy_value.value)
+    self.assertEquals(smoothness.NOT_ENOUGH_FRAMES_MESSAGE,
+                      frame_time_discrepancy_value.none_value_reason)
+
+  def testComputeMeanPixelsApproximated(self):
+    stats = _MockRenderingStats(
+        frame_timestamps=self.good_timestamps,
+        approximated_pixel_percentages=[[10, 20], [30, 40, 50]])
+    mean_pixels_value = self.metric._ComputeMeanPixelsApproximated(
+        self.page, stats)
+    self.assertEquals(30, mean_pixels_value.value)
+
+  def testComputeMeanPixelsApproximatedWithNotEnoughFrames(self):
+    stats = _MockRenderingStats(
+        frame_timestamps=self.not_enough_frames_timestamps,
+        approximated_pixel_percentages=[[10, 20], [30, 40, 50]])
+    mean_pixels_value = self.metric._ComputeMeanPixelsApproximated(
+        self.page, stats)
+    self.assertEquals(None, mean_pixels_value.value)
+    self.assertEquals(smoothness.NOT_ENOUGH_FRAMES_MESSAGE,
+                      mean_pixels_value.none_value_reason)
+
+  def testComputeMeanPixelsCheckerboarded(self):
+    stats = _MockRenderingStats(
+        frame_timestamps=self.good_timestamps,
+        checkerboarded_pixel_percentages=[[10, 20], [30, 40, 50]])
+    mean_pixels_value = self.metric._ComputeMeanPixelsCheckerboarded(
+        self.page, stats)
+    self.assertEquals(30, mean_pixels_value.value)
+
+  def testComputeMeanPixelsCheckerboardedWithNotEnoughFrames(self):
+    stats = _MockRenderingStats(
+        frame_timestamps=self.not_enough_frames_timestamps,
+        checkerboarded_pixel_percentages=[[10, 20], [30, 40, 50]])
+    mean_pixels_value = self.metric._ComputeMeanPixelsCheckerboarded(
+        self.page, stats)
+    self.assertEquals(None, mean_pixels_value.value)
+    self.assertEquals(smoothness.NOT_ENOUGH_FRAMES_MESSAGE,
+                      mean_pixels_value.none_value_reason)
+
+  def testComputeMeanPixelsCheckerboardedWithNoData(self):
+    stats = _MockRenderingStats(
+        frame_timestamps=self.good_timestamps,
+        checkerboarded_pixel_percentages=None)
+    stats.errors[rendering_stats.CHECKERBOARDED_PIXEL_ERROR] = 'test error'
+    mean_pixels_value = self.metric._ComputeMeanPixelsCheckerboarded(
+        self.page, stats)
+    self.assertEquals(None, mean_pixels_value.value)
+    self.assertEquals('test error',
+                      mean_pixels_value.none_value_reason)
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/startup.py b/catapult/telemetry/telemetry/web_perf/metrics/startup.py
new file mode 100644
index 0000000..db5729b
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/startup.py
@@ -0,0 +1,95 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry import value
+from telemetry.web_perf.metrics import timeline_based_metric
+
+_PROCESS_CREATION = 'Startup.BrowserProcessCreation'
+_MAIN_ENTRY_POINT = 'Startup.BrowserMainEntryPoint'
+
+# A dictionary that maps each metric name to one of two tuple forms:
+#  1. A tuple of one event name if the event itself contains the reported
+#     duration
+#  2. A tuple of two event names if the value to report is the time difference
+#     between the start times of these events
+_METRICS = {
+  'messageloop_start_time':
+      ('Startup.BrowserMessageLoopStartTimeFromMainEntry',),
+
+  'window_display_time':
+      ('Startup.BrowserWindowDisplay',),
+
+  'open_tabs_time':
+      ('Startup.BrowserOpenTabs',),
+
+  'first_non_empty_paint_time':
+      ('Startup.FirstWebContents.NonEmptyPaint2',),
+
+  'first_main_frame_load_time':
+      ('Startup.FirstWebContents.MainFrameLoad2',),
+
+  'foreground_tab_load_complete':
+      (_MAIN_ENTRY_POINT, 'loadEventEnd'),
+
+  'foreground_tab_request_start':
+      (_MAIN_ENTRY_POINT, 'requestStart'),
+}
+
+_TRACKED_EVENT_NAMES = set()
+for i in _METRICS.values():
+  _TRACKED_EVENT_NAMES.add(i[0])
+  if len(i) == 2:
+    _TRACKED_EVENT_NAMES.add(i[1])
+
+
+class StartupTimelineMetric(timeline_based_metric.TimelineBasedMetric):
+  """Reports summary stats from important startup events."""
+
+  def __init__(self):
+    super(StartupTimelineMetric, self).__init__()
+
+  def AddResults(self, model, _renderer_thread, interactions, results):
+    pass
+
+  def AddWholeTraceResults(self, model, results):
+    browser = model.browser_process
+
+    if not browser:
+      return
+
+    # Produce a map of events to track.
+    tracked_events = {}
+    for event in browser.parent.IterAllEvents(
+      event_predicate=lambda event: event.name in _TRACKED_EVENT_NAMES):
+      # In case of a begin/end trace event, only track the begin event, which
+      # contains the duration.
+      if event.name in tracked_events:
+        continue
+
+      tracked_events[event.name] = event
+
+    # Generate the metric values according to the tracked events.
+    for display_name, event_names in _METRICS.iteritems():
+      if event_names[0] not in tracked_events:
+        continue
+
+      duration = None
+      if len(event_names) == 1:
+        # The single event contains the duration to report.
+        duration = tracked_events[event_names[0]].duration
+
+      elif len(event_names) == 2:
+        # The duration is defined as the difference between two event starts.
+        if event_names[1] not in tracked_events:
+          continue
+
+        duration = (tracked_events[event_names[1]].start -
+            tracked_events[event_names[0]].start)
+
+      results.AddValue(value.scalar.ScalarValue(
+        page=results.current_page,
+        name=display_name,
+        units='ms',
+        value=duration,
+        improvement_direction=value.improvement_direction.DOWN))
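A minimal sketch of how the two _METRICS tuple forms above are resolved, assuming tracked_events is a dict from event names to the first matching trace event (objects with .start and .duration); the real logic lives in AddWholeTraceResults.

# Sketch of the tuple resolution in AddWholeTraceResults (tracked_events is a
# hypothetical dict of event name -> first matching trace event).
def resolve_startup_duration(event_names, tracked_events):
  if event_names[0] not in tracked_events:
    return None
  if len(event_names) == 1:
    # A single event carries the reported duration itself.
    return tracked_events[event_names[0]].duration
  if event_names[1] not in tracked_events:
    return None
  # Two events: report the difference between their start timestamps.
  return (tracked_events[event_names[1]].start -
          tracked_events[event_names[0]].start)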
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/startup_unittest.py b/catapult/telemetry/telemetry/web_perf/metrics/startup_unittest.py
new file mode 100644
index 0000000..a2aa113
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/startup_unittest.py
@@ -0,0 +1,100 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+import telemetry.timeline.event as timeline_event
+from telemetry.testing import test_page_test_results
+from telemetry.web_perf.metrics import startup
+
+
+class StartupTimelineMetricTest(unittest.TestCase):
+
+  def setUp(self):
+    self.events = []
+
+  def AddEvent(self, event_name, start, duration=None):
+    event = timeline_event.TimelineEvent('my_category', event_name,
+                                         start, duration)
+    self.events.append(event)
+
+  # Attributes defined outside __init__
+  # pylint: disable=attribute-defined-outside-init
+  def ComputeStartupMetrics(self):
+    results = test_page_test_results.TestPageTestResults(self)
+
+    # Create a mock model usable by
+    # StartupTimelineMetric.AddWholeTraceResults().
+    def IterateEvents(event_predicate):
+      for event in self.events:
+        if event_predicate(event):
+          yield event
+    class MockClass(object):
+      pass
+    model = MockClass()
+    model.browser_process = MockClass()
+    model.browser_process.parent = MockClass()
+    model.browser_process.parent.IterAllEvents = IterateEvents
+
+    startup.StartupTimelineMetric().AddWholeTraceResults(model, results)
+    return results
+
+  def testUntrackedEvents(self):
+    # Code coverage for untracked events.
+    self.AddEvent('unknown_event_0', 0)
+    self.AddEvent('unknown_event_1', 1)
+    self.ComputeStartupMetrics()
+
+  def testInstantEventsBasedValue(self):
+    # Test case with instant events to measure the duration between the first
+    # occurrences of two distinct events.
+    START0 = 7
+    START1 = 8
+    DURATION0 = 17
+    DURATION1 = 18
+
+    # Generate duplicated events to make sure we consider only the first one.
+    self.AddEvent(startup._MAIN_ENTRY_POINT, START0)
+    self.AddEvent(startup._MAIN_ENTRY_POINT, START1)
+    self.AddEvent('loadEventEnd', START0 + DURATION0)
+    self.AddEvent('loadEventEnd', START1 + DURATION1)
+    self.AddEvent('requestStart', START0 + DURATION0 * 2)
+    self.AddEvent('requestStart', START1 + DURATION1 * 2)
+
+    results = self.ComputeStartupMetrics()
+    results.AssertHasPageSpecificScalarValue('foreground_tab_load_complete',
+        'ms', DURATION0)
+    results.AssertHasPageSpecificScalarValue('foreground_tab_request_start',
+        'ms', DURATION0 * 2)
+
+  def testDurationEventsBasedValues(self):
+    DURATION_EVENTS = set([
+        'messageloop_start_time',
+        'window_display_time',
+        'open_tabs_time',
+        'first_non_empty_paint_time',
+        'first_main_frame_load_time'])
+
+    # Test case to get the duration of the first occurrence of a duration event.
+    i = 1
+    for display_name in DURATION_EVENTS:
+      self.assertTrue(len(startup._METRICS[display_name]) == 1)
+      event_name = startup._METRICS[display_name][0]
+
+      duration = 13 * i
+      i += 1
+
+      # Generate duplicated events to make sure only the first event is
+      # considered.
+      self.AddEvent(event_name, 5, duration)
+      self.AddEvent(event_name, 6, duration + 2)
+
+    results = self.ComputeStartupMetrics()
+
+    i = 1
+    for display_name in DURATION_EVENTS:
+      duration = 13 * i
+      i += 1
+
+      results.AssertHasPageSpecificScalarValue(display_name, 'ms', duration)
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/text_selection.py b/catapult/telemetry/telemetry/web_perf/metrics/text_selection.py
new file mode 100644
index 0000000..6918579
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/text_selection.py
@@ -0,0 +1,19 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.web_perf.metrics import single_event
+
+EVENT_NAME = 'WebLocalFrameImpl::moveRangeSelectionExtent'
+METRIC_NAME = 'text-selection'
+
+class TextSelectionMetric(single_event._SingleEventMetric):
+  """Directly reports durations of WebLocalFrameImpl::moveRangeSelectionExtent
+  events associated with moving a selection extent.
+  """
+
+  def __init__(self):
+    super(TextSelectionMetric, self).__init__(EVENT_NAME, METRIC_NAME,
+        metric_description=('List of durations of selection extent movements '
+                            'that were caused by and start during '
+                            'interactions'))
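A hypothetical sketch of defining another metric with the same single-event pattern; the event and metric names below are made up and only illustrate how _SingleEventMetric is parameterized.

from telemetry.web_perf.metrics import single_event

class ExampleSingleEventMetric(single_event._SingleEventMetric):
  """Reports durations of a made-up trace event (illustration only)."""

  def __init__(self):
    super(ExampleSingleEventMetric, self).__init__(
        'SomeClass::someTraceEvent',  # trace event name to match (hypothetical)
        'example-metric',             # name used for the reported values
        metric_description=('Durations of SomeClass::someTraceEvent events '
                            'that start during interactions'))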
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/timeline_based_metric.py b/catapult/telemetry/telemetry/web_perf/metrics/timeline_based_metric.py
new file mode 100644
index 0000000..d1307e9
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/timeline_based_metric.py
@@ -0,0 +1,87 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class TimelineBasedMetricException(Exception):
+  """Exception that can be raised by metrics that implement
+     TimelineBasedMetric to indicate a problem arose when computing the metric.
+     """
+
+
+def _TimeRangesHasOverlap(iterable_time_ranges):
+  """ Returns True if any of the given time ranges overlap.
+  iterable_time_ranges: an iterable of time ranges. Each time range is a
+  tuple (start time, end time).
+  """
+  # Sort the ranges by the start time
+  sorted_time_ranges = sorted(iterable_time_ranges)
+  last_range = sorted_time_ranges[0]
+  for current_range in sorted_time_ranges[1:]:
+    start_current_range = current_range[0]
+    end_last_range = last_range[1]
+    if start_current_range < end_last_range:
+      return True
+    last_range = current_range
+  return False
+
+
+def IsEventInInteractions(event, interaction_records):
+  """ Return True if event is in any of the interaction records' time range.
+
+  Args:
+    event: an instance of telemetry.timeline.event.TimelineEvent.
+    interaction_records: a list of interaction records, where each record is
+      an instance of
+      telemetry.web_perf.timeline_interaction_record.TimelineInteractionRecord.
+
+  Returns:
+    True if |event|'s start & end time is in any of the |interaction_records|'s
+    time range.
+  """
+  return any(ir.start <= event.start and ir.end >= event.end for ir
+             in interaction_records)
+
+
+class TimelineBasedMetric(object):
+
+  def __init__(self):
+    """Computes metrics from a telemetry.timeline Model and a time range."""
+    super(TimelineBasedMetric, self).__init__()
+
+  def AddResults(self, model, renderer_thread, interaction_records, results):
+    """Computes and adds metrics for the interaction_records' time ranges.
+
+    The override of this method should compute results on the data **only**
+    within the interaction_records' start and end time ranges.
+
+    Args:
+      model: An instance of telemetry.timeline.model.TimelineModel.
+      interaction_records: A list of instances of TimelineInteractionRecord. If
+        the override of this method doesn't support overlapped ranges, use
+        VerifyNonOverlappedRecords to check that no records are overlapped.
+      results: An instance of page.PageTestResults.
+
+    """
+    raise NotImplementedError()
+
+  def AddWholeTraceResults(self, model, results):
+    """Computes and adds metrics corresponding to the entire trace.
+
+    Override this method to compute results that correspond to the whole trace.
+
+    Args:
+      model: An instance of telemetry.timeline.model.TimelineModel.
+      results: An instance of page.PageTestResults.
+    """
+    pass
+
+  def VerifyNonOverlappedRecords(self, interaction_records):
+    """Raises an exception if interaction_records contain overlapping ranges.
+    """
+    if _TimeRangesHasOverlap(((r.start, r.end) for r in interaction_records)):
+      raise TimelineBasedMetricException(
+          'This metric does not support interaction records with overlapped '
+          'time range.')
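A worked sketch of the overlap check behind VerifyNonOverlappedRecords, using hypothetical (start, end) ranges: sort by start time, then flag any range that starts before the previous one ends.

ranges = sorted([(5, 15), (25, 30), (10, 20)])  # -> [(5, 15), (10, 20), (25, 30)]
has_overlap = any(cur[0] < prev[1] for prev, cur in zip(ranges, ranges[1:]))
print(has_overlap)  # True: (10, 20) starts before (5, 15) ends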
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/timeline_based_metric_unittest.py b/catapult/telemetry/telemetry/web_perf/metrics/timeline_based_metric_unittest.py
new file mode 100644
index 0000000..31f0725
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/timeline_based_metric_unittest.py
@@ -0,0 +1,58 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+import telemetry.web_perf.metrics.timeline_based_metric as tbm_module
+
+
+class FakeEvent(object):
+  def __init__(self, start, end):
+    self.start = start
+    self.end = end
+
+
+class FakeRecord(object):
+  def __init__(self, start, end):
+    self.start = start
+    self.end = end
+
+
+class TimelineBasedMetricTest(unittest.TestCase):
+
+  # pylint: disable=protected-access
+  def testTimeRangesHasOverlap(self):
+    # Test cases with overlap on one side
+    self.assertTrue(tbm_module._TimeRangesHasOverlap([(10, 20), (5, 15)]))
+    self.assertTrue(tbm_module._TimeRangesHasOverlap([(5, 15), (10, 20)]))
+    self.assertTrue(tbm_module._TimeRangesHasOverlap(
+        [(5, 15), (25, 30), (10, 20)]))
+
+    # Test cases with one range fall in the middle of other
+    self.assertTrue(tbm_module._TimeRangesHasOverlap([(10, 20), (15, 18)]))
+    self.assertTrue(tbm_module._TimeRangesHasOverlap([(15, 18), (10, 20)]))
+    self.assertTrue(tbm_module._TimeRangesHasOverlap(
+        [(15, 18), (40, 50), (10, 20)]))
+
+    self.assertFalse(tbm_module._TimeRangesHasOverlap([(15, 18), (20, 25)]))
+    self.assertFalse(tbm_module._TimeRangesHasOverlap(
+        [(1, 2), (2, 3), (0, 1)]))
+
+  def testIsEventInInteractions(self):
+    self.assertFalse(
+        tbm_module.IsEventInInteractions(
+        FakeEvent(0, 100),
+        [FakeRecord(5, 105), FakeRecord(50, 200), FakeRecord(300, 400)]))
+    self.assertFalse(
+        tbm_module.IsEventInInteractions(
+        FakeEvent(50, 100),
+        [FakeRecord(105, 205), FakeRecord(0, 45), FakeRecord(0, 90)]))
+    self.assertTrue(
+        tbm_module.IsEventInInteractions(
+        FakeEvent(50, 100),
+        [FakeRecord(5, 105), FakeRecord(0, 45), FakeRecord(0, 90)]))
+    self.assertTrue(
+        tbm_module.IsEventInInteractions(
+        FakeEvent(50, 100),
+        [FakeRecord(5, 45), FakeRecord(0, 45), FakeRecord(0, 100)]))
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/trace_event_stats.py b/catapult/telemetry/telemetry/web_perf/metrics/trace_event_stats.py
new file mode 100644
index 0000000..097d866
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/trace_event_stats.py
@@ -0,0 +1,131 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+
+from telemetry.value import list_of_scalar_values
+from telemetry.value import scalar
+
+
+class TraceEventStatsInput(object):
+  """Input for the TraceEventStats.
+  Using this object with TraceEventStats produces two metrics: one with a
+  list of the durations of the given event, and one with the count of the
+  events, named `metric_name + '-count'`.
+  Args:
+    event_category: The category of the event to track.
+    event_name: The name of the event to track.
+    metric_name: The name of the metric, which accumulates all of the
+                 durations of the events.
+    metric_description: Description of the metric.
+    units: Units for the metric.
+    process_name: (optional) The name of the process to inspect for the trace
+                  events. Defaults to 'Renderer'.
+  """
+  def __init__(self, event_category, event_name, metric_name,
+               metric_description, units, process_name='Renderer'):
+    self.event_category = event_category
+    self.event_name = event_name
+    self.metric_name = metric_name
+    self.metric_description = metric_description
+    self.units = units
+    self.process_name = process_name
+    self.event_id = TraceEventStatsInput.GetEventId(event_category, event_name)
+    assert process_name is not None
+
+  @staticmethod
+  def GetEventId(event_category, event_name):
+    return event_category + '^SERIALIZE-DELIM^' + event_name
+
+class TraceEventStats(object):
+  """Reports durations and counts of given trace events.
+  """
+
+  def __init__(self, trace_event_aggregator_inputs=None):
+    self._inputs_by_process_name = collections.defaultdict(list)
+    self._metrics = set()
+    self._IndexNewInputs(trace_event_aggregator_inputs)
+
+  def AddInput(self, trace_event_aggregator_input):
+    self._IndexNewInputs([trace_event_aggregator_input])
+
+  def _IndexNewInputs(self, input_list):
+    if not input_list:
+      return
+    for input_obj in input_list:
+      name = input_obj.metric_name
+      # We check here to make sure we don't have a duplicate metric
+      assert name not in self._metrics
+      assert (name + '-count') not in self._metrics
+      self._metrics.add(name)
+      self._metrics.add(name + '-count')
+
+      self._inputs_by_process_name[input_obj.process_name].append(input_obj)
+
+  @staticmethod
+  def ThreadDurationIfPresent(event):
+    if event.thread_duration:
+      return event.thread_duration
+    else:
+      return event.duration
+
+  def AddResults(self, model, renderer_process, interactions, results):
+    del renderer_process  # unused
+    assert interactions
+    for p in model.GetAllProcesses():
+      if p.name not in self._inputs_by_process_name:
+        continue
+
+      inputs = self._inputs_by_process_name[p.name]
+      input_ids = {i.event_id for i in inputs}
+
+      def InputIdPredicate(e, ids):
+        return TraceEventStatsInput.GetEventId(e.category, e.name) in ids
+
+      self._AddResultsInternal(
+          p.IterAllEvents(
+              recursive=True,
+              event_type_predicate=lambda t: True,
+              event_predicate=
+                  lambda e, ids=input_ids: InputIdPredicate(e, ids)),
+          interactions,
+          results,
+          inputs)
+
+  # We assume events have been filtered already. 'events' is an iterator.
+  def _AddResultsInternal(self, events, interactions, results, inputs):
+    times_by_event_id = collections.defaultdict(list)
+
+    for event in events:
+      if not any(interaction.start <= event.start <= interaction.end
+                 for interaction in interactions):
+        continue
+      event_id = TraceEventStatsInput.GetEventId(event.category, event.name)
+      times_by_event_id[event_id].append(self.ThreadDurationIfPresent(event))
+
+    if not times_by_event_id:
+      return
+
+    inputs_by_event_id = dict([[input_obj.event_id, input_obj]
+                                for input_obj in inputs])
+
+    for (event_name, times) in times_by_event_id.iteritems():
+      input_for_event = inputs_by_event_id[event_name]
+      name = input_for_event.metric_name
+      results.AddValue(scalar.ScalarValue(
+        page=results.current_page,
+        tir_label=interactions[0].label,
+        name=name + '-count',
+        units='count',
+        value=len(times),
+        description='The number of times ' + name + ' was recorded.'))
+      if len(times) == 0:
+        continue
+      results.AddValue(list_of_scalar_values.ListOfScalarValues(
+        page=results.current_page,
+        tir_label=interactions[0].label,
+        name=name,
+        units=input_for_event.units,
+        values=times,
+        description=input_for_event.metric_description))
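A sketch of wiring up TraceEventStats; the category, event, and metric names here are placeholders rather than events guaranteed to appear in a trace.

from telemetry.web_perf.metrics.trace_event_stats import (
    TraceEventStats, TraceEventStatsInput)

stats = TraceEventStats()
stats.AddInput(TraceEventStatsInput(
    event_category='blink',               # trace category to match (placeholder)
    event_name='SomeClass::someMethod',   # trace event to match (placeholder)
    metric_name='some-method',            # list-of-durations metric name
    metric_description='Durations of SomeClass::someMethod in interactions',
    units='ms'))                          # process_name defaults to 'Renderer'
# AddResults(model, renderer_process, interactions, results) then reports a
# 'some-method' list of durations and a 'some-method-count' scalar.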
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/trace_event_stats_unittest.py b/catapult/telemetry/telemetry/web_perf/metrics/trace_event_stats_unittest.py
new file mode 100644
index 0000000..242ae59
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/trace_event_stats_unittest.py
@@ -0,0 +1,146 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from collections import namedtuple
+from telemetry.testing import test_page_test_results
+from telemetry.timeline import model as model_module
+from telemetry.timeline import slice as slice_module
+from telemetry.web_perf import timeline_interaction_record
+from telemetry.web_perf.metrics.trace_event_stats import TraceEventStats
+from telemetry.web_perf.metrics.trace_event_stats import TraceEventStatsInput
+
+
+FakeEvent = namedtuple('Event', 'name, start, end, thread_duration, args')
+Interaction = timeline_interaction_record.TimelineInteractionRecord
+TEST_INTERACTION_LABEL = 'Action_TestInteraction'
+
+RENDERER_PROCESS = 'Renderer'
+OTHER_PROCESS = 'Other'
+
+EVENT_CATEGORY1 = 'Category1'
+EVENT_CATEGORY2 = 'Category2'
+
+EVENT_NAME1 = 'Name1'
+EVENT_NAME2 = 'Name2'
+
+
+def TestInteraction(start, end):
+  return Interaction(TEST_INTERACTION_LABEL, start, end)
+
+class TraceEventStatsUnittest(unittest.TestCase):
+
+  def setUp(self):
+    self.model = model_module.TimelineModel()
+    self.renderer_process = self.model.GetOrCreateProcess(1)
+    self.renderer_process.name = RENDERER_PROCESS
+    self.main_thread = self.renderer_process.GetOrCreateThread(tid=11)
+    self.other_process = self.model.GetOrCreateProcess(2)
+    self.other_process.name = OTHER_PROCESS
+    self.other_thread = self.other_process.GetOrCreateThread(tid=12)
+
+  def GetThreadForProcessName(self, process_name):
+    if process_name is RENDERER_PROCESS:
+      return self.main_thread
+    elif process_name is OTHER_PROCESS:
+      return self.other_thread
+    else:
+      raise
+
+  def AddEvent(self, process_name, event_category, event_name,
+               start, duration, thread_start, thread_duration):
+    thread = self.GetThreadForProcessName(process_name)
+    record = slice_module.Slice(thread,
+                             event_category,
+                             event_name,
+                             start, duration, thread_start, thread_duration)
+    thread.PushSlice(record)
+
+  def RunAggregator(self, aggregator, interactions):
+    results = test_page_test_results.TestPageTestResults(self)
+    aggregator.AddResults(self.model, self.renderer_process,
+                          interactions, results)
+    return results
+
+  def testBasicUsage(self):
+    self.AddEvent(RENDERER_PROCESS, EVENT_CATEGORY1, EVENT_NAME1, 10, 8, 10, 5)
+    self.AddEvent(RENDERER_PROCESS, EVENT_CATEGORY1, EVENT_NAME1, 14, 2, 14, 2)
+    interactions = [TestInteraction(9, 14)]
+
+    aggregator = TraceEventStats()
+    aggregator.AddInput(TraceEventStatsInput(
+      EVENT_CATEGORY1,
+      EVENT_NAME1,
+      'metric-name',
+      'metric-description',
+      'units',
+      'Renderer'))
+
+    results = self.RunAggregator(aggregator, interactions)
+    results.AssertHasPageSpecificScalarValue('metric-name-count', 'count', 2)
+    results.AssertHasPageSpecificListOfScalarValues(
+      'metric-name', 'units', [5, 2])
+
+  def testFiltering(self):
+    # These should be recorded.
+    self.AddEvent(RENDERER_PROCESS, EVENT_CATEGORY1, EVENT_NAME1, 10, 8, 10, 5)
+    self.AddEvent(RENDERER_PROCESS, EVENT_CATEGORY1, EVENT_NAME1, 14, 2, 14, 2)
+    self.AddEvent(RENDERER_PROCESS, EVENT_CATEGORY1, EVENT_NAME1, 20, 6, 20, 1)
+
+    # These should be filtered.
+    self.AddEvent(RENDERER_PROCESS, EVENT_CATEGORY1, EVENT_NAME1, 15, 1, 15, 1)
+    self.AddEvent(RENDERER_PROCESS, EVENT_CATEGORY2, EVENT_NAME1, 11, 4, 11, 4)
+    self.AddEvent(RENDERER_PROCESS, EVENT_CATEGORY1, EVENT_NAME2, 11, 3, 11, 3)
+    self.AddEvent(OTHER_PROCESS, EVENT_CATEGORY1, EVENT_NAME1, 11, 2, 11, 2)
+
+    interactions = [TestInteraction(9, 14), TestInteraction(20, 21)]
+
+    aggregator = TraceEventStats()
+    # Test that we default to 'Renderer'
+    aggregator.AddInput(TraceEventStatsInput(
+      EVENT_CATEGORY1,
+      EVENT_NAME1,
+      'metric-name',
+      'metric-description',
+      'units'))
+
+    results = self.RunAggregator(aggregator, interactions)
+    results.AssertHasPageSpecificScalarValue('metric-name-count', 'count', 3)
+    results.AssertHasPageSpecificListOfScalarValues(
+      'metric-name', 'units', [5, 2, 1])
+
+  def testNoInputs(self):
+    # These should be recorded.
+    self.AddEvent(RENDERER_PROCESS, EVENT_CATEGORY1, EVENT_NAME1, 10, 8, 10, 5)
+    self.AddEvent(RENDERER_PROCESS, EVENT_CATEGORY1, EVENT_NAME1, 14, 2, 14, 2)
+    self.AddEvent(RENDERER_PROCESS, EVENT_CATEGORY1, EVENT_NAME1, 20, 6, 20, 1)
+
+    # These should be filtered.
+    self.AddEvent(RENDERER_PROCESS, EVENT_CATEGORY1, EVENT_NAME1, 15, 1, 15, 1)
+    self.AddEvent(RENDERER_PROCESS, EVENT_CATEGORY2, EVENT_NAME1, 11, 4, 11, 4)
+    self.AddEvent(RENDERER_PROCESS, EVENT_CATEGORY1, EVENT_NAME2, 11, 3, 11, 3)
+    self.AddEvent(OTHER_PROCESS, EVENT_CATEGORY1, EVENT_NAME1, 11, 2, 11, 2)
+
+    interactions = [TestInteraction(9, 14), TestInteraction(20, 21)]
+
+    aggregator = TraceEventStats()
+
+    results = self.RunAggregator(aggregator, interactions)
+    self.assertEquals([], results.all_page_specific_values)
+
+
+  def testNoEvents(self):
+    interactions = [TestInteraction(9, 14)]
+
+    aggregator = TraceEventStats()
+    aggregator.AddInput(TraceEventStatsInput(
+      EVENT_CATEGORY1,
+      EVENT_NAME1,
+      'metric-name',
+      'metric-description',
+      'units'))
+
+    results = self.RunAggregator(aggregator, interactions)
+    self.assertEquals([], results.all_page_specific_values)
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/v8_execution.py b/catapult/telemetry/telemetry/web_perf/metrics/v8_execution.py
new file mode 100644
index 0000000..3fbd7de
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/v8_execution.py
@@ -0,0 +1,135 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+from telemetry.util import statistics
+from telemetry.value import scalar
+from telemetry.web_perf.metrics import timeline_based_metric
+
+
+class V8ExecutionMetric(timeline_based_metric.TimelineBasedMetric):
+  """ This Metric aggregates various V8 runtime measurements."""
+  _EVENTS = ('v8.run', 'v8.compile', 'V8.Execute', 'WindowProxy::initialize',)
+  _RENDERER_MAIN_THREAD = 'CrRendererMain'
+
+  def __init__(self):
+    super(V8ExecutionMetric, self).__init__()
+    self._stats = [
+      V8TotalTimeStats('v8_execution_time_total', ['V8.Execute']),
+      V8SelfTimeStats('v8_execution_time_self', ['V8.Execute']),
+      V8SelfTimeStats('v8_parse_lazy_total',
+                      ['V8.ParseLazy', 'V8.ParseLazyMicroSeconds']),
+      V8SelfTimeStats('v8_compile_fullcode_total',
+                      ['V8.CompileFullCode']),
+      V8SelfTimeStats('v8_compile_ignition_total',
+                      ['V8.CompileIgnition']),
+      V8TotalTimeStats('v8_recompile_total',
+                       ['V8.RecompileSynchronous',
+                         'V8.RecompileConcurrent']),
+      V8TotalTimeStats('v8_recompile_synchronous_total',
+                       ['V8.RecompileSynchronous']),
+      V8TotalTimeStats('v8_recompile_concurrent_total',
+                       ['V8.RecompileConcurrent']),
+      V8TotalTimeStats('v8_optimize_code_total', ['V8.OptimizeCode']),
+      V8TotalTimeStats('v8_deoptimize_code_total', ['V8.DeoptimizeCode']),
+      V8OptimizeParseLazyStats('v8_optimize_parse_lazy_total'),
+    ]
+    self._name_to_stats = {}
+    for stat in self._stats:
+      for event_name in stat.event_names:
+        if event_name not in self._name_to_stats:
+          self._name_to_stats[event_name] = [stat]
+        else:
+          self._name_to_stats[event_name].append(stat)
+
+  def AddResults(self, timeline_model, renderer_thread, interactions, results):
+    self.VerifyNonOverlappedRecords(interactions)
+    self._ResetMetrics()
+    self._CollectEvents(timeline_model, interactions)
+    self._AddMetricResults(results, interactions[0].label)
+
+  def _ResetMetrics(self):
+    for metric in self._stats:
+      metric.Reset()
+
+  def _CollectEvents(self, timeline_model, interactions):
+    for event in timeline_model.IterAllSlices():
+      if not timeline_based_metric.IsEventInInteractions(event, interactions):
+        continue
+      self._CollectEvent(event)
+
+  def _CollectEvent(self, event):
+    if event.name not in self._name_to_stats:
+      return
+    for stat in self._name_to_stats[event.name]:
+      stat.CollectEvent(event)
+
+  def _AddMetricResults(self, results, label):
+    for stat in self._stats:
+      stat.AddResults(results, label)
+
+
+class V8TimeStats(object):
+  def __init__(self, name, event_names, description=None):
+    self.name = name
+    self.event_names = event_names
+    self.description = description
+    self.durations = []
+
+  def Reset(self):
+    self.durations = []
+
+  def Duration(self):
+    return sum(self.durations)
+
+  def Count(self):
+    return len(self.durations)
+
+  def Average(self):
+    return statistics.DivideIfPossibleOrZero(self.Duration(), self.Count())
+
+  def AddResults(self, results, label):
+    results.AddValue(
+      scalar.ScalarValue(
+          results.current_page,
+          self.name, 'ms', self.Duration(),
+          description=self.description,
+          tir_label=label))
+    results.AddValue(
+      scalar.ScalarValue(
+          results.current_page,
+          "%s_count" % self.name, 'count', self.Count(),
+          description=self.description,
+          tir_label=label))
+    results.AddValue(
+      scalar.ScalarValue(
+          results.current_page,
+          "%s_average" % self.name, 'ms', self.Average(),
+          description=self.description,
+          tir_label=label))
+
+  def CollectEvent(self, event):
+    raise NotImplementedError()
+
+
+class V8TotalTimeStats(V8TimeStats):
+  def CollectEvent(self, event):
+    self.durations.append(event.duration)
+
+
+class V8SelfTimeStats(V8TimeStats):
+  def CollectEvent(self, event):
+    self.durations.append(event.self_time)
+
+
+class V8OptimizeParseLazyStats(V8TimeStats):
+  def __init__(self, name):
+    super(V8OptimizeParseLazyStats, self).__init__(
+      name,
+      ['V8.ParseLazy', 'V8.ParseLazyMicroSeconds'],
+      'Time spent in lazy-parsing due to optimizing code')
+
+  def CollectEvent(self, event):
+    if event.parent_slice is None or \
+       event.parent_slice.name != "V8.OptimizeCode":
+      return
+    self.durations.append(event.self_time)
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/v8_execution_unittest.py b/catapult/telemetry/telemetry/web_perf/metrics/v8_execution_unittest.py
new file mode 100644
index 0000000..1ac2ca4
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/v8_execution_unittest.py
@@ -0,0 +1,138 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.testing import test_page_test_results
+from telemetry.timeline import slice as slice_module
+from telemetry.timeline import model as model_module
+from telemetry.web_perf import timeline_interaction_record as tir_module
+
+from telemetry.web_perf.metrics import v8_execution
+
+RENDERER_PROCESS = 'Renderer'
+OTHER_PROCESS = 'Other'
+INTERACTION_RECORDS = [tir_module.TimelineInteractionRecord("test-record",
+                                                            0,
+                                                            float('inf'))]
+
+STATS = ('v8_execution_time_total', 'v8_execution_time_self',
+         'v8_parse_lazy_total', 'v8_compile_fullcode_total',
+         'v8_recompile_total', 'v8_recompile_synchronous_total',
+         'v8_recompile_concurrent_total', 'v8_optimize_code_total',
+         'v8_deoptimize_code_total',)
+
+
+class SliceContext(object):
+  """
+  Context object for easily adding subslices/subevents.
+  """
+  def __init__(self, test, record):
+    self.test = test
+    self.record = record
+
+  def __enter__(self):
+    self.test.parent_slice = self.record
+
+  def __exit__(self, exc_type, exc_value, exc_traceback):
+    self.test.parent_slice = self.record.parent_slice
+
+
+class V8ExecutionTests(unittest.TestCase):
+
+  def setUp(self):
+    self.model = model_module.TimelineModel()
+    self.renderer_process = self.model.GetOrCreateProcess(1)
+    self.renderer_process.name = RENDERER_PROCESS
+    self.renderer_thread = self.renderer_process.GetOrCreateThread(tid=11)
+    self.other_process = self.model.GetOrCreateProcess(2)
+    self.other_process.name = OTHER_PROCESS
+    self.other_thread = self.other_process.GetOrCreateThread(tid=12)
+    self.metric = v8_execution.V8ExecutionMetric()
+    self.results = None
+    self.parent_slice = None
+
+  def GetThreadForProcessName(self, process_name):
+    if process_name is RENDERER_PROCESS:
+      return self.renderer_thread
+    elif process_name is OTHER_PROCESS:
+      return self.other_thread
+    else:
+      raise
+
+  def AddResults(self):
+    self.results = test_page_test_results.TestPageTestResults(self)
+    self.metric.AddResults(self.model, self.renderer_thread,
+                           INTERACTION_RECORDS, self.results)
+
+  def AddEvent(self, process_name, event_category, event_name,
+               start, duration, thread_start=None, thread_duration=None):
+    thread = self.GetThreadForProcessName(process_name)
+    record = slice_module.Slice(thread, event_category, event_name,
+                start, duration,
+                start if thread_start is None else thread_start,
+                duration if thread_duration is None else thread_duration)
+    thread.PushSlice(record)
+    if self.parent_slice is not None:
+      record.parent_slice = self.parent_slice
+      self.parent_slice.AddSubSlice(record)
+    return SliceContext(self, record)
+
+  def AssertResultValues(self, name, value, count, average):
+    self.results.AssertHasPageSpecificScalarValue('%s' % name, 'ms', value)
+    self.results.AssertHasPageSpecificScalarValue('%s_count' % name, 'count',
+                                                  count)
+    self.results.AssertHasPageSpecificScalarValue('%s_average' % name, 'ms',
+                                                  average)
+
+  def testWithNoTraceEvents(self):
+    self.AddResults()
+    for name in STATS:
+      self.AssertResultValues(name, value=0, count=0, average=0)
+
+  def testExecutionTime(self):
+    self.AddEvent(RENDERER_PROCESS, '', 'V8.Execute', 0, 10)
+    with self.AddEvent(RENDERER_PROCESS, '', 'V8.Execute', 10, 20):
+      self.AddEvent(RENDERER_PROCESS, '', 'other', 10, 12)
+    self.AddResults()
+    self.AssertResultValues('v8_execution_time_total', value=30, count=2,
+                            average=15)
+    self.AssertResultValues('v8_execution_time_self', value=18, count=2,
+                            average=9)
+
+  def testOptimizeParseLazy(self):
+    self.AddEvent(RENDERER_PROCESS, '', 'V8.ParseLazy', 0, 10)
+    self.AddResults()
+    self.AssertResultValues('v8_parse_lazy_total', value=10, count=1,
+                            average=10)
+    self.AssertResultValues('v8_optimize_code_total', value=0, count=0,
+                            average=0)
+    self.AssertResultValues('v8_optimize_parse_lazy_total', value=0, count=0,
+                            average=0)
+
+    with self.AddEvent(RENDERER_PROCESS, '', 'V8.OptimizeCode', 10, 20):
+      self.AddEvent(RENDERER_PROCESS, '', 'V8.ParseLazy', 20, 8)
+    self.AddResults()
+    self.AssertResultValues('v8_parse_lazy_total', value=18, count=2, average=9)
+    self.AssertResultValues('v8_optimize_code_total', value=20, count=1,
+                            average=20)
+    self.AssertResultValues('v8_optimize_parse_lazy_total', value=8, count=1,
+                            average=8)
+
+  def testRecompile(self):
+    self.AddEvent(RENDERER_PROCESS, '', 'V8.RecompileSynchronous', 0, 10)
+    self.AddResults()
+    self.AssertResultValues('v8_recompile_synchronous_total', value=10, count=1,
+                            average=10)
+    self.AssertResultValues('v8_recompile_concurrent_total', value=0, count=0,
+                            average=0)
+    self.AssertResultValues('v8_recompile_total', value=10, count=1, average=10)
+
+    self.AddEvent(RENDERER_PROCESS, '', 'V8.RecompileConcurrent', 10, 8)
+    self.AddResults()
+    self.AssertResultValues('v8_recompile_synchronous_total', value=10, count=1,
+                            average=10)
+    self.AssertResultValues('v8_recompile_concurrent_total', value=8, count=1,
+                            average=8)
+    self.AssertResultValues('v8_recompile_total', value=18, count=2, average=9)
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/v8_gc_latency.py b/catapult/telemetry/telemetry/web_perf/metrics/v8_gc_latency.py
new file mode 100644
index 0000000..9d385f6
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/v8_gc_latency.py
@@ -0,0 +1,199 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.util import statistics
+from telemetry.value import improvement_direction
+from telemetry.value import scalar
+from telemetry.web_perf.metrics import timeline_based_metric
+
+import logging
+
+class V8EventStat(object):
+
+  def __init__(self, src_event_name, result_name, result_description):
+    self.src_event_name = src_event_name
+    self.result_name = result_name
+    self.result_description = result_description
+    self.thread_duration = 0.0
+    self.thread_duration_inside_idle = 0.0
+    self.idle_task_overrun_duration = 0.0
+    self.max_thread_duration = 0.0
+    self.count = 0
+
+  @property
+  def thread_duration_outside_idle(self):
+    return self.thread_duration - self.thread_duration_inside_idle
+
+  @property
+  def percentage_thread_duration_during_idle(self):
+    return statistics.DivideIfPossibleOrZero(
+        100 * self.thread_duration_inside_idle, self.thread_duration)
+
+class V8GCLatency(timeline_based_metric.TimelineBasedMetric):
+  _RENDERER_MAIN_THREAD = 'CrRendererMain'
+  _IDLE_TASK_PARENT = 'SingleThreadIdleTaskRunner::RunTask'
+
+  def __init__(self):
+    super(V8GCLatency, self).__init__()
+
+  def AddResults(self, model, renderer_thread, interaction_records, results):
+    self.VerifyNonOverlappedRecords(interaction_records)
+    self._AddV8MetricsToResults(model, interaction_records, results)
+
+  def _AddV8MetricsToResults(self, model,
+                             interaction_records, results):
+    self._AddV8EventStatsToResults(model, interaction_records, results)
+
+  def _AddV8EventStatsToResults(self, model, interactions, results):
+    v8_event_stats = [
+        V8EventStat('V8.GCIncrementalMarking',
+                    'v8_gc_incremental_marking',
+                    'incremental marking steps'),
+        V8EventStat('V8.GCScavenger',
+                    'v8_gc_scavenger',
+                    'scavenges'),
+        V8EventStat('V8.GCCompactor',
+                    'v8_gc_mark_compactor',
+                    'mark-sweep-compactor'),
+        V8EventStat('V8.GCFinalizeMC',
+                    'v8_gc_finalize_incremental',
+                    'finalization of incremental marking'),
+        V8EventStat('V8.GCFinalizeMCReduceMemory',
+                    'v8_gc_finalize_incremental_reduce_memory',
+                    'finalization of incremental marking with memory reducer')]
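+    # Each stat above is reported below as a family of scalar values derived
+    # from its result_name, e.g. 'v8_gc_scavenger' plus the '_max', '_count',
+    # '_average', '_outside_idle', '_idle_deadline_overrun' and
+    # '_percentage_idle' variants.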
+    label = interactions[0].label
+    name_to_v8_stat = {x.src_event_name : x for x in v8_event_stats}
+    thread_time_not_available = False
+    for event in model.IterAllSlices():
+      if (not timeline_based_metric.IsEventInInteractions(event, interactions)
+          or event.name not in name_to_v8_stat):
+        continue
+      event_stat = name_to_v8_stat[event.name]
+      if event.thread_duration is None:
+        thread_time_not_available = True
+        event_duration = event.duration
+      else:
+        event_duration = event.thread_duration
+      event_stat.thread_duration += event_duration
+      event_stat.max_thread_duration = max(event_stat.max_thread_duration,
+                                           event_duration)
+      event_stat.count += 1
+
+      parent_idle_task = self._ParentIdleTask(event)
+      if parent_idle_task:
+        allotted_idle_time = parent_idle_task.args['allotted_time_ms']
+        idle_task_wall_overrun = 0
+        if event.duration > allotted_idle_time:
+          idle_task_wall_overrun = event.duration - allotted_idle_time
+        # Don't count time over the deadline as being inside idle time.
+        # Since the deadline should be relative to wall clock we compare
+        # allotted_time_ms with wall duration instead of thread duration, and
+        # then assume the thread duration was inside idle for the same
+        # percentage of time.
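+        # For example, with wall duration 14, thread duration 4 and
+        # allotted_time_ms 8 (as in the wall-duration-overrun unit test),
+        # the wall overrun is 6 and the thread time counted as inside idle
+        # is 4 * (14 - 6) / 14, roughly 2.3.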
+        inside_idle = event_duration * statistics.DivideIfPossibleOrZero(
+            event.duration - idle_task_wall_overrun, event.duration)
+        event_stat.thread_duration_inside_idle += inside_idle
+        event_stat.idle_task_overrun_duration += idle_task_wall_overrun
+
+    if thread_time_not_available:
+      logging.warning(
+          'thread time is not available in trace data, switching to wall time')
+
+    for v8_event_stat in v8_event_stats:
+      results.AddValue(scalar.ScalarValue(
+          results.current_page, v8_event_stat.result_name, 'ms',
+          v8_event_stat.thread_duration,
+          description=('Total thread duration spent in %s' %
+                       v8_event_stat.result_description),
+          tir_label=label,
+          improvement_direction=improvement_direction.DOWN))
+      results.AddValue(scalar.ScalarValue(
+          results.current_page, '%s_max' % v8_event_stat.result_name, 'ms',
+          v8_event_stat.max_thread_duration,
+          description=('Max thread duration spent in %s' %
+                       v8_event_stat.result_description),
+          tir_label=label))
+      results.AddValue(scalar.ScalarValue(
+          results.current_page, '%s_count' % v8_event_stat.result_name, 'count',
+          v8_event_stat.count,
+          description=('Number of %s' %
+                       v8_event_stat.result_description),
+          tir_label=label,
+          improvement_direction=improvement_direction.DOWN))
+      average_thread_duration = statistics.DivideIfPossibleOrZero(
+          v8_event_stat.thread_duration, v8_event_stat.count)
+      results.AddValue(scalar.ScalarValue(
+          results.current_page, '%s_average' % v8_event_stat.result_name, 'ms',
+          average_thread_duration,
+          description=('Average thread duration spent in %s' %
+                       v8_event_stat.result_description),
+          tir_label=label,
+          improvement_direction=improvement_direction.DOWN))
+      results.AddValue(scalar.ScalarValue(results.current_page,
+          '%s_outside_idle' % v8_event_stat.result_name, 'ms',
+          v8_event_stat.thread_duration_outside_idle,
+          description=(
+              'Total thread duration spent in %s outside of idle tasks' %
+              v8_event_stat.result_description),
+          tir_label=label))
+      results.AddValue(scalar.ScalarValue(results.current_page,
+          '%s_idle_deadline_overrun' % v8_event_stat.result_name, 'ms',
+          v8_event_stat.idle_task_overrun_duration,
+          description=('Total idle task deadline overrun for %s idle tasks'
+                       % v8_event_stat.result_description),
+          tir_label=label,
+          improvement_direction=improvement_direction.DOWN))
+      results.AddValue(scalar.ScalarValue(results.current_page,
+          '%s_percentage_idle' % v8_event_stat.result_name, 'idle%',
+          v8_event_stat.percentage_thread_duration_during_idle,
+          description=('Percentage of %s spent in idle time' %
+                       v8_event_stat.result_description),
+          tir_label=label,
+          improvement_direction=improvement_direction.UP))
+
+    # Add total metrics.
+    gc_total = sum(x.thread_duration for x in v8_event_stats)
+    gc_total_outside_idle = sum(
+        x.thread_duration_outside_idle for x in v8_event_stats)
+    gc_total_idle_deadline_overrun = sum(
+        x.idle_task_overrun_duration for x in v8_event_stats)
+    gc_total_percentage_idle = statistics.DivideIfPossibleOrZero(
+        100 * (gc_total - gc_total_outside_idle), gc_total)
+
+    results.AddValue(scalar.ScalarValue(results.current_page,
+        'v8_gc_total', 'ms', gc_total,
+        description='Total thread duration of all garbage collection events',
+        tir_label=label,
+        improvement_direction=improvement_direction.DOWN))
+    results.AddValue(scalar.ScalarValue(results.current_page,
+        'v8_gc_total_outside_idle', 'ms', gc_total_outside_idle,
+        description=(
+            'Total thread duration of all garbage collection events outside of '
+            'idle tasks'),
+        tir_label=label,
+        improvement_direction=improvement_direction.DOWN))
+    results.AddValue(scalar.ScalarValue(results.current_page,
+        'v8_gc_total_idle_deadline_overrun', 'ms',
+        gc_total_idle_deadline_overrun,
+        description=(
+            'Total idle task deadline overrun for all idle task garbage '
+            'collection events'),
+        tir_label=label,
+        improvement_direction=improvement_direction.DOWN))
+    results.AddValue(scalar.ScalarValue(results.current_page,
+        'v8_gc_total_percentage_idle', 'idle%', gc_total_percentage_idle,
+        description=(
+            'Percentage of the thread duration of all garbage collection '
+            'events spent inside of idle tasks'),
+        tir_label=label,
+        improvement_direction=improvement_direction.UP))
+
+  def _ParentIdleTask(self, event):
+    parent = event.parent_slice
+    while parent:
+      if parent.name == self._IDLE_TASK_PARENT:
+        return parent
+      parent = parent.parent_slice
+    return None
+
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/v8_gc_latency_unittest.py b/catapult/telemetry/telemetry/web_perf/metrics/v8_gc_latency_unittest.py
new file mode 100644
index 0000000..03c850a
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/v8_gc_latency_unittest.py
@@ -0,0 +1,445 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.internal.results import page_test_results
+from telemetry.page import page as page_module
+from telemetry.testing import options_for_unittests
+from telemetry.testing import page_test_test_case
+from telemetry.timeline import model as model_module
+from telemetry.util import wpr_modes
+
+from telemetry.web_perf.metrics import v8_gc_latency
+from telemetry.web_perf import timeline_interaction_record
+
+class V8EventStat(object):
+
+  def __init__(self, src_event_name, result_name, result_description):
+    self.src_event_name = src_event_name
+    self.result_name = result_name
+    self.result_description = result_description
+    self.thread_duration = 0.0
+    self.thread_duration_inside_idle = 0.0
+    self.idle_task_overrun_duration = 0.0
+    self.max_thread_duration = 0.0
+    self.count = 0
+
+class V8GCLatencyTestPageHelper(object):
+
+  def __init__(self, page_set):
+    self._page_set = page_set
+    self._model = model_module.TimelineModel()
+    self._renderer_process = self._model.GetOrCreateProcess(1)
+    self._renderer_thread = self._renderer_process.GetOrCreateThread(2)
+    self._renderer_thread.name = 'CrRendererMain'
+    self._interaction_records = []
+
+  def AddEvent(self, category, name, thread_start, thread_duration,
+               args=None, wall_start=None, wall_duration=None):
+    wall_start = wall_start or thread_start
+    wall_duration = wall_duration or thread_duration
+    self._renderer_thread.BeginSlice(category, name, wall_start, thread_start,
+                                     args=args)
+    self._renderer_thread.EndSlice(wall_start + wall_duration,
+                                   thread_start + thread_duration)
+
+  def AddEventWithoutThreadDuration(self, category, name,
+                                    wall_start, wall_duration):
+    self._renderer_thread.BeginSlice(category, name, wall_start)
+    self._renderer_thread.EndSlice(wall_start + wall_duration)
+
+  def AddInteractionRecord(self, label, start, end):
+    self._interaction_records.append(
+      timeline_interaction_record.TimelineInteractionRecord(label, start, end))
+
+  class MockV8GCLatencyPage(page_module.Page):
+
+    def __init__(self, page_set):
+      super(V8GCLatencyTestPageHelper.MockV8GCLatencyPage, self).__init__(
+          'file://blank.html', page_set, page_set.base_dir)
+
+  def MeasureFakePage(self):
+    # Create a fake page and add it to the page set.
+    results = page_test_results.PageTestResults()
+    page = V8GCLatencyTestPageHelper.MockV8GCLatencyPage(self._page_set)
+    self._page_set.AddStory(page)
+
+    # Pretend we're about to run the tests to silence lower level asserts.
+    results.WillRunPage(page)
+
+    metric = v8_gc_latency.V8GCLatency()
+
+    # Finalize the timeline import.
+    self._model.FinalizeImport()
+
+    for interaction in self._interaction_records:
+      # Measure the V8GCLatency metric and return the results
+      # pylint: disable=protected-access
+      metric._AddV8MetricsToResults(self._model, [interaction], results)
+    results.DidRunPage(page)
+    return results
+
+
+class V8GCLatencyTests(page_test_test_case.PageTestTestCase):
+
+  def setUp(self):
+    self._options = options_for_unittests.GetCopy()
+    self._options.browser_options.wpr_mode = wpr_modes.WPR_OFF
+
+  def testWithNoTraceEvents(self):
+    test_page_helper = V8GCLatencyTestPageHelper(
+        self.CreateEmptyPageSet())
+    test_page_helper.AddInteractionRecord('Action', 0, 32)
+
+    results = test_page_helper.MeasureFakePage()
+    self._AssertResultsEqual(_GetEmptyResults(), _ActualValues(results))
+
+  def testWithNoGarbageCollectionEvents(self):
+    test_page_helper = V8GCLatencyTestPageHelper(
+        self.CreateEmptyPageSet())
+
+    test_page_helper.AddInteractionRecord('Action', 0, 32)
+    test_page_helper.AddEvent('toplevel', 'PostMessage',
+        thread_start=0, thread_duration=14, wall_start=5, wall_duration=35)
+
+    results = test_page_helper.MeasureFakePage()
+    expected = _GetEmptyResults()
+
+    self._AssertResultsEqual(expected, _ActualValues(results))
+
+  def testWithGarbageCollectionEvents(self):
+    test_page_helper = V8GCLatencyTestPageHelper(
+        self.CreateEmptyPageSet())
+
+    test_page_helper.AddInteractionRecord('Action', 0, 88)
+    test_page_helper.AddEvent('toplevel', 'PostMessage',
+        thread_start=0, thread_duration=77, wall_start=5, wall_duration=88)
+    test_page_helper.AddEvent('v8', 'V8.GCScavenger', 5, 4)
+    test_page_helper.AddEvent('v8', 'V8.GCScavenger', 15, 3)
+    test_page_helper.AddEvent('v8', 'V8.GCIncrementalMarking', 23, 4)
+    test_page_helper.AddEvent('v8', 'V8.GCIncrementalMarking', 34, 2)
+    test_page_helper.AddEvent('v8', 'V8.GCFinalizeMC', 38, 2)
+    test_page_helper.AddEvent('v8', 'V8.GCFinalizeMC', 42, 3)
+    test_page_helper.AddEvent('v8', 'V8.GCFinalizeMCReduceMemory', 46, 4)
+    test_page_helper.AddEvent('v8', 'V8.GCFinalizeMCReduceMemory', 51, 5)
+    test_page_helper.AddEvent('v8', 'V8.GCCompactor', 62, 4)
+    test_page_helper.AddEvent('v8', 'V8.GCCompactor', 72, 5)
+
+    results = test_page_helper.MeasureFakePage()
+    expected = _GetEmptyResults()
+    expected['v8_gc_incremental_marking'] = ('ms', 6.0)
+    expected['v8_gc_incremental_marking_average'] = ('ms', 3.0)
+    expected['v8_gc_incremental_marking_count'] = ('count', 2)
+    expected['v8_gc_incremental_marking_max'] = ('ms', 4.0)
+    expected['v8_gc_incremental_marking_outside_idle'] = ('ms', 6.0)
+    expected['v8_gc_finalize_incremental'] = ('ms', 5.0)
+    expected['v8_gc_finalize_incremental_average'] = ('ms', 2.5)
+    expected['v8_gc_finalize_incremental_count'] = ('count', 2)
+    expected['v8_gc_finalize_incremental_max'] = ('ms', 3.0)
+    expected['v8_gc_finalize_incremental_outside_idle'] = ('ms', 5.0)
+    expected['v8_gc_finalize_incremental_reduce_memory'] = ('ms', 9.0)
+    expected['v8_gc_finalize_incremental_reduce_memory_average'] = ('ms', 4.5)
+    expected['v8_gc_finalize_incremental_reduce_memory_count'] = ('count', 2)
+    expected['v8_gc_finalize_incremental_reduce_memory_max'] = ('ms', 5.0)
+    expected['v8_gc_finalize_incremental_reduce_memory_outside_idle'] = (
+        'ms', 9.0)
+    expected['v8_gc_scavenger'] = ('ms', 7.0)
+    expected['v8_gc_scavenger_average'] = ('ms', 3.5)
+    expected['v8_gc_scavenger_count'] = ('count', 2)
+    expected['v8_gc_scavenger_max'] = ('ms', 4.0)
+    expected['v8_gc_scavenger_outside_idle'] = ('ms', 7.0)
+    expected['v8_gc_mark_compactor'] = ('ms', 9.0)
+    expected['v8_gc_mark_compactor_average'] = ('ms', 4.5)
+    expected['v8_gc_mark_compactor_count'] = ('count', 2)
+    expected['v8_gc_mark_compactor_max'] = ('ms', 5.0)
+    expected['v8_gc_mark_compactor_outside_idle'] = ('ms', 9.0)
+    expected['v8_gc_total'] = ('ms', 36.0)
+    expected['v8_gc_total_outside_idle'] = ('ms', 36.0)
+
+    self._AssertResultsEqual(expected, _ActualValues(results))
+
+  def testWithIdleTaskGarbageCollectionEvents(self):
+    test_page_helper = V8GCLatencyTestPageHelper(
+        self.CreateEmptyPageSet())
+
+    test_page_helper.AddInteractionRecord('Action', 0, 68)
+    test_page_helper.AddEvent('toplevel', 'PostMessage',
+        thread_start=0, thread_duration=57, wall_start=5, wall_duration=68)
+
+    test_page_helper.AddEvent('v8', 'V8.GCScavenger', 5, 4)
+    test_page_helper.AddEvent('renderer.scheduler',
+        'SingleThreadIdleTaskRunner::RunTask', 15, 4, {'allotted_time_ms': 12})
+    test_page_helper.AddEvent('v8', 'V8.GCScavenger', 15, 3)
+
+    test_page_helper.AddEvent('v8', 'V8.GCIncrementalMarking', 23, 4)
+    test_page_helper.AddEvent('renderer.scheduler',
+        'SingleThreadIdleTaskRunner::RunTask', 34, 3, {'allotted_time_ms': 12})
+    test_page_helper.AddEvent('v8', 'V8.GCIncrementalMarking', 34, 2)
+
+    test_page_helper.AddEvent('v8', 'V8.GCCompactor', 42, 4)
+    test_page_helper.AddEvent('renderer.scheduler',
+        'SingleThreadIdleTaskRunner::RunTask', 52, 6, {'allotted_time_ms': 12})
+    test_page_helper.AddEvent('v8', 'V8.GCCompactor', 52, 5)
+
+    results = test_page_helper.MeasureFakePage()
+    expected = _GetEmptyResults()
+    expected['v8_gc_incremental_marking'] = ('ms', 6.0)
+    expected['v8_gc_incremental_marking_average'] = ('ms', 3.0)
+    expected['v8_gc_incremental_marking_count'] = ('count', 2)
+    expected['v8_gc_incremental_marking_max'] = ('ms', 4.0)
+    expected['v8_gc_incremental_marking_outside_idle'] = ('ms', 4.0)
+    expected['v8_gc_incremental_marking_percentage_idle'] = \
+        ('idle%', 100 * 2 / 6.0)
+    expected['v8_gc_scavenger'] = ('ms', 7.0)
+    expected['v8_gc_scavenger_average'] = ('ms', 3.5)
+    expected['v8_gc_scavenger_count'] = ('count', 2)
+    expected['v8_gc_scavenger_max'] = ('ms', 4.0)
+    expected['v8_gc_scavenger_outside_idle'] = ('ms', 4.0)
+    expected['v8_gc_scavenger_percentage_idle'] = ('idle%', 100 * 3 / 7.0)
+    expected['v8_gc_mark_compactor'] = ('ms', 9.0)
+    expected['v8_gc_mark_compactor_average'] = ('ms', 4.5)
+    expected['v8_gc_mark_compactor_count'] = ('count', 2)
+    expected['v8_gc_mark_compactor_max'] = ('ms', 5.0)
+    expected['v8_gc_mark_compactor_outside_idle'] = ('ms', 4.0)
+    expected['v8_gc_mark_compactor_percentage_idle'] = ('idle%', 100 * 5 / 9.0)
+    expected['v8_gc_total'] = ('ms', 22.0)
+    expected['v8_gc_total_outside_idle'] = ('ms', 12.0)
+    expected['v8_gc_total_percentage_idle'] = ('idle%', 100 * 10 / 22.0)
+
+    self._AssertResultsEqual(expected, _ActualValues(results))
+
+  def testWithIdleTaskOverruns(self):
+    test_page_helper = V8GCLatencyTestPageHelper(
+        self.CreateEmptyPageSet())
+
+    test_page_helper.AddInteractionRecord('Action', 0, 92)
+    test_page_helper.AddEvent('toplevel', 'PostMessage',
+        thread_start=0, thread_duration=80, wall_start=5, wall_duration=92)
+
+    test_page_helper.AddEvent('renderer.scheduler',
+        'SingleThreadIdleTaskRunner::RunTask', 15, 15, {'allotted_time_ms': 8})
+    test_page_helper.AddEvent('v8', 'V8.GCScavenger', 15, 14)
+
+    test_page_helper.AddEvent('renderer.scheduler',
+        'SingleThreadIdleTaskRunner::RunTask', 34, 15, {'allotted_time_ms': 6})
+    test_page_helper.AddEvent('v8', 'V8.GCIncrementalMarking', 34, 14)
+
+    test_page_helper.AddEvent('renderer.scheduler',
+        'SingleThreadIdleTaskRunner::RunTask', 52, 23, {'allotted_time_ms': 9})
+    test_page_helper.AddEvent('v8', 'V8.GCCompactor', 52, 22)
+
+    results = test_page_helper.MeasureFakePage()
+    expected = _GetEmptyResults()
+    expected['v8_gc_incremental_marking'] = ('ms', 14.0)
+    expected['v8_gc_incremental_marking_average'] = ('ms', 14.0)
+    expected['v8_gc_incremental_marking_count'] = ('count', 1)
+    expected['v8_gc_incremental_marking_max'] = ('ms', 14.0)
+    expected['v8_gc_incremental_marking_outside_idle'] = ('ms', 8.0)
+    expected['v8_gc_incremental_marking_idle_deadline_overrun'] = ('ms', 8.0)
+    expected['v8_gc_incremental_marking_percentage_idle'] = \
+        ('idle%', 100 * 6 / 14.0)
+    expected['v8_gc_scavenger'] = ('ms', 14.0)
+    expected['v8_gc_scavenger_average'] = ('ms', 14.0)
+    expected['v8_gc_scavenger_count'] = ('count', 1)
+    expected['v8_gc_scavenger_max'] = ('ms', 14.0)
+    expected['v8_gc_scavenger_outside_idle'] = ('ms', 6.0)
+    expected['v8_gc_scavenger_idle_deadline_overrun'] = ('ms', 6.0)
+    expected['v8_gc_scavenger_percentage_idle'] = ('idle%', 100 * 8 / 14.0)
+    expected['v8_gc_mark_compactor'] = ('ms', 22.0)
+    expected['v8_gc_mark_compactor_average'] = ('ms', 22.0)
+    expected['v8_gc_mark_compactor_count'] = ('count', 1)
+    expected['v8_gc_mark_compactor_max'] = ('ms', 22.0)
+    expected['v8_gc_mark_compactor_outside_idle'] = ('ms', 13.0)
+    expected['v8_gc_mark_compactor_idle_deadline_overrun'] = ('ms', 13.0)
+    expected['v8_gc_mark_compactor_percentage_idle'] = ('idle%', 100 * 9 / 22.0)
+    expected['v8_gc_total'] = ('ms', 50.0)
+    expected['v8_gc_total_outside_idle'] = ('ms', 27.0)
+    expected['v8_gc_total_idle_deadline_overrun'] = ('ms', 27.0)
+    expected['v8_gc_total_percentage_idle'] = ('idle%', 100 * 23 / 50.0)
+
+    self._AssertResultsEqual(expected, _ActualValues(results))
+
+  def testWithIdleTaskWallDurationOverruns(self):
+    test_page_helper = V8GCLatencyTestPageHelper(
+        self.CreateEmptyPageSet())
+
+    test_page_helper.AddInteractionRecord('Action', 0, 92)
+    test_page_helper.AddEvent('toplevel', 'PostMessage',
+        thread_start=0, thread_duration=80, wall_start=5, wall_duration=92)
+
+    test_page_helper.AddEvent('renderer.scheduler',
+        'SingleThreadIdleTaskRunner::RunTask', 15, 15, {'allotted_time_ms': 8})
+    test_page_helper.AddEvent('v8', 'V8.GCScavenger',
+        thread_start=15, thread_duration=4, wall_start=15, wall_duration=14)
+
+    results = test_page_helper.MeasureFakePage()
+    expected = _GetEmptyResults()
+    expected['v8_gc_scavenger'] = ('ms', 4.0)
+    expected['v8_gc_scavenger_average'] = ('ms', 4.0)
+    expected['v8_gc_scavenger_count'] = ('count', 1)
+    expected['v8_gc_scavenger_max'] = ('ms', 4.0)
+    expected_outside_idle = 4.0 - (4.0 * 8 / 14)
+    expected['v8_gc_scavenger_outside_idle'] = ('ms', expected_outside_idle)
+    expected['v8_gc_scavenger_idle_deadline_overrun'] = ('ms', 6.0)
+    expected['v8_gc_scavenger_percentage_idle'] = \
+        ('idle%', 100 * (4.0 - expected_outside_idle) / 4.0)
+    expected['v8_gc_total'] = expected['v8_gc_scavenger']
+    expected['v8_gc_total_outside_idle'] = \
+        expected['v8_gc_scavenger_outside_idle']
+    expected['v8_gc_total_idle_deadline_overrun'] = \
+        expected['v8_gc_scavenger_idle_deadline_overrun']
+    expected['v8_gc_total_percentage_idle'] = \
+        expected['v8_gc_scavenger_percentage_idle']
+
+    self._AssertResultsEqual(expected, _ActualValues(results))
+
+  def testWithMultipleInteractionRecords(self):
+    test_page_helper = V8GCLatencyTestPageHelper(
+        self.CreateEmptyPageSet())
+
+    test_page_helper.AddInteractionRecord('Action1', 5, 18)
+    test_page_helper.AddInteractionRecord('Action2', 19, 57)
+    test_page_helper.AddInteractionRecord('Action3', 60, 68)
+    test_page_helper.AddEvent('toplevel', 'PostMessage',
+        thread_start=0, thread_duration=57, wall_start=5, wall_duration=68)
+
+    # This event is not in any interaction record.
+    test_page_helper.AddEvent('v8', 'V8.GCScavenger', 0, 1)
+
+    # These events are in Action1.
+    test_page_helper.AddEvent('v8', 'V8.GCScavenger', 5, 4)
+    test_page_helper.AddEvent('renderer.scheduler',
+        'SingleThreadIdleTaskRunner::RunTask', 15, 4, {'allotted_time_ms': 12})
+    test_page_helper.AddEvent('v8', 'V8.GCScavenger', 15, 3)
+
+    # These events are in Action2.
+    test_page_helper.AddEvent('v8', 'V8.GCIncrementalMarking', 23, 4)
+    test_page_helper.AddEvent('renderer.scheduler',
+        'SingleThreadIdleTaskRunner::RunTask', 34, 3, {'allotted_time_ms': 12})
+    test_page_helper.AddEvent('v8', 'V8.GCIncrementalMarking', 34, 2)
+    test_page_helper.AddEvent('v8', 'V8.GCCompactor', 42, 4)
+    test_page_helper.AddEvent('renderer.scheduler',
+        'SingleThreadIdleTaskRunner::RunTask', 52, 6, {'allotted_time_ms': 12})
+    test_page_helper.AddEvent('v8', 'V8.GCCompactor', 52, 5)
+
+    # This event is not in any interaction record.
+    test_page_helper.AddEvent('v8', 'V8.GCScavenger', 58, 1)
+
+    results = test_page_helper.MeasureFakePage()
+    expected = _GetEmptyResults()
+    expected['v8_gc_scavenger'] = ('ms', 7.0)
+    expected['v8_gc_scavenger_average'] = ('ms', 3.5)
+    expected['v8_gc_scavenger_count'] = ('count', 2)
+    expected['v8_gc_scavenger_max'] = ('ms', 4.0)
+    expected['v8_gc_scavenger_outside_idle'] = ('ms', 4.0)
+    expected['v8_gc_scavenger_percentage_idle'] = ('idle%', 100 * 3 / 7.0)
+    expected['v8_gc_total'] = ('ms', 7.0)
+    expected['v8_gc_total_outside_idle'] = ('ms', 4.0)
+    expected['v8_gc_total_percentage_idle'] = ('idle%', 100 * 3.0 / 7.0)
+
+    self._AssertResultsEqual(expected, _ActualValues(results, 'Action1'))
+
+    expected = _GetEmptyResults()
+    expected['v8_gc_incremental_marking'] = ('ms', 6.0)
+    expected['v8_gc_incremental_marking_average'] = ('ms', 3.0)
+    expected['v8_gc_incremental_marking_count'] = ('count', 2)
+    expected['v8_gc_incremental_marking_max'] = ('ms', 4.0)
+    expected['v8_gc_incremental_marking_outside_idle'] = ('ms', 4.0)
+    expected['v8_gc_incremental_marking_percentage_idle'] = \
+        ('idle%', 100 * 2 / 6.0)
+    expected['v8_gc_mark_compactor'] = ('ms', 9.0)
+    expected['v8_gc_mark_compactor_average'] = ('ms', 4.5)
+    expected['v8_gc_mark_compactor_count'] = ('count', 2)
+    expected['v8_gc_mark_compactor_max'] = ('ms', 5.0)
+    expected['v8_gc_mark_compactor_outside_idle'] = ('ms', 4.0)
+    expected['v8_gc_mark_compactor_percentage_idle'] = ('idle%', 100 * 5 / 9.0)
+    expected['v8_gc_total'] = ('ms', 15.0)
+    expected['v8_gc_total_outside_idle'] = ('ms', 8.0)
+    expected['v8_gc_total_percentage_idle'] = ('idle%', 100 * 7.0 / 15.0)
+
+    self._AssertResultsEqual(expected, _ActualValues(results, 'Action2'))
+
+    expected = _GetEmptyResults()
+    self._AssertResultsEqual(expected, _ActualValues(results, 'Action3'))
+
+
+  def testRegress549150(self):
+    test_page_helper = V8GCLatencyTestPageHelper(
+        self.CreateEmptyPageSet())
+    test_page_helper.AddInteractionRecord('Action', 0, 10)
+    test_page_helper.AddEvent('toplevel', 'PostMessage',
+        thread_start=0, thread_duration=10, wall_start=0, wall_duration=10)
+    test_page_helper.AddEventWithoutThreadDuration(
+        'v8', 'V8.GCScavenger', 0, 4)
+    results = test_page_helper.MeasureFakePage()
+    expected = _GetEmptyResults()
+    expected['v8_gc_scavenger'] = ('ms', 4.0)
+    expected['v8_gc_scavenger_average'] = ('ms', 4.0)
+    expected['v8_gc_scavenger_count'] = ('count', 1)
+    expected['v8_gc_scavenger_max'] = ('ms', 4.0)
+    expected['v8_gc_scavenger_outside_idle'] = ('ms', 4.0)
+    expected['v8_gc_scavenger_percentage_idle'] = ('idle%', 0.0)
+    expected['v8_gc_total'] = ('ms', 4.0)
+    expected['v8_gc_total_outside_idle'] = ('ms', 4.0)
+    expected['v8_gc_total_percentage_idle'] = ('idle%', 0.0)
+
+    self._AssertResultsEqual(expected, _ActualValues(results, 'Action'))
+
+
+  def _AssertResultsEqual(self, expected, actual):
+    for key in expected.iterkeys():
+      self.assertIn(key, actual.keys())
+      self.assertEqual(expected[key], actual[key],
+          'Result for [' + key + '] - expected ' + str(expected[key]) +
+          ' but got ' + str(actual[key]))
+
+
+def _ActualValues(results, interaction_record=''):
+  return dict(list(
+      (v.name, (v.units, v.value))
+      for v in results.all_page_specific_values
+      if (interaction_record == '' or v.tir_label == interaction_record)
+      ))
+
+
+def _GetEmptyResults():
+  return {'v8_gc_incremental_marking': ('ms', 0.0),
+          'v8_gc_incremental_marking_average': ('ms', 0.0),
+          'v8_gc_incremental_marking_count': ('count', 0),
+          'v8_gc_incremental_marking_max': ('ms', 0.0),
+          'v8_gc_incremental_marking_idle_deadline_overrun': ('ms', 0.0),
+          'v8_gc_incremental_marking_outside_idle': ('ms', 0.0),
+          'v8_gc_incremental_marking_percentage_idle': ('idle%', 0.0),
+          'v8_gc_finalize_incremental': ('ms', 0.0),
+          'v8_gc_finalize_incremental_average': ('ms', 0.0),
+          'v8_gc_finalize_incremental_count': ('count', 0),
+          'v8_gc_finalize_incremental_max': ('ms', 0.0),
+          'v8_gc_finalize_incremental_idle_deadline_overrun': ('ms', 0.0),
+          'v8_gc_finalize_incremental_outside_idle': ('ms', 0.0),
+          'v8_gc_finalize_incremental_percentage_idle': ('idle%', 0.0),
+          'v8_gc_finalize_incremental_reduce_memory': ('ms', 0.0),
+          'v8_gc_finalize_incremental_reduce_memory_average': ('ms', 0.0),
+          'v8_gc_finalize_incremental_reduce_memory_count': ('count', 0),
+          'v8_gc_finalize_incremental_reduce_memory_max': ('ms', 0.0),
+          'v8_gc_finalize_incremental_reduce_memory_idle_deadline_overrun':
+              ('ms', 0.0),
+          'v8_gc_finalize_incremental_reduce_memory_outside_idle': ('ms', 0.0),
+          'v8_gc_finalize_incremental_reduce_memory_percentage_idle':
+              ('idle%', 0.0),
+          'v8_gc_mark_compactor': ('ms', 0.0),
+          'v8_gc_mark_compactor_average': ('ms', 0.0),
+          'v8_gc_mark_compactor_count': ('count', 0),
+          'v8_gc_mark_compactor_max': ('ms', 0.0),
+          'v8_gc_mark_compactor_idle_deadline_overrun': ('ms', 0.0),
+          'v8_gc_mark_compactor_outside_idle': ('ms', 0.0),
+          'v8_gc_mark_compactor_percentage_idle': ('idle%', 0.0),
+          'v8_gc_scavenger': ('ms', 0.0),
+          'v8_gc_scavenger_average': ('ms', 0.0),
+          'v8_gc_scavenger_count': ('count', 0),
+          'v8_gc_scavenger_max': ('ms', 0.0),
+          'v8_gc_scavenger_idle_deadline_overrun': ('ms', 0.0),
+          'v8_gc_scavenger_outside_idle': ('ms', 0.0),
+          'v8_gc_scavenger_percentage_idle': ('idle%', 0.0),
+          'v8_gc_total': ('ms', 0.0),
+          'v8_gc_total_idle_deadline_overrun': ('ms', 0.0),
+          'v8_gc_total_outside_idle': ('ms', 0.0)}
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/webrtc_rendering_stats.py b/catapult/telemetry/telemetry/web_perf/metrics/webrtc_rendering_stats.py
new file mode 100644
index 0000000..43276b6
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/webrtc_rendering_stats.py
@@ -0,0 +1,366 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+
+from telemetry.util import statistics
+
+DISPLAY_HERTZ = 60.0
+VSYNC_DURATION = 1e6 / DISPLAY_HERTZ
+# When to consider a frame frozen (in VSYNC units): meaning 1 initial
+# frame + 5 repeats of that frame.
+FROZEN_THRESHOLD = 6
+# Severity factor.
+SEVERITY = 3
+
+IDEAL_RENDER_INSTANT = 'Ideal Render Instant'
+ACTUAL_RENDER_BEGIN = 'Actual Render Begin'
+ACTUAL_RENDER_END = 'Actual Render End'
+SERIAL = 'Serial'
+
+
+class TimeStats(object):
+  """Stats container for webrtc rendering metrics."""
+
+  def __init__(self, drift_time=None, mean_drift_time=None,
+    std_dev_drift_time=None, percent_badly_out_of_sync=None,
+    percent_out_of_sync=None, smoothness_score=None, freezing_score=None,
+    rendering_length_error=None, fps=None, frame_distribution=None):
+    self.drift_time = drift_time
+    self.mean_drift_time = mean_drift_time
+    self.std_dev_drift_time = std_dev_drift_time
+    self.percent_badly_out_of_sync = percent_badly_out_of_sync
+    self.percent_out_of_sync = percent_out_of_sync
+    self.smoothness_score = smoothness_score
+    self.freezing_score = freezing_score
+    self.rendering_length_error = rendering_length_error
+    self.fps = fps
+    self.frame_distribution = frame_distribution
+    self.invalid_data = False
+
+
+class WebMediaPlayerMsRenderingStats(object):
+  """Analyzes events of WebMediaPlayerMs type."""
+
+  def __init__(self, events):
+    """Save relevant events according to their stream."""
+    self.stream_to_events = self._MapEventsToStream(events)
+
+  def _IsEventValid(self, event):
+    """Check that the needed arguments are present in event.
+
+    Args:
+      event: event to check.
+
+    Returns:
+      True if the event is valid, False otherwise."""
+    if not event.args:
+      return False
+    mandatory = [ACTUAL_RENDER_BEGIN, ACTUAL_RENDER_END,
+        IDEAL_RENDER_INSTANT, SERIAL]
+    for parameter in mandatory:
+      if parameter not in event.args:
+        return False
+    return True
+
+  def _MapEventsToStream(self, events):
+    """Build a dictionary of events indexed by stream.
+
+    The events of interest have a 'Serial' argument which represents the
+    stream ID. The 'Serial' argument combines the hash of the video track's
+    URL with a least significant bit of 0 or 1 identifying the local or
+    remote nature of the stream, i.e. stream ::= hash(0|1). The method then
+    groups the events of the same stream into a list keyed on the stream
+    ID. Practically speaking, remote streams have an odd stream ID and
+    local streams have an even stream ID.
+    Args:
+      events: Telemetry WebMediaPlayerMs events.
+
+    Returns:
+      A dict of stream IDs mapped to events on that stream.
+    """
+    stream_to_events = {}
+    for event in events:
+      if not self._IsEventValid(event):
+        # This is not a render event, skip it.
+        continue
+      stream = event.args[SERIAL]
+      events_for_stream = stream_to_events.setdefault(stream, [])
+      events_for_stream.append(event)
+
+    return stream_to_events
+
+  def _GetCadence(self, relevant_events):
+    """Calculate the apparent cadence of the rendering.
+
+    Here 'cadence' is used loosely for a sort of extended, instantaneous
+    cadence (thus not necessarily periodic). As an example, in regex
+    notation, a regular cadence of [2 3] corresponds to an observed frame
+    persistence progression of [{2 3}+] for an ideal 20 FPS video source.
+    What is calculated here is the list of frame persistence values, kind
+    of a 'proto-cadence', but 'cadence' is shorter so we abuse the word.
+
+    Args:
+      relevant_events: list of Telemetry events.
+
+    Returns:
+      a list of frame persistence values.
+    """
+    cadence = []
+    frame_persistence = 0
+    old_ideal_render = 0
+    for event in relevant_events:
+      if not self._IsEventValid(event):
+        # This event is not a render event so skip it.
+        continue
+      if event.args[IDEAL_RENDER_INSTANT] == old_ideal_render:
+        frame_persistence += 1
+      else:
+        cadence.append(frame_persistence)
+        frame_persistence = 1
+        old_ideal_render = event.args[IDEAL_RENDER_INSTANT]
+    cadence.append(frame_persistence)
+    cadence.pop(0)
+    return cadence
+
+  def _GetSourceToOutputDistribution(self, cadence):
+    """Create distribution for the cadence frame display values.
+
+    If the overall display distribution is A1:A2:..:An, this will tell us how
+    many times a frame stays displayed during Ak*VSYNC_DURATION, also known as
+    'source to output' distribution. Or in other terms:
+    a distribution B ::= let C be the cadence, B[k] = p with k in Unique(C)
+    and p = the number of occurrences of k in C.
+
+    Args:
+      cadence: list of frame persistence values.
+
+    Returns:
+      a dictionary containing the distribution
+    """
+    frame_distribution = {}
+    for ticks in cadence:
+      ticks_so_far = frame_distribution.setdefault(ticks, 0)
+      frame_distribution[ticks] = ticks_so_far + 1
+    return frame_distribution
+
+  def _GetFpsFromCadence(self, frame_distribution):
+    """Calculate the apparent FPS from frame distribution.
+
+    Knowing the display frequency and the frame distribution, it is possible to
+    calculate the video apparent frame rate as played by WebMediaPlayerMs
+    module.
+
+    Args:
+      frame_distribution: the source to output distribution.
+
+    Returns:
+      the video apparent frame rate.
+    """
+    number_frames = sum(frame_distribution.values())
+    number_vsyncs = sum([ticks * frame_distribution[ticks]
+       for ticks in frame_distribution])
+    mean_ratio = float(number_vsyncs) / number_frames
+    return DISPLAY_HERTZ / mean_ratio
+
+  def _GetFrozenFramesReports(self, frame_distribution):
+    """Find evidence of frozen frames in distribution.
+
+    For simplicity, a source frame counts as freezing when it stays on
+    screen for at least FROZEN_THRESHOLD vsyncs, counted from the 'Ideal
+    Render Instant' perspective. So if one source frame is rendered for 6
+    vsyncs, 5 of those rendered frames are considered frozen; anything
+    under 5 frozen frames is not counted as frozen.
+
+    Args:
+      frame_distribution: the source to output distribution.
+
+    Returns:
+      a list of dicts whose keys are ('frozen_frames', 'occurrences').
+    """
+    frozen_frames = []
+    frozen_frame_vsyncs = [ticks for ticks in frame_distribution if ticks >=
+        FROZEN_THRESHOLD]
+    for frozen_frames_vsync in frozen_frame_vsyncs:
+      logging.debug('%s frames not updated after %s vsyncs',
+          frame_distribution[frozen_frames_vsync], frozen_frames_vsync)
+      frozen_frames.append(
+          {'frozen_frames': frozen_frames_vsync - 1,
+           'occurrences': frame_distribution[frozen_frames_vsync]})
+    return frozen_frames
+
+  def _FrozenPenaltyWeight(self, number_frozen_frames):
+    """Returns the weighted penalty for a number of frozen frames.
+
+    As mentioned earlier, anything displayed for 6 vsyncs or more for the
+    same 'Ideal Render Instant' counts as frozen, which corresponds to
+    five frozen frames.
+
+    Args:
+      number_frozen_frames: number of frozen frames.
+
+    Returns:
+      the penalty weight (int) for that number of frozen frames.
+    """
+
+    penalty = {
+      0: 0,
+      1: 0,
+      2: 0,
+      3: 0,
+      4: 0,
+      5: 1,
+      6: 5,
+      7: 15,
+      8: 25
+    }
+    weight = penalty.get(number_frozen_frames, 8 * (number_frozen_frames - 4))
+    return weight
+
+  def _IsRemoteStream(self, stream):
+    """Check if stream is remote."""
+    return stream % 2
+
+  def _GetDrifTimeStats(self, relevant_events, cadence):
+    """Get the drift time statistics.
+
+    This method calculates the drift time statistics, that is:
+    drift_time ::= list(actual render begin - ideal render instant), and
+    rendering_length_error ::= the accumulated deviation of the ideal
+    render intervals from the expected cadence, as a percentage of the
+    total ideal render span.
+
+    Args:
+      relevant_events: events to get drift times stats from.
+      cadence: list of frame persistence values.
+
+    Returns:
+      a tuple of (drift_time, rendering_length_error).
+    """
+    drift_time = []
+    old_ideal_render = 0
+    discrepancy = []
+    index = 0
+    for event in relevant_events:
+      current_ideal_render = event.args[IDEAL_RENDER_INSTANT]
+      if current_ideal_render == old_ideal_render:
+        # Skip to next event because we're looking for a source frame.
+        continue
+      actual_render_begin = event.args[ACTUAL_RENDER_BEGIN]
+      drift_time.append(actual_render_begin - current_ideal_render)
+      discrepancy.append(abs(current_ideal_render - old_ideal_render
+          - VSYNC_DURATION * cadence[index]))
+      old_ideal_render = current_ideal_render
+      index += 1
+    discrepancy.pop(0)
+    last_ideal_render = relevant_events[-1].args[IDEAL_RENDER_INSTANT]
+    first_ideal_render = relevant_events[0].args[IDEAL_RENDER_INSTANT]
+    rendering_length_error = 100.0 * (sum([x for x in discrepancy]) /
+        (last_ideal_render - first_ideal_render))
+
+    return drift_time, rendering_length_error
+
+  def _GetSmoothnessStats(self, norm_drift_time):
+    """Get the smoothness stats from the normalized drift time.
+
+    This method will calculate the smoothness score, along with the percentage
+    of frames badly out of sync and the percentage of frames out of sync. To be
+    considered badly out of sync, a frame has to have missed rendering by at
+    least 2*VSYNC_DURATION. To be considered out of sync, a frame has to have
+    missed rendering by at least one VSYNC_DURATION.
+    The smoothness score is a measure of how out of sync the frames are.
+
+    Args:
+      norm_drift_time: normalized drift time.
+
+    Returns:
+      a tuple of (percent_badly_oos, percent_out_of_sync, smoothness_score)
+    """
+    # How many times is a frame later/earlier than T=2*VSYNC_DURATION. Time is
+    # in microseconds.
+    frames_severely_out_of_sync = len(
+        [x for x in norm_drift_time if abs(x) > 2 * VSYNC_DURATION])
+    percent_badly_oos = (
+        100.0 * frames_severely_out_of_sync / len(norm_drift_time))
+
+    # How many times is a frame later/earlier than VSYNC_DURATION.
+    frames_out_of_sync = len(
+        [x for x in norm_drift_time if abs(x) > VSYNC_DURATION])
+    percent_out_of_sync = (
+        100.0 * frames_out_of_sync / len(norm_drift_time))
+
+    frames_oos_only_once = frames_out_of_sync - frames_severely_out_of_sync
+
+    # Calculate smoothness metric. From the formula, we can see that smoothness
+    # score can be negative.
+    smoothness_score = 100.0 - 100.0 * (frames_oos_only_once +
+        SEVERITY * frames_severely_out_of_sync) / len(norm_drift_time)
+
+    # Minimum smoothness_score value allowed is zero.
+    if smoothness_score < 0:
+      smoothness_score = 0
+
+    return (percent_badly_oos, percent_out_of_sync, smoothness_score)
+
+  def _GetFreezingScore(self, frame_distribution):
+    """Get the freezing score."""
+
+    # The freezing score is based on the source to output distribution.
+    number_vsyncs = sum([n * frame_distribution[n]
+        for n in frame_distribution])
+    frozen_frames = self._GetFrozenFramesReports(frame_distribution)
+
+    # Calculate the freezing metric. It can be negative if things are
+    # really bad; in that case it is clamped to zero as the minimum value.
+    freezing_score = 100.0
+    for frozen_report in frozen_frames:
+      weight = self._FrozenPenaltyWeight(frozen_report['frozen_frames'])
+      freezing_score -= (
+          100.0 * frozen_report['occurrences'] / number_vsyncs * weight)
+    if freezing_score < 0:
+      freezing_score = 0
+
+    return freezing_score
+
+  def GetTimeStats(self):
+    """Calculate time stamp stats for all remote stream events."""
+    stats = {}
+    for stream, relevant_events in self.stream_to_events.iteritems():
+      if len(relevant_events) == 1:
+        logging.debug('Found a stream=%s with just one event', stream)
+        continue
+      if not self._IsRemoteStream(stream):
+        logging.info('Skipping processing of local stream: %s', stream)
+        continue
+
+      cadence = self._GetCadence(relevant_events)
+      if not cadence:
+        stats = TimeStats()
+        stats.invalid_data = True
+        return stats
+
+      frame_distribution = self._GetSourceToOutputDistribution(cadence)
+      fps = self._GetFpsFromCadence(frame_distribution)
+
+      drift_time_stats = self._GetDrifTimeStats(relevant_events, cadence)
+      (drift_time, rendering_length_error) = drift_time_stats
+
+      # Drift time normalization.
+      mean_drift_time = statistics.ArithmeticMean(drift_time)
+      norm_drift_time = [abs(x - mean_drift_time) for x in drift_time]
+
+      smoothness_stats = self._GetSmoothnessStats(norm_drift_time)
+      (percent_badly_oos, percent_out_of_sync,
+          smoothness_score) = smoothness_stats
+
+      freezing_score = self._GetFreezingScore(frame_distribution)
+
+      stats = TimeStats(drift_time=drift_time,
+          percent_badly_out_of_sync=percent_badly_oos,
+          percent_out_of_sync=percent_out_of_sync,
+          smoothness_score=smoothness_score, freezing_score=freezing_score,
+          rendering_length_error=rendering_length_error, fps=fps,
+          frame_distribution=frame_distribution)
+    return stats
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/webrtc_rendering_stats_unittest.py b/catapult/telemetry/telemetry/web_perf/metrics/webrtc_rendering_stats_unittest.py
new file mode 100644
index 0000000..15e1035
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/webrtc_rendering_stats_unittest.py
@@ -0,0 +1,271 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import unittest
+
+from telemetry.web_perf.metrics import webrtc_rendering_stats as stats_helper
+
+
+class FakeEvent(object):
+  """Fake event class to mock rendering events."""
+
+  def __init__(self, **kwargs):
+    """Initializer for the fake WebMediaPlayerMS::UpdateCurrentFrame events.
+
+    The WebMediaPlayerMsRenderingStats only cares about actual render begin,
+    actual render end, ideal render instant and serial fields of the events.
+    So we only define these four fields here in this fake event class.
+    The method accepts whichever of the valid event parameters are
+    provided. It can also be used to craft incomplete events.
+
+    Args:
+      kwargs::= dict('actual_begin', 'actual_end', 'ideal_instant', 'serial').
+    """
+    self.args = {}
+    name_map = {
+        'Actual Render Begin': 'actual_begin',
+        'Actual Render End': 'actual_end',
+        'Ideal Render Instant': 'ideal_instant',
+        'Serial': 'serial'}
+    for internal_name, external_name in name_map.iteritems():
+      if external_name in kwargs:
+        self.args[internal_name] = kwargs[external_name]
+
+
+class WebMediaPlayerMsRenderingStatsTest(unittest.TestCase):
+
+  def setUp(self):
+    # A local stream id always has an even number.
+    # A remote stream id always has an odd number.
+    self.local_stream = 136390988
+    self.remote_stream = 118626165
+
+  def testInitialization(self):
+    event_local_stream = FakeEvent(actual_begin=1655987203306,
+        actual_end=1655987219972, ideal_instant=1655987154324,
+        serial=self.local_stream)
+
+    event_remote_stream = FakeEvent(actual_begin=1655987203306,
+        actual_end=1655987219972, ideal_instant=1655987167999,
+        serial=self.remote_stream)
+
+    stats_parser = stats_helper.WebMediaPlayerMsRenderingStats(
+        [event_local_stream, event_remote_stream])
+
+    self.assertEqual(2, len(stats_parser.stream_to_events))
+
+    self.assertEqual(event_local_stream.args,
+        stats_parser.stream_to_events[self.local_stream][0].args)
+
+    self.assertEqual(event_remote_stream.args,
+        stats_parser.stream_to_events[self.remote_stream][0].args)
+
+  def testInvalidEvents(self):
+    event_missing_serial = FakeEvent(actual_begin=1655987244074,
+        actual_end=1655987260740, ideal_instant=1655987204839)
+
+    event_missing_actual_begin = FakeEvent(actual_end=1655987260740,
+        ideal_instant=1655987217999, serial=self.local_stream)
+
+    event_missing_actual_end = FakeEvent(actual_end=1655987260740,
+        ideal_instant=1655987217999, serial=self.remote_stream)
+
+    event_missing_ideal_instant = FakeEvent(actual_begin=1655987260740,
+        actual_end=1655987277406, serial=self.remote_stream)
+
+    stats_parser = stats_helper.WebMediaPlayerMsRenderingStats(
+        [event_missing_serial, event_missing_actual_begin,
+         event_missing_actual_end, event_missing_ideal_instant])
+
+    self.assertEqual(0, len(stats_parser.stream_to_events))
+
+  def _GetFakeEvents(self):
+    fake_events = [
+        FakeEvent(actual_begin=1663780195583, actual_end=1663780212249,
+            ideal_instant=1663780179998, serial=self.remote_stream),
+        FakeEvent(actual_begin=1663780212249, actual_end=1663780228915,
+            ideal_instant=1663780179998, serial=self.remote_stream),
+        FakeEvent(actual_begin=1663780228915, actual_end=1663780245581,
+            ideal_instant=1663780197998, serial=self.remote_stream),
+        FakeEvent(actual_begin=1663780245581, actual_end=1663780262247,
+            ideal_instant=1663780215998, serial=self.remote_stream),
+        FakeEvent(actual_begin=1663780262247, actual_end=1663780278913,
+            ideal_instant=1663780215998, serial=self.remote_stream),
+        FakeEvent(actual_begin=1663780278913, actual_end=1663780295579,
+            ideal_instant=1663780254998, serial=self.remote_stream),
+        FakeEvent(actual_begin=1663780295579, actual_end=1663780312245,
+            ideal_instant=1663780254998, serial=self.remote_stream),
+        FakeEvent(actual_begin=1663780312245, actual_end=1663780328911,
+           ideal_instant=1663780254998, serial=self.remote_stream),
+        FakeEvent(actual_begin=1663780328911, actual_end=1663780345577,
+           ideal_instant=1663780310998, serial=self.remote_stream),
+        FakeEvent(actual_begin=1663780345577, actual_end=1663780362243,
+            ideal_instant=1663780310998, serial=self.remote_stream),
+        FakeEvent(actual_begin=1663780362243, actual_end=1663780378909,
+            ideal_instant=1663780310998, serial=self.remote_stream),
+        FakeEvent(actual_begin=1663780378909, actual_end=1663780395575,
+            ideal_instant=1663780361998, serial=self.remote_stream),
+        FakeEvent(actual_begin=1663780395575, actual_end=1663780412241,
+            ideal_instant=1663780361998, serial=self.remote_stream),
+        FakeEvent(actual_begin=1663780412241, actual_end=1663780428907,
+            ideal_instant=1663780361998, serial=self.remote_stream),
+        FakeEvent(actual_begin=1663780428907, actual_end=1663780445573,
+            ideal_instant=1663780412998, serial=self.remote_stream)]
+
+    return fake_events
+
+  def _GetCorruptEvents(self):
+    # The events below are corrupt data because the |ideal_instant|
+    # parameter is zero, which makes all computation meaningless.
+    # Indeed, the ideal_instant (aka Ideal Render Instant) indicates
+    # when the frame should be rendered ideally.
+    corrupt_events = [
+        FakeEvent(actual_begin=1663780195583, actual_end=1663780212249,
+            ideal_instant=0, serial=self.remote_stream),
+        FakeEvent(actual_begin=1663780212249, actual_end=1663780228915,
+            ideal_instant=0, serial=self.remote_stream),
+        FakeEvent(actual_begin=1663780228915, actual_end=1663780245581,
+            ideal_instant=0, serial=self.remote_stream),
+        FakeEvent(actual_begin=1663780245581, actual_end=1663780262247,
+            ideal_instant=0, serial=self.remote_stream)]
+    return corrupt_events
+
+  def testGetCadence(self):
+    fake_events = self._GetFakeEvents()
+    stats_parser = stats_helper.WebMediaPlayerMsRenderingStats(fake_events)
+    # The events defined in _GetFakeEvents above show that the first source
+    # frame of ideal_instant=1663780179998 is rendered twice, then
+    # the second source frame of ideal_instant=1663780197998 is rendered
+    # once, the third source frame of ideal_instant=1663780215998 is
+    # rendered twice, and so on. The expected cadence is therefore
+    # [2, 1, 2, ...].
+    expected_cadence = [2, 1, 2, 3, 3, 3, 1]
+    self.assertEqual(expected_cadence, stats_parser._GetCadence(fake_events))
+
+  def testGetSourceToOutputDistribution(self):
+    stats_parser = stats_helper.WebMediaPlayerMsRenderingStats([])
+    cadence = [2, 1, 2, 3, 3, 3, 1]
+    expected_frame_distribution = {1: 2, 2: 2, 3: 3}
+    self.assertEqual(expected_frame_distribution,
+        stats_parser._GetSourceToOutputDistribution(cadence))
+
+  def testGetFpsFromCadence(self):
+    frame_distribution = {1: 2, 2: 2, 3: 3}
+    stats_parser = stats_helper.WebMediaPlayerMsRenderingStats([])
+    expected_frame_rate = 28.0
+    self.assertEqual(expected_frame_rate,
+        stats_parser._GetFpsFromCadence(frame_distribution))
+
+  def testGetFrozenFramesReports(self):
+    frame_distribution = {1: 2, 2: 2, 3: 569, 6: 1}
+    expected_frozen_reports = [{'frozen_frames': 5, 'occurrences': 1}]
+    stats_parser = stats_helper.WebMediaPlayerMsRenderingStats([])
+    self.assertEqual(expected_frozen_reports,
+        stats_parser._GetFrozenFramesReports(frame_distribution))
+
+  def testIsRemoteStream(self):
+    stats_parser = stats_helper.WebMediaPlayerMsRenderingStats([])
+    self.assertTrue(stats_parser._IsRemoteStream(self.remote_stream))
+
+  def testGetDrifTimeStats(self):
+    fake_events = self._GetFakeEvents()
+    stats_parser = stats_helper.WebMediaPlayerMsRenderingStats([])
+    cadence = stats_parser._GetCadence(fake_events)
+    expected_drift_time = [15585, 30917, 29583, 23915, 17913, 16911, 15909]
+    expected_rendering_length_error = 29.613733905579398
+
+    self.assertEqual((expected_drift_time, expected_rendering_length_error),
+        stats_parser._GetDrifTimeStats(fake_events, cadence))
+
+  def testGetSmoothnessStats(self):
+    norm_drift_time = [5948.2857142857138, 9383.7142857142862,
+        8049.7142857142862, 2381.7142857142862, 3620.2857142857138,
+        4622.2857142857138, 5624.2857142857138]
+    stats_parser = stats_helper.WebMediaPlayerMsRenderingStats([])
+    expected_percent_badly_oos = 0.0
+    expected_percent_out_of_sync = 0.0
+    expected_smoothness_score = 100.0
+    expected_smoothness_stats = (expected_percent_badly_oos,
+        expected_percent_out_of_sync, expected_smoothness_score)
+
+    self.assertEqual(expected_smoothness_stats,
+        stats_parser._GetSmoothnessStats(norm_drift_time))
+
+  def testNegativeSmoothnessScoreChangedToZero(self):
+    norm_drift_time = [15948.285714285714, 9383.714285714286,
+        28049.714285714286, 72381.71428571429, 3620.2857142857138,
+        4622.285714285714, 35624.28571428572]
+    stats_parser = stats_helper.WebMediaPlayerMsRenderingStats([])
+    expected_percent_badly_oos = 28.571428571428573
+    expected_percent_out_of_sync = 42.857142857142854
+    expected_smoothness_score = 0.0
+    expected_smoothness_stats = (expected_percent_badly_oos,
+        expected_percent_out_of_sync, expected_smoothness_score)
+
+    self.assertEqual(expected_smoothness_stats,
+        stats_parser._GetSmoothnessStats(norm_drift_time))
+
+  def testGetFreezingScore(self):
+    frame_distribution = {1: 2, 2: 2, 3: 569, 6: 1}
+    stats_parser = stats_helper.WebMediaPlayerMsRenderingStats([])
+    expected_freezing_score = 99.94182664339732
+    self.assertEqual(expected_freezing_score,
+        stats_parser._GetFreezingScore(frame_distribution))
+
+  def testNegativeFreezingScoreChangedToZero(self):
+    frame_distribution = {1: 2, 2: 2, 3: 2, 8:100}
+    stats_parser = stats_helper.WebMediaPlayerMsRenderingStats([])
+    self.assertEqual(0.0, stats_parser._GetFreezingScore(frame_distribution))
+
+  def testGetTimeStats(self):
+    fake_events = self._GetFakeEvents()
+    expected_frame_dist = {1: 2, 2: 2, 3: 3}
+    expected_frame_rate = 28.0
+    expected_drift_time = [15585, 30917, 29583, 23915, 17913, 16911, 15909]
+    expected_rendering_length_error = 29.613733905579398
+    expected_percent_badly_oos = 0.0
+    expected_percent_out_of_sync = 0.0
+    expected_smoothness_score = 100.0
+    expected_freezing_score = 100.0
+
+    stats_cls = stats_helper.WebMediaPlayerMsRenderingStats
+
+    stats_parser = stats_cls(fake_events)
+
+    expected_stats = stats_helper.TimeStats(
+        drift_time=expected_drift_time,
+        percent_badly_out_of_sync=expected_percent_badly_oos,
+        percent_out_of_sync=expected_percent_out_of_sync,
+        smoothness_score=expected_smoothness_score,
+        freezing_score=expected_freezing_score,
+        rendering_length_error=expected_rendering_length_error,
+        fps=expected_frame_rate,
+        frame_distribution=expected_frame_dist)
+
+    stats = stats_parser.GetTimeStats()
+
+    self.assertEqual(expected_stats.drift_time, stats.drift_time)
+    self.assertEqual(expected_stats.percent_badly_out_of_sync,
+        stats.percent_badly_out_of_sync)
+    self.assertEqual(expected_stats.percent_out_of_sync,
+        stats.percent_out_of_sync)
+    self.assertEqual(expected_stats.smoothness_score, stats.smoothness_score)
+    self.assertEqual(expected_stats.freezing_score, stats.freezing_score)
+    self.assertEqual(expected_stats.rendering_length_error,
+        stats.rendering_length_error)
+    self.assertEqual(expected_stats.fps, stats.fps)
+    self.assertEqual(expected_stats.frame_distribution,
+        stats.frame_distribution)
+
+  def testCorruptData(self):
+    corrupt_events = self._GetCorruptEvents()
+    stats_parser = stats_helper.WebMediaPlayerMsRenderingStats(corrupt_events)
+    stats = stats_parser.GetTimeStats()
+    self.assertTrue(stats.invalid_data)
+    self.assertIsNone(stats.drift_time)
+    self.assertIsNone(stats.percent_badly_out_of_sync)
+    self.assertIsNone(stats.percent_out_of_sync)
+    self.assertIsNone(stats.smoothness_score)
+    self.assertIsNone(stats.freezing_score)
+    self.assertIsNone(stats.rendering_length_error)
+    self.assertIsNone(stats.fps)
+    self.assertIsNone(stats.frame_distribution)
diff --git a/catapult/telemetry/telemetry/web_perf/metrics/webrtc_rendering_timeline.py b/catapult/telemetry/telemetry/web_perf/metrics/webrtc_rendering_timeline.py
new file mode 100644
index 0000000..062ef09
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/metrics/webrtc_rendering_timeline.py
@@ -0,0 +1,133 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.value import list_of_scalar_values
+from telemetry.value import scalar
+from telemetry.value import improvement_direction
+from telemetry.web_perf.metrics import timeline_based_metric
+from telemetry.web_perf.metrics import webrtc_rendering_stats as stats_helper
+
+WEB_MEDIA_PLAYER_MS_EVENT = 'WebMediaPlayerMS::UpdateCurrentFrame'
+
+
+class WebRtcRenderingTimelineMetric(timeline_based_metric.TimelineBasedMetric):
+  """WebrtcRenderingTimelineMetric calculates metric for WebMediaPlayerMS.
+
+  The following metrics are added to the results:
+    WebRTCRendering_drift_time us
+    WebRTCRendering_percent_badly_out_of_sync %
+    WebRTCRendering_percent_out_of_sync %
+    WebRTCRendering_fps FPS
+    WebRTCRendering_smoothness_score %
+    WebRTCRendering_freezing_score %
+    WebRTCRendering_rendering_length_error %
+  """
+
+  def __init__(self):
+    super(WebRtcRenderingTimelineMetric, self).__init__()
+
+  @staticmethod
+  def IsMediaPlayerMSEvent(event):
+    """Verify that the event is a webmediaplayerMS event."""
+    return event.name == WEB_MEDIA_PLAYER_MS_EVENT
+
+  def AddResults(self, model, renderer_thread, interactions, results):
+    """Adding metrics to the results."""
+    assert interactions
+    found_events = []
+    for event in renderer_thread.parent.IterAllEvents(
+        event_predicate=self.IsMediaPlayerMSEvent):
+      if timeline_based_metric.IsEventInInteractions(event, interactions):
+        found_events.append(event)
+    stats_parser = stats_helper.WebMediaPlayerMsRenderingStats(found_events)
+    rendering_stats = stats_parser.GetTimeStats()
+    none_reason = None
+    if not rendering_stats:
+      # Create a TimeStats object whose members have None values.
+      rendering_stats = stats_helper.TimeStats()
+      none_reason = 'No WebMediaPlayerMS::UpdateCurrentFrame event found'
+    elif rendering_stats.invalid_data:
+      # Throw away the data.
+      rendering_stats = stats_helper.TimeStats()
+      none_reason = 'WebMediaPlayerMS data is corrupted.'
+    results.AddValue(list_of_scalar_values.ListOfScalarValues(
+        results.current_page,
+        'WebRTCRendering_drift_time',
+        'us',
+        rendering_stats.drift_time,
+        important=True,
+        description='Drift time for a rendered frame',
+        tir_label=interactions[0].label,
+        improvement_direction=improvement_direction.DOWN,
+        none_value_reason=none_reason))
+
+    results.AddValue(scalar.ScalarValue(
+        results.current_page,
+        'WebRTCRendering_percent_badly_out_of_sync',
+        '%',
+        rendering_stats.percent_badly_out_of_sync,
+        important=True,
+        description='Percentage of frames which drifted more than 2 VSYNCs',
+        tir_label=interactions[0].label,
+        improvement_direction=improvement_direction.DOWN,
+        none_value_reason=none_reason))
+
+    results.AddValue(scalar.ScalarValue(
+        results.current_page,
+        'WebRTCRendering_percent_out_of_sync',
+        '%',
+        rendering_stats.percent_out_of_sync,
+        important=True,
+        description='Percentage of frames which drifted more than 1 VSYNC',
+        tir_label=interactions[0].label,
+        improvement_direction=improvement_direction.DOWN,
+        none_value_reason=none_reason))
+
+    # The frame distribution list is intentionally not reported as a value:
+    # it is underlying data rather than a metric, and it has no meaningful
+    # improvement direction.
+
+    results.AddValue(scalar.ScalarValue(
+        results.current_page,
+        'WebRTCRendering_fps',
+        'fps',
+        rendering_stats.fps,
+        important=True,
+        description='Calculated Frame Rate of video rendering',
+        tir_label=interactions[0].label,
+        improvement_direction=improvement_direction.UP,
+        none_value_reason=none_reason))
+
+    results.AddValue(scalar.ScalarValue(
+        results.current_page,
+        'WebRTCRendering_smoothness_score',
+        '%',
+        rendering_stats.smoothness_score,
+        important=True,
+        description='Smoothness score of rendering',
+        tir_label=interactions[0].label,
+        improvement_direction=improvement_direction.UP,
+        none_value_reason=none_reason))
+
+    results.AddValue(scalar.ScalarValue(
+        results.current_page,
+        'WebRTCRendering_freezing_score',
+        '%',
+        rendering_stats.freezing_score,
+        important=True,
+        description='Freezing score of rendering',
+        tir_label=interactions[0].label,
+        improvement_direction=improvement_direction.UP,
+        none_value_reason=none_reason))
+
+    results.AddValue(scalar.ScalarValue(
+        results.current_page,
+        'WebRTCRendering_rendering_length_error',
+        '%',
+        rendering_stats.rendering_length_error,
+        important=True,
+        description='Rendering length error rate',
+        tir_label=interactions[0].label,
+        improvement_direction=improvement_direction.DOWN,
+        none_value_reason=none_reason))
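For reference, the metric above follows the legacy TimelineBasedMetric contract: filter the relevant trace events, derive statistics, and report them through results.AddValue(). A minimal sketch of that contract follows; the metric class, event name and value name are illustrative assumptions, while the telemetry calls are the ones already used above.

from telemetry.value import improvement_direction
from telemetry.value import scalar
from telemetry.web_perf.metrics import timeline_based_metric


class ExampleEventCountMetric(timeline_based_metric.TimelineBasedMetric):
  """Illustrative metric: counts events with an assumed name that fall
  inside the measured interactions."""

  def AddResults(self, model, renderer_thread, interactions, results):
    count = 0
    for event in renderer_thread.parent.IterAllEvents(
        event_predicate=lambda e: e.name == 'Example::DoWork'):
      if timeline_based_metric.IsEventInInteractions(event, interactions):
        count += 1
    results.AddValue(scalar.ScalarValue(
        results.current_page,
        'example_do_work_count',
        'count',
        count,
        description='Number of Example::DoWork events in the interaction',
        tir_label=interactions[0].label,
        improvement_direction=improvement_direction.DOWN))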
diff --git a/catapult/telemetry/telemetry/web_perf/smooth_gesture_util.py b/catapult/telemetry/telemetry/web_perf/smooth_gesture_util.py
new file mode 100644
index 0000000..0d0d453
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/smooth_gesture_util.py
@@ -0,0 +1,37 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import copy
+
+from telemetry.web_perf import timeline_interaction_record as tir_module
+
+
+def GetAdjustedInteractionIfContainGesture(timeline, interaction_record):
+  """ Returns a new interaction record if interaction_record contains geture
+  whose time range that overlaps with interaction_record's range. If not,
+  returns a clone of original interaction_record.
+  The synthetic gesture controller inserts a trace marker to precisely
+  demarcate when the gesture was running. We check for overlap, not inclusion,
+  because gesture_actions can start/end slightly outside the telemetry markers
+  on Windows. This problem is probably caused by a race condition between
+  the browser and renderer process submitting the trace events for the
+  markers.
+  """
+  # Only adjust the range for gestures.
+  if not interaction_record.label.startswith('Gesture_'):
+    return copy.copy(interaction_record)
+  gesture_events = [
+    ev for ev
+    in timeline.IterAllAsyncSlicesOfName('SyntheticGestureController::running')
+    if ev.parent_slice is None and
+    ev.start <= interaction_record.end and
+    ev.end >= interaction_record.start]
+  if len(gesture_events) == 0:
+    return copy.copy(interaction_record)
+  if len(gesture_events) > 1:
+    raise Exception('More than one possible synthetic gesture marker found in '
+                    'interaction_record %s.' % interaction_record.label)
+  return tir_module.TimelineInteractionRecord(
+    interaction_record.label, gesture_events[0].start,
+    gesture_events[0].end, gesture_events[0],
+    interaction_record._flags)  # pylint: disable=protected-access
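The key detail in the helper above is that the synthetic gesture marker only needs to overlap the interaction record, not be contained by it. A small sketch of the two predicates with invented numbers makes the difference concrete; the helper names are illustrative only.

def _Overlaps(a_start, a_end, b_start, b_end):
  # The test used above: the two ranges merely need to intersect.
  return a_start <= b_end and a_end >= b_start


def _Contains(outer_start, outer_end, inner_start, inner_end):
  # An inclusion test would wrongly reject gestures that spill slightly
  # outside the telemetry markers, as happens on Windows.
  return outer_start <= inner_start and inner_end <= outer_end


# Gesture marker [10, 30] vs. interaction record [25, 35]:
assert _Overlaps(10, 30, 25, 35)      # accepted by the overlap check
assert not _Contains(25, 35, 10, 30)  # rejected by an inclusion check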
diff --git a/catapult/telemetry/telemetry/web_perf/smooth_gesture_util_unittest.py b/catapult/telemetry/telemetry/web_perf/smooth_gesture_util_unittest.py
new file mode 100644
index 0000000..d30c2e0
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/smooth_gesture_util_unittest.py
@@ -0,0 +1,159 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import time
+import unittest
+
+from telemetry import decorators
+from telemetry.page import page as page_module
+from telemetry.page import page_test
+from telemetry.testing import page_test_test_case
+from telemetry.timeline import async_slice
+from telemetry.timeline import model as model_module
+from telemetry.timeline import tracing_config
+from telemetry.web_perf import smooth_gesture_util as sg_util
+from telemetry.web_perf import timeline_interaction_record as tir_module
+
+
+class SmoothGestureUtilTest(unittest.TestCase):
+  def testGetAdjustedInteractionIfContainGesture(self):
+    model = model_module.TimelineModel()
+    renderer_main = model.GetOrCreateProcess(1).GetOrCreateThread(2)
+    renderer_main.name = 'CrRendererMain'
+
+    #      [          X          ]                   [   Y  ]
+    #      [  sub_async_slice_X  ]
+    #          [   record_1]
+    #          [   record_6]
+    #  [  record_2 ]          [ record_3 ]
+    #  [           record_4              ]
+    #                                [ record_5 ]
+    #
+    # Note: X and Y are async slice with name
+    # SyntheticGestureController::running
+
+    async_slice_X = async_slice.AsyncSlice(
+      'X', 'SyntheticGestureController::running', 10, duration=20,
+      start_thread=renderer_main, end_thread=renderer_main)
+
+    sub_async_slice_X = async_slice.AsyncSlice(
+      'X', 'SyntheticGestureController::running', 10, duration=20,
+      start_thread=renderer_main, end_thread=renderer_main)
+    sub_async_slice_X.parent_slice = async_slice_X
+    async_slice_X.AddSubSlice(sub_async_slice_X)
+
+    async_slice_Y = async_slice.AsyncSlice(
+      'X', 'SyntheticGestureController::running', 60, duration=20,
+      start_thread=renderer_main, end_thread=renderer_main)
+
+    renderer_main.AddAsyncSlice(async_slice_X)
+    renderer_main.AddAsyncSlice(async_slice_Y)
+
+    model.FinalizeImport(shift_world_to_zero=False)
+
+    record_1 = tir_module.TimelineInteractionRecord('Gesture_included', 15, 25)
+    record_2 = tir_module.TimelineInteractionRecord(
+      'Gesture_overlapped_left', 5, 25)
+    record_3 = tir_module.TimelineInteractionRecord(
+      'Gesture_overlapped_right', 25, 35)
+    record_4 = tir_module.TimelineInteractionRecord(
+      'Gesture_containing', 5, 35)
+    record_5 = tir_module.TimelineInteractionRecord(
+      'Gesture_non_overlapped', 35, 45)
+    record_6 = tir_module.TimelineInteractionRecord('Action_included', 15, 25)
+
+    adjusted_record_1 = sg_util.GetAdjustedInteractionIfContainGesture(
+      model, record_1)
+    self.assertEquals(adjusted_record_1.start, 10)
+    self.assertEquals(adjusted_record_1.end, 30)
+    self.assertTrue(adjusted_record_1 is not record_1)
+
+    adjusted_record_2 = sg_util.GetAdjustedInteractionIfContainGesture(
+      model, record_2)
+    self.assertEquals(adjusted_record_2.start, 10)
+    self.assertEquals(adjusted_record_2.end, 30)
+
+    adjusted_record_3 = sg_util.GetAdjustedInteractionIfContainGesture(
+      model, record_3)
+    self.assertEquals(adjusted_record_3.start, 10)
+    self.assertEquals(adjusted_record_3.end, 30)
+
+    adjusted_record_4 = sg_util.GetAdjustedInteractionIfContainGesture(
+      model, record_4)
+    self.assertEquals(adjusted_record_4.start, 10)
+    self.assertEquals(adjusted_record_4.end, 30)
+
+    adjusted_record_5 = sg_util.GetAdjustedInteractionIfContainGesture(
+      model, record_5)
+    self.assertEquals(adjusted_record_5.start, 35)
+    self.assertEquals(adjusted_record_5.end, 45)
+    self.assertTrue(adjusted_record_5 is not record_5)
+
+    adjusted_record_6 = sg_util.GetAdjustedInteractionIfContainGesture(
+      model, record_6)
+    self.assertEquals(adjusted_record_6.start, 15)
+    self.assertEquals(adjusted_record_6.end, 25)
+    self.assertTrue(adjusted_record_6 is not record_6)
+
+
+class ScrollingPage(page_module.Page):
+  def __init__(self, url, page_set, base_dir):
+    super(ScrollingPage, self).__init__(url, page_set, base_dir)
+
+  def RunPageInteractions(self, action_runner):
+    with action_runner.CreateGestureInteraction('ScrollAction'):
+      # Add a 0.5s gap between when the gesture record is issued and when we
+      # actually scroll the page.
+      time.sleep(0.5)
+      action_runner.ScrollPage()
+      time.sleep(0.5)
+
+
+class SmoothGestureTest(page_test_test_case.PageTestTestCase):
+
+  @decorators.Disabled('mac',       # crbug.com/450171
+                       'win',       # crbug.com/570955
+                       'chromeos')  # crbug.com/483212
+  @decorators.Isolated  # Needed because of py_trace_event
+  def testSmoothGestureAdjusted(self):
+    ps = self.CreateEmptyPageSet()
+    ps.AddStory(ScrollingPage(
+      'file://scrollable_page.html', ps, base_dir=ps.base_dir))
+    models = []
+    tab_ids = []
+    class ScrollingGestureTestMeasurement(page_test.PageTest):
+      def __init__(self):
+        # pylint: disable=bad-super-call
+        super(ScrollingGestureTestMeasurement, self).__init__()
+
+      def WillNavigateToPage(self, page, tab):
+        del page  # unused
+        config = tracing_config.TracingConfig()
+        config.enable_chrome_trace = True
+        tab.browser.platform.tracing_controller.StartTracing(config)
+
+      def ValidateAndMeasurePage(self, page, tab, results):
+        del page, results  # unused
+        models.append(model_module.TimelineModel(
+          tab.browser.platform.tracing_controller.StopTracing()))
+        tab_ids.append(tab.id)
+
+    self.RunMeasurement(ScrollingGestureTestMeasurement(), ps)
+    timeline_model = models[0]
+    renderer_thread = timeline_model.GetRendererThreadFromTabId(
+        tab_ids[0])
+    smooth_record = None
+    for e in renderer_thread.async_slices:
+      if tir_module.IsTimelineInteractionRecord(e.name):
+        smooth_record = tir_module.TimelineInteractionRecord.FromAsyncEvent(e)
+    self.assertIsNotNone(smooth_record)
+    adjusted_smooth_gesture = (
+      sg_util.GetAdjustedInteractionIfContainGesture(
+        timeline_model, smooth_record))
+    # Test that the scroll gesture starts at least 500ms after the start of
+    # the interaction record and ends at least 500ms before the end of the
+    # interaction record.
+    self.assertLessEqual(
+      500, adjusted_smooth_gesture.start - smooth_record.start)
+    self.assertLessEqual(
+      500, smooth_record.end - adjusted_smooth_gesture.end)
diff --git a/catapult/telemetry/telemetry/web_perf/story_test.py b/catapult/telemetry/telemetry/web_perf/story_test.py
new file mode 100644
index 0000000..2ed65a4
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/story_test.py
@@ -0,0 +1,49 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class Failure(Exception):
+  """StoryTest Exception raised when an undesired but designed-for problem."""
+
+
+class StoryTest(object):
+  """A class for creating story tests.
+
+  The overall test run control flow follows this order:
+    test.WillRunStory
+    state.WillRunStory
+    state.RunStory
+    test.Measure
+    state.DidRunStory
+    test.DidRunStory
+  """
+
+  def WillRunStory(self, platform):
+    """Override to do any action before running the story.
+
+    This is run before state.WillRunStory.
+    Args:
+      platform: The platform that the story will run on.
+    """
+    raise NotImplementedError()
+
+  def Measure(self, platform, results):
+    """Override to take the measurement.
+
+    This is run only if state.RunStory is successful.
+    Args:
+      platform: The platform that the story will run on.
+      results: The results of running the story.
+    """
+    raise NotImplementedError()
+
+  def DidRunStory(self, platform):
+    """Override to do any action after running the story, e.g., clean up.
+
+    This is run after state.DidRunStory, and it is always called even if the
+    test run failed.
+    Args:
+      platform: The platform that the story will run on.
+    """
+    raise NotImplementedError()
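A concrete StoryTest only has to implement the three hooks above, which are invoked in the order listed in the class docstring. The sketch below is a hypothetical no-op timer, not part of this patch, showing the expected shape of a subclass.

import time

from telemetry.web_perf import story_test


class WallTimeStoryTest(story_test.StoryTest):
  """Hypothetical StoryTest that records how long the story state ran."""

  def WillRunStory(self, platform):
    # Runs before state.WillRunStory.
    self._start = time.time()

  def Measure(self, platform, results):
    # Runs only if state.RunStory succeeded.
    self._elapsed_seconds = time.time() - self._start

  def DidRunStory(self, platform):
    # Runs after state.DidRunStory, even if the test run failed.
    pass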
diff --git a/catapult/telemetry/telemetry/web_perf/timeline_based_measurement.py b/catapult/telemetry/telemetry/web_perf/timeline_based_measurement.py
new file mode 100644
index 0000000..9a9a77f
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/timeline_based_measurement.py
@@ -0,0 +1,338 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import collections
+import logging
+from collections import defaultdict
+
+from tracing.metrics import metric_runner
+
+from telemetry.timeline import model as model_module
+from telemetry.timeline import tracing_category_filter
+from telemetry.timeline import tracing_config
+from telemetry.value import trace
+from telemetry.value import translate_common_values
+from telemetry.web_perf.metrics import timeline_based_metric
+from telemetry.web_perf.metrics import blob_timeline
+from telemetry.web_perf.metrics import jitter_timeline
+from telemetry.web_perf.metrics import webrtc_rendering_timeline
+from telemetry.web_perf.metrics import gpu_timeline
+from telemetry.web_perf.metrics import indexeddb_timeline
+from telemetry.web_perf.metrics import layout
+from telemetry.web_perf.metrics import memory_timeline
+from telemetry.web_perf.metrics import responsiveness_metric
+from telemetry.web_perf.metrics import smoothness
+from telemetry.web_perf.metrics import text_selection
+from telemetry.web_perf import smooth_gesture_util
+from telemetry.web_perf import story_test
+from telemetry.web_perf import timeline_interaction_record as tir_module
+
+# TimelineBasedMeasurement considers all instrumentation as producing a single
+# timeline. However, overhead increases with the amount of instrumentation
+# that is enabled, so the user of the measurement must choose between a few
+# levels of instrumentation.
+NO_OVERHEAD_LEVEL = 'no-overhead'
+MINIMAL_OVERHEAD_LEVEL = 'minimal-overhead'
+DEBUG_OVERHEAD_LEVEL = 'debug-overhead'
+
+ALL_OVERHEAD_LEVELS = [
+  NO_OVERHEAD_LEVEL,
+  MINIMAL_OVERHEAD_LEVEL,
+  DEBUG_OVERHEAD_LEVEL
+]
+
+
+def _GetAllLegacyTimelineBasedMetrics():
+  # TODO(nednguyen): use a discovery pattern to return instances of all
+  # TimelineBasedMetric classes in the web_perf/metrics/ folder.
+  # This cannot be done until crbug.com/460208 is fixed.
+  return (smoothness.SmoothnessMetric(),
+          responsiveness_metric.ResponsivenessMetric(),
+          layout.LayoutMetric(),
+          gpu_timeline.GPUTimelineMetric(),
+          blob_timeline.BlobTimelineMetric(),
+          jitter_timeline.JitterTimelineMetric(),
+          memory_timeline.MemoryTimelineMetric(),
+          text_selection.TextSelectionMetric(),
+          indexeddb_timeline.IndexedDBTimelineMetric(),
+          webrtc_rendering_timeline.WebRtcRenderingTimelineMetric())
+
+
+class InvalidInteractions(Exception):
+  pass
+
+
+# TODO(nednguyen): Get rid of this results wrapper hack after we add interaction
+# record to telemetry value system (crbug.com/453109)
+class ResultsWrapperInterface(object):
+  def __init__(self):
+    self._tir_label = None
+    self._results = None
+
+  def SetResults(self, results):
+    self._results = results
+
+  def SetTirLabel(self, tir_label):
+    self._tir_label = tir_label
+
+  @property
+  def current_page(self):
+    return self._results.current_page
+
+  def AddValue(self, value):
+    raise NotImplementedError
+
+
+class _TBMResultWrapper(ResultsWrapperInterface):
+  def AddValue(self, value):
+    assert self._tir_label
+    if value.tir_label:
+      assert value.tir_label == self._tir_label
+    else:
+      logging.warning(
+          'TimelineBasedMetric should create the interaction record label '
+          'for %r values.' % value.name)
+      value.tir_label = self._tir_label
+    self._results.AddValue(value)
+
+
+def _GetRendererThreadsToInteractionRecordsMap(model):
+  threads_to_records_map = defaultdict(list)
+  interaction_labels_of_previous_threads = set()
+  for curr_thread in model.GetAllThreads():
+    for event in curr_thread.async_slices:
+      # TODO(nduca): Add support for page-load interaction record.
+      if tir_module.IsTimelineInteractionRecord(event.name):
+        interaction = tir_module.TimelineInteractionRecord.FromAsyncEvent(event)
+        # Adjust the interaction record to match the synthetic gesture
+        # controller if needed.
+        interaction = (
+            smooth_gesture_util.GetAdjustedInteractionIfContainGesture(
+                model, interaction))
+        threads_to_records_map[curr_thread].append(interaction)
+        if interaction.label in interaction_labels_of_previous_threads:
+          raise InvalidInteractions(
+            'Interaction record label %s is duplicated on different '
+            'threads' % interaction.label)
+    if curr_thread in threads_to_records_map:
+      interaction_labels_of_previous_threads.update(
+        r.label for r in threads_to_records_map[curr_thread])
+
+  return threads_to_records_map
+
+
+class _TimelineBasedMetrics(object):
+  def __init__(self, model, renderer_thread, interaction_records,
+               results_wrapper, metrics):
+    self._model = model
+    self._renderer_thread = renderer_thread
+    self._interaction_records = interaction_records
+    self._results_wrapper = results_wrapper
+    self._all_metrics = metrics
+
+  def AddResults(self, results):
+    interactions_by_label = defaultdict(list)
+    for i in self._interaction_records:
+      interactions_by_label[i.label].append(i)
+
+    for label, interactions in interactions_by_label.iteritems():
+      are_repeatable = [i.repeatable for i in interactions]
+      if not all(are_repeatable) and len(interactions) > 1:
+        raise InvalidInteractions('Duplicate unrepeatable interaction records '
+                                  'on the page')
+      self._results_wrapper.SetResults(results)
+      self._results_wrapper.SetTirLabel(label)
+      self.UpdateResultsByMetric(interactions, self._results_wrapper)
+
+  def UpdateResultsByMetric(self, interactions, wrapped_results):
+    if not interactions:
+      return
+
+    for metric in self._all_metrics:
+      metric.AddResults(self._model, self._renderer_thread,
+                        interactions, wrapped_results)
+
+
+class Options(object):
+  """A class to be used to configure TimelineBasedMeasurement.
+
+  This is created and returned by
+  Benchmark.CreateTimelineBasedMeasurementOptions.
+
+  By default, all the timeline based metrics in telemetry/web_perf/metrics are
+  used (see _GetAllLegacyTimelineBasedMetrics above).
+  To customize the set of metrics, use SetTimelineBasedMetric().
+  """
+
+  def __init__(self, overhead_level=NO_OVERHEAD_LEVEL):
+    """As the amount of instrumentation increases, so does the overhead.
+    The user of the measurement chooses the overhead level that is appropriate,
+    and the tracing is filtered accordingly.
+
+    overhead_level: Can either be a custom TracingCategoryFilter object or
+        one of NO_OVERHEAD_LEVEL, MINIMAL_OVERHEAD_LEVEL or
+        DEBUG_OVERHEAD_LEVEL.
+    """
+    self._config = tracing_config.TracingConfig()
+    self._config.enable_chrome_trace = True
+    self._config.enable_platform_display_trace = True
+
+    if isinstance(overhead_level,
+                  tracing_category_filter.TracingCategoryFilter):
+      self._config.SetTracingCategoryFilter(overhead_level)
+    elif overhead_level in ALL_OVERHEAD_LEVELS:
+      if overhead_level == NO_OVERHEAD_LEVEL:
+        self._config.SetNoOverheadFilter()
+      elif overhead_level == MINIMAL_OVERHEAD_LEVEL:
+        self._config.SetMinimalOverheadFilter()
+      else:
+        self._config.SetDebugOverheadFilter()
+    else:
+      raise Exception("Overhead level must be a TracingCategoryFilter object"
+                      " or valid overhead level string."
+                      " Given overhead level: %s" % overhead_level)
+
+    self._timeline_based_metric = None
+    self._legacy_timeline_based_metrics = _GetAllLegacyTimelineBasedMetrics()
+
+
+  def ExtendTraceCategoryFilter(self, filters):
+    for new_category_filter in filters:
+      self._config.tracing_category_filter.AddIncludedCategory(
+          new_category_filter)
+
+  @property
+  def category_filter(self):
+    return self._config.tracing_category_filter
+
+  @property
+  def config(self):
+    return self._config
+
+  def SetTimelineBasedMetric(self, metric):
+    """Sets the new-style (TBMv2) metric to run.
+
+    Metrics are assumed to live in //tracing/tracing/metrics, so the path
+    should be relative to that. For example, to specify sample_metric.html,
+    you would pass 'sample_metric.html'.
+
+    Args:
+      metric: A string metric path under //tracing/tracing/metrics.
+    """
+    assert isinstance(metric, basestring)
+    self._legacy_timeline_based_metrics = None
+    self._timeline_based_metric = metric
+
+  def GetTimelineBasedMetric(self):
+    return self._timeline_based_metric
+
+  def SetLegacyTimelineBasedMetrics(self, metrics):
+    assert self._timeline_based_metric is None
+    assert isinstance(metrics, collections.Iterable)
+    for m in metrics:
+      assert isinstance(m, timeline_based_metric.TimelineBasedMetric)
+    self._legacy_timeline_based_metrics = metrics
+
+  def GetLegacyTimelineBasedMetrics(self):
+    return self._legacy_timeline_based_metrics
+
+
+class TimelineBasedMeasurement(story_test.StoryTest):
+  """Collects multiple metrics based on their interaction records.
+
+  A timeline based measurement shifts the burden of what metrics to collect onto
+  the story under test. Instead of the measurement
+  having a fixed set of values it collects, the story being tested
+  issues (via javascript) an Interaction record into the user timing API
+  describing what is happening at that time, as well as a standardized set
+  of flags describing the semantics of the work being done. The
+  TimelineBasedMeasurement object collects a trace that includes both these
+  interaction records, and a user-chosen amount of performance data using
+  Telemetry's various timeline-producing APIs, tracing especially.
+
+  It then passes the recorded timeline to different TimelineBasedMetrics based
+  on those flags. As an example, this allows a single story run to produce
+  load timing data, smoothness data, critical jank information and overall cpu
+  usage information.
+
+  For information on how to mark up a page to work with
+  TimelineBasedMeasurement, refer to the
+  perf.metrics.timeline_interaction_record module.
+
+  Args:
+      options: an instance of timeline_based_measurement.Options.
+      results_wrapper: A class whose __init__ method takes in the
+        page_test_results object and the interaction record label. This class
+        follows the ResultsWrapperInterface. Note: this class is not supported
+        long term and will be removed when crbug.com/453109 is resolved.
+  """
+  def __init__(self, options, results_wrapper=None):
+    self._tbm_options = options
+    self._results_wrapper = results_wrapper or _TBMResultWrapper()
+
+  def WillRunStory(self, platform):
+    """Configure and start tracing."""
+    if not platform.tracing_controller.IsChromeTracingSupported():
+      raise Exception('Not supported')
+    platform.tracing_controller.StartTracing(self._tbm_options.config)
+
+  def Measure(self, platform, results):
+    """Collect all possible metrics and added them to results."""
+    trace_result = platform.tracing_controller.StopTracing()
+    trace_value = trace.TraceValue(results.current_page, trace_result)
+    results.AddValue(trace_value)
+
+    if self._tbm_options.GetTimelineBasedMetric():
+      self._ComputeTimelineBasedMetric(results, trace_value)
+    else:
+      assert self._tbm_options.GetLegacyTimelineBasedMetrics()
+      self._ComputeLegacyTimelineBasedMetrics(results, trace_result)
+
+
+  def DidRunStory(self, platform):
+    """Clean up after running the story."""
+    if platform.tracing_controller.is_tracing_running:
+      platform.tracing_controller.StopTracing()
+
+  def _ComputeTimelineBasedMetric(self, results, trace_value):
+    metric = self._tbm_options.GetTimelineBasedMetric()
+    extra_import_options = {
+      'trackDetailedModelStats': True
+    }
+
+    mre_result = metric_runner.RunMetric(
+        trace_value.filename, metric, extra_import_options)
+    page = results.current_page
+
+    failure_dicts = mre_result.failures
+    for d in failure_dicts:
+      results.AddValue(
+          translate_common_values.TranslateMreFailure(d, page))
+
+    value_dicts = mre_result.pairs.get('values', [])
+    for d in value_dicts:
+      results.AddValue(
+          translate_common_values.TranslateScalarValue(d, page))
+
+  def _ComputeLegacyTimelineBasedMetrics(self, results, trace_result):
+    model = model_module.TimelineModel(trace_result)
+    threads_to_records_map = _GetRendererThreadsToInteractionRecordsMap(model)
+    if (len(threads_to_records_map.values()) == 0 and
+        self._tbm_options.config.enable_chrome_trace):
+      logging.warning(
+          'No timeline interaction records were recorded in the trace. '
+          'This could be caused by console.time() & console.timeEnd() execution'
+          ' failure, or because the specified tracing categories do not '
+          'include the blink.console category.')
+
+    all_metrics = self._tbm_options.GetLegacyTimelineBasedMetrics()
+
+    for renderer_thread, interaction_records in (
+        threads_to_records_map.iteritems()):
+      meta_metrics = _TimelineBasedMetrics(
+          model, renderer_thread, interaction_records, self._results_wrapper,
+          all_metrics)
+      meta_metrics.AddResults(results)
+
+    for metric in all_metrics:
+      metric.AddWholeTraceResults(model, results)
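Configuration of the measurement above goes through the Options class. The sketch below shows the two configuration paths using only the public methods defined in this file: keeping the legacy metrics while lowering tracing overhead and extending the category filter, or switching to a single TBMv2 metric. The 'blink.console' category and the 'sampleMetric' name are taken from the warning message and the smoke test elsewhere in this patch, so treat them as placeholders.

from telemetry.web_perf import timeline_based_measurement as tbm_module

# Legacy path: default legacy metrics, minimal overhead, one extra category.
options = tbm_module.Options(
    overhead_level=tbm_module.MINIMAL_OVERHEAD_LEVEL)
options.ExtendTraceCategoryFilter(['blink.console'])
tbm = tbm_module.TimelineBasedMeasurement(options)

# TBMv2 path: a single metric; setting it clears the legacy metric list.
v2_options = tbm_module.Options()
v2_options.SetTimelineBasedMetric('sampleMetric')
assert v2_options.GetLegacyTimelineBasedMetrics() is None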
diff --git a/catapult/telemetry/telemetry/web_perf/timeline_based_measurement_unittest.py b/catapult/telemetry/telemetry/web_perf/timeline_based_measurement_unittest.py
new file mode 100644
index 0000000..1523bfb
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/timeline_based_measurement_unittest.py
@@ -0,0 +1,212 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import unittest
+
+from telemetry import story
+from telemetry.internal.results import page_test_results
+from telemetry.page import page as page_module
+from telemetry.timeline import async_slice
+from telemetry.timeline import model as model_module
+from telemetry.value import improvement_direction
+from telemetry.value import scalar
+from telemetry.web_perf.metrics import timeline_based_metric
+from telemetry.web_perf import timeline_based_measurement as tbm_module
+
+
+class FakeSmoothMetric(timeline_based_metric.TimelineBasedMetric):
+
+  def AddResults(self, model, renderer_thread, interaction_records, results):
+    results.AddValue(scalar.ScalarValue(
+        results.current_page, 'FakeSmoothMetric', 'ms', 1,
+        improvement_direction=improvement_direction.DOWN))
+    results.AddValue(scalar.ScalarValue(
+        results.current_page, 'SmoothMetricRecords', 'count',
+        len(interaction_records),
+        improvement_direction=improvement_direction.DOWN))
+
+
+class FakeLoadingMetric(timeline_based_metric.TimelineBasedMetric):
+
+  def AddResults(self, model, renderer_thread, interaction_records, results):
+    results.AddValue(scalar.ScalarValue(
+        results.current_page, 'FakeLoadingMetric', 'ms', 2,
+        improvement_direction=improvement_direction.DOWN))
+    results.AddValue(scalar.ScalarValue(
+        results.current_page, 'LoadingMetricRecords', 'count',
+        len(interaction_records),
+        improvement_direction=improvement_direction.DOWN))
+
+
+class FakeStartupMetric(timeline_based_metric.TimelineBasedMetric):
+
+  def AddResults(self, model, renderer_thread, interaction_records, results):
+    pass
+
+  def AddWholeTraceResults(self, model, results):
+    results.AddValue(scalar.ScalarValue(
+        results.current_page, 'FakeStartupMetric', 'ms', 3,
+        improvement_direction=improvement_direction.DOWN))
+
+
+class TimelineBasedMetricTestData(object):
+
+  def __init__(self, options):
+    self._model = model_module.TimelineModel()
+    renderer_process = self._model.GetOrCreateProcess(1)
+    self._renderer_thread = renderer_process.GetOrCreateThread(2)
+    self._renderer_thread.name = 'CrRendererMain'
+    self._foo_thread = renderer_process.GetOrCreateThread(3)
+    self._foo_thread.name = 'CrFoo'
+
+    self._results_wrapper = tbm_module._TBMResultWrapper()
+    self._results = page_test_results.PageTestResults()
+    self._story_set = None
+    self._threads_to_records_map = None
+    self._tbm_options = options
+
+  @property
+  def model(self):
+    return self._model
+
+  @property
+  def renderer_thread(self):
+    return self._renderer_thread
+
+  @property
+  def foo_thread(self):
+    return self._foo_thread
+
+  @property
+  def threads_to_records_map(self):
+    return self._threads_to_records_map
+
+  @property
+  def results(self):
+    return self._results
+
+  def AddInteraction(self, thread, marker='', ts=0, duration=5):
+    assert thread in (self._renderer_thread, self._foo_thread)
+    thread.async_slices.append(async_slice.AsyncSlice(
+        'category', marker, timestamp=ts, duration=duration,
+        start_thread=self._renderer_thread, end_thread=self._renderer_thread,
+        thread_start=ts, thread_duration=duration))
+
+  def FinalizeImport(self):
+    self._model.FinalizeImport()
+    self._threads_to_records_map = (
+      tbm_module._GetRendererThreadsToInteractionRecordsMap(self._model))
+    self._story_set = story.StorySet(base_dir=os.path.dirname(__file__))
+    self._story_set.AddStory(page_module.Page(
+        'http://www.bar.com/', self._story_set, self._story_set.base_dir))
+    self._results.WillRunPage(self._story_set.stories[0])
+
+  def AddResults(self):
+    all_metrics = self._tbm_options.GetLegacyTimelineBasedMetrics()
+
+    for thread, records in self._threads_to_records_map.iteritems():
+      # pylint: disable=protected-access
+      metric = tbm_module._TimelineBasedMetrics(
+          self._model, thread, records, self._results_wrapper, all_metrics)
+      metric.AddResults(self._results)
+
+    for metric in all_metrics:
+      metric.AddWholeTraceResults(self._model, self._results)
+
+    self._results.DidRunPage(self._story_set.stories[0])
+
+
+class TimelineBasedMetricsTests(unittest.TestCase):
+
+  def setUp(self):
+    self.actual_get_all_tbm_metrics = (
+        tbm_module._GetAllLegacyTimelineBasedMetrics)
+    self._options = tbm_module.Options()
+    self._options.SetLegacyTimelineBasedMetrics(
+        (FakeSmoothMetric(), FakeLoadingMetric(), FakeStartupMetric()))
+
+  def tearDown(self):
+    tbm_module._GetAllLegacyTimelineBasedMetrics = (
+        self.actual_get_all_tbm_metrics)
+
+  def testGetRendererThreadsToInteractionRecordsMap(self):
+    d = TimelineBasedMetricTestData(self._options)
+    # Insert 2 interaction records to renderer_thread and 1 to foo_thread
+    d.AddInteraction(d.renderer_thread, ts=0, duration=20,
+                     marker='Interaction.LogicalName1')
+    d.AddInteraction(d.renderer_thread, ts=25, duration=5,
+                     marker='Interaction.LogicalName2')
+    d.AddInteraction(d.foo_thread, ts=50, duration=15,
+                     marker='Interaction.LogicalName3')
+    d.FinalizeImport()
+
+    self.assertEquals(2, len(d.threads_to_records_map))
+
+    # Assert the 2 interaction records of renderer_thread are in the map.
+    self.assertIn(d.renderer_thread, d.threads_to_records_map)
+    interactions = d.threads_to_records_map[d.renderer_thread]
+    self.assertEquals(2, len(interactions))
+    self.assertEquals(0, interactions[0].start)
+    self.assertEquals(20, interactions[0].end)
+
+    self.assertEquals(25, interactions[1].start)
+    self.assertEquals(30, interactions[1].end)
+
+    # Assert the 1 interaction records of foo_thread is in the map.
+    self.assertIn(d.foo_thread, d.threads_to_records_map)
+    interactions = d.threads_to_records_map[d.foo_thread]
+    self.assertEquals(1, len(interactions))
+    self.assertEquals(50, interactions[0].start)
+    self.assertEquals(65, interactions[0].end)
+
+  def testAddResults(self):
+    d = TimelineBasedMetricTestData(self._options)
+    d.AddInteraction(d.renderer_thread, ts=0, duration=20,
+                     marker='Interaction.LogicalName1')
+    d.AddInteraction(d.foo_thread, ts=25, duration=5,
+                     marker='Interaction.LogicalName2')
+    d.FinalizeImport()
+    d.AddResults()
+    self.assertEquals(1, len(d.results.FindAllPageSpecificValuesFromIRNamed(
+        'LogicalName1', 'FakeSmoothMetric')))
+    self.assertEquals(1, len(d.results.FindAllPageSpecificValuesFromIRNamed(
+        'LogicalName2', 'FakeLoadingMetric')))
+    self.assertEquals(1, len(d.results.FindAllPageSpecificValuesNamed(
+        'FakeStartupMetric')))
+
+  def testDuplicateInteractionsInDifferentThreads(self):
+    d = TimelineBasedMetricTestData(self._options)
+    d.AddInteraction(d.renderer_thread, ts=10, duration=5,
+                     marker='Interaction.LogicalName/repeatable')
+    d.AddInteraction(d.foo_thread, ts=20, duration=5,
+                     marker='Interaction.LogicalName')
+    self.assertRaises(tbm_module.InvalidInteractions, d.FinalizeImport)
+
+  def testDuplicateRepeatableInteractionsInDifferentThreads(self):
+    d = TimelineBasedMetricTestData(self._options)
+    d.AddInteraction(d.renderer_thread, ts=10, duration=5,
+                     marker='Interaction.LogicalName/repeatable')
+    d.AddInteraction(d.foo_thread, ts=20, duration=5,
+                     marker='Interaction.LogicalName/repeatable')
+    self.assertRaises(tbm_module.InvalidInteractions, d.FinalizeImport)
+
+  def testDuplicateUnrepeatableInteractionsInSameThread(self):
+    d = TimelineBasedMetricTestData(self._options)
+    d.AddInteraction(d.renderer_thread, ts=10, duration=5,
+                     marker='Interaction.LogicalName')
+    d.AddInteraction(d.renderer_thread, ts=20, duration=5,
+                     marker='Interaction.LogicalName')
+    d.FinalizeImport()
+    self.assertRaises(tbm_module.InvalidInteractions, d.AddResults)
+
+  def testDuplicateRepeatableInteractions(self):
+    d = TimelineBasedMetricTestData(self._options)
+    d.AddInteraction(d.renderer_thread, ts=10, duration=5,
+                     marker='Interaction.LogicalName/repeatable')
+    d.AddInteraction(d.renderer_thread, ts=20, duration=5,
+                     marker='Interaction.LogicalName/repeatable')
+    d.FinalizeImport()
+    d.AddResults()
+    self.assertEquals(1, len(d.results.pages_that_succeeded))
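The duplicate-interaction rules exercised by the tests above hinge on the /repeatable suffix of the marker name. A marker can be written by hand or built with the GetJavaScriptMarker helper defined in timeline_interaction_record.py later in this patch; a short sketch:

from telemetry.web_perf import timeline_interaction_record as tir_module

marker = tir_module.GetJavaScriptMarker('LogicalName', [tir_module.REPEATABLE])
assert marker == 'Interaction.LogicalName/repeatable'

plain_marker = tir_module.GetJavaScriptMarker('LogicalName', [])
assert plain_marker == 'Interaction.LogicalName'

# Duplicate labels across threads always raise InvalidInteractions; within a
# thread they are allowed only when every record carries the repeatable flag,
# as the tests above demonstrate.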
diff --git a/catapult/telemetry/telemetry/web_perf/timeline_based_page_test.py b/catapult/telemetry/telemetry/web_perf/timeline_based_page_test.py
new file mode 100644
index 0000000..fdde37a
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/timeline_based_page_test.py
@@ -0,0 +1,28 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry.page import page_test
+
+class TimelineBasedPageTest(page_test.PageTest):
+  """Page test that collects metrics with TimelineBasedMeasurement.
+
+  WillRunStory(), Measure() and DidRunStory() are all done in story_runner
+  explicitly. We still need this wrapper around PageTest because it executes
+  some browser-related functions in the parent class which are needed by
+  timeline based measurement benchmarks. This class will be removed after
+  page_test's hooks are fully removed.
+  """
+  def __init__(self, tbm):
+    super(TimelineBasedPageTest, self).__init__()
+    self._measurement = tbm
+
+  @property
+  def measurement(self):
+    return self._measurement
+
+  def ValidateAndMeasurePage(self, page, tab, results):
+    """Collect all possible metrics and added them to results."""
+    # Measurement is done explicitly in story_runner for timeline based page
+    # test.
+    pass
diff --git a/catapult/telemetry/telemetry/web_perf/timeline_based_page_test_unittest.py b/catapult/telemetry/telemetry/web_perf/timeline_based_page_test_unittest.py
new file mode 100644
index 0000000..a7f71db
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/timeline_based_page_test_unittest.py
@@ -0,0 +1,157 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from telemetry import decorators
+from telemetry.page import page as page_module
+from telemetry.testing import browser_test_case
+from telemetry.testing import options_for_unittests
+from telemetry.testing import page_test_test_case
+from telemetry.timeline import tracing_category_filter
+from telemetry.util import wpr_modes
+from telemetry.web_perf import timeline_based_measurement as tbm_module
+
+class TestTimelinebasedMeasurementPage(page_module.Page):
+
+  def __init__(self, ps, base_dir, trigger_animation=False,
+               trigger_jank=False, trigger_slow=False,
+               trigger_scroll_gesture=False):
+    super(TestTimelinebasedMeasurementPage, self).__init__(
+        'file://interaction_enabled_page.html', ps, base_dir)
+    self._trigger_animation = trigger_animation
+    self._trigger_jank = trigger_jank
+    self._trigger_slow = trigger_slow
+    self._trigger_scroll_gesture = trigger_scroll_gesture
+
+  def RunPageInteractions(self, action_runner):
+    if self._trigger_animation:
+      action_runner.TapElement('#animating-button')
+      action_runner.WaitForJavaScriptCondition('window.animationDone')
+    if self._trigger_jank:
+      action_runner.TapElement('#jank-button')
+      action_runner.WaitForJavaScriptCondition('window.jankScriptDone')
+    if self._trigger_slow:
+      action_runner.TapElement('#slow-button')
+      action_runner.WaitForJavaScriptCondition('window.slowScriptDone')
+    if self._trigger_scroll_gesture:
+      with action_runner.CreateGestureInteraction('Scroll'):
+        action_runner.ScrollPage()
+
+
+class TimelineBasedPageTestTest(page_test_test_case.PageTestTestCase):
+
+  def setUp(self):
+    browser_test_case.teardown_browser()
+    self._options = options_for_unittests.GetCopy()
+    self._options.browser_options.wpr_mode = wpr_modes.WPR_OFF
+
+  # This test is flaky when run in parallel on the mac: crbug.com/426676
+  # Also, fails on android: crbug.com/437057, and chromeos: crbug.com/483212
+  @decorators.Disabled('android', 'mac', 'chromeos')
+  @decorators.Disabled('win')  # crbug.com/570955
+  @decorators.Isolated  # Needed because of py_trace_event
+  def testSmoothnessTimelineBasedMeasurementForSmoke(self):
+    ps = self.CreateEmptyPageSet()
+    ps.AddStory(TestTimelinebasedMeasurementPage(
+        ps, ps.base_dir, trigger_animation=True))
+
+    tbm = tbm_module.TimelineBasedMeasurement(tbm_module.Options())
+    results = self.RunMeasurement(tbm, ps, options=self._options)
+
+    self.assertEquals(0, len(results.failures))
+    v = results.FindAllPageSpecificValuesFromIRNamed(
+        'CenterAnimation', 'frame_time_discrepancy')
+    self.assertEquals(len(v), 1)
+    v = results.FindAllPageSpecificValuesFromIRNamed(
+        'DrawerAnimation', 'frame_time_discrepancy')
+    self.assertEquals(len(v), 1)
+
+  # This test should eventually work on all platforms, but currently this
+  # metric is flaky on desktop: crbug.com/453131
+  @decorators.Enabled('android')
+  def testGPUTimesTimelineBasedMeasurementForSmoke(self):
+    ps = self.CreateEmptyPageSet()
+    ps.AddStory(TestTimelinebasedMeasurementPage(
+        ps, ps.base_dir, trigger_animation=True))
+
+    cat_filter = tracing_category_filter.TracingCategoryFilter(
+        'disabled-by-default-gpu.service')
+    tbm_option = tbm_module.Options(overhead_level=cat_filter)
+    tbm = tbm_module.TimelineBasedMeasurement(tbm_option)
+    results = self.RunMeasurement(tbm, ps, options=self._options)
+
+    self.assertEquals(0, len(results.failures))
+    v = results.FindAllPageSpecificValuesFromIRNamed(
+        'CenterAnimation', 'browser_compositor_max_cpu_time')
+    self.assertEquals(len(v), 1)
+    self.assertGreater(v[0].value, 0)
+    v = results.FindAllPageSpecificValuesFromIRNamed(
+        'DrawerAnimation', 'browser_compositor_max_cpu_time')
+    self.assertEquals(len(v), 1)
+    self.assertGreater(v[0].value, 0)
+
+  # Disabled since the mainthread_jank metric is not supported on Windows.
+  # Also, flaky on the mac when run in parallel: crbug.com/426676
+  # Also, fails on android: crbug.com/437057
+  # Also, fails on chromeos: crbug.com/483212
+  @decorators.Disabled('android', 'win', 'mac', 'chromeos')
+  @decorators.Isolated  # Needed because of py_trace_event
+  def testMainthreadJankTimelineBasedMeasurement(self):
+    ps = self.CreateEmptyPageSet()
+    ps.AddStory(TestTimelinebasedMeasurementPage(
+        ps, ps.base_dir, trigger_jank=True))
+
+    tbm = tbm_module.TimelineBasedMeasurement(tbm_module.Options())
+    results = self.RunMeasurement(tbm, ps, options=self._options)
+    self.assertEquals(0, len(results.failures))
+
+    # In interaction_enabled_page.html, we create a jank loop based on
+    # window.performance.now() (basically loop for x milliseconds).
+    # Since window.performance.now() uses wall-time instead of thread time,
+    # we only assert the biggest jank > 50ms here to account for the fact
+    # that the browser may deschedule during the jank loop.
+    v = results.FindAllPageSpecificValuesFromIRNamed(
+        'JankThreadJSRun', 'responsive-biggest_jank_thread_time')
+    self.assertGreaterEqual(v[0].value, 50)
+
+    v = results.FindAllPageSpecificValuesFromIRNamed(
+        'JankThreadJSRun', 'responsive-total_big_jank_thread_time')
+    self.assertGreaterEqual(v[0].value, 50)
+
+  # win: crbug.com/520781, chromeos: crbug.com/483212.
+  @decorators.Disabled('win', 'chromeos')
+  @decorators.Isolated  # Needed because of py_trace_event
+  def testTimelineBasedMeasurementGestureAdjustmentSmoke(self):
+    ps = self.CreateEmptyPageSet()
+    ps.AddStory(TestTimelinebasedMeasurementPage(
+        ps, ps.base_dir, trigger_scroll_gesture=True))
+
+    tbm = tbm_module.TimelineBasedMeasurement(tbm_module.Options())
+    results = self.RunMeasurement(tbm, ps, options=self._options)
+
+    self.assertEquals(0, len(results.failures))
+    v = results.FindAllPageSpecificValuesFromIRNamed(
+        'Gesture_Scroll', 'frame_time_discrepancy')
+    self.assertEquals(len(v), 1)
+
+  # Fails on chromeos: crbug.com/483212
+  @decorators.Disabled('chromeos')
+  def testTBM2ForSmoke(self):
+    ps = self.CreateEmptyPageSet()
+    ps.AddStory(TestTimelinebasedMeasurementPage(ps, ps.base_dir))
+
+    options = tbm_module.Options()
+    options.SetTimelineBasedMetric('sampleMetric')
+
+    tbm = tbm_module.TimelineBasedMeasurement(options)
+    results = self.RunMeasurement(tbm, ps, self._options)
+
+    self.assertEquals(0, len(results.failures))
+    v_foo = results.FindAllPageSpecificValuesNamed('foo')
+    v_bar = results.FindAllPageSpecificValuesNamed('bar')
+    self.assertEquals(len(v_foo), 1)
+    self.assertEquals(len(v_bar), 1)
+    self.assertEquals(v_foo[0].value, 1)
+    self.assertIsNotNone(v_foo[0].page)
+    self.assertEquals(v_bar[0].value, 2)
+    self.assertIsNotNone(v_bar[0].page)
diff --git a/catapult/telemetry/telemetry/web_perf/timeline_interaction_record.py b/catapult/telemetry/telemetry/web_perf/timeline_interaction_record.py
new file mode 100644
index 0000000..7d6ce26
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/timeline_interaction_record.py
@@ -0,0 +1,235 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import re
+
+from telemetry import decorators
+import telemetry.timeline.bounds as timeline_bounds
+
+# Allows multiple duplicate interactions of the same type
+REPEATABLE = 'repeatable'
+
+FLAGS = [REPEATABLE]
+
+
+class ThreadTimeRangeOverlappedException(Exception):
+  """Exception that can be thrown when computing overlapped thread time range
+  with other events.
+  """
+
+class NoThreadTimeDataException(ThreadTimeRangeOverlappedException):
+  """Exception that can be thrown if there is not sufficient thread time data
+  to compute the overlapped thread time range."""
+
+def IsTimelineInteractionRecord(event_name):
+  return event_name.startswith('Interaction.')
+
+def _AssertFlagsAreValid(flags):
+  assert isinstance(flags, list)
+  for f in flags:
+    if f not in FLAGS:
+      raise AssertionError(
+          'Unrecognized flag for a timeline interaction record: %s' % f)
+
+def GetJavaScriptMarker(label, flags):
+  """Computes the marker string of an interaction record.
+
+  This marker string can be used with the JavaScript APIs console.time()
+  and console.timeEnd() to mark the beginning and end of the
+  interaction record.
+
+  Args:
+    label: The label used to identify the interaction record.
+    flags: The flags for the interaction record; see FLAGS above.
+
+  Returns:
+    The interaction record marker string (e.g., Interaction.Label/flag1,flag2).
+
+  Raises:
+    AssertionError: If one or more of the flags is unrecognized.
+  """
+  _AssertFlagsAreValid(flags)
+  marker = 'Interaction.%s' % label
+  if flags:
+    marker += '/%s' % (','.join(flags))
+  return marker
+
+class TimelineInteractionRecord(object):
+  """Represents an interaction that took place during a timeline recording.
+
+  As a page runs, typically a number of different (simulated) user interactions
+  take place. For instance, a user might click a button in a mail app causing a
+  popup to animate in. Then they might press another button that sends data to a
+  server and simultaneously closes the popup without an animation. These are two
+  interactions.
+
+  From the point of view of the page, each interaction might have a different
+  label: ClickComposeButton and SendEmail, for instance. From the point
+  of view of the benchmarking harness, the labels aren't so interesting as what
+  the performance expectations are for that interaction: was it loading
+  resources from the network? was there an animation?
+
+  Determining these things is hard to do, simply by observing the state given to
+  a page from javascript. There are hints, for instance if network requests are
+  sent, or if a CSS animation is pending. But this is by no means a complete
+  story.
+
+  Instead, we expect pages to mark up the timeline with what they are doing,
+  using a label and flags indicating the semantics of that interaction. This
+  is currently done by pushing markers into the console.time/timeEnd API; for
+  instance, the following can be issued in JS:
+
+     var str = 'Interaction.SendEmail';
+     console.time(str);
+     setTimeout(function() {
+       console.timeEnd(str);
+     }, 1000);
+
+  When run with perf.measurements.timeline_based_measurement, this will cause
+  a TimelineInteractionRecord to be created for this range, with all metrics
+  reported for the marked-up 1000ms time range.
+
+  The valid interaction flags are:
+     * repeatable: Allows other interactions to use the same label
+  """
+
+  def __init__(self, label, start, end, async_event=None, flags=None):
+    assert label
+    self._label = label
+    self._start = start
+    self._end = end
+    self._async_event = async_event
+    self._flags = flags if flags is not None else []
+    _AssertFlagsAreValid(self._flags)
+
+  @property
+  def label(self):
+    return self._label
+
+  @property
+  def start(self):
+    return self._start
+
+  @property
+  def end(self):
+    return self._end
+
+  @property
+  def repeatable(self):
+    return REPEATABLE in self._flags
+
+  # TODO(nednguyen): After crbug.com/367175 is marked fixed, we should be able
+  # to get rid of perf.measurements.smooth_gesture_util and make this the only
+  # constructor method for TimelineInteractionRecord.
+  @classmethod
+  def FromAsyncEvent(cls, async_event):
+    """Construct an timeline_interaction_record from an async event.
+    Args:
+      async_event: An instance of
+        telemetry.timeline.async_slices.AsyncSlice
+    """
+    assert async_event.start_thread == async_event.end_thread, (
+        'Start thread of this record\'s async event is not the same as its '
+        'end thread')
+    m = re.match(r'Interaction\.(?P<label>.+?)(/(?P<flags>[^/]+))?$',
+                 async_event.name)
+    assert m, "Async event is not an interaction record."
+    label = m.group('label')
+    flags = m.group('flags').split(',') if m.group('flags') is not None else []
+    return cls(label, async_event.start, async_event.end, async_event, flags)
+
+  @decorators.Cache
+  def GetBounds(self):
+    bounds = timeline_bounds.Bounds()
+    bounds.AddValue(self.start)
+    bounds.AddValue(self.end)
+    return bounds
+
+  def GetOverlappedThreadTimeForSlice(self, timeline_slice):
+    """Get the thread duration of timeline_slice that overlaps with this record.
+
+    There are two cases :
+
+    Case 1: timeline_slice runs in the same thread as the record.
+
+                  |    [       timeline_slice         ]
+      THREAD 1    |                  |                              |
+                  |            record starts                    record ends
+
+                      (relative order in thread time)
+
+      As the thread timestamps in timeline_slice and record are consistent, we
+      simply use them to compute the overlap.
+
+    Case 2: timeline_slice runs in a different thread from the record's.
+
+                  |
+      THREAD 2    |    [       timeline_slice         ]
+                  |
+
+                  |
+      THREAD 1    |               |                               |
+                  |          record starts                      record ends
+
+                      (relative order in wall-time)
+
+      Unlike case 1, thread timestamps of a thread are measured by its
+      thread-specific clock, which is inconsistent with that of the other
+      thread, and thus can't be used to compute the overlapped thread duration.
+      Hence, we use a heuristic to compute the overlap (see
+      _GetOverlappedThreadTimeForSliceInDifferentThread for more details)
+
+    Args:
+      timeline_slice: An instance of telemetry.timeline.slice.Slice
+    """
+    if not self._async_event:
+      raise ThreadTimeRangeOverlappedException(
+          'This record was not constructed from async event')
+    if not self._async_event.has_thread_timestamps:
+      raise NoThreadTimeDataException(
+          'This record\'s async_event does not contain thread time data. '
+          'Event data: %s' % repr(self._async_event))
+    if not timeline_slice.has_thread_timestamps:
+      raise NoThreadTimeDataException(
+          'slice does not contain thread time data')
+
+    if timeline_slice.parent_thread == self._async_event.start_thread:
+      return self._GetOverlappedThreadTimeForSliceInSameThread(
+          timeline_slice)
+    else:
+      return self._GetOverlappedThreadTimeForSliceInDifferentThread(
+          timeline_slice)
+
+  def _GetOverlappedThreadTimeForSliceInSameThread(self, timeline_slice):
+    return timeline_bounds.Bounds.GetOverlap(
+        timeline_slice.thread_start, timeline_slice.thread_end,
+        self._async_event.thread_start, self._async_event.thread_end)
+
+  def _GetOverlappedThreadTimeForSliceInDifferentThread(self, timeline_slice):
+    # In case timeline_slice's parent thread is not the parent thread of the
+    # async slice that issues this record, we assume that events are descheduled
+    # uniformly. The overlap duration in thread time is then computed by
+    # multiplying the overlap wall-time duration of timeline_slice and the
+    # record's async slice with their thread_duration/duration ratios.
+    overlapped_walltime_duration = timeline_bounds.Bounds.GetOverlap(
+        timeline_slice.start, timeline_slice.end,
+        self.start, self.end)
+    if timeline_slice.duration == 0 or self._async_event.duration == 0:
+      return 0
+    timeline_slice_scheduled_ratio = (
+        timeline_slice.thread_duration / float(timeline_slice.duration))
+    record_scheduled_ratio = (
+        self._async_event.thread_duration / float(self._async_event.duration))
+    return (overlapped_walltime_duration * timeline_slice_scheduled_ratio *
+            record_scheduled_ratio)
+
+  def __repr__(self):
+    flags_str = ','.join(self._flags)
+    return ('TimelineInteractionRecord(label=\'%s\', start=%f, end=%f,' +
+            ' flags=%s, async_event=%s)') % (
+                self.label,
+                self.start,
+                self.end,
+                flags_str,
+                repr(self._async_event))
diff --git a/catapult/telemetry/telemetry/web_perf/timeline_interaction_record_unittest.py b/catapult/telemetry/telemetry/web_perf/timeline_interaction_record_unittest.py
new file mode 100644
index 0000000..870b66b
--- /dev/null
+++ b/catapult/telemetry/telemetry/web_perf/timeline_interaction_record_unittest.py
@@ -0,0 +1,152 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from telemetry.timeline import async_slice
+from telemetry.timeline import model as model_module
+from telemetry.timeline import slice as slice_module
+from telemetry.web_perf import timeline_interaction_record as tir_module
+
+
+class ParseTests(unittest.TestCase):
+
+  def testParse(self):
+    self.assertTrue(tir_module.IsTimelineInteractionRecord(
+        'Interaction.Foo'))
+    self.assertTrue(tir_module.IsTimelineInteractionRecord(
+        'Interaction.Foo/Bar'))
+    self.assertFalse(tir_module.IsTimelineInteractionRecord(
+        'SomethingRandom'))
+
+
+class TimelineInteractionRecordTests(unittest.TestCase):
+
+  def CreateSimpleRecordWithName(self, event_name):
+    s = async_slice.AsyncSlice(
+        'cat', event_name,
+        timestamp=0, duration=200, thread_start=20, thread_duration=100)
+    return tir_module.TimelineInteractionRecord.FromAsyncEvent(s)
+
+  def CreateTestSliceFromTimeRanges(
+      self, parent_thread, time_start, time_end, thread_start, thread_end):
+    duration = time_end - time_start
+    thread_duration = thread_end - thread_start
+    return slice_module.Slice(parent_thread, 'Test', 'foo', time_start,
+                              duration, thread_start, thread_duration)
+
+  def testCreate(self):
+    r = self.CreateSimpleRecordWithName('Interaction.LogicalName')
+    self.assertEquals('LogicalName', r.label)
+    self.assertEquals(False, r.repeatable)
+
+    r = self.CreateSimpleRecordWithName('Interaction.LogicalName/repeatable')
+    self.assertEquals('LogicalName', r.label)
+    self.assertEquals(True, r.repeatable)
+
+    r = self.CreateSimpleRecordWithName(
+        'Interaction.LogicalNameWith/Slash/repeatable')
+    self.assertEquals('LogicalNameWith/Slash', r.label)
+    self.assertEquals(True, r.repeatable)
+
+  def testGetJavaScriptMarker(self):
+    repeatable_marker = tir_module.GetJavaScriptMarker(
+        'MyLabel', [tir_module.REPEATABLE])
+    self.assertEquals('Interaction.MyLabel/repeatable', repeatable_marker)
+
+  def testGetOverlappedThreadTimeForSliceInSameThread(self):
+    # Create a renderer thread.
+    model = model_module.TimelineModel()
+    renderer_main = model.GetOrCreateProcess(1).GetOrCreateThread(2)
+    model.FinalizeImport()
+
+    # Make a record that starts at 30ms and ends at 60ms in thread time.
+    s = async_slice.AsyncSlice(
+        'cat', 'Interaction.Test',
+        timestamp=0, duration=200, start_thread=renderer_main,
+        end_thread=renderer_main, thread_start=30, thread_duration=30)
+    record = tir_module.TimelineInteractionRecord.FromAsyncEvent(s)
+
+    # Non overlapped range on the left of event.
+    s1 = self.CreateTestSliceFromTimeRanges(renderer_main, 0, 100, 10, 20)
+    self.assertEquals(0, record.GetOverlappedThreadTimeForSlice(s1))
+
+    # Non overlapped range on the right of event.
+    s2 = self.CreateTestSliceFromTimeRanges(renderer_main, 0, 100, 70, 90)
+    self.assertEquals(0, record.GetOverlappedThreadTimeForSlice(s2))
+
+    # Overlapped range on the left of event.
+    s3 = self.CreateTestSliceFromTimeRanges(renderer_main, 0, 100, 20, 50)
+    self.assertEquals(20, record.GetOverlappedThreadTimeForSlice(s3))
+
+    # Overlapped range in the middle of event.
+    s4 = self.CreateTestSliceFromTimeRanges(renderer_main, 0, 100, 40, 50)
+    self.assertEquals(10, record.GetOverlappedThreadTimeForSlice(s4))
+
+    # Overlapped range on the right of event.
+    s5 = self.CreateTestSliceFromTimeRanges(renderer_main, 0, 100, 50, 90)
+    self.assertEquals(10, record.GetOverlappedThreadTimeForSlice(s5))
+
+  def testRepr(self):
+    # Create a renderer thread.
+    model = model_module.TimelineModel()
+    renderer_main = model.GetOrCreateProcess(1).GetOrCreateThread(2)
+    model.FinalizeImport()
+
+    s = async_slice.AsyncSlice(
+        'cat', 'Interaction.Test/repeatable',
+        timestamp=0, duration=200, start_thread=renderer_main,
+        end_thread=renderer_main, thread_start=30, thread_duration=30)
+    record = tir_module.TimelineInteractionRecord.FromAsyncEvent(s)
+    expected_repr = (
+        'TimelineInteractionRecord(label=\'Test\', '
+        'start=0.000000, end=200.000000, flags=repeatable, '
+        'async_event=TimelineEvent(name=\'Interaction.Test/repeatable\','
+        ' start=0.000000, duration=200, thread_start=30, thread_duration=30))')
+    self.assertEquals(expected_repr, repr(record))
+
+  def testGetOverlappedThreadTimeForSliceInDifferentThread(self):
+    # Create a renderer thread and another thread.
+    model = model_module.TimelineModel()
+    renderer_main = model.GetOrCreateProcess(1).GetOrCreateThread(2)
+    another_thread = model.GetOrCreateProcess(1).GetOrCreateThread(3)
+    model.FinalizeImport()
+
+    # Make a record that starts at 50ms and ends at 150ms in wall time, and is
+    # scheduled 75% of the time (hence thread_duration = 100ms*75% = 75ms).
+    s = async_slice.AsyncSlice(
+        'cat', 'Interaction.Test',
+        timestamp=50, duration=100, start_thread=renderer_main,
+        end_thread=renderer_main, thread_start=55, thread_duration=75)
+    record = tir_module.TimelineInteractionRecord.FromAsyncEvent(s)
+
+    # Non overlapped range on the left of event.
+    s1 = self.CreateTestSliceFromTimeRanges(another_thread, 25, 40, 28, 30)
+    self.assertEquals(0, record.GetOverlappedThreadTimeForSlice(s1))
+
+    # Non overlapped range on the right of event.
+    s2 = self.CreateTestSliceFromTimeRanges(another_thread, 200, 300, 270, 290)
+    self.assertEquals(0, record.GetOverlappedThreadTimeForSlice(s2))
+
+    # Overlapped range on the left of event, and slice is scheduled 50% of the
+    # time.
+    # The overlapped wall-time duration is 50ms.
+    # The overlapped thread-time duration is 50ms * 75% * 50% = 18.75
+    s3 = self.CreateTestSliceFromTimeRanges(another_thread, 0, 100, 20, 70)
+    self.assertEquals(18.75, record.GetOverlappedThreadTimeForSlice(s3))
+
+    # Overlapped range in the middle of event, and slice is scheduled 20% of the
+    # time.
+    # The overlapped wall-time duration is 40ms.
+    # The overlapped thread-time duration is 40ms * 75% * 20% = 6
+    s4 = self.CreateTestSliceFromTimeRanges(another_thread, 100, 140, 120, 128)
+    self.assertEquals(6, record.GetOverlappedThreadTimeForSlice(s4))
+
+    # Overlapped range on the right of event, and slice is scheduled 100% of the
+    # time.
+    # The overlapped wall-time duration is 32ms.
+    # The overlapped thread-time duration is 32ms * 75% * 100% = 24
+    s5 = self.CreateTestSliceFromTimeRanges(another_thread, 118, 170, 118, 170)
+    self.assertEquals(24, record.GetOverlappedThreadTimeForSlice(s5))
diff --git a/catapult/telemetry/telemetry/wpr/__init__.py b/catapult/telemetry/telemetry/wpr/__init__.py
new file mode 100644
index 0000000..4d6aabb
--- /dev/null
+++ b/catapult/telemetry/telemetry/wpr/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/catapult/telemetry/telemetry/wpr/archive_info.py b/catapult/telemetry/telemetry/wpr/archive_info.py
new file mode 100644
index 0000000..2e75829
--- /dev/null
+++ b/catapult/telemetry/telemetry/wpr/archive_info.py
@@ -0,0 +1,219 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import logging
+import os
+import re
+import shutil
+import tempfile
+
+from catapult_base import cloud_storage  # pylint: disable=import-error
+
+
+def AssertValidCloudStorageBucket(bucket):
+  is_valid = bucket in (None,
+                        cloud_storage.PUBLIC_BUCKET,
+                        cloud_storage.PARTNER_BUCKET,
+                        cloud_storage.INTERNAL_BUCKET)
+  if not is_valid:
+    raise ValueError("Cloud storage privacy bucket %s is invalid" % bucket)
+
+
+class ArchiveError(Exception):
+  pass
+
+
+class WprArchiveInfo(object):
+  def __init__(self, file_path, data, bucket):
+    AssertValidCloudStorageBucket(bucket)
+    self._file_path = file_path
+    self._base_dir = os.path.dirname(file_path)
+    self._data = data
+    self._bucket = bucket
+
+    # Ensure directory exists.
+    if not os.path.exists(self._base_dir):
+      os.makedirs(self._base_dir)
+
+    # Map from the relative path (as it appears in the metadata file) of the
+    # .wpr file to a list of story names it supports.
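+    # For instance (an illustrative value mirroring the metadata format written
+    # by _WriteToFile): {'data_001.wpr': ['story_foo', 'story_bar']}.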
+    self._wpr_file_to_story_names = data['archives']
+
+    # Map from the story name to a relative path (as it appears
+    # in the metadata file) of the .wpr file.
+    self._story_name_to_wpr_file = dict()
+    # Find out the wpr file names for each story.
+    for wpr_file in data['archives']:
+      story_names = data['archives'][wpr_file]
+      for story_name in story_names:
+        self._story_name_to_wpr_file[story_name] = wpr_file
+    self.temp_target_wpr_file_path = None
+
+  @classmethod
+  def FromFile(cls, file_path, bucket):
+    if os.path.exists(file_path):
+      with open(file_path, 'r') as f:
+        data = json.load(f)
+        return cls(file_path, data, bucket)
+    return cls(file_path, {'archives': {}}, bucket)
+
+  def DownloadArchivesIfNeeded(self):
+    """Downloads archives iff the Archive has a bucket parameter and the user
+    has permission to access the bucket.
+
+    Raises cloud storage Permissions or Credentials error when there is no
+    local copy of the archive and the user doesn't have permission to access
+    the archive's bucket.
+
+    Warns when a bucket is not specified or when the user doesn't have
+    permission to access the archive's bucket but a local copy of the archive
+    exists.
+    """
+    # Download all .wpr files.
+    if not self._bucket:
+      logging.warning('Story set in %s has no bucket specified, and '
+                      'cannot be downloaded from cloud_storage.',
+                      self._file_path)
+      return
+    assert 'archives' in self._data, 'Invalid data format in %s. \'archives\'' \
+                                     ' field is needed' % self._file_path
+    for archive_path in self._data['archives']:
+      archive_path = self._WprFileNameToPath(archive_path)
+      try:
+        cloud_storage.GetIfChanged(archive_path, self._bucket)
+      except (cloud_storage.CredentialsError, cloud_storage.PermissionError):
+        if os.path.exists(archive_path):
+          # If the archive exists, assume the user recorded their own and
+          # simply warn.
+          logging.warning('Need credentials to update WPR archive: %s',
+                          archive_path)
+        else:
+          logging.error("You either aren't authenticated or don't have "
+                        "permission to use the archives for this page set."
+                        "\nYou may need to run gsutil config."
+                        "\nYou can find instructions for gsutil config at: "
+                        "http://www.chromium.org/developers/telemetry/"
+                        "upload_to_cloud_storage")
+          raise
+
+  def WprFilePathForStory(self, story):
+    if self.temp_target_wpr_file_path:
+      return self.temp_target_wpr_file_path
+    wpr_file = self._story_name_to_wpr_file.get(story.display_name, None)
+    if wpr_file is None and hasattr(story, 'url'):
+      # Some old pages always use the URL to identify a page rather than the
+      # display_name, so try to look for that.
+      wpr_file = self._story_name_to_wpr_file.get(story.url, None)
+    if wpr_file:
+      return self._WprFileNameToPath(wpr_file)
+    return None
+
+  def AddNewTemporaryRecording(self, temp_wpr_file_path=None):
+    if temp_wpr_file_path is None:
+      temp_wpr_file_handle, temp_wpr_file_path = tempfile.mkstemp()
+      os.close(temp_wpr_file_handle)
+    self.temp_target_wpr_file_path = temp_wpr_file_path
+
+  def AddRecordedStories(self, stories, upload_to_cloud_storage=False):
+    if not stories:
+      os.remove(self.temp_target_wpr_file_path)
+      return
+
+    (target_wpr_file, target_wpr_file_path) = self._NextWprFileName()
+    for story in stories:
+      self._SetWprFileForStory(story.display_name, target_wpr_file)
+    shutil.move(self.temp_target_wpr_file_path, target_wpr_file_path)
+
+    # Update the hash file.
+    target_wpr_file_hash = cloud_storage.CalculateHash(target_wpr_file_path)
+    with open(target_wpr_file_path + '.sha1', 'wb') as f:
+      f.write(target_wpr_file_hash)
+      f.flush()
+
+    self._WriteToFile()
+    self._DeleteAbandonedWprFiles()
+
+    # Upload to cloud storage
+    if upload_to_cloud_storage:
+      if not self._bucket:
+        logging.warning('StorySet must have bucket specified to upload '
+                        'stories to cloud storage.')
+        return
+      try:
+        cloud_storage.Insert(self._bucket, target_wpr_file_hash,
+                             target_wpr_file_path)
+      except cloud_storage.CloudStorageError, e:
+        logging.warning('Failed to upload wpr file %s to cloud storage. '
+                        'Error: %s', target_wpr_file_path, e)
+
+  def _DeleteAbandonedWprFiles(self):
+    # Update the metadata so that the abandoned wpr files don't have
+    # empty story name arrays.
+    abandoned_wpr_files = self._AbandonedWprFiles()
+    for wpr_file in abandoned_wpr_files:
+      del self._wpr_file_to_story_names[wpr_file]
+      # Don't fail if we're unable to delete some of the files.
+      wpr_file_path = self._WprFileNameToPath(wpr_file)
+      try:
+        os.remove(wpr_file_path)
+      except Exception:
+        logging.warning('Failed to delete file: %s' % wpr_file_path)
+
+  def _AbandonedWprFiles(self):
+    abandoned_wpr_files = []
+    for wpr_file, story_names in (
+        self._wpr_file_to_story_names.iteritems()):
+      if not story_names:
+        abandoned_wpr_files.append(wpr_file)
+    return abandoned_wpr_files
+
+  def _WriteToFile(self):
+    """Writes the metadata into the file passed as constructor parameter."""
+    metadata = dict()
+    metadata['description'] = (
+        'Describes the Web Page Replay archives for a story set. '
+        'Don\'t edit by hand! Use record_wpr for updating.')
+    metadata['archives'] = self._wpr_file_to_story_names.copy()
+    # Don't write data for abandoned archives.
+    abandoned_wpr_files = self._AbandonedWprFiles()
+    for wpr_file in abandoned_wpr_files:
+      del metadata['archives'][wpr_file]
+
+    with open(self._file_path, 'w') as f:
+      json.dump(metadata, f, indent=4)
+      f.flush()
+
+  def _WprFileNameToPath(self, wpr_file):
+    return os.path.abspath(os.path.join(self._base_dir, wpr_file))
+
+  def _NextWprFileName(self):
+    """Creates a new file name for a wpr archive file."""
+    # The names are of the format "some_thing_number.wpr". Read the numbers.
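+    # For example (illustrative names): if the existing archives are
+    # data_001.wpr and data_002.wpr, the next file name is data_003.wpr.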
+    highest_number = -1
+    base = None
+    for wpr_file in self._wpr_file_to_story_names:
+      match = re.match(r'(?P<BASE>.*)_(?P<NUMBER>[0-9]+)\.wpr', wpr_file)
+      if not match:
+        raise Exception('Illegal wpr file name ' + wpr_file)
+      highest_number = max(int(match.groupdict()['NUMBER']), highest_number)
+      if base and match.groupdict()['BASE'] != base:
+        raise Exception('Illegal wpr file name ' + wpr_file +
+                        ', doesn\'t begin with ' + base)
+      base = match.groupdict()['BASE']
+    if not base:
+      # If we're creating a completely new info file, use the base name of the
+      # story set file.
+      base = os.path.splitext(os.path.basename(self._file_path))[0]
+    new_filename = '%s_%03d.wpr' % (base, highest_number + 1)
+    return new_filename, self._WprFileNameToPath(new_filename)
+
+  def _SetWprFileForStory(self, story_name, wpr_file):
+    """For modifying the metadata when we're going to record a new archive."""
+    old_wpr_file = self._story_name_to_wpr_file.get(story_name, None)
+    if old_wpr_file:
+      self._wpr_file_to_story_names[old_wpr_file].remove(story_name)
+    self._story_name_to_wpr_file[story_name] = wpr_file
+    if wpr_file not in self._wpr_file_to_story_names:
+      self._wpr_file_to_story_names[wpr_file] = []
+    self._wpr_file_to_story_names[wpr_file].append(story_name)
diff --git a/catapult/telemetry/telemetry/wpr/archive_info_unittest.py b/catapult/telemetry/telemetry/wpr/archive_info_unittest.py
new file mode 100644
index 0000000..b082aa9
--- /dev/null
+++ b/catapult/telemetry/telemetry/wpr/archive_info_unittest.py
@@ -0,0 +1,224 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import json
+import os
+import shutil
+import tempfile
+import unittest
+
+from catapult_base import cloud_storage  # pylint: disable=import-error
+
+from telemetry.page import page
+from telemetry.testing import system_stub
+from telemetry.wpr import archive_info
+
+
+class MockPage(page.Page):
+  def __init__(self, url, name=None):
+    super(MockPage, self).__init__(url, None, name=name)
+
+
+page1 = MockPage('http://www.foo.com/', 'Foo')
+page2 = MockPage('http://www.bar.com/', 'Bar')
+page3 = MockPage('http://www.baz.com/')
+recording1 = 'data_001.wpr'
+recording2 = 'data_002.wpr'
+archive_info_contents = ("""
+{
+"archives": {
+  "%s": ["%s", "%s"],
+  "%s": ["%s"]
+}
+}
+""" % (recording1, page1.display_name, page2.display_name, recording2,
+       page3.display_name))
+
+
+class WprArchiveInfoTest(unittest.TestCase):
+  def setUp(self):
+    self.tmp_dir = tempfile.mkdtemp()
+    # Write the metadata.
+    self.story_set_archive_info_file = os.path.join(
+        self.tmp_dir, 'info.json')
+    with open(self.story_set_archive_info_file, 'w') as f:
+      f.write(archive_info_contents)
+
+    # Write the existing .wpr files.
+    for i in [1, 2]:
+      with open(os.path.join(self.tmp_dir, ('data_00%d.wpr' % i)), 'w') as f:
+        f.write(archive_info_contents)
+
+    # Create the WprArchiveInfo object to be tested.
+    self.archive_info = archive_info.WprArchiveInfo.FromFile(
+        self.story_set_archive_info_file, cloud_storage.PUBLIC_BUCKET)
+    # Use cloud_storage system stub.
+    self.overrides = system_stub.Override(archive_info, ['cloud_storage'])
+
+  def tearDown(self):
+    shutil.rmtree(self.tmp_dir)
+    self.overrides.Restore()
+
+  def assertCorrectHashFile(self, file_path):
+    old_ch = cloud_storage.CalculateHash
+    cloud_storage.CalculateHash = self.overrides.cloud_storage.CalculateHash
+    try:
+      self.assertTrue(os.path.exists(file_path + '.sha1'))
+      with open(file_path + '.sha1', 'rb') as f:
+        self.assertEquals(cloud_storage.CalculateHash(file_path), f.read())
+    finally:
+      cloud_storage.CalculateHash = old_ch
+
+  def testDownloadArchivesIfNeeded(self):
+    cloud_storage_stub = self.overrides.cloud_storage
+    # Second hash doesn't match, need to fetch it.
+    cloud_storage_stub.SetRemotePathsForTesting(
+        {cloud_storage.PUBLIC_BUCKET: {recording1: "dummyhash",
+                                       recording2: "dummyhash22"}})
+    cloud_storage_stub.SetCalculatedHashesForTesting(
+        {os.path.join(self.tmp_dir, recording1): "dummyhash",
+         os.path.join(self.tmp_dir, recording2): "dummyhash2",})
+    self.archive_info.DownloadArchivesIfNeeded()
+    self.assertEquals(len(cloud_storage_stub.downloaded_files), 1)
+    self.assertEquals(cloud_storage_stub.downloaded_files[0], recording2)
+
+  def testReadingArchiveInfo(self):
+    self.assertIsNotNone(self.archive_info.WprFilePathForStory(page1))
+    self.assertEquals(recording1, os.path.basename(
+        self.archive_info.WprFilePathForStory(page1)))
+
+    self.assertIsNotNone(self.archive_info.WprFilePathForStory(page2))
+    self.assertEquals(recording1, os.path.basename(
+        self.archive_info.WprFilePathForStory(page2)))
+
+    self.assertIsNotNone(self.archive_info.WprFilePathForStory(page3))
+    self.assertEquals(recording2, os.path.basename(
+        self.archive_info.WprFilePathForStory(page3)))
+
+  def testArchiveInfoFileGetsUpdated(self):
+    """Ensures that the archive info file is updated correctly."""
+
+    expected_archive_file_contents = {
+        u'description': (u'Describes the Web Page Replay archives for a'
+                         u' story set. Don\'t edit by hand! Use record_wpr for'
+                         u' updating.'),
+        u'archives': {
+            u'data_003.wpr': [u'Bar', u'http://www.baz.com/'],
+            u'data_001.wpr': [u'Foo']
+        }
+    }
+
+    new_temp_recording = os.path.join(self.tmp_dir, 'recording.wpr')
+    expected_archive_file_path = os.path.join(self.tmp_dir, 'data_003.wpr')
+    hash_dictionary = {expected_archive_file_path:'filehash'}
+    cloud_storage_stub = self.overrides.cloud_storage
+    cloud_storage_stub.SetCalculatedHashesForTesting(hash_dictionary)
+    with open(new_temp_recording, 'w') as f:
+      f.write('wpr data')
+    self.archive_info.AddNewTemporaryRecording(new_temp_recording)
+    self.archive_info.AddRecordedStories([page2, page3])
+
+    with open(self.story_set_archive_info_file, 'r') as f:
+      archive_file_contents = json.load(f)
+      self.assertEquals(expected_archive_file_contents, archive_file_contents)
+
+  def testModifications(self):
+    recording1_path = os.path.join(self.tmp_dir, recording1)
+    recording2_path = os.path.join(self.tmp_dir, recording2)
+
+    new_recording1 = os.path.join(self.tmp_dir, 'data_003.wpr')
+    new_recording2 = os.path.join(self.tmp_dir, 'data_004.wpr')
+    hash_dictionary = {new_recording1:'file_hash1',
+                       new_recording2:'file_hash2'}
+    cloud_storage_stub = self.overrides.cloud_storage
+    cloud_storage_stub.SetCalculatedHashesForTesting(hash_dictionary)
+
+    new_temp_recording = os.path.join(self.tmp_dir, 'recording.wpr')
+    with open(new_temp_recording, 'w') as f:
+      f.write('wpr data')
+
+    self.archive_info.AddNewTemporaryRecording(new_temp_recording)
+
+    self.assertEquals(new_temp_recording,
+                      self.archive_info.WprFilePathForStory(page1))
+    self.assertEquals(new_temp_recording,
+                      self.archive_info.WprFilePathForStory(page2))
+    self.assertEquals(new_temp_recording,
+                      self.archive_info.WprFilePathForStory(page3))
+
+    self.archive_info.AddRecordedStories([page2])
+
+    self.assertTrue(os.path.exists(new_recording1))
+    self.assertFalse(os.path.exists(new_temp_recording))
+
+    self.assertTrue(os.path.exists(recording1_path))
+    self.assertTrue(os.path.exists(recording2_path))
+    self.assertCorrectHashFile(new_recording1)
+
+    with open(new_temp_recording, 'w') as f:
+      f.write('wpr data')
+
+    self.archive_info.AddNewTemporaryRecording(new_temp_recording)
+    self.archive_info.AddRecordedStories([page3])
+
+    self.assertTrue(os.path.exists(new_recording2))
+    self.assertCorrectHashFile(new_recording2)
+    self.assertFalse(os.path.exists(new_temp_recording))
+
+    self.assertTrue(os.path.exists(recording1_path))
+    # recording2 is no longer needed, so it was deleted.
+    self.assertFalse(os.path.exists(recording2_path))
+
+  def testCreatingNewArchiveInfo(self):
+    # Write only the page set without the corresponding metadata file.
+    story_set_contents = ("""
+    {
+        archive_data_file": "new_archive_info.json",
+        "pages": [
+            {
+                "url": "%s",
+            }
+        ]
+    }""" % page1.url)
+
+    story_set_file = os.path.join(self.tmp_dir, 'new_story_set.json')
+    with open(story_set_file, 'w') as f:
+      f.write(story_set_contents)
+
+    self.story_set_archive_info_file = os.path.join(self.tmp_dir,
+                                                   'new_archive_info.json')
+
+    expected_archive_file_path = os.path.join(self.tmp_dir,
+                                              'new_archive_info_000.wpr')
+    hash_dictionary = {expected_archive_file_path:'filehash'}
+    self.overrides.cloud_storage.SetCalculatedHashesForTesting(hash_dictionary)
+
+    # Create the WprArchiveInfo object to be tested.
+    self.archive_info = archive_info.WprArchiveInfo.FromFile(
+        self.story_set_archive_info_file, cloud_storage.PUBLIC_BUCKET)
+
+    # Add a recording for all the pages.
+    new_temp_recording = os.path.join(self.tmp_dir, 'recording.wpr')
+    with open(new_temp_recording, 'w') as f:
+      f.write('wpr data')
+
+    self.archive_info.AddNewTemporaryRecording(new_temp_recording)
+
+    self.assertEquals(new_temp_recording,
+                      self.archive_info.WprFilePathForStory(page1))
+
+    self.archive_info.AddRecordedStories([page1])
+
+    # Expected name for the recording (decided by WprArchiveInfo).
+    new_recording = os.path.join(self.tmp_dir, 'new_archive_info_000.wpr')
+
+    self.assertTrue(os.path.exists(new_recording))
+    self.assertFalse(os.path.exists(new_temp_recording))
+    self.assertCorrectHashFile(new_recording)
+
+    # Check that the archive info was written correctly.
+    self.assertTrue(os.path.exists(self.story_set_archive_info_file))
+    read_archive_info = archive_info.WprArchiveInfo.FromFile(
+        self.story_set_archive_info_file, cloud_storage.PUBLIC_BUCKET)
+    self.assertEquals(new_recording,
+                      read_archive_info.WprFilePathForStory(page1))
diff --git a/catapult/telemetry/third_party/.gitignore b/catapult/telemetry/third_party/.gitignore
new file mode 100644
index 0000000..4127a2d
--- /dev/null
+++ b/catapult/telemetry/third_party/.gitignore
@@ -0,0 +1 @@
+gsutil
diff --git a/catapult/telemetry/third_party/WebKit/LICENSE b/catapult/telemetry/third_party/WebKit/LICENSE
new file mode 100644
index 0000000..70bcb8a
--- /dev/null
+++ b/catapult/telemetry/third_party/WebKit/LICENSE
@@ -0,0 +1,30 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+//
+// The Chromium Authors can be found at
+// http://src.chromium.org/svn/trunk/src/AUTHORS
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//    * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//    * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//    * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/catapult/telemetry/third_party/WebKit/PerformanceTests/resources/jquery.tablesorter.min.js b/catapult/telemetry/third_party/WebKit/PerformanceTests/resources/jquery.tablesorter.min.js
new file mode 100644
index 0000000..19d7359
--- /dev/null
+++ b/catapult/telemetry/third_party/WebKit/PerformanceTests/resources/jquery.tablesorter.min.js
@@ -0,0 +1,4 @@
+
+(function($){$.extend({tablesorter:new
+function(){var parsers=[],widgets=[];this.defaults={cssHeader:"header",cssAsc:"headerSortUp",cssDesc:"headerSortDown",cssChildRow:"expand-child",sortInitialOrder:"asc",sortMultiSortKey:"shiftKey",sortForce:null,sortAppend:null,sortLocaleCompare:true,textExtraction:"simple",parsers:{},widgets:[],widgetZebra:{css:["even","odd"]},headers:{},widthFixed:false,cancelSelection:true,sortList:[],headerList:[],dateFormat:"us",decimal:'/\.|\,/g',onRenderHeader:null,selectorHeaders:'thead th',debug:false};function benchmark(s,d){log(s+","+(new Date().getTime()-d.getTime())+"ms");}this.benchmark=benchmark;function log(s){if(typeof console!="undefined"&&typeof console.debug!="undefined"){console.log(s);}else{alert(s);}}function buildParserCache(table,$headers){if(table.config.debug){var parsersDebug="";}if(table.tBodies.length==0)return;var rows=table.tBodies[0].rows;if(rows[0]){var list=[],cells=rows[0].cells,l=cells.length;for(var i=0;i<l;i++){var p=false;if($.metadata&&($($headers[i]).metadata()&&$($headers[i]).metadata().sorter)){p=getParserById($($headers[i]).metadata().sorter);}else if((table.config.headers[i]&&table.config.headers[i].sorter)){p=getParserById(table.config.headers[i].sorter);}if(!p){p=detectParserForColumn(table,rows,-1,i);}if(table.config.debug){parsersDebug+="column:"+i+" parser:"+p.id+"\n";}list.push(p);}}if(table.config.debug){log(parsersDebug);}return list;};function detectParserForColumn(table,rows,rowIndex,cellIndex){var l=parsers.length,node=false,nodeValue=false,keepLooking=true;while(nodeValue==''&&keepLooking){rowIndex++;if(rows[rowIndex]){node=getNodeFromRowAndCellIndex(rows,rowIndex,cellIndex);nodeValue=trimAndGetNodeText(table.config,node);if(table.config.debug){log('Checking if value was empty on row:'+rowIndex);}}else{keepLooking=false;}}for(var i=1;i<l;i++){if(parsers[i].is(nodeValue,table,node)){return parsers[i];}}return parsers[0];}function getNodeFromRowAndCellIndex(rows,rowIndex,cellIndex){return rows[rowIndex].cells[cellIndex];}function trimAndGetNodeText(config,node){return $.trim(getElementText(config,node));}function getParserById(name){var l=parsers.length;for(var i=0;i<l;i++){if(parsers[i].id.toLowerCase()==name.toLowerCase()){return parsers[i];}}return false;}function buildCache(table){if(table.config.debug){var cacheTime=new Date();}var totalRows=(table.tBodies[0]&&table.tBodies[0].rows.length)||0,totalCells=(table.tBodies[0].rows[0]&&table.tBodies[0].rows[0].cells.length)||0,parsers=table.config.parsers,cache={row:[],normalized:[]};for(var i=0;i<totalRows;++i){var c=$(table.tBodies[0].rows[i]),cols=[];if(c.hasClass(table.config.cssChildRow)){cache.row[cache.row.length-1]=cache.row[cache.row.length-1].add(c);continue;}cache.row.push(c);for(var j=0;j<totalCells;++j){cols.push(parsers[j].format(getElementText(table.config,c[0].cells[j]),table,c[0].cells[j]));}cols.push(cache.normalized.length);cache.normalized.push(cols);cols=null;};if(table.config.debug){benchmark("Building cache for "+totalRows+" rows:",cacheTime);}return cache;};function getElementText(config,node){var text="";if(!node)return"";if(!config.supportsTextContent)config.supportsTextContent=node.textContent||false;if(config.textExtraction=="simple"){if(config.supportsTextContent){text=node.textContent;}else{if(node.childNodes[0]&&node.childNodes[0].hasChildNodes()){text=node.childNodes[0].innerHTML;}else{text=node.innerHTML;}}}else{if(typeof(config.textExtraction)=="function"){text=config.textExtraction(node);}else{text=$(node).text();}}return text;}function 
appendToTable(table,cache){if(table.config.debug){var appendTime=new Date()}var c=cache,r=c.row,n=c.normalized,totalRows=n.length,checkCell=(n[0].length-1),tableBody=$(table.tBodies[0]),rows=[];for(var i=0;i<totalRows;i++){var pos=n[i][checkCell];rows.push(r[pos]);if(!table.config.appender){var l=r[pos].length;for(var j=0;j<l;j++){tableBody[0].appendChild(r[pos][j]);}}}if(table.config.appender){table.config.appender(table,rows);}rows=null;if(table.config.debug){benchmark("Rebuilt table:",appendTime);}applyWidget(table);setTimeout(function(){$(table).trigger("sortEnd");},0);};function buildHeaders(table){if(table.config.debug){var time=new Date();}var meta=($.metadata)?true:false;var header_index=computeTableHeaderCellIndexes(table);$tableHeaders=$(table.config.selectorHeaders,table).each(function(index){this.column=header_index[this.parentNode.rowIndex+"-"+this.cellIndex];this.order=formatSortingOrder(table.config.sortInitialOrder);this.count=this.order;if(checkHeaderMetadata(this)||checkHeaderOptions(table,index))this.sortDisabled=true;if(checkHeaderOptionsSortingLocked(table,index))this.order=this.lockedOrder=checkHeaderOptionsSortingLocked(table,index);if(!this.sortDisabled){var $th=$(this).addClass(table.config.cssHeader);if(table.config.onRenderHeader)table.config.onRenderHeader.apply($th);}table.config.headerList[index]=this;});if(table.config.debug){benchmark("Built headers:",time);log($tableHeaders);}return $tableHeaders;};function computeTableHeaderCellIndexes(t){var matrix=[];var lookup={};var thead=t.getElementsByTagName('THEAD')[0];var trs=thead.getElementsByTagName('TR');for(var i=0;i<trs.length;i++){var cells=trs[i].cells;for(var j=0;j<cells.length;j++){var c=cells[j];var rowIndex=c.parentNode.rowIndex;var cellId=rowIndex+"-"+c.cellIndex;var rowSpan=c.rowSpan||1;var colSpan=c.colSpan||1
+var firstAvailCol;if(typeof(matrix[rowIndex])=="undefined"){matrix[rowIndex]=[];}for(var k=0;k<matrix[rowIndex].length+1;k++){if(typeof(matrix[rowIndex][k])=="undefined"){firstAvailCol=k;break;}}lookup[cellId]=firstAvailCol;for(var k=rowIndex;k<rowIndex+rowSpan;k++){if(typeof(matrix[k])=="undefined"){matrix[k]=[];}var matrixrow=matrix[k];for(var l=firstAvailCol;l<firstAvailCol+colSpan;l++){matrixrow[l]="x";}}}}return lookup;}function checkCellColSpan(table,rows,row){var arr=[],r=table.tHead.rows,c=r[row].cells;for(var i=0;i<c.length;i++){var cell=c[i];if(cell.colSpan>1){arr=arr.concat(checkCellColSpan(table,headerArr,row++));}else{if(table.tHead.length==1||(cell.rowSpan>1||!r[row+1])){arr.push(cell);}}}return arr;};function checkHeaderMetadata(cell){if(($.metadata)&&($(cell).metadata().sorter===false)){return true;};return false;}function checkHeaderOptions(table,i){if((table.config.headers[i])&&(table.config.headers[i].sorter===false)){return true;};return false;}function checkHeaderOptionsSortingLocked(table,i){if((table.config.headers[i])&&(table.config.headers[i].lockedOrder))return table.config.headers[i].lockedOrder;return false;}function applyWidget(table){var c=table.config.widgets;var l=c.length;for(var i=0;i<l;i++){getWidgetById(c[i]).format(table);}}function getWidgetById(name){var l=widgets.length;for(var i=0;i<l;i++){if(widgets[i].id.toLowerCase()==name.toLowerCase()){return widgets[i];}}};function formatSortingOrder(v){if(typeof(v)!="Number"){return(v.toLowerCase()=="desc")?1:0;}else{return(v==1)?1:0;}}function isValueInArray(v,a){var l=a.length;for(var i=0;i<l;i++){if(a[i][0]==v){return true;}}return false;}function setHeadersCss(table,$headers,list,css){$headers.removeClass(css[0]).removeClass(css[1]);var h=[];$headers.each(function(offset){if(!this.sortDisabled){h[this.column]=$(this);}});var l=list.length;for(var i=0;i<l;i++){h[list[i][0]].addClass(css[list[i][1]]);}}function fixColumnWidth(table,$headers){var c=table.config;if(c.widthFixed){var colgroup=$('<colgroup>');$("tr:first td",table.tBodies[0]).each(function(){colgroup.append($('<col>').css('width',$(this).width()));});$(table).prepend(colgroup);};}function updateHeaderSortCount(table,sortList){var c=table.config,l=sortList.length;for(var i=0;i<l;i++){var s=sortList[i],o=c.headerList[s[0]];o.count=s[1];o.count++;}}function multisort(table,sortList,cache){if(table.config.debug){var sortTime=new Date();}var dynamicExp="var sortWrapper = function(a,b) {",l=sortList.length;for(var i=0;i<l;i++){var c=sortList[i][0];var order=sortList[i][1];var s=(table.config.parsers[c].type=="text")?((order==0)?makeSortFunction("text","asc",c):makeSortFunction("text","desc",c)):((order==0)?makeSortFunction("numeric","asc",c):makeSortFunction("numeric","desc",c));var e="e"+i;dynamicExp+="var "+e+" = "+s;dynamicExp+="if("+e+") { return "+e+"; } ";dynamicExp+="else { ";}var orgOrderCol=cache.normalized[0].length-1;dynamicExp+="return a["+orgOrderCol+"]-b["+orgOrderCol+"];";for(var i=0;i<l;i++){dynamicExp+="}; ";}dynamicExp+="return 0; ";dynamicExp+="}; ";if(table.config.debug){benchmark("Evaling expression:"+dynamicExp,new Date());}eval(dynamicExp);cache.normalized.sort(sortWrapper);if(table.config.debug){benchmark("Sorting on "+sortList.toString()+" and dir "+order+" time:",sortTime);}return cache;};function makeSortFunction(type,direction,index){var a="a["+index+"]",b="b["+index+"]";if(type=='text'&&direction=='asc'){return"("+a+" == "+b+" ? 0 : ("+a+" === null ? Number.POSITIVE_INFINITY : ("+b+" === null ? 
Number.NEGATIVE_INFINITY : ("+a+" < "+b+") ? -1 : 1 )));";}else if(type=='text'&&direction=='desc'){return"("+a+" == "+b+" ? 0 : ("+a+" === null ? Number.POSITIVE_INFINITY : ("+b+" === null ? Number.NEGATIVE_INFINITY : ("+b+" < "+a+") ? -1 : 1 )));";}else if(type=='numeric'&&direction=='asc'){return"("+a+" === null && "+b+" === null) ? 0 :("+a+" === null ? Number.POSITIVE_INFINITY : ("+b+" === null ? Number.NEGATIVE_INFINITY : "+a+" - "+b+"));";}else if(type=='numeric'&&direction=='desc'){return"("+a+" === null && "+b+" === null) ? 0 :("+a+" === null ? Number.POSITIVE_INFINITY : ("+b+" === null ? Number.NEGATIVE_INFINITY : "+b+" - "+a+"));";}};function makeSortText(i){return"((a["+i+"] < b["+i+"]) ? -1 : ((a["+i+"] > b["+i+"]) ? 1 : 0));";};function makeSortTextDesc(i){return"((b["+i+"] < a["+i+"]) ? -1 : ((b["+i+"] > a["+i+"]) ? 1 : 0));";};function makeSortNumeric(i){return"a["+i+"]-b["+i+"];";};function makeSortNumericDesc(i){return"b["+i+"]-a["+i+"];";};function sortText(a,b){if(table.config.sortLocaleCompare)return a.localeCompare(b);return((a<b)?-1:((a>b)?1:0));};function sortTextDesc(a,b){if(table.config.sortLocaleCompare)return b.localeCompare(a);return((b<a)?-1:((b>a)?1:0));};function sortNumeric(a,b){return a-b;};function sortNumericDesc(a,b){return b-a;};function getCachedSortType(parsers,i){return parsers[i].type;};this.construct=function(settings){return this.each(function(){if(!this.tHead||!this.tBodies)return;var $this,$document,$headers,cache,config,shiftDown=0,sortOrder;this.config={};config=$.extend(this.config,$.tablesorter.defaults,settings);$this=$(this);$.data(this,"tablesorter",config);$headers=buildHeaders(this);this.config.parsers=buildParserCache(this,$headers);cache=buildCache(this);var sortCSS=[config.cssDesc,config.cssAsc];fixColumnWidth(this);$headers.click(function(e){var totalRows=($this[0].tBodies[0]&&$this[0].tBodies[0].rows.length)||0;if(!this.sortDisabled&&totalRows>0){$this.trigger("sortStart");var $cell=$(this);var i=this.column;this.order=this.count++%2;if(this.lockedOrder)this.order=this.lockedOrder;if(!e[config.sortMultiSortKey]){config.sortList=[];if(config.sortForce!=null){var a=config.sortForce;for(var j=0;j<a.length;j++){if(a[j][0]!=i){config.sortList.push(a[j]);}}}config.sortList.push([i,this.order]);}else{if(isValueInArray(i,config.sortList)){for(var j=0;j<config.sortList.length;j++){var s=config.sortList[j],o=config.headerList[s[0]];if(s[0]==i){o.count=s[1];o.count++;s[1]=o.count%2;}}}else{config.sortList.push([i,this.order]);}};setTimeout(function(){setHeadersCss($this[0],$headers,config.sortList,sortCSS);appendToTable($this[0],multisort($this[0],config.sortList,cache));},1);return false;}}).mousedown(function(){if(config.cancelSelection){this.onselectstart=function(){return false};return false;}});$this.bind("update",function(){var me=this;setTimeout(function(){me.config.parsers=buildParserCache(me,$headers);cache=buildCache(me);},1);}).bind("updateCell",function(e,cell){var config=this.config;var pos=[(cell.parentNode.rowIndex-1),cell.cellIndex];cache.normalized[pos[0]][pos[1]]=config.parsers[pos[1]].format(getElementText(config,cell),cell);}).bind("sorton",function(e,list){$(this).trigger("sortStart");config.sortList=list;var 
sortList=config.sortList;updateHeaderSortCount(this,sortList);setHeadersCss(this,$headers,sortList,sortCSS);appendToTable(this,multisort(this,sortList,cache));}).bind("appendCache",function(){appendToTable(this,cache);}).bind("applyWidgetId",function(e,id){getWidgetById(id).format(this);}).bind("applyWidgets",function(){applyWidget(this);});if($.metadata&&($(this).metadata()&&$(this).metadata().sortlist)){config.sortList=$(this).metadata().sortlist;}if(config.sortList.length>0){$this.trigger("sorton",[config.sortList]);}applyWidget(this);});};this.addParser=function(parser){var l=parsers.length,a=true;for(var i=0;i<l;i++){if(parsers[i].id.toLowerCase()==parser.id.toLowerCase()){a=false;}}if(a){parsers.push(parser);};};this.addWidget=function(widget){widgets.push(widget);};this.formatFloat=function(s){var i=parseFloat(s);return(isNaN(i))?0:i;};this.formatInt=function(s){var i=parseInt(s);return(isNaN(i))?0:i;};this.isDigit=function(s,config){return/^[-+]?\d*$/.test($.trim(s.replace(/[,.']/g,'')));};this.clearTableBody=function(table){if($.browser.msie){function empty(){while(this.firstChild)this.removeChild(this.firstChild);}empty.apply(table.tBodies[0]);}else{table.tBodies[0].innerHTML="";}};}});$.fn.extend({tablesorter:$.tablesorter.construct});var ts=$.tablesorter;ts.addParser({id:"text",is:function(s){return true;},format:function(s){return $.trim(s.toLocaleLowerCase());},type:"text"});ts.addParser({id:"digit",is:function(s,table){var c=table.config;return $.tablesorter.isDigit(s,c);},format:function(s){return $.tablesorter.formatFloat(s);},type:"numeric"});ts.addParser({id:"currency",is:function(s){return/^[£$€?.]/.test(s);},format:function(s){return $.tablesorter.formatFloat(s.replace(new RegExp(/[£$€]/g),""));},type:"numeric"});ts.addParser({id:"ipAddress",is:function(s){return/^\d{2,3}[\.]\d{2,3}[\.]\d{2,3}[\.]\d{2,3}$/.test(s);},format:function(s){var a=s.split("."),r="",l=a.length;for(var i=0;i<l;i++){var item=a[i];if(item.length==2){r+="0"+item;}else{r+=item;}}return $.tablesorter.formatFloat(r);},type:"numeric"});ts.addParser({id:"url",is:function(s){return/^(https?|ftp|file):\/\/$/.test(s);},format:function(s){return jQuery.trim(s.replace(new RegExp(/(https?|ftp|file):\/\//),''));},type:"text"});ts.addParser({id:"isoDate",is:function(s){return/^\d{4}[\/-]\d{1,2}[\/-]\d{1,2}$/.test(s);},format:function(s){return $.tablesorter.formatFloat((s!="")?new Date(s.replace(new RegExp(/-/g),"/")).getTime():"0");},type:"numeric"});ts.addParser({id:"percent",is:function(s){return/\%$/.test($.trim(s));},format:function(s){return $.tablesorter.formatFloat(s.replace(new RegExp(/%/g),""));},type:"numeric"});ts.addParser({id:"usLongDate",is:function(s){return s.match(new RegExp(/^[A-Za-z]{3,10}\.? 
[0-9]{1,2}, ([0-9]{4}|'?[0-9]{2}) (([0-2]?[0-9]:[0-5][0-9])|([0-1]?[0-9]:[0-5][0-9]\s(AM|PM)))$/));},format:function(s){return $.tablesorter.formatFloat(new Date(s).getTime());},type:"numeric"});ts.addParser({id:"shortDate",is:function(s){return/\d{1,2}[\/\-]\d{1,2}[\/\-]\d{2,4}/.test(s);},format:function(s,table){var c=table.config;s=s.replace(/\-/g,"/");if(c.dateFormat=="us"){s=s.replace(/(\d{1,2})[\/\-](\d{1,2})[\/\-](\d{4})/,"$3/$1/$2");}else if(c.dateFormat=="uk"){s=s.replace(/(\d{1,2})[\/\-](\d{1,2})[\/\-](\d{4})/,"$3/$2/$1");}else if(c.dateFormat=="dd/mm/yy"||c.dateFormat=="dd-mm-yy"){s=s.replace(/(\d{1,2})[\/\-](\d{1,2})[\/\-](\d{2})/,"$1/$2/$3");}return $.tablesorter.formatFloat(new Date(s).getTime());},type:"numeric"});ts.addParser({id:"time",is:function(s){return/^(([0-2]?[0-9]:[0-5][0-9])|([0-1]?[0-9]:[0-5][0-9]\s(am|pm)))$/.test(s);},format:function(s){return $.tablesorter.formatFloat(new Date("2000/01/01 "+s).getTime());},type:"numeric"});ts.addParser({id:"metadata",is:function(s){return false;},format:function(s,table,cell){var c=table.config,p=(!c.parserMetadataName)?'sortValue':c.parserMetadataName;return $(cell).metadata()[p];},type:"numeric"});ts.addWidget({id:"zebra",format:function(table){if(table.config.debug){var time=new Date();}var $tr,row=-1,odd;$("tr:visible",table.tBodies[0]).each(function(i){$tr=$(this);if(!$tr.hasClass(table.config.cssChildRow))row++;odd=(row%2==0);$tr.removeClass(table.config.widgetZebra.css[odd?0:1]).addClass(table.config.widgetZebra.css[odd?1:0])});if(table.config.debug){$.tablesorter.benchmark("Applying Zebra widget",time);}}});})(jQuery);
diff --git a/catapult/telemetry/third_party/WebKit/PerformanceTests/resources/statistics.js b/catapult/telemetry/third_party/WebKit/PerformanceTests/resources/statistics.js
new file mode 100644
index 0000000..5a14b9c
--- /dev/null
+++ b/catapult/telemetry/third_party/WebKit/PerformanceTests/resources/statistics.js
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+var Statistics = new (function () {
+
+    this.max = function (values) {
+        var maxVal = values[0];
+        for (var i = 1; i < values.length; i++) {
+            maxVal = Math.max(maxVal, values[i]);
+        }
+        return maxVal;
+    }
+
+    this.min = function (values) {
+        var minVal = values[0];
+        for (var i = 1; i < values.length; i++) {
+            minVal = Math.min(minVal, values[i]);
+        }
+        return minVal;
+    }
+
+    this.sum = function (values) {
+        return values.reduce(function (a, b) { return a + b; }, 0);
+    }
+
+    this.squareSum = function (values) {
+        return values.reduce(function (sum, value) { return sum + value * value;}, 0);
+    }
+
+    // With sum and sum of squares, we can compute the sample standard deviation in O(1).
+    // See https://rniwa.com/2012-11-10/sample-standard-deviation-in-terms-of-sum-and-square-sum-of-samples/
+    this.sampleStandardDeviation = function (numberOfSamples, sum, squareSum) {
+        if (numberOfSamples < 2)
+            return 0;
+        return Math.sqrt(squareSum / (numberOfSamples - 1)
+            - sum * sum / (numberOfSamples - 1) / numberOfSamples);
+    }
+
+    this.supportedConfidenceLevels = function () {
+        var supportedLevels = [];
+        for (var quantile in tDistributionInverseCDF)
+            supportedLevels.push((1 - (1 - quantile) * 2).toFixed(2));
+        return supportedLevels;
+    }
+
+    this.quantile = function (confidenceLevel, numberOfSamples, opt_degreesOfFreedom) {
+        var probability = (1 - (1 - confidenceLevel) / 2);
+        if (!(probability in tDistributionInverseCDF)) {
+            console.warn('We only support ' + this.supportedConfidenceLevels().map(
+                function (level) { return level * 100 + '%'; } ).join(', ') + ' confidence intervals.');
+            return NaN;
+        }
+        if (numberOfSamples < 2)
+            return Number.POSITIVE_INFINITY;
+
+        var cdfForProbability = tDistributionInverseCDF[probability];
+        var degreesOfFreedom = opt_degreesOfFreedom;
+        if (degreesOfFreedom === undefined)
+          degreesOfFreedom = numberOfSamples - 1;
+
+        // tDistributionQuantile(degreesOfFreedom, confidenceLevel) * sampleStandardDeviation / sqrt(numberOfSamples) * S/sqrt(numberOfSamples)
+        if (degreesOfFreedom <= 100)
+          return cdfForProbability[degreesOfFreedom - 1]; // The first entry is for the one degree of freedom.
+        else if (degreesOfFreedom <= 300)
+          return cdfForProbability[Math.round(degreesOfFreedom / 10) + 100 - 10 - 1];
+        else if (degreesOfFreedom <= 1300)
+          return cdfForProbability[Math.round(degreesOfFreedom / 100) + 120 - 3 - 1];
+        else
+          return cdfForProbability[cdfForProbability.length - 1];
+    }
+
+    // Computes the delta d s.t. (mean - d, mean + d) is the confidence interval with the specified confidence level in O(1).
+    this.confidenceIntervalDelta = function (confidenceLevel, numberOfSamples, sum, squareSum) {
+        var sampleStandardDeviation = this.sampleStandardDeviation(numberOfSamples, sum, squareSum);
+        return this.confidenceIntervalDeltaFromStd(confidenceLevel, numberOfSamples, sampleStandardDeviation);
+    }
+
+    this.confidenceIntervalDeltaFromStd = function (confidenceLevel, numberOfSamples, sampleStandardDeviation, opt_degreesOfFreedom) {
+        var quantile = this.quantile(confidenceLevel, numberOfSamples, opt_degreesOfFreedom);
+        return quantile * sampleStandardDeviation / Math.sqrt(numberOfSamples);
+    }
+
+
+    this.confidenceInterval = function (values, probability) {
+        var sum = this.sum(values);
+        var mean = sum / values.length;
+        var delta = this.confidenceIntervalDelta(probability || 0.95, values.length, sum, this.squareSum(values));
+        return [mean - delta, mean + delta];
+    }
+
+    // See http://en.wikipedia.org/wiki/Student's_t-distribution#Table_of_selected_values
+    // This table contains one sided (a.k.a. tail) values.
+    // Use TINV((1 - probability) * 2, df) in your favorite spreadsheet software to compute these.
+    // The spacing of the values with df greater than 100 maintains error less than 0.8%.
+    var tDistributionInverseCDF = {
+        0.9: [
+            // 1 - 100 step 1
+            3.077684, 1.885618, 1.637744, 1.533206, 1.475884, 1.439756, 1.414924, 1.396815, 1.383029, 1.372184,
+            1.363430, 1.356217, 1.350171, 1.345030, 1.340606, 1.336757, 1.333379, 1.330391, 1.327728, 1.325341,
+            1.323188, 1.321237, 1.319460, 1.317836, 1.316345, 1.314972, 1.313703, 1.312527, 1.311434, 1.310415,
+            1.309464, 1.308573, 1.307737, 1.306952, 1.306212, 1.305514, 1.304854, 1.304230, 1.303639, 1.303077,
+            1.302543, 1.302035, 1.301552, 1.301090, 1.300649, 1.300228, 1.299825, 1.299439, 1.299069, 1.298714,
+            1.298373, 1.298045, 1.297730, 1.297426, 1.297134, 1.296853, 1.296581, 1.296319, 1.296066, 1.295821,
+            1.295585, 1.295356, 1.295134, 1.294920, 1.294712, 1.294511, 1.294315, 1.294126, 1.293942, 1.293763,
+            1.293589, 1.293421, 1.293256, 1.293097, 1.292941, 1.292790, 1.292643, 1.292500, 1.292360, 1.292224,
+            1.292091, 1.291961, 1.291835, 1.291711, 1.291591, 1.291473, 1.291358, 1.291246, 1.291136, 1.291029,
+            1.290924, 1.290821, 1.290721, 1.290623, 1.290527, 1.290432, 1.290340, 1.290250, 1.290161, 1.290075,
+            // 110 - 300 step 10
+            1.289295, 1.288646, 1.288098, 1.287628, 1.287221, 1.286865, 1.286551, 1.286272, 1.286023, 1.285799,
+            1.285596, 1.285411, 1.285243, 1.285089, 1.284947, 1.284816, 1.284695, 1.284582, 1.284478, 1.284380,
+            // 400 - 1300 step 100
+            1.283672, 1.283247, 1.282964, 1.282762, 1.282611, 1.282493, 1.282399, 1.282322, 1.282257, 1.282203,
+            // Infinity
+            1.281548],
+        0.95: [
+            // 1 - 100 step 1
+            6.313752, 2.919986, 2.353363, 2.131847, 2.015048, 1.943180, 1.894579, 1.859548, 1.833113, 1.812461,
+            1.795885, 1.782288, 1.770933, 1.761310, 1.753050, 1.745884, 1.739607, 1.734064, 1.729133, 1.724718,
+            1.720743, 1.717144, 1.713872, 1.710882, 1.708141, 1.705618, 1.703288, 1.701131, 1.699127, 1.697261,
+            1.695519, 1.693889, 1.692360, 1.690924, 1.689572, 1.688298, 1.687094, 1.685954, 1.684875, 1.683851,
+            1.682878, 1.681952, 1.681071, 1.680230, 1.679427, 1.678660, 1.677927, 1.677224, 1.676551, 1.675905,
+            1.675285, 1.674689, 1.674116, 1.673565, 1.673034, 1.672522, 1.672029, 1.671553, 1.671093, 1.670649,
+            1.670219, 1.669804, 1.669402, 1.669013, 1.668636, 1.668271, 1.667916, 1.667572, 1.667239, 1.666914,
+            1.666600, 1.666294, 1.665996, 1.665707, 1.665425, 1.665151, 1.664885, 1.664625, 1.664371, 1.664125,
+            1.663884, 1.663649, 1.663420, 1.663197, 1.662978, 1.662765, 1.662557, 1.662354, 1.662155, 1.661961,
+            1.661771, 1.661585, 1.661404, 1.661226, 1.661052, 1.660881, 1.660715, 1.660551, 1.660391, 1.660234,
+            // 110 - 300 step 10
+            1.658824, 1.657651, 1.656659, 1.655811, 1.655076, 1.654433, 1.653866, 1.653363, 1.652913, 1.652508,
+            1.652142, 1.651809, 1.651506, 1.651227, 1.650971, 1.650735, 1.650517, 1.650314, 1.650125, 1.649949,
+            // 400 - 1300 step 100
+            1.648672, 1.647907, 1.647397, 1.647033, 1.646761, 1.646548, 1.646379, 1.646240, 1.646124, 1.646027,
+            // Infinity
+            1.644847],
+        0.975: [
+            // 1 - 100 step 1
+            12.706205, 4.302653, 3.182446, 2.776445, 2.570582, 2.446912, 2.364624, 2.306004, 2.262157, 2.228139,
+            2.200985, 2.178813, 2.160369, 2.144787, 2.131450, 2.119905, 2.109816, 2.100922, 2.093024, 2.085963,
+            2.079614, 2.073873, 2.068658, 2.063899, 2.059539, 2.055529, 2.051831, 2.048407, 2.045230, 2.042272,
+            2.039513, 2.036933, 2.034515, 2.032245, 2.030108, 2.028094, 2.026192, 2.024394, 2.022691, 2.021075,
+            2.019541, 2.018082, 2.016692, 2.015368, 2.014103, 2.012896, 2.011741, 2.010635, 2.009575, 2.008559,
+            2.007584, 2.006647, 2.005746, 2.004879, 2.004045, 2.003241, 2.002465, 2.001717, 2.000995, 2.000298,
+            1.999624, 1.998972, 1.998341, 1.997730, 1.997138, 1.996564, 1.996008, 1.995469, 1.994945, 1.994437,
+            1.993943, 1.993464, 1.992997, 1.992543, 1.992102, 1.991673, 1.991254, 1.990847, 1.990450, 1.990063,
+            1.989686, 1.989319, 1.988960, 1.988610, 1.988268, 1.987934, 1.987608, 1.987290, 1.986979, 1.986675,
+            1.986377, 1.986086, 1.985802, 1.985523, 1.985251, 1.984984, 1.984723, 1.984467, 1.984217, 1.983972,
+            // 110 - 300 step 10
+            1.981765, 1.979930, 1.978380, 1.977054, 1.975905, 1.974902, 1.974017, 1.973231, 1.972528, 1.971896,
+            1.971325, 1.970806, 1.970332, 1.969898, 1.969498, 1.969130, 1.968789, 1.968472, 1.968178, 1.967903,
+            // 400 - 1300 step 100
+            1.965912, 1.964720, 1.963926, 1.963359, 1.962934, 1.962603, 1.962339, 1.962123, 1.961943, 1.961790,
+            // Infinity
+            1.959964],
+        0.99: [
+            // 1 - 100 step 1
+            31.820516, 6.964557, 4.540703, 3.746947, 3.364930, 3.142668, 2.997952, 2.896459, 2.821438, 2.763769,
+            2.718079, 2.680998, 2.650309, 2.624494, 2.602480, 2.583487, 2.566934, 2.552380, 2.539483, 2.527977,
+            2.517648, 2.508325, 2.499867, 2.492159, 2.485107, 2.478630, 2.472660, 2.467140, 2.462021, 2.457262,
+            2.452824, 2.448678, 2.444794, 2.441150, 2.437723, 2.434494, 2.431447, 2.428568, 2.425841, 2.423257,
+            2.420803, 2.418470, 2.416250, 2.414134, 2.412116, 2.410188, 2.408345, 2.406581, 2.404892, 2.403272,
+            2.401718, 2.400225, 2.398790, 2.397410, 2.396081, 2.394801, 2.393568, 2.392377, 2.391229, 2.390119,
+            2.389047, 2.388011, 2.387008, 2.386037, 2.385097, 2.384186, 2.383302, 2.382446, 2.381615, 2.380807,
+            2.380024, 2.379262, 2.378522, 2.377802, 2.377102, 2.376420, 2.375757, 2.375111, 2.374482, 2.373868,
+            2.373270, 2.372687, 2.372119, 2.371564, 2.371022, 2.370493, 2.369977, 2.369472, 2.368979, 2.368497,
+            2.368026, 2.367566, 2.367115, 2.366674, 2.366243, 2.365821, 2.365407, 2.365002, 2.364606, 2.364217,
+            // 110 - 300 step 10
+            2.360726, 2.357825, 2.355375, 2.353278, 2.351465, 2.349880, 2.348483, 2.347243, 2.346134, 2.345137,
+            2.344236, 2.343417, 2.342670, 2.341985, 2.341356, 2.340775, 2.340238, 2.339739, 2.339275, 2.338842,
+            // 400 - 1300 step 100
+            2.335706, 2.333829, 2.332579, 2.331687, 2.331018, 2.330498, 2.330083, 2.329743, 2.329459, 2.329220,
+            // Infinity
+            2.326348],
+    };
+
+})();
+
+if (typeof module != 'undefined') {
+    for (var key in Statistics)
+        module.exports[key] = Statistics[key];
+}
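
The quantile tables above are plain arrays indexed by degrees of freedom, following the row comments: entries for 1-100 in steps of 1, 110-300 in steps of 10, 400-1300 in steps of 100, and one final entry for infinity (131 entries per confidence level). The lookup code itself is not part of this hunk; the sketch below only illustrates that indexing scheme in Python, with a hypothetical helper name and a round-down-to-the-nearest-tabulated-df policy assumed for values that fall between rows.

    def t_table_index(df):
        """Map degrees of freedom to an index into a 131-entry quantile row
        laid out as 1-100 step 1, 110-300 step 10, 400-1300 step 100, Infinity.
        Values between tabulated points round down (conservative, since the
        t quantile shrinks as df grows). Illustrative only."""
        if df < 1:
            raise ValueError('degrees of freedom must be >= 1')
        if df <= 100:                            # indices 0..99
            return int(df) - 1
        if df < 110:                             # between rows: fall back to df=100
            return 99
        if df <= 300:                            # indices 100..119
            return 100 + (int(df) - 110) // 10
        if df < 400:                             # between rows: fall back to df=300
            return 119
        if df <= 1300:                           # indices 120..129
            return 120 + (int(df) - 400) // 100
        return 130                               # the trailing "Infinity" entry
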
diff --git a/catapult/telemetry/third_party/WebKit/README.chromium b/catapult/telemetry/third_party/WebKit/README.chromium
new file mode 100644
index 0000000..798990b
--- /dev/null
+++ b/catapult/telemetry/third_party/WebKit/README.chromium
@@ -0,0 +1,14 @@
+Name: Blink javascript libraries.
+Short Name: Blink
+URL: http://www.chromium.org/blink
+Version: N/A
+License: BSD license
+License File: NOT_SHIPPED
+Security Critical: no
+Description: Blink JavaScript libraries are used to visualize performance
+metrics.
+Local Modifications: All files not needed by telemetry are removed; only
+jquery.tablesorter.min.js and statistics.js are kept.
+statistics.js is modified to support computing confidenceIntervalDelta with
+custom standard deviation & degrees of freedom (see
+https://codereview.chromium.org/1309143006).
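
For reviewers unfamiliar with the term: the confidence-interval "delta" referenced above is the half-width of a two-sided confidence interval for a sample mean, conventionally the t quantile times the standard error. The Python sketch below is the textbook formulation, not the statistics.js implementation; the function and parameter names are placeholders, and t_quantile stands in for a lookup against tables like the ones added in this patch.

    import math

    def confidence_interval_delta(confidence, sample_count, std_dev, t_quantile):
        """Textbook half-width of a two-sided confidence interval for a mean.
        t_quantile(prob, df) is assumed to return the Student's t quantile,
        e.g. prob=0.975, df=n-1 for a 95% interval. Illustrative sketch only."""
        if sample_count < 2:
            raise ValueError('need at least two samples')
        df = sample_count - 1                        # degrees of freedom
        prob = 1.0 - (1.0 - confidence) / 2.0        # e.g. 0.95 -> 0.975
        std_err = std_dev / math.sqrt(sample_count)  # standard error of the mean
        return t_quantile(prob, df) * std_err
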
diff --git a/catapult/perf_insights/third_party/__init__.py b/catapult/telemetry/third_party/__init__.py
similarity index 100%
rename from catapult/perf_insights/third_party/__init__.py
rename to catapult/telemetry/third_party/__init__.py
diff --git a/catapult/telemetry/third_party/altgraph/MANIFEST.in b/catapult/telemetry/third_party/altgraph/MANIFEST.in
new file mode 100644
index 0000000..9a9b960
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/MANIFEST.in
@@ -0,0 +1,9 @@
+include ReadMe.txt
+include *.txt MANIFEST.in *.py
+graft doc
+graft doc/_static
+graft doc/_templates
+graft altgraph_tests
+global-exclude .DS_Store
+global-exclude *.pyc
+global-exclude *.so
diff --git a/catapult/telemetry/third_party/altgraph/PKG-INFO b/catapult/telemetry/third_party/altgraph/PKG-INFO
new file mode 100644
index 0000000..87b602f
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/PKG-INFO
@@ -0,0 +1,216 @@
+Metadata-Version: 1.1
+Name: altgraph
+Version: 0.12
+Summary: Python graph (network) package
+Home-page: http://packages.python.org/altgraph
+Author: Ronald Oussoren
+Author-email: ronaldoussoren@mac.com
+License: MIT
+Download-URL: http://pypi.python.org/pypi/altgraph
+Description: altgraph is a fork of graphlib: a graph (network) package for constructing
+        graphs, BFS and DFS traversals, topological sort, shortest paths, etc. with
+        graphviz output.
+        
+        altgraph includes some additional usage of Python 2.6+ features and
+        enhancements related to modulegraph and macholib.
+        
+        
+        Release history
+        ===============
+        
+        0.12
+        ----
+        
+        - Added ``ObjectGraph.edgeData`` to retrieve the edge data
+          from a specific edge.
+        
+        - Added ``AltGraph.update_edge_data`` and ``ObjectGraph.updateEdgeData``
+          to update the data associated with a graph edge.
+        
+        0.11
+        ----
+        
+        - Stabilize the order of elements in dot file exports,
+          patch from bitbucket user 'pombredanne'.
+        
+        - Tweak setup.py file to remove dependency on distribute (but
+          keep the dependency on setuptools)
+        
+        
+        0.10.2
+        ------
+        
+        - There were no classifiers in the package metadata due to a bug
+          in setup.py
+        
+        0.10.1
+        ------
+        
+        This is a bugfix release
+        
+        Bug fixes:
+        
+        - Issue #3: The source archive contains a README.txt
+          while the setup file refers to ReadMe.txt.
+        
+          This is caused by a misfeature in distutils; as a
+          workaround I've renamed ReadMe.txt to README.txt
+          in the source tree and setup file.
+        
+        
+        0.10
+        -----
+        
+        This is a minor feature release
+        
+        Features:
+        
+        - Do not use "2to3" to support Python 3.
+        
+          As a side effect of this altgraph now supports
+          Python 2.6 and later, and no longer supports
+          earlier releases of Python.
+        
+        - The order of attributes in the Dot output
+          is now always alphabetical.
+        
+          With this change the output will be consistent
+          between runs and Python versions.
+        
+        0.9
+        ---
+        
+        This is a minor bugfix release
+        
+        Features:
+        
+        - Added ``altgraph.ObjectGraph.ObjectGraph.nodes``, a method
+          yielding all nodes in an object graph.
+        
+        Bugfixes:
+        
+        - The 0.8 release didn't work with py2app when using
+          python 3.x.
+        
+        
+        0.8
+        -----
+        
+        This is a minor feature release. The major new feature
+        is an extensive set of unittests, which explains almost
+        all other changes in this release.
+        
+        Bugfixes:
+        
+        - Installing failed with Python 2.5 due to using a distutils
+          class that isn't available in that version of Python
+          (issue #1 on the issue tracker)
+        
+        - ``altgraph.GraphStat.degree_dist`` now actually works
+        
+        - ``altgraph.Graph.add_edge(a, b, create_nodes=False)`` will
+          no longer create the edge when one of the nodes doesn't
+          exist.
+        
+        - ``altgraph.Graph.forw_topo_sort`` failed for some sparse graphs.
+        
+        - ``altgraph.Graph.back_topo_sort`` was completely broken in
+          previous releases.
+        
+        - ``altgraph.Graph.forw_bfs_subgraph`` now actually works.
+        
+        - ``altgraph.Graph.back_bfs_subgraph`` now actually works.
+        
+        - ``altgraph.Graph.iterdfs`` now returns the correct result
+          when the ``forward`` argument is ``False``.
+        
+        - ``altgraph.Graph.iterdata`` now returns the correct result
+          when the ``forward`` argument is ``False``.
+        
+        
+        Features:
+        
+        - The ``altgraph.Graph`` constructor now accepts an argument
+          that contains 2- and 3-tuples instead of requiring that
+          all items have the same size. The (optional) argument can now
+          also be any iterator.
+        
+        - ``altgraph.Graph.Graph.add_node`` has no effect when you
+          add a hidden node.
+        
+        - The private method ``altgraph.Graph._bfs`` is no longer
+          present.
+        
+        - The private method ``altgraph.Graph._dfs`` is no longer
+          present.
+        
+        - ``altgraph.ObjectGraph`` now has a ``__contains__`` method,
+          which means you can use the ``in`` operator to check if a
+          node is part of a graph.
+        
+        - ``altgraph.GraphUtil.generate_random_graph`` will raise
+          ``GraphError`` instead of looping forever when it is
+          impossible to create the requested graph.
+        
+        - ``altgraph.Dot.edge_style`` raises ``GraphError`` when
+          one of the nodes is not present in the graph. The method
+          silently added the tail in the past, but without ensuring
+          a consistent graph state.
+        
+        - ``altgraph.Dot.save_img`` now works when the mode is
+          ``"neato"``.
+        
+        0.7.2
+        -----
+        
+        This is a minor bugfix release
+        
+        Bugfixes:
+        
+        - distutils didn't include the documentation subtree
+        
+        0.7.1
+        -----
+        
+        This is a minor feature release
+        
+        Features:
+        
+        - Documentation is now generated using `sphinx <http://pypi.python.org/pypi/sphinx>`_
+          and can be viewed at <http://packages.python.org/altgraph>.
+        
+        - The repository has moved to bitbucket
+        
+        - ``altgraph.GraphStat.avg_hops`` is no longer present, the function had no
+          implementation and no specified behaviour.
+        
+        - the module ``altgraph.compat`` is gone, which means altgraph will no
+          longer work with Python 2.3.
+        
+        
+        0.7.0
+        -----
+        
+        This is a minor feature release.
+        
+        Features:
+        
+        - Support for Python 3
+        
+        - It is now possible to run tests using 'python setup.py test'
+        
+          (The actual testsuite is still very minimal though)
+        
+Keywords: graph
+Platform: any
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Scientific/Engineering :: Mathematics
+Classifier: Topic :: Scientific/Engineering :: Visualization
diff --git a/catapult/telemetry/third_party/altgraph/README.chromium b/catapult/telemetry/third_party/altgraph/README.chromium
new file mode 100644
index 0000000..f9f0cae
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/README.chromium
@@ -0,0 +1,12 @@
+Name: altgraph
+Short Name: altgraph
+URL: https://pypi.python.org/pypi/altgraph/
+Version: 0.12
+License: MIT
+License File: NOT_SHIPPED
+Security Critical: no
+Description: altgraph is a fork of graphlib: a graph (network) package for
+constructing graphs, BFS and DFS traversals, topological sort, shortest paths,
+etc. with graphviz output. altgraph is used by
+telemetry/third_party/modulegraph.
+Local modification: remove doc/_build directory.
diff --git a/catapult/telemetry/third_party/altgraph/README.txt b/catapult/telemetry/third_party/altgraph/README.txt
new file mode 100644
index 0000000..904a14b
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/README.txt
@@ -0,0 +1,6 @@
+altgraph is a fork of graphlib: a graph (network) package for constructing
+graphs, BFS and DFS traversals, topological sort, shortest paths, etc. with
+graphviz output.
+
+altgraph includes some additional usage of Python 2.6+ features and
+enhancements related to modulegraph and macholib.
diff --git a/catapult/telemetry/third_party/altgraph/altgraph.egg-info/PKG-INFO b/catapult/telemetry/third_party/altgraph/altgraph.egg-info/PKG-INFO
new file mode 100644
index 0000000..87b602f
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/altgraph.egg-info/PKG-INFO
@@ -0,0 +1,216 @@
+Metadata-Version: 1.1
+Name: altgraph
+Version: 0.12
+Summary: Python graph (network) package
+Home-page: http://packages.python.org/altgraph
+Author: Ronald Oussoren
+Author-email: ronaldoussoren@mac.com
+License: MIT
+Download-URL: http://pypi.python.org/pypi/altgraph
+Description: altgraph is a fork of graphlib: a graph (network) package for constructing
+        graphs, BFS and DFS traversals, topological sort, shortest paths, etc. with
+        graphviz output.
+        
+        altgraph includes some additional usage of Python 2.6+ features and
+        enhancements related to modulegraph and macholib.
+        
+        
+        Release history
+        ===============
+        
+        0.12
+        ----
+        
+        - Added ``ObjectGraph.edgeData`` to retrieve the edge data
+          from a specific edge.
+        
+        - Added ``AltGraph.update_edge_data`` and ``ObjectGraph.updateEdgeData``
+          to update the data associated with a graph edge.
+        
+        0.11
+        ----
+        
+        - Stabilize the order of elements in dot file exports,
+          patch from bitbucket user 'pombredanne'.
+        
+        - Tweak setup.py file to remove dependency on distribute (but
+          keep the dependency on setuptools)
+        
+        
+        0.10.2
+        ------
+        
+        - There were no classifiers in the package metadata due to a bug
+          in setup.py
+        
+        0.10.1
+        ------
+        
+        This is a bugfix release
+        
+        Bug fixes:
+        
+        - Issue #3: The source archive contains a README.txt
+          while the setup file refers to ReadMe.txt.
+        
+          This is caused by a misfeature in distutils; as a
+          workaround I've renamed ReadMe.txt to README.txt
+          in the source tree and setup file.
+        
+        
+        0.10
+        -----
+        
+        This is a minor feature release
+        
+        Features:
+        
+        - Do not use "2to3" to support Python 3.
+        
+          As a side effect of this altgraph now supports
+          Python 2.6 and later, and no longer supports
+          earlier releases of Python.
+        
+        - The order of attributes in the Dot output
+          is now always alphabetical.
+        
+          With this change the output will be consistent
+          between runs and Python versions.
+        
+        0.9
+        ---
+        
+        This is a minor bugfix release
+        
+        Features:
+        
+        - Added ``altgraph.ObjectGraph.ObjectGraph.nodes``, a method
+          yielding all nodes in an object graph.
+        
+        Bugfixes:
+        
+        - The 0.8 release didn't work with py2app when using
+          python 3.x.
+        
+        
+        0.8
+        -----
+        
+        This is a minor feature release. The major new feature
+        is an extensive set of unittests, which explains almost
+        all other changes in this release.
+        
+        Bugfixes:
+        
+        - Installing failed with Python 2.5 due to using a distutils
+          class that isn't available in that version of Python
+          (issue #1 on the issue tracker)
+        
+        - ``altgraph.GraphStat.degree_dist`` now actually works
+        
+        - ``altgraph.Graph.add_edge(a, b, create_nodes=False)`` will
+          no longer create the edge when one of the nodes doesn't
+          exist.
+        
+        - ``altgraph.Graph.forw_topo_sort`` failed for some sparse graphs.
+        
+        - ``altgraph.Graph.back_topo_sort`` was completely broken in
+          previous releases.
+        
+        - ``altgraph.Graph.forw_bfs_subgraph`` now actually works.
+        
+        - ``altgraph.Graph.back_bfs_subgraph`` now actually works.
+        
+        - ``altgraph.Graph.iterdfs`` now returns the correct result
+          when the ``forward`` argument is ``False``.
+        
+        - ``altgraph.Graph.iterdata`` now returns the correct result
+          when the ``forward`` argument is ``False``.
+        
+        
+        Features:
+        
+        - The ``altgraph.Graph`` constructor now accepts an argument
+          that contains 2- and 3-tuples instead of requiring that
+          all items have the same size. The (optional) argument can now
+          also be any iterator.
+        
+        - ``altgraph.Graph.Graph.add_node`` has no effect when you
+          add a hidden node.
+        
+        - The private method ``altgraph.Graph._bfs`` is no longer
+          present.
+        
+        - The private method ``altgraph.Graph._dfs`` is no longer
+          present.
+        
+        - ``altgraph.ObjectGraph`` now has a ``__contains__`` method,
+          which means you can use the ``in`` operator to check if a
+          node is part of a graph.
+        
+        - ``altgraph.GraphUtil.generate_random_graph`` will raise
+          ``GraphError`` instead of looping forever when it is
+          impossible to create the requested graph.
+        
+        - ``altgraph.Dot.edge_style`` raises ``GraphError`` when
+          one of the nodes is not present in the graph. The method
+          silently added the tail in the past, but without ensuring
+          a consistent graph state.
+        
+        - ``altgraph.Dot.save_img`` now works when the mode is
+          ``"neato"``.
+        
+        0.7.2
+        -----
+        
+        This is a minor bugfix release
+        
+        Bugfixes:
+        
+        - distutils didn't include the documentation subtree
+        
+        0.7.1
+        -----
+        
+        This is a minor feature release
+        
+        Features:
+        
+        - Documentation is now generated using `sphinx <http://pypi.python.org/pypi/sphinx>`_
+          and can be viewed at <http://packages.python.org/altgraph>.
+        
+        - The repository has moved to bitbucket
+        
+        - ``altgraph.GraphStat.avg_hops`` is no longer present, the function had no
+          implementation and no specified behaviour.
+        
+        - the module ``altgraph.compat`` is gone, which means altgraph will no
+          longer work with Python 2.3.
+        
+        
+        0.7.0
+        -----
+        
+        This is a minor feature release.
+        
+        Features:
+        
+        - Support for Python 3
+        
+        - It is now possible to run tests using 'python setup.py test'
+        
+          (The actual testsuite is still very minimal though)
+        
+Keywords: graph
+Platform: any
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Scientific/Engineering :: Mathematics
+Classifier: Topic :: Scientific/Engineering :: Visualization
diff --git a/catapult/telemetry/third_party/altgraph/altgraph.egg-info/SOURCES.txt b/catapult/telemetry/third_party/altgraph/altgraph.egg-info/SOURCES.txt
new file mode 100644
index 0000000..c345b03
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/altgraph.egg-info/SOURCES.txt
@@ -0,0 +1,92 @@
+MANIFEST.in
+README.txt
+ReadMe.txt
+setup.cfg
+setup.py
+altgraph/Dot.py
+altgraph/Graph.py
+altgraph/GraphAlgo.py
+altgraph/GraphStat.py
+altgraph/GraphUtil.py
+altgraph/ObjectGraph.py
+altgraph/__init__.py
+altgraph.egg-info/PKG-INFO
+altgraph.egg-info/SOURCES.txt
+altgraph.egg-info/dependency_links.txt
+altgraph.egg-info/top_level.txt
+altgraph.egg-info/zip-safe
+altgraph_tests/__init__.py
+altgraph_tests/test_altgraph.py
+altgraph_tests/test_dot.py
+altgraph_tests/test_graph.py
+altgraph_tests/test_graphstat.py
+altgraph_tests/test_graphutil.py
+altgraph_tests/test_object_graph.py
+doc/Makefile
+doc/changelog.rst
+doc/conf.py
+doc/core.rst
+doc/dot.rst
+doc/graph.rst
+doc/graphalgo.rst
+doc/graphstat.rst
+doc/graphutil.rst
+doc/index.rst
+doc/license.rst
+doc/objectgraph.rst
+doc/_build/doctrees/changelog.doctree
+doc/_build/doctrees/core.doctree
+doc/_build/doctrees/dot.doctree
+doc/_build/doctrees/environment.pickle
+doc/_build/doctrees/graph.doctree
+doc/_build/doctrees/graphalgo.doctree
+doc/_build/doctrees/graphstat.doctree
+doc/_build/doctrees/graphutil.doctree
+doc/_build/doctrees/index.doctree
+doc/_build/doctrees/license.doctree
+doc/_build/doctrees/objectgraph.doctree
+doc/_build/html/.buildinfo
+doc/_build/html/changelog.html
+doc/_build/html/core.html
+doc/_build/html/dot.html
+doc/_build/html/genindex.html
+doc/_build/html/graph.html
+doc/_build/html/graphalgo.html
+doc/_build/html/graphstat.html
+doc/_build/html/graphutil.html
+doc/_build/html/index.html
+doc/_build/html/license.html
+doc/_build/html/objectgraph.html
+doc/_build/html/objects.inv
+doc/_build/html/py-modindex.html
+doc/_build/html/search.html
+doc/_build/html/searchindex.js
+doc/_build/html/_sources/changelog.txt
+doc/_build/html/_sources/core.txt
+doc/_build/html/_sources/dot.txt
+doc/_build/html/_sources/graph.txt
+doc/_build/html/_sources/graphalgo.txt
+doc/_build/html/_sources/graphstat.txt
+doc/_build/html/_sources/graphutil.txt
+doc/_build/html/_sources/index.txt
+doc/_build/html/_sources/license.txt
+doc/_build/html/_sources/objectgraph.txt
+doc/_build/html/_static/ajax-loader.gif
+doc/_build/html/_static/basic.css
+doc/_build/html/_static/comment-bright.png
+doc/_build/html/_static/comment-close.png
+doc/_build/html/_static/comment.png
+doc/_build/html/_static/doctools.js
+doc/_build/html/_static/down-pressed.png
+doc/_build/html/_static/down.png
+doc/_build/html/_static/file.png
+doc/_build/html/_static/jquery.js
+doc/_build/html/_static/minus.png
+doc/_build/html/_static/nature.css
+doc/_build/html/_static/plus.png
+doc/_build/html/_static/pygments.css
+doc/_build/html/_static/searchtools.js
+doc/_build/html/_static/underscore.js
+doc/_build/html/_static/up-pressed.png
+doc/_build/html/_static/up.png
+doc/_build/html/_static/websupport.js
\ No newline at end of file
diff --git a/catapult/third_party/six/six.egg-info/dependency_links.txt b/catapult/telemetry/third_party/altgraph/altgraph.egg-info/dependency_links.txt
similarity index 100%
copy from catapult/third_party/six/six.egg-info/dependency_links.txt
copy to catapult/telemetry/third_party/altgraph/altgraph.egg-info/dependency_links.txt
diff --git a/catapult/telemetry/third_party/altgraph/altgraph.egg-info/top_level.txt b/catapult/telemetry/third_party/altgraph/altgraph.egg-info/top_level.txt
new file mode 100644
index 0000000..5ad6b8a
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/altgraph.egg-info/top_level.txt
@@ -0,0 +1 @@
+altgraph
diff --git a/catapult/third_party/six/six.egg-info/dependency_links.txt b/catapult/telemetry/third_party/altgraph/altgraph.egg-info/zip-safe
similarity index 100%
copy from catapult/third_party/six/six.egg-info/dependency_links.txt
copy to catapult/telemetry/third_party/altgraph/altgraph.egg-info/zip-safe
diff --git a/catapult/telemetry/third_party/altgraph/altgraph/Dot.py b/catapult/telemetry/third_party/altgraph/altgraph/Dot.py
new file mode 100644
index 0000000..49a471e
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/altgraph/Dot.py
@@ -0,0 +1,299 @@
+'''
+altgraph.Dot - Interface to the dot language
+============================================
+
+The :py:mod:`~altgraph.Dot` module provides a simple interface to the
+file format used in the `graphviz <http://www.research.att.com/sw/tools/graphviz/>`_
+program. The module is intended to offload the most tedious part of the process
+(the **dot** file generation) while transparently exposing most of its features.
+
+To display the graphs or to generate image files the `graphviz <http://www.research.att.com/sw/tools/graphviz/>`_
+package needs to be installed on the system; moreover, the :command:`dot` and :command:`dotty` programs must
+be accessible in the program path so that they can be run from processes spawned
+within the module.
+
+Example usage
+-------------
+
+Here is a typical usage::
+
+    from altgraph import Graph, Dot
+
+    # create a graph
+    edges = [ (1,2), (1,3), (3,4), (3,5), (4,5), (5,4) ]
+    graph = Graph.Graph(edges)
+
+    # create a dot representation of the graph
+    dot = Dot.Dot(graph)
+
+    # display the graph
+    dot.display()
+
+    # save the dot representation into the mydot.dot file
+    dot.save_dot(file_name='mydot.dot')
+
+    # save dot file as gif image into the graph.gif file
+    dot.save_img(file_name='graph', file_type='gif')
+
+Directed graph and non-directed graph
+-------------------------------------
+
+The Dot class can be used for both directed and non-directed graphs
+by passing the ``graphtype`` parameter.
+
+Example::
+
+    # create directed graph(default)
+    dot = Dot.Dot(graph, graphtype="digraph")
+
+    # create non-directed graph
+    dot = Dot.Dot(graph, graphtype="graph")
+
+Customizing the output
+----------------------
+
+The graph drawing process may be customized by passing
+valid :command:`dot` parameters for the nodes and edges. For a list of all
+parameters see the `graphviz <http://www.research.att.com/sw/tools/graphviz/>`_
+documentation.
+
+Example::
+
+    # customizing the way the overall graph is drawn
+    dot.style(size='10,10', rankdir='RL', page='5, 5' , ranksep=0.75)
+
+    # customizing node drawing
+    dot.node_style(1, label='BASE_NODE',shape='box', color='blue' )
+    dot.node_style(2, style='filled', fillcolor='red')
+
+    # customizing edge drawing
+    dot.edge_style(1, 2, style='dotted')
+    dot.edge_style(3, 5, arrowhead='dot', label='binds', labelangle='90')
+    dot.edge_style(4, 5, arrowsize=2, style='bold')
+
+
+.. note::
+
+   dotty (invoked via :py:func:`~altgraph.Dot.display`) may not be able to
+   display all graphics styles. To verify the output save it to an image file
+   and look at it that way.
+
+Valid attributes
+----------------
+
+    - dot styles, passed via the :py:meth:`Dot.style` method::
+
+        rankdir = 'LR'   (draws the graph horizontally, left to right)
+        ranksep = number (rank separation in inches)
+
+    - node attributes, passed via the :py:meth:`Dot.node_style` method::
+
+        style = 'filled' | 'invisible' | 'diagonals' | 'rounded'
+        shape = 'box' | 'ellipse' | 'circle' | 'point' | 'triangle'
+
+    - edge attributes, passed via the :py:meth:`Dot.edge_style` method::
+
+        style     = 'dashed' | 'dotted' | 'solid' | 'invis' | 'bold'
+        arrowhead = 'box' | 'crow' | 'diamond' | 'dot' | 'inv' | 'none' | 'tee' | 'vee'
+        weight    = number (the larger the number the closer the nodes will be)
+
+    - valid `graphviz colors <http://www.research.att.com/~erg/graphviz/info/colors.html>`_
+
+    - for more details on how to control the graph drawing process see the
+      `graphviz reference <http://www.research.att.com/sw/tools/graphviz/refs.html>`_.
+'''
+import os
+import warnings
+
+from altgraph import GraphError
+
+
+class Dot(object):
+    '''
+    A class providing a **graphviz** (dot language) representation
+    allowing fine-grained control over how the graph is being
+    displayed.
+
+    If the :command:`dot` and :command:`dotty` programs are not in the current system path,
+    their location needs to be specified in the constructor.
+    '''
+
+    def __init__(self, graph=None, nodes=None, edgefn=None, nodevisitor=None, edgevisitor=None, name="G", dot='dot', dotty='dotty', neato='neato', graphtype="digraph"):
+        '''
+        Initialization.
+        '''
+        self.name, self.attr = name, {}
+
+        assert graphtype in ['graph', 'digraph']
+        self.type = graphtype
+
+        self.temp_dot = "tmp_dot.dot"
+        self.temp_neo = "tmp_neo.dot"
+
+        self.dot, self.dotty, self.neato = dot, dotty, neato
+
+        # self.nodes: node styles
+        # self.edges: edge styles
+        self.nodes, self.edges = {}, {}
+
+        if graph is not None and nodes is None:
+            nodes = graph
+        if graph is not None and edgefn is None:
+            def edgefn(node, graph=graph):
+                return graph.out_nbrs(node)
+        if nodes is None:
+            nodes = ()
+
+        seen = set()
+        for node in nodes:
+            if nodevisitor is None:
+                style = {}
+            else:
+                style = nodevisitor(node)
+            if style is not None:
+                self.nodes[node] = {}
+                self.node_style(node, **style)
+                seen.add(node)
+        if edgefn is not None:
+            for head in seen:
+                for tail in (n for n in edgefn(head) if n in seen):
+                    if edgevisitor is None:
+                        edgestyle = {}
+                    else:
+                        edgestyle = edgevisitor(head, tail)
+                    if edgestyle is not None:
+                        if head not in self.edges:
+                            self.edges[head] = {}
+                        self.edges[head][tail] = {}
+                        self.edge_style(head, tail, **edgestyle)
+
+    def style(self, **attr):
+        '''
+        Changes the overall style
+        '''
+        self.attr = attr
+
+    def display(self, mode='dot'):
+        '''
+        Displays the current graph via dotty
+        '''
+
+        if  mode == 'neato':
+            self.save_dot(self.temp_neo)
+            neato_cmd = "%s -o %s %s" % (self.neato, self.temp_dot, self.temp_neo)
+            os.system(neato_cmd)
+        else:
+            self.save_dot(self.temp_dot)
+
+        plot_cmd = "%s %s" % (self.dotty, self.temp_dot)
+        os.system(plot_cmd)
+
+    def node_style(self, node, **kwargs):
+        '''
+        Sets the style of a node in the dot representation.
+        '''
+        if node not in self.edges:
+            self.edges[node] = {}
+        self.nodes[node] = kwargs
+
+    def all_node_style(self, **kwargs):
+        '''
+        Modifies all node styles
+        '''
+        for node in self.nodes:
+            self.node_style(node, **kwargs)
+
+    def edge_style(self, head, tail, **kwargs):
+        '''
+        Sets the style of an edge in the dot representation.
+        '''
+        if tail not in self.nodes:
+            raise GraphError("invalid node %s" % (tail,))
+
+        try:
+            if tail not in self.edges[head]:
+                self.edges[head][tail]= {}
+            self.edges[head][tail] = kwargs
+        except KeyError:
+            raise GraphError("invalid edge  %s -> %s " % (head, tail) )
+
+    def iterdot(self):
+        # write graph title
+        if self.type == 'digraph':
+            yield 'digraph %s {\n' % (self.name,)
+        elif self.type == 'graph':
+            yield 'graph %s {\n' % (self.name,)
+
+        else:
+            raise GraphError("unsupported graphtype %s" % (self.type,))
+
+        # write overall graph attributes
+        for attr_name, attr_value in sorted(self.attr.items()):
+            yield '%s="%s";' % (attr_name, attr_value)
+        yield '\n'
+
+        # some reusable patterns
+        cpatt  = '%s="%s",'      # to separate attributes
+        epatt  = '];\n'          # to end attributes
+
+        # write node attributes
+        for node_name, node_attr in sorted(self.nodes.items()):
+            yield '\t"%s" [' % (node_name,)
+            for attr_name, attr_value in sorted(node_attr.items()):
+                yield cpatt % (attr_name, attr_value)
+            yield epatt
+
+        # write edge attributes
+        for head in sorted(self.edges):
+            for tail in sorted(self.edges[head]):
+                if self.type == 'digraph':
+                    yield '\t"%s" -> "%s" [' % (head, tail)
+                else:
+                    yield '\t"%s" -- "%s" [' % (head, tail)
+                for attr_name, attr_value in sorted(self.edges[head][tail].items()):
+                    yield cpatt % (attr_name, attr_value)
+                yield epatt
+
+        # finish file
+        yield '}\n'
+
+    def __iter__(self):
+        return self.iterdot()
+
+    def save_dot(self, file_name=None):
+        '''
+        Saves the current graph representation into a file
+        '''
+
+        if not file_name:
+            warnings.warn("always pass a file_name", DeprecationWarning)
+            file_name = self.temp_dot
+
+        fp   = open(file_name, "w")
+        try:
+            for chunk in self.iterdot():
+                fp.write(chunk)
+        finally:
+            fp.close()
+
+    def save_img(self, file_name=None, file_type="gif", mode='dot'):
+        '''
+        Saves the dot file as an image file
+        '''
+
+        if not file_name:
+            warnings.warn("always pass a file_name", DeprecationWarning)
+            file_name = "out"
+
+        if  mode == 'neato':
+            self.save_dot(self.temp_neo)
+            neato_cmd = "%s -o %s %s" % (self.neato, self.temp_dot, self.temp_neo)
+            os.system(neato_cmd)
+            plot_cmd = self.dot
+        else:
+            self.save_dot(self.temp_dot)
+            plot_cmd = self.dot
+
+        file_name  = "%s.%s" % (file_name, file_type)
+        create_cmd = "%s -T%s %s -o %s" % (plot_cmd, file_type, self.temp_dot, file_name)
+        os.system(create_cmd)
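
As a quick orientation for reviewers of the class above: a Dot instance is iterable, and iterating it yields the same dot-language chunks that save_dot() writes, so the output can be inspected without touching the filesystem. A minimal sketch, assuming an importable altgraph package (the graphviz binaries are only needed for display() and save_img()):

    from altgraph import Graph, Dot

    # Build a small directed graph and collect its dot-language source in memory.
    graph = Graph.Graph([(1, 2), (1, 3), (3, 4)])
    dot = Dot.Dot(graph, graphtype='digraph')

    dot.style(rankdir='LR')                       # overall graph attributes
    dot.node_style(1, shape='box', label='root')  # per-node attributes
    dot.edge_style(1, 3, style='dotted')          # per-edge attributes

    dot_source = ''.join(dot.iterdot())           # same chunks save_dot() writes
    print(dot_source)
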
diff --git a/catapult/telemetry/third_party/altgraph/altgraph/Graph.py b/catapult/telemetry/third_party/altgraph/altgraph/Graph.py
new file mode 100644
index 0000000..491e5c2
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/altgraph/Graph.py
@@ -0,0 +1,677 @@
+"""
+altgraph.Graph - Base Graph class
+=================================
+
+..
+  #--Version 2.1
+  #--Bob Ippolito October, 2004
+
+  #--Version 2.0
+  #--Istvan Albert June, 2004
+
+  #--Version 1.0
+  #--Nathan Denny, May 27, 1999
+"""
+
+from altgraph import GraphError
+from collections import deque
+
+class Graph(object):
+    """
+    The Graph class represents a directed graph with *N* nodes and *E* edges.
+
+    Naming conventions:
+
+    - the prefixes such as *out*, *inc* and *all* will refer to methods
+      that operate on the outgoing, incoming or all edges of that node.
+
+      For example: :py:meth:`inc_degree` will refer to the degree of the node
+      computed over the incoming edges (the number of neighbours linking to
+      the node).
+
+    - the prefixes such as *forw* and *back* will refer to the
+      orientation of the edges used in the method with respect to the node.
+
+      For example: :py:meth:`forw_bfs` will start at the node then use the outgoing
+      edges to traverse the graph (goes forward).
+    """
+
+    def __init__(self, edges=None):
+        """
+        Initialization
+        """
+
+        self.next_edge = 0
+        self.nodes, self.edges = {}, {}
+        self.hidden_edges, self.hidden_nodes = {}, {}
+
+        if edges is not None:
+            for item in edges:
+                if len(item) == 2:
+                    head, tail = item
+                    self.add_edge(head, tail)
+                elif len(item) == 3:
+                    head, tail, data = item
+                    self.add_edge(head, tail, data)
+                else:
+                    raise GraphError("Cannot create edge from %s"%(item,))
+
+
+    def __repr__(self):
+        return '<Graph: %d nodes, %d edges>' % (
+            self.number_of_nodes(), self.number_of_edges())
+
+    def add_node(self, node, node_data=None):
+        """
+        Adds a new node to the graph.  Arbitrary data can be attached to the
+        node via the node_data parameter.  Adding the same node twice will be
+        silently ignored.
+
+        The node must be a hashable value.
+        """
+        #
+        # the nodes will contain tuples that will store incoming edges,
+        # outgoing edges and data
+        #
+        # index 0 -> incoming edges
+        # index 1 -> outgoing edges
+
+        if node in self.hidden_nodes:
+            # Node is present, but hidden
+            return
+
+        if node not in self.nodes:
+            self.nodes[node] = ([], [], node_data)
+
+    def add_edge(self, head_id, tail_id, edge_data=1, create_nodes=True):
+        """
+        Adds a directed edge going from head_id to tail_id.
+        Arbitrary data can be attached to the edge via edge_data.
+        The head and tail nodes are created automatically if they do not exist (see create_nodes).
+
+        :param head_id: head node
+        :param tail_id: tail node
+        :param edge_data: (optional) data attached to the edge
+        :param create_nodes: (optional) creates the head_id or tail_id node in case they did not exist
+        """
+        # shortcut
+        edge = self.next_edge
+
+        # add nodes if on automatic node creation
+        if create_nodes:
+            self.add_node(head_id)
+            self.add_node(tail_id)
+
+        # update the corresponding incoming and outgoing lists in the nodes
+        # index 0 -> incoming edges
+        # index 1 -> outgoing edges
+
+        try:
+            self.nodes[tail_id][0].append(edge)
+            self.nodes[head_id][1].append(edge)
+        except KeyError:
+            raise GraphError('Invalid nodes %s -> %s' % (head_id, tail_id))
+
+        # store edge information
+        self.edges[edge] = (head_id, tail_id, edge_data)
+
+
+        self.next_edge += 1
+
+    def hide_edge(self, edge):
+        """
+        Hides an edge from the graph. The edge may be unhidden at some later
+        time.
+        """
+        try:
+            head_id, tail_id, edge_data = self.hidden_edges[edge] = self.edges[edge]
+            self.nodes[tail_id][0].remove(edge)
+            self.nodes[head_id][1].remove(edge)
+            del self.edges[edge]
+        except KeyError:
+            raise GraphError('Invalid edge %s' % edge)
+
+    def hide_node(self, node):
+        """
+        Hides a node from the graph.  The incoming and outgoing edges of the
+        node will also be hidden.  The node may be unhidden at some later time.
+        """
+        try:
+            all_edges = self.all_edges(node)
+            self.hidden_nodes[node] = (self.nodes[node], all_edges)
+            for edge in all_edges:
+                self.hide_edge(edge)
+            del self.nodes[node]
+        except KeyError:
+            raise GraphError('Invalid node %s' % node)
+
+    def restore_node(self, node):
+        """
+        Restores a previously hidden node back into the graph and restores
+        all of its incoming and outgoing edges.
+        """
+        try:
+            self.nodes[node], all_edges = self.hidden_nodes[node]
+            for edge in all_edges:
+                self.restore_edge(edge)
+            del self.hidden_nodes[node]
+        except KeyError:
+            raise GraphError('Invalid node %s' % node)
+
+    def restore_edge(self, edge):
+        """
+        Restores a previously hidden edge back into the graph.
+        """
+        try:
+            head_id, tail_id, data = self.hidden_edges[edge]
+            self.nodes[tail_id][0].append(edge)
+            self.nodes[head_id][1].append(edge)
+            self.edges[edge] = head_id, tail_id, data
+            del self.hidden_edges[edge]
+        except KeyError:
+            raise GraphError('Invalid edge %s' % edge)
+
+    def restore_all_edges(self):
+        """
+        Restores all hidden edges.
+        """
+        for edge in list(self.hidden_edges.keys()):
+            try:
+                self.restore_edge(edge)
+            except GraphError:
+                pass
+
+    def restore_all_nodes(self):
+        """
+        Restores all hidden nodes.
+        """
+        for node in list(self.hidden_nodes.keys()):
+            self.restore_node(node)
+
+    def __contains__(self, node):
+        """
+        Test whether a node is in the graph
+        """
+        return node in self.nodes
+
+    def edge_by_id(self, edge):
+        """
+        Returns the (head, tail) node pair for the given edge id
+        """
+        try:
+            head, tail, data =  self.edges[edge]
+        except KeyError:
+            head, tail = None, None
+            raise GraphError('Invalid edge %s' % edge)
+
+        return (head, tail)
+
+    def edge_by_node(self, head, tail):
+        """
+        Returns the edge that connects the head_id and tail_id nodes
+        """
+        for edge in self.out_edges(head):
+            if self.tail(edge) == tail:
+                return edge
+        return None
+
+    def number_of_nodes(self):
+        """
+        Returns the number of nodes
+        """
+        return len(self.nodes)
+
+    def number_of_edges(self):
+        """
+        Returns the number of edges
+        """
+        return len(self.edges)
+
+    def __iter__(self):
+        """
+        Iterates over all nodes in the graph
+        """
+        return iter(self.nodes)
+
+    def node_list(self):
+        """
+        Return a list of the node ids for all visible nodes in the graph.
+        """
+        return list(self.nodes.keys())
+
+    def edge_list(self):
+        """
+        Return a list of the edge ids for all visible edges in the graph.
+        """
+        return list(self.edges.keys())
+
+    def number_of_hidden_edges(self):
+        """
+        Returns the number of hidden edges
+        """
+        return len(self.hidden_edges)
+
+    def number_of_hidden_nodes(self):
+        """
+        Returns the number of hidden nodes
+        """
+        return len(self.hidden_nodes)
+
+    def hidden_node_list(self):
+        """
+        Returns the list with the hidden nodes
+        """
+        return list(self.hidden_nodes.keys())
+
+    def hidden_edge_list(self):
+        """
+        Returns a list with the hidden edges
+        """
+        return list(self.hidden_edges.keys())
+
+    def describe_node(self, node):
+        """
+        return node, node data, outgoing edges, incoming edges for node
+        """
+        incoming, outgoing, data = self.nodes[node]
+        return node, data, outgoing, incoming
+
+    def describe_edge(self, edge):
+        """
+        return edge, edge data, head, tail for edge
+        """
+        head, tail, data = self.edges[edge]
+        return edge, data, head, tail
+
+    def node_data(self, node):
+        """
+        Returns the data associated with a node
+        """
+        return self.nodes[node][2]
+
+    def edge_data(self, edge):
+        """
+        Returns the data associated with an edge
+        """
+        return self.edges[edge][2]
+
+    def update_edge_data(self, edge, edge_data):
+        """
+        Replace the edge data for a specific edge
+        """
+        self.edges[edge] = self.edges[edge][0:2] + (edge_data,)
+
+    def head(self, edge):
+        """
+        Returns the node of the head of the edge.
+        """
+        return self.edges[edge][0]
+
+    def tail(self, edge):
+        """
+        Returns node of the tail of the edge.
+        """
+        return self.edges[edge][1]
+
+    def out_nbrs(self, node):
+        """
+        List of nodes connected by outgoing edges
+        """
+        l = [self.tail(n) for n in self.out_edges(node)]
+        return l
+
+    def inc_nbrs(self, node):
+        """
+        List of nodes connected by incoming edges
+        """
+        l = [self.head(n) for n in self.inc_edges(node)]
+        return l
+
+    def all_nbrs(self, node):
+        """
+        List of nodes connected by incoming and outgoing edges
+        """
+        l = dict.fromkeys( self.inc_nbrs(node) + self.out_nbrs(node) )
+        return list(l)
+
+    def out_edges(self, node):
+        """
+        Returns a list of the outgoing edges
+        """
+        try:
+            return list(self.nodes[node][1])
+        except KeyError:
+            raise GraphError('Invalid node %s' % node)
+
+        return None
+
+    def inc_edges(self, node):
+        """
+        Returns a list of the incoming edges
+        """
+        try:
+            return list(self.nodes[node][0])
+        except KeyError:
+            raise GraphError('Invalid node %s' % node)
+
+        return None
+
+    def all_edges(self, node):
+        """
+        Returns a set of the incoming and outgoing edges.
+        """
+        return set(self.inc_edges(node) + self.out_edges(node))
+
+    def out_degree(self, node):
+        """
+        Returns the number of outgoing edges
+        """
+        return len(self.out_edges(node))
+
+    def inc_degree(self, node):
+        """
+        Returns the number of incoming edges
+        """
+        return len(self.inc_edges(node))
+
+    def all_degree(self, node):
+        """
+        The total degree of a node
+        """
+        return self.inc_degree(node) + self.out_degree(node)
+
+    def _topo_sort(self, forward=True):
+        """
+        Topological sort.
+
+        Returns a list of nodes where the successors (based on outgoing and
+        incoming edges selected by the forward parameter) of any given node
+        appear in the sequence after that node.
+        """
+        topo_list = []
+        queue = deque()
+        indeg = {}
+
+        # select the operation that will be performed
+        if forward:
+            get_edges = self.out_edges
+            get_degree = self.inc_degree
+            get_next = self.tail
+        else:
+            get_edges = self.inc_edges
+            get_degree = self.out_degree
+            get_next = self.head
+
+        for node in self.node_list():
+            degree = get_degree(node)
+            if degree:
+                indeg[node] = degree
+            else:
+                queue.append(node)
+
+        while queue:
+            curr_node = queue.popleft()
+            topo_list.append(curr_node)
+            for edge in get_edges(curr_node):
+                tail_id = get_next(edge)
+                if tail_id in indeg:
+                    indeg[tail_id] -= 1
+                    if indeg[tail_id] == 0:
+                        queue.append(tail_id)
+
+        if len(topo_list) == len(self.node_list()):
+            valid = True
+        else:
+            # the graph has cycles, invalid topological sort
+            valid = False
+
+        return (valid, topo_list)
+
+    def forw_topo_sort(self):
+        """
+        Topological sort.
+
+        Returns a list of nodes where the successors (based on outgoing edges)
+        of any given node appear in the sequence after that node.
+        """
+        return self._topo_sort(forward=True)
+
+    def back_topo_sort(self):
+        """
+        Reverse topological sort.
+
+        Returns a list of nodes where the successors (based on incoming edges)
+        of any given node appear in the sequence after that node.
+        """
+        return self._topo_sort(forward=False)
+
+    def _bfs_subgraph(self, start_id, forward=True):
+        """
+        Private method that creates a subgraph in BFS order.
+
+        The forward parameter specifies whether it is a forward or backward
+        traversal.
+        """
+        if forward:
+            get_bfs  = self.forw_bfs
+            get_nbrs = self.out_nbrs
+        else:
+            get_bfs  = self.back_bfs
+            get_nbrs = self.inc_nbrs
+
+        g = Graph()
+        bfs_list = get_bfs(start_id)
+        for node in bfs_list:
+            g.add_node(node)
+
+        for node in bfs_list:
+            for nbr_id in get_nbrs(node):
+                g.add_edge(node, nbr_id)
+
+        return g
+
+    def forw_bfs_subgraph(self, start_id):
+        """
+        Creates and returns a subgraph consisting of the breadth first
+        reachable nodes based on their outgoing edges.
+        """
+        return self._bfs_subgraph(start_id, forward=True)
+
+    def back_bfs_subgraph(self, start_id):
+        """
+        Creates and returns a subgraph consisting of the breadth first
+        reachable nodes based on the incoming edges.
+        """
+        return self._bfs_subgraph(start_id, forward=False)
+
+    def iterdfs(self, start, end=None, forward=True):
+        """
+        Yields nodes in a depth-first traversal.
+
+        The forward parameter specifies whether it is a forward or backward
+        traversal.
+        """
+        visited, stack = set([start]), deque([start])
+
+        if forward:
+            get_edges = self.out_edges
+            get_next = self.tail
+        else:
+            get_edges = self.inc_edges
+            get_next = self.head
+
+        while stack:
+            curr_node = stack.pop()
+            yield curr_node
+            if curr_node == end:
+                break
+            for edge in sorted(get_edges(curr_node)):
+                tail = get_next(edge)
+                if tail not in visited:
+                    visited.add(tail)
+                    stack.append(tail)
+
+    def iterdata(self, start, end=None, forward=True, condition=None):
+        """
+        Perform a depth-first walk of the graph (as ``iterdfs``)
+        and yield the item data of every node where condition matches. The
+        condition callback is only called when node_data is not None.
+        """
+
+        visited, stack = set([start]), deque([start])
+
+        if forward:
+            get_edges = self.out_edges
+            get_next = self.tail
+        else:
+            get_edges = self.inc_edges
+            get_next = self.head
+
+        get_data = self.node_data
+
+        while stack:
+            curr_node = stack.pop()
+            curr_data = get_data(curr_node)
+            if curr_data is not None:
+                if condition is not None and not condition(curr_data):
+                    continue
+                yield curr_data
+            if curr_node == end:
+                break
+            for edge in get_edges(curr_node):
+                tail = get_next(edge)
+                if tail not in visited:
+                    visited.add(tail)
+                    stack.append(tail)
+
+    def _iterbfs(self, start, end=None, forward=True):
+        """
+        The forward parameter specifies whether it is a forward or backward
+        traversal.  Yields (node, hop) tuples where the first value is the
+        node id and the second value is the hop count from the start node.
+        """
+        queue, visited = deque([(start, 0)]), set([start])
+
+        # the direction of the bfs depends on the edges that are sampled
+        if forward:
+            get_edges = self.out_edges
+            get_next = self.tail
+        else:
+            get_edges = self.inc_edges
+            get_next = self.head
+
+        while queue:
+            curr_node, curr_step = queue.popleft()
+            yield (curr_node, curr_step)
+            if curr_node == end:
+                break
+            for edge in get_edges(curr_node):
+                tail = get_next(edge)
+                if tail not in visited:
+                    visited.add(tail)
+                    queue.append((tail, curr_step + 1))
+
+
+    def forw_bfs(self, start, end=None):
+        """
+        Returns a list of nodes in some forward BFS order.
+
+        Starting from the start node the breadth first search proceeds along
+        outgoing edges.
+        """
+        return [node for node, step in self._iterbfs(start, end, forward=True)]
+
+    def back_bfs(self, start, end=None):
+        """
+        Returns a list of nodes in some backward BFS order.
+
+        Starting from the start node the breadth first search proceeds along
+        incoming edges.
+        """
+        return [node for node, step in self._iterbfs(start, end, forward=False)]
+
+    def forw_dfs(self, start, end=None):
+        """
+        Returns a list of nodes in some forward DFS order.
+
+        Starting with the start node the depth first search proceeds along
+        outgoing edges.
+        """
+        return list(self.iterdfs(start, end, forward=True))
+
+    def back_dfs(self, start, end=None):
+        """
+        Returns a list of nodes in some backward DFS order.
+
+        Starting from the start node the depth first search proceeds along
+        incoming edges.
+        """
+        return list(self.iterdfs(start, end, forward=False))
+
+    def connected(self):
+        """
+        Returns :py:data:`True` if every node in the graph can be reached from every
+        other node.
+        """
+        node_list = self.node_list()
+        for node in node_list:
+            bfs_list = self.forw_bfs(node)
+            if len(bfs_list) != len(node_list):
+                return False
+        return True
+
+    def clust_coef(self, node):
+        """
+        Computes and returns the local clustering coefficient of node.  The
+        local clustering coefficient is the ratio of the actual number of edges between
+        the neighbours of the node to the maximum possible number of edges between those neighbours.
+
+        See <http://en.wikipedia.org/wiki/Clustering_coefficient#Local_clustering_coefficient>
+        for a formal definition.
+        """
+        num = 0
+        nbr_set = set(self.out_nbrs(node))
+
+        if node in nbr_set:
+            nbr_set.remove(node) # loop defense
+
+        for nbr in nbr_set:
+            sec_set = set(self.out_nbrs(nbr))
+            if nbr in sec_set:
+                sec_set.remove(nbr) # loop defense
+            num += len(nbr_set & sec_set)
+
+        nbr_num = len(nbr_set)
+        if nbr_num:
+            clust_coef = float(num) / (nbr_num * (nbr_num - 1))
+        else:
+            clust_coef = 0.0
+        return clust_coef
+
+    def get_hops(self, start, end=None, forward=True):
+        """
+        Computes the hop distance to all nodes centered around a specified node.
+
+        First order neighbours are at hop 1, their neighbours are at hop 2, etc.
+        Uses :py:meth:`forw_bfs` or :py:meth:`back_bfs` depending on the value of the forward
+        parameter.  If the distance between all neighbouring nodes is 1 the hop
+        number corresponds to the shortest distance between the nodes.
+
+        :param start: the starting node
+        :param end: ending node (optional). When not specified will search the whole graph.
+        :param forward: directionality parameter (optional). If C{True} (default) it uses L{forw_bfs} otherwise L{back_bfs}.
+        :return: returns a list of tuples where each tuple contains the node and the hop.
+
+        Typical usage::
+
+            >>> print (graph.get_hops(1, 8))
+            >>> [(1, 0), (2, 1), (3, 1), (4, 2), (5, 3), (7, 4), (8, 5)]
+            # node 1 is at 0 hops
+            # node 2 is at 1 hop
+            # ...
+            # node 8 is at 5 hops
+        """
+        if forward:
+            return list(self._iterbfs(start=start, end=end, forward=True))
+        else:
+            return list(self._iterbfs(start=start, end=end, forward=False))
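
A short usage sketch of the API defined above (not part of the change itself), assuming an importable altgraph package; the comments show representative return values for this particular edge list:

    from altgraph.Graph import Graph

    # Edges may be 2-tuples or 3-tuples (the third element is edge data).
    g = Graph([(1, 2), (1, 3), (3, 4), (3, 5, 'weight')])

    print(g)                           # <Graph: 5 nodes, 4 edges>
    print(g.forw_bfs(1))               # [1, 2, 3, 4, 5]
    print(g.forw_topo_sort())          # (True, [...]) -- True means no cycles
    print(g.get_hops(1))               # [(1, 0), (2, 1), (3, 1), (4, 2), (5, 2)]

    g.hide_node(3)                     # also hides every edge touching node 3
    print(g.number_of_hidden_nodes())  # 1
    g.restore_all_nodes()              # puts node 3 and its edges back
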
diff --git a/catapult/telemetry/third_party/altgraph/altgraph/GraphAlgo.py b/catapult/telemetry/third_party/altgraph/altgraph/GraphAlgo.py
new file mode 100644
index 0000000..9e6fff2
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/altgraph/GraphAlgo.py
@@ -0,0 +1,147 @@
+'''
+altgraph.GraphAlgo - Graph algorithms
+=====================================
+'''
+from altgraph import GraphError
+
+def dijkstra(graph, start, end=None):
+    """
+    Dijkstra's algorithm for shortest paths
+
+    `David Eppstein, UC Irvine, 4 April 2002 <http://www.ics.uci.edu/~eppstein/161/python/>`_
+
+    `Python Cookbook Recipe <http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/119466>`_
+
+    Find shortest paths from the  start node to all nodes nearer than or equal to the end node.
+
+    Dijkstra's algorithm is only guaranteed to work correctly when all edge lengths are positive.
+    This code does not verify this property for all edges (only the edges examined until the end
+    vertex is reached), but will correctly compute shortest paths even for some graphs with negative
+    edges, and will raise an exception if it discovers that a negative edge has caused it to make a mistake.
+
+    *Adapted to altgraph by Istvan Albert, Pennsylvania State University - June, 9 2004*
+
+    """
+    D = {}    # dictionary of final distances
+    P = {}    # dictionary of predecessors
+    Q = _priorityDictionary()    # estimated distances of non-final vertices
+    Q[start] = 0
+
+    for v in Q:
+        D[v] = Q[v]
+        if v == end: break
+
+        for w in graph.out_nbrs(v):
+            edge_id  = graph.edge_by_node(v,w)
+            vwLength = D[v] + graph.edge_data(edge_id)
+            if w in D:
+                if vwLength < D[w]:
+                    raise GraphError("Dijkstra: found better path to already-final vertex")
+            elif w not in Q or vwLength < Q[w]:
+                Q[w] = vwLength
+                P[w] = v
+
+    return (D,P)
+
+def shortest_path(graph, start, end):
+    """
+    Find a single shortest path from the given start node to the given end node.
+    The input has the same conventions as dijkstra(). The output is a list of the nodes
+    in order along the shortest path.
+
+    **Note that the distances must be stored in the edge data as numeric data**
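+
+    Example (a sketch, reusing the small graph from the ``dijkstra`` example above)::
+
+        GraphAlgo.shortest_path(graph, 1, 3)    # -> [1, 2, 3]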
+    """
+
+    D,P = dijkstra(graph, start, end)
+    Path = []
+    while 1:
+        Path.append(end)
+        if end == start: break
+        end = P[end]
+    Path.reverse()
+    return Path
+
+#
+# Utility classes and functions
+#
+class _priorityDictionary(dict):
+    '''
+    Priority dictionary using binary heaps (internal use only)
+
+    David Eppstein, UC Irvine, 8 Mar 2002
+
+    Implements a data structure that acts almost like a dictionary, with two modifications:
+        1. D.smallest() returns the value x minimizing D[x].  For this to work correctly,
+            all values D[x] stored in the dictionary must be comparable.
+        2. iterating "for x in D" finds and removes the items from D in sorted order.
+            Each item is not removed until the next item is requested, so D[x] will still
+            return a useful value until the next iteration of the for-loop.
+            Each operation takes logarithmic amortized time.
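+
+    A minimal usage sketch (this class is for internal use; shown only for
+    illustration)::
+
+        Q = _priorityDictionary()
+        Q['a'] = 3
+        Q['b'] = 1
+        Q.smallest()    # -> 'b'
+        list(Q)         # -> ['b', 'a'], emptying Q as it iterates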
+    '''
+    def __init__(self):
+        '''
+        Initialize priorityDictionary by creating binary heap of pairs (value,key).
+        Note that changing or removing a dict entry will not remove the old pair from the heap
+        until it is found by smallest() or until the heap is rebuilt.
+        '''
+        self.__heap = []
+        dict.__init__(self)
+
+    def smallest(self):
+        '''
+        Find smallest item after removing deleted items from front of heap.
+        '''
+        if len(self) == 0:
+            raise IndexError("smallest of empty priorityDictionary")
+        heap = self.__heap
+        while heap[0][1] not in self or self[heap[0][1]] != heap[0][0]:
+            lastItem = heap.pop()
+            insertionPoint = 0
+            while 1:
+                smallChild = 2*insertionPoint+1
+                if smallChild+1 < len(heap) and heap[smallChild] > heap[smallChild+1] :
+                    smallChild += 1
+                if smallChild >= len(heap) or lastItem <= heap[smallChild]:
+                    heap[insertionPoint] = lastItem
+                    break
+                heap[insertionPoint] = heap[smallChild]
+                insertionPoint = smallChild
+        return heap[0][1]
+
+    def __iter__(self):
+        '''
+        Create destructive sorted iterator of priorityDictionary.
+        '''
+        def iterfn():
+            while len(self) > 0:
+                x = self.smallest()
+                yield x
+                del self[x]
+        return iterfn()
+
+    def __setitem__(self,key,val):
+        '''
+        Change value stored in dictionary and add corresponding pair to heap.
+        Rebuilds the heap if the number of deleted items gets large, to avoid memory leakage.
+        '''
+        dict.__setitem__(self,key,val)
+        heap = self.__heap
+        if len(heap) > 2 * len(self):
+            self.__heap = [(v,k) for k,v in self.iteritems()]
+            self.__heap.sort()  # builtin sort probably faster than O(n)-time heapify
+        else:
+            newPair = (val,key)
+            insertionPoint = len(heap)
+            heap.append(None)
+            while insertionPoint > 0 and newPair < heap[(insertionPoint-1)//2]:
+                heap[insertionPoint] = heap[(insertionPoint-1)//2]
+                insertionPoint = (insertionPoint-1)//2
+            heap[insertionPoint] = newPair
+
+    def setdefault(self,key,val):
+        '''
+        Reimplement setdefault to pass through our customized __setitem__.
+        '''
+        if key not in self:
+            self[key] = val
+        return self[key]
diff --git a/catapult/telemetry/third_party/altgraph/altgraph/GraphStat.py b/catapult/telemetry/third_party/altgraph/altgraph/GraphStat.py
new file mode 100644
index 0000000..25fc46c
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/altgraph/GraphStat.py
@@ -0,0 +1,73 @@
+'''
+altgraph.GraphStat - Functions providing various graph statistics
+=================================================================
+'''
+import sys
+
+def degree_dist(graph, limits=(0,0), bin_num=10, mode='out'):
+    '''
+    Computes the degree distribution for a graph.
+
+    Returns a list of tuples where the first element of the tuple is the center of the bin
+    representing a range of degrees and the second element of the tuple is the number of nodes
+    whose degree falls in that range.
+
+    Example (a sketch of a typical call; the exact bin centers and counts depend
+    on the graph)::
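+
+        from altgraph import Graph, GraphStat
+
+        g = Graph.Graph([(1, 2), (1, 3), (2, 3), (3, 4)])
+        GraphStat.degree_dist(g, bin_num=2)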
+    '''
+
+    deg = []
+    if mode == 'inc':
+        get_deg = graph.inc_degree
+    else:
+        get_deg = graph.out_degree
+
+    for node in graph:
+        deg.append( get_deg(node) )
+
+    if not deg:
+        return []
+
+    results = _binning(values=deg, limits=limits, bin_num=bin_num)
+
+    return results
+
+_EPS = 1.0/(2.0**32)
+def _binning(values, limits=(0,0), bin_num=10):
+    '''
+    Bins data that falls between certain limits; if the limits are (0, 0), the
+    minimum and maximum of the values are used.
+
+    Returns a list of tuples where the first element of the tuple is the center of the bin
+    and the second element of the tuple is the count for that bin.
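+
+    For example, ``_binning(range(100), limits=(0, 100), bin_num=10)`` returns
+    ``[(5.0, 10), (15.0, 10), ..., (95.0, 10)]`` (see the accompanying
+    ``test_graphstat.py`` tests).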
+    '''
+    if limits == (0, 0):
+        min_val, max_val = min(values) - _EPS, max(values) + _EPS
+    else:
+        min_val, max_val = limits
+
+    # get bin size
+    bin_size = (max_val - min_val)/float(bin_num)
+    bins = [0] * (bin_num)
+
+    # will ignore these outliers for now
+    out_points = 0
+    for value in values:
+        try:
+            if (value - min_val) < 0:
+                out_points += 1
+            else:
+                index = int((value - min_val)/float(bin_size))
+                bins[index] += 1
+        except IndexError:
+            out_points += 1
+
+    # make it ready for an x,y plot
+    result = []
+    center = (bin_size/2) + min_val
+    for i, y in enumerate(bins):
+        x = center + bin_size * i
+        result.append( (x,y) )
+
+    return result
diff --git a/catapult/telemetry/third_party/altgraph/altgraph/GraphUtil.py b/catapult/telemetry/third_party/altgraph/altgraph/GraphUtil.py
new file mode 100644
index 0000000..d3b6acd
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/altgraph/GraphUtil.py
@@ -0,0 +1,137 @@
+'''
+altgraph.GraphUtil - Utility classes and functions
+==================================================
+'''
+
+import random
+from collections import deque
+from altgraph import Graph
+from altgraph import GraphError
+
+def generate_random_graph(node_num, edge_num, self_loops=False, multi_edges=False):
+    '''
+    Generates and returns a :py:class:`~altgraph.Graph.Graph` instance with *node_num* nodes
+    randomly connected by *edge_num* edges.
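+
+    Example (a sketch; the wiring is random, so only the node and edge counts are
+    stable)::
+
+        g = generate_random_graph(10, 20)
+        g.number_of_nodes()    # -> 10
+        g.number_of_edges()    # -> 20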
+    '''
+    g = Graph.Graph()
+
+    if not multi_edges:
+        if self_loops:
+            max_edges = node_num * node_num
+        else:
+            max_edges = node_num * (node_num-1)
+
+        if edge_num > max_edges:
+            raise GraphError("inconsistent arguments to 'generate_random_graph'")
+
+    nodes = range(node_num)
+
+    for node in nodes:
+        g.add_node(node)
+
+    while 1:
+        head = random.choice(nodes)
+        tail = random.choice(nodes)
+
+        # loop defense
+        if head == tail and not self_loops:
+            continue
+
+        # multiple edge defense
+        if g.edge_by_node(head,tail) is not None and not multi_edges:
+            continue
+
+        # add the edge
+        g.add_edge(head, tail)
+        if g.number_of_edges() >= edge_num:
+            break
+
+    return g
+
+def generate_scale_free_graph(steps, growth_num, self_loops=False, multi_edges=False):
+    '''
+    Generates and returns a :py:class:`~altgraph.Graph.Graph` instance that will have *steps* \* *growth_num* nodes
+    and a scale free (powerlaw) connectivity. Starting with a fully connected graph with *growth_num* nodes
+    at every step *growth_num* nodes are added to the graph and are connected to existing nodes with
+    a probability proportional to the degree of these existing nodes.
+    '''
+    # FIXME: The code doesn't seem to do what the documentation claims.
+    graph = Graph.Graph()
+
+    # initialize the graph
+    store = []
+    for i in range(growth_num):
+        #store   += [ i ] * (growth_num - 1)
+        for j in range(i + 1, growth_num):
+            store.append(i)
+            store.append(j)
+            graph.add_edge(i,j)
+
+    # generate
+    for node in range(growth_num, steps * growth_num):
+        graph.add_node(node)
+        while ( graph.out_degree(node) < growth_num ):
+            nbr = random.choice(store)
+
+            # loop defense
+            if node == nbr and not self_loops:
+                continue
+
+            # multi edge defense
+            if graph.edge_by_node(node, nbr) and not multi_edges:
+                continue
+
+            graph.add_edge(node, nbr)
+
+
+        for nbr in graph.out_nbrs(node):
+            store.append(node)
+            store.append(nbr)
+
+    return graph
+
+def filter_stack(graph, head, filters):
+    """
+    Perform a walk in a depth-first order starting
+    at *head*.
+
+    Returns (visited, removes, orphans).
+
+    * visited: the set of visited nodes
+    * removes: the list of nodes where the node
+      data does not match all *filters*
+    * orphans: tuples of (last_good, node),
+      where node is not in removes, is directly
+      reachable from a node in *removes* and
+      *last_good* is the closest upstream node that is not
+      in *removes*.
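+
+    A small sketch (the node data and the filter below are purely illustrative)::
+
+        from altgraph import Graph
+
+        g = Graph.Graph()
+        g.add_node('a', 1)
+        g.add_node('b', 2)
+        g.add_node('c', 3)
+        g.add_edge('a', 'b')
+        g.add_edge('b', 'c')
+
+        visited, removes, orphans = filter_stack(g, 'a', [lambda d: d != 2])
+        # removes == set(['b']), orphans == [('a', 'c')]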
+    """
+
+    visited, removes, orphans = set([head]), set(), set()
+    stack = deque([(head, head)])
+    get_data = graph.node_data
+    get_edges = graph.out_edges
+    get_tail = graph.tail
+
+    while stack:
+        last_good, node = stack.pop()
+        data = get_data(node)
+        if data is not None:
+            for filtfunc in filters:
+                if not filtfunc(data):
+                    removes.add(node)
+                    break
+            else:
+                last_good = node
+        for edge in get_edges(node):
+            tail = get_tail(edge)
+            if last_good is not node:
+                orphans.add((last_good, tail))
+            if tail not in visited:
+                visited.add(tail)
+                stack.append((last_good, tail))
+
+    orphans = [(last_good, tail) for (last_good, tail) in orphans if tail not in removes]
+    #orphans.sort()
+
+    return visited, removes, orphans
diff --git a/catapult/telemetry/third_party/altgraph/altgraph/ObjectGraph.py b/catapult/telemetry/third_party/altgraph/altgraph/ObjectGraph.py
new file mode 100644
index 0000000..d07f51b
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/altgraph/ObjectGraph.py
@@ -0,0 +1,202 @@
+"""
+altgraph.ObjectGraph - Graph of objects with an identifier
+==========================================================
+
+A graph of objects that have a "graphident" attribute.
+graphident is the key for the object in the graph
+"""
+
+from altgraph import GraphError
+from altgraph.Graph import Graph
+from altgraph.GraphUtil import filter_stack
+
+class ObjectGraph(object):
+    """
+    A graph of objects that have a "graphident" attribute.
+    graphident is the key for the object in the graph
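+
+    A minimal sketch of a compatible node type (``Node`` below is purely
+    illustrative; any object exposing a ``graphident`` attribute works)::
+
+        class Node(object):
+            def __init__(self, graphident):
+                self.graphident = graphident
+
+        og = ObjectGraph()
+        a = og.createNode(Node, 'a')
+        b = og.createNode(Node, 'b')
+        og.createReference(a, b)
+        og.findNode('a') is a    # -> True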
+    """
+    def __init__(self, graph=None, debug=0):
+        if graph is None:
+            graph = Graph()
+        self.graphident = self
+        self.graph = graph
+        self.debug = debug
+        self.indent = 0
+        graph.add_node(self, None)
+
+    def __repr__(self):
+        return '<%s>' % (type(self).__name__,)
+
+    def flatten(self, condition=None, start=None):
+        """
+        Iterate over the subgraph that is entirely reachable by condition
+        starting from the given start node or the ObjectGraph root
+        """
+        if start is None:
+            start = self
+        start = self.getRawIdent(start)
+        return self.graph.iterdata(start=start, condition=condition)
+
+    def nodes(self):
+        for ident in self.graph:
+            node = self.graph.node_data(ident)
+            if node is not None:
+                yield self.graph.node_data(ident)
+
+
+    def get_edges(self, node):
+        start = self.getRawIdent(node)
+        _, _, outraw, incraw = self.graph.describe_node(start)
+        def iter_edges(lst, n):
+            seen = set()
+            for tpl in (self.graph.describe_edge(e) for e in lst):
+                ident = tpl[n]
+                if ident not in seen:
+                    yield self.findNode(ident)
+                    seen.add(ident)
+        return iter_edges(outraw, 3), iter_edges(incraw, 2)
+
+    def edgeData(self, fromNode, toNode):
+        start = self.getRawIdent(fromNode)
+        stop = self.getRawIdent(toNode)
+        edge = self.graph.edge_by_node(start, stop)
+        return self.graph.edge_data(edge)
+
+    def updateEdgeData(self, fromNode, toNode, edgeData):
+        start = self.getRawIdent(fromNode)
+        stop = self.getRawIdent(toNode)
+        edge = self.graph.edge_by_node(start, stop)
+        self.graph.update_edge_data(edge, edgeData)
+
+    def filterStack(self, filters):
+        """
+        Filter the ObjectGraph in-place by removing all edges to nodes that
+        do not match every filter in the given filter list
+
+        Returns a tuple containing the number of: (nodes_visited, nodes_removed, nodes_orphaned)
+        """
+        visited, removes, orphans = filter_stack(self.graph, self, filters)
+
+        for last_good, tail in orphans:
+            self.graph.add_edge(last_good, tail, edge_data='orphan')
+
+        for node in removes:
+            self.graph.hide_node(node)
+
+        return len(visited)-1, len(removes), len(orphans)
+
+    def removeNode(self, node):
+        """
+        Remove the given node from the graph if it exists
+        """
+        ident = self.getIdent(node)
+        if ident is not None:
+            self.graph.hide_node(ident)
+
+    def removeReference(self, fromnode, tonode):
+        """
+        Remove all edges from fromnode to tonode
+        """
+        if fromnode is None:
+            fromnode = self
+        fromident = self.getIdent(fromnode)
+        toident = self.getIdent(tonode)
+        if fromident is not None and toident is not None:
+            while True:
+                edge = self.graph.edge_by_node(fromident, toident)
+                if edge is None:
+                    break
+                self.graph.hide_edge(edge)
+
+    def getIdent(self, node):
+        """
+        Get the graph identifier for a node
+        """
+        ident = self.getRawIdent(node)
+        if ident is not None:
+            return ident
+        node = self.findNode(node)
+        if node is None:
+            return None
+        return node.graphident
+
+    def getRawIdent(self, node):
+        """
+        Get the identifier for a node object
+        """
+        if node is self:
+            return node
+        ident = getattr(node, 'graphident', None)
+        return ident
+
+    def __contains__(self, node):
+        return self.findNode(node) is not None
+
+    def findNode(self, node):
+        """
+        Find the node on the graph
+        """
+        ident = self.getRawIdent(node)
+        if ident is None:
+            ident = node
+        try:
+            return self.graph.node_data(ident)
+        except KeyError:
+            return None
+
+    def addNode(self, node):
+        """
+        Add a node to the graph referenced by the root
+        """
+        self.msg(4, "addNode", node)
+
+        try:
+            self.graph.restore_node(node.graphident)
+        except GraphError:
+            self.graph.add_node(node.graphident, node)
+
+    def createReference(self, fromnode, tonode, edge_data=None):
+        """
+        Create a reference from fromnode to tonode
+        """
+        if fromnode is None:
+            fromnode = self
+        fromident, toident = self.getIdent(fromnode), self.getIdent(tonode)
+        if fromident is None or toident is None:
+            return
+        self.msg(4, "createReference", fromnode, tonode, edge_data)
+        self.graph.add_edge(fromident, toident, edge_data=edge_data)
+
+    def createNode(self, cls, name, *args, **kw):
+        """
+        Add a node of type cls to the graph if it does not already exist
+        by the given name
+        """
+        m = self.findNode(name)
+        if m is None:
+            m = cls(name, *args, **kw)
+            self.addNode(m)
+        return m
+
+    def msg(self, level, s, *args):
+        """
+        Print a debug message with the given level
+        """
+        if s and level <= self.debug:
+            print ("%s%s %s" % ("  " * self.indent, s, ' '.join(map(repr, args))))
+
+    def msgin(self, level, s, *args):
+        """
+        Print a debug message and indent
+        """
+        if level <= self.debug:
+            self.msg(level, s, *args)
+            self.indent = self.indent + 1
+
+    def msgout(self, level, s, *args):
+        """
+        Dedent and print a debug message
+        """
+        if level <= self.debug:
+            self.indent = self.indent - 1
+            self.msg(level, s, *args)
diff --git a/catapult/telemetry/third_party/altgraph/altgraph/__init__.py b/catapult/telemetry/third_party/altgraph/altgraph/__init__.py
new file mode 100644
index 0000000..9f72c18
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/altgraph/__init__.py
@@ -0,0 +1,135 @@
+'''
+altgraph - a python graph library
+=================================
+
+altgraph is a fork of `graphlib <http://pygraphlib.sourceforge.net>`_ tailored
+to use newer Python 2.3+ features, including additional support used by the
+py2app suite (modulegraph and macholib, specifically).
+
+altgraph is a Python-based graph (network) representation and manipulation package.
+It started out as an extension to the `graph_lib module <http://www.ece.arizona.edu/~denny/python_nest/graph_lib_1.0.1.html>`_
+written by Nathan Denny, and has since been significantly optimized and expanded.
+
+The :class:`altgraph.Graph.Graph` class is loosely modeled after the `LEDA <http://www.algorithmic-solutions.com/enleda.htm>`_
+(Library of Efficient Datatypes) representation. The library
+includes methods for constructing graphs, BFS and DFS traversals,
+topological sort, finding connected components, and shortest paths, as well as a number of
+graph statistics functions. The library can also visualize graphs
+via `graphviz <http://www.research.att.com/sw/tools/graphviz/>`_.
+
+The package contains the following modules:
+
+    -  the :py:mod:`altgraph.Graph` module contains the :class:`~altgraph.Graph.Graph` class that stores the graph data
+
+    -  the :py:mod:`altgraph.GraphAlgo` module implements graph algorithms operating on graphs (:py:class:`~altgraph.Graph.Graph` instances)
+
+    -  the :py:mod:`altgraph.GraphStat` module contains functions for computing statistical measures on graphs
+
+    -  the :py:mod:`altgraph.GraphUtil` module contains functions for generating, reading and saving graphs
+
+    -  the :py:mod:`altgraph.Dot` module  contains functions for displaying graphs via `graphviz <http://www.research.att.com/sw/tools/graphviz/>`_
+
+    -  the :py:mod:`altgraph.ObjectGraph` module implements a graph of objects with a unique identifier
+
+Installation
+------------
+
+Download and unpack the archive then type::
+
+    python setup.py install
+
+This will install the library in the default location. For instructions on
+how to customize the install procedure read the output of::
+
+    python setup.py --help install
+
+To verify that the code works run the test suite::
+
+    python setup.py test
+
+Example usage
+-------------
+
+Let's assume that we want to analyze the graph below (links to the full picture) GRAPH_IMG.
+Our script might then look like the following::
+
+    from altgraph import Graph, GraphAlgo, Dot
+
+    # these are the edges
+    edges = [ (1,2), (2,4), (1,3), (2,4), (3,4), (4,5), (6,5),
+        (6,14), (14,15), (6, 15),  (5,7), (7, 8), (7,13), (12,8),
+        (8,13), (11,12), (11,9), (13,11), (9,13), (13,10) ]
+
+    # creates the graph
+    graph = Graph.Graph()
+    for head, tail in edges:
+        graph.add_edge(head, tail)
+
+    # do a forward bfs from 1 at most to 20
+    print(graph.forw_bfs(1))
+
+This will print the nodes in some breadth first order::
+
+    [1, 2, 3, 4, 5, 7, 8, 13, 11, 10, 12, 9]
+
+If we wanted to get the hop-distance from node 1 to node 8
+we could write::
+
+    print(graph.get_hops(1, 8))
+
+This will print the following::
+
+    [(1, 0), (2, 1), (3, 1), (4, 2), (5, 3), (7, 4), (8, 5)]
+
+Node 1 is at 0 hops since it is the starting node, nodes 2,3 are 1 hop away ...
+node 8 is 5 hops away. To find the shortest distance between two nodes you
+can use::
+
+    print(GraphAlgo.shortest_path(graph, 1, 12))
+
+It will print the nodes of one of the shortest paths (there may be more than one)::
+
+    [1, 2, 4, 5, 7, 13, 11, 12]
+
+To display the graph we can use the GraphViz backend::
+
+    dot = Dot.Dot(graph)
+
+    # display the graph on the monitor
+    dot.display()
+
+    # save it in an image file
+    dot.save_img(file_name='graph', file_type='gif')
+
+
+
+..
+  @author: U{Istvan Albert<http://www.personal.psu.edu/staff/i/u/iua1/>}
+
+  @license:  MIT License
+
+  Copyright (c) 2004 Istvan Albert unless otherwise noted.
+
+  Permission is hereby granted, free of charge, to any person obtaining a copy of this software
+  and associated documentation files (the "Software"), to deal in the Software without restriction,
+  including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
+  and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do
+  so.
+
+  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
+  INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+  PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
+  FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+  ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+  THE SOFTWARE.
+  @requires: Python 2.3 or higher
+
+  @newfield contributor: Contributors:
+  @contributor: U{Reka Albert <http://www.phys.psu.edu/~ralbert/>}
+
+'''
+import pkg_resources
+__version__ = pkg_resources.require('altgraph')[0].version
+
+class GraphError(ValueError):
+    pass
diff --git a/catapult/telemetry/third_party/altgraph/altgraph_tests/__init__.py b/catapult/telemetry/third_party/altgraph/altgraph_tests/__init__.py
new file mode 100644
index 0000000..6890389
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/altgraph_tests/__init__.py
@@ -0,0 +1 @@
+""" altgraph tests """
diff --git a/catapult/telemetry/third_party/altgraph/altgraph_tests/test_altgraph.py b/catapult/telemetry/third_party/altgraph/altgraph_tests/test_altgraph.py
new file mode 100644
index 0000000..2ca6b25
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/altgraph_tests/test_altgraph.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env py.test
+import os
+import sys
+
+from altgraph import Graph, GraphAlgo
+import unittest
+
+class BasicTests (unittest.TestCase):
+    def setUp(self):
+        self.edges = [
+            (1,2), (2,4), (1,3), (2,4), (3,4), (4,5), (6,5), (6,14), (14,15),
+            (6, 15), (5,7), (7, 8), (7,13), (12,8), (8,13), (11,12), (11,9),
+            (13,11), (9,13), (13,10)
+        ]
+
+        # these are the edges
+        self.store = {}
+        self.g = Graph.Graph()
+        for head, tail in self.edges:
+            self.store[head] = self.store[tail] = None
+            self.g.add_edge(head, tail)
+
+    def test_num_edges(self):
+        # check the parameters
+        self.assertEqual(self.g.number_of_nodes(), len(self.store))
+        self.assertEqual(self.g.number_of_edges(), len(self.edges))
+
+    def test_forw_bfs(self):
+        # do a forward bfs
+        self.assertEqual( self.g.forw_bfs(1),
+                [1, 2, 3, 4, 5, 7, 8, 13, 11, 10, 12, 9])
+
+
+    def test_get_hops(self):
+        # display the hops and hop numbers between nodes
+        self.assertEqual(self.g.get_hops(1, 8),
+                [(1, 0), (2, 1), (3, 1), (4, 2), (5, 3), (7, 4), (8, 5)])
+
+    def test_shortest_path(self):
+        self.assertEqual(GraphAlgo.shortest_path(self.g, 1, 12),
+                [1, 2, 4, 5, 7, 13, 11, 12])
+
+
+if __name__ == "__main__":  # pragma: no cover
+    unittest.main()
diff --git a/catapult/telemetry/third_party/altgraph/altgraph_tests/test_dot.py b/catapult/telemetry/third_party/altgraph/altgraph_tests/test_dot.py
new file mode 100644
index 0000000..83993da
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/altgraph_tests/test_dot.py
@@ -0,0 +1,370 @@
+import unittest
+import os
+
+from altgraph import Dot
+from altgraph import Graph
+from altgraph import GraphError
+
+
+class TestDot (unittest.TestCase):
+
+    def test_constructor(self):
+        g = Graph.Graph([
+                (1,2),
+                (1,3),
+                (1,4),
+                (2,4),
+                (2,6),
+                (2,7),
+                (7,4),
+                (6,1),
+            ]
+        )
+
+        dot = Dot.Dot(g)
+
+        self.assertEqual(dot.name, 'G')
+        self.assertEqual(dot.attr, {})
+        self.assertEqual(dot.temp_dot, 'tmp_dot.dot')
+        self.assertEqual(dot.temp_neo, 'tmp_neo.dot')
+        self.assertEqual(dot.dot, 'dot')
+        self.assertEqual(dot.dotty, 'dotty')
+        self.assertEqual(dot.neato, 'neato')
+        self.assertEqual(dot.type, 'digraph')
+
+        self.assertEqual(dot.nodes, dict([(x, {}) for x in g]))
+
+        edges = {}
+        for head in g:
+            edges[head] = {}
+            for tail in g.out_nbrs(head):
+                edges[head][tail] = {}
+
+        self.assertEqual(dot.edges[1], edges[1])
+        self.assertEqual(dot.edges, edges)
+
+
+        dot = Dot.Dot(g, nodes=[1,2],
+                edgefn=lambda node: list(sorted(g.out_nbrs(node)))[:-1],
+                nodevisitor=lambda node: {'label': node},
+                edgevisitor=lambda head, tail: {'label': (head, tail) },
+                name="testgraph",
+                dot='/usr/local/bin/dot',
+                dotty='/usr/local/bin/dotty',
+                neato='/usr/local/bin/neato',
+                graphtype="graph")
+
+        self.assertEqual(dot.name, 'testgraph')
+        self.assertEqual(dot.attr, {})
+        self.assertEqual(dot.temp_dot, 'tmp_dot.dot')
+        self.assertEqual(dot.temp_neo, 'tmp_neo.dot')
+        self.assertEqual(dot.dot, '/usr/local/bin/dot')
+        self.assertEqual(dot.dotty, '/usr/local/bin/dotty')
+        self.assertEqual(dot.neato, '/usr/local/bin/neato')
+        self.assertEqual(dot.type, 'graph')
+
+        self.assertEqual(dot.nodes, dict([(x, {'label': x}) for x in [1,2]]))
+
+        edges = {}
+        for head in [1,2]:
+            edges[head] = {}
+            for tail in list(sorted(g.out_nbrs(head)))[:-1]:
+                if tail not in [1,2]: continue
+                edges[head][tail] = {'label': (head, tail) }
+
+        self.assertEqual(dot.edges[1], edges[1])
+        self.assertEqual(dot.edges, edges)
+
+        self.assertRaises(GraphError, Dot.Dot, g, nodes=[1,2, 9])
+
+    def test_style(self):
+        g = Graph.Graph([])
+
+        dot = Dot.Dot(g)
+
+        self.assertEqual(dot.attr, {})
+
+        dot.style(key='value')
+        self.assertEqual(dot.attr, {'key': 'value'})
+
+        dot.style(key2='value2')
+        self.assertEqual(dot.attr, {'key2': 'value2'})
+
+    def test_node_style(self):
+        g = Graph.Graph([
+                (1,2),
+                (1,3),
+                (1,4),
+                (2,4),
+                (2,6),
+                (2,7),
+                (7,4),
+                (6,1),
+            ]
+        )
+
+        dot = Dot.Dot(g)
+
+        self.assertEqual(dot.nodes[1], {})
+
+        dot.node_style(1, key='value')
+        self.assertEqual(dot.nodes[1], {'key': 'value'})
+
+        dot.node_style(1, key2='value2')
+        self.assertEqual(dot.nodes[1], {'key2': 'value2'})
+        self.assertEqual(dot.nodes[2], {})
+
+        dot.all_node_style(key3='value3')
+        for n in g:
+            self.assertEqual(dot.nodes[n], {'key3': 'value3'})
+
+        self.assertTrue(9 not in dot.nodes)
+        dot.node_style(9, key='value')
+        self.assertEqual(dot.nodes[9], {'key': 'value'})
+
+    def test_edge_style(self):
+        g = Graph.Graph([
+                (1,2),
+                (1,3),
+                (1,4),
+                (2,4),
+                (2,6),
+                (2,7),
+                (7,4),
+                (6,1),
+            ]
+        )
+
+        dot = Dot.Dot(g)
+
+        self.assertEqual(dot.edges[1][2], {})
+        dot.edge_style(1,2, foo='bar')
+        self.assertEqual(dot.edges[1][2], {'foo': 'bar'})
+
+        dot.edge_style(1,2, foo2='2bar')
+        self.assertEqual(dot.edges[1][2], {'foo2': '2bar'})
+
+        self.assertEqual(dot.edges[1][3], {})
+
+        self.assertFalse(6 in dot.edges[1])
+        dot.edge_style(1,6, foo2='2bar')
+        self.assertEqual(dot.edges[1][6], {'foo2': '2bar'})
+
+        self.assertRaises(GraphError, dot.edge_style, 1, 9, a=1)
+        self.assertRaises(GraphError, dot.edge_style, 9, 1, a=1)
+
+
+    def test_iter(self):
+        g = Graph.Graph([
+                (1,2),
+                (1,3),
+                (1,4),
+                (2,4),
+                (2,6),
+                (2,7),
+                (7,4),
+                (6,1),
+            ]
+        )
+
+        dot = Dot.Dot(g)
+        dot.style(graph="foobar")
+        dot.node_style(1, key='value')
+        dot.node_style(2, key='another', key2='world')
+        dot.edge_style(1,4, key1='value1', key2='value2')
+        dot.edge_style(2,4, key1='valueA')
+
+        self.assertEqual(list(iter(dot)), list(dot.iterdot()))
+
+        for item in dot.iterdot():
+            self.assertTrue(isinstance(item, str))
+
+        first = list(dot.iterdot())[0]
+        self.assertEqual(first, "digraph %s {\n"%(dot.name,))
+
+        dot.type = 'graph'
+        first = list(dot.iterdot())[0]
+        self.assertEqual(first, "graph %s {\n"%(dot.name,))
+
+        dot.type = 'foo'
+        self.assertRaises(GraphError, list, dot.iterdot())
+        dot.type = 'digraph'
+
+        self.assertEqual(list(dot), [
+            'digraph G {\n',
+              'graph="foobar";',
+              '\n',
+
+            '\t"1" [',
+              'key="value",',
+            '];\n',
+
+            '\t"2" [',
+              'key="another",',
+              'key2="world",',
+            '];\n',
+
+            '\t"3" [',
+            '];\n',
+
+            '\t"4" [',
+            '];\n',
+
+            '\t"6" [',
+            '];\n',
+
+            '\t"7" [',
+            '];\n',
+
+            '\t"1" -> "2" [',
+            '];\n',
+
+            '\t"1" -> "3" [',
+            '];\n',
+
+            '\t"1" -> "4" [',
+              'key1="value1",',
+              'key2="value2",',
+            '];\n',
+
+             '\t"2" -> "4" [',
+               'key1="valueA",',
+             '];\n',
+
+             '\t"2" -> "6" [',
+             '];\n',
+
+             '\t"2" -> "7" [',
+             '];\n',
+
+             '\t"6" -> "1" [',
+             '];\n',
+
+             '\t"7" -> "4" [',
+             '];\n',
+           '}\n'])
+
+
+    def test_save(self):
+        g = Graph.Graph([
+                (1,2),
+                (1,3),
+                (1,4),
+                (2,4),
+                (2,6),
+                (2,7),
+                (7,4),
+                (6,1),
+            ]
+        )
+
+        dot = Dot.Dot(g)
+        dot.style(graph="foobar")
+        dot.node_style(1, key='value')
+        dot.node_style(2, key='another', key2='world')
+        dot.edge_style(1,4, key1='value1', key2='value2')
+        dot.edge_style(2,4, key1='valueA')
+
+        fn = 'test_dot.dot'
+        self.assertTrue(not os.path.exists(fn))
+
+        try:
+            dot.save_dot(fn)
+
+            fp = open(fn, 'r')
+            data = fp.read()
+            fp.close()
+            self.assertEqual(data, ''.join(dot))
+
+        finally:
+            if os.path.exists(fn):
+                os.unlink(fn)
+
+
+    def test_img(self):
+        g = Graph.Graph([
+                (1,2),
+                (1,3),
+                (1,4),
+                (2,4),
+                (2,6),
+                (2,7),
+                (7,4),
+                (6,1),
+            ]
+        )
+
+        dot = Dot.Dot(g, dot='/usr/local/bin/!!dot', dotty='/usr/local/bin/!!dotty', neato='/usr/local/bin/!!neato')
+        dot.style(size='10,10', rankdir='RL', page='5, 5' , ranksep=0.75)
+        dot.node_style(1, label='BASE_NODE',shape='box', color='blue')
+        dot.node_style(2, style='filled', fillcolor='red')
+        dot.edge_style(1,4, style='dotted')
+        dot.edge_style(2,4, arrowhead='dot', label='binds', labelangle='90')
+
+        system_cmds = []
+        def fake_system(cmd):
+            system_cmds.append(cmd)
+            return None
+
+        try:
+            real_system = os.system
+            os.system = fake_system
+
+            system_cmds = []
+            dot.save_img('foo')
+            self.assertEqual(system_cmds, ['/usr/local/bin/!!dot -Tgif tmp_dot.dot -o foo.gif'])
+
+            system_cmds = []
+            dot.save_img('foo', file_type='jpg')
+            self.assertEqual(system_cmds, ['/usr/local/bin/!!dot -Tjpg tmp_dot.dot -o foo.jpg'])
+
+            system_cmds = []
+            dot.save_img('bar', file_type='jpg', mode='neato')
+            self.assertEqual(system_cmds, [
+                '/usr/local/bin/!!neato -o tmp_dot.dot tmp_neo.dot',
+                '/usr/local/bin/!!dot -Tjpg tmp_dot.dot -o bar.jpg',
+            ])
+
+            system_cmds = []
+            dot.display()
+            self.assertEqual(system_cmds, [
+                '/usr/local/bin/!!dotty tmp_dot.dot'
+            ])
+
+            system_cmds = []
+            dot.display(mode='neato')
+            self.assertEqual(system_cmds, [
+                '/usr/local/bin/!!neato -o tmp_dot.dot tmp_neo.dot',
+                '/usr/local/bin/!!dotty tmp_dot.dot'
+            ])
+
+        finally:
+            if os.path.exists(dot.temp_dot):
+                os.unlink(dot.temp_dot)
+            if os.path.exists(dot.temp_neo):
+                os.unlink(dot.temp_neo)
+            os.system = real_system
+
+        if os.path.exists('/usr/local/bin/dot') and os.path.exists('/usr/local/bin/neato'):
+            try:
+                dot.dot='/usr/local/bin/dot'
+                dot.neato='/usr/local/bin/neato'
+                self.assertFalse(os.path.exists('foo.gif'))
+                dot.save_img('foo')
+                self.assertTrue(os.path.exists('foo.gif'))
+                os.unlink('foo.gif')
+
+                self.assertFalse(os.path.exists('foo.gif'))
+                dot.save_img('foo', mode='neato')
+                self.assertTrue(os.path.exists('foo.gif'))
+                os.unlink('foo.gif')
+
+            finally:
+                if os.path.exists(dot.temp_dot):
+                    os.unlink(dot.temp_dot)
+                if os.path.exists(dot.temp_neo):
+                    os.unlink(dot.temp_neo)
+
+
+if __name__ == "__main__": # pragma: no cover
+    unittest.main()
diff --git a/catapult/telemetry/third_party/altgraph/altgraph_tests/test_graph.py b/catapult/telemetry/third_party/altgraph/altgraph_tests/test_graph.py
new file mode 100644
index 0000000..553549f
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/altgraph_tests/test_graph.py
@@ -0,0 +1,644 @@
+import unittest
+
+from altgraph import GraphError
+from altgraph.Graph import Graph
+
+class TestGraph (unittest.TestCase):
+
+    def test_nodes(self):
+        graph = Graph()
+
+        self.assertEqual(graph.node_list(), [])
+
+        o1 = object()
+        o1b = object()
+        o2 = object()
+        graph.add_node(1, o1)
+        graph.add_node(1, o1b)
+        graph.add_node(2, o2)
+        graph.add_node(3)
+
+        self.assertRaises(TypeError, graph.add_node, [])
+
+        self.assertTrue(graph.node_data(1) is o1)
+        self.assertTrue(graph.node_data(2) is o2)
+        self.assertTrue(graph.node_data(3) is None)
+
+        self.assertTrue(1 in graph)
+        self.assertTrue(2 in graph)
+        self.assertTrue(3 in graph)
+
+        self.assertEqual(graph.number_of_nodes(), 3)
+        self.assertEqual(graph.number_of_hidden_nodes(), 0)
+        self.assertEqual(graph.hidden_node_list(), [])
+        self.assertEqual(list(sorted(graph)), [1, 2, 3])
+
+        graph.hide_node(1)
+        graph.hide_node(2)
+        graph.hide_node(3)
+
+
+        self.assertEqual(graph.number_of_nodes(), 0)
+        self.assertEqual(graph.number_of_hidden_nodes(), 3)
+        self.assertEqual(list(sorted(graph.hidden_node_list())), [1, 2, 3])
+
+        self.assertFalse(1 in graph)
+        self.assertFalse(2 in graph)
+        self.assertFalse(3 in graph)
+
+        graph.add_node(1)
+        self.assertFalse(1 in graph)
+
+        graph.restore_node(1)
+        self.assertTrue(1 in graph)
+        self.assertFalse(2 in graph)
+        self.assertFalse(3 in graph)
+
+        graph.restore_all_nodes()
+        self.assertTrue(1 in graph)
+        self.assertTrue(2 in graph)
+        self.assertTrue(3 in graph)
+
+        self.assertEqual(list(sorted(graph.node_list())), [1, 2, 3])
+
+        v = graph.describe_node(1)
+        self.assertEqual(v, (1, o1, [], []))
+
+    def test_edges(self):
+        graph = Graph()
+        graph.add_node(1)
+        graph.add_node(2)
+        graph.add_node(3)
+        graph.add_node(4)
+        graph.add_node(5)
+
+        self.assertTrue(isinstance(graph.edge_list(), list))
+
+        graph.add_edge(1, 2)
+        graph.add_edge(4, 5, 'a')
+
+        self.assertRaises(GraphError, graph.add_edge, 'a', 'b', create_nodes=False)
+
+        self.assertEqual(graph.number_of_hidden_edges(), 0)
+        self.assertEqual(graph.number_of_edges(), 2)
+        e = graph.edge_by_node(1, 2)
+        self.assertTrue(isinstance(e, int))
+        graph.hide_edge(e)
+        self.assertEqual(graph.number_of_hidden_edges(), 1)
+        self.assertEqual(graph.number_of_edges(), 1)
+        e2 = graph.edge_by_node(1, 2)
+        self.assertTrue(e2 is None)
+
+        graph.restore_edge(e)
+        e2 = graph.edge_by_node(1, 2)
+        self.assertEqual(e, e2)
+        self.assertEqual(graph.number_of_hidden_edges(), 0)
+
+        self.assertEqual(graph.number_of_edges(), 2)
+
+        e1 = graph.edge_by_node(1, 2)
+        e2 = graph.edge_by_node(4, 5)
+        graph.hide_edge(e1)
+        graph.hide_edge(e2)
+
+        self.assertEqual(graph.number_of_edges(), 0)
+        graph.restore_all_edges()
+        self.assertEqual(graph.number_of_edges(), 2)
+
+        self.assertEqual(graph.edge_by_id(e1), (1,2))
+        self.assertRaises(GraphError, graph.edge_by_id, (e1+1)*(e2+1)+1)
+
+        self.assertEqual(list(sorted(graph.edge_list())), [e1, e2])
+
+        self.assertEqual(graph.describe_edge(e1), (e1, 1, 1, 2))
+        self.assertEqual(graph.describe_edge(e2), (e2, 'a', 4, 5))
+
+        self.assertEqual(graph.edge_data(e1), 1)
+        self.assertEqual(graph.edge_data(e2), 'a')
+
+        self.assertEqual(graph.head(e2), 4)
+        self.assertEqual(graph.tail(e2), 5)
+
+        graph.add_edge(1, 3)
+        graph.add_edge(1, 5)
+        graph.add_edge(4, 1)
+
+        self.assertEqual(list(sorted(graph.out_nbrs(1))), [2, 3, 5])
+        self.assertEqual(list(sorted(graph.inc_nbrs(1))), [4])
+        self.assertEqual(list(sorted(graph.inc_nbrs(5))), [1, 4])
+        self.assertEqual(list(sorted(graph.all_nbrs(1))), [2, 3, 4, 5])
+
+        graph.add_edge(5, 1)
+        self.assertEqual(list(sorted(graph.all_nbrs(5))), [1, 4])
+
+        self.assertEqual(graph.out_degree(1), 3)
+        self.assertEqual(graph.inc_degree(2), 1)
+        self.assertEqual(graph.inc_degree(5), 2)
+        self.assertEqual(graph.all_degree(5), 3)
+
+        v = graph.out_edges(4)
+        self.assertTrue(isinstance(v, list))
+        self.assertEqual(graph.edge_by_id(v[0]), (4, 5))
+
+        v = graph.out_edges(1)
+        for e in v:
+            self.assertEqual(graph.edge_by_id(e)[0], 1)
+
+        v = graph.inc_edges(1)
+        self.assertTrue(isinstance(v, list))
+        self.assertEqual(graph.edge_by_id(v[0]), (4, 1))
+
+        v = graph.inc_edges(5)
+        for e in v:
+            self.assertEqual(graph.edge_by_id(e)[1], 5)
+
+        v = graph.all_edges(5)
+        for e in v:
+            self.assertTrue(graph.edge_by_id(e)[1] == 5 or graph.edge_by_id(e)[0] == 5)
+
+        e1 = graph.edge_by_node(1, 2)
+        self.assertTrue(isinstance(e1, int))
+        graph.hide_node(1)
+        self.assertRaises(GraphError, graph.edge_by_node, 1, 2)
+        graph.restore_node(1)
+        e2 = graph.edge_by_node(1, 2)
+        self.assertEqual(e1, e2)
+
+
+
+    def test_toposort(self):
+        graph = Graph()
+        graph.add_node(1)
+        graph.add_node(2)
+        graph.add_node(3)
+        graph.add_node(4)
+        graph.add_node(5)
+
+        graph.add_edge(1, 2)
+        graph.add_edge(1, 3)
+        graph.add_edge(2, 4)
+        graph.add_edge(3, 5)
+
+        ok, result = graph.forw_topo_sort()
+        self.assertTrue(ok)
+        for idx in range(1, 6):
+            self.assertTrue(idx in result)
+
+        self.assertTrue(result.index(1) < result.index(2))
+        self.assertTrue(result.index(1) < result.index(3))
+        self.assertTrue(result.index(2) < result.index(4))
+        self.assertTrue(result.index(3) < result.index(5))
+
+        ok, result = graph.back_topo_sort()
+        self.assertTrue(ok)
+        for idx in range(1, 6):
+            self.assertTrue(idx in result)
+        self.assertTrue(result.index(2) < result.index(1))
+        self.assertTrue(result.index(3) < result.index(1))
+        self.assertTrue(result.index(4) < result.index(2))
+        self.assertTrue(result.index(5) < result.index(3))
+
+
+        # Same graph as before, but with edges
+        # reversed, which means we should get
+        # the same results as before if using
+        # back_topo_sort rather than forw_topo_sort
+        # (and v.v.)
+
+        graph = Graph()
+        graph.add_node(1)
+        graph.add_node(2)
+        graph.add_node(3)
+        graph.add_node(4)
+        graph.add_node(5)
+
+        graph.add_edge(2, 1)
+        graph.add_edge(3, 1)
+        graph.add_edge(4, 2)
+        graph.add_edge(5, 3)
+
+        ok, result = graph.back_topo_sort()
+        self.assertTrue(ok)
+        for idx in range(1, 6):
+            self.assertTrue(idx in result)
+
+        self.assertTrue(result.index(1) < result.index(2))
+        self.assertTrue(result.index(1) < result.index(3))
+        self.assertTrue(result.index(2) < result.index(4))
+        self.assertTrue(result.index(3) < result.index(5))
+
+        ok, result = graph.forw_topo_sort()
+        self.assertTrue(ok)
+        for idx in range(1, 6):
+            self.assertTrue(idx in result)
+        self.assertTrue(result.index(2) < result.index(1))
+        self.assertTrue(result.index(3) < result.index(1))
+        self.assertTrue(result.index(4) < result.index(2))
+        self.assertTrue(result.index(5) < result.index(3))
+
+
+        # Create a cycle
+        graph.add_edge(1, 5)
+        ok, result = graph.forw_topo_sort()
+        self.assertFalse(ok)
+        ok, result = graph.back_topo_sort()
+        self.assertFalse(ok)
+
+    def test_bfs_subgraph(self):
+        graph = Graph()
+        graph.add_edge(1, 2)
+        graph.add_edge(1, 4)
+        graph.add_edge(2, 4)
+        graph.add_edge(4, 8)
+        graph.add_edge(4, 9)
+        graph.add_edge(4, 10)
+        graph.add_edge(8, 10)
+
+        subgraph = graph.forw_bfs_subgraph(10)
+        self.assertTrue(isinstance(subgraph, Graph))
+        self.assertEqual(subgraph.number_of_nodes(), 1)
+        self.assertTrue(10 in subgraph)
+        self.assertEqual(subgraph.number_of_edges(), 0)
+
+        subgraph = graph.forw_bfs_subgraph(4)
+        self.assertTrue(isinstance(subgraph, Graph))
+        self.assertEqual(subgraph.number_of_nodes(), 4)
+        self.assertTrue(4 in subgraph)
+        self.assertTrue(8 in subgraph)
+        self.assertTrue(9 in subgraph)
+        self.assertTrue(10 in subgraph)
+        self.assertEqual(subgraph.number_of_edges(), 4)
+        e = subgraph.edge_by_node(4, 8)
+        e = subgraph.edge_by_node(4, 9)
+        e = subgraph.edge_by_node(4, 10)
+        e = subgraph.edge_by_node(8, 10)
+
+        # same graph as before, but switch around
+        # edges. This results in the same test results
+        # but now for back_bfs_subgraph rather than
+        # forw_bfs_subgraph
+
+        graph = Graph()
+        graph.add_edge(2, 1)
+        graph.add_edge(4, 1)
+        graph.add_edge(4, 2)
+        graph.add_edge(8, 4)
+        graph.add_edge(9, 4)
+        graph.add_edge(10, 4)
+        graph.add_edge(10, 8)
+
+        subgraph = graph.back_bfs_subgraph(10)
+        self.assertTrue(isinstance(subgraph, Graph))
+        self.assertEqual(subgraph.number_of_nodes(), 1)
+        self.assertTrue(10 in subgraph)
+        self.assertEqual(subgraph.number_of_edges(), 0)
+
+        subgraph = graph.back_bfs_subgraph(4)
+        self.assertTrue(isinstance(subgraph, Graph))
+        self.assertEqual(subgraph.number_of_nodes(), 4)
+        self.assertTrue(4 in subgraph)
+        self.assertTrue(8 in subgraph)
+        self.assertTrue(9 in subgraph)
+        self.assertTrue(10 in subgraph)
+        self.assertEqual(subgraph.number_of_edges(), 4)
+        e = subgraph.edge_by_node(4, 8)
+        e = subgraph.edge_by_node(4, 9)
+        e = subgraph.edge_by_node(4, 10)
+        e = subgraph.edge_by_node(8, 10)
+
+    def test_iterdfs(self):
+        graph = Graph()
+        graph.add_edge("1", "1.1")
+        graph.add_edge("1", "1.2")
+        graph.add_edge("1", "1.3")
+        graph.add_edge("1.1", "1.1.1")
+        graph.add_edge("1.1", "1.1.2")
+        graph.add_edge("1.2", "1.2.1")
+        graph.add_edge("1.2", "1.2.2")
+        graph.add_edge("1.2.2", "1.2.2.1")
+        graph.add_edge("1.2.2", "1.2.2.2")
+        graph.add_edge("1.2.2", "1.2.2.3")
+
+        result = list(graph.iterdfs("1"))
+        self.assertEqual(result, [
+            '1', '1.3', '1.2', '1.2.2', '1.2.2.3', '1.2.2.2',
+            '1.2.2.1', '1.2.1', '1.1', '1.1.2', '1.1.1'
+        ])
+        result = list(graph.iterdfs("1", "1.2.1"))
+        self.assertEqual(result, [
+            '1', '1.3', '1.2', '1.2.2', '1.2.2.3', '1.2.2.2',
+            '1.2.2.1', '1.2.1'
+        ])
+
+        result = graph.forw_dfs("1")
+        self.assertEqual(result, [
+            '1', '1.3', '1.2', '1.2.2', '1.2.2.3', '1.2.2.2',
+            '1.2.2.1', '1.2.1', '1.1', '1.1.2', '1.1.1'
+        ])
+        result = graph.forw_dfs("1", "1.2.1")
+        self.assertEqual(result, [
+            '1', '1.3', '1.2', '1.2.2', '1.2.2.3', '1.2.2.2',
+            '1.2.2.1', '1.2.1'
+        ])
+
+        graph = Graph()
+        graph.add_edge("1.1", "1")
+        graph.add_edge("1.2", "1")
+        graph.add_edge("1.3", "1")
+        graph.add_edge("1.1.1", "1.1")
+        graph.add_edge("1.1.2", "1.1")
+        graph.add_edge("1.2.1", "1.2")
+        graph.add_edge("1.2.2", "1.2")
+        graph.add_edge("1.2.2.1", "1.2.2")
+        graph.add_edge("1.2.2.2", "1.2.2")
+        graph.add_edge("1.2.2.3", "1.2.2")
+
+        result = list(graph.iterdfs("1", forward=False))
+        self.assertEqual(result, [
+            '1', '1.3', '1.2', '1.2.2', '1.2.2.3', '1.2.2.2',
+            '1.2.2.1', '1.2.1', '1.1', '1.1.2', '1.1.1'
+        ])
+        result = list(graph.iterdfs("1", "1.2.1", forward=False))
+        self.assertEqual(result, [
+            '1', '1.3', '1.2', '1.2.2', '1.2.2.3', '1.2.2.2',
+            '1.2.2.1', '1.2.1'
+        ])
+        result = graph.back_dfs("1")
+        self.assertEqual(result, [
+            '1', '1.3', '1.2', '1.2.2', '1.2.2.3', '1.2.2.2',
+            '1.2.2.1', '1.2.1', '1.1', '1.1.2', '1.1.1'
+        ])
+        result = graph.back_dfs("1", "1.2.1")
+        self.assertEqual(result, [
+            '1', '1.3', '1.2', '1.2.2', '1.2.2.3', '1.2.2.2',
+            '1.2.2.1', '1.2.1'
+        ])
+
+
+        # Introduce a cycle:
+        graph.add_edge("1", "1.2")
+        result = list(graph.iterdfs("1", forward=False))
+        self.assertEqual(result, [
+            '1', '1.3', '1.2', '1.2.2', '1.2.2.3', '1.2.2.2',
+            '1.2.2.1', '1.2.1', '1.1', '1.1.2', '1.1.1'
+        ])
+
+        result = graph.back_dfs("1")
+        self.assertEqual(result, [
+            '1', '1.3', '1.2', '1.2.2', '1.2.2.3', '1.2.2.2',
+            '1.2.2.1', '1.2.1', '1.1', '1.1.2', '1.1.1'
+        ])
+
+
+    def test_iterdata(self):
+        graph = Graph()
+        graph.add_node("1", "I")
+        graph.add_node("1.1", "I.I")
+        graph.add_node("1.2", "I.II")
+        graph.add_node("1.3", "I.III")
+        graph.add_node("1.1.1", "I.I.I")
+        graph.add_node("1.1.2", "I.I.II")
+        graph.add_node("1.2.1", "I.II.I")
+        graph.add_node("1.2.2", "I.II.II")
+        graph.add_node("1.2.2.1", "I.II.II.I")
+        graph.add_node("1.2.2.2", "I.II.II.II")
+        graph.add_node("1.2.2.3", "I.II.II.III")
+
+        graph.add_edge("1", "1.1")
+        graph.add_edge("1", "1.2")
+        graph.add_edge("1", "1.3")
+        graph.add_edge("1.1", "1.1.1")
+        graph.add_edge("1.1", "1.1.2")
+        graph.add_edge("1.2", "1.2.1")
+        graph.add_edge("1.2", "1.2.2")
+        graph.add_edge("1.2.2", "1.2.2.1")
+        graph.add_edge("1.2.2", "1.2.2.2")
+        graph.add_edge("1.2.2", "1.2.2.3")
+
+        result = list(graph.iterdata("1", forward=True))
+        self.assertEqual(result, [
+            'I', 'I.III', 'I.II', 'I.II.II', 'I.II.II.III', 'I.II.II.II',
+            'I.II.II.I', 'I.II.I', 'I.I', 'I.I.II', 'I.I.I'
+        ])
+
+        result = list(graph.iterdata("1", end="1.2.1", forward=True))
+        self.assertEqual(result, [
+            'I', 'I.III', 'I.II', 'I.II.II', 'I.II.II.III', 'I.II.II.II',
+            'I.II.II.I', 'I.II.I'
+        ])
+
+        result = list(graph.iterdata("1", condition=lambda n: len(n) < 6, forward=True))
+        self.assertEqual(result, [
+            'I', 'I.III', 'I.II',
+            'I.I', 'I.I.I'
+        ])
+
+
+        # And the reverse option:
+        graph = Graph()
+        graph.add_node("1", "I")
+        graph.add_node("1.1", "I.I")
+        graph.add_node("1.2", "I.II")
+        graph.add_node("1.3", "I.III")
+        graph.add_node("1.1.1", "I.I.I")
+        graph.add_node("1.1.2", "I.I.II")
+        graph.add_node("1.2.1", "I.II.I")
+        graph.add_node("1.2.2", "I.II.II")
+        graph.add_node("1.2.2.1", "I.II.II.I")
+        graph.add_node("1.2.2.2", "I.II.II.II")
+        graph.add_node("1.2.2.3", "I.II.II.III")
+
+        graph.add_edge("1.1", "1")
+        graph.add_edge("1.2", "1")
+        graph.add_edge("1.3", "1")
+        graph.add_edge("1.1.1", "1.1")
+        graph.add_edge("1.1.2", "1.1")
+        graph.add_edge("1.2.1", "1.2")
+        graph.add_edge("1.2.2", "1.2")
+        graph.add_edge("1.2.2.1", "1.2.2")
+        graph.add_edge("1.2.2.2", "1.2.2")
+        graph.add_edge("1.2.2.3", "1.2.2")
+
+        result = list(graph.iterdata("1", forward=False))
+        self.assertEqual(result, [
+            'I', 'I.III', 'I.II', 'I.II.II', 'I.II.II.III', 'I.II.II.II',
+            'I.II.II.I', 'I.II.I', 'I.I', 'I.I.II', 'I.I.I'
+        ])
+
+        result = list(graph.iterdata("1", end="1.2.1", forward=False))
+        self.assertEqual(result, [
+            'I', 'I.III', 'I.II', 'I.II.II', 'I.II.II.III', 'I.II.II.II',
+            'I.II.II.I', 'I.II.I'
+        ])
+
+        result = list(graph.iterdata("1", condition=lambda n: len(n) < 6, forward=False))
+        self.assertEqual(result, [
+            'I', 'I.III', 'I.II',
+            'I.I', 'I.I.I'
+        ])
+
+    def test_bfs(self):
+        graph = Graph()
+        graph.add_edge("1", "1.1")
+        graph.add_edge("1.1", "1.1.1")
+        graph.add_edge("1.1", "1.1.2")
+        graph.add_edge("1.1.2", "1.1.2.1")
+        graph.add_edge("1.1.2", "1.1.2.2")
+        graph.add_edge("1", "1.2")
+        graph.add_edge("1", "1.3")
+        graph.add_edge("1.2", "1.2.1")
+
+        self.assertEqual(graph.forw_bfs("1"),
+                ['1', '1.1', '1.2', '1.3', '1.1.1', '1.1.2', '1.2.1', '1.1.2.1', '1.1.2.2'])
+        self.assertEqual(graph.forw_bfs("1", "1.1.1"),
+                ['1', '1.1', '1.2', '1.3', '1.1.1'])
+
+
+        # And the "reverse" graph
+        graph = Graph()
+        graph.add_edge("1.1", "1")
+        graph.add_edge("1.1.1", "1.1")
+        graph.add_edge("1.1.2", "1.1")
+        graph.add_edge("1.1.2.1", "1.1.2")
+        graph.add_edge("1.1.2.2", "1.1.2")
+        graph.add_edge("1.2", "1")
+        graph.add_edge("1.3", "1")
+        graph.add_edge("1.2.1", "1.2")
+
+        self.assertEqual(graph.back_bfs("1"),
+                ['1', '1.1', '1.2', '1.3', '1.1.1', '1.1.2', '1.2.1', '1.1.2.1', '1.1.2.2'])
+        self.assertEqual(graph.back_bfs("1", "1.1.1"),
+                ['1', '1.1', '1.2', '1.3', '1.1.1'])
+
+
+
+        # check cycle handling
+        graph.add_edge("1", "1.2.1")
+        self.assertEqual(graph.back_bfs("1"),
+                ['1', '1.1', '1.2', '1.3', '1.1.1', '1.1.2', '1.2.1', '1.1.2.1', '1.1.2.2'])
+
+
+    def test_connected(self):
+        graph = Graph()
+        graph.add_node(1)
+        graph.add_node(2)
+        graph.add_node(3)
+        graph.add_node(4)
+
+        self.assertFalse(graph.connected())
+
+        graph.add_edge(1, 2)
+        graph.add_edge(3, 4)
+        self.assertFalse(graph.connected())
+
+        graph.add_edge(2, 3)
+        graph.add_edge(4, 1)
+        self.assertTrue(graph.connected())
+
+    def test_edges_complex(self):
+        g = Graph()
+        g.add_edge(1, 2)
+        e = g.edge_by_node(1,2)
+        g.hide_edge(e)
+        g.hide_node(2)
+        self.assertRaises(GraphError, g.restore_edge, e)
+
+        g.restore_all_edges()
+        self.assertRaises(GraphError, g.edge_by_id, e)
+
+    def test_clust_coef(self):
+        g = Graph()
+        g.add_edge(1, 2)
+        g.add_edge(1, 3)
+        g.add_edge(1, 4)
+        self.assertEqual(g.clust_coef(1), 0)
+
+        g.add_edge(2, 5)
+        g.add_edge(3, 5)
+        g.add_edge(4, 5)
+        self.assertEqual(g.clust_coef(1), 0)
+
+        g.add_edge(2, 3)
+        self.assertEqual(g.clust_coef(1), 1./6)
+        g.add_edge(2, 4)
+        self.assertEqual(g.clust_coef(1), 2./6)
+        g.add_edge(4, 2)
+        self.assertEqual(g.clust_coef(1), 3./6)
+
+        g.add_edge(2, 3)
+        g.add_edge(2, 4)
+        g.add_edge(3, 4)
+        g.add_edge(3, 2)
+        g.add_edge(4, 2)
+        g.add_edge(4, 3)
+        self.assertEqual(g.clust_coef(1), 1)
+
+
+    def test_get_hops(self):
+        graph = Graph()
+        graph.add_edge(1, 2)
+        graph.add_edge(1, 3)
+        graph.add_edge(2, 4)
+        graph.add_edge(4, 5)
+        graph.add_edge(5, 7)
+        graph.add_edge(7, 8)
+
+        self.assertEqual(graph.get_hops(1),
+            [(1, 0), (2, 1), (3, 1), (4, 2), (5, 3), (7, 4), (8, 5)])
+
+        self.assertEqual(graph.get_hops(1, 5),
+            [(1, 0), (2, 1), (3, 1), (4, 2), (5, 3)])
+
+        graph.add_edge(5, 1)
+        graph.add_edge(7, 1)
+        graph.add_edge(7, 4)
+
+        self.assertEqual(graph.get_hops(1),
+            [(1, 0), (2, 1), (3, 1), (4, 2), (5, 3), (7, 4), (8, 5)])
+
+        # And the reverse graph
+        graph = Graph()
+        graph.add_edge(2, 1)
+        graph.add_edge(3, 1)
+        graph.add_edge(4, 2)
+        graph.add_edge(5, 4)
+        graph.add_edge(7, 5)
+        graph.add_edge(8, 7)
+
+        self.assertEqual(graph.get_hops(1, forward=False),
+            [(1, 0), (2, 1), (3, 1), (4, 2), (5, 3), (7, 4), (8, 5)])
+
+        self.assertEqual(graph.get_hops(1, 5, forward=False),
+            [(1, 0), (2, 1), (3, 1), (4, 2), (5, 3)])
+
+        graph.add_edge(1, 5)
+        graph.add_edge(1, 7)
+        graph.add_edge(4, 7)
+
+        self.assertEqual(graph.get_hops(1, forward=False),
+            [(1, 0), (2, 1), (3, 1), (4, 2), (5, 3), (7, 4), (8, 5)])
+
+
+    def test_constructor(self):
+        graph = Graph(iter([
+                (1, 2),
+                (2, 3, 'a'),
+                (1, 3),
+                (3, 4),
+            ]))
+        self.assertEqual(graph.number_of_nodes(), 4)
+        self.assertEqual(graph.number_of_edges(), 4)
+        try:
+            graph.edge_by_node(1,2)
+            graph.edge_by_node(2,3)
+            graph.edge_by_node(1,3)
+            graph.edge_by_node(3,4)
+        except GraphError:
+            self.fail("Incorrect graph")
+
+        self.assertEqual(graph.edge_data(graph.edge_by_node(2, 3)), 'a')
+
+        self.assertRaises(GraphError, Graph, [(1,2,3,4)])
+
+if __name__ == "__main__": # pragma: no cover
+    unittest.main()
diff --git a/catapult/telemetry/third_party/altgraph/altgraph_tests/test_graphstat.py b/catapult/telemetry/third_party/altgraph/altgraph_tests/test_graphstat.py
new file mode 100644
index 0000000..b628b6f
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/altgraph_tests/test_graphstat.py
@@ -0,0 +1,70 @@
+import unittest
+
+from altgraph import GraphStat
+from altgraph import Graph
+import sys
+
+class TestDegreesDist (unittest.TestCase):
+
+    def test_simple(self):
+        a = Graph.Graph()
+        self.assertEqual(GraphStat.degree_dist(a), [])
+
+        a.add_node(1)
+        a.add_node(2)
+        a.add_node(3)
+
+        self.assertEqual(GraphStat.degree_dist(a), GraphStat._binning([0, 0, 0]))
+
+        for x in range(100):
+            a.add_node(x)
+
+        for x in range(1, 100):
+            for y in range(1, 50):
+                if x % y == 0:
+                    a.add_edge(x, y)
+
+        counts_inc = []
+        counts_out = []
+        for n in a:
+            counts_inc.append(a.inc_degree(n))
+            counts_out.append(a.out_degree(n))
+
+        self.assertEqual(GraphStat.degree_dist(a), GraphStat._binning(counts_out))
+        self.assertEqual(GraphStat.degree_dist(a, mode='inc'), GraphStat._binning(counts_inc))
+
+class TestBinning (unittest.TestCase):
+    def test_simple(self):
+
+        # Binning [0, 100) into 10 bins
+        a = list(range(100))
+        out = GraphStat._binning(a, limits=(0, 100), bin_num=10)
+
+        self.assertEqual(out,
+                [ (x*1.0, 10) for x in range(5, 100, 10) ])
+
+
+        # Check that outliers are ignored.
+        a = list(range(100))
+        out = GraphStat._binning(a, limits=(0, 90), bin_num=9)
+
+        self.assertEqual(out,
+                [ (x*1.0, 10) for x in range(5, 90, 10) ])
+
+
+        out = GraphStat._binning(a, limits=(0, 100), bin_num=15)
+        binSize = 100 / 15.0
+        result = [0]*15
+        for i in range(100):
+            bin = int(i/binSize)
+            try:
+                result[bin] += 1
+            except IndexError:
+                pass
+
+        result = [ (i * binSize + binSize/2, result[i]) for i in range(len(result))]
+
+        self.assertEqual(result, out)
+
+if __name__ == "__main__": # pragma: no cover
+    unittest.main()
diff --git a/catapult/telemetry/third_party/altgraph/altgraph_tests/test_graphutil.py b/catapult/telemetry/third_party/altgraph/altgraph_tests/test_graphutil.py
new file mode 100644
index 0000000..c116623
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/altgraph_tests/test_graphutil.py
@@ -0,0 +1,140 @@
+import unittest
+from altgraph import GraphUtil
+from altgraph import Graph, GraphError
+
+class TestGraphUtil (unittest.TestCase):
+
+    def test_generate_random(self):
+        g =  GraphUtil.generate_random_graph(10, 50)
+        self.assertEqual(g.number_of_nodes(), 10)
+        self.assertEqual(g.number_of_edges(), 50)
+
+        seen = set()
+
+        for e in g.edge_list():
+            h, t = g.edge_by_id(e)
+            self.assertFalse(h == t)
+            self.assertTrue((h, t) not in seen)
+            seen.add((h, t))
+
+        g =  GraphUtil.generate_random_graph(5, 30, multi_edges=True)
+        self.assertEqual(g.number_of_nodes(), 5)
+        self.assertEqual(g.number_of_edges(), 30)
+
+        seen = set()
+
+        for e in g.edge_list():
+            h, t = g.edge_by_id(e)
+            self.assertFalse(h == t)
+            if (h, t) in seen:
+                break
+            seen.add((h, t))
+
+        else:
+            self.fail("no duplicates?")
+
+        g =  GraphUtil.generate_random_graph(5, 21, self_loops=True)
+        self.assertEqual(g.number_of_nodes(), 5)
+        self.assertEqual(g.number_of_edges(), 21)
+
+        seen = set()
+
+        for e in g.edge_list():
+            h, t = g.edge_by_id(e)
+            self.assertFalse((h, t) in seen)
+            if h == t:
+                break
+            seen.add((h, t))
+
+        else:
+            self.fail("no self loops?")
+
+        self.assertRaises(GraphError, GraphUtil.generate_random_graph, 5, 21)
+        g = GraphUtil.generate_random_graph(5, 21, True)
+        self.assertRaises(GraphError, GraphUtil.generate_random_graph, 5, 26, True)
+
+    def test_generate_scale_free(self):
+        graph = GraphUtil.generate_scale_free_graph(50, 10)
+        self.assertEqual(graph.number_of_nodes(), 500)
+
+        counts = {}
+        for node in graph:
+            degree = graph.inc_degree(node)
+            try:
+                counts[degree] += 1
+            except KeyError:
+                counts[degree] = 1
+
+        total_counts = sum(counts.values())
+        P = {}
+        for degree, count in counts.items():
+            P[degree] = count * 1.0 / total_counts
+
+        # XXX: use algorithm <http://stackoverflow.com/questions/3433486/how-to-do-exponential-and-logarithmic-curve-fitting-in-python-i-found-only-polyn>
+        # to check if P[degree] ~ degree ** G (for some G)
+
+        #print sorted(P.items())
+
+        #print sorted([(count, degree) for degree, count in counts.items()])
+
+        #self.fail("missing tests for GraphUtil.generate_scale_free_graph")
+
+    def test_filter_stack(self):
+        g = Graph.Graph()
+        g.add_node("1", "N.1")
+        g.add_node("1.1", "N.1.1")
+        g.add_node("1.1.1", "N.1.1.1")
+        g.add_node("1.1.2", "N.1.1.2")
+        g.add_node("1.1.3", "N.1.1.3")
+        g.add_node("1.1.1.1", "N.1.1.1.1")
+        g.add_node("1.1.1.2", "N.1.1.1.2")
+        g.add_node("1.1.2.1", "N.1.1.2.1")
+        g.add_node("1.1.2.2", "N.1.1.2.2")
+        g.add_node("1.1.2.3", "N.1.1.2.3")
+        g.add_node("2", "N.2")
+
+        g.add_edge("1", "1.1")
+        g.add_edge("1.1", "1.1.1")
+        g.add_edge("1.1", "1.1.2")
+        g.add_edge("1.1", "1.1.3")
+        g.add_edge("1.1.1", "1.1.1.1")
+        g.add_edge("1.1.1", "1.1.1.2")
+        g.add_edge("1.1.2", "1.1.2.1")
+        g.add_edge("1.1.2", "1.1.2.2")
+        g.add_edge("1.1.2", "1.1.2.3")
+
+        v, r, o =  GraphUtil.filter_stack(g, "1", [
+            lambda n: n != "N.1.1.1", lambda n: n != "N.1.1.2.3" ])
+
+        self.assertEqual(v,
+            set(["1", "1.1", "1.1.1", "1.1.2", "1.1.3",
+                "1.1.1.1", "1.1.1.2", "1.1.2.1", "1.1.2.2",
+                "1.1.2.3"]))
+        self.assertEqual(r, set([
+                "1.1.1", "1.1.2.3"]))
+
+        o.sort()
+        self.assertEqual(o,
+            [
+                ("1.1", "1.1.1.1"),
+                ("1.1", "1.1.1.2")
+            ])
+
+        v, r, o =  GraphUtil.filter_stack(g, "1", [
+            lambda n: n != "N.1.1.1", lambda n: n != "N.1.1.1.2" ])
+
+        self.assertEqual(v,
+            set(["1", "1.1", "1.1.1", "1.1.2", "1.1.3",
+                "1.1.1.1", "1.1.1.2", "1.1.2.1", "1.1.2.2",
+                "1.1.2.3"]))
+        self.assertEqual(r, set([
+                "1.1.1", "1.1.1.2"]))
+
+        self.assertEqual(o,
+            [
+                ("1.1", "1.1.1.1"),
+            ])
+
+
+if __name__ == "__main__": # pragma: no cover
+    unittest.main()
diff --git a/catapult/telemetry/third_party/altgraph/altgraph_tests/test_object_graph.py b/catapult/telemetry/third_party/altgraph/altgraph_tests/test_object_graph.py
new file mode 100644
index 0000000..9035607
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/altgraph_tests/test_object_graph.py
@@ -0,0 +1,349 @@
+import unittest
+import sys
+from altgraph.ObjectGraph import ObjectGraph
+from altgraph.Graph import Graph
+
+try:
+    from StringIO import StringIO
+except ImportError:
+    from io import StringIO
+
+
+class Node (object):
+    def __init__(self, graphident):
+        self.graphident = graphident
+
+class SubNode (Node):
+    pass
+
+class ArgNode (object):
+    def __init__(self, graphident, *args, **kwds):
+        self.graphident = graphident
+        self.args = args
+        self.kwds = kwds
+
+    def __repr__(self):
+        return '<ArgNode %s>'%(self.graphident,)
+
+class TestObjectGraph (unittest.TestCase):
+
+    def test_constructor(self):
+        graph = ObjectGraph()
+        self.assertTrue(isinstance(graph, ObjectGraph))
+
+        g = Graph()
+        graph = ObjectGraph(g)
+        self.assertTrue(graph.graph is g)
+        self.assertEqual(graph.debug, 0)
+        self.assertEqual(graph.indent, 0)
+
+        graph = ObjectGraph(debug=5)
+        self.assertEqual(graph.debug, 5)
+
+    def test_repr(self):
+        graph = ObjectGraph()
+        self.assertEqual(repr(graph), '<ObjectGraph>')
+
+
+    def testNodes(self):
+        graph = ObjectGraph()
+        n1 = Node("n1")
+        n2 = Node("n2")
+        n3 = Node("n3")
+        n4 = Node("n4")
+
+        n1b = Node("n1")
+
+        self.assertTrue(graph.getIdent(graph)  is graph)
+        self.assertTrue(graph.getRawIdent(graph)  is graph)
+
+        graph.addNode(n1)
+        graph.addNode(n2)
+        graph.addNode(n3)
+
+        self.assertTrue(n1 in graph)
+        self.assertFalse(n4 in graph)
+        self.assertTrue("n1" in graph)
+        self.assertFalse("n4" in graph)
+
+        self.assertTrue(graph.findNode(n1) is n1)
+        self.assertTrue(graph.findNode(n1b) is n1)
+        self.assertTrue(graph.findNode(n2) is n2)
+        self.assertTrue(graph.findNode(n4) is None)
+        self.assertTrue(graph.findNode("n1") is n1)
+        self.assertTrue(graph.findNode("n2") is n2)
+        self.assertTrue(graph.findNode("n4") is None)
+
+        self.assertEqual(graph.getRawIdent(n1), "n1")
+        self.assertEqual(graph.getRawIdent(n1b), "n1")
+        self.assertEqual(graph.getRawIdent(n4), "n4")
+        self.assertEqual(graph.getRawIdent("n1"), None)
+
+        self.assertEqual(graph.getIdent(n1), "n1")
+        self.assertEqual(graph.getIdent(n1b), "n1")
+        self.assertEqual(graph.getIdent(n4), "n4")
+        self.assertEqual(graph.getIdent("n1"), "n1")
+
+        self.assertTrue(n3 in graph)
+        graph.removeNode(n3)
+        self.assertTrue(n3 not in graph)
+        graph.addNode(n3)
+        self.assertTrue(n3 in graph)
+
+        n = graph.createNode(SubNode, "n1")
+        self.assertTrue(n is n1)
+
+        n = graph.createNode(SubNode, "n8")
+        self.assertTrue(isinstance(n, SubNode))
+        self.assertTrue(n in graph)
+        self.assertTrue(graph.findNode("n8") is n)
+
+        n = graph.createNode(ArgNode, "args", 1, 2, 3, a='a', b='b')
+        self.assertTrue(isinstance(n, ArgNode))
+        self.assertTrue(n in graph)
+        self.assertTrue(graph.findNode("args") is n)
+        self.assertEqual(n.args, (1, 2, 3))
+        self.assertEqual(n.kwds, {'a':'a', 'b':'b'})
+
+    def testEdges(self):
+        graph = ObjectGraph()
+        n1 = graph.createNode(ArgNode, "n1", 1)
+        n2 = graph.createNode(ArgNode, "n2", 1)
+        n3 = graph.createNode(ArgNode, "n3", 1)
+        n4 = graph.createNode(ArgNode, "n4", 1)
+
+        graph.createReference(n1, n2, "n1-n2")
+        graph.createReference("n1", "n3", "n1-n3")
+        graph.createReference("n2", n3)
+
+        g = graph.graph
+        e = g.edge_by_node("n1", "n2")
+        self.assertTrue(e is not None)
+        self.assertEqual(g.edge_data(e), "n1-n2")
+
+        e = g.edge_by_node("n1", "n3")
+        self.assertTrue(e is not None)
+        self.assertEqual(g.edge_data(e), "n1-n3")
+
+        e = g.edge_by_node("n2", "n3")
+        self.assertTrue(e is not None)
+        self.assertEqual(g.edge_data(e), None)
+
+        e = g.edge_by_node("n1", "n4")
+        self.assertTrue(e is None)
+
+        graph.removeReference(n1, n2)
+        e = g.edge_by_node("n1", "n2")
+        self.assertTrue(e is None)
+
+        graph.removeReference("n1", "n3")
+        e = g.edge_by_node("n1", "n3")
+        self.assertTrue(e is None)
+
+        graph.createReference(n1, n2, "foo")
+        e = g.edge_by_node("n1", "n2")
+        self.assertTrue(e is not None)
+        self.assertEqual(g.edge_data(e), "foo")
+
+
+    def test_flatten(self):
+        graph = ObjectGraph()
+        n1 = graph.createNode(ArgNode, "n1", 1)
+        n2 = graph.createNode(ArgNode, "n2", 2)
+        n3 = graph.createNode(ArgNode, "n3", 3)
+        n4 = graph.createNode(ArgNode, "n4", 4)
+        n5 = graph.createNode(ArgNode, "n5", 5)
+        n6 = graph.createNode(ArgNode, "n6", 6)
+        n7 = graph.createNode(ArgNode, "n7", 7)
+        n8 = graph.createNode(ArgNode, "n8", 8)
+
+        graph.createReference(graph, n1)
+        graph.createReference(graph, n7)
+        graph.createReference(n1, n2)
+        graph.createReference(n1, n4)
+        graph.createReference(n2, n3)
+        graph.createReference(n2, n5)
+        graph.createReference(n5, n6)
+        graph.createReference(n4, n6)
+        graph.createReference(n4, n2)
+
+        self.assertFalse(isinstance(graph.flatten(), list))
+
+        fl = list(graph.flatten())
+        self.assertTrue(n1 in fl)
+        self.assertTrue(n2 in fl)
+        self.assertTrue(n3 in fl)
+        self.assertTrue(n4 in fl)
+        self.assertTrue(n5 in fl)
+        self.assertTrue(n6 in fl)
+        self.assertTrue(n7 in fl)
+        self.assertFalse(n8 in fl)
+
+        fl = list(graph.flatten(start=n2))
+        self.assertFalse(n1 in fl)
+        self.assertTrue(n2 in fl)
+        self.assertTrue(n3 in fl)
+        self.assertFalse(n4 in fl)
+        self.assertTrue(n5 in fl)
+        self.assertTrue(n6 in fl)
+        self.assertFalse(n7 in fl)
+        self.assertFalse(n8 in fl)
+
+        graph.createReference(n1, n5)
+        fl = list(graph.flatten(lambda n: n.args[0] % 2 != 0))
+        self.assertTrue(n1 in fl)
+        self.assertFalse(n2 in fl)
+        self.assertFalse(n3 in fl)
+        self.assertFalse(n4 in fl)
+        self.assertTrue(n5 in fl)
+        self.assertFalse(n6 in fl)
+        self.assertTrue(n7 in fl)
+        self.assertFalse(n8 in fl)
+
+    def test_iter_nodes(self):
+        graph = ObjectGraph()
+        n1 = graph.createNode(ArgNode, "n1", 1)
+        n2 = graph.createNode(ArgNode, "n2", 2)
+        n3 = graph.createNode(ArgNode, "n3", 3)
+        n4 = graph.createNode(ArgNode, "n4", 4)
+        n5 = graph.createNode(ArgNode, "n5", 5)
+        n6 = graph.createNode(ArgNode, "n6", 5)
+
+        nodes = graph.nodes()
+        if sys.version[0] == '2':
+            self.assertTrue(hasattr(nodes, 'next'))
+        else:
+            self.assertTrue(hasattr(nodes, '__next__'))
+        self.assertTrue(hasattr(nodes, '__iter__'))
+
+        nodes = list(nodes)
+        self.assertEqual(len(nodes), 6)
+        self.assertTrue(n1 in nodes)
+        self.assertTrue(n2 in nodes)
+        self.assertTrue(n3 in nodes)
+        self.assertTrue(n4 in nodes)
+        self.assertTrue(n5 in nodes)
+        self.assertTrue(n6 in nodes)
+
+    def test_get_edges(self):
+        graph = ObjectGraph()
+        n1 = graph.createNode(ArgNode, "n1", 1)
+        n2 = graph.createNode(ArgNode, "n2", 2)
+        n3 = graph.createNode(ArgNode, "n3", 3)
+        n4 = graph.createNode(ArgNode, "n4", 4)
+        n5 = graph.createNode(ArgNode, "n5", 5)
+        n6 = graph.createNode(ArgNode, "n6", 5)
+
+        graph.createReference(n1, n2)
+        graph.createReference(n1, n3)
+        graph.createReference(n3, n1)
+        graph.createReference(n5, n1)
+        graph.createReference(n2, n4)
+        graph.createReference(n2, n5)
+        graph.createReference(n6, n2)
+
+        outs, ins = graph.get_edges(n1)
+
+        self.assertFalse(isinstance(outs, list))
+        self.assertFalse(isinstance(ins, list))
+
+        ins = list(ins)
+        outs = list(outs)
+
+
+        self.assertTrue(n1 not in outs)
+        self.assertTrue(n2 in outs)
+        self.assertTrue(n3 in outs)
+        self.assertTrue(n4 not in outs)
+        self.assertTrue(n5 not in outs)
+        self.assertTrue(n6 not in outs)
+
+        self.assertTrue(n1 not in ins)
+        self.assertTrue(n2 not in ins)
+        self.assertTrue(n3 in ins)
+        self.assertTrue(n4 not in ins)
+        self.assertTrue(n5 in ins)
+        self.assertTrue(n6 not in ins)
+
+    def test_filterStack(self):
+        graph = ObjectGraph()
+        n1 = graph.createNode(ArgNode, "n1", 0)
+        n11 = graph.createNode(ArgNode, "n1.1", 1)
+        n12 = graph.createNode(ArgNode, "n1.2", 0)
+        n111 = graph.createNode(ArgNode, "n1.1.1", 0)
+        n112 = graph.createNode(ArgNode, "n1.1.2",2)
+        n2 = graph.createNode(ArgNode, "n2", 0)
+        n3 = graph.createNode(ArgNode, "n2", 0)
+
+        graph.createReference(None, n1)
+        graph.createReference(None, n2)
+        graph.createReference(n1, n11)
+        graph.createReference(n1, n12)
+        graph.createReference(n11, n111)
+        graph.createReference(n11, n112)
+
+        self.assertTrue(n1 in graph)
+        self.assertTrue(n2 in graph)
+        self.assertTrue(n11 in graph)
+        self.assertTrue(n12 in graph)
+        self.assertTrue(n111 in graph)
+        self.assertTrue(n112 in graph)
+        self.assertTrue(n2 in graph)
+        self.assertTrue(n3 in graph)
+
+        visited, removes, orphans = graph.filterStack(
+                [lambda n: n.args[0] != 1, lambda n: n.args[0] != 2])
+
+        self.assertEqual(visited, 6)
+        self.assertEqual(removes, 2)
+        self.assertEqual(orphans, 1)
+
+        e = graph.graph.edge_by_node(n1.graphident, n111.graphident)
+        self.assertEqual(graph.graph.edge_data(e), "orphan")
+
+        self.assertTrue(n1 in graph)
+        self.assertTrue(n2 in graph)
+        self.assertTrue(n11 not in graph)
+        self.assertTrue(n12 in graph)
+        self.assertTrue(n111 in graph)
+        self.assertTrue(n112 not in graph)
+        self.assertTrue(n2 in graph)
+        self.assertTrue(n3 in graph)
+
+
+class TestObjectGraphIO (unittest.TestCase):
+    def setUp(self):
+        self._stdout = sys.stdout
+
+    def tearDown(self):
+        sys.stdout = self._stdout
+
+    def test_msg(self):
+        graph = ObjectGraph()
+
+        sys.stdout = fp = StringIO()
+        graph.msg(0, "foo")
+        self.assertEqual(fp.getvalue(), "foo \n")
+
+        sys.stdout = fp = StringIO()
+        graph.msg(5, "foo")
+        self.assertEqual(fp.getvalue(), "")
+
+        sys.stdout = fp = StringIO()
+        graph.debug = 10
+        graph.msg(5, "foo")
+        self.assertEqual(fp.getvalue(), "foo \n")
+
+        sys.stdout = fp = StringIO()
+        graph.msg(0, "foo", 1, "a")
+        self.assertEqual(fp.getvalue(), "foo 1 'a'\n")
+
+        sys.stdout = fp = StringIO()
+        graph.msgin(0, "hello", "world")
+        graph.msg(0, "test me")
+        graph.msgout(0, "bye bye")
+        self.assertEqual(fp.getvalue(), "hello 'world'\n  test me \nbye bye \n")
+
+
+if __name__ == "__main__": # pragma: no cover
+    unittest.main()
diff --git a/catapult/telemetry/third_party/altgraph/doc/Makefile b/catapult/telemetry/third_party/altgraph/doc/Makefile
new file mode 100644
index 0000000..b91ac81
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/doc/Makefile
@@ -0,0 +1,89 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+BUILDDIR      = _build
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html      to make standalone HTML files"
+	@echo "  dirhtml   to make HTML files named index.html in directories"
+	@echo "  pickle    to make pickle files"
+	@echo "  json      to make JSON files"
+	@echo "  htmlhelp  to make HTML files and a HTML help project"
+	@echo "  qthelp    to make HTML files and a qthelp project"
+	@echo "  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  changes   to make an overview of all changed/added/deprecated items"
+	@echo "  linkcheck to check all external links for integrity"
+	@echo "  doctest   to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+	-rm -rf $(BUILDDIR)/*
+
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+pickle:
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+json:
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+	@echo
+	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
+	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/altgraph.qhcp"
+	@echo "To view the help file:"
+	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/altgraph.qhc"
+
+latex:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+	@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
+	      "run these through (pdf)latex."
+
+changes:
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+	@echo
+	@echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/catapult/telemetry/third_party/altgraph/doc/changelog.rst b/catapult/telemetry/third_party/altgraph/doc/changelog.rst
new file mode 100644
index 0000000..02fd412
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/doc/changelog.rst
@@ -0,0 +1,185 @@
+Release history
+===============
+
+0.12
+----
+
+- Added ``ObjectGraph.edgeData`` to retrieve the edge data
+  from a specific edge.
+
+- Added ``AltGraph.update_edge_data`` and ``ObjectGraph.updateEdgeData``
+  to update the data associated with a graph edge.
+
+0.11
+----
+
+- Stabilize the order of elements in dot file exports,
+  patch from bitbucket user 'pombredanne'.
+
+- Tweak setup.py file to remove dependency on distribute (but
+  keep the dependency on setuptools)
+
+
+0.10.2
+------
+
+- There were no classifiers in the package metadata due to a bug
+  in setup.py.
+
+0.10.1
+------
+
+This is a bugfix release
+
+Bug fixes:
+
+- Issue #3: The source archive contains a README.txt
+  while the setup file refers to ReadMe.txt.
+
+  This is caused by a misfeature in distutils; as a
+  workaround I've renamed ReadMe.txt to README.txt
+  in the source tree and setup file.
+
+
+0.10
+-----
+
+This is a minor feature release
+
+Features:
+
+- Do not use "2to3" to support Python 3.
+
+  As a side effect of this altgraph now supports
+  Python 2.6 and later, and no longer supports
+  earlier releases of Python.
+
+- The order of attributes in the Dot output
+  is now always alphabetical.
+
+  With this change the output will be consistent
+  between runs and Python versions.
+
+0.9
+---
+
+This is a minor bugfix release
+
+Features:
+
+- Added ``altgraph.ObjectGraph.ObjectGraph.nodes``, a method
+  yielding all nodes in an object graph.
+
+Bugfixes:
+
+- The 0.8 release didn't work with py2app when using
+  python 3.x.
+
+
+0.8
+-----
+
+This is a minor feature release. The major new feature
+is an extensive set of unittests, which explains almost
+all other changes in this release.
+
+Bugfixes:
+
+- Installing failed with Python 2.5 due to using a distutils
+  class that isn't available in that version of Python
+  (issue #1 on the issue tracker)
+
+- ``altgraph.GraphStat.degree_dist`` now actually works
+
+- ``altgraph.Graph.add_edge(a, b, create_nodes=False)`` will
+  no longer create the edge when one of the nodes doesn't
+  exist.
+
+- ``altgraph.Graph.forw_topo_sort`` failed for some sparse graphs.
+
+- ``altgraph.Graph.back_topo_sort`` was completely broken in
+  previous releases.
+
+- ``altgraph.Graph.forw_bfs_subgraph`` now actually works.
+
+- ``altgraph.Graph.back_bfs_subgraph`` now actually works.
+
+- ``altgraph.Graph.iterdfs`` now returns the correct result
+  when the ``forward`` argument is ``False``.
+
+- ``altgraph.Graph.iterdata`` now returns the correct result
+  when the ``forward`` argument is ``False``.
+
+
+Features:
+
+- The ``altgraph.Graph`` constructor now accepts an argument
+  that contains 2- and 3-tuples instead of requiring that
+  all items have the same size. The (optional) argument can now
+  also be any iterator.
+
+- ``altgraph.Graph.Graph.add_node`` has no effect when you
+  add a hidden node.
+
+- The private method ``altgraph.Graph._bfs`` is no longer
+  present.
+
+- The private method ``altgraph.Graph._dfs`` is no longer
+  present.
+
+- ``altgraph.ObjectGraph`` now has a ``__contains__`` method,
+  which means you can use the ``in`` operator to check if a
+  node is part of a graph.
+
+- ``altgraph.GraphUtil.generate_random_graph`` will raise
+  ``GraphError`` instead of looping forever when it is
+  impossible to create the requested graph.
+
+- ``altgraph.Dot.edge_style`` raises ``GraphError`` when
+  one of the nodes is not present in the graph. The method
+  silently added the tail in the past, but without ensuring
+  a consistent graph state.
+
+- ``altgraph.Dot.save_img`` now works when the mode is
+  ``"neato"``.
+
+0.7.2
+-----
+
+This is a minor bugfix release
+
+Bugfixes:
+
+- distutils didn't include the documentation subtree
+
+0.7.1
+-----
+
+This is a minor feature release
+
+Features:
+
+- Documentation is now generated using `sphinx <http://pypi.python.org/pypi/sphinx>`_
+  and can be viewed at <http://packages.python.org/altgraph>.
+
+- The repository has moved to bitbucket
+
+- ``altgraph.GraphStat.avg_hops`` is no longer present; the function had no
+  implementation and no specified behaviour.
+
+- the module ``altgraph.compat`` is gone, which means altgraph will no
+  longer work with Python 2.3.
+
+
+0.7.0
+-----
+
+This is a minor feature release.
+
+Features:
+
+- Support for Python 3
+
+- It is now possible to run tests using 'python setup.py test'
+
+  (The actual testsuite is still very minimal though)
diff --git a/catapult/telemetry/third_party/altgraph/doc/conf.py b/catapult/telemetry/third_party/altgraph/doc/conf.py
new file mode 100644
index 0000000..cd3fd99
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/doc/conf.py
@@ -0,0 +1,209 @@
+# -*- coding: utf-8 -*-
+#
+# altgraph documentation build configuration file, created by
+# sphinx-quickstart on Tue Aug 31 11:04:49 2010.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys, os
+
+def get_version():
+    fn = os.path.join(
+            os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
+            'setup.cfg')
+    for ln in open(fn):
+        if ln.startswith('version'):
+            version = ln.split('=')[-1].strip()
+            return version
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.append(os.path.abspath('.'))
+sys.path.insert(0,
+        os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+# -- General configuration -----------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.autodoc' ]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'altgraph'
+copyright = u'2010-2011, Ronald Oussoren, Bob Ippolito, 2004 Istvan Albert'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = get_version()
+# The full version, including alpha/beta/rc tags.
+release = version
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of documents that shouldn't be included in the build.
+#unused_docs = []
+
+# List of directories, relative to source directory, that shouldn't be searched
+# for source files.
+exclude_trees = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  Major themes that come with
+# Sphinx are currently 'default' and 'sphinxdoc'.
+html_theme = 'nature'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_use_modindex = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+html_show_sourcelink = False
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = ''
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'altgraphdoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+# The paper size ('letter' or 'a4').
+#latex_paper_size = 'letter'
+
+# The font size ('10pt', '11pt' or '12pt').
+#latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+  ('index', 'altgraph.tex', u'altgraph Documentation',
+   u'Ronald Oussoren', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# Additional stuff for the LaTeX preamble.
+#latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_use_modindex = True
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {'python': ('http://docs.python.org/', None) }
diff --git a/catapult/telemetry/third_party/altgraph/doc/core.rst b/catapult/telemetry/third_party/altgraph/doc/core.rst
new file mode 100644
index 0000000..8288f6a
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/doc/core.rst
@@ -0,0 +1,26 @@
+:mod:`altgraph` --- A Python Graph Library
+==================================================
+
+.. module:: altgraph
+   :synopsis: A directional graph for python
+
+altgraph is a fork of `graphlib <http://pygraphlib.sourceforge.net>`_ tailored
+to use newer Python 2.3+ features, including additional support used by the
+py2app suite (modulegraph and macholib, specifically).
+
+altgraph is a python based graph (network) representation and manipulation package.
+It started out as an extension to the `graph_lib module <http://www.ece.arizona.edu/~denny/python_nest/graph_lib_1.0.1.html>`_
+written by Nathan Denny and has since been significantly optimized and expanded.
+
+The :class:`altgraph.Graph.Graph` class is loosely modeled after the `LEDA <http://www.algorithmic-solutions.com/enleda.htm>`_ 
+(Library of Efficient Datatypes)  representation. The library
+includes methods for constructing graphs, BFS and DFS traversals,
+topological sort, finding connected components, shortest paths, as well as a
+number of graph statistics functions. The library can also visualize graphs
+via `graphviz <http://www.research.att.com/sw/tools/graphviz/>`_.
+
+
+.. exception:: GraphError
+
+   Exception raised when methods are called with bad values or
+   on a graph in an inconsistent state.
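+
+   A minimal sketch of how the exception typically surfaces (the node
+   values below are arbitrary)::
+
+        from altgraph import Graph, GraphError
+
+        g = Graph.Graph()
+        g.add_node(1)
+        try:
+            g.hide_node(2)      # node 2 was never added to the graph
+        except GraphError:
+            print("cannot hide a node that is not in the graph")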
diff --git a/catapult/telemetry/third_party/altgraph/doc/dot.rst b/catapult/telemetry/third_party/altgraph/doc/dot.rst
new file mode 100644
index 0000000..3848c48
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/doc/dot.rst
@@ -0,0 +1,224 @@
+:mod:`altgraph.Dot` --- Interface to the dot language
+=====================================================
+
+.. module:: altgraph.Dot
+   :synopsis: Interface to the dot language as used by Graphviz.
+
+The :py:mod:`~altgraph.Dot` module provides a simple interface to the
+file format used in the `graphviz`_ program. The module is intended to 
+offload the most tedious part of the process (the **dot** file generation) 
+while transparently exposing most of its features.
+
+.. _graphviz: http://www.research.att.com/sw/tools/graphviz/
+
+To display the graphs or to generate image files the `graphviz`_
+package needs to be installed on the system; moreover, the :command:`dot` and
+:command:`dotty` programs must be accessible in the program path so that they
+can be run from processes spawned within the module.
+
+Example usage
+-------------
+
+Here is a typical usage::
+
+    from altgraph import Graph, Dot
+
+    # create a graph
+    edges = [ (1,2), (1,3), (3,4), (3,5), (4,5), (5,4) ]
+    graph = Graph.Graph(edges)
+    
+    # create a dot representation of the graph
+    dot = Dot.Dot(graph)
+
+    # display the graph
+    dot.display()
+
+    # save the dot representation into the mydot.dot file
+    dot.save_dot(file_name='mydot.dot')
+
+    # save dot file as gif image into the graph.gif file
+    dot.save_img(file_name='graph', file_type='gif')
+
+
+Directed graph and non-directed graph
+-------------------------------------
+
+The Dot class can be used for both directed and non-directed graphs
+by passing the *graphtype* parameter.
+
+Example::
+
+    # create a directed graph (default)
+    dot = Dot.Dot(graph, graphtype="digraph")
+
+    # create non-directed graph
+    dot = Dot.Dot(graph, graphtype="graph")
+
+
+Customizing the output
+----------------------
+
+The graph drawing process may be customized by passing
+valid :command:`dot` parameters for the nodes and edges. For a list of all
+parameters see the `graphviz`_ documentation.
+
+Example::
+
+    # customizing the way the overall graph is drawn
+    dot.style(size='10,10', rankdir='RL', page='5, 5' , ranksep=0.75)
+
+    # customizing node drawing
+    dot.node_style(1, label='BASE_NODE',shape='box', color='blue' )
+    dot.node_style(2, style='filled', fillcolor='red')
+
+    # customizing edge drawing
+    dot.edge_style(1, 2, style='dotted')
+    dot.edge_style(3, 5, arrowhead='dot', label='binds', labelangle='90')
+    dot.edge_style(4, 5, arrowsize=2, style='bold')
+
+
+.. note::
+
+   dotty (invoked via :py:func:`~altgraph.Dot.display`) may not be able to
+   display all graphics styles. To verify the output, save it to an image
+   file and look at it that way.
+
+Valid attributes
+----------------
+
+- dot styles, passed via the :py:meth:`Dot.style` method::
+
+    rankdir = 'LR'   (draws the graph horizontally, left to right)
+    ranksep = number (rank separation in inches)
+
+- node attributes, passed via the :py:meth:`Dot.node_style` method::
+
+     style = 'filled' | 'invisible' | 'diagonals' | 'rounded'
+     shape = 'box' | 'ellipse' | 'circle' | 'point' | 'triangle'
+
+- edge attributes, passed via the :py:meth:`Dot.edge_style` method::
+
+     style     = 'dashed' | 'dotted' | 'solid' | 'invis' | 'bold'
+     arrowhead = 'box' | 'crow' | 'diamond' | 'dot' | 'inv' | 'none' | 'tee' | 'vee'
+     weight    = number (the larger the number the closer the nodes will be)
+
+- valid `graphviz colors <http://www.research.att.com/~erg/graphviz/info/colors.html>`_
+
+- for more details on how to control the graph drawing process see the 
+  `graphviz reference <http://www.research.att.com/sw/tools/graphviz/refs.html>`_.
+
+
+Class interface
+---------------
+
+.. class:: Dot(graph[, nodes[, edgefn[, nodevisitor[, edgevisitor[, name[, dot[, dotty[, neato[, graphtype]]]]]]]]])
+
+  Creates a new Dot generator based on the specified 
+  :class:`Graph <altgraph.Graph.Graph>`.  The Dot generator won't reference
+  the *graph* once it is constructed.
+
+  If the *nodes* argument is present it is the list of nodes to include
+  in the graph, otherwise all nodes in *graph* are included.
+  
+  If the *edgefn* argument is present it is a function that yields the
+  nodes connected to another node; this defaults to
+  :meth:`graph.out_nbrs <altgraph.Graph.Graph.out_nbrs>`. The constructor won't
+  add edges to the dot file unless both the head and tail of the edge
+  are in *nodes*.
+
+  If the *name* is present it specifies the name of the graph in the resulting
+  dot file. The default is ``"G"``.
+
+  The functions *nodevisitor* and *edgevisitor* return the default style
+  for a given edge or node (both default to functions that return an empty
+  style).
+
+  The arguments *dot*, *dotty* and *neato* are used to pass the path to 
+  the corresponding `graphviz`_ command.
+
+
+Updating graph attributes
+.........................
+
+.. method:: Dot.style(\**attr)
+
+   Sets the overall style (graph attributes) to the given attributes.
+
+   See `Valid Attributes`_ for more information about the attributes.
+
+.. method:: Dot.node_style(node, \**attr)
+
+   Sets the style for *node* to the given attributes.
+
+   This method will add *node* to the graph when it isn't already 
+   present.
+
+   See `Valid Attributes`_ for more information about the attributes.
+
+.. method:: Dot.all_node_style(\**attr)
+
+   Replaces the current style for all nodes
+
+
+.. method:: Dot.edge_style(head, tail, \**attr)
+
+   Sets the style of an edge to the given attributes. The edge will
+   be added to the graph when it isn't already present, but *head*
+   and *tail* must both be valid nodes.
+
+   See `Valid Attributes`_ for more information about the attributes.
+
+
+
+Emitting output
+...............
+
+.. method:: Dot.display([mode])
+
+   Displays the current graph via dotty.
+
+   If the *mode* is ``"neato"`` the dot file is processed with
+   the neato command before displaying.
+
+   This method won't return until the dotty command exits.
+
+.. method:: Dot.save_dot(filename)
+
+   Saves the current graph representation into the given file.
+
+   .. note::
+
+       For backward compatibility reasons this method can also
+       be called without an argument; it will then write the graph
+       into a fixed filename (present in the attribute :data:`Graph.temp_dot`).
+
+       This feature is deprecated and should not be used.
+
+
+.. method:: Dot.save_img(file_name[, file_type[, mode]])
+
+   Saves the current graph representation as an image file. The output
+   is written into a file whose basename is *file_name* and whose suffix
+   is *file_type*.
+
+   The *file_type* specifies the type of file to write, the default
+   is ``"gif"``.
+
+   If the *mode* is ``"neato"`` the dot file is processed with
+   the neato command before displaying.
+
+   .. note::
+
+       For backward compatibility reasons this method can also
+       be called without an argument; it will then write the graph
+       with a fixed basename (``"out"``).
+
+       This feature is deprecated and should not be used.
+
+.. method:: Dot.iterdot()
+
+   Yields all lines of a `graphviz`_ input file (including line endings).
+
+.. method:: Dot.__iter__()
+
+   Alias for the :meth:`iterdot` method.
diff --git a/catapult/telemetry/third_party/altgraph/doc/graph.rst b/catapult/telemetry/third_party/altgraph/doc/graph.rst
new file mode 100644
index 0000000..502a218
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/doc/graph.rst
@@ -0,0 +1,305 @@
+:mod:`altgraph.Graph` --- Basic directional graphs
+==================================================
+
+.. module:: altgraph.Graph
+   :synopsis: Basic directional graphs.
+
+The module :mod:`altgraph.Graph` provides a class :class:`Graph` that
+represents a directed graph with *N* nodes and *E* edges.
+
+.. class:: Graph([edges])
+
+  Constructs a new empty :class:`Graph` object. If the optional
+  *edges* parameter is supplied, updates the graph by adding the
+  specified edges.
+
+  All of the elements in *edges* should be tuples with two or three
+  elements. The first two elements of the tuple are the source and
+  destination node of the edge, the optional third element is the
+  edge data.  The source and destination nodes are added to the graph
+  when they aren't already present.
+
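+  A short constructor sketch, using the same mix of 2- and 3-tuples as the
+  bundled unit tests (the ``'a'`` edge data is an arbitrary example value)::
+
+        from altgraph.Graph import Graph
+
+        graph = Graph([(1, 2), (2, 3, 'a'), (1, 3), (3, 4)])
+        print(graph.number_of_nodes())                    # 4
+        print(graph.edge_data(graph.edge_by_node(2, 3)))  # 'a'
+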
+
+Node related methods
+--------------------
+
+.. method:: Graph.add_node(node[, node_data])
+
+   Adds a new node to the graph if it is not already present. The new
+   node must be a hashable object.
+
+   Arbitrary data can be attached to the node via the optional *node_data*
+   argument.
+
+   .. note:: the node also won't be added to the graph when it is
+      present but currently hidden.
+
+
+.. method:: Graph.hide_node(node)
+
+   Hides a *node* from the graph. The incoming and outgoing edges of
+   the node will also be hidden.
+
+   Raises :class:`altgraph.GraphError` when the node is not a (visible)
+   node of the graph.
+
+
+.. method:: Graph.restore_node(node)
+
+   Restores a previously hidden *node*. The incoming and outgoing
+   edges of the node are also restored.
+
+   Raises :class:`altgraph.GraphError` when the node is not a hidden
+   node of the graph.
+
+.. method:: Graph.restore_all_nodes()
+
+   Restores all hidden nodes.
+
+.. method:: Graph.number_of_nodes()
+
+   Return the number of visible nodes in the graph.
+
+.. method:: Graph.number_of_hidden_nodes()
+
+   Return the number of hidden nodes in the graph.
+
+.. method:: Graph.node_list()
+
+   Return a list with all visible nodes in the graph.
+
+.. method:: Graph.hidden_node_list()
+
+   Return a list with all hidden nodes in the graph.
+
+.. method:: Graph.node_data(node)
+
+   Return the data associated with the *node* when it was
+   added.
+
+.. method:: Graph.describe_node(node)
+
+   Returns *node*, the node's data and the lists of outgoing
+   and incoming edges for the node.
+
+   .. note::
+
+      The edge lists should not be modified; doing so
+      can result in unpredictable behavior.
+
+.. method:: Graph.__contains__(node)
+
+   Returns True iff *node* is a node in the graph. This
+   method is accessed through the *in* operator.
+
+.. method:: Graph.__iter__()
+
+   Yield all nodes in the graph.
+
+.. method:: Graph.out_edges(node)
+
+   Return the list of outgoing edges for *node*
+
+.. method:: Graph.inc_edges(node)
+
+   Return the list of incoming edges for *node*
+
+.. method:: Graph.all_edges(node)
+
+   Return the list of incoming and outgoing edges for *node*
+
+.. method:: Graph.out_degree(node)
+
+   Return the number of outgoing edges for *node*.
+
+.. method:: Graph.inc_degree(node)
+
+   Return the number of incoming edges for *node*.
+
+.. method:: Graph.all_degree(node)
+
+   Return the number of edges (incoming or outgoing) for *node*.
+
+Edge related methods
+--------------------
+
+.. method:: Graph.add_edge(head_id, tail_id[, edge_data[, create_nodes]])
+
+   Adds a directed edge from *head_id* to *tail_id*. Arbitrary data can
+   be added via *edge_data*.  When *create_nodes* is *True* (the default),
+   *head_id* and *tail_id* will be added to the graph when they aren't
+   already present.
+
+.. method:: Graph.hide_edge(edge)
+
+   Hides an edge from the graph. The edge may be unhidden at some later
+   time.
+
+.. method:: Graph.restore_edge(edge)
+
+   Restores a previously hidden *edge*.
+
+.. method:: Graph.restore_all_edges()
+
+   Restore all edges that were hidden before, except for edges
+   referring to hidden nodes.
+
+.. method:: Graph.edge_by_node(head, tail)
+
+   Return the edge ID for an edge from *head* to *tail*,
+   or :data:`None` when no such edge exists.
+
+.. method:: Graph.edge_by_id(edge)
+
+   Return the head and tail of the *edge*
+
+.. method:: Graph.edge_data(edge)
+
+   Return the data associated with the *edge*.
+
+.. method:: Graph.update_edge_data(edge, data)
+
+   Replace the edge data for *edge* by *data*. Raises
+   :exc:`KeyError` when the edge does not exist.
+
+   .. versionadded:: 0.12
+
+.. method:: Graph.head(edge)
+
+   Return the head of an *edge*
+
+.. method:: Graph.tail(edge)
+
+   Return the tail of an *edge*
+
+.. method:: Graph.describe_edge(edge)
+
+   Return the *edge*, the associated data, its head and tail.
+
+.. method:: Graph.number_of_edges()
+
+   Return the number of visible edges.
+
+.. method:: Graph.number_of_hidden_edges()
+
+   Return the number of hidden edges.
+
+.. method:: Graph.edge_list()
+
+   Returns a list with all visible edges in the graph.
+
+.. method:: Graph.hidden_edge_list()
+
+   Returns a list with all hidden edges in the graph.
+
+Graph traversal
+---------------
+
+.. method:: Graph.out_nbrs(node)
+
+   Return a list of all nodes connected by outgoing edges.
+
+.. method:: Graph.inc_nbrs(node)
+
+   Return a list of all nodes connected by incoming edges.
+
+.. method:: Graph.all_nbrs(node)
+
+   Returns a list of nodes connected by an incoming or outgoing edge.
+
+.. method:: Graph.forw_topo_sort()
+
+   Return a list of nodes where the successors (based on outgoing
+   edges) of any given node appear in the sequence after that node.
+
+.. method:: Graph.back_topo_sort()
+
+   Return a list of nodes where the successors (based on incoming
+   edges) of any given node appear in the sequence after that node.
+
+.. method:: Graph.forw_bfs_subgraph(start_id)
+
+   Return a subgraph consisting of the breadth first
+   reachable nodes from *start_id* based on their outgoing edges.
+
+
+.. method:: Graph.back_bfs_subgraph(start_id)
+
+   Return a subgraph consisting of the breadth first
+   reachable nodes from *start_id* based on their incoming edges.
+
+.. method:: Graph.iterdfs(start[, end[, forward]])
+
+   Yield nodes in a depth first traversal starting at the *start*
+   node.
+
+   If *end* is specified traversal stops when reaching that node.
+
+   If forward is True (the default) edges are traversed in forward
+   direction, otherwise they are traversed in reverse direction.
+
+.. method:: Graph.iterdata(start[, end[, forward[, condition]]])
+
+   Yield the associated data for nodes in a depth first traversal
+   starting at the *start* node. This method will not yield values for nodes
+   without associated data.
+
+   If *end* is specified traversal stops when reaching that node.
+
+   If *condition* is specified and the condition callable returns
+   False for the associated data this method will not yield the
+   associated data and will not follow the edges for the node.
+
+   If forward is True (the default) edges are traversed in forward
+   direction, otherwise they are traversed in reverse direction.
+
+.. method:: Graph.forw_bfs(start[, end])
+
+   Returns a list of nodes starting at *start* in some breadth first
+   search order (following outgoing edges).
+
+   When *end* is specified iteration stops at that node.
+
+.. method:: Graph.back_bfs(start[, end])
+
+   Returns a list of nodes starting at *start* in some breadth first
+   search order (following incoming edges).
+
+   When *end* is specified iteration stops at that node.
+
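+   A small traversal sketch (the string node names are arbitrary)::
+
+        from altgraph.Graph import Graph
+
+        graph = Graph([("1", "1.1"), ("1", "1.2"), ("1.1", "1.1.1")])
+
+        # breadth first over outgoing edges, starting at the root
+        print(graph.forw_bfs("1"))
+
+        # breadth first over incoming edges, starting at a leaf
+        print(graph.back_bfs("1.1.1"))
+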
+.. method:: Graph.get_hops(start[, end[, forward]])
+
+   Computes the hop distance to all nodes centered around a specified node.
+
+   First order neighbours are at hop 1, their neighbours are at hop 2, etc.
+   Uses :py:meth:`forw_bfs` or :py:meth:`back_bfs` depending on the value of
+   the forward parameter.
+
+   If the distance between all neighbouring nodes is 1 the hop number
+   corresponds to the shortest distance between the nodes.
+
+   Typical usage::
+
+        >>> print graph.get_hops(1, 8)
+        [(1, 0), (2, 1), (3, 1), (4, 2), (5, 3), (7, 4), (8, 5)]
+        # node 1 is at 0 hops
+        # node 2 is at 1 hop
+        # ...
+        # node 8 is at 5 hops
+
+
+Graph statistics
+----------------
+
+.. method:: Graph.connected()
+
+   Returns True iff every node in the graph can be reached from
+   every other node.
+
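+   A small sketch, mirroring the bundled unit test::
+
+        from altgraph.Graph import Graph
+
+        g = Graph([(1, 2), (3, 4)])
+        print(g.connected())     # False -- two separate components
+
+        g.add_edge(2, 3)
+        g.add_edge(4, 1)
+        print(g.connected())     # True -- every node can reach every other
+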
+.. method:: Graph.clust_coef(node)
+
+   Returns the local clustering coefficient of node.
+
+   The local clustering coefficient is the ratio of the actual number
+   of edges between the neighbours of *node* to the maximum possible
+   number of edges between those neighbours.
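+
+   A small sketch, following the values asserted in the bundled unit test::
+
+        from altgraph.Graph import Graph
+
+        g = Graph([(1, 2), (1, 3), (1, 4)])
+        print(g.clust_coef(1))    # 0.0 -- no edges between 1's neighbours
+
+        g.add_edge(2, 5)
+        g.add_edge(3, 5)
+        g.add_edge(4, 5)
+        g.add_edge(2, 3)
+        print(g.clust_coef(1))    # 1/6 -- one neighbour pair is now linked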
diff --git a/catapult/telemetry/third_party/altgraph/doc/graphalgo.rst b/catapult/telemetry/third_party/altgraph/doc/graphalgo.rst
new file mode 100644
index 0000000..84d492f
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/doc/graphalgo.rst
@@ -0,0 +1,26 @@
+:mod:`altgraph.GraphAlgo` --- Graph algorithms
+==================================================
+
+.. module:: altgraph.GraphAlgo
+   :synopsis: Basic graph algorithms
+
+.. function:: dijkstra(graph, start[, end])
+
+   Dijkstra's algorithm for shortest paths.
+
+   Find shortest paths from the *start* node to all nodes nearer
+   than or equal to the *end* node. The edge data is assumed to be the edge length.
+
+   .. note::
+
+       Dijkstra's algorithm is only guaranteed to work correctly when all edge lengths are positive.
+       This code does not verify this property for all edges (only the edges examined until the end
+       vertex is reached), but will correctly compute shortest paths even for some graphs with negative
+       edges, and will raise an exception if it discovers that a negative edge has caused it to make a mistake.
+
+
+.. function:: shortest_path(graph, start, end)
+
+   Find a single shortest path from the given start node to the given end node.
+   The input has the same conventions as :func:`dijkstra`. The output is a list 
+   of the nodes in order along the shortest path.
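+
+   A minimal sketch, using the edge data as edge lengths as described above
+   (the weights are arbitrary illustration values)::
+
+        from altgraph import Graph, GraphAlgo
+
+        g = Graph.Graph([(1, 2, 1), (2, 3, 1), (1, 3, 5)])
+
+        # the two-hop route (total length 2) beats the direct edge (length 5)
+        print(GraphAlgo.shortest_path(g, 1, 3))    # [1, 2, 3]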
diff --git a/catapult/telemetry/third_party/altgraph/doc/graphstat.rst b/catapult/telemetry/third_party/altgraph/doc/graphstat.rst
new file mode 100644
index 0000000..0931a12
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/doc/graphstat.rst
@@ -0,0 +1,25 @@
+:mod:`altgraph.GraphStat` --- Functions providing various graph statistics
+==========================================================================
+
+.. module:: altgraph.GraphStat
+   :synopsis: Functions providing various graph statistics
+
+The module :mod:`altgraph.GraphStat` provides functions that calculate
+graph statistics. Currently there is only one such function; more may
+be added later.
+
+.. function:: degree_dist(graph[, limits[, bin_num[, mode]]])
+
+   Groups the number of edges per node into *bin_num* bins
+   and returns the list of those bins. Every item in the result
+   is a tuple with the center of the bin and the number of items
+   in that bin.
+
+   When the *limits* argument is present it must be a tuple with
+   the minimum and maximum number of edges that get binned (that
+   is, when *limits* is ``(4, 10)`` only nodes with between 4
+   and 10 edges get counted).
+
+   The *mode* argument is used to count incoming (``'inc'``) or
+   outgoing (``'out'``) edges. The default is to count the outgoing
+   edges.
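+
+   A small sketch of a call (the graph itself is an arbitrary illustration
+   value)::
+
+        from altgraph import Graph, GraphStat
+
+        g = Graph.Graph([(1, 2), (1, 3), (1, 4), (2, 3)])
+
+        # distribution of outgoing edges per node, grouped into bins
+        print(GraphStat.degree_dist(g))
+
+        # the same for incoming edges
+        print(GraphStat.degree_dist(g, mode='inc'))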
diff --git a/catapult/telemetry/third_party/altgraph/doc/graphutil.rst b/catapult/telemetry/third_party/altgraph/doc/graphutil.rst
new file mode 100644
index 0000000..c07836d
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/doc/graphutil.rst
@@ -0,0 +1,55 @@
+:mod:`altgraph.GraphUtil` --- Utility functions
+================================================
+
+.. module:: altgraph.GraphUtil
+   :synopsis: Utility functions
+
+The module :mod:`altgraph.GraphUtil` provides a number of more
+or less useful utility functions.
+
+.. function:: generate_random_graph(node_num, edge_num[, self_loops[, multi_edges]])
+
+   Generates and returns a :class:`Graph <altgraph.Graph.Graph>` instance
+   with *node_num* nodes randomly connected by *edge_num* edges.
+
+   When *self_loops* is present and True there can be edges that point from
+   a node to itself.
+
+   When *multi_edges* is present and True there can be duplicate edges.
+
+   This function raises :class:`GraphError <altgraph.GraphError>` when
+   a graph with the requested configuration cannot be created.
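+
+   A hedged usage sketch (the node and edge counts are arbitrary, and the
+   ``number_of_nodes``/``number_of_edges`` accessors are assumed to be the
+   ones described in :mod:`altgraph.Graph`)::
+
+      from altgraph import GraphUtil
+
+      g = GraphUtil.generate_random_graph(10, 20)
+      print(g.number_of_nodes(), g.number_of_edges())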
+
+.. function:: generate_scale_free_graph(steps, growth_num[, self_loops[, multi_edges]])
+
+    Generates and returns a :py:class:`~altgraph.Graph.Graph` instance that
+    will have ``steps * growth_num`` nodes and a scale free (powerlaw)
+    connectivity.
+
+    Starting with a fully connected graph with *growth_num* nodes, at every
+    step *growth_num* nodes are added to the graph and connected to existing
+    nodes with a probability proportional to the degree of those existing
+    nodes.
+
+    .. warning:: The current implementation is basically untested, although
+       code inspection seems to indicate an implementation that is consistent
+       with the description at 
+       `Wolfram MathWorld <http://mathworld.wolfram.com/Scale-FreeNetwork.html>`_.
+
+.. function:: filter_stack(graph, head, filters)
+
+   Perform a depth-first walk of the graph starting at *head* and
+   apply all filter functions in *filters* on the node data of the nodes
+   found.
+
+   Returns (*visited*, *removes*, *orphans*), where
+
+   * *visited*: the set of visited nodes
+
+   * *removes*: the list of nodes where the node data doesn't match
+     all *filters*.
+
+   * *orphans*: list of tuples (*last_good*, *node*), where *node* is not in
+     *removes* but one of the nodes connected to it by an incoming edge is
+     in *removes*. *last_good* is the closest upstream node that is not in
+     *removes*.
diff --git a/catapult/telemetry/third_party/altgraph/doc/index.rst b/catapult/telemetry/third_party/altgraph/doc/index.rst
new file mode 100644
index 0000000..1e8d504
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/doc/index.rst
@@ -0,0 +1,41 @@
+.. altgraph documentation master file, created by
+   sphinx-quickstart on Tue Aug 31 11:04:49 2010.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+Altgraph - A basic graph library
+================================
+
+altgraph is a fork of graphlib: a graph (network) package for constructing
+graphs, BFS and DFS traversals, topological sort, shortest paths, etc. with
+graphviz output.
+
+The primary users of this package are `macholib <http://pypi.python.org/pypi/macholib>`_ and `modulegraph <http://pypi.python.org/pypi/modulegraph>`_.
+
+.. toctree::
+   :maxdepth: 1
+
+   changelog
+   license
+   core
+   graph
+   objectgraph
+   graphalgo
+   graphstat
+   graphutil
+   dot
+
+Online Resources
+----------------
+
+* `Sourcecode repository on bitbucket <http://bitbucket.org/ronaldoussoren/altgraph/>`_
+
+* `The issue tracker <http://bitbucket.org/ronaldoussoren/altgraph/issues>`_
+
+Indices and tables
+------------------
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/catapult/telemetry/third_party/altgraph/doc/license.rst b/catapult/telemetry/third_party/altgraph/doc/license.rst
new file mode 100644
index 0000000..498e60b
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/doc/license.rst
@@ -0,0 +1,25 @@
+License
+=======
+
+Copyright (c) 2004 Istvan Albert unless otherwise noted.
+
+Parts are copyright (c) Bob Ippolito
+
+Parts are copyright (c) 2010-2014 Ronald Oussoren
+
+MIT License
+...........
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software
+and associated documentation files (the "Software"), to deal in the Software without restriction,
+including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do
+so.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
+INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
+FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
diff --git a/catapult/telemetry/third_party/altgraph/doc/objectgraph.rst b/catapult/telemetry/third_party/altgraph/doc/objectgraph.rst
new file mode 100644
index 0000000..e3df396
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/doc/objectgraph.rst
@@ -0,0 +1,146 @@
+:mod:`altgraph.ObjectGraph` --- Graphs of objects with an identifier
+======================================================================
+
+.. module:: altgraph.ObjectGraph
+   :synopsis: A graph of objects that have a "graphident" attribute.
+
+.. class:: ObjectGraph([graph[, debug]])
+
+   A graph of objects that have a "graphident" attribute. The
+   value of this attribute is the key for the object in the
+   graph.
+
+   The optional *graph* is a previously constructed
+   :class:`Graph <altgraph.Graph.Graph>`.
+
+   The optional *debug* level controls the amount of debug output
+   (see :meth:`msg`, :meth:`msgin` and :meth:`msgout`).
+
+   .. note:: the altgraph library does not generate debug output itself;
+      the debug attribute and message methods are present for use
+      by subclasses.
+
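+   A small, hedged sketch of basic use (the ``Node`` class below is purely
+   illustrative; any object with a *graphident* attribute will do)::
+
+      from altgraph.ObjectGraph import ObjectGraph
+
+      class Node(object):
+          def __init__(self, ident):
+              # graphident is the key for the node in the graph.
+              self.graphident = ident
+
+      graph = ObjectGraph()
+      graph.addNode(Node('a'))
+
+      print('a' in graph)                    # membership test by graphident
+      print(graph.findNode('a').graphident)  # 'a'
+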
+.. data:: ObjectGraph.graph
+
+   A :class:`Graph <altgraph.Graph.Graph>` object that contains
+   the graph data.
+
+
+.. method:: ObjectGraph.addNode(node)
+
+   Adds a *node* to the graph.
+
+   .. note:: re-adding a node that was previously removed
+      using :meth:`removeNode` will reinstate it.
+
+.. method:: ObjectGraph.createNode(self, cls, name, \*args, \**kwds)
+
+   Creates a new node using ``cls(*args, **kwds)`` and adds that
+   node using :meth:`addNode`.
+
+   Returns the newly created node.
+
+.. method:: ObjectGraph.removeNode(node)
+
+   Removes a *node* from the graph when it exists. The *node* argument
+   is either a node object, or the graphident of a node.
+
+.. method:: ObjectGraph.createReference(fromnode, tonode[, edge_data])
+
+   Creates a reference from *fromnode* to *tonode*. The optional
+   *edge_data* is associated with the edge.
+
+   *Fromnode* and *tonode* can either be node objects or the graphident
+   values for nodes.
+
+.. method:: ObjectGraph.removeReference(fromnode, tonode)
+
+   Removes the reference from *fromnode* to *tonode* if it exists.
+
+.. method:: ObjectGraph.getRawIdent(node)
+
+   Returns the *graphident* attribute of *node*, or the graph itself
+   when *node* is :data:`None`.
+
+.. method:: ObjectGraph.getIdent(node)
+
+   Same as :meth:`getRawIdent`, but only if the node is part
+   of the graph.
+
+   *Node* can either be an actual node object or the graphident of
+   a node.
+
+.. method:: ObjectGraph.findNode(node)
+
+   Returns the given node in the graph, or :data:`None` when it cannot
+   be found.
+
+   *Node* is either an object with a *graphident* attribute or
+   the *graphident* attribute itself.
+
+.. method:: ObjectGraph.__contains__(node)
+
+   Returns True if *node* is a member of the graph. *Node* is either an
+   object with a *graphident* attribute or the *graphident* attribute itself.
+
+.. method:: ObjectGraph.flatten([condition[, start]])
+
+   Yield all nodes that are entirely reachable by *condition*
+   starting from the given *start* node or the graph root.
+
+   .. note:: objects are only reachable from the graph root
+      when there is a reference from the root to the node
+      (either directly or through another node)
+
+.. method:: ObjectGraph.nodes()
+
+   Yield all nodes in the graph.
+
+.. method:: ObjectGraph.get_edges(node)
+
+   Returns two iterators that yield the nodes reached by
+   outgoing and incoming edges, respectively.
+
+.. method:: ObjectGraph.filterStack(filters)
+
+   Filter the ObjectGraph in-place by removing all edges to nodes that
+   do not match every filter in the given filter list.
+
+   Returns a tuple containing the number of:
+   (*nodes_visited*, *nodes_removed*, *nodes_orphaned*)
+
+.. method:: ObjectGraph.edgeData(fromNode, toNode)
+
+   Return the edge data associated with the edge from *fromNode*
+   to *toNode*. Raises :exc:`KeyError` when no such edge exists.
+
+   .. versionadded:: 0.12
+
+.. method:: ObjectGraph.updateEdgeData(fromNode, toNode, edgeData)
+
+   Replace the data associated with the edge from *fromNode* to
+   *toNode* by *edgeData*.
+
+   Raises :exc:`KeyError` when the edge does not exist.
+
+Debug output
+------------
+
+.. data:: ObjectGraph.debug
+
+   The current debug level.
+
+.. method:: ObjectGraph.msg(level, text, \*args)
+
+   Print a debug message at the current indentation level when the current
+   debug level is *level* or less.
+
+.. method:: ObjectGraph.msgin(level, text, \*args)
+
+   Print a debug message when the current debug level is *level* or less,
+   and increase the indentation level.
+
+.. method:: ObjectGraph.msgout(level, text, \*args)
+
+   Decrease the indentation level and print a debug message when the
+   current debug level is *level* or less.
diff --git a/catapult/telemetry/third_party/altgraph/setup.cfg b/catapult/telemetry/third_party/altgraph/setup.cfg
new file mode 100644
index 0000000..9c6880e
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/setup.cfg
@@ -0,0 +1,36 @@
+[metadata]
+name = altgraph
+version = 0.12
+description = Python graph (network) package
+long_description_file = 
+	README.txt
+	doc/changelog.rst
+author = Ronald Oussoren
+author_email = ronaldoussoren@mac.com
+maintainer = Ronald Oussoren
+maintainer_email = ronaldoussoren@mac.com
+url = http://packages.python.org/altgraph
+download_url = http://pypi.python.org/pypi/altgraph
+license = MIT
+classifiers = 
+	Intended Audience :: Developers
+	License :: OSI Approved :: MIT License
+	Programming Language :: Python
+	Programming Language :: Python :: 2
+	Programming Language :: Python :: 2.7
+	Programming Language :: Python :: 3
+	Programming Language :: Python :: 3.3
+	Programming Language :: Python :: 3.4
+	Topic :: Software Development :: Libraries :: Python Modules
+	Topic :: Scientific/Engineering :: Mathematics
+	Topic :: Scientific/Engineering :: Visualization
+keywords = graph
+platforms = any
+packages = altgraph
+zip-safe = 1
+
+[egg_info]
+tag_build = 
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/catapult/telemetry/third_party/altgraph/setup.py b/catapult/telemetry/third_party/altgraph/setup.py
new file mode 100644
index 0000000..a1a4cb6
--- /dev/null
+++ b/catapult/telemetry/third_party/altgraph/setup.py
@@ -0,0 +1,867 @@
+"""
+Shared setup file for simple python packages. Uses a setup.cfg that
+is the same as the distutils2 project, unless noted otherwise.
+
+It exists for two reasons:
+1) This makes it easier to reuse setup.py code between my own
+   projects
+
+2) Easier migration to distutils2 when that catches on.
+
+Additional functionality:
+
+* Section metadata:
+    requires-test:  Same as 'tests_require' option for setuptools.
+
+"""
+
+import sys
+import os
+import re
+import platform
+from fnmatch import fnmatch
+import time
+import tempfile
+import tarfile
+try:
+    import urllib.request as urllib
+except ImportError:
+    import urllib
+from distutils import log
+try:
+    from hashlib import md5
+
+except ImportError:
+    from md5 import md5
+
+if sys.version_info[0] == 2:
+    from ConfigParser import RawConfigParser, NoOptionError, NoSectionError
+else:
+    from configparser import RawConfigParser, NoOptionError, NoSectionError
+
+ROOTDIR = os.path.dirname(os.path.abspath(__file__))
+
+
+#
+#
+#
+# Parsing the setup.cfg and converting it to something that can be
+# used by setuptools.setup()
+#
+#
+#
+
+def eval_marker(value):
+    """
+    Evaluate a distutils2 environment marker.
+
+    This code is unsafe when used with hostile setup.cfg files,
+    but that's not a problem for our own files.
+    """
+    value = value.strip()
+
+    class M:
+        def __init__(self, **kwds):
+            for k, v in kwds.items():
+                setattr(self, k, v)
+
+    variables = {
+        'python_version': '%d.%d'%(sys.version_info[0], sys.version_info[1]),
+        'python_full_version': sys.version.split()[0],
+        'os': M(
+            name=os.name,
+        ),
+        'sys': M(
+            platform=sys.platform,
+        ),
+        'platform': M(
+            version=platform.version(),
+            machine=platform.machine(),
+        ),
+    }
+
+    return bool(eval(value, variables, variables))
+
+
+def _opt_value(cfg, into, section, key, transform = None):
+    try:
+        v = cfg.get(section, key)
+        if transform != _as_lines and ';' in v:
+            v, marker = v.rsplit(';', 1)
+            if not eval_marker(marker):
+                return
+
+            v = v.strip()
+
+        if v:
+            if transform:
+                into[key] = transform(v.strip())
+            else:
+                into[key] = v.strip()
+
+    except (NoOptionError, NoSectionError):
+        pass
+
+def _as_bool(value):
+    if value.lower() in ('y', 'yes', 'on'):
+        return True
+    elif value.lower() in ('n', 'no', 'off'):
+        return False
+    elif value.isdigit():
+        return bool(int(value))
+    else:
+        raise ValueError(value)
+
+def _as_list(value):
+    return value.split()
+
+def _as_lines(value):
+    result = []
+    for v in value.splitlines():
+        if ';' in v:
+            v, marker = v.rsplit(';', 1)
+            if not eval_marker(marker):
+                continue
+
+            v = v.strip()
+            if v:
+                result.append(v)
+        else:
+            result.append(v)
+    return result
+
+def _map_requirement(value):
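+    # Map a distutils2-style requirement such as "altgraph (0.12)" to a
+    # setuptools-style specifier; a bare version prefix like "0.12" is
+    # expanded to ">=0.12,<0.13" by the branch below.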
+    m = re.search(r'(\S+)\s*(?:\((.*)\))?', value)
+    name = m.group(1)
+    version = m.group(2)
+
+    if version is None:
+        return name
+
+    else:
+        mapped = []
+        for v in version.split(','):
+            v = v.strip()
+            if v[0].isdigit():
+                # Checks for a specific version prefix
+                m = v.rsplit('.', 1)
+                mapped.append('>=%s,<%s.%s'%(
+                    v, m[0], int(m[1])+1))
+
+            else:
+                mapped.append(v)
+        return '%s %s'%(name, ','.join(mapped),)
+
+def _as_requires(value):
+    requires = []
+    for req in value.splitlines():
+        if ';' in req:
+            req, marker = req.rsplit(';', 1)
+            if not eval_marker(marker):
+                continue
+            req = req.strip()
+
+        if not req:
+            continue
+        requires.append(_map_requirement(req))
+    return requires
+
+def parse_setup_cfg():
+    cfg = RawConfigParser()
+    r = cfg.read([os.path.join(ROOTDIR, 'setup.cfg')])
+    if len(r) != 1:
+        print("Cannot read 'setup.cfg'")
+        sys.exit(1)
+
+    metadata = dict(
+            name        = cfg.get('metadata', 'name'),
+            version     = cfg.get('metadata', 'version'),
+            description = cfg.get('metadata', 'description'),
+    )
+
+    _opt_value(cfg, metadata, 'metadata', 'license')
+    _opt_value(cfg, metadata, 'metadata', 'maintainer')
+    _opt_value(cfg, metadata, 'metadata', 'maintainer_email')
+    _opt_value(cfg, metadata, 'metadata', 'author')
+    _opt_value(cfg, metadata, 'metadata', 'author_email')
+    _opt_value(cfg, metadata, 'metadata', 'url')
+    _opt_value(cfg, metadata, 'metadata', 'download_url')
+    _opt_value(cfg, metadata, 'metadata', 'classifiers', _as_lines)
+    _opt_value(cfg, metadata, 'metadata', 'platforms', _as_list)
+    _opt_value(cfg, metadata, 'metadata', 'packages', _as_list)
+    _opt_value(cfg, metadata, 'metadata', 'keywords', _as_list)
+
+    try:
+        v = cfg.get('metadata', 'requires-dist')
+
+    except (NoOptionError, NoSectionError):
+        pass
+
+    else:
+        requires = _as_requires(v)
+        if requires:
+            metadata['install_requires'] = requires
+
+    try:
+        v = cfg.get('metadata', 'requires-test')
+
+    except (NoOptionError, NoSectionError):
+        pass
+
+    else:
+        requires = _as_requires(v)
+        if requires:
+            metadata['tests_require'] = requires
+
+
+    try:
+        v = cfg.get('metadata', 'long_description_file')
+    except (NoOptionError, NoSectionError):
+        pass
+
+    else:
+        parts = []
+        for nm in v.split():
+            fp = open(nm, 'rU')
+            parts.append(fp.read())
+            fp.close()
+
+        metadata['long_description'] = '\n\n'.join(parts)
+
+
+    try:
+        v = cfg.get('metadata', 'zip-safe')
+    except (NoOptionError, NoSectionError):
+        pass
+
+    else:
+        metadata['zip_safe'] = _as_bool(v)
+
+    try:
+        v = cfg.get('metadata', 'console_scripts')
+    except (NoOptionError, NoSectionError):
+        pass
+
+    else:
+        if 'entry_points' not in metadata:
+            metadata['entry_points'] = {}
+
+        metadata['entry_points']['console_scripts'] = v.splitlines()
+
+    if sys.version_info[:2] <= (2,6):
+        # 'tests_require' is a list when present (see _as_requires above).
+        try:
+            metadata['tests_require'].append("unittest2")
+        except KeyError:
+            metadata['tests_require'] = ["unittest2"]
+
+    return metadata
+
+
+#
+#
+#
+# Bootstrapping setuptools/distribute, based on
+# a heavily modified version of distribute_setup.py
+#
+#
+#
+
+
+SETUPTOOLS_PACKAGE='setuptools'
+
+
+try:
+    import subprocess
+
+    def _python_cmd(*args):
+        args = (sys.executable,) + args
+        return subprocess.call(args) == 0
+
+except ImportError:
+    def _python_cmd(*args):
+        args = (sys.executable,) + args
+        new_args = []
+        for a in args:
+            new_args.append(a.replace("'", "'\"'\"'"))
+        return os.system(' '.join(new_args)) == 0
+
+
+try:
+    import json
+
+    def get_pypi_src_download(package):
+        url = 'https://pypi.python.org/pypi/%s/json'%(package,)
+        fp = urllib.urlopen(url)
+        try:
+            try:
+                data = fp.read()
+
+            finally:
+                fp.close()
+        except urllib.error:
+            raise RuntimeError("Cannot determine download link for %s"%(package,))
+
+        pkgdata = json.loads(data.decode('utf-8'))
+        if 'urls' not in pkgdata:
+            raise RuntimeError("Cannot determine download link for %s"%(package,))
+
+        for info in pkgdata['urls']:
+            if info['packagetype'] == 'sdist' and info['url'].endswith('tar.gz'):
+                return (info.get('md5_digest'), info['url'])
+
+        raise RuntimeError("Cannot determine downlink link for %s"%(package,))
+
+except ImportError:
+    # Python 2.5 compatibility: no JSON in the stdlib, but luckily JSON syntax
+    # is similar enough to Python's syntax to be able to abuse the Python compiler.
+
+    import _ast as ast
+
+    def get_pypi_src_download(package):
+        url = 'https://pypi.python.org/pypi/%s/json'%(package,)
+        fp = urllib.urlopen(url)
+        try:
+            try:
+                data = fp.read()
+
+            finally:
+                fp.close()
+        except urllib.error:
+            raise RuntimeError("Cannot determine download link for %s"%(package,))
+
+
+        a = compile(data, '-', 'eval', ast.PyCF_ONLY_AST)
+        if not isinstance(a, ast.Expression):
+            raise RuntimeError("Cannot determine download link for %s"%(package,))
+
+        a = a.body
+        if not isinstance(a, ast.Dict):
+            raise RuntimeError("Cannot determine download link for %s"%(package,))
+
+        for k, v in zip(a.keys, a.values):
+            if not isinstance(k, ast.Str):
+                raise RuntimeError("Cannot determine download link for %s"%(package,))
+
+            k = k.s
+            if k == 'urls':
+                a = v
+                break
+        else:
+            raise RuntimeError("PyPI JSON for %s doesn't contain URLs section"%(package,))
+
+        if not isinstance(a, ast.List):
+            raise RuntimeError("Cannot determine download link for %s"%(package,))
+
+        for info in v.elts:
+            if not isinstance(info, ast.Dict):
+                raise RuntimeError("Cannot determine download link for %s"%(package,))
+            url = None
+            packagetype = None
+            chksum = None
+
+            for k, v in zip(info.keys, info.values):
+                if not isinstance(k, ast.Str):
+                    raise RuntimeError("Cannot determine download link for %s"%(package,))
+
+                if k.s == 'url':
+                    if not isinstance(v, ast.Str):
+                        raise RuntimeError("Cannot determine download link for %s"%(package,))
+                    url = v.s
+
+                elif k.s == 'packagetype':
+                    if not isinstance(v, ast.Str):
+                        raise RuntimeError("Cannot determine download link for %s"%(package,))
+                    packagetype = v.s
+
+                elif k.s == 'md5_digest':
+                    if not isinstance(v, ast.Str):
+                        raise RuntimeError("Cannot determine download link for %s"%(package,))
+                    chksum = v.s
+
+            if url is not None and packagetype == 'sdist' and url.endswith('.tar.gz'):
+                return (chksum, url)
+
+        raise RuntimeError("Cannot determine download link for %s"%(package,))
+
+def _build_egg(egg, tarball, to_dir):
+    # extracting the tarball
+    tmpdir = tempfile.mkdtemp()
+    log.warn('Extracting in %s', tmpdir)
+    old_wd = os.getcwd()
+    try:
+        os.chdir(tmpdir)
+        tar = tarfile.open(tarball)
+        _extractall(tar)
+        tar.close()
+
+        # going in the directory
+        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
+        os.chdir(subdir)
+        log.warn('Now working in %s', subdir)
+
+        # building an egg
+        log.warn('Building a %s egg in %s', egg, to_dir)
+        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
+
+    finally:
+        os.chdir(old_wd)
+    # returning the result
+    log.warn(egg)
+    if not os.path.exists(egg):
+        raise IOError('Could not build the egg.')
+
+
+def _do_download(to_dir, packagename=SETUPTOOLS_PACKAGE):
+    tarball = download_setuptools(packagename, to_dir)
+    version = tarball.split('-')[-1][:-7]
+    egg = os.path.join(to_dir, '%s-%s-py%d.%d.egg'
+                       % (packagename, version, sys.version_info[0], sys.version_info[1]))
+    if not os.path.exists(egg):
+        _build_egg(egg, tarball, to_dir)
+    sys.path.insert(0, egg)
+    import setuptools
+    setuptools.bootstrap_install_from = egg
+
+
+def use_setuptools():
+    # making sure we use the absolute path
+    return _do_download(os.path.abspath(os.curdir))
+
+def download_setuptools(packagename, to_dir):
+    # making sure we use the absolute path
+    to_dir = os.path.abspath(to_dir)
+    try:
+        from urllib.request import urlopen
+    except ImportError:
+        from urllib2 import urlopen
+
+    chksum, url = get_pypi_src_download(packagename)
+    tgz_name = os.path.basename(url)
+    saveto = os.path.join(to_dir, tgz_name)
+
+    src = dst = None
+    if not os.path.exists(saveto):  # Avoid repeated downloads
+        try:
+            log.warn("Downloading %s", url)
+            src = urlopen(url)
+            # Read/write all in one block, so we don't create a corrupt file
+            # if the download is interrupted.
+            data = src.read()
+
+            if chksum is not None:
+                data_sum = md5(data).hexdigest()
+                if data_sum != chksum:
+                    raise RuntimeError("Downloading %s failed: corrupt checksum"%(url,))
+
+
+            dst = open(saveto, "wb")
+            dst.write(data)
+        finally:
+            if src:
+                src.close()
+            if dst:
+                dst.close()
+    return os.path.realpath(saveto)
+
+
+
+def _extractall(self, path=".", members=None):
+    """Extract all members from the archive to the current working
+       directory and set owner, modification time and permissions on
+       directories afterwards. `path' specifies a different directory
+       to extract to. `members' is optional and must be a subset of the
+       list returned by getmembers().
+    """
+    import copy
+    import operator
+    from tarfile import ExtractError
+    directories = []
+
+    if members is None:
+        members = self
+
+    for tarinfo in members:
+        if tarinfo.isdir():
+            # Extract directories with a safe mode.
+            directories.append(tarinfo)
+            tarinfo = copy.copy(tarinfo)
+            tarinfo.mode = 448 # decimal for oct 0700
+        self.extract(tarinfo, path)
+
+    # Reverse sort directories.
+    if sys.version_info < (2, 4):
+        def sorter(dir1, dir2):
+            return cmp(dir1.name, dir2.name)
+        directories.sort(sorter)
+        directories.reverse()
+    else:
+        directories.sort(key=operator.attrgetter('name'), reverse=True)
+
+    # Set correct owner, mtime and filemode on directories.
+    for tarinfo in directories:
+        dirpath = os.path.join(path, tarinfo.name)
+        try:
+            self.chown(tarinfo, dirpath)
+            self.utime(tarinfo, dirpath)
+            self.chmod(tarinfo, dirpath)
+        except ExtractError:
+            e = sys.exc_info()[1]
+            if self.errorlevel > 1:
+                raise
+            else:
+                self._dbg(1, "tarfile: %s" % e)
+
+
+#
+#
+#
+# Definitions of custom commands
+#
+#
+#
+
+try:
+    import setuptools
+
+except ImportError:
+    use_setuptools()
+
+from setuptools import setup
+
+try:
+    from distutils.core import PyPIRCCommand
+except ImportError:
+    PyPIRCCommand = None # Ancient python version
+
+from distutils.core import Command
+from distutils.errors  import DistutilsError
+from distutils import log
+
+if PyPIRCCommand is None:
+    class upload_docs (Command):
+        description = "upload sphinx documentation"
+        user_options = []
+
+        def initialize_options(self):
+            pass
+
+        def finalize_options(self):
+            pass
+
+        def run(self):
+            raise DistutilsError("not supported on this version of python")
+
+else:
+    class upload_docs (PyPIRCCommand):
+        description = "upload sphinx documentation"
+        user_options = PyPIRCCommand.user_options
+
+        def initialize_options(self):
+            PyPIRCCommand.initialize_options(self)
+            self.username = ''
+            self.password = ''
+
+
+        def finalize_options(self):
+            PyPIRCCommand.finalize_options(self)
+            config = self._read_pypirc()
+            if config != {}:
+                self.username = config['username']
+                self.password = config['password']
+
+
+        def run(self):
+            import subprocess
+            import shutil
+            import zipfile
+            import os
+            import socket
+            import urllib
+            import StringIO
+            from base64 import standard_b64encode
+            import httplib
+            import urlparse
+
+            # Extract the package name from distutils metadata
+            meta = self.distribution.metadata
+            name = meta.get_name()
+
+            # Run sphinx
+            if os.path.exists('doc/_build'):
+                shutil.rmtree('doc/_build')
+            os.mkdir('doc/_build')
+
+            p = subprocess.Popen(['make', 'html'],
+                cwd='doc')
+            exit = p.wait()
+            if exit != 0:
+                raise DistutilsError("sphinx-build failed")
+
+            # Collect sphinx output
+            if not os.path.exists('dist'):
+                os.mkdir('dist')
+            zf = zipfile.ZipFile('dist/%s-docs.zip'%(name,), 'w',
+                    compression=zipfile.ZIP_DEFLATED)
+
+            for toplevel, dirs, files in os.walk('doc/_build/html'):
+                for fn in files:
+                    fullname = os.path.join(toplevel, fn)
+                    relname = os.path.relpath(fullname, 'doc/_build/html')
+
+                    print ("%s -> %s"%(fullname, relname))
+
+                    zf.write(fullname, relname)
+
+            zf.close()
+
+            # Upload the results, this code is based on the distutils
+            # 'upload' command.
+            content = open('dist/%s-docs.zip'%(name,), 'rb').read()
+
+            data = {
+                ':action': 'doc_upload',
+                'name': name,
+                'content': ('%s-docs.zip'%(name,), content),
+            }
+            auth = "Basic " + standard_b64encode(self.username + ":" +
+                 self.password)
+
+
+            boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
+            sep_boundary = '\n--' + boundary
+            end_boundary = sep_boundary + '--'
+            body = StringIO.StringIO()
+            for key, value in data.items():
+                if not isinstance(value, list):
+                    value = [value]
+
+                for value in value:
+                    if isinstance(value, tuple):
+                        fn = ';filename="%s"'%(value[0])
+                        value = value[1]
+                    else:
+                        fn = ''
+
+                    body.write(sep_boundary)
+                    body.write('\nContent-Disposition: form-data; name="%s"'%key)
+                    body.write(fn)
+                    body.write("\n\n")
+                    body.write(value)
+
+            body.write(end_boundary)
+            body.write('\n')
+            body = body.getvalue()
+
+            self.announce("Uploading documentation to %s"%(self.repository,), log.INFO)
+
+            schema, netloc, url, params, query, fragments = \
+                    urlparse.urlparse(self.repository)
+
+
+            if schema == 'http':
+                http = httplib.HTTPConnection(netloc)
+            elif schema == 'https':
+                http = httplib.HTTPSConnection(netloc)
+            else:
+                raise AssertionError("unsupported schema "+schema)
+
+            data = ''
+            loglevel = log.INFO
+            try:
+                http.connect()
+                http.putrequest("POST", url)
+                http.putheader('Content-type',
+                    'multipart/form-data; boundary=%s'%boundary)
+                http.putheader('Content-length', str(len(body)))
+                http.putheader('Authorization', auth)
+                http.endheaders()
+                http.send(body)
+            except socket.error:
+                e = sys.exc_info()[1]
+                self.announce(str(e), log.ERROR)
+                return
+
+            r = http.getresponse()
+            if r.status in (200, 301):
+                self.announce('Upload succeeded (%s): %s' % (r.status, r.reason),
+                    log.INFO)
+            else:
+                self.announce('Upload failed (%s): %s' % (r.status, r.reason),
+                    log.ERROR)
+
+                print ('-'*75)
+                print (r.read())
+                print ('-'*75)
+
+
+def recursiveGlob(root, pathPattern):
+    """
+    Recursively look for files matching 'pathPattern'. Return a list
+    of matching files/directories.
+    """
+    result = []
+
+    for rootpath, dirnames, filenames in os.walk(root):
+        for fn in filenames:
+            if fnmatch(fn, pathPattern):
+                result.append(os.path.join(rootpath, fn))
+    return result
+
+
+def importExternalTestCases(unittest,
+        pathPattern="test_*.py", root=".", package=None):
+    """
+    Import all unittests in the PyObjC tree starting at 'root'
+    """
+
+    testFiles = recursiveGlob(root, pathPattern)
+    testModules = map(lambda x:x[len(root)+1:-3].replace('/', '.'), testFiles)
+    if package is not None:
+        testModules = [(package + '.' + m) for m in testModules]
+
+    suites = []
+
+    for modName in testModules:
+        try:
+            module = __import__(modName)
+        except ImportError:
+            print("SKIP %s: %s"%(modName, sys.exc_info()[1]))
+            continue
+
+        if '.' in modName:
+            for elem in modName.split('.')[1:]:
+                module = getattr(module, elem)
+
+        s = unittest.defaultTestLoader.loadTestsFromModule(module)
+        suites.append(s)
+
+    return unittest.TestSuite(suites)
+
+
+
+class test (Command):
+    description = "run test suite"
+    user_options = [
+        ('verbosity=', None, "print what tests are run"),
+    ]
+
+    def initialize_options(self):
+        self.verbosity='1'
+
+    def finalize_options(self):
+        if isinstance(self.verbosity, str):
+            self.verbosity = int(self.verbosity)
+
+
+    def cleanup_environment(self):
+        ei_cmd = self.get_finalized_command('egg_info')
+        egg_name = ei_cmd.egg_name.replace('-', '_')
+
+        to_remove =  []
+        for dirname in sys.path:
+            bn = os.path.basename(dirname)
+            if bn.startswith(egg_name + "-"):
+                to_remove.append(dirname)
+
+        for dirname in to_remove:
+            log.info("removing installed %r from sys.path before testing"%(
+                dirname,))
+            sys.path.remove(dirname)
+
+    def add_project_to_sys_path(self):
+        from pkg_resources import normalize_path, add_activation_listener
+        from pkg_resources import working_set, require
+
+        self.reinitialize_command('egg_info')
+        self.run_command('egg_info')
+        self.reinitialize_command('build_ext', inplace=1)
+        self.run_command('build_ext')
+
+
+        # Check if this distribution is already on sys.path
+        # and remove that version, this ensures that the right
+        # copy of the package gets tested.
+
+        self.__old_path = sys.path[:]
+        self.__old_modules = sys.modules.copy()
+
+
+        ei_cmd = self.get_finalized_command('egg_info')
+        sys.path.insert(0, normalize_path(ei_cmd.egg_base))
+        sys.path.insert(1, os.path.dirname(__file__))
+
+        # Strip the namespace packages defined in this distribution
+        # from sys.modules, needed to reset the search path for
+        # those modules.
+
+        nspkgs = getattr(self.distribution, 'namespace_packages')
+        if nspkgs is not None:
+            for nm in nspkgs:
+                del sys.modules[nm]
+
+        # Reset pkg_resources state:
+        add_activation_listener(lambda dist: dist.activate())
+        working_set.__init__()
+        require('%s==%s'%(ei_cmd.egg_name, ei_cmd.egg_version))
+
+    def remove_from_sys_path(self):
+        from pkg_resources import working_set
+        sys.path[:] = self.__old_path
+        sys.modules.clear()
+        sys.modules.update(self.__old_modules)
+        working_set.__init__()
+
+
+    def run(self):
+        import unittest
+
+        # Ensure that build directory is on sys.path (py3k)
+
+        self.cleanup_environment()
+        self.add_project_to_sys_path()
+
+        try:
+            meta = self.distribution.metadata
+            name = meta.get_name()
+            test_pkg = name + "_tests"
+            suite = importExternalTestCases(unittest,
+                    "test_*.py", test_pkg, test_pkg)
+
+            runner = unittest.TextTestRunner(verbosity=self.verbosity)
+            result = runner.run(suite)
+
+            # Print out summary. This is a structured format that
+            # should make it easy to use this information in scripts.
+            summary = dict(
+                count=result.testsRun,
+                fails=len(result.failures),
+                errors=len(result.errors),
+                xfails=len(getattr(result, 'expectedFailures', [])),
+                xpass=len(getattr(result, 'expectedSuccesses', [])),
+                skip=len(getattr(result, 'skipped', [])),
+            )
+            print("SUMMARY: %s"%(summary,))
+
+        finally:
+            self.remove_from_sys_path()
+
+#
+#
+#
+#  And finally run the setuptools main entry point.
+#
+#
+#
+
+metadata = parse_setup_cfg()
+
+setup(
+    cmdclass=dict(
+        upload_docs=upload_docs,
+        test=test,
+    ),
+    **metadata
+)
diff --git a/catapult/telemetry/third_party/chromite/LICENSE b/catapult/telemetry/third_party/chromite/LICENSE
new file mode 100644
index 0000000..0aa7fc9
--- /dev/null
+++ b/catapult/telemetry/third_party/chromite/LICENSE
@@ -0,0 +1,27 @@
+// Copyright (c) 2006-2009 The Chromium OS Authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//    * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//    * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//    * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/catapult/telemetry/third_party/chromite/README.chromium b/catapult/telemetry/third_party/chromite/README.chromium
new file mode 100644
index 0000000..b51feb8
--- /dev/null
+++ b/catapult/telemetry/third_party/chromite/README.chromium
@@ -0,0 +1,15 @@
+Name: chromite
+Short Name: chromite
+URL: https://chromium.googlesource.com/chromiumos/chromite
+Version: 0.0.2
+License: BSD
+License File: NOT_SHIPPED
+Security Critical: no
+
+Local Modifications: removed all files except chromite/ssh_keys/, which
+contains the keys used by telemetry.
+
+Description:
+This contains scripts used to build Chromium for Chromium OS
+('cros chrome-sdk'), as well as interact with the Chromium OS
+build system.
diff --git a/catapult/telemetry/third_party/chromite/ssh_keys/testing_rsa b/catapult/telemetry/third_party/chromite/ssh_keys/testing_rsa
new file mode 100644
index 0000000..d50a630
--- /dev/null
+++ b/catapult/telemetry/third_party/chromite/ssh_keys/testing_rsa
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEoAIBAAKCAQEAvsNpFdK5lb0GfKx+FgsrsM/2+aZVFYXHMPdvGtTz63ciRhq0
+Jnw7nln1SOcHraSz3/imECBg8NHIKV6rA+B9zbf7pZXEv20x5Ul0vrcPqYWC44PT
+tgsgvi8s0KZUZN93YlcjZ+Q7BjQ/tuwGSaLWLqJ7hnHALMJ3dbEM9fKBHQBCrG5H
+OaWD2gtXj7jp04M/WUnDDdemq/KMg6E9jcrJOiQ39IuTpas4hLQzVkKAKSrpl6MY
+2etHyoNarlWhcOwitArEDwf3WgnctwKstI/MTKB5BTpO2WXUNUv4kXzA+g8/l1al
+jIG13vtd9A/IV3KFVx/sLkkjuZ7z2rQXyNKuJwIBIwKCAQA79EWZJPh/hI0CnJyn
+16AEXp4T8nKDG2p9GpCiCGnq6u2Dvz/u1pZk97N9T+x4Zva0GvJc1vnlST7objW/
+Y8/ET8QeGSCT7x5PYDqiVspoemr3DCyYTKPkADKn+cLAngDzBXGHDTcfNP4U6xfr
+Qc5JK8BsFR8kApqSs/zCU4eqBtp2FVvPbgUOv3uUrFnjEuGs9rb1QZ0K6o08L4Cq
+N+e2nTysjp78blakZfqlurqTY6iJb0ImU2W3T8sV6w5GP1NT7eicXLO3WdIRB15a
+evogPeqtMo8GcO62wU/D4UCvq4GNEjvYOvFmPzXHvhTxsiWv5KEACtleBIEYmWHA
+POwrAoGBAOKgNRgxHL7r4bOmpLQcYK7xgA49OpikmrebXCQnZ/kZ3QsLVv1QdNMH
+Rx/ex7721g8R0oWslM14otZSMITCDCMWTYVBNM1bqYnUeEu5HagFwxjQ2tLuSs8E
+SBzEr96JLfhwuBhDH10sQqn+OQG1yj5acs4Pt3L4wlYwMx0vs1BxAoGBANd9Owro
+5ONiJXfKNaNY/cJYuLR+bzGeyp8oxToxgmM4UuA4hhDU7peg4sdoKJ4XjB9cKMCz
+ZGU5KHKKxNf95/Z7aywiIJEUE/xPRGNP6tngRunevp2QyvZf4pgvACvk1tl9B3HH
+7J5tY/GRkT4sQuZYpx3YnbdP5Y6Kx33BF7QXAoGAVCzghVQR/cVT1QNhvz29gs66
+iPIrtQnwUtNOHA6i9h+MnbPBOYRIpidGTaqEtKTTKisw79JjJ78X6TR4a9ML0oSg
+c1K71z9NmZgPbJU25qMN80ZCph3+h2f9hwc6AjLz0U5wQ4alP909VRVIX7iM8paf
+q59wBiHhyD3J16QAxhsCgYBu0rCmhmcV2rQu+kd4lCq7uJmBZZhFZ5tny9MlPgiK
+zIJkr1rkFbyIfqCDzyrU9irOTKc+iCUA25Ek9ujkHC4m/aTU3lnkNjYp/OFXpXF3
+XWZMY+0Ak5uUpldG85mwLIvATu3ivpbyZCTFYM5afSm4StmaUiU5tA+oZKEcGily
+jwKBgBdFLg+kTm877lcybQ04G1kIRMf5vAXcConzBt8ry9J+2iX1ddlu2K2vMroD
+1cP/U/EmvoCXSOGuetaI4UNQwE/rGCtkpvNj5y4twVLh5QufSOl49V0Ut0mwjPXw
+HfN/2MoO07vQrjgsFylvrw9A79xItABaqKndlmqlwMZWc9Ne
+-----END RSA PRIVATE KEY-----
diff --git a/catapult/telemetry/third_party/chromite/ssh_keys/testing_rsa.pub b/catapult/telemetry/third_party/chromite/ssh_keys/testing_rsa.pub
new file mode 100644
index 0000000..7a4d033
--- /dev/null
+++ b/catapult/telemetry/third_party/chromite/ssh_keys/testing_rsa.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAvsNpFdK5lb0GfKx+FgsrsM/2+aZVFYXHMPdvGtTz63ciRhq0Jnw7nln1SOcHraSz3/imECBg8NHIKV6rA+B9zbf7pZXEv20x5Ul0vrcPqYWC44PTtgsgvi8s0KZUZN93YlcjZ+Q7BjQ/tuwGSaLWLqJ7hnHALMJ3dbEM9fKBHQBCrG5HOaWD2gtXj7jp04M/WUnDDdemq/KMg6E9jcrJOiQ39IuTpas4hLQzVkKAKSrpl6MY2etHyoNarlWhcOwitArEDwf3WgnctwKstI/MTKB5BTpO2WXUNUv4kXzA+g8/l1aljIG13vtd9A/IV3KFVx/sLkkjuZ7z2rQXyNKuJw== ChromeOS test key
diff --git a/catapult/telemetry/third_party/flot/LICENSE.txt b/catapult/telemetry/third_party/flot/LICENSE.txt
new file mode 100644
index 0000000..67f4625
--- /dev/null
+++ b/catapult/telemetry/third_party/flot/LICENSE.txt
@@ -0,0 +1,22 @@
+Copyright (c) 2007-2013 IOLA and Ole Laursen
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
diff --git a/catapult/telemetry/third_party/flot/README.chromium b/catapult/telemetry/third_party/flot/README.chromium
new file mode 100644
index 0000000..6bbf71a
--- /dev/null
+++ b/catapult/telemetry/third_party/flot/README.chromium
@@ -0,0 +1,10 @@
+Name: Flot Javascript/JQuery library for creating graphs
+Short Name: Flot
+URL: http://www.flotcharts.org
+Version: 0.8.1
+License: MIT
+License File: NOT_SHIPPED
+Security Critical: yes
+Description: Flot is used in the performance monitor web UI and in the Deep
+Memory Profiler visualizer to draw charts of performance metrics.
+Local Modifications: All files not needed by telemetry were removed; only
+jquery.flot.min.js is kept.
diff --git a/catapult/telemetry/third_party/flot/jquery.flot.min.js b/catapult/telemetry/third_party/flot/jquery.flot.min.js
new file mode 100644
index 0000000..3706512
--- /dev/null
+++ b/catapult/telemetry/third_party/flot/jquery.flot.min.js
@@ -0,0 +1,29 @@
+/* Javascript plotting library for jQuery, version 0.8.1.
+
+Copyright (c) 2007-2013 IOLA and Ole Laursen.
+Licensed under the MIT license.
+
+*/// first an inline dependency, jquery.colorhelpers.js, we inline it here
+// for convenience
+/* Plugin for jQuery for working with colors.
+ *
+ * Version 1.1.
+ *
+ * Inspiration from jQuery color animation plugin by John Resig.
+ *
+ * Released under the MIT license by Ole Laursen, October 2009.
+ *
+ * Examples:
+ *
+ *   $.color.parse("#fff").scale('rgb', 0.25).add('a', -0.5).toString()
+ *   var c = $.color.extract($("#mydiv"), 'background-color');
+ *   console.log(c.r, c.g, c.b, c.a);
+ *   $.color.make(100, 50, 25, 0.4).toString() // returns "rgba(100,50,25,0.4)"
+ *
+ * Note that .scale() and .add() return the same modified object
+ * instead of making a new one.
+ *
+ * V. 1.1: Fix error handling so e.g. parsing an empty string does
+ * produce a color rather than just crashing.
+ */(function(e){e.color={},e.color.make=function(t,n,r,i){var s={};return s.r=t||0,s.g=n||0,s.b=r||0,s.a=i!=null?i:1,s.add=function(e,t){for(var n=0;n<e.length;++n)s[e.charAt(n)]+=t;return s.normalize()},s.scale=function(e,t){for(var n=0;n<e.length;++n)s[e.charAt(n)]*=t;return s.normalize()},s.toString=function(){return s.a>=1?"rgb("+[s.r,s.g,s.b].join(",")+")":"rgba("+[s.r,s.g,s.b,s.a].join(",")+")"},s.normalize=function(){function e(e,t,n){return t<e?e:t>n?n:t}return s.r=e(0,parseInt(s.r),255),s.g=e(0,parseInt(s.g),255),s.b=e(0,parseInt(s.b),255),s.a=e(0,s.a,1),s},s.clone=function(){return e.color.make(s.r,s.b,s.g,s.a)},s.normalize()},e.color.extract=function(t,n){var r;do{r=t.css(n).toLowerCase();if(r!=""&&r!="transparent")break;t=t.parent()}while(!e.nodeName(t.get(0),"body"));return r=="rgba(0, 0, 0, 0)"&&(r="transparent"),e.color.parse(r)},e.color.parse=function(n){var r,i=e.color.make;if(r=/rgb\(\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*\)/.exec(n))return i(parseInt(r[1],10),parseInt(r[2],10),parseInt(r[3],10));if(r=/rgba\(\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]+(?:\.[0-9]+)?)\s*\)/.exec(n))return i(parseInt(r[1],10),parseInt(r[2],10),parseInt(r[3],10),parseFloat(r[4]));if(r=/rgb\(\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*\)/.exec(n))return i(parseFloat(r[1])*2.55,parseFloat(r[2])*2.55,parseFloat(r[3])*2.55);if(r=/rgba\(\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\s*\)/.exec(n))return i(parseFloat(r[1])*2.55,parseFloat(r[2])*2.55,parseFloat(r[3])*2.55,parseFloat(r[4]));if(r=/#([a-fA-F0-9]{2})([a-fA-F0-9]{2})([a-fA-F0-9]{2})/.exec(n))return i(parseInt(r[1],16),parseInt(r[2],16),parseInt(r[3],16));if(r=/#([a-fA-F0-9])([a-fA-F0-9])([a-fA-F0-9])/.exec(n))return i(parseInt(r[1]+r[1],16),parseInt(r[2]+r[2],16),parseInt(r[3]+r[3],16));var s=e.trim(n).toLowerCase();return s=="transparent"?i(255,255,255,0):(r=t[s]||[0,0,0],i(r[0],r[1],r[2]))};var t={aqua:[0,255,255],azure:[240,255,255],beige:[245,245,220],black:[0,0,0],blue:[0,0,255],brown:[165,42,42],cyan:[0,255,255],darkblue:[0,0,139],darkcyan:[0,139,139],darkgrey:[169,169,169],darkgreen:[0,100,0],darkkhaki:[189,183,107],darkmagenta:[139,0,139],darkolivegreen:[85,107,47],darkorange:[255,140,0],darkorchid:[153,50,204],darkred:[139,0,0],darksalmon:[233,150,122],darkviolet:[148,0,211],fuchsia:[255,0,255],gold:[255,215,0],green:[0,128,0],indigo:[75,0,130],khaki:[240,230,140],lightblue:[173,216,230],lightcyan:[224,255,255],lightgreen:[144,238,144],lightgrey:[211,211,211],lightpink:[255,182,193],lightyellow:[255,255,224],lime:[0,255,0],magenta:[255,0,255],maroon:[128,0,0],navy:[0,0,128],olive:[128,128,0],orange:[255,165,0],pink:[255,192,203],purple:[128,0,128],violet:[128,0,128],red:[255,0,0],silver:[192,192,192],white:[255,255,255],yellow:[255,255,0]}})(jQuery),function(e){function n(t,n){var r=n.children("."+t)[0];if(r==null){r=document.createElement("canvas"),r.className=t,e(r).css({direction:"ltr",position:"absolute",left:0,top:0}).appendTo(n);if(!r.getContext){if(!window.G_vmlCanvasManager)throw new Error("Canvas is not available. 
If you're using IE with a fall-back such as Excanvas, then there's either a mistake in your conditional include, or the page has no DOCTYPE and is rendering in Quirks Mode.");r=window.G_vmlCanvasManager.initElement(r)}}this.element=r;var i=this.context=r.getContext("2d"),s=window.devicePixelRatio||1,o=i.webkitBackingStorePixelRatio||i.mozBackingStorePixelRatio||i.msBackingStorePixelRatio||i.oBackingStorePixelRatio||i.backingStorePixelRatio||1;this.pixelRatio=s/o,this.resize(n.width(),n.height()),this.textContainer=null,this.text={},this._textCache={}}function r(t,r,s,o){function E(e,t){t=[w].concat(t);for(var n=0;n<e.length;++n)e[n].apply(this,t)}function S(){var t={Canvas:n};for(var r=0;r<o.length;++r){var i=o[r];i.init(w,t),i.options&&e.extend(!0,a,i.options)}}function x(n){e.extend(!0,a,n),n&&n.colors&&(a.colors=n.colors),a.xaxis.color==null&&(a.xaxis.color=e.color.parse(a.grid.color).scale("a",.22).toString()),a.yaxis.color==null&&(a.yaxis.color=e.color.parse(a.grid.color).scale("a",.22).toString()),a.xaxis.tickColor==null&&(a.xaxis.tickColor=a.grid.tickColor||a.xaxis.color),a.yaxis.tickColor==null&&(a.yaxis.tickColor=a.grid.tickColor||a.yaxis.color),a.grid.borderColor==null&&(a.grid.borderColor=a.grid.color),a.grid.tickColor==null&&(a.grid.tickColor=e.color.parse(a.grid.color).scale("a",.22).toString());var r,i,s,o={style:t.css("font-style"),size:Math.round(.8*(+t.css("font-size").replace("px","")||13)),variant:t.css("font-variant"),weight:t.css("font-weight"),family:t.css("font-family")};o.lineHeight=o.size*1.15,s=a.xaxes.length||1;for(r=0;r<s;++r)i=a.xaxes[r],i&&!i.tickColor&&(i.tickColor=i.color),i=e.extend(!0,{},a.xaxis,i),a.xaxes[r]=i,i.font&&(i.font=e.extend({},o,i.font),i.font.color||(i.font.color=i.color));s=a.yaxes.length||1;for(r=0;r<s;++r)i=a.yaxes[r],i&&!i.tickColor&&(i.tickColor=i.color),i=e.extend(!0,{},a.yaxis,i),a.yaxes[r]=i,i.font&&(i.font=e.extend({},o,i.font),i.font.color||(i.font.color=i.color));a.xaxis.noTicks&&a.xaxis.ticks==null&&(a.xaxis.ticks=a.xaxis.noTicks),a.yaxis.noTicks&&a.yaxis.ticks==null&&(a.yaxis.ticks=a.yaxis.noTicks),a.x2axis&&(a.xaxes[1]=e.extend(!0,{},a.xaxis,a.x2axis),a.xaxes[1].position="top"),a.y2axis&&(a.yaxes[1]=e.extend(!0,{},a.yaxis,a.y2axis),a.yaxes[1].position="right"),a.grid.coloredAreas&&(a.grid.markings=a.grid.coloredAreas),a.grid.coloredAreasColor&&(a.grid.markingsColor=a.grid.coloredAreasColor),a.lines&&e.extend(!0,a.series.lines,a.lines),a.points&&e.extend(!0,a.series.points,a.points),a.bars&&e.extend(!0,a.series.bars,a.bars),a.shadowSize!=null&&(a.series.shadowSize=a.shadowSize),a.highlightColor!=null&&(a.series.highlightColor=a.highlightColor);for(r=0;r<a.xaxes.length;++r)O(d,r+1).options=a.xaxes[r];for(r=0;r<a.yaxes.length;++r)O(v,r+1).options=a.yaxes[r];for(var u in b)a.hooks[u]&&a.hooks[u].length&&(b[u]=b[u].concat(a.hooks[u]));E(b.processOptions,[a])}function T(e){u=N(e),M(),_()}function N(t){var n=[];for(var r=0;r<t.length;++r){var i=e.extend(!0,{},a.series);t[r].data!=null?(i.data=t[r].data,delete t[r].data,e.extend(!0,i,t[r]),t[r].data=i.data):i.data=t[r],n.push(i)}return n}function C(e,t){var n=e[t+"axis"];return typeof n=="object"&&(n=n.n),typeof n!="number"&&(n=1),n}function k(){return e.grep(d.concat(v),function(e){return e})}function L(e){var t={},n,r;for(n=0;n<d.length;++n)r=d[n],r&&r.used&&(t["x"+r.n]=r.c2p(e.left));for(n=0;n<v.length;++n)r=v[n],r&&r.used&&(t["y"+r.n]=r.c2p(e.top));return t.x1!==undefined&&(t.x=t.x1),t.y1!==undefined&&(t.y=t.y1),t}function A(e){var 
t={},n,r,i;for(n=0;n<d.length;++n){r=d[n];if(r&&r.used){i="x"+r.n,e[i]==null&&r.n==1&&(i="x");if(e[i]!=null){t.left=r.p2c(e[i]);break}}}for(n=0;n<v.length;++n){r=v[n];if(r&&r.used){i="y"+r.n,e[i]==null&&r.n==1&&(i="y");if(e[i]!=null){t.top=r.p2c(e[i]);break}}}return t}function O(t,n){return t[n-1]||(t[n-1]={n:n,direction:t==d?"x":"y",options:e.extend(!0,{},t==d?a.xaxis:a.yaxis)}),t[n-1]}function M(){var t=u.length,n=-1,r;for(r=0;r<u.length;++r){var i=u[r].color;i!=null&&(t--,typeof i=="number"&&i>n&&(n=i))}t<=n&&(t=n+1);var s,o=[],f=a.colors,l=f.length,c=0;for(r=0;r<t;r++)s=e.color.parse(f[r%l]||"#666"),r%l==0&&r&&(c>=0?c<.5?c=-c-.2:c=0:c=-c),o[r]=s.scale("rgb",1+c);var h=0,p;for(r=0;r<u.length;++r){p=u[r],p.color==null?(p.color=o[h].toString(),++h):typeof p.color=="number"&&(p.color=o[p.color].toString());if(p.lines.show==null){var m,g=!0;for(m in p)if(p[m]&&p[m].show){g=!1;break}g&&(p.lines.show=!0)}p.lines.zero==null&&(p.lines.zero=!!p.lines.fill),p.xaxis=O(d,C(p,"x")),p.yaxis=O(v,C(p,"y"))}}function _(){function x(e,t,n){t<e.datamin&&t!=-r&&(e.datamin=t),n>e.datamax&&n!=r&&(e.datamax=n)}var t=Number.POSITIVE_INFINITY,n=Number.NEGATIVE_INFINITY,r=Number.MAX_VALUE,i,s,o,a,f,l,c,h,p,d,v,m,g,y,w,S;e.each(k(),function(e,r){r.datamin=t,r.datamax=n,r.used=!1});for(i=0;i<u.length;++i)l=u[i],l.datapoints={points:[]},E(b.processRawData,[l,l.data,l.datapoints]);for(i=0;i<u.length;++i){l=u[i],w=l.data,S=l.datapoints.format;if(!S){S=[],S.push({x:!0,number:!0,required:!0}),S.push({y:!0,number:!0,required:!0});if(l.bars.show||l.lines.show&&l.lines.fill){var T=!!(l.bars.show&&l.bars.zero||l.lines.show&&l.lines.zero);S.push({y:!0,number:!0,required:!1,defaultValue:0,autoscale:T}),l.bars.horizontal&&(delete S[S.length-1].y,S[S.length-1].x=!0)}l.datapoints.format=S}if(l.datapoints.pointsize!=null)continue;l.datapoints.pointsize=S.length,h=l.datapoints.pointsize,c=l.datapoints.points;var N=l.lines.show&&l.lines.steps;l.xaxis.used=l.yaxis.used=!0;for(s=o=0;s<w.length;++s,o+=h){y=w[s];var C=y==null;if(!C)for(a=0;a<h;++a)m=y[a],g=S[a],g&&(g.number&&m!=null&&(m=+m,isNaN(m)?m=null:m==Infinity?m=r:m==-Infinity&&(m=-r)),m==null&&(g.required&&(C=!0),g.defaultValue!=null&&(m=g.defaultValue))),c[o+a]=m;if(C)for(a=0;a<h;++a)m=c[o+a],m!=null&&(g=S[a],g.autoscale&&(g.x&&x(l.xaxis,m,m),g.y&&x(l.yaxis,m,m))),c[o+a]=null;else if(N&&o>0&&c[o-h]!=null&&c[o-h]!=c[o]&&c[o-h+1]!=c[o+1]){for(a=0;a<h;++a)c[o+h+a]=c[o+a];c[o+1]=c[o-h+1],o+=h}}}for(i=0;i<u.length;++i)l=u[i],E(b.processDatapoints,[l,l.datapoints]);for(i=0;i<u.length;++i){l=u[i],c=l.datapoints.points,h=l.datapoints.pointsize,S=l.datapoints.format;var L=t,A=t,O=n,M=n;for(s=0;s<c.length;s+=h){if(c[s]==null)continue;for(a=0;a<h;++a){m=c[s+a],g=S[a];if(!g||g.autoscale===!1||m==r||m==-r)continue;g.x&&(m<L&&(L=m),m>O&&(O=m)),g.y&&(m<A&&(A=m),m>M&&(M=m))}}if(l.bars.show){var _;switch(l.bars.align){case"left":_=0;break;case"right":_=-l.bars.barWidth;break;case"center":_=-l.bars.barWidth/2;break;default:throw new Error("Invalid bar alignment: "+l.bars.align)}l.bars.horizontal?(A+=_,M+=_+l.bars.barWidth):(L+=_,O+=_+l.bars.barWidth)}x(l.xaxis,L,O),x(l.yaxis,A,M)}e.each(k(),function(e,r){r.datamin==t&&(r.datamin=null),r.datamax==n&&(r.datamax=null)})}function D(){t.css("padding",0).children(":not(.flot-base,.flot-overlay)").remove(),t.css("position")=="static"&&t.css("position","relative"),f=new n("flot-base",t),l=new n("flot-overlay",t),h=f.context,p=l.context,c=e(l.element).unbind();var r=t.data("plot");r&&(r.shutdown(),l.clear()),t.data("plot",w)}function 
P(){a.grid.hoverable&&(c.mousemove(at),c.bind("mouseleave",ft)),a.grid.clickable&&c.click(lt),E(b.bindEvents,[c])}function H(){ot&&clearTimeout(ot),c.unbind("mousemove",at),c.unbind("mouseleave",ft),c.unbind("click",lt),E(b.shutdown,[c])}function B(e){function t(e){return e}var n,r,i=e.options.transform||t,s=e.options.inverseTransform;e.direction=="x"?(n=e.scale=g/Math.abs(i(e.max)-i(e.min)),r=Math.min(i(e.max),i(e.min))):(n=e.scale=y/Math.abs(i(e.max)-i(e.min)),n=-n,r=Math.max(i(e.max),i(e.min))),i==t?e.p2c=function(e){return(e-r)*n}:e.p2c=function(e){return(i(e)-r)*n},s?e.c2p=function(e){return s(r+e/n)}:e.c2p=function(e){return r+e/n}}function j(e){var t=e.options,n=e.ticks||[],r=t.labelWidth||0,i=t.labelHeight||0,s=r||e.direction=="x"?Math.floor(f.width/(n.length||1)):null;legacyStyles=e.direction+"Axis "+e.direction+e.n+"Axis",layer="flot-"+e.direction+"-axis flot-"+e.direction+e.n+"-axis "+legacyStyles,font=t.font||"flot-tick-label tickLabel";for(var o=0;o<n.length;++o){var u=n[o];if(!u.label)continue;var a=f.getTextInfo(layer,u.label,font,null,s);r=Math.max(r,a.width),i=Math.max(i,a.height)}e.labelWidth=t.labelWidth||r,e.labelHeight=t.labelHeight||i}function F(t){var n=t.labelWidth,r=t.labelHeight,i=t.options.position,s=t.options.tickLength,o=a.grid.axisMargin,u=a.grid.labelMargin,l=t.direction=="x"?d:v,c,h,p=e.grep(l,function(e){return e&&e.options.position==i&&e.reserveSpace});e.inArray(t,p)==p.length-1&&(o=0);if(s==null){var g=e.grep(l,function(e){return e&&e.reserveSpace});h=e.inArray(t,g)==0,h?s="full":s=5}isNaN(+s)||(u+=+s),t.direction=="x"?(r+=u,i=="bottom"?(m.bottom+=r+o,t.box={top:f.height-m.bottom,height:r}):(t.box={top:m.top+o,height:r},m.top+=r+o)):(n+=u,i=="left"?(t.box={left:m.left+o,width:n},m.left+=n+o):(m.right+=n+o,t.box={left:f.width-m.right,width:n})),t.position=i,t.tickLength=s,t.box.padding=u,t.innermost=h}function I(e){e.direction=="x"?(e.box.left=m.left-e.labelWidth/2,e.box.width=f.width-m.left-m.right+e.labelWidth):(e.box.top=m.top-e.labelHeight/2,e.box.height=f.height-m.bottom-m.top+e.labelHeight)}function q(){var t=a.grid.minBorderMargin,n={x:0,y:0},r,i;if(t==null){t=0;for(r=0;r<u.length;++r)t=Math.max(t,2*(u[r].points.radius+u[r].points.lineWidth/2))}n.x=n.y=Math.ceil(t),e.each(k(),function(e,t){var r=t.direction;t.reserveSpace&&(n[r]=Math.ceil(Math.max(n[r],(r=="x"?t.labelWidth:t.labelHeight)/2)))}),m.left=Math.max(n.x,m.left),m.right=Math.max(n.x,m.right),m.top=Math.max(n.y,m.top),m.bottom=Math.max(n.y,m.bottom)}function R(){var t,n=k(),r=a.grid.show;for(var i in m){var s=a.grid.margin||0;m[i]=typeof s=="number"?s:s[i]||0}E(b.processOffset,[m]);for(var i in m)typeof a.grid.borderWidth=="object"?m[i]+=r?a.grid.borderWidth[i]:0:m[i]+=r?a.grid.borderWidth:0;e.each(n,function(e,t){t.show=t.options.show,t.show==null&&(t.show=t.used),t.reserveSpace=t.show||t.options.reserveSpace,U(t)});if(r){var o=e.grep(n,function(e){return e.reserveSpace});e.each(o,function(e,t){z(t),W(t),X(t,t.ticks),j(t)});for(t=o.length-1;t>=0;--t)F(o[t]);q(),e.each(o,function(e,t){I(t)})}g=f.width-m.left-m.right,y=f.height-m.bottom-m.top,e.each(n,function(e,t){B(t)}),r&&G(),it()}function U(e){var t=e.options,n=+(t.min!=null?t.min:e.datamin),r=+(t.max!=null?t.max:e.datamax),i=r-n;if(i==0){var s=r==0?1:.01;t.min==null&&(n-=s);if(t.max==null||t.min!=null)r+=s}else{var o=t.autoscaleMargin;o!=null&&(t.min==null&&(n-=i*o,n<0&&e.datamin!=null&&e.datamin>=0&&(n=0)),t.max==null&&(r+=i*o,r>0&&e.datamax!=null&&e.datamax<=0&&(r=0)))}e.min=n,e.max=r}function z(t){var n=t.options,r;typeof 
n.ticks=="number"&&n.ticks>0?r=n.ticks:r=.3*Math.sqrt(t.direction=="x"?f.width:f.height);var s=(t.max-t.min)/r,o=-Math.floor(Math.log(s)/Math.LN10),u=n.tickDecimals;u!=null&&o>u&&(o=u);var a=Math.pow(10,-o),l=s/a,c;l<1.5?c=1:l<3?(c=2,l>2.25&&(u==null||o+1<=u)&&(c=2.5,++o)):l<7.5?c=5:c=10,c*=a,n.minTickSize!=null&&c<n.minTickSize&&(c=n.minTickSize),t.delta=s,t.tickDecimals=Math.max(0,u!=null?u:o),t.tickSize=n.tickSize||c;if(n.mode=="time"&&!t.tickGenerator)throw new Error("Time mode requires the flot.time plugin.");t.tickGenerator||(t.tickGenerator=function(e){var t=[],n=i(e.min,e.tickSize),r=0,s=Number.NaN,o;do o=s,s=n+r*e.tickSize,t.push(s),++r;while(s<e.max&&s!=o);return t},t.tickFormatter=function(e,t){var n=t.tickDecimals?Math.pow(10,t.tickDecimals):1,r=""+Math.round(e*n)/n;if(t.tickDecimals!=null){var i=r.indexOf("."),s=i==-1?0:r.length-i-1;if(s<t.tickDecimals)return(s?r:r+".")+(""+n).substr(1,t.tickDecimals-s)}return r}),e.isFunction(n.tickFormatter)&&(t.tickFormatter=function(e,t){return""+n.tickFormatter(e,t)});if(n.alignTicksWithAxis!=null){var h=(t.direction=="x"?d:v)[n.alignTicksWithAxis-1];if(h&&h.used&&h!=t){var p=t.tickGenerator(t);p.length>0&&(n.min==null&&(t.min=Math.min(t.min,p[0])),n.max==null&&p.length>1&&(t.max=Math.max(t.max,p[p.length-1]))),t.tickGenerator=function(e){var t=[],n,r;for(r=0;r<h.ticks.length;++r)n=(h.ticks[r].v-h.min)/(h.max-h.min),n=e.min+n*(e.max-e.min),t.push(n);return t};if(!t.mode&&n.tickDecimals==null){var m=Math.max(0,-Math.floor(Math.log(t.delta)/Math.LN10)+1),g=t.tickGenerator(t);g.length>1&&/\..*0$/.test((g[1]-g[0]).toFixed(m))||(t.tickDecimals=m)}}}}function W(t){var n=t.options.ticks,r=[];n==null||typeof n=="number"&&n>0?r=t.tickGenerator(t):n&&(e.isFunction(n)?r=n(t):r=n);var i,s;t.ticks=[];for(i=0;i<r.length;++i){var o=null,u=r[i];typeof u=="object"?(s=+u[0],u.length>1&&(o=u[1])):s=+u,o==null&&(o=t.tickFormatter(s,t)),isNaN(s)||t.ticks.push({v:s,label:o})}}function X(e,t){e.options.autoscaleMargin&&t.length>0&&(e.options.min==null&&(e.min=Math.min(e.min,t[0].v)),e.options.max==null&&t.length>1&&(e.max=Math.max(e.max,t[t.length-1].v)))}function V(){f.clear(),E(b.drawBackground,[h]);var e=a.grid;e.show&&e.backgroundColor&&K(),e.show&&!e.aboveData&&Q();for(var t=0;t<u.length;++t)E(b.drawSeries,[h,u[t]]),Y(u[t]);E(b.draw,[h]),e.show&&e.aboveData&&Q(),f.render(),ht()}function J(e,t){var n,r,i,s,o=k();for(var u=0;u<o.length;++u){n=o[u];if(n.direction==t){s=t+n.n+"axis",!e[s]&&n.n==1&&(s=t+"axis");if(e[s]){r=e[s].from,i=e[s].to;break}}}e[s]||(n=t=="x"?d[0]:v[0],r=e[t+"1"],i=e[t+"2"]);if(r!=null&&i!=null&&r>i){var a=r;r=i,i=a}return{from:r,to:i,axis:n}}function K(){h.save(),h.translate(m.left,m.top),h.fillStyle=bt(a.grid.backgroundColor,y,0,"rgba(255, 255, 255, 0)"),h.fillRect(0,0,g,y),h.restore()}function Q(){var t,n,r,i;h.save(),h.translate(m.left,m.top);var s=a.grid.markings;if(s){e.isFunction(s)&&(n=w.getAxes(),n.xmin=n.xaxis.min,n.xmax=n.xaxis.max,n.ymin=n.yaxis.min,n.ymax=n.yaxis.max,s=s(n));for(t=0;t<s.length;++t){var 
o=s[t],u=J(o,"x"),f=J(o,"y");u.from==null&&(u.from=u.axis.min),u.to==null&&(u.to=u.axis.max),f.from==null&&(f.from=f.axis.min),f.to==null&&(f.to=f.axis.max);if(u.to<u.axis.min||u.from>u.axis.max||f.to<f.axis.min||f.from>f.axis.max)continue;u.from=Math.max(u.from,u.axis.min),u.to=Math.min(u.to,u.axis.max),f.from=Math.max(f.from,f.axis.min),f.to=Math.min(f.to,f.axis.max);if(u.from==u.to&&f.from==f.to)continue;u.from=u.axis.p2c(u.from),u.to=u.axis.p2c(u.to),f.from=f.axis.p2c(f.from),f.to=f.axis.p2c(f.to),u.from==u.to||f.from==f.to?(h.beginPath(),h.strokeStyle=o.color||a.grid.markingsColor,h.lineWidth=o.lineWidth||a.grid.markingsLineWidth,h.moveTo(u.from,f.from),h.lineTo(u.to,f.to),h.stroke()):(h.fillStyle=o.color||a.grid.markingsColor,h.fillRect(u.from,f.to,u.to-u.from,f.from-f.to))}}n=k(),r=a.grid.borderWidth;for(var l=0;l<n.length;++l){var c=n[l],p=c.box,d=c.tickLength,v,b,E,S;if(!c.show||c.ticks.length==0)continue;h.lineWidth=1,c.direction=="x"?(v=0,d=="full"?b=c.position=="top"?0:y:b=p.top-m.top+(c.position=="top"?p.height:0)):(b=0,d=="full"?v=c.position=="left"?0:g:v=p.left-m.left+(c.position=="left"?p.width:0)),c.innermost||(h.strokeStyle=c.options.color,h.beginPath(),E=S=0,c.direction=="x"?E=g+1:S=y+1,h.lineWidth==1&&(c.direction=="x"?b=Math.floor(b)+.5:v=Math.floor(v)+.5),h.moveTo(v,b),h.lineTo(v+E,b+S),h.stroke()),h.strokeStyle=c.options.tickColor,h.beginPath();for(t=0;t<c.ticks.length;++t){var x=c.ticks[t].v;E=S=0;if(isNaN(x)||x<c.min||x>c.max||d=="full"&&(typeof r=="object"&&r[c.position]>0||r>0)&&(x==c.min||x==c.max))continue;c.direction=="x"?(v=c.p2c(x),S=d=="full"?-y:d,c.position=="top"&&(S=-S)):(b=c.p2c(x),E=d=="full"?-g:d,c.position=="left"&&(E=-E)),h.lineWidth==1&&(c.direction=="x"?v=Math.floor(v)+.5:b=Math.floor(b)+.5),h.moveTo(v,b),h.lineTo(v+E,b+S)}h.stroke()}r&&(i=a.grid.borderColor,typeof r=="object"||typeof i=="object"?(typeof r!="object"&&(r={top:r,right:r,bottom:r,left:r}),typeof i!="object"&&(i={top:i,right:i,bottom:i,left:i}),r.top>0&&(h.strokeStyle=i.top,h.lineWidth=r.top,h.beginPath(),h.moveTo(0-r.left,0-r.top/2),h.lineTo(g,0-r.top/2),h.stroke()),r.right>0&&(h.strokeStyle=i.right,h.lineWidth=r.right,h.beginPath(),h.moveTo(g+r.right/2,0-r.top),h.lineTo(g+r.right/2,y),h.stroke()),r.bottom>0&&(h.strokeStyle=i.bottom,h.lineWidth=r.bottom,h.beginPath(),h.moveTo(g+r.right,y+r.bottom/2),h.lineTo(0,y+r.bottom/2),h.stroke()),r.left>0&&(h.strokeStyle=i.left,h.lineWidth=r.left,h.beginPath(),h.moveTo(0-r.left/2,y+r.bottom),h.lineTo(0-r.left/2,0),h.stroke())):(h.lineWidth=r,h.strokeStyle=a.grid.borderColor,h.strokeRect(-r/2,-r/2,g+r,y+r))),h.restore()}function G(){e.each(k(),function(e,t){if(!t.show||t.ticks.length==0)return;var n=t.box,r=t.direction+"Axis "+t.direction+t.n+"Axis",i="flot-"+t.direction+"-axis flot-"+t.direction+t.n+"-axis "+r,s=t.options.font||"flot-tick-label tickLabel",o,u,a,l,c;f.removeText(i);for(var h=0;h<t.ticks.length;++h){o=t.ticks[h];if(!o.label||o.v<t.min||o.v>t.max)continue;t.direction=="x"?(l="center",u=m.left+t.p2c(o.v),t.position=="bottom"?a=n.top+n.padding:(a=n.top+n.height-n.padding,c="bottom")):(c="middle",a=m.top+t.p2c(o.v),t.position=="left"?(u=n.left+n.width-n.padding,l="right"):u=n.left+n.padding),f.addText(i,u,a,o.label,s,null,null,l,c)}})}function Y(e){e.lines.show&&Z(e),e.bars.show&&nt(e),e.points.show&&et(e)}function Z(e){function t(e,t,n,r,i){var s=e.points,o=e.pointsize,u=null,a=null;h.beginPath();for(var f=o;f<s.length;f+=o){var 
l=s[f-o],c=s[f-o+1],p=s[f],d=s[f+1];if(l==null||p==null)continue;if(c<=d&&c<i.min){if(d<i.min)continue;l=(i.min-c)/(d-c)*(p-l)+l,c=i.min}else if(d<=c&&d<i.min){if(c<i.min)continue;p=(i.min-c)/(d-c)*(p-l)+l,d=i.min}if(c>=d&&c>i.max){if(d>i.max)continue;l=(i.max-c)/(d-c)*(p-l)+l,c=i.max}else if(d>=c&&d>i.max){if(c>i.max)continue;p=(i.max-c)/(d-c)*(p-l)+l,d=i.max}if(l<=p&&l<r.min){if(p<r.min)continue;c=(r.min-l)/(p-l)*(d-c)+c,l=r.min}else if(p<=l&&p<r.min){if(l<r.min)continue;d=(r.min-l)/(p-l)*(d-c)+c,p=r.min}if(l>=p&&l>r.max){if(p>r.max)continue;c=(r.max-l)/(p-l)*(d-c)+c,l=r.max}else if(p>=l&&p>r.max){if(l>r.max)continue;d=(r.max-l)/(p-l)*(d-c)+c,p=r.max}(l!=u||c!=a)&&h.moveTo(r.p2c(l)+t,i.p2c(c)+n),u=p,a=d,h.lineTo(r.p2c(p)+t,i.p2c(d)+n)}h.stroke()}function n(e,t,n){var r=e.points,i=e.pointsize,s=Math.min(Math.max(0,n.min),n.max),o=0,u,a=!1,f=1,l=0,c=0;for(;;){if(i>0&&o>r.length+i)break;o+=i;var p=r[o-i],d=r[o-i+f],v=r[o],m=r[o+f];if(a){if(i>0&&p!=null&&v==null){c=o,i=-i,f=2;continue}if(i<0&&o==l+i){h.fill(),a=!1,i=-i,f=1,o=l=c+i;continue}}if(p==null||v==null)continue;if(p<=v&&p<t.min){if(v<t.min)continue;d=(t.min-p)/(v-p)*(m-d)+d,p=t.min}else if(v<=p&&v<t.min){if(p<t.min)continue;m=(t.min-p)/(v-p)*(m-d)+d,v=t.min}if(p>=v&&p>t.max){if(v>t.max)continue;d=(t.max-p)/(v-p)*(m-d)+d,p=t.max}else if(v>=p&&v>t.max){if(p>t.max)continue;m=(t.max-p)/(v-p)*(m-d)+d,v=t.max}a||(h.beginPath(),h.moveTo(t.p2c(p),n.p2c(s)),a=!0);if(d>=n.max&&m>=n.max){h.lineTo(t.p2c(p),n.p2c(n.max)),h.lineTo(t.p2c(v),n.p2c(n.max));continue}if(d<=n.min&&m<=n.min){h.lineTo(t.p2c(p),n.p2c(n.min)),h.lineTo(t.p2c(v),n.p2c(n.min));continue}var g=p,y=v;d<=m&&d<n.min&&m>=n.min?(p=(n.min-d)/(m-d)*(v-p)+p,d=n.min):m<=d&&m<n.min&&d>=n.min&&(v=(n.min-d)/(m-d)*(v-p)+p,m=n.min),d>=m&&d>n.max&&m<=n.max?(p=(n.max-d)/(m-d)*(v-p)+p,d=n.max):m>=d&&m>n.max&&d<=n.max&&(v=(n.max-d)/(m-d)*(v-p)+p,m=n.max),p!=g&&h.lineTo(t.p2c(g),n.p2c(d)),h.lineTo(t.p2c(p),n.p2c(d)),h.lineTo(t.p2c(v),n.p2c(m)),v!=y&&(h.lineTo(t.p2c(v),n.p2c(m)),h.lineTo(t.p2c(y),n.p2c(m)))}}h.save(),h.translate(m.left,m.top),h.lineJoin="round";var r=e.lines.lineWidth,i=e.shadowSize;if(r>0&&i>0){h.lineWidth=i,h.strokeStyle="rgba(0,0,0,0.1)";var s=Math.PI/18;t(e.datapoints,Math.sin(s)*(r/2+i/2),Math.cos(s)*(r/2+i/2),e.xaxis,e.yaxis),h.lineWidth=i/2,t(e.datapoints,Math.sin(s)*(r/2+i/4),Math.cos(s)*(r/2+i/4),e.xaxis,e.yaxis)}h.lineWidth=r,h.strokeStyle=e.color;var o=rt(e.lines,e.color,0,y);o&&(h.fillStyle=o,n(e.datapoints,e.xaxis,e.yaxis)),r>0&&t(e.datapoints,0,0,e.xaxis,e.yaxis),h.restore()}function et(e){function t(e,t,n,r,i,s,o,u){var a=e.points,f=e.pointsize;for(var l=0;l<a.length;l+=f){var c=a[l],p=a[l+1];if(c==null||c<s.min||c>s.max||p<o.min||p>o.max)continue;h.beginPath(),c=s.p2c(c),p=o.p2c(p)+r,u=="circle"?h.arc(c,p,t,0,i?Math.PI:Math.PI*2,!1):u(h,c,p,t,i),h.closePath(),n&&(h.fillStyle=n,h.fill()),h.stroke()}}h.save(),h.translate(m.left,m.top);var n=e.points.lineWidth,r=e.shadowSize,i=e.points.radius,s=e.points.symbol;n==0&&(n=1e-4);if(n>0&&r>0){var o=r/2;h.lineWidth=o,h.strokeStyle="rgba(0,0,0,0.1)",t(e.datapoints,i,null,o+o/2,!0,e.xaxis,e.yaxis,s),h.strokeStyle="rgba(0,0,0,0.2)",t(e.datapoints,i,null,o/2,!0,e.xaxis,e.yaxis,s)}h.lineWidth=n,h.strokeStyle=e.color,t(e.datapoints,i,rt(e.points,e.color),0,!1,e.xaxis,e.yaxis,s),h.restore()}function tt(e,t,n,r,i,s,o,u,a,f,l,c){var 
h,p,d,v,m,g,y,b,w;l?(b=g=y=!0,m=!1,h=n,p=e,v=t+r,d=t+i,p<h&&(w=p,p=h,h=w,m=!0,g=!1)):(m=g=y=!0,b=!1,h=e+r,p=e+i,d=n,v=t,v<d&&(w=v,v=d,d=w,b=!0,y=!1));if(p<u.min||h>u.max||v<a.min||d>a.max)return;h<u.min&&(h=u.min,m=!1),p>u.max&&(p=u.max,g=!1),d<a.min&&(d=a.min,b=!1),v>a.max&&(v=a.max,y=!1),h=u.p2c(h),d=a.p2c(d),p=u.p2c(p),v=a.p2c(v),o&&(f.beginPath(),f.moveTo(h,d),f.lineTo(h,v),f.lineTo(p,v),f.lineTo(p,d),f.fillStyle=o(d,v),f.fill()),c>0&&(m||g||y||b)&&(f.beginPath(),f.moveTo(h,d+s),m?f.lineTo(h,v+s):f.moveTo(h,v+s),y?f.lineTo(p,v+s):f.moveTo(p,v+s),g?f.lineTo(p,d+s):f.moveTo(p,d+s),b?f.lineTo(h,d+s):f.moveTo(h,d+s),f.stroke())}function nt(e){function t(t,n,r,i,s,o,u){var a=t.points,f=t.pointsize;for(var l=0;l<a.length;l+=f){if(a[l]==null)continue;tt(a[l],a[l+1],a[l+2],n,r,i,s,o,u,h,e.bars.horizontal,e.bars.lineWidth)}}h.save(),h.translate(m.left,m.top),h.lineWidth=e.bars.lineWidth,h.strokeStyle=e.color;var n;switch(e.bars.align){case"left":n=0;break;case"right":n=-e.bars.barWidth;break;case"center":n=-e.bars.barWidth/2;break;default:throw new Error("Invalid bar alignment: "+e.bars.align)}var r=e.bars.fill?function(t,n){return rt(e.bars,e.color,t,n)}:null;t(e.datapoints,n,n+e.bars.barWidth,0,r,e.xaxis,e.yaxis),h.restore()}function rt(t,n,r,i){var s=t.fill;if(!s)return null;if(t.fillColor)return bt(t.fillColor,r,i,n);var o=e.color.parse(n);return o.a=typeof s=="number"?s:.4,o.normalize(),o.toString()}function it(){t.find(".legend").remove();if(!a.legend.show)return;var n=[],r=[],i=!1,s=a.legend.labelFormatter,o,f;for(var l=0;l<u.length;++l)o=u[l],o.label&&(f=s?s(o.label,o):o.label,f&&r.push({label:f,color:o.color}));if(a.legend.sorted)if(e.isFunction(a.legend.sorted))r.sort(a.legend.sorted);else if(a.legend.sorted=="reverse")r.reverse();else{var c=a.legend.sorted!="descending";r.sort(function(e,t){return e.label==t.label?0:e.label<t.label!=c?1:-1})}for(var l=0;l<r.length;++l){var h=r[l];l%a.legend.noColumns==0&&(i&&n.push("</tr>"),n.push("<tr>"),i=!0),n.push('<td class="legendColorBox"><div style="border:1px solid '+a.legend.labelBoxBorderColor+';padding:1px"><div style="width:4px;height:0;border:5px solid '+h.color+';overflow:hidden"></div></div></td>'+'<td class="legendLabel">'+h.label+"</td>")}i&&n.push("</tr>");if(n.length==0)return;var p='<table style="font-size:smaller;color:'+a.grid.color+'">'+n.join("")+"</table>";if(a.legend.container!=null)e(a.legend.container).html(p);else{var d="",v=a.legend.position,g=a.legend.margin;g[0]==null&&(g=[g,g]),v.charAt(0)=="n"?d+="top:"+(g[1]+m.top)+"px;":v.charAt(0)=="s"&&(d+="bottom:"+(g[1]+m.bottom)+"px;"),v.charAt(1)=="e"?d+="right:"+(g[0]+m.right)+"px;":v.charAt(1)=="w"&&(d+="left:"+(g[0]+m.left)+"px;");var y=e('<div class="legend">'+p.replace('style="','style="position:absolute;'+d+";")+"</div>").appendTo(t);if(a.legend.backgroundOpacity!=0){var b=a.legend.backgroundColor;b==null&&(b=a.grid.backgroundColor,b&&typeof b=="string"?b=e.color.parse(b):b=e.color.extract(y,"background-color"),b.a=1,b=b.toString());var w=y.children();e('<div style="position:absolute;width:'+w.width()+"px;height:"+w.height()+"px;"+d+"background-color:"+b+';"> </div>').prependTo(y).css("opacity",a.legend.backgroundOpacity)}}}function ut(e,t,n){var r=a.grid.mouseActiveRadius,i=r*r+1,s=null,o=!1,f,l,c;for(f=u.length-1;f>=0;--f){if(!n(u[f]))continue;var 
h=u[f],p=h.xaxis,d=h.yaxis,v=h.datapoints.points,m=p.c2p(e),g=d.c2p(t),y=r/p.scale,b=r/d.scale;c=h.datapoints.pointsize,p.options.inverseTransform&&(y=Number.MAX_VALUE),d.options.inverseTransform&&(b=Number.MAX_VALUE);if(h.lines.show||h.points.show)for(l=0;l<v.length;l+=c){var w=v[l],E=v[l+1];if(w==null)continue;if(w-m>y||w-m<-y||E-g>b||E-g<-b)continue;var S=Math.abs(p.p2c(w)-e),x=Math.abs(d.p2c(E)-t),T=S*S+x*x;T<i&&(i=T,s=[f,l/c])}if(h.bars.show&&!s){var N=h.bars.align=="left"?0:-h.bars.barWidth/2,C=N+h.bars.barWidth;for(l=0;l<v.length;l+=c){var w=v[l],E=v[l+1],k=v[l+2];if(w==null)continue;if(u[f].bars.horizontal?m<=Math.max(k,w)&&m>=Math.min(k,w)&&g>=E+N&&g<=E+C:m>=w+N&&m<=w+C&&g>=Math.min(k,E)&&g<=Math.max(k,E))s=[f,l/c]}}}return s?(f=s[0],l=s[1],c=u[f].datapoints.pointsize,{datapoint:u[f].datapoints.points.slice(l*c,(l+1)*c),dataIndex:l,series:u[f],seriesIndex:f}):null}function at(e){a.grid.hoverable&&ct("plothover",e,function(e){return e["hoverable"]!=0})}function ft(e){a.grid.hoverable&&ct("plothover",e,function(e){return!1})}function lt(e){ct("plotclick",e,function(e){return e["clickable"]!=0})}function ct(e,n,r){var i=c.offset(),s=n.pageX-i.left-m.left,o=n.pageY-i.top-m.top,u=L({left:s,top:o});u.pageX=n.pageX,u.pageY=n.pageY;var f=ut(s,o,r);f&&(f.pageX=parseInt(f.series.xaxis.p2c(f.datapoint[0])+i.left+m.left,10),f.pageY=parseInt(f.series.yaxis.p2c(f.datapoint[1])+i.top+m.top,10));if(a.grid.autoHighlight){for(var l=0;l<st.length;++l){var h=st[l];h.auto==e&&(!f||h.series!=f.series||h.point[0]!=f.datapoint[0]||h.point[1]!=f.datapoint[1])&&vt(h.series,h.point)}f&&dt(f.series,f.datapoint,e)}t.trigger(e,[u,f])}function ht(){var e=a.interaction.redrawOverlayInterval;if(e==-1){pt();return}ot||(ot=setTimeout(pt,e))}function pt(){ot=null,p.save(),l.clear(),p.translate(m.left,m.top);var e,t;for(e=0;e<st.length;++e)t=st[e],t.series.bars.show?yt(t.series,t.point):gt(t.series,t.point);p.restore(),E(b.drawOverlay,[p])}function dt(e,t,n){typeof e=="number"&&(e=u[e]);if(typeof t=="number"){var r=e.datapoints.pointsize;t=e.datapoints.points.slice(r*t,r*(t+1))}var i=mt(e,t);i==-1?(st.push({series:e,point:t,auto:n}),ht()):n||(st[i].auto=!1)}function vt(e,t){if(e==null&&t==null){st=[],ht();return}typeof e=="number"&&(e=u[e]);if(typeof t=="number"){var n=e.datapoints.pointsize;t=e.datapoints.points.slice(n*t,n*(t+1))}var r=mt(e,t);r!=-1&&(st.splice(r,1),ht())}function mt(e,t){for(var n=0;n<st.length;++n){var r=st[n];if(r.series==e&&r.point[0]==t[0]&&r.point[1]==t[1])return n}return-1}function gt(t,n){var r=n[0],i=n[1],s=t.xaxis,o=t.yaxis,u=typeof t.highlightColor=="string"?t.highlightColor:e.color.parse(t.color).scale("a",.5).toString();if(r<s.min||r>s.max||i<o.min||i>o.max)return;var a=t.points.radius+t.points.lineWidth/2;p.lineWidth=a,p.strokeStyle=u;var f=1.5*a;r=s.p2c(r),i=o.p2c(i),p.beginPath(),t.points.symbol=="circle"?p.arc(r,i,f,0,2*Math.PI,!1):t.points.symbol(p,r,i,f,!1),p.closePath(),p.stroke()}function yt(t,n){var r=typeof t.highlightColor=="string"?t.highlightColor:e.color.parse(t.color).scale("a",.5).toString(),i=r,s=t.bars.align=="left"?0:-t.bars.barWidth/2;p.lineWidth=t.bars.lineWidth,p.strokeStyle=r,tt(n[0],n[1],n[2]||0,s,s+t.bars.barWidth,0,function(){return i},t.xaxis,t.yaxis,p,t.bars.horizontal,t.bars.lineWidth)}function bt(t,n,r,i){if(typeof t=="string")return t;var s=h.createLinearGradient(0,r,0,n);for(var o=0,u=t.colors.length;o<u;++o){var a=t.colors[o];if(typeof a!="string"){var 
f=e.color.parse(i);a.brightness!=null&&(f=f.scale("rgb",a.brightness)),a.opacity!=null&&(f.a*=a.opacity),a=f.toString()}s.addColorStop(o/(u-1),a)}return s}var u=[],a={colors:["#edc240","#afd8f8","#cb4b4b","#4da74d","#9440ed"],legend:{show:!0,noColumns:1,labelFormatter:null,labelBoxBorderColor:"#ccc",container:null,position:"ne",margin:5,backgroundColor:null,backgroundOpacity:.85,sorted:null},xaxis:{show:null,position:"bottom",mode:null,font:null,color:null,tickColor:null,transform:null,inverseTransform:null,min:null,max:null,autoscaleMargin:null,ticks:null,tickFormatter:null,labelWidth:null,labelHeight:null,reserveSpace:null,tickLength:null,alignTicksWithAxis:null,tickDecimals:null,tickSize:null,minTickSize:null},yaxis:{autoscaleMargin:.02,position:"left"},xaxes:[],yaxes:[],series:{points:{show:!1,radius:3,lineWidth:2,fill:!0,fillColor:"#ffffff",symbol:"circle"},lines:{lineWidth:2,fill:!1,fillColor:null,steps:!1},bars:{show:!1,lineWidth:2,barWidth:1,fill:!0,fillColor:null,align:"left",horizontal:!1,zero:!0},shadowSize:3,highlightColor:null},grid:{show:!0,aboveData:!1,color:"#545454",backgroundColor:null,borderColor:null,tickColor:null,margin:0,labelMargin:5,axisMargin:8,borderWidth:2,minBorderMargin:null,markings:null,markingsColor:"#f4f4f4",markingsLineWidth:2,clickable:!1,hoverable:!1,autoHighlight:!0,mouseActiveRadius:10},interaction:{redrawOverlayInterval:1e3/60},hooks:{}},f=null,l=null,c=null,h=null,p=null,d=[],v=[],m={left:0,right:0,top:0,bottom
+:0},g=0,y=0,b={processOptions:[],processRawData:[],processDatapoints:[],processOffset:[],drawBackground:[],drawSeries:[],draw:[],bindEvents:[],drawOverlay:[],shutdown:[]},w=this;w.setData=T,w.setupGrid=R,w.draw=V,w.getPlaceholder=function(){return t},w.getCanvas=function(){return f.element},w.getPlotOffset=function(){return m},w.width=function(){return g},w.height=function(){return y},w.offset=function(){var e=c.offset();return e.left+=m.left,e.top+=m.top,e},w.getData=function(){return u},w.getAxes=function(){var t={},n;return e.each(d.concat(v),function(e,n){n&&(t[n.direction+(n.n!=1?n.n:"")+"axis"]=n)}),t},w.getXAxes=function(){return d},w.getYAxes=function(){return v},w.c2p=L,w.p2c=A,w.getOptions=function(){return a},w.highlight=dt,w.unhighlight=vt,w.triggerRedrawOverlay=ht,w.pointOffset=function(e){return{left:parseInt(d[C(e,"x")-1].p2c(+e.x)+m.left,10),top:parseInt(v[C(e,"y")-1].p2c(+e.y)+m.top,10)}},w.shutdown=H,w.resize=function(){var e=t.width(),n=t.height();f.resize(e,n),l.resize(e,n)},w.hooks=b,S(w),x(s),D(),T(r),R(),V(),P();var st=[],ot=null}function i(e,t){return t*Math.floor(e/t)}var t=Object.prototype.hasOwnProperty;n.prototype.resize=function(e,t){if(e<=0||t<=0)throw new Error("Invalid dimensions for plot, width = "+e+", height = "+t);var n=this.element,r=this.context,i=this.pixelRatio;this.width!=e&&(n.width=e*i,n.style.width=e+"px",this.width=e),this.height!=t&&(n.height=t*i,n.style.height=t+"px",this.height=t),r.restore(),r.save(),r.scale(i,i)},n.prototype.clear=function(){this.context.clearRect(0,0,this.width,this.height)},n.prototype.render=function(){var e=this._textCache;for(var n in e)if(t.call(e,n)){var r=this.getTextLayer(n),i=e[n];r.hide();for(var s in i)if(t.call(i,s)){var o=i[s];for(var u in o)if(t.call(o,u)){var a=o[u].positions;for(var f=0,l;l=a[f];f++)l.active?l.rendered||(r.append(l.element),l.rendered=!0):(a.splice(f--,1),l.rendered&&l.element.detach());a.length==0&&delete o[u]}}r.show()}},n.prototype.getTextLayer=function(t){var n=this.text[t];return n==null&&(this.textContainer==null&&(this.textContainer=e("<div class='flot-text'></div>").css({position:"absolute",top:0,left:0,bottom:0,right:0,"font-size":"smaller",color:"#545454"}).insertAfter(this.element)),n=this.text[t]=e("<div></div>").addClass(t).css({position:"absolute",top:0,left:0,bottom:0,right:0}).appendTo(this.textContainer)),n},n.prototype.getTextInfo=function(t,n,r,i,s){var o,u,a,f;n=""+n,typeof r=="object"?o=r.style+" "+r.variant+" "+r.weight+" "+r.size+"px/"+r.lineHeight+"px "+r.family:o=r,u=this._textCache[t],u==null&&(u=this._textCache[t]={}),a=u[o],a==null&&(a=u[o]={}),f=a[n];if(f==null){var l=e("<div></div>").html(n).css({position:"absolute","max-width":s,top:-9999}).appendTo(this.getTextLayer(t));typeof r=="object"?l.css({font:o,color:r.color}):typeof r=="string"&&l.addClass(r),f=a[n]={width:l.outerWidth(!0),height:l.outerHeight(!0),element:l,positions:[]},l.detach()}return f},n.prototype.addText=function(e,t,n,r,i,s,o,u,a){var f=this.getTextInfo(e,r,i,s,o),l=f.positions;u=="center"?t-=f.width/2:u=="right"&&(t-=f.width),a=="middle"?n-=f.height/2:a=="bottom"&&(n-=f.height);for(var c=0,h;h=l[c];c++)if(h.x==t&&h.y==n){h.active=!0;return}h={active:!0,rendered:!1,element:l.length?f.element.clone():f.element,x:t,y:n},l.push(h),h.element.css({top:Math.round(n),left:Math.round(t),"text-align":u})},n.prototype.removeText=function(e,n,r,i,s,o){if(i==null){var u=this._textCache[e];if(u!=null)for(var a in u)if(t.call(u,a)){var f=u[a];for(var l in f)if(t.call(f,l)){var c=f[l].positions;for(var 
h=0,p;p=c[h];h++)p.active=!1}}}else{var c=this.getTextInfo(e,i,s,o).positions;for(var h=0,p;p=c[h];h++)p.x==n&&p.y==r&&(p.active=!1)}},e.plot=function(t,n,i){var s=new r(e(t),n,i,e.plot.plugins);return s},e.plot.version="0.8.1",e.plot.plugins=[],e.fn.plot=function(t,n){return this.each(function(){e.plot(this,t,n)})}}(jQuery);
\ No newline at end of file
diff --git a/catapult/telemetry/third_party/mock/LICENSE.txt b/catapult/telemetry/third_party/mock/LICENSE.txt
new file mode 100644
index 0000000..7891703
--- /dev/null
+++ b/catapult/telemetry/third_party/mock/LICENSE.txt
@@ -0,0 +1,26 @@
+Copyright (c) 2003-2012, Michael Foord
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above
+      copyright notice, this list of conditions and the following
+      disclaimer in the documentation and/or other materials provided
+      with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/catapult/telemetry/third_party/mock/README.chromium b/catapult/telemetry/third_party/mock/README.chromium
new file mode 100644
index 0000000..2dc689b
--- /dev/null
+++ b/catapult/telemetry/third_party/mock/README.chromium
@@ -0,0 +1,16 @@
+Name: mock
+Short Name: mock
+URL: http://www.voidspace.org.uk/python/mock/
+Version: 1.0.1
+License: BSD
+License File: NOT_SHIPPED
+Security Critical: no
+
+Description:
+Library for mocks in Python tests.
+
+Local Modifications:
+Includes only mock.py and LICENSE.txt.
+Packaging and setup files, along with all other files and folders
+(docs/, html/, tests/), have not been copied downstream.
diff --git a/catapult/telemetry/third_party/mock/mock.py b/catapult/telemetry/third_party/mock/mock.py
new file mode 100644
index 0000000..c8fc5d1
--- /dev/null
+++ b/catapult/telemetry/third_party/mock/mock.py
@@ -0,0 +1,2367 @@
+# mock.py
+# Test tools for mocking and patching.
+# Copyright (C) 2007-2012 Michael Foord & the mock team
+# E-mail: fuzzyman AT voidspace DOT org DOT uk
+
+# mock 1.0
+# http://www.voidspace.org.uk/python/mock/
+
+# Released subject to the BSD License
+# Please see http://www.voidspace.org.uk/python/license.shtml
+
+# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml
+# Comments, suggestions and bug reports welcome.
+
+
+__all__ = (
+    'Mock',
+    'MagicMock',
+    'patch',
+    'sentinel',
+    'DEFAULT',
+    'ANY',
+    'call',
+    'create_autospec',
+    'FILTER_DIR',
+    'NonCallableMock',
+    'NonCallableMagicMock',
+    'mock_open',
+    'PropertyMock',
+)
+
+
+__version__ = '1.0.1'
+
+
+import pprint
+import sys
+
+try:
+    import inspect
+except ImportError:
+    # for alternative platforms that
+    # may not have inspect
+    inspect = None
+
+try:
+    from functools import wraps as original_wraps
+except ImportError:
+    # Python 2.4 compatibility
+    def wraps(original):
+        def inner(f):
+            f.__name__ = original.__name__
+            f.__doc__ = original.__doc__
+            f.__module__ = original.__module__
+            f.__wrapped__ = original
+            return f
+        return inner
+else:
+    if sys.version_info[:2] >= (3, 3):
+        wraps = original_wraps
+    else:
+        def wraps(func):
+            def inner(f):
+                f = original_wraps(func)(f)
+                f.__wrapped__ = func
+                return f
+            return inner
+
+try:
+    unicode
+except NameError:
+    # Python 3
+    basestring = unicode = str
+
+try:
+    long
+except NameError:
+    # Python 3
+    long = int
+
+try:
+    BaseException
+except NameError:
+    # Python 2.4 compatibility
+    BaseException = Exception
+
+try:
+    next
+except NameError:
+    def next(obj):
+        return obj.next()
+
+
+BaseExceptions = (BaseException,)
+if 'java' in sys.platform:
+    # jython
+    import java
+    BaseExceptions = (BaseException, java.lang.Throwable)
+
+try:
+    _isidentifier = str.isidentifier
+except AttributeError:
+    # Python 2.X
+    import keyword
+    import re
+    regex = re.compile(r'^[a-z_][a-z0-9_]*$', re.I)
+    def _isidentifier(string):
+        if string in keyword.kwlist:
+            return False
+        return regex.match(string)
+
+
+inPy3k = sys.version_info[0] == 3
+
+# Needed to work around Python 3 bug where use of "super" interferes with
+# defining __class__ as a descriptor
+_super = super
+
+self = 'im_self'
+builtin = '__builtin__'
+if inPy3k:
+    self = '__self__'
+    builtin = 'builtins'
+
+FILTER_DIR = True
+
+
+def _is_instance_mock(obj):
+    # can't use isinstance on Mock objects because they override __class__
+    # The base class for all mocks is NonCallableMock
+    return issubclass(type(obj), NonCallableMock)
+
+
+def _is_exception(obj):
+    return (
+        isinstance(obj, BaseExceptions) or
+        isinstance(obj, ClassTypes) and issubclass(obj, BaseExceptions)
+    )
+
+
+class _slotted(object):
+    __slots__ = ['a']
+
+
+DescriptorTypes = (
+    type(_slotted.a),
+    property,
+)
+
+
+def _getsignature(func, skipfirst, instance=False):
+    if inspect is None:
+        raise ImportError('inspect module not available')
+
+    if isinstance(func, ClassTypes) and not instance:
+        try:
+            func = func.__init__
+        except AttributeError:
+            return
+        skipfirst = True
+    elif not isinstance(func, FunctionTypes):
+        # for classes where instance is True we end up here too
+        try:
+            func = func.__call__
+        except AttributeError:
+            return
+
+    if inPy3k:
+        try:
+            argspec = inspect.getfullargspec(func)
+        except TypeError:
+            # C function / method, possibly inherited object().__init__
+            return
+        regargs, varargs, varkw, defaults, kwonly, kwonlydef, ann = argspec
+    else:
+        try:
+            regargs, varargs, varkwargs, defaults = inspect.getargspec(func)
+        except TypeError:
+            # C function / method, possibly inherited object().__init__
+            return
+
+    # instance methods and classmethods need to lose the self argument
+    if getattr(func, self, None) is not None:
+        regargs = regargs[1:]
+    if skipfirst:
+        # this condition and the above one are never both True - why?
+        regargs = regargs[1:]
+
+    if inPy3k:
+        signature = inspect.formatargspec(
+            regargs, varargs, varkw, defaults,
+            kwonly, kwonlydef, ann, formatvalue=lambda value: "")
+    else:
+        signature = inspect.formatargspec(
+            regargs, varargs, varkwargs, defaults,
+            formatvalue=lambda value: "")
+    return signature[1:-1], func
+
+
+def _check_signature(func, mock, skipfirst, instance=False):
+    if not _callable(func):
+        return
+
+    result = _getsignature(func, skipfirst, instance)
+    if result is None:
+        return
+    signature, func = result
+
+    # can't use "self" because it is common as an argument name in the
+    # functions being mocked, and unfortunately not always in the first position
+    src = "lambda _mock_self, %s: None" % signature
+    checksig = eval(src, {})
+    _copy_func_details(func, checksig)
+    type(mock)._mock_check_sig = checksig
+
+
+def _copy_func_details(func, funcopy):
+    funcopy.__name__ = func.__name__
+    funcopy.__doc__ = func.__doc__
+    #funcopy.__dict__.update(func.__dict__)
+    funcopy.__module__ = func.__module__
+    if not inPy3k:
+        funcopy.func_defaults = func.func_defaults
+        return
+    funcopy.__defaults__ = func.__defaults__
+    funcopy.__kwdefaults__ = func.__kwdefaults__
+
+
+def _callable(obj):
+    if isinstance(obj, ClassTypes):
+        return True
+    if getattr(obj, '__call__', None) is not None:
+        return True
+    return False
+
+
+def _is_list(obj):
+    # checks for list or tuples
+    # XXXX badly named!
+    return type(obj) in (list, tuple)
+
+
+def _instance_callable(obj):
+    """Given an object, return True if the object is callable.
+    For classes, return True if instances would be callable."""
+    if not isinstance(obj, ClassTypes):
+        # already an instance
+        return getattr(obj, '__call__', None) is not None
+
+    klass = obj
+    # uses __bases__ instead of __mro__ so that we work with old style classes
+    if klass.__dict__.get('__call__') is not None:
+        return True
+
+    for base in klass.__bases__:
+        if _instance_callable(base):
+            return True
+    return False
+
+
+def _set_signature(mock, original, instance=False):
+    # creates a function with signature (*args, **kwargs) that delegates to a
+    # mock. It still does signature checking by calling a lambda with the same
+    # signature as the original.
+    if not _callable(original):
+        return
+
+    skipfirst = isinstance(original, ClassTypes)
+    result = _getsignature(original, skipfirst, instance)
+    if result is None:
+        # was a C function (e.g. object().__init__ ) that can't be mocked
+        return
+
+    signature, func = result
+
+    src = "lambda %s: None" % signature
+    checksig = eval(src, {})
+    _copy_func_details(func, checksig)
+
+    name = original.__name__
+    if not _isidentifier(name):
+        name = 'funcopy'
+    context = {'_checksig_': checksig, 'mock': mock}
+    src = """def %s(*args, **kwargs):
+    _checksig_(*args, **kwargs)
+    return mock(*args, **kwargs)""" % name
+    exec (src, context)
+    funcopy = context[name]
+    _setup_func(funcopy, mock)
+    return funcopy
+
+
+def _setup_func(funcopy, mock):
+    funcopy.mock = mock
+
+    # can't use isinstance with mocks
+    if not _is_instance_mock(mock):
+        return
+
+    def assert_called_with(*args, **kwargs):
+        return mock.assert_called_with(*args, **kwargs)
+    def assert_called_once_with(*args, **kwargs):
+        return mock.assert_called_once_with(*args, **kwargs)
+    def assert_has_calls(*args, **kwargs):
+        return mock.assert_has_calls(*args, **kwargs)
+    def assert_any_call(*args, **kwargs):
+        return mock.assert_any_call(*args, **kwargs)
+    def reset_mock():
+        funcopy.method_calls = _CallList()
+        funcopy.mock_calls = _CallList()
+        mock.reset_mock()
+        ret = funcopy.return_value
+        if _is_instance_mock(ret) and not ret is mock:
+            ret.reset_mock()
+
+    funcopy.called = False
+    funcopy.call_count = 0
+    funcopy.call_args = None
+    funcopy.call_args_list = _CallList()
+    funcopy.method_calls = _CallList()
+    funcopy.mock_calls = _CallList()
+
+    funcopy.return_value = mock.return_value
+    funcopy.side_effect = mock.side_effect
+    funcopy._mock_children = mock._mock_children
+
+    funcopy.assert_called_with = assert_called_with
+    funcopy.assert_called_once_with = assert_called_once_with
+    funcopy.assert_has_calls = assert_has_calls
+    funcopy.assert_any_call = assert_any_call
+    funcopy.reset_mock = reset_mock
+
+    mock._mock_delegate = funcopy
+
+
+def _is_magic(name):
+    return '__%s__' % name[2:-2] == name
+
+
+class _SentinelObject(object):
+    "A unique, named, sentinel object."
+    def __init__(self, name):
+        self.name = name
+
+    def __repr__(self):
+        return 'sentinel.%s' % self.name
+
+
+class _Sentinel(object):
+    """Access attributes to return a named object, usable as a sentinel."""
+    def __init__(self):
+        self._sentinels = {}
+
+    def __getattr__(self, name):
+        if name == '__bases__':
+            # Without this help(mock) raises an exception
+            raise AttributeError
+        return self._sentinels.setdefault(name, _SentinelObject(name))
+
+
+sentinel = _Sentinel()
+
+DEFAULT = sentinel.DEFAULT
+_missing = sentinel.MISSING
+_deleted = sentinel.DELETED
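+
+# --- Editor's note: illustrative sketch, not part of upstream mock 1.0.1 ---
+# Attribute access on `sentinel` yields a unique named marker object, handy
+# for asserting that a placeholder value is passed through untouched
+# (assuming `from mock import Mock, sentinel`; `real` is any object under test):
+#
+#   real.method = Mock(return_value=sentinel.some_object)
+#   assert real.method() is sentinel.some_object
+#   repr(sentinel.some_object)    # -> 'sentinel.some_object'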
+
+
+class OldStyleClass:
+    pass
+ClassType = type(OldStyleClass)
+
+
+def _copy(value):
+    if type(value) in (dict, list, tuple, set):
+        return type(value)(value)
+    return value
+
+
+ClassTypes = (type,)
+if not inPy3k:
+    ClassTypes = (type, ClassType)
+
+_allowed_names = set(
+    [
+        'return_value', '_mock_return_value', 'side_effect',
+        '_mock_side_effect', '_mock_parent', '_mock_new_parent',
+        '_mock_name', '_mock_new_name'
+    ]
+)
+
+
+def _delegating_property(name):
+    _allowed_names.add(name)
+    _the_name = '_mock_' + name
+    def _get(self, name=name, _the_name=_the_name):
+        sig = self._mock_delegate
+        if sig is None:
+            return getattr(self, _the_name)
+        return getattr(sig, name)
+    def _set(self, value, name=name, _the_name=_the_name):
+        sig = self._mock_delegate
+        if sig is None:
+            self.__dict__[_the_name] = value
+        else:
+            setattr(sig, name, value)
+
+    return property(_get, _set)
+
+
+
+class _CallList(list):
+
+    def __contains__(self, value):
+        if not isinstance(value, list):
+            return list.__contains__(self, value)
+        len_value = len(value)
+        len_self = len(self)
+        if len_value > len_self:
+            return False
+
+        for i in range(0, len_self - len_value + 1):
+            sub_list = self[i:i+len_value]
+            if sub_list == value:
+                return True
+        return False
+
+    def __repr__(self):
+        return pprint.pformat(list(self))
+
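+# --- Editor's note: illustrative sketch, not part of upstream mock 1.0.1 ---
+# _CallList.__contains__ matches a list argument as a *contiguous*
+# subsequence, which is what ordered assert_has_calls relies on
+# (assuming `from mock import call`):
+#
+#   calls = _CallList([call(1), call(2), call(3), call(4)])
+#   [call(2), call(3)] in calls    # True: found as a contiguous run
+#   [call(2), call(4)] in calls    # False: both present, but not contiguous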
+
+def _check_and_set_parent(parent, value, name, new_name):
+    if not _is_instance_mock(value):
+        return False
+    if ((value._mock_name or value._mock_new_name) or
+        (value._mock_parent is not None) or
+        (value._mock_new_parent is not None)):
+        return False
+
+    _parent = parent
+    while _parent is not None:
+        # setting a mock (value) as a child or return value of itself
+        # should not modify the mock
+        if _parent is value:
+            return False
+        _parent = _parent._mock_new_parent
+
+    if new_name:
+        value._mock_new_parent = parent
+        value._mock_new_name = new_name
+    if name:
+        value._mock_parent = parent
+        value._mock_name = name
+    return True
+
+
+
+class Base(object):
+    _mock_return_value = DEFAULT
+    _mock_side_effect = None
+    def __init__(self, *args, **kwargs):
+        pass
+
+
+
+class NonCallableMock(Base):
+    """A non-callable version of `Mock`"""
+
+    def __new__(cls, *args, **kw):
+        # every instance has its own class
+        # so we can create magic methods on the
+        # class without stomping on other mocks
+        new = type(cls.__name__, (cls,), {'__doc__': cls.__doc__})
+        instance = object.__new__(new)
+        return instance
+
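+    # --- Editor's note: illustrative sketch, not part of upstream mock 1.0.1 ---
+    # Because __new__ builds a fresh subclass per instance, magic methods
+    # configured on one mock's class never leak onto another
+    # (assuming `from mock import MagicMock`):
+    #
+    #   a = MagicMock()
+    #   b = MagicMock()
+    #   a.__len__.return_value = 3
+    #   assert len(a) == 3 and len(b) == 0   # b keeps its default; type(a) is not type(b)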
+
+    def __init__(
+            self, spec=None, wraps=None, name=None, spec_set=None,
+            parent=None, _spec_state=None, _new_name='', _new_parent=None,
+            **kwargs
+        ):
+        if _new_parent is None:
+            _new_parent = parent
+
+        __dict__ = self.__dict__
+        __dict__['_mock_parent'] = parent
+        __dict__['_mock_name'] = name
+        __dict__['_mock_new_name'] = _new_name
+        __dict__['_mock_new_parent'] = _new_parent
+
+        if spec_set is not None:
+            spec = spec_set
+            spec_set = True
+
+        self._mock_add_spec(spec, spec_set)
+
+        __dict__['_mock_children'] = {}
+        __dict__['_mock_wraps'] = wraps
+        __dict__['_mock_delegate'] = None
+
+        __dict__['_mock_called'] = False
+        __dict__['_mock_call_args'] = None
+        __dict__['_mock_call_count'] = 0
+        __dict__['_mock_call_args_list'] = _CallList()
+        __dict__['_mock_mock_calls'] = _CallList()
+
+        __dict__['method_calls'] = _CallList()
+
+        if kwargs:
+            self.configure_mock(**kwargs)
+
+        _super(NonCallableMock, self).__init__(
+            spec, wraps, name, spec_set, parent,
+            _spec_state
+        )
+
+
+    def attach_mock(self, mock, attribute):
+        """
+        Attach a mock as an attribute of this one, replacing its name and
+        parent. Calls to the attached mock will be recorded in the
+        `method_calls` and `mock_calls` attributes of this one."""
+        mock._mock_parent = None
+        mock._mock_new_parent = None
+        mock._mock_name = ''
+        mock._mock_new_name = None
+
+        setattr(self, attribute, mock)
+
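+    # --- Editor's note: illustrative sketch, not part of upstream mock 1.0.1 ---
+    # attach_mock re-parents an existing mock so its calls are recorded in
+    # this mock's mock_calls (assuming `from mock import Mock, call`):
+    #
+    #   parent = Mock()
+    #   child = Mock(return_value=None)
+    #   parent.attach_mock(child, 'child')
+    #   parent.child(1)
+    #   assert parent.mock_calls == [call.child(1)]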
+
+    def mock_add_spec(self, spec, spec_set=False):
+        """Add a spec to a mock. `spec` can either be an object or a
+        list of strings. Only attributes on the `spec` can be fetched as
+        attributes from the mock.
+
+        If `spec_set` is True then only attributes on the spec can be set."""
+        self._mock_add_spec(spec, spec_set)
+
+
+    def _mock_add_spec(self, spec, spec_set):
+        _spec_class = None
+
+        if spec is not None and not _is_list(spec):
+            if isinstance(spec, ClassTypes):
+                _spec_class = spec
+            else:
+                _spec_class = _get_class(spec)
+
+            spec = dir(spec)
+
+        __dict__ = self.__dict__
+        __dict__['_spec_class'] = _spec_class
+        __dict__['_spec_set'] = spec_set
+        __dict__['_mock_methods'] = spec
+
+
+    def __get_return_value(self):
+        ret = self._mock_return_value
+        if self._mock_delegate is not None:
+            ret = self._mock_delegate.return_value
+
+        if ret is DEFAULT:
+            ret = self._get_child_mock(
+                _new_parent=self, _new_name='()'
+            )
+            self.return_value = ret
+        return ret
+
+
+    def __set_return_value(self, value):
+        if self._mock_delegate is not None:
+            self._mock_delegate.return_value = value
+        else:
+            self._mock_return_value = value
+            _check_and_set_parent(self, value, None, '()')
+
+    __return_value_doc = "The value to be returned when the mock is called."
+    return_value = property(__get_return_value, __set_return_value,
+                            __return_value_doc)
+
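+    # --- Editor's note: illustrative sketch, not part of upstream mock 1.0.1 ---
+    # return_value is created lazily as a child mock on first access, so it
+    # can be configured before the mock is ever called:
+    #
+    #   m = Mock()
+    #   m.return_value.status = 'ok'
+    #   assert m().status == 'ok'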
+
+    @property
+    def __class__(self):
+        if self._spec_class is None:
+            return type(self)
+        return self._spec_class
+
+    called = _delegating_property('called')
+    call_count = _delegating_property('call_count')
+    call_args = _delegating_property('call_args')
+    call_args_list = _delegating_property('call_args_list')
+    mock_calls = _delegating_property('mock_calls')
+
+
+    def __get_side_effect(self):
+        sig = self._mock_delegate
+        if sig is None:
+            return self._mock_side_effect
+        return sig.side_effect
+
+    def __set_side_effect(self, value):
+        value = _try_iter(value)
+        sig = self._mock_delegate
+        if sig is None:
+            self._mock_side_effect = value
+        else:
+            sig.side_effect = value
+
+    side_effect = property(__get_side_effect, __set_side_effect)
+
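+    # --- Editor's note: illustrative sketch, not part of upstream mock 1.0.1 ---
+    # side_effect may be a callable, an exception class or instance, or an
+    # iterable (converted to an iterator by _try_iter, defined later in this
+    # module):
+    #
+    #   Mock(side_effect=KeyError('boom'))()          # raises KeyError
+    #   m = Mock(side_effect=[1, 2]); m(); m()        # returns 1, then 2
+    #   m = Mock(side_effect=lambda x: x + 1); m(1)   # returns 2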
+
+    def reset_mock(self):
+        "Restore the mock object to its initial state."
+        self.called = False
+        self.call_args = None
+        self.call_count = 0
+        self.mock_calls = _CallList()
+        self.call_args_list = _CallList()
+        self.method_calls = _CallList()
+
+        for child in self._mock_children.values():
+            if isinstance(child, _SpecState):
+                continue
+            child.reset_mock()
+
+        ret = self._mock_return_value
+        if _is_instance_mock(ret) and ret is not self:
+            ret.reset_mock()
+
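+    # --- Editor's note: illustrative sketch, not part of upstream mock 1.0.1 ---
+    # reset_mock clears call bookkeeping but keeps any configured
+    # return_value and side_effect:
+    #
+    #   m = Mock(return_value=3)
+    #   m(); assert m.called
+    #   m.reset_mock()
+    #   assert not m.called and m() == 3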
+
+    def configure_mock(self, **kwargs):
+        """Set attributes on the mock through keyword arguments.
+
+        Attributes plus return values and side effects can be set on child
+        mocks using standard dot notation and unpacking a dictionary in the
+        method call:
+
+        >>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError}
+        >>> mock.configure_mock(**attrs)"""
+        for arg, val in sorted(kwargs.items(),
+                               # we sort on the number of dots so that
+                               # attributes are set before we set attributes on
+                               # attributes
+                               key=lambda entry: entry[0].count('.')):
+            args = arg.split('.')
+            final = args.pop()
+            obj = self
+            for entry in args:
+                obj = getattr(obj, entry)
+            setattr(obj, final, val)
+
+
+    def __getattr__(self, name):
+        if name == '_mock_methods':
+            raise AttributeError(name)
+        elif self._mock_methods is not None:
+            if name not in self._mock_methods or name in _all_magics:
+                raise AttributeError("Mock object has no attribute %r" % name)
+        elif _is_magic(name):
+            raise AttributeError(name)
+
+        result = self._mock_children.get(name)
+        if result is _deleted:
+            raise AttributeError(name)
+        elif result is None:
+            wraps = None
+            if self._mock_wraps is not None:
+                # XXXX should we get the attribute without triggering code
+                # execution?
+                wraps = getattr(self._mock_wraps, name)
+
+            result = self._get_child_mock(
+                parent=self, name=name, wraps=wraps, _new_name=name,
+                _new_parent=self
+            )
+            self._mock_children[name]  = result
+
+        elif isinstance(result, _SpecState):
+            result = create_autospec(
+                result.spec, result.spec_set, result.instance,
+                result.parent, result.name
+            )
+            self._mock_children[name]  = result
+
+        return result
+
+
+    def __repr__(self):
+        _name_list = [self._mock_new_name]
+        _parent = self._mock_new_parent
+        last = self
+
+        dot = '.'
+        if _name_list == ['()']:
+            dot = ''
+        seen = set()
+        while _parent is not None:
+            last = _parent
+
+            _name_list.append(_parent._mock_new_name + dot)
+            dot = '.'
+            if _parent._mock_new_name == '()':
+                dot = ''
+
+            _parent = _parent._mock_new_parent
+
+            # use ids here so as not to call __hash__ on the mocks
+            if id(_parent) in seen:
+                break
+            seen.add(id(_parent))
+
+        _name_list = list(reversed(_name_list))
+        _first = last._mock_name or 'mock'
+        if len(_name_list) > 1:
+            if _name_list[1] not in ('()', '().'):
+                _first += '.'
+        _name_list[0] = _first
+        name = ''.join(_name_list)
+
+        name_string = ''
+        if name not in ('mock', 'mock.'):
+            name_string = ' name=%r' % name
+
+        spec_string = ''
+        if self._spec_class is not None:
+            spec_string = ' spec=%r'
+            if self._spec_set:
+                spec_string = ' spec_set=%r'
+            spec_string = spec_string % self._spec_class.__name__
+        return "<%s%s%s id='%s'>" % (
+            type(self).__name__,
+            name_string,
+            spec_string,
+            id(self)
+        )
+
+
+    def __dir__(self):
+        """Filter the output of `dir(mock)` to only useful members.
+        XXXX
+        """
+        extras = self._mock_methods or []
+        from_type = dir(type(self))
+        from_dict = list(self.__dict__)
+
+        if FILTER_DIR:
+            from_type = [e for e in from_type if not e.startswith('_')]
+            from_dict = [e for e in from_dict if not e.startswith('_') or
+                         _is_magic(e)]
+        return sorted(set(extras + from_type + from_dict +
+                          list(self._mock_children)))
+
+
+    def __setattr__(self, name, value):
+        if name in _allowed_names:
+            # property setters go through here
+            return object.__setattr__(self, name, value)
+        elif (self._spec_set and self._mock_methods is not None and
+            name not in self._mock_methods and
+            name not in self.__dict__):
+            raise AttributeError("Mock object has no attribute '%s'" % name)
+        elif name in _unsupported_magics:
+            msg = 'Attempting to set unsupported magic method %r.' % name
+            raise AttributeError(msg)
+        elif name in _all_magics:
+            if self._mock_methods is not None and name not in self._mock_methods:
+                raise AttributeError("Mock object has no attribute '%s'" % name)
+
+            if not _is_instance_mock(value):
+                setattr(type(self), name, _get_method(name, value))
+                original = value
+                value = lambda *args, **kw: original(self, *args, **kw)
+            else:
+                # only set _new_name and not name so that mock_calls is tracked
+                # but not method calls
+                _check_and_set_parent(self, value, None, name)
+                setattr(type(self), name, value)
+                self._mock_children[name] = value
+        elif name == '__class__':
+            self._spec_class = value
+            return
+        else:
+            if _check_and_set_parent(self, value, name, name):
+                self._mock_children[name] = value
+        return object.__setattr__(self, name, value)
+
+
+    def __delattr__(self, name):
+        if name in _all_magics and name in type(self).__dict__:
+            delattr(type(self), name)
+            if name not in self.__dict__:
+                # for magic methods that are still MagicProxy objects and
+                # not set on the instance itself
+                return
+
+        if name in self.__dict__:
+            object.__delattr__(self, name)
+
+        obj = self._mock_children.get(name, _missing)
+        if obj is _deleted:
+            raise AttributeError(name)
+        if obj is not _missing:
+            del self._mock_children[name]
+        self._mock_children[name] = _deleted
+
+
+
+    def _format_mock_call_signature(self, args, kwargs):
+        name = self._mock_name or 'mock'
+        return _format_call_signature(name, args, kwargs)
+
+
+    def _format_mock_failure_message(self, args, kwargs):
+        message = 'Expected call: %s\nActual call: %s'
+        expected_string = self._format_mock_call_signature(args, kwargs)
+        call_args = self.call_args
+        if len(call_args) == 3:
+            call_args = call_args[1:]
+        actual_string = self._format_mock_call_signature(*call_args)
+        return message % (expected_string, actual_string)
+
+
+    def assert_called_with(_mock_self, *args, **kwargs):
+        """assert that the mock was called with the specified arguments.
+
+        Raises an AssertionError if the args and keyword args passed in are
+        different to the last call to the mock."""
+        self = _mock_self
+        if self.call_args is None:
+            expected = self._format_mock_call_signature(args, kwargs)
+            raise AssertionError('Expected call: %s\nNot called' % (expected,))
+
+        if self.call_args != (args, kwargs):
+            msg = self._format_mock_failure_message(args, kwargs)
+            raise AssertionError(msg)
+
+
+    def assert_called_once_with(_mock_self, *args, **kwargs):
+        """assert that the mock was called exactly once and with the specified
+        arguments."""
+        self = _mock_self
+        if not self.call_count == 1:
+            msg = ("Expected to be called once. Called %s times." %
+                   self.call_count)
+            raise AssertionError(msg)
+        return self.assert_called_with(*args, **kwargs)
+
+
+    def assert_has_calls(self, calls, any_order=False):
+        """assert the mock has been called with the specified calls.
+        The `mock_calls` list is checked for the calls.
+
+        If `any_order` is False (the default) then the calls must be
+        sequential. There can be extra calls before or after the
+        specified calls.
+
+        If `any_order` is True then the calls can be in any order, but
+        they must all appear in `mock_calls`."""
+        if not any_order:
+            if calls not in self.mock_calls:
+                raise AssertionError(
+                    'Calls not found.\nExpected: %r\n'
+                    'Actual: %r' % (calls, self.mock_calls)
+                )
+            return
+
+        all_calls = list(self.mock_calls)
+
+        not_found = []
+        for kall in calls:
+            try:
+                all_calls.remove(kall)
+            except ValueError:
+                not_found.append(kall)
+        if not_found:
+            raise AssertionError(
+                '%r not all found in call list' % (tuple(not_found),)
+            )
+
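+    # --- Editor's note: illustrative sketch, not part of upstream mock 1.0.1 ---
+    # Ordered vs. unordered matching with the call helper
+    # (assuming `from mock import Mock, call`):
+    #
+    #   m = Mock()
+    #   m(1); m(2); m(3)
+    #   m.assert_has_calls([call(2), call(3)])                   # contiguous, in order
+    #   m.assert_has_calls([call(3), call(1)], any_order=True)   # order ignored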
+
+    def assert_any_call(self, *args, **kwargs):
+        """assert the mock has been called with the specified arguments.
+
+        The assert passes if the mock has *ever* been called, unlike
+        `assert_called_with` and `assert_called_once_with` that only pass if
+        the call is the most recent one."""
+        kall = call(*args, **kwargs)
+        if kall not in self.call_args_list:
+            expected_string = self._format_mock_call_signature(args, kwargs)
+            raise AssertionError(
+                '%s call not found' % expected_string
+            )
+
+
+    def _get_child_mock(self, **kw):
+        """Create the child mocks for attributes and return value.
+        By default child mocks will be the same type as the parent.
+        Subclasses of Mock may want to override this to customize the way
+        child mocks are made.
+
+        For non-callable mocks the callable variant will be used (rather than
+        any custom subclass)."""
+        _type = type(self)
+        if not issubclass(_type, CallableMixin):
+            if issubclass(_type, NonCallableMagicMock):
+                klass = MagicMock
+            elif issubclass(_type, NonCallableMock) :
+                klass = Mock
+        else:
+            klass = _type.__mro__[1]
+        return klass(**kw)
+
+
+
+def _try_iter(obj):
+    if obj is None:
+        return obj
+    if _is_exception(obj):
+        return obj
+    if _callable(obj):
+        return obj
+    try:
+        return iter(obj)
+    except TypeError:
+        # XXXX backwards compatibility
+        # but this will blow up on first call - so maybe we should fail early?
+        return obj
+
+
+
+class CallableMixin(Base):
+
+    def __init__(self, spec=None, side_effect=None, return_value=DEFAULT,
+                 wraps=None, name=None, spec_set=None, parent=None,
+                 _spec_state=None, _new_name='', _new_parent=None, **kwargs):
+        self.__dict__['_mock_return_value'] = return_value
+
+        _super(CallableMixin, self).__init__(
+            spec, wraps, name, spec_set, parent,
+            _spec_state, _new_name, _new_parent, **kwargs
+        )
+
+        self.side_effect = side_effect
+
+
+    def _mock_check_sig(self, *args, **kwargs):
+        # stub method that can be replaced with one with a specific signature
+        pass
+
+
+    def __call__(_mock_self, *args, **kwargs):
+        # can't use self in-case a function / method we are mocking uses self
+        # in the signature
+        _mock_self._mock_check_sig(*args, **kwargs)
+        return _mock_self._mock_call(*args, **kwargs)
+
+
+    def _mock_call(_mock_self, *args, **kwargs):
+        self = _mock_self
+        self.called = True
+        self.call_count += 1
+        self.call_args = _Call((args, kwargs), two=True)
+        self.call_args_list.append(_Call((args, kwargs), two=True))
+
+        _new_name = self._mock_new_name
+        _new_parent = self._mock_new_parent
+        self.mock_calls.append(_Call(('', args, kwargs)))
+
+        seen = set()
+        skip_next_dot = _new_name == '()'
+        do_method_calls = self._mock_parent is not None
+        name = self._mock_name
+        while _new_parent is not None:
+            this_mock_call = _Call((_new_name, args, kwargs))
+            if _new_parent._mock_new_name:
+                dot = '.'
+                if skip_next_dot:
+                    dot = ''
+
+                skip_next_dot = False
+                if _new_parent._mock_new_name == '()':
+                    skip_next_dot = True
+
+                _new_name = _new_parent._mock_new_name + dot + _new_name
+
+            if do_method_calls:
+                if _new_name == name:
+                    this_method_call = this_mock_call
+                else:
+                    this_method_call = _Call((name, args, kwargs))
+                _new_parent.method_calls.append(this_method_call)
+
+                do_method_calls = _new_parent._mock_parent is not None
+                if do_method_calls:
+                    name = _new_parent._mock_name + '.' + name
+
+            _new_parent.mock_calls.append(this_mock_call)
+            _new_parent = _new_parent._mock_new_parent
+
+            # use ids here so as not to call __hash__ on the mocks
+            _new_parent_id = id(_new_parent)
+            if _new_parent_id in seen:
+                break
+            seen.add(_new_parent_id)
+
+        ret_val = DEFAULT
+        effect = self.side_effect
+        if effect is not None:
+            if _is_exception(effect):
+                raise effect
+
+            if not _callable(effect):
+                result = next(effect)
+                if _is_exception(result):
+                    raise result
+                return result
+
+            ret_val = effect(*args, **kwargs)
+            if ret_val is DEFAULT:
+                ret_val = self.return_value
+
+        if (self._mock_wraps is not None and
+             self._mock_return_value is DEFAULT):
+            return self._mock_wraps(*args, **kwargs)
+        if ret_val is DEFAULT:
+            ret_val = self.return_value
+        return ret_val
+
+
+
+class Mock(CallableMixin, NonCallableMock):
+    """
+    Create a new `Mock` object. `Mock` takes several optional arguments
+    that specify the behaviour of the Mock object:
+
+    * `spec`: This can be either a list of strings or an existing object (a
+      class or instance) that acts as the specification for the mock object. If
+      you pass in an object then a list of strings is formed by calling dir on
+      the object (excluding unsupported magic attributes and methods). Accessing
+      any attribute not in this list will raise an `AttributeError`.
+
+      If `spec` is an object (rather than a list of strings) then
+      `mock.__class__` returns the class of the spec object. This allows mocks
+      to pass `isinstance` tests.
+
+    * `spec_set`: A stricter variant of `spec`. If used, attempting to *set*
+      or get an attribute on the mock that isn't on the object passed as
+      `spec_set` will raise an `AttributeError`.
+
+    * `side_effect`: A function to be called whenever the Mock is called. See
+      the `side_effect` attribute. Useful for raising exceptions or
+      dynamically changing return values. The function is called with the same
+      arguments as the mock, and unless it returns `DEFAULT`, the return
+      value of this function is used as the return value.
+
+      Alternatively `side_effect` can be an exception class or instance. In
+      this case the exception will be raised when the mock is called.
+
+      If `side_effect` is an iterable then each call to the mock will return
+      the next value from the iterable. If any of the members of the iterable
+      are exceptions they will be raised instead of returned.
+
+    * `return_value`: The value returned when the mock is called. By default
+      this is a new Mock (created on first access). See the
+      `return_value` attribute.
+
+    * `wraps`: Item for the mock object to wrap. If `wraps` is not None then
+      calling the Mock will pass the call through to the wrapped object
+      (returning the real result). Attribute access on the mock will return a
+      Mock object that wraps the corresponding attribute of the wrapped object
+      (so attempting to access an attribute that doesn't exist will raise an
+      `AttributeError`).
+
+      If the mock has an explicit `return_value` set then calls are not passed
+      to the wrapped object and the `return_value` is returned instead.
+
+    * `name`: If the mock has a name then it will be used in the repr of the
+      mock. This can be useful for debugging. The name is propagated to child
+      mocks.
+
+    Mocks can also be called with arbitrary keyword arguments. These will be
+    used to set attributes on the mock after it is created.
+    """
+
+
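+# A minimal usage sketch for `Mock` (kept as a comment so nothing runs at
+# import time; assumes this copy is importable as `mock`):
+#
+#     from mock import Mock
+#     m = Mock(return_value=3)
+#     assert m(1, key='two') == 3
+#     m.assert_called_with(1, key='two')
+#     m.side_effect = KeyError('boom')   # the next call now raises KeyError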
+
+def _dot_lookup(thing, comp, import_path):
+    try:
+        return getattr(thing, comp)
+    except AttributeError:
+        __import__(import_path)
+        return getattr(thing, comp)
+
+
+def _importer(target):
+    components = target.split('.')
+    import_path = components.pop(0)
+    thing = __import__(import_path)
+
+    for comp in components:
+        import_path += ".%s" % comp
+        thing = _dot_lookup(thing, comp, import_path)
+    return thing
+
+
+def _is_started(patcher):
+    # XXXX horrible
+    return hasattr(patcher, 'is_local')
+
+
+class _patch(object):
+
+    attribute_name = None
+    _active_patches = set()
+
+    def __init__(
+            self, getter, attribute, new, spec, create,
+            spec_set, autospec, new_callable, kwargs
+        ):
+        if new_callable is not None:
+            if new is not DEFAULT:
+                raise ValueError(
+                    "Cannot use 'new' and 'new_callable' together"
+                )
+            if autospec is not None:
+                raise ValueError(
+                    "Cannot use 'autospec' and 'new_callable' together"
+                )
+
+        self.getter = getter
+        self.attribute = attribute
+        self.new = new
+        self.new_callable = new_callable
+        self.spec = spec
+        self.create = create
+        self.has_local = False
+        self.spec_set = spec_set
+        self.autospec = autospec
+        self.kwargs = kwargs
+        self.additional_patchers = []
+
+
+    def copy(self):
+        patcher = _patch(
+            self.getter, self.attribute, self.new, self.spec,
+            self.create, self.spec_set,
+            self.autospec, self.new_callable, self.kwargs
+        )
+        patcher.attribute_name = self.attribute_name
+        patcher.additional_patchers = [
+            p.copy() for p in self.additional_patchers
+        ]
+        return patcher
+
+
+    def __call__(self, func):
+        if isinstance(func, ClassTypes):
+            return self.decorate_class(func)
+        return self.decorate_callable(func)
+
+
+    def decorate_class(self, klass):
+        for attr in dir(klass):
+            if not attr.startswith(patch.TEST_PREFIX):
+                continue
+
+            attr_value = getattr(klass, attr)
+            if not hasattr(attr_value, "__call__"):
+                continue
+
+            patcher = self.copy()
+            setattr(klass, attr, patcher(attr_value))
+        return klass
+
+
+    def decorate_callable(self, func):
+        if hasattr(func, 'patchings'):
+            func.patchings.append(self)
+            return func
+
+        @wraps(func)
+        def patched(*args, **keywargs):
+            # don't use a with here (backwards compatibility with Python 2.4)
+            extra_args = []
+            entered_patchers = []
+
+            # can't use try...except...finally because of Python 2.4
+            # compatibility
+            exc_info = tuple()
+            try:
+                try:
+                    for patching in patched.patchings:
+                        arg = patching.__enter__()
+                        entered_patchers.append(patching)
+                        if patching.attribute_name is not None:
+                            keywargs.update(arg)
+                        elif patching.new is DEFAULT:
+                            extra_args.append(arg)
+
+                    args += tuple(extra_args)
+                    return func(*args, **keywargs)
+                except:
+                    if (patching not in entered_patchers and
+                        _is_started(patching)):
+                        # the patcher may have been started, but an exception
+                        # raised whilst entering one of its additional_patchers
+                        entered_patchers.append(patching)
+                    # Pass the exception to __exit__
+                    exc_info = sys.exc_info()
+                    # re-raise the exception
+                    raise
+            finally:
+                for patching in reversed(entered_patchers):
+                    patching.__exit__(*exc_info)
+
+        patched.patchings = [self]
+        if hasattr(func, 'func_code'):
+            # not in Python 3
+            patched.compat_co_firstlineno = getattr(
+                func, "compat_co_firstlineno",
+                func.func_code.co_firstlineno
+            )
+        return patched
+
+
+    def get_original(self):
+        target = self.getter()
+        name = self.attribute
+
+        original = DEFAULT
+        local = False
+
+        try:
+            original = target.__dict__[name]
+        except (AttributeError, KeyError):
+            original = getattr(target, name, DEFAULT)
+        else:
+            local = True
+
+        if not self.create and original is DEFAULT:
+            raise AttributeError(
+                "%s does not have the attribute %r" % (target, name)
+            )
+        return original, local
+
+
+    def __enter__(self):
+        """Perform the patch."""
+        new, spec, spec_set = self.new, self.spec, self.spec_set
+        autospec, kwargs = self.autospec, self.kwargs
+        new_callable = self.new_callable
+        self.target = self.getter()
+
+        # normalise False to None
+        if spec is False:
+            spec = None
+        if spec_set is False:
+            spec_set = None
+        if autospec is False:
+            autospec = None
+
+        if spec is not None and autospec is not None:
+            raise TypeError("Can't specify spec and autospec")
+        if ((spec is not None or autospec is not None) and
+            spec_set not in (True, None)):
+            raise TypeError("Can't provide explicit spec_set *and* spec or autospec")
+
+        original, local = self.get_original()
+
+        if new is DEFAULT and autospec is None:
+            inherit = False
+            if spec is True:
+                # set spec to the object we are replacing
+                spec = original
+                if spec_set is True:
+                    spec_set = original
+                    spec = None
+            elif spec is not None:
+                if spec_set is True:
+                    spec_set = spec
+                    spec = None
+            elif spec_set is True:
+                spec_set = original
+
+            if spec is not None or spec_set is not None:
+                if original is DEFAULT:
+                    raise TypeError("Can't use 'spec' with create=True")
+                if isinstance(original, ClassTypes):
+                    # If we're patching out a class and there is a spec
+                    inherit = True
+
+            Klass = MagicMock
+            _kwargs = {}
+            if new_callable is not None:
+                Klass = new_callable
+            elif spec is not None or spec_set is not None:
+                this_spec = spec
+                if spec_set is not None:
+                    this_spec = spec_set
+                if _is_list(this_spec):
+                    not_callable = '__call__' not in this_spec
+                else:
+                    not_callable = not _callable(this_spec)
+                if not_callable:
+                    Klass = NonCallableMagicMock
+
+            if spec is not None:
+                _kwargs['spec'] = spec
+            if spec_set is not None:
+                _kwargs['spec_set'] = spec_set
+
+            # add a name to mocks
+            if (isinstance(Klass, type) and
+                issubclass(Klass, NonCallableMock) and self.attribute):
+                _kwargs['name'] = self.attribute
+
+            _kwargs.update(kwargs)
+            new = Klass(**_kwargs)
+
+            if inherit and _is_instance_mock(new):
+                # we can only tell if the instance should be callable if the
+                # spec is not a list
+                this_spec = spec
+                if spec_set is not None:
+                    this_spec = spec_set
+                if (not _is_list(this_spec) and not
+                    _instance_callable(this_spec)):
+                    Klass = NonCallableMagicMock
+
+                _kwargs.pop('name')
+                new.return_value = Klass(_new_parent=new, _new_name='()',
+                                         **_kwargs)
+        elif autospec is not None:
+            # spec is ignored, new *must* be default, spec_set is treated
+            # as a boolean. Should we check spec is not None and that spec_set
+            # is a bool?
+            if new is not DEFAULT:
+                raise TypeError(
+                    "autospec creates the mock for you. Can't specify "
+                    "autospec and new."
+                )
+            if original is DEFAULT:
+                raise TypeError("Can't use 'autospec' with create=True")
+            spec_set = bool(spec_set)
+            if autospec is True:
+                autospec = original
+
+            new = create_autospec(autospec, spec_set=spec_set,
+                                  _name=self.attribute, **kwargs)
+        elif kwargs:
+            # can't set keyword args when we aren't creating the mock
+            # XXXX If new is a Mock we could call new.configure_mock(**kwargs)
+            raise TypeError("Can't pass kwargs to a mock we aren't creating")
+
+        new_attr = new
+
+        self.temp_original = original
+        self.is_local = local
+        setattr(self.target, self.attribute, new_attr)
+        if self.attribute_name is not None:
+            extra_args = {}
+            if self.new is DEFAULT:
+                extra_args[self.attribute_name] =  new
+            for patching in self.additional_patchers:
+                arg = patching.__enter__()
+                if patching.new is DEFAULT:
+                    extra_args.update(arg)
+            return extra_args
+
+        return new
+
+
+    def __exit__(self, *exc_info):
+        """Undo the patch."""
+        if not _is_started(self):
+            raise RuntimeError('stop called on unstarted patcher')
+
+        if self.is_local and self.temp_original is not DEFAULT:
+            setattr(self.target, self.attribute, self.temp_original)
+        else:
+            delattr(self.target, self.attribute)
+            if not self.create and not hasattr(self.target, self.attribute):
+                # needed for proxy objects like django settings
+                setattr(self.target, self.attribute, self.temp_original)
+
+        del self.temp_original
+        del self.is_local
+        del self.target
+        for patcher in reversed(self.additional_patchers):
+            if _is_started(patcher):
+                patcher.__exit__(*exc_info)
+
+
+    def start(self):
+        """Activate a patch, returning any created mock."""
+        result = self.__enter__()
+        self._active_patches.add(self)
+        return result
+
+
+    def stop(self):
+        """Stop an active patch."""
+        self._active_patches.discard(self)
+        return self.__exit__()
+
+
+
+def _get_target(target):
+    try:
+        target, attribute = target.rsplit('.', 1)
+    except (TypeError, ValueError):
+        raise TypeError("Need a valid target to patch. You supplied: %r" %
+                        (target,))
+    getter = lambda: _importer(target)
+    return getter, attribute
+
+
+def _patch_object(
+        target, attribute, new=DEFAULT, spec=None,
+        create=False, spec_set=None, autospec=None,
+        new_callable=None, **kwargs
+    ):
+    """
+    patch.object(target, attribute, new=DEFAULT, spec=None, create=False,
+                 spec_set=None, autospec=None, new_callable=None, **kwargs)
+
+    patch the named member (`attribute`) on an object (`target`) with a mock
+    object.
+
+    `patch.object` can be used as a decorator, class decorator or a context
+    manager. Arguments `new`, `spec`, `create`, `spec_set`,
+    `autospec` and `new_callable` have the same meaning as for `patch`. Like
+    `patch`, `patch.object` takes arbitrary keyword arguments for configuring
+    the mock object it creates.
+
+    When used as a class decorator `patch.object` honours `patch.TEST_PREFIX`
+    for choosing which methods to wrap.
+    """
+    getter = lambda: target
+    return _patch(
+        getter, attribute, new, spec, create,
+        spec_set, autospec, new_callable, kwargs
+    )
+
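+# Illustrative use of `patch.object` as a context manager (comment-only
+# sketch; `os.getcwd` is just an arbitrary example target):
+#
+#     import os
+#     from mock import patch
+#
+#     with patch.object(os, 'getcwd', return_value='/tmp') as fake_getcwd:
+#         assert os.getcwd() == '/tmp'
+#     fake_getcwd.assert_called_once_with()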
+
+def _patch_multiple(target, spec=None, create=False, spec_set=None,
+                    autospec=None, new_callable=None, **kwargs):
+    """Perform multiple patches in a single call. It takes the object to be
+    patched (either as an object or a string to fetch the object by importing)
+    and keyword arguments for the patches::
+
+        with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'):
+            ...
+
+    Use `DEFAULT` as the value if you want `patch.multiple` to create
+    mocks for you. In this case the created mocks are passed into a decorated
+    function by keyword, and a dictionary is returned when `patch.multiple` is
+    used as a context manager.
+
+    `patch.multiple` can be used as a decorator, class decorator or a context
+    manager. The arguments `spec`, `spec_set`, `create`,
+    `autospec` and `new_callable` have the same meaning as for `patch`. These
+    arguments will be applied to *all* patches done by `patch.multiple`.
+
+    When used as a class decorator `patch.multiple` honours `patch.TEST_PREFIX`
+    for choosing which methods to wrap.
+    """
+    if type(target) in (unicode, str):
+        getter = lambda: _importer(target)
+    else:
+        getter = lambda: target
+
+    if not kwargs:
+        raise ValueError(
+            'Must supply at least one keyword argument with patch.multiple'
+        )
+    # need to wrap in a list for python 3, where items is a view
+    items = list(kwargs.items())
+    attribute, new = items[0]
+    patcher = _patch(
+        getter, attribute, new, spec, create, spec_set,
+        autospec, new_callable, {}
+    )
+    patcher.attribute_name = attribute
+    for attribute, new in items[1:]:
+        this_patcher = _patch(
+            getter, attribute, new, spec, create, spec_set,
+            autospec, new_callable, {}
+        )
+        this_patcher.attribute_name = attribute
+        patcher.additional_patchers.append(this_patcher)
+    return patcher
+
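+# Comment-only sketch of `patch.multiple` with `DEFAULT`, so the created
+# mocks are handed to the decorated function by keyword ('os.path' is only
+# an example target):
+#
+#     from mock import patch, DEFAULT
+#
+#     @patch.multiple('os.path', exists=DEFAULT, isdir=DEFAULT)
+#     def test_paths(exists, isdir):
+#         exists.return_value = True
+#         isdir.return_value = False
+#         ...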
+
+def patch(
+        target, new=DEFAULT, spec=None, create=False,
+        spec_set=None, autospec=None, new_callable=None, **kwargs
+    ):
+    """
+    `patch` acts as a function decorator, class decorator or a context
+    manager. Inside the body of the function or with statement, the `target`
+    is patched with a `new` object. When the function/with statement exits
+    the patch is undone.
+
+    If `new` is omitted, then the target is replaced with a
+    `MagicMock`. If `patch` is used as a decorator and `new` is
+    omitted, the created mock is passed in as an extra argument to the
+    decorated function. If `patch` is used as a context manager the created
+    mock is returned by the context manager.
+
+    `target` should be a string in the form `'package.module.ClassName'`. The
+    `target` is imported and the specified object replaced with the `new`
+    object, so the `target` must be importable from the environment you are
+    calling `patch` from. The target is imported when the decorated function
+    is executed, not at decoration time.
+
+    The `spec` and `spec_set` keyword arguments are passed to the `MagicMock`
+    if patch is creating one for you.
+
+    In addition you can pass `spec=True` or `spec_set=True`, which causes
+    patch to pass in the object being mocked as the spec/spec_set object.
+
+    `new_callable` allows you to specify a different class, or callable object,
+    that will be called to create the `new` object. By default `MagicMock` is
+    used.
+
+    A more powerful form of `spec` is `autospec`. If you set `autospec=True`
+    then the mock will be created with a spec from the object being replaced.
+    All attributes of the mock will also have the spec of the corresponding
+    attribute of the object being replaced. Methods and functions being
+    mocked will have their arguments checked and will raise a `TypeError` if
+    they are called with the wrong signature. For mocks replacing a class,
+    their return value (the 'instance') will have the same spec as the class.
+
+    Instead of `autospec=True` you can pass `autospec=some_object` to use an
+    arbitrary object as the spec instead of the one being replaced.
+
+    By default `patch` will fail to replace attributes that don't exist. If
+    you pass in `create=True`, and the attribute doesn't exist, patch will
+    create the attribute for you when the patched function is called, and
+    delete it again afterwards. This is useful for writing tests against
+    attributes that your production code creates at runtime. It is off by
+    default because it can be dangerous. With it switched on you can write
+    passing tests against APIs that don't actually exist!
+
+    Patch can be used as a `TestCase` class decorator. It works by
+    decorating each test method in the class. This reduces the boilerplate
+    code when your test methods share a common patchings set. `patch` finds
+    tests by looking for method names that start with `patch.TEST_PREFIX`.
+    By default this is `test`, which matches the way `unittest` finds tests.
+    You can specify an alternative prefix by setting `patch.TEST_PREFIX`.
+
+    Patch can be used as a context manager, with the with statement. Here the
+    patching applies to the indented block after the with statement. If you
+    use "as" then the patched object will be bound to the name after the
+    "as"; very useful if `patch` is creating a mock object for you.
+
+    `patch` takes arbitrary keyword arguments. These will be passed to
+    the `Mock` (or `new_callable`) on construction.
+
+    `patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are
+    available for alternate use-cases.
+    """
+    getter, attribute = _get_target(target)
+    return _patch(
+        getter, attribute, new, spec, create,
+        spec_set, autospec, new_callable, kwargs
+    )
+
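+# A short sketch of `patch` as a decorator and as a context manager
+# (comment only; 'os.path.exists' is an arbitrary example target):
+#
+#     from mock import patch
+#
+#     @patch('os.path.exists', return_value=True)
+#     def test_exists(fake_exists):
+#         import os.path
+#         assert os.path.exists('/no/such/path')
+#         fake_exists.assert_called_with('/no/such/path')
+#
+#     with patch('os.getcwd') as fake_getcwd:
+#         fake_getcwd.return_value = '/tmp'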
+
+class _patch_dict(object):
+    """
+    Patch a dictionary, or dictionary like object, and restore the dictionary
+    to its original state after the test.
+
+    `in_dict` can be a dictionary or a mapping like container. If it is a
+    mapping then it must at least support getting, setting and deleting items
+    plus iterating over keys.
+
+    `in_dict` can also be a string specifying the name of the dictionary, which
+    will then be fetched by importing it.
+
+    `values` can be a dictionary of values to set in the dictionary. `values`
+    can also be an iterable of `(key, value)` pairs.
+
+    If `clear` is True then the dictionary will be cleared before the new
+    values are set.
+
+    `patch.dict` can also be called with arbitrary keyword arguments to set
+    values in the dictionary::
+
+        with patch.dict('sys.modules', mymodule=Mock(), other_module=Mock()):
+            ...
+
+    `patch.dict` can be used as a context manager, decorator or class
+    decorator. When used as a class decorator `patch.dict` honours
+    `patch.TEST_PREFIX` for choosing which methods to wrap.
+    """
+
+    def __init__(self, in_dict, values=(), clear=False, **kwargs):
+        if isinstance(in_dict, basestring):
+            in_dict = _importer(in_dict)
+        self.in_dict = in_dict
+        # support any argument supported by dict(...) constructor
+        self.values = dict(values)
+        self.values.update(kwargs)
+        self.clear = clear
+        self._original = None
+
+
+    def __call__(self, f):
+        if isinstance(f, ClassTypes):
+            return self.decorate_class(f)
+        @wraps(f)
+        def _inner(*args, **kw):
+            self._patch_dict()
+            try:
+                return f(*args, **kw)
+            finally:
+                self._unpatch_dict()
+
+        return _inner
+
+
+    def decorate_class(self, klass):
+        for attr in dir(klass):
+            attr_value = getattr(klass, attr)
+            if (attr.startswith(patch.TEST_PREFIX) and
+                 hasattr(attr_value, "__call__")):
+                decorator = _patch_dict(self.in_dict, self.values, self.clear)
+                decorated = decorator(attr_value)
+                setattr(klass, attr, decorated)
+        return klass
+
+
+    def __enter__(self):
+        """Patch the dict."""
+        self._patch_dict()
+
+
+    def _patch_dict(self):
+        values = self.values
+        in_dict = self.in_dict
+        clear = self.clear
+
+        try:
+            original = in_dict.copy()
+        except AttributeError:
+            # dict like object with no copy method
+            # must support iteration over keys
+            original = {}
+            for key in in_dict:
+                original[key] = in_dict[key]
+        self._original = original
+
+        if clear:
+            _clear_dict(in_dict)
+
+        try:
+            in_dict.update(values)
+        except AttributeError:
+            # dict like object with no update method
+            for key in values:
+                in_dict[key] = values[key]
+
+
+    def _unpatch_dict(self):
+        in_dict = self.in_dict
+        original = self._original
+
+        _clear_dict(in_dict)
+
+        try:
+            in_dict.update(original)
+        except AttributeError:
+            for key in original:
+                in_dict[key] = original[key]
+
+
+    def __exit__(self, *args):
+        """Unpatch the dict."""
+        self._unpatch_dict()
+        return False
+
+    start = __enter__
+    stop = __exit__
+
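+# Comment-only sketch of `patch.dict`; `os.environ` is a convenient
+# mapping-like target and is restored when the block exits:
+#
+#     import os
+#     from mock import patch
+#
+#     with patch.dict(os.environ, {'PATH': '/fake'}, clear=True):
+#         assert os.environ['PATH'] == '/fake'
+#     # the original contents of os.environ are back here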
+
+def _clear_dict(in_dict):
+    try:
+        in_dict.clear()
+    except AttributeError:
+        keys = list(in_dict)
+        for key in keys:
+            del in_dict[key]
+
+
+def _patch_stopall():
+    """Stop all active patches."""
+    for patch in list(_patch._active_patches):
+        patch.stop()
+
+
+patch.object = _patch_object
+patch.dict = _patch_dict
+patch.multiple = _patch_multiple
+patch.stopall = _patch_stopall
+patch.TEST_PREFIX = 'test'
+
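+# The patcher objects returned by patch(...) can also be started and stopped
+# explicitly, e.g. from setUp/tearDown; an illustrative sketch:
+#
+#     patcher = patch('os.getcwd', return_value='/tmp')
+#     fake_getcwd = patcher.start()      # returns the created MagicMock
+#     try:
+#         ...
+#     finally:
+#         patcher.stop()                 # or patch.stopall() to undo every
+#                                        # patcher activated via start()
+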
+magic_methods = (
+    "lt le gt ge eq ne "
+    "getitem setitem delitem "
+    "len contains iter "
+    "hash str sizeof "
+    "enter exit "
+    "divmod neg pos abs invert "
+    "complex int float index "
+    "trunc floor ceil "
+)
+
+numerics = "add sub mul div floordiv mod lshift rshift and xor or pow "
+inplace = ' '.join('i%s' % n for n in numerics.split())
+right = ' '.join('r%s' % n for n in numerics.split())
+extra = ''
+if inPy3k:
+    extra = 'bool next '
+else:
+    extra = 'unicode long nonzero oct hex truediv rtruediv '
+
+# not including __prepare__, __instancecheck__, __subclasscheck__
+# (as they are metaclass methods)
+# __del__ is not supported at all as it causes problems if it exists
+
+_non_defaults = set('__%s__' % method for method in [
+    'cmp', 'getslice', 'setslice', 'coerce', 'subclasses',
+    'format', 'get', 'set', 'delete', 'reversed',
+    'missing', 'reduce', 'reduce_ex', 'getinitargs',
+    'getnewargs', 'getstate', 'setstate', 'getformat',
+    'setformat', 'repr', 'dir'
+])
+
+
+def _get_method(name, func):
+    "Turns a callable object (like a mock) into a real function"
+    def method(self, *args, **kw):
+        return func(self, *args, **kw)
+    method.__name__ = name
+    return method
+
+
+_magics = set(
+    '__%s__' % method for method in
+    ' '.join([magic_methods, numerics, inplace, right, extra]).split()
+)
+
+_all_magics = _magics | _non_defaults
+
+_unsupported_magics = set([
+    '__getattr__', '__setattr__',
+    '__init__', '__new__', '__prepare__',
+    '__instancecheck__', '__subclasscheck__',
+    '__del__'
+])
+
+_calculate_return_value = {
+    '__hash__': lambda self: object.__hash__(self),
+    '__str__': lambda self: object.__str__(self),
+    '__sizeof__': lambda self: object.__sizeof__(self),
+    '__unicode__': lambda self: unicode(object.__str__(self)),
+}
+
+_return_values = {
+    '__lt__': NotImplemented,
+    '__gt__': NotImplemented,
+    '__le__': NotImplemented,
+    '__ge__': NotImplemented,
+    '__int__': 1,
+    '__contains__': False,
+    '__len__': 0,
+    '__exit__': False,
+    '__complex__': 1j,
+    '__float__': 1.0,
+    '__bool__': True,
+    '__nonzero__': True,
+    '__oct__': '1',
+    '__hex__': '0x1',
+    '__long__': long(1),
+    '__index__': 1,
+}
+
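+# In practice the table above gives MagicMock instances sensible defaults for
+# the supported magic methods; a comment-only illustration:
+#
+#     from mock import MagicMock
+#     m = MagicMock()
+#     assert len(m) == 0        # __len__ -> 0
+#     assert 'x' not in m       # __contains__ -> False
+#     assert int(m) == 1        # __int__ -> 1
+#     assert bool(m)            # __bool__ / __nonzero__ -> True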
+
+def _get_eq(self):
+    def __eq__(other):
+        ret_val = self.__eq__._mock_return_value
+        if ret_val is not DEFAULT:
+            return ret_val
+        return self is other
+    return __eq__
+
+def _get_ne(self):
+    def __ne__(other):
+        if self.__ne__._mock_return_value is not DEFAULT:
+            return DEFAULT
+        return self is not other
+    return __ne__
+
+def _get_iter(self):
+    def __iter__():
+        ret_val = self.__iter__._mock_return_value
+        if ret_val is DEFAULT:
+            return iter([])
+        # if ret_val was already an iterator, then calling iter on it should
+        # return the iterator unchanged
+        return iter(ret_val)
+    return __iter__
+
+_side_effect_methods = {
+    '__eq__': _get_eq,
+    '__ne__': _get_ne,
+    '__iter__': _get_iter,
+}
+
+
+
+def _set_return_value(mock, method, name):
+    fixed = _return_values.get(name, DEFAULT)
+    if fixed is not DEFAULT:
+        method.return_value = fixed
+        return
+
+    return_calculator = _calculate_return_value.get(name)
+    if return_calculator is not None:
+        try:
+            return_value = return_calculator(mock)
+        except AttributeError:
+            # XXXX why do we return AttributeError here?
+            #      set it as a side_effect instead?
+            return_value = AttributeError(name)
+        method.return_value = return_value
+        return
+
+    side_effector = _side_effect_methods.get(name)
+    if side_effector is not None:
+        method.side_effect = side_effector(mock)
+
+
+
+class MagicMixin(object):
+    def __init__(self, *args, **kw):
+        _super(MagicMixin, self).__init__(*args, **kw)
+        self._mock_set_magics()
+
+
+    def _mock_set_magics(self):
+        these_magics = _magics
+
+        if self._mock_methods is not None:
+            these_magics = _magics.intersection(self._mock_methods)
+
+            remove_magics = set()
+            remove_magics = _magics - these_magics
+
+            for entry in remove_magics:
+                if entry in type(self).__dict__:
+                    # remove unneeded magic methods
+                    delattr(self, entry)
+
+        # don't overwrite existing attributes if called a second time
+        these_magics = these_magics - set(type(self).__dict__)
+
+        _type = type(self)
+        for entry in these_magics:
+            setattr(_type, entry, MagicProxy(entry, self))
+
+
+
+class NonCallableMagicMock(MagicMixin, NonCallableMock):
+    """A version of `MagicMock` that isn't callable."""
+    def mock_add_spec(self, spec, spec_set=False):
+        """Add a spec to a mock. `spec` can either be an object or a
+        list of strings. Only attributes on the `spec` can be fetched as
+        attributes from the mock.
+
+        If `spec_set` is True then only attributes on the spec can be set."""
+        self._mock_add_spec(spec, spec_set)
+        self._mock_set_magics()
+
+
+
+class MagicMock(MagicMixin, Mock):
+    """
+    MagicMock is a subclass of Mock with default implementations
+    of most of the magic methods. You can use MagicMock without having to
+    configure the magic methods yourself.
+
+    If you use the `spec` or `spec_set` arguments then *only* magic
+    methods that exist in the spec will be created.
+
+    Attributes and the return value of a `MagicMock` will also be `MagicMocks`.
+    """
+    def mock_add_spec(self, spec, spec_set=False):
+        """Add a spec to a mock. `spec` can either be an object or a
+        list of strings. Only attributes on the `spec` can be fetched as
+        attributes from the mock.
+
+        If `spec_set` is True then only attributes on the spec can be set."""
+        self._mock_add_spec(spec, spec_set)
+        self._mock_set_magics()
+
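+# Individual magic methods on a MagicMock can be configured like any other
+# attribute; a comment-only sketch:
+#
+#     from mock import MagicMock
+#     m = MagicMock()
+#     m.__len__.return_value = 3
+#     m.__iter__.return_value = iter(['a', 'b'])
+#     assert len(m) == 3
+#     assert list(m) == ['a', 'b']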
+
+
+class MagicProxy(object):
+    def __init__(self, name, parent):
+        self.name = name
+        self.parent = parent
+
+    def __call__(self, *args, **kwargs):
+        m = self.create_mock()
+        return m(*args, **kwargs)
+
+    def create_mock(self):
+        entry = self.name
+        parent = self.parent
+        m = parent._get_child_mock(name=entry, _new_name=entry,
+                                   _new_parent=parent)
+        setattr(parent, entry, m)
+        _set_return_value(parent, m, entry)
+        return m
+
+    def __get__(self, obj, _type=None):
+        return self.create_mock()
+
+
+
+class _ANY(object):
+    "A helper object that compares equal to everything."
+
+    def __eq__(self, other):
+        return True
+
+    def __ne__(self, other):
+        return False
+
+    def __repr__(self):
+        return '<ANY>'
+
+ANY = _ANY()
+
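+# `ANY` is handy in assertions where one argument is unpredictable; a
+# comment-only sketch:
+#
+#     from mock import MagicMock, ANY
+#     m = MagicMock(return_value=None)
+#     m('something', key=object())
+#     m.assert_called_with('something', key=ANY)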
+
+
+def _format_call_signature(name, args, kwargs):
+    message = '%s(%%s)' % name
+    formatted_args = ''
+    args_string = ', '.join([repr(arg) for arg in args])
+    kwargs_string = ', '.join([
+        '%s=%r' % (key, value) for key, value in kwargs.items()
+    ])
+    if args_string:
+        formatted_args = args_string
+    if kwargs_string:
+        if formatted_args:
+            formatted_args += ', '
+        formatted_args += kwargs_string
+
+    return message % formatted_args
+
+
+
+class _Call(tuple):
+    """
+    A tuple for holding the results of a call to a mock, either in the form
+    `(args, kwargs)` or `(name, args, kwargs)`.
+
+    If args or kwargs are empty then a call tuple will compare equal to
+    a tuple without those values. This makes comparisons less verbose::
+
+        _Call(('name', (), {})) == ('name',)
+        _Call(('name', (1,), {})) == ('name', (1,))
+        _Call(((), {'a': 'b'})) == ({'a': 'b'},)
+
+    The `_Call` object provides a useful shortcut for comparing with call::
+
+        _Call(((1, 2), {'a': 3})) == call(1, 2, a=3)
+        _Call(('foo', (1, 2), {'a': 3})) == call.foo(1, 2, a=3)
+
+    If the _Call has no name then it will match any name.
+    """
+    def __new__(cls, value=(), name=None, parent=None, two=False,
+                from_kall=True):
+        name = ''
+        args = ()
+        kwargs = {}
+        _len = len(value)
+        if _len == 3:
+            name, args, kwargs = value
+        elif _len == 2:
+            first, second = value
+            if isinstance(first, basestring):
+                name = first
+                if isinstance(second, tuple):
+                    args = second
+                else:
+                    kwargs = second
+            else:
+                args, kwargs = first, second
+        elif _len == 1:
+            value, = value
+            if isinstance(value, basestring):
+                name = value
+            elif isinstance(value, tuple):
+                args = value
+            else:
+                kwargs = value
+
+        if two:
+            return tuple.__new__(cls, (args, kwargs))
+
+        return tuple.__new__(cls, (name, args, kwargs))
+
+
+    def __init__(self, value=(), name=None, parent=None, two=False,
+                 from_kall=True):
+        self.name = name
+        self.parent = parent
+        self.from_kall = from_kall
+
+
+    def __eq__(self, other):
+        if other is ANY:
+            return True
+        try:
+            len_other = len(other)
+        except TypeError:
+            return False
+
+        self_name = ''
+        if len(self) == 2:
+            self_args, self_kwargs = self
+        else:
+            self_name, self_args, self_kwargs = self
+
+        other_name = ''
+        if len_other == 0:
+            other_args, other_kwargs = (), {}
+        elif len_other == 3:
+            other_name, other_args, other_kwargs = other
+        elif len_other == 1:
+            value, = other
+            if isinstance(value, tuple):
+                other_args = value
+                other_kwargs = {}
+            elif isinstance(value, basestring):
+                other_name = value
+                other_args, other_kwargs = (), {}
+            else:
+                other_args = ()
+                other_kwargs = value
+        else:
+            # len 2
+            # could be (name, args) or (name, kwargs) or (args, kwargs)
+            first, second = other
+            if isinstance(first, basestring):
+                other_name = first
+                if isinstance(second, tuple):
+                    other_args, other_kwargs = second, {}
+                else:
+                    other_args, other_kwargs = (), second
+            else:
+                other_args, other_kwargs = first, second
+
+        if self_name and other_name != self_name:
+            return False
+
+        # this order is important for ANY to work!
+        return (other_args, other_kwargs) == (self_args, self_kwargs)
+
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+
+    def __call__(self, *args, **kwargs):
+        if self.name is None:
+            return _Call(('', args, kwargs), name='()')
+
+        name = self.name + '()'
+        return _Call((self.name, args, kwargs), name=name, parent=self)
+
+
+    def __getattr__(self, attr):
+        if self.name is None:
+            return _Call(name=attr, from_kall=False)
+        name = '%s.%s' % (self.name, attr)
+        return _Call(name=name, parent=self, from_kall=False)
+
+
+    def __repr__(self):
+        if not self.from_kall:
+            name = self.name or 'call'
+            if name.startswith('()'):
+                name = 'call%s' % name
+            return name
+
+        if len(self) == 2:
+            name = 'call'
+            args, kwargs = self
+        else:
+            name, args, kwargs = self
+            if not name:
+                name = 'call'
+            elif not name.startswith('()'):
+                name = 'call.%s' % name
+            else:
+                name = 'call%s' % name
+        return _format_call_signature(name, args, kwargs)
+
+
+    def call_list(self):
+        """For a call object that represents multiple calls, `call_list`
+        returns a list of all the intermediate calls as well as the
+        final call."""
+        vals = []
+        thing = self
+        while thing is not None:
+            if thing.from_kall:
+                vals.append(thing)
+            thing = thing.parent
+        return _CallList(reversed(vals))
+
+
+call = _Call(from_kall=False)
+
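+# The `call` helper makes assertions about recorded calls easier to read;
+# a comment-only sketch:
+#
+#     from mock import MagicMock, call
+#     m = MagicMock()
+#     m(1)
+#     m.method(2, x=3)
+#     assert m.mock_calls == [call(1), call.method(2, x=3)]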
+
+
+def create_autospec(spec, spec_set=False, instance=False, _parent=None,
+                    _name=None, **kwargs):
+    """Create a mock object using another object as a spec. Attributes on the
+    mock will use the corresponding attribute on the `spec` object as their
+    spec.
+
+    Functions or methods being mocked will have their arguments checked
+    to ensure that they are called with the correct signature.
+
+    If `spec_set` is True then attempting to set attributes that don't exist
+    on the spec object will raise an `AttributeError`.
+
+    If a class is used as a spec then the return value of the mock (the
+    instance of the class) will have the same spec. You can use a class as the
+    spec for an instance object by passing `instance=True`. The returned mock
+    will only be callable if instances of the mock are callable.
+
+    `create_autospec` also takes arbitrary keyword arguments that are passed to
+    the constructor of the created mock."""
+    if _is_list(spec):
+        # can't pass a list instance to the mock constructor as it will be
+        # interpreted as a list of strings
+        spec = type(spec)
+
+    is_type = isinstance(spec, ClassTypes)
+
+    _kwargs = {'spec': spec}
+    if spec_set:
+        _kwargs = {'spec_set': spec}
+    elif spec is None:
+        # None we mock with a normal mock without a spec
+        _kwargs = {}
+
+    _kwargs.update(kwargs)
+
+    Klass = MagicMock
+    if type(spec) in DescriptorTypes:
+        # descriptors don't have a spec
+        # because we don't know what type they return
+        _kwargs = {}
+    elif not _callable(spec):
+        Klass = NonCallableMagicMock
+    elif is_type and instance and not _instance_callable(spec):
+        Klass = NonCallableMagicMock
+
+    _new_name = _name
+    if _parent is None:
+        # for a top level object no _new_name should be set
+        _new_name = ''
+
+    mock = Klass(parent=_parent, _new_parent=_parent, _new_name=_new_name,
+                 name=_name, **_kwargs)
+
+    if isinstance(spec, FunctionTypes):
+        # should only happen at the top level because we don't
+        # recurse for functions
+        mock = _set_signature(mock, spec)
+    else:
+        _check_signature(spec, mock, is_type, instance)
+
+    if _parent is not None and not instance:
+        _parent._mock_children[_name] = mock
+
+    if is_type and not instance and 'return_value' not in kwargs:
+        mock.return_value = create_autospec(spec, spec_set, instance=True,
+                                            _name='()', _parent=mock)
+
+    for entry in dir(spec):
+        if _is_magic(entry):
+            # MagicMock already does the useful magic methods for us
+            continue
+
+        if isinstance(spec, FunctionTypes) and entry in FunctionAttributes:
+            # allow a mock to actually be a function
+            continue
+
+        # XXXX do we need a better way of getting attributes without
+        # triggering code execution (?) Probably not - we need the actual
+        # object to mock it so we would rather trigger a property than mock
+        # the property descriptor. Likewise we want to mock out dynamically
+        # provided attributes.
+        # XXXX what about attributes that raise exceptions other than
+        # AttributeError on being fetched?
+        # we could be resilient against it, or catch and propagate the
+        # exception when the attribute is fetched from the mock
+        try:
+            original = getattr(spec, entry)
+        except AttributeError:
+            continue
+
+        kwargs = {'spec': original}
+        if spec_set:
+            kwargs = {'spec_set': original}
+
+        if not isinstance(original, FunctionTypes):
+            new = _SpecState(original, spec_set, mock, entry, instance)
+            mock._mock_children[entry] = new
+        else:
+            parent = mock
+            if isinstance(spec, FunctionTypes):
+                parent = mock.mock
+
+            new = MagicMock(parent=parent, name=entry, _new_name=entry,
+                            _new_parent=parent, **kwargs)
+            mock._mock_children[entry] = new
+            skipfirst = _must_skip(spec, entry, is_type)
+            _check_signature(original, new, skipfirst=skipfirst)
+
+        # so functions created with _set_signature become instance attributes,
+        # *plus* their underlying mock exists in _mock_children of the parent
+        # mock. Adding to _mock_children may be unnecessary where we are also
+        # setting as an instance attribute?
+        if isinstance(new, FunctionTypes):
+            setattr(mock, entry, new)
+
+    return mock
+
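+# A comment-only sketch of `create_autospec`, which also checks call
+# signatures against the spec'd function:
+#
+#     from mock import create_autospec
+#
+#     def add(a, b):
+#         return a + b
+#
+#     mock_add = create_autospec(add, return_value=3)
+#     assert mock_add(1, 2) == 3
+#     mock_add.assert_called_once_with(1, 2)
+#     # mock_add(1) would raise TypeError: the signature is enforced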
+
+def _must_skip(spec, entry, is_type):
+    if not isinstance(spec, ClassTypes):
+        if entry in getattr(spec, '__dict__', {}):
+            # instance attribute - shouldn't skip
+            return False
+        spec = spec.__class__
+    if not hasattr(spec, '__mro__'):
+        # old style class: can't have descriptors anyway
+        return is_type
+
+    for klass in spec.__mro__:
+        result = klass.__dict__.get(entry, DEFAULT)
+        if result is DEFAULT:
+            continue
+        if isinstance(result, (staticmethod, classmethod)):
+            return False
+        return is_type
+
+    # shouldn't get here unless function is a dynamically provided attribute
+    # XXXX untested behaviour
+    return is_type
+
+
+def _get_class(obj):
+    try:
+        return obj.__class__
+    except AttributeError:
+        # in Python 2, _sre.SRE_Pattern objects have no __class__
+        return type(obj)
+
+
+class _SpecState(object):
+
+    def __init__(self, spec, spec_set=False, parent=None,
+                 name=None, ids=None, instance=False):
+        self.spec = spec
+        self.ids = ids
+        self.spec_set = spec_set
+        self.parent = parent
+        self.instance = instance
+        self.name = name
+
+
+FunctionTypes = (
+    # python function
+    type(create_autospec),
+    # instance method
+    type(ANY.__eq__),
+    # unbound method
+    type(_ANY.__eq__),
+)
+
+FunctionAttributes = set([
+    'func_closure',
+    'func_code',
+    'func_defaults',
+    'func_dict',
+    'func_doc',
+    'func_globals',
+    'func_name',
+])
+
+
+file_spec = None
+
+
+def mock_open(mock=None, read_data=''):
+    """
+    A helper function to create a mock to replace the use of `open`. It works
+    for `open` called directly or used as a context manager.
+
+    The `mock` argument is the mock object to configure. If `None` (the
+    default) then a `MagicMock` will be created for you, with the API limited
+    to methods or attributes available on standard file handles.
+
+    `read_data` is a string for the `read` method of the file handle to return.
+    This is an empty string by default.
+    """
+    global file_spec
+    if file_spec is None:
+        # set on first use
+        if inPy3k:
+            import _io
+            file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))
+        else:
+            file_spec = file
+
+    if mock is None:
+        mock = MagicMock(name='open', spec=open)
+
+    handle = MagicMock(spec=file_spec)
+    handle.write.return_value = None
+    handle.__enter__.return_value = handle
+    handle.read.return_value = read_data
+
+    mock.return_value = handle
+    return mock
+
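+# An illustrative sketch of `mock_open` combined with `patch` (patching
+# '__builtin__.open' here; on Python 3 the target would be 'builtins.open'):
+#
+#     from mock import patch, mock_open
+#
+#     m = mock_open(read_data='some data')
+#     with patch('__builtin__.open', m):
+#         with open('foo.txt') as f:
+#             assert f.read() == 'some data'
+#     m.assert_called_once_with('foo.txt')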
+
+class PropertyMock(Mock):
+    """
+    A mock intended to be used as a property, or other descriptor, on a class.
+    `PropertyMock` provides `__get__` and `__set__` methods so you can specify
+    a return value when it is fetched.
+
+    Fetching a `PropertyMock` instance from an object calls the mock, with
+    no args. Setting it calls the mock with the value being set.
+    """
+    def _get_child_mock(self, **kwargs):
+        return MagicMock(**kwargs)
+
+    def __get__(self, obj, obj_type):
+        return self()
+    def __set__(self, obj, val):
+        self(val)
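+
+
+# A comment-only sketch of PropertyMock via patch.object and new_callable
+# (the Thing class is just a stand-in):
+#
+#     from mock import PropertyMock, patch
+#
+#     class Thing(object):
+#         @property
+#         def value(self):
+#             return 'real'
+#
+#     with patch.object(Thing, 'value', new_callable=PropertyMock) as fake:
+#         fake.return_value = 'mocked'
+#         assert Thing().value == 'mocked'
+#     fake.assert_called_once_with()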
diff --git a/catapult/telemetry/third_party/modulegraph/MANIFEST.in b/catapult/telemetry/third_party/modulegraph/MANIFEST.in
new file mode 100644
index 0000000..7d2580d
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/MANIFEST.in
@@ -0,0 +1,9 @@
+include *.txt MANIFEST.in *.py
+recursive-include scripts *.py
+graft doc
+graft doc/_static
+graft doc/_templates
+graft modulegraph_tests
+global-exclude .DS_Store
+global-exclude *.pyc
+global-exclude *.so
diff --git a/catapult/telemetry/third_party/modulegraph/PKG-INFO b/catapult/telemetry/third_party/modulegraph/PKG-INFO
new file mode 100644
index 0000000..bfd4006
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/PKG-INFO
@@ -0,0 +1,337 @@
+Metadata-Version: 1.1
+Name: modulegraph
+Version: 0.12.1
+Summary: Python module dependency analysis tool
+Home-page: http://bitbucket.org/ronaldoussoren/modulegraph
+Author: Ronald Oussoren
+Author-email: ronaldoussoren@mac.com
+License: MIT
+Download-URL: http://pypi.python.org/pypi/modulegraph
+Description: modulegraph determines a dependency graph between Python modules primarily
+        by bytecode analysis for import statements.
+        
+        modulegraph uses similar methods to modulefinder from the standard library,
+        but uses a more flexible internal representation, has more extensive 
+        knowledge of special cases, and is extensible.
+        
+        
+        Release history
+        ===============
+        
+        0.12.1
+        ------
+        
+        * Issue #25: Complex python files could cause a "maximum recursion depth exceeded"
+          exception due to using stack-based recursion to walk the module AST.
+        
+        
+        0.12
+        ----
+        
+        * Added 'modulegraph.modulegraph.InvalidSourceModule'. This graph node is
+          used for Python source modules that cannot be compiled (for example because
+          they contain syntax errors).
+        
+          This is primarily useful for being able to create a graph for packages
+          that have python 2.x or python 3.x compatibility in separate modules that
+          contain code that isn't valid in the "other" python version.
+        
+        * Added 'modulegraph.modulegraph.InvalidCompiledModule'. This graph node
+          is used for Python bytecode modules that cannot be loaded.
+        
+        * Added 'modulegraph.modulegraph.NamespacePackage'.
+        
+          Patch by bitbucket user htgoebel.
+        
+        * No longer add a MissingModule node to the graph for 'collections.defaultdict'
+          when using 'from collections import defaultdict' ('collections.defaultdict'
+          is an attribute of 'collections', not a submodule).
+        
+        * Fixed typo in ModuleGraph.getReferences()
+        
+        * Added ModuleGraph.getReferers(tonode). This methods yields the
+          nodes that are referencing *tonode* (the reverse of getReferences)
+        
+        * The graph will no longer contain MissingModule nodes when using 'from ... import name' to
+          import a global variable in a python module.
+        
+          There will still be MissingModule nodes for global variables in C extensions, and
+          for 'from missing import name' when 'missing' is itself a MissingModule.
+        
+        * Issue #18: Don't assume that a PEP 302 loader object has a ``path`` attribute. That
+          attribute is not documented and is not always present.
+        
+        0.11.2
+        ------
+        
+        *
+        
+        0.11.1
+        ------
+        
+        * Issue #145: Don't exclude the platform specific 'path' modules (like ntpath)
+        
+        0.11
+        ----
+        
+        This is a feature release
+        
+        Features
+        ........
+        
+        * Hardcode knowledge about the compatibility aliases in the email
+          module (for python 2.5 up to 3.0).
+        
+          This makes it possible to remove a heavy-handed recipe from py2app.
+        
+        * Added ``modulegraph.zipio.getmode`` to fetch the Unix file mode
+          for a file.
+        
+        * Added some handy methods to ``modulegraph.modulegraph.ModuleGraph``.
+        
+        0.10.5
+        ------
+        
+        This is a bugfix release
+        
+        * Don't look at the file extension to determine the file type
+          in modulegraph.find_modules.parse_mf_results, but use the
+          class of the item.
+        
+        * Issue #13: Improved handling of bad relative imports
+          ("from .foo import bar"), these tended to raise confusing errors and
+          are now handled like any other failed import.
+        
+        0.10.4
+        ------
+        
+        This is a bugfix release
+        
+        * There were no 'classifiers' in the package metadata due to a bug
+          in setup.py.
+        
+        0.10.3
+        ------
+        
+        This is a bugfix release
+        
+        Bugfixes
+        ........
+        
+        * ``modulegraph.find_modules.parse_mf_results`` failed when the main script of
+          a py2app module didn't have a file name ending in '.py'.
+        
+        0.10.2
+        ------
+        
+        This is a bugfix release
+        
+        Bugfixes
+        ........
+        
+        * Issue #12: modulegraph would sometimes find the wrong package *__init__*
+          module due to using the wrong search method. One easy way to reproduce the
+          problem was to have a toplevel module named *__init__*.
+        
+          Reported by Kentzo.
+        
+        0.10.1
+        ------
+        
+        This is a bugfix release
+        
+        Bugfixes
+        ........
+        
+        * Issue #11: creating xrefs and dotty graphs from modulegraphs (the --xref
+          and --graph options of py2app) didn't work with python 3 due to use of
+          APIs that aren't available in that version of python.
+        
+          Reported by Andrew Barnert.
+        
+        
+        0.10
+        ----
+        
+        This is a minor feature release
+        
+        Features
+        ........
+        
+        * ``modulegraph.find_modules.find_needed_modules`` claimed to automatically
+          include subpackages for the "packages" argument as well, but that code
+          didn't work at all.
+        
+        * Issue #9: The modulegraph script is deprecated, use
+          "python -mmodulegraph" instead.
+        
+        * Issue #10: Ensure that the result of "zipio.open" can be used
+          in a with statement (that is, ``with zipio.open(...) as fp``).
+        
+        * No longer use "2to3" to support Python 3.
+        
+          Because of this modulegraph now supports Python 2.6
+          and later.
+        
+        * Slightly improved HTML output, which makes it easier
+          to manipulate the generated HTML using JavaScript.
+        
+          Patch by anatoly techtonik.
+        
+        * Ensure modulegraph works with changes introduced after
+          Python 3.3b1.
+        
+        * Implement support for PEP 420 ("Implicit namespace packages")
+          in Python 3.3.
+        
+        * ``modulegraph.util.imp_walk`` is deprecated and will be
+          removed in the next release of this package.
+        
+        Bugfixes
+        ........
+        
+        * The module graph was incomplete, and generated incorrect warnings
+          along the way, when a subpackage contained import statements for
+          submodules.
+        
+          An example of this is ``sqlalchemy.util``: the ``__init__.py`` file
+          for this package contains imports of modules in that package using
+          the classic relative import syntax (that is, ``import compat`` to
+          import ``sqlalchemy.util.compat``). Until this release modulegraph
+          searched the wrong path to locate these modules (and hence failed
+          to find them).
+        
+        
+        0.9.2
+        -----
+        
+        This is a bugfix release
+        
+        Bugfixes
+        ........
+        
+        * The 'packages' option to modulegraph.find_modules.find_modules ignored
+          the search path argument but always used the default search path.
+        
+        * The 'imp_find_modules' function in modulegraph.util has an argument 'path';
+          this was a string in previous releases and can now also be a sequence.
+        
+        * Don't crash when a module on the 'includes' list doesn't exist, but warn
+          just like for missing 'packages' (modulegraph.find_modules.find_modules)
+        
+        0.9.1
+        -----
+        
+        This is a bugfix release
+        
+        Bug fixes
+        .........
+        
+        - Fixed the name of nodes imports in packages where the first element of
+          a dotted name can be found but the rest cannot. This used to create
+          a MissingModule node for the dotted name in the global namespace instead
+          of relative to the package.
+        
+          That is, given a package "pkg" with submodule "sub" if the "__init__.py"
+          of "pkg" contains "import sub.nomod" we now create a MissingModule node
+          for "pkg.sub.nomod" instead of "sub.nomod".
+        
+          This fixes an issue with including the crcmod package in application
+          bundles, first reported on the pythonmac-sig mailinglist by
+          Brendan Simon.
+        
+        0.9
+        ---
+        
+        This is a minor feature release
+        
+        
+        Features:
+        
+        - Documentation is now generated using `sphinx <http://pypi.python.org/pypi/sphinx>`_
+          and can be viewed at <http://packages.python.org/modulegraph>.
+        
+          The documentation is very rough at the moment and in need of reorganisation and
+          language cleanup. I've basically written the current version by reading the code
+          and documenting what it does; the order in which classes and methods are documented
+          is therefore not necessarily the most useful.
+        
+        - The repository has moved to bitbucket
+        
+        - Renamed ``modulegraph.modulegraph.AddPackagePath`` to ``addPackagePath``,
+          likewise ``ReplacePackage`` is now ``replacePackage``. The old name is still
+          available, but is deprecated and will be removed before the 1.0 release.
+        
+        - ``modulegraph.modulegraph`` contains two node types that are unused and
+          have unclear semantics: ``FlatPackage`` and ``ArchiveModule``. These node
+          types are deprecated and will be removed before 1.0 is released.
+        
+        - Added a simple commandline tool (``modulegraph``) that will print information
+          about the dependency graph of a script.
+        
+        - Added a module (``zipio``) for dealing with paths that may refer to entries
+          inside zipfiles (such as source paths referring to modules in zipped eggfiles).
+        
+          With this addition ``modulegraph.modulegraph.os_listdir`` is deprecated and
+          it will be removed before the 1.0 release.
+        
+        Bug fixes:
+        
+        - The ``__cmp__`` method of a Node no longer causes an exception
+          when the compared-to object is not a Node. Patch by Ivan Kozik.
+        
+        - Issue #1: The initialiser for ``modulegraph.ModuleGraph`` caused an exception
+          when an entry on the path (``sys.path``) doesn't actually exist.
+        
+          Fix by "skurylo", testcase by Ronald.
+        
+        - The code no longer worked with python 2.5, this release fixes that.
+        
+        - Due to the switch to mercurial, setuptools no longer included
+          all required files. Fixed by adding a MANIFEST.in file.
+        
+        - The method for printing a ``.dot`` representation of a ``ModuleGraph``
+          works again.
+        
+        
+        0.8.1
+        -----
+        
+        This is a minor feature release
+        
+        Features:
+        
+        - ``from __future__ import absolute_import`` is now supported
+        
+        - Relative imports (``from . import module``) are now supported
+        
+        - Add support for namespace packages when those are installed
+          using option ``--single-version-externally-managed`` (part
+          of setuptools/distribute)
+        
+        0.8
+        ---
+        
+        This is a minor feature release
+        
+        Features:
+        
+        - Initial support for Python 3.x
+        
+        - It is now possible to run the test suite
+          using ``python setup.py test``.
+        
+          (The actual test suite is still fairly minimal though)
+        
+Keywords: import, dependencies
+Platform: any
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Software Development :: Build Tools
diff --git a/catapult/telemetry/third_party/modulegraph/README.chromium b/catapult/telemetry/third_party/modulegraph/README.chromium
new file mode 100644
index 0000000..937e677
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/README.chromium
@@ -0,0 +1,11 @@
+Name: modulegraph
+Short Name: modulegraph
+URL: https://pypi.python.org/pypi/modulegraph/
+Version: 0.12.1
+License: MIT
+License File: NOT_SHIPPED
+Security Critical: no
+Description: modulegraph determines a dependency graph between Python modules
+primarily by bytecode analysis for import statements. It's used by telemetry's
+find_dependencies script.
+Local modification: remove doc/_build directory.
diff --git a/catapult/telemetry/third_party/modulegraph/README.txt b/catapult/telemetry/third_party/modulegraph/README.txt
new file mode 100644
index 0000000..55ebf46
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/README.txt
@@ -0,0 +1,6 @@
+modulegraph determines a dependency graph between Python modules primarily
+by bytecode analysis for import statements.
+
+modulegraph uses similar methods to modulefinder from the standard library,
+but uses a more flexible internal representation, has more extensive 
+knowledge of special cases, and is extensible.
diff --git a/catapult/telemetry/third_party/modulegraph/doc/Makefile b/catapult/telemetry/third_party/modulegraph/doc/Makefile
new file mode 100644
index 0000000..b91ac81
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/doc/Makefile
@@ -0,0 +1,89 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+BUILDDIR      = _build
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html      to make standalone HTML files"
+	@echo "  dirhtml   to make HTML files named index.html in directories"
+	@echo "  pickle    to make pickle files"
+	@echo "  json      to make JSON files"
+	@echo "  htmlhelp  to make HTML files and a HTML help project"
+	@echo "  qthelp    to make HTML files and a qthelp project"
+	@echo "  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  changes   to make an overview of all changed/added/deprecated items"
+	@echo "  linkcheck to check all external links for integrity"
+	@echo "  doctest   to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+	-rm -rf $(BUILDDIR)/*
+
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+pickle:
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+json:
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+	@echo
+	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
+	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/altgraph.qhcp"
+	@echo "To view the help file:"
+	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/altgraph.qhc"
+
+latex:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+	@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
+	      "run these through (pdf)latex."
+
+changes:
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+	@echo
+	@echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/catapult/telemetry/third_party/modulegraph/doc/changelog.rst b/catapult/telemetry/third_party/modulegraph/doc/changelog.rst
new file mode 100644
index 0000000..f6725ac
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/doc/changelog.rst
@@ -0,0 +1,307 @@
+Release history
+===============
+
+0.12.1
+------
+
+* Issue #25: Complex python files could cause a "maximum recursion depth exceeded"
+  exception due to using stack-based recursion to walk the module AST.
+
+
+0.12
+----
+
+* Added 'modulegraph.modulegraph.InvalidSourceModule'. This graph node is
+  used for Python source modules that cannot be compiled (for example because
+  they contain syntax errors).
+
+  This is primarily useful for being able to create a graph for packages
+  that have python 2.x or python 3.x compatibility in separate modules that
+  contain code that isn't valid in the "other" python version.
+
+* Added 'modulegraph.modulegraph.InvalidCompiledModule'. This graph node
+  is used for Python bytecode modules that cannot be loaded.
+
+* Added 'modulegraph.modulegraph.NamespacePackage'.
+
+  Patch by bitbucket user htgoebel.
+
+* No longer add a MissingModule node to the graph for 'collections.defaultdict'
+  when using 'from collections import defaultdict' ('collections.defaultdict'
+  is an attribute of 'collections', not a submodule).
+
+* Fixed typo in ModuleGraph.getReferences()
+
+* Added ModuleGraph.getReferers(tonode). This method yields the
+  nodes that are referencing *tonode* (the reverse of getReferences)
+
+* The graph will no longer contain MissingModule nodes when using 'from ... import name' to
+  import a global variable in a python module.
+
+  There will still be MissingModule nodes for global variables in C extensions, and
+  for 'from missing import name' when 'missing' is itself a MissingModule.
+
+* Issue #18: Don't assume that a PEP 302 loader object has a ``path`` attribute. That
+  attribute is not documented and is not always present.
+
+0.11.2
+------
+
+*
+
+0.11.1
+------
+
+* Issue #145: Don't exclude the platform specific 'path' modules (like ntpath)
+
+0.11
+----
+
+This is a feature release
+
+Features
+........
+
+* Hardcode knowledge about the compatibility aliases in the email
+  module (for python 2.5 up to 3.0).
+
+  This makes it possible to remove a heavy-handed recipe from py2app.
+
+* Added ``modulegraph.zipio.getmode`` to fetch the Unix file mode
+  for a file.
+
+* Added some handy methods to ``modulegraph.modulegraph.ModuleGraph``.
+
+0.10.5
+------
+
+This is a bugfix release
+
+* Don't look at the file extension to determine the file type
+  in modulegraph.find_modules.parse_mf_results, but use the
+  class of the item.
+
+* Issue #13: Improved handling of bad relative imports
+  ("from .foo import bar"), these tended to raise confusing errors and
+  are now handled like any other failed import.
+
+0.10.4
+------
+
+This is a bugfix release
+
+* There were no 'classifiers' in the package metadata due to a bug
+  in setup.py.
+
+0.10.3
+------
+
+This is a bugfix release
+
+Bugfixes
+........
+
+* ``modulegraph.find_modules.parse_mf_results`` failed when the main script of
+  a py2app module didn't have a file name ending in '.py'.
+
+0.10.2
+------
+
+This is a bugfix release
+
+Bugfixes
+........
+
+* Issue #12: modulegraph would sometimes find the wrong package *__init__*
+  module due to using the wrong search method. One easy way to reproduce the
+  problem was to have a toplevel module named *__init__*.
+
+  Reported by Kentzo.
+
+0.10.1
+------
+
+This is a bugfix release
+
+Bugfixes
+........
+
+* Issue #11: creating xrefs and dotty graphs from modulegraphs (the --xref
+  and --graph options of py2app) didn't work with python 3 due to use of
+  APIs that aren't available in that version of python.
+
+  Reported by Andrew Barnert.
+
+
+0.10
+----
+
+This is a minor feature release
+
+Features
+........
+
+* ``modulegraph.find_modules.find_needed_modules`` claimed to automatically
+  include subpackages for the "packages" argument as well, but that code
+  didn't work at all.
+
+* Issue #9: The modulegraph script is deprecated, use
+  "python -mmodulegraph" instead.
+
+* Issue #10: Ensure that the result of "zipio.open" can be used
+  in a with statement (that is, ``with zipio.open(...) as fp``).
+
+* No longer use "2to3" to support Python 3.
+
+  Because of this modulegraph now supports Python 2.6
+  and later.
+
+* Slightly improved HTML output, which makes it easier
+  to manipulate the generated HTML using JavaScript.
+
+  Patch by anatoly techtonik.
+
+* Ensure modulegraph works with changes introduced after
+  Python 3.3b1.
+
+* Implement support for PEP 420 ("Implicit namespace packages")
+  in Python 3.3.
+
+* ``modulegraph.util.imp_walk`` is deprecated and will be
+  removed in the next release of this package.
+
+Bugfixes
+........
+
+* The module graph was incomplete, and generated incorrect warnings
+  along the way, when a subpackage contained import statements for
+  submodules.
+
+  An example of this is ``sqlalchemy.util``, the ``__init__.py`` file
+  for this package contains imports of modules in that package using
+  the classic relative import syntax (that is ``import compat`` to
+  import ``sqlalchemy.util.compat``). Until this release modulegraph
+  searched the wrong path to locate these modules (and hence failed
+  to find them).
+
+
+0.9.2
+-----
+
+This is a bugfix release
+
+Bugfixes
+........
+
+* The 'packages' option to modulegraph.find_modules.find_modules ignored
+  the search path argument but always used the default search path.
+
+* The 'imp_find_modules' function in modulegraph.util has an argument 'path';
+  this was a string in previous releases and can now also be a sequence.
+
+* Don't crash when a module on the 'includes' list doesn't exist, but warn
+  just like for missing 'packages' (modulegraph.find_modules.find_modules)
+
+0.9.1
+-----
+
+This is a bugfix release
+
+Bug fixes
+.........
+
+- Fixed the names of nodes for imports in packages where the first element of
+  a dotted name can be found but the rest cannot. This used to create
+  a MissingModule node for the dotted name in the global namespace instead
+  of relative to the package.
+
+  That is, given a package "pkg" with submodule "sub" if the "__init__.py"
+  of "pkg" contains "import sub.nomod" we now create a MissingModule node
+  for "pkg.sub.nomod" instead of "sub.nomod".
+
+  This fixes an issue with including the crcmod package in application
+  bundles, first reported on the pythonmac-sig mailinglist by
+  Brendan Simon.
+
+0.9
+---
+
+This is a minor feature release
+
+
+Features:
+
+- Documentation is now generated using `sphinx <http://pypi.python.org/pypi/sphinx>`_
+  and can be viewed at <http://packages.python.org/modulegraph>.
+
+  The documentation is very rough at this moment and in need of reorganisation and
+  language cleanup. I've basically written the current version by reading the code
+  and documenting what it does; the order in which classes and methods are documented
+  is therefore not necessarily the most useful.
+
+- The repository has moved to bitbucket
+
+- Renamed ``modulegraph.modulegraph.AddPackagePath`` to ``addPackagePath``,
+  likewise ``ReplacePackage`` is now ``replacePackage``. The old name is still
+  available, but is deprecated and will be removed before the 1.0 release.
+
+- ``modulegraph.modulegraph`` contains two node types that are unused and
+  have unclear semantics: ``FlatPackage`` and ``ArchiveModule``. These node
+  types are deprecated and will be removed before 1.0 is released.
+
+- Added a simple commandline tool (``modulegraph``) that will print information
+  about the dependency graph of a script.
+
+- Added a module (``zipio``) for dealing with paths that may refer to entries
+  inside zipfiles (such as source paths referring to modules in zipped eggfiles).
+
+  With this addition ``modulegraph.modulegraph.os_listdir`` is deprecated and
+  it will be removed before the 1.0 release.
+
+Bug fixes:
+
+- The ``__cmp__`` method of a Node no longer causes an exception
+  when the compared-to object is not a Node. Patch by Ivan Kozik.
+
+- Issue #1: The initialiser for ``modulegraph.ModuleGraph`` caused an exception
+  when an entry on the path (``sys.path``) doesn't actually exist.
+
+  Fix by "skurylo", testcase by Ronald.
+
+- The code no longer worked with python 2.5, this release fixes that.
+
+- Due to the switch to mercurial, setuptools no longer included
+  all required files. Fixed by adding a MANIFEST.in file.
+
+- The method for printing a ``.dot`` representation of a ``ModuleGraph``
+  works again.
+
+
+0.8.1
+-----
+
+This is a minor feature release
+
+Features:
+
+- ``from __future__ import absolute_import`` is now supported
+
+- Relative imports (``from . import module``) are now supported
+
+- Add support for namespace packages when those are installed
+  using option ``--single-version-externally-managed`` (part
+  of setuptools/distribute)
+
+0.8
+---
+
+This is a minor feature release
+
+Features:
+
+- Initial support for Python 3.x
+
+- It is now possible to run the test suite
+  using ``python setup.py test``.
+
+  (The actual test suite is still fairly minimal though)
diff --git a/catapult/telemetry/third_party/modulegraph/doc/commandline.rst b/catapult/telemetry/third_party/modulegraph/doc/commandline.rst
new file mode 100644
index 0000000..b5a8df5
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/doc/commandline.rst
@@ -0,0 +1,31 @@
+Commandline tools
+=================
+
+The package can be used as a script using "python -mmodulegraph".
+
+This script calculates the module graph for the scripts passed
+on the commandline and by default prints a list of modules
+in the objectgraph, and their type and location.
+
+The script has a number of options to change the output:
+
+* ``-d``: Increase the debug level
+
+* ``-q``: Clear the debug level (emit minimal output)
+
+* ``-m``: The arguments are module names instead of script files
+
+* ``-x name``: Add ``name`` to the list of excludes
+
+* ``-p path``: Add ``path`` to the module search path
+
+* ``-g``: Emit a ``.dot`` file instead of a list of modules
+
+* ``-h``: Emit a ``.html`` file instead of a list of modules
+
+Deprecation warning
+-------------------
+
+The package also installs a command-line tool named "modulegraph";
+this command-line tool is deprecated and will be removed in a
+future version.
diff --git a/catapult/telemetry/third_party/modulegraph/doc/conf.py b/catapult/telemetry/third_party/modulegraph/doc/conf.py
new file mode 100644
index 0000000..76d7b80
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/doc/conf.py
@@ -0,0 +1,219 @@
+# -*- coding: utf-8 -*-
+#
+# modulegraph documentation build configuration file, created by
+# sphinx-quickstart on Tue Sep 28 21:04:40 2010.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys, os
+
+def get_version():
+    fn = os.path.join(
+        os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
+        'setup.cfg')
+    for ln in open(fn):
+        if ln.startswith('version'):
+            version = ln.split('=')[-1].strip()
+            return version
+
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.append(os.path.abspath('.'))
+sys.path.insert(0,
+    os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+
+# -- General configuration -----------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.ifconfig']
+
+
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'modulegraph'
+copyright = u'2010, Ronald Oussoren'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = get_version()
+# The full version, including alpha/beta/rc tags.
+release = version
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of documents that shouldn't be included in the build.
+#unused_docs = []
+
+# List of directories, relative to source directory, that shouldn't be searched
+# for source files.
+exclude_trees = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  Major themes that come with
+# Sphinx are currently 'default' and 'sphinxdoc'.
+#html_theme = 'default'
+html_theme = 'nature'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_use_modindex = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = ''
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'modulegraphdoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+# The paper size ('letter' or 'a4').
+#latex_paper_size = 'letter'
+
+# The font size ('10pt', '11pt' or '12pt').
+#latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+  ('index', 'modulegraph.tex', u'modulegraph Documentation',
+   u'Ronald Oussoren', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# Additional stuff for the LaTeX preamble.
+#latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_use_modindex = True
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {
+        'python': ('http://docs.python.org/', None),
+        'altgraph': ('http://packages.python.org/altgraph', None),
+}
+
+todo_include_todos = True
diff --git a/catapult/telemetry/third_party/modulegraph/doc/find_modules.rst b/catapult/telemetry/third_party/modulegraph/doc/find_modules.rst
new file mode 100644
index 0000000..48f0f97
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/doc/find_modules.rst
@@ -0,0 +1,58 @@
+:mod:`modulegraph.find_modules` --- High-level module dependency finding interface
+==================================================================================
+
+.. module:: modulegraph.find_modules
+   :synopsis: High-level module dependency finding interface
+
+This module provides a high-level interface to the functionality of 
+the modulegraph package.
+
+
+.. function:: find_modules([scripts[, includes[, packages[, excludes[, path[, debug]]]]]])
+
+   High-level interface; takes iterables for *scripts*, *includes*, *packages*
+   and *excludes* and returns a :class:`modulegraph.modulegraph.ModuleGraph`
+   instance, python_files, and extensions.
+
+   python_files is a list of pure python dependencies as modulegraph.Module
+   objects.
+
+   extensions is a list of platform-specific C extension dependencies as
+   modulegraph.Module objects.
+
+
+.. function:: parse_mf_results(mf)
+
+   Return two lists: the first one contains the python files in the graph,
+   the second the C extensions.
+        
+   :param mf: a :class:`modulegraph.modulegraph.ModuleGraph` instance
+
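+A short sketch combining these two functions (the script filename is a
+placeholder, and this assumes the graph returned by :func:`find_modules` is
+what :func:`parse_mf_results` consumes)::
+
+   from modulegraph.find_modules import find_modules, parse_mf_results
+
+   # Build a dependency graph for a script plus an explicitly named package.
+   mf = find_modules(scripts=['example_script.py'], packages=['json'])
+
+   # Split the graph into pure-python modules and C extensions.
+   python_files, extensions = parse_mf_results(mf)
+   for mod in python_files:
+       print(mod.identifier, mod.filename)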
+
+Lower-level functionality
+-------------------------
+
+The functionality in this section is much lower level and should probably
+not be used. It's mostly documented as a convenience for maintainers.
+
+
+.. function:: get_implies()
+
+   Return a mapping of implied dependencies. The key is a, possibly dotted,
+   module name and the value a list of dependencies.
+
+   This contains a hardcoded list of hard dependencies, for example for C
+   extensions in the standard library that perform imports in C code, which
+   the generic dependency finder cannot locate.
+
+.. function:: plat_prepare(includes, packages, excludes)
+
+   Updates the lists of includes, packages and excludes for the current
+   platform. This will add items to these lists based on hardcoded platform
+   information.
+
+.. function:: find_needed_modules([mf[, scripts[, includes[, packages[, warn]]]]])
+
+   Feeds the given :class:`ModuleGraph <modulegraph.ModuleGraph>`  with
+   the *scripts*, *includes* and *packages* and returns the resulting
+   graph. This function will create a new graph when *mf* is not specified
+   or ``None``.
diff --git a/catapult/telemetry/third_party/modulegraph/doc/index.rst b/catapult/telemetry/third_party/modulegraph/doc/index.rst
new file mode 100644
index 0000000..534b4d3
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/doc/index.rst
@@ -0,0 +1,42 @@
+.. modulegraph documentation master file, created by
+   sphinx-quickstart on Tue Sep 28 21:04:40 2010.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+Modulegraph - Python module dependency graph
+============================================
+
+modulegraph determines a dependency graph between Python modules primarily
+by bytecode analysis for import statements.
+
+modulegraph uses similar methods to :mod:`modulefinder` from the standard library,
+but uses a more flexible internal representation, has more extensive 
+knowledge of special cases, and is extensible.
+
+Contents:
+
+.. toctree::
+   :maxdepth: 1
+
+   changelog
+   license
+   commandline
+   modulegraph
+   find_modules
+   util
+   zipio
+
+Online Resources
+----------------
+
+* `Sourcecode repository on bitbucket <http://bitbucket.org/ronaldoussoren/modulegraph/>`_
+
+* `The issue tracker <http://bitbucket.org/ronaldoussoren/modulegraph/issues>`_
+
+Indices and tables
+------------------
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/catapult/telemetry/third_party/modulegraph/doc/license.rst b/catapult/telemetry/third_party/modulegraph/doc/license.rst
new file mode 100644
index 0000000..f9c8cc5
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/doc/license.rst
@@ -0,0 +1,23 @@
+License
+=======
+
+Copyright (c) Bob Ippolito
+
+Parts are copyright (c) 2010-2014 Ronald Oussoren
+
+MIT License
+...........
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software
+and associated documentation files (the "Software"), to deal in the Software without restriction,
+including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do
+so.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
+INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
+FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
diff --git a/catapult/telemetry/third_party/modulegraph/doc/modulegraph.rst b/catapult/telemetry/third_party/modulegraph/doc/modulegraph.rst
new file mode 100644
index 0000000..60566e7
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/doc/modulegraph.rst
@@ -0,0 +1,531 @@
+:mod:`modulegraph.modulegraph` --- Find modules used by a script
+================================================================
+
+.. module:: modulegraph.modulegraph
+   :synopsis: Find modules used by a script
+
+This module defines :class:`ModuleGraph`, which is used to find
+the dependencies of scripts using bytecode analysis.
+
+A number of APIs in this module refer to filesystem paths. Those paths can refer to
+files inside zipfiles (for example when there are zipped egg files on :data:`sys.path`).
+Filenames referring to entries in a zipfile are not marked in any special way; if
+``"somepath.zip"`` refers to a zipfile, then ``"somepath.zip/embedded/file"`` refers to
+``embedded/file`` inside that zipfile.
+
+The actual graph
+----------------
+
+.. class:: ModuleGraph([path[, excludes[, replace_paths[, implies[, graph[, debug]]]]]])
+
+   Create a new ModuleGraph object. Use the :meth:`run_script` method to add scripts
+   and their dependencies to the graph.
+
+   :param path: Python search path to use, defaults to :data:`sys.path`
+   :param excludes: Iterable with module names that should not be included as a dependency
+   :param replace_paths: List of pathname rewrites ``(old, new)``. When this argument is
+     supplied the ``co_filename`` attributes of code objects get rewritten before scanning
+     them for dependencies.
+   :param implies: Implied module dependencies, a mapping from a module name to the list
+     of modules it depends on. Use this to tell modulegraph about dependencies that cannot
+     be found by code inspection (such as imports from C code or using the :func:`__import__`
+     function).
+   :param graph: A precreated :class:`Graph <altgraph.Graph.Graph>` object to use; the
+     default is to create a new one.
+   :param debug: The :class:`ObjectGraph <altgraph.ObjectGraph.ObjectGraph>` debug level.
+
+
+.. method:: run_script(pathname[, caller])
+
+   Create and return a node by path (not module name). The *pathname* should
+   refer to a Python source file and will be scanned for dependencies.
+
+   The optional argument *caller* is the node that calls this script,
+   and is used to add a reference in the graph.
+
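+   A minimal usage sketch (the script path below is only a placeholder)::
+
+      from modulegraph.modulegraph import ModuleGraph
+
+      # Build a graph using the default sys.path based search path.
+      graph = ModuleGraph()
+      # Scan the script and add it, plus its dependencies, to the graph.
+      node = graph.run_script('example_script.py')
+      # Print the modules that were found (and those that seem to be missing).
+      graph.report()
+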
+.. method:: import_hook(name[, caller[, fromlist[, level[, attr]]]])
+
+   Import a module and analyse its dependencies
+
+   :arg name:     The module name
+   :arg caller:   The node that caused the import to happen
+   :arg fromlist: The list of names to import, this is an empty list for
+      ``import name`` and a list of names for ``from name import a, b, c``.
+   :arg level:    The import level. The value should be ``-1`` for classical Python 2
+     imports, ``0`` for absolute imports and a positive number for relative imports (
+     where the value is the number of leading dots in the imported name).
+   :arg attr:     Attributes for the graph edge.
+
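+   A sketch of how the arguments map onto import statements (``graph`` is a
+   :class:`ModuleGraph` instance; the argument values follow the descriptions
+   above)::
+
+      # Roughly "import xml.dom" at module scope (absolute import).
+      graph.import_hook('xml.dom', None, [], 0)
+
+      # Roughly "from os import path" (absolute import).
+      graph.import_hook('os', None, ['path'], 0)
+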
+
+.. method:: implyNodeReference(node, other, edgeData=None)
+
+   Explicitly mark that *node* depends on *other*. Other is either
+   a :class:`node <Node>` or the name of a module that will be
+   searched for as if it were an absolute import.
+
+
+.. method:: createReference(fromnode, tonode[, edge_data])
+
+   Create a reference from *fromnode* to *tonode*, with optional edge data.
+
+   The default for *edge_data* is ``"direct"``.
+
+.. method:: getReferences(fromnode)
+
+   Yield all nodes that *fromnode* refers to. That is, all modules imported
+   by *fromnode*.
+
+   Node :data:`None` is the root of the graph, and refers to all nodes that were
+   explicitly imported by :meth:`run_script` or :meth:`import_hook`, unless you use
+   an explicit parent with those methods.
+
+   .. versionadded:: 0.11
+
+.. method:: getReferers(tonode, collapse_missing_modules=True)
+
+   Yield all nodes that refer to *tonode*. That is, all modules that import
+   *tonode*.
+
+   If *collapse_missing_modules* is false this includes references from
+   :class:`MissingModule` nodes, otherwise :class:`MissingModule` nodes
+   are replaced by the "real" nodes that reference this missing node.
+
+   .. versionadded:: 0.12
+
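+   For example, to list everything that imports the ``os`` module (a sketch;
+   ``graph`` is a :class:`ModuleGraph` instance and ``os`` is assumed to be in
+   the graph already)::
+
+      node = graph.findNode('os')
+      for referer in graph.getReferers(node):
+          print(referer)
+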
+.. method:: foldReferences(pkgnode)
+
+   Hide all submodule nodes for package *pkgnode* and add ingoing and outgoing
+   edges to *pkgnode* based on the edges from the submodule nodes.
+
+   This can be used to simplify a module graph: after folding 'email' all
+   references to modules in the 'email' package are references to the package.
+
+   .. versionadded:: 0.11
+
+.. method:: findNode(name)
+
+   Find a node by identifier.  If a node by that identifier exists, it will be returned.
+
+   If a lazy node exists by that identifier with no dependencies (excluded), it will be
+   instantiated and returned.
+
+   If a lazy node exists by that identifier with dependencies, it and its
+   dependencies will be instantiated and scanned for additional dependencies.
+
+
+
+.. method:: create_xref([out])
+
+   Write an HTML file to the *out* stream (defaulting to :data:`sys.stdout`).
+
+   The HTML file contains a textual description of the dependency graph.
+
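+   For example (a sketch; ``graph`` is a :class:`ModuleGraph` instance and the
+   output filename is arbitrary)::
+
+      with open('modulegraph-xref.html', 'w') as out:
+          graph.create_xref(out)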
+
+
+.. method:: graphreport([fileobj[, flatpackages]])
+
+   .. todo:: To be documented
+
+
+
+.. method:: report()
+
+   Print a report to stdout, listing the found modules with their
+   paths, as well as modules that are missing, or seem to be missing.
+
+
+Mostly internal methods
+.......................
+
+The methods in this section should be considered as methods for subclassing at best;
+please let us know if you need these methods in your code, as they are on track to be
+made private methods before the 1.0 release.
+
+.. warning:: The methods in this section will be refactored in a future release;
+   the current architecture makes it unnecessarily hard to write proper tests.
+
+.. method:: determine_parent(caller)
+
+   Returns the node of the package root for *caller*. If *caller* is a package
+   this is the node itself; if *caller* is a module in a package this is the
+   node for that package; otherwise *caller* is not part of a package and
+   the result is :data:`None`.
+
+.. method:: find_head_package(parent, name[, level])
+
+   .. todo:: To be documented
+
+
+.. method:: load_tail(mod, tail)
+
+   This method is called to load the rest of a dotted name after loading the root
+   of a package. This will import all intermediate modules as well (using
+   :meth:`import_module`), and returns the module :class:`node <Node>` for the
+   requested node.
+
+   .. note:: When *tail* is empty this will just return *mod*.
+
+   :arg mod:   A start module (instance of :class:`Node`)
+   :arg tail:  The rest of a dotted name, can be empty
+   :raise ImportError: When the requested module (or one of its parents) cannot be found
+   :returns: the requested module
+
+
+
+.. method:: ensure_fromlist(m, fromlist)
+
+   Yield all submodules that would be imported when importing *fromlist*
+   from *m* (using ``from m import fromlist...``).
+
+   *m* must be a package and not a regular module.
+
+.. method:: find_all_submodules(m)
+
+   Yield the filenames for submodules in the same package as *m*.
+
+
+
+.. method:: import_module(partname, fqname, parent)
+
+   Perform an import of the module with basename *partname* (for example ``path``)
+   and full name *fqname* (for example ``os.path``). The import is performed by *parent*.
+
+   This will create a reference from the parent node to the
+   module node and will load the module node when it is not already
+   loaded.
+
+
+
+.. method:: load_module(fqname, fp, pathname, (suffix, mode, type))
+
+   Load the module named *fqname* from the given *pathname*. The
+   argument *fp* is either :data:`None`, or a stream where the
+   code for the Python module can be loaded (either byte-code or
+   the source code). The *(suffix, mode, type)* tuple contains the
+   suffix of the source file, the open mode for the file and the
+   type of module.
+
+   Creates a node of the right class and processes the dependencies
+   of the :class:`node <Node>` by scanning the byte-code for the node.
+
+   Returns the resulting :class:`node <Node>`.
+
+
+
+.. method:: scan_code(code, m)
+
+   Scan the *code* object for module *m* and update the dependencies of
+   *m* using the import statements found in the code.
+
+   This will automatically scan the code for nested functions, generator
+   expressions and list comprehensions as well.
+
+
+
+.. method:: load_package(fqname, pathname)
+
+   Load a package directory.
+
+
+
+.. method:: find_module(name, path[, parent])
+
+   Locates a module named *name* that is not yet part of the
+   graph. This method will raise :exc:`ImportError` when
+   the module cannot be found or when it is already part
+   of the graph. The *name* can not be a dotted name.
+
+   The *path* is the search path used, or :data:`None` to
+   use the default path.
+
+   When the *parent* is specified *name* refers to a
+   subpackage of *parent*, and *path* should be the
+   search path of the parent.
+
+   Returns the result of the global function
+   :func:`find_module <modulegraph.modulegraph.find_module>`.
+
+
+.. method:: itergraphreport([name[, flatpackages]])
+
+   .. todo:: To be documented
+
+
+
+.. method:: replace_paths_in_code(co)
+
+   Replace the filenames in code object *co* using the *replace_paths* value that
+   was passed to the constructor. Returns the rewritten code object.
+
+
+
+.. method:: calc_setuptools_nspackages()
+
+   Returns a mapping from package name to a list of paths where that package
+   can be found in ``--single-version-externally-managed`` form.
+
+   This method is used to be able to find those packages: these use
+   a magic ``.pth`` file to ensure that the package is added to :data:`sys.path`,
+   as they do not contain an ``__init__.py`` file.
+
+   Packages in this form are used by system packages and the "pip"
+   installer.
+
+
+Graph nodes
+-----------
+
+The :class:`ModuleGraph` contains nodes that represent the various types of modules.
+
+.. class:: Alias(value)
+
+   This is a subclass of string that is used to mark module aliases.
+
+
+
+.. class:: Node(identifier)
+
+   Base class for nodes, which provides the common functionality.
+
+   Nodes can be used as mappings for storing arbitrary data in the node.
+
+   Nodes are compared by comparing their *identifier*.
+
+.. data:: debug
+
+   Debug level (integer)
+
+.. data:: graphident
+
+   The node identifier, this is the value of the *identifier* argument
+   to the constructor.
+
+.. data:: identifier
+
+   The node identifier, this is the value of the *identifier* argument
+   to the constructor.
+
+.. data:: filename
+
+   The filename associated with this node.
+
+.. data:: packagepath
+
+   The value of ``__path__`` for this node.
+
+.. data:: code
+
+   The :class:`code object <types.CodeObject>` associated with this node
+
+.. data:: globalnames
+
+   The set of global names that are assigned to in this module. This
+   includes those names imported through startimports of Python modules.
+
+.. data:: startimports
+
+   The set of startimports this module did that could not be resolved,
+   i.e. a startimport from a non-Python module.
+
+
+.. method:: __contains__(name)
+
+   Return whether there is a value associated with *name*.
+
+   This method is usually accessed as ``name in aNode``.
+
+.. method:: __setitem__(name, value)
+
+   Set the value of *name* to *value*.
+
+   This method is usually accessed as ``aNode[name] = value``.
+
+.. method:: __getitem__(name)
+
+   Returns the value of *name*; raises :exc:`KeyError` when
+   it cannot be found.
+
+   This method is usually accessed as ``value = aNode[name]``.
+
+.. method:: get(name[, default])
+
+   Returns the value of *name*, or the default value when it
+   cannot be found. The *default* is :data:`None` when not specified.
+
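+   Together these methods let a node be used as a mapping for arbitrary
+   per-node data, for example::
+
+      node['marker'] = True              # __setitem__
+      if 'marker' in node:               # __contains__
+          print(node.get('marker'))      # get / __getitem__
+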
+.. method:: infoTuple()
+
+   Returns a tuple with information used in the :func:`repr`
+   output for the node. Subclasses can add additional information
+   to the result.
+
+
+.. class:: AliasNode(name, node)
+
+   A node that represents an alias from a name to another node.
+
+   The value of attribute *graphident* for this node will be the
+   value of *name*; the other :class:`Node` attributes are
+   references to those attributes in *node*.
+
+.. class:: BadModule(identifier)
+
+   Base class for nodes that should be ignored for some reason
+
+.. class:: ExcludedModule(identifier)
+
+   A module that is explicitly excluded.
+
+.. class:: MissingModule(identifier)
+
+   A module that is imported but cannot be located.
+
+
+
+.. class:: Script(filename)
+
+   A python script.
+
+   .. data:: filename
+
+      The filename for the script
+
+.. class:: BaseModule(name[, filename[, path]])
+
+    The base class for actual modules. The *name* is
+    the possibly dotted module name, *filename* is the
+    filesystem path to the module and *path* is the
+    value of ``__path__`` for the module.
+
+.. data:: graphident
+
+   The name of the module
+
+.. data:: filename
+
+   The filesystem path to the module.
+
+.. data:: path
+
+   The value of ``__path__`` for this module.
+
+.. class:: BuiltinModule(name)
+
+   A built-in module (one in :data:`sys.builtin_module_names`).
+
+.. class:: SourceModule(name)
+
+   A module for which the python source code is available.
+
+.. class:: InvalidSourceModule(name)
+
+   A module for which the python source code is available, but where
+   that source code cannot be compiled (due to syntax errors).
+
+   This is a subclass of :class:`SourceModule`.
+
+   .. versionadded:: 0.12
+
+.. class:: CompiledModule(name)
+
+   A module for which only byte-code is available.
+
+.. class:: Package(name)
+
+   Represents a python package
+
+.. class:: NamespacePackage(name)
+
+   Represents a python namespace package.
+
+   This is a subclass of :class:`Package`.
+
+.. class:: Extension(name)
+
+   A native extension
+
+
+.. warning:: A number of other node types are defined in the module. Those node types aren't
+   used by modulegraph and will be removed in a future version.
+
+
+Edge data
+---------
+
+The edges in a module graph by default contain information about the edge, represented
+by an instance of :class:`DependencyInfo`.
+
+.. class:: DependencyInfo(conditional, function, tryexcept, fromlist)
+
+   This class is a :func:`namedtuple <collections.namedtuple>` for representing
+   the information on a dependency between two modules.
+
+   All attributes can be used to deduce if a dependency is essential or not, and
+   are particularly useful when reporting on missing modules (dependencies on
+   :class:`MissingModule`).
+
+   .. data:: fromlist
+
+      A boolean that is true iff the target of the edge is named in the "import"
+      list of a "from" import ("from package import module").
+
+      When the target module is imported multiple times this attribute is false
+      unless all imports are in "import" list of a "from" import.
+
+   .. data:: function
+
+      A boolean that is true iff the import is done inside a function definition,
+      and is false for imports in module scope (or class scope for classes that
+      aren't defined in a function).
+
+   .. data:: tryexcept
+
+      A boolean that is true iff the import is done in the "try" or "except"
+      block of a try statement (but not in the "else" block).
+
+   .. data:: conditional
+
+      A boolean that is true iff the import is done in either block of an "if"
+      statement.
+
+   When the target of the edge is imported multiple times the :data:`function`,
+   :data:`tryexcept` and :data:`conditional` attributes of all imports are
+   merged: when there is an import where all these attributes are false the
+   attributes are false, otherwise each attribute is set to true if it is
+   true for at least one of the imports.
+
+   For example, when a module is imported both in a try-except statement and
+   furthermore is imported in a function (in two separate statements),
+   both :data:`tryexcept` and :data:`function` will be true.  But if there
+   is a third unconditional toplevel import for that module as well all
+   three attributes are false.
+
+   .. warning::
+
+      All attributes but :data:`fromlist` will be false when the source of
+      a dependency is scanned from a byte-compiled module instead of a python
+      source file. The :data:`fromlist` attribute will still be set correctly.
+
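+   As an illustration, a tuple constructed by hand (in practice these tuples
+   are created by the graph itself while scanning imports)::
+
+      from modulegraph.modulegraph import DependencyInfo
+
+      # An import done inside a function and wrapped in try/except.
+      info = DependencyInfo(conditional=False, function=True,
+                            tryexcept=True, fromlist=False)
+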
+Utility functions
+-----------------
+
+.. function:: find_module(name[, path])
+
+   A version of :func:`imp.find_module` that works with zipped packages (and other
+   :pep:`302` importers).
+
+.. function:: moduleInfoForPath(path)
+
+   Return the module name, readmode and type for the file at *path*, or
+   None if it doesn't seem to be a valid module (based on its name).
+
+.. function:: addPackagePath(packagename, path)
+
+   Add *path* to the value of ``__path__`` for the package named *packagename*.
+
+.. function:: replacePackage(oldname, newname)
+
+   Rename *oldname* to *newname* when it is found by the module finder. This
+   is used as a workaround for the hack that the ``_xmlplus`` package uses
+   to inject itself in the ``xml`` namespace.
+
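+A short sketch of these two helpers (the package name and path below are
+placeholders)::
+
+   from modulegraph import modulegraph
+
+   # Treat /extra/path as an additional directory of the "mypkg" package.
+   modulegraph.addPackagePath('mypkg', '/extra/path')
+
+   # Treat the "_xmlplus" package as "xml" when the module finder sees it.
+   modulegraph.replacePackage('_xmlplus', 'xml')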
+
diff --git a/catapult/telemetry/third_party/modulegraph/doc/util.rst b/catapult/telemetry/third_party/modulegraph/doc/util.rst
new file mode 100644
index 0000000..86427aa
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/doc/util.rst
@@ -0,0 +1,31 @@
+:mod:`modulegraph.util` --- Utility functions
+=============================================
+
+.. module:: modulegraph.util
+   :synopsis: Utility functions
+
+
+.. function:: imp_find_module(name, path=None)
+
+   This function has the same interface as
+   :func:`imp.find_module`, but also works with
+   dotted names.
+
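+   For example (a sketch; the return value has the same shape as that of
+   :func:`imp.find_module`)::
+
+      from modulegraph.util import imp_find_module
+
+      fp, pathname, description = imp_find_module('xml.dom')
+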
+.. function:: imp_walk(name)
+
+   Yields the name part and importer information
+   for every part of a dotted module name, and
+   raises :exc:`ImportError` when the *name*
+   cannot be found.
+
+   The result elements are tuples with two
+   elements: the first is a module name,
+   the second is the result of :func:`imp.find_module`
+   for that module (taking into account :pep:`302`
+   importers).
+
+   .. deprecated:: 0.10
+
+.. function:: guess_encoding(fp)
+
+   Returns the encoding of a python source file.
diff --git a/catapult/telemetry/third_party/modulegraph/doc/zipio.rst b/catapult/telemetry/third_party/modulegraph/doc/zipio.rst
new file mode 100644
index 0000000..dd227b8
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/doc/zipio.rst
@@ -0,0 +1,68 @@
+:mod:`modulegraph.zipio` --- Read-only filesystem access
+========================================================
+
+.. module:: modulegraph.zipio
+   :synopsis: Read-only filesystem access with ZIP support
+
+This module contains a number of functions that mirror functions found
+in :mod:`os` and :mod:`os.path`, but have support for data inside
+zipfiles as well as regular filesystem objects.
+
+The *path* argument of all functions below can refer to an object
+on the filesystem, but can also refer to an entry inside a zipfile. In
+the latter case, a prefix of *path* will be the name of a zipfile while
+the rest refers to an object in that zipfile. As an example, when
+``somepath/mydata.zip`` is a zipfile, the path ``somepath/mydata.zip/somefile.txt``
+will refer to ``somefile.txt`` inside the zipfile.
+
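+For example (a sketch; the archive name and its member are placeholders)::
+
+   from modulegraph import zipio
+
+   # Works for plain files as well as for files stored inside a zip archive.
+   with zipio.open('somepath/mydata.zip/somefile.txt') as fp:
+       data = fp.read()
+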
+.. function:: open(path[, mode])
+
+   Open a file, like :func:`the built-in open function <__builtin__.open>`.
+
+   The *mode* defaults to ``"r"`` and must be either ``"r"`` or ``"rb"``.
+
+.. function:: listdir(path)
+
+   List the contents of a directory, like :func:`os.listdir`.
+
+
+.. function:: isfile(path)
+
+   Returns true if *path* exists and refers to a file.
+
+   Raises IOError when *path* doesn't exist at all.
+
+   Based on :func:`os.path.isfile`
+
+
+.. function:: isdir(path)
+
+   Returns true if *path* exists and refers to a directory.
+
+   Raises IOError when *path* doesn't exist at all.
+
+   Based on :func:`os.path.isdir`
+
+
+.. function:: islink(path)
+
+   Returns true if *path* exists and refers to a symbolic link.
+
+   Raises IOError when *path* doesn't exist at all.
+
+   Based on :func:`os.path.islink`
+
+
+.. function:: readlink(path)
+
+   Returns the contents of a symbolic link, like :func:`os.readlink`.
+
+.. function:: getmtime(path)
+
+   Returns the last modification time of a file or directory, like
+   :func:`os.path.getmtime`.
+
+.. function:: getmode(path)
+
+   Returns the UNIX file mode for a file or directory, like the
+   *st_mode* attribute in the result of :func:`os.stat`.
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph.egg-info/PKG-INFO b/catapult/telemetry/third_party/modulegraph/modulegraph.egg-info/PKG-INFO
new file mode 100644
index 0000000..bfd4006
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph.egg-info/PKG-INFO
@@ -0,0 +1,337 @@
+Metadata-Version: 1.1
+Name: modulegraph
+Version: 0.12.1
+Summary: Python module dependency analysis tool
+Home-page: http://bitbucket.org/ronaldoussoren/modulegraph
+Author: Ronald Oussoren
+Author-email: ronaldoussoren@mac.com
+License: MIT
+Download-URL: http://pypi.python.org/pypi/modulegraph
+Description: modulegraph determines a dependency graph between Python modules primarily
+        by bytecode analysis for import statements.
+        
+        modulegraph uses similar methods to modulefinder from the standard library,
+        but uses a more flexible internal representation, has more extensive 
+        knowledge of special cases, and is extensible.
+        
+        
+        Release history
+        ===============
+        
+        0.12.1
+        ------
+        
+        * Issue #25: Complex python files could cause a "maximum recursion depth exceeded"
+          exception due to using stack-based recursion to walk the module AST.
+        
+        
+        0.12
+        ----
+        
+        * Added 'modulegraph.modulegraph.InvalidSourceModule'. This graph node is
+          used for Python source modules that cannot be compiled (for example because
+          they contain syntax errors).
+        
+          This is primarily useful for being able to create a graph for packages
+          that have python 2.x or python 3.x compatibility in separate modules that
+          contain code that isn't valid in the "other" python version.
+        
+        * Added 'modulegraph.modulegraph.InvalidCompiledModule'. This graph node
+          is used for Python bytecode modules that cannot be loaded.
+        
+        * Added 'modulegraph.modulegraph.NamespacePackage'.
+        
+          Patch by bitbucket user htgoebel.
+        
+        * No longer add a MissingModule node to the graph for 'collections.defaultdict'
+          when using 'from collections import defaultdict' ('collections.defaultdict'
+          is an attribute of 'collections', not a submodule).
+        
+        * Fixed typo in ModuleGraph.getReferences()
+        
+        * Added ModuleGraph.getReferers(tonode). This method yields the
+          nodes that are referencing *tonode* (the reverse of getReferences)
+        
+        * The graph will no longer contain MissingModule nodes when using 'from ... import name' to
+          import a global variable in a python module.
+        
+          There will still be MissingModule nodes for global variables in C extensions, and
+          for 'from missing import name' when 'missing' is itself a MissingModule.
+        
+        * Issue #18: Don't assume that a PEP 302 loader object has a ``path`` attribute. That
+          attribute is not documented and is not always present.
+        
+        0.11.2
+        ------
+        
+        *
+        
+        0.11.1
+        ------
+        
+        * Issue #145: Don't exclude the platform specific 'path' modules (like ntpath)
+        
+        0.11
+        ----
+        
+        This is a feature release
+        
+        Features
+        ........
+        
+        * Hardcode knowledge about the compatibility aliases in the email
+          module (for python 2.5 up to 3.0).
+        
+          This makes it possible to remove a heavy-handed recipe from py2app.
+        
+        * Added ``modulegraph.zipio.getmode`` to fetch the Unix file mode
+          for a file.
+        
+        * Added some handy methods to ``modulegraph.modulegraph.ModuleGraph``.
+        
+        0.10.5
+        ------
+        
+        This is a bugfix release
+        
+        * Don't look at the file extension to determine the file type
+          in modulegraph.find_modules.parse_mf_results, but use the
+          class of the item.
+        
+        * Issue #13: Improved handling of bad relative imports
+          ("from .foo import bar"), these tended to raise confusing errors and
+          are now handled like any other failed import.
+        
+        0.10.4
+        ------
+        
+        This is a bugfix release
+        
+        * There were no 'classifiers' in the package metadata due to a bug
+          in setup.py.
+        
+        0.10.3
+        ------
+        
+        This is a bugfix release
+        
+        Bugfixes
+        ........
+        
+        * ``modulegraph.find_modules.parse_mf_results`` failed when the main script of
+          a py2app module didn't have a file name ending in '.py'.
+        
+        0.10.2
+        ------
+        
+        This is a bugfix release
+        
+        Bugfixes
+        ........
+        
+        * Issue #12: modulegraph would sometimes find the wrong package *__init__*
+          module due to using the wrong search method. One easy way to reproduce the
+          problem was to have a toplevel module named *__init__*.
+        
+          Reported by Kentzo.
+        
+        0.10.1
+        ------
+        
+        This is a bugfix release
+        
+        Bugfixes
+        ........
+        
+        * Issue #11: creating xrefs and dotty graphs from modulegraphs (the --xref
+          and --graph options of py2app) didn't work with python 3 due to use of
+          APIs that aren't available in that version of python.
+        
+          Reported by Andrew Barnert.
+        
+        
+        0.10
+        ----
+        
+        This is a minor feature release
+        
+        Features
+        ........
+        
+        * ``modulegraph.find_modules.find_needed_modules`` claimed to automatically
+          include subpackages for the "packages" argument as well, but that code
+          didn't work at all.
+        
+        * Issue #9: The modulegraph script is deprecated, use
+          "python -mmodulegraph" instead.
+        
+        * Issue #10: Ensure that the result of "zipio.open" can be used
+          in a with statement (that is, ``with zipio.open(...) as fp``).
+        
+        * No longer use "2to3" to support Python 3.
+        
+          Because of this modulegraph now supports Python 2.6
+          and later.
+        
+        * Slightly improved HTML output, which makes it easier
+          to manipulate the generated HTML using JavaScript.
+        
+          Patch by anatoly techtonik.
+        
+        * Ensure modulegraph works with changes introduced after
+          Python 3.3b1.
+        
+        * Implement support for PEP 420 ("Implicit namespace packages")
+          in Python 3.3.
+        
+        * ``modulegraph.util.imp_walk`` is deprecated and will be
+          removed in the next release of this package.
+        
+        Bugfixes
+        ........
+        
+        * The module graph was incomplete, and generated incorrect warnings
+          along the way, when a subpackage contained import statements for
+          submodules.
+        
+          An example of this is ``sqlalchemy.util``, the ``__init__.py`` file
+          for this package contains imports of modules in that package using
+          the classic relative import syntax (that is ``import compat`` to
+          import ``sqlalchemy.util.compat``). Until this release modulegraph
+          searched the wrong path to locate these modules (and hence failed
+          to find them).
+        
+        
+        0.9.2
+        -----
+        
+        This is a bugfix release
+        
+        Bugfixes
+        ........
+        
+        * The 'packages' option to modulegraph.find_modules.find_modules ignored
+          the search path argument but always used the default search path.
+        
+        * The 'imp_find_module' function in modulegraph.util has an argument 'path';
+          this was a string in previous releases and can now also be a sequence.
+        
+        * Don't crash when a module on the 'includes' list doesn't exist, but warn
+          just like for missing 'packages' (modulegraph.find_modules.find_modules)
+        
+        0.9.1
+        -----
+        
+        This is a bugfix release
+        
+        Bug fixes
+        .........
+        
+        - Fixed the name of nodes for imports in packages where the first element of
+          a dotted name can be found but the rest cannot. This used to create
+          a MissingModule node for the dotted name in the global namespace instead
+          of relative to the package.
+        
+          That is, given a package "pkg" with submodule "sub", if the "__init__.py"
+          of "pkg" contains "import sub.nomod" we now create a MissingModule node
+          for "pkg.sub.nomod" instead of "sub.nomod".
+        
+          This fixes an issue with including the crcmod package in application
+          bundles, first reported on the pythonmac-sig mailing list by
+          Brendan Simon.
+        
+        0.9
+        ---
+        
+        This is a minor feature release
+        
+        
+        Features:
+        
+        - Documentation is now generated using `sphinx <http://pypi.python.org/pypi/sphinx>`_
+          and can be viewed at <http://packages.python.org/modulegraph>.
+        
+          The documentation is very rough at this moment and in need of reorganisation and
+          language cleanup. I've basically written the current version by reading the code
+          and documenting what it does; the order in which classes and methods are documented
+          is therefore not necessarily the most useful.
+        
+        - The repository has moved to bitbucket
+        
+        - Renamed ``modulegraph.modulegraph.AddPackagePath`` to ``addPackagePath``,
+          likewise ``ReplacePackage`` is now ``replacePackage``. The old name is still
+          available, but is deprecated and will be removed before the 1.0 release.
+        
+        - ``modulegraph.modulegraph`` contains two node types that are unused and
+          have unclear semantics: ``FlatPackage`` and ``ArchiveModule``. These node
+          types are deprecated and will be removed before 1.0 is released.
+        
+        - Added a simple commandline tool (``modulegraph``) that will print information
+          about the dependency graph of a script.
+        
+        - Added a module (``zipio``) for dealing with paths that may refer to entries
+          inside zipfiles (such as source paths referring to modules in zipped eggfiles).
+        
+          With this addition ``modulegraph.modulegraph.os_listdir`` is deprecated and
+          it will be removed before the 1.0 release.
+        
+        Bug fixes:
+        
+        - The ``__cmp__`` method of a Node no longer causes an exception
+          when the compared-to object is not a Node. Patch by Ivan Kozik.
+        
+        - Issue #1: The initialiser for ``modulegraph.ModuleGraph`` caused an exception
+          when an entry on the path (``sys.path``) didn't actually exist.
+        
+          Fix by "skurylo", testcase by Ronald.
+        
+        - The code no longer worked with python 2.5; this release fixes that.
+        
+        - Due to the switch to mercurial, setuptools no longer includes
+          all required files. Fixed by adding a MANIFEST.in file.
+        
+        - The method for printing a ``.dot`` representation of a ``ModuleGraph``
+          works again.
+        
+        
+        0.8.1
+        -----
+        
+        This is a minor feature release
+        
+        Features:
+        
+        - ``from __future__ import absolute_import`` is now supported
+        
+        - Relative imports (``from . import module``) are now supported
+        
+        - Add support for namespace packages when those are installed
+          using option ``--single-version-externally-managed`` (part
+          of setuptools/distribute)
+        
+        0.8
+        ---
+        
+        This is a minor feature release
+        
+        Features:
+        
+        - Initial support for Python 3.x
+        
+        - It is now possible to run the test suite
+          using ``python setup.py test``.
+        
+          (The actual test suite is still fairly minimal though)
+        
+Keywords: import, dependencies
+Platform: any
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Software Development :: Build Tools
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph.egg-info/SOURCES.txt b/catapult/telemetry/third_party/modulegraph/modulegraph.egg-info/SOURCES.txt
new file mode 100644
index 0000000..3409094
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph.egg-info/SOURCES.txt
@@ -0,0 +1,280 @@
+MANIFEST.in
+README.txt
+setup.cfg
+setup.py
+doc/Makefile
+doc/changelog.rst
+doc/commandline.rst
+doc/conf.py
+doc/find_modules.rst
+doc/index.rst
+doc/license.rst
+doc/modulegraph.rst
+doc/util.rst
+doc/zipio.rst
+doc/_build/doctrees/changelog.doctree
+doc/_build/doctrees/commandline.doctree
+doc/_build/doctrees/environment.pickle
+doc/_build/doctrees/find_modules.doctree
+doc/_build/doctrees/index.doctree
+doc/_build/doctrees/license.doctree
+doc/_build/doctrees/modulegraph.doctree
+doc/_build/doctrees/util.doctree
+doc/_build/doctrees/zipio.doctree
+doc/_build/html/.buildinfo
+doc/_build/html/changelog.html
+doc/_build/html/commandline.html
+doc/_build/html/find_modules.html
+doc/_build/html/genindex.html
+doc/_build/html/index.html
+doc/_build/html/license.html
+doc/_build/html/modulegraph.html
+doc/_build/html/objects.inv
+doc/_build/html/py-modindex.html
+doc/_build/html/search.html
+doc/_build/html/searchindex.js
+doc/_build/html/util.html
+doc/_build/html/zipio.html
+doc/_build/html/_sources/changelog.txt
+doc/_build/html/_sources/commandline.txt
+doc/_build/html/_sources/find_modules.txt
+doc/_build/html/_sources/index.txt
+doc/_build/html/_sources/license.txt
+doc/_build/html/_sources/modulegraph.txt
+doc/_build/html/_sources/util.txt
+doc/_build/html/_sources/zipio.txt
+doc/_build/html/_static/ajax-loader.gif
+doc/_build/html/_static/basic.css
+doc/_build/html/_static/comment-bright.png
+doc/_build/html/_static/comment-close.png
+doc/_build/html/_static/comment.png
+doc/_build/html/_static/doctools.js
+doc/_build/html/_static/down-pressed.png
+doc/_build/html/_static/down.png
+doc/_build/html/_static/file.png
+doc/_build/html/_static/jquery-1.11.1.js
+doc/_build/html/_static/jquery.js
+doc/_build/html/_static/minus.png
+doc/_build/html/_static/nature.css
+doc/_build/html/_static/plus.png
+doc/_build/html/_static/pygments.css
+doc/_build/html/_static/searchtools.js
+doc/_build/html/_static/underscore-1.3.1.js
+doc/_build/html/_static/underscore.js
+doc/_build/html/_static/up-pressed.png
+doc/_build/html/_static/up.png
+doc/_build/html/_static/websupport.js
+modulegraph/__init__.py
+modulegraph/__main__.py
+modulegraph/_compat.py
+modulegraph/find_modules.py
+modulegraph/modulegraph.py
+modulegraph/util.py
+modulegraph/zipio.py
+modulegraph.egg-info/PKG-INFO
+modulegraph.egg-info/SOURCES.txt
+modulegraph.egg-info/dependency_links.txt
+modulegraph.egg-info/entry_points.txt
+modulegraph.egg-info/requires.txt
+modulegraph.egg-info/top_level.txt
+modulegraph.egg-info/zip-safe
+modulegraph_tests/__init__.py
+modulegraph_tests/test_basic.py
+modulegraph_tests/test_edge_data.py
+modulegraph_tests/test_explicit_packages.py
+modulegraph_tests/test_implies.py
+modulegraph_tests/test_import_from_init.py
+modulegraph_tests/test_imports.py
+modulegraph_tests/test_modulegraph.py
+modulegraph_tests/test_pep420_nspkg.py
+modulegraph_tests/test_pycompat_pkg.py
+modulegraph_tests/test_relimport2.py
+modulegraph_tests/test_setuptools_nspkg.py
+modulegraph_tests/test_util.py
+modulegraph_tests/test_zipio.py
+modulegraph_tests/testdata/script
+modulegraph_tests/testdata/syspath.egg
+modulegraph_tests/testdata/syspath.zip
+modulegraph_tests/testdata/test.egg
+modulegraph_tests/testdata/test.txt
+modulegraph_tests/testdata/zipped.egg
+modulegraph_tests/testdata/nspkg/distribute-0.6.10/child/nameduser-1.5-py2.6-nspkg.pth
+modulegraph_tests/testdata/nspkg/distribute-0.6.10/child/namedpkg/slave.py
+modulegraph_tests/testdata/nspkg/distribute-0.6.10/child/nameduser-1.5-py2.6.egg-info/PKG-INFO
+modulegraph_tests/testdata/nspkg/distribute-0.6.10/child/nameduser-1.5-py2.6.egg-info/SOURCES.txt
+modulegraph_tests/testdata/nspkg/distribute-0.6.10/child/nameduser-1.5-py2.6.egg-info/dependency_links.txt
+modulegraph_tests/testdata/nspkg/distribute-0.6.10/child/nameduser-1.5-py2.6.egg-info/namespace_packages.txt
+modulegraph_tests/testdata/nspkg/distribute-0.6.10/child/nameduser-1.5-py2.6.egg-info/top_level.txt
+modulegraph_tests/testdata/nspkg/distribute-0.6.10/parent/namedpkg-1.0-py2.6-nspkg.pth
+modulegraph_tests/testdata/nspkg/distribute-0.6.10/parent/namedpkg/parent.py
+modulegraph_tests/testdata/nspkg/distribute-0.6.10/parent/namedpkg-1.0-py2.6.egg-info/PKG-INFO
+modulegraph_tests/testdata/nspkg/distribute-0.6.10/parent/namedpkg-1.0-py2.6.egg-info/SOURCES.txt
+modulegraph_tests/testdata/nspkg/distribute-0.6.10/parent/namedpkg-1.0-py2.6.egg-info/dependency_links.txt
+modulegraph_tests/testdata/nspkg/distribute-0.6.10/parent/namedpkg-1.0-py2.6.egg-info/namespace_packages.txt
+modulegraph_tests/testdata/nspkg/distribute-0.6.10/parent/namedpkg-1.0-py2.6.egg-info/top_level.txt
+modulegraph_tests/testdata/nspkg/distribute-0.6.12/child/nameduser-1.5-py2.5-nspkg.pth
+modulegraph_tests/testdata/nspkg/distribute-0.6.12/child/namedpkg/slave.py
+modulegraph_tests/testdata/nspkg/distribute-0.6.12/child/nameduser-1.5-py2.5.egg-info/PKG-INFO
+modulegraph_tests/testdata/nspkg/distribute-0.6.12/child/nameduser-1.5-py2.5.egg-info/SOURCES.txt
+modulegraph_tests/testdata/nspkg/distribute-0.6.12/child/nameduser-1.5-py2.5.egg-info/dependency_links.txt
+modulegraph_tests/testdata/nspkg/distribute-0.6.12/child/nameduser-1.5-py2.5.egg-info/namespace_packages.txt
+modulegraph_tests/testdata/nspkg/distribute-0.6.12/child/nameduser-1.5-py2.5.egg-info/top_level.txt
+modulegraph_tests/testdata/nspkg/distribute-0.6.12/parent/namedpkg-1.0-py2.5-nspkg.pth
+modulegraph_tests/testdata/nspkg/distribute-0.6.12/parent/namedpkg/parent.py
+modulegraph_tests/testdata/nspkg/distribute-0.6.12/parent/namedpkg-1.0-py2.5.egg-info/PKG-INFO
+modulegraph_tests/testdata/nspkg/distribute-0.6.12/parent/namedpkg-1.0-py2.5.egg-info/SOURCES.txt
+modulegraph_tests/testdata/nspkg/distribute-0.6.12/parent/namedpkg-1.0-py2.5.egg-info/dependency_links.txt
+modulegraph_tests/testdata/nspkg/distribute-0.6.12/parent/namedpkg-1.0-py2.5.egg-info/namespace_packages.txt
+modulegraph_tests/testdata/nspkg/distribute-0.6.12/parent/namedpkg-1.0-py2.5.egg-info/top_level.txt
+modulegraph_tests/testdata/nspkg/setuptools-0.6c9/child/nameduser-1.5-py2.5-nspkg.pth
+modulegraph_tests/testdata/nspkg/setuptools-0.6c9/child/namedpkg/slave.py
+modulegraph_tests/testdata/nspkg/setuptools-0.6c9/child/nameduser-1.5-py2.5.egg-info/PKG-INFO
+modulegraph_tests/testdata/nspkg/setuptools-0.6c9/child/nameduser-1.5-py2.5.egg-info/SOURCES.txt
+modulegraph_tests/testdata/nspkg/setuptools-0.6c9/child/nameduser-1.5-py2.5.egg-info/dependency_links.txt
+modulegraph_tests/testdata/nspkg/setuptools-0.6c9/child/nameduser-1.5-py2.5.egg-info/namespace_packages.txt
+modulegraph_tests/testdata/nspkg/setuptools-0.6c9/child/nameduser-1.5-py2.5.egg-info/top_level.txt
+modulegraph_tests/testdata/nspkg/setuptools-0.6c9/parent/namedpkg-1.0-py2.5-nspkg.pth
+modulegraph_tests/testdata/nspkg/setuptools-0.6c9/parent/namedpkg/parent.py
+modulegraph_tests/testdata/nspkg/setuptools-0.6c9/parent/namedpkg-1.0-py2.5.egg-info/PKG-INFO
+modulegraph_tests/testdata/nspkg/setuptools-0.6c9/parent/namedpkg-1.0-py2.5.egg-info/SOURCES.txt
+modulegraph_tests/testdata/nspkg/setuptools-0.6c9/parent/namedpkg-1.0-py2.5.egg-info/dependency_links.txt
+modulegraph_tests/testdata/nspkg/setuptools-0.6c9/parent/namedpkg-1.0-py2.5.egg-info/namespace_packages.txt
+modulegraph_tests/testdata/nspkg/setuptools-0.6c9/parent/namedpkg-1.0-py2.5.egg-info/top_level.txt
+modulegraph_tests/testdata/nspkg/src/install.py
+modulegraph_tests/testdata/nspkg/src/child/setup.py
+modulegraph_tests/testdata/nspkg/src/child/namedpkg/__init__.py
+modulegraph_tests/testdata/nspkg/src/child/namedpkg/slave.py
+modulegraph_tests/testdata/nspkg/src/child/nameduser.egg-info/PKG-INFO
+modulegraph_tests/testdata/nspkg/src/child/nameduser.egg-info/SOURCES.txt
+modulegraph_tests/testdata/nspkg/src/child/nameduser.egg-info/dependency_links.txt
+modulegraph_tests/testdata/nspkg/src/child/nameduser.egg-info/namespace_packages.txt
+modulegraph_tests/testdata/nspkg/src/child/nameduser.egg-info/top_level.txt
+modulegraph_tests/testdata/nspkg/src/parent/setup.py
+modulegraph_tests/testdata/nspkg/src/parent/namedpkg/__init__.py
+modulegraph_tests/testdata/nspkg/src/parent/namedpkg/parent.py
+modulegraph_tests/testdata/nspkg/src/parent/namedpkg.egg-info/PKG-INFO
+modulegraph_tests/testdata/nspkg/src/parent/namedpkg.egg-info/SOURCES.txt
+modulegraph_tests/testdata/nspkg/src/parent/namedpkg.egg-info/dependency_links.txt
+modulegraph_tests/testdata/nspkg/src/parent/namedpkg.egg-info/namespace_packages.txt
+modulegraph_tests/testdata/nspkg/src/parent/namedpkg.egg-info/top_level.txt
+modulegraph_tests/testdata/subdir/file1.txt
+modulegraph_tests/testdata/subdir/file2.txt
+modulegraph_tests/testdata/syspath/myext.pyd
+modulegraph_tests/testdata/syspath/mymodule.py
+modulegraph_tests/testdata/syspath/mymodule3.py
+modulegraph_tests/testdata/syspath/mypkg/__init__.py
+modulegraph_tests/testpkg-compatmodule/pkg/__init__.py
+modulegraph_tests/testpkg-compatmodule/pkg/api.py
+modulegraph_tests/testpkg-compatmodule/pkg/api2.py
+modulegraph_tests/testpkg-compatmodule/pkg/api3.py
+modulegraph_tests/testpkg-edgedata/function_class_existing.py
+modulegraph_tests/testpkg-edgedata/function_conditional_existing.py
+modulegraph_tests/testpkg-edgedata/function_conditional_import2_existing.py
+modulegraph_tests/testpkg-edgedata/function_conditional_import_existing.py
+modulegraph_tests/testpkg-edgedata/function_existing.py
+modulegraph_tests/testpkg-edgedata/function_import2_existing.py
+modulegraph_tests/testpkg-edgedata/function_import_existing.py
+modulegraph_tests/testpkg-edgedata/script.py
+modulegraph_tests/testpkg-edgedata/script_from_import.py
+modulegraph_tests/testpkg-edgedata/script_multi_import.py
+modulegraph_tests/testpkg-edgedata/toplevel_class_existing.py
+modulegraph_tests/testpkg-edgedata/toplevel_conditional_existing.py
+modulegraph_tests/testpkg-edgedata/toplevel_conditional_import2_existing.py
+modulegraph_tests/testpkg-edgedata/toplevel_conditional_import_existing.py
+modulegraph_tests/testpkg-edgedata/toplevel_existing.py
+modulegraph_tests/testpkg-edgedata/toplevel_import2_existing.py
+modulegraph_tests/testpkg-edgedata/toplevel_import_existing.py
+modulegraph_tests/testpkg-edgedata/pkg/__init__.py
+modulegraph_tests/testpkg-edgedata/pkg/function_class_existing.py
+modulegraph_tests/testpkg-edgedata/pkg/function_conditional_existing.py
+modulegraph_tests/testpkg-edgedata/pkg/function_conditional_import2_existing.py
+modulegraph_tests/testpkg-edgedata/pkg/function_conditional_import_existing.py
+modulegraph_tests/testpkg-edgedata/pkg/function_existing.py
+modulegraph_tests/testpkg-edgedata/pkg/function_import2_existing.py
+modulegraph_tests/testpkg-edgedata/pkg/function_import_existing.py
+modulegraph_tests/testpkg-edgedata/pkg/toplevel_class_existing.py
+modulegraph_tests/testpkg-edgedata/pkg/toplevel_conditional_existing.py
+modulegraph_tests/testpkg-edgedata/pkg/toplevel_conditional_import2_existing.py
+modulegraph_tests/testpkg-edgedata/pkg/toplevel_conditional_import_existing.py
+modulegraph_tests/testpkg-edgedata/pkg/toplevel_existing.py
+modulegraph_tests/testpkg-edgedata/pkg/toplevel_import2_existing.py
+modulegraph_tests/testpkg-edgedata/pkg/toplevel_import_existing.py
+modulegraph_tests/testpkg-import-from-init/script.py
+modulegraph_tests/testpkg-import-from-init/pkg/__init__.py
+modulegraph_tests/testpkg-import-from-init/pkg/subpkg/__init__.py
+modulegraph_tests/testpkg-import-from-init/pkg/subpkg/_collections.py
+modulegraph_tests/testpkg-import-from-init/pkg/subpkg/compat.py
+modulegraph_tests/testpkg-import-from-init/pkg2/__init__.py
+modulegraph_tests/testpkg-import-from-init/pkg2/subpkg/__init__.py
+modulegraph_tests/testpkg-import-from-init/pkg2/subpkg/_collections.py
+modulegraph_tests/testpkg-import-from-init/pkg2/subpkg/compat.py
+modulegraph_tests/testpkg-packages/main_script.py
+modulegraph_tests/testpkg-packages/pkg/__init__.py
+modulegraph_tests/testpkg-packages/pkg/sub3.py
+modulegraph_tests/testpkg-packages/pkg/sub1/__init__.py
+modulegraph_tests/testpkg-packages/pkg/sub1/modA.py
+modulegraph_tests/testpkg-packages/pkg/sub2/__init__.py
+modulegraph_tests/testpkg-packages/pkg/sub2/mod.py
+modulegraph_tests/testpkg-pep420-namespace/path1/package/sub2.py
+modulegraph_tests/testpkg-pep420-namespace/path2/package/sub1.py
+modulegraph_tests/testpkg-pep420-namespace/path2/package/nspkg/mod.py
+modulegraph_tests/testpkg-pep420-namespace/path2/package/subpackage/__init__.py
+modulegraph_tests/testpkg-pep420-namespace/path2/package/subpackage/sub.py
+modulegraph_tests/testpkg-regr1/main_script.py
+modulegraph_tests/testpkg-regr1/pkg/__init__.py
+modulegraph_tests/testpkg-regr1/pkg/a.py
+modulegraph_tests/testpkg-regr1/pkg/b.py
+modulegraph_tests/testpkg-regr2/main_script.py
+modulegraph_tests/testpkg-regr2/pkg/__init__.py
+modulegraph_tests/testpkg-regr2/pkg/base.py
+modulegraph_tests/testpkg-regr2/pkg/pkg.py
+modulegraph_tests/testpkg-regr3/script.py
+modulegraph_tests/testpkg-regr3/mypkg/__init__.py
+modulegraph_tests/testpkg-regr3/mypkg/distutils/__init__.py
+modulegraph_tests/testpkg-regr3/mypkg/distutils/ccompiler.py
+modulegraph_tests/testpkg-regr4/script.py
+modulegraph_tests/testpkg-regr4/pkg/__init__.py
+modulegraph_tests/testpkg-regr4/pkg/core/__init__.py
+modulegraph_tests/testpkg-regr4/pkg/core/callables.py
+modulegraph_tests/testpkg-regr4/pkg/core/listener.py
+modulegraph_tests/testpkg-regr4/pkg/core/listenerimpl.py
+modulegraph_tests/testpkg-regr5/__init__.py
+modulegraph_tests/testpkg-regr5/script.py
+modulegraph_tests/testpkg-regr6/module.py
+modulegraph_tests/testpkg-regr6/script.py
+modulegraph_tests/testpkg-relimport/mod.py
+modulegraph_tests/testpkg-relimport/script.py
+modulegraph_tests/testpkg-relimport/pkg/__init__.py
+modulegraph_tests/testpkg-relimport/pkg/mod.py
+modulegraph_tests/testpkg-relimport/pkg/oldstyle.py
+modulegraph_tests/testpkg-relimport/pkg/relative.py
+modulegraph_tests/testpkg-relimport/pkg/relimport.py
+modulegraph_tests/testpkg-relimport/pkg/toplevel.py
+modulegraph_tests/testpkg-relimport/pkg/sub2/__init__.py
+modulegraph_tests/testpkg-relimport/pkg/sub2/mod.py
+modulegraph_tests/testpkg-relimport/pkg/subpkg/__init__.py
+modulegraph_tests/testpkg-relimport/pkg/subpkg/mod2.py
+modulegraph_tests/testpkg-relimport/pkg/subpkg/relative.py
+modulegraph_tests/testpkg-relimport/pkg/subpkg/relative2.py
+modulegraph_tests/testpkg-relimport2/toplevel.py
+modulegraph_tests/testpkg-relimport2/pkg/__init__.py
+modulegraph_tests/testpkg-relimport2/pkg/mod1.py
+modulegraph_tests/testpkg-relimport2/pkg/mod2.py
+modulegraph_tests/testpkg-relimport2/pkg/mod3.py
+modulegraph_tests/testpkg-relimport2/pkg/sub/__init__.py
+modulegraph_tests/testpkg-setuptools-namespace/setup.py
+modulegraph_tests/testpkg-setuptools-namespace/build/lib/nspkg/__init__.py
+modulegraph_tests/testpkg-setuptools-namespace/build/lib/nspkg/module.py
+modulegraph_tests/testpkg-setuptools-namespace/build/lib/nspkg/nssubpkg/__init__.py
+modulegraph_tests/testpkg-setuptools-namespace/build/lib/nspkg/nssubpkg/sub.py
+modulegraph_tests/testpkg-setuptools-namespace/src/nspkg/__init__.py
+modulegraph_tests/testpkg-setuptools-namespace/src/nspkg/module.py
+modulegraph_tests/testpkg-setuptools-namespace/src/nspkg.egg-info/PKG-INFO
+modulegraph_tests/testpkg-setuptools-namespace/src/nspkg.egg-info/SOURCES.txt
+modulegraph_tests/testpkg-setuptools-namespace/src/nspkg.egg-info/dependency_links.txt
+modulegraph_tests/testpkg-setuptools-namespace/src/nspkg.egg-info/namespace_packages.txt
+modulegraph_tests/testpkg-setuptools-namespace/src/nspkg.egg-info/not-zip-safe
+modulegraph_tests/testpkg-setuptools-namespace/src/nspkg.egg-info/top_level.txt
+modulegraph_tests/testpkg-setuptools-namespace/src/nspkg/nssubpkg/__init__.py
+modulegraph_tests/testpkg-setuptools-namespace/src/nspkg/nssubpkg/sub.py
+scripts/extract_implies.py
\ No newline at end of file
diff --git a/catapult/third_party/six/six.egg-info/dependency_links.txt b/catapult/telemetry/third_party/modulegraph/modulegraph.egg-info/dependency_links.txt
similarity index 100%
copy from catapult/third_party/six/six.egg-info/dependency_links.txt
copy to catapult/telemetry/third_party/modulegraph/modulegraph.egg-info/dependency_links.txt
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph.egg-info/entry_points.txt b/catapult/telemetry/third_party/modulegraph/modulegraph.egg-info/entry_points.txt
new file mode 100644
index 0000000..9fc3791
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph.egg-info/entry_points.txt
@@ -0,0 +1,3 @@
+[console_scripts]
+modulegraph = modulegraph.__main__:main
+
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph.egg-info/requires.txt b/catapult/telemetry/third_party/modulegraph/modulegraph.egg-info/requires.txt
new file mode 100644
index 0000000..dde6882
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph.egg-info/requires.txt
@@ -0,0 +1 @@
+altgraph >= 0.12
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph.egg-info/top_level.txt b/catapult/telemetry/third_party/modulegraph/modulegraph.egg-info/top_level.txt
new file mode 100644
index 0000000..e0e1b8f
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph.egg-info/top_level.txt
@@ -0,0 +1 @@
+modulegraph
diff --git a/catapult/third_party/six/six.egg-info/dependency_links.txt b/catapult/telemetry/third_party/modulegraph/modulegraph.egg-info/zip-safe
similarity index 100%
copy from catapult/third_party/six/six.egg-info/dependency_links.txt
copy to catapult/telemetry/third_party/modulegraph/modulegraph.egg-info/zip-safe
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph/__init__.py
new file mode 100644
index 0000000..7c85619
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph/__init__.py
@@ -0,0 +1,2 @@
+import pkg_resources
+__version__ = pkg_resources.require('modulegraph')[0].version
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph/__main__.py b/catapult/telemetry/third_party/modulegraph/modulegraph/__main__.py
new file mode 100644
index 0000000..2a84cda
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph/__main__.py
@@ -0,0 +1,76 @@
+from __future__ import print_function
+import sys
+import os
+import optparse
+import textwrap
+from .modulegraph import ModuleGraph
+
+def main():
+    # Parse command line
+    usage = textwrap.dedent('''\
+        Usage:
+            modulegraph [options] scriptfile ...
+
+        Valid options:
+        * -d: Increase debug level
+        * -q: Clear debug level
+
+        * -m: arguments are module names, not script files
+        * -x name: Add 'name' to the excludes list
+        * -p name: Add 'name' to the module search path
+
+        * -g: Output a .dot graph
+        * -h: Output a html file
+    ''')
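+    # Illustrative invocation (assumed, not part of the original source):
+    #
+    #   python -mmodulegraph -x distutils -g myscript.py > graph.dot
+    #
+    # builds the dependency graph for the hypothetical myscript.py, excludes
+    # 'distutils', and writes a .dot representation of the graph to stdout.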
+    parser = optparse.OptionParser(usage=usage, add_help_option=False)
+    parser.add_option('-d', action='count', dest='debug', default=1)
+    parser.add_option('-q', action='store_const', dest='debug', const=0)
+
+    parser.add_option('-m', action='store_true', dest='domods', default=False)
+    parser.add_option('-x', action='append', dest='excludes', default=[])
+    parser.add_option('-p', action='append', dest='addpath', default=[])
+
+    parser.add_option('-g', action='store_const', dest='output', const='dot')
+    parser.add_option('-h', action='store_const', dest='output', const='html')
+    opts, args = parser.parse_args()
+
+    if not args:
+        print("No script specified", file=sys.stderr)
+        print(usage, file=sys.stderr)
+        sys.exit(1)
+
+    script = args[0]
+
+    # Set the path based on sys.path and the script directory
+    path = sys.path[:]
+    path[0] = os.path.dirname(script)
+    path = opts.addpath + path
+    if opts.debug > 1:
+        print("path:", file=sys.stderr)
+        for item in path:
+            print("   ", repr(item), file=sys.stderr)
+
+    # Create the module finder and turn its crank
+    mf = ModuleGraph(path, excludes=opts.excludes, debug=opts.debug)
+    for arg in args:
+        if opts.domods:
+            if arg[-2:] == '.*':
+                mf.import_hook(arg[:-2], None, ["*"])
+            else:
+                mf.import_hook(arg)
+        else:
+            mf.run_script(arg)
+    if opts.output == 'dot':
+        mf.graphreport()
+    elif opts.output == 'html':
+        mf.create_xref()
+    else:
+        mf.report()
+    sys.exit(0)
+
+
+if __name__ == '__main__':
+    try:
+        main()
+    except KeyboardInterrupt:
+        print("\n[interrupt]")
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph/_compat.py b/catapult/telemetry/third_party/modulegraph/modulegraph/_compat.py
new file mode 100644
index 0000000..aa0fc02
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph/_compat.py
@@ -0,0 +1,9 @@
+import sys
+
+if sys.version_info[0] == 2:
+    def Bchr(value):
+        return chr(value)
+
+else:
+    def Bchr(value):
+        return value
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph/find_modules.py b/catapult/telemetry/third_party/modulegraph/modulegraph/find_modules.py
new file mode 100644
index 0000000..fee8c17
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph/find_modules.py
@@ -0,0 +1,366 @@
+"""
+modulegraph.find_modules - High-level module dependency finding interface
+=========================================================================
+
+History
+........
+
+Originally (loosely) based on code in py2exe's build_exe.py by Thomas Heller.
+"""
+from __future__ import absolute_import
+
+import sys
+import os
+import imp
+import warnings
+
+import modulegraph.modulegraph as modulegraph
+from modulegraph.modulegraph import Alias, Script, Extension
+from modulegraph.util import imp_find_module
+
+__all__ = [
+    'find_modules', 'parse_mf_results'
+]
+
+def get_implies():
+    result = {
+        # imports done from builtin modules in C code (untrackable by modulegraph)
+        "_curses":      ["curses"],
+        "posix":        ["resource"],
+        "gc":           ["time"],
+        "time":         ["_strptime"],
+        "datetime":     ["time"],
+        "MacOS":        ["macresource"],
+        "cPickle":      ["copy_reg", "cStringIO"],
+        "parser":       ["copy_reg"],
+        "codecs":       ["encodings"],
+        "cStringIO":    ["copy_reg"],
+        "_sre":         ["copy", "string", "sre"],
+        "zipimport":    ["zlib"],
+
+        # Python 3.2:
+        "_datetime":    ["time", "_strptime"],
+        "_json":        ["json.decoder"],
+        "_pickle":      ["codecs", "copyreg", "_compat_pickle"],
+        "_posixsubprocess": ["gc"],
+        "_ssl":         ["socket"],
+
+        # Python 3.3:
+        "_elementtree": ["copy", "xml.etree.ElementPath" ],
+
+        # mactoolboxglue can do a bunch more of these
+        # that are far harder to predict, these should be tracked
+        # manually for now.
+
+        # this isn't C, but it uses __import__
+        "anydbm":       ["dbhash", "gdbm", "dbm", "dumbdbm", "whichdb"],
+        # package aliases
+        "wxPython.wx":  Alias('wx'),
+
+    }
+
+    if sys.version_info[0] == 3:
+        result["_sre"] = ["copy", "re"]
+        result["parser"] = ["copyreg"]
+
+        # _frozen_importlib is part of the interpreter itself
+        result["_frozen_importlib"] = None
+
+    if sys.version_info[0] == 2 and sys.version_info[1] >= 5:
+        result.update({
+            "email.base64MIME":         Alias("email.base64mime"),
+            "email.Charset":            Alias("email.charset"),
+            "email.Encoders":           Alias("email.encoders"),
+            "email.Errors":             Alias("email.errors"),
+            "email.Feedparser":         Alias("email.feedparser"),
+            "email.Generator":          Alias("email.generator"),
+            "email.Header":             Alias("email.header"),
+            "email.Iterators":          Alias("email.iterators"),
+            "email.Message":            Alias("email.message"),
+            "email.Parser":             Alias("email.parser"),
+            "email.quopriMIME":         Alias("email.quoprimime"),
+            "email.Utils":              Alias("email.utils"),
+            "email.MIMEAudio":          Alias("email.mime.audio"),
+            "email.MIMEBase":           Alias("email.mime.base"),
+            "email.MIMEImage":          Alias("email.mime.image"),
+            "email.MIMEMessage":        Alias("email.mime.message"),
+            "email.MIMEMultipart":      Alias("email.mime.multipart"),
+            "email.MIMENonMultipart":   Alias("email.mime.nonmultipart"),
+            "email.MIMEText":           Alias("email.mime.text"),
+        })
+
+    if sys.version_info[:2] >= (2, 5):
+        result["_elementtree"] = ["pyexpat"]
+
+        import xml.etree
+        files = os.listdir(xml.etree.__path__[0])
+        for fn in files:
+            if fn.endswith('.py') and fn != "__init__.py":
+                result["_elementtree"].append("xml.etree.%s"%(fn[:-3],))
+
+    if sys.version_info[:2] >= (2, 6):
+        result['future_builtins'] = ['itertools']
+
+    # os.path is an alias for a platform specific submodule,
+    # ensure that the graph shows this.
+    result['os.path'] = Alias(os.path.__name__)
+
+
+    return result
+
+def parse_mf_results(mf):
+    """
+    Return two lists: the first one contains the python files in the graph,
+    the second the C extensions.
+
+    :param mf: a :class:`modulegraph.modulegraph.ModuleGraph` instance
+    """
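+    # Illustrative usage (assumed, not part of the original source); the
+    # script name is hypothetical:
+    #
+    #   mf = find_modules(scripts=['myscript.py'])
+    #   py_files, extensions = parse_mf_results(mf)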
+    #for name, imports in get_hidden_imports().items():
+    #    if name in mf.modules.keys():
+    #        for mod in imports:
+    #            mf.import_hook(mod)
+
+    # Retrieve modules from modulegraph
+    py_files = []
+    extensions = []
+
+    for item in mf.flatten():
+        # There may be __main__ modules (from mf.run_script), but
+        # we don't need them in the zipfile we build.
+        if item.identifier == "__main__":
+            continue
+        src = item.filename
+        if src and src != '-':
+            if isinstance(item, Script):
+                # Scripts are python files
+                py_files.append(item)
+
+            elif isinstance(item, Extension):
+                extensions.append(item)
+
+            else:
+                py_files.append(item)
+
+    # sort on the file names, the output is nicer to read
+    py_files.sort(key=lambda v: v.filename)
+    extensions.sort(key=lambda v: v.filename)
+    return py_files, extensions
+
+
+def plat_prepare(includes, packages, excludes):
+    # used by Python itself
+    includes.update(["warnings", "unicodedata", "weakref"])
+
+    #if os.uname()[0] != 'java':
+        # Jython specific imports in the stdlib:
+        #excludes.update([
+        #    'java.lang',
+        #    'org.python.core',
+        #])
+
+    if not sys.platform.startswith('irix'):
+        excludes.update([
+            'AL',
+            'sgi',
+            'vms_lib',
+        ])
+
+    if not sys.platform in ('mac', 'darwin'):
+        # XXX - this doesn't look nearly complete
+        excludes.update([
+            'Audio_mac',
+            'Carbon.File',
+            'Carbon.Folder',
+            'Carbon.Folders',
+            'EasyDialogs',
+            'MacOS',
+            'macfs',
+            'macostools',
+            #'macpath',
+            '_scproxy',
+        ])
+
+    if not sys.platform == 'win32':
+        # only win32
+        excludes.update([
+            #'ntpath',
+            'nturl2path',
+            'win32api',
+            'win32con',
+            'win32event',
+            'win32evtlogutil',
+            'win32evtlog',
+            'win32file',
+            'win32gui',
+            'win32pipe',
+            'win32process',
+            'win32security',
+            'pywintypes',
+            'winsound',
+            'win32',
+            '_winreg',
+            '_winapi',
+            'msvcrt',
+            'winreg',
+            '_subprocess',
+         ])
+
+    if not sys.platform == 'riscos':
+        excludes.update([
+             'riscosenviron',
+             #'riscospath',
+             'rourl2path',
+          ])
+
+    if not (sys.platform == 'dos' or sys.platform.startswith('ms-dos')):
+        excludes.update([
+            'dos',
+        ])
+
+    if not sys.platform == 'os2emx':
+        excludes.update([
+            #'os2emxpath',
+            '_emx_link',
+        ])
+
+    excludes.update(set(['posix', 'nt', 'os2', 'mac', 'ce', 'riscos']) - set(sys.builtin_module_names))
+
+    # Carbon.Res depends on this, but the module hasn't been present
+    # for a while...
+    excludes.add('OverrideFrom23')
+    excludes.add('OverrideFrom23._Res')
+
+    # import trickery in the dummy_threading module (stdlib)
+    excludes.add('_dummy_threading')
+
+    try:
+        imp_find_module('poll')
+    except ImportError:
+        excludes.update([
+            'poll',
+        ])
+
+def find_needed_modules(mf=None, scripts=(), includes=(), packages=(), warn=warnings.warn):
+    if mf is None:
+        mf = modulegraph.ModuleGraph()
+    # feed Modulefinder with everything, and return it.
+
+    for path in scripts:
+        mf.run_script(path)
+
+    for mod in includes:
+        try:
+            if mod[-2:] == '.*':
+                mf.import_hook(mod[:-2], None, ['*'])
+            else:
+                mf.import_hook(mod)
+        except ImportError:
+            warn("No module named %s"%(mod,))
+
+    for f in packages:
+        # If modulegraph has seen a reference to the package, then
+        # we prefer to believe that (imp_find_module doesn't seem to locate
+        # sub-packages)
+        m = mf.findNode(f)
+        if m is not None:
+            path = m.packagepath[0]
+        else:
+            # Find path of package
+            # TODO: use imp_find_module_or_importer
+            try:
+                path = imp_find_module(f, mf.path)[1]
+            except ImportError:
+                warn("No package named %s" % f)
+                continue
+
+        # walk the path to find subdirs containing __init__.py files
+        # scan the results (directory of __init__.py files)
+        # first trim the path (of the head package),
+        # then convert the directory name into a package name,
+        # finally push into modulegraph.
+        # FIXME:
+        # 1) Needs to be adjusted for namespace packages in python 3.3
+        # 2) Code is fairly dodgy and needs better tests
+        for (dirpath, dirnames, filenames) in os.walk(path):
+            if '__init__.py' in filenames and dirpath.startswith(path):
+                package = f + '.' + dirpath[len(path)+1:].replace(os.sep, '.')
+                if package.endswith('.'):
+                    package = package[:-1]
+                m = mf.import_hook(package, None, ["*"])
+            else:
+                # Exclude subtrees that aren't packages
+                dirnames[:] = []
+
+
+    return mf
+
+#
+# resource constants
+#
+PY_SUFFIXES = ['.py', '.pyw', '.pyo', '.pyc']
+C_SUFFIXES = [
+    _triple[0] for _triple in imp.get_suffixes()
+    if _triple[2] == imp.C_EXTENSION
+]
+
+#
+# side-effects
+#
+
+def _replacePackages():
+    REPLACEPACKAGES = {
+        '_xmlplus':     'xml',
+    }
+    for k,v in REPLACEPACKAGES.items():
+        modulegraph.replacePackage(k, v)
+
+_replacePackages()
+
+def find_modules(scripts=(), includes=(), packages=(), excludes=(), path=None, debug=0):
+    """
+    High-level interface, takes iterables for:
+        scripts, includes, packages, excludes
+
+    and returns a :class:`modulegraph.modulegraph.ModuleGraph` instance.
+
+    Use :func:`parse_mf_results` to split the resulting graph into python_files
+    (pure python dependencies) and extensions (platform-specific C extension
+    dependencies), both as lists of modulegraph node objects.
+    """
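+    # Illustrative usage (assumed, not part of the original source): build a
+    # graph for a hypothetical script while excluding one package, then print
+    # a textual report of everything that was found.
+    #
+    #   mf = find_modules(scripts=['app.py'], excludes=['email'])
+    #   mf.report()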
+    scripts = set(scripts)
+    includes = set(includes)
+    packages = set(packages)
+    excludes = set(excludes)
+    plat_prepare(includes, packages, excludes)
+    mf = modulegraph.ModuleGraph(
+        path=path,
+        excludes=(excludes - includes),
+        implies=get_implies(),
+        debug=debug,
+    )
+    find_needed_modules(mf, scripts, includes, packages)
+    return mf
+
+def test():
+    if '-g' in sys.argv[1:]:
+        sys.argv.remove('-g')
+        dograph = True
+    else:
+        dograph = False
+    if '-x' in sys.argv[1:]:
+        sys.argv.remove('-x')
+        doxref = True
+    else:
+        doxref= False
+
+    scripts = sys.argv[1:] or [__file__]
+    mf = find_modules(scripts=scripts)
+    if doxref:
+        mf.create_xref()
+    elif dograph:
+        mf.graphreport()
+    else:
+        mf.report()
+
+if __name__ == '__main__':
+    test()
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph/modulegraph.py b/catapult/telemetry/third_party/modulegraph/modulegraph/modulegraph.py
new file mode 100644
index 0000000..2795cc4
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph/modulegraph.py
@@ -0,0 +1,1686 @@
+"""
+Find modules used by a script, using bytecode analysis.
+
+Based on the stdlib modulefinder by Thomas Heller and Just van Rossum,
+but uses a graph data structure and 2.3 features
+
+XXX: Verify all calls to import_hook (and variants) to ensure that
+imports are done in the right way.
+"""
+from __future__ import absolute_import, print_function
+
+import pkg_resources
+
+import dis
+import imp
+import marshal
+import os
+import sys
+import struct
+import zipimport
+import re
+from collections import deque, namedtuple
+import ast
+
+from altgraph.ObjectGraph import ObjectGraph
+from altgraph import GraphError
+
+from itertools import count
+
+from modulegraph import util
+from modulegraph import zipio
+
+if sys.version_info[0] == 2:
+    from StringIO import StringIO as BytesIO
+    from StringIO import StringIO
+    from  urllib import pathname2url
+    def _Bchr(value):
+        return chr(value)
+
+else:
+    from urllib.request  import pathname2url
+    from io import BytesIO, StringIO
+
+    def _Bchr(value):
+        return value
+
+
+# File open mode for reading (universal newlines)
+if sys.version_info[0] == 2:
+    _READ_MODE = "rU"
+else:
+    _READ_MODE = "r"
+
+
+
+
+# Modulegraph does a good job at simulating Python's import machinery, but it
+# can not handle packagepath modifications that packages make at runtime.
+# Therefore there is a mechanism whereby you can register extra paths in this
+# map for a package, and they will be honored.
+#
+# Note this is a mapping to lists of paths.
+_packagePathMap = {}
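+# For example (illustrative only; the package name and path are hypothetical),
+# calling addPackagePath('mypkg', '/opt/plugins/mypkg') registers an extra
+# directory that will be searched for submodules of the 'mypkg' package.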
+
+# Prefix used in magic .pth files used by setuptools to create namespace
+# packages without an __init__.py file.
+#
+# The value is a list of such prefixes as the prefix varies with versions of
+# setuptools.
+_SETUPTOOLS_NAMESPACEPKG_PTHs=(
+    "import sys,types,os; p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('",
+    "import sys,new,os; p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('",
+    "import sys, types, os;p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('",
+)
+
+
+def _namespace_package_path(fqname, pathnames, path=None):
+    """
+    Return the __path__ for the python package in *fqname*.
+
+    This function uses setuptools metadata to extract information
+    about namespace packages from installed eggs.
+    """
+    working_set = pkg_resources.WorkingSet(path)
+
+    path = list(pathnames)
+
+    for dist in working_set:
+        if dist.has_metadata('namespace_packages.txt'):
+            namespaces = dist.get_metadata(
+                    'namespace_packages.txt').splitlines()
+            if fqname in namespaces:
+                nspath = os.path.join(dist.location, *fqname.split('.'))
+                if nspath not in path:
+                    path.append(nspath)
+
+    return path
+
+_strs = re.compile(r'''^\s*["']([A-Za-z0-9_]+)["'],?\s*''') # "<- emacs happy
+
+def _eval_str_tuple(value):
+    """
+    Input is the repr of a tuple of strings, output
+    is that tuple.
+
+    This only works with a tuple where the members are
+    python identifiers.
+    """
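+    # Illustrative example (not part of the original source):
+    #
+    #   _eval_str_tuple("('foo', 'bar')")  ->  ('foo', 'bar')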
+    if not (value.startswith('(') and value.endswith(')')):
+        raise ValueError(value)
+
+    orig_value = value
+    value = value[1:-1]
+
+    result = []
+    while value:
+        m = _strs.match(value)
+        if m is None:
+            raise ValueError(orig_value)
+
+        result.append(m.group(1))
+        value = value[len(m.group(0)):]
+
+    return tuple(result)
+
+def _path_from_importerror(exc, default):
+    # This is a hack, but sadly enough the necessary information
+    # isn't available otherwise.
+    m = re.match(r'^No module named (\S+)$', str(exc))
+    if m is not None:
+        return m.group(1)
+
+    return default
+
+def os_listdir(path):
+    """
+    Deprecated name
+    """
+    warnings.warn("Use zipio.listdir instead of os_listdir",
+            DeprecationWarning)
+    return zipio.listdir(path)
+
+
+def _code_to_file(co):
+    """ Convert code object to a .pyc pseudo-file """
+    return BytesIO(
+            imp.get_magic() + b'\0\0\0\0' + marshal.dumps(co))
+
+
+def find_module(name, path=None):
+    """
+    A version of imp.find_module that works with zipped packages.
+    """
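+    # Illustrative usage (assumed, not part of the original source); mirrors
+    # the (fileobj, pathname, description) triple returned by imp.find_module:
+    #
+    #   fp, pathname, description = find_module('os')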
+    if path is None:
+        path = sys.path
+
+    # Support for the PEP302 importer for normal imports:
+    # - Python 2.5 has pkgutil.ImpImporter
+    # - In setuptools 0.7 and later there's _pkgutil.ImpImporter
+    # - In earlier setuptools versions there's pkg_resources.ImpWrapper
+    #
+    # XXX: This is a bit of a hack, should check if we can just rely on
+    # PEP302's get_code() method with all recent versions of pkgutil and/or
+    # setuptools (setuptools 0.6.latest, setuptools trunk and python2.[45])
+    #
+    # For python 3.3 this code should be replaced by code using importlib,
+    # for python 3.2 and 2.7 this should be cleaned up a lot.
+    try:
+        from pkgutil import ImpImporter
+    except ImportError:
+        try:
+            from _pkgutil import ImpImporter
+        except ImportError:
+            ImpImporter = pkg_resources.ImpWrapper
+
+    namespace_path =[]
+    fp = None
+    for entry in path:
+        importer = pkg_resources.get_importer(entry)
+        if importer is None:
+            continue
+
+        if sys.version_info[:2] >= (3,3) and hasattr(importer, 'find_loader'):
+            loader, portions = importer.find_loader(name)
+
+        else:
+            loader = importer.find_module(name)
+            portions = []
+
+        namespace_path.extend(portions)
+
+        if loader is None: continue
+
+        if isinstance(importer, ImpImporter):
+            filename = loader.filename
+            if filename.endswith('.pyc') or filename.endswith('.pyo'):
+                fp = open(filename, 'rb')
+                description = ('.pyc', 'rb', imp.PY_COMPILED)
+                return (fp, filename, description)
+
+            elif filename.endswith('.py'):
+                if sys.version_info[0] == 2:
+                    fp = open(filename, _READ_MODE)
+                else:
+                    with open(filename, 'rb') as fp:
+                        encoding = util.guess_encoding(fp)
+
+                    fp = open(filename, _READ_MODE, encoding=encoding)
+                description = ('.py', _READ_MODE, imp.PY_SOURCE)
+                return (fp, filename, description)
+
+            else:
+                for _sfx, _mode, _type in imp.get_suffixes():
+                    if _type == imp.C_EXTENSION and filename.endswith(_sfx):
+                        description = (_sfx, 'rb', imp.C_EXTENSION)
+                        break
+                else:
+                    description = ('', '', imp.PKG_DIRECTORY)
+
+                return (None, filename, description)
+
+        if hasattr(loader, 'path'):
+            if loader.path.endswith('.pyc') or loader.path.endswith('.pyo'):
+                fp = open(loader.path, 'rb')
+                description = ('.pyc', 'rb', imp.PY_COMPILED)
+                return (fp, loader.path, description)
+
+
+        if hasattr(loader, 'get_source'):
+            source = loader.get_source(name)
+            fp = StringIO(source)
+            co = None
+
+        else:
+            source = None
+
+        if source is None:
+            if hasattr(loader, 'get_code'):
+                co = loader.get_code(name)
+                fp = _code_to_file(co)
+
+            else:
+                fp = None
+                co = None
+
+        pathname = os.path.join(entry, *name.split('.'))
+
+        if isinstance(loader, zipimport.zipimporter):
+            # Check if this happens to be a wrapper module introduced by
+            # setuptools; if it is, we return the actual extension.
+            zn = '/'.join(name.split('.'))
+            for _sfx, _mode, _type in imp.get_suffixes():
+                if _type == imp.C_EXTENSION:
+                    p = loader.prefix + zn + _sfx
+                    if loader._files is None:
+                        loader_files = zipimport._zip_directory_cache[loader.archive]
+                    else:
+                        loader_files = loader._files
+
+                    if p in loader_files:
+                        description = (_sfx, 'rb', imp.C_EXTENSION)
+                        return (None, pathname + _sfx, description)
+
+        if hasattr(loader, 'is_package') and loader.is_package(name):
+            return (None, pathname, ('', '', imp.PKG_DIRECTORY))
+
+        if co is None:
+            if hasattr(loader, 'path'):
+                filename = loader.path
+            elif hasattr(loader, 'get_filename'):
+                filename = loader.get_filename(name)
+                if source is not None:
+                    if filename.endswith(".pyc") or filename.endswith(".pyo"):
+                        filename = filename[:-1]
+            else:
+                filename = None
+
+            if filename is not None and (filename.endswith('.py') or filename.endswith('.pyw')):
+                return (fp, filename, ('.py', 'rU', imp.PY_SOURCE))
+            else:
+                if fp is not None:
+                    fp.close()
+                return (None, filename, (os.path.splitext(filename)[-1], 'rb', imp.C_EXTENSION))
+
+        else:
+            if hasattr(loader, 'path'):
+                return (fp, loader.path, ('.pyc', 'rb', imp.PY_COMPILED))
+            else:
+                return (fp, pathname + '.pyc', ('.pyc', 'rb', imp.PY_COMPILED))
+
+    if namespace_path:
+        if fp is not None:
+            fp.close()
+        return (None, namespace_path[0], ('', namespace_path, imp.PKG_DIRECTORY))
+
+    raise ImportError(name)
+
+def moduleInfoForPath(path):
+    for (ext, readmode, typ) in imp.get_suffixes():
+        if path.endswith(ext):
+            return os.path.basename(path)[:-len(ext)], readmode, typ
+    return None
+
+# A Public interface
+import warnings
+def AddPackagePath(packagename, path):
+    warnings.warn("Use addPackagePath instead of AddPackagePath",
+            DeprecationWarning)
+
+    addPackagePath(packagename, path)
+
+def addPackagePath(packagename, path):
+    paths = _packagePathMap.get(packagename, [])
+    paths.append(path)
+    _packagePathMap[packagename] = paths
+
+_replacePackageMap = {}
+
+# This ReplacePackage mechanism allows modulefinder to work around the
+# way the _xmlplus package injects itself under the name "xml" into
+# sys.modules at runtime by calling ReplacePackage("_xmlplus", "xml")
+# before running ModuleGraph.
+def ReplacePackage(oldname, newname):
+    warnings.warn("use replacePackage instead of ReplacePackage",
+            DeprecationWarning)
+    replacePackage(oldname, newname)
+
+def replacePackage(oldname, newname):
+    _replacePackageMap[oldname] = newname
+
+
+class DependencyInfo (namedtuple("DependencyInfo", ["conditional", "function", "tryexcept", "fromlist"])):
+    __slots__ = ()
+
+    def _merged(self, other):
+        if (not self.conditional and not self.function and not self.tryexcept) \
+            or (not other.conditional and not other.function and not other.tryexcept):
+                return DependencyInfo(conditional=False, function=False, tryexcept=False, fromlist=self.fromlist and other.fromlist)
+
+        else:
+            return DependencyInfo(
+                    conditional=self.conditional or other.conditional,
+                    function=self.function or other.function,
+                    tryexcept=self.tryexcept or other.tryexcept,
+                    fromlist=self.fromlist and other.fromlist)
+
+
+class Node(object):
+    def __init__(self, identifier):
+        self.debug = 0
+        self.graphident = identifier
+        self.identifier = identifier
+        self._namespace = {}
+        self.filename = None
+        self.packagepath = None
+        self.code = None
+        # The set of global names that are assigned to in the module.
+        # This includes those names imported through starimports of
+        # Python modules.
+        self.globalnames = set()
+        # The set of starimports this module did that could not be
+        # resolved, ie. a starimport from a non-Python module.
+        self.starimports = set()
+
+    def __contains__(self, name):
+        return name in self._namespace
+
+    def __getitem__(self, name):
+        return self._namespace[name]
+
+    def __setitem__(self, name, value):
+        self._namespace[name] = value
+
+    def get(self, *args):
+        return self._namespace.get(*args)
+
+    def __cmp__(self, other):
+        try:
+            otherIdent = getattr(other, 'graphident')
+        except AttributeError:
+            return NotImplemented
+
+        return cmp(self.graphident, otherIdent)
+
+    def __eq__(self, other):
+        try:
+            otherIdent = getattr(other, 'graphident')
+        except AttributeError:
+            return False
+
+        return self.graphident == otherIdent
+
+    def __ne__(self, other):
+        try:
+            otherIdent = getattr(other, 'graphident')
+        except AttributeError:
+            return True
+
+        return self.graphident != otherIdent
+
+    def __lt__(self, other):
+        try:
+            otherIdent = getattr(other, 'graphident')
+        except AttributeError:
+            return NotImplemented
+
+        return self.graphident < otherIdent
+
+    def __le__(self, other):
+        try:
+            otherIdent = getattr(other, 'graphident')
+        except AttributeError:
+            return NotImplemented
+
+        return self.graphident <= otherIdent
+
+    def __gt__(self, other):
+        try:
+            otherIdent = getattr(other, 'graphident')
+        except AttributeError:
+            return NotImplemented
+
+        return self.graphident > otherIdent
+
+    def __ge__(self, other):
+        try:
+            otherIdent = getattr(other, 'graphident')
+        except AttributeError:
+            return NotImplemented
+
+        return self.graphident >= otherIdent
+
+
+    def __hash__(self):
+        return hash(self.graphident)
+
+    def infoTuple(self):
+        return (self.identifier,)
+
+    def __repr__(self):
+        return '%s%r' % (type(self).__name__, self.infoTuple())
+
+class Alias(str):
+    pass
+
+class AliasNode(Node):
+    def __init__(self, name, node):
+        super(AliasNode, self).__init__(name)
+        for k in 'identifier', 'packagepath', '_namespace', 'globalnames', 'starimports':
+            setattr(self, k, getattr(node, k, None))
+
+    def infoTuple(self):
+        return (self.graphident, self.identifier)
+
+class BadModule(Node):
+    pass
+
+class ExcludedModule(BadModule):
+    pass
+
+class MissingModule(BadModule):
+    pass
+
+class Script(Node):
+    def __init__(self, filename):
+        super(Script, self).__init__(filename)
+        self.filename = filename
+
+    def infoTuple(self):
+        return (self.filename,)
+
+class BaseModule(Node):
+    def __init__(self, name, filename=None, path=None):
+        super(BaseModule, self).__init__(name)
+        self.filename = filename
+        self.packagepath = path
+
+    def infoTuple(self):
+        return tuple(filter(None, (self.identifier, self.filename, self.packagepath)))
+
+class BuiltinModule(BaseModule):
+    pass
+
+class SourceModule(BaseModule):
+    pass
+
+class InvalidSourceModule(SourceModule):
+    pass
+
+class CompiledModule(BaseModule):
+    pass
+
+class InvalidCompiledModule(BaseModule):
+    pass
+
+class Package(BaseModule):
+    pass
+
+class NamespacePackage(Package):
+    pass
+
+class Extension(BaseModule):
+    pass
+
+class FlatPackage(BaseModule): # nocoverage
+    def __init__(self, *args, **kwds):
+        warnings.warn("This class will be removed in a future version of modulegraph",
+            DeprecationWarning)
+        super(FlatPackage, self).__init__(*args, **kwds)
+
+class ArchiveModule(BaseModule): # nocoverage
+    def __init__(self, *args, **kwds):
+        warnings.warn("This class will be removed in a future version of modulegraph",
+            DeprecationWarning)
+        super(ArchiveModule, self).__init__(*args, **kwds)
+
+# HTML templates for ModuleGraph generator
+header = """\
+<html>
+  <head>
+    <title>%(TITLE)s</title>
+    <style>
+      .node { margin:1em 0; }
+    </style>
+  </head>
+  <body>
+    <h1>%(TITLE)s</h1>"""
+entry = """
+<div class="node">
+  <a name="%(NAME)s" />
+  %(CONTENT)s
+</div>"""
+contpl = """<tt>%(NAME)s</tt> %(TYPE)s"""
+contpl_linked = """\
+<a target="code" href="%(URL)s" type="text/plain"><tt>%(NAME)s</tt></a>"""
+imports = """\
+  <div class="import">
+%(HEAD)s:
+  %(LINKS)s
+  </div>
+"""
+footer = """
+  </body>
+</html>"""
+
+def _ast_names(names):
+    result = []
+    for nm in names:
+        if isinstance(nm, ast.alias):
+            result.append(nm.name)
+        else:
+            result.append(nm)
+    return result
+
+
+if sys.version_info[0] == 2:
+    DEFAULT_IMPORT_LEVEL= -1
+else:
+    DEFAULT_IMPORT_LEVEL= 0
+
+class _Visitor (ast.NodeVisitor):
+    def __init__(self, graph, module):
+        self._graph = graph
+        self._module = module
+        self._level = DEFAULT_IMPORT_LEVEL
+        self._in_if = [False]
+        self._in_def = [False]
+        self._in_tryexcept = [False]
+
+    @property
+    def in_if(self):
+        return self._in_if[-1]
+
+    @property
+    def in_def(self):
+        return self._in_def[-1]
+
+    @property
+    def in_tryexcept(self):
+        return self._in_tryexcept[-1]
+
+    def _process_import(self, name, fromlist, level):
+
+        if sys.version_info[0] == 2:
+            if name == '__future__' and 'absolute_import' in (fromlist or ()):
+                self._level = 0
+
+        have_star = False
+        if fromlist is not None:
+            fromlist = set(fromlist)
+            if '*' in fromlist:
+                fromlist.remove('*')
+                have_star = True
+
+        imported_module = self._graph._safe_import_hook(name,
+            self._module, fromlist, level, attr=DependencyInfo(
+                conditional=self.in_if,
+                tryexcept=self.in_tryexcept,
+                function=self.in_def,
+                fromlist=False,
+            ))[0]
+        if have_star:
+            self._module.globalnames.update(imported_module.globalnames)
+            self._module.starimports.update(imported_module.starimports)
+            if imported_module.code is None:
+                self._module.starimports.add(name)
+
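+    # A rough sketch of how the visitor state used by _process_import above
+    # maps onto the DependencyInfo flags it records (illustrative only):
+    #
+    #     import os                    -> conditional/tryexcept/function all False
+    #     if DEBUG: import pdb         -> conditional=True
+    #     try:
+    #         import json
+    #     except ImportError:
+    #         pass                     -> tryexcept=True
+    #     def helper(): import re      -> function=True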
+
+    def visit_Import(self, node):
+        for nm in _ast_names(node.names):
+            self._process_import(nm, None, self._level)
+
+    def visit_ImportFrom(self, node):
+        level = node.level if node.level != 0 else self._level
+        self._process_import(node.module or '', _ast_names(node.names), level)
+
+    def visit_If(self, node):
+        self._in_if.append(True)
+        self.generic_visit(node)
+        self._in_if.pop()
+
+    def visit_FunctionDef(self, node):
+        self._in_def.append(True)
+        self.generic_visit(node)
+        self._in_def.pop()
+
+    def visit_Try(self, node):
+        self._in_tryexcept.append(True)
+        self.generic_visit(node)
+        self._in_tryexcept.pop()
+
+    def visit_ExceptHandler(self, node):
+        self._in_tryexcept.append(True)
+        self.generic_visit(node)
+        self._in_tryexcept.pop()
+
+    def visit_TryExcept(self, node):
+        self._in_tryexcept.append(True)
+        self.generic_visit(node)
+        self._in_tryexcept.pop()
+
+    def visit_Expression(self, node):
+        # Expression nodes cannot contain import statements or
+        # other nodes that are relevant for us.
+        pass
+
+    # Expression isn't actually used as such in AST trees,
+    # therefore define visitors for all kinds of expression nodes.
+    visit_BoolOp = visit_Expression
+    visit_BinOp = visit_Expression
+    visit_UnaryOp = visit_Expression
+    visit_Lambda = visit_Expression
+    visit_IfExp = visit_Expression
+    visit_Dict = visit_Expression
+    visit_Set = visit_Expression
+    visit_ListComp = visit_Expression
+    visit_SetComp = visit_Expression
+    visit_DictComp = visit_Expression
+    visit_GeneratorExp = visit_Expression
+    visit_Compare = visit_Expression
+    visit_Yield = visit_Expression
+    visit_YieldFrom = visit_Expression
+    visit_Await = visit_Expression
+    visit_Call = visit_Expression
+
+
+
+class ModuleGraph(ObjectGraph):
+    def __init__(self, path=None, excludes=(), replace_paths=(), implies=(), graph=None, debug=0):
+        super(ModuleGraph, self).__init__(graph=graph, debug=debug)
+        if path is None:
+            path = sys.path
+        self.path = path
+        self.lazynodes = {}
+        # excludes is stronger than implies
+        self.lazynodes.update(dict(implies))
+        for m in excludes:
+            self.lazynodes[m] = None
+        self.replace_paths = replace_paths
+
+        self.nspackages = self._calc_setuptools_nspackages()
+
+    def _calc_setuptools_nspackages(self):
+        # Setuptools has some magic handling for namespace
+        # packages when using 'install --single-version-externally-managed'
+        # (used by system packagers and also by pip)
+        #
+        # When this option is used, namespace packages are written to
+        # disk *without* an __init__.py file, which means the regular
+        # import machinery will not find them.
+        #
+        # We therefore explicitly look for the hack used by
+        # setuptools to get this kind of namespace packages to work.
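+        #
+        # A hedged illustration: such projects install a '<name>-nspkg.pth'
+        # file whose single line embeds the namespace package as a tuple
+        # literal, roughly ...*('zope',)... ; the loop below matches the line
+        # against the known prefixes in _SETUPTOOLS_NAMESPACEPKG_PTHs, slices
+        # out that tuple with _eval_str_tuple and records the directory the
+        # namespace package maps to.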
+
+        pkgmap = {}
+
+        try:
+            from pkgutil import ImpImporter
+        except ImportError:
+            try:
+                from _pkgutil import ImpImporter
+            except ImportError:
+                ImpImporter = pkg_resources.ImpWrapper
+
+        if sys.version_info[:2] >= (3,3):
+            import importlib.machinery
+            ImpImporter = importlib.machinery.FileFinder
+
+        for entry in self.path:
+            importer = pkg_resources.get_importer(entry)
+
+            if isinstance(importer, ImpImporter):
+                try:
+                    ldir = os.listdir(entry)
+                except os.error:
+                    continue
+
+                for fn in ldir:
+                    if fn.endswith('-nspkg.pth'):
+                        fp = open(os.path.join(entry, fn), 'rU')
+                        try:
+                            for ln in fp:
+                                for pfx in _SETUPTOOLS_NAMESPACEPKG_PTHs:
+                                    if ln.startswith(pfx):
+                                        try:
+                                            start = len(pfx)-2
+                                            stop = ln.index(')', start)+1
+                                        except ValueError:
+                                            continue
+
+                                        pkg = _eval_str_tuple(ln[start:stop])
+                                        identifier = ".".join(pkg)
+                                        subdir = os.path.join(entry, *pkg)
+                                        if os.path.exists(os.path.join(subdir, '__init__.py')):
+                                            # There is a real __init__.py, ignore the setuptools hack
+                                            continue
+
+                                        if identifier in pkgmap:
+                                            pkgmap[identifier].append(subdir)
+                                        else:
+                                            pkgmap[identifier] = [subdir]
+                                        break
+                        finally:
+                            fp.close()
+
+        return pkgmap
+
+    def implyNodeReference(self, node, other, edge_data=None):
+        """
+        Imply that one node depends on another.
+        other may be a module name or another node.
+
+        For use by extension modules and tricky import code
+        """
+        if isinstance(other, Node):
+            self._updateReference(node, other, edge_data)
+
+        else:
+            if isinstance(other, tuple):
+                raise ValueError(other)
+
+            others = self._safe_import_hook(other, node, None)
+            for other in others:
+                self._updateReference(node, other, edge_data)
+
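+    # Usage sketch: a recipe for an extension module can record an import that
+    # is invisible to bytecode scanning, e.g.
+    #
+    #     graph.implyNodeReference(node, 'some.hidden.dependency')
+    #
+    # where 'some.hidden.dependency' is a hypothetical module name that is
+    # resolved through _safe_import_hook as shown above.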
+
+    def getReferences(self, fromnode):
+        """
+        Yield all nodes that 'fromnode' depends on (that is,
+        all modules that 'fromnode' imports).
+        """
+        node = self.findNode(fromnode)
+        out_edges, _ = self.get_edges(node)
+        return out_edges
+
+    def getReferers(self, tonode, collapse_missing_modules=True):
+        node = self.findNode(tonode)
+        _, in_edges = self.get_edges(node)
+
+        if collapse_missing_modules:
+            for n in in_edges:
+                if isinstance(n, MissingModule):
+                    for n in self.getReferers(n, False):
+                        yield n
+
+                else:
+                    yield n
+
+        else:
+            for n in in_edges:
+                yield n
+
+    def hasEdge(self, fromnode, tonode):
+        """ Return True iff there is an edge from 'fromnode' to 'tonode' """
+        fromnode = self.findNode(fromnode)
+        tonode = self.findNode(tonode)
+
+        return self.graph.edge_by_node(fromnode, tonode) is not None
+
+
+    def foldReferences(self, packagenode):
+        """
+        Create edges to/from 'packagenode' based on the
+        edges to/from modules in package. The module nodes
+        are then hidden.
+        """
+        pkg = self.findNode(packagenode)
+
+        for n in self.nodes():
+            if not n.identifier.startswith(pkg.identifier + '.'):
+                continue
+
+            iter_out, iter_inc = self.get_edges(n)
+            for other in iter_out:
+                if other.identifier.startswith(pkg.identifier + '.'):
+                    continue
+
+                if not self.hasEdge(pkg, other):
+                    # Ignore circular dependencies
+                    self._updateReference(pkg, other, 'pkg-internal-import')
+
+            for other in iter_inc:
+                if other.identifier.startswith(pkg.identifier + '.'):
+                    # Ignore circular dependencies
+                    continue
+
+                if not self.hasEdge(other, pkg):
+                    self._updateReference(other, pkg, 'pkg-import')
+
+            self.graph.hide_node(n)
+
+    # TODO: unfoldReferences(pkg) that restore the submodule nodes and
+    #       removes 'pkg-import' and 'pkg-internal-import' edges. Care should
+    #       be taken to ensure that references are correct if multiple packages
+    #       are folded and then one of them is unfolded
+
+
+    def _updateReference(self, fromnode, tonode, edge_data):
+        try:
+            ed = self.edgeData(fromnode, tonode)
+        except (KeyError, GraphError): # XXX: Why 'GraphError'
+            return self.createReference(fromnode, tonode, edge_data)
+
+        if not (isinstance(ed, DependencyInfo) and isinstance(edge_data, DependencyInfo)):
+            self.updateEdgeData(fromnode, tonode, edge_data)
+        else:
+            self.updateEdgeData(fromnode, tonode, ed._merged(edge_data))
+
+
+    def createReference(self, fromnode, tonode, edge_data='direct'):
+        """
+        Create a reference from fromnode to tonode
+        """
+        return super(ModuleGraph, self).createReference(fromnode, tonode, edge_data=edge_data)
+
+    def findNode(self, name):
+        """
+        Find a node by identifier.  If a node by that identifier exists,
+        it will be returned.
+
+        If a lazy node exists by that identifier with no dependencies (excluded),
+        it will be instantiated and returned.
+
+        If a lazy node exists by that identifier with dependencies, it and its
+        dependencies will be instantiated and scanned for additional dependencies.
+        """
+        data = super(ModuleGraph, self).findNode(name)
+        if data is not None:
+            return data
+        if name in self.lazynodes:
+            deps = self.lazynodes.pop(name)
+            if deps is None:
+                # excluded module
+                m = self.createNode(ExcludedModule, name)
+            elif isinstance(deps, Alias):
+                other = self._safe_import_hook(deps, None, None).pop()
+                m = self.createNode(AliasNode, name, other)
+                self.implyNodeReference(m, other)
+
+            else:
+                m = self._safe_import_hook(name, None, None).pop()
+                for dep in deps:
+                    self.implyNodeReference(m, dep)
+            return m
+
+        if name in self.nspackages:
+            # name is a --single-version-externally-managed
+            # namespace package (setuptools/distribute)
+            pathnames = self.nspackages.pop(name)
+            m = self.createNode(NamespacePackage, name)
+
+            # FIXME: The filename must be set to a string to ensure that py2app
+            # works, it is not clear yet why that is. Setting to None would be
+            # cleaner.
+            m.filename = '-'
+            m.packagepath = _namespace_package_path(name, pathnames, self.path)
+
+            # As per comment at top of file, simulate runtime packagepath additions.
+            m.packagepath = m.packagepath + _packagePathMap.get(name, [])
+            return m
+
+        return None
+
+    def run_script(self, pathname, caller=None):
+        """
+        Create a node by path (not module name).  It is expected to be a Python
+        source file, and will be scanned for dependencies.
+        """
+        self.msg(2, "run_script", pathname)
+        pathname = os.path.realpath(pathname)
+        m = self.findNode(pathname)
+        if m is not None:
+            return m
+
+        if sys.version_info[0] != 2:
+            with open(pathname, 'rb') as fp:
+                encoding = util.guess_encoding(fp)
+
+            with open(pathname, _READ_MODE, encoding=encoding) as fp:
+                contents = fp.read() + '\n'
+
+        else:
+            with open(pathname, _READ_MODE) as fp:
+                contents = fp.read() + '\n'
+
+        co = compile(contents, pathname, 'exec', ast.PyCF_ONLY_AST, True)
+        m = self.createNode(Script, pathname)
+        self._updateReference(caller, m, None)
+        self._scan_code(co, m)
+        m.code = compile(co, pathname, 'exec', 0, True)
+        if self.replace_paths:
+            m.code = self._replace_paths_in_code(m.code)
+        return m
+
+    def import_hook(self, name, caller=None, fromlist=None, level=DEFAULT_IMPORT_LEVEL, attr=None):
+        """
+        Import a module
+
+        Return the set of modules that are imported
+        """
+        self.msg(3, "import_hook", name, caller, fromlist, level)
+        parent = self._determine_parent(caller)
+        q, tail = self._find_head_package(parent, name, level)
+        m = self._load_tail(q, tail)
+        modules = [m]
+        if fromlist and m.packagepath:
+            for s in self._ensure_fromlist(m, fromlist):
+                if s not in modules:
+                    modules.append(s)
+        for m in modules:
+            self._updateReference(caller, m, edge_data=attr)
+        return modules
+
+    def _determine_parent(self, caller):
+        """
+        Determine the package containing a node
+        """
+        self.msgin(4, "determine_parent", caller)
+        parent = None
+        if caller:
+            pname = caller.identifier
+
+            if isinstance(caller, Package):
+                parent = caller
+
+            elif '.' in pname:
+                pname = pname[:pname.rfind('.')]
+                parent = self.findNode(pname)
+
+            elif caller.packagepath:
+                # XXX: I have no idea why this line
+                # is necessary.
+                parent = self.findNode(pname)
+
+
+        self.msgout(4, "determine_parent ->", parent)
+        return parent
+
+    def _find_head_package(self, parent, name, level=DEFAULT_IMPORT_LEVEL):
+        """
+        Given a calling parent package and an import name, determine the
+        containing package for the name
+        """
+        self.msgin(4, "find_head_package", parent, name, level)
+        if '.' in name:
+            head, tail = name.split('.', 1)
+        else:
+            head, tail = name, ''
+
+        if level == -1:
+            if parent:
+                qname = parent.identifier + '.' + head
+            else:
+                qname = head
+
+        elif level == 0:
+            qname = head
+
+            # Absolute import, ignore the parent
+            parent = None
+
+        else:
+            if parent is None:
+                self.msg(2, "Relative import outside of package")
+                raise ImportError("Relative import outside of package (name=%r, parent=%r, level=%r)"%(name, parent, level))
+
+            for i in range(level-1):
+                if '.' not in parent.identifier:
+                    self.msg(2, "Relative import outside of package")
+                    raise ImportError("Relative import outside of package (name=%r, parent=%r, level=%r)"%(name, parent, level))
+
+                p_fqdn = parent.identifier.rsplit('.', 1)[0]
+                new_parent = self.findNode(p_fqdn)
+                if new_parent is None:
+                    self.msg(2, "Relative import outside of package")
+                    raise ImportError("Relative import outside of package (name=%r, parent=%r, level=%r)"%(name, parent, level))
+
+                assert new_parent is not parent, (new_parent, parent)
+                parent = new_parent
+
+            if head:
+                qname = parent.identifier + '.' + head
+            else:
+                qname = parent.identifier
+
+
+        q = self._import_module(head, qname, parent)
+        if q:
+            self.msgout(4, "find_head_package ->", (q, tail))
+            return q, tail
+        if parent:
+            qname = head
+            parent = None
+            q = self._import_module(head, qname, parent)
+            if q:
+                self.msgout(4, "find_head_package ->", (q, tail))
+                return q, tail
+        self.msgout(4, "raise ImportError: No module named", qname)
+        raise ImportError("No module named " + qname)
+
+    def _load_tail(self, mod, tail):
+        self.msgin(4, "load_tail", mod, tail)
+        result = mod
+        while tail:
+            i = tail.find('.')
+            if i < 0: i = len(tail)
+            head, tail = tail[:i], tail[i+1:]
+            mname = "%s.%s" % (result.identifier, head)
+            result = self._import_module(head, mname, result)
+            if result is None:
+                result = self.createNode(MissingModule, mname)
+                #self.msgout(4, "raise ImportError: No module named", mname)
+                #raise ImportError("No module named " + mname)
+        self.msgout(4, "load_tail ->", result)
+        return result
+
+    def _ensure_fromlist(self, m, fromlist):
+        fromlist = set(fromlist)
+        self.msg(4, "ensure_fromlist", m, fromlist)
+        if '*' in fromlist:
+            fromlist.update(self._find_all_submodules(m))
+            fromlist.remove('*')
+        for sub in fromlist:
+            submod = m.get(sub)
+            if submod is None:
+                if sub in m.globalnames:
+                    # Name is a global in the module
+                    continue
+                # XXX: ^^^ need something similar for names imported
+                #      by 'm'.
+
+                fullname = m.identifier + '.' + sub
+                submod = self._import_module(sub, fullname, m)
+                if submod is None:
+                    raise ImportError("No module named " + fullname)
+            yield submod
+
+    def _find_all_submodules(self, m):
+        if not m.packagepath:
+            return
+        # 'suffixes' used to be a list hardcoded to [".py", ".pyc", ".pyo"].
+        # But we must also collect Python extension modules - although
+        # we cannot separate normal dlls from Python extensions.
+        suffixes = [triple[0] for triple in imp.get_suffixes()]
+        for path in m.packagepath:
+            try:
+                names = zipio.listdir(path)
+            except (os.error, IOError):
+                self.msg(2, "can't list directory", path)
+                continue
+            for info in (moduleInfoForPath(p) for p in names):
+                if info is None: continue
+                if info[0] != '__init__':
+                    yield info[0]
+
+    def _import_module(self, partname, fqname, parent):
+        # XXX: Review me for use with absolute imports.
+        self.msgin(3, "import_module", partname, fqname, parent)
+        m = self.findNode(fqname)
+        if m is not None:
+            self.msgout(3, "import_module ->", m)
+            if parent:
+                self._updateReference(m, parent, edge_data=DependencyInfo(
+                    conditional=False, fromlist=False, function=False, tryexcept=False
+                ))
+            return m
+
+        if parent and parent.packagepath is None:
+            self.msgout(3, "import_module -> None")
+            return None
+
+        try:
+            searchpath = None
+            if parent is not None and parent.packagepath:
+                searchpath = parent.packagepath
+
+            fp, pathname, stuff = self._find_module(partname,
+                searchpath, parent)
+
+        except ImportError:
+            self.msgout(3, "import_module ->", None)
+            return None
+
+        try:
+            m = self._load_module(fqname, fp, pathname, stuff)
+
+        finally:
+            if fp is not None:
+                fp.close()
+
+        if parent:
+            self.msgout(4, "create reference", m, "->", parent)
+            self._updateReference(m, parent, edge_data=DependencyInfo(
+                conditional=False, fromlist=False, function=False, tryexcept=False
+            ))
+            parent[partname] = m
+
+        self.msgout(3, "import_module ->", m)
+        return m
+
+    def _load_module(self, fqname, fp, pathname, info):
+        suffix, mode, typ = info
+        self.msgin(2, "load_module", fqname, fp and "fp", pathname)
+
+        if typ == imp.PKG_DIRECTORY:
+            if isinstance(mode, (list, tuple)):
+                packagepath = mode
+            else:
+                packagepath = []
+
+            m = self._load_package(fqname, pathname, packagepath)
+            self.msgout(2, "load_module ->", m)
+            return m
+
+        if typ == imp.PY_SOURCE:
+            contents = fp.read()
+            if isinstance(contents, bytes):
+                contents += b'\n'
+            else:
+                contents += '\n'
+
+            try:
+                co = compile(contents, pathname, 'exec', ast.PyCF_ONLY_AST, True)
+                #co = compile(contents, pathname, 'exec', 0, True)
+            except SyntaxError:
+                co = None
+                cls = InvalidSourceModule
+
+            else:
+                cls = SourceModule
+
+        elif typ == imp.PY_COMPILED:
+            if fp.read(4) != imp.get_magic():
+                self.msgout(2, "raise ImportError: Bad magic number", pathname)
+                co = None
+                cls = InvalidCompiledModule
+
+            else:
+                fp.read(4)
+                try:
+                    co = marshal.loads(fp.read())
+                    cls = CompiledModule
+                except Exception:
+                    co = None
+                    cls = InvalidCompiledModule
+
+        elif typ == imp.C_BUILTIN:
+            cls = BuiltinModule
+            co = None
+
+        else:
+            cls = Extension
+            co = None
+
+        m = self.createNode(cls, fqname)
+        m.filename = pathname
+        if co is not None:
+            self._scan_code(co, m)
+
+            if isinstance(co, ast.AST):
+                co = compile(co, pathname, 'exec', 0, True)
+            if self.replace_paths:
+                co = self._replace_paths_in_code(co)
+            m.code = co
+
+
+        self.msgout(2, "load_module ->", m)
+        return m
+
+    def _safe_import_hook(self, name, caller, fromlist, level=DEFAULT_IMPORT_LEVEL, attr=None):
+        # wrapper for self.import_hook() that won't raise ImportError
+        try:
+            mods = self.import_hook(name, caller, level=level, attr=attr)
+        except ImportError as msg:
+            self.msg(2, "ImportError:", str(msg))
+            m = self.createNode(MissingModule, _path_from_importerror(msg, name))
+            self._updateReference(caller, m, edge_data=attr)
+
+        else:
+            assert len(mods) == 1
+            m = list(mods)[0]
+
+        subs = [m]
+        if isinstance(attr, DependencyInfo):
+            attr = attr._replace(fromlist=True)
+        for sub in (fromlist or ()):
+            # If this name is in the module namespace already,
+            # then add the entry to the list of substitutions
+            if sub in m:
+                sm = m[sub]
+                if sm is not None:
+                    if sm not in subs:
+                        self._updateReference(caller, sm, edge_data=attr)
+                        subs.append(sm)
+                    continue
+
+            elif sub in m.globalnames:
+                # Global variable in the module, ignore
+                continue
+
+
+            # See if we can load it
+            #    fullname = name + '.' + sub
+            fullname = m.identifier + '.' + sub
+            #else:
+            #    print("XXX", repr(name), repr(sub), repr(caller), repr(m))
+            sm = self.findNode(fullname)
+            if sm is None:
+                try:
+                    sm = self.import_hook(name, caller, fromlist=[sub], level=level, attr=attr)
+                except ImportError as msg:
+                    self.msg(2, "ImportError:", str(msg))
+                    #sm = self.createNode(MissingModule, _path_from_importerror(msg, fullname))
+                    sm = self.createNode(MissingModule, fullname)
+                else:
+                    sm = self.findNode(fullname)
+                    if sm is None:
+                        sm = self.createNode(MissingModule, fullname)
+
+            m[sub] = sm
+            if sm is not None:
+                self._updateReference(m, sm, edge_data=attr)
+                self._updateReference(caller, sm, edge_data=attr)
+                if sm not in subs:
+                    subs.append(sm)
+        return subs
+
+    def _scan_code(self, co, m):
+        if isinstance(co, ast.AST):
+            #return self._scan_bytecode(compile(co, '-', 'exec', 0, True), m)
+            self._scan_ast(co, m)
+            self._scan_bytecode_stores(
+                    compile(co, '-', 'exec', 0, True), m)
+
+        else:
+            self._scan_bytecode(co, m)
+
+    def _scan_ast(self, co, m):
+        visitor = _Visitor(self, m)
+        visitor.visit(co)
+
+    def _scan_bytecode_stores(self, co, m,
+            STORE_NAME=_Bchr(dis.opname.index('STORE_NAME')),
+            STORE_GLOBAL=_Bchr(dis.opname.index('STORE_GLOBAL')),
+            HAVE_ARGUMENT=_Bchr(dis.HAVE_ARGUMENT),
+            unpack=struct.unpack):
+
+        extended_import = bool(sys.version_info[:2] >= (2,5))
+
+        code = co.co_code
+        constants = co.co_consts
+        n = len(code)
+        i = 0
+
+        while i < n:
+            c = code[i]
+            i += 1
+            if c >= HAVE_ARGUMENT:
+                i = i+2
+
+            if c == STORE_NAME or c == STORE_GLOBAL:
+                # keep track of all global names that are assigned to
+                oparg = unpack('<H', code[i - 2:i])[0]
+                name = co.co_names[oparg]
+                m.globalnames.add(name)
+
+        cotype = type(co)
+        for c in constants:
+            if isinstance(c, cotype):
+                self._scan_bytecode_stores(c, m)
+
+    def _scan_bytecode(self, co, m,
+            HAVE_ARGUMENT=_Bchr(dis.HAVE_ARGUMENT),
+            LOAD_CONST=_Bchr(dis.opname.index('LOAD_CONST')),
+            IMPORT_NAME=_Bchr(dis.opname.index('IMPORT_NAME')),
+            IMPORT_FROM=_Bchr(dis.opname.index('IMPORT_FROM')),
+            STORE_NAME=_Bchr(dis.opname.index('STORE_NAME')),
+            STORE_GLOBAL=_Bchr(dis.opname.index('STORE_GLOBAL')),
+            unpack=struct.unpack):
+
+        # Python >=2.5: LOAD_CONST flags, LOAD_CONST names, IMPORT_NAME name
+        # Python < 2.5: LOAD_CONST names, IMPORT_NAME name
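+        #
+        # For example, on CPython 2.7 'from os import path' compiles to roughly
+        #     LOAD_CONST   -1 or 0      (the level constant)
+        #     LOAD_CONST   ('path',)    (the fromlist constant)
+        #     IMPORT_NAME  os
+        # which is why the two arguments are unpacked from the nine bytes that
+        # precede the end of the IMPORT_NAME instruction below (this assumes
+        # the 3-byte opcode encoding used before Python 3.6 wordcode).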
+        extended_import = bool(sys.version_info[:2] >= (2,5))
+
+        code = co.co_code
+        constants = co.co_consts
+        n = len(code)
+        i = 0
+
+        level = None
+        fromlist = None
+
+        while i < n:
+            c = code[i]
+            i += 1
+            if c >= HAVE_ARGUMENT:
+                i = i+2
+
+            if c == IMPORT_NAME:
+                if extended_import:
+                    assert code[i-9] == LOAD_CONST
+                    assert code[i-6] == LOAD_CONST
+                    arg1, arg2 = unpack('<xHxH', code[i-9:i-3])
+                    level = co.co_consts[arg1]
+                    fromlist = co.co_consts[arg2]
+                else:
+                    assert code[i-6] == LOAD_CONST
+                    arg1, = unpack('<xH', code[i-6:i-3])
+                    level = -1
+                    fromlist = co.co_consts[arg1]
+
+                assert fromlist is None or type(fromlist) is tuple
+                oparg, = unpack('<H', code[i - 2:i])
+                name = co.co_names[oparg]
+                have_star = False
+                if fromlist is not None:
+                    fromlist = set(fromlist)
+                    if '*' in fromlist:
+                        fromlist.remove('*')
+                        have_star = True
+
+                #self.msgin(2, "Before import hook", repr(name), repr(m), repr(fromlist), repr(level))
+
+                imported_module = self._safe_import_hook(name, m, fromlist, level)[0]
+
+                if have_star:
+                    m.globalnames.update(imported_module.globalnames)
+                    m.starimports.update(imported_module.starimports)
+                    if imported_module.code is None:
+                        m.starimports.add(name)
+
+            elif c == STORE_NAME or c == STORE_GLOBAL:
+                # keep track of all global names that are assigned to
+                oparg = unpack('<H', code[i - 2:i])[0]
+                name = co.co_names[oparg]
+                m.globalnames.add(name)
+
+        cotype = type(co)
+        for c in constants:
+            if isinstance(c, cotype):
+                self._scan_bytecode(c, m)
+
+    def _load_package(self, fqname, pathname, pkgpath):
+        """
+        Called only when an imp.PKG_DIRECTORY is found
+        """
+        self.msgin(2, "load_package", fqname, pathname, pkgpath)
+        newname = _replacePackageMap.get(fqname)
+        if newname:
+            fqname = newname
+
+        ns_pkgpath = _namespace_package_path(fqname, pkgpath or [], self.path)
+        if ns_pkgpath or pkgpath:
+            # this is a namespace package
+            m = self.createNode(NamespacePackage, fqname)
+            m.filename = '-'
+            m.packagepath = ns_pkgpath
+        else:
+            m = self.createNode(Package, fqname)
+            m.filename = pathname
+            m.packagepath = [pathname] + ns_pkgpath
+
+        # As per comment at top of file, simulate runtime packagepath additions.
+        m.packagepath = m.packagepath + _packagePathMap.get(fqname, [])
+
+
+
+        try:
+            self.msg(2, "find __init__ for %s"%(m.packagepath,))
+            fp, buf, stuff = self._find_module("__init__", m.packagepath, parent=m)
+        except ImportError:
+            pass
+
+        else:
+            try:
+                self.msg(2, "load __init__ for %s"%(m.packagepath,))
+                self._load_module(fqname, fp, buf, stuff)
+            finally:
+                if fp is not None:
+                    fp.close()
+        self.msgout(2, "load_package ->", m)
+        return m
+
+    def _find_module(self, name, path, parent=None):
+        if parent is not None:
+            # assert path is not None
+            fullname = parent.identifier + '.' + name
+        else:
+            fullname = name
+
+        node = self.findNode(fullname)
+        if node is not None:
+            self.msgout(3, "find_module -> already included?", node)
+            raise ImportError(name)
+
+        if path is None:
+            if name in sys.builtin_module_names:
+                return (None, None, ("", "", imp.C_BUILTIN))
+
+            path = self.path
+
+        fp, buf, stuff = find_module(name, path)
+        try:
+            if buf:
+                buf = os.path.realpath(buf)
+
+            return (fp, buf, stuff)
+        except:
+            fp.close()
+            raise
+
+    def create_xref(self, out=None):
+        global header, footer, entry, contpl, contpl_linked, imports
+        if out is None:
+            out = sys.stdout
+        scripts = []
+        mods = []
+        for mod in self.flatten():
+            name = os.path.basename(mod.identifier)
+            if isinstance(mod, Script):
+                scripts.append((name, mod))
+            else:
+                mods.append((name, mod))
+        scripts.sort()
+        mods.sort()
+        scriptnames = [name for name, m in scripts]
+        scripts.extend(mods)
+        mods = scripts
+
+        title = "modulegraph cross reference for "  + ', '.join(scriptnames)
+        print(header % {"TITLE": title}, file=out)
+
+        def sorted_namelist(mods):
+            lst = [os.path.basename(mod.identifier) for mod in mods if mod]
+            lst.sort()
+            return lst
+        for name, m in mods:
+            content = ""
+            if isinstance(m, BuiltinModule):
+                content = contpl % {"NAME": name,
+                                    "TYPE": "<i>(builtin module)</i>"}
+            elif isinstance(m, Extension):
+                content = contpl % {"NAME": name,
+                                    "TYPE": "<tt>%s</tt>" % m.filename}
+            else:
+                url = pathname2url(m.filename or "")
+                content = contpl_linked % {"NAME": name, "URL": url}
+            oute, ince = map(sorted_namelist, self.get_edges(m))
+            if oute:
+                links = ""
+                for n in oute:
+                    links += """  <a href="#%s">%s</a>\n""" % (n, n)
+                content += imports % {"HEAD": "imports", "LINKS": links}
+            if ince:
+                links = ""
+                for n in ince:
+                    links += """  <a href="#%s">%s</a>\n""" % (n, n)
+                content += imports % {"HEAD": "imported by", "LINKS": links}
+            print(entry % {"NAME": name,"CONTENT": content}, file=out)
+        print(footer, file=out)
+
+
+    def itergraphreport(self, name='G', flatpackages=()):
+        # XXX: Can this be implemented using Dot()?
+        nodes = map(self.graph.describe_node, self.graph.iterdfs(self))
+        describe_edge = self.graph.describe_edge
+        edges = deque()
+        packagenodes = set()
+        packageidents = {}
+        nodetoident = {}
+        inpackages = {}
+        mainedges = set()
+
+        # XXX - implement
+        flatpackages = dict(flatpackages)
+
+        def nodevisitor(node, data, outgoing, incoming):
+            if not isinstance(data, Node):
+                return {'label': str(node)}
+            #if isinstance(d, (ExcludedModule, MissingModule, BadModule)):
+            #    return None
+            s = '<f0> ' + type(data).__name__
+            for i,v in enumerate(data.infoTuple()[:1], 1):
+                s += '| <f%d> %s' % (i,v)
+            return {'label':s, 'shape':'record'}
+
+
+        def edgevisitor(edge, data, head, tail):
+            # XXX: This method is nonsense; the edge
+            # data is never initialized.
+            if data == 'orphan':
+                return {'style':'dashed'}
+            elif data == 'pkgref':
+                return {'style':'dotted'}
+            return {}
+
+        yield 'digraph %s {\n' % (name,)
+        attr = dict(rankdir='LR', concentrate='true')
+        cpatt  = '%s="%s"'
+        for item in attr.items():
+            yield '\t%s;\n' % (cpatt % item,)
+
+        # find all packages (subgraphs)
+        for (node, data, outgoing, incoming) in nodes:
+            nodetoident[node] = getattr(data, 'identifier', None)
+            if isinstance(data, Package):
+                packageidents[data.identifier] = node
+                inpackages[node] = set([node])
+                packagenodes.add(node)
+
+
+        # create sets for subgraph, write out descriptions
+        for (node, data, outgoing, incoming) in nodes:
+            # update edges
+            for edge in (describe_edge(e) for e in outgoing):
+                edges.append(edge)
+
+            # describe node
+            yield '\t"%s" [%s];\n' % (
+                node,
+                ','.join([
+                    (cpatt % item) for item in
+                    nodevisitor(node, data, outgoing, incoming).items()
+                ]),
+            )
+
+            inside = inpackages.get(node)
+            if inside is None:
+                inside = inpackages[node] = set()
+            ident = nodetoident[node]
+            if ident is None:
+                continue
+            pkgnode = packageidents.get(ident[:ident.rfind('.')])
+            if pkgnode is not None:
+                inside.add(pkgnode)
+
+
+        graph = []
+        subgraphs = {}
+        for key in packagenodes:
+            subgraphs[key] = []
+
+        while edges:
+            edge, data, head, tail = edges.popleft()
+            if (head, tail) in mainedges:
+                continue
+            mainedges.add((head, tail))
+            tailpkgs = inpackages[tail]
+            common = inpackages[head] & tailpkgs
+            if not common and tailpkgs:
+                usepkgs = sorted(tailpkgs)
+                if len(usepkgs) != 1 or usepkgs[0] != tail:
+                    edges.append((edge, data, head, usepkgs[0]))
+                    edges.append((edge, 'pkgref', usepkgs[-1], tail))
+                    continue
+            if common:
+                common = common.pop()
+                if tail == common:
+                    edges.append((edge, data, tail, head))
+                elif head == common:
+                    subgraphs[common].append((edge, 'pkgref', head, tail))
+                else:
+                    edges.append((edge, data, common, head))
+                    edges.append((edge, data, common, tail))
+
+            else:
+                graph.append((edge, data, head, tail))
+
+        def do_graph(edges, tabs):
+            edgestr = tabs + '"%s" -> "%s" [%s];\n'
+            # describe edge
+            for (edge, data, head, tail) in edges:
+                attribs = edgevisitor(edge, data, head, tail)
+                yield edgestr % (
+                    head,
+                    tail,
+                    ','.join([(cpatt % item) for item in attribs.items()]),
+                )
+
+        for g, edges in subgraphs.items():
+            yield '\tsubgraph "cluster_%s" {\n' % (g,)
+            yield '\t\tlabel="%s";\n' % (nodetoident[g],)
+            for s in do_graph(edges, '\t\t'):
+                yield s
+            yield '\t}\n'
+
+        for s in do_graph(graph, '\t'):
+            yield s
+
+        yield '}\n'
+
+
+    def graphreport(self, fileobj=None, flatpackages=()):
+        if fileobj is None:
+            fileobj = sys.stdout
+        fileobj.writelines(self.itergraphreport(flatpackages=flatpackages))
+
+    def report(self):
+        """Print a report to stdout, listing the found modules with their
+        paths, as well as modules that are missing, or seem to be missing.
+        """
+        print()
+        print("%-15s %-25s %s" % ("Class", "Name", "File"))
+        print("%-15s %-25s %s" % ("-----", "----", "----"))
+        # Print modules found
+        sorted = [(os.path.basename(mod.identifier), mod) for mod in self.flatten()]
+        sorted.sort()
+        for (name, m) in sorted:
+            print("%-15s %-25s %s" % (type(m).__name__, name, m.filename or ""))
+
+    def _replace_paths_in_code(self, co):
+        new_filename = original_filename = os.path.normpath(co.co_filename)
+        for f, r in self.replace_paths:
+            f = os.path.join(f, '')
+            r = os.path.join(r, '')
+            if original_filename.startswith(f):
+                new_filename = r + original_filename[len(f):]
+                break
+
+        else:
+            return co
+
+        consts = list(co.co_consts)
+        for i in range(len(consts)):
+            if isinstance(consts[i], type(co)):
+                consts[i] = self._replace_paths_in_code(consts[i])
+
+        code_func = type(co)
+
+        if hasattr(co, 'co_kwonlyargcount'):
+            return code_func(co.co_argcount, co.co_kwonlyargcount, co.co_nlocals, co.co_stacksize,
+                         co.co_flags, co.co_code, tuple(consts), co.co_names,
+                         co.co_varnames, new_filename, co.co_name,
+                         co.co_firstlineno, co.co_lnotab,
+                         co.co_freevars, co.co_cellvars)
+        else:
+            return code_func(co.co_argcount, co.co_nlocals, co.co_stacksize,
+                         co.co_flags, co.co_code, tuple(consts), co.co_names,
+                         co.co_varnames, new_filename, co.co_name,
+                         co.co_firstlineno, co.co_lnotab,
+                         co.co_freevars, co.co_cellvars)
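+
+
+# Minimal usage sketch (assuming the package is importable as
+# 'modulegraph.modulegraph', as in the tests shipped alongside this module):
+#
+#     from modulegraph.modulegraph import ModuleGraph
+#
+#     mg = ModuleGraph()
+#     mg.run_script('/path/to/some_script.py')   # hypothetical script path
+#     mg.report()                                # table of discovered modules
+#     with open('xref.html', 'w') as fh:
+#         mg.create_xref(out=fh)                 # HTML cross reference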
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph/util.py b/catapult/telemetry/third_party/modulegraph/modulegraph/util.py
new file mode 100644
index 0000000..acf6bc1
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph/util.py
@@ -0,0 +1,119 @@
+from __future__ import absolute_import
+
+import os
+import imp
+import sys
+import re
+import marshal
+import warnings
+
+try:
+    unicode
+except NameError:
+    unicode = str
+
+
+if sys.version_info[0] == 2:
+    from StringIO import StringIO as BytesIO
+    from StringIO import StringIO
+
+else:
+    from io import BytesIO, StringIO
+
+
+
+def imp_find_module(name, path=None):
+    """
+    same as imp.find_module, but handles dotted names
+    """
+    names = name.split('.')
+    if path is not None:
+        if isinstance(path, (str, unicode)):
+            path = [os.path.realpath(path)]
+    for name in names:
+        result = imp.find_module(name, path)
+        if result[0] is not None:
+            result[0].close()
+        path = [result[1]]
+    return result
+
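+# Example (sketch): imp_find_module('xml.dom') walks the dotted name one
+# component at a time and returns the (file, pathname, description) triple for
+# the final component, just as imp.find_module would for a top-level name.
+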
+def _check_importer_for_path(name, path_item):
+    try:
+        importer = sys.path_importer_cache[path_item]
+    except KeyError:
+        for path_hook in sys.path_hooks:
+            try:
+                importer = path_hook(path_item)
+                break
+            except ImportError:
+                pass
+        else:
+            importer = None
+        sys.path_importer_cache.setdefault(path_item, importer)
+
+
+    if importer is None:
+        try:
+            return imp.find_module(name, [path_item])
+        except ImportError:
+            return None
+    return importer.find_module(name)
+
+def imp_walk(name):
+    """
+    yields namepart, tuple_or_importer for each path item
+
+    raise ImportError if a name can not be found.
+    """
+    warnings.warn("imp_walk will be removed in a future version", DeprecationWarning)
+
+    if name in sys.builtin_module_names:
+        yield name, (None, None, ("", "", imp.C_BUILTIN))
+        return
+    paths = sys.path
+    res = None
+    for namepart in name.split('.'):
+        for path_item in paths:
+            res = _check_importer_for_path(namepart, path_item)
+            if hasattr(res, 'load_module'):
+                if res.path.endswith('.py') or res.path.endswith('.pyw'):
+                    fp = StringIO(res.get_source(namepart))
+                    res = (fp, res.path, ('.py', 'rU', imp.PY_SOURCE))
+                elif res.path.endswith('.pyc') or res.path.endswith('.pyo'):
+                    co  = res.get_code(namepart)
+                    fp = BytesIO(imp.get_magic() + b'\0\0\0\0' + marshal.dumps(co))
+                    res = (fp, res.path, ('.pyc', 'rb', imp.PY_COMPILED))
+
+                else:
+                    res = (None, res.path, (os.path.splitext(res.path)[-1], 'rb', imp.C_EXTENSION))
+
+                break
+            elif isinstance(res, tuple):
+                break
+        else:
+            break
+
+        yield namepart, res
+        paths = [os.path.join(path_item, namepart)]
+    else:
+        return
+
+    raise ImportError('No module named %s' % (name,))
+
+
+cookie_re = re.compile(b"coding[:=]\s*([-\w.]+)")
+if sys.version_info[0] == 2:
+    default_encoding = 'ascii'
+else:
+    default_encoding = 'utf-8'
+
+def guess_encoding(fp):
+
+    for i in range(2):
+        ln = fp.readline()
+
+        m = cookie_re.search(ln)
+        if m is not None:
+            return m.group(1).decode('ascii')
+
+    return default_encoding
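+
+# Example (sketch): for a source file that starts with
+#     #!/usr/bin/env python
+#     # -*- coding: latin-1 -*-
+# guess_encoding(fp) returns 'latin-1'; when no PEP 263 cookie appears in the
+# first two lines it falls back to default_encoding.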
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph/zipio.py b/catapult/telemetry/third_party/modulegraph/modulegraph/zipio.py
new file mode 100644
index 0000000..34d580e
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph/zipio.py
@@ -0,0 +1,426 @@
+"""
+A helper module that can work with paths
+that can refer to data inside a zipfile
+
+XXX: Need to determine if isdir("zipfile.zip")
+should return True or False. Currently returns
+True, but that might do the wrong thing with
+data-files that are zipfiles.
+"""
+import os as _os
+import zipfile as _zipfile
+import errno as _errno
+import time as _time
+import sys as _sys
+import stat as _stat
+
+_DFLT_DIR_MODE = (
+      _stat.S_IFDIR
+    | _stat.S_IXOTH
+    | _stat.S_IXGRP
+    | _stat.S_IXUSR
+    | _stat.S_IROTH
+    | _stat.S_IRGRP
+    | _stat.S_IRUSR)
+
+_DFLT_FILE_MODE = (
+      _stat.S_IFREG
+    | _stat.S_IROTH
+    | _stat.S_IRGRP
+    | _stat.S_IRUSR)
+
+
+if _sys.version_info[0] == 2:
+    from  StringIO import StringIO as _BaseStringIO
+    from  StringIO import StringIO as _BaseBytesIO
+
+    class _StringIO (_BaseStringIO):
+        def __enter__(self):
+            return self
+
+        def __exit__(self, exc_type, exc_value, traceback):
+            self.close()
+            return False
+
+    class _BytesIO (_BaseBytesIO):
+        def __enter__(self):
+            return self
+
+        def __exit__(self, exc_type, exc_value, traceback):
+            self.close()
+            return False
+
+else:
+    from io import StringIO as _StringIO
+    from io import BytesIO as _BytesIO
+
+
+
+
+def _locate(path):
+    full_path = path
+    if _os.path.exists(path):
+        return path, None
+
+    else:
+        rest = []
+        root = _os.path.splitdrive(path)[0]
+        while path and path != root:
+            path, bn = _os.path.split(path)
+            rest.append(bn)
+            if _os.path.exists(path):
+                break
+
+        if path == root:
+            raise IOError(
+                _errno.ENOENT, full_path,
+                "No such file or directory")
+
+        if not _os.path.isfile(path):
+            raise IOError(
+                _errno.ENOENT, full_path,
+                "No such file or directory")
+
+        rest.reverse()
+        return path, '/'.join(rest).strip('/')
+
+_open = open
+def open(path, mode='r'):
+    if 'w' in mode or 'a' in mode:
+        raise IOError(
+            _errno.EINVAL, path, "Write access not supported")
+    elif 'r+' in mode:
+        raise IOError(
+            _errno.EINVAL, path, "Write access not supported")
+
+    full_path = path
+    path, rest = _locate(path)
+    if not rest:
+        return _open(path, mode)
+
+    else:
+        try:
+            zf = _zipfile.ZipFile(path, 'r')
+
+        except _zipfile.error:
+            raise IOError(
+                _errno.ENOENT, full_path,
+                "No such file or directory")
+
+        try:
+            data = zf.read(rest)
+        except (_zipfile.error, KeyError):
+            zf.close()
+            raise IOError(
+                _errno.ENOENT, full_path,
+                "No such file or directory")
+        zf.close()
+
+        if mode == 'rb':
+            return _BytesIO(data)
+
+        else:
+            if _sys.version_info[0] == 3:
+                data = data.decode('ascii')
+
+            return _StringIO(data)
+
+def listdir(path):
+    full_path = path
+    path, rest = _locate(path)
+    if not rest and not _os.path.isfile(path):
+        return _os.listdir(path)
+
+    else:
+        try:
+            zf = _zipfile.ZipFile(path, 'r')
+
+        except _zipfile.error:
+            raise IOError(
+                _errno.ENOENT, full_path,
+                "No such file or directory")
+
+        result = set()
+        seen = False
+        try:
+            for nm in zf.namelist():
+                if rest is None:
+                    seen = True
+                    value = nm.split('/')[0]
+                    if value:
+                        result.add(value)
+
+                elif nm.startswith(rest):
+                    if nm == rest:
+                        seen = True
+                        value = ''
+                        pass
+                    elif nm[len(rest)] == '/':
+                        seen = True
+                        value = nm[len(rest)+1:].split('/')[0]
+                    else:
+                        value = None
+
+                    if value:
+                        result.add(value)
+        except _zipfile.error:
+            zf.close()
+            raise IOError(
+                _errno.ENOENT, full_path,
+                "No such file or directory")
+
+        zf.close()
+
+        if not seen:
+            raise IOError(
+                _errno.ENOENT, full_path,
+                "No such file or directory")
+
+        return list(result)
+
+def isfile(path):
+    full_path = path
+    path, rest = _locate(path)
+    if not rest:
+        ok =  _os.path.isfile(path)
+        if ok:
+            try:
+                zf = _zipfile.ZipFile(path, 'r')
+                return False
+            except (_zipfile.error, IOError):
+                return True
+        return False
+
+    zf = None
+    try:
+        zf = _zipfile.ZipFile(path, 'r')
+        info = zf.getinfo(rest)
+        zf.close()
+        return True
+    except (KeyError, _zipfile.error):
+        if zf is not None:
+            zf.close()
+
+        # Check if this is a directory
+        try:
+            info = zf.getinfo(rest + '/')
+        except KeyError:
+            pass
+        else:
+            return False
+
+        rest = rest + '/'
+        for nm in zf.namelist():
+            if nm.startswith(rest):
+                # Directory
+                return False
+
+        # No trace in zipfile
+        raise IOError(
+            _errno.ENOENT, full_path,
+            "No such file or directory")
+
+
+
+
+def isdir(path):
+    full_path = path
+    path, rest = _locate(path)
+    if not rest:
+        ok =  _os.path.isdir(path)
+        if not ok:
+            try:
+                zf = _zipfile.ZipFile(path, 'r')
+            except (_zipfile.error, IOError):
+                return False
+            return True
+        return True
+
+    zf = None
+    try:
+        try:
+            zf = _zipfile.ZipFile(path)
+        except _zipfile.error:
+            raise IOError(
+                _errno.ENOENT, full_path,
+                "No such file or directory")
+
+        try:
+            info = zf.getinfo(rest)
+        except KeyError:
+            pass
+        else:
+            # File found
+            return False
+
+        rest = rest + '/'
+        try:
+            info = zf.getinfo(rest)
+        except KeyError:
+            pass
+        else:
+            # Directory entry found
+            return True
+
+        for nm in zf.namelist():
+            if nm.startswith(rest):
+                return True
+
+        raise IOError(
+            _errno.ENOENT, full_path,
+            "No such file or directory")
+    finally:
+        if zf is not None:
+            zf.close()
+
+
+def islink(path):
+    full_path = path
+    path, rest = _locate(path)
+    if not rest:
+        return _os.path.islink(path)
+
+    try:
+        zf = _zipfile.ZipFile(path)
+    except _zipfile.error:
+        raise IOError(
+            _errno.ENOENT, full_path,
+            "No such file or directory")
+    try:
+
+
+        try:
+            info = zf.getinfo(rest)
+        except KeyError:
+            pass
+        else:
+            # File
+            return False
+
+        rest += '/'
+        try:
+            info = zf.getinfo(rest)
+        except KeyError:
+            pass
+        else:
+            # Directory
+            return False
+
+        for nm in zf.namelist():
+            if nm.startswith(rest):
+                # Directory without listing
+                return False
+
+        raise IOError(
+            _errno.ENOENT, full_path,
+            "No such file or directory")
+
+    finally:
+        zf.close()
+
+
+def readlink(path):
+    full_path = path
+    path, rest = _locate(path)
+    if rest:
+        # No symlinks inside zipfiles
+        raise OSError(
+            _errno.ENOENT, full_path,
+            "No such file or directory")
+
+    return _os.readlink(path)
+
+def getmode(path):
+    full_path = path
+    path, rest = _locate(path)
+    if not rest:
+        return _os.stat(path).st_mode
+
+    zf = None
+    try:
+        zf = _zipfile.ZipFile(path)
+        info = None
+
+        try:
+            info = zf.getinfo(rest)
+        except KeyError:
+            pass
+
+        if info is None:
+            try:
+                info = zf.getinfo(rest + '/')
+            except KeyError:
+                pass
+
+        if info is None:
+            rest = rest + '/'
+            for nm in zf.namelist():
+                if nm.startswith(rest):
+                    break
+            else:
+                raise IOError(
+                    _errno.ENOENT, full_path,
+                    "No such file or directory")
+
+            # Directory exists, but has no entry of its own.
+            return _DFLT_DIR_MODE
+
+        # The mode is stored without file-type in external_attr.
+        if (info.external_attr >> 16) != 0:
+            return _stat.S_IFREG | (info.external_attr >> 16)
+        else:
+            return _DFLT_FILE_MODE
+
+
+    except KeyError:
+        if zf is not None:
+            zf.close()
+        raise IOError(
+            _errno.ENOENT, full_path,
+            "No such file or directory")
+
+def getmtime(path):
+    full_path = path
+    path, rest = _locate(path)
+    if not rest:
+        return _os.path.getmtime(path)
+
+    zf = None
+    try:
+        zf = _zipfile.ZipFile(path)
+        info = None
+
+        try:
+            info = zf.getinfo(rest)
+        except KeyError:
+            pass
+
+        if info is None:
+            try:
+                info = zf.getinfo(rest + '/')
+            except KeyError:
+                pass
+
+        if info is None:
+            rest = rest + '/'
+            for nm in zf.namelist():
+                if nm.startswith(rest):
+                    break
+            else:
+                raise IOError(
+                    _errno.ENOENT, full_path,
+                    "No such file or directory")
+
+            # Directory exists, but has no entry of its
+            # own, fake mtime by using the timestamp of
+            # the zipfile itself.
+            return _os.path.getmtime(path)
+
+        return _time.mktime(info.date_time + (0, 0, -1))
+
+    except KeyError:
+        if zf is not None:
+            zf.close()
+        raise IOError(
+            _errno.ENOENT, full_path,
+            "No such file or directory")
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/__init__.py
new file mode 100644
index 0000000..3e9f9ed
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/__init__.py
@@ -0,0 +1 @@
+""" modulegraph tests """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_basic.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_basic.py
new file mode 100644
index 0000000..387fde9
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_basic.py
@@ -0,0 +1,43 @@
+import unittest
+
+import os, shutil
+
+from modulegraph import modulegraph
+
+class DummyModule(object):
+    packagepath = None
+    def __init__(self, ppath):
+        self.packagepath = ppath
+
+class FindAllSubmodulesTestCase(unittest.TestCase):
+    def testNone(self):
+        mg = modulegraph.ModuleGraph()
+        # empty packagepath
+        m = DummyModule(None)
+        sub_ms = []
+        for sm in mg._find_all_submodules(m):
+            sub_ms.append(sm)
+        self.assertEqual(sub_ms, [])
+
+    def testSimple(self):
+        mg = modulegraph.ModuleGraph()
+        # A plain string packagepath does not break anything, although it is iterated character by character.
+        # BUG: "/hi/there" will read "/"
+        m = DummyModule("xyz")
+        sub_ms = []
+        for sm in mg._find_all_submodules(m):
+            sub_ms.append(sm)
+        self.assertEqual(sub_ms, [])
+
+    def testSlashes(self):
+        # A plain string packagepath does not break anything, although it is iterated character by character.
+        # BUG: "/xyz" will read "/" so this one already triggers missing itertools
+        mg = modulegraph.ModuleGraph()
+        m = DummyModule("/xyz")
+        sub_ms = []
+        for sm in mg._find_all_submodules(m):
+            sub_ms.append(sm)
+        self.assertEqual(sub_ms, [])
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_edge_data.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_edge_data.py
new file mode 100644
index 0000000..0760894
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_edge_data.py
@@ -0,0 +1,417 @@
+import os
+import sys
+if sys.version_info[:2] <= (2,6):
+    import unittest2 as unittest
+else:
+    import unittest
+
+from modulegraph import modulegraph
+
+
+# XXX: TODO: similar tests with byte-compiled modules
+
+
+class TestEdgeData (unittest.TestCase):
+    if not hasattr(unittest.TestCase, 'assertIsInstance'):
+        def assertIsInstance(self, value, types):
+            if not isinstance(value, types):
+                self.fail("%r is not an instance of %r"%(value, types))
+
+    def test_regular_import(self):
+        root = os.path.join(
+                os.path.dirname(os.path.abspath(__file__)),
+                'testpkg-edgedata')
+        mf = modulegraph.ModuleGraph(path=[ root ] + sys.path)
+        script_name = os.path.join(root, 'script.py')
+        mf.run_script(script_name)
+
+        script_node = mf.findNode(script_name)
+        self.assertIsInstance(script_node, modulegraph.Script)
+
+
+        node = mf.findNode('toplevel_existing')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=False, tryexcept=False, fromlist=False))
+
+        node = mf.findNode('toplevel_nonexisting')
+        self.assertIsInstance(node, modulegraph.MissingModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=False, tryexcept=False, fromlist=False))
+
+        node = mf.findNode('toplevel_class_existing')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=False, tryexcept=False, fromlist=False))
+
+        node = mf.findNode('toplevel_class_nonexisting')
+        self.assertIsInstance(node, modulegraph.MissingModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=False, tryexcept=False, fromlist=False))
+
+        node = mf.findNode('toplevel_conditional_existing')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=True, function=False, tryexcept=False, fromlist=False))
+
+        node = mf.findNode('toplevel_conditional_nonexisting')
+        self.assertIsInstance(node, modulegraph.MissingModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=True, function=False, tryexcept=False, fromlist=False))
+
+        node = mf.findNode('toplevel_conditional_import_existing')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=True, function=False, tryexcept=True, fromlist=False))
+
+        node = mf.findNode('toplevel_conditional_import_nonexisting')
+        self.assertIsInstance(node, modulegraph.MissingModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=True, function=False, tryexcept=True, fromlist=False))
+
+        node = mf.findNode('toplevel_conditional_import2_existing')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=True, function=False, tryexcept=True, fromlist=False))
+
+        node = mf.findNode('toplevel_conditional_import2_nonexisting')
+        self.assertIsInstance(node, modulegraph.MissingModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=True, function=False, tryexcept=True, fromlist=False))
+
+        node = mf.findNode('toplevel_import_existing')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=False, tryexcept=True, fromlist=False))
+
+        node = mf.findNode('toplevel_import_nonexisting')
+        self.assertIsInstance(node, modulegraph.MissingModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=False, tryexcept=True, fromlist=False))
+
+        node = mf.findNode('toplevel_import2_existing')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=False, tryexcept=True, fromlist=False))
+
+        node = mf.findNode('toplevel_import2_nonexisting')
+        self.assertIsInstance(node, modulegraph.MissingModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=False, tryexcept=True, fromlist=False))
+
+        node = mf.findNode('function_existing')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=True, tryexcept=False, fromlist=False))
+
+        node = mf.findNode('function_nonexisting')
+        self.assertIsInstance(node, modulegraph.MissingModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=True, tryexcept=False, fromlist=False))
+
+        node = mf.findNode('function_class_existing')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=True, tryexcept=False, fromlist=False))
+
+        node = mf.findNode('function_class_nonexisting')
+        self.assertIsInstance(node, modulegraph.MissingModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=True, tryexcept=False, fromlist=False))
+
+        node = mf.findNode('function_conditional_existing')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=True, function=True, tryexcept=False, fromlist=False))
+
+        node = mf.findNode('function_conditional_nonexisting')
+        self.assertIsInstance(node, modulegraph.MissingModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=True, function=True, tryexcept=False, fromlist=False))
+
+        node = mf.findNode('function_conditional_import_existing')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=True, function=True, tryexcept=True, fromlist=False))
+
+        node = mf.findNode('function_conditional_import_nonexisting')
+        self.assertIsInstance(node, modulegraph.MissingModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=True, function=True, tryexcept=True, fromlist=False))
+
+        node = mf.findNode('function_conditional_import2_existing')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=True, function=True, tryexcept=True, fromlist=False))
+
+        node = mf.findNode('function_conditional_import2_nonexisting')
+        self.assertIsInstance(node, modulegraph.MissingModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=True, function=True, tryexcept=True, fromlist=False))
+
+        node = mf.findNode('function_import_existing')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=True, tryexcept=True, fromlist=False))
+
+        node = mf.findNode('function_import_nonexisting')
+        self.assertIsInstance(node, modulegraph.MissingModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=True, tryexcept=True, fromlist=False))
+
+        node = mf.findNode('function_import2_existing')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=True, tryexcept=True, fromlist=False))
+
+        node = mf.findNode('function_import2_nonexisting')
+        self.assertIsInstance(node, modulegraph.MissingModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=True, tryexcept=True, fromlist=False))
+
+
+    def test_multi_import(self):
+        root = os.path.join(
+                os.path.dirname(os.path.abspath(__file__)),
+                'testpkg-edgedata')
+        mf = modulegraph.ModuleGraph(path=[ root ] + sys.path)
+        script_name = os.path.join(root, 'script_multi_import.py')
+        mf.run_script(script_name)
+
+        script_node = mf.findNode(script_name)
+        self.assertIsInstance(script_node, modulegraph.Script)
+
+
+        node = mf.findNode('os.path')
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=False, tryexcept=True, fromlist=False))
+
+        node = mf.findNode('os')
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=False, tryexcept=False, fromlist=False))
+
+        node = mf.findNode('sys')
+        ed = mf.edgeData(script_node, node)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=True, function=True, tryexcept=False, fromlist=False))
+
+        node = mf.findNode('platform')
+        ed = mf.edgeData(script_node, node)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=True, function=True, tryexcept=False, fromlist=False))
+
+        node = mf.findNode('email')
+        ed = mf.edgeData(script_node, node)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=False, tryexcept=False, fromlist=False))
+
+    def test_from_imports(self):
+        root = os.path.join(
+                os.path.dirname(os.path.abspath(__file__)),
+                'testpkg-edgedata')
+        mf = modulegraph.ModuleGraph(path=[ root ] + sys.path)
+        script_name = os.path.join(root, 'script_from_import.py')
+        mf.run_script(script_name)
+
+        script_node = mf.findNode(script_name)
+        self.assertIsInstance(script_node, modulegraph.Script)
+
+
+        node = mf.findNode('pkg.toplevel_existing')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=False, tryexcept=False, fromlist=True))
+
+        node = mf.findNode('pkg.toplevel_nonexisting')
+        self.assertIsInstance(node, modulegraph.MissingModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=False, tryexcept=False, fromlist=True))
+
+        node = mf.findNode('pkg.toplevel_class_existing')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=False, tryexcept=False, fromlist=True))
+
+        node = mf.findNode('pkg.toplevel_class_nonexisting')
+        self.assertIsInstance(node, modulegraph.MissingModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=False, tryexcept=False, fromlist=True))
+
+        node = mf.findNode('pkg.toplevel_conditional_existing')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=True, function=False, tryexcept=False, fromlist=True))
+
+        node = mf.findNode('pkg.toplevel_conditional_nonexisting')
+        self.assertIsInstance(node, modulegraph.MissingModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=True, function=False, tryexcept=False, fromlist=True))
+
+        node = mf.findNode('pkg.toplevel_conditional_import_existing')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=True, function=False, tryexcept=True, fromlist=True))
+
+        node = mf.findNode('pkg.toplevel_conditional_import_nonexisting')
+        self.assertIsInstance(node, modulegraph.MissingModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=True, function=False, tryexcept=True, fromlist=True))
+
+        node = mf.findNode('pkg.toplevel_conditional_import2_existing')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=True, function=False, tryexcept=True, fromlist=True))
+
+        node = mf.findNode('pkg.toplevel_conditional_import2_nonexisting')
+        self.assertIsInstance(node, modulegraph.MissingModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=True, function=False, tryexcept=True, fromlist=True))
+
+        node = mf.findNode('pkg.toplevel_import_existing')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=False, tryexcept=True, fromlist=True))
+
+        node = mf.findNode('pkg.toplevel_import_nonexisting')
+        self.assertIsInstance(node, modulegraph.MissingModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=False, tryexcept=True, fromlist=True))
+
+        node = mf.findNode('pkg.toplevel_import2_existing')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=False, tryexcept=True, fromlist=True))
+
+        node = mf.findNode('pkg.toplevel_import2_nonexisting')
+        self.assertIsInstance(node, modulegraph.MissingModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=False, tryexcept=True, fromlist=True))
+
+        node = mf.findNode('pkg.function_existing')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=True, tryexcept=False, fromlist=True))
+
+        node = mf.findNode('pkg.function_nonexisting')
+        self.assertIsInstance(node, modulegraph.MissingModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=True, tryexcept=False, fromlist=True))
+
+        node = mf.findNode('pkg.function_class_existing')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=True, tryexcept=False, fromlist=True))
+
+        node = mf.findNode('pkg.function_class_nonexisting')
+        self.assertIsInstance(node, modulegraph.MissingModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=True, tryexcept=False, fromlist=True))
+
+        node = mf.findNode('pkg.function_conditional_existing')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=True, function=True, tryexcept=False, fromlist=True))
+
+        node = mf.findNode('pkg.function_conditional_nonexisting')
+        self.assertIsInstance(node, modulegraph.MissingModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=True, function=True, tryexcept=False, fromlist=True))
+
+        node = mf.findNode('pkg.function_conditional_import_existing')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=True, function=True, tryexcept=True, fromlist=True))
+
+        node = mf.findNode('pkg.function_conditional_import_nonexisting')
+        self.assertIsInstance(node, modulegraph.MissingModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=True, function=True, tryexcept=True, fromlist=True))
+
+        node = mf.findNode('pkg.function_conditional_import2_existing')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=True, function=True, tryexcept=True, fromlist=True))
+
+        node = mf.findNode('pkg.function_conditional_import2_nonexisting')
+        self.assertIsInstance(node, modulegraph.MissingModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=True, function=True, tryexcept=True, fromlist=True))
+
+        node = mf.findNode('pkg.function_import_existing')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=True, tryexcept=True, fromlist=True))
+
+        node = mf.findNode('pkg.function_import_nonexisting')
+        self.assertIsInstance(node, modulegraph.MissingModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=True, tryexcept=True, fromlist=True))
+
+        node = mf.findNode('pkg.function_import2_existing')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=True, tryexcept=True, fromlist=True))
+
+        node = mf.findNode('pkg.function_import2_nonexisting')
+        self.assertIsInstance(node, modulegraph.MissingModule)
+        ed = mf.edgeData(script_node, node)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(conditional=False, function=True, tryexcept=True, fromlist=True))
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_explicit_packages.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_explicit_packages.py
new file mode 100644
index 0000000..a964e4f
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_explicit_packages.py
@@ -0,0 +1,51 @@
+from __future__ import absolute_import
+import unittest
+
+import os, shutil, sys
+
+from modulegraph import find_modules
+from modulegraph import modulegraph
+
+
+class PackagesTestCase (unittest.TestCase):
+    if not hasattr(unittest.TestCase, 'assertIsInstance'):
+        def assertIsInstance(self, object, types, message=None):
+            self.assertTrue(isinstance(object, types),
+                    message or '%r is not an instance of %r'%(object, types))
+
+    def testIncludePackage(self):
+        root = os.path.join(
+                os.path.dirname(os.path.abspath(__file__)),
+                'testpkg-packages')
+
+        mf = find_modules.find_modules(
+                path=[root]+sys.path,
+                scripts=[os.path.join(root, "main_script.py")],
+                packages=['pkg'],
+                debug=1)
+
+        node = mf.findNode('pkg')
+        self.assertIsInstance(node, modulegraph.Package)
+
+        node = mf.findNode('pkg.sub3')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+
+    def testIncludePackageWithExclude(self):
+        root = os.path.join(
+                os.path.dirname(os.path.abspath(__file__)),
+                'testpkg-packages')
+
+        mf = find_modules.find_modules(
+                path=[root]+sys.path,
+                scripts=[os.path.join(root, "main_script.py")],
+                packages=['pkg'],
+                excludes=['pkg.sub3'])
+
+        node = mf.findNode('pkg')
+        self.assertIsInstance(node, modulegraph.Package)
+
+        node = mf.findNode('pkg.sub3')
+        self.assertIsInstance(node, modulegraph.ExcludedModule)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_implies.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_implies.py
new file mode 100644
index 0000000..71be6a9
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_implies.py
@@ -0,0 +1,78 @@
+import unittest
+
+import os, shutil, sys
+
+from modulegraph import modulegraph
+
+class ImpliesTestCase(unittest.TestCase):
+    if not hasattr(unittest.TestCase, 'assertIsInstance'):
+        def assertIsInstance(self, object, types, message=None):
+            self.assertTrue(isinstance(object, types),
+                    message or '%r is not an instance of %r'%(object, types))
+
+    def testBasicImplies(self):
+        root = os.path.join(
+                os.path.dirname(os.path.abspath(__file__)),
+                'testpkg-relimport')
+
+        # First check that 'syslog' isn't accidentally in the graph:
+        mg = modulegraph.ModuleGraph(path=[root]+sys.path)
+        mg.run_script(os.path.join(root, 'script.py'))
+        node = mg.findNode('mod')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+
+        node = mg.findNode('syslog')
+        self.assertEqual(node, None)
+
+        # Now check that adding an implied dependency actually adds
+        # 'syslog' to the graph:
+        mg = modulegraph.ModuleGraph(path=[root]+sys.path, implies={
+            'mod': ['syslog']})
+        self.assertEqual(node, None)
+        mg.run_script(os.path.join(root, 'script.py'))
+        node = mg.findNode('mod')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+
+        node = mg.findNode('syslog')
+        self.assertIsInstance(node, modulegraph.Extension)
+
+        # Check that the edges are correct:
+        self.assertTrue(mg.findNode('mod') in mg.get_edges(node)[1])
+        self.assertTrue(node in mg.get_edges(mg.findNode('mod'))[0])
+
+    def testPackagedImplies(self):
+        root = os.path.join(
+                os.path.dirname(os.path.abspath(__file__)),
+                'testpkg-relimport')
+
+        # First check that 'syslog' isn't accidentally in the graph:
+        mg = modulegraph.ModuleGraph(path=[root]+sys.path)
+        mg.run_script(os.path.join(root, 'script.py'))
+        node = mg.findNode('mod')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+
+        node = mg.findNode('syslog')
+        self.assertEqual(node, None)
+
+
+        # Now check that adding an implied dependency actually adds
+        # 'syslog' to the graph:
+        mg = modulegraph.ModuleGraph(path=[root]+sys.path, implies={
+            'pkg.relative': ['syslog']})
+        node = mg.findNode('syslog')
+        self.assertEqual(node, None)
+
+        mg.run_script(os.path.join(root, 'script.py'))
+        node = mg.findNode('pkg.relative')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+
+        node = mg.findNode('syslog')
+        self.assertIsInstance(node, modulegraph.Extension)
+
+        # Check that the edges are correct:
+        self.assertTrue(mg.findNode('pkg.relative') in mg.get_edges(node)[1])
+        self.assertTrue(node in mg.get_edges(mg.findNode('pkg.relative'))[0])
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_import_from_init.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_import_from_init.py
new file mode 100644
index 0000000..f1333a1
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_import_from_init.py
@@ -0,0 +1,128 @@
+import sys
+if sys.version_info[:2] <= (2,6):
+    import unittest2 as unittest
+else:
+    import unittest
+import textwrap
+import subprocess
+import os
+from modulegraph import modulegraph
+
+class TestNativeImport (unittest.TestCase):
+    # These tests verify that Python's own import statement
+    # behaves the way the modulegraph tests below expect.
+
+    def importModule(self, name):
+        if '.' in name:
+            script = textwrap.dedent("""\
+                try:
+                    import %s
+                except ImportError:
+                    import %s
+                print (%s.__name__)
+            """) %(name, name.rsplit('.', 1)[0], name)
+        else:
+            script = textwrap.dedent("""\
+                import %s
+                print (%s.__name__)
+            """) %(name, name)
+
+        p = subprocess.Popen([sys.executable, '-c', script],
+                stdout=subprocess.PIPE,
+                stderr=subprocess.STDOUT,
+                cwd=os.path.join(
+                    os.path.dirname(os.path.abspath(__file__)),
+                    'testpkg-import-from-init'),
+        )
+        data = p.communicate()[0]
+        if sys.version_info[0] != 2:
+            data = data.decode('UTF-8')
+        data = data.strip()
+
+        if data.endswith(' refs]'):
+            # with --with-pydebug builds
+            data = data.rsplit('\n', 1)[0].strip()
+
+        sts = p.wait()
+
+        if sts != 0:
+            print (data)
+        self.assertEqual(sts, 0)
+        return data
+
+
+    @unittest.skipUnless(sys.version_info[0] == 2, "Python 2.x test")
+    def testRootPkg(self):
+        m = self.importModule('pkg')
+        self.assertEqual(m, 'pkg')
+
+    @unittest.skipUnless(sys.version_info[0] == 2, "Python 2.x test")
+    def testSubPackage(self):
+        m = self.importModule('pkg.subpkg')
+        self.assertEqual(m, 'pkg.subpkg')
+
+    def testRootPkgRelImport(self):
+        m = self.importModule('pkg2')
+        self.assertEqual(m, 'pkg2')
+
+    def testSubPackageRelImport(self):
+        m = self.importModule('pkg2.subpkg')
+        self.assertEqual(m, 'pkg2.subpkg')
+
+
+class TestModuleGraphImport (unittest.TestCase):
+    if not hasattr(unittest.TestCase, 'assertIsInstance'):
+        def assertIsInstance(self, value, types):
+            if not isinstance(value, types):
+                self.fail("%r is not an instance of %r"%(value, types))
+
+    def setUp(self):
+        root = os.path.join(
+                os.path.dirname(os.path.abspath(__file__)),
+                'testpkg-import-from-init')
+        self.mf = modulegraph.ModuleGraph(path=[ root ] + sys.path)
+        #self.mf.debug = 999
+        self.mf.run_script(os.path.join(root, 'script.py'))
+
+
+    @unittest.skipUnless(sys.version_info[0] == 2, "Python 2.x test")
+    def testRootPkg(self):
+        node = self.mf.findNode('pkg')
+        self.assertIsInstance(node, modulegraph.Package)
+        self.assertEqual(node.identifier, 'pkg')
+
+    @unittest.skipUnless(sys.version_info[0] == 2, "Python 2.x test")
+    def testSubPackage(self):
+        node = self.mf.findNode('pkg.subpkg')
+        self.assertIsInstance(node, modulegraph.Package)
+        self.assertEqual(node.identifier, 'pkg.subpkg')
+
+        node = self.mf.findNode('pkg.subpkg.compat')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        self.assertEqual(node.identifier, 'pkg.subpkg.compat')
+
+        node = self.mf.findNode('pkg.subpkg._collections')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        self.assertEqual(node.identifier, 'pkg.subpkg._collections')
+
+    def testRootPkgRelImport(self):
+        node = self.mf.findNode('pkg2')
+        self.assertIsInstance(node, modulegraph.Package)
+        self.assertEqual(node.identifier, 'pkg2')
+
+    def testSubPackageRelImport(self):
+        node = self.mf.findNode('pkg2.subpkg')
+        self.assertIsInstance(node, modulegraph.Package)
+        self.assertEqual(node.identifier, 'pkg2.subpkg')
+
+        node = self.mf.findNode('pkg2.subpkg.compat')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        self.assertEqual(node.identifier, 'pkg2.subpkg.compat')
+
+        node = self.mf.findNode('pkg2.subpkg._collections')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        self.assertEqual(node.identifier, 'pkg2.subpkg._collections')
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_imports.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_imports.py
new file mode 100644
index 0000000..8cdcfa7
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_imports.py
@@ -0,0 +1,473 @@
+"""
+Test for import machinery
+"""
+import unittest
+import sys
+import textwrap
+import subprocess
+import os
+from modulegraph import modulegraph
+
+class TestNativeImport (unittest.TestCase):
+    # These tests verify that Python's own import statement
+    # behaves the way the modulegraph tests below expect.
+
+    def importModule(self, name):
+        if '.' in name:
+            script = textwrap.dedent("""\
+                try:
+                    import %s
+                except ImportError:
+                    import %s
+                print (%s.__name__)
+            """) %(name, name.rsplit('.', 1)[0], name)
+        else:
+            script = textwrap.dedent("""\
+                import %s
+                print (%s.__name__)
+            """) %(name, name)
+
+        p = subprocess.Popen([sys.executable, '-c', script],
+                stdout=subprocess.PIPE,
+                stderr=subprocess.STDOUT,
+                cwd=os.path.join(
+                    os.path.dirname(os.path.abspath(__file__)),
+                    'testpkg-relimport'),
+        )
+        data = p.communicate()[0]
+        if sys.version_info[0] != 2:
+            data = data.decode('UTF-8')
+        data = data.strip()
+
+        if data.endswith(' refs]'):
+            # with --with-pydebug builds
+            data = data.rsplit('\n', 1)[0].strip()
+
+        sts = p.wait()
+
+        if sts != 0:
+            print (data)
+        self.assertEqual(sts, 0)
+        return data
+
+
+    def testRootModule(self):
+        m = self.importModule('mod')
+        self.assertEqual(m, 'mod')
+
+    def testRootPkg(self):
+        m = self.importModule('pkg')
+        self.assertEqual(m, 'pkg')
+
+    def testSubModule(self):
+        m = self.importModule('pkg.mod')
+        self.assertEqual(m, 'pkg.mod')
+
+    if sys.version_info[0] == 2:
+        def testOldStyle(self):
+            m = self.importModule('pkg.oldstyle.mod')
+            self.assertEqual(m, 'pkg.mod')
+    else:
+        # Python 3 always behaves as if __future__.absolute_import were in effect
+        def testOldStyle(self):
+            m = self.importModule('pkg.oldstyle.mod')
+            self.assertEqual(m, 'mod')
+
+    def testNewStyle(self):
+        m = self.importModule('pkg.toplevel.mod')
+        self.assertEqual(m, 'mod')
+
+    def testRelativeImport(self):
+        m = self.importModule('pkg.relative.mod')
+        self.assertEqual(m, 'pkg.mod')
+
+        m = self.importModule('pkg.subpkg.relative.mod')
+        self.assertEqual(m, 'pkg.mod')
+
+        m = self.importModule('pkg.subpkg.mod2.mod')
+        self.assertEqual(m, 'pkg.sub2.mod')
+
+        m = self.importModule('pkg.subpkg.relative2')
+        self.assertEqual(m, 'pkg.subpkg.relative2')
+
+class TestModuleGraphImport (unittest.TestCase):
+    if not hasattr(unittest.TestCase, 'assertIsInstance'):
+        def assertIsInstance(self, value, types):
+            if not isinstance(value, types):
+                self.fail("%r is not an instance of %r"%(value, types))
+
+    def setUp(self):
+        root = os.path.join(
+                os.path.dirname(os.path.abspath(__file__)),
+                'testpkg-relimport')
+        self.mf = modulegraph.ModuleGraph(path=[ root ] + sys.path)
+        #self.mf.debug = 999
+        self.script_name = os.path.join(root, 'script.py')
+        self.mf.run_script(self.script_name)
+
+    def testGraphStructure(self):
+
+        # 1. Script to imported modules
+        n = self.mf.findNode(self.script_name)
+        self.assertIsInstance(n, modulegraph.Script)
+
+        imported = ('mod', 'pkg', 'pkg.mod', 'pkg.oldstyle',
+            'pkg.relative', 'pkg.toplevel', 'pkg.subpkg.relative',
+            'pkg.subpkg.relative2', 'pkg.subpkg.mod2')
+
+        for nm in imported:
+            n2 = self.mf.findNode(nm)
+            ed = self.mf.edgeData(n, n2)
+            self.assertIsInstance(ed, modulegraph.DependencyInfo)
+            self.assertEqual(ed, modulegraph.DependencyInfo(
+                fromlist=False, conditional=False, function=False, tryexcept=False))
+
+        refs = self.mf.getReferences(n)
+        self.assertEqual(set(refs), set(self.mf.findNode(nm) for nm in imported))
+
+        refs = list(self.mf.getReferers(n))
+        # The script is a toplevel item and is therefore referred to from the graph root (aka 'None')
+        self.assertEqual(refs, [None])
+
+
+        # 2. 'mod'
+        n = self.mf.findNode('mod')
+        self.assertIsInstance(n, modulegraph.SourceModule)
+        refs = list(self.mf.getReferences(n))
+        self.assertEqual(refs, [])
+
+        #refs = list(self.mf.getReferers(n))
+        #self.assertEquals(refs, [])
+
+        # 3. 'pkg'
+        n = self.mf.findNode('pkg')
+        self.assertIsInstance(n, modulegraph.Package)
+        refs = list(self.mf.getReferences(n))
+        self.maxDiff = None
+        self.assertEqual(refs, [n])
+
+        #refs = list(self.mf.getReferers(n))
+        #self.assertEquals(refs, [])
+
+        # 4. pkg.mod
+        n = self.mf.findNode('pkg.mod')
+        self.assertIsInstance(n, modulegraph.SourceModule)
+        refs = set(self.mf.getReferences(n))
+        self.assertEqual(refs, set([self.mf.findNode('pkg')]))
+        ed = self.mf.edgeData(n, self.mf.findNode('pkg'))
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(
+            fromlist=False, conditional=False, function=False, tryexcept=False))
+
+
+        # 5. pkg.oldstyle
+        n = self.mf.findNode('pkg.oldstyle')
+        self.assertIsInstance(n, modulegraph.SourceModule)
+        refs = set(self.mf.getReferences(n))
+        if sys.version_info[0] == 2:
+            n2 = self.mf.findNode('pkg.mod')
+        else:
+            n2 = self.mf.findNode('mod')
+        self.assertEqual(refs, set([self.mf.findNode('pkg'), n2]))
+        ed = self.mf.edgeData(n, n2)
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(
+            fromlist=False, conditional=False, function=False, tryexcept=False))
+
+
+        # 6. pkg.relative
+        n = self.mf.findNode('pkg.relative')
+        self.assertIsInstance(n, modulegraph.SourceModule)
+        refs = set(self.mf.getReferences(n))
+        self.assertEqual(refs, set([self.mf.findNode('__future__'), self.mf.findNode('pkg'), self.mf.findNode('pkg.mod')]))
+
+        ed = self.mf.edgeData(n, self.mf.findNode('pkg.mod'))
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(
+            fromlist=True, conditional=False, function=False, tryexcept=False))
+
+        ed = self.mf.edgeData(n, self.mf.findNode('__future__'))
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(
+            fromlist=False, conditional=False, function=False, tryexcept=False))
+
+        #ed = self.mf.edgeData(n, self.mf.findNode('__future__.absolute_import'))
+        #self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        #self.assertEqual(ed, modulegraph.DependencyInfo(
+            #fromlist=True, conditional=False, function=False, tryexcept=False))
+
+        # 7. pkg.toplevel
+        n = self.mf.findNode('pkg.toplevel')
+        self.assertIsInstance(n, modulegraph.SourceModule)
+        refs = set(self.mf.getReferences(n))
+        self.assertEqual(refs, set([self.mf.findNode('__future__'), self.mf.findNode('pkg'), self.mf.findNode('mod')]))
+
+        ed = self.mf.edgeData(n, self.mf.findNode('mod'))
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(
+            fromlist=False, conditional=False, function=False, tryexcept=False))
+
+        ed = self.mf.edgeData(n, self.mf.findNode('__future__'))
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(
+            fromlist=False, conditional=False, function=False, tryexcept=False))
+
+        #ed = self.mf.edgeData(n, self.mf.findNode('__future__.absolute_import'))
+        #self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        #self.assertEqual(ed, modulegraph.DependencyInfo(
+            #fromlist=True, conditional=False, function=False, tryexcept=False))
+
+        # 8. pkg.subpkg
+        n = self.mf.findNode('pkg.subpkg')
+        self.assertIsInstance(n, modulegraph.Package)
+        refs = set(self.mf.getReferences(n))
+        self.assertEqual(refs, set([self.mf.findNode('pkg')]))
+
+        ed = self.mf.edgeData(n, self.mf.findNode('pkg'))
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(
+            fromlist=False, conditional=False, function=False, tryexcept=False))
+
+        # 9. pkg.subpkg.relative
+        n = self.mf.findNode('pkg.subpkg.relative')
+        self.assertIsInstance(n, modulegraph.SourceModule)
+        refs = set(self.mf.getReferences(n))
+        self.assertEqual(refs, set([self.mf.findNode('__future__'), self.mf.findNode('pkg'), self.mf.findNode('pkg.subpkg'), self.mf.findNode('pkg.mod')]))
+
+        ed = self.mf.edgeData(n, self.mf.findNode('pkg.subpkg'))
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(
+            fromlist=False, conditional=False, function=False, tryexcept=False))
+
+        ed = self.mf.edgeData(n, self.mf.findNode('pkg.mod'))
+        self.assertIsInstance(ed, modulegraph.DependencyInfo)
+        self.assertEqual(ed, modulegraph.DependencyInfo(
+            fromlist=True, conditional=False, function=False, tryexcept=False))
+
+        # 10. pkg.subpkg.relative2
+        n = self.mf.findNode('pkg.subpkg.relative2')
+        self.assertIsInstance(n, modulegraph.SourceModule)
+        refs = set(self.mf.getReferences(n))
+        self.assertEqual(refs, set([self.mf.findNode('pkg.subpkg'), self.mf.findNode('pkg.relimport'), self.mf.findNode('__future__')]))
+
+        # 10. pkg.subpkg.mod2
+        n = self.mf.findNode('pkg.subpkg.mod2')
+        self.assertIsInstance(n, modulegraph.SourceModule)
+        refs = set(self.mf.getReferences(n))
+        self.assertEqual(refs, set([
+            self.mf.findNode('__future__'),
+            self.mf.findNode('pkg.subpkg'),
+            self.mf.findNode('pkg.sub2.mod'),
+            self.mf.findNode('pkg.sub2'),
+        ]))
+
+
+    def testRootModule(self):
+        node = self.mf.findNode('mod')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        self.assertEqual(node.identifier, 'mod')
+
+    def testRootPkg(self):
+        node = self.mf.findNode('pkg')
+        self.assertIsInstance(node, modulegraph.Package)
+        self.assertEqual(node.identifier, 'pkg')
+
+    def testSubModule(self):
+        node = self.mf.findNode('pkg.mod')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        self.assertEqual(node.identifier, 'pkg.mod')
+
+    if sys.version_info[0] == 2:
+        def testOldStyle(self):
+            node = self.mf.findNode('pkg.oldstyle')
+            self.assertIsInstance(node, modulegraph.SourceModule)
+            self.assertEqual(node.identifier, 'pkg.oldstyle')
+            sub = [ n for n in self.mf.get_edges(node)[0] if n.identifier != '__future__' ][0]
+            self.assertEqual(sub.identifier, 'pkg.mod')
+    else:
+        # Python 3 always behaves as if __future__.absolute_import were in effect
+        def testOldStyle(self):
+            node = self.mf.findNode('pkg.oldstyle')
+            self.assertIsInstance(node, modulegraph.SourceModule)
+            self.assertEqual(node.identifier, 'pkg.oldstyle')
+            sub = [ n for n in self.mf.get_edges(node)[0] if n.identifier != '__future__' ][0]
+            self.assertEqual(sub.identifier, 'mod')
+
+    def testNewStyle(self):
+        node = self.mf.findNode('pkg.toplevel')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        self.assertEqual(node.identifier, 'pkg.toplevel')
+        sub = [ n for n in self.mf.get_edges(node)[0] if not n.identifier.startswith('__future__')][0]
+        self.assertEqual(sub.identifier, 'mod')
+
+    def testRelativeImport(self):
+        node = self.mf.findNode('pkg.relative')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        self.assertEqual(node.identifier, 'pkg.relative')
+        sub = [ n for n in self.mf.get_edges(node)[0] if not n.identifier.startswith('__future__') ][0]
+        self.assertIsInstance(sub, modulegraph.Package)
+        self.assertEqual(sub.identifier, 'pkg')
+
+        node = self.mf.findNode('pkg.subpkg.relative')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        self.assertEqual(node.identifier, 'pkg.subpkg.relative')
+        sub = [ n for n in self.mf.get_edges(node)[0] if not n.identifier.startswith('__future__') ][0]
+        self.assertIsInstance(sub, modulegraph.Package)
+        self.assertEqual(sub.identifier, 'pkg')
+
+        node = self.mf.findNode('pkg.subpkg.mod2')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        self.assertEqual(node.identifier, 'pkg.subpkg.mod2')
+        sub = [ n for n in self.mf.get_edges(node)[0] if not n.identifier.startswith('__future__') ][0]
+        self.assertIsInstance(sub, modulegraph.SourceModule)
+        self.assertEqual(sub.identifier, 'pkg.sub2.mod')
+
+        node = self.mf.findNode('pkg.subpkg.relative2')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        self.assertEqual(node.identifier, 'pkg.subpkg.relative2')
+
+        node = self.mf.findNode('pkg.relimport')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+
+class TestRegressions1 (unittest.TestCase):
+    if not hasattr(unittest.TestCase, 'assertIsInstance'):
+        def assertIsInstance(self, value, types):
+            if not isinstance(value, types):
+                self.fail("%r is not an instance of %r"%(value, types))
+
+    def setUp(self):
+        root = os.path.join(
+                os.path.dirname(os.path.abspath(__file__)),
+                'testpkg-regr1')
+        self.mf = modulegraph.ModuleGraph(path=[ root ] + sys.path)
+        self.mf.run_script(os.path.join(root, 'main_script.py'))
+
+    def testRegr1(self):
+        node = self.mf.findNode('pkg.a')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        node = self.mf.findNode('pkg.b')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+
+
+    def testMissingPathEntry(self):
+        root = os.path.join(
+                os.path.dirname(os.path.abspath(__file__)),
+                'nosuchdirectory')
+        try:
+            mf = modulegraph.ModuleGraph(path=[ root ] + sys.path)
+        except os.error:
+            self.fail('modulegraph initialiser raises os.error')
+
+class TestRegressions2 (unittest.TestCase):
+    if not hasattr(unittest.TestCase, 'assertIsInstance'):
+        def assertIsInstance(self, value, types):
+            if not isinstance(value, types):
+                self.fail("%r is not an instance of %r"%(value, types))
+
+    def setUp(self):
+        root = os.path.join(
+                os.path.dirname(os.path.abspath(__file__)),
+                'testpkg-regr2')
+        self.mf = modulegraph.ModuleGraph(path=[ root ] + sys.path)
+        self.mf.run_script(os.path.join(root, 'main_script.py'))
+
+    def testRegr1(self):
+        node = self.mf.findNode('pkg.base')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        node = self.mf.findNode('pkg.pkg')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+
+class TestRegressions3 (unittest.TestCase):
+    if not hasattr(unittest.TestCase, 'assertIsInstance'):
+        def assertIsInstance(self, value, types):
+            if not isinstance(value, types):
+                self.fail("%r is not an instance of %r"%(value, types))
+
+    def assertStartswith(self, value, test):
+        if not value.startswith(test):
+            self.fail("%r does not start with %r"%(value, test))
+
+    def setUp(self):
+        root = os.path.join(
+                os.path.dirname(os.path.abspath(__file__)),
+                'testpkg-regr3')
+        self.mf = modulegraph.ModuleGraph(path=[ root ] + sys.path)
+        self.mf.run_script(os.path.join(root, 'script.py'))
+
+    def testRegr1(self):
+        node = self.mf.findNode('mypkg.distutils')
+        self.assertIsInstance(node, modulegraph.Package)
+        node = self.mf.findNode('mypkg.distutils.ccompiler')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        self.assertStartswith(node.filename, os.path.dirname(__file__))
+
+        import distutils.sysconfig, distutils.ccompiler
+        node = self.mf.findNode('distutils.ccompiler')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        self.assertEqual(os.path.dirname(node.filename),
+                os.path.dirname(distutils.ccompiler.__file__))
+
+        node = self.mf.findNode('distutils.sysconfig')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        self.assertEqual(os.path.dirname(node.filename),
+                os.path.dirname(distutils.sysconfig.__file__))
+
+class TestRegression4 (unittest.TestCase):
+    if not hasattr(unittest.TestCase, 'assertIsInstance'):
+        def assertIsInstance(self, value, types):
+            if not isinstance(value, types):
+                self.fail("%r is not an instance of %r"%(value, types))
+
+    def setUp(self):
+        root = os.path.join(
+                os.path.dirname(os.path.abspath(__file__)),
+                'testpkg-regr4')
+        self.mf = modulegraph.ModuleGraph(path=[ root ] + sys.path)
+        self.mf.run_script(os.path.join(root, 'script.py'))
+
+    def testRegr1(self):
+        node = self.mf.findNode('pkg.core')
+        self.assertIsInstance(node, modulegraph.Package)
+
+        node = self.mf.findNode('pkg.core.callables')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+
+        node = self.mf.findNode('pkg.core.listener')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+
+        node = self.mf.findNode('pkg.core.listenerimpl')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+
+class TestRegression5 (unittest.TestCase):
+    if not hasattr(unittest.TestCase, 'assertIsInstance'):
+        def assertIsInstance(self, value, types):
+            if not isinstance(value, types):
+                self.fail("%r is not an instance of %r"%(value, types))
+
+    def setUp(self):
+        root = os.path.join(
+                os.path.dirname(os.path.abspath(__file__)),
+                'testpkg-regr5')
+        self.mf = modulegraph.ModuleGraph(path=[ root ] + sys.path)
+        self.mf.run_script(os.path.join(root, 'script.py'))
+
+    def testRegr1(self):
+        node = self.mf.findNode('distutils')
+        self.assertIsInstance(node, modulegraph.Package)
+        self.assertIn('distutils/__init__', node.filename)
+
+class TestDeeplyNested (unittest.TestCase):
+    def setUp(self):
+        root = os.path.join(
+                os.path.dirname(os.path.abspath(__file__)),
+                'testpkg-regr6')
+        self.mf = modulegraph.ModuleGraph(path=[ root ] + sys.path)
+        self.mf.run_script(os.path.join(root, 'script.py'))
+
+    def testRegr(self):
+        node = self.mf.findNode('os')
+        self.assertTrue(node is not None)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_modulegraph.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_modulegraph.py
new file mode 100644
index 0000000..0ee724b
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_modulegraph.py
@@ -0,0 +1,1060 @@
+import unittest
+from modulegraph import modulegraph
+import pkg_resources
+import os
+import imp
+import sys
+import shutil
+import warnings
+from altgraph import Graph
+import textwrap
+import xml.etree.ElementTree as ET
+import pickle
+
+try:
+    bytes
+except NameError:
+    bytes = str
+
+try:
+    from StringIO import StringIO
+except ImportError:
+    from io import StringIO
+
+TESTDATA = os.path.join(
+        os.path.dirname(os.path.abspath(__file__)),
+        "testdata", "nspkg")
+
+try:
+    expectedFailure = unittest.expectedFailure
+except AttributeError:
+    import functools
+    def expectedFailure(function):
+        @functools.wraps(function)
+        def wrapper(*args, **kwds):
+            try:
+                function(*args, **kwds)
+            except AssertionError:
+                pass
+            else:
+                raise AssertionError("unexpected pass")
+        return wrapper
+
+class TestDependencyInfo (unittest.TestCase):
+    def test_pickling(self):
+        info = modulegraph.DependencyInfo(function=True, conditional=False, tryexcept=True, fromlist=False)
+        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
+            b = pickle.dumps(info, proto)
+            self.assertTrue(isinstance(b, bytes))
+
+            o = pickle.loads(b)
+            self.assertEqual(o, info)
+
+    def test_merging(self):
+        info1 = modulegraph.DependencyInfo(function=True, conditional=False, tryexcept=True, fromlist=False)
+        info2 = modulegraph.DependencyInfo(function=False, conditional=True, tryexcept=True, fromlist=False)
+        self.assertEqual(
+            info1._merged(info2), modulegraph.DependencyInfo(function=True, conditional=True, tryexcept=True, fromlist=False))
+
+        info2 = modulegraph.DependencyInfo(function=False, conditional=True, tryexcept=False, fromlist=False)
+        self.assertEqual(
+            info1._merged(info2), modulegraph.DependencyInfo(function=True, conditional=True, tryexcept=True, fromlist=False))
+
+        info2 = modulegraph.DependencyInfo(function=False, conditional=False, tryexcept=False, fromlist=False)
+        self.assertEqual(
+            info1._merged(info2), modulegraph.DependencyInfo(function=False, conditional=False, tryexcept=False, fromlist=False))
+
+        info1 = modulegraph.DependencyInfo(function=True, conditional=False, tryexcept=True, fromlist=True)
+        self.assertEqual(
+            info1._merged(info2), modulegraph.DependencyInfo(function=False, conditional=False, tryexcept=False, fromlist=False))
+
+        info2 = modulegraph.DependencyInfo(function=False, conditional=False, tryexcept=False, fromlist=True)
+        self.assertEqual(
+            info1._merged(info2), modulegraph.DependencyInfo(function=False, conditional=False, tryexcept=False, fromlist=True))
+
+
+class TestFunctions (unittest.TestCase):
+    if not hasattr(unittest.TestCase, 'assertIsInstance'):
+        def assertIsInstance(self, obj, types):
+            self.assertTrue(isinstance(obj, types), '%r is not instance of %r'%(obj, types))
+
+    def test_eval_str_tuple(self):
+        for v in [
+            '()',
+            '("hello",)',
+            '("hello", "world")',
+            "('hello',)",
+            "('hello', 'world')",
+            "('hello', \"world\")",
+            ]:
+
+            self.assertEqual(modulegraph._eval_str_tuple(v), eval(v))
+
+        self.assertRaises(ValueError, modulegraph._eval_str_tuple, "")
+        self.assertRaises(ValueError, modulegraph._eval_str_tuple, "'a'")
+        self.assertRaises(ValueError, modulegraph._eval_str_tuple, "'a', 'b'")
+        self.assertRaises(ValueError, modulegraph._eval_str_tuple, "('a', ('b', 'c'))")
+        self.assertRaises(ValueError, modulegraph._eval_str_tuple, "('a', ('b\", 'c'))")
+
+    def test_namespace_package_path(self):
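+        # DS and WS below are minimal stand-ins for pkg_resources'
+        # Distribution and WorkingSet: they expose just enough of the
+        # metadata API (has_metadata/get_metadata for
+        # 'namespace_packages.txt') for _namespace_package_path to scan
+        # the working set.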
+        class DS (object):
+            def __init__(self, path, namespace_packages=None):
+                self.location = path
+                self._namespace_packages = namespace_packages
+
+            def has_metadata(self, key):
+                if key == 'namespace_packages.txt':
+                    return self._namespace_packages is not None
+
+                raise ValueError("invalid lookup key")
+
+            def get_metadata(self, key):
+                if key == 'namespace_packages.txt':
+                    if self._namespace_packages is None:
+                        raise ValueError("no file")
+
+                    return self._namespace_packages
+
+                raise ValueError("invalid lookup key")
+
+        class WS (object):
+            def __init__(self, path=None):
+                pass
+
+            def __iter__(self):
+                yield DS("/pkg/pkg1")
+                yield DS("/pkg/pkg2", "foo\n")
+                yield DS("/pkg/pkg3", "bar.baz\n")
+                yield DS("/pkg/pkg4", "foobar\nfoo\n")
+
+        saved_ws = pkg_resources.WorkingSet
+        try:
+            pkg_resources.WorkingSet = WS
+
+            self.assertEqual(modulegraph._namespace_package_path("sys", ["appdir/pkg"]), ["appdir/pkg"])
+            self.assertEqual(modulegraph._namespace_package_path("foo", ["appdir/pkg"]), ["appdir/pkg", "/pkg/pkg2/foo", "/pkg/pkg4/foo"])
+            self.assertEqual(modulegraph._namespace_package_path("bar.baz", ["appdir/pkg"]), ["appdir/pkg", "/pkg/pkg3/bar/baz"])
+
+        finally:
+            pkg_resources.WorkingSet = saved_ws
+
+    def test_os_listdir(self):
+        root = os.path.join(
+                os.path.dirname(os.path.abspath(__file__)), 'testdata')
+
+        self.assertEqual(modulegraph.os_listdir('/etc/'), os.listdir('/etc'))
+        self.assertRaises(IOError, modulegraph.os_listdir, '/etc/hosts/foobar')
+        self.assertRaises(IOError, modulegraph.os_listdir, os.path.join(root, 'test.egg', 'bar'))
+
+        self.assertEqual(list(sorted(modulegraph.os_listdir(os.path.join(root, 'test.egg', 'foo')))),
+            [ 'bar', 'bar.txt', 'baz.txt' ])
+
+    def test_code_to_file(self):
+        try:
+            code = modulegraph._code_to_file.__code__
+        except AttributeError:
+            code = modulegraph._code_to_file.func_code
+
+        data = modulegraph._code_to_file(code)
+        self.assertTrue(hasattr(data, 'read'))
+
+        content = data.read()
+        self.assertIsInstance(content, bytes)
+        data.close()
+
+    def test_find_module(self):
+        for path in ('syspath', 'syspath.zip', 'syspath.egg'):
+            path = os.path.join(os.path.dirname(TESTDATA), path)
+            if os.path.exists(os.path.join(path, 'mymodule.pyc')):
+                os.unlink(os.path.join(path, 'mymodule.pyc'))
+
+            # Plain module
+            info = modulegraph.find_module('mymodule', path=[path] + sys.path)
+
+            fp = info[0]
+            filename = info[1]
+            description = info[2]
+
+            self.assertTrue(hasattr(fp, 'read'))
+
+            if path.endswith('.zip') or path.endswith('.egg'):
+                # Zip importers may precompile
+                if filename.endswith('.py'):
+                    self.assertEqual(filename, os.path.join(path, 'mymodule.py'))
+                    self.assertEqual(description, ('.py', 'rU', imp.PY_SOURCE))
+
+                else:
+                    self.assertEqual(filename, os.path.join(path, 'mymodule.pyc'))
+                    self.assertEqual(description, ('.pyc', 'rb', imp.PY_COMPILED))
+
+            else:
+                self.assertEqual(filename, os.path.join(path, 'mymodule.py'))
+                self.assertEqual(description, ('.py', 'rU', imp.PY_SOURCE))
+
+            # Compiled plain module, no source
+            if path.endswith('.zip') or path.endswith('.egg'):
+                self.assertRaises(ImportError, modulegraph.find_module, 'mymodule2', path=[path] + sys.path)
+
+            else:
+                info = modulegraph.find_module('mymodule2', path=[path] + sys.path)
+
+                fp = info[0]
+                filename = info[1]
+                description = info[2]
+
+                self.assertTrue(hasattr(fp, 'read'))
+                self.assertEqual(filename, os.path.join(path, 'mymodule2.pyc'))
+                self.assertEqual(description, ('.pyc', 'rb', imp.PY_COMPILED))
+
+                fp.close()
+
+            # Compiled plain module, with source
+#            info = modulegraph.find_module('mymodule3', path=[path] + sys.path)
+#
+#            fp = info[0]
+#            filename = info[1]
+#            description = info[2]
+#
+#            self.assertTrue(hasattr(fp, 'read'))
+#
+#            if sys.version_info[:2] >= (3,2):
+#                self.assertEqual(filename, os.path.join(path, '__pycache__', 'mymodule3.cpython-32.pyc'))
+#            else:
+#                self.assertEqual(filename, os.path.join(path, 'mymodule3.pyc'))
+#            self.assertEqual(description, ('.pyc', 'rb', imp.PY_COMPILED))
+
+
+            # Package
+            info = modulegraph.find_module('mypkg', path=[path] + sys.path)
+            fp = info[0]
+            filename = info[1]
+            description = info[2]
+
+            self.assertEqual(fp, None)
+            self.assertEqual(filename, os.path.join(path, 'mypkg'))
+            self.assertEqual(description, ('', '', imp.PKG_DIRECTORY))
+
+            # Extension
+            if path.endswith('.zip'):
+                self.assertRaises(ImportError, modulegraph.find_module, 'myext', path=[path] + sys.path)
+
+            else:
+                info = modulegraph.find_module('myext', path=[path] + sys.path)
+                fp = info[0]
+                filename = info[1]
+                description = info[2]
+
+                if sys.platform == 'win32':
+                    ext = '.pyd'
+                else:
+                    # This is a lie, but is good enough for now
+                    ext = '.so'
+
+                self.assertEqual(filename, os.path.join(path, 'myext' + ext))
+                self.assertEqual(description, (ext, 'rb', imp.C_EXTENSION))
+                self.assertEqual(fp, None)
+
+    def test_moduleInfoForPath(self):
+        self.assertEqual(modulegraph.moduleInfoForPath("/somewhere/else/file.txt"), None)
+
+        info = modulegraph.moduleInfoForPath("/somewhere/else/file.py")
+        self.assertEqual(info[0], "file")
+        if sys.version_info[:2] >= (3,4):
+            self.assertEqual(info[1], "r")
+        else:
+            self.assertEqual(info[1], "U")
+        self.assertEqual(info[2], imp.PY_SOURCE)
+
+        info = modulegraph.moduleInfoForPath("/somewhere/else/file.pyc")
+        self.assertEqual(info[0], "file")
+        self.assertEqual(info[1], "rb")
+        self.assertEqual(info[2], imp.PY_COMPILED)
+
+        if sys.platform in ('darwin', 'linux2'):
+            info = modulegraph.moduleInfoForPath("/somewhere/else/file.so")
+            self.assertEqual(info[0], "file")
+            self.assertEqual(info[1], "rb")
+            self.assertEqual(info[2], imp.C_EXTENSION)
+
+        elif sys.platform in ('win32',):
+            info = modulegraph.moduleInfoForPath("/somewhere/else/file.pyd")
+            self.assertEqual(info[0], "file")
+            self.assertEqual(info[1], "rb")
+            self.assertEqual(info[2], imp.C_EXTENSION)
+
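+    # The deprecation test below is compiled via exec() because its body
+    # uses the 'with' statement and warnings.catch_warnings(record=True),
+    # which require Python 2.6 or later; the version guard keeps older
+    # interpreters from hitting it at import time.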
+    if sys.version_info[:2] > (2,5):
+        exec(textwrap.dedent('''\
+            def test_deprecated(self):
+                saved_add = modulegraph.addPackagePath
+                saved_replace = modulegraph.replacePackage
+                try:
+                    called = []
+
+                    def log_add(*args, **kwds):
+                        called.append(('add', args, kwds))
+                    def log_replace(*args, **kwds):
+                        called.append(('replace', args, kwds))
+
+                    modulegraph.addPackagePath = log_add
+                    modulegraph.replacePackage = log_replace
+
+                    with warnings.catch_warnings(record=True) as w:
+                        warnings.simplefilter("always")
+                        modulegraph.ReplacePackage('a', 'b')
+                        modulegraph.AddPackagePath('c', 'd')
+
+                    self.assertEqual(len(w), 2)
+                    self.assertTrue(w[-1].category is DeprecationWarning)
+                    self.assertTrue(w[-2].category is DeprecationWarning)
+
+                    self.assertEqual(called, [
+                        ('replace', ('a', 'b'), {}),
+                        ('add', ('c', 'd'), {}),
+                    ])
+
+                finally:
+                    modulegraph.addPackagePath = saved_add
+                    modulegraph.replacePackage = saved_replace
+            '''), locals(), globals())
+
+    def test_addPackage(self):
+        saved = modulegraph._packagePathMap
+        self.assertIsInstance(saved, dict)
+        try:
+            modulegraph._packagePathMap = {}
+
+            modulegraph.addPackagePath('foo', 'a')
+            self.assertEqual(modulegraph._packagePathMap, { 'foo': ['a'] })
+
+            modulegraph.addPackagePath('foo', 'b')
+            self.assertEqual(modulegraph._packagePathMap, { 'foo': ['a', 'b'] })
+
+            modulegraph.addPackagePath('bar', 'b')
+            self.assertEqual(modulegraph._packagePathMap, { 'foo': ['a', 'b'], 'bar': ['b'] })
+
+        finally:
+            modulegraph._packagePathMap = saved
+
+
+    def test_replacePackage(self):
+        saved = modulegraph._replacePackageMap
+        self.assertIsInstance(saved, dict)
+        try:
+            modulegraph._replacePackageMap = {}
+
+            modulegraph.replacePackage("a", "b")
+            self.assertEqual(modulegraph._replacePackageMap, {"a": "b"})
+            modulegraph.replacePackage("a", "c")
+            self.assertEqual(modulegraph._replacePackageMap, {"a": "c"})
+            modulegraph.replacePackage("b", "c")
+            self.assertEqual(modulegraph._replacePackageMap, {"a": "c", 'b': 'c'})
+
+        finally:
+            modulegraph._replacePackageMap = saved
+
+class TestNode (unittest.TestCase):
+    if not hasattr(unittest.TestCase, 'assertIsInstance'):
+        def assertIsInstance(self, obj, types):
+            self.assertTrue(isinstance(obj, types), '%r is not instance of %r'%(obj, types))
+    def testBasicAttributes(self):
+        n = modulegraph.Node("foobar.xyz")
+        self.assertIsInstance(n.debug, int)
+        self.assertEqual(n.identifier, n.graphident)
+        self.assertEqual(n.identifier, 'foobar.xyz')
+        self.assertEqual(n.filename, None)
+        self.assertEqual(n.packagepath, None)
+        self.assertEqual(n.code, None)
+        self.assertEqual(n.globalnames, set())
+        self.assertEqual(n.starimports, set())
+
+    def testMapping(self):
+        n = modulegraph.Node("foobar.xyz")
+        self.assertEqual(n._namespace, {})
+
+        self.assertFalse('foo' in n)
+        self.assertRaises(KeyError, n.__getitem__, 'foo')
+        self.assertEqual(n.get('foo'), None)
+        self.assertEqual(n.get('foo', 'a'), 'a')
+        n['foo'] = 42
+        self.assertEqual(n['foo'], 42)
+        self.assertTrue('foo' in n)
+        self.assertEqual(n._namespace, {'foo':42})
+
+    def testOrder(self):
+        n1 = modulegraph.Node("n1")
+        n2 = modulegraph.Node("n2")
+
+        self.assertTrue(n1 < n2)
+        self.assertFalse(n2 < n1)
+        self.assertTrue(n1 <= n1)
+        self.assertFalse(n1 == n2)
+        self.assertTrue(n1 == n1)
+        self.assertTrue(n1 != n2)
+        self.assertFalse(n1 != n1)
+        self.assertTrue(n2 > n1)
+        self.assertFalse(n1 > n2)
+        self.assertTrue(n1 >= n1)
+        self.assertTrue(n2 >= n1)
+
+    def testHashing(self):
+        n1a = modulegraph.Node('n1')
+        n1b = modulegraph.Node('n1')
+        n2 = modulegraph.Node('n2')
+
+        d = {}
+        d[n1a] = 'n1'
+        d[n2] = 'n2'
+        self.assertEqual(d[n1b], 'n1')
+        self.assertEqual(d[n2], 'n2')
+
+    def test_infoTuple(self):
+        n = modulegraph.Node('n1')
+        self.assertEqual(n.infoTuple(), ('n1',))
+
+    def assertNoMethods(self, klass):
+        d = dict(klass.__dict__)
+        del d['__doc__']
+        del d['__module__']
+        if '__qualname__' in d:
+            # New in Python 3.3
+            del d['__qualname__']
+        if '__dict__' in d:
+            # New in Python 3.4
+            del d['__dict__']
+        self.assertEqual(d, {})
+
+    def assertHasExactMethods(self, klass, *methods):
+        d = dict(klass.__dict__)
+        del d['__doc__']
+        del d['__module__']
+        if '__qualname__' in d:
+            # New in Python 3.3
+            del d['__qualname__']
+        if '__dict__' in d:
+            # New in Python 3.4
+            del d['__dict__']
+
+        for nm in methods:
+            self.assertTrue(nm in d, "%s doesn't have attribute %r"%(klass, nm))
+            del d[nm]
+
+        self.assertEqual(d, {})
+
+
+    if not hasattr(unittest.TestCase, 'assertIsSubclass'):
+        def assertIsSubclass(self, cls1, cls2, message=None):
+            self.assertTrue(issubclass(cls1, cls2),
+                    message or "%r is not a subclass of %r"%(cls1, cls2))
+
+    def test_subclasses(self):
+        self.assertIsSubclass(modulegraph.AliasNode, modulegraph.Node)
+        self.assertIsSubclass(modulegraph.Script, modulegraph.Node)
+        self.assertIsSubclass(modulegraph.BadModule, modulegraph.Node)
+        self.assertIsSubclass(modulegraph.ExcludedModule, modulegraph.BadModule)
+        self.assertIsSubclass(modulegraph.MissingModule, modulegraph.BadModule)
+        self.assertIsSubclass(modulegraph.BaseModule, modulegraph.Node)
+        self.assertIsSubclass(modulegraph.BuiltinModule, modulegraph.BaseModule)
+        self.assertIsSubclass(modulegraph.SourceModule, modulegraph.BaseModule)
+        self.assertIsSubclass(modulegraph.CompiledModule, modulegraph.BaseModule)
+        self.assertIsSubclass(modulegraph.Package, modulegraph.BaseModule)
+        self.assertIsSubclass(modulegraph.Extension, modulegraph.BaseModule)
+
+        # These classes have no new functionality, check that no code
+        # got added:
+        self.assertNoMethods(modulegraph.BadModule)
+        self.assertNoMethods(modulegraph.ExcludedModule)
+        self.assertNoMethods(modulegraph.MissingModule)
+        self.assertNoMethods(modulegraph.BuiltinModule)
+        self.assertNoMethods(modulegraph.SourceModule)
+        self.assertNoMethods(modulegraph.CompiledModule)
+        self.assertNoMethods(modulegraph.Package)
+        self.assertNoMethods(modulegraph.Extension)
+
+        # AliasNode is basically a clone of an existing node
+        self.assertHasExactMethods(modulegraph.Script, '__init__', 'infoTuple')
+        n1 = modulegraph.Node('n1')
+        n1.packagepath = ['a', 'b']
+
+        a1 = modulegraph.AliasNode('a1', n1)
+        self.assertEqual(a1.graphident, 'a1')
+        self.assertEqual(a1.identifier, 'n1')
+        self.assertTrue(a1.packagepath is n1.packagepath)
+        self.assertTrue(a1._namespace is n1._namespace)
+        self.assertTrue(a1.globalnames is n1.globalnames)
+        self.assertTrue(a1.starimports is n1.starimports)
+
+        v = a1.infoTuple()
+        self.assertEqual(v, ('a1', 'n1'))
+
+        # Scripts have a filename
+        self.assertHasExactMethods(modulegraph.Script, '__init__', 'infoTuple')
+        s1 = modulegraph.Script('do_import')
+        self.assertEqual(s1.graphident, 'do_import')
+        self.assertEqual(s1.identifier, 'do_import')
+        self.assertEqual(s1.filename, 'do_import')
+
+        v = s1.infoTuple()
+        self.assertEqual(v, ('do_import',))
+
+        # BaseModule adds some attributes and a custom infotuple
+        self.assertHasExactMethods(modulegraph.BaseModule, '__init__', 'infoTuple')
+        m1 = modulegraph.BaseModule('foo')
+        self.assertEqual(m1.graphident, 'foo')
+        self.assertEqual(m1.identifier, 'foo')
+        self.assertEqual(m1.filename, None)
+        self.assertEqual(m1.packagepath, None)
+
+        m1 = modulegraph.BaseModule('foo', 'bar',  ['a'])
+        self.assertEqual(m1.graphident, 'foo')
+        self.assertEqual(m1.identifier, 'foo')
+        self.assertEqual(m1.filename, 'bar')
+        self.assertEqual(m1.packagepath, ['a'])
+
+class TestModuleGraph (unittest.TestCase):
+    # Test for class modulegraph.modulegraph.ModuleGraph
+    if not hasattr(unittest.TestCase, 'assertIsInstance'):
+        def assertIsInstance(self, obj, types):
+            self.assertTrue(isinstance(obj, types), '%r is not instance of %r'%(obj, types))
+
+    def test_constructor(self):
+        o = modulegraph.ModuleGraph()
+        self.assertTrue(o.path is sys.path)
+        self.assertEqual(o.lazynodes, {})
+        self.assertEqual(o.replace_paths, ())
+        self.assertEqual(o.debug, 0)
+
+        # Stricter tests would be nice, but that requires
+        # better control over what's on sys.path
+        self.assertIsInstance(o.nspackages, dict)
+
+        g = Graph.Graph()
+        o = modulegraph.ModuleGraph(['a', 'b', 'c'], ['modA'], [
+                ('fromA', 'toB'), ('fromC', 'toD')],
+                {
+                    'modA': ['modB', 'modC'],
+                    'modC': ['modE', 'modF'],
+                }, g, 1)
+        self.assertEqual(o.path, ['a', 'b', 'c'])
+        self.assertEqual(o.lazynodes, {
+            'modA': None,
+            'modC': ['modE', 'modF'],
+        })
+        self.assertEqual(o.replace_paths, [('fromA', 'toB'), ('fromC', 'toD')])
+        self.assertEqual(o.nspackages, {})
+        self.assertTrue(o.graph is g)
+        self.assertEqual(o.debug, 1)
+
+    def test_calc_setuptools_nspackages(self):
+        stdlib = [ fn for fn in sys.path if fn.startswith(sys.prefix) and 'site-packages' not in fn ]
+        for subdir in [ nm for nm in os.listdir(TESTDATA) if nm != 'src' ]:
+            graph = modulegraph.ModuleGraph(path=[
+                    os.path.join(TESTDATA, subdir, "parent"),
+                    os.path.join(TESTDATA, subdir, "child"),
+                ] + stdlib)
+
+            pkgs = graph.nspackages
+            self.assertTrue('namedpkg' in pkgs)
+            self.assertEqual(set(pkgs['namedpkg']),
+                    set([
+                        os.path.join(TESTDATA, subdir, "parent", "namedpkg"),
+                        os.path.join(TESTDATA, subdir, "child", "namedpkg"),
+                    ]))
+            self.assertFalse(os.path.exists(os.path.join(TESTDATA, subdir, "parent", "namedpkg", "__init__.py")))
+            self.assertFalse(os.path.exists(os.path.join(TESTDATA, subdir, "child", "namedpkg", "__init__.py")))
+
+    def testImpliedReference(self):
+        graph = modulegraph.ModuleGraph()
+
+        record = []
+        def import_hook(*args):
+            record.append(('import_hook',) + args)
+            return [graph.createNode(modulegraph.Node, args[0])]
+
+        def _safe_import_hook(*args):
+            record.append(('_safe_import_hook',) + args)
+            return [graph.createNode(modulegraph.Node, args[0])]
+
+        graph.import_hook = import_hook
+        graph._safe_import_hook = _safe_import_hook
+
+        n1 = graph.createNode(modulegraph.Node, 'n1')
+        n2 = graph.createNode(modulegraph.Node, 'n2')
+
+        graph.implyNodeReference(n1, n2)
+        outs, ins = map(list, graph.get_edges(n1))
+        self.assertEqual(outs, [n2])
+        self.assertEqual(ins, [])
+
+        self.assertEqual(record, [])
+
+        graph.implyNodeReference(n2, "n3")
+        n3 = graph.findNode('n3')
+        outs, ins = map(list, graph.get_edges(n2))
+        self.assertEqual(outs, [n3])
+        self.assertEqual(ins, [n1])
+        self.assertEqual(record, [
+            ('_safe_import_hook', 'n3', n2, None)
+        ])
+
+
+
+    @expectedFailure
+    def test_findNode(self):
+        self.fail("findNode")
+
+    def test_run_script(self):
+        script = os.path.join(os.path.dirname(TESTDATA), 'script')
+
+        graph = modulegraph.ModuleGraph()
+        master = graph.createNode(modulegraph.Node, 'root')
+        m = graph.run_script(script, master)
+        self.assertEqual(list(graph.get_edges(master)[0])[0], m)
+        self.assertEqual(set(graph.get_edges(m)[0]), set([
+            graph.findNode('sys'),
+            graph.findNode('os'),
+        ]))
+
+    @expectedFailure
+    def test_import_hook(self):
+        self.fail("import_hook")
+
+    def test_determine_parent(self):
+        graph = modulegraph.ModuleGraph()
+        graph.import_hook('os.path', None)
+        graph.import_hook('idlelib', None)
+        graph.import_hook('xml.dom', None)
+
+        for node in graph.nodes():
+            if isinstance(node, modulegraph.Package):
+                break
+        else:
+            self.fail("No package located, should have at least 'os'")
+
+        self.assertIsInstance(node, modulegraph.Package)
+        parent = graph._determine_parent(node)
+        self.assertEqual(parent.identifier, node.identifier)
+        self.assertEqual(parent, graph.findNode(node.identifier))
+        self.assertTrue(isinstance(parent, modulegraph.Package))
+
+        # XXX: Might be a use case for some odd code in determine_parent...
+        #node = modulegraph.Package('encodings')
+        #node.packagepath = parent.packagepath
+        #m = graph._determine_parent(node)
+        #self.assertTrue(m is parent)
+
+        m = graph.findNode('xml')
+        self.assertEqual(graph._determine_parent(m), m)
+
+        m = graph.findNode('xml.dom')
+        self.assertEqual(graph._determine_parent(m), graph.findNode('xml.dom'))
+
+
+    @expectedFailure
+    def test_find_head_package(self):
+        self.fail("find_head_package")
+
+    def test_load_tail(self):
+        # XXX: This test is dodgy!
+        graph = modulegraph.ModuleGraph()
+
+        record = []
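+        # Stub importer: every (partname, fqname, parent) request is
+        # recorded, and imports whose name involves 'raises' pretend to
+        # fail by returning None, which drives _load_tail into its
+        # MissingModule handling below.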
+        def _import_module(partname, fqname, parent):
+            record.append((partname, fqname, parent))
+            if partname == 'raises' or '.raises.' in fqname:
+                return None
+            return modulegraph.Node(fqname)
+
+        graph._import_module = _import_module
+
+        record = []
+        root = modulegraph.Node('root')
+        m = graph._load_tail(root, '')
+        self.assertTrue(m is root)
+        self.assertEqual(record, [
+            ])
+
+        record = []
+        root = modulegraph.Node('root')
+        m = graph._load_tail(root, 'sub')
+        self.assertFalse(m is root)
+        self.assertEqual(record, [
+                ('sub', 'root.sub', root),
+            ])
+
+        record = []
+        root = modulegraph.Node('root')
+        m = graph._load_tail(root, 'sub.sub1')
+        self.assertFalse(m is root)
+        node = modulegraph.Node('root.sub')
+        self.assertEqual(record, [
+                ('sub', 'root.sub', root),
+                ('sub1', 'root.sub.sub1', node),
+            ])
+
+        record = []
+        root = modulegraph.Node('root')
+        m = graph._load_tail(root, 'sub.sub1.sub2')
+        self.assertFalse(m is root)
+        node = modulegraph.Node('root.sub')
+        node2 = modulegraph.Node('root.sub.sub1')
+        self.assertEqual(record, [
+                ('sub', 'root.sub', root),
+                ('sub1', 'root.sub.sub1', node),
+                ('sub2', 'root.sub.sub1.sub2', node2),
+            ])
+
+        n = graph._load_tail(root, 'raises')
+        self.assertIsInstance(n, modulegraph.MissingModule)
+        self.assertEqual(n.identifier, 'root.raises')
+
+        n = graph._load_tail(root, 'sub.raises')
+        self.assertIsInstance(n, modulegraph.MissingModule)
+        self.assertEqual(n.identifier, 'root.sub.raises')
+
+        n = graph._load_tail(root, 'sub.raises.sub')
+        self.assertIsInstance(n, modulegraph.MissingModule)
+        self.assertEqual(n.identifier, 'root.sub.raises.sub')
+
+
+
+    @expectedFailure
+    def test_ensure_fromlist(self):
+        # 1. basic 'from module import name, name'
+        # 2. 'from module import *'
+        # 3. from module import os
+        #    (where 'os' is not a name in 'module',
+        #     should create MissingModule node, and
+        #     should *not* refer to the global os)
+        self.fail("ensure_fromlist")
+
+    @expectedFailure
+    def test_find_all_submodules(self):
+        # 1. basic
+        # 2. no packagepath (basic module)
+        # 3. extensions, python modules
+        # 4. with/without zipfile
+        # 5. files that aren't python modules/extensions
+        self.fail("find_all_submodules")
+
+    @expectedFailure
+    def test_import_module(self):
+        self.fail("import_module")
+
+    @expectedFailure
+    def test_load_module(self):
+        self.fail("load_module")
+
+    @expectedFailure
+    def test_safe_import_hook(self):
+        self.fail("safe_import_hook")
+
+    @expectedFailure
+    def test_scan_code(self):
+        mod = modulegraph.Node('root')
+
+        graph = modulegraph.ModuleGraph()
+        code = compile('', '<test>', 'exec', 0, False)
+        graph.scan_code(code, mod)
+        self.assertEqual(list(graph.nodes()), [])
+
+        node_map = {}
+        def _safe_import(name, mod, fromlist, level):
+            if name in node_map:
+                node = node_map[name]
+            else:
+                node = modulegraph.Node(name)
+            node_map[name] = node
+            return [node]
+
+        graph = modulegraph.ModuleGraph()
+        graph._safe_import_hook = _safe_import
+
+        code = compile(textwrap.dedent('''\
+            import sys
+            import os.path
+
+            def testfunc():
+                import shutil
+            '''), '<test>', 'exec', 0, False)
+        graph.scan_code(code, mod)
+        modules = [node.identifier for node in graph.nodes()]
+        self.assertEqual(set(node_map), set(['sys', 'os.path', 'shutil']))
+
+
+        # from module import a, b, c
+        # from module import *
+        #  both:
+        #   -> with/without globals
+        #   -> with/without modules in globals (e.g,
+        #       from os import * adds dependency to os.path)
+        # from .module import a
+        # from ..module import a
+        #   -> check levels
+        # import name
+        # import a.b
+        #   -> should add dependency to a
+        # try to build case where commented out
+        # code would behave different than current code
+        # (Carbon.SomeMod contains 'import Sibling' seems
+        # to cause difference in real code)
+
+        self.fail("actual test needed")
+
+
+
+    @expectedFailure
+    def test_load_package(self):
+        self.fail("load_package")
+
+    def test_find_module(self):
+        record = []
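+        # mock_finder wraps the real modulegraph.find_module so that each
+        # (name, path) lookup is recorded while still returning the genuine
+        # result.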
+        def mock_finder(name, path):
+            record.append((name, path))
+            return saved_finder(name, path)
+
+        saved_finder = modulegraph.find_module
+        try:
+            modulegraph.find_module = mock_finder
+
+            graph = modulegraph.ModuleGraph()
+            m = graph._find_module('sys', None)
+            self.assertEqual(record, [])
+            self.assertEqual(m, (None, None, ("", "", imp.C_BUILTIN)))
+
+            modulegraph.find_module = saved_finder
+            xml = graph.import_hook("xml")[0]
+            self.assertEqual(xml.identifier, 'xml')
+            modulegraph.find_module = mock_finder
+
+            self.assertRaises(ImportError, graph._find_module, 'xml', None)
+
+            self.assertEqual(record, [])
+            m = graph._find_module('shutil', None)
+            self.assertEqual(record, [
+                ('shutil', graph.path),
+            ])
+            self.assertTrue(isinstance(m, tuple))
+            self.assertEqual(len(m), 3)
+            self.assertTrue(hasattr(m[0], 'read'))
+            self.assertIsInstance(m[0].read(), str)
+            srcfn = shutil.__file__
+            if srcfn.endswith('.pyc'):
+                srcfn = srcfn[:-1]
+            self.assertEqual(m[1], srcfn)
+            self.assertEqual(m[2], ('.py', 'rU', imp.PY_SOURCE))
+            m[0].close()
+
+            m2 = graph._find_module('shutil', None)
+            self.assertEqual(m[1:], m2[1:])
+            m2[0].close()
+
+
+            record[:] = []
+            m = graph._find_module('sax', xml.packagepath, xml)
+            self.assertEqual(m,
+                    (None, os.path.join(os.path.dirname(xml.filename), 'sax'),
+                    ('', '', imp.PKG_DIRECTORY)))
+            self.assertEqual(record, [
+                ('sax', xml.packagepath),
+            ])
+            if m[0] is not None: m[0].close()
+
+        finally:
+            modulegraph.find_module = saved_finder
+
+    @expectedFailure
+    def test_create_xref(self):
+        self.fail("create_xref")
+
+    @expectedFailure
+    def test_itergraphreport(self):
+        self.fail("itergraphreport")
+
+    def test_report(self):
+        graph = modulegraph.ModuleGraph()
+
+        saved_stdout = sys.stdout
+        try:
+            fp = sys.stdout = StringIO()
+            graph.report()
+            lines = fp.getvalue().splitlines()
+            fp.close()
+
+            self.assertEqual(len(lines), 3)
+            self.assertEqual(lines[0], '')
+            self.assertEqual(lines[1], 'Class           Name                      File')
+            self.assertEqual(lines[2], '-----           ----                      ----')
+
+            fp = sys.stdout = StringIO()
+            graph._safe_import_hook('os', None, ())
+            graph._safe_import_hook('sys', None, ())
+            graph._safe_import_hook('nomod', None, ())
+            graph.report()
+            lines = fp.getvalue().splitlines()
+            fp.close()
+
+            self.assertEqual(lines[0], '')
+            self.assertEqual(lines[1], 'Class           Name                      File')
+            self.assertEqual(lines[2], '-----           ----                      ----')
+            expected = []
+            for n in graph.flatten():
+                if n.filename:
+                    expected.append([type(n).__name__, n.identifier, n.filename])
+                else:
+                    expected.append([type(n).__name__, n.identifier])
+
+            expected.sort()
+            actual = [item.split() for item in lines[3:]]
+            actual.sort()
+            self.assertEqual(expected, actual)
+
+
+        finally:
+            sys.stdout = saved_stdout
+
+    def test_graphreport(self):
+
+        def my_iter(flatpackages="packages"):
+            yield "line1\n"
+            yield str(flatpackages) + "\n"
+            yield "line2\n"
+
+        graph = modulegraph.ModuleGraph()
+        graph.itergraphreport = my_iter
+
+        fp = StringIO()
+        graph.graphreport(fp)
+        self.assertEqual(fp.getvalue(), "line1\n()\nline2\n")
+
+        fp = StringIO()
+        graph.graphreport(fp, "deps")
+        self.assertEqual(fp.getvalue(), "line1\ndeps\nline2\n")
+
+        saved_stdout = sys.stdout
+        try:
+            sys.stdout = fp = StringIO()
+            graph.graphreport()
+            self.assertEqual(fp.getvalue(), "line1\n()\nline2\n")
+
+        finally:
+            sys.stdout = saved_stdout
+
+
+    def test_replace_paths_in_code(self):
+        graph = modulegraph.ModuleGraph(replace_paths=[
+                ('path1', 'path2'),
+                ('path3/path5', 'path4'),
+            ])
+
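+        # The cases below check that _replace_paths_in_code only rewrites
+        # co_filename when it starts with a configured prefix matched on
+        # whole path components ('path3/path5.py' is left alone while
+        # 'path3/path5/index.py' becomes 'path4/index.py'), and that the
+        # rewrite recurses into nested code objects in co_consts.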
+        co = compile(textwrap.dedent("""
+        [x for x in range(4)]
+        """), "path4/index.py", 'exec', 0, 1)
+        co = graph._replace_paths_in_code(co)
+        self.assertEqual(co.co_filename, 'path4/index.py')
+
+        co = compile(textwrap.dedent("""
+        [x for x in range(4)]
+        (x for x in range(4))
+        """), "path1/index.py", 'exec', 0, 1)
+        self.assertEqual(co.co_filename, 'path1/index.py')
+        co = graph._replace_paths_in_code(co)
+        self.assertEqual(co.co_filename, 'path2/index.py')
+        for c in co.co_consts:
+            if isinstance(c, type(co)):
+                self.assertEqual(c.co_filename, 'path2/index.py')
+
+        co = compile(textwrap.dedent("""
+        [x for x in range(4)]
+        """), "path3/path4/index.py", 'exec', 0, 1)
+        co = graph._replace_paths_in_code(co)
+        self.assertEqual(co.co_filename, 'path3/path4/index.py')
+
+        co = compile(textwrap.dedent("""
+        [x for x in range(4)]
+        """), "path3/path5.py", 'exec', 0, 1)
+        co = graph._replace_paths_in_code(co)
+        self.assertEqual(co.co_filename, 'path3/path5.py')
+
+        co = compile(textwrap.dedent("""
+        [x for x in range(4)]
+        """), "path3/path5/index.py", 'exec', 0, 1)
+        co = graph._replace_paths_in_code(co)
+        self.assertEqual(co.co_filename, 'path4/index.py')
+
+    def test_createReference(self):
+        graph = modulegraph.ModuleGraph()
+        n1 = modulegraph.Node('n1')
+        n2 = modulegraph.Node('n2')
+        graph.addNode(n1)
+        graph.addNode(n2)
+
+        graph.createReference(n1, n2)
+        outs, ins = map(list, graph.get_edges(n1))
+        self.assertEqual(outs, [n2])
+        self.assertEqual(ins, [])
+        outs, ins = map(list, graph.get_edges(n2))
+        self.assertEqual(outs, [])
+        self.assertEqual(ins, [n1])
+
+        e = graph.graph.edge_by_node('n1', 'n2')
+        self.assertIsInstance(e, int)
+        self.assertEqual(graph.graph.edge_data(e), 'direct')
+
+    def test_create_xref(self):
+        # XXX: This test is far from optimal, it just ensures
+        # that all code is exercised to catch small bugs and
+        # py3k issues without verifying that the code actually
+        # works....
+        graph = modulegraph.ModuleGraph()
+        if __file__.endswith('.py'):
+            graph.run_script(__file__)
+        else:
+            graph.run_script(__file__[:-1])
+
+        graph.import_hook('os')
+        graph.import_hook('xml.etree')
+        graph.import_hook('unittest')
+
+        fp = StringIO()
+        graph.create_xref(out=fp)
+
+        data = fp.getvalue()
+        r = ET.fromstring(data)
+
+    def test_itergraphreport(self):
+        # XXX: This test is far from optimal, it just ensures
+        # that all code is exercised to catch small bugs and
+        # py3k issues without verifying that the code actually
+        # works....
+        graph = modulegraph.ModuleGraph()
+        if __file__.endswith('.py'):
+            graph.run_script(__file__)
+        else:
+            graph.run_script(__file__[:-1])
+        graph.import_hook('os')
+        graph.import_hook('xml.etree')
+        graph.import_hook('unittest')
+        graph.import_hook('distutils.command.build')
+
+        fp = StringIO()
+        list(graph.itergraphreport())
+
+        # XXX: flatpackages isn't implemented and is undocumented, hence it
+        # is unclear what this is intended to be...
+        #list(graph.itergraphreport(flatpackages=...))
+
+
+
+
+class CompatTests (unittest.TestCase):
+    def test_Bchr(self):
+        v = modulegraph._Bchr(ord('A'))
+        if sys.version_info[0] == 2:
+            self.assertTrue(isinstance(v, bytes))
+            self.assertEqual(v, b'A')
+        else:
+            self.assertTrue(isinstance(v, int))
+            self.assertEqual(v, ord('A'))
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_pep420_nspkg.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_pep420_nspkg.py
new file mode 100644
index 0000000..a20c981
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_pep420_nspkg.py
@@ -0,0 +1,220 @@
+"""
+Tests that deal with pep420 namespace packages.
+
+PEP 420 is new in Python 3.3
+"""
+import os
+import shutil
+import sys
+import subprocess
+import textwrap
+
+if sys.version_info[:2] <= (2,6):
+    import unittest2 as unittest
+else:
+    import unittest
+
+from modulegraph import modulegraph
+
+gRootDir = os.path.dirname(os.path.abspath(__file__))
+gSrcDir = os.path.join(gRootDir, 'testpkg-pep420-namespace')
+
+if sys.version_info[:2] >= (3,3):
+
+    class TestPythonBehaviour (unittest.TestCase):
+        def importModule(self, name):
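+            # Runs a small probe script in a subprocess with both
+            # namespace-package directories added via site.addsitedir and
+            # returns the printed __name__, failing the test when the
+            # subprocess exits non-zero.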
+            test_dir1 = os.path.join(gSrcDir, 'path1')
+            test_dir2 = os.path.join(gSrcDir, 'path2')
+            if '.' in name:
+                script = textwrap.dedent("""\
+                    import site
+                    site.addsitedir(%r)
+                    site.addsitedir(%r)
+                    try:
+                        import %s
+                    except ImportError:
+                        import %s
+                    print (%s.__name__)
+                """) %(test_dir1, test_dir2, name, name.rsplit('.', 1)[0], name)
+            else:
+                script = textwrap.dedent("""\
+                    import site
+                    site.addsitedir(%r)
+                    site.addsitedir(%r)
+                    import %s
+                    print (%s.__name__)
+                """) %(test_dir1, test_dir2, name, name)
+
+            p = subprocess.Popen([sys.executable, '-c', script],
+                    stdout=subprocess.PIPE,
+                    stderr=subprocess.STDOUT,
+                    cwd=os.path.join(
+                        os.path.dirname(os.path.abspath(__file__)),
+                        'testpkg-relimport'),
+            )
+            data = p.communicate()[0]
+            if sys.version_info[0] != 2:
+                data = data.decode('UTF-8')
+            data = data.strip()
+            if data.endswith(' refs]'):
+                data = data.rsplit('\n', 1)[0].strip()
+
+            sts = p.wait()
+
+            if sts != 0:
+                print (data)
+                self.fail("import of %r failed"%(name,))
+
+            return data
+
+        def testToplevel(self):
+            m = self.importModule('package.sub1')
+            self.assertEqual(m, 'package.sub1')
+
+            m = self.importModule('package.sub2')
+            self.assertEqual(m, 'package.sub2')
+
+        def testSub(self):
+            m = self.importModule('package.subpackage.sub')
+            self.assertEqual(m, 'package.subpackage.sub')
+
+            m = self.importModule('package.nspkg.mod')
+            self.assertEqual(m, 'package.nspkg.mod')
+
+    class TestModuleGraphImport (unittest.TestCase):
+        if not hasattr(unittest.TestCase, 'assertIsInstance'):
+            def assertIsInstance(self, value, types):
+                if not isinstance(value, types):
+                    self.fail("%r is not an instance of %r", value, types)
+
+        def setUp(self):
+            self.mf = modulegraph.ModuleGraph(path=[
+                    os.path.join(gSrcDir, 'path1'),
+                    os.path.join(gSrcDir, 'path2'),
+                ] + sys.path)
+
+
+        def testRootPkg(self):
+            self.mf.import_hook('package')
+
+            node = self.mf.findNode('package')
+            self.assertIsInstance(node, modulegraph.NamespacePackage)
+            self.assertEqual(node.identifier, 'package')
+            self.assertEqual(node.filename, '-')
+
+        def testRootPkgModule(self):
+            self.mf.import_hook('package.sub1')
+
+            node = self.mf.findNode('package.sub1')
+            self.assertIsInstance(node, modulegraph.SourceModule)
+            self.assertEqual(node.identifier, 'package.sub1')
+
+            self.mf.import_hook('package.sub2')
+            node = self.mf.findNode('package.sub2')
+            self.assertIsInstance(node, modulegraph.SourceModule)
+            self.assertEqual(node.identifier, 'package.sub2')
+
+        def testSubRootPkgModule(self):
+            self.mf.import_hook('package.subpackage.sub')
+
+            node = self.mf.findNode('package.subpackage.sub')
+            self.assertIsInstance(node, modulegraph.SourceModule)
+            self.assertEqual(node.identifier, 'package.subpackage.sub')
+
+            node = self.mf.findNode('package')
+            self.assertIsInstance(node, modulegraph.NamespacePackage)
+
+            self.mf.import_hook('package.nspkg.mod')
+            node = self.mf.findNode('package.nspkg.mod')
+            self.assertIsInstance(node, modulegraph.SourceModule)
+            self.assertEqual(node.identifier, 'package.nspkg.mod')
+
+else:
+    # Check that PEP 420 is not implemented in python 3.2 and earlier
+    # (and that modulegraph also doesn't do this)
+
+    class TestPythonBehaviour (unittest.TestCase):
+        def importModule(self, name):
+            test_dir1 = os.path.join(gSrcDir, 'path1')
+            test_dir2 = os.path.join(gSrcDir, 'path2')
+            if '.' in name:
+                script = textwrap.dedent("""\
+                    import site
+                    site.addsitedir(%r)
+                    site.addsitedir(%r)
+                    try:
+                        import %s
+                    except ImportError:
+                        import %s
+                    print (%s.__name__)
+                """) %(test_dir1, test_dir2, name, name.rsplit('.', 1)[0], name)
+            else:
+                script = textwrap.dedent("""\
+                    import site
+                    site.addsitedir(%r)
+                    site.addsitedir(%r)
+                    import %s
+                    print (%s.__name__)
+                """) %(test_dir1, test_dir2, name, name)
+
+            p = subprocess.Popen([sys.executable, '-c', script],
+                    stdout=subprocess.PIPE,
+                    stderr=subprocess.STDOUT,
+                    cwd=os.path.join(
+                        os.path.dirname(os.path.abspath(__file__)),
+                        'testpkg-relimport'),
+            )
+            data = p.communicate()[0]
+            if sys.version_info[0] != 2:
+                data = data.decode('UTF-8')
+            data = data.strip()
+            if data.endswith(' refs]'):
+                data = data.rsplit('\n', 1)[0].strip()
+
+            sts = p.wait()
+
+            if sts != 0:
+                raise ImportError(name)
+
+            return data
+
+        def testToplevel(self):
+            m = self.importModule('sys')
+            self.assertEqual(m, 'sys')
+
+            self.assertRaises(ImportError, self.importModule, 'package.sub1')
+            self.assertRaises(ImportError, self.importModule, 'package.sub2')
+
+        def testSub(self):
+            self.assertRaises(ImportError, self.importModule, 'package.subpackage.sub')
+
+    class TestModuleGraphImport (unittest.TestCase):
+        if not hasattr(unittest.TestCase, 'assertIsInstance'):
+            def assertIsInstance(self, value, types):
+                if not isinstance(value, types):
+                    self.fail("%r is not an instance of %r", value, types)
+
+        def setUp(self):
+            self.mf = modulegraph.ModuleGraph(path=[
+                    os.path.join(gSrcDir, 'path1'),
+                    os.path.join(gSrcDir, 'path2'),
+                ] + sys.path)
+
+
+        def testRootPkg(self):
+            self.assertRaises(ImportError, self.mf.import_hook, 'package')
+
+            node = self.mf.findNode('package')
+            self.assertIs(node, None)
+
+        def testRootPkgModule(self):
+            self.assertRaises(ImportError, self.mf.import_hook, 'package.sub1')
+
+            node = self.mf.findNode('package.sub1')
+            self.assertIs(node, None)
+
+            node = self.mf.findNode('package.sub2')
+            self.assertIs(node, None)
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_pycompat_pkg.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_pycompat_pkg.py
new file mode 100644
index 0000000..b5dcdb7
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_pycompat_pkg.py
@@ -0,0 +1,60 @@
+"""
+Test for import machinery
+"""
+import unittest
+import sys
+import textwrap
+import subprocess
+import os
+from modulegraph import modulegraph
+
+class TestModuleGraphImport (unittest.TestCase):
+    if not hasattr(unittest.TestCase, 'assertIsInstance'):
+        def assertIsInstance(self, value, types):
+            if not isinstance(value, types):
+                self.fail("%r is not an instance of %r"%(value, types))
+
+    def test_compat(self):
+        root = os.path.join(
+                os.path.dirname(os.path.abspath(__file__)),
+                'testpkg-compatmodule')
+        mf = modulegraph.ModuleGraph(path=[ root ] + sys.path)
+        mf.import_hook('pkg.api')
+
+        node = mf.findNode('pkg')
+        self.assertIsInstance(node, modulegraph.Package)
+
+        node = mf.findNode('pkg.api')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+
+        if sys.version_info[0] == 2:
+            node = mf.findNode('pkg.api2')
+            self.assertIsInstance(node, modulegraph.SourceModule)
+
+            node = mf.findNode('pkg.api3')
+            self.assertIsInstance(node, modulegraph.InvalidSourceModule)
+
+            node = mf.findNode('http.client')
+            self.assertIs(node, None)
+
+            node = mf.findNode('urllib2')
+            self.assertIsInstance(node, modulegraph.SourceModule)
+
+        else:
+            node = mf.findNode('pkg.api2')
+            self.assertIsInstance(node, modulegraph.InvalidSourceModule)
+
+            node = mf.findNode('pkg.api3')
+            self.assertIsInstance(node, modulegraph.SourceModule)
+
+            node = mf.findNode('http.client')
+            self.assertIsInstance(node, modulegraph.SourceModule)
+
+            node = mf.findNode('urllib2')
+            self.assertIs(node, None)
+
+
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_relimport2.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_relimport2.py
new file mode 100644
index 0000000..0a465a2
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_relimport2.py
@@ -0,0 +1,45 @@
+"""
+Test for import machinery
+"""
+import unittest
+import sys
+import textwrap
+import subprocess
+import os
+from modulegraph import modulegraph
+
+class TestModuleGraphImport (unittest.TestCase):
+    if not hasattr(unittest.TestCase, 'assertIsInstance'):
+        def assertIsInstance(self, value, types):
+            if not isinstance(value, types):
+                self.fail("%r is not an instance of %r"%(value, types))
+
+    def setUp(self):
+        self.root = os.path.join(
+                os.path.dirname(os.path.abspath(__file__)),
+                'testpkg-relimport2')
+        self.mf = modulegraph.ModuleGraph(path=[ self.root ] + sys.path)
+
+
+    def test_init_as_script(self):
+        self.mf.run_script(os.path.join(self.root, 'pkg/__init__.py'))
+        n = self.mf.findNode('mod1')
+        self.assertIs(n, None)
+
+        n = self.mf.findNode('mod2')
+        self.assertIsInstance(n, modulegraph.MissingModule)
+
+    def test_subpkg_bad_import(self):
+        self.mf.import_hook('pkg.sub')
+
+        n = self.mf.findNode('toplevel')
+        self.assertIs(n, None)
+
+        n = self.mf.findNode('pkg.mod1')
+        self.assertIsInstance(n, modulegraph.SourceModule)
+
+        n = self.mf.findNode('pkg.mod3')
+        self.assertIsInstance(n, modulegraph.SourceModule)
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_setuptools_nspkg.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_setuptools_nspkg.py
new file mode 100644
index 0000000..87f845c
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_setuptools_nspkg.py
@@ -0,0 +1,147 @@
+"""
+Tests that deal with setuptools namespace
+packages, and in particular the installation
+flavour used by pip
+"""
+import os
+import shutil
+import sys
+import subprocess
+import unittest
+import textwrap
+
+from modulegraph import modulegraph
+
+gRootDir = os.path.dirname(os.path.abspath(__file__))
+gSrcDir = os.path.join(gRootDir, 'testpkg-setuptools-namespace')
+
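+# Installs the fixture package from testpkg-setuptools-namespace into
+# test_dir the way pip does (--single-version-externally-managed with a
+# --record file), which leaves the namespace package without an
+# __init__.py; that is the layout the nspackages handling has to cope with.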
+def install_testpkg(test_dir):
+    p = subprocess.Popen([
+        sys.executable, 'setup.py', 'install',
+            '--install-lib', test_dir,
+            '--single-version-externally-managed',
+            '--record', os.path.join(test_dir, 'record.lst'),
+        ], cwd=gSrcDir, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+
+    data = p.communicate()[0]
+
+    exit = p.wait()
+    return exit
+
+
+class TestPythonBehaviour (unittest.TestCase):
+    def setUp(self):
+        test_dir = os.path.join(gRootDir, 'test.dir')
+        if os.path.exists(test_dir):
+            shutil.rmtree(test_dir)
+
+        os.mkdir(test_dir)
+        exit = install_testpkg(test_dir)
+        self.assertEqual(exit, 0)
+
+    def tearDown(self):
+        test_dir = os.path.join(gRootDir, 'test.dir')
+        if os.path.exists(test_dir):
+            shutil.rmtree(test_dir)
+
+    def importModule(self, name):
+        test_dir = os.path.join(gRootDir, 'test.dir')
+        if '.' in name:
+            script = textwrap.dedent("""\
+                import site
+                site.addsitedir(%r)
+                try:
+                    import %s
+                except ImportError:
+                    import %s
+                print (%s.__name__)
+            """) %(test_dir, name, name.rsplit('.', 1)[0], name)
+        else:
+            script = textwrap.dedent("""\
+                import site
+                site.addsitedir(%r)
+                import %s
+                print (%s.__name__)
+            """) %(test_dir, name, name)
+
+        p = subprocess.Popen([sys.executable, '-c', script],
+                stdout=subprocess.PIPE,
+                stderr=subprocess.STDOUT,
+                cwd=os.path.join(
+                    os.path.dirname(os.path.abspath(__file__)),
+                    'testpkg-relimport'),
+        )
+        data = p.communicate()[0]
+        if sys.version_info[0] != 2:
+            data = data.decode('UTF-8')
+        data = data.strip()
+        if data.endswith(' refs]'):
+            data = data.rsplit('\n', 1)[0].strip()
+
+        sts = p.wait()
+
+        if sts != 0:
+            print (data)
+            self.fail("import of %r failed"%(name,))
+
+        return data
+
+    def testToplevel(self):
+        m = self.importModule('nspkg.module')
+        self.assertEqual(m, 'nspkg.module')
+
+    def testSub(self):
+        m = self.importModule('nspkg.nssubpkg.sub')
+        self.assertEqual(m, 'nspkg.nssubpkg.sub')
+
+class TestModuleGraphImport (unittest.TestCase):
+    if not hasattr(unittest.TestCase, 'assertIsInstance'):
+        def assertIsInstance(self, value, types):
+            if not isinstance(value, types):
+                self.fail("%r is not an instance of %r", value, types)
+
+    def setUp(self):
+        test_dir = os.path.join(gRootDir, 'test.dir')
+        if os.path.exists(test_dir):
+            shutil.rmtree(test_dir)
+
+        os.mkdir(test_dir)
+        exit = install_testpkg(test_dir)
+        self.assertEqual(exit, 0)
+
+        self.mf = modulegraph.ModuleGraph(path=[ test_dir ] + sys.path)
+
+    def tearDown(self):
+        test_dir = os.path.join(gRootDir, 'test.dir')
+        if os.path.exists(test_dir):
+            shutil.rmtree(test_dir)
+
+    def testRootPkg(self):
+        self.mf.import_hook('nspkg')
+
+        node = self.mf.findNode('nspkg')
+        self.assertIsInstance(node, modulegraph.NamespacePackage)
+        self.assertEqual(node.identifier, 'nspkg')
+        self.assertEqual(node.filename, '-')
+
+    def testRootPkgModule(self):
+        self.mf.import_hook('nspkg.module')
+
+        node = self.mf.findNode('nspkg.module')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        self.assertEqual(node.identifier, 'nspkg.module')
+
+    def testSubRootPkgModule(self):
+        self.mf.import_hook('nspkg.nssubpkg.sub')
+
+        node = self.mf.findNode('nspkg.nssubpkg.sub')
+        self.assertIsInstance(node, modulegraph.SourceModule)
+        self.assertEqual(node.identifier, 'nspkg.nssubpkg.sub')
+
+
+        node = self.mf.findNode('nspkg')
+        self.assertIsInstance(node, modulegraph.NamespacePackage)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_util.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_util.py
new file mode 100644
index 0000000..eafe43e
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_util.py
@@ -0,0 +1,59 @@
+import unittest
+import encodings
+import encodings.aliases
+from modulegraph import util
+import sys
+
+try:
+    from io import BytesIO
+except ImportError:
+    from cStringIO import StringIO as BytesIO
+
+class TestUtil (unittest.TestCase):
+    def test_imp_find_module(self):
+        fn = util.imp_find_module('encodings.aliases')[1]
+        self.assertTrue(encodings.aliases.__file__.startswith(fn))
+
+    def test_imp_walk(self):
+        imps = list(util.imp_walk('encodings.aliases'))
+        self.assertEqual(len(imps), 2)
+
+        self.assertEqual(imps[0][0], 'encodings')
+        self.assertTrue(encodings.__file__.startswith(imps[0][1][1]))
+
+        self.assertEqual(imps[1][0], 'aliases')
+        self.assertTrue(encodings.aliases.__file__.startswith(imps[1][1][1]))
+
+        # Close all files, avoid warning by unittest
+        for i in imps:
+            if i[1][0] is not None:
+                i[1][0].close()
+
+
+    def test_guess_encoding(self):
+        fp = BytesIO(b"# coding: utf-8")
+        self.assertEqual(util.guess_encoding(fp), "utf-8")
+
+        fp = BytesIO(b"\n# coding: utf-8")
+        self.assertEqual(util.guess_encoding(fp), "utf-8")
+
+        fp = BytesIO(b"# coding: latin-1")
+        self.assertEqual(util.guess_encoding(fp), "latin-1")
+
+        fp = BytesIO(b"\n# coding: latin-1")
+        self.assertEqual(util.guess_encoding(fp), "latin-1")
+
+        fp = BytesIO(b"#!/usr/bin/env/python\n# vim: set fileencoding=latin-1 :")
+        self.assertEqual(util.guess_encoding(fp), "latin-1")
+
+        fp = BytesIO(b"\n\n\n# coding: latin-1")
+        if sys.version_info[0] == 2:
+            self.assertEqual(util.guess_encoding(fp), "ascii")
+        else:
+            self.assertEqual(util.guess_encoding(fp), "utf-8")
+
+        del fp
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_zipio.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_zipio.py
new file mode 100644
index 0000000..81000bd
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/test_zipio.py
@@ -0,0 +1,218 @@
+from modulegraph import zipio
+import os
+import time
+import sys
+
+if sys.version_info[:2] <= (2,6):
+    import unittest2 as unittest
+
+else:
+    import unittest
+
+TESTDATA=os.path.join(
+        os.path.dirname(os.path.abspath(__file__)),
+        'testdata')
+
+class TestModuleGraph (unittest.TestCase):
+    def test_locating(self):
+        # Private function
+        self.assertEqual(zipio._locate('/usr/bin/ditto'), ('/usr/bin/ditto', None))
+        self.assertEqual(zipio._locate('/usr/bin/ditto/bar'), ('/usr/bin/ditto', 'bar'))
+        self.assertEqual(zipio._locate('/usr/bin/ditto/foo/bar///bar/'), ('/usr/bin/ditto', 'foo/bar/bar'))
+        self.assertEqual(zipio._locate('/usr/bin/ditto///foo/bar///bar/'), ('/usr/bin/ditto', 'foo/bar/bar'))
+        self.assertRaises(IOError, zipio._locate, '/usr/bin/ditto.bar')
+        self.assertRaises(IOError, zipio._locate, '/foo/bar/baz.txt')
+
+    def test_open(self):
+        # 1. Regular file
+        fp = zipio.open(os.path.join(TESTDATA, 'test.txt'), 'r')
+        data = fp.read()
+        fp.close()
+        self.assertEqual(data, 'This is test.txt\n')
+
+        if sys.version_info[0] == 3:
+            fp = zipio.open(os.path.join(TESTDATA, 'test.txt'), 'rb')
+            data = fp.read()
+            fp.close()
+            self.assertEqual(data, b'This is test.txt\n')
+
+        # 2. File inside zipfile
+        fp = zipio.open(os.path.join(TESTDATA, 'zipped.egg', 'test.txt'), 'r')
+        data = fp.read()
+        fp.close()
+        self.assertEqual(data, 'Zipped up test.txt\n')
+
+        if sys.version_info[0] == 3:
+            fp = zipio.open(os.path.join(TESTDATA, 'zipped.egg', 'test.txt'), 'rb')
+            data = fp.read()
+            fp.close()
+            self.assertEqual(data, b'Zipped up test.txt\n')
+
+        # 3. EXC: Directory inside zipfile
+        self.assertRaises(IOError, zipio.open, os.path.join(TESTDATA, 'zipped.egg', 'subdir'))
+        self.assertRaises(IOError, zipio.open, os.path.join(TESTDATA, 'zipped.egg', 'subdir2'))
+        self.assertRaises(IOError, zipio.open, os.path.join(TESTDATA, 'zipped.egg', 'subdir2/subdir'))
+        self.assertRaises(IOError, zipio.open, os.path.join(TESTDATA, 'zipped.egg', 'subdir3'))
+        # TODO: Add subdir4/file.txt, without directory entry
+        self.assertRaises(IOError, zipio.open, os.path.join(TESTDATA, 'zipped.egg', 'subdir4'))
+
+        # 4. EXC: No such file in zipfile
+        self.assertRaises(IOError, zipio.open, os.path.join(TESTDATA, 'zipped.egg', 'no-such-file'))
+        self.assertRaises(IOError, zipio.open, os.path.join(TESTDATA, 'zipped.egg', 'subdir/no-such-file'))
+
+        # 5. EXC: No such regular file
+        self.assertRaises(IOError, zipio.open, os.path.join(TESTDATA, 'no-such-file.txt'))
+
+        # 6. EXC: Open r/w
+        self.assertRaises(IOError, zipio.open, os.path.join(TESTDATA, 'test.txt'), 'w')
+        self.assertRaises(IOError, zipio.open, os.path.join(TESTDATA, 'test.txt'), 'a')
+        self.assertRaises(IOError, zipio.open, os.path.join(TESTDATA, 'test.txt'), 'r+')
+        self.assertRaises(IOError, zipio.open, os.path.join(TESTDATA, 'test.txt'), 'w+')
+        self.assertRaises(IOError, zipio.open, os.path.join(TESTDATA, 'test.txt'), 'a+')
+
+    def test_listdir(self):
+        # 1. Regular directory
+        self.assertEqual(set(os.listdir(os.path.join(TESTDATA, 'subdir'))), set(['file1.txt', 'file2.txt']))
+
+        # 2. Zipfile with files in directory
+        self.assertEqual(set(zipio.listdir(os.path.join(TESTDATA, 'zipped.egg'))), set([
+            'test.txt', 'subdir', 'subdir2', 'subdir3', 'subdir4']))
+
+        # 3. Zipfile with files in subdirectory
+        self.assertEqual(set(zipio.listdir(os.path.join(TESTDATA, 'zipped.egg', 'subdir'))), set(['file1.txt', 'file2.txt']))
+        self.assertEqual(set(zipio.listdir(os.path.join(TESTDATA, 'zipped.egg', 'subdir2'))), set(['subdir']))
+        self.assertEqual(set(zipio.listdir(os.path.join(TESTDATA, 'zipped.egg', 'subdir4', 'subdir6'))), set(['mydir']))
+
+        # 4. Zipfile with entry for directory, no files
+        self.assertEqual(set(zipio.listdir(os.path.join(TESTDATA, 'zipped.egg', 'subdir3'))), set([]))
+
+        # 5. EXC: Zipfile without directory
+        self.assertRaises(IOError, zipio.listdir, os.path.join(TESTDATA, 'zipped.egg', 'subdir10'))
+
+        # 6. EXC: Regular directory doesn't exist
+        self.assertRaises(IOError, zipio.listdir, os.path.join(TESTDATA, 'subdir10'))
+
+    def test_isfile(self):
+        self.assertTrue(zipio.isfile(os.path.join(TESTDATA, 'test.txt')))
+        self.assertFalse(zipio.isfile(os.path.join(TESTDATA, 'subdir')))
+        self.assertRaises(IOError, zipio.isfile, os.path.join(TESTDATA, 'no-such-file'))
+        self.assertFalse(zipio.isfile(os.path.join(TESTDATA, 'zipped.egg')))
+        self.assertFalse(zipio.isfile(os.path.join(TESTDATA, 'zipped.egg', 'subdir4')))
+        self.assertTrue(zipio.isfile(os.path.join(TESTDATA, 'zipped.egg', 'test.txt')))
+        self.assertFalse(zipio.isfile(os.path.join(TESTDATA, 'zipped.egg', 'subdir')))
+        self.assertRaises(IOError, zipio.isfile, os.path.join(TESTDATA, 'zipped.egg', 'no-such-file'))
+        self.assertTrue(zipio.isfile(os.path.join(TESTDATA, 'zipped.egg', 'subdir2', 'subdir', 'file1.txt')))
+
+    def test_isdir(self):
+        self.assertTrue(zipio.isdir(TESTDATA))
+        self.assertFalse(zipio.isdir(os.path.join(TESTDATA, 'test.txt')))
+        self.assertTrue(zipio.isdir(os.path.join(TESTDATA, 'zipped.egg')))
+        self.assertTrue(zipio.isdir(os.path.join(TESTDATA, 'zipped.egg', 'subdir')))
+        self.assertTrue(zipio.isdir(os.path.join(TESTDATA, 'zipped.egg', 'subdir2/subdir')))
+        self.assertTrue(zipio.isdir(os.path.join(TESTDATA, 'zipped.egg', 'subdir4')))
+        self.assertFalse(zipio.isdir(os.path.join(TESTDATA, 'zipped.egg', 'subdir4', 'file.txt')))
+        self.assertRaises(IOError, zipio.isdir, os.path.join(TESTDATA, 'no-such-file'))
+        self.assertRaises(IOError, zipio.isdir, os.path.join(TESTDATA, 'zipped.egg', 'no-such-file'))
+        self.assertRaises(IOError, zipio.isdir, os.path.join(TESTDATA, 'zipped.egg', 'subdir', 'no-such-file'))
+
+    def test_islink(self):
+        fn = os.path.join(TESTDATA, 'symlink')
+        os.symlink('test.txt', fn)
+        try:
+            self.assertTrue(zipio.islink(fn))
+
+        finally:
+            os.unlink(fn)
+
+        self.assertFalse(zipio.islink(os.path.join(TESTDATA, 'test.txt')))
+        self.assertFalse(zipio.islink(os.path.join(TESTDATA, 'subdir')))
+        self.assertFalse(zipio.islink(os.path.join(TESTDATA, 'zipped.egg')))
+        self.assertFalse(zipio.islink(os.path.join(TESTDATA, 'zipped.egg/subdir')))
+        self.assertFalse(zipio.islink(os.path.join(TESTDATA, 'zipped.egg/subdir4')))
+        self.assertFalse(zipio.islink(os.path.join(TESTDATA, 'zipped.egg/test.txt')))
+        self.assertFalse(zipio.islink(os.path.join(TESTDATA, 'zipped.egg/subdir/file1.txt')))
+
+        self.assertRaises(IOError, zipio.islink, os.path.join(TESTDATA, 'no-such-file'))
+        self.assertRaises(IOError, zipio.islink, os.path.join(TESTDATA, 'zipped.egg', 'no-such-file'))
+
+
+    def test_readlink(self):
+        fn = os.path.join(TESTDATA, 'symlink')
+        os.symlink('test.txt', fn)
+        try:
+            self.assertEqual(zipio.readlink(fn), 'test.txt')
+
+        finally:
+            os.unlink(fn)
+
+        self.assertRaises(OSError, zipio.readlink, os.path.join(TESTDATA, 'test.txt'))
+        self.assertRaises(OSError, zipio.readlink, os.path.join(TESTDATA, 'subdir'))
+        self.assertRaises(OSError, zipio.readlink, os.path.join(TESTDATA, 'zipped.egg'))
+        self.assertRaises(OSError, zipio.readlink, os.path.join(TESTDATA, 'zipped.egg', 'subdir4'))
+        self.assertRaises(OSError, zipio.readlink, os.path.join(TESTDATA, 'zipped.egg', 'no-such-file'))
+        self.assertRaises(OSError, zipio.readlink, os.path.join(TESTDATA, 'zipped.egg', 'subdir/no-such-file'))
+
+    def test_getmtime(self):
+        fn = os.path.join(TESTDATA, 'test.txt')
+        self.assertEqual(os.path.getmtime(fn), zipio.getmtime(fn))
+
+        fn = os.path.join(TESTDATA, 'zipped.egg')
+        self.assertEqual(os.path.getmtime(fn), zipio.getmtime(fn))
+
+        fn = os.path.join(TESTDATA, 'zipped.egg/test.txt')
+        self.assertIn(zipio.getmtime(fn), (1300193680.0, 1300222480.0))
+
+        fn = os.path.join(TESTDATA, 'zipped.egg/subdir')
+        self.assertIn(zipio.getmtime(fn), (1300193890.0, 1300222690.0))
+
+        fn = os.path.join(TESTDATA, 'zipped.egg/subdir4')
+        self.assertEqual(zipio.getmtime(fn), os.path.getmtime(os.path.join(TESTDATA, 'zipped.egg')))
+
+        self.assertRaises(IOError, zipio.getmtime, os.path.join(TESTDATA, 'no-file'))
+        self.assertRaises(IOError, zipio.getmtime, os.path.join(TESTDATA, 'zipped.egg/no-file'))
+
+    def test_contextlib(self):
+        # 1. Regular file
+        with zipio.open(os.path.join(TESTDATA, 'test.txt'), 'r') as fp:
+            data = fp.read()
+        try:
+            fp.read()
+            self.fail("file not closed")
+        except (ValueError, IOError):
+            pass
+
+        self.assertEqual(data, 'This is test.txt\n')
+
+        if sys.version_info[0] == 3:
+            with zipio.open(os.path.join(TESTDATA, 'test.txt'), 'rb') as fp:
+                data = fp.read()
+            try:
+                fp.read()
+                self.fail("file not closed")
+            except (ValueError, IOError):
+                pass
+
+            self.assertEqual(data, b'This is test.txt\n')
+
+        # 2. File inside zipfile
+        with zipio.open(os.path.join(TESTDATA, 'zipped.egg', 'test.txt'), 'r') as fp:
+            data = fp.read()
+        try:
+            fp.read()
+            self.fail("file not closed")
+        except (ValueError, IOError):
+            pass
+        self.assertEqual(data, 'Zipped up test.txt\n')
+
+        if sys.version_info[0] == 3:
+            with zipio.open(os.path.join(TESTDATA, 'zipped.egg', 'test.txt'), 'rb') as fp:
+                data = fp.read()
+            try:
+                fp.read()
+                self.fail("file not closed")
+            except (IOError, ValueError):
+                pass
+            self.assertEqual(data, b'Zipped up test.txt\n')
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/child/namedpkg/slave.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/child/namedpkg/slave.py
new file mode 100644
index 0000000..16a3d3c
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/child/namedpkg/slave.py
@@ -0,0 +1,2 @@
+""" slave packages """
+import os
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/child/nameduser-1.5-py2.6-nspkg.pth b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/child/nameduser-1.5-py2.6-nspkg.pth
new file mode 100644
index 0000000..b020a3c
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/child/nameduser-1.5-py2.6-nspkg.pth
@@ -0,0 +1 @@
+import sys,types,os; p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('namedpkg',)); ie = os.path.exists(os.path.join(p,'__init__.py')); m = not ie and sys.modules.setdefault('namedpkg',types.ModuleType('namedpkg')); mp = (m or []) and m.__dict__.setdefault('__path__',[]); (p not in mp) and mp.append(p)
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/child/nameduser-1.5-py2.6.egg-info/PKG-INFO b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/child/nameduser-1.5-py2.6.egg-info/PKG-INFO
new file mode 100644
index 0000000..cff065a
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/child/nameduser-1.5-py2.6.egg-info/PKG-INFO
@@ -0,0 +1,10 @@
+Metadata-Version: 1.0
+Name: nameduser
+Version: 1.5
+Summary: UNKNOWN
+Home-page: UNKNOWN
+Author: UNKNOWN
+Author-email: UNKNOWN
+License: UNKNOWN
+Description: UNKNOWN
+Platform: UNKNOWN
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/child/nameduser-1.5-py2.6.egg-info/SOURCES.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/child/nameduser-1.5-py2.6.egg-info/SOURCES.txt
new file mode 100644
index 0000000..15c6126
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/child/nameduser-1.5-py2.6.egg-info/SOURCES.txt
@@ -0,0 +1,8 @@
+setup.py
+namedpkg/__init__.py
+namedpkg/slave.py
+nameduser.egg-info/PKG-INFO
+nameduser.egg-info/SOURCES.txt
+nameduser.egg-info/dependency_links.txt
+nameduser.egg-info/namespace_packages.txt
+nameduser.egg-info/top_level.txt
\ No newline at end of file
diff --git a/catapult/third_party/six/six.egg-info/dependency_links.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/child/nameduser-1.5-py2.6.egg-info/dependency_links.txt
similarity index 100%
copy from catapult/third_party/six/six.egg-info/dependency_links.txt
copy to catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/child/nameduser-1.5-py2.6.egg-info/dependency_links.txt
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/child/nameduser-1.5-py2.6.egg-info/namespace_packages.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/child/nameduser-1.5-py2.6.egg-info/namespace_packages.txt
new file mode 100644
index 0000000..d332ce1
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/child/nameduser-1.5-py2.6.egg-info/namespace_packages.txt
@@ -0,0 +1 @@
+namedpkg
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/child/nameduser-1.5-py2.6.egg-info/top_level.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/child/nameduser-1.5-py2.6.egg-info/top_level.txt
new file mode 100644
index 0000000..d332ce1
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/child/nameduser-1.5-py2.6.egg-info/top_level.txt
@@ -0,0 +1 @@
+namedpkg
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/parent/namedpkg-1.0-py2.6-nspkg.pth b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/parent/namedpkg-1.0-py2.6-nspkg.pth
new file mode 100644
index 0000000..b020a3c
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/parent/namedpkg-1.0-py2.6-nspkg.pth
@@ -0,0 +1 @@
+import sys,types,os; p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('namedpkg',)); ie = os.path.exists(os.path.join(p,'__init__.py')); m = not ie and sys.modules.setdefault('namedpkg',types.ModuleType('namedpkg')); mp = (m or []) and m.__dict__.setdefault('__path__',[]); (p not in mp) and mp.append(p)
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/parent/namedpkg-1.0-py2.6.egg-info/PKG-INFO b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/parent/namedpkg-1.0-py2.6.egg-info/PKG-INFO
new file mode 100644
index 0000000..138c5fd
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/parent/namedpkg-1.0-py2.6.egg-info/PKG-INFO
@@ -0,0 +1,10 @@
+Metadata-Version: 1.0
+Name: namedpkg
+Version: 1.0
+Summary: UNKNOWN
+Home-page: UNKNOWN
+Author: UNKNOWN
+Author-email: UNKNOWN
+License: UNKNOWN
+Description: UNKNOWN
+Platform: UNKNOWN
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/parent/namedpkg-1.0-py2.6.egg-info/SOURCES.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/parent/namedpkg-1.0-py2.6.egg-info/SOURCES.txt
new file mode 100644
index 0000000..29dfc47
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/parent/namedpkg-1.0-py2.6.egg-info/SOURCES.txt
@@ -0,0 +1,8 @@
+setup.py
+namedpkg/__init__.py
+namedpkg/parent.py
+namedpkg.egg-info/PKG-INFO
+namedpkg.egg-info/SOURCES.txt
+namedpkg.egg-info/dependency_links.txt
+namedpkg.egg-info/namespace_packages.txt
+namedpkg.egg-info/top_level.txt
\ No newline at end of file
diff --git a/catapult/third_party/six/six.egg-info/dependency_links.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/parent/namedpkg-1.0-py2.6.egg-info/dependency_links.txt
similarity index 100%
copy from catapult/third_party/six/six.egg-info/dependency_links.txt
copy to catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/parent/namedpkg-1.0-py2.6.egg-info/dependency_links.txt
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/parent/namedpkg-1.0-py2.6.egg-info/namespace_packages.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/parent/namedpkg-1.0-py2.6.egg-info/namespace_packages.txt
new file mode 100644
index 0000000..d332ce1
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/parent/namedpkg-1.0-py2.6.egg-info/namespace_packages.txt
@@ -0,0 +1 @@
+namedpkg
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/parent/namedpkg-1.0-py2.6.egg-info/top_level.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/parent/namedpkg-1.0-py2.6.egg-info/top_level.txt
new file mode 100644
index 0000000..d332ce1
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/parent/namedpkg-1.0-py2.6.egg-info/top_level.txt
@@ -0,0 +1 @@
+namedpkg
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/parent/namedpkg/parent.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/parent/namedpkg/parent.py
new file mode 100644
index 0000000..db7354b
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.10/parent/namedpkg/parent.py
@@ -0,0 +1,2 @@
+""" parent packages """
+import sys
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/child/namedpkg/slave.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/child/namedpkg/slave.py
new file mode 100644
index 0000000..16a3d3c
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/child/namedpkg/slave.py
@@ -0,0 +1,2 @@
+""" slave packages """
+import os
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/child/nameduser-1.5-py2.5-nspkg.pth b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/child/nameduser-1.5-py2.5-nspkg.pth
new file mode 100644
index 0000000..9423238
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/child/nameduser-1.5-py2.5-nspkg.pth
@@ -0,0 +1 @@
+import sys,new,os; p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('namedpkg',)); ie = os.path.exists(os.path.join(p,'__init__.py')); m = not ie and sys.modules.setdefault('namedpkg',new.module('namedpkg')); mp = (m or []) and m.__dict__.setdefault('__path__',[]); (p not in mp) and mp.append(p)
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/child/nameduser-1.5-py2.5.egg-info/PKG-INFO b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/child/nameduser-1.5-py2.5.egg-info/PKG-INFO
new file mode 100644
index 0000000..cff065a
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/child/nameduser-1.5-py2.5.egg-info/PKG-INFO
@@ -0,0 +1,10 @@
+Metadata-Version: 1.0
+Name: nameduser
+Version: 1.5
+Summary: UNKNOWN
+Home-page: UNKNOWN
+Author: UNKNOWN
+Author-email: UNKNOWN
+License: UNKNOWN
+Description: UNKNOWN
+Platform: UNKNOWN
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/child/nameduser-1.5-py2.5.egg-info/SOURCES.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/child/nameduser-1.5-py2.5.egg-info/SOURCES.txt
new file mode 100644
index 0000000..15c6126
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/child/nameduser-1.5-py2.5.egg-info/SOURCES.txt
@@ -0,0 +1,8 @@
+setup.py
+namedpkg/__init__.py
+namedpkg/slave.py
+nameduser.egg-info/PKG-INFO
+nameduser.egg-info/SOURCES.txt
+nameduser.egg-info/dependency_links.txt
+nameduser.egg-info/namespace_packages.txt
+nameduser.egg-info/top_level.txt
\ No newline at end of file
diff --git a/catapult/third_party/six/six.egg-info/dependency_links.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/child/nameduser-1.5-py2.5.egg-info/dependency_links.txt
similarity index 100%
copy from catapult/third_party/six/six.egg-info/dependency_links.txt
copy to catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/child/nameduser-1.5-py2.5.egg-info/dependency_links.txt
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/child/nameduser-1.5-py2.5.egg-info/namespace_packages.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/child/nameduser-1.5-py2.5.egg-info/namespace_packages.txt
new file mode 100644
index 0000000..d332ce1
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/child/nameduser-1.5-py2.5.egg-info/namespace_packages.txt
@@ -0,0 +1 @@
+namedpkg
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/child/nameduser-1.5-py2.5.egg-info/top_level.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/child/nameduser-1.5-py2.5.egg-info/top_level.txt
new file mode 100644
index 0000000..d332ce1
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/child/nameduser-1.5-py2.5.egg-info/top_level.txt
@@ -0,0 +1 @@
+namedpkg
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/parent/namedpkg-1.0-py2.5-nspkg.pth b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/parent/namedpkg-1.0-py2.5-nspkg.pth
new file mode 100644
index 0000000..9423238
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/parent/namedpkg-1.0-py2.5-nspkg.pth
@@ -0,0 +1 @@
+import sys,new,os; p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('namedpkg',)); ie = os.path.exists(os.path.join(p,'__init__.py')); m = not ie and sys.modules.setdefault('namedpkg',new.module('namedpkg')); mp = (m or []) and m.__dict__.setdefault('__path__',[]); (p not in mp) and mp.append(p)
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/parent/namedpkg-1.0-py2.5.egg-info/PKG-INFO b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/parent/namedpkg-1.0-py2.5.egg-info/PKG-INFO
new file mode 100644
index 0000000..138c5fd
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/parent/namedpkg-1.0-py2.5.egg-info/PKG-INFO
@@ -0,0 +1,10 @@
+Metadata-Version: 1.0
+Name: namedpkg
+Version: 1.0
+Summary: UNKNOWN
+Home-page: UNKNOWN
+Author: UNKNOWN
+Author-email: UNKNOWN
+License: UNKNOWN
+Description: UNKNOWN
+Platform: UNKNOWN
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/parent/namedpkg-1.0-py2.5.egg-info/SOURCES.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/parent/namedpkg-1.0-py2.5.egg-info/SOURCES.txt
new file mode 100644
index 0000000..29dfc47
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/parent/namedpkg-1.0-py2.5.egg-info/SOURCES.txt
@@ -0,0 +1,8 @@
+setup.py
+namedpkg/__init__.py
+namedpkg/parent.py
+namedpkg.egg-info/PKG-INFO
+namedpkg.egg-info/SOURCES.txt
+namedpkg.egg-info/dependency_links.txt
+namedpkg.egg-info/namespace_packages.txt
+namedpkg.egg-info/top_level.txt
\ No newline at end of file
diff --git a/catapult/third_party/six/six.egg-info/dependency_links.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/parent/namedpkg-1.0-py2.5.egg-info/dependency_links.txt
similarity index 100%
copy from catapult/third_party/six/six.egg-info/dependency_links.txt
copy to catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/parent/namedpkg-1.0-py2.5.egg-info/dependency_links.txt
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/parent/namedpkg-1.0-py2.5.egg-info/namespace_packages.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/parent/namedpkg-1.0-py2.5.egg-info/namespace_packages.txt
new file mode 100644
index 0000000..d332ce1
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/parent/namedpkg-1.0-py2.5.egg-info/namespace_packages.txt
@@ -0,0 +1 @@
+namedpkg
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/parent/namedpkg-1.0-py2.5.egg-info/top_level.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/parent/namedpkg-1.0-py2.5.egg-info/top_level.txt
new file mode 100644
index 0000000..d332ce1
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/parent/namedpkg-1.0-py2.5.egg-info/top_level.txt
@@ -0,0 +1 @@
+namedpkg
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/parent/namedpkg/parent.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/parent/namedpkg/parent.py
new file mode 100644
index 0000000..db7354b
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/distribute-0.6.12/parent/namedpkg/parent.py
@@ -0,0 +1,2 @@
+""" parent packages """
+import sys
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/child/namedpkg/slave.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/child/namedpkg/slave.py
new file mode 100644
index 0000000..16a3d3c
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/child/namedpkg/slave.py
@@ -0,0 +1,2 @@
+""" slave packages """
+import os
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/child/nameduser-1.5-py2.5-nspkg.pth b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/child/nameduser-1.5-py2.5-nspkg.pth
new file mode 100644
index 0000000..9423238
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/child/nameduser-1.5-py2.5-nspkg.pth
@@ -0,0 +1 @@
+import sys,new,os; p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('namedpkg',)); ie = os.path.exists(os.path.join(p,'__init__.py')); m = not ie and sys.modules.setdefault('namedpkg',new.module('namedpkg')); mp = (m or []) and m.__dict__.setdefault('__path__',[]); (p not in mp) and mp.append(p)
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/child/nameduser-1.5-py2.5.egg-info/PKG-INFO b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/child/nameduser-1.5-py2.5.egg-info/PKG-INFO
new file mode 100644
index 0000000..cff065a
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/child/nameduser-1.5-py2.5.egg-info/PKG-INFO
@@ -0,0 +1,10 @@
+Metadata-Version: 1.0
+Name: nameduser
+Version: 1.5
+Summary: UNKNOWN
+Home-page: UNKNOWN
+Author: UNKNOWN
+Author-email: UNKNOWN
+License: UNKNOWN
+Description: UNKNOWN
+Platform: UNKNOWN
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/child/nameduser-1.5-py2.5.egg-info/SOURCES.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/child/nameduser-1.5-py2.5.egg-info/SOURCES.txt
new file mode 100644
index 0000000..15c6126
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/child/nameduser-1.5-py2.5.egg-info/SOURCES.txt
@@ -0,0 +1,8 @@
+setup.py
+namedpkg/__init__.py
+namedpkg/slave.py
+nameduser.egg-info/PKG-INFO
+nameduser.egg-info/SOURCES.txt
+nameduser.egg-info/dependency_links.txt
+nameduser.egg-info/namespace_packages.txt
+nameduser.egg-info/top_level.txt
\ No newline at end of file
diff --git a/catapult/third_party/six/six.egg-info/dependency_links.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/child/nameduser-1.5-py2.5.egg-info/dependency_links.txt
similarity index 100%
copy from catapult/third_party/six/six.egg-info/dependency_links.txt
copy to catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/child/nameduser-1.5-py2.5.egg-info/dependency_links.txt
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/child/nameduser-1.5-py2.5.egg-info/namespace_packages.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/child/nameduser-1.5-py2.5.egg-info/namespace_packages.txt
new file mode 100644
index 0000000..d332ce1
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/child/nameduser-1.5-py2.5.egg-info/namespace_packages.txt
@@ -0,0 +1 @@
+namedpkg
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/child/nameduser-1.5-py2.5.egg-info/top_level.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/child/nameduser-1.5-py2.5.egg-info/top_level.txt
new file mode 100644
index 0000000..d332ce1
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/child/nameduser-1.5-py2.5.egg-info/top_level.txt
@@ -0,0 +1 @@
+namedpkg
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/parent/namedpkg-1.0-py2.5-nspkg.pth b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/parent/namedpkg-1.0-py2.5-nspkg.pth
new file mode 100644
index 0000000..9423238
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/parent/namedpkg-1.0-py2.5-nspkg.pth
@@ -0,0 +1 @@
+import sys,new,os; p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('namedpkg',)); ie = os.path.exists(os.path.join(p,'__init__.py')); m = not ie and sys.modules.setdefault('namedpkg',new.module('namedpkg')); mp = (m or []) and m.__dict__.setdefault('__path__',[]); (p not in mp) and mp.append(p)
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/parent/namedpkg-1.0-py2.5.egg-info/PKG-INFO b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/parent/namedpkg-1.0-py2.5.egg-info/PKG-INFO
new file mode 100644
index 0000000..138c5fd
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/parent/namedpkg-1.0-py2.5.egg-info/PKG-INFO
@@ -0,0 +1,10 @@
+Metadata-Version: 1.0
+Name: namedpkg
+Version: 1.0
+Summary: UNKNOWN
+Home-page: UNKNOWN
+Author: UNKNOWN
+Author-email: UNKNOWN
+License: UNKNOWN
+Description: UNKNOWN
+Platform: UNKNOWN
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/parent/namedpkg-1.0-py2.5.egg-info/SOURCES.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/parent/namedpkg-1.0-py2.5.egg-info/SOURCES.txt
new file mode 100644
index 0000000..29dfc47
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/parent/namedpkg-1.0-py2.5.egg-info/SOURCES.txt
@@ -0,0 +1,8 @@
+setup.py
+namedpkg/__init__.py
+namedpkg/parent.py
+namedpkg.egg-info/PKG-INFO
+namedpkg.egg-info/SOURCES.txt
+namedpkg.egg-info/dependency_links.txt
+namedpkg.egg-info/namespace_packages.txt
+namedpkg.egg-info/top_level.txt
\ No newline at end of file
diff --git a/catapult/third_party/six/six.egg-info/dependency_links.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/parent/namedpkg-1.0-py2.5.egg-info/dependency_links.txt
similarity index 100%
copy from catapult/third_party/six/six.egg-info/dependency_links.txt
copy to catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/parent/namedpkg-1.0-py2.5.egg-info/dependency_links.txt
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/parent/namedpkg-1.0-py2.5.egg-info/namespace_packages.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/parent/namedpkg-1.0-py2.5.egg-info/namespace_packages.txt
new file mode 100644
index 0000000..d332ce1
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/parent/namedpkg-1.0-py2.5.egg-info/namespace_packages.txt
@@ -0,0 +1 @@
+namedpkg
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/parent/namedpkg-1.0-py2.5.egg-info/top_level.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/parent/namedpkg-1.0-py2.5.egg-info/top_level.txt
new file mode 100644
index 0000000..d332ce1
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/parent/namedpkg-1.0-py2.5.egg-info/top_level.txt
@@ -0,0 +1 @@
+namedpkg
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/parent/namedpkg/parent.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/parent/namedpkg/parent.py
new file mode 100644
index 0000000..db7354b
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/setuptools-0.6c9/parent/namedpkg/parent.py
@@ -0,0 +1,2 @@
+""" parent packages """
+import sys
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/child/namedpkg/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/child/namedpkg/__init__.py
new file mode 100644
index 0000000..de40ea7
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/child/namedpkg/__init__.py
@@ -0,0 +1 @@
+__import__('pkg_resources').declare_namespace(__name__)
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/child/namedpkg/slave.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/child/namedpkg/slave.py
new file mode 100644
index 0000000..16a3d3c
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/child/namedpkg/slave.py
@@ -0,0 +1,2 @@
+""" slave packages """
+import os
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/child/nameduser.egg-info/PKG-INFO b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/child/nameduser.egg-info/PKG-INFO
new file mode 100644
index 0000000..cff065a
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/child/nameduser.egg-info/PKG-INFO
@@ -0,0 +1,10 @@
+Metadata-Version: 1.0
+Name: nameduser
+Version: 1.5
+Summary: UNKNOWN
+Home-page: UNKNOWN
+Author: UNKNOWN
+Author-email: UNKNOWN
+License: UNKNOWN
+Description: UNKNOWN
+Platform: UNKNOWN
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/child/nameduser.egg-info/SOURCES.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/child/nameduser.egg-info/SOURCES.txt
new file mode 100644
index 0000000..15c6126
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/child/nameduser.egg-info/SOURCES.txt
@@ -0,0 +1,8 @@
+setup.py
+namedpkg/__init__.py
+namedpkg/slave.py
+nameduser.egg-info/PKG-INFO
+nameduser.egg-info/SOURCES.txt
+nameduser.egg-info/dependency_links.txt
+nameduser.egg-info/namespace_packages.txt
+nameduser.egg-info/top_level.txt
\ No newline at end of file
diff --git a/catapult/third_party/six/six.egg-info/dependency_links.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/child/nameduser.egg-info/dependency_links.txt
similarity index 100%
copy from catapult/third_party/six/six.egg-info/dependency_links.txt
copy to catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/child/nameduser.egg-info/dependency_links.txt
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/child/nameduser.egg-info/namespace_packages.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/child/nameduser.egg-info/namespace_packages.txt
new file mode 100644
index 0000000..d332ce1
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/child/nameduser.egg-info/namespace_packages.txt
@@ -0,0 +1 @@
+namedpkg
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/child/nameduser.egg-info/top_level.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/child/nameduser.egg-info/top_level.txt
new file mode 100644
index 0000000..d332ce1
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/child/nameduser.egg-info/top_level.txt
@@ -0,0 +1 @@
+namedpkg
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/child/setup.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/child/setup.py
new file mode 100644
index 0000000..e1000c5
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/child/setup.py
@@ -0,0 +1,8 @@
+from setuptools import setup
+
+setup(
+        name="nameduser",
+        version="1.5",
+        packages=["namedpkg"],
+        namespace_packages=["namedpkg"],
+)
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/install.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/install.py
new file mode 100644
index 0000000..3f14e73
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/install.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+"""
+Script that creates a directory one level up with "parent" and "child" subdirs,
+each holding a --single-version-externally-managed install of a namespace package.
+
+Use this script with new versions of distribute and setuptools to ensure
+that changes in the handling of this option don't break us.
+"""
+import pkg_resources
+import subprocess
+import os
+import sys
+import shutil
+
+def main():
+    r = pkg_resources.require('setuptools')[0]
+    install_dir = os.path.join(
+            os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
+            "%s-%s"%(r.project_name, r.version))
+    if os.path.exists(install_dir):
+        print("Skip %s %s: already installed"%(r.project_name, r.version))
+
+    else:
+        os.mkdir(install_dir)
+        os.mkdir(os.path.join(install_dir, "parent"))
+        os.mkdir(os.path.join(install_dir, "child"))
+
+        if os.path.exists('parent/build'):
+            shutil.rmtree('parent/build')
+        if os.path.exists('child/build'):
+            shutil.rmtree('child/build')
+
+        for subdir in ('parent', 'child'):
+            p = subprocess.Popen([
+                sys.executable,
+                "setup.py",
+                "install",
+                 "--install-lib=%s/%s"%(install_dir, subdir),
+                 "--single-version-externally-managed",
+                 "--record", "files.txt"
+            ],
+            cwd=subdir)
+            xit = p.wait()
+            if xit != 0:
+                print("ERROR: install failed")
+                sys.exit(1)
+
+
+            if os.path.exists('%s/files.txt'%(subdir,)):
+                os.unlink('%s/files.txt'%(subdir,))
+
+
+if __name__ == "__main__":
+    main()
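
The fixture trees that install.py writes (for example the distribute-0.6.10
parent and child directories added in this patch) capture how
--single-version-externally-managed namespace-package installs are laid out on
disk. As a rough sketch of consuming such a tree by hand, assuming one of the
generated trees already sits next to this directory and using only the standard
library, the generated *-nspkg.pth files can be processed with site.addsitedir
and the stitched namespace imported directly:

    import os
    import site

    fixture = "distribute-0.6.10"            # one of the generated sibling dirs
    for subdir in ("parent", "child"):
        # addsitedir() puts the directory on sys.path and executes its
        # *-nspkg.pth file, which merges both halves of "namedpkg" onto a
        # shared __path__ (the pre-PEP 420 namespace-package mechanism).
        site.addsitedir(os.path.join(fixture, subdir))

    import namedpkg.parent                   # installed from the parent tree
    import namedpkg.slave                    # installed from the child tree
    print(namedpkg.__path__)                 # lists both fixture subdirectories
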
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/parent/namedpkg.egg-info/PKG-INFO b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/parent/namedpkg.egg-info/PKG-INFO
new file mode 100644
index 0000000..138c5fd
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/parent/namedpkg.egg-info/PKG-INFO
@@ -0,0 +1,10 @@
+Metadata-Version: 1.0
+Name: namedpkg
+Version: 1.0
+Summary: UNKNOWN
+Home-page: UNKNOWN
+Author: UNKNOWN
+Author-email: UNKNOWN
+License: UNKNOWN
+Description: UNKNOWN
+Platform: UNKNOWN
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/parent/namedpkg.egg-info/SOURCES.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/parent/namedpkg.egg-info/SOURCES.txt
new file mode 100644
index 0000000..29dfc47
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/parent/namedpkg.egg-info/SOURCES.txt
@@ -0,0 +1,8 @@
+setup.py
+namedpkg/__init__.py
+namedpkg/parent.py
+namedpkg.egg-info/PKG-INFO
+namedpkg.egg-info/SOURCES.txt
+namedpkg.egg-info/dependency_links.txt
+namedpkg.egg-info/namespace_packages.txt
+namedpkg.egg-info/top_level.txt
\ No newline at end of file
diff --git a/catapult/third_party/six/six.egg-info/dependency_links.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/parent/namedpkg.egg-info/dependency_links.txt
similarity index 100%
copy from catapult/third_party/six/six.egg-info/dependency_links.txt
copy to catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/parent/namedpkg.egg-info/dependency_links.txt
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/parent/namedpkg.egg-info/namespace_packages.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/parent/namedpkg.egg-info/namespace_packages.txt
new file mode 100644
index 0000000..d332ce1
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/parent/namedpkg.egg-info/namespace_packages.txt
@@ -0,0 +1 @@
+namedpkg
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/parent/namedpkg.egg-info/top_level.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/parent/namedpkg.egg-info/top_level.txt
new file mode 100644
index 0000000..d332ce1
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/parent/namedpkg.egg-info/top_level.txt
@@ -0,0 +1 @@
+namedpkg
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/parent/namedpkg/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/parent/namedpkg/__init__.py
new file mode 100644
index 0000000..de40ea7
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/parent/namedpkg/__init__.py
@@ -0,0 +1 @@
+__import__('pkg_resources').declare_namespace(__name__)
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/parent/namedpkg/parent.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/parent/namedpkg/parent.py
new file mode 100644
index 0000000..db7354b
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/parent/namedpkg/parent.py
@@ -0,0 +1,2 @@
+""" parent packages """
+import sys
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/parent/setup.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/parent/setup.py
new file mode 100644
index 0000000..1a5be60
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/parent/setup.py
@@ -0,0 +1,8 @@
+from setuptools import setup
+
+setup(
+        name="namedpkg",
+        version="1.0",
+        packages=["namedpkg"],
+        namespace_packages=["namedpkg"],
+)
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/script b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/script
new file mode 100755
index 0000000..2716038
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/script
@@ -0,0 +1,4 @@
+#!/usr/bin/python
+import sys, os
+
+print (sys.version)
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/subdir/file1.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/subdir/file1.txt
new file mode 100644
index 0000000..7898192
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/subdir/file1.txt
@@ -0,0 +1 @@
+a
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/subdir/file2.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/subdir/file2.txt
new file mode 100644
index 0000000..6178079
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/subdir/file2.txt
@@ -0,0 +1 @@
+b
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/syspath.egg b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/syspath.egg
new file mode 100644
index 0000000..64db323
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/syspath.egg
Binary files differ
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/syspath.zip b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/syspath.zip
new file mode 100644
index 0000000..d3c1f42
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/syspath.zip
Binary files differ
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/syspath/myext.pyd b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/syspath/myext.pyd
new file mode 100644
index 0000000..01e7507
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/syspath/myext.pyd
@@ -0,0 +1 @@
+""" fake extension """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/syspath/mymodule.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/syspath/mymodule.py
new file mode 100644
index 0000000..de6c648
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/syspath/mymodule.py
@@ -0,0 +1,3 @@
+"""
+some module
+"""
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/syspath/mymodule3.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/syspath/mymodule3.py
new file mode 100644
index 0000000..422b686
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/syspath/mymodule3.py
@@ -0,0 +1 @@
+"""  fake module """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/syspath/mypkg/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/syspath/mypkg/__init__.py
new file mode 100644
index 0000000..25597fb
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/syspath/mypkg/__init__.py
@@ -0,0 +1 @@
+""" fake package """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/test.egg b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/test.egg
new file mode 100644
index 0000000..219c116
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/test.egg
Binary files differ
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/test.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/test.txt
new file mode 100644
index 0000000..3b86232
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/test.txt
@@ -0,0 +1 @@
+This is test.txt
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/zipped.egg b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/zipped.egg
new file mode 100644
index 0000000..bf8bd09
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/zipped.egg
Binary files differ
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-compatmodule/pkg/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-compatmodule/pkg/__init__.py
new file mode 100644
index 0000000..78b491e
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-compatmodule/pkg/__init__.py
@@ -0,0 +1 @@
+""" pkg """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-compatmodule/pkg/api.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-compatmodule/pkg/api.py
new file mode 100644
index 0000000..53fe9ba
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-compatmodule/pkg/api.py
@@ -0,0 +1,9 @@
+""" pkg.api """
+
+import sys
+
+if sys.version_info[0] == 2:
+    from .api2 import *
+
+else:
+    from .api3 import *
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-compatmodule/pkg/api2.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-compatmodule/pkg/api2.py
new file mode 100644
index 0000000..4f5be0b
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-compatmodule/pkg/api2.py
@@ -0,0 +1,11 @@
+import urllib2
+
+def div(a, b):
+    try:
+        return a/b
+
+    except ZeroDivisionError, exc:
+        return None
+
+class MyClass (object):
+    pass
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-compatmodule/pkg/api3.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-compatmodule/pkg/api3.py
new file mode 100644
index 0000000..dc4aefa
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-compatmodule/pkg/api3.py
@@ -0,0 +1,11 @@
+import http.client
+
+def div(a, b):
+    try:
+        return a/b
+
+    except ZeroDivisionError as exc:
+        return None
+
+class MyClass (object, metaclass=type):
+    pass
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/function_class_existing.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/function_class_existing.py
new file mode 100644
index 0000000..ef467ea
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/function_class_existing.py
@@ -0,0 +1 @@
+""" $fname """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/function_conditional_existing.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/function_conditional_existing.py
new file mode 100644
index 0000000..ef467ea
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/function_conditional_existing.py
@@ -0,0 +1 @@
+""" $fname """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/function_conditional_import2_existing.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/function_conditional_import2_existing.py
new file mode 100644
index 0000000..ef467ea
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/function_conditional_import2_existing.py
@@ -0,0 +1 @@
+""" $fname """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/function_conditional_import_existing.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/function_conditional_import_existing.py
new file mode 100644
index 0000000..ef467ea
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/function_conditional_import_existing.py
@@ -0,0 +1 @@
+""" $fname """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/function_existing.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/function_existing.py
new file mode 100644
index 0000000..ef467ea
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/function_existing.py
@@ -0,0 +1 @@
+""" $fname """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/function_import2_existing.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/function_import2_existing.py
new file mode 100644
index 0000000..ef467ea
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/function_import2_existing.py
@@ -0,0 +1 @@
+""" $fname """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/function_import_existing.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/function_import_existing.py
new file mode 100644
index 0000000..ef467ea
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/function_import_existing.py
@@ -0,0 +1 @@
+""" $fname """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/__init__.py
new file mode 100644
index 0000000..82e4875
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/__init__.py
@@ -0,0 +1 @@
+""" pkg.__init__ """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/function_class_existing.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/function_class_existing.py
new file mode 100644
index 0000000..ef467ea
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/function_class_existing.py
@@ -0,0 +1 @@
+""" $fname """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/function_conditional_existing.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/function_conditional_existing.py
new file mode 100644
index 0000000..ef467ea
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/function_conditional_existing.py
@@ -0,0 +1 @@
+""" $fname """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/function_conditional_import2_existing.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/function_conditional_import2_existing.py
new file mode 100644
index 0000000..ef467ea
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/function_conditional_import2_existing.py
@@ -0,0 +1 @@
+""" $fname """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/function_conditional_import_existing.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/function_conditional_import_existing.py
new file mode 100644
index 0000000..ef467ea
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/function_conditional_import_existing.py
@@ -0,0 +1 @@
+""" $fname """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/function_existing.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/function_existing.py
new file mode 100644
index 0000000..ef467ea
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/function_existing.py
@@ -0,0 +1 @@
+""" $fname """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/function_import2_existing.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/function_import2_existing.py
new file mode 100644
index 0000000..ef467ea
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/function_import2_existing.py
@@ -0,0 +1 @@
+""" $fname """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/function_import_existing.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/function_import_existing.py
new file mode 100644
index 0000000..ef467ea
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/function_import_existing.py
@@ -0,0 +1 @@
+""" $fname """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/toplevel_class_existing.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/toplevel_class_existing.py
new file mode 100644
index 0000000..ef467ea
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/toplevel_class_existing.py
@@ -0,0 +1 @@
+""" $fname """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/toplevel_conditional_existing.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/toplevel_conditional_existing.py
new file mode 100644
index 0000000..ef467ea
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/toplevel_conditional_existing.py
@@ -0,0 +1 @@
+""" $fname """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/toplevel_conditional_import2_existing.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/toplevel_conditional_import2_existing.py
new file mode 100644
index 0000000..ef467ea
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/toplevel_conditional_import2_existing.py
@@ -0,0 +1 @@
+""" $fname """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/toplevel_conditional_import_existing.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/toplevel_conditional_import_existing.py
new file mode 100644
index 0000000..ef467ea
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/toplevel_conditional_import_existing.py
@@ -0,0 +1 @@
+""" $fname """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/toplevel_existing.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/toplevel_existing.py
new file mode 100644
index 0000000..ef467ea
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/toplevel_existing.py
@@ -0,0 +1 @@
+""" $fname """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/toplevel_import2_existing.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/toplevel_import2_existing.py
new file mode 100644
index 0000000..ef467ea
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/toplevel_import2_existing.py
@@ -0,0 +1 @@
+""" $fname """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/toplevel_import_existing.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/toplevel_import_existing.py
new file mode 100644
index 0000000..ef467ea
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/pkg/toplevel_import_existing.py
@@ -0,0 +1 @@
+""" $fname """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/script.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/script.py
new file mode 100644
index 0000000..901c332
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/script.py
@@ -0,0 +1,51 @@
+
+import toplevel_existing
+import toplevel_nonexisting
+
+class MyClass:
+    import toplevel_class_existing
+    import toplevel_class_nonexisting
+
+if a == b:
+    import toplevel_conditional_existing
+    import toplevel_conditional_nonexisting
+
+    try:
+        import toplevel_conditional_import_existing
+        import toplevel_conditional_import_nonexisting
+    except:
+        import toplevel_conditional_import2_existing
+        import toplevel_conditional_import2_nonexisting
+
+try:
+    import toplevel_import_existing
+    import toplevel_import_nonexisting
+except:
+    import toplevel_import2_existing
+    import toplevel_import2_nonexisting
+
+def function():
+    import function_existing
+    import function_nonexisting
+
+    class MyClass:
+        import function_class_existing
+        import function_class_nonexisting
+
+    if a == b:
+        import function_conditional_existing
+        import function_conditional_nonexisting
+
+        try:
+            import function_conditional_import_existing
+            import function_conditional_import_nonexisting
+        except:
+            import function_conditional_import2_existing
+            import function_conditional_import2_nonexisting
+
+    try:
+        import function_import_existing
+        import function_import_nonexisting
+    except:
+        import function_import2_existing
+        import function_import2_nonexisting
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/script_from_import.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/script_from_import.py
new file mode 100644
index 0000000..1dd6783
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/script_from_import.py
@@ -0,0 +1,46 @@
+from pkg import toplevel_existing
+from pkg import toplevel_nonexisting
+
+class MyClass:
+    from pkg import toplevel_class_existing
+    from pkg import toplevel_class_nonexisting
+
+if a == b:
+    from pkg import toplevel_conditional_existing
+    from pkg import toplevel_conditional_nonexisting
+
+    try:
+        from pkg import toplevel_conditional_import_existing, toplevel_conditional_import_nonexisting
+    except:
+        from pkg import toplevel_conditional_import2_existing
+        from pkg import toplevel_conditional_import2_nonexisting
+
+try:
+    from pkg import toplevel_import_existing, toplevel_import_nonexisting
+except:
+    from pkg import toplevel_import2_existing
+    from pkg import toplevel_import2_nonexisting
+
+def function():
+    from pkg import function_existing, function_nonexisting
+
+    class MyClass:
+        from pkg import function_class_existing, function_class_nonexisting
+
+    if a == b:
+        from pkg import function_conditional_existing
+        from pkg import function_conditional_nonexisting
+
+        try:
+            from pkg import function_conditional_import_existing
+            from pkg import function_conditional_import_nonexisting
+        except:
+            from pkg import function_conditional_import2_existing
+            from pkg import function_conditional_import2_nonexisting
+
+    try:
+        from pkg import function_import_existing
+        from pkg import function_import_nonexisting
+    except:
+        from pkg import function_import2_existing
+        from pkg import function_import2_nonexisting
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/script_multi_import.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/script_multi_import.py
new file mode 100644
index 0000000..cb6ce54
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/script_multi_import.py
@@ -0,0 +1,26 @@
+
+
+try:
+    import os.path
+except ImportError:
+    pass
+
+import os
+
+def function(self):
+    import sys
+
+
+if a == b:
+    import sys
+
+def function2(self):
+    if a == b:
+        import platform
+
+def function3(self):
+    import platform
+    import email
+
+
+import email
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/toplevel_class_existing.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/toplevel_class_existing.py
new file mode 100644
index 0000000..ef467ea
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/toplevel_class_existing.py
@@ -0,0 +1 @@
+""" $fname """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/toplevel_conditional_existing.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/toplevel_conditional_existing.py
new file mode 100644
index 0000000..ef467ea
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/toplevel_conditional_existing.py
@@ -0,0 +1 @@
+""" $fname """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/toplevel_conditional_import2_existing.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/toplevel_conditional_import2_existing.py
new file mode 100644
index 0000000..ef467ea
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/toplevel_conditional_import2_existing.py
@@ -0,0 +1 @@
+""" $fname """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/toplevel_conditional_import_existing.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/toplevel_conditional_import_existing.py
new file mode 100644
index 0000000..ef467ea
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/toplevel_conditional_import_existing.py
@@ -0,0 +1 @@
+""" $fname """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/toplevel_existing.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/toplevel_existing.py
new file mode 100644
index 0000000..ef467ea
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/toplevel_existing.py
@@ -0,0 +1 @@
+""" $fname """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/toplevel_import2_existing.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/toplevel_import2_existing.py
new file mode 100644
index 0000000..ef467ea
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/toplevel_import2_existing.py
@@ -0,0 +1 @@
+""" $fname """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/toplevel_import_existing.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/toplevel_import_existing.py
new file mode 100644
index 0000000..ef467ea
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-edgedata/toplevel_import_existing.py
@@ -0,0 +1 @@
+""" $fname """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-import-from-init/pkg/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-import-from-init/pkg/__init__.py
new file mode 100644
index 0000000..78b491e
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-import-from-init/pkg/__init__.py
@@ -0,0 +1 @@
+""" pkg """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-import-from-init/pkg/subpkg/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-import-from-init/pkg/subpkg/__init__.py
new file mode 100644
index 0000000..ecd411e
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-import-from-init/pkg/subpkg/__init__.py
@@ -0,0 +1,5 @@
+""" pkg.subpkg """
+
+from compat import X, Y
+
+from _collections import A, B
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-import-from-init/pkg/subpkg/_collections.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-import-from-init/pkg/subpkg/_collections.py
new file mode 100644
index 0000000..4e9a588
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-import-from-init/pkg/subpkg/_collections.py
@@ -0,0 +1,3 @@
+""" pkg.subpkg._collections """
+
+A, B = "A", "B"
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-import-from-init/pkg/subpkg/compat.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-import-from-init/pkg/subpkg/compat.py
new file mode 100644
index 0000000..92850f2
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-import-from-init/pkg/subpkg/compat.py
@@ -0,0 +1,3 @@
+""" pkg.subpkg.compat """
+
+X, Y = 1, 2
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-import-from-init/pkg2/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-import-from-init/pkg2/__init__.py
new file mode 100644
index 0000000..f6e15f3
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-import-from-init/pkg2/__init__.py
@@ -0,0 +1 @@
+""" pkg2.__init__ """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-import-from-init/pkg2/subpkg/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-import-from-init/pkg2/subpkg/__init__.py
new file mode 100644
index 0000000..97fddf1
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-import-from-init/pkg2/subpkg/__init__.py
@@ -0,0 +1,5 @@
+""" pkg2.subpkg """
+
+from .compat import X, Y
+
+from ._collections import A, B
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-import-from-init/pkg2/subpkg/_collections.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-import-from-init/pkg2/subpkg/_collections.py
new file mode 100644
index 0000000..1b37f9d
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-import-from-init/pkg2/subpkg/_collections.py
@@ -0,0 +1,3 @@
+""" pkg2.subpkg._collections """
+
+A, B = "A", "B"
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-import-from-init/pkg2/subpkg/compat.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-import-from-init/pkg2/subpkg/compat.py
new file mode 100644
index 0000000..d544848
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-import-from-init/pkg2/subpkg/compat.py
@@ -0,0 +1,3 @@
+""" pkg2.subpkg.compat """
+
+X, Y = 1, 2
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-import-from-init/script.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-import-from-init/script.py
new file mode 100644
index 0000000..5867662
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-import-from-init/script.py
@@ -0,0 +1,2 @@
+import pkg.subpkg
+import pkg2.subpkg
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-packages/main_script.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-packages/main_script.py
new file mode 100644
index 0000000..de10111
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-packages/main_script.py
@@ -0,0 +1 @@
+import sys
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-packages/pkg/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-packages/pkg/__init__.py
new file mode 100644
index 0000000..84c8df8
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-packages/pkg/__init__.py
@@ -0,0 +1 @@
+""" pkg.init """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-packages/pkg/sub1/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-packages/pkg/sub1/__init__.py
new file mode 100644
index 0000000..7d52f7f
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-packages/pkg/sub1/__init__.py
@@ -0,0 +1 @@
+""" pkg.sub1.init """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-packages/pkg/sub1/modA.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-packages/pkg/sub1/modA.py
new file mode 100644
index 0000000..b827020
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-packages/pkg/sub1/modA.py
@@ -0,0 +1 @@
+""" pkg.sub1.modA """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-packages/pkg/sub2/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-packages/pkg/sub2/__init__.py
new file mode 100644
index 0000000..ca5ca11
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-packages/pkg/sub2/__init__.py
@@ -0,0 +1 @@
+""" pkg.sub2.init """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-packages/pkg/sub2/mod.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-packages/pkg/sub2/mod.py
new file mode 100644
index 0000000..1b172c8
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-packages/pkg/sub2/mod.py
@@ -0,0 +1 @@
+""" pkg.sub2.mod """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-packages/pkg/sub3.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-packages/pkg/sub3.py
new file mode 100644
index 0000000..211217d
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-packages/pkg/sub3.py
@@ -0,0 +1 @@
+""" pkg.sub3 """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-pep420-namespace/path1/package/sub2.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-pep420-namespace/path1/package/sub2.py
new file mode 100644
index 0000000..894a1ec
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-pep420-namespace/path1/package/sub2.py
@@ -0,0 +1 @@
+""" package.sub2 """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-pep420-namespace/path2/package/nspkg/mod.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-pep420-namespace/path2/package/nspkg/mod.py
new file mode 100644
index 0000000..9e846e4
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-pep420-namespace/path2/package/nspkg/mod.py
@@ -0,0 +1 @@
+""" package.nspkg.mod """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-pep420-namespace/path2/package/sub1.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-pep420-namespace/path2/package/sub1.py
new file mode 100644
index 0000000..bb1f933
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-pep420-namespace/path2/package/sub1.py
@@ -0,0 +1 @@
+""" package.sub1 """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-pep420-namespace/path2/package/subpackage/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-pep420-namespace/path2/package/subpackage/__init__.py
new file mode 100644
index 0000000..d1c6849
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-pep420-namespace/path2/package/subpackage/__init__.py
@@ -0,0 +1 @@
+""" package.subpackage """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-pep420-namespace/path2/package/subpackage/sub.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-pep420-namespace/path2/package/subpackage/sub.py
new file mode 100644
index 0000000..f0ed11d
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-pep420-namespace/path2/package/subpackage/sub.py
@@ -0,0 +1 @@
+""" package.subpackage.sub """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr1/main_script.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr1/main_script.py
new file mode 100644
index 0000000..35e2a36
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr1/main_script.py
@@ -0,0 +1 @@
+from pkg import a
diff --git a/catapult/perf_insights/third_party/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr1/pkg/__init__.py
similarity index 100%
copy from catapult/perf_insights/third_party/__init__.py
copy to catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr1/pkg/__init__.py
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr1/pkg/a.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr1/pkg/a.py
new file mode 100644
index 0000000..b02981c
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr1/pkg/a.py
@@ -0,0 +1 @@
+from . import b
diff --git a/catapult/perf_insights/third_party/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr1/pkg/b.py
similarity index 100%
copy from catapult/perf_insights/third_party/__init__.py
copy to catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr1/pkg/b.py
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr2/main_script.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr2/main_script.py
new file mode 100644
index 0000000..288701b
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr2/main_script.py
@@ -0,0 +1 @@
+import pkg
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr2/pkg/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr2/pkg/__init__.py
new file mode 100644
index 0000000..1374eb3
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr2/pkg/__init__.py
@@ -0,0 +1,9 @@
+"""
+Package structure similar to crcmod
+"""
+try:
+    from pkg.pkg import *
+    import pkg.base
+except ImportError:
+    from pkg import *
+    import base
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr2/pkg/base.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr2/pkg/base.py
new file mode 100644
index 0000000..93e66ee
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr2/pkg/base.py
@@ -0,0 +1 @@
+""" package base """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr2/pkg/pkg.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr2/pkg/pkg.py
new file mode 100644
index 0000000..d5fcda2
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr2/pkg/pkg.py
@@ -0,0 +1 @@
+""" nested """
diff --git a/catapult/perf_insights/third_party/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr3/mypkg/__init__.py
similarity index 100%
copy from catapult/perf_insights/third_party/__init__.py
copy to catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr3/mypkg/__init__.py
diff --git a/catapult/perf_insights/third_party/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr3/mypkg/distutils/__init__.py
similarity index 100%
copy from catapult/perf_insights/third_party/__init__.py
copy to catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr3/mypkg/distutils/__init__.py
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr3/mypkg/distutils/ccompiler.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr3/mypkg/distutils/ccompiler.py
new file mode 100644
index 0000000..c1b0f9b
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr3/mypkg/distutils/ccompiler.py
@@ -0,0 +1,2 @@
+from distutils.ccompiler import *
+from distutils.sysconfig import customize_compiler
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr3/script.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr3/script.py
new file mode 100644
index 0000000..9e38ced
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr3/script.py
@@ -0,0 +1 @@
+from mypkg.distutils import ccompiler
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr4/pkg/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr4/pkg/__init__.py
new file mode 100644
index 0000000..82e4875
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr4/pkg/__init__.py
@@ -0,0 +1 @@
+""" pkg.__init__ """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr4/pkg/core/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr4/pkg/core/__init__.py
new file mode 100644
index 0000000..b393ff5
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr4/pkg/core/__init__.py
@@ -0,0 +1 @@
+""" pkg.core.__init__ """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr4/pkg/core/callables.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr4/pkg/core/callables.py
new file mode 100644
index 0000000..9ce619b
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr4/pkg/core/callables.py
@@ -0,0 +1,3 @@
+""" pkg.callables """
+
+getID, getArgs, getRawFunction, ListenerInadequate, CallArgsInfo = [None]*5
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr4/pkg/core/listener.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr4/pkg/core/listener.py
new file mode 100644
index 0000000..28ae017
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr4/pkg/core/listener.py
@@ -0,0 +1,6 @@
+from .callables import \
+    getID, getArgs, getRawFunction,\
+    ListenerInadequate, \
+    CallArgsInfo
+
+from .listenerimpl import Listener, ListenerValidator
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr4/pkg/core/listenerimpl.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr4/pkg/core/listenerimpl.py
new file mode 100644
index 0000000..775bd34
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr4/pkg/core/listenerimpl.py
@@ -0,0 +1,6 @@
+""" pkg.listenerimp """
+class Listener:
+    pass
+
+class ListenerValidator:
+    pass
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr4/script.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr4/script.py
new file mode 100644
index 0000000..a3b2bfd
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr4/script.py
@@ -0,0 +1 @@
+from pkg.core.listener import Listener as listen
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr5/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr5/__init__.py
new file mode 100644
index 0000000..606f71e
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr5/__init__.py
@@ -0,0 +1 @@
+""" A dummy __init__ file """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr5/script.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr5/script.py
new file mode 100644
index 0000000..2a5de40
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr5/script.py
@@ -0,0 +1,4 @@
+import __init__
+
+from modulegraph.find_modules import find_needed_modules
+import distutils
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr6/module.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr6/module.py
new file mode 100644
index 0000000..f6b6f89
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr6/module.py
@@ -0,0 +1,1009 @@
+
+ds = {
+    'name': [
+        list(1)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+        + list(2)
+    ]
+}
+
+import os
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr6/script.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr6/script.py
new file mode 100644
index 0000000..92211e1
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-regr6/script.py
@@ -0,0 +1 @@
+import module
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/mod.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/mod.py
new file mode 100644
index 0000000..7828fc9
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/mod.py
@@ -0,0 +1 @@
+""" Toplevel module """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/__init__.py
new file mode 100644
index 0000000..7fa65f6
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/__init__.py
@@ -0,0 +1 @@
+""" A Package """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/mod.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/mod.py
new file mode 100644
index 0000000..de7fba1
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/mod.py
@@ -0,0 +1 @@
+""" A package module """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/oldstyle.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/oldstyle.py
new file mode 100644
index 0000000..4985e70
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/oldstyle.py
@@ -0,0 +1 @@
+import mod
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/relative.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/relative.py
new file mode 100644
index 0000000..8ffd65a
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/relative.py
@@ -0,0 +1,2 @@
+from __future__ import absolute_import
+from . import mod
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/relimport.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/relimport.py
new file mode 100644
index 0000000..e23cb2e
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/relimport.py
@@ -0,0 +1 @@
+""" pkg.relimport """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/sub2/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/sub2/__init__.py
new file mode 100644
index 0000000..75c8c11
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/sub2/__init__.py
@@ -0,0 +1 @@
+""" pkg.sub2 """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/sub2/mod.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/sub2/mod.py
new file mode 100644
index 0000000..1b172c8
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/sub2/mod.py
@@ -0,0 +1 @@
+""" pkg.sub2.mod """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/subpkg/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/subpkg/__init__.py
new file mode 100644
index 0000000..ced6ba0
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/subpkg/__init__.py
@@ -0,0 +1 @@
+""" pkg.subpkg """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/subpkg/mod2.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/subpkg/mod2.py
new file mode 100644
index 0000000..791e4d4
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/subpkg/mod2.py
@@ -0,0 +1,4 @@
+""" pkg.subpkg.mod2 """
+from __future__ import absolute_import
+from ..sub2.mod import __doc__
+from ..sub2 import mod
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/subpkg/relative.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/subpkg/relative.py
new file mode 100644
index 0000000..775f435
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/subpkg/relative.py
@@ -0,0 +1,3 @@
+""" pkg.subpkg.relative """
+from __future__ import absolute_import
+from .. import mod
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/subpkg/relative2.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/subpkg/relative2.py
new file mode 100644
index 0000000..9e11e20
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/subpkg/relative2.py
@@ -0,0 +1,3 @@
+""" pkg.subpkg.relative """
+from __future__ import absolute_import
+from ..relimport import __doc__ as doc
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/toplevel.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/toplevel.py
new file mode 100644
index 0000000..67f0bb7
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/toplevel.py
@@ -0,0 +1,2 @@
+from __future__ import absolute_import
+import mod
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/script.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/script.py
new file mode 100644
index 0000000..d2199dc
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/script.py
@@ -0,0 +1,9 @@
+import mod
+import pkg
+import pkg.mod
+import pkg.oldstyle
+import pkg.relative
+import pkg.toplevel
+import pkg.subpkg.relative
+import pkg.subpkg.relative2
+import pkg.subpkg.mod2
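
The relimport fixtures above exist to exercise modulegraph's handling of
explicit relative imports (from . import mod, from .. import mod). A rough
sketch of how a test might drive modulegraph over this fixture, assuming the
0.12-era API (ModuleGraph, run_script, findNode) and an illustrative path:

import os
import sys

from modulegraph import modulegraph

# Path is illustrative; real tests compute it relative to the test file.
fixture = os.path.join('modulegraph_tests', 'testpkg-relimport')

# Search the fixture directory first so that 'mod' and 'pkg' resolve there.
mg = modulegraph.ModuleGraph(path=[fixture] + sys.path)
mg.run_script(os.path.join(fixture, 'script.py'))

# 'from . import mod' inside pkg should surface as the graph node 'pkg.mod'.
assert mg.findNode('pkg.mod') is not None
assert mg.findNode('pkg.subpkg.relative2') is not None
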
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport2/pkg/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport2/pkg/__init__.py
new file mode 100644
index 0000000..085139d
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport2/pkg/__init__.py
@@ -0,0 +1,2 @@
+from . import mod1
+from .mod2 import *
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport2/pkg/mod1.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport2/pkg/mod1.py
new file mode 100644
index 0000000..b7ef456
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport2/pkg/mod1.py
@@ -0,0 +1 @@
+""" mod1 """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport2/pkg/mod2.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport2/pkg/mod2.py
new file mode 100644
index 0000000..7161f08
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport2/pkg/mod2.py
@@ -0,0 +1 @@
+""" mod2 """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport2/pkg/mod3.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport2/pkg/mod3.py
new file mode 100644
index 0000000..7999d2d
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport2/pkg/mod3.py
@@ -0,0 +1 @@
+""" mod3 """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport2/pkg/sub/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport2/pkg/sub/__init__.py
new file mode 100644
index 0000000..30440c3
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport2/pkg/sub/__init__.py
@@ -0,0 +1,3 @@
+from .. import mod1
+from .. import mod3
+from ... import toplevel
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport2/toplevel.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport2/toplevel.py
new file mode 100644
index 0000000..29059b9
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport2/toplevel.py
@@ -0,0 +1 @@
+""" toplevel """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/build/lib/nspkg/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/build/lib/nspkg/__init__.py
new file mode 100644
index 0000000..2e2033b
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/build/lib/nspkg/__init__.py
@@ -0,0 +1,7 @@
+# this is a namespace package
+try:
+    import pkg_resources
+    pkg_resources.declare_namespace(__name__)
+except ImportError:
+    import pkgutil
+    __path__ = pkgutil.extend_path(__path__, __name__)
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/build/lib/nspkg/module.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/build/lib/nspkg/module.py
new file mode 100644
index 0000000..0c1d857
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/build/lib/nspkg/module.py
@@ -0,0 +1 @@
+""" nspkg.module """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/build/lib/nspkg/nssubpkg/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/build/lib/nspkg/nssubpkg/__init__.py
new file mode 100644
index 0000000..2e2033b
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/build/lib/nspkg/nssubpkg/__init__.py
@@ -0,0 +1,7 @@
+# this is a namespace package
+try:
+    import pkg_resources
+    pkg_resources.declare_namespace(__name__)
+except ImportError:
+    import pkgutil
+    __path__ = pkgutil.extend_path(__path__, __name__)
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/build/lib/nspkg/nssubpkg/sub.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/build/lib/nspkg/nssubpkg/sub.py
new file mode 100644
index 0000000..bce954b
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/build/lib/nspkg/nssubpkg/sub.py
@@ -0,0 +1 @@
+""" nspkg.nsubpkg.sub """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/setup.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/setup.py
new file mode 100644
index 0000000..c3d21a1
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/setup.py
@@ -0,0 +1,10 @@
+from setuptools import setup
+
+setup(
+    name="nspkg",
+    version="1.0",
+    namespace_packages=['nspkg', 'nspkg.nssubpkg'],
+    packages=['nspkg', 'nspkg.nssubpkg'],
+    package_dir = {'': 'src'},
+    zip_safe=False,
+)
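
The setup.py above declares nspkg and nspkg.nssubpkg as setuptools namespace
packages, and every __init__.py in this fixture carries the usual dual
declaration (pkg_resources.declare_namespace with a pkgutil.extend_path
fallback). A hypothetical usage sketch, assuming the fixture distribution has
been installed (for example via python setup.py develop inside
testpkg-setuptools-namespace):

# Hypothetical usage once the fixture is installed; the module names come from
# the fixture above, everything else is illustrative.
import nspkg.module
import nspkg.nssubpkg.sub

# The namespace machinery lets the two subtrees behave like a single package
# even when they ship in separate distributions.
print(nspkg.module.__doc__)        # ' nspkg.module '
print(nspkg.nssubpkg.sub.__doc__)  # ' nspkg.nsubpkg.sub '
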
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/src/nspkg.egg-info/PKG-INFO b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/src/nspkg.egg-info/PKG-INFO
new file mode 100644
index 0000000..a2d9629
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/src/nspkg.egg-info/PKG-INFO
@@ -0,0 +1,10 @@
+Metadata-Version: 1.0
+Name: nspkg
+Version: 1.0
+Summary: UNKNOWN
+Home-page: UNKNOWN
+Author: UNKNOWN
+Author-email: UNKNOWN
+License: UNKNOWN
+Description: UNKNOWN
+Platform: UNKNOWN
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/src/nspkg.egg-info/SOURCES.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/src/nspkg.egg-info/SOURCES.txt
new file mode 100644
index 0000000..6288716
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/src/nspkg.egg-info/SOURCES.txt
@@ -0,0 +1,11 @@
+setup.py
+src/nspkg/__init__.py
+src/nspkg/module.py
+src/nspkg.egg-info/PKG-INFO
+src/nspkg.egg-info/SOURCES.txt
+src/nspkg.egg-info/dependency_links.txt
+src/nspkg.egg-info/namespace_packages.txt
+src/nspkg.egg-info/not-zip-safe
+src/nspkg.egg-info/top_level.txt
+src/nspkg/nssubpkg/__init__.py
+src/nspkg/nssubpkg/sub.py
\ No newline at end of file
diff --git a/catapult/third_party/six/six.egg-info/dependency_links.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/src/nspkg.egg-info/dependency_links.txt
similarity index 100%
copy from catapult/third_party/six/six.egg-info/dependency_links.txt
copy to catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/src/nspkg.egg-info/dependency_links.txt
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/src/nspkg.egg-info/namespace_packages.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/src/nspkg.egg-info/namespace_packages.txt
new file mode 100644
index 0000000..2321d6b
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/src/nspkg.egg-info/namespace_packages.txt
@@ -0,0 +1,2 @@
+nspkg
+nspkg.nssubpkg
diff --git a/catapult/third_party/six/six.egg-info/dependency_links.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/src/nspkg.egg-info/not-zip-safe
similarity index 100%
copy from catapult/third_party/six/six.egg-info/dependency_links.txt
copy to catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/src/nspkg.egg-info/not-zip-safe
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/src/nspkg.egg-info/top_level.txt b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/src/nspkg.egg-info/top_level.txt
new file mode 100644
index 0000000..61d82f4
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/src/nspkg.egg-info/top_level.txt
@@ -0,0 +1 @@
+nspkg
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/src/nspkg/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/src/nspkg/__init__.py
new file mode 100644
index 0000000..2e2033b
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/src/nspkg/__init__.py
@@ -0,0 +1,7 @@
+# this is a namespace package
+try:
+    import pkg_resources
+    pkg_resources.declare_namespace(__name__)
+except ImportError:
+    import pkgutil
+    __path__ = pkgutil.extend_path(__path__, __name__)
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/src/nspkg/module.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/src/nspkg/module.py
new file mode 100644
index 0000000..0c1d857
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/src/nspkg/module.py
@@ -0,0 +1 @@
+""" nspkg.module """
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/src/nspkg/nssubpkg/__init__.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/src/nspkg/nssubpkg/__init__.py
new file mode 100644
index 0000000..2e2033b
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/src/nspkg/nssubpkg/__init__.py
@@ -0,0 +1,7 @@
+# this is a namespace package
+try:
+    import pkg_resources
+    pkg_resources.declare_namespace(__name__)
+except ImportError:
+    import pkgutil
+    __path__ = pkgutil.extend_path(__path__, __name__)
diff --git a/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/src/nspkg/nssubpkg/sub.py b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/src/nspkg/nssubpkg/sub.py
new file mode 100644
index 0000000..bce954b
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-setuptools-namespace/src/nspkg/nssubpkg/sub.py
@@ -0,0 +1 @@
+""" nspkg.nsubpkg.sub """
diff --git a/catapult/telemetry/third_party/modulegraph/scripts/extract_implies.py b/catapult/telemetry/third_party/modulegraph/scripts/extract_implies.py
new file mode 100644
index 0000000..d6ab353
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/scripts/extract_implies.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+"""
+This script looks for ImportModules calls in C extensions
+of the stdlib.
+
+The current version has hardcoded the location of the source
+trees on Ronald's machine; a future version will be able
+to rebuild the modulegraph source file that contains
+this information.
+"""
+
+import re
+import sys
+import os
+import pprint
+
+import_re = re.compile(r'PyImport_ImportModule\w+\("(\w+)"\);')
+
+def extract_implies(root):
+    modules_dir = os.path.join(root, "Modules")
+    for fn in os.listdir(modules_dir):
+        if not fn.endswith('.c'):
+            continue
+
+        module_name = fn[:-2]
+        if module_name.endswith('module'):
+            module_name = module_name[:-6]
+
+        with open(os.path.join(modules_dir, fn)) as fp:
+            data = fp.read()
+
+        imports = list(sorted(set(import_re.findall(data))))
+        if imports:
+            yield module_name, imports
+
+
+
+def main():
+    for version in ('2.6', '2.7', '3.1'):
+        print "====", version
+        pprint.pprint(list(extract_implies('/Users/ronald/Projects/python/release%s-maint'%(version.replace('.', '')))))
+
+if __name__ == "__main__":
+    main()
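
extract_implies() above scans Modules/*.c in a CPython source tree for
PyImport_ImportModule calls and yields (module name, implied imports) pairs.
A usage sketch; the checkout path is purely an assumption for illustration:

# Illustrative only: point the scanner at a local CPython checkout.
from pprint import pprint

from extract_implies import extract_implies

implied = dict(extract_implies('/path/to/cpython'))
# A C module that calls PyImport_ImportModule("collections") will list
# 'collections' among its implied imports.
pprint(implied)
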
diff --git a/catapult/telemetry/third_party/modulegraph/setup.cfg b/catapult/telemetry/third_party/modulegraph/setup.cfg
new file mode 100644
index 0000000..f1d0167
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/setup.cfg
@@ -0,0 +1,54 @@
+[metadata]
+name = modulegraph
+version = 0.12.1
+description = Python module dependency analysis tool
+long_description_file = 
+	README.txt
+	doc/changelog.rst
+classifiers = 
+	Intended Audience :: Developers
+	License :: OSI Approved :: MIT License
+	Programming Language :: Python
+	Programming Language :: Python :: 2
+	Programming Language :: Python :: 2.7
+	Programming Language :: Python :: 3
+	Programming Language :: Python :: 3.3
+	Programming Language :: Python :: 3.4
+	Topic :: Software Development :: Libraries :: Python Modules
+	Topic :: Software Development :: Build Tools
+author = Ronald Oussoren
+author_email = ronaldoussoren@mac.com
+maintainer = Ronald Oussoren
+maintainer_email = ronaldoussoren@mac.com
+url = http://bitbucket.org/ronaldoussoren/modulegraph
+download_url = http://pypi.python.org/pypi/modulegraph
+license = MIT
+packages = modulegraph
+platforms = any
+requires-dist = 
+	altgraph (>= 0.12)
+console_scripts = 
+	modulegraph = modulegraph.__main__:main
+zip-safe = 1
+keywords = import, dependencies
+
+[check-manifest]
+ignore = 
+	modulegraph_tests/testdata/nspkg/distribute-0.6.10/child/nameduser-1.5-py2.6.egg-info
+	modulegraph_tests/testdata/nspkg/distribute-0.6.10/parent/namedpkg-1.0-py2.6.egg-info
+	modulegraph_tests/testdata/nspkg/distribute-0.6.12/child/nameduser-1.5-py2.5.egg-info
+	modulegraph_tests/testdata/nspkg/distribute-0.6.12/parent/namedpkg-1.0-py2.5.egg-info
+	modulegraph_tests/testdata/nspkg/setuptools-0.6c9/child/nameduser-1.5-py2.5.egg-info
+	modulegraph_tests/testdata/nspkg/setuptools-0.6c9/parent/namedpkg-1.0-py2.5.egg-info
+	modulegraph_tests/testdata/nspkg/src/child/nameduser.egg-info
+	modulegraph_tests/testdata/nspkg/src/parent/namedpkg.egg-info
+	modulegraph_tests/testdata/syspath/myext.pyd
+	modulegraph_tests/testdata/syspath/myext.so
+	modulegraph_tests/testdata/syspath/mymodule2.pyc
+	modulegraph_tests/testpkg-setuptools-namespace/src/nspkg.egg-info
+
+[egg_info]
+tag_build = 
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/catapult/telemetry/third_party/modulegraph/setup.py b/catapult/telemetry/third_party/modulegraph/setup.py
new file mode 100644
index 0000000..a1a4cb6
--- /dev/null
+++ b/catapult/telemetry/third_party/modulegraph/setup.py
@@ -0,0 +1,867 @@
+"""
+Shared setup file for simple python packages. Uses a setup.cfg that
+is the same as the distutils2 project, unless noted otherwise.
+
+It exists for two reasons:
+1) This makes it easier to reuse setup.py code between my own
+   projects
+
+2) Easier migration to distutils2 when that catches on.
+
+Additional functionality:
+
+* Section metadata:
+    requires-test:  Same as 'tests_require' option for setuptools.
+
+"""
+
+import sys
+import os
+import re
+import platform
+from fnmatch import fnmatch
+import os
+import sys
+import time
+import tempfile
+import tarfile
+try:
+    import urllib.request as urllib
+except ImportError:
+    import urllib
+from distutils import log
+try:
+    from hashlib import md5
+
+except ImportError:
+    from md5 import md5
+
+if sys.version_info[0] == 2:
+    from ConfigParser import RawConfigParser, NoOptionError, NoSectionError
+else:
+    from configparser import RawConfigParser, NoOptionError, NoSectionError
+
+ROOTDIR = os.path.dirname(os.path.abspath(__file__))
+
+
+#
+#
+#
+# Parsing the setup.cfg and converting it to something that can be
+# used by setuptools.setup()
+#
+#
+#
+
+def eval_marker(value):
+    """
+    Evaluate a distutils2 environment marker.
+
+    This code is unsafe when used with hostile setup.cfg files,
+    but that's not a problem for our own files.
+    """
+    value = value.strip()
+
+    class M:
+        def __init__(self, **kwds):
+            for k, v in kwds.items():
+                setattr(self, k, v)
+
+    variables = {
+        'python_version': '%d.%d'%(sys.version_info[0], sys.version_info[1]),
+        'python_full_version': sys.version.split()[0],
+        'os': M(
+            name=os.name,
+        ),
+        'sys': M(
+            platform=sys.platform,
+        ),
+        'platform': M(
+            version=platform.version(),
+            machine=platform.machine(),
+        ),
+    }
+
+    return bool(eval(value, variables, variables))
+
+def _opt_value(cfg, into, section, key, transform = None):
+    try:
+        v = cfg.get(section, key)
+        if transform != _as_lines and ';' in v:
+            v, marker = v.rsplit(';', 1)
+            if not eval_marker(marker):
+                return
+
+            v = v.strip()
+
+        if v:
+            if transform:
+                into[key] = transform(v.strip())
+            else:
+                into[key] = v.strip()
+
+    except (NoOptionError, NoSectionError):
+        pass
+
+def _as_bool(value):
+    if value.lower() in ('y', 'yes', 'on'):
+        return True
+    elif value.lower() in ('n', 'no', 'off'):
+        return False
+    elif value.isdigit():
+        return bool(int(value))
+    else:
+        raise ValueError(value)
+
+def _as_list(value):
+    return value.split()
+
+def _as_lines(value):
+    result = []
+    for v in value.splitlines():
+        if ';' in v:
+            v, marker = v.rsplit(';', 1)
+            if not eval_marker(marker):
+                continue
+
+            v = v.strip()
+            if v:
+                result.append(v)
+        else:
+            result.append(v)
+    return result
+
+def _map_requirement(value):
+    m = re.search(r'(\S+)\s*(?:\((.*)\))?', value)
+    name = m.group(1)
+    version = m.group(2)
+
+    if version is None:
+        return name
+
+    else:
+        mapped = []
+        for v in version.split(','):
+            v = v.strip()
+            if v[0].isdigit():
+                # Checks for a specific version prefix
+                m = v.rsplit('.', 1)
+                mapped.append('>=%s,<%s.%s'%(
+                    v, m[0], int(m[1])+1))
+
+            else:
+                mapped.append(v)
+        return '%s %s'%(name, ','.join(mapped),)
+
+def _as_requires(value):
+    requires = []
+    for req in value.splitlines():
+        if ';' in req:
+            req, marker = req.rsplit(';', 1)
+            if not eval_marker(marker):
+                continue
+            req = req.strip()
+
+        if not req:
+            continue
+        requires.append(_map_requirement(req))
+    return requires
+
+def parse_setup_cfg():
+    cfg = RawConfigParser()
+    r = cfg.read([os.path.join(ROOTDIR, 'setup.cfg')])
+    if len(r) != 1:
+        print("Cannot read 'setup.cfg'")
+        sys.exit(1)
+
+    metadata = dict(
+            name        = cfg.get('metadata', 'name'),
+            version     = cfg.get('metadata', 'version'),
+            description = cfg.get('metadata', 'description'),
+    )
+
+    _opt_value(cfg, metadata, 'metadata', 'license')
+    _opt_value(cfg, metadata, 'metadata', 'maintainer')
+    _opt_value(cfg, metadata, 'metadata', 'maintainer_email')
+    _opt_value(cfg, metadata, 'metadata', 'author')
+    _opt_value(cfg, metadata, 'metadata', 'author_email')
+    _opt_value(cfg, metadata, 'metadata', 'url')
+    _opt_value(cfg, metadata, 'metadata', 'download_url')
+    _opt_value(cfg, metadata, 'metadata', 'classifiers', _as_lines)
+    _opt_value(cfg, metadata, 'metadata', 'platforms', _as_list)
+    _opt_value(cfg, metadata, 'metadata', 'packages', _as_list)
+    _opt_value(cfg, metadata, 'metadata', 'keywords', _as_list)
+
+    try:
+        v = cfg.get('metadata', 'requires-dist')
+
+    except (NoOptionError, NoSectionError):
+        pass
+
+    else:
+        requires = _as_requires(v)
+        if requires:
+            metadata['install_requires'] = requires
+
+    try:
+        v = cfg.get('metadata', 'requires-test')
+
+    except (NoOptionError, NoSectionError):
+        pass
+
+    else:
+        requires = _as_requires(v)
+        if requires:
+            metadata['tests_require'] = requires
+
+
+    try:
+        v = cfg.get('metadata', 'long_description_file')
+    except (NoOptionError, NoSectionError):
+        pass
+
+    else:
+        parts = []
+        for nm in v.split():
+            fp = open(nm, 'rU')
+            parts.append(fp.read())
+            fp.close()
+
+        metadata['long_description'] = '\n\n'.join(parts)
+
+
+    try:
+        v = cfg.get('metadata', 'zip-safe')
+    except (NoOptionError, NoSectionError):
+        pass
+
+    else:
+        metadata['zip_safe'] = _as_bool(v)
+
+    try:
+        v = cfg.get('metadata', 'console_scripts')
+    except (NoOptionError, NoSectionError):
+        pass
+
+    else:
+        if 'entry_points' not in metadata:
+            metadata['entry_points'] = {}
+
+        metadata['entry_points']['console_scripts'] = v.splitlines()
+
+    if sys.version_info[:2] <= (2,6):
+        try:
+            metadata['tests_require'] += ", unittest2"
+        except KeyError:
+            metadata['tests_require'] = "unittest2"
+
+    return metadata
+
+
+#
+#
+#
+# Bootstrapping setuptools/distribute, based on
+# a heavily modified version of distribute_setup.py
+#
+#
+#
+
+
+SETUPTOOLS_PACKAGE='setuptools'
+
+
+try:
+    import subprocess
+
+    def _python_cmd(*args):
+        args = (sys.executable,) + args
+        return subprocess.call(args) == 0
+
+except ImportError:
+    def _python_cmd(*args):
+        args = (sys.executable,) + args
+        new_args = []
+        for a in args:
+            new_args.append(a.replace("'", "'\"'\"'"))
+        return os.system(' '.join(new_args)) == 0
+
+
+try:
+    import json
+
+    def get_pypi_src_download(package):
+        url = 'https://pypi.python.org/pypi/%s/json'%(package,)
+        fp = urllib.urlopen(url)
+        try:
+            try:
+                data = fp.read()
+
+            finally:
+                fp.close()
+        except urllib.error:
+            raise RuntimeError("Cannot determine download link for %s"%(package,))
+
+        pkgdata = json.loads(data.decode('utf-8'))
+        if 'urls' not in pkgdata:
+            raise RuntimeError("Cannot determine download link for %s"%(package,))
+
+        for info in pkgdata['urls']:
+            if info['packagetype'] == 'sdist' and info['url'].endswith('tar.gz'):
+                return (info.get('md5_digest'), info['url'])
+
+        raise RuntimeError("Cannot determine downlink link for %s"%(package,))
+
+except ImportError:
+    # Python 2.5 compatibility: no JSON in the stdlib, but luckily JSON syntax is
+    # similar enough to Python's syntax to be able to abuse the Python compiler
+
+    import _ast as ast
+
+    def get_pypi_src_download(package):
+        url = 'https://pypi.python.org/pypi/%s/json'%(package,)
+        fp = urllib.urlopen(url)
+        try:
+            try:
+                data = fp.read()
+
+            finally:
+                fp.close()
+        except urllib.error:
+            raise RuntimeError("Cannot determine download link for %s"%(package,))
+
+
+        a = compile(data, '-', 'eval', ast.PyCF_ONLY_AST)
+        if not isinstance(a, ast.Expression):
+            raise RuntimeError("Cannot determine download link for %s"%(package,))
+
+        a = a.body
+        if not isinstance(a, ast.Dict):
+            raise RuntimeError("Cannot determine download link for %s"%(package,))
+
+        for k, v in zip(a.keys, a.values):
+            if not isinstance(k, ast.Str):
+                raise RuntimeError("Cannot determine download link for %s"%(package,))
+
+            k = k.s
+            if k == 'urls':
+                a = v
+                break
+        else:
+            raise RuntimeError("PyPI JSON for %s doesn't contain URLs section"%(package,))
+
+        if not isinstance(a, ast.List):
+            raise RuntimeError("Cannot determine download link for %s"%(package,))
+
+        for info in v.elts:
+            if not isinstance(info, ast.Dict):
+                raise RuntimeError("Cannot determine download link for %s"%(package,))
+            url = None
+            packagetype = None
+            chksum = None
+
+            for k, v in zip(info.keys, info.values):
+                if not isinstance(k, ast.Str):
+                    raise RuntimeError("Cannot determine download link for %s"%(package,))
+
+                if k.s == 'url':
+                    if not isinstance(v, ast.Str):
+                        raise RuntimeError("Cannot determine download link for %s"%(package,))
+                    url = v.s
+
+                elif k.s == 'packagetype':
+                    if not isinstance(v, ast.Str):
+                        raise RuntimeError("Cannot determine download link for %s"%(package,))
+                    packagetype = v.s
+
+                elif k.s == 'md5_digest':
+                    if not isinstance(v, ast.Str):
+                        raise RuntimeError("Cannot determine download link for %s"%(package,))
+                    chksum = v.s
+
+            if url is not None and packagetype == 'sdist' and url.endswith('.tar.gz'):
+                return (chksum, url)
+
+        raise RuntimeError("Cannot determine download link for %s"%(package,))
+
+def _build_egg(egg, tarball, to_dir):
+    # extracting the tarball
+    tmpdir = tempfile.mkdtemp()
+    log.warn('Extracting in %s', tmpdir)
+    old_wd = os.getcwd()
+    try:
+        os.chdir(tmpdir)
+        tar = tarfile.open(tarball)
+        _extractall(tar)
+        tar.close()
+
+        # going in the directory
+        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
+        os.chdir(subdir)
+        log.warn('Now working in %s', subdir)
+
+        # building an egg
+        log.warn('Building a %s egg in %s', egg, to_dir)
+        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
+
+    finally:
+        os.chdir(old_wd)
+    # returning the result
+    log.warn(egg)
+    if not os.path.exists(egg):
+        raise IOError('Could not build the egg.')
+
+
+def _do_download(to_dir, packagename=SETUPTOOLS_PACKAGE):
+    tarball = download_setuptools(packagename, to_dir)
+    version = tarball.split('-')[-1][:-7]
+    egg = os.path.join(to_dir, '%s-%s-py%d.%d.egg'
+                       % (packagename, version, sys.version_info[0], sys.version_info[1]))
+    if not os.path.exists(egg):
+        _build_egg(egg, tarball, to_dir)
+    sys.path.insert(0, egg)
+    import setuptools
+    setuptools.bootstrap_install_from = egg
+
+
+def use_setuptools():
+    # making sure we use the absolute path
+    return _do_download(os.path.abspath(os.curdir))
+
+def download_setuptools(packagename, to_dir):
+    # making sure we use the absolute path
+    to_dir = os.path.abspath(to_dir)
+    try:
+        from urllib.request import urlopen
+    except ImportError:
+        from urllib2 import urlopen
+
+    chksum, url = get_pypi_src_download(packagename)
+    tgz_name = os.path.basename(url)
+    saveto = os.path.join(to_dir, tgz_name)
+
+    src = dst = None
+    if not os.path.exists(saveto):  # Avoid repeated downloads
+        try:
+            log.warn("Downloading %s", url)
+            src = urlopen(url)
+            # Read/write all in one block, so we don't create a corrupt file
+            # if the download is interrupted.
+            data = src.read()
+
+            if chksum is not None:
+                data_sum = md5(data).hexdigest()
+                if data_sum != chksum:
+                    raise RuntimeError("Downloading %s failed: corrupt checksum"%(url,))
+
+
+            dst = open(saveto, "wb")
+            dst.write(data)
+        finally:
+            if src:
+                src.close()
+            if dst:
+                dst.close()
+    return os.path.realpath(saveto)
+
+
+
+def _extractall(self, path=".", members=None):
+    """Extract all members from the archive to the current working
+       directory and set owner, modification time and permissions on
+       directories afterwards. `path' specifies a different directory
+       to extract to. `members' is optional and must be a subset of the
+       list returned by getmembers().
+    """
+    import copy
+    import operator
+    from tarfile import ExtractError
+    directories = []
+
+    if members is None:
+        members = self
+
+    for tarinfo in members:
+        if tarinfo.isdir():
+            # Extract directories with a safe mode.
+            directories.append(tarinfo)
+            tarinfo = copy.copy(tarinfo)
+            tarinfo.mode = 448 # decimal for oct 0700
+        self.extract(tarinfo, path)
+
+    # Reverse sort directories.
+    if sys.version_info < (2, 4):
+        def sorter(dir1, dir2):
+            return cmp(dir1.name, dir2.name)
+        directories.sort(sorter)
+        directories.reverse()
+    else:
+        directories.sort(key=operator.attrgetter('name'), reverse=True)
+
+    # Set correct owner, mtime and filemode on directories.
+    for tarinfo in directories:
+        dirpath = os.path.join(path, tarinfo.name)
+        try:
+            self.chown(tarinfo, dirpath)
+            self.utime(tarinfo, dirpath)
+            self.chmod(tarinfo, dirpath)
+        except ExtractError:
+            e = sys.exc_info()[1]
+            if self.errorlevel > 1:
+                raise
+            else:
+                self._dbg(1, "tarfile: %s" % e)
+
+
+#
+#
+#
+# Definitions of custom commands
+#
+#
+#
+
+try:
+    import setuptools
+
+except ImportError:
+    use_setuptools()
+
+from setuptools import setup
+
+try:
+    from distutils.core import PyPIRCCommand
+except ImportError:
+    PyPIRCCommand = None # Ancient python version
+
+from distutils.core import Command
+from distutils.errors  import DistutilsError
+from distutils import log
+
+if PyPIRCCommand is None:
+    class upload_docs (Command):
+        description = "upload sphinx documentation"
+        user_options = []
+
+        def initialize_options(self):
+            pass
+
+        def finalize_options(self):
+            pass
+
+        def run(self):
+            raise DistutilsError("not supported on this version of python")
+
+else:
+    class upload_docs (PyPIRCCommand):
+        description = "upload sphinx documentation"
+        user_options = PyPIRCCommand.user_options
+
+        def initialize_options(self):
+            PyPIRCCommand.initialize_options(self)
+            self.username = ''
+            self.password = ''
+
+
+        def finalize_options(self):
+            PyPIRCCommand.finalize_options(self)
+            config = self._read_pypirc()
+            if config != {}:
+                self.username = config['username']
+                self.password = config['password']
+
+
+        def run(self):
+            import subprocess
+            import shutil
+            import zipfile
+            import os
+            import socket
+            import urllib
+            import StringIO
+            from base64 import standard_b64encode
+            import httplib
+            import urlparse
+
+            # Extract the package name from distutils metadata
+            meta = self.distribution.metadata
+            name = meta.get_name()
+
+            # Run sphinx
+            if os.path.exists('doc/_build'):
+                shutil.rmtree('doc/_build')
+            os.mkdir('doc/_build')
+
+            p = subprocess.Popen(['make', 'html'],
+                cwd='doc')
+            exit = p.wait()
+            if exit != 0:
+                raise DistutilsError("sphinx-build failed")
+
+            # Collect sphinx output
+            if not os.path.exists('dist'):
+                os.mkdir('dist')
+            zf = zipfile.ZipFile('dist/%s-docs.zip'%(name,), 'w',
+                    compression=zipfile.ZIP_DEFLATED)
+
+            for toplevel, dirs, files in os.walk('doc/_build/html'):
+                for fn in files:
+                    fullname = os.path.join(toplevel, fn)
+                    relname = os.path.relpath(fullname, 'doc/_build/html')
+
+                    print ("%s -> %s"%(fullname, relname))
+
+                    zf.write(fullname, relname)
+
+            zf.close()
+
+            # Upload the results, this code is based on the distutils
+            # 'upload' command.
+            content = open('dist/%s-docs.zip'%(name,), 'rb').read()
+
+            data = {
+                ':action': 'doc_upload',
+                'name': name,
+                'content': ('%s-docs.zip'%(name,), content),
+            }
+            auth = "Basic " + standard_b64encode(self.username + ":" +
+                 self.password)
+
+
+            boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
+            sep_boundary = '\n--' + boundary
+            end_boundary = sep_boundary + '--'
+            body = StringIO.StringIO()
+            for key, value in data.items():
+                if not isinstance(value, list):
+                    value = [value]
+
+                for value in value:
+                    if isinstance(value, tuple):
+                        fn = ';filename="%s"'%(value[0])
+                        value = value[1]
+                    else:
+                        fn = ''
+
+                    body.write(sep_boundary)
+                    body.write('\nContent-Disposition: form-data; name="%s"'%key)
+                    body.write(fn)
+                    body.write("\n\n")
+                    body.write(value)
+
+            body.write(end_boundary)
+            body.write('\n')
+            body = body.getvalue()
+
+            self.announce("Uploading documentation to %s"%(self.repository,), log.INFO)
+
+            schema, netloc, url, params, query, fragments = \
+                    urlparse.urlparse(self.repository)
+
+
+            if schema == 'http':
+                http = httplib.HTTPConnection(netloc)
+            elif schema == 'https':
+                http = httplib.HTTPSConnection(netloc)
+            else:
+                raise AssertionError("unsupported schema "+schema)
+
+            data = ''
+            loglevel = log.INFO
+            try:
+                http.connect()
+                http.putrequest("POST", url)
+                http.putheader('Content-type',
+                    'multipart/form-data; boundary=%s'%boundary)
+                http.putheader('Content-length', str(len(body)))
+                http.putheader('Authorization', auth)
+                http.endheaders()
+                http.send(body)
+            except socket.error:
+                e = sys.exc_info()[1]
+                self.announce(str(e), log.ERROR)
+                return
+
+            r = http.getresponse()
+            if r.status in (200, 301):
+                self.announce('Upload succeeded (%s): %s' % (r.status, r.reason),
+                    log.INFO)
+            else:
+                self.announce('Upload failed (%s): %s' % (r.status, r.reason),
+                    log.ERROR)
+
+                print ('-'*75)
+                print (r.read())
+                print ('-'*75)
+
+
+def recursiveGlob(root, pathPattern):
+    """
+    Recursively look for files matching 'pathPattern'. Return a list
+    of matching files/directories.
+    """
+    result = []
+
+    for rootpath, dirnames, filenames in os.walk(root):
+        for fn in filenames:
+            if fnmatch(fn, pathPattern):
+                result.append(os.path.join(rootpath, fn))
+    return result
+
+
+def importExternalTestCases(unittest,
+        pathPattern="test_*.py", root=".", package=None):
+    """
+    Import all unittests in the PyObjC tree starting at 'root'
+    """
+
+    testFiles = recursiveGlob(root, pathPattern)
+    testModules = map(lambda x:x[len(root)+1:-3].replace('/', '.'), testFiles)
+    if package is not None:
+        testModules = [(package + '.' + m) for m in testModules]
+
+    suites = []
+
+    for modName in testModules:
+        try:
+            module = __import__(modName)
+        except ImportError:
+            print("SKIP %s: %s"%(modName, sys.exc_info()[1]))
+            continue
+
+        if '.' in modName:
+            for elem in modName.split('.')[1:]:
+                module = getattr(module, elem)
+
+        s = unittest.defaultTestLoader.loadTestsFromModule(module)
+        suites.append(s)
+
+    return unittest.TestSuite(suites)
+
+
+
+class test (Command):
+    description = "run test suite"
+    user_options = [
+        ('verbosity=', None, "print what tests are run"),
+    ]
+
+    def initialize_options(self):
+        self.verbosity='1'
+
+    def finalize_options(self):
+        if isinstance(self.verbosity, str):
+            self.verbosity = int(self.verbosity)
+
+
+    def cleanup_environment(self):
+        ei_cmd = self.get_finalized_command('egg_info')
+        egg_name = ei_cmd.egg_name.replace('-', '_')
+
+        to_remove =  []
+        for dirname in sys.path:
+            bn = os.path.basename(dirname)
+            if bn.startswith(egg_name + "-"):
+                to_remove.append(dirname)
+
+        for dirname in to_remove:
+            log.info("removing installed %r from sys.path before testing"%(
+                dirname,))
+            sys.path.remove(dirname)
+
+    def add_project_to_sys_path(self):
+        from pkg_resources import normalize_path, add_activation_listener
+        from pkg_resources import working_set, require
+
+        self.reinitialize_command('egg_info')
+        self.run_command('egg_info')
+        self.reinitialize_command('build_ext', inplace=1)
+        self.run_command('build_ext')
+
+
+        # Check if this distribution is already on sys.path
+        # and remove that version, this ensures that the right
+        # copy of the package gets tested.
+
+        self.__old_path = sys.path[:]
+        self.__old_modules = sys.modules.copy()
+
+
+        ei_cmd = self.get_finalized_command('egg_info')
+        sys.path.insert(0, normalize_path(ei_cmd.egg_base))
+        sys.path.insert(1, os.path.dirname(__file__))
+
+        # Strip the namespace packages defined in this distribution
+        # from sys.modules, needed to reset the search path for
+        # those modules.
+
+        nspkgs = getattr(self.distribution, 'namespace_packages')
+        if nspkgs is not None:
+            for nm in nspkgs:
+                del sys.modules[nm]
+
+        # Reset pkg_resources state:
+        add_activation_listener(lambda dist: dist.activate())
+        working_set.__init__()
+        require('%s==%s'%(ei_cmd.egg_name, ei_cmd.egg_version))
+
+    def remove_from_sys_path(self):
+        from pkg_resources import working_set
+        sys.path[:] = self.__old_path
+        sys.modules.clear()
+        sys.modules.update(self.__old_modules)
+        working_set.__init__()
+
+
+    def run(self):
+        import unittest
+
+        # Ensure that build directory is on sys.path (py3k)
+
+        self.cleanup_environment()
+        self.add_project_to_sys_path()
+
+        try:
+            meta = self.distribution.metadata
+            name = meta.get_name()
+            test_pkg = name + "_tests"
+            suite = importExternalTestCases(unittest,
+                    "test_*.py", test_pkg, test_pkg)
+
+            runner = unittest.TextTestRunner(verbosity=self.verbosity)
+            result = runner.run(suite)
+
+            # Print out summary. This is a structured format that
+            # should make it easy to use this information in scripts.
+            summary = dict(
+                count=result.testsRun,
+                fails=len(result.failures),
+                errors=len(result.errors),
+                xfails=len(getattr(result, 'expectedFailures', [])),
+                xpass=len(getattr(result, 'expectedSuccesses', [])),
+                skip=len(getattr(result, 'skipped', [])),
+            )
+            print("SUMMARY: %s"%(summary,))
+
+        finally:
+            self.remove_from_sys_path()
+
+#
+#
+#
+#  And finally run the setuptools main entry point.
+#
+#
+#
+
+metadata = parse_setup_cfg()
+
+setup(
+    cmdclass=dict(
+        upload_docs=upload_docs,
+        test=test,
+    ),
+    **metadata
+)
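
parse_setup_cfg() above feeds setuptools from the adjacent setup.cfg; in
particular, requires-dist entries go through _map_requirement, which turns a
bare version such as (0.12) into a bounded specifier. A rough behaviour sketch
(expected values follow from the code above; the helpers are assumed to be in
scope, since importing setup.py directly would invoke setup()):

# Rough behaviour sketch for the setup.cfg helpers defined above.
print(_map_requirement('altgraph (>= 0.12)'))  # 'altgraph >= 0.12'
print(_map_requirement('altgraph (0.12)'))     # 'altgraph >=0.12,<0.13'
print(_as_bool('yes'))                         # True
print(_as_bool('0'))                           # False
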
diff --git a/catapult/telemetry/third_party/mox3/.gitignore b/catapult/telemetry/third_party/mox3/.gitignore
new file mode 100644
index 0000000..f8f74ec
--- /dev/null
+++ b/catapult/telemetry/third_party/mox3/.gitignore
@@ -0,0 +1,71 @@
+*.py[co]
+
+# Packages
+*.egg
+*.egg-info
+dist
+build
+eggs
+parts
+bin
+var
+sdist
+develop-eggs
+.installed.cfg
+
+# Installer logs
+pip-log.txt
+
+# Unit test / coverage reports
+.coverage
+.tox
+.testrepository
+
+#Translations
+*.mo
+
+# virtualenv
+.venv
+
+#Mr Developer
+.mr.developer.cfg
+
+# https://github.com/h5bp/html5-boilerplate/blob/master/.gitignore
+# Numerous always-ignore extensions
+*.diff
+*.err
+*.orig
+*.log
+*.rej
+*.swo
+*.swp
+*.vi
+*~
+
+# OS or Editor folders
+.DS_Store
+Thumbs.db
+.cache
+.project
+.settings
+.tmproj
+nbproject
+*.sublime-project
+*.sublime-workspace
+*.komodoproject
+.komodotools
+
+# Folders to ignore
+.hg
+.svn
+.CVS
+intermediate
+publish
+.idea
+
+# PyDev
+.pydevproject
+
+# pbr
+AUTHORS
+ChangeLog
diff --git a/catapult/telemetry/third_party/mox3/.gitreview b/catapult/telemetry/third_party/mox3/.gitreview
new file mode 100644
index 0000000..3653540
--- /dev/null
+++ b/catapult/telemetry/third_party/mox3/.gitreview
@@ -0,0 +1,4 @@
+[gerrit]
+host=review.openstack.org
+port=29418
+project=openstack/mox3.git
diff --git a/catapult/telemetry/third_party/mox3/.mailmap b/catapult/telemetry/third_party/mox3/.mailmap
new file mode 100644
index 0000000..6a6d090
--- /dev/null
+++ b/catapult/telemetry/third_party/mox3/.mailmap
@@ -0,0 +1 @@
+Przemysław Gajda <quermit@gmail.com> <quermit@gmail.com>
diff --git a/catapult/telemetry/third_party/mox3/.testr.conf b/catapult/telemetry/third_party/mox3/.testr.conf
new file mode 100644
index 0000000..6c1541e
--- /dev/null
+++ b/catapult/telemetry/third_party/mox3/.testr.conf
@@ -0,0 +1,4 @@
+[DEFAULT]
+test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_TEST_TIMEOUT=60 ${PYTHON:-python} -m subunit.run discover -t ./ ./ $LISTOPT $IDOPTION
+test_id_option=--load-list $IDFILE
+test_list_option=--list
diff --git a/catapult/telemetry/third_party/mox3/CONTRIBUTING.rst b/catapult/telemetry/third_party/mox3/CONTRIBUTING.rst
new file mode 100644
index 0000000..8121da2
--- /dev/null
+++ b/catapult/telemetry/third_party/mox3/CONTRIBUTING.rst
@@ -0,0 +1,17 @@
+If you would like to contribute to the development of OpenStack,
+you must follow the steps in the "If you're a developer, start here"
+section of this page:
+
+   http://wiki.openstack.org/HowToContribute
+
+Once those steps have been completed, changes to OpenStack
+should be submitted for review via the Gerrit tool, following
+the workflow documented at:
+
+   http://wiki.openstack.org/GerritWorkflow
+
+Pull requests submitted through GitHub will be ignored.
+
+Bugs should be filed on Launchpad, not GitHub:
+
+   https://bugs.launchpad.net/mox3
diff --git a/catapult/telemetry/third_party/mox3/COPYING.txt b/catapult/telemetry/third_party/mox3/COPYING.txt
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/catapult/telemetry/third_party/mox3/COPYING.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/catapult/telemetry/third_party/mox3/MANIFEST.in b/catapult/telemetry/third_party/mox3/MANIFEST.in
new file mode 100644
index 0000000..c978a52
--- /dev/null
+++ b/catapult/telemetry/third_party/mox3/MANIFEST.in
@@ -0,0 +1,6 @@
+include AUTHORS
+include ChangeLog
+exclude .gitignore
+exclude .gitreview
+
+global-exclude *.pyc
diff --git a/catapult/telemetry/third_party/mox3/README.chromium b/catapult/telemetry/third_party/mox3/README.chromium
new file mode 100644
index 0000000..cecbc0a
--- /dev/null
+++ b/catapult/telemetry/third_party/mox3/README.chromium
@@ -0,0 +1,19 @@
+Name: mox3
+Short Name: mox3
+URL: https://github.com/openstack/mox3
+Version: 60dd893a8095f9d7957bf6635dc1620a7908d86b (commit hash)
+License: Apache License 2.0
+License File: NOT_SHIPPED
+Security Critical: no
+
+Local modification:
+Removed doc/source/conf.py because it is not needed and causes checklicense.py
+to fail.
+
+Description:
+Mox3 is an unofficial port of the Google mox framework
+(http://code.google.com/p/pymox/) to Python 3. It was meant to be as compatible
+with mox as possible, but small enhancements have been made. The library was
+tested on Python versions 3.2, 2.7, and 2.6.
+
+This library was added because pyfakefs depends on it.
diff --git a/catapult/telemetry/third_party/mox3/README.rst b/catapult/telemetry/third_party/mox3/README.rst
new file mode 100644
index 0000000..7f9e9db
--- /dev/null
+++ b/catapult/telemetry/third_party/mox3/README.rst
@@ -0,0 +1,60 @@
+Mox3 - Mock object framework for Python 3
+=========================================
+
+Mox3 is an unofficial port of the Google mox framework
+(http://code.google.com/p/pymox/) to Python 3. It was meant to be as compatible
+with mox as possible, but small enhancements have been made. The library was
+tested on Python versions 3.2, 2.7, and 2.6.
+
+Use at your own risk ;) 
+
+To install:
+
+  $ python setup.py install
+
+Running Tests
+-------------
+The testing system is based on a combination of tox and testr. The canonical
+approach to running tests is to simply run the command `tox`. This will
+create virtual environments, populate them with dependencies and run all of
+the tests that OpenStack CI systems run. Behind the scenes, tox is running
+`testr run --parallel`, but it is set up so that you can pass tox any
+additional testr arguments that are needed. For example, you can run:
+`tox -- --analyze-isolation` to cause tox to tell testr to add
+--analyze-isolation to its argument list.
+
+It is also possible to run the tests inside of a virtual environment
+you have created, or it is possible that you have all of the dependencies
+installed locally already. In this case, you can interact with the testr
+command directly. Running `testr run` will run the entire test suite. `testr
+run --parallel` will run it in parallel (this is the default incantation tox
+uses.) More information about testr can be found at:
+http://wiki.openstack.org/testr
+
+Basic Usage
+-----------
+  
+The basic usage of mox3 is the same as with mox, but the initial import should
+be made from the mox3 module:
+
+  from mox3 import mox
+
+To learn how to use mox3 you may check the documentation of the original mox
+framework:
+
+  http://code.google.com/p/pymox/wiki/MoxDocumentation
+
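A minimal sketch of the record-replay-verify flow described above, assuming only
that mox3 is importable; the Calculator class is purely illustrative and not
part of the library:

    from mox3 import mox

    class Calculator(object):
        # Illustrative class; any class with a public interface works.
        def Add(self, a, b):
            return a + b

    m = mox.Mox()
    calc = m.CreateMock(Calculator)
    calc.Add(2, 3).AndReturn(5)      # record the expected call
    m.ReplayAll()                    # switch the mock to replay mode
    assert calc.Add(2, 3) == 5       # the recorded behaviour is replayed
    m.VerifyAll()                    # check every expected call was made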
+Original Copyright
+------------------
+
+Mox is Copyright 2008 Google Inc, and licensed under the Apache
+License, Version 2.0; see the file COPYING.txt for details.  If you would
+like to help us improve Mox, join the group.
+
+OpenStack Fork
+--------------
+
+* Free software: Apache license
+* Documentation: http://docs.openstack.org/developer/mox3
+* Source: http://git.openstack.org/cgit/openstack/mox3
+* Bugs: http://bugs.launchpad.net/python-mox3
diff --git a/catapult/telemetry/third_party/mox3/doc/source/contributing.rst b/catapult/telemetry/third_party/mox3/doc/source/contributing.rst
new file mode 100644
index 0000000..2ca75d1
--- /dev/null
+++ b/catapult/telemetry/third_party/mox3/doc/source/contributing.rst
@@ -0,0 +1,5 @@
+==============
+ Contributing
+==============
+
+.. include:: ../../CONTRIBUTING.rst
diff --git a/catapult/telemetry/third_party/mox3/doc/source/index.rst b/catapult/telemetry/third_party/mox3/doc/source/index.rst
new file mode 100644
index 0000000..2df4863
--- /dev/null
+++ b/catapult/telemetry/third_party/mox3/doc/source/index.rst
@@ -0,0 +1,21 @@
+mox3
+====
+
+A fork of mox with Python 3 support.
+
+Contents
+========
+
+.. toctree::
+   :maxdepth: 2
+
+   readme
+   contributing
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/catapult/telemetry/third_party/mox3/doc/source/readme.rst b/catapult/telemetry/third_party/mox3/doc/source/readme.rst
new file mode 100644
index 0000000..a6210d3
--- /dev/null
+++ b/catapult/telemetry/third_party/mox3/doc/source/readme.rst
@@ -0,0 +1 @@
+.. include:: ../../README.rst
diff --git a/catapult/perf_insights/third_party/__init__.py b/catapult/telemetry/third_party/mox3/mox3/__init__.py
similarity index 100%
copy from catapult/perf_insights/third_party/__init__.py
copy to catapult/telemetry/third_party/mox3/mox3/__init__.py
diff --git a/catapult/telemetry/third_party/mox3/mox3/fixture.py b/catapult/telemetry/third_party/mox3/mox3/fixture.py
new file mode 100644
index 0000000..f6e39d8
--- /dev/null
+++ b/catapult/telemetry/third_party/mox3/mox3/fixture.py
@@ -0,0 +1,33 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+from mox3 import mox
+from mox3 import stubout
+
+
+class MoxStubout(fixtures.Fixture):
+    """Deal with code around mox and stubout as a fixture."""
+
+    def setUp(self):
+        super(MoxStubout, self).setUp()
+        self.mox = mox.Mox()
+        self.stubs = stubout.StubOutForTesting()
+        self.addCleanup(self.mox.UnsetStubs)
+        self.addCleanup(self.stubs.UnsetAll)
+        self.addCleanup(self.stubs.SmartUnsetAll)
+        self.addCleanup(self.mox.VerifyAll)
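A rough usage sketch for the fixture above, assuming the fixtures and testtools
packages it relies on are installed; ServiceClient and its call method are
illustrative names only:

    import testtools

    from mox3 import fixture as mox_fixture

    class ServiceClient(object):
        # Illustrative class standing in for a real dependency.
        def call(self, payload):
            raise RuntimeError('real implementation, unused in tests')

    class ServiceTest(testtools.TestCase):
        def test_call_is_stubbed(self):
            moxfix = self.useFixture(mox_fixture.MoxStubout())
            client = ServiceClient()
            moxfix.mox.StubOutWithMock(client, 'call')
            client.call('ping').AndReturn('pong')   # record
            moxfix.mox.ReplayAll()
            self.assertEqual('pong', client.call('ping'))
            # VerifyAll and UnsetStubs run via the fixture's cleanups.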
diff --git a/catapult/telemetry/third_party/mox3/mox3/mox.py b/catapult/telemetry/third_party/mox3/mox3/mox.py
new file mode 100644
index 0000000..3c10cc8
--- /dev/null
+++ b/catapult/telemetry/third_party/mox3/mox3/mox.py
@@ -0,0 +1,2168 @@
+# Copyright 2008 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This is a fork of the pymox library intended to work with Python 3.
+# The file was modified by quermit@gmail.com and dawid.fatyga@gmail.com
+
+"""Mox, an object-mocking framework for Python.
+
+Mox works in the record-replay-verify paradigm.  When you first create
+a mock object, it is in record mode.  You then programmatically set
+the expected behavior of the mock object (what methods are to be
+called on it, with what parameters, what they should return, and in
+what order).
+
+Once you have set up the expected mock behavior, you put it in replay
+mode.  Now the mock responds to method calls just as you told it to.
+If an unexpected method (or an expected method with unexpected
+parameters) is called, then an exception will be raised.
+
+Once you are done interacting with the mock, you need to verify that
+all the expected interactions occurred.  (Maybe your code exited
+prematurely without calling some cleanup method!)  The verify phase
+ensures that every expected method was called; otherwise, an exception
+will be raised.
+
+WARNING! Mock objects created by Mox are not thread-safe.  If you
+call a mock from multiple threads, it should be guarded by a mutex.
+
+TODO(stevepm): Add the option to make mocks thread-safe!
+
+Suggested usage / workflow:
+
+    # Create Mox factory
+    my_mox = Mox()
+
+    # Create a mock data access object
+    mock_dao = my_mox.CreateMock(DAOClass)
+
+    # Set up expected behavior
+    mock_dao.RetrievePersonWithIdentifier('1').AndReturn(person)
+    mock_dao.DeletePerson(person)
+
+    # Put mocks in replay mode
+    my_mox.ReplayAll()
+
+    # Inject mock object and run test
+    controller.SetDao(mock_dao)
+    controller.DeletePersonById('1')
+
+    # Verify all methods were called as expected
+    my_mox.VerifyAll()
+"""
+
+import collections
+import difflib
+import inspect
+import re
+import types
+import unittest
+
+from mox3 import stubout
+
+
+class Error(AssertionError):
+    """Base exception for this module."""
+
+    pass
+
+
+class ExpectedMethodCallsError(Error):
+    """Raised when an expected method wasn't called.
+
+    This can occur if Verify() is called before all expected methods have been
+    called.
+    """
+
+    def __init__(self, expected_methods):
+        """Init exception.
+
+        Args:
+            # expected_methods: A sequence of MockMethod objects that should
+            #                   have been called.
+            expected_methods: [MockMethod]
+
+        Raises:
+            ValueError: if expected_methods contains no methods.
+        """
+
+        if not expected_methods:
+            raise ValueError("There must be at least one expected method")
+        Error.__init__(self)
+        self._expected_methods = expected_methods
+
+    def __str__(self):
+        calls = "\n".join(["%3d.  %s" % (i, m)
+                          for i, m in enumerate(self._expected_methods)])
+        return "Verify: Expected methods never called:\n%s" % (calls,)
+
+
+class UnexpectedMethodCallError(Error):
+    """Raised when an unexpected method is called.
+
+    This can occur if a method is called with incorrect parameters, or out of
+    the specified order.
+    """
+
+    def __init__(self, unexpected_method, expected):
+        """Init exception.
+
+        Args:
+            # unexpected_method: MockMethod that was called but was not at the
+            #     head of the expected_method queue.
+            # expected: MockMethod or UnorderedGroup the method should have
+            #     been in.
+            unexpected_method: MockMethod
+            expected: MockMethod or UnorderedGroup
+        """
+
+        Error.__init__(self)
+        if expected is None:
+            self._str = "Unexpected method call %s" % (unexpected_method,)
+        else:
+            differ = difflib.Differ()
+            diff = differ.compare(str(unexpected_method).splitlines(True),
+                                  str(expected).splitlines(True))
+            self._str = ("Unexpected method call."
+                         "  unexpected:-  expected:+\n%s"
+                         % ("\n".join(line.rstrip() for line in diff),))
+
+    def __str__(self):
+        return self._str
+
+
+class UnknownMethodCallError(Error):
+    """Raised if an unknown method is requested of the mock object."""
+
+    def __init__(self, unknown_method_name):
+        """Init exception.
+
+        Args:
+            # unknown_method_name: Method call that is not part of the mocked
+            #     class's public interface.
+            unknown_method_name: str
+        """
+
+        Error.__init__(self)
+        self._unknown_method_name = unknown_method_name
+
+    def __str__(self):
+        return ("Method called is not a member of the object: %s" %
+                self._unknown_method_name)
+
+
+class PrivateAttributeError(Error):
+    """Raised if a MockObject is passed a private additional attribute name."""
+
+    def __init__(self, attr):
+        Error.__init__(self)
+        self._attr = attr
+
+    def __str__(self):
+        return ("Attribute '%s' is private and should not be available "
+                "in a mock object." % self._attr)
+
+
+class ExpectedMockCreationError(Error):
+    """Raised if mocks should have been created by StubOutClassWithMocks."""
+
+    def __init__(self, expected_mocks):
+        """Init exception.
+
+        Args:
+            # expected_mocks: A sequence of MockObjects that should have been
+            #     created
+
+        Raises:
+            ValueError: if expected_mocks contains no methods.
+        """
+
+        if not expected_mocks:
+            raise ValueError("There must be at least one expected method")
+        Error.__init__(self)
+        self._expected_mocks = expected_mocks
+
+    def __str__(self):
+        mocks = "\n".join(["%3d.  %s" % (i, m)
+                          for i, m in enumerate(self._expected_mocks)])
+        return "Verify: Expected mocks never created:\n%s" % (mocks,)
+
+
+class UnexpectedMockCreationError(Error):
+    """Raised if too many mocks were created by StubOutClassWithMocks."""
+
+    def __init__(self, instance, *params, **named_params):
+        """Init exception.
+
+        Args:
+            # instance: the type of object that was created
+            # params: parameters given during instantiation
+            # named_params: named parameters given during instantiation
+        """
+
+        Error.__init__(self)
+        self._instance = instance
+        self._params = params
+        self._named_params = named_params
+
+    def __str__(self):
+        args = ", ".join(["%s" % v for i, v in enumerate(self._params)])
+        error = "Unexpected mock creation: %s(%s" % (self._instance, args)
+
+        if self._named_params:
+            error += ", " + ", ".join(["%s=%s" % (k, v) for k, v in
+                                      self._named_params.items()])
+
+        error += ")"
+        return error
+
+
+class Mox(object):
+    """Mox: a factory for creating mock objects."""
+
+    # A list of types that should be stubbed out with MockObjects (as
+    # opposed to MockAnythings).
+    _USE_MOCK_OBJECT = [types.FunctionType, types.ModuleType, types.MethodType]
+
+    def __init__(self):
+        """Initialize a new Mox."""
+
+        self._mock_objects = []
+        self.stubs = stubout.StubOutForTesting()
+
+    def CreateMock(self, class_to_mock, attrs=None, bounded_to=None):
+        """Create a new mock object.
+
+        Args:
+            # class_to_mock: the class to be mocked
+            class_to_mock: class
+            attrs: dict of attribute names to values that will be
+                   set on the mock object. Only public attributes may be set.
+            bounded_to: optionally, when class_to_mock is not a class,
+                        it points to a real class object, to which
+                        attribute is bound
+
+        Returns:
+            MockObject that can be used as the class_to_mock would be.
+        """
+        if attrs is None:
+            attrs = {}
+        new_mock = MockObject(class_to_mock, attrs=attrs,
+                              class_to_bind=bounded_to)
+        self._mock_objects.append(new_mock)
+        return new_mock
+
+    def CreateMockAnything(self, description=None):
+        """Create a mock that will accept any method calls.
+
+        This does not enforce an interface.
+
+        Args:
+            description: str. Optionally, a descriptive name for the mock
+                         object being created, for debugging output purposes.
+        """
+        new_mock = MockAnything(description=description)
+        self._mock_objects.append(new_mock)
+        return new_mock
+
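A minimal sketch of CreateMockAnything: because no interface is enforced,
arbitrary method names can be recorded (Frobnicate below is an illustrative
name, not part of mox3):

    from mox3 import mox

    m = mox.Mox()
    anything = m.CreateMockAnything(description='fake service')
    anything.Frobnicate('x').AndReturn(42)   # record an arbitrary call
    m.ReplayAll()
    assert anything.Frobnicate('x') == 42    # replayed as recorded
    m.VerifyAll()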
+    def ReplayAll(self):
+        """Set all mock objects to replay mode."""
+
+        for mock_obj in self._mock_objects:
+            mock_obj._Replay()
+
+    def VerifyAll(self):
+        """Call verify on all mock objects created."""
+
+        for mock_obj in self._mock_objects:
+            mock_obj._Verify()
+
+    def ResetAll(self):
+        """Call reset on all mock objects.    This does not unset stubs."""
+
+        for mock_obj in self._mock_objects:
+            mock_obj._Reset()
+
+    def StubOutWithMock(self, obj, attr_name, use_mock_anything=False):
+        """Replace a method, attribute, etc. with a Mock.
+
+        This will replace a class or module with a MockObject, and everything
+        else (method, function, etc) with a MockAnything. This can be
+        overridden to always use a MockAnything by setting use_mock_anything
+        to True.
+
+        Args:
+            obj: A Python object (class, module, instance, callable).
+            attr_name: str. The name of the attribute to replace with a mock.
+            use_mock_anything: bool. True if a MockAnything should be used
+                               regardless of the type of attribute.
+        """
+
+        if inspect.isclass(obj):
+            class_to_bind = obj
+        else:
+            class_to_bind = None
+
+        attr_to_replace = getattr(obj, attr_name)
+        attr_type = type(attr_to_replace)
+
+        if attr_type == MockAnything or attr_type == MockObject:
+            raise TypeError('Cannot mock a MockAnything! Did you remember to '
+                            'call UnsetStubs in your previous test?')
+
+        type_check = (
+            attr_type in self._USE_MOCK_OBJECT or
+            inspect.isclass(attr_to_replace) or
+            isinstance(attr_to_replace, object))
+        if type_check and not use_mock_anything:
+            stub = self.CreateMock(attr_to_replace, bounded_to=class_to_bind)
+        else:
+            stub = self.CreateMockAnything(
+                description='Stub for %s' % attr_to_replace)
+            stub.__name__ = attr_name
+
+        self.stubs.Set(obj, attr_name, stub)
+
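A minimal sketch of StubOutWithMock on an instance attribute; DataStore is an
illustrative class rather than anything provided by mox3, and UnsetStubs
restores the original method afterwards:

    from mox3 import mox

    class DataStore(object):
        # Illustrative class whose method is stubbed out below.
        def Fetch(self, key):
            raise RuntimeError('real implementation')

    store = DataStore()
    m = mox.Mox()
    m.StubOutWithMock(store, 'Fetch')
    store.Fetch('answer').AndReturn(42)   # record
    m.ReplayAll()
    assert store.Fetch('answer') == 42    # replay
    m.VerifyAll()
    m.UnsetStubs()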
+    def StubOutClassWithMocks(self, obj, attr_name):
+        """Replace a class with a "mock factory" that will create mock objects.
+
+        This is useful if the code-under-test directly instantiates
+        dependencies.  Previously some boilerplate was necessary to
+        create a mock that would act as a factory.  Using
+        StubOutClassWithMocks, once you've stubbed out the class you may
+        use the stubbed class as you would any other mock created by mox:
+        during the record phase, new mock instances will be created, and
+        during replay, the recorded mocks will be returned.
+
+        The two approaches are contrasted below:
+
+        # Example using StubOutWithMock (the old, clunky way):
+
+        mock1 = mox.CreateMock(my_import.FooClass)
+        mock2 = mox.CreateMock(my_import.FooClass)
+        foo_factory = mox.StubOutWithMock(my_import, 'FooClass',
+                                          use_mock_anything=True)
+        foo_factory(1, 2).AndReturn(mock1)
+        foo_factory(9, 10).AndReturn(mock2)
+        mox.ReplayAll()
+
+        my_import.FooClass(1, 2)     # Returns mock1 again.
+        my_import.FooClass(9, 10)    # Returns mock2 again.
+        mox.VerifyAll()
+
+        # Example using StubOutClassWithMocks:
+
+        mox.StubOutClassWithMocks(my_import, 'FooClass')
+        mock1 = my_import.FooClass(1, 2)     # Returns a new mock of FooClass
+        mock2 = my_import.FooClass(9, 10)    # Returns another mock instance
+        mox.ReplayAll()
+
+        my_import.FooClass(1, 2)     # Returns mock1 again.
+        my_import.FooClass(9, 10)    # Returns mock2 again.
+        mox.VerifyAll()
+        """
+        attr_to_replace = getattr(obj, attr_name)
+        attr_type = type(attr_to_replace)
+
+        if attr_type == MockAnything or attr_type == MockObject:
+            raise TypeError('Cannot mock a MockAnything! Did you remember to '
+                            'call UnsetStubs in your previous test?')
+
+        if not inspect.isclass(attr_to_replace):
+            raise TypeError('Given attr is not a Class. Use StubOutWithMock.')
+
+        factory = _MockObjectFactory(attr_to_replace, self)
+        self._mock_objects.append(factory)
+        self.stubs.Set(obj, attr_name, factory)
+
+    def UnsetStubs(self):
+        """Restore stubs to their original state."""
+
+        self.stubs.UnsetAll()
+
+
+def Replay(*args):
+    """Put mocks into Replay mode.
+
+    Args:
+        # args is any number of mocks to put into replay mode.
+    """
+
+    for mock in args:
+        mock._Replay()
+
+
+def Verify(*args):
+    """Verify mocks.
+
+    Args:
+        # args is any number of mocks to be verified.
+    """
+
+    for mock in args:
+        mock._Verify()
+
+
+def Reset(*args):
+    """Reset mocks.
+
+    Args:
+        # args is any number of mocks to be reset.
+    """
+
+    for mock in args:
+        mock._Reset()
+
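A minimal sketch of the module-level helpers above, which act on individual
mocks instead of going through a Mox factory's ReplayAll/VerifyAll (Ping is an
illustrative method name):

    from mox3 import mox

    pinger = mox.MockAnything(description='pinger')
    pinger.Ping().AndReturn('pong')   # record
    mox.Replay(pinger)                # put only this mock into replay mode
    assert pinger.Ping() == 'pong'
    mox.Verify(pinger)                # all expected calls were made
    mox.Reset(pinger)                 # back to record mode, queue cleared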
+
+class MockAnything(object):
+    """A mock that can be used to mock anything.
+
+    This is helpful for mocking classes that do not provide a public interface.
+    """
+
+    def __init__(self, description=None):
+        """Initialize a new MockAnything.
+
+        Args:
+            description: str. Optionally, a descriptive name for the mock
+                         object being created, for debugging output purposes.
+        """
+        self._description = description
+        self._Reset()
+
+    def __repr__(self):
+        if self._description:
+            return '<MockAnything instance of %s>' % self._description
+        else:
+            return '<MockAnything instance>'
+
+    def __getattr__(self, method_name):
+        """Intercept method calls on this object.
+
+         A new MockMethod is returned that is aware of the MockAnything's
+         state (record or replay).    The call will be recorded or replayed
+         by the MockMethod's __call__.
+
+        Args:
+            # method name: the name of the method being called.
+            method_name: str
+
+        Returns:
+            A new MockMethod aware of MockAnything's state (record or replay).
+        """
+        if method_name == '__dir__':
+            return self.__class__.__dir__.__get__(self, self.__class__)
+
+        return self._CreateMockMethod(method_name)
+
+    def __str__(self):
+        return self._CreateMockMethod('__str__')()
+
+    def __call__(self, *args, **kwargs):
+        return self._CreateMockMethod('__call__')(*args, **kwargs)
+
+    def __getitem__(self, i):
+        return self._CreateMockMethod('__getitem__')(i)
+
+    def _CreateMockMethod(self, method_name, method_to_mock=None,
+                          class_to_bind=object):
+        """Create a new mock method call and return it.
+
+        Args:
+            # method_name: the name of the method being called.
+            # method_to_mock: The actual method being mocked, used for
+            #                 introspection.
+            # class_to_bind: Class to which the method is bound
+            #                (object by default)
+            method_name: str
+            method_to_mock: a method object
+
+        Returns:
+            A new MockMethod aware of MockAnything's state (record or replay).
+        """
+
+        return MockMethod(method_name, self._expected_calls_queue,
+                          self._replay_mode, method_to_mock=method_to_mock,
+                          description=self._description,
+                          class_to_bind=class_to_bind)
+
+    def __nonzero__(self):
+        """Return 1 for nonzero so the mock can be used as a conditional."""
+
+        return 1
+
+    def __bool__(self):
+        """Return True for nonzero so the mock can be used as a conditional."""
+        return True
+
+    def __eq__(self, rhs):
+        """Provide custom logic to compare objects."""
+
+        return (isinstance(rhs, MockAnything) and
+                self._replay_mode == rhs._replay_mode and
+                self._expected_calls_queue == rhs._expected_calls_queue)
+
+    def __ne__(self, rhs):
+        """Provide custom logic to compare objects."""
+
+        return not self == rhs
+
+    def _Replay(self):
+        """Start replaying expected method calls."""
+
+        self._replay_mode = True
+
+    def _Verify(self):
+        """Verify that all of the expected calls have been made.
+
+        Raises:
+            ExpectedMethodCallsError: if there are still more method calls in
+                                      the expected queue.
+        """
+
+        # If the list of expected calls is not empty, raise an exception
+        if self._expected_calls_queue:
+            # The last MultipleTimesGroup is not popped from the queue.
+            if (len(self._expected_calls_queue) == 1 and
+                    isinstance(self._expected_calls_queue[0],
+                               MultipleTimesGroup) and
+                    self._expected_calls_queue[0].IsSatisfied()):
+                pass
+            else:
+                raise ExpectedMethodCallsError(self._expected_calls_queue)
+
+    def _Reset(self):
+        """Reset the state of this mock to record mode with an empty queue."""
+
+        # Maintain a list of method calls we are expecting
+        self._expected_calls_queue = collections.deque()
+
+        # Make sure we are in setup mode, not replay mode
+        self._replay_mode = False
+
+
+class MockObject(MockAnything):
+    """Mock object that simulates the public/protected interface of a class."""
+
+    def __init__(self, class_to_mock, attrs=None, class_to_bind=None):
+        """Initialize a mock object.
+
+        Determines the methods and properties of the class and stores them.
+
+        Args:
+            # class_to_mock: class to be mocked
+            class_to_mock: class
+            attrs: dict of attribute names to values that will be set on the
+                   mock object. Only public attributes may be set.
+            class_to_bind: optionally, when class_to_mock is not a class at
+                           all, it points to a real class
+
+        Raises:
+            PrivateAttributeError: if a supplied attribute is not public.
+            ValueError: if an attribute would mask an existing method.
+        """
+        if attrs is None:
+            attrs = {}
+
+        # Used to hack around the mixin/inheritance of MockAnything, which
+        # is not a proper object (it can be anything. :-)
+        MockAnything.__dict__['__init__'](self)
+
+        # Get a list of all the public and special methods we should mock.
+        self._known_methods = set()
+        self._known_vars = set()
+        self._class_to_mock = class_to_mock
+
+        if inspect.isclass(class_to_mock):
+            self._class_to_bind = self._class_to_mock
+        else:
+            self._class_to_bind = class_to_bind
+
+        try:
+            if inspect.isclass(self._class_to_mock):
+                self._description = class_to_mock.__name__
+            else:
+                self._description = type(class_to_mock).__name__
+        except Exception:
+            pass
+
+        for method in dir(class_to_mock):
+            attr = getattr(class_to_mock, method)
+            if callable(attr):
+                self._known_methods.add(method)
+            elif not (type(attr) is property):
+                # treating properties as class vars makes little sense.
+                self._known_vars.add(method)
+
+        # Set additional attributes at instantiation time; this is quicker
+        # than manually setting attributes that are normally created in
+        # __init__.
+        for attr, value in attrs.items():
+            if attr.startswith("_"):
+                raise PrivateAttributeError(attr)
+            elif attr in self._known_methods:
+                raise ValueError("'%s' is a method of '%s' objects." % (attr,
+                                 class_to_mock))
+            else:
+                setattr(self, attr, value)
+
+    def _CreateMockMethod(self, *args, **kwargs):
+        """Overridden to provide self._class_to_mock to class_to_bind."""
+        kwargs.setdefault("class_to_bind", self._class_to_bind)
+        return super(MockObject, self)._CreateMockMethod(*args, **kwargs)
+
+    def __getattr__(self, name):
+        """Intercept attribute request on this object.
+
+        If the attribute is a public class variable, it will be returned and
+        not recorded as a call.
+
+        If the attribute is not a variable, it is handled like a method
+        call. The method name is checked against the set of mockable
+        methods, and a new MockMethod is returned that is aware of the
+        MockObject's state (record or replay).    The call will be recorded
+        or replayed by the MockMethod's __call__.
+
+        Args:
+            # name: the name of the attribute being requested.
+            name: str
+
+        Returns:
+            Either a class variable or a new MockMethod that is aware of the
+            state of the mock (record or replay).
+
+        Raises:
+            UnknownMethodCallError if the MockObject does not mock the
+            requested method.
+        """
+
+        if name in self._known_vars:
+            return getattr(self._class_to_mock, name)
+
+        if name in self._known_methods:
+            return self._CreateMockMethod(
+                name,
+                method_to_mock=getattr(self._class_to_mock, name))
+
+        raise UnknownMethodCallError(name)
+
+    def __eq__(self, rhs):
+        """Provide custom logic to compare objects."""
+
+        return (isinstance(rhs, MockObject) and
+                self._class_to_mock == rhs._class_to_mock and
+                self._replay_mode == rhs._replay_mode and
+                self._expected_calls_queue == rhs._expected_calls_queue)
+
+    def __setitem__(self, key, value):
+        """Custom logic for mocking classes that support item assignment.
+
+        Args:
+            key: Key to set the value for.
+            value: Value to set.
+
+        Returns:
+            Expected return value in replay mode. A MockMethod object for the
+            __setitem__ method that has already been called if not in replay
+            mode.
+
+        Raises:
+            TypeError if the underlying class does not support item assignment.
+            UnexpectedMethodCallError if the object does not expect the call to
+                __setitem__.
+
+        """
+        # Verify the class supports item assignment.
+        if '__setitem__' not in dir(self._class_to_mock):
+            raise TypeError('object does not support item assignment')
+
+        # If we are in replay mode then simply call the mock __setitem__ method
+        if self._replay_mode:
+            return MockMethod('__setitem__', self._expected_calls_queue,
+                              self._replay_mode)(key, value)
+
+        # Otherwise, create a mock method __setitem__.
+        return self._CreateMockMethod('__setitem__')(key, value)
+
+    def __getitem__(self, key):
+        """Provide custom logic for mocking classes that are subscriptable.
+
+        Args:
+            key: Key to return the value for.
+
+        Returns:
+            Expected return value in replay mode. A MockMethod object for the
+            __getitem__ method that has already been called if not in replay
+            mode.
+
+        Raises:
+            TypeError if the underlying class is not subscriptable.
+            UnexpectedMethodCallError if the object does not expect the call to
+                __getitem__.
+
+        """
+        # Verify the class supports item assignment.
+        if '__getitem__' not in dir(self._class_to_mock):
+            raise TypeError('unsubscriptable object')
+
+        # If we are in replay mode then simply call the mock __getitem__ method
+        if self._replay_mode:
+            return MockMethod('__getitem__', self._expected_calls_queue,
+                              self._replay_mode)(key)
+
+        # Otherwise, create a mock method __getitem__.
+        return self._CreateMockMethod('__getitem__')(key)
+
+    def __iter__(self):
+        """Provide custom logic for mocking classes that are iterable.
+
+        Returns:
+            Expected return value in replay mode. A MockMethod object for the
+            __iter__ method that has already been called if not in replay mode.
+
+        Raises:
+            TypeError if the underlying class is not iterable.
+            UnexpectedMethodCallError if the object does not expect the call to
+                __iter__.
+
+        """
+        methods = dir(self._class_to_mock)
+
+        # Verify the class supports iteration.
+        if '__iter__' not in methods:
+            # If it doesn't have an __iter__ method but we are in replay
+            # mode, then try to iterate using __getitem__ subscripts.
+            if '__getitem__' not in methods or not self._replay_mode:
+                raise TypeError('not iterable object')
+            else:
+                results = []
+                index = 0
+                try:
+                    while True:
+                        results.append(self[index])
+                        index += 1
+                except IndexError:
+                    return iter(results)
+
+        # If we are in replay mode then simply call the mock __iter__ method.
+        if self._replay_mode:
+            return MockMethod('__iter__', self._expected_calls_queue,
+                              self._replay_mode)()
+
+        # Otherwise, create a mock method __iter__.
+        return self._CreateMockMethod('__iter__')()
+
+    def __contains__(self, key):
+        """Provide custom logic for mocking classes that contain items.
+
+        Args:
+            key: Key to look in container for.
+
+        Returns:
+            Expected return value in replay mode. A MockMethod object for the
+            __contains__ method that has already been called if not in replay
+            mode.
+
+        Raises:
+            TypeError if the underlying class does not implement __contains__
+            UnexpectedMethodCallError if the object does not expect the call
+                to __contains__.
+
+        """
+        contains = self._class_to_mock.__dict__.get('__contains__', None)
+
+        if contains is None:
+            raise TypeError('unsubscriptable object')
+
+        if self._replay_mode:
+            return MockMethod('__contains__', self._expected_calls_queue,
+                              self._replay_mode)(key)
+
+        return self._CreateMockMethod('__contains__')(key)
+
+    def __call__(self, *params, **named_params):
+        """Provide custom logic for mocking classes that are callable."""
+
+        # Verify the class we are mocking is callable.
+        is_callable = hasattr(self._class_to_mock, '__call__')
+        if not is_callable:
+            raise TypeError('Not callable')
+
+        # Because the call is happening directly on this object instead of
+        # a method, the call on the mock method is made right here
+
+        # If we are mocking a Function, then use the function, and not the
+        # __call__ method
+        method = None
+        if type(self._class_to_mock) in (types.FunctionType, types.MethodType):
+            method = self._class_to_mock
+        else:
+            method = getattr(self._class_to_mock, '__call__')
+        mock_method = self._CreateMockMethod('__call__', method_to_mock=method)
+
+        return mock_method(*params, **named_params)
+
+    @property
+    def __name__(self):
+        """Return the name that is being mocked."""
+        return self._description
+
+    # TODO(dejw): this property stopped working after I introduced changes with
+    #     binding classes. Fortunately I found a solution in the form of
+    #     __getattribute__ method below, but this issue should be investigated
+    @property
+    def __class__(self):
+        return self._class_to_mock
+
+    def __dir__(self):
+        """Return only attributes of a class to mock."""
+        return dir(self._class_to_mock)
+
+    def __getattribute__(self, name):
+        """Return _class_to_mock on __class__ attribute."""
+        if name == "__class__":
+            return super(MockObject, self).__getattribute__("_class_to_mock")
+
+        return super(MockObject, self).__getattribute__(name)
+
+
+class _MockObjectFactory(MockObject):
+    """A MockObjectFactory creates mocks and verifies __init__ params.
+
+    A MockObjectFactory removes the boilerplate code that was previously
+    necessary to stub out direct instantiation of a class.
+
+    The MockObjectFactory creates new MockObjects when called and verifies the
+    __init__ params are correct when in record mode.    When replaying,
+    existing mocks are returned, and the __init__ params are verified.
+
+    See StubOutWithMock vs StubOutClassWithMocks for more detail.
+    """
+
+    def __init__(self, class_to_mock, mox_instance):
+        MockObject.__init__(self, class_to_mock)
+        self._mox = mox_instance
+        self._instance_queue = collections.deque()
+
+    def __call__(self, *params, **named_params):
+        """Instantiate and record that a new mock has been created."""
+
+        method = getattr(self._class_to_mock, '__init__')
+        mock_method = self._CreateMockMethod('__init__', method_to_mock=method)
+        # Note: calling mock_method() is deferred in order to catch the
+        # empty instance_queue first.
+
+        if self._replay_mode:
+            if not self._instance_queue:
+                raise UnexpectedMockCreationError(self._class_to_mock, *params,
+                                                  **named_params)
+
+            mock_method(*params, **named_params)
+
+            return self._instance_queue.pop()
+        else:
+            mock_method(*params, **named_params)
+
+            instance = self._mox.CreateMock(self._class_to_mock)
+            self._instance_queue.appendleft(instance)
+            return instance
+
+    def _Verify(self):
+        """Verify that all mocks have been created."""
+        if self._instance_queue:
+            raise ExpectedMockCreationError(self._instance_queue)
+        super(_MockObjectFactory, self)._Verify()
+
+
+class MethodSignatureChecker(object):
+    """Ensures that methods are called correctly."""
+
+    _NEEDED, _DEFAULT, _GIVEN = range(3)
+
+    def __init__(self, method, class_to_bind=None):
+        """Creates a checker.
+
+        Args:
+            # method: A method to check.
+            # class_to_bind: optionally, a class used to type check first
+            #                method parameter, only used with unbound methods
+            method: function
+            class_to_bind: type or None
+
+        Raises:
+            ValueError: method could not be inspected, so checks aren't
+                        possible. Some methods and functions like built-ins
+                        can't be inspected.
+        """
+        try:
+            self._args, varargs, varkw, defaults = inspect.getargspec(method)
+        except TypeError:
+            raise ValueError('Could not get argument specification for %r'
+                             % (method,))
+        if inspect.ismethod(method) or class_to_bind:
+            self._args = self._args[1:]    # Skip 'self'.
+        self._method = method
+        self._instance = None    # May contain the instance this is bound to.
+        self._instance = getattr(method, "__self__", None)
+
+        # _bounded_to determines whether the method is bound or not
+        if self._instance:
+            self._bounded_to = self._instance.__class__
+        else:
+            self._bounded_to = class_to_bind or getattr(method, "im_class",
+                                                        None)
+
+        self._has_varargs = varargs is not None
+        self._has_varkw = varkw is not None
+        if defaults is None:
+            self._required_args = self._args
+            self._default_args = []
+        else:
+            self._required_args = self._args[:-len(defaults)]
+            self._default_args = self._args[-len(defaults):]
+
+    def _RecordArgumentGiven(self, arg_name, arg_status):
+        """Mark an argument as being given.
+
+        Args:
+            # arg_name: The name of the argument to mark in arg_status.
+            # arg_status: Maps argument names to one of
+            #             _NEEDED, _DEFAULT, _GIVEN.
+            arg_name: string
+            arg_status: dict
+
+        Raises:
+            AttributeError: arg_name is already marked as _GIVEN.
+        """
+        if arg_status.get(arg_name, None) == MethodSignatureChecker._GIVEN:
+            raise AttributeError('%s provided more than once' % (arg_name,))
+        arg_status[arg_name] = MethodSignatureChecker._GIVEN
+
+    def Check(self, params, named_params):
+        """Ensures that the parameters used while recording a call are valid.
+
+        Args:
+            # params: A list of positional parameters.
+            # named_params: A dict of named parameters.
+            params: list
+            named_params: dict
+
+        Raises:
+            AttributeError: the given parameters don't work with the given
+                            method.
+        """
+        arg_status = dict((a, MethodSignatureChecker._NEEDED)
+                          for a in self._required_args)
+        for arg in self._default_args:
+            arg_status[arg] = MethodSignatureChecker._DEFAULT
+
+        # WARNING: Suspect hack ahead.
+        #
+        # Check to see if this is an unbound method, where the instance
+        # should be bound as the first argument.    We try to determine if
+        # the first argument (param[0]) is an instance of the class, or it
+        # is equivalent to the class (used to account for Comparators).
+        #
+        # NOTE: If a Func() comparator is used, and the signature is not
+        # correct, this will cause extra executions of the function.
+        if inspect.ismethod(self._method) or self._bounded_to:
+            # The extra param accounts for the bound instance.
+            if len(params) > len(self._required_args):
+                expected = self._bounded_to
+
+                # Check if the param is an instance of the expected class,
+                # or check equality (useful for checking Comparators).
+
+                # This is a hack to work around the fact that the first
+                # parameter can be a Comparator, and the comparison may raise
+                # an exception during this comparison, which is OK.
+                try:
+                    param_equality = (params[0] == expected)
+                except Exception:
+                    param_equality = False
+
+                if isinstance(params[0], expected) or param_equality:
+                    params = params[1:]
+                # If the IsA() comparator is being used, we need to check the
+                # inverse of the usual case - that the given instance is a
+                # subclass of the expected class. For example, the code under
+                # test does late binding to a subclass.
+                elif (isinstance(params[0], IsA) and
+                      params[0]._IsSubClass(expected)):
+                    params = params[1:]
+
+        # Check that each positional param is valid.
+        for i in range(len(params)):
+            try:
+                arg_name = self._args[i]
+            except IndexError:
+                if not self._has_varargs:
+                    raise AttributeError(
+                        '%s does not take %d or more positional '
+                        'arguments' % (self._method.__name__, i))
+            else:
+                self._RecordArgumentGiven(arg_name, arg_status)
+
+        # Check each keyword argument.
+        for arg_name in named_params:
+            if arg_name not in arg_status and not self._has_varkw:
+                raise AttributeError('%s is not expecting keyword argument %s'
+                                     % (self._method.__name__, arg_name))
+            self._RecordArgumentGiven(arg_name, arg_status)
+
+        # Ensure all the required arguments have been given.
+        still_needed = [k for k, v in arg_status.items()
+                        if v == MethodSignatureChecker._NEEDED]
+        if still_needed:
+            raise AttributeError('No values given for arguments: %s'
+                                 % (' '.join(sorted(still_needed))))
+
+
+class MockMethod(object):
+    """Callable mock method.
+
+    A MockMethod should act exactly like the method it mocks, accepting
+    parameters and returning a value, or throwing an exception (as specified).
+    When this method is called, it can optionally verify whether the called
+    method (name and signature) matches the expected method.
+    """
+
+    def __init__(self, method_name, call_queue, replay_mode,
+                 method_to_mock=None, description=None, class_to_bind=None):
+        """Construct a new mock method.
+
+        Args:
+            # method_name: the name of the method
+            # call_queue: deque of calls, verify this call against the head,
+            #             or add this call to the queue.
+            # replay_mode: False if we are recording, True if we are verifying
+            #              calls against the call queue.
+            # method_to_mock: The actual method being mocked, used for
+            #                 introspection.
+            # description: optionally, a descriptive name for this method.
+            #              Typically this is equal to the descriptive name of
+            #              the method's class.
+            # class_to_bind: optionally, a class that is used for unbound
+            #                methods (or functions in Python3) to which method
+            #                is bound, in order not to lose binding
+            #                information. If given, it will be used for
+            #                checking the type of first method parameter
+            method_name: str
+            call_queue: list or deque
+            replay_mode: bool
+            method_to_mock: a method object
+            description: str or None
+            class_to_bind: type or None
+        """
+
+        self._name = method_name
+        self.__name__ = method_name
+        self._call_queue = call_queue
+        if not isinstance(call_queue, collections.deque):
+            self._call_queue = collections.deque(self._call_queue)
+        self._replay_mode = replay_mode
+        self._description = description
+
+        self._params = None
+        self._named_params = None
+        self._return_value = None
+        self._exception = None
+        self._side_effects = None
+
+        try:
+            self._checker = MethodSignatureChecker(method_to_mock,
+                                                   class_to_bind=class_to_bind)
+        except ValueError:
+            self._checker = None
+
+    def __call__(self, *params, **named_params):
+        """Log parameters and return the specified return value.
+
+        If the Mock(Anything/Object) associated with this call is in record
+        mode, this MockMethod will be pushed onto the expected call queue.
+        If the mock is in replay mode, this will pop a MockMethod off the
+        top of the queue and verify this call is equal to the expected call.
+
+        Raises:
+            UnexpectedMethodCall if this call is supposed to match an expected
+                method call and it does not.
+        """
+
+        self._params = params
+        self._named_params = named_params
+
+        if not self._replay_mode:
+            if self._checker is not None:
+                self._checker.Check(params, named_params)
+            self._call_queue.append(self)
+            return self
+
+        expected_method = self._VerifyMethodCall()
+
+        if expected_method._side_effects:
+            result = expected_method._side_effects(*params, **named_params)
+            if expected_method._return_value is None:
+                expected_method._return_value = result
+
+        if expected_method._exception:
+            raise expected_method._exception
+
+        return expected_method._return_value
+
+    def __getattr__(self, name):
+        """Raise an AttributeError with a helpful message."""
+
+        raise AttributeError(
+            'MockMethod has no attribute "%s". '
+            'Did you remember to put your mocks in replay mode?' % name)
+
+    def __iter__(self):
+        """Raise a TypeError with a helpful message."""
+        raise TypeError(
+            'MockMethod cannot be iterated. '
+            'Did you remember to put your mocks in replay mode?')
+
+    def next(self):
+        """Raise a TypeError with a helpful message."""
+        raise TypeError(
+            'MockMethod cannot be iterated. '
+            'Did you remember to put your mocks in replay mode?')
+
+    def __next__(self):
+        """Raise a TypeError with a helpful message."""
+        raise TypeError(
+            'MockMethod cannot be iterated. '
+            'Did you remember to put your mocks in replay mode?')
+
+    def _PopNextMethod(self):
+        """Pop the next method from our call queue."""
+        try:
+            return self._call_queue.popleft()
+        except IndexError:
+            raise UnexpectedMethodCallError(self, None)
+
+    def _VerifyMethodCall(self):
+        """Verify the called method is expected.
+
+        This can be an ordered method, or part of an unordered set.
+
+        Returns:
+            The expected mock method.
+
+        Raises:
+            UnexpectedMethodCall if the method called was not expected.
+        """
+
+        expected = self._PopNextMethod()
+
+        # Loop here, because we might have a MethodGroup followed by another
+        # group.
+        while isinstance(expected, MethodGroup):
+            expected, method = expected.MethodCalled(self)
+            if method is not None:
+                return method
+
+        # This is a mock method, so just check equality.
+        if expected != self:
+            raise UnexpectedMethodCallError(self, expected)
+
+        return expected
+
+    def __str__(self):
+        params = ', '.join(
+            [repr(p) for p in self._params or []] +
+            ['%s=%r' % x for x in sorted((self._named_params or {}).items())])
+        full_desc = "%s(%s) -> %r" % (self._name, params, self._return_value)
+        if self._description:
+            full_desc = "%s.%s" % (self._description, full_desc)
+        return full_desc
+
+    def __hash__(self):
+        return id(self)
+
+    def __eq__(self, rhs):
+        """Test whether this MockMethod is equivalent to another MockMethod.
+
+        Args:
+            # rhs: the right hand side of the test
+            rhs: MockMethod
+        """
+
+        return (isinstance(rhs, MockMethod) and
+                self._name == rhs._name and
+                self._params == rhs._params and
+                self._named_params == rhs._named_params)
+
+    def __ne__(self, rhs):
+        """Test if this MockMethod is not equivalent to another MockMethod.
+
+        Args:
+            # rhs: the right hand side of the test
+            rhs: MockMethod
+        """
+
+        return not self == rhs
+
+    def GetPossibleGroup(self):
+        """Returns a possible group from the end of the call queue.
+
+        Returns None if no other methods are in the queue.
+        """
+
+        # Remove this method from the tail of the queue so we can add it
+        # to a group.
+        this_method = self._call_queue.pop()
+        assert this_method == self
+
+        # Determine if the tail of the queue is a group, or just a regular
+        # ordered mock method.
+        group = None
+        try:
+            group = self._call_queue[-1]
+        except IndexError:
+            pass
+
+        return group
+
+    def _CheckAndCreateNewGroup(self, group_name, group_class):
+        """Checks if the last method (a possible group) is an instance of our
+        group_class. Adds the current method to this group or creates a
+        new one.
+
+        Args:
+
+            group_name: the name of the group.
+            group_class: the class used to create instance of this new group
+        """
+        group = self.GetPossibleGroup()
+
+        # If this is a group, and it is the correct group, add the method.
+        if isinstance(group, group_class) and group.group_name() == group_name:
+            group.AddMethod(self)
+            return self
+
+        # Create a new group and add the method.
+        new_group = group_class(group_name)
+        new_group.AddMethod(self)
+        self._call_queue.append(new_group)
+        return self
+
+    def InAnyOrder(self, group_name="default"):
+        """Move this method into a group of unordered calls.
+
+        A group of unordered calls must be defined together, and must be
+        executed in full before the next expected method can be called.
+        There can be multiple groups that are expected serially, if they are
+        given different group names. The same group name can be reused if there
+        is a standard method call, or a group with a different name, spliced
+        between usages.
+
+        Args:
+            group_name: the name of the unordered group.
+
+        Returns:
+            self
+        """
+        return self._CheckAndCreateNewGroup(group_name, UnorderedGroup)
+
+    def MultipleTimes(self, group_name="default"):
+        """Move method into group of calls which may be called multiple times.
+
+        A group of repeating calls must be defined together, and must be
+        executed in full before the next expected method can be called.
+
+        Args:
+            group_name: the name of the unordered group.
+
+        Returns:
+            self
+        """
+        return self._CheckAndCreateNewGroup(group_name, MultipleTimesGroup)
+
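A minimal sketch combining the two grouping calls above; mock_dao, Delete and
Flush are illustrative names on a mock created with CreateMockAnything:

    from mox3 import mox

    m = mox.Mox()
    mock_dao = m.CreateMockAnything(description='dao')
    # The two deletes may happen in either order...
    mock_dao.Delete(5).InAnyOrder('cleanup')
    mock_dao.Delete(7).InAnyOrder('cleanup')
    # ...after which Flush() may be called one or more times.
    mock_dao.Flush().MultipleTimes()
    m.ReplayAll()

    mock_dao.Delete(7)
    mock_dao.Delete(5)
    mock_dao.Flush()
    m.VerifyAll()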
+    def AndReturn(self, return_value):
+        """Set the value to return when this method is called.
+
+        Args:
+            # return_value can be anything.
+        """
+
+        self._return_value = return_value
+        return return_value
+
+    def AndRaise(self, exception):
+        """Set the exception to raise when this method is called.
+
+        Args:
+            # exception: the exception to raise when this method is called.
+            exception: Exception
+        """
+
+        self._exception = exception
+
+    def WithSideEffects(self, side_effects):
+        """Set the side effects that are simulated when this method is called.
+
+        Args:
+            side_effects: A callable which modifies the parameters or other
+                          relevant state which a given test case depends on.
+
+        Returns:
+            Self for chaining with AndReturn and AndRaise.
+        """
+        self._side_effects = side_effects
+        return self
+
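A minimal sketch of WithSideEffects chained with AndReturn; mock_loader and
_fill are illustrative names, and the side-effect callable mutates the list
the caller passes in:

    from mox3 import mox

    def _fill(result_list):
        # Side effect applied to the argument the test passes in.
        result_list.extend([1, 2, 3])

    m = mox.Mox()
    mock_loader = m.CreateMockAnything(description='loader')
    mock_loader.Load(mox.IsA(list)).WithSideEffects(_fill).AndReturn(True)
    m.ReplayAll()

    data = []
    assert mock_loader.Load(data) is True   # _fill ran against data
    assert data == [1, 2, 3]
    m.VerifyAll()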
+
+class Comparator:
+    """Base class for all Mox comparators.
+
+    A Comparator can be used as a parameter to a mocked method when the exact
+    value is not known.    For example, the code you are testing might build up
+    a long SQL string that is passed to your mock DAO. You're only interested
+    that the IN clause contains the proper primary keys, so you can set your
+    mock up as follows:
+
+    mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
+
+    Now whatever query is passed in must contain the string 'IN (1, 2, 4, 5)'.
+
+    A Comparator may replace one or more parameters, for example:
+    # return at most 10 rows
+    mock_dao.RunQuery(StrContains('SELECT'), 10)
+
+    or
+
+    # Return some non-deterministic number of rows
+    mock_dao.RunQuery(StrContains('SELECT'), IsA(int))
+    """
+
+    def equals(self, rhs):
+        """Special equals method that all comparators must implement.
+
+        Args:
+            rhs: any python object
+        """
+
+        raise NotImplementedError('method must be implemented by a subclass.')
+
+    def __eq__(self, rhs):
+        return self.equals(rhs)
+
+    def __ne__(self, rhs):
+        return not self.equals(rhs)
+
+
+class Is(Comparator):
+    """Comparison class used to check identity, instead of equality."""
+
+    def __init__(self, obj):
+        self._obj = obj
+
+    def equals(self, rhs):
+        return rhs is self._obj
+
+    def __repr__(self):
+        return "<is %r (%s)>" % (self._obj, id(self._obj))
+
+
+class IsA(Comparator):
+    """This class wraps a basic Python type or class.    It is used to verify
+    that a parameter is of the given type or class.
+
+    Example:
+    mock_dao.Connect(IsA(DbConnectInfo))
+    """
+
+    def __init__(self, class_name):
+        """Initialize IsA
+
+        Args:
+            class_name: basic python type or a class
+        """
+
+        self._class_name = class_name
+
+    def equals(self, rhs):
+        """Check to see if the RHS is an instance of class_name.
+
+        Args:
+            # rhs: the right hand side of the test
+            rhs: object
+
+        Returns:
+            bool
+        """
+
+        try:
+            return isinstance(rhs, self._class_name)
+        except TypeError:
+            # Check raw types if there was a type error. This is helpful for
+            # things like cStringIO.StringIO.
+            return type(rhs) == type(self._class_name)
+
+    def _IsSubClass(self, clazz):
+        """Check to see if the IsA comparators class is a subclass of clazz.
+
+        Args:
+            # clazz: a class object
+
+        Returns:
+            bool
+        """
+
+        try:
+            return issubclass(self._class_name, clazz)
+        except TypeError:
+            # Check raw types if there was a type error. This is helpful for
+            # things like cStringIO.StringIO.
+            return type(clazz) == type(self._class_name)
+
+    def __repr__(self):
+        return 'mox.IsA(%s) ' % str(self._class_name)
+
+
+class IsAlmost(Comparator):
+    """Comparison class used to check whether a parameter is nearly equal
+    to a given value. Generally useful for floating point numbers.
+
+    Example:
+    mock_dao.SetTimeout(IsAlmost(3.9))
+    """
+
+    def __init__(self, float_value, places=7):
+        """Initialize IsAlmost.
+
+        Args:
+            float_value: The value for making the comparison.
+            places: The number of decimal places to round to.
+        """
+
+        self._float_value = float_value
+        self._places = places
+
+    def equals(self, rhs):
+        """Check to see if RHS is almost equal to float_value
+
+        Args:
+            rhs: the value to compare to float_value
+
+        Returns:
+            bool
+        """
+
+        try:
+            return round(rhs - self._float_value, self._places) == 0
+        except Exception:
+            # Probably because either float_value or rhs is not a number.
+            return False
+
+    def __repr__(self):
+        return str(self._float_value)
+
+
+class StrContains(Comparator):
+    """Comparison class used to check whether a substring exists in a
+    string parameter. This can be useful in mocking a database with SQL
+    passed in as a string parameter, for example.
+
+    Example:
+    mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
+    """
+
+    def __init__(self, search_string):
+        """Initialize.
+
+        Args:
+            # search_string: the string you are searching for
+            search_string: str
+        """
+
+        self._search_string = search_string
+
+    def equals(self, rhs):
+        """Check to see if the search_string is contained in the rhs string.
+
+        Args:
+            # rhs: the right hand side of the test
+            rhs: object
+
+        Returns:
+            bool
+        """
+
+        try:
+            return rhs.find(self._search_string) > -1
+        except Exception:
+            return False
+
+    def __repr__(self):
+        return '<str containing \'%s\'>' % self._search_string
+
+
+class Regex(Comparator):
+    """Checks if a string matches a regular expression.
+
+    This uses a given regular expression to determine equality.
+    """
+
+    def __init__(self, pattern, flags=0):
+        """Initialize.
+
+        Args:
+            # pattern is the regular expression to search for
+            pattern: str
+            # flags passed to re.compile function as the second argument
+            flags: int
+        """
+        self.flags = flags
+        self.regex = re.compile(pattern, flags=flags)
+
+    def equals(self, rhs):
+        """Check to see if rhs matches regular expression pattern.
+
+        Returns:
+            bool
+        """
+
+        try:
+            return self.regex.search(rhs) is not None
+        except Exception:
+            return False
+
+    def __repr__(self):
+        s = '<regular expression \'%s\'' % self.regex.pattern
+        if self.flags:
+            s += ', flags=%d' % self.flags
+        s += '>'
+        return s
+
+
+class In(Comparator):
+    """Checks whether an item (or key) is in a list (or dict) parameter.
+
+    Example:
+    mock_dao.GetUsersInfo(In('expectedUserName')).AndReturn(mock_result)
+    """
+
+    def __init__(self, key):
+        """Initialize.
+
+        Args:
+            # key is any thing that could be in a list or a key in a dict
+        """
+
+        self._key = key
+
+    def equals(self, rhs):
+        """Check to see whether key is in rhs.
+
+        Args:
+            rhs: dict
+
+        Returns:
+            bool
+        """
+
+        try:
+            return self._key in rhs
+        except Exception:
+            return False
+
+    def __repr__(self):
+        return '<sequence or map containing \'%s\'>' % str(self._key)
+
+
+class Not(Comparator):
+    """Checks whether a predicates is False.
+
+    Example:
+        mock_dao.UpdateUsers(Not(ContainsKeyValue('stevepm',
+                                                  stevepm_user_info)))
+    """
+
+    def __init__(self, predicate):
+        """Initialize.
+
+        Args:
+            # predicate: a Comparator instance.
+        """
+
+        assert isinstance(predicate, Comparator), ("predicate %r must be a"
+                                                   " Comparator." % predicate)
+        self._predicate = predicate
+
+    def equals(self, rhs):
+        """Check to see whether the predicate is False.
+
+        Args:
+            rhs: A value that will be given in argument of the predicate.
+
+        Returns:
+            bool
+        """
+
+        try:
+            return not self._predicate.equals(rhs)
+        except Exception:
+            return False
+
+    def __repr__(self):
+        return '<not \'%s\'>' % self._predicate
+
+
+class ContainsKeyValue(Comparator):
+    """Checks whether a key/value pair is in a dict parameter.
+
+    Example:
+    mock_dao.UpdateUsers(ContainsKeyValue('stevepm', stevepm_user_info))
+    """
+
+    def __init__(self, key, value):
+        """Initialize.
+
+        Args:
+            # key: a key in a dict
+            # value: the corresponding value
+        """
+
+        self._key = key
+        self._value = value
+
+    def equals(self, rhs):
+        """Check whether the given key/value pair is in the rhs dict.
+
+        Returns:
+            bool
+        """
+
+        try:
+            return rhs[self._key] == self._value
+        except Exception:
+            return False
+
+    def __repr__(self):
+        return '<map containing the entry \'%s: %s\'>' % (str(self._key),
+                                                          str(self._value))
+
+
+class ContainsAttributeValue(Comparator):
+    """Checks whether passed parameter contains attributes with a given value.
+
+    Example:
+    mock_dao.UpdateSomething(
+        ContainsAttributeValue('stevepm', stevepm_user_info))
+    """
+
+    def __init__(self, key, value):
+        """Initialize.
+
+        Args:
+            # key: an attribute name of an object
+            # value: the corresponding value
+        """
+
+        self._key = key
+        self._value = value
+
+    def equals(self, rhs):
+        """Check if the given attribute has a matching value in the rhs object.
+
+        Returns:
+            bool
+        """
+
+        try:
+            return getattr(rhs, self._key) == self._value
+        except Exception:
+            return False
+
+
+class SameElementsAs(Comparator):
+    """Checks whether sequences contain the same elements (ignoring order).
+
+    Example:
+    mock_dao.ProcessUsers(SameElementsAs(['stevepm', 'salomaki']))
+    """
+
+    def __init__(self, expected_seq):
+        """Initialize.
+
+        Args:
+            expected_seq: a sequence
+        """
+        # Store in case expected_seq is an iterator.
+        self._expected_list = list(expected_seq)
+
+    def equals(self, actual_seq):
+        """Check to see whether actual_seq has same elements as expected_seq.
+
+        Args:
+            actual_seq: sequence
+
+        Returns:
+            bool
+        """
+        try:
+            # Store in case actual_seq is an iterator. We potentially iterate
+            # twice: once to make the dict, once in the list fallback.
+            actual_list = list(actual_seq)
+        except TypeError:
+            # actual_seq cannot be read as a sequence.
+            #
+            # This happens because Mox uses __eq__ both to check object
+            # equality (in MethodSignatureChecker) and to invoke Comparators.
+            return False
+
+        try:
+            return set(self._expected_list) == set(actual_list)
+        except TypeError:
+            # Fall back to slower list-compare if any of the objects
+            # are unhashable.
+            if len(self._expected_list) != len(actual_list):
+                return False
+            for el in actual_list:
+                if el not in self._expected_list:
+                    return False
+        return True
+
+    def __repr__(self):
+        return '<sequence with same elements as \'%s\'>' % self._expected_list
+
+
+class And(Comparator):
+    """Evaluates one or more Comparators on RHS, returns an AND of the results.
+    """
+
+    def __init__(self, *args):
+        """Initialize.
+
+        Args:
+            *args: One or more Comparator
+        """
+
+        self._comparators = args
+
+    def equals(self, rhs):
+        """Checks whether all Comparators are equal to rhs.
+
+        Args:
+            # rhs: can be anything
+
+        Returns:
+            bool
+        """
+
+        for comparator in self._comparators:
+            if not comparator.equals(rhs):
+                return False
+
+        return True
+
+    def __repr__(self):
+        return '<AND %s>' % str(self._comparators)
+
+
+class Or(Comparator):
+    """Evaluates one or more Comparators on RHS; returns OR of the results."""
+
+    def __init__(self, *args):
+        """Initialize.
+
+        Args:
+            *args: One or more Mox comparators
+        """
+
+        self._comparators = args
+
+    def equals(self, rhs):
+        """Checks whether any Comparator is equal to rhs.
+
+        Args:
+            # rhs: can be anything
+
+        Returns:
+            bool
+        """
+
+        for comparator in self._comparators:
+            if comparator.equals(rhs):
+                return True
+
+        return False
+
+    def __repr__(self):
+        return '<OR %s>' % str(self._comparators)
+
+
+class Func(Comparator):
+    """Call a function that should verify the parameter passed in is correct.
+
+    You may need the ability to perform more advanced operations on the
+    parameter in order to validate it. You can use this to have a callable
+    validate any parameter. The callable should return either True or False.
+
+
+    Example:
+
+    def myParamValidator(param):
+        # Advanced logic here
+        return True
+
+    mock_dao.DoSomething(Func(myParamValidator), True)
+    """
+
+    def __init__(self, func):
+        """Initialize.
+
+        Args:
+            func: callable that takes one parameter and returns a bool
+        """
+
+        self._func = func
+
+    def equals(self, rhs):
+        """Test whether rhs passes the function test.
+
+        rhs is passed into func.
+
+        Args:
+            rhs: any python object
+
+        Returns:
+            the result of func(rhs)
+        """
+
+        return self._func(rhs)
+
+    def __repr__(self):
+        return str(self._func)
+
+
+class IgnoreArg(Comparator):
+    """Ignore an argument.
+
+    This can be used when we don't care about an argument of a method call.
+
+    Example:
+    # Check if CastMagic is called with 3 as first arg and
+    # 'disappear' as third.
+    mymock.CastMagic(3, IgnoreArg(), 'disappear')
+    """
+
+    def equals(self, unused_rhs):
+        """Ignores arguments and returns True.
+
+        Args:
+            unused_rhs: any python object
+
+        Returns:
+            always returns True
+        """
+
+        return True
+
+    def __repr__(self):
+        return '<IgnoreArg>'
+
+
+class Value(Comparator):
+    """Compares argument against a remembered value.
+
+    To be used in conjunction with the Remember comparator. See Remember()
+    for example.
+    """
+
+    def __init__(self):
+        self._value = None
+        self._has_value = False
+
+    def store_value(self, rhs):
+        self._value = rhs
+        self._has_value = True
+
+    def equals(self, rhs):
+        if not self._has_value:
+            return False
+        else:
+            return rhs == self._value
+
+    def __repr__(self):
+        if self._has_value:
+            return "<Value %r>" % self._value
+        else:
+            return "<Value>"
+
+
+class Remember(Comparator):
+    """Remembers the argument to a value store.
+
+    To be used in conjunction with Value comparator.
+
+    Example:
+    # Remember the argument for one method call.
+    users_list = Value()
+    mock_dao.ProcessUsers(Remember(users_list))
+
+    # Check argument against remembered value.
+    mock_dao.ReportUsers(users_list)
+    """
+
+    def __init__(self, value_store):
+        if not isinstance(value_store, Value):
+            raise TypeError(
+                "value_store is not an instance of the Value class")
+        self._value_store = value_store
+
+    def equals(self, rhs):
+        self._value_store.store_value(rhs)
+        return True
+
+    def __repr__(self):
+        return "<Remember %d>" % id(self._value_store)
+
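+
+# A minimal sketch (illustrative only, not part of upstream mox3) of the
+# Value/Remember pairing described above: Remember() captures the argument
+# seen during replay and Value() later matches against it. MockAnything is
+# defined earlier in this module; its _Replay()/_Verify() helpers are used
+# directly here for brevity, as the unit tests in this package also do.
+def _example_remember_value():
+    users = Value()
+    dao = MockAnything()
+    dao.ProcessUsers(Remember(users))  # captures the replayed argument
+    dao.ReportUsers(users)             # must equal the captured argument
+    dao._Replay()
+    dao.ProcessUsers(['alice', 'bob'])
+    dao.ReportUsers(['alice', 'bob'])
+    dao._Verify()
+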
+
+class MethodGroup(object):
+    """Base class containing common behaviour for MethodGroups."""
+
+    def __init__(self, group_name):
+        self._group_name = group_name
+
+    def group_name(self):
+        return self._group_name
+
+    def __str__(self):
+        return '<%s "%s">' % (self.__class__.__name__, self._group_name)
+
+    def AddMethod(self, mock_method):
+        raise NotImplementedError
+
+    def MethodCalled(self, mock_method):
+        raise NotImplementedError
+
+    def IsSatisfied(self):
+        raise NotImplementedError
+
+
+class UnorderedGroup(MethodGroup):
+    """UnorderedGroup holds a set of method calls that may occur in any order.
+
+    This construct is helpful for non-deterministic events, such as iterating
+    over the keys of a dict.
+    """
+
+    def __init__(self, group_name):
+        super(UnorderedGroup, self).__init__(group_name)
+        self._methods = []
+
+    def __str__(self):
+        return '%s "%s" pending calls:\n%s' % (
+            self.__class__.__name__,
+            self._group_name,
+            "\n".join(str(method) for method in self._methods))
+
+    def AddMethod(self, mock_method):
+        """Add a method to this group.
+
+        Args:
+            mock_method: A mock method to be added to this group.
+        """
+
+        self._methods.append(mock_method)
+
+    def MethodCalled(self, mock_method):
+        """Remove a method call from the group.
+
+        If the method is not in the set, an UnexpectedMethodCallError will be
+        raised.
+
+        Args:
+            mock_method: a mock method that should be equal to a method in the
+                         group.
+
+        Returns:
+            The mock method from the group
+
+        Raises:
+            UnexpectedMethodCallError if the mock_method was not in the group.
+        """
+
+        # Check to see if this method exists, and if so, remove it from the set
+        # and return it.
+        for method in self._methods:
+            if method == mock_method:
+                # Remove the called mock_method instead of the method in the
+                # group. The called method will match any comparators when
+                # equality is checked during removal. The method in the group
+                # could pass a comparator to another comparator during the
+                # equality check.
+                self._methods.remove(mock_method)
+
+                # If group is not empty, put it back at the head of the queue.
+                if not self.IsSatisfied():
+                    mock_method._call_queue.appendleft(self)
+
+                return self, method
+
+        raise UnexpectedMethodCallError(mock_method, self)
+
+    def IsSatisfied(self):
+        """Return True if there are not any methods in this group."""
+
+        return len(self._methods) == 0
+
+
+class MultipleTimesGroup(MethodGroup):
+    """MultipleTimesGroup holds methods that may be called any number of times.
+
+    Note: Each method must be called at least once.
+
+    This is helpful if you don't know or care how many times a method is
+    called.
+    """
+
+    def __init__(self, group_name):
+        super(MultipleTimesGroup, self).__init__(group_name)
+        self._methods = set()
+        self._methods_left = set()
+
+    def AddMethod(self, mock_method):
+        """Add a method to this group.
+
+        Args:
+            mock_method: A mock method to be added to this group.
+        """
+
+        self._methods.add(mock_method)
+        self._methods_left.add(mock_method)
+
+    def MethodCalled(self, mock_method):
+        """Remove a method call from the group.
+
+        If the method is not in the set, an UnexpectedMethodCallError will be
+        raised.
+
+        Args:
+            mock_method: a mock method that should be equal to a method in the
+                         group.
+
+        Returns:
+            The mock method from the group
+
+        Raises:
+            UnexpectedMethodCallError if the mock_method was not in the group.
+        """
+
+        # Check to see if this method exists, and if so add it to the set of
+        # called methods.
+        for method in self._methods:
+            if method == mock_method:
+                self._methods_left.discard(method)
+                # Always put this group back on top of the queue,
+                # because we don't know when we are done.
+                mock_method._call_queue.appendleft(self)
+                return self, method
+
+        if self.IsSatisfied():
+            next_method = mock_method._PopNextMethod()
+            return next_method, None
+        else:
+            raise UnexpectedMethodCallError(mock_method, self)
+
+    def IsSatisfied(self):
+        """Return True if all methods in group are called at least once."""
+        return len(self._methods_left) == 0
+
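+
+# A minimal sketch (illustrative only, not part of upstream mox3) of how
+# these groups are driven from the recording API on MockMethod above:
+# InAnyOrder() collects calls into an UnorderedGroup and MultipleTimes()
+# into a MultipleTimesGroup.
+def _example_grouped_expectations():
+    mock = MockAnything()
+    mock.Open('a').InAnyOrder()
+    mock.Open('b').InAnyOrder()
+    mock.Close().MultipleTimes()
+    mock._Replay()
+    mock.Open('b')      # the unordered group accepts either order
+    mock.Open('a')
+    mock.Close()        # the MultipleTimes group accepts repeated calls
+    mock.Close()
+    mock._Verify()
+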
+
+class MoxMetaTestBase(type):
+    """Metaclass to add mox cleanup and verification to every test.
+
+    As the mox unit testing class is being constructed (MoxTestBase or a
+    subclass), this metaclass will modify all test functions to call the
+    CleanUpMox method of the test class after they finish. This means that
+    unstubbing and verifying will happen for every test with no additional
+    code, and any failures will result in test failures as opposed to errors.
+    """
+
+    def __init__(cls, name, bases, d):
+        type.__init__(cls, name, bases, d)
+
+        # also get all the attributes from the base classes to account
+        # for a case when test class is not the immediate child of MoxTestBase
+        for base in bases:
+            for attr_name in dir(base):
+                if attr_name not in d:
+                    d[attr_name] = getattr(base, attr_name)
+
+        for func_name, func in d.items():
+            if func_name.startswith('test') and callable(func):
+
+                setattr(cls, func_name, MoxMetaTestBase.CleanUpTest(cls, func))
+
+    @staticmethod
+    def CleanUpTest(cls, func):
+        """Adds Mox cleanup code to any MoxTestBase method.
+
+        Always unsets stubs after a test. Will verify all mocks for tests that
+        otherwise pass.
+
+        Args:
+            cls: MoxTestBase or subclass; the class whose method we are
+                                          altering.
+            func: method; the method of the MoxTestBase test class we wish to
+                          alter.
+
+        Returns:
+            The modified method.
+        """
+        def new_method(self, *args, **kwargs):
+            mox_obj = getattr(self, 'mox', None)
+            stubout_obj = getattr(self, 'stubs', None)
+            cleanup_mox = False
+            cleanup_stubout = False
+            if mox_obj and isinstance(mox_obj, Mox):
+                cleanup_mox = True
+            if stubout_obj and isinstance(stubout_obj,
+                                          stubout.StubOutForTesting):
+                cleanup_stubout = True
+            try:
+                func(self, *args, **kwargs)
+            finally:
+                if cleanup_mox:
+                    mox_obj.UnsetStubs()
+                if cleanup_stubout:
+                    stubout_obj.UnsetAll()
+                    stubout_obj.SmartUnsetAll()
+            if cleanup_mox:
+                mox_obj.VerifyAll()
+        new_method.__name__ = func.__name__
+        new_method.__doc__ = func.__doc__
+        new_method.__module__ = func.__module__
+        return new_method
+
+
+_MoxTestBase = MoxMetaTestBase('_MoxTestBase', (unittest.TestCase, ), {})
+
+
+class MoxTestBase(_MoxTestBase):
+    """Convenience test class to make stubbing easier.
+
+    Sets up a "mox" attribute which is an instance of Mox (any mox tests will
+    want this), and a "stubs" attribute that is an instance of
+    StubOutForTesting (needed at times). Also automatically unsets any stubs
+    and verifies that all mock methods have been called at the end of each
+    test, eliminating boilerplate code.
+    """
+
+    def setUp(self):
+        super(MoxTestBase, self).setUp()
+        self.mox = Mox()
+        self.stubs = stubout.StubOutForTesting()
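+
+
+# A minimal usage sketch (illustrative only, not part of upstream mox3).
+# Because of MoxMetaTestBase, UnsetStubs() and VerifyAll() run automatically
+# after each test method; the test body only records, replays, and exercises
+# the stubbed code.
+class _ExampleMoxUsageTest(MoxTestBase):
+
+    def testStubbedListdir(self):
+        import os
+        self.mox.StubOutWithMock(os, 'listdir')
+        os.listdir('/some/path').AndReturn(['a.txt'])
+        self.mox.ReplayAll()
+        self.assertEqual(['a.txt'], os.listdir('/some/path'))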
diff --git a/catapult/telemetry/third_party/mox3/mox3/stubout.py b/catapult/telemetry/third_party/mox3/mox3/stubout.py
new file mode 100644
index 0000000..a02ed40
--- /dev/null
+++ b/catapult/telemetry/third_party/mox3/mox3/stubout.py
@@ -0,0 +1,152 @@
+# Copyright 2008 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This is a fork of the pymox library intended to work with Python 3.
+# The file was modified by quermit@gmail.com and dawid.fatyga@gmail.com
+
+import inspect
+
+
+class StubOutForTesting(object):
+    """Sample Usage:
+
+       You want os.path.exists() to always return true during testing.
+
+       stubs = StubOutForTesting()
+       stubs.Set(os.path, 'exists', lambda x: 1)
+           ...
+       stubs.UnsetAll()
+
+       The above changes os.path.exists into a lambda that returns 1. Once
+       the ... part of the code finishes, the UnsetAll() looks up the old value
+       of os.path.exists and restores it.
+
+    """
+    def __init__(self):
+        self.cache = []
+        self.stubs = []
+
+    def __del__(self):
+        self.SmartUnsetAll()
+        self.UnsetAll()
+
+    def SmartSet(self, obj, attr_name, new_attr):
+        """Replace obj.attr_name with new_attr.
+
+        This method is smart and works at the module, class, and instance
+        level while preserving proper inheritance. It will not stub out C
+        types, however, unless that has been explicitly allowed by the type.
+
+        This method supports the case where attr_name is a staticmethod or a
+        classmethod of obj.
+
+        Notes:
+          - If obj is an instance, then it is its class that will actually be
+            stubbed. Note that the method Set() does not do that: if obj is
+            an instance, it (and not its class) will be stubbed.
+          - The stubbing uses the builtin getattr and setattr, so __get__
+            and __set__ will be called when stubbing. (TODO: a better idea
+            would probably be to manipulate obj.__dict__ instead of using
+            getattr() and setattr().)
+
+        Raises AttributeError if the attribute cannot be found.
+        """
+        if (inspect.ismodule(obj) or
+                (not inspect.isclass(obj) and attr_name in obj.__dict__)):
+            orig_obj = obj
+            orig_attr = getattr(obj, attr_name)
+
+        else:
+            if not inspect.isclass(obj):
+                mro = list(inspect.getmro(obj.__class__))
+            else:
+                mro = list(inspect.getmro(obj))
+
+            mro.reverse()
+
+            orig_attr = None
+
+            for cls in mro:
+                try:
+                    orig_obj = cls
+                    orig_attr = getattr(obj, attr_name)
+                except AttributeError:
+                    continue
+
+        if orig_attr is None:
+            raise AttributeError("Attribute not found.")
+
+        # Calling getattr() on a staticmethod transforms it to a 'normal'
+        # function. We need to ensure that we put it back as a staticmethod.
+        old_attribute = obj.__dict__.get(attr_name)
+        if (old_attribute is not None
+                and isinstance(old_attribute, staticmethod)):
+            orig_attr = staticmethod(orig_attr)
+
+        self.stubs.append((orig_obj, attr_name, orig_attr))
+        setattr(orig_obj, attr_name, new_attr)
+
+    def SmartUnsetAll(self):
+        """Reverses all the SmartSet() calls.
+
+        Restores things to their original definition. It's okay to call
+        SmartUnsetAll() repeatedly, as later calls have no effect if no
+        SmartSet() calls have been made.
+        """
+        self.stubs.reverse()
+
+        for args in self.stubs:
+            setattr(*args)
+
+        self.stubs = []
+
+    def Set(self, parent, child_name, new_child):
+        """Replace child_name's old definition with new_child.
+
+        Replace definition in the context of the given parent. The parent could
+        be a module when the child is a function at module scope. Or the parent
+        could be a class when a class' method is being replaced. The named
+        child is set to new_child, while the prior definition is saved away
+        for later, when UnsetAll() is called.
+
+        This method supports the case where child_name is a staticmethod or a
+        classmethod of parent.
+        """
+        old_child = getattr(parent, child_name)
+
+        old_attribute = parent.__dict__.get(child_name)
+        if old_attribute is not None:
+            if isinstance(old_attribute, staticmethod):
+                old_child = staticmethod(old_child)
+            elif isinstance(old_attribute, classmethod):
+                old_child = classmethod(old_child.__func__)
+
+        self.cache.append((parent, old_child, child_name))
+        setattr(parent, child_name, new_child)
+
+    def UnsetAll(self):
+        """Reverses all the Set() calls.
+
+        Restores things to their original definition. It's okay to call
+        UnsetAll() repeatedly, as later calls have no effect if no Set()
+        calls have been made.
+        """
+        # Undo calls to Set() in reverse order, in case Set() was called on the
+        # same arguments repeatedly (want the original call to be last one
+        # undone)
+        self.cache.reverse()
+
+        for (parent, old_child, child_name) in self.cache:
+            setattr(parent, child_name, old_child)
+        self.cache = []
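+
+
+# A minimal usage sketch (illustrative only, not part of upstream mox3):
+# Set() swaps in a replacement and UnsetAll() restores the saved original,
+# as described in the class docstring above.
+def _example_stub_os_path_exists():
+    import os.path
+    stubs = StubOutForTesting()
+    stubs.Set(os.path, 'exists', lambda path: True)
+    try:
+        assert os.path.exists('/no/such/path')  # stubbed: always True
+    finally:
+        stubs.UnsetAll()                        # original restored here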
diff --git a/catapult/perf_insights/third_party/__init__.py b/catapult/telemetry/third_party/mox3/mox3/tests/__init__.py
similarity index 100%
copy from catapult/perf_insights/third_party/__init__.py
copy to catapult/telemetry/third_party/mox3/mox3/tests/__init__.py
diff --git a/catapult/telemetry/third_party/mox3/mox3/tests/mox_helper.py b/catapult/telemetry/third_party/mox3/mox3/tests/mox_helper.py
new file mode 100644
index 0000000..67843a9
--- /dev/null
+++ b/catapult/telemetry/third_party/mox3/mox3/tests/mox_helper.py
@@ -0,0 +1,145 @@
+# Copyright 2008 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+# This is a fork of the pymox library intended to work with Python 3.
+# The file was modified by quermit@gmail.com and dawid.fatyga@gmail.com
+
+"""A very basic test class derived from mox.MoxTestBase, used by test_mox.py.
+
+The class defined in this module is used to test the features of
+MoxTestBase and is not intended to be a standalone test.  It needs to
+be in a separate module, because otherwise the tests in this class
+(which should not all pass) would be executed as part of the
+test_mox.py test suite.
+
+See test_mox.MoxTestBaseTest for how this class is actually used.
+"""
+
+import os
+
+from mox3 import mox
+
+
+class ExampleMoxTestMixin(object):
+    """Mix-in class for mox test case class.
+
+    It stubs out the same function as one of the test methods in
+    the example test case. Both tests must pass, as the metaclass wraps
+    test methods in all base classes.
+    """
+
+    def testStat(self):
+        self.mox.StubOutWithMock(os, 'stat')
+        os.stat(self.DIR_PATH)
+        self.mox.ReplayAll()
+        os.stat(self.DIR_PATH)
+
+
+class ExampleMoxTest(mox.MoxTestBase, ExampleMoxTestMixin):
+
+    DIR_PATH = '/path/to/some/directory'
+
+    def testSuccess(self):
+        self.mox.StubOutWithMock(os, 'listdir')
+        os.listdir(self.DIR_PATH)
+        self.mox.ReplayAll()
+        os.listdir(self.DIR_PATH)
+
+    def testExpectedNotCalled(self):
+        self.mox.StubOutWithMock(os, 'listdir')
+        os.listdir(self.DIR_PATH)
+        self.mox.ReplayAll()
+
+    def testUnexpectedCall(self):
+        self.mox.StubOutWithMock(os, 'listdir')
+        os.listdir(self.DIR_PATH)
+        self.mox.ReplayAll()
+        os.listdir('/path/to/some/other/directory')
+        os.listdir(self.DIR_PATH)
+
+    def testFailure(self):
+        self.assertTrue(False)
+
+    def testStatOther(self):
+        self.mox.StubOutWithMock(os, 'stat')
+        os.stat(self.DIR_PATH)
+        self.mox.ReplayAll()
+        os.stat(self.DIR_PATH)
+
+    def testHasStubs(self):
+        listdir_list = []
+
+        def MockListdir(directory):
+            listdir_list.append(directory)
+
+        self.stubs.Set(os, 'listdir', MockListdir)
+        os.listdir(self.DIR_PATH)
+        self.assertEqual([self.DIR_PATH], listdir_list)
+
+
+class TestClassFromAnotherModule(object):
+
+    def __init__(self):
+        return None
+
+    def Value(self):
+        return 'Not mock'
+
+
+class ChildClassFromAnotherModule(TestClassFromAnotherModule):
+    """A child class of TestClassFromAnotherModule.
+
+    Used to test stubbing out unbound methods, where child classes
+    are eventually bound.
+    """
+
+    def __init__(self):
+        TestClassFromAnotherModule.__init__(self)
+
+
+class CallableClass(object):
+
+    def __init__(self, one, two, nine=None):
+        pass
+
+    def __call__(self, one):
+        return 'Not mock'
+
+    def Value(self):
+        return 'Not mock'
+
+
+def MyTestFunction(one, two, nine=None):
+    pass
+
+
+class ExampleClass(object):
+    def __init__(self, foo='bar'):
+        pass
+
+    def TestMethod(self, one, two, nine=None):
+        pass
+
+    def NamedParams(self, ignore, foo='bar', baz='qux'):
+        pass
+
+    def SpecialArgs(self, *args, **kwargs):
+        pass
+
+
+# This class is used to test stubbing out __init__ of a parent class.
+class ChildExampleClass(ExampleClass):
+    def __init__(self):
+        ExampleClass.__init__(self)
diff --git a/catapult/telemetry/third_party/mox3/mox3/tests/stubout_helper.py b/catapult/telemetry/third_party/mox3/mox3/tests/stubout_helper.py
new file mode 100644
index 0000000..7a6b266
--- /dev/null
+++ b/catapult/telemetry/third_party/mox3/mox3/tests/stubout_helper.py
@@ -0,0 +1,20 @@
+# Copyright 2008 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This is a fork of the pymox library intended to work with Python 3.
+# The file was modified by quermit@gmail.com and dawid.fatyga@gmail.com
+
+
+def SampleFunction():
+    raise Exception('I should never be called!')
diff --git a/catapult/telemetry/third_party/mox3/mox3/tests/test_mox.py b/catapult/telemetry/third_party/mox3/mox3/tests/test_mox.py
new file mode 100644
index 0000000..48d1ecf
--- /dev/null
+++ b/catapult/telemetry/third_party/mox3/mox3/tests/test_mox.py
@@ -0,0 +1,2408 @@
+# Unit tests for Mox.
+#
+# Copyright 2008 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This is a fork of the pymox library intended to work with Python 3.
+# The file was modified by quermit@gmail.com and dawid.fatyga@gmail.com
+
+import io
+import re
+import sys
+
+from mox3 import mox
+from mox3.tests import mox_helper
+
+import six
+import testtools
+
+
+OS_LISTDIR = mox_helper.os.listdir
+
+
+class ExpectedMethodCallsErrorTest(testtools.TestCase):
+    """Test creation and string conversion of ExpectedMethodCallsError."""
+
+    def testAtLeastOneMethod(self):
+        self.assertRaises(ValueError, mox.ExpectedMethodCallsError, [])
+
+    def testOneError(self):
+        method = mox.MockMethod("testMethod", [], False)
+        method(1, 2).AndReturn('output')
+        e = mox.ExpectedMethodCallsError([method])
+        self.assertEqual(
+            "Verify: Expected methods never called:\n"
+            "  0.  testMethod(1, 2) -> 'output'",
+            str(e))
+
+    def testManyErrors(self):
+        method1 = mox.MockMethod("testMethod", [], False)
+        method1(1, 2).AndReturn('output')
+        method2 = mox.MockMethod("testMethod", [], False)
+        method2(a=1, b=2, c="only named")
+        method3 = mox.MockMethod("testMethod2", [], False)
+        method3().AndReturn(44)
+        method4 = mox.MockMethod("testMethod", [], False)
+        method4(1, 2).AndReturn('output')
+        e = mox.ExpectedMethodCallsError([method1, method2, method3, method4])
+        self.assertEqual(
+            "Verify: Expected methods never called:\n"
+            "  0.  testMethod(1, 2) -> 'output'\n"
+            "  1.  testMethod(a=1, b=2, c='only named') -> None\n"
+            "  2.  testMethod2() -> 44\n"
+            "  3.  testMethod(1, 2) -> 'output'",
+            str(e))
+
+
+class OrTest(testtools.TestCase):
+    """Test Or correctly chains Comparators."""
+
+    def testValidOr(self):
+        """Or should be True if either Comparator returns True."""
+        self.assertTrue(mox.Or(mox.IsA(dict), mox.IsA(str)) == {})
+        self.assertTrue(mox.Or(mox.IsA(dict), mox.IsA(str)) == 'test')
+        self.assertTrue(mox.Or(mox.IsA(str), mox.IsA(str)) == 'test')
+
+    def testInvalidOr(self):
+        """Or should be False if both Comparators return False."""
+        self.assertFalse(mox.Or(mox.IsA(dict), mox.IsA(str)) == 0)
+
+
+class AndTest(testtools.TestCase):
+    """Test And correctly chains Comparators."""
+
+    def testValidAnd(self):
+        """And should be True if both Comparators return True."""
+        self.assertTrue(mox.And(mox.IsA(str), mox.IsA(str)) == '1')
+
+    def testClauseOneFails(self):
+        """And should be False if the first Comparator returns False."""
+
+        self.assertFalse(mox.And(mox.IsA(dict), mox.IsA(str)) == '1')
+
+    def testAdvancedUsage(self):
+        """And should work with other Comparators.
+
+        Note: this test is reliant on In and ContainsKeyValue.
+        """
+        test_dict = {"mock": "obj", "testing": "isCOOL"}
+        self.assertTrue(mox.And(mox.In("testing"),
+                        mox.ContainsKeyValue("mock", "obj")) == test_dict)
+
+    def testAdvancedUsageFails(self):
+        """Note: this test is reliant on In and ContainsKeyValue."""
+        test_dict = {"mock": "obj", "testing": "isCOOL"}
+        self.assertFalse(mox.And(mox.In("NOTFOUND"),
+                         mox.ContainsKeyValue("mock", "obj")) == test_dict)
+
+
+class FuncTest(testtools.TestCase):
+    """Test Func correctly evaluates based upon true-false return."""
+
+    def testFuncTrueFalseEvaluation(self):
+        """Should return True if the validating function returns True."""
+        equals_one = lambda x: x == 1
+        always_none = lambda x: None
+
+        self.assertTrue(mox.Func(equals_one) == 1)
+        self.assertFalse(mox.Func(equals_one) == 0)
+
+        self.assertFalse(mox.Func(always_none) == 1)
+        self.assertFalse(mox.Func(always_none) == 0)
+        self.assertFalse(mox.Func(always_none) is None)
+
+    def testFuncExceptionPropagation(self):
+        """Exceptions within the validating function should propagate."""
+        class TestException(Exception):
+            pass
+
+        def raiseExceptionOnNotOne(value):
+            if value != 1:
+                raise TestException
+            else:
+                return True
+
+        self.assertTrue(mox.Func(raiseExceptionOnNotOne) == 1)
+        self.assertRaises(
+            TestException, mox.Func(raiseExceptionOnNotOne).__eq__, 2)
+
+
+class SameElementsAsTest(testtools.TestCase):
+    """SameElementsAs correctly identifies sequences with same elements."""
+
+    def testSortedLists(self):
+        """Should return True if two lists are exactly equal."""
+        self.assertTrue(mox.SameElementsAs([1, 2.0, 'c']) == [1, 2.0, 'c'])
+
+    def testUnsortedLists(self):
+        """Should return True if lists are unequal but have same elements."""
+        self.assertTrue(mox.SameElementsAs([1, 2.0, 'c']) == [2.0, 'c', 1])
+
+    def testUnhashableLists(self):
+        """Should return True if lists have the same unhashable elements."""
+        self.assertTrue(mox.SameElementsAs([{'a': 1}, {2: 'b'}]) ==
+                        [{2: 'b'}, {'a': 1}])
+
+    def testEmptyLists(self):
+        """Should return True for two empty lists."""
+        self.assertTrue(mox.SameElementsAs([]) == [])
+
+    def testUnequalLists(self):
+        """Should return False if the lists are not equal."""
+        self.assertFalse(mox.SameElementsAs([1, 2.0, 'c']) == [2.0, 'c'])
+
+    def testUnequalUnhashableLists(self):
+        """Should return False if lists with unhashable items are unequal."""
+        self.assertFalse(mox.SameElementsAs(
+            [{'a': 1}, {2: 'b'}]) == [{2: 'b'}])
+
+    def testActualIsNotASequence(self):
+        """Should return False if the actual object is not a sequence."""
+        self.assertFalse(mox.SameElementsAs([1]) == object())
+
+    def testOneUnhashableObjectInActual(self):
+        """Store the entire iterator for a correct comparison.
+
+        In a previous version of SameElementsAs, iteration stopped when an
+        unhashable object was encountered and then was restarted, so the actual
+        list appeared smaller than it was.
+        """
+        self.assertFalse(mox.SameElementsAs([1, 2]) == iter([{}, 1, 2]))
+
+
+class ContainsKeyValueTest(testtools.TestCase):
+    """Test ContainsKeyValue correctly identifies key/value pairs in a dict.
+    """
+
+    def testValidPair(self):
+        """Should return True if the key value is in the dict."""
+        self.assertTrue(mox.ContainsKeyValue("key", 1) == {"key": 1})
+
+    def testInvalidValue(self):
+        """Should return False if the value is not correct."""
+        self.assertFalse(mox.ContainsKeyValue("key", 1) == {"key": 2})
+
+    def testInvalidKey(self):
+        """Should return False if they key is not in the dict."""
+        self.assertFalse(mox.ContainsKeyValue("qux", 1) == {"key": 2})
+
+
+class ContainsAttributeValueTest(testtools.TestCase):
+    """Test ContainsAttributeValue identifies properties in an object."""
+
+    def setUp(self):
+        """Create an object to test with."""
+
+        class TestObject(object):
+            key = 1
+
+        super(ContainsAttributeValueTest, self).setUp()
+        self.test_object = TestObject()
+
+    def testValidPair(self):
+        """Return True if the object has the key attribute that matches."""
+        self.assertTrue(mox.ContainsAttributeValue("key", 1)
+                        == self.test_object)
+
+    def testInvalidValue(self):
+        """Should return False if the value is not correct."""
+        self.assertFalse(mox.ContainsKeyValue("key", 2) == self.test_object)
+
+    def testInvalidKey(self):
+        """Should return False if they the object doesn't have the property."""
+        self.assertFalse(mox.ContainsKeyValue("qux", 1) == self.test_object)
+
+
+class InTest(testtools.TestCase):
+    """Test In correctly identifies a key in a list/dict."""
+
+    def testItemInList(self):
+        """Should return True if the item is in the list."""
+        self.assertTrue(mox.In(1) == [1, 2, 3])
+
+    def testKeyInDict(self):
+        """Should return True if the item is a key in a dict."""
+        self.assertTrue(mox.In("test") == {"test": "module"})
+
+    def testItemInTuple(self):
+        """Should return True if the item is in the list."""
+        self.assertTrue(mox.In(1) == (1, 2, 3))
+
+    def testTupleInTupleOfTuples(self):
+        self.assertTrue(mox.In((1, 2, 3)) == ((1, 2, 3), (1, 2)))
+
+    def testItemNotInList(self):
+        self.assertFalse(mox.In(1) == [2, 3])
+
+    def testTupleNotInTupleOfTuples(self):
+        self.assertFalse(mox.In((1, 2)) == ((1, 2, 3), (4, 5)))
+
+
+class NotTest(testtools.TestCase):
+    """Test Not correctly identifies False predicates."""
+
+    def testItemInList(self):
+        """Should return True if the item is NOT in the list."""
+        self.assertTrue(mox.Not(mox.In(42)) == [1, 2, 3])
+
+    def testKeyInDict(self):
+        """Should return True if the item is NOT a key in a dict."""
+        self.assertTrue(mox.Not(mox.In("foo")) == {"key": 42})
+
+    def testInvalidKeyWithNot(self):
+        """Should return False if they key is NOT in the dict."""
+        self.assertTrue(mox.Not(mox.ContainsKeyValue("qux", 1)) == {"key": 2})
+
+
+class StrContainsTest(testtools.TestCase):
+    """Test StrContains checks for substring occurrence of a parameter."""
+
+    def testValidSubstringAtStart(self):
+        """Should return True if substring is at the start of the string."""
+        self.assertTrue(mox.StrContains("hello") == "hello world")
+
+    def testValidSubstringInMiddle(self):
+        """Should return True if substring is in the middle of the string."""
+        self.assertTrue(mox.StrContains("lo wo") == "hello world")
+
+    def testValidSubstringAtEnd(self):
+        """Should return True if the substring is at the end of the string."""
+        self.assertTrue(mox.StrContains("ld") == "hello world")
+
+    def testInvaildSubstring(self):
+        """Should return False if the substring is not in the string."""
+        self.assertFalse(mox.StrContains("AAA") == "hello world")
+
+    def testMultipleMatches(self):
+        """Should return True if there are multiple occurances of substring."""
+        self.assertTrue(mox.StrContains("abc") == "ababcabcabcababc")
+
+
+class RegexTest(testtools.TestCase):
+    """Test Regex correctly matches regular expressions."""
+
+    def testIdentifyBadSyntaxDuringInit(self):
+        """The user should know immediately if a regex has bad syntax."""
+        self.assertRaises(re.error, mox.Regex, '(a|b')
+
+    def testPatternInMiddle(self):
+        """Return True if the pattern matches at the middle of the string.
+
+        This ensures that re.search is used (instead of re.find).
+        """
+        self.assertTrue(mox.Regex(r"a\s+b") == "x y z a b c")
+
+    def testNonMatchPattern(self):
+        """Should return False if the pattern does not match the string."""
+        self.assertFalse(mox.Regex(r"a\s+b") == "x y z")
+
+    def testFlagsPassedCorrectly(self):
+        """Should return True as we pass IGNORECASE flag."""
+        self.assertTrue(mox.Regex(r"A", re.IGNORECASE) == "a")
+
+    def testReprWithoutFlags(self):
+        """repr should return the regular expression pattern."""
+        self.assertTrue(
+            repr(mox.Regex(r"a\s+b")) == "<regular expression 'a\s+b'>")
+
+    def testReprWithFlags(self):
+        """repr should return the regular expression pattern and flags."""
+        self.assertTrue(repr(mox.Regex(r"a\s+b", flags=4)) ==
+                        "<regular expression 'a\s+b', flags=4>")
+
+
+class IsTest(testtools.TestCase):
+    """Verify Is correctly checks equality based upon identity, not value."""
+
+    class AlwaysComparesTrue(object):
+        def __eq__(self, other):
+            return True
+
+        def __cmp__(self, other):
+            return 0
+
+        def __ne__(self, other):
+            return False
+
+    def testEqualityValid(self):
+        o1 = self.AlwaysComparesTrue()
+        self.assertTrue(mox.Is(o1) == o1)
+
+    def testEqualityInvalid(self):
+        o1 = self.AlwaysComparesTrue()
+        o2 = self.AlwaysComparesTrue()
+        self.assertTrue(o1 == o2)
+        # but...
+        self.assertFalse(mox.Is(o1) == o2)
+
+    def testInequalityValid(self):
+        o1 = self.AlwaysComparesTrue()
+        o2 = self.AlwaysComparesTrue()
+        self.assertTrue(mox.Is(o1) != o2)
+
+    def testInequalityInvalid(self):
+        o1 = self.AlwaysComparesTrue()
+        self.assertFalse(mox.Is(o1) != o1)
+
+    def testEqualityInListValid(self):
+        o1 = self.AlwaysComparesTrue()
+        o2 = self.AlwaysComparesTrue()
+        isa_list = [mox.Is(o1), mox.Is(o2)]
+        str_list = [o1, o2]
+        self.assertTrue(isa_list == str_list)
+
+    def testEquailtyInListInvalid(self):
+        o1 = self.AlwaysComparesTrue()
+        o2 = self.AlwaysComparesTrue()
+        isa_list = [mox.Is(o1), mox.Is(o2)]
+        mixed_list = [o2, o1]
+        self.assertFalse(isa_list == mixed_list)
+
+
+class IsATest(testtools.TestCase):
+    """Verify IsA correctly checks equality based upon class type not value."""
+
+    def testEqualityValid(self):
+        """Verify that == correctly identifies objects of the same type."""
+        self.assertTrue(mox.IsA(str) == 'test')
+
+    def testEqualityInvalid(self):
+        """Verify that == correctly identifies objects of different types."""
+        self.assertFalse(mox.IsA(str) == 10)
+
+    def testInequalityValid(self):
+        """Verify that != identifies objects of different type."""
+        self.assertTrue(mox.IsA(str) != 10)
+
+    def testInequalityInvalid(self):
+        """Verify that != correctly identifies objects of the same type."""
+        self.assertFalse(mox.IsA(str) != "test")
+
+    def testEqualityInListValid(self):
+        """Verify list contents are properly compared."""
+        isa_list = [mox.IsA(str), mox.IsA(str)]
+        str_list = ["abc", "def"]
+        self.assertTrue(isa_list == str_list)
+
+    def testEquailtyInListInvalid(self):
+        """Verify list contents are properly compared."""
+        isa_list = [mox.IsA(str), mox.IsA(str)]
+        mixed_list = ["abc", 123]
+        self.assertFalse(isa_list == mixed_list)
+
+    def testSpecialTypes(self):
+        """Verify that IsA can handle objects like io.StringIO."""
+        isA = mox.IsA(io.StringIO())
+        stringIO = io.StringIO()
+        self.assertTrue(isA == stringIO)
+
+
+class IsAlmostTest(testtools.TestCase):
+    """Verify IsAlmost correctly checks equality of floating point numbers."""
+
+    def testEqualityValid(self):
+        """Verify that == correctly identifies nearly equivalent floats."""
+        self.assertEqual(mox.IsAlmost(1.8999999999), 1.9)
+
+    def testEqualityInvalid(self):
+        """Verify that == correctly identifies non-equivalent floats."""
+        self.assertNotEqual(mox.IsAlmost(1.899), 1.9)
+
+    def testEqualityWithPlaces(self):
+        """Verify that specifying places has the desired effect."""
+        self.assertNotEqual(mox.IsAlmost(1.899), 1.9)
+        self.assertEqual(mox.IsAlmost(1.899, places=2), 1.9)
+
+    def testNonNumericTypes(self):
+        """Verify that IsAlmost handles non-numeric types properly."""
+
+        self.assertNotEqual(mox.IsAlmost(1.8999999999), '1.9')
+        self.assertNotEqual(mox.IsAlmost('1.8999999999'), 1.9)
+        self.assertNotEqual(mox.IsAlmost('1.8999999999'), '1.9')
+
+
+class ValueRememberTest(testtools.TestCase):
+    """Verify comparing argument against remembered value."""
+
+    def testValueEquals(self):
+        """Verify that value will compare to stored value."""
+        value = mox.Value()
+        value.store_value('hello world')
+        self.assertEqual(value, 'hello world')
+
+    def testNoValue(self):
+        """Verify that uninitialized value does not compare to empty values."""
+        value = mox.Value()
+        self.assertNotEqual(value, None)
+        self.assertNotEqual(value, False)
+        self.assertNotEqual(value, 0)
+        self.assertNotEqual(value, '')
+        self.assertNotEqual(value, ())
+        self.assertNotEqual(value, [])
+        self.assertNotEqual(value, {})
+        self.assertNotEqual(value, object())
+        self.assertNotEqual(value, set())
+
+    def testRememberValue(self):
+        """Verify that comparing against remember will store argument."""
+        value = mox.Value()
+        remember = mox.Remember(value)
+        self.assertNotEqual(value, 'hello world')  # value not yet stored.
+        self.assertEqual(remember, 'hello world')  # store value here.
+        self.assertEqual(value, 'hello world')  # compare against stored value.
+
+
+class MockMethodTest(testtools.TestCase):
+    """Test class to verify that the MockMethod class is working correctly."""
+
+    def setUp(self):
+        super(MockMethodTest, self).setUp()
+        self.expected_method = mox.MockMethod(
+            "testMethod", [], False)(['original'])
+        self.mock_method = mox.MockMethod(
+            "testMethod", [self.expected_method], True)
+
+    def testNameAttribute(self):
+        """Should provide a __name__ attribute."""
+        self.assertEqual('testMethod', self.mock_method.__name__)
+
+    def testAndReturnNoneByDefault(self):
+        """Should return None by default."""
+        return_value = self.mock_method(['original'])
+        self.assertTrue(return_value is None)
+
+    def testAndReturnValue(self):
+        """Should return a specificed return value."""
+        expected_return_value = "test"
+        self.expected_method.AndReturn(expected_return_value)
+        return_value = self.mock_method(['original'])
+        self.assertTrue(return_value == expected_return_value)
+
+    def testAndRaiseException(self):
+        """Should raise a specified exception."""
+        class TestException(Exception):
+            pass
+
+        expected_exception = TestException('test exception')
+        self.expected_method.AndRaise(expected_exception)
+        self.assertRaises(TestException, self.mock_method, ['original'])
+
+    def testWithSideEffects(self):
+        """Should call state modifier."""
+        local_list = ['original']
+
+        def modifier(mutable_list):
+            self.assertTrue(local_list is mutable_list)
+            mutable_list[0] = 'mutation'
+
+        self.expected_method.WithSideEffects(modifier).AndReturn(1)
+        self.mock_method(local_list)
+        self.assertEqual('mutation', local_list[0])
+
+    def testWithReturningSideEffects(self):
+        """Should call state modifier and propagate its return value."""
+        local_list = ['original']
+        expected_return = 'expected_return'
+
+        def modifier_with_return(mutable_list):
+            self.assertTrue(local_list is mutable_list)
+            mutable_list[0] = 'mutation'
+            return expected_return
+
+        self.expected_method.WithSideEffects(modifier_with_return)
+        actual_return = self.mock_method(local_list)
+        self.assertEqual('mutation', local_list[0])
+        self.assertEqual(expected_return, actual_return)
+
+    def testWithReturningSideEffectsWithAndReturn(self):
+        """Should call state modifier and ignore its return value."""
+        local_list = ['original']
+        expected_return = 'expected_return'
+        unexpected_return = 'unexpected_return'
+
+        def modifier_with_return(mutable_list):
+            self.assertTrue(local_list is mutable_list)
+            mutable_list[0] = 'mutation'
+            return unexpected_return
+
+        self.expected_method.WithSideEffects(modifier_with_return).AndReturn(
+            expected_return)
+        actual_return = self.mock_method(local_list)
+        self.assertEqual('mutation', local_list[0])
+        self.assertEqual(expected_return, actual_return)
+
+    def testEqualityNoParamsEqual(self):
+        """Methods with the same name and without params should be equal."""
+        expected_method = mox.MockMethod("testMethod", [], False)
+        self.assertEqual(self.mock_method, expected_method)
+
+    def testEqualityNoParamsNotEqual(self):
+        """Methods with different names without params should not be equal."""
+        expected_method = mox.MockMethod("otherMethod", [], False)
+        self.assertNotEqual(self.mock_method, expected_method)
+
+    def testEqualityParamsEqual(self):
+        """Methods with the same name and parameters should be equal."""
+        params = [1, 2, 3]
+        expected_method = mox.MockMethod("testMethod", [], False)
+        expected_method._params = params
+
+        self.mock_method._params = params
+        self.assertEqual(self.mock_method, expected_method)
+
+    def testEqualityParamsNotEqual(self):
+        """Methods with same name and different params should not be equal."""
+        expected_method = mox.MockMethod("testMethod", [], False)
+        expected_method._params = [1, 2, 3]
+
+        self.mock_method._params = ['a', 'b', 'c']
+        self.assertNotEqual(self.mock_method, expected_method)
+
+    def testEqualityNamedParamsEqual(self):
+        """Methods with the same name and same named params should be equal."""
+        named_params = {"input1": "test", "input2": "params"}
+        expected_method = mox.MockMethod("testMethod", [], False)
+        expected_method._named_params = named_params
+
+        self.mock_method._named_params = named_params
+        self.assertEqual(self.mock_method, expected_method)
+
+    def testEqualityNamedParamsNotEqual(self):
+        """Methods with same name and diffnamed params should not be equal."""
+        expected_method = mox.MockMethod("testMethod", [], False)
+        expected_method._named_params = {"input1": "test", "input2": "params"}
+
+        self.mock_method._named_params = {
+            "input1": "test2", "input2": "params2"}
+        self.assertNotEqual(self.mock_method, expected_method)
+
+    def testEqualityWrongType(self):
+        """Method should not be equal to an object of a different type."""
+        self.assertNotEqual(self.mock_method, "string?")
+
+    def testObjectEquality(self):
+        """Equality of objects should work without a Comparator."""
+        instA = TestClass()
+        instB = TestClass()
+
+        params = [instA, ]
+        expected_method = mox.MockMethod("testMethod", [], False)
+        expected_method._params = params
+
+        self.mock_method._params = [instB, ]
+        self.assertEqual(self.mock_method, expected_method)
+
+    def testStrConversion(self):
+        method = mox.MockMethod("f", [], False)
+        method(1, 2, "st", n1=8, n2="st2")
+        self.assertEqual(str(method),
+                         ("f(1, 2, 'st', n1=8, n2='st2') -> None"))
+
+        method = mox.MockMethod("testMethod", [], False)
+        method(1, 2, "only positional")
+        self.assertEqual(str(method),
+                         "testMethod(1, 2, 'only positional') -> None")
+
+        method = mox.MockMethod("testMethod", [], False)
+        method(a=1, b=2, c="only named")
+        self.assertEqual(str(method),
+                         "testMethod(a=1, b=2, c='only named') -> None")
+
+        method = mox.MockMethod("testMethod", [], False)
+        method()
+        self.assertEqual(str(method), "testMethod() -> None")
+
+        method = mox.MockMethod("testMethod", [], False)
+        method(x="only 1 parameter")
+        self.assertEqual(str(method),
+                         "testMethod(x='only 1 parameter') -> None")
+
+        method = mox.MockMethod("testMethod", [], False)
+        method().AndReturn('return_value')
+        self.assertEqual(str(method), "testMethod() -> 'return_value'")
+
+        method = mox.MockMethod("testMethod", [], False)
+        method().AndReturn(('a', {1: 2}))
+        self.assertEqual(str(method), "testMethod() -> ('a', {1: 2})")
+
+
+class MockAnythingTest(testtools.TestCase):
+    """Verify that the MockAnything class works as expected."""
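+    # Note: these tests drive the mock through mox's record -> replay ->
+    # verify cycle via the private _Replay()/_Verify() hooks; calls made
+    # before _Replay() record expectations, and calls made afterwards are
+    # checked against that recorded queue.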
+
+    def setUp(self):
+        super(MockAnythingTest, self).setUp()
+        self.mock_object = mox.MockAnything()
+
+    def testRepr(self):
+        """Calling repr on a MockAnything instance must work."""
+        self.assertEqual('<MockAnything instance>', repr(self.mock_object))
+
+    def testCanMockStr(self):
+        self.mock_object.__str__().AndReturn("foo")
+        self.mock_object._Replay()
+        actual = str(self.mock_object)
+        self.mock_object._Verify()
+        self.assertEqual("foo", actual)
+
+    def testSetupMode(self):
+        """Verify the mock will accept any call."""
+        self.mock_object.NonsenseCall()
+        self.assertTrue(len(self.mock_object._expected_calls_queue) == 1)
+
+    def testReplayWithExpectedCall(self):
+        """Verify the mock replays method calls as expected."""
+        self.mock_object.ValidCall()        # setup method call
+        self.mock_object._Replay()          # start replay mode
+        self.mock_object.ValidCall()        # make method call
+
+    def testReplayWithUnexpectedCall(self):
+        """Unexpected method calls should raise UnexpectedMethodCallError."""
+        self.mock_object.ValidCall()        # setup method call
+        self.mock_object._Replay()          # start replay mode
+        self.assertRaises(mox.UnexpectedMethodCallError,
+                          self.mock_object.OtherValidCall)
+
+    def testVerifyWithCompleteReplay(self):
+        """Verify should not raise an exception for a valid replay."""
+        self.mock_object.ValidCall()        # setup method call
+        self.mock_object._Replay()          # start replay mode
+        self.mock_object.ValidCall()        # make method call
+        self.mock_object._Verify()
+
+    def testVerifyWithIncompleteReplay(self):
+        """Verify should raise an exception if the replay was not complete."""
+        self.mock_object.ValidCall()        # setup method call
+        self.mock_object._Replay()          # start replay mode
+        # ValidCall() is never made
+        self.assertRaises(
+            mox.ExpectedMethodCallsError, self.mock_object._Verify)
+
+    def testSpecialClassMethod(self):
+        """Verify should not raise exception when special methods are used."""
+        self.mock_object[1].AndReturn(True)
+        self.mock_object._Replay()
+        returned_val = self.mock_object[1]
+        self.assertTrue(returned_val)
+        self.mock_object._Verify()
+
+    def testNonzero(self):
+        """You should be able to use the mock object in an if statement."""
+        self.mock_object._Replay()
+        if self.mock_object:
+            pass
+
+    def testNotNone(self):
+        """Mock should be comparable to None."""
+        self.mock_object._Replay()
+        if self.mock_object is not None:
+            pass
+
+        if self.mock_object is None:
+            pass
+
+    def testEquals(self):
+        """A mock should be able to compare itself to another object."""
+        self.mock_object._Replay()
+        self.assertEqual(self.mock_object, self.mock_object)
+
+    def testEqualsMockFailure(self):
+        """Verify equals identifies unequal objects."""
+        self.mock_object.SillyCall()
+        self.mock_object._Replay()
+        self.assertNotEqual(self.mock_object, mox.MockAnything())
+
+    def testEqualsInstanceFailure(self):
+        """Verify equals identifies that objects are different instances."""
+        self.mock_object._Replay()
+        self.assertNotEqual(self.mock_object, TestClass())
+
+    def testNotEquals(self):
+        """Verify not equals works."""
+        self.mock_object._Replay()
+        self.assertFalse(self.mock_object != self.mock_object)
+
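+    # Nested calls can be recorded either serially (the inner call first,
+    # then the outer call expecting the inner call's return value) or as a
+    # single nested expression; the next two tests replay both forms the
+    # same way.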
+    def testNestedMockCallsRecordedSerially(self):
+        """Test that nested calls work when recorded serially."""
+        self.mock_object.CallInner().AndReturn(1)
+        self.mock_object.CallOuter(1)
+        self.mock_object._Replay()
+
+        self.mock_object.CallOuter(self.mock_object.CallInner())
+
+        self.mock_object._Verify()
+
+    def testNestedMockCallsRecordedNested(self):
+        """Test that nested calls work when recorded in a nested fashion."""
+        self.mock_object.CallOuter(self.mock_object.CallInner().AndReturn(1))
+        self.mock_object._Replay()
+
+        self.mock_object.CallOuter(self.mock_object.CallInner())
+
+        self.mock_object._Verify()
+
+    def testIsCallable(self):
+        """Test that MockAnything can even mock a simple callable.
+
+        This is handy for "stubbing out" a method in a module with a mock, and
+        verifying that it was called.
+        """
+        self.mock_object().AndReturn('mox0rd')
+        self.mock_object._Replay()
+
+        self.assertEqual('mox0rd', self.mock_object())
+
+        self.mock_object._Verify()
+
+    def testIsReprable(self):
+        """Test that MockAnythings can be repr'd without causing a failure."""
+        self.assertTrue('MockAnything' in repr(self.mock_object))
+
+
+class MethodCheckerTest(testtools.TestCase):
+    """Tests MockMethod's use of MethodChecker method."""
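+    # Each test builds a MockMethod around a method of CheckCallTestClass
+    # (defined after this class) and checks that calls whose arguments do
+    # not match the real signature raise AttributeError.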
+
+    def testUnboundMethodsRequiresInstance(self):
+        # SKIP TEST IN PYTHON 2.x (ugly hack for Python 2.6)
+        # REASON: the semantics of unbound methods changed only in Python 3,
+        #     so this test is invalid on earlier versions.
+        if sys.version_info < (3, 0):
+            return
+
+        instance = CheckCallTestClass()
+        method = mox.MockMethod('NoParameters', [], False,
+                                CheckCallTestClass.NoParameters)
+
+        self.assertRaises(AttributeError, method)
+        method(instance)
+        self.assertRaises(AttributeError, method, instance, 1)
+
+    def testNoParameters(self):
+        method = mox.MockMethod('NoParameters', [], False,
+                                CheckCallTestClass.NoParameters,
+                                class_to_bind=CheckCallTestClass)
+        method()
+        self.assertRaises(AttributeError, method, 1)
+        self.assertRaises(AttributeError, method, 1, 2)
+        self.assertRaises(AttributeError, method, a=1)
+        self.assertRaises(AttributeError, method, 1, b=2)
+
+    def testOneParameter(self):
+        method = mox.MockMethod('OneParameter', [], False,
+                                CheckCallTestClass.OneParameter,
+                                class_to_bind=CheckCallTestClass)
+        self.assertRaises(AttributeError, method)
+        method(1)
+        method(a=1)
+        self.assertRaises(AttributeError, method, b=1)
+        self.assertRaises(AttributeError, method, 1, 2)
+        self.assertRaises(AttributeError, method, 1, a=2)
+        self.assertRaises(AttributeError, method, 1, b=2)
+
+    def testTwoParameters(self):
+        method = mox.MockMethod('TwoParameters', [], False,
+                                CheckCallTestClass.TwoParameters,
+                                class_to_bind=CheckCallTestClass)
+        self.assertRaises(AttributeError, method)
+        self.assertRaises(AttributeError, method, 1)
+        self.assertRaises(AttributeError, method, a=1)
+        self.assertRaises(AttributeError, method, b=1)
+        method(1, 2)
+        method(1, b=2)
+        method(a=1, b=2)
+        method(b=2, a=1)
+        self.assertRaises(AttributeError, method, b=2, c=3)
+        self.assertRaises(AttributeError, method, a=1, b=2, c=3)
+        self.assertRaises(AttributeError, method, 1, 2, 3)
+        self.assertRaises(AttributeError, method, 1, 2, 3, 4)
+        self.assertRaises(AttributeError, method, 3, a=1, b=2)
+
+    def testOneDefaultValue(self):
+        method = mox.MockMethod('OneDefaultValue', [], False,
+                                CheckCallTestClass.OneDefaultValue,
+                                class_to_bind=CheckCallTestClass)
+        method()
+        method(1)
+        method(a=1)
+        self.assertRaises(AttributeError, method, b=1)
+        self.assertRaises(AttributeError, method, 1, 2)
+        self.assertRaises(AttributeError, method, 1, a=2)
+        self.assertRaises(AttributeError, method, 1, b=2)
+
+    def testTwoDefaultValues(self):
+        method = mox.MockMethod('TwoDefaultValues', [], False,
+                                CheckCallTestClass.TwoDefaultValues,
+                                class_to_bind=CheckCallTestClass)
+        self.assertRaises(AttributeError, method)
+        self.assertRaises(AttributeError, method, c=3)
+        self.assertRaises(AttributeError, method, 1)
+        self.assertRaises(AttributeError, method, 1, d=4)
+        self.assertRaises(AttributeError, method, 1, d=4, c=3)
+        method(1, 2)
+        method(a=1, b=2)
+        method(1, 2, 3)
+        method(1, 2, 3, 4)
+        method(1, 2, c=3)
+        method(1, 2, c=3, d=4)
+        method(1, 2, d=4, c=3)
+        method(d=4, c=3, a=1, b=2)
+        self.assertRaises(AttributeError, method, 1, 2, 3, 4, 5)
+        self.assertRaises(AttributeError, method, 1, 2, e=9)
+        self.assertRaises(AttributeError, method, a=1, b=2, e=9)
+
+    def testArgs(self):
+        method = mox.MockMethod('Args', [], False, CheckCallTestClass.Args,
+                                class_to_bind=CheckCallTestClass)
+        self.assertRaises(AttributeError, method)
+        self.assertRaises(AttributeError, method, 1)
+        method(1, 2)
+        method(a=1, b=2)
+        method(1, 2, 3)
+        method(1, 2, 3, 4)
+        self.assertRaises(AttributeError, method, 1, 2, a=3)
+        self.assertRaises(AttributeError, method, 1, 2, c=3)
+
+    def testKwargs(self):
+        method = mox.MockMethod('Kwargs', [], False, CheckCallTestClass.Kwargs,
+                                class_to_bind=CheckCallTestClass)
+        self.assertRaises(AttributeError, method)
+        method(1)
+        method(1, 2)
+        method(a=1, b=2)
+        method(b=2, a=1)
+        self.assertRaises(AttributeError, method, 1, 2, 3)
+        self.assertRaises(AttributeError, method, 1, 2, a=3)
+        method(1, 2, c=3)
+        method(a=1, b=2, c=3)
+        method(c=3, a=1, b=2)
+        method(a=1, b=2, c=3, d=4)
+        self.assertRaises(AttributeError, method, 1, 2, 3, 4)
+
+    def testArgsAndKwargs(self):
+        method = mox.MockMethod('ArgsAndKwargs', [], False,
+                                CheckCallTestClass.ArgsAndKwargs,
+                                class_to_bind=CheckCallTestClass)
+        self.assertRaises(AttributeError, method)
+        method(1)
+        method(1, 2)
+        method(1, 2, 3)
+        method(a=1)
+        method(1, b=2)
+        self.assertRaises(AttributeError, method, 1, a=2)
+        method(b=2, a=1)
+        method(c=3, b=2, a=1)
+        method(1, 2, c=3)
+
+
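+# Helper class whose method signatures are exercised by MethodCheckerTest
+# above; the method bodies are all no-ops.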
+class CheckCallTestClass(object):
+    def NoParameters(self):
+        pass
+
+    def OneParameter(self, a):
+        pass
+
+    def TwoParameters(self, a, b):
+        pass
+
+    def OneDefaultValue(self, a=1):
+        pass
+
+    def TwoDefaultValues(self, a, b, c=1, d=2):
+        pass
+
+    def Args(self, a, b, *args):
+        pass
+
+    def Kwargs(self, a, b=2, **kwargs):
+        pass
+
+    def ArgsAndKwargs(self, a, *args, **kwargs):
+        pass
+
+
+class MockObjectTest(testtools.TestCase):
+    """Verify that the MockObject class works as expected."""
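+    # Unlike MockAnything, MockObject is built from a real class, so it
+    # only accepts calls to methods that exist on that class and can also
+    # mock special methods (__setitem__, __getitem__, __iter__ and
+    # __contains__), as the tests below exercise.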
+
+    def setUp(self):
+        super(MockObjectTest, self).setUp()
+        self.mock_object = mox.MockObject(TestClass)
+
+    def testSetupModeWithValidCall(self):
+        """Verify the mock object properly mocks a basic method call."""
+        self.mock_object.ValidCall()
+        self.assertTrue(len(self.mock_object._expected_calls_queue) == 1)
+
+    def testSetupModeWithInvalidCall(self):
+        """Raise UnknownMethodCallError for a non-member method call."""
+        # Note: assertRaises does not catch exceptions thrown by MockObject's
+        # __getattr__
+        try:
+            self.mock_object.InvalidCall()
+            self.fail("No exception thrown, expected UnknownMethodCallError")
+        except mox.UnknownMethodCallError:
+            pass
+        except Exception:
+            self.fail("Wrong exception type thrown,"
+                      " expected UnknownMethodCallError")
+
+    def testReplayWithInvalidCall(self):
+        """Raise UnknownMethodCallError for a non-member method call."""
+        self.mock_object.ValidCall()  # setup method call
+        self.mock_object._Replay()  # start replay mode
+        # Note: assertRaises does not catch exceptions thrown by MockObject's
+        # __getattr__
+        try:
+            self.mock_object.InvalidCall()
+            self.fail("No exception thrown, expected UnknownMethodCallError")
+        except mox.UnknownMethodCallError:
+            pass
+        except Exception:
+            self.fail("Wrong exception type thrown,"
+                      " expected UnknownMethodCallError")
+
+    def testIsInstance(self):
+        """Mock should be able to pass as an instance of the mocked class."""
+        self.assertTrue(isinstance(self.mock_object, TestClass))
+
+    def testFindValidMethods(self):
+        """Mock should be able to mock all public methods."""
+        self.assertTrue('ValidCall' in self.mock_object._known_methods)
+        self.assertTrue('OtherValidCall' in self.mock_object._known_methods)
+        self.assertTrue('MyClassMethod' in self.mock_object._known_methods)
+        self.assertTrue('MyStaticMethod' in self.mock_object._known_methods)
+        self.assertTrue('_ProtectedCall' in self.mock_object._known_methods)
+        self.assertTrue('__PrivateCall' not in self.mock_object._known_methods)
+        self.assertTrue(
+            '_TestClass__PrivateCall' in self.mock_object._known_methods)
+
+    def testFindsSuperclassMethods(self):
+        """Mock should be able to mock superclass methods."""
+        self.mock_object = mox.MockObject(ChildClass)
+        self.assertTrue('ValidCall' in self.mock_object._known_methods)
+        self.assertTrue('OtherValidCall' in self.mock_object._known_methods)
+        self.assertTrue('MyClassMethod' in self.mock_object._known_methods)
+        self.assertTrue('ChildValidCall' in self.mock_object._known_methods)
+
+    def testAccessClassVariables(self):
+        """Class variables should be accessible through the mock."""
+        self.assertTrue('SOME_CLASS_VAR' in self.mock_object._known_vars)
+        self.assertTrue('_PROTECTED_CLASS_VAR' in self.mock_object._known_vars)
+        self.assertEqual('test_value', self.mock_object.SOME_CLASS_VAR)
+
+    def testEquals(self):
+        """A mock should be able to compare itself to another object."""
+        self.mock_object._Replay()
+        self.assertEqual(self.mock_object, self.mock_object)
+
+    def testEqualsMockFailure(self):
+        """Verify equals identifies unequal objects."""
+        self.mock_object.ValidCall()
+        self.mock_object._Replay()
+        self.assertNotEqual(self.mock_object, mox.MockObject(TestClass))
+
+    def testEqualsInstanceFailure(self):
+        """Verify equals identifies that objects are different instances."""
+        self.mock_object._Replay()
+        self.assertNotEqual(self.mock_object, TestClass())
+
+    def testNotEquals(self):
+        """Verify not equals works."""
+        self.mock_object._Replay()
+        self.assertFalse(self.mock_object != self.mock_object)
+
+    def testMockSetItem_ExpectedSetItem_Success(self):
+        """Test that __setitem__() gets mocked in Dummy.
+
+        In this test, _Verify() succeeds.
+        """
+        dummy = mox.MockObject(TestClass)
+        dummy['X'] = 'Y'
+
+        dummy._Replay()
+
+        dummy['X'] = 'Y'
+
+        dummy._Verify()
+
+    def testMockSetItem_ExpectedSetItem_NoSuccess(self):
+        """Test that __setitem__() gets mocked in Dummy.
+
+        In this test, _Verify() fails.
+        """
+        dummy = mox.MockObject(TestClass)
+        dummy['X'] = 'Y'
+
+        dummy._Replay()
+
+        # NOT doing dummy['X'] = 'Y'
+
+        self.assertRaises(mox.ExpectedMethodCallsError, dummy._Verify)
+
+    def testMockSetItem_ExpectedNoSetItem_Success(self):
+        """Test that __setitem__() gets mocked in Dummy."""
+        dummy = mox.MockObject(TestClass)
+        # NOT doing dummy['X'] = 'Y'
+
+        dummy._Replay()
+
+        def call():
+            dummy['X'] = 'Y'
+
+        self.assertRaises(mox.UnexpectedMethodCallError, call)
+
+    def testMockSetItem_ExpectedNoSetItem_NoSuccess(self):
+        """Test that __setitem__() gets mocked in Dummy.
+
+        In this test, _Verify() fails.
+        """
+        dummy = mox.MockObject(TestClass)
+        # NOT doing dummy['X'] = 'Y'
+
+        dummy._Replay()
+
+        # NOT doing dummy['X'] = 'Y'
+
+        dummy._Verify()
+
+    def testMockSetItem_ExpectedSetItem_NonmatchingParameters(self):
+        """Test that __setitem__() fails if other parameters are expected."""
+        dummy = mox.MockObject(TestClass)
+        dummy['X'] = 'Y'
+
+        dummy._Replay()
+
+        def call():
+            dummy['wrong'] = 'Y'
+
+        self.assertRaises(mox.UnexpectedMethodCallError, call)
+
+        dummy._Verify()
+
+    def testMockSetItem_WithSubClassOfNewStyleClass(self):
+        class NewStyleTestClass(object):
+            def __init__(self):
+                self.my_dict = {}
+
+            def __setitem__(self, key, value):
+                self.my_dict[key] = value
+
+        class TestSubClass(NewStyleTestClass):
+            pass
+
+        dummy = mox.MockObject(TestSubClass)
+        dummy[1] = 2
+        dummy._Replay()
+        dummy[1] = 2
+        dummy._Verify()
+
+    def testMockGetItem_ExpectedGetItem_Success(self):
+        """Test that __getitem__() gets mocked in Dummy.
+
+        In this test, _Verify() succeeds.
+        """
+        dummy = mox.MockObject(TestClass)
+        dummy['X'].AndReturn('value')
+
+        dummy._Replay()
+
+        self.assertEqual(dummy['X'], 'value')
+
+        dummy._Verify()
+
+    def testMockGetItem_ExpectedGetItem_NoSuccess(self):
+        """Test that __getitem__() gets mocked in Dummy.
+
+        In this test, _Verify() fails.
+        """
+        dummy = mox.MockObject(TestClass)
+        dummy['X'].AndReturn('value')
+
+        dummy._Replay()
+
+        # NOT doing dummy['X']
+
+        self.assertRaises(mox.ExpectedMethodCallsError, dummy._Verify)
+
+    def testMockGetItem_ExpectedNoGetItem_NoSuccess(self):
+        """Test that __getitem__() gets mocked in Dummy."""
+        dummy = mox.MockObject(TestClass)
+        # NOT doing dummy['X']
+
+        dummy._Replay()
+
+        def call():
+            return dummy['X']
+
+        self.assertRaises(mox.UnexpectedMethodCallError, call)
+
+    def testMockGetItem_ExpectedGetItem_NonmatchingParameters(self):
+        """Test that __getitem__() fails if other parameters are expected."""
+        dummy = mox.MockObject(TestClass)
+        dummy['X'].AndReturn('value')
+
+        dummy._Replay()
+
+        def call():
+            return dummy['wrong']
+
+        self.assertRaises(mox.UnexpectedMethodCallError, call)
+
+        dummy._Verify()
+
+    def testMockGetItem_WithSubClassOfNewStyleClass(self):
+        class NewStyleTestClass(object):
+            def __getitem__(self, key):
+                return {1: '1', 2: '2'}[key]
+
+        class TestSubClass(NewStyleTestClass):
+            pass
+
+        dummy = mox.MockObject(TestSubClass)
+        dummy[1].AndReturn('3')
+
+        dummy._Replay()
+        self.assertEqual('3', dummy.__getitem__(1))
+        dummy._Verify()
+
+    def testMockIter_ExpectedIter_Success(self):
+        """Test that __iter__() gets mocked in Dummy.
+
+        In this test, _Verify() succeeds.
+        """
+        dummy = mox.MockObject(TestClass)
+        iter(dummy).AndReturn(iter(['X', 'Y']))
+
+        dummy._Replay()
+
+        self.assertEqual([x for x in dummy], ['X', 'Y'])
+
+        dummy._Verify()
+
+    def testMockContains_ExpectedContains_Success(self):
+        """Test that __contains__ gets mocked in Dummy.
+
+        In this test, _Verify() succeeds.
+        """
+        dummy = mox.MockObject(TestClass)
+        dummy.__contains__('X').AndReturn(True)
+
+        dummy._Replay()
+
+        self.assertTrue('X' in dummy)
+
+        dummy._Verify()
+
+    def testMockContains_ExpectedContains_NoSuccess(self):
+        """Test that __contains__() gets mocked in Dummy.
+
+        In this test, _Verify() fails.
+        """
+        dummy = mox.MockObject(TestClass)
+        dummy.__contains__('X').AndReturn('True')
+
+        dummy._Replay()
+
+        # NOT doing 'X' in dummy
+
+        self.assertRaises(mox.ExpectedMethodCallsError, dummy._Verify)
+
+    def testMockContains_ExpectedContains_NonmatchingParameter(self):
+        """Test that __contains__ fails if other parameters are expected."""
+        dummy = mox.MockObject(TestClass)
+        dummy.__contains__('X').AndReturn(True)
+
+        dummy._Replay()
+
+        def call():
+            return 'Y' in dummy
+
+        self.assertRaises(mox.UnexpectedMethodCallError, call)
+
+        dummy._Verify()
+
+    def testMockIter_ExpectedIter_NoSuccess(self):
+        """Test that __iter__() gets mocked in Dummy.
+
+        In this test, _Verify() fails.
+        """
+        dummy = mox.MockObject(TestClass)
+        iter(dummy).AndReturn(iter(['X', 'Y']))
+
+        dummy._Replay()
+
+        # NOT doing self.assertEqual([x for x in dummy], ['X', 'Y'])
+
+        self.assertRaises(mox.ExpectedMethodCallsError, dummy._Verify)
+
+    def testMockIter_ExpectedNoIter_NoSuccess(self):
+        """Test that __iter__() gets mocked in Dummy."""
+        dummy = mox.MockObject(TestClass)
+        # NOT doing iter(dummy)
+
+        dummy._Replay()
+
+        def call():
+            return [x for x in dummy]
+        self.assertRaises(mox.UnexpectedMethodCallError, call)
+
+    def testMockIter_ExpectedGetItem_Success(self):
+        """Test that __iter__() gets mocked in Dummy using getitem."""
+        dummy = mox.MockObject(SubscribtableNonIterableClass)
+        dummy[0].AndReturn('a')
+        dummy[1].AndReturn('b')
+        dummy[2].AndRaise(IndexError)
+
+        dummy._Replay()
+        self.assertEqual(['a', 'b'], [x for x in dummy])
+        dummy._Verify()
+
+    def testMockIter_ExpectedNoGetItem_NoSuccess(self):
+        """Test that __iter__() gets mocked in Dummy using getitem."""
+        dummy = mox.MockObject(SubscribtableNonIterableClass)
+        # NOT doing dummy[index]
+
+        dummy._Replay()
+        function = lambda: [x for x in dummy]
+        self.assertRaises(mox.UnexpectedMethodCallError, function)
+
+    def testMockGetIter_WithSubClassOfNewStyleClass(self):
+        class NewStyleTestClass(object):
+            def __iter__(self):
+                return iter([1, 2, 3])
+
+        class TestSubClass(NewStyleTestClass):
+            pass
+
+        dummy = mox.MockObject(TestSubClass)
+        iter(dummy).AndReturn(iter(['a', 'b']))
+        dummy._Replay()
+        self.assertEqual(['a', 'b'], [x for x in dummy])
+        dummy._Verify()
+
+    def testInstantiationWithAdditionalAttributes(self):
+        mock_object = mox.MockObject(TestClass, attrs={"attr1": "value"})
+        self.assertEqual(mock_object.attr1, "value")
+
+    def testCantOverrideMethodsWithAttributes(self):
+        self.assertRaises(ValueError, mox.MockObject, TestClass,
+                          attrs={"ValidCall": "value"})
+
+    def testCantMockNonPublicAttributes(self):
+        self.assertRaises(mox.PrivateAttributeError, mox.MockObject, TestClass,
+                          attrs={"_protected": "value"})
+        self.assertRaises(mox.PrivateAttributeError, mox.MockObject, TestClass,
+                          attrs={"__private": "value"})
+
+
+class MoxTest(testtools.TestCase):
+    """Verify Mox works correctly."""
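+    # These tests go through the public Mox API (CreateMock,
+    # CreateMockAnything, StubOutWithMock, ReplayAll, VerifyAll and
+    # UnsetStubs) rather than the private _Replay()/_Verify() hooks used by
+    # the test classes above.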
+
+    def setUp(self):
+        super(MoxTest, self).setUp()
+        self.mox = mox.Mox()
+
+    def testCreateObject(self):
+        """Mox should create a mock object."""
+        self.mox.CreateMock(TestClass)
+
+    def testVerifyObjectWithCompleteReplay(self):
+        """Mox should replay and verify all objects it created."""
+        mock_obj = self.mox.CreateMock(TestClass)
+        mock_obj.ValidCall()
+        mock_obj.ValidCallWithArgs(mox.IsA(TestClass))
+        self.mox.ReplayAll()
+        mock_obj.ValidCall()
+        mock_obj.ValidCallWithArgs(TestClass("some_value"))
+        self.mox.VerifyAll()
+
+    def testVerifyObjectWithIncompleteReplay(self):
+        """Mox should raise an exception if a mock didn't replay completely."""
+        mock_obj = self.mox.CreateMock(TestClass)
+        mock_obj.ValidCall()
+        self.mox.ReplayAll()
+        # ValidCall() is never made
+        self.assertRaises(mox.ExpectedMethodCallsError, self.mox.VerifyAll)
+
+    def testEntireWorkflow(self):
+        """Test the whole work flow."""
+        mock_obj = self.mox.CreateMock(TestClass)
+        mock_obj.ValidCall().AndReturn("yes")
+        self.mox.ReplayAll()
+
+        ret_val = mock_obj.ValidCall()
+        self.assertEqual("yes", ret_val)
+        self.mox.VerifyAll()
+
+    def testSignatureMatchingWithComparatorAsFirstArg(self):
+        """Test that the first argument can be a comparator."""
+
+        def VerifyLen(val):
+            """This will raise an exception when not given a list.
+
+            This exception will be raised when trying to infer/validate the
+            method signature.
+            """
+            return len(val) != 1
+
+        mock_obj = self.mox.CreateMock(TestClass)
+        # This intentionally does not name the 'nine' param so it triggers
+        # deeper inspection.
+        mock_obj.MethodWithArgs(mox.Func(VerifyLen), mox.IgnoreArg(), None)
+        self.mox.ReplayAll()
+
+        mock_obj.MethodWithArgs([1, 2], "foo", None)
+
+        self.mox.VerifyAll()
+
+    def testCallableObject(self):
+        """Test recording calls to a callable object works."""
+        mock_obj = self.mox.CreateMock(CallableClass)
+        mock_obj("foo").AndReturn("qux")
+        self.mox.ReplayAll()
+
+        ret_val = mock_obj("foo")
+        self.assertEqual("qux", ret_val)
+        self.mox.VerifyAll()
+
+    def testInheritedCallableObject(self):
+        """Recording calls to an object inheriting from a callable object."""
+        mock_obj = self.mox.CreateMock(InheritsFromCallable)
+        mock_obj("foo").AndReturn("qux")
+        self.mox.ReplayAll()
+
+        ret_val = mock_obj("foo")
+        self.assertEqual("qux", ret_val)
+        self.mox.VerifyAll()
+
+    def testCallOnNonCallableObject(self):
+        """Test that you cannot call a non-callable object."""
+        mock_obj = self.mox.CreateMock("string is not callable")
+        self.assertRaises(TypeError, mock_obj)
+
+    def testCallableObjectWithBadCall(self):
+        """Test verifying calls to a callable object works."""
+        mock_obj = self.mox.CreateMock(CallableClass)
+        mock_obj("foo").AndReturn("qux")
+        self.mox.ReplayAll()
+
+        self.assertRaises(mox.UnexpectedMethodCallError, mock_obj, "ZOOBAZ")
+
+    def testCallableObjectVerifiesSignature(self):
+        mock_obj = self.mox.CreateMock(CallableClass)
+        # Too many arguments
+        self.assertRaises(AttributeError, mock_obj, "foo", "bar")
+
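+    # InAnyOrder() puts a recorded call into an unordered group: calls in
+    # the same (optionally named) group may be replayed in any order, while
+    # ordered calls around the group keep their recorded position.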
+    def testUnorderedGroup(self):
+        """Test that using one unordered group works."""
+        mock_obj = self.mox.CreateMockAnything()
+        mock_obj.Method(1).InAnyOrder()
+        mock_obj.Method(2).InAnyOrder()
+        self.mox.ReplayAll()
+
+        mock_obj.Method(2)
+        mock_obj.Method(1)
+
+        self.mox.VerifyAll()
+
+    def testUnorderedGroupsInline(self):
+        """Unordered groups should work in the context of ordered calls."""
+        mock_obj = self.mox.CreateMockAnything()
+        mock_obj.Open()
+        mock_obj.Method(1).InAnyOrder()
+        mock_obj.Method(2).InAnyOrder()
+        mock_obj.Close()
+        self.mox.ReplayAll()
+
+        mock_obj.Open()
+        mock_obj.Method(2)
+        mock_obj.Method(1)
+        mock_obj.Close()
+
+        self.mox.VerifyAll()
+
+    def testMultipleUnorderdGroups(self):
+        """Multiple unordered groups should work."""
+        mock_obj = self.mox.CreateMockAnything()
+        mock_obj.Method(1).InAnyOrder()
+        mock_obj.Method(2).InAnyOrder()
+        mock_obj.Foo().InAnyOrder('group2')
+        mock_obj.Bar().InAnyOrder('group2')
+        self.mox.ReplayAll()
+
+        mock_obj.Method(2)
+        mock_obj.Method(1)
+        mock_obj.Bar()
+        mock_obj.Foo()
+
+        self.mox.VerifyAll()
+
+    def testMultipleUnorderdGroupsOutOfOrder(self):
+        """Multiple unordered groups should maintain external order."""
+        mock_obj = self.mox.CreateMockAnything()
+        mock_obj.Method(1).InAnyOrder()
+        mock_obj.Method(2).InAnyOrder()
+        mock_obj.Foo().InAnyOrder('group2')
+        mock_obj.Bar().InAnyOrder('group2')
+        self.mox.ReplayAll()
+
+        mock_obj.Method(2)
+        self.assertRaises(mox.UnexpectedMethodCallError, mock_obj.Bar)
+
+    def testUnorderedGroupWithReturnValue(self):
+        """Unordered groups should work with return values."""
+        mock_obj = self.mox.CreateMockAnything()
+        mock_obj.Open()
+        mock_obj.Method(1).InAnyOrder().AndReturn(9)
+        mock_obj.Method(2).InAnyOrder().AndReturn(10)
+        mock_obj.Close()
+        self.mox.ReplayAll()
+
+        mock_obj.Open()
+        actual_two = mock_obj.Method(2)
+        actual_one = mock_obj.Method(1)
+        mock_obj.Close()
+
+        self.assertEqual(9, actual_one)
+        self.assertEqual(10, actual_two)
+
+        self.mox.VerifyAll()
+
+    def testUnorderedGroupWithComparator(self):
+        """Unordered groups should work with comparators."""
+
+        def VerifyOne(cmd):
+            if not isinstance(cmd, str):
+                self.fail('Unexpected type passed to comparator: ' + str(cmd))
+            return cmd == 'test'
+
+        def VerifyTwo(cmd):
+            return True
+
+        mock_obj = self.mox.CreateMockAnything()
+        mock_obj.Foo(['test'], mox.Func(VerifyOne), bar=1).InAnyOrder().\
+            AndReturn('yes test')
+        mock_obj.Foo(['test'], mox.Func(VerifyTwo), bar=1).InAnyOrder().\
+            AndReturn('anything')
+
+        self.mox.ReplayAll()
+
+        mock_obj.Foo(['test'], 'anything', bar=1)
+        mock_obj.Foo(['test'], 'test', bar=1)
+
+        self.mox.VerifyAll()
+
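+    # MultipleTimes() lets an expectation be satisfied by one or more calls,
+    # each returning the recorded value; an optional group name (e.g. "nr2")
+    # identifies a separate repeated group, as the tests below show.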
+    def testMultipleTimes(self):
+        """Test if MultipleTimesGroup works."""
+        mock_obj = self.mox.CreateMockAnything()
+        mock_obj.Method(1).MultipleTimes().AndReturn(9)
+        mock_obj.Method(2).AndReturn(10)
+        mock_obj.Method(3).MultipleTimes().AndReturn(42)
+        self.mox.ReplayAll()
+
+        actual_one = mock_obj.Method(1)
+        second_one = mock_obj.Method(1)    # This tests MultipleTimes.
+        actual_two = mock_obj.Method(2)
+        actual_three = mock_obj.Method(3)
+        mock_obj.Method(3)
+        mock_obj.Method(3)
+
+        self.mox.VerifyAll()
+
+        self.assertEqual(9, actual_one)
+        # Repeated calls should return same number.
+        self.assertEqual(9, second_one)
+        self.assertEqual(10, actual_two)
+        self.assertEqual(42, actual_three)
+
+    def testMultipleTimesUsingIsAParameter(self):
+        """Test if MultipleTimesGroup works with an IsA parameter."""
+        mock_obj = self.mox.CreateMockAnything()
+        mock_obj.Open()
+        mock_obj.Method(mox.IsA(str)).MultipleTimes("IsA").AndReturn(9)
+        mock_obj.Close()
+        self.mox.ReplayAll()
+
+        mock_obj.Open()
+        actual_one = mock_obj.Method("1")
+        second_one = mock_obj.Method("2")    # This tests MultipleTimes.
+        mock_obj.Close()
+
+        self.mox.VerifyAll()
+
+        self.assertEqual(9, actual_one)
+        # Repeated calls should return same number.
+        self.assertEqual(9, second_one)
+
+    def testMutlipleTimesUsingFunc(self):
+        """Test that the Func is not evaluated more times than necessary.
+
+        If a Func() has side effects, it can cause a passing test to fail.
+        """
+
+        self.counter = 0
+
+        def MyFunc(actual_str):
+            """Increment the counter if actual_str == 'foo'."""
+            if actual_str == 'foo':
+                self.counter += 1
+            return True
+
+        mock_obj = self.mox.CreateMockAnything()
+        mock_obj.Open()
+        mock_obj.Method(mox.Func(MyFunc)).MultipleTimes()
+        mock_obj.Close()
+        self.mox.ReplayAll()
+
+        mock_obj.Open()
+        mock_obj.Method('foo')
+        mock_obj.Method('foo')
+        mock_obj.Method('not-foo')
+        mock_obj.Close()
+
+        self.mox.VerifyAll()
+
+        self.assertEqual(2, self.counter)
+
+    def testMultipleTimesThreeMethods(self):
+        """Test if MultipleTimesGroup works with three or more methods."""
+        mock_obj = self.mox.CreateMockAnything()
+        mock_obj.Open()
+        mock_obj.Method(1).MultipleTimes().AndReturn(9)
+        mock_obj.Method(2).MultipleTimes().AndReturn(8)
+        mock_obj.Method(3).MultipleTimes().AndReturn(7)
+        mock_obj.Method(4).AndReturn(10)
+        mock_obj.Close()
+        self.mox.ReplayAll()
+
+        mock_obj.Open()
+        actual_three = mock_obj.Method(3)
+        mock_obj.Method(1)
+        actual_two = mock_obj.Method(2)
+        mock_obj.Method(3)
+        actual_one = mock_obj.Method(1)
+        actual_four = mock_obj.Method(4)
+        mock_obj.Close()
+
+        self.assertEqual(9, actual_one)
+        self.assertEqual(8, actual_two)
+        self.assertEqual(7, actual_three)
+        self.assertEqual(10, actual_four)
+
+        self.mox.VerifyAll()
+
+    def testMultipleTimesMissingOne(self):
+        """Test if MultipleTimesGroup fails if one method is missing."""
+        mock_obj = self.mox.CreateMockAnything()
+        mock_obj.Open()
+        mock_obj.Method(1).MultipleTimes().AndReturn(9)
+        mock_obj.Method(2).MultipleTimes().AndReturn(8)
+        mock_obj.Method(3).MultipleTimes().AndReturn(7)
+        mock_obj.Method(4).AndReturn(10)
+        mock_obj.Close()
+        self.mox.ReplayAll()
+
+        mock_obj.Open()
+        mock_obj.Method(3)
+        mock_obj.Method(2)
+        mock_obj.Method(3)
+        mock_obj.Method(3)
+        mock_obj.Method(2)
+
+        self.assertRaises(mox.UnexpectedMethodCallError, mock_obj.Method, 4)
+
+    def testMultipleTimesTwoGroups(self):
+        """Test if MultipleTimesGroup works with a group after a
+        MultipleTimesGroup.
+        """
+        mock_obj = self.mox.CreateMockAnything()
+        mock_obj.Open()
+        mock_obj.Method(1).MultipleTimes().AndReturn(9)
+        mock_obj.Method(3).MultipleTimes("nr2").AndReturn(42)
+        mock_obj.Close()
+        self.mox.ReplayAll()
+
+        mock_obj.Open()
+        actual_one = mock_obj.Method(1)
+        mock_obj.Method(1)
+        actual_three = mock_obj.Method(3)
+        mock_obj.Method(3)
+        mock_obj.Close()
+
+        self.assertEqual(9, actual_one)
+        self.assertEqual(42, actual_three)
+
+        self.mox.VerifyAll()
+
+    def testMultipleTimesTwoGroupsFailure(self):
+        """Test if MultipleTimesGroup fails with a group after a
+        MultipleTimesGroup.
+        """
+        mock_obj = self.mox.CreateMockAnything()
+        mock_obj.Open()
+        mock_obj.Method(1).MultipleTimes().AndReturn(9)
+        mock_obj.Method(3).MultipleTimes("nr2").AndReturn(42)
+        mock_obj.Close()
+        self.mox.ReplayAll()
+
+        mock_obj.Open()
+        mock_obj.Method(1)
+        mock_obj.Method(1)
+        mock_obj.Method(3)
+
+        self.assertRaises(mox.UnexpectedMethodCallError, mock_obj.Method, 1)
+
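+    # WithSideEffects(func) makes the replayed call invoke func with the
+    # actual arguments, so an in/out parameter can be mutated before the
+    # mock returns or raises.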
+    def testWithSideEffects(self):
+        """Test side effect operations actually modify their target objects."""
+        def modifier(mutable_list):
+            mutable_list[0] = 'mutated'
+        mock_obj = self.mox.CreateMockAnything()
+        mock_obj.ConfigureInOutParameter(
+            ['original']).WithSideEffects(modifier)
+        mock_obj.WorkWithParameter(['mutated'])
+        self.mox.ReplayAll()
+
+        local_list = ['original']
+        mock_obj.ConfigureInOutParameter(local_list)
+        mock_obj.WorkWithParameter(local_list)
+
+        self.mox.VerifyAll()
+
+    def testWithSideEffectsException(self):
+        """Side effects should still run when the mocked call also raises."""
+        class TestException(Exception):
+            pass
+
+        def modifier(mutable_list):
+            mutable_list[0] = 'mutated'
+        mock_obj = self.mox.CreateMockAnything()
+        method = mock_obj.ConfigureInOutParameter(['original'])
+        method.WithSideEffects(modifier).AndRaise(TestException('exception'))
+        mock_obj.WorkWithParameter(['mutated'])
+        self.mox.ReplayAll()
+
+        local_list = ['original']
+        self.assertRaises(TestException,
+                          mock_obj.ConfigureInOutParameter,
+                          local_list)
+        mock_obj.WorkWithParameter(local_list)
+
+        self.mox.VerifyAll()
+
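+    # StubOutWithMock(obj, name) replaces the named attribute with a mock
+    # and remembers the original so UnsetStubs() can restore it; the tests
+    # below cover bound methods, unbound (class-level) methods, plain
+    # attributes and signature checking of the stubbed calls.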
+    def testStubOutMethod(self):
+        """Test that a method is replaced with a MockObject."""
+        test_obj = TestClass()
+        method_type = type(test_obj.OtherValidCall)
+        # Replace OtherValidCall with a mock.
+        self.mox.StubOutWithMock(test_obj, 'OtherValidCall')
+        self.assertTrue(isinstance(test_obj.OtherValidCall, mox.MockObject))
+        self.assertFalse(type(test_obj.OtherValidCall) is method_type)
+
+        test_obj.OtherValidCall().AndReturn('foo')
+        self.mox.ReplayAll()
+
+        actual = test_obj.OtherValidCall()
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+        self.assertEqual('foo', actual)
+        self.assertTrue(type(test_obj.OtherValidCall) is method_type)
+
+    def testStubOutMethod_Unbound_Comparator(self):
+        instance = TestClass()
+        self.mox.StubOutWithMock(TestClass, 'OtherValidCall')
+
+        TestClass.OtherValidCall(mox.IgnoreArg()).AndReturn('foo')
+        self.mox.ReplayAll()
+
+        actual = TestClass.OtherValidCall(instance)
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+        self.assertEqual('foo', actual)
+
+    def testStubOutMethod_Unbound_Subclass_Comparator(self):
+        self.mox.StubOutWithMock(
+            mox_helper.TestClassFromAnotherModule, 'Value')
+        mox_helper.TestClassFromAnotherModule.Value(
+            mox.IsA(mox_helper.ChildClassFromAnotherModule)).AndReturn('foo')
+        self.mox.ReplayAll()
+
+        instance = mox_helper.ChildClassFromAnotherModule()
+        actual = mox_helper.TestClassFromAnotherModule.Value(instance)
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+        self.assertEqual('foo', actual)
+
+    def testStubOuMethod_Unbound_WithOptionalParams(self):
+        self.mox = mox.Mox()
+        self.mox.StubOutWithMock(TestClass, 'OptionalArgs')
+        TestClass.OptionalArgs(mox.IgnoreArg(), foo=2)
+        self.mox.ReplayAll()
+
+        t = TestClass()
+        TestClass.OptionalArgs(t, foo=2)
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+
+    def testStubOutMethod_Unbound_ActualInstance(self):
+        instance = TestClass()
+        self.mox.StubOutWithMock(TestClass, 'OtherValidCall')
+
+        TestClass.OtherValidCall(instance).AndReturn('foo')
+        self.mox.ReplayAll()
+
+        actual = TestClass.OtherValidCall(instance)
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+        self.assertEqual('foo', actual)
+
+    def testStubOutMethod_Unbound_DifferentInstance(self):
+        instance = TestClass()
+        self.mox.StubOutWithMock(TestClass, 'OtherValidCall')
+
+        TestClass.OtherValidCall(instance).AndReturn('foo')
+        self.mox.ReplayAll()
+
+        # This should fail, since the instances are different
+        self.assertRaises(mox.UnexpectedMethodCallError,
+                          TestClass.OtherValidCall, "wrong self")
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+
+    def testStubOutMethod_Unbound_NamedUsingPositional(self):
+        """Check positional parameters can be matched to keyword arguments."""
+        self.mox.StubOutWithMock(mox_helper.ExampleClass, 'NamedParams')
+        instance = mox_helper.ExampleClass()
+        mox_helper.ExampleClass.NamedParams(instance, 'foo', baz=None)
+        self.mox.ReplayAll()
+
+        mox_helper.ExampleClass.NamedParams(instance, 'foo', baz=None)
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+
+    def testStubOutMethod_Unbound_NamedUsingPositional_SomePositional(self):
+        """Check positional parameters can be matched to keyword arguments."""
+        self.mox.StubOutWithMock(mox_helper.ExampleClass, 'TestMethod')
+        instance = mox_helper.ExampleClass()
+        mox_helper.ExampleClass.TestMethod(instance, 'one', 'two', 'nine')
+        self.mox.ReplayAll()
+
+        mox_helper.ExampleClass.TestMethod(instance, 'one', 'two', 'nine')
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+
+    def testStubOutMethod_Unbound_SpecialArgs(self):
+        self.mox.StubOutWithMock(mox_helper.ExampleClass, 'SpecialArgs')
+        instance = mox_helper.ExampleClass()
+        mox_helper.ExampleClass.SpecialArgs(instance, 'foo', None, bar='bar')
+        self.mox.ReplayAll()
+
+        mox_helper.ExampleClass.SpecialArgs(instance, 'foo', None, bar='bar')
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+
+    def testStubOutMethod_Bound_SimpleTest(self):
+        t = self.mox.CreateMock(TestClass)
+
+        t.MethodWithArgs(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn('foo')
+        self.mox.ReplayAll()
+
+        actual = t.MethodWithArgs(None, None)
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+        self.assertEqual('foo', actual)
+
+    def testStubOutMethod_Bound_NamedUsingPositional(self):
+        """Check positional parameters can be matched to keyword arguments."""
+        self.mox.StubOutWithMock(mox_helper.ExampleClass, 'NamedParams')
+        instance = mox_helper.ExampleClass()
+        instance.NamedParams('foo', baz=None)
+        self.mox.ReplayAll()
+
+        instance.NamedParams('foo', baz=None)
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+
+    def testStubOutMethod_Bound_NamedUsingPositional_SomePositional(self):
+        """Check positional parameters can be matched to keyword arguments."""
+        self.mox.StubOutWithMock(mox_helper.ExampleClass, 'TestMethod')
+        instance = mox_helper.ExampleClass()
+        instance.TestMethod(instance, 'one', 'two', 'nine')
+        self.mox.ReplayAll()
+
+        instance.TestMethod(instance, 'one', 'two', 'nine')
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+
+    def testStubOutMethod_Bound_SpecialArgs(self):
+        self.mox.StubOutWithMock(mox_helper.ExampleClass, 'SpecialArgs')
+        instance = mox_helper.ExampleClass()
+        instance.SpecialArgs(instance, 'foo', None, bar='bar')
+        self.mox.ReplayAll()
+
+        instance.SpecialArgs(instance, 'foo', None, bar='bar')
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+
+    def testStubOutMethod_Func_PropgatesExceptions(self):
+        """Errors in Func comparator should propagate to the calling method."""
+        class TestException(Exception):
+            pass
+
+        def raiseExceptionOnNotOne(value):
+            if value == 1:
+                return True
+            else:
+                raise TestException
+
+        test_obj = TestClass()
+        self.mox.StubOutWithMock(test_obj, 'MethodWithArgs')
+        test_obj.MethodWithArgs(
+            mox.IgnoreArg(), mox.Func(raiseExceptionOnNotOne)).AndReturn(1)
+        test_obj.MethodWithArgs(
+            mox.IgnoreArg(), mox.Func(raiseExceptionOnNotOne)).AndReturn(1)
+        self.mox.ReplayAll()
+
+        self.assertEqual(test_obj.MethodWithArgs('ignored', 1), 1)
+        self.assertRaises(TestException,
+                          test_obj.MethodWithArgs, 'ignored', 2)
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+
+    def testStubOut_SignatureMatching_init_(self):
+        self.mox.StubOutWithMock(mox_helper.ExampleClass, '__init__')
+        mox_helper.ExampleClass.__init__(mox.IgnoreArg())
+        self.mox.ReplayAll()
+
+        # Create an instance of a child class, which calls the parent
+        # __init__
+        mox_helper.ChildExampleClass()
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+
+    # FIXME(dhellmann): Skip this test until someone can debug why it
+    # fails on python 3.4.
+
+    @testtools.skipIf(six.PY3, "This test needs to be fixed for python 3")
+    def testStubOutClass_OldStyle(self):
+        """Test a mocked class whose __init__ returns a Mock."""
+        self.mox.StubOutWithMock(mox_helper, 'TestClassFromAnotherModule')
+        self.assertTrue(isinstance(mox_helper.TestClassFromAnotherModule,
+                                   mox.MockObject))
+
+        mock_instance = self.mox.CreateMock(
+            mox_helper.TestClassFromAnotherModule)
+        mox_helper.TestClassFromAnotherModule().AndReturn(mock_instance)
+        mock_instance.Value().AndReturn('mock instance')
+
+        self.mox.ReplayAll()
+
+        a_mock = mox_helper.TestClassFromAnotherModule()
+        actual = a_mock.Value()
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+        self.assertEqual('mock instance', actual)
+
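+    # StubOutClassWithMocks replaces the class itself, so that each
+    # construction recorded here is paired with a distinct mock instance and
+    # both the number of instantiations and the constructor arguments are
+    # verified.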
+    def testStubOutClass(self):
+        self.mox.StubOutClassWithMocks(mox_helper, 'CallableClass')
+
+        # Instance one
+        mock_one = mox_helper.CallableClass(1, 2)
+        mock_one.Value().AndReturn('mock')
+
+        # Instance two
+        mock_two = mox_helper.CallableClass(8, 9)
+        mock_two('one').AndReturn('called mock')
+
+        self.mox.ReplayAll()
+
+        one = mox_helper.CallableClass(1, 2)
+        actual_one = one.Value()
+
+        two = mox_helper.CallableClass(8, 9)
+        actual_two = two('one')
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+
+        # Verify the correct mocks were returned
+        self.assertEqual(mock_one, one)
+        self.assertEqual(mock_two, two)
+
+        # Verify
+        self.assertEqual('mock', actual_one)
+        self.assertEqual('called mock', actual_two)
+
+    def testStubOutClass_NotAClass(self):
+        self.assertRaises(TypeError, self.mox.StubOutClassWithMocks,
+                          mox_helper, 'MyTestFunction')
+
+    def testStubOutClassNotEnoughCreated(self):
+        self.mox.StubOutClassWithMocks(mox_helper, 'CallableClass')
+
+        mox_helper.CallableClass(1, 2)
+        mox_helper.CallableClass(8, 9)
+
+        self.mox.ReplayAll()
+        mox_helper.CallableClass(1, 2)
+
+        self.assertRaises(mox.ExpectedMockCreationError, self.mox.VerifyAll)
+        self.mox.UnsetStubs()
+
+    def testStubOutClassWrongSignature(self):
+        self.mox.StubOutClassWithMocks(mox_helper, 'CallableClass')
+
+        self.assertRaises(AttributeError, mox_helper.CallableClass)
+
+        self.mox.UnsetStubs()
+
+    def testStubOutClassWrongParameters(self):
+        self.mox.StubOutClassWithMocks(mox_helper, 'CallableClass')
+
+        mox_helper.CallableClass(1, 2)
+
+        self.mox.ReplayAll()
+
+        self.assertRaises(mox.UnexpectedMethodCallError,
+                          mox_helper.CallableClass, 8, 9)
+        self.mox.UnsetStubs()
+
+    def testStubOutClassTooManyCreated(self):
+        self.mox.StubOutClassWithMocks(mox_helper, 'CallableClass')
+
+        mox_helper.CallableClass(1, 2)
+
+        self.mox.ReplayAll()
+        mox_helper.CallableClass(1, 2)
+        self.assertRaises(mox.UnexpectedMockCreationError,
+                          mox_helper.CallableClass, 8, 9)
+
+        self.mox.UnsetStubs()
+
+    def testWarnsUserIfMockingMock(self):
+        """Test that user is warned if they try to stub out a MockAnything."""
+        self.mox.StubOutWithMock(TestClass, 'MyStaticMethod')
+        self.assertRaises(TypeError, self.mox.StubOutWithMock, TestClass,
+                          'MyStaticMethod')
+
+    def testStubOutFirstClassMethodVerifiesSignature(self):
+        self.mox.StubOutWithMock(mox_helper, 'MyTestFunction')
+
+        # Wrong number of arguments
+        self.assertRaises(AttributeError, mox_helper.MyTestFunction, 1)
+        self.mox.UnsetStubs()
+
+    def _testMethodSignatureVerification(self, stubClass):
+        # If stubClass is true, the test is run against a stubbed-out class;
+        # otherwise it is run against a stubbed-out instance.
+        if stubClass:
+            self.mox.StubOutWithMock(mox_helper.ExampleClass, "TestMethod")
+            obj = mox_helper.ExampleClass()
+        else:
+            obj = mox_helper.ExampleClass()
+            self.mox.StubOutWithMock(mox_helper.ExampleClass, "TestMethod")
+        self.assertRaises(AttributeError, obj.TestMethod)
+        self.assertRaises(AttributeError, obj.TestMethod, 1)
+        self.assertRaises(AttributeError, obj.TestMethod, nine=2)
+        obj.TestMethod(1, 2)
+        obj.TestMethod(1, 2, 3)
+        obj.TestMethod(1, 2, nine=3)
+        self.assertRaises(AttributeError, obj.TestMethod, 1, 2, 3, 4)
+        self.mox.UnsetStubs()
+
+    def testStubOutClassMethodVerifiesSignature(self):
+        self._testMethodSignatureVerification(stubClass=True)
+
+    def testStubOutObjectMethodVerifiesSignature(self):
+        self._testMethodSignatureVerification(stubClass=False)
+
+    def testStubOutObject(self):
+        """Test that an object is replaced with a Mock."""
+
+        class Foo(object):
+            def __init__(self):
+                self.obj = TestClass()
+
+        foo = Foo()
+        self.mox.StubOutWithMock(foo, "obj")
+        self.assertTrue(isinstance(foo.obj, mox.MockObject))
+        foo.obj.ValidCall()
+        self.mox.ReplayAll()
+
+        foo.obj.ValidCall()
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+        self.assertFalse(isinstance(foo.obj, mox.MockObject))
+
+    def testForgotReplayHelpfulMessage(self):
+        """If there is an AttributeError on a MockMethod, give helpful msg."""
+        foo = self.mox.CreateMockAnything()
+        bar = self.mox.CreateMockAnything()
+        foo.GetBar().AndReturn(bar)
+        bar.ShowMeTheMoney()
+        # Forgot to replay!
+        try:
+            foo.GetBar().ShowMeTheMoney()
+        except AttributeError as e:
+            self.assertEqual(
+                'MockMethod has no attribute "ShowMeTheMoney". '
+                'Did you remember to put your mocks in replay mode?', str(e))
+
+
+class ReplayTest(testtools.TestCase):
+    """Verify Replay works properly."""
+
+    def testReplay(self):
+        """Replay should put objects into replay mode."""
+        mock_obj = mox.MockObject(TestClass)
+        self.assertFalse(mock_obj._replay_mode)
+        mox.Replay(mock_obj)
+        self.assertTrue(mock_obj._replay_mode)
+
+
+class MoxTestBaseTest(testtools.TestCase):
+    """Verify that all tests in class derived from MoxTestBase are wrapped."""
+
+    def setUp(self):
+        super(MoxTestBaseTest, self).setUp()
+        self.mox = mox.Mox()
+        self.addCleanup(self.mox.UnsetStubs)
+        self.test_mox = mox.Mox()
+        self.addCleanup(self.test_mox.UnsetStubs)
+        self.test_stubs = mox.stubout.StubOutForTesting()
+        self.addCleanup(self.test_stubs.UnsetAll)
+        self.addCleanup(self.test_stubs.SmartUnsetAll)
+        self.result = testtools.TestResult()
+
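+    # self.mox stubs and verifies this test itself, while self.test_mox and
+    # self.test_stubs are injected into the ExampleMoxTest instance under
+    # test so that its cleanup behavior can be observed from the outside.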
+    def _setUpTestClass(self):
+        """Replacement for setUp in the test class instance.
+
+        Assigns a mox.Mox instance as the mox attribute of the test instance.
+        This way the replacement Mox instance is under our control before
+        setUp is called in the test class instance.
+        """
+        self.test.mox = self.test_mox
+        self.test.stubs = self.test_stubs
+
+    def _CreateTest(self, test_name):
+        """Create a test from our example mox class.
+
+        The created test is assigned to this instance's test attribute.
+        """
+        self.test = mox_helper.ExampleMoxTest(test_name)
+        self.mox.stubs.Set(self.test, 'setUp', self._setUpTestClass)
+
+    def _VerifySuccess(self):
+        """Run the checks to confirm test method completed successfully."""
+        self.mox.StubOutWithMock(self.test_mox, 'UnsetStubs')
+        self.mox.StubOutWithMock(self.test_mox, 'VerifyAll')
+        self.mox.StubOutWithMock(self.test_stubs, 'UnsetAll')
+        self.mox.StubOutWithMock(self.test_stubs, 'SmartUnsetAll')
+        self.test_mox.UnsetStubs()
+        self.test_mox.VerifyAll()
+        self.test_stubs.UnsetAll()
+        self.test_stubs.SmartUnsetAll()
+        self.mox.ReplayAll()
+        self.test.run(result=self.result)
+        self.assertTrue(self.result.wasSuccessful())
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()    # Needed to call the real VerifyAll() below.
+        self.test_mox.VerifyAll()
+
+    def testSuccess(self):
+        """Successful test method execution test."""
+        self._CreateTest('testSuccess')
+        self._VerifySuccess()
+
+    def testSuccessNoMocks(self):
+        """testSuccess() unsets all the mocks. Verify they've been unset."""
+        self._CreateTest('testSuccess')
+        self.test.run(result=self.result)
+        self.assertTrue(self.result.wasSuccessful())
+        self.assertEqual(OS_LISTDIR, mox_helper.os.listdir)
+
+    def testStubs(self):
+        """Test that "self.stubs" is provided and is useful."""
+        self._CreateTest('testHasStubs')
+        self._VerifySuccess()
+
+    def testStubsNoMocks(self):
+        """Let testHasStubs() unset the stubs by itself."""
+        self._CreateTest('testHasStubs')
+        self.test.run(result=self.result)
+        self.assertTrue(self.result.wasSuccessful())
+        self.assertEqual(OS_LISTDIR, mox_helper.os.listdir)
+
+    def testExpectedNotCalled(self):
+        """Stubbed out method is not called."""
+        self._CreateTest('testExpectedNotCalled')
+        self.mox.StubOutWithMock(self.test_mox, 'UnsetStubs')
+        self.mox.StubOutWithMock(self.test_stubs, 'UnsetAll')
+        self.mox.StubOutWithMock(self.test_stubs, 'SmartUnsetAll')
+        # Don't stub out VerifyAll - that's what causes the test to fail
+        self.test_mox.UnsetStubs()
+        self.test_stubs.UnsetAll()
+        self.test_stubs.SmartUnsetAll()
+        self.mox.ReplayAll()
+        self.test.run(result=self.result)
+        self.assertFalse(self.result.wasSuccessful())
+        self.mox.VerifyAll()
+
+    def testExpectedNotCalledNoMocks(self):
+        """Let testExpectedNotCalled() unset all the mocks by itself."""
+        self._CreateTest('testExpectedNotCalled')
+        self.test.run(result=self.result)
+        self.assertFalse(self.result.wasSuccessful())
+        self.assertEqual(OS_LISTDIR, mox_helper.os.listdir)
+
+    def testUnexpectedCall(self):
+        """Stubbed out method is called with unexpected arguments."""
+        self._CreateTest('testUnexpectedCall')
+        self.mox.StubOutWithMock(self.test_mox, 'UnsetStubs')
+        self.mox.StubOutWithMock(self.test_stubs, 'UnsetAll')
+        self.mox.StubOutWithMock(self.test_stubs, 'SmartUnsetAll')
+        # Ensure no calls are made to VerifyAll()
+        self.mox.StubOutWithMock(self.test_mox, 'VerifyAll')
+        self.test_mox.UnsetStubs()
+        self.test_stubs.UnsetAll()
+        self.test_stubs.SmartUnsetAll()
+        self.mox.ReplayAll()
+        self.test.run(result=self.result)
+        self.assertFalse(self.result.wasSuccessful())
+        self.mox.VerifyAll()
+
+    def testFailure(self):
+        """Failing assertion in test method."""
+        self._CreateTest('testFailure')
+        self.mox.StubOutWithMock(self.test_mox, 'UnsetStubs')
+        self.mox.StubOutWithMock(self.test_stubs, 'UnsetAll')
+        self.mox.StubOutWithMock(self.test_stubs, 'SmartUnsetAll')
+        # Ensure no calls are made to VerifyAll()
+        self.mox.StubOutWithMock(self.test_mox, 'VerifyAll')
+        self.test_mox.UnsetStubs()
+        self.test_stubs.UnsetAll()
+        self.test_stubs.SmartUnsetAll()
+        self.mox.ReplayAll()
+        self.test.run(result=self.result)
+        self.assertFalse(self.result.wasSuccessful())
+        self.mox.VerifyAll()
+
+    def testMixin(self):
+        """Run test from mix-in test class, ensure it passes."""
+        self._CreateTest('testStat')
+        self._VerifySuccess()
+
+    def testMixinAgain(self):
+        """Run same test as above but from the current test class.
+
+        Ensures metaclass properly wrapped test methods from all base classes.
+        If unsetting of stubs doesn't happen, this will fail.
+        """
+        self._CreateTest('testStatOther')
+        self._VerifySuccess()
+
+
+class VerifyTest(testtools.TestCase):
+    """Verify Verify works properly."""
+
+    def testVerify(self):
+        """Verify should be called for all objects.
+
+        Should throw an exception because the expected behavior did not occur.
+        """
+        mock_obj = mox.MockObject(TestClass)
+        mock_obj.ValidCall()
+        mock_obj._Replay()
+        self.assertRaises(mox.ExpectedMethodCallsError, mox.Verify, mock_obj)
+
+
+class ResetTest(testtools.TestCase):
+    """Verify Reset works properly."""
+
+    def testReset(self):
+        """Should empty all queues and put mocks in record mode."""
+        mock_obj = mox.MockObject(TestClass)
+        mock_obj.ValidCall()
+        self.assertFalse(mock_obj._replay_mode)
+        mock_obj._Replay()
+        self.assertTrue(mock_obj._replay_mode)
+        self.assertEqual(1, len(mock_obj._expected_calls_queue))
+
+        mox.Reset(mock_obj)
+        self.assertFalse(mock_obj._replay_mode)
+        self.assertEqual(0, len(mock_obj._expected_calls_queue))
+
+
+class MyTestCase(testtools.TestCase):
+    """Simulate the use of a fake wrapper around Python's unittest library."""
+
+    def setUp(self):
+        super(MyTestCase, self).setUp()
+        self.critical_variable = 42
+        self.another_critical_variable = 42
+
+    def testMethodOverride(self):
+        """Should be properly overriden in a derived class."""
+        self.assertEqual(42, self.another_critical_variable)
+        self.another_critical_variable += 1
+
+
+class MoxTestBaseMultipleInheritanceTest(mox.MoxTestBase, MyTestCase):
+    """Test that multiple inheritance can be used with MoxTestBase."""
+
+    def setUp(self):
+        super(MoxTestBaseMultipleInheritanceTest, self).setUp()
+        self.another_critical_variable = 99
+
+    def testMultipleInheritance(self):
+        """Should be able to access members created by all parent setUp()."""
+        self.assertTrue(isinstance(self.mox, mox.Mox))
+        self.assertEqual(42, self.critical_variable)
+
+    def testMethodOverride(self):
+        """Should run before MyTestCase.testMethodOverride."""
+        self.assertEqual(99, self.another_critical_variable)
+        self.another_critical_variable = 42
+        super(MoxTestBaseMultipleInheritanceTest, self).testMethodOverride()
+        self.assertEqual(43, self.another_critical_variable)
+
+
+class MoxTestDontMockProperties(MoxTestBaseTest):
+    def testPropertiesArentMocked(self):
+        mock_class = self.mox.CreateMock(ClassWithProperties)
+        self.assertRaises(mox.UnknownMethodCallError,
+                          lambda: mock_class.prop_attr)
+
+
+class TestClass(object):
+    """This class is used only for testing the mock framework."""
+
+    SOME_CLASS_VAR = "test_value"
+    _PROTECTED_CLASS_VAR = "protected value"
+
+    def __init__(self, ivar=None):
+        self.__ivar = ivar
+
+    def __eq__(self, rhs):
+        return self.__ivar == rhs
+
+    def __ne__(self, rhs):
+        return not self.__eq__(rhs)
+
+    def ValidCall(self):
+        pass
+
+    def MethodWithArgs(self, one, two, nine=None):
+        pass
+
+    def OtherValidCall(self):
+        pass
+
+    def OptionalArgs(self, foo='boom'):
+        pass
+
+    def ValidCallWithArgs(self, *args, **kwargs):
+        pass
+
+    @classmethod
+    def MyClassMethod(cls):
+        pass
+
+    @staticmethod
+    def MyStaticMethod():
+        pass
+
+    def _ProtectedCall(self):
+        pass
+
+    def __PrivateCall(self):
+        pass
+
+    def __DoNotMock(self):
+        pass
+
+    def __getitem__(self, key):
+        """Return the value for key."""
+        return self.d[key]
+
+    def __setitem__(self, key, value):
+        """Set the value for key to value."""
+        self.d[key] = value
+
+    def __contains__(self, key):
+        """Returns True if d contains the key."""
+        return key in self.d
+
+    def __iter__(self):
+        pass
+
+
+class ChildClass(TestClass):
+    """This inherits from TestClass."""
+    def __init__(self):
+        TestClass.__init__(self)
+
+    def ChildValidCall(self):
+        pass
+
+
+class CallableClass(object):
+    """This class is callable, and that should be mockable!"""
+
+    def __init__(self):
+        pass
+
+    def __call__(self, param):
+        return param
+
+
+class ClassWithProperties(object):
+    def setter_attr(self, value):
+        pass
+
+    def getter_attr(self):
+        pass
+
+    prop_attr = property(getter_attr, setter_attr)
+
+
+class SubscribtableNonIterableClass(object):
+    def __getitem__(self, index):
+        raise IndexError
+
+
+class InheritsFromCallable(CallableClass):
+    """This class should be mockable; it inherits from a callable class."""
+
+    pass
+
+
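+class MoxTestBaseUsageExample(mox.MoxTestBase):
+    """Illustrative sketch (not part of the upstream mox3 test suite).
+
+    Shows the typical MoxTestBase flow exercised by the tests above:
+    stub out a method, record the expected call, switch to replay mode,
+    and let the metaclass wrapper run VerifyAll()/UnsetStubs() after the
+    test method returns.
+    """
+
+    def testStubbedCallIsVerifiedAutomatically(self):
+        obj = TestClass()
+        self.mox.StubOutWithMock(obj, 'OtherValidCall')
+        obj.OtherValidCall().AndReturn('stubbed')
+        self.mox.ReplayAll()
+        self.assertEqual('stubbed', obj.OtherValidCall())
+        # No explicit VerifyAll()/UnsetStubs() here; MoxTestBase does both.
+
+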
+if __name__ == '__main__':
+    testtools.main()
diff --git a/catapult/telemetry/third_party/mox3/mox3/tests/test_stubout.py b/catapult/telemetry/third_party/mox3/mox3/tests/test_stubout.py
new file mode 100644
index 0000000..4a04170
--- /dev/null
+++ b/catapult/telemetry/third_party/mox3/mox3/tests/test_stubout.py
@@ -0,0 +1,49 @@
+# Unit tests for stubout.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This is a fork of the pymox library intended to work with Python 3.
+# The file was modified by quermit@gmail.com and dawid.fatyga@gmail.com
+
+import fixtures
+import testtools
+
+from mox3 import mox
+from mox3 import stubout
+from mox3.tests import stubout_helper
+
+
+class StubOutForTestingTest(testtools.TestCase):
+    def setUp(self):
+        super(StubOutForTestingTest, self).setUp()
+        self.mox = mox.Mox()
+        self.useFixture(fixtures.MonkeyPatch(
+            'mox3.tests.stubout_helper.SampleFunction',
+            stubout_helper.SampleFunction))
+
+    def testSmartSetOnModule(self):
+        mock_function = self.mox.CreateMockAnything()
+        mock_function()
+
+        stubber = stubout.StubOutForTesting()
+        stubber.SmartSet(stubout_helper, 'SampleFunction', mock_function)
+
+        self.mox.ReplayAll()
+
+        stubout_helper.SampleFunction()
+
+        self.mox.VerifyAll()
+
+
+if __name__ == '__main__':
+    testtools.main()
diff --git a/catapult/telemetry/third_party/mox3/requirements.txt b/catapult/telemetry/third_party/mox3/requirements.txt
new file mode 100644
index 0000000..d52427f
--- /dev/null
+++ b/catapult/telemetry/third_party/mox3/requirements.txt
@@ -0,0 +1,6 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+pbr<2.0,>=1.6
+
+fixtures>=1.3.1
diff --git a/catapult/telemetry/third_party/mox3/setup.cfg b/catapult/telemetry/third_party/mox3/setup.cfg
new file mode 100644
index 0000000..4a3de06
--- /dev/null
+++ b/catapult/telemetry/third_party/mox3/setup.cfg
@@ -0,0 +1,27 @@
+[metadata]
+name = mox3
+summary = Mock object framework for Python
+description-file =
+    README.rst
+author = OpenStack
+author-email = openstack-dev@lists.openstack.org
+home-page = http://www.openstack.org/
+classifiers =
+    Environment :: OpenStack
+    Programming Language :: Python
+    License :: OSI Approved :: Apache Software License
+    Programming Language :: Python :: 2.6
+    Programming Language :: Python :: 2.7
+    Programming Language :: Python :: 3
+    Operating System :: OS Independent
+    Development Status :: 4 - Beta
+    Intended Audience :: Developers
+    Topic :: Software Development :: Testing
+
+[files]
+packages =
+    mox3
+
+[global]
+setup-hooks =
+    pbr.hooks.setup_hook
diff --git a/catapult/telemetry/third_party/mox3/setup.py b/catapult/telemetry/third_party/mox3/setup.py
new file mode 100644
index 0000000..d8080d0
--- /dev/null
+++ b/catapult/telemetry/third_party/mox3/setup.py
@@ -0,0 +1,29 @@
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
+import setuptools
+
+# In python < 2.7.4, a lazy loading of package `pbr` will break
+# setuptools if some other modules registered functions in `atexit`.
+# solution from: http://bugs.python.org/issue15881#msg170215
+try:
+    import multiprocessing  # noqa
+except ImportError:
+    pass
+
+setuptools.setup(
+    setup_requires=['pbr>=1.3'],
+    pbr=True)
diff --git a/catapult/telemetry/third_party/mox3/test-requirements.txt b/catapult/telemetry/third_party/mox3/test-requirements.txt
new file mode 100644
index 0000000..22f6480
--- /dev/null
+++ b/catapult/telemetry/third_party/mox3/test-requirements.txt
@@ -0,0 +1,22 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+# this file lists dependencies required for the testing of mox3
+
+# Install bounded pep8/pyflakes first, then let flake8 install
+pep8==1.5.7
+pyflakes==0.8.1
+flake8<=2.4.1,>=2.2.4
+
+coverage>=3.6
+discover
+python-subunit>=0.0.18
+testrepository>=0.0.18
+testtools>=1.4.0
+
+six>=1.9.0
+
+# this is required for the docs build jobs
+sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2
+oslosphinx>=2.5.0 # Apache-2.0
+
diff --git a/catapult/telemetry/third_party/mox3/tox.ini b/catapult/telemetry/third_party/mox3/tox.ini
new file mode 100644
index 0000000..eea97fc
--- /dev/null
+++ b/catapult/telemetry/third_party/mox3/tox.ini
@@ -0,0 +1,28 @@
+[tox]
+envlist = py34,py27,pep8
+
+[testenv]
+setenv = VIRTUAL_ENV={envdir}
+deps = -r{toxinidir}/requirements.txt
+       -r{toxinidir}/test-requirements.txt
+commands =
+  python setup.py testr --slowest --testr-args='{posargs}'
+
+[testenv:docs]
+commands = python setup.py build_sphinx
+
+[testenv:pep8]
+commands = flake8
+
+[testenv:venv]
+commands = {posargs}
+
+[testenv:cover]
+setenv = VIRTUAL_ENV={envdir}
+commands =
+  python setup.py testr --coverage
+
+[flake8]
+show-source = true
+builtins = _
+exclude=.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg
diff --git a/catapult/telemetry/third_party/png/README.chromium b/catapult/telemetry/third_party/png/README.chromium
new file mode 100644
index 0000000..d838ece
--- /dev/null
+++ b/catapult/telemetry/third_party/png/README.chromium
@@ -0,0 +1,17 @@
+Name: Pure Python PNG Reader/Writer
+Short Name: pypng
+URL: https://github.com/drj11/pypng/
+Version: 0
+Date: 2009-03-11
+Revision: dd1797c361eafa443878b0915f767b75bd518d3b
+License: MIT
+License File: NOT_SHIPPED
+Security Critical: no
+
+Description:
+A PNG encoder and decoder for Python. Used by telemetry to decode screenshots
+captured via the gpuBenchmark.windowSnapshot API, which are returned as
+Base64-encoded PNG files.
+
+Local Modifications:
+None.
\ No newline at end of file
diff --git a/catapult/telemetry/third_party/png/png.py b/catapult/telemetry/third_party/png/png.py
new file mode 100755
index 0000000..b55dd3a
--- /dev/null
+++ b/catapult/telemetry/third_party/png/png.py
@@ -0,0 +1,3857 @@
+#!/usr/bin/env python
+
+# $URL$
+# $Rev$
+
+# png.py - PNG encoder/decoder in pure Python
+#
+# Copyright (C) 2006 Johann C. Rocholl <johann@browsershots.org>
+# Portions Copyright (C) 2009 David Jones <drj@pobox.com>
+# And probably portions Copyright (C) 2006 Nicko van Someren <nicko@nicko.org>
+#
+# Original concept by Johann C. Rocholl.
+#
+# LICENSE (The MIT License)
+#
+# Permission is hereby granted, free of charge, to any person
+# obtaining a copy of this software and associated documentation files
+# (the "Software"), to deal in the Software without restriction,
+# including without limitation the rights to use, copy, modify, merge,
+# publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+#
+# Changelog (recent first):
+# 2009-03-11 David: interlaced bit depth < 8 (writing).
+# 2009-03-10 David: interlaced bit depth < 8 (reading).
+# 2009-03-04 David: Flat and Boxed pixel formats.
+# 2009-02-26 David: Palette support (writing).
+# 2009-02-23 David: Bit-depths < 8; better PNM support.
+# 2006-06-17 Nicko: Reworked into a class, faster interlacing.
+# 2006-06-17 Johann: Very simple prototype PNG decoder.
+# 2006-06-17 Nicko: Test suite with various image generators.
+# 2006-06-17 Nicko: Alpha-channel, grey-scale, 16-bit/plane support.
+# 2006-06-15 Johann: Scanline iterator interface for large input files.
+# 2006-06-09 Johann: Very simple prototype PNG encoder.
+
+# Incorporated into Bangai-O Development Tools by drj on 2009-02-11 from
+# http://trac.browsershots.org/browser/trunk/pypng/lib/png.py?rev=2885
+
+# Incorporated into pypng by drj on 2009-03-12 from
+# //depot/prj/bangaio/master/code/png.py#67
+
+
+"""
+Pure Python PNG Reader/Writer
+
+This Python module implements support for PNG images (see PNG
+specification at http://www.w3.org/TR/2003/REC-PNG-20031110/ ). It reads
+and writes PNG files with all allowable bit depths (1/2/4/8/16/24/32/48/64
+bits per pixel) and colour combinations: greyscale (1/2/4/8/16 bit); RGB,
+RGBA, LA (greyscale with alpha) with 8/16 bits per channel; colour mapped
+images (1/2/4/8 bit).  Adam7 interlacing is supported for reading and
+writing.  A number of optional chunks can be specified (when writing)
+and understood (when reading): ``tRNS``, ``bKGD``, ``gAMA``.
+
+For help, type ``import png; help(png)`` in your python interpreter.
+
+A good place to start is the :class:`Reader` and :class:`Writer` classes.
+
+Requires Python 2.3.  Limited support is available for Python 2.2, but
+not everything works.  Best with Python 2.4 and higher.  Installation is
+trivial, but see the ``README.txt`` file (with the source distribution)
+for details.
+
+This file can also be used as a command-line utility to convert
+`Netpbm <http://netpbm.sourceforge.net/>`_ PNM files to PNG, and the reverse conversion from PNG to
+PNM. The interface is similar to that of the ``pnmtopng`` program from
+Netpbm.  Type ``python png.py --help`` at the shell prompt
+for usage and a list of options.
+
+A note on spelling and terminology
+----------------------------------
+
+Generally British English spelling is used in the documentation.  So
+that's "greyscale" and "colour".  This not only matches the author's
+native language, it's also used by the PNG specification.
+
+The major colour models supported by PNG (and hence by PyPNG) are:
+greyscale, RGB, greyscale--alpha, RGB--alpha.  These are sometimes
+referred to using the abbreviations: L, RGB, LA, RGBA.  In this case
+each letter abbreviates a single channel: *L* is for Luminance or Luma or
+Lightness which is the channel used in greyscale images; *R*, *G*, *B* stand
+for Red, Green, Blue, the components of a colour image; *A* stands for
+Alpha, the opacity channel (used for transparency effects, but higher
+values are more opaque, so it makes sense to call it opacity).
+
+A note on formats
+-----------------
+
+When getting pixel data out of this module (reading) and presenting
+data to this module (writing) there are a number of ways the data could
+be represented as a Python value.  Generally this module uses one of
+three formats called "flat row flat pixel", "boxed row flat pixel", and
+"boxed row boxed pixel".  Basically the concern is whether each pixel
+and each row comes in its own little tuple (box), or not.
+
+Consider an image that is 3 pixels wide by 2 pixels high, and each pixel
+has RGB components:
+
+Boxed row flat pixel::
+
+  list([R,G,B, R,G,B, R,G,B],
+       [R,G,B, R,G,B, R,G,B])
+
+Each row appears as its own list, but the pixels are flattened so that
+three values for one pixel simply follow the three values for the previous
+pixel.  This is the most common format used, because it provides a good
+compromise between space and convenience.  PyPNG regards itself as
+at liberty to replace any sequence type with any sufficiently compatible
+other sequence type; in practice each row is an array (from the array
+module), and the outer list is sometimes an iterator rather than an
+explicit list (so that streaming is possible).
+
+Flat row flat pixel::
+
+  [R,G,B, R,G,B, R,G,B,
+   R,G,B, R,G,B, R,G,B]
+
+The entire image is one single giant sequence of colour values.
+Generally an array will be used (to save space), not a list.
+
+Boxed row boxed pixel::
+
+  list([ (R,G,B), (R,G,B), (R,G,B) ],
+       [ (R,G,B), (R,G,B), (R,G,B) ])
+
+Each row appears in its own list, but each pixel also appears in its own
+tuple.  A serious memory burn in Python.
+
+In all cases the top row comes first, and for each row the pixels are
+ordered from left-to-right.  Within a pixel the values appear in the
+order, R-G-B-A (or L-A for greyscale--alpha).
+
+There is a fourth format, mentioned because it is used internally,
+is close to what lies inside a PNG file itself, and has some support
+from the public API.  This format is called packed.  When packed,
+each row is a sequence of bytes (integers from 0 to 255), just as
+it is before PNG scanline filtering is applied.  When the bit depth
+is 8 this is essentially the same as boxed row flat pixel; when the
+bit depth is less than 8, several pixels are packed into each byte;
+when the bit depth is 16 (the only value more than 8 that is supported
+by the PNG image format) each pixel value is decomposed into 2 bytes
+(and `packed` is a misnomer).  This format is used by the
+:meth:`Writer.write_packed` method.  It isn't usually a convenient
+format, but may be just right if the source data for the PNG image
+comes from something that uses a similar format (for example, 1-bit
+BMPs, or another PNG file).
+
+And now, my famous members
+--------------------------
+"""
+
+# http://www.python.org/doc/2.2.3/whatsnew/node5.html
+from __future__ import generators
+
+__version__ = "$URL$ $Rev$"
+
+from array import array
+try: # See :pyver:old
+    import itertools
+except:
+    pass
+import math
+# http://www.python.org/doc/2.4.4/lib/module-operator.html
+import operator
+import struct
+import sys
+import zlib
+# http://www.python.org/doc/2.4.4/lib/module-warnings.html
+import warnings
+try:
+    import pyximport
+    pyximport.install()
+    import cpngfilters as pngfilters
+except ImportError:
+    pass
+
+
+__all__ = ['Image', 'Reader', 'Writer', 'write_chunks', 'from_array']
+
+
+# The PNG signature.
+# http://www.w3.org/TR/PNG/#5PNG-file-signature
+_signature = struct.pack('8B', 137, 80, 78, 71, 13, 10, 26, 10)
+
+_adam7 = ((0, 0, 8, 8),
+          (4, 0, 8, 8),
+          (0, 4, 4, 8),
+          (2, 0, 4, 4),
+          (0, 2, 2, 4),
+          (1, 0, 2, 2),
+          (0, 1, 1, 2))
+
+def group(s, n):
+    # See
+    # http://www.python.org/doc/2.6/library/functions.html#zip
+    return zip(*[iter(s)]*n)
+
+def isarray(x):
+    """Same as ``isinstance(x, array)`` except on Python 2.2, where it
+    always returns ``False``.  This helps PyPNG work on Python 2.2.
+    """
+
+    try:
+        return isinstance(x, array)
+    except:
+        return False
+
+try:  # see :pyver:old
+    array.tostring
+except:
+    def tostring(row):
+        l = len(row)
+        return struct.pack('%dB' % l, *row)
+else:
+    def tostring(row):
+        """Convert row of bytes to string.  Expects `row` to be an
+        ``array``.
+        """
+        return row.tostring()
+
+# Conditionally convert to bytes.  Works on Python 2 and Python 3.
+try:
+    bytes('', 'ascii')
+    def strtobytes(x): return bytes(x, 'iso8859-1')
+    def bytestostr(x): return str(x, 'iso8859-1')
+except:
+    strtobytes = str
+    bytestostr = str
+
+def interleave_planes(ipixels, apixels, ipsize, apsize):
+    """
+    Interleave (colour) planes, e.g. RGB + A = RGBA.
+
+    Return an array of pixels consisting of the `ipsize` elements of data
+    from each pixel in `ipixels` followed by the `apsize` elements of data
+    from each pixel in `apixels`.  Conventionally `ipixels` and
+    `apixels` are byte arrays so the sizes are bytes, but it actually
+    works with any arrays of the same type.  The returned array is the
+    same type as the input arrays which should be the same type as each other.
+    """
+
+    itotal = len(ipixels)
+    atotal = len(apixels)
+    newtotal = itotal + atotal
+    newpsize = ipsize + apsize
+    # Set up the output buffer
+    # See http://www.python.org/doc/2.4.4/lib/module-array.html#l2h-1356
+    out = array(ipixels.typecode)
+    # It's annoying that there is no cheap way to set the array size :-(
+    out.extend(ipixels)
+    out.extend(apixels)
+    # Interleave in the pixel data
+    for i in range(ipsize):
+        out[i:newtotal:newpsize] = ipixels[i:itotal:ipsize]
+    for i in range(apsize):
+        out[i+ipsize:newtotal:newpsize] = apixels[i:atotal:apsize]
+    return out
+
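+# Illustrative sketch (not part of upstream pypng): a hypothetical helper
+# showing what interleave_planes() does for a two-pixel RGB array plus a
+# matching alpha array.
+def _example_interleave_planes():
+    rgb = array('B', [1, 2, 3, 4, 5, 6])    # two RGB pixels
+    alpha = array('B', [200, 100])          # one alpha sample per pixel
+    # Returns array('B', [1, 2, 3, 200, 4, 5, 6, 100]), i.e. RGBA pixels.
+    return interleave_planes(rgb, alpha, 3, 1)
+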
+def check_palette(palette):
+    """Check a palette argument (to the :class:`Writer` class) for validity.
+    Returns the palette as a list if okay; raises an exception otherwise.
+    """
+
+    # None is the default and is allowed.
+    if palette is None:
+        return None
+
+    p = list(palette)
+    if not (0 < len(p) <= 256):
+        raise ValueError("a palette must have between 1 and 256 entries")
+    seen_triple = False
+    for i,t in enumerate(p):
+        if len(t) not in (3,4):
+            raise ValueError(
+              "palette entry %d: entries must be 3- or 4-tuples." % i)
+        if len(t) == 3:
+            seen_triple = True
+        if seen_triple and len(t) == 4:
+            raise ValueError(
+              "palette entry %d: all 4-tuples must precede all 3-tuples" % i)
+        for x in t:
+            if int(x) != x or not(0 <= x <= 255):
+                raise ValueError(
+                  "palette entry %d: values must be integer: 0 <= x <= 255" % i)
+    return p
+
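+# Illustrative sketch (not part of upstream pypng): a hypothetical helper
+# showing a palette that check_palette() accepts -- at most 256 entries,
+# each a 3- or 4-tuple of integers in 0..255, with every 4-tuple (the
+# entries carrying alpha) listed before any 3-tuple.
+def _example_check_palette():
+    return check_palette([(255, 0, 0, 128), (0, 255, 0), (0, 0, 255)])
+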
+class Error(Exception):
+    prefix = 'Error'
+    def __str__(self):
+        return self.prefix + ': ' + ' '.join(self.args)
+
+class FormatError(Error):
+    """Problem with input file format.  In other words, PNG file does
+    not conform to the specification in some way and is invalid.
+    """
+
+    prefix = 'FormatError'
+
+class ChunkError(FormatError):
+    prefix = 'ChunkError'
+
+
+class Writer:
+    """
+    PNG encoder in pure Python.
+    """
+
+    def __init__(self, width=None, height=None,
+                 size=None,
+                 greyscale=False,
+                 alpha=False,
+                 bitdepth=8,
+                 palette=None,
+                 transparent=None,
+                 background=None,
+                 gamma=None,
+                 compression=None,
+                 interlace=False,
+                 bytes_per_sample=None, # deprecated
+                 planes=None,
+                 colormap=None,
+                 maxval=None,
+                 chunk_limit=2**20):
+        """
+        Create a PNG encoder object.
+
+        Arguments:
+
+        width, height
+          Image size in pixels, as two separate arguments.
+        size
+          Image size (w,h) in pixels, as single argument.
+        greyscale
+          Input data is greyscale, not RGB.
+        alpha
+          Input data has alpha channel (RGBA or LA).
+        bitdepth
+          Bit depth: from 1 to 16.
+        palette
+          Create a palette for a colour mapped image (colour type 3).
+        transparent
+          Specify a transparent colour (create a ``tRNS`` chunk).
+        background
+          Specify a default background colour (create a ``bKGD`` chunk).
+        gamma
+          Specify a gamma value (create a ``gAMA`` chunk).
+        compression
+          zlib compression level: 0 (none) to 9 (more compressed); default: -1 or None.
+        interlace
+          Create an interlaced image.
+        chunk_limit
+          Write multiple ``IDAT`` chunks to save memory.
+
+        The image size (in pixels) can be specified either by using the
+        `width` and `height` arguments, or with the single `size`
+        argument.  If `size` is used it should be a pair (*width*,
+        *height*).
+
+        `greyscale` and `alpha` are booleans that specify whether
+        an image is greyscale (or colour), and whether it has an
+        alpha channel (or not).
+
+        `bitdepth` specifies the bit depth of the source pixel values.
+        Each source pixel value must be an integer between 0 and
+        ``2**bitdepth-1``.  For example, 8-bit images have values
+        between 0 and 255.  PNG only stores images with bit depths of
+        1,2,4,8, or 16.  When `bitdepth` is not one of these values,
+        the next highest valid bit depth is selected, and an ``sBIT``
+        (significant bits) chunk is generated that specifies the original
+        precision of the source image.  In this case the supplied pixel
+        values will be rescaled to fit the range of the selected bit depth.
+
+        The details of which bit depth / colour model combinations the
+        PNG file format supports directly, are somewhat arcane
+        (refer to the PNG specification for full details).  Briefly:
+        "small" bit depths (1,2,4) are only allowed with greyscale and
+        colour mapped images; colour mapped images cannot have bit depth
+        16.
+
+        For colour mapped images (in other words, when the `palette`
+        argument is specified) the `bitdepth` argument must match one of
+        the valid PNG bit depths: 1, 2, 4, or 8.  (It is valid to have a
+        PNG image with a palette and an ``sBIT`` chunk, but the meaning
+        is slightly different; it would be awkward to press the
+        `bitdepth` argument into service for this.)
+
+        The `palette` option, when specified, causes a colour mapped image
+        to be created: the PNG colour type is set to 3; greyscale
+        must not be set; alpha must not be set; transparent must
+        not be set; the bit depth must be 1,2,4, or 8.  When a colour
+        mapped image is created, the pixel values are palette indexes
+        and the `bitdepth` argument specifies the size of these indexes
+        (not the size of the colour values in the palette).
+
+        The palette argument value should be a sequence of 3- or
+        4-tuples.  3-tuples specify RGB palette entries; 4-tuples
+        specify RGBA palette entries.  If both 4-tuples and 3-tuples
+        appear in the sequence then all the 4-tuples must come
+        before all the 3-tuples.  A ``PLTE`` chunk is created; if there
+        are 4-tuples then a ``tRNS`` chunk is created as well.  The
+        ``PLTE`` chunk will contain all the RGB triples in the same
+        sequence; the ``tRNS`` chunk will contain the alpha channel for
+        all the 4-tuples, in the same sequence.  Palette entries
+        are always 8-bit.
+
+        If specified, the `transparent` and `background` parameters must
+        be a tuple with three integer values for red, green, blue, or
+        a simple integer (or singleton tuple) for a greyscale image.
+
+        If specified, the `gamma` parameter must be a positive number
+        (generally, a float).  A ``gAMA`` chunk will be created.  Note that
+        this will not change the values of the pixels as they appear in
+        the PNG file, they are assumed to have already been converted
+        appropriately for the gamma specified.
+
+        The `compression` argument specifies the compression level to
+        be used by the ``zlib`` module.  Values from 1 to 9 specify
+        compression, with 9 being "more compressed" (usually smaller
+        and slower, but it doesn't always work out that way).  0 means
+        no compression.  -1 and ``None`` both mean that the default
+        level of compression will be picked by the ``zlib`` module
+        (which is generally acceptable).
+
+        If `interlace` is true then an interlaced image is created
+        (using PNG's so far only interlace method, *Adam7*).  This does not
+        affect how the pixels should be presented to the encoder, rather
+        it changes how they are arranged into the PNG file.  On slow
+        connexions interlaced images can be partially decoded by the
+        browser to give a rough view of the image that is successively
+        refined as more image data appears.
+        
+        .. note ::
+        
+          Enabling the `interlace` option requires the entire image
+          to be processed in working memory.
+
+        `chunk_limit` is used to limit the amount of memory used whilst
+        compressing the image.  In order to avoid using large amounts of
+        memory, multiple ``IDAT`` chunks may be created.
+        """
+
+        # At the moment the `planes` argument is ignored;
+        # its purpose is to act as a dummy so that
+        # ``Writer(x, y, **info)`` works, where `info` is a dictionary
+        # returned by Reader.read and friends.
+        # Ditto for `colormap`.
+
+        # A couple of helper functions come first.  Best skipped if you
+        # are reading through.
+
+        def isinteger(x):
+            try:
+                return int(x) == x
+            except:
+                return False
+
+        def check_color(c, which):
+            """Checks that a colour argument for transparent or
+            background options is the right form.  Also "corrects" bare
+            integers to 1-tuples.
+            """
+
+            if c is None:
+                return c
+            if greyscale:
+                try:
+                    l = len(c)
+                except TypeError:
+                    c = (c,)
+                if len(c) != 1:
+                    raise ValueError("%s for greyscale must be 1-tuple" %
+                        which)
+                if not isinteger(c[0]):
+                    raise ValueError(
+                        "%s colour for greyscale must be integer" %
+                        which)
+            else:
+                if not (len(c) == 3 and
+                        isinteger(c[0]) and
+                        isinteger(c[1]) and
+                        isinteger(c[2])):
+                    raise ValueError(
+                        "%s colour must be a triple of integers" %
+                        which)
+            return c
+
+        if size:
+            if len(size) != 2:
+                raise ValueError(
+                  "size argument should be a pair (width, height)")
+            if width is not None and width != size[0]:
+                raise ValueError(
+                  "size[0] (%r) and width (%r) should match when both are used."
+                    % (size[0], width))
+            if height is not None and height != size[1]:
+                raise ValueError(
+                  "size[1] (%r) and height (%r) should match when both are used."
+                    % (size[1], height))
+            width,height = size
+        del size
+
+        if width <= 0 or height <= 0:
+            raise ValueError("width and height must be greater than zero")
+        if not isinteger(width) or not isinteger(height):
+            raise ValueError("width and height must be integers")
+        # http://www.w3.org/TR/PNG/#7Integers-and-byte-order
+        if width > 2**32-1 or height > 2**32-1:
+            raise ValueError("width and height cannot exceed 2**32-1")
+
+        if alpha and transparent is not None:
+            raise ValueError(
+                "transparent colour not allowed with alpha channel")
+
+        if bytes_per_sample is not None:
+            warnings.warn('please use bitdepth instead of bytes_per_sample',
+                          DeprecationWarning)
+            if bytes_per_sample not in (0.125, 0.25, 0.5, 1, 2):
+                raise ValueError(
+                    "bytes per sample must be .125, .25, .5, 1, or 2")
+            bitdepth = int(8*bytes_per_sample)
+        del bytes_per_sample
+        if not isinteger(bitdepth) or bitdepth < 1 or 16 < bitdepth:
+            raise ValueError("bitdepth (%r) must be a positive integer <= 16" %
+              bitdepth)
+
+        self.rescale = None
+        if palette:
+            if bitdepth not in (1,2,4,8):
+                raise ValueError("with palette, bitdepth must be 1, 2, 4, or 8")
+            if transparent is not None:
+                raise ValueError("transparent and palette not compatible")
+            if alpha:
+                raise ValueError("alpha and palette not compatible")
+            if greyscale:
+                raise ValueError("greyscale and palette not compatible")
+        else:
+            # No palette, check for sBIT chunk generation.
+            if alpha or not greyscale:
+                if bitdepth not in (8,16):
+                    targetbitdepth = (8,16)[bitdepth > 8]
+                    self.rescale = (bitdepth, targetbitdepth)
+                    bitdepth = targetbitdepth
+                    del targetbitdepth
+            else:
+                assert greyscale
+                assert not alpha
+                if bitdepth not in (1,2,4,8,16):
+                    if bitdepth > 8:
+                        targetbitdepth = 16
+                    elif bitdepth == 3:
+                        targetbitdepth = 4
+                    else:
+                        assert bitdepth in (5,6,7)
+                        targetbitdepth = 8
+                    self.rescale = (bitdepth, targetbitdepth)
+                    bitdepth = targetbitdepth
+                    del targetbitdepth
+
+        if bitdepth < 8 and (alpha or not greyscale and not palette):
+            raise ValueError(
+              "bitdepth < 8 only permitted with greyscale or palette")
+        if bitdepth > 8 and palette:
+            raise ValueError(
+                "bit depth must be 8 or less for images with palette")
+
+        transparent = check_color(transparent, 'transparent')
+        background = check_color(background, 'background')
+
+        # It's important that the true boolean values (greyscale, alpha,
+        # colormap, interlace) are converted to bool because Iverson's
+        # convention is relied upon later on.
+        self.width = width
+        self.height = height
+        self.transparent = transparent
+        self.background = background
+        self.gamma = gamma
+        self.greyscale = bool(greyscale)
+        self.alpha = bool(alpha)
+        self.colormap = bool(palette)
+        self.bitdepth = int(bitdepth)
+        self.compression = compression
+        self.chunk_limit = chunk_limit
+        self.interlace = bool(interlace)
+        self.palette = check_palette(palette)
+
+        self.color_type = 4*self.alpha + 2*(not greyscale) + 1*self.colormap
+        assert self.color_type in (0,2,3,4,6)
+
+        self.color_planes = (3,1)[self.greyscale or self.colormap]
+        self.planes = self.color_planes + self.alpha
+        # :todo: fix for bitdepth < 8
+        self.psize = (self.bitdepth/8) * self.planes
+
+    def make_palette(self):
+        """Create the byte sequences for a ``PLTE`` and if necessary a
+        ``tRNS`` chunk.  Returned as a pair (*p*, *t*).  *t* will be
+        ``None`` if no ``tRNS`` chunk is necessary.
+        """
+
+        p = array('B')
+        t = array('B')
+
+        for x in self.palette:
+            p.extend(x[0:3])
+            if len(x) > 3:
+                t.append(x[3])
+        p = tostring(p)
+        t = tostring(t)
+        if t:
+            return p,t
+        return p,None
+
+    def write(self, outfile, rows):
+        """Write a PNG image to the output file.  `rows` should be
+        an iterable that yields each row in boxed row flat pixel format.
+        The rows should be the rows of the original image, so there
+        should be ``self.height`` rows of ``self.width * self.planes`` values.
+        If `interlace` is specified (when creating the instance), then
+        an interlaced PNG file will be written.  Supply the rows in the
+        normal image order; the interlacing is carried out internally.
+        
+        .. note ::
+
+          Interlacing will require the entire image to be in working memory.
+        """
+
+        if self.interlace:
+            fmt = 'BH'[self.bitdepth > 8]
+            a = array(fmt, itertools.chain(*rows))
+            return self.write_array(outfile, a)
+        else:
+            nrows = self.write_passes(outfile, rows)
+            if nrows != self.height:
+                raise ValueError(
+                  "rows supplied (%d) does not match height (%d)" %
+                  (nrows, self.height))
+
+    def write_passes(self, outfile, rows, packed=False):
+        """
+        Write a PNG image to the output file.
+
+        Most users are expected to find the :meth:`write` or
+        :meth:`write_array` method more convenient.
+        
+        The rows should be given to this method in the order that
+        they appear in the output file.  For straightlaced images,
+        this is the usual top to bottom ordering, but for interlaced
+        images the rows should have already been interlaced before
+        passing them to this function.
+
+        `rows` should be an iterable that yields each row.  When
+        `packed` is ``False`` the rows should be in boxed row flat pixel
+        format; when `packed` is ``True`` each row should be a packed
+        sequence of bytes.
+
+        """
+
+        # http://www.w3.org/TR/PNG/#5PNG-file-signature
+        outfile.write(_signature)
+
+        # http://www.w3.org/TR/PNG/#11IHDR
+        write_chunk(outfile, 'IHDR',
+                    struct.pack("!2I5B", self.width, self.height,
+                                self.bitdepth, self.color_type,
+                                0, 0, self.interlace))
+
+        # See :chunk:order
+        # http://www.w3.org/TR/PNG/#11gAMA
+        if self.gamma is not None:
+            write_chunk(outfile, 'gAMA',
+                        struct.pack("!L", int(round(self.gamma*1e5))))
+
+        # See :chunk:order
+        # http://www.w3.org/TR/PNG/#11sBIT
+        if self.rescale:
+            write_chunk(outfile, 'sBIT',
+                struct.pack('%dB' % self.planes,
+                            *[self.rescale[0]]*self.planes))
+        
+        # :chunk:order: Without a palette (PLTE chunk), ordering is
+        # relatively relaxed.  With one, gAMA chunk must precede PLTE
+        # chunk which must precede tRNS and bKGD.
+        # See http://www.w3.org/TR/PNG/#5ChunkOrdering
+        if self.palette:
+            p,t = self.make_palette()
+            write_chunk(outfile, 'PLTE', p)
+            if t:
+                # tRNS chunk is optional.  Only needed if palette entries
+                # have alpha.
+                write_chunk(outfile, 'tRNS', t)
+
+        # http://www.w3.org/TR/PNG/#11tRNS
+        if self.transparent is not None:
+            if self.greyscale:
+                write_chunk(outfile, 'tRNS',
+                            struct.pack("!1H", *self.transparent))
+            else:
+                write_chunk(outfile, 'tRNS',
+                            struct.pack("!3H", *self.transparent))
+
+        # http://www.w3.org/TR/PNG/#11bKGD
+        if self.background is not None:
+            if self.greyscale:
+                write_chunk(outfile, 'bKGD',
+                            struct.pack("!1H", *self.background))
+            else:
+                write_chunk(outfile, 'bKGD',
+                            struct.pack("!3H", *self.background))
+
+        # http://www.w3.org/TR/PNG/#11IDAT
+        if self.compression is not None:
+            compressor = zlib.compressobj(self.compression)
+        else:
+            compressor = zlib.compressobj()
+
+        # Choose an extend function based on the bitdepth.  The extend
+        # function packs/decomposes the pixel values into bytes and
+        # stuffs them onto the data array.
+        data = array('B')
+        if self.bitdepth == 8 or packed:
+            extend = data.extend
+        elif self.bitdepth == 16:
+            # Decompose into bytes
+            def extend(sl):
+                fmt = '!%dH' % len(sl)
+                data.extend(array('B', struct.pack(fmt, *sl)))
+        else:
+            # Pack into bytes
+            assert self.bitdepth < 8
+            # samples per byte
+            spb = int(8/self.bitdepth)
+            def extend(sl):
+                a = array('B', sl)
+                # Adding padding bytes so we can group into a whole
+                # number of spb-tuples.
+                l = float(len(a))
+                extra = math.ceil(l / float(spb))*spb - l
+                a.extend([0]*int(extra))
+                # Pack into bytes
+                l = group(a, spb)
+                l = map(lambda e: reduce(lambda x,y:
+                                           (x << self.bitdepth) + y, e), l)
+                data.extend(l)
+        if self.rescale:
+            oldextend = extend
+            factor = \
+              float(2**self.rescale[1]-1) / float(2**self.rescale[0]-1)
+            def extend(sl):
+                oldextend(map(lambda x: int(round(factor*x)), sl))
+
+        # Build the first row, testing mostly to see if we need to
+        # change the extend function to cope with NumPy integer types
+        # (they cause our ordinary definition of extend to fail, so we
+        # wrap it).  See
+        # http://code.google.com/p/pypng/issues/detail?id=44
+        enumrows = enumerate(rows)
+        del rows
+
+        # First row's filter type.
+        data.append(0)
+        # :todo: Certain exceptions in the call to ``.next()`` or the
+        # following try would indicate no row data supplied.
+        # Should catch.
+        i,row = enumrows.next()
+        try:
+            # If this fails...
+            extend(row)
+        except:
+            # ... try a version that converts the values to int first.
+            # Not only does this work for the (slightly broken) NumPy
+            # types, there are probably lots of other, unknown, "nearly"
+            # int types it works for.
+            def wrapmapint(f):
+                return lambda sl: f(map(int, sl))
+            extend = wrapmapint(extend)
+            del wrapmapint
+            extend(row)
+
+        for i,row in enumrows:
+            # Add "None" filter type.  Currently, it's essential that
+            # this filter type be used for every scanline as we do not
+            # mark the first row of a reduced pass image; that means we
+            # could accidentally compute the wrong filtered scanline if
+            # we used "up", "average", or "paeth" on such a line.
+            data.append(0)
+            extend(row)
+            if len(data) > self.chunk_limit:
+                compressed = compressor.compress(tostring(data))
+                if len(compressed):
+                    # print >> sys.stderr, len(data), len(compressed)
+                    write_chunk(outfile, 'IDAT', compressed)
+                # Because of our very witty definition of ``extend``,
+                # above, we must re-use the same ``data`` object.  Hence
+                # we use ``del`` to empty this one, rather than create a
+                # fresh one (which would be my natural FP instinct).
+                del data[:]
+        if len(data):
+            compressed = compressor.compress(tostring(data))
+        else:
+            compressed = ''
+        flushed = compressor.flush()
+        if len(compressed) or len(flushed):
+            # print >> sys.stderr, len(data), len(compressed), len(flushed)
+            write_chunk(outfile, 'IDAT', compressed + flushed)
+        # http://www.w3.org/TR/PNG/#11IEND
+        write_chunk(outfile, 'IEND')
+        return i+1
+
+    def write_array(self, outfile, pixels):
+        """
+        Write an array in flat row flat pixel format as a PNG file on
+        the output file.  See also :meth:`write` method.
+        """
+
+        if self.interlace:
+            self.write_passes(outfile, self.array_scanlines_interlace(pixels))
+        else:
+            self.write_passes(outfile, self.array_scanlines(pixels))
+
+    def write_packed(self, outfile, rows):
+        """
+        Write PNG file to `outfile`.  The pixel data comes from `rows`
+        which should be in boxed row packed format.  Each row should be
+        a sequence of packed bytes.
+
+        Technically, this method does work for interlaced images but it
+        is best avoided.  For interlaced images, the rows should be
+        presented in the order that they appear in the file.
+
+        This method should not be used when the source image bit depth
+        is not one naturally supported by PNG; the bit depth should be
+        1, 2, 4, 8, or 16.
+        """
+
+        if self.rescale:
+            raise Error("write_packed method not suitable for bit depth %d" %
+              self.rescale[0])
+        return self.write_passes(outfile, rows, packed=True)
+
+    def convert_pnm(self, infile, outfile):
+        """
+        Convert a PNM file containing raw pixel data into a PNG file
+        with the parameters set in the writer object.  Works for
+        (binary) PGM, PPM, and PAM formats.
+        """
+
+        if self.interlace:
+            pixels = array('B')
+            pixels.fromfile(infile,
+                            (self.bitdepth/8) * self.color_planes *
+                            self.width * self.height)
+            self.write_passes(outfile, self.array_scanlines_interlace(pixels))
+        else:
+            self.write_passes(outfile, self.file_scanlines(infile))
+
+    def convert_ppm_and_pgm(self, ppmfile, pgmfile, outfile):
+        """
+        Convert a PPM and PGM file containing raw pixel data into a
+        PNG outfile with the parameters set in the writer object.
+        """
+        pixels = array('B')
+        pixels.fromfile(ppmfile,
+                        (self.bitdepth/8) * self.color_planes *
+                        self.width * self.height)
+        apixels = array('B')
+        apixels.fromfile(pgmfile,
+                         (self.bitdepth/8) *
+                         self.width * self.height)
+        pixels = interleave_planes(pixels, apixels,
+                                   (self.bitdepth/8) * self.color_planes,
+                                   (self.bitdepth/8))
+        if self.interlace:
+            self.write_passes(outfile, self.array_scanlines_interlace(pixels))
+        else:
+            self.write_passes(outfile, self.array_scanlines(pixels))
+
+    def file_scanlines(self, infile):
+        """
+        Generates boxed rows in flat pixel format, from the input file
+        `infile`.  It assumes that the input file is in a "Netpbm-like"
+        binary format, and is positioned at the beginning of the first
+        pixel.  The number of pixels to read is taken from the image
+        dimensions (`width`, `height`, `planes`) and the number of bytes
+        per value is implied by the image `bitdepth`.
+        """
+
+        # Values per row
+        vpr = self.width * self.planes
+        row_bytes = vpr
+        if self.bitdepth > 8:
+            assert self.bitdepth == 16
+            row_bytes *= 2
+            fmt = '>%dH' % vpr
+            def line():
+                return array('H', struct.unpack(fmt, infile.read(row_bytes)))
+        else:
+            def line():
+                scanline = array('B', infile.read(row_bytes))
+                return scanline
+        for y in range(self.height):
+            yield line()
+
+    def array_scanlines(self, pixels):
+        """
+        Generates boxed rows (flat pixels) from flat rows (flat pixels)
+        in an array.
+        """
+
+        # Values per row
+        vpr = self.width * self.planes
+        stop = 0
+        for y in range(self.height):
+            start = stop
+            stop = start + vpr
+            yield pixels[start:stop]
+
+    def array_scanlines_interlace(self, pixels):
+        """
+        Generator for interlaced scanlines from an array.  `pixels` is
+        the full source image in flat row flat pixel format.  The
+        generator yields each scanline of the reduced passes in turn, in
+        boxed row flat pixel format.
+        """
+
+        # http://www.w3.org/TR/PNG/#8InterlaceMethods
+        # Array type.
+        fmt = 'BH'[self.bitdepth > 8]
+        # Value per row
+        vpr = self.width * self.planes
+        for xstart, ystart, xstep, ystep in _adam7:
+            if xstart >= self.width:
+                continue
+            # Pixels per row (of reduced image)
+            ppr = int(math.ceil((self.width-xstart)/float(xstep)))
+            # number of values in reduced image row.
+            row_len = ppr*self.planes
+            for y in range(ystart, self.height, ystep):
+                if xstep == 1:
+                    offset = y * vpr
+                    yield pixels[offset:offset+vpr]
+                else:
+                    row = array(fmt)
+                    # There's no easier way to set the length of an array
+                    row.extend(pixels[0:row_len])
+                    offset = y * vpr + xstart * self.planes
+                    end_offset = (y+1) * vpr
+                    skip = self.planes * xstep
+                    for i in range(self.planes):
+                        row[i::self.planes] = \
+                            pixels[offset+i:end_offset:skip]
+                    yield row
+
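+# Illustrative sketch (not part of upstream pypng): a hypothetical helper
+# showing the Writer class used with rows in the "boxed row flat pixel"
+# format described in the module docstring -- one list per row, with three
+# values (R, G, B) for each pixel.  Usage (hypothetical):
+# _example_write_rgb(open('tiny.png', 'wb')).
+def _example_write_rgb(outfile):
+    rows = [[255, 0, 0,   0, 255, 0,   0, 0, 255],
+            [0, 0, 0,   127, 127, 127,   255, 255, 255]]
+    w = Writer(width=3, height=2, greyscale=False, alpha=False, bitdepth=8)
+    w.write(outfile, rows)
+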
+def write_chunk(outfile, tag, data=strtobytes('')):
+    """
+    Write a PNG chunk to the output file, including length and
+    checksum.
+    """
+
+    # http://www.w3.org/TR/PNG/#5Chunk-layout
+    outfile.write(struct.pack("!I", len(data)))
+    tag = strtobytes(tag)
+    outfile.write(tag)
+    outfile.write(data)
+    checksum = zlib.crc32(tag)
+    checksum = zlib.crc32(data, checksum)
+    checksum &= 2**32-1
+    outfile.write(struct.pack("!I", checksum))
+
+def write_chunks(out, chunks):
+    """Create a PNG file by writing out the chunks."""
+
+    out.write(_signature)
+    for chunk in chunks:
+        write_chunk(out, *chunk)
+
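+# Illustrative sketch (not part of upstream pypng): a hypothetical helper
+# showing write_chunks() assembling a minimal 1x1 greyscale PNG directly
+# from chunks (IHDR, IDAT, IEND).
+def _example_write_chunks(outfile):
+    # IHDR: width=1, height=1, bit depth 8, colour type 0 (greyscale),
+    # compression 0, filter 0, no interlace.
+    ihdr = struct.pack("!2I5B", 1, 1, 8, 0, 0, 0, 0)
+    # IDAT: one scanline = filter type byte 0 followed by a single sample.
+    idat = zlib.compress(strtobytes('\x00\x7f'))
+    write_chunks(outfile,
+                 [('IHDR', ihdr), ('IDAT', idat), ('IEND', strtobytes(''))])
+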
+def filter_scanline(type, line, fo, prev=None):
+    """Apply a scanline filter to a scanline.  `type` specifies the
+    filter type (0 to 4); `line` specifies the current (unfiltered)
+    scanline as a sequence of bytes; `prev` specifies the previous
+    (unfiltered) scanline as a sequence of bytes. `fo` specifies the
+    filter offset; normally this is size of a pixel in bytes (the number
+    of bytes per sample times the number of channels), but when this is
+    < 1 (for bit depths < 8) then the filter offset is 1.
+    """
+
+    assert 0 <= type < 5
+
+    # The output array.  Which, pathetically, we extend one-byte at a
+    # time (fortunately this is linear).
+    out = array('B', [type])
+
+    def sub():
+        ai = -fo
+        for x in line:
+            if ai >= 0:
+                x = (x - line[ai]) & 0xff
+            out.append(x)
+            ai += 1
+    def up():
+        for i,x in enumerate(line):
+            x = (x - prev[i]) & 0xff
+            out.append(x)
+    def average():
+        ai = -fo
+        for i,x in enumerate(line):
+            if ai >= 0:
+                x = (x - ((line[ai] + prev[i]) >> 1)) & 0xff
+            else:
+                x = (x - (prev[i] >> 1)) & 0xff
+            out.append(x)
+            ai += 1
+    def paeth():
+        # http://www.w3.org/TR/PNG/#9Filter-type-4-Paeth
+        ai = -fo # also used for ci
+        for i,x in enumerate(line):
+            a = 0
+            b = prev[i]
+            c = 0
+
+            if ai >= 0:
+                a = line[ai]
+                c = prev[ai]
+            p = a + b - c
+            pa = abs(p - a)
+            pb = abs(p - b)
+            pc = abs(p - c)
+            if pa <= pb and pa <= pc: Pr = a
+            elif pb <= pc: Pr = b
+            else: Pr = c
+
+            x = (x - Pr) & 0xff
+            out.append(x)
+            ai += 1
+
+    if not prev:
+        # We're on the first line.  Some of the filters can be reduced
+        # to simpler cases which makes handling the line "off the top"
+        # of the image simpler.  "up" becomes "none"; "paeth" becomes
+        # "left" (non-trivial, but true). "average" needs to be handled
+        # specially.
+        if type == 2: # "up"
+            # "up" on the first line is the same as "none"; keep the
+            # filter-type byte consistent instead of returning the bare line.
+            out[0] = 0
+            type = 0
+        elif type == 3:
+            prev = [0]*len(line)
+        elif type == 4: # "paeth"
+            type = 1
+    if type == 0:
+        out.extend(line)
+    elif type == 1:
+        sub()
+    elif type == 2:
+        up()
+    elif type == 3:
+        average()
+    else: # type == 4
+        paeth()
+    return out
+
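+# Illustrative sketch (not part of the PyPNG API; the helper name is
+# made up): applies the "sub" filter (type 1) to a two-pixel RGB
+# scanline.  The result starts with the filter-type byte; the first
+# pixel is unchanged and the second is stored as per-byte differences.
+def _example_sub_filter():
+    line = array('B', [10, 20, 30, 13, 24, 35])
+    out = filter_scanline(1, line, 3)
+    assert list(out) == [1, 10, 20, 30, 3, 4, 5]
+    return out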
+
+def from_array(a, mode=None, info={}):
+    """Create a PNG :class:`Image` object from a 2- or 3-dimensional array.
+    One application of this function is easy PIL-style saving:
+    ``png.from_array(pixels, 'L').save('foo.png')``.
+
+    .. note::
+
+      The use of the term *3-dimensional* is for marketing purposes
+      only.  It doesn't actually work.  Please bear with us.  Meanwhile
+      enjoy the complimentary snacks (on request) and please use a
+      2-dimensional array.
+    
+    Unless they are specified using the *info* parameter, the PNG's
+    height and width are taken from the array size.  For a 3 dimensional
+    array the first axis is the height; the second axis is the width;
+    and the third axis is the channel number.  Thus an RGB image that is
+    16 pixels high and 8 wide will use an array that is 16x8x3.  For 2
+    dimensional arrays the first axis is the height, but the second axis
+    is ``width*channels``, so an RGB image that is 16 pixels high and 8
+    wide will use a 2-dimensional array that is 16x24 (each row will be
+    8*3==24 sample values).
+
+    *mode* is a string that specifies the image colour format in a
+    PIL-style mode.  It can be:
+
+    ``'L'``
+      greyscale (1 channel)
+    ``'LA'``
+      greyscale with alpha (2 channel)
+    ``'RGB'``
+      colour image (3 channel)
+    ``'RGBA'``
+      colour image with alpha (4 channel)
+
+    The mode string can also specify the bit depth (overriding how this
+    function normally derives the bit depth, see below).  Appending
+    ``';16'`` to the mode will cause the PNG to be 16 bits per channel;
+    any decimal from 1 to 16 can be used to specify the bit depth.
+
+    When a 2-dimensional array is used *mode* determines how many
+    channels the image has, and so allows the width to be derived from
+    the second array dimension.
+
+    The array is expected to be a ``numpy`` array, but it can be any
+    suitable Python sequence.  For example, a list of lists can be used:
+    ``png.from_array([[0, 255, 0], [255, 0, 255]], 'L')``.  The exact
+    rules are: ``len(a)`` gives the first dimension, height;
+    ``len(a[0])`` gives the second dimension; ``len(a[0][0])`` gives the
+    third dimension, unless an exception is raised in which case a
+    2-dimensional array is assumed.  It's slightly more complicated than
+    that because an iterator of rows can be used, and it all still
+    works.  Using an iterator allows data to be streamed efficiently.
+
+    The bit depth of the PNG is normally taken from the array element's
+    datatype (but if *mode* specifies a bitdepth then that is used
+    instead).  The array element's datatype is determined in a way which
+    is supposed to work both for ``numpy`` arrays and for Python
+    ``array.array`` objects.  A 1 byte datatype will give a bit depth of
+    8, a 2 byte datatype will give a bit depth of 16.  If the datatype
+    does not have an implicit size, for example it is a plain Python
+    list of lists, as above, then a default of 8 is used.
+
+    The *info* parameter is a dictionary that can be used to specify
+    metadata (in the same style as the arguments to the
+    :class:``png.Writer`` class).  For this function the keys that are
+    useful are:
+    
+    height
+      overrides the height derived from the array dimensions and allows
+      *a* to be an iterable.
+    width
+      overrides the width derived from the array dimensions.
+    bitdepth
+      overrides the bit depth derived from the element datatype (but
+      must match *mode* if that also specifies a bit depth).
+
+    Generally anything specified in the
+    *info* dictionary will override any implicit choices that this
+    function would otherwise make, but must match any explicit ones.
+    For example, if the *info* dictionary has a ``greyscale`` key then
+    this must be true when mode is ``'L'`` or ``'LA'`` and false when
+    mode is ``'RGB'`` or ``'RGBA'``.
+    """
+
+    # We abuse the *info* parameter by modifying it.  Take a copy here.
+    # (Also typechecks *info* to some extent).
+    info = dict(info)
+
+    # Syntax check mode string.
+    bitdepth = None
+    try:
+        mode = mode.split(';')
+        if len(mode) not in (1,2):
+            raise Error()
+        if mode[0] not in ('L', 'LA', 'RGB', 'RGBA'):
+            raise Error()
+        if len(mode) == 2:
+            try:
+                bitdepth = int(mode[1])
+            except:
+                raise Error()
+    except Error:
+        raise Error("mode string should be 'RGB' or 'L;16' or similar.")
+    mode = mode[0]
+
+    # Get bitdepth from *mode* if possible.
+    if bitdepth:
+        if info.get('bitdepth') and bitdepth != info['bitdepth']:
+            raise Error("mode bitdepth (%d) should match info bitdepth (%d)." %
+              (bitdepth, info['bitdepth']))
+        info['bitdepth'] = bitdepth
+
+    # Fill in and/or check entries in *info*.
+    # Dimensions.
+    if 'size' in info:
+        # Check width, height, size all match where used.
+        for dimension,axis in [('width', 0), ('height', 1)]:
+            if dimension in info:
+                if info[dimension] != info['size'][axis]:
+                    raise Error(
+                      "info[%r] should match info['size'][%r]." %
+                      (dimension, axis))
+        info['width'],info['height'] = info['size']
+    if 'height' not in info:
+        try:
+            l = len(a)
+        except:
+            raise Error(
+              "len(a) does not work, supply info['height'] instead.")
+        info['height'] = l
+    # Colour format.
+    if 'greyscale' in info:
+        if bool(info['greyscale']) != ('L' in mode):
+            raise Error("info['greyscale'] should match mode.")
+    info['greyscale'] = 'L' in mode
+    if 'alpha' in info:
+        if bool(info['alpha']) != ('A' in mode):
+            raise Error("info['alpha'] should match mode.")
+    info['alpha'] = 'A' in mode
+
+    planes = len(mode)
+    if 'planes' in info:
+        if info['planes'] != planes:
+            raise Error("info['planes'] should match mode.")
+
+    # In order to work out whether the array is 2D or 3D we need its
+    # first row, which requires that we take a copy of its iterator.
+    # We may also need the first row to derive width and bitdepth.
+    a,t = itertools.tee(a)
+    row = t.next()
+    del t
+    try:
+        row[0][0]
+        threed = True
+        testelement = row[0]
+    except:
+        threed = False
+        testelement = row
+    if 'width' not in info:
+        if threed:
+            width = len(row)
+        else:
+            width = len(row) // planes
+        info['width'] = width
+
+    # Not implemented yet
+    assert not threed
+
+    if 'bitdepth' not in info:
+        try:
+            dtype = testelement.dtype
+            # goto the "else:" clause.  Sorry.
+        except:
+            try:
+                # Try a Python array.array.
+                bitdepth = 8 * testelement.itemsize
+            except:
+                # We can't determine it from the array element's
+                # datatype, use a default of 8.
+                bitdepth = 8
+        else:
+            # If we got here without exception, we now assume that
+            # the array is a numpy array.
+            if dtype.kind == 'b':
+                bitdepth = 1
+            else:
+                bitdepth = 8 * dtype.itemsize
+        info['bitdepth'] = bitdepth
+
+    for thing in 'width height bitdepth greyscale alpha'.split():
+        assert thing in info
+    return Image(a, info)
+
+# So that refugees from PIL feel more at home.  Not documented.
+fromarray = from_array
+
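+# Illustrative usage sketch (the helper name and filename are made up,
+# not part of the PyPNG API): from_array with a 2-dimensional list of
+# rows and the 'L' (greyscale) mode, saved PIL-style.
+def _example_from_array(filename='ramp.png'):
+    rows = [[0, 85, 170, 255],
+            [255, 170, 85, 0]]
+    from_array(rows, 'L').save(filename)
+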
+class Image:
+    """A PNG image.
+    You can create an :class:`Image` object from an array of pixels by calling
+    :meth:`png.from_array`.  It can be saved to disk with the
+    :meth:`save` method."""
+    def __init__(self, rows, info):
+        """
+        .. note::
+        
+          The constructor is not public.  Please do not call it.
+        """
+        
+        self.rows = rows
+        self.info = info
+
+    def save(self, file):
+        """Save the image to *file*.  If *file* looks like an open file
+        descriptor then it is used, otherwise it is treated as a
+        filename and a fresh file is opened.
+
+        In general, you can only call this method once; after it has
+        been called the first time and the PNG image has been saved, the
+        source data will have been streamed, and cannot be streamed
+        again.
+        """
+
+        w = Writer(**self.info)
+
+        try:
+            file.write
+            def close(): pass
+        except:
+            file = open(file, 'wb')
+            def close(): file.close()
+
+        try:
+            w.write(file, self.rows)
+        finally:
+            close()
+
+class _readable:
+    """
+    A simple file-like interface for strings and arrays.
+    """
+
+    def __init__(self, buf):
+        self.buf = buf
+        self.offset = 0
+
+    def read(self, n):
+        r = self.buf[self.offset:self.offset+n]
+        if isarray(r):
+            r = r.tostring()
+        self.offset += n
+        return r
+
+
+class Reader:
+    """
+    PNG decoder in pure Python.
+    """
+
+    def __init__(self, _guess=None, **kw):
+        """
+        Create a PNG decoder object.
+
+        The constructor expects exactly one keyword argument. If you
+        supply a positional argument instead, it will guess the input
+        type. You can choose among the following keyword arguments:
+
+        filename
+          Name of input file (a PNG file).
+        file
+          A file-like object (object with a read() method).
+        bytes
+          ``array`` or ``string`` with PNG data.
+
+        """
+        if ((_guess is not None and len(kw) != 0) or
+            (_guess is None and len(kw) != 1)):
+            raise TypeError("Reader() takes exactly 1 argument")
+
+        # Will be the first 8 bytes, later on.  See validate_signature.
+        self.signature = None
+        self.transparent = None
+        # A pair of (len,type) if a chunk has been read but its data and
+        # checksum have not (in other words the file position is just
+        # past the 4 bytes that specify the chunk type).  See preamble
+        # method for how this is used.
+        self.atchunk = None
+
+        if _guess is not None:
+            if isarray(_guess):
+                kw["bytes"] = _guess
+            elif isinstance(_guess, str):
+                kw["filename"] = _guess
+            elif isinstance(_guess, file):
+                kw["file"] = _guess
+
+        if "filename" in kw:
+            self.file = open(kw["filename"], "rb")
+        elif "file" in kw:
+            self.file = kw["file"]
+        elif "bytes" in kw:
+            self.file = _readable(kw["bytes"])
+        else:
+            raise TypeError("expecting filename, file or bytes array")
+
+
+    def chunk(self, seek=None, lenient=False):
+        """
+        Read the next PNG chunk from the input file; returns a
+        (*type*,*data*) tuple.  *type* is the chunk's type as a string
+        (all PNG chunk types are 4 characters long).  *data* is the
+        chunk's data content, as a string.
+
+        If the optional `seek` argument is
+        specified then it will keep reading chunks until it either runs
+        out of file or finds the type specified by the argument.  Note
+        that in general the order of chunks in PNGs is unspecified, so
+        using `seek` can cause you to miss chunks.
+
+        If the optional `lenient` argument evaluates to True,
+        checksum failures will raise warnings rather than exceptions.
+        """
+
+        self.validate_signature()
+
+        while True:
+            # http://www.w3.org/TR/PNG/#5Chunk-layout
+            if not self.atchunk:
+                self.atchunk = self.chunklentype()
+            length,type = self.atchunk
+            self.atchunk = None
+            data = self.file.read(length)
+            if len(data) != length:
+                raise ChunkError('Chunk %s too short for required %i octets.'
+                  % (type, length))
+            checksum = self.file.read(4)
+            if len(checksum) != 4:
+                raise ValueError('Chunk %s too short for checksum.' % type)
+            if seek and type != seek:
+                continue
+            verify = zlib.crc32(strtobytes(type))
+            verify = zlib.crc32(data, verify)
+            # Whether the output from zlib.crc32 is signed or not varies
+            # according to hideous implementation details, see
+            # http://bugs.python.org/issue1202 .
+            # We coerce it to be positive here (in a way which works on
+            # Python 2.3 and older).
+            verify &= 2**32 - 1
+            verify = struct.pack('!I', verify)
+            if checksum != verify:
+                # print repr(checksum)
+                (a, ) = struct.unpack('!I', checksum)
+                (b, ) = struct.unpack('!I', verify)
+                message = "Checksum error in %s chunk: 0x%08X != 0x%08X." % (type, a, b)
+                if lenient:
+                    warnings.warn(message, RuntimeWarning)
+                else:
+                    raise ChunkError(message)
+            return type, data
+
+    def chunks(self):
+        """Return an iterator that will yield each chunk as a
+        (*chunktype*, *content*) pair.
+        """
+
+        while True:
+            t,v = self.chunk()
+            yield t,v
+            if t == 'IEND':
+                break
+
+    def undo_filter(self, filter_type, scanline, previous):
+        """Undo the filter for a scanline.  `scanline` is a sequence of
+        bytes that does not include the initial filter type byte.
+        `previous` is the decoded previous scanline (for straightlaced
+        images this is the previous pixel row, but for interlaced
+        images, it is the previous scanline in the reduced image, which
+        in general is not the previous pixel row in the final image).
+        When there is no previous scanline (the first row of a
+        straightlaced image, or the first row in one of the passes in an
+        interlaced image), then this argument should be ``None``.
+
+        The scanline will have the effects of filtering removed, and the
+        result will be returned as a fresh sequence of bytes.
+        """
+
+        # :todo: Would it be better to update scanline in place?
+        # Yes, with the Cython extension making the undo_filter fast,
+        # updating scanline inplace makes the code 3 times faster
+        # (reading 50 images of 800x800 went from 40s to 16s)
+        result = scanline
+
+        if filter_type == 0:
+            return result
+
+        if filter_type not in (1,2,3,4):
+            raise FormatError('Invalid PNG Filter Type.'
+              '  See http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters .')
+
+        # Filter unit.  The stride from one byte to the corresponding
+        # byte of the previous pixel.  Normally this is the pixel size
+        # in bytes, but when this is smaller than 1, the previous byte
+        # is used instead.
+        fu = max(1, self.psize)
+
+        # For the first line of a pass, synthesize a dummy previous
+        # line.  An alternative approach would be to observe that on the
+        # first line 'up' is the same as 'null', 'paeth' is the same
+        # as 'sub', with only 'average' requiring any special case.
+        if not previous:
+            previous = array('B', [0]*len(scanline))
+
+        def sub():
+            """Undo sub filter."""
+
+            ai = 0
+            # Loop starts at index fu.  Observe that the initial part
+            # of the result is already filled in correctly with
+            # scanline.
+            for i in range(fu, len(result)):
+                x = scanline[i]
+                a = result[ai]
+                result[i] = (x + a) & 0xff
+                ai += 1
+
+        def up():
+            """Undo up filter."""
+
+            for i in range(len(result)):
+                x = scanline[i]
+                b = previous[i]
+                result[i] = (x + b) & 0xff
+
+        def average():
+            """Undo average filter."""
+
+            ai = -fu
+            for i in range(len(result)):
+                x = scanline[i]
+                if ai < 0:
+                    a = 0
+                else:
+                    a = result[ai]
+                b = previous[i]
+                result[i] = (x + ((a + b) >> 1)) & 0xff
+                ai += 1
+
+        def paeth():
+            """Undo Paeth filter."""
+
+            # Also used for ci.
+            ai = -fu
+            for i in range(len(result)):
+                x = scanline[i]
+                if ai < 0:
+                    a = c = 0
+                else:
+                    a = result[ai]
+                    c = previous[ai]
+                b = previous[i]
+                p = a + b - c
+                pa = abs(p - a)
+                pb = abs(p - b)
+                pc = abs(p - c)
+                if pa <= pb and pa <= pc:
+                    pr = a
+                elif pb <= pc:
+                    pr = b
+                else:
+                    pr = c
+                result[i] = (x + pr) & 0xff
+                ai += 1
+
+        # Call appropriate filter algorithm.  Note that 0 has already
+        # been dealt with.
+        (None,
+         pngfilters.undo_filter_sub,
+         pngfilters.undo_filter_up,
+         pngfilters.undo_filter_average,
+         pngfilters.undo_filter_paeth)[filter_type](fu, scanline, previous, result)
+        return result
+
+    def deinterlace(self, raw):
+        """
+        Read raw pixel data, undo filters, deinterlace, and flatten.
+        Return in flat row flat pixel format.
+        """
+
+        # print >> sys.stderr, ("Reading interlaced, w=%s, r=%s, planes=%s," +
+        #     " bpp=%s") % (self.width, self.height, self.planes, self.bps)
+        # Values per row (of the target image)
+        vpr = self.width * self.planes
+
+        # Make a result array, and make it big enough.  Interleaving
+        # writes to the output array randomly (well, not quite), so the
+        # entire output array must be in memory.
+        fmt = 'BH'[self.bitdepth > 8]
+        a = array(fmt, [0]*vpr*self.height)
+        source_offset = 0
+
+        for xstart, ystart, xstep, ystep in _adam7:
+            # print >> sys.stderr, "Adam7: start=%s,%s step=%s,%s" % (
+            #     xstart, ystart, xstep, ystep)
+            if xstart >= self.width:
+                continue
+            # The previous (reconstructed) scanline.  None at the
+            # beginning of a pass to indicate that there is no previous
+            # line.
+            recon = None
+            # Pixels per row (reduced pass image)
+            ppr = int(math.ceil((self.width-xstart)/float(xstep)))
+            # Row size in bytes for this pass.
+            row_size = int(math.ceil(self.psize * ppr))
+            for y in range(ystart, self.height, ystep):
+                filter_type = raw[source_offset]
+                source_offset += 1
+                scanline = raw[source_offset:source_offset+row_size]
+                source_offset += row_size
+                recon = self.undo_filter(filter_type, scanline, recon)
+                # Convert so that there is one element per pixel value
+                flat = self.serialtoflat(recon, ppr)
+                if xstep == 1:
+                    assert xstart == 0
+                    offset = y * vpr
+                    a[offset:offset+vpr] = flat
+                else:
+                    offset = y * vpr + xstart * self.planes
+                    end_offset = (y+1) * vpr
+                    skip = self.planes * xstep
+                    for i in range(self.planes):
+                        a[offset+i:end_offset:skip] = \
+                            flat[i::self.planes]
+        return a
+
+    def iterboxed(self, rows):
+        """Iterator that yields each scanline in boxed row flat pixel
+        format.  `rows` should be an iterator that yields the bytes of
+        each row in turn.
+        """
+
+        def asvalues(raw):
+            """Convert a row of raw bytes into a flat row.  The result
+            may or may not share storage with the argument."""
+
+            if self.bitdepth == 8:
+                return raw
+            if self.bitdepth == 16:
+                raw = tostring(raw)
+                return array('H', struct.unpack('!%dH' % (len(raw)//2), raw))
+            assert self.bitdepth < 8
+            width = self.width
+            # Samples per byte
+            spb = 8//self.bitdepth
+            out = array('B')
+            mask = 2**self.bitdepth - 1
+            shifts = map(self.bitdepth.__mul__, reversed(range(spb)))
+            for o in raw:
+                out.extend(map(lambda i: mask&(o>>i), shifts))
+            return out[:width]
+
+        return itertools.imap(asvalues, rows)
+
+    def serialtoflat(self, bytes, width=None):
+        """Convert serial format (byte stream) pixel data to flat row
+        flat pixel.
+        """
+
+        if self.bitdepth == 8:
+            return bytes
+        if self.bitdepth == 16:
+            bytes = tostring(bytes)
+            return array('H',
+              struct.unpack('!%dH' % (len(bytes)//2), bytes))
+        assert self.bitdepth < 8
+        if width is None:
+            width = self.width
+        # Samples per byte
+        spb = 8//self.bitdepth
+        out = array('B')
+        mask = 2**self.bitdepth - 1
+        shifts = map(self.bitdepth.__mul__, reversed(range(spb)))
+        l = width
+        for o in bytes:
+            out.extend([(mask&(o>>s)) for s in shifts][:l])
+            l -= spb
+            if l <= 0:
+                l = width
+        return out
+
+    def iterstraight(self, raw):
+        """Iterator that undoes the effect of filtering, and yields each
+        row in serialised format (as a sequence of bytes).  Assumes input
+        is straightlaced.  `raw` should be an iterable that yields the
+        raw bytes in chunks of arbitrary size."""
+
+        # length of row, in bytes
+        rb = self.row_bytes
+        a = array('B')
+        # The previous (reconstructed) scanline.  None indicates first
+        # line of image.
+        recon = None
+        for some in raw:
+            a.extend(some)
+            while len(a) >= rb + 1:
+                filter_type = a[0]
+                scanline = a[1:rb+1]
+                del a[:rb+1]
+                recon = self.undo_filter(filter_type, scanline, recon)
+                yield recon
+        if len(a) != 0:
+            # :file:format We get here with a file format error: when the
+            # available bytes (after decompressing) do not pack into exact
+            # rows.
+            raise FormatError(
+              'Wrong size for decompressed IDAT chunk.')
+        assert len(a) == 0
+
+    def validate_signature(self):
+        """If signature (header) has not been read then read and
+        validate it; otherwise do nothing.
+        """
+
+        if self.signature:
+            return
+        self.signature = self.file.read(8)
+        if self.signature != _signature:
+            raise FormatError("PNG file has invalid signature.")
+
+    def preamble(self, lenient=False):
+        """
+        Extract the image metadata by reading the initial part of the PNG
+        file up to the start of the ``IDAT`` chunk.  All the chunks that
+        precede the ``IDAT`` chunk are read and either processed for
+        metadata or discarded.
+
+        If the optional `lenient` argument evaluates to True,
+        checksum failures will raise warnings rather than exceptions.
+        """
+
+        self.validate_signature()
+
+        while True:
+            if not self.atchunk:
+                self.atchunk = self.chunklentype()
+                if self.atchunk is None:
+                    raise FormatError(
+                      'This PNG file has no IDAT chunks.')
+            if self.atchunk[1] == 'IDAT':
+                return
+            self.process_chunk(lenient=lenient)
+
+    def chunklentype(self):
+        """Reads just enough of the input to determine the next
+        chunk's length and type, returned as a (*length*, *type*) pair
+        where *type* is a string.  If there are no more chunks, ``None``
+        is returned.
+        """
+
+        x = self.file.read(8)
+        if not x:
+            return None
+        if len(x) != 8:
+            raise FormatError(
+              'End of file whilst reading chunk length and type.')
+        length,type = struct.unpack('!I4s', x)
+        type = bytestostr(type)
+        if length > 2**31-1:
+            raise FormatError('Chunk %s is too large: %d.' % (type,length))
+        return length,type
+
+    def process_chunk(self, lenient=False):
+        """Process the next chunk and its data.  This only processes the
+        following chunk types, all others are ignored: ``IHDR``,
+        ``PLTE``, ``bKGD``, ``tRNS``, ``gAMA``, ``sBIT``.
+
+        If the optional `lenient` argument evaluates to True,
+        checksum failures will raise warnings rather than exceptions.
+        """
+
+        type, data = self.chunk(lenient=lenient)
+        if type == 'IHDR':
+            # http://www.w3.org/TR/PNG/#11IHDR
+            if len(data) != 13:
+                raise FormatError('IHDR chunk has incorrect length.')
+            (self.width, self.height, self.bitdepth, self.color_type,
+             self.compression, self.filter,
+             self.interlace) = struct.unpack("!2I5B", data)
+
+            # Check that the header specifies only valid combinations.
+            if self.bitdepth not in (1,2,4,8,16):
+                raise Error("invalid bit depth %d" % self.bitdepth)
+            if self.color_type not in (0,2,3,4,6):
+                raise Error("invalid colour type %d" % self.color_type)
+            # Check indexed (palettized) images have 8 or fewer bits
+            # per pixel; check only indexed or greyscale images have
+            # fewer than 8 bits per pixel.
+            if ((self.color_type & 1 and self.bitdepth > 8) or
+                (self.bitdepth < 8 and self.color_type not in (0,3))):
+                raise FormatError("Illegal combination of bit depth (%d)"
+                  " and colour type (%d)."
+                  " See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ."
+                  % (self.bitdepth, self.color_type))
+            if self.compression != 0:
+                raise Error("unknown compression method %d" % self.compression)
+            if self.filter != 0:
+                raise FormatError("Unknown filter method %d,"
+                  " see http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters ."
+                  % self.filter)
+            if self.interlace not in (0,1):
+                raise FormatError("Unknown interlace method %d,"
+                  " see http://www.w3.org/TR/2003/REC-PNG-20031110/#8InterlaceMethods ."
+                  % self.interlace)
+
+            # Derived values
+            # http://www.w3.org/TR/PNG/#6Colour-values
+            colormap =  bool(self.color_type & 1)
+            greyscale = not (self.color_type & 2)
+            alpha = bool(self.color_type & 4)
+            color_planes = (3,1)[greyscale or colormap]
+            planes = color_planes + alpha
+
+            self.colormap = colormap
+            self.greyscale = greyscale
+            self.alpha = alpha
+            self.color_planes = color_planes
+            self.planes = planes
+            self.psize = float(self.bitdepth)/float(8) * planes
+            if int(self.psize) == self.psize:
+                self.psize = int(self.psize)
+            self.row_bytes = int(math.ceil(self.width * self.psize))
+            # Stores PLTE chunk if present, and is used to check
+            # chunk ordering constraints.
+            self.plte = None
+            # Stores tRNS chunk if present, and is used to check chunk
+            # ordering constraints.
+            self.trns = None
+            # Stores sbit chunk if present.
+            self.sbit = None
+        elif type == 'PLTE':
+            # http://www.w3.org/TR/PNG/#11PLTE
+            if self.plte:
+                warnings.warn("Multiple PLTE chunks present.")
+            self.plte = data
+            if len(data) % 3 != 0:
+                raise FormatError(
+                  "PLTE chunk's length should be a multiple of 3.")
+            if len(data) > (2**self.bitdepth)*3:
+                raise FormatError("PLTE chunk is too long.")
+            if len(data) == 0:
+                raise FormatError("Empty PLTE is not allowed.")
+        elif type == 'bKGD':
+            try:
+                if self.colormap:
+                    if not self.plte:
+                        warnings.warn(
+                          "PLTE chunk is required before bKGD chunk.")
+                    self.background = struct.unpack('B', data)
+                else:
+                    self.background = struct.unpack("!%dH" % self.color_planes,
+                      data)
+            except struct.error:
+                raise FormatError("bKGD chunk has incorrect length.")
+        elif type == 'tRNS':
+            # http://www.w3.org/TR/PNG/#11tRNS
+            self.trns = data
+            if self.colormap:
+                if not self.plte:
+                    warnings.warn("PLTE chunk is required before tRNS chunk.")
+                else:
+                    if len(data) > len(self.plte)/3:
+                        # Was warning, but promoted to Error as it
+                        # would otherwise cause pain later on.
+                        raise FormatError("tRNS chunk is too long.")
+            else:
+                if self.alpha:
+                    raise FormatError(
+                      "tRNS chunk is not valid with colour type %d." %
+                      self.color_type)
+                try:
+                    self.transparent = \
+                        struct.unpack("!%dH" % self.color_planes, data)
+                except struct.error:
+                    raise FormatError("tRNS chunk has incorrect length.")
+        elif type == 'gAMA':
+            try:
+                self.gamma = struct.unpack("!L", data)[0] / 100000.0
+            except struct.error:
+                raise FormatError("gAMA chunk has incorrect length.")
+        elif type == 'sBIT':
+            self.sbit = data
+            if (self.colormap and len(data) != 3 or
+                not self.colormap and len(data) != self.planes):
+                raise FormatError("sBIT chunk has incorrect length.")
+
+    def read(self, lenient=False):
+        """
+        Read the PNG file and decode it.  Returns (`width`, `height`,
+        `pixels`, `metadata`).
+
+        May use excessive memory.
+
+        `pixels` are returned in boxed row flat pixel format.
+
+        If the optional `lenient` argument evaluates to True,
+        checksum failures will raise warnings rather than exceptions.
+        """
+
+        def iteridat():
+            """Iterator that yields all the ``IDAT`` chunks as strings."""
+            while True:
+                try:
+                    type, data = self.chunk(lenient=lenient)
+                except ValueError, e:
+                    raise ChunkError(e.args[0])
+                if type == 'IEND':
+                    # http://www.w3.org/TR/PNG/#11IEND
+                    break
+                if type != 'IDAT':
+                    continue
+                # type == 'IDAT'
+                # http://www.w3.org/TR/PNG/#11IDAT
+                if self.colormap and not self.plte:
+                    warnings.warn("PLTE chunk is required before IDAT chunk")
+                yield data
+
+        def iterdecomp(idat):
+            """Iterator that yields decompressed strings.  `idat` should
+            be an iterator that yields the ``IDAT`` chunk data.
+            """
+
+            # Currently, with no max_length parameter to decompress, this
+            # routine will do one yield per IDAT chunk.  So not very
+            # incremental.
+            d = zlib.decompressobj()
+            # Each IDAT chunk is passed to the decompressor, then any
+            # remaining state is decompressed out.
+            for data in idat:
+                # :todo: add a max_length argument here to limit output
+                # size.
+                yield array('B', d.decompress(data))
+            yield array('B', d.flush())
+
+        self.preamble(lenient=lenient)
+        raw = iterdecomp(iteridat())
+
+        if self.interlace:
+            raw = array('B', itertools.chain(*raw))
+            arraycode = 'BH'[self.bitdepth>8]
+            # Like :meth:`group` but producing an array.array object for
+            # each row.
+            pixels = itertools.imap(lambda *row: array(arraycode, row),
+                       *[iter(self.deinterlace(raw))]*self.width*self.planes)
+        else:
+            pixels = self.iterboxed(self.iterstraight(raw))
+        meta = dict()
+        for attr in 'greyscale alpha planes bitdepth interlace'.split():
+            meta[attr] = getattr(self, attr)
+        meta['size'] = (self.width, self.height)
+        for attr in 'gamma transparent background'.split():
+            a = getattr(self, attr, None)
+            if a is not None:
+                meta[attr] = a
+        if self.plte:
+            meta['palette'] = self.palette()
+        return self.width, self.height, pixels, meta
+
+
+    def read_flat(self):
+        """
+        Read a PNG file and decode it into flat row flat pixel format.
+        Returns (*width*, *height*, *pixels*, *metadata*).
+
+        May use excessive memory.
+
+        `pixels` are returned in flat row flat pixel format.
+
+        See also the :meth:`read` method which returns pixels in the
+        more stream-friendly boxed row flat pixel format.
+        """
+
+        x, y, pixel, meta = self.read()
+        arraycode = 'BH'[meta['bitdepth']>8]
+        pixel = array(arraycode, itertools.chain(*pixel))
+        return x, y, pixel, meta
+
+    def palette(self, alpha='natural'):
+        """Returns a palette that is a sequence of 3-tuples or 4-tuples,
+        synthesizing it from the ``PLTE`` and ``tRNS`` chunks.  These
+        chunks should have already been processed (for example, by
+        calling the :meth:`preamble` method).  All the tuples are the
+        same size: 3-tuples if there is no ``tRNS`` chunk, 4-tuples when
+        there is a ``tRNS`` chunk.  Assumes that the image is colour type
+        3 and therefore a ``PLTE`` chunk is required.
+
+        If the `alpha` argument is ``'force'`` then an alpha channel is
+        always added, forcing the result to be a sequence of 4-tuples.
+        """
+
+        if not self.plte:
+            raise FormatError(
+                "Required PLTE chunk is missing in colour type 3 image.")
+        plte = group(array('B', self.plte), 3)
+        if self.trns or alpha == 'force':
+            trns = array('B', self.trns or '')
+            trns.extend([255]*(len(plte)-len(trns)))
+            plte = map(operator.add, plte, group(trns, 1))
+        return plte
+
+    def asDirect(self):
+        """Returns the image data as a direct representation of an
+        ``x * y * planes`` array.  This method is intended to remove the
+        need for callers to deal with palettes and transparency
+        themselves.  Images with a palette (colour type 3)
+        are converted to RGB or RGBA; images with transparency (a
+        ``tRNS`` chunk) are converted to LA or RGBA as appropriate.
+        When returned in this format the pixel values represent the
+        colour value directly without needing to refer to palettes or
+        transparency information.
+
+        Like the :meth:`read` method this method returns a 4-tuple:
+
+        (*width*, *height*, *pixels*, *meta*)
+
+        This method normally returns pixel values with the bit depth
+        they have in the source image, but when the source PNG has an
+        ``sBIT`` chunk it is inspected and can reduce the bit depth of
+        the result pixels; pixel values will be reduced according to
+        the bit depth specified in the ``sBIT`` chunk (PNG nerds should
+        note a single result bit depth is used for all channels; the
+        maximum of the ones specified in the ``sBIT`` chunk.  An RGB565
+        image will be rescaled to 6-bit RGB666).
+
+        The *meta* dictionary that is returned reflects the `direct`
+        format and not the original source image.  For example, an RGB
+        source image with a ``tRNS`` chunk to represent a transparent
+        colour, will have ``planes=3`` and ``alpha=False`` for the
+        source image, but the *meta* dictionary returned by this method
+        will have ``planes=4`` and ``alpha=True`` because an alpha
+        channel is synthesized and added.
+
+        *pixels* is the pixel data in boxed row flat pixel format (just
+        like the :meth:`read` method).
+
+        All the other aspects of the image data are not changed.
+        """
+
+        self.preamble()
+
+        # Simple case, no conversion necessary.
+        if not self.colormap and not self.trns and not self.sbit:
+            return self.read()
+
+        x,y,pixels,meta = self.read()
+
+        if self.colormap:
+            meta['colormap'] = False
+            meta['alpha'] = bool(self.trns)
+            meta['bitdepth'] = 8
+            meta['planes'] = 3 + bool(self.trns)
+            plte = self.palette()
+            def iterpal(pixels):
+                for row in pixels:
+                    row = map(plte.__getitem__, row)
+                    yield array('B', itertools.chain(*row))
+            pixels = iterpal(pixels)
+        elif self.trns:
+            # It would be nice if there was some reasonable way of doing
+            # this without generating a whole load of intermediate tuples.
+            # But tuples do seem like the easiest way, with no other way
+            # clearly much simpler or much faster.  (Actually, the L to LA
+            # conversion could perhaps go faster (all those 1-tuples!), but
+            # I still wonder whether the code proliferation is worth it)
+            it = self.transparent
+            maxval = 2**meta['bitdepth']-1
+            planes = meta['planes']
+            meta['alpha'] = True
+            meta['planes'] += 1
+            typecode = 'BH'[meta['bitdepth']>8]
+            def itertrns(pixels):
+                for row in pixels:
+                    # For each row we group it into pixels, then form a
+                    # characterisation vector that says whether each pixel
+                    # is opaque or not.  Then we convert True/False to
+                    # 0/maxval (by multiplication), and add it as the extra
+                    # channel.
+                    row = group(row, planes)
+                    opa = map(it.__ne__, row)
+                    opa = map(maxval.__mul__, opa)
+                    opa = zip(opa) # convert to 1-tuples
+                    yield array(typecode,
+                      itertools.chain(*map(operator.add, row, opa)))
+            pixels = itertrns(pixels)
+        targetbitdepth = None
+        if self.sbit:
+            sbit = struct.unpack('%dB' % len(self.sbit), self.sbit)
+            targetbitdepth = max(sbit)
+            if targetbitdepth > meta['bitdepth']:
+                raise Error('sBIT chunk %r exceeds bitdepth %d' %
+                    (sbit,self.bitdepth))
+            if min(sbit) <= 0:
+                raise Error('sBIT chunk %r has a 0-entry' % sbit)
+            if targetbitdepth == meta['bitdepth']:
+                targetbitdepth = None
+        if targetbitdepth:
+            shift = meta['bitdepth'] - targetbitdepth
+            meta['bitdepth'] = targetbitdepth
+            def itershift(pixels):
+                for row in pixels:
+                    yield map(shift.__rrshift__, row)
+            pixels = itershift(pixels)
+        return x,y,pixels,meta
+
+    def asFloat(self, maxval=1.0):
+        """Return image pixels as per :meth:`asDirect` method, but scale
+        all pixel values to be floating point values between 0.0 and
+        *maxval*.
+        """
+
+        x,y,pixels,info = self.asDirect()
+        sourcemaxval = 2**info['bitdepth']-1
+        del info['bitdepth']
+        info['maxval'] = float(maxval)
+        factor = float(maxval)/float(sourcemaxval)
+        def iterfloat():
+            for row in pixels:
+                yield map(factor.__mul__, row)
+        return x,y,iterfloat(),info
+
+    def _as_rescale(self, get, targetbitdepth):
+        """Helper used by :meth:`asRGB8` and :meth:`asRGBA8`."""
+
+        width,height,pixels,meta = get()
+        maxval = 2**meta['bitdepth'] - 1
+        targetmaxval = 2**targetbitdepth - 1
+        factor = float(targetmaxval) / float(maxval)
+        meta['bitdepth'] = targetbitdepth
+        def iterscale():
+            for row in pixels:
+                yield map(lambda x: int(round(x*factor)), row)
+        if maxval == targetmaxval:
+            return width, height, pixels, meta
+        else:
+            return width, height, iterscale(), meta
+
+    def asRGB8(self):
+        """Return the image data as an RGB pixels with 8-bits per
+        sample.  This is like the :meth:`asRGB` method except that
+        this method additionally rescales the values so that they
+        are all between 0 and 255 (8-bit).  In the case where the
+        source image has a bit depth < 8 the transformation preserves
+        all the information; where the source image has bit depth
+        > 8, then rescaling to 8-bit values loses precision.  No
+        dithering is performed.  Like :meth:`asRGB`, an alpha channel
+        in the source image will raise an exception.
+
+        This function returns a 4-tuple:
+        (*width*, *height*, *pixels*, *metadata*).
+        *width*, *height*, *metadata* are as per the :meth:`read` method.
+        
+        *pixels* is the pixel data in boxed row flat pixel format.
+        """
+
+        return self._as_rescale(self.asRGB, 8)
+
+    def asRGBA8(self):
+        """Return the image data as RGBA pixels with 8-bits per
+        sample.  This method is similar to :meth:`asRGB8` and
+        :meth:`asRGBA`:  The result pixels have an alpha channel, *and*
+        values are rescaled to the range 0 to 255.  The alpha channel is
+        synthesized if necessary (with a small speed penalty).
+        """
+
+        return self._as_rescale(self.asRGBA, 8)
+
+    def asRGB(self):
+        """Return image as RGB pixels.  RGB colour images are passed
+        through unchanged; greyscales are expanded into RGB
+        triplets (there is a small speed overhead for doing this).
+
+        An alpha channel in the source image will raise an
+        exception.
+
+        The return values are as for the :meth:`read` method
+        except that the *metadata* reflect the returned pixels, not the
+        source image.  In particular, for this method
+        ``metadata['greyscale']`` will be ``False``.
+        """
+
+        width,height,pixels,meta = self.asDirect()
+        if meta['alpha']:
+            raise Error("will not convert image with alpha channel to RGB")
+        if not meta['greyscale']:
+            return width,height,pixels,meta
+        meta['greyscale'] = False
+        typecode = 'BH'[meta['bitdepth'] > 8]
+        def iterrgb():
+            for row in pixels:
+                a = array(typecode, [0]) * 3 * width
+                for i in range(3):
+                    a[i::3] = row
+                yield a
+        return width,height,iterrgb(),meta
+
+    def asRGBA(self):
+        """Return image as RGBA pixels.  Greyscales are expanded into
+        RGB triplets; an alpha channel is synthesized if necessary.
+        The return values are as for the :meth:`read` method
+        except that the *metadata* reflect the returned pixels, not the
+        source image.  In particular, for this method
+        ``metadata['greyscale']`` will be ``False``, and
+        ``metadata['alpha']`` will be ``True``.
+        """
+
+        width,height,pixels,meta = self.asDirect()
+        if meta['alpha'] and not meta['greyscale']:
+            return width,height,pixels,meta
+        typecode = 'BH'[meta['bitdepth'] > 8]
+        maxval = 2**meta['bitdepth'] - 1
+        maxbuffer = struct.pack('=' + typecode, maxval) * 4 * width
+        def newarray():
+            return array(typecode, maxbuffer)
+
+        if meta['alpha'] and meta['greyscale']:
+            # LA to RGBA
+            def convert():
+                for row in pixels:
+                    # Create a fresh target row, then copy L channel
+                    # into first three target channels, and A channel
+                    # into fourth channel.
+                    a = newarray()
+                    pngfilters.convert_la_to_rgba(row, a)
+                    yield a
+        elif meta['greyscale']:
+            # L to RGBA
+            def convert():
+                for row in pixels:
+                    a = newarray()
+                    pngfilters.convert_l_to_rgba(row, a)
+                    yield a
+        else:
+            assert not meta['alpha'] and not meta['greyscale']
+            # RGB to RGBA
+            def convert():
+                for row in pixels:
+                    a = newarray()
+                    pngfilters.convert_rgb_to_rgba(row, a)
+                    yield a
+        meta['alpha'] = True
+        meta['greyscale'] = False
+        return width,height,convert(),meta
+
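+# Illustrative sketch (not part of the PyPNG API; the helper name is
+# made up): round-trips a tiny greyscale image through Writer and
+# Reader to show the boxed row flat pixel format that Reader.read()
+# yields.
+def _example_round_trip():
+    try:
+        from io import BytesIO
+    except ImportError:
+        from StringIO import StringIO as BytesIO
+    f = BytesIO()
+    Writer(width=2, height=2, greyscale=True, bitdepth=8).write(
+      f, [[0, 255], [255, 0]])
+    width, height, pixels, meta = Reader(bytes=f.getvalue()).read()
+    assert (width, height) == (2, 2)
+    # Each row is a flat sequence of sample values.
+    assert [list(row) for row in pixels] == [[0, 255], [255, 0]]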
+
+# === Legacy Version Support ===
+
+# :pyver:old:  PyPNG works on Python versions 2.3 and 2.2, but not
+# without some awkward problems.  Really PyPNG works on Python 2.4 (and
+# above); it works on Pythons 2.3 and 2.2 by virtue of fixing up
+# problems here.  It's a bit ugly (which is why it's hidden down here).
+#
+# Generally the strategy is one of pretending that we're running on
+# Python 2.4 (or above), and patching up the library support on earlier
+# versions so that it looks enough like Python 2.4.  When it comes to
+# Python 2.2 there is one thing we cannot patch: extended slices
+# http://www.python.org/doc/2.3/whatsnew/section-slices.html.
+# Instead we simply declare that features that are implemented using
+# extended slices will not work on Python 2.2.
+#
+# In order to work on Python 2.3 we fix up a recurring annoyance involving
+# the array type.  In Python 2.3 an array cannot be initialised with an
+# array, and it cannot be extended with a list (or other sequence).
+# Both of those are repeated issues in the code.  Whilst I would not
+# normally tolerate this sort of behaviour, here we "shim" a replacement
+# for array into place (and hope no-one notices).  You never read this.
+#
+# In an amusing case of warty hacks on top of warty hacks... the array
+# shimming we try and do only works on Python 2.3 and above (you can't
+# subclass array.array in Python 2.2).  So to get it working on Python
+# 2.2 we go for something much simpler and (probably) way slower.
+try:
+    array('B').extend([])
+    array('B', array('B'))
+except:
+    # Expect to get here on Python 2.3
+    try:
+        class _array_shim(array):
+            true_array = array
+            def __new__(cls, typecode, init=None):
+                super_new = super(_array_shim, cls).__new__
+                it = super_new(cls, typecode)
+                if init is None:
+                    return it
+                it.extend(init)
+                return it
+            def extend(self, extension):
+                super_extend = super(_array_shim, self).extend
+                if isinstance(extension, self.true_array):
+                    return super_extend(extension)
+                if not isinstance(extension, (list, str)):
+                    # Convert to list.  Allows iterators to work.
+                    extension = list(extension)
+                return super_extend(self.true_array(self.typecode, extension))
+        array = _array_shim
+    except:
+        # Expect to get here on Python 2.2
+        def array(typecode, init=()):
+            if type(init) == str:
+                return map(ord, init)
+            return list(init)
+
+# Further hacks to get it limping along on Python 2.2
+try:
+    enumerate
+except:
+    def enumerate(seq):
+        i=0
+        for x in seq:
+            yield i,x
+            i += 1
+
+try:
+    reversed
+except:
+    def reversed(l):
+        l = list(l)
+        l.reverse()
+        for x in l:
+            yield x
+
+try:
+    itertools
+except:
+    class _dummy_itertools:
+        pass
+    itertools = _dummy_itertools()
+    def _itertools_imap(f, seq):
+        for x in seq:
+            yield f(x)
+    itertools.imap = _itertools_imap
+    def _itertools_chain(*iterables):
+        for it in iterables:
+            for element in it:
+                yield element
+    itertools.chain = _itertools_chain
+
+
+# === Support for users without Cython ===
+
+try:
+    pngfilters
+except:
+    class pngfilters(object):
+        def undo_filter_sub(filter_unit, scanline, previous, result):
+            """Undo sub filter."""
+
+            ai = 0
+            # Loop starts at index filter_unit.  Observe that the
+            # initial part of the result is already filled in correctly
+            # with scanline.
+            for i in range(filter_unit, len(result)):
+                x = scanline[i]
+                a = result[ai]
+                result[i] = (x + a) & 0xff
+                ai += 1
+        undo_filter_sub = staticmethod(undo_filter_sub)
+
+        def undo_filter_up(filter_unit, scanline, previous, result):
+            """Undo up filter."""
+
+            for i in range(len(result)):
+                x = scanline[i]
+                b = previous[i]
+                result[i] = (x + b) & 0xff
+        undo_filter_up = staticmethod(undo_filter_up)
+
+        def undo_filter_average(filter_unit, scanline, previous, result):
+            """Undo average filter."""
+
+            ai = -filter_unit
+            for i in range(len(result)):
+                x = scanline[i]
+                if ai < 0:
+                    a = 0
+                else:
+                    a = result[ai]
+                b = previous[i]
+                result[i] = (x + ((a + b) >> 1)) & 0xff
+                ai += 1
+        undo_filter_average = staticmethod(undo_filter_average)
+
+        def undo_filter_paeth(filter_unit, scanline, previous, result):
+            """Undo Paeth filter."""
+
+            # Also used for ci.
+            ai = -filter_unit
+            for i in range(len(result)):
+                x = scanline[i]
+                if ai < 0:
+                    a = c = 0
+                else:
+                    a = result[ai]
+                    c = previous[ai]
+                b = previous[i]
+                p = a + b - c
+                pa = abs(p - a)
+                pb = abs(p - b)
+                pc = abs(p - c)
+                if pa <= pb and pa <= pc:
+                    pr = a
+                elif pb <= pc:
+                    pr = b
+                else:
+                    pr = c
+                result[i] = (x + pr) & 0xff
+                ai += 1
+        undo_filter_paeth = staticmethod(undo_filter_paeth)
+
+        def convert_la_to_rgba(row, result):
+            for i in range(3):
+                result[i::4] = row[0::2]
+            result[3::4] = row[1::2]
+        convert_la_to_rgba = staticmethod(convert_la_to_rgba)
+
+        def convert_l_to_rgba(row, result):
+            """Convert a grayscale image to RGBA. This method assumes the alpha
+            channel in result is already correctly initialized."""
+            for i in range(3):
+                result[i::4] = row
+        convert_l_to_rgba = staticmethod(convert_l_to_rgba)
+
+        def convert_rgb_to_rgba(row, result):
+            """Convert an RGB image to RGBA. This method assumes the alpha
+            channel in result is already correctly initialized."""
+            for i in range(3):
+                result[i::4] = row[i::3]
+        convert_rgb_to_rgba = staticmethod(convert_rgb_to_rgba)
+
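+# Illustrative sketch (not part of the PyPNG API; the helper name is
+# made up): undoing the "sub" filter recovers the scanline produced by
+# filter_scanline above.  undo_filter_sub updates `result` in place.
+def _example_undo_sub_filter():
+    line = array('B', [10, 20, 30, 13, 24, 35])
+    filtered = filter_scanline(1, line, 3)
+    scanline = filtered[1:]             # drop the filter-type byte
+    previous = array('B', [0]*len(scanline))
+    result = array('B', scanline)
+    pngfilters.undo_filter_sub(3, scanline, previous, result)
+    assert list(result) == list(line)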
+
+# === Internal Test Support ===
+
+# This section comprises the tests that are internally validated (as
+# opposed to tests which produce output files that are externally
+# validated).  Primarily they are unittests.
+
+# Note that it is difficult to internally validate the results of
+# writing a PNG file.  The only thing we can do is read it back in
+# again, which merely checks consistency, not that the PNG file we
+# produce is valid.
+
+# Run the tests from the command line:
+# python -c 'import png;png.test()'
+
+# (For an in-memory binary file IO object) We use BytesIO where
+# available, otherwise we use StringIO, but name it BytesIO.
+try:
+    from io import BytesIO
+except:
+    from StringIO import StringIO as BytesIO
+import tempfile
+# http://www.python.org/doc/2.4.4/lib/module-unittest.html
+import unittest
+
+
+def test():
+    unittest.main(__name__)
+
+def topngbytes(name, rows, x, y, **k):
+    """Convenience function for creating a PNG file "in memory" as a
+    string.  Creates a :class:`Writer` instance using the keyword arguments,
+    then passes `rows` to its :meth:`Writer.write` method.  The resulting
+    PNG file is returned as a string.  `name` is used to identify the file for
+    debugging.
+    """
+
+    import os
+
+    print name
+    f = BytesIO()
+    w = Writer(x, y, **k)
+    w.write(f, rows)
+    if os.environ.get('PYPNG_TEST_TMP'):
+        w = open(name, 'wb')
+        w.write(f.getvalue())
+        w.close()
+    return f.getvalue()
+
+def testWithIO(inp, out, f):
+    """Calls the function `f` with ``sys.stdin`` changed to `inp`
+    and ``sys.stdout`` changed to `out`.  They are restored when `f`
+    returns.  This function returns whatever `f` returns.
+    """
+
+    import os
+
+    try:
+        oldin,sys.stdin = sys.stdin,inp
+        oldout,sys.stdout = sys.stdout,out
+        x = f()
+    finally:
+        sys.stdin = oldin
+        sys.stdout = oldout
+    if os.environ.get('PYPNG_TEST_TMP') and hasattr(out,'getvalue'):
+        name = mycallersname()
+        if name:
+            w = open(name+'.png', 'wb')
+            w.write(out.getvalue())
+            w.close()
+    return x
+
+def mycallersname():
+    """Returns the name of the caller of the caller of this function
+    (hence the name of the caller of the function in which
+    "mycallersname()" textually appears).  Returns None if this cannot
+    be determined."""
+
+    # http://docs.python.org/library/inspect.html#the-interpreter-stack
+    import inspect
+
+    frame = inspect.currentframe()
+    if not frame:
+        return None
+    frame_,filename_,lineno_,funname,linelist_,listi_ = (
+      inspect.getouterframes(frame)[2])
+    return funname
+
+def seqtobytes(s):
+    """Convert a sequence of integers to a *bytes* instance.  Good for
+    plastering over Python 2 / Python 3 cracks.
+    """
+
+    return strtobytes(''.join(chr(x) for x in s))
+
+class Test(unittest.TestCase):
+    # This member is used by the superclass.  If we don't define a new
+    # class here then when we use self.assertRaises() and the PyPNG code
+    # raises an assertion then we get no proper traceback.  I can't work
+    # out why, but defining a new class here means we get a proper
+    # traceback.
+    class failureException(Exception):
+        pass
+
+    def helperLN(self, n):
+        mask = (1 << n) - 1
+        # Use small chunk_limit so that multiple chunk writing is
+        # tested.  Making it a test for Issue 20.
+        w = Writer(15, 17, greyscale=True, bitdepth=n, chunk_limit=99)
+        f = BytesIO()
+        w.write_array(f, array('B', map(mask.__and__, range(1, 256))))
+        r = Reader(bytes=f.getvalue())
+        x,y,pixels,meta = r.read()
+        self.assertEqual(x, 15)
+        self.assertEqual(y, 17)
+        self.assertEqual(list(itertools.chain(*pixels)),
+                         map(mask.__and__, range(1,256)))
+    def testL8(self):
+        return self.helperLN(8)
+    def testL4(self):
+        return self.helperLN(4)
+    def testL2(self):
+        "Also tests asRGB8."
+        w = Writer(1, 4, greyscale=True, bitdepth=2)
+        f = BytesIO()
+        w.write_array(f, array('B', range(4)))
+        r = Reader(bytes=f.getvalue())
+        x,y,pixels,meta = r.asRGB8()
+        self.assertEqual(x, 1)
+        self.assertEqual(y, 4)
+        for i,row in enumerate(pixels):
+            self.assertEqual(len(row), 3)
+            self.assertEqual(list(row), [0x55*i]*3)
+    def testP2(self):
+        "2-bit palette."
+        a = (255,255,255)
+        b = (200,120,120)
+        c = (50,99,50)
+        w = Writer(1, 4, bitdepth=2, palette=[a,b,c])
+        f = BytesIO()
+        w.write_array(f, array('B', (0,1,1,2)))
+        r = Reader(bytes=f.getvalue())
+        x,y,pixels,meta = r.asRGB8()
+        self.assertEqual(x, 1)
+        self.assertEqual(y, 4)
+        self.assertEqual(map(list, pixels), map(list, [a, b, b, c]))
+    def testPtrns(self):
+        "Test colour type 3 and tRNS chunk (and 4-bit palette)."
+        a = (50,99,50,50)
+        b = (200,120,120,80)
+        c = (255,255,255)
+        d = (200,120,120)
+        e = (50,99,50)
+        w = Writer(3, 3, bitdepth=4, palette=[a,b,c,d,e])
+        f = BytesIO()
+        w.write_array(f, array('B', (4, 3, 2, 3, 2, 0, 2, 0, 1)))
+        r = Reader(bytes=f.getvalue())
+        x,y,pixels,meta = r.asRGBA8()
+        self.assertEqual(x, 3)
+        self.assertEqual(y, 3)
+        c = c+(255,)
+        d = d+(255,)
+        e = e+(255,)
+        boxed = [(e,d,c),(d,c,a),(c,a,b)]
+        flat = map(lambda row: itertools.chain(*row), boxed)
+        self.assertEqual(map(list, pixels), map(list, flat))
+    def testRGBtoRGBA(self):
+        "asRGBA8() on colour type 2 source."""
+        # Test for Issue 26
+        r = Reader(bytes=_pngsuite['basn2c08'])
+        x,y,pixels,meta = r.asRGBA8()
+        # Test the pixels at row 9 columns 0 and 1.
+        row9 = list(pixels)[9]
+        self.assertEqual(list(row9[0:8]),
+                         [0xff, 0xdf, 0xff, 0xff, 0xff, 0xde, 0xff, 0xff])
+    def testLtoRGBA(self):
+        "asRGBA() on grey source."""
+        # Test for Issue 60
+        r = Reader(bytes=_pngsuite['basi0g08'])
+        x,y,pixels,meta = r.asRGBA()
+        row9 = list(list(pixels)[9])
+        self.assertEqual(row9[0:8],
+          [222, 222, 222, 255, 221, 221, 221, 255])
+    def testCtrns(self):
+        "Test colour type 2 and tRNS chunk."
+        # Test for Issue 25
+        r = Reader(bytes=_pngsuite['tbrn2c08'])
+        x,y,pixels,meta = r.asRGBA8()
+        # I just happen to know that the first pixel is transparent.
+        # In particular it should be #7f7f7f00
+        row0 = list(pixels)[0]
+        self.assertEqual(tuple(row0[0:4]), (0x7f, 0x7f, 0x7f, 0x00))
+    def testAdam7read(self):
+        """Adam7 interlace reading.
+        Specifically, test that for images in the PngSuite that
+        have both an interlaced and straightlaced pair that both
+        images from the pair produce the same array of pixels."""
+        for candidate in _pngsuite:
+            if not candidate.startswith('basn'):
+                continue
+            candi = candidate.replace('n', 'i')
+            if candi not in _pngsuite:
+                continue
+            print 'adam7 read', candidate
+            straight = Reader(bytes=_pngsuite[candidate])
+            adam7 = Reader(bytes=_pngsuite[candi])
+            # Just compare the pixels.  x,y are ignored (they are very
+            # likely correct); metadata is ignored because the
+            # "interlace" member necessarily differs.
+            straight = straight.read()[2]
+            adam7 = adam7.read()[2]
+            self.assertEqual(map(list, straight), map(list, adam7))
+    def testAdam7write(self):
+        """Adam7 interlace writing.
+        For each test image in the PngSuite, write an interlaced
+        and a straightlaced version.  Decode both, and compare results.
+        """
+        # Not such a great test, because the only way we can check what
+        # we have written is to read it back again.
+
+        for name,bytes in _pngsuite.items():
+            # Only certain colour types supported for this test.
+            if name[3:5] not in ['n0', 'n2', 'n4', 'n6']:
+                continue
+            it = Reader(bytes=bytes)
+            x,y,pixels,meta = it.read()
+            pngi = topngbytes('adam7wn'+name+'.png', pixels,
+              x=x, y=y, bitdepth=it.bitdepth,
+              greyscale=it.greyscale, alpha=it.alpha,
+              transparent=it.transparent,
+              interlace=False)
+            x,y,ps,meta = Reader(bytes=pngi).read()
+            it = Reader(bytes=bytes)
+            x,y,pixels,meta = it.read()
+            pngs = topngbytes('adam7wi'+name+'.png', pixels,
+              x=x, y=y, bitdepth=it.bitdepth,
+              greyscale=it.greyscale, alpha=it.alpha,
+              transparent=it.transparent,
+              interlace=True)
+            x,y,pi,meta = Reader(bytes=pngs).read()
+            self.assertEqual(map(list, ps), map(list, pi))
+    def testPGMin(self):
+        """Test that the command line tool can read PGM files."""
+        def do():
+            return _main(['testPGMin'])
+        s = BytesIO()
+        s.write(strtobytes('P5 2 2 3\n'))
+        s.write(strtobytes('\x00\x01\x02\x03'))
+        s.flush()
+        s.seek(0)
+        o = BytesIO()
+        testWithIO(s, o, do)
+        r = Reader(bytes=o.getvalue())
+        x,y,pixels,meta = r.read()
+        self.assertTrue(r.greyscale)
+        self.assertEqual(r.bitdepth, 2)
+    def testPAMin(self):
+        """Test that the command line tool can read PAM file."""
+        def do():
+            return _main(['testPAMin'])
+        s = BytesIO()
+        s.write(strtobytes('P7\nWIDTH 3\nHEIGHT 1\nDEPTH 4\nMAXVAL 255\n'
+                'TUPLTYPE RGB_ALPHA\nENDHDR\n'))
+        # The pixels in flat row flat pixel format
+        flat =  [255,0,0,255, 0,255,0,120, 0,0,255,30]
+        asbytes = seqtobytes(flat)
+        s.write(asbytes)
+        s.flush()
+        s.seek(0)
+        o = BytesIO()
+        testWithIO(s, o, do)
+        r = Reader(bytes=o.getvalue())
+        x,y,pixels,meta = r.read()
+        self.assertTrue(r.alpha)
+        self.assertTrue(not r.greyscale)
+        self.assertEqual(list(itertools.chain(*pixels)), flat)
+    def testLA4(self):
+        """Create an LA image with bitdepth 4."""
+        bytes = topngbytes('la4.png', [[5, 12]], 1, 1,
+          greyscale=True, alpha=True, bitdepth=4)
+        sbit = Reader(bytes=bytes).chunk('sBIT')[1]
+        self.assertEqual(sbit, strtobytes('\x04\x04'))
+    def testPal(self):
+        """Test that a palette PNG returns the palette in info."""
+        r = Reader(bytes=_pngsuite['basn3p04'])
+        x,y,pixels,info = r.read()
+        self.assertEqual(x, 32)
+        self.assertEqual(y, 32)
+        self.assertTrue('palette' in info)
+    def testPalWrite(self):
+        """Test metadata for paletted PNG can be passed from one PNG
+        to another."""
+        r = Reader(bytes=_pngsuite['basn3p04'])
+        x,y,pixels,info = r.read()
+        w = Writer(**info)
+        o = BytesIO()
+        w.write(o, pixels)
+        o.flush()
+        o.seek(0)
+        r = Reader(file=o)
+        _,_,_,again_info = r.read()
+        # Same palette
+        self.assertEqual(again_info['palette'], info['palette'])
+    def testPalExpand(self):
+        """Test that bitdepth can be used to fiddle with pallete image."""
+        r = Reader(bytes=_pngsuite['basn3p04'])
+        x,y,pixels,info = r.read()
+        pixels = [list(row) for row in pixels]
+        info['bitdepth'] = 8
+        w = Writer(**info)
+        o = BytesIO()
+        w.write(o, pixels)
+        o.flush()
+        o.seek(0)
+        r = Reader(file=o)
+        _,_,again_pixels,again_info = r.read()
+        # Same pixels
+        again_pixels = [list(row) for row in again_pixels]
+        self.assertEqual(again_pixels, pixels)
+
+    def testPNMsbit(self):
+        """Test that PNM files can generates sBIT chunk."""
+        def do():
+            return _main(['testPNMsbit'])
+        s = BytesIO()
+        s.write(strtobytes('P6 8 1 1\n'))
+        for pixel in range(8):
+            s.write(struct.pack('<I', (0x4081*pixel)&0x10101)[:3])
+        s.flush()
+        s.seek(0)
+        o = BytesIO()
+        testWithIO(s, o, do)
+        r = Reader(bytes=o.getvalue())
+        sbit = r.chunk('sBIT')[1]
+        self.assertEqual(sbit, strtobytes('\x01\x01\x01'))
+    def testLtrns0(self):
+        """Create greyscale image with tRNS chunk."""
+        return self.helperLtrns(0)
+    def testLtrns1(self):
+        """Using 1-tuple for transparent arg."""
+        return self.helperLtrns((0,))
+    def helperLtrns(self, transparent):
+        """Helper used by :meth:`testLtrns*`."""
+        pixels = zip([0x00, 0x38, 0x4c, 0x54, 0x5c, 0x40, 0x38, 0x00])
+        o = BytesIO()
+        w = Writer(8, 8, greyscale=True, bitdepth=1, transparent=transparent)
+        w.write_packed(o, pixels)
+        r = Reader(bytes=o.getvalue())
+        x,y,pixels,meta = r.asDirect()
+        self.assertTrue(meta['alpha'])
+        self.assertTrue(meta['greyscale'])
+        self.assertEqual(meta['bitdepth'], 1)
+    def testWinfo(self):
+        """Test the dictionary returned by a `read` method can be used
+        as args for :meth:`Writer`.
+        """
+        r = Reader(bytes=_pngsuite['basn2c16'])
+        info = r.read()[3]
+        w = Writer(**info)
+    def testPackedIter(self):
+        """Test iterator for row when using write_packed.
+
+        Indicative for Issue 47.
+        """
+        w = Writer(16, 2, greyscale=True, alpha=False, bitdepth=1)
+        o = BytesIO()
+        w.write_packed(o, [itertools.chain([0x0a], [0xaa]),
+                           itertools.chain([0x0f], [0xff])])
+        r = Reader(bytes=o.getvalue())
+        x,y,pixels,info = r.asDirect()
+        pixels = list(pixels)
+        self.assertEqual(len(pixels), 2)
+        self.assertEqual(len(pixels[0]), 16)
+    def testInterlacedArray(self):
+        """Test that reading an interlaced PNG yields each row as an
+        array."""
+        r = Reader(bytes=_pngsuite['basi0g08'])
+        list(r.read()[2])[0].tostring
+    def testTrnsArray(self):
+        """Test that reading a type 2 PNG with tRNS chunk yields each
+        row as an array (using asDirect)."""
+        r = Reader(bytes=_pngsuite['tbrn2c08'])
+        list(r.asDirect()[2])[0].tostring
+
+    # Invalid file format tests.  These construct various badly
+    # formatted PNG files, then feed them into a Reader.  When
+    # everything is working properly, we should get FormatError
+    # exceptions raised.
+    def testEmpty(self):
+        """Test empty file."""
+
+        r = Reader(bytes='')
+        self.assertRaises(FormatError, r.asDirect)
+    def testSigOnly(self):
+        """Test file containing just signature bytes."""
+
+        r = Reader(bytes=_signature)
+        self.assertRaises(FormatError, r.asDirect)
+    def testExtraPixels(self):
+        """Test file that contains too many pixels."""
+
+        def eachchunk(chunk):
+            if chunk[0] != 'IDAT':
+                return chunk
+            data = zlib.decompress(chunk[1])
+            data += strtobytes('\x00garbage')
+            data = zlib.compress(data)
+            chunk = (chunk[0], data)
+            return chunk
+        self.assertRaises(FormatError, self.helperFormat, eachchunk)
+    def testNotEnoughPixels(self):
+        def eachchunk(chunk):
+            if chunk[0] != 'IDAT':
+                return chunk
+            # Remove last byte.
+            data = zlib.decompress(chunk[1])
+            data = data[:-1]
+            data = zlib.compress(data)
+            return (chunk[0], data)
+        self.assertRaises(FormatError, self.helperFormat, eachchunk)
+    def helperFormat(self, f):
+        r = Reader(bytes=_pngsuite['basn0g01'])
+        o = BytesIO()
+        def newchunks():
+            for chunk in r.chunks():
+                yield f(chunk)
+        write_chunks(o, newchunks())
+        r = Reader(bytes=o.getvalue())
+        return list(r.asDirect()[2])
+    def testBadFilter(self):
+        def eachchunk(chunk):
+            if chunk[0] != 'IDAT':
+                return chunk
+            data = zlib.decompress(chunk[1])
+            # Corrupt the first filter byte
+            data = strtobytes('\x99') + data[1:]
+            data = zlib.compress(data)
+            return (chunk[0], data)
+        self.assertRaises(FormatError, self.helperFormat, eachchunk)
+
+    def testFlat(self):
+        """Test read_flat."""
+        import hashlib
+
+        r = Reader(bytes=_pngsuite['basn0g02'])
+        x,y,pixel,meta = r.read_flat()
+        d = hashlib.md5(seqtobytes(pixel)).digest()
+        self.assertEqual(_enhex(d), '255cd971ab8cd9e7275ff906e5041aa0')
+    def testfromarray(self):
+        img = from_array([[0, 0x33, 0x66], [0xff, 0xcc, 0x99]], 'L')
+        img.save('testfromarray.png')
+    def testfromarrayL16(self):
+        img = from_array(group(range(2**16), 256), 'L;16')
+        img.save('testL16.png')
+    def testfromarrayRGB(self):
+        img = from_array([[0,0,0, 0,0,1, 0,1,0, 0,1,1],
+                          [1,0,0, 1,0,1, 1,1,0, 1,1,1]], 'RGB;1')
+        o = BytesIO()
+        img.save(o)
+    def testfromarrayIter(self):
+        import itertools
+
+        i = itertools.islice(itertools.count(10), 20)
+        i = itertools.imap(lambda x: [x, x, x], i)
+        img = from_array(i, 'RGB;5', dict(height=20))
+        f = open('testiter.png', 'wb')
+        img.save(f)
+        f.close()
+
+    # numpy dependent tests.  These are skipped (with a message to
+    # sys.stderr) if numpy cannot be imported.
+    def testNumpyuint16(self):
+        """numpy uint16."""
+
+        try:
+            import numpy
+        except ImportError:
+            print >>sys.stderr, "skipping numpy test"
+            return
+
+        rows = [map(numpy.uint16, range(0,0x10000,0x5555))]
+        b = topngbytes('numpyuint16.png', rows, 4, 1,
+            greyscale=True, alpha=False, bitdepth=16)
+    def testNumpyuint8(self):
+        """numpy uint8."""
+
+        try:
+            import numpy
+        except ImportError:
+            print >>sys.stderr, "skipping numpy test"
+            return
+
+        rows = [map(numpy.uint8, range(0,0x100,0x55))]
+        b = topngbytes('numpyuint8.png', rows, 4, 1,
+            greyscale=True, alpha=False, bitdepth=8)
+    def testNumpybool(self):
+        """numpy bool."""
+
+        try:
+            import numpy
+        except ImportError:
+            print >>sys.stderr, "skipping numpy test"
+            return
+
+        rows = [map(numpy.bool, [0,1])]
+        b = topngbytes('numpybool.png', rows, 2, 1,
+            greyscale=True, alpha=False, bitdepth=1)
+    def testNumpyarray(self):
+        """numpy array."""
+        try:
+            import numpy
+        except ImportError:
+            print >>sys.stderr, "skipping numpy test"
+            return
+
+        pixels = numpy.array([[0,0x5555],[0x5555,0xaaaa]], numpy.uint16)
+        img = from_array(pixels, 'L')
+        img.save('testnumpyL16.png')
+
+    def paeth(self, x, a, b, c):
+        p = a + b - c
+        pa = abs(p - a)
+        pb = abs(p - b)
+        pc = abs(p - c)
+        if pa <= pb and pa <= pc:
+            pr = a
+        elif pb <= pc:
+            pr = b
+        else:
+            pr = c
+        return x - pr
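+
+    # Worked example of the predictor above (values chosen purely for
+    # illustration): paeth(x=100, a=3, b=5, c=1) computes p = 3+5-1 = 7,
+    # giving pa=4, pb=2, pc=6; pb is smallest, so the predictor is b=5
+    # and the filtered value returned is 100 - 5 = 95.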
+
+    # test filters and unfilters
+    def testFilterScanlineFirstLine(self):
+        fo = 3  # bytes per pixel
+        line = [30, 31, 32, 230, 231, 232]
+        out = filter_scanline(0, line, fo, None)  # none
+        self.assertEqual(list(out), [0, 30, 31, 32, 230, 231, 232])
+        out = filter_scanline(1, line, fo, None)  # sub
+        self.assertEqual(list(out), [1, 30, 31, 32, 200, 200, 200])
+        out = filter_scanline(2, line, fo, None)  # up
+        # TODO: All filtered scanlines start with a byte indicating the filter
+        # algorithm, except "up". Is this a bug? Should the expected output
+        # start with 2 here?
+        self.assertEqual(list(out), [30, 31, 32, 230, 231, 232])
+        out = filter_scanline(3, line, fo, None)  # average
+        self.assertEqual(list(out), [3, 30, 31, 32, 215, 216, 216])
+        out = filter_scanline(4, line, fo, None)  # paeth
+        self.assertEqual(list(out), [
+            4, self.paeth(30, 0, 0, 0), self.paeth(31, 0, 0, 0),
+            self.paeth(32, 0, 0, 0), self.paeth(230, 30, 0, 0),
+            self.paeth(231, 31, 0, 0), self.paeth(232, 32, 0, 0)
+            ])
+    def testFilterScanline(self):
+        prev = [20, 21, 22, 210, 211, 212]
+        line = [30, 32, 34, 230, 233, 236]
+        fo = 3
+        out = filter_scanline(0, line, fo, prev)  # none
+        self.assertEqual(list(out), [0, 30, 32, 34, 230, 233, 236])
+        out = filter_scanline(1, line, fo, prev)  # sub
+        self.assertEqual(list(out), [1, 30, 32, 34, 200, 201, 202])
+        out = filter_scanline(2, line, fo, prev)  # up
+        self.assertEqual(list(out), [2, 10, 11, 12, 20, 22, 24])
+        out = filter_scanline(3, line, fo, prev)  # average
+        self.assertEqual(list(out), [3, 20, 22, 23, 110, 112, 113])
+        out = filter_scanline(4, line, fo, prev)  # paeth
+        self.assertEqual(list(out), [
+            4, self.paeth(30, 0, 20, 0), self.paeth(32, 0, 21, 0),
+            self.paeth(34, 0, 22, 0), self.paeth(230, 30, 210, 20),
+            self.paeth(233, 32, 211, 21), self.paeth(236, 34, 212, 22)
+            ])
+    def testUnfilterScanline(self):
+        reader = Reader(bytes='')
+        reader.psize = 3
+        scanprev = array('B', [20, 21, 22, 210, 211, 212])
+        scanline = array('B', [30, 32, 34, 230, 233, 236])
+        def cp(a):
+            return array('B', a)
+
+        out = reader.undo_filter(0, cp(scanline), cp(scanprev))
+        self.assertEqual(list(out), list(scanline))  # none
+        out = reader.undo_filter(1, cp(scanline), cp(scanprev))
+        self.assertEqual(list(out), [30, 32, 34, 4, 9, 14])  # sub
+        out = reader.undo_filter(2, cp(scanline), cp(scanprev))
+        self.assertEqual(list(out), [50, 53, 56, 184, 188, 192])  # up
+        out = reader.undo_filter(3, cp(scanline), cp(scanprev))
+        self.assertEqual(list(out), [40, 42, 45, 99, 103, 108])  # average
+        out = reader.undo_filter(4, cp(scanline), cp(scanprev))
+        self.assertEqual(list(out), [50, 53, 56, 184, 188, 192])  # paeth
+    def testUnfilterScanlinePaeth(self):
+        # This tests more edge cases in the paeth unfilter
+        reader = Reader(bytes='')
+        reader.psize = 3
+        scanprev = array('B', [2, 0, 0, 0, 9, 11])
+        scanline = array('B', [6, 10, 9, 100, 101, 102])
+
+        out = reader.undo_filter(4, scanline, scanprev)
+        self.assertEqual(list(out), [8, 10, 9, 108, 111, 113])  # paeth
+    def testIterstraight(self):
+        def arraify(list_of_str):
+            return [array('B', s) for s in list_of_str]
+        reader = Reader(bytes='')
+        reader.row_bytes = 6
+        reader.psize = 3
+        rows = reader.iterstraight(arraify(['\x00abcdef', '\x00ghijkl']))
+        self.assertEqual(list(rows), arraify(['abcdef', 'ghijkl']))
+
+        rows = reader.iterstraight(arraify(['\x00abc', 'def\x00ghijkl']))
+        self.assertEqual(list(rows), arraify(['abcdef', 'ghijkl']))
+
+        rows = reader.iterstraight(arraify(['\x00abcdef\x00ghijkl']))
+        self.assertEqual(list(rows), arraify(['abcdef', 'ghijkl']))
+
+        rows = reader.iterstraight(arraify(['\x00abcdef\x00ghi', 'jkl']))
+        self.assertEqual(list(rows), arraify(['abcdef', 'ghijkl']))
+
+# === Command Line Support ===
+
+def _dehex(s):
+    """Liberally convert from hex string to binary string."""
+    import re
+    import binascii
+
+    # Remove all non-hexadecimal digits
+    s = re.sub(r'[^a-fA-F\d]', '', s)
+    # binascii.unhexlify works in Python 2 and Python 3 (unlike
+    # thing.decode('hex')).
+    return binascii.unhexlify(strtobytes(s))
+def _enhex(s):
+    """Convert from binary string (bytes) to hex string (str)."""
+
+    import binascii
+
+    return bytestostr(binascii.hexlify(s))
+
+# Copies of PngSuite test files taken
+# from http://www.schaik.com/pngsuite/pngsuite_bas_png.html
+# on 2009-02-19 by drj and converted to hex.
+# Some of these are not actually in PngSuite (but maybe they should
+# be?); they use the same naming scheme but start with a capital
+# letter.
+_pngsuite = {
+  'basi0g01': _dehex("""
+89504e470d0a1a0a0000000d49484452000000200000002001000000012c0677
+cf0000000467414d41000186a031e8965f0000009049444154789c2d8d310ec2
+300c45dfc682c415187a00a42e197ab81e83b127e00c5639001363a580d8582c
+65c910357c4b78b0bfbfdf4f70168c19e7acb970a3f2d1ded9695ce5bf5963df
+d92aaf4c9fd927ea449e6487df5b9c36e799b91bdf082b4d4bd4014fe4014b01
+ab7a17aee694d28d328a2d63837a70451e1648702d9a9ff4a11d2f7a51aa21e5
+a18c7ffd0094e3511d661822f20000000049454e44ae426082
+"""),
+  'basi0g02': _dehex("""
+89504e470d0a1a0a0000000d49484452000000200000002002000000016ba60d
+1f0000000467414d41000186a031e8965f0000005149444154789c635062e860
+00e17286bb609c93c370ec189494960631366e4467b3ae675dcf10f521ea0303
+90c1ca006444e11643482064114a4852c710baea3f18c31918020c30410403a6
+0ac1a09239009c52804d85b6d97d0000000049454e44ae426082
+"""),
+  'basi0g04': _dehex("""
+89504e470d0a1a0a0000000d4948445200000020000000200400000001e4e6f8
+bf0000000467414d41000186a031e8965f000000ae49444154789c658e5111c2
+301044171c141c141c041c843a287510ea20d441c041c141c141c04191102454
+03994998cecd7edcecedbb9bdbc3b2c2b6457545fbc4bac1be437347f7c66a77
+3c23d60db15e88f5c5627338a5416c2e691a9b475a89cd27eda12895ae8dfdab
+43d61e590764f5c83a226b40d669bec307f93247701687723abf31ff83a2284b
+a5b4ae6b63ac6520ad730ca4ed7b06d20e030369bd6720ed383290360406d24e
+13811f2781eba9d34d07160000000049454e44ae426082
+"""),
+  'basi0g08': _dehex("""
+89504e470d0a1a0a0000000d4948445200000020000000200800000001211615
+be0000000467414d41000186a031e8965f000000b549444154789cb5905d0ac2
+3010849dbac81c42c47bf843cf253e8878b0aa17110f214bdca6be240f5d21a5
+94ced3e49bcd322c1624115515154998aa424822a82a5624a1aa8a8b24c58f99
+999908130989a04a00d76c2c09e76cf21adcb209393a6553577da17140a2c59e
+70ecbfa388dff1f03b82fb82bd07f05f7cb13f80bb07ad2fd60c011c3c588eef
+f1f4e03bbec7ce832dca927aea005e431b625796345307b019c845e6bfc3bb98
+769d84f9efb02ea6c00f9bb9ff45e81f9f280000000049454e44ae426082
+"""),
+  'basi0g16': _dehex("""
+89504e470d0a1a0a0000000d49484452000000200000002010000000017186c9
+fd0000000467414d41000186a031e8965f000000e249444154789cb5913b0ec2
+301044c7490aa8f85d81c3e4301c8f53a4ca0da8902c8144b3920b4043111282
+23bc4956681a6bf5fc3c5a3ba0448912d91a4de2c38dd8e380231eede4c4f7a1
+4677700bec7bd9b1d344689315a3418d1a6efbe5b8305ba01f8ff4808c063e26
+c60d5c81edcf6c58c535e252839e93801b15c0a70d810ae0d306b205dc32b187
+272b64057e4720ff0502154034831520154034c3df81400510cdf0015c86e5cc
+5c79c639fddba9dcb5456b51d7980eb52d8e7d7fa620a75120d6064641a05120
+b606771a05626b401a05f1f589827cf0fe44c1f0bae0055698ee8914fffffe00
+00000049454e44ae426082
+"""),
+  'basi2c08': _dehex("""
+89504e470d0a1a0a0000000d49484452000000200000002008020000018b1fdd
+350000000467414d41000186a031e8965f000000f249444154789cd59341aa04
+210c44abc07b78133d59d37333bd89d76868b566d10cf4675af8596431a11662
+7c5688919280e312257dd6a0a4cf1a01008ee312a5f3c69c37e6fcc3f47e6776
+a07f8bdaf5b40feed2d33e025e2ff4fe2d4a63e1a16d91180b736d8bc45854c5
+6d951863f4a7e0b66dcf09a900f3ffa2948d4091e53ca86c048a64390f662b50
+4a999660ced906182b9a01a8be00a56404a6ede182b1223b4025e32c4de34304
+63457680c93aada6c99b73865aab2fc094920d901a203f5ddfe1970d28456783
+26cffbafeffcd30654f46d119be4793f827387fc0d189d5bc4d69a3c23d45a7f
+db803146578337df4d0a3121fc3d330000000049454e44ae426082
+"""),
+  'basi2c16': _dehex("""
+89504e470d0a1a0a0000000d4948445200000020000000201002000001db8f01
+760000000467414d41000186a031e8965f0000020a49444154789cd5962173e3
+3010853fcf1838cc61a1818185a53e56787fa13fa130852e3b5878b4b0b03081
+b97f7030070b53e6b057a0a8912bbb9163b9f109ececbc59bd7dcf2b45492409
+d66f00eb1dd83cb5497d65456aeb8e1040913b3b2c04504c936dd5a9c7e2c6eb
+b1b8f17a58e8d043da56f06f0f9f62e5217b6ba3a1b76f6c9e99e8696a2a72e2
+c4fb1e4d452e92ec9652b807486d12b6669be00db38d9114b0c1961e375461a5
+5f76682a85c367ad6f682ff53a9c2a353191764b78bb07d8ddc3c97c1950f391
+6745c7b9852c73c2f212605a466a502705c8338069c8b9e84efab941eb393a97
+d4c9fd63148314209f1c1d3434e847ead6380de291d6f26a25c1ebb5047f5f24
+d85c49f0f22cc1d34282c72709cab90477bf25b89d49f0f351822297e0ea9704
+f34c82bc94002448ede51866e5656aef5d7c6a385cb4d80e6a538ceba04e6df2
+480e9aa84ddedb413bb5c97b3838456df2d4fec2c7a706983e7474d085fae820
+a841776a83073838973ac0413fea2f1dc4a06e71108fda73109bdae48954ad60
+bf867aac3ce44c7c1589a711cf8a81df9b219679d96d1cec3d8bbbeaa2012626
+df8c7802eda201b2d2e0239b409868171fc104ba8b76f10b4da09f6817ffc609
+c413ede267fd1fbab46880c90f80eccf0013185eb48b47ba03df2bdaadef3181
+cb8976f18e13188768170f98c0f844bb78cb04c62ddac59d09fc3fa25dfc1da4
+14deb3df1344f70000000049454e44ae426082
+"""),
+  'basi3p08': _dehex("""
+89504e470d0a1a0a0000000d494844520000002000000020080300000133a3ba
+500000000467414d41000186a031e8965f00000300504c5445224400f5ffed77
+ff77cbffff110a003a77002222ffff11ff110000222200ffac5566ff66ff6666
+ff01ff221200dcffffccff994444ff005555220000cbcbff44440055ff55cbcb
+00331a00ffecdcedffffe4ffcbffdcdc44ff446666ff330000442200ededff66
+6600ffa444ffffaaeded0000cbcbfefffffdfffeffff0133ff33552a000101ff
+8888ff00aaaa010100440000888800ffe4cbba5b0022ff22663200ffff99aaaa
+ff550000aaaa00cb630011ff11d4ffaa773a00ff4444dc6b0066000001ff0188
+4200ecffdc6bdc00ffdcba00333300ed00ed7300ffff88994a0011ffff770000
+ff8301ffbabafe7b00fffeff00cb00ff999922ffff880000ffff77008888ffdc
+ff1a33000000aa33ffff009900990000000001326600ffbaff44ffffffaaff00
+770000fefeaa00004a9900ffff66ff22220000998bff1155ffffff0101ff88ff
+005500001111fffffefffdfea4ff4466ffffff66ff003300ffff55ff77770000
+88ff44ff00110077ffff006666ffffed000100fff5ed1111ffffff44ff22ffff
+eded11110088ffff00007793ff2200dcdc3333fffe00febabaff99ffff333300
+63cb00baba00acff55ffffdcffff337bfe00ed00ed5555ffaaffffdcdcff5555
+00000066dcdc00dc00dc83ff017777fffefeffffffcbff5555777700fefe00cb
+00cb0000fe010200010000122200ffff220044449bff33ffd4aa0000559999ff
+999900ba00ba2a5500ffcbcbb4ff66ff9b33ffffbaaa00aa42880053aa00ffaa
+aa0000ed00babaffff1100fe00000044009999990099ffcc99ba000088008800
+dc00ff93220000dcfefffeaa5300770077020100cb0000000033ffedff00ba00
+ff3333edffedffc488bcff7700aa00660066002222dc0000ffcbffdcffdcff8b
+110000cb00010155005500880000002201ffffcbffcbed0000ff88884400445b
+ba00ffbc77ff99ff006600baffba00777773ed00fe00003300330000baff77ff
+004400aaffaafffefe000011220022c4ff8800eded99ff99ff55ff002200ffb4
+661100110a1100ff1111dcffbabaffff88ff88010001ff33ffb98ed362000002
+a249444154789c65d0695c0b001806f03711a9904a94d24dac63292949e5a810
+d244588a14ca5161d1a1323973252242d62157d12ae498c8124d25ca3a11398a
+16e55a3cdffab0ffe7f77d7fcff3528645349b584c3187824d9d19d4ec2e3523
+9eb0ae975cf8de02f2486d502191841b42967a1ad49e5ddc4265f69a899e26b5
+e9e468181baae3a71a41b95669da8df2ea3594c1b31046d7b17bfb86592e4cbe
+d89b23e8db0af6304d756e60a8f4ad378bdc2552ae5948df1d35b52143141533
+33bbbbababebeb3b3bc9c9c9c6c6c0c0d7b7b535323225a5aa8a02024a4bedec
+0a0a2a2bcdcd7d7cf2f3a9a9c9cdcdd8b8adcdd5b5ababa828298982824a4ab2
+b21212acadbdbc1414e2e24859b9a72730302f4f49292c4c57373c9c0a0b7372
+8c8c1c1c3a3a92936d6dfdfd293e3e26262a4a4eaea2424b4b5fbfbc9c323278
+3c0b0ba1303abaae8ecdeeed950d6669a9a7a7a141d4de9e9d5d5cdcd2229b94
+c572716132f97cb1d8db9bc3110864a39795d9db6b6a26267a7a9a98d4d6a6a7
+cb76090ef6f030354d4d75766e686030545464cb393a1a1ac6c68686eae8f8f9
+a9aa4644c8b66d6e1689dcdd2512a994cb35330b0991ad9f9b6b659596a6addd
+d8282fafae5e5323fb8f41d01f76c22fd8061be01bfc041a0323e1002c81cd30
+0b9ec027a0c930014ec035580fc3e112bc069a0b53e11c0c8095f00176c163a0
+e5301baec06a580677600ddc05ba0f13e120bc81a770133ec355a017300d4ec2
+0c7800bbe1219c02fa08f3e13c1c85dbb00a2ec05ea0dff00a6ec15a98027360
+070c047a06d7e1085c84f1b014f6c03fa0b33018b6c0211801ebe018fc00da0a
+6f61113c877eb01d4ec317a085700f26c130f80efbe132bc039a0733e106fc81
+f7f017f6c10aa0d1300a0ec374780943e1382c06fa0a9b60238c83473016cec0
+02f80f73fefe1072afc1e50000000049454e44ae426082
+"""),
+  'basi6a08': _dehex("""
+89504e470d0a1a0a0000000d4948445200000020000000200806000001047d4a
+620000000467414d41000186a031e8965f0000012049444154789cc595414ec3
+3010459fa541b8bbb26641b8069b861e8b4d12c1c112c1452a710a2a65d840d5
+949041fc481ec98ae27c7f3f8d27e3e4648047600fec0d1f390fbbe2633a31e2
+9389e4e4ea7bfdbf3d9a6b800ab89f1bd6b553cfcbb0679e960563d72e0a9293
+b7337b9f988cc67f5f0e186d20e808042f1c97054e1309da40d02d7e27f92e03
+6cbfc64df0fc3117a6210a1b6ad1a00df21c1abcf2a01944c7101b0cb568a001
+909c9cf9e399cf3d8d9d4660a875405d9a60d000b05e2de55e25780b7a5268e0
+622118e2399aab063a815808462f1ab86890fc2e03e48bb109ded7d26ce4bf59
+0db91bac0050747fec5015ce80da0e5700281be533f0ce6d5900b59bcb00ea6d
+200314cf801faab200ea752803a8d7a90c503a039f824a53f4694e7342000000
+0049454e44ae426082
+"""),
+  'basn0g01': _dehex("""
+89504e470d0a1a0a0000000d49484452000000200000002001000000005b0147
+590000000467414d41000186a031e8965f0000005b49444154789c2dccb10903
+300c05d1ebd204b24a200b7a346f90153c82c18d0a61450751f1e08a2faaead2
+a4846ccea9255306e753345712e211b221bf4b263d1b427325255e8bdab29e6f
+6aca30692e9d29616ee96f3065f0bf1f1087492fd02f14c90000000049454e44
+ae426082
+"""),
+  'basn0g02': _dehex("""
+89504e470d0a1a0a0000000d49484452000000200000002002000000001ca13d
+890000000467414d41000186a031e8965f0000001f49444154789c6360085df5
+1f8cf1308850c20053868f0133091f6390b90700bd497f818b0989a900000000
+49454e44ae426082
+"""),
+  # A version of basn0g04 dithered down to 3 bits.
+  'Basn0g03': _dehex("""
+89504e470d0a1a0a0000000d494844520000002000000020040000000093e1c8
+2900000001734249540371d88211000000fd49444154789c6d90d18906210c84
+c356f22356b2889588604301b112112b11d94a96bb495cf7fe87f32d996f2689
+44741cc658e39c0b118f883e1f63cc89dafbc04c0f619d7d898396c54b875517
+83f3a2e7ac09a2074430e7f497f00f1138a5444f82839c5206b1f51053cca968
+63258821e7f2b5438aac16fbecc052b646e709de45cf18996b29648508728612
+952ca606a73566d44612b876845e9a347084ea4868d2907ff06be4436c4b41a3
+a3e1774285614c5affb40dbd931a526619d9fa18e4c2be420858de1df0e69893
+a0e3e5523461be448561001042b7d4a15309ce2c57aef2ba89d1c13794a109d7
+b5880aa27744fc5c4aecb5e7bcef5fe528ec6293a930690000000049454e44ae
+426082
+"""),
+  'basn0g04': _dehex("""
+89504e470d0a1a0a0000000d494844520000002000000020040000000093e1c8
+290000000467414d41000186a031e8965f0000004849444154789c6360601014
+545232367671090d4d4b2b2f6720430095dbd1418e002a77e64c720450b9ab56
+912380caddbd9b1c0154ee9933e408a072efde25470095fbee1d1902001f14ee
+01eaff41fa0000000049454e44ae426082
+"""),
+  'basn0g08': _dehex("""
+89504e470d0a1a0a0000000d4948445200000020000000200800000000561125
+280000000467414d41000186a031e8965f0000004149444154789c6364602400
+1408c8b30c05058c0f0829f8f71f3f6079301c1430ca11906764a2795c0c0605
+8c8ff0cafeffcff887e67131181430cae0956564040050e5fe7135e2d8590000
+000049454e44ae426082
+"""),
+  'basn0g16': _dehex("""
+89504e470d0a1a0a0000000d49484452000000200000002010000000000681f9
+6b0000000467414d41000186a031e8965f0000005e49444154789cd5d2310ac0
+300c4351395bef7fc6dca093c0287b32d52a04a3d98f3f3880a7b857131363a0
+3a82601d089900dd82f640ca04e816dc06422640b7a03d903201ba05b7819009
+d02d680fa44c603f6f07ec4ff41938cf7f0016d84bd85fae2b9fd70000000049
+454e44ae426082
+"""),
+  'basn2c08': _dehex("""
+89504e470d0a1a0a0000000d4948445200000020000000200802000000fc18ed
+a30000000467414d41000186a031e8965f0000004849444154789cedd5c10900
+300c024085ec91fdb772133b442bf4a1f8cee12bb40d043b800a14f81ca0ede4
+7d4c784081020f4a871fc284071428f0a0743823a94081bb7077a3c00182b1f9
+5e0f40cf4b0000000049454e44ae426082
+"""),
+  'basn2c16': _dehex("""
+89504e470d0a1a0a0000000d4948445200000020000000201002000000ac8831
+e00000000467414d41000186a031e8965f000000e549444154789cd596c10a83
+301044a7e0417fcb7eb7fdadf6961e06039286266693cc7a188645e43dd6a08f
+1042003e2fe09aef6472737e183d27335fcee2f35a77b702ebce742870a23397
+f3edf2705dd10160f3b2815fe8ecf2027974a6b0c03f74a6e4192843e75c6c03
+35e8ec3202f5e84c0181bbe8cca967a00d9df3491bb040671f2e6087ce1c2860
+8d1e05f8c7ee0f1d00b667e70df44467ef26d01fbd9bc028f42860f71d188bce
+fb8d3630039dbd59601e7ab3c06cf428507f0634d039afdc80123a7bb1801e7a
+b1802a7a14c89f016d74ce331bf080ce9e08f8414f04bca133bfe642fe5e07bb
+c4ec0000000049454e44ae426082
+"""),
+  'basn3p04': _dehex("""
+89504e470d0a1a0a0000000d4948445200000020000000200403000000815467
+c70000000467414d41000186a031e8965f000000037342495404040477f8b5a3
+0000002d504c54452200ff00ffff8800ff22ff000099ffff6600dd00ff77ff00
+ff000000ff99ddff00ff00bbffbb000044ff00ff44d2b049bd00000047494441
+54789c63e8e8080d3d7366d5aaf27263e377ef66ce64204300952b28488e002a
+d7c5851c0154eeddbbe408a07119c81140e52a29912380ca4d4b23470095bb7b
+37190200e0c4ead10f82057d0000000049454e44ae426082
+"""),
+  'basn6a08': _dehex("""
+89504e470d0a1a0a0000000d4948445200000020000000200806000000737a7a
+f40000000467414d41000186a031e8965f0000006f49444154789cedd6310a80
+300c46e12764684fa1f73f55048f21c4ddc545781d52e85028fc1f4d28d98a01
+305e7b7e9cffba33831d75054703ca06a8f90d58a0074e351e227d805c8254e3
+1bb0420f5cdc2e0079208892ffe2a00136a07b4007943c1004d900195036407f
+011bf00052201a9c160fb84c0000000049454e44ae426082
+"""),
+  'cs3n3p08': _dehex("""
+89504e470d0a1a0a0000000d494844520000002000000020080300000044a48a
+c60000000467414d41000186a031e8965f0000000373424954030303a392a042
+00000054504c544592ff0000ff9200ffff00ff0000dbff00ff6dffb600006dff
+b6ff00ff9200dbff000049ffff2400ff000024ff0049ff0000ffdb00ff4900ff
+b6ffff0000ff2400b6ffffdb000092ffff6d000024ffff49006dff00df702b17
+0000004b49444154789c85cac70182000000b1b3625754b0edbfa72324ef7486
+184ed0177a437b680bcdd0031c0ed00ea21f74852ed00a1c9ed0086da0057487
+6ed0121cd6d004bda0013a421ff803224033e177f4ae260000000049454e44ae
+426082
+"""),
+  's09n3p02': _dehex("""
+89504e470d0a1a0a0000000d49484452000000090000000902030000009dffee
+830000000467414d41000186a031e8965f000000037342495404040477f8b5a3
+0000000c504c544500ff000077ffff00ffff7700ff5600640000001f49444154
+789c63600002fbff0c0c56ab19182ca381581a4283f82071200000696505c36a
+437f230000000049454e44ae426082
+"""),
+  'tbgn3p08': _dehex("""
+89504e470d0a1a0a0000000d494844520000002000000020080300000044a48a
+c60000000467414d41000186a031e8965f00000207504c54457f7f7fafafafab
+abab110000222200737300999999510d00444400959500959595e6e600919191
+8d8d8d620d00898989666600b7b700911600000000730d007373736f6f6faaaa
+006b6b6b676767c41a00cccc0000f30000ef00d51e0055555567670000dd0051
+515100d1004d4d4de61e0038380000b700160d0d00ab00560d00090900009500
+009100008d003333332f2f2f2f2b2f2b2b000077007c7c001a05002b27000073
+002b2b2b006f00bb1600272727780d002323230055004d4d00cc1e00004d00cc
+1a000d00003c09006f6f00002f003811271111110d0d0d55554d090909001100
+4d0900050505000d00e2e200000900000500626200a6a6a6a2a2a29e9e9e8484
+00fb00fbd5d500801100800d00ea00ea555500a6a600e600e6f7f700e200e233
+0500888888d900d9848484c01a007777003c3c05c8c8008080804409007c7c7c
+bb00bbaa00aaa600a61e09056262629e009e9a009af322005e5e5e05050000ee
+005a5a5adddd00a616008d008d00e20016050027270088110078780000c40078
+00787300736f006f44444400aa00c81e004040406600663c3c3c090000550055
+1a1a00343434d91e000084004d004d007c004500453c3c00ea1e00222222113c
+113300331e1e1efb22001a1a1a004400afaf00270027003c001616161e001e0d
+160d2f2f00808000001e00d1d1001100110d000db7b7b7090009050005b3b3b3
+6d34c4230000000174524e530040e6d86600000001624b474402660b7c640000
+01f249444154789c6360c0048c8c58049100575f215ee92e6161ef109cd2a15e
+4b9645ce5d2c8f433aa4c24f3cbd4c98833b2314ab74a186f094b9c2c27571d2
+6a2a58e4253c5cda8559057a392363854db4d9d0641973660b0b0bb76bb16656
+06970997256877a07a95c75a1804b2fbcd128c80b482a0b0300f8a824276a9a8
+ec6e61612b3e57ee06fbf0009619d5fac846ac5c60ed20e754921625a2daadc6
+1967e29e97d2239c8aec7e61fdeca9cecebef54eb36c848517164514af16169e
+866444b2b0b7b55534c815cc2ec22d89cd1353800a8473100a4485852d924a6a
+412adc74e7ad1016ceed043267238c901716f633a812022998a4072267c4af02
+92127005c0f811b62830054935ce017b38bf0948cc5c09955f030a24617d9d46
+63371fd940b0827931cbfdf4956076ac018b592f72d45594a9b1f307f3261b1a
+084bc2ad50018b1900719ba6ba4ca325d0427d3f6161449486f981144cf3100e
+2a5f2a1ce8683e4ddf1b64275240c8438d98af0c729bbe07982b8a1c94201dc2
+b3174c9820bcc06201585ad81b25b64a2146384e3798290c05ad280a18c0a62e
+e898260c07fca80a24c076cc864b777131a00190cdfa3069035eccbc038c30e1
+3e88b46d16b6acc5380d6ac202511c392f4b789aa7b0b08718765990111606c2
+9e854c38e5191878fbe471e749b0112bb18902008dc473b2b2e8e72700000000
+49454e44ae426082
+"""),
+  'Tp2n3p08': _dehex("""
+89504e470d0a1a0a0000000d494844520000002000000020080300000044a48a
+c60000000467414d41000186a031e8965f00000300504c544502ffff80ff05ff
+7f0703ff7f0180ff04ff00ffff06ff000880ff05ff7f07ffff06ff000804ff00
+0180ff02ffff03ff7f02ffff80ff0503ff7f0180ffff0008ff7f0704ff00ffff
+06ff000802ffffff7f0704ff0003ff7fffff0680ff050180ff04ff000180ffff
+0008ffff0603ff7f80ff05ff7f0702ffffff000880ff05ffff0603ff7f02ffff
+ff7f070180ff04ff00ffff06ff000880ff050180ffff7f0702ffff04ff0003ff
+7fff7f0704ff0003ff7f0180ffffff06ff000880ff0502ffffffff0603ff7fff
+7f0702ffff04ff000180ff80ff05ff0008ff7f07ffff0680ff0504ff00ff0008
+0180ff03ff7f02ffff02ffffffff0604ff0003ff7f0180ffff000880ff05ff7f
+0780ff05ff00080180ff02ffffff7f0703ff7fffff0604ff00ff7f07ff0008ff
+ff0680ff0504ff0002ffff0180ff03ff7fff0008ffff0680ff0504ff000180ff
+02ffff03ff7fff7f070180ff02ffff04ff00ffff06ff0008ff7f0780ff0503ff
+7fffff06ff0008ff7f0780ff0502ffff03ff7f0180ff04ff0002ffffff7f07ff
+ff0604ff0003ff7fff00080180ff80ff05ffff0603ff7f0180ffff000804ff00
+80ff0502ffffff7f0780ff05ffff0604ff000180ffff000802ffffff7f0703ff
+7fff0008ff7f070180ff03ff7f02ffff80ff05ffff0604ff00ff0008ffff0602
+ffff0180ff04ff0003ff7f80ff05ff7f070180ff04ff00ff7f0780ff0502ffff
+ff000803ff7fffff0602ffffff7f07ffff0680ff05ff000804ff0003ff7f0180
+ff02ffff0180ffff7f0703ff7fff000804ff0080ff05ffff0602ffff04ff00ff
+ff0603ff7fff7f070180ff80ff05ff000803ff7f0180ffff7f0702ffffff0008
+04ff00ffff0680ff0503ff7f0180ff04ff0080ff05ffff06ff000802ffffff7f
+0780ff05ff0008ff7f070180ff03ff7f04ff0002ffffffff0604ff00ff7f07ff
+000880ff05ffff060180ff02ffff03ff7f80ff05ffff0602ffff0180ff03ff7f
+04ff00ff7f07ff00080180ffff000880ff0502ffff04ff00ff7f0703ff7fffff
+06ff0008ffff0604ff00ff7f0780ff0502ffff03ff7f0180ffdeb83387000000
+f874524e53000000000000000008080808080808081010101010101010181818
+1818181818202020202020202029292929292929293131313131313131393939
+393939393941414141414141414a4a4a4a4a4a4a4a52525252525252525a5a5a
+5a5a5a5a5a62626262626262626a6a6a6a6a6a6a6a73737373737373737b7b7b
+7b7b7b7b7b83838383838383838b8b8b8b8b8b8b8b94949494949494949c9c9c
+9c9c9c9c9ca4a4a4a4a4a4a4a4acacacacacacacacb4b4b4b4b4b4b4b4bdbdbd
+bdbdbdbdbdc5c5c5c5c5c5c5c5cdcdcdcdcdcdcdcdd5d5d5d5d5d5d5d5dedede
+dededededee6e6e6e6e6e6e6e6eeeeeeeeeeeeeeeef6f6f6f6f6f6f6f6b98ac5
+ca0000012c49444154789c6360e7169150d230b475f7098d4ccc28a96ced9e32
+63c1da2d7b8e9fb97af3d1fb8f3f18e8a0808953544a4dd7c4c2c9233c2621bf
+b4aab17fdacce5ab36ee3a72eafaad87efbefea68702362e7159652d031b07cf
+c0b8a4cce28aa68e89f316aedfb4ffd0b92bf79fbcfcfe931e0a183904e55435
+8decdcbcc22292b3caaadb7b27cc5db67af3be63e72fdf78fce2d31f7a2860e5
+119356d037b374f10e8a4fc92eaa6fee99347fc9caad7b0f9ebd74f7c1db2fbf
+e8a180995f484645dbdccad12f38363dafbcb6a573faeca5ebb6ed3e7ce2c29d
+e76fbefda38702063e0149751d537b67ff80e8d4dcc29a86bea97316add9b0e3
+c0e96bf79ebdfafc971e0a587885e515f58cad5d7d43a2d2720aeadaba26cf5a
+bc62fbcea3272fde7efafac37f3a28000087c0fe101bc2f85f0000000049454e
+44ae426082
+"""),
+  'tbbn1g04': _dehex("""
+89504e470d0a1a0a0000000d494844520000002000000020040000000093e1c8
+290000000467414d41000186a031e8965f0000000274524e530007e8f7589b00
+000002624b47440000aa8d23320000013e49444154789c55d1cd4b024118c7f1
+efbe6419045b6a48a72d352808b435284f9187ae9b098627a1573a19945beba5
+e8129e8222af11d81e3a4545742de8ef6af6d5762e0fbf0fc33c33f36085cb76
+bc4204778771b867260683ee57e13f0c922df5c719c2b3b6c6c25b2382cea4b9
+9f7d4f244370746ac71f4ca88e0f173a6496749af47de8e44ba8f3bf9bdfa98a
+0faf857a7dd95c7dc8d7c67c782c99727997f41eb2e3c1e554152465bb00fe8e
+b692d190b718d159f4c0a45c4435915a243c58a7a4312a7a57913f05747594c6
+46169866c57101e4d4ce4d511423119c419183a3530cc63db88559ae28e7342a
+1e9c8122b71139b8872d6e913153224bc1f35b60e4445bd4004e20ed6682c759
+1d9873b3da0fbf50137dc5c9bde84fdb2ec8bde1189e0448b63584735993c209
+7a601bd2710caceba6158797285b7f2084a2f82c57c01a0000000049454e44ae
+426082
+"""),
+  'tbrn2c08': _dehex("""
+89504e470d0a1a0a0000000d4948445200000020000000200802000000fc18ed
+a30000000467414d41000186a031e8965f0000000674524e53007f007f007f8a
+33334f00000006624b474400ff0000000033277cf3000004d649444154789cad
+965f68537714c73fd912d640235e692f34d0406fa0c1663481045ab060065514
+56660a295831607df0a1488715167060840a1614e6431e9cb34fd2c00a762c85
+f6a10f816650c13b0cf40612e1822ddc4863bd628a8924d23d6464f9d3665dd9
+f7e977ce3dbff3cd3939bfdfef6bb87dfb364782dbed065ebe7cd93acc78b4ec
+a228debd7bb7bfbfbfbbbbfb7f261045311a8d261209405194274f9ea4d3e916
+f15f1c3eb5dd6e4fa5fecce526239184a2b0b8486f6f617171b1f5ae4311381c
+8e57af5e5dbd7a351088150a78bd389d44222c2f93cdfe66b7db8f4ee07038b6
+b6b6bebf766d7e7e7e60a06432313b4ba984c3c1c4049a46b95c5a58583822c1
+dbb76f27272733d1b9df853c3030c0f232562b9108cf9eb1b888d7cbf030abab
+31abd5fa1f08dc6ef7e7cf9f1f3f7e1c8944745d4f1400c62c001313acad21cb
+b8dd2c2c603271eb1640341aad4c6d331aa7e8c48913a150a861307ecc11e964
+74899919bc5e14e56fffc404f1388502f178dceff7ef4bf0a5cfe7abb533998c
+e5f9ea2f1dd88c180d64cb94412df3dd57e83a6b3b3c7a84c98420100c72fd3a
+636348bae726379fe69e8e8d8dbd79f3a6558b0607079796965256479b918085
+7b02db12712b6181950233023f3f647494ee6e2e5ea45864cce5b8a7fe3acffc
+3aebb22c2bd5d20e22d0757d7b7bbbbdbd3d94a313bed1b0aa3cd069838b163a
+8d4c59585f677292d0b84d9a995bd337def3fe6bbe5e6001989b9b6bfe27ea08
+36373781542ab56573248b4c5bc843ac4048c7ab21aa24ca00534c25482828a3
+8c9ee67475bbaaaab22cb722c8e57240a150301a8d219de94e44534d7d90e885
+87acb0e2c4f9800731629b6c5ee14a35a6b9887d2a0032994cb9cf15dbe59650
+ff7b46a04c9a749e7cc5112214266cc65c31354d5b5d5d3d90209bcd5616a552
+a95c2e87f2a659bd9ee01c2cd73964e438f129a6aa9e582c363838b80f81d7eb
+5555b56a2a8ad2d9d7affd0409f8015c208013fea00177b873831b0282c964f2
+783c1e8fa7582cee5f81a669b5e6eeeeaee58e8559b0c233d8843c7c0b963a82
+34e94b5cb2396d7d7d7db22c8ba258fb0afd43f0e2c58b919191ba9de9b4d425
+118329b0c3323c8709d02041b52b4ea7f39de75d2a934a2693c0a953a76a93d4
+5d157ebf7f6565a5542a553df97c5e10045dd731c130b86113cc300cbd489224
+08422a952a140a95788fc763b1d41558d7a2d7af5f5fb870a1d6a3aaaacd6603
+18802da84c59015bd2e6897b745d9765b99a1df0f97c0daf74e36deaf7fbcd66
+73ad2797cb89a2c839880188a2e8743a8bc5a22ccbba5e376466b3b9bdbdbd21
+6123413a9d0e0402b51e4dd3bababa788eb022b85caeb6b6364551b6b7b76942
+43f7f727007a7a7a04a1ee8065b3595fde2768423299ac1ec6669c3973e65004
+c0f8f878ad69341a33994ced2969c0d0d0502412f9f8f163f3a7fd654b474787
+288ad53e74757535df6215b85cae60302849d2410aecc037f9f2e5cbd5b5c160
+680eb0dbede170381c0e7ff8f0a185be3b906068684892a4ca7a6f6faff69328
+8ad3d3d3f7efdfdfdbdbfb57e96868a14d0d0643381c96242997cbe5f3794010
+84603078fcf8f1d6496bd14a3aba5c2ea7d369341a5555b5582c8140e0fcf9f3
+1b1b1b87cf4eeb0a8063c78e45a3d19e9e1ebfdfdf5a831e844655d18093274f
+9e3d7bf6d3a74f3b3b3b47c80efc05ff7af28fefb70d9b0000000049454e44ae
+426082
+"""),
+  'basn6a16': _dehex("""
+89504e470d0a1a0a0000000d494844520000002000000020100600000023eaa6
+b70000000467414d41000186a031e8965f00000d2249444154789cdd995f6c1c
+d775c67ff38fb34b724d2ee55a8e4b04a0ac87049100cab4dbd8c6528902cb4d
+10881620592e52d4325ac0905bc98a94025e71fd622cb5065ac98a0c283050c0
+728a00b6e542a1d126885cd3298928891d9a0444037e904434951d4b90b84b2f
+c9dde1fcebc33977a95555348f411e16dfce9d3b77ee77eebde77ce78c95a669
+0ad07c17009a13edd898b87dfb1fcb7d2b4d1bff217f33df80deb1e6267df0ff
+c1e6e6dfafdf1f5a7fd30f9aef66b6d546dd355bf02c40662e3307f9725a96c6
+744c3031f83782f171c148dbc3bf1774f5dad1e79d6f095a3f54d4fbec5234ef
+d9a2f8d73afe4f14f57ef4f42def7b44f19060f06b45bddf1c5534d77fd922be
+2973a15a82e648661c6e3240aa3612ead952b604bde57458894f29deaf133bac
+13d2766f5227a4a3b8cf08da7adfd6fbd6bd8a4fe9dbb43d35e3dfa3f844fbf8
+9119bf4f7144094fb56333abf8a86063ca106f94b3a3b512343765e60082097f
+1bb86ba72439a653519b09f5cee1ce61c897d37eedf5553580ae60f4af8af33a
+b14fd400b6a0f34535c0434afc0b3a9f07147527a5fa7ca218ff56c74d74dc3f
+155cfd3325fc278acf2ae1cb4a539f5f9937c457263b0bd51234c732a300cdd1
+cc1840f0aaff54db0e4874ed5a9b5d6d27d4bb36746d80de72baa877ff4b275a
+d7895ed1897ea4139b5143fcbb1a62560da1ed9662aaed895ec78a91c18795b8
+5e07ab4af8ba128e95e682e0728bf8f2e5ae815a091a53d902ac1920d8e05f06
+589de8d8d66680789f4e454fb9d9ec66cd857af796ee2d902fa73fd5bba775a2
+153580ae44705ed0d37647d15697cb8f14bfa3e3e8fdf8031d47af571503357c
+f30d25acedcbbf135c9a35c49766ba07ab255859e8ec03684e66860182dff8f7
+0304bff6ff1c20fc81b7afdd00a71475539a536e36bb5973a19e3b923b02bde5
+e4efd4003ac170eb2d13fe274157afedbd82d6fb3a9a1e85e4551d47cf7078f8
+9671fe4289ebf5f2bf08d63f37c4eb4773c55a0996efeefa0ca011671d8060ca
+2f0004c7fcc300e166ef0240f825efe3361f106d57d423d0723f7acacd66376b
+2ed47b7a7a7a205f4ef4ac4691e0aad9aa0d41cf13741c3580a506487574ddca
+61a8c403c1863ebfbcac3475168b2de28b8b3d77544bb05ce92a02aceced3c0d
+d0cc65ea371b201cf1c601c24dde1c4078cedbdeb60322f50126a019bf6edc9b
+39e566b39b3517eaf97c3e0fbde5e4491d45bd74537145d155b476aa0176e868
+c6abebf30dbd5e525c54ac8e18e2d56abeb756827a3d970358a97416019a6f64
+f60004fdfe1580d5c98e618070cc1b05887eee7e0d209a70db7d8063029889b4
+c620ead78d7b33a7dc6c76b3e6427ddddbebde867c393aa7845e5403e8ca794a
+d0d6fb897af5f03525fe5782f5e7046bdaef468bf88d1debc6ab25583cd17310
+6079b9ab0ba059c914018245bf076075b5a303200c3c1f209a733701444fbbaf
+00c4134ebb016c5d0b23614c243701cdf875e3decce9349bddacb9505fbf7dfd
+76e82d87736a00f5d2b5ffd4b7dce2719a4d25ae717ee153c1abef18e257cfad
+7fa45682da48ef38c052b53b0fd06864b300c151ff08c0ea431de701a287dd5f
+004497dc7b01a253ee3e80b8c7f91c20f967fb6fdb7c80ada7d8683723614c24
+3701cdf875e3decc29379bddacb950ef3fd47f08f2e5a61ea4aa2a3eb757cd55
+13345efcfa59c12b2f19e2578ef77fb75a82854ffbee01a83f977b11a031931d
+040802df07082b5e11207cc17b1e209a770700e2df0a83e409fb7580f827c230
+99b06fd901fb058d6835dacd481813c94d40337eddb83773cacd66376b2ed437
+bebcf165e82d2f4e4beb7f3fa6e652c2d7ee10bc78c010bfb87fe3c95a09ae9f
+bd732740bd2fb700d0f865f64180e059ff044018ca0ca28a5b04883f701e0088
+bfec7c0c909cb71f0448c6ec518074b375012079d9dedf66004bcfbc51eb2dd1
+aadacd481813c94d40337eddb83773cacd66376b2ed487868686205fbe7c49ef
+5605a73f34c4a7a787eeab96e0da81bb4e022c15ba27019a5b339300e16bf286
+a8eae601e25866907cdf3e0890acb36f00245fb57f05904e59c300e92561946e
+b2e600d209ab7d07f04d458dfb46ad1bd16ab49b913026929b8066fcba716fe6
+949bcd6ed65ca8ef7e7cf7e3d05b7e7c8f217ee6cdddbb6a25a856f37980e0c7
+fe4e80a82623c48193014846ec7180f4acf518409aca0cd28a5504e03b32c374
+de1a00608a0240faaa327a4b19fe946fb6f90054dbb5f2333d022db56eb4966a
+3723614c243701cdf8f556bea8a7dc6c76b3e66bd46584ddbbcebc0990cf4b0f
+ff4070520c282338a7e26700ec725202b01e4bcf0258963c6f1d4d8f0030cb20
+805549c520930c03584fa522b676f11600ffc03fde3e1b3489a9c9054c9aa23b
+c08856a3dd8c843191dc0434e3d78d7b33a75c36fb993761f7ae5a69f72ef97f
+e6ad336fed7e1c60e8bee96980bbdebbb60da07b7069062033d9dc0ae03d296f
+70ab511ec071640676252902d833c916007b3e1900b0a6d2028035968e025861
+ea01581369fb11488c34d18cbc95989afccca42baad65ba2d5683723614c24d7
+8066fcbab8b7e96918baaf5aaa56219f975fb50a43f7c9bde90fa73f1c1a02d8
+78f2e27e803b77ca08b90519315b6fe400fc1392097a9eccc0ad444500e70199
+a1331f0f00d8934901c07e5d526ceb87c2d07e2579badd005a2b31a5089391b7
+1253358049535a6add8856dd0146c298482e01ede27ed878b256ba7600ee3a09
+c18fc1df09fe01084ec25defc1b56db0f1a4f4bd78e0e2818d2f0334e7330300
+7df7c888b917e50dd9c1c60c80efcb0cbc63e1f700bce7c31700dccbd1060027
+8add9b0de06c8e2f00d84962b7d7030e2a61538331b98051f92631bd253f336a
+dd8856a3dd44c25c390efddfad96ae9f853b77c25201ba27c533b8bdf28b6ad0
+3d084b33d2e7fa59099e9901b8f2d29597fa0f01848f78e70082117f1ca07b76
+6910209b9519f895a008d031bbba05c09d8f06005c5b18b8fba25300cea6780e
+c03e911c6ccf06d507b48a4fa606634a114609de929f9934c5a87511ad57cfc1
+fa476aa5854fa1ef1e3910b905686e85cc24c40138198915f133d2d6dc2a7dea
+7df2ccc2a752faf2cec1d577aebeb37e3b4034eeee0008dff3be0e6b923773b4
+7904c0ef9119767cb4fa1500ef1361e08e452500f71561e84cc4ed3e20fab6a2
+c905f40cb76a3026bf3319b91ac2e46792a6dcd801ebc6aba5da08f48ecb81c8
+bd088d5f42f6417191de93908c803d0e76199292b485af41b60e8d9c3c537f0e
+8211f0c7211a077707dc18b931b2ee6d80a4d7ae024491ebc24d4a708ff70680
+7f25e807e8785f1878e322d6ddaf453f0770ff2dfa769b01423dbbad72a391b6
+5a7c3235985629423372494cab55c8f7d64a8b27a0e7202c55a13b0f8d19c80e
+4ae9ca3f015115dc3ca467c17a4c7ee95970ab10e5a54ff0ac3cd39881ee5958
+1a84f03df0be0e492fd855a8d6aa35d10b4962dbb0a604a3d3ee5e80a8eee600
+a24977f8660378bf0bbf00e01d0a8fb7f980f04b8aa6ce6aca8d5a7533c52753
+839152c4e222f4dc512dd5eb90cbc981e8ea12cf90cd8a8bf47d89159e2741d3
+7124f65b96fcd254dae258fa84a13c13043246a32129574787e49eae2b49b86d
+c3e2e78b9ff7f4002415bb08907c66df0d103b4e0c104db90500ff70700c203a
+ee1e82dba4c3e16e256c0acca6ceaae9afd1f612d7eb472157ac95962bd05594
+7dd1598466053245088e827f44628657942a825b84e4fb601f84b4025611aca3
+901e01bb024911dc0a4445f08e41f83df02b10142173149ab71baf027611ea95
+7a257704201d14cd9af4d90b00f194530088cb4e09c0df1c5c0088f7393f6833
+c0aa3ac156655de3bca9b34ab9716906ba07aba5e5bba1eb3358d90b9da7c533
+64f6888bf47b60f521e8380fe10be03d2feac17900927560df40f4e48f805960
+50328d648bf4893f9067c217a0631656b7c898c122847bc07b03a2d3e0ee85e4
+33b0ef867450c4fad2ecd26cf7168074c0ba0c904cdac300c9cfec4701924df6
+1cdca61e10685c6f7d52d0caba1498972f43d740adb4b2009d7d7220b20e3473
+90a943d00ffe959bb6eac3e0fe42ea49ee00c45f06e76329b1dabf127d690d80
+5581b408f63c2403e0cc433c00ee658836803b0fd100747c04ab5f917704fd10
+d5c1cd41ec801343d207f602a403605d86e5f9e5f9ae0d00e994556833806685
+c931fb709b0f08b4e869bea5c827859549e82c544b8d29c816a0390999613920
+7e610d5727a16318c2003c1fa24be0de2b32caf92224e7c17e5004b6350c4c01
+05601218066b0ad28224e149019c086257ca315102de2712903bde97b8144d82
+3b2c6ac52d403c054e019249b087f53d0558995a99ea946c70cc927458b3c1ff
+550f30050df988d4284376b4566a8e416654cc921985e037e0df0fc131f00f4b
+acf0c6211c036f14a239703741740adc7da227edd7e56b833d0ae92549b4d357
+25dfb49ed2ff63908e6adf27d6d0dda7638d4154d2778daca17f58e61297c129
+41f233b01f5dc3740cac51688c35c6b22580f48224fee9b83502569a66b629f1
+09f3713473413e2666e7fe6f6c6efefdfafda1f56f6e06f93496d9d67cb7366a
+9964b6f92e64b689196ec6c604646fd3fe4771ff1bf03f65d8ecc3addbb5f300
+00000049454e44ae426082
+"""),
+}
+
+def read_pam_header(infile):
+    """
+    Read (the rest of a) PAM header.  `infile` should be positioned
+    immediately after the initial 'P7' line (at the beginning of the
+    second line).  Returns are as for `read_pnm_header`.
+    """
+
+    # Unlike PBM, PGM, and PPM, we can read the header a line at a time.
+    header = dict()
+    while True:
+        l = infile.readline().strip()
+        if l == strtobytes('ENDHDR'):
+            break
+        if not l:
+            raise EOFError('PAM ended prematurely')
+        if l[0] == strtobytes('#'):
+            continue
+        l = l.split(None, 1)
+        if l[0] not in header:
+            header[l[0]] = l[1]
+        else:
+            header[l[0]] += strtobytes(' ') + l[1]
+
+    required = ['WIDTH', 'HEIGHT', 'DEPTH', 'MAXVAL']
+    required = [strtobytes(x) for x in required]
+    WIDTH,HEIGHT,DEPTH,MAXVAL = required
+    present = [x for x in required if x in header]
+    if len(present) != len(required):
+        raise Error('PAM file must specify WIDTH, HEIGHT, DEPTH, and MAXVAL')
+    width = int(header[WIDTH])
+    height = int(header[HEIGHT])
+    depth = int(header[DEPTH])
+    maxval = int(header[MAXVAL])
+    if (width <= 0 or
+        height <= 0 or
+        depth <= 0 or
+        maxval <= 0):
+        raise Error(
+          'WIDTH, HEIGHT, DEPTH, MAXVAL must all be positive integers')
+    return 'P7', width, height, depth, maxval
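+
+def _example_read_pam_header():
+    # Illustrative sketch only (not part of upstream PyPNG): parse a minimal
+    # PAM header held in memory.  read_pam_header expects the stream to be
+    # positioned just after the initial 'P7' line, so consume that first.
+    # The helper name _example_read_pam_header is made up for this sketch.
+    f = BytesIO(strtobytes(
+      'P7\nWIDTH 1\nHEIGHT 1\nDEPTH 3\nMAXVAL 255\n'
+      'TUPLTYPE RGB\nENDHDR\n'))
+    f.readline()  # skip the 'P7' magic line
+    return read_pam_header(f)  # ('P7', 1, 1, 3, 255)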
+
+def read_pnm_header(infile, supported=('P5','P6')):
+    """
+    Read a PNM header, returning (format,width,height,depth,maxval).
+    `width` and `height` are in pixels.  `depth` is the number of
+    channels in the image; for PBM and PGM it is synthesized as 1, for
+    PPM as 3; for PAM images it is read from the header.  `maxval` is
+    synthesized (as 1) for PBM images.
+    """
+
+    # Generally, see http://netpbm.sourceforge.net/doc/ppm.html
+    # and http://netpbm.sourceforge.net/doc/pam.html
+
+    supported = [strtobytes(x) for x in supported]
+
+    # Technically 'P7' must be followed by a newline, so by using
+    # rstrip() we are being liberal in what we accept.  I think this
+    # is acceptable.
+    type = infile.read(3).rstrip()
+    if type not in supported:
+        raise NotImplementedError('file format %s not supported' % type)
+    if type == strtobytes('P7'):
+        # PAM header parsing is completely different.
+        return read_pam_header(infile)
+    # Expected number of tokens in the header (3 for the PBM types
+    # P1 and P4, 4 for the others).
+    expected = 4
+    pbm = ('P1', 'P4')
+    if type in pbm:
+        expected = 3
+    header = [type]
+
+    # We have to read the rest of the header byte by byte because the
+    # final whitespace character (immediately following the MAXVAL in
+    # the case of P6) may not be a newline.  Of course all PNM files in
+    # the wild use a newline at this point, so it's tempting to use
+    # readline; but it would be wrong.
+    def getc():
+        c = infile.read(1)
+        if not c:
+            raise Error('premature EOF reading PNM header')
+        return c
+
+    c = getc()
+    while True:
+        # Skip whitespace that precedes a token.
+        while c.isspace():
+            c = getc()
+        # Skip comments.
+        while c == '#':
+            while c not in '\n\r':
+                c = getc()
+        if not c.isdigit():
+            raise Error('unexpected character %s found in header' % c)
+        # According to the specification it is legal to have comments
+        # that appear in the middle of a token.  This is bonkers; I've
+        # never seen it; and it's a bit awkward to code a good lexer
+        # for it in Python (no goto).  So such cases are not handled
+        # here.
+        token = strtobytes('')
+        while c.isdigit():
+            token += c
+            c = getc()
+        # Slight hack.  All "tokens" are decimal integers, so convert
+        # them here.
+        header.append(int(token))
+        if len(header) == expected:
+            break
+    # Skip comments (again)
+    while c == '#':
+        while c not in '\n\r':
+            c = getc()
+    if not c.isspace():
+        raise Error('expected header to end with whitespace, not %s' % c)
+
+    if type in pbm:
+        # synthesize a MAXVAL
+        header.append(1)
+    depth = (1,3)[type == strtobytes('P6')]
+    return header[0], header[1], header[2], depth, header[3]
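+
+def _example_read_pnm_header():
+    # Illustrative sketch only (not part of upstream PyPNG): parse the
+    # header of a tiny 2x2 PGM (P5) file held in memory.  The helper name
+    # _example_read_pnm_header is made up for this sketch.
+    f = BytesIO(strtobytes('P5 2 2 255\n' + '\x00' * 4))
+    return read_pnm_header(f)  # ('P5', 2, 2, 1, 255)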
+
+def write_pnm(file, width, height, pixels, meta):
+    """Write a Netpbm PNM/PAM file."""
+
+    bitdepth = meta['bitdepth']
+    maxval = 2**bitdepth - 1
+    # Rudely, the number of image planes can be used to determine
+    # whether we are L (PGM), LA (PAM), RGB (PPM), or RGBA (PAM).
+    planes = meta['planes']
+    # Can be an assert as long as we assume that pixels and meta came
+    # from a PNG file.
+    assert planes in (1,2,3,4)
+    if planes in (1,3):
+        if 1 == planes:
+            # PGM
+            # Could generate PBM if maxval is 1, but we don't (for one
+            # thing, we'd have to convert the data, not just blat it
+            # out).
+            fmt = 'P5'
+        else:
+            # PPM
+            fmt = 'P6'
+        file.write('%s %d %d %d\n' % (fmt, width, height, maxval))
+    if planes in (2,4):
+        # PAM
+        # See http://netpbm.sourceforge.net/doc/pam.html
+        if 2 == planes:
+            tupltype = 'GRAYSCALE_ALPHA'
+        else:
+            tupltype = 'RGB_ALPHA'
+        file.write('P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\n'
+                   'TUPLTYPE %s\nENDHDR\n' %
+                   (width, height, planes, maxval, tupltype))
+    # Values per row
+    vpr = planes * width
+    # struct format
+    fmt = '>%d' % vpr
+    if maxval > 0xff:
+        fmt = fmt + 'H'
+    else:
+        fmt = fmt + 'B'
+    for row in pixels:
+        file.write(struct.pack(fmt, *row))
+    file.flush()
+
+def color_triple(color):
+    """
+    Convert a command line colour value to an RGB triple of integers.
+    FIXME: Somewhere we need support for greyscale backgrounds etc.
+    """
+    if color.startswith('#') and len(color) == 4:
+        return (int(color[1], 16),
+                int(color[2], 16),
+                int(color[3], 16))
+    if color.startswith('#') and len(color) == 7:
+        return (int(color[1:3], 16),
+                int(color[3:5], 16),
+                int(color[5:7], 16))
+    elif color.startswith('#') and len(color) == 13:
+        return (int(color[1:5], 16),
+                int(color[5:9], 16),
+                int(color[9:13], 16))
+
+def _add_common_options(parser):
+    """Call *parser.add_option* for each of the options that are
+    common between this PNG--PNM conversion tool and the gen
+    tool.
+    """
+    parser.add_option("-i", "--interlace",
+                      default=False, action="store_true",
+                      help="create an interlaced PNG file (Adam7)")
+    parser.add_option("-t", "--transparent",
+                      action="store", type="string", metavar="#RRGGBB",
+                      help="mark the specified colour as transparent")
+    parser.add_option("-b", "--background",
+                      action="store", type="string", metavar="#RRGGBB",
+                      help="save the specified background colour")
+    parser.add_option("-g", "--gamma",
+                      action="store", type="float", metavar="value",
+                      help="save the specified gamma value")
+    parser.add_option("-c", "--compression",
+                      action="store", type="int", metavar="level",
+                      help="zlib compression level (0-9)")
+    return parser
+
+def _main(argv):
+    """
+    Run the PNG encoder with options from the command line.
+    """
+
+    # Parse command line arguments
+    from optparse import OptionParser
+    import re
+    version = '%prog ' + re.sub(r'( ?\$|URL: |Rev:)', '', __version__)
+    parser = OptionParser(version=version)
+    parser.set_usage("%prog [options] [imagefile]")
+    parser.add_option('-r', '--read-png', default=False,
+                      action='store_true',
+                      help='Read PNG, write PNM')
+    parser.add_option("-a", "--alpha",
+                      action="store", type="string", metavar="pgmfile",
+                      help="alpha channel transparency (RGBA)")
+    _add_common_options(parser)
+
+    (options, args) = parser.parse_args(args=argv[1:])
+
+    # Convert options
+    if options.transparent is not None:
+        options.transparent = color_triple(options.transparent)
+    if options.background is not None:
+        options.background = color_triple(options.background)
+
+    # Prepare input and output files
+    if len(args) == 0:
+        infilename = '-'
+        infile = sys.stdin
+    elif len(args) == 1:
+        infilename = args[0]
+        infile = open(infilename, 'rb')
+    else:
+        parser.error("more than one input file")
+    outfile = sys.stdout
+    if sys.platform == "win32":
+        import msvcrt, os
+        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
+
+    if options.read_png:
+        # Read PNG, write PNM (PGM/PPM/PAM)
+        png = Reader(file=infile)
+        width,height,pixels,meta = png.asDirect()
+        write_pnm(outfile, width, height, pixels, meta) 
+    else:
+        # Encode PNM to PNG
+        format, width, height, depth, maxval = \
+          read_pnm_header(infile, ('P5','P6','P7'))
+        # When it comes to the variety of input formats, we do something
+        # rather rude.  Observe that L, LA, RGB, RGBA are the 4 colour
+        # types supported by PNG and that they correspond to 1, 2, 3, 4
+        # channels respectively.  So we use the number of channels in
+        # the source image to determine which one we have.  We do not
+        # care about TUPLTYPE.
+        greyscale = depth <= 2
+        pamalpha = depth in (2,4)
+        supported = map(lambda x: 2**x-1, range(1,17))
+        try:
+            mi = supported.index(maxval)
+        except ValueError:
+            raise NotImplementedError(
+              'your maxval (%s) not in supported list %s' %
+              (maxval, str(supported)))
+        bitdepth = mi+1
+        writer = Writer(width, height,
+                        greyscale=greyscale,
+                        bitdepth=bitdepth,
+                        interlace=options.interlace,
+                        transparent=options.transparent,
+                        background=options.background,
+                        alpha=bool(pamalpha or options.alpha),
+                        gamma=options.gamma,
+                        compression=options.compression)
+        if options.alpha:
+            pgmfile = open(options.alpha, 'rb')
+            format, awidth, aheight, adepth, amaxval = \
+              read_pnm_header(pgmfile, 'P5')
+            if amaxval != 255:
+                raise NotImplementedError(
+                  'maxval %s not supported for alpha channel' % amaxval)
+            if (awidth, aheight) != (width, height):
+                raise ValueError("alpha channel image size mismatch"
+                                 " (%s has %sx%s but %s has %sx%s)"
+                                 % (infilename, width, height,
+                                    options.alpha, awidth, aheight))
+            writer.convert_ppm_and_pgm(infile, pgmfile, outfile)
+        else:
+            writer.convert_pnm(infile, outfile)
+
+
+if __name__ == '__main__':
+    try:
+        _main(sys.argv)
+    except Error, e:
+        print >>sys.stderr, e
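+
+# A rough usage sketch (hypothetical file names; assumes this module is saved
+# as png.py and invoked as a script, per the option parser above):
+#
+#   python png.py --read-png picture.png > picture.pnm     # PNG -> PNM/PAM
+#   python png.py picture.ppm > picture.png                # PNM -> PNG
+#   python png.py --alpha mask.pgm picture.ppm > rgba.png  # add a PGM alpha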
diff --git a/catapult/telemetry/third_party/pyfakefs/README.chromium b/catapult/telemetry/third_party/pyfakefs/README.chromium
new file mode 100644
index 0000000..6b9dac7
--- /dev/null
+++ b/catapult/telemetry/third_party/pyfakefs/README.chromium
@@ -0,0 +1,16 @@
+Name: pyfakefs
+Short Name: pyfakefs
+URL: https://github.com/jmcgeheeiv/pyfakefs
+Version: 7e8e097c0165ba9d51fa9d34a0888d8ec082d15b (commit hash)
+License: Apache License 2.0
+License File: NOT_SHIPPED
+Security Critical: no
+
+Local modification: create a pyfakefs project folder and move pyfakefs to
+pyfakefs/pyfakefs, since we don't want the project folder to be a module.
+
+Description:
+pyfakefs implements a fake file system that mocks the Python file system
+modules. Using pyfakefs, your tests operate on a fake file system in memory
+without touching the real disk. The software under test requires no modification
+to work with pyfakefs.
diff --git a/catapult/telemetry/third_party/pyfakefs/pyfakefs/.travis.yml b/catapult/telemetry/third_party/pyfakefs/pyfakefs/.travis.yml
new file mode 100644
index 0000000..bbd351b
--- /dev/null
+++ b/catapult/telemetry/third_party/pyfakefs/pyfakefs/.travis.yml
@@ -0,0 +1,29 @@
+# Perform continuous integration testing with Travis CI.
+#
+# Copyright 2015 John McGehee. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+language: python
+python:
+  - "2.6"
+  - "2.7"
+  - "3.2"
+  - "3.3"
+  - "3.4"
+
+install:
+  - if [[ $TRAVIS_PYTHON_VERSION == 2.6 ]]; then pip install importlib unittest2; fi
+  - pip install -r requirements.txt
+
+script: ./all_tests.py
diff --git a/catapult/telemetry/third_party/pyfakefs/pyfakefs/COPYING b/catapult/telemetry/third_party/pyfakefs/pyfakefs/COPYING
new file mode 100644
index 0000000..67db858
--- /dev/null
+++ b/catapult/telemetry/third_party/pyfakefs/pyfakefs/COPYING
@@ -0,0 +1,175 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
diff --git a/catapult/telemetry/third_party/pyfakefs/pyfakefs/README.md b/catapult/telemetry/third_party/pyfakefs/pyfakefs/README.md
new file mode 100644
index 0000000..ba3f5e5
--- /dev/null
+++ b/catapult/telemetry/third_party/pyfakefs/pyfakefs/README.md
@@ -0,0 +1,51 @@
+# pyfakefs
+pyfakefs implements a fake file system that mocks the Python file system modules.
+Using pyfakefs, your tests operate on a fake file system in memory without
+touching the real disk.  The software under test requires no modification to
+work with pyfakefs.
+
+## Usage
+See the [usage tutorial](http://github.com/jmcgeheeiv/pyfakefs/wiki/Tutorial)
+for a concrete example of how to apply pyfakefs.
+
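+A minimal sketch of that pattern, modeled on `example_test.py` in this
+directory (the test class, file path, and contents below are illustrative
+only):
+
+```python
+import os
+import unittest
+
+import fake_filesystem_unittest
+
+
+class ExampleConfigTest(fake_filesystem_unittest.TestCase):
+    def setUp(self):
+        # Route os, os.path, glob, shutil and open() to the fake file system.
+        self.setUpPyfakefs()
+
+    def test_reads_config(self):
+        # This file exists only in memory, never on the real disk.
+        self.fs.CreateFile('/etc/app.conf', contents='verbose = true\n')
+        self.assertTrue(os.path.exists('/etc/app.conf'))
+
+
+if __name__ == '__main__':
+    unittest.main()
+```
+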
+## Continuous Integration
+pyfakefs is tested with Python 2.6 and above.  See the
+[pyfakefs continuous integration test results](https://travis-ci.org/jmcgeheeiv/pyfakefs)
+on [Travis-CI](http://travis-ci.org) for each Python version.
+
+## Installation
+
+### Compatibility
+pyfakefs works with Python 2.6 and above.  pyfakefs has no dependencies beyond the Python
+standard library.
+
+### PyPi
+The pyfakefs project is hosted on PyPI and can be installed with:
+
+```bash
+pip install pyfakefs
+```
+
+## History
+pyfakefs.py was initially developed at Google by Mike Bland as a modest fake
+implementation of core Python modules.  It was introduced to all of Google
+in September 2006. Since then, it has been enhanced to extend its
+functionality and usefulness.  At Google alone, pyfakefs is used in over 2,000
+Python tests.
+
+pyfakefs was released to the public in 2011 as Google Code project
+[pyfakefs](http://code.google.com/p/pyfakefs/).
+
+Fork
+[jmcgeheeiv-pyfakefs](http://code.google.com/p/jmcgeheeiv-pyfakefs/)
+added a [usage tutorial](http://github.com/jmcgeheeiv/pyfakefs/wiki/Tutorial) and
+direct support for [unittest](http://docs.python.org/2/library/unittest.html)
+and [doctest](http://docs.python.org/2/library/doctest.html).
+
+Fork
+[shiffdane-jmcgeheeiv-pyfakefs](http://code.google.com/p/shiffdane-jmcgeheeiv-pyfakefs/)
+added further corrections.
+
+After the [shutdown of Google Code was announced](http://google-opensource.blogspot.com/2015/03/farewell-to-google-code.html),
+all three Google Code projects were merged together here on GitHub as pyfakefs.
+
diff --git a/catapult/perf_insights/third_party/__init__.py b/catapult/telemetry/third_party/pyfakefs/pyfakefs/__init__.py
old mode 100644
new mode 100755
similarity index 100%
copy from catapult/perf_insights/third_party/__init__.py
copy to catapult/telemetry/third_party/pyfakefs/pyfakefs/__init__.py
diff --git a/catapult/telemetry/third_party/pyfakefs/pyfakefs/all_tests.py b/catapult/telemetry/third_party/pyfakefs/pyfakefs/all_tests.py
new file mode 100755
index 0000000..1226362
--- /dev/null
+++ b/catapult/telemetry/third_party/pyfakefs/pyfakefs/all_tests.py
@@ -0,0 +1,47 @@
+#! /usr/bin/env python
+#
+# Copyright 2009 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A test suite that runs all tests for pyfakefs at once."""
+
+import unittest
+
+import fake_filesystem_glob_test
+import fake_filesystem_shutil_test
+import fake_filesystem_test
+import fake_filesystem_vs_real_test
+import fake_tempfile_test
+import fake_filesystem_unittest_test
+import example_test
+
+
+class AllTests(unittest.TestSuite):
+  """A test suite that runs all tests for pyfakefs at once."""
+
+  def suite(self):  # pylint: disable-msg=C6409
+    loader = unittest.defaultTestLoader
+    self.addTests([
+        loader.loadTestsFromModule(fake_filesystem_test),
+        loader.loadTestsFromModule(fake_filesystem_glob_test),
+        loader.loadTestsFromModule(fake_filesystem_shutil_test),
+        loader.loadTestsFromModule(fake_tempfile_test),
+        loader.loadTestsFromModule(fake_filesystem_vs_real_test),
+        loader.loadTestsFromModule(fake_filesystem_unittest_test),
+        loader.loadTestsFromModule(example_test),
+    ])
+    return self
+
+if __name__ == '__main__':
+  unittest.TextTestRunner(verbosity=2).run(AllTests().suite())
diff --git a/catapult/telemetry/third_party/pyfakefs/pyfakefs/example.py b/catapult/telemetry/third_party/pyfakefs/pyfakefs/example.py
new file mode 100644
index 0000000..436e4cf
--- /dev/null
+++ b/catapult/telemetry/third_party/pyfakefs/pyfakefs/example.py
@@ -0,0 +1,121 @@
+# Copyright 2014 Altera Corporation. All Rights Reserved.
+# Author: John McGehee
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Example module that is tested in :py:class`pyfakefs.example_test.TestExample`.
+This demonstrates the usage of the
+:py:class`pyfakefs.fake_filesystem_unittest.TestCase` base class.
+
+The modules related to file handling are bound to the respective fake modules:
+
+>>> os     #doctest: +ELLIPSIS 
+<fake_filesystem.FakeOsModule object...>
+>>> os.path     #doctest: +ELLIPSIS
+<fake_filesystem.FakePathModule object...>
+>>> glob     #doctest: +ELLIPSIS
+<fake_filesystem_glob.FakeGlobModule object...>
+>>> shutil     #doctest: +ELLIPSIS
+<fake_filesystem_shutil.FakeShutilModule object...>
+
+The `open()` built-in is bound to the fake `open()`:
+
+>>> open     #doctest: +ELLIPSIS
+<fake_filesystem.FakeFileOpen object...>
+
+In Python 2 the `file()` built-in is also bound to the fake `open()`.  `file()`
+was eliminated in Python 3.
+"""
+
+import os
+import glob
+import shutil
+
+def create_file(path):
+    '''Create the specified file and add some content to it.  Use the `open()`
+    built-in function.
+    
+    For example, the following file operations occur in the fake file system.
+    In the real file system, we would not even have permission to write `/test`:
+    
+    >>> os.path.isdir('/test')
+    False
+    >>> os.mkdir('/test')
+    >>> os.path.isdir('/test')
+    True
+    >>> os.path.exists('/test/file.txt')
+    False
+    >>> create_file('/test/file.txt')
+    >>> os.path.exists('/test/file.txt')
+    True
+    >>> with open('/test/file.txt') as f:
+    ...     f.readlines()
+    ["This is test file '/test/file.txt'.\\n", 'It was created using the open() function.\\n']
+    '''
+    with open(path, 'w') as f:
+        f.write("This is test file '{}'.\n".format(path))
+        f.write("It was created using the open() function.\n")
+
+def delete_file(path):
+    '''Delete the specified file.
+    
+    For example:
+        
+    >>> os.mkdir('/test')
+    >>> os.path.exists('/test/file.txt')
+    False
+    >>> create_file('/test/file.txt')
+    >>> os.path.exists('/test/file.txt')
+    True
+    >>> delete_file('/test/file.txt')
+    >>> os.path.exists('/test/file.txt')
+    False
+    '''
+    os.remove(path)
+    
+def path_exists(path):
+    '''Return True if the specified file exists.
+    
+    For example:
+        
+    >>> path_exists('/test')
+    False
+    >>> os.mkdir('/test')
+    >>> path_exists('/test')
+    True
+    >>>
+    >>> path_exists('/test/file.txt')
+    False
+    >>> create_file('/test/file.txt')
+    >>> path_exists('/test/file.txt')
+    True
+    '''
+    return os.path.exists(path)
+
+def get_glob(glob_path):
+    '''Return the list of paths matching the specified glob expression.
+    
+    For example:
+    
+    >>> os.mkdir('/test')
+    >>> create_file('/test/file1.txt')
+    >>> create_file('/test/file2.txt')
+    >>> get_glob('/test/file*.txt')
+    ['/test/file1.txt', '/test/file2.txt']
+    '''
+    return glob.glob(glob_path)
+
+def rm_tree(path):
+    '''Delete the specified file hierarchy.'''
+    shutil.rmtree(path)
diff --git a/catapult/telemetry/third_party/pyfakefs/pyfakefs/example_test.py b/catapult/telemetry/third_party/pyfakefs/pyfakefs/example_test.py
new file mode 100644
index 0000000..4a46dd9
--- /dev/null
+++ b/catapult/telemetry/third_party/pyfakefs/pyfakefs/example_test.py
@@ -0,0 +1,137 @@
+#! /usr/bin/env python
+#
+# Copyright 2014 Altera Corporation. All Rights Reserved.
+# Author: John McGehee
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Test the :py:class`pyfakefs.example` module to demonstrate the usage of the
+:py:class`pyfakefs.fake_filesystem_unittest.TestCase` base class.
+"""
+
+import os
+import sys
+if sys.version_info < (2, 7):
+    import unittest2 as unittest
+else:
+    import unittest
+
+import fake_filesystem_unittest
+# The module under test is pyfakefs.example
+import example
+
+
+def load_tests(loader, tests, ignore):
+    '''Load the pyfakefs/example.py doctest tests into unittest.'''
+    return fake_filesystem_unittest.load_doctests(loader, tests, ignore, example)
+
+
+class TestExample(fake_filesystem_unittest.TestCase): # pylint: disable=R0904
+    '''Test the pyfakefs.example module.'''
+
+    def setUp(self):
+        '''Invoke the :py:class:`pyfakefs.fake_filesystem_unittest.TestCase`
+        `self.setUp()` method.  This defines:
+        
+        * Attribute `self.fs`, an instance of \
+          :py:class:`pyfakefs.fake_filesystem.FakeFilesystem`.  This is useful \
+          for creating test files.
+        * Attribute `self.stubs`, an instance of \
+          :py:class:`mox.stubout.StubOutForTesting`.  Use this if you need to
+          define additional stubs.
+        '''
+        self.setUpPyfakefs()
+
+    def tearDown(self):
+        # No longer need self.tearDownPyfakefs()
+        pass
+        
+    def test_create_file(self):
+        '''Test example.create_file()'''
+        # The os module has been replaced with the fake os module so all of the
+        # following occurs in the fake filesystem.
+        self.assertFalse(os.path.isdir('/test'))
+        os.mkdir('/test')
+        self.assertTrue(os.path.isdir('/test'))
+        
+        self.assertFalse(os.path.exists('/test/file.txt'))
+        example.create_file('/test/file.txt')
+        self.assertTrue(os.path.exists('/test/file.txt'))
+        
+    def test_delete_file(self):
+        '''Test example.delete_file()
+
+        `self.fs.CreateFile()` is convenient because it automatically creates
+        directories in the fake file system and allows you to specify the file
+        contents.
+        
+        You could also use `open()` or `file()`.
+        '''
+        self.fs.CreateFile('/test/full.txt',
+                           contents='First line\n'
+                                    'Second Line\n')
+        self.assertTrue(os.path.exists('/test/full.txt'))
+        example.delete_file('/test/full.txt')
+        self.assertFalse(os.path.exists('/test/full.txt'))
+
+    def test_file_exists(self):
+        '''Test example.path_exists()
+
+        `self.fs.CreateFile()` is convenient because it automatically creates
+        directories in the fake file system and allows you to specify the file
+        contents.
+        
+        You could also use `open()` or `file()` if you wanted.
+        '''
+        self.assertFalse(example.path_exists('/test/empty.txt'))          
+        self.fs.CreateFile('/test/empty.txt')
+        self.assertTrue(example.path_exists('/test/empty.txt'))              
+        
+    def test_get_globs(self):
+        '''Test example.get_glob()
+        
+        `self.fs.CreateDirectory()` creates directories.  However, you might
+        prefer the familiar `os.makedirs()`, which also works fine on the fake
+        file system.
+        '''
+        self.assertFalse(os.path.isdir('/test'))
+        self.fs.CreateDirectory('/test/dir1/dir2a')
+        self.assertTrue(os.path.isdir('/test/dir1/dir2a'))
+        # os.makedirs() works, too.
+        os.makedirs('/test/dir1/dir2b')
+        self.assertTrue(os.path.isdir('/test/dir1/dir2b'))
+        
+        self.assertCountEqual(example.get_glob('/test/dir1/nonexistent*'),
+                              [])
+        self.assertCountEqual(example.get_glob('/test/dir1/dir*'),
+                              ['/test/dir1/dir2a', '/test/dir1/dir2b'])
+
+    def test_rm_tree(self):
+        '''Test example.rm_tree()
+        
+        `self.fs.CreateDirectory()` creates directories.  However, you might
+        prefer the familiar `os.makedirs()`, which also works fine on the fake
+        file system.
+        '''
+        self.fs.CreateDirectory('/test/dir1/dir2a')
+        # os.makedirs() works, too.
+        os.makedirs('/test/dir1/dir2b')
+        self.assertTrue(os.path.isdir('/test/dir1/dir2b'))
+        self.assertTrue(os.path.isdir('/test/dir1/dir2a'))
+       
+        example.rm_tree('/test/dir1')
+        self.assertFalse(os.path.exists('/test/dir1'))
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_filesystem.py b/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_filesystem.py
new file mode 100644
index 0000000..d390e66
--- /dev/null
+++ b/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_filesystem.py
@@ -0,0 +1,2202 @@
+# Copyright 2009 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# pylint: disable-msg=W0612,W0613,C6409
+
+"""A fake filesystem implementation for unit testing.
+
+Includes:
+  FakeFile:  Provides the appearance of a real file.
+  FakeDirectory: Provides the appearance of a real dir.
+  FakeFilesystem:  Provides the appearance of a real directory hierarchy.
+  FakeOsModule:  Uses FakeFilesystem to provide a fake os module replacement.
+  FakePathModule:  Faked os.path module replacement.
+  FakeFileOpen:  Faked file() and open() function replacements.
+
+Usage:
+>>> import fake_filesystem
+>>> filesystem = fake_filesystem.FakeFilesystem()
+>>> os_module = fake_filesystem.FakeOsModule(filesystem)
+>>> pathname = '/a/new/dir/new-file'
+
+Create a new file object, creating parent directory objects as needed:
+>>> os_module.path.exists(pathname)
+False
+>>> new_file = filesystem.CreateFile(pathname)
+
+File objects can't be overwritten:
+>>> os_module.path.exists(pathname)
+True
+>>> try:
+...   filesystem.CreateFile(pathname)
+... except IOError as e:
+...   assert e.errno == errno.EEXIST, 'unexpected errno: %d' % e.errno
+...   assert e.strerror == 'File already exists in fake filesystem'
+
+Remove a file object:
+>>> filesystem.RemoveObject(pathname)
+>>> os_module.path.exists(pathname)
+False
+
+Create a new file object at the previous path:
+>>> beatles_file = filesystem.CreateFile(pathname,
+...     contents='Dear Prudence\\nWon\\'t you come out to play?\\n')
+>>> os_module.path.exists(pathname)
+True
+
+Use the FakeFileOpen class to read fake file objects:
+>>> file_module = fake_filesystem.FakeFileOpen(filesystem)
+>>> for line in file_module(pathname):
+...     print line.rstrip()
+...
+Dear Prudence
+Won't you come out to play?
+
+File objects cannot be treated like directory objects:
+>>> os_module.listdir(pathname)  #doctest: +NORMALIZE_WHITESPACE
+Traceback (most recent call last):
+  File "fake_filesystem.py", line 291, in listdir
+    raise OSError(errno.ENOTDIR,
+OSError: [Errno 20] Fake os module: not a directory: '/a/new/dir/new-file'
+
+The FakeOsModule can list fake directory objects:
+>>> os_module.listdir(os_module.path.dirname(pathname))
+['new-file']
+
+The FakeOsModule also supports stat operations:
+>>> import stat
+>>> stat.S_ISREG(os_module.stat(pathname).st_mode)
+True
+>>> stat.S_ISDIR(os_module.stat(os_module.path.dirname(pathname)).st_mode)
+True
+"""
+
+import errno
+import heapq
+import os
+import stat
+import sys
+import time
+import warnings
+try:
+  import cStringIO as io  # pylint: disable-msg=C6204
+except ImportError:
+  import io  # pylint: disable-msg=C6204
+
+__pychecker__ = 'no-reimportself'
+
+__version__ = '2.5'
+
+PERM_READ = 0o400      # Read permission bit.
+PERM_WRITE = 0o200     # Write permission bit.
+PERM_EXE = 0o100       # Execute permission bit.
+PERM_DEF = 0o777       # Default permission bits.
+PERM_DEF_FILE = 0o666  # Default permission bits (regular file)
+PERM_ALL = 0o7777      # All permission bits.
+
+_OPEN_MODE_MAP = {
+    # mode name:(file must exist, need read, need write,
+    #            truncate [implies need write], append)
+    'r': (True, True, False, False, False),
+    'w': (False, False, True, True, False),
+    'a': (False, False, True, False, True),
+    'r+': (True, True, True, False, False),
+    'w+': (False, True, True, True, False),
+    'a+': (False, True, True, False, True),
+    }
+
+_MAX_LINK_DEPTH = 20
+
+FAKE_PATH_MODULE_DEPRECATION = ('Do not instantiate a FakePathModule directly; '
+                                'let FakeOsModule instantiate it.  See the '
+                                'FakeOsModule docstring for details.')
+
+
+class Error(Exception):
+  pass
+
+_is_windows = sys.platform.startswith('win')
+_is_cygwin = sys.platform == 'cygwin'
+
+if _is_windows:
+  # On Windows, raise WindowsError instead of OSError if available
+  OSError = WindowsError  # pylint: disable-msg=E0602,W0622
+
+
+class FakeLargeFileIoException(Error):
+  def __init__(self, file_path):
+    Error.__init__(self,
+                   'Read and write operations not supported for '
+                   'fake large file: %s' % file_path)
+
+
+def CopyModule(old):
+  """Recompiles and creates new module object."""
+  saved = sys.modules.pop(old.__name__, None)
+  new = __import__(old.__name__)
+  sys.modules[old.__name__] = saved
+  return new
+
+
+class FakeFile(object):
+  """Provides the appearance of a real file.
+
+     Attributes currently faked out:
+       st_mode: user-specified, otherwise S_IFREG
+       st_ctime: the time.time() timestamp when the file is created.
+       st_size: the size of the file
+
+     Other attributes needed by os.stat are assigned a default value of None;
+      these include: st_ino, st_dev, st_nlink, st_uid, st_gid, st_atime,
+      st_mtime
+  """
+
+  def __init__(self, name, st_mode=stat.S_IFREG | PERM_DEF_FILE,
+               contents=None):
+    """init.
+
+    Args:
+      name:  name of the file/directory, without parent path information
+      st_mode:  the stat.S_IF* constant representing the file type (i.e.
+        stat.S_IFREG, stat.S_IFDIR)
+      contents:  the contents of the filesystem object; should be a string for
+        regular files, and a list of other FakeFile or FakeDirectory objects
+        for FakeDirectory objects
+    """
+    self.name = name
+    self.st_mode = st_mode
+    self.contents = contents
+    self.epoch = 0
+    self.st_ctime = int(time.time())
+    self.st_atime = self.st_ctime
+    self.st_mtime = self.st_ctime
+    if contents:
+      self.st_size = len(contents)
+    else:
+      self.st_size = 0
+    # Non-faked features; write setter methods for faking them.
+    self.st_ino = None
+    self.st_dev = None
+    self.st_nlink = None
+    self.st_uid = None
+    self.st_gid = None
+
+  def SetLargeFileSize(self, st_size):
+    """Sets the self.st_size attribute and replaces self.content with None.
+
+    Provided specifically to simulate very large files without regard
+    to their content (which wouldn't fit in memory).
+
+    Args:
+      st_size: The desired file size
+
+    Raises:
+      IOError: if the st_size is not a non-negative integer
+    """
+    # st_size should be a non-negative integer value
+    if not isinstance(st_size, int) or st_size < 0:
+      raise IOError(errno.ENOSPC,
+                    'Fake file object: can not create non negative integer '
+                    'size=%r fake file' % st_size,
+                    self.name)
+
+    self.st_size = st_size
+    self.contents = None
+
+  def IsLargeFile(self):
+    """Return True if this file was initialized with size but no contents."""
+    return self.contents is None
+
+  def SetContents(self, contents):
+    """Sets the file contents and size.
+
+    Args:
+      contents: string, new content of file.
+    """
+    # convert a byte array to a string
+    if sys.version_info >= (3, 0) and isinstance(contents, bytes):
+      contents = ''.join(chr(i) for i in contents)
+    self.contents = contents
+    self.st_size = len(contents)
+    self.epoch += 1
+
+  def SetSize(self, st_size):
+    """Resizes file content, padding with nulls if new size exceeds the old.
+
+    Args:
+      st_size: The desired size for the file.
+
+    Raises:
+      IOError: if the st_size arg is not a non-negative integer
+    """
+
+    if not isinstance(st_size, int) or st_size < 0:
+      raise IOError(errno.ENOSPC,
+                    'Fake file object: can not create non negative integer '
+                    'size=%r fake file' % st_size,
+                    self.name)
+
+    current_size = len(self.contents)
+    if st_size < current_size:
+      self.contents = self.contents[:st_size]
+    else:
+      self.contents = '%s%s' % (self.contents, '\0' * (st_size - current_size))
+    self.st_size = len(self.contents)
+    self.epoch += 1
+
+  def SetATime(self, st_atime):
+    """Set the self.st_atime attribute.
+
+    Args:
+      st_atime: The desired atime.
+    """
+    self.st_atime = st_atime
+
+  def SetMTime(self, st_mtime):
+    """Set the self.st_mtime attribute.
+
+    Args:
+      st_mtime: The desired mtime.
+    """
+    self.st_mtime = st_mtime
+
+  def __str__(self):
+    return '%s(%o)' % (self.name, self.st_mode)
+
+  def SetIno(self, st_ino):
+    """Set the self.st_ino attribute.
+
+    Args:
+      st_ino: The desired inode.
+    """
+    self.st_ino = st_ino
+
+
+class FakeDirectory(FakeFile):
+  """Provides the appearance of a real dir."""
+
+  def __init__(self, name, perm_bits=PERM_DEF):
+    """init.
+
+    Args:
+      name:  name of the file/directory, without parent path information
+      perm_bits: permission bits. defaults to 0o777.
+    """
+    FakeFile.__init__(self, name, stat.S_IFDIR | perm_bits, {})
+
+  def AddEntry(self, pathname):
+    """Adds a child FakeFile to this directory.
+
+    Args:
+      pathname:  FakeFile instance to add as a child of this directory
+    """
+    self.contents[pathname.name] = pathname
+
+  def GetEntry(self, pathname_name):
+    """Retrieves the specified child file or directory.
+
+    Args:
+      pathname_name: basename of the child object to retrieve
+    Returns:
+      the FakeFile object corresponding to pathname_name
+    Raises:
+      KeyError: if no child exists by the specified name
+    """
+    return self.contents[pathname_name]
+
+  def RemoveEntry(self, pathname_name):
+    """Removes the specified child file or directory.
+
+    Args:
+      pathname_name: basename of the child object to remove
+
+    Raises:
+      KeyError: if no child exists by the specified name
+    """
+    del self.contents[pathname_name]
+
+  def __str__(self):
+    rc = super(FakeDirectory, self).__str__() + ':\n'
+    for item in self.contents:
+      item_desc = self.contents[item].__str__()
+      for line in item_desc.split('\n'):
+        if line:
+          rc = rc + '  ' + line + '\n'
+    return rc
+
+
+class FakeFilesystem(object):
+  """Provides the appearance of a real directory tree for unit testing."""
+
+  def __init__(self, path_separator=os.path.sep):
+    """init.
+
+    Args:
+      path_separator:  optional substitute for os.path.sep
+    """
+    self.path_separator = path_separator
+    self.root = FakeDirectory(self.path_separator)
+    self.cwd = self.root.name
+    # We can't query the current value without changing it:
+    self.umask = os.umask(0o22)
+    os.umask(self.umask)
+    # A list of open file objects. Their position in the list is their
+    # file descriptor number
+    self.open_files = []
+    # A heap containing all free positions in self.open_files list
+    self.free_fd_heap = []
+
+  def SetIno(self, path, st_ino):
+    """Set the self.st_ino attribute of file at 'path'.
+
+    Args:
+      path: Path to file.
+      st_ino: The desired inode.
+    """
+    self.GetObject(path).SetIno(st_ino)
+
+  def AddOpenFile(self, file_obj):
+    """Adds file_obj to the list of open files on the filesystem.
+
+    The position in the self.open_files array is the file descriptor number
+
+    Args:
+      file_obj:  file object to be added to open files list.
+
+    Returns:
+      File descriptor number for the file object.
+    """
+    if self.free_fd_heap:
+      open_fd = heapq.heappop(self.free_fd_heap)
+      self.open_files[open_fd] = file_obj
+      return open_fd
+
+    self.open_files.append(file_obj)
+    return len(self.open_files) - 1
+
+  def CloseOpenFile(self, file_obj):
+    """Removes file_obj from the list of open files on the filesystem.
+
+    Sets the entry in open_files to None.
+
+    Args:
+      file_obj:  file object to be removed from the open files list.
+    """
+    self.open_files[file_obj.filedes] = None
+    heapq.heappush(self.free_fd_heap, file_obj.filedes)
+
+  def GetOpenFile(self, file_des):
+    """Returns an open file.
+
+    Args:
+      file_des:  file descriptor of the open file.
+
+    Raises:
+      OSError: an invalid file descriptor.
+      TypeError: filedes is not an integer.
+
+    Returns:
+      Open file object.
+    """
+    if not isinstance(file_des, int):
+      raise TypeError('an integer is required')
+    if (file_des >= len(self.open_files) or
+        self.open_files[file_des] is None):
+      raise OSError(errno.EBADF, 'Bad file descriptor', file_des)
+    return self.open_files[file_des]
+
+  def CollapsePath(self, path):
+    """Mimics os.path.normpath using the specified path_separator.
+
+    Mimics os.path.normpath using the path_separator that was specified
+    for this FakeFilesystem.  Normalizes the path, but unlike the method
+    NormalizePath, does not make it absolute.  Eliminates dot components
+    (. and ..) and combines repeated path separators (//).  Initial ..
+    components are left in place for relative paths.  If the result is an empty
+    path, '.' is returned instead.  Unlike the real os.path.normpath, this does
+    not replace '/' with '\\' on Windows.
+
+    Args:
+      path:  (str) The path to normalize.
+
+    Returns:
+      (str) A copy of path with empty components and dot components removed.
+    """
+    is_absolute_path = path.startswith(self.path_separator)
+    path_components = path.split(self.path_separator)
+    collapsed_path_components = []
+    for component in path_components:
+      if (not component) or (component == '.'):
+        continue
+      if component == '..':
+        if collapsed_path_components and (
+            collapsed_path_components[-1] != '..'):
+          # Remove an up-reference: directory/..
+          collapsed_path_components.pop()
+          continue
+        elif is_absolute_path:
+          # Ignore leading .. components if starting from the root directory.
+          continue
+      collapsed_path_components.append(component)
+    collapsed_path = self.path_separator.join(collapsed_path_components)
+    if is_absolute_path:
+      collapsed_path = self.path_separator + collapsed_path
+    return collapsed_path or '.'
+
+  def NormalizePath(self, path):
+    """Absolutize and minimalize the given path.
+
+    Forces all relative paths to be absolute, and normalizes the path to
+    eliminate dot and empty components.
+
+    Args:
+      path:  path to normalize
+
+    Returns:
+      The normalized path relative to the current working directory, or the root
+        directory if path is empty.
+    """
+    if not path:
+      path = self.path_separator
+    elif not path.startswith(self.path_separator):
+      # Prefix relative paths with cwd, if cwd is not root.
+      path = self.path_separator.join(
+          (self.cwd != self.root.name and self.cwd or '',
+           path))
+    if path == '.':
+      path = self.cwd
+    return self.CollapsePath(path)
+
+  def SplitPath(self, path):
+    """Mimics os.path.split using the specified path_separator.
+
+    Mimics os.path.split using the path_separator that was specified
+    for this FakeFilesystem.
+
+    Args:
+      path:  (str) The path to split.
+
+    Returns:
+      (str) A 2-tuple (pathname, basename) for which pathname does not
+          end with a slash, and basename does not contain a slash.
+    """
+    path_components = path.split(self.path_separator)
+    if not path_components:
+      return ('', '')
+    basename = path_components.pop()
+    if not path_components:
+      return ('', basename)
+    for component in path_components:
+      if component:
+        # The path is not the root; it contains a non-separator component.
+        # Strip all trailing separators.
+        while not path_components[-1]:
+          path_components.pop()
+        return (self.path_separator.join(path_components), basename)
+    # Root path.  Collapse all leading separators.
+    return (self.path_separator, basename)
+
+  def JoinPaths(self, *paths):
+    """Mimics os.path.join using the specified path_separator.
+
+    Mimics os.path.join using the path_separator that was specified
+    for this FakeFilesystem.
+
+    Args:
+      *paths:  (str) Zero or more paths to join.
+
+    Returns:
+      (str) The paths joined by the path separator, starting with the last
+          absolute path in paths.
+    """
+    if len(paths) == 1:
+      return paths[0]
+    joined_path_segments = []
+    for path_segment in paths:
+      if path_segment.startswith(self.path_separator):
+        # An absolute path
+        joined_path_segments = [path_segment]
+      else:
+        if (joined_path_segments and
+            not joined_path_segments[-1].endswith(self.path_separator)):
+          joined_path_segments.append(self.path_separator)
+        if path_segment:
+          joined_path_segments.append(path_segment)
+    return ''.join(joined_path_segments)
+
+  def GetPathComponents(self, path):
+    """Breaks the path into a list of component names.
+
+    Does not include the root directory as a component, as all paths
+    are considered relative to the root directory for the FakeFilesystem.
+    Callers should basically follow this pattern:
+
+      file_path = self.NormalizePath(file_path)
+      path_components = self.GetPathComponents(file_path)
+      current_dir = self.root
+      for component in path_components:
+        if component not in current_dir.contents:
+          raise IOError
+        DoStuffWithComponent(current_dir, component)
+        current_dir = current_dir.GetEntry(component)
+
+    Args:
+      path:  path to tokenize
+
+    Returns:
+      The list of names split from path
+    """
+    if not path or path == self.root.name:
+      return []
+    path_components = path.split(self.path_separator)
+    assert path_components
+    if not path_components[0]:
+      # This is an absolute path.
+      path_components = path_components[1:]
+    return path_components
+
+  def Exists(self, file_path):
+    """True if a path points to an existing file system object.
+
+    Args:
+      file_path:  path to examine
+
+    Returns:
+      True if the object exists, False otherwise
+
+    Raises:
+      TypeError: if file_path is None
+    """
+    if file_path is None:
+      raise TypeError
+    if not file_path:
+      return False
+    try:
+      file_path = self.ResolvePath(file_path)
+    except IOError:
+      return False
+    if file_path == self.root.name:
+      return True
+    path_components = self.GetPathComponents(file_path)
+    current_dir = self.root
+    for component in path_components:
+      if component not in current_dir.contents:
+        return False
+      current_dir = current_dir.contents[component]
+    return True
+
+  def ResolvePath(self, file_path):
+    """Follow a path, resolving symlinks.
+
+    ResolvePath traverses the filesystem along the specified file path,
+    resolving file names and symbolic links until all elements of the path are
+    exhausted, or we reach a file which does not exist.  If all the elements
+    are not consumed, they just get appended to the path resolved so far.
+    This gives us the path which is as resolved as it can be, even if the file
+    does not exist.
+
+    This behavior mimics Unix semantics, and is best shown by example.  Given a
+    file system that looks like this:
+
+          /a/b/
+          /a/b/c -> /a/b2          c is a symlink to /a/b2
+          /a/b2/x
+          /a/c   -> ../d
+          /a/x   -> y
+     Then:
+          /a/b/x      =>  /a/b/x
+          /a/c        =>  /a/d
+          /a/x        =>  /a/y
+          /a/b/c/d/e  =>  /a/b2/d/e
+
+    Args:
+      file_path:  path to examine
+
+    Returns:
+      resolved_path (string) or None
+
+    Raises:
+      TypeError: if file_path is None
+      IOError: if file_path is '' or a part of the path doesn't exist
+    """
+
+    def _ComponentsToPath(component_folders):
+      return '%s%s' % (self.path_separator,
+                       self.path_separator.join(component_folders))
+
+    def _ValidRelativePath(file_path):
+      while file_path and '/..' in file_path:
+        file_path = file_path[:file_path.rfind('/..')]
+        if not self.Exists(self.NormalizePath(file_path)):
+          return False
+      return True
+
+    def _FollowLink(link_path_components, link):
+      """Follow a link w.r.t. a path resolved so far.
+
+      The component is either a real file, which is a no-op, or a symlink.
+      In the case of a symlink, we have to modify the path as built up so far:
+        /a/b => ../c   should yield /a/../c (which will normalize to /c)
+        /a/b => x      should yield /a/x
+        /a/b => /x/y/z should yield /x/y/z
+      The modified path may land us in a new spot which is itself a
+      link, so we may repeat the process.
+
+      Args:
+        link_path_components: The resolved path built up to the link so far.
+        link: The link object itself.
+
+      Returns:
+        (string) the updated path resolved after following the link.
+
+      Raises:
+        IOError: if there are too many levels of symbolic link
+      """
+      link_path = link.contents
+      # For links to absolute paths, we want to throw out everything in the
+      # path built so far and replace with the link.  For relative links, we
+      # have to append the link to what we have so far.
+      if not link_path.startswith(self.path_separator):
+        # Relative path.  Append remainder of path to what we have processed
+        # so far, excluding the name of the link itself.
+        # /a/b => ../c   should yield /a/../c (which will normalize to /c)
+        # /a/b => d should yield a/d
+        components = link_path_components[:-1]
+        components.append(link_path)
+        link_path = self.path_separator.join(components)
+      # Don't call self.NormalizePath(), as we don't want to prepend self.cwd.
+      return self.CollapsePath(link_path)
+
+    if file_path is None:
+      # file.open(None) raises TypeError, so mimic that.
+      raise TypeError('Expected file system path string, received None')
+    if not file_path or not _ValidRelativePath(file_path):
+      # file.open('') raises IOError, so mimic that, and validate that all
+      # parts of a relative path exist.
+      raise IOError(errno.ENOENT,
+                    'No such file or directory: \'%s\'' % file_path)
+    file_path = self.NormalizePath(file_path)
+    if file_path == self.root.name:
+      return file_path
+
+    current_dir = self.root
+    path_components = self.GetPathComponents(file_path)
+
+    resolved_components = []
+    link_depth = 0
+    while path_components:
+      component = path_components.pop(0)
+      resolved_components.append(component)
+      if component not in current_dir.contents:
+        # The component of the path at this point does not actually exist in
+        # the folder.   We can't resolve the path any more.  It is legal to link
+        # to a file that does not yet exist, so rather than raise an error, we
+        # just append the remaining components to the path we have built so
+        # far and return that.
+        resolved_components.extend(path_components)
+        break
+      current_dir = current_dir.contents[component]
+
+      # Resolve any possible symlinks in the current path component.
+      if stat.S_ISLNK(current_dir.st_mode):
+        # This link_depth check is not really meant to be an accurate check.
+        # It is just a quick hack to prevent us from looping forever on
+        # cycles.
+        link_depth += 1
+        if link_depth > _MAX_LINK_DEPTH:
+          raise IOError(errno.EMLINK,
+                        'Too many levels of symbolic links: \'%s\'' %
+                        _ComponentsToPath(resolved_components))
+        link_path = _FollowLink(resolved_components, current_dir)
+
+        # Following the link might result in the complete replacement of the
+        # current_dir, so we evaluate the entire resulting path.
+        target_components = self.GetPathComponents(link_path)
+        path_components = target_components + path_components
+        resolved_components = []
+        current_dir = self.root
+    return _ComponentsToPath(resolved_components)
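+
+  # Illustrative usage sketch (hypothetical example paths), showing how
+  # ResolvePath() follows relative and absolute symlinks:
+  #   fs = FakeFilesystem(path_separator='/')
+  #   fs.CreateFile('/a/target')
+  #   fs.CreateLink('/a/link', 'target')   # relative link target
+  #   fs.ResolvePath('/a/link')            # -> '/a/target'
+  #   fs.CreateLink('/abs', '/a/link')     # absolute link to another link
+  #   fs.ResolvePath('/abs')               # -> '/a/target'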
+
+  def GetObjectFromNormalizedPath(self, file_path):
+    """Searches for the specified filesystem object within the fake filesystem.
+
+    Args:
+      file_path: specifies target FakeFile object to retrieve, with a
+          path that has already been normalized/resolved
+
+    Returns:
+      the FakeFile object corresponding to file_path
+
+    Raises:
+      IOError: if the object is not found
+    """
+    if file_path == self.root.name:
+      return self.root
+    path_components = self.GetPathComponents(file_path)
+    target_object = self.root
+    try:
+      for component in path_components:
+        if not isinstance(target_object, FakeDirectory):
+          raise IOError(errno.ENOENT,
+                        'No such file or directory in fake filesystem',
+                        file_path)
+        target_object = target_object.GetEntry(component)
+    except KeyError:
+      raise IOError(errno.ENOENT,
+                    'No such file or directory in fake filesystem',
+                    file_path)
+    return target_object
+
+  def GetObject(self, file_path):
+    """Searches for the specified filesystem object within the fake filesystem.
+
+    Args:
+      file_path: specifies target FakeFile object to retrieve
+
+    Returns:
+      the FakeFile object corresponding to file_path
+
+    Raises:
+      IOError: if the object is not found
+    """
+    file_path = self.NormalizePath(file_path)
+    return self.GetObjectFromNormalizedPath(file_path)
+
+  def ResolveObject(self, file_path):
+    """Searches for the specified filesystem object, resolving all links.
+
+    Args:
+      file_path: specifies target FakeFile object to retrieve
+
+    Returns:
+      the FakeFile object corresponding to file_path
+
+    Raises:
+      IOError: if the object is not found
+    """
+    return self.GetObjectFromNormalizedPath(self.ResolvePath(file_path))
+
+  def LResolveObject(self, path):
+    """Searches for the specified object, resolving only parent links.
+
+    This is analogous to the stat/lstat difference: links along the path *to*
+    the object are resolved, but a link that is the final object itself is
+    not.
+
+    Args:
+      path: specifies target FakeFile object to retrieve
+
+    Returns:
+      the FakeFile object corresponding to path
+
+    Raises:
+      IOError: if the object is not found
+    """
+    if path == self.root.name:
+      # The root directory will never be a link
+      return self.root
+    parent_directory, child_name = self.SplitPath(path)
+    if not parent_directory:
+      parent_directory = self.cwd
+    try:
+      parent_obj = self.ResolveObject(parent_directory)
+      assert parent_obj
+      if not isinstance(parent_obj, FakeDirectory):
+        raise IOError(errno.ENOENT,
+                      'No such file or directory in fake filesystem',
+                      path)
+      return parent_obj.GetEntry(child_name)
+    except KeyError:
+      raise IOError(errno.ENOENT,
+                    'No such file or directory in the fake filesystem',
+                    path)
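+
+  # Illustrative note (hypothetical example paths): assuming '/a/target'
+  # exists and '/a/link' was created with CreateLink('/a/link', 'target'),
+  # ResolveObject('/a/link') returns the FakeFile for '/a/target', while
+  # LResolveObject('/a/link') returns the link object itself (the stat/lstat
+  # distinction described above).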
+
+  def AddObject(self, file_path, file_object):
+    """Add a fake file or directory into the filesystem at file_path.
+
+    Args:
+      file_path: path of the directory to which file_object will be added
+      file_object: file or directory to add
+
+    Raises:
+      IOError: if file_path does not correspond to a directory
+    """
+    try:
+      target_directory = self.GetObject(file_path)
+      target_directory.AddEntry(file_object)
+    except AttributeError:
+      raise IOError(errno.ENOTDIR,
+                    'Not a directory in the fake filesystem',
+                    file_path)
+
+  def RemoveObject(self, file_path):
+    """Remove an existing file or directory.
+
+    Args:
+      file_path: the path to the file or directory to remove
+
+    Raises:
+      IOError: if file_path does not correspond to an existing file, or if part
+        of the path refers to something other than a directory
+      OSError: if the directory is in use (e.g., if it is '/')
+    """
+    if file_path == self.root.name:
+      raise OSError(errno.EBUSY, 'Fake device or resource busy',
+                    file_path)
+    try:
+      dirname, basename = self.SplitPath(file_path)
+      target_directory = self.GetObject(dirname)
+      target_directory.RemoveEntry(basename)
+    except KeyError:
+      raise IOError(errno.ENOENT,
+                    'No such file or directory in the fake filesystem',
+                    file_path)
+    except AttributeError:
+      raise IOError(errno.ENOTDIR,
+                    'Not a directory in the fake filesystem',
+                    file_path)
+
+  def CreateDirectory(self, directory_path, perm_bits=PERM_DEF, inode=None):
+    """Creates directory_path, and all the parent directories.
+
+    Helper method to set up your test faster.
+
+    Args:
+      directory_path:  directory to create
+      perm_bits: permission bits
+      inode: inode of directory
+
+    Returns:
+      the newly created FakeDirectory object
+
+    Raises:
+      OSError:  if the directory already exists
+    """
+    directory_path = self.NormalizePath(directory_path)
+    if self.Exists(directory_path):
+      raise OSError(errno.EEXIST,
+                    'Directory exists in fake filesystem',
+                    directory_path)
+    path_components = self.GetPathComponents(directory_path)
+    current_dir = self.root
+
+    for component in path_components:
+      if component not in current_dir.contents:
+        new_dir = FakeDirectory(component, perm_bits)
+        current_dir.AddEntry(new_dir)
+        current_dir = new_dir
+      else:
+        current_dir = current_dir.contents[component]
+
+    current_dir.SetIno(inode)
+    return current_dir
+
+  def CreateFile(self, file_path, st_mode=stat.S_IFREG | PERM_DEF_FILE,
+                 contents='', st_size=None, create_missing_dirs=True,
+                 apply_umask=False, inode=None):
+    """Creates file_path, including all the parent directories along the way.
+
+    Helper method to set up your test faster.
+
+    Args:
+      file_path: path to the file to create
+      st_mode: the stat.S_IF constant representing the file type
+      contents: the contents of the file
+      st_size: file size; only valid if contents=None
+      create_missing_dirs: if True, auto create missing directories
+      apply_umask: whether or not the current umask must be applied on st_mode
+      inode: inode of the file
+
+    Returns:
+      the newly created FakeFile object
+
+    Raises:
+      IOError: if the file already exists
+      IOError: if the containing directory is required and missing
+    """
+    file_path = self.NormalizePath(file_path)
+    if self.Exists(file_path):
+      raise IOError(errno.EEXIST,
+                    'File already exists in fake filesystem',
+                    file_path)
+    parent_directory, new_file = self.SplitPath(file_path)
+    if not parent_directory:
+      parent_directory = self.cwd
+    if not self.Exists(parent_directory):
+      if not create_missing_dirs:
+        raise IOError(errno.ENOENT, 'No such fake directory', parent_directory)
+      self.CreateDirectory(parent_directory)
+    if apply_umask:
+      st_mode &= ~self.umask
+    file_object = FakeFile(new_file, st_mode, contents)
+    file_object.SetIno(inode)
+    self.AddObject(parent_directory, file_object)
+
+    # set the size if st_size is given
+    if not contents and st_size is not None:
+      try:
+        file_object.SetLargeFileSize(st_size)
+      except IOError:
+        self.RemoveObject(file_path)
+        raise
+
+    return file_object
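+
+  # Illustrative usage sketch (hypothetical example paths): CreateDirectory()
+  # and CreateFile() are the usual way to seed a fake filesystem in a test:
+  #   fs = FakeFilesystem(path_separator='/')
+  #   fs.CreateDirectory('/var/data')
+  #   fs.CreateFile('/var/data/xx1.txt', contents='abcde')
+  #   fs.CreateFile('/var/data/huge.bin', contents=None, st_size=10**9)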
+
+  def CreateLink(self, file_path, link_target):
+    """Creates the specified symlink, pointed at the specified link target.
+
+    Args:
+      file_path:  path to the symlink to create
+      link_target:  the target of the symlink
+
+    Returns:
+      the newly created FakeFile object
+
+    Raises:
+      IOError:  if the file already exists
+    """
+    resolved_file_path = self.ResolvePath(file_path)
+    return self.CreateFile(resolved_file_path, st_mode=stat.S_IFLNK | PERM_DEF,
+                           contents=link_target)
+
+  def __str__(self):
+    return str(self.root)
+
+
+class FakePathModule(object):
+  """Faked os.path module replacement.
+
+  FakePathModule should *only* be instantiated by FakeOsModule.  See the
+  FakeOsModule docstring for details.
+  """
+  _OS_PATH_COPY = CopyModule(os.path)
+
+  def __init__(self, filesystem, os_module=None):
+    """Init.
+
+    Args:
+      filesystem:  FakeFilesystem used to provide file system information
+      os_module: (deprecated) FakeOsModule to assign to self.os
+    """
+    self.filesystem = filesystem
+    self._os_path = self._OS_PATH_COPY
+    if os_module is None:
+      warnings.warn(FAKE_PATH_MODULE_DEPRECATION, DeprecationWarning,
+                    stacklevel=2)
+    self._os_path.os = self.os = os_module
+    self.sep = self.filesystem.path_separator
+
+  def exists(self, path):
+    """Determines whether the file object exists within the fake filesystem.
+
+    Args:
+      path:  path to the file object
+
+    Returns:
+      bool (if file exists)
+    """
+    return self.filesystem.Exists(path)
+
+  def lexists(self, path):
+    """Test whether a path exists.  Returns True for broken symbolic links.
+
+    Args:
+      path:  path to the symlink object
+
+    Returns:
+      bool (if file exists)
+    """
+    return self.exists(path) or self.islink(path)
+
+  def getsize(self, path):
+    """Return the file object size in bytes.
+
+    Args:
+      path:  path to the file object
+
+    Returns:
+      file size in bytes
+    """
+    file_obj = self.filesystem.GetObject(path)
+    return file_obj.st_size
+
+  def _istype(self, path, st_flag):
+    """Helper function to implement isdir(), islink(), etc.
+
+    See the stat(2) man page for valid stat.S_I* flag values
+
+    Args:
+      path:  path to file to stat and test
+      st_flag:  the stat.S_I* flag checked for the file's st_mode
+
+    Returns:
+      boolean (the st_flag is set in path's st_mode)
+
+    Raises:
+      TypeError: if path is None
+    """
+    if path is None:
+      raise TypeError
+    try:
+      obj = self.filesystem.ResolveObject(path)
+      if obj:
+        return stat.S_IFMT(obj.st_mode) == st_flag
+    except IOError:
+      return False
+    return False
+
+  def isabs(self, path):
+    if self.filesystem.path_separator == os.path.sep:
+      # Pass through to os.path.isabs, which on Windows has special
+      # handling for a leading drive letter.
+      return self._os_path.isabs(path)
+    else:
+      return path.startswith(self.filesystem.path_separator)
+
+  def isdir(self, path):
+    """Determines if path identifies a directory."""
+    return self._istype(path, stat.S_IFDIR)
+
+  def isfile(self, path):
+    """Determines if path identifies a regular file."""
+    return self._istype(path, stat.S_IFREG)
+
+  def islink(self, path):
+    """Determines if path identifies a symbolic link.
+
+    Args:
+      path: path to filesystem object.
+
+    Returns:
+      boolean (True if path refers to a symbolic link)
+
+    Raises:
+      TypeError: if path is None
+    """
+    if path is None:
+      raise TypeError
+    try:
+      link_obj = self.filesystem.LResolveObject(path)
+      return stat.S_IFMT(link_obj.st_mode) == stat.S_IFLNK
+    except IOError:
+      return False
+    except KeyError:
+      return False
+
+  def getmtime(self, path):
+    """Returns the mtime of the file."""
+    try:
+      file_obj = self.filesystem.GetObject(path)
+    except IOError as e:
+      raise OSError(errno.ENOENT, str(e))
+    return file_obj.st_mtime
+
+  def abspath(self, path):
+    """Return the absolute version of a path."""
+    if not self.isabs(path):
+      if sys.version_info < (3, 0) and isinstance(path, unicode):
+        cwd = self.os.getcwdu()
+      else:
+        cwd = self.os.getcwd()
+      path = self.join(cwd, path)
+    return self.normpath(path)
+
+  def join(self, *p):
+    """Returns the completed path with a separator of the parts."""
+    return self.filesystem.JoinPaths(*p)
+
+  def normpath(self, path):
+    """Normalize path, eliminating double slashes, etc."""
+    return self.filesystem.CollapsePath(path)
+
+  if _is_windows:
+
+    def relpath(self, path, start=None):
+      """ntpath.relpath() needs the cwd passed in the start argument."""
+      if start is None:
+        start = self.filesystem.cwd
+      path = self._os_path.relpath(path, start)
+      return path.replace(self._os_path.sep, self.filesystem.path_separator)
+
+    realpath = abspath
+
+  def __getattr__(self, name):
+    """Forwards any non-faked calls to os.path."""
+    return self._os_path.__dict__[name]
+
+
+class FakeOsModule(object):
+  """Uses FakeFilesystem to provide a fake os module replacement.
+
+  Do not create os.path separately from os, as there is a necessary circular
+  dependency between os and os.path to replicate the behavior of the standard
+  Python modules.  What you want to do is to just let FakeOsModule take care of
+  os.path setup itself.
+
+  # You always want to do this.
+  filesystem = fake_filesystem.FakeFilesystem()
+  my_os_module = fake_filesystem.FakeOsModule(filesystem)
+  """
+
+  def __init__(self, filesystem, os_path_module=None):
+    """Also exposes self.path (to fake os.path).
+
+    Args:
+      filesystem:  FakeFilesystem used to provide file system information
+      os_path_module: (deprecated) optional FakePathModule instance
+    """
+    self.filesystem = filesystem
+    self.sep = filesystem.path_separator
+    self._os_module = os
+    if os_path_module is None:
+      self.path = FakePathModule(self.filesystem, self)
+    else:
+      warnings.warn(FAKE_PATH_MODULE_DEPRECATION, DeprecationWarning,
+                    stacklevel=2)
+      self.path = os_path_module
+    if sys.version_info < (3, 0):
+      self.fdopen = self._fdopen_ver2
+    else:
+      self.fdopen = self._fdopen
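+
+  # Illustrative usage sketch (hypothetical names): self.path is the fake
+  # os.path companion, so a test needs only one FakeOsModule instance:
+  #   fake_os = FakeOsModule(filesystem)
+  #   fake_os.mkdir('/data')
+  #   fake_os.path.isdir('/data')   # True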
+
+  def _fdopen(self, *args, **kwargs):
+    """Redirector to open() builtin function.
+
+    Args:
+      *args: pass through args
+      **kwargs: pass through kwargs
+
+    Returns:
+      File object corresponding to file_des.
+
+    Raises:
+      TypeError: if file descriptor is not an integer.
+    """
+    if not isinstance(args[0], int):
+      raise TypeError('an integer is required')
+    return FakeFileOpen(self.filesystem)(*args, **kwargs)
+
+  def _fdopen_ver2(self, file_des, mode='r', bufsize=None):
+    """Returns an open file object connected to the file descriptor file_des.
+
+    Args:
+      file_des: An integer file descriptor for the file object requested.
+      mode: additional file flags. Currently checks to see if the mode matches
+        the mode of the requested file object.
+      bufsize: ignored. (Used for signature compliance with __builtin__.fdopen)
+
+    Returns:
+      File object corresponding to file_des.
+
+    Raises:
+      OSError: if bad file descriptor or incompatible mode is given.
+      TypeError: if file descriptor is not an integer.
+    """
+    if not isinstance(file_des, int):
+      raise TypeError('an integer is required')
+
+    try:
+      return FakeFileOpen(self.filesystem).Call(file_des, mode=mode)
+    except IOError as e:
+      raise OSError(e)
+
+  def open(self, file_path, flags, mode=None):
+    """Returns the file descriptor for a FakeFile.
+
+    WARNING: This implementation only implements creating a file. Please fill
+    out the remainder for your needs.
+
+    Args:
+      file_path: the path to the file
+      flags: low-level bits to indicate io operation
+      mode: bits to define default permissions
+
+    Returns:
+      A file descriptor.
+
+    Raises:
+      OSError: if the path cannot be found
+      ValueError: if invalid mode is given
+      NotImplementedError: if an unsupported flag is passed in
+    """
+    if flags & os.O_CREAT:
+      fake_file = FakeFileOpen(self.filesystem)(file_path, 'w')
+      if mode:
+        self.chmod(file_path, mode)
+      return fake_file.fileno()
+    else:
+      raise NotImplementedError('FakeOsModule.open')
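+
+  # Illustrative usage sketch (hypothetical names): only creation via
+  # os.O_CREAT is supported by open() above, e.g.
+  #   fd = fake_os.open('/new-file', os.O_CREAT, 0o644)
+  #   fake_os.write(fd, 'data')
+  #   fake_os.close(fd)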
+
+  def close(self, file_des):
+    """Closes a file descriptor.
+
+    Args:
+      file_des: An integer file descriptor for the file object requested.
+
+    Raises:
+      OSError: bad file descriptor.
+      TypeError: if file descriptor is not an integer.
+    """
+    fh = self.filesystem.GetOpenFile(file_des)
+    fh.close()
+
+  def read(self, file_des, num_bytes):
+    """Reads number of bytes from a file descriptor, returns bytes read.
+
+    Args:
+      file_des: An integer file descriptor for the file object requested.
+      num_bytes: Number of bytes to read from file.
+
+    Returns:
+      Bytes read from file.
+
+    Raises:
+      OSError: bad file descriptor.
+      TypeError: if file descriptor is not an integer.
+    """
+    fh = self.filesystem.GetOpenFile(file_des)
+    return fh.read(num_bytes)
+
+  def write(self, file_des, contents):
+    """Writes string to file descriptor, returns number of bytes written.
+
+    Args:
+      file_des: An integer file descriptor for the file object requested.
+      contents: String of bytes to write to file.
+
+    Returns:
+      Number of bytes written.
+
+    Raises:
+      OSError: bad file descriptor.
+      TypeError: if file descriptor is not an integer.
+    """
+    fh = self.filesystem.GetOpenFile(file_des)
+    fh.write(contents)
+    fh.flush()
+    return len(contents)
+
+  def fstat(self, file_des):
+    """Returns the os.stat-like tuple for the FakeFile object of file_des.
+
+    Args:
+      file_des:  file descriptor of filesystem object to retrieve
+
+    Returns:
+      the os.stat_result object corresponding to entry_path
+
+    Raises:
+      OSError: if the filesystem object doesn't exist.
+    """
+    # stat should return the tuple representing return value of os.stat
+    stats = self.filesystem.GetOpenFile(file_des).GetObject()
+    st_obj = os.stat_result((stats.st_mode, stats.st_ino, stats.st_dev,
+                             stats.st_nlink, stats.st_uid, stats.st_gid,
+                             stats.st_size, stats.st_atime,
+                             stats.st_mtime, stats.st_ctime))
+    return st_obj
+
+  def _ConfirmDir(self, target_directory):
+    """Tests that the target is actually a directory, raising OSError if not.
+
+    Args:
+      target_directory:  path to the target directory within the fake
+        filesystem
+
+    Returns:
+      the FakeFile object corresponding to target_directory
+
+    Raises:
+      OSError:  if the target is not a directory
+    """
+    try:
+      directory = self.filesystem.GetObject(target_directory)
+    except IOError as e:
+      raise OSError(e.errno, e.strerror, target_directory)
+    if not directory.st_mode & stat.S_IFDIR:
+      raise OSError(errno.ENOTDIR,
+                    'Fake os module: not a directory',
+                    target_directory)
+    return directory
+
+  def umask(self, new_mask):
+    """Change the current umask.
+
+    Args:
+      new_mask: An integer.
+
+    Returns:
+      The old mask.
+
+    Raises:
+      TypeError: new_mask is of an invalid type.
+    """
+    if not isinstance(new_mask, int):
+      raise TypeError('an integer is required')
+    old_umask = self.filesystem.umask
+    self.filesystem.umask = new_mask
+    return old_umask
+
+  def chdir(self, target_directory):
+    """Change current working directory to target directory.
+
+    Args:
+      target_directory:  path to new current working directory
+
+    Raises:
+      OSError: if user lacks permission to enter the argument directory or if
+               the target is not a directory
+    """
+    target_directory = self.filesystem.ResolvePath(target_directory)
+    self._ConfirmDir(target_directory)
+    directory = self.filesystem.GetObject(target_directory)
+    # A full implementation would check permissions all the way up the tree.
+    if not directory.st_mode & PERM_EXE:
+      raise OSError(errno.EACCES, 'Fake os module: permission denied',
+                    directory)
+    self.filesystem.cwd = target_directory
+
+  def getcwd(self):
+    """Return current working directory."""
+    return self.filesystem.cwd
+
+  def getcwdu(self):
+    """Return current working directory. Deprecated in Python 3."""
+    if sys.version_info >= (3, 0):
+      raise AttributeError('no attribute getcwdu')
+    return unicode(self.filesystem.cwd)
+
+  def listdir(self, target_directory):
+    """Returns a sorted list of filenames in target_directory.
+
+    Args:
+      target_directory:  path to the target directory within the fake
+        filesystem
+
+    Returns:
+      a sorted list of file names within the target directory
+
+    Raises:
+      OSError:  if the target is not a directory
+    """
+    target_directory = self.filesystem.ResolvePath(target_directory)
+    directory = self._ConfirmDir(target_directory)
+    return sorted(directory.contents)
+
+  def _ClassifyDirectoryContents(self, root):
+    """Classify contents of a directory as files/directories.
+
+    Args:
+      root: (str) Directory to examine.
+
+    Returns:
+      (tuple) A tuple consisting of three values: the directory examined, a
+      list containing all of the directory entries, and a list containing all
+      of the non-directory entries.  (This is the same format as returned by
+      the os.walk generator.)
+
+    Raises:
+      Nothing on its own, but be ready to catch exceptions generated by
+      underlying mechanisms like os.listdir.
+    """
+    dirs = []
+    files = []
+    for entry in self.listdir(root):
+      if self.path.isdir(self.path.join(root, entry)):
+        dirs.append(entry)
+      else:
+        files.append(entry)
+    return (root, dirs, files)
+
+  def walk(self, top, topdown=True, onerror=None):
+    """Performs an os.walk operation over the fake filesystem.
+
+    Args:
+      top:  root directory from which to begin walk
+      topdown:  determines whether to return the tuples with the root as the
+        first entry (True) or as the last, after all the child directory
+        tuples (False)
+      onerror:  if not None, function which will be called to handle the
+        os.error instance provided when os.listdir() fails
+
+    Yields:
+      (path, directories, nondirectories) for top and each of its
+      subdirectories.  See the documentation for the builtin os module for
+      further details.
+    """
+    top = self.path.normpath(top)
+    try:
+      top_contents = self._ClassifyDirectoryContents(top)
+    except OSError as e:
+      top_contents = None
+      if onerror is not None:
+        onerror(e)
+
+    if top_contents is not None:
+      if topdown:
+        yield top_contents
+
+      for directory in top_contents[1]:
+        for contents in self.walk(self.path.join(top, directory),
+                                  topdown=topdown, onerror=onerror):
+          yield contents
+
+      if not topdown:
+        yield top_contents
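+
+  # Illustrative usage sketch (hypothetical names): the generator mirrors the
+  # real os.walk, e.g.
+  #   for root, dirs, files in fake_os.walk('/src'):
+  #     for name in files:
+  #       print(fake_os.path.join(root, name))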
+
+  def readlink(self, path):
+    """Reads the target of a symlink.
+
+    Args:
+      path:  symlink to read the target of
+
+    Returns:
+      the string representing the path to which the symbolic link points.
+
+    Raises:
+      TypeError: if path is None
+      OSError: (with errno=ENOENT) if path is not a valid path, or
+               (with errno=EINVAL) if path is valid, but is not a symlink
+    """
+    if path is None:
+      raise TypeError
+    try:
+      link_obj = self.filesystem.LResolveObject(path)
+    except IOError:
+      raise OSError(errno.ENOENT, 'Fake os module: path does not exist', path)
+    if stat.S_IFMT(link_obj.st_mode) != stat.S_IFLNK:
+      raise OSError(errno.EINVAL, 'Fake os module: not a symlink', path)
+    return link_obj.contents
+
+  def stat(self, entry_path):
+    """Returns the os.stat-like tuple for the FakeFile object of entry_path.
+
+    Args:
+      entry_path:  path to filesystem object to retrieve
+
+    Returns:
+      the os.stat_result object corresponding to entry_path
+
+    Raises:
+      OSError: if the filesystem object doesn't exist.
+    """
+    # stat should return the tuple representing return value of os.stat
+    try:
+      stats = self.filesystem.ResolveObject(entry_path)
+      st_obj = os.stat_result((stats.st_mode, stats.st_ino, stats.st_dev,
+                               stats.st_nlink, stats.st_uid, stats.st_gid,
+                               stats.st_size, stats.st_atime,
+                               stats.st_mtime, stats.st_ctime))
+      return st_obj
+    except IOError as io_error:
+      raise OSError(io_error.errno, io_error.strerror, entry_path)
+
+  def lstat(self, entry_path):
+    """Returns the os.stat-like tuple for entry_path, not following symlinks.
+
+    Args:
+      entry_path:  path to filesystem object to retrieve
+
+    Returns:
+      the os.stat_result object corresponding to entry_path
+
+    Raises:
+      OSError: if the filesystem object doesn't exist.
+    """
+    # stat should return the tuple representing return value of os.stat
+    try:
+      stats = self.filesystem.LResolveObject(entry_path)
+      st_obj = os.stat_result((stats.st_mode, stats.st_ino, stats.st_dev,
+                               stats.st_nlink, stats.st_uid, stats.st_gid,
+                               stats.st_size, stats.st_atime,
+                               stats.st_mtime, stats.st_ctime))
+      return st_obj
+    except IOError as io_error:
+      raise OSError(io_error.errno, io_error.strerror, entry_path)
+
+  def remove(self, path):
+    """Removes the FakeFile object representing the specified file."""
+    path = self.filesystem.NormalizePath(path)
+    if self.path.isdir(path) and not self.path.islink(path):
+      raise OSError(errno.EISDIR, "Is a directory: '%s'" % path)
+    try:
+      self.filesystem.RemoveObject(path)
+    except IOError as e:
+      raise OSError(e.errno, e.strerror, e.filename)
+
+  # As per the documentation unlink = remove.
+  unlink = remove
+
+  def rename(self, old_file, new_file):
+    """Adds a FakeFile object at new_file containing contents of old_file.
+
+    Also removes the FakeFile object for old_file, and replaces existing
+    new_file object, if one existed.
+
+    Args:
+      old_file:  path to filesystem object to rename
+      new_file:  path to where the filesystem object will live after this call
+
+    Raises:
+      OSError:  if old_file does not exist.
+      IOError:  if dirname(new_file) does not exist
+    """
+    old_file = self.filesystem.NormalizePath(old_file)
+    new_file = self.filesystem.NormalizePath(new_file)
+    if not self.filesystem.Exists(old_file):
+      raise OSError(errno.ENOENT,
+                    'Fake os object: can not rename nonexistent file '
+                    'with name',
+                    old_file)
+    if self.filesystem.Exists(new_file):
+      if old_file == new_file:
+        return None  # Nothing to do here.
+      else:
+        self.remove(new_file)
+    old_dir, old_name = self.path.split(old_file)
+    new_dir, new_name = self.path.split(new_file)
+    if not self.filesystem.Exists(new_dir):
+      raise IOError(errno.ENOENT, 'No such fake directory', new_dir)
+    old_dir_object = self.filesystem.ResolveObject(old_dir)
+    old_object = old_dir_object.GetEntry(old_name)
+    old_object_mtime = old_object.st_mtime
+    new_dir_object = self.filesystem.ResolveObject(new_dir)
+    if old_object.st_mode & stat.S_IFDIR:
+      old_object.name = new_name
+      new_dir_object.AddEntry(old_object)
+      old_dir_object.RemoveEntry(old_name)
+    else:
+      self.filesystem.CreateFile(new_file,
+                                 st_mode=old_object.st_mode,
+                                 contents=old_object.contents,
+                                 create_missing_dirs=False)
+      self.remove(old_file)
+    new_object = self.filesystem.GetObject(new_file)
+    new_object.SetMTime(old_object_mtime)
+    self.chown(new_file, old_object.st_uid, old_object.st_gid)
+
+  def rmdir(self, target_directory):
+    """Remove a leaf Fake directory.
+
+    Args:
+      target_directory: (str) Name of directory to remove.
+
+    Raises:
+      OSError: if target_directory does not exist or is not a directory,
+      or as per FakeFilesystem.RemoveObject. Cannot remove '.'.
+    """
+    if target_directory == '.':
+      raise OSError(errno.EINVAL, 'Invalid argument: \'.\'')
+    target_directory = self.filesystem.NormalizePath(target_directory)
+    if self._ConfirmDir(target_directory):
+      if self.listdir(target_directory):
+        raise OSError(errno.ENOTEMPTY, 'Fake Directory not empty',
+                      target_directory)
+      try:
+        self.filesystem.RemoveObject(target_directory)
+      except IOError as e:
+        raise OSError(e.errno, e.strerror, e.filename)
+
+  def removedirs(self, target_directory):
+    """Remove a leaf Fake directory and all empty intermediate ones."""
+    target_directory = self.filesystem.NormalizePath(target_directory)
+    directory = self._ConfirmDir(target_directory)
+    if directory.contents:
+      raise OSError(errno.ENOTEMPTY, 'Fake Directory not empty',
+                    self.path.basename(target_directory))
+    else:
+      self.rmdir(target_directory)
+    head, tail = self.path.split(target_directory)
+    if not tail:
+      head, tail = self.path.split(head)
+    while head and tail:
+      head_dir = self._ConfirmDir(head)
+      if head_dir.contents:
+        break
+      self.rmdir(head)
+      head, tail = self.path.split(head)
+
+  def mkdir(self, dir_name, mode=PERM_DEF):
+    """Create a leaf Fake directory.
+
+    Args:
+      dir_name: (str) Name of directory to create.  Relative paths are assumed
+        to be relative to '/'.
+      mode: (int) Mode to create directory with.  This argument defaults to
+        0o777.  The umask is applied to this mode.
+
+    Raises:
+      OSError: if the directory name is invalid or parent directory is read only
+      or as per FakeFilesystem.AddObject.
+    """
+    if dir_name.endswith(self.sep):
+      dir_name = dir_name[:-1]
+
+    parent_dir, _ = self.path.split(dir_name)
+    if parent_dir:
+      base_dir = self.path.normpath(parent_dir)
+      if parent_dir.endswith(self.sep + '..'):
+        base_dir, unused_dotdot, _ = parent_dir.partition(self.sep + '..')
+      if not self.filesystem.Exists(base_dir):
+        raise OSError(errno.ENOENT, 'No such fake directory', base_dir)
+
+    dir_name = self.filesystem.NormalizePath(dir_name)
+    if self.filesystem.Exists(dir_name):
+      raise OSError(errno.EEXIST, 'Fake object already exists', dir_name)
+    head, tail = self.path.split(dir_name)
+    directory_object = self.filesystem.GetObject(head)
+    if not directory_object.st_mode & PERM_WRITE:
+      raise OSError(errno.EACCES, 'Permission Denied', dir_name)
+
+    self.filesystem.AddObject(
+        head, FakeDirectory(tail, mode & ~self.filesystem.umask))
+
+  def makedirs(self, dir_name, mode=PERM_DEF):
+    """Create a leaf Fake directory + create any non-existent parent dirs.
+
+    Args:
+      dir_name: (str) Name of directory to create.
+      mode: (int) Mode to create directory (and any necessary parent
+        directories) with. This argument defaults to 0o777.  The umask is
+        applied to this mode.
+
+    Raises:
+      OSError: if the directory already exists or as per
+      FakeFilesystem.CreateDirectory
+    """
+    dir_name = self.filesystem.NormalizePath(dir_name)
+    path_components = self.filesystem.GetPathComponents(dir_name)
+
+    # Raise a permission denied error if the first existing directory is not
+    # writeable.
+    current_dir = self.filesystem.root
+    for component in path_components:
+      if component not in current_dir.contents:
+        if not current_dir.st_mode & PERM_WRITE:
+          raise OSError(errno.EACCES, 'Permission Denied', dir_name)
+        else:
+          break
+      else:
+        current_dir = current_dir.contents[component]
+
+    self.filesystem.CreateDirectory(dir_name, mode & ~self.filesystem.umask)
+
+  def access(self, path, mode):
+    """Check if a file exists and has the specified permissions.
+
+    Args:
+      path: (str) Path to the file.
+      mode: (int) Permissions represented as a bitwise-OR combination of
+          os.F_OK, os.R_OK, os.W_OK, and os.X_OK.
+    Returns:
+      boolean, True if file is accessible, False otherwise
+    """
+    try:
+      st = self.stat(path)
+    except OSError as os_error:
+      if os_error.errno == errno.ENOENT:
+        return False
+      raise
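+    # Only the owner permission bits are consulted here: e.g. with st_mode
+    # 0o640, (st.st_mode >> 6) & 7 == 0o6, so access(path, os.R_OK | os.W_OK)
+    # is True but access(path, os.X_OK) is False.  (The real os.access also
+    # takes the calling process's uid/gid into account.)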
+    return (mode & ((st.st_mode >> 6) & 7)) == mode
+
+  def chmod(self, path, mode):
+    """Change the permissions of a file as encoded in integer mode.
+
+    Args:
+      path: (str) Path to the file.
+      mode: (int) Permissions
+    """
+    try:
+      file_object = self.filesystem.GetObject(path)
+    except IOError as io_error:
+      if io_error.errno == errno.ENOENT:
+        raise OSError(errno.ENOENT,
+                      'No such file or directory in fake filesystem',
+                      path)
+      raise
+    file_object.st_mode = ((file_object.st_mode & ~PERM_ALL) |
+                           (mode & PERM_ALL))
+    file_object.st_ctime = int(time.time())
+
+  def utime(self, path, times):
+    """Change the access and modified times of a file.
+
+    Args:
+      path: (str) Path to the file.
+      times: 2-tuple of numbers, of the form (atime, mtime) which is used to set
+          the access and modified times, respectively. If None, file's access
+          and modified times are set to the current time.
+
+    Raises:
+      TypeError: If anything other than numbers is specified in the passed
+          tuple, or the number of elements in the tuple is not equal to 2.
+    """
+    try:
+      file_object = self.filesystem.GetObject(path)
+    except IOError as io_error:
+      if io_error.errno == errno.ENOENT:
+        raise OSError(errno.ENOENT,
+                      'No such file or directory in fake filesystem',
+                      path)
+      raise
+    if times is None:
+      file_object.st_atime = int(time.time())
+      file_object.st_mtime = int(time.time())
+    else:
+      if len(times) != 2:
+        raise TypeError('utime() arg 2 must be a tuple (atime, mtime)')
+      for t in times:
+        if not isinstance(t, (int, float)):
+          raise TypeError('an integer is required')
+
+      file_object.st_atime = times[0]
+      file_object.st_mtime = times[1]
+
+  def chown(self, path, uid, gid):
+    """Set ownership of a faked file.
+
+    Args:
+      path: (str) Path to the file or directory.
+      uid: (int) Numeric uid to set the file or directory to.
+      gid: (int) Numeric gid to set the file or directory to.
+    """
+    try:
+      file_object = self.filesystem.GetObject(path)
+    except IOError as io_error:
+      if io_error.errno == errno.ENOENT:
+        raise OSError(errno.ENOENT,
+                      'No such file or directory in fake filesystem',
+                      path)
+      raise
+    if uid != -1:
+      file_object.st_uid = uid
+    if gid != -1:
+      file_object.st_gid = gid
+
+  def mknod(self, filename, mode=None, device=None):
+    """Create a filesystem node named 'filename'.
+
+    Unlike the real os module, this fake implementation does not support
+    device special files or named pipes.
+
+    Args:
+      filename: (str) Name of the file to create
+      mode: (int) permissions to use and type of file to be created.
+        Default permissions are 0o666.  Only the stat.S_IFREG file type
+        is supported by the fake implementation.  The umask is applied
+        to this mode.
+      device: not supported in fake implementation
+
+    Raises:
+      OSError: if called with unsupported options or the file can not be
+      created.
+    """
+    if mode is None:
+      mode = stat.S_IFREG | PERM_DEF_FILE
+    if device or not mode & stat.S_IFREG:
+      raise OSError(errno.EINVAL,
+                    'Fake os mknod implementation only supports '
+                    'regular files.')
+
+    head, tail = self.path.split(filename)
+    if not tail:
+      if self.filesystem.Exists(head):
+        raise OSError(errno.EEXIST, 'Fake filesystem: %s: %s' % (
+            os.strerror(errno.EEXIST), filename))
+      raise OSError(errno.ENOENT, 'Fake filesystem: %s: %s' % (
+          os.strerror(errno.ENOENT), filename))
+    if tail == '.' or tail == '..' or self.filesystem.Exists(filename):
+      raise OSError(errno.EEXIST, 'Fake filesystem: %s: %s' % (
+          os.strerror(errno.EEXIST), filename))
+    try:
+      self.filesystem.AddObject(head, FakeFile(tail,
+                                               mode & ~self.filesystem.umask))
+    except IOError:
+      raise OSError(errno.ENOTDIR, 'Fake filesystem: %s: %s' % (
+          os.strerror(errno.ENOTDIR), filename))
+
+  def symlink(self, link_target, path):
+    """Creates the specified symlink, pointed at the specified link target.
+
+    Args:
+      link_target:  the target of the symlink
+      path:  path to the symlink to create
+
+    Returns:
+      None
+
+    Raises:
+      IOError:  if the file already exists
+    """
+    self.filesystem.CreateLink(path, link_target)
+
+  # pylint: disable-msg=C6002
+  # TODO: Link doesn't behave like os.link, this needs to be fixed properly.
+  link = symlink
+
+  def __getattr__(self, name):
+    """Forwards any unfaked calls to the standard os module."""
+    return getattr(self._os_module, name)
+
+
+class FakeFileOpen(object):
+  """Faked file() and open() function replacements.
+
+  Returns FakeFile objects in a FakeFilesystem in place of the file()
+  or open() function.
+  """
+
+  def __init__(self, filesystem, delete_on_close=False):
+    """Init.
+
+    Args:
+      filesystem:  FakeFilesystem used to provide file system information
+      delete_on_close:  optional boolean, deletes file on close()
+    """
+    self.filesystem = filesystem
+    self._delete_on_close = delete_on_close
+
+  def __call__(self, *args, **kwargs):
+    """Redirects calls to file() or open() to appropriate method."""
+    if sys.version_info < (3, 0):
+      return self._call_ver2(*args, **kwargs)
+    else:
+      return self.Call(*args, **kwargs)
+
+  def _call_ver2(self, file_path, mode='r', buffering=-1, flags=None):
+    """Limits args of open() or file() for Python 2.x versions."""
+    # Backwards compatibility, mode arg used to be named flags
+    mode = flags or mode
+    return self.Call(file_path, mode, buffering)
+
+  def Call(self, file_, mode='r', buffering=-1, encoding=None,
+           errors=None, newline=None, closefd=True, opener=None):
+    """Returns a StringIO object with the contents of the target file object.
+
+    Args:
+      file_: path to target file or a file descriptor
+      mode: additional file modes. All r/w/a and r+/w+/a+ modes are supported.
+        't' and 'U' are ignored, e.g., 'wU' is treated as 'w'. 'b' sets
+        binary mode; no end-of-line translations are done in StringIO.
+      buffering: ignored. (Used for signature compliance with __builtin__.open)
+      encoding: ignored, strings have no encoding
+      errors: ignored, this relates to encoding
+      newline: controls universal newlines, passed to StringIO object
+      closefd: if a file descriptor rather than file name is passed, and set
+        to false, then the file descriptor is kept open when file is closed
+      opener: not supported
+
+    Returns:
+      a StringIO object containing the contents of the target file
+
+    Raises:
+      IOError: if the target object is a directory, the path is invalid or
+        permission is denied.
+    """
+    orig_modes = mode  # Save original modes for error messages.
+    # Binary mode for Python 2.x, or when 'b' is set in the mode string.
+    binary = sys.version_info < (3, 0) or 'b' in mode
+    # Normalize modes. Ignore 't' and 'U'.
+    mode = mode.replace('t', '').replace('b', '')
+    mode = mode.replace('rU', 'r').replace('U', 'r')
+
+    if mode not in _OPEN_MODE_MAP:
+      raise IOError('Invalid mode: %r' % orig_modes)
+
+    must_exist, need_read, need_write, truncate, append = _OPEN_MODE_MAP[mode]
+
+    file_object = None
+    filedes = None
+    # opening a file descriptor
+    if isinstance(file_, int):
+      filedes = file_
+      file_object = self.filesystem.GetOpenFile(filedes).GetObject()
+      file_path = file_object.name
+    else:
+      file_path = file_
+      real_path = self.filesystem.ResolvePath(file_path)
+      if self.filesystem.Exists(file_path):
+        file_object = self.filesystem.GetObjectFromNormalizedPath(real_path)
+      closefd = True
+
+    if file_object:
+      if ((need_read and not file_object.st_mode & PERM_READ) or
+          (need_write and not file_object.st_mode & PERM_WRITE)):
+        raise IOError(errno.EACCES, 'Permission denied', file_path)
+      if need_write:
+        file_object.st_ctime = int(time.time())
+        if truncate:
+          file_object.SetContents('')
+    else:
+      if must_exist:
+        raise IOError(errno.ENOENT, 'No such file or directory', file_path)
+      file_object = self.filesystem.CreateFile(
+          real_path, create_missing_dirs=False, apply_umask=True)
+
+    if file_object.st_mode & stat.S_IFDIR:
+      raise IOError(errno.EISDIR, 'Fake file object: is a directory', file_path)
+
+    class FakeFileWrapper(object):
+      """Wrapper for a StringIO object for use by a FakeFile object.
+
+      If the wrapper has any data written to it, it will propagate to
+      the FakeFile object on close() or flush().
+      """
+      if sys.version_info < (3, 0):
+        _OPERATION_ERROR = IOError
+      else:
+        _OPERATION_ERROR = io.UnsupportedOperation
+
+      def __init__(self, file_object, update=False, read=False, append=False,
+                   delete_on_close=False, filesystem=None, newline=None,
+                   binary=True, closefd=True):
+        self._file_object = file_object
+        self._append = append
+        self._read = read
+        self._update = update
+        self._closefd = closefd
+        self._file_epoch = file_object.epoch
+        contents = file_object.contents
+        newline_arg = {} if binary else {'newline': newline}
+        io_class = io.StringIO
+        # For Python 3, files opened as binary only read/write byte contents.
+        if sys.version_info >= (3, 0) and binary:
+          io_class = io.BytesIO
+          if contents and isinstance(contents, str):
+            contents = bytes(contents, 'ascii')
+        if contents:
+          if update:
+            self._io = io_class(**newline_arg)
+            self._io.write(contents)
+            if not append:
+              self._io.seek(0)
+            else:
+              self._read_whence = 0
+              if read:
+                self._read_seek = 0
+              else:
+                self._read_seek = self._io.tell()
+          else:
+            self._io = io_class(contents, **newline_arg)
+        else:
+          self._io = io_class(**newline_arg)
+          self._read_whence = 0
+          self._read_seek = 0
+        if delete_on_close:
+          assert filesystem, 'delete_on_close=True requires filesystem='
+        self._filesystem = filesystem
+        self._delete_on_close = delete_on_close
+        # Override here rather than modifying FakeFile.name, as FakeFilesystem
+        # expects it to be the file name only, with no directory components.
+        self.name = file_object.opened_as
+
+      def __enter__(self):
+        """To support usage of this fake file with the 'with' statement."""
+        return self
+
+      def __exit__(self, type, value, traceback):  # pylint: disable-msg=W0622
+        """To support usage of this fake file with the 'with' statement."""
+        self.close()
+
+      def GetObject(self):
+        """Returns FakeFile object that is wrapped by current class."""
+        return self._file_object
+
+      def fileno(self):
+        """Returns file descriptor of file object."""
+        return self.filedes
+
+      def close(self):
+        """File close."""
+        if self._update:
+          self._file_object.SetContents(self._io.getvalue())
+        if self._closefd:
+          self._filesystem.CloseOpenFile(self)
+        if self._delete_on_close:
+          self._filesystem.RemoveObject(self.name)
+
+      def flush(self):
+        """Flush file contents to 'disk'."""
+        if self._update:
+          self._file_object.SetContents(self._io.getvalue())
+          self._file_epoch = self._file_object.epoch
+
+      def seek(self, offset, whence=0):
+        """Move read/write pointer in 'file'."""
+        if not self._append:
+          self._io.seek(offset, whence)
+        else:
+          self._read_seek = offset
+          self._read_whence = whence
+
+      def tell(self):
+        """Return the file's current position.
+
+        Returns:
+          int, file's current position in bytes.
+        """
+        if not self._append:
+          return self._io.tell()
+        if self._read_whence:
+          write_seek = self._io.tell()
+          self._io.seek(self._read_seek, self._read_whence)
+          self._read_seek = self._io.tell()
+          self._read_whence = 0
+          self._io.seek(write_seek)
+        return self._read_seek
+
+      def _UpdateStringIO(self):
+        """Updates the StringIO with changes to the file object contents."""
+        if self._file_epoch == self._file_object.epoch:
+          return
+        whence = self._io.tell()
+        self._io.seek(0)
+        self._io.truncate()
+        self._io.write(self._file_object.contents)
+        self._io.seek(whence)
+        self._file_epoch = self._file_object.epoch
+
+      def _ReadWrappers(self, name):
+        """Wrap a StringIO attribute in a read wrapper.
+
+        Returns a read_wrapper which tracks our own read pointer since the
+        StringIO object has no concept of separate read and write pointers.
+
+        Args:
+          name: the name of the StringIO attribute to wrap.  Should be a
+              read call.
+
+        Returns:
+          either a read_error or read_wrapper function.
+        """
+        io_attr = getattr(self._io, name)
+
+        def read_wrapper(*args, **kwargs):
+          """Wrap all read calls to the StringIO Object.
+
+          We do this to track the read pointer separate from the write
+          pointer.  Anything that wants to read from the StringIO object
+          while we're in append mode goes through this.
+
+          Args:
+            *args: pass through args
+            **kwargs: pass through kwargs
+          Returns:
+            Wrapped StringIO object method
+          """
+          self._io.seek(self._read_seek, self._read_whence)
+          ret_value = io_attr(*args, **kwargs)
+          self._read_seek = self._io.tell()
+          self._read_whence = 0
+          self._io.seek(0, 2)
+          return ret_value
+        return read_wrapper
+
+      def _OtherWrapper(self, name):
+        """Wrap a StringIO attribute in an other_wrapper.
+
+        Args:
+          name: the name of the StringIO attribute to wrap.
+
+        Returns:
+          other_wrapper which is described below.
+        """
+        io_attr = getattr(self._io, name)
+
+        def other_wrapper(*args, **kwargs):
+          """Wrap all other calls to the StringIO Object.
+
+          We do this to track changes to the write pointer.  Anything that
+          moves the write pointer in a file open for appending should move
+          the read pointer as well.
+
+          Args:
+            *args: pass through args
+            **kwargs: pass through kwargs
+          Returns:
+            Wrapped StringIO object method
+          """
+          write_seek = self._io.tell()
+          ret_value = io_attr(*args, **kwargs)
+          if write_seek != self._io.tell():
+            self._read_seek = self._io.tell()
+            self._read_whence = 0
+            self._file_object.st_size += (self._read_seek - write_seek)
+          return ret_value
+        return other_wrapper
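+
+      # Illustrative note on the wrappers above (hypothetical example): in
+      # append mode the underlying StringIO write position stays pinned at
+      # the end, while _read_seek/_read_whence emulate the separate read
+      # position a real file object keeps:
+      #   f = FakeFileOpen(filesystem)('/log.txt', 'a+')
+      #   f.write('x')
+      #   f.seek(0)
+      #   f.read()    # reads from the start; later writes still append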
+
+      def Size(self):
+        return self._file_object.st_size
+
+      def __getattr__(self, name):
+        if self._file_object.IsLargeFile():
+          raise FakeLargeFileIoException(file_path)
+
+        # errors on called method vs. open mode
+        if not self._read and name.startswith('read'):
+          def read_error(*args, **kwargs):
+            """Throw an error unless the argument is zero."""
+            if args and args[0] == 0:
+              return ''
+            raise self._OPERATION_ERROR('File is not open for reading.')
+          return read_error
+        if not self._update and (name.startswith('write')
+                                 or name == 'truncate'):
+          def write_error(*args, **kwargs):
+            """Throw an error."""
+            raise self._OPERATION_ERROR('File is not open for writing.')
+          return write_error
+
+        if name.startswith('read'):
+          self._UpdateStringIO()
+        if self._append:
+          if name.startswith('read'):
+            return self._ReadWrappers(name)
+          else:
+            return self._OtherWrapper(name)
+        return getattr(self._io, name)
+
+      def __iter__(self):
+        if not self._read:
+          raise self._OPERATION_ERROR('File is not open for reading')
+        return self._io.__iter__()
+
+    # If you print obj.name, the argument passed to open() must be printed:
+    # not the abspath, not the filename, but the actual argument.
+    file_object.opened_as = file_path
+
+    fakefile = FakeFileWrapper(file_object,
+                               update=need_write,
+                               read=need_read,
+                               append=append,
+                               delete_on_close=self._delete_on_close,
+                               filesystem=self.filesystem,
+                               newline=newline,
+                               binary=binary,
+                               closefd=closefd)
+    if filedes is not None:
+      fakefile.filedes = filedes
+    else:
+      fakefile.filedes = self.filesystem.AddOpenFile(fakefile)
+    return fakefile
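+
+  # Illustrative usage sketch (hypothetical example path): FakeFileOpen is
+  # typically used to stand in for the builtin open():
+  #   fake_open = FakeFileOpen(filesystem)
+  #   with fake_open('/var/data/xx1.txt') as f:
+  #     data = f.read()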
+
+
+def _RunDoctest():
+  # pylint: disable-msg=C6204
+  import doctest
+  import fake_filesystem  # pylint: disable-msg=W0406
+  return doctest.testmod(fake_filesystem)
+
+
+if __name__ == '__main__':
+  _RunDoctest()
diff --git a/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_filesystem_glob.py b/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_filesystem_glob.py
new file mode 100755
index 0000000..db387df
--- /dev/null
+++ b/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_filesystem_glob.py
@@ -0,0 +1,120 @@
+# Copyright 2009 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A fake glob module implementation that uses fake_filesystem for unit tests.
+
+Includes:
+  FakeGlob: Uses a FakeFilesystem to provide a fake replacement for the
+    glob module.
+
+Usage:
+>>> import fake_filesystem
+>>> import fake_filesystem_glob
+>>> filesystem = fake_filesystem.FakeFilesystem()
+>>> glob_module = fake_filesystem_glob.FakeGlobModule(filesystem)
+
+>>> file = filesystem.CreateFile('new-file')
+>>> glob_module.glob('*')
+['new-file']
+>>> glob_module.glob('???-file')
+['new-file']
+"""
+
+import fnmatch
+import glob
+import os
+
+import fake_filesystem
+
+
+class FakeGlobModule(object):
+  """Uses a FakeFilesystem to provide a fake replacement for glob module."""
+
+  def __init__(self, filesystem):
+    """Construct fake glob module using the fake filesystem.
+
+    Args:
+      filesystem:  FakeFilesystem used to provide file system information
+    """
+    self._glob_module = glob
+    self._os_module = fake_filesystem.FakeOsModule(filesystem)
+    self._path_module = self._os_module.path
+
+  def glob(self, pathname):  # pylint: disable-msg=C6409
+    """Return a list of paths matching a pathname pattern.
+
+    The pattern may contain shell-style wildcards a la fnmatch.
+
+    Args:
+      pathname: the pattern with which to find a list of paths
+
+    Returns:
+      List of strings matching the glob pattern.
+    """
+    if not self.has_magic(pathname):
+      if self._path_module.exists(pathname):
+        return [pathname]
+      else:
+        return []
+
+    dirname, basename = self._path_module.split(pathname)
+
+    if not dirname:
+      return self.glob1(self._path_module.curdir, basename)
+    elif self.has_magic(dirname):
+      path_list = self.glob(dirname)
+    else:
+      path_list = [dirname]
+
+    if not self.has_magic(basename):
+      result = []
+      for dirname in path_list:
+        if basename or self._path_module.isdir(dirname):
+          name = self._path_module.join(dirname, basename)
+          if self._path_module.exists(name):
+            result.append(name)
+    else:
+      result = []
+      for dirname in path_list:
+        sublist = self.glob1(dirname, basename)
+        for name in sublist:
+          result.append(self._path_module.join(dirname, name))
+
+    return result
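+
+  # Illustrative note (assuming a '/' path separator): when the directory
+  # part of the pattern itself contains magic characters, glob() recurses on
+  # it and then applies glob1() per matched directory.  For the module
+  # docstring's fixture, glob_module.glob('/n*') would return ['/new-file'].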
+
+  def glob1(self, dirname, pattern):  # pylint: disable-msg=C6409
+    if not dirname:
+      dirname = self._path_module.curdir
+    try:
+      names = self._os_module.listdir(dirname)
+    except os.error:
+      return []
+    if pattern[0] != '.':
+      names = filter(lambda x: x[0] != '.', names)
+    return fnmatch.filter(names, pattern)
+
+  def __getattr__(self, name):
+    """Forwards any non-faked calls to the standard glob module."""
+    return getattr(self._glob_module, name)
+
+
+def _RunDoctest():
+  # pylint: disable-msg=C6111,C6204,W0406
+  import doctest
+  import fake_filesystem_glob
+  return doctest.testmod(fake_filesystem_glob)
+
+
+if __name__ == '__main__':
+  _RunDoctest()
diff --git a/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_filesystem_glob_test.py b/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_filesystem_glob_test.py
new file mode 100755
index 0000000..b08f982
--- /dev/null
+++ b/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_filesystem_glob_test.py
@@ -0,0 +1,82 @@
+#! /usr/bin/env python
+#
+# Copyright 2009 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Test for fake_filesystem_glob."""
+
+import doctest
+import sys
+if sys.version_info < (2, 7):
+    import unittest2 as unittest
+else:
+    import unittest
+
+import fake_filesystem
+import fake_filesystem_glob
+
+
+class FakeGlobUnitTest(unittest.TestCase):
+
+  def setUp(self):
+    self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
+    self.glob = fake_filesystem_glob.FakeGlobModule(self.filesystem)
+    directory = './xyzzy'
+    self.filesystem.CreateDirectory(directory)
+    self.filesystem.CreateDirectory('%s/subdir' % directory)
+    self.filesystem.CreateDirectory('%s/subdir2' % directory)
+    self.filesystem.CreateFile('%s/subfile' % directory)
+    self.filesystem.CreateFile('[Temp]')
+
+  def testGlobEmpty(self):
+    self.assertEqual(self.glob.glob(''), [])
+
+  def testGlobStar(self):
+    self.assertEqual(['/xyzzy/subdir', '/xyzzy/subdir2', '/xyzzy/subfile'],
+                     self.glob.glob('/xyzzy/*'))
+
+  def testGlobExact(self):
+    self.assertEqual(['/xyzzy'], self.glob.glob('/xyzzy'))
+    self.assertEqual(['/xyzzy/subfile'], self.glob.glob('/xyzzy/subfile'))
+
+  def testGlobQuestion(self):
+    self.assertEqual(['/xyzzy/subdir', '/xyzzy/subdir2', '/xyzzy/subfile'],
+                     self.glob.glob('/x?zz?/*'))
+
+  def testGlobNoMagic(self):
+    self.assertEqual(['/xyzzy'], self.glob.glob('/xyzzy'))
+    self.assertEqual(['/xyzzy/subdir'], self.glob.glob('/xyzzy/subdir'))
+
+  def testNonExistentPath(self):
+    self.assertEqual([], self.glob.glob('nonexistent'))
+
+  def testDocTest(self):
+    self.assertFalse(doctest.testmod(fake_filesystem_glob)[0])
+
+  def testMagicDir(self):
+    self.assertEqual(['/[Temp]'], self.glob.glob('/*emp*'))
+
+  def testRootGlob(self):
+    self.assertEqual(['[Temp]', 'xyzzy'], self.glob.glob('*'))
+
+  def testGlob1(self):
+    self.assertEqual(['[Temp]'], self.glob.glob1('/', '*Tem*'))
+
+  def testHasMagic(self):
+    self.assertTrue(self.glob.has_magic('['))
+    self.assertFalse(self.glob.has_magic('a'))
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_filesystem_shutil.py b/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_filesystem_shutil.py
new file mode 100755
index 0000000..87aff44
--- /dev/null
+++ b/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_filesystem_shutil.py
@@ -0,0 +1,220 @@
+#!/usr/bin/env python
+#
+# Copyright 2009 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# pylint: disable-msg=W0612,W0613,C6409
+
+"""A fake shutil module implementation that uses fake_filesystem for unit tests.
+
+Includes:
+  FakeShutil: Uses a FakeFilesystem to provide a fake replacement for the
+    shutil module.
+
+Usage:
+>>> import fake_filesystem
+>>> import fake_filesystem_shutil
+>>> filesystem = fake_filesystem.FakeFilesystem()
+>>> shutil_module = fake_filesystem_shutil.FakeShutilModule(filesystem)
+
+Copy a fake_filesystem directory tree:
+>>> new_file = filesystem.CreateFile('/src/new-file')
+>>> shutil_module.copytree('/src', '/dst')
+>>> filesystem.Exists('/dst/new-file')
+True
+
+Remove a fake_filesystem directory tree:
+>>> shutil_module.rmtree('/src')
+>>> filesystem.Exists('/src/new-file')
+False
+"""
+
+import errno
+import os
+import shutil
+import stat
+
+__pychecker__ = 'no-reimportself'
+
+_PERM_WRITE = 0o200  # Write permission bit.
+_PERM_READ = 0o400   # Read permission bit.
+_PERM_ALL = 0o7777   # All permission bits.
+
+
+class FakeShutilModule(object):
+  """Uses a FakeFilesystem to provide a fake replacement for shutil module."""
+
+  def __init__(self, filesystem):
+    """Construct fake shutil module using the fake filesystem.
+
+    Args:
+      filesystem:  FakeFilesystem used to provide file system information
+    """
+    self.filesystem = filesystem
+    self._shutil_module = shutil
+
+  def rmtree(self, path, ignore_errors=False, onerror=None):
+    """Remove a directory and all its contents.
+
+    Args:
+      path: (str) Directory tree to remove.
+      ignore_errors: (bool) unimplemented
+      onerror: (func) unimplemented
+    """
+    self.filesystem.RemoveObject(path)
+
+  def copy(self, src, dst):
+    """Copy data and mode bits ("cp src dst").
+
+    Args:
+      src: (str) source file
+      dst: (str) destination, may be a directory
+    """
+    if self.filesystem.Exists(dst):
+      if stat.S_ISDIR(self.filesystem.GetObject(dst).st_mode):
+        dst = self.filesystem.JoinPaths(dst, os.path.basename(src))
+    self.copyfile(src, dst)
+    src_object = self.filesystem.GetObject(src)
+    dst_object = self.filesystem.GetObject(dst)
+    dst_object.st_mode = ((dst_object.st_mode & ~_PERM_ALL) |
+                          (src_object.st_mode & _PERM_ALL))
+
+  def copyfile(self, src, dst):
+    """Copy data from src to dst.
+
+    Args:
+      src: (str) source file
+      dst: (str) destination file
+
+    Raises:
+      IOError: if the file can't be copied
+      shutil.Error: if the src and dst files are the same
+    """
+    src_file_object = self.filesystem.GetObject(src)
+    if not src_file_object.st_mode & _PERM_READ:
+      raise IOError(errno.EACCES, 'Permission denied', src)
+    if stat.S_ISDIR(src_file_object.st_mode):
+      raise IOError(errno.EISDIR, 'Is a directory', src)
+
+    dst_dir = os.path.dirname(dst)
+    if dst_dir:
+      if not self.filesystem.Exists(dst_dir):
+        raise IOError(errno.ENOTDIR, 'Not a directory', dst)
+      dst_dir_object = self.filesystem.GetObject(dst_dir)
+      if not dst_dir_object.st_mode & _PERM_WRITE:
+        raise IOError(errno.EACCES, 'Permission denied', dst_dir)
+
+    abspath_src = self.filesystem.NormalizePath(
+        self.filesystem.ResolvePath(src))
+    abspath_dst = self.filesystem.NormalizePath(
+        self.filesystem.ResolvePath(dst))
+    if abspath_src == abspath_dst:
+      raise shutil.Error('`%s` and `%s` are the same file' % (src, dst))
+
+    if self.filesystem.Exists(dst):
+      dst_file_object = self.filesystem.GetObject(dst)
+      if stat.S_ISDIR(dst_file_object.st_mode):
+        raise IOError(errno.EISDIR, 'Is a directory', dst)
+      if not dst_file_object.st_mode & _PERM_WRITE:
+        raise IOError(errno.EACCES, 'Permission denied', dst)
+      dst_file_object.SetContents(src_file_object.contents)
+
+    else:
+      self.filesystem.CreateFile(dst, contents=src_file_object.contents)
+
+  def copystat(self, src, dst):
+    """Copy all stat info (mode bits, atime, and mtime) from src to dst.
+
+    Args:
+      src: (str) source file
+      dst: (str) destination file
+    """
+    src_object = self.filesystem.GetObject(src)
+    dst_object = self.filesystem.GetObject(dst)
+    dst_object.st_mode = ((dst_object.st_mode & ~_PERM_ALL) |
+                          (src_object.st_mode & _PERM_ALL))
+    dst_object.st_uid = src_object.st_uid
+    dst_object.st_gid = src_object.st_gid
+    dst_object.st_atime = src_object.st_atime
+    dst_object.st_mtime = src_object.st_mtime
+
+  def copy2(self, src, dst):
+    """Copy data and all stat info ("cp -p src dst").
+
+    Args:
+      src: (str) source file
+      dst: (str) destination, may be a directory
+    """
+    if self.filesystem.Exists(dst):
+      if stat.S_ISDIR(self.filesystem.GetObject(dst).st_mode):
+        dst = self.filesystem.JoinPaths(dst, os.path.basename(src))
+    self.copyfile(src, dst)
+    self.copystat(src, dst)
+
+  def copytree(self, src, dst, symlinks=False):
+    """Recursively copy a directory tree.
+
+    Args:
+      src: (str) source directory
+      dst: (str) destination directory, must not already exist
+      symlinks: (bool) copy symlinks as symlinks instead of copying the
+                contents of the linked files. Currently unused.
+
+    Raises:
+      OSError: if src is missing or isn't a directory
+    """
+    self.filesystem.CreateDirectory(dst)
+    try:
+      directory = self.filesystem.GetObject(src)
+    except IOError as e:
+      raise OSError(e.errno, e.message)
+    if not stat.S_ISDIR(directory.st_mode):
+      raise OSError(errno.ENOTDIR,
+                    'Fake os module: %r not a directory' % src)
+    for name in directory.contents:
+      srcname = self.filesystem.JoinPaths(src, name)
+      dstname = self.filesystem.JoinPaths(dst, name)
+      src_mode = self.filesystem.GetObject(srcname).st_mode
+      if stat.S_ISDIR(src_mode):
+        self.copytree(srcname, dstname, symlinks)
+      else:
+        self.copy2(srcname, dstname)
+
+  def move(self, src, dst):
+    """Rename a file or directory.
+
+    Args:
+      src: (str) source file or directory
+      dst: (str) if the src is a directory, the dst must not already exist
+    """
+    if stat.S_ISDIR(self.filesystem.GetObject(src).st_mode):
+      self.copytree(src, dst, symlinks=True)
+    else:
+      self.copy2(src, dst)
+    self.filesystem.RemoveObject(src)
+
+  def __getattr__(self, name):
+    """Forwards any non-faked calls to the standard shutil module."""
+    return getattr(self._shutil_module, name)
+
+
+def _RunDoctest():
+  # pylint: disable-msg=C6111,C6204,W0406
+  import doctest
+  import fake_filesystem_shutil
+  return doctest.testmod(fake_filesystem_shutil)
+
+
+if __name__ == '__main__':
+  _RunDoctest()
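(For reference: a minimal sketch of the copy2/copystat behavior implemented above. The file paths, contents, and permission value are invented; every call appears in this module or in its test file below.)

    import fake_filesystem
    import fake_filesystem_shutil

    fs = fake_filesystem.FakeFilesystem(path_separator='/')
    fake_shutil = fake_filesystem_shutil.FakeShutilModule(fs)

    # Create a source file and give it distinctive permission bits.
    src = fs.CreateFile('/src/report.txt', contents='hello')
    src.st_mode = (src.st_mode & ~0o7777) | 0o640

    # copy2 copies the data, then copystat copies mode/uid/gid/atime/mtime.
    fake_shutil.copy2('/src/report.txt', '/src/report_copy.txt')
    dst = fs.GetObject('/src/report_copy.txt')
    assert dst.contents == 'hello'
    assert (dst.st_mode & 0o7777) == 0o640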
diff --git a/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_filesystem_shutil_test.py b/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_filesystem_shutil_test.py
new file mode 100755
index 0000000..4f26518
--- /dev/null
+++ b/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_filesystem_shutil_test.py
@@ -0,0 +1,305 @@
+#! /usr/bin/env python
+#
+# Copyright 2009 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittest for fake_filesystem_shutil."""
+
+import stat
+import time
+import sys
+if sys.version_info < (2, 7):
+    import unittest2 as unittest
+else:
+    import unittest
+
+import fake_filesystem
+import fake_filesystem_shutil
+
+
+class FakeShutilModuleTest(unittest.TestCase):
+
+  def setUp(self):
+    self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
+    self.shutil = fake_filesystem_shutil.FakeShutilModule(self.filesystem)
+
+  def testRmtree(self):
+    directory = 'xyzzy'
+    self.filesystem.CreateDirectory(directory)
+    self.filesystem.CreateDirectory('%s/subdir' % directory)
+    self.filesystem.CreateFile('%s/subfile' % directory)
+    self.assertTrue(self.filesystem.Exists(directory))
+    self.shutil.rmtree(directory)
+    self.assertFalse(self.filesystem.Exists(directory))
+
+  def testCopy(self):
+    src_file = 'xyzzy'
+    dst_file = 'xyzzy_copy'
+    src_obj = self.filesystem.CreateFile(src_file)
+    src_obj.st_mode = ((src_obj.st_mode & ~0o7777) | 0o750)
+    self.assertTrue(self.filesystem.Exists(src_file))
+    self.assertFalse(self.filesystem.Exists(dst_file))
+    self.shutil.copy(src_file, dst_file)
+    self.assertTrue(self.filesystem.Exists(dst_file))
+    dst_obj = self.filesystem.GetObject(dst_file)
+    self.assertEqual(src_obj.st_mode, dst_obj.st_mode)
+
+  def testCopyDirectory(self):
+    src_file = 'xyzzy'
+    parent_directory = 'parent'
+    dst_file = '%s/%s' % (parent_directory, src_file)
+    src_obj = self.filesystem.CreateFile(src_file)
+    self.filesystem.CreateDirectory(parent_directory)
+    src_obj.st_mode = ((src_obj.st_mode & ~0o7777) | 0o750)
+    self.assertTrue(self.filesystem.Exists(src_file))
+    self.assertTrue(self.filesystem.Exists(parent_directory))
+    self.assertFalse(self.filesystem.Exists(dst_file))
+    self.shutil.copy(src_file, parent_directory)
+    self.assertTrue(self.filesystem.Exists(dst_file))
+    dst_obj = self.filesystem.GetObject(dst_file)
+    self.assertEqual(src_obj.st_mode, dst_obj.st_mode)
+
+  def testCopystat(self):
+    src_file = 'xyzzy'
+    dst_file = 'xyzzy_copy'
+    src_obj = self.filesystem.CreateFile(src_file)
+    dst_obj = self.filesystem.CreateFile(dst_file)
+    src_obj.st_mode = ((src_obj.st_mode & ~0o7777) | 0o750)
+    src_obj.st_uid = 123
+    src_obj.st_gid = 123
+    src_obj.st_atime = time.time()
+    src_obj.st_mtime = time.time()
+    self.assertTrue(self.filesystem.Exists(src_file))
+    self.assertTrue(self.filesystem.Exists(dst_file))
+    self.shutil.copystat(src_file, dst_file)
+    self.assertEqual(src_obj.st_mode, dst_obj.st_mode)
+    self.assertEqual(src_obj.st_uid, dst_obj.st_uid)
+    self.assertEqual(src_obj.st_gid, dst_obj.st_gid)
+    self.assertEqual(src_obj.st_atime, dst_obj.st_atime)
+    self.assertEqual(src_obj.st_mtime, dst_obj.st_mtime)
+
+  def testCopy2(self):
+    src_file = 'xyzzy'
+    dst_file = 'xyzzy_copy'
+    src_obj = self.filesystem.CreateFile(src_file)
+    src_obj.st_mode = ((src_obj.st_mode & ~0o7777) | 0o750)
+    src_obj.st_uid = 123
+    src_obj.st_gid = 123
+    src_obj.st_atime = time.time()
+    src_obj.st_mtime = time.time()
+    self.assertTrue(self.filesystem.Exists(src_file))
+    self.assertFalse(self.filesystem.Exists(dst_file))
+    self.shutil.copy2(src_file, dst_file)
+    self.assertTrue(self.filesystem.Exists(dst_file))
+    dst_obj = self.filesystem.GetObject(dst_file)
+    self.assertEqual(src_obj.st_mode, dst_obj.st_mode)
+    self.assertEqual(src_obj.st_uid, dst_obj.st_uid)
+    self.assertEqual(src_obj.st_gid, dst_obj.st_gid)
+    self.assertEqual(src_obj.st_atime, dst_obj.st_atime)
+    self.assertEqual(src_obj.st_mtime, dst_obj.st_mtime)
+
+  def testCopy2Directory(self):
+    src_file = 'xyzzy'
+    parent_directory = 'parent'
+    dst_file = '%s/%s' % (parent_directory, src_file)
+    src_obj = self.filesystem.CreateFile(src_file)
+    self.filesystem.CreateDirectory(parent_directory)
+    src_obj.st_mode = ((src_obj.st_mode & ~0o7777) | 0o750)
+    src_obj.st_uid = 123
+    src_obj.st_gid = 123
+    src_obj.st_atime = time.time()
+    src_obj.st_mtime = time.time()
+    self.assertTrue(self.filesystem.Exists(src_file))
+    self.assertTrue(self.filesystem.Exists(parent_directory))
+    self.assertFalse(self.filesystem.Exists(dst_file))
+    self.shutil.copy2(src_file, parent_directory)
+    self.assertTrue(self.filesystem.Exists(dst_file))
+    dst_obj = self.filesystem.GetObject(dst_file)
+    self.assertEqual(src_obj.st_mode, dst_obj.st_mode)
+    self.assertEqual(src_obj.st_uid, dst_obj.st_uid)
+    self.assertEqual(src_obj.st_gid, dst_obj.st_gid)
+    self.assertEqual(src_obj.st_atime, dst_obj.st_atime)
+    self.assertEqual(src_obj.st_mtime, dst_obj.st_mtime)
+
+  def testCopytree(self):
+    src_directory = 'xyzzy'
+    dst_directory = 'xyzzy_copy'
+    self.filesystem.CreateDirectory(src_directory)
+    self.filesystem.CreateDirectory('%s/subdir' % src_directory)
+    self.filesystem.CreateFile('%s/subfile' % src_directory)
+    self.assertTrue(self.filesystem.Exists(src_directory))
+    self.assertFalse(self.filesystem.Exists(dst_directory))
+    self.shutil.copytree(src_directory, dst_directory)
+    self.assertTrue(self.filesystem.Exists(dst_directory))
+    self.assertTrue(self.filesystem.Exists('%s/subdir' % dst_directory))
+    self.assertTrue(self.filesystem.Exists('%s/subfile' % dst_directory))
+
+  def testCopytreeSrcIsFile(self):
+    src_file = 'xyzzy'
+    dst_directory = 'xyzzy_copy'
+    self.filesystem.CreateFile(src_file)
+    self.assertTrue(self.filesystem.Exists(src_file))
+    self.assertFalse(self.filesystem.Exists(dst_directory))
+    self.assertRaises(OSError,
+                      self.shutil.copytree,
+                      src_file,
+                      dst_directory)
+
+  def testMoveFile(self):
+    src_file = 'original_xyzzy'
+    dst_file = 'moved_xyzzy'
+    self.filesystem.CreateFile(src_file)
+    self.assertTrue(self.filesystem.Exists(src_file))
+    self.assertFalse(self.filesystem.Exists(dst_file))
+    self.shutil.move(src_file, dst_file)
+    self.assertTrue(self.filesystem.Exists(dst_file))
+    self.assertFalse(self.filesystem.Exists(src_file))
+
+  def testMoveFileIntoDirectory(self):
+    src_file = 'xyzzy'
+    dst_directory = 'directory'
+    dst_file = '%s/%s' % (dst_directory, src_file)
+    self.filesystem.CreateFile(src_file)
+    self.filesystem.CreateDirectory(dst_directory)
+    self.assertTrue(self.filesystem.Exists(src_file))
+    self.assertFalse(self.filesystem.Exists(dst_file))
+    self.shutil.move(src_file, dst_directory)
+    self.assertTrue(self.filesystem.Exists(dst_file))
+    self.assertFalse(self.filesystem.Exists(src_file))
+
+  def testMoveDirectory(self):
+    src_directory = 'original_xyzzy'
+    dst_directory = 'moved_xyzzy'
+    self.filesystem.CreateDirectory(src_directory)
+    self.filesystem.CreateFile('%s/subfile' % src_directory)
+    self.filesystem.CreateDirectory('%s/subdir' % src_directory)
+    self.assertTrue(self.filesystem.Exists(src_directory))
+    self.assertFalse(self.filesystem.Exists(dst_directory))
+    self.shutil.move(src_directory, dst_directory)
+    self.assertTrue(self.filesystem.Exists(dst_directory))
+    self.assertTrue(self.filesystem.Exists('%s/subfile' % dst_directory))
+    self.assertTrue(self.filesystem.Exists('%s/subdir' % dst_directory))
+    self.assertFalse(self.filesystem.Exists(src_directory))
+
+
+class CopyFileTest(unittest.TestCase):
+
+  def setUp(self):
+    self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
+    self.shutil = fake_filesystem_shutil.FakeShutilModule(self.filesystem)
+
+  def testCommonCase(self):
+    src_file = 'xyzzy'
+    dst_file = 'xyzzy_copy'
+    contents = 'contents of file'
+    self.filesystem.CreateFile(src_file, contents=contents)
+    self.assertTrue(self.filesystem.Exists(src_file))
+    self.assertFalse(self.filesystem.Exists(dst_file))
+    self.shutil.copyfile(src_file, dst_file)
+    self.assertTrue(self.filesystem.Exists(dst_file))
+    self.assertEqual(contents, self.filesystem.GetObject(dst_file).contents)
+
+  def testRaisesIfSourceAndDestAreTheSameFile(self):
+    src_file = 'xyzzy'
+    dst_file = src_file
+    contents = 'contents of file'
+    self.filesystem.CreateFile(src_file, contents=contents)
+    self.assertTrue(self.filesystem.Exists(src_file))
+    self.assertRaises(self.shutil.Error,
+                      self.shutil.copyfile, src_file, dst_file)
+
+  def testRaisesIfDestIsASymlinkToSrc(self):
+    src_file = '/tmp/foo'
+    dst_file = '/tmp/bar'
+    contents = 'contents of file'
+    self.filesystem.CreateFile(src_file, contents=contents)
+    self.filesystem.CreateLink(dst_file, src_file)
+    self.assertTrue(self.filesystem.Exists(src_file))
+    self.assertRaises(self.shutil.Error,
+                      self.shutil.copyfile, src_file, dst_file)
+
+  def testSucceedsIfDestExistsAndIsWritable(self):
+    src_file = 'xyzzy'
+    dst_file = 'xyzzy_copy'
+    src_contents = 'contents of source file'
+    dst_contents = 'contents of dest file'
+    self.filesystem.CreateFile(src_file, contents=src_contents)
+    self.filesystem.CreateFile(dst_file, contents=dst_contents)
+    self.assertTrue(self.filesystem.Exists(src_file))
+    self.assertTrue(self.filesystem.Exists(dst_file))
+    self.shutil.copyfile(src_file, dst_file)
+    self.assertTrue(self.filesystem.Exists(dst_file))
+    self.assertEqual(src_contents,
+                     self.filesystem.GetObject(dst_file).contents)
+
+  def testRaisesIfDestExistsAndIsNotWritable(self):
+    src_file = 'xyzzy'
+    dst_file = 'xyzzy_copy'
+    src_contents = 'contents of source file'
+    dst_contents = 'contents of dest file'
+    self.filesystem.CreateFile(src_file, contents=src_contents)
+    self.filesystem.CreateFile(dst_file,
+                               st_mode=stat.S_IFREG | 0o400,
+                               contents=dst_contents)
+    self.assertTrue(self.filesystem.Exists(src_file))
+    self.assertTrue(self.filesystem.Exists(dst_file))
+    self.assertRaises(IOError, self.shutil.copyfile, src_file, dst_file)
+
+  def testRaisesIfDestDirIsNotWritable(self):
+    src_file = 'xyzzy'
+    dst_dir = '/tmp/foo'
+    dst_file = '%s/%s' % (dst_dir, src_file)
+    src_contents = 'contents of source file'
+    self.filesystem.CreateFile(src_file, contents=src_contents)
+    self.filesystem.CreateDirectory(dst_dir, perm_bits=0o555)
+    self.assertTrue(self.filesystem.Exists(src_file))
+    self.assertTrue(self.filesystem.Exists(dst_dir))
+    self.assertRaises(IOError, self.shutil.copyfile, src_file, dst_file)
+
+  def testRaisesIfSrcDoesntExist(self):
+    src_file = 'xyzzy'
+    dst_file = 'xyzzy_copy'
+    self.assertFalse(self.filesystem.Exists(src_file))
+    self.assertRaises(IOError, self.shutil.copyfile, src_file, dst_file)
+
+  def testRaisesIfSrcNotReadable(self):
+    src_file = 'xyzzy'
+    dst_file = 'xyzzy_copy'
+    src_contents = 'contents of source file'
+    self.filesystem.CreateFile(src_file,
+                               st_mode=stat.S_IFREG | 0o000,
+                               contents=src_contents)
+    self.assertTrue(self.filesystem.Exists(src_file))
+    self.assertRaises(IOError, self.shutil.copyfile, src_file, dst_file)
+
+  def testRaisesIfSrcIsADirectory(self):
+    src_file = 'xyzzy'
+    dst_file = 'xyzzy_copy'
+    self.filesystem.CreateDirectory(src_file)
+    self.assertTrue(self.filesystem.Exists(src_file))
+    self.assertRaises(IOError, self.shutil.copyfile, src_file, dst_file)
+
+  def testRaisesIfDestIsADirectory(self):
+    src_file = 'xyzzy'
+    dst_dir = '/tmp/foo'
+    src_contents = 'contents of source file'
+    self.filesystem.CreateFile(src_file, contents=src_contents)
+    self.filesystem.CreateDirectory(dst_dir)
+    self.assertTrue(self.filesystem.Exists(src_file))
+    self.assertTrue(self.filesystem.Exists(dst_dir))
+    self.assertRaises(IOError, self.shutil.copyfile, src_file, dst_dir)
+
+
+if __name__ == '__main__':
+  unittest.main()
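(For reference: the next file, fake_filesystem_test.py, exercises FakeFilesystem and FakeOsModule directly. A minimal sketch of that flow, with invented paths and contents; each call is taken from the tests below.)

    import stat
    import fake_filesystem

    fs = fake_filesystem.FakeFilesystem(path_separator='/')
    fake_os = fake_filesystem.FakeOsModule(fs)

    fs.CreateFile('/notes/todo.txt', contents='ship it')
    print(fake_os.listdir('/notes'))                        # ['todo.txt']
    print(fake_os.stat('/notes/todo.txt')[stat.ST_SIZE])    # 7

    fake_os.remove('/notes/todo.txt')
    print(fs.Exists('/notes/todo.txt'))                     # False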
diff --git a/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_filesystem_test.py b/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_filesystem_test.py
new file mode 100644
index 0000000..54aa256
--- /dev/null
+++ b/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_filesystem_test.py
@@ -0,0 +1,2925 @@
+#! /usr/bin/env python
+#
+# Copyright 2009 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittest for fake_filesystem module."""
+
+import errno
+import os
+import re
+import stat
+import sys
+import time
+if sys.version_info < (2, 7):
+    import unittest2 as unittest
+else:
+    import unittest
+
+import fake_filesystem
+
+
+def _GetDummyTime(start_time, increment):
+  def _DummyTime():
+    _DummyTime._curr_time += increment
+    return _DummyTime._curr_time
+  _DummyTime._curr_time = start_time - increment  # pylint: disable-msg=W0612
+  return _DummyTime
+
+
+class TestCase(unittest.TestCase):
+  is_windows = sys.platform.startswith('win')
+  is_cygwin = sys.platform == 'cygwin'
+
+  def assertModeEqual(self, expected, actual):
+    return self.assertEqual(stat.S_IMODE(expected), stat.S_IMODE(actual))
+
+
+class FakeDirectoryUnitTest(TestCase):
+  def setUp(self):
+    self.orig_time = time.time
+    time.time = _GetDummyTime(10, 1)
+    self.fake_file = fake_filesystem.FakeFile('foobar', contents='dummy_file')
+    self.fake_dir = fake_filesystem.FakeDirectory('somedir')
+
+  def tearDown(self):
+    time.time = self.orig_time
+
+  def testNewFileAndDirectory(self):
+    self.assertTrue(stat.S_IFREG & self.fake_file.st_mode)
+    self.assertTrue(stat.S_IFDIR & self.fake_dir.st_mode)
+    self.assertEqual({}, self.fake_dir.contents)
+    self.assertEqual(10, self.fake_file.st_ctime)
+
+  def testAddEntry(self):
+    self.fake_dir.AddEntry(self.fake_file)
+    self.assertEqual({'foobar': self.fake_file}, self.fake_dir.contents)
+
+  def testGetEntry(self):
+    self.fake_dir.AddEntry(self.fake_file)
+    self.assertEqual(self.fake_file, self.fake_dir.GetEntry('foobar'))
+
+  def testRemoveEntry(self):
+    self.fake_dir.AddEntry(self.fake_file)
+    self.assertEqual(self.fake_file, self.fake_dir.GetEntry('foobar'))
+    self.fake_dir.RemoveEntry('foobar')
+    self.assertRaises(KeyError, self.fake_dir.GetEntry, 'foobar')
+
+  def testShouldThrowIfSetSizeIsNotInteger(self):
+    self.assertRaises(IOError, self.fake_file.SetSize, 0.1)
+
+  def testShouldThrowIfSetSizeIsNegative(self):
+    self.assertRaises(IOError, self.fake_file.SetSize, -1)
+
+  def testProduceEmptyFileIfSetSizeIsZero(self):
+    self.fake_file.SetSize(0)
+    self.assertEqual('', self.fake_file.contents)
+
+  def testSetsContentEmptyIfSetSizeIsZero(self):
+    self.fake_file.SetSize(0)
+    self.assertEqual('', self.fake_file.contents)
+
+  def testTruncateFileIfSizeIsSmallerThanCurrentSize(self):
+    self.fake_file.SetSize(6)
+    self.assertEqual('dummy_', self.fake_file.contents)
+
+  def testLeaveFileUnchangedIfSizeIsEqualToCurrentSize(self):
+    self.fake_file.SetSize(10)
+    self.assertEqual('dummy_file', self.fake_file.contents)
+
+  def testPadsFileContentWithNullBytesIfSizeIsGreaterThanCurrentSize(self):
+    self.fake_file.SetSize(13)
+    self.assertEqual('dummy_file\0\0\0', self.fake_file.contents)
+
+  def testSetMTime(self):
+    self.assertEqual(10, self.fake_file.st_mtime)
+    self.fake_file.SetMTime(13)
+    self.assertEqual(13, self.fake_file.st_mtime)
+    self.fake_file.SetMTime(131)
+    self.assertEqual(131, self.fake_file.st_mtime)
+
+  def testFileInode(self):
+    filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
+    fake_os = fake_filesystem.FakeOsModule(filesystem)
+    file_path = 'some_file1'
+    filesystem.CreateFile(file_path, contents='contents here1', inode=42)
+    self.assertEqual(42, fake_os.stat(file_path)[stat.ST_INO])
+
+    file_obj = filesystem.GetObject(file_path)
+    file_obj.SetIno(43)
+    self.assertEqual(43, fake_os.stat(file_path)[stat.ST_INO])
+
+  def testDirectoryInode(self):
+    filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
+    fake_os = fake_filesystem.FakeOsModule(filesystem)
+    dirpath = 'testdir'
+    filesystem.CreateDirectory(dirpath, inode=42)
+    self.assertEqual(42, fake_os.stat(dirpath)[stat.ST_INO])
+
+    dir_obj = filesystem.GetObject(dirpath)
+    dir_obj.SetIno(43)
+    self.assertEqual(43, fake_os.stat(dirpath)[stat.ST_INO])
+
+
+class SetLargeFileSizeTest(FakeDirectoryUnitTest):
+
+  def testShouldThrowIfSizeIsNotInteger(self):
+    self.assertRaises(IOError, self.fake_file.SetLargeFileSize, 0.1)
+
+  def testShouldThrowIfSizeIsNegative(self):
+    self.assertRaises(IOError, self.fake_file.SetLargeFileSize, -1)
+
+  def testSetsContentNoneIfSizeIsNonNegativeInteger(self):
+    self.fake_file.SetLargeFileSize(1000000000)
+    self.assertEqual(None, self.fake_file.contents)
+    self.assertEqual(1000000000, self.fake_file.st_size)
+
+
+class NormalizePathTest(TestCase):
+  def setUp(self):
+    self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
+    self.root_name = '/'
+
+  def testEmptyPathShouldGetNormalizedToRootPath(self):
+    self.assertEqual(self.root_name, self.filesystem.NormalizePath(''))
+
+  def testRootPathRemainsUnchanged(self):
+    self.assertEqual(self.root_name,
+                     self.filesystem.NormalizePath(self.root_name))
+
+  def testRelativePathForcedToCwd(self):
+    path = 'bar'
+    self.filesystem.cwd = '/foo'
+    self.assertEqual('/foo/bar', self.filesystem.NormalizePath(path))
+
+  def testAbsolutePathRemainsUnchanged(self):
+    path = '/foo/bar'
+    self.assertEqual(path, self.filesystem.NormalizePath(path))
+
+  def testDottedPathIsNormalized(self):
+    path = '/foo/..'
+    self.assertEqual('/', self.filesystem.NormalizePath(path))
+    path = 'foo/../bar'
+    self.assertEqual('/bar', self.filesystem.NormalizePath(path))
+
+  def testDotPathIsNormalized(self):
+    path = '.'
+    self.assertEqual('/', self.filesystem.NormalizePath(path))
+
+
+class GetPathComponentsTest(TestCase):
+  def setUp(self):
+    self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
+    self.root_name = '/'
+
+  def testRootPathShouldReturnEmptyList(self):
+    self.assertEqual([], self.filesystem.GetPathComponents(self.root_name))
+
+  def testEmptyPathShouldReturnEmptyList(self):
+    self.assertEqual([], self.filesystem.GetPathComponents(''))
+
+  def testRelativePathWithOneComponentShouldReturnComponent(self):
+    self.assertEqual(['foo'], self.filesystem.GetPathComponents('foo'))
+
+  def testAbsolutePathWithOneComponentShouldReturnComponent(self):
+    self.assertEqual(['foo'], self.filesystem.GetPathComponents('/foo'))
+
+  def testTwoLevelRelativePathShouldReturnComponents(self):
+    self.assertEqual(['foo', 'bar'],
+                     self.filesystem.GetPathComponents('foo/bar'))
+
+  def testTwoLevelAbsolutePathShouldReturnComponents(self):
+    self.assertEqual(['foo', 'bar'],
+                     self.filesystem.GetPathComponents('/foo/bar'))
+
+
+class FakeFilesystemUnitTest(TestCase):
+  def setUp(self):
+    self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
+    self.root_name = '/'
+    self.fake_file = fake_filesystem.FakeFile('foobar')
+    self.fake_child = fake_filesystem.FakeDirectory('foobaz')
+    self.fake_grandchild = fake_filesystem.FakeDirectory('quux')
+
+  def testNewFilesystem(self):
+    self.assertEqual('/', self.filesystem.path_separator)
+    self.assertTrue(stat.S_IFDIR & self.filesystem.root.st_mode)
+    self.assertEqual(self.root_name, self.filesystem.root.name)
+    self.assertEqual({}, self.filesystem.root.contents)
+
+  def testNoneRaisesTypeError(self):
+    self.assertRaises(TypeError, self.filesystem.Exists, None)
+
+  def testEmptyStringDoesNotExist(self):
+    self.assertFalse(self.filesystem.Exists(''))
+
+  def testExistsRoot(self):
+    self.assertTrue(self.filesystem.Exists(self.root_name))
+
+  def testExistsUnaddedFile(self):
+    self.assertFalse(self.filesystem.Exists(self.fake_file.name))
+
+  def testGetRootObject(self):
+    self.assertEqual(self.filesystem.root,
+                     self.filesystem.GetObject(self.root_name))
+
+  def testAddObjectToRoot(self):
+    self.filesystem.AddObject(self.root_name, self.fake_file)
+    self.assertEqual({'foobar': self.fake_file}, self.filesystem.root.contents)
+
+  def testExistsAddedFile(self):
+    self.filesystem.AddObject(self.root_name, self.fake_file)
+    self.assertTrue(self.filesystem.Exists(self.fake_file.name))
+
+  def testExistsRelativePath(self):
+    self.filesystem.CreateFile('/a/b/file_one')
+    self.filesystem.CreateFile('/a/c/file_two')
+    self.assertTrue(self.filesystem.Exists('a/b/../c/file_two'))
+    self.assertTrue(self.filesystem.Exists('/a/c/../b/file_one'))
+    self.assertTrue(self.filesystem.Exists('/a/c/../../a/b/file_one'))
+    self.assertFalse(self.filesystem.Exists('a/b/../z/d'))
+    self.assertFalse(self.filesystem.Exists('a/b/../z/../c/file_two'))
+    self.filesystem.cwd = '/a/c'
+    self.assertTrue(self.filesystem.Exists('../b/file_one'))
+    self.assertTrue(self.filesystem.Exists('../../a/b/file_one'))
+    self.assertTrue(self.filesystem.Exists('../../a/b/../../a/c/file_two'))
+    self.assertFalse(self.filesystem.Exists('../z/file_one'))
+    self.assertFalse(self.filesystem.Exists('../z/../c/file_two'))
+
+  def testGetObjectFromRoot(self):
+    self.filesystem.AddObject(self.root_name, self.fake_file)
+    self.assertEqual(self.fake_file, self.filesystem.GetObject('foobar'))
+
+  def testGetNonexistentObjectFromRootError(self):
+    self.filesystem.AddObject(self.root_name, self.fake_file)
+    self.assertEqual(self.fake_file, self.filesystem.GetObject('foobar'))
+    self.assertRaises(IOError, self.filesystem.GetObject,
+                      'some_bogus_filename')
+
+  def testRemoveObjectFromRoot(self):
+    self.filesystem.AddObject(self.root_name, self.fake_file)
+    self.filesystem.RemoveObject(self.fake_file.name)
+    self.assertRaises(IOError, self.filesystem.GetObject, self.fake_file.name)
+
+  def testRemoveNonexistentObjectFromRootError(self):
+    self.assertRaises(IOError, self.filesystem.RemoveObject,
+                      'some_bogus_filename')
+
+  def testExistsRemovedFile(self):
+    self.filesystem.AddObject(self.root_name, self.fake_file)
+    self.filesystem.RemoveObject(self.fake_file.name)
+    self.assertFalse(self.filesystem.Exists(self.fake_file.name))
+
+  def testAddObjectToChild(self):
+    self.filesystem.AddObject(self.root_name, self.fake_child)
+    self.filesystem.AddObject(self.fake_child.name, self.fake_file)
+    self.assertEqual(
+        {self.fake_file.name: self.fake_file},
+        self.filesystem.root.GetEntry(self.fake_child.name).contents)
+
+  def testAddObjectToRegularFileError(self):
+    self.filesystem.AddObject(self.root_name, self.fake_file)
+    self.assertRaises(IOError, self.filesystem.AddObject,
+                      self.fake_file.name, self.fake_file)
+
+  def testExistsFileAddedToChild(self):
+    self.filesystem.AddObject(self.root_name, self.fake_child)
+    self.filesystem.AddObject(self.fake_child.name, self.fake_file)
+    path = self.filesystem.JoinPaths(self.fake_child.name,
+                                     self.fake_file.name)
+    self.assertTrue(self.filesystem.Exists(path))
+
+  def testGetObjectFromChild(self):
+    self.filesystem.AddObject(self.root_name, self.fake_child)
+    self.filesystem.AddObject(self.fake_child.name, self.fake_file)
+    self.assertEqual(self.fake_file,
+                     self.filesystem.GetObject(
+                         self.filesystem.JoinPaths(self.fake_child.name,
+                                                   self.fake_file.name)))
+
+  def testGetNonexistentObjectFromChildError(self):
+    self.filesystem.AddObject(self.root_name, self.fake_child)
+    self.filesystem.AddObject(self.fake_child.name, self.fake_file)
+    self.assertRaises(IOError, self.filesystem.GetObject,
+                      self.filesystem.JoinPaths(self.fake_child.name,
+                                                'some_bogus_filename'))
+
+  def testRemoveObjectFromChild(self):
+    self.filesystem.AddObject(self.root_name, self.fake_child)
+    self.filesystem.AddObject(self.fake_child.name, self.fake_file)
+    target_path = self.filesystem.JoinPaths(self.fake_child.name,
+                                            self.fake_file.name)
+    self.filesystem.RemoveObject(target_path)
+    self.assertRaises(IOError, self.filesystem.GetObject, target_path)
+
+  def testRemoveObjectFromChildError(self):
+    self.filesystem.AddObject(self.root_name, self.fake_child)
+    self.assertRaises(IOError, self.filesystem.RemoveObject,
+                      self.filesystem.JoinPaths(self.fake_child.name,
+                                                'some_bogus_filename'))
+
+  def testRemoveObjectFromNonDirectoryError(self):
+    self.filesystem.AddObject(self.root_name, self.fake_file)
+    self.assertRaises(
+        IOError, self.filesystem.RemoveObject,
+        self.filesystem.JoinPaths(
+            '%s' % self.fake_file.name,
+            'file_does_not_matter_since_parent_not_a_directory'))
+
+  def testExistsFileRemovedFromChild(self):
+    self.filesystem.AddObject(self.root_name, self.fake_child)
+    self.filesystem.AddObject(self.fake_child.name, self.fake_file)
+    path = self.filesystem.JoinPaths(self.fake_child.name,
+                                     self.fake_file.name)
+    self.filesystem.RemoveObject(path)
+    self.assertFalse(self.filesystem.Exists(path))
+
+  def testOperateOnGrandchildDirectory(self):
+    self.filesystem.AddObject(self.root_name, self.fake_child)
+    self.filesystem.AddObject(self.fake_child.name, self.fake_grandchild)
+    grandchild_directory = self.filesystem.JoinPaths(self.fake_child.name,
+                                                     self.fake_grandchild.name)
+    grandchild_file = self.filesystem.JoinPaths(grandchild_directory,
+                                                self.fake_file.name)
+    self.assertRaises(IOError, self.filesystem.GetObject, grandchild_file)
+    self.filesystem.AddObject(grandchild_directory, self.fake_file)
+    self.assertEqual(self.fake_file,
+                     self.filesystem.GetObject(grandchild_file))
+    self.assertTrue(self.filesystem.Exists(grandchild_file))
+    self.filesystem.RemoveObject(grandchild_file)
+    self.assertRaises(IOError, self.filesystem.GetObject, grandchild_file)
+    self.assertFalse(self.filesystem.Exists(grandchild_file))
+
+  def testCreateDirectoryInRootDirectory(self):
+    path = 'foo'
+    self.filesystem.CreateDirectory(path)
+    new_dir = self.filesystem.GetObject(path)
+    self.assertEqual(os.path.basename(path), new_dir.name)
+    self.assertTrue(stat.S_IFDIR & new_dir.st_mode)
+
+  def testCreateDirectoryInRootDirectoryAlreadyExistsError(self):
+    path = 'foo'
+    self.filesystem.CreateDirectory(path)
+    self.assertRaises(OSError, self.filesystem.CreateDirectory, path)
+
+  def testCreateDirectory(self):
+    path = 'foo/bar/baz'
+    self.filesystem.CreateDirectory(path)
+    new_dir = self.filesystem.GetObject(path)
+    self.assertEqual(os.path.basename(path), new_dir.name)
+    self.assertTrue(stat.S_IFDIR & new_dir.st_mode)
+
+    # Create second directory to make sure first is OK.
+    path = '%s/quux' % path
+    self.filesystem.CreateDirectory(path)
+    new_dir = self.filesystem.GetObject(path)
+    self.assertEqual(os.path.basename(path), new_dir.name)
+    self.assertTrue(stat.S_IFDIR & new_dir.st_mode)
+
+  def testCreateDirectoryAlreadyExistsError(self):
+    path = 'foo/bar/baz'
+    self.filesystem.CreateDirectory(path)
+    self.assertRaises(OSError, self.filesystem.CreateDirectory, path)
+
+  def testCreateFileInCurrentDirectory(self):
+    path = 'foo'
+    contents = 'dummy data'
+    self.filesystem.CreateFile(path, contents=contents)
+    self.assertTrue(self.filesystem.Exists(path))
+    self.assertFalse(self.filesystem.Exists(os.path.dirname(path)))
+    path = './%s' % path
+    self.assertTrue(self.filesystem.Exists(os.path.dirname(path)))
+
+  def testCreateFileInRootDirectory(self):
+    path = '/foo'
+    contents = 'dummy data'
+    self.filesystem.CreateFile(path, contents=contents)
+    new_file = self.filesystem.GetObject(path)
+    self.assertTrue(self.filesystem.Exists(path))
+    self.assertTrue(self.filesystem.Exists(os.path.dirname(path)))
+    self.assertEqual(os.path.basename(path), new_file.name)
+    self.assertTrue(stat.S_IFREG & new_file.st_mode)
+    self.assertEqual(contents, new_file.contents)
+
+  def testCreateFileWithSizeButNoContentCreatesLargeFile(self):
+    path = 'large_foo_bar'
+    self.filesystem.CreateFile(path, st_size=100000000)
+    new_file = self.filesystem.GetObject(path)
+    self.assertEqual(None, new_file.contents)
+    self.assertEqual(100000000, new_file.st_size)
+
+  def testCreateFileInRootDirectoryAlreadyExistsError(self):
+    path = 'foo'
+    self.filesystem.CreateFile(path)
+    self.assertRaises(IOError, self.filesystem.CreateFile, path)
+
+  def testCreateFile(self):
+    path = 'foo/bar/baz'
+    retval = self.filesystem.CreateFile(path, contents='dummy_data')
+    self.assertTrue(self.filesystem.Exists(path))
+    self.assertTrue(self.filesystem.Exists(os.path.dirname(path)))
+    new_file = self.filesystem.GetObject(path)
+    self.assertEqual(os.path.basename(path), new_file.name)
+    self.assertTrue(stat.S_IFREG & new_file.st_mode)
+    self.assertEqual(new_file, retval)
+
+  def testCreateFileAlreadyExistsError(self):
+    path = 'foo/bar/baz'
+    self.filesystem.CreateFile(path, contents='dummy_data')
+    self.assertRaises(IOError, self.filesystem.CreateFile, path)
+
+  def testCreateLink(self):
+    path = 'foo/bar/baz'
+    target_path = 'foo/bar/quux'
+    new_file = self.filesystem.CreateLink(path, 'quux')
+    # Neither the path nor the final target exists before we actually write to
+    # one of them, even though the link appears in the file system.
+    self.assertFalse(self.filesystem.Exists(path))
+    self.assertFalse(self.filesystem.Exists(target_path))
+    self.assertTrue(stat.S_IFLNK & new_file.st_mode)
+
+    # But once we write the linked-to file, both will exist.
+    self.filesystem.CreateFile(target_path)
+    self.assertTrue(self.filesystem.Exists(path))
+    self.assertTrue(self.filesystem.Exists(target_path))
+
+  def testResolveObject(self):
+    target_path = 'dir/target'
+    target_contents = '0123456789ABCDEF'
+    link_name = 'x'
+    self.filesystem.CreateDirectory('dir')
+    self.filesystem.CreateFile('dir/target', contents=target_contents)
+    self.filesystem.CreateLink(link_name, target_path)
+    obj = self.filesystem.ResolveObject(link_name)
+    self.assertEqual('target', obj.name)
+    self.assertEqual(target_contents, obj.contents)
+
+  def testLresolveObject(self):
+    target_path = 'dir/target'
+    target_contents = '0123456789ABCDEF'
+    link_name = 'x'
+    self.filesystem.CreateDirectory('dir')
+    self.filesystem.CreateFile('dir/target', contents=target_contents)
+    self.filesystem.CreateLink(link_name, target_path)
+    obj = self.filesystem.LResolveObject(link_name)
+    self.assertEqual(link_name, obj.name)
+    self.assertEqual(target_path, obj.contents)
+
+  def testDirectoryAccessOnFile(self):
+    self.filesystem.CreateFile('not_a_dir')
+    self.assertRaises(IOError, self.filesystem.ResolveObject, 'not_a_dir/foo')
+    self.assertRaises(IOError, self.filesystem.ResolveObject,
+                      'not_a_dir/foo/bar')
+    self.assertRaises(IOError, self.filesystem.LResolveObject, 'not_a_dir/foo')
+    self.assertRaises(IOError, self.filesystem.LResolveObject,
+                      'not_a_dir/foo/bar')
+
+
+class FakeOsModuleTest(TestCase):
+
+  def setUp(self):
+    self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
+    self.os = fake_filesystem.FakeOsModule(self.filesystem)
+    self.rwx = self.os.R_OK | self.os.W_OK | self.os.X_OK
+    self.rw = self.os.R_OK | self.os.W_OK
+    self.orig_time = time.time
+    time.time = _GetDummyTime(200, 20)
+
+  def tearDown(self):
+    time.time = self.orig_time
+
+  def assertRaisesWithRegexpMatch(self, expected_exception, expected_regexp,
+                                  callable_obj, *args, **kwargs):
+    """Asserts that the message in a raised exception matches the given regexp.
+
+    Args:
+      expected_exception: Exception class expected to be raised.
+      expected_regexp: Regexp (re pattern object or string) expected to be
+        found in error message.
+      callable_obj: Function to be called.
+      *args: Extra args.
+      **kwargs: Extra kwargs.
+    """
+    try:
+      callable_obj(*args, **kwargs)
+    except expected_exception as err:
+      if isinstance(expected_regexp, str):
+        expected_regexp = re.compile(expected_regexp)
+      self.assertTrue(
+          expected_regexp.search(str(err)),
+          '"%s" does not match "%s"' % (expected_regexp.pattern, str(err)))
+    else:
+      self.fail(expected_exception.__name__ + ' not raised')
+
+  def testChdir(self):
+    """chdir should work on a directory."""
+    directory = '/foo'
+    self.filesystem.CreateDirectory(directory)
+    self.os.chdir(directory)
+
+  def testChdirFailsNonExist(self):
+    """chdir should raise OSError if the target does not exist."""
+    directory = '/no/such/directory'
+    self.assertRaises(OSError, self.os.chdir, directory)
+
+  def testChdirFailsNonDirectory(self):
+    """chdir should raies OSError if the target is not a directory."""
+    filename = '/foo/bar'
+    self.filesystem.CreateFile(filename)
+    self.assertRaises(OSError, self.os.chdir, filename)
+
+  def testConsecutiveChdir(self):
+    """Consecutive relative chdir calls should work."""
+    dir1 = 'foo'
+    dir2 = 'bar'
+    full_dirname = self.os.path.join(dir1, dir2)
+    self.filesystem.CreateDirectory(full_dirname)
+    self.os.chdir(dir1)
+    self.os.chdir(dir2)
+    self.assertEqual(self.os.getcwd(), self.os.path.sep + full_dirname)
+
+  def testBackwardsChdir(self):
+    """chdir into '..' should behave appropriately."""
+    rootdir = self.os.getcwd()
+    dirname = 'foo'
+    abs_dirname = self.os.path.abspath(dirname)
+    self.filesystem.CreateDirectory(dirname)
+    self.os.chdir(dirname)
+    self.assertEqual(abs_dirname, self.os.getcwd())
+    self.os.chdir('..')
+    self.assertEqual(rootdir, self.os.getcwd())
+    self.os.chdir(self.os.path.join(dirname, '..'))
+    self.assertEqual(rootdir, self.os.getcwd())
+
+  def testGetCwd(self):
+    dirname = '/foo/bar'
+    self.filesystem.CreateDirectory(dirname)
+    self.assertEqual(self.os.getcwd(), self.os.path.sep)
+    self.os.chdir(dirname)
+    self.assertEqual(self.os.getcwd(), dirname)
+
+  def testListdir(self):
+    directory = 'xyzzy/plugh'
+    files = ['foo', 'bar', 'baz']
+    for f in files:
+      self.filesystem.CreateFile('%s/%s' % (directory, f))
+    files.sort()
+    self.assertEqual(files, self.os.listdir(directory))
+
+  def testListdirOnSymlink(self):
+    directory = 'xyzzy'
+    files = ['foo', 'bar', 'baz']
+    for f in files:
+      self.filesystem.CreateFile('%s/%s' % (directory, f))
+    self.filesystem.CreateLink('symlink', 'xyzzy')
+    files.sort()
+    self.assertEqual(files, self.os.listdir('symlink'))
+
+  def testListdirError(self):
+    file_path = 'foo/bar/baz'
+    self.filesystem.CreateFile(file_path)
+    self.assertRaises(OSError, self.os.listdir, file_path)
+
+  def testExistsCurrentDir(self):
+    self.assertTrue(self.filesystem.Exists('.'))
+
+  def testListdirCurrent(self):
+    files = ['foo', 'bar', 'baz']
+    for f in files:
+      self.filesystem.CreateFile('%s' % f)
+    files.sort()
+    self.assertEqual(files, self.os.listdir('.'))
+
+  def testFdopen(self):
+    fake_open = fake_filesystem.FakeFileOpen(self.filesystem)
+    file_path1 = 'some_file1'
+    self.filesystem.CreateFile(file_path1, contents='contents here1')
+    fake_file1 = fake_open(file_path1, 'r')
+    self.assertEqual(0, fake_file1.fileno())
+
+    self.assertFalse(self.os.fdopen(0) is fake_file1)
+
+    self.assertRaises(TypeError, self.os.fdopen, None)
+    self.assertRaises(TypeError, self.os.fdopen, 'a string')
+
+  def testOutOfRangeFdopen(self):
+    # We haven't created any files, so even 0 is out of range.
+    self.assertRaises(OSError, self.os.fdopen, 0)
+
+  def testClosedFileDescriptor(self):
+    fake_open = fake_filesystem.FakeFileOpen(self.filesystem)
+    first_path = 'some_file1'
+    second_path = 'some_file2'
+    third_path = 'some_file3'
+    self.filesystem.CreateFile(first_path, contents='contents here1')
+    self.filesystem.CreateFile(second_path, contents='contents here2')
+    self.filesystem.CreateFile(third_path, contents='contents here3')
+
+    fake_file1 = fake_open(first_path, 'r')
+    fake_file2 = fake_open(second_path, 'r')
+    fake_file3 = fake_open(third_path, 'r')
+    self.assertEqual(0, fake_file1.fileno())
+    self.assertEqual(1, fake_file2.fileno())
+    self.assertEqual(2, fake_file3.fileno())
+
+    fileno2 = fake_file2.fileno()
+    self.os.close(fileno2)
+    self.assertRaises(OSError, self.os.close, fileno2)
+    self.assertEqual(0, fake_file1.fileno())
+    self.assertEqual(2, fake_file3.fileno())
+
+    self.assertFalse(self.os.fdopen(0) is fake_file1)
+    self.assertFalse(self.os.fdopen(2) is fake_file3)
+    self.assertRaises(OSError, self.os.fdopen, 1)
+
+  def testFdopenMode(self):
+    fake_open = fake_filesystem.FakeFileOpen(self.filesystem)
+    file_path1 = 'some_file1'
+    self.filesystem.CreateFile(file_path1, contents='contents here1',
+                               st_mode=((stat.S_IFREG | 0o666) ^ stat.S_IWRITE))
+
+    fake_file1 = fake_open(file_path1, 'r')
+    self.assertEqual(0, fake_file1.fileno())
+    self.os.fdopen(0)
+    self.os.fdopen(0, mode='r')
+    exception = OSError if sys.version_info < (3, 0) else IOError
+    self.assertRaises(exception, self.os.fdopen, 0, 'w')
+
+  def testLowLevelOpenCreate(self):
+    file_path = 'file1'
+    # this is the low-level open, not FakeFileOpen
+    fileno = self.os.open(file_path, self.os.O_CREAT)
+    self.assertEqual(0, fileno)
+    self.assertTrue(self.os.path.exists(file_path))
+
+  def testLowLevelOpenCreateMode(self):
+    file_path = 'file1'
+    fileno = self.os.open(file_path, self.os.O_CREAT, 0o700)
+    self.assertEqual(0, fileno)
+    self.assertTrue(self.os.path.exists(file_path))
+    self.assertModeEqual(0o700, self.os.stat(file_path).st_mode)
+
+  def testLowLevelOpenCreateModeUnsupported(self):
+    file_path = 'file1'
+    fake_flag = 0b100000000000000000000000
+    self.assertRaises(NotImplementedError, self.os.open, file_path, fake_flag)
+
+  def testLowLevelWriteRead(self):
+    file_path = 'file1'
+    self.filesystem.CreateFile(file_path, contents='orig contents')
+    new_contents = '1234567890abcdef'
+    fake_open = fake_filesystem.FakeFileOpen(self.filesystem)
+
+    fh = fake_open(file_path, 'w')
+    fileno = fh.fileno()
+
+    self.assertEqual(len(new_contents), self.os.write(fileno, new_contents))
+    self.assertEqual(new_contents,
+                     self.filesystem.GetObject(file_path).contents)
+    self.os.close(fileno)
+
+    fh = fake_open(file_path, 'r')
+    fileno = fh.fileno()
+    self.assertEqual('', self.os.read(fileno, 0))
+    self.assertEqual(new_contents[0:2], self.os.read(fileno, 2))
+    self.assertEqual(new_contents[2:10], self.os.read(fileno, 8))
+    self.assertEqual(new_contents[10:], self.os.read(fileno, 100))
+    self.assertEqual('', self.os.read(fileno, 10))
+    self.os.close(fileno)
+
+    self.assertRaises(OSError, self.os.write, fileno, new_contents)
+    self.assertRaises(OSError, self.os.read, fileno, 10)
+
+  def testFstat(self):
+    directory = 'xyzzy'
+    file_path = '%s/plugh' % directory
+    self.filesystem.CreateFile(file_path, contents='ABCDE')
+    fake_open = fake_filesystem.FakeFileOpen(self.filesystem)
+    file_obj = fake_open(file_path)
+    fileno = file_obj.fileno()
+    self.assertTrue(stat.S_IFREG & self.os.fstat(fileno)[stat.ST_MODE])
+    self.assertTrue(stat.S_IFREG & self.os.fstat(fileno).st_mode)
+    self.assertEqual(5, self.os.fstat(fileno)[stat.ST_SIZE])
+
+  def testStat(self):
+    directory = 'xyzzy'
+    file_path = '%s/plugh' % directory
+    self.filesystem.CreateFile(file_path, contents='ABCDE')
+    self.assertTrue(stat.S_IFDIR & self.os.stat(directory)[stat.ST_MODE])
+    self.assertTrue(stat.S_IFREG & self.os.stat(file_path)[stat.ST_MODE])
+    self.assertTrue(stat.S_IFREG & self.os.stat(file_path).st_mode)
+    self.assertEqual(5, self.os.stat(file_path)[stat.ST_SIZE])
+
+  def testLstat(self):
+    directory = 'xyzzy'
+    base_name = 'plugh'
+    file_contents = 'frobozz'
+    # Just make sure we didn't accidentally make our test data meaningless.
+    self.assertNotEqual(len(base_name), len(file_contents))
+    file_path = '%s/%s' % (directory, base_name)
+    link_path = '%s/link' % directory
+    self.filesystem.CreateFile(file_path, contents=file_contents)
+    self.filesystem.CreateLink(link_path, base_name)
+    self.assertEqual(len(file_contents), self.os.lstat(file_path)[stat.ST_SIZE])
+    self.assertEqual(len(base_name), self.os.lstat(link_path)[stat.ST_SIZE])
+
+  def testStatNonExistentFile(self):
+    # set up
+    file_path = '/non/existent/file'
+    self.assertFalse(self.filesystem.Exists(file_path))
+    # actual tests
+    try:
+      # Use try-catch to check exception attributes.
+      self.os.stat(file_path)
+      self.fail('Exception is expected.')  # COV_NF_LINE
+    except OSError as os_error:
+      self.assertEqual(errno.ENOENT, os_error.errno)
+      self.assertEqual(file_path, os_error.filename)
+
+  def testReadlink(self):
+    link_path = 'foo/bar/baz'
+    target = 'tarJAY'
+    self.filesystem.CreateLink(link_path, target)
+    self.assertEqual(self.os.readlink(link_path), target)
+
+  def testReadlinkRaisesIfPathIsNotALink(self):
+    file_path = 'foo/bar/eleventyone'
+    self.filesystem.CreateFile(file_path)
+    self.assertRaises(OSError, self.os.readlink, file_path)
+
+  def testReadlinkRaisesIfPathDoesNotExist(self):
+    self.assertRaises(OSError, self.os.readlink, '/this/path/does/not/exist')
+
+  def testReadlinkRaisesIfPathIsNone(self):
+    self.assertRaises(TypeError, self.os.readlink, None)
+
+  def testReadlinkWithLinksInPath(self):
+    self.filesystem.CreateLink('/meyer/lemon/pie', 'yum')
+    self.filesystem.CreateLink('/geo/metro', '/meyer')
+    self.assertEqual('yum', self.os.readlink('/geo/metro/lemon/pie'))
+
+  def testReadlinkWithChainedLinksInPath(self):
+    self.filesystem.CreateLink('/eastern/european/wolfhounds/chase', 'cats')
+    self.filesystem.CreateLink('/russian', '/eastern/european')
+    self.filesystem.CreateLink('/dogs', '/russian/wolfhounds')
+    self.assertEqual('cats', self.os.readlink('/dogs/chase'))
+
+  def testRemoveDir(self):
+    directory = 'xyzzy'
+    dir_path = '/%s/plugh' % directory
+    self.filesystem.CreateDirectory(dir_path)
+    self.assertTrue(self.filesystem.Exists(dir_path))
+    self.assertRaises(OSError, self.os.remove, dir_path)
+    self.assertTrue(self.filesystem.Exists(dir_path))
+    self.os.chdir(directory)
+    self.assertRaises(OSError, self.os.remove, 'plugh')
+    self.assertTrue(self.filesystem.Exists(dir_path))
+    self.assertRaises(OSError, self.os.remove, '/plugh')
+
+  def testRemoveFile(self):
+    directory = 'zzy'
+    file_path = '%s/plugh' % directory
+    self.filesystem.CreateFile(file_path)
+    self.assertTrue(self.filesystem.Exists(file_path))
+    self.os.remove(file_path)
+    self.assertFalse(self.filesystem.Exists(file_path))
+
+  def testRemoveFileNoDirectory(self):
+    directory = 'zzy'
+    file_name = 'plugh'
+    file_path = '%s/%s' % (directory, file_name)
+    self.filesystem.CreateFile(file_path)
+    self.assertTrue(self.filesystem.Exists(file_path))
+    self.os.chdir(directory)
+    self.os.remove(file_name)
+    self.assertFalse(self.filesystem.Exists(file_path))
+
+  def testRemoveFileRelativePath(self):
+    original_dir = self.os.getcwd()
+    directory = 'zzy'
+    subdirectory = self.os.path.join(directory, directory)
+    file_name = 'plugh'
+    file_path = '%s/%s' % (directory, file_name)
+    file_path_relative = self.os.path.join('..', file_name)
+    self.filesystem.CreateFile(file_path)
+    self.assertTrue(self.filesystem.Exists(file_path))
+    self.filesystem.CreateDirectory(subdirectory)
+    self.assertTrue(self.filesystem.Exists(subdirectory))
+    self.os.chdir(subdirectory)
+    self.os.remove(file_path_relative)
+    self.assertFalse(self.filesystem.Exists(file_path_relative))
+    self.os.chdir(original_dir)
+    self.assertFalse(self.filesystem.Exists(file_path))
+
+  def testRemoveDirRaisesError(self):
+    directory = 'zzy'
+    self.filesystem.CreateDirectory(directory)
+    self.assertRaises(OSError,
+                      self.os.remove,
+                      directory)
+
+  def testRemoveSymlinkToDir(self):
+    directory = 'zzy'
+    link = 'link_to_dir'
+    self.filesystem.CreateDirectory(directory)
+    self.os.symlink(directory, link)
+    self.assertTrue(self.filesystem.Exists(directory))
+    self.assertTrue(self.filesystem.Exists(link))
+    self.os.remove(link)
+    self.assertTrue(self.filesystem.Exists(directory))
+    self.assertFalse(self.filesystem.Exists(link))
+
+  def testUnlink(self):
+    self.assertTrue(self.os.unlink == self.os.remove)
+
+  def testUnlinkRaisesIfNotExist(self):
+    file_path = '/file/does/not/exist'
+    self.assertFalse(self.filesystem.Exists(file_path))
+    self.assertRaises(OSError, self.os.unlink, file_path)
+
+  def testRenameToNonexistentFile(self):
+    """Can rename a file to an unused name."""
+    directory = 'xyzzy'
+    old_file_path = '%s/plugh_old' % directory
+    new_file_path = '%s/plugh_new' % directory
+    self.filesystem.CreateFile(old_file_path, contents='test contents')
+    self.assertTrue(self.filesystem.Exists(old_file_path))
+    self.assertFalse(self.filesystem.Exists(new_file_path))
+    self.os.rename(old_file_path, new_file_path)
+    self.assertFalse(self.filesystem.Exists(old_file_path))
+    self.assertTrue(self.filesystem.Exists(new_file_path))
+    self.assertEqual('test contents',
+                     self.filesystem.GetObject(new_file_path).contents)
+
+  def testRenameDirectory(self):
+    """Can rename a directory to an unused name."""
+    for old_path, new_path in [('wxyyw', 'xyzzy'), ('/abccb', 'cdeed')]:
+      self.filesystem.CreateFile('%s/plugh' % old_path, contents='test')
+      self.assertTrue(self.filesystem.Exists(old_path))
+      self.assertFalse(self.filesystem.Exists(new_path))
+      self.os.rename(old_path, new_path)
+      self.assertFalse(self.filesystem.Exists(old_path))
+      self.assertTrue(self.filesystem.Exists(new_path))
+      self.assertEqual(
+          'test', self.filesystem.GetObject('%s/plugh' % new_path).contents)
+
+  def testRenameToExistentFile(self):
+    """Can rename a file to a used name."""
+    directory = 'xyzzy'
+    old_file_path = '%s/plugh_old' % directory
+    new_file_path = '%s/plugh_new' % directory
+    self.filesystem.CreateFile(old_file_path, contents='test contents 1')
+    self.filesystem.CreateFile(new_file_path, contents='test contents 2')
+    self.assertTrue(self.filesystem.Exists(old_file_path))
+    self.assertTrue(self.filesystem.Exists(new_file_path))
+    self.os.rename(old_file_path, new_file_path)
+    self.assertFalse(self.filesystem.Exists(old_file_path))
+    self.assertTrue(self.filesystem.Exists(new_file_path))
+    self.assertEqual('test contents 1',
+                     self.filesystem.GetObject(new_file_path).contents)
+
+  def testRenameToNonexistentDir(self):
+    """Can rename a file to a name in a nonexistent dir."""
+    directory = 'xyzzy'
+    old_file_path = '%s/plugh_old' % directory
+    new_file_path = '%s/no_such_path/plugh_new' % directory
+    self.filesystem.CreateFile(old_file_path, contents='test contents')
+    self.assertTrue(self.filesystem.Exists(old_file_path))
+    self.assertFalse(self.filesystem.Exists(new_file_path))
+    self.assertRaises(IOError, self.os.rename, old_file_path, new_file_path)
+    self.assertTrue(self.filesystem.Exists(old_file_path))
+    self.assertFalse(self.filesystem.Exists(new_file_path))
+    self.assertEqual('test contents',
+                     self.filesystem.GetObject(old_file_path).contents)
+
+  def testRenameNonexistentFileShouldRaiseError(self):
+    """Can't rename a file that doesn't exist."""
+    self.assertRaises(OSError,
+                      self.os.rename,
+                      'nonexistent-foo',
+                      'doesn\'t-matter-bar')
+
+  def testRenameEmptyDir(self):
+    """Test a rename of an empty directory."""
+    directory = 'xyzzy'
+    before_dir = '%s/empty' % directory
+    after_dir = '%s/unused' % directory
+    self.filesystem.CreateDirectory(before_dir)
+    self.assertTrue(self.filesystem.Exists('%s/.' % before_dir))
+    self.assertFalse(self.filesystem.Exists(after_dir))
+    self.os.rename(before_dir, after_dir)
+    self.assertFalse(self.filesystem.Exists(before_dir))
+    self.assertTrue(self.filesystem.Exists('%s/.' % after_dir))
+
+  def testRenameDir(self):
+    """Test a rename of a directory."""
+    directory = 'xyzzy'
+    before_dir = '%s/before' % directory
+    before_file = '%s/before/file' % directory
+    after_dir = '%s/after' % directory
+    after_file = '%s/after/file' % directory
+    self.filesystem.CreateDirectory(before_dir)
+    self.filesystem.CreateFile(before_file, contents='payload')
+    self.assertTrue(self.filesystem.Exists(before_dir))
+    self.assertTrue(self.filesystem.Exists(before_file))
+    self.assertFalse(self.filesystem.Exists(after_dir))
+    self.assertFalse(self.filesystem.Exists(after_file))
+    self.os.rename(before_dir, after_dir)
+    self.assertFalse(self.filesystem.Exists(before_dir))
+    self.assertFalse(self.filesystem.Exists(before_file))
+    self.assertTrue(self.filesystem.Exists(after_dir))
+    self.assertTrue(self.filesystem.Exists(after_file))
+    self.assertEqual('payload',
+                     self.filesystem.GetObject(after_file).contents)
+
+  def testRenamePreservesStat(self):
+    """Test if rename preserves mtime."""
+    directory = 'xyzzy'
+    old_file_path = '%s/plugh_old' % directory
+    new_file_path = '%s/plugh_new' % directory
+    old_file = self.filesystem.CreateFile(old_file_path)
+    old_file.SetMTime(old_file.st_mtime - 3600)
+    self.os.chown(old_file_path, 200, 200)
+    self.os.chmod(old_file_path, 0o222)
+    new_file = self.filesystem.CreateFile(new_file_path)
+    self.assertNotEqual(new_file.st_mtime, old_file.st_mtime)
+    self.os.rename(old_file_path, new_file_path)
+    new_file = self.filesystem.GetObject(new_file_path)
+    self.assertEqual(new_file.st_mtime, old_file.st_mtime)
+    self.assertEqual(new_file.st_mode, old_file.st_mode)
+    self.assertEqual(new_file.st_uid, old_file.st_uid)
+    self.assertEqual(new_file.st_gid, old_file.st_gid)
+
+  def testRenameSameFilenames(self):
+    """Test renaming when old and new names are the same."""
+    directory = 'xyzzy'
+    file_contents = 'Spam eggs'
+    file_path = '%s/eggs' % directory
+    self.filesystem.CreateFile(file_path, contents=file_contents)
+    self.os.rename(file_path, file_path)
+    self.assertEqual(file_contents,
+                     self.filesystem.GetObject(file_path).contents)
+
+  def testRmdir(self):
+    """Can remove a directory."""
+    directory = 'xyzzy'
+    sub_dir = '/xyzzy/abccd'
+    other_dir = '/xyzzy/cdeed'
+    self.filesystem.CreateDirectory(directory)
+    self.assertTrue(self.filesystem.Exists(directory))
+    self.os.rmdir(directory)
+    self.assertFalse(self.filesystem.Exists(directory))
+    self.filesystem.CreateDirectory(sub_dir)
+    self.filesystem.CreateDirectory(other_dir)
+    self.os.chdir(sub_dir)
+    self.os.rmdir('../cdeed')
+    self.assertFalse(self.filesystem.Exists(other_dir))
+    self.os.chdir('..')
+    self.os.rmdir('abccd')
+    self.assertFalse(self.filesystem.Exists(sub_dir))
+
+  def testRmdirRaisesIfNotEmpty(self):
+    """Raises an exception if the target directory is not empty."""
+    directory = 'xyzzy'
+    file_path = '%s/plugh' % directory
+    self.filesystem.CreateFile(file_path)
+    self.assertTrue(self.filesystem.Exists(file_path))
+    self.assertRaises(OSError, self.os.rmdir, directory)
+
+  def testRmdirRaisesIfNotDirectory(self):
+    """Raises an exception if the target is not a directory."""
+    directory = 'xyzzy'
+    file_path = '%s/plugh' % directory
+    self.filesystem.CreateFile(file_path)
+    self.assertTrue(self.filesystem.Exists(file_path))
+    self.assertRaises(OSError, self.os.rmdir, file_path)
+    self.assertRaises(OSError, self.os.rmdir, '.')
+
+  def testRmdirRaisesIfNotExist(self):
+    """Raises an exception if the target does not exist."""
+    directory = 'xyzzy'
+    self.assertFalse(self.filesystem.Exists(directory))
+    self.assertRaises(OSError, self.os.rmdir, directory)
+
+  def RemovedirsCheck(self, directory):
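+    """Call removedirs() on directory and return True if it is gone."""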
+    self.assertTrue(self.filesystem.Exists(directory))
+    self.os.removedirs(directory)
+    return not self.filesystem.Exists(directory)
+
+  def testRemovedirs(self):
+    data = ['test1', 'test1/test2', 'test1/extra', 'test1/test2/test3']
+    for directory in data:
+      self.filesystem.CreateDirectory(directory)
+      self.assertTrue(self.filesystem.Exists(directory))
+    self.assertRaises(OSError, self.RemovedirsCheck, data[0])
+    self.assertRaises(OSError, self.RemovedirsCheck, data[1])
+
+    self.assertTrue(self.RemovedirsCheck(data[3]))
+    self.assertTrue(self.filesystem.Exists(data[0]))
+    self.assertFalse(self.filesystem.Exists(data[1]))
+    self.assertTrue(self.filesystem.Exists(data[2]))
+
+    # Should raise because '/test1/extra' is all that is left, and
+    # removedirs('/test1/extra') will eventually try to rmdir('/').
+    self.assertRaises(OSError, self.RemovedirsCheck, data[2])
+
+    # However, it will still delete '/test1' in the process.
+    self.assertFalse(self.filesystem.Exists(data[0]))
+
+    self.filesystem.CreateDirectory('test1/test2')
+    # Add this to the root directory to avoid raising an exception.
+    self.filesystem.CreateDirectory('test3')
+    self.assertTrue(self.RemovedirsCheck('test1/test2'))
+    self.assertFalse(self.filesystem.Exists('test1/test2'))
+    self.assertFalse(self.filesystem.Exists('test1'))
+
+  def testRemovedirsRaisesIfRemovingRoot(self):
+    """Raises exception if asked to remove '/'."""
+    directory = '/'
+    self.assertTrue(self.filesystem.Exists(directory))
+    self.assertRaises(OSError, self.os.removedirs, directory)
+
+  def testRemovedirsRaisesIfCascadeRemovingRoot(self):
+    """Raises exception if asked to remove '/' as part of a larger operation.
+
+    All of the other directories should still be removed, though.
+    """
+    directory = '/foo/bar/'
+    self.filesystem.CreateDirectory(directory)
+    self.assertTrue(self.filesystem.Exists(directory))
+    self.assertRaises(OSError, self.os.removedirs, directory)
+    head, unused_tail = self.os.path.split(directory)
+    while head != '/':
+      self.assertFalse(self.filesystem.Exists(directory))
+      head, unused_tail = self.os.path.split(head)
+
+  def testRemovedirsWithTrailingSlash(self):
+    """removedirs works on directory names with trailing slashes."""
+    # separate this case from the removing-root-directory case
+    self.filesystem.CreateDirectory('/baz')
+    directory = '/foo/bar/'
+    self.filesystem.CreateDirectory(directory)
+    self.assertTrue(self.filesystem.Exists(directory))
+    self.os.removedirs(directory)
+    self.assertFalse(self.filesystem.Exists(directory))
+
+  def testMkdir(self):
+    """mkdir can create a relative directory."""
+    directory = 'xyzzy'
+    self.assertFalse(self.filesystem.Exists(directory))
+    self.os.mkdir(directory)
+    self.assertTrue(self.filesystem.Exists('/%s' % directory))
+    self.os.chdir(directory)
+    self.os.mkdir(directory)
+    self.assertTrue(self.filesystem.Exists('/%s/%s' % (directory, directory)))
+    self.os.chdir(directory)
+    self.os.mkdir('../abccb')
+    self.assertTrue(self.filesystem.Exists('/%s/abccb' % directory))
+
+  def testMkdirWithTrailingSlash(self):
+    """mkdir can create a directory named with a trailing slash."""
+    directory = '/foo/'
+    self.assertFalse(self.filesystem.Exists(directory))
+    self.os.mkdir(directory)
+    self.assertTrue(self.filesystem.Exists(directory))
+    self.assertTrue(self.filesystem.Exists('/foo'))
+
+  def testMkdirRaisesIfEmptyDirectoryName(self):
+    """mkdir raises exeption if creating directory named ''."""
+    directory = ''
+    self.assertRaises(OSError, self.os.mkdir, directory)
+
+  def testMkdirRaisesIfNoParent(self):
+    """mkdir raises exception if parent directory does not exist."""
+    parent = 'xyzzy'
+    directory = '%s/foo' % (parent,)
+    self.assertFalse(self.filesystem.Exists(parent))
+    self.assertRaises(Exception, self.os.mkdir, directory)
+
+  def testMkdirRaisesIfDirectoryExists(self):
+    """mkdir raises exception if directory already exists."""
+    directory = 'xyzzy'
+    self.filesystem.CreateDirectory(directory)
+    self.assertTrue(self.filesystem.Exists(directory))
+    self.assertRaises(Exception, self.os.mkdir, directory)
+
+  def testMkdirRaisesIfFileExists(self):
+    """mkdir raises exception if name already exists as a file."""
+    directory = 'xyzzy'
+    file_path = '%s/plugh' % directory
+    self.filesystem.CreateFile(file_path)
+    self.assertTrue(self.filesystem.Exists(file_path))
+    self.assertRaises(Exception, self.os.mkdir, file_path)
+
+  def testMkdirRaisesWithSlashDot(self):
+    """mkdir raises exception if mkdir foo/. (trailing /.)."""
+    self.assertRaises(Exception, self.os.mkdir, '/.')
+    directory = '/xyzzy/.'
+    self.assertRaises(Exception, self.os.mkdir, directory)
+    self.filesystem.CreateDirectory('/xyzzy')
+    self.assertRaises(Exception, self.os.mkdir, directory)
+
+  def testMkdirRaisesWithDoubleDots(self):
+    """mkdir raises exception if mkdir foo/foo2/../foo3."""
+    self.assertRaises(Exception, self.os.mkdir, '/..')
+    directory = '/xyzzy/dir1/dir2/../../dir3'
+    self.assertRaises(Exception, self.os.mkdir, directory)
+    self.filesystem.CreateDirectory('/xyzzy')
+    self.assertRaises(Exception, self.os.mkdir, directory)
+    self.filesystem.CreateDirectory('/xyzzy/dir1')
+    self.assertRaises(Exception, self.os.mkdir, directory)
+    self.filesystem.CreateDirectory('/xyzzy/dir1/dir2')
+    self.os.mkdir(directory)
+    self.assertTrue(self.filesystem.Exists(directory))
+    directory = '/xyzzy/dir1/..'
+    self.assertRaises(Exception, self.os.mkdir, directory)
+
+  def testMkdirRaisesIfParentIsReadOnly(self):
+    """mkdir raises exception if parent is read only."""
+    directory = '/a'
+    self.os.mkdir(directory)
+
+    # Change directory permissions to be read only.
+    self.os.chmod(directory, 0o400)
+
+    directory = '/a/b'
+    self.assertRaises(Exception, self.os.mkdir, directory)
+
+  def testMakedirs(self):
+    """makedirs can create a directory even in parent does not exist."""
+    parent = 'xyzzy'
+    directory = '%s/foo' % (parent,)
+    self.assertFalse(self.filesystem.Exists(parent))
+    self.os.makedirs(directory)
+    self.assertTrue(self.filesystem.Exists(parent))
+
+  def testMakedirsRaisesIfParentIsFile(self):
+    """makedirs raises exception if a parent component exists as a file."""
+    file_path = 'xyzzy'
+    directory = '%s/plugh' % file_path
+    self.filesystem.CreateFile(file_path)
+    self.assertTrue(self.filesystem.Exists(file_path))
+    self.assertRaises(Exception, self.os.makedirs, directory)
+
+  def testMakedirsRaisesIfAccessDenied(self):
+    """makedirs raises exception if access denied."""
+    directory = '/a'
+    self.os.mkdir(directory)
+
+    # Change directory permissions to be read only.
+    self.os.chmod(directory, 0o400)
+
+    directory = '/a/b'
+    self.assertRaises(Exception, self.os.makedirs, directory)
+
+  def _CreateTestFile(self, path):
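+    """Create a regular file and check its default mode is 0o666."""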
+    self.filesystem.CreateFile(path)
+    self.assertTrue(self.filesystem.Exists(path))
+    st = self.os.stat(path)
+    self.assertEqual(0o666, stat.S_IMODE(st.st_mode))
+    self.assertTrue(st.st_mode & stat.S_IFREG)
+    self.assertFalse(st.st_mode & stat.S_IFDIR)
+
+  def _CreateTestDirectory(self, path):
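+    """Create a directory and check its default mode is 0o777."""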
+    self.filesystem.CreateDirectory(path)
+    self.assertTrue(self.filesystem.Exists(path))
+    st = self.os.stat(path)
+    self.assertEqual(0o777, stat.S_IMODE(st.st_mode))
+    self.assertFalse(st.st_mode & stat.S_IFREG)
+    self.assertTrue(st.st_mode & stat.S_IFDIR)
+
+  def testAccess700(self):
+    # set up
+    path = '/some_file'
+    self._CreateTestFile(path)
+    self.os.chmod(path, 0o700)
+    self.assertModeEqual(0o700, self.os.stat(path).st_mode)
+    # actual tests
+    self.assertTrue(self.os.access(path, self.os.F_OK))
+    self.assertTrue(self.os.access(path, self.os.R_OK))
+    self.assertTrue(self.os.access(path, self.os.W_OK))
+    self.assertTrue(self.os.access(path, self.os.X_OK))
+    self.assertTrue(self.os.access(path, self.rwx))
+
+  def testAccess600(self):
+    # set up
+    path = '/some_file'
+    self._CreateTestFile(path)
+    self.os.chmod(path, 0o600)
+    self.assertModeEqual(0o600, self.os.stat(path).st_mode)
+    # actual tests
+    self.assertTrue(self.os.access(path, self.os.F_OK))
+    self.assertTrue(self.os.access(path, self.os.R_OK))
+    self.assertTrue(self.os.access(path, self.os.W_OK))
+    self.assertFalse(self.os.access(path, self.os.X_OK))
+    self.assertFalse(self.os.access(path, self.rwx))
+    self.assertTrue(self.os.access(path, self.rw))
+
+  def testAccess400(self):
+    # set up
+    path = '/some_file'
+    self._CreateTestFile(path)
+    self.os.chmod(path, 0o400)
+    self.assertModeEqual(0o400, self.os.stat(path).st_mode)
+    # actual tests
+    self.assertTrue(self.os.access(path, self.os.F_OK))
+    self.assertTrue(self.os.access(path, self.os.R_OK))
+    self.assertFalse(self.os.access(path, self.os.W_OK))
+    self.assertFalse(self.os.access(path, self.os.X_OK))
+    self.assertFalse(self.os.access(path, self.rwx))
+    self.assertFalse(self.os.access(path, self.rw))
+
+  def testAccessNonExistentFile(self):
+    # set up
+    path = '/non/existent/file'
+    self.assertFalse(self.filesystem.Exists(path))
+    # actual tests
+    self.assertFalse(self.os.access(path, self.os.F_OK))
+    self.assertFalse(self.os.access(path, self.os.R_OK))
+    self.assertFalse(self.os.access(path, self.os.W_OK))
+    self.assertFalse(self.os.access(path, self.os.X_OK))
+    self.assertFalse(self.os.access(path, self.rwx))
+    self.assertFalse(self.os.access(path, self.rw))
+
+  def testChmod(self):
+    # set up
+    path = '/some_file'
+    self._CreateTestFile(path)
+    # actual tests
+    self.os.chmod(path, 0o6543)
+    st = self.os.stat(path)
+    self.assertModeEqual(0o6543, st.st_mode)
+    self.assertTrue(st.st_mode & stat.S_IFREG)
+    self.assertFalse(st.st_mode & stat.S_IFDIR)
+
+  def testChmodDir(self):
+    # set up
+    path = '/some_dir'
+    self._CreateTestDirectory(path)
+    # actual tests
+    self.os.chmod(path, 0o1234)
+    st = self.os.stat(path)
+    self.assertModeEqual(0o1234, st.st_mode)
+    self.assertFalse(st.st_mode & stat.S_IFREG)
+    self.assertTrue(st.st_mode & stat.S_IFDIR)
+
+  def testChmodNonExistent(self):
+    # set up
+    path = '/non/existent/file'
+    self.assertFalse(self.filesystem.Exists(path))
+    # actual tests
+    try:
+      # Use try/except to check exception attributes.
+      self.os.chmod(path, 0o777)
+      self.fail('Exception is expected.')  # COV_NF_LINE
+    except OSError as os_error:
+      self.assertEqual(errno.ENOENT, os_error.errno)
+      self.assertEqual(path, os_error.filename)
+
+  def testChmodStCtime(self):
+    # set up
+    file_path = 'some_file'
+    self.filesystem.CreateFile(file_path)
+    self.assertTrue(self.filesystem.Exists(file_path))
+    st = self.os.stat(file_path)
+    self.assertEqual(200, st.st_ctime)
+    # tests
+    self.os.chmod(file_path, 0o765)
+    st = self.os.stat(file_path)
+    self.assertEqual(220, st.st_ctime)
+
+  def testUtimeSetsCurrentTimeIfArgsIsNone(self):
+    # set up
+    path = '/some_file'
+    self._CreateTestFile(path)
+    st = self.os.stat(path)
+    # 200 is the current time established in setUp().
+    self.assertEqual(200, st.st_atime)
+    self.assertEqual(200, st.st_mtime)
+    # actual tests
+    self.os.utime(path, None)
+    st = self.os.stat(path)
+    self.assertEqual(220, st.st_atime)
+    self.assertEqual(240, st.st_mtime)
+
+  def testUtimeSetsCurrentTimeIfArgsIsNoneWithFloats(self):
+    # set up
+    # time.time can report back floats, but it should be converted to ints
+    # since atime/ctime/mtime are all defined as seconds since epoch.
+    time.time = _GetDummyTime(200.0123, 20)
+    path = '/some_file'
+    self._CreateTestFile(path)
+    st = self.os.stat(path)
+    # 200 is the current time established above (if converted to int).
+    self.assertEqual(200, st.st_atime)
+    self.assertEqual(200, st.st_mtime)
+    # actual tests
+    self.os.utime(path, None)
+    st = self.os.stat(path)
+    self.assertEqual(220, st.st_atime)
+    self.assertEqual(240, st.st_mtime)
+
+  def testUtimeSetsSpecifiedTime(self):
+    # set up
+    path = '/some_file'
+    self._CreateTestFile(path)
+    st = self.os.stat(path)
+    # actual tests
+    self.os.utime(path, (1, 2))
+    st = self.os.stat(path)
+    self.assertEqual(1, st.st_atime)
+    self.assertEqual(2, st.st_mtime)
+
+  def testUtimeDir(self):
+    # set up
+    path = '/some_dir'
+    self._CreateTestDirectory(path)
+    # actual tests
+    self.os.utime(path, (1.0, 2.0))
+    st = self.os.stat(path)
+    self.assertEqual(1.0, st.st_atime)
+    self.assertEqual(2.0, st.st_mtime)
+
+  def testUtimeNonExistent(self):
+    # set up
+    path = '/non/existent/file'
+    self.assertFalse(self.filesystem.Exists(path))
+    # actual tests
+    try:
+      # Use try/except to check exception attributes.
+      self.os.utime(path, (1, 2))
+      self.fail('Exception is expected.')  # COV_NF_LINE
+    except OSError as os_error:
+      self.assertEqual(errno.ENOENT, os_error.errno)
+      self.assertEqual(path, os_error.filename)
+
+  def testUtimeTupleArgIsOfIncorrectLength(self):
+    # set up
+    path = '/some_dir'
+    self._CreateTestDirectory(path)
+    # actual tests
+    self.assertRaisesWithRegexpMatch(
+        TypeError, r'utime\(\) arg 2 must be a tuple \(atime, mtime\)',
+        self.os.utime, path, (1, 2, 3))
+
+  def testUtimeTupleArgContainsIncorrectType(self):
+    # set up
+    path = '/some_dir'
+    self._CreateTestDirectory(path)
+    # actual tests
+    self.assertRaisesWithRegexpMatch(
+        TypeError, 'an integer is required',
+        self.os.utime, path, (1, 'str'))
+
+  def testChownExistingFile(self):
+    # set up
+    file_path = 'some_file'
+    self.filesystem.CreateFile(file_path)
+    # first set it and make sure it's set
+    self.os.chown(file_path, 100, 100)
+    st = self.os.stat(file_path)
+    self.assertEqual(st[stat.ST_UID], 100)
+    self.assertEqual(st[stat.ST_GID], 100)
+    # then change it and make sure it changed
+    self.os.chown(file_path, 200, 200)
+    st = self.os.stat(file_path)
+    self.assertEqual(st[stat.ST_UID], 200)
+    self.assertEqual(st[stat.ST_GID], 200)
+    # setting a value to -1 leaves it unchanged
+    self.os.chown(file_path, -1, -1)
+    st = self.os.stat(file_path)
+    self.assertEqual(st[stat.ST_UID], 200)
+    self.assertEqual(st[stat.ST_GID], 200)
+
+  def testChownNonexistingFileShouldRaiseOsError(self):
+    file_path = 'some_file'
+    self.assertFalse(self.filesystem.Exists(file_path))
+    self.assertRaises(OSError, self.os.chown, file_path, 100, 100)
+
+  def testClassifyDirectoryContents(self):
+    """Directory classification should work correctly."""
+    root_directory = '/foo'
+    test_directories = ['bar1', 'baz2']
+    test_files = ['baz1', 'bar2', 'baz3']
+    self.filesystem.CreateDirectory(root_directory)
+    for directory in test_directories:
+      directory = self.os.path.join(root_directory, directory)
+      self.filesystem.CreateDirectory(directory)
+    for test_file in test_files:
+      test_file = self.os.path.join(root_directory, test_file)
+      self.filesystem.CreateFile(test_file)
+
+    test_directories.sort()
+    test_files.sort()
+    generator = self.os.walk(root_directory)
+    root, dirs, files = next(generator)
+    dirs.sort()
+    files.sort()
+    self.assertEqual(root_directory, root)
+    self.assertEqual(test_directories, dirs)
+    self.assertEqual(test_files, files)
+
+  def testClassifyDoesNotHideExceptions(self):
+    """_ClassifyDirectoryContents should not hide exceptions."""
+    directory = '/foo'
+    self.assertEqual(False, self.filesystem.Exists(directory))
+    self.assertRaises(OSError, self.os._ClassifyDirectoryContents, directory)
+
+  def testWalkTopDown(self):
+    """Walk down ordering is correct."""
+    self.filesystem.CreateFile('foo/1.txt')
+    self.filesystem.CreateFile('foo/bar1/2.txt')
+    self.filesystem.CreateFile('foo/bar1/baz/3.txt')
+    self.filesystem.CreateFile('foo/bar2/4.txt')
+    expected = [
+        ('foo', ['bar1', 'bar2'], ['1.txt']),
+        ('foo/bar1', ['baz'], ['2.txt']),
+        ('foo/bar1/baz', [], ['3.txt']),
+        ('foo/bar2', [], ['4.txt']),
+        ]
+    self.assertEqual(expected, [step for step in self.os.walk('foo')])
+
+  def testWalkBottomUp(self):
+    """Walk up ordering is correct."""
+    self.filesystem.CreateFile('foo/bar1/baz/1.txt')
+    self.filesystem.CreateFile('foo/bar1/2.txt')
+    self.filesystem.CreateFile('foo/bar2/3.txt')
+    self.filesystem.CreateFile('foo/4.txt')
+
+    expected = [
+        ('foo/bar1/baz', [], ['1.txt']),
+        ('foo/bar1', ['baz'], ['2.txt']),
+        ('foo/bar2', [], ['3.txt']),
+        ('foo', ['bar1', 'bar2'], ['4.txt']),
+        ]
+    self.assertEqual(expected,
+                     [step for step in self.os.walk('foo', topdown=False)])
+
+  def testWalkRaisesIfNonExistent(self):
+    """Raises an exception when attempting to walk non-existent directory."""
+    directory = '/foo/bar'
+    self.assertEqual(False, self.filesystem.Exists(directory))
+    generator = self.os.walk(directory)
+    self.assertRaises(StopIteration, next, generator)
+
+  def testWalkRaisesIfNotDirectory(self):
+    """Raises an exception when attempting to walk a non-directory."""
+    filename = '/foo/bar'
+    self.filesystem.CreateFile(filename)
+    generator = self.os.walk(filename)
+    self.assertRaises(StopIteration, next, generator)
+
+  def testMkNodeCanCreateAFile(self):
+    filename = 'foo'
+    self.assertFalse(self.filesystem.Exists(filename))
+    self.os.mknod(filename)
+    self.assertTrue(self.filesystem.Exists(filename))
+
+  def testMkNodeRaisesIfEmptyFileName(self):
+    filename = ''
+    self.assertRaises(OSError, self.os.mknod, filename)
+
+  def testMkNodeRaisesIfParentDirDoesntExist(self):
+    parent = 'xyzzy'
+    filename = '%s/foo' % (parent,)
+    self.assertFalse(self.filesystem.Exists(parent))
+    self.assertRaises(OSError, self.os.mknod, filename)
+
+  def testMkNodeRaisesIfFileExists(self):
+    filename = '/tmp/foo'
+    self.filesystem.CreateFile(filename)
+    self.assertTrue(self.filesystem.Exists(filename))
+    self.assertRaises(OSError, self.os.mknod, filename)
+
+  def testMkNodeRaisesIfFilenameIsDot(self):
+    filename = '/tmp/.'
+    self.assertRaises(OSError, self.os.mknod, filename)
+
+  def testMkNodeRaisesIfFilenameIsDoubleDot(self):
+    filename = '/tmp/..'
+    self.assertRaises(OSError, self.os.mknod, filename)
+
+  def testMknodEmptyTailForExistingFileRaises(self):
+    filename = '/tmp/foo'
+    self.filesystem.CreateFile(filename)
+    self.assertTrue(self.filesystem.Exists(filename))
+    self.assertRaises(OSError, self.os.mknod, filename)
+
+  def testMknodEmptyTailForNonexistentFileRaises(self):
+    filename = '/tmp/foo'
+    self.assertRaises(OSError, self.os.mknod, filename)
+
+  def testMknodRaisesIfFilenameIsEmptyString(self):
+    filename = ''
+    self.assertRaises(OSError, self.os.mknod, filename)
+
+  def testMknodeRaisesIfUnsupportedOptions(self):
+    filename = 'abcde'
+    self.assertRaises(OSError, self.os.mknod, filename,
+                      mode=stat.S_IFCHR)
+
+  def testMknodeRaisesIfParentIsNotADirectory(self):
+    filename1 = '/tmp/foo'
+    self.filesystem.CreateFile(filename1)
+    self.assertTrue(self.filesystem.Exists(filename1))
+    filename2 = '/tmp/foo/bar'
+    self.assertRaises(OSError, self.os.mknod, filename2)
+
+  def ResetErrno(self):
+    """Reset the last seen errno."""
+    self.last_errno = False
+
+  def StoreErrno(self, os_error):
+    """Store the last errno we saw."""
+    self.last_errno = os_error.errno
+
+  def GetErrno(self):
+    """Return the last errno we saw."""
+    return self.last_errno
+
+  def testWalkCallsOnErrorIfNonExistent(self):
+    """Calls onerror with correct errno when walking non-existent directory."""
+    self.ResetErrno()
+    directory = '/foo/bar'
+    self.assertEqual(False, self.filesystem.Exists(directory))
+    # Calling os.walk on a non-existent directory should trigger a call to the
+    # onerror method.  We do not actually care what, if anything, is returned.
+    for unused_entry in self.os.walk(directory, onerror=self.StoreErrno):
+      pass
+    self.assertTrue(self.GetErrno() in (errno.ENOTDIR, errno.ENOENT))
+
+  def testWalkCallsOnErrorIfNotDirectory(self):
+    """Calls onerror with correct errno when walking non-directory."""
+    self.ResetErrno()
+    filename = '/foo/bar'
+    self.filesystem.CreateFile(filename)
+    self.assertEqual(True, self.filesystem.Exists(filename))
+    # Calling os.walk on a file should trigger a call to the onerror method.
+    # We do not actually care what, if anything, is returned.
+    for unused_entry in self.os.walk(filename, onerror=self.StoreErrno):
+      pass
+    self.assertTrue(self.GetErrno() in (errno.ENOTDIR, errno.EACCES))
+
+  def testWalkSkipsRemovedDirectories(self):
+    """Caller can modify list of directories to visit while walking."""
+    root = '/foo'
+    visit = 'visit'
+    no_visit = 'no_visit'
+    self.filesystem.CreateFile('%s/bar' % (root,))
+    self.filesystem.CreateFile('%s/%s/1.txt' % (root, visit))
+    self.filesystem.CreateFile('%s/%s/2.txt' % (root, visit))
+    self.filesystem.CreateFile('%s/%s/3.txt' % (root, no_visit))
+    self.filesystem.CreateFile('%s/%s/4.txt' % (root, no_visit))
+
+    generator = self.os.walk('/foo')
+    root_contents = next(generator)
+    root_contents[1].remove(no_visit)
+
+    visited_visit_directory = False
+
+    for root, unused_dirs, unused_files in iter(generator):
+      self.assertEqual(False, root.endswith('/%s' % (no_visit)))
+      if root.endswith('/%s' % (visit)):
+        visited_visit_directory = True
+
+    self.assertEqual(True, visited_visit_directory)
+
+  def testSymlink(self):
+    file_path = 'foo/bar/baz'
+    self.os.symlink('bogus', file_path)
+    self.assertTrue(self.os.path.lexists(file_path))
+    self.assertFalse(self.os.path.exists(file_path))
+    self.filesystem.CreateFile('foo/bar/bogus')
+    self.assertTrue(self.os.path.lexists(file_path))
+    self.assertTrue(self.os.path.exists(file_path))
+
+  def testUMask(self):
+    umask = os.umask(0o22)
+    os.umask(umask)
+    self.assertEqual(umask, self.os.umask(0o22))
+
+  def testMkdirUmaskApplied(self):
+    """mkdir creates a directory with umask applied."""
+    self.os.umask(0o22)
+    self.os.mkdir('dir1')
+    self.assertModeEqual(0o755, self.os.stat('dir1').st_mode)
+    self.os.umask(0o67)
+    self.os.mkdir('dir2')
+    self.assertModeEqual(0o710, self.os.stat('dir2').st_mode)
+
+  def testMakedirsUmaskApplied(self):
+    """makedirs creates a directories with umask applied."""
+    self.os.umask(0o22)
+    self.os.makedirs('/p1/dir1')
+    self.assertModeEqual(0o755, self.os.stat('/p1').st_mode)
+    self.assertModeEqual(0o755, self.os.stat('/p1/dir1').st_mode)
+    self.os.umask(0o67)
+    self.os.makedirs('/p2/dir2')
+    self.assertModeEqual(0o710, self.os.stat('/p2').st_mode)
+    self.assertModeEqual(0o710, self.os.stat('/p2/dir2').st_mode)
+
+  def testMknodeUmaskApplied(self):
+    """mkdir creates a device with umask applied."""
+    self.os.umask(0o22)
+    self.os.mknod('nod1')
+    self.assertModeEqual(0o644, self.os.stat('nod1').st_mode)
+    self.os.umask(0o27)
+    self.os.mknod('nod2')
+    self.assertModeEqual(0o640, self.os.stat('nod2').st_mode)
+
+  def testOpenUmaskApplied(self):
+    """open creates a file with umask applied."""
+    fake_open = fake_filesystem.FakeFileOpen(self.filesystem)
+    self.os.umask(0o22)
+    fake_open('file1', 'w').close()
+    self.assertModeEqual(0o644, self.os.stat('file1').st_mode)
+    self.os.umask(0o27)
+    fake_open('file2', 'w').close()
+    self.assertModeEqual(0o640, self.os.stat('file2').st_mode)
+
+
+class StatPropagationTest(TestCase):
+
+  def setUp(self):
+    self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
+    self.os = fake_filesystem.FakeOsModule(self.filesystem)
+    self.open = fake_filesystem.FakeFileOpen(self.filesystem)
+
+  def testFileSizeUpdatedViaClose(self):
+    """test that file size gets updated via close()."""
+    file_dir = 'xyzzy'
+    file_path = 'xyzzy/close'
+    content = 'This is a test.'
+    self.os.mkdir(file_dir)
+    fh = self.open(file_path, 'w')
+    self.assertEqual(0, self.os.stat(file_path)[stat.ST_SIZE])
+    self.assertEqual('', self.filesystem.GetObject(file_path).contents)
+    fh.write(content)
+    self.assertEqual(0, self.os.stat(file_path)[stat.ST_SIZE])
+    self.assertEqual('', self.filesystem.GetObject(file_path).contents)
+    fh.close()
+    self.assertEqual(len(content), self.os.stat(file_path)[stat.ST_SIZE])
+    self.assertEqual(content, self.filesystem.GetObject(file_path).contents)
+
+  def testFileSizeNotResetAfterClose(self):
+    file_dir = 'xyzzy'
+    file_path = 'xyzzy/close'
+    self.os.mkdir(file_dir)
+    size = 1234
+    # The file has size, but no content. When the file is opened for reading,
+    # its size should be preserved.
+    self.filesystem.CreateFile(file_path, st_size=size)
+    fh = self.open(file_path, 'r')
+    fh.close()
+    self.assertEqual(size, self.open(file_path, 'r').Size())
+
+  def testFileSizeAfterWrite(self):
+    file_path = 'test_file'
+    original_content = 'abcdef'
+    original_size = len(original_content)
+    self.filesystem.CreateFile(file_path, contents=original_content)
+    added_content = 'foo bar'
+    expected_size = original_size + len(added_content)
+    fh = self.open(file_path, 'a')
+    fh.write(added_content)
+    self.assertEqual(expected_size, fh.Size())
+    fh.close()
+    self.assertEqual(expected_size, self.open(file_path, 'r').Size())
+
+  def testLargeFileSizeAfterWrite(self):
+    file_path = 'test_file'
+    original_content = 'abcdef'
+    original_size = len(original_content)
+    self.filesystem.CreateFile(file_path, st_size=original_size)
+    added_content = 'foo bar'
+    fh = self.open(file_path, 'a')
+    # We can't use assertRaises, because the exception is thrown
+    # in __getattr__, so just saying 'fh.write' causes the exception.
+    try:
+      fh.write(added_content)
+    except fake_filesystem.FakeLargeFileIoException:
+      return
+    self.fail('Writing to a large file should not be allowed')
+
+  def testFileSizeUpdatedViaFlush(self):
+    """test that file size gets updated via flush()."""
+    file_dir = 'xyzzy'
+    file_name = 'flush'
+    file_path = self.os.path.join(file_dir, file_name)
+    content = 'This might be a test.'
+    self.os.mkdir(file_dir)
+    fh = self.open(file_path, 'w')
+    self.assertEqual(0, self.os.stat(file_path)[stat.ST_SIZE])
+    self.assertEqual('', self.filesystem.GetObject(file_path).contents)
+    fh.write(content)
+    self.assertEqual(0, self.os.stat(file_path)[stat.ST_SIZE])
+    self.assertEqual('', self.filesystem.GetObject(file_path).contents)
+    fh.flush()
+    self.assertEqual(len(content), self.os.stat(file_path)[stat.ST_SIZE])
+    self.assertEqual(content, self.filesystem.GetObject(file_path).contents)
+    fh.close()
+    self.assertEqual(len(content), self.os.stat(file_path)[stat.ST_SIZE])
+    self.assertEqual(content, self.filesystem.GetObject(file_path).contents)
+
+  def testFileSizeTruncation(self):
+    """test that file size gets updated via open()."""
+    file_dir = 'xyzzy'
+    file_path = 'xyzzy/truncation'
+    content = 'AAA content.'
+
+    # pre-create file with content
+    self.os.mkdir(file_dir)
+    fh = self.open(file_path, 'w')
+    fh.write(content)
+    fh.close()
+    self.assertEqual(len(content), self.os.stat(file_path)[stat.ST_SIZE])
+    self.assertEqual(content, self.filesystem.GetObject(file_path).contents)
+
+    # test file truncation
+    fh = self.open(file_path, 'w')
+    self.assertEqual(0, self.os.stat(file_path)[stat.ST_SIZE])
+    self.assertEqual('', self.filesystem.GetObject(file_path).contents)
+    fh.close()
+
+
+class OsPathInjectionRegressionTest(TestCase):
+  """Test faking os.path before calling os.walk.
+
+  Found when investigating a problem with
+  gws/tools/labrat/rat_utils_unittest, which was faking out os.path
+  before calling os.walk.
+  """
+
+  def setUp(self):
+    self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
+    self.os_path = os.path
+    # The bug was that when os.path gets faked, the FakePathModule doesn't get
+    # called in self.os.walk().  FakePathModule now insists that it is created
+    # as part of FakeOsModule.
+    self.os = fake_filesystem.FakeOsModule(self.filesystem)
+
+  def tearDown(self):
+    os.path = self.os_path
+
+  def testCreateTopLevelDirectory(self):
+    top_level_dir = '/x'
+    self.assertFalse(self.filesystem.Exists(top_level_dir))
+    self.filesystem.CreateDirectory(top_level_dir)
+    self.assertTrue(self.filesystem.Exists('/'))
+    self.assertTrue(self.filesystem.Exists(top_level_dir))
+    self.filesystem.CreateDirectory('%s/po' % top_level_dir)
+    self.filesystem.CreateFile('%s/po/control' % top_level_dir)
+    self.filesystem.CreateFile('%s/po/experiment' % top_level_dir)
+    self.filesystem.CreateDirectory('%s/gv' % top_level_dir)
+    self.filesystem.CreateFile('%s/gv/control' % top_level_dir)
+
+    expected = [
+        ('/', ['x'], []),
+        ('/x', ['gv', 'po'], []),
+        ('/x/gv', [], ['control']),
+        ('/x/po', [], ['control', 'experiment']),
+        ]
+    self.assertEqual(expected, [step for step in self.os.walk('/')])
+
+
+class FakePathModuleTest(TestCase):
+  def setUp(self):
+    self.orig_time = time.time
+    time.time = _GetDummyTime(10, 1)
+    self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
+    self.os = fake_filesystem.FakeOsModule(self.filesystem)
+    self.path = self.os.path
+
+  def tearDown(self):
+    time.time = self.orig_time
+
+  def testAbspath(self):
+    """abspath should return a consistent representation of a file."""
+    filename = 'foo'
+    abspath = '/%s' % filename
+    self.filesystem.CreateFile(abspath)
+    self.assertEqual(abspath, self.path.abspath(abspath))
+    self.assertEqual(abspath, self.path.abspath(filename))
+    self.assertEqual(abspath, self.path.abspath('../%s' % filename))
+
+  def testAbspathDealsWithRelativeNonRootPath(self):
+    """abspath should correctly handle relative paths from a non-/ directory.
+
+    This test is distinct from the basic functionality test because
+    fake_filesystem has historically been based in /.
+    """
+    filename = '/foo/bar/baz'
+    file_components = filename.split(self.path.sep)
+    basedir = '/%s' % (file_components[0],)
+    self.filesystem.CreateFile(filename)
+    self.os.chdir(basedir)
+    self.assertEqual(basedir, self.path.abspath(self.path.curdir))
+    self.assertEqual('/', self.path.abspath('..'))
+    self.assertEqual(self.path.join(basedir, file_components[1]),
+                     self.path.abspath(file_components[1]))
+
+  def testRelpath(self):
+    path_foo = '/path/to/foo'
+    path_bar = '/path/to/bar'
+    path_other = '/some/where/else'
+    self.assertRaises(ValueError, self.path.relpath, None)
+    self.assertRaises(ValueError, self.path.relpath, '')
+    self.assertEqual(path_foo[1:],
+                     self.path.relpath(path_foo))
+    self.assertEqual('../foo',
+                     self.path.relpath(path_foo, path_bar))
+    self.assertEqual('../../..%s' % path_other,
+                     self.path.relpath(path_other, path_bar))
+    self.assertEqual('.',
+                     self.path.relpath(path_bar, path_bar))
+
+  @unittest.skipIf(TestCase.is_windows, 'realpath does not follow symlinks in win32')
+  def testRealpathVsAbspath(self):
+    self.filesystem.CreateFile('/george/washington/bridge')
+    self.filesystem.CreateLink('/first/president', '/george/washington')
+    self.assertEqual('/first/president/bridge',
+                     self.os.path.abspath('/first/president/bridge'))
+    self.assertEqual('/george/washington/bridge',
+                     self.os.path.realpath('/first/president/bridge'))
+    self.os.chdir('/first/president')
+    self.assertEqual('/george/washington/bridge',
+                     self.os.path.realpath('bridge'))
+
+  def testExists(self):
+    file_path = 'foo/bar/baz'
+    self.filesystem.CreateFile(file_path)
+    self.assertTrue(self.path.exists(file_path))
+    self.assertFalse(self.path.exists('/some/other/bogus/path'))
+
+  def testLexists(self):
+    file_path = 'foo/bar/baz'
+    self.filesystem.CreateDirectory('foo/bar')
+    self.filesystem.CreateLink(file_path, 'bogus')
+    self.assertTrue(self.path.lexists(file_path))
+    self.assertFalse(self.path.exists(file_path))
+    self.filesystem.CreateFile('foo/bar/bogus')
+    self.assertTrue(self.path.exists(file_path))
+
+  def testDirname(self):
+    dirname = 'foo/bar'
+    self.assertEqual(dirname, self.path.dirname('%s/baz' % dirname))
+
+  def testJoin(self):
+    components = ['foo', 'bar', 'baz']
+    self.assertEqual('foo/bar/baz', self.path.join(*components))
+
+  def testExpandUser(self):
+    if self.is_windows:
+      self.assertEqual(self.path.expanduser('~'),
+                       self.os.environ['USERPROFILE'].replace('\\', '/'))
+    else:
+      self.assertEqual(self.path.expanduser('~'),
+                       self.os.environ['HOME'])
+
+  @unittest.skipIf(TestCase.is_windows or TestCase.is_cygwin,
+                   'only tested on unix systems')
+  def testExpandRoot(self):
+    self.assertEqual('/root', self.path.expanduser('~root'))
+
+  def testGetsizePathNonexistent(self):
+    file_path = 'foo/bar/baz'
+    self.assertRaises(IOError, self.path.getsize, file_path)
+
+  def testGetsizeFileEmpty(self):
+    file_path = 'foo/bar/baz'
+    self.filesystem.CreateFile(file_path)
+    self.assertEqual(0, self.path.getsize(file_path))
+
+  def testGetsizeFileNonZeroSize(self):
+    file_path = 'foo/bar/baz'
+    self.filesystem.CreateFile(file_path, contents='1234567')
+    self.assertEqual(7, self.path.getsize(file_path))
+
+  def testGetsizeDirEmpty(self):
+    # For directories, only require that the size is non-negative.
+    dir_path = 'foo/bar'
+    self.filesystem.CreateDirectory(dir_path)
+    size = self.path.getsize(dir_path)
+    self.assertFalse(int(size) < 0,
+                     'expected non-negative size; actual: %s' % size)
+
+  def testGetsizeDirNonZeroSize(self):
+    # For directories, only require that the size is non-negative.
+    dir_path = 'foo/bar'
+    self.filesystem.CreateFile(self.filesystem.JoinPaths(dir_path, 'baz'))
+    size = self.path.getsize(dir_path)
+    self.assertFalse(int(size) < 0,
+                     'expected non-negative size; actual: %s' % size)
+
+  def testIsdir(self):
+    self.filesystem.CreateFile('foo/bar')
+    self.assertTrue(self.path.isdir('foo'))
+    self.assertFalse(self.path.isdir('foo/bar'))
+    self.assertFalse(self.path.isdir('it_dont_exist'))
+
+  def testIsdirWithCwdChange(self):
+    self.filesystem.CreateFile('/foo/bar/baz')
+    self.assertTrue(self.path.isdir('/foo'))
+    self.assertTrue(self.path.isdir('/foo/bar'))
+    self.assertTrue(self.path.isdir('foo'))
+    self.assertTrue(self.path.isdir('foo/bar'))
+    self.filesystem.cwd = '/foo'
+    self.assertTrue(self.path.isdir('/foo'))
+    self.assertTrue(self.path.isdir('/foo/bar'))
+    self.assertTrue(self.path.isdir('bar'))
+
+  def testIsfile(self):
+    self.filesystem.CreateFile('foo/bar')
+    self.assertFalse(self.path.isfile('foo'))
+    self.assertTrue(self.path.isfile('foo/bar'))
+    self.assertFalse(self.path.isfile('it_dont_exist'))
+
+  def testGetMtime(self):
+    test_file = self.filesystem.CreateFile('foo/bar1.txt')
+    # The root directory ('', effectively '/') is created at time 10,
+    # the parent directory ('foo') at time 11, and the file at time 12.
+    self.assertEqual(12, test_file.st_mtime)
+    test_file.SetMTime(24)
+    self.assertEqual(24, self.path.getmtime('foo/bar1.txt'))
+
+  def testGetMtimeRaisesOSError(self):
+    self.assertFalse(self.path.exists('it_dont_exist'))
+    self.assertRaises(OSError, self.path.getmtime, 'it_dont_exist')
+
+  def testIslink(self):
+    self.filesystem.CreateDirectory('foo')
+    self.filesystem.CreateFile('foo/regular_file')
+    self.filesystem.CreateLink('foo/link_to_file', 'regular_file')
+    self.assertFalse(self.path.islink('foo'))
+
+    # An object can be both a link and a file or directory, according to the
+    # comments in Python/Lib/posixpath.py.
+    self.assertTrue(self.path.islink('foo/link_to_file'))
+    self.assertTrue(self.path.isfile('foo/link_to_file'))
+
+    self.assertTrue(self.path.isfile('foo/regular_file'))
+    self.assertFalse(self.path.islink('foo/regular_file'))
+
+    self.assertFalse(self.path.islink('it_dont_exist'))
+
+  @unittest.skipIf(sys.version_info >= (3, 0) or TestCase.is_windows,
+                   'os.path.walk deprecated in Python 3, cannot be properly '
+                   'tested in win32')
+  def testWalk(self):
+    self.filesystem.CreateFile('/foo/bar/baz')
+    self.filesystem.CreateFile('/foo/bar/xyzzy/plugh')
+    visited_nodes = []
+
+    def RecordVisitedNodes(visited, dirname, fnames):
+      visited.extend(((dirname, fname) for fname in fnames))
+
+    self.path.walk('/foo', RecordVisitedNodes, visited_nodes)
+    expected = [('/foo', 'bar'),
+                ('/foo/bar', 'baz'),
+                ('/foo/bar', 'xyzzy'),
+                ('/foo/bar/xyzzy', 'plugh')]
+    self.assertEqual(expected, visited_nodes)
+
+  @unittest.skipIf(sys.version_info >= (3, 0) or TestCase.is_windows,
+                   'os.path.walk deprecated in Python 3, cannot be properly '
+                   'tested in win32')
+  def testWalkFromNonexistentTopDoesNotThrow(self):
+    visited_nodes = []
+
+    def RecordVisitedNodes(visited, dirname, fnames):
+      visited.extend(((dirname, fname) for fname in fnames))
+
+    self.path.walk('/foo', RecordVisitedNodes, visited_nodes)
+    self.assertEqual([], visited_nodes)
+
+
+class FakeFileOpenTestBase(TestCase):
+  def setUp(self):
+    self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
+    self.file = fake_filesystem.FakeFileOpen(self.filesystem)
+    self.open = self.file
+    self.os = fake_filesystem.FakeOsModule(self.filesystem)
+    self.orig_time = time.time
+    time.time = _GetDummyTime(100, 10)
+
+  def tearDown(self):
+    time.time = self.orig_time
+
+
+class FakeFileOpenTest(FakeFileOpenTestBase):
+  def testOpenNoParentDir(self):
+    """Expect raise when open'ing a file in a missing directory."""
+    file_path = 'foo/bar.txt'
+    self.assertRaises(IOError, self.file, file_path, 'w')
+
+  def testDeleteOnClose(self):
+    file_dir = 'boo'
+    file_path = 'boo/far'
+    self.os.mkdir(file_dir)
+    self.file = fake_filesystem.FakeFileOpen(self.filesystem,
+                                             delete_on_close=True)
+    fh = self.file(file_path, 'w')
+    self.assertTrue(self.filesystem.Exists(file_path))
+    fh.close()
+    self.assertFalse(self.filesystem.Exists(file_path))
+
+  def testNoDeleteOnCloseByDefault(self):
+    file_dir = 'boo'
+    file_path = 'boo/czar'
+    self.file = fake_filesystem.FakeFileOpen(self.filesystem)
+    self.os.mkdir(file_dir)
+    fh = self.file(file_path, 'w')
+    self.assertTrue(self.filesystem.Exists(file_path))
+    fh.close()
+    self.assertTrue(self.filesystem.Exists(file_path))
+
+  def testCompatibilityOfWithStatement(self):
+    self.file = fake_filesystem.FakeFileOpen(self.filesystem,
+                                             delete_on_close=True)
+    file_path = 'foo'
+    self.assertFalse(self.filesystem.Exists(file_path))
+    with self.file(file_path, 'w') as _:
+      self.assertTrue(self.filesystem.Exists(file_path))
+    # After the 'with' statement, the close() method should have been called.
+    self.assertFalse(self.filesystem.Exists(file_path))
+
+  def testOpenValidFile(self):
+    contents = [
+        'I am he as\n',
+        'you are he as\n',
+        'you are me and\n',
+        'we are all together\n'
+        ]
+    file_path = 'foo/bar.txt'
+    self.filesystem.CreateFile(file_path, contents=''.join(contents))
+    self.assertEqual(contents, self.file(file_path).readlines())
+
+  def testOpenValidArgs(self):
+    contents = [
+        "Bang bang Maxwell's silver hammer\n",
+        'Came down on her head',
+        ]
+    file_path = 'abbey_road/maxwell'
+    self.filesystem.CreateFile(file_path, contents=''.join(contents))
+    self.assertEqual(
+        contents, self.open(file_path, mode='r', buffering=1).readlines())
+    if sys.version_info >= (3, 0):
+      self.assertEqual(
+          contents, self.open(file_path, mode='r', buffering=1,
+                              encoding='utf-8', errors='strict', newline='\n',
+                              closefd=False, opener=False).readlines())
+
+  @unittest.skipIf(sys.version_info < (3, 0), 'only tested on 3.0 or greater')
+  def testOpenNewlineArg(self):
+    file_path = 'some_file'
+    file_contents = 'two\r\nlines'
+    self.filesystem.CreateFile(file_path, contents=file_contents)
+    fake_file = self.open(file_path, mode='r', newline=None)
+    self.assertEqual(['two\n', 'lines'], fake_file.readlines())
+    fake_file = self.open(file_path, mode='r', newline='')
+    self.assertEqual(['two\r\n', 'lines'], fake_file.readlines())
+    fake_file = self.open(file_path, mode='r', newline='\r')
+    self.assertEqual(['two\r', '\r', 'lines'], fake_file.readlines())
+    fake_file = self.open(file_path, mode='r', newline='\n')
+    self.assertEqual(['two\r\n', 'lines'], fake_file.readlines())
+    fake_file = self.open(file_path, mode='r', newline='\r\n')
+    self.assertEqual(['two\r\r\n', 'lines'], fake_file.readlines())
+
+  def testOpenValidFileWithCwd(self):
+    contents = [
+        'I am he as\n',
+        'you are he as\n',
+        'you are me and\n',
+        'we are all together\n'
+        ]
+    file_path = '/foo/bar.txt'
+    self.filesystem.CreateFile(file_path, contents=''.join(contents))
+    self.filesystem.cwd = '/foo'
+    self.assertEqual(contents, self.file(file_path).readlines())
+
+  def testIterateOverFile(self):
+    contents = [
+        "Bang bang Maxwell's silver hammer",
+        'Came down on her head',
+        ]
+    file_path = 'abbey_road/maxwell'
+    self.filesystem.CreateFile(file_path, contents='\n'.join(contents))
+    result = [line.rstrip() for line in self.file(file_path)]
+    self.assertEqual(contents, result)
+
+  def testOpenDirectoryError(self):
+    directory_path = 'foo/bar'
+    self.filesystem.CreateDirectory(directory_path)
+    self.assertRaises(IOError, self.file.__call__, directory_path)
+
+  def testCreateFileWithWrite(self):
+    contents = [
+        "Here comes the sun, little darlin'",
+        'Here comes the sun, and I say,',
+        "It's alright",
+        ]
+    file_dir = 'abbey_road'
+    file_path = 'abbey_road/here_comes_the_sun'
+    self.os.mkdir(file_dir)
+    fake_file = self.file(file_path, 'w')
+    for line in contents:
+      fake_file.write(line + '\n')
+    fake_file.close()
+    result = [line.rstrip() for line in self.file(file_path)]
+    self.assertEqual(contents, result)
+
+  def testCreateFileWithAppend(self):
+    contents = [
+        "Here comes the sun, little darlin'",
+        'Here comes the sun, and I say,',
+        "It's alright",
+        ]
+    file_dir = 'abbey_road'
+    file_path = 'abbey_road/here_comes_the_sun'
+    self.os.mkdir(file_dir)
+    fake_file = self.file(file_path, 'a')
+    for line in contents:
+      fake_file.write(line + '\n')
+    fake_file.close()
+    result = [line.rstrip() for line in self.file(file_path)]
+    self.assertEqual(contents, result)
+
+  def testOverwriteExistingFile(self):
+    file_path = 'overwrite/this/file'
+    self.filesystem.CreateFile(file_path, contents='To disappear')
+    new_contents = [
+        'Only these lines',
+        'should be in the file.',
+        ]
+    fake_file = self.file(file_path, 'w')
+    for line in new_contents:
+      fake_file.write(line + '\n')
+    fake_file.close()
+    result = [line.rstrip() for line in self.file(file_path)]
+    self.assertEqual(new_contents, result)
+
+  def testAppendExistingFile(self):
+    file_path = 'append/this/file'
+    contents = [
+        'Contents of original file',
+        'Appended contents',
+        ]
+    self.filesystem.CreateFile(file_path, contents=contents[0] + '\n')
+    fake_file = self.file(file_path, 'a')
+    for line in contents[1:]:
+      fake_file.write(line + '\n')
+    fake_file.close()
+    result = [line.rstrip() for line in self.file(file_path)]
+    self.assertEqual(contents, result)
+
+  def testOpenWithWplus(self):
+    # set up
+    file_path = 'wplus_file'
+    self.filesystem.CreateFile(file_path, contents='old contents')
+    self.assertTrue(self.filesystem.Exists(file_path))
+    fake_file = self.file(file_path, 'r')
+    self.assertEqual('old contents', fake_file.read())
+    fake_file.close()
+    # actual tests
+    fake_file = self.file(file_path, 'w+')
+    fake_file.write('new contents')
+    fake_file.seek(0)
+    self.assertEqual('new contents', fake_file.read())
+    fake_file.close()
+
+  def testOpenWithWplusTruncation(self):
+    # set up
+    file_path = 'wplus_file'
+    self.filesystem.CreateFile(file_path, contents='old contents')
+    self.assertTrue(self.filesystem.Exists(file_path))
+    fake_file = self.file(file_path, 'r')
+    self.assertEqual('old contents', fake_file.read())
+    fake_file.close()
+    # actual tests
+    fake_file = self.file(file_path, 'w+')
+    fake_file.seek(0)
+    self.assertEqual('', fake_file.read())
+    fake_file.close()
+
+  def testOpenWithAppendFlag(self):
+    contents = [
+        'I am he as\n',
+        'you are he as\n',
+        'you are me and\n',
+        'we are all together\n'
+        ]
+    additional_contents = [
+        'These new lines\n',
+        'like you a lot.\n'
+        ]
+    file_path = 'append/this/file'
+    self.filesystem.CreateFile(file_path, contents=''.join(contents))
+    fake_file = self.file(file_path, 'a')
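+    # In 'a' mode the file is positioned at the end of the existing contents
+    # and plain read() is not allowed.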
+    self.assertRaises(IOError, fake_file.read)
+    self.assertEqual('', fake_file.read(0))
+    self.assertEqual('', fake_file.readline(0))
+    self.assertEqual(len(''.join(contents)), fake_file.tell())
+    fake_file.seek(0)
+    self.assertEqual(0, fake_file.tell())
+    fake_file.writelines(additional_contents)
+    fake_file.close()
+    result = self.file(file_path).readlines()
+    self.assertEqual(contents + additional_contents, result)
+
+  def testAppendWithAplus(self):
+    # set up
+    file_path = 'aplus_file'
+    self.filesystem.CreateFile(file_path, contents='old contents')
+    self.assertTrue(self.filesystem.Exists(file_path))
+    fake_file = self.file(file_path, 'r')
+    self.assertEqual('old contents', fake_file.read())
+    fake_file.close()
+    # actual tests
+    fake_file = self.file(file_path, 'a+')
+    self.assertEqual(0, fake_file.tell())
+    fake_file.seek(6, 1)
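+    # In append mode the write lands at the end of the file despite the seek(),
+    # so tell() becomes len('old contents') + len('new contents') == 24.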
+    fake_file.write('new contents')
+    self.assertEqual(24, fake_file.tell())
+    fake_file.seek(0)
+    self.assertEqual('old contentsnew contents', fake_file.read())
+    fake_file.close()
+
+  def testAppendWithAplusReadWithLoop(self):
+    # set up
+    file_path = 'aplus_file'
+    self.filesystem.CreateFile(file_path, contents='old contents')
+    self.assertTrue(self.filesystem.Exists(file_path))
+    fake_file = self.file(file_path, 'r')
+    self.assertEqual('old contents', fake_file.read())
+    fake_file.close()
+    # actual tests
+    fake_file = self.file(file_path, 'a+')
+    fake_file.seek(0)
+    fake_file.write('new contents')
+    fake_file.seek(0)
+    for line in fake_file:
+      self.assertEqual('old contentsnew contents', line)
+    fake_file.close()
+
+  def testReadEmptyFileWithAplus(self):
+    file_path = 'aplus_file'
+    fake_file = self.file(file_path, 'a+')
+    self.assertEqual('', fake_file.read())
+    fake_file.close()
+
+  def testReadWithRplus(self):
+    # set up
+    file_path = 'rplus_file'
+    self.filesystem.CreateFile(file_path, contents='old contents here')
+    self.assertTrue(self.filesystem.Exists(file_path))
+    fake_file = self.file(file_path, 'r')
+    self.assertEqual('old contents here', fake_file.read())
+    fake_file.close()
+    # actual tests
+    fake_file = self.file(file_path, 'r+')
+    self.assertEqual('old contents here', fake_file.read())
+    fake_file.seek(0)
+    fake_file.write('new contents')
+    fake_file.seek(0)
+    self.assertEqual('new contents here', fake_file.read())
+    fake_file.close()
+
+  def testOpenStCtime(self):
+    # set up
+    file_path = 'some_file'
+    self.assertFalse(self.filesystem.Exists(file_path))
+    # tests
+    fake_file = self.file(file_path, 'w')
+    fake_file.close()
+    st = self.os.stat(file_path)
+    self.assertEqual(100, st.st_ctime)
+
+    fake_file = self.file(file_path, 'w')
+    fake_file.close()
+    st = self.os.stat(file_path)
+    self.assertEqual(110, st.st_ctime)
+
+    fake_file = self.file(file_path, 'w+')
+    fake_file.close()
+    st = self.os.stat(file_path)
+    self.assertEqual(120, st.st_ctime)
+
+    fake_file = self.file(file_path, 'r')
+    fake_file.close()
+    st = self.os.stat(file_path)
+    self.assertEqual(120, st.st_ctime)
+
+  def _CreateWithPermission(self, file_path, perm_bits):
+    self.filesystem.CreateFile(file_path)
+    self.os.chmod(file_path, perm_bits)
+    st = self.os.stat(file_path)
+    self.assertModeEqual(perm_bits, st.st_mode)
+    self.assertTrue(st.st_mode & stat.S_IFREG)
+    self.assertFalse(st.st_mode & stat.S_IFDIR)
+
+  def testOpenFlags700(self):
+    # set up
+    file_path = 'target_file'
+    self._CreateWithPermission(file_path, 0o700)
+    # actual tests
+    self.file(file_path, 'r').close()
+    self.file(file_path, 'w').close()
+    self.file(file_path, 'w+').close()
+    self.assertRaises(IOError, self.file, file_path, 'INV')
+
+  def testOpenFlags400(self):
+    # set up
+    file_path = 'target_file'
+    self._CreateWithPermission(file_path, 0o400)
+    # actual tests
+    self.file(file_path, 'r').close()
+    self.assertRaises(IOError, self.file, file_path, 'w')
+    self.assertRaises(IOError, self.file, file_path, 'w+')
+
+  def testOpenFlags200(self):
+    # set up
+    file_path = 'target_file'
+    self._CreateWithPermission(file_path, 0o200)
+    # actual tests
+    self.assertRaises(IOError, self.file, file_path, 'r')
+    self.file(file_path, 'w').close()
+    self.assertRaises(IOError, self.file, file_path, 'w+')
+
+  def testOpenFlags100(self):
+    # set up
+    file_path = 'target_file'
+    self._CreateWithPermission(file_path, 0o100)
+    # actual tests
+    self.assertRaises(IOError, self.file, file_path, 'r')
+    self.assertRaises(IOError, self.file, file_path, 'w')
+    self.assertRaises(IOError, self.file, file_path, 'w+')
+
+  def testFollowLinkRead(self):
+    link_path = '/foo/bar/baz'
+    target = '/tarJAY'
+    target_contents = 'real baz contents'
+    self.filesystem.CreateFile(target, contents=target_contents)
+    self.filesystem.CreateLink(link_path, target)
+    self.assertEqual(target, self.os.readlink(link_path))
+    fh = self.open(link_path, 'r')
+    got_contents = fh.read()
+    fh.close()
+    self.assertEqual(target_contents, got_contents)
+
+  def testFollowLinkWrite(self):
+    link_path = '/foo/bar/TBD'
+    target = '/tarJAY'
+    target_contents = 'real baz contents'
+    self.filesystem.CreateLink(link_path, target)
+    self.assertFalse(self.filesystem.Exists(target))
+
+    fh = self.open(link_path, 'w')
+    fh.write(target_contents)
+    fh.close()
+    fh = self.open(target, 'r')
+    got_contents = fh.read()
+    fh.close()
+    self.assertEqual(target_contents, got_contents)
+
+  def testFollowIntraPathLinkWrite(self):
+    # Test a link in the middle of a file path.
+    link_path = '/foo/build/local_machine/output/1'
+    target = '/tmp/output/1'
+    self.filesystem.CreateDirectory('/tmp/output')
+    self.filesystem.CreateLink('/foo/build/local_machine', '/tmp')
+    self.assertFalse(self.filesystem.Exists(link_path))
+    self.assertFalse(self.filesystem.Exists(target))
+
+    target_contents = 'real baz contents'
+    fh = self.open(link_path, 'w')
+    fh.write(target_contents)
+    fh.close()
+    fh = self.open(target, 'r')
+    got_contents = fh.read()
+    fh.close()
+    self.assertEqual(target_contents, got_contents)
+
+  def testFileDescriptorsForDifferentFiles(self):
+    first_path = 'some_file1'
+    second_path = 'some_file2'
+    third_path = 'some_file3'
+    self.filesystem.CreateFile(first_path, contents='contents here1')
+    self.filesystem.CreateFile(second_path, contents='contents here2')
+    self.filesystem.CreateFile(third_path, contents='contents here3')
+
+    fake_file1 = self.open(first_path, 'r')
+    fake_file2 = self.open(second_path, 'r')
+    fake_file3 = self.open(third_path, 'r')
+    self.assertEqual(0, fake_file1.fileno())
+    self.assertEqual(1, fake_file2.fileno())
+    self.assertEqual(2, fake_file3.fileno())
+
+  def testFileDescriptorsForTheSameFileAreDifferent(self):
+    first_path = 'some_file1'
+    second_path = 'some_file2'
+    self.filesystem.CreateFile(first_path, contents='contents here1')
+    self.filesystem.CreateFile(second_path, contents='contents here2')
+
+    fake_file1 = self.open(first_path, 'r')
+    fake_file2 = self.open(second_path, 'r')
+    fake_file1a = self.open(first_path, 'r')
+    self.assertEqual(0, fake_file1.fileno())
+    self.assertEqual(1, fake_file2.fileno())
+    self.assertEqual(2, fake_file1a.fileno())
+
+  def testReusedFileDescriptorsDoNotAffectOthers(self):
+    first_path = 'some_file1'
+    second_path = 'some_file2'
+    third_path = 'some_file3'
+    self.filesystem.CreateFile(first_path, contents='contents here1')
+    self.filesystem.CreateFile(second_path, contents='contents here2')
+    self.filesystem.CreateFile(third_path, contents='contents here3')
+
+    fake_file1 = self.open(first_path, 'r')
+    fake_file2 = self.open(second_path, 'r')
+    fake_file3 = self.open(third_path, 'r')
+    fake_file1a = self.open(first_path, 'r')
+    self.assertEqual(0, fake_file1.fileno())
+    self.assertEqual(1, fake_file2.fileno())
+    self.assertEqual(2, fake_file3.fileno())
+    self.assertEqual(3, fake_file1a.fileno())
+
+    fake_file1.close()
+    fake_file2.close()
+    fake_file2 = self.open(second_path, 'r')
+    fake_file1b = self.open(first_path, 'r')
+    self.assertEqual(0, fake_file2.fileno())
+    self.assertEqual(1, fake_file1b.fileno())
+    self.assertEqual(2, fake_file3.fileno())
+    self.assertEqual(3, fake_file1a.fileno())
+
+  def testIntertwinedReadWrite(self):
+    file_path = 'some_file'
+    self.filesystem.CreateFile(file_path)
+    with self.open(file_path, 'a') as writer:
+      with self.open(file_path, 'r') as reader:
+        writes = ['hello', 'world\n', 'somewhere\nover', 'the\n', 'rainbow']
+        reads = []
+        # when writes are flushed, they are piped to the reader
+        for write in writes:
+          writer.write(write)
+          writer.flush()
+          reads.append(reader.read())
+          reader.flush()
+        self.assertEqual(writes, reads)
+        writes = ['nothing', 'to\nsee', 'here']
+        reads = []
+        # when writes are not flushed, the reader doesn't read anything new
+        for write in writes:
+          writer.write(write)
+          reads.append(reader.read())
+        self.assertEqual(['' for _ in writes], reads)
+
+  def testOpenIoErrors(self):
+    file_path = 'some_file'
+    self.filesystem.CreateFile(file_path)
+
+    with self.open(file_path, 'a') as fh:
+      self.assertRaises(IOError, fh.read)
+      self.assertRaises(IOError, fh.readlines)
+    with self.open(file_path, 'w') as fh:
+      self.assertRaises(IOError, fh.read)
+      self.assertRaises(IOError, fh.readlines)
+    with self.open(file_path, 'r') as fh:
+      self.assertRaises(IOError, fh.truncate)
+      self.assertRaises(IOError, fh.write, 'contents')
+      self.assertRaises(IOError, fh.writelines, ['con', 'tents'])
+
+    def _IteratorOpen(file_path, mode):
+      for _ in self.open(file_path, mode):
+        pass
+    self.assertRaises(IOError, _IteratorOpen, file_path, 'w')
+    self.assertRaises(IOError, _IteratorOpen, file_path, 'a')
+
+
+class OpenWithFileDescriptorTest(FakeFileOpenTestBase):
+
+  @unittest.skipIf(sys.version_info < (3, 0), 'only tested on 3.0 or greater')
+  def testOpenWithFileDescriptor(self):
+    file_path = 'this/file'
+    self.filesystem.CreateFile(file_path)
+    fd = self.os.open(file_path, os.O_CREAT)
+    self.assertEqual(fd, self.open(fd, 'r').fileno())
+
+  @unittest.skipIf(sys.version_info < (3, 0), 'only tested on 3.0 or greater')
+  def testClosefdWithFileDescriptor(self):
+    file_path = 'this/file'
+    self.filesystem.CreateFile(file_path)
+    fd = self.os.open(file_path, os.O_CREAT)
+    fh = self.open(fd, 'r', closefd=False)
+    fh.close()
+    self.assertIsNotNone(self.filesystem.open_files[fd])
+    fh = self.open(fd, 'r', closefd=True)
+    fh.close()
+    self.assertIsNone(self.filesystem.open_files[fd])
+
+
+class OpenWithBinaryFlagsTest(TestCase):
+
+  def setUp(self):
+    self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
+    self.file = fake_filesystem.FakeFileOpen(self.filesystem)
+    self.os = fake_filesystem.FakeOsModule(self.filesystem)
+    self.file_path = 'some_file'
+    self.file_contents = b'binary contents'
+    self.filesystem.CreateFile(self.file_path, contents=self.file_contents)
+
+  def OpenFakeFile(self, mode):
+    return self.file(self.file_path, mode=mode)
+
+  def OpenFileAndSeek(self, mode):
+    fake_file = self.file(self.file_path, mode=mode)
+    fake_file.seek(0, 2)
+    return fake_file
+
+  def WriteAndReopenFile(self, fake_file, mode='rb'):
+    fake_file.write(self.file_contents)
+    fake_file.close()
+    return self.file(self.file_path, mode=mode)
+
+  def testReadBinary(self):
+    fake_file = self.OpenFakeFile('rb')
+    self.assertEqual(self.file_contents, fake_file.read())
+
+  def testWriteBinary(self):
+    fake_file = self.OpenFileAndSeek('wb')
+    self.assertEqual(0, fake_file.tell())
+    fake_file = self.WriteAndReopenFile(fake_file, mode='rb')
+    self.assertEqual(self.file_contents, fake_file.read())
+    # reopen the file in text mode
+    fake_file = self.OpenFakeFile('wb')
+    fake_file = self.WriteAndReopenFile(fake_file, mode='r')
+    self.assertEqual(self.file_contents.decode('ascii'), fake_file.read())
+
+  def testWriteAndReadBinary(self):
+    fake_file = self.OpenFileAndSeek('w+b')
+    self.assertEqual(0, fake_file.tell())
+    fake_file = self.WriteAndReopenFile(fake_file, mode='rb')
+    self.assertEqual(self.file_contents, fake_file.read())
+
+
+class OpenWithIgnoredFlagsTest(TestCase):
+
+  def setUp(self):
+    self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
+    self.file = fake_filesystem.FakeFileOpen(self.filesystem)
+    self.os = fake_filesystem.FakeOsModule(self.filesystem)
+    self.file_path = 'some_file'
+    self.read_contents = self.file_contents = 'two\r\nlines'
+    # For python 3.x, text file newlines are converted to \n
+    if sys.version_info >= (3, 0):
+      self.read_contents = 'two\nlines'
+    self.filesystem.CreateFile(self.file_path, contents=self.file_contents)
+    # The fake file now exists with the given contents.
+
+  def OpenFakeFile(self, mode):
+    return self.file(self.file_path, mode=mode)
+
+  def OpenFileAndSeek(self, mode):
+    fake_file = self.file(self.file_path, mode=mode)
+    fake_file.seek(0, 2)
+    return fake_file
+
+  def WriteAndReopenFile(self, fake_file, mode='r'):
+    fake_file.write(self.file_contents)
+    fake_file.close()
+    return self.file(self.file_path, mode=mode)
+
+  def testReadText(self):
+    fake_file = self.OpenFakeFile('rt')
+    self.assertEqual(self.read_contents, fake_file.read())
+
+  def testReadUniversalNewlines(self):
+    fake_file = self.OpenFakeFile('rU')
+    self.assertEqual(self.read_contents, fake_file.read())
+
+  def testUniversalNewlines(self):
+    fake_file = self.OpenFakeFile('U')
+    self.assertEqual(self.read_contents, fake_file.read())
+
+  def testWriteText(self):
+    fake_file = self.OpenFileAndSeek('wt')
+    self.assertEqual(0, fake_file.tell())
+    fake_file = self.WriteAndReopenFile(fake_file)
+    self.assertEqual(self.read_contents, fake_file.read())
+
+  def testWriteAndReadTextBinary(self):
+    fake_file = self.OpenFileAndSeek('w+bt')
+    self.assertEqual(0, fake_file.tell())
+    if sys.version_info >= (3, 0):
+      self.assertRaises(TypeError, fake_file.write, self.file_contents)
+    else:
+      fake_file = self.WriteAndReopenFile(fake_file, mode='rb')
+      self.assertEqual(self.file_contents, fake_file.read())
+
+
+class OpenWithInvalidFlagsTest(FakeFileOpenTestBase):
+
+  def testCapitalR(self):
+    self.assertRaises(IOError, self.file, 'some_file', 'R')
+
+  def testCapitalW(self):
+    self.assertRaises(IOError, self.file, 'some_file', 'W')
+
+  def testCapitalA(self):
+    self.assertRaises(IOError, self.file, 'some_file', 'A')
+
+  def testLowerU(self):
+    self.assertRaises(IOError, self.file, 'some_file', 'u')
+
+  def testLowerRw(self):
+    self.assertRaises(IOError, self.file, 'some_file', 'rw')
+
+
+class ResolvePathTest(FakeFileOpenTestBase):
+
+  def __WriteToFile(self, file_name):
+    fh = self.open(file_name, 'w')
+    fh.write('x')
+    fh.close()
+
+  def testNoneFilepathRaisesTypeError(self):
+    self.assertRaises(TypeError, self.open, None, 'w')
+
+  def testEmptyFilepathRaisesIOError(self):
+    self.assertRaises(IOError, self.open, '', 'w')
+
+  def testNormalPath(self):
+    self.__WriteToFile('foo')
+    self.assertTrue(self.filesystem.Exists('foo'))
+
+  def testLinkWithinSameDirectory(self):
+    final_target = '/foo/baz'
+    self.filesystem.CreateLink('/foo/bar', 'baz')
+    self.__WriteToFile('/foo/bar')
+    self.assertTrue(self.filesystem.Exists(final_target))
+    self.assertEqual(1, self.os.stat(final_target)[stat.ST_SIZE])
+
+  def testLinkToSubDirectory(self):
+    final_target = '/foo/baz/bip'
+    self.filesystem.CreateDirectory('/foo/baz')
+    self.filesystem.CreateLink('/foo/bar', 'baz/bip')
+    self.__WriteToFile('/foo/bar')
+    self.assertTrue(self.filesystem.Exists(final_target))
+    self.assertEqual(1, self.os.stat(final_target)[stat.ST_SIZE])
+    self.assertTrue(self.filesystem.Exists('/foo/baz'))
+    # Make sure that intermediate directory got created.
+    new_dir = self.filesystem.GetObject('/foo/baz')
+    self.assertTrue(stat.S_IFDIR & new_dir.st_mode)
+
+  def testLinkToParentDirectory(self):
+    final_target = '/baz/bip'
+    self.filesystem.CreateDirectory('/foo')
+    self.filesystem.CreateDirectory('/baz')
+    self.filesystem.CreateLink('/foo/bar', '../baz')
+    self.__WriteToFile('/foo/bar/bip')
+    self.assertTrue(self.filesystem.Exists(final_target))
+    self.assertEqual(1, self.os.stat(final_target)[stat.ST_SIZE])
+    self.assertTrue(self.filesystem.Exists('/foo/bar'))
+
+  def testLinkToAbsolutePath(self):
+    final_target = '/foo/baz/bip'
+    self.filesystem.CreateDirectory('/foo/baz')
+    self.filesystem.CreateLink('/foo/bar', final_target)
+    self.__WriteToFile('/foo/bar')
+    self.assertTrue(self.filesystem.Exists(final_target))
+
+  def testRelativeLinksWorkAfterChdir(self):
+    final_target = '/foo/baz/bip'
+    self.filesystem.CreateDirectory('/foo/baz')
+    self.filesystem.CreateLink('/foo/bar', './baz/bip')
+    self.assertEqual(final_target,
+                     self.filesystem.ResolvePath('/foo/bar'))
+
+    os_module = fake_filesystem.FakeOsModule(self.filesystem)
+    self.assertTrue(os_module.path.islink('/foo/bar'))
+    os_module.chdir('/foo')
+    self.assertEqual('/foo', os_module.getcwd())
+    self.assertTrue(os_module.path.islink('bar'))
+
+    self.assertEqual('/foo/baz/bip',
+                     self.filesystem.ResolvePath('bar'))
+
+    self.__WriteToFile('/foo/bar')
+    self.assertTrue(self.filesystem.Exists(final_target))
+
+  def testAbsoluteLinksWorkAfterChdir(self):
+    final_target = '/foo/baz/bip'
+    self.filesystem.CreateDirectory('/foo/baz')
+    self.filesystem.CreateLink('/foo/bar', final_target)
+    self.assertEqual(final_target,
+                     self.filesystem.ResolvePath('/foo/bar'))
+
+    os_module = fake_filesystem.FakeOsModule(self.filesystem)
+    self.assertTrue(os_module.path.islink('/foo/bar'))
+    os_module.chdir('/foo')
+    self.assertEqual('/foo', os_module.getcwd())
+    self.assertTrue(os_module.path.islink('bar'))
+
+    self.assertEqual('/foo/baz/bip',
+                     self.filesystem.ResolvePath('bar'))
+
+    self.__WriteToFile('/foo/bar')
+    self.assertTrue(self.filesystem.Exists(final_target))
+
+  def testChdirThroughRelativeLink(self):
+    self.filesystem.CreateDirectory('/x/foo')
+    self.filesystem.CreateDirectory('/x/bar')
+    self.filesystem.CreateLink('/x/foo/bar', '../bar')
+    self.assertEqual('/x/bar', self.filesystem.ResolvePath('/x/foo/bar'))
+
+    os_module = fake_filesystem.FakeOsModule(self.filesystem)
+    os_module.chdir('/x/foo')
+    self.assertEqual('/x/foo', os_module.getcwd())
+    self.assertEqual('/x/bar', self.filesystem.ResolvePath('bar'))
+
+    os_module.chdir('bar')
+    self.assertEqual('/x/bar', os_module.getcwd())
+
+  def testReadLinkToLink(self):
+    # Write into the final link target and read back from a file which will
+    # point to that.
+    self.filesystem.CreateLink('/foo/bar', 'link')
+    self.filesystem.CreateLink('/foo/link', 'baz')
+    self.__WriteToFile('/foo/baz')
+    fh = self.open('/foo/bar', 'r')
+    self.assertEqual('x', fh.read())
+
+  def testWriteLinkToLink(self):
+    final_target = '/foo/baz'
+    self.filesystem.CreateLink('/foo/bar', 'link')
+    self.filesystem.CreateLink('/foo/link', 'baz')
+    self.__WriteToFile('/foo/bar')
+    self.assertTrue(self.filesystem.Exists(final_target))
+
+  def testMultipleLinks(self):
+    final_target = '/a/link1/c/link2/e'
+    self.os.makedirs('/a/link1/c/link2')
+
+    self.filesystem.CreateLink('/a/b', 'link1')
+    self.assertEqual('/a/link1', self.filesystem.ResolvePath('/a/b'))
+    self.assertEqual('/a/link1/c', self.filesystem.ResolvePath('/a/b/c'))
+
+    self.filesystem.CreateLink('/a/link1/c/d', 'link2')
+    self.assertTrue(self.filesystem.Exists('/a/link1/c/d'))
+    self.assertTrue(self.filesystem.Exists('/a/b/c/d'))
+
+    final_target = '/a/link1/c/link2/e'
+    self.assertFalse(self.filesystem.Exists(final_target))
+    self.__WriteToFile('/a/b/c/d/e')
+    self.assertTrue(self.filesystem.Exists(final_target))
+
+  def testTooManyLinks(self):
+    self.filesystem.CreateLink('/a/loop', 'loop')
+    self.assertFalse(self.filesystem.Exists('/a/loop'))
+
+
+class PathManipulationTests(TestCase):
+  def setUp(self):
+    self.filesystem = fake_filesystem.FakeFilesystem(path_separator='|')
+
+
+class CollapsePathPipeSeparatorTest(PathManipulationTests):
+  """Tests CollapsePath (mimics os.path.normpath) using | as path separator."""
+
+  def testEmptyPathBecomesDotPath(self):
+    self.assertEqual('.', self.filesystem.CollapsePath(''))
+
+  def testDotPathUnchanged(self):
+    self.assertEqual('.', self.filesystem.CollapsePath('.'))
+
+  def testSlashesAreNotCollapsed(self):
+    """Tests that '/' is not treated specially if the path separator is '|'.
+
+    In particular, multiple slashes should not be collapsed.
+    """
+    self.assertEqual('/', self.filesystem.CollapsePath('/'))
+    self.assertEqual('/////', self.filesystem.CollapsePath('/////'))
+
+  def testRootPath(self):
+    self.assertEqual('|', self.filesystem.CollapsePath('|'))
+
+  def testMultipleSeparatorsCollapsedIntoRootPath(self):
+    self.assertEqual('|', self.filesystem.CollapsePath('|||||'))
+
+  def testAllDotPathsRemovedButOne(self):
+    self.assertEqual('.', self.filesystem.CollapsePath('.|.|.|.'))
+
+  def testAllDotPathsRemovedIfAnotherPathComponentExists(self):
+    self.assertEqual('|', self.filesystem.CollapsePath('|.|.|.|'))
+    self.assertEqual('foo|bar', self.filesystem.CollapsePath('foo|.|.|.|bar'))
+
+  def testIgnoresUpLevelReferencesStartingFromRoot(self):
+    self.assertEqual('|', self.filesystem.CollapsePath('|..|..|..|'))
+    self.assertEqual('|', self.filesystem.CollapsePath('||..|.|..||'))
+    self.assertEqual(
+        '|', self.filesystem.CollapsePath('|..|..|foo|bar|..|..|'))
+
+  def testConservesUpLevelReferencesStartingFromCurrentDirectory(self):
+    self.assertEqual(
+        '..|..', self.filesystem.CollapsePath('..|foo|bar|..|..|..'))
+
+  def testCombineDotAndUpLevelReferencesInAbsolutePath(self):
+    self.assertEqual(
+        '|yes', self.filesystem.CollapsePath('|||||.|..|||yes|no|..|.|||'))
+
+  def testDotsInPathCollapsesToLastPath(self):
+    self.assertEqual(
+        'bar', self.filesystem.CollapsePath('foo|..|bar'))
+    self.assertEqual(
+        'bar', self.filesystem.CollapsePath('foo|..|yes|..|no|..|bar'))
+
+
+class SplitPathTest(PathManipulationTests):
+  """Tests SplitPath (which mimics os.path.split) using | as path separator."""
+
+  def testEmptyPath(self):
+    self.assertEqual(('', ''), self.filesystem.SplitPath(''))
+
+  def testNoSeparators(self):
+    self.assertEqual(('', 'ab'), self.filesystem.SplitPath('ab'))
+
+  def testSlashesDoNotSplit(self):
+    """Tests that '/' is not treated specially if the path separator is '|'."""
+    self.assertEqual(('', 'a/b'), self.filesystem.SplitPath('a/b'))
+
+  def testEliminateTrailingSeparatorsFromHead(self):
+    self.assertEqual(('a', 'b'), self.filesystem.SplitPath('a|b'))
+    self.assertEqual(('a', 'b'), self.filesystem.SplitPath('a|||b'))
+    self.assertEqual(('|a', 'b'), self.filesystem.SplitPath('|a||b'))
+    self.assertEqual(('a|b', 'c'), self.filesystem.SplitPath('a|b|c'))
+    self.assertEqual(('|a|b', 'c'), self.filesystem.SplitPath('|a|b|c'))
+
+  def testRootSeparatorIsNotStripped(self):
+    self.assertEqual(('|', ''), self.filesystem.SplitPath('|||'))
+    self.assertEqual(('|', 'a'), self.filesystem.SplitPath('|a'))
+    self.assertEqual(('|', 'a'), self.filesystem.SplitPath('|||a'))
+
+  def testEmptyTailIfPathEndsInSeparator(self):
+    self.assertEqual(('a|b', ''), self.filesystem.SplitPath('a|b|'))
+
+  def testEmptyPathComponentsArePreservedInHead(self):
+    self.assertEqual(('|a||b', 'c'), self.filesystem.SplitPath('|a||b||c'))
+
+
+class JoinPathTest(PathManipulationTests):
+  """Tests JoinPath (which mimics os.path.join) using | as path separator."""
+
+  def testOneEmptyComponent(self):
+    self.assertEqual('', self.filesystem.JoinPaths(''))
+
+  def testMultipleEmptyComponents(self):
+    self.assertEqual('', self.filesystem.JoinPaths('', '', ''))
+
+  def testSeparatorsNotStrippedFromSingleComponent(self):
+    self.assertEqual('||a||', self.filesystem.JoinPaths('||a||'))
+
+  def testOneSeparatorAddedBetweenComponents(self):
+    self.assertEqual('a|b|c|d', self.filesystem.JoinPaths('a', 'b', 'c', 'd'))
+
+  def testNoSeparatorAddedForComponentsEndingInSeparator(self):
+    self.assertEqual('a|b|c', self.filesystem.JoinPaths('a|', 'b|', 'c'))
+    self.assertEqual('a|||b|||c',
+                     self.filesystem.JoinPaths('a|||', 'b|||', 'c'))
+
+  def testComponentsPrecedingAbsoluteComponentAreIgnored(self):
+    self.assertEqual('|c|d', self.filesystem.JoinPaths('a', '|b', '|c', 'd'))
+
+  def testOneSeparatorAddedForTrailingEmptyComponents(self):
+    self.assertEqual('a|', self.filesystem.JoinPaths('a', ''))
+    self.assertEqual('a|', self.filesystem.JoinPaths('a', '', ''))
+
+  def testNoSeparatorAddedForLeadingEmptyComponents(self):
+    self.assertEqual('a', self.filesystem.JoinPaths('', 'a'))
+
+  def testInternalEmptyComponentsIgnored(self):
+    self.assertEqual('a|b', self.filesystem.JoinPaths('a', '', 'b'))
+    self.assertEqual('a|b|', self.filesystem.JoinPaths('a|', '', 'b|'))
+
+
+class PathSeparatorTest(TestCase):
+  def testOsPathSepMatchesFakeFilesystemSeparator(self):
+    filesystem = fake_filesystem.FakeFilesystem(path_separator='!')
+    fake_os = fake_filesystem.FakeOsModule(filesystem)
+    self.assertEqual('!', fake_os.sep)
+    self.assertEqual('!', fake_os.path.sep)
+
+
+if __name__ == '__main__':
+  main()
diff --git a/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_filesystem_unittest.py b/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_filesystem_unittest.py
new file mode 100644
index 0000000..692fb2f
--- /dev/null
+++ b/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_filesystem_unittest.py
@@ -0,0 +1,226 @@
+# Copyright 2014 Altera Corporation. All Rights Reserved.
+# Copyright 2015 John McGehee
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A base class for unit tests using the :py:class:`pyfakefs` module.
+
+This class searches `sys.modules` for modules that import the `os`, `glob`,
+`shutil`, and `tempfile` modules.
+
+The `setUpPyfakefs()` method binds these modules to the corresponding fake
+modules from `pyfakefs`.  Further, the built-in functions `file()` and
+`open()` are bound to fake functions.
+
+The `tearDownPyfakefs()` method restores the module bindings to their
+original state.
+
+It is expected that `setUpPyfakefs()` be invoked at the beginning of the
+derived class' `setUp()` method.  Tear-down is registered automatically via
+`addCleanup()`, so `tearDownPyfakefs()` remains only for backward
+compatibility.
+
+During the test, everything uses the fake file system and modules.  This means
+that even in your test, you can use familiar functions like `open()` and
+`os.makedirs()` to manipulate the fake file system.
+
+This also means existing unit tests that use the real file system can be
+retrofitted to use `pyfakefs` by simply changing their base class from
+:py:class:`unittest.TestCase` to
+:py:class:`pyfakefs.fake_filesystem_unittest.TestCase`.
+"""
+
+import sys
+import unittest
+import doctest
+import fake_filesystem
+import fake_filesystem_glob
+import fake_filesystem_shutil
+import fake_tempfile
+if sys.version_info < (3,):
+    import __builtin__ as builtins
+else:
+    import builtins
+
+import mox3.stubout
+
+def load_doctests(loader, tests, ignore, module):
+    '''Load the doctest tests for the specified module into unittest.'''
+    _patcher = Patcher()
+    globs = _patcher.replaceGlobs(vars(module))
+    tests.addTests(doctest.DocTestSuite(module,
+                                        globs=globs,
+                                        setUp=_patcher.setUp,
+                                        tearDown=_patcher.tearDown))
+    return tests
+
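+# A hedged sketch of how a test module might hook its doctests up through
+# load_doctests() via unittest's standard load_tests protocol.  The module
+# name `my_module` is a made-up placeholder, not part of pyfakefs.
+#
+#   import my_module
+#   import fake_filesystem_unittest
+#
+#   def load_tests(loader, tests, ignore):
+#       return fake_filesystem_unittest.load_doctests(
+#           loader, tests, ignore, my_module)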
+
+class TestCase(unittest.TestCase):
+    def __init__(self, methodName='runTest'):
+        super(TestCase, self).__init__(methodName)
+        self._stubber = Patcher()
+        
+    @property
+    def fs(self):
+        return self._stubber.fs
+    
+    @property
+    def patches(self):
+        return self._stubber.patches
+        
+    def setUpPyfakefs(self):
+        '''Bind the file-related modules to the :py:class:`pyfakefs` fake file
+        system instead of the real file system.  Also bind the fake `file()` and
+        `open()` functions.
+        
+        Invoke this at the beginning of the `setUp()` method in your unit test
+        class.
+        '''
+        self._stubber.setUp()
+        self.addCleanup(self._stubber.tearDown)
+
+    
+    def tearDownPyfakefs(self):
+        ''':meth:`pyfakefs.fake_filesystem_unittest.setUpPyfakefs` registers the
+        tear down procedure using :meth:`unittest.TestCase.addCleanup`.  Thus this
+        method is deprecated, and remains just for backward compatibility.
+        '''
+        pass
+
+class Patcher(object):
+    '''
+    Instantiate a stub creator to bind and un-bind the file-related modules to
+    the :py:mod:`pyfakefs` fake modules.
+    '''
+    SKIPMODULES = set([None, fake_filesystem, fake_filesystem_glob,
+                      fake_filesystem_shutil, fake_tempfile, sys])
+    '''Stub nothing that is imported within these modules.
+    `sys` is included to prevent `sys.path` from being stubbed with the fake
+    `os.path`.
+    '''
+    assert None in SKIPMODULES, "sys.modules contains 'None' values; must skip them."
+    
+    SKIPNAMES = set(['os', 'glob', 'path', 'shutil', 'tempfile'])
+        
+    def __init__(self):
+        # Attributes set by _findModules()
+        self._osModules = None
+        self._globModules = None
+        self._pathModules = None
+        self._shutilModules = None
+        self._tempfileModules = None
+        self._findModules()
+        assert None not in vars(self).values(), \
+                "_findModules() missed the initialization of an instance variable"
+        
+        # Attributes set by _refresh()
+        self._stubs = None
+        self.fs = None
+        self.fake_os = None
+        self.fake_glob = None
+        self.fake_path = None
+        self.fake_shutil = None
+        self.fake_tempfile_ = None
+        self.fake_open = None
+        # _isStale is set by tearDown(), reset by _refresh()
+        self._isStale = True
+        self._refresh()
+        assert None not in vars(self).values(), \
+                "_refresh() missed the initialization of an instance variable"
+        assert self._isStale == False, "_refresh() did not reset _isStale"
+        
+    def _findModules(self):
+        '''Find and cache all modules that import file system modules.
+        Later, `setUp()` will stub these with the fake file system
+        modules.
+        '''
+        self._osModules = set()
+        self._globModules = set()
+        self._pathModules = set()
+        self._shutilModules = set()
+        self._tempfileModules = set()
+        for name, module in set(sys.modules.items()):
+            if module in self.SKIPMODULES or name in self.SKIPNAMES:
+                continue
+            if 'os' in module.__dict__:
+                self._osModules.add(module)
+            if 'glob' in module.__dict__:
+                self._globModules.add(module)
+            if 'path' in module.__dict__:
+                self._pathModules.add(module)
+            if 'shutil' in module.__dict__:
+                self._shutilModules.add(module)
+            if 'tempfile' in module.__dict__:
+                self._tempfileModules.add(module)
+
+    def _refresh(self):
+        '''Renew the fake file system and set the _isStale flag to `False`.'''
+        if self._stubs is not None:
+            self._stubs.SmartUnsetAll()
+        self._stubs = mox3.stubout.StubOutForTesting()
+
+        self.fs = fake_filesystem.FakeFilesystem()
+        self.fake_os = fake_filesystem.FakeOsModule(self.fs)
+        self.fake_glob = fake_filesystem_glob.FakeGlobModule(self.fs)
+        self.fake_path = self.fake_os.path
+        self.fake_shutil = fake_filesystem_shutil.FakeShutilModule(self.fs)
+        self.fake_tempfile_ = fake_tempfile.FakeTempfileModule(self.fs)
+        self.fake_open = fake_filesystem.FakeFileOpen(self.fs)
+
+        self._isStale = False
+
+    def setUp(self, doctester=None):
+        '''Bind the file-related modules to the :py:mod:`pyfakefs` fake
+        modules instead of the real ones.  Also bind the fake `file()` and
+        `open()` functions.
+        '''
+        if self._isStale:
+            self._refresh()
+        
+        if doctester is not None:
+            doctester.globs = self.replaceGlobs(doctester.globs)
+            
+        if sys.version_info < (3,):
+            # No file() in Python3
+            self._stubs.SmartSet(builtins, 'file', self.fake_open)
+        self._stubs.SmartSet(builtins, 'open', self.fake_open)
+        
+        for module in self._osModules:
+            self._stubs.SmartSet(module,  'os', self.fake_os)
+        for module in self._globModules:
+            self._stubs.SmartSet(module,  'glob', self.fake_glob)
+        for module in self._pathModules:
+            self._stubs.SmartSet(module,  'path', self.fake_path)
+        for module in self._shutilModules:
+            self._stubs.SmartSet(module,  'shutil', self.fake_shutil)
+        for module in self._tempfileModules:
+            self._stubs.SmartSet(module,  'tempfile', self.fake_tempfile_)
+    
+    def replaceGlobs(self, globs_):
+        globs = globs_.copy()
+        if self._isStale:
+            self._refresh()
+        if 'os' in globs:
+            globs['os'] = fake_filesystem.FakeOsModule(self.fs)
+        if 'glob' in globs:
+            globs['glob'] = fake_filesystem_glob.FakeGlobModule(self.fs)
+        if 'path' in globs:
+            globs['path'] =  fake_filesystem.FakePathModule(self.fs)
+        if 'shutil' in globs:
+            globs['shutil'] = fake_filesystem_shutil.FakeShutilModule(self.fs)
+        if 'tempfile' in globs:
+            globs['tempfile'] = fake_tempfile.FakeTempfileModule(self.fs)
+        return globs
+    
+    def tearDown(self, doctester=None):
+        '''Clear the fake filesystem bindings created by `setUp()`.'''
+        self._isStale = True
+        self._stubs.SmartUnsetAll()
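+
+# Illustrative sketch (not part of the original module): the Patcher can also
+# be driven directly, outside of the TestCase base class, by fixtures that
+# manage their own setup/teardown.  The file path below is a made-up example;
+# the attributes and methods used (fs, setUp, tearDown, CreateFile) all exist
+# in this module or in fake_filesystem.
+#
+#   patcher = Patcher()
+#   patcher.setUp()       # os/glob/shutil/tempfile/open now use the fake fs
+#   patcher.fs.CreateFile('/tmp/example.txt', contents='x')
+#   patcher.tearDown()    # original module bindings are restored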
diff --git a/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_filesystem_unittest_test.py b/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_filesystem_unittest_test.py
new file mode 100644
index 0000000..7d0ac70
--- /dev/null
+++ b/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_filesystem_unittest_test.py
@@ -0,0 +1,107 @@
+#! /usr/bin/env python
+#
+# Copyright 2014 Altera Corporation. All Rights Reserved.
+# Author: John McGehee
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Test the :py:class:`pyfakefs.fake_filesystem_unittest.TestCase` base class.
+"""
+
+import os
+import glob
+import shutil
+import tempfile
+import sys
+if sys.version_info < (2, 7):
+    import unittest2 as unittest
+else:
+    import unittest
+import fake_filesystem_unittest
+import pytest
+
+class TestPyfakefsUnittest(fake_filesystem_unittest.TestCase): # pylint: disable=R0904
+    '''Test the `pyfakefs.fake_filesystem_unittest.TestCase` base class.'''
+
+    def setUp(self):
+        '''Set up the fake file system'''
+        self.setUpPyfakefs()
+
+    def tearDown(self):
+        '''Tear down the fake file system'''
+        self.tearDownPyfakefs()
+
+    @unittest.skipIf(sys.version_info >= (3,), "file() was removed in Python 3")
+    def test_file(self):
+        '''Fake `file()` function is bound'''
+        self.assertFalse(os.path.exists('/fake_file.txt'))
+        with file('/fake_file.txt', 'w') as f:
+            f.write("This test file was created using the file() function.\n")
+        self.assertTrue(self.fs.Exists('/fake_file.txt'))
+        with file('/fake_file.txt') as f:
+            content = f.read()
+        self.assertEqual(content,
+                         'This test file was created using the file() function.\n')
+            
+    def test_open(self):
+        '''Fake `open()` function is bound'''
+        self.assertFalse(os.path.exists('/fake_file.txt'))
+        with open('/fake_file.txt', 'w') as f:
+            f.write("This test file was created using the open() function.\n")
+        self.assertTrue(self.fs.Exists('/fake_file.txt'))
+        with open('/fake_file.txt') as f:
+            content = f.read()
+        self.assertEqual(content,
+                         'This test file was created using the open() function.\n')
+            
+    def test_os(self):
+        '''Fake os module is bound'''
+        self.assertFalse(self.fs.Exists('/test/dir1/dir2'))          
+        os.makedirs('/test/dir1/dir2')
+        self.assertTrue(self.fs.Exists('/test/dir1/dir2'))          
+        
+    def test_glob(self):
+        '''Fake glob module is bound'''
+        self.assertCountEqual(glob.glob('/test/dir1/dir*'),
+                              [])
+        self.fs.CreateDirectory('/test/dir1/dir2a')
+        self.assertCountEqual(glob.glob('/test/dir1/dir*'),
+                              ['/test/dir1/dir2a'])
+        self.fs.CreateDirectory('/test/dir1/dir2b')
+        self.assertCountEqual(glob.glob('/test/dir1/dir*'),
+                              ['/test/dir1/dir2a', '/test/dir1/dir2b'])
+
+    def test_shutil(self):
+        '''Fake shutil module is bound'''
+        self.fs.CreateDirectory('/test/dir1/dir2a')
+        self.fs.CreateDirectory('/test/dir1/dir2b')
+        self.assertTrue(self.fs.Exists('/test/dir1/dir2b'))
+        self.assertTrue(self.fs.Exists('/test/dir1/dir2a'))
+       
+        shutil.rmtree('/test/dir1')
+        self.assertFalse(self.fs.Exists('/test/dir1'))
+
+    def test_tempfile(self):
+        '''Fake tempfile module is bound'''
+        with tempfile.NamedTemporaryFile() as tf:
+            tf.write(b'Temporary file contents\n')
+            name = tf.name
+            self.assertTrue(self.fs.Exists(tf.name))
+    
+    def test_pytest(self):
+        '''Compatibility with the :py:mod:`pytest` module.'''
+        pass
+                      
+if __name__ == "__main__":
+    unittest.main()
diff --git a/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_filesystem_vs_real_test.py b/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_filesystem_vs_real_test.py
new file mode 100755
index 0000000..9da1bc8
--- /dev/null
+++ b/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_filesystem_vs_real_test.py
@@ -0,0 +1,612 @@
+#! /usr/bin/env python
+#
+# Copyright 2009 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#            http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Test that FakeFilesystem calls work identically to a real filesystem."""
+
+#pylint: disable-all
+
+import os #@UnusedImport
+import os.path
+import shutil
+import sys
+import tempfile
+import time
+import sys
+if sys.version_info < (2, 7):
+    import unittest2 as unittest
+else:
+    import unittest
+
+import fake_filesystem
+
+
+def Sep(path):
+    """Converts slashes in the path to the architecture's path seperator."""
+    if isinstance(path, str):
+        return path.replace('/', os.sep)
+    return path
+
+
+class TestCase(unittest.TestCase):
+    is_windows = sys.platform.startswith('win')
+    is_cygwin = sys.platform == 'cygwin'
+    _FAKE_FS_BASE = Sep('/fakefs')
+
+
+class FakeFilesystemVsRealTest(TestCase):
+
+    def _Paths(self, path):
+        """For a given path, return paths in the real and fake filesystems."""
+        if not path:
+            return (None, None)
+        return (os.path.join(self.real_base, path),
+                        os.path.join(self.fake_base, path))
+
+    def _CreateTestFile(self, file_type, path, contents=None):
+        """Create a dir, file, or link in both the real fs and the fake."""
+        path = Sep(path)
+        self._created_files.append([file_type, path, contents])
+        real_path, fake_path = self._Paths(path)
+        if file_type == 'd':
+            os.mkdir(real_path)
+            self.fake_os.mkdir(fake_path)
+        if file_type == 'f':
+            fh = open(real_path, 'w')
+            fh.write(contents or '')
+            fh.close()
+            fh = self.fake_open(fake_path, 'w')
+            fh.write(contents or '')
+            fh.close()
+        # b for binary file
+        if file_type == 'b':
+            fh = open(real_path, 'wb')
+            fh.write(contents or '')
+            fh.close()
+            fh = self.fake_open(fake_path, 'wb')
+            fh.write(contents or '')
+            fh.close()
+        # l for symlink, h for hard link
+        if file_type in ('l', 'h'):
+            real_target, fake_target = (contents, contents)
+            # If it begins with '/', make it relative to the base.    You can't go
+            # creating files in / for the real file system.
+            if contents.startswith(os.sep):
+                real_target, fake_target = self._Paths(contents[1:])
+            if file_type == 'l':
+                os.symlink(real_target, real_path)
+                self.fake_os.symlink(fake_target, fake_path)
+            elif file_type == 'h':
+                os.link(real_target, real_path)
+                self.fake_os.link(fake_target, fake_path)
+
+    def setUp(self):
+        # Base paths in the real and test file systems.     We keep them different
+        # so that missing features in the fake don't fall through to the base
+        # operations and magically succeed.
+        tsname = 'fakefs.%s' % time.time()
+        # Fully expand the base_path - required on OS X.
+        self.real_base = os.path.realpath(
+                os.path.join(tempfile.gettempdir(), tsname))
+        os.chdir(tempfile.gettempdir())
+        if os.path.isdir(self.real_base):
+            shutil.rmtree(self.real_base)
+        os.mkdir(self.real_base)
+        self.fake_base = self._FAKE_FS_BASE
+
+        # Make sure we can write to the physical testing temp directory.
+        self.assertTrue(os.access(self.real_base, os.W_OK))
+
+        self.fake_filesystem = fake_filesystem.FakeFilesystem()
+        self.fake_filesystem.CreateDirectory(self.fake_base)
+        self.fake_os = fake_filesystem.FakeOsModule(self.fake_filesystem)
+        self.fake_open = fake_filesystem.FakeFileOpen(self.fake_filesystem)
+        self._created_files = []
+
+        os.chdir(self.real_base)
+        self.fake_os.chdir(self.fake_base)
+
+    def tearDown(self):
+        # We have to remove all the files from the real FS. Doing the same for the
+        # fake FS is optional, but doing it is an extra sanity check.
+        os.chdir(tempfile.gettempdir())
+        try:
+            rev_files = self._created_files[:]
+            rev_files.reverse()
+            for info in rev_files:
+                real_path, fake_path = self._Paths(info[1])
+                if info[0] == 'd':
+                    try:
+                        os.rmdir(real_path)
+                    except OSError as e:
+                        if 'Directory not empty' in str(e):
+                            self.fail('Real path %s not empty: %s : %s' % (
+                                    real_path, e, os.listdir(real_path)))
+                        else:
+                            raise
+                    self.fake_os.rmdir(fake_path)
+                if info[0] == 'f' or info[0] == 'l':
+                    os.remove(real_path)
+                    self.fake_os.remove(fake_path)
+        finally:
+            shutil.rmtree(self.real_base)
+
+    def _GetErrno(self, raised_error):
+        try:
+            return (raised_error and raised_error.errno) or None
+        except AttributeError:
+            return None
+
+    def _CompareBehaviors(self, method_name, path, real, fake,
+                                                method_returns_path=False):
+        """Invoke an os method in both real and fake contexts and compare results.
+
+        Invoke a real filesystem method with a path to a real file and invoke a fake
+        filesystem method with a path to a fake file and compare the results.    We
+        expect some calls to throw Exceptions, so we catch those and compare them.
+
+        Args:
+            method_name: Name of method being tested, for use in error messages.
+            path: potential path to a file in the real and fake file systems; passing
+                an empty tuple indicates that no arguments are passed to the method.
+            real: built-in system library or method from the built-in system library
+                which takes a path as an arg and returns some value.
+            fake: fake_filesystem object or method from a fake_filesystem class
+                which takes a path as an arg and returns some value.
+            method_returns_path: True if the method returns a path, and thus we must
+                compensate for expected difference between real and fake.
+
+        Returns:
+            A description of the difference in behavior, or None.
+        """
+        # pylint: disable=C6403
+
+        def _ErrorClass(e):
+            return (e and e.__class__.__name__) or 'None'
+
+        real_value = None
+        fake_value = None
+        real_err = None
+        fake_err = None
+        method_call = '%s' % method_name
+        method_call += '()' if path == () else '(%s)' % path
+        # Catching Exception below gives a lint warning, but it's what we need.
+        try:
+            args = [] if path == () else [path]
+            real_method = real
+            if not callable(real):
+                real_method = getattr(real, method_name)
+            real_value = str(real_method(*args))
+        except Exception as e:    # pylint: disable-msg=W0703
+            real_err = e
+        try:
+            fake_method = fake
+            if not callable(fake):
+                fake_method = getattr(fake, method_name)
+            args = [] if path == () else [path]
+            fake_value = str(fake_method(*args))
+        except Exception as e:    # pylint: disable-msg=W0703
+            fake_err = e
+        # We only compare on the error class because the actual error contents
+        # almost always differ because of the file paths.
+        if _ErrorClass(real_err) != _ErrorClass(fake_err):
+            if real_err is None:
+                return '%s: real version returned %s, fake raised %s' % (
+                        method_call, real_value, _ErrorClass(fake_err))
+            if fake_err is None:
+                return '%s: real version raised %s, fake returned %s' % (
+                        method_call, _ErrorClass(real_err), fake_value)
+            return '%s: real version raised %s, fake raised %s' % (
+                    method_call, _ErrorClass(real_err), _ErrorClass(fake_err))
+        real_errno = self._GetErrno(real_err)
+        fake_errno = self._GetErrno(fake_err)
+        if real_errno != fake_errno:
+            return '%s(%s): both raised %s, real errno %s, fake errno %s' % (
+                    method_name, path, _ErrorClass(real_err), real_errno, fake_errno)
+        # If the method is supposed to return a full path AND both values
+        # begin with the expected full path, then trim it off.
+        if method_returns_path:
+            if (real_value and fake_value
+                    and real_value.startswith(self.real_base)
+                    and fake_value.startswith(self.fake_base)):
+                real_value = real_value[len(self.real_base):]
+                fake_value = fake_value[len(self.fake_base):]
+        if real_value != fake_value:
+            return '%s: real returned %s, fake returned %s' % (
+                    method_call, real_value, fake_value)
+        return None
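+
+    # Illustrative (hypothetical) use of the comparison helper above: check
+    # that os.path.exists behaves identically on a path that exists in neither
+    # filesystem.  A None return means the real and fake behaviors matched.
+    #
+    #   diff = self._CompareBehaviors('exists', 'no/such/file',
+    #                                 os.path, self.fake_os.path)
+    #   assert diff is None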
+
+    def assertOsMethodBehaviorMatches(self, method_name, path,
+                                                                        method_returns_path=False):
+        """Invoke an os method in both real and fake contexts and compare.
+
+        For a given method name (from the os module) and a path, compare the
+        behavior of the system-provided module against the fake_filesystem module.
+        We expect results and/or Exceptions raised to be identical.
+
+        Args:
+            method_name: Name of method being tested.
+            path: potential path to a file in the real and fake file systems.
+            method_returns_path: True if the method returns a path, and thus we must
+                compensate for expected difference between real and fake.
+
+        Returns:
+            A description of the difference in behavior, or None.
+        """
+        path = Sep(path)
+        return self._CompareBehaviors(method_name, path, os, self.fake_os,
+                                                                    method_returns_path)
+
+    def DiffOpenMethodBehavior(self, method_name, path, mode, data,
+                                                         method_returns_data=True):
+        """Invoke an open method in both real and fkae contexts and compare.
+
+        Args:
+            method_name: Name of method being tested.
+            path: potential path to a file in the real and fake file systems.
+            mode: how to open the file.
+            data: any data to pass to the method.
+            method_returns_data: True if a method returns some sort of data.
+
+        For a given method name (from builtin open) and a path, compare the
+        behavior of the system-provided module against the fake_filesystem module.
+        We expect results and/or Exceptions raised to be identical.
+
+        Returns:
+            A description of the difference in behavior, or None.
+        """
+        with open(path, mode) as real_fh:
+            with self.fake_open(path, mode) as fake_fh:
+                return self._CompareBehaviors(method_name, data, real_fh, fake_fh,
+                                                                            method_returns_data)
+
+    def DiffOsPathMethodBehavior(self, method_name, path,
+                                                             method_returns_path=False):
+        """Invoke an os.path method in both real and fake contexts and compare.
+
+        For a given method name (from the os.path module) and a path, compare the
+        behavior of the system-provided module against the fake_filesystem module.
+        We expect results and/or Exceptions raised to be identical.
+
+        Args:
+            method_name: Name of method being tested.
+            path: potential path to a file in the real and fake file systems.
+            method_returns_path: True if the method returns a path, and thus we must
+                compensate for expected difference between real and fake.
+
+        Returns:
+            A description of the difference in behavior, or None.
+        """
+        return self._CompareBehaviors(method_name, path, os.path, self.fake_os.path,
+                                                                    method_returns_path)
+
+    def assertOsPathMethodBehaviorMatches(self, method_name, path,
+                                                                                method_returns_path=False):
+        """Assert that an os.path behaves the same in both real and fake contexts.
+
+        Wraps DiffOsPathMethodBehavior, raising AssertionError if any differences
+        are reported.
+
+        Args:
+            method_name: Name of method being tested.
+            path: potential path to a file in the real and fake file systems.
+            method_returns_path: True if the method returns a path, and thus we must
+                compensate for expected difference between real and fake.
+
+        Raises:
+            AssertionError if there is any difference in behavior.
+        """
+        path = Sep(path)
+        diff = self.DiffOsPathMethodBehavior(method_name, path, method_returns_path)
+        if diff:
+            self.fail(diff)
+
+    def assertAllOsBehaviorsMatch(self, path):
+        path = Sep(path)
+        os_method_names = [] if self.is_windows else ['readlink']
+        os_method_names_no_args = ['getcwd']
+        if sys.version_info < (3, 0):
+            os_method_names_no_args.append('getcwdu')
+        os_path_method_names = ['isabs',
+                                                        'isdir',
+                                                        'isfile',
+                                                        'exists'
+                                                     ]
+        if not self.is_windows:
+            os_path_method_names.append('islink')
+            os_path_method_names.append('lexists')
+        wrapped_methods = [['access', self._AccessReal, self._AccessFake],
+                                             ['stat.size', self._StatSizeReal, self._StatSizeFake],
+                                             ['lstat.size', self._LstatSizeReal, self._LstatSizeFake]
+                                            ]
+
+        differences = []
+        for method_name in os_method_names:
+            diff = self.assertOsMethodBehaviorMatches(method_name, path)
+            if diff:
+                differences.append(diff)
+        for method_name in os_method_names_no_args:
+            diff = self.assertOsMethodBehaviorMatches(method_name, (),
+                                                                                                method_returns_path=True)
+            if diff:
+                differences.append(diff)
+        for method_name in os_path_method_names:
+            diff = self.DiffOsPathMethodBehavior(method_name, path)
+            if diff:
+                differences.append(diff)
+        for m in wrapped_methods:
+            diff = self._CompareBehaviors(m[0], path, m[1], m[2])
+            if diff:
+                differences.append(diff)
+        if differences:
+            self.fail('Behaviors do not match for %s:\n    %s' %
+                                (path, '\n    '.join(differences)))
+
+    def assertFileHandleBehaviorsMatch(self, path, mode, data):
+        path = Sep(path)
+        write_method_names = ['write', 'writelines']
+        read_method_names = ['read', 'readlines']
+        other_method_names = ['truncate', 'flush', 'close']
+        differences = []
+        for method_name in write_method_names:
+            diff = self.DiffOpenMethodBehavior(method_name, path, mode, data)
+            if diff:
+                differences.append(diff)
+        for method_name in read_method_names + other_method_names:
+            diff = self.DiffOpenMethodBehavior(method_name, path, mode, ())
+            if diff:
+                differences.append(diff)
+        if differences:
+            self.fail('Behaviors do not match for %s:\n    %s' %
+                                (path, '\n    '.join(differences)))
+
+    # Helpers for checks which are not straight method calls.
+
+    def _AccessReal(self, path):
+        return os.access(path, os.R_OK)
+
+    def _AccessFake(self, path):
+        return self.fake_os.access(path, os.R_OK)
+
+    def _StatSizeReal(self, path):
+        real_path, unused_fake_path = self._Paths(path)
+        # fake_filesystem.py does not implement stat().st_size for directories
+        if os.path.isdir(real_path):
+            return None
+        return os.stat(real_path).st_size
+
+    def _StatSizeFake(self, path):
+        unused_real_path, fake_path = self._Paths(path)
+        # fake_filesystem.py does not implement stat().st_size for directories
+        if self.fake_os.path.isdir(fake_path):
+            return None
+        return self.fake_os.stat(fake_path).st_size
+
+    def _LstatSizeReal(self, path):
+        real_path, unused_fake_path = self._Paths(path)
+        if os.path.isdir(real_path):
+            return None
+        size = os.lstat(real_path).st_size
+        # Account for the difference in the lengths of the absolute paths.
+        if os.path.islink(real_path):
+            if os.readlink(real_path).startswith(os.sep):
+                size -= len(self.real_base)
+        return size
+
+    def _LstatSizeFake(self, path):
+        unused_real_path, fake_path = self._Paths(path)
+        if self.fake_os.path.isdir(fake_path):
+            return None
+        size = self.fake_os.lstat(fake_path).st_size
+        # Account for the difference in the lengths of the absolute paths.
+        if self.fake_os.path.islink(fake_path):
+            if self.fake_os.readlink(fake_path).startswith(os.sep):
+                size -= len(self.fake_base)
+        return size
+
+    def testIsabs(self):
+        # We do not have to create any files for isabs.
+        self.assertOsPathMethodBehaviorMatches('isabs', None)
+        self.assertOsPathMethodBehaviorMatches('isabs', '')
+        self.assertOsPathMethodBehaviorMatches('isabs', '/')
+        self.assertOsPathMethodBehaviorMatches('isabs', '/a')
+        self.assertOsPathMethodBehaviorMatches('isabs', 'a')
+
+    def testNonePath(self):
+        self.assertAllOsBehaviorsMatch(None)
+
+    def testEmptyPath(self):
+        self.assertAllOsBehaviorsMatch('')
+
+    def testRootPath(self):
+        self.assertAllOsBehaviorsMatch('/')
+
+    def testNonExistantFile(self):
+        self.assertAllOsBehaviorsMatch('foo')
+
+    def testEmptyFile(self):
+        self._CreateTestFile('f', 'aFile')
+        self.assertAllOsBehaviorsMatch('aFile')
+
+    def testFileWithContents(self):
+        self._CreateTestFile('f', 'aFile', 'some contents')
+        self.assertAllOsBehaviorsMatch('aFile')
+
+    def testFileWithBinaryContents(self):
+        self._CreateTestFile('b', 'aFile', b'some contents')
+        self.assertAllOsBehaviorsMatch('aFile')
+
+    @unittest.skipIf(TestCase.is_windows, 'no symlink in Windows')
+    def testSymLinkToEmptyFile(self):
+        self._CreateTestFile('f', 'aFile')
+        self._CreateTestFile('l', 'link_to_empty', 'aFile')
+        self.assertAllOsBehaviorsMatch('link_to_empty')
+
+    @unittest.skipIf(TestCase.is_windows, 'no symlink in Windows')
+    def TBD_testHardLinkToEmptyFile(self):
+        self._CreateTestFile('f', 'aFile')
+        self._CreateTestFile('h', 'link_to_empty', 'aFile')
+        self.assertAllOsBehaviorsMatch('link_to_empty')
+
+    @unittest.skipIf(TestCase.is_windows, 'no symlink in Windows')
+    def testSymLinkToRealFile(self):
+        self._CreateTestFile('f', 'aFile', 'some contents')
+        self._CreateTestFile('l', 'link_to_file', 'aFile')
+        self.assertAllOsBehaviorsMatch('link_to_file')
+
+    @unittest.skipIf(TestCase.is_windows, 'no symlink in Windows')
+    def TBD_testHardLinkToRealFile(self):
+        self._CreateTestFile('f', 'aFile', 'some contents')
+        self._CreateTestFile('h', 'link_to_file', 'aFile')
+        self.assertAllOsBehaviorsMatch('link_to_file')
+
+    @unittest.skipIf(TestCase.is_windows, 'no symlink in Windows')
+    def testBrokenSymLink(self):
+        self._CreateTestFile('l', 'broken_link', 'broken')
+        self._CreateTestFile('l', 'loop', '/a/loop')
+        self.assertAllOsBehaviorsMatch('broken_link')
+
+    def testFileInAFolder(self):
+        self._CreateTestFile('d', 'a')
+        self._CreateTestFile('d', 'a/b')
+        self._CreateTestFile('f', 'a/b/file', 'contents')
+        self.assertAllOsBehaviorsMatch('a/b/file')
+
+    @unittest.skipIf(TestCase.is_windows, 'no symlink in Windows')
+    def testAbsoluteSymLinkToFolder(self):
+        self._CreateTestFile('d', 'a')
+        self._CreateTestFile('d', 'a/b')
+        self._CreateTestFile('f', 'a/b/file', 'contents')
+        self._CreateTestFile('l', 'a/link', '/a/b')
+        self.assertAllOsBehaviorsMatch('a/link/file')
+
+    @unittest.skipIf(TestCase.is_windows, 'no symlink in Windows')
+    def testLinkToFolderAfterChdir(self):
+        self._CreateTestFile('d', 'a')
+        self._CreateTestFile('d', 'a/b')
+        self._CreateTestFile('f', 'a/b/file', 'contents')
+        self._CreateTestFile('l', 'a/link', '/a/b')
+
+        real_dir, fake_dir = self._Paths('a/b')
+        os.chdir(real_dir)
+        self.fake_os.chdir(fake_dir)
+        self.assertAllOsBehaviorsMatch('file')
+
+    @unittest.skipIf(TestCase.is_windows, 'no symlink in Windows')
+    def testRelativeSymLinkToFolder(self):
+        self._CreateTestFile('d', 'a')
+        self._CreateTestFile('d', 'a/b')
+        self._CreateTestFile('f', 'a/b/file', 'contents')
+        self._CreateTestFile('l', 'a/link', 'b')
+        self.assertAllOsBehaviorsMatch('a/link/file')
+
+    @unittest.skipIf(TestCase.is_windows, 'no symlink in Windows')
+    def testSymLinkToParent(self):
+        # Soft links on HFS+ / OS X behave differently.
+        if os.uname()[0] != 'Darwin':
+            self._CreateTestFile('d', 'a')
+            self._CreateTestFile('d', 'a/b')
+            self._CreateTestFile('l', 'a/b/c', '..')
+            self.assertAllOsBehaviorsMatch('a/b/c')
+
+    @unittest.skipIf(TestCase.is_windows, 'no symlink in Windows')
+    def testPathThroughSymLinkToParent(self):
+        self._CreateTestFile('d', 'a')
+        self._CreateTestFile('f', 'a/target', 'contents')
+        self._CreateTestFile('d', 'a/b')
+        self._CreateTestFile('l', 'a/b/c', '..')
+        self.assertAllOsBehaviorsMatch('a/b/c/target')
+
+    @unittest.skipIf(TestCase.is_windows, 'no symlink in Windows')
+    def testSymLinkToSiblingDirectory(self):
+        self._CreateTestFile('d', 'a')
+        self._CreateTestFile('d', 'a/b')
+        self._CreateTestFile('d', 'a/sibling_of_b')
+        self._CreateTestFile('f', 'a/sibling_of_b/target', 'contents')
+        self._CreateTestFile('l', 'a/b/c', '../sibling_of_b')
+        self.assertAllOsBehaviorsMatch('a/b/c/target')
+
+    @unittest.skipIf(TestCase.is_windows, 'no symlink in Windows')
+    def testSymLinkToSiblingDirectoryNonExistantFile(self):
+        self._CreateTestFile('d', 'a')
+        self._CreateTestFile('d', 'a/b')
+        self._CreateTestFile('d', 'a/sibling_of_b')
+        self._CreateTestFile('f', 'a/sibling_of_b/target', 'contents')
+        self._CreateTestFile('l', 'a/b/c', '../sibling_of_b')
+        self.assertAllOsBehaviorsMatch('a/b/c/file_does_not_exist')
+
+    @unittest.skipIf(TestCase.is_windows, 'no symlink in Windows')
+    def testBrokenSymLinkToSiblingDirectory(self):
+        self._CreateTestFile('d', 'a')
+        self._CreateTestFile('d', 'a/b')
+        self._CreateTestFile('d', 'a/sibling_of_b')
+        self._CreateTestFile('f', 'a/sibling_of_b/target', 'contents')
+        self._CreateTestFile('l', 'a/b/c', '../broken_sibling_of_b')
+        self.assertAllOsBehaviorsMatch('a/b/c/target')
+
+    def testRelativePath(self):
+        self._CreateTestFile('d', 'a')
+        self._CreateTestFile('d', 'a/b')
+        self._CreateTestFile('d', 'a/sibling_of_b')
+        self._CreateTestFile('f', 'a/sibling_of_b/target', 'contents')
+        self.assertAllOsBehaviorsMatch('a/b/../sibling_of_b/target')
+
+    def testBrokenRelativePath(self):
+        self._CreateTestFile('d', 'a')
+        self._CreateTestFile('d', 'a/b')
+        self._CreateTestFile('d', 'a/sibling_of_b')
+        self._CreateTestFile('f', 'a/sibling_of_b/target', 'contents')
+        self.assertAllOsBehaviorsMatch('a/b/../broken/target')
+
+    def testBadRelativePath(self):
+        self._CreateTestFile('d', 'a')
+        self._CreateTestFile('f', 'a/target', 'contents')
+        self._CreateTestFile('d', 'a/b')
+        self._CreateTestFile('d', 'a/sibling_of_b')
+        self._CreateTestFile('f', 'a/sibling_of_b/target', 'contents')
+        self.assertAllOsBehaviorsMatch('a/b/../broken/../target')
+
+    def testGetmtimeNonexistantPath(self):
+        self.assertOsPathMethodBehaviorMatches('getmtime', 'no/such/path')
+
+    def testBuiltinOpenModes(self):
+        self._CreateTestFile('f', 'read', 'some contents')
+        self._CreateTestFile('f', 'write', 'some contents')
+        self._CreateTestFile('f', 'append', 'some contents')
+        self.assertFileHandleBehaviorsMatch('read', 'r', 'other contents')
+        self.assertFileHandleBehaviorsMatch('write', 'w', 'other contents')
+        self.assertFileHandleBehaviorsMatch('append', 'a', 'other contents')
+        self._CreateTestFile('f', 'readplus', 'some contents')
+        self._CreateTestFile('f', 'writeplus', 'some contents')
+        self.assertFileHandleBehaviorsMatch('readplus', 'r+', 'other contents')
+        self.assertFileHandleBehaviorsMatch('writeplus', 'w+', 'other contents')
+        self._CreateTestFile('b', 'binaryread', b'some contents')
+        self._CreateTestFile('b', 'binarywrite', b'some contents')
+        self._CreateTestFile('b', 'binaryappend', b'some contents')
+        self.assertFileHandleBehaviorsMatch('binaryread', 'rb', b'other contents')
+        self.assertFileHandleBehaviorsMatch('binarywrite', 'wb', b'other contents')
+        self.assertFileHandleBehaviorsMatch('binaryappend', 'ab', b'other contents')
+        self.assertFileHandleBehaviorsMatch('read', 'rb', 'other contents')
+        self.assertFileHandleBehaviorsMatch('write', 'wb', 'other contents')
+        self.assertFileHandleBehaviorsMatch('append', 'ab', 'other contents')
+
+
+def main(unused_argv):
+    unittest.main()
+
+
+if __name__ == '__main__':
+    unittest.main()
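
The comparison tests above drive the fake filesystem exactly as client code
would. A minimal sketch of that pattern, assuming the fake_filesystem module
from this directory is importable (the FakeOsModule name and the '/tmp/aFile'
path are illustrative only):

    import fake_filesystem

    # Build an in-memory filesystem plus fakes for the os module and open().
    fs = fake_filesystem.FakeFilesystem(path_separator='/')
    fake_os = fake_filesystem.FakeOsModule(fs)
    fake_open = fake_filesystem.FakeFileOpen(fs)

    # Populate the fake filesystem, then query it through the fake modules.
    fs.CreateFile('/tmp/aFile', contents='some contents')
    assert fake_os.path.exists('/tmp/aFile')
    assert fake_open('/tmp/aFile').read() == 'some contents'
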
diff --git a/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_tempfile.py b/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_tempfile.py
new file mode 100644
index 0000000..090425d
--- /dev/null
+++ b/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_tempfile.py
@@ -0,0 +1,312 @@
+# Copyright 2010 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Fake tempfile module.
+
+Fake implementation of the python2.4.1 tempfile built-in module that works with
+a FakeFilesystem object.
+"""
+#pylint: disable-all
+
+import errno
+import logging
+import os
+import stat
+import tempfile
+
+import fake_filesystem
+
+try:
+  import StringIO as io  # pylint: disable-msg=C6204
+except ImportError:
+  import io  # pylint: disable-msg=C6204
+
+
+class FakeTempfileModule(object):
+  """Uses a FakeFilesystem to provide a mock for the tempfile 2.4.1 module.
+
+  Common usage:
+  filesystem = fake_filesystem.FakeFilesystem()
+  my_tempfile_module = mock_tempfile.FakeTempfileModule(filesystem)
+
+  See also: default keyword arguments for Dependency Injection on
+  http://go/tott-episode-12
+  """
+
+  def __init__(self, filesystem):
+    self._filesystem = filesystem
+    self._tempfile = tempfile
+    self.tempdir = None  # initialized by mktemp(), others
+    self._temp_prefix = 'tmp'
+    self._mktemp_retvals = []
+
+  # pylint: disable-msg=W0622
+  def _TempFilename(self, suffix='', prefix=None, dir=None):
+    """Create a temporary filename that does not exist.
+
+    This is a re-implementation of how tempfile creates random filenames,
+    and is probably different.
+
+    Does not modify self._filesystem, that's your job.
+
+    Output: self.tempdir is initialized if unset
+    Args:
+      suffix: filename suffix
+      prefix: filename prefix
+      dir: dir to put filename in
+    Returns:
+      string, temp filename that does not exist
+    """
+    if dir is None:
+      dir = self._filesystem.JoinPaths(self._filesystem.root.name, 'tmp')
+    filename = None
+    if prefix is None:
+      prefix = self._temp_prefix
+    while not filename or self._filesystem.Exists(filename):
+      # pylint: disable-msg=W0212
+      filename = self._filesystem.JoinPaths(dir, '%s%s%s' % (
+          prefix,
+          next(self._tempfile._RandomNameSequence()),
+          suffix))
+    return filename
+
+  # pylint: disable-msg=W0622,W0613
+  def TemporaryFile(self, mode='w+b', bufsize=-1,
+                    suffix='', prefix=None, dir=None):
+    """Return a file-like object deleted on close().
+
+    Python 2.4.1 tempfile.TemporaryFile.__doc__ =
+    >Return a file (or file-like) object that can be used as a temporary
+    >storage area. The file is created using mkstemp. It will be destroyed as
+    >soon as it is closed (including an implicit close when the object is
+    >garbage collected). Under Unix, the directory entry for the file is
+    >removed immediately after the file is created. Other platforms do not
+    >support this; your code should not rely on a temporary file created using
+    >this function having or not having a visible name in the file system.
+    >
+    >The mode parameter defaults to 'w+b' so that the file created can be read
+    >and written without being closed. Binary mode is used so that it behaves
+    >consistently on all platforms without regard for the data that is stored.
+    >bufsize defaults to -1, meaning that the operating system default is used.
+    >
+    >The dir, prefix and suffix parameters are passed to mkstemp()
+
+    Args:
+      mode: optional string, see above
+      bufsize: optional int, see above
+      suffix: optional string, see above
+      prefix: optional string, see above
+      dir: optional string, see above
+    Returns:
+      a file-like object.
+    """
+    # pylint: disable-msg=C6002
+    # TODO: prefix, suffix, bufsize, dir, mode unused?
+    # cannot be cStringIO due to .name requirement below
+    retval = io.StringIO()
+    retval.name = '<fdopen>'  # as seen on 2.4.3
+    return retval
+
+  # pylint: disable-msg=W0622,W0613
+  def NamedTemporaryFile(self, mode='w+b', bufsize=-1,
+                         suffix='', prefix=None, dir=None, delete=True):
+    """Return a file-like object with name that is deleted on close().
+
+    Python 2.4.1 tempfile.NamedTemporaryFile.__doc__ =
+    >This function operates exactly as TemporaryFile() does, except that
+    >the file is guaranteed to have a visible name in the file system. That
+    >name can be retrieved from the name member of the file object.
+
+    Args:
+      mode: optional string, see above
+      bufsize: optional int, see above
+      suffix: optional string, see above
+      prefix: optional string, see above
+      dir: optional string, see above
+      delete: optional bool, see above
+    Returns:
+      a file-like object including obj.name
+    """
+    # pylint: disable-msg=C6002
+    # TODO: bufsiz unused?
+    temp = self.mkstemp(suffix=suffix, prefix=prefix, dir=dir)
+    filename = temp[1]
+    mock_open = fake_filesystem.FakeFileOpen(
+        self._filesystem, delete_on_close=delete)
+    obj = mock_open(filename, mode)
+    obj.name = filename
+    return obj
+
+  # pylint: disable-msg=C6409
+  def mkstemp(self, suffix='', prefix=None, dir=None, text=False):
+    """Create temp file, returning a 2-tuple: (9999, filename).
+
+    Important: Returns 9999 instead of a real file descriptor!
+
+    Python 2.4.1 tempfile.mkstemp.__doc__ =
+    >mkstemp([suffix, [prefix, [dir, [text]]]])
+    >
+    >User-callable function to create and return a unique temporary file.
+    >The return value is a pair (fd, name) where fd is the file descriptor
+    >returned by os.open, and name is the filename.
+    >
+    >...[snip args]...
+    >
+    >The file is readable and writable only by the creating user ID.
+    >If the operating system uses permission bits to indicate whether
+    >a file is executable, the file is executable by no one. The file
+    >descriptor is not inherited by children of this process.
+    >
+    >Caller is responsible for deleting the file when done with it.
+
+    NOTE: if dir is unspecified, this call creates a directory.
+
+    Output: self.tempdir is initialized if unset
+    Args:
+      suffix: optional string, filename suffix
+      prefix: optional string, filename prefix
+      dir: optional string, directory for temp file; must exist before call
+      text: optional boolean, True = open file in text mode.
+          default False = open file in binary mode.
+    Returns:
+      2-tuple containing
+      [0] = int, file descriptor number for the file object
+      [1] = string, absolute pathname of a file
+    Raises:
+      OSError: when dir= is specified but does not exist
+    """
+    # pylint: disable-msg=C6002
+    # TODO: optional boolean text is unused?
+    # default dir affected by "global"
+    filename = self._TempEntryname(suffix, prefix, dir)
+    fh = self._filesystem.CreateFile(filename, st_mode=stat.S_IFREG|0o600)
+    fd = self._filesystem.AddOpenFile(fh)
+
+    self._mktemp_retvals.append(filename)
+    return (fd, filename)
+
+  # pylint: disable-msg=C6409
+  def mkdtemp(self, suffix='', prefix=None, dir=None):
+    """Create temp directory, returns string, absolute pathname.
+
+    Python 2.4.1 tempfile.mkdtemp.__doc__ =
+    >mkdtemp([suffix[, prefix[, dir]]])
+    >Creates a temporary directory in the most secure manner
+    >possible. [...]
+    >
+    >The user of mkdtemp() is responsible for deleting the temporary
+    >directory and its contents when done with it.
+    > [...]
+    >mkdtemp() returns the absolute pathname of the new directory. [...]
+
+    Args:
+      suffix: optional string, filename suffix
+      prefix: optional string, filename prefix
+      dir: optional string, directory for temp dir. Must exist before call
+    Returns:
+      string, directory name
+    """
+    dirname = self._TempEntryname(suffix, prefix, dir)
+    self._filesystem.CreateDirectory(dirname, perm_bits=0o700)
+
+    self._mktemp_retvals.append(dirname)
+    return dirname
+
+  def _TempEntryname(self, suffix, prefix, dir):
+    """Helper function for mk[ds]temp.
+
+    Args:
+      suffix: string, filename suffix
+      prefix: string, filename prefix
+      dir: string, directory for temp dir. Must exist before call
+    Returns:
+      string, entry name
+    """
+    # default dir affected by "global"
+    if dir is None:
+      call_mkdir = True
+      dir = self.gettempdir()
+    else:
+      call_mkdir = False
+
+    entryname = None
+    while not entryname or self._filesystem.Exists(entryname):
+      entryname = self._TempFilename(suffix=suffix, prefix=prefix, dir=dir)
+    if not call_mkdir:
+      # This is simplistic. A bad input of suffix=/f will cause tempfile
+      # to blow up, but this mock won't.  But that's already a broken
+      # corner case
+      parent_dir = os.path.dirname(entryname)
+      try:
+        self._filesystem.GetObject(parent_dir)
+      except IOError as err:
+        assert 'No such file or directory' in str(err)
+        # python -c 'import tempfile; tempfile.mkstemp(dir="/no/such/dr")'
+        # OSError: [Errno 2] No such file or directory: '/no/such/dr/tmpFBuqjO'
+        raise OSError(
+            errno.ENOENT,
+            'No such directory in mock filesystem',
+            parent_dir)
+    return entryname
+
+  # pylint: disable-msg=C6409
+  def gettempdir(self):
+    """Get default temp dir.  Sets default if unset."""
+    if self.tempdir:
+      return self.tempdir
+    # pylint: disable-msg=C6002
+    # TODO: environment variables TMPDIR TEMP TMP, or other dirs?
+    self.tempdir = '/tmp'
+    return self.tempdir
+
+  # pylint: disable-msg=C6409
+  def gettempprefix(self):
+    """Get temp filename prefix.
+
+    NOTE: This has no effect on py2.4
+
+    Returns:
+      string, prefix to use in temporary filenames
+    """
+    return self._temp_prefix
+
+  # pylint: disable-msg=C6409
+  def mktemp(self, suffix=''):
+    """mktemp is deprecated in 2.4.1, and is thus unimplemented."""
+    raise NotImplementedError
+
+  def _SetTemplate(self, template):
+    """Setter for 'template' property."""
+    self._temp_prefix = template
+    logging.error('tempfile.template= is a NOP in python2.4')
+
+  def __SetTemplate(self, template):
+    """Indirect setter for 'template' property."""
+    self._SetTemplate(template)
+
+  def __DeprecatedTemplate(self):
+    """template property implementation."""
+    raise NotImplementedError
+
+  # reading from template is deprecated, setting is ok.
+  template = property(__DeprecatedTemplate, __SetTemplate,
+                      doc="""Set the prefix for temp filenames""")
+
+  def FakeReturnedMktempValues(self):
+    """For validation purposes, mktemp()'s return values are stored."""
+    return self._mktemp_retvals
+
+  def FakeMktempReset(self):
+    """Clear the stored mktemp() values."""
+    self._mktemp_retvals = []
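
The "Common usage" note in the docstring above can be made concrete with a
short sketch; it uses only calls that appear in this file and its test below
(FakeFilesystem, FakeTempfileModule, mkstemp, NamedTemporaryFile, Exists):

    import fake_filesystem
    import fake_tempfile

    filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
    fake_tmp = fake_tempfile.FakeTempfileModule(filesystem)

    # mkstemp() creates the entry in the fake filesystem, not on disk.
    fd, path = fake_tmp.mkstemp()
    assert filesystem.Exists(path)

    # NamedTemporaryFile() is removed from the fake filesystem on close().
    tmp = fake_tmp.NamedTemporaryFile()
    assert filesystem.Exists(tmp.name)
    tmp.close()
    assert not filesystem.Exists(tmp.name)
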
diff --git a/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_tempfile_test.py b/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_tempfile_test.py
new file mode 100755
index 0000000..e7dcb6c
--- /dev/null
+++ b/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_tempfile_test.py
@@ -0,0 +1,197 @@
+#! /usr/bin/env python
+#
+# Copyright 2009 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for the fake_tempfile module."""
+
+#pylint: disable-all
+
+import stat
+import sys
+if sys.version_info < (2, 7):
+    import unittest2 as unittest
+else:
+    import unittest
+
+try:
+  import StringIO as io  # pylint: disable-msg=C6204
+except ImportError:
+  import io  # pylint: disable-msg=C6204
+
+import fake_filesystem
+import fake_tempfile
+
+
+class FakeLogging(object):
+  """Fake logging object for testGettempprefix."""
+
+  def __init__(self, test_case):
+    self._message = None
+    self._test_case = test_case
+
+  # pylint: disable-msg=C6409
+  def error(self, message):
+    if self._message is not None:
+      self.FailOnMessage(message)
+    self._message = message
+
+  def FailOnMessage(self, message):
+    self._test_case.fail('Unexpected message received: %s' % message)
+
+  warn = FailOnMessage
+  info = FailOnMessage
+  debug = FailOnMessage
+  fatal = FailOnMessage
+
+  def message(self):
+    return self._message
+
+
+class FakeTempfileModuleTest(unittest.TestCase):
+  """Test the 'tempfile' module mock."""
+
+  def setUp(self):
+    self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
+    self.tempfile = fake_tempfile.FakeTempfileModule(self.filesystem)
+    self.orig_logging = fake_tempfile.logging
+    self.fake_logging = FakeLogging(self)
+    fake_tempfile.logging = self.fake_logging
+
+  def tearDown(self):
+    fake_tempfile.logging = self.orig_logging
+
+  def testTempFilename(self):
+    # pylint: disable-msg=C6002
+    # TODO: test that tempdir is init'ed
+    filename_a = self.tempfile._TempFilename()
+    # expect /tmp/tmp######
+    self.assertTrue(filename_a.startswith('/tmp/tmp'))
+    self.assertLess(len('/tmp/tmpA'), len(filename_a))
+
+    # see that random part changes
+    filename_b = self.tempfile._TempFilename()
+    self.assertTrue(filename_b.startswith('/tmp/tmp'))
+    self.assertLess(len('/tmp/tmpB'), len(filename_b))
+    self.assertNotEqual(filename_a, filename_b)
+
+  def testTempFilenameSuffix(self):
+    """test tempfile._TempFilename(suffix=)."""
+    filename = self.tempfile._TempFilename(suffix='.suffix')
+    self.assertTrue(filename.startswith('/tmp/tmp'))
+    self.assertTrue(filename.endswith('.suffix'))
+    self.assertLess(len('/tmp/tmpX.suffix'), len(filename))
+
+  def testTempFilenamePrefix(self):
+    """test tempfile._TempFilename(prefix=)."""
+    filename = self.tempfile._TempFilename(prefix='prefix.')
+    self.assertTrue(filename.startswith('/tmp/prefix.'))
+    self.assertLess(len('/tmp/prefix.X'), len(filename))
+
+  def testTempFilenameDir(self):
+    """test tempfile._TempFilename(dir=)."""
+    filename = self.tempfile._TempFilename(dir='/dir')
+    self.assertTrue(filename.startswith('/dir/tmp'))
+    self.assertLess(len('/dir/tmpX'), len(filename))
+
+  def testTemporaryFile(self):
+    obj = self.tempfile.TemporaryFile()
+    self.assertEqual('<fdopen>', obj.name)
+    self.assertTrue(isinstance(obj, io.StringIO))
+
+  def testNamedTemporaryFile(self):
+    obj = self.tempfile.NamedTemporaryFile()
+    created_filenames = self.tempfile.FakeReturnedMktempValues()
+    self.assertEqual(created_filenames[0], obj.name)
+    self.assertTrue(self.filesystem.GetObject(obj.name))
+    obj.close()
+    self.assertRaises(IOError, self.filesystem.GetObject, obj.name)
+
+  def testNamedTemporaryFileNoDelete(self):
+    obj = self.tempfile.NamedTemporaryFile(delete=False)
+    obj.write(b'foo')
+    obj.close()
+    file_obj = self.filesystem.GetObject(obj.name)
+    self.assertEqual('foo', file_obj.contents)
+    obj = self.tempfile.NamedTemporaryFile(mode='w', delete=False)
+    obj.write('foo')
+    obj.close()
+    file_obj = self.filesystem.GetObject(obj.name)
+    self.assertEqual('foo', file_obj.contents)
+
+  def testMkstemp(self):
+    next_fd = len(self.filesystem.open_files)
+    temporary = self.tempfile.mkstemp()
+    self.assertEqual(2, len(temporary))
+    self.assertTrue(temporary[1].startswith('/tmp/tmp'))
+    created_filenames = self.tempfile.FakeReturnedMktempValues()
+    self.assertEqual(next_fd, temporary[0])
+    self.assertEqual(temporary[1], created_filenames[0])
+    self.assertTrue(self.filesystem.Exists(temporary[1]))
+    self.assertEqual(self.filesystem.GetObject(temporary[1]).st_mode,
+                     stat.S_IFREG|0o600)
+
+  def testMkstempDir(self):
+    """test tempfile.mkstemp(dir=)."""
+    # expect fail: /dir does not exist
+    self.assertRaises(OSError, self.tempfile.mkstemp, dir='/dir')
+    # expect pass: /dir exists
+    self.filesystem.CreateDirectory('/dir')
+    next_fd = len(self.filesystem.open_files)
+    temporary = self.tempfile.mkstemp(dir='/dir')
+    self.assertEqual(2, len(temporary))
+    self.assertEqual(next_fd, temporary[0])
+    self.assertTrue(temporary[1].startswith('/dir/tmp'))
+    created_filenames = self.tempfile.FakeReturnedMktempValues()
+    self.assertEqual(temporary[1], created_filenames[0])
+    self.assertTrue(self.filesystem.Exists(temporary[1]))
+    self.assertEqual(self.filesystem.GetObject(temporary[1]).st_mode,
+                     stat.S_IFREG|0o600)
+    # pylint: disable-msg=C6002
+    # TODO: add a test that /dir is actually writable.
+
+  def testMkdtemp(self):
+    dirname = self.tempfile.mkdtemp()
+    self.assertTrue(dirname)
+    created_filenames = self.tempfile.FakeReturnedMktempValues()
+    self.assertEqual(dirname, created_filenames[0])
+    self.assertTrue(self.filesystem.Exists(dirname))
+    self.assertEqual(self.filesystem.GetObject(dirname).st_mode,
+                     stat.S_IFDIR|0o700)
+
+  def testGettempdir(self):
+    self.assertEqual(None, self.tempfile.tempdir)
+    self.assertEqual('/tmp', self.tempfile.gettempdir())
+    self.assertEqual('/tmp', self.tempfile.tempdir)
+
+  def testGettempprefix(self):
+    """test tempfile.gettempprefix() and the tempfile.template setter."""
+    self.assertEqual('tmp', self.tempfile.gettempprefix())
+    # set and verify
+    self.tempfile.template = 'strung'
+    self.assertEqual('strung', self.tempfile.gettempprefix())
+    self.assertEqual('tempfile.template= is a NOP in python2.4',
+                     self.fake_logging.message())
+
+  def testMktemp(self):
+    self.assertRaises(NotImplementedError, self.tempfile.mktemp)
+
+  def testTemplateGet(self):
+    """verify tempfile.template still unimplemented."""
+    self.assertRaises(NotImplementedError, getattr,
+                      self.tempfile, 'template')
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/catapult/telemetry/third_party/pyfakefs/pyfakefs/requirements.txt b/catapult/telemetry/third_party/pyfakefs/pyfakefs/requirements.txt
new file mode 100644
index 0000000..ddd4287
--- /dev/null
+++ b/catapult/telemetry/third_party/pyfakefs/pyfakefs/requirements.txt
@@ -0,0 +1 @@
+wheel==0.23.0
diff --git a/catapult/telemetry/third_party/pyfakefs/pyfakefs/setup.py b/catapult/telemetry/third_party/pyfakefs/pyfakefs/setup.py
new file mode 100644
index 0000000..b1a2f11
--- /dev/null
+++ b/catapult/telemetry/third_party/pyfakefs/pyfakefs/setup.py
@@ -0,0 +1,88 @@
+#! /usr/bin/env python
+
+# Copyright 2009 Google Inc. All Rights Reserved.
+# Copyright 2014 Altera Corporation. All Rights Reserved.
+# Copyright 2014-2015 John McGehee
+# 
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# 
+#     http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from fake_filesystem import __version__
+
+import os
+
+
+NAME = 'pyfakefs'
+MODULES = ['fake_filesystem',
+           'fake_filesystem_glob',
+           'fake_filesystem_shutil',
+           'fake_tempfile',
+           'fake_filesystem_unittest']
+REQUIRES = ['mox3']
+DESCRIPTION = 'Fake file system for testing file operations without touching the real file system.'
+
+URL = "https://github.com/jmcgeheeiv/pyfakefs"
+
+readme = os.path.join(os.path.dirname(__file__), 'README.md')
+LONG_DESCRIPTION = open(readme).read()
+
+CLASSIFIERS = [
+    'Development Status :: 5 - Production/Stable',
+    'Environment :: Console',
+    'Intended Audience :: Developers',
+    'License :: OSI Approved :: Apache Software License',
+    'Programming Language :: Python',
+    'Programming Language :: Python :: 2.6',
+    'Programming Language :: Python :: 2.7',
+    'Programming Language :: Python :: 3.2',
+    'Programming Language :: Python :: 3.3',
+    'Programming Language :: Python :: 3.4',
+    'Operating System :: POSIX',
+    'Operating System :: MacOS',
+    'Operating System :: Microsoft :: Windows',
+    'Topic :: Software Development :: Libraries',
+    'Topic :: Software Development :: Libraries :: Python Modules',
+    'Topic :: Software Development :: Testing',
+    'Topic :: System :: Filesystems',
+]
+
+AUTHOR = 'Google and John McGehee'
+AUTHOR_EMAIL = 'github@johnnado.com'
+KEYWORDS = ("testing test file os shutil glob mocking unittest "
+            "fakes filesystem unit").split(' ')
+
+params = dict(
+    name=NAME,
+    version=__version__,
+    py_modules=MODULES,
+    install_requires=REQUIRES,
+
+    # metadata for upload to PyPI
+    author=AUTHOR,
+    author_email=AUTHOR_EMAIL,
+    description=DESCRIPTION,
+    long_description=LONG_DESCRIPTION,
+    keywords=KEYWORDS,
+    url=URL,
+    classifiers=CLASSIFIERS,
+)
+
+try:
+    from setuptools import setup
+except ImportError:
+    from distutils.core import setup
+else:
+    params['tests_require'] = ['unittest2']
+    params['test_suite'] = 'unittest2.collector'
+
+setup(**params) # pylint: disable = W0142
diff --git a/catapult/telemetry/third_party/pyfakefs/pyfakefs/tox.ini b/catapult/telemetry/third_party/pyfakefs/pyfakefs/tox.ini
new file mode 100644
index 0000000..0da79b7
--- /dev/null
+++ b/catapult/telemetry/third_party/pyfakefs/pyfakefs/tox.ini
@@ -0,0 +1,8 @@
+[tox]
+envlist=py26,py27,py32,py33,pypy
+
+[testenv]
+commands=python all_tests.py
+
+[testenv:py26]
+deps=unittest2
diff --git a/catapult/telemetry/third_party/pyserial/LICENSE.txt b/catapult/telemetry/third_party/pyserial/LICENSE.txt
new file mode 100644
index 0000000..f604ea3
--- /dev/null
+++ b/catapult/telemetry/third_party/pyserial/LICENSE.txt
@@ -0,0 +1,61 @@
+Copyright (c) 2001-2013 Chris Liechti <cliechti@gmx.net>;
+All Rights Reserved.
+
+This is the Python license. In short, you can use this product in
+commercial and non-commercial applications, modify it, redistribute it.
+A notification to the author when you use and/or modify it is welcome.
+
+
+TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING THIS SOFTWARE
+===================================================================
+
+LICENSE AGREEMENT
+-----------------
+
+1. This LICENSE AGREEMENT is between the copyright holder of this
+product, and the Individual or Organization ("Licensee") accessing
+and otherwise using this product in source or binary form and its
+associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement,
+the copyright holder hereby grants Licensee a nonexclusive,
+royalty-free, world-wide license to reproduce, analyze, test,
+perform and/or display publicly, prepare derivative works, distribute,
+and otherwise use this product alone or in any derivative version,
+provided, however, that copyright holders License Agreement and
+copyright holders notice of copyright are retained in this product
+alone or in any derivative version prepared by Licensee.
+
+3. In the event Licensee prepares a derivative work that is based on
+or incorporates this product or any part thereof, and wants to make
+the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to this product.
+
+4. The copyright holder is making this product available to Licensee on
+an "AS IS" basis. THE COPYRIGHT HOLDER MAKES NO REPRESENTATIONS OR
+WARRANTIES, EXPRESS OR IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION,
+THE COPYRIGHT HOLDER MAKES NO AND DISCLAIMS ANY REPRESENTATION OR
+WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR
+THAT THE USE OF THIS PRODUCT WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. THE COPYRIGHT HOLDER SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER
+USERS OF THIS PRODUCT FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL
+DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE
+USING THIS PRODUCT, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE
+POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. Nothing in this License Agreement shall be deemed to create any
+relationship of agency, partnership, or joint venture between the
+copyright holder and Licensee. This License Agreement does not grant
+permission to use trademarks or trade names from the copyright holder
+in a trademark sense to endorse or promote products or services of
+Licensee, or any third party.
+
+8. By copying, installing or otherwise using this product, Licensee
+agrees to be bound by the terms and conditions of this License
+Agreement.
+
diff --git a/catapult/telemetry/third_party/pyserial/README.chromium b/catapult/telemetry/third_party/pyserial/README.chromium
new file mode 100644
index 0000000..04593db
--- /dev/null
+++ b/catapult/telemetry/third_party/pyserial/README.chromium
@@ -0,0 +1,19 @@
+Name: pySerial
+Short Name: pySerial
+URL: https://github.com/pyserial/pyserial
+Version: 2.7
+Date: 2013-10-17
+License: Python
+License File: NOT_SHIPPED
+Security Critical: no
+
+Description:
+Python library for serial port access. Used for communication with
+a Monsoon device, which tunnels serial over USB.
+
+Local Modifications:
+Includes only the serial/ folder and LICENSE.txt.
+Packaging and setup files have not been copied downstream.
+All other files and folders (documentation/, examples/, test/)
+have not been copied downstream.
+linux-product_info.patch has been applied so that the USB product information is used as the port description.
diff --git a/catapult/telemetry/third_party/pyserial/linux-product_info.patch b/catapult/telemetry/third_party/pyserial/linux-product_info.patch
new file mode 100644
index 0000000..9f8001a
--- /dev/null
+++ b/catapult/telemetry/third_party/pyserial/linux-product_info.patch
@@ -0,0 +1,19 @@
+Index: serial/tools/list_ports_linux.py
+===================================================================
+--- serial/tools/list_ports_linux.py	(revision 494)
++++ serial/tools/list_ports_linux.py	(working copy)
+@@ -110,6 +110,14 @@
+     sys_dev_path = '/sys/class/tty/%s/device/interface' % (base,)
+     if os.path.exists(sys_dev_path):
+         return read_line(sys_dev_path)
++
++    # USB Product Information
++    sys_dev_path = '/sys/class/tty/%s/device' % (base,)
++    if os.path.exists(sys_dev_path):
++        product_name_file = os.path.dirname(os.path.realpath(sys_dev_path)) + "/product"
++        if os.path.exists(product_name_file):
++            return read_line(product_name_file)
++
+     return base
+ 
+ def hwinfo(device):
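
The patch above changes what pySerial reports as a port's description; a
minimal sketch of where that value surfaces, assuming a Linux host and the
serial.tools.list_ports helper shipped with pySerial 2.7 (which returns
(device, description, hwid) tuples):

    from serial.tools import list_ports

    for device, description, hwid in list_ports.comports():
        # With the patch applied, 'description' may carry the USB product
        # string read from the device's sysfs 'product' file.
        print('%s: %s (%s)' % (device, description, hwid))
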
diff --git a/catapult/telemetry/third_party/pyserial/serial/__init__.py b/catapult/telemetry/third_party/pyserial/serial/__init__.py
new file mode 100755
index 0000000..33ae52e
--- /dev/null
+++ b/catapult/telemetry/third_party/pyserial/serial/__init__.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python 
+
+# portable serial port access with python
+# this is a wrapper module for different platform implementations
+#
+# (C) 2001-2010 Chris Liechti <cliechti@gmx.net>
+# this is distributed under a free software license, see license.txt
+
+VERSION = '2.7'
+
+import sys
+
+if sys.platform == 'cli':
+    from serial.serialcli import *
+else:
+    import os
+    # chose an implementation, depending on os
+    if os.name == 'nt': #sys.platform == 'win32':
+        from serial.serialwin32 import *
+    elif os.name == 'posix':
+        from serial.serialposix import *
+    elif os.name == 'java':
+        from serial.serialjava import *
+    else:
+        raise ImportError("Sorry: no implementation for your platform ('%s') available" % (os.name,))
+
+
+protocol_handler_packages = [
+        'serial.urlhandler',
+        ]
+
+def serial_for_url(url, *args, **kwargs):
+    """\
+    Get an instance of the Serial class, depending on port/url. The port is not
+    opened when the keyword parameter 'do_not_open' is true, by default it
+    is. All other parameters are directly passed to the __init__ method when
+    the port is instantiated.
+
+    The list of package names that is searched for protocol handlers is kept in
+    ``protocol_handler_packages``.
+
+    e.g. we want to support a URL ``foobar://``. A module
+    ``my_handlers.protocol_foobar`` is provided by the user. Then
+    ``protocol_handler_packages.append("my_handlers")`` would extend the search
+    path so that ``serial_for_url("foobar://")`` would work.
+    """
+    # check for and remove the extra parameter so it does not confuse the Serial class
+    do_open = 'do_not_open' not in kwargs or not kwargs['do_not_open']
+    if 'do_not_open' in kwargs: del kwargs['do_not_open']
+    # the default is to use the native version
+    klass = Serial   # 'native' implementation
+    # check port type and get class
+    try:
+        url_nocase = url.lower()
+    except AttributeError:
+        # it's not a string, use default
+        pass
+    else:
+        if '://' in url_nocase:
+            protocol = url_nocase.split('://', 1)[0]
+            for package_name in protocol_handler_packages:
+                module_name = '%s.protocol_%s' % (package_name, protocol,)
+                try:
+                    handler_module = __import__(module_name)
+                except ImportError:
+                    pass
+                else:
+                    klass = sys.modules[module_name].Serial
+                    break
+            else:
+                raise ValueError('invalid URL, protocol %r not known' % (protocol,))
+        else:
+            klass = Serial   # 'native' implementation
+    # instantiate and open when desired
+    instance = klass(None, *args, **kwargs)
+    instance.port = url
+    if do_open:
+        instance.open()
+    return instance
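
The serial_for_url() docstring above describes how URL schemes are resolved;
a small sketch of the call, with a placeholder device name and a hypothetical
handler package ('my_handlers'), using do_not_open=True so nothing is opened:

    import serial

    # A plain device name falls through to the native Serial class;
    # do_not_open=True defers open() until the caller is ready.
    port = serial.serial_for_url('/dev/ttyUSB0', do_not_open=True)
    port.baudrate = 115200
    # port.open() would be called once the device is actually present.

    # Custom schemes are resolved by scanning protocol_handler_packages for a
    # module named protocol_<scheme>; 'my_handlers' is purely illustrative.
    serial.protocol_handler_packages.append('my_handlers')
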
diff --git a/catapult/telemetry/third_party/pyserial/serial/rfc2217.py b/catapult/telemetry/third_party/pyserial/serial/rfc2217.py
new file mode 100644
index 0000000..2012ea7
--- /dev/null
+++ b/catapult/telemetry/third_party/pyserial/serial/rfc2217.py
@@ -0,0 +1,1323 @@
+#! python
+#
+# Python Serial Port Extension for Win32, Linux, BSD, Jython
+# see __init__.py
+#
+# This module implements an RFC 2217 compatible client. RFC 2217 describes a
+# protocol to access serial ports over TCP/IP and allows setting the baud rate,
+# modem control lines etc.
+#
+# (C) 2001-2013 Chris Liechti <cliechti@gmx.net>
+# this is distributed under a free software license, see license.txt
+
+# TODO:
+# - setting control line -> answer is not checked (had problems with one of the
+#   servers). consider implementing a compatibility mode flag to make the check
+#   conditional
+# - write timeout not implemented at all
+
+##############################################################################
+# observations and issues with servers
+#=============================================================================
+# sredird V2.2.1
+# - http://www.ibiblio.org/pub/Linux/system/serial/   sredird-2.2.2.tar.gz
+# - does not acknowledge SET_CONTROL (RTS/DTR) correctly, always responding
+#   [105 1] instead of the actual value.
+# - SET_BAUDRATE answer contains 4 extra null bytes -> probably for larger
+#   numbers than 2**32?
+# - To get the signature [COM_PORT_OPTION 0] has to be sent.
+# - run a server: while true; do nc -l -p 7000 -c "sredird debug /dev/ttyUSB0 /var/lock/sredir"; done
+#=============================================================================
+# telnetcpcd (untested)
+# - http://ftp.wayne.edu/kermit/sredird/telnetcpcd-1.09.tar.gz
+# - To get the signature [COM_PORT_OPTION] w/o data has to be sent.
+#=============================================================================
+# ser2net
+# - does not negotiate BINARY or COM_PORT_OPTION for his side but at least
+#   acknowledges that the client activates these options
+# - The configuration may be that the server prints a banner. As this client
+#   implementation does a flushInput on connect, this banner is hidden from
+#   the user application.
+# - NOTIFY_MODEMSTATE: the poll interval of the server seems to be one
+#   second.
+# - To get the signature [COM_PORT_OPTION 0] has to be sent.
+# - run a server: run ser2net daemon, in /etc/ser2net.conf:
+#     2000:telnet:0:/dev/ttyS0:9600 remctl banner
+##############################################################################
+
+# How to identify ports? pySerial might want to support other protocols in the
+# future, so let's use a URL scheme.
+# for RFC2217 compliant servers we will use this:
+#    rfc2217://<host>:<port>[/option[/option...]]
+#
+# options:
+# - "debug" print diagnostic messages
+# - "ign_set_control": do not look at the answers to SET_CONTROL
+# - "poll_modem": issue NOTIFY_MODEMSTATE requests when CTS/DTR/RI/CD is read.
+#   Without this option it expects that the server sends notifications
+#   automatically on change (which most servers do and is according to the
+#   RFC).
+# the order of the options is not relevant
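+#
+# illustrative URLs (host and port are placeholders, options as listed above):
+#     rfc2217://example-host:2217
+#     rfc2217://example-host:2217/poll_modem
+#     rfc2217://example-host:2217/ign_set_control/debug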
+
+from serial.serialutil import *
+import time
+import struct
+import socket
+import threading
+import Queue
+import logging
+
+# port string is expected to be something like this:
+# rfc2217://host:port
+# host may be an IP address or a host name (with or without a domain).
+# port is 0...65535
+
+# map log level names to constants. used in fromURL()
+LOGGER_LEVELS = {
+    'debug': logging.DEBUG,
+    'info': logging.INFO,
+    'warning': logging.WARNING,
+    'error': logging.ERROR,
+    }
+
+
+# telnet protocol characters
+IAC  = to_bytes([255]) # Interpret As Command
+DONT = to_bytes([254])
+DO   = to_bytes([253])
+WONT = to_bytes([252])
+WILL = to_bytes([251])
+IAC_DOUBLED = to_bytes([IAC, IAC])
+
+SE  = to_bytes([240])  # Subnegotiation End
+NOP = to_bytes([241])  # No Operation
+DM  = to_bytes([242])  # Data Mark
+BRK = to_bytes([243])  # Break
+IP  = to_bytes([244])  # Interrupt process
+AO  = to_bytes([245])  # Abort output
+AYT = to_bytes([246])  # Are You There
+EC  = to_bytes([247])  # Erase Character
+EL  = to_bytes([248])  # Erase Line
+GA  = to_bytes([249])  # Go Ahead
+SB =  to_bytes([250])  # Subnegotiation Begin
+
+# selected telnet options
+BINARY = to_bytes([0]) # 8-bit data path
+ECHO = to_bytes([1])   # echo
+SGA = to_bytes([3])    # suppress go ahead
+
+# RFC2217
+COM_PORT_OPTION = to_bytes([44])
+
+# Client to Access Server
+SET_BAUDRATE = to_bytes([1])
+SET_DATASIZE = to_bytes([2])
+SET_PARITY = to_bytes([3])
+SET_STOPSIZE = to_bytes([4])
+SET_CONTROL = to_bytes([5])
+NOTIFY_LINESTATE = to_bytes([6])
+NOTIFY_MODEMSTATE = to_bytes([7])
+FLOWCONTROL_SUSPEND = to_bytes([8])
+FLOWCONTROL_RESUME = to_bytes([9])
+SET_LINESTATE_MASK = to_bytes([10])
+SET_MODEMSTATE_MASK = to_bytes([11])
+PURGE_DATA = to_bytes([12])
+
+SERVER_SET_BAUDRATE = to_bytes([101])
+SERVER_SET_DATASIZE = to_bytes([102])
+SERVER_SET_PARITY = to_bytes([103])
+SERVER_SET_STOPSIZE = to_bytes([104])
+SERVER_SET_CONTROL = to_bytes([105])
+SERVER_NOTIFY_LINESTATE = to_bytes([106])
+SERVER_NOTIFY_MODEMSTATE = to_bytes([107])
+SERVER_FLOWCONTROL_SUSPEND = to_bytes([108])
+SERVER_FLOWCONTROL_RESUME = to_bytes([109])
+SERVER_SET_LINESTATE_MASK = to_bytes([110])
+SERVER_SET_MODEMSTATE_MASK = to_bytes([111])
+SERVER_PURGE_DATA = to_bytes([112])
+
+RFC2217_ANSWER_MAP = {
+    SET_BAUDRATE: SERVER_SET_BAUDRATE,
+    SET_DATASIZE: SERVER_SET_DATASIZE,
+    SET_PARITY: SERVER_SET_PARITY,
+    SET_STOPSIZE: SERVER_SET_STOPSIZE,
+    SET_CONTROL: SERVER_SET_CONTROL,
+    NOTIFY_LINESTATE: SERVER_NOTIFY_LINESTATE,
+    NOTIFY_MODEMSTATE: SERVER_NOTIFY_MODEMSTATE,
+    FLOWCONTROL_SUSPEND: SERVER_FLOWCONTROL_SUSPEND,
+    FLOWCONTROL_RESUME: SERVER_FLOWCONTROL_RESUME,
+    SET_LINESTATE_MASK: SERVER_SET_LINESTATE_MASK,
+    SET_MODEMSTATE_MASK: SERVER_SET_MODEMSTATE_MASK,
+    PURGE_DATA: SERVER_PURGE_DATA,
+}
+
+SET_CONTROL_REQ_FLOW_SETTING = to_bytes([0])        # Request Com Port Flow Control Setting (outbound/both)
+SET_CONTROL_USE_NO_FLOW_CONTROL = to_bytes([1])     # Use No Flow Control (outbound/both)
+SET_CONTROL_USE_SW_FLOW_CONTROL = to_bytes([2])     # Use XON/XOFF Flow Control (outbound/both)
+SET_CONTROL_USE_HW_FLOW_CONTROL = to_bytes([3])     # Use HARDWARE Flow Control (outbound/both)
+SET_CONTROL_REQ_BREAK_STATE = to_bytes([4])         # Request BREAK State
+SET_CONTROL_BREAK_ON = to_bytes([5])                # Set BREAK State ON
+SET_CONTROL_BREAK_OFF = to_bytes([6])               # Set BREAK State OFF
+SET_CONTROL_REQ_DTR = to_bytes([7])                 # Request DTR Signal State
+SET_CONTROL_DTR_ON = to_bytes([8])                  # Set DTR Signal State ON
+SET_CONTROL_DTR_OFF = to_bytes([9])                 # Set DTR Signal State OFF
+SET_CONTROL_REQ_RTS = to_bytes([10])                # Request RTS Signal State
+SET_CONTROL_RTS_ON = to_bytes([11])                 # Set RTS Signal State ON
+SET_CONTROL_RTS_OFF = to_bytes([12])                # Set RTS Signal State OFF
+SET_CONTROL_REQ_FLOW_SETTING_IN = to_bytes([13])    # Request Com Port Flow Control Setting (inbound)
+SET_CONTROL_USE_NO_FLOW_CONTROL_IN = to_bytes([14]) # Use No Flow Control (inbound)
+SET_CONTROL_USE_SW_FLOW_CONTOL_IN = to_bytes([15])  # Use XON/XOFF Flow Control (inbound)
+SET_CONTROL_USE_HW_FLOW_CONTOL_IN = to_bytes([16])  # Use HARDWARE Flow Control (inbound)
+SET_CONTROL_USE_DCD_FLOW_CONTROL = to_bytes([17])   # Use DCD Flow Control (outbound/both)
+SET_CONTROL_USE_DTR_FLOW_CONTROL = to_bytes([18])   # Use DTR Flow Control (inbound)
+SET_CONTROL_USE_DSR_FLOW_CONTROL = to_bytes([19])   # Use DSR Flow Control (outbound/both)
+
+LINESTATE_MASK_TIMEOUT = 128                # Time-out Error
+LINESTATE_MASK_SHIFTREG_EMPTY = 64          # Transfer Shift Register Empty
+LINESTATE_MASK_TRANSREG_EMPTY = 32          # Transfer Holding Register Empty
+LINESTATE_MASK_BREAK_DETECT = 16            # Break-detect Error
+LINESTATE_MASK_FRAMING_ERROR = 8            # Framing Error
+LINESTATE_MASK_PARTIY_ERROR = 4             # Parity Error
+LINESTATE_MASK_OVERRUN_ERROR = 2            # Overrun Error
+LINESTATE_MASK_DATA_READY = 1               # Data Ready
+
+MODEMSTATE_MASK_CD = 128                    # Receive Line Signal Detect (also known as Carrier Detect)
+MODEMSTATE_MASK_RI = 64                     # Ring Indicator
+MODEMSTATE_MASK_DSR = 32                    # Data-Set-Ready Signal State
+MODEMSTATE_MASK_CTS = 16                    # Clear-To-Send Signal State
+MODEMSTATE_MASK_CD_CHANGE = 8               # Delta Receive Line Signal Detect
+MODEMSTATE_MASK_RI_CHANGE = 4               # Trailing-edge Ring Detector
+MODEMSTATE_MASK_DSR_CHANGE = 2              # Delta Data-Set-Ready
+MODEMSTATE_MASK_CTS_CHANGE = 1              # Delta Clear-To-Send
+
+PURGE_RECEIVE_BUFFER = to_bytes([1])        # Purge access server receive data buffer
+PURGE_TRANSMIT_BUFFER = to_bytes([2])       # Purge access server transmit data buffer
+PURGE_BOTH_BUFFERS = to_bytes([3])          # Purge both the access server receive data buffer and the access server transmit data buffer
+
+
+RFC2217_PARITY_MAP = {
+    PARITY_NONE: 1,
+    PARITY_ODD: 2,
+    PARITY_EVEN: 3,
+    PARITY_MARK: 4,
+    PARITY_SPACE: 5,
+}
+RFC2217_REVERSE_PARITY_MAP = dict((v,k) for k,v in RFC2217_PARITY_MAP.items())
+
+RFC2217_STOPBIT_MAP = {
+    STOPBITS_ONE: 1,
+    STOPBITS_ONE_POINT_FIVE: 3,
+    STOPBITS_TWO: 2,
+}
+RFC2217_REVERSE_STOPBIT_MAP = dict((v,k) for k,v in RFC2217_STOPBIT_MAP.items())
+
+# Telnet filter states
+M_NORMAL = 0
+M_IAC_SEEN = 1
+M_NEGOTIATE = 2
+
+# TelnetOption and TelnetSubnegotiation states
+REQUESTED = 'REQUESTED'
+ACTIVE = 'ACTIVE'
+INACTIVE = 'INACTIVE'
+REALLY_INACTIVE = 'REALLY_INACTIVE'
+
+class TelnetOption(object):
+    """Manage a single telnet option, keeps track of DO/DONT WILL/WONT."""
+
+    def __init__(self, connection, name, option, send_yes, send_no, ack_yes, ack_no, initial_state, activation_callback=None):
+        """\
+        Initialize option.
+        :param connection: connection used to transmit answers
+        :param name: a readable name for debug outputs
+        :param send_yes: what to send when option is to be enabled.
+        :param send_no: what to send when option is to be disabled.
+        :param ack_yes: what to expect when remote agrees on option.
+        :param ack_no: what to expect when remote disagrees on option.
+        :param initial_state: options initialized with REQUESTED are negotiated
+            (enabled) on startup. use INACTIVE for all others.
+        """
+        self.connection = connection
+        self.name = name
+        self.option = option
+        self.send_yes = send_yes
+        self.send_no = send_no
+        self.ack_yes = ack_yes
+        self.ack_no = ack_no
+        self.state = initial_state
+        self.active = False
+        self.activation_callback = activation_callback
+
+    def __repr__(self):
+        """String for debug outputs"""
+        return "%s:%s(%s)" % (self.name, self.active, self.state)
+
+    def process_incoming(self, command):
+        """A DO/DONT/WILL/WONT was received for this option, update state and
+        answer when needed."""
+        if command == self.ack_yes:
+            if self.state is REQUESTED:
+                self.state = ACTIVE
+                self.active = True
+                if self.activation_callback is not None:
+                    self.activation_callback()
+            elif self.state is ACTIVE:
+                pass
+            elif self.state is INACTIVE:
+                self.state = ACTIVE
+                self.connection.telnetSendOption(self.send_yes, self.option)
+                self.active = True
+                if self.activation_callback is not None:
+                    self.activation_callback()
+            elif self.state is REALLY_INACTIVE:
+                self.connection.telnetSendOption(self.send_no, self.option)
+            else:
+                raise ValueError('option in illegal state %r' % self)
+        elif command == self.ack_no:
+            if self.state is REQUESTED:
+                self.state = INACTIVE
+                self.active = False
+            elif self.state is ACTIVE:
+                self.state = INACTIVE
+                self.connection.telnetSendOption(self.send_no, self.option)
+                self.active = False
+            elif self.state is INACTIVE:
+                pass
+            elif self.state is REALLY_INACTIVE:
+                pass
+            else:
+                raise ValueError('option in illegal state %r' % self)
+
+
+class TelnetSubnegotiation(object):
+    """\
+    An object to handle subnegotiation of options. In this case actually
+    sub-sub options for RFC 2217. It is used to track com port options.
+    """
+
+    def __init__(self, connection, name, option, ack_option=None):
+        if ack_option is None: ack_option = option
+        self.connection = connection
+        self.name = name
+        self.option = option
+        self.value = None
+        self.ack_option = ack_option
+        self.state = INACTIVE
+
+    def __repr__(self):
+        """String for debug outputs."""
+        return "%s:%s" % (self.name, self.state)
+
+    def set(self, value):
+        """\
+        request a change of the value. a request is sent to the server. if
+        the client needs to know if the change is performed he has to check the
+        state of this object.
+        """
+        self.value = value
+        self.state = REQUESTED
+        self.connection.rfc2217SendSubnegotiation(self.option, self.value)
+        if self.connection.logger:
+            self.connection.logger.debug("SB Requesting %s -> %r" % (self.name, self.value))
+
+    def isReady(self):
+        """\
+        check if answer from server has been received. when server rejects
+        the change, raise a ValueError.
+        """
+        if self.state == REALLY_INACTIVE:
+            raise ValueError("remote rejected value for option %r" % (self.name))
+        return self.state == ACTIVE
+    # add property to have a similar interface as TelnetOption
+    active = property(isReady)
+
+    def wait(self, timeout=3):
+        """\
+        wait until the subnegotiation has been acknowledged or timeout. It
+        can also throw a value error when the answer from the server does not
+        match the value sent.
+        """
+        timeout_time = time.time() + timeout
+        while time.time() < timeout_time:
+            time.sleep(0.05)    # prevent 100% CPU load
+            if self.isReady():
+                break
+        else:
+            raise SerialException("timeout while waiting for option %r" % (self.name))
+
+    def checkAnswer(self, suboption):
+        """\
+        check an incoming subnegotiation block. the parameter has already had
+        the header (sub option number and com port option value) cut off.
+        """
+        if self.value == suboption[:len(self.value)]:
+            self.state = ACTIVE
+        else:
+            # error propagation done in isReady
+            self.state = REALLY_INACTIVE
+        if self.connection.logger:
+            self.connection.logger.debug("SB Answer %s -> %r -> %s" % (self.name, suboption, self.state))
+
+
+class RFC2217Serial(SerialBase):
+    """Serial port implementation for RFC 2217 remote serial ports."""
+
+    BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
+                 9600, 19200, 38400, 57600, 115200)
+
+    def open(self):
+        """\
+        Open port with current settings. This may throw a SerialException
+        if the port cannot be opened.
+        """
+        self.logger = None
+        self._ignore_set_control_answer = False
+        self._poll_modem_state = False
+        self._network_timeout = 3
+        if self._port is None:
+            raise SerialException("Port must be configured before it can be used.")
+        if self._isOpen:
+            raise SerialException("Port is already open.")
+        try:
+            self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            self._socket.connect(self.fromURL(self.portstr))
+            self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+        except Exception, msg:
+            self._socket = None
+            raise SerialException("Could not open port %s: %s" % (self.portstr, msg))
+
+        self._socket.settimeout(5) # XXX good value?
+
+        # use a thread-safe queue as buffer. it also simplifies implementing
+        # the read timeout
+        self._read_buffer = Queue.Queue()
+        # to ensure that user writes do not interfere with internal
+        # telnet/rfc2217 options establish a lock
+        self._write_lock = threading.Lock()
+        # name the following separately so that, below, a check can be easily done
+        mandadory_options = [
+            TelnetOption(self, 'we-BINARY', BINARY, WILL, WONT, DO, DONT, INACTIVE),
+            TelnetOption(self, 'we-RFC2217', COM_PORT_OPTION, WILL, WONT, DO, DONT, REQUESTED),
+        ]
+        # all supported telnet options
+        self._telnet_options = [
+            TelnetOption(self, 'ECHO', ECHO, DO, DONT, WILL, WONT, REQUESTED),
+            TelnetOption(self, 'we-SGA', SGA, WILL, WONT, DO, DONT, REQUESTED),
+            TelnetOption(self, 'they-SGA', SGA, DO, DONT, WILL, WONT, REQUESTED),
+            TelnetOption(self, 'they-BINARY', BINARY, DO, DONT, WILL, WONT, INACTIVE),
+            TelnetOption(self, 'they-RFC2217', COM_PORT_OPTION, DO, DONT, WILL, WONT, REQUESTED),
+        ] + mandadory_options
+        # RFC 2217 specific states
+        # COM port settings
+        self._rfc2217_port_settings = {
+            'baudrate': TelnetSubnegotiation(self, 'baudrate', SET_BAUDRATE, SERVER_SET_BAUDRATE),
+            'datasize': TelnetSubnegotiation(self, 'datasize', SET_DATASIZE, SERVER_SET_DATASIZE),
+            'parity':   TelnetSubnegotiation(self, 'parity',   SET_PARITY,   SERVER_SET_PARITY),
+            'stopsize': TelnetSubnegotiation(self, 'stopsize', SET_STOPSIZE, SERVER_SET_STOPSIZE),
+            }
+        # There are more subnegotiation objects, combine all in one dictionary
+        # for easy access
+        self._rfc2217_options = {
+            'purge':    TelnetSubnegotiation(self, 'purge',    PURGE_DATA,   SERVER_PURGE_DATA),
+            'control':  TelnetSubnegotiation(self, 'control',  SET_CONTROL,  SERVER_SET_CONTROL),
+            }
+        self._rfc2217_options.update(self._rfc2217_port_settings)
+        # cache for line and modem states that the server sends to us
+        self._linestate = 0
+        self._modemstate = None
+        self._modemstate_expires = 0
+        # RFC 2217 flow control between server and client
+        self._remote_suspend_flow = False
+
+        self._thread = threading.Thread(target=self._telnetReadLoop)
+        self._thread.setDaemon(True)
+        self._thread.setName('pySerial RFC 2217 reader thread for %s' % (self._port,))
+        self._thread.start()
+
+        # negotiate Telnet/RFC 2217 -> send initial requests
+        for option in self._telnet_options:
+            if option.state is REQUESTED:
+                self.telnetSendOption(option.send_yes, option.option)
+        # now wait until important options are negotiated
+        timeout_time = time.time() + self._network_timeout
+        while time.time() < timeout_time:
+            time.sleep(0.05)    # prevent 100% CPU load
+            if sum(o.active for o in mandadory_options) == len(mandadory_options):
+                break
+        else:
+            raise SerialException("Remote does not seem to support RFC2217 or BINARY mode %r" % mandadory_options)
+        if self.logger:
+            self.logger.info("Negotiated options: %s" % self._telnet_options)
+
+        # fine, go on, set RFC 2217 specific things
+        self._reconfigurePort()
+        # everything is set up, now do a clean start
+        self._isOpen = True
+        if not self._rtscts:
+            self.setRTS(True)
+            self.setDTR(True)
+        self.flushInput()
+        self.flushOutput()
+
+    def _reconfigurePort(self):
+        """Set communication parameters on opened port."""
+        if self._socket is None:
+            raise SerialException("Can only operate on open ports")
+
+        # if self._timeout != 0 and self._interCharTimeout is not None:
+            # XXX
+
+        if self._writeTimeout is not None:
+            raise NotImplementedError('writeTimeout is currently not supported')
+            # XXX
+
+        # Setup the connection
+        # to get good performance, all parameter changes are sent first...
+        if not isinstance(self._baudrate, (int, long)) or not 0 < self._baudrate < 2**32:
+            raise ValueError("invalid baudrate: %r" % (self._baudrate))
+        self._rfc2217_port_settings['baudrate'].set(struct.pack('!I', self._baudrate))
+        self._rfc2217_port_settings['datasize'].set(struct.pack('!B', self._bytesize))
+        self._rfc2217_port_settings['parity'].set(struct.pack('!B', RFC2217_PARITY_MAP[self._parity]))
+        self._rfc2217_port_settings['stopsize'].set(struct.pack('!B', RFC2217_STOPBIT_MAP[self._stopbits]))
+
+        # and now wait until parameters are active
+        items = self._rfc2217_port_settings.values()
+        if self.logger:
+            self.logger.debug("Negotiating settings: %s" % (items,))
+        timeout_time = time.time() + self._network_timeout
+        while time.time() < timeout_time:
+            time.sleep(0.05)    # prevent 100% CPU load
+            if sum(o.active for o in items) == len(items):
+                break
+        else:
+            raise SerialException("Remote does not accept parameter change (RFC2217): %r" % items)
+        if self.logger:
+            self.logger.info("Negotiated settings: %s" % (items,))
+
+        if self._rtscts and self._xonxoff:
+            raise ValueError('xonxoff and rtscts together are not supported')
+        elif self._rtscts:
+            self.rfc2217SetControl(SET_CONTROL_USE_HW_FLOW_CONTROL)
+        elif self._xonxoff:
+            self.rfc2217SetControl(SET_CONTROL_USE_SW_FLOW_CONTROL)
+        else:
+            self.rfc2217SetControl(SET_CONTROL_USE_NO_FLOW_CONTROL)
+
+    def close(self):
+        """Close port"""
+        if self._isOpen:
+            if self._socket:
+                try:
+                    self._socket.shutdown(socket.SHUT_RDWR)
+                    self._socket.close()
+                except:
+                    # ignore errors.
+                    pass
+                self._socket = None
+            if self._thread:
+                self._thread.join()
+            self._isOpen = False
+            # in case of quick reconnects, give the server some time
+            time.sleep(0.3)
+
+    def makeDeviceName(self, port):
+        raise SerialException("there is no sensible way to turn numbers into URLs")
+
+    def fromURL(self, url):
+        """extract host and port from an URL string"""
+        if url.lower().startswith("rfc2217://"): url = url[10:]
+        try:
+            # is there a "path" (our options)?
+            if '/' in url:
+                # cut away options
+                url, options = url.split('/', 1)
+                # process options now, directly altering self
+                for option in options.split('/'):
+                    if '=' in option:
+                        option, value = option.split('=', 1)
+                    else:
+                        value = None
+                    if option == 'logging':
+                        logging.basicConfig()   # XXX is that good to call it here?
+                        self.logger = logging.getLogger('pySerial.rfc2217')
+                        self.logger.setLevel(LOGGER_LEVELS[value])
+                        self.logger.debug('enabled logging')
+                    elif option == 'ign_set_control':
+                        self._ignore_set_control_answer = True
+                    elif option == 'poll_modem':
+                        self._poll_modem_state = True
+                    elif option == 'timeout':
+                        self._network_timeout = float(value)
+                    else:
+                        raise ValueError('unknown option: %r' % (option,))
+            # get host and port
+            host, port = url.split(':', 1) # may raise ValueError because of unpacking
+            port = int(port)               # and this if it's not a number
+            if not 0 <= port < 65536: raise ValueError("port not in range 0...65535")
+        except ValueError, e:
+            raise SerialException('expected a string in the form "[rfc2217://]<host>:<port>[/option[/option...]]": %s' % e)
+        return (host, port)
+
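+    # Example URLs as accepted by fromURL() above (a sketch only; host and
+    # port are placeholders and the option values are assumptions based on
+    # the parsing above, e.g. 'debug' assumes a matching key in LOGGER_LEVELS):
+    #   rfc2217://localhost:7000
+    #   rfc2217://localhost:7000/logging=debug
+    #   rfc2217://localhost:7000/ign_set_control/poll_modem/timeout=5
+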
+    #  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -
+
+    def inWaiting(self):
+        """Return the number of characters currently in the input buffer."""
+        if not self._isOpen: raise portNotOpenError
+        return self._read_buffer.qsize()
+
+    def read(self, size=1):
+        """\
+        Read size bytes from the serial port. If a timeout is set it may
+        return fewer characters than requested. With no timeout it will block
+        until the requested number of bytes is read.
+        """
+        if not self._isOpen: raise portNotOpenError
+        data = bytearray()
+        try:
+            while len(data) < size:
+                if self._thread is None:
+                    raise SerialException('connection failed (reader thread died)')
+                data.append(self._read_buffer.get(True, self._timeout))
+        except Queue.Empty: # -> timeout
+            pass
+        return bytes(data)
+
+    def write(self, data):
+        """\
+        Output the given string over the serial port. Can block if the
+        connection is blocked. May raise SerialException if the connection is
+        closed.
+        """
+        if not self._isOpen: raise portNotOpenError
+        self._write_lock.acquire()
+        try:
+            try:
+                self._socket.sendall(to_bytes(data).replace(IAC, IAC_DOUBLED))
+            except socket.error, e:
+                raise SerialException("connection failed (socket error): %s" % e) # XXX what exception if socket connection fails
+        finally:
+            self._write_lock.release()
+        return len(data)
+
+    def flushInput(self):
+        """Clear input buffer, discarding all that is in the buffer."""
+        if not self._isOpen: raise portNotOpenError
+        self.rfc2217SendPurge(PURGE_RECEIVE_BUFFER)
+        # empty read buffer
+        while self._read_buffer.qsize():
+            self._read_buffer.get(False)
+
+    def flushOutput(self):
+        """\
+        Clear output buffer, aborting the current output and
+        discarding all that is in the buffer.
+        """
+        if not self._isOpen: raise portNotOpenError
+        self.rfc2217SendPurge(PURGE_TRANSMIT_BUFFER)
+
+    def sendBreak(self, duration=0.25):
+        """Send break condition. Timed, returns to idle state after given
+        duration."""
+        if not self._isOpen: raise portNotOpenError
+        self.setBreak(True)
+        time.sleep(duration)
+        self.setBreak(False)
+
+    def setBreak(self, level=True):
+        """\
+        Set break: Controls TXD. When active, no transmitting is
+        possible.
+        """
+        if not self._isOpen: raise portNotOpenError
+        if self.logger:
+            self.logger.info('set BREAK to %s' % ('inactive', 'active')[bool(level)])
+        if level:
+            self.rfc2217SetControl(SET_CONTROL_BREAK_ON)
+        else:
+            self.rfc2217SetControl(SET_CONTROL_BREAK_OFF)
+
+    def setRTS(self, level=True):
+        """Set terminal status line: Request To Send."""
+        if not self._isOpen: raise portNotOpenError
+        if self.logger:
+            self.logger.info('set RTS to %s' % ('inactive', 'active')[bool(level)])
+        if level:
+            self.rfc2217SetControl(SET_CONTROL_RTS_ON)
+        else:
+            self.rfc2217SetControl(SET_CONTROL_RTS_OFF)
+
+    def setDTR(self, level=True):
+        """Set terminal status line: Data Terminal Ready."""
+        if not self._isOpen: raise portNotOpenError
+        if self.logger:
+            self.logger.info('set DTR to %s' % ('inactive', 'active')[bool(level)])
+        if level:
+            self.rfc2217SetControl(SET_CONTROL_DTR_ON)
+        else:
+            self.rfc2217SetControl(SET_CONTROL_DTR_OFF)
+
+    def getCTS(self):
+        """Read terminal status line: Clear To Send."""
+        if not self._isOpen: raise portNotOpenError
+        return bool(self.getModemState() & MODEMSTATE_MASK_CTS)
+
+    def getDSR(self):
+        """Read terminal status line: Data Set Ready."""
+        if not self._isOpen: raise portNotOpenError
+        return bool(self.getModemState() & MODEMSTATE_MASK_DSR)
+
+    def getRI(self):
+        """Read terminal status line: Ring Indicator."""
+        if not self._isOpen: raise portNotOpenError
+        return bool(self.getModemState() & MODEMSTATE_MASK_RI)
+
+    def getCD(self):
+        """Read terminal status line: Carrier Detect."""
+        if not self._isOpen: raise portNotOpenError
+        return bool(self.getModemState() & MODEMSTATE_MASK_CD)
+
+    # - - - platform specific - - -
+    # None so far
+
+    # - - - RFC2217 specific - - -
+
+    def _telnetReadLoop(self):
+        """read loop for the socket."""
+        mode = M_NORMAL
+        suboption = None
+        try:
+            while self._socket is not None:
+                try:
+                    data = self._socket.recv(1024)
+                except socket.timeout:
+                    # just need to get out of recv from time to time to check if
+                    # still alive
+                    continue
+                except socket.error, e:
+                    # connection fails -> terminate loop
+                    if self.logger:
+                        self.logger.debug("socket error in reader thread: %s" % (e,))
+                    break
+                if not data: break # lost connection
+                for byte in data:
+                    if mode == M_NORMAL:
+                        # interpret as command or as data
+                        if byte == IAC:
+                            mode = M_IAC_SEEN
+                        else:
+                            # store data in read buffer or sub option buffer
+                            # depending on state
+                            if suboption is not None:
+                                suboption.append(byte)
+                            else:
+                                self._read_buffer.put(byte)
+                    elif mode == M_IAC_SEEN:
+                        if byte == IAC:
+                            # interpret as command doubled -> insert character
+                            # itself
+                            if suboption is not None:
+                                suboption.append(IAC)
+                            else:
+                                self._read_buffer.put(IAC)
+                            mode = M_NORMAL
+                        elif byte == SB:
+                            # sub option start
+                            suboption = bytearray()
+                            mode = M_NORMAL
+                        elif byte == SE:
+                            # sub option end -> process it now
+                            self._telnetProcessSubnegotiation(bytes(suboption))
+                            suboption = None
+                            mode = M_NORMAL
+                        elif byte in (DO, DONT, WILL, WONT):
+                            # negotiation
+                            telnet_command = byte
+                            mode = M_NEGOTIATE
+                        else:
+                            # other telnet commands
+                            self._telnetProcessCommand(byte)
+                            mode = M_NORMAL
+                    elif mode == M_NEGOTIATE: # DO, DONT, WILL, WONT was received, option now following
+                        self._telnetNegotiateOption(telnet_command, byte)
+                        mode = M_NORMAL
+        finally:
+            self._thread = None
+            if self.logger:
+                self.logger.debug("read thread terminated")
+
+    # - incoming telnet commands and options
+
+    def _telnetProcessCommand(self, command):
+        """Process commands other than DO, DONT, WILL, WONT."""
+        # Currently none. RFC2217 only uses negotiation and subnegotiation.
+        if self.logger:
+            self.logger.warning("ignoring Telnet command: %r" % (command,))
+
+    def _telnetNegotiateOption(self, command, option):
+        """Process incoming DO, DONT, WILL, WONT."""
+        # check our registered telnet options and forward command to them
+        # they know themselves if they have to answer or not
+        known = False
+        for item in self._telnet_options:
+            # can have more than one match, as some options are duplicated for
+            # 'us' and 'them'
+            if item.option == option:
+                item.process_incoming(command)
+                known = True
+        if not known:
+            # handle unknown options
+            # only answer to positive requests and deny them
+            if command == WILL or command == DO:
+                self.telnetSendOption((command == WILL and DONT or WONT), option)
+                if self.logger:
+                    self.logger.warning("rejected Telnet option: %r" % (option,))
+
+
+    def _telnetProcessSubnegotiation(self, suboption):
+        """Process subnegotiation, the data between IAC SB and IAC SE."""
+        if suboption[0:1] == COM_PORT_OPTION:
+            if suboption[1:2] == SERVER_NOTIFY_LINESTATE and len(suboption) >= 3:
+                self._linestate = ord(suboption[2:3]) # ensure it is a number
+                if self.logger:
+                    self.logger.info("NOTIFY_LINESTATE: %s" % self._linestate)
+            elif suboption[1:2] == SERVER_NOTIFY_MODEMSTATE and len(suboption) >= 3:
+                self._modemstate = ord(suboption[2:3]) # ensure it is a number
+                if self.logger:
+                    self.logger.info("NOTIFY_MODEMSTATE: %s" % self._modemstate)
+                # update time when we think that a poll would make sense
+                self._modemstate_expires = time.time() + 0.3
+            elif suboption[1:2] == FLOWCONTROL_SUSPEND:
+                self._remote_suspend_flow = True
+            elif suboption[1:2] == FLOWCONTROL_RESUME:
+                self._remote_suspend_flow = False
+            else:
+                for item in self._rfc2217_options.values():
+                    if item.ack_option == suboption[1:2]:
+                        #~ print "processing COM_PORT_OPTION: %r" % list(suboption[1:])
+                        item.checkAnswer(bytes(suboption[2:]))
+                        break
+                else:
+                    if self.logger:
+                        self.logger.warning("ignoring COM_PORT_OPTION: %r" % (suboption,))
+        else:
+            if self.logger:
+                self.logger.warning("ignoring subnegotiation: %r" % (suboption,))
+
+    # - outgoing telnet commands and options
+
+    def _internal_raw_write(self, data):
+        """internal socket write with no data escaping. used to send telnet stuff."""
+        self._write_lock.acquire()
+        try:
+            self._socket.sendall(data)
+        finally:
+            self._write_lock.release()
+
+    def telnetSendOption(self, action, option):
+        """Send DO, DONT, WILL, WONT."""
+        self._internal_raw_write(to_bytes([IAC, action, option]))
+
+    def rfc2217SendSubnegotiation(self, option, value=''):
+        """Subnegotiation of RFC2217 parameters."""
+        value = value.replace(IAC, IAC_DOUBLED)
+        self._internal_raw_write(to_bytes([IAC, SB, COM_PORT_OPTION, option] + list(value) + [IAC, SE]))
+
+    def rfc2217SendPurge(self, value):
+        item = self._rfc2217_options['purge']
+        item.set(value) # transmit desired purge type
+        item.wait(self._network_timeout) # wait for acknowledge from the server
+
+    def rfc2217SetControl(self, value):
+        item = self._rfc2217_options['control']
+        item.set(value) # transmit desired control type
+        if self._ignore_set_control_answer:
+            # answers are ignored when option is set. compatibility mode for
+            # servers that answer, but not the expected one... (or no answer
+            # at all) i.e. sredird
+            time.sleep(0.1)  # this helps getting the unit tests passed
+        else:
+            item.wait(self._network_timeout)  # wait for acknowledge from the server
+
+    def rfc2217FlowServerReady(self):
+        """\
+        check if server is ready to receive data. block for some time when
+        not.
+        """
+        #~ if self._remote_suspend_flow:
+            #~ wait---
+
+    def getModemState(self):
+        """\
+        get last modem state (cached value). if the value is "old", request a
+        new one. this cache helps to avoid issuing too many requests when e.g.
+        all status lines are queried by the user one after the other (getCTS,
+        getDSR etc.)
+        """
+        # active modem state polling enabled? is the value fresh enough?
+        if self._poll_modem_state and self._modemstate_expires < time.time():
+            if self.logger:
+                self.logger.debug('polling modem state')
+            # when it is older, request an update
+            self.rfc2217SendSubnegotiation(NOTIFY_MODEMSTATE)
+            timeout_time = time.time() + self._network_timeout
+            while time.time() < timeout_time:
+                time.sleep(0.05)    # prevent 100% CPU load
+                # when expiration time is updated, it means that there is a new
+                # value
+                if self._modemstate_expires > time.time():
+                    if self.logger:
+                        self.logger.warning('poll for modem state failed')
+                    break
+            # even when there is a timeout, do not generate an error just
+            # return the last known value. this way we can support buggy
+            # servers that do not respond to polls, but send automatic
+            # updates.
+        if self._modemstate is not None:
+            if self.logger:
+                self.logger.debug('using cached modem state')
+            return self._modemstate
+        else:
+            # never received a notification from the server
+            raise SerialException("remote sends no NOTIFY_MODEMSTATE")
+
+
+# assemble Serial class with the platform specific implementation and the base
+# for file-like behavior. for Python 2.6 and newer, which provide the new I/O
+# library, derive from io.RawIOBase
+try:
+    import io
+except ImportError:
+    # classic version with our own file-like emulation
+    class Serial(RFC2217Serial, FileLike):
+        pass
+else:
+    # io library present
+    class Serial(RFC2217Serial, io.RawIOBase):
+        pass
+
+
+#############################################################################
+# The following is code that helps implementing an RFC 2217 server.
+
+class PortManager(object):
+    """\
+    This class manages the state of Telnet and RFC 2217. It needs a serial
+    instance and a connection to work with. The connection is expected to
+    implement a (thread-safe) write function that writes the string to the
+    network.
+    """
+
+    def __init__(self, serial_port, connection, logger=None):
+        self.serial = serial_port
+        self.connection = connection
+        self.logger = logger
+        self._client_is_rfc2217 = False
+
+        # filter state machine
+        self.mode = M_NORMAL
+        self.suboption = None
+        self.telnet_command = None
+
+        # states for modem/line control events
+        self.modemstate_mask = 255
+        self.last_modemstate = None
+        self.linstate_mask = 0
+
+        # all supported telnet options
+        self._telnet_options = [
+            TelnetOption(self, 'ECHO', ECHO, WILL, WONT, DO, DONT, REQUESTED),
+            TelnetOption(self, 'we-SGA', SGA, WILL, WONT, DO, DONT, REQUESTED),
+            TelnetOption(self, 'they-SGA', SGA, DO, DONT, WILL, WONT, INACTIVE),
+            TelnetOption(self, 'we-BINARY', BINARY, WILL, WONT, DO, DONT, INACTIVE),
+            TelnetOption(self, 'they-BINARY', BINARY, DO, DONT, WILL, WONT, REQUESTED),
+            TelnetOption(self, 'we-RFC2217', COM_PORT_OPTION, WILL, WONT, DO, DONT, REQUESTED, self._client_ok),
+            TelnetOption(self, 'they-RFC2217', COM_PORT_OPTION, DO, DONT, WILL, WONT, INACTIVE, self._client_ok),
+            ]
+
+        # negotiate Telnet/RFC2217 -> send initial requests
+        if self.logger:
+            self.logger.debug("requesting initial Telnet/RFC 2217 options")
+        for option in self._telnet_options:
+            if option.state is REQUESTED:
+                self.telnetSendOption(option.send_yes, option.option)
+        # issue 1st modem state notification
+
+    def _client_ok(self):
+        """\
+        callback of telnet option. it gets called when option is activated.
+        this one here is used to detect when the client agrees on RFC 2217. a
+        flag is set so that other functions like check_modem_lines know if the
+        client is ok.
+        """
+        # The callback is used for 'we' and 'they', so if one party agrees,
+        # we're already happy. it seems not all servers do the negotiation
+        # correctly and there are probably incorrect clients too, so be happy
+        # if the client answers one or the other positively.
+        self._client_is_rfc2217 = True
+        if self.logger:
+            self.logger.info("client accepts RFC 2217")
+        # this is to ensure that the client gets a notification, even if there
+        # was no change
+        self.check_modem_lines(force_notification=True)
+
+    # - outgoing telnet commands and options
+
+    def telnetSendOption(self, action, option):
+        """Send DO, DONT, WILL, WONT."""
+        self.connection.write(to_bytes([IAC, action, option]))
+
+    def rfc2217SendSubnegotiation(self, option, value=''):
+        """Subnegotiation of RFC 2217 parameters."""
+        value = value.replace(IAC, IAC_DOUBLED)
+        self.connection.write(to_bytes([IAC, SB, COM_PORT_OPTION, option] + list(value) + [IAC, SE]))
+
+    # - check modem lines, needs to be called periodically from user to
+    # establish polling
+
+    def check_modem_lines(self, force_notification=False):
+        modemstate = (
+            (self.serial.getCTS() and MODEMSTATE_MASK_CTS) |
+            (self.serial.getDSR() and MODEMSTATE_MASK_DSR) |
+            (self.serial.getRI() and MODEMSTATE_MASK_RI) |
+            (self.serial.getCD() and MODEMSTATE_MASK_CD)
+        )
+        # check what has changed
+        deltas = modemstate ^ (self.last_modemstate or 0) # when last is None -> 0
+        if deltas & MODEMSTATE_MASK_CTS:
+            modemstate |= MODEMSTATE_MASK_CTS_CHANGE
+        if deltas & MODEMSTATE_MASK_DSR:
+            modemstate |= MODEMSTATE_MASK_DSR_CHANGE
+        if deltas & MODEMSTATE_MASK_RI:
+            modemstate |= MODEMSTATE_MASK_RI_CHANGE
+        if deltas & MODEMSTATE_MASK_CD:
+            modemstate |= MODEMSTATE_MASK_CD_CHANGE
+        # if new state is different and the mask allows this change, send
+        # notification. suppress notifications when client is not rfc2217
+        if modemstate != self.last_modemstate or force_notification:
+            if (self._client_is_rfc2217 and (modemstate & self.modemstate_mask)) or force_notification:
+                self.rfc2217SendSubnegotiation(
+                    SERVER_NOTIFY_MODEMSTATE,
+                    to_bytes([modemstate & self.modemstate_mask])
+                    )
+                if self.logger:
+                    self.logger.info("NOTIFY_MODEMSTATE: %s" % (modemstate,))
+            # save last state, but forget about deltas.
+            # otherwise it would also notify about changing deltas which is
+            # probably not very useful
+            self.last_modemstate = modemstate & 0xf0
+
+    # - outgoing data escaping
+
+    def escape(self, data):
+        """\
+        this generator function is for the user. all outgoing data has to be
+        properly escaped, so that no IAC character in the data stream messes up
+        the Telnet state machine in the server.
+
+        socket.sendall(b''.join(escape(data)))
+        """
+        for byte in data:
+            if byte == IAC:
+                yield IAC
+                yield IAC
+            else:
+                yield byte
+
+    # - incoming data filter
+
+    def filter(self, data):
+        """\
+        handle a bunch of incoming bytes. this is a generator. it will yield
+        all characters not of interest for Telnet/RFC 2217.
+
+        The idea is that the reader thread pushes data from the socket through
+        this filter:
+
+        for byte in filter(socket.recv(1024)):
+            # do things like CR/LF conversion/whatever
+            # and write data to the serial port
+            serial.write(byte)
+
+        (socket error handling code left as exercise for the reader)
+        """
+        for byte in data:
+            if self.mode == M_NORMAL:
+                # interpret as command or as data
+                if byte == IAC:
+                    self.mode = M_IAC_SEEN
+                else:
+                    # store data in sub option buffer or pass it to our
+                    # consumer depending on state
+                    if self.suboption is not None:
+                        self.suboption.append(byte)
+                    else:
+                        yield byte
+            elif self.mode == M_IAC_SEEN:
+                if byte == IAC:
+                    # interpret as command doubled -> insert character
+                    # itself
+                    if self.suboption is not None:
+                        self.suboption.append(byte)
+                    else:
+                        yield byte
+                    self.mode = M_NORMAL
+                elif byte == SB:
+                    # sub option start
+                    self.suboption = bytearray()
+                    self.mode = M_NORMAL
+                elif byte == SE:
+                    # sub option end -> process it now
+                    self._telnetProcessSubnegotiation(bytes(self.suboption))
+                    self.suboption = None
+                    self.mode = M_NORMAL
+                elif byte in (DO, DONT, WILL, WONT):
+                    # negotiation
+                    self.telnet_command = byte
+                    self.mode = M_NEGOTIATE
+                else:
+                    # other telnet commands
+                    self._telnetProcessCommand(byte)
+                    self.mode = M_NORMAL
+            elif self.mode == M_NEGOTIATE: # DO, DONT, WILL, WONT was received, option now following
+                self._telnetNegotiateOption(self.telnet_command, byte)
+                self.mode = M_NORMAL
+
+    # - incoming telnet commands and options
+
+    def _telnetProcessCommand(self, command):
+        """Process commands other than DO, DONT, WILL, WONT."""
+        # Currently none. RFC2217 only uses negotiation and subnegotiation.
+        if self.logger:
+            self.logger.warning("ignoring Telnet command: %r" % (command,))
+
+    def _telnetNegotiateOption(self, command, option):
+        """Process incoming DO, DONT, WILL, WONT."""
+        # check our registered telnet options and forward command to them
+        # they know themselves if they have to answer or not
+        known = False
+        for item in self._telnet_options:
+            # can have more than one match, as some options are duplicated for
+            # 'us' and 'them'
+            if item.option == option:
+                item.process_incoming(command)
+                known = True
+        if not known:
+            # handle unknown options
+            # only answer to positive requests and deny them
+            if command == WILL or command == DO:
+                self.telnetSendOption((command == WILL and DONT or WONT), option)
+                if self.logger:
+                    self.logger.warning("rejected Telnet option: %r" % (option,))
+
+
+    def _telnetProcessSubnegotiation(self, suboption):
+        """Process subnegotiation, the data between IAC SB and IAC SE."""
+        if suboption[0:1] == COM_PORT_OPTION:
+            if self.logger:
+                self.logger.debug('received COM_PORT_OPTION: %r' % (suboption,))
+            if suboption[1:2] == SET_BAUDRATE:
+                backup = self.serial.baudrate
+                try:
+                    (baudrate,) = struct.unpack("!I", suboption[2:6])
+                    if baudrate != 0:
+                        self.serial.baudrate = baudrate
+                except ValueError, e:
+                    if self.logger:
+                        self.logger.error("failed to set baud rate: %s" % (e,))
+                    self.serial.baudrate = backup
+                else:
+                    if self.logger:
+                        self.logger.info("%s baud rate: %s" % (baudrate and 'set' or 'get', self.serial.baudrate))
+                self.rfc2217SendSubnegotiation(SERVER_SET_BAUDRATE, struct.pack("!I", self.serial.baudrate))
+            elif suboption[1:2] == SET_DATASIZE:
+                backup = self.serial.bytesize
+                try:
+                    (datasize,) = struct.unpack("!B", suboption[2:3])
+                    if datasize != 0:
+                        self.serial.bytesize = datasize
+                except ValueError, e:
+                    if self.logger:
+                        self.logger.error("failed to set data size: %s" % (e,))
+                    self.serial.bytesize = backup
+                else:
+                    if self.logger:
+                        self.logger.info("%s data size: %s" % (datasize and 'set' or 'get', self.serial.bytesize))
+                self.rfc2217SendSubnegotiation(SERVER_SET_DATASIZE, struct.pack("!B", self.serial.bytesize))
+            elif suboption[1:2] == SET_PARITY:
+                backup = self.serial.parity
+                try:
+                    parity = struct.unpack("!B", suboption[2:3])[0]
+                    if parity != 0:
+                        self.serial.parity = RFC2217_REVERSE_PARITY_MAP[parity]
+                except ValueError, e:
+                    if self.logger:
+                        self.logger.error("failed to set parity: %s" % (e,))
+                    self.serial.parity = backup
+                else:
+                    if self.logger:
+                        self.logger.info("%s parity: %s" % (parity and 'set' or 'get', self.serial.parity))
+                self.rfc2217SendSubnegotiation(
+                    SERVER_SET_PARITY,
+                    struct.pack("!B", RFC2217_PARITY_MAP[self.serial.parity])
+                    )
+            elif suboption[1:2] == SET_STOPSIZE:
+                backup = self.serial.stopbits
+                try:
+                    stopbits = struct.unpack("!B", suboption[2:3])[0]
+                    if stopbits != 0:
+                        self.serial.stopbits = RFC2217_REVERSE_STOPBIT_MAP[stopbits]
+                except ValueError, e:
+                    if self.logger:
+                        self.logger.error("failed to set stop bits: %s" % (e,))
+                    self.serial.stopbits = backup
+                else:
+                    if self.logger:
+                        self.logger.info("%s stop bits: %s" % (stopbits and 'set' or 'get', self.serial.stopbits))
+                self.rfc2217SendSubnegotiation(
+                    SERVER_SET_STOPSIZE,
+                    struct.pack("!B", RFC2217_STOPBIT_MAP[self.serial.stopbits])
+                    )
+            elif suboption[1:2] == SET_CONTROL:
+                if suboption[2:3] == SET_CONTROL_REQ_FLOW_SETTING:
+                    if self.serial.xonxoff:
+                        self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_SW_FLOW_CONTROL)
+                    elif self.serial.rtscts:
+                        self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_HW_FLOW_CONTROL)
+                    else:
+                        self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_NO_FLOW_CONTROL)
+                elif suboption[2:3] == SET_CONTROL_USE_NO_FLOW_CONTROL:
+                    self.serial.xonxoff = False
+                    self.serial.rtscts = False
+                    if self.logger:
+                        self.logger.info("changed flow control to None")
+                    self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_NO_FLOW_CONTROL)
+                elif suboption[2:3] == SET_CONTROL_USE_SW_FLOW_CONTROL:
+                    self.serial.xonxoff = True
+                    if self.logger:
+                        self.logger.info("changed flow control to XON/XOFF")
+                    self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_SW_FLOW_CONTROL)
+                elif suboption[2:3] == SET_CONTROL_USE_HW_FLOW_CONTROL:
+                    self.serial.rtscts = True
+                    if self.logger:
+                        self.logger.info("changed flow control to RTS/CTS")
+                    self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_HW_FLOW_CONTROL)
+                elif suboption[2:3] == SET_CONTROL_REQ_BREAK_STATE:
+                    if self.logger:
+                        self.logger.warning("requested break state - not implemented")
+                    pass # XXX needs cached value
+                elif suboption[2:3] == SET_CONTROL_BREAK_ON:
+                    self.serial.setBreak(True)
+                    if self.logger:
+                        self.logger.info("changed BREAK to active")
+                    self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_BREAK_ON)
+                elif suboption[2:3] == SET_CONTROL_BREAK_OFF:
+                    self.serial.setBreak(False)
+                    if self.logger:
+                        self.logger.info("changed BREAK to inactive")
+                    self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_BREAK_OFF)
+                elif suboption[2:3] == SET_CONTROL_REQ_DTR:
+                    if self.logger:
+                        self.logger.warning("requested DTR state - not implemented")
+                    pass # XXX needs cached value
+                elif suboption[2:3] == SET_CONTROL_DTR_ON:
+                    self.serial.setDTR(True)
+                    if self.logger:
+                        self.logger.info("changed DTR to active")
+                    self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_DTR_ON)
+                elif suboption[2:3] == SET_CONTROL_DTR_OFF:
+                    self.serial.setDTR(False)
+                    if self.logger:
+                        self.logger.info("changed DTR to inactive")
+                    self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_DTR_OFF)
+                elif suboption[2:3] == SET_CONTROL_REQ_RTS:
+                    if self.logger:
+                        self.logger.warning("requested RTS state - not implemented")
+                    pass # XXX needs cached value
+                    #~ self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_RTS_ON)
+                elif suboption[2:3] == SET_CONTROL_RTS_ON:
+                    self.serial.setRTS(True)
+                    if self.logger:
+                        self.logger.info("changed RTS to active")
+                    self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_RTS_ON)
+                elif suboption[2:3] == SET_CONTROL_RTS_OFF:
+                    self.serial.setRTS(False)
+                    if self.logger:
+                        self.logger.info("changed RTS to inactive")
+                    self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_RTS_OFF)
+                #~ elif suboption[2:3] == SET_CONTROL_REQ_FLOW_SETTING_IN:
+                #~ elif suboption[2:3] == SET_CONTROL_USE_NO_FLOW_CONTROL_IN:
+                #~ elif suboption[2:3] == SET_CONTROL_USE_SW_FLOW_CONTOL_IN:
+                #~ elif suboption[2:3] == SET_CONTROL_USE_HW_FLOW_CONTOL_IN:
+                #~ elif suboption[2:3] == SET_CONTROL_USE_DCD_FLOW_CONTROL:
+                #~ elif suboption[2:3] == SET_CONTROL_USE_DTR_FLOW_CONTROL:
+                #~ elif suboption[2:3] == SET_CONTROL_USE_DSR_FLOW_CONTROL:
+            elif suboption[1:2] == NOTIFY_LINESTATE:
+                # client polls for current state
+                self.rfc2217SendSubnegotiation(
+                    SERVER_NOTIFY_LINESTATE,
+                    to_bytes([0])   # sorry, nothing like that implemented
+                    )
+            elif suboption[1:2] == NOTIFY_MODEMSTATE:
+                if self.logger:
+                    self.logger.info("request for modem state")
+                # client polls for current state
+                self.check_modem_lines(force_notification=True)
+            elif suboption[1:2] == FLOWCONTROL_SUSPEND:
+                if self.logger:
+                    self.logger.info("suspend")
+                self._remote_suspend_flow = True
+            elif suboption[1:2] == FLOWCONTROL_RESUME:
+                if self.logger:
+                    self.logger.info("resume")
+                self._remote_suspend_flow = False
+            elif suboption[1:2] == SET_LINESTATE_MASK:
+                self.linstate_mask = ord(suboption[2:3]) # ensure it is a number
+                if self.logger:
+                    self.logger.info("line state mask: 0x%02x" % (self.linstate_mask,))
+            elif suboption[1:2] == SET_MODEMSTATE_MASK:
+                self.modemstate_mask = ord(suboption[2:3]) # ensure it is a number
+                if self.logger:
+                    self.logger.info("modem state mask: 0x%02x" % (self.modemstate_mask,))
+            elif suboption[1:2] == PURGE_DATA:
+                if suboption[2:3] == PURGE_RECEIVE_BUFFER:
+                    self.serial.flushInput()
+                    if self.logger:
+                        self.logger.info("purge in")
+                    self.rfc2217SendSubnegotiation(SERVER_PURGE_DATA, PURGE_RECEIVE_BUFFER)
+                elif suboption[2:3] == PURGE_TRANSMIT_BUFFER:
+                    self.serial.flushOutput()
+                    if self.logger:
+                        self.logger.info("purge out")
+                    self.rfc2217SendSubnegotiation(SERVER_PURGE_DATA, PURGE_TRANSMIT_BUFFER)
+                elif suboption[2:3] == PURGE_BOTH_BUFFERS:
+                    self.serial.flushInput()
+                    self.serial.flushOutput()
+                    if self.logger:
+                        self.logger.info("purge both")
+                    self.rfc2217SendSubnegotiation(SERVER_PURGE_DATA, PURGE_BOTH_BUFFERS)
+                else:
+                    if self.logger:
+                        self.logger.error("undefined PURGE_DATA: %r" % list(suboption[2:]))
+            else:
+                if self.logger:
+                    self.logger.error("undefined COM_PORT_OPTION: %r" % list(suboption[1:]))
+        else:
+            if self.logger:
+                self.logger.warning("unknown subnegotiation: %r" % (suboption,))
+
+
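+# Server-side usage sketch (comments only, not executed). ``ser`` is assumed
+# to be an open serial.Serial instance, ``conn`` an object wrapping the client
+# socket with a thread-safe write(), and ``network_data`` a chunk received
+# from that socket; none of these are provided by this module.
+#
+#   manager = PortManager(ser, conn)
+#   # network -> serial: filter() strips Telnet/RFC 2217 negotiation data
+#   ser.write(b''.join(manager.filter(network_data)))
+#   # serial -> network: escape() doubles IAC bytes before sending
+#   conn.write(b''.join(manager.escape(ser.read(ser.inWaiting() or 1))))
+#   # call periodically so modem line changes are reported to the client
+#   manager.check_modem_lines()
+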
+# simple client test
+if __name__ == '__main__':
+    import sys
+    s = Serial('rfc2217://localhost:7000', 115200)
+    sys.stdout.write('%s\n' % s)
+
+    #~ s.baudrate = 1898
+
+    sys.stdout.write("write...\n")
+    s.write("hello\n")
+    s.flush()
+    sys.stdout.write("read: %s\n" % s.read(5))
+
+    #~ s.baudrate = 19200
+    #~ s.databits = 7
+    s.close()
diff --git a/catapult/telemetry/third_party/pyserial/serial/serialcli.py b/catapult/telemetry/third_party/pyserial/serial/serialcli.py
new file mode 100644
index 0000000..19169a3
--- /dev/null
+++ b/catapult/telemetry/third_party/pyserial/serial/serialcli.py
@@ -0,0 +1,273 @@
+#! python
+# Python Serial Port Extension for Win32, Linux, BSD, Jython and .NET/Mono
+# serial driver for .NET/Mono (IronPython), .NET >= 2
+# see __init__.py
+#
+# (C) 2008 Chris Liechti <cliechti@gmx.net>
+# this is distributed under a free software license, see license.txt
+
+import clr
+import System
+import System.IO.Ports
+from serial.serialutil import *
+
+
+def device(portnum):
+    """Turn a port number into a device name"""
+    return System.IO.Ports.SerialPort.GetPortNames()[portnum]
+
+
+# must invoke function with byte array, make a helper to convert strings
+# to byte arrays
+sab = System.Array[System.Byte]
+def as_byte_array(string):
+    return sab([ord(x) for x in string])  # XXX will require adaptation when run with a 3.x compatible IronPython
+
+class IronSerial(SerialBase):
+    """Serial port implementation for .NET/Mono."""
+
+    BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
+                9600, 19200, 38400, 57600, 115200)
+
+    def open(self):
+        """Open port with current settings. This may throw a SerialException
+           if the port cannot be opened."""
+        if self._port is None:
+            raise SerialException("Port must be configured before it can be used.")
+        if self._isOpen:
+            raise SerialException("Port is already open.")
+        try:
+            self._port_handle = System.IO.Ports.SerialPort(self.portstr)
+        except Exception, msg:
+            self._port_handle = None
+            raise SerialException("could not open port %s: %s" % (self.portstr, msg))
+
+        self._reconfigurePort()
+        self._port_handle.Open()
+        self._isOpen = True
+        if not self._rtscts:
+            self.setRTS(True)
+            self.setDTR(True)
+        self.flushInput()
+        self.flushOutput()
+
+    def _reconfigurePort(self):
+        """Set communication parameters on opened port."""
+        if not self._port_handle:
+            raise SerialException("Can only operate on a valid port handle")
+
+        #~ self._port_handle.ReceivedBytesThreshold = 1
+
+        if self._timeout is None:
+            self._port_handle.ReadTimeout = System.IO.Ports.SerialPort.InfiniteTimeout
+        else:
+            self._port_handle.ReadTimeout = int(self._timeout*1000)
+
+        # if self._timeout != 0 and self._interCharTimeout is not None:
+            # timeouts = (int(self._interCharTimeout * 1000),) + timeouts[1:]
+
+        if self._writeTimeout is None:
+            self._port_handle.WriteTimeout = System.IO.Ports.SerialPort.InfiniteTimeout
+        else:
+            self._port_handle.WriteTimeout = int(self._writeTimeout*1000)
+
+
+        # Setup the connection info.
+        try:
+            self._port_handle.BaudRate = self._baudrate
+        except IOError, e:
+            # catch errors from illegal baudrate settings
+            raise ValueError(str(e))
+
+        if self._bytesize == FIVEBITS:
+            self._port_handle.DataBits     = 5
+        elif self._bytesize == SIXBITS:
+            self._port_handle.DataBits     = 6
+        elif self._bytesize == SEVENBITS:
+            self._port_handle.DataBits     = 7
+        elif self._bytesize == EIGHTBITS:
+            self._port_handle.DataBits     = 8
+        else:
+            raise ValueError("Unsupported number of data bits: %r" % self._bytesize)
+
+        if self._parity == PARITY_NONE:
+            self._port_handle.Parity       = getattr(System.IO.Ports.Parity, 'None') # reserved keyword in Py3k
+        elif self._parity == PARITY_EVEN:
+            self._port_handle.Parity       = System.IO.Ports.Parity.Even
+        elif self._parity == PARITY_ODD:
+            self._port_handle.Parity       = System.IO.Ports.Parity.Odd
+        elif self._parity == PARITY_MARK:
+            self._port_handle.Parity       = System.IO.Ports.Parity.Mark
+        elif self._parity == PARITY_SPACE:
+            self._port_handle.Parity       = System.IO.Ports.Parity.Space
+        else:
+            raise ValueError("Unsupported parity mode: %r" % self._parity)
+
+        if self._stopbits == STOPBITS_ONE:
+            self._port_handle.StopBits     = System.IO.Ports.StopBits.One
+        elif self._stopbits == STOPBITS_ONE_POINT_FIVE:
+            self._port_handle.StopBits     = System.IO.Ports.StopBits.OnePointFive
+        elif self._stopbits == STOPBITS_TWO:
+            self._port_handle.StopBits     = System.IO.Ports.StopBits.Two
+        else:
+            raise ValueError("Unsupported number of stop bits: %r" % self._stopbits)
+
+        if self._rtscts and self._xonxoff:
+            self._port_handle.Handshake  = System.IO.Ports.Handshake.RequestToSendXOnXOff
+        elif self._rtscts:
+            self._port_handle.Handshake  = System.IO.Ports.Handshake.RequestToSend
+        elif self._xonxoff:
+            self._port_handle.Handshake  = System.IO.Ports.Handshake.XOnXOff
+        else:
+            self._port_handle.Handshake  = getattr(System.IO.Ports.Handshake, 'None')   # reserved keyword in Py3k
+
+    #~ def __del__(self):
+        #~ self.close()
+
+    def close(self):
+        """Close port"""
+        if self._isOpen:
+            if self._port_handle:
+                try:
+                    self._port_handle.Close()
+                except System.IO.Ports.InvalidOperationException:
+                    # ignore errors. can happen for unplugged USB serial devices
+                    pass
+                self._port_handle = None
+            self._isOpen = False
+
+    def makeDeviceName(self, port):
+        try:
+            return device(port)
+        except TypeError, e:
+            raise SerialException(str(e))
+
+    #  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -
+
+    def inWaiting(self):
+        """Return the number of characters currently in the input buffer."""
+        if not self._port_handle: raise portNotOpenError
+        return self._port_handle.BytesToRead
+
+    def read(self, size=1):
+        """Read size bytes from the serial port. If a timeout is set it may
+           return fewer characters than requested. With no timeout it will block
+           until the requested number of bytes is read."""
+        if not self._port_handle: raise portNotOpenError
+        # must use single byte reads as this is the only way to read
+        # without applying encodings
+        data = bytearray()
+        while size:
+            try:
+                data.append(self._port_handle.ReadByte())
+            except System.TimeoutException, e:
+                break
+            else:
+                size -= 1
+        return bytes(data)
+
+    def write(self, data):
+        """Output the given string over the serial port."""
+        if not self._port_handle: raise portNotOpenError
+        if not isinstance(data, (bytes, bytearray)):
+            raise TypeError('expected %s or bytearray, got %s' % (bytes, type(data)))
+        try:
+            # must call overloaded method with byte array argument
+            # as this is the only one not applying encodings
+            self._port_handle.Write(as_byte_array(data), 0, len(data))
+        except System.TimeoutException, e:
+            raise writeTimeoutError
+        return len(data)
+
+    def flushInput(self):
+        """Clear input buffer, discarding all that is in the buffer."""
+        if not self._port_handle: raise portNotOpenError
+        self._port_handle.DiscardInBuffer()
+
+    def flushOutput(self):
+        """Clear output buffer, aborting the current output and
+        discarding all that is in the buffer."""
+        if not self._port_handle: raise portNotOpenError
+        self._port_handle.DiscardOutBuffer()
+
+    def sendBreak(self, duration=0.25):
+        """Send break condition. Timed, returns to idle state after given duration."""
+        if not self._port_handle: raise portNotOpenError
+        import time
+        self._port_handle.BreakState = True
+        time.sleep(duration)
+        self._port_handle.BreakState = False
+
+    def setBreak(self, level=True):
+        """Set break: Controls TXD. When active, to transmitting is possible."""
+        if not self._port_handle: raise portNotOpenError
+        self._port_handle.BreakState = bool(level)
+
+    def setRTS(self, level=True):
+        """Set terminal status line: Request To Send"""
+        if not self._port_handle: raise portNotOpenError
+        self._port_handle.RtsEnable = bool(level)
+
+    def setDTR(self, level=True):
+        """Set terminal status line: Data Terminal Ready"""
+        if not self._port_handle: raise portNotOpenError
+        self._port_handle.DtrEnable = bool(level)
+
+    def getCTS(self):
+        """Read terminal status line: Clear To Send"""
+        if not self._port_handle: raise portNotOpenError
+        return self._port_handle.CtsHolding
+
+    def getDSR(self):
+        """Read terminal status line: Data Set Ready"""
+        if not self._port_handle: raise portNotOpenError
+        return self._port_handle.DsrHolding
+
+    def getRI(self):
+        """Read terminal status line: Ring Indicator"""
+        if not self._port_handle: raise portNotOpenError
+        #~ return self._port_handle.XXX
+        return False #XXX an error would be better
+
+    def getCD(self):
+        """Read terminal status line: Carrier Detect"""
+        if not self._port_handle: raise portNotOpenError
+        return self._port_handle.CDHolding
+
+    # - - platform specific - - - -
+    # none
+
+
+# assemble Serial class with the platform specific implementation and the base
+# for file-like behavior. for Python 2.6 and newer, which provide the new I/O
+# library, derive from io.RawIOBase
+try:
+    import io
+except ImportError:
+    # classic version with our own file-like emulation
+    class Serial(IronSerial, FileLike):
+        pass
+else:
+    # io library present
+    class Serial(IronSerial, io.RawIOBase):
+        pass
+
+
+# Test function only!!
+if __name__ == '__main__':
+    import sys
+
+    s = Serial(0)
+    sys.stdout.write('%s\n' % s)
+
+    s = Serial()
+    sys.stdout.write('%s\n' % s)
+
+
+    s.baudrate = 19200
+    s.databits = 7
+    s.close()
+    s.port = 0
+    s.open()
+    sys.stdout.write('%s\n' % s)
+
diff --git a/catapult/telemetry/third_party/pyserial/serial/serialjava.py b/catapult/telemetry/third_party/pyserial/serial/serialjava.py
new file mode 100644
index 0000000..46a78f8
--- /dev/null
+++ b/catapult/telemetry/third_party/pyserial/serial/serialjava.py
@@ -0,0 +1,262 @@
+#!jython
+#
+# Python Serial Port Extension for Win32, Linux, BSD, Jython
+# module for serial IO for Jython and JavaComm
+# see __init__.py
+#
+# (C) 2002-2008 Chris Liechti <cliechti@gmx.net>
+# this is distributed under a free software license, see license.txt
+
+from serial.serialutil import *
+
+def my_import(name):
+    mod = __import__(name)
+    components = name.split('.')
+    for comp in components[1:]:
+        mod = getattr(mod, comp)
+    return mod
+
+
+def detect_java_comm(names):
+    """try given list of modules and return that imports"""
+    for name in names:
+        try:
+            mod = my_import(name)
+            mod.SerialPort
+            return mod
+        except (ImportError, AttributeError):
+            pass
+    raise ImportError("No Java Communications API implementation found")
+
+
+# Java Communications API implementations
+# http://mho.republika.pl/java/comm/
+
+comm = detect_java_comm([
+    'javax.comm', # Sun/IBM
+    'gnu.io',     # RXTX
+])
+
+
+def device(portnumber):
+    """Turn a port number into a device name"""
+    enum = comm.CommPortIdentifier.getPortIdentifiers()
+    ports = []
+    while enum.hasMoreElements():
+        el = enum.nextElement()
+        if el.getPortType() == comm.CommPortIdentifier.PORT_SERIAL:
+            ports.append(el)
+    return ports[portnumber].getName()
+
+
+class JavaSerial(SerialBase):
+    """Serial port class, implemented with Java Communications API and
+       thus usable with jython and the appropriate java extension."""
+
+    def open(self):
+        """Open port with current settings. This may throw a SerialException
+           if the port cannot be opened."""
+        if self._port is None:
+            raise SerialException("Port must be configured before it can be used.")
+        if self._isOpen:
+            raise SerialException("Port is already open.")
+        if type(self._port) == type(''):      # strings are taken directly
+            portId = comm.CommPortIdentifier.getPortIdentifier(self._port)
+        else:
+            portId = comm.CommPortIdentifier.getPortIdentifier(device(self._port))     # numbers are transformed to a comport id obj
+        try:
+            self.sPort = portId.open("python serial module", 10)
+        except Exception, msg:
+            self.sPort = None
+            raise SerialException("Could not open port: %s" % msg)
+        self._reconfigurePort()
+        self._instream = self.sPort.getInputStream()
+        self._outstream = self.sPort.getOutputStream()
+        self._isOpen = True
+
+    def _reconfigurePort(self):
+        """Set communication parameters on opened port."""
+        if not self.sPort:
+            raise SerialException("Can only operate on a valid port handle")
+
+        self.sPort.enableReceiveTimeout(30)
+        if self._bytesize == FIVEBITS:
+            jdatabits = comm.SerialPort.DATABITS_5
+        elif self._bytesize == SIXBITS:
+            jdatabits = comm.SerialPort.DATABITS_6
+        elif self._bytesize == SEVENBITS:
+            jdatabits = comm.SerialPort.DATABITS_7
+        elif self._bytesize == EIGHTBITS:
+            jdatabits = comm.SerialPort.DATABITS_8
+        else:
+            raise ValueError("unsupported bytesize: %r" % self._bytesize)
+
+        if self._stopbits == STOPBITS_ONE:
+            jstopbits = comm.SerialPort.STOPBITS_1
+        elif self._stopbits == STOPBITS_ONE_POINT_FIVE:
+            jstopbits = comm.SerialPort.STOPBITS_1_5
+        elif self._stopbits == STOPBITS_TWO:
+            jstopbits = comm.SerialPort.STOPBITS_2
+        else:
+            raise ValueError("unsupported number of stopbits: %r" % self._stopbits)
+
+        if self._parity == PARITY_NONE:
+            jparity = comm.SerialPort.PARITY_NONE
+        elif self._parity == PARITY_EVEN:
+            jparity = comm.SerialPort.PARITY_EVEN
+        elif self._parity == PARITY_ODD:
+            jparity = comm.SerialPort.PARITY_ODD
+        elif self._parity == PARITY_MARK:
+            jparity = comm.SerialPort.PARITY_MARK
+        elif self._parity == PARITY_SPACE:
+            jparity = comm.SerialPort.PARITY_SPACE
+        else:
+            raise ValueError("unsupported parity type: %r" % self._parity)
+
+        jflowin = jflowout = 0
+        if self._rtscts:
+            jflowin  |=  comm.SerialPort.FLOWCONTROL_RTSCTS_IN
+            jflowout |=  comm.SerialPort.FLOWCONTROL_RTSCTS_OUT
+        if self._xonxoff:
+            jflowin  |=  comm.SerialPort.FLOWCONTROL_XONXOFF_IN
+            jflowout |=  comm.SerialPort.FLOWCONTROL_XONXOFF_OUT
+
+        self.sPort.setSerialPortParams(self._baudrate, jdatabits, jstopbits, jparity)
+        self.sPort.setFlowControlMode(jflowin | jflowout)
+
+        if self._timeout >= 0:
+            self.sPort.enableReceiveTimeout(self._timeout*1000)
+        else:
+            self.sPort.disableReceiveTimeout()
+
+    def close(self):
+        """Close port"""
+        if self._isOpen:
+            if self.sPort:
+                self._instream.close()
+                self._outstream.close()
+                self.sPort.close()
+                self.sPort = None
+            self._isOpen = False
+
+    def makeDeviceName(self, port):
+        return device(port)
+
+    #  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -
+
+    def inWaiting(self):
+        """Return the number of characters currently in the input buffer."""
+        if not self.sPort: raise portNotOpenError
+        return self._instream.available()
+
+    def read(self, size=1):
+        """Read size bytes from the serial port. If a timeout is set it may
+           return fewer characters than requested. With no timeout it will
+           block until the requested number of bytes is read."""
+        if not self.sPort: raise portNotOpenError
+        read = bytearray()
+        if size > 0:
+            while len(read) < size:
+                x = self._instream.read()
+                if x == -1:
+                    if self.timeout >= 0:
+                        break
+                else:
+                    read.append(x)
+        return bytes(read)
+
+    def write(self, data):
+        """Output the given string over the serial port."""
+        if not self.sPort: raise portNotOpenError
+        if not isinstance(data, (bytes, bytearray)):
+            raise TypeError('expected %s or bytearray, got %s' % (bytes, type(data)))
+        self._outstream.write(data)
+        return len(data)
+
+    def flushInput(self):
+        """Clear input buffer, discarding all that is in the buffer."""
+        if not self.sPort: raise portNotOpenError
+        self._instream.skip(self._instream.available())
+
+    def flushOutput(self):
+        """Clear output buffer, aborting the current output and
+        discarding all that is in the buffer."""
+        if not self.sPort: raise portNotOpenError
+        self._outstream.flush()
+
+    def sendBreak(self, duration=0.25):
+        """Send break condition. Timed, returns to idle state after given duration."""
+        if not self.sPort: raise portNotOpenError
+        self.sPort.sendBreak(duration*1000.0)
+
+    def setBreak(self, level=1):
+        """Set break: Controls TXD. When active, no transmitting is possible."""
+        if not self.sPort: raise portNotOpenError
+        raise SerialException("The setBreak function is not implemented in java.")
+
+    def setRTS(self, level=1):
+        """Set terminal status line: Request To Send"""
+        if not self.sPort: raise portNotOpenError
+        self.sPort.setRTS(level)
+
+    def setDTR(self, level=1):
+        """Set terminal status line: Data Terminal Ready"""
+        if not self.sPort: raise portNotOpenError
+        self.sPort.setDTR(level)
+
+    def getCTS(self):
+        """Read terminal status line: Clear To Send"""
+        if not self.sPort: raise portNotOpenError
+        return self.sPort.isCTS()
+
+    def getDSR(self):
+        """Read terminal status line: Data Set Ready"""
+        if not self.sPort: raise portNotOpenError
+        return self.sPort.isDSR()
+
+    def getRI(self):
+        """Read terminal status line: Ring Indicator"""
+        if not self.sPort: raise portNotOpenError
+        return self.sPort.isRI()
+
+    def getCD(self):
+        """Read terminal status line: Carrier Detect"""
+        if not self.sPort: raise portNotOpenError
+        return self.sPort.isCD()
+
+
+# assemble Serial class with the platform specific implementation and the base
+# for file-like behavior. For Python 2.6 and newer, which provide the new I/O
+# library, derive from io.RawIOBase
+try:
+    import io
+except ImportError:
+    # classic version with our own file-like emulation
+    class Serial(JavaSerial, FileLike):
+        pass
+else:
+    # io library present
+    class Serial(JavaSerial, io.RawIOBase):
+        pass
+
+
+if __name__ == '__main__':
+    import sys
+    s = Serial(0,
+         baudrate=19200,        # baudrate
+         bytesize=EIGHTBITS,    # number of databits
+         parity=PARITY_EVEN,    # enable parity checking
+         stopbits=STOPBITS_ONE, # number of stopbits
+         timeout=3,             # set a timeout value, None for waiting forever
+         xonxoff=0,             # enable software flow control
+         rtscts=0,              # enable RTS/CTS flow control
+    )
+    s.setRTS(1)
+    s.setDTR(1)
+    s.flushInput()
+    s.flushOutput()
+    s.write('hello')
+    sys.stdout.write('%r\n' % s.read(5))
+    sys.stdout.write('%s\n' % s.inWaiting())
+    del s
+
+
diff --git a/catapult/telemetry/third_party/pyserial/serial/serialposix.py b/catapult/telemetry/third_party/pyserial/serial/serialposix.py
new file mode 100755
index 0000000..b9b4b28
--- /dev/null
+++ b/catapult/telemetry/third_party/pyserial/serial/serialposix.py
@@ -0,0 +1,703 @@
+#!/usr/bin/env python
+#
+# Python Serial Port Extension for Win32, Linux, BSD, Jython
+# module for serial IO for POSIX compatible systems, like Linux
+# see __init__.py
+#
+# (C) 2001-2010 Chris Liechti <cliechti@gmx.net>
+# this is distributed under a free software license, see license.txt
+#
+# parts based on code from Grant B. Edwards  <grante@visi.com>:
+#  ftp://ftp.visi.com/users/grante/python/PosixSerial.py
+#
+# references: http://www.easysw.com/~mike/serial/serial.html
+
+import sys, os, fcntl, termios, struct, select, errno, time
+from serial.serialutil import *
+
+# Do check the Python version as some constants have moved.
+if (sys.hexversion < 0x020100f0):
+    import TERMIOS
+else:
+    TERMIOS = termios
+
+if (sys.hexversion < 0x020200f0):
+    import FCNTL
+else:
+    FCNTL = fcntl
+
+# try to detect the OS so that a device can be selected...
+# this code block should supply a device() and set_special_baudrate() function
+# for the platform
+plat = sys.platform.lower()
+
+if   plat[:5] == 'linux':    # Linux (confirmed)
+
+    def device(port):
+        return '/dev/ttyS%d' % port
+
+    TCGETS2 = 0x802C542A
+    TCSETS2 = 0x402C542B
+    BOTHER = 0o010000
+
+    def set_special_baudrate(port, baudrate):
+        # right size is 44 on x86_64, allow for some growth
+        import array
+        buf = array.array('i', [0] * 64)
+
+        try:
+            # get serial_struct
+            FCNTL.ioctl(port.fd, TCGETS2, buf)
+            # set custom speed
+            buf[2] &= ~TERMIOS.CBAUD
+            buf[2] |= BOTHER
+            buf[9] = buf[10] = baudrate
+
+            # set serial_struct
+            res = FCNTL.ioctl(port.fd, TCSETS2, buf)
+        except IOError, e:
+            raise ValueError('Failed to set custom baud rate (%s): %s' % (baudrate, e))
+
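+    # Illustrative note: TCGETS2/TCSETS2 work on a Linux `struct termios2`
+    # viewed as an int array; buf[2] is c_cflag (where CBAUD is cleared and
+    # BOTHER set) and buf[9]/buf[10] are c_ispeed/c_ospeed. A call such as
+    # set_special_baudrate(port, 250000) (rate chosen for illustration) is how
+    # speeds without a Bxxxxx constant get applied.
+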
+    baudrate_constants = {
+        0:       0000000,  # hang up
+        50:      0000001,
+        75:      0000002,
+        110:     0000003,
+        134:     0000004,
+        150:     0000005,
+        200:     0000006,
+        300:     0000007,
+        600:     0000010,
+        1200:    0000011,
+        1800:    0000012,
+        2400:    0000013,
+        4800:    0000014,
+        9600:    0000015,
+        19200:   0000016,
+        38400:   0000017,
+        57600:   0010001,
+        115200:  0010002,
+        230400:  0010003,
+        460800:  0010004,
+        500000:  0010005,
+        576000:  0010006,
+        921600:  0010007,
+        1000000: 0010010,
+        1152000: 0010011,
+        1500000: 0010012,
+        2000000: 0010013,
+        2500000: 0010014,
+        3000000: 0010015,
+        3500000: 0010016,
+        4000000: 0010017
+    }
+
+elif plat == 'cygwin':       # cygwin/win32 (confirmed)
+
+    def device(port):
+        return '/dev/com%d' % (port + 1)
+
+    def set_special_baudrate(port, baudrate):
+        raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
+
+    baudrate_constants = {
+        128000: 0x01003,
+        256000: 0x01005,
+        500000: 0x01007,
+        576000: 0x01008,
+        921600: 0x01009,
+        1000000: 0x0100a,
+        1152000: 0x0100b,
+        1500000: 0x0100c,
+        2000000: 0x0100d,
+        2500000: 0x0100e,
+        3000000: 0x0100f
+    }
+
+elif plat[:7] == 'openbsd':    # OpenBSD
+
+    def device(port):
+        return '/dev/cua%02d' % port
+
+    def set_special_baudrate(port, baudrate):
+        raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
+
+    baudrate_constants = {}
+
+elif plat[:3] == 'bsd' or  \
+    plat[:7] == 'freebsd':
+
+    def device(port):
+        return '/dev/cuad%d' % port
+
+    def set_special_baudrate(port, baudrate):
+        raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
+
+    baudrate_constants = {}
+
+elif plat[:6] == 'darwin':   # OS X
+
+    version = os.uname()[2].split('.')
+    # Tiger or above can support arbitrary serial speeds
+    if int(version[0]) >= 8:
+        def set_special_baudrate(port, baudrate):
+            # use IOKit-specific call to set up high speeds
+            import array, fcntl
+            buf = array.array('i', [baudrate])
+            IOSSIOSPEED = 0x80045402 #_IOW('T', 2, speed_t)
+            fcntl.ioctl(port.fd, IOSSIOSPEED, buf, 1)
+    else: # version < 8
+        def set_special_baudrate(port, baudrate):
+            raise ValueError("baud rate not supported")
+
+    def device(port):
+        return '/dev/cuad%d' % port
+
+    baudrate_constants = {}
+
+
+elif plat[:6] == 'netbsd':   # NetBSD 1.6 testing by Erk
+
+    def device(port):
+        return '/dev/dty%02d' % port
+
+    def set_special_baudrate(port, baudrate):
+        raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
+
+    baudrate_constants = {}
+
+elif plat[:4] == 'irix':     # IRIX (partially tested)
+
+    def device(port):
+        return '/dev/ttyf%d' % (port+1) #XXX different device names depending on flow control
+
+    def set_special_baudrate(port, baudrate):
+        raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
+
+    baudrate_constants = {}
+
+elif plat[:2] == 'hp':       # HP-UX (not tested)
+
+    def device(port):
+        return '/dev/tty%dp0' % (port+1)
+
+    def set_special_baudrate(port, baudrate):
+        raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
+
+    baudrate_constants = {}
+
+elif plat[:5] == 'sunos':    # Solaris/SunOS (confirmed)
+
+    def device(port):
+        return '/dev/tty%c' % (ord('a')+port)
+
+    def set_special_baudrate(port, baudrate):
+        raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
+
+    baudrate_constants = {}
+
+elif plat[:3] == 'aix':      # AIX
+
+    def device(port):
+        return '/dev/tty%d' % (port)
+
+    def set_special_baudrate(port, baudrate):
+        raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
+
+    baudrate_constants = {}
+
+else:
+    # platform detection has failed...
+    sys.stderr.write("""\
+don't know how to number ttys on this system.
+! Use an explicit path (eg /dev/ttyS1) or send this information to
+! the author of this module:
+
+sys.platform = %r
+os.name = %r
+serialposix.py version = %s
+
+also add the device name of the serial port and where the
+counting starts for the first serial port.
+e.g. 'first serial port: /dev/ttyS0'
+and with a bit of luck you can get this module running...
+""" % (sys.platform, os.name, VERSION))
+    # no exception, just continue with a brave attempt to build a device name
+    # even if the device name is not correct for the platform it has chances
+    # to work using a string with the real device name as port parameter.
+    def device(portnum):
+        return '/dev/ttyS%d' % portnum
+    def set_special_baudrate(port, baudrate):
+        raise SerialException("sorry don't know how to handle non standard baud rate on this platform")
+    baudrate_constants = {}
+    #~ raise Exception, "this module does not run on this platform, sorry."
+
+# whats up with "aix", "beos", ....
+# they should work, just need to know the device names.
+
+
+# load some constants for later use.
+# try to use values from TERMIOS, use defaults from linux otherwise
+TIOCMGET  = hasattr(TERMIOS, 'TIOCMGET') and TERMIOS.TIOCMGET or 0x5415
+TIOCMBIS  = hasattr(TERMIOS, 'TIOCMBIS') and TERMIOS.TIOCMBIS or 0x5416
+TIOCMBIC  = hasattr(TERMIOS, 'TIOCMBIC') and TERMIOS.TIOCMBIC or 0x5417
+TIOCMSET  = hasattr(TERMIOS, 'TIOCMSET') and TERMIOS.TIOCMSET or 0x5418
+
+#TIOCM_LE = hasattr(TERMIOS, 'TIOCM_LE') and TERMIOS.TIOCM_LE or 0x001
+TIOCM_DTR = hasattr(TERMIOS, 'TIOCM_DTR') and TERMIOS.TIOCM_DTR or 0x002
+TIOCM_RTS = hasattr(TERMIOS, 'TIOCM_RTS') and TERMIOS.TIOCM_RTS or 0x004
+#TIOCM_ST = hasattr(TERMIOS, 'TIOCM_ST') and TERMIOS.TIOCM_ST or 0x008
+#TIOCM_SR = hasattr(TERMIOS, 'TIOCM_SR') and TERMIOS.TIOCM_SR or 0x010
+
+TIOCM_CTS = hasattr(TERMIOS, 'TIOCM_CTS') and TERMIOS.TIOCM_CTS or 0x020
+TIOCM_CAR = hasattr(TERMIOS, 'TIOCM_CAR') and TERMIOS.TIOCM_CAR or 0x040
+TIOCM_RNG = hasattr(TERMIOS, 'TIOCM_RNG') and TERMIOS.TIOCM_RNG or 0x080
+TIOCM_DSR = hasattr(TERMIOS, 'TIOCM_DSR') and TERMIOS.TIOCM_DSR or 0x100
+TIOCM_CD  = hasattr(TERMIOS, 'TIOCM_CD') and TERMIOS.TIOCM_CD or TIOCM_CAR
+TIOCM_RI  = hasattr(TERMIOS, 'TIOCM_RI') and TERMIOS.TIOCM_RI or TIOCM_RNG
+#TIOCM_OUT1 = hasattr(TERMIOS, 'TIOCM_OUT1') and TERMIOS.TIOCM_OUT1 or 0x2000
+#TIOCM_OUT2 = hasattr(TERMIOS, 'TIOCM_OUT2') and TERMIOS.TIOCM_OUT2 or 0x4000
+if hasattr(TERMIOS, 'TIOCINQ'):
+    TIOCINQ = TERMIOS.TIOCINQ
+else:
+    TIOCINQ = hasattr(TERMIOS, 'FIONREAD') and TERMIOS.FIONREAD or 0x541B
+TIOCOUTQ   = hasattr(TERMIOS, 'TIOCOUTQ') and TERMIOS.TIOCOUTQ or 0x5411
+
+TIOCM_zero_str = struct.pack('I', 0)
+TIOCM_RTS_str = struct.pack('I', TIOCM_RTS)
+TIOCM_DTR_str = struct.pack('I', TIOCM_DTR)
+
+TIOCSBRK  = hasattr(TERMIOS, 'TIOCSBRK') and TERMIOS.TIOCSBRK or 0x5427
+TIOCCBRK  = hasattr(TERMIOS, 'TIOCCBRK') and TERMIOS.TIOCCBRK or 0x5428
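+
+# Note: the `hasattr(TERMIOS, X) and TERMIOS.X or default` pattern above is the
+# pre-Python-2.5 spelling of a conditional expression; it uses the value from
+# the termios module when available and falls back to the common Linux ioctl
+# numbers otherwise.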
+
+
+class PosixSerial(SerialBase):
+    """Serial port class POSIX implementation. Serial port configuration is 
+    done with termios and fcntl. Runs on Linux and many other Un*x like
+    systems."""
+
+    def open(self):
+        """Open port with current settings. This may throw a SerialException
+           if the port cannot be opened."""
+        if self._port is None:
+            raise SerialException("Port must be configured before it can be used.")
+        if self._isOpen:
+            raise SerialException("Port is already open.")
+        self.fd = None
+        # open
+        try:
+            self.fd = os.open(self.portstr, os.O_RDWR|os.O_NOCTTY|os.O_NONBLOCK)
+        except IOError, msg:
+            self.fd = None
+            raise SerialException(msg.errno, "could not open port %s: %s" % (self._port, msg))
+        #~ fcntl.fcntl(self.fd, FCNTL.F_SETFL, 0)  # set blocking
+
+        try:
+            self._reconfigurePort()
+        except:
+            try:
+                os.close(self.fd)
+            except:
+                # ignore any exception when closing the port
+                # also to keep original exception that happened when setting up
+                pass
+            self.fd = None
+            raise
+        else:
+            self._isOpen = True
+        self.flushInput()
+
+
+    def _reconfigurePort(self):
+        """Set communication parameters on opened port."""
+        if self.fd is None:
+            raise SerialException("Can only operate on a valid file descriptor")
+        custom_baud = None
+
+        vmin = vtime = 0                # timeout is done via select
+        if self._interCharTimeout is not None:
+            vmin = 1
+            vtime = int(self._interCharTimeout * 10)
+        try:
+            orig_attr = termios.tcgetattr(self.fd)
+            iflag, oflag, cflag, lflag, ispeed, ospeed, cc = orig_attr
+        except termios.error, msg:      # if a port is nonexistent but has a /dev file, it'll fail here
+            raise SerialException("Could not configure port: %s" % msg)
+        # set up raw mode / no echo / binary
+        cflag |=  (TERMIOS.CLOCAL|TERMIOS.CREAD)
+        lflag &= ~(TERMIOS.ICANON|TERMIOS.ECHO|TERMIOS.ECHOE|TERMIOS.ECHOK|TERMIOS.ECHONL|
+                     TERMIOS.ISIG|TERMIOS.IEXTEN) #|TERMIOS.ECHOPRT
+        for flag in ('ECHOCTL', 'ECHOKE'): # netbsd workaround for Erk
+            if hasattr(TERMIOS, flag):
+                lflag &= ~getattr(TERMIOS, flag)
+
+        oflag &= ~(TERMIOS.OPOST)
+        iflag &= ~(TERMIOS.INLCR|TERMIOS.IGNCR|TERMIOS.ICRNL|TERMIOS.IGNBRK)
+        if hasattr(TERMIOS, 'IUCLC'):
+            iflag &= ~TERMIOS.IUCLC
+        if hasattr(TERMIOS, 'PARMRK'):
+            iflag &= ~TERMIOS.PARMRK
+
+        # setup baud rate
+        try:
+            ispeed = ospeed = getattr(TERMIOS, 'B%s' % (self._baudrate))
+        except AttributeError:
+            try:
+                ispeed = ospeed = baudrate_constants[self._baudrate]
+            except KeyError:
+                #~ raise ValueError('Invalid baud rate: %r' % self._baudrate)
+                # may need custom baud rate, it isn't in our list.
+                ispeed = ospeed = getattr(TERMIOS, 'B38400')
+                try:
+                    custom_baud = int(self._baudrate) # store for later
+                except ValueError:
+                    raise ValueError('Invalid baud rate: %r' % self._baudrate)
+                else:
+                    if custom_baud < 0:
+                        raise ValueError('Invalid baud rate: %r' % self._baudrate)
+
+        # setup char len
+        cflag &= ~TERMIOS.CSIZE
+        if self._bytesize == 8:
+            cflag |= TERMIOS.CS8
+        elif self._bytesize == 7:
+            cflag |= TERMIOS.CS7
+        elif self._bytesize == 6:
+            cflag |= TERMIOS.CS6
+        elif self._bytesize == 5:
+            cflag |= TERMIOS.CS5
+        else:
+            raise ValueError('Invalid char len: %r' % self._bytesize)
+        # setup stopbits
+        if self._stopbits == STOPBITS_ONE:
+            cflag &= ~(TERMIOS.CSTOPB)
+        elif self._stopbits == STOPBITS_ONE_POINT_FIVE:
+            cflag |=  (TERMIOS.CSTOPB)  # XXX same as TWO.. there is no POSIX support for 1.5
+        elif self._stopbits == STOPBITS_TWO:
+            cflag |=  (TERMIOS.CSTOPB)
+        else:
+            raise ValueError('Invalid stop bit specification: %r' % self._stopbits)
+        # setup parity
+        iflag &= ~(TERMIOS.INPCK|TERMIOS.ISTRIP)
+        if self._parity == PARITY_NONE:
+            cflag &= ~(TERMIOS.PARENB|TERMIOS.PARODD)
+        elif self._parity == PARITY_EVEN:
+            cflag &= ~(TERMIOS.PARODD)
+            cflag |=  (TERMIOS.PARENB)
+        elif self._parity == PARITY_ODD:
+            cflag |=  (TERMIOS.PARENB|TERMIOS.PARODD)
+        else:
+            raise ValueError('Invalid parity: %r' % self._parity)
+        # setup flow control
+        # xonxoff
+        if hasattr(TERMIOS, 'IXANY'):
+            if self._xonxoff:
+                iflag |=  (TERMIOS.IXON|TERMIOS.IXOFF) #|TERMIOS.IXANY)
+            else:
+                iflag &= ~(TERMIOS.IXON|TERMIOS.IXOFF|TERMIOS.IXANY)
+        else:
+            if self._xonxoff:
+                iflag |=  (TERMIOS.IXON|TERMIOS.IXOFF)
+            else:
+                iflag &= ~(TERMIOS.IXON|TERMIOS.IXOFF)
+        # rtscts
+        if hasattr(TERMIOS, 'CRTSCTS'):
+            if self._rtscts:
+                cflag |=  (TERMIOS.CRTSCTS)
+            else:
+                cflag &= ~(TERMIOS.CRTSCTS)
+        elif hasattr(TERMIOS, 'CNEW_RTSCTS'):   # try it with alternate constant name
+            if self._rtscts:
+                cflag |=  (TERMIOS.CNEW_RTSCTS)
+            else:
+                cflag &= ~(TERMIOS.CNEW_RTSCTS)
+        # XXX should there be a warning if setting up rtscts (and xonxoff etc) fails??
+
+        # buffer
+        # vmin "minimal number of characters to be read. 0 for non blocking"
+        if vmin < 0 or vmin > 255:
+            raise ValueError('Invalid vmin: %r ' % vmin)
+        cc[TERMIOS.VMIN] = vmin
+        # vtime
+        if vtime < 0 or vtime > 255:
+            raise ValueError('Invalid vtime: %r' % vtime)
+        cc[TERMIOS.VTIME] = vtime
+        # activate settings
+        if [iflag, oflag, cflag, lflag, ispeed, ospeed, cc] != orig_attr:
+            termios.tcsetattr(self.fd, TERMIOS.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
+
+        # apply custom baud rate, if any
+        if custom_baud is not None:
+            set_special_baudrate(self, custom_baud)
+
+    def close(self):
+        """Close port"""
+        if self._isOpen:
+            if self.fd is not None:
+                os.close(self.fd)
+                self.fd = None
+            self._isOpen = False
+
+    def makeDeviceName(self, port):
+        return device(port)
+
+    #  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -
+
+    def inWaiting(self):
+        """Return the number of characters currently in the input buffer."""
+        #~ s = fcntl.ioctl(self.fd, TERMIOS.FIONREAD, TIOCM_zero_str)
+        s = fcntl.ioctl(self.fd, TIOCINQ, TIOCM_zero_str)
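+        # Illustrative: TIOCINQ (FIONREAD) fills the 4-byte buffer with the
+        # count of bytes queued in the kernel's input buffer; unpacking the
+        # packed unsigned int below yields that count as a Python integer.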
+        return struct.unpack('I',s)[0]
+
+    # select based implementation, proved to work on many systems
+    def read(self, size=1):
+        """Read size bytes from the serial port. If a timeout is set it may
+           return fewer characters than requested. With no timeout it will
+           block until the requested number of bytes is read."""
+        if not self._isOpen: raise portNotOpenError
+        read = bytearray()
+        while len(read) < size:
+            try:
+                ready,_,_ = select.select([self.fd],[],[], self._timeout)
+                # If select was used with a timeout, and the timeout occurs, it
+                # returns with empty lists -> thus abort read operation.
+                # For timeout == 0 (non-blocking operation) also abort when there
+                # is nothing to read.
+                if not ready:
+                    break   # timeout
+                buf = os.read(self.fd, size-len(read))
+                # read should always return some data as select reported it was
+                # ready to read when we get to this point.
+                if not buf:
+                    # Disconnected devices, at least on Linux, show the
+                    # behavior that they are always ready to read immediately
+                    # but reading returns nothing.
+                    raise SerialException('device reports readiness to read but returned no data (device disconnected or multiple access on port?)')
+                read.extend(buf)
+            except select.error, e:
+                # ignore EAGAIN errors. all other errors are shown
+                # see also http://www.python.org/dev/peps/pep-3151/#select
+                if e[0] != errno.EAGAIN:
+                    raise SerialException('read failed: %s' % (e,))
+            except OSError, e:
+                # ignore EAGAIN errors. all other errors are shown
+                if e.errno != errno.EAGAIN:
+                    raise SerialException('read failed: %s' % (e,))
+        return bytes(read)
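+    # Usage sketch (illustrative; the device path is an assumption):
+    #   ser = Serial('/dev/ttyUSB0', 115200, timeout=1)
+    #   chunk = ser.read(16)   # up to 16 bytes; fewer if the 1 s timeout hits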
+
+    def write(self, data):
+        """Output the given string over the serial port."""
+        if not self._isOpen: raise portNotOpenError
+        d = to_bytes(data)
+        tx_len = len(d)
+        if self._writeTimeout is not None and self._writeTimeout > 0:
+            timeout = time.time() + self._writeTimeout
+        else:
+            timeout = None
+        while tx_len > 0:
+            try:
+                n = os.write(self.fd, d)
+                if timeout:
+                    # when timeout is set, use select to wait for being ready
+                    # with the time left as timeout
+                    timeleft = timeout - time.time()
+                    if timeleft < 0:
+                        raise writeTimeoutError
+                    _, ready, _ = select.select([], [self.fd], [], timeleft)
+                    if not ready:
+                        raise writeTimeoutError
+                else:
+                    # wait for write operation
+                    _, ready, _ = select.select([], [self.fd], [], None)
+                    if not ready:
+                        raise SerialException('write failed (select)')
+                d = d[n:]
+                tx_len -= n
+            except OSError, v:
+                if v.errno != errno.EAGAIN:
+                    raise SerialException('write failed: %s' % (v,))
+        return len(data)
+
+    def flush(self):
+        """Flush of file like objects. In this case, wait until all data
+           is written."""
+        self.drainOutput()
+
+    def flushInput(self):
+        """Clear input buffer, discarding all that is in the buffer."""
+        if not self._isOpen: raise portNotOpenError
+        termios.tcflush(self.fd, TERMIOS.TCIFLUSH)
+
+    def flushOutput(self):
+        """Clear output buffer, aborting the current output and
+        discarding all that is in the buffer."""
+        if not self._isOpen: raise portNotOpenError
+        termios.tcflush(self.fd, TERMIOS.TCOFLUSH)
+
+    def sendBreak(self, duration=0.25):
+        """Send break condition. Timed, returns to idle state after given duration."""
+        if not self._isOpen: raise portNotOpenError
+        termios.tcsendbreak(self.fd, int(duration/0.25))
+
+    def setBreak(self, level=1):
+        """Set break: Controls TXD. When active, no transmitting is possible."""
+        if self.fd is None: raise portNotOpenError
+        if level:
+            fcntl.ioctl(self.fd, TIOCSBRK)
+        else:
+            fcntl.ioctl(self.fd, TIOCCBRK)
+
+    def setRTS(self, level=1):
+        """Set terminal status line: Request To Send"""
+        if not self._isOpen: raise portNotOpenError
+        if level:
+            fcntl.ioctl(self.fd, TIOCMBIS, TIOCM_RTS_str)
+        else:
+            fcntl.ioctl(self.fd, TIOCMBIC, TIOCM_RTS_str)
+
+    def setDTR(self, level=1):
+        """Set terminal status line: Data Terminal Ready"""
+        if not self._isOpen: raise portNotOpenError
+        if level:
+            fcntl.ioctl(self.fd, TIOCMBIS, TIOCM_DTR_str)
+        else:
+            fcntl.ioctl(self.fd, TIOCMBIC, TIOCM_DTR_str)
+
+    def getCTS(self):
+        """Read terminal status line: Clear To Send"""
+        if not self._isOpen: raise portNotOpenError
+        s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
+        return struct.unpack('I',s)[0] & TIOCM_CTS != 0
+
+    def getDSR(self):
+        """Read terminal status line: Data Set Ready"""
+        if not self._isOpen: raise portNotOpenError
+        s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
+        return struct.unpack('I',s)[0] & TIOCM_DSR != 0
+
+    def getRI(self):
+        """Read terminal status line: Ring Indicator"""
+        if not self._isOpen: raise portNotOpenError
+        s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
+        return struct.unpack('I',s)[0] & TIOCM_RI != 0
+
+    def getCD(self):
+        """Read terminal status line: Carrier Detect"""
+        if not self._isOpen: raise portNotOpenError
+        s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
+        return struct.unpack('I',s)[0] & TIOCM_CD != 0
+
+    # - - platform specific - - - -
+
+    def outWaiting(self):
+        """Return the number of characters currently in the output buffer."""
+        #~ s = fcntl.ioctl(self.fd, TERMIOS.FIONREAD, TIOCM_zero_str)
+        s = fcntl.ioctl(self.fd, TIOCOUTQ, TIOCM_zero_str)
+        return struct.unpack('I',s)[0]
+
+    def drainOutput(self):
+        """internal - not portable!"""
+        if not self._isOpen: raise portNotOpenError
+        termios.tcdrain(self.fd)
+
+    def nonblocking(self):
+        """internal - not portable!"""
+        if not self._isOpen: raise portNotOpenError
+        fcntl.fcntl(self.fd, FCNTL.F_SETFL, os.O_NONBLOCK)
+
+    def fileno(self):
+        """\
+        For easier use of the serial port instance with select.
+        WARNING: this function is not portable to different platforms!
+        """
+        if not self._isOpen: raise portNotOpenError
+        return self.fd
+
+    def setXON(self, level=True):
+        """\
+        Manually control flow - when software flow control is enabled.
+        This will send XON (true) and XOFF (false) to the other device.
+        WARNING: this function is not portable to different platforms!
+        """
+        if not self._isOpen: raise portNotOpenError
+        if level:
+            termios.tcflow(self.fd, TERMIOS.TCION)
+        else:
+            termios.tcflow(self.fd, TERMIOS.TCIOFF)
+
+    def flowControlOut(self, enable):
+        """\
+        Manually control flow of outgoing data - when hardware or software flow
+        control is enabled.
+        WARNING: this function is not portable to different platforms!
+        """
+        if not self._isOpen: raise portNotOpenError
+        if enable:
+            termios.tcflow(self.fd, TERMIOS.TCOON)
+        else:
+            termios.tcflow(self.fd, TERMIOS.TCOOFF)
+
+
+# assemble Serial class with the platform specific implementation and the base
+# for file-like behavior. For Python 2.6 and newer, which provide the new I/O
+# library, derive from io.RawIOBase
+try:
+    import io
+except ImportError:
+    # classic version with our own file-like emulation
+    class Serial(PosixSerial, FileLike):
+        pass
+else:
+    # io library present
+    class Serial(PosixSerial, io.RawIOBase):
+        pass
+
+class PosixPollSerial(Serial):
+    """poll based read implementation. not all systems support poll properly.
+    however this one has better handling of errors, such as a device
+    disconnecting while it's in use (e.g. USB-serial unplugged)"""
+
+    def read(self, size=1):
+        """Read size bytes from the serial port. If a timeout is set it may
+           return fewer characters than requested. With no timeout it will
+           block until the requested number of bytes is read."""
+        if self.fd is None: raise portNotOpenError
+        read = bytearray()
+        poll = select.poll()
+        poll.register(self.fd, select.POLLIN|select.POLLERR|select.POLLHUP|select.POLLNVAL)
+        if size > 0:
+            while len(read) < size:
+                # print "\tread(): size",size, "have", len(read)    #debug
+                # wait until device becomes ready to read (or something fails)
+                for fd, event in poll.poll(self._timeout*1000):
+                    if event & (select.POLLERR|select.POLLHUP|select.POLLNVAL):
+                        raise SerialException('device reports error (poll)')
+                    #  we don't care if it is select.POLLIN or timeout, that's
+                    #  handled below
+                buf = os.read(self.fd, size - len(read))
+                read.extend(buf)
+                if ((self._timeout is not None and self._timeout >= 0) or 
+                    (self._interCharTimeout is not None and self._interCharTimeout > 0)) and not buf:
+                    break   # early abort on timeout
+        return bytes(read)
+
+
+if __name__ == '__main__':
+    s = Serial(0,
+                 baudrate=19200,        # baud rate
+                 bytesize=EIGHTBITS,    # number of data bits
+                 parity=PARITY_EVEN,    # enable parity checking
+                 stopbits=STOPBITS_ONE, # number of stop bits
+                 timeout=3,             # set a timeout value, None for waiting forever
+                 xonxoff=0,             # enable software flow control
+                 rtscts=0,              # enable RTS/CTS flow control
+               )
+    s.setRTS(1)
+    s.setDTR(1)
+    s.flushInput()
+    s.flushOutput()
+    s.write('hello')
+    sys.stdout.write('%r\n' % s.read(5))
+    sys.stdout.write('%s\n' % s.inWaiting())
+    del s
+
diff --git a/catapult/telemetry/third_party/pyserial/serial/serialutil.py b/catapult/telemetry/third_party/pyserial/serial/serialutil.py
new file mode 100644
index 0000000..f28ece45
--- /dev/null
+++ b/catapult/telemetry/third_party/pyserial/serial/serialutil.py
@@ -0,0 +1,551 @@
+#! python
+# Python Serial Port Extension for Win32, Linux, BSD, Jython
+# see __init__.py
+#
+# (C) 2001-2010 Chris Liechti <cliechti@gmx.net>
+# this is distributed under a free software license, see license.txt
+
+# compatibility for older Python < 2.6
+try:
+    bytes
+    bytearray
+except (NameError, AttributeError):
+    # Python versions older than 2.6 do not have these types. As in Python 2.6
+    # they should behave like str. For Python older than 3.0 we want to work
+    # with strings anyway; only later versions have a true bytes type.
+    bytes = str
+    # bytearray is a mutable type that is easily turned into an instance of
+    # bytes
+    class bytearray(list):
+        # for bytes(bytearray()) usage
+        def __str__(self): return ''.join(self)
+        def __repr__(self): return 'bytearray(%r)' % ''.join(self)
+        # append automatically converts integers to characters
+        def append(self, item):
+            if isinstance(item, str):
+                list.append(self, item)
+            else:
+                list.append(self, chr(item))
+        # +=
+        def __iadd__(self, other):
+            for byte in other:
+                self.append(byte)
+            return self
+
+        def __getslice__(self, i, j):
+            return bytearray(list.__getslice__(self, i, j))
+
+        def __getitem__(self, item):
+            if isinstance(item, slice):
+                return bytearray(list.__getitem__(self, item))
+            else:
+                return ord(list.__getitem__(self, item))
+
+        def __eq__(self, other):
+            if isinstance(other, basestring):
+                other = bytearray(other)
+            return list.__eq__(self, other)
+
+# ``memoryview`` was introduced in Python 2.7 and ``bytes(some_memoryview)``
+# isn't returning the contents (very unfortunate). Therefore we need special
+# cases and test for it. Ensure that there is a ``memoryview`` object for older
+# Python versions. This is easier than making every test dependent on its
+# existence.
+try:
+    memoryview
+except (NameError, AttributeError):
+    # implementation does not matter as we do not really use it.
+    # it just must not inherit from something else we might care for.
+    class memoryview:
+        pass
+
+
+# all Python versions prior to 3.x convert ``str([17])`` to '[17]' instead of '\x11'
+# so a simple ``bytes(sequence)`` doesn't work for all versions
+def to_bytes(seq):
+    """convert a sequence to a bytes type"""
+    if isinstance(seq, bytes):
+        return seq
+    elif isinstance(seq, bytearray):
+        return bytes(seq)
+    elif isinstance(seq, memoryview):
+        return seq.tobytes()
+    else:
+        b = bytearray()
+        for item in seq:
+            b.append(item)  # this one handles int and str for our emulation and ints for Python 3.x
+        return bytes(b)
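+
+# Illustrative examples of the helper above (values chosen for illustration):
+#   to_bytes([72, 105])        -> 'Hi' on Python 2.x, where bytes is str
+#   to_bytes(bytearray('Hi'))  -> 'Hi'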
+
+# create control bytes
+XON  = to_bytes([17])
+XOFF = to_bytes([19])
+
+CR = to_bytes([13])
+LF = to_bytes([10])
+
+
+PARITY_NONE, PARITY_EVEN, PARITY_ODD, PARITY_MARK, PARITY_SPACE = 'N', 'E', 'O', 'M', 'S'
+STOPBITS_ONE, STOPBITS_ONE_POINT_FIVE, STOPBITS_TWO = (1, 1.5, 2)
+FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS = (5, 6, 7, 8)
+
+PARITY_NAMES = {
+    PARITY_NONE:  'None',
+    PARITY_EVEN:  'Even',
+    PARITY_ODD:   'Odd',
+    PARITY_MARK:  'Mark',
+    PARITY_SPACE: 'Space',
+}
+
+
+class SerialException(IOError):
+    """Base class for serial port related exceptions."""
+
+
+class SerialTimeoutException(SerialException):
+    """Write timeouts give an exception"""
+
+
+writeTimeoutError = SerialTimeoutException('Write timeout')
+portNotOpenError = SerialException('Attempting to use a port that is not open')
+
+
+class FileLike(object):
+    """An abstract file like class.
+
+    This class implements readline and readlines based on read and
+    writelines based on write.
+    This class is used to provide the above functions for Serial
+    port objects.
+
+    Note that when the serial port was opened with _NO_ timeout,
+    readline blocks until it sees a newline (or the specified size is
+    reached), and readlines would never return and therefore
+    refuses to work (it raises an exception in this case)!
+    """
+
+    def __init__(self):
+        self.closed = True
+
+    def close(self):
+        self.closed = True
+
+    # so that ports are closed when objects are discarded
+    def __del__(self):
+        """Destructor.  Calls close()."""
+        # The try/except block is in case this is called at program
+        # exit time, when it's possible that globals have already been
+        # deleted, and then the close() call might fail.  Since
+        # there's nothing we can do about such failures and they annoy
+        # the end users, we suppress the traceback.
+        try:
+            self.close()
+        except:
+            pass
+
+    def writelines(self, sequence):
+        for line in sequence:
+            self.write(line)
+
+    def flush(self):
+        """flush of file like objects"""
+        pass
+
+    # iterator for e.g. "for line in Serial(0): ..." usage
+    def next(self):
+        line = self.readline()
+        if not line: raise StopIteration
+        return line
+
+    def __iter__(self):
+        return self
+
+    def readline(self, size=None, eol=LF):
+        """read a line which is terminated with end-of-line (eol) character
+        ('\n' by default) or until timeout."""
+        leneol = len(eol)
+        line = bytearray()
+        while True:
+            c = self.read(1)
+            if c:
+                line += c
+                if line[-leneol:] == eol:
+                    break
+                if size is not None and len(line) >= size:
+                    break
+            else:
+                break
+        return bytes(line)
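+    # Usage sketch (illustrative; assumes a port opened with a timeout):
+    #   line = ser.readline()                        # up to and including LF
+    #   line = ser.readline(eol=to_bytes([13, 10]))  # CR/LF terminator
+    # With no timeout, readline() blocks until the terminator (or size) is hit.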
+
+    def readlines(self, sizehint=None, eol=LF):
+        """read a list of lines, until timeout.
+        sizehint is ignored."""
+        if self.timeout is None:
+            raise ValueError("Serial port MUST have enabled timeout for this function!")
+        leneol = len(eol)
+        lines = []
+        while True:
+            line = self.readline(eol=eol)
+            if line:
+                lines.append(line)
+                if line[-leneol:] != eol:    # was the line received with a timeout?
+                    break
+            else:
+                break
+        return lines
+
+    def xreadlines(self, sizehint=None):
+        """Read lines, implemented as generator. It will raise StopIteration on
+        timeout (empty read). sizehint is ignored."""
+        while True:
+            line = self.readline()
+            if not line: break
+            yield line
+
+    # other functions of file-likes - not used by pySerial
+
+    #~ readinto(b)
+
+    def seek(self, pos, whence=0):
+        raise IOError("file is not seekable")
+
+    def tell(self):
+        raise IOError("file is not seekable")
+
+    def truncate(self, n=None):
+        raise IOError("file is not seekable")
+
+    def isatty(self):
+        return False
+
+
+class SerialBase(object):
+    """Serial port base class. Provides __init__ function and properties to
+       get/set port settings."""
+
+    # default values, may be overridden in subclasses that do not support all values
+    BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
+                 9600, 19200, 38400, 57600, 115200, 230400, 460800, 500000,
+                 576000, 921600, 1000000, 1152000, 1500000, 2000000, 2500000,
+                 3000000, 3500000, 4000000)
+    BYTESIZES = (FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS)
+    PARITIES  = (PARITY_NONE, PARITY_EVEN, PARITY_ODD, PARITY_MARK, PARITY_SPACE)
+    STOPBITS  = (STOPBITS_ONE, STOPBITS_ONE_POINT_FIVE, STOPBITS_TWO)
+
+    def __init__(self,
+                 port = None,           # number of device, numbering starts at
+                                        # zero. if everything fails, the user
+                                        # can specify a device string, note
+                                        # that this isn't portable anymore
+                                        # port will be opened if one is specified
+                 baudrate=9600,         # baud rate
+                 bytesize=EIGHTBITS,    # number of data bits
+                 parity=PARITY_NONE,    # enable parity checking
+                 stopbits=STOPBITS_ONE, # number of stop bits
+                 timeout=None,          # set a timeout value, None to wait forever
+                 xonxoff=False,         # enable software flow control
+                 rtscts=False,          # enable RTS/CTS flow control
+                 writeTimeout=None,     # set a timeout for writes
+                 dsrdtr=False,          # None: use rtscts setting, dsrdtr override if True or False
+                 interCharTimeout=None  # Inter-character timeout, None to disable
+                 ):
+        """Initialize comm port object. If a port is given, then the port will be
+           opened immediately. Otherwise a Serial port object in closed state
+           is returned."""
+
+        self._isOpen   = False
+        self._port     = None           # correct value is assigned below through properties
+        self._baudrate = None           # correct value is assigned below through properties
+        self._bytesize = None           # correct value is assigned below through properties
+        self._parity   = None           # correct value is assigned below through properties
+        self._stopbits = None           # correct value is assigned below through properties
+        self._timeout  = None           # correct value is assigned below through properties
+        self._writeTimeout = None       # correct value is assigned below through properties
+        self._xonxoff  = None           # correct value is assigned below through properties
+        self._rtscts   = None           # correct value is assigned below through properties
+        self._dsrdtr   = None           # correct value is assigned below through properties
+        self._interCharTimeout = None   # correct value is assigned below through properties
+
+        # assign values using get/set methods using the properties feature
+        self.port     = port
+        self.baudrate = baudrate
+        self.bytesize = bytesize
+        self.parity   = parity
+        self.stopbits = stopbits
+        self.timeout  = timeout
+        self.writeTimeout = writeTimeout
+        self.xonxoff  = xonxoff
+        self.rtscts   = rtscts
+        self.dsrdtr   = dsrdtr
+        self.interCharTimeout = interCharTimeout
+
+        if port is not None:
+            self.open()
+
+    def isOpen(self):
+        """Check if the port is opened."""
+        return self._isOpen
+
+    #  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -
+
+    # TODO: these are not really needed as there are the BAUDRATES etc. attributes...
+    # maybe I'll remove them before the final release...
+
+    def getSupportedBaudrates(self):
+        return [(str(b), b) for b in self.BAUDRATES]
+
+    def getSupportedByteSizes(self):
+        return [(str(b), b) for b in self.BYTESIZES]
+
+    def getSupportedStopbits(self):
+        return [(str(b), b) for b in self.STOPBITS]
+
+    def getSupportedParities(self):
+        return [(PARITY_NAMES[b], b) for b in self.PARITIES]
+
+    #  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -
+
+    def setPort(self, port):
+        """Change the port. The attribute portstr is set to a string that
+           contains the name of the port."""
+
+        was_open = self._isOpen
+        if was_open: self.close()
+        if port is not None:
+            if isinstance(port, basestring):
+                self.portstr = port
+            else:
+                self.portstr = self.makeDeviceName(port)
+        else:
+            self.portstr = None
+        self._port = port
+        self.name = self.portstr
+        if was_open: self.open()
+
+    def getPort(self):
+        """Get the current port setting. The value that was passed on init or using
+           setPort() is passed back. See also the attribute portstr which contains
+           the name of the port as a string."""
+        return self._port
+
+    port = property(getPort, setPort, doc="Port setting")
+
+
+    def setBaudrate(self, baudrate):
+        """Change baud rate. It raises a ValueError if the port is open and the
+        baud rate is not possible. If the port is closed, then the value is
+        accepted and the exception is raised when the port is opened."""
+        try:
+            b = int(baudrate)
+        except TypeError:
+            raise ValueError("Not a valid baudrate: %r" % (baudrate,))
+        else:
+            if b <= 0:
+                raise ValueError("Not a valid baudrate: %r" % (baudrate,))
+            self._baudrate = b
+            if self._isOpen:  self._reconfigurePort()
+
+    def getBaudrate(self):
+        """Get the current baud rate setting."""
+        return self._baudrate
+
+    baudrate = property(getBaudrate, setBaudrate, doc="Baud rate setting")
+
+
+    def setByteSize(self, bytesize):
+        """Change byte size."""
+        if bytesize not in self.BYTESIZES: raise ValueError("Not a valid byte size: %r" % (bytesize,))
+        self._bytesize = bytesize
+        if self._isOpen: self._reconfigurePort()
+
+    def getByteSize(self):
+        """Get the current byte size setting."""
+        return self._bytesize
+
+    bytesize = property(getByteSize, setByteSize, doc="Byte size setting")
+
+
+    def setParity(self, parity):
+        """Change parity setting."""
+        if parity not in self.PARITIES: raise ValueError("Not a valid parity: %r" % (parity,))
+        self._parity = parity
+        if self._isOpen: self._reconfigurePort()
+
+    def getParity(self):
+        """Get the current parity setting."""
+        return self._parity
+
+    parity = property(getParity, setParity, doc="Parity setting")
+
+
+    def setStopbits(self, stopbits):
+        """Change stop bits size."""
+        if stopbits not in self.STOPBITS: raise ValueError("Not a valid stop bit size: %r" % (stopbits,))
+        self._stopbits = stopbits
+        if self._isOpen: self._reconfigurePort()
+
+    def getStopbits(self):
+        """Get the current stop bits setting."""
+        return self._stopbits
+
+    stopbits = property(getStopbits, setStopbits, doc="Stop bits setting")
+
+
+    def setTimeout(self, timeout):
+        """Change timeout setting."""
+        if timeout is not None:
+            try:
+                timeout + 1     # test if it's a number, will throw a TypeError if not...
+            except TypeError:
+                raise ValueError("Not a valid timeout: %r" % (timeout,))
+            if timeout < 0: raise ValueError("Not a valid timeout: %r" % (timeout,))
+        self._timeout = timeout
+        if self._isOpen: self._reconfigurePort()
+
+    def getTimeout(self):
+        """Get the current timeout setting."""
+        return self._timeout
+
+    timeout = property(getTimeout, setTimeout, doc="Timeout setting for read()")
+
+
+    def setWriteTimeout(self, timeout):
+        """Change timeout setting."""
+        if timeout is not None:
+            if timeout < 0: raise ValueError("Not a valid timeout: %r" % (timeout,))
+            try:
+                timeout + 1     #test if it's a number, will throw a TypeError if not...
+            except TypeError:
+                raise ValueError("Not a valid timeout: %r" % timeout)
+
+        self._writeTimeout = timeout
+        if self._isOpen: self._reconfigurePort()
+
+    def getWriteTimeout(self):
+        """Get the current timeout setting."""
+        return self._writeTimeout
+
+    writeTimeout = property(getWriteTimeout, setWriteTimeout, doc="Timeout setting for write()")
+
+
+    def setXonXoff(self, xonxoff):
+        """Change XON/XOFF setting."""
+        self._xonxoff = xonxoff
+        if self._isOpen: self._reconfigurePort()
+
+    def getXonXoff(self):
+        """Get the current XON/XOFF setting."""
+        return self._xonxoff
+
+    xonxoff = property(getXonXoff, setXonXoff, doc="XON/XOFF setting")
+
+    def setRtsCts(self, rtscts):
+        """Change RTS/CTS flow control setting."""
+        self._rtscts = rtscts
+        if self._isOpen: self._reconfigurePort()
+
+    def getRtsCts(self):
+        """Get the current RTS/CTS flow control setting."""
+        return self._rtscts
+
+    rtscts = property(getRtsCts, setRtsCts, doc="RTS/CTS flow control setting")
+
+    def setDsrDtr(self, dsrdtr=None):
+        """Change DsrDtr flow control setting."""
+        if dsrdtr is None:
+            # if not set, keep backwards compatibility and follow rtscts setting
+            self._dsrdtr = self._rtscts
+        else:
+            # if defined independently, follow its value
+            self._dsrdtr = dsrdtr
+        if self._isOpen: self._reconfigurePort()
+
+    def getDsrDtr(self):
+        """Get the current DSR/DTR flow control setting."""
+        return self._dsrdtr
+
+    dsrdtr = property(getDsrDtr, setDsrDtr, doc="DSR/DTR flow control setting")
+
+    def setInterCharTimeout(self, interCharTimeout):
+        """Change inter-character timeout setting."""
+        if interCharTimeout is not None:
+            if interCharTimeout < 0: raise ValueError("Not a valid timeout: %r" % interCharTimeout)
+            try:
+                interCharTimeout + 1     # test if it's a number, will throw a TypeError if not...
+            except TypeError:
+                raise ValueError("Not a valid timeout: %r" % interCharTimeout)
+
+        self._interCharTimeout = interCharTimeout
+        if self._isOpen: self._reconfigurePort()
+
+    def getInterCharTimeout(self):
+        """Get the current inter-character timeout setting."""
+        return self._interCharTimeout
+
+    interCharTimeout = property(getInterCharTimeout, setInterCharTimeout, doc="Inter-character timeout setting for read()")
+
+    #  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -
+
+    _SETTINGS = ('baudrate', 'bytesize', 'parity', 'stopbits', 'xonxoff',
+            'dsrdtr', 'rtscts', 'timeout', 'writeTimeout', 'interCharTimeout')
+
+    def getSettingsDict(self):
+        """Get current port settings as a dictionary. For use with
+        applySettingsDict"""
+        return dict([(key, getattr(self, '_'+key)) for key in self._SETTINGS])
+
+    def applySettingsDict(self, d):
+        """apply stored settings from a dictionary returned from
+        getSettingsDict. Keys may be removed from the dictionary; those
+        values will simply be left unchanged."""
+        for key in self._SETTINGS:
+            if key in d and d[key] != getattr(self, '_'+key):   # check against internal "_" value
+                setattr(self, key, d[key])          # set non "_" value to use properties write function
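+    # Illustrative round trip (names chosen for illustration):
+    #   saved = ser.getSettingsDict()
+    #   ser.baudrate = 115200            # temporary change
+    #   ser.applySettingsDict(saved)     # restore the earlier settings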
+
+    #  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -
+
+    def __repr__(self):
+        """String representation of the current port settings and its state."""
+        return "%s<id=0x%x, open=%s>(port=%r, baudrate=%r, bytesize=%r, parity=%r, stopbits=%r, timeout=%r, xonxoff=%r, rtscts=%r, dsrdtr=%r)" % (
+            self.__class__.__name__,
+            id(self),
+            self._isOpen,
+            self.portstr,
+            self.baudrate,
+            self.bytesize,
+            self.parity,
+            self.stopbits,
+            self.timeout,
+            self.xonxoff,
+            self.rtscts,
+            self.dsrdtr,
+        )
+
+
+    #  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -
+    # compatibility with io library
+
+    def readable(self): return True
+    def writable(self): return True
+    def seekable(self): return False
+    def readinto(self, b):
+        data = self.read(len(b))
+        n = len(data)
+        try:
+            b[:n] = data
+        except TypeError, err:
+            import array
+            if not isinstance(b, array.array):
+                raise err
+            b[:n] = array.array('b', data)
+        return n
+
+
+if __name__ == '__main__':
+    import sys
+    s = SerialBase()
+    sys.stdout.write('port name:  %s\n' % s.portstr)
+    sys.stdout.write('baud rates: %s\n' % s.getSupportedBaudrates())
+    sys.stdout.write('byte sizes: %s\n' % s.getSupportedByteSizes())
+    sys.stdout.write('parities:   %s\n' % s.getSupportedParities())
+    sys.stdout.write('stop bits:  %s\n' % s.getSupportedStopbits())
+    sys.stdout.write('%s\n' % s)
diff --git a/catapult/telemetry/third_party/pyserial/serial/serialwin32.py b/catapult/telemetry/third_party/pyserial/serial/serialwin32.py
new file mode 100644
index 0000000..dfdd953
--- /dev/null
+++ b/catapult/telemetry/third_party/pyserial/serial/serialwin32.py
@@ -0,0 +1,461 @@
+#! python
+# Python Serial Port Extension for Win32, Linux, BSD, Jython
+# serial driver for win32
+# see __init__.py
+#
+# (C) 2001-2011 Chris Liechti <cliechti@gmx.net>
+# this is distributed under a free software license, see license.txt
+#
+# Initial patch to use ctypes by Giovanni Bajo <rasky@develer.com>
+
+import ctypes
+from serial import win32
+
+from serial.serialutil import *
+
+
+def device(portnum):
+    """Turn a port number into a device name"""
+    return 'COM%d' % (portnum+1) # numbers are transformed to a string
+
+
+class Win32Serial(SerialBase):
+    """Serial port implementation for Win32 based on ctypes."""
+
+    BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
+                 9600, 19200, 38400, 57600, 115200)
+
+    def __init__(self, *args, **kwargs):
+        self.hComPort = None
+        self._overlappedRead = None
+        self._overlappedWrite = None
+        self._rtsToggle = False
+
+        self._rtsState = win32.RTS_CONTROL_ENABLE
+        self._dtrState = win32.DTR_CONTROL_ENABLE
+
+
+        SerialBase.__init__(self, *args, **kwargs)
+
+    def open(self):
+        """Open port with current settings. This may throw a SerialException
+           if the port cannot be opened."""
+        if self._port is None:
+            raise SerialException("Port must be configured before it can be used.")
+        if self._isOpen:
+            raise SerialException("Port is already open.")
+        # the "\\.\COMx" format is required for devices other than COM1-COM8
+        # not all versions of windows seem to support this properly
+        # so that the first few ports are used with the DOS device name
+        port = self.portstr
+        try:
+            if port.upper().startswith('COM') and int(port[3:]) > 8:
+                port = '\\\\.\\' + port
+        except ValueError:
+            # for names like 'COMnotanumber'
+            pass
+        self.hComPort = win32.CreateFile(port,
+               win32.GENERIC_READ | win32.GENERIC_WRITE,
+               0, # exclusive access
+               None, # no security
+               win32.OPEN_EXISTING,
+               win32.FILE_ATTRIBUTE_NORMAL | win32.FILE_FLAG_OVERLAPPED,
+               0)
+        if self.hComPort == win32.INVALID_HANDLE_VALUE:
+            self.hComPort = None    # 'cause __del__ is called anyway
+            raise SerialException("could not open port %r: %r" % (self.portstr, ctypes.WinError()))
+
+        try:
+            self._overlappedRead = win32.OVERLAPPED()
+            self._overlappedRead.hEvent = win32.CreateEvent(None, 1, 0, None)
+            self._overlappedWrite = win32.OVERLAPPED()
+            #~ self._overlappedWrite.hEvent = win32.CreateEvent(None, 1, 0, None)
+            self._overlappedWrite.hEvent = win32.CreateEvent(None, 0, 0, None)
+
+            # Setup a 4k buffer
+            win32.SetupComm(self.hComPort, 4096, 4096)
+
+            # Save original timeout values:
+            self._orgTimeouts = win32.COMMTIMEOUTS()
+            win32.GetCommTimeouts(self.hComPort, ctypes.byref(self._orgTimeouts))
+
+            self._reconfigurePort()
+
+            # Clear buffers:
+            # Remove anything that was there
+            win32.PurgeComm(self.hComPort,
+                    win32.PURGE_TXCLEAR | win32.PURGE_TXABORT |
+                    win32.PURGE_RXCLEAR | win32.PURGE_RXABORT)
+        except:
+            try:
+                self._close()
+            except:
+                # ignore any exception when closing the port
+                # also to keep original exception that happened when setting up
+                pass
+            self.hComPort = None
+            raise
+        else:
+            self._isOpen = True
+
+
+    def _reconfigurePort(self):
+        """Set communication parameters on opened port."""
+        if not self.hComPort:
+            raise SerialException("Can only operate on a valid port handle")
+
+        # Set Windows timeout values
+        # timeouts is a tuple with the following items:
+        # (ReadIntervalTimeout,ReadTotalTimeoutMultiplier,
+        #  ReadTotalTimeoutConstant,WriteTotalTimeoutMultiplier,
+        #  WriteTotalTimeoutConstant)
+        if self._timeout is None:
+            timeouts = (0, 0, 0, 0, 0)
+        elif self._timeout == 0:
+            timeouts = (win32.MAXDWORD, 0, 0, 0, 0)
+        else:
+            timeouts = (0, 0, int(self._timeout*1000), 0, 0)
+        if self._timeout != 0 and self._interCharTimeout is not None:
+            timeouts = (int(self._interCharTimeout * 1000),) + timeouts[1:]
+
+        if self._writeTimeout is None:
+            pass
+        elif self._writeTimeout == 0:
+            timeouts = timeouts[:-2] + (0, win32.MAXDWORD)
+        else:
+            timeouts = timeouts[:-2] + (0, int(self._writeTimeout*1000))
+        win32.SetCommTimeouts(self.hComPort, ctypes.byref(win32.COMMTIMEOUTS(*timeouts)))
+
+        win32.SetCommMask(self.hComPort, win32.EV_ERR)
+
+        # Setup the connection info.
+        # Get state and modify it:
+        comDCB = win32.DCB()
+        win32.GetCommState(self.hComPort, ctypes.byref(comDCB))
+        comDCB.BaudRate = self._baudrate
+
+        if self._bytesize == FIVEBITS:
+            comDCB.ByteSize     = 5
+        elif self._bytesize == SIXBITS:
+            comDCB.ByteSize     = 6
+        elif self._bytesize == SEVENBITS:
+            comDCB.ByteSize     = 7
+        elif self._bytesize == EIGHTBITS:
+            comDCB.ByteSize     = 8
+        else:
+            raise ValueError("Unsupported number of data bits: %r" % self._bytesize)
+
+        if self._parity == PARITY_NONE:
+            comDCB.Parity       = win32.NOPARITY
+            comDCB.fParity      = 0 # Disable Parity Check
+        elif self._parity == PARITY_EVEN:
+            comDCB.Parity       = win32.EVENPARITY
+            comDCB.fParity      = 1 # Enable Parity Check
+        elif self._parity == PARITY_ODD:
+            comDCB.Parity       = win32.ODDPARITY
+            comDCB.fParity      = 1 # Enable Parity Check
+        elif self._parity == PARITY_MARK:
+            comDCB.Parity       = win32.MARKPARITY
+            comDCB.fParity      = 1 # Enable Parity Check
+        elif self._parity == PARITY_SPACE:
+            comDCB.Parity       = win32.SPACEPARITY
+            comDCB.fParity      = 1 # Enable Parity Check
+        else:
+            raise ValueError("Unsupported parity mode: %r" % self._parity)
+
+        if self._stopbits == STOPBITS_ONE:
+            comDCB.StopBits     = win32.ONESTOPBIT
+        elif self._stopbits == STOPBITS_ONE_POINT_FIVE:
+            comDCB.StopBits     = win32.ONE5STOPBITS
+        elif self._stopbits == STOPBITS_TWO:
+            comDCB.StopBits     = win32.TWOSTOPBITS
+        else:
+            raise ValueError("Unsupported number of stop bits: %r" % self._stopbits)
+
+        comDCB.fBinary          = 1 # Enable Binary Transmission
+        # Char. w/ Parity-Err are replaced with 0xff (if fErrorChar is set to TRUE)
+        if self._rtscts:
+            comDCB.fRtsControl  = win32.RTS_CONTROL_HANDSHAKE
+        elif self._rtsToggle:
+            comDCB.fRtsControl  = win32.RTS_CONTROL_TOGGLE
+        else:
+            comDCB.fRtsControl  = self._rtsState
+        if self._dsrdtr:
+            comDCB.fDtrControl  = win32.DTR_CONTROL_HANDSHAKE
+        else:
+            comDCB.fDtrControl  = self._dtrState
+
+        if self._rtsToggle:
+            comDCB.fOutxCtsFlow     = 0
+        else:
+            comDCB.fOutxCtsFlow     = self._rtscts
+        comDCB.fOutxDsrFlow     = self._dsrdtr
+        comDCB.fOutX            = self._xonxoff
+        comDCB.fInX             = self._xonxoff
+        comDCB.fNull            = 0
+        comDCB.fErrorChar       = 0
+        comDCB.fAbortOnError    = 0
+        comDCB.XonChar          = XON
+        comDCB.XoffChar         = XOFF
+
+        if not win32.SetCommState(self.hComPort, ctypes.byref(comDCB)):
+            raise ValueError("Cannot configure port, some setting was wrong. Original message: %r" % ctypes.WinError())
+
+    #~ def __del__(self):
+        #~ self.close()
+
+
+    def _close(self):
+        """internal close port helper"""
+        if self.hComPort:
+            # Restore original timeout values:
+            win32.SetCommTimeouts(self.hComPort, self._orgTimeouts)
+            # Close COM-Port:
+            win32.CloseHandle(self.hComPort)
+            if self._overlappedRead is not None:
+                win32.CloseHandle(self._overlappedRead.hEvent)
+                self._overlappedRead = None
+            if self._overlappedWrite is not None:
+                win32.CloseHandle(self._overlappedWrite.hEvent)
+                self._overlappedWrite = None
+            self.hComPort = None
+
+    def close(self):
+        """Close port"""
+        if self._isOpen:
+            self._close()
+            self._isOpen = False
+
+    def makeDeviceName(self, port):
+        return device(port)
+
+    #  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -
+
+    def inWaiting(self):
+        """Return the number of characters currently in the input buffer."""
+        flags = win32.DWORD()
+        comstat = win32.COMSTAT()
+        if not win32.ClearCommError(self.hComPort, ctypes.byref(flags), ctypes.byref(comstat)):
+            raise SerialException('call to ClearCommError failed')
+        return comstat.cbInQue
+
+    def read(self, size=1):
+        """Read size bytes from the serial port. If a timeout is set it may
+           return fewer characters than requested. With no timeout it will block
+           until the requested number of bytes is read."""
+        if not self.hComPort: raise portNotOpenError
+        if size > 0:
+            win32.ResetEvent(self._overlappedRead.hEvent)
+            flags = win32.DWORD()
+            comstat = win32.COMSTAT()
+            if not win32.ClearCommError(self.hComPort, ctypes.byref(flags), ctypes.byref(comstat)):
+                raise SerialException('call to ClearCommError failed')
+            if self.timeout == 0:
+                n = min(comstat.cbInQue, size)
+                if n > 0:
+                    buf = ctypes.create_string_buffer(n)
+                    rc = win32.DWORD()
+                    err = win32.ReadFile(self.hComPort, buf, n, ctypes.byref(rc), ctypes.byref(self._overlappedRead))
+                    if not err and win32.GetLastError() != win32.ERROR_IO_PENDING:
+                        raise SerialException("ReadFile failed (%r)" % ctypes.WinError())
+                    err = win32.WaitForSingleObject(self._overlappedRead.hEvent, win32.INFINITE)
+                    read = buf.raw[:rc.value]
+                else:
+                    read = bytes()
+            else:
+                buf = ctypes.create_string_buffer(size)
+                rc = win32.DWORD()
+                err = win32.ReadFile(self.hComPort, buf, size, ctypes.byref(rc), ctypes.byref(self._overlappedRead))
+                if not err and win32.GetLastError() != win32.ERROR_IO_PENDING:
+                    raise SerialException("ReadFile failed (%r)" % ctypes.WinError())
+                err = win32.GetOverlappedResult(self.hComPort, ctypes.byref(self._overlappedRead), ctypes.byref(rc), True)
+                read = buf.raw[:rc.value]
+        else:
+            read = bytes()
+        return bytes(read)
+
+    def write(self, data):
+        """Output the given string over the serial port."""
+        if not self.hComPort: raise portNotOpenError
+        #~ if not isinstance(data, (bytes, bytearray)):
+            #~ raise TypeError('expected %s or bytearray, got %s' % (bytes, type(data)))
+        # convert data (needed in case of memoryview instance: Py 3.1 io lib), ctypes doesn't like memoryview
+        data = to_bytes(data)
+        if data:
+            #~ win32event.ResetEvent(self._overlappedWrite.hEvent)
+            n = win32.DWORD()
+            err = win32.WriteFile(self.hComPort, data, len(data), ctypes.byref(n), self._overlappedWrite)
+            if not err and win32.GetLastError() != win32.ERROR_IO_PENDING:
+                raise SerialException("WriteFile failed (%r)" % ctypes.WinError())
+            if self._writeTimeout != 0: # if blocking (None) or w/ write timeout (>0)
+                # Wait for the write to complete.
+                #~ win32.WaitForSingleObject(self._overlappedWrite.hEvent, win32.INFINITE)
+                err = win32.GetOverlappedResult(self.hComPort, self._overlappedWrite, ctypes.byref(n), True)
+                if n.value != len(data):
+                    raise writeTimeoutError
+            return n.value
+        else:
+            return 0
+
+    def flush(self):
+        """Flush of file like objects. In this case, wait until all data
+           is written."""
+        while self.outWaiting():
+            time.sleep(0.05)
+        # XXX could also use WaitCommEvent with mask EV_TXEMPTY, but it would
+        # require overlapped IO and it's also only possible to set a single mask
+        # on the port
+
+    def flushInput(self):
+        """Clear input buffer, discarding all that is in the buffer."""
+        if not self.hComPort: raise portNotOpenError
+        win32.PurgeComm(self.hComPort, win32.PURGE_RXCLEAR | win32.PURGE_RXABORT)
+
+    def flushOutput(self):
+        """Clear output buffer, aborting the current output and
+        discarding all that is in the buffer."""
+        if not self.hComPort: raise portNotOpenError
+        win32.PurgeComm(self.hComPort, win32.PURGE_TXCLEAR | win32.PURGE_TXABORT)
+
+    def sendBreak(self, duration=0.25):
+        """Send break condition. Timed, returns to idle state after given duration."""
+        if not self.hComPort: raise portNotOpenError
+        import time
+        win32.SetCommBreak(self.hComPort)
+        time.sleep(duration)
+        win32.ClearCommBreak(self.hComPort)
+
+    def setBreak(self, level=1):
+        """Set break: Controls TXD. When active, no transmitting is possible."""
+        if not self.hComPort: raise portNotOpenError
+        if level:
+            win32.SetCommBreak(self.hComPort)
+        else:
+            win32.ClearCommBreak(self.hComPort)
+
+    def setRTS(self, level=1):
+        """Set terminal status line: Request To Send"""
+        # remember level for reconfigure
+        if level:
+            self._rtsState = win32.RTS_CONTROL_ENABLE
+        else:
+            self._rtsState = win32.RTS_CONTROL_DISABLE
+        # also apply now if port is open
+        if self.hComPort:
+            if level:
+                win32.EscapeCommFunction(self.hComPort, win32.SETRTS)
+            else:
+                win32.EscapeCommFunction(self.hComPort, win32.CLRRTS)
+
+    def setDTR(self, level=1):
+        """Set terminal status line: Data Terminal Ready"""
+        # remember level for reconfigure
+        if level:
+            self._dtrState = win32.DTR_CONTROL_ENABLE
+        else:
+            self._dtrState = win32.DTR_CONTROL_DISABLE
+        # also apply now if port is open
+        if self.hComPort:
+            if level:
+                win32.EscapeCommFunction(self.hComPort, win32.SETDTR)
+            else:
+                win32.EscapeCommFunction(self.hComPort, win32.CLRDTR)
+
+    def _GetCommModemStatus(self):
+        stat = win32.DWORD()
+        win32.GetCommModemStatus(self.hComPort, ctypes.byref(stat))
+        return stat.value
+
+    def getCTS(self):
+        """Read terminal status line: Clear To Send"""
+        if not self.hComPort: raise portNotOpenError
+        return win32.MS_CTS_ON & self._GetCommModemStatus() != 0
+
+    def getDSR(self):
+        """Read terminal status line: Data Set Ready"""
+        if not self.hComPort: raise portNotOpenError
+        return win32.MS_DSR_ON & self._GetCommModemStatus() != 0
+
+    def getRI(self):
+        """Read terminal status line: Ring Indicator"""
+        if not self.hComPort: raise portNotOpenError
+        return win32.MS_RING_ON & self._GetCommModemStatus() != 0
+
+    def getCD(self):
+        """Read terminal status line: Carrier Detect"""
+        if not self.hComPort: raise portNotOpenError
+        return win32.MS_RLSD_ON & self._GetCommModemStatus() != 0
+
+    # - - platform specific - - - -
+
+    def setBufferSize(self, rx_size=4096, tx_size=None):
+        """\
+        Recommend a buffer size to the driver (device driver can ignore this
+        value). Must be called before the port is opened.
+        """
+        if tx_size is None: tx_size = rx_size
+        win32.SetupComm(self.hComPort, rx_size, tx_size)
+
+    def setXON(self, level=True):
+        """\
+        Manually control flow - when software flow control is enabled.
+        This will send XON (true) and XOFF (false) to the other device.
+        WARNING: this function is not portable to different platforms!
+        """
+        if not self.hComPort: raise portNotOpenError
+        if level:
+            win32.EscapeCommFunction(self.hComPort, win32.SETXON)
+        else:
+            win32.EscapeCommFunction(self.hComPort, win32.SETXOFF)
+
+    def outWaiting(self):
+        """Return how many characters are in the outgoing buffer."""
+        flags = win32.DWORD()
+        comstat = win32.COMSTAT()
+        if not win32.ClearCommError(self.hComPort, ctypes.byref(flags), ctypes.byref(comstat)):
+            raise SerialException('call to ClearCommError failed')
+        return comstat.cbOutQue
+
+    # functions useful for RS-485 adapters
+    def setRtsToggle(self, rtsToggle):
+        """Change RTS toggle control setting."""
+        self._rtsToggle = rtsToggle
+        if self._isOpen: self._reconfigurePort()
+
+    def getRtsToggle(self):
+        """Get the current RTS toggle control setting."""
+        return self._rtsToggle
+
+    rtsToggle = property(getRtsToggle, setRtsToggle, doc="RTS toggle control setting")
+
+
+# assemble Serial class with the platform specific implementation and the base
+# for file-like behavior. for Python 2.6 and newer, that provide the new I/O
+# library, derive from io.RawIOBase
+try:
+    import io
+except ImportError:
+    # classic version with our own file-like emulation
+    class Serial(Win32Serial, FileLike):
+        pass
+else:
+    # io library present
+    class Serial(Win32Serial, io.RawIOBase):
+        pass
+
+
+# Test function only!!
+if __name__ == '__main__':
+    s = Serial(0)
+    sys.stdout.write("%s\n" % s)
+
+    s = Serial()
+    sys.stdout.write("%s\n" % s)
+
+    s.baudrate = 19200
+    s.databits = 7
+    s.close()
+    s.port = 0
+    s.open()
+    sys.stdout.write("%s\n" % s)
+
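
On Windows, the Serial class assembled above is what the portable serial.Serial factory ends up using. A short usage sketch, with 'COM3' and the payload as placeholders, noting which Win32 calls from this file back each step:

    import serial

    # 'COM3' is a placeholder; names above COM8 get the \\.\ prefix in open().
    ser = serial.Serial('COM3', baudrate=115200, timeout=1, writeTimeout=2)

    ser.write('ping\r\n')      # WriteFile() with overlapped I/O
    reply = ser.read(64)       # up to 64 bytes, or fewer after the 1 s timeout
    print(repr(reply))

    ser.setRTS(False)          # EscapeCommFunction(CLRRTS)
    ser.setDTR(True)           # EscapeCommFunction(SETDTR)
    ser.close()                # restores the original COMMTIMEOUTS
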
diff --git a/catapult/telemetry/third_party/pyserial/serial/sermsdos.py b/catapult/telemetry/third_party/pyserial/serial/sermsdos.py
new file mode 100644
index 0000000..09a0017
--- /dev/null
+++ b/catapult/telemetry/third_party/pyserial/serial/sermsdos.py
@@ -0,0 +1,200 @@
+# sermsdos.py
+#
+# History:
+#
+#   3rd September 2002                      Dave Haynes
+#   1. First defined
+#
+# Although this code should run under the latest versions of
+# Python, on DOS-based platforms such as Windows 95 and 98,
+# it has been specifically written to be compatible with
+# PyDOS, available at:
+# http://www.python.org/ftp/python/wpy/dos.html
+#
+# PyDOS is a stripped-down version of Python 1.5.2 for
+# DOS machines. Therefore, in making changes to this file,
+# please respect Python 1.5.2 syntax. In addition, please
+# limit the width of this file to 60 characters.
+#
+# Note also that the modules in PyDOS contain fewer members
+# than other versions, so we are restricted to using the
+# following:
+#
+# In module os:
+# -------------
+# environ, chdir, getcwd, getpid, umask, fdopen, close,
+# dup, dup2, fstat, lseek, open, read, write, O_RDONLY,
+# O_WRONLY, O_RDWR, O_APPEND, O_CREAT, O_EXCL, O_TRUNC,
+# access, F_OK, R_OK, W_OK, X_OK, chmod, listdir, mkdir,
+# remove, rename, renames, rmdir, stat, unlink, utime,
+# execl, execle, execlp, execlpe, execvp, execvpe, _exit,
+# system.
+#
+# In module os.path:
+# ------------------
+# curdir, pardir, sep, altsep, pathsep, defpath, linesep.
+#
+
+import os
+import sys
+import string
+from serial import serialutil
+
+BAUD_RATES = {
+                110: "11",
+                150: "15",
+                300: "30",
+                600: "60",
+                1200: "12",
+                2400: "24",
+                4800: "48",
+                9600: "96",
+                19200: "19"}
+
+(PARITY_NONE, PARITY_EVEN, PARITY_ODD, PARITY_MARK,
+PARITY_SPACE) = (0, 1, 2, 3, 4)
+(STOPBITS_ONE, STOPBITS_ONEANDAHALF,
+STOPBITS_TWO) = (1, 1.5, 2)
+FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS = (5, 6, 7, 8)
+(RETURN_ERROR, RETURN_BUSY, RETURN_RETRY, RETURN_READY,
+RETURN_NONE) = ('E', 'B', 'P', 'R', 'N')
+portNotOpenError = ValueError('port not open')
+
+def device(portnum):
+    return 'COM%d' % (portnum+1)
+
+class Serial(serialutil.FileLike):
+    """
+       port: number of device; numbering starts at
+            zero. if everything fails, the user can
+            specify a device string, note that this
+            isn't portable any more
+       baudrate: baud rate
+       bytesize: number of databits
+       parity: enable parity checking
+       stopbits: number of stopbits
+       timeout: set a timeout (None for waiting forever)
+       xonxoff: enable software flow control
+       rtscts: enable RTS/CTS flow control
+       retry: DOS retry mode
+    """
+    def __init__(self,
+                 port,
+                 baudrate = 9600,
+                 bytesize = EIGHTBITS,
+                 parity = PARITY_NONE,
+                 stopbits = STOPBITS_ONE,
+                 timeout = None,
+                 xonxoff = 0,
+                 rtscts = 0,
+                 retry = RETURN_RETRY
+                 ):
+
+        if type(port) == type(''):
+            # strings are taken directly
+            self.portstr = port
+        else:
+            # numbers are transformed to a string
+            self.portstr = device(port)
+
+        self.baud = BAUD_RATES[baudrate]
+        self.bytesize = str(bytesize)
+
+        if parity == PARITY_NONE:
+            self.parity = 'N'
+        elif parity == PARITY_EVEN:
+            self.parity = 'E'
+        elif parity == PARITY_ODD:
+            self.parity = 'O'
+        elif parity == PARITY_MARK:
+            self.parity = 'M'
+        elif parity == PARITY_SPACE:
+            self.parity = 'S'
+
+        self.stop = str(stopbits)
+        self.retry = retry
+        self.filename = "sermsdos.tmp"
+
+        self._config(self.portstr, self.baud, self.parity,
+        self.bytesize, self.stop, self.retry, self.filename)
+
+    def __del__(self):
+        self.close()
+
+    def close(self):
+        pass
+
+    def _config(self, port, baud, parity, data, stop, retry,
+        filename):
+        comString = string.join(("MODE ", port, ":"
+        , " BAUD= ", baud, " PARITY= ", parity
+        , " DATA= ", data, " STOP= ", stop, " RETRY= ",
+        retry, " > ", filename ), '')
+        os.system(comString)
+
+    def setBaudrate(self, baudrate):
+        self._config(self.portstr, BAUD_RATES[baudrate],
+        self.parity, self.bytesize, self.stop, self.retry,
+        self.filename)
+
+    def inWaiting(self):
+        """returns the number of bytes waiting to be read"""
+        raise NotImplementedError
+
+    def read(self, num = 1):
+        """Read num bytes from serial port"""
+        handle = os.open(self.portstr,
+        os.O_RDONLY | os.O_BINARY)
+        rv = os.read(handle, num)
+        os.close(handle)
+        return rv
+
+    def write(self, s):
+        """Write string to serial port"""
+        handle = os.open(self.portstr,
+        os.O_WRONLY | os.O_BINARY)
+        rv = os.write(handle, s)
+        os.close(handle)
+        return rv
+
+    def flushInput(self):
+        raise NotImplementedError
+
+    def flushOutput(self):
+        raise NotImplementedError
+
+    def sendBreak(self):
+        raise NotImplementedError
+
+    def setRTS(self,level=1):
+        """Set terminal status line"""
+        raise NotImplementedError
+
+    def setDTR(self,level=1):
+        """Set terminal status line"""
+        raise NotImplementedError
+
+    def getCTS(self):
+        """Read terminal status line"""
+        raise NotImplementedError
+
+    def getDSR(self):
+        """Read terminal status line"""
+        raise NotImplementedError
+
+    def getRI(self):
+        """Read terminal status line"""
+        raise NotImplementedError
+
+    def getCD(self):
+        """Read terminal status line"""
+        raise NotImplementedError
+
+    def __repr__(self):
+        return string.join(( "<Serial>: ", self.portstr
+        , self.baud, self.parity, self.bytesize, self.stop,
+        self.retry , self.filename), ' ')
+
+if __name__ == '__main__':
+    s = Serial(0)
+    sys.stdout.write('%s %s\n' % (__name__, s))
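
The DOS backend configures the port purely by shelling out to MODE in _config(). A small sketch that reproduces the command string built there for the constructor defaults on COM1, with the values taken straight from the tables and defaults above:

    import string

    port, baud, parity, data, stop, retry, filename = (
        'COM1', '96', 'N', '8', '1', 'P', 'sermsdos.tmp')
    cmd = string.join(("MODE ", port, ":", " BAUD= ", baud, " PARITY= ", parity,
                       " DATA= ", data, " STOP= ", stop, " RETRY= ", retry,
                       " > ", filename), '')
    # MODE COM1: BAUD= 96 PARITY= N DATA= 8 STOP= 1 RETRY= P > sermsdos.tmp
    print(cmd)
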
diff --git a/catapult/perf_insights/third_party/__init__.py b/catapult/telemetry/third_party/pyserial/serial/tools/__init__.py
similarity index 100%
copy from catapult/perf_insights/third_party/__init__.py
copy to catapult/telemetry/third_party/pyserial/serial/tools/__init__.py
diff --git a/catapult/telemetry/third_party/pyserial/serial/tools/list_ports.py b/catapult/telemetry/third_party/pyserial/serial/tools/list_ports.py
new file mode 100755
index 0000000..d373a55
--- /dev/null
+++ b/catapult/telemetry/third_party/pyserial/serial/tools/list_ports.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python
+
+# portable serial port access with python
+# this is a wrapper module for different platform implementations of the
+# port enumeration feature
+#
+# (C) 2011-2013 Chris Liechti <cliechti@gmx.net>
+# this is distributed under a free software license, see license.txt
+
+"""\
+This module will provide a function called comports that returns an
+iterable (generator or list) that will enumerate available com ports. Note that
+on some systems non-existent ports may be listed.
+
+Additionally a grep function is supplied that can be used to search for ports
+based on their descriptions or hardware ID.
+"""
+
+import sys, os, re
+
+# choose an implementation, depending on os
+#~ if sys.platform == 'cli':
+#~ else:
+import os
+# choose an implementation, depending on os
+if os.name == 'nt': #sys.platform == 'win32':
+    from serial.tools.list_ports_windows import *
+elif os.name == 'posix':
+    from serial.tools.list_ports_posix import *
+#~ elif os.name == 'java':
+else:
+    raise ImportError("Sorry: no implementation for your platform ('%s') available" % (os.name,))
+
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+def grep(regexp):
+    """\
+    Search for ports using a regular expression. Port name, description and
+    hardware ID are searched. The function returns an iterable that returns the
+    same tuples as comports() would.
+    """
+    r = re.compile(regexp, re.I)
+    for port, desc, hwid in comports():
+        if r.search(port) or r.search(desc) or r.search(hwid):
+            yield port, desc, hwid
+
+
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+def main():
+    import optparse
+
+    parser = optparse.OptionParser(
+        usage = "%prog [options] [<regexp>]",
+        description = "List available serial ports, optionally filtered by a regular expression."
+    )
+
+    parser.add_option("--debug",
+            help="print debug messages and tracebacks (development mode)",
+            dest="debug",
+            default=False,
+            action='store_true')
+
+    parser.add_option("-v", "--verbose",
+            help="show more messages (can be given multiple times)",
+            dest="verbose",
+            default=1,
+            action='count')
+
+    parser.add_option("-q", "--quiet",
+            help="suppress all messages",
+            dest="verbose",
+            action='store_const',
+            const=0)
+
+    (options, args) = parser.parse_args()
+
+
+    hits = 0
+    # get iterator w/ or w/o filter
+    if args:
+        if len(args) > 1:
+            parser.error('more than one regexp not supported')
+        print "Filtered list with regexp: %r" % (args[0],)
+        iterator = sorted(grep(args[0]))
+    else:
+        iterator = sorted(comports())
+    # list them
+    for port, desc, hwid in iterator:
+        print("%-20s" % (port,))
+        if options.verbose > 1:
+            print("    desc: %s" % (desc,))
+            print("    hwid: %s" % (hwid,))
+        hits += 1
+    if options.verbose:
+        if hits:
+            print("%d ports found" % (hits,))
+        else:
+            print("no ports found")
+
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+# test
+if __name__ == '__main__':
+    main()
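
The module's public surface is just comports() and grep(), both yielding (port, description, hardware ID) 3-tuples. A short usage sketch (output depends on what is attached; the '0403' vendor ID is only an example):

    from serial.tools import list_ports

    # enumerate everything the platform backend can find
    for port, desc, hwid in sorted(list_ports.comports()):
        print('%-20s %s [%s]' % (port, desc, hwid))

    # grep() filters the same tuples by a regexp over all three fields,
    # e.g. looking for a particular USB vendor ID in the hardware ID string
    for port, desc, hwid in list_ports.grep('0403'):
        print('matching port: %s' % port)
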
diff --git a/catapult/telemetry/third_party/pyserial/serial/tools/list_ports_linux.py b/catapult/telemetry/third_party/pyserial/serial/tools/list_ports_linux.py
new file mode 100755
index 0000000..ecfd158
--- /dev/null
+++ b/catapult/telemetry/third_party/pyserial/serial/tools/list_ports_linux.py
@@ -0,0 +1,151 @@
+#!/usr/bin/env python
+
+# portable serial port access with python
+#
+# This is a module that gathers a list of serial ports including details on
+# GNU/Linux systems
+#
+# (C) 2011-2013 Chris Liechti <cliechti@gmx.net>
+# this is distributed under a free software license, see license.txt
+
+import glob
+import sys
+import os
+import re
+
+try:
+    import subprocess
+except ImportError:
+    def popen(argv):
+        try:
+            si, so =  os.popen4(' '.join(argv))
+            return so.read().strip()
+        except:
+            raise IOError('lsusb failed')
+else:
+    def popen(argv):
+        try:
+            return subprocess.check_output(argv, stderr=subprocess.STDOUT).strip()
+        except:
+            raise IOError('lsusb failed')
+
+
+# The comports function is expected to return an iterable that yields tuples of
+# 3 strings: port name, human readable description and a hardware ID.
+#
+# as currently no method is known to get the second two strings easily, they
+# are currently just identical to the port name.
+
+# try to detect the OS so that a device can be selected...
+plat = sys.platform.lower()
+
+def read_line(filename):
+    """Helper function to read a single line from a file. Returns None on error."""
+    try:
+        f = open(filename)
+        line = f.readline().strip()
+        f.close()
+        return line
+    except IOError:
+        return None
+
+def re_group(regexp, text):
+    """search for regexp in text, return 1st group on match"""
+    if sys.version < '3':
+        m = re.search(regexp, text)
+    else:
+        # text is bytes-like
+        m = re.search(regexp, text.decode('ascii', 'replace'))
+    if m: return m.group(1)
+
+
+# try to extract descriptions from sysfs. this was done by experimenting,
+# no guarantee that it works for all devices or in the future...
+
+def usb_sysfs_hw_string(sysfs_path):
+    """given a path to a usb device in sysfs, return a string describing it"""
+    bus, dev = os.path.basename(os.path.realpath(sysfs_path)).split('-')
+    snr = read_line(sysfs_path+'/serial')
+    if snr:
+        snr_txt = ' SNR=%s' % (snr,)
+    else:
+        snr_txt = ''
+    return 'USB VID:PID=%s:%s%s' % (
+            read_line(sysfs_path+'/idVendor'),
+            read_line(sysfs_path+'/idProduct'),
+            snr_txt
+            )
+
+def usb_lsusb_string(sysfs_path):
+    base = os.path.basename(os.path.realpath(sysfs_path))
+    bus = base.split('-')[0]
+    try:
+        dev = int(read_line(os.path.join(sysfs_path, 'devnum')))
+        desc = popen(['lsusb', '-v', '-s', '%s:%s' % (bus, dev)])
+        # descriptions from device
+        iManufacturer = re_group('iManufacturer\s+\w+ (.+)', desc)
+        iProduct = re_group('iProduct\s+\w+ (.+)', desc)
+        iSerial = re_group('iSerial\s+\w+ (.+)', desc) or ''
+        # descriptions from kernel
+        idVendor = re_group('idVendor\s+0x\w+ (.+)', desc)
+        idProduct = re_group('idProduct\s+0x\w+ (.+)', desc)
+        # create descriptions. prefer text from device, fall back to the others
+        return '%s %s %s' % (iManufacturer or idVendor, iProduct or idProduct, iSerial)
+    except IOError:
+        return base
+
+def describe(device):
+    """\
+    Get a human readable description.
+    For USB-Serial devices try to run lsusb to get a human readable description.
+    For USB-CDC devices read the description from sysfs.
+    """
+    base = os.path.basename(device)
+    # USB-Serial devices
+    sys_dev_path = '/sys/class/tty/%s/device/driver/%s' % (base, base)
+    if os.path.exists(sys_dev_path):
+        sys_usb = os.path.dirname(os.path.dirname(os.path.realpath(sys_dev_path)))
+        return usb_lsusb_string(sys_usb)
+    # USB-CDC devices
+    sys_dev_path = '/sys/class/tty/%s/device/interface' % (base,)
+    if os.path.exists(sys_dev_path):
+        return read_line(sys_dev_path)
+
+    # USB Product Information
+    sys_dev_path = '/sys/class/tty/%s/device' % (base,)
+    if os.path.exists(sys_dev_path):
+        product_name_file = os.path.dirname(os.path.realpath(sys_dev_path)) + "/product"
+        if os.path.exists(product_name_file):
+            return read_line(product_name_file)
+
+    return base
+
+def hwinfo(device):
+    """Try to get a HW identification using sysfs"""
+    base = os.path.basename(device)
+    if os.path.exists('/sys/class/tty/%s/device' % (base,)):
+        # PCI based devices
+        sys_id_path = '/sys/class/tty/%s/device/id' % (base,)
+        if os.path.exists(sys_id_path):
+            return read_line(sys_id_path)
+        # USB-Serial devices
+        sys_dev_path = '/sys/class/tty/%s/device/driver/%s' % (base, base)
+        if os.path.exists(sys_dev_path):
+            sys_usb = os.path.dirname(os.path.dirname(os.path.realpath(sys_dev_path)))
+            return usb_sysfs_hw_string(sys_usb)
+        # USB-CDC devices
+        if base.startswith('ttyACM'):
+            sys_dev_path = '/sys/class/tty/%s/device' % (base,)
+            if os.path.exists(sys_dev_path):
+                return usb_sysfs_hw_string(sys_dev_path + '/..')
+    return 'n/a'    # XXX directly remove these from the list?
+
+def comports():
+    devices = glob.glob('/dev/ttyS*') + glob.glob('/dev/ttyUSB*') + glob.glob('/dev/ttyACM*')
+    return [(d, describe(d), hwinfo(d)) for d in devices]
+
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+# test
+if __name__ == '__main__':
+    for port, desc, hwid in sorted(comports()):
+        print "%s: %s [%s]" % (port, desc, hwid)
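
The description and hardware-ID strings come from lsusb output and sysfs attributes, as implemented above. A tiny sketch that calls the two helpers directly on one device ('/dev/ttyUSB0' is a placeholder for an attached USB-serial adapter):

    from serial.tools import list_ports_linux

    device = '/dev/ttyUSB0'   # placeholder device path
    print(list_ports_linux.describe(device))  # lsusb text or sysfs product name
    print(list_ports_linux.hwinfo(device))    # e.g. 'USB VID:PID=xxxx:xxxx SNR=...'
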
diff --git a/catapult/telemetry/third_party/pyserial/serial/tools/list_ports_osx.py b/catapult/telemetry/third_party/pyserial/serial/tools/list_ports_osx.py
new file mode 100755
index 0000000..c9ed615
--- /dev/null
+++ b/catapult/telemetry/third_party/pyserial/serial/tools/list_ports_osx.py
@@ -0,0 +1,208 @@
+#!/usr/bin/env python
+
+# portable serial port access with python
+#
+# This is a module that gathers a list of serial ports including details on OSX
+#
+# code originally from https://github.com/makerbot/pyserial/tree/master/serial/tools
+# with contributions from cibomahto, dgs3, FarMcKon, tedbrandston
+# and modifications by cliechti
+#
+# this is distributed under a free software license, see license.txt
+
+
+
+# List all of the callout devices in OS/X by querying IOKit.
+
+# See the following for a reference of how to do this:
+# http://developer.apple.com/library/mac/#documentation/DeviceDrivers/Conceptual/WorkingWSerial/WWSerial_SerialDevs/SerialDevices.html#//apple_ref/doc/uid/TP30000384-CIHGEAFD
+
+# More help from darwin_hid.py
+
+# Also see the 'IORegistryExplorer' for an idea of what we are actually searching
+
+import ctypes
+from ctypes import util
+import re
+
+iokit = ctypes.cdll.LoadLibrary(ctypes.util.find_library('IOKit'))
+cf = ctypes.cdll.LoadLibrary(ctypes.util.find_library('CoreFoundation'))
+
+kIOMasterPortDefault = ctypes.c_void_p.in_dll(iokit, "kIOMasterPortDefault")
+kCFAllocatorDefault = ctypes.c_void_p.in_dll(cf, "kCFAllocatorDefault")
+
+kCFStringEncodingMacRoman = 0
+
+iokit.IOServiceMatching.restype = ctypes.c_void_p
+
+iokit.IOServiceGetMatchingServices.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
+iokit.IOServiceGetMatchingServices.restype = ctypes.c_void_p
+
+iokit.IORegistryEntryGetParentEntry.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
+
+iokit.IORegistryEntryCreateCFProperty.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint32]
+iokit.IORegistryEntryCreateCFProperty.restype = ctypes.c_void_p
+
+iokit.IORegistryEntryGetPath.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
+iokit.IORegistryEntryGetPath.restype = ctypes.c_void_p
+
+iokit.IORegistryEntryGetName.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
+iokit.IORegistryEntryGetName.restype = ctypes.c_void_p
+
+iokit.IOObjectGetClass.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
+iokit.IOObjectGetClass.restype = ctypes.c_void_p
+
+iokit.IOObjectRelease.argtypes = [ctypes.c_void_p]
+
+
+cf.CFStringCreateWithCString.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_int32]
+cf.CFStringCreateWithCString.restype = ctypes.c_void_p
+
+cf.CFStringGetCStringPtr.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
+cf.CFStringGetCStringPtr.restype = ctypes.c_char_p
+
+cf.CFNumberGetValue.argtypes = [ctypes.c_void_p, ctypes.c_uint32, ctypes.c_void_p]
+cf.CFNumberGetValue.restype = ctypes.c_void_p
+
+def get_string_property(device_t, property):
+    """ Search the given device for the specified string property
+
+    @param device_t Device to search
+    @param property String to search for.
+    @return Python string containing the value, or None if not found.
+    """
+    key = cf.CFStringCreateWithCString(
+        kCFAllocatorDefault,
+        property.encode("mac_roman"),
+        kCFStringEncodingMacRoman
+    )
+
+    CFContainer = iokit.IORegistryEntryCreateCFProperty(
+        device_t,
+        key,
+        kCFAllocatorDefault,
+        0
+    );
+
+    output = None
+
+    if CFContainer:
+        output = cf.CFStringGetCStringPtr(CFContainer, 0)
+
+    return output
+
+def get_int_property(device_t, property):
+    """ Search the given device for the specified integer property
+
+    @param device_t Device to search
+    @param property String to search for.
+    @return Python int containing the value, or 0 if not found.
+    """
+    key = cf.CFStringCreateWithCString(
+        kCFAllocatorDefault,
+        property.encode("mac_roman"),
+        kCFStringEncodingMacRoman
+    )
+
+    CFContainer = iokit.IORegistryEntryCreateCFProperty(
+        device_t,
+        key,
+        kCFAllocatorDefault,
+        0
+    );
+
+    number = ctypes.c_uint16()
+
+    if CFContainer:
+        output = cf.CFNumberGetValue(CFContainer, 2, ctypes.byref(number))
+
+    return number.value
+
+def IORegistryEntryGetName(device):
+    pathname = ctypes.create_string_buffer(100) # TODO: Is this ok?
+    iokit.IOObjectGetClass(
+        device,
+        ctypes.byref(pathname)
+    )
+
+    return pathname.value
+
+def GetParentDeviceByType(device, parent_type):
+    """ Find the first parent of a device that implements the parent_type
+        @param IOService Service to inspect
+        @return Pointer to the parent type, or None if it was not found.
+    """
+    # First, try to walk up the IOService tree to find a parent of this device that is a IOUSBDevice.
+    while IORegistryEntryGetName(device) != parent_type:
+        parent = ctypes.c_void_p()
+        response = iokit.IORegistryEntryGetParentEntry(
+            device,
+            "IOService".encode("mac_roman"),
+            ctypes.byref(parent)
+        )
+
+        # If we weren't able to find a parent for the device, we're done.
+        if response != 0:
+            return None
+
+        device = parent
+
+    return device
+
+def GetIOServicesByType(service_type):
+    """Return a list of IOKit services matching the given service type.
+    """
+    serial_port_iterator = ctypes.c_void_p()
+
+    response = iokit.IOServiceGetMatchingServices(
+        kIOMasterPortDefault,
+        iokit.IOServiceMatching(service_type),
+        ctypes.byref(serial_port_iterator)
+    )
+
+    services = []
+    while iokit.IOIteratorIsValid(serial_port_iterator):
+        service = iokit.IOIteratorNext(serial_port_iterator)
+        if not service:
+            break
+        services.append(service)
+
+    iokit.IOObjectRelease(serial_port_iterator)
+
+    return services
+
+def comports():
+    # Scan for all iokit serial ports
+    services = GetIOServicesByType('IOSerialBSDClient')
+
+    ports = []
+    for service in services:
+        info = []
+
+        # First, add the callout device file.
+        info.append(get_string_property(service, "IOCalloutDevice"))
+
+        # If the serial port is implemented by a USB device, add the USB info.
+        usb_device = GetParentDeviceByType(service, "IOUSBDevice")
+        if usb_device != None:
+            info.append(get_string_property(usb_device, "USB Product Name"))
+
+            info.append(
+                "USB VID:PID=%x:%x SNR=%s"%(
+                get_int_property(usb_device, "idVendor"),
+                get_int_property(usb_device, "idProduct"),
+                get_string_property(usb_device, "USB Serial Number"))
+            )
+        else:
+            info.append('n/a')
+            info.append('n/a')
+
+        ports.append(info)
+
+    return ports
+
+# test
+if __name__ == '__main__':
+    for port, desc, hwid in sorted(comports()):
+        print "%s: %s [%s]" % (port, desc, hwid)
+
diff --git a/catapult/telemetry/third_party/pyserial/serial/tools/list_ports_posix.py b/catapult/telemetry/third_party/pyserial/serial/tools/list_ports_posix.py
new file mode 100755
index 0000000..09f115f
--- /dev/null
+++ b/catapult/telemetry/third_party/pyserial/serial/tools/list_ports_posix.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+
+# portable serial port access with python
+
+# This is a module that gathers a list of serial ports on POSIXy systems.
+# For some specific implementations, see also list_ports_linux, list_ports_osx
+#
+# this is a wrapper module for different platform implementations of the
+# port enumeration feature
+#
+# (C) 2011-2013 Chris Liechti <cliechti@gmx.net>
+# this is distributed under a free software license, see license.txt
+
+"""\
+The ``comports`` function is expected to return an iterable that yields tuples
+of 3 strings: port name, human readable description and a hardware ID.
+
+As currently no method is known to get the second two strings easily, they are
+currently just identical to the port name.
+"""
+
+import glob
+import sys
+import os
+
+# try to detect the OS so that a device can be selected...
+plat = sys.platform.lower()
+
+if   plat[:5] == 'linux':    # Linux (confirmed)
+    from serial.tools.list_ports_linux import comports
+
+elif plat == 'cygwin':       # cygwin/win32
+    def comports():
+        devices = glob.glob('/dev/com*')
+        return [(d, d, d) for d in devices]
+
+elif plat[:7] == 'openbsd':    # OpenBSD
+    def comports():
+        devices = glob.glob('/dev/cua*')
+        return [(d, d, d) for d in devices]
+
+elif plat[:3] == 'bsd' or  \
+        plat[:7] == 'freebsd':
+
+    def comports():
+        devices = glob.glob('/dev/cuad*')
+        return [(d, d, d) for d in devices]
+
+elif plat[:6] == 'darwin':   # OS X (confirmed)
+    from serial.tools.list_ports_osx import comports
+
+elif plat[:6] == 'netbsd':   # NetBSD
+    def comports():
+        """scan for available ports. return a list of device names."""
+        devices = glob.glob('/dev/dty*')
+        return [(d, d, d) for d in devices]
+
+elif plat[:4] == 'irix':     # IRIX
+    def comports():
+        """scan for available ports. return a list of device names."""
+        devices = glob.glob('/dev/ttyf*')
+        return [(d, d, d) for d in devices]
+
+elif plat[:2] == 'hp':       # HP-UX (not tested)
+    def comports():
+        """scan for available ports. return a list of device names."""
+        devices = glob.glob('/dev/tty*p0')
+        return [(d, d, d) for d in devices]
+
+elif plat[:5] == 'sunos':    # Solaris/SunOS
+    def comports():
+        """scan for available ports. return a list of device names."""
+        devices = glob.glob('/dev/tty*c')
+        return [(d, d, d) for d in devices]
+
+elif plat[:3] == 'aix':      # AIX
+    def comports():
+        """scan for available ports. return a list of device names."""
+        devices = glob.glob('/dev/tty*')
+        return [(d, d, d) for d in devices]
+
+else:
+    # platform detection has failed...
+    sys.stderr.write("""\
+don't know how to enumerate ttys on this system.
+! If you know how the serial ports are named, send this information to
+! the author of this module:
+
+sys.platform = %r
+os.name = %r
+pySerial version = %s
+
+also add the naming scheme of the serial ports and with a bit of luck you can get
+this module running...
+""" % (sys.platform, os.name, serial.VERSION))
+    raise ImportError("Sorry: no implementation for your platform ('%s') available" % (os.name,))
+
+# test
+if __name__ == '__main__':
+    for port, desc, hwid in sorted(comports()):
+        print "%s: %s [%s]" % (port, desc, hwid)
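
When platform detection falls through to the error above, the intended fix is another branch following the same glob pattern as the existing ones. A hypothetical sketch of such a fallback, written as a standalone function (the '/dev/ttyX*' naming scheme is made up):

    import glob

    def comports():
        """Scan for available ports on an otherwise unsupported platform."""
        devices = glob.glob('/dev/ttyX*')   # substitute the real naming scheme
        # with no better information available, description and hardware ID
        # simply repeat the device name, exactly as the other branches do
        return [(d, d, d) for d in devices]
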
diff --git a/catapult/telemetry/third_party/pyserial/serial/tools/list_ports_windows.py b/catapult/telemetry/third_party/pyserial/serial/tools/list_ports_windows.py
new file mode 100644
index 0000000..ca597ca
--- /dev/null
+++ b/catapult/telemetry/third_party/pyserial/serial/tools/list_ports_windows.py
@@ -0,0 +1,240 @@
+import ctypes
+import re
+
+def ValidHandle(value, func, arguments):
+    if value == 0:
+        raise ctypes.WinError()
+    return value
+
+import serial
+from serial.win32 import ULONG_PTR, is_64bit
+from ctypes.wintypes import HANDLE
+from ctypes.wintypes import BOOL
+from ctypes.wintypes import HWND
+from ctypes.wintypes import DWORD
+from ctypes.wintypes import WORD
+from ctypes.wintypes import LONG
+from ctypes.wintypes import ULONG
+from ctypes.wintypes import LPCSTR
+from ctypes.wintypes import HKEY
+from ctypes.wintypes import BYTE
+
+NULL = 0
+HDEVINFO = ctypes.c_void_p
+PCTSTR = ctypes.c_char_p
+PTSTR = ctypes.c_void_p
+CHAR = ctypes.c_char
+LPDWORD = PDWORD = ctypes.POINTER(DWORD)
+#~ LPBYTE = PBYTE = ctypes.POINTER(BYTE)
+LPBYTE = PBYTE = ctypes.c_void_p        # XXX avoids error about types
+
+ACCESS_MASK = DWORD
+REGSAM = ACCESS_MASK
+
+
+def byte_buffer(length):
+    """Get a buffer for a string"""
+    return (BYTE*length)()
+
+def string(buffer):
+    s = []
+    for c in buffer:
+        if c == 0: break
+        s.append(chr(c & 0xff)) # "& 0xff": hack to convert signed to unsigned
+    return ''.join(s)
+
+
+class GUID(ctypes.Structure):
+    _fields_ = [
+        ('Data1', DWORD),
+        ('Data2', WORD),
+        ('Data3', WORD),
+        ('Data4', BYTE*8),
+    ]
+    def __str__(self):
+        return "{%08x-%04x-%04x-%s-%s}" % (
+            self.Data1,
+            self.Data2,
+            self.Data3,
+            ''.join(["%02x" % d for d in self.Data4[:2]]),
+            ''.join(["%02x" % d for d in self.Data4[2:]]),
+        )
+
+class SP_DEVINFO_DATA(ctypes.Structure):
+    _fields_ = [
+        ('cbSize', DWORD),
+        ('ClassGuid', GUID),
+        ('DevInst', DWORD),
+        ('Reserved', ULONG_PTR),
+    ]
+    def __str__(self):
+        return "ClassGuid:%s DevInst:%s" % (self.ClassGuid, self.DevInst)
+PSP_DEVINFO_DATA = ctypes.POINTER(SP_DEVINFO_DATA)
+
+PSP_DEVICE_INTERFACE_DETAIL_DATA = ctypes.c_void_p
+
+setupapi = ctypes.windll.LoadLibrary("setupapi")
+SetupDiDestroyDeviceInfoList = setupapi.SetupDiDestroyDeviceInfoList
+SetupDiDestroyDeviceInfoList.argtypes = [HDEVINFO]
+SetupDiDestroyDeviceInfoList.restype = BOOL
+
+SetupDiClassGuidsFromName = setupapi.SetupDiClassGuidsFromNameA
+SetupDiClassGuidsFromName.argtypes = [PCTSTR, ctypes.POINTER(GUID), DWORD, PDWORD]
+SetupDiClassGuidsFromName.restype = BOOL
+
+SetupDiEnumDeviceInfo = setupapi.SetupDiEnumDeviceInfo
+SetupDiEnumDeviceInfo.argtypes = [HDEVINFO, DWORD, PSP_DEVINFO_DATA]
+SetupDiEnumDeviceInfo.restype = BOOL
+
+SetupDiGetClassDevs = setupapi.SetupDiGetClassDevsA
+SetupDiGetClassDevs.argtypes = [ctypes.POINTER(GUID), PCTSTR, HWND, DWORD]
+SetupDiGetClassDevs.restype = HDEVINFO
+SetupDiGetClassDevs.errcheck = ValidHandle
+
+SetupDiGetDeviceRegistryProperty = setupapi.SetupDiGetDeviceRegistryPropertyA
+SetupDiGetDeviceRegistryProperty.argtypes = [HDEVINFO, PSP_DEVINFO_DATA, DWORD, PDWORD, PBYTE, DWORD, PDWORD]
+SetupDiGetDeviceRegistryProperty.restype = BOOL
+
+SetupDiGetDeviceInstanceId = setupapi.SetupDiGetDeviceInstanceIdA
+SetupDiGetDeviceInstanceId.argtypes = [HDEVINFO, PSP_DEVINFO_DATA, PTSTR, DWORD, PDWORD]
+SetupDiGetDeviceInstanceId.restype = BOOL
+
+SetupDiOpenDevRegKey = setupapi.SetupDiOpenDevRegKey
+SetupDiOpenDevRegKey.argtypes = [HDEVINFO, PSP_DEVINFO_DATA, DWORD, DWORD, DWORD, REGSAM]
+SetupDiOpenDevRegKey.restype = HKEY
+
+advapi32 = ctypes.windll.LoadLibrary("Advapi32")
+RegCloseKey = advapi32.RegCloseKey
+RegCloseKey.argtypes = [HKEY]
+RegCloseKey.restype = LONG
+
+RegQueryValueEx = advapi32.RegQueryValueExA
+RegQueryValueEx.argtypes = [HKEY, LPCSTR, LPDWORD, LPDWORD, LPBYTE, LPDWORD]
+RegQueryValueEx.restype = LONG
+
+
+DIGCF_PRESENT = 2
+DIGCF_DEVICEINTERFACE = 16
+INVALID_HANDLE_VALUE = 0
+ERROR_INSUFFICIENT_BUFFER = 122
+SPDRP_HARDWAREID = 1
+SPDRP_FRIENDLYNAME = 12
+DICS_FLAG_GLOBAL = 1
+DIREG_DEV = 0x00000001
+KEY_READ = 0x20019
+
+# workaround for compatibility between Python 2.x and 3.x
+Ports = serial.to_bytes([80, 111, 114, 116, 115]) # "Ports"
+PortName = serial.to_bytes([80, 111, 114, 116, 78, 97, 109, 101]) # "PortName"
+
+def comports():
+    GUIDs = (GUID*8)() # so far only seen one used, so hope 8 are enough...
+    guids_size = DWORD()
+    if not SetupDiClassGuidsFromName(
+            Ports,
+            GUIDs,
+            ctypes.sizeof(GUIDs),
+            ctypes.byref(guids_size)):
+        raise ctypes.WinError()
+
+    # repeat for all possible GUIDs
+    for index in range(guids_size.value):
+        g_hdi = SetupDiGetClassDevs(
+                ctypes.byref(GUIDs[index]),
+                None,
+                NULL,
+                DIGCF_PRESENT) # was DIGCF_PRESENT|DIGCF_DEVICEINTERFACE which misses CDC ports
+
+        devinfo = SP_DEVINFO_DATA()
+        devinfo.cbSize = ctypes.sizeof(devinfo)
+        index = 0
+        while SetupDiEnumDeviceInfo(g_hdi, index, ctypes.byref(devinfo)):
+            index += 1
+
+            # get the real com port name
+            hkey = SetupDiOpenDevRegKey(
+                    g_hdi,
+                    ctypes.byref(devinfo),
+                    DICS_FLAG_GLOBAL,
+                    0,
+                    DIREG_DEV,  # DIREG_DRV for SW info
+                    KEY_READ)
+            port_name_buffer = byte_buffer(250)
+            port_name_length = ULONG(ctypes.sizeof(port_name_buffer))
+            RegQueryValueEx(
+                    hkey,
+                    PortName,
+                    None,
+                    None,
+                    ctypes.byref(port_name_buffer),
+                    ctypes.byref(port_name_length))
+            RegCloseKey(hkey)
+
+            # unfortunately this method also includes parallel ports.
+            # we could check for names starting with COM or just exclude LPT
+            # and hope that other "unknown" names are serial ports...
+            if string(port_name_buffer).startswith('LPT'):
+                continue
+
+            # hardware ID
+            szHardwareID = byte_buffer(250)
+            # try to get ID that includes serial number
+            if not SetupDiGetDeviceInstanceId(
+                    g_hdi,
+                    ctypes.byref(devinfo),
+                    ctypes.byref(szHardwareID),
+                    ctypes.sizeof(szHardwareID) - 1,
+                    None):
+                # fall back to more generic hardware ID if that would fail
+                if not SetupDiGetDeviceRegistryProperty(
+                        g_hdi,
+                        ctypes.byref(devinfo),
+                        SPDRP_HARDWAREID,
+                        None,
+                        ctypes.byref(szHardwareID),
+                        ctypes.sizeof(szHardwareID) - 1,
+                        None):
+                    # Ignore ERROR_INSUFFICIENT_BUFFER
+                    if ctypes.GetLastError() != ERROR_INSUFFICIENT_BUFFER:
+                        raise ctypes.WinError()
+            # stringify
+            szHardwareID_str = string(szHardwareID)
+
+            # in case of USB, make a more readable string, similar to the form
+            # that we also generate on other platforms
+            if szHardwareID_str.startswith('USB'):
+                m = re.search(r'VID_([0-9a-f]{4})&PID_([0-9a-f]{4})(\\(\w+))?', szHardwareID_str, re.I)
+                if m:
+                    if m.group(4):
+                        szHardwareID_str = 'USB VID:PID=%s:%s SNR=%s' % (m.group(1), m.group(2), m.group(4))
+                    else:
+                        szHardwareID_str = 'USB VID:PID=%s:%s' % (m.group(1), m.group(2))
+
+            # friendly name
+            szFriendlyName = byte_buffer(250)
+            if not SetupDiGetDeviceRegistryProperty(
+                    g_hdi,
+                    ctypes.byref(devinfo),
+                    SPDRP_FRIENDLYNAME,
+                    #~ SPDRP_DEVICEDESC,
+                    None,
+                    ctypes.byref(szFriendlyName),
+                    ctypes.sizeof(szFriendlyName) - 1,
+                    None):
+                # Ignore ERROR_INSUFFICIENT_BUFFER
+                #~ if ctypes.GetLastError() != ERROR_INSUFFICIENT_BUFFER:
+                    #~ raise IOError("failed to get details for %s (%s)" % (devinfo, szHardwareID.value))
+                # ignore errors and still include the port in the list, friendly name will be same as port name
+                yield string(port_name_buffer), 'n/a', szHardwareID_str
+            else:
+                yield string(port_name_buffer), string(szFriendlyName), szHardwareID_str
+
+        SetupDiDestroyDeviceInfoList(g_hdi)
+
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+# test
+if __name__ == '__main__':
+    import serial
+
+    for port, desc, hwid in sorted(comports()):
+        print "%s: %s [%s]" % (port, desc, hwid)
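
The VID/PID/serial-number rewriting near the end of comports() is just the regular expression shown above applied to the device instance ID. A standalone check of that transformation on an illustrative hardware-ID string:

    import re

    # illustrative instance ID in the usual USB\VID_xxxx&PID_xxxx\serial form
    szHardwareID_str = 'USB\\VID_0403&PID_6001\\A6008ISP'

    m = re.search(r'VID_([0-9a-f]{4})&PID_([0-9a-f]{4})(\\(\w+))?',
                  szHardwareID_str, re.I)
    if m:
        if m.group(4):
            print('USB VID:PID=%s:%s SNR=%s' % (m.group(1), m.group(2), m.group(4)))
        else:
            print('USB VID:PID=%s:%s' % (m.group(1), m.group(2)))
    # prints: USB VID:PID=0403:6001 SNR=A6008ISP
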
diff --git a/catapult/telemetry/third_party/pyserial/serial/tools/miniterm.py b/catapult/telemetry/third_party/pyserial/serial/tools/miniterm.py
new file mode 100755
index 0000000..274c7fb
--- /dev/null
+++ b/catapult/telemetry/third_party/pyserial/serial/tools/miniterm.py
@@ -0,0 +1,694 @@
+#!/usr/bin/env python
+
+# Very simple serial terminal
+# (C)2002-2011 Chris Liechti <cliechti@gmx.net>
+
+# Input characters are sent directly (only LF -> CR/LF/CRLF translation is
+# done), received characters are displayed as is (or escaped through Python's
+# repr, useful for debug purposes)
+
+
+import sys, os, serial, threading
+try:
+    from serial.tools.list_ports import comports
+except ImportError:
+    comports = None
+
+EXITCHARCTER = serial.to_bytes([0x1d])   # GS/CTRL+]
+MENUCHARACTER = serial.to_bytes([0x14])  # Menu: CTRL+T
+
+DEFAULT_PORT = None
+DEFAULT_BAUDRATE = 9600
+DEFAULT_RTS = None
+DEFAULT_DTR = None
+
+
+def key_description(character):
+    """generate a readable description for a key"""
+    ascii_code = ord(character)
+    if ascii_code < 32:
+        return 'Ctrl+%c' % (ord('@') + ascii_code)
+    else:
+        return repr(character)
+
+
+# help text, starts with blank line! it's a function so that the current values
+# for the shortcut keys are used and not the value at program start
+def get_help_text():
+    return """
+--- pySerial (%(version)s) - miniterm - help
+---
+--- %(exit)-8s Exit program
+--- %(menu)-8s Menu escape key, followed by:
+--- Menu keys:
+---    %(itself)-7s Send the menu character itself to remote
+---    %(exchar)-7s Send the exit character itself to remote
+---    %(info)-7s Show info
+---    %(upload)-7s Upload file (prompt will be shown)
+--- Toggles:
+---    %(rts)-7s RTS          %(echo)-7s local echo
+---    %(dtr)-7s DTR          %(break)-7s BREAK
+---    %(lfm)-7s line feed    %(repr)-7s Cycle repr mode
+---
+--- Port settings (%(menu)s followed by the following):
+---    p          change port
+---    7 8        set data bits
+---    n e o s m  change parity (None, Even, Odd, Space, Mark)
+---    1 2 3      set stop bits (1, 2, 1.5)
+---    b          change baud rate
+---    x X        disable/enable software flow control
+---    r R        disable/enable hardware flow control
+""" % {
+    'version': getattr(serial, 'VERSION', 'unknown version'),
+    'exit': key_description(EXITCHARCTER),
+    'menu': key_description(MENUCHARACTER),
+    'rts': key_description('\x12'),
+    'repr': key_description('\x01'),
+    'dtr': key_description('\x04'),
+    'lfm': key_description('\x0c'),
+    'break': key_description('\x02'),
+    'echo': key_description('\x05'),
+    'info': key_description('\x09'),
+    'upload': key_description('\x15'),
+    'itself': key_description(MENUCHARACTER),
+    'exchar': key_description(EXITCHARCTER),
+}
+
+if sys.version_info >= (3, 0):
+    def character(b):
+        return b.decode('latin1')
+else:
+    def character(b):
+        return b
+
+LF = serial.to_bytes([10])
+CR = serial.to_bytes([13])
+CRLF = serial.to_bytes([13, 10])
+
+X00 = serial.to_bytes([0])
+X0E = serial.to_bytes([0x0e])
+
+# first choose a platform-dependent way to read single characters from the console
+global console
+
+if os.name == 'nt':
+    import msvcrt
+    class Console(object):
+        def __init__(self):
+            pass
+
+        def setup(self):
+            pass    # Do nothing for 'nt'
+
+        def cleanup(self):
+            pass    # Do nothing for 'nt'
+
+        def getkey(self):
+            while True:
+                z = msvcrt.getch()
+                if z == X00 or z == X0E:    # functions keys, ignore
+                    msvcrt.getch()
+                else:
+                    if z == CR:
+                        return LF
+                    return z
+
+    console = Console()
+
+elif os.name == 'posix':
+    import termios, sys, os
+    class Console(object):
+        def __init__(self):
+            self.fd = sys.stdin.fileno()
+            self.old = None
+
+        def setup(self):
+            self.old = termios.tcgetattr(self.fd)
+            new = termios.tcgetattr(self.fd)
+            new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
+            new[6][termios.VMIN] = 1
+            new[6][termios.VTIME] = 0
+            termios.tcsetattr(self.fd, termios.TCSANOW, new)
+
+        def getkey(self):
+            c = os.read(self.fd, 1)
+            return c
+
+        def cleanup(self):
+            if self.old is not None:
+                termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
+
+    console = Console()
+
+    def cleanup_console():
+        console.cleanup()
+
+    sys.exitfunc = cleanup_console      # terminal modes have to be restored on exit...
+
+else:
+    raise NotImplementedError("Sorry no implementation for your platform (%s) available." % sys.platform)
+
+
+def dump_port_list():
+    if comports:
+        sys.stderr.write('\n--- Available ports:\n')
+        for port, desc, hwid in sorted(comports()):
+            #~ sys.stderr.write('--- %-20s %s [%s]\n' % (port, desc, hwid))
+            sys.stderr.write('--- %-20s %s\n' % (port, desc))
+
+
+CONVERT_CRLF = 2
+CONVERT_CR   = 1
+CONVERT_LF   = 0
+NEWLINE_CONVERISON_MAP = (LF, CR, CRLF)
+LF_MODES = ('LF', 'CR', 'CR/LF')
+
+REPR_MODES = ('raw', 'some control', 'all control', 'hex')
+
+class Miniterm(object):
+    def __init__(self, port, baudrate, parity, rtscts, xonxoff, echo=False, convert_outgoing=CONVERT_CRLF, repr_mode=0):
+        try:
+            self.serial = serial.serial_for_url(port, baudrate, parity=parity, rtscts=rtscts, xonxoff=xonxoff, timeout=1)
+        except AttributeError:
+            # happens when the installed pyserial is older than 2.5. use the
+            # Serial class directly then.
+            self.serial = serial.Serial(port, baudrate, parity=parity, rtscts=rtscts, xonxoff=xonxoff, timeout=1)
+        self.echo = echo
+        self.repr_mode = repr_mode
+        self.convert_outgoing = convert_outgoing
+        self.newline = NEWLINE_CONVERISON_MAP[self.convert_outgoing]
+        self.dtr_state = True
+        self.rts_state = True
+        self.break_state = False
+
+    def _start_reader(self):
+        """Start reader thread"""
+        self._reader_alive = True
+        # start serial->console thread
+        self.receiver_thread = threading.Thread(target=self.reader)
+        self.receiver_thread.setDaemon(True)
+        self.receiver_thread.start()
+
+    def _stop_reader(self):
+        """Stop reader thread only, wait for clean exit of thread"""
+        self._reader_alive = False
+        self.receiver_thread.join()
+
+
+    def start(self):
+        self.alive = True
+        self._start_reader()
+        # enter console->serial loop
+        self.transmitter_thread = threading.Thread(target=self.writer)
+        self.transmitter_thread.setDaemon(True)
+        self.transmitter_thread.start()
+
+    def stop(self):
+        self.alive = False
+
+    def join(self, transmit_only=False):
+        self.transmitter_thread.join()
+        if not transmit_only:
+            self.receiver_thread.join()
+
+    def dump_port_settings(self):
+        sys.stderr.write("\n--- Settings: %s  %s,%s,%s,%s\n" % (
+                self.serial.portstr,
+                self.serial.baudrate,
+                self.serial.bytesize,
+                self.serial.parity,
+                self.serial.stopbits))
+        sys.stderr.write('--- RTS: %-8s  DTR: %-8s  BREAK: %-8s\n' % (
+                (self.rts_state and 'active' or 'inactive'),
+                (self.dtr_state and 'active' or 'inactive'),
+                (self.break_state and 'active' or 'inactive')))
+        try:
+            sys.stderr.write('--- CTS: %-8s  DSR: %-8s  RI: %-8s  CD: %-8s\n' % (
+                    (self.serial.getCTS() and 'active' or 'inactive'),
+                    (self.serial.getDSR() and 'active' or 'inactive'),
+                    (self.serial.getRI() and 'active' or 'inactive'),
+                    (self.serial.getCD() and 'active' or 'inactive')))
+        except serial.SerialException:
+            # on RFC 2217 ports it can happen that no modem state notification
+            # has been received yet. ignore this error.
+            pass
+        sys.stderr.write('--- software flow control: %s\n' % (self.serial.xonxoff and 'active' or 'inactive'))
+        sys.stderr.write('--- hardware flow control: %s\n' % (self.serial.rtscts and 'active' or 'inactive'))
+        sys.stderr.write('--- data escaping: %s  linefeed: %s\n' % (
+                REPR_MODES[self.repr_mode],
+                LF_MODES[self.convert_outgoing]))
+
+    def reader(self):
+        """loop and copy serial->console"""
+        try:
+            while self.alive and self._reader_alive:
+                data = character(self.serial.read(1))
+
+                if self.repr_mode == 0:
+                    # direct output, just have to care about newline setting
+                    if data == '\r' and self.convert_outgoing == CONVERT_CR:
+                        sys.stdout.write('\n')
+                    else:
+                        sys.stdout.write(data)
+                elif self.repr_mode == 1:
+                    # escape non-printable, let pass newlines
+                    if self.convert_outgoing == CONVERT_CRLF and data in '\r\n':
+                        if data == '\n':
+                            sys.stdout.write('\n')
+                        elif data == '\r':
+                            pass
+                    elif data == '\n' and self.convert_outgoing == CONVERT_LF:
+                        sys.stdout.write('\n')
+                    elif data == '\r' and self.convert_outgoing == CONVERT_CR:
+                        sys.stdout.write('\n')
+                    else:
+                        sys.stdout.write(repr(data)[1:-1])
+                elif self.repr_mode == 2:
+                    # escape all non-printable, including newline
+                    sys.stdout.write(repr(data)[1:-1])
+                elif self.repr_mode == 3:
+                    # escape everything (hexdump)
+                    for c in data:
+                        sys.stdout.write("%s " % c.encode('hex'))
+                sys.stdout.flush()
+        except serial.SerialException, e:
+            self.alive = False
+            # would be nice if the console reader could be interrupted at this
+            # point...
+            raise
+
+
+    def writer(self):
+        """\
+        Loop and copy console->serial until EXITCHARCTER character is
+        found. When MENUCHARACTER is found, interpret the next key
+        locally.
+        """
+        menu_active = False
+        try:
+            while self.alive:
+                try:
+                    b = console.getkey()
+                except KeyboardInterrupt:
+                    b = serial.to_bytes([3])
+                c = character(b)
+                if menu_active:
+                    if c == MENUCHARACTER or c == EXITCHARCTER: # Menu character again/exit char -> send itself
+                        self.serial.write(b)                    # send character
+                        if self.echo:
+                            sys.stdout.write(c)
+                    elif c == '\x15':                       # CTRL+U -> upload file
+                        sys.stderr.write('\n--- File to upload: ')
+                        sys.stderr.flush()
+                        console.cleanup()
+                        filename = sys.stdin.readline().rstrip('\r\n')
+                        if filename:
+                            try:
+                                file = open(filename, 'r')
+                                sys.stderr.write('--- Sending file %s ---\n' % filename)
+                                while True:
+                                    line = file.readline().rstrip('\r\n')
+                                    if not line:
+                                        break
+                                    self.serial.write(line)
+                                    self.serial.write('\r\n')
+                                    # Wait for output buffer to drain.
+                                    self.serial.flush()
+                                    sys.stderr.write('.')   # Progress indicator.
+                                sys.stderr.write('\n--- File %s sent ---\n' % filename)
+                            except IOError, e:
+                                sys.stderr.write('--- ERROR opening file %s: %s ---\n' % (filename, e))
+                        console.setup()
+                    elif c in '\x08hH?':                    # CTRL+H, h, H, ? -> Show help
+                        sys.stderr.write(get_help_text())
+                    elif c == '\x12':                       # CTRL+R -> Toggle RTS
+                        self.rts_state = not self.rts_state
+                        self.serial.setRTS(self.rts_state)
+                        sys.stderr.write('--- RTS %s ---\n' % (self.rts_state and 'active' or 'inactive'))
+                    elif c == '\x04':                       # CTRL+D -> Toggle DTR
+                        self.dtr_state = not self.dtr_state
+                        self.serial.setDTR(self.dtr_state)
+                        sys.stderr.write('--- DTR %s ---\n' % (self.dtr_state and 'active' or 'inactive'))
+                    elif c == '\x02':                       # CTRL+B -> toggle BREAK condition
+                        self.break_state = not self.break_state
+                        self.serial.setBreak(self.break_state)
+                        sys.stderr.write('--- BREAK %s ---\n' % (self.break_state and 'active' or 'inactive'))
+                    elif c == '\x05':                       # CTRL+E -> toggle local echo
+                        self.echo = not self.echo
+                        sys.stderr.write('--- local echo %s ---\n' % (self.echo and 'active' or 'inactive'))
+                    elif c == '\x09':                       # CTRL+I -> info
+                        self.dump_port_settings()
+                    elif c == '\x01':                       # CTRL+A -> cycle escape mode
+                        self.repr_mode += 1
+                        if self.repr_mode > 3:
+                            self.repr_mode = 0
+                        sys.stderr.write('--- escape data: %s ---\n' % (
+                            REPR_MODES[self.repr_mode],
+                        ))
+                    elif c == '\x0c':                       # CTRL+L -> cycle linefeed mode
+                        self.convert_outgoing += 1
+                        if self.convert_outgoing > 2:
+                            self.convert_outgoing = 0
+                        self.newline = NEWLINE_CONVERISON_MAP[self.convert_outgoing]
+                        sys.stderr.write('--- line feed %s ---\n' % (
+                            LF_MODES[self.convert_outgoing],
+                        ))
+                    elif c in 'pP':                         # P -> change port
+                        dump_port_list()
+                        sys.stderr.write('--- Enter port name: ')
+                        sys.stderr.flush()
+                        console.cleanup()
+                        try:
+                            port = sys.stdin.readline().strip()
+                        except KeyboardInterrupt:
+                            port = None
+                        console.setup()
+                        if port and port != self.serial.port:
+                            # reader thread needs to be shut down
+                            self._stop_reader()
+                            # save settings
+                            settings = self.serial.getSettingsDict()
+                            try:
+                                try:
+                                    new_serial = serial.serial_for_url(port, do_not_open=True)
+                                except AttributeError:
+                                    # happens when the installed pyserial is older than 2.5. use the
+                                    # Serial class directly then.
+                                    new_serial = serial.Serial()
+                                    new_serial.port = port
+                                # restore settings and open
+                                new_serial.applySettingsDict(settings)
+                                new_serial.open()
+                                new_serial.setRTS(self.rts_state)
+                                new_serial.setDTR(self.dtr_state)
+                                new_serial.setBreak(self.break_state)
+                            except Exception, e:
+                                sys.stderr.write('--- ERROR opening new port: %s ---\n' % (e,))
+                                new_serial.close()
+                            else:
+                                self.serial.close()
+                                self.serial = new_serial
+                                sys.stderr.write('--- Port changed to: %s ---\n' % (self.serial.port,))
+                            # and restart the reader thread
+                            self._start_reader()
+                    elif c in 'bB':                         # B -> change baudrate
+                        sys.stderr.write('\n--- Baudrate: ')
+                        sys.stderr.flush()
+                        console.cleanup()
+                        backup = self.serial.baudrate
+                        try:
+                            self.serial.baudrate = int(sys.stdin.readline().strip())
+                        except ValueError, e:
+                            sys.stderr.write('--- ERROR setting baudrate: %s ---\n' % (e,))
+                            self.serial.baudrate = backup
+                        else:
+                            self.dump_port_settings()
+                        console.setup()
+                    elif c == '8':                          # 8 -> change to 8 bits
+                        self.serial.bytesize = serial.EIGHTBITS
+                        self.dump_port_settings()
+                    elif c == '7':                          # 7 -> change to 7 bits
+                        self.serial.bytesize = serial.SEVENBITS
+                        self.dump_port_settings()
+                    elif c in 'eE':                         # E -> change to even parity
+                        self.serial.parity = serial.PARITY_EVEN
+                        self.dump_port_settings()
+                    elif c in 'oO':                         # O -> change to odd parity
+                        self.serial.parity = serial.PARITY_ODD
+                        self.dump_port_settings()
+                    elif c in 'mM':                         # M -> change to mark parity
+                        self.serial.parity = serial.PARITY_MARK
+                        self.dump_port_settings()
+                    elif c in 'sS':                         # S -> change to space parity
+                        self.serial.parity = serial.PARITY_SPACE
+                        self.dump_port_settings()
+                    elif c in 'nN':                         # N -> change to no parity
+                        self.serial.parity = serial.PARITY_NONE
+                        self.dump_port_settings()
+                    elif c == '1':                          # 1 -> change to 1 stop bits
+                        self.serial.stopbits = serial.STOPBITS_ONE
+                        self.dump_port_settings()
+                    elif c == '2':                          # 2 -> change to 2 stop bits
+                        self.serial.stopbits = serial.STOPBITS_TWO
+                        self.dump_port_settings()
+                    elif c == '3':                          # 3 -> change to 1.5 stop bits
+                        self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
+                        self.dump_port_settings()
+                    elif c in 'xX':                         # X -> change software flow control
+                        self.serial.xonxoff = (c == 'X')
+                        self.dump_port_settings()
+                    elif c in 'rR':                         # R -> change hardware flow control
+                        self.serial.rtscts = (c == 'R')
+                        self.dump_port_settings()
+                    else:
+                        sys.stderr.write('--- unknown menu character %s --\n' % key_description(c))
+                    menu_active = False
+                elif c == MENUCHARACTER: # next char will be for menu
+                    menu_active = True
+                elif c == EXITCHARCTER: 
+                    self.stop()
+                    break                                   # exit app
+                elif c == '\n':
+                    self.serial.write(self.newline)         # send newline character(s)
+                    if self.echo:
+                        sys.stdout.write(c)                 # local echo is a real newline in any case
+                        sys.stdout.flush()
+                else:
+                    self.serial.write(b)                    # send byte
+                    if self.echo:
+                        sys.stdout.write(c)
+                        sys.stdout.flush()
+        except:
+            self.alive = False
+            raise
+
+def main():
+    import optparse
+
+    parser = optparse.OptionParser(
+        usage = "%prog [options] [port [baudrate]]",
+        description = "Miniterm - A simple terminal program for the serial port."
+    )
+
+    group = optparse.OptionGroup(parser, "Port settings")
+
+    group.add_option("-p", "--port",
+        dest = "port",
+        help = "port, a number or a device name. (deprecated option, use parameter instead)",
+        default = DEFAULT_PORT
+    )
+
+    group.add_option("-b", "--baud",
+        dest = "baudrate",
+        action = "store",
+        type = 'int',
+        help = "set baud rate, default %default",
+        default = DEFAULT_BAUDRATE
+    )
+
+    group.add_option("--parity",
+        dest = "parity",
+        action = "store",
+        help = "set parity, one of [N, E, O, S, M], default=N",
+        default = 'N'
+    )
+
+    group.add_option("--rtscts",
+        dest = "rtscts",
+        action = "store_true",
+        help = "enable RTS/CTS flow control (default off)",
+        default = False
+    )
+
+    group.add_option("--xonxoff",
+        dest = "xonxoff",
+        action = "store_true",
+        help = "enable software flow control (default off)",
+        default = False
+    )
+
+    group.add_option("--rts",
+        dest = "rts_state",
+        action = "store",
+        type = 'int',
+        help = "set initial RTS line state (possible values: 0, 1)",
+        default = DEFAULT_RTS
+    )
+
+    group.add_option("--dtr",
+        dest = "dtr_state",
+        action = "store",
+        type = 'int',
+        help = "set initial DTR line state (possible values: 0, 1)",
+        default = DEFAULT_DTR
+    )
+
+    parser.add_option_group(group)
+
+    group = optparse.OptionGroup(parser, "Data handling")
+
+    group.add_option("-e", "--echo",
+        dest = "echo",
+        action = "store_true",
+        help = "enable local echo (default off)",
+        default = False
+    )
+
+    group.add_option("--cr",
+        dest = "cr",
+        action = "store_true",
+        help = "do not send CR+LF, send CR only",
+        default = False
+    )
+
+    group.add_option("--lf",
+        dest = "lf",
+        action = "store_true",
+        help = "do not send CR+LF, send LF only",
+        default = False
+    )
+
+    group.add_option("-D", "--debug",
+        dest = "repr_mode",
+        action = "count",
+        help = """debug received data (escape non-printable chars)
+--debug can be given multiple times:
+0: just print what is received
+1: escape non-printable characters, do newlines as usual
+2: escape non-printable characters, newlines too
+3: hex dump everything""",
+        default = 0
+    )
+
+    parser.add_option_group(group)
+
+
+    group = optparse.OptionGroup(parser, "Hotkeys")
+
+    group.add_option("--exit-char",
+        dest = "exit_char",
+        action = "store",
+        type = 'int',
+        help = "ASCII code of special character that is used to exit the application",
+        default = 0x1d
+    )
+
+    group.add_option("--menu-char",
+        dest = "menu_char",
+        action = "store",
+        type = 'int',
+        help = "ASCII code of special character that is used to control miniterm (menu)",
+        default = 0x14
+    )
+
+    parser.add_option_group(group)
+
+    group = optparse.OptionGroup(parser, "Diagnostics")
+
+    group.add_option("-q", "--quiet",
+        dest = "quiet",
+        action = "store_true",
+        help = "suppress non-error messages",
+        default = False
+    )
+
+    parser.add_option_group(group)
+
+
+    (options, args) = parser.parse_args()
+
+    options.parity = options.parity.upper()
+    if options.parity not in 'NEOSM':
+        parser.error("invalid parity")
+
+    if options.cr and options.lf:
+        parser.error("only one of --cr or --lf can be specified")
+
+    if options.menu_char == options.exit_char:
+        parser.error('--exit-char can not be the same as --menu-char')
+
+    global EXITCHARCTER, MENUCHARACTER
+    EXITCHARCTER = chr(options.exit_char)
+    MENUCHARACTER = chr(options.menu_char)
+
+    port = options.port
+    baudrate = options.baudrate
+    if args:
+        if options.port is not None:
+            parser.error("no arguments are allowed, options only when --port is given")
+        port = args.pop(0)
+        if args:
+            try:
+                baudrate = int(args[0])
+            except ValueError:
+                parser.error("baud rate must be a number, not %r" % args[0])
+            args.pop(0)
+        if args:
+            parser.error("too many arguments")
+    else:
+        # no port given on command line -> ask user now
+        if port is None:
+            dump_port_list()
+            port = raw_input('Enter port name:')
+
+    convert_outgoing = CONVERT_CRLF
+    if options.cr:
+        convert_outgoing = CONVERT_CR
+    elif options.lf:
+        convert_outgoing = CONVERT_LF
+
+    try:
+        miniterm = Miniterm(
+            port,
+            baudrate,
+            options.parity,
+            rtscts=options.rtscts,
+            xonxoff=options.xonxoff,
+            echo=options.echo,
+            convert_outgoing=convert_outgoing,
+            repr_mode=options.repr_mode,
+        )
+    except serial.SerialException, e:
+        sys.stderr.write("could not open port %r: %s\n" % (port, e))
+        sys.exit(1)
+
+    if not options.quiet:
+        sys.stderr.write('--- Miniterm on %s: %d,%s,%s,%s ---\n' % (
+            miniterm.serial.portstr,
+            miniterm.serial.baudrate,
+            miniterm.serial.bytesize,
+            miniterm.serial.parity,
+            miniterm.serial.stopbits,
+        ))
+        sys.stderr.write('--- Quit: %s  |  Menu: %s | Help: %s followed by %s ---\n' % (
+            key_description(EXITCHARCTER),
+            key_description(MENUCHARACTER),
+            key_description(MENUCHARACTER),
+            key_description('\x08'),
+        ))
+
+    if options.dtr_state is not None:
+        if not options.quiet:
+            sys.stderr.write('--- forcing DTR %s\n' % (options.dtr_state and 'active' or 'inactive'))
+        miniterm.serial.setDTR(options.dtr_state)
+        miniterm.dtr_state = options.dtr_state
+    if options.rts_state is not None:
+        if not options.quiet:
+            sys.stderr.write('--- forcing RTS %s\n' % (options.rts_state and 'active' or 'inactive'))
+        miniterm.serial.setRTS(options.rts_state)
+        miniterm.rts_state = options.rts_state
+
+    console.setup()
+    miniterm.start()
+    try:
+        miniterm.join(True)
+    except KeyboardInterrupt:
+        pass
+    if not options.quiet:
+        sys.stderr.write("\n--- exit ---\n")
+    miniterm.join()
+    #~ console.cleanup()
+
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+if __name__ == '__main__':
+    main()
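+
+# Example invocation (the device name is only a placeholder; use a real serial
+# device such as a COM port on Windows or a tty device on POSIX):
+#
+#   python miniterm.py /dev/ttyUSB0 115200 --echo
+#
+# CTRL+] quits; CTRL+T followed by another key opens the menu described in
+# get_help_text() above.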
diff --git a/catapult/perf_insights/third_party/__init__.py b/catapult/telemetry/third_party/pyserial/serial/urlhandler/__init__.py
similarity index 100%
copy from catapult/perf_insights/third_party/__init__.py
copy to catapult/telemetry/third_party/pyserial/serial/urlhandler/__init__.py
diff --git a/catapult/telemetry/third_party/pyserial/serial/urlhandler/protocol_hwgrep.py b/catapult/telemetry/third_party/pyserial/serial/urlhandler/protocol_hwgrep.py
new file mode 100644
index 0000000..62cda43
--- /dev/null
+++ b/catapult/telemetry/third_party/pyserial/serial/urlhandler/protocol_hwgrep.py
@@ -0,0 +1,45 @@
+#! python
+#
+# Python Serial Port Extension for Win32, Linux, BSD, Jython
+# see __init__.py
+#
+# This module implements a special URL handler that uses the port listing to
+# find ports by searching the string descriptions.
+#
+# (C) 2011 Chris Liechti <cliechti@gmx.net>
+# this is distributed under a free software license, see license.txt
+#
+# URL format:    hwgrep://regexp
+
+import serial
+import serial.tools.list_ports
+
+class Serial(serial.Serial):
+    """Just inherit the native Serial port implementation and patch the open function."""
+
+    def setPort(self, value):
+        """translate port name before storing it"""
+        if isinstance(value, basestring) and value.startswith('hwgrep://'):
+            serial.Serial.setPort(self, self.fromURL(value))
+        else:
+            serial.Serial.setPort(self, value)
+
+    def fromURL(self, url):
+        """extract host and port from an URL string"""
+        if url.lower().startswith("hwgrep://"): url = url[9:]
+        # use a for loop to get the 1st element from the generator
+        for port, desc, hwid in serial.tools.list_ports.grep(url):
+            return port
+        else:
+            raise serial.SerialException('no ports found matching regexp %r' % (url,))
+
+    # override property
+    port = property(serial.Serial.getPort, setPort, doc="Port setting")
+
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+if __name__ == '__main__':
+    #~ s = Serial('hwgrep://ttyS0')
+    s = Serial(None)
+    s.port = 'hwgrep://ttyS0'
+    print s
+
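+# In normal use the handler is reached through serial.serial_for_url; for
+# example (the regexp 'USB' is only an illustration, match your own device
+# description):
+#
+#   s = serial.serial_for_url('hwgrep://USB', baudrate=9600, timeout=1)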
diff --git a/catapult/telemetry/third_party/pyserial/serial/urlhandler/protocol_loop.py b/catapult/telemetry/third_party/pyserial/serial/urlhandler/protocol_loop.py
new file mode 100644
index 0000000..7da94ad
--- /dev/null
+++ b/catapult/telemetry/third_party/pyserial/serial/urlhandler/protocol_loop.py
@@ -0,0 +1,265 @@
+#! python
+#
+# Python Serial Port Extension for Win32, Linux, BSD, Jython
+# see __init__.py
+#
+# This module implements a loop back connection receiving itself what it sent.
+#
+# The purpose of this module is... well... you can run the unit tests with it,
+# and it was so easy to implement ;-)
+#
+# (C) 2001-2011 Chris Liechti <cliechti@gmx.net>
+# this is distributed under a free software license, see license.txt
+#
+# URL format:    loop://[option[/option...]]
+# options:
+# - "debug" print diagnostic messages
+
+from serial.serialutil import *
+import threading
+import time
+import logging
+
+# map log level names to constants. used in fromURL()
+LOGGER_LEVELS = {
+    'debug': logging.DEBUG,
+    'info': logging.INFO,
+    'warning': logging.WARNING,
+    'error': logging.ERROR,
+    }
+
+
+class LoopbackSerial(SerialBase):
+    """Serial port implementation that simulates a loop back connection in plain software."""
+
+    BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
+                 9600, 19200, 38400, 57600, 115200)
+
+    def open(self):
+        """Open port with current settings. This may throw a SerialException
+           if the port cannot be opened."""
+        if self._isOpen:
+            raise SerialException("Port is already open.")
+        self.logger = None
+        self.buffer_lock = threading.Lock()
+        self.loop_buffer = bytearray()
+        self.cts = False
+        self.dsr = False
+
+        if self._port is None:
+            raise SerialException("Port must be configured before it can be used.")
+        # not that there is anything to open, but the function applies the
+        # options found in the URL
+        self.fromURL(self.port)
+
+        # not that there is anything to configure...
+        self._reconfigurePort()
+        # all things are set up, now for a clean start
+        self._isOpen = True
+        if not self._rtscts:
+            self.setRTS(True)
+            self.setDTR(True)
+        self.flushInput()
+        self.flushOutput()
+
+    def _reconfigurePort(self):
+        """Set communication parameters on opened port. for the loop://
+        protocol all settings are ignored!"""
+        # not that it's of any real use, but it helps in the unit tests
+        if not isinstance(self._baudrate, (int, long)) or not 0 < self._baudrate < 2**32:
+            raise ValueError("invalid baudrate: %r" % (self._baudrate))
+        if self.logger:
+            self.logger.info('_reconfigurePort()')
+
+    def close(self):
+        """Close port"""
+        if self._isOpen:
+            self._isOpen = False
+            # in case of quick reconnects, give the server some time
+            time.sleep(0.3)
+
+    def makeDeviceName(self, port):
+        raise SerialException("there is no sensible way to turn numbers into URLs")
+
+    def fromURL(self, url):
+        """extract host and port from an URL string"""
+        if url.lower().startswith("loop://"): url = url[7:]
+        try:
+            # process options now, directly altering self
+            for option in url.split('/'):
+                if '=' in option:
+                    option, value = option.split('=', 1)
+                else:
+                    value = None
+                if not option:
+                    pass
+                elif option == 'logging':
+                    logging.basicConfig()   # XXX is that good to call it here?
+                    self.logger = logging.getLogger('pySerial.loop')
+                    self.logger.setLevel(LOGGER_LEVELS[value])
+                    self.logger.debug('enabled logging')
+                else:
+                    raise ValueError('unknown option: %r' % (option,))
+        except ValueError, e:
+            raise SerialException('expected a string in the form "[loop://][option[/option...]]": %s' % e)
+
+    #  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -
+
+    def inWaiting(self):
+        """Return the number of characters currently in the input buffer."""
+        if not self._isOpen: raise portNotOpenError
+        if self.logger:
+            # attention the logged value can differ from return value in
+            # threaded environments...
+            self.logger.debug('inWaiting() -> %d' % (len(self.loop_buffer),))
+        return len(self.loop_buffer)
+
+    def read(self, size=1):
+        """Read size bytes from the serial port. If a timeout is set it may
+        return less characters as requested. With no timeout it will block
+        until the requested number of bytes is read."""
+        if not self._isOpen: raise portNotOpenError
+        if self._timeout is not None:
+            timeout = time.time() + self._timeout
+        else:
+            timeout = None
+        data = bytearray()
+        while size > 0:
+            self.buffer_lock.acquire()
+            try:
+                block = to_bytes(self.loop_buffer[:size])
+                del self.loop_buffer[:size]
+            finally:
+                self.buffer_lock.release()
+            data += block
+            size -= len(block)
+            # check for timeout now, after data has been read.
+            # useful for timeout = 0 (non blocking) read
+            if timeout and time.time() > timeout:
+                break
+        return bytes(data)
+
+    def write(self, data):
+        """Output the given string over the serial port. Can block if the
+        connection is blocked. May raise SerialException if the connection is
+        closed."""
+        if not self._isOpen: raise portNotOpenError
+        # ensure we're working with bytes
+        data = to_bytes(data)
+        # calculate the approximate time that would be needed to send the data
+        time_used_to_send = 10.0*len(data) / self._baudrate
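+        # (roughly 10 bits per byte on the wire: start bit, 8 data bits, stop bit)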
+        # when a write timeout is configured, check whether the write would
+        # succeed (if not, nothing is sent, not even the part that would fit)
+        if self._writeTimeout is not None and time_used_to_send > self._writeTimeout:
+            time.sleep(self._writeTimeout) # must wait so that unit test succeeds
+            raise writeTimeoutError
+        self.buffer_lock.acquire()
+        try:
+            self.loop_buffer += data
+        finally:
+            self.buffer_lock.release()
+        return len(data)
+
+    def flushInput(self):
+        """Clear input buffer, discarding all that is in the buffer."""
+        if not self._isOpen: raise portNotOpenError
+        if self.logger:
+            self.logger.info('flushInput()')
+        self.buffer_lock.acquire()
+        try:
+            del self.loop_buffer[:]
+        finally:
+            self.buffer_lock.release()
+
+    def flushOutput(self):
+        """Clear output buffer, aborting the current output and
+        discarding all that is in the buffer."""
+        if not self._isOpen: raise portNotOpenError
+        if self.logger:
+            self.logger.info('flushOutput()')
+
+    def sendBreak(self, duration=0.25):
+        """Send break condition. Timed, returns to idle state after given
+        duration."""
+        if not self._isOpen: raise portNotOpenError
+
+    def setBreak(self, level=True):
+        """Set break: Controls TXD. When active, to transmitting is
+        possible."""
+        if not self._isOpen: raise portNotOpenError
+        if self.logger:
+            self.logger.info('setBreak(%r)' % (level,))
+
+    def setRTS(self, level=True):
+        """Set terminal status line: Request To Send"""
+        if not self._isOpen: raise portNotOpenError
+        if self.logger:
+            self.logger.info('setRTS(%r) -> state of CTS' % (level,))
+        self.cts = level
+
+    def setDTR(self, level=True):
+        """Set terminal status line: Data Terminal Ready"""
+        if not self._isOpen: raise portNotOpenError
+        if self.logger:
+            self.logger.info('setDTR(%r) -> state of DSR' % (level,))
+        self.dsr = level
+
+    def getCTS(self):
+        """Read terminal status line: Clear To Send"""
+        if not self._isOpen: raise portNotOpenError
+        if self.logger:
+            self.logger.info('getCTS() -> state of RTS (%r)' % (self.cts,))
+        return self.cts
+
+    def getDSR(self):
+        """Read terminal status line: Data Set Ready"""
+        if not self._isOpen: raise portNotOpenError
+        if self.logger:
+            self.logger.info('getDSR() -> state of DTR (%r)' % (self.dsr,))
+        return self.dsr
+
+    def getRI(self):
+        """Read terminal status line: Ring Indicator"""
+        if not self._isOpen: raise portNotOpenError
+        if self.logger:
+            self.logger.info('returning dummy for getRI()')
+        return False
+
+    def getCD(self):
+        """Read terminal status line: Carrier Detect"""
+        if not self._isOpen: raise portNotOpenError
+        if self.logger:
+            self.logger.info('returning dummy for getCD()')
+        return True
+
+    # - - - platform specific - - -
+    # None so far
+
+
+# assemble Serial class with the platform specific implementation and the base
+# for file-like behavior. for Python 2.6 and newer, which provide the new I/O
+# library, derive from io.RawIOBase
+try:
+    import io
+except ImportError:
+    # classic version with our own file-like emulation
+    class Serial(LoopbackSerial, FileLike):
+        pass
+else:
+    # io library present
+    class Serial(LoopbackSerial, io.RawIOBase):
+        pass
+
+
+# simple client test
+if __name__ == '__main__':
+    import sys
+    s = Serial('loop://')
+    sys.stdout.write('%s\n' % s)
+
+    sys.stdout.write("write...\n")
+    s.write("hello\n")
+    s.flush()
+    sys.stdout.write("read: %s\n" % s.read(5))
+
+    s.close()
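+
+# In normal use the handler is reached through serial.serial_for_url, e.g.
+#
+#   s = serial.serial_for_url('loop://logging=debug', timeout=1)
+#
+# reads then return whatever was previously written to the same object.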
diff --git a/catapult/telemetry/third_party/pyserial/serial/urlhandler/protocol_rfc2217.py b/catapult/telemetry/third_party/pyserial/serial/urlhandler/protocol_rfc2217.py
new file mode 100644
index 0000000..981ba45
--- /dev/null
+++ b/catapult/telemetry/third_party/pyserial/serial/urlhandler/protocol_rfc2217.py
@@ -0,0 +1,11 @@
+#! python
+#
+# Python Serial Port Extension for Win32, Linux, BSD, Jython
+# see ../__init__.py
+#
+# This is a thin wrapper to load the rfc2217 implementation.
+#
+# (C) 2011 Chris Liechti <cliechti@gmx.net>
+# this is distributed under a free software license, see license.txt
+
+from serial.rfc2217 import Serial
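+
+# With this module in place, serial.serial_for_url can resolve rfc2217 URLs,
+# e.g. (host and port are placeholders for an RFC 2217 capable server):
+#
+#   serial.serial_for_url('rfc2217://example-host:2217', baudrate=9600)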
diff --git a/catapult/telemetry/third_party/pyserial/serial/urlhandler/protocol_socket.py b/catapult/telemetry/third_party/pyserial/serial/urlhandler/protocol_socket.py
new file mode 100644
index 0000000..c90a8e4
--- /dev/null
+++ b/catapult/telemetry/third_party/pyserial/serial/urlhandler/protocol_socket.py
@@ -0,0 +1,274 @@
+#! python
+#
+# Python Serial Port Extension for Win32, Linux, BSD, Jython
+# see __init__.py
+#
+# This module implements a simple socket based client.
+# It does not support changing any port parameters and will silently ignore any
+# requests to do so.
+#
+# The purpose of this module is that applications using pySerial can connect to
+# TCP/IP to serial port converters that do not support RFC 2217.
+#
+# (C) 2001-2011 Chris Liechti <cliechti@gmx.net>
+# this is distributed under a free software license, see license.txt
+#
+# URL format:    socket://<host>:<port>[/option[/option...]]
+# options:
+# - "debug" print diagnostic messages
+
+from serial.serialutil import *
+import time
+import socket
+import logging
+
+# map log level names to constants. used in fromURL()
+LOGGER_LEVELS = {
+    'debug': logging.DEBUG,
+    'info': logging.INFO,
+    'warning': logging.WARNING,
+    'error': logging.ERROR,
+    }
+
+POLL_TIMEOUT = 2
+
+class SocketSerial(SerialBase):
+    """Serial port implementation for plain sockets."""
+
+    BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
+                 9600, 19200, 38400, 57600, 115200)
+
+    def open(self):
+        """Open port with current settings. This may throw a SerialException
+           if the port cannot be opened."""
+        self.logger = None
+        if self._port is None:
+            raise SerialException("Port must be configured before it can be used.")
+        if self._isOpen:
+            raise SerialException("Port is already open.")
+        try:
+            # XXX in future replace with create_connection (py >=2.6)
+            self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            self._socket.connect(self.fromURL(self.portstr))
+        except Exception, msg:
+            self._socket = None
+            raise SerialException("Could not open port %s: %s" % (self.portstr, msg))
+
+        self._socket.settimeout(POLL_TIMEOUT) # used for write timeout support :/
+
+        # not that there is anything to configure...
+        self._reconfigurePort()
+        # all things are set up, now for a clean start
+        self._isOpen = True
+        if not self._rtscts:
+            self.setRTS(True)
+            self.setDTR(True)
+        self.flushInput()
+        self.flushOutput()
+
+    def _reconfigurePort(self):
+        """Set communication parameters on opened port. for the socket://
+        protocol all settings are ignored!"""
+        if self._socket is None:
+            raise SerialException("Can only operate on open ports")
+        if self.logger:
+            self.logger.info('ignored port configuration change')
+
+    def close(self):
+        """Close port"""
+        if self._isOpen:
+            if self._socket:
+                try:
+                    self._socket.shutdown(socket.SHUT_RDWR)
+                    self._socket.close()
+                except:
+                    # ignore errors.
+                    pass
+                self._socket = None
+            self._isOpen = False
+            # in case of quick reconnects, give the server some time
+            time.sleep(0.3)
+
+    def makeDeviceName(self, port):
+        raise SerialException("there is no sensible way to turn numbers into URLs")
+
+    def fromURL(self, url):
+        """extract host and port from an URL string"""
+        if url.lower().startswith("socket://"): url = url[9:]
+        try:
+            # is there a "path" (our options)?
+            if '/' in url:
+                # cut away options
+                url, options = url.split('/', 1)
+                # process options now, directly altering self
+                for option in options.split('/'):
+                    if '=' in option:
+                        option, value = option.split('=', 1)
+                    else:
+                        value = None
+                    if option == 'logging':
+                        logging.basicConfig()   # XXX is that good to call it here?
+                        self.logger = logging.getLogger('pySerial.socket')
+                        self.logger.setLevel(LOGGER_LEVELS[value])
+                        self.logger.debug('enabled logging')
+                    else:
+                        raise ValueError('unknown option: %r' % (option,))
+            # get host and port
+            host, port = url.split(':', 1) # may raise ValueError because of unpacking
+            port = int(port)               # and this if it's not a number
+            if not 0 <= port < 65536: raise ValueError("port not in range 0...65535")
+        except ValueError, e:
+            raise SerialException('expected a string in the form "[socket://]<host>:<port>[/option[/option...]]": %s' % e)
+        return (host, port)
+
+    #  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -
+
+    def inWaiting(self):
+        """Return the number of characters currently in the input buffer."""
+        if not self._isOpen: raise portNotOpenError
+        if self.logger:
+            # set this one to debug as the function could be called often...
+            self.logger.debug('WARNING: inWaiting returns dummy value')
+        return 0 # hmmm, see comment in read()
+
+    def read(self, size=1):
+        """Read size bytes from the serial port. If a timeout is set it may
+        return less characters as requested. With no timeout it will block
+        until the requested number of bytes is read."""
+        if not self._isOpen: raise portNotOpenError
+        data = bytearray()
+        if self._timeout is not None:
+            timeout = time.time() + self._timeout
+        else:
+            timeout = None
+        while len(data) < size and (timeout is None or time.time() < timeout):
+            try:
+                # an implementation with an internal buffer would perform
+                # better...
+                t = time.time()
+                block = self._socket.recv(size - len(data))
+                duration = time.time() - t
+                if block:
+                    data.extend(block)
+                else:
+                    # no data -> EOF (connection probably closed)
+                    break
+            except socket.timeout:
+                # just need to get out of recv from time to time to check if
+                # still alive
+                continue
+            except socket.error, e:
+                # connection fails -> terminate loop
+                raise SerialException('connection failed (%s)' % e)
+        return bytes(data)
+
+    def write(self, data):
+        """Output the given string over the serial port. Can block if the
+        connection is blocked. May raise SerialException if the connection is
+        closed."""
+        if not self._isOpen: raise portNotOpenError
+        try:
+            self._socket.sendall(to_bytes(data))
+        except socket.error, e:
+            # XXX what exception if socket connection fails
+            raise SerialException("socket connection failed: %s" % e)
+        return len(data)
+
+    def flushInput(self):
+        """Clear input buffer, discarding all that is in the buffer."""
+        if not self._isOpen: raise portNotOpenError
+        if self.logger:
+            self.logger.info('ignored flushInput')
+
+    def flushOutput(self):
+        """Clear output buffer, aborting the current output and
+        discarding all that is in the buffer."""
+        if not self._isOpen: raise portNotOpenError
+        if self.logger:
+            self.logger.info('ignored flushOutput')
+
+    def sendBreak(self, duration=0.25):
+        """Send break condition. Timed, returns to idle state after given
+        duration."""
+        if not self._isOpen: raise portNotOpenError
+        if self.logger:
+            self.logger.info('ignored sendBreak(%r)' % (duration,))
+
+    def setBreak(self, level=True):
+        """Set break: Controls TXD. When active, to transmitting is
+        possible."""
+        if not self._isOpen: raise portNotOpenError
+        if self.logger:
+            self.logger.info('ignored setBreak(%r)' % (level,))
+
+    def setRTS(self, level=True):
+        """Set terminal status line: Request To Send"""
+        if not self._isOpen: raise portNotOpenError
+        if self.logger:
+            self.logger.info('ignored setRTS(%r)' % (level,))
+
+    def setDTR(self, level=True):
+        """Set terminal status line: Data Terminal Ready"""
+        if not self._isOpen: raise portNotOpenError
+        if self.logger:
+            self.logger.info('ignored setDTR(%r)' % (level,))
+
+    def getCTS(self):
+        """Read terminal status line: Clear To Send"""
+        if not self._isOpen: raise portNotOpenError
+        if self.logger:
+            self.logger.info('returning dummy for getCTS()')
+        return True
+
+    def getDSR(self):
+        """Read terminal status line: Data Set Ready"""
+        if not self._isOpen: raise portNotOpenError
+        if self.logger:
+            self.logger.info('returning dummy for getDSR()')
+        return True
+
+    def getRI(self):
+        """Read terminal status line: Ring Indicator"""
+        if not self._isOpen: raise portNotOpenError
+        if self.logger:
+            self.logger.info('returning dummy for getRI()')
+        return False
+
+    def getCD(self):
+        """Read terminal status line: Carrier Detect"""
+        if not self._isOpen: raise portNotOpenError
+        if self.logger:
+            self.logger.info('returning dummy for getCD()')
+        return True
+
+    # - - - platform specific - - -
+    # None so far
+
+
+# assemble Serial class with the platform specific implementation and the base
+# for file-like behavior. for Python 2.6 and newer, which provide the new I/O
+# library, derive from io.RawIOBase
+try:
+    import io
+except ImportError:
+    # classic version with our own file-like emulation
+    class Serial(SocketSerial, FileLike):
+        pass
+else:
+    # io library present
+    class Serial(SocketSerial, io.RawIOBase):
+        pass
+
+
+# simple client test
+if __name__ == '__main__':
+    import sys
+    s = Serial('socket://localhost:7000')
+    sys.stdout.write('%s\n' % s)
+
+    sys.stdout.write("write...\n")
+    s.write("hello\n")
+    s.flush()
+    sys.stdout.write("read: %s\n" % s.read(5))
+
+    s.close()
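+
+# In normal use the handler is reached through serial.serial_for_url; for
+# example (host and port are placeholders for a TCP/IP-to-serial converter):
+#
+#   s = serial.serial_for_url('socket://192.168.0.10:4001/logging=info', timeout=3)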
diff --git a/catapult/telemetry/third_party/pyserial/serial/win32.py b/catapult/telemetry/third_party/pyserial/serial/win32.py
new file mode 100644
index 0000000..61b3d7a
--- /dev/null
+++ b/catapult/telemetry/third_party/pyserial/serial/win32.py
@@ -0,0 +1,320 @@
+from ctypes import *
+from ctypes.wintypes import HANDLE
+from ctypes.wintypes import BOOL
+from ctypes.wintypes import LPCWSTR
+_stdcall_libraries = {}
+_stdcall_libraries['kernel32'] = WinDLL('kernel32')
+from ctypes.wintypes import DWORD
+from ctypes.wintypes import WORD
+from ctypes.wintypes import BYTE
+
+INVALID_HANDLE_VALUE = HANDLE(-1).value
+
+# some details of the Windows API differ between 32 and 64 bit systems.
+def is_64bit():
+    """Returns true when running on a 64 bit system"""
+    return sizeof(c_ulong) != sizeof(c_void_p)
+
+# ULONG_PTR is an ordinary number, not a pointer, and contrary to the name it
+# is either 32 or 64 bits wide, depending on the flavor of Windows...
+# so test whether this is a 32 bit or a 64 bit Windows...
+if is_64bit():
+    # assume 64 bits
+    ULONG_PTR = c_int64
+else:
+    # 32 bits
+    ULONG_PTR = c_ulong
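+# (for example, on a 64 bit Windows Python sizeof(c_void_p) is 8 while
+# sizeof(c_ulong) stays 4, so is_64bit() returns True and ULONG_PTR above
+# becomes c_int64)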
+
+
+class _SECURITY_ATTRIBUTES(Structure):
+    pass
+LPSECURITY_ATTRIBUTES = POINTER(_SECURITY_ATTRIBUTES)
+
+
+try:
+    CreateEventW = _stdcall_libraries['kernel32'].CreateEventW
+except AttributeError:
+    # Fallback to non wide char version for old OS...
+    from ctypes.wintypes import LPCSTR
+    CreateEventA = _stdcall_libraries['kernel32'].CreateEventA
+    CreateEventA.restype = HANDLE
+    CreateEventA.argtypes = [LPSECURITY_ATTRIBUTES, BOOL, BOOL, LPCSTR]
+    CreateEvent=CreateEventA
+
+    CreateFileA = _stdcall_libraries['kernel32'].CreateFileA
+    CreateFileA.restype = HANDLE
+    CreateFileA.argtypes = [LPCSTR, DWORD, DWORD, LPSECURITY_ATTRIBUTES, DWORD, DWORD, HANDLE]
+    CreateFile = CreateFileA
+else:
+    CreateEventW.restype = HANDLE
+    CreateEventW.argtypes = [LPSECURITY_ATTRIBUTES, BOOL, BOOL, LPCWSTR]
+    CreateEvent = CreateEventW # alias
+
+    CreateFileW = _stdcall_libraries['kernel32'].CreateFileW
+    CreateFileW.restype = HANDLE
+    CreateFileW.argtypes = [LPCWSTR, DWORD, DWORD, LPSECURITY_ATTRIBUTES, DWORD, DWORD, HANDLE]
+    CreateFile = CreateFileW # alias
+
+class _OVERLAPPED(Structure):
+    pass
+OVERLAPPED = _OVERLAPPED
+
+class _COMSTAT(Structure):
+    pass
+COMSTAT = _COMSTAT
+
+class _DCB(Structure):
+    pass
+DCB = _DCB
+
+class _COMMTIMEOUTS(Structure):
+    pass
+COMMTIMEOUTS = _COMMTIMEOUTS
+
+GetLastError = _stdcall_libraries['kernel32'].GetLastError
+GetLastError.restype = DWORD
+GetLastError.argtypes = []
+
+LPOVERLAPPED = POINTER(_OVERLAPPED)
+LPDWORD = POINTER(DWORD)
+
+GetOverlappedResult = _stdcall_libraries['kernel32'].GetOverlappedResult
+GetOverlappedResult.restype = BOOL
+GetOverlappedResult.argtypes = [HANDLE, LPOVERLAPPED, LPDWORD, BOOL]
+
+ResetEvent = _stdcall_libraries['kernel32'].ResetEvent
+ResetEvent.restype = BOOL
+ResetEvent.argtypes = [HANDLE]
+
+LPCVOID = c_void_p
+
+WriteFile = _stdcall_libraries['kernel32'].WriteFile
+WriteFile.restype = BOOL
+WriteFile.argtypes = [HANDLE, LPCVOID, DWORD, LPDWORD, LPOVERLAPPED]
+
+LPVOID = c_void_p
+
+ReadFile = _stdcall_libraries['kernel32'].ReadFile
+ReadFile.restype = BOOL
+ReadFile.argtypes = [HANDLE, LPVOID, DWORD, LPDWORD, LPOVERLAPPED]
+
+CloseHandle = _stdcall_libraries['kernel32'].CloseHandle
+CloseHandle.restype = BOOL
+CloseHandle.argtypes = [HANDLE]
+
+ClearCommBreak = _stdcall_libraries['kernel32'].ClearCommBreak
+ClearCommBreak.restype = BOOL
+ClearCommBreak.argtypes = [HANDLE]
+
+LPCOMSTAT = POINTER(_COMSTAT)
+
+ClearCommError = _stdcall_libraries['kernel32'].ClearCommError
+ClearCommError.restype = BOOL
+ClearCommError.argtypes = [HANDLE, LPDWORD, LPCOMSTAT]
+
+SetupComm = _stdcall_libraries['kernel32'].SetupComm
+SetupComm.restype = BOOL
+SetupComm.argtypes = [HANDLE, DWORD, DWORD]
+
+EscapeCommFunction = _stdcall_libraries['kernel32'].EscapeCommFunction
+EscapeCommFunction.restype = BOOL
+EscapeCommFunction.argtypes = [HANDLE, DWORD]
+
+GetCommModemStatus = _stdcall_libraries['kernel32'].GetCommModemStatus
+GetCommModemStatus.restype = BOOL
+GetCommModemStatus.argtypes = [HANDLE, LPDWORD]
+
+LPDCB = POINTER(_DCB)
+
+GetCommState = _stdcall_libraries['kernel32'].GetCommState
+GetCommState.restype = BOOL
+GetCommState.argtypes = [HANDLE, LPDCB]
+
+LPCOMMTIMEOUTS = POINTER(_COMMTIMEOUTS)
+
+GetCommTimeouts = _stdcall_libraries['kernel32'].GetCommTimeouts
+GetCommTimeouts.restype = BOOL
+GetCommTimeouts.argtypes = [HANDLE, LPCOMMTIMEOUTS]
+
+PurgeComm = _stdcall_libraries['kernel32'].PurgeComm
+PurgeComm.restype = BOOL
+PurgeComm.argtypes = [HANDLE, DWORD]
+
+SetCommBreak = _stdcall_libraries['kernel32'].SetCommBreak
+SetCommBreak.restype = BOOL
+SetCommBreak.argtypes = [HANDLE]
+
+SetCommMask = _stdcall_libraries['kernel32'].SetCommMask
+SetCommMask.restype = BOOL
+SetCommMask.argtypes = [HANDLE, DWORD]
+
+SetCommState = _stdcall_libraries['kernel32'].SetCommState
+SetCommState.restype = BOOL
+SetCommState.argtypes = [HANDLE, LPDCB]
+
+SetCommTimeouts = _stdcall_libraries['kernel32'].SetCommTimeouts
+SetCommTimeouts.restype = BOOL
+SetCommTimeouts.argtypes = [HANDLE, LPCOMMTIMEOUTS]
+
+WaitForSingleObject = _stdcall_libraries['kernel32'].WaitForSingleObject
+WaitForSingleObject.restype = DWORD
+WaitForSingleObject.argtypes = [HANDLE, DWORD]
+
+ONESTOPBIT = 0 # Variable c_int
+TWOSTOPBITS = 2 # Variable c_int
+ONE5STOPBITS = 1
+
+NOPARITY = 0 # Variable c_int
+ODDPARITY = 1 # Variable c_int
+EVENPARITY = 2 # Variable c_int
+MARKPARITY = 3
+SPACEPARITY = 4
+
+RTS_CONTROL_HANDSHAKE = 2 # Variable c_int
+RTS_CONTROL_DISABLE = 0 # Variable c_int
+RTS_CONTROL_ENABLE = 1 # Variable c_int
+RTS_CONTROL_TOGGLE = 3 # Variable c_int
+SETRTS = 3
+CLRRTS = 4
+
+DTR_CONTROL_HANDSHAKE = 2 # Variable c_int
+DTR_CONTROL_DISABLE = 0 # Variable c_int
+DTR_CONTROL_ENABLE = 1 # Variable c_int
+SETDTR = 5
+CLRDTR = 6
+
+MS_DSR_ON = 32 # Variable c_ulong
+EV_RING = 256 # Variable c_int
+EV_PERR = 512 # Variable c_int
+EV_ERR = 128 # Variable c_int
+SETXOFF = 1 # Variable c_int
+EV_RXCHAR = 1 # Variable c_int
+GENERIC_WRITE = 1073741824 # Variable c_long
+PURGE_TXCLEAR = 4 # Variable c_int
+FILE_FLAG_OVERLAPPED = 1073741824 # Variable c_int
+EV_DSR = 16 # Variable c_int
+MAXDWORD = 4294967295L # Variable c_uint
+EV_RLSD = 32 # Variable c_int
+ERROR_IO_PENDING = 997 # Variable c_long
+MS_CTS_ON = 16 # Variable c_ulong
+EV_EVENT1 = 2048 # Variable c_int
+EV_RX80FULL = 1024 # Variable c_int
+PURGE_RXABORT = 2 # Variable c_int
+FILE_ATTRIBUTE_NORMAL = 128 # Variable c_int
+PURGE_TXABORT = 1 # Variable c_int
+SETXON = 2 # Variable c_int
+OPEN_EXISTING = 3 # Variable c_int
+MS_RING_ON = 64 # Variable c_ulong
+EV_TXEMPTY = 4 # Variable c_int
+EV_RXFLAG = 2 # Variable c_int
+MS_RLSD_ON = 128 # Variable c_ulong
+GENERIC_READ = 2147483648L # Variable c_ulong
+EV_EVENT2 = 4096 # Variable c_int
+EV_CTS = 8 # Variable c_int
+EV_BREAK = 64 # Variable c_int
+PURGE_RXCLEAR = 8 # Variable c_int
+INFINITE = 0xFFFFFFFFL
+
+
+class N11_OVERLAPPED4DOLLAR_48E(Union):
+    pass
+class N11_OVERLAPPED4DOLLAR_484DOLLAR_49E(Structure):
+    pass
+N11_OVERLAPPED4DOLLAR_484DOLLAR_49E._fields_ = [
+    ('Offset', DWORD),
+    ('OffsetHigh', DWORD),
+]
+
+PVOID = c_void_p
+
+N11_OVERLAPPED4DOLLAR_48E._anonymous_ = ['_0']
+N11_OVERLAPPED4DOLLAR_48E._fields_ = [
+    ('_0', N11_OVERLAPPED4DOLLAR_484DOLLAR_49E),
+    ('Pointer', PVOID),
+]
+_OVERLAPPED._anonymous_ = ['_0']
+_OVERLAPPED._fields_ = [
+    ('Internal', ULONG_PTR),
+    ('InternalHigh', ULONG_PTR),
+    ('_0', N11_OVERLAPPED4DOLLAR_48E),
+    ('hEvent', HANDLE),
+]
+_SECURITY_ATTRIBUTES._fields_ = [
+    ('nLength', DWORD),
+    ('lpSecurityDescriptor', LPVOID),
+    ('bInheritHandle', BOOL),
+]
+_COMSTAT._fields_ = [
+    ('fCtsHold', DWORD, 1),
+    ('fDsrHold', DWORD, 1),
+    ('fRlsdHold', DWORD, 1),
+    ('fXoffHold', DWORD, 1),
+    ('fXoffSent', DWORD, 1),
+    ('fEof', DWORD, 1),
+    ('fTxim', DWORD, 1),
+    ('fReserved', DWORD, 25),
+    ('cbInQue', DWORD),
+    ('cbOutQue', DWORD),
+]
+_DCB._fields_ = [
+    ('DCBlength', DWORD),
+    ('BaudRate', DWORD),
+    ('fBinary', DWORD, 1),
+    ('fParity', DWORD, 1),
+    ('fOutxCtsFlow', DWORD, 1),
+    ('fOutxDsrFlow', DWORD, 1),
+    ('fDtrControl', DWORD, 2),
+    ('fDsrSensitivity', DWORD, 1),
+    ('fTXContinueOnXoff', DWORD, 1),
+    ('fOutX', DWORD, 1),
+    ('fInX', DWORD, 1),
+    ('fErrorChar', DWORD, 1),
+    ('fNull', DWORD, 1),
+    ('fRtsControl', DWORD, 2),
+    ('fAbortOnError', DWORD, 1),
+    ('fDummy2', DWORD, 17),
+    ('wReserved', WORD),
+    ('XonLim', WORD),
+    ('XoffLim', WORD),
+    ('ByteSize', BYTE),
+    ('Parity', BYTE),
+    ('StopBits', BYTE),
+    ('XonChar', c_char),
+    ('XoffChar', c_char),
+    ('ErrorChar', c_char),
+    ('EofChar', c_char),
+    ('EvtChar', c_char),
+    ('wReserved1', WORD),
+]
+_COMMTIMEOUTS._fields_ = [
+    ('ReadIntervalTimeout', DWORD),
+    ('ReadTotalTimeoutMultiplier', DWORD),
+    ('ReadTotalTimeoutConstant', DWORD),
+    ('WriteTotalTimeoutMultiplier', DWORD),
+    ('WriteTotalTimeoutConstant', DWORD),
+]
+__all__ = ['GetLastError', 'MS_CTS_ON', 'FILE_ATTRIBUTE_NORMAL',
+           'DTR_CONTROL_ENABLE', '_COMSTAT', 'MS_RLSD_ON',
+           'GetOverlappedResult', 'SETXON', 'PURGE_TXABORT',
+           'PurgeComm', 'N11_OVERLAPPED4DOLLAR_48E', 'EV_RING',
+           'ONESTOPBIT', 'SETXOFF', 'PURGE_RXABORT', 'GetCommState',
+           'RTS_CONTROL_ENABLE', '_DCB', 'CreateEvent',
+           '_COMMTIMEOUTS', '_SECURITY_ATTRIBUTES', 'EV_DSR',
+           'EV_PERR', 'EV_RXFLAG', 'OPEN_EXISTING', 'DCB',
+           'FILE_FLAG_OVERLAPPED', 'EV_CTS', 'SetupComm',
+           'LPOVERLAPPED', 'EV_TXEMPTY', 'ClearCommBreak',
+           'LPSECURITY_ATTRIBUTES', 'SetCommBreak', 'SetCommTimeouts',
+           'COMMTIMEOUTS', 'ODDPARITY', 'EV_RLSD',
+           'GetCommModemStatus', 'EV_EVENT2', 'PURGE_TXCLEAR',
+           'EV_BREAK', 'EVENPARITY', 'LPCVOID', 'COMSTAT', 'ReadFile',
+           'PVOID', '_OVERLAPPED', 'WriteFile', 'GetCommTimeouts',
+           'ResetEvent', 'EV_RXCHAR', 'LPCOMSTAT', 'ClearCommError',
+           'ERROR_IO_PENDING', 'EscapeCommFunction', 'GENERIC_READ',
+           'RTS_CONTROL_HANDSHAKE', 'OVERLAPPED',
+           'DTR_CONTROL_HANDSHAKE', 'PURGE_RXCLEAR', 'GENERIC_WRITE',
+           'LPDCB', 'CreateEventW', 'SetCommMask', 'EV_EVENT1',
+           'SetCommState', 'LPVOID', 'CreateFileW', 'LPDWORD',
+           'EV_RX80FULL', 'TWOSTOPBITS', 'LPCOMMTIMEOUTS', 'MAXDWORD',
+           'MS_DSR_ON', 'MS_RING_ON',
+           'N11_OVERLAPPED4DOLLAR_484DOLLAR_49E', 'EV_ERR',
+           'ULONG_PTR', 'CreateFile', 'NOPARITY', 'CloseHandle']
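As rough orientation, here is a minimal sketch of how ctypes bindings like the ones above are typically driven (a sketch only: it assumes a Windows host, an existing COM3 port, and the names defined above being in scope; INVALID_HANDLE_VALUE is defined locally because it is not shown in this excerpt):

```python
import ctypes

INVALID_HANDLE_VALUE = ctypes.c_void_p(-1).value  # defined here for the sketch

handle = CreateFile(
    u'\\\\.\\COM3',                # device path for the serial port (assumed)
    GENERIC_READ | GENERIC_WRITE,  # read/write access
    0,                             # no sharing
    None,                          # default security attributes
    OPEN_EXISTING,                 # serial ports must already exist
    FILE_ATTRIBUTE_NORMAL,         # plain synchronous I/O for this sketch
    0)                             # no template file
if handle == INVALID_HANDLE_VALUE:
    raise ctypes.WinError(GetLastError())

# Give reads a one-second total timeout.
timeouts = COMMTIMEOUTS()
timeouts.ReadIntervalTimeout = 0
timeouts.ReadTotalTimeoutMultiplier = 0
timeouts.ReadTotalTimeoutConstant = 1000   # milliseconds
if not SetCommTimeouts(handle, ctypes.byref(timeouts)):
    raise ctypes.WinError(GetLastError())

CloseHandle(handle)
```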
diff --git a/catapult/telemetry/third_party/webpagereplay/.coveragerc b/catapult/telemetry/third_party/webpagereplay/.coveragerc
new file mode 100644
index 0000000..9e3b31a
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/.coveragerc
@@ -0,0 +1,4 @@
+[report]
+omit =
+  */python?.?/*
+  */third_party/*
diff --git a/catapult/telemetry/third_party/webpagereplay/.gitignore b/catapult/telemetry/third_party/webpagereplay/.gitignore
new file mode 100644
index 0000000..0d20b64
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/.gitignore
@@ -0,0 +1 @@
+*.pyc
diff --git a/catapult/telemetry/third_party/webpagereplay/.travis.yml b/catapult/telemetry/third_party/webpagereplay/.travis.yml
new file mode 100644
index 0000000..23e151b
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/.travis.yml
@@ -0,0 +1,34 @@
+# Copyright 2015 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+language: python
+
+python:
+ - "2.7"
+
+install:
+ - pip install -r requirements.txt
+ - pip install coveralls
+
+# traffic_shaper test requires sudo.
+sudo: required
+
+script:
+ - sudo $(which coverage) run run_tests
+
+after_script:
+ - coveralls
+
+notifications:
+ email: false
diff --git a/catapult/telemetry/third_party/webpagereplay/COPYING b/catapult/telemetry/third_party/webpagereplay/COPYING
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/COPYING
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/catapult/telemetry/third_party/webpagereplay/PRESUBMIT.py b/catapult/telemetry/third_party/webpagereplay/PRESUBMIT.py
new file mode 100644
index 0000000..5948576
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/PRESUBMIT.py
@@ -0,0 +1,28 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Presubmit script for changes affecting tools/perf/.
+
+See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
+for more details about the presubmit API built into depot_tools.
+"""
+
+def _CommonChecks(input_api, output_api):
+  """Performs common checks, which includes running pylint."""
+  results = []
+  results.extend(input_api.canned_checks.RunPylint(
+        input_api, output_api, black_list=[], pylintrc='pylintrc'))
+  return results
+
+
+def CheckChangeOnUpload(input_api, output_api):
+  report = []
+  report.extend(_CommonChecks(input_api, output_api))
+  return report
+
+
+def CheckChangeOnCommit(input_api, output_api):
+  report = []
+  report.extend(_CommonChecks(input_api, output_api))
+  return report
diff --git a/catapult/telemetry/third_party/webpagereplay/README.chromium b/catapult/telemetry/third_party/webpagereplay/README.chromium
new file mode 100644
index 0000000..6758fba
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/README.chromium
@@ -0,0 +1,14 @@
+Name: Web Page Replay
+Short Name: webpagereplay
+URL: https://github.com/chromium/web-page-replay
+Version: b03f84da6e90951d4275b5675533d4512a178398 (commit hash)
+License: BSD
+License File: NOT_SHIPPED
+Security Critical: no
+
+Local modification: Removed webpagereplay/third_party/ipaddr/OWNERS to avoid
+PRESUBMIT complaints about its non-standard OWNERS format.
+
+Description:
+This contains webpagereplay, which telemetry uses to record and replay web
+requests and responses.
diff --git a/catapult/telemetry/third_party/webpagereplay/README.md b/catapult/telemetry/third_party/webpagereplay/README.md
new file mode 100644
index 0000000..28a3dc3
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/README.md
@@ -0,0 +1,28 @@
+[![Build
+Status](https://travis-ci.org/chromium/web-page-replay.png)](https://travis-ci.org/chromium/web-page-replay)
+[![Coverage
+Status](https://coveralls.io/repos/chromium/web-page-replay/badge.svg)](https://coveralls.io/r/chromium/web-page-replay)
+
+# Web Page Replay
+Record live Web pages and use them for local performance testing!
+
+## How?
+Use local DNS and HTTP(S) proxies to capture your live traffic. Then
+replay those captures to serve exactly the same content, so that your
+tests get consistent results that are not affected by the origin
+servers, the network, etc.
+
+## Tell me more
+Check out the [getting
+started](documentation/GettingStarted.md) guide or take a
+look at the [architecture
+diagram](documentation/WebPageReplayDiagram.png).
+
+Also see [Note about web-page-replay
+code](https://docs.google.com/document/d/1cehHn3Lig7UYw_7pqQJjkbPTV3kS11EYwjKO-6jT0c8)
+
+## I want to help
+If you find issues with the project, you can file issues on this repo.
+If you want to do more and contribute code to help the project evolve,
+check out our [contribution
+guidelines](documentation/Contributing.md).
diff --git a/catapult/telemetry/third_party/webpagereplay/adb_install_cert.py b/catapult/telemetry/third_party/webpagereplay/adb_install_cert.py
new file mode 100644
index 0000000..61d2973
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/adb_install_cert.py
@@ -0,0 +1,244 @@
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Installs certificate on phone with KitKat."""
+
+import argparse
+import logging
+import os
+import subprocess
+import sys
+
+KEYCODE_ENTER = '66'
+KEYCODE_TAB = '61'
+
+
+class CertInstallError(Exception):
+  pass
+
+
+class CertRemovalError(Exception):
+  pass
+
+
+_ANDROID_M_BUILD_VERSION = 23
+
+
+class AndroidCertInstaller(object):
+  """Certificate installer for phones with KitKat."""
+
+  def __init__(self, device_id, cert_name, cert_path):
+    if not os.path.exists(cert_path):
+      raise ValueError('Not a valid certificate path')
+    self.device_id = device_id
+    self.cert_name = cert_name
+    self.cert_path = cert_path
+    self.file_name = os.path.basename(self.cert_path)
+    self.reformatted_cert_fname = None
+    self.reformatted_cert_path = None
+    self.android_cacerts_path = None
+
+  @staticmethod
+  def _run_cmd(cmd, dirname=None):
+    return subprocess.check_output(cmd, cwd=dirname)
+
+  def _adb(self, *args):
+    """Runs the adb command."""
+    cmd = ['adb']
+    if self.device_id:
+      cmd.extend(['-s', self.device_id])
+    cmd.extend(args)
+    return self._run_cmd(cmd)
+
+  def _adb_shell(self, *args):
+    cmd = ['shell']
+    cmd.extend(args)
+    return self._adb(*cmd)
+
+  def _adb_su_shell(self, *args):
+    """Runs command as root."""
+    build_version_sdk = int(self._get_property('ro.build.version.sdk'))
+    if build_version_sdk >= _ANDROID_M_BUILD_VERSION:
+      cmd = ['su', '0']
+    else:
+      cmd = ['su', '-c']
+    cmd.extend(args)
+    return self._adb_shell(*cmd)
+
+  def _get_property(self, prop):
+    return self._adb_shell('getprop', prop).strip()
+
+  def check_device(self):
+    install_warning = False
+    if self._get_property('ro.product.device') != 'hammerhead':
+      logging.warning('Device is not hammerhead')
+      install_warning = True
+    if self._get_property('ro.build.version.release') != '4.4.2':
+      logging.warning('Version is not 4.4.2')
+      install_warning = True
+    if install_warning:
+      logging.warning('Certificate may not install properly')
+
+  def _input_key(self, key):
+    """Inputs a keyevent."""
+    self._adb_shell('input', 'keyevent', key)
+
+  def _input_text(self, text):
+    """Inputs text."""
+    self._adb_shell('input', 'text', text)
+
+  @staticmethod
+  def _remove(file_name):
+    """Deletes file."""
+    if os.path.exists(file_name):
+      os.remove(file_name)
+
+  def _format_hashed_cert(self):
+    """Makes a certificate file that follows the format of files in cacerts."""
+    self._remove(self.reformatted_cert_path)
+    contents = self._run_cmd(['openssl', 'x509', '-inform', 'PEM', '-text',
+                              '-in', self.cert_path])
+    description, begin_cert, cert_body = contents.rpartition('-----BEGIN '
+                                                             'CERTIFICATE')
+    contents = ''.join([begin_cert, cert_body, description])
+    with open(self.reformatted_cert_path, 'w') as cert_file:
+      cert_file.write(contents)
+
+  def _remove_cert_from_cacerts(self):
+    self._adb_su_shell('mount', '-o', 'remount,rw', '/system')
+    self._adb_su_shell('rm', '-f', self.android_cacerts_path)
+
+  def _is_cert_installed(self):
+    return (self._adb_su_shell('ls', self.android_cacerts_path).strip() ==
+            self.android_cacerts_path)
+
+  def _generate_reformatted_cert_path(self):
+    # Determine OpenSSL version, string is of the form
+    # 'OpenSSL 0.9.8za 5 Jun 2014' .
+    openssl_version = self._run_cmd(['openssl', 'version']).split()
+
+    if len(openssl_version) < 2:
+      raise ValueError('Unexpected OpenSSL version string: ', openssl_version)
+
+    # subject_hash flag name changed as of OpenSSL version 1.0.0 .
+    is_old_openssl_version = openssl_version[1].startswith('0')
+    subject_hash_flag = (
+        '-subject_hash' if is_old_openssl_version else '-subject_hash_old')
+
+    output = self._run_cmd(['openssl', 'x509', '-inform', 'PEM',
+                            subject_hash_flag, '-in', self.cert_path],
+                           os.path.dirname(self.cert_path))
+    self.reformatted_cert_fname = output.partition('\n')[0].strip() + '.0'
+    self.reformatted_cert_path = os.path.join(os.path.dirname(self.cert_path),
+                                              self.reformatted_cert_fname)
+    self.android_cacerts_path = ('/system/etc/security/cacerts/%s' %
+                                 self.reformatted_cert_fname)
+
+  def remove_cert(self):
+    self._generate_reformatted_cert_path()
+
+    if self._is_cert_installed():
+      self._remove_cert_from_cacerts()
+
+    if self._is_cert_installed():
+      raise CertRemovalError('Cert Removal Failed')
+
+  def install_cert(self, overwrite_cert=False):
+    """Installs a certificate putting it in /system/etc/security/cacerts."""
+    self._generate_reformatted_cert_path()
+
+    if self._is_cert_installed():
+      if overwrite_cert:
+        self._remove_cert_from_cacerts()
+      else:
+        logging.info('cert is already installed')
+        return
+
+    self._format_hashed_cert()
+    self._adb('push', self.reformatted_cert_path, '/sdcard/')
+    self._remove(self.reformatted_cert_path)
+    self._adb_su_shell('mount', '-o', 'remount,rw', '/system')
+    self._adb_su_shell(
+        'cp', '/sdcard/%s' % self.reformatted_cert_fname,
+        '/system/etc/security/cacerts/%s' % self.reformatted_cert_fname)
+    self._adb_su_shell('chmod', '644', self.android_cacerts_path)
+    if not self._is_cert_installed():
+      raise CertInstallError('Cert Install Failed')
+
+  def install_cert_using_gui(self):
+    """Installs certificate on the device using adb commands."""
+    self.check_device()
+    # TODO(mruthven): Add a check to see if the certificate is already installed
+    # Install the certificate.
+    logging.info('Installing %s on %s', self.cert_path, self.device_id)
+    self._adb('push', self.cert_path, '/sdcard/')
+
+    # Start credential install intent.
+    self._adb_shell('am', 'start', '-W', '-a', 'android.credentials.INSTALL')
+
+    # Move to and click search button.
+    self._input_key(KEYCODE_TAB)
+    self._input_key(KEYCODE_TAB)
+    self._input_key(KEYCODE_ENTER)
+
+    # Search for certificate and click it.
+    # Search only works with lower case letters
+    self._input_text(self.file_name.lower())
+    self._input_key(KEYCODE_ENTER)
+
+    # These coordinates work for hammerhead devices.
+    self._adb_shell('input', 'tap', '300', '300')
+
+    # Name the certificate and click enter.
+    self._input_text(self.cert_name)
+    self._input_key(KEYCODE_TAB)
+    self._input_key(KEYCODE_TAB)
+    self._input_key(KEYCODE_TAB)
+    self._input_key(KEYCODE_ENTER)
+
+    # Remove the file.
+    self._adb_shell('rm', '/sdcard/' + self.file_name)
+
+
+def parse_args():
+  """Parses command line arguments."""
+  parser = argparse.ArgumentParser(description='Install cert on device.')
+  parser.add_argument(
+      '-n', '--cert-name', default='dummycert', help='certificate name')
+  parser.add_argument(
+      '--overwrite', default=False, action='store_true',
+      help='Overwrite certificate file if it is already installed')
+  parser.add_argument(
+      '--remove', default=False, action='store_true',
+      help='Remove certificate file if it is installed')
+  parser.add_argument(
+      '--device-id', help='device serial number')
+  parser.add_argument(
+      'cert_path', help='Certificate file path')
+  return parser.parse_args()
+
+
+def main():
+  args = parse_args()
+  cert_installer = AndroidCertInstaller(args.device_id, args.cert_name,
+                                        args.cert_path)
+  if args.remove:
+    cert_installer.remove_cert()
+  else:
+    cert_installer.install_cert(args.overwrite)
+
+
+if __name__ == '__main__':
+  sys.exit(main())
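Driving the installer from another script takes only a few lines; a minimal sketch, assuming adb is on PATH and the attached device is rooted (the device serial and certificate path below are hypothetical):

```python
from adb_install_cert import AndroidCertInstaller

installer = AndroidCertInstaller(device_id='0123456789abcdef',  # hypothetical serial
                                 cert_name='wpr_root',
                                 cert_path='wpr-cert.pem')       # hypothetical path
installer.install_cert(overwrite_cert=True)  # copies into /system/etc/security/cacerts
# ... run whatever needs the root CA to be trusted ...
installer.remove_cert()
```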
diff --git a/catapult/telemetry/third_party/webpagereplay/certutils.py b/catapult/telemetry/third_party/webpagereplay/certutils.py
new file mode 100644
index 0000000..c64e4e0
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/certutils.py
@@ -0,0 +1,256 @@
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Routines to generate root and server certificates.
+
+Certificate Naming Conventions:
+  ca_cert:  crypto.X509 for the certificate authority (w/ both the pub &
+                priv keys)
+  cert:  a crypto.X509 certificate (w/ just the pub key)
+  cert_str:  a certificate string (w/ just the pub cert)
+  key:  a private crypto.PKey  (from ca or pem)
+  ca_cert_str:  a certificate authority string (w/ both the pub cert & priv key)
+"""
+
+import logging
+import os
+import socket
+import time
+
+openssl_import_error = None
+
+Error = None
+SSL_METHOD = None
+SysCallError = None
+VERIFY_PEER = None
+ZeroReturnError = None
+FILETYPE_PEM = None
+
+try:
+  from OpenSSL import crypto, SSL
+
+  Error = SSL.Error
+  SSL_METHOD = SSL.SSLv23_METHOD
+  SysCallError = SSL.SysCallError
+  VERIFY_PEER = SSL.VERIFY_PEER
+  ZeroReturnError = SSL.ZeroReturnError
+  FILETYPE_PEM = crypto.FILETYPE_PEM
+except ImportError, e:
+  openssl_import_error = e
+
+
+def get_ssl_context(method=SSL_METHOD):
+  # method is one of SSLv2_METHOD, SSLv3_METHOD, SSLv23_METHOD, or TLSv1_METHOD.
+  if openssl_import_error:
+    raise openssl_import_error  # pylint: disable=raising-bad-type
+  return SSL.Context(method)
+
+
+class WrappedConnection(object):
+
+  def __init__(self, obj):
+    self._wrapped_obj = obj
+
+  def __getattr__(self, attr):
+    if attr in self.__dict__:
+      return getattr(self, attr)
+    return getattr(self._wrapped_obj, attr)
+
+  def recv(self, buflen=1024, flags=0):
+    try:
+      return self._wrapped_obj.recv(buflen, flags)
+    except SSL.SysCallError, e:
+      if e.args[1] == 'Unexpected EOF':
+        return ''
+      raise
+    except SSL.ZeroReturnError:
+      return ''
+
+
+def get_ssl_connection(context, connection):
+  return WrappedConnection(SSL.Connection(context, connection))
+
+
+def load_privatekey(key, filetype=FILETYPE_PEM):
+  """Loads obj private key object from string."""
+  return crypto.load_privatekey(filetype, key)
+
+
+def load_cert(cert_str, filetype=FILETYPE_PEM):
+  """Loads obj cert object from string."""
+  return crypto.load_certificate(filetype, cert_str)
+
+
+def _dump_privatekey(key, filetype=FILETYPE_PEM):
+  """Dumps obj private key object to string."""
+  return crypto.dump_privatekey(filetype, key)
+
+
+def _dump_cert(cert, filetype=FILETYPE_PEM):
+  """Dumps obj cert object to string."""
+  return crypto.dump_certificate(filetype, cert)
+
+
+def generate_dummy_ca_cert(subject='_WebPageReplayCert'):
+  """Generates dummy certificate authority.
+
+  Args:
+    subject: a string representing the desired root cert issuer
+  Returns:
+    A tuple of the public key and the private key strings for the root
+    certificate
+  """
+  if openssl_import_error:
+    raise openssl_import_error  # pylint: disable=raising-bad-type
+
+  key = crypto.PKey()
+  key.generate_key(crypto.TYPE_RSA, 1024)
+
+  ca_cert = crypto.X509()
+  ca_cert.set_serial_number(int(time.time()*10000))
+  ca_cert.set_version(2)
+  ca_cert.get_subject().CN = subject
+  ca_cert.get_subject().O = subject
+  ca_cert.gmtime_adj_notBefore(-60 * 60 * 24 * 365 * 2)
+  ca_cert.gmtime_adj_notAfter(60 * 60 * 24 * 365 * 2)
+  ca_cert.set_issuer(ca_cert.get_subject())
+  ca_cert.set_pubkey(key)
+  ca_cert.add_extensions([
+      crypto.X509Extension('basicConstraints', True, 'CA:TRUE'),
+      crypto.X509Extension('nsCertType', True, 'sslCA'),
+      crypto.X509Extension('extendedKeyUsage', True,
+                           ('serverAuth,clientAuth,emailProtection,'
+                            'timeStamping,msCodeInd,msCodeCom,msCTLSign,'
+                            'msSGC,msEFS,nsSGC')),
+      crypto.X509Extension('keyUsage', False, 'keyCertSign, cRLSign'),
+      crypto.X509Extension('subjectKeyIdentifier', False, 'hash',
+                           subject=ca_cert),
+      ])
+  ca_cert.sign(key, 'sha256')
+  key_str = _dump_privatekey(key)
+  ca_cert_str = _dump_cert(ca_cert)
+  return ca_cert_str, key_str
+
+
+def get_host_cert(host, port=443):
+  """Contacts the host and returns its certificate."""
+  host_certs = []
+  def verify_cb(conn, cert, errnum, depth, ok):
+    host_certs.append(cert)
+    # Return True to indicate that the certificate was ok.
+    return True
+
+  context = SSL.Context(SSL.SSLv23_METHOD)
+  context.set_verify(SSL.VERIFY_PEER, verify_cb)  # Demand a certificate
+  s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+  connection = SSL.Connection(context, s)
+  try:
+    connection.connect((host, port))
+    connection.send('')
+  except SSL.SysCallError:
+    pass
+  except socket.gaierror:
+    logging.debug('Host name is not valid')
+  finally:
+    connection.shutdown()
+    connection.close()
+  if not host_certs:
+    logging.warning('Unable to get host certificate from %s:%s', host, port)
+    return ''
+  return _dump_cert(host_certs[-1])
+
+
+def write_dummy_ca_cert(ca_cert_str, key_str, cert_path):
+  """Writes four certificate files.
+
+  For example, if cert_path is "mycert.pem":
+      mycert.pem - CA plus private key
+      mycert-cert.pem - CA in PEM format
+      mycert-cert.cer - CA for Android
+      mycert-cert.p12 - CA in PKCS12 format for Windows devices
+  Args:
+    cert_path: path string such as "mycert.pem"
+    ca_cert_str: certificate string
+    key_str: private key string
+  """
+  dirname = os.path.dirname(cert_path)
+  if dirname and not os.path.exists(dirname):
+    os.makedirs(dirname)
+
+  root_path = os.path.splitext(cert_path)[0]
+  ca_cert_path = root_path + '-cert.pem'
+  android_cer_path = root_path + '-cert.cer'
+  windows_p12_path = root_path + '-cert.p12'
+
+  # Dump the CA plus private key
+  with open(cert_path, 'w') as f:
+    f.write(key_str)
+    f.write(ca_cert_str)
+
+  # Dump the certificate in PEM format
+  with open(ca_cert_path, 'w') as f:
+    f.write(ca_cert_str)
+
+  # Create a .cer file with the same contents for Android
+  with open(android_cer_path, 'w') as f:
+    f.write(ca_cert_str)
+
+  ca_cert = load_cert(ca_cert_str)
+  key = load_privatekey(key_str)
+  # Dump the certificate in PKCS12 format for Windows devices
+  with open(windows_p12_path, 'w') as f:
+    p12 = crypto.PKCS12()
+    p12.set_certificate(ca_cert)
+    p12.set_privatekey(key)
+    f.write(p12.export())
+
+
+def generate_cert(root_ca_cert_str, server_cert_str, server_host):
+  """Generates a cert_str with the sni field in server_cert_str signed by the
+  root_ca_cert_str.
+
+  Args:
+    root_ca_cert_str: PEM formatted string representing the root cert
+    server_cert_str: PEM formatted string representing cert
+    server_host: host name to use if there is no server_cert_str
+  Returns:
+    a PEM formatted certificate string
+  """
+  if openssl_import_error:
+    raise openssl_import_error  # pylint: disable=raising-bad-type
+
+  common_name = server_host
+  if server_cert_str:
+    cert = load_cert(server_cert_str)
+    common_name = cert.get_subject().commonName
+  else:
+    cert = crypto.X509()
+
+  ca_cert = load_cert(root_ca_cert_str)
+  key = load_privatekey(root_ca_cert_str)
+
+  req = crypto.X509Req()
+  req.get_subject().CN = common_name
+  req.set_pubkey(ca_cert.get_pubkey())
+  req.sign(key, 'sha256')
+
+  cert.gmtime_adj_notBefore(-60 * 60)
+  cert.gmtime_adj_notAfter(60 * 60 * 24 * 30)
+  cert.set_issuer(ca_cert.get_subject())
+  cert.set_subject(req.get_subject())
+  cert.set_serial_number(int(time.time()*10000))
+  cert.set_pubkey(req.get_pubkey())
+  cert.sign(key, 'sha256')
+
+  return _dump_cert(cert)
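Putting the routines above together, a minimal sketch of the intended flow (requires pyOpenSSL; the file and host names are hypothetical):

```python
import certutils

ca_cert_str, key_str = certutils.generate_dummy_ca_cert(subject='MyTestCA')
certutils.write_dummy_ca_cert(ca_cert_str, key_str, 'out/myca.pem')

# generate_cert() expects the CA string that contains both the private key and
# the certificate, which is exactly what write_dummy_ca_cert() wrote above.
with open('out/myca.pem') as ca_file:
  root_ca_cert_str = ca_file.read()

leaf_pem = certutils.generate_cert(root_ca_cert_str, '', 'www.example.com')
leaf = certutils.load_cert(leaf_pem)
print leaf.get_subject().commonName  # www.example.com
print leaf.get_issuer().commonName   # MyTestCA
```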
diff --git a/catapult/telemetry/third_party/webpagereplay/certutils_test.py b/catapult/telemetry/third_party/webpagereplay/certutils_test.py
new file mode 100644
index 0000000..de1ac2d
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/certutils_test.py
@@ -0,0 +1,135 @@
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Test routines to generate dummy certificates."""
+
+import BaseHTTPServer
+import os
+import shutil
+import ssl
+import tempfile
+import threading
+import unittest
+
+import certutils
+
+
+class Server(BaseHTTPServer.HTTPServer):
+
+  def __init__(self, https_root_ca_cert_path):
+    BaseHTTPServer.HTTPServer.__init__(
+        self, ('localhost', 0), BaseHTTPServer.BaseHTTPRequestHandler)
+    self.socket = ssl.wrap_socket(
+        self.socket, certfile=https_root_ca_cert_path, server_side=True,
+        do_handshake_on_connect=False)
+
+  def __enter__(self):
+    thread = threading.Thread(target=self.serve_forever)
+    thread.daemon = True
+    thread.start()
+    return self
+
+  def cleanup(self):
+    try:
+      self.shutdown()
+    except KeyboardInterrupt:
+      pass
+
+  def __exit__(self, type_, value_, traceback_):
+    self.cleanup()
+
+
+class CertutilsTest(unittest.TestCase):
+
+  def _check_cert_file(self, cert_file_path, cert_str, key_str=None):
+    cert_load = open(cert_file_path, 'r').read()
+    if key_str:
+      expected_cert = key_str + cert_str
+    else:
+      expected_cert = cert_str
+    self.assertEqual(expected_cert, cert_load)
+
+  def setUp(self):
+    self._temp_dir = tempfile.mkdtemp(prefix='certutils_', dir='/tmp')
+
+  def tearDown(self):
+    if self._temp_dir:
+      shutil.rmtree(self._temp_dir)
+
+  def test_generate_dummy_ca_cert(self):
+    subject = 'testSubject'
+    c, _ = certutils.generate_dummy_ca_cert(subject)
+    c = certutils.load_cert(c)
+    self.assertEqual(c.get_subject().commonName, subject)
+
+  def test_get_host_cert(self):
+    ca_cert_path = os.path.join(self._temp_dir, 'rootCA.pem')
+    issuer = 'testCA'
+    certutils.write_dummy_ca_cert(*certutils.generate_dummy_ca_cert(issuer),
+                                  cert_path=ca_cert_path)
+
+    with Server(ca_cert_path) as server:
+      cert_str = certutils.get_host_cert('localhost', server.server_port)
+      cert = certutils.load_cert(cert_str)
+      self.assertEqual(issuer, cert.get_subject().commonName)
+
+  def test_get_host_cert_gives_empty_for_bad_host(self):
+    cert_str = certutils.get_host_cert('not_a_valid_host_name_2472341234234234')
+    self.assertEqual('', cert_str)
+
+  def test_write_dummy_ca_cert(self):
+    base_path = os.path.join(self._temp_dir, 'testCA')
+    ca_cert_path = base_path + '.pem'
+    cert_path = base_path + '-cert.pem'
+    ca_cert_android = base_path + '-cert.cer'
+    ca_cert_windows = base_path + '-cert.p12'
+
+    self.assertFalse(os.path.exists(ca_cert_path))
+    self.assertFalse(os.path.exists(cert_path))
+    self.assertFalse(os.path.exists(ca_cert_android))
+    self.assertFalse(os.path.exists(ca_cert_windows))
+    c, k = certutils.generate_dummy_ca_cert()
+    certutils.write_dummy_ca_cert(c, k, ca_cert_path)
+
+    self._check_cert_file(ca_cert_path, c, k)
+    self._check_cert_file(cert_path, c)
+    self._check_cert_file(ca_cert_android, c)
+    self.assertTrue(os.path.exists(ca_cert_windows))
+
+  def test_generate_cert(self):
+    ca_cert_path = os.path.join(self._temp_dir, 'testCA.pem')
+    issuer = 'testIssuer'
+    certutils.write_dummy_ca_cert(
+        *certutils.generate_dummy_ca_cert(issuer), cert_path=ca_cert_path)
+
+    with open(ca_cert_path, 'r') as root_file:
+      root_string = root_file.read()
+    subject = 'testSubject'
+    cert_string = certutils.generate_cert(
+        root_string, '', subject)
+    cert = certutils.load_cert(cert_string)
+    self.assertEqual(issuer, cert.get_issuer().commonName)
+    self.assertEqual(subject, cert.get_subject().commonName)
+
+    with open(ca_cert_path, 'r') as ca_cert_file:
+      ca_cert_str = ca_cert_file.read()
+    cert_string = certutils.generate_cert(ca_cert_str, cert_string,
+                                          'host')
+    cert = certutils.load_cert(cert_string)
+    self.assertEqual(issuer, cert.get_issuer().commonName)
+    self.assertEqual(subject, cert.get_subject().commonName)
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/catapult/telemetry/third_party/webpagereplay/customhandlers.py b/catapult/telemetry/third_party/webpagereplay/customhandlers.py
new file mode 100644
index 0000000..14166af
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/customhandlers.py
@@ -0,0 +1,198 @@
+#!/usr/bin/env python
+# Copyright 2010 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Handle special HTTP requests.
+
+/web-page-replay-generate-[RESPONSE_CODE]
+  - Return the given RESPONSE_CODE.
+/web-page-replay-post-image-[FILENAME]
+  - Save the posted image to local disk.
+/web-page-replay-command-[record|replay|status]
+  - Optional. Enable by calling custom_handlers.add_server_manager_handler(...).
+  - Change the server mode to either record or replay.
+    + When switching to record, the http_archive is cleared.
+    + When switching to replay, the http_archive is maintained.
+"""
+
+import base64
+import httparchive
+import json
+import logging
+import os
+
+COMMON_URL_PREFIX = '/web-page-replay-'
+COMMAND_URL_PREFIX = COMMON_URL_PREFIX + 'command-'
+GENERATOR_URL_PREFIX = COMMON_URL_PREFIX + 'generate-'
+POST_IMAGE_URL_PREFIX = COMMON_URL_PREFIX + 'post-image-'
+IMAGE_DATA_PREFIX = 'data:image/png;base64,'
+
+
+def SimpleResponse(status):
+  """Return a ArchivedHttpResponse with |status| code and a simple text body."""
+  return httparchive.create_response(status)
+
+
+def JsonResponse(data):
+  """Return a ArchivedHttpResponse with |data| encoded as json in the body."""
+  status = 200
+  reason = 'OK'
+  headers = [('content-type', 'application/json')]
+  body = json.dumps(data)
+  return httparchive.create_response(status, reason, headers, body)
+
+
+class CustomHandlers(object):
+
+  def __init__(self, options, http_archive):
+    """Initialize CustomHandlers.
+
+    Args:
+      options: original options passed to the server.
+      http_archive: reference to the HttpArchive object.
+    """
+    self.server_manager = None
+    self.options = options
+    self.http_archive = http_archive
+    self.handlers = [
+        (GENERATOR_URL_PREFIX, self.get_generator_url_response_code)]
+    # screenshot_dir is a path to which screenshots are saved.
+    if options.screenshot_dir:
+      if not os.path.exists(options.screenshot_dir):
+        try:
+          os.makedirs(options.screenshot_dir)
+        except OSError:  # os.makedirs raises OSError on failure.
+          logging.error('Unable to create screenshot dir: %s',
+                         options.screenshot_dir)
+          options.screenshot_dir = None
+      if options.screenshot_dir:
+        self.screenshot_dir = options.screenshot_dir
+        self.handlers.append(
+            (POST_IMAGE_URL_PREFIX, self.handle_possible_post_image))
+
+  def handle(self, request):
+    """Dispatches requests to matching handlers.
+
+    Args:
+      request: an http request
+    Returns:
+      ArchivedHttpResponse or None.
+    """
+    for prefix, handler in self.handlers:
+      if request.full_path.startswith(prefix):
+        return handler(request, request.full_path[len(prefix):])
+    return None
+
+  def get_generator_url_response_code(self, request, url_suffix):
+    """Parse special generator URLs for the embedded response code.
+
+    Args:
+      request: an ArchivedHttpRequest instance
+      url_suffix: string that is after the handler prefix (e.g. 304)
+    Returns:
+      On a match, an ArchivedHttpResponse.
+      Otherwise, None.
+    """
+    del request
+    try:
+      response_code = int(url_suffix)
+      return SimpleResponse(response_code)
+    except ValueError:
+      return None
+
+  def handle_possible_post_image(self, request, url_suffix):
+    """If sent, saves embedded image to local directory.
+
+    Expects a special url containing the filename. If sent, saves the base64
+    encoded request body as a PNG image locally. This feature is enabled by
+    passing in screenshot_dir to the initializer for this class.
+
+    Args:
+      request: an ArchivedHttpRequest instance
+      url_suffix: string that is after the handler prefix (e.g. 'foo.png')
+    Returns:
+      On a match, an ArchivedHttpResponse.
+      Otherwise, None.
+    """
+    basename = url_suffix
+    if not basename:
+      return None
+
+    data = request.request_body
+    if not data.startswith(IMAGE_DATA_PREFIX):
+      logging.error('Unexpected image format for: %s', basename)
+      return SimpleResponse(400)
+
+    data = data[len(IMAGE_DATA_PREFIX):]
+    png = base64.b64decode(data)
+    filename = os.path.join(self.screenshot_dir,
+                            '%s-%s.png' % (request.host, basename))
+    if not os.access(self.screenshot_dir, os.W_OK):
+      logging.error('Unable to write to: %s', filename)
+      return SimpleResponse(400)
+
+    with open(filename, 'wb') as f:  # binary mode: the body is PNG data.
+      f.write(png)
+    return SimpleResponse(200)
+
+  def add_server_manager_handler(self, server_manager):
+    """Add the ability to change the server mode (e.g. to record mode).
+    Args:
+      server_manager: a servermanager.ServerManager instance.
+    """
+    self.server_manager = server_manager
+    self.handlers.append(
+        (COMMAND_URL_PREFIX, self.handle_server_manager_command))
+
+  def handle_server_manager_command(self, request, url_suffix):
+    """Parse special URLs for the embedded server manager command.
+
+    Clients like webpagetest.org can use URLs of this form to change
+    the replay server from record mode to replay mode.
+
+    This handler is not in the default list of handlers. Call
+    add_server_manager_handler to add it.
+
+    In the future, this could be expanded to save or serve archive files.
+
+    Args:
+      request: an ArchivedHttpRequest instance
+      url_suffix: string that is after the handler prefix (e.g. 'record')
+    Returns:
+      On a match, an ArchivedHttpResponse.
+      Otherwise, None.
+    """
+    command = url_suffix
+    if command == 'record':
+      self.server_manager.SetRecordMode()
+      return SimpleResponse(200)
+    elif command == 'replay':
+      self.server_manager.SetReplayMode()
+      return SimpleResponse(200)
+    elif command == 'status':
+      status = {}
+      is_record_mode = self.server_manager.IsRecordMode()
+      status['is_record_mode'] = is_record_mode
+      status['options'] = json.loads(str(self.options))
+      archive_stats = self.http_archive.stats()
+      if archive_stats:
+        status['archive_stats'] = json.loads(archive_stats)
+      return JsonResponse(status)
+    elif command == 'exit':
+      self.server_manager.should_exit = True
+      return SimpleResponse(200)
+    elif command == 'log':
+      logging.info('log command: %s', str(request.request_body)[:1000000])
+      return SimpleResponse(200)
+    return None
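For reference, a sketch of how a client can exercise the special URLs described in the module docstring (the host and port below are hypothetical, and the command URLs respond only after add_server_manager_handler() has been called):

```python
import json
import urllib2

BASE = 'http://127.0.0.1:8080'  # hypothetical replay server address

# The generator handler turns the URL suffix into a response code.
try:
  urllib2.urlopen(BASE + '/web-page-replay-generate-404')
except urllib2.HTTPError as e:
  print e.code  # 404

# The command handler reports the server mode and archive stats.
status = json.loads(
    urllib2.urlopen(BASE + '/web-page-replay-command-status').read())
print status['is_record_mode']
```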
diff --git a/catapult/telemetry/third_party/webpagereplay/daemonserver.py b/catapult/telemetry/third_party/webpagereplay/daemonserver.py
new file mode 100644
index 0000000..371c654
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/daemonserver.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+# Copyright 2010 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import threading
+
+
+class DaemonServer(object):
+  """Base class which manages creation and cleanup of daemon style servers."""
+
+  def __enter__(self):
+    # TODO: Because of python's Global Interpreter Lock (GIL), the threads
+    # will run on the same CPU. Consider using processes instead because
+    # the components do not need to communicate with each other. On Linux,
+    # "taskset" could be used to assign each process to specific CPU/core.
+    # Of course, only bother with this if the processing speed is an issue.
+    # Some related discussion: http://stackoverflow.com/questions/990102/python-
+    # global-interpreter-lock-gil-workaround-on-multi-core-systems-using-tasks
+    thread = threading.Thread(target=self.serve_forever)
+    thread.daemon = True  # Python exits when no non-daemon threads are left.
+    thread.start()
+    return self
+
+  def __exit__(self, unused_exc_type, unused_exc_val, unused_exc_tb):
+    self.cleanup()
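A hypothetical subclass illustrates the pattern: mix DaemonServer into any server that provides serve_forever() and cleanup(), then drive it with a with statement (EchoServer below is illustrative only, not part of webpagereplay):

```python
import SocketServer

import daemonserver


class _EchoHandler(SocketServer.BaseRequestHandler):
  def handle(self):
    self.request.sendall(self.request.recv(1024))  # echo back what was sent


class EchoServer(SocketServer.TCPServer, daemonserver.DaemonServer):
  def __init__(self):
    SocketServer.TCPServer.__init__(self, ('localhost', 0), _EchoHandler)

  def cleanup(self):
    self.shutdown()      # stop serve_forever() running in the daemon thread
    self.server_close()


with EchoServer() as server:
  print 'listening on port %d' % server.server_address[1]
  # ... issue requests against the server here ...
```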
diff --git a/catapult/telemetry/third_party/webpagereplay/deterministic.js b/catapult/telemetry/third_party/webpagereplay/deterministic.js
new file mode 100644
index 0000000..a8e0ce9
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/deterministic.js
@@ -0,0 +1,62 @@
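+// Replace Math.random() (and crypto.getRandomValues(), when present) with a
+// counter-based pseudo-random sequence so that recorded pages behave the same
+// way on every replay.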
+(function () {
+  var random_count = 0;
+  var random_count_threshold = 25;
+  var random_seed = 0.462;
+  Math.random = function() {
+    random_count++;
+    if (random_count > random_count_threshold){
+     random_seed += 0.1;
+     random_count = 1;
+    }
+    return (random_seed % 1);
+  };
+  if (typeof(crypto) == 'object' &&
+      typeof(crypto.getRandomValues) == 'function') {
+    crypto.getRandomValues = function(arr) {
+      var scale = Math.pow(256, arr.BYTES_PER_ELEMENT);
+      for (var i = 0; i < arr.length; i++) {
+        arr[i] = Math.floor(Math.random() * scale);
+      }
+      return arr;
+    };
+  }
+})();
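+// Replace Date so that parameterless construction and Date.now() advance from
+// a fixed seed, and pin getTimezoneOffset() to US Pacific time (PST/PDT as of
+// 2010).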
+(function () {
+  var date_count = 0;
+  var date_count_threshold = 25;
+  var orig_date = Date;
+  var time_seed = 1204251968254;
+  Date = function() {
+    if (this instanceof Date) {
+      date_count++;
+      if (date_count > date_count_threshold){
+        time_seed += 50;
+        date_count = 1;
+      }
+      switch (arguments.length) {
+      case 0: return new orig_date(time_seed);
+      case 1: return new orig_date(arguments[0]);
+      default: return new orig_date(arguments[0], arguments[1],
+         arguments.length >= 3 ? arguments[2] : 1,
+         arguments.length >= 4 ? arguments[3] : 0,
+         arguments.length >= 5 ? arguments[4] : 0,
+         arguments.length >= 6 ? arguments[5] : 0,
+         arguments.length >= 7 ? arguments[6] : 0);
+      }
+    }
+    return new Date().toString();
+  };
+  Date.__proto__ = orig_date;
+  Date.prototype = orig_date.prototype;
+  Date.prototype.constructor = Date;
+  orig_date.now = function() {
+    return new Date().getTime();
+  };
+  orig_date.prototype.getTimezoneOffset = function() {
+    var dst2010Start = 1268560800000;
+    var dst2010End = 1289120400000;
+    if (this.getTime() >= dst2010Start && this.getTime() < dst2010End)
+      return 420;
+    return 480;
+  };
+})();
diff --git a/catapult/telemetry/third_party/webpagereplay/dnsproxy.py b/catapult/telemetry/third_party/webpagereplay/dnsproxy.py
new file mode 100644
index 0000000..a913d84
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/dnsproxy.py
@@ -0,0 +1,295 @@
+#!/usr/bin/env python
+# Copyright 2010 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import daemonserver
+import errno
+import logging
+import socket
+import SocketServer
+import threading
+import time
+
+from third_party.dns import flags
+from third_party.dns import message
+from third_party.dns import rcode
+from third_party.dns import resolver
+from third_party.dns import rdatatype
+from third_party import ipaddr
+
+
+
+class DnsProxyException(Exception):
+  pass
+
+
+class RealDnsLookup(object):
+  def __init__(self, name_servers):
+    if '127.0.0.1' in name_servers:
+      raise DnsProxyException(
+          'Invalid nameserver: 127.0.0.1 (causes an infinite loop)')
+    self.resolver = resolver.get_default_resolver()
+    self.resolver.nameservers = name_servers
+    self.dns_cache_lock = threading.Lock()
+    self.dns_cache = {}
+
+  @staticmethod
+  def _IsIPAddress(hostname):
+    try:
+      socket.inet_aton(hostname)
+      return True
+    except socket.error:
+      return False
+
+  def __call__(self, hostname, rdtype=rdatatype.A):
+    """Return real IP for a host.
+
+    Args:
+      hostname: a hostname ending with a period (e.g. "www.google.com.")
+      rdtype: the query type (1 for 'A', 28 for 'AAAA')
+    Returns:
+      the IP address as a string (e.g. "192.168.25.2")
+    """
+    if self._IsIPAddress(hostname):
+      return hostname
+    self.dns_cache_lock.acquire()
+    ip = self.dns_cache.get(hostname)
+    self.dns_cache_lock.release()
+    if ip:
+      return ip
+    try:
+      answers = self.resolver.query(hostname, rdtype)
+    except resolver.NXDOMAIN:
+      return None
+    except resolver.NoNameservers:
+      logging.debug('_real_dns_lookup(%s) -> No nameserver.',
+                    hostname)
+      return None
+    except (resolver.NoAnswer, resolver.Timeout) as ex:
+      logging.debug('_real_dns_lookup(%s) -> None (%s)',
+                    hostname, ex.__class__.__name__)
+      return None
+    if answers:
+      ip = str(answers[0])
+    self.dns_cache_lock.acquire()
+    self.dns_cache[hostname] = ip
+    self.dns_cache_lock.release()
+    return ip
+
+  def ClearCache(self):
+    """Clear the dns cache."""
+    self.dns_cache_lock.acquire()
+    self.dns_cache.clear()
+    self.dns_cache_lock.release()
+
+
+class ReplayDnsLookup(object):
+  """Resolve DNS requests to replay host."""
+  def __init__(self, replay_ip, filters=None):
+    self.replay_ip = replay_ip
+    self.filters = filters or []
+
+  def __call__(self, hostname):
+    ip = self.replay_ip
+    for f in self.filters:
+      ip = f(hostname, default_ip=ip)
+    return ip
+
+
+class PrivateIpFilter(object):
+  """Resolve private hosts to their real IPs and others to the Web proxy IP.
+
+  Hosts in the given http_archive will resolve to the Web proxy IP without
+  checking the real IP.
+
+  This only supports IPv4 lookups.
+  """
+  def __init__(self, real_dns_lookup, http_archive):
+    """Initialize PrivateIpDnsLookup.
+
+    Args:
+      real_dns_lookup: a function that resolves a host to an IP.
+      http_archive: an instance of a HttpArchive
+        Hosts in the archive will always resolve to the Web proxy IP.
+    """
+    self.real_dns_lookup = real_dns_lookup
+    self.http_archive = http_archive
+    self.InitializeArchiveHosts()
+
+  def __call__(self, host, default_ip):
+    """Return real IPv4 for private hosts and Web proxy IP otherwise.
+
+    Args:
+      host: a hostname ending with a period (e.g. "www.google.com.")
+    Returns:
+      IP address as a string or None (if lookup fails)
+    """
+    ip = default_ip
+    if host not in self.archive_hosts:
+      real_ip = self.real_dns_lookup(host)
+      if real_ip:
+        if ipaddr.IPAddress(real_ip).is_private:
+          ip = real_ip
+      else:
+        ip = None
+    return ip
+
+  def InitializeArchiveHosts(self):
+    """Recompute the archive_hosts from the http_archive."""
+    self.archive_hosts = set('%s.' % req.host.split(':')[0]
+                             for req in self.http_archive)
+
+
+class DelayFilter(object):
+  """Add a delay to replayed lookups."""
+
+  def __init__(self, is_record_mode, delay_ms):
+    self.is_record_mode = is_record_mode
+    self.delay_ms = int(delay_ms)
+
+  def __call__(self, host, default_ip):
+    if not self.is_record_mode:
+      time.sleep(self.delay_ms / 1000.0)  # delay_ms is milliseconds; sleep() takes seconds.
+    return default_ip
+
+  def SetRecordMode(self):
+    self.is_record_mode = True
+
+  def SetReplayMode(self):
+    self.is_record_mode = False
+
+
+class UdpDnsHandler(SocketServer.DatagramRequestHandler):
+  """Resolve DNS queries to localhost.
+
+  Possible alternative implementation:
+  http://howl.play-bow.org/pipermail/dnspython-users/2010-February/000119.html
+  """
+
+  STANDARD_QUERY_OPERATION_CODE = 0
+
+  def handle(self):
+    """Handle a DNS query.
+
+    IPv6 requests (with rdtype AAAA) receive mismatched IPv4 responses
+    (with rdtype A). To properly support IPv6, the http proxy would
+    need both types of addresses. By default, Windows XP does not
+    support IPv6.
+    """
+    self.data = self.rfile.read()
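+    # DNS header layout: bytes 0-1 are the transaction ID, bytes 2-3 the
+    # flags, and bytes 4-5 the question count. The two one-byte reads below
+    # together carry the 2-byte transaction ID echoed back in the response.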
+    self.transaction_id = self.data[0]
+    self.flags = self.data[1]
+    self.qa_counts = self.data[4:6]
+    self.domain = ''
+    operation_code = (ord(self.data[2]) >> 3) & 15
+    if operation_code == self.STANDARD_QUERY_OPERATION_CODE:
+      self.wire_domain = self.data[12:]
+      self.domain = self._domain(self.wire_domain)
+    else:
+      logging.debug("DNS request with non-zero operation code: %s",
+                    operation_code)
+    ip = self.server.dns_lookup(self.domain)
+    if ip is None:
+      logging.debug('dnsproxy: %s -> NXDOMAIN', self.domain)
+      response = self.get_dns_no_such_name_response()
+    else:
+      if ip == self.server.server_address[0]:
+        logging.debug('dnsproxy: %s -> %s (replay web proxy)', self.domain, ip)
+      else:
+        logging.debug('dnsproxy: %s -> %s', self.domain, ip)
+      response = self.get_dns_response(ip)
+    self.wfile.write(response)
+
+  @classmethod
+  def _domain(cls, wire_domain):
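+    # Decode a DNS name from wire format: each label is prefixed by its
+    # length and the name is terminated by a zero-length label, e.g.
+    # '\x03www\x07example\x03com\x00' -> 'www.example.com.'.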
+    domain = ''
+    index = 0
+    length = ord(wire_domain[index])
+    while length:
+      domain += wire_domain[index + 1:index + length + 1] + '.'
+      index += length + 1
+      length = ord(wire_domain[index])
+    return domain
+
+  def get_dns_response(self, ip):
+    packet = ''
+    if self.domain:
+      packet = (
+          self.transaction_id +
+          self.flags +
+          '\x81\x80' +        # standard query response, no error
+          self.qa_counts * 2 + '\x00\x00\x00\x00' +  # Q&A counts
+          self.wire_domain +
+          '\xc0\x0c'          # pointer to domain name
+          '\x00\x01'          # resource record type ("A" host address)
+          '\x00\x01'          # class of the data
+          '\x00\x00\x00\x3c'  # ttl (seconds)
+          '\x00\x04' +        # resource data length (4 bytes for ip)
+          socket.inet_aton(ip)
+          )
+    return packet
+
+  def get_dns_no_such_name_response(self):
+    query_message = message.from_wire(self.data)
+    response_message = message.make_response(query_message)
+    response_message.flags |= flags.AA | flags.RA
+    response_message.set_rcode(rcode.NXDOMAIN)
+    return response_message.to_wire()
+
+
+class DnsProxyServer(SocketServer.ThreadingUDPServer,
+                     daemonserver.DaemonServer):
+  # Increase the request queue size. The default value, 5, is set in
+  # SocketServer.TCPServer (the parent of BaseHTTPServer.HTTPServer).
+  # Since we're intercepting many domains through this single server,
+  # it is quite possible to get more than 5 concurrent requests.
+  request_queue_size = 256
+
+  # Allow sockets to be reused. See
+  # http://svn.python.org/projects/python/trunk/Lib/SocketServer.py for more
+  # details.
+  allow_reuse_address = True
+
+  # Don't prevent python from exiting when there is thread activity.
+  daemon_threads = True
+
+  def __init__(self, host='', port=53, dns_lookup=None):
+    """Initialize DnsProxyServer.
+
+    Args:
+      host: a host string (name or IP) to bind the dns proxy and to which
+        DNS requests will be resolved.
+      port: an integer port on which to bind the proxy.
+      dns_lookup: a callable that maps a hostname to an IP address. Defaults
+        to resolving every host to this server's own address.
+    """
+    try:
+      SocketServer.ThreadingUDPServer.__init__(
+          self, (host, port), UdpDnsHandler)
+    except socket.error, (error_number, msg):
+      if error_number == errno.EACCES:
+        raise DnsProxyException(
+            'Unable to bind DNS server on (%s:%s)' % (host, port))
+      raise
+    self.dns_lookup = dns_lookup or (lambda host: self.server_address[0])
+    self.server_port = self.server_address[1]
+    logging.warning('DNS server started on %s:%d', self.server_address[0],
+                                                   self.server_address[1])
+
+  def cleanup(self):
+    try:
+      self.shutdown()
+      self.server_close()
+    except KeyboardInterrupt, e:
+      pass
+    logging.info('Stopped DNS server')
diff --git a/catapult/telemetry/third_party/webpagereplay/documentation/Contributing.md b/catapult/telemetry/third_party/webpagereplay/documentation/Contributing.md
new file mode 100644
index 0000000..492649b
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/documentation/Contributing.md
@@ -0,0 +1,41 @@
+# Contributing
+
+1. Thanks for considering contributing to Web Page Replay. You're awesome!
+2. Style Guide - The source code of Web Page Replay follows the [Google
+Python Style
+Guide](http://google-styleguide.googlecode.com/svn/trunk/pyguide.html) so you should familiarize yourself with those
+guidelines. You may also wish to email web-page-replay-dev at
+googlegroups.com for advice on your change before starting.
+3. Get the code - Fork this repo and clone it locally.
+4. Get a review - All submissions, including submissions by project members,
+require review.
+
+## Using rietveld
+
+1. Make sure that you have a fork of the original repo.
+2. Make your changes.
+3. Commit your changes.
+4. Run 'yes "" | git cl config' (first time only).
+5. Run 'git cl upload'.
+6. Once the review is approved, run 'git cl land' to land your changes. This also
+pushes your change to your forked branch.
+7. Log in to your GitHub account and make a pull request to merge the change
+from your forked branch into the original repo.
+
+## The fine print
+
+Before we can use your code you have to sign the [Google Individual
+Contributor License
+Agreement](http://code.google.com/legal/individual-cla-v1.0.html), which you can do online. This is mainly
+because you own the copyright to your changes, even after your
+contribution becomes part of our codebase, so we need your permission to
+use and distribute your code. We also need to be sure of various other
+things, for instance that you'll tell us if you know that your code
+infringes on other people's patents. You don't have to do this until
+after you've submitted your code for review and a member has approved
+it, but you will have to do it before we can put your code into our
+codebase.
+
+Contributions made by corporations are covered by a different agreement
+than the one above, the [Software Grant and Corporate Contributor License
+Agreement](http://code.google.com/legal/corporate-cla-v1.0.html).
diff --git a/catapult/telemetry/third_party/webpagereplay/documentation/GettingStarted.md b/catapult/telemetry/third_party/webpagereplay/documentation/GettingStarted.md
new file mode 100644
index 0000000..90790b4
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/documentation/GettingStarted.md
@@ -0,0 +1,169 @@
+# Prerequisites
+* A Mac running OS X 10.6 ("Snow Leopard") or Linux (tested with Ubuntu
+Lucid). Support for Windows is still experimental.
+* [Python 2.6](http://www.python.org/download/releases/2.6.6/)
+
+# Install
+Only do this the first time.
+
+1. Open the Terminal application and download the source.
+```
+$ git clone https://github.com/chromium/web-page-replay.git
+```
+2. Move to the newly created directory.
+```
+$ cd web-page-replay
+```
+## Linux-specific install steps
+On Linux, Dummynet must be installed to simulate network conditions.
+
+1. For the Linux code, try downloading the [latest linux sources from Marta
+Carbone](http://info.iet.unipi.it/~marta/dummynet/). These are more up-to-date than what is found on the [Dummynet
+homepage](http://info.iet.unipi.it/~luigi/dummynet/).
+2. Build and install:
+```
+$ tar -C /tmp -xvzf ipfw3-20120119.tgz
+$ cd /tmp/ipfw3-20120119
+$ make
+[Ignore output like the following:]
+        echo "  ERROR: Kernel configuration is invalid.";\
+        echo "         include/generated/autoconf.h or
+include/config/auto.conf are missing.";\
+        echo "         Run 'make oldconfig && make prepare' on kernel
+src to fix it.";\
+[The lines will print without "echo" if there is an actual error.]
+$ sudo insmod dummynet2/ipfw_mod.ko
+$ sudo cp ipfw/ipfw /usr/local/sbin
+```
+3. To remove it later
+```
+$ sudo rmmod ipfw_mod.ko
+```
+## Windows-specific install steps
+*Windows support is experimental and not well tested.* On Windows XP, the
+Dummynet driver must be installed to simulate network conditions
+(Drivers for Windows Vista and Windows 7 are currently unavailable).
+
+1. Control Panel -> Network Connections -> Right-click adapter in use ->
+select Properties
+2. Click Install... -> Service -> Add... -> Have Disk...
+3. Browse... ->
+web-page-replay-read-only\third_party\ipfw_win32\netipfw.inf
+4. Click Open -> Ok -> Ok
+  - Accept any warnings for installing an unknown driver
+
+# Record
+First you must record the web page or pages that you wish to replay.
+
+1. Open the web browser you wish to use and clear its cache so that all
+resources will be requested from the network.
+2. Switch to the Terminal application and start the program in record mode.
+All HTTP requests performed on the machine while it is running will be
+saved into the archive.
+```
+$ sudo ./replay.py --record ~/archive.wpr
+```
+3. Load the web page or pages in the open web browser. Be sure to wait
+until each is fully loaded.
+4. Stop recording by killing the replay.py process with Ctrl+c. The archive
+will be saved to ~/archive.wpr.
+
+# Replay
+After you have created an archive, you may later replay it at any time.
+
+1. Start the program in replay mode with a previously recorded archive.
+```
+$ sudo ./replay.py ~/archive.wpr
+```
+2. Load recorded pages in a web browser. A 404 will be served for any pages
+or resources not in the recorded archive.
+3. Stop replaying by killing the replay.py process with Ctrl+c.
+
+## Network simulation examples
+During replay, you may simulate desired network conditions. This is
+useful for benchmarking.
+
+* 128KByte/s uplink bandwidth, 4Mbit/s downlink bandwidth, and 100ms RTT
+```
+$ sudo ./replay.py --up 128KByte/s --down 4Mbit/s --delay_ms=100 archive.wpr
+```
+* 1% packet loss rate
+```
+$ sudo ./replay.py --packet_loss_rate=0.01 ~/archive.wpr
+```
+
+## Using browser proxy settings
+You may choose to disable the forwarding of DNS requests to the local
+replay server. If DNS request forwarding is disabled, an external
+mechanism must be used to forward traffic to the replay server.
+
+* Disable DNS forwarding
+```
+$ ./replay.py --no-dns_forwarding --record ~/archive.wpr
+```
+* Forward traffic to the replay server (via Google Chrome on Linux)
+1. Go to Chrome Preferences -> Under the Hood -> Change Proxy Settings
+2. Under Manual Proxy configuration -> HTTP proxy, enter 127.0.0.1 for the IP
+and the port that Web Page Replay is configured to listen on (default
+80).
+
+Alternatively, traffic forwarding may also be configured via command
+line flags.
+```
+$ google-chrome --host-resolver-rules="MAP * 127.0.0.1:80,EXCLUDE localhost"
+```
+
+# HTTPS/SSL support
+By default, Web Page Replay creates a self-signed certificate to serve
+SSL traffic. In order for it to work, browsers need to be configured to
+ignore certificate errors. Be aware that doing so opens a giant security
+hole.
+
+```
+$ google-chrome --ignore-certificate-errors
+```
+
+Firefox has [a configuration file for
+exceptions](https://developer.mozilla.org/En/Cert_override.txt), but it requires
+listing each host that gets used. If you know of a better solution, please let
+us know. IE and Safari options are also needed.
+
+To turn off SSL support, run replay.py with "--no-ssl".
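+
+For example, to replay the archive recorded above with SSL disabled:
+```
+$ sudo ./replay.py --no-ssl ~/archive.wpr
+```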
+
+# Troubleshooting
+
+## Permission errors
+
+On Linux, either of the following two errors indicates a permission problem:
+
+```
+python: can't open file './replay.py': [Errno 13] Permission denied
+```
+```
+Traceback (most recent call last):
+  File "./replay.py", line 50, in <module>
+    import dnsproxy
+  File "/home/slamm/p/wpr/dnsproxy.py", line 19, in <module>
+    import platformsettings
+ImportError: No module named platformsettings
+```
+This can happen if you check out the files to an NFS directory. Either
+move the files to a local directory, or make them world
+readable/executable.
+
+## Unable to access auto mounted directories
+WPR can cause autofs to hang. On Ubuntu, the following command fixes it:
+
+```
+$ sudo restart autofs
+```
+
+# Help
+
+For full usage instructions and advanced options, see the program's
+help.
+
+```
+$ ./replay.py --help
+```
diff --git a/catapult/telemetry/third_party/webpagereplay/documentation/Rules.md b/catapult/telemetry/third_party/webpagereplay/documentation/Rules.md
new file mode 100644
index 0000000..3517635
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/documentation/Rules.md
@@ -0,0 +1,95 @@
+WebPageReplay Rule Language
+===========================
+
+WebPageReplay rules allow developers to customize record/replay handling.
+
+Motivation
+-----------
+
+Web sites often require custom replay logic, e.g.:
+
+  1. The recording uploads various metrics/logs to:
+
+        http://example.com/gen_204?emsg=foo
+
+     but, during replay, it uploads different parameters:
+
+        http://example.com/gen_204?emsg=bar
+
+     so (as-is) our replay fails.  We want "*/gen_204" to always respond
+     "HTTP 204 No Change".
+
+  2. The recording fetches data from one server:
+
+        http://mirrorA.example.com/stuff
+
+     but replay selects a different server:
+
+        http://mirrorB.example.com/stuff
+
+     which breaks replay.  We want "mirror*.example.com/stuff" to be equivalent.
+
+  3. The recorded URL + response contains a UID, e.g.:
+
+        http://example.com?q=foo  -->  "you sent foo."
+
+     but the replay asks for:
+
+        http://example.com?q=bar  -->  replay error!
+
+     We want it to reply "you sent bar."
+
+We could hack all the above rules into the code, but that can't be (cleanly) extended or open sourced.
+
+Instead, we want a simple config file of "predicate --> action" rules.
+
+
+Format
+------
+
+The JSON-formatted rule file is specified on the command line:
+
+    replay.py ... --rules_path my_rules ...
+
+The rules file must contain an array of single-item objects, e.g.:
+
+    [{"comment": "ignore me"},
+     {"LogUrl": {"url": "example\\.com/logme.*"}},
+     {"LogUrl": {"url": "example\\.com/someotherpath"}}
+    ]
+
+All "comment" items are ignored and support arbitrary values, e.g., a string
+or commented-out rule(s).
+
+All other items must specify a string TYPE key and object ARGS value, e.g.:
+
+     {"LogUrl": {"url": "example\\.com/test", "stop": false}}
+
+The default TYPE package is "rules", and the rule_parser's default
+"allowed_imports" is similarly restricted to only allow "rules" classes.
+
+The TYPE implementation class must match the Rule API defined in
+"rules/rule.py":
+
+  class Rule(object):
+    def IsType(self, rule_type_name): ...
+    def ApplyRule(self, return_value, request, response): ...
+
+The ARGS must match the rule-specific constructor, e.g.:
+
+    class LogUrl(rule.Rule):
+      def __init__(self, url, stop=False):
+        self._url_re = re.compile(url)
+        self._stop = stop
+      ...
+
+All rules of the same rule_type_name are chained together and applied in the
+same order as they appear in the input JSON file.
+
+
+Rules
+-------
+
+### rules.LogUrl:
+
+If the url pattern matches then log the request URL.
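+
+For illustration, a complete rule following this API might look like the
+sketch below. Only the IsType/ApplyRule signatures and the LogUrl constructor
+come from this document; the type name check, the logging call, and the
+return value are assumptions, and the authoritative contract lives in
+"rules/rule.py":
+
+    import logging
+    import re
+
+    from rules import rule
+
+
+    class LogUrl(rule.Rule):
+      """Logs the request URL when it matches the given pattern."""
+
+      def __init__(self, url, stop=False):
+        self._url_re = re.compile(url)
+        self._stop = stop
+
+      def IsType(self, rule_type_name):
+        return rule_type_name == 'log_url'
+
+      def ApplyRule(self, return_value, request, response):
+        # search() rather than match() so patterns like "example\\.com/logme.*"
+        # can hit anywhere in host + full_path.
+        if self._url_re.search(request.host + request.full_path):
+          logging.debug('log_url: %s%s', request.host, request.full_path)
+        # Pass the chained value through; the 'stop' semantics are assumed.
+        return self._stop, return_value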
diff --git a/catapult/telemetry/third_party/webpagereplay/documentation/WebPageReplayDiagram.png b/catapult/telemetry/third_party/webpagereplay/documentation/WebPageReplayDiagram.png
new file mode 100644
index 0000000..fc98922
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/documentation/WebPageReplayDiagram.png
Binary files differ
diff --git a/catapult/telemetry/third_party/webpagereplay/exception_formatter.py b/catapult/telemetry/third_party/webpagereplay/exception_formatter.py
new file mode 100644
index 0000000..8ceb0d4
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/exception_formatter.py
@@ -0,0 +1,96 @@
+# Copyright 2015 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+import os
+import sys
+import traceback
+
+
+def PrintFormattedException(msg=None):
+  exception_class, exception, tb = sys.exc_info()
+
+  def _GetFinalFrame(tb_level):
+    while tb_level.tb_next:
+      tb_level = tb_level.tb_next
+    return tb_level.tb_frame
+
+  processed_tb = traceback.extract_tb(tb)
+  frame = _GetFinalFrame(tb)
+  exception_list = traceback.format_exception_only(exception_class, exception)
+  exception_string = '\n'.join(l.strip() for l in exception_list)
+
+  if msg:
+    print >> sys.stderr
+    print >> sys.stderr, msg
+
+  _PrintFormattedTrace(processed_tb, frame, exception_string)
+
+
+def PrintFormattedFrame(frame, exception_string=None):
+  _PrintFormattedTrace(traceback.extract_stack(frame), frame, exception_string)
+
+
+def _PrintFormattedTrace(processed_tb, frame, exception_string=None):
+  """Prints an Exception in a more useful format than the default.
+  """
+  print >> sys.stderr
+
+  # Format the traceback.
+  base_dir = os.path.dirname(__file__)
+  print >> sys.stderr, 'Traceback (most recent call last):'
+  for filename, line, function, text in processed_tb:
+    filename = os.path.abspath(filename)
+    if filename.startswith(base_dir):
+      filename = filename[len(base_dir)+1:]
+    print >> sys.stderr, '  %s at %s:%d' % (function, filename, line)
+    print >> sys.stderr, '    %s' % text
+
+  # Format the exception.
+  if exception_string:
+    print >> sys.stderr, exception_string
+
+  # Format the locals.
+  local_variables = [(variable, value) for variable, value in
+                     frame.f_locals.iteritems() if variable != 'self']
+  print >> sys.stderr
+  print >> sys.stderr, 'Locals:'
+  if local_variables:
+    longest_variable = max(len(v) for v, _ in local_variables)
+    for variable, value in sorted(local_variables):
+      value = repr(value)
+      possibly_truncated_value = _AbbreviateMiddleOfString(value, ' ... ', 1024)
+      truncation_indication = ''
+      if len(possibly_truncated_value) != len(value):
+        truncation_indication = ' (truncated)'
+      print >> sys.stderr, '  %s: %s%s' % (variable.ljust(longest_variable + 1),
+                                           possibly_truncated_value,
+                                           truncation_indication)
+  else:
+    print >> sys.stderr, '  No locals!'
+
+  print >> sys.stderr
+  sys.stderr.flush()
+
+
+def _AbbreviateMiddleOfString(target, middle, max_length):
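+  # Replaces the middle of |target| with |middle| so that the result is at
+  # most max_length characters, e.g. ('abcdefgh', ' ... ', 7) -> 'a ... h'.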
+  if max_length < 0:
+    raise ValueError('Must provide positive max_length')
+  if len(middle) > max_length:
+    raise ValueError('middle must not be greater than max_length')
+
+  if len(target) <= max_length:
+    return target
+  half_length = (max_length - len(middle)) / 2.
+  return (target[:int(math.floor(half_length))] + middle +
+          target[-int(math.ceil(half_length)):])
diff --git a/catapult/telemetry/third_party/webpagereplay/httparchive.py b/catapult/telemetry/third_party/webpagereplay/httparchive.py
new file mode 100755
index 0000000..388fc66
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/httparchive.py
@@ -0,0 +1,1022 @@
+#!/usr/bin/env python
+# Copyright 2010 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""View and edit HTTP Archives.
+
+To list all URLs in an archive:
+  $ ./httparchive.py ls archive.wpr
+
+To view the content of all URLs from example.com:
+  $ ./httparchive.py cat --host example.com archive.wpr
+
+To view the content of a particular URL:
+  $ ./httparchive.py cat --host www.example.com --full_path /foo archive.wpr
+
+To view the content of all URLs:
+  $ ./httparchive.py cat archive.wpr
+
+To edit a particular URL:
+  $ ./httparchive.py edit --host www.example.com --full_path /foo archive.wpr
+
+To print statistics of an archive:
+  $ ./httparchive.py stats archive.wpr
+
+To print statistics of a set of URLs:
+  $ ./httparchive.py stats --host www.example.com archive.wpr
+
+To merge multiple archives
+  $ ./httparchive.py merge --merged_file new.wpr archive1.wpr archive2.wpr ...
+"""
+
+import calendar
+import certutils
+import cPickle
+import difflib
+import email.utils
+import httplib
+import httpzlib
+import json
+import logging
+import optparse
+import os
+import StringIO
+import subprocess
+import sys
+import tempfile
+import time
+import urlparse
+from collections import defaultdict
+
+
+
+def LogRunTime(fn):
+  """Annotation which logs the run time of the function."""
+  def wrapped(self, *args, **kwargs):
+    start_time = time.time()
+    try:
+      return fn(self, *args, **kwargs)
+    finally:
+      run_time = (time.time() - start_time) * 1000.0
+      logging.debug('%s: %dms', fn.__name__, run_time)
+  return wrapped
+
+
+class HttpArchiveException(Exception):
+  """Base class for all exceptions in httparchive."""
+  pass
+
+
+class HttpArchive(dict):
+  """Dict with ArchivedHttpRequest keys and ArchivedHttpResponse values.
+
+  Attributes:
+    responses_by_host: dict of {hostname: {request: response}}. This must remain
+        in sync with the underlying dict of self. It is used as an optimization
+        so that get_requests() doesn't have to linearly search all requests in
+        the archive to find potential matches.
+  """
+
+  def __init__(self):  # pylint: disable=super-init-not-called
+    self.responses_by_host = defaultdict(dict)
+
+  def __setstate__(self, state):
+    """Influence how to unpickle.
+
+    Args:
+      state: a dictionary for __dict__
+    """
+    self.__dict__.update(state)
+    self.responses_by_host = defaultdict(dict)
+    for request in self:
+      self.responses_by_host[request.host][request] = self[request]
+
+  def __getstate__(self):
+    """Influence how to pickle.
+
+    Returns:
+      a dict to use for pickling
+    """
+    state = self.__dict__.copy()
+    del state['responses_by_host']
+    return state
+
+  def __setitem__(self, key, value):
+    super(HttpArchive, self).__setitem__(key, value)
+    if hasattr(self, 'responses_by_host'):
+      self.responses_by_host[key.host][key] = value
+
+  def __delitem__(self, key):
+    super(HttpArchive, self).__delitem__(key)
+    del self.responses_by_host[key.host][key]
+
+  def get(self, request, default=None):
+    """Return the archived response for a given request.
+
+    Does extra checking for handling some HTTP request headers.
+
+    Args:
+      request: instance of ArchivedHttpRequest
+      default: default value to return if request is not found
+
+    Returns:
+      Instance of ArchivedHttpResponse or default if no matching
+      response is found
+    """
+    if request in self:
+      return self[request]
+    return self.get_conditional_response(request, default)
+
+  def get_conditional_response(self, request, default):
+    """Get the response based on the conditional HTTP request headers.
+
+    Args:
+      request: an ArchivedHttpRequest representing the original request.
+      default: the default ArchivedHttpResponse to return if no conditional
+          match is found.
+
+    Returns:
+      an ArchivedHttpResponse with a status of 200, 304 (not modified), or
+          412 (precondition failed)
+    """
+    response = default
+    if request.is_conditional():
+      stripped_request = request.create_request_without_conditions()
+      if stripped_request in self:
+        response = self[stripped_request]
+        if response.status == 200:
+          status = self.get_conditional_status(request, response)
+          if status != 200:
+            response = create_response(status)
+    return response
+
+  def get_conditional_status(self, request, response):
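+    """Return the status code to use for a conditional request.
+
+    Returns 200 (OK), 304 (not modified), or 412 (precondition failed) based
+    on the if-match, if-none-match, if-modified-since, and if-unmodified-since
+    request headers.
+    """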
+    status = 200
+    last_modified = email.utils.parsedate(
+        response.update_date(response.get_header('last-modified')))
+    response_etag = response.get_header('etag')
+    is_get_or_head = request.command.upper() in ('GET', 'HEAD')
+
+    match_value = request.headers.get('if-match', None)
+    if match_value:
+      if self.is_etag_match(match_value, response_etag):
+        status = 200
+      else:
+        status = 412  # precondition failed
+    none_match_value = request.headers.get('if-none-match', None)
+    if none_match_value:
+      if self.is_etag_match(none_match_value, response_etag):
+        status = 304
+      elif is_get_or_head:
+        status = 200
+      else:
+        status = 412
+    if is_get_or_head and last_modified:
+      for header in ('if-modified-since', 'if-unmodified-since'):
+        date = email.utils.parsedate(request.headers.get(header, None))
+        if date:
+          if ((header == 'if-modified-since' and last_modified > date) or
+              (header == 'if-unmodified-since' and last_modified < date)):
+            if status != 412:
+              status = 200
+          else:
+            status = 304  # not modified
+    return status
+
+  @staticmethod
+  def is_etag_match(request_etag, response_etag):
+    """Determines whether the entity tags of the request/response matches.
+
+    Args:
+      request_etag: the value string of the "if-(none)-match:"
+                    portion of the request header
+      response_etag: the etag value of the response
+
+    Returns:
+      True on match, False otherwise
+    """
+    response_etag = response_etag.strip('" ')
+    for etag in request_etag.split(','):
+      etag = etag.strip('" ')
+      if etag in ('*', response_etag):
+        return True
+    return False
+
+  def get_requests(self, command=None, host=None, full_path=None, is_ssl=None,
+                   use_query=True):
+    """Return a list of requests that match the given args."""
+    if host:
+      return [r for r in self.responses_by_host[host]
+              if r.matches(command, None, full_path, is_ssl,
+                           use_query=use_query)]
+    else:
+      return [r for r in self
+              if r.matches(command, host, full_path, is_ssl,
+                           use_query=use_query)]
+
+  def ls(self, command=None, host=None, full_path=None):
+    """List all URLs that match given params."""
+    return ''.join(sorted(
+        '%s\n' % r for r in self.get_requests(command, host, full_path)))
+
+  def cat(self, command=None, host=None, full_path=None):
+    """Print the contents of all URLs that match given params."""
+    out = StringIO.StringIO()
+    for request in self.get_requests(command, host, full_path):
+      print >>out, str(request)
+      print >>out, 'Untrimmed request headers:'
+      for k in request.headers:
+        print >>out, '    %s: %s' % (k, request.headers[k])
+      if request.request_body:
+        print >>out, request.request_body
+      print >>out, '---- Response Info', '-' * 51
+      response = self[request]
+      chunk_lengths = [len(x) for x in response.response_data]
+      print >>out, ('Status: %s\n'
+                    'Reason: %s\n'
+                    'Headers delay: %s\n'
+                    'Response headers:') % (
+          response.status, response.reason, response.delays['headers'])
+      for k, v in response.headers:
+        print >>out, '    %s: %s' % (k, v)
+      print >>out, ('Chunk count: %s\n'
+                    'Chunk lengths: %s\n'
+                    'Chunk delays: %s') % (
+          len(chunk_lengths), chunk_lengths, response.delays['data'])
+      body = response.get_data_as_text()
+      print >>out, '---- Response Data', '-' * 51
+      if body:
+        print >>out, body
+      else:
+        print >>out, '[binary data]'
+      print >>out, '=' * 70
+    return out.getvalue()
+
+  def stats(self, command=None, host=None, full_path=None):
+    """Print stats about the archive for all URLs that match given params."""
+    matching_requests = self.get_requests(command, host, full_path)
+    if not matching_requests:
+      print 'Failed to find any requests matching given command, host, path.'
+      return
+
+    out = StringIO.StringIO()
+    stats = {
+        'Total': len(matching_requests),
+        'Domains': defaultdict(int),
+        'HTTP_response_code': defaultdict(int),
+        'content_type': defaultdict(int),
+        'Documents': defaultdict(int),
+        }
+
+    for request in matching_requests:
+      stats['Domains'][request.host] += 1
+      stats['HTTP_response_code'][self[request].status] += 1
+
+      content_type = self[request].get_header('content-type')
+      # Remove content type options for readability and higher level groupings.
+      str_content_type = str(content_type.split(';')[0]
+                            if content_type else None)
+      stats['content_type'][str_content_type] += 1
+
+      #  Documents are the main URL requested and not a referenced resource.
+      if str_content_type == 'text/html' and not 'referer' in request.headers:
+        stats['Documents'][request.host] += 1
+
+    print >>out, json.dumps(stats, indent=4)
+    return out.getvalue()
+
+  def merge(self, merged_archive=None, other_archives=None):
+    """Merge multiple archives into merged_archive by 'chaining' resources,
+    only resources that are not part of the accumlated archive are added"""
+    if not other_archives:
+      print 'No archives passed to merge'
+      return
+
+    # Note we already loaded 'replay_file'.
+    print 'Loaded %d responses' % len(self)
+
+    for archive in other_archives:
+      if not os.path.exists(archive):
+        print 'Error: Replay file "%s" does not exist' % archive
+        return
+
+      http_archive_other = HttpArchive.Load(archive)
+      print 'Loaded %d responses from %s' % (len(http_archive_other), archive)
+      for r in http_archive_other:
+        # Only resources that are not already part of the current archive
+        # get added.
+        if r not in self:
+          print '\t %s ' % r
+          self[r] = http_archive_other[r]
+    self.Persist('%s' % merged_archive)
+
+  def edit(self, command=None, host=None, full_path=None):
+    """Edits the single request which matches given params."""
+    editor = os.getenv('EDITOR')
+    if not editor:
+      print 'You must set the EDITOR environment variable.'
+      return
+
+    matching_requests = self.get_requests(command, host, full_path)
+    if not matching_requests:
+      print ('Failed to find any requests matching given command, host, '
+             'full_path.')
+      return
+
+    if len(matching_requests) > 1:
+      print 'Found multiple matching requests. Please refine.'
+      print self.ls(command, host, full_path)
+
+    response = self[matching_requests[0]]
+    tmp_file = tempfile.NamedTemporaryFile(delete=False)
+    tmp_file.write(response.get_response_as_text())
+    tmp_file.close()
+    subprocess.check_call([editor, tmp_file.name])
+    response.set_response_from_text(''.join(open(tmp_file.name).readlines()))
+    os.remove(tmp_file.name)
+
+  def find_closest_request(self, request, use_path=False):
+    """Find the closest matching request in the archive to the given request.
+
+    Args:
+      request: an ArchivedHttpRequest
+      use_path: If True, closest matching request's path component must match.
+        (Note: this refers to the 'path' component within the URL, not the
+         'full path' which includes the query string component.)
+
+        If use_path=True, candidate will NOT match in example below
+        e.g. request   = GET www.test.com/a?p=1
+             candidate = GET www.test.com/b?p=1
+
+        Even if use_path=False, urls with same paths are always favored.
+        For example, candidate1 is considered a better match than candidate2.
+          request    = GET www.test.com/a?p=1&q=2&r=3
+          candidate1 = GET www.test.com/a?s=4
+          candidate2 = GET www.test.com/b?p=1&q=2&r=3
+
+    Returns:
+      If a close match is found, return the instance of ArchivedHttpRequest.
+      Otherwise, return None.
+    """
+    # Start with strictest constraints. This trims search space considerably.
+    requests = self.get_requests(request.command, request.host,
+                                 request.full_path, is_ssl=request.is_ssl,
+                                 use_query=True)
+    # Relax constraint: use_query if there is no match.
+    if not requests:
+      requests = self.get_requests(request.command, request.host,
+                                   request.full_path, is_ssl=request.is_ssl,
+                                   use_query=False)
+    # Relax constraint: full_path if there is no match and use_path=False.
+    if not requests and not use_path:
+      requests = self.get_requests(request.command, request.host,
+                                   None, is_ssl=request.is_ssl,
+                                   use_query=False)
+
+    if not requests:
+      return None
+
+    if len(requests) == 1:
+      return requests[0]
+
+    matcher = difflib.SequenceMatcher(b=request.cmp_seq)
+
+    # quick_ratio() is cheap to compute, but ratio() is expensive. So we call
+    # quick_ratio() on all requests, sort them descending, and then loop through
+    # until we find a candidate whose ratio() is >= the next quick_ratio().
+    # This works because quick_ratio() is guaranteed to be an upper bound on
+    # ratio().
+    candidates = []
+    for candidate in requests:
+      matcher.set_seq1(candidate.cmp_seq)
+      candidates.append((matcher.quick_ratio(), candidate))
+
+    candidates.sort(reverse=True, key=lambda c: c[0])
+
+    best_match = (0, None)
+    for i in xrange(len(candidates)):
+      matcher.set_seq1(candidates[i][1].cmp_seq)
+      best_match = max(best_match, (matcher.ratio(), candidates[i][1]))
+      if i + 1 < len(candidates) and best_match[0] >= candidates[i+1][0]:
+        break
+    return best_match[1]
+
+  def diff(self, request):
+    """Diff the given request to the closest matching request in the archive.
+
+    Args:
+      request: an ArchivedHttpRequest
+    Returns:
+      If a close match is found, return a textual diff between the requests.
+      Otherwise, return None.
+    """
+    request_lines = request.formatted_request.split('\n')
+    closest_request = self.find_closest_request(request)
+    if closest_request:
+      closest_request_lines = closest_request.formatted_request.split('\n')
+      return '\n'.join(difflib.ndiff(closest_request_lines, request_lines))
+    return None
+
+  def get_server_cert(self, host):
+    """Gets certificate from the server and stores it in archive"""
+    request = ArchivedHttpRequest('SERVER_CERT', host, '', None, {})
+    if request not in self:
+      self[request] = create_response(200, body=certutils.get_host_cert(host))
+    return self[request].response_data[0]
+
+  def get_certificate(self, host):
+    request = ArchivedHttpRequest('DUMMY_CERT', host, '', None, {})
+    if request not in self:
+      self[request] = create_response(200, body=self._generate_cert(host))
+    return self[request].response_data[0]
+
+  @classmethod
+  def AssertWritable(cls, filename):
+    """Raises an IOError if filename is not writable."""
+    persist_dir = os.path.dirname(os.path.abspath(filename))
+    if not os.path.exists(persist_dir):
+      raise IOError('Directory does not exist: %s' % persist_dir)
+    if os.path.exists(filename):
+      if not os.access(filename, os.W_OK):
+        raise IOError('Need write permission on file: %s' % filename)
+    elif not os.access(persist_dir, os.W_OK):
+      raise IOError('Need write permission on directory: %s' % persist_dir)
+
+  @classmethod
+  def Load(cls, filename):
+    """Load an instance from filename."""
+    return cPickle.load(open(filename, 'rb'))
+
+  def Persist(self, filename):
+    """Persist all state to filename."""
+    try:
+      original_checkinterval = sys.getcheckinterval()
+      sys.setcheckinterval(2**31-1)  # Lock out other threads so nothing can
+                                     # modify |self| during pickling.
+      pickled_self = cPickle.dumps(self, cPickle.HIGHEST_PROTOCOL)
+    finally:
+      sys.setcheckinterval(original_checkinterval)
+    with open(filename, 'wb') as f:
+      f.write(pickled_self)
+
+
+class ArchivedHttpRequest(object):
+  """Record all the state that goes into a request.
+
+  ArchivedHttpRequest instances are considered immutable so they can
+  serve as keys for HttpArchive instances.
+  (The immutability is not enforced.)
+
+  Upon creation, the headers are "trimmed" (i.e. edited or dropped)
+  and saved to self.trimmed_headers to allow requests to match in a wider
+  variety of playback situations (e.g. using different user agents).
+
+  For unpickling, 'trimmed_headers' is recreated from 'headers'. That
+  allows for changes to the trim function and can help with debugging.
+  """
+  CONDITIONAL_HEADERS = [
+      'if-none-match', 'if-match',
+      'if-modified-since', 'if-unmodified-since']
+
+  def __init__(self, command, host, full_path, request_body, headers,
+               is_ssl=False):
+    """Initialize an ArchivedHttpRequest.
+
+    Args:
+      command: a string (e.g. 'GET' or 'POST').
+      host: a host name (e.g. 'www.google.com').
+      full_path: a request path.  Includes everything after the host & port in
+          the URL (e.g. '/search?q=dogs').
+      request_body: a request body string for a POST or None.
+      headers: {key: value, ...} where key and value are strings.
+      is_ssl: a boolean which is True iff the request is made via SSL.
+    """
+    self.command = command
+    self.host = host
+    self.full_path = full_path
+    parsed_url = urlparse.urlparse(full_path) if full_path else None
+    self.path = parsed_url.path if parsed_url else None
+    self.request_body = request_body
+    self.headers = headers
+    self.is_ssl = is_ssl
+    self.trimmed_headers = self._TrimHeaders(headers)
+    self.formatted_request = self._GetFormattedRequest()
+    self.cmp_seq = self._GetCmpSeq(parsed_url.query if parsed_url else None)
+
+  def __str__(self):
+    scheme = 'https' if self.is_ssl else 'http'
+    return '%s %s://%s%s %s' % (
+        self.command, scheme, self.host, self.full_path, self.trimmed_headers)
+
+  def __repr__(self):
+    return repr((self.command, self.host, self.full_path, self.request_body,
+                 self.trimmed_headers, self.is_ssl))
+
+  def __hash__(self):
+    """Return a integer hash to use for hashed collections including dict."""
+    return hash(repr(self))
+
+  def __eq__(self, other):
+    """Define the __eq__ method to match the hash behavior."""
+    return repr(self) == repr(other)
+
+  def __setstate__(self, state):
+    """Influence how to unpickle.
+
+    "headers" are the original request headers.
+    "trimmed_headers" are the trimmed headers used for matching requests
+    during replay.
+
+    Args:
+      state: a dictionary for __dict__
+    """
+    if 'full_headers' in state:
+      # Fix older version of archive.
+      state['headers'] = state['full_headers']
+      del state['full_headers']
+    if 'headers' not in state:
+      raise HttpArchiveException(
+          'Archived HTTP request is missing "headers". The HTTP archive is'
+          ' likely from a previous version and must be re-recorded.')
+    if 'path' in state:
+      # before, 'path' and 'path_without_query' were used and 'path' was
+      # pickled.  Now, 'path' has been renamed to 'full_path' and
+      # 'path_without_query' has been renamed to 'path'.  'full_path' is
+      # pickled, but 'path' is not.  If we see 'path' here it means we are
+      # dealing with an older archive.
+      state['full_path'] = state['path']
+      del state['path']
+    state['trimmed_headers'] = self._TrimHeaders(dict(state['headers']))
+    if 'is_ssl' not in state:
+      state['is_ssl'] = False
+    self.__dict__.update(state)
+    parsed_url = urlparse.urlparse(self.full_path)
+    self.path = parsed_url.path
+    self.formatted_request = self._GetFormattedRequest()
+    self.cmp_seq = self._GetCmpSeq(parsed_url.query)
+
+  def __getstate__(self):
+    """Influence how to pickle.
+
+    Returns:
+      a dict to use for pickling
+    """
+    state = self.__dict__.copy()
+    del state['trimmed_headers']
+    del state['path']
+    del state['formatted_request']
+    del state['cmp_seq']
+    return state
+
+  def _GetFormattedRequest(self):
+    """Format request to make diffs easier to read.
+
+    Returns:
+      A string consisting of the request. Example:
+      'GET www.example.com/path\nHeader-Key: header value\n'
+    """
+    parts = ['%s %s%s\n' % (self.command, self.host, self.full_path)]
+    if self.request_body:
+      parts.append('%s\n' % self.request_body)
+    for k, v in self.trimmed_headers:
+      k = '-'.join(x.capitalize() for x in k.split('-'))
+      parts.append('%s: %s\n' % (k, v))
+    return ''.join(parts)
+
+  def _GetCmpSeq(self, query=None):
+    """Compute a sequence out of query and header for difflib to compare.
+    For example:
+      [('q1', 'a1'), ('q2', 'a2'), ('k1', 'v1'), ('k2', 'v2')]
+    will be returned for a request with URL:
+      http://example.com/index.html?q1=a2&q2=a2
+    and header:
+      k1: v1
+      k2: v2
+
+    Args:
+      query: the query string in the URL.
+
+    Returns:
+      A sequence for difflib to compare.
+    """
+    if not query:
+      return self.trimmed_headers
+    return sorted(urlparse.parse_qsl(query)) + self.trimmed_headers
+
+  def matches(self, command=None, host=None, full_path=None, is_ssl=None,
+              use_query=True):
+    """Returns true iff the request matches all parameters.
+
+    Args:
+      command: a string (e.g. 'GET' or 'POST').
+      host: a host name (e.g. 'www.google.com').
+      full_path: a request path with query string (e.g. '/search?q=dogs')
+      is_ssl: whether the request is secure.
+      use_query:
+        If use_query is True, request matching uses both the hierarchical path
+        and query string component.
+        If use_query is False, request matching only uses the hierarchical path
+
+        e.g. req1 = GET www.test.com/index?aaaa
+             req2 = GET www.test.com/index?bbbb
+
+        If use_query is True, req1.matches(req2) evaluates to False
+        If use_query is False, req1.matches(req2) evaluates to True
+
+    Returns:
+      True iff the request matches all parameters
+    """
+    if command is not None and command != self.command:
+      return False
+    if is_ssl is not None and is_ssl != self.is_ssl:
+      return False
+    if host is not None and host != self.host:
+      return False
+    if full_path is None:
+      return True
+    if use_query:
+      return full_path == self.full_path
+    else:
+      return self.path == urlparse.urlparse(full_path).path
+
+  @classmethod
+  def _TrimHeaders(cls, headers):
+    """Removes headers that are known to cause problems during replay.
+
+    These headers are removed for the following reasons:
+    - accept: Causes problems with www.bing.com. During record, CSS is fetched
+              with *. During replay, it's text/css.
+    - accept-charset, accept-language, referer: vary between clients.
+    - cache-control:  sometimes sent from Chrome with 'max-age=0' as value.
+    - connection, method, scheme, url, version: Cause problems with spdy.
+    - cookie: Extremely sensitive to request/response order.
+    - keep-alive: Doesn't affect the content of the request, only some
+      transient state of the transport layer.
+    - user-agent: Changes with every Chrome version.
+    - proxy-connection: Sent for proxy requests.
+    - x-chrome-variations, x-client-data: Unique to each Chrome binary. Used by
+      Google to collect statistics about Chrome's enabled features.
+
+    Another variant to consider is dropping only the value from the header.
+    However, this is particularly bad for the cookie header, because the
+    presence of the cookie depends on the responses we've seen when the request
+    is made.
+
+    Args:
+      headers: {header_key: header_value, ...}
+
+    Returns:
+      [(header_key, header_value), ...]  # (with undesirable headers removed)
+    """
+    # TODO(tonyg): Strip sdch from the request headers because we can't
+    # guarantee that the dictionary will be recorded, so replay may not work.
+    if 'accept-encoding' in headers:
+      accept_encoding = headers['accept-encoding']
+      accept_encoding = accept_encoding.replace('sdch', '')
+      # Strip lzma so Opera's requests matches archives recorded using Chrome.
+      accept_encoding = accept_encoding.replace('lzma', '')
+      stripped_encodings = [e.strip() for e in accept_encoding.split(',')]
+      accept_encoding = ','.join(filter(bool, stripped_encodings))
+      headers['accept-encoding'] = accept_encoding
+    undesirable_keys = [
+        'accept', 'accept-charset', 'accept-language', 'cache-control',
+        'connection', 'cookie', 'keep-alive', 'method',
+        'referer', 'scheme', 'url', 'version', 'user-agent', 'proxy-connection',
+        'x-chrome-variations', 'x-client-data']
+    return sorted([(k, v) for k, v in headers.items()
+                   if k.lower() not in undesirable_keys])
+
+  def is_conditional(self):
+    """Return list of headers that match conditional headers."""
+    for header in self.CONDITIONAL_HEADERS:
+      if header in self.headers:
+        return True
+    return False
+
+  def create_request_without_conditions(self):
+    stripped_headers = dict((k, v) for k, v in self.headers.iteritems()
+                            if k.lower() not in self.CONDITIONAL_HEADERS)
+    return ArchivedHttpRequest(
+        self.command, self.host, self.full_path, self.request_body,
+        stripped_headers, self.is_ssl)
+
+
+class ArchivedHttpResponse(object):
+  """All the data needed to recreate all HTTP response."""
+
+  # CHUNK_EDIT_SEPARATOR is used to edit and view text content.
+  # It is not sent in responses. It is added by get_data_as_text()
+  # and removed by set_data().
+  CHUNK_EDIT_SEPARATOR = '[WEB_PAGE_REPLAY_CHUNK_BOUNDARY]'
+
+  # DELAY_EDIT_SEPARATOR is used to edit and view server delays.
+  DELAY_EDIT_SEPARATOR = ('\n[WEB_PAGE_REPLAY_EDIT_ARCHIVE --- '
+                          'Delays are above. Response content is below.]\n')
+
+  def __init__(self, version, status, reason, headers, response_data,
+               delays=None):
+    """Initialize an ArchivedHttpResponse.
+
+    Args:
+      version: HTTP protocol version used by server.
+          10 for HTTP/1.0, 11 for HTTP/1.1 (same as httplib).
+      status: Status code returned by server (e.g. 200).
+      reason: Reason phrase returned by server (e.g. "OK").
+      headers: list of (header, value) tuples.
+      response_data: list of content chunks.
+          Concatenating the chunks gives the complete contents
+          (i.e. the chunks do not have any lengths or delimiters).
+          Do not include the final, zero-length chunk that marks the end.
+      delays: dict of (ms) delays for 'connect', 'headers' and 'data'.
+          e.g. {'connect': 50, 'headers': 150, 'data': [0, 10, 10]}
+          connect - The time to connect to the server.
+            Each resource has a value because Replay's record mode captures it.
+            This includes the time for the SYN and SYN/ACK (1 rtt).
+          headers - The time elapsed between the TCP connect and the headers.
+            This typically includes all the server-time to generate a response.
+          data - If the response is chunked, these are the times for each chunk.
+    """
+    self.version = version
+    self.status = status
+    self.reason = reason
+    self.headers = headers
+    self.response_data = response_data
+    self.delays = delays
+    self.fix_delays()
+
+  def fix_delays(self):
+    """Initialize delays, or check the number of data delays."""
+    expected_num_delays = len(self.response_data)
+    if not self.delays:
+      self.delays = {
+          'connect': 0,
+          'headers': 0,
+          'data': [0] * expected_num_delays
+          }
+    else:
+      num_delays = len(self.delays['data'])
+      if num_delays != expected_num_delays:
+        raise HttpArchiveException(
+            'Server delay length mismatch: %d (expected %d): %s' %
+            (num_delays, expected_num_delays, self.delays['data']))
+
+  def __repr__(self):
+    return repr((self.version, self.status, self.reason, sorted(self.headers),
+                 self.response_data))
+
+  def __hash__(self):
+    """Return a integer hash to use for hashed collections including dict."""
+    return hash(repr(self))
+
+  def __eq__(self, other):
+    """Define the __eq__ method to match the hash behavior."""
+    return repr(self) == repr(other)
+
+  def __setstate__(self, state):
+    """Influence how to unpickle.
+
+    Args:
+      state: a dictionary for __dict__
+    """
+    if 'server_delays' in state:
+      state['delays'] = {
+          'connect': 0,
+          'headers': 0,
+          'data': state['server_delays']
+          }
+      del state['server_delays']
+    elif 'delays' not in state:
+      state['delays'] = None
+    self.__dict__.update(state)
+    self.fix_delays()
+
+  def get_header(self, key, default=None):
+    for k, v in self.headers:
+      if key.lower() == k.lower():
+        return v
+    return default
+
+  def set_header(self, key, value):
+    for i, (k, v) in enumerate(self.headers):
+      if key == k:
+        self.headers[i] = (key, value)
+        return
+    self.headers.append((key, value))
+
+  def remove_header(self, key):
+    for i, (k, v) in enumerate(self.headers):
+      if key.lower() == k.lower():
+        self.headers.pop(i)
+        return
+
+  @staticmethod
+  def _get_epoch_seconds(date_str):
+    """Return the epoch seconds of a date header.
+
+    Args:
+      date_str: a date string (e.g. "Thu, 01 Dec 1994 16:00:00 GMT")
+    Returns:
+      epoch seconds as a float
+    """
+    date_tuple = email.utils.parsedate(date_str)
+    if date_tuple:
+      return calendar.timegm(date_tuple)
+    return None
+
+  def update_date(self, date_str, now=None):
+    """Return an updated date based on its delta from the "Date" header.
+
+    For example, if |date_str| is one week later than the "Date" header,
+    then the returned date string is one week later than the current date.
+
+    Args:
+      date_str: a date string (e.g. "Thu, 01 Dec 1994 16:00:00 GMT")
+    Returns:
+      a date string
+    """
+    date_seconds = self._get_epoch_seconds(self.get_header('date'))
+    header_seconds = self._get_epoch_seconds(date_str)
+    if date_seconds and header_seconds:
+      updated_seconds = header_seconds + (now or time.time()) - date_seconds
+      return email.utils.formatdate(updated_seconds, usegmt=True)
+    return date_str
+
+  def is_gzip(self):
+    return self.get_header('content-encoding') == 'gzip'
+
+  def is_compressed(self):
+    return self.get_header('content-encoding') in ('gzip', 'deflate')
+
+  def is_chunked(self):
+    return self.get_header('transfer-encoding') == 'chunked'
+
+  def get_data_as_text(self):
+    """Return content as a single string.
+
+    Uncompresses and concatenates chunks with CHUNK_EDIT_SEPARATOR.
+    """
+    content_type = self.get_header('content-type')
+    if (not content_type or
+        not (content_type.startswith('text/') or
+             content_type == 'application/x-javascript' or
+             content_type.startswith('application/json'))):
+      return None
+    if self.is_compressed():
+      uncompressed_chunks = httpzlib.uncompress_chunks(
+          self.response_data, self.is_gzip())
+    else:
+      uncompressed_chunks = self.response_data
+    return self.CHUNK_EDIT_SEPARATOR.join(uncompressed_chunks)
+
+  def get_delays_as_text(self):
+    """Return delays as editable text."""
+    return json.dumps(self.delays, indent=2)
+
+  def get_response_as_text(self):
+    """Returns response content as a single string.
+
+    Server delays are separated on a per-chunk basis. Delays are in
+    milliseconds. Response content begins after DELAY_EDIT_SEPARATOR.
+    """
+    data = self.get_data_as_text()
+    if data is None:
+      logging.warning('Data can not be represented as text.')
+      data = ''
+    delays = self.get_delays_as_text()
+    return self.DELAY_EDIT_SEPARATOR.join((delays, data))
+
+  def set_data(self, text):
+    """Inverse of get_data_as_text().
+
+    Split on CHUNK_EDIT_SEPARATOR and compress if needed.
+    """
+    text_chunks = text.split(self.CHUNK_EDIT_SEPARATOR)
+    if self.is_compressed():
+      self.response_data = httpzlib.compress_chunks(text_chunks, self.is_gzip())
+    else:
+      self.response_data = text_chunks
+    if not self.is_chunked():
+      content_length = sum(len(c) for c in self.response_data)
+      self.set_header('content-length', str(content_length))
+
+  def set_delays(self, delays_text):
+    """Inverse of get_delays_as_text().
+
+    Args:
+      delays_text: JSON encoded text such as the following:
+          {
+            connect: 80,
+            headers: 80,
+            data: [6, 55, 0]
+          }
+        Times are in milliseconds.
+        Each data delay corresponds with one response_data value.
+    """
+    try:
+      self.delays = json.loads(delays_text)
+    except (ValueError, KeyError) as e:
+      logging.critical('Unable to parse delays %s: %s', delays_text, e)
+    self.fix_delays()
+
+  def set_response_from_text(self, text):
+    """Inverse of get_response_as_text().
+
+    Modifies the state of the archive according to the textual representation.
+    """
+    try:
+      delays, data = text.split(self.DELAY_EDIT_SEPARATOR)
+    except ValueError:
+      logging.critical(
+          'Error parsing text representation. Skipping edits.')
+      return
+    self.set_delays(delays)
+    self.set_data(data)
+
+
+def create_response(status, reason=None, headers=None, body=None):
+  """Convenience method for creating simple ArchivedHttpResponse objects."""
+  if reason is None:
+    reason = httplib.responses.get(status, 'Unknown')
+  if headers is None:
+    headers = [('content-type', 'text/plain')]
+  if body is None:
+    body = "%s %s" % (status, reason)
+  return ArchivedHttpResponse(11, status, reason, headers, [body])
+
+
+def main():
+  class PlainHelpFormatter(optparse.IndentedHelpFormatter):
+    def format_description(self, description):
+      if description:
+        return description + '\n'
+      else:
+        return ''
+
+  option_parser = optparse.OptionParser(
+      usage='%prog [ls|cat|edit|stats|merge] [options] replay_file(s)',
+      formatter=PlainHelpFormatter(),
+      description=__doc__,
+      epilog='http://code.google.com/p/web-page-replay/')
+
+  option_parser.add_option('-c', '--command', default=None,
+      action='store',
+      type='string',
+      help='Only show URLs matching this command.')
+  option_parser.add_option('-o', '--host', default=None,
+      action='store',
+      type='string',
+      help='Only show URLs matching this host.')
+  option_parser.add_option('-p', '--full_path', default=None,
+      action='store',
+      type='string',
+      help='Only show URLs matching this full path.')
+  option_parser.add_option('-f', '--merged_file', default=None,
+        action='store',
+        type='string',
+        help='The output file to use when using the merge command.')
+
+  options, args = option_parser.parse_args()
+
+  # The merge command accepts an unlimited number of archives.
+  if len(args) < 2:
+    print 'args: %s' % args
+    option_parser.error('Must specify a command and replay_file')
+
+  command = args[0]
+  replay_file = args[1]
+
+  if not os.path.exists(replay_file):
+    option_parser.error('Replay file "%s" does not exist' % replay_file)
+
+  http_archive = HttpArchive.Load(replay_file)
+  if command == 'ls':
+    print http_archive.ls(options.command, options.host, options.full_path)
+  elif command == 'cat':
+    print http_archive.cat(options.command, options.host, options.full_path)
+  elif command == 'stats':
+    print http_archive.stats(options.command, options.host, options.full_path)
+  elif command == 'merge':
+    if not options.merged_file:
+      print 'Error: Must specify a merged file name (use --merged_file)'
+      return
+    http_archive.merge(options.merged_file, args[2:])
+  elif command == 'edit':
+    http_archive.edit(options.command, options.host, options.full_path)
+    http_archive.Persist(replay_file)
+  else:
+    option_parser.error('Unknown command "%s"' % command)
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/catapult/telemetry/third_party/webpagereplay/httparchive_test.py b/catapult/telemetry/third_party/webpagereplay/httparchive_test.py
new file mode 100755
index 0000000..bc94653
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/httparchive_test.py
@@ -0,0 +1,442 @@
+#!/usr/bin/env python
+# Copyright 2011 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import calendar
+import email.utils
+import httparchive
+import unittest
+
+
+def create_request(headers):
+  return httparchive.ArchivedHttpRequest(
+      'GET', 'www.test.com', '/', None, headers)
+
+def create_response(headers):
+  return httparchive.ArchivedHttpResponse(
+      11, 200, 'OK', headers, '')
+
+
+class HttpArchiveTest(unittest.TestCase):
+
+  REQUEST_HEADERS = {}
+  REQUEST = create_request(REQUEST_HEADERS)
+
+  # Used for if-(un)modified-since checks
+  DATE_PAST = 'Wed, 13 Jul 2011 03:58:08 GMT'
+  DATE_PRESENT = 'Wed, 20 Jul 2011 04:58:08 GMT'
+  DATE_FUTURE = 'Wed, 27 Jul 2011 05:58:08 GMT'
+  DATE_INVALID = 'This is an invalid date!!'
+
+  # etag values
+  ETAG_VALID = 'etag'
+  ETAG_INVALID = 'This is an invalid etag value!!'
+
+  RESPONSE_HEADERS = [('last-modified', DATE_PRESENT), ('etag', ETAG_VALID)]
+  RESPONSE = create_response(RESPONSE_HEADERS)
+
+  def setUp(self):
+    self.archive = httparchive.HttpArchive()
+    self.archive[self.REQUEST] = self.RESPONSE
+
+    # Also add an identical POST request for testing
+    request = httparchive.ArchivedHttpRequest(
+        'POST', 'www.test.com', '/', None, self.REQUEST_HEADERS)
+    self.archive[request] = self.RESPONSE
+
+  def tearDown(self):
+    pass
+
+  def test_init(self):
+    archive = httparchive.HttpArchive()
+    self.assertEqual(len(archive), 0)
+
+  def test__TrimHeaders(self):
+    request = httparchive.ArchivedHttpRequest
+    header1 = {'accept-encoding': 'gzip,deflate'}
+    self.assertEqual(request._TrimHeaders(header1),
+                     [(k, v) for k, v in header1.items()])
+
+    header2 = {'referer': 'www.google.com'}
+    self.assertEqual(request._TrimHeaders(header2), [])
+
+    header3 = {'referer': 'www.google.com', 'cookie': 'cookie_monster!',
+               'hello': 'world'}
+    self.assertEqual(request._TrimHeaders(header3), [('hello', 'world')])
+
+    # Tests that spaces and trailing comma get stripped.
+    header4 = {'accept-encoding': 'gzip, deflate,, '}
+    self.assertEqual(request._TrimHeaders(header4),
+                     [('accept-encoding', 'gzip,deflate')])
+
+    # Tests that 'lzma' gets stripped.
+    header5 = {'accept-encoding': 'gzip, deflate, lzma'}
+    self.assertEqual(request._TrimHeaders(header5),
+                     [('accept-encoding', 'gzip,deflate')])
+
+    # Tests that x-client-data gets stripped.
+    header6 = {'x-client-data': 'testdata'}
+    self.assertEqual(request._TrimHeaders(header6), [])
+
+  def test_matches(self):
+    headers = {}
+    request1 = httparchive.ArchivedHttpRequest(
+        'GET', 'www.test.com', '/index.html?hello=world', None, headers)
+    request2 = httparchive.ArchivedHttpRequest(
+        'GET', 'www.test.com', '/index.html?foo=bar', None, headers)
+
+    self.assert_(not request1.matches(
+        request2.command, request2.host, request2.full_path, use_query=True))
+    self.assert_(request1.matches(
+        request2.command, request2.host, request2.full_path, use_query=False))
+
+    self.assert_(request1.matches(
+        request2.command, request2.host, None, use_query=True))
+    self.assert_(request1.matches(
+        request2.command, None, request2.full_path, use_query=False))
+
+    empty_request = httparchive.ArchivedHttpRequest(
+        None, None, None, None, headers)
+    self.assert_(not empty_request.matches(
+        request2.command, request2.host, None, use_query=True))
+    self.assert_(not empty_request.matches(
+        request2.command, None, request2.full_path, use_query=False))
+
+  def setup_find_closest_request(self):
+    headers = {}
+    request1 = httparchive.ArchivedHttpRequest(
+        'GET', 'www.test.com', '/a?hello=world', None, headers)
+    request2 = httparchive.ArchivedHttpRequest(
+        'GET', 'www.test.com', '/a?foo=bar', None, headers)
+    request3 = httparchive.ArchivedHttpRequest(
+        'GET', 'www.test.com', '/b?hello=world', None, headers)
+    request4 = httparchive.ArchivedHttpRequest(
+        'GET', 'www.test.com', '/c?hello=world', None, headers)
+
+    archive = httparchive.HttpArchive()
+    # Add requests 2 and 3 and find closest match with request1
+    archive[request2] = self.RESPONSE
+    archive[request3] = self.RESPONSE
+
+    return archive, request1, request2, request3, request4
+
+  def test_find_closest_request(self):
+    archive, request1, request2, request3, request4 = (
+      self.setup_find_closest_request())
+
+    # Always favor requests with same paths, even if use_path=False.
+    self.assertEqual(
+        request2, archive.find_closest_request(request1, use_path=False))
+    # If we match strictly on path, request2 is the only match
+    self.assertEqual(
+        request2, archive.find_closest_request(request1, use_path=True))
+    # request4 can be matched with request3, if use_path=False
+    self.assertEqual(
+        request3, archive.find_closest_request(request4, use_path=False))
+    # ...but None, if use_path=True
+    self.assertEqual(
+        None, archive.find_closest_request(request4, use_path=True))
+
+  def test_find_closest_request_delete_simple(self):
+    archive, request1, request2, request3, request4 = (
+      self.setup_find_closest_request())
+
+    del archive[request3]
+    self.assertEqual(
+        request2, archive.find_closest_request(request1, use_path=False))
+    self.assertEqual(
+        request2, archive.find_closest_request(request1, use_path=True))
+
+  def test_find_closest_request_delete_complex(self):
+    archive, request1, request2, request3, request4 = (
+      self.setup_find_closest_request())
+
+    del archive[request2]
+    self.assertEqual(
+        request3, archive.find_closest_request(request1, use_path=False))
+    self.assertEqual(
+        None, archive.find_closest_request(request1, use_path=True))
+
+  def test_find_closest_request_timestamp(self):
+    headers = {}
+    request1 = httparchive.ArchivedHttpRequest(
+        'GET', 'www.test.com', '/index.html?time=100000000&important=true',
+        None, headers)
+    request2 = httparchive.ArchivedHttpRequest(
+        'GET', 'www.test.com', '/index.html?time=99999999&important=true',
+        None, headers)
+    request3 = httparchive.ArchivedHttpRequest(
+        'GET', 'www.test.com', '/index.html?time=10000000&important=false',
+        None, headers)
+    archive = httparchive.HttpArchive()
+    # Add requests 2 and 3 and find closest match with request1
+    archive[request2] = self.RESPONSE
+    archive[request3] = self.RESPONSE
+
+    # Although request3 is lexicographically closer, request2 is semantically
+    # more similar.
+    self.assertEqual(
+        request2, archive.find_closest_request(request1, use_path=True))
+
+  def test_get_cmp_seq(self):
+    # The order of key-value pairs in query and header respectively should not
+    # matter.
+    headers = {'k2': 'v2', 'k1': 'v1'}
+    request = httparchive.ArchivedHttpRequest(
+        'GET', 'www.test.com', '/a?c=d&a=b;e=f', None, headers)
+    self.assertEqual([('a', 'b'), ('c', 'd'), ('e', 'f'),
+                      ('k1', 'v1'), ('k2', 'v2')],
+                     request._GetCmpSeq('c=d&a=b;e=f'))
+
+  def test_get_simple(self):
+    request = self.REQUEST
+    response = self.RESPONSE
+    archive = self.archive
+
+    self.assertEqual(archive.get(request), response)
+
+    false_request_headers = {'foo': 'bar'}
+    false_request = create_request(false_request_headers)
+    self.assertEqual(archive.get(false_request, default=None), None)
+
+  def test_get_modified_headers(self):
+    request = self.REQUEST
+    response = self.RESPONSE
+    archive = self.archive
+    not_modified_response = httparchive.create_response(304)
+
+    # Fail check and return response again
+    request_headers = {'if-modified-since': self.DATE_PAST}
+    request = create_request(request_headers)
+    self.assertEqual(archive.get(request), response)
+
+    # Succeed check and return 304 Not Modified
+    request_headers = {'if-modified-since': self.DATE_FUTURE}
+    request = create_request(request_headers)
+    self.assertEqual(archive.get(request), not_modified_response)
+
+    # Succeed check and return 304 Not Modified
+    request_headers = {'if-modified-since': self.DATE_PRESENT}
+    request = create_request(request_headers)
+    self.assertEqual(archive.get(request), not_modified_response)
+
+    # Invalid date, fail check and return response again
+    request_headers = {'if-modified-since': self.DATE_INVALID}
+    request = create_request(request_headers)
+    self.assertEqual(archive.get(request), response)
+
+    # Fail check since the request is not a GET or HEAD request (as per RFC)
+    request_headers = {'if-modified-since': self.DATE_FUTURE}
+    request = httparchive.ArchivedHttpRequest(
+        'POST', 'www.test.com', '/', None, request_headers)
+    self.assertEqual(archive.get(request), response)
+
+  def test_get_unmodified_headers(self):
+    request = self.REQUEST
+    response = self.RESPONSE
+    archive = self.archive
+    not_modified_response = httparchive.create_response(304)
+
+    # Succeed check
+    request_headers = {'if-unmodified-since': self.DATE_PAST}
+    request = create_request(request_headers)
+    self.assertEqual(archive.get(request), not_modified_response)
+
+    # Fail check
+    request_headers = {'if-unmodified-since': self.DATE_FUTURE}
+    request = create_request(request_headers)
+    self.assertEqual(archive.get(request), response)
+
+    # Succeed check
+    request_headers = {'if-unmodified-since': self.DATE_PRESENT}
+    request = create_request(request_headers)
+    self.assertEqual(archive.get(request), not_modified_response)
+
+    # Fail check
+    request_headers = {'if-unmodified-since': self.DATE_INVALID}
+    request = create_request(request_headers)
+    self.assertEqual(archive.get(request), response)
+
+    # Fail check since the request is not a GET or HEAD request (as per RFC)
+    request_headers = {'if-modified-since': self.DATE_PAST}
+    request = httparchive.ArchivedHttpRequest(
+        'POST', 'www.test.com', '/', None, request_headers)
+    self.assertEqual(archive.get(request), response)
+
+  def test_get_etags(self):
+    request = self.REQUEST
+    response = self.RESPONSE
+    archive = self.archive
+    not_modified_response = httparchive.create_response(304)
+    precondition_failed_response = httparchive.create_response(412)
+
+    # if-match headers
+    request_headers = {'if-match': self.ETAG_VALID}
+    request = create_request(request_headers)
+    self.assertEqual(archive.get(request), response)
+
+    request_headers = {'if-match': self.ETAG_INVALID}
+    request = create_request(request_headers)
+    self.assertEqual(archive.get(request), precondition_failed_response)
+
+    # if-none-match headers
+    request_headers = {'if-none-match': self.ETAG_VALID}
+    request = create_request(request_headers)
+    self.assertEqual(archive.get(request), not_modified_response)
+
+    request_headers = {'if-none-match': self.ETAG_INVALID}
+    request = create_request(request_headers)
+    self.assertEqual(archive.get(request), response)
+
+  def test_get_multiple_match_headers(self):
+    request = self.REQUEST
+    response = self.RESPONSE
+    archive = self.archive
+    not_modified_response = httparchive.create_response(304)
+    precondition_failed_response = httparchive.create_response(412)
+
+    # if-match headers
+    # If the request would, without the If-Match header field,
+    # result in anything other than a 2xx or 412 status,
+    # then the If-Match header MUST be ignored.
+
+    request_headers = {
+        'if-match': self.ETAG_VALID,
+        'if-modified-since': self.DATE_PAST,
+    }
+    request = create_request(request_headers)
+    self.assertEqual(archive.get(request), response)
+
+    # Invalid etag, precondition failed
+    request_headers = {
+        'if-match': self.ETAG_INVALID,
+        'if-modified-since': self.DATE_PAST,
+    }
+    request = create_request(request_headers)
+    self.assertEqual(archive.get(request), precondition_failed_response)
+
+    # 304 response; ignore if-match header
+    request_headers = {
+        'if-match': self.ETAG_VALID,
+        'if-modified-since': self.DATE_FUTURE,
+    }
+    request = create_request(request_headers)
+    self.assertEqual(archive.get(request), not_modified_response)
+
+    # 304 response; ignore if-match header
+    request_headers = {
+        'if-match': self.ETAG_INVALID,
+        'if-modified-since': self.DATE_PRESENT,
+    }
+    request = create_request(request_headers)
+    self.assertEqual(archive.get(request), not_modified_response)
+
+    # Invalid etag, precondition failed
+    request_headers = {
+        'if-match': self.ETAG_INVALID,
+        'if-modified-since': self.DATE_INVALID,
+    }
+    request = create_request(request_headers)
+    self.assertEqual(archive.get(request), precondition_failed_response)
+
+  def test_get_multiple_none_match_headers(self):
+    request = self.REQUEST
+    response = self.RESPONSE
+    archive = self.archive
+    not_modified_response = httparchive.create_response(304)
+    precondition_failed_response = httparchive.create_response(412)
+
+    # if-none-match headers
+    # If the request would, without the If-None-Match header field,
+    # result in anything other than a 2xx or 304 status,
+    # then the If-None-Match header MUST be ignored.
+
+    request_headers = {
+        'if-none-match': self.ETAG_VALID,
+        'if-modified-since': self.DATE_PAST,
+    }
+    request = create_request(request_headers)
+    self.assertEqual(archive.get(request), response)
+
+    request_headers = {
+        'if-none-match': self.ETAG_INVALID,
+        'if-modified-since': self.DATE_PAST,
+    }
+    request = create_request(request_headers)
+    self.assertEqual(archive.get(request), response)
+
+    # etag match; return 304 Not Modified
+    request_headers = {
+        'if-none-match': self.ETAG_VALID,
+        'if-modified-since': self.DATE_FUTURE,
+    }
+    request = create_request(request_headers)
+    self.assertEqual(archive.get(request), not_modified_response)
+
+    request_headers = {
+        'if-none-match': self.ETAG_INVALID,
+        'if-modified-since': self.DATE_PRESENT,
+    }
+    request = create_request(request_headers)
+    self.assertEqual(archive.get(request), not_modified_response)
+
+    request_headers = {
+        'if-none-match': self.ETAG_INVALID,
+        'if-modified-since': self.DATE_INVALID,
+    }
+    request = create_request(request_headers)
+    self.assertEqual(archive.get(request), response)
+
+
+class ArchivedHttpResponse(unittest.TestCase):
+  PAST_DATE_A = 'Tue, 13 Jul 2010 03:47:07 GMT'
+  PAST_DATE_B = 'Tue, 13 Jul 2010 02:47:07 GMT'  # PAST_DATE_A -1 hour
+  PAST_DATE_C = 'Tue, 13 Jul 2010 04:47:07 GMT'  # PAST_DATE_A +1 hour
+  NOW_DATE_A = 'Wed, 20 Jul 2011 04:58:08 GMT'
+  NOW_DATE_B = 'Wed, 20 Jul 2011 03:58:08 GMT'  # NOW_DATE_A -1 hour
+  NOW_DATE_C = 'Wed, 20 Jul 2011 05:58:08 GMT'  # NOW_DATE_A +1 hour
+  NOW_SECONDS = calendar.timegm(email.utils.parsedate(NOW_DATE_A))
+
+  def setUp(self):
+    self.response = create_response([('date', self.PAST_DATE_A)])
+
+  def test_update_date_same_date(self):
+    self.assertEqual(
+        self.response.update_date(self.PAST_DATE_A, now=self.NOW_SECONDS),
+        self.NOW_DATE_A)
+
+  def test_update_date_before_date(self):
+    self.assertEqual(
+        self.response.update_date(self.PAST_DATE_B, now=self.NOW_SECONDS),
+        self.NOW_DATE_B)
+
+  def test_update_date_after_date(self):
+    self.assertEqual(
+        self.response.update_date(self.PAST_DATE_C, now=self.NOW_SECONDS),
+        self.NOW_DATE_C)
+
+  def test_update_date_bad_date_param(self):
+    self.assertEqual(
+        self.response.update_date('garbage date', now=self.NOW_SECONDS),
+        'garbage date')
+
+  def test_update_date_bad_date_header(self):
+    self.response.set_header('date', 'garbage date')
+    self.assertEqual(
+        self.response.update_date(self.PAST_DATE_B, now=self.NOW_SECONDS),
+        self.PAST_DATE_B)
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/catapult/telemetry/third_party/webpagereplay/httpclient.py b/catapult/telemetry/third_party/webpagereplay/httpclient.py
new file mode 100644
index 0000000..88ee9f3
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/httpclient.py
@@ -0,0 +1,501 @@
+#!/usr/bin/env python
+# Copyright 2012 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Retrieve web resources over http."""
+
+import copy
+import httplib
+import logging
+import random
+import ssl
+import StringIO
+
+import httparchive
+import platformsettings
+import script_injector
+
+
+# PIL isn't always available, but we still want to be able to run without
+# the image scrambling functionality in this case.
+try:
+  import Image
+except ImportError:
+  Image = None
+
+TIMER = platformsettings.timer
+
+
+class HttpClientException(Exception):
+  """Base class for all exceptions in httpclient."""
+  pass
+
+
+def _InjectScripts(response, inject_script):
+  """Injects |inject_script| immediately after <head> or <html>.
+
+  Copies |response| if it is modified.
+
+  Args:
+    response: an ArchivedHttpResponse
+    inject_script: JavaScript string (e.g. "Math.random = function(){...}")
+  Returns:
+    an ArchivedHttpResponse
+  """
+  if type(response) == tuple:
+    logging.warn('tuple response: %s', response)
+  content_type = response.get_header('content-type')
+  if content_type and content_type.startswith('text/html'):
+    text = response.get_data_as_text()
+    text, already_injected = script_injector.InjectScript(
+        text, 'text/html', inject_script)
+    if not already_injected:
+      response = copy.deepcopy(response)
+      response.set_data(text)
+  return response
+
+
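+# Illustrative sketch only (not part of the original module): exercising
+# _InjectScripts() with a hand-built HTML response. The function name and the
+# injected script string are hypothetical.
+def _example_inject_script():
+  response = httparchive.ArchivedHttpResponse(
+      11, 200, 'OK', [('content-type', 'text/html')],
+      ['<html><head></head><body></body></html>'])
+  return _InjectScripts(response, 'window.__wpr_replay = true;')
+
+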
+def _ScrambleImages(response):
+  """If the |response| is an image, attempt to scramble it.
+
+  Copies |response| if it is modified.
+
+  Args:
+    response: an ArchivedHttpResponse
+  Returns:
+    an ArchivedHttpResponse
+  """
+
+  assert Image, '--scramble_images requires the PIL module to be installed.'
+
+  content_type = response.get_header('content-type')
+  if content_type and content_type.startswith('image/'):
+    try:
+      image_data = response.response_data[0]
+      image_data.decode(encoding='base64')
+      im = Image.open(StringIO.StringIO(image_data))
+
+      pixel_data = list(im.getdata())
+      random.shuffle(pixel_data)
+
+      scrambled_image = im.copy()
+      scrambled_image.putdata(pixel_data)
+
+      output_image_io = StringIO.StringIO()
+      scrambled_image.save(output_image_io, im.format)
+      output_image_data = output_image_io.getvalue()
+      output_image_data.encode(encoding='base64')
+
+      response = copy.deepcopy(response)
+      response.set_data(output_image_data)
+    except Exception:
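+      # If the image cannot be decoded or re-encoded, fall through and serve
+      # the response unmodified.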
+      pass
+
+  return response
+
+
+class DetailedHTTPResponse(httplib.HTTPResponse):
+  """Preserve details relevant to replaying responses.
+
+  WARNING: This code uses attributes and methods of HTTPResponse
+  that are not part of the public interface.
+  """
+
+  def read_chunks(self):
+    """Return the response body content and timing data.
+
+    The returned chunks have the chunk size and CRLFs stripped off.
+    If the response was compressed, the returned data is still compressed.
+
+    Returns:
+      (chunks, delays)
+        chunks:
+          [response_body]                  # non-chunked responses
+          [chunk_1, chunk_2, ...]          # chunked responses
+        delays:
+          [0]                              # non-chunked responses
+          [chunk_1_first_byte_delay, ...]  # chunked responses
+
+      The delay for the first body item should be recorded by the caller.
+    """
+    buf = []
+    chunks = []
+    delays = []
+    if not self.chunked:
+      chunks.append(self.read())
+      delays.append(0)
+    else:
+      start = TIMER()
+      try:
+        while True:
+          line = self.fp.readline()
+          chunk_size = self._read_chunk_size(line)
+          if chunk_size is None:
+            raise httplib.IncompleteRead(''.join(chunks))
+          if chunk_size == 0:
+            break
+          delays.append(TIMER() - start)
+          chunks.append(self._safe_read(chunk_size))
+          self._safe_read(2)  # skip the CRLF at the end of the chunk
+          start = TIMER()
+
+        # Ignore any trailers.
+        while True:
+          line = self.fp.readline()
+          if not line or line == '\r\n':
+            break
+      finally:
+        self.close()
+    return chunks, delays
+
+  @classmethod
+  def _read_chunk_size(cls, line):
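+    # For example, a size line of '1a2;chunk-ext=1\r\n' yields 0x1a2 == 418;
+    # a malformed size line yields None.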
+    chunk_extensions_pos = line.find(';')
+    if chunk_extensions_pos != -1:
+      line = line[:chunk_extensions_pos]  # strip chunk-extensions
+    try:
+      chunk_size = int(line, 16)
+    except ValueError:
+      return None
+    return chunk_size
+
+
+class DetailedHTTPConnection(httplib.HTTPConnection):
+  """Preserve details relevant to replaying connections."""
+  response_class = DetailedHTTPResponse
+
+
+class DetailedHTTPSResponse(DetailedHTTPResponse):
+  """Preserve details relevant to replaying SSL responses."""
+  pass
+
+
+class DetailedHTTPSConnection(httplib.HTTPSConnection):
+  """Preserve details relevant to replaying SSL connections."""
+  response_class = DetailedHTTPSResponse
+
+  def __init__(self, host, port):
+    # https://www.python.org/dev/peps/pep-0476/#opting-out
+    if hasattr(ssl, '_create_unverified_context'):
+      httplib.HTTPSConnection.__init__(
+          self, host=host, port=port, context=ssl._create_unverified_context())
+    else:
+      httplib.HTTPSConnection.__init__(self, host=host, port=port)
+
+
+class RealHttpFetch(object):
+
+  def __init__(self, real_dns_lookup):
+    """Initialize RealHttpFetch.
+
+    Args:
+      real_dns_lookup: a function that resolves a host to an IP.
+    """
+    self._real_dns_lookup = real_dns_lookup
+
+  @staticmethod
+  def _GetHeaderNameValue(header):
+    """Parse the header line and return a name/value tuple.
+
+    Args:
+      header: a string for a header such as "Content-Length: 314".
+    Returns:
+      A tuple (header_name, header_value) on success or None if the header
+      is not in expected format. header_name is in lowercase.
+    """
+    i = header.find(':')
+    if i > 0:
+      return (header[:i].lower(), header[i+1:].strip())
+    return None
+
+  @staticmethod
+  def _ToTuples(headers):
+    """Parse headers and save them to a list of tuples.
+
+    This method takes HttpResponse.msg.headers as input and converts it
+    to a list of (header_name, header_value) tuples.
+    HttpResponse.msg.headers is a list of strings where each string
+    represents either a header or a continuation line of a header.
+    1. a normal header consists of two parts separated by a colon:
+       "header_name:header_value..."
+    2. a continuation line is a string starting with whitespace
+       "[whitespace]continued_header_value..."
+    If a header is not in good shape or an unexpected continuation line is
+    seen, it will be ignored.
+
+    Avoid using response.getheaders() directly because it cannot handle
+    multiple headers with the same name properly. Instead, parse
+    response.msg.headers using this method to get all headers.
+
+    Args:
+      headers: an instance of HttpResponse.msg.headers.
+    Returns:
+      A list of tuples which looks like:
+      [(header_name, header_value), (header_name2, header_value2)...]
+    """
+    all_headers = []
+    for line in headers:
+      if line[0] in '\t ':
+        if not all_headers:
+          logging.warning(
+              'Unexpected response header continuation line [%s]', line)
+          continue
+        name, value = all_headers.pop()
+        value += '\n ' + line.strip()
+      else:
+        name_value = RealHttpFetch._GetHeaderNameValue(line)
+        if not name_value:
+          logging.warning(
+              'Response header in wrong format [%s]', line)
+          continue
+        name, value = name_value  # pylint: disable=unpacking-non-sequence
+      all_headers.append((name, value))
+    return all_headers
+
+  @staticmethod
+  def _get_request_host_port(request):
+    host_parts = request.host.split(':')
+    host = host_parts[0]
+    port = int(host_parts[1]) if len(host_parts) == 2 else None
+    return host, port
+
+  @staticmethod
+  def _get_system_proxy(is_ssl):
+    return platformsettings.get_system_proxy(is_ssl)
+
+  def _get_connection(self, request_host, request_port, is_ssl):
+    """Return a detailed connection object for host/port pair.
+
+    If a system proxy is defined (see platformsettings.py), it will be used.
+
+    Args:
+      request_host: a host string (e.g. "www.example.com").
+      request_port: a port integer (e.g. 8080) or None (for the default port).
+      is_ssl: True if HTTPS connection is needed.
+    Returns:
+      A DetailedHTTPSConnection or DetailedHTTPConnection instance.
+    """
+    connection_host = request_host
+    connection_port = request_port
+    system_proxy = self._get_system_proxy(is_ssl)
+    if system_proxy:
+      connection_host = system_proxy.host
+      connection_port = system_proxy.port
+
+    # Use an IP address because WPR may override DNS settings.
+    connection_ip = self._real_dns_lookup(connection_host)
+    if not connection_ip:
+      logging.critical('Unable to find host ip for name: %s', connection_host)
+      return None
+
+    if is_ssl:
+      connection = DetailedHTTPSConnection(connection_ip, connection_port)
+      if system_proxy:
+        connection.set_tunnel(request_host, request_port)
+    else:
+      connection = DetailedHTTPConnection(connection_ip, connection_port)
+    return connection
+
+  def __call__(self, request):
+    """Fetch an HTTP request.
+
+    Args:
+      request: an ArchivedHttpRequest
+    Returns:
+      an ArchivedHttpResponse
+    """
+    logging.debug('RealHttpFetch: %s %s', request.host, request.full_path)
+    request_host, request_port = self._get_request_host_port(request)
+    retries = 3
+    while True:
+      try:
+        connection = self._get_connection(
+            request_host, request_port, request.is_ssl)
+        connect_start = TIMER()
+        connection.connect()
+        connect_delay = int((TIMER() - connect_start) * 1000)
+        start = TIMER()
+        connection.request(
+            request.command,
+            request.full_path,
+            request.request_body,
+            request.headers)
+        response = connection.getresponse()
+        headers_delay = int((TIMER() - start) * 1000)
+
+        chunks, chunk_delays = response.read_chunks()
+        delays = {
+            'connect': connect_delay,
+            'headers': headers_delay,
+            'data': chunk_delays
+            }
+        archived_http_response = httparchive.ArchivedHttpResponse(
+            response.version,
+            response.status,
+            response.reason,
+            RealHttpFetch._ToTuples(response.msg.headers),
+            chunks,
+            delays)
+        return archived_http_response
+      except Exception, e:
+        if retries:
+          retries -= 1
+          logging.warning('Retrying fetch %s: %s', request, repr(e))
+          continue
+        logging.critical('Could not fetch %s: %s', request, repr(e))
+        return None
+
+
+class RecordHttpArchiveFetch(object):
+  """Make real HTTP fetches and save responses in the given HttpArchive."""
+
+  def __init__(self, http_archive, real_dns_lookup, inject_script):
+    """Initialize RecordHttpArchiveFetch.
+
+    Args:
+      http_archive: an instance of a HttpArchive
+      real_dns_lookup: a function that resolves a host to an IP.
+      inject_script: script string to inject in all pages
+    """
+    self.http_archive = http_archive
+    self.real_http_fetch = RealHttpFetch(real_dns_lookup)
+    self.inject_script = inject_script
+
+  def __call__(self, request):
+    """Fetch the request and return the response.
+
+    Args:
+      request: an ArchivedHttpRequest.
+    Returns:
+      an ArchivedHttpResponse
+    """
+    # If request is already in the archive, return the archived response.
+    if request in self.http_archive:
+      logging.debug('Repeated request found: %s', request)
+      response = self.http_archive[request]
+    else:
+      response = self.real_http_fetch(request)
+      if response is None:
+        return None
+      self.http_archive[request] = response
+    if self.inject_script:
+      response = _InjectScripts(response, self.inject_script)
+    logging.debug('Recorded: %s', request)
+    return response
+
+
+class ReplayHttpArchiveFetch(object):
+  """Serve responses from the given HttpArchive."""
+
+  def __init__(self, http_archive, real_dns_lookup, inject_script,
+               use_diff_on_unknown_requests=False,
+               use_closest_match=False, scramble_images=False):
+    """Initialize ReplayHttpArchiveFetch.
+
+    Args:
+      http_archive: an instance of a HttpArchive
+      real_dns_lookup: a function that resolves a host to an IP.
+      inject_script: script string to inject in all pages
+      use_diff_on_unknown_requests: If True, log unknown requests
+        with a diff to requests that look similar.
+      use_closest_match: If True, in replay mode, serve the closest match
+        in the archive instead of giving a 404.
+      scramble_images: If True, scramble image responses before serving them.
+    """
+    self.http_archive = http_archive
+    self.inject_script = inject_script
+    self.use_diff_on_unknown_requests = use_diff_on_unknown_requests
+    self.use_closest_match = use_closest_match
+    self.scramble_images = scramble_images
+    self.real_http_fetch = RealHttpFetch(real_dns_lookup)
+
+  def __call__(self, request):
+    """Fetch the request and return the response.
+
+    Args:
+      request: an instance of an ArchivedHttpRequest.
+    Returns:
+      Instance of ArchivedHttpResponse (if found) or None
+    """
+    if request.host.startswith('127.0.0.1:'):
+      return self.real_http_fetch(request)
+
+    response = self.http_archive.get(request)
+
+    if self.use_closest_match and not response:
+      closest_request = self.http_archive.find_closest_request(
+          request, use_path=True)
+      if closest_request:
+        response = self.http_archive.get(closest_request)
+        if response:
+          logging.info('Request not found: %s\nUsing closest match: %s',
+                       request, closest_request)
+
+    if not response:
+      reason = str(request)
+      if self.use_diff_on_unknown_requests:
+        diff = self.http_archive.diff(request)
+        if diff:
+          reason += (
+              "\nNearest request diff "
+              "('-' for archived request, '+' for current request):\n%s" % diff)
+      logging.warning('Could not replay: %s', reason)
+    else:
+      if self.inject_script:
+        response = _InjectScripts(response, self.inject_script)
+      if self.scramble_images:
+        response = _ScrambleImages(response)
+    return response
+
+
+class ControllableHttpArchiveFetch(object):
+  """Controllable fetch function that can swap between record and replay."""
+
+  def __init__(self, http_archive, real_dns_lookup,
+               inject_script, use_diff_on_unknown_requests,
+               use_record_mode, use_closest_match, scramble_images):
+    """Initialize HttpArchiveFetch.
+
+    Args:
+      http_archive: an instance of a HttpArchive
+      real_dns_lookup: a function that resolves a host to an IP.
+      inject_script: script string to inject in all pages.
+      use_diff_on_unknown_requests: If True, log unknown requests
+        with a diff to requests that look similar.
+      use_record_mode: If True, start the server in record mode.
+      use_closest_match: If True, in replay mode, serve the closest match
+        in the archive instead of giving a 404.
+      scramble_images: If True, scramble image responses before serving them.
+    """
+    self.http_archive = http_archive
+    self.record_fetch = RecordHttpArchiveFetch(
+        http_archive, real_dns_lookup, inject_script)
+    self.replay_fetch = ReplayHttpArchiveFetch(
+        http_archive, real_dns_lookup, inject_script,
+        use_diff_on_unknown_requests, use_closest_match, scramble_images)
+    if use_record_mode:
+      self.SetRecordMode()
+    else:
+      self.SetReplayMode()
+
+  def SetRecordMode(self):
+    self.fetch = self.record_fetch
+    self.is_record_mode = True
+
+  def SetReplayMode(self):
+    self.fetch = self.replay_fetch
+    self.is_record_mode = False
+
+  def __call__(self, *args, **kwargs):
+    """Forward calls to Replay/Record fetch functions depending on mode."""
+    return self.fetch(*args, **kwargs)
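+
+
+# Illustrative sketch only (not part of the original module): toggling an
+# already-constructed ControllableHttpArchiveFetch between record and replay.
+# The function name is hypothetical.
+def _example_toggle_mode(fetch):
+  fetch.SetRecordMode()  # real fetches; responses are saved to the archive
+  fetch.SetReplayMode()  # responses are served from the archive
+  return fetch.is_record_mode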
diff --git a/catapult/telemetry/third_party/webpagereplay/httpclient_test.py b/catapult/telemetry/third_party/webpagereplay/httpclient_test.py
new file mode 100644
index 0000000..a9d5b2d
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/httpclient_test.py
@@ -0,0 +1,211 @@
+#!/usr/bin/env python
+# Copyright 2012 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import dnsproxy
+import httparchive
+import httpclient
+import platformsettings
+import test_utils
+
+
+class RealHttpFetchTest(unittest.TestCase):
+
+  # Initialize test data
+  CONTENT_TYPE = 'content-type: image/x-icon'
+  COOKIE_1 = ('Set-Cookie: GMAIL_IMP=EXPIRED; '
+              'Expires=Thu, 12-Jul-2012 22:41:22 GMT; '
+              'Path=/mail; Secure')
+  COOKIE_2 = ('Set-Cookie: GMAIL_STAT_205a=EXPIRED; '
+              'Expires=Thu, 12-Jul-2012 22:42:24 GMT; '
+              'Path=/mail; Secure')
+  FIRST_LINE = 'fake-header: first line'
+  SECOND_LINE = ' second line'
+  THIRD_LINE = '\tthird line'
+  BAD_HEADER = 'this is a bad header'
+
+  def test__GetHeaderNameValueBasic(self):
+    """Test _GetHeaderNameValue with normal header."""
+
+    real_http_fetch = httpclient.RealHttpFetch
+    name_value = real_http_fetch._GetHeaderNameValue(self.CONTENT_TYPE)
+    self.assertEqual(name_value, ('content-type', 'image/x-icon'))
+
+  def test__GetHeaderNameValueLowercasesName(self):
+    """_GetHeaderNameValue lowercases header name."""
+
+    real_http_fetch = httpclient.RealHttpFetch
+    header = 'X-Google-Gfe-Backend-Request-Info: eid=1KMAUMeiK4eMiAL52YyMBg'
+    expected = ('x-google-gfe-backend-request-info',
+                'eid=1KMAUMeiK4eMiAL52YyMBg')
+    name_value = real_http_fetch._GetHeaderNameValue(header)
+    self.assertEqual(name_value, expected)
+
+  def test__GetHeaderNameValueBadLineGivesNone(self):
+    """_GetHeaderNameValue returns None for a header in wrong format."""
+
+    real_http_fetch = httpclient.RealHttpFetch
+    name_value = real_http_fetch._GetHeaderNameValue(self.BAD_HEADER)
+    self.assertIsNone(name_value)
+
+  def test__ToTuplesBasic(self):
+    """Test _ToTuples with normal input."""
+
+    real_http_fetch = httpclient.RealHttpFetch
+    headers = [self.CONTENT_TYPE, self.COOKIE_1, self.FIRST_LINE]
+    result = real_http_fetch._ToTuples(headers)
+    expected = [('content-type', 'image/x-icon'),
+                ('set-cookie', self.COOKIE_1[12:]),
+                ('fake-header', 'first line')]
+    self.assertEqual(result, expected)
+
+  def test__ToTuplesMultipleHeadersWithSameName(self):
+    """Test mulitple headers with the same name."""
+
+    real_http_fetch = httpclient.RealHttpFetch
+    headers = [self.CONTENT_TYPE, self.COOKIE_1, self.COOKIE_2, self.FIRST_LINE]
+    result = real_http_fetch._ToTuples(headers)
+    expected = [('content-type', 'image/x-icon'),
+                ('set-cookie', self.COOKIE_1[12:]),
+                ('set-cookie', self.COOKIE_2[12:]),
+                ('fake-header', 'first line')]
+    self.assertEqual(result, expected)
+
+  def test__ToTuplesAppendsContinuationLine(self):
+    """Test continuation line is handled."""
+
+    real_http_fetch = httpclient.RealHttpFetch
+    headers = [self.CONTENT_TYPE, self.COOKIE_1, self.FIRST_LINE,
+               self.SECOND_LINE, self.THIRD_LINE]
+    result = real_http_fetch._ToTuples(headers)
+    expected = [('content-type', 'image/x-icon'),
+                ('set-cookie', self.COOKIE_1[12:]),
+                ('fake-header', 'first line\n second line\n third line')]
+    self.assertEqual(result, expected)
+
+  def test__ToTuplesIgnoresBadHeader(self):
+    """Test bad header is ignored."""
+
+    real_http_fetch = httpclient.RealHttpFetch
+    bad_headers = [self.CONTENT_TYPE, self.BAD_HEADER, self.COOKIE_1]
+    expected = [('content-type', 'image/x-icon'),
+                ('set-cookie', self.COOKIE_1[12:])]
+    result = real_http_fetch._ToTuples(bad_headers)
+    self.assertEqual(result, expected)
+
+  def test__ToTuplesIgnoresMisplacedContinuationLine(self):
+    """Test misplaced continuation line is ignored."""
+
+    real_http_fetch = httpclient.RealHttpFetch
+    misplaced_headers = [self.THIRD_LINE, self.CONTENT_TYPE,
+                         self.COOKIE_1, self.FIRST_LINE, self.SECOND_LINE]
+    result = real_http_fetch._ToTuples(misplaced_headers)
+    expected = [('content-type', 'image/x-icon'),
+                ('set-cookie', self.COOKIE_1[12:]),
+                ('fake-header', 'first line\n second line')]
+    self.assertEqual(result, expected)
+
+
+class RealHttpFetchGetConnectionTest(unittest.TestCase):
+  """Test that a connection is made with request IP/port or proxy IP/port."""
+
+  def setUp(self):
+    def real_dns_lookup(host):
+      return {
+          'example.com': '127.127.127.127',
+          'proxy.com': '2.2.2.2',
+          }[host]
+    self.fetch = httpclient.RealHttpFetch(real_dns_lookup)
+    self.https_proxy = None
+    self.http_proxy = None
+    def get_proxy(is_ssl):
+      return self.https_proxy if is_ssl else self.http_proxy
+    self.fetch._get_system_proxy = get_proxy
+
+  def set_http_proxy(self, host, port):
+    self.http_proxy = platformsettings.SystemProxy(host, port)
+
+  def set_https_proxy(self, host, port):
+    self.https_proxy = platformsettings.SystemProxy(host, port)
+
+  def test_get_connection_without_proxy_connects_to_host_ip(self):
+    """HTTP connection with no proxy connects to host IP."""
+    self.set_http_proxy(host=None, port=None)
+    connection = self.fetch._get_connection('example.com', None, is_ssl=False)
+    self.assertEqual('127.127.127.127', connection.host)
+    self.assertEqual(80, connection.port)  # default HTTP port
+
+  def test_get_connection_without_proxy_uses_nondefault_request_port(self):
+    """HTTP connection with no proxy connects with request port."""
+    self.set_https_proxy(host=None, port=None)
+    connection = self.fetch._get_connection('example.com', 8888, is_ssl=False)
+    self.assertEqual('127.127.127.127', connection.host)
+    self.assertEqual(8888, connection.port)  # request HTTP port
+
+  def test_get_connection_with_proxy_uses_proxy_port(self):
+    """HTTP connection with proxy connects used proxy port."""
+    self.set_http_proxy(host='proxy.com', port=None)
+    connection = self.fetch._get_connection('example.com', 8888, is_ssl=False)
+    self.assertEqual('2.2.2.2', connection.host)  # proxy IP
+    self.assertEqual(80, connection.port)  # proxy port (default HTTP)
+
+  def test_ssl_get_connection_without_proxy_connects_to_host_ip(self):
+    """HTTPS (SSL) connection with no proxy connects to host IP."""
+    self.set_https_proxy(host=None, port=None)
+    connection = self.fetch._get_connection('example.com', None, is_ssl=True)
+    self.assertEqual('127.127.127.127', connection.host)
+    self.assertEqual(443, connection.port)  # default SSL port
+
+  def test_ssl_get_connection_with_proxy_connects_to_proxy_ip(self):
+    """HTTPS (SSL) connection with proxy connects to proxy IP."""
+    self.set_https_proxy(host='proxy.com', port=8443)
+    connection = self.fetch._get_connection('example.com', None, is_ssl=True)
+    self.assertEqual('2.2.2.2', connection.host)  # proxy IP
+    self.assertEqual(8443, connection.port)  # SSL proxy port
+
+  def test_ssl_get_connection_with_proxy_tunnels_to_host(self):
+    """HTTPS (SSL) connection with proxy tunnels to target host."""
+    self.set_https_proxy(host='proxy.com', port=8443)
+    connection = self.fetch._get_connection('example.com', None, is_ssl=True)
+    self.assertEqual('example.com', connection._tunnel_host)  # host name
+    self.assertEqual(None, connection._tunnel_port)  # host port
+
+
+class ActualNetworkFetchTest(test_utils.RealNetworkFetchTest):
+
+  def testFetchNonSSLRequest(self):
+    real_dns_lookup = dnsproxy.RealDnsLookup(
+        name_servers=[platformsettings.get_original_primary_nameserver()])
+    fetch = httpclient.RealHttpFetch(real_dns_lookup)
+    request = httparchive.ArchivedHttpRequest(
+        command='GET', host='google.com', full_path='/search?q=dogs',
+        request_body=None, headers={}, is_ssl=False)
+    response = fetch(request)
+    self.assertIsNotNone(response)
+
+  def testFetchSSLRequest(self):
+    real_dns_lookup = dnsproxy.RealDnsLookup(
+        name_servers=[platformsettings.get_original_primary_nameserver()])
+    fetch = httpclient.RealHttpFetch(real_dns_lookup)
+    request = httparchive.ArchivedHttpRequest(
+        command='GET', host='google.com', full_path='/search?q=dogs',
+        request_body=None, headers={}, is_ssl=True)
+    response = fetch(request)
+    self.assertIsNotNone(response)
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/catapult/telemetry/third_party/webpagereplay/httpproxy.py b/catapult/telemetry/third_party/webpagereplay/httpproxy.py
new file mode 100644
index 0000000..1ba46b0
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/httpproxy.py
@@ -0,0 +1,437 @@
+#!/usr/bin/env python
+# Copyright 2010 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import BaseHTTPServer
+import certutils
+import collections
+import errno
+import logging
+import socket
+import SocketServer
+import ssl
+import sys
+import time
+import urlparse
+
+import daemonserver
+import httparchive
+import platformsettings
+import proxyshaper
+import sslproxy
+
+def _HandleSSLCertificateError():
+  """
+  This method is intended to be called from
+  BaseHTTPServer.HTTPServer.handle_error().
+  """
+  exc_type, exc_value, exc_traceback = sys.exc_info()
+  if isinstance(exc_value, ssl.SSLError):
+    return
+
+  raise
+
+
+class HttpProxyError(Exception):
+  """Module catch-all error."""
+  pass
+
+
+class HttpProxyServerError(HttpProxyError):
+  """Raised for errors like 'Address already in use'."""
+  pass
+
+
+class HttpArchiveHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+  protocol_version = 'HTTP/1.1'  # override BaseHTTPServer setting
+
+  # Since we do lots of small wfile.write() calls, turn on buffering.
+  wbufsize = -1  # override StreamRequestHandler (a base class) setting
+
+  def setup(self):
+    """Override StreamRequestHandler method."""
+    BaseHTTPServer.BaseHTTPRequestHandler.setup(self)
+    if self.server.traffic_shaping_up_bps:
+      self.rfile = proxyshaper.RateLimitedFile(
+          self.server.get_active_request_count, self.rfile,
+          self.server.traffic_shaping_up_bps)
+    if self.server.traffic_shaping_down_bps:
+      self.wfile = proxyshaper.RateLimitedFile(
+          self.server.get_active_request_count, self.wfile,
+          self.server.traffic_shaping_down_bps)
+
+  # Make request handler logging match our logging format.
+  def log_request(self, code='-', size='-'):
+    pass
+
+  def log_error(self, format, *args):  # pylint:disable=redefined-builtin
+    logging.error(format, *args)
+
+  def log_message(self, format, *args):  # pylint:disable=redefined-builtin
+    logging.info(format, *args)
+
+  def read_request_body(self):
+    request_body = None
+    length = int(self.headers.get('content-length', 0)) or None
+    if length:
+      request_body = self.rfile.read(length)
+    return request_body
+
+  def get_header_dict(self):
+    return dict(self.headers.items())
+
+  def get_archived_http_request(self):
+    host = self.headers.get('host')
+    if host is None:
+      logging.error('Request without host header')
+      return None
+
+    parsed = urlparse.urlparse(self.path)
+    params = ';%s' % parsed.params if parsed.params else ''
+    query = '?%s' % parsed.query if parsed.query else ''
+    fragment = '#%s' % parsed.fragment if parsed.fragment else ''
+    full_path = '%s%s%s%s' % (parsed.path, params, query, fragment)
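+    # For example, '/search;v=1?q=dogs#top' decomposes into path '/search',
+    # params 'v=1', query 'q=dogs', and fragment 'top', and reassembles to
+    # the original string.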
+
+    StubRequest = collections.namedtuple('StubRequest', ('host', 'full_path'))
+    request, response = StubRequest(host, full_path), None
+
+    self.server.log_url(request, response)
+
+    return httparchive.ArchivedHttpRequest(
+        self.command,
+        host,
+        full_path,
+        self.read_request_body(),
+        self.get_header_dict(),
+        self.server.is_ssl)
+
+  def send_archived_http_response(self, response):
+    try:
+      # We need to set the server name before we start the response.
+      is_chunked = response.is_chunked()
+      has_content_length = response.get_header('content-length') is not None
+      self.server_version = response.get_header('server', 'WebPageReplay')
+      self.sys_version = ''
+
+      if response.version == 10:
+        self.protocol_version = 'HTTP/1.0'
+
+      # If we don't have chunked encoding and there is no content length,
+      # we need to manually compute the content-length.
+      if not is_chunked and not has_content_length:
+        content_length = sum(len(c) for c in response.response_data)
+        response.headers.append(('content-length', str(content_length)))
+
+      is_replay = not self.server.http_archive_fetch.is_record_mode
+      if is_replay and self.server.traffic_shaping_delay_ms:
+        logging.debug('Using round trip delay: %sms',
+                      self.server.traffic_shaping_delay_ms)
+        time.sleep(self.server.traffic_shaping_delay_ms / 1000.0)
+      if is_replay and self.server.use_delays:
+        logging.debug('Using delays (ms): %s', response.delays)
+        time.sleep(response.delays['headers'] / 1000.0)
+        delays = response.delays['data']
+      else:
+        delays = [0] * len(response.response_data)
+      self.send_response(response.status, response.reason)
+      # TODO(mbelshe): This is lame - each write is a packet!
+      for header, value in response.headers:
+        if header in ('last-modified', 'expires'):
+          self.send_header(header, response.update_date(value))
+        elif header not in ('date', 'server'):
+          self.send_header(header, value)
+      self.end_headers()
+
+      for chunk, delay in zip(response.response_data, delays):
+        if delay:
+          self.wfile.flush()
+          time.sleep(delay / 1000.0)
+        if is_chunked:
+          # Write chunk length (hex) and data (e.g. "A\r\nTESSELATED\r\n").
+          self.wfile.write('%x\r\n%s\r\n' % (len(chunk), chunk))
+        else:
+          self.wfile.write(chunk)
+      if is_chunked:
+        self.wfile.write('0\r\n\r\n')  # write final, zero-length chunk.
+      self.wfile.flush()
+
+      # TODO(mbelshe): This connection close doesn't seem to work.
+      if response.version == 10:
+        self.close_connection = 1
+
+    except Exception, e:
+      logging.error('Error sending response for %s%s: %s',
+                    self.headers['host'], self.path, e)
+
+  def handle_one_request(self):
+    """Handle a single HTTP request.
+
+    This method overrides a method from BaseHTTPRequestHandler. When this
+    method returns, it must leave self.close_connection in the correct state.
+    If this method raises an exception, the state of self.close_connection
+    doesn't matter.
+    """
+    try:
+      self.raw_requestline = self.rfile.readline(65537)
+      self.do_parse_and_handle_one_request()
+    except socket.timeout, e:
+      # A read or a write timed out. Discard this connection.
+      self.log_error('Request timed out: %r', e)
+      self.close_connection = 1
+      return
+    except ssl.SSLError:
+      # There is insufficient information passed up the stack from OpenSSL to
+      # determine the true cause of the SSL error. This almost always happens
+      # because the client refuses to accept the self-signed certs of
+      # WebPageReplay.
+      self.close_connection = 1
+      return
+    except socket.error, e:
+      # Connection reset errors happen all the time due to the browser closing
+      # without terminating the connection properly.  They can be safely
+      # ignored.
+      if e[0] == errno.ECONNRESET:
+        self.close_connection = 1
+        return
+      raise
+
+
+  def do_parse_and_handle_one_request(self):
+    start_time = time.time()
+    self.server.num_active_requests += 1
+    request = None
+    try:
+      if len(self.raw_requestline) > 65536:
+        self.requestline = ''
+        self.request_version = ''
+        self.command = ''
+        self.send_error(414)
+        self.close_connection = 0
+        return
+      if not self.raw_requestline:
+        # This indicates that the socket has been closed by the client.
+        self.close_connection = 1
+        return
+
+      # self.parse_request() sets self.close_connection. There is no need to
+      # set the property after the method is executed, unless custom behavior
+      # is desired.
+      if not self.parse_request():
+        # An error code has been sent, just exit.
+        return
+
+      try:
+        response = None
+        request = self.get_archived_http_request()
+
+        if request is None:
+          self.send_error(500)
+          return
+        response = self.server.custom_handlers.handle(request)
+        if not response:
+          response = self.server.http_archive_fetch(request)
+        if response:
+          self.send_archived_http_response(response)
+        else:
+          self.send_error(404)
+      finally:
+        self.wfile.flush()  # Actually send the response if not already done.
+    finally:
+      request_time_ms = (time.time() - start_time) * 1000.0
+      self.server.total_request_time += request_time_ms
+      if request:
+        if response:
+          logging.debug('Served: %s (%dms)', request, request_time_ms)
+        else:
+          logging.warning('Failed to find response for: %s (%dms)',
+                          request, request_time_ms)
+      self.server.num_active_requests -= 1
+
+  def send_error(self, status, body=None):
+    """Override the default send error with a version that doesn't unnecessarily
+    close the connection.
+    """
+    response = httparchive.create_response(status, body=body)
+    self.send_archived_http_response(response)
+
+
+class HttpProxyServer(SocketServer.ThreadingMixIn,
+                      BaseHTTPServer.HTTPServer,
+                      daemonserver.DaemonServer):
+  HANDLER = HttpArchiveHandler
+
+  # Increase the request queue size. The default value, 5, is set in
+  # SocketServer.TCPServer (the parent of BaseHTTPServer.HTTPServer).
+  # Since we're intercepting many domains through this single server,
+  # it is quite possible to get more than 5 concurrent requests.
+  request_queue_size = 256
+
+  # The number of simultaneous connections that the HTTP server supports. This
+  # is primarily limited by system limits such as RLIMIT_NOFILE.
+  connection_limit = 500
+
+  # Allow sockets to be reused. See
+  # http://svn.python.org/projects/python/trunk/Lib/SocketServer.py for more
+  # details.
+  allow_reuse_address = True
+
+  # Don't prevent python from exiting when there is thread activity.
+  daemon_threads = True
+
+  def __init__(self, http_archive_fetch, custom_handlers, rules,
+               host='localhost', port=80, use_delays=False, is_ssl=False,
+               protocol='HTTP',
+               down_bandwidth='0', up_bandwidth='0', delay_ms='0'):
+    """Start HTTP server.
+
+    Args:
+      http_archive_fetch: a callable that takes an archived request and
+          returns the matching archived response, or None if there is none.
+      custom_handlers: an object with a handle(request) method that may
+          return a response before the archive is consulted.
+      rules: a rule_parser Rules.
+      host: a host string (name or IP) for the web proxy.
+      port: a port number (e.g. 80) for the web proxy.
+      use_delays: if True, add response data delays during replay.
+      is_ssl: True iff proxy is using SSL.
+      protocol: a protocol label (e.g. 'HTTP' or 'HTTPS') used in log output.
+      up_bandwidth: Upload bandwidth
+      down_bandwidth: Download bandwidth
+           Bandwidths measured in [K|M]{bit/s|Byte/s}. '0' means unlimited.
+      delay_ms: Propagation delay in milliseconds. '0' means no delay.
+    """
+    if platformsettings.SupportsFdLimitControl():
+      # BaseHTTPServer opens a new thread and two fds for each connection.
+      # Check that the process can open at least 1000 fds.
+      soft_limit, hard_limit = platformsettings.GetFdLimit()
+      # Add some wiggle room since there are probably fds not associated with
+      # connections.
+      wiggle_room = 100
+      desired_limit = 2 * HttpProxyServer.connection_limit + wiggle_room
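+      # With the default connection_limit of 500 and wiggle_room of 100, this
+      # asks for 2 * 500 + 100 = 1100 file descriptors.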
+      if soft_limit < desired_limit:
+        assert desired_limit <= hard_limit, (
+            'The hard limit for number of open files per process is %s which '
+            'is lower than the desired limit of %s.' %
+            (hard_limit, desired_limit))
+        platformsettings.AdjustFdLimit(desired_limit, hard_limit)
+
+    try:
+      BaseHTTPServer.HTTPServer.__init__(self, (host, port), self.HANDLER)
+    except Exception, e:
+      raise HttpProxyServerError('Could not start HTTPServer on port %d: %s' %
+                                 (port, e))
+    self.http_archive_fetch = http_archive_fetch
+    self.custom_handlers = custom_handlers
+    self.use_delays = use_delays
+    self.is_ssl = is_ssl
+    self.traffic_shaping_down_bps = proxyshaper.GetBitsPerSecond(down_bandwidth)
+    self.traffic_shaping_up_bps = proxyshaper.GetBitsPerSecond(up_bandwidth)
+    self.traffic_shaping_delay_ms = int(delay_ms)
+    self.num_active_requests = 0
+    self.num_active_connections = 0
+    self.total_request_time = 0
+    self.protocol = protocol
+    self.log_url = rules.Find('log_url')
+
+    # Note: This message may be scraped. Do not change it.
+    logging.warning(
+        '%s server started on %s:%d' % (self.protocol, self.server_address[0],
+                                        self.server_address[1]))
+
+  def cleanup(self):
+    try:
+      self.shutdown()
+      self.server_close()
+    except KeyboardInterrupt:
+      pass
+    logging.info('Stopped %s server. Total time processing requests: %dms',
+                 self.protocol, self.total_request_time)
+
+  def get_active_request_count(self):
+    return self.num_active_requests
+
+  def get_request(self):
+    self.num_active_connections += 1
+    if self.num_active_connections >= HttpProxyServer.connection_limit:
+      logging.error(
+          'Number of active connections (%s) surpasses the '
+          'supported limit of %s.' %
+          (self.num_active_connections, HttpProxyServer.connection_limit))
+    return BaseHTTPServer.HTTPServer.get_request(self)
+
+  def close_request(self, request):
+    BaseHTTPServer.HTTPServer.close_request(self, request)
+    self.num_active_connections -= 1
+
+
+class HttpsProxyServer(HttpProxyServer):
+  """SSL server that generates certs for each host."""
+
+  def __init__(self, http_archive_fetch, custom_handlers, rules,
+               https_root_ca_cert_path, **kwargs):
+    self.ca_cert_path = https_root_ca_cert_path
+    self.HANDLER = sslproxy.wrap_handler(HttpArchiveHandler)
+    HttpProxyServer.__init__(self, http_archive_fetch, custom_handlers, rules,
+                             is_ssl=True, protocol='HTTPS', **kwargs)
+    with open(self.ca_cert_path, 'r') as cert_file:
+      self._ca_cert_str = cert_file.read()
+    self._host_to_cert_map = {}
+    self._server_cert_to_cert_map = {}
+
+  def cleanup(self):
+    try:
+      self.shutdown()
+      self.server_close()
+    except KeyboardInterrupt:
+      pass
+
+  def get_certificate(self, host):
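+    # Certificates are cached at two levels: per host and per archived server
+    # cert, so hosts that share a server certificate reuse the same generated
+    # certificate.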
+    if host in self._host_to_cert_map:
+      return self._host_to_cert_map[host]
+
+    server_cert = self.http_archive_fetch.http_archive.get_server_cert(host)
+    if server_cert in self._server_cert_to_cert_map:
+      cert = self._server_cert_to_cert_map[server_cert]
+      self._host_to_cert_map[host] = cert
+      return cert
+
+    cert = certutils.generate_cert(self._ca_cert_str, server_cert, host)
+    self._server_cert_to_cert_map[server_cert] = cert
+    self._host_to_cert_map[host] = cert
+    return cert
+
+  def handle_error(self, request, client_address):
+    _HandleSSLCertificateError()
+
+
+class SingleCertHttpsProxyServer(HttpProxyServer):
+  """SSL server."""
+
+  def __init__(self, http_archive_fetch, custom_handlers, rules,
+               https_root_ca_cert_path, **kwargs):
+    HttpProxyServer.__init__(self, http_archive_fetch, custom_handlers, rules,
+                             is_ssl=True, protocol='HTTPS', **kwargs)
+    self.socket = ssl.wrap_socket(
+        self.socket, certfile=https_root_ca_cert_path, server_side=True,
+        do_handshake_on_connect=False)
+    # Ancestor class, DaemonServer, calls serve_forever() during its __init__.
+
+  def handle_error(self, request, client_address):
+    _HandleSSLCertificateError()
+
+
+class HttpToHttpsProxyServer(HttpProxyServer):
+  """Listens for HTTP requests but sends them to the target as HTTPS requests"""
+
+  def __init__(self, http_archive_fetch, custom_handlers, rules, **kwargs):
+    HttpProxyServer.__init__(self, http_archive_fetch, custom_handlers, rules,
+                             is_ssl=True, protocol='HTTP-to-HTTPS', **kwargs)
+
+  def handle_error(self, request, client_address):
+    _HandleSSLCertificateError()
diff --git a/catapult/telemetry/third_party/webpagereplay/httpproxy_test.py b/catapult/telemetry/third_party/webpagereplay/httpproxy_test.py
new file mode 100644
index 0000000..8e0b401
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/httpproxy_test.py
@@ -0,0 +1,189 @@
+#!/usr/bin/env python
+# Copyright 2015 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import httparchive
+import httplib
+import httpproxy
+import threading
+import unittest
+import util
+
+
+class MockCustomResponseHandler(object):
+  def __init__(self, response):
+    """
+    Args:
+      response: An instance of ArchivedHttpResponse that is returned for each
+          request.
+    """
+    self._response = response
+
+  def handle(self, request):
+    del request
+    return self._response
+
+
+class MockHttpArchiveFetch(object):
+  def __init__(self):
+    self.is_record_mode = False
+
+  def __call__(self, request):
+    return None
+
+
+class MockHttpArchiveHandler(httpproxy.HttpArchiveHandler):
+  def handle_one_request(self):
+    httpproxy.HttpArchiveHandler.handle_one_request(self)
+    HttpProxyTest.HANDLED_REQUEST_COUNT += 1
+
+
+class MockRules(object):
+  def Find(self, unused_rule_type_name):  # pylint: disable=unused-argument
+    return lambda unused_request, unused_response: None
+
+
+class HttpProxyTest(unittest.TestCase):
+  def setUp(self):
+    self.has_proxy_server_bound_port = False
+    self.has_proxy_server_started = False
+
+  def set_up_proxy_server(self, response):
+    """
+    Args:
+      response: An instance of ArchivedHttpResponse that is returned for each
+      request.
+    """
+    HttpProxyTest.HANDLED_REQUEST_COUNT = 0
+    self.host = 'localhost'
+    self.port = 8889
+    custom_handlers = MockCustomResponseHandler(response)
+    rules = MockRules()
+    http_archive_fetch = MockHttpArchiveFetch()
+    self.proxy_server = httpproxy.HttpProxyServer(
+        http_archive_fetch, custom_handlers, rules,
+        host=self.host, port=self.port)
+    self.proxy_server.RequestHandlerClass = MockHttpArchiveHandler
+    self.has_proxy_server_bound_port = True
+
+  def tearDown(self):
+    if self.has_proxy_server_started:
+      self.proxy_server.shutdown()
+    if self.has_proxy_server_bound_port:
+      self.proxy_server.server_close()
+
+  def serve_requests_forever(self):
+    self.has_proxy_server_started = True
+    self.proxy_server.serve_forever(poll_interval=0.01)
+
+  # Tests that handle_one_request does not leak threads, and does not try to
+  # re-handle connections that are finished.
+  def test_handle_one_request_closes_connection(self):
+    # By default, BaseHTTPServer.py treats all HTTP 1.1 requests as keep-alive.
+    # Intentionally use HTTP 1.0 to prevent this behavior.
+    response = httparchive.ArchivedHttpResponse(
+        version=10, status=200, reason="OK",
+        headers=[], response_data=["bat1"])
+    self.set_up_proxy_server(response)
+    t = threading.Thread(
+        target=HttpProxyTest.serve_requests_forever, args=(self,))
+    t.start()
+
+    initial_thread_count = threading.activeCount()
+
+    # Make a bunch of requests.
+    request_count = 10
+    for _ in range(request_count):
+      conn = httplib.HTTPConnection('localhost', 8889, timeout=10)
+      conn.request("GET", "/index.html")
+      res = conn.getresponse().read()
+      self.assertEqual(res, "bat1")
+      conn.close()
+
+    # Check to make sure that there is no leaked thread.
+    util.WaitFor(lambda: threading.activeCount() == initial_thread_count, 2)
+
+    self.assertEqual(request_count, HttpProxyTest.HANDLED_REQUEST_COUNT)
+
+
+  # Tests that the keep-alive header works.
+  def test_keep_alive_header(self):
+    response = httparchive.ArchivedHttpResponse(
+        version=11, status=200, reason="OK",
+        headers=[("Connection", "keep-alive")], response_data=["bat1"])
+    self.set_up_proxy_server(response)
+    t = threading.Thread(
+        target=HttpProxyTest.serve_requests_forever, args=(self,))
+    t.start()
+
+    initial_thread_count = threading.activeCount()
+
+    # Make a bunch of requests.
+    request_count = 10
+    connections = []
+    for _ in range(request_count):
+      conn = httplib.HTTPConnection('localhost', 8889, timeout=10)
+      conn.request("GET", "/index.html", headers={"Connection": "keep-alive"})
+      res = conn.getresponse().read()
+      self.assertEqual(res, "bat1")
+      connections.append(conn)
+
+    # Repeat the same requests.
+    for conn in connections:
+      conn.request("GET", "/index.html", headers={"Connection": "keep-alive"})
+      res = conn.getresponse().read()
+      self.assertEqual(res, "bat1")
+
+    # Check that the right number of requests have been handled.
+    self.assertEqual(2 * request_count, HttpProxyTest.HANDLED_REQUEST_COUNT)
+
+    # Check to make sure that exactly "request_count" new threads are active.
+    self.assertEqual(
+        threading.activeCount(), initial_thread_count + request_count)
+
+    for conn in connections:
+      conn.close()
+
+    util.WaitFor(lambda: threading.activeCount() == initial_thread_count, 1)
+
+  # Tests that opening 400 simultaneous connections does not cause httpproxy
+  # to hit the process fd limit (the default soft limit is 256 fds on OS X,
+  # for example).
+  def test_max_fd(self):
+    response = httparchive.ArchivedHttpResponse(
+        version=11, status=200, reason="OK",
+        headers=[("Connection", "keep-alive")], response_data=["bat1"])
+    self.set_up_proxy_server(response)
+    t = threading.Thread(
+        target=HttpProxyTest.serve_requests_forever, args=(self,))
+    t.start()
+
+    # Make a bunch of requests.
+    request_count = 400
+    connections = []
+    for _ in range(request_count):
+      conn = httplib.HTTPConnection('localhost', 8889, timeout=10)
+      conn.request("GET", "/index.html", headers={"Connection": "keep-alive"})
+      res = conn.getresponse().read()
+      self.assertEqual(res, "bat1")
+      connections.append(conn)
+
+    # Check that the right number of requests have been handled.
+    self.assertEqual(request_count, HttpProxyTest.HANDLED_REQUEST_COUNT)
+
+    for conn in connections:
+      conn.close()
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/catapult/telemetry/third_party/webpagereplay/httpzlib.py b/catapult/telemetry/third_party/webpagereplay/httpzlib.py
new file mode 100644
index 0000000..b06cb29
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/httpzlib.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+# Copyright 2011 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Apply gzip/deflate to separate chunks of data."""
+
+import struct
+import zlib
+
+GZIP_HEADER = (
+    '\037\213'             # magic header
+    '\010'                 # compression method
+    '\000'                 # flags (none)
+    '\000\000\000\000'     # packed time (use zero)
+    '\002'
+    '\377')
+
+
+def compress_chunks(uncompressed_chunks, use_gzip):
+  """Compress a list of data with gzip or deflate.
+
+  The returned chunks may be used with HTTP chunked encoding.
+
+  Args:
+    uncompressed_chunks: a list of strings
+       (e.g. ["this is the first chunk", "and the second"])
+    use_gzip: if True, compress with gzip. Otherwise, use deflate.
+
+  Returns:
+    [compressed_chunk_1, compressed_chunk_2, ...]
+  """
+  if use_gzip:
+    size = 0
+    crc = zlib.crc32("") & 0xffffffffL
+    compressor = zlib.compressobj(
+        6, zlib.DEFLATED, -zlib.MAX_WBITS, zlib.DEF_MEM_LEVEL, 0)
+  else:
+    compressor = zlib.compressobj()
+  compressed_chunks = []
+  last_index = len(uncompressed_chunks) - 1
+  for index, data in enumerate(uncompressed_chunks):
+    chunk = ''
+    if use_gzip:
+      size += len(data)
+      crc = zlib.crc32(data, crc) & 0xffffffffL
+      if index == 0:
+        chunk += GZIP_HEADER
+    chunk += compressor.compress(data)
+    if index < last_index:
+      chunk += compressor.flush(zlib.Z_SYNC_FLUSH)
+    else:
+      chunk += (compressor.flush(zlib.Z_FULL_FLUSH) +
+                compressor.flush())
+      if use_gzip:
+        chunk += (struct.pack("<L", long(crc)) +
+                  struct.pack("<L", long(size)))
+    compressed_chunks.append(chunk)
+  return compressed_chunks
+
+
+def uncompress_chunks(compressed_chunks, use_gzip):
+  """Uncompress a list of data compressed with gzip or deflate.
+
+  Args:
+    compressed_chunks: a list of compressed data
+    use_gzip: if True, uncompress with gzip. Otherwise, use deflate.
+
+  Returns:
+    [uncompressed_chunk_1, uncompressed_chunk_2, ...]
+  """
+  if use_gzip:
+    decompress = zlib.decompressobj(16 + zlib.MAX_WBITS).decompress
+  else:
+    decompress = zlib.decompressobj(-zlib.MAX_WBITS).decompress
+  return [decompress(c) for c in compressed_chunks]
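+
+
+# Illustrative round-trip (a sketch, not part of the upstream module): chunk
+# lists produced by compress_chunks() are meant to be reversible with
+# uncompress_chunks() using the same use_gzip flag, e.g.
+#
+#   chunks = ['this is the first chunk', 'and the second']
+#   packed = compress_chunks(chunks, use_gzip=True)
+#   assert ''.join(uncompress_chunks(packed, True)) == ''.join(chunks)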
diff --git a/catapult/telemetry/third_party/webpagereplay/mock-archive.txt b/catapult/telemetry/third_party/webpagereplay/mock-archive.txt
new file mode 100644
index 0000000..a90bb03
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/mock-archive.txt
@@ -0,0 +1,10 @@
+GET%www.zappos.com%/%%[('accept-encoding', 'gzip,deflate'), ('host', 'www.zappos.com')]
+GET%www.zappos.com%/css/print.20110525145237.css%%[('accept-encoding', 'gzip,deflate'), ('host', 'www.zappos.com')]
+GET%www.zappos.com%/favicon.ico%%[('accept-encoding', 'gzip,deflate'), ('host', 'www.zappos.com')]
+GET%www.zappos.com%/hydra/hydra.p.20110607.js%%[('accept-encoding', 'gzip,deflate'), ('host', 'www.zappos.com')]
+GET%www.zappos.com%/imgs/shadebg.20110525145241.png%%[('accept-encoding', 'gzip,deflate'), ('host', 'www.zappos.com')]
+GET%www.msn.com%/%%[('accept-encoding', 'gzip,deflate'), ('host', 'www.msn.com')]
+GET%www.msn.com%/?euid=&userGroup=W:default&PM=z:1%%[('accept-encoding', 'gzip,deflate'), ('host', 'www.msn.com'), ('x-requested-with', 'XMLHttpRequest')]
+GET%www.msn.com%/?euid=342%%[('accept-encoding', 'gzip,deflate'), ('host', 'www.msn.com'), ('x-requested-with', 'XMLHttpRequest')]
+GET%www.amazon.com%/%%[('accept-encoding', 'gzip,deflate'), ('host', 'www.amazon.com')]
+GET%www.google.com%/%%[('accept-encoding', 'gzip,deflate'), ('host', 'www.google.com')]
diff --git a/catapult/telemetry/third_party/webpagereplay/mockhttprequest.py b/catapult/telemetry/third_party/webpagereplay/mockhttprequest.py
new file mode 100644
index 0000000..ac5df99
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/mockhttprequest.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+# Copyright 2010 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Mock instance of ArchivedHttpRequest used for testing."""
+
+
+class ArchivedHttpRequest(object):
+  """Mock instance of ArchivedHttpRequest in HttpArchive."""
+
+  def __init__(self, command, host, path, request_body, headers):
+    """Initialize an ArchivedHttpRequest.
+
+    Args:
+      command: a string (e.g. 'GET' or 'POST').
+      host: a host name (e.g. 'www.google.com').
+      path: a request path (e.g. '/search?q=dogs').
+      request_body: a request body string for a POST or None.
+      headers: [(header1, value1), ...] list of tuples
+    """
+    self.command = command
+    self.host = host
+    self.path = path
+    self.request_body = request_body
+    self.headers = headers
+    self.trimmed_headers = headers
+
+  def __str__(self):
+    return '%s %s%s %s' % (self.command, self.host, self.path,
+                           self.trimmed_headers)
+
+  def __repr__(self):
+    return repr((self.command, self.host, self.path, self.request_body,
+                 self.trimmed_headers))
+
+  def __hash__(self):
+    """Return a integer hash to use for hashed collections including dict."""
+    return hash(repr(self))
+
+  def __eq__(self, other):
+    """Define the __eq__ method to match the hash behavior."""
+    return repr(self) == repr(other)
+
+  def matches(self, command=None, host=None, path=None):
+    """Returns true iff the request matches all parameters."""
+    return ((command is None or command == self.command) and
+            (host is None or host == self.host) and
+            (path is None or path == self.path))
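+
+
+# Illustrative usage (a sketch; the header list mirrors mock-archive.txt):
+#   req = ArchivedHttpRequest('GET', 'www.google.com', '/', None,
+#                             [('accept-encoding', 'gzip,deflate')])
+#   req.matches(host='www.google.com')  # -> True
+#   req.matches(command='POST')         # -> False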
diff --git a/catapult/telemetry/third_party/webpagereplay/net_configs.py b/catapult/telemetry/third_party/webpagereplay/net_configs.py
new file mode 100644
index 0000000..644358e
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/net_configs.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Defines a list of common network speeds.
+
+These values come from http://www.webpagetest.org/
+
+See:
+https://sites.google.com/a/webpagetest.org/docs/other-resources/2011-fcc-broadband-data
+https://github.com/WPO-Foundation/webpagetest/blob/HEAD/www/settings/connectivity.ini.sample
+"""
+
+import collections
+
+
+NetConfig = collections.namedtuple('NetConfig', ['down', 'up', 'delay_ms'])
+
+
+# pylint: disable=bad-whitespace
+_NET_CONFIGS = {
+    'dialup': NetConfig(down=   '49Kbit/s', up=  '30Kbit/s', delay_ms= '120'),
+    '3g':     NetConfig(down= '1638Kbit/s', up= '768Kbit/s', delay_ms= '150'),
+    'dsl':    NetConfig(down= '1536Kbit/s', up= '384Kbit/s', delay_ms=  '50'),
+    'cable':  NetConfig(down=    '5Mbit/s', up=   '1Mbit/s', delay_ms=  '28'),
+    'fios':   NetConfig(down=   '20Mbit/s', up=   '5Mbit/s', delay_ms=   '4'),
+    }
+
+
+NET_CONFIG_NAMES = _NET_CONFIGS.keys()
+
+
+def GetNetConfig(key):
+  """Returns the NetConfig object corresponding to the given |key|."""
+  if key not in _NET_CONFIGS:
+    raise KeyError('No net config with key: %s' % key)
+  return _NET_CONFIGS[key]
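+
+
+# Illustrative usage (a sketch, not part of the upstream module):
+#   config = GetNetConfig('cable')
+#   # -> NetConfig(down='5Mbit/s', up='1Mbit/s', delay_ms='28')
+# Unknown keys raise KeyError; valid keys are listed in NET_CONFIG_NAMES.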
diff --git a/catapult/telemetry/third_party/webpagereplay/platformsettings.py b/catapult/telemetry/third_party/webpagereplay/platformsettings.py
new file mode 100644
index 0000000..81cb56c
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/platformsettings.py
@@ -0,0 +1,794 @@
+#!/usr/bin/env python
+# Copyright 2010 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Provides cross-platform utility functions.
+
+Example:
+  import platformsettings
+  ip = platformsettings.get_server_ip_address()
+
+Functions with "_temporary_" in their name automatically clean-up upon
+termination (via the atexit module).
+
+For the full list of functions, see the bottom of the file.
+"""
+
+import atexit
+import distutils.spawn
+import distutils.version
+import fileinput
+import logging
+import os
+import platform
+import re
+import socket
+import stat
+import subprocess
+import sys
+import time
+import urlparse
+
+
+class PlatformSettingsError(Exception):
+  """Module catch-all error."""
+  pass
+
+
+class DnsReadError(PlatformSettingsError):
+  """Raised when unable to read DNS settings."""
+  pass
+
+
+class DnsUpdateError(PlatformSettingsError):
+  """Raised when unable to update DNS settings."""
+  pass
+
+
+class NotAdministratorError(PlatformSettingsError):
+  """Raised when not running as administrator."""
+  pass
+
+
+class CalledProcessError(PlatformSettingsError):
+  """Raised when a _check_output() process returns a non-zero exit status."""
+  def __init__(self, returncode, cmd):
+    super(CalledProcessError, self).__init__()
+    self.returncode = returncode
+    self.cmd = cmd
+
+  def __str__(self):
+    return 'Command "%s" returned non-zero exit status %d' % (
+        ' '.join(self.cmd), self.returncode)
+
+
+def FindExecutable(executable):
+  """Finds the given executable in PATH.
+
+  Since WPR may be invoked via sudo, which resets PATH to a minimal set of
+  directories, we also hardcode a few common paths.
+
+  Returns:
+    The fully qualified path with .exe appended if appropriate or None if it
+    doesn't exist.
+  """
+  return distutils.spawn.find_executable(executable,
+                                         os.pathsep.join([os.environ['PATH'],
+                                                          '/sbin',
+                                                          '/usr/bin',
+                                                          '/usr/sbin/',
+                                                          '/usr/local/sbin',
+                                                          ]))
+
+def HasSniSupport():
+  try:
+    import OpenSSL
+    return (distutils.version.StrictVersion(OpenSSL.__version__) >=
+            distutils.version.StrictVersion('0.13'))
+  except ImportError:
+    return False
+
+
+def SupportsFdLimitControl():
+  """Whether the platform supports changing the process fd limit."""
+  return os.name == 'posix'
+
+
+def GetFdLimit():
+  """Returns a tuple of (soft_limit, hard_limit)."""
+  import resource
+  return resource.getrlimit(resource.RLIMIT_NOFILE)
+
+
+def AdjustFdLimit(new_soft_limit, new_hard_limit):
+  """Sets a new soft and hard limit for max number of fds."""
+  import resource
+  resource.setrlimit(resource.RLIMIT_NOFILE, (new_soft_limit, new_hard_limit))
+
+
+class SystemProxy(object):
+  """A host/port pair for a HTTP or HTTPS proxy configuration."""
+
+  def __init__(self, host, port):
+    """Initialize a SystemProxy instance.
+
+    Args:
+      host: a host name or IP address string (e.g. "example.com" or "1.1.1.1").
+      port: a port string or integer (e.g. "8888" or 8888).
+    """
+    self.host = host
+    self.port = int(port) if port else None
+
+  def __nonzero__(self):
+    """True if the host is set."""
+    return bool(self.host)
+
+  @classmethod
+  def from_url(cls, proxy_url):
+    """Create a SystemProxy instance.
+
+    If proxy_url is None, an empty string, or an invalid URL, the
+    SystemProxy instance will have None for both the host and port
+    (no exception is raised).
+
+    Args:
+      proxy_url: a proxy url string such as "http://proxy.com:8888/".
+    Returns:
+      a SystemProxy instance.
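+
+    Example (illustrative):
+      from_url('http://proxy.com:8888/') -> host 'proxy.com', port 8888
+      from_url('') -> a falsy SystemProxy with host None and port None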
+    """
+    if proxy_url:
+      parse_result = urlparse.urlparse(proxy_url)
+      return cls(parse_result.hostname, parse_result.port)
+    return cls(None, None)
+
+
+class _BasePlatformSettings(object):
+
+  def get_system_logging_handler(self):
+    """Return a handler for the logging module (optional)."""
+    return None
+
+  def rerun_as_administrator(self):
+    """If needed, rerun the program with administrative privileges.
+
+    Raises NotAdministratorError if unable to rerun.
+    """
+    pass
+
+  def timer(self):
+    """Return the current time in seconds as a floating point number."""
+    return time.time()
+
+  def get_server_ip_address(self, is_server_mode=False):
+    """Returns the IP address to use for dnsproxy and ipfw."""
+    if is_server_mode:
+      return socket.gethostbyname(socket.gethostname())
+    return '127.0.0.1'
+
+  def get_httpproxy_ip_address(self, is_server_mode=False):
+    """Returns the IP address to use for httpproxy."""
+    if is_server_mode:
+      return '0.0.0.0'
+    return '127.0.0.1'
+
+  def get_system_proxy(self, use_ssl):
+    """Returns the system HTTP(S) proxy host, port."""
+    del use_ssl
+    return SystemProxy(None, None)
+
+  def _ipfw_cmd(self):
+    raise NotImplementedError
+
+  def ipfw(self, *args):
+    ipfw_cmd = (self._ipfw_cmd(), ) + args
+    return self._check_output(*ipfw_cmd, elevate_privilege=True)
+
+  def has_ipfw(self):
+    try:
+      self.ipfw('list')
+      return True
+    except AssertionError as e:
+      logging.warning('Failed to start ipfw command. '
+                      'Error: %s' % e.message)
+      return False
+
+  def _get_cwnd(self):
+    return None
+
+  def _set_cwnd(self, args):
+    pass
+
+  def _elevate_privilege_for_cmd(self, args):
+    return args
+
+  def _check_output(self, *args, **kwargs):
+    """Run Popen(*args) and return its output as a byte string.
+
+    Python 2.7 has subprocess.check_output. This is essentially the same
+    except that, as a convenience, all the positional args are used as
+    command arguments and the |elevate_privilege| kwarg is supported.
+
+    Args:
+      *args: sequence of program arguments
+      elevate_privilege: Run the command with elevated privileges.
+    Raises:
+      CalledProcessError if the program returns non-zero exit status.
+    Returns:
+      output as a byte string.
+    """
+    command_args = [str(a) for a in args]
+
+    if os.path.sep not in command_args[0]:
+      qualified_command = FindExecutable(command_args[0])
+      assert qualified_command, 'Failed to find %s in path' % command_args[0]
+      command_args[0] = qualified_command
+
+    if kwargs.get('elevate_privilege'):
+      command_args = self._elevate_privilege_for_cmd(command_args)
+
+    logging.debug(' '.join(command_args))
+    process = subprocess.Popen(
+        command_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+    output = process.communicate()[0]
+    retcode = process.poll()
+    if retcode:
+      raise CalledProcessError(retcode, command_args)
+    return output
+
+  def set_temporary_tcp_init_cwnd(self, cwnd):
+    cwnd = int(cwnd)
+    original_cwnd = self._get_cwnd()
+    if original_cwnd is None:
+      raise PlatformSettingsError('Unable to get current tcp init_cwnd.')
+    if cwnd == original_cwnd:
+      logging.info('TCP init_cwnd already set to target value: %s', cwnd)
+    else:
+      self._set_cwnd(cwnd)
+      if self._get_cwnd() == cwnd:
+        logging.info('Changed cwnd to %s', cwnd)
+        atexit.register(self._set_cwnd, original_cwnd)
+      else:
+        logging.error('Unable to update cwnd to %s', cwnd)
+
+  def setup_temporary_loopback_config(self):
+    """Setup the loopback interface similar to real interface.
+
+    We use loopback for much of our testing, and on some systems, loopback
+    behaves differently from real interfaces.
+    """
+    logging.error('Platform does not support loopback configuration.')
+
+  def _save_primary_interface_properties(self):
+    self._orig_nameserver = self.get_original_primary_nameserver()
+
+  def _restore_primary_interface_properties(self):
+    self._set_primary_nameserver(self._orig_nameserver)
+
+  def _get_primary_nameserver(self):
+    raise NotImplementedError
+
+  def _set_primary_nameserver(self, _):
+    raise NotImplementedError
+
+  def get_original_primary_nameserver(self):
+    if not hasattr(self, '_original_nameserver'):
+      self._original_nameserver = self._get_primary_nameserver()
+      logging.info('Saved original primary DNS nameserver: %s',
+                   self._original_nameserver)
+    return self._original_nameserver
+
+  def set_temporary_primary_nameserver(self, nameserver):
+    self._save_primary_interface_properties()
+    self._set_primary_nameserver(nameserver)
+    if self._get_primary_nameserver() == nameserver:
+      logging.info('Changed temporary primary nameserver to %s', nameserver)
+      atexit.register(self._restore_primary_interface_properties)
+    else:
+      raise self._get_dns_update_error()
+
+
+class _PosixPlatformSettings(_BasePlatformSettings):
+
+  # pylint: disable=abstract-method
+  # Suppress lint check for _get_primary_nameserver & _set_primary_nameserver
+
+  def rerun_as_administrator(self):
+    """If needed, rerun the program with administrative privileges.
+
+    Raises NotAdministratorError if unable to rerun.
+    """
+    if os.geteuid() != 0:
+      logging.warn('Rerunning with sudo: %s', sys.argv)
+      os.execv('/usr/bin/sudo', ['--'] + sys.argv)
+
+  def _elevate_privilege_for_cmd(self, args):
+    def IsSetUID(path):
+      return (os.stat(path).st_mode & stat.S_ISUID) == stat.S_ISUID
+
+    def IsElevated():
+      p = subprocess.Popen(
+          ['sudo', '-nv'], stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+          stderr=subprocess.STDOUT)
+      stdout = p.communicate()[0]
+      # Some versions of sudo set the returncode based on whether sudo requires
+      # a password currently. Other versions return output when password is
+      # required and no output when the user is already authenticated.
+      return not p.returncode and not stdout
+
+    if not IsSetUID(args[0]):
+      args = ['sudo'] + args
+
+      if not IsElevated():
+        print 'WPR needs to run %s under sudo. Please authenticate.' % args[1]
+        subprocess.check_call(['sudo', '-v'])  # Synchronously authenticate.
+
+        prompt = ('Would you like to always allow %s to run without sudo '
+                  '(via `sudo chmod +s %s`)? (y/N)' % (args[1], args[1]))
+        if raw_input(prompt).lower() == 'y':
+          subprocess.check_call(['sudo', 'chmod', '+s', args[1]])
+    return args
+
+  def get_system_proxy(self, use_ssl):
+    """Returns the system HTTP(S) proxy host, port."""
+    proxy_url = os.environ.get('https_proxy' if use_ssl else 'http_proxy')
+    return SystemProxy.from_url(proxy_url)
+
+  def _ipfw_cmd(self):
+    return 'ipfw'
+
+  def _get_dns_update_error(self):
+    return DnsUpdateError('Did you run under sudo?')
+
+  def _sysctl(self, *args, **kwargs):
+    sysctl_args = [FindExecutable('sysctl')]
+    if kwargs.get('use_sudo'):
+      sysctl_args = self._elevate_privilege_for_cmd(sysctl_args)
+    sysctl_args.extend(str(a) for a in args)
+    sysctl = subprocess.Popen(
+        sysctl_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+    stdout = sysctl.communicate()[0]
+    return sysctl.returncode, stdout
+
+  def has_sysctl(self, name):
+    if not hasattr(self, 'has_sysctl_cache'):
+      self.has_sysctl_cache = {}
+    if name not in self.has_sysctl_cache:
+      self.has_sysctl_cache[name] = self._sysctl(name)[0] == 0
+    return self.has_sysctl_cache[name]
+
+  def set_sysctl(self, name, value):
+    rv = self._sysctl('%s=%s' % (name, value), use_sudo=True)[0]
+    if rv != 0:
+      logging.error('Unable to set sysctl %s: %s', name, rv)
+
+  def get_sysctl(self, name):
+    rv, value = self._sysctl('-n', name)
+    if rv == 0:
+      return value
+    else:
+      logging.error('Unable to get sysctl %s: %s', name, rv)
+      return None
+
+
+class _OsxPlatformSettings(_PosixPlatformSettings):
+  LOCAL_SLOWSTART_MIB_NAME = 'net.inet.tcp.local_slowstart_flightsize'
+
+  def _scutil(self, cmd):
+    scutil = subprocess.Popen([FindExecutable('scutil')],
+                               stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+    return scutil.communicate(cmd)[0]
+
+  def _ifconfig(self, *args):
+    return self._check_output('ifconfig', *args, elevate_privilege=True)
+
+  def set_sysctl(self, name, value):
+    rv = self._sysctl('-w', '%s=%s' % (name, value), use_sudo=True)[0]
+    if rv != 0:
+      logging.error('Unable to set sysctl %s: %s', name, rv)
+
+  def _get_cwnd(self):
+    return int(self.get_sysctl(self.LOCAL_SLOWSTART_MIB_NAME))
+
+  def _set_cwnd(self, size):
+    self.set_sysctl(self.LOCAL_SLOWSTART_MIB_NAME, size)
+
+  def _get_loopback_mtu(self):
+    config = self._ifconfig('lo0')
+    match = re.search(r'\smtu\s+(\d+)', config)
+    return int(match.group(1)) if match else None
+
+  def setup_temporary_loopback_config(self):
+    """Configure loopback to temporarily use reasonably sized frames.
+
+    OS X uses jumbo frames by default (16KB).
+    """
+    TARGET_LOOPBACK_MTU = 1500
+    original_mtu = self._get_loopback_mtu()
+    if original_mtu is None:
+      logging.error('Unable to read loopback mtu. Setting left unchanged.')
+      return
+    if original_mtu == TARGET_LOOPBACK_MTU:
+      logging.debug('Loopback MTU already has target value: %d', original_mtu)
+    else:
+      self._ifconfig('lo0', 'mtu', TARGET_LOOPBACK_MTU)
+      if self._get_loopback_mtu() == TARGET_LOOPBACK_MTU:
+        logging.debug('Set loopback MTU to %d (was %d)',
+                      TARGET_LOOPBACK_MTU, original_mtu)
+        atexit.register(self._ifconfig, 'lo0', 'mtu', original_mtu)
+      else:
+        logging.error('Unable to change loopback MTU from %d to %d',
+                      original_mtu, TARGET_LOOPBACK_MTU)
+
+  def _get_dns_service_key(self):
+    output = self._scutil('show State:/Network/Global/IPv4')
+    lines = output.split('\n')
+    for line in lines:
+      key_value = line.split(' : ')
+      if key_value[0] == '  PrimaryService':
+        return 'State:/Network/Service/%s/DNS' % key_value[1]
+    raise DnsReadError('Unable to find DNS service key: %s' % output)
+
+  def _get_primary_nameserver(self):
+    output = self._scutil('show %s' % self._get_dns_service_key())
+    match = re.search(
+        br'ServerAddresses\s+:\s+<array>\s+{\s+0\s+:\s+((\d{1,3}\.){3}\d{1,3})',
+        output)
+    if match:
+      return match.group(1)
+    else:
+      raise DnsReadError('Unable to find primary DNS server: %s' % output)
+
+  def _set_primary_nameserver(self, dns):
+    command = '\n'.join([
+      'd.init',
+      'd.add ServerAddresses * %s' % dns,
+      'set %s' % self._get_dns_service_key()
+    ])
+    self._scutil(command)
+
+
+class _FreeBSDPlatformSettings(_PosixPlatformSettings):
+  """Partial implementation for FreeBSD.  Does not allow a DNS server to be
+  launched nor ipfw to be used.
+  """
+  RESOLV_CONF = '/etc/resolv.conf'
+
+  def _get_default_route_line(self):
+    raise NotImplementedError
+
+  def _set_cwnd(self, cwnd):
+    raise NotImplementedError
+
+  def _get_cwnd(self):
+    raise NotImplementedError
+
+  def setup_temporary_loopback_config(self):
+    raise NotImplementedError
+
+  def _write_resolve_conf(self, dns):
+    raise NotImplementedError
+
+  def _get_primary_nameserver(self):
+    try:
+      resolv_file = open(self.RESOLV_CONF)
+    except IOError:
+      raise DnsReadError()
+    for line in resolv_file:
+      if line.startswith('nameserver '):
+        return line.split()[1]
+    raise DnsReadError()
+
+  def _set_primary_nameserver(self, dns):
+    raise NotImplementedError
+
+
+class _LinuxPlatformSettings(_PosixPlatformSettings):
+  """The following thread recommends a way to update DNS on Linux:
+
+  http://ubuntuforums.org/showthread.php?t=337553
+
+         sudo cp /etc/dhcp3/dhclient.conf /etc/dhcp3/dhclient.conf.bak
+         sudo gedit /etc/dhcp3/dhclient.conf
+         #prepend domain-name-servers 127.0.0.1;
+         prepend domain-name-servers 208.67.222.222, 208.67.220.220;
+
+         prepend domain-name-servers 208.67.222.222, 208.67.220.220;
+         request subnet-mask, broadcast-address, time-offset, routers,
+             domain-name, domain-name-servers, host-name,
+             netbios-name-servers, netbios-scope;
+         #require subnet-mask, domain-name-servers;
+
+         sudo /etc/init.d/networking restart
+
+  The code below does not try to change dhcp and does not restart networking.
+  Update this as needed to make it more robust on more systems.
+  """
+  RESOLV_CONF = '/etc/resolv.conf'
+  ROUTE_RE = re.compile(r'initcwnd (\d+)')
+  TCP_BASE_MSS = 'net.ipv4.tcp_base_mss'
+  TCP_MTU_PROBING = 'net.ipv4.tcp_mtu_probing'
+
+  def _get_default_route_line(self):
+    stdout = self._check_output('ip', 'route')
+    for line in stdout.split('\n'):
+      if line.startswith('default'):
+        return line
+    return None
+
+  def _set_cwnd(self, cwnd):
+    default_line = self._get_default_route_line()
+    self._check_output(
+        'ip', 'route', 'change', default_line, 'initcwnd', str(cwnd))
+
+  def _get_cwnd(self):
+    default_line = self._get_default_route_line()
+    m = self.ROUTE_RE.search(default_line)
+    if m:
+      return int(m.group(1))
+    # If 'initcwnd' wasn't found, then 0 means it's the system default.
+    return 0
+
+  def setup_temporary_loopback_config(self):
+    """Setup Linux to temporarily use reasonably sized frames.
+
+    Linux uses jumbo frames by default (16KB), using the combination
+    of MTU probing and a base MSS makes it use normal sized packets.
+
+    The reason this works is because tcp_base_mss is only used when MTU
+    probing is enabled.  And since we're using the max value, it will
+    always use the reasonable size.  This is relevant for server-side realism.
+    The client-side will vary depending on the client TCP stack config.
+    """
+    ENABLE_MTU_PROBING = 2
+    original_probing = self.get_sysctl(self.TCP_MTU_PROBING)
+    self.set_sysctl(self.TCP_MTU_PROBING, ENABLE_MTU_PROBING)
+    atexit.register(self.set_sysctl, self.TCP_MTU_PROBING, original_probing)
+
+    TCP_FULL_MSS = 1460
+    original_mss = self.get_sysctl(self.TCP_BASE_MSS)
+    self.set_sysctl(self.TCP_BASE_MSS, TCP_FULL_MSS)
+    atexit.register(self.set_sysctl, self.TCP_BASE_MSS, original_mss)
+
+  def _write_resolve_conf(self, dns):
+    is_first_nameserver_replaced = False
+    # With inplace=1, fileinput redirects sys.stdout into the edited file,
+    # so the print statements below write into RESOLV_CONF.
+    for line in fileinput.input(self.RESOLV_CONF, inplace=1, backup='.bak'):
+      if line.startswith('nameserver ') and not is_first_nameserver_replaced:
+        print 'nameserver %s' % dns
+        is_first_nameserver_replaced = True
+      else:
+        print line,
+    if not is_first_nameserver_replaced:
+      raise DnsUpdateError('Could not find a suitable nameserver entry in %s' %
+                           self.RESOLV_CONF)
+
+  def _get_primary_nameserver(self):
+    try:
+      resolv_file = open(self.RESOLV_CONF)
+    except IOError:
+      raise DnsReadError()
+    for line in resolv_file:
+      if line.startswith('nameserver '):
+        return line.split()[1]
+    raise DnsReadError()
+
+  def _set_primary_nameserver(self, dns):
+    """Replace the first nameserver entry with the one given."""
+    try:
+      self._write_resolve_conf(dns)
+    except OSError, e:
+      if 'Permission denied' in e:
+        raise self._get_dns_update_error()
+      raise
+
+
+class _WindowsPlatformSettings(_BasePlatformSettings):
+
+  # pylint: disable=abstract-method
+  # Suppress lint check for _ipfw_cmd
+
+  def get_system_logging_handler(self):
+    """Return a handler for the logging module (optional).
+
+    For Windows, output can be viewed with DebugView.
+    http://technet.microsoft.com/en-us/sysinternals/bb896647.aspx
+    """
+    import ctypes
+    output_debug_string = ctypes.windll.kernel32.OutputDebugStringA
+    output_debug_string.argtypes = [ctypes.c_char_p]
+    class DebugViewHandler(logging.Handler):
+      def emit(self, record):
+        output_debug_string('[wpr] ' + self.format(record))
+    return DebugViewHandler()
+
+  def rerun_as_administrator(self):
+    """If needed, rerun the program with administrative privileges.
+
+    Raises NotAdministratorError if unable to rerun.
+    """
+    import ctypes
+    if not ctypes.windll.shell32.IsUserAnAdmin():
+      raise NotAdministratorError('Rerun with administrator privileges.')
+      #os.execv('runas', sys.argv)  # TODO: replace needed Windows magic
+
+  def timer(self):
+    """Return the current time in seconds as a floating point number.
+
+    From time module documentation:
+       On Windows, this function [time.clock()] returns wall-clock
+       seconds elapsed since the first call to this function, as a
+       floating point number, based on the Win32 function
+       QueryPerformanceCounter(). The resolution is typically better
+       than one microsecond.
+    """
+    return time.clock()
+
+  def _arp(self, *args):
+    return self._check_output('arp', *args)
+
+  def _route(self, *args):
+    return self._check_output('route', *args)
+
+  def _ipconfig(self, *args):
+    return self._check_output('ipconfig', *args)
+
+  def _get_mac_address(self, ip):
+    """Return the MAC address for the given ip."""
+    ip_re = re.compile(r'^\s*IP(?:v4)? Address[ .]+:\s+([0-9.]+)')
+    for line in self._ipconfig('/all').splitlines():
+      if line[:1].isalnum():
+        current_ip = None
+        current_mac = None
+      elif ':' in line:
+        line = line.strip()
+        ip_match = ip_re.match(line)
+        if ip_match:
+          current_ip = ip_match.group(1)
+        elif line.startswith('Physical Address'):
+          current_mac = line.split(':', 1)[1].lstrip()
+        if current_ip == ip and current_mac:
+          return current_mac
+    return None
+
+  def setup_temporary_loopback_config(self):
+    """On Windows, temporarily route the server ip to itself."""
+    ip = self.get_server_ip_address()
+    mac_address = self._get_mac_address(ip)
+    if mac_address:
+      self._arp('-s', ip, mac_address)
+      self._route('add', ip, ip, 'mask', '255.255.255.255')
+      atexit.register(self._arp, '-d', ip)
+      atexit.register(self._route, 'delete', ip, ip, 'mask', '255.255.255.255')
+    else:
+      logging.warn('Unable to configure loopback: MAC address not found.')
+    # TODO(slamm): Configure cwnd, MTU size
+
+  def _get_dns_update_error(self):
+    return DnsUpdateError('Did you run as administrator?')
+
+  def _netsh_show_dns(self):
+    """Return DNS information:
+
+    Example output:
+        Configuration for interface "Local Area Connection 3"
+        DNS servers configured through DHCP:  None
+        Register with which suffix:           Primary only
+
+        Configuration for interface "Wireless Network Connection 2"
+        DNS servers configured through DHCP:  192.168.1.1
+        Register with which suffix:           Primary only
+    """
+    return self._check_output('netsh', 'interface', 'ip', 'show', 'dns')
+
+  def _netsh_set_dns(self, iface_name, addr):
+    """Modify DNS information on the primary interface."""
+    output = self._check_output('netsh', 'interface', 'ip', 'set', 'dns',
+                                iface_name, 'static', addr)
+
+  def _netsh_set_dns_dhcp(self, iface_name):
+    """Modify DNS information on the primary interface."""
+    output = self._check_output('netsh', 'interface', 'ip', 'set', 'dns',
+                                iface_name, 'dhcp')
+
+  def _get_interfaces_with_dns(self):
+    output = self._netsh_show_dns()
+    lines = output.split('\n')
+    iface_re = re.compile(r'^Configuration for interface \"(?P<name>.*)\"')
+    dns_re = re.compile(r'(?P<kind>.*):\s+(?P<dns>\d+\.\d+\.\d+\.\d+)')
+    iface_name = None
+    iface_dns = None
+    iface_kind = None
+    ifaces = []
+    for line in lines:
+      iface_match = iface_re.match(line)
+      if iface_match:
+        iface_name = iface_match.group('name')
+      dns_match = dns_re.match(line)
+      if dns_match:
+        iface_dns = dns_match.group('dns')
+        iface_dns_config = dns_match.group('kind').strip()
+        if iface_dns_config == "Statically Configured DNS Servers":
+          iface_kind = "static"
+        elif iface_dns_config == "DNS servers configured through DHCP":
+          iface_kind = "dhcp"
+      if iface_name and iface_dns and iface_kind:
+        ifaces.append((iface_dns, iface_name, iface_kind))
+        iface_name = None
+        iface_dns = None
+    return ifaces
+
+  def _save_primary_interface_properties(self):
+    # TODO(etienneb): On Windows, an interface can have multiple DNS servers
+    # configured. We should save/restore all of them.
+    ifaces = self._get_interfaces_with_dns()
+    self._primary_interfaces = ifaces
+
+  def _restore_primary_interface_properties(self):
+    for iface in self._primary_interfaces:
+      (iface_dns, iface_name, iface_kind) = iface
+      self._netsh_set_dns(iface_name, iface_dns)
+      if iface_kind == "dhcp":
+        self._netsh_set_dns_dhcp(iface_name)
+
+  def _get_primary_nameserver(self):
+    ifaces = self._get_interfaces_with_dns()
+    if not ifaces:
+      raise DnsUpdateError(
+          'No interface with a valid DNS configuration was found.')
+    (iface_dns, iface_name, iface_kind) = ifaces[0]
+    return iface_dns
+
+  def _set_primary_nameserver(self, dns):
+    for iface in self._primary_interfaces:
+      (iface_dns, iface_name, iface_kind) = iface
+      self._netsh_set_dns(iface_name, dns)
+
+
+class _WindowsXpPlatformSettings(_WindowsPlatformSettings):
+  def _ipfw_cmd(self):
+    return (r'third_party\ipfw_win32\ipfw.exe',)
+
+
+def _new_platform_settings(system, release):
+  """Make a new instance of PlatformSettings for the current system."""
+  if system == 'Darwin':
+    return _OsxPlatformSettings()
+  if system == 'Linux':
+    return _LinuxPlatformSettings()
+  if system == 'Windows' and release == 'XP':
+    return _WindowsXpPlatformSettings()
+  if system == 'Windows':
+    return _WindowsPlatformSettings()
+  if system == 'FreeBSD':
+    return _FreeBSDPlatformSettings()
+  raise NotImplementedError('Sorry %s %s is not supported.' % (system, release))
+
+
+# Create one instance of the platform-specific settings and
+# make the functions available at the module-level.
+_inst = _new_platform_settings(platform.system(), platform.release())
+
+get_system_logging_handler = _inst.get_system_logging_handler
+rerun_as_administrator = _inst.rerun_as_administrator
+timer = _inst.timer
+
+get_server_ip_address = _inst.get_server_ip_address
+get_httpproxy_ip_address = _inst.get_httpproxy_ip_address
+get_system_proxy = _inst.get_system_proxy
+ipfw = _inst.ipfw
+has_ipfw = _inst.has_ipfw
+set_temporary_tcp_init_cwnd = _inst.set_temporary_tcp_init_cwnd
+setup_temporary_loopback_config = _inst.setup_temporary_loopback_config
+
+get_original_primary_nameserver = _inst.get_original_primary_nameserver
+set_temporary_primary_nameserver = _inst.set_temporary_primary_nameserver
diff --git a/catapult/telemetry/third_party/webpagereplay/platformsettings_test.py b/catapult/telemetry/third_party/webpagereplay/platformsettings_test.py
new file mode 100755
index 0000000..3172f9b
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/platformsettings_test.py
@@ -0,0 +1,248 @@
+#!/usr/bin/env python
+# Copyright 2011 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for platformsettings.
+
+Usage:
+$ ./platformsettings_test.py
+"""
+
+import unittest
+
+import platformsettings
+
+WINDOWS_7_IP = '172.11.25.170'
+WINDOWS_7_MAC = '00-1A-44-DA-88-C0'
+WINDOWS_7_IPCONFIG = """
+Windows IP Configuration
+
+   Host Name . . . . . . . . . . . . : THEHOST1-W
+   Primary Dns Suffix  . . . . . . . : something.example.com
+   Node Type . . . . . . . . . . . . : Hybrid
+   IP Routing Enabled. . . . . . . . : No
+   WINS Proxy Enabled. . . . . . . . : No
+   DNS Suffix Search List. . . . . . : example.com
+                                       another.example.com
+
+Ethernet adapter Local Area Connection:
+
+   Connection-specific DNS Suffix  . : somethingexample.com
+   Description . . . . . . . . . . . : Int PRO/1000 MT Network Connection
+   Physical Address. . . . . . . . . : %(mac_addr)s
+   DHCP Enabled. . . . . . . . . . . : Yes
+   Autoconfiguration Enabled . . . . : Yes
+   IPv6 Address. . . . . . . . . . . : 1234:0:1000:1200:839f:d256:3a6c:210(Preferred)
+   Temporary IPv6 Address. . . . . . : 2143:0:2100:1800:38f9:2d65:a3c6:120(Preferred)
+   Link-local IPv6 Address . . . . . : abcd::1234:1a33:b2cc:238%%18(Preferred)
+   IPv4 Address. . . . . . . . . . . : %(ip_addr)s(Preferred)
+   Subnet Mask . . . . . . . . . . . : 255.255.248.0
+   Lease Obtained. . . . . . . . . . : Thursday, April 28, 2011 9:40:22 PM
+   Lease Expires . . . . . . . . . . : Tuesday, May 10, 2011 12:15:48 PM
+   Default Gateway . . . . . . . . . : abcd::2:37ee:ef70:56%%18
+                                       172.11.25.254
+   DHCP Server . . . . . . . . . . . : 172.11.22.33
+   DNS Servers . . . . . . . . . . . : 8.8.4.4
+   NetBIOS over Tcpip. . . . . . . . : Enabled
+""" % {'ip_addr': WINDOWS_7_IP, 'mac_addr': WINDOWS_7_MAC}
+
+WINDOWS_XP_IP = '172.1.2.3'
+WINDOWS_XP_MAC = '00-34-B8-1F-FA-70'
+WINDOWS_XP_IPCONFIG = """
+Windows IP Configuration
+
+        Host Name . . . . . . . . . . . . : HOSTY-0
+        Primary Dns Suffix  . . . . . . . :
+        Node Type . . . . . . . . . . . . : Unknown
+        IP Routing Enabled. . . . . . . . : No
+        WINS Proxy Enabled. . . . . . . . : No
+        DNS Suffix Search List. . . . . . : example.com
+
+Ethernet adapter Local Area Connection 2:
+
+        Connection-specific DNS Suffix  . : example.com
+        Description . . . . . . . . . . . : Int Adapter (PILA8470B)
+        Physical Address. . . . . . . . . : %(mac_addr)s
+        Dhcp Enabled. . . . . . . . . . . : Yes
+        Autoconfiguration Enabled . . . . : Yes
+        IP Address. . . . . . . . . . . . : %(ip_addr)s
+        Subnet Mask . . . . . . . . . . . : 255.255.254.0
+        Default Gateway . . . . . . . . . : 172.1.2.254
+        DHCP Server . . . . . . . . . . . : 172.1.3.241
+        DNS Servers . . . . . . . . . . . : 172.1.3.241
+                                            8.8.8.8
+                                            8.8.4.4
+        Lease Obtained. . . . . . . . . . : Thursday, April 07, 2011 9:14:55 AM
+        Lease Expires . . . . . . . . . . : Thursday, April 07, 2011 1:14:55 PM
+""" % {'ip_addr': WINDOWS_XP_IP, 'mac_addr': WINDOWS_XP_MAC}
+
+
+# scutil show State:/Network/Global/IPv4
+OSX_IPV4_STATE = """
+<dictionary> {
+  PrimaryInterface : en1
+  PrimaryService : 8824452C-FED4-4C09-9256-40FB146739E0
+  Router : 192.168.1.1
+}
+"""
+
+# scutil show State:/Network/Service/[PRIMARY_SERVICE_KEY]/DNS
+OSX_DNS_STATE_LION = """
+<dictionary> {
+  DomainName : mtv.corp.google.com
+  SearchDomains : <array> {
+    0 : mtv.corp.google.com
+    1 : corp.google.com
+    2 : prod.google.com
+    3 : prodz.google.com
+    4 : google.com
+  }
+  ServerAddresses : <array> {
+    0 : 172.72.255.1
+    1 : 172.49.117.57
+    2 : 172.54.116.57
+  }
+}
+"""
+
+OSX_DNS_STATE_SNOW_LEOPARD = """
+<dictionary> {
+  ServerAddresses : <array> {
+    0 : 172.27.1.1
+    1 : 172.94.117.57
+    2 : 172.45.116.57
+  }
+  DomainName : mtv.corp.google.com
+  SearchDomains : <array> {
+    0 : mtv.corp.google.com
+    1 : corp.google.com
+    2 : prod.google.com
+    3 : prodz.google.com
+    4 : google.com
+  }
+}
+"""
+
+
+class SystemProxyTest(unittest.TestCase):
+
+  def test_basic(self):
+    system_proxy = platformsettings.SystemProxy(None, None)
+    self.assertEqual(None, system_proxy.host)
+    self.assertEqual(None, system_proxy.port)
+    self.assertFalse(system_proxy)
+
+  def test_from_url_empty(self):
+    system_proxy = platformsettings.SystemProxy.from_url('')
+    self.assertEqual(None, system_proxy.host)
+    self.assertEqual(None, system_proxy.port)
+    self.assertFalse(system_proxy)
+
+  def test_from_url_basic(self):
+    system_proxy = platformsettings.SystemProxy.from_url('http://pxy.com:8888/')
+    self.assertEqual('pxy.com', system_proxy.host)
+    self.assertEqual(8888, system_proxy.port)
+    self.assertTrue(system_proxy)
+
+  def test_from_url_no_port(self):
+    system_proxy = platformsettings.SystemProxy.from_url('http://pxy.com/')
+    self.assertEqual('pxy.com', system_proxy.host)
+    self.assertEqual(None, system_proxy.port)
+    self.assertTrue(system_proxy)
+
+  def test_from_url_empty_string(self):
+    system_proxy = platformsettings.SystemProxy.from_url('')
+    self.assertEqual(None, system_proxy.host)
+    self.assertEqual(None, system_proxy.port)
+    self.assertFalse(system_proxy)
+
+  def test_from_url_bad_string(self):
+    system_proxy = platformsettings.SystemProxy.from_url('foo:80')
+    self.assertEqual(None, system_proxy.host)
+    self.assertEqual(None, system_proxy.port)
+    self.assertFalse(system_proxy)
+
+
+class HasSniTest(unittest.TestCase):
+  def test_has_sni(self):
+    # Check that no exception is raised.
+    platformsettings.HasSniSupport()
+
+
+# pylint: disable=abstract-method
+class Win7Settings(platformsettings._WindowsPlatformSettings):
+  @classmethod
+  def _ipconfig(cls, *args):
+    if args == ('/all',):
+      return WINDOWS_7_IPCONFIG
+    raise RuntimeError
+
+class WinXpSettings(platformsettings._WindowsPlatformSettings):
+  @classmethod
+  def _ipconfig(cls, *args):
+    if args == ('/all',):
+      return WINDOWS_XP_IPCONFIG
+    raise RuntimeError
+
+
+class WindowsPlatformSettingsTest(unittest.TestCase):
+  def test_get_mac_address_xp(self):
+    self.assertEqual(WINDOWS_XP_MAC,
+                     WinXpSettings()._get_mac_address(WINDOWS_XP_IP))
+
+  def test_get_mac_address_7(self):
+    self.assertEqual(WINDOWS_7_MAC,
+                     Win7Settings()._get_mac_address(WINDOWS_7_IP))
+
+
+class OsxSettings(platformsettings._OsxPlatformSettings):
+  def __init__(self):
+    super(OsxSettings, self).__init__()
+    self.ipv4_state = OSX_IPV4_STATE
+    self.dns_state = None  # varies by test
+
+  def _scutil(self, cmd):
+    if cmd == 'show State:/Network/Global/IPv4':
+      return self.ipv4_state
+    elif cmd.startswith('show State:/Network/Service/'):
+      return self.dns_state
+    raise RuntimeError("Unrecognized cmd: %s", cmd)
+
+
+class OsxPlatformSettingsTest(unittest.TestCase):
+  def setUp(self):
+    self.settings = OsxSettings()
+
+  def test_get_primary_nameserver_lion(self):
+    self.settings.dns_state = OSX_DNS_STATE_LION
+    self.assertEqual('172.72.255.1', self.settings._get_primary_nameserver())
+
+  def test_get_primary_nameserver_snow_leopard(self):
+    self.settings.dns_state = OSX_DNS_STATE_SNOW_LEOPARD
+    self.assertEqual('172.27.1.1', self.settings._get_primary_nameserver())
+
+  def test_get_primary_nameserver_unexpected_ipv4_state_raises(self):
+    self.settings.ipv4_state = 'Some error'
+    self.settings.dns_state = OSX_DNS_STATE_SNOW_LEOPARD
+    self.assertRaises(platformsettings.DnsReadError,
+                      self.settings._get_primary_nameserver)
+
+  def test_get_primary_nameserver_unexpected_dns_state_raises(self):
+    self.settings.dns_state = 'Some other error'
+    self.assertRaises(platformsettings.DnsReadError,
+                      self.settings._get_primary_nameserver)
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/catapult/telemetry/third_party/webpagereplay/proxyshaper.py b/catapult/telemetry/third_party/webpagereplay/proxyshaper.py
new file mode 100644
index 0000000..6c6976f
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/proxyshaper.py
@@ -0,0 +1,125 @@
+#!/usr/bin/env python
+# Copyright 2012 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Simulate network characteristics directly in Python.
+
+Allows running replay without dummynet.
+"""
+
+import logging
+import platformsettings
+import re
+import time
+
+
+TIMER = platformsettings.timer
+
+
+class ProxyShaperError(Exception):
+  """Module catch-all error."""
+  pass
+
+class BandwidthValueError(ProxyShaperError):
+  """Raised for unexpected dummynet-style bandwidth value."""
+  pass
+
+
+class RateLimitedFile(object):
+  """Wrap a file like object with rate limiting.
+
+  TODO(slamm): Simulate slow-start.
+      Each RateLimitedFile corresponds to one direction of a
+      bidirectional socket. Slow-start can be added here (algorithm needed).
+      Will consider changing this class to take read and write files and
+      corresponding bit rates for each.
+  """
+  BYTES_PER_WRITE = 1460
+
+  def __init__(self, request_counter, f, bps):
+    """Initialize a RateLimiter.
+
+    Args:
+      request_counter: callable to see how many requests share the limit.
+      f: file-like object to wrap.
+      bps: an integer of bits per second.
+    """
+    self.request_counter = request_counter
+    self.original_file = f
+    self.bps = bps
+
+  def transfer_seconds(self, num_bytes):
+    """Seconds to read/write |num_bytes| with |self.bps|."""
+    return 8.0 * num_bytes / self.bps
+
+  def write(self, data):
+    num_bytes = len(data)
+    num_sent_bytes = 0
+    while num_sent_bytes < num_bytes:
+      num_write_bytes = min(self.BYTES_PER_WRITE, num_bytes - num_sent_bytes)
+      num_requests = self.request_counter()
+      wait = self.transfer_seconds(num_write_bytes) * num_requests
+      logging.debug('write sleep: %0.4fs (%d requests)', wait, num_requests)
+      time.sleep(wait)
+
+      self.original_file.write(
+          data[num_sent_bytes:num_sent_bytes + num_write_bytes])
+      num_sent_bytes += num_write_bytes
+
+  def _read(self, read_func, size):
+    start = TIMER()
+    data = read_func(size)
+    read_seconds = TIMER() - start
+    num_bytes = len(data)
+    num_requests = self.request_counter()
+    wait = self.transfer_seconds(num_bytes) * num_requests - read_seconds
+    if wait > 0:
+      logging.debug('read sleep: %0.4fs (%d requests)', wait, num_requests)
+      time.sleep(wait)
+    return data
+
+  def readline(self, size=-1):
+    return self._read(self.original_file.readline, size)
+
+  def read(self, size=-1):
+    return self._read(self.original_file.read, size)
+
+  def __getattr__(self, name):
+    """Forward any non-overriden calls."""
+    return getattr(self.original_file, name)
+
+
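+# Usage sketch (hypothetical helper, not part of the original module): wrap a
+# file-like object so reads are slowed to roughly |bps| divided across the
+# concurrent requests reported by |request_counter|.
+def _example_rate_limited_read():
+  import StringIO  # local import; only needed by this sketch
+  bps = 384000                 # 384Kbit/s
+  payload = ' ' * 4800         # 4800 bytes == 38400 bits
+  limited = RateLimitedFile(lambda: 1, StringIO.StringIO(payload), bps)
+  data = limited.read()        # sleeps roughly 38400 / 384000 = 0.1 seconds
+  return len(data)
+
+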
+def GetBitsPerSecond(bandwidth):
+  """Return bits per second represented by dummynet bandwidth option.
+
+  See ipfw/dummynet.c:read_bandwidth for how it is really done.
+
+  Args:
+    bandwidth: a dummynet-style bandwidth specification (e.g. "10Kbit/s")
+  """
+  if bandwidth == '0':
+    return 0
+  bw_re = r'^(\d+)(?:([KM])?(bit|Byte)/s)?$'
+  match = re.match(bw_re, str(bandwidth))
+  if not match:
+    raise BandwidthValueError('Value, "%s", does not match regex: %s' % (
+        bandwidth, bw_re))
+  bw = int(match.group(1))
+  if match.group(2) == 'K':
+    bw *= 1000
+  if match.group(2) == 'M':
+    bw *= 1000000
+  if match.group(3) == 'Byte':
+    bw *= 8
+  return bw
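+
+
+# Quick illustration (hypothetical helper, not part of the original module) of
+# the dummynet-style grammar GetBitsPerSecond accepts: optional K/M prefix and
+# a "bit/s" or "Byte/s" unit; bytes are converted to bits.
+def _example_bandwidth_values():
+  assert GetBitsPerSecond('0') == 0
+  assert GetBitsPerSecond('384Kbit/s') == 384000
+  assert GetBitsPerSecond('5Mbit/s') == 5000000
+  assert GetBitsPerSecond('2MByte/s') == 16000000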
diff --git a/catapult/telemetry/third_party/webpagereplay/proxyshaper_test.py b/catapult/telemetry/third_party/webpagereplay/proxyshaper_test.py
new file mode 100755
index 0000000..5c2e3ae
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/proxyshaper_test.py
@@ -0,0 +1,141 @@
+#!/usr/bin/env python
+# Copyright 2012 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for proxyshaper.
+
+Usage:
+$ ./proxyshaper_test.py
+"""
+
+import proxyshaper
+import StringIO
+import unittest
+
+
+# pylint: disable=bad-whitespace
+VALID_RATES = (
+    # input,       expected_bps
+    ( '384Kbit/s',   384000),
+    ('1536Kbit/s',  1536000),
+    (   '1Mbit/s',  1000000),
+    (   '5Mbit/s',  5000000),
+    (  '2MByte/s', 16000000),
+    (         '0',        0),
+    (         '5',        5),
+    (      384000,   384000),
+    )
+
+ERROR_RATES = (
+    '1536KBit/s',  # Older versions of dummynet used capital 'B' for bytes.
+    '1Mbyte/s',    # Require capital 'B' for bytes.
+    '5bps',
+    )
+
+
+class TimedTestCase(unittest.TestCase):
+  def assertValuesAlmostEqual(self, expected, actual, tolerance=0.05):
+    """Like the following with nicer default message:
+           assertTrue(expected <= actual + tolerance &&
+                      expected >= actual - tolerance)
+    """
+    delta = tolerance * expected
+    if actual > expected + delta or actual < expected - delta:
+      self.fail('%s is not equal to expected %s +/- %s%%' % (
+              actual, expected, 100 * tolerance))
+
+
+class RateLimitedFileTest(TimedTestCase):
+  def testReadLimitedBasic(self):
+    num_bytes = 1024
+    bps = 384000
+    request_counter = lambda: 1
+    f = StringIO.StringIO(' ' * num_bytes)
+    limited_f = proxyshaper.RateLimitedFile(request_counter, f, bps)
+    start = proxyshaper.TIMER()
+    self.assertEqual(num_bytes, len(limited_f.read()))
+    expected_ms = 8.0 * num_bytes / bps * 1000.0
+    actual_ms = (proxyshaper.TIMER() - start) * 1000.0
+    self.assertValuesAlmostEqual(expected_ms, actual_ms)
+
+  def testReadlineLimitedBasic(self):
+    num_bytes = 1024 * 8 + 512
+    bps = 384000
+    request_counter = lambda: 1
+    f = StringIO.StringIO(' ' * num_bytes)
+    limited_f = proxyshaper.RateLimitedFile(request_counter, f, bps)
+    start = proxyshaper.TIMER()
+    self.assertEqual(num_bytes, len(limited_f.readline()))
+    expected_ms = 8.0 * num_bytes / bps * 1000.0
+    actual_ms = (proxyshaper.TIMER() - start) * 1000.0
+    self.assertValuesAlmostEqual(expected_ms, actual_ms)
+
+  def testReadLimitedSlowedByMultipleRequests(self):
+    num_bytes = 1024
+    bps = 384000
+    request_count = 2
+    request_counter = lambda: request_count
+    f = StringIO.StringIO(' ' * num_bytes)
+    limited_f = proxyshaper.RateLimitedFile(request_counter, f, bps)
+    start = proxyshaper.TIMER()
+    read_data = limited_f.read()
+    self.assertEqual(num_bytes, len(read_data))
+    expected_ms = 8.0 * num_bytes / (bps / float(request_count)) * 1000.0
+    actual_ms = (proxyshaper.TIMER() - start) * 1000.0
+    self.assertValuesAlmostEqual(expected_ms, actual_ms)
+
+  def testWriteLimitedBasic(self):
+    num_bytes = 1024 * 10 + 350
+    bps = 384000
+    request_counter = lambda: 1
+    f = StringIO.StringIO()
+    limited_f = proxyshaper.RateLimitedFile(request_counter, f, bps)
+    start = proxyshaper.TIMER()
+    limited_f.write(' ' * num_bytes)
+    self.assertEqual(num_bytes, len(limited_f.getvalue()))
+    expected_ms = 8.0 * num_bytes / bps * 1000.0
+    actual_ms = (proxyshaper.TIMER() - start) * 1000.0
+    self.assertValuesAlmostEqual(expected_ms, actual_ms)
+
+  def testWriteLimitedSlowedByMultipleRequests(self):
+    num_bytes = 1024 * 10
+    bps = 384000
+    request_count = 2
+    request_counter = lambda: request_count
+    f = StringIO.StringIO(' ' * num_bytes)
+    limited_f = proxyshaper.RateLimitedFile(request_counter, f, bps)
+    start = proxyshaper.TIMER()
+    limited_f.write(' ' * num_bytes)
+    self.assertEqual(num_bytes, len(limited_f.getvalue()))
+    expected_ms = 8.0 * num_bytes / (bps / float(request_count)) * 1000.0
+    actual_ms = (proxyshaper.TIMER() - start) * 1000.0
+    self.assertValuesAlmostEqual(expected_ms, actual_ms)
+
+
+class GetBitsPerSecondTest(unittest.TestCase):
+  def testConvertsValidValues(self):
+    for dummynet_option, expected_bps in VALID_RATES:
+      bps = proxyshaper.GetBitsPerSecond(dummynet_option)
+      self.assertEqual(
+          expected_bps, bps, 'Unexpected result for %s: %s != %s' % (
+              dummynet_option, expected_bps, bps))
+
+  def testRaisesOnUnexpectedValues(self):
+    for dummynet_option in ERROR_RATES:
+      self.assertRaises(proxyshaper.BandwidthValueError,
+                        proxyshaper.GetBitsPerSecond, dummynet_option)
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/catapult/telemetry/third_party/webpagereplay/pylintrc b/catapult/telemetry/third_party/webpagereplay/pylintrc
new file mode 100644
index 0000000..27c3925
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/pylintrc
@@ -0,0 +1,17 @@
+[MESSAGES CONTROL]
+
+# Disable the message, report, category or checker with the given id(s).
+# TODO(wpr-owners): Reduce this list to as small as possible.
+disable=I0010,I0011,abstract-class-little-used,abstract-class-not-used,anomalous-backslash-in-string,bad-builtin,bad-context-manager,bad-continuation,bad-str-strip-call,broad-except,cell-var-from-loop,deprecated-lambda,deprecated-module,duplicate-code,eval-used,exec-used,fixme,function-redefined,global-statement,interface-not-implemented,invalid-name,locally-enabled,logging-not-lazy,missing-docstring,missing-final-newline,no-init,no-member,no-name-in-module,no-self-use,not-callable,star-args,too-few-public-methods,too-many-ancestors,too-many-arguments,too-many-branches,too-many-function-args,too-many-instance-attributes,too-many-lines,too-many-locals,too-many-public-methods,too-many-return-statements,too-many-statements,useless-else-on-loop,unused-variable,attribute-defined-outside-init,protected-access
+
+
+[REPORTS]
+
+# Don't write out full reports, just messages.
+reports=no
+
+
+[FORMAT]
+
+# We use two spaces for indents, instead of the usual four spaces or tab.
+indent-string='  '
diff --git a/catapult/telemetry/third_party/webpagereplay/replay.py b/catapult/telemetry/third_party/webpagereplay/replay.py
new file mode 100755
index 0000000..50762ce
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/replay.py
@@ -0,0 +1,556 @@
+#!/usr/bin/env python
+# Copyright 2010 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Replays web pages under simulated network conditions.
+
+Must be run as administrator (sudo).
+
+To record web pages:
+  1. Start the program in record mode.
+     $ sudo ./replay.py --record archive.wpr
+  2. Load the web pages you want to record in a web browser. It is important to
+     clear browser caches before this so that all subresources are requested
+     from the network.
+  3. Kill the process to stop recording.
+
+To replay web pages:
+  1. Start the program in replay mode with a previously recorded archive.
+     $ sudo ./replay.py archive.wpr
+  2. Load recorded pages in a web browser. A 404 will be served for any pages or
+     resources not in the recorded archive.
+
+Network simulation examples:
+  # 128KByte/s uplink bandwidth, 4Mbit/s downlink bandwidth, and 100ms RTT
+  $ sudo ./replay.py --up 128KByte/s --down 4Mbit/s --delay_ms=100 archive.wpr
+
+  # 1% packet loss rate
+  $ sudo ./replay.py --packet_loss_rate=0.01 archive.wpr
+"""
+
+import json
+import logging
+import optparse
+import os
+import socket
+import sys
+import traceback
+
+import customhandlers
+import dnsproxy
+import httparchive
+import httpclient
+import httpproxy
+import net_configs
+import platformsettings
+import rules_parser
+import script_injector
+import servermanager
+import trafficshaper
+
+if sys.version < '2.6':
+  print 'Need Python 2.6 or greater.'
+  sys.exit(1)
+
+
+def configure_logging(log_level_name, log_file_name=None):
+  """Configure logging level and format.
+
+  Args:
+    log_level_name: 'debug', 'info', 'warning', 'error', or 'critical'.
+    log_file_name: a file name
+  """
+  if logging.root.handlers:
+    logging.critical('A logging method (e.g. "logging.warn(...)")'
+                     ' was called before logging was configured.')
+  log_level = getattr(logging, log_level_name.upper())
+  log_format = (
+    '(%(levelname)s) %(asctime)s %(module)s.%(funcName)s:%(lineno)d  '
+    '%(message)s')
+
+  logging.basicConfig(level=log_level, format=log_format)
+  logger = logging.getLogger()
+  if log_file_name:
+    fh = logging.FileHandler(log_file_name)
+    fh.setLevel(log_level)
+    fh.setFormatter(logging.Formatter(log_format))
+    logger.addHandler(fh)
+  system_handler = platformsettings.get_system_logging_handler()
+  if system_handler:
+    logger.addHandler(system_handler)
+
+
+def AddDnsForward(server_manager, host):
+  """Forward DNS traffic."""
+  server_manager.Append(platformsettings.set_temporary_primary_nameserver, host)
+
+
+def AddDnsProxy(server_manager, options, host, port, real_dns_lookup,
+                http_archive):
+  dns_filters = []
+  if options.dns_private_passthrough:
+    private_filter = dnsproxy.PrivateIpFilter(real_dns_lookup, http_archive)
+    dns_filters.append(private_filter)
+    server_manager.AppendRecordCallback(private_filter.InitializeArchiveHosts)
+    server_manager.AppendReplayCallback(private_filter.InitializeArchiveHosts)
+  if options.shaping_dns:
+    delay_filter = dnsproxy.DelayFilter(options.record, **options.shaping_dns)
+    dns_filters.append(delay_filter)
+    server_manager.AppendRecordCallback(delay_filter.SetRecordMode)
+    server_manager.AppendReplayCallback(delay_filter.SetReplayMode)
+  server_manager.Append(dnsproxy.DnsProxyServer, host, port,
+                        dns_lookup=dnsproxy.ReplayDnsLookup(host, dns_filters))
+
+
+def AddWebProxy(server_manager, options, host, real_dns_lookup, http_archive):
+  if options.rules_path:
+    with open(options.rules_path) as file_obj:
+      allowed_imports = [
+          name.strip() for name in options.allowed_rule_imports.split(',')]
+      rules = rules_parser.Rules(file_obj, allowed_imports)
+    logging.info('Parsed %s rules:\n%s', options.rules_path, rules)
+  else:
+    rules = rules_parser.Rules()
+  inject_script = script_injector.GetInjectScript(options.inject_scripts)
+  custom_handlers = customhandlers.CustomHandlers(options, http_archive)
+  custom_handlers.add_server_manager_handler(server_manager)
+  archive_fetch = httpclient.ControllableHttpArchiveFetch(
+      http_archive, real_dns_lookup,
+      inject_script,
+      options.diff_unknown_requests, options.record,
+      use_closest_match=options.use_closest_match,
+      scramble_images=options.scramble_images)
+  server_manager.AppendRecordCallback(archive_fetch.SetRecordMode)
+  server_manager.AppendReplayCallback(archive_fetch.SetReplayMode)
+  server_manager.Append(
+      httpproxy.HttpProxyServer,
+      archive_fetch, custom_handlers, rules,
+      host=host, port=options.port, use_delays=options.use_server_delay,
+      **options.shaping_http)
+  if options.ssl:
+    if options.should_generate_certs:
+      server_manager.Append(
+          httpproxy.HttpsProxyServer, archive_fetch, custom_handlers, rules,
+          options.https_root_ca_cert_path, host=host, port=options.ssl_port,
+          use_delays=options.use_server_delay, **options.shaping_http)
+    else:
+      server_manager.Append(
+          httpproxy.SingleCertHttpsProxyServer, archive_fetch,
+          custom_handlers, rules, options.https_root_ca_cert_path, host=host,
+          port=options.ssl_port, use_delays=options.use_server_delay,
+          **options.shaping_http)
+  if options.http_to_https_port:
+    server_manager.Append(
+        httpproxy.HttpToHttpsProxyServer,
+        archive_fetch, custom_handlers, rules,
+        host=host, port=options.http_to_https_port,
+        use_delays=options.use_server_delay,
+        **options.shaping_http)
+
+
+def AddTrafficShaper(server_manager, options, host):
+  if options.shaping_dummynet:
+    server_manager.AppendTrafficShaper(
+        trafficshaper.TrafficShaper, host=host,
+        use_loopback=not options.server_mode and host == '127.0.0.1',
+        **options.shaping_dummynet)
+
+
+class OptionsWrapper(object):
+  """Add checks, updates, and methods to option values.
+
+  Example:
+    options, args = option_parser.parse_args()
+    options = OptionsWrapper(options, option_parser)  # run checks and updates
+    if options.record and options.HasTrafficShaping():
+       [...]
+  """
+  _TRAFFICSHAPING_OPTIONS = {
+      'down', 'up', 'delay_ms', 'packet_loss_rate', 'init_cwnd', 'net'}
+  _CONFLICTING_OPTIONS = (
+      ('record', ('down', 'up', 'delay_ms', 'packet_loss_rate', 'net',
+                  'spdy', 'use_server_delay')),
+      ('append', ('down', 'up', 'delay_ms', 'packet_loss_rate', 'net',
+                  'use_server_delay')),  # same as --record
+      ('net', ('down', 'up', 'delay_ms')),
+      ('server', ('server_mode',)),
+  )
+
+  def __init__(self, options, parser):
+    self._options = options
+    self._parser = parser
+    self._nondefaults = set([
+        name for name, value in parser.defaults.items()
+        if getattr(options, name) != value])
+    self._CheckConflicts()
+    self._CheckValidIp('host')
+    self._CheckFeatureSupport()
+    self._MassageValues()
+
+  def _CheckConflicts(self):
+    """Give an error if mutually exclusive options are used."""
+    for option, bad_options in self._CONFLICTING_OPTIONS:
+      if option in self._nondefaults:
+        for bad_option in bad_options:
+          if bad_option in self._nondefaults:
+            self._parser.error('Option --%s cannot be used with --%s.' %
+                                (bad_option, option))
+
+  def _CheckValidIp(self, name):
+    """Give an error if option |name| is not a valid IPv4 address."""
+    value = getattr(self._options, name)
+    if value:
+      try:
+        socket.inet_aton(value)
+      except Exception:
+        self._parser.error('Option --%s must be a valid IPv4 address.' % name)
+
+  def _CheckFeatureSupport(self):
+    if (self._options.should_generate_certs and
+        not platformsettings.HasSniSupport()):
+      self._parser.error('Option --should_generate_certs requires pyOpenSSL '
+                         '0.13 or greater for SNI support.')
+
+  def _ShapingKeywordArgs(self, shaping_key):
+    """Return the shaping keyword args for |shaping_key|.
+
+    Args:
+      shaping_key: one of 'dummynet', 'dns', 'http'.
+    Returns:
+      {}  # if shaping_key does not apply, or options have default values.
+      {k: v, ...}
+    """
+    kwargs = {}
+    def AddItemIfSet(d, kw_key, opt_key=None):
+      opt_key = opt_key or kw_key
+      if opt_key in self._nondefaults:
+        d[kw_key] = getattr(self, opt_key)
+    if ((self.shaping_type == 'proxy' and shaping_key in ('dns', 'http')) or
+        self.shaping_type == shaping_key):
+      AddItemIfSet(kwargs, 'delay_ms')
+      if shaping_key in ('dummynet', 'http'):
+        AddItemIfSet(kwargs, 'down_bandwidth', opt_key='down')
+        AddItemIfSet(kwargs, 'up_bandwidth', opt_key='up')
+        if shaping_key == 'dummynet':
+          AddItemIfSet(kwargs, 'packet_loss_rate')
+          AddItemIfSet(kwargs, 'init_cwnd')
+        elif self.shaping_type != 'none':
+          if 'packet_loss_rate' in self._nondefaults:
+            logging.warn('Shaping type, %s, ignores --packet_loss_rate=%s',
+                         self.shaping_type, self.packet_loss_rate)
+          if 'init_cwnd' in self._nondefaults:
+            logging.warn('Shaping type, %s, ignores --init_cwnd=%s',
+                         self.shaping_type, self.init_cwnd)
+    return kwargs
+
+  def _MassageValues(self):
+    """Set options that depend on the values of other options."""
+    if self.append and not self.record:
+      self._options.record = True
+    if self.net:
+      self._options.down, self._options.up, self._options.delay_ms = \
+          net_configs.GetNetConfig(self.net)
+      self._nondefaults.update(['down', 'up', 'delay_ms'])
+    if not self.ssl:
+      self._options.https_root_ca_cert_path = None
+    self.shaping_dns = self._ShapingKeywordArgs('dns')
+    self.shaping_http = self._ShapingKeywordArgs('http')
+    self.shaping_dummynet = self._ShapingKeywordArgs('dummynet')
+
+  def __getattr__(self, name):
+    """Make the original option values available."""
+    return getattr(self._options, name)
+
+  def __repr__(self):
+    """Return a json representation of the original options dictionary."""
+    return json.dumps(self._options.__dict__)
+
+  def IsRootRequired(self):
+    """Returns True iff the options require whole program root access."""
+    if self.server:
+      return True
+
+    def IsPrivilegedPort(port):
+      return port and port < 1024
+
+    if IsPrivilegedPort(self.port) or (self.ssl and
+                                       IsPrivilegedPort(self.ssl_port)):
+      return True
+
+    if self.dns_forwarding:
+      if IsPrivilegedPort(self.dns_port):
+        return True
+      if not self.server_mode and self.host == '127.0.0.1':
+        return True
+
+    return False
+
+
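+# Illustrative sketch (hypothetical helper, not part of the original file) of
+# how OptionsWrapper groups the shaping flags: with --shaping_type=proxy, a
+# non-default --delay_ms surfaces in the http/dns keyword args instead of the
+# dummynet ones.
+def _example_shaping_kwargs():
+  parser = GetOptionParser()  # defined later in the file; bound at call time
+  options, _ = parser.parse_args(['--shaping_type=proxy', '--delay_ms=100'])
+  options = OptionsWrapper(options, parser)
+  return options.shaping_http  # expected: {'delay_ms': '100'}
+
+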
+def replay(options, replay_filename):
+  if options.admin_check and options.IsRootRequired():
+    platformsettings.rerun_as_administrator()
+  configure_logging(options.log_level, options.log_file)
+  server_manager = servermanager.ServerManager(options.record)
+  if options.server:
+    AddDnsForward(server_manager, options.server)
+  else:
+    real_dns_lookup = dnsproxy.RealDnsLookup(
+        name_servers=[platformsettings.get_original_primary_nameserver()])
+    if options.record:
+      httparchive.HttpArchive.AssertWritable(replay_filename)
+      if options.append and os.path.exists(replay_filename):
+        http_archive = httparchive.HttpArchive.Load(replay_filename)
+        logging.info('Appending to %s (loaded %d existing responses)',
+                     replay_filename, len(http_archive))
+      else:
+        http_archive = httparchive.HttpArchive()
+    else:
+      http_archive = httparchive.HttpArchive.Load(replay_filename)
+      logging.info('Loaded %d responses from %s',
+                   len(http_archive), replay_filename)
+    server_manager.AppendRecordCallback(real_dns_lookup.ClearCache)
+    server_manager.AppendRecordCallback(http_archive.clear)
+
+    ipfw_dns_host = None
+    if options.dns_forwarding or options.shaping_dummynet:
+      # compute the ip/host used for the DNS server and traffic shaping
+      ipfw_dns_host = options.host
+      if not ipfw_dns_host:
+        ipfw_dns_host = platformsettings.get_server_ip_address(
+            options.server_mode)
+
+    if options.dns_forwarding:
+      if not options.server_mode and ipfw_dns_host == '127.0.0.1':
+        AddDnsForward(server_manager, ipfw_dns_host)
+      AddDnsProxy(server_manager, options, ipfw_dns_host, options.dns_port,
+                  real_dns_lookup, http_archive)
+    if options.ssl and options.https_root_ca_cert_path is None:
+      options.https_root_ca_cert_path = os.path.join(os.path.dirname(__file__),
+                                                     'wpr_cert.pem')
+    http_proxy_address = options.host
+    if not http_proxy_address:
+      http_proxy_address = platformsettings.get_httpproxy_ip_address(
+          options.server_mode)
+    AddWebProxy(server_manager, options, http_proxy_address, real_dns_lookup,
+                http_archive)
+    AddTrafficShaper(server_manager, options, ipfw_dns_host)
+
+  exit_status = 0
+  try:
+    server_manager.Run()
+  except KeyboardInterrupt:
+    logging.info('Shutting down.')
+  except (dnsproxy.DnsProxyException,
+          trafficshaper.TrafficShaperException,
+          platformsettings.NotAdministratorError,
+          platformsettings.DnsUpdateError) as e:
+    logging.critical('%s: %s', e.__class__.__name__, e)
+    exit_status = 1
+  except Exception:
+    logging.critical(traceback.format_exc())
+    exit_status = 2
+
+  if options.record:
+    http_archive.Persist(replay_filename)
+    logging.info('Saved %d responses to %s', len(http_archive), replay_filename)
+  return exit_status
+
+
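+# Programmatic-use sketch (hypothetical helper, not part of the original
+# file): replay() can be driven without a real command line by parsing an
+# explicit argv list. Assumes |archive_path| exists and that the caller either
+# has the required privileges or accepts --no-admin-check.
+def _example_replay_programmatically(archive_path):
+  parser = GetOptionParser()
+  options, _ = parser.parse_args(['--no-admin-check', archive_path])
+  return replay(OptionsWrapper(options, parser), archive_path)
+
+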
+def GetOptionParser():
+  class PlainHelpFormatter(optparse.IndentedHelpFormatter):
+    def format_description(self, description):
+      if description:
+        return description + '\n'
+      else:
+        return ''
+  option_parser = optparse.OptionParser(
+      usage='%prog [options] replay_file',
+      formatter=PlainHelpFormatter(),
+      description=__doc__,
+      epilog='http://code.google.com/p/web-page-replay/')
+
+  option_parser.add_option('-r', '--record', default=False,
+      action='store_true',
+      help='Download real responses and record them to replay_file')
+  option_parser.add_option('--append', default=False,
+      action='store_true',
+      help='Append responses to replay_file.')
+  option_parser.add_option('-l', '--log_level', default='debug',
+      action='store',
+      type='choice',
+      choices=('debug', 'info', 'warning', 'error', 'critical'),
+      help='Minimum verbosity level to log')
+  option_parser.add_option('-f', '--log_file', default=None,
+      action='store',
+      type='string',
+      help='Log file to use in addition to writing logs to stderr.')
+
+  network_group = optparse.OptionGroup(option_parser,
+      'Network Simulation Options',
+      'These options configure the network simulation in replay mode')
+  network_group.add_option('-u', '--up', default='0',
+      action='store',
+      type='string',
+      help='Upload Bandwidth in [K|M]{bit/s|Byte/s}. Zero means unlimited.')
+  network_group.add_option('-d', '--down', default='0',
+      action='store',
+      type='string',
+      help='Download Bandwidth in [K|M]{bit/s|Byte/s}. Zero means unlimited.')
+  network_group.add_option('-m', '--delay_ms', default='0',
+      action='store',
+      type='string',
+      help='Propagation delay (latency) in milliseconds. Zero means no delay.')
+  network_group.add_option('-p', '--packet_loss_rate', default='0',
+      action='store',
+      type='string',
+      help='Packet loss rate in range [0..1]. Zero means no loss.')
+  network_group.add_option('-w', '--init_cwnd', default='0',
+      action='store',
+      type='string',
+      help='Set initial cwnd (linux only, requires kernel patch)')
+  network_group.add_option('--net', default=None,
+      action='store',
+      type='choice',
+      choices=net_configs.NET_CONFIG_NAMES,
+      help='Select a set of network options: %s.' % ', '.join(
+          net_configs.NET_CONFIG_NAMES))
+  network_group.add_option('--shaping_type', default='dummynet',
+      action='store',
+      choices=('dummynet', 'proxy'),
+      help='When shaping is configured (e.g. via --up or --down), decides '
+           'whether to use |dummynet| (default) or |proxy| servers.')
+  option_parser.add_option_group(network_group)
+
+  harness_group = optparse.OptionGroup(option_parser,
+      'Replay Harness Options',
+      'These advanced options configure various aspects of the replay harness')
+  harness_group.add_option('-S', '--server', default=None,
+      action='store',
+      type='string',
+      help='IP address of host running "replay.py --server_mode". '
+           'This only changes the primary DNS nameserver to use the given IP.')
+  harness_group.add_option('-M', '--server_mode', default=False,
+      action='store_true',
+      help='Run replay DNS & http proxies, and trafficshaping on --port '
+           'without changing the primary DNS nameserver. '
+           'Other hosts may connect to this using "replay.py --server" '
+           'or by pointing their DNS to this server.')
+  harness_group.add_option('-i', '--inject_scripts', default='deterministic.js',
+      action='store',
+      dest='inject_scripts',
+      help='A comma-separated list of JavaScript sources to inject in all '
+           'pages. By default a script is injected that makes sources of '
+           'entropy such as Date() and Math.random() deterministic. '
+           'CAUTION: Without deterministic.js, many pages will not replay.')
+  harness_group.add_option('-D', '--no-diff_unknown_requests', default=True,
+      action='store_false',
+      dest='diff_unknown_requests',
+      help='During replay, do not show a diff of unknown requests against '
+           'their nearest match in the archive.')
+  harness_group.add_option('-C', '--use_closest_match', default=False,
+      action='store_true',
+      dest='use_closest_match',
+      help='During replay, if a request is not found, serve the closest match'
+           ' in the archive instead of giving a 404.')
+  harness_group.add_option('-U', '--use_server_delay', default=False,
+      action='store_true',
+      dest='use_server_delay',
+      help='During replay, simulate server delay by delaying response time to'
+           ' requests.')
+  harness_group.add_option('-I', '--screenshot_dir', default=None,
+      action='store',
+      type='string',
+      help='Save PNG images of the loaded page in the given directory.')
+  harness_group.add_option('-P', '--no-dns_private_passthrough', default=True,
+      action='store_false',
+      dest='dns_private_passthrough',
+      help='Don\'t forward DNS requests that resolve to private network '
+           'addresses. CAUTION: With this option important services like '
+           'Kerberos will resolve to the HTTP proxy address.')
+  harness_group.add_option('-x', '--no-dns_forwarding', default=True,
+      action='store_false',
+      dest='dns_forwarding',
+      help='Don\'t forward DNS requests to the local replay server. '
+           'CAUTION: With this option an external mechanism must be used to '
+           'forward traffic to the replay server.')
+  harness_group.add_option('--host', default=None,
+      action='store',
+      type='str',
+      help='The IP address to bind all servers to. Defaults to 0.0.0.0 or '
+           '127.0.0.1, depending on --server_mode and platform.')
+  harness_group.add_option('-o', '--port', default=80,
+      action='store',
+      type='int',
+      help='Port number to listen on.')
+  harness_group.add_option('--ssl_port', default=443,
+      action='store',
+      type='int',
+      help='SSL port number to listen on.')
+  harness_group.add_option('--http_to_https_port', default=None,
+      action='store',
+      type='int',
+      help='Port on which WPR will listen for HTTP requests that it will send '
+           'along as HTTPS requests.')
+  harness_group.add_option('--dns_port', default=53,
+      action='store',
+      type='int',
+      help='DNS port number to listen on.')
+  harness_group.add_option('-c', '--https_root_ca_cert_path', default=None,
+      action='store',
+      type='string',
+      help='Certificate file to use with SSL (gets auto-generated if needed).')
+  harness_group.add_option('--no-ssl', default=True,
+      action='store_false',
+      dest='ssl',
+      help='Do not setup an SSL proxy.')
+  option_parser.add_option_group(harness_group)
+  harness_group.add_option('--should_generate_certs', default=False,
+      action='store_true',
+      help='Use OpenSSL to generate certificate files for requested hosts.')
+  harness_group.add_option('--no-admin-check', default=True,
+      action='store_false',
+      dest='admin_check',
+      help='Do not check if administrator access is needed.')
+  harness_group.add_option('--scramble_images', default=False,
+      action='store_true',
+      dest='scramble_images',
+      help='Scramble image responses.')
+  harness_group.add_option('--rules_path', default=None,
+      action='store',
+      help='Path of file containing Python rules.')
+  harness_group.add_option('--allowed_rule_imports', default='rules',
+      action='store',
+      help='A comma-separated list of allowed rule imports, or \'*\' to allow'
+           ' all packages.  Defaults to \'%default\'.')
+  return option_parser
+
+
+def main():
+  option_parser = GetOptionParser()
+  options, args = option_parser.parse_args()
+  options = OptionsWrapper(options, option_parser)
+
+  if options.server:
+    replay_filename = None
+  elif len(args) != 1:
+    option_parser.error('Must specify a replay_file')
+  else:
+    replay_filename = args[0]
+
+  return replay(options, replay_filename)
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/catapult/telemetry/third_party/webpagereplay/replay_test.py b/catapult/telemetry/third_party/webpagereplay/replay_test.py
new file mode 100755
index 0000000..1523dd2
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/replay_test.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python
+# Copyright 2012 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for replay.
+
+Usage:
+$ ./replay_test.py
+"""
+
+import replay
+import unittest
+
+
+class MockOptions(dict):
+  """A dict with items that can be accessed as attributes."""
+  def __getattr__(self, name):
+    return self[name]
+
+
+class OptionsWrapperTest(unittest.TestCase):
+
+  def testNoTrafficShapingByDefault(self):
+    parser = replay.GetOptionParser()
+    options, args = parser.parse_args([])
+    options = replay.OptionsWrapper(options, parser)
+    self.assertEqual({}, options.shaping_dns)
+    self.assertEqual({}, options.shaping_http)
+    self.assertEqual({}, options.shaping_dummynet)
+
+  def testShapingProxyWithoutOptionsGivesEmptySettings(self):
+    parser = replay.GetOptionParser()
+    options, args = parser.parse_args(['--shaping=proxy'])
+    options = replay.OptionsWrapper(options, parser)
+    self.assertEqual({}, options.shaping_dns)
+    self.assertEqual({}, options.shaping_http)
+    self.assertEqual({}, options.shaping_dummynet)
+
+  def testShapingProxyWithNetOption(self):
+    parser = replay.GetOptionParser()
+    options, args = parser.parse_args(['--shaping=proxy', '--net=cable'])
+    options = replay.OptionsWrapper(options, parser)
+    expected_http = {
+        'down_bandwidth': '5Mbit/s', 'delay_ms': '28', 'up_bandwidth': '1Mbit/s'
+        }
+    self.assertEqual({'delay_ms': '28'}, options.shaping_dns)
+    self.assertEqual(expected_http, options.shaping_http)
+    self.assertEqual({}, options.shaping_dummynet)
+
+  def testNetOptionUsesDummynetByDefault(self):
+    parser = replay.GetOptionParser()
+    options, args = parser.parse_args(['--net=cable'])
+    options = replay.OptionsWrapper(options, parser)
+    expected_dummynet = {
+        'down_bandwidth': '5Mbit/s', 'delay_ms': '28', 'up_bandwidth': '1Mbit/s'
+        }
+    self.assertEqual({}, options.shaping_dns)
+    self.assertEqual({}, options.shaping_http)
+    self.assertEqual(expected_dummynet, options.shaping_dummynet)
+
+  def testPacketLossForDummynet(self):
+    parser = replay.GetOptionParser()
+    options, args = parser.parse_args(['--packet_loss_rate=12'])
+    options = replay.OptionsWrapper(options, parser)
+    self.assertEqual({'packet_loss_rate': '12'}, options.shaping_dummynet)
+
+  def testIgnoredProxyShapingOptions(self):
+    parser = replay.GetOptionParser()
+    options, args = parser.parse_args(
+        ['--packet_loss_rate=12', '--init_cwnd=10', '--shaping=proxy'])
+    options = replay.OptionsWrapper(options, parser)
+    self.assertEqual({}, options.shaping_dns)
+    self.assertEqual({}, options.shaping_http)
+    self.assertEqual({}, options.shaping_dummynet)
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/catapult/telemetry/third_party/webpagereplay/requirements.txt b/catapult/telemetry/third_party/webpagereplay/requirements.txt
new file mode 100644
index 0000000..41b4fa1
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/requirements.txt
@@ -0,0 +1 @@
+pyOpenSSL==0.13
diff --git a/catapult/telemetry/third_party/webpagereplay/rules/__init__.py b/catapult/telemetry/third_party/webpagereplay/rules/__init__.py
new file mode 100644
index 0000000..3486216
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/rules/__init__.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+# Copyright 2015 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Export rules for rules_parser access.
+from rules.log_url import LogUrl
diff --git a/catapult/telemetry/third_party/webpagereplay/rules/log_url.py b/catapult/telemetry/third_party/webpagereplay/rules/log_url.py
new file mode 100644
index 0000000..2f5ab21
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/rules/log_url.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+# Copyright 2015 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import re
+
+from rules import rule
+
+
+class LogUrl(rule.Rule):
+  """Logs the request URL."""
+
+  def __init__(self, url, stop=False):
+    r"""Initializes with a url pattern.
+
+    Args:
+      url: a string regex, e.g. r'example\.com/id=(\d{6})'.
+      stop: boolean ApplyRule should_stop value, defaults to False.
+    """
+    self._url_re = re.compile(url)
+    self._stop = stop
+
+  def IsType(self, rule_type_name):
+    """Returns True if the name matches this rule."""
+    return rule_type_name == 'log_url'
+
+  def ApplyRule(self, return_value, request, response):
+    """Returns True if logged.
+
+    Args:
+      return_value: the prior log_url rule's return_value (if any).
+      request: the httparchive ArchivedHttpRequest.
+      response: the httparchive ArchivedHttpResponse.
+    Returns:
+      A (should_stop, return_value) tuple, e.g. (False, True).
+    """
+    del response  # unused.
+    url = '%s%s' % (request.host, request.full_path)
+    if not self._url_re.match(url):
+      return False, return_value
+
+    logging.debug('url: %s', url)
+    return self._stop, True
+
+  def __str__(self):
+    return _ToString(self, ('url', self._url_re.pattern),
+                     None if self._stop else ('stop', False))
+
+  def __repr__(self):
+    return str(self)
+
+
+def _ToString(obj, *items):
+  pkg = (obj.__module__[:obj.__module__.rfind('.') + 1]
+         if '.' in obj.__module__ else '')
+  clname = obj.__class__.__name__
+  args = [('%s=r\'%s\'' % item if isinstance(item[1], basestring)
+           else '%s=%s' % item) for item in items if item]
+  return '%s%s(%s)' % (pkg, clname, ', '.join(args))
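+
+
+# Sketch (hypothetical helper, not part of the original rule) of the
+# (should_stop, return_value) contract documented in ApplyRule above.
+# |request| is any object with .host and .full_path attributes (for example,
+# the FakeRequest used in rules_parser_test).
+def _example_log_url(request):
+  rule = LogUrl(r'example\.com/ss.*')
+  should_stop, logged = rule.ApplyRule(None, request, None)
+  # With request.host == 'example.com' and request.full_path == '/ss', the URL
+  # matches, so |logged| is True and |should_stop| is the stop value (False by
+  # default).
+  return should_stop, logged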
diff --git a/catapult/telemetry/third_party/webpagereplay/rules/rule.py b/catapult/telemetry/third_party/webpagereplay/rules/rule.py
new file mode 100644
index 0000000..816b61b
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/rules/rule.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+# Copyright 2015 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class Rule(object):
+  """An optional base class for rule implementations.
+
+  The rule_parser looks for the 'IsType' and 'ApplyRule' methods by name, so
+  rules are not strictly required to extend this class.
+  """
+
+  def IsType(self, rule_type_name):
+    """Returns True if the name matches this rule."""
+    raise NotImplementedError
+
+  def ApplyRule(self, return_value, request, response):
+    """Invokes this rule with the given args.
+
+    Args:
+      return_value: the prior rule's return_value (if any).
+      request: the httparchive ArchivedHttpRequest.
+      response: the httparchive ArchivedHttpResponse, which may be None.
+    Returns:
+      A (should_stop, return_value) tuple.  Typically the request and response
+        are treated as immutable, so it's the caller's job to apply the
+        return_value (e.g., set response fields).
+    """
+    raise NotImplementedError
diff --git a/catapult/telemetry/third_party/webpagereplay/rules_parser.py b/catapult/telemetry/third_party/webpagereplay/rules_parser.py
new file mode 100644
index 0000000..109db6d
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/rules_parser.py
@@ -0,0 +1,167 @@
+# Copyright 2015 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+r"""Rules parser.
+
+The input syntax is:
+  [{"comment": ignored_value},
+   {"rule_class_name1": {"arg1": value, "arg2": value, ...}},
+   {"rule_class_name2": {"arg1": value, "arg2": value, ...}},
+   ...]
+E.g.:
+  [{"comment": "this text is ignored"},
+   {"SendStatus": {"url": "example\\.com/ss.*", "status": 204}},
+   {"ModifyUrl": {"url": "(example\\.com)(/.*)", "new_url": "{1}"}}
+  ]
+"""
+
+import json
+import re
+
+
+class Error(Exception):
+  pass
+
+
+class Rules(object):
+
+  """A parsed sequence of Rule objects."""
+
+  def __init__(self, file_obj=None, allowed_imports=None):
+    """Initializes from the given file object.
+
+    Args:
+      file_obj: A file object.
+      allowed_imports: A set of strings, defaults to {'rules'}.
+        Use {'*'} to allow any import path.
+    """
+    if allowed_imports is None:
+      allowed_imports = {'rules'}
+    self._rules = [] if file_obj is None else _Load(file_obj, allowed_imports)
+
+  def Contains(self, rule_type_name):
+    """Returns true if any rule matches the given type name.
+
+    Args:
+      rule_type_name: a string.
+    Returns:
+      True if any rule matches, else False.
+    """
+    return any(rule for rule in self._rules if rule.IsType(rule_type_name))
+
+  def Find(self, rule_type_name):
+    """Returns a _Rule object containing all rules with the given type name.
+
+    Args:
+      rule_type_name: a string.
+    Returns:
+      A callable object that expects two arguments:
+        request: the httparchive ArchivedHttpRequest
+        response: the httparchive ArchivedHttpResponse
+      and returns the rule return_value of the first rule that returns
+      should_stop == True, or the last rule's return_value if all rules return
+      should_stop == False.
+    """
+    matches = [rule for rule in self._rules if rule.IsType(rule_type_name)]
+    return _Rule(matches)
+
+  def __str__(self):
+    return _ToString(self._rules)
+
+  def __repr__(self):
+    return str(self)
+
+
+class _Rule(object):
+  """Calls a sequence of Rule objects until one returns should_stop."""
+
+  def __init__(self, rules):
+    self._rules = rules
+
+  def __call__(self, request, response):
+    """Calls the rules until one returns should_stop.
+
+    Args:
+      request: the httparchive ArchivedHttpRequest.
+      response: the httparchive ArchivedHttpResponse, which may be None.
+    Returns:
+      The rule return_value of the first rule that returns should_stop == True,
+      or the last rule's return_value if all rules return should_stop == False.
+    """
+    return_value = None
+    for rule in self._rules:
+      should_stop, return_value = rule.ApplyRule(
+          return_value, request, response)
+      if should_stop:
+        break
+    return return_value
+
+  def __str__(self):
+    return _ToString(self._rules)
+
+  def __repr__(self):
+    return str(self)
+
+
+def _ToString(rules):
+  """Formats a sequence of Rule objects into a string."""
+  return '[\n%s\n]' % '\n'.join('%s' % rule for rule in rules)
+
+
+def _Load(file_obj, allowed_imports):
+  """Parses and evaluates all rules in the given file.
+
+  Args:
+    file_obj: a file object.
+    allowed_imports: a sequence of strings, e.g.: {'rules'}.
+  Returns:
+    a list of rules.
+  """
+  rules = []
+  entries = json.load(file_obj)
+  if not isinstance(entries, list):
+    raise Error('Expecting a list, not %s' % type(entries))
+  for i, entry in enumerate(entries):
+    if not isinstance(entry, dict):
+      raise Error('%s: Expecting a dict, not %s' % (i, type(entry)))
+    if len(entry) != 1:
+      raise Error('%s: Expecting 1 item, not %d' % (i, len(entry)))
+    name, args = next(entry.iteritems())
+    if not isinstance(name, basestring):
+      raise Error('%s: Expecting a string TYPE, not %s' % (i, type(name)))
+    if not re.match(r'(\w+\.)*\w+$', name):
+      raise Error('%s: Expecting a classname TYPE, not %s' % (i, name))
+    if name == 'comment':
+      continue
+    if not isinstance(args, dict):
+      raise Error('%s: Expecting a dict ARGS, not %s' % (i, type(args)))
+    fullname = str(name)
+    if '.' not in fullname:
+      fullname = 'rules.%s' % fullname
+
+    modulename, classname = fullname.rsplit('.', 1)
+    if '*' not in allowed_imports and modulename not in allowed_imports:
+      raise Error('%s: Package %r is not in allowed_imports' % (i, modulename))
+
+    module = __import__(modulename, fromlist=[classname])
+    clazz = getattr(module, classname)
+
+    missing = {s for s in ('IsType', 'ApplyRule') if not hasattr(clazz, s)}
+    if missing:
+      raise Error('%s: %s lacks %s' % (
+          i, clazz.__name__, ' and '.join(missing)))
+
+    rule = clazz(**args)
+
+    rules.append(rule)
+  return rules
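+
+
+# Usage sketch (hypothetical helper, not part of the original module): rules
+# files are plain JSON lists, so a Rules object can also be built from an
+# in-memory string for quick experiments.
+def _example_rules():
+  import StringIO  # local import; only needed by this sketch
+  my_rules = Rules(StringIO.StringIO(
+      r'[{"comment": "ignored"}, {"LogUrl": {"url": "example\\.com/.*"}}]'))
+  return my_rules.Contains('log_url')  # expected to be True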
diff --git a/catapult/telemetry/third_party/webpagereplay/rules_parser_test.py b/catapult/telemetry/third_party/webpagereplay/rules_parser_test.py
new file mode 100755
index 0000000..bc20d80
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/rules_parser_test.py
@@ -0,0 +1,81 @@
+# Copyright 2015 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for rules_parser.  Usage: ./rules_parser_test.py"""
+
+import collections
+import logging
+from StringIO import StringIO
+import unittest
+
+import rules_parser
+
+
+class RuleParserTest(unittest.TestCase):
+
+  @classmethod
+  def setUpClass(cls):
+    if not logging.root.handlers:
+      logging.basicConfig(level=logging.DEBUG,  # Enable log_url stdout.
+                          format='%(asctime)s %(levelname)s %(message)s')
+
+  def testCall(self):
+    my_rules = rules_parser.Rules(StringIO(r'''
+        [{"comment": "ignore me"},
+         {"LogUrl": {"url": "example\\.com/ss.*"}},
+         {"LogUrl": {"url": "example\\.com/blah$"}}]'''))
+    log_url = my_rules.Find('log_url')
+    self.assertEquals(True, log_url(FakeRequest(full_path='/ss'), None))
+    self.assertEquals(True, log_url(FakeRequest(full_path='/ssxxxx'), None))
+    self.assertEquals(True, log_url(FakeRequest(full_path='/blah'), None))
+    self.assertEquals(None, log_url(FakeRequest(full_path='/blahxxx'), None))
+    self.assertEquals(None, log_url(FakeRequest(full_path='/'), None))
+
+  def testImport(self):
+    my_rules = rules_parser.Rules(StringIO(r'''
+        [{"rules.LogUrl": {"url": "example\\.com/ss.*"}}]'''))
+    self.assertTrue(my_rules.Contains('log_url'))
+
+  def testRaises(self):
+    input_pairs = [
+        'bad_json',
+        '123',
+        '{}',
+        '[42]',
+        '[{12:34}]',
+        '[{"a":"b","c":"d"}]',
+        '[{"bad+rule@name":{}}]',
+        '["unallowed.Path":{}]',
+        '["NoSuchRule":{}]',
+        '["LogUrl":"bad"]',
+        '["LogUrl":{}]',
+        '["LogUrl":{"url":123}]',
+        '["LogUrl":{"url":"", "bad_arg":123}]',
+    ]
+    for input_text in input_pairs:
+      self.assertRaises(Exception, rules_parser.Rules, StringIO(input_text))
+
+
+class FakeRequest(collections.namedtuple(
+    'FakeRequest', ('command', 'host', 'full_path', 'request_body',
+                    'headers', 'is_ssl'))):
+
+  def __new__(cls, command='GET', host='example.com', full_path='/',
+              request_body=None, headers=None, is_ssl=False):
+    return super(FakeRequest, cls).__new__(
+        cls, command, host, full_path, request_body, headers or {}, is_ssl)
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/catapult/telemetry/third_party/webpagereplay/run_tests b/catapult/telemetry/third_party/webpagereplay/run_tests
new file mode 100755
index 0000000..4f632d0
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/run_tests
@@ -0,0 +1,12 @@
+#!/usr/bin/env python
+import os
+import sys
+
+import test_runner
+
+_WPR_DIR = os.path.dirname(os.path.abspath(__file__))
+
+if __name__ == '__main__':
+  runner = test_runner.TestRunner()
+  runner.AddDirectory(_WPR_DIR)
+  sys.exit(runner.Main())
diff --git a/catapult/telemetry/third_party/webpagereplay/script_injector.py b/catapult/telemetry/third_party/webpagereplay/script_injector.py
new file mode 100644
index 0000000..6899c9d
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/script_injector.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Inject javascript into html page source code."""
+
+import logging
+import os
+import re
+import util
+
+DOCTYPE_RE = re.compile(r'^.{,256}?(<!--.*-->)?.{,256}?<!doctype html>',
+                        re.IGNORECASE | re.DOTALL)
+HTML_RE = re.compile(r'^.{,256}?(<!--.*-->)?.{,256}?<html.*?>',
+                     re.IGNORECASE | re.DOTALL)
+HEAD_RE = re.compile(r'^.{,256}?(<!--.*-->)?.{,256}?<head.*?>',
+                     re.IGNORECASE | re.DOTALL)
+
+
+def GetInjectScript(scripts):
+  """Loads |scripts| from disk and returns a string of their content."""
+  lines = []
+  if scripts:
+    if not isinstance(scripts, list):
+      scripts = scripts.split(',')
+    for script in scripts:
+      if os.path.exists(script):
+        with open(script) as f:
+          lines.append(f.read())
+      elif util.resource_exists(script):
+        lines.append(util.resource_string(script))
+      else:
+        raise Exception('Script does not exist: %s' % script)
+
+  def MinifyScript(script):
+    """Remove C-style comments and line breaks from script.
+    Note: statements must be ';' terminated, and not depending on newline"""
+    # Regex adapted from http://ostermiller.org/findcomment.html.
+    MULTILINE_COMMENT_RE = re.compile(r'/\*.*?\*/', re.DOTALL | re.MULTILINE)
+    SINGLELINE_COMMENT_RE = re.compile(r'//.*', re.MULTILINE)
+    # Remove C-style comments from JS.
+    script = re.sub(MULTILINE_COMMENT_RE, '', script)
+    script = re.sub(SINGLELINE_COMMENT_RE, '', script)
+    # Remove line breaks.
+    script = script.translate(None, '\r\n')
+    return script
+
+  return MinifyScript(''.join(lines))
+
+
+def InjectScript(content, content_type, script_to_inject):
+  """Inject |script_to_inject| into |content| if |content_type| is 'text/html'.
+
+  Inject |script_to_inject| into |content| immediately after <head>, <html> or
+  <!doctype html>, if one of them is found. Otherwise, inject at the beginning.
+
+  Returns:
+    content, already_injected
+    |content| is the new content if script is injected, otherwise the original.
+    |already_injected| indicates if |script_to_inject| is already in |content|.
+  """
+  already_injected = False
+  if content_type and content_type == 'text/html':
+    already_injected = not content or script_to_inject in content
+    if not already_injected:
+      def InsertScriptAfter(matchobj):
+        return '%s<script>%s</script>' % (matchobj.group(0), script_to_inject)
+
+      content, is_injected = HEAD_RE.subn(InsertScriptAfter, content, 1)
+      if not is_injected:
+        content, is_injected = HTML_RE.subn(InsertScriptAfter, content, 1)
+      if not is_injected:
+        content, is_injected = DOCTYPE_RE.subn(InsertScriptAfter, content, 1)
+      if not is_injected:
+        content = '<script>%s</script>%s' % (script_to_inject, content)
+        logging.warning('Injecting at the very beginning because no '
+                        '<head>, <html> or <!doctype html> tag was found.')
+  return content, already_injected
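A minimal, illustrative sketch of how script_injector.InjectScript() behaves, assuming the webpagereplay directory is on sys.path; the script body here is hypothetical:

import script_injector

html = '<!doctype html><html><head></head><body></body></html>'
script = 'var time_seed = 0;'  # hypothetical script body, not a real WPR script

new_html, already_injected = script_injector.InjectScript(
    html, 'text/html', script)
# new_html now contains '<script>var time_seed = 0;</script>' right after
# <head>, and already_injected is False on this first pass.
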
diff --git a/catapult/telemetry/third_party/webpagereplay/script_injector_test.py b/catapult/telemetry/third_party/webpagereplay/script_injector_test.py
new file mode 100755
index 0000000..cf77a40
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/script_injector_test.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import script_injector
+import unittest
+
+
+LONG_COMMENT = '<!--%s-->' % ('comment,' * 200)
+SCRIPT_TO_INJECT = 'var flag = 0;'
+EXPECTED_SCRIPT = '<script>%s</script>' % SCRIPT_TO_INJECT
+TEXT_HTML = 'text/html'
+TEXT_CSS = 'text/css'
+APPLICATION = 'application/javascript'
+
+TEMPLATE_HEAD = '<!doctype html><html><head>%s</head><body></body></html>'
+TEMPLATE_HTML = '<!doctype html><html>%s<body></body></html>'
+TEMPLATE_DOCTYPE = '<!doctype html>%s<body></body>'
+TEMPLATE_RAW = '%s<body></body>'
+TEMPLATE_COMMENT = '%s<!doctype html>%s<html>%s<head>%s</head></html>'
+
+
+class ScriptInjectorTest(unittest.TestCase):
+
+  def test_unsupported_content_type(self):
+    source = 'abc'
+    # CSS.
+    new_source, already_injected = script_injector.InjectScript(
+        source, TEXT_CSS, SCRIPT_TO_INJECT)
+    self.assertEqual(new_source, source)
+    self.assertFalse(already_injected)
+    # Javascript.
+    new_source, already_injected = script_injector.InjectScript(
+        source, APPLICATION, SCRIPT_TO_INJECT)
+    self.assertEqual(new_source, source)
+    self.assertFalse(already_injected)
+
+  def test_empty_content_as_already_injected(self):
+    source, already_injected = script_injector.InjectScript(
+        '', TEXT_HTML, SCRIPT_TO_INJECT)
+    self.assertEqual(source, '')
+    self.assertTrue(already_injected)
+
+  def test_already_injected(self):
+    source, already_injected = script_injector.InjectScript(
+        TEMPLATE_HEAD % EXPECTED_SCRIPT, TEXT_HTML, SCRIPT_TO_INJECT)
+    self.assertEqual(source, TEMPLATE_HEAD % EXPECTED_SCRIPT)
+    self.assertTrue(already_injected)
+
+  def _assert_successful_injection(self, template):
+    source, already_injected = script_injector.InjectScript(
+        template % '', TEXT_HTML, SCRIPT_TO_INJECT)
+    self.assertEqual(source, template % EXPECTED_SCRIPT)
+    self.assertFalse(already_injected)
+
+  def test_normal(self):
+    self._assert_successful_injection(TEMPLATE_HEAD)
+
+  def test_no_head_tag(self):
+    self._assert_successful_injection(TEMPLATE_HTML)
+
+  def test_no_head_and_html_tag(self):
+    self._assert_successful_injection(TEMPLATE_DOCTYPE)
+
+  def test_no_head_html_and_doctype_tag(self):
+    self._assert_successful_injection(TEMPLATE_RAW)
+
+  def _assert_successful_injection_with_comment(self, before_doctype,
+                                                after_doctype, after_html):
+    source, already_injected = script_injector.InjectScript(
+        TEMPLATE_COMMENT % (before_doctype, after_doctype, after_html, ''),
+        TEXT_HTML, SCRIPT_TO_INJECT)
+    expected_source = TEMPLATE_COMMENT % (before_doctype, after_doctype,
+                                          after_html, EXPECTED_SCRIPT)
+    self.assertEqual(source, expected_source)
+    self.assertFalse(already_injected)
+
+  def test_comment_before_doctype(self):
+    self._assert_successful_injection_with_comment(LONG_COMMENT, '', '')
+
+  def test_comment_after_doctype(self):
+    self._assert_successful_injection_with_comment('', LONG_COMMENT, '')
+
+  def test_comment_after_html(self):
+    self._assert_successful_injection_with_comment('', '', LONG_COMMENT)
+
+  def test_all_comments(self):
+    self._assert_successful_injection_with_comment(
+        LONG_COMMENT, LONG_COMMENT, LONG_COMMENT)
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/catapult/telemetry/third_party/webpagereplay/servermanager.py b/catapult/telemetry/third_party/webpagereplay/servermanager.py
new file mode 100644
index 0000000..8bb9b3a
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/servermanager.py
@@ -0,0 +1,137 @@
+#!/usr/bin/env python
+# Copyright 2011 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Control "replay.py --server_mode" (e.g. switch from record to replay)."""
+
+import sys
+import time
+
+class ServerManager(object):
+  """Run servers until is removed or an exception is raised.
+
+  Servers start in the order they are appended and stop in the
+  opposite order. Servers are started by calling the initializer
+  passed to ServerManager.Append() and by calling __enter__(). Once a
+  server's initializer is called successfully, the __exit__() function
+  is guaranteed to be called when ServerManager.Run() completes.
+  """
+
+  def __init__(self, is_record_mode):
+    """Initialize a server manager."""
+    self.initializers = []
+    self.record_callbacks = []
+    self.replay_callbacks = []
+    self.traffic_shapers = []
+    self.is_record_mode = is_record_mode
+    self.should_exit = False
+
+  def Append(self, initializer, *init_args, **init_kwargs):
+    """Append a server to the end of the list to run.
+
+    Servers start in the order they are appended and stop in the
+    opposite order.
+
+    Args:
+      initializer: a function that returns a server instance.
+          A server needs to implement the with-statement interface.
+      init_args: positional arguments for the initializer.
+      init_kwargs: keyword arguments for the initializer.
+    """
+    self.initializers.append((initializer, init_args, init_kwargs))
+
+  def AppendTrafficShaper(self, initializer, *init_args, **init_kwargs):
+    """Append a traffic shaper to the end of the list to run.
+
+    Args:
+      initializer: a function that returns a server instance.
+          A server needs to implement the with-statement interface.
+      init_args: positional arguments for the initializer.
+      init_kwargs: keyword arguments for the initializer.
+    """
+    self.traffic_shapers.append((initializer, init_args, init_kwargs))
+
+  def AppendRecordCallback(self, func):
+    """Append a function to the list to call when switching to record mode.
+
+    Args:
+      func: a function that takes no arguments and returns no value.
+    """
+    self.record_callbacks.append(func)
+
+  def AppendReplayCallback(self, func):
+    """Append a function to the list to call when switching to replay mode.
+
+    Args:
+      func: a function that takes no arguments and returns no value.
+    """
+    self.replay_callbacks.append(func)
+
+  def IsRecordMode(self):
+    """Call all the functions that have been registered to enter replay mode."""
+    return self.is_record_mode
+
+  def SetRecordMode(self):
+    """Call all the functions that have been registered to enter record mode."""
+    self.is_record_mode = True
+    for record_func in self.record_callbacks:
+      record_func()
+
+  def SetReplayMode(self):
+    """Call all the functions that have been registered to enter replay mode."""
+    self.is_record_mode = False
+    for replay_func in self.replay_callbacks:
+      replay_func()
+
+  def Run(self):
+    """Create the servers and loop.
+
+    The loop quits when should_exit is set or a server raises an exception.
+
+    Raises:
+      any exception raised by the servers
+    """
+    server_exits = []
+    server_ports = []
+    exception_info = (None, None, None)
+    try:
+      for initializer, init_args, init_kwargs in self.initializers:
+        server = initializer(*init_args, **init_kwargs)
+        if server:
+          server_exits.insert(0, server.__exit__)
+          server.__enter__()
+          if hasattr(server, 'server_port'):
+            server_ports.append(server.server_port)
+      for initializer, init_args, init_kwargs in self.traffic_shapers:
+        init_kwargs['ports'] = server_ports
+        shaper = initializer(*init_args, **init_kwargs)
+        if shaper:
+          server_exits.insert(0, shaper.__exit__)
+          shaper.__enter__()
+      while True:
+        time.sleep(1)
+        if self.should_exit:
+          break
+    except Exception:
+      exception_info = sys.exc_info()
+    finally:
+      for server_exit in server_exits:
+        try:
+          if server_exit(*exception_info):
+            exception_info = (None, None, None)
+        except Exception:
+          exception_info = sys.exc_info()
+      if exception_info != (None, None, None):
+        # pylint: disable=raising-bad-type
+        raise exception_info[0], exception_info[1], exception_info[2]
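A rough sketch of how ServerManager is driven; any object implementing the with-statement interface can serve as a server, and FakeServer plus the 2-second timer below are illustrative only:

import threading

import servermanager

class FakeServer(object):
  """Illustrative stand-in: any object with __enter__/__exit__ will do."""
  server_port = 8080  # picked up and handed to traffic shapers, if any

  def __enter__(self):
    return self

  def __exit__(self, exc_type, exc_value, exc_traceback):
    return False

manager = servermanager.ServerManager(is_record_mode=False)
manager.Append(FakeServer)
# Flip should_exit from another thread so Run() returns after roughly 2s.
threading.Timer(2.0, lambda: setattr(manager, 'should_exit', True)).start()
manager.Run()
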
diff --git a/catapult/telemetry/third_party/webpagereplay/setup.py b/catapult/telemetry/third_party/webpagereplay/setup.py
new file mode 100644
index 0000000..4a177be
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/setup.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python
+# Copyright 2012 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Creates a distributable python package.
+
+Creating new packages:
+  1. Generate the package, dist/webpagereplay-X.X.tar.gz:
+       python setup.py sdist
+  2. Upload the package file to the following:
+       http://code.google.com/p/web-page-replay/downloads/entry
+
+Installing packages:
+  $ easy_install http://web-page-replay.googlecode.com/files/webpagereplay-X.X.tar.gz
+  - The replay and httparchive commands are now on your PATH.
+"""
+
+import setuptools
+
+setuptools.setup(
+    name='webpagereplay',
+    version='1.1.2',
+    description='Record and replay web content',
+    author='Web Page Replay Project Authors',
+    author_email='web-page-replay-dev@googlegroups.com',
+    url='http://code.google.com/p/web-page-replay/',
+    license='Apache License 2.0',
+    install_requires=['dnspython>=1.8'],
+    packages=[
+        '',
+        'third_party',
+        'third_party.ipaddr'
+        ],
+    package_dir={'': '.'},
+    package_data={
+        '': ['*.js', '*.txt', 'COPYING', 'LICENSE'],
+        },
+    entry_points={
+        'console_scripts': [
+            'httparchive = httparchive:main',
+            'replay = replay:main',
+            ]
+        },
+    )
diff --git a/catapult/telemetry/third_party/webpagereplay/sslproxy.py b/catapult/telemetry/third_party/webpagereplay/sslproxy.py
new file mode 100755
index 0000000..06bb601
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/sslproxy.py
@@ -0,0 +1,89 @@
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Extends BaseHTTPRequestHandler with SSL certificate generation."""
+
+import logging
+import socket
+
+import certutils
+
+
+
+def _SetUpUsingDummyCert(handler):
+  """Sets up connection providing the certificate to the client.
+
+  This method handles Server Name Indication (SNI) using dummy certs.
+
+  Args:
+    handler: an instance of BaseHTTPServer.BaseHTTPRequestHandler that is used
+      by some instance of BaseHTTPServer.HTTPServer.
+  """
+  # One of SSLv2_METHOD, SSLv3_METHOD, SSLv23_METHOD, or TLSv1_METHOD.
+  context = certutils.get_ssl_context()
+  def handle_servername(connection):
+    """A SNI callback that happens during do_handshake()."""
+    try:
+      host = connection.get_servername()
+      if host:
+        cert_str = (
+            handler.server.get_certificate(host))
+        new_context = certutils.get_ssl_context()
+        cert = certutils.load_cert(cert_str)
+        new_context.use_certificate(cert)
+        new_context.use_privatekey_file(handler.server.ca_cert_path)
+        connection.set_context(new_context)
+        return new_context
+      # else: fail with 'no shared cipher'
+    except Exception, e:
+      # Do not leak any exceptions or else openssl crashes.
+      logging.error('Exception in SNI handler: %s', e)
+
+  context.set_tlsext_servername_callback(handle_servername)
+  handler.connection = certutils.get_ssl_connection(context, handler.connection)
+  handler.connection.set_accept_state()
+  try:
+    handler.connection.do_handshake()
+  except certutils.Error, v:
+    host = handler.connection.get_servername()
+    if not host:
+      logging.error('Dropping request without SNI')
+      return ''
+    raise certutils.Error('SSL handshake error %s: %s' % (host, str(v)))
+
+  # Re-wrap the read/write streams with our new connection.
+  handler.rfile = socket._fileobject(handler.connection, 'rb', handler.rbufsize,
+                                  close=False)
+  handler.wfile = socket._fileobject(handler.connection, 'wb', handler.wbufsize,
+                                  close=False)
+
+
+def wrap_handler(handler_class):
+  """Wraps a BaseHTTPHandler with SSL MITM certificates."""
+  if certutils.openssl_import_error:
+    # pylint: disable=raising-bad-type
+    raise certutils.openssl_import_error
+
+  class WrappedHandler(handler_class):
+
+    def setup(self):
+      handler_class.setup(self)
+      _SetUpUsingDummyCert(self)
+
+    def finish(self):
+      handler_class.finish(self)
+      self.connection.shutdown()
+      self.connection.close()
+
+  return WrappedHandler
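A rough sketch of the contract wrap_handler() relies on, mirroring the test file added below rather than prescribing an API: the enclosing HTTPServer must expose a ca_cert_path attribute and a get_certificate(host) method returning a PEM certificate for the SNI host. The file name wpr_ca.pem is hypothetical:

import BaseHTTPServer

import certutils
import sslproxy

class EchoHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  def do_GET(self):
    self.send_response(200)
    self.end_headers()

class SslServer(BaseHTTPServer.HTTPServer):
  ca_cert_path = 'wpr_ca.pem'  # hypothetical path to the replay CA key+cert

  def get_certificate(self, host):
    # Mint a per-host certificate signed by the CA for the SNI callback.
    with open(self.ca_cert_path) as ca_file:
      return certutils.generate_cert(ca_file.read(), '', host)

server = SslServer(('localhost', 0), sslproxy.wrap_handler(EchoHandler))
server.serve_forever()
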
diff --git a/catapult/telemetry/third_party/webpagereplay/sslproxy_test.py b/catapult/telemetry/third_party/webpagereplay/sslproxy_test.py
new file mode 100644
index 0000000..faea0bd
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/sslproxy_test.py
@@ -0,0 +1,194 @@
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Test routines to generate dummy certificates."""
+
+import BaseHTTPServer
+import shutil
+import signal
+import socket
+import tempfile
+import threading
+import time
+import unittest
+
+import certutils
+import sslproxy
+
+
+class Client(object):
+
+  def __init__(self, ca_cert_path, verify_cb, port, host_name='foo.com',
+               host='localhost'):
+    self.host_name = host_name
+    self.verify_cb = verify_cb
+    self.ca_cert_path = ca_cert_path
+    self.port = port
+    self.host_name = host_name
+    self.host = host
+    self.connection = None
+
+  def run_request(self):
+    context = certutils.get_ssl_context()
+    context.set_verify(certutils.VERIFY_PEER, self.verify_cb)  # Demand a cert
+    context.use_certificate_file(self.ca_cert_path)
+    context.load_verify_locations(self.ca_cert_path)
+
+    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    self.connection = certutils.get_ssl_connection(context, s)
+    self.connection.connect((self.host, self.port))
+    self.connection.set_tlsext_host_name(self.host_name)
+
+    try:
+      self.connection.send('\r\n\r\n')
+    finally:
+      self.connection.shutdown()
+      self.connection.close()
+
+
+class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
+  protocol_version = 'HTTP/1.1'  # override BaseHTTPServer setting
+
+  def handle_one_request(self):
+    """Handle a single HTTP request."""
+    self.raw_requestline = self.rfile.readline(65537)
+
+
+class WrappedErrorHandler(Handler):
+  """Wraps handler to verify expected sslproxy errors are being raised."""
+
+  def setup(self):
+    Handler.setup(self)
+    try:
+      sslproxy._SetUpUsingDummyCert(self)
+    except certutils.Error:
+      self.server.error_function = certutils.Error
+
+  def finish(self):
+    Handler.finish(self)
+    self.connection.shutdown()
+    self.connection.close()
+
+
+class DummyArchive(object):
+
+  def __init__(self):
+    pass
+
+
+class DummyFetch(object):
+
+  def __init__(self):
+    self.http_archive = DummyArchive()
+
+
+class Server(BaseHTTPServer.HTTPServer):
+  """SSL server."""
+
+  def __init__(self, ca_cert_path, use_error_handler=False, port=0,
+               host='localhost'):
+    self.ca_cert_path = ca_cert_path
+    with open(ca_cert_path, 'r') as ca_file:
+      self.ca_cert_str = ca_file.read()
+    self.http_archive_fetch = DummyFetch()
+    if use_error_handler:
+      self.HANDLER = WrappedErrorHandler
+    else:
+      self.HANDLER = sslproxy.wrap_handler(Handler)
+    try:
+      BaseHTTPServer.HTTPServer.__init__(self, (host, port), self.HANDLER)
+    except Exception, e:
+      raise RuntimeError('Could not start HTTPSServer on port %d: %s'
+                         % (port, e))
+
+  def __enter__(self):
+    thread = threading.Thread(target=self.serve_forever)
+    thread.daemon = True
+    thread.start()
+    return self
+
+  def cleanup(self):
+    try:
+      self.shutdown()
+    except KeyboardInterrupt:
+      pass
+
+  def __exit__(self, type_, value_, traceback_):
+    self.cleanup()
+
+  def get_certificate(self, host):
+    return certutils.generate_cert(self.ca_cert_str, '', host)
+
+
+class TestClient(unittest.TestCase):
+  _temp_dir = None
+
+  def setUp(self):
+    self._temp_dir = tempfile.mkdtemp(prefix='sslproxy_', dir='/tmp')
+    self.ca_cert_path = self._temp_dir + '/testCA.pem'
+    self.cert_path = self._temp_dir + '/testCA-cert.cer'
+    self.wrong_ca_cert_path = self._temp_dir + '/wrong.pem'
+    self.wrong_cert_path = self._temp_dir + '/wrong-cert.cer'
+
+    # Write both pem and cer files for certificates
+    certutils.write_dummy_ca_cert(*certutils.generate_dummy_ca_cert(),
+                                  cert_path=self.ca_cert_path)
+    certutils.write_dummy_ca_cert(*certutils.generate_dummy_ca_cert(),
+                                  cert_path=self.wrong_ca_cert_path)
+
+  def tearDown(self):
+    if self._temp_dir:
+      shutil.rmtree(self._temp_dir)
+
+  def verify_cb(self, conn, cert, errnum, depth, ok):
+    """A callback that verifies the certificate authentication worked.
+
+    Args:
+      conn: Connection object
+      cert: x509 object
+      errnum: possible error number
+      depth: error depth
+      ok: 1 if the authentication worked, 0 if it didn't.
+    Returns:
+      1 or 0, depending on whether the verification worked.
+    """
+    self.assertFalse(cert.has_expired())
+    self.assertGreater(time.strftime('%Y%m%d%H%M%SZ', time.gmtime()),
+                       cert.get_notBefore())
+    return ok
+
+  def test_no_host(self):
+    with Server(self.ca_cert_path) as server:
+      c = Client(self.cert_path, self.verify_cb, server.server_port, '')
+      self.assertRaises(certutils.Error, c.run_request)
+
+  def test_client_connection(self):
+    with Server(self.ca_cert_path) as server:
+      c = Client(self.cert_path, self.verify_cb, server.server_port, 'foo.com')
+      c.run_request()
+
+      c = Client(self.cert_path, self.verify_cb, server.server_port,
+                 'random.host')
+      c.run_request()
+
+  def test_wrong_cert(self):
+    with Server(self.ca_cert_path, True) as server:
+      c = Client(self.wrong_cert_path, self.verify_cb, server.server_port,
+                 'foo.com')
+      self.assertRaises(certutils.Error, c.run_request)
+
+
+if __name__ == '__main__':
+  signal.signal(signal.SIGINT, signal.SIG_DFL)  # Exit on Ctrl-C
+  unittest.main()
diff --git a/catapult/telemetry/third_party/webpagereplay/test_runner.py b/catapult/telemetry/third_party/webpagereplay/test_runner.py
new file mode 100644
index 0000000..c8ca89f
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/test_runner.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+# Copyright (c) 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import optparse
+import os
+import sys
+import unittest
+
+__all__ = []
+
+def FilterSuite(suite, predicate):
+  new_suite = suite.__class__()
+
+  for x in suite:
+    if isinstance(x, unittest.TestSuite):
+      subsuite = FilterSuite(x, predicate)
+      if subsuite.countTestCases() == 0:
+        continue
+
+      new_suite.addTest(subsuite)
+      continue
+
+    assert isinstance(x, unittest.TestCase)
+    if predicate(x):
+      new_suite.addTest(x)
+
+  return new_suite
+
+class _TestLoader(unittest.TestLoader):
+  def __init__(self, *args):
+    super(_TestLoader, self).__init__(*args)
+    self.discover_calls = []
+
+  def loadTestsFromModule(self, module, use_load_tests=True):
+    if module.__file__ != __file__:
+      return super(_TestLoader, self).loadTestsFromModule(
+          module, use_load_tests)
+
+    suite = unittest.TestSuite()
+    for discover_args in self.discover_calls:
+      subsuite = self.discover(*discover_args)
+      suite.addTest(subsuite)
+    return suite
+
+class _RunnerImpl(unittest.TextTestRunner):
+  def __init__(self, filters):
+    super(_RunnerImpl, self).__init__(verbosity=2)
+    self.filters = filters
+
+  def ShouldTestRun(self, test):
+    return not self.filters or any(name in test.id() for name in self.filters)
+
+  def run(self, suite):
+    filtered_test = FilterSuite(suite, self.ShouldTestRun)
+    return super(_RunnerImpl, self).run(filtered_test)
+
+
+class TestRunner(object):
+  def __init__(self):
+    self._loader = _TestLoader()
+
+  def AddDirectory(self, dir_path, test_file_pattern="*test.py"):
+    assert os.path.isdir(dir_path)
+
+    self._loader.discover_calls.append((dir_path, test_file_pattern, dir_path))
+
+  def Main(self, argv=None):
+    if argv is None:
+      argv = sys.argv
+
+    parser = optparse.OptionParser()
+    options, args = parser.parse_args(argv[1:])
+
+    runner = _RunnerImpl(filters=args)
+    return unittest.main(module=__name__, argv=[sys.argv[0]],
+                         testLoader=self._loader,
+                         testRunner=runner)
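An illustrative sketch of the name-filtering behaviour: positional arguments passed to Main() become substring filters on test ids, so the call below would run only tests whose id mentions script_injector (the directory and filter value are examples only):

import sys

import test_runner

runner = test_runner.TestRunner()
runner.AddDirectory('.')  # discover *test.py files in the current directory
# Equivalent to "./run_tests script_injector".
sys.exit(runner.Main(argv=['run_tests', 'script_injector']))
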
diff --git a/catapult/telemetry/third_party/webpagereplay/test_utils.py b/catapult/telemetry/third_party/webpagereplay/test_utils.py
new file mode 100644
index 0000000..b76750e
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/test_utils.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import urllib2
+
+
+def _IsInternetOn():
+  try:
+    urllib2.urlopen('https://example.com', timeout=10)
+    return True
+  except urllib2.URLError:
+    return False
+
+
+class RealNetworkFetchTest(unittest.TestCase):
+  def setUp(self):
+    if not _IsInternetOn():
+      self.skipTest('No internet, skip test')
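An illustrative sketch of how a test can opt into the skip-when-offline behaviour by inheriting from RealNetworkFetchTest; the test body is hypothetical:

import unittest
import urllib2

import test_utils

class ExampleFetchTest(test_utils.RealNetworkFetchTest):

  def test_fetch_example_com(self):
    response = urllib2.urlopen('https://example.com', timeout=10)
    self.assertEqual(response.getcode(), 200)

if __name__ == '__main__':
  unittest.main()
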
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/__init__.py b/catapult/telemetry/third_party/webpagereplay/third_party/__init__.py
new file mode 100644
index 0000000..ea60fc8
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/__init__.py
@@ -0,0 +1,38 @@
+# Copyright 2010 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+
+try:
+    __file__
+except NameError:
+    __file__ = sys.argv[0]
+third_party_dir = os.path.dirname(os.path.abspath(__file__))
+ipaddr_dir = os.path.join(third_party_dir, "ipaddr")
+sys.path.append(ipaddr_dir)  # workaround for no __init__.py
+import ipaddr
+
+# Modules in dns/ import sibling modules with "import dns.xxx", but some
+# platforms have dns/ in the global site-packages directory, so we need to
+# raise the precedence of the local search path (crbug/493869).
+# The implementation here preloads all dns/ modules into this package so
+# clients don't need to worry about import path issues.
+# An easier solution might be to modify the dns/ modules to use relative
+# imports, but we avoid touching the third_party lib for now.
+sys.path.insert(0, third_party_dir)
+from dns import __all__ as all_dns_modules
+all_dns_modules = ['dns.' + m for m in all_dns_modules]
+map(__import__, all_dns_modules)
+sys.path.pop(0)
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/LICENSE b/catapult/telemetry/third_party/webpagereplay/third_party/dns/LICENSE
new file mode 100644
index 0000000..633c18c
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/LICENSE
@@ -0,0 +1,14 @@
+Copyright (C) 2001-2003 Nominum, Inc.
+
+Permission to use, copy, modify, and distribute this software and its
+documentation for any purpose with or without fee is hereby granted,
+provided that the above copyright notice and this permission notice
+appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/README.web-page-replay b/catapult/telemetry/third_party/webpagereplay/third_party/dns/README.web-page-replay
new file mode 100644
index 0000000..6d445fe
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/README.web-page-replay
@@ -0,0 +1,12 @@
+Name: A DNS toolkit for Python
+Short Name: dnspython
+URL: http://www.dnspython.org/
+Version: 1.8.0 (found in ./version.py)
+License: ISC
+License File: LICENSE
+
+Description:
+Used by Web Page Replay's dnsproxy module to create and handle dns queries.
+
+Local Modifications:
+None.
\ No newline at end of file
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/__init__.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/__init__.py
new file mode 100644
index 0000000..5ad5737
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/__init__.py
@@ -0,0 +1,52 @@
+# Copyright (C) 2003-2007, 2009 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""dnspython DNS toolkit"""
+
+__all__ = [
+    'dnssec',
+    'e164',
+    'edns',
+    'entropy',
+    'exception',
+    'flags',
+    'inet',
+    'ipv4',
+    'ipv6',
+    'message',
+    'name',
+    'namedict',
+    'node',
+    'opcode',
+    'query',
+    'rcode',
+    'rdata',
+    'rdataclass',
+    'rdataset',
+    'rdatatype',
+    'renderer',
+    'resolver',
+    'reversename',
+    'rrset',
+    'set',
+    'tokenizer',
+    'tsig',
+    'tsigkeyring',
+    'ttl',
+    'rdtypes',
+    'update',
+    'version',
+    'zone',
+]
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/dnssec.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/dnssec.py
new file mode 100644
index 0000000..54fd78d
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/dnssec.py
@@ -0,0 +1,72 @@
+# Copyright (C) 2003-2007, 2009 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Common DNSSEC-related functions and constants."""
+
+RSAMD5 = 1
+DH = 2
+DSA = 3
+ECC = 4
+RSASHA1 = 5
+DSANSEC3SHA1 = 6
+RSASHA1NSEC3SHA1 = 7
+RSASHA256 = 8
+RSASHA512 = 10
+INDIRECT = 252
+PRIVATEDNS = 253
+PRIVATEOID = 254
+
+_algorithm_by_text = {
+    'RSAMD5' : RSAMD5,
+    'DH' : DH,
+    'DSA' : DSA,
+    'ECC' : ECC,
+    'RSASHA1' : RSASHA1,
+    'DSANSEC3SHA1' : DSANSEC3SHA1,
+    'RSASHA1NSEC3SHA1' : RSASHA1NSEC3SHA1,
+    'RSASHA256' : RSASHA256,
+    'RSASHA512' : RSASHA512,
+    'INDIRECT' : INDIRECT,
+    'PRIVATEDNS' : PRIVATEDNS,
+    'PRIVATEOID' : PRIVATEOID,
+    }
+
+# We construct the inverse mapping programmatically to ensure that we
+# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
+# would cause the mapping not to be a true inverse.
+
+_algorithm_by_value = dict([(y, x) for x, y in _algorithm_by_text.iteritems()])
+
+class UnknownAlgorithm(Exception):
+    """Raised if an algorithm is unknown."""
+    pass
+
+def algorithm_from_text(text):
+    """Convert text into a DNSSEC algorithm value
+    @rtype: int"""
+    
+    value = _algorithm_by_text.get(text.upper())
+    if value is None:
+        value = int(text)
+    return value
+
+def algorithm_to_text(value):
+    """Convert a DNSSEC algorithm value to text
+    @rtype: string"""
+    
+    text = _algorithm_by_value.get(value)
+    if text is None:
+        text = str(value)
+    return text
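A small illustrative sketch of the algorithm helpers above, assuming the webpagereplay third_party directory is on sys.path so that dns.dnssec imports:

import dns.dnssec

assert dns.dnssec.algorithm_from_text('RSASHA256') == dns.dnssec.RSASHA256
assert dns.dnssec.algorithm_from_text('8') == 8  # unknown names fall back to int()
assert dns.dnssec.algorithm_to_text(dns.dnssec.RSASHA1) == 'RSASHA1'
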
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/e164.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/e164.py
new file mode 100644
index 0000000..d8f71ec
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/e164.py
@@ -0,0 +1,79 @@
+# Copyright (C) 2006, 2007, 2009 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS E.164 helpers
+
+@var public_enum_domain: The DNS public ENUM domain, e164.arpa.
+@type public_enum_domain: dns.name.Name object
+"""
+
+import dns.exception
+import dns.name
+import dns.resolver
+
+public_enum_domain = dns.name.from_text('e164.arpa.')
+
+def from_e164(text, origin=public_enum_domain):
+    """Convert an E.164 number in textual form into a Name object whose
+    value is the ENUM domain name for that number.
+    @param text: an E.164 number in textual form.
+    @type text: str
+    @param origin: The domain in which the number should be constructed.
+    The default is e164.arpa.
+    @type: dns.name.Name object or None
+    @rtype: dns.name.Name object
+    """
+    parts = [d for d in text if d.isdigit()]
+    parts.reverse()
+    return dns.name.from_text('.'.join(parts), origin=origin)
+
+def to_e164(name, origin=public_enum_domain, want_plus_prefix=True):
+    """Convert an ENUM domain name into an E.164 number.
+    @param name: the ENUM domain name.
+    @type name: dns.name.Name object.
+    @param origin: A domain containing the ENUM domain name.  The
+    name is relativized to this domain before being converted to text.
+    @type: dns.name.Name object or None
+    @param want_plus_prefix: if True, add a '+' to the beginning of the
+    returned number.
+    @rtype: str
+    """
+    if not origin is None:
+        name = name.relativize(origin)
+    dlabels = [d for d in name.labels if (d.isdigit() and len(d) == 1)]
+    if len(dlabels) != len(name.labels):
+        raise dns.exception.SyntaxError('non-digit labels in ENUM domain name')
+    dlabels.reverse()
+    text = ''.join(dlabels)
+    if want_plus_prefix:
+        text = '+' + text
+    return text
+
+def query(number, domains, resolver=None):
+    """Look for NAPTR RRs for the specified number in the specified domains.
+
+    e.g. lookup('16505551212', ['e164.dnspython.org.', 'e164.arpa.'])
+    """
+    if resolver is None:
+        resolver = dns.resolver.get_default_resolver()
+    for domain in domains:
+        if isinstance(domain, (str, unicode)):
+            domain = dns.name.from_text(domain)
+        qname = dns.e164.from_e164(number, domain)
+        try:
+            return resolver.query(qname, 'NAPTR')
+        except dns.resolver.NXDOMAIN:
+            pass
+    raise dns.resolver.NXDOMAIN
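A small illustrative sketch of the E.164 helpers above (same sys.path assumption as before):

import dns.e164

name = dns.e164.from_e164('+1 650 555 1212')
# name is 2.1.2.1.5.5.5.0.5.6.1.e164.arpa.
assert dns.e164.to_e164(name) == '+16505551212'
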
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/edns.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/edns.py
new file mode 100644
index 0000000..1731ced
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/edns.py
@@ -0,0 +1,142 @@
+# Copyright (C) 2009 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""EDNS Options"""
+
+NSID = 3
+
+class Option(object):
+    """Base class for all EDNS option types.
+    """
+
+    def __init__(self, otype):
+        """Initialize an option.
+        @param otype: The option type
+        @type otype: int
+        """
+        self.otype = otype
+
+    def to_wire(self, file):
+        """Convert an option to wire format.
+        """
+        raise NotImplementedError
+
+    def from_wire(cls, otype, wire, current, olen):
+        """Build an EDNS option object from wire format
+
+        @param otype: The option type
+        @type otype: int
+        @param wire: The wire-format message
+        @type wire: string
+        @param current: The offset in wire of the beginning of the rdata.
+        @type current: int
+        @param olen: The length of the wire-format option data
+        @type olen: int
+        @rtype: dns.edns.Option instance"""
+        raise NotImplementedError
+
+    from_wire = classmethod(from_wire)
+
+    def _cmp(self, other):
+        """Compare an ENDS option with another option of the same type.
+        Return < 0 if self < other, 0 if self == other, and > 0 if self > other.
+        """
+        raise NotImplementedError
+
+    def __eq__(self, other):
+        if not isinstance(other, Option):
+            return False
+        if self.otype != other.otype:
+            return False
+        return self._cmp(other) == 0
+
+    def __ne__(self, other):
+        if not isinstance(other, Option):
+            return False
+        if self.otype != other.otype:
+            return False
+        return self._cmp(other) != 0
+
+    def __lt__(self, other):
+        if not isinstance(other, Option) or \
+               self.otype != other.otype:
+            return NotImplemented
+        return self._cmp(other) < 0
+
+    def __le__(self, other):
+        if not isinstance(other, Option) or \
+               self.otype != other.otype:
+            return NotImplemented
+        return self._cmp(other) <= 0
+
+    def __ge__(self, other):
+        if not isinstance(other, Option) or \
+               self.otype != other.otype:
+            return NotImplemented
+        return self._cmp(other) >= 0
+
+    def __gt__(self, other):
+        if not isinstance(other, Option) or \
+               self.otype != other.otype:
+            return NotImplemented
+        return self._cmp(other) > 0
+
+
+class GenericOption(Option):
+    """Generate Rdata Class
+
+    This class is used for EDNS option types for which we have no better
+    implementation.
+    """
+
+    def __init__(self, otype, data):
+        super(GenericOption, self).__init__(otype)
+        self.data = data
+
+    def to_wire(self, file):
+        file.write(self.data)
+
+    def from_wire(cls, otype, wire, current, olen):
+        return cls(otype, wire[current : current + olen])
+
+    from_wire = classmethod(from_wire)
+
+    def _cmp(self, other):
+        return cmp(self.data, other.data)
+
+_type_to_class = {
+}
+
+def get_option_class(otype):
+    cls = _type_to_class.get(otype)
+    if cls is None:
+        cls = GenericOption
+    return cls
+
+def option_from_wire(otype, wire, current, olen):
+    """Build an EDNS option object from wire format
+
+    @param otype: The option type
+    @type otype: int
+    @param wire: The wire-format message
+    @type wire: string
+    @param current: The offset in wire of the beginning of the rdata.
+    @type current: int
+    @param olen: The length of the wire-format option data
+    @type olen: int
+    @rtype: dns.edns.Option instance"""
+
+    cls = get_option_class(otype)
+    return cls.from_wire(otype, wire, current, olen)
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/entropy.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/entropy.py
new file mode 100644
index 0000000..fd9d4f8
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/entropy.py
@@ -0,0 +1,123 @@
+# Copyright (C) 2009 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import os
+import time
+try:
+    import threading as _threading
+except ImportError:
+    import dummy_threading as _threading
+
+class EntropyPool(object):
+    def __init__(self, seed=None):
+        self.pool_index = 0
+        self.digest = None
+        self.next_byte = 0
+        self.lock = _threading.Lock()
+        try:
+            import hashlib
+            self.hash = hashlib.sha1()
+            self.hash_len = 20
+        except:
+            try:
+                import sha
+                self.hash = sha.new()
+                self.hash_len = 20
+            except:
+                import md5
+                self.hash = md5.new()
+                self.hash_len = 16
+        self.pool = '\0' * self.hash_len
+        if not seed is None:
+            self.stir(seed)
+            self.seeded = True
+        else:
+            self.seeded = False
+
+    def stir(self, entropy, already_locked=False):
+        if not already_locked:
+            self.lock.acquire()
+        try:
+            bytes = [ord(c) for c in self.pool]
+            for c in entropy:
+                if self.pool_index == self.hash_len:
+                    self.pool_index = 0
+                b = ord(c) & 0xff
+                bytes[self.pool_index] ^= b
+                self.pool_index += 1
+            self.pool = ''.join([chr(c) for c in bytes])
+        finally:
+            if not already_locked:
+                self.lock.release()
+
+    def _maybe_seed(self):
+        if not self.seeded:
+            try:
+                seed = os.urandom(16)
+            except:
+                try:
+                    r = file('/dev/urandom', 'r', 0)
+                    try:
+                        seed = r.read(16)
+                    finally:
+                        r.close()
+                except:
+                    seed = str(time.time())
+            self.seeded = True
+            self.stir(seed, True)
+
+    def random_8(self):
+        self.lock.acquire()
+        self._maybe_seed()
+        try:
+            if self.digest is None or self.next_byte == self.hash_len:
+                self.hash.update(self.pool)
+                self.digest = self.hash.digest()
+                self.stir(self.digest, True)
+                self.next_byte = 0
+            value = ord(self.digest[self.next_byte])
+            self.next_byte += 1
+        finally:
+            self.lock.release()
+        return value
+
+    def random_16(self):
+        return self.random_8() * 256 + self.random_8()
+
+    def random_32(self):
+        return self.random_16() * 65536 + self.random_16()
+
+    def random_between(self, first, last):
+        size = last - first + 1
+        if size > 4294967296L:
+            raise ValueError('too big')
+        if size > 65536:
+            rand = self.random_32
+            max = 4294967295L
+        elif size > 256:
+            rand = self.random_16
+            max = 65535
+        else:
+            rand = self.random_8
+            max = 255
+        return (first + size * rand() // (max + 1))
+
+pool = EntropyPool()
+
+def random_16():
+    return pool.random_16()
+
+def between(first, last):
+    return pool.random_between(first, last)
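A small illustrative sketch of the module-level entropy helpers above:

import dns.entropy

query_id = dns.entropy.random_16()       # 0..65535, e.g. a DNS message id
port = dns.entropy.between(1024, 65535)  # bounds are inclusive
assert 0 <= query_id <= 65535
assert 1024 <= port <= 65535
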
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/exception.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/exception.py
new file mode 100644
index 0000000..c6d6570
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/exception.py
@@ -0,0 +1,40 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Common DNS Exceptions."""
+
+class DNSException(Exception):
+    """Abstract base class shared by all dnspython exceptions."""
+    pass
+
+class FormError(DNSException):
+    """DNS message is malformed."""
+    pass
+
+class SyntaxError(DNSException):
+    """Text input is malformed."""
+    pass
+
+class UnexpectedEnd(SyntaxError):
+    """Raised if text input ends unexpectedly."""
+    pass
+
+class TooBig(DNSException):
+    """The message is too big."""
+    pass
+
+class Timeout(DNSException):
+    """The operation timed out."""
+    pass
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/flags.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/flags.py
new file mode 100644
index 0000000..17afdbc
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/flags.py
@@ -0,0 +1,106 @@
+# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Message Flags."""
+
+# Standard DNS flags
+
+QR = 0x8000
+AA = 0x0400
+TC = 0x0200
+RD = 0x0100
+RA = 0x0080
+AD = 0x0020
+CD = 0x0010
+
+# EDNS flags
+
+DO = 0x8000
+
+_by_text = {
+    'QR' : QR,
+    'AA' : AA,
+    'TC' : TC,
+    'RD' : RD,
+    'RA' : RA,
+    'AD' : AD,
+    'CD' : CD
+}
+
+_edns_by_text = {
+    'DO' : DO
+}
+
+
+# We construct the inverse mappings programmatically to ensure that we
+# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
+# would cause the mappings not to be true inverses.
+
+_by_value = dict([(y, x) for x, y in _by_text.iteritems()])
+
+_edns_by_value = dict([(y, x) for x, y in _edns_by_text.iteritems()])
+
+def _order_flags(table):
+    order = list(table.iteritems())
+    order.sort()
+    order.reverse()
+    return order
+
+_flags_order = _order_flags(_by_value)
+
+_edns_flags_order = _order_flags(_edns_by_value)
+
+def _from_text(text, table):
+    flags = 0
+    tokens = text.split()
+    for t in tokens:
+        flags = flags | table[t.upper()]
+    return flags
+
+def _to_text(flags, table, order):
+    text_flags = []
+    for k, v in order:
+        if flags & k != 0:
+            text_flags.append(v)
+    return ' '.join(text_flags)
+
+def from_text(text):
+    """Convert a space-separated list of flag text values into a flags
+    value.
+    @rtype: int"""
+
+    return _from_text(text, _by_text)
+
+def to_text(flags):
+    """Convert a flags value into a space-separated list of flag text
+    values.
+    @rtype: string"""
+
+    return _to_text(flags, _by_value, _flags_order)
+    
+
+def edns_from_text(text):
+    """Convert a space-separated list of EDNS flag text values into a EDNS
+    flags value.
+    @rtype: int"""
+
+    return _from_text(text, _edns_by_text)
+
+def edns_to_text(flags):
+    """Convert an EDNS flags value into a space-separated list of EDNS flag
+    text values.
+    @rtype: string"""
+
+    return _to_text(flags, _edns_by_value, _edns_flags_order)
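A small illustrative sketch of the flag helpers above:

import dns.flags

flags = dns.flags.from_text('QR RD RA')
assert flags == dns.flags.QR | dns.flags.RD | dns.flags.RA
assert dns.flags.to_text(flags) == 'QR RD RA'
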
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/inet.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/inet.py
new file mode 100644
index 0000000..8a8f3e1
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/inet.py
@@ -0,0 +1,108 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Generic Internet address helper functions."""
+
+import socket
+
+import dns.ipv4
+import dns.ipv6
+
+
+# We assume that AF_INET is always defined.
+
+AF_INET = socket.AF_INET
+
+# AF_INET6 might not be defined in the socket module, but we need it.
+# We'll try to use the socket module's value, and if it doesn't work,
+# we'll use our own value.
+
+try:
+    AF_INET6 = socket.AF_INET6
+except AttributeError:
+    AF_INET6 = 9999
+
+def inet_pton(family, text):
+    """Convert the textual form of a network address into its binary form.
+
+    @param family: the address family
+    @type family: int
+    @param text: the textual address
+    @type text: string
+    @raises NotImplementedError: the address family specified is not
+    implemented.
+    @rtype: string
+    """
+    
+    if family == AF_INET:
+        return dns.ipv4.inet_aton(text)
+    elif family == AF_INET6:
+        return dns.ipv6.inet_aton(text)
+    else:
+        raise NotImplementedError
+
+def inet_ntop(family, address):
+    """Convert the binary form of a network address into its textual form.
+
+    @param family: the address family
+    @type family: int
+    @param address: the binary address
+    @type address: string
+    @raises NotImplementedError: the address family specified is not
+    implemented.
+    @rtype: string
+    """
+    if family == AF_INET:
+        return dns.ipv4.inet_ntoa(address)
+    elif family == AF_INET6:
+        return dns.ipv6.inet_ntoa(address)
+    else:
+        raise NotImplementedError
+
+def af_for_address(text):
+    """Determine the address family of a textual-form network address.
+
+    @param text: the textual address
+    @type text: string
+    @raises ValueError: the address family cannot be determined from the input.
+    @rtype: int
+    """
+    try:
+        junk = dns.ipv4.inet_aton(text)
+        return AF_INET
+    except:
+        try:
+            junk = dns.ipv6.inet_aton(text)
+            return AF_INET6
+        except:
+            raise ValueError
+
+def is_multicast(text):
+    """Is the textual-form network address a multicast address?
+
+    @param text: the textual address
+    @raises ValueError: the address family cannot be determined from the input.
+    @rtype: bool
+    """
+    try:
+        first = ord(dns.ipv4.inet_aton(text)[0])
+        return (first >= 224 and first <= 239)
+    except:
+        try:
+            first = ord(dns.ipv6.inet_aton(text)[0])
+            return (first == 255)
+        except:
+            raise ValueError
+    
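A small illustrative sketch of the generic address helpers above:

import dns.inet

assert dns.inet.af_for_address('192.0.2.1') == dns.inet.AF_INET
assert dns.inet.af_for_address('2001:db8::1') == dns.inet.AF_INET6

packed = dns.inet.inet_pton(dns.inet.AF_INET, '192.0.2.1')
assert dns.inet.inet_ntop(dns.inet.AF_INET, packed) == '192.0.2.1'
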
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/ipv4.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/ipv4.py
new file mode 100644
index 0000000..1569da5
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/ipv4.py
@@ -0,0 +1,36 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""IPv4 helper functions."""
+
+import socket
+import sys
+
+if sys.hexversion < 0x02030000 or sys.platform == 'win32':
+    #
+    # Some versions of Python 2.2 have an inet_aton which rejects
+    # the valid IP address '255.255.255.255'.  It appears this
+    # problem is still present on the Win32 platform even in 2.3.
+    # We'll work around the problem.
+    #
+    def inet_aton(text):
+        if text == '255.255.255.255':
+            return '\xff' * 4
+        else:
+            return socket.inet_aton(text)
+else:
+    inet_aton = socket.inet_aton
+
+inet_ntoa = socket.inet_ntoa
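A minimal sketch of the dns.ipv4 helpers above (Python 2, as in the vendored code);
'255.255.255.255' exercises the work-around for the broken platform inet_aton:

    import dns.ipv4

    packed = dns.ipv4.inet_aton('255.255.255.255')   # 4-byte binary string
    print repr(packed)                                # '\xff\xff\xff\xff'
    print dns.ipv4.inet_ntoa(packed)                  # 255.255.255.255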
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/ipv6.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/ipv6.py
new file mode 100644
index 0000000..33c6713
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/ipv6.py
@@ -0,0 +1,163 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""IPv6 helper functions."""
+
+import re
+
+import dns.exception
+import dns.ipv4
+
+_leading_zero = re.compile(r'0+([0-9a-f]+)')
+
+def inet_ntoa(address):
+    """Convert a network format IPv6 address into text.
+
+    @param address: the binary address
+    @type address: string
+    @rtype: string
+    @raises ValueError: the address isn't 16 bytes long
+    """
+
+    if len(address) != 16:
+        raise ValueError("IPv6 addresses are 16 bytes long")
+    hex = address.encode('hex_codec')
+    chunks = []
+    i = 0
+    l = len(hex)
+    while i < l:
+        chunk = hex[i : i + 4]
+        # strip leading zeros.  we do this with an re instead of
+        # with lstrip() because lstrip() didn't support chars until
+        # python 2.2.2
+        m = _leading_zero.match(chunk)
+        if not m is None:
+            chunk = m.group(1)
+        chunks.append(chunk)
+        i += 4
+    #
+    # Compress the longest subsequence of 0-value chunks to ::
+    #
+    best_start = 0
+    best_len = 0
+    start = -1
+    last_was_zero = False
+    for i in xrange(8):
+        if chunks[i] != '0':
+            if last_was_zero:
+                end = i
+                current_len = end - start
+                if current_len > best_len:
+                    best_start = start
+                    best_len = current_len
+                last_was_zero = False
+        elif not last_was_zero:
+            start = i
+            last_was_zero = True
+    if last_was_zero:
+        end = 8
+        current_len = end - start
+        if current_len > best_len:
+            best_start = start
+            best_len = current_len
+    if best_len > 0:
+        if best_start == 0 and \
+           (best_len == 6 or
+            best_len == 5 and chunks[5] == 'ffff'):
+            # We have an embedded IPv4 address
+            if best_len == 6:
+                prefix = '::'
+            else:
+                prefix = '::ffff:'
+            hex = prefix + dns.ipv4.inet_ntoa(address[12:])
+        else:
+            hex = ':'.join(chunks[:best_start]) + '::' + \
+                  ':'.join(chunks[best_start + best_len:])
+    else:
+        hex = ':'.join(chunks)
+    return hex
+
+_v4_ending = re.compile(r'(.*):(\d+)\.(\d+)\.(\d+)\.(\d+)$')
+_colon_colon_start = re.compile(r'::.*')
+_colon_colon_end = re.compile(r'.*::$')
+
+def inet_aton(text):
+    """Convert a text format IPv6 address into network format.
+
+    @param text: the textual address
+    @type text: string
+    @rtype: string
+    @raises dns.exception.SyntaxError: the text was not properly formatted
+    """
+
+    #
+    # Our aim here is not something fast; we just want something that works.
+    #
+
+    if text == '::':
+        text = '0::'
+    #
+    # Get rid of the icky dot-quad syntax if we have it.
+    #
+    m = _v4_ending.match(text)
+    if not m is None:
+        text = "%s:%04x:%04x" % (m.group(1),
+                                 int(m.group(2)) * 256 + int(m.group(3)),
+                                 int(m.group(4)) * 256 + int(m.group(5)))
+    #
+    # Try to turn '::<whatever>' into ':<whatever>'; if no match try to
+    # turn '<whatever>::' into '<whatever>:'
+    #
+    m = _colon_colon_start.match(text)
+    if not m is None:
+        text = text[1:]
+    else:
+        m = _colon_colon_end.match(text)
+        if not m is None:
+            text = text[:-1]
+    #
+    # Now canonicalize into 8 chunks of 4 hex digits each
+    #
+    chunks = text.split(':')
+    l = len(chunks)
+    if l > 8:
+        raise dns.exception.SyntaxError
+    seen_empty = False
+    canonical = []
+    for c in chunks:
+        if c == '':
+            if seen_empty:
+                raise dns.exception.SyntaxError
+            seen_empty = True
+            for i in xrange(0, 8 - l + 1):
+                canonical.append('0000')
+        else:
+            lc = len(c)
+            if lc > 4:
+                raise dns.exception.SyntaxError
+            if lc != 4:
+                c = ('0' * (4 - lc)) + c
+            canonical.append(c)
+    if l < 8 and not seen_empty:
+        raise dns.exception.SyntaxError
+    text = ''.join(canonical)
+
+    #
+    # Finally we can go to binary.
+    #
+    try:
+        return text.decode('hex_codec')
+    except TypeError:
+        raise dns.exception.SyntaxError
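A minimal sketch of the dns.ipv6 conversions above, showing the '::' shorthand,
an embedded IPv4-mapped address, and zero-run compression on the way back out
(example addresses only):

    import dns.ipv6

    for text in ['::', '::ffff:192.0.2.1', '2001:db8:0:0:0:0:0:1']:
        packed = dns.ipv6.inet_aton(text)     # 16-byte binary form
        print text, '->', dns.ipv6.inet_ntoa(packed)
    # Expected output, roughly: '::', '::ffff:192.0.2.1', '2001:db8::1'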
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/message.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/message.py
new file mode 100644
index 0000000..ba0ebf6
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/message.py
@@ -0,0 +1,1083 @@
+# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Messages"""
+
+import cStringIO
+import random
+import struct
+import sys
+import time
+
+import dns.exception
+import dns.flags
+import dns.name
+import dns.opcode
+import dns.entropy
+import dns.rcode
+import dns.rdata
+import dns.rdataclass
+import dns.rdatatype
+import dns.rrset
+import dns.renderer
+import dns.tsig
+
+class ShortHeader(dns.exception.FormError):
+    """Raised if the DNS packet passed to from_wire() is too short."""
+    pass
+
+class TrailingJunk(dns.exception.FormError):
+    """Raised if the DNS packet passed to from_wire() has extra junk
+    at the end of it."""
+    pass
+
+class UnknownHeaderField(dns.exception.DNSException):
+    """Raised if a header field name is not recognized when converting from
+    text into a message."""
+    pass
+
+class BadEDNS(dns.exception.FormError):
+    """Raised if an OPT record occurs somewhere other than the start of
+    the additional data section."""
+    pass
+
+class BadTSIG(dns.exception.FormError):
+    """Raised if a TSIG record occurs somewhere other than the end of
+    the additional data section."""
+    pass
+
+class UnknownTSIGKey(dns.exception.DNSException):
+    """Raised if we got a TSIG but don't know the key."""
+    pass
+
+class Message(object):
+    """A DNS message.
+
+    @ivar id: The query id; the default is a randomly chosen id.
+    @type id: int
+    @ivar flags: The DNS flags of the message.  @see: RFC 1035 for an
+    explanation of these flags.
+    @type flags: int
+    @ivar question: The question section.
+    @type question: list of dns.rrset.RRset objects
+    @ivar answer: The answer section.
+    @type answer: list of dns.rrset.RRset objects
+    @ivar authority: The authority section.
+    @type authority: list of dns.rrset.RRset objects
+    @ivar additional: The additional data section.
+    @type additional: list of dns.rrset.RRset objects
+    @ivar edns: The EDNS level to use.  The default is -1, no EDNS.
+    @type edns: int
+    @ivar ednsflags: The EDNS flags
+    @type ednsflags: long
+    @ivar payload: The EDNS payload size.  The default is 0.
+    @type payload: int
+    @ivar options: The EDNS options
+    @type options: list of dns.edns.Option objects
+    @ivar request_payload: The associated request's EDNS payload size.
+    @type request_payload: int
+    @ivar keyring: The TSIG keyring to use.  The default is None.
+    @type keyring: dict
+    @ivar keyname: The TSIG keyname to use.  The default is None.
+    @type keyname: dns.name.Name object
+    @ivar keyalgorithm: The TSIG key algorithm to use.  The default is
+    dns.tsig.default_algorithm.
+    @type keyalgorithm: string
+    @ivar request_mac: The TSIG MAC of the request message associated with
+    this message; used when validating TSIG signatures.   @see: RFC 2845 for
+    more information on TSIG fields.
+    @type request_mac: string
+    @ivar fudge: TSIG time fudge; default is 300 seconds.
+    @type fudge: int
+    @ivar original_id: TSIG original id; defaults to the message's id
+    @type original_id: int
+    @ivar tsig_error: TSIG error code; default is 0.
+    @type tsig_error: int
+    @ivar other_data: TSIG other data.
+    @type other_data: string
+    @ivar mac: The TSIG MAC for this message.
+    @type mac: string
+    @ivar xfr: Is the message being used to contain the results of a DNS
+    zone transfer?  The default is False.
+    @type xfr: bool
+    @ivar origin: The origin of the zone in messages which are used for
+    zone transfers or for DNS dynamic updates.  The default is None.
+    @type origin: dns.name.Name object
+    @ivar tsig_ctx: The TSIG signature context associated with this
+    message.  The default is None.
+    @type tsig_ctx: hmac.HMAC object
+    @ivar had_tsig: Did the message decoded from wire format have a TSIG
+    signature?
+    @type had_tsig: bool
+    @ivar multi: Is this message part of a multi-message sequence?  The
+    default is false.  This variable is used when validating TSIG signatures
+    on messages which are part of a zone transfer.
+    @type multi: bool
+    @ivar first: Is this message standalone, or the first of a multi
+    message sequence?  This variable is used when validating TSIG signatures
+    on messages which are part of a zone transfer.
+    @type first: bool
+    @ivar index: An index of rrsets in the message.  The index key is
+    (section, name, rdclass, rdtype, covers, deleting).  Indexing can be
+    disabled by setting the index to None.
+    @type index: dict
+    """
+
+    def __init__(self, id=None):
+        if id is None:
+            self.id = dns.entropy.random_16()
+        else:
+            self.id = id
+        self.flags = 0
+        self.question = []
+        self.answer = []
+        self.authority = []
+        self.additional = []
+        self.edns = -1
+        self.ednsflags = 0
+        self.payload = 0
+        self.options = []
+        self.request_payload = 0
+        self.keyring = None
+        self.keyname = None
+        self.keyalgorithm = dns.tsig.default_algorithm
+        self.request_mac = ''
+        self.other_data = ''
+        self.tsig_error = 0
+        self.fudge = 300
+        self.original_id = self.id
+        self.mac = ''
+        self.xfr = False
+        self.origin = None
+        self.tsig_ctx = None
+        self.had_tsig = False
+        self.multi = False
+        self.first = True
+        self.index = {}
+
+    def __repr__(self):
+        return '<DNS message, ID ' + `self.id` + '>'
+
+    def __str__(self):
+        return self.to_text()
+
+    def to_text(self,  origin=None, relativize=True, **kw):
+        """Convert the message to text.
+
+        The I{origin}, I{relativize}, and any other keyword
+        arguments are passed to the rrset to_text() method.
+
+        @rtype: string
+        """
+
+        s = cStringIO.StringIO()
+        print >> s, 'id %d' % self.id
+        print >> s, 'opcode %s' % \
+              dns.opcode.to_text(dns.opcode.from_flags(self.flags))
+        rc = dns.rcode.from_flags(self.flags, self.ednsflags)
+        print >> s, 'rcode %s' % dns.rcode.to_text(rc)
+        print >> s, 'flags %s' % dns.flags.to_text(self.flags)
+        if self.edns >= 0:
+            print >> s, 'edns %s' % self.edns
+            if self.ednsflags != 0:
+                print >> s, 'eflags %s' % \
+                      dns.flags.edns_to_text(self.ednsflags)
+            print >> s, 'payload', self.payload
+        is_update = dns.opcode.is_update(self.flags)
+        if is_update:
+            print >> s, ';ZONE'
+        else:
+            print >> s, ';QUESTION'
+        for rrset in self.question:
+            print >> s, rrset.to_text(origin, relativize, **kw)
+        if is_update:
+            print >> s, ';PREREQ'
+        else:
+            print >> s, ';ANSWER'
+        for rrset in self.answer:
+            print >> s, rrset.to_text(origin, relativize, **kw)
+        if is_update:
+            print >> s, ';UPDATE'
+        else:
+            print >> s, ';AUTHORITY'
+        for rrset in self.authority:
+            print >> s, rrset.to_text(origin, relativize, **kw)
+        print >> s, ';ADDITIONAL'
+        for rrset in self.additional:
+            print >> s, rrset.to_text(origin, relativize, **kw)
+        #
+        # We strip off the final \n so the caller can print the result without
+        # doing weird things to get around eccentricities in Python print
+        # formatting
+        #
+        return s.getvalue()[:-1]
+
+    def __eq__(self, other):
+        """Two messages are equal if they have the same content in the
+        header, question, answer, and authority sections.
+        @rtype: bool"""
+        if not isinstance(other, Message):
+            return False
+        if self.id != other.id:
+            return False
+        if self.flags != other.flags:
+            return False
+        for n in self.question:
+            if n not in other.question:
+                return False
+        for n in other.question:
+            if n not in self.question:
+                return False
+        for n in self.answer:
+            if n not in other.answer:
+                return False
+        for n in other.answer:
+            if n not in self.answer:
+                return False
+        for n in self.authority:
+            if n not in other.authority:
+                return False
+        for n in other.authority:
+            if n not in self.authority:
+                return False
+        return True
+
+    def __ne__(self, other):
+        """Are two messages not equal?
+        @rtype: bool"""
+        return not self.__eq__(other)
+
+    def is_response(self, other):
+        """Is other a response to self?
+        @rtype: bool"""
+        if other.flags & dns.flags.QR == 0 or \
+           self.id != other.id or \
+           dns.opcode.from_flags(self.flags) != \
+           dns.opcode.from_flags(other.flags):
+            return False
+        if dns.rcode.from_flags(other.flags, other.ednsflags) != \
+               dns.rcode.NOERROR:
+            return True
+        if dns.opcode.is_update(self.flags):
+            return True
+        for n in self.question:
+            if n not in other.question:
+                return False
+        for n in other.question:
+            if n not in self.question:
+                return False
+        return True
+
+    def section_number(self, section):
+        if section is self.question:
+            return 0
+        elif section is self.answer:
+            return 1
+        elif section is self.authority:
+            return 2
+        elif section is self.additional:
+            return 3
+        else:
+            raise ValueError('unknown section')
+
+    def find_rrset(self, section, name, rdclass, rdtype,
+                   covers=dns.rdatatype.NONE, deleting=None, create=False,
+                   force_unique=False):
+        """Find the RRset with the given attributes in the specified section.
+
+        @param section: the section of the message to look in, e.g.
+        self.answer.
+        @type section: list of dns.rrset.RRset objects
+        @param name: the name of the RRset
+        @type name: dns.name.Name object
+        @param rdclass: the class of the RRset
+        @type rdclass: int
+        @param rdtype: the type of the RRset
+        @type rdtype: int
+        @param covers: the covers value of the RRset
+        @type covers: int
+        @param deleting: the deleting value of the RRset
+        @type deleting: int
+        @param create: If True, create the RRset if it is not found.
+        The created RRset is appended to I{section}.
+        @type create: bool
+        @param force_unique: If True and create is also True, create a
+        new RRset regardless of whether a matching RRset exists already.
+        @type force_unique: bool
+        @raises KeyError: the RRset was not found and create was False
+        @rtype: dns.rrset.RRset object"""
+
+        key = (self.section_number(section),
+               name, rdclass, rdtype, covers, deleting)
+        if not force_unique:
+            if not self.index is None:
+                rrset = self.index.get(key)
+                if not rrset is None:
+                    return rrset
+            else:
+                for rrset in section:
+                    if rrset.match(name, rdclass, rdtype, covers, deleting):
+                        return rrset
+        if not create:
+            raise KeyError
+        rrset = dns.rrset.RRset(name, rdclass, rdtype, covers, deleting)
+        section.append(rrset)
+        if not self.index is None:
+            self.index[key] = rrset
+        return rrset
+
+    def get_rrset(self, section, name, rdclass, rdtype,
+                  covers=dns.rdatatype.NONE, deleting=None, create=False,
+                  force_unique=False):
+        """Get the RRset with the given attributes in the specified section.
+
+        If the RRset is not found, None is returned.
+
+        @param section: the section of the message to look in, e.g.
+        self.answer.
+        @type section: list of dns.rrset.RRset objects
+        @param name: the name of the RRset
+        @type name: dns.name.Name object
+        @param rdclass: the class of the RRset
+        @type rdclass: int
+        @param rdtype: the type of the RRset
+        @type rdtype: int
+        @param covers: the covers value of the RRset
+        @type covers: int
+        @param deleting: the deleting value of the RRset
+        @type deleting: int
+        @param create: If True, create the RRset if it is not found.
+        The created RRset is appended to I{section}.
+        @type create: bool
+        @param force_unique: If True and create is also True, create a
+        new RRset regardless of whether a matching RRset exists already.
+        @type force_unique: bool
+        @rtype: dns.rrset.RRset object or None"""
+
+        try:
+            rrset = self.find_rrset(section, name, rdclass, rdtype, covers,
+                                    deleting, create, force_unique)
+        except KeyError:
+            rrset = None
+        return rrset
+
+    def to_wire(self, origin=None, max_size=0, **kw):
+        """Return a string containing the message in DNS compressed wire
+        format.
+
+        Additional keyword arguments are passed to the rrset to_wire()
+        method.
+
+        @param origin: The origin to be appended to any relative names.
+        @type origin: dns.name.Name object
+        @param max_size: The maximum size of the wire format output; default
+        is 0, which means 'the message's request payload, if nonzero, or
+        65535'.
+        @type max_size: int
+        @raises dns.exception.TooBig: max_size was exceeded
+        @rtype: string
+        """
+
+        if max_size == 0:
+            if self.request_payload != 0:
+                max_size = self.request_payload
+            else:
+                max_size = 65535
+        if max_size < 512:
+            max_size = 512
+        elif max_size > 65535:
+            max_size = 65535
+        r = dns.renderer.Renderer(self.id, self.flags, max_size, origin)
+        for rrset in self.question:
+            r.add_question(rrset.name, rrset.rdtype, rrset.rdclass)
+        for rrset in self.answer:
+            r.add_rrset(dns.renderer.ANSWER, rrset, **kw)
+        for rrset in self.authority:
+            r.add_rrset(dns.renderer.AUTHORITY, rrset, **kw)
+        if self.edns >= 0:
+            r.add_edns(self.edns, self.ednsflags, self.payload, self.options)
+        for rrset in self.additional:
+            r.add_rrset(dns.renderer.ADDITIONAL, rrset, **kw)
+        r.write_header()
+        if not self.keyname is None:
+            r.add_tsig(self.keyname, self.keyring[self.keyname],
+                       self.fudge, self.original_id, self.tsig_error,
+                       self.other_data, self.request_mac,
+                       self.keyalgorithm)
+            self.mac = r.mac
+        return r.get_wire()
+
+    def use_tsig(self, keyring, keyname=None, fudge=300,
+                 original_id=None, tsig_error=0, other_data='',
+                 algorithm=dns.tsig.default_algorithm):
+        """When sending, a TSIG signature using the specified keyring
+        and keyname should be added.
+
+        @param keyring: The TSIG keyring to use; defaults to None.
+        @type keyring: dict
+        @param keyname: The name of the TSIG key to use; defaults to None.
+        The key must be defined in the keyring.  If a keyring is specified
+        but a keyname is not, then the key used will be the first key in the
+        keyring.  Note that the order of keys in a dictionary is not defined,
+        so applications should supply a keyname when a keyring is used, unless
+        they know the keyring contains only one key.
+        @type keyname: dns.name.Name or string
+        @param fudge: TSIG time fudge; default is 300 seconds.
+        @type fudge: int
+        @param original_id: TSIG original id; defaults to the message's id
+        @type original_id: int
+        @param tsig_error: TSIG error code; default is 0.
+        @type tsig_error: int
+        @param other_data: TSIG other data.
+        @type other_data: string
+        @param algorithm: The TSIG algorithm to use; defaults to
+        dns.tsig.default_algorithm
+        """
+
+        self.keyring = keyring
+        if keyname is None:
+            self.keyname = self.keyring.keys()[0]
+        else:
+            if isinstance(keyname, (str, unicode)):
+                keyname = dns.name.from_text(keyname)
+            self.keyname = keyname
+        self.keyalgorithm = algorithm
+        self.fudge = fudge
+        if original_id is None:
+            self.original_id = self.id
+        else:
+            self.original_id = original_id
+        self.tsig_error = tsig_error
+        self.other_data = other_data
+
+    def use_edns(self, edns=0, ednsflags=0, payload=1280, request_payload=None, options=None):
+        """Configure EDNS behavior.
+        @param edns: The EDNS level to use.  Specifying None, False, or -1
+        means 'do not use EDNS', and in this case the other parameters are
+        ignored.  Specifying True is equivalent to specifying 0, i.e. 'use
+        EDNS0'.
+        @type edns: int or bool or None
+        @param ednsflags: EDNS flag values.
+        @type ednsflags: int
+        @param payload: The EDNS sender's payload field, which is the maximum
+        size of UDP datagram the sender can handle.
+        @type payload: int
+        @param request_payload: The EDNS payload size to use when sending
+        this message.  If not specified, defaults to the value of payload.
+        @type request_payload: int or None
+        @param options: The EDNS options
+        @type options: None or list of dns.edns.Option objects
+        @see: RFC 2671
+        """
+        if edns is None or edns is False:
+            edns = -1
+        if edns is True:
+            edns = 0
+        if request_payload is None:
+            request_payload = payload
+        if edns < 0:
+            ednsflags = 0
+            payload = 0
+            request_payload = 0
+            options = []
+        else:
+            # make sure the EDNS version in ednsflags agrees with edns
+            ednsflags &= 0xFF00FFFFL
+            ednsflags |= (edns << 16)
+            if options is None:
+                options = []
+        self.edns = edns
+        self.ednsflags = ednsflags
+        self.payload = payload
+        self.options = options
+        self.request_payload = request_payload
+
+    def want_dnssec(self, wanted=True):
+        """Enable or disable 'DNSSEC desired' flag in requests.
+        @param wanted: Is DNSSEC desired?  If True, EDNS is enabled if
+        required, and then the DO bit is set.  If False, the DO bit is
+        cleared if EDNS is enabled.
+        @type wanted: bool
+        """
+        if wanted:
+            if self.edns < 0:
+                self.use_edns()
+            self.ednsflags |= dns.flags.DO
+        elif self.edns >= 0:
+            self.ednsflags &= ~dns.flags.DO
+
+    def rcode(self):
+        """Return the rcode.
+        @rtype: int
+        """
+        return dns.rcode.from_flags(self.flags, self.ednsflags)
+
+    def set_rcode(self, rcode):
+        """Set the rcode.
+        @param rcode: the rcode
+        @type rcode: int
+        """
+        (value, evalue) = dns.rcode.to_flags(rcode)
+        self.flags &= 0xFFF0
+        self.flags |= value
+        self.ednsflags &= 0x00FFFFFFL
+        self.ednsflags |= evalue
+        if self.ednsflags != 0 and self.edns < 0:
+            self.edns = 0
+
+    def opcode(self):
+        """Return the opcode.
+        @rtype: int
+        """
+        return dns.opcode.from_flags(self.flags)
+
+    def set_opcode(self, opcode):
+        """Set the opcode.
+        @param opcode: the opcode
+        @type opcode: int
+        """
+        self.flags &= 0x87FF
+        self.flags |= dns.opcode.to_flags(opcode)
+
+class _WireReader(object):
+    """Wire format reader.
+
+    @ivar wire: the wire-format message.
+    @type wire: string
+    @ivar message: The message object being built
+    @type message: dns.message.Message object
+    @ivar current: When building a message object from wire format, this
+    variable contains the offset from the beginning of wire of the next octet
+    to be read.
+    @type current: int
+    @ivar updating: Is the message a dynamic update?
+    @type updating: bool
+    @ivar one_rr_per_rrset: Put each RR into its own RRset?
+    @type one_rr_per_rrset: bool
+    @ivar zone_rdclass: The class of the zone in messages which are
+    DNS dynamic updates.
+    @type zone_rdclass: int
+    """
+
+    def __init__(self, wire, message, question_only=False,
+                 one_rr_per_rrset=False):
+        self.wire = wire
+        self.message = message
+        self.current = 0
+        self.updating = False
+        self.zone_rdclass = dns.rdataclass.IN
+        self.question_only = question_only
+        self.one_rr_per_rrset = one_rr_per_rrset
+
+    def _get_question(self, qcount):
+        """Read the next I{qcount} records from the wire data and add them to
+        the question section.
+        @param qcount: the number of questions in the message
+        @type qcount: int"""
+
+        if self.updating and qcount > 1:
+            raise dns.exception.FormError
+
+        for i in xrange(0, qcount):
+            (qname, used) = dns.name.from_wire(self.wire, self.current)
+            if not self.message.origin is None:
+                qname = qname.relativize(self.message.origin)
+            self.current = self.current + used
+            (rdtype, rdclass) = \
+                     struct.unpack('!HH',
+                                   self.wire[self.current:self.current + 4])
+            self.current = self.current + 4
+            self.message.find_rrset(self.message.question, qname,
+                                    rdclass, rdtype, create=True,
+                                    force_unique=True)
+            if self.updating:
+                self.zone_rdclass = rdclass
+
+    def _get_section(self, section, count):
+        """Read the next I{count} records from the wire data and add them to
+        the specified section.
+        @param section: the section of the message to which to add records
+        @type section: list of dns.rrset.RRset objects
+        @param count: the number of records to read
+        @type count: int"""
+
+        if self.updating or self.one_rr_per_rrset:
+            force_unique = True
+        else:
+            force_unique = False
+        seen_opt = False
+        for i in xrange(0, count):
+            rr_start = self.current
+            (name, used) = dns.name.from_wire(self.wire, self.current)
+            absolute_name = name
+            if not self.message.origin is None:
+                name = name.relativize(self.message.origin)
+            self.current = self.current + used
+            (rdtype, rdclass, ttl, rdlen) = \
+                     struct.unpack('!HHIH',
+                                   self.wire[self.current:self.current + 10])
+            self.current = self.current + 10
+            if rdtype == dns.rdatatype.OPT:
+                if not section is self.message.additional or seen_opt:
+                    raise BadEDNS
+                self.message.payload = rdclass
+                self.message.ednsflags = ttl
+                self.message.edns = (ttl & 0xff0000) >> 16
+                self.message.options = []
+                current = self.current
+                optslen = rdlen
+                while optslen > 0:
+                    (otype, olen) = \
+                            struct.unpack('!HH',
+                                          self.wire[current:current + 4])
+                    current = current + 4
+                    opt = dns.edns.option_from_wire(otype, self.wire, current, olen)
+                    self.message.options.append(opt)
+                    current = current + olen
+                    optslen = optslen - 4 - olen
+                seen_opt = True
+            elif rdtype == dns.rdatatype.TSIG:
+                if not (section is self.message.additional and
+                        i == (count - 1)):
+                    raise BadTSIG
+                if self.message.keyring is None:
+                    raise UnknownTSIGKey('got signed message without keyring')
+                secret = self.message.keyring.get(absolute_name)
+                if secret is None:
+                    raise UnknownTSIGKey("key '%s' unknown" % name)
+                self.message.tsig_ctx = \
+                                      dns.tsig.validate(self.wire,
+                                          absolute_name,
+                                          secret,
+                                          int(time.time()),
+                                          self.message.request_mac,
+                                          rr_start,
+                                          self.current,
+                                          rdlen,
+                                          self.message.tsig_ctx,
+                                          self.message.multi,
+                                          self.message.first)
+                self.message.had_tsig = True
+            else:
+                if ttl < 0:
+                    ttl = 0
+                if self.updating and \
+                   (rdclass == dns.rdataclass.ANY or
+                    rdclass == dns.rdataclass.NONE):
+                    deleting = rdclass
+                    rdclass = self.zone_rdclass
+                else:
+                    deleting = None
+                if deleting == dns.rdataclass.ANY or \
+                   (deleting == dns.rdataclass.NONE and \
+                    section == self.message.answer):
+                    covers = dns.rdatatype.NONE
+                    rd = None
+                else:
+                    rd = dns.rdata.from_wire(rdclass, rdtype, self.wire,
+                                             self.current, rdlen,
+                                             self.message.origin)
+                    covers = rd.covers()
+                if self.message.xfr and rdtype == dns.rdatatype.SOA:
+                    force_unique = True
+                rrset = self.message.find_rrset(section, name,
+                                                rdclass, rdtype, covers,
+                                                deleting, True, force_unique)
+                if not rd is None:
+                    rrset.add(rd, ttl)
+            self.current = self.current + rdlen
+
+    def read(self):
+        """Read a wire format DNS message and build a dns.message.Message
+        object."""
+
+        l = len(self.wire)
+        if l < 12:
+            raise ShortHeader
+        (self.message.id, self.message.flags, qcount, ancount,
+         aucount, adcount) = struct.unpack('!HHHHHH', self.wire[:12])
+        self.current = 12
+        if dns.opcode.is_update(self.message.flags):
+            self.updating = True
+        self._get_question(qcount)
+        if self.question_only:
+            return
+        self._get_section(self.message.answer, ancount)
+        self._get_section(self.message.authority, aucount)
+        self._get_section(self.message.additional, adcount)
+        if self.current != l:
+            raise TrailingJunk
+        if self.message.multi and self.message.tsig_ctx and \
+               not self.message.had_tsig:
+            self.message.tsig_ctx.update(self.wire)
+
+
+def from_wire(wire, keyring=None, request_mac='', xfr=False, origin=None,
+              tsig_ctx = None, multi = False, first = True,
+              question_only = False, one_rr_per_rrset = False):
+    """Convert a DNS wire format message into a message
+    object.
+
+    @param keyring: The keyring to use if the message is signed.
+    @type keyring: dict
+    @param request_mac: If the message is a response to a TSIG-signed request,
+    I{request_mac} should be set to the MAC of that request.
+    @type request_mac: string
+    @param xfr: Is this message part of a zone transfer?
+    @type xfr: bool
+    @param origin: If the message is part of a zone transfer, I{origin}
+    should be the origin name of the zone.
+    @type origin: dns.name.Name object
+    @param tsig_ctx: The ongoing TSIG context, used when validating zone
+    transfers.
+    @type tsig_ctx: hmac.HMAC object
+    @param multi: Is this message part of a multiple message sequence?
+    @type multi: bool
+    @param first: Is this message standalone, or the first of a multi
+    message sequence?
+    @type first: bool
+    @param question_only: Read only up to the end of the question section?
+    @type question_only: bool
+    @param one_rr_per_rrset: Put each RR into its own RRset
+    @type one_rr_per_rrset: bool
+    @raises ShortHeader: The message is less than 12 octets long.
+    @raises TrailingJunk: There were octets in the message past the end
+    of the proper DNS message.
+    @raises BadEDNS: An OPT record was in the wrong section, or occurred more
+    than once.
+    @raises BadTSIG: A TSIG record was not the last record of the additional
+    data section.
+    @rtype: dns.message.Message object"""
+
+    m = Message(id=0)
+    m.keyring = keyring
+    m.request_mac = request_mac
+    m.xfr = xfr
+    m.origin = origin
+    m.tsig_ctx = tsig_ctx
+    m.multi = multi
+    m.first = first
+
+    reader = _WireReader(wire, m, question_only, one_rr_per_rrset)
+    reader.read()
+
+    return m
+
+
+class _TextReader(object):
+    """Text format reader.
+
+    @ivar tok: the tokenizer
+    @type tok: dns.tokenizer.Tokenizer object
+    @ivar message: The message object being built
+    @type message: dns.message.Message object
+    @ivar updating: Is the message a dynamic update?
+    @type updating: bool
+    @ivar zone_rdclass: The class of the zone in messages which are
+    DNS dynamic updates.
+    @type zone_rdclass: int
+    @ivar last_name: The most recently read name when building a message object
+    from text format.
+    @type last_name: dns.name.Name object
+    """
+
+    def __init__(self, text, message):
+        self.message = message
+        self.tok = dns.tokenizer.Tokenizer(text)
+        self.last_name = None
+        self.zone_rdclass = dns.rdataclass.IN
+        self.updating = False
+
+    def _header_line(self, section):
+        """Process one line from the text format header section."""
+
+        token = self.tok.get()
+        what = token.value
+        if what == 'id':
+            self.message.id = self.tok.get_int()
+        elif what == 'flags':
+            while True:
+                token = self.tok.get()
+                if not token.is_identifier():
+                    self.tok.unget(token)
+                    break
+                self.message.flags = self.message.flags | \
+                                     dns.flags.from_text(token.value)
+            if dns.opcode.is_update(self.message.flags):
+                self.updating = True
+        elif what == 'edns':
+            self.message.edns = self.tok.get_int()
+            self.message.ednsflags = self.message.ednsflags | \
+                                     (self.message.edns << 16)
+        elif what == 'eflags':
+            if self.message.edns < 0:
+                self.message.edns = 0
+            while True:
+                token = self.tok.get()
+                if not token.is_identifier():
+                    self.tok.unget(token)
+                    break
+                self.message.ednsflags = self.message.ednsflags | \
+                              dns.flags.edns_from_text(token.value)
+        elif what == 'payload':
+            self.message.payload = self.tok.get_int()
+            if self.message.edns < 0:
+                self.message.edns = 0
+        elif what == 'opcode':
+            text = self.tok.get_string()
+            self.message.flags = self.message.flags | \
+                      dns.opcode.to_flags(dns.opcode.from_text(text))
+        elif what == 'rcode':
+            text = self.tok.get_string()
+            self.message.set_rcode(dns.rcode.from_text(text))
+        else:
+            raise UnknownHeaderField
+        self.tok.get_eol()
+
+    def _question_line(self, section):
+        """Process one line from the text format question section."""
+
+        token = self.tok.get(want_leading = True)
+        if not token.is_whitespace():
+            self.last_name = dns.name.from_text(token.value, None)
+        name = self.last_name
+        token = self.tok.get()
+        if not token.is_identifier():
+            raise dns.exception.SyntaxError
+        # Class
+        try:
+            rdclass = dns.rdataclass.from_text(token.value)
+            token = self.tok.get()
+            if not token.is_identifier():
+                raise dns.exception.SyntaxError
+        except dns.exception.SyntaxError:
+            raise dns.exception.SyntaxError
+        except:
+            rdclass = dns.rdataclass.IN
+        # Type
+        rdtype = dns.rdatatype.from_text(token.value)
+        self.message.find_rrset(self.message.question, name,
+                                rdclass, rdtype, create=True,
+                                force_unique=True)
+        if self.updating:
+            self.zone_rdclass = rdclass
+        self.tok.get_eol()
+
+    def _rr_line(self, section):
+        """Process one line from the text format answer, authority, or
+        additional data sections.
+        """
+
+        deleting = None
+        # Name
+        token = self.tok.get(want_leading = True)
+        if not token.is_whitespace():
+            self.last_name = dns.name.from_text(token.value, None)
+        name = self.last_name
+        token = self.tok.get()
+        if not token.is_identifier():
+            raise dns.exception.SyntaxError
+        # TTL
+        try:
+            ttl = int(token.value, 0)
+            token = self.tok.get()
+            if not token.is_identifier():
+                raise dns.exception.SyntaxError
+        except dns.exception.SyntaxError:
+            raise dns.exception.SyntaxError
+        except:
+            ttl = 0
+        # Class
+        try:
+            rdclass = dns.rdataclass.from_text(token.value)
+            token = self.tok.get()
+            if not token.is_identifier():
+                raise dns.exception.SyntaxError
+            if rdclass == dns.rdataclass.ANY or rdclass == dns.rdataclass.NONE:
+                deleting = rdclass
+                rdclass = self.zone_rdclass
+        except dns.exception.SyntaxError:
+            raise dns.exception.SyntaxError
+        except:
+            rdclass = dns.rdataclass.IN
+        # Type
+        rdtype = dns.rdatatype.from_text(token.value)
+        token = self.tok.get()
+        if not token.is_eol_or_eof():
+            self.tok.unget(token)
+            rd = dns.rdata.from_text(rdclass, rdtype, self.tok, None)
+            covers = rd.covers()
+        else:
+            rd = None
+            covers = dns.rdatatype.NONE
+        rrset = self.message.find_rrset(section, name,
+                                        rdclass, rdtype, covers,
+                                        deleting, True, self.updating)
+        if not rd is None:
+            rrset.add(rd, ttl)
+
+    def read(self):
+        """Read a text format DNS message and build a dns.message.Message
+        object."""
+
+        line_method = self._header_line
+        section = None
+        while 1:
+            token = self.tok.get(True, True)
+            if token.is_eol_or_eof():
+                break
+            if token.is_comment():
+                u = token.value.upper()
+                if u == 'HEADER':
+                    line_method = self._header_line
+                elif u == 'QUESTION' or u == 'ZONE':
+                    line_method = self._question_line
+                    section = self.message.question
+                elif u == 'ANSWER' or u == 'PREREQ':
+                    line_method = self._rr_line
+                    section = self.message.answer
+                elif u == 'AUTHORITY' or u == 'UPDATE':
+                    line_method = self._rr_line
+                    section = self.message.authority
+                elif u == 'ADDITIONAL':
+                    line_method = self._rr_line
+                    section = self.message.additional
+                self.tok.get_eol()
+                continue
+            self.tok.unget(token)
+            line_method(section)
+
+
+def from_text(text):
+    """Convert the text format message into a message object.
+
+    @param text: The text format message.
+    @type text: string
+    @raises UnknownHeaderField:
+    @raises dns.exception.SyntaxError:
+    @rtype: dns.message.Message object"""
+
+    # 'text' can also be a file, but we don't publish that fact
+    # since it's an implementation detail.  The official file
+    # interface is from_file().
+
+    m = Message()
+
+    reader = _TextReader(text, m)
+    reader.read()
+
+    return m
+
+def from_file(f):
+    """Read the next text format message from the specified file.
+
+    @param f: file or string.  If I{f} is a string, it is treated
+    as the name of a file to open.
+    @raises UnknownHeaderField:
+    @raises dns.exception.SyntaxError:
+    @rtype: dns.message.Message object"""
+
+    if sys.hexversion >= 0x02030000:
+        # allow Unicode filenames; turn on universal newline support
+        str_type = basestring
+        opts = 'rU'
+    else:
+        str_type = str
+        opts = 'r'
+    if isinstance(f, str_type):
+        f = file(f, opts)
+        want_close = True
+    else:
+        want_close = False
+
+    try:
+        m = from_text(f)
+    finally:
+        if want_close:
+            f.close()
+    return m
+
+def make_query(qname, rdtype, rdclass = dns.rdataclass.IN, use_edns=None,
+               want_dnssec=False):
+    """Make a query message.
+
+    The query name, type, and class may all be specified either
+    as objects of the appropriate type, or as strings.
+
+    The query will have a randomly chosen query id, and its DNS flags
+    will be set to dns.flags.RD.
+
+    @param qname: The query name.
+    @type qname: dns.name.Name object or string
+    @param rdtype: The desired rdata type.
+    @type rdtype: int
+    @param rdclass: The desired rdata class; the default is class IN.
+    @type rdclass: int
+    @param use_edns: The EDNS level to use; the default is None (no EDNS).
+    See the description of dns.message.Message.use_edns() for the possible
+    values for use_edns and their meanings.
+    @type use_edns: int or bool or None
+    @param want_dnssec: Should the query indicate that DNSSEC is desired?
+    @type want_dnssec: bool
+    @rtype: dns.message.Message object"""
+
+    if isinstance(qname, (str, unicode)):
+        qname = dns.name.from_text(qname)
+    if isinstance(rdtype, str):
+        rdtype = dns.rdatatype.from_text(rdtype)
+    if isinstance(rdclass, str):
+        rdclass = dns.rdataclass.from_text(rdclass)
+    m = Message()
+    m.flags |= dns.flags.RD
+    m.find_rrset(m.question, qname, rdclass, rdtype, create=True,
+                 force_unique=True)
+    m.use_edns(use_edns)
+    m.want_dnssec(want_dnssec)
+    return m
+
+def make_response(query, recursion_available=False, our_payload=8192):
+    """Make a message which is a response for the specified query.
+    The message returned is really a response skeleton; it has all
+    of the infrastructure required of a response, but none of the
+    content.
+
+    The response's question section is a shallow copy of the query's
+    question section, so the query's question RRsets should not be
+    changed.
+
+    @param query: the query to respond to
+    @type query: dns.message.Message object
+    @param recursion_available: should RA be set in the response?
+    @type recursion_available: bool
+    @param our_payload: payload size to advertise in EDNS responses; default
+    is 8192.
+    @type our_payload: int
+    @rtype: dns.message.Message object"""
+
+    if query.flags & dns.flags.QR:
+        raise dns.exception.FormError('specified query message is not a query')
+    response = dns.message.Message(query.id)
+    response.flags = dns.flags.QR | (query.flags & dns.flags.RD)
+    if recursion_available:
+        response.flags |= dns.flags.RA
+    response.set_opcode(query.opcode())
+    response.question = list(query.question)
+    if query.edns >= 0:
+        response.use_edns(0, 0, our_payload, query.payload)
+    if not query.keyname is None:
+        response.keyname = query.keyname
+        response.keyring = query.keyring
+        response.request_mac = query.mac
+    return response
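A short sketch of how the dns.message API above is typically exercised: query
construction, a wire-format round trip, and a response skeleton. 'example.com.'
is a placeholder name and the vendored `dns` package is assumed importable:

    import dns.message
    import dns.rdatatype

    query = dns.message.make_query('example.com.', dns.rdatatype.A)
    wire = query.to_wire()                  # DNS compressed wire format
    parsed = dns.message.from_wire(wire)    # back to a Message object
    print parsed == query                   # same id, flags, and sections

    response = dns.message.make_response(query)
    print query.is_response(response)       # True: QR set, id and question match
    print response.rcode(), response.opcode()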
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/name.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/name.py
new file mode 100644
index 0000000..b54aa19
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/name.py
@@ -0,0 +1,700 @@
+# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Names.
+
+@var root: The DNS root name.
+@type root: dns.name.Name object
+@var empty: The empty DNS name.
+@type empty: dns.name.Name object
+"""
+
+import cStringIO
+import struct
+import sys
+
+if sys.hexversion >= 0x02030000:
+    import encodings.idna
+
+import dns.exception
+
+NAMERELN_NONE = 0
+NAMERELN_SUPERDOMAIN = 1
+NAMERELN_SUBDOMAIN = 2
+NAMERELN_EQUAL = 3
+NAMERELN_COMMONANCESTOR = 4
+
+class EmptyLabel(dns.exception.SyntaxError):
+    """Raised if a label is empty."""
+    pass
+
+class BadEscape(dns.exception.SyntaxError):
+    """Raised if an escaped code in a text format name is invalid."""
+    pass
+
+class BadPointer(dns.exception.FormError):
+    """Raised if a compression pointer points forward instead of backward."""
+    pass
+
+class BadLabelType(dns.exception.FormError):
+    """Raised if the label type of a wire format name is unknown."""
+    pass
+
+class NeedAbsoluteNameOrOrigin(dns.exception.DNSException):
+    """Raised if an attempt is made to convert a non-absolute name to
+    wire when there is also a non-absolute (or missing) origin."""
+    pass
+
+class NameTooLong(dns.exception.FormError):
+    """Raised if a name is > 255 octets long."""
+    pass
+
+class LabelTooLong(dns.exception.SyntaxError):
+    """Raised if a label is > 63 octets long."""
+    pass
+
+class AbsoluteConcatenation(dns.exception.DNSException):
+    """Raised if an attempt is made to append anything other than the
+    empty name to an absolute name."""
+    pass
+
+class NoParent(dns.exception.DNSException):
+    """Raised if an attempt is made to get the parent of the root name
+    or the empty name."""
+    pass
+
+_escaped = {
+    '"' : True,
+    '(' : True,
+    ')' : True,
+    '.' : True,
+    ';' : True,
+    '\\' : True,
+    '@' : True,
+    '$' : True
+    }
+
+def _escapify(label):
+    """Escape the characters in label which need it.
+    @returns: the escaped string
+    @rtype: string"""
+    text = ''
+    for c in label:
+        if c in _escaped:
+            text += '\\' + c
+        elif ord(c) > 0x20 and ord(c) < 0x7F:
+            text += c
+        else:
+            text += '\\%03d' % ord(c)
+    return text
+
+def _validate_labels(labels):
+    """Check for empty labels in the middle of a label sequence,
+    labels that are too long, and for too many labels.
+    @raises NameTooLong: the name as a whole is too long
+    @raises LabelTooLong: an individual label is too long
+    @raises EmptyLabel: a label is empty (i.e. the root label) and appears
+    in a position other than the end of the label sequence"""
+
+    l = len(labels)
+    total = 0
+    i = -1
+    j = 0
+    for label in labels:
+        ll = len(label)
+        total += ll + 1
+        if ll > 63:
+            raise LabelTooLong
+        if i < 0 and label == '':
+            i = j
+        j += 1
+    if total > 255:
+        raise NameTooLong
+    if i >= 0 and i != l - 1:
+        raise EmptyLabel
+
+class Name(object):
+    """A DNS name.
+
+    The dns.name.Name class represents a DNS name as a tuple of labels.
+    Instances of the class are immutable.
+
+    @ivar labels: The tuple of labels in the name. Each label is a string of
+    up to 63 octets."""
+
+    __slots__ = ['labels']
+
+    def __init__(self, labels):
+        """Initialize a domain name from a list of labels.
+        @param labels: the labels
+        @type labels: any iterable whose values are strings
+        """
+
+        super(Name, self).__setattr__('labels', tuple(labels))
+        _validate_labels(self.labels)
+
+    def __setattr__(self, name, value):
+        raise TypeError("object doesn't support attribute assignment")
+
+    def is_absolute(self):
+        """Is the most significant label of this name the root label?
+        @rtype: bool
+        """
+
+        return len(self.labels) > 0 and self.labels[-1] == ''
+
+    def is_wild(self):
+        """Is this name wild?  (I.e. Is the least significant label '*'?)
+        @rtype: bool
+        """
+
+        return len(self.labels) > 0 and self.labels[0] == '*'
+
+    def __hash__(self):
+        """Return a case-insensitive hash of the name.
+        @rtype: int
+        """
+
+        h = 0L
+        for label in self.labels:
+            for c in label:
+                h += ( h << 3 ) + ord(c.lower())
+        return int(h % sys.maxint)
+
+    def fullcompare(self, other):
+        """Compare two names, returning a 3-tuple (relation, order, nlabels).
+
+        I{relation} describes the relationship between the names,
+        and is one of: dns.name.NAMERELN_NONE,
+        dns.name.NAMERELN_SUPERDOMAIN, dns.name.NAMERELN_SUBDOMAIN,
+        dns.name.NAMERELN_EQUAL, or dns.name.NAMERELN_COMMONANCESTOR
+
+        I{order} is < 0 if self < other, > 0 if self > other, and ==
+        0 if self == other.  A relative name is always less than an
+        absolute name.  If both names have the same relativity, then
+        the DNSSEC order relation is used to order them.
+
+        I{nlabels} is the number of significant labels that the two names
+        have in common.
+        """
+
+        sabs = self.is_absolute()
+        oabs = other.is_absolute()
+        if sabs != oabs:
+            if sabs:
+                return (NAMERELN_NONE, 1, 0)
+            else:
+                return (NAMERELN_NONE, -1, 0)
+        l1 = len(self.labels)
+        l2 = len(other.labels)
+        ldiff = l1 - l2
+        if ldiff < 0:
+            l = l1
+        else:
+            l = l2
+
+        order = 0
+        nlabels = 0
+        namereln = NAMERELN_NONE
+        while l > 0:
+            l -= 1
+            l1 -= 1
+            l2 -= 1
+            label1 = self.labels[l1].lower()
+            label2 = other.labels[l2].lower()
+            if label1 < label2:
+                order = -1
+                if nlabels > 0:
+                    namereln = NAMERELN_COMMONANCESTOR
+                return (namereln, order, nlabels)
+            elif label1 > label2:
+                order = 1
+                if nlabels > 0:
+                    namereln = NAMERELN_COMMONANCESTOR
+                return (namereln, order, nlabels)
+            nlabels += 1
+        order = ldiff
+        if ldiff < 0:
+            namereln = NAMERELN_SUPERDOMAIN
+        elif ldiff > 0:
+            namereln = NAMERELN_SUBDOMAIN
+        else:
+            namereln = NAMERELN_EQUAL
+        return (namereln, order, nlabels)
+
+    def is_subdomain(self, other):
+        """Is self a subdomain of other?
+
+        The notion of subdomain includes equality.
+        @rtype: bool
+        """
+
+        (nr, o, nl) = self.fullcompare(other)
+        if nr == NAMERELN_SUBDOMAIN or nr == NAMERELN_EQUAL:
+            return True
+        return False
+
+    def is_superdomain(self, other):
+        """Is self a superdomain of other?
+
+        The notion of superdomain includes equality.
+        @rtype: bool
+        """
+
+        (nr, o, nl) = self.fullcompare(other)
+        if nr == NAMERELN_SUPERDOMAIN or nr == NAMERELN_EQUAL:
+            return True
+        return False
+
+    def canonicalize(self):
+        """Return a name which is equal to the current name, but is in
+        DNSSEC canonical form.
+        @rtype: dns.name.Name object
+        """
+
+        return Name([x.lower() for x in self.labels])
+
+    def __eq__(self, other):
+        if isinstance(other, Name):
+            return self.fullcompare(other)[1] == 0
+        else:
+            return False
+
+    def __ne__(self, other):
+        if isinstance(other, Name):
+            return self.fullcompare(other)[1] != 0
+        else:
+            return True
+
+    def __lt__(self, other):
+        if isinstance(other, Name):
+            return self.fullcompare(other)[1] < 0
+        else:
+            return NotImplemented
+
+    def __le__(self, other):
+        if isinstance(other, Name):
+            return self.fullcompare(other)[1] <= 0
+        else:
+            return NotImplemented
+
+    def __ge__(self, other):
+        if isinstance(other, Name):
+            return self.fullcompare(other)[1] >= 0
+        else:
+            return NotImplemented
+
+    def __gt__(self, other):
+        if isinstance(other, Name):
+            return self.fullcompare(other)[1] > 0
+        else:
+            return NotImplemented
+
+    def __repr__(self):
+        return '<DNS name ' + self.__str__() + '>'
+
+    def __str__(self):
+        return self.to_text(False)
+
+    def to_text(self, omit_final_dot = False):
+        """Convert name to text format.
+        @param omit_final_dot: If True, don't emit the final dot (denoting the
+        root label) for absolute names.  The default is False.
+        @rtype: string
+        """
+
+        if len(self.labels) == 0:
+            return '@'
+        if len(self.labels) == 1 and self.labels[0] == '':
+            return '.'
+        if omit_final_dot and self.is_absolute():
+            l = self.labels[:-1]
+        else:
+            l = self.labels
+        s = '.'.join(map(_escapify, l))
+        return s
+
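+    # Illustrative sketch (editorial note): to_text() pairs with the
+    # module-level from_text() defined below.
+    #
+    #   from_text('example.com.').to_text()                     # 'example.com.'
+    #   from_text('example.com.').to_text(omit_final_dot=True)  # 'example.com'
+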
+    def to_unicode(self, omit_final_dot = False):
+        """Convert name to Unicode text format.
+
+        IDN ACE labels are converted to Unicode.
+
+        @param omit_final_dot: If True, don't emit the final dot (denoting the
+        root label) for absolute names.  The default is False.
+        @rtype: string
+        """
+
+        if len(self.labels) == 0:
+            return u'@'
+        if len(self.labels) == 1 and self.labels[0] == '':
+            return u'.'
+        if omit_final_dot and self.is_absolute():
+            l = self.labels[:-1]
+        else:
+            l = self.labels
+        s = u'.'.join([encodings.idna.ToUnicode(_escapify(x)) for x in l])
+        return s
+
+    def to_digestable(self, origin=None):
+        """Convert name to a format suitable for digesting in hashes.
+
+        The name is canonicalized and converted to uncompressed wire format.
+
+        @param origin: If the name is relative and origin is not None, then
+        origin will be appended to it.
+        @type origin: dns.name.Name object
+        @raises NeedAbsoluteNameOrOrigin: All names in wire format are
+        absolute.  If self is a relative name, then an origin must be supplied;
+        if it is missing, then this exception is raised
+        @rtype: string
+        """
+
+        if not self.is_absolute():
+            if origin is None or not origin.is_absolute():
+                raise NeedAbsoluteNameOrOrigin
+            labels = list(self.labels)
+            labels.extend(list(origin.labels))
+        else:
+            labels = self.labels
+        dlabels = ["%s%s" % (chr(len(x)), x.lower()) for x in labels]
+        return ''.join(dlabels)
+
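+    # Illustrative sketch (editorial note): the digestable form is each label,
+    # lower-cased, prefixed by its one-byte length, terminated by the
+    # zero-length root label.
+    #
+    #   from_text('Foo.Example.').to_digestable()
+    #   # == '\x03foo\x07example\x00'
+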
+    def to_wire(self, file = None, compress = None, origin = None):
+        """Convert name to wire format, possibly compressing it.
+
+        @param file: the file where the name is emitted (typically
+        a cStringIO file).  If None, a string containing the wire name
+        will be returned.
+        @type file: file or None
+        @param compress: The compression table.  If None (the default) names
+        will not be compressed.
+        @type compress: dict
+        @param origin: If the name is relative and origin is not None, then
+        origin will be appended to it.
+        @type origin: dns.name.Name object
+        @raises NeedAbsoluteNameOrOrigin: All names in wire format are
+        absolute.  If self is a relative name, then an origin must be supplied;
+        if it is missing, then this exception is raised
+        """
+
+        if file is None:
+            file = cStringIO.StringIO()
+            want_return = True
+        else:
+            want_return = False
+
+        if not self.is_absolute():
+            if origin is None or not origin.is_absolute():
+                raise NeedAbsoluteNameOrOrigin
+            labels = list(self.labels)
+            labels.extend(list(origin.labels))
+        else:
+            labels = self.labels
+        i = 0
+        for label in labels:
+            n = Name(labels[i:])
+            i += 1
+            if not compress is None:
+                pos = compress.get(n)
+            else:
+                pos = None
+            if not pos is None:
+                value = 0xc000 + pos
+                s = struct.pack('!H', value)
+                file.write(s)
+                break
+            else:
+                if not compress is None and len(n) > 1:
+                    pos = file.tell()
+                    if pos < 0xc000:
+                        compress[n] = pos
+                l = len(label)
+                file.write(chr(l))
+                if l > 0:
+                    file.write(label)
+        if want_return:
+            return file.getvalue()
+
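+    # Illustrative sketch (editorial note): for an absolute name with no
+    # compression table, the wire form is the same length-prefixed label
+    # sequence as to_digestable(), but with case preserved.  A shared dict
+    # records suffix offsets so later names can be emitted as 0xC000 pointers.
+    #
+    #   compress = {}
+    #   wire = from_text('example.com.').to_wire(compress=compress)
+    #   # compress == {from_text('example.com.'): 0, from_text('com.'): 8}
+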
+    def __len__(self):
+        """The length of the name (in labels).
+        @rtype: int
+        """
+
+        return len(self.labels)
+
+    def __getitem__(self, index):
+        return self.labels[index]
+
+    def __getslice__(self, start, stop):
+        return self.labels[start:stop]
+
+    def __add__(self, other):
+        return self.concatenate(other)
+
+    def __sub__(self, other):
+        return self.relativize(other)
+
+    def split(self, depth):
+        """Split a name into a prefix and suffix at depth.
+
+        @param depth: the number of labels in the suffix
+        @type depth: int
+        @raises ValueError: the depth was not >= 0 and <= the length of the
+        name.
+        @returns: the tuple (prefix, suffix)
+        @rtype: tuple
+        """
+
+        l = len(self.labels)
+        if depth == 0:
+            return (self, dns.name.empty)
+        elif depth == l:
+            return (dns.name.empty, self)
+        elif depth < 0 or depth > l:
+            raise ValueError('depth must be >= 0 and <= the length of the name')
+        return (Name(self[: -depth]), Name(self[-depth :]))
+
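+    # Illustrative sketch (editorial note): depth counts suffix labels,
+    # including the root label for absolute names.
+    #
+    #   (prefix, suffix) = from_text('www.example.com.').split(3)
+    #   # prefix == Name(['www']), suffix == Name(['example', 'com', ''])
+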
+    def concatenate(self, other):
+        """Return a new name which is the concatenation of self and other.
+        @rtype: dns.name.Name object
+        @raises AbsoluteConcatenation: self is absolute and other is
+        not the empty name
+        """
+
+        if self.is_absolute() and len(other) > 0:
+            raise AbsoluteConcatenation
+        labels = list(self.labels)
+        labels.extend(list(other.labels))
+        return Name(labels)
+
+    def relativize(self, origin):
+        """If self is a subdomain of origin, return a new name which is self
+        relative to origin.  Otherwise return self.
+        @rtype: dns.name.Name object
+        """
+
+        if not origin is None and self.is_subdomain(origin):
+            return Name(self[: -len(origin)])
+        else:
+            return self
+
+    def derelativize(self, origin):
+        """If self is a relative name, return a new name which is the
+        concatenation of self and origin.  Otherwise return self.
+        @rtype: dns.name.Name object
+        """
+
+        if not self.is_absolute():
+            return self.concatenate(origin)
+        else:
+            return self
+
+    def choose_relativity(self, origin=None, relativize=True):
+        """Return a name with the relativity desired by the caller.  If
+        origin is None, then self is returned.  Otherwise, if
+        relativize is true the name is relativized, and if relativize is
+        false the name is derelativized.
+        @rtype: dns.name.Name object
+        """
+
+        if origin:
+            if relativize:
+                return self.relativize(origin)
+            else:
+                return self.derelativize(origin)
+        else:
+            return self
+
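+    # Illustrative sketch (editorial note): relativize() and derelativize()
+    # are inverses with respect to an absolute origin, and choose_relativity()
+    # simply picks between them.
+    #
+    #   origin = from_text('example.com.')
+    #   rel = from_text('www.example.com.').relativize(origin)  # Name(['www'])
+    #   rel.derelativize(origin)             # 'www.example.com.' again
+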
+    def parent(self):
+        """Return the parent of the name.
+        @rtype: dns.name.Name object
+        @raises NoParent: the name is either the root name or the empty name,
+        and thus has no parent.
+        """
+        if self == root or self == empty:
+            raise NoParent
+        return Name(self.labels[1:])
+
+root = Name([''])
+empty = Name([])
+
+def from_unicode(text, origin = root):
+    """Convert unicode text into a Name object.
+
+    Labels are encoded in IDN ACE form.
+
+    @rtype: dns.name.Name object
+    """
+
+    if not isinstance(text, unicode):
+        raise ValueError("input to from_unicode() must be a unicode string")
+    if not (origin is None or isinstance(origin, Name)):
+        raise ValueError("origin must be a Name or None")
+    labels = []
+    label = u''
+    escaping = False
+    edigits = 0
+    total = 0
+    if text == u'@':
+        text = u''
+    if text:
+        if text == u'.':
+            return Name([''])	# no Unicode "u" on this constant!
+        for c in text:
+            if escaping:
+                if edigits == 0:
+                    if c.isdigit():
+                        total = int(c)
+                        edigits += 1
+                    else:
+                        label += c
+                        escaping = False
+                else:
+                    if not c.isdigit():
+                        raise BadEscape
+                    total *= 10
+                    total += int(c)
+                    edigits += 1
+                    if edigits == 3:
+                        escaping = False
+                        label += chr(total)
+            elif c == u'.' or c == u'\u3002' or \
+                 c == u'\uff0e' or c == u'\uff61':
+                if len(label) == 0:
+                    raise EmptyLabel
+                labels.append(encodings.idna.ToASCII(label))
+                label = u''
+            elif c == u'\\':
+                escaping = True
+                edigits = 0
+                total = 0
+            else:
+                label += c
+        if escaping:
+            raise BadEscape
+        if len(label) > 0:
+            labels.append(encodings.idna.ToASCII(label))
+        else:
+            labels.append('')
+    if (len(labels) == 0 or labels[-1] != '') and not origin is None:
+        labels.extend(list(origin.labels))
+    return Name(labels)
+
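+# Illustrative sketch (editorial note): non-ASCII labels are stored in their
+# IDN ACE (punycode) form, so the resulting Name compares, hashes and digests
+# like any ASCII name.
+#
+#   from_unicode(u'm\xfcnchen.example.').to_text()
+#   # 'xn--mnchen-3ya.example.'  (the IDNA ACE form of u'm\xfcnchen')
+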
+def from_text(text, origin = root):
+    """Convert text into a Name object.
+    @rtype: dns.name.Name object
+    """
+
+    if not isinstance(text, str):
+        if isinstance(text, unicode) and sys.hexversion >= 0x02030000:
+            return from_unicode(text, origin)
+        else:
+            raise ValueError("input to from_text() must be a string")
+    if not (origin is None or isinstance(origin, Name)):
+        raise ValueError("origin must be a Name or None")
+    labels = []
+    label = ''
+    escaping = False
+    edigits = 0
+    total = 0
+    if text == '@':
+        text = ''
+    if text:
+        if text == '.':
+            return Name([''])
+        for c in text:
+            if escaping:
+                if edigits == 0:
+                    if c.isdigit():
+                        total = int(c)
+                        edigits += 1
+                    else:
+                        label += c
+                        escaping = False
+                else:
+                    if not c.isdigit():
+                        raise BadEscape
+                    total *= 10
+                    total += int(c)
+                    edigits += 1
+                    if edigits == 3:
+                        escaping = False
+                        label += chr(total)
+            elif c == '.':
+                if len(label) == 0:
+                    raise EmptyLabel
+                labels.append(label)
+                label = ''
+            elif c == '\\':
+                escaping = True
+                edigits = 0
+                total = 0
+            else:
+                label += c
+        if escaping:
+            raise BadEscape
+        if len(label) > 0:
+            labels.append(label)
+        else:
+            labels.append('')
+    if (len(labels) == 0 or labels[-1] != '') and not origin is None:
+        labels.extend(list(origin.labels))
+    return Name(labels)
+
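+# Illustrative sketch (editorial note): the origin (the root name by default)
+# is appended whenever the input text does not already end in a dot.
+#
+#   from_text('www')                                    # absolute 'www.'
+#   from_text('www', origin=from_text('example.com.'))  # 'www.example.com.'
+#   from_text('www', origin=None)                       # relative Name(['www'])
+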
+def from_wire(message, current):
+    """Convert possibly compressed wire format into a Name.
+    @param message: the entire DNS message
+    @type message: string
+    @param current: the offset of the beginning of the name from the start
+    of the message
+    @type current: int
+    @raises dns.name.BadPointer: a compression pointer did not point backwards
+    in the message
+    @raises dns.name.BadLabelType: an invalid label type was encountered.
+    @returns: a tuple consisting of the name that was read and the number
+    of bytes of the wire format message which were consumed reading it
+    @rtype: (dns.name.Name object, int) tuple
+    """
+
+    if not isinstance(message, str):
+        raise ValueError("input to from_wire() must be a byte string")
+    labels = []
+    biggest_pointer = current
+    hops = 0
+    count = ord(message[current])
+    current += 1
+    cused = 1
+    while count != 0:
+        if count < 64:
+            labels.append(message[current : current + count])
+            current += count
+            if hops == 0:
+                cused += count
+        elif count >= 192:
+            current = (count & 0x3f) * 256 + ord(message[current])
+            if hops == 0:
+                cused += 1
+            if current >= biggest_pointer:
+                raise BadPointer
+            biggest_pointer = current
+            hops += 1
+        else:
+            raise BadLabelType
+        count = ord(message[current])
+        current += 1
+        if hops == 0:
+            cused += 1
+    labels.append('')
+    return (Name(labels), cused)
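+
+# Illustrative sketch (editorial note): cused counts only the bytes read at the
+# original offset; once a compression pointer (count >= 192) is followed,
+# subsequent labels do not add to it.
+#
+#   msg = '\x03www\x07example\x03com\x00' + '\x03ftp' + '\xc0\x00'
+#   from_wire(msg, 17)
+#   # == (Name(['ftp', 'www', 'example', 'com', '']), 6)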
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/namedict.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/namedict.py
new file mode 100644
index 0000000..54afb77
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/namedict.py
@@ -0,0 +1,59 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS name dictionary"""
+
+import dns.name
+
+class NameDict(dict):
+
+    """A dictionary whose keys are dns.name.Name objects.
+    @ivar max_depth: the maximum depth of the keys that have ever been
+    added to the dictionary.
+    @type max_depth: int
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(NameDict, self).__init__(*args, **kwargs)
+        self.max_depth = 0
+
+    def __setitem__(self, key, value):
+        if not isinstance(key, dns.name.Name):
+            raise ValueError('NameDict key must be a name')
+        depth = len(key)
+        if depth > self.max_depth:
+            self.max_depth = depth
+        super(NameDict, self).__setitem__(key, value)
+
+    def get_deepest_match(self, name):
+        """Find the deepest match to I{name} in the dictionary.
+
+        The deepest match is the longest name in the dictionary which is
+        a superdomain of I{name}.
+
+        @param name: the name
+        @type name: dns.name.Name object
+        @rtype: (key, value) tuple
+        """
+
+        depth = len(name)
+        if depth > self.max_depth:
+            depth = self.max_depth
+        for i in xrange(-depth, 0):
+            n = dns.name.Name(name[i:])
+            if self.has_key(n):
+                return (n, self[n])
+        v = self[dns.name.empty]
+        return (dns.name.empty, v)
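+
+# Illustrative sketch (editorial note): the deepest match is the longest stored
+# superdomain of the lookup name, falling back to the empty name.
+#
+#   d = NameDict()
+#   d[dns.name.from_text('example.com.')] = 'zone data'
+#   d[dns.name.empty] = 'default'
+#   d.get_deepest_match(dns.name.from_text('www.example.com.'))
+#   # == (dns.name.from_text('example.com.'), 'zone data')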
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/node.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/node.py
new file mode 100644
index 0000000..07fff92
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/node.py
@@ -0,0 +1,172 @@
+# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS nodes.  A node is a set of rdatasets."""
+
+import StringIO
+
+import dns.rdataset
+import dns.rdatatype
+import dns.renderer
+
+class Node(object):
+    """A DNS node.
+    
+    A node is a set of rdatasets
+
+    @ivar rdatasets: the node's rdatasets
+    @type rdatasets: list of dns.rdataset.Rdataset objects"""
+
+    __slots__ = ['rdatasets']
+    
+    def __init__(self):
+        """Initialize a DNS node.
+        """
+        
+        self.rdatasets = []
+
+    def to_text(self, name, **kw):
+        """Convert a node to text format.
+
+        Each rdataset at the node is printed.  Any keyword arguments
+        to this method are passed on to the rdataset's to_text() method.
+        @param name: the owner name of the rdatasets
+        @type name: dns.name.Name object
+        @rtype: string
+        """
+        
+        s = StringIO.StringIO()
+        for rds in self.rdatasets:
+            print >> s, rds.to_text(name, **kw)
+        return s.getvalue()[:-1]
+
+    def __repr__(self):
+        return '<DNS node ' + str(id(self)) + '>'
+    
+    def __eq__(self, other):
+        """Two nodes are equal if they have the same rdatasets.
+
+        @rtype: bool
+        """
+        #
+        # This is inefficient.  Good thing we don't need to do it much.
+        #
+        for rd in self.rdatasets:
+            if rd not in other.rdatasets:
+                return False
+        for rd in other.rdatasets:
+            if rd not in self.rdatasets:
+                return False
+        return True
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+        
+    def __len__(self):
+        return len(self.rdatasets)
+
+    def __iter__(self):
+        return iter(self.rdatasets)
+
+    def find_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE,
+                      create=False):
+        """Find an rdataset matching the specified properties in the
+        current node.
+
+        @param rdclass: The class of the rdataset
+        @type rdclass: int
+        @param rdtype: The type of the rdataset
+        @type rdtype: int
+        @param covers: The covered type.  Usually this value is
+        dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or
+        dns.rdatatype.RRSIG, then the covers value will be the rdata
+        type the SIG/RRSIG covers.  The library treats the SIG and RRSIG
+        types as if they were a family of
+        types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA).  This makes RRSIGs much
+        easier to work with than if RRSIGs covering different rdata
+        types were aggregated into a single RRSIG rdataset.
+        @type covers: int
+        @param create: If True, create the rdataset if it is not found.
+        @type create: bool
+        @raises KeyError: An rdataset of the desired type and class does
+        not exist and I{create} is not True.
+        @rtype: dns.rdataset.Rdataset object
+        """
+
+        for rds in self.rdatasets:
+            if rds.match(rdclass, rdtype, covers):
+                return rds
+        if not create:
+            raise KeyError
+        rds = dns.rdataset.Rdataset(rdclass, rdtype)
+        self.rdatasets.append(rds)
+        return rds
+
+    def get_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE,
+                     create=False):
+        """Get an rdataset matching the specified properties in the
+        current node.
+
+        None is returned if an rdataset of the specified type and
+        class does not exist and I{create} is not True.
+
+        @param rdclass: The class of the rdataset
+        @type rdclass: int
+        @param rdtype: The type of the rdataset
+        @type rdtype: int
+        @param covers: The covered type.
+        @type covers: int
+        @param create: If True, create the rdataset if it is not found.
+        @type create: bool
+        @rtype: dns.rdataset.Rdataset object or None
+        """
+
+        try:
+            rds = self.find_rdataset(rdclass, rdtype, covers, create)
+        except KeyError:
+            rds = None
+        return rds
+
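+    # Illustrative sketch (editorial note, assuming dns.rdataclass has been
+    # imported): find_rdataset() raises KeyError for a missing rdataset unless
+    # create=True, while get_rdataset() returns None instead.
+    #
+    #   node = Node()
+    #   node.get_rdataset(dns.rdataclass.IN, dns.rdatatype.A)   # None
+    #   rds = node.find_rdataset(dns.rdataclass.IN, dns.rdatatype.A,
+    #                            create=True)
+    #   len(node)                                               # 1
+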
+    def delete_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE):
+        """Delete the rdataset matching the specified properties in the
+        current node.
+
+        If a matching rdataset does not exist, it is not an error.
+
+        @param rdclass: The class of the rdataset
+        @type rdclass: int
+        @param rdtype: The type of the rdataset
+        @type rdtype: int
+        @param covers: The covered type.
+        @type covers: int
+        """
+
+        rds = self.get_rdataset(rdclass, rdtype, covers)
+        if not rds is None:
+            self.rdatasets.remove(rds)
+
+    def replace_rdataset(self, replacement):
+        """Replace an rdataset.
+        
+        It is not an error if there is no rdataset matching I{replacement}.
+
+        Ownership of the I{replacement} object is transferred to the node;
+        in other words, this method does not store a copy of I{replacement}
+        at the node, it stores I{replacement} itself.
+        """
+
+        self.delete_rdataset(replacement.rdclass, replacement.rdtype,
+                             replacement.covers)
+        self.rdatasets.append(replacement)
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/opcode.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/opcode.py
new file mode 100644
index 0000000..705bd09
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/opcode.py
@@ -0,0 +1,104 @@
+# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Opcodes."""
+
+import dns.exception
+
+QUERY = 0
+IQUERY = 1
+STATUS = 2
+NOTIFY = 4
+UPDATE = 5
+
+_by_text = {
+    'QUERY' : QUERY,
+    'IQUERY' : IQUERY,
+    'STATUS' : STATUS,
+    'NOTIFY' : NOTIFY,
+    'UPDATE' : UPDATE
+}
+
+# We construct the inverse mapping programmatically to ensure that we
+# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
+# would cause the mapping not to be a true inverse.
+
+_by_value = dict([(y, x) for x, y in _by_text.iteritems()])
+
+
+class UnknownOpcode(dns.exception.DNSException):
+    """Raised if an opcode is unknown."""
+    pass
+
+def from_text(text):
+    """Convert text into an opcode.
+
+    @param text: the textual opcode
+    @type text: string
+    @raises UnknownOpcode: the opcode is unknown
+    @rtype: int
+    """
+
+    if text.isdigit():
+        value = int(text)
+        if value >= 0 and value <= 15:
+            return value
+    value = _by_text.get(text.upper())
+    if value is None:
+        raise UnknownOpcode
+    return value
+
+def from_flags(flags):
+    """Extract an opcode from DNS message flags.
+
+    @param flags: int
+    @rtype: int
+    """
+    
+    return (flags & 0x7800) >> 11
+
+def to_flags(value):
+    """Convert an opcode to a value suitable for ORing into DNS message
+    flags.
+    @rtype: int
+    """
+    
+    return (value << 11) & 0x7800
+    
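+# Illustrative sketch (editorial note): the opcode occupies bits 11-14 of the
+# 16-bit DNS flags word, so from_flags() and to_flags() are inverses.
+#
+#   to_flags(UPDATE)             # 0x2800  (5 << 11)
+#   from_flags(0x2800 | 0x0100)  # 5 == UPDATE; other flag bits are ignored
+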
+def to_text(value):
+    """Convert an opcode to text.
+
+    @param value: the opcode
+    @type value: int
+    @raises UnknownOpcode: the opcode is unknown
+    @rtype: string
+    """
+    
+    text = _by_value.get(value)
+    if text is None:
+        text = str(value)
+    return text
+
+def is_update(flags):
+    """True if the opcode in flags is UPDATE.
+
+    @param flags: DNS flags
+    @type flags: int
+    @rtype: bool
+    """
+    
+    if (from_flags(flags) == UPDATE):
+        return True
+    return False
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/query.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/query.py
new file mode 100644
index 0000000..c023b14
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/query.py
@@ -0,0 +1,428 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Talk to a DNS server."""
+
+from __future__ import generators
+
+import errno
+import select
+import socket
+import struct
+import sys
+import time
+
+import dns.exception
+import dns.inet
+import dns.name
+import dns.message
+import dns.rdataclass
+import dns.rdatatype
+
+class UnexpectedSource(dns.exception.DNSException):
+    """Raised if a query response comes from an unexpected address or port."""
+    pass
+
+class BadResponse(dns.exception.FormError):
+    """Raised if a query response does not respond to the question asked."""
+    pass
+
+def _compute_expiration(timeout):
+    if timeout is None:
+        return None
+    else:
+        return time.time() + timeout
+
+def _wait_for(ir, iw, ix, expiration):
+    done = False
+    while not done:
+        if expiration is None:
+            timeout = None
+        else:
+            timeout = expiration - time.time()
+            if timeout <= 0.0:
+                raise dns.exception.Timeout
+        try:
+            if timeout is None:
+                (r, w, x) = select.select(ir, iw, ix)
+            else:
+                (r, w, x) = select.select(ir, iw, ix, timeout)
+        except select.error, e:
+            if e.args[0] != errno.EINTR:
+                raise e
+            # select() was interrupted by a signal before any descriptor
+            # became ready; retry it rather than falling through with
+            # r/w/x possibly unbound.
+            continue
+        done = True
+        if len(r) == 0 and len(w) == 0 and len(x) == 0:
+            raise dns.exception.Timeout
+
+def _wait_for_readable(s, expiration):
+    _wait_for([s], [], [s], expiration)
+
+def _wait_for_writable(s, expiration):
+    _wait_for([], [s], [s], expiration)
+
+def _addresses_equal(af, a1, a2):
+    # Convert the first value of the tuple, which is a textual format
+    # address into binary form, so that we are not confused by different
+    # textual representations of the same address
+    n1 = dns.inet.inet_pton(af, a1[0])
+    n2 = dns.inet.inet_pton(af, a2[0])
+    return n1 == n2 and a1[1:] == a2[1:]
+
+def udp(q, where, timeout=None, port=53, af=None, source=None, source_port=0,
+        ignore_unexpected=False, one_rr_per_rrset=False):
+    """Return the response obtained after sending a query via UDP.
+
+    @param q: the query
+    @type q: dns.message.Message
+    @param where: where to send the message
+    @type where: string containing an IPv4 or IPv6 address
+    @param timeout: The number of seconds to wait before the query times out.
+    If None, the default, wait forever.
+    @type timeout: float
+    @param port: The port to which to send the message.  The default is 53.
+    @type port: int
+    @param af: the address family to use.  The default is None, which
+    causes the address family to be inferred from the form of where.
+    If the inference attempt fails, AF_INET is used.
+    @type af: int
+    @rtype: dns.message.Message object
+    @param source: source address.  The default is the IPv4 wildcard address.
+    @type source: string
+    @param source_port: The port from which to send the message.
+    The default is 0.
+    @type source_port: int
+    @param ignore_unexpected: If True, ignore responses from unexpected
+    sources.  The default is False.
+    @type ignore_unexpected: bool
+    @param one_rr_per_rrset: Put each RR into its own RRset
+    @type one_rr_per_rrset: bool
+    """
+
+    wire = q.to_wire()
+    if af is None:
+        try:
+            af = dns.inet.af_for_address(where)
+        except:
+            af = dns.inet.AF_INET
+    if af == dns.inet.AF_INET:
+        destination = (where, port)
+        if source is not None:
+            source = (source, source_port)
+    elif af == dns.inet.AF_INET6:
+        destination = (where, port, 0, 0)
+        if source is not None:
+            source = (source, source_port, 0, 0)
+    s = socket.socket(af, socket.SOCK_DGRAM, 0)
+    try:
+        expiration = _compute_expiration(timeout)
+        s.setblocking(0)
+        if source is not None:
+            s.bind(source)
+        _wait_for_writable(s, expiration)
+        s.sendto(wire, destination)
+        while 1:
+            _wait_for_readable(s, expiration)
+            (wire, from_address) = s.recvfrom(65535)
+            if _addresses_equal(af, from_address, destination) or \
+                    (dns.inet.is_multicast(where) and \
+                         from_address[1:] == destination[1:]):
+                break
+            if not ignore_unexpected:
+                raise UnexpectedSource('got a response from '
+                                       '%s instead of %s' % (from_address,
+                                                             destination))
+    finally:
+        s.close()
+    r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac,
+                              one_rr_per_rrset=one_rr_per_rrset)
+    if not q.is_response(r):
+        raise BadResponse
+    return r
+
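+# Illustrative sketch (editorial note, assuming a recursive resolver is
+# reachable at the chosen address): a minimal UDP lookup built from udp() plus
+# dns.message.make_query(), which xfr() below also uses.
+#
+#   q = dns.message.make_query('example.com.', dns.rdatatype.A)
+#   response = udp(q, '8.8.8.8', timeout=2.0)
+#   # response is a dns.message.Message; its answer section holds the A RRset
+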
+def _net_read(sock, count, expiration):
+    """Read the specified number of bytes from sock.  Keep trying until we
+    either get the desired amount, or we hit EOF.
+    A Timeout exception will be raised if the operation is not completed
+    by the expiration time.
+    """
+    s = ''
+    while count > 0:
+        _wait_for_readable(sock, expiration)
+        n = sock.recv(count)
+        if n == '':
+            raise EOFError
+        count = count - len(n)
+        s = s + n
+    return s
+
+def _net_write(sock, data, expiration):
+    """Write the specified data to the socket.
+    A Timeout exception will be raised if the operation is not completed
+    by the expiration time.
+    """
+    current = 0
+    l = len(data)
+    while current < l:
+        _wait_for_writable(sock, expiration)
+        current += sock.send(data[current:])
+
+def _connect(s, address):
+    try:
+        s.connect(address)
+    except socket.error:
+        (ty, v) = sys.exc_info()[:2]
+        if v[0] != errno.EINPROGRESS and \
+               v[0] != errno.EWOULDBLOCK and \
+               v[0] != errno.EALREADY:
+            raise v
+
+def tcp(q, where, timeout=None, port=53, af=None, source=None, source_port=0,
+        one_rr_per_rrset=False):
+    """Return the response obtained after sending a query via TCP.
+
+    @param q: the query
+    @type q: dns.message.Message object
+    @param where: where to send the message
+    @type where: string containing an IPv4 or IPv6 address
+    @param timeout: The number of seconds to wait before the query times out.
+    If None, the default, wait forever.
+    @type timeout: float
+    @param port: The port to which to send the message.  The default is 53.
+    @type port: int
+    @param af: the address family to use.  The default is None, which
+    causes the address family to be inferred from the form of where.
+    If the inference attempt fails, AF_INET is used.
+    @type af: int
+    @rtype: dns.message.Message object
+    @param source: source address.  The default is the IPv4 wildcard address.
+    @type source: string
+    @param source_port: The port from which to send the message.
+    The default is 0.
+    @type source_port: int
+    @param one_rr_per_rrset: Put each RR into its own RRset
+    @type one_rr_per_rrset: bool
+    """
+
+    wire = q.to_wire()
+    if af is None:
+        try:
+            af = dns.inet.af_for_address(where)
+        except:
+            af = dns.inet.AF_INET
+    if af == dns.inet.AF_INET:
+        destination = (where, port)
+        if source is not None:
+            source = (source, source_port)
+    elif af == dns.inet.AF_INET6:
+        destination = (where, port, 0, 0)
+        if source is not None:
+            source = (source, source_port, 0, 0)
+    s = socket.socket(af, socket.SOCK_STREAM, 0)
+    try:
+        expiration = _compute_expiration(timeout)
+        s.setblocking(0)
+        if source is not None:
+            s.bind(source)
+        _connect(s, destination)
+
+        l = len(wire)
+
+        # copying the wire into tcpmsg is inefficient, but lets us
+        # avoid writev() or doing a short write that would get pushed
+        # onto the net
+        tcpmsg = struct.pack("!H", l) + wire
+        _net_write(s, tcpmsg, expiration)
+        ldata = _net_read(s, 2, expiration)
+        (l,) = struct.unpack("!H", ldata)
+        wire = _net_read(s, l, expiration)
+    finally:
+        s.close()
+    r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac,
+                              one_rr_per_rrset=one_rr_per_rrset)
+    if not q.is_response(r):
+        raise BadResponse
+    return r
+
+def xfr(where, zone, rdtype=dns.rdatatype.AXFR, rdclass=dns.rdataclass.IN,
+        timeout=None, port=53, keyring=None, keyname=None, relativize=True,
+        af=None, lifetime=None, source=None, source_port=0, serial=0,
+        use_udp=False, keyalgorithm=dns.tsig.default_algorithm):
+    """Return a generator for the responses to a zone transfer.
+
+    @param where: where to send the message
+    @type where: string containing an IPv4 or IPv6 address
+    @param zone: The name of the zone to transfer
+    @type zone: dns.name.Name object or string
+    @param rdtype: The type of zone transfer.  The default is
+    dns.rdatatype.AXFR.
+    @type rdtype: int or string
+    @param rdclass: The class of the zone transfer.  The default is
+    dns.rdataclass.IN.
+    @type rdclass: int or string
+    @param timeout: The number of seconds to wait for each response message.
+    If None, the default, wait forever.
+    @type timeout: float
+    @param port: The port to which to send the message.  The default is 53.
+    @type port: int
+    @param keyring: The TSIG keyring to use
+    @type keyring: dict
+    @param keyname: The name of the TSIG key to use
+    @type keyname: dns.name.Name object or string
+    @param relativize: If True, all names in the zone will be relativized to
+    the zone origin.  It is essential that the relativize setting matches
+    the one specified to dns.zone.from_xfr().
+    @type relativize: bool
+    @param af: the address family to use.  The default is None, which
+    causes the address family to be inferred from the form of where.
+    If the inference attempt fails, AF_INET is used.
+    @type af: int
+    @param lifetime: The total number of seconds to spend doing the transfer.
+    If None, the default, then there is no limit on the time the transfer may
+    take.
+    @type lifetime: float
+    @rtype: generator of dns.message.Message objects.
+    @param source: source address.  The default is the IPv4 wildcard address.
+    @type source: string
+    @param source_port: The port from which to send the message.
+    The default is 0.
+    @type source_port: int
+    @param serial: The SOA serial number to use as the base for an IXFR diff
+    sequence (only meaningful if rdtype == dns.rdatatype.IXFR).
+    @type serial: int
+    @param use_udp: Use UDP (only meaningful for IXFR)
+    @type use_udp: bool
+    @param keyalgorithm: The TSIG algorithm to use; defaults to
+    dns.tsig.default_algorithm
+    @type keyalgorithm: string
+    """
+
+    if isinstance(zone, (str, unicode)):
+        zone = dns.name.from_text(zone)
+    if isinstance(rdtype, str):
+        rdtype = dns.rdatatype.from_text(rdtype)
+    q = dns.message.make_query(zone, rdtype, rdclass)
+    if rdtype == dns.rdatatype.IXFR:
+        rrset = dns.rrset.from_text(zone, 0, 'IN', 'SOA',
+                                    '. . %u 0 0 0 0' % serial)
+        q.authority.append(rrset)
+    if not keyring is None:
+        q.use_tsig(keyring, keyname, algorithm=keyalgorithm)
+    wire = q.to_wire()
+    if af is None:
+        try:
+            af = dns.inet.af_for_address(where)
+        except:
+            af = dns.inet.AF_INET
+    if af == dns.inet.AF_INET:
+        destination = (where, port)
+        if source is not None:
+            source = (source, source_port)
+    elif af == dns.inet.AF_INET6:
+        destination = (where, port, 0, 0)
+        if source is not None:
+            source = (source, source_port, 0, 0)
+    if use_udp:
+        if rdtype != dns.rdatatype.IXFR:
+            raise ValueError('cannot do a UDP AXFR')
+        s = socket.socket(af, socket.SOCK_DGRAM, 0)
+    else:
+        s = socket.socket(af, socket.SOCK_STREAM, 0)
+    s.setblocking(0)
+    if source is not None:
+        s.bind(source)
+    expiration = _compute_expiration(lifetime)
+    _connect(s, destination)
+    l = len(wire)
+    if use_udp:
+        _wait_for_writable(s, expiration)
+        s.send(wire)
+    else:
+        tcpmsg = struct.pack("!H", l) + wire
+        _net_write(s, tcpmsg, expiration)
+    done = False
+    soa_rrset = None
+    soa_count = 0
+    if relativize:
+        origin = zone
+        oname = dns.name.empty
+    else:
+        origin = None
+        oname = zone
+    tsig_ctx = None
+    first = True
+    while not done:
+        mexpiration = _compute_expiration(timeout)
+        if mexpiration is None or mexpiration > expiration:
+            mexpiration = expiration
+        if use_udp:
+            _wait_for_readable(s, expiration)
+            (wire, from_address) = s.recvfrom(65535)
+        else:
+            ldata = _net_read(s, 2, mexpiration)
+            (l,) = struct.unpack("!H", ldata)
+            wire = _net_read(s, l, mexpiration)
+        r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac,
+                                  xfr=True, origin=origin, tsig_ctx=tsig_ctx,
+                                  multi=True, first=first,
+                                  one_rr_per_rrset=(rdtype==dns.rdatatype.IXFR))
+        tsig_ctx = r.tsig_ctx
+        first = False
+        answer_index = 0
+        delete_mode = False
+        expecting_SOA = False
+        if soa_rrset is None:
+            if not r.answer or r.answer[0].name != oname:
+                raise dns.exception.FormError
+            rrset = r.answer[0]
+            if rrset.rdtype != dns.rdatatype.SOA:
+                raise dns.exception.FormError("first RRset is not an SOA")
+            answer_index = 1
+            soa_rrset = rrset.copy()
+            if rdtype == dns.rdatatype.IXFR:
+                if soa_rrset[0].serial == serial:
+                    #
+                    # We're already up-to-date.
+                    #
+                    done = True
+                else:
+                    expecting_SOA = True
+        #
+        # Process SOAs in the answer section (other than the initial
+        # SOA in the first message).
+        #
+        for rrset in r.answer[answer_index:]:
+            if done:
+                raise dns.exception.FormError("answers after final SOA")
+            if rrset.rdtype == dns.rdatatype.SOA and rrset.name == oname:
+                if expecting_SOA:
+                    if rrset[0].serial != serial:
+                        raise dns.exception.FormError("IXFR base serial mismatch")
+                    expecting_SOA = False
+                elif rdtype == dns.rdatatype.IXFR:
+                    delete_mode = not delete_mode
+                if rrset == soa_rrset and not delete_mode:
+                    done = True
+            elif expecting_SOA:
+                #
+                # We made an IXFR request and are expecting another
+                # SOA RR, but saw something else, so this must be an
+                # AXFR response.
+                #
+                rdtype = dns.rdatatype.AXFR
+                expecting_SOA = False
+        if done and q.keyring and not r.had_tsig:
+            raise dns.exception.FormError("missing TSIG")
+        yield r
+    s.close()
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rcode.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rcode.py
new file mode 100644
index 0000000..c055f2e
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rcode.py
@@ -0,0 +1,119 @@
+# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Result Codes."""
+
+import dns.exception
+
+NOERROR = 0
+FORMERR = 1
+SERVFAIL = 2
+NXDOMAIN = 3
+NOTIMP = 4
+REFUSED = 5
+YXDOMAIN = 6
+YXRRSET = 7
+NXRRSET = 8
+NOTAUTH = 9
+NOTZONE = 10
+BADVERS = 16
+
+_by_text = {
+    'NOERROR' : NOERROR,
+    'FORMERR' : FORMERR,
+    'SERVFAIL' : SERVFAIL,
+    'NXDOMAIN' : NXDOMAIN,
+    'NOTIMP' : NOTIMP,
+    'REFUSED' : REFUSED,
+    'YXDOMAIN' : YXDOMAIN,
+    'YXRRSET' : YXRRSET,
+    'NXRRSET' : NXRRSET,
+    'NOTAUTH' : NOTAUTH,
+    'NOTZONE' : NOTZONE,
+    'BADVERS' : BADVERS
+}
+
+# We construct the inverse mapping programmatically to ensure that we
+# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
+# would cause the mapping not to be a true inverse.
+
+_by_value = dict([(y, x) for x, y in _by_text.iteritems()])
+
+
+class UnknownRcode(dns.exception.DNSException):
+    """Raised if an rcode is unknown."""
+    pass
+
+def from_text(text):
+    """Convert text into an rcode.
+
+    @param text: the textual rcode
+    @type text: string
+    @raises UnknownRcode: the rcode is unknown
+    @rtype: int
+    """
+
+    if text.isdigit():
+        v = int(text)
+        if v >= 0 and v <= 4095:
+            return v
+    v = _by_text.get(text.upper())
+    if v is None:
+        raise UnknownRcode
+    return v
+
+def from_flags(flags, ednsflags):
+    """Return the rcode value encoded by flags and ednsflags.
+
+    @param flags: the DNS flags
+    @type flags: int
+    @param ednsflags: the EDNS flags
+    @type ednsflags: int
+    @raises ValueError: rcode is < 0 or > 4095
+    @rtype: int
+    """
+
+    value = (flags & 0x000f) | ((ednsflags >> 20) & 0xff0)
+    if value < 0 or value > 4095:
+        raise ValueError('rcode must be >= 0 and <= 4095')
+    return value
+
+def to_flags(value):
+    """Return a (flags, ednsflags) tuple which encodes the rcode.
+
+    @param value: the rcode
+    @type value: int
+    @raises ValueError: rcode is < 0 or > 4095
+    @rtype: (int, int) tuple
+    """
+
+    if value < 0 or value > 4095:
+        raise ValueError('rcode must be >= 0 and <= 4095')
+    v = value & 0xf
+    ev = long(value & 0xff0) << 20
+    return (v, ev)
+
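+# Illustrative sketch (editorial note): rcodes above 15 only fit in the
+# extended (EDNS) field, so BADVERS splits across the two flag words.
+#
+#   to_flags(BADVERS)         # (0, 0x1000000): low 4 bits, upper 8 bits in EDNS
+#   from_flags(0, 0x1000000)  # 16 == BADVERS
+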
+def to_text(value):
+    """Convert rcode into text.
+
+    @param value: the rcode
+    @type value: int
+    @rtype: string
+    """
+
+    text = _by_value.get(value)
+    if text is None:
+        text = str(value)
+    return text
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdata.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdata.py
new file mode 100644
index 0000000..ce02686
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdata.py
@@ -0,0 +1,456 @@
+# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS rdata.
+
+@var _rdata_modules: A dictionary mapping a (rdclass, rdtype) tuple to
+the module which implements that type.
+@type _rdata_modules: dict
+@var _module_prefix: The prefix to use when forming module names.  The
+default is 'dns.rdtypes'.  Changing this value will break the library.
+@type _module_prefix: string
+@var _hex_chunksize: At most this many octets will be represented in each
+chunk of hexstring that _hexify() produces before whitespace occurs.
+@type _hex_chunksize: int"""
+
+import cStringIO
+
+import dns.exception
+import dns.rdataclass
+import dns.rdatatype
+import dns.tokenizer
+
+_hex_chunksize = 32
+
+def _hexify(data, chunksize=None):
+    """Convert a binary string into its hex encoding, broken up into chunks
+    of I{chunksize} characters separated by a space.
+
+    @param data: the binary string
+    @type data: string
+    @param chunksize: the chunk size.  Default is L{dns.rdata._hex_chunksize}
+    @rtype: string
+    """
+
+    if chunksize is None:
+        chunksize = _hex_chunksize
+    hex = data.encode('hex_codec')
+    l = len(hex)
+    if l > chunksize:
+        chunks = []
+        i = 0
+        while i < l:
+            chunks.append(hex[i : i + chunksize])
+            i += chunksize
+        hex = ' '.join(chunks)
+    return hex
+
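+# Illustrative sketch (editorial note): chunking only kicks in once the hex
+# string exceeds the chunk size.
+#
+#   _hexify('\x01\x02\x03\x04')               # '01020304'
+#   _hexify('\x01\x02\x03\x04', chunksize=4)  # '0102 0304'
+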
+_base64_chunksize = 32
+
+def _base64ify(data, chunksize=None):
+    """Convert a binary string into its base64 encoding, broken up into chunks
+    of I{chunksize} characters separated by a space.
+
+    @param data: the binary string
+    @type data: string
+    @param chunksize: the chunk size.  Default is
+    L{dns.rdata._base64_chunksize}
+    @rtype: string
+    """
+
+    if chunksize is None:
+        chunksize = _base64_chunksize
+    b64 = data.encode('base64_codec')
+    b64 = b64.replace('\n', '')
+    l = len(b64)
+    if l > chunksize:
+        chunks = []
+        i = 0
+        while i < l:
+            chunks.append(b64[i : i + chunksize])
+            i += chunksize
+        b64 = ' '.join(chunks)
+    return b64
+
+__escaped = {
+    '"' : True,
+    '\\' : True,
+    }
+
+def _escapify(qstring):
+    """Escape the characters in a quoted string which need it.
+
+    @param qstring: the string
+    @type qstring: string
+    @returns: the escaped string
+    @rtype: string
+    """
+
+    text = ''
+    for c in qstring:
+        if c in __escaped:
+            text += '\\' + c
+        elif ord(c) >= 0x20 and ord(c) < 0x7F:
+            text += c
+        else:
+            text += '\\%03d' % ord(c)
+    return text
+
+def _truncate_bitmap(what):
+    """Determine the index of greatest byte that isn't all zeros, and
+    return the bitmap that contains all the bytes less than that index.
+
+    @param what: a string of octets representing a bitmap.
+    @type what: string
+    @rtype: string
+    """
+
+    for i in xrange(len(what) - 1, -1, -1):
+        if what[i] != '\x00':
+            break
+    return ''.join(what[0 : i + 1])
+
+class Rdata(object):
+    """Base class for all DNS rdata types.
+    """
+
+    __slots__ = ['rdclass', 'rdtype']
+
+    def __init__(self, rdclass, rdtype):
+        """Initialize an rdata.
+        @param rdclass: The rdata class
+        @type rdclass: int
+        @param rdtype: The rdata type
+        @type rdtype: int
+        """
+
+        self.rdclass = rdclass
+        self.rdtype = rdtype
+
+    def covers(self):
+        """DNS SIG/RRSIG rdatas apply to a specific type; this type is
+        returned by the covers() function.  If the rdata type is not
+        SIG or RRSIG, dns.rdatatype.NONE is returned.  This is useful when
+        creating rdatasets, allowing the rdataset to contain only RRSIGs
+        of a particular type, e.g. RRSIG(NS).
+        @rtype: int
+        """
+
+        return dns.rdatatype.NONE
+
+    def extended_rdatatype(self):
+        """Return a 32-bit type value, the least significant 16 bits of
+        which are the ordinary DNS type, and the upper 16 bits of which are
+        the "covered" type, if any.
+        @rtype: int
+        """
+
+        return self.covers() << 16 | self.rdtype
+
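+    # Illustrative sketch (editorial note): for an ordinary rdata covers() is
+    # dns.rdatatype.NONE (0), so the extended type equals the plain type.
+    #
+    #   Rdata(dns.rdataclass.IN, dns.rdatatype.A).extended_rdatatype()
+    #   # == dns.rdatatype.A
+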
+    def to_text(self, origin=None, relativize=True, **kw):
+        """Convert an rdata to text format.
+        @rtype: string
+        """
+        raise NotImplementedError
+
+    def to_wire(self, file, compress = None, origin = None):
+        """Convert an rdata to wire format.
+        @rtype: string
+        """
+
+        raise NotImplementedError
+
+    def to_digestable(self, origin = None):
+        """Convert rdata to a format suitable for digesting in hashes.  This
+        is also the DNSSEC canonical form."""
+        f = cStringIO.StringIO()
+        self.to_wire(f, None, origin)
+        return f.getvalue()
+
+    def validate(self):
+        """Check that the current contents of the rdata's fields are
+        valid.  If you change an rdata by assigning to its fields,
+        it is a good idea to call validate() when you are done making
+        changes.
+        """
+        dns.rdata.from_text(self.rdclass, self.rdtype, self.to_text())
+
+    def __repr__(self):
+        covers = self.covers()
+        if covers == dns.rdatatype.NONE:
+            ctext = ''
+        else:
+            ctext = '(' + dns.rdatatype.to_text(covers) + ')'
+        return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
+               dns.rdatatype.to_text(self.rdtype) + ctext + ' rdata: ' + \
+               str(self) + '>'
+
+    def __str__(self):
+        return self.to_text()
+
+    def _cmp(self, other):
+        """Compare an rdata with another rdata of the same rdtype and
+        rdclass.  Return < 0 if self < other in the DNSSEC ordering,
+        0 if self == other, and > 0 if self > other.
+        """
+
+        raise NotImplementedError
+
+    def __eq__(self, other):
+        if not isinstance(other, Rdata):
+            return False
+        if self.rdclass != other.rdclass or \
+           self.rdtype != other.rdtype:
+            return False
+        return self._cmp(other) == 0
+
+    def __ne__(self, other):
+        if not isinstance(other, Rdata):
+            return True
+        if self.rdclass != other.rdclass or \
+           self.rdtype != other.rdtype:
+            return True
+        return self._cmp(other) != 0
+
+    def __lt__(self, other):
+        if not isinstance(other, Rdata) or \
+               self.rdclass != other.rdclass or \
+               self.rdtype != other.rdtype:
+            return NotImplemented
+        return self._cmp(other) < 0
+
+    def __le__(self, other):
+        if not isinstance(other, Rdata) or \
+               self.rdclass != other.rdclass or \
+               self.rdtype != other.rdtype:
+            return NotImplemented
+        return self._cmp(other) <= 0
+
+    def __ge__(self, other):
+        if not isinstance(other, Rdata) or \
+               self.rdclass != other.rdclass or \
+               self.rdtype != other.rdtype:
+            return NotImplemented
+        return self._cmp(other) >= 0
+
+    def __gt__(self, other):
+        if not isinstance(other, Rdata) or \
+               self.rdclass != other.rdclass or \
+               self.rdtype != other.rdtype:
+            return NotImplemented
+        return self._cmp(other) > 0
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        """Build an rdata object from text format.
+
+        @param rdclass: The rdata class
+        @type rdclass: int
+        @param rdtype: The rdata type
+        @type rdtype: int
+        @param tok: The tokenizer
+        @type tok: dns.tokenizer.Tokenizer
+        @param origin: The origin to use for relative names
+        @type origin: dns.name.Name
+        @param relativize: should names be relativized?
+        @type relativize: bool
+        @rtype: dns.rdata.Rdata instance
+        """
+
+        raise NotImplementedError
+
+    from_text = classmethod(from_text)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        """Build an rdata object from wire format
+
+        @param rdclass: The rdata class
+        @type rdclass: int
+        @param rdtype: The rdata type
+        @type rdtype: int
+        @param wire: The wire-format message
+        @type wire: string
+        @param current: The offset in wire of the beginning of the rdata.
+        @type current: int
+        @param rdlen: The length of the wire-format rdata
+        @type rdlen: int
+        @param origin: The origin to use for relative names
+        @type origin: dns.name.Name
+        @rtype: dns.rdata.Rdata instance
+        """
+
+        raise NotImplementedError
+
+    from_wire = classmethod(from_wire)
+
+    def choose_relativity(self, origin = None, relativize = True):
+        """Convert any domain names in the rdata to the specified
+        relativization.
+        """
+
+        pass
+
+
+class GenericRdata(Rdata):
+    """Generate Rdata Class
+
+    This class is used for rdata types for which we have no better
+    implementation.  It implements the DNS "unknown RRs" scheme.
+    """
+
+    __slots__ = ['data']
+
+    def __init__(self, rdclass, rdtype, data):
+        super(GenericRdata, self).__init__(rdclass, rdtype)
+        self.data = data
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        return r'\# %d ' % len(self.data) + _hexify(self.data)
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        token = tok.get()
+        if not token.is_identifier() or token.value != '\#':
+            raise dns.exception.SyntaxError(r'generic rdata does not start with \#')
+        length = tok.get_int()
+        chunks = []
+        while 1:
+            token = tok.get()
+            if token.is_eol_or_eof():
+                break
+            chunks.append(token.value)
+        hex = ''.join(chunks)
+        data = hex.decode('hex_codec')
+        if len(data) != length:
+            raise dns.exception.SyntaxError('generic rdata hex data has wrong length')
+        return cls(rdclass, rdtype, data)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        file.write(self.data)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        return cls(rdclass, rdtype, wire[current : current + rdlen])
+
+    from_wire = classmethod(from_wire)
+
+    def _cmp(self, other):
+        return cmp(self.data, other.data)
+
+_rdata_modules = {}
+_module_prefix = 'dns.rdtypes'
+
+def get_rdata_class(rdclass, rdtype):
+
+    def import_module(name):
+        mod = __import__(name)
+        components = name.split('.')
+        for comp in components[1:]:
+            mod = getattr(mod, comp)
+        return mod
+
+    mod = _rdata_modules.get((rdclass, rdtype))
+    rdclass_text = dns.rdataclass.to_text(rdclass)
+    rdtype_text = dns.rdatatype.to_text(rdtype)
+    rdtype_text = rdtype_text.replace('-', '_')
+    if not mod:
+        mod = _rdata_modules.get((dns.rdatatype.ANY, rdtype))
+        if not mod:
+            try:
+                mod = import_module('.'.join([_module_prefix,
+                                              rdclass_text, rdtype_text]))
+                _rdata_modules[(rdclass, rdtype)] = mod
+            except ImportError:
+                try:
+                    mod = import_module('.'.join([_module_prefix,
+                                                  'ANY', rdtype_text]))
+                    _rdata_modules[(dns.rdataclass.ANY, rdtype)] = mod
+                except ImportError:
+                    mod = None
+    if mod:
+        cls = getattr(mod, rdtype_text)
+    else:
+        cls = GenericRdata
+    return cls
+
+def from_text(rdclass, rdtype, tok, origin = None, relativize = True):
+    """Build an rdata object from text format.
+
+    This function attempts to dynamically load a class which
+    implements the specified rdata class and type.  If there is no
+    class-and-type-specific implementation, the GenericRdata class
+    is used.
+
+    Once a class is chosen, its from_text() class method is called
+    with the parameters to this function.
+
+    @param rdclass: The rdata class
+    @type rdclass: int
+    @param rdtype: The rdata type
+    @type rdtype: int
+    @param tok: The tokenizer
+    @type tok: dns.tokenizer.Tokenizer
+    @param origin: The origin to use for relative names
+    @type origin: dns.name.Name
+    @param relativize: Should names be relativized?
+    @type relativize: bool
+    @rtype: dns.rdata.Rdata instance"""
+
+    if isinstance(tok, str):
+        tok = dns.tokenizer.Tokenizer(tok)
+    cls = get_rdata_class(rdclass, rdtype)
+    if cls != GenericRdata:
+        # peek at first token
+        token = tok.get()
+        tok.unget(token)
+        if token.is_identifier() and \
+           token.value == r'\#':
+            #
+            # Known type using the generic syntax.  Extract the
+            # wire form from the generic syntax, and then run
+            # from_wire on it.
+            #
+            rdata = GenericRdata.from_text(rdclass, rdtype, tok, origin,
+                                           relativize)
+            return from_wire(rdclass, rdtype, rdata.data, 0, len(rdata.data),
+                             origin)
+    return cls.from_text(rdclass, rdtype, tok, origin, relativize)
+
+def from_wire(rdclass, rdtype, wire, current, rdlen, origin = None):
+    """Build an rdata object from wire format
+
+    This function attempts to dynamically load a class which
+    implements the specified rdata class and type.  If there is no
+    class-and-type-specific implementation, the GenericRdata class
+    is used.
+
+    Once a class is chosen, its from_wire() class method is called
+    with the parameters to this function.
+
+    @param rdclass: The rdata class
+    @type rdclass: int
+    @param rdtype: The rdata type
+    @type rdtype: int
+    @param wire: The wire-format message
+    @type wire: string
+    @param current: The offset in wire of the beginning of the rdata.
+    @type current: int
+    @param rdlen: The length of the wire-format rdata
+    @type rdlen: int
+    @param origin: The origin to use for relative names
+    @type origin: dns.name.Name
+    @rtype: dns.rdata.Rdata instance"""
+
+    cls = get_rdata_class(rdclass, rdtype)
+    return cls.from_wire(rdclass, rdtype, wire, current, rdlen, origin)
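A minimal usage sketch of the module-level from_text() helper above, assuming this vendored tree is importable as the dns package and that the type-specific dns.rdtypes.IN.A module ships alongside it (otherwise dispatch falls back to GenericRdata):

```
import dns.rdata
import dns.rdataclass
import dns.rdatatype

# Normal dispatch: get_rdata_class() resolves IN/A to a concrete
# rdata class and calls its from_text() classmethod.
rd = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '192.0.2.1')
assert rd.to_text() == '192.0.2.1'

# The same rdata in RFC 3597 generic syntax is detected by the
# leading "\#", decoded to wire form, and re-parsed via from_wire().
rd2 = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
                          r'\# 4 c0000201')
assert rd == rd2
```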
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdataclass.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdataclass.py
new file mode 100644
index 0000000..887fd1a
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdataclass.py
@@ -0,0 +1,114 @@
+# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Rdata Classes.
+
+@var _by_text: The rdata class textual name to value mapping
+@type _by_text: dict
+@var _by_value: The rdata class value to textual name mapping
+@type _by_value: dict
+@var _metaclasses: If an rdataclass is a metaclass, there will be a mapping
+whose key is the rdataclass value and whose value is True in this dictionary.
+@type _metaclasses: dict"""
+
+import re
+
+import dns.exception
+
+RESERVED0 = 0
+IN = 1
+CH = 3
+HS = 4
+NONE = 254
+ANY = 255
+
+_by_text = {
+    'RESERVED0' : RESERVED0,
+    'IN' : IN,
+    'CH' : CH,
+    'HS' : HS,
+    'NONE' : NONE,
+    'ANY' : ANY
+    }
+
+# We construct the inverse mapping programmatically to ensure that we
+# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
+# would cause the mapping not to be a true inverse.
+
+_by_value = dict([(y, x) for x, y in _by_text.iteritems()])
+
+# Now that we've built the inverse map, we can add class aliases to
+# the _by_text mapping.
+
+_by_text.update({
+    'INTERNET' : IN,
+    'CHAOS' : CH,
+    'HESIOD' : HS
+    })
+
+_metaclasses = {
+    NONE : True,
+    ANY : True
+    }
+
+_unknown_class_pattern = re.compile('CLASS([0-9]+)$', re.I);
+
+class UnknownRdataclass(dns.exception.DNSException):
+    """Raised when a class is unknown."""
+    pass
+
+def from_text(text):
+    """Convert text into a DNS rdata class value.
+    @param text: the text
+    @type text: string
+    @rtype: int
+    @raises dns.rdataclass.UnknownRdataclass: the class is unknown
+    @raises ValueError: the rdata class value is not >= 0 and <= 65535
+    """
+
+    value = _by_text.get(text.upper())
+    if value is None:
+        match = _unknown_class_pattern.match(text)
+        if match == None:
+            raise UnknownRdataclass
+        value = int(match.group(1))
+        if value < 0 or value > 65535:
+            raise ValueError("class must be between >= 0 and <= 65535")
+    return value
+
+def to_text(value):
+    """Convert a DNS rdata class to text.
+    @param value: the rdata class value
+    @type value: int
+    @rtype: string
+    @raises ValueError: the rdata class value is not >= 0 and <= 65535
+    """
+
+    if value < 0 or value > 65535:
+        raise ValueError("class must be between >= 0 and <= 65535")
+    text = _by_value.get(value)
+    if text is None:
+        text = 'CLASS' + `value`
+    return text
+
+def is_metaclass(rdclass):
+    """True if the class is a metaclass.
+    @param rdclass: the rdata class
+    @type rdclass: int
+    @rtype: bool"""
+
+    if _metaclasses.has_key(rdclass):
+        return True
+    return False
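A short sketch of the text/value conversions defined above, assuming the vendored dns package is on the import path:

```
import dns.rdataclass

# Mnemonic classes round-trip through the generated tables.
assert dns.rdataclass.from_text('IN') == dns.rdataclass.IN
assert dns.rdataclass.to_text(dns.rdataclass.CH) == 'CH'

# Classes without a mnemonic use the generic CLASSnnn form.
assert dns.rdataclass.from_text('CLASS4096') == 4096
assert dns.rdataclass.to_text(4096) == 'CLASS4096'

# NONE and ANY are metaclasses and are flagged as such.
assert dns.rdataclass.is_metaclass(dns.rdataclass.ANY)
```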
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdataset.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdataset.py
new file mode 100644
index 0000000..0af018b
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdataset.py
@@ -0,0 +1,329 @@
+# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS rdatasets (an rdataset is a set of rdatas of a given type and class)"""
+
+import random
+import StringIO
+import struct
+
+import dns.exception
+import dns.rdatatype
+import dns.rdataclass
+import dns.rdata
+import dns.set
+
+# define SimpleSet here for backwards compatibility
+SimpleSet = dns.set.Set
+
+class DifferingCovers(dns.exception.DNSException):
+    """Raised if an attempt is made to add a SIG/RRSIG whose covered type
+    is not the same as that of the other rdatas in the rdataset."""
+    pass
+
+class IncompatibleTypes(dns.exception.DNSException):
+    """Raised if an attempt is made to add rdata of an incompatible type."""
+    pass
+
+class Rdataset(dns.set.Set):
+    """A DNS rdataset.
+
+    @ivar rdclass: The class of the rdataset
+    @type rdclass: int
+    @ivar rdtype: The type of the rdataset
+    @type rdtype: int
+    @ivar covers: The covered type.  Usually this value is
+    dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or
+    dns.rdatatype.RRSIG, then the covers value will be the rdata
+    type the SIG/RRSIG covers.  The library treats the SIG and RRSIG
+    types as if they were a family of
+    types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA).  This makes RRSIGs much
+    easier to work with than if RRSIGs covering different rdata
+    types were aggregated into a single RRSIG rdataset.
+    @type covers: int
+    @ivar ttl: The DNS TTL (Time To Live) value
+    @type ttl: int
+    """
+
+    __slots__ = ['rdclass', 'rdtype', 'covers', 'ttl']
+
+    def __init__(self, rdclass, rdtype, covers=dns.rdatatype.NONE):
+        """Create a new rdataset of the specified class and type.
+
+        @see: the description of the class instance variables for the
+        meaning of I{rdclass} and I{rdtype}"""
+
+        super(Rdataset, self).__init__()
+        self.rdclass = rdclass
+        self.rdtype = rdtype
+        self.covers = covers
+        self.ttl = 0
+
+    def _clone(self):
+        obj = super(Rdataset, self)._clone()
+        obj.rdclass = self.rdclass
+        obj.rdtype = self.rdtype
+        obj.covers = self.covers
+        obj.ttl = self.ttl
+        return obj
+
+    def update_ttl(self, ttl):
+        """Set the TTL of the rdataset to be the lesser of the set's current
+        TTL or the specified TTL.  If the set contains no rdatas, set the TTL
+        to the specified TTL.
+        @param ttl: The TTL
+        @type ttl: int"""
+
+        if len(self) == 0:
+            self.ttl = ttl
+        elif ttl < self.ttl:
+            self.ttl = ttl
+
+    def add(self, rd, ttl=None):
+        """Add the specified rdata to the rdataset.
+
+        If the optional I{ttl} parameter is supplied, then
+        self.update_ttl(ttl) will be called prior to adding the rdata.
+
+        @param rd: The rdata
+        @type rd: dns.rdata.Rdata object
+        @param ttl: The TTL
+        @type ttl: int"""
+
+        #
+        # If we're adding a signature, do some special handling to
+        # check that the signature covers the same type as the
+        # other rdatas in this rdataset.  If this is the first rdata
+        # in the set, initialize the covers field.
+        #
+        if self.rdclass != rd.rdclass or self.rdtype != rd.rdtype:
+            raise IncompatibleTypes
+        if not ttl is None:
+            self.update_ttl(ttl)
+        if self.rdtype == dns.rdatatype.RRSIG or \
+           self.rdtype == dns.rdatatype.SIG:
+            covers = rd.covers()
+            if len(self) == 0 and self.covers == dns.rdatatype.NONE:
+                self.covers = covers
+            elif self.covers != covers:
+                raise DifferingCovers
+        if dns.rdatatype.is_singleton(rd.rdtype) and len(self) > 0:
+            self.clear()
+        super(Rdataset, self).add(rd)
+
+    def union_update(self, other):
+        self.update_ttl(other.ttl)
+        super(Rdataset, self).union_update(other)
+
+    def intersection_update(self, other):
+        self.update_ttl(other.ttl)
+        super(Rdataset, self).intersection_update(other)
+
+    def update(self, other):
+        """Add all rdatas in other to self.
+
+        @param other: The rdataset from which to update
+        @type other: dns.rdataset.Rdataset object"""
+
+        self.update_ttl(other.ttl)
+        super(Rdataset, self).update(other)
+
+    def __repr__(self):
+        if self.covers == 0:
+            ctext = ''
+        else:
+            ctext = '(' + dns.rdatatype.to_text(self.covers) + ')'
+        return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
+               dns.rdatatype.to_text(self.rdtype) + ctext + ' rdataset>'
+
+    def __str__(self):
+        return self.to_text()
+
+    def __eq__(self, other):
+        """Two rdatasets are equal if they have the same class, type, and
+        covers, and contain the same rdata.
+        @rtype: bool"""
+
+        if not isinstance(other, Rdataset):
+            return False
+        if self.rdclass != other.rdclass or \
+           self.rdtype != other.rdtype or \
+           self.covers != other.covers:
+            return False
+        return super(Rdataset, self).__eq__(other)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def to_text(self, name=None, origin=None, relativize=True,
+                override_rdclass=None, **kw):
+        """Convert the rdataset into DNS master file format.
+
+        @see: L{dns.name.Name.choose_relativity} for more information
+        on how I{origin} and I{relativize} determine the way names
+        are emitted.
+
+        Any additional keyword arguments are passed on to the rdata
+        to_text() method.
+
+        @param name: If name is not None, emit RRs with I{name} as
+        the owner name.
+        @type name: dns.name.Name object
+        @param origin: The origin for relative names, or None.
+        @type origin: dns.name.Name object
+        @param relativize: True if names should be relativized
+        @type relativize: bool"""
+        if not name is None:
+            name = name.choose_relativity(origin, relativize)
+            ntext = str(name)
+            pad = ' '
+        else:
+            ntext = ''
+            pad = ''
+        s = StringIO.StringIO()
+        if not override_rdclass is None:
+            rdclass = override_rdclass
+        else:
+            rdclass = self.rdclass
+        if len(self) == 0:
+            #
+            # Empty rdatasets are used for the question section, and in
+            # some dynamic updates, so we don't need to print out the TTL
+            # (which is meaningless anyway).
+            #
+            print >> s, '%s%s%s %s' % (ntext, pad,
+                                       dns.rdataclass.to_text(rdclass),
+                                       dns.rdatatype.to_text(self.rdtype))
+        else:
+            for rd in self:
+                print >> s, '%s%s%d %s %s %s' % \
+                      (ntext, pad, self.ttl, dns.rdataclass.to_text(rdclass),
+                       dns.rdatatype.to_text(self.rdtype),
+                       rd.to_text(origin=origin, relativize=relativize, **kw))
+        #
+        # We strip off the final \n for the caller's convenience in printing
+        #
+        return s.getvalue()[:-1]
+
+    def to_wire(self, name, file, compress=None, origin=None,
+                override_rdclass=None, want_shuffle=True):
+        """Convert the rdataset to wire format.
+
+        @param name: The owner name of the RRset that will be emitted
+        @type name: dns.name.Name object
+        @param file: The file to which the wire format data will be appended
+        @type file: file
+        @param compress: The compression table to use; the default is None.
+        @type compress: dict
+        @param origin: The origin to be appended to any relative names when
+        they are emitted.  The default is None.
+        @returns: the number of records emitted
+        @rtype: int
+        """
+
+        if not override_rdclass is None:
+            rdclass =  override_rdclass
+            want_shuffle = False
+        else:
+            rdclass = self.rdclass
+        file.seek(0, 2)
+        if len(self) == 0:
+            name.to_wire(file, compress, origin)
+            stuff = struct.pack("!HHIH", self.rdtype, rdclass, 0, 0)
+            file.write(stuff)
+            return 1
+        else:
+            if want_shuffle:
+                l = list(self)
+                random.shuffle(l)
+            else:
+                l = self
+            for rd in l:
+                name.to_wire(file, compress, origin)
+                stuff = struct.pack("!HHIH", self.rdtype, rdclass,
+                                    self.ttl, 0)
+                file.write(stuff)
+                start = file.tell()
+                rd.to_wire(file, compress, origin)
+                end = file.tell()
+                assert end - start < 65536
+                file.seek(start - 2)
+                stuff = struct.pack("!H", end - start)
+                file.write(stuff)
+                file.seek(0, 2)
+            return len(self)
+
+    def match(self, rdclass, rdtype, covers):
+        """Returns True if this rdataset matches the specified class, type,
+        and covers"""
+        if self.rdclass == rdclass and \
+           self.rdtype == rdtype and \
+           self.covers == covers:
+            return True
+        return False
+
+def from_text_list(rdclass, rdtype, ttl, text_rdatas):
+    """Create an rdataset with the specified class, type, and TTL, and with
+    the specified list of rdatas in text format.
+
+    @rtype: dns.rdataset.Rdataset object
+    """
+
+    if isinstance(rdclass, str):
+        rdclass = dns.rdataclass.from_text(rdclass)
+    if isinstance(rdtype, str):
+        rdtype = dns.rdatatype.from_text(rdtype)
+    r = Rdataset(rdclass, rdtype)
+    r.update_ttl(ttl)
+    for t in text_rdatas:
+        rd = dns.rdata.from_text(r.rdclass, r.rdtype, t)
+        r.add(rd)
+    return r
+
+def from_text(rdclass, rdtype, ttl, *text_rdatas):
+    """Create an rdataset with the specified class, type, and TTL, and with
+    the specified rdatas in text format.
+
+    @rtype: dns.rdataset.Rdataset object
+    """
+
+    return from_text_list(rdclass, rdtype, ttl, text_rdatas)
+
+def from_rdata_list(ttl, rdatas):
+    """Create an rdataset with the specified TTL, and with
+    the specified list of rdata objects.
+
+    @rtype: dns.rdataset.Rdataset object
+    """
+
+    if len(rdatas) == 0:
+        raise ValueError("rdata list must not be empty")
+    r = None
+    for rd in rdatas:
+        if r is None:
+            r = Rdataset(rd.rdclass, rd.rdtype)
+            r.update_ttl(ttl)
+            first_time = False
+        r.add(rd)
+    return r
+
+def from_rdata(ttl, *rdatas):
+    """Create an rdataset with the specified TTL, and with
+    the specified rdata objects.
+
+    @rtype: dns.rdataset.Rdataset object
+    """
+
+    return from_rdata_list(ttl, rdatas)
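A brief sketch of how the rdataset constructors and the TTL-minimizing add() above might be used, again assuming the vendored dns package is importable:

```
import dns.rdata
import dns.rdataset

# from_text() parses each text rdata with dns.rdata.from_text() and
# seeds the set's TTL via update_ttl().
rrset = dns.rdataset.from_text('IN', 'A', 300, '192.0.2.1', '192.0.2.2')
assert len(rrset) == 2
assert rrset.ttl == 300

# Adding an rdata with a lower TTL lowers the whole set's TTL,
# since update_ttl() keeps the minimum once the set is non-empty.
rd = dns.rdata.from_text(rrset.rdclass, rrset.rdtype, '192.0.2.3')
rrset.add(rd, ttl=60)
assert rrset.ttl == 60
```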
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdatatype.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdatatype.py
new file mode 100644
index 0000000..1a02b7d
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdatatype.py
@@ -0,0 +1,232 @@
+# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Rdata Types.
+
+@var _by_text: The rdata type textual name to value mapping
+@type _by_text: dict
+@var _by_value: The rdata type value to textual name mapping
+@type _by_value: dict
+@var _metatypes: If an rdatatype is a metatype, there will be a mapping
+whose key is the rdatatype value and whose value is True in this dictionary.
+@type _metatypes: dict
+@var _singletons: If an rdatatype is a singleton, there will be a mapping
+whose key is the rdatatype value and whose value is True in this dictionary.
+@type _singletons: dict"""
+
+import re
+
+import dns.exception
+
+NONE = 0
+A = 1
+NS = 2
+MD = 3
+MF = 4
+CNAME = 5
+SOA = 6
+MB = 7
+MG = 8
+MR = 9
+NULL = 10
+WKS = 11
+PTR = 12
+HINFO = 13
+MINFO = 14
+MX = 15
+TXT = 16
+RP = 17
+AFSDB = 18
+X25 = 19
+ISDN = 20
+RT = 21
+NSAP = 22
+NSAP_PTR = 23
+SIG = 24
+KEY = 25
+PX = 26
+GPOS = 27
+AAAA = 28
+LOC = 29
+NXT = 30
+SRV = 33
+NAPTR = 35
+KX = 36
+CERT = 37
+A6 = 38
+DNAME = 39
+OPT = 41
+APL = 42
+DS = 43
+SSHFP = 44
+IPSECKEY = 45
+RRSIG = 46
+NSEC = 47
+DNSKEY = 48
+DHCID = 49
+NSEC3 = 50
+NSEC3PARAM = 51
+HIP = 55
+SPF = 99
+UNSPEC = 103
+TKEY = 249
+TSIG = 250
+IXFR = 251
+AXFR = 252
+MAILB = 253
+MAILA = 254
+ANY = 255
+TA = 32768
+DLV = 32769
+
+_by_text = {
+    'NONE' : NONE,
+    'A' : A,
+    'NS' : NS,
+    'MD' : MD,
+    'MF' : MF,
+    'CNAME' : CNAME,
+    'SOA' : SOA,
+    'MB' : MB,
+    'MG' : MG,
+    'MR' : MR,
+    'NULL' : NULL,
+    'WKS' : WKS,
+    'PTR' : PTR,
+    'HINFO' : HINFO,
+    'MINFO' : MINFO,
+    'MX' : MX,
+    'TXT' : TXT,
+    'RP' : RP,
+    'AFSDB' : AFSDB,
+    'X25' : X25,
+    'ISDN' : ISDN,
+    'RT' : RT,
+    'NSAP' : NSAP,
+    'NSAP-PTR' : NSAP_PTR,
+    'SIG' : SIG,
+    'KEY' : KEY,
+    'PX' : PX,
+    'GPOS' : GPOS,
+    'AAAA' : AAAA,
+    'LOC' : LOC,
+    'NXT' : NXT,
+    'SRV' : SRV,
+    'NAPTR' : NAPTR,
+    'KX' : KX,
+    'CERT' : CERT,
+    'A6' : A6,
+    'DNAME' : DNAME,
+    'OPT' : OPT,
+    'APL' : APL,
+    'DS' : DS,
+    'SSHFP' : SSHFP,
+    'IPSECKEY' : IPSECKEY,
+    'RRSIG' : RRSIG,
+    'NSEC' : NSEC,
+    'DNSKEY' : DNSKEY,
+    'DHCID' : DHCID,
+    'NSEC3' : NSEC3,
+    'NSEC3PARAM' : NSEC3PARAM,
+    'HIP' : HIP,
+    'SPF' : SPF,
+    'UNSPEC' : UNSPEC,
+    'TKEY' : TKEY,
+    'TSIG' : TSIG,
+    'IXFR' : IXFR,
+    'AXFR' : AXFR,
+    'MAILB' : MAILB,
+    'MAILA' : MAILA,
+    'ANY' : ANY,
+    'TA' : TA,
+    'DLV' : DLV,
+    }
+
+# We construct the inverse mapping programmatically to ensure that we
+# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
+# would cause the mapping not to be a true inverse.
+
+_by_value = dict([(y, x) for x, y in _by_text.iteritems()])
+
+
+_metatypes = {
+    OPT : True
+    }
+
+_singletons = {
+    SOA : True,
+    NXT : True,
+    DNAME : True,
+    NSEC : True,
+    # CNAME is technically a singleton, but we allow multiple CNAMEs.
+    }
+
+_unknown_type_pattern = re.compile('TYPE([0-9]+)$', re.I);
+
+class UnknownRdatatype(dns.exception.DNSException):
+    """Raised if a type is unknown."""
+    pass
+
+def from_text(text):
+    """Convert text into a DNS rdata type value.
+    @param text: the text
+    @type text: string
+    @raises dns.rdatatype.UnknownRdatatype: the type is unknown
+    @raises ValueError: the rdata type value is not >= 0 and <= 65535
+    @rtype: int"""
+
+    value = _by_text.get(text.upper())
+    if value is None:
+        match = _unknown_type_pattern.match(text)
+        if match == None:
+            raise UnknownRdatatype
+        value = int(match.group(1))
+        if value < 0 or value > 65535:
+            raise ValueError("type must be between >= 0 and <= 65535")
+    return value
+
+def to_text(value):
+    """Convert a DNS rdata type to text.
+    @param value: the rdata type value
+    @type value: int
+    @raises ValueError: the rdata type value is not >= 0 and <= 65535
+    @rtype: string"""
+
+    if value < 0 or value > 65535:
+        raise ValueError("type must be between >= 0 and <= 65535")
+    text = _by_value.get(value)
+    if text is None:
+        text = 'TYPE' + `value`
+    return text
+
+def is_metatype(rdtype):
+    """True if the type is a metatype.
+    @param rdtype: the type
+    @type rdtype: int
+    @rtype: bool"""
+
+    if rdtype >= TKEY and rdtype <= ANY or _metatypes.has_key(rdtype):
+        return True
+    return False
+
+def is_singleton(rdtype):
+    """True if the type is a singleton.
+    @param rdtype: the type
+    @type rdtype: int
+    @rtype: bool"""
+
+    if _singletons.has_key(rdtype):
+        return True
+    return False
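The same conversion pattern for types, sketched under the same import assumption:

```
import dns.rdatatype

assert dns.rdatatype.from_text('AAAA') == dns.rdatatype.AAAA
assert dns.rdatatype.to_text(15) == 'MX'

# Types with no mnemonic round-trip through the generic TYPEnnn form
# (the lookup is case-insensitive).
assert dns.rdatatype.to_text(65280) == 'TYPE65280'
assert dns.rdatatype.from_text('type65280') == 65280

# SOA is a singleton (a second SOA replaces the first in an
# Rdataset); ANY falls in the metatype range used only in queries.
assert dns.rdatatype.is_singleton(dns.rdatatype.SOA)
assert dns.rdatatype.is_metatype(dns.rdatatype.ANY)
```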
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/AFSDB.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/AFSDB.py
new file mode 100644
index 0000000..e8ca6f5
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/AFSDB.py
@@ -0,0 +1,51 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.mxbase
+
+class AFSDB(dns.rdtypes.mxbase.UncompressedDowncasingMX):
+    """AFSDB record
+
+    @ivar subtype: the subtype value
+    @type subtype: int
+    @ivar hostname: the hostname
+    @type hostname: dns.name.Name object"""
+
+    # Use the property mechanism to make "subtype" an alias for the
+    # "preference" attribute, and "hostname" an alias for the "exchange"
+    # attribute.
+    #
+    # This lets us inherit the UncompressedMX implementation but lets
+    # the caller use appropriate attribute names for the rdata type.
+    #
+    # We probably lose some performance vs. a cut-and-paste
+    # implementation, but this way we don't copy code, and that's
+    # good.
+
+    def get_subtype(self):
+        return self.preference
+
+    def set_subtype(self, subtype):
+        self.preference = subtype
+
+    subtype = property(get_subtype, set_subtype)
+
+    def get_hostname(self):
+        return self.exchange
+
+    def set_hostname(self, hostname):
+        self.exchange = hostname
+
+    hostname = property(get_hostname, set_hostname)
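A small illustration of the property aliasing described in the comments above (assuming the vendored dns package, including dns.rdtypes.mxbase, is importable):

```
import dns.rdata
import dns.rdataclass
import dns.rdatatype

rd = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.AFSDB,
                         '1 afsdb.example.com.')
# "subtype" and "hostname" are aliases for the inherited
# "preference" and "exchange" attributes.
assert rd.subtype == 1 and rd.preference == 1
assert rd.hostname is rd.exchange
assert str(rd.hostname) == 'afsdb.example.com.'
```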
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/CERT.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/CERT.py
new file mode 100644
index 0000000..d270351
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/CERT.py
@@ -0,0 +1,131 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import cStringIO
+import struct
+
+import dns.exception
+import dns.dnssec
+import dns.rdata
+import dns.tokenizer
+
+_ctype_by_value = {
+    1 : 'PKIX',
+    2 : 'SPKI',
+    3 : 'PGP',
+    253 : 'URI',
+    254 : 'OID',
+    }
+
+_ctype_by_name = {
+    'PKIX' : 1,
+    'SPKI' : 2,
+    'PGP' : 3,
+    'URI' : 253,
+    'OID' : 254,
+    }
+
+def _ctype_from_text(what):
+    v = _ctype_by_name.get(what)
+    if not v is None:
+        return v
+    return int(what)
+
+def _ctype_to_text(what):
+    v = _ctype_by_value.get(what)
+    if not v is None:
+        return v
+    return str(what)
+
+class CERT(dns.rdata.Rdata):
+    """CERT record
+
+    @ivar certificate_type: certificate type
+    @type certificate_type: int
+    @ivar key_tag: key tag
+    @type key_tag: int
+    @ivar algorithm: algorithm
+    @type algorithm: int
+    @ivar certificate: the certificate or CRL
+    @type certificate: string
+    @see: RFC 2538"""
+
+    __slots__ = ['certificate_type', 'key_tag', 'algorithm', 'certificate']
+
+    def __init__(self, rdclass, rdtype, certificate_type, key_tag, algorithm,
+                 certificate):
+        super(CERT, self).__init__(rdclass, rdtype)
+        self.certificate_type = certificate_type
+        self.key_tag = key_tag
+        self.algorithm = algorithm
+        self.certificate = certificate
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        certificate_type = _ctype_to_text(self.certificate_type)
+        return "%s %d %s %s" % (certificate_type, self.key_tag,
+                                dns.dnssec.algorithm_to_text(self.algorithm),
+                                dns.rdata._base64ify(self.certificate))
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        certificate_type = _ctype_from_text(tok.get_string())
+        key_tag = tok.get_uint16()
+        algorithm = dns.dnssec.algorithm_from_text(tok.get_string())
+        if algorithm < 0 or algorithm > 255:
+            raise dns.exception.SyntaxError("bad algorithm type")
+        chunks = []
+        while 1:
+            t = tok.get().unescape()
+            if t.is_eol_or_eof():
+                break
+            if not t.is_identifier():
+                raise dns.exception.SyntaxError
+            chunks.append(t.value)
+        b64 = ''.join(chunks)
+        certificate = b64.decode('base64_codec')
+        return cls(rdclass, rdtype, certificate_type, key_tag,
+                   algorithm, certificate)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        prefix = struct.pack("!HHB", self.certificate_type, self.key_tag,
+                             self.algorithm)
+        file.write(prefix)
+        file.write(self.certificate)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        prefix = wire[current : current + 5]
+        current += 5
+        rdlen -= 5
+        if rdlen < 0:
+            raise dns.exception.FormError
+        (certificate_type, key_tag, algorithm) = struct.unpack("!HHB", prefix)
+        certificate = wire[current : current + rdlen]
+        return cls(rdclass, rdtype, certificate_type, key_tag, algorithm,
+                   certificate)
+
+    from_wire = classmethod(from_wire)
+
+    def _cmp(self, other):
+        f = cStringIO.StringIO()
+        self.to_wire(f)
+        wire1 = f.getvalue()
+        f.seek(0)
+        f.truncate()
+        other.to_wire(f)
+        wire2 = f.getvalue()
+        f.close()
+
+        return cmp(wire1, wire2)
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/CNAME.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/CNAME.py
new file mode 100644
index 0000000..7f5c4b3
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/CNAME.py
@@ -0,0 +1,24 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.nsbase
+
+class CNAME(dns.rdtypes.nsbase.NSBase):
+    """CNAME record
+
+    Note: although CNAME is officially a singleton type, dnspython allows
+    non-singleton CNAME rdatasets because such sets have been commonly
+    used by BIND and other nameservers for load balancing."""
+    pass
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/DLV.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/DLV.py
new file mode 100644
index 0000000..07b9548
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/DLV.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.dsbase
+
+class DLV(dns.rdtypes.dsbase.DSBase):
+    """DLV record"""
+    pass
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/DNAME.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/DNAME.py
new file mode 100644
index 0000000..99b5013
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/DNAME.py
@@ -0,0 +1,21 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.nsbase
+
+class DNAME(dns.rdtypes.nsbase.UncompressedNS):
+    """DNAME record"""
+    def to_digestable(self, origin = None):
+        return self.target.to_digestable(origin)
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/DNSKEY.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/DNSKEY.py
new file mode 100644
index 0000000..ad66ef0
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/DNSKEY.py
@@ -0,0 +1,25 @@
+# Copyright (C) 2004-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.keybase
+
+# flag constants
+SEP = 0x0001
+REVOKE = 0x0080
+ZONE = 0x0100
+
+class DNSKEY(dns.rdtypes.keybase.KEYBase):
+    """DNSKEY record"""
+    pass
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/DS.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/DS.py
new file mode 100644
index 0000000..3a06f44
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/DS.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.dsbase
+
+class DS(dns.rdtypes.dsbase.DSBase):
+    """DS record"""
+    pass
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/GPOS.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/GPOS.py
new file mode 100644
index 0000000..6f63cc0
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/GPOS.py
@@ -0,0 +1,156 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.exception
+import dns.rdata
+import dns.tokenizer
+
+def _validate_float_string(what):
+    if what[0] == '-' or what[0] == '+':
+        what = what[1:]
+    if what.isdigit():
+        return
+    (left, right) = what.split('.')
+    if left == '' and right == '':
+        raise dns.exception.FormError
+    if not left == '' and not left.isdigit():
+        raise dns.exception.FormError
+    if not right == '' and not right.isdigit():
+        raise dns.exception.FormError
+    
+class GPOS(dns.rdata.Rdata):
+    """GPOS record
+
+    @ivar latitude: latitude
+    @type latitude: string
+    @ivar longitude: longitude
+    @type longitude: string
+    @ivar altitude: altitude
+    @type altitude: string
+    @see: RFC 1712"""
+
+    __slots__ = ['latitude', 'longitude', 'altitude']
+    
+    def __init__(self, rdclass, rdtype, latitude, longitude, altitude):
+        super(GPOS, self).__init__(rdclass, rdtype)
+        if isinstance(latitude, float) or \
+           isinstance(latitude, int) or \
+           isinstance(latitude, long):
+            latitude = str(latitude)
+        if isinstance(longitude, float) or \
+           isinstance(longitude, int) or \
+           isinstance(longitude, long):
+            longitude = str(longitude)
+        if isinstance(altitude, float) or \
+           isinstance(altitude, int) or \
+           isinstance(altitude, long):
+            altitude = str(altitude)
+        _validate_float_string(latitude)
+        _validate_float_string(longitude)
+        _validate_float_string(altitude)
+        self.latitude = latitude
+        self.longitude = longitude
+        self.altitude = altitude
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        return '%s %s %s' % (self.latitude, self.longitude, self.altitude)
+        
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        latitude = tok.get_string()
+        longitude = tok.get_string()
+        altitude = tok.get_string()
+        tok.get_eol()
+        return cls(rdclass, rdtype, latitude, longitude, altitude)
+    
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        l = len(self.latitude)
+        assert l < 256
+        byte = chr(l)
+        file.write(byte)
+        file.write(self.latitude)
+        l = len(self.longitude)
+        assert l < 256
+        byte = chr(l)
+        file.write(byte)
+        file.write(self.longitude)
+        l = len(self.altitude)
+        assert l < 256
+        byte = chr(l)
+        file.write(byte)
+        file.write(self.altitude)
+        
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        l = ord(wire[current])
+        current += 1
+        rdlen -= 1
+        if l > rdlen:
+            raise dns.exception.FormError
+        latitude = wire[current : current + l]
+        current += l
+        rdlen -= l
+        l = ord(wire[current])
+        current += 1
+        rdlen -= 1
+        if l > rdlen:
+            raise dns.exception.FormError
+        longitude = wire[current : current + l]
+        current += l
+        rdlen -= l
+        l = ord(wire[current])
+        current += 1
+        rdlen -= 1
+        if l != rdlen:
+            raise dns.exception.FormError
+        altitude = wire[current : current + l]
+        return cls(rdclass, rdtype, latitude, longitude, altitude)
+
+    from_wire = classmethod(from_wire)
+
+    def _cmp(self, other):
+        v = cmp(self.latitude, other.latitude)
+        if v == 0:
+            v = cmp(self.longitude, other.longitude)
+            if v == 0:
+                v = cmp(self.altitude, other.altitude)
+        return v
+
+    def _get_float_latitude(self):
+        return float(self.latitude)
+
+    def _set_float_latitude(self, value):
+        self.latitude = str(value)
+
+    float_latitude = property(_get_float_latitude, _set_float_latitude,
+                              doc="latitude as a floating point value")
+
+    def _get_float_longitude(self):
+        return float(self.longitude)
+
+    def _set_float_longitude(self, value):
+        self.longitude = str(value)
+
+    float_longitude = property(_get_float_longitude, _set_float_longitude,
+                               doc="longitude as a floating point value")
+
+    def _get_float_altitude(self):
+        return float(self.altitude)
+
+    def _set_float_altitude(self, value):
+        self.altitude = str(value)
+
+    float_altitude = property(_get_float_altitude, _set_float_altitude,
+                              doc="altitude as a floating point value")
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/HINFO.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/HINFO.py
new file mode 100644
index 0000000..e592ad3
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/HINFO.py
@@ -0,0 +1,83 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.exception
+import dns.rdata
+import dns.tokenizer
+
+class HINFO(dns.rdata.Rdata):
+    """HINFO record
+
+    @ivar cpu: the CPU type
+    @type cpu: string
+    @ivar os: the OS type
+    @type os: string
+    @see: RFC 1035"""
+
+    __slots__ = ['cpu', 'os']
+    
+    def __init__(self, rdclass, rdtype, cpu, os):
+        super(HINFO, self).__init__(rdclass, rdtype)
+        self.cpu = cpu
+        self.os = os
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        return '"%s" "%s"' % (dns.rdata._escapify(self.cpu),
+                              dns.rdata._escapify(self.os))
+        
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        cpu = tok.get_string()
+        os = tok.get_string()
+        tok.get_eol()
+        return cls(rdclass, rdtype, cpu, os)
+    
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        l = len(self.cpu)
+        assert l < 256
+        byte = chr(l)
+        file.write(byte)
+        file.write(self.cpu)
+        l = len(self.os)
+        assert l < 256
+        byte = chr(l)
+        file.write(byte)
+        file.write(self.os)
+        
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        l = ord(wire[current])
+        current += 1
+        rdlen -= 1
+        if l > rdlen:
+            raise dns.exception.FormError
+        cpu = wire[current : current + l]
+        current += l
+        rdlen -= l
+        l = ord(wire[current])
+        current += 1
+        rdlen -= 1
+        if l != rdlen:
+            raise dns.exception.FormError
+        os = wire[current : current + l]
+        return cls(rdclass, rdtype, cpu, os)
+
+    from_wire = classmethod(from_wire)
+
+    def _cmp(self, other):
+        v = cmp(self.cpu, other.cpu)
+        if v == 0:
+            v = cmp(self.os, other.os)
+        return v
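A small round-trip sketch of the length-prefixed character-string layout that to_wire() above emits (Python 2, matching the vendored code, and assuming the dns package is importable):

```
import cStringIO

import dns.rdata
import dns.rdataclass
import dns.rdatatype

rd = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.HINFO,
                         '"SPARC" "Solaris"')
f = cStringIO.StringIO()
rd.to_wire(f)
# Each character-string is a one-byte length followed by its bytes.
assert f.getvalue() == '\x05SPARC\x07Solaris'
```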
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/HIP.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/HIP.py
new file mode 100644
index 0000000..8f96ae9
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/HIP.py
@@ -0,0 +1,140 @@
+# Copyright (C) 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import cStringIO
+import string
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.rdatatype
+
+class HIP(dns.rdata.Rdata):
+    """HIP record
+
+    @ivar hit: the host identity tag
+    @type hit: string
+    @ivar algorithm: the public key cryptographic algorithm
+    @type algorithm: int
+    @ivar key: the public key
+    @type key: string
+    @ivar servers: the rendezvous servers
+    @type servers: list of dns.name.Name objects
+    @see: RFC 5205"""
+
+    __slots__ = ['hit', 'algorithm', 'key', 'servers']
+
+    def __init__(self, rdclass, rdtype, hit, algorithm, key, servers):
+        super(HIP, self).__init__(rdclass, rdtype)
+        self.hit = hit
+        self.algorithm = algorithm
+        self.key = key
+        self.servers = servers
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        hit = self.hit.encode('hex-codec')
+        key = self.key.encode('base64-codec').replace('\n', '')
+        text = ''
+        servers = []
+        for server in self.servers:
+            servers.append(str(server.choose_relativity(origin, relativize)))
+        if len(servers) > 0:
+            text += (' ' + ' '.join(servers))
+        return '%u %s %s%s' % (self.algorithm, hit, key, text)
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        algorithm = tok.get_uint8()
+        hit = tok.get_string().decode('hex-codec')
+        if len(hit) > 255:
+            raise dns.exception.SyntaxError("HIT too long")
+        key = tok.get_string().decode('base64-codec')
+        servers = []
+        while 1:
+            token = tok.get()
+            if token.is_eol_or_eof():
+                break
+            server = dns.name.from_text(token.value, origin)
+            server.choose_relativity(origin, relativize)
+            servers.append(server)
+        return cls(rdclass, rdtype, hit, algorithm, key, servers)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        lh = len(self.hit)
+        lk = len(self.key)
+        file.write(struct.pack("!BBH", lh, self.algorithm, lk))
+        file.write(self.hit)
+        file.write(self.key)
+        for server in self.servers:
+            server.to_wire(file, None, origin)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        (lh, algorithm, lk) = struct.unpack('!BBH',
+                                            wire[current : current + 4])
+        current += 4
+        rdlen -= 4
+        hit = wire[current : current + lh]
+        current += lh
+        rdlen -= lh
+        key = wire[current : current + lk]
+        current += lk
+        rdlen -= lk
+        servers = []
+        while rdlen > 0:
+            (server, cused) = dns.name.from_wire(wire[: current + rdlen],
+                                                 current)
+            current += cused
+            rdlen -= cused
+            if not origin is None:
+                server = server.relativize(origin)
+            servers.append(server)
+        return cls(rdclass, rdtype, hit, algorithm, key, servers)
+
+    from_wire = classmethod(from_wire)
+
+    def choose_relativity(self, origin = None, relativize = True):
+        servers = []
+        for server in self.servers:
+            server = server.choose_relativity(origin, relativize)
+            servers.append(server)
+        self.servers = servers
+
+    def _cmp(self, other):
+        b1 = cStringIO.StringIO()
+        lh = len(self.hit)
+        lk = len(self.key)
+        b1.write(struct.pack("!BBH", lh, self.algorithm, lk))
+        b1.write(self.hit)
+        b1.write(self.key)
+        b2 = cStringIO.StringIO()
+        lh = len(other.hit)
+        lk = len(other.key)
+        b2.write(struct.pack("!BBH", lh, other.algorithm, lk))
+        b2.write(other.hit)
+        b2.write(other.key)
+        v = cmp(b1.getvalue(), b2.getvalue())
+        if v != 0:
+            return v
+        ls = len(self.servers)
+        lo = len(other.servers)
+        count = min(ls, lo)
+        i = 0
+        while i < count:
+            v = cmp(self.servers[i], other.servers[i])
+            if v != 0:
+                return v
+            i += 1
+        return ls - lo
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/ISDN.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/ISDN.py
new file mode 100644
index 0000000..424d3a9
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/ISDN.py
@@ -0,0 +1,96 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.exception
+import dns.rdata
+import dns.tokenizer
+
+class ISDN(dns.rdata.Rdata):
+    """ISDN record
+
+    @ivar address: the ISDN address
+    @type address: string
+    @ivar subaddress: the ISDN subaddress (or '' if not present)
+    @type subaddress: string
+    @see: RFC 1183"""
+
+    __slots__ = ['address', 'subaddress']
+
+    def __init__(self, rdclass, rdtype, address, subaddress):
+        super(ISDN, self).__init__(rdclass, rdtype)
+        self.address = address
+        self.subaddress = subaddress
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        if self.subaddress:
+            return '"%s" "%s"' % (dns.rdata._escapify(self.address),
+                                  dns.rdata._escapify(self.subaddress))
+        else:
+            return '"%s"' % dns.rdata._escapify(self.address)
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        address = tok.get_string()
+        t = tok.get()
+        if not t.is_eol_or_eof():
+            tok.unget(t)
+            subaddress = tok.get_string()
+        else:
+            tok.unget(t)
+            subaddress = ''
+        tok.get_eol()
+        return cls(rdclass, rdtype, address, subaddress)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        l = len(self.address)
+        assert l < 256
+        byte = chr(l)
+        file.write(byte)
+        file.write(self.address)
+        l = len(self.subaddress)
+        if l > 0:
+            assert l < 256
+            byte = chr(l)
+            file.write(byte)
+            file.write(self.subaddress)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        l = ord(wire[current])
+        current += 1
+        rdlen -= 1
+        if l > rdlen:
+            raise dns.exception.FormError
+        address = wire[current : current + l]
+        current += l
+        rdlen -= l
+        if rdlen > 0:
+            l = ord(wire[current])
+            current += 1
+            rdlen -= 1
+            if l != rdlen:
+                raise dns.exception.FormError
+            subaddress = wire[current : current + l]
+        else:
+            subaddress = ''
+        return cls(rdclass, rdtype, address, subaddress)
+
+    from_wire = classmethod(from_wire)
+
+    def _cmp(self, other):
+        v = cmp(self.address, other.address)
+        if v == 0:
+            v = cmp(self.subaddress, other.subaddress)
+        return v
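
For orientation, a minimal usage sketch for the ISDN type added above (not part of the patch; it assumes a Python 2 interpreter with this vendored dnspython tree importable as dns, so that dns.rdata, dns.rdataclass and dns.rdatatype resolve):

import dns.rdata
import dns.rdataclass
import dns.rdatatype

# from_text() accepts the zone-file form; the subaddress is optional.
rd = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.ISDN,
                         '"150862028003217" "004"')
print rd.address, rd.subaddress   # 150862028003217 004
print rd.to_text()                # "150862028003217" "004"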
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/KEY.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/KEY.py
new file mode 100644
index 0000000..c8581ed
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/KEY.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.keybase
+
+class KEY(dns.rdtypes.keybase.KEYBase):
+    """KEY record"""
+    pass
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/LOC.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/LOC.py
new file mode 100644
index 0000000..518dd60
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/LOC.py
@@ -0,0 +1,334 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import cStringIO
+import struct
+
+import dns.exception
+import dns.rdata
+
+_pows = (1L, 10L, 100L, 1000L, 10000L, 100000L, 1000000L, 10000000L,
+         100000000L, 1000000000L, 10000000000L)
+
+def _exponent_of(what, desc):
+    exp = None
+    for i in xrange(len(_pows)):
+        if what // _pows[i] == 0L:
+            exp = i - 1
+            break
+    if exp is None or exp < 0:
+        raise dns.exception.SyntaxError("%s value out of bounds" % desc)
+    return exp
+
+def _float_to_tuple(what):
+    if what < 0:
+        sign = -1
+        what *= -1
+    else:
+        sign = 1
+    what = long(round(what * 3600000))
+    degrees = int(what // 3600000)
+    what -= degrees * 3600000
+    minutes = int(what // 60000)
+    what -= minutes * 60000
+    seconds = int(what // 1000)
+    what -= int(seconds * 1000)
+    what = int(what)
+    return (degrees * sign, minutes, seconds, what)
+
+def _tuple_to_float(what):
+    if what[0] < 0:
+        sign = -1
+        value = float(what[0]) * -1
+    else:
+        sign = 1
+        value = float(what[0])
+    value += float(what[1]) / 60.0
+    value += float(what[2]) / 3600.0
+    value += float(what[3]) / 3600000.0
+    return sign * value
+
+def _encode_size(what, desc):
+    what = long(what)
+    exponent = _exponent_of(what, desc) & 0xF
+    base = what // pow(10, exponent) & 0xF
+    return base * 16 + exponent
+
+def _decode_size(what, desc):
+    exponent = what & 0x0F
+    if exponent > 9:
+        raise dns.exception.SyntaxError("bad %s exponent" % desc)
+    base = (what & 0xF0) >> 4
+    if base > 9:
+        raise dns.exception.SyntaxError("bad %s base" % desc)
+    return long(base) * pow(10, exponent)
+
+class LOC(dns.rdata.Rdata):
+    """LOC record
+
+    @ivar latitude: latitude
+    @type latitude: (int, int, int, int) tuple specifying the degrees, minutes,
+    seconds, and milliseconds of the coordinate.
+    @ivar longitude: longitude
+    @type longitude: (int, int, int, int) tuple specifying the degrees,
+    minutes, seconds, and milliseconds of the coordinate.
+    @ivar altitude: altitude
+    @type altitude: float
+    @ivar size: size of the sphere
+    @type size: float
+    @ivar horizontal_precision: horizontal precision
+    @type horizontal_precision: float
+    @ivar vertical_precision: vertical precision
+    @type vertical_precision: float
+    @see: RFC 1876"""
+
+    __slots__ = ['latitude', 'longitude', 'altitude', 'size',
+                 'horizontal_precision', 'vertical_precision']
+
+    def __init__(self, rdclass, rdtype, latitude, longitude, altitude,
+                 size=1.0, hprec=10000.0, vprec=10.0):
+        """Initialize a LOC record instance.
+
+        The parameters I{latitude} and I{longitude} may be either a 4-tuple
+        of integers specifying (degrees, minutes, seconds, milliseconds),
+        or they may be floating point values specifying the number of
+        degrees.  The other parameters are floats."""
+
+        super(LOC, self).__init__(rdclass, rdtype)
+        if isinstance(latitude, int) or isinstance(latitude, long):
+            latitude = float(latitude)
+        if isinstance(latitude, float):
+            latitude = _float_to_tuple(latitude)
+        self.latitude = latitude
+        if isinstance(longitude, int) or isinstance(longitude, long):
+            longitude = float(longitude)
+        if isinstance(longitude, float):
+            longitude = _float_to_tuple(longitude)
+        self.longitude = longitude
+        self.altitude = float(altitude)
+        self.size = float(size)
+        self.horizontal_precision = float(hprec)
+        self.vertical_precision = float(vprec)
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        if self.latitude[0] > 0:
+            lat_hemisphere = 'N'
+            lat_degrees = self.latitude[0]
+        else:
+            lat_hemisphere = 'S'
+            lat_degrees = -1 * self.latitude[0]
+        if self.longitude[0] > 0:
+            long_hemisphere = 'E'
+            long_degrees = self.longitude[0]
+        else:
+            long_hemisphere = 'W'
+            long_degrees = -1 * self.longitude[0]
+        text = "%d %d %d.%03d %s %d %d %d.%03d %s %0.2fm" % (
+            lat_degrees, self.latitude[1], self.latitude[2], self.latitude[3],
+            lat_hemisphere, long_degrees, self.longitude[1], self.longitude[2],
+            self.longitude[3], long_hemisphere, self.altitude / 100.0
+            )
+
+        if self.size != 1.0 or self.horizontal_precision != 10000.0 or \
+           self.vertical_precision != 10.0:
+            text += " %0.2fm %0.2fm %0.2fm" % (
+                self.size / 100.0, self.horizontal_precision / 100.0,
+                self.vertical_precision / 100.0
+            )
+        return text
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        latitude = [0, 0, 0, 0]
+        longitude = [0, 0, 0, 0]
+        size = 1.0
+        hprec = 10000.0
+        vprec = 10.0
+
+        latitude[0] = tok.get_int()
+        t = tok.get_string()
+        if t.isdigit():
+            latitude[1] = int(t)
+            t = tok.get_string()
+            if '.' in t:
+                (seconds, milliseconds) = t.split('.')
+                if not seconds.isdigit():
+                    raise dns.exception.SyntaxError('bad latitude seconds value')
+                latitude[2] = int(seconds)
+                if latitude[2] >= 60:
+                    raise dns.exception.SyntaxError('latitude seconds >= 60')
+                l = len(milliseconds)
+                if l == 0 or l > 3 or not milliseconds.isdigit():
+                    raise dns.exception.SyntaxError('bad latitude milliseconds value')
+                if l == 1:
+                    m = 100
+                elif l == 2:
+                    m = 10
+                else:
+                    m = 1
+                latitude[3] = m * int(milliseconds)
+                t = tok.get_string()
+            elif t.isdigit():
+                latitude[2] = int(t)
+                t = tok.get_string()
+        if t == 'S':
+            latitude[0] *= -1
+        elif t != 'N':
+            raise dns.exception.SyntaxError('bad latitude hemisphere value')
+
+        longitude[0] = tok.get_int()
+        t = tok.get_string()
+        if t.isdigit():
+            longitude[1] = int(t)
+            t = tok.get_string()
+            if '.' in t:
+                (seconds, milliseconds) = t.split('.')
+                if not seconds.isdigit():
+                    raise dns.exception.SyntaxError('bad longitude seconds value')
+                longitude[2] = int(seconds)
+                if longitude[2] >= 60:
+                    raise dns.exception.SyntaxError('longitude seconds >= 60')
+                l = len(milliseconds)
+                if l == 0 or l > 3 or not milliseconds.isdigit():
+                    raise dns.exception.SyntaxError('bad longitude milliseconds value')
+                if l == 1:
+                    m = 100
+                elif l == 2:
+                    m = 10
+                else:
+                    m = 1
+                longitude[3] = m * int(milliseconds)
+                t = tok.get_string()
+            elif t.isdigit():
+                longitude[2] = int(t)
+                t = tok.get_string()
+        if t == 'W':
+            longitude[0] *= -1
+        elif t != 'E':
+            raise dns.exception.SyntaxError('bad longitude hemisphere value')
+
+        t = tok.get_string()
+        if t[-1] == 'm':
+            t = t[0 : -1]
+        altitude = float(t) * 100.0	# m -> cm
+
+        token = tok.get().unescape()
+        if not token.is_eol_or_eof():
+            value = token.value
+            if value[-1] == 'm':
+                value = value[0 : -1]
+            size = float(value) * 100.0	# m -> cm
+            token = tok.get().unescape()
+            if not token.is_eol_or_eof():
+                value = token.value
+                if value[-1] == 'm':
+                    value = value[0 : -1]
+                hprec = float(value) * 100.0	# m -> cm
+                token = tok.get().unescape()
+                if not token.is_eol_or_eof():
+                    value = token.value
+                    if value[-1] == 'm':
+                        value = value[0 : -1]
+                    vprec = float(value) * 100.0    # m -> cm
+                    tok.get_eol()
+
+        return cls(rdclass, rdtype, latitude, longitude, altitude,
+                   size, hprec, vprec)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        if self.latitude[0] < 0:
+            sign = -1
+            degrees = long(-1 * self.latitude[0])
+        else:
+            sign = 1
+            degrees = long(self.latitude[0])
+        milliseconds = (degrees * 3600000 +
+                        self.latitude[1] * 60000 +
+                        self.latitude[2] * 1000 +
+                        self.latitude[3]) * sign
+        latitude = 0x80000000L + milliseconds
+        if self.longitude[0] < 0:
+            sign = -1
+            degrees = long(-1 * self.longitude[0])
+        else:
+            sign = 1
+            degrees = long(self.longitude[0])
+        milliseconds = (degrees * 3600000 +
+                        self.longitude[1] * 60000 +
+                        self.longitude[2] * 1000 +
+                        self.longitude[3]) * sign
+        longitude = 0x80000000L + milliseconds
+        altitude = long(self.altitude) + 10000000L
+        size = _encode_size(self.size, "size")
+        hprec = _encode_size(self.horizontal_precision, "horizontal precision")
+        vprec = _encode_size(self.vertical_precision, "vertical precision")
+        wire = struct.pack("!BBBBIII", 0, size, hprec, vprec, latitude,
+                           longitude, altitude)
+        file.write(wire)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        (version, size, hprec, vprec, latitude, longitude, altitude) = \
+                  struct.unpack("!BBBBIII", wire[current : current + rdlen])
+        if latitude > 0x80000000L:
+            latitude = float(latitude - 0x80000000L) / 3600000
+        else:
+            latitude = -1 * float(0x80000000L - latitude) / 3600000
+        if latitude < -90.0 or latitude > 90.0:
+            raise dns.exception.FormError("bad latitude")
+        if longitude > 0x80000000L:
+            longitude = float(longitude - 0x80000000L) / 3600000
+        else:
+            longitude = -1 * float(0x80000000L - longitude) / 3600000
+        if longitude < -180.0 or longitude > 180.0:
+            raise dns.exception.FormError("bad longitude")
+        altitude = float(altitude) - 10000000.0
+        size = _decode_size(size, "size")
+        hprec = _decode_size(hprec, "horizontal precision")
+        vprec = _decode_size(vprec, "vertical precision")
+        return cls(rdclass, rdtype, latitude, longitude, altitude,
+                   size, hprec, vprec)
+
+    from_wire = classmethod(from_wire)
+
+    def _cmp(self, other):
+        f = cStringIO.StringIO()
+        self.to_wire(f)
+        wire1 = f.getvalue()
+        f.seek(0)
+        f.truncate()
+        other.to_wire(f)
+        wire2 = f.getvalue()
+        f.close()
+
+        return cmp(wire1, wire2)
+
+    def _get_float_latitude(self):
+        return _tuple_to_float(self.latitude)
+
+    def _set_float_latitude(self, value):
+        self.latitude = _float_to_tuple(value)
+
+    float_latitude = property(_get_float_latitude, _set_float_latitude,
+                              doc="latitude as a floating point value")
+
+    def _get_float_longitude(self):
+        return _tuple_to_float(self.longitude)
+
+    def _set_float_longitude(self, value):
+        self.longitude = _float_to_tuple(value)
+
+    float_longitude = property(_get_float_longitude, _set_float_longitude,
+                               doc="longitude as a floating point value")
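
A similar hedged sketch for LOC, under the same Python 2 / import assumptions; the float_latitude and float_longitude properties defined above convert the (degrees, minutes, seconds, milliseconds) tuples back to decimal degrees:

import dns.rdata
import dns.rdataclass
import dns.rdatatype

loc = dns.rdata.from_text(
    dns.rdataclass.IN, dns.rdatatype.LOC,
    '37 23 30.500 N 121 59 19.000 W 7.00m 100m 100m 2m')
print loc.float_latitude   # ~37.3918, rebuilt from the degree tuple
print loc.altitude         # 700.0 (stored internally in centimetres)
print loc.to_text()        # re-emits metres, including size and precisions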
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/MX.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/MX.py
new file mode 100644
index 0000000..9cad260
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/MX.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.mxbase
+
+class MX(dns.rdtypes.mxbase.MXBase):
+    """MX record"""
+    pass
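
MX itself is only a thin subclass; the parsing lives in dns.rdtypes.mxbase.MXBase, which is not part of this hunk. A hedged sketch, assuming MXBase exposes the conventional preference and exchange attributes:

import dns.rdata
import dns.rdataclass
import dns.rdatatype

mx = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.MX,
                         '10 mail.example.com.')
print mx.preference, mx.exchange   # 10 mail.example.com.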
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/NS.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/NS.py
new file mode 100644
index 0000000..4b03a3a
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/NS.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.nsbase
+
+class NS(dns.rdtypes.nsbase.NSBase):
+    """NS record"""
+    pass
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/NSEC.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/NSEC.py
new file mode 100644
index 0000000..72859ce
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/NSEC.py
@@ -0,0 +1,141 @@
+# Copyright (C) 2004-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import cStringIO
+
+import dns.exception
+import dns.rdata
+import dns.rdatatype
+import dns.name
+
+class NSEC(dns.rdata.Rdata):
+    """NSEC record
+
+    @ivar next: the next name
+    @type next: dns.name.Name object
+    @ivar windows: the windowed bitmap list
+    @type windows: list of (window number, string) tuples"""
+
+    __slots__ = ['next', 'windows']
+
+    def __init__(self, rdclass, rdtype, next, windows):
+        super(NSEC, self).__init__(rdclass, rdtype)
+        self.next = next
+        self.windows = windows
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        next = self.next.choose_relativity(origin, relativize)
+        text = ''
+        for (window, bitmap) in self.windows:
+            bits = []
+            for i in xrange(0, len(bitmap)):
+                byte = ord(bitmap[i])
+                for j in xrange(0, 8):
+                    if byte & (0x80 >> j):
+                        bits.append(dns.rdatatype.to_text(window * 256 + \
+                                                          i * 8 + j))
+            text += (' ' + ' '.join(bits))
+        return '%s%s' % (next, text)
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        next = tok.get_name()
+        next = next.choose_relativity(origin, relativize)
+        rdtypes = []
+        while 1:
+            token = tok.get().unescape()
+            if token.is_eol_or_eof():
+                break
+            nrdtype = dns.rdatatype.from_text(token.value)
+            if nrdtype == 0:
+                raise dns.exception.SyntaxError("NSEC with bit 0")
+            if nrdtype > 65535:
+                raise dns.exception.SyntaxError("NSEC with bit > 65535")
+            rdtypes.append(nrdtype)
+        rdtypes.sort()
+        window = 0
+        octets = 0
+        prior_rdtype = 0
+        bitmap = ['\0'] * 32
+        windows = []
+        for nrdtype in rdtypes:
+            if nrdtype == prior_rdtype:
+                continue
+            prior_rdtype = nrdtype
+            new_window = nrdtype // 256
+            if new_window != window:
+                windows.append((window, ''.join(bitmap[0:octets])))
+                bitmap = ['\0'] * 32
+                window = new_window
+            offset = nrdtype % 256
+            byte = offset / 8
+            bit = offset % 8
+            octets = byte + 1
+            bitmap[byte] = chr(ord(bitmap[byte]) | (0x80 >> bit))
+        windows.append((window, ''.join(bitmap[0:octets])))
+        return cls(rdclass, rdtype, next, windows)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        self.next.to_wire(file, None, origin)
+        for (window, bitmap) in self.windows:
+            file.write(chr(window))
+            file.write(chr(len(bitmap)))
+            file.write(bitmap)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        (next, cused) = dns.name.from_wire(wire[: current + rdlen], current)
+        current += cused
+        rdlen -= cused
+        windows = []
+        while rdlen > 0:
+            if rdlen < 3:
+                raise dns.exception.FormError("NSEC too short")
+            window = ord(wire[current])
+            octets = ord(wire[current + 1])
+            if octets == 0 or octets > 32:
+                raise dns.exception.FormError("bad NSEC octets")
+            current += 2
+            rdlen -= 2
+            if rdlen < octets:
+                raise dns.exception.FormError("bad NSEC bitmap length")
+            bitmap = wire[current : current + octets]
+            current += octets
+            rdlen -= octets
+            windows.append((window, bitmap))
+        if not origin is None:
+            next = next.relativize(origin)
+        return cls(rdclass, rdtype, next, windows)
+
+    from_wire = classmethod(from_wire)
+
+    def choose_relativity(self, origin = None, relativize = True):
+        self.next = self.next.choose_relativity(origin, relativize)
+
+    def _cmp(self, other):
+        v = cmp(self.next, other.next)
+        if v == 0:
+            b1 = cStringIO.StringIO()
+            for (window, bitmap) in self.windows:
+                b1.write(chr(window))
+                b1.write(chr(len(bitmap)))
+                b1.write(bitmap)
+            b2 = cStringIO.StringIO()
+            for (window, bitmap) in other.windows:
+                b2.write(chr(window))
+                b2.write(chr(len(bitmap)))
+                b2.write(bitmap)
+            v = cmp(b1.getvalue(), b2.getvalue())
+        return v
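
Sketch (same assumptions as the earlier examples) of how from_text above folds the listed type mnemonics into the windowed bitmap described in the docstring:

import dns.rdata
import dns.rdataclass
import dns.rdatatype

nsec = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NSEC,
                           'host.example.com. A MX RRSIG NSEC')
print nsec.next       # host.example.com.
print nsec.windows    # one (window, bitmap) tuple covering A, MX, RRSIG, NSEC
print nsec.to_text()  # host.example.com. A MX RRSIG NSEC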
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/NSEC3.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/NSEC3.py
new file mode 100644
index 0000000..932d7b4
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/NSEC3.py
@@ -0,0 +1,182 @@
+# Copyright (C) 2004-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import base64
+import cStringIO
+import string
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.rdatatype
+
+b32_hex_to_normal = string.maketrans('0123456789ABCDEFGHIJKLMNOPQRSTUV',
+                                     'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567')
+b32_normal_to_hex = string.maketrans('ABCDEFGHIJKLMNOPQRSTUVWXYZ234567',
+                                     '0123456789ABCDEFGHIJKLMNOPQRSTUV')
+
+# hash algorithm constants
+SHA1 = 1
+
+# flag constants
+OPTOUT = 1
+
+class NSEC3(dns.rdata.Rdata):
+    """NSEC3 record
+
+    @ivar algorithm: the hash algorithm number
+    @type algorithm: int
+    @ivar flags: the flags
+    @type flags: int
+    @ivar iterations: the number of iterations
+    @type iterations: int
+    @ivar salt: the salt
+    @type salt: string
+    @ivar next: the next name hash
+    @type next: string
+    @ivar windows: the windowed bitmap list
+    @type windows: list of (window number, string) tuples"""
+
+    __slots__ = ['algorithm', 'flags', 'iterations', 'salt', 'next', 'windows']
+
+    def __init__(self, rdclass, rdtype, algorithm, flags, iterations, salt,
+                 next, windows):
+        super(NSEC3, self).__init__(rdclass, rdtype)
+        self.algorithm = algorithm
+        self.flags = flags
+        self.iterations = iterations
+        self.salt = salt
+        self.next = next
+        self.windows = windows
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        next = base64.b32encode(self.next).translate(b32_normal_to_hex).lower()
+        if self.salt == '':
+            salt = '-'
+        else:
+            salt = self.salt.encode('hex-codec')
+        text = ''
+        for (window, bitmap) in self.windows:
+            bits = []
+            for i in xrange(0, len(bitmap)):
+                byte = ord(bitmap[i])
+                for j in xrange(0, 8):
+                    if byte & (0x80 >> j):
+                        bits.append(dns.rdatatype.to_text(window * 256 + \
+                                                          i * 8 + j))
+            text += (' ' + ' '.join(bits))
+        return '%u %u %u %s %s%s' % (self.algorithm, self.flags, self.iterations,
+                                     salt, next, text)
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        algorithm = tok.get_uint8()
+        flags = tok.get_uint8()
+        iterations = tok.get_uint16()
+        salt = tok.get_string()
+        if salt == '-':
+            salt = ''
+        else:
+            salt = salt.decode('hex-codec')
+        next = tok.get_string().upper().translate(b32_hex_to_normal)
+        next = base64.b32decode(next)
+        rdtypes = []
+        while 1:
+            token = tok.get().unescape()
+            if token.is_eol_or_eof():
+                break
+            nrdtype = dns.rdatatype.from_text(token.value)
+            if nrdtype == 0:
+                raise dns.exception.SyntaxError("NSEC3 with bit 0")
+            if nrdtype > 65535:
+                raise dns.exception.SyntaxError("NSEC3 with bit > 65535")
+            rdtypes.append(nrdtype)
+        rdtypes.sort()
+        window = 0
+        octets = 0
+        prior_rdtype = 0
+        bitmap = ['\0'] * 32
+        windows = []
+        for nrdtype in rdtypes:
+            if nrdtype == prior_rdtype:
+                continue
+            prior_rdtype = nrdtype
+            new_window = nrdtype // 256
+            if new_window != window:
+                windows.append((window, ''.join(bitmap[0:octets])))
+                bitmap = ['\0'] * 32
+                window = new_window
+            offset = nrdtype % 256
+            byte = offset / 8
+            bit = offset % 8
+            octets = byte + 1
+            bitmap[byte] = chr(ord(bitmap[byte]) | (0x80 >> bit))
+        windows.append((window, ''.join(bitmap[0:octets])))
+        return cls(rdclass, rdtype, algorithm, flags, iterations, salt, next, windows)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        l = len(self.salt)
+        file.write(struct.pack("!BBHB", self.algorithm, self.flags,
+                               self.iterations, l))
+        file.write(self.salt)
+        l = len(self.next)
+        file.write(struct.pack("!B", l))
+        file.write(self.next)
+        for (window, bitmap) in self.windows:
+            file.write(chr(window))
+            file.write(chr(len(bitmap)))
+            file.write(bitmap)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        (algorithm, flags, iterations, slen) = struct.unpack('!BBHB',
+                                                             wire[current : current + 5])
+        current += 5
+        rdlen -= 5
+        salt = wire[current : current + slen]
+        current += slen
+        rdlen -= slen
+        (nlen, ) = struct.unpack('!B', wire[current])
+        current += 1
+        rdlen -= 1
+        next = wire[current : current + nlen]
+        current += nlen
+        rdlen -= nlen
+        windows = []
+        while rdlen > 0:
+            if rdlen < 3:
+                raise dns.exception.FormError("NSEC3 too short")
+            window = ord(wire[current])
+            octets = ord(wire[current + 1])
+            if octets == 0 or octets > 32:
+                raise dns.exception.FormError("bad NSEC3 octets")
+            current += 2
+            rdlen -= 2
+            if rdlen < octets:
+                raise dns.exception.FormError("bad NSEC3 bitmap length")
+            bitmap = wire[current : current + octets]
+            current += octets
+            rdlen -= octets
+            windows.append((window, bitmap))
+        return cls(rdclass, rdtype, algorithm, flags, iterations, salt, next, windows)
+
+    from_wire = classmethod(from_wire)
+
+    def _cmp(self, other):
+        b1 = cStringIO.StringIO()
+        self.to_wire(b1)
+        b2 = cStringIO.StringIO()
+        other.to_wire(b2)
+        return cmp(b1.getvalue(), b2.getvalue())
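
Hedged sketch for NSEC3 using the sample rdata text from RFC 5155; from_text above hex-decodes the salt and base32hex-decodes the next hashed owner name into raw bytes:

import dns.rdata
import dns.rdataclass
import dns.rdatatype

nsec3 = dns.rdata.from_text(
    dns.rdataclass.IN, dns.rdatatype.NSEC3,
    '1 1 12 aabbccdd 2t7b4g4vsa5smi47k61mv5bv1a22bojr '
    'NS SOA MX RRSIG DNSKEY NSEC3PARAM')
print nsec3.algorithm, nsec3.flags, nsec3.iterations   # 1 1 12
print len(nsec3.salt), len(nsec3.next)                 # 4 20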
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/NSEC3PARAM.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/NSEC3PARAM.py
new file mode 100644
index 0000000..ec91e5e
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/NSEC3PARAM.py
@@ -0,0 +1,88 @@
+# Copyright (C) 2004-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import cStringIO
+import struct
+
+import dns.exception
+import dns.rdata
+
+class NSEC3PARAM(dns.rdata.Rdata):
+    """NSEC3PARAM record
+
+    @ivar algorithm: the hash algorithm number
+    @type algorithm: int
+    @ivar flags: the flags
+    @type flags: int
+    @ivar iterations: the number of iterations
+    @type iterations: int
+    @ivar salt: the salt
+    @type salt: string"""
+
+    __slots__ = ['algorithm', 'flags', 'iterations', 'salt']
+
+    def __init__(self, rdclass, rdtype, algorithm, flags, iterations, salt):
+        super(NSEC3PARAM, self).__init__(rdclass, rdtype)
+        self.algorithm = algorithm
+        self.flags = flags
+        self.iterations = iterations
+        self.salt = salt
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        if self.salt == '':
+            salt = '-'
+        else:
+            salt = self.salt.encode('hex-codec')
+        return '%u %u %u %s' % (self.algorithm, self.flags, self.iterations, salt)
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        algorithm = tok.get_uint8()
+        flags = tok.get_uint8()
+        iterations = tok.get_uint16()
+        salt = tok.get_string()
+        if salt == '-':
+            salt = ''
+        else:
+            salt = salt.decode('hex-codec')
+        return cls(rdclass, rdtype, algorithm, flags, iterations, salt)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        l = len(self.salt)
+        file.write(struct.pack("!BBHB", self.algorithm, self.flags,
+                               self.iterations, l))
+        file.write(self.salt)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        (algorithm, flags, iterations, slen) = struct.unpack('!BBHB',
+                                                             wire[current : current + 5])
+        current += 5
+        rdlen -= 5
+        salt = wire[current : current + slen]
+        current += slen
+        rdlen -= slen
+        if rdlen != 0:
+            raise dns.exception.FormError
+        return cls(rdclass, rdtype, algorithm, flags, iterations, salt)
+
+    from_wire = classmethod(from_wire)
+
+    def _cmp(self, other):
+        b1 = cStringIO.StringIO()
+        self.to_wire(b1)
+        b2 = cStringIO.StringIO()
+        other.to_wire(b2)
+        return cmp(b1.getvalue(), b2.getvalue())
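
And the matching NSEC3PARAM parameters record (a salt of '-' in text form maps to the empty string; anything else is hex-decoded, mirroring NSEC3 above):

import dns.rdata
import dns.rdataclass
import dns.rdatatype

p = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NSEC3PARAM,
                        '1 0 12 aabbccdd')
print repr(p.salt)   # '\xaa\xbb\xcc\xdd'
print p.to_text()    # 1 0 12 aabbccdd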
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/NXT.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/NXT.py
new file mode 100644
index 0000000..99ae9b9
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/NXT.py
@@ -0,0 +1,99 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.exception
+import dns.rdata
+import dns.rdatatype
+import dns.name
+
+class NXT(dns.rdata.Rdata):
+    """NXT record
+
+    @ivar next: the next name
+    @type next: dns.name.Name object
+    @ivar bitmap: the type bitmap
+    @type bitmap: string
+    @see: RFC 2535"""
+
+    __slots__ = ['next', 'bitmap']
+
+    def __init__(self, rdclass, rdtype, next, bitmap):
+        super(NXT, self).__init__(rdclass, rdtype)
+        self.next = next
+        self.bitmap = bitmap
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        next = self.next.choose_relativity(origin, relativize)
+        bits = []
+        for i in xrange(0, len(self.bitmap)):
+            byte = ord(self.bitmap[i])
+            for j in xrange(0, 8):
+                if byte & (0x80 >> j):
+                    bits.append(dns.rdatatype.to_text(i * 8 + j))
+        text = ' '.join(bits)
+        return '%s %s' % (next, text)
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        next = tok.get_name()
+        next = next.choose_relativity(origin, relativize)
+        bitmap = ['\x00', '\x00', '\x00', '\x00',
+                  '\x00', '\x00', '\x00', '\x00',
+                  '\x00', '\x00', '\x00', '\x00',
+                  '\x00', '\x00', '\x00', '\x00' ]
+        while 1:
+            token = tok.get().unescape()
+            if token.is_eol_or_eof():
+                break
+            if token.value.isdigit():
+                nrdtype = int(token.value)
+            else:
+                nrdtype = dns.rdatatype.from_text(token.value)
+            if nrdtype == 0:
+                raise dns.exception.SyntaxError("NXT with bit 0")
+            if nrdtype > 127:
+                raise dns.exception.SyntaxError("NXT with bit > 127")
+            i = nrdtype // 8
+            bitmap[i] = chr(ord(bitmap[i]) | (0x80 >> (nrdtype % 8)))
+        bitmap = dns.rdata._truncate_bitmap(bitmap)
+        return cls(rdclass, rdtype, next, bitmap)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        self.next.to_wire(file, None, origin)
+        file.write(self.bitmap)
+
+    def to_digestable(self, origin = None):
+        return self.next.to_digestable(origin) + self.bitmap
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        (next, cused) = dns.name.from_wire(wire[: current + rdlen], current)
+        current += cused
+        rdlen -= cused
+        bitmap = wire[current : current + rdlen]
+        if not origin is None:
+            next = next.relativize(origin)
+        return cls(rdclass, rdtype, next, bitmap)
+
+    from_wire = classmethod(from_wire)
+
+    def choose_relativity(self, origin = None, relativize = True):
+        self.next = self.next.choose_relativity(origin, relativize)
+
+    def _cmp(self, other):
+        v = cmp(self.next, other.next)
+        if v == 0:
+            v = cmp(self.bitmap, other.bitmap)
+        return v
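
Sketch for the legacy NXT type; note that from_text above rejects type codes greater than 127, since the NXT bitmap only covers the original RFC 2535 type space:

import dns.rdata
import dns.rdataclass
import dns.rdatatype

nxt = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NXT,
                          'medium.foo.tld. A MX SIG NXT')
print nxt.to_text()   # medium.foo.tld. A MX SIG NXT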
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/PTR.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/PTR.py
new file mode 100644
index 0000000..6c4b79e
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/PTR.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.nsbase
+
+class PTR(dns.rdtypes.nsbase.NSBase):
+    """PTR record"""
+    pass
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/RP.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/RP.py
new file mode 100644
index 0000000..421ce8e
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/RP.py
@@ -0,0 +1,86 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.exception
+import dns.rdata
+import dns.name
+
+class RP(dns.rdata.Rdata):
+    """RP record
+
+    @ivar mbox: The responsible person's mailbox
+    @type mbox: dns.name.Name object
+    @ivar txt: The owner name of a node with TXT records, or the root name
+    if no TXT records are associated with this RP.
+    @type txt: dns.name.Name object
+    @see: RFC 1183"""
+
+    __slots__ = ['mbox', 'txt']
+
+    def __init__(self, rdclass, rdtype, mbox, txt):
+        super(RP, self).__init__(rdclass, rdtype)
+        self.mbox = mbox
+        self.txt = txt
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        mbox = self.mbox.choose_relativity(origin, relativize)
+        txt = self.txt.choose_relativity(origin, relativize)
+        return "%s %s" % (str(mbox), str(txt))
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        mbox = tok.get_name()
+        txt = tok.get_name()
+        mbox = mbox.choose_relativity(origin, relativize)
+        txt = txt.choose_relativity(origin, relativize)
+        tok.get_eol()
+        return cls(rdclass, rdtype, mbox, txt)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        self.mbox.to_wire(file, None, origin)
+        self.txt.to_wire(file, None, origin)
+
+    def to_digestable(self, origin = None):
+        return self.mbox.to_digestable(origin) + \
+            self.txt.to_digestable(origin)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        (mbox, cused) = dns.name.from_wire(wire[: current + rdlen],
+                                           current)
+        current += cused
+        rdlen -= cused
+        if rdlen <= 0:
+            raise dns.exception.FormError
+        (txt, cused) = dns.name.from_wire(wire[: current + rdlen],
+                                          current)
+        if cused != rdlen:
+            raise dns.exception.FormError
+        if not origin is None:
+            mbox = mbox.relativize(origin)
+            txt = txt.relativize(origin)
+        return cls(rdclass, rdtype, mbox, txt)
+
+    from_wire = classmethod(from_wire)
+
+    def choose_relativity(self, origin = None, relativize = True):
+        self.mbox = self.mbox.choose_relativity(origin, relativize)
+        self.txt = self.txt.choose_relativity(origin, relativize)
+
+    def _cmp(self, other):
+        v = cmp(self.mbox, other.mbox)
+        if v == 0:
+            v = cmp(self.txt, other.txt)
+        return v
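
Usage sketch for RP, under the same Python 2 / dnspython import assumptions; both fields are domain names:

import dns.rdata
import dns.rdataclass
import dns.rdatatype

rp = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.RP,
                         'hostmaster.example.com. ops.example.com.')
print rp.mbox, rp.txt   # hostmaster.example.com. ops.example.com.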
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/RRSIG.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/RRSIG.py
new file mode 100644
index 0000000..0e4816f
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/RRSIG.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2004-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.sigbase
+
+class RRSIG(dns.rdtypes.sigbase.SIGBase):
+    """RRSIG record"""
+    pass
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/RT.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/RT.py
new file mode 100644
index 0000000..1efd372
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/RT.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.mxbase
+
+class RT(dns.rdtypes.mxbase.UncompressedDowncasingMX):
+    """RT record"""
+    pass
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/SIG.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/SIG.py
new file mode 100644
index 0000000..501e29c
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/SIG.py
@@ -0,0 +1,28 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.rdtypes.sigbase
+
+class SIG(dns.rdtypes.sigbase.SIGBase):
+    """SIG record"""
+    def to_digestable(self, origin = None):
+        return struct.pack('!HBBIIIH', self.type_covered,
+                           self.algorithm, self.labels,
+                           self.original_ttl, self.expiration,
+                           self.inception, self.key_tag) + \
+                           self.signer.to_digestable(origin) + \
+                           self.signature
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/SOA.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/SOA.py
new file mode 100644
index 0000000..a25a35e
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/SOA.py
@@ -0,0 +1,127 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.name
+
+class SOA(dns.rdata.Rdata):
+    """SOA record
+
+    @ivar mname: the SOA MNAME (master name) field
+    @type mname: dns.name.Name object
+    @ivar rname: the SOA RNAME (responsible name) field
+    @type rname: dns.name.Name object
+    @ivar serial: The zone's serial number
+    @type serial: int
+    @ivar refresh: The zone's refresh value (in seconds)
+    @type refresh: int
+    @ivar retry: The zone's retry value (in seconds)
+    @type retry: int
+    @ivar expire: The zone's expiration value (in seconds)
+    @type expire: int
+    @ivar minimum: The zone's negative caching time (in seconds, called
+    "minimum" for historical reasons)
+    @type minimum: int
+    @see: RFC 1035"""
+
+    __slots__ = ['mname', 'rname', 'serial', 'refresh', 'retry', 'expire',
+                 'minimum']
+    
+    def __init__(self, rdclass, rdtype, mname, rname, serial, refresh, retry,
+                 expire, minimum):
+        super(SOA, self).__init__(rdclass, rdtype)
+        self.mname = mname
+        self.rname = rname
+        self.serial = serial
+        self.refresh = refresh
+        self.retry = retry
+        self.expire = expire
+        self.minimum = minimum
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        mname = self.mname.choose_relativity(origin, relativize)
+        rname = self.rname.choose_relativity(origin, relativize)
+        return '%s %s %d %d %d %d %d' % (
+            mname, rname, self.serial, self.refresh, self.retry,
+            self.expire, self.minimum )
+        
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        mname = tok.get_name()
+        rname = tok.get_name()
+        mname = mname.choose_relativity(origin, relativize)
+        rname = rname.choose_relativity(origin, relativize)
+        serial = tok.get_uint32()
+        refresh = tok.get_ttl()
+        retry = tok.get_ttl()
+        expire = tok.get_ttl()
+        minimum = tok.get_ttl()
+        tok.get_eol()
+        return cls(rdclass, rdtype, mname, rname, serial, refresh, retry,
+                   expire, minimum )
+    
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        self.mname.to_wire(file, compress, origin)
+        self.rname.to_wire(file, compress, origin)
+        five_ints = struct.pack('!IIIII', self.serial, self.refresh,
+                                self.retry, self.expire, self.minimum)
+        file.write(five_ints)
+
+    def to_digestable(self, origin = None):
+        return self.mname.to_digestable(origin) + \
+            self.rname.to_digestable(origin) + \
+            struct.pack('!IIIII', self.serial, self.refresh,
+                        self.retry, self.expire, self.minimum)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        (mname, cused) = dns.name.from_wire(wire[: current + rdlen], current)
+        current += cused
+        rdlen -= cused
+        (rname, cused) = dns.name.from_wire(wire[: current + rdlen], current)
+        current += cused
+        rdlen -= cused
+        if rdlen != 20:
+            raise dns.exception.FormError
+        five_ints = struct.unpack('!IIIII',
+                                  wire[current : current + rdlen])
+        if not origin is None:
+            mname = mname.relativize(origin)
+            rname = rname.relativize(origin)
+        return cls(rdclass, rdtype, mname, rname,
+                   five_ints[0], five_ints[1], five_ints[2], five_ints[3],
+                   five_ints[4])
+
+    from_wire = classmethod(from_wire)
+
+    def choose_relativity(self, origin = None, relativize = True):
+        self.mname = self.mname.choose_relativity(origin, relativize)
+        self.rname = self.rname.choose_relativity(origin, relativize)
+
+    def _cmp(self, other):
+        v = cmp(self.mname, other.mname)
+        if v == 0:
+            v = cmp(self.rname, other.rname)
+            if v == 0:
+                self_ints = struct.pack('!IIIII', self.serial, self.refresh,
+                                        self.retry, self.expire, self.minimum)
+                other_ints = struct.pack('!IIIII', other.serial, other.refresh,
+                                         other.retry, other.expire,
+                                         other.minimum)
+                v = cmp(self_ints, other_ints)
+        return v
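
Sketch of the SOA fields as surfaced by from_text above (the serial is a 32-bit integer; the four timers accept the usual TTL syntax):

import dns.rdata
import dns.rdataclass
import dns.rdatatype

soa = dns.rdata.from_text(
    dns.rdataclass.IN, dns.rdatatype.SOA,
    'ns1.example.com. hostmaster.example.com. 2010010100 7200 3600 1209600 300')
print soa.mname, soa.rname      # ns1.example.com. hostmaster.example.com.
print soa.serial, soa.minimum   # 2010010100 300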
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/SPF.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/SPF.py
new file mode 100644
index 0000000..9b5a9a9
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/SPF.py
@@ -0,0 +1,22 @@
+# Copyright (C) 2006, 2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.txtbase
+
+class SPF(dns.rdtypes.txtbase.TXTBase):
+    """SPF record
+
+    @see: RFC 4408"""
+    pass
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/SSHFP.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/SSHFP.py
new file mode 100644
index 0000000..162dda5
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/SSHFP.py
@@ -0,0 +1,77 @@
+# Copyright (C) 2005-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.rdata
+import dns.rdatatype
+
+class SSHFP(dns.rdata.Rdata):
+    """SSHFP record
+
+    @ivar algorithm: the algorithm
+    @type algorithm: int
+    @ivar fp_type: the digest type
+    @type fp_type: int
+    @ivar fingerprint: the fingerprint
+    @type fingerprint: string
+    @see: draft-ietf-secsh-dns-05.txt"""
+
+    __slots__ = ['algorithm', 'fp_type', 'fingerprint']
+    
+    def __init__(self, rdclass, rdtype, algorithm, fp_type,
+                 fingerprint):
+        super(SSHFP, self).__init__(rdclass, rdtype)
+        self.algorithm = algorithm
+        self.fp_type = fp_type
+        self.fingerprint = fingerprint
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        return '%d %d %s' % (self.algorithm,
+                             self.fp_type,
+                             dns.rdata._hexify(self.fingerprint,
+                                               chunksize=128))
+        
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        algorithm = tok.get_uint8()
+        fp_type = tok.get_uint8()
+        fingerprint = tok.get_string()
+        fingerprint = fingerprint.decode('hex_codec')
+        tok.get_eol()
+        return cls(rdclass, rdtype, algorithm, fp_type, fingerprint)
+    
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        header = struct.pack("!BB", self.algorithm, self.fp_type)
+        file.write(header)
+        file.write(self.fingerprint)
+        
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        header = struct.unpack("!BB", wire[current : current + 2])
+        current += 2
+        rdlen -= 2
+        fingerprint = wire[current : current + rdlen]
+        return cls(rdclass, rdtype, header[0], header[1], fingerprint)
+
+    from_wire = classmethod(from_wire)
+
+    def _cmp(self, other):
+        hs = struct.pack("!BB", self.algorithm, self.fp_type)
+        ho = struct.pack("!BB", other.algorithm, other.fp_type)
+        v = cmp(hs, ho)
+        if v == 0:
+            v = cmp(self.fingerprint, other.fingerprint)
+        return v
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/TXT.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/TXT.py
new file mode 100644
index 0000000..23f4f3b
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/TXT.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.txtbase
+
+class TXT(dns.rdtypes.txtbase.TXTBase):
+    """TXT record"""
+    pass
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/X25.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/X25.py
new file mode 100644
index 0000000..c3632f7
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/X25.py
@@ -0,0 +1,62 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.exception
+import dns.rdata
+import dns.tokenizer
+
+class X25(dns.rdata.Rdata):
+    """X25 record
+
+    @ivar address: the PSDN address
+    @type address: string
+    @see: RFC 1183"""
+
+    __slots__ = ['address']
+    
+    def __init__(self, rdclass, rdtype, address):
+        super(X25, self).__init__(rdclass, rdtype)
+        self.address = address
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        return '"%s"' % dns.rdata._escapify(self.address)
+        
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        address = tok.get_string()
+        tok.get_eol()
+        return cls(rdclass, rdtype, address)
+    
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        l = len(self.address)
+        assert l < 256
+        byte = chr(l)
+        file.write(byte)
+        file.write(self.address)
+        
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        l = ord(wire[current])
+        current += 1
+        rdlen -= 1
+        if l != rdlen:
+            raise dns.exception.FormError
+        address = wire[current : current + l]
+        return cls(rdclass, rdtype, address)
+
+    from_wire = classmethod(from_wire)
+
+    def _cmp(self, other):
+        return cmp(self.address, other.address)
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/__init__.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/__init__.py
new file mode 100644
index 0000000..0815dd5
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/ANY/__init__.py
@@ -0,0 +1,48 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Class ANY (generic) rdata type classes."""
+
+__all__ = [
+    'AFSDB',
+    'CERT',
+    'CNAME',
+    'DLV',
+    'DNAME',
+    'DNSKEY',
+    'DS',
+    'GPOS',
+    'HINFO',
+    'HIP',
+    'ISDN',
+    'KEY',
+    'LOC',
+    'MX',
+    'NS',
+    'NSEC',
+    'NSEC3',
+    'NSEC3PARAM',
+    'NXT',
+    'PTR',
+    'RP',
+    'RRSIG',
+    'RT',
+    'SIG',
+    'SOA',
+    'SPF',
+    'SSHFP',
+    'TXT',
+    'X25',
+]
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/A.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/A.py
new file mode 100644
index 0000000..e05f204
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/A.py
@@ -0,0 +1,57 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.exception
+import dns.ipv4
+import dns.rdata
+import dns.tokenizer
+
+class A(dns.rdata.Rdata):
+    """A record.
+
+    @ivar address: an IPv4 address
+    @type address: string (in the standard "dotted quad" format)"""
+
+    __slots__ = ['address']
+
+    def __init__(self, rdclass, rdtype, address):
+        super(A, self).__init__(rdclass, rdtype)
+        # check that it's OK
+        junk = dns.ipv4.inet_aton(address)
+        self.address = address
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        return self.address
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        address = tok.get_identifier()
+        tok.get_eol()
+        return cls(rdclass, rdtype, address)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        file.write(dns.ipv4.inet_aton(self.address))
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        address = dns.ipv4.inet_ntoa(wire[current : current + rdlen])
+        return cls(rdclass, rdtype, address)
+
+    from_wire = classmethod(from_wire)
+
+    def _cmp(self, other):
+        sa = dns.ipv4.inet_aton(self.address)
+        oa = dns.ipv4.inet_aton(other.address)
+        return cmp(sa, oa)
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/AAAA.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/AAAA.py
new file mode 100644
index 0000000..2d812d3
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/AAAA.py
@@ -0,0 +1,58 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.exception
+import dns.inet
+import dns.rdata
+import dns.tokenizer
+
+class AAAA(dns.rdata.Rdata):
+    """AAAA record.
+
+    @ivar address: an IPv6 address
+    @type address: string (in the standard IPv6 format)"""
+
+    __slots__ = ['address']
+
+    def __init__(self, rdclass, rdtype, address):
+        super(AAAA, self).__init__(rdclass, rdtype)
+        # check that it's OK
+        junk = dns.inet.inet_pton(dns.inet.AF_INET6, address)
+        self.address = address
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        return self.address
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        address = tok.get_identifier()
+        tok.get_eol()
+        return cls(rdclass, rdtype, address)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        file.write(dns.inet.inet_pton(dns.inet.AF_INET6, self.address))
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        address = dns.inet.inet_ntop(dns.inet.AF_INET6,
+                                     wire[current : current + rdlen])
+        return cls(rdclass, rdtype, address)
+
+    from_wire = classmethod(from_wire)
+
+    def _cmp(self, other):
+        sa = dns.inet.inet_pton(dns.inet.AF_INET6, self.address)
+        oa = dns.inet.inet_pton(dns.inet.AF_INET6, other.address)
+        return cmp(sa, oa)
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/APL.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/APL.py
new file mode 100644
index 0000000..7412c02
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/APL.py
@@ -0,0 +1,170 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import cStringIO
+import struct
+
+import dns.exception
+import dns.inet
+import dns.rdata
+import dns.tokenizer
+
+class APLItem(object):
+    """An APL list item.
+
+    @ivar family: the address family (IANA address family registry)
+    @type family: int
+    @ivar negation: is this item negated?
+    @type negation: bool
+    @ivar address: the address
+    @type address: string
+    @ivar prefix: the prefix length
+    @type prefix: int
+    """
+
+    __slots__ = ['family', 'negation', 'address', 'prefix']
+
+    def __init__(self, family, negation, address, prefix):
+        self.family = family
+        self.negation = negation
+        self.address = address
+        self.prefix = prefix
+
+    def __str__(self):
+        if self.negation:
+            return "!%d:%s/%s" % (self.family, self.address, self.prefix)
+        else:
+            return "%d:%s/%s" % (self.family, self.address, self.prefix)
+
+    def to_wire(self, file):
+        if self.family == 1:
+            address = dns.inet.inet_pton(dns.inet.AF_INET, self.address)
+        elif self.family == 2:
+            address = dns.inet.inet_pton(dns.inet.AF_INET6, self.address)
+        else:
+            address = self.address.decode('hex_codec')
+        #
+        # Truncate least significant zero bytes.
+        #
+        last = 0
+        for i in xrange(len(address) - 1, -1, -1):
+            if address[i] != chr(0):
+                last = i + 1
+                break
+        address = address[0 : last]
+        l = len(address)
+        assert l < 128
+        if self.negation:
+            l |= 0x80
+        header = struct.pack('!HBB', self.family, self.prefix, l)
+        file.write(header)
+        file.write(address)
+
+class APL(dns.rdata.Rdata):
+    """APL record.
+
+    @ivar items: a list of APL items
+    @type items: list of APL_Item
+    @see: RFC 3123"""
+
+    __slots__ = ['items']
+
+    def __init__(self, rdclass, rdtype, items):
+        super(APL, self).__init__(rdclass, rdtype)
+        self.items = items
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        return ' '.join(map(lambda x: str(x), self.items))
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        items = []
+        while 1:
+            token = tok.get().unescape()
+            if token.is_eol_or_eof():
+                break
+            item = token.value
+            if item[0] == '!':
+                negation = True
+                item = item[1:]
+            else:
+                negation = False
+            (family, rest) = item.split(':', 1)
+            family = int(family)
+            (address, prefix) = rest.split('/', 1)
+            prefix = int(prefix)
+            item = APLItem(family, negation, address, prefix)
+            items.append(item)
+
+        return cls(rdclass, rdtype, items)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        for item in self.items:
+            item.to_wire(file)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        items = []
+        while 1:
+            if rdlen < 4:
+                raise dns.exception.FormError
+            header = struct.unpack('!HBB', wire[current : current + 4])
+            afdlen = header[2]
+            if afdlen > 127:
+                negation = True
+                afdlen -= 128
+            else:
+                negation = False
+            current += 4
+            rdlen -= 4
+            if rdlen < afdlen:
+                raise dns.exception.FormError
+            address = wire[current : current + afdlen]
+            l = len(address)
+            if header[0] == 1:
+                if l < 4:
+                    address += '\x00' * (4 - l)
+                address = dns.inet.inet_ntop(dns.inet.AF_INET, address)
+            elif header[0] == 2:
+                if l < 16:
+                    address += '\x00' * (16 - l)
+                address = dns.inet.inet_ntop(dns.inet.AF_INET6, address)
+            else:
+                #
+                # This isn't really right according to the RFC, but it
+                # seems better than throwing an exception
+                #
+                address = address.encode('hex_codec')
+            current += afdlen
+            rdlen -= afdlen
+            item = APLItem(header[0], negation, address, header[1])
+            items.append(item)
+            if rdlen == 0:
+                break
+        return cls(rdclass, rdtype, items)
+
+    from_wire = classmethod(from_wire)
+
+    def _cmp(self, other):
+        f = cStringIO.StringIO()
+        self.to_wire(f)
+        wire1 = f.getvalue()
+        f.seek(0)
+        f.truncate()
+        other.to_wire(f)
+        wire2 = f.getvalue()
+        f.close()
+
+        return cmp(wire1, wire2)
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/DHCID.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/DHCID.py
new file mode 100644
index 0000000..2d35234
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/DHCID.py
@@ -0,0 +1,61 @@
+# Copyright (C) 2006, 2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.exception
+import dns.rdata
+
+class DHCID(dns.rdata.Rdata):
+    """DHCID record
+
+    @ivar data: the data (the content of the RR is opaque as far as the
+    DNS is concerned)
+    @type data: string
+    @see: RFC 4701"""
+
+    __slots__ = ['data']
+
+    def __init__(self, rdclass, rdtype, data):
+        super(DHCID, self).__init__(rdclass, rdtype)
+        self.data = data
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        return dns.rdata._base64ify(self.data)
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        chunks = []
+        while 1:
+            t = tok.get().unescape()
+            if t.is_eol_or_eof():
+                break
+            if not t.is_identifier():
+                raise dns.exception.SyntaxError
+            chunks.append(t.value)
+        b64 = ''.join(chunks)
+        data = b64.decode('base64_codec')
+        return cls(rdclass, rdtype, data)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        file.write(self.data)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        data = wire[current : current + rdlen]
+        return cls(rdclass, rdtype, data)
+
+    from_wire = classmethod(from_wire)
+
+    def _cmp(self, other):
+        return cmp(self.data, other.data)
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/IPSECKEY.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/IPSECKEY.py
new file mode 100644
index 0000000..9ab08d8
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/IPSECKEY.py
@@ -0,0 +1,160 @@
+# Copyright (C) 2006, 2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import cStringIO
+import struct
+
+import dns.exception
+import dns.inet
+import dns.name
+import dns.rdata
+
+class IPSECKEY(dns.rdata.Rdata):
+    """IPSECKEY record
+
+    @ivar precedence: the precedence for this key data
+    @type precedence: int
+    @ivar gateway_type: the gateway type
+    @type gateway_type: int
+    @ivar algorithm: the algorithm to use
+    @type algorithm: int
+    @ivar gateway: the gateway
+    @type gateway: None, IPv4 address, IPv6 address, or domain name
+    @ivar key: the public key
+    @type key: string
+    @see: RFC 4025"""
+
+    __slots__ = ['precedence', 'gateway_type', 'algorithm', 'gateway', 'key']
+
+    def __init__(self, rdclass, rdtype, precedence, gateway_type, algorithm,
+                 gateway, key):
+        super(IPSECKEY, self).__init__(rdclass, rdtype)
+        if gateway_type == 0:
+            if gateway != '.' and not gateway is None:
+                raise SyntaxError('invalid gateway for gateway type 0')
+            gateway = None
+        elif gateway_type == 1:
+            # check that it's OK
+            junk = dns.inet.inet_pton(dns.inet.AF_INET, gateway)
+        elif gateway_type == 2:
+            # check that it's OK
+            junk = dns.inet.inet_pton(dns.inet.AF_INET6, gateway)
+        elif gateway_type == 3:
+            pass
+        else:
+            raise SyntaxError('invalid IPSECKEY gateway type: %d' % gateway_type)
+        self.precedence = precedence
+        self.gateway_type = gateway_type
+        self.algorithm = algorithm
+        self.gateway = gateway
+        self.key = key
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        if self.gateway_type == 0:
+            gateway = '.'
+        elif self.gateway_type == 1:
+            gateway = self.gateway
+        elif self.gateway_type == 2:
+            gateway = self.gateway
+        elif self.gateway_type == 3:
+            gateway = str(self.gateway.choose_relativity(origin, relativize))
+        else:
+            raise ValueError('invalid gateway type')
+        return '%d %d %d %s %s' % (self.precedence, self.gateway_type,
+                                   self.algorithm, gateway,
+                                   dns.rdata._base64ify(self.key))
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        precedence = tok.get_uint8()
+        gateway_type = tok.get_uint8()
+        algorithm = tok.get_uint8()
+        if gateway_type == 3:
+            gateway = tok.get_name().choose_relativity(origin, relativize)
+        else:
+            gateway = tok.get_string()
+        chunks = []
+        while 1:
+            t = tok.get().unescape()
+            if t.is_eol_or_eof():
+                break
+            if not t.is_identifier():
+                raise dns.exception.SyntaxError
+            chunks.append(t.value)
+        b64 = ''.join(chunks)
+        key = b64.decode('base64_codec')
+        return cls(rdclass, rdtype, precedence, gateway_type, algorithm,
+                   gateway, key)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        header = struct.pack("!BBB", self.precedence, self.gateway_type,
+                             self.algorithm)
+        file.write(header)
+        if self.gateway_type == 0:
+            pass
+        elif self.gateway_type == 1:
+            file.write(dns.inet.inet_pton(dns.inet.AF_INET, self.gateway))
+        elif self.gateway_type == 2:
+            file.write(dns.inet.inet_pton(dns.inet.AF_INET6, self.gateway))
+        elif self.gateway_type == 3:
+            self.gateway.to_wire(file, None, origin)
+        else:
+            raise ValueError('invalid gateway type')
+        file.write(self.key)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        if rdlen < 3:
+            raise dns.exception.FormError
+        header = struct.unpack('!BBB', wire[current : current + 3])
+        gateway_type = header[1]
+        current += 3
+        rdlen -= 3
+        if gateway_type == 0:
+            gateway = None
+        elif gateway_type == 1:
+            gateway = dns.inet.inet_ntop(dns.inet.AF_INET,
+                                         wire[current : current + 4])
+            current += 4
+            rdlen -= 4
+        elif gateway_type == 2:
+            gateway = dns.inet.inet_ntop(dns.inet.AF_INET6,
+                                         wire[current : current + 16])
+            current += 16
+            rdlen -= 16
+        elif gateway_type == 3:
+            (gateway, cused) = dns.name.from_wire(wire[: current + rdlen],
+                                                  current)
+            current += cused
+            rdlen -= cused
+        else:
+            raise dns.exception.FormError('invalid IPSECKEY gateway type')
+        key = wire[current : current + rdlen]
+        return cls(rdclass, rdtype, header[0], gateway_type, header[2],
+                   gateway, key)
+
+    from_wire = classmethod(from_wire)
+
+    def _cmp(self, other):
+        f = cStringIO.StringIO()
+        self.to_wire(f)
+        wire1 = f.getvalue()
+        f.seek(0)
+        f.truncate()
+        other.to_wire(f)
+        wire2 = f.getvalue()
+        f.close()
+
+        return cmp(wire1, wire2)
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/KX.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/KX.py
new file mode 100644
index 0000000..4d8a3a7
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/KX.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.mxbase
+
+class KX(dns.rdtypes.mxbase.UncompressedMX):
+    """KX record"""
+    pass
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/NAPTR.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/NAPTR.py
new file mode 100644
index 0000000..a3cca55
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/NAPTR.py
@@ -0,0 +1,132 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.name
+import dns.rdata
+
+def _write_string(file, s):
+    l = len(s)
+    assert l < 256
+    byte = chr(l)
+    file.write(byte)
+    file.write(s)
+
+class NAPTR(dns.rdata.Rdata):
+    """NAPTR record
+
+    @ivar order: order
+    @type order: int
+    @ivar preference: preference
+    @type preference: int
+    @ivar flags: flags
+    @type flags: string
+    @ivar service: service
+    @type service: string
+    @ivar regexp: regular expression
+    @type regexp: string
+    @ivar replacement: replacement name
+    @type replacement: dns.name.Name object
+    @see: RFC 3403"""
+
+    __slots__ = ['order', 'preference', 'flags', 'service', 'regexp',
+                 'replacement']
+    
+    def __init__(self, rdclass, rdtype, order, preference, flags, service,
+                 regexp, replacement):
+        super(NAPTR, self).__init__(rdclass, rdtype)
+        self.order = order
+        self.preference = preference
+        self.flags = flags
+        self.service = service
+        self.regexp = regexp
+        self.replacement = replacement
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        replacement = self.replacement.choose_relativity(origin, relativize)
+        return '%d %d "%s" "%s" "%s" %s' % \
+               (self.order, self.preference,
+                dns.rdata._escapify(self.flags),
+                dns.rdata._escapify(self.service),
+                dns.rdata._escapify(self.regexp),
+                replacement)
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        order = tok.get_uint16()
+        preference = tok.get_uint16()
+        flags = tok.get_string()
+        service = tok.get_string()
+        regexp = tok.get_string()
+        replacement = tok.get_name()
+        replacement = replacement.choose_relativity(origin, relativize)
+        tok.get_eol()
+        return cls(rdclass, rdtype, order, preference, flags, service,
+                   regexp, replacement)
+    
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        two_ints = struct.pack("!HH", self.order, self.preference)
+        file.write(two_ints)
+        _write_string(file, self.flags)
+        _write_string(file, self.service)
+        _write_string(file, self.regexp)
+        self.replacement.to_wire(file, compress, origin)
+        
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        (order, preference) = struct.unpack('!HH', wire[current : current + 4])
+        current += 4
+        rdlen -= 4
+        strings = []
+        for i in xrange(3):
+            l = ord(wire[current])
+            current += 1
+            rdlen -= 1
+            if l > rdlen or rdlen < 0:
+                raise dns.exception.FormError
+            s = wire[current : current + l]
+            current += l
+            rdlen -= l
+            strings.append(s)
+        (replacement, cused) = dns.name.from_wire(wire[: current + rdlen],
+                                                  current)
+        if cused != rdlen:
+            raise dns.exception.FormError
+        if not origin is None:
+            replacement = replacement.relativize(origin)
+        return cls(rdclass, rdtype, order, preference, strings[0], strings[1],
+                   strings[2], replacement)
+
+    from_wire = classmethod(from_wire)
+
+    def choose_relativity(self, origin = None, relativize = True):
+        self.replacement = self.replacement.choose_relativity(origin,
+                                                              relativize)
+        
+    def _cmp(self, other):
+        sp = struct.pack("!HH", self.order, self.preference)
+        op = struct.pack("!HH", other.order, other.preference)
+        v = cmp(sp, op)
+        if v == 0:
+            v = cmp(self.flags, other.flags)
+            if v == 0:
+                v = cmp(self.service, other.service)
+                if v == 0:
+                    v = cmp(self.regexp, other.regexp)
+                    if v == 0:
+                        v = cmp(self.replacement, other.replacement)
+        return v
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/NSAP.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/NSAP.py
new file mode 100644
index 0000000..22b9131
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/NSAP.py
@@ -0,0 +1,59 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.exception
+import dns.rdata
+import dns.tokenizer
+
+class NSAP(dns.rdata.Rdata):
+    """NSAP record.
+
+    @ivar address: an NSAP address
+    @type address: string
+    @see: RFC 1706"""
+
+    __slots__ = ['address']
+
+    def __init__(self, rdclass, rdtype, address):
+        super(NSAP, self).__init__(rdclass, rdtype)
+        self.address = address
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        return "0x%s" % self.address.encode('hex_codec')
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        address = tok.get_string()
+        t = tok.get_eol()
+        if address[0:2] != '0x':
+            raise dns.exception.SyntaxError('string does not start with 0x')
+        address = address[2:].replace('.', '')
+        if len(address) % 2 != 0:
+            raise dns.exception.SyntaxError('hexstring has odd length')
+        address = address.decode('hex_codec')
+        return cls(rdclass, rdtype, address)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        file.write(self.address)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        address = wire[current : current + rdlen]
+        return cls(rdclass, rdtype, address)
+
+    from_wire = classmethod(from_wire)
+
+    def _cmp(self, other):
+        return cmp(self.address, other.address)
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/NSAP_PTR.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/NSAP_PTR.py
new file mode 100644
index 0000000..6f591f4
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/NSAP_PTR.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.nsbase
+
+class NSAP_PTR(dns.rdtypes.nsbase.UncompressedNS):
+    """NSAP-PTR record"""
+    pass
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/PX.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/PX.py
new file mode 100644
index 0000000..0f11290
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/PX.py
@@ -0,0 +1,97 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.name
+
+class PX(dns.rdata.Rdata):
+    """PX record.
+
+    @ivar preference: the preference value
+    @type preference: int
+    @ivar map822: the map822 name
+    @type map822: dns.name.Name object
+    @ivar mapx400: the mapx400 name
+    @type mapx400: dns.name.Name object
+    @see: RFC 2163"""
+
+    __slots__ = ['preference', 'map822', 'mapx400']
+        
+    def __init__(self, rdclass, rdtype, preference, map822, mapx400):
+        super(PX, self).__init__(rdclass, rdtype)
+        self.preference = preference
+        self.map822 = map822
+        self.mapx400 = mapx400
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        map822 = self.map822.choose_relativity(origin, relativize)
+        mapx400 = self.mapx400.choose_relativity(origin, relativize)
+        return '%d %s %s' % (self.preference, map822, mapx400)
+        
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        preference = tok.get_uint16()
+        map822 = tok.get_name()
+        map822 = map822.choose_relativity(origin, relativize)
+        mapx400 = tok.get_name(None)
+        mapx400 = mapx400.choose_relativity(origin, relativize)
+        tok.get_eol()
+        return cls(rdclass, rdtype, preference, map822, mapx400)
+    
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        pref = struct.pack("!H", self.preference)
+        file.write(pref)
+        self.map822.to_wire(file, None, origin)
+        self.mapx400.to_wire(file, None, origin)
+        
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        (preference, ) = struct.unpack('!H', wire[current : current + 2])
+        current += 2
+        rdlen -= 2
+        (map822, cused) = dns.name.from_wire(wire[: current + rdlen],
+                                               current)
+        if cused > rdlen:
+            raise dns.exception.FormError
+        current += cused
+        rdlen -= cused
+        if not origin is None:
+            map822 = map822.relativize(origin)
+        (mapx400, cused) = dns.name.from_wire(wire[: current + rdlen],
+                                              current)
+        if cused != rdlen:
+            raise dns.exception.FormError
+        if not origin is None:
+            mapx400 = mapx400.relativize(origin)
+        return cls(rdclass, rdtype, preference, map822, mapx400)
+
+    from_wire = classmethod(from_wire)
+
+    def choose_relativity(self, origin = None, relativize = True):
+        self.map822 = self.map822.choose_relativity(origin, relativize)
+        self.mapx400 = self.mapx400.choose_relativity(origin, relativize)
+
+    def _cmp(self, other):
+        sp = struct.pack("!H", self.preference)
+        op = struct.pack("!H", other.preference)
+        v = cmp(sp, op)
+        if v == 0:
+            v = cmp(self.map822, other.map822)
+            if v == 0:
+                v = cmp(self.mapx400, other.mapx400)
+        return v
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/SRV.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/SRV.py
new file mode 100644
index 0000000..c9c5823
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/SRV.py
@@ -0,0 +1,89 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.name
+
+class SRV(dns.rdata.Rdata):
+    """SRV record
+
+    @ivar priority: the priority
+    @type priority: int
+    @ivar weight: the weight
+    @type weight: int
+    @ivar port: the port of the service
+    @type port: int
+    @ivar target: the target host
+    @type target: dns.name.Name object
+    @see: RFC 2782"""
+
+    __slots__ = ['priority', 'weight', 'port', 'target']
+
+    def __init__(self, rdclass, rdtype, priority, weight, port, target):
+        super(SRV, self).__init__(rdclass, rdtype)
+        self.priority = priority
+        self.weight = weight
+        self.port = port
+        self.target = target
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        target = self.target.choose_relativity(origin, relativize)
+        return '%d %d %d %s' % (self.priority, self.weight, self.port,
+                                target)
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        priority = tok.get_uint16()
+        weight = tok.get_uint16()
+        port = tok.get_uint16()
+        target = tok.get_name(None)
+        target = target.choose_relativity(origin, relativize)
+        tok.get_eol()
+        return cls(rdclass, rdtype, priority, weight, port, target)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        three_ints = struct.pack("!HHH", self.priority, self.weight, self.port)
+        file.write(three_ints)
+        self.target.to_wire(file, compress, origin)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        (priority, weight, port) = struct.unpack('!HHH',
+                                                 wire[current : current + 6])
+        current += 6
+        rdlen -= 6
+        (target, cused) = dns.name.from_wire(wire[: current + rdlen],
+                                             current)
+        if cused != rdlen:
+            raise dns.exception.FormError
+        if not origin is None:
+            target = target.relativize(origin)
+        return cls(rdclass, rdtype, priority, weight, port, target)
+
+    from_wire = classmethod(from_wire)
+
+    def choose_relativity(self, origin = None, relativize = True):
+        self.target = self.target.choose_relativity(origin, relativize)
+
+    def _cmp(self, other):
+        sp = struct.pack("!HHH", self.priority, self.weight, self.port)
+        op = struct.pack("!HHH", other.priority, other.weight, other.port)
+        v = cmp(sp, op)
+        if v == 0:
+            v = cmp(self.target, other.target)
+        return v
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/WKS.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/WKS.py
new file mode 100644
index 0000000..85aafb3
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/WKS.py
@@ -0,0 +1,113 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import socket
+import struct
+
+import dns.ipv4
+import dns.rdata
+
+_proto_tcp = socket.getprotobyname('tcp')
+_proto_udp = socket.getprotobyname('udp')
+
+class WKS(dns.rdata.Rdata):
+    """WKS record
+
+    @ivar address: the address
+    @type address: string
+    @ivar protocol: the protocol
+    @type protocol: int
+    @ivar bitmap: the bitmap
+    @type bitmap: string
+    @see: RFC 1035"""
+
+    __slots__ = ['address', 'protocol', 'bitmap']
+
+    def __init__(self, rdclass, rdtype, address, protocol, bitmap):
+        super(WKS, self).__init__(rdclass, rdtype)
+        self.address = address
+        self.protocol = protocol
+        self.bitmap = bitmap
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        bits = []
+        for i in xrange(0, len(self.bitmap)):
+            byte = ord(self.bitmap[i])
+            for j in xrange(0, 8):
+                if byte & (0x80 >> j):
+                    bits.append(str(i * 8 + j))
+        text = ' '.join(bits)
+        return '%s %d %s' % (self.address, self.protocol, text)
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        address = tok.get_string()
+        protocol = tok.get_string()
+        if protocol.isdigit():
+            protocol = int(protocol)
+        else:
+            protocol = socket.getprotobyname(protocol)
+        bitmap = []
+        while 1:
+            token = tok.get().unescape()
+            if token.is_eol_or_eof():
+                break
+            if token.value.isdigit():
+                serv = int(token.value)
+            else:
+                if protocol != _proto_udp and protocol != _proto_tcp:
+                    raise NotImplementedError("protocol must be TCP or UDP")
+                if protocol == _proto_udp:
+                    protocol_text = "udp"
+                else:
+                    protocol_text = "tcp"
+                serv = socket.getservbyname(token.value, protocol_text)
+            i = serv // 8
+            l = len(bitmap)
+            if l < i + 1:
+                for j in xrange(l, i + 1):
+                    bitmap.append('\x00')
+            bitmap[i] = chr(ord(bitmap[i]) | (0x80 >> (serv % 8)))
+        bitmap = dns.rdata._truncate_bitmap(bitmap)
+        return cls(rdclass, rdtype, address, protocol, bitmap)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        file.write(dns.ipv4.inet_aton(self.address))
+        protocol = struct.pack('!B', self.protocol)
+        file.write(protocol)
+        file.write(self.bitmap)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        address = dns.ipv4.inet_ntoa(wire[current : current + 4])
+        protocol, = struct.unpack('!B', wire[current + 4 : current + 5])
+        current += 5
+        rdlen -= 5
+        bitmap = wire[current : current + rdlen]
+        return cls(rdclass, rdtype, address, protocol, bitmap)
+
+    from_wire = classmethod(from_wire)
+
+    def _cmp(self, other):
+        sa = dns.ipv4.inet_aton(self.address)
+        oa = dns.ipv4.inet_aton(other.address)
+        v = cmp(sa, oa)
+        if v == 0:
+            sp = struct.pack('!B', self.protocol)
+            op = struct.pack('!B', other.protocol)
+            v = cmp(sp, op)
+            if v == 0:
+                v = cmp(self.bitmap, other.bitmap)
+        return v
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/__init__.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/__init__.py
new file mode 100644
index 0000000..ab93129
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/IN/__init__.py
@@ -0,0 +1,30 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Class IN rdata type classes."""
+
+__all__ = [
+    'A',
+    'AAAA',
+    'APL',
+    'DHCID',
+    'KX',
+    'NAPTR',
+    'NSAP',
+    'NSAP_PTR',
+    'PX',
+    'SRV',
+    'WKS',
+]
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/__init__.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/__init__.py
new file mode 100644
index 0000000..13282be
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/__init__.py
@@ -0,0 +1,25 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS rdata type classes"""
+
+__all__ = [
+    'ANY',
+    'IN',
+    'mxbase',
+    'nsbase',
+    'sigbase',
+    'keybase',
+]
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/dsbase.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/dsbase.py
new file mode 100644
index 0000000..aa46403
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/dsbase.py
@@ -0,0 +1,93 @@
+# Copyright (C) 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.rdatatype
+
+class DSBase(dns.rdata.Rdata):
+    """Base class for rdata that is like a DS record
+
+    @ivar key_tag: the key tag
+    @type key_tag: int
+    @ivar algorithm: the algorithm
+    @type algorithm: int
+    @ivar digest_type: the digest type
+    @type digest_type: int
+    @ivar digest: the digest
+    @type digest: string
+    @see: draft-ietf-dnsext-delegation-signer-14.txt"""
+
+    __slots__ = ['key_tag', 'algorithm', 'digest_type', 'digest']
+
+    def __init__(self, rdclass, rdtype, key_tag, algorithm, digest_type,
+                 digest):
+        super(DSBase, self).__init__(rdclass, rdtype)
+        self.key_tag = key_tag
+        self.algorithm = algorithm
+        self.digest_type = digest_type
+        self.digest = digest
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        return '%d %d %d %s' % (self.key_tag, self.algorithm,
+                                self.digest_type,
+                                dns.rdata._hexify(self.digest,
+                                                  chunksize=128))
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        key_tag = tok.get_uint16()
+        algorithm = tok.get_uint8()
+        digest_type = tok.get_uint8()
+        chunks = []
+        while 1:
+            t = tok.get().unescape()
+            if t.is_eol_or_eof():
+                break
+            if not t.is_identifier():
+                raise dns.exception.SyntaxError
+            chunks.append(t.value)
+        digest = ''.join(chunks)
+        digest = digest.decode('hex_codec')
+        return cls(rdclass, rdtype, key_tag, algorithm, digest_type,
+                   digest)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        header = struct.pack("!HBB", self.key_tag, self.algorithm,
+                             self.digest_type)
+        file.write(header)
+        file.write(self.digest)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        header = struct.unpack("!HBB", wire[current : current + 4])
+        current += 4
+        rdlen -= 4
+        digest = wire[current : current + rdlen]
+        return cls(rdclass, rdtype, header[0], header[1], header[2], digest)
+
+    from_wire = classmethod(from_wire)
+
+    def _cmp(self, other):
+        hs = struct.pack("!HBB", self.key_tag, self.algorithm,
+                         self.digest_type)
+        ho = struct.pack("!HBB", other.key_tag, other.algorithm,
+                         other.digest_type)
+        v = cmp(hs, ho)
+        if v == 0:
+            v = cmp(self.digest, other.digest)
+        return v
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/keybase.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/keybase.py
new file mode 100644
index 0000000..75c9272
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/keybase.py
@@ -0,0 +1,149 @@
+# Copyright (C) 2004-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.dnssec
+import dns.rdata
+
+_flags_from_text = {
+    'NOCONF': (0x4000, 0xC000),
+    'NOAUTH': (0x8000, 0xC000),
+    'NOKEY': (0xC000, 0xC000),
+    'FLAG2': (0x2000, 0x2000),
+    'EXTEND': (0x1000, 0x1000),
+    'FLAG4': (0x0800, 0x0800),
+    'FLAG5': (0x0400, 0x0400),
+    'USER': (0x0000, 0x0300),
+    'ZONE': (0x0100, 0x0300),
+    'HOST': (0x0200, 0x0300),
+    'NTYP3': (0x0300, 0x0300),
+    'FLAG8': (0x0080, 0x0080),
+    'FLAG9': (0x0040, 0x0040),
+    'FLAG10': (0x0020, 0x0020),
+    'FLAG11': (0x0010, 0x0010),
+    'SIG0': (0x0000, 0x000f),
+    'SIG1': (0x0001, 0x000f),
+    'SIG2': (0x0002, 0x000f),
+    'SIG3': (0x0003, 0x000f),
+    'SIG4': (0x0004, 0x000f),
+    'SIG5': (0x0005, 0x000f),
+    'SIG6': (0x0006, 0x000f),
+    'SIG7': (0x0007, 0x000f),
+    'SIG8': (0x0008, 0x000f),
+    'SIG9': (0x0009, 0x000f),
+    'SIG10': (0x000a, 0x000f),
+    'SIG11': (0x000b, 0x000f),
+    'SIG12': (0x000c, 0x000f),
+    'SIG13': (0x000d, 0x000f),
+    'SIG14': (0x000e, 0x000f),
+    'SIG15': (0x000f, 0x000f),
+    }
+
+_protocol_from_text = {
+    'NONE' : 0,
+    'TLS' : 1,
+    'EMAIL' : 2,
+    'DNSSEC' : 3,
+    'IPSEC' : 4,
+    'ALL' : 255,
+    }
+
+class KEYBase(dns.rdata.Rdata):
+    """KEY-like record base
+
+    @ivar flags: the key flags
+    @type flags: int
+    @ivar protocol: the protocol for which this key may be used
+    @type protocol: int
+    @ivar algorithm: the algorithm used for the key
+    @type algorithm: int
+    @ivar key: the public key
+    @type key: string"""
+
+    __slots__ = ['flags', 'protocol', 'algorithm', 'key']
+
+    def __init__(self, rdclass, rdtype, flags, protocol, algorithm, key):
+        super(KEYBase, self).__init__(rdclass, rdtype)
+        self.flags = flags
+        self.protocol = protocol
+        self.algorithm = algorithm
+        self.key = key
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        return '%d %d %d %s' % (self.flags, self.protocol, self.algorithm,
+                                dns.rdata._base64ify(self.key))
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        flags = tok.get_string()
+        if flags.isdigit():
+            flags = int(flags)
+        else:
+            flag_names = flags.split('|')
+            flags = 0
+            for flag in flag_names:
+                v = _flags_from_text.get(flag)
+                if v is None:
+                    raise dns.exception.SyntaxError('unknown flag %s' % flag)
+                flags &= ~v[1]
+                flags |= v[0]
+        protocol_text = tok.get_string()
+        if protocol_text.isdigit():
+            protocol = int(protocol_text)
+        else:
+            protocol = _protocol_from_text.get(protocol_text)
+            if protocol is None:
+                raise dns.exception.SyntaxError('unknown protocol %s' % protocol_text)
+
+        algorithm = dns.dnssec.algorithm_from_text(tok.get_string())
+        chunks = []
+        while 1:
+            t = tok.get().unescape()
+            if t.is_eol_or_eof():
+                break
+            if not t.is_identifier():
+                raise dns.exception.SyntaxError
+            chunks.append(t.value)
+        b64 = ''.join(chunks)
+        key = b64.decode('base64_codec')
+        return cls(rdclass, rdtype, flags, protocol, algorithm, key)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        header = struct.pack("!HBB", self.flags, self.protocol, self.algorithm)
+        file.write(header)
+        file.write(self.key)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        if rdlen < 4:
+            raise dns.exception.FormError
+        header = struct.unpack('!HBB', wire[current : current + 4])
+        current += 4
+        rdlen -= 4
+        key = wire[current : current + rdlen]
+        return cls(rdclass, rdtype, header[0], header[1], header[2],
+                   key)
+
+    from_wire = classmethod(from_wire)
+
+    def _cmp(self, other):
+        hs = struct.pack("!HBB", self.flags, self.protocol, self.algorithm)
+        ho = struct.pack("!HBB", other.flags, other.protocol, other.algorithm)
+        v = cmp(hs, ho)
+        if v == 0:
+            v = cmp(self.key, other.key)
+        return v
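
The (value, mask) pairs drive the mnemonic flag parsing in from_text() above: each name first clears the bit-field covered by its mask, then sets its own bits. A small standalone sketch of that rule (Python 2; the three pairs are copied from _flags_from_text):

    pairs = {'NOCONF': (0x4000, 0xC000),   # key-type field
             'ZONE':   (0x0100, 0x0300),   # name-type field
             'SIG3':   (0x0003, 0x000f)}   # signatory field
    flags = 0
    for name in ('NOCONF', 'ZONE', 'SIG3'):
        value, mask = pairs[name]
        flags &= ~mask   # clear the field this mnemonic belongs to
        flags |= value   # then set its bits
    print hex(flags)     # expected: 0x4103
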
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/mxbase.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/mxbase.py
new file mode 100644
index 0000000..5e3515b
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/mxbase.py
@@ -0,0 +1,105 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""MX-like base classes."""
+
+import cStringIO
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.name
+
+class MXBase(dns.rdata.Rdata):
+    """Base class for rdata that is like an MX record.
+
+    @ivar preference: the preference value
+    @type preference: int
+    @ivar exchange: the exchange name
+    @type exchange: dns.name.Name object"""
+
+    __slots__ = ['preference', 'exchange']
+
+    def __init__(self, rdclass, rdtype, preference, exchange):
+        super(MXBase, self).__init__(rdclass, rdtype)
+        self.preference = preference
+        self.exchange = exchange
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        exchange = self.exchange.choose_relativity(origin, relativize)
+        return '%d %s' % (self.preference, exchange)
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        preference = tok.get_uint16()
+        exchange = tok.get_name()
+        exchange = exchange.choose_relativity(origin, relativize)
+        tok.get_eol()
+        return cls(rdclass, rdtype, preference, exchange)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        pref = struct.pack("!H", self.preference)
+        file.write(pref)
+        self.exchange.to_wire(file, compress, origin)
+
+    def to_digestable(self, origin = None):
+        return struct.pack("!H", self.preference) + \
+            self.exchange.to_digestable(origin)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        (preference, ) = struct.unpack('!H', wire[current : current + 2])
+        current += 2
+        rdlen -= 2
+        (exchange, cused) = dns.name.from_wire(wire[: current + rdlen],
+                                               current)
+        if cused != rdlen:
+            raise dns.exception.FormError
+        if not origin is None:
+            exchange = exchange.relativize(origin)
+        return cls(rdclass, rdtype, preference, exchange)
+
+    from_wire = classmethod(from_wire)
+
+    def choose_relativity(self, origin = None, relativize = True):
+        self.exchange = self.exchange.choose_relativity(origin, relativize)
+
+    def _cmp(self, other):
+        sp = struct.pack("!H", self.preference)
+        op = struct.pack("!H", other.preference)
+        v = cmp(sp, op)
+        if v == 0:
+            v = cmp(self.exchange, other.exchange)
+        return v
+
+class UncompressedMX(MXBase):
+    """Base class for rdata that is like an MX record, but whose name
+    is not compressed when converted to DNS wire format, and whose
+    digestable form is not downcased."""
+
+    def to_wire(self, file, compress = None, origin = None):
+        super(UncompressedMX, self).to_wire(file, None, origin)
+
+    def to_digestable(self, origin = None):
+        f = cStringIO.StringIO()
+        self.to_wire(f, None, origin)
+        return f.getvalue()
+
+class UncompressedDowncasingMX(MXBase):
+    """Base class for rdata that is like an MX record, but whose name
+    is not compressed when converted to DNS wire format."""
+
+    def to_wire(self, file, compress = None, origin = None):
+        super(UncompressedDowncasingMX, self).to_wire(file, None, origin)
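
Concrete MX-like types are expected to be thin subclasses of the classes above (dnspython's MX record, for instance, reuses MXBase directly), and the text form is simply '<preference> <exchange>'. A minimal sketch, assuming the vendored `dns` package is importable (Python 2):

    import dns.name
    import dns.rdataclass
    import dns.rdatatype
    from dns.rdtypes.mxbase import MXBase

    mx = MXBase(dns.rdataclass.IN, dns.rdatatype.MX,
                preference=10, exchange=dns.name.from_text('mail.example.'))
    # With no origin given, choose_relativity() leaves the absolute name alone.
    print mx.to_text()   # expected: '10 mail.example.'
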
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/nsbase.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/nsbase.py
new file mode 100644
index 0000000..7cdb2a0
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/nsbase.py
@@ -0,0 +1,82 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""NS-like base classes."""
+
+import cStringIO
+
+import dns.exception
+import dns.rdata
+import dns.name
+
+class NSBase(dns.rdata.Rdata):
+    """Base class for rdata that is like an NS record.
+
+    @ivar target: the target name of the rdata
+    @type target: dns.name.Name object"""
+
+    __slots__ = ['target']
+
+    def __init__(self, rdclass, rdtype, target):
+        super(NSBase, self).__init__(rdclass, rdtype)
+        self.target = target
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        target = self.target.choose_relativity(origin, relativize)
+        return str(target)
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        target = tok.get_name()
+        target = target.choose_relativity(origin, relativize)
+        tok.get_eol()
+        return cls(rdclass, rdtype, target)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        self.target.to_wire(file, compress, origin)
+
+    def to_digestable(self, origin = None):
+        return self.target.to_digestable(origin)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        (target, cused) = dns.name.from_wire(wire[: current + rdlen],
+                                             current)
+        if cused != rdlen:
+            raise dns.exception.FormError
+        if not origin is None:
+            target = target.relativize(origin)
+        return cls(rdclass, rdtype, target)
+
+    from_wire = classmethod(from_wire)
+
+    def choose_relativity(self, origin = None, relativize = True):
+        self.target = self.target.choose_relativity(origin, relativize)
+
+    def _cmp(self, other):
+        return cmp(self.target, other.target)
+
+class UncompressedNS(NSBase):
+    """Base class for rdata that is like an NS record, but whose name
+    is not compressed when converted to DNS wire format, and whose
+    digestable form is not downcased."""
+
+    def to_wire(self, file, compress = None, origin = None):
+        super(UncompressedNS, self).to_wire(file, None, origin)
+
+    def to_digestable(self, origin = None):
+        f = cStringIO.StringIO()
+        self.to_wire(f, None, origin)
+        return f.getvalue()
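
The difference between NSBase and UncompressedNS is only whether the shared compression table is passed down to Name.to_wire(). A sketch of the effect, assuming the vendored `dns` package is importable (Python 2); writing the same target twice with one table should collapse the second occurrence into a 2-byte compression pointer:

    import cStringIO
    import dns.name
    import dns.rdataclass
    import dns.rdatatype
    from dns.rdtypes.nsbase import NSBase

    out = cStringIO.StringIO()
    compress = {}
    target = dns.name.from_text('ns1.example.')
    rd = NSBase(dns.rdataclass.IN, dns.rdatatype.NS, target)
    rd.to_wire(out, compress)                  # full name written
    first = len(out.getvalue())
    rd.to_wire(out, compress)                  # pointer back to offset 0
    print first, len(out.getvalue()) - first   # expected: 13 2
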
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/sigbase.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/sigbase.py
new file mode 100644
index 0000000..ccb6dd6
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/sigbase.py
@@ -0,0 +1,168 @@
+# Copyright (C) 2004-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import calendar
+import struct
+import time
+
+import dns.dnssec
+import dns.exception
+import dns.rdata
+import dns.rdatatype
+
+class BadSigTime(dns.exception.DNSException):
+    """Raised when a SIG or RRSIG RR's time cannot be parsed."""
+    pass
+
+def sigtime_to_posixtime(what):
+    if len(what) != 14:
+        raise BadSigTime
+    year = int(what[0:4])
+    month = int(what[4:6])
+    day = int(what[6:8])
+    hour = int(what[8:10])
+    minute = int(what[10:12])
+    second = int(what[12:14])
+    return calendar.timegm((year, month, day, hour, minute, second,
+                            0, 0, 0))
+
+def posixtime_to_sigtime(what):
+    return time.strftime('%Y%m%d%H%M%S', time.gmtime(what))
+
+class SIGBase(dns.rdata.Rdata):
+    """SIG-like record base
+
+    @ivar type_covered: the rdata type this signature covers
+    @type type_covered: int
+    @ivar algorithm: the algorithm used for the sig
+    @type algorithm: int
+    @ivar labels: number of labels
+    @type labels: int
+    @ivar original_ttl: the original TTL
+    @type original_ttl: long
+    @ivar expiration: signature expiration time
+    @type expiration: long
+    @ivar inception: signature inception time
+    @type inception: long
+    @ivar key_tag: the key tag
+    @type key_tag: int
+    @ivar signer: the signer
+    @type signer: dns.name.Name object
+    @ivar signature: the signature
+    @type signature: string"""
+
+    __slots__ = ['type_covered', 'algorithm', 'labels', 'original_ttl',
+                 'expiration', 'inception', 'key_tag', 'signer',
+                 'signature']
+
+    def __init__(self, rdclass, rdtype, type_covered, algorithm, labels,
+                 original_ttl, expiration, inception, key_tag, signer,
+                 signature):
+        super(SIGBase, self).__init__(rdclass, rdtype)
+        self.type_covered = type_covered
+        self.algorithm = algorithm
+        self.labels = labels
+        self.original_ttl = original_ttl
+        self.expiration = expiration
+        self.inception = inception
+        self.key_tag = key_tag
+        self.signer = signer
+        self.signature = signature
+
+    def covers(self):
+        return self.type_covered
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        return '%s %d %d %d %s %s %d %s %s' % (
+            dns.rdatatype.to_text(self.type_covered),
+            self.algorithm,
+            self.labels,
+            self.original_ttl,
+            posixtime_to_sigtime(self.expiration),
+            posixtime_to_sigtime(self.inception),
+            self.key_tag,
+            self.signer,
+            dns.rdata._base64ify(self.signature)
+            )
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        type_covered = dns.rdatatype.from_text(tok.get_string())
+        algorithm = dns.dnssec.algorithm_from_text(tok.get_string())
+        labels = tok.get_int()
+        original_ttl = tok.get_ttl()
+        expiration = sigtime_to_posixtime(tok.get_string())
+        inception = sigtime_to_posixtime(tok.get_string())
+        key_tag = tok.get_int()
+        signer = tok.get_name()
+        signer = signer.choose_relativity(origin, relativize)
+        chunks = []
+        while 1:
+            t = tok.get().unescape()
+            if t.is_eol_or_eof():
+                break
+            if not t.is_identifier():
+                raise dns.exception.SyntaxError
+            chunks.append(t.value)
+        b64 = ''.join(chunks)
+        signature = b64.decode('base64_codec')
+        return cls(rdclass, rdtype, type_covered, algorithm, labels,
+                   original_ttl, expiration, inception, key_tag, signer,
+                   signature)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        header = struct.pack('!HBBIIIH', self.type_covered,
+                             self.algorithm, self.labels,
+                             self.original_ttl, self.expiration,
+                             self.inception, self.key_tag)
+        file.write(header)
+        self.signer.to_wire(file, None, origin)
+        file.write(self.signature)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        header = struct.unpack('!HBBIIIH', wire[current : current + 18])
+        current += 18
+        rdlen -= 18
+        (signer, cused) = dns.name.from_wire(wire[: current + rdlen], current)
+        current += cused
+        rdlen -= cused
+        if not origin is None:
+            signer = signer.relativize(origin)
+        signature = wire[current : current + rdlen]
+        return cls(rdclass, rdtype, header[0], header[1], header[2],
+                   header[3], header[4], header[5], header[6], signer,
+                   signature)
+
+    from_wire = classmethod(from_wire)
+
+    def choose_relativity(self, origin = None, relativize = True):
+        self.signer = self.signer.choose_relativity(origin, relativize)
+
+    def _cmp(self, other):
+        hs = struct.pack('!HBBIIIH', self.type_covered,
+                         self.algorithm, self.labels,
+                         self.original_ttl, self.expiration,
+                         self.inception, self.key_tag)
+        ho = struct.pack('!HBBIIIH', other.type_covered,
+                         other.algorithm, other.labels,
+                         other.original_ttl, other.expiration,
+                         other.inception, other.key_tag)
+        v = cmp(hs, ho)
+        if v == 0:
+            v = cmp(self.signer, other.signer)
+            if v == 0:
+                v = cmp(self.signature, other.signature)
+        return v
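
The two helper functions above convert between the YYYYMMDDHHMMSS presentation form used by SIG/RRSIG records and POSIX time. A quick round-trip, assuming the vendored `dns` package is importable (Python 2):

    from dns.rdtypes.sigbase import posixtime_to_sigtime, sigtime_to_posixtime

    t = sigtime_to_posixtime('20100101000000')
    print t                         # expected: 1262304000 (2010-01-01 00:00:00 UTC)
    print posixtime_to_sigtime(t)   # expected: '20100101000000'
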
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/txtbase.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/txtbase.py
new file mode 100644
index 0000000..43db2a4
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/txtbase.py
@@ -0,0 +1,87 @@
+# Copyright (C) 2006, 2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""TXT-like base class."""
+
+import dns.exception
+import dns.rdata
+import dns.tokenizer
+
+class TXTBase(dns.rdata.Rdata):
+    """Base class for rdata that is like a TXT record
+
+    @ivar strings: the text strings
+    @type strings: list of string
+    @see: RFC 1035"""
+
+    __slots__ = ['strings']
+
+    def __init__(self, rdclass, rdtype, strings):
+        super(TXTBase, self).__init__(rdclass, rdtype)
+        if isinstance(strings, str):
+            strings = [ strings ]
+        self.strings = strings[:]
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        txt = ''
+        prefix = ''
+        for s in self.strings:
+            txt += '%s"%s"' % (prefix, dns.rdata._escapify(s))
+            prefix = ' '
+        return txt
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        strings = []
+        while 1:
+            token = tok.get().unescape()
+            if token.is_eol_or_eof():
+                break
+            if not (token.is_quoted_string() or token.is_identifier()):
+                raise dns.exception.SyntaxError("expected a string")
+            if len(token.value) > 255:
+                raise dns.exception.SyntaxError("string too long")
+            strings.append(token.value)
+        if len(strings) == 0:
+            raise dns.exception.UnexpectedEnd
+        return cls(rdclass, rdtype, strings)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        for s in self.strings:
+            l = len(s)
+            assert l < 256
+            byte = chr(l)
+            file.write(byte)
+            file.write(s)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        strings = []
+        while rdlen > 0:
+            l = ord(wire[current])
+            current += 1
+            rdlen -= 1
+            if l > rdlen:
+                raise dns.exception.FormError
+            s = wire[current : current + l]
+            current += l
+            rdlen -= l
+            strings.append(s)
+        return cls(rdclass, rdtype, strings)
+
+    from_wire = classmethod(from_wire)
+
+    def _cmp(self, other):
+        return cmp(self.strings, other.strings)
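
On the wire, each TXT string is a one-byte length followed by the raw bytes, which is what to_wire() above emits and from_wire() consumes. A small sketch, assuming the vendored `dns` package is importable (Python 2):

    import cStringIO
    import dns.rdataclass
    import dns.rdatatype
    from dns.rdtypes.txtbase import TXTBase

    txt = TXTBase(dns.rdataclass.IN, dns.rdatatype.TXT, ['hello', 'world'])
    out = cStringIO.StringIO()
    txt.to_wire(out)
    print repr(out.getvalue())   # expected: '\x05hello\x05world'
    print txt.to_text()          # expected: '"hello" "world"'
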
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/renderer.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/renderer.py
new file mode 100644
index 0000000..bb0218a
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/renderer.py
@@ -0,0 +1,324 @@
+# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Help for building DNS wire format messages"""
+
+import cStringIO
+import struct
+import random
+import time
+
+import dns.exception
+import dns.tsig
+
+QUESTION = 0
+ANSWER = 1
+AUTHORITY = 2
+ADDITIONAL = 3
+
+class Renderer(object):
+    """Helper class for building DNS wire-format messages.
+
+    Most applications can use the higher-level L{dns.message.Message}
+    class and its to_wire() method to generate wire-format messages.
+    This class is for those applications which need finer control
+    over the generation of messages.
+
+    Typical use::
+
+        r = dns.renderer.Renderer(id=1, flags=0x80, max_size=512)
+        r.add_question(qname, qtype, qclass)
+        r.add_rrset(dns.renderer.ANSWER, rrset_1)
+        r.add_rrset(dns.renderer.ANSWER, rrset_2)
+        r.add_rrset(dns.renderer.AUTHORITY, ns_rrset)
+        r.add_edns(0, 0, 4096)
+        r.add_rrset(dns.renderer.ADDITIONAL, ad_rrset_1)
+        r.add_rrset(dns.renderer.ADDITIONAL, ad_rrset_2)
+        r.write_header()
+        r.add_tsig(keyname, secret, 300, 1, 0, '', request_mac)
+        wire = r.get_wire()
+
+    @ivar output: where rendering is written
+    @type output: cStringIO.StringIO object
+    @ivar id: the message id
+    @type id: int
+    @ivar flags: the message flags
+    @type flags: int
+    @ivar max_size: the maximum size of the message
+    @type max_size: int
+    @ivar origin: the origin to use when rendering relative names
+    @type origin: dns.name.Name object
+    @ivar compress: the compression table
+    @type compress: dict
+    @ivar section: the section currently being rendered
+    @type section: int (dns.renderer.QUESTION, dns.renderer.ANSWER,
+    dns.renderer.AUTHORITY, or dns.renderer.ADDITIONAL)
+    @ivar counts: list of the number of RRs in each section
+    @type counts: int list of length 4
+    @ivar mac: the MAC of the rendered message (if TSIG was used)
+    @type mac: string
+    """
+
+    def __init__(self, id=None, flags=0, max_size=65535, origin=None):
+        """Initialize a new renderer.
+
+        @param id: the message id
+        @type id: int
+        @param flags: the DNS message flags
+        @type flags: int
+        @param max_size: the maximum message size; the default is 65535.
+        If rendering results in a message greater than I{max_size},
+        then L{dns.exception.TooBig} will be raised.
+        @type max_size: int
+        @param origin: the origin to use when rendering relative names
+        @type origin: dns.name.Name object, or None.
+        """
+
+        self.output = cStringIO.StringIO()
+        if id is None:
+            self.id = random.randint(0, 65535)
+        else:
+            self.id = id
+        self.flags = flags
+        self.max_size = max_size
+        self.origin = origin
+        self.compress = {}
+        self.section = QUESTION
+        self.counts = [0, 0, 0, 0]
+        self.output.write('\x00' * 12)
+        self.mac = ''
+
+    def _rollback(self, where):
+        """Truncate the output buffer at offset I{where}, and remove any
+        compression table entries that pointed beyond the truncation
+        point.
+
+        @param where: the offset
+        @type where: int
+        """
+
+        self.output.seek(where)
+        self.output.truncate()
+        keys_to_delete = []
+        for k, v in self.compress.iteritems():
+            if v >= where:
+                keys_to_delete.append(k)
+        for k in keys_to_delete:
+            del self.compress[k]
+
+    def _set_section(self, section):
+        """Set the renderer's current section.
+
+        Sections must be rendered in order: QUESTION, ANSWER, AUTHORITY,
+        ADDITIONAL.  Sections may be empty.
+
+        @param section: the section
+        @type section: int
+        @raises dns.exception.FormError: an attempt was made to set
+        a section value less than the current section.
+        """
+
+        if self.section != section:
+            if self.section > section:
+                raise dns.exception.FormError
+            self.section = section
+
+    def add_question(self, qname, rdtype, rdclass=dns.rdataclass.IN):
+        """Add a question to the message.
+
+        @param qname: the question name
+        @type qname: dns.name.Name
+        @param rdtype: the question rdata type
+        @type rdtype: int
+        @param rdclass: the question rdata class
+        @type rdclass: int
+        """
+
+        self._set_section(QUESTION)
+        before = self.output.tell()
+        qname.to_wire(self.output, self.compress, self.origin)
+        self.output.write(struct.pack("!HH", rdtype, rdclass))
+        after = self.output.tell()
+        if after >= self.max_size:
+            self._rollback(before)
+            raise dns.exception.TooBig
+        self.counts[QUESTION] += 1
+
+    def add_rrset(self, section, rrset, **kw):
+        """Add the rrset to the specified section.
+
+        Any keyword arguments are passed on to the rdataset's to_wire()
+        routine.
+
+        @param section: the section
+        @type section: int
+        @param rrset: the rrset
+        @type rrset: dns.rrset.RRset object
+        """
+
+        self._set_section(section)
+        before = self.output.tell()
+        n = rrset.to_wire(self.output, self.compress, self.origin, **kw)
+        after = self.output.tell()
+        if after >= self.max_size:
+            self._rollback(before)
+            raise dns.exception.TooBig
+        self.counts[section] += n
+
+    def add_rdataset(self, section, name, rdataset, **kw):
+        """Add the rdataset to the specified section, using the specified
+        name as the owner name.
+
+        Any keyword arguments are passed on to the rdataset's to_wire()
+        routine.
+
+        @param section: the section
+        @type section: int
+        @param name: the owner name
+        @type name: dns.name.Name object
+        @param rdataset: the rdataset
+        @type rdataset: dns.rdataset.Rdataset object
+        """
+
+        self._set_section(section)
+        before = self.output.tell()
+        n = rdataset.to_wire(name, self.output, self.compress, self.origin,
+                             **kw)
+        after = self.output.tell()
+        if after >= self.max_size:
+            self._rollback(before)
+            raise dns.exception.TooBig
+        self.counts[section] += n
+
+    def add_edns(self, edns, ednsflags, payload, options=None):
+        """Add an EDNS OPT record to the message.
+
+        @param edns: The EDNS level to use.
+        @type edns: int
+        @param ednsflags: EDNS flag values.
+        @type ednsflags: int
+        @param payload: The EDNS sender's payload field, which is the maximum
+        size of UDP datagram the sender can handle.
+        @type payload: int
+        @param options: The EDNS options list
+        @type options: list of dns.edns.Option instances
+        @see: RFC 2671
+        """
+
+        # make sure the EDNS version in ednsflags agrees with edns
+        ednsflags &= 0xFF00FFFFL
+        ednsflags |= (edns << 16)
+        self._set_section(ADDITIONAL)
+        before = self.output.tell()
+        self.output.write(struct.pack('!BHHIH', 0, dns.rdatatype.OPT, payload,
+                                      ednsflags, 0))
+        if not options is None:
+            lstart = self.output.tell()
+            for opt in options:
+                stuff = struct.pack("!HH", opt.otype, 0)
+                self.output.write(stuff)
+                start = self.output.tell()
+                opt.to_wire(self.output)
+                end = self.output.tell()
+                assert end - start < 65536
+                self.output.seek(start - 2)
+                stuff = struct.pack("!H", end - start)
+                self.output.write(stuff)
+                self.output.seek(0, 2)
+            lend = self.output.tell()
+            assert lend - lstart < 65536
+            self.output.seek(lstart - 2)
+            stuff = struct.pack("!H", lend - lstart)
+            self.output.write(stuff)
+            self.output.seek(0, 2)
+        after = self.output.tell()
+        if after >= self.max_size:
+            self._rollback(before)
+            raise dns.exception.TooBig
+        self.counts[ADDITIONAL] += 1
+
+    def add_tsig(self, keyname, secret, fudge, id, tsig_error, other_data,
+                 request_mac, algorithm=dns.tsig.default_algorithm):
+        """Add a TSIG signature to the message.
+
+        @param keyname: the TSIG key name
+        @type keyname: dns.name.Name object
+        @param secret: the secret to use
+        @type secret: string
+        @param fudge: TSIG time fudge
+        @type fudge: int
+        @param id: the message id to encode in the tsig signature
+        @type id: int
+        @param tsig_error: TSIG error code.
+        @type tsig_error: int
+        @param other_data: TSIG other data.
+        @type other_data: string
+        @param request_mac: This message is a response to the request which
+        had the specified MAC.
+        @type request_mac: string
+        @param algorithm: the TSIG algorithm to use
+        """
+
+        self._set_section(ADDITIONAL)
+        before = self.output.tell()
+        s = self.output.getvalue()
+        (tsig_rdata, self.mac, ctx) = dns.tsig.sign(s,
+                                                    keyname,
+                                                    secret,
+                                                    int(time.time()),
+                                                    fudge,
+                                                    id,
+                                                    tsig_error,
+                                                    other_data,
+                                                    request_mac,
+                                                    algorithm=algorithm)
+        keyname.to_wire(self.output, self.compress, self.origin)
+        self.output.write(struct.pack('!HHIH', dns.rdatatype.TSIG,
+                                      dns.rdataclass.ANY, 0, 0))
+        rdata_start = self.output.tell()
+        self.output.write(tsig_rdata)
+        after = self.output.tell()
+        assert after - rdata_start < 65536
+        if after >= self.max_size:
+            self._rollback(before)
+            raise dns.exception.TooBig
+        self.output.seek(rdata_start - 2)
+        self.output.write(struct.pack('!H', after - rdata_start))
+        self.counts[ADDITIONAL] += 1
+        self.output.seek(10)
+        self.output.write(struct.pack('!H', self.counts[ADDITIONAL]))
+        self.output.seek(0, 2)
+
+    def write_header(self):
+        """Write the DNS message header.
+
+        Writing the DNS message header is done after all sections
+        have been rendered, but before the optional TSIG signature
+        is added.
+        """
+
+        self.output.seek(0)
+        self.output.write(struct.pack('!HHHHHH', self.id, self.flags,
+                                      self.counts[0], self.counts[1],
+                                      self.counts[2], self.counts[3]))
+        self.output.seek(0, 2)
+
+    def get_wire(self):
+        """Return the wire format message.
+
+        @rtype: string
+        """
+
+        return self.output.getvalue()
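
Both add_edns() and add_tsig() above use the same trick for variable-length fields: reserve a 2-byte length, write the payload, then seek back and patch in the real length before returning to the end of the buffer. A standalone sketch of just that step (Python 2; the payload string is arbitrary):

    import cStringIO
    import struct

    out = cStringIO.StringIO()
    out.write(struct.pack('!H', 0))             # placeholder length
    start = out.tell()
    out.write('some rdata bytes')               # variable-length payload
    end = out.tell()
    out.seek(start - 2)
    out.write(struct.pack('!H', end - start))   # patch the real length
    out.seek(0, 2)                              # return to the end, as the renderer does
    print repr(out.getvalue()[:2]), end - start   # expected: '\x00\x10' 16
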
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/resolver.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/resolver.py
new file mode 100644
index 0000000..372d7d8
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/resolver.py
@@ -0,0 +1,761 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS stub resolver.
+
+@var default_resolver: The default resolver object
+@type default_resolver: dns.resolver.Resolver object"""
+
+import socket
+import sys
+import time
+
+import dns.exception
+import dns.message
+import dns.name
+import dns.query
+import dns.rcode
+import dns.rdataclass
+import dns.rdatatype
+
+if sys.platform == 'win32':
+    import _winreg
+
+class NXDOMAIN(dns.exception.DNSException):
+    """The query name does not exist."""
+    pass
+
+# The definition of the Timeout exception has moved from here to the
+# dns.exception module.  We keep dns.resolver.Timeout defined for
+# backwards compatibility.
+
+Timeout = dns.exception.Timeout
+
+class NoAnswer(dns.exception.DNSException):
+    """The response did not contain an answer to the question."""
+    pass
+
+class NoNameservers(dns.exception.DNSException):
+    """No non-broken nameservers are available to answer the query."""
+    pass
+
+class NotAbsolute(dns.exception.DNSException):
+    """Raised if an absolute domain name is required but a relative name
+    was provided."""
+    pass
+
+class NoRootSOA(dns.exception.DNSException):
+    """Raised if for some reason there is no SOA at the root name.
+    This should never happen!"""
+    pass
+
+
+class Answer(object):
+    """DNS stub resolver answer
+
+    Instances of this class bundle up the result of a successful DNS
+    resolution.
+
+    For convenience, the answer object implements much of the sequence
+    protocol, forwarding to its rrset.  E.g. "for a in answer" is
+    equivalent to "for a in answer.rrset", "answer[i]" is equivalent
+    to "answer.rrset[i]", and "answer[i:j]" is equivalent to
+    "answer.rrset[i:j]".
+
+    Note that CNAMEs or DNAMEs in the response may mean that the answer
+    node's name is not the query name.
+
+    @ivar qname: The query name
+    @type qname: dns.name.Name object
+    @ivar rdtype: The query type
+    @type rdtype: int
+    @ivar rdclass: The query class
+    @type rdclass: int
+    @ivar response: The response message
+    @type response: dns.message.Message object
+    @ivar rrset: The answer
+    @type rrset: dns.rrset.RRset object
+    @ivar expiration: The time when the answer expires
+    @type expiration: float (seconds since the epoch)
+    """
+    def __init__(self, qname, rdtype, rdclass, response):
+        self.qname = qname
+        self.rdtype = rdtype
+        self.rdclass = rdclass
+        self.response = response
+        min_ttl = -1
+        rrset = None
+        for count in xrange(0, 15):
+            try:
+                rrset = response.find_rrset(response.answer, qname,
+                                            rdclass, rdtype)
+                if min_ttl == -1 or rrset.ttl < min_ttl:
+                    min_ttl = rrset.ttl
+                break
+            except KeyError:
+                if rdtype != dns.rdatatype.CNAME:
+                    try:
+                        crrset = response.find_rrset(response.answer,
+                                                     qname,
+                                                     rdclass,
+                                                     dns.rdatatype.CNAME)
+                        if min_ttl == -1 or crrset.ttl < min_ttl:
+                            min_ttl = crrset.ttl
+                        for rd in crrset:
+                            qname = rd.target
+                            break
+                        continue
+                    except KeyError:
+                        raise NoAnswer
+                raise NoAnswer
+        if rrset is None:
+            raise NoAnswer
+        self.rrset = rrset
+        self.expiration = time.time() + min_ttl
+
+    def __getattr__(self, attr):
+        if attr == 'name':
+            return self.rrset.name
+        elif attr == 'ttl':
+            return self.rrset.ttl
+        elif attr == 'covers':
+            return self.rrset.covers
+        elif attr == 'rdclass':
+            return self.rrset.rdclass
+        elif attr == 'rdtype':
+            return self.rrset.rdtype
+        else:
+            raise AttributeError(attr)
+
+    def __len__(self):
+        return len(self.rrset)
+
+    def __iter__(self):
+        return iter(self.rrset)
+
+    def __getitem__(self, i):
+        return self.rrset[i]
+
+    def __delitem__(self, i):
+        del self.rrset[i]
+
+    def __getslice__(self, i, j):
+        return self.rrset[i:j]
+
+    def __delslice__(self, i, j):
+        del self.rrset[i:j]
+
+class Cache(object):
+    """Simple DNS answer cache.
+
+    @ivar data: A dictionary of cached data
+    @type data: dict
+    @ivar cleaning_interval: The number of seconds between cleanings.  The
+    default is 300 (5 minutes).
+    @type cleaning_interval: float
+    @ivar next_cleaning: The time the cache should next be cleaned (in seconds
+    since the epoch.)
+    @type next_cleaning: float
+    """
+
+    def __init__(self, cleaning_interval=300.0):
+        """Initialize a DNS cache.
+
+        @param cleaning_interval: the number of seconds between periodic
+        cleanings.  The default is 300.0
+        @type cleaning_interval: float.
+        """
+
+        self.data = {}
+        self.cleaning_interval = cleaning_interval
+        self.next_cleaning = time.time() + self.cleaning_interval
+
+    def maybe_clean(self):
+        """Clean the cache if it's time to do so."""
+
+        now = time.time()
+        if self.next_cleaning <= now:
+            keys_to_delete = []
+            for (k, v) in self.data.iteritems():
+                if v.expiration <= now:
+                    keys_to_delete.append(k)
+            for k in keys_to_delete:
+                del self.data[k]
+            now = time.time()
+            self.next_cleaning = now + self.cleaning_interval
+
+    def get(self, key):
+        """Get the answer associated with I{key}.  Returns None if
+        no answer is cached for the key.
+        @param key: the key
+        @type key: (dns.name.Name, int, int) tuple whose values are the
+        query name, rdtype, and rdclass.
+        @rtype: dns.resolver.Answer object or None
+        """
+
+        self.maybe_clean()
+        v = self.data.get(key)
+        if v is None or v.expiration <= time.time():
+            return None
+        return v
+
+    def put(self, key, value):
+        """Associate key and value in the cache.
+        @param key: the key
+        @type key: (dns.name.Name, int, int) tuple whose values are the
+        query name, rdtype, and rdclass.
+        @param value: The answer being cached
+        @type value: dns.resolver.Answer object
+        """
+
+        self.maybe_clean()
+        self.data[key] = value
+
+    def flush(self, key=None):
+        """Flush the cache.
+
+        If I{key} is specified, only that item is flushed.  Otherwise
+        the entire cache is flushed.
+
+        @param key: the key to flush
+        @type key: (dns.name.Name, int, int) tuple or None
+        """
+
+        if not key is None:
+            if self.data.has_key(key):
+                del self.data[key]
+        else:
+            self.data = {}
+            self.next_cleaning = time.time() + self.cleaning_interval
+
+class Resolver(object):
+    """DNS stub resolver
+
+    @ivar domain: The domain of this host
+    @type domain: dns.name.Name object
+    @ivar nameservers: A list of nameservers to query.  Each nameserver is
+    a string which contains the IP address of a nameserver.
+    @type nameservers: list of strings
+    @ivar search: The search list.  If the query name is a relative name,
+    the resolver will construct an absolute query name by appending the search
+    names one by one to the query name.
+    @type search: list of dns.name.Name objects
+    @ivar port: The port to which to send queries.  The default is 53.
+    @type port: int
+    @ivar timeout: The number of seconds to wait for a response from a
+    server, before timing out.
+    @type timeout: float
+    @ivar lifetime: The total number of seconds to spend trying to get an
+    answer to the question.  If the lifetime expires, a Timeout exception
+    will occur.
+    @type lifetime: float
+    @ivar keyring: The TSIG keyring to use.  The default is None.
+    @type keyring: dict
+    @ivar keyname: The TSIG keyname to use.  The default is None.
+    @type keyname: dns.name.Name object
+    @ivar keyalgorithm: The TSIG key algorithm to use.  The default is
+    dns.tsig.default_algorithm.
+    @type keyalgorithm: string
+    @ivar edns: The EDNS level to use.  The default is -1 (no EDNS).
+    @type edns: int
+    @ivar ednsflags: The EDNS flags
+    @type ednsflags: int
+    @ivar payload: The EDNS payload size.  The default is 0.
+    @type payload: int
+    @ivar cache: The cache to use.  The default is None.
+    @type cache: dns.resolver.Cache object
+    """
+    def __init__(self, filename='/etc/resolv.conf', configure=True):
+        """Initialize a resolver instance.
+
+        @param filename: The filename of a configuration file in
+        standard /etc/resolv.conf format.  This parameter is meaningful
+        only when I{configure} is true and the platform is POSIX.
+        @type filename: string or file object
+        @param configure: If True (the default), the resolver instance
+        is configured in the normal fashion for the operating system
+        the resolver is running on.  (I.e. a /etc/resolv.conf file on
+        POSIX systems and from the registry on Windows systems.)
+        @type configure: bool"""
+
+        self.reset()
+        if configure:
+            if sys.platform == 'win32':
+                self.read_registry()
+            elif filename:
+                self.read_resolv_conf(filename)
+
+    def reset(self):
+        """Reset all resolver configuration to the defaults."""
+        self.domain = \
+            dns.name.Name(dns.name.from_text(socket.gethostname())[1:])
+        if len(self.domain) == 0:
+            self.domain = dns.name.root
+        self.nameservers = []
+        self.search = []
+        self.port = 53
+        self.timeout = 2.0
+        self.lifetime = 30.0
+        self.keyring = None
+        self.keyname = None
+        self.keyalgorithm = dns.tsig.default_algorithm
+        self.edns = -1
+        self.ednsflags = 0
+        self.payload = 0
+        self.cache = None
+
+    def read_resolv_conf(self, f):
+        """Process f as a file in the /etc/resolv.conf format.  If f is
+        a string, it is used as the name of the file to open; otherwise it
+        is treated as the file itself."""
+        if isinstance(f, str) or isinstance(f, unicode):
+            try:
+                f = open(f, 'r')
+            except IOError:
+                # /etc/resolv.conf doesn't exist, can't be read, etc.
+                # We'll just use the default resolver configuration.
+                self.nameservers = ['127.0.0.1']
+                return
+            want_close = True
+        else:
+            want_close = False
+        try:
+            for l in f:
+                if len(l) == 0 or l[0] == '#' or l[0] == ';':
+                    continue
+                tokens = l.split()
+                if len(tokens) == 0:
+                    continue
+                if tokens[0] == 'nameserver':
+                    self.nameservers.append(tokens[1])
+                elif tokens[0] == 'domain':
+                    self.domain = dns.name.from_text(tokens[1])
+                elif tokens[0] == 'search':
+                    for suffix in tokens[1:]:
+                        self.search.append(dns.name.from_text(suffix))
+        finally:
+            if want_close:
+                f.close()
+        if len(self.nameservers) == 0:
+            self.nameservers.append('127.0.0.1')
+
+    def _determine_split_char(self, entry):
+        #
+        # The windows registry irritatingly changes the list element
+        # delimiter in between ' ' and ',' (and vice-versa) in various
+        # versions of windows.
+        #
+        if entry.find(' ') >= 0:
+            split_char = ' '
+        elif entry.find(',') >= 0:
+            split_char = ','
+        else:
+            # probably a singleton; treat as a space-separated list.
+            split_char = ' '
+        return split_char
+
+    def _config_win32_nameservers(self, nameservers):
+        """Configure a NameServer registry entry."""
+        # we call str() on nameservers to convert it from unicode to ascii
+        nameservers = str(nameservers)
+        split_char = self._determine_split_char(nameservers)
+        ns_list = nameservers.split(split_char)
+        for ns in ns_list:
+            if not ns in self.nameservers:
+                self.nameservers.append(ns)
+
+    def _config_win32_domain(self, domain):
+        """Configure a Domain registry entry."""
+        # we call str() on domain to convert it from unicode to ascii
+        self.domain = dns.name.from_text(str(domain))
+
+    def _config_win32_search(self, search):
+        """Configure a Search registry entry."""
+        # we call str() on search to convert it from unicode to ascii
+        search = str(search)
+        split_char = self._determine_split_char(search)
+        search_list = search.split(split_char)
+        for s in search_list:
+            if not s in self.search:
+                self.search.append(dns.name.from_text(s))
+
+    def _config_win32_fromkey(self, key):
+        """Extract DNS info from a registry key."""
+        try:
+            servers, rtype = _winreg.QueryValueEx(key, 'NameServer')
+        except WindowsError:
+            servers = None
+        if servers:
+            self._config_win32_nameservers(servers)
+            try:
+                dom, rtype = _winreg.QueryValueEx(key, 'Domain')
+                if dom:
+                    self._config_win32_domain(dom)
+            except WindowsError:
+                pass
+        else:
+            try:
+                servers, rtype = _winreg.QueryValueEx(key, 'DhcpNameServer')
+            except WindowsError:
+                servers = None
+            if servers:
+                self._config_win32_nameservers(servers)
+                try:
+                    dom, rtype = _winreg.QueryValueEx(key, 'DhcpDomain')
+                    if dom:
+                        self._config_win32_domain(dom)
+                except WindowsError:
+                    pass
+        try:
+            search, rtype = _winreg.QueryValueEx(key, 'SearchList')
+        except WindowsError:
+            search = None
+        if search:
+            self._config_win32_search(search)
+
+    def read_registry(self):
+        """Extract resolver configuration from the Windows registry."""
+        lm = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
+        want_scan = False
+        try:
+            try:
+                # XP, 2000
+                tcp_params = _winreg.OpenKey(lm,
+                                             r'SYSTEM\CurrentControlSet'
+                                             r'\Services\Tcpip\Parameters')
+                want_scan = True
+            except EnvironmentError:
+                # ME
+                tcp_params = _winreg.OpenKey(lm,
+                                             r'SYSTEM\CurrentControlSet'
+                                             r'\Services\VxD\MSTCP')
+            try:
+                self._config_win32_fromkey(tcp_params)
+            finally:
+                tcp_params.Close()
+            if want_scan:
+                interfaces = _winreg.OpenKey(lm,
+                                             r'SYSTEM\CurrentControlSet'
+                                             r'\Services\Tcpip\Parameters'
+                                             r'\Interfaces')
+                try:
+                    i = 0
+                    while True:
+                        try:
+                            guid = _winreg.EnumKey(interfaces, i)
+                            i += 1
+                            key = _winreg.OpenKey(interfaces, guid)
+                            if not self._win32_is_nic_enabled(lm, guid, key):
+                                continue
+                            try:
+                                self._config_win32_fromkey(key)
+                            finally:
+                                key.Close()
+                        except EnvironmentError:
+                            break
+                finally:
+                    interfaces.Close()
+        finally:
+            lm.Close()
+
+    def _win32_is_nic_enabled(self, lm, guid, interface_key):
+         # Look in the Windows Registry to determine whether the network
+         # interface corresponding to the given guid is enabled.
+         #
+         # (Code contributed by Paul Marks, thanks!)
+         #
+         try:
+             # This hard-coded location seems to be consistent, at least
+             # from Windows 2000 through Vista.
+             connection_key = _winreg.OpenKey(
+                 lm,
+                 r'SYSTEM\CurrentControlSet\Control\Network'
+                 r'\{4D36E972-E325-11CE-BFC1-08002BE10318}'
+                 r'\%s\Connection' % guid)
+
+             try:
+                 # The PnpInstanceID points to a key inside Enum
+                 (pnp_id, ttype) = _winreg.QueryValueEx(
+                     connection_key, 'PnpInstanceID')
+
+                 if ttype != _winreg.REG_SZ:
+                     raise ValueError
+
+                 device_key = _winreg.OpenKey(
+                     lm, r'SYSTEM\CurrentControlSet\Enum\%s' % pnp_id)
+
+                 try:
+                     # Get ConfigFlags for this device
+                     (flags, ttype) = _winreg.QueryValueEx(
+                         device_key, 'ConfigFlags')
+
+                     if ttype != _winreg.REG_DWORD:
+                         raise ValueError
+
+                     # Based on experimentation, bit 0x1 indicates that the
+                     # device is disabled.
+                     return not (flags & 0x1)
+
+                 finally:
+                     device_key.Close()
+             finally:
+                 connection_key.Close()
+         except (EnvironmentError, ValueError):
+             # Pre-vista, enabled interfaces seem to have a non-empty
+             # NTEContextList; this was how dnspython detected enabled
+             # nics before the code above was contributed.  We've retained
+             # the old method since we don't know if the code above works
+             # on Windows 95/98/ME.
+             try:
+                 (nte, ttype) = _winreg.QueryValueEx(interface_key,
+                                                     'NTEContextList')
+                 return nte is not None
+             except WindowsError:
+                 return False
+
+    def _compute_timeout(self, start):
+        now = time.time()
+        if now < start:
+            if start - now > 1:
+                # Time going backwards is bad.  Just give up.
+                raise Timeout
+            else:
+                # Time went backwards, but only a little.  This can
+                # happen, e.g. under vmware with older linux kernels.
+                # Pretend it didn't happen.
+                now = start
+        duration = now - start
+        if duration >= self.lifetime:
+            raise Timeout
+        return min(self.lifetime - duration, self.timeout)
+
+    def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
+              tcp=False, source=None):
+        """Query nameservers to find the answer to the question.
+
+        The I{qname}, I{rdtype}, and I{rdclass} parameters may be objects
+        of the appropriate type, or strings that can be converted into objects
+        of the appropriate type.  E.g. for I{rdtype}, the integer 2 and
+        the string 'NS' both mean to query for records with DNS rdata type NS.
+
+        @param qname: the query name
+        @type qname: dns.name.Name object or string
+        @param rdtype: the query type
+        @type rdtype: int or string
+        @param rdclass: the query class
+        @type rdclass: int or string
+        @param tcp: use TCP to make the query (default is False).
+        @type tcp: bool
+        @param source: bind to this IP address (defaults to machine default IP).
+        @type source: IP address in dotted quad notation
+        @rtype: dns.resolver.Answer instance
+        @raises Timeout: no answers could be found in the specified lifetime
+        @raises NXDOMAIN: the query name does not exist
+        @raises NoAnswer: the response did not contain an answer
+        @raises NoNameservers: no non-broken nameservers are available to
+        answer the question."""
+
+        if isinstance(qname, (str, unicode)):
+            qname = dns.name.from_text(qname, None)
+        if isinstance(rdtype, str):
+            rdtype = dns.rdatatype.from_text(rdtype)
+        if isinstance(rdclass, str):
+            rdclass = dns.rdataclass.from_text(rdclass)
+        qnames_to_try = []
+        if qname.is_absolute():
+            qnames_to_try.append(qname)
+        else:
+            if len(qname) > 1:
+                qnames_to_try.append(qname.concatenate(dns.name.root))
+            if self.search:
+                for suffix in self.search:
+                    qnames_to_try.append(qname.concatenate(suffix))
+            else:
+                qnames_to_try.append(qname.concatenate(self.domain))
+        all_nxdomain = True
+        start = time.time()
+        for qname in qnames_to_try:
+            if self.cache:
+                answer = self.cache.get((qname, rdtype, rdclass))
+                if answer:
+                    return answer
+            request = dns.message.make_query(qname, rdtype, rdclass)
+            if not self.keyname is None:
+                request.use_tsig(self.keyring, self.keyname, self.keyalgorithm)
+            request.use_edns(self.edns, self.ednsflags, self.payload)
+            response = None
+            #
+            # make a copy of the servers list so we can alter it later.
+            #
+            nameservers = self.nameservers[:]
+            backoff = 0.10
+            while response is None:
+                if len(nameservers) == 0:
+                    raise NoNameservers
+                for nameserver in nameservers[:]:
+                    timeout = self._compute_timeout(start)
+                    try:
+                        if tcp:
+                            response = dns.query.tcp(request, nameserver,
+                                                     timeout, self.port,
+                                                     source=source)
+                        else:
+                            response = dns.query.udp(request, nameserver,
+                                                     timeout, self.port,
+                                                     source=source)
+                    except (socket.error, dns.exception.Timeout):
+                        #
+                        # Communication failure or timeout.  Go to the
+                        # next server
+                        #
+                        response = None
+                        continue
+                    except dns.query.UnexpectedSource:
+                        #
+                        # Who knows?  Keep going.
+                        #
+                        response = None
+                        continue
+                    except dns.exception.FormError:
+                        #
+                        # We don't understand what this server is
+                        # saying.  Take it out of the mix and
+                        # continue.
+                        #
+                        nameservers.remove(nameserver)
+                        response = None
+                        continue
+                    rcode = response.rcode()
+                    if rcode == dns.rcode.NOERROR or \
+                           rcode == dns.rcode.NXDOMAIN:
+                        break
+                    #
+                    # We got a response, but we're not happy with the
+                    # rcode in it.  Remove the server from the mix if
+                    # the rcode isn't SERVFAIL.
+                    #
+                    if rcode != dns.rcode.SERVFAIL:
+                        nameservers.remove(nameserver)
+                    response = None
+                if not response is None:
+                    break
+                #
+                # All nameservers failed!
+                #
+                if len(nameservers) > 0:
+                    #
+                    # But we still have servers to try.  Sleep a bit
+                    # so we don't pound them!
+                    #
+                    timeout = self._compute_timeout(start)
+                    sleep_time = min(timeout, backoff)
+                    backoff *= 2
+                    time.sleep(sleep_time)
+            if response.rcode() == dns.rcode.NXDOMAIN:
+                continue
+            all_nxdomain = False
+            break
+        if all_nxdomain:
+            raise NXDOMAIN
+        answer = Answer(qname, rdtype, rdclass, response)
+        if self.cache:
+            self.cache.put((qname, rdtype, rdclass), answer)
+        return answer
+
+    def use_tsig(self, keyring, keyname=None,
+                 algorithm=dns.tsig.default_algorithm):
+        """Add a TSIG signature to the query.
+
+        @param keyring: The TSIG keyring to use; defaults to None.
+        @type keyring: dict
+        @param keyname: The name of the TSIG key to use; defaults to None.
+        The key must be defined in the keyring.  If a keyring is specified
+        but a keyname is not, then the key used will be the first key in the
+        keyring.  Note that the order of keys in a dictionary is not defined,
+        so applications should supply a keyname when a keyring is used, unless
+        they know the keyring contains only one key.
+        @param algorithm: The TSIG key algorithm to use.  The default
+        is dns.tsig.default_algorithm.
+        @type algorithm: string"""
+        self.keyring = keyring
+        if keyname is None:
+            self.keyname = self.keyring.keys()[0]
+        else:
+            self.keyname = keyname
+        self.keyalgorithm = algorithm
+
+    def use_edns(self, edns, ednsflags, payload):
+        """Configure EDNS.
+
+        @param edns: The EDNS level to use.  The default is -1, no EDNS.
+        @type edns: int
+        @param ednsflags: The EDNS flags
+        @type ednsflags: int
+        @param payload: The EDNS payload size.  The default is 0.
+        @type payload: int"""
+
+        if edns is None:
+            edns = -1
+        self.edns = edns
+        self.ednsflags = ednsflags
+        self.payload = payload
+
+default_resolver = None
+
+def get_default_resolver():
+    """Get the default resolver, initializing it if necessary."""
+    global default_resolver
+    if default_resolver is None:
+        default_resolver = Resolver()
+    return default_resolver
+
+def query(qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
+          tcp=False, source=None):
+    """Query nameservers to find the answer to the question.
+
+    This is a convenience function that uses the default resolver
+    object to make the query.
+    @see: L{dns.resolver.Resolver.query} for more information on the
+    parameters."""
+    return get_default_resolver().query(qname, rdtype, rdclass, tcp, source)
+
+def zone_for_name(name, rdclass=dns.rdataclass.IN, tcp=False, resolver=None):
+    """Find the name of the zone which contains the specified name.
+
+    @param name: the query name
+    @type name: absolute dns.name.Name object or string
+    @param rdclass: The query class
+    @type rdclass: int
+    @param tcp: use TCP to make the query (default is False).
+    @type tcp: bool
+    @param resolver: the resolver to use
+    @type resolver: dns.resolver.Resolver object or None
+    @rtype: dns.name.Name"""
+
+    if isinstance(name, (str, unicode)):
+        name = dns.name.from_text(name, dns.name.root)
+    if resolver is None:
+        resolver = get_default_resolver()
+    if not name.is_absolute():
+        raise NotAbsolute(name)
+    while 1:
+        try:
+            answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
+            return name
+        except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
+            try:
+                name = name.parent()
+            except dns.name.NoParent:
+                raise NoRootSOA
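# --- Illustrative usage (editor's sketch, not part of the vendored patch) ---
# A minimal example of the resolver API defined above, assuming a working
# dnspython installation and network access; 'example.com' is a placeholder.
import dns.resolver

answer = dns.resolver.query('example.com', 'A')   # module-level convenience wrapper
for rdata in answer:                              # Answer objects iterate over the rdatas
    print rdata.address

# zone_for_name() walks up the name, one label at a time, until an SOA is found.
print dns.resolver.zone_for_name('www.example.com')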
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/reversename.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/reversename.py
new file mode 100644
index 0000000..0a61b82
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/reversename.py
@@ -0,0 +1,75 @@
+# Copyright (C) 2006, 2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Reverse Map Names.
+
+@var ipv4_reverse_domain: The DNS IPv4 reverse-map domain, in-addr.arpa.
+@type ipv4_reverse_domain: dns.name.Name object
+@var ipv6_reverse_domain: The DNS IPv6 reverse-map domain, ip6.arpa.
+@type ipv6_reverse_domain: dns.name.Name object
+"""
+
+import dns.name
+import dns.ipv6
+import dns.ipv4
+
+ipv4_reverse_domain = dns.name.from_text('in-addr.arpa.')
+ipv6_reverse_domain = dns.name.from_text('ip6.arpa.')
+
+def from_address(text):
+    """Convert an IPv4 or IPv6 address in textual form into a Name object whose
+    value is the reverse-map domain name of the address.
+    @param text: an IPv4 or IPv6 address in textual form (e.g. '127.0.0.1',
+    '::1')
+    @type text: str
+    @rtype: dns.name.Name object
+    """
+    try:
+        parts = list(dns.ipv6.inet_aton(text).encode('hex_codec'))
+        origin = ipv6_reverse_domain
+    except:
+        parts = ['%d' % ord(byte) for byte in dns.ipv4.inet_aton(text)]
+        origin = ipv4_reverse_domain
+    parts.reverse()
+    return dns.name.from_text('.'.join(parts), origin=origin)
+
+def to_address(name):
+    """Convert a reverse map domain name into textual address form.
+    @param name: an IPv4 or IPv6 address in reverse-map form.
+    @type name: dns.name.Name object
+    @rtype: str
+    """
+    if name.is_subdomain(ipv4_reverse_domain):
+        name = name.relativize(ipv4_reverse_domain)
+        labels = list(name.labels)
+        labels.reverse()
+        text = '.'.join(labels)
+        # run through inet_aton() to check syntax and make pretty.
+        return dns.ipv4.inet_ntoa(dns.ipv4.inet_aton(text))
+    elif name.is_subdomain(ipv6_reverse_domain):
+        name = name.relativize(ipv6_reverse_domain)
+        labels = list(name.labels)
+        labels.reverse()
+        parts = []
+        i = 0
+        l = len(labels)
+        while i < l:
+            parts.append(''.join(labels[i:i+4]))
+            i += 4
+        text = ':'.join(parts)
+        # run through inet_aton() to check syntax and make pretty.
+        return dns.ipv6.inet_ntoa(dns.ipv6.inet_aton(text))
+    else:
+        raise dns.exception.SyntaxError('unknown reverse-map address family')
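# --- Illustrative usage (editor's sketch, not part of the vendored patch) ---
# Round trip through the reverse-map helpers above; the addresses are examples.
import dns.reversename

n4 = dns.reversename.from_address('127.0.0.1')
print n4                                   # 1.0.0.127.in-addr.arpa.
print dns.reversename.to_address(n4)       # 127.0.0.1

n6 = dns.reversename.from_address('::1')   # nibble-reversed under ip6.arpa.
print dns.reversename.to_address(n6)       # ::1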
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/rrset.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rrset.py
new file mode 100644
index 0000000..7f6c4af
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/rrset.py
@@ -0,0 +1,175 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS RRsets (an RRset is a named rdataset)"""
+
+import dns.name
+import dns.rdataset
+import dns.rdataclass
+import dns.renderer
+
+class RRset(dns.rdataset.Rdataset):
+    """A DNS RRset (named rdataset).
+
+    RRset inherits from Rdataset, and RRsets can be treated as
+    Rdatasets in most cases.  There are, however, a few notable
+    exceptions.  RRsets have different to_wire() and to_text() method
+    arguments, reflecting the fact that RRsets always have an owner
+    name.
+    """
+
+    __slots__ = ['name', 'deleting']
+
+    def __init__(self, name, rdclass, rdtype, covers=dns.rdatatype.NONE,
+                 deleting=None):
+        """Create a new RRset."""
+
+        super(RRset, self).__init__(rdclass, rdtype)
+        self.name = name
+        self.deleting = deleting
+
+    def _clone(self):
+        obj = super(RRset, self)._clone()
+        obj.name = self.name
+        obj.deleting = self.deleting
+        return obj
+
+    def __repr__(self):
+        if self.covers == 0:
+            ctext = ''
+        else:
+            ctext = '(' + dns.rdatatype.to_text(self.covers) + ')'
+        if not self.deleting is None:
+            dtext = ' delete=' + dns.rdataclass.to_text(self.deleting)
+        else:
+            dtext = ''
+        return '<DNS ' + str(self.name) + ' ' + \
+               dns.rdataclass.to_text(self.rdclass) + ' ' + \
+               dns.rdatatype.to_text(self.rdtype) + ctext + dtext + ' RRset>'
+
+    def __str__(self):
+        return self.to_text()
+
+    def __eq__(self, other):
+        """Two RRsets are equal if they have the same name and the same
+        rdataset
+
+        @rtype: bool"""
+        if not isinstance(other, RRset):
+            return False
+        if self.name != other.name:
+            return False
+        return super(RRset, self).__eq__(other)
+
+    def match(self, name, rdclass, rdtype, covers, deleting=None):
+        """Returns True if this rrset matches the specified class, type,
+        covers, and deletion state."""
+
+        if not super(RRset, self).match(rdclass, rdtype, covers):
+            return False
+        if self.name != name or self.deleting != deleting:
+            return False
+        return True
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        """Convert the RRset into DNS master file format.
+
+        @see: L{dns.name.Name.choose_relativity} for more information
+        on how I{origin} and I{relativize} determine the way names
+        are emitted.
+
+        Any additional keyword arguments are passed on to the rdata
+        to_text() method.
+
+        @param origin: The origin for relative names, or None.
+        @type origin: dns.name.Name object
+        @param relativize: True if names should be relativized
+        @type relativize: bool"""
+
+        return super(RRset, self).to_text(self.name, origin, relativize,
+                                          self.deleting, **kw)
+
+    def to_wire(self, file, compress=None, origin=None, **kw):
+        """Convert the RRset to wire format."""
+
+        return super(RRset, self).to_wire(self.name, file, compress, origin,
+                                          self.deleting, **kw)
+
+    def to_rdataset(self):
+        """Convert an RRset into an Rdataset.
+
+        @rtype: dns.rdataset.Rdataset object
+        """
+        return dns.rdataset.from_rdata_list(self.ttl, list(self))
+
+
+def from_text_list(name, ttl, rdclass, rdtype, text_rdatas):
+    """Create an RRset with the specified name, TTL, class, and type, and with
+    the specified list of rdatas in text format.
+
+    @rtype: dns.rrset.RRset object
+    """
+
+    if isinstance(name, (str, unicode)):
+        name = dns.name.from_text(name, None)
+    if isinstance(rdclass, str):
+        rdclass = dns.rdataclass.from_text(rdclass)
+    if isinstance(rdtype, str):
+        rdtype = dns.rdatatype.from_text(rdtype)
+    r = RRset(name, rdclass, rdtype)
+    r.update_ttl(ttl)
+    for t in text_rdatas:
+        rd = dns.rdata.from_text(r.rdclass, r.rdtype, t)
+        r.add(rd)
+    return r
+
+def from_text(name, ttl, rdclass, rdtype, *text_rdatas):
+    """Create an RRset with the specified name, TTL, class, and type and with
+    the specified rdatas in text format.
+
+    @rtype: dns.rrset.RRset object
+    """
+
+    return from_text_list(name, ttl, rdclass, rdtype, text_rdatas)
+
+def from_rdata_list(name, ttl, rdatas):
+    """Create an RRset with the specified name and TTL, and with
+    the specified list of rdata objects.
+
+    @rtype: dns.rrset.RRset object
+    """
+
+    if isinstance(name, (str, unicode)):
+        name = dns.name.from_text(name, None)
+
+    if len(rdatas) == 0:
+        raise ValueError("rdata list must not be empty")
+    r = None
+    for rd in rdatas:
+        if r is None:
+            r = RRset(name, rd.rdclass, rd.rdtype)
+            r.update_ttl(ttl)
+            first_time = False
+        r.add(rd)
+    return r
+
+def from_rdata(name, ttl, *rdatas):
+    """Create an RRset with the specified name and TTL, and with
+    the specified rdata objects.
+
+    @rtype: dns.rrset.RRset object
+    """
+
+    return from_rdata_list(name, ttl, rdatas)
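# --- Illustrative usage (editor's sketch, not part of the vendored patch) ---
# Building an RRset from text rdatas with the helpers above; the owner name
# and addresses are placeholders.
import dns.rrset

rrs = dns.rrset.from_text('www.example.com.', 300, 'IN', 'A',
                          '10.0.0.1', '10.0.0.2')
print rrs            # __str__ uses to_text(), i.e. master-file format
print len(rrs)       # 2 rdatas in the set
print rrs.name       # the owner name, unlike a plain Rdataset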
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/set.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/set.py
new file mode 100644
index 0000000..91f9fb8
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/set.py
@@ -0,0 +1,263 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""A simple Set class."""
+
+class Set(object):
+    """A simple set class.
+
+    Sets are not in Python until 2.3, and rdata are not immutable so
+    we cannot use sets.Set anyway.  This class implements a subset of
+    the 2.3 Set interface using a list as the container.
+
+    @ivar items: A list of the items which are in the set
+    @type items: list"""
+
+    __slots__ = ['items']
+
+    def __init__(self, items=None):
+        """Initialize the set.
+
+        @param items: the initial set of items
+        @type items: any iterable or None
+        """
+
+        self.items = []
+        if not items is None:
+            for item in items:
+                self.add(item)
+
+    def __repr__(self):
+        return "dns.simpleset.Set(%s)" % repr(self.items)
+
+    def add(self, item):
+        """Add an item to the set."""
+        if not item in self.items:
+            self.items.append(item)
+
+    def remove(self, item):
+        """Remove an item from the set."""
+        self.items.remove(item)
+
+    def discard(self, item):
+        """Remove an item from the set if present."""
+        try:
+            self.items.remove(item)
+        except ValueError:
+            pass
+
+    def _clone(self):
+        """Make a (shallow) copy of the set.
+
+        There is a 'clone protocol' that subclasses of this class
+        should use.  To make a copy, first call your super's _clone()
+        method, and use the object returned as the new instance.  Then
+        make shallow copies of the attributes defined in the subclass.
+
+        This protocol allows us to write the set algorithms that
+        return new instances (e.g. union) once, and keep using them in
+        subclasses.
+        """
+
+        cls = self.__class__
+        obj = cls.__new__(cls)
+        obj.items = list(self.items)
+        return obj
+
+    def __copy__(self):
+        """Make a (shallow) copy of the set."""
+        return self._clone()
+
+    def copy(self):
+        """Make a (shallow) copy of the set."""
+        return self._clone()
+
+    def union_update(self, other):
+        """Update the set, adding any elements from other which are not
+        already in the set.
+        @param other: the collection of items with which to update the set
+        @type other: Set object
+        """
+        if not isinstance(other, Set):
+            raise ValueError('other must be a Set instance')
+        if self is other:
+            return
+        for item in other.items:
+            self.add(item)
+
+    def intersection_update(self, other):
+        """Update the set, removing any elements from other which are not
+        in both sets.
+        @param other: the collection of items with which to update the set
+        @type other: Set object
+        """
+        if not isinstance(other, Set):
+            raise ValueError('other must be a Set instance')
+        if self is other:
+            return
+        # we make a copy of the list so that we can remove items from
+        # the list without breaking the iterator.
+        for item in list(self.items):
+            if item not in other.items:
+                self.items.remove(item)
+
+    def difference_update(self, other):
+        """Update the set, removing any elements from other which are in
+        the set.
+        @param other: the collection of items with which to update the set
+        @type other: Set object
+        """
+        if not isinstance(other, Set):
+            raise ValueError('other must be a Set instance')
+        if self is other:
+            self.items = []
+        else:
+            for item in other.items:
+                self.discard(item)
+
+    def union(self, other):
+        """Return a new set which is the union of I{self} and I{other}.
+
+        @param other: the other set
+        @type other: Set object
+        @rtype: the same type as I{self}
+        """
+
+        obj = self._clone()
+        obj.union_update(other)
+        return obj
+
+    def intersection(self, other):
+        """Return a new set which is the intersection of I{self} and I{other}.
+
+        @param other: the other set
+        @type other: Set object
+        @rtype: the same type as I{self}
+        """
+
+        obj = self._clone()
+        obj.intersection_update(other)
+        return obj
+
+    def difference(self, other):
+        """Return a new set which is I{self} - I{other}, i.e. the items
+        in I{self} which are not also in I{other}.
+
+        @param other: the other set
+        @type other: Set object
+        @rtype: the same type as I{self}
+        """
+
+        obj = self._clone()
+        obj.difference_update(other)
+        return obj
+
+    def __or__(self, other):
+        return self.union(other)
+
+    def __and__(self, other):
+        return self.intersection(other)
+
+    def __add__(self, other):
+        return self.union(other)
+
+    def __sub__(self, other):
+        return self.difference(other)
+
+    def __ior__(self, other):
+        self.union_update(other)
+        return self
+
+    def __iand__(self, other):
+        self.intersection_update(other)
+        return self
+
+    def __iadd__(self, other):
+        self.union_update(other)
+        return self
+
+    def __isub__(self, other):
+        self.difference_update(other)
+        return self
+
+    def update(self, other):
+        """Update the set, adding any elements from other which are not
+        already in the set.
+        @param other: the collection of items with which to update the set
+        @type other: any iterable type"""
+        for item in other:
+            self.add(item)
+
+    def clear(self):
+        """Make the set empty."""
+        self.items = []
+
+    def __eq__(self, other):
+        # Yes, this is inefficient but the sets we're dealing with are
+        # usually quite small, so it shouldn't hurt too much.
+        for item in self.items:
+            if not item in other.items:
+                return False
+        for item in other.items:
+            if not item in self.items:
+                return False
+        return True
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __len__(self):
+        return len(self.items)
+
+    def __iter__(self):
+        return iter(self.items)
+
+    def __getitem__(self, i):
+        return self.items[i]
+
+    def __delitem__(self, i):
+        del self.items[i]
+
+    def __getslice__(self, i, j):
+        return self.items[i:j]
+
+    def __delslice__(self, i, j):
+        del self.items[i:j]
+
+    def issubset(self, other):
+        """Is I{self} a subset of I{other}?
+
+        @rtype: bool
+        """
+
+        if not isinstance(other, Set):
+            raise ValueError('other must be a Set instance')
+        for item in self.items:
+            if not item in other.items:
+                return False
+        return True
+
+    def issuperset(self, other):
+        """Is I{self} a superset of I{other}?
+
+        @rtype: bool
+        """
+
+        if not isinstance(other, Set):
+            raise ValueError('other must be a Set instance')
+        for item in other.items:
+            if not item in self.items:
+                return False
+        return True
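# --- Illustrative usage (editor's sketch, not part of the vendored patch) ---
# The list-backed Set above only needs items to support __eq__; insertion
# order is preserved and duplicates are ignored.
import dns.set

a = dns.set.Set([1, 2, 3])
b = dns.set.Set([3, 4])
print list(a | b)           # union        -> [1, 2, 3, 4]
print list(a & b)           # intersection -> [3]
print list(a - b)           # difference   -> [1, 2]
print a.issubset(a | b)     # True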
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/tokenizer.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/tokenizer.py
new file mode 100644
index 0000000..4f68a2a
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/tokenizer.py
@@ -0,0 +1,547 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Tokenize DNS master file format"""
+
+import cStringIO
+import sys
+
+import dns.exception
+import dns.name
+import dns.ttl
+
+_DELIMITERS = {
+    ' ' : True,
+    '\t' : True,
+    '\n' : True,
+    ';' : True,
+    '(' : True,
+    ')' : True,
+    '"' : True }
+
+_QUOTING_DELIMITERS = { '"' : True }
+
+EOF = 0
+EOL = 1
+WHITESPACE = 2
+IDENTIFIER = 3
+QUOTED_STRING = 4
+COMMENT = 5
+DELIMITER = 6
+
+class UngetBufferFull(dns.exception.DNSException):
+    """Raised when an attempt is made to unget a token when the unget
+    buffer is full."""
+    pass
+
+class Token(object):
+    """A DNS master file format token.
+
+    @ivar ttype: The token type
+    @type ttype: int
+    @ivar value: The token value
+    @type value: string
+    @ivar has_escape: Does the token value contain escapes?
+    @type has_escape: bool
+    """
+
+    def __init__(self, ttype, value='', has_escape=False):
+        """Initialize a token instance.
+
+        @param ttype: The token type
+        @type ttype: int
+        @param value: The token value
+        @type value: string
+        @param has_escape: Does the token value contain escapes?
+        @type has_escape: bool
+        """
+        self.ttype = ttype
+        self.value = value
+        self.has_escape = has_escape
+
+    def is_eof(self):
+        return self.ttype == EOF
+
+    def is_eol(self):
+        return self.ttype == EOL
+
+    def is_whitespace(self):
+        return self.ttype == WHITESPACE
+
+    def is_identifier(self):
+        return self.ttype == IDENTIFIER
+
+    def is_quoted_string(self):
+        return self.ttype == QUOTED_STRING
+
+    def is_comment(self):
+        return self.ttype == COMMENT
+
+    def is_delimiter(self):
+        return self.ttype == DELIMITER
+
+    def is_eol_or_eof(self):
+        return (self.ttype == EOL or self.ttype == EOF)
+
+    def __eq__(self, other):
+        if not isinstance(other, Token):
+            return False
+        return (self.ttype == other.ttype and
+                self.value == other.value)
+
+    def __ne__(self, other):
+        if not isinstance(other, Token):
+            return True
+        return (self.ttype != other.ttype or
+                self.value != other.value)
+
+    def __str__(self):
+        return '%d "%s"' % (self.ttype, self.value)
+
+    def unescape(self):
+        if not self.has_escape:
+            return self
+        unescaped = ''
+        l = len(self.value)
+        i = 0
+        while i < l:
+            c = self.value[i]
+            i += 1
+            if c == '\\':
+                if i >= l:
+                    raise dns.exception.UnexpectedEnd
+                c = self.value[i]
+                i += 1
+                if c.isdigit():
+                    if i >= l:
+                        raise dns.exception.UnexpectedEnd
+                    c2 = self.value[i]
+                    i += 1
+                    if i >= l:
+                        raise dns.exception.UnexpectedEnd
+                    c3 = self.value[i]
+                    i += 1
+                    if not (c2.isdigit() and c3.isdigit()):
+                        raise dns.exception.SyntaxError
+                    c = chr(int(c) * 100 + int(c2) * 10 + int(c3))
+            unescaped += c
+        return Token(self.ttype, unescaped)
+
+    # compatibility for old-style tuple tokens
+
+    def __len__(self):
+        return 2
+
+    def __iter__(self):
+        return iter((self.ttype, self.value))
+
+    def __getitem__(self, i):
+        if i == 0:
+            return self.ttype
+        elif i == 1:
+            return self.value
+        else:
+            raise IndexError
+
+class Tokenizer(object):
+    """A DNS master file format tokenizer.
+
+    A token is a (type, value) tuple, where I{type} is an int, and
+    I{value} is a string.  The valid types are EOF, EOL, WHITESPACE,
+    IDENTIFIER, QUOTED_STRING, COMMENT, and DELIMITER.
+
+    @ivar file: The file to tokenize
+    @type file: file
+    @ivar ungotten_char: The most recently ungotten character, or None.
+    @type ungotten_char: string
+    @ivar ungotten_token: The most recently ungotten token, or None.
+    @type ungotten_token: (int, string) token tuple
+    @ivar multiline: The current multiline level.  This value is increased
+    by one every time a '(' delimiter is read, and decreased by one every time
+    a ')' delimiter is read.
+    @type multiline: int
+    @ivar quoting: This variable is true if the tokenizer is currently
+    reading a quoted string.
+    @type quoting: bool
+    @ivar eof: This variable is true if the tokenizer has encountered EOF.
+    @type eof: bool
+    @ivar delimiters: The current delimiter dictionary.
+    @type delimiters: dict
+    @ivar line_number: The current line number
+    @type line_number: int
+    @ivar filename: A filename that will be returned by the L{where} method.
+    @type filename: string
+    """
+
+    def __init__(self, f=sys.stdin, filename=None):
+        """Initialize a tokenizer instance.
+
+        @param f: The file to tokenize.  The default is sys.stdin.
+        This parameter may also be a string, in which case the tokenizer
+        will take its input from the contents of the string.
+        @type f: file or string
+        @param filename: the filename that the L{where} method
+        will return.
+        @type filename: string
+        """
+
+        if isinstance(f, str):
+            f = cStringIO.StringIO(f)
+            if filename is None:
+                filename = '<string>'
+        else:
+            if filename is None:
+                if f is sys.stdin:
+                    filename = '<stdin>'
+                else:
+                    filename = '<file>'
+        self.file = f
+        self.ungotten_char = None
+        self.ungotten_token = None
+        self.multiline = 0
+        self.quoting = False
+        self.eof = False
+        self.delimiters = _DELIMITERS
+        self.line_number = 1
+        self.filename = filename
+
+    def _get_char(self):
+        """Read a character from input.
+        @rtype: string
+        """
+
+        if self.ungotten_char is None:
+            if self.eof:
+                c = ''
+            else:
+                c = self.file.read(1)
+                if c == '':
+                    self.eof = True
+                elif c == '\n':
+                    self.line_number += 1
+        else:
+            c = self.ungotten_char
+            self.ungotten_char = None
+        return c
+
+    def where(self):
+        """Return the current location in the input.
+
+        @rtype: (string, int) tuple.  The first item is the filename of
+        the input, the second is the current line number.
+        """
+
+        return (self.filename, self.line_number)
+
+    def _unget_char(self, c):
+        """Unget a character.
+
+        The unget buffer for characters is only one character large; it is
+        an error to try to unget a character when the unget buffer is not
+        empty.
+
+        @param c: the character to unget
+        @type c: string
+        @raises UngetBufferFull: there is already an ungotten char
+        """
+
+        if not self.ungotten_char is None:
+            raise UngetBufferFull
+        self.ungotten_char = c
+
+    def skip_whitespace(self):
+        """Consume input until a non-whitespace character is encountered.
+
+        The non-whitespace character is then ungotten, and the number of
+        whitespace characters consumed is returned.
+
+        If the tokenizer is in multiline mode, then newlines are whitespace.
+
+        @rtype: int
+        """
+
+        skipped = 0
+        while True:
+            c = self._get_char()
+            if c != ' ' and c != '\t':
+                if (c != '\n') or not self.multiline:
+                    self._unget_char(c)
+                    return skipped
+            skipped += 1
+
+    def get(self, want_leading = False, want_comment = False):
+        """Get the next token.
+
+        @param want_leading: If True, return a WHITESPACE token if the
+        first character read is whitespace.  The default is False.
+        @type want_leading: bool
+        @param want_comment: If True, return a COMMENT token if the
+        first token read is a comment.  The default is False.
+        @type want_comment: bool
+        @rtype: Token object
+        @raises dns.exception.UnexpectedEnd: input ended prematurely
+        @raises dns.exception.SyntaxError: input was badly formed
+        """
+
+        if not self.ungotten_token is None:
+            token = self.ungotten_token
+            self.ungotten_token = None
+            if token.is_whitespace():
+                if want_leading:
+                    return token
+            elif token.is_comment():
+                if want_comment:
+                    return token
+            else:
+                return token
+        skipped = self.skip_whitespace()
+        if want_leading and skipped > 0:
+            return Token(WHITESPACE, ' ')
+        token = ''
+        ttype = IDENTIFIER
+        has_escape = False
+        while True:
+            c = self._get_char()
+            if c == '' or c in self.delimiters:
+                if c == '' and self.quoting:
+                    raise dns.exception.UnexpectedEnd
+                if token == '' and ttype != QUOTED_STRING:
+                    if c == '(':
+                        self.multiline += 1
+                        self.skip_whitespace()
+                        continue
+                    elif c == ')':
+                        if not self.multiline > 0:
+                            raise dns.exception.SyntaxError
+                        self.multiline -= 1
+                        self.skip_whitespace()
+                        continue
+                    elif c == '"':
+                        if not self.quoting:
+                            self.quoting = True
+                            self.delimiters = _QUOTING_DELIMITERS
+                            ttype = QUOTED_STRING
+                            continue
+                        else:
+                            self.quoting = False
+                            self.delimiters = _DELIMITERS
+                            self.skip_whitespace()
+                            continue
+                    elif c == '\n':
+                        return Token(EOL, '\n')
+                    elif c == ';':
+                        while 1:
+                            c = self._get_char()
+                            if c == '\n' or c == '':
+                                break
+                            token += c
+                        if want_comment:
+                            self._unget_char(c)
+                            return Token(COMMENT, token)
+                        elif c == '':
+                            if self.multiline:
+                                raise dns.exception.SyntaxError('unbalanced parentheses')
+                            return Token(EOF)
+                        elif self.multiline:
+                            self.skip_whitespace()
+                            token = ''
+                            continue
+                        else:
+                            return Token(EOL, '\n')
+                    else:
+                        # This code exists in case we ever want a
+                        # delimiter to be returned.  It never produces
+                        # a token currently.
+                        token = c
+                        ttype = DELIMITER
+                else:
+                    self._unget_char(c)
+                break
+            elif self.quoting:
+                if c == '\\':
+                    c = self._get_char()
+                    if c == '':
+                        raise dns.exception.UnexpectedEnd
+                    if c.isdigit():
+                        c2 = self._get_char()
+                        if c2 == '':
+                            raise dns.exception.UnexpectedEnd
+                        c3 = self._get_char()
+                        if c3 == '':
+                            raise dns.exception.UnexpectedEnd
+                        if not (c2.isdigit() and c3.isdigit()):
+                            raise dns.exception.SyntaxError
+                        c = chr(int(c) * 100 + int(c2) * 10 + int(c3))
+                elif c == '\n':
+                    raise dns.exception.SyntaxError('newline in quoted string')
+            elif c == '\\':
+                #
+                # It's an escape.  Put it and the next character into
+                # the token; it will be checked later for goodness.
+                #
+                token += c
+                has_escape = True
+                c = self._get_char()
+                if c == '' or c == '\n':
+                    raise dns.exception.UnexpectedEnd
+            token += c
+        if token == '' and ttype != QUOTED_STRING:
+            if self.multiline:
+                raise dns.exception.SyntaxError('unbalanced parentheses')
+            ttype = EOF
+        return Token(ttype, token, has_escape)
+
+    def unget(self, token):
+        """Unget a token.
+
+        The unget buffer for tokens is only one token large; it is
+        an error to try to unget a token when the unget buffer is not
+        empty.
+
+        @param token: the token to unget
+        @type token: Token object
+        @raises UngetBufferFull: there is already an ungotten token
+        """
+
+        if not self.ungotten_token is None:
+            raise UngetBufferFull
+        self.ungotten_token = token
+
+    def next(self):
+        """Return the next item in an iteration.
+        @rtype: (int, string)
+        """
+
+        token = self.get()
+        if token.is_eof():
+            raise StopIteration
+        return token
+
+    def __iter__(self):
+        return self
+
+    # Helpers
+
+    def get_int(self):
+        """Read the next token and interpret it as an integer.
+
+        @raises dns.exception.SyntaxError:
+        @rtype: int
+        """
+
+        token = self.get().unescape()
+        if not token.is_identifier():
+            raise dns.exception.SyntaxError('expecting an identifier')
+        if not token.value.isdigit():
+            raise dns.exception.SyntaxError('expecting an integer')
+        return int(token.value)
+
+    def get_uint8(self):
+        """Read the next token and interpret it as an 8-bit unsigned
+        integer.
+
+        @raises dns.exception.SyntaxError:
+        @rtype: int
+        """
+
+        value = self.get_int()
+        if value < 0 or value > 255:
+            raise dns.exception.SyntaxError('%d is not an unsigned 8-bit integer' % value)
+        return value
+
+    def get_uint16(self):
+        """Read the next token and interpret it as a 16-bit unsigned
+        integer.
+
+        @raises dns.exception.SyntaxError:
+        @rtype: int
+        """
+
+        value = self.get_int()
+        if value < 0 or value > 65535:
+            raise dns.exception.SyntaxError('%d is not an unsigned 16-bit integer' % value)
+        return value
+
+    def get_uint32(self):
+        """Read the next token and interpret it as a 32-bit unsigned
+        integer.
+
+        @raises dns.exception.SyntaxError:
+        @rtype: int
+        """
+
+        token = self.get().unescape()
+        if not token.is_identifier():
+            raise dns.exception.SyntaxError('expecting an identifier')
+        if not token.value.isdigit():
+            raise dns.exception.SyntaxError('expecting an integer')
+        value = long(token.value)
+        if value < 0 or value > 4294967295L:
+            raise dns.exception.SyntaxError('%d is not an unsigned 32-bit integer' % value)
+        return value
+
+    def get_string(self, origin=None):
+        """Read the next token and interpret it as a string.
+
+        @raises dns.exception.SyntaxError:
+        @rtype: string
+        """
+
+        token = self.get().unescape()
+        if not (token.is_identifier() or token.is_quoted_string()):
+            raise dns.exception.SyntaxError('expecting a string')
+        return token.value
+
+    def get_identifier(self, origin=None):
+        """Read the next token and raise an exception if it is not an identifier.
+
+        @raises dns.exception.SyntaxError:
+        @rtype: string
+        """
+
+        token = self.get().unescape()
+        if not token.is_identifier():
+            raise dns.exception.SyntaxError('expecting an identifier')
+        return token.value
+
+    def get_name(self, origin=None):
+        """Read the next token and interpret it as a DNS name.
+
+        @raises dns.exception.SyntaxError:
+        @rtype: dns.name.Name object"""
+
+        token = self.get()
+        if not token.is_identifier():
+            raise dns.exception.SyntaxError('expecting an identifier')
+        return dns.name.from_text(token.value, origin)
+
+    def get_eol(self):
+        """Read the next token and raise an exception if it isn't EOL or
+        EOF.
+
+        @raises dns.exception.SyntaxError:
+        @rtype: string
+        """
+
+        token = self.get()
+        if not token.is_eol_or_eof():
+            raise dns.exception.SyntaxError('expected EOL or EOF, got %d "%s"' % (token.ttype, token.value))
+        return token.value
+
+    def get_ttl(self):
+        token = self.get().unescape()
+        if not token.is_identifier():
+            raise dns.exception.SyntaxError('expecting an identifier')
+        return dns.ttl.from_text(token.value)
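# --- Illustrative usage (editor's sketch, not part of the vendored patch) ---
# Tokenizing a one-line master-file fragment; passing a string makes the
# tokenizer read from it (see __init__ above).
import dns.tokenizer

tok = dns.tokenizer.Tokenizer('www 300 IN A 10.0.0.1\n')
print tok.get_name()        # relative dns.name.Name 'www'
print tok.get_ttl()         # 300
print tok.get_string()      # 'IN'
print tok.get_string()      # 'A'
print tok.get_string()      # '10.0.0.1'
print tok.get_eol()         # end of line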
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/tsig.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/tsig.py
new file mode 100644
index 0000000..b4deeca
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/tsig.py
@@ -0,0 +1,216 @@
+# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS TSIG support."""
+
+import hmac
+import struct
+
+import dns.exception
+import dns.rdataclass
+import dns.name
+
+class BadTime(dns.exception.DNSException):
+    """Raised if the current time is not within the TSIG's validity time."""
+    pass
+
+class BadSignature(dns.exception.DNSException):
+    """Raised if the TSIG signature fails to verify."""
+    pass
+
+class PeerError(dns.exception.DNSException):
+    """Base class for all TSIG errors generated by the remote peer"""
+    pass
+
+class PeerBadKey(PeerError):
+    """Raised if the peer didn't know the key we used"""
+    pass
+
+class PeerBadSignature(PeerError):
+    """Raised if the peer didn't like the signature we sent"""
+    pass
+
+class PeerBadTime(PeerError):
+    """Raised if the peer didn't like the time we sent"""
+    pass
+
+class PeerBadTruncation(PeerError):
+    """Raised if the peer didn't like the amount of truncation in the TSIG we sent"""
+    pass
+
+default_algorithm = "HMAC-MD5.SIG-ALG.REG.INT"
+
+BADSIG = 16
+BADKEY = 17
+BADTIME = 18
+BADTRUNC = 22
+
+def sign(wire, keyname, secret, time, fudge, original_id, error,
+         other_data, request_mac, ctx=None, multi=False, first=True,
+         algorithm=default_algorithm):
+    """Return a (tsig_rdata, mac, ctx) tuple containing the HMAC TSIG rdata
+    for the input parameters, the HMAC MAC calculated by applying the
+    TSIG signature algorithm, and the TSIG digest context.
+    @rtype: (string, string, hmac.HMAC object)
+    @raises ValueError: I{other_data} is too long
+    @raises NotImplementedError: I{algorithm} is not supported
+    """
+
+    (algorithm_name, digestmod) = get_algorithm(algorithm)
+    if first:
+        ctx = hmac.new(secret, digestmod=digestmod)
+        ml = len(request_mac)
+        if ml > 0:
+            ctx.update(struct.pack('!H', ml))
+            ctx.update(request_mac)
+    id = struct.pack('!H', original_id)
+    ctx.update(id)
+    ctx.update(wire[2:])
+    if first:
+        ctx.update(keyname.to_digestable())
+        ctx.update(struct.pack('!H', dns.rdataclass.ANY))
+        ctx.update(struct.pack('!I', 0))
+    long_time = time + 0L
+    upper_time = (long_time >> 32) & 0xffffL
+    lower_time = long_time & 0xffffffffL
+    time_mac = struct.pack('!HIH', upper_time, lower_time, fudge)
+    pre_mac = algorithm_name + time_mac
+    ol = len(other_data)
+    if ol > 65535:
+        raise ValueError('TSIG Other Data is > 65535 bytes')
+    post_mac = struct.pack('!HH', error, ol) + other_data
+    if first:
+        ctx.update(pre_mac)
+        ctx.update(post_mac)
+    else:
+        ctx.update(time_mac)
+    mac = ctx.digest()
+    mpack = struct.pack('!H', len(mac))
+    tsig_rdata = pre_mac + mpack + mac + id + post_mac
+    if multi:
+        ctx = hmac.new(secret)
+        ml = len(mac)
+        ctx.update(struct.pack('!H', ml))
+        ctx.update(mac)
+    else:
+        ctx = None
+    return (tsig_rdata, mac, ctx)
+
+def hmac_md5(wire, keyname, secret, time, fudge, original_id, error,
+             other_data, request_mac, ctx=None, multi=False, first=True,
+             algorithm=default_algorithm):
+    return sign(wire, keyname, secret, time, fudge, original_id, error,
+                other_data, request_mac, ctx, multi, first, algorithm)
+
+def validate(wire, keyname, secret, now, request_mac, tsig_start, tsig_rdata,
+             tsig_rdlen, ctx=None, multi=False, first=True):
+    """Validate the specified TSIG rdata against the other input parameters.
+
+    @raises FormError: The TSIG is badly formed.
+    @raises BadTime: There is too much time skew between the client and the
+    server.
+    @raises BadSignature: The TSIG signature did not validate
+    @rtype: hmac.HMAC object"""
+
+    (adcount,) = struct.unpack("!H", wire[10:12])
+    if adcount == 0:
+        raise dns.exception.FormError
+    adcount -= 1
+    new_wire = wire[0:10] + struct.pack("!H", adcount) + wire[12:tsig_start]
+    current = tsig_rdata
+    (aname, used) = dns.name.from_wire(wire, current)
+    current = current + used
+    (upper_time, lower_time, fudge, mac_size) = \
+                 struct.unpack("!HIHH", wire[current:current + 10])
+    time = ((upper_time + 0L) << 32) + (lower_time + 0L)
+    current += 10
+    mac = wire[current:current + mac_size]
+    current += mac_size
+    (original_id, error, other_size) = \
+                  struct.unpack("!HHH", wire[current:current + 6])
+    current += 6
+    other_data = wire[current:current + other_size]
+    current += other_size
+    if current != tsig_rdata + tsig_rdlen:
+        raise dns.exception.FormError
+    if error != 0:
+        if error == BADSIG:
+            raise PeerBadSignature
+        elif error == BADKEY:
+            raise PeerBadKey
+        elif error == BADTIME:
+            raise PeerBadTime
+        elif error == BADTRUNC:
+            raise PeerBadTruncation
+        else:
+            raise PeerError('unknown TSIG error code %d' % error)
+    time_low = time - fudge
+    time_high = time + fudge
+    if now < time_low or now > time_high:
+        raise BadTime
+    (junk, our_mac, ctx) = sign(new_wire, keyname, secret, time, fudge,
+                                original_id, error, other_data,
+                                request_mac, ctx, multi, first, aname)
+    if (our_mac != mac):
+        raise BadSignature
+    return ctx
+
+def get_algorithm(algorithm):
+    """Returns the wire format string and the hash module to use for the
+    specified TSIG algorithm
+
+    @rtype: (string, hash constructor)
+    @raises NotImplementedError: I{algorithm} is not supported
+    """
+
+    hashes = {}
+    try:
+        import hashlib
+        hashes[dns.name.from_text('hmac-sha224')] = hashlib.sha224
+        hashes[dns.name.from_text('hmac-sha256')] = hashlib.sha256
+        hashes[dns.name.from_text('hmac-sha384')] = hashlib.sha384
+        hashes[dns.name.from_text('hmac-sha512')] = hashlib.sha512
+        hashes[dns.name.from_text('hmac-sha1')] = hashlib.sha1
+        hashes[dns.name.from_text('HMAC-MD5.SIG-ALG.REG.INT')] = hashlib.md5
+
+        import sys
+        if sys.hexversion < 0x02050000:
+            # hashlib doesn't conform to PEP 247: API for
+            # Cryptographic Hash Functions, which hmac before python
+            # 2.5 requires, so add the necessary items.
+            class HashlibWrapper:
+                def __init__(self, basehash):
+                    self.basehash = basehash
+                    self.digest_size = self.basehash().digest_size
+
+                def new(self, *args, **kwargs):
+                    return self.basehash(*args, **kwargs)
+
+            for name in hashes:
+                hashes[name] = HashlibWrapper(hashes[name])
+
+    except ImportError:
+        import md5, sha
+        hashes[dns.name.from_text('HMAC-MD5.SIG-ALG.REG.INT')] =  md5.md5
+        hashes[dns.name.from_text('hmac-sha1')] = sha.sha
+
+    if isinstance(algorithm, (str, unicode)):
+        algorithm = dns.name.from_text(algorithm)
+
+    if algorithm in hashes:
+        return (algorithm.to_digestable(), hashes[algorithm])
+
+    raise NotImplementedError("TSIG algorithm " + str(algorithm) +
+                              " is not supported")
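# --- Illustrative usage (editor's sketch, not part of the vendored patch) ---
# Looking up a TSIG algorithm with the helper above; sign() and validate()
# are normally driven indirectly by dns.message when a keyring is attached
# to a query.  Assumes Python 2.5+ so hashlib provides the SHA-2 family.
import dns.tsig

print dns.tsig.default_algorithm                 # HMAC-MD5.SIG-ALG.REG.INT
(wire_name, hasher) = dns.tsig.get_algorithm('hmac-sha256')
print hasher().hexdigest()                       # hash constructor for the algorithm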
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/tsigkeyring.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/tsigkeyring.py
new file mode 100644
index 0000000..cbd1a27
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/tsigkeyring.py
@@ -0,0 +1,44 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""A place to store TSIG keys."""
+
+import base64
+
+import dns.name
+
+def from_text(textring):
+    """Convert a dictionary containing (textual DNS name, base64 secret) pairs
+    into a binary keyring which has (dns.name.Name, binary secret) pairs.
+    @rtype: dict"""
+    
+    keyring = {}
+    for keytext in textring:
+        keyname = dns.name.from_text(keytext)
+        secret = base64.decodestring(textring[keytext])
+        keyring[keyname] = secret
+    return keyring
+
+def to_text(keyring):
+    """Convert a dictionary containing (dns.name.Name, binary secret) pairs
+    into a text keyring which has (textual DNS name, base64 secret) pairs.
+    @rtype: dict"""
+    
+    textring = {}
+    for keyname in keyring:
+        keytext = keyname.to_text()
+        secret = base64.encodestring(keyring[keyname])
+        textring[keytext] = secret
+    return textring
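# --- Illustrative usage (editor's sketch, not part of the vendored patch) ---
# Converting a textual keyring to binary form with from_text() above; the
# key name and base64 secret are made-up placeholders.
import dns.tsigkeyring

keyring = dns.tsigkeyring.from_text({
    'keyname.example.': 'MTIzNDU2Nzg5MGFiY2RlZg==',
})
for keyname in keyring:
    print keyname, len(keyring[keyname])   # dns.name.Name, 16-byte secret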
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/ttl.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/ttl.py
new file mode 100644
index 0000000..f295300
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/ttl.py
@@ -0,0 +1,64 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS TTL conversion."""
+
+import dns.exception
+
+class BadTTL(dns.exception.SyntaxError):
+    pass
+
+def from_text(text):
+    """Convert the text form of a TTL to an integer.
+
+    The BIND 8 units syntax for TTLs (e.g. '1w6d4h3m10s') is supported.
+
+    @param text: the textual TTL
+    @type text: string
+    @raises dns.ttl.BadTTL: the TTL is not well-formed
+    @rtype: int
+    """
+
+    if text.isdigit():
+        total = long(text)
+    else:
+        if not text[0].isdigit():
+            raise BadTTL
+        total = 0L
+        current = 0L
+        for c in text:
+            if c.isdigit():
+                current *= 10
+                current += long(c)
+            else:
+                c = c.lower()
+                if c == 'w':
+                    total += current * 604800L
+                elif c == 'd':
+                    total += current * 86400L
+                elif c == 'h':
+                    total += current * 3600L
+                elif c == 'm':
+                    total += current * 60L
+                elif c == 's':
+                    total += current
+                else:
+                    raise BadTTL("unknown unit '%s'" % c)
+                current = 0
+        if not current == 0:
+            raise BadTTL("trailing integer")
+    if total < 0L or total > 2147483647L:
+        raise BadTTL("TTL should be between 0 and 2^31 - 1 (inclusive)")
+    return total
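
`dns.ttl.from_text()` accepts either a plain integer or the BIND 8 unit syntax, and rejects anything outside 0..2^31 - 1. For example, '1w6d4h3m10s' works out to 604800 + 6*86400 + 4*3600 + 3*60 + 10 = 1137790 seconds. A minimal sketch, assuming the vendored package imports as `dns`:

```
# A minimal sketch (Python 2) of the two accepted TTL forms.
import dns.ttl

assert dns.ttl.from_text('3600') == 3600          # plain seconds
# 1w + 6d + 4h + 3m + 10s = 604800 + 518400 + 14400 + 180 + 10
assert dns.ttl.from_text('1w6d4h3m10s') == 1137790

try:
    dns.ttl.from_text('90x')                      # 'x' is not a BIND unit
except dns.ttl.BadTTL as exc:
    print(exc)
```
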
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/update.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/update.py
new file mode 100644
index 0000000..7d42636
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/update.py
@@ -0,0 +1,243 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Dynamic Update Support"""
+
+import dns.message
+import dns.name
+import dns.opcode
+import dns.rdata
+import dns.rdataclass
+import dns.rdataset
+import dns.rdatatype
+import dns.tsig
+
+class Update(dns.message.Message):
+    def __init__(self, zone, rdclass=dns.rdataclass.IN, keyring=None,
+                 keyname=None, keyalgorithm=dns.tsig.default_algorithm):
+        """Initialize a new DNS Update object.
+
+        @param zone: The zone which is being updated.
+        @type zone: A dns.name.Name or string
+        @param rdclass: The class of the zone; defaults to dns.rdataclass.IN.
+        @type rdclass: An int designating the class, or a string whose value
+        is the name of a class.
+        @param keyring: The TSIG keyring to use; defaults to None.
+        @type keyring: dict
+        @param keyname: The name of the TSIG key to use; defaults to None.
+        The key must be defined in the keyring.  If a keyring is specified
+        but a keyname is not, then the key used will be the first key in the
+        keyring.  Note that the order of keys in a dictionary is not defined,
+        so applications should supply a keyname when a keyring is used, unless
+        they know the keyring contains only one key.
+        @type keyname: dns.name.Name or string
+        @param keyalgorithm: The TSIG algorithm to use; defaults to
+        dns.tsig.default_algorithm
+        @type keyalgorithm: string
+        """
+        super(Update, self).__init__()
+        self.flags |= dns.opcode.to_flags(dns.opcode.UPDATE)
+        if isinstance(zone, (str, unicode)):
+            zone = dns.name.from_text(zone)
+        self.origin = zone
+        if isinstance(rdclass, str):
+            rdclass = dns.rdataclass.from_text(rdclass)
+        self.zone_rdclass = rdclass
+        self.find_rrset(self.question, self.origin, rdclass, dns.rdatatype.SOA,
+                        create=True, force_unique=True)
+        if not keyring is None:
+            self.use_tsig(keyring, keyname, keyalgorithm)
+
+    def _add_rr(self, name, ttl, rd, deleting=None, section=None):
+        """Add a single RR to the update section."""
+
+        if section is None:
+            section = self.authority
+        covers = rd.covers()
+        rrset = self.find_rrset(section, name, self.zone_rdclass, rd.rdtype,
+                                covers, deleting, True, True)
+        rrset.add(rd, ttl)
+
+    def _add(self, replace, section, name, *args):
+        """Add records.  The first argument is the replace mode.  If
+        false, RRs are added to an existing RRset; if true, the RRset
+        is replaced with the specified contents.  The second
+        argument is the section to add to.  The third argument
+        is always a name.  The other arguments can be:
+
+                - rdataset...
+
+                - ttl, rdata...
+
+                - ttl, rdtype, string..."""
+
+        if isinstance(name, (str, unicode)):
+            name = dns.name.from_text(name, None)
+        if isinstance(args[0], dns.rdataset.Rdataset):
+            for rds in args:
+                if replace:
+                    self.delete(name, rds.rdtype)
+                for rd in rds:
+                    self._add_rr(name, rds.ttl, rd, section=section)
+        else:
+            args = list(args)
+            ttl = int(args.pop(0))
+            if isinstance(args[0], dns.rdata.Rdata):
+                if replace:
+                    self.delete(name, args[0].rdtype)
+                for rd in args:
+                    self._add_rr(name, ttl, rd, section=section)
+            else:
+                rdtype = args.pop(0)
+                if isinstance(rdtype, str):
+                    rdtype = dns.rdatatype.from_text(rdtype)
+                if replace:
+                    self.delete(name, rdtype)
+                for s in args:
+                    rd = dns.rdata.from_text(self.zone_rdclass, rdtype, s,
+                                             self.origin)
+                    self._add_rr(name, ttl, rd, section=section)
+
+    def add(self, name, *args):
+        """Add records.  The first argument is always a name.  The other
+        arguments can be:
+
+                - rdataset...
+
+                - ttl, rdata...
+
+                - ttl, rdtype, string..."""
+        self._add(False, self.authority, name, *args)
+
+    def delete(self, name, *args):
+        """Delete records.  The first argument is always a name.  The other
+        arguments can be:
+
+                - I{nothing}
+
+                - rdataset...
+
+                - rdata...
+
+                - rdtype, [string...]"""
+
+        if isinstance(name, (str, unicode)):
+            name = dns.name.from_text(name, None)
+        if len(args) == 0:
+            rrset = self.find_rrset(self.authority, name, dns.rdataclass.ANY,
+                                    dns.rdatatype.ANY, dns.rdatatype.NONE,
+                                    dns.rdatatype.ANY, True, True)
+        elif isinstance(args[0], dns.rdataset.Rdataset):
+            for rds in args:
+                for rd in rds:
+                    self._add_rr(name, 0, rd, dns.rdataclass.NONE)
+        else:
+            args = list(args)
+            if isinstance(args[0], dns.rdata.Rdata):
+                for rd in args:
+                    self._add_rr(name, 0, rd, dns.rdataclass.NONE)
+            else:
+                rdtype = args.pop(0)
+                if isinstance(rdtype, str):
+                    rdtype = dns.rdatatype.from_text(rdtype)
+                if len(args) == 0:
+                    rrset = self.find_rrset(self.authority, name,
+                                            self.zone_rdclass, rdtype,
+                                            dns.rdatatype.NONE,
+                                            dns.rdataclass.ANY,
+                                            True, True)
+                else:
+                    for s in args:
+                        rd = dns.rdata.from_text(self.zone_rdclass, rdtype, s,
+                                                 self.origin)
+                        self._add_rr(name, 0, rd, dns.rdataclass.NONE)
+
+    def replace(self, name, *args):
+        """Replace records.  The first argument is always a name.  The other
+        arguments can be:
+
+                - rdataset...
+
+                - ttl, rdata...
+
+                - ttl, rdtype, string...
+
+        Note that if you want to replace the entire node, you should do
+        a delete of the name followed by one or more calls to add."""
+
+        self._add(True, self.authority, name, *args)
+
+    def present(self, name, *args):
+        """Require that an owner name (and optionally an rdata type,
+        or specific rdataset) exists as a prerequisite to the
+        execution of the update.  The first argument is always a name.
+        The other arguments can be:
+
+                - rdataset...
+
+                - rdata...
+
+                - rdtype, string..."""
+
+        if isinstance(name, (str, unicode)):
+            name = dns.name.from_text(name, None)
+        if len(args) == 0:
+            rrset = self.find_rrset(self.answer, name,
+                                    dns.rdataclass.ANY, dns.rdatatype.ANY,
+                                    dns.rdatatype.NONE, None,
+                                    True, True)
+        elif isinstance(args[0], dns.rdataset.Rdataset) or \
+             isinstance(args[0], dns.rdata.Rdata) or \
+             len(args) > 1:
+            if not isinstance(args[0], dns.rdataset.Rdataset):
+                # Add a 0 TTL
+                args = list(args)
+                args.insert(0, 0)
+            self._add(False, self.answer, name, *args)
+        else:
+            rdtype = args[0]
+            if isinstance(rdtype, str):
+                rdtype = dns.rdatatype.from_text(rdtype)
+            rrset = self.find_rrset(self.answer, name,
+                                    dns.rdataclass.ANY, rdtype,
+                                    dns.rdatatype.NONE, None,
+                                    True, True)
+
+    def absent(self, name, rdtype=None):
+        """Require that an owner name (and optionally an rdata type) does
+        not exist as a prerequisite to the execution of the update."""
+
+        if isinstance(name, (str, unicode)):
+            name = dns.name.from_text(name, None)
+        if rdtype is None:
+            rrset = self.find_rrset(self.answer, name,
+                                    dns.rdataclass.NONE, dns.rdatatype.ANY,
+                                    dns.rdatatype.NONE, None,
+                                    True, True)
+        else:
+            if isinstance(rdtype, str):
+                rdtype = dns.rdatatype.from_text(rdtype)
+            rrset = self.find_rrset(self.answer, name,
+                                    dns.rdataclass.NONE, rdtype,
+                                    dns.rdatatype.NONE, None,
+                                    True, True)
+
+    def to_wire(self, origin=None, max_size=65535):
+        """Return a string containing the update in DNS compressed wire
+        format.
+        @rtype: string"""
+        if origin is None:
+            origin = self.origin
+        return super(Update, self).to_wire(origin, max_size)
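
`dns.update.Update` places the zone in the question section, prerequisites (`present`/`absent`) in the answer section, and the changes themselves (`add`/`replace`/`delete`) in the authority section; `to_wire()` then renders (and, when a keyring is given, TSIG-signs) the request. A minimal sketch with placeholder names, addresses, and key material, assuming the vendored package imports as `dns`:

```
# A minimal sketch (Python 2); only APIs defined in this module and in the
# tsigkeyring helper above are called.
import dns.tsigkeyring
import dns.update

keyring = dns.tsigkeyring.from_text({
    'update-key.example.': 'MTIzNDU2Nzg5MGFiY2RlZg==',
})

update = dns.update.Update('example.com', keyring=keyring,
                           keyname='update-key.example.')

# Prerequisite section: only apply if www.example.com does not exist yet.
update.absent('www')

# Update section: add an A record and replace any existing TXT RRset.
update.add('www', 300, 'A', '192.0.2.10')
update.replace('info', 300, 'TXT', '"maintained by ops"')

# Render the signed request; it would normally be sent with dns.query.tcp().
wire = update.to_wire()
print('%d-byte update message for zone %s' % (len(wire), update.origin))
```
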
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/version.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/version.py
new file mode 100644
index 0000000..7a36775
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/version.py
@@ -0,0 +1,34 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""dnspython release version information."""
+
+MAJOR = 1
+MINOR = 8
+MICRO = 0
+RELEASELEVEL = 0x0f
+SERIAL = 0
+
+if RELEASELEVEL == 0x0f:
+    version = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
+elif RELEASELEVEL == 0x00:
+    version = '%d.%d.%dx%d' % \
+              (MAJOR, MINOR, MICRO, SERIAL)
+else:
+    version = '%d.%d.%d%x%d' % \
+              (MAJOR, MINOR, MICRO, RELEASELEVEL, SERIAL)
+
+hexversion = MAJOR << 24 | MINOR << 16 | MICRO << 8 | RELEASELEVEL << 4 | \
+             SERIAL
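
`hexversion` packs the release fields into a single integer so callers can compare versions numerically. For the values above (1.8.0 at release level 0x0f, the plain 'X.Y.Z' branch), `version` is '1.8.0' and `hexversion` is 0x010800f0. A quick check of the arithmetic:

```
# A minimal check of the packing above, using the constants from this file.
MAJOR, MINOR, MICRO, RELEASELEVEL, SERIAL = 1, 8, 0, 0x0f, 0

hexversion = MAJOR << 24 | MINOR << 16 | MICRO << 8 | RELEASELEVEL << 4 | SERIAL
assert hexversion == 0x010800f0

# Packed versions compare in release order, e.g. 1.8.0 final > 1.7.2 final.
assert hexversion > (1 << 24 | 7 << 16 | 2 << 8 | 0x0f << 4 | 0)
```
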
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/dns/zone.py b/catapult/telemetry/third_party/webpagereplay/third_party/dns/zone.py
new file mode 100644
index 0000000..93c157d
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/dns/zone.py
@@ -0,0 +1,855 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Zones."""
+
+from __future__ import generators
+
+import sys
+
+import dns.exception
+import dns.name
+import dns.node
+import dns.rdataclass
+import dns.rdatatype
+import dns.rdata
+import dns.rrset
+import dns.tokenizer
+import dns.ttl
+
+class BadZone(dns.exception.DNSException):
+    """The zone is malformed."""
+    pass
+
+class NoSOA(BadZone):
+    """The zone has no SOA RR at its origin."""
+    pass
+
+class NoNS(BadZone):
+    """The zone has no NS RRset at its origin."""
+    pass
+
+class UnknownOrigin(BadZone):
+    """The zone's origin is unknown."""
+    pass
+
+class Zone(object):
+    """A DNS zone.
+
+    A Zone is a mapping from names to nodes.  The zone object may be
+    treated like a Python dictionary, e.g. zone[name] will retrieve
+    the node associated with that name.  The I{name} may be a
+    dns.name.Name object, or it may be a string.  In either case,
+    if the name is relative it is treated as relative to the origin of
+    the zone.
+
+    @ivar rdclass: The zone's rdata class; the default is class IN.
+    @type rdclass: int
+    @ivar origin: The origin of the zone.
+    @type origin: dns.name.Name object
+    @ivar nodes: A dictionary mapping the names of nodes in the zone to the
+    nodes themselves.
+    @type nodes: dict
+    @ivar relativize: should names in the zone be relativized?
+    @type relativize: bool
+    @cvar node_factory: the factory used to create a new node
+    @type node_factory: class or callable
+    """
+
+    node_factory = dns.node.Node
+
+    __slots__ = ['rdclass', 'origin', 'nodes', 'relativize']
+
+    def __init__(self, origin, rdclass=dns.rdataclass.IN, relativize=True):
+        """Initialize a zone object.
+
+        @param origin: The origin of the zone.
+        @type origin: dns.name.Name object
+        @param rdclass: The zone's rdata class; the default is class IN.
+        @type rdclass: int"""
+
+        self.rdclass = rdclass
+        self.origin = origin
+        self.nodes = {}
+        self.relativize = relativize
+
+    def __eq__(self, other):
+        """Two zones are equal if they have the same origin, class, and
+        nodes.
+        @rtype: bool
+        """
+
+        if not isinstance(other, Zone):
+            return False
+        if self.rdclass != other.rdclass or \
+           self.origin != other.origin or \
+           self.nodes != other.nodes:
+            return False
+        return True
+
+    def __ne__(self, other):
+        """Are two zones not equal?
+        @rtype: bool
+        """
+
+        return not self.__eq__(other)
+
+    def _validate_name(self, name):
+        if isinstance(name, (str, unicode)):
+            name = dns.name.from_text(name, None)
+        elif not isinstance(name, dns.name.Name):
+            raise KeyError("name parameter must be convertible to a DNS name")
+        if name.is_absolute():
+            if not name.is_subdomain(self.origin):
+                raise KeyError("name parameter must be a subdomain of the zone origin")
+            if self.relativize:
+                name = name.relativize(self.origin)
+        return name
+
+    def __getitem__(self, key):
+        key = self._validate_name(key)
+        return self.nodes[key]
+
+    def __setitem__(self, key, value):
+        key = self._validate_name(key)
+        self.nodes[key] = value
+
+    def __delitem__(self, key):
+        key = self._validate_name(key)
+        del self.nodes[key]
+
+    def __iter__(self):
+        return self.nodes.iterkeys()
+
+    def iterkeys(self):
+        return self.nodes.iterkeys()
+
+    def keys(self):
+        return self.nodes.keys()
+
+    def itervalues(self):
+        return self.nodes.itervalues()
+
+    def values(self):
+        return self.nodes.values()
+
+    def iteritems(self):
+        return self.nodes.iteritems()
+
+    def items(self):
+        return self.nodes.items()
+
+    def get(self, key):
+        key = self._validate_name(key)
+        return self.nodes.get(key)
+
+    def __contains__(self, other):
+        return other in self.nodes
+
+    def find_node(self, name, create=False):
+        """Find a node in the zone, possibly creating it.
+
+        @param name: the name of the node to find
+        @type name: dns.name.Name object or string
+        @param create: should the node be created if it doesn't exist?
+        @type create: bool
+        @raises KeyError: the name is not known and create was not specified.
+        @rtype: dns.node.Node object
+        """
+
+        name = self._validate_name(name)
+        node = self.nodes.get(name)
+        if node is None:
+            if not create:
+                raise KeyError
+            node = self.node_factory()
+            self.nodes[name] = node
+        return node
+
+    def get_node(self, name, create=False):
+        """Get a node in the zone, possibly creating it.
+
+        This method is like L{find_node}, except it returns None instead
+        of raising an exception if the node does not exist and creation
+        has not been requested.
+
+        @param name: the name of the node to find
+        @type name: dns.name.Name object or string
+        @param create: should the node be created if it doesn't exist?
+        @type create: bool
+        @rtype: dns.node.Node object or None
+        """
+
+        try:
+            node = self.find_node(name, create)
+        except KeyError:
+            node = None
+        return node
+
+    def delete_node(self, name):
+        """Delete the specified node if it exists.
+
+        It is not an error if the node does not exist.
+        """
+
+        name = self._validate_name(name)
+        if self.nodes.has_key(name):
+            del self.nodes[name]
+
+    def find_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE,
+                      create=False):
+        """Look for rdata with the specified name and type in the zone,
+        and return an rdataset encapsulating it.
+
+        The I{name}, I{rdtype}, and I{covers} parameters may be
+        strings, in which case they will be converted to their proper
+        type.
+
+        The rdataset returned is not a copy; changes to it will change
+        the zone.
+
+        KeyError is raised if the name or type are not found.
+        Use L{get_rdataset} if you want to have None returned instead.
+
+        @param name: the owner name to look for
+        @type name: DNS.name.Name object or string
+        @param rdtype: the rdata type desired
+        @type rdtype: int or string
+        @param covers: the covered type (defaults to None)
+        @type covers: int or string
+        @param create: should the node and rdataset be created if they do not
+        exist?
+        @type create: bool
+        @raises KeyError: the node or rdata could not be found
+        @rtype: dns.rdataset.Rdataset object
+        """
+
+        name = self._validate_name(name)
+        if isinstance(rdtype, str):
+            rdtype = dns.rdatatype.from_text(rdtype)
+        if isinstance(covers, str):
+            covers = dns.rdatatype.from_text(covers)
+        node = self.find_node(name, create)
+        return node.find_rdataset(self.rdclass, rdtype, covers, create)
+
+    def get_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE,
+                     create=False):
+        """Look for rdata with the specified name and type in the zone,
+        and return an rdataset encapsulating it.
+
+        The I{name}, I{rdtype}, and I{covers} parameters may be
+        strings, in which case they will be converted to their proper
+        type.
+
+        The rdataset returned is not a copy; changes to it will change
+        the zone.
+
+        None is returned if the name or type are not found.
+        Use L{find_rdataset} if you want to have KeyError raised instead.
+
+        @param name: the owner name to look for
+        @type name: DNS.name.Name object or string
+        @param rdtype: the rdata type desired
+        @type rdtype: int or string
+        @param covers: the covered type (defaults to None)
+        @type covers: int or string
+        @param create: should the node and rdataset be created if they do not
+        exist?
+        @type create: bool
+        @rtype: dns.rdataset.Rdataset object or None
+        """
+
+        try:
+            rdataset = self.find_rdataset(name, rdtype, covers, create)
+        except KeyError:
+            rdataset = None
+        return rdataset
+
+    def delete_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE):
+        """Delete the rdataset matching I{rdtype} and I{covers}, if it
+        exists at the node specified by I{name}.
+
+        The I{name}, I{rdtype}, and I{covers} parameters may be
+        strings, in which case they will be converted to their proper
+        type.
+
+        It is not an error if the node does not exist, or if there is no
+        matching rdataset at the node.
+
+        If the node has no rdatasets after the deletion, it will itself
+        be deleted.
+
+        @param name: the owner name to look for
+        @type name: DNS.name.Name object or string
+        @param rdtype: the rdata type desired
+        @type rdtype: int or string
+        @param covers: the covered type (defaults to None)
+        @type covers: int or string
+        """
+
+        name = self._validate_name(name)
+        if isinstance(rdtype, str):
+            rdtype = dns.rdatatype.from_text(rdtype)
+        if isinstance(covers, str):
+            covers = dns.rdatatype.from_text(covers)
+        node = self.get_node(name)
+        if not node is None:
+            node.delete_rdataset(self.rdclass, rdtype, covers)
+            if len(node) == 0:
+                self.delete_node(name)
+
+    def replace_rdataset(self, name, replacement):
+        """Replace an rdataset at name.
+
+        It is not an error if there is no rdataset matching I{replacement}.
+
+        Ownership of the I{replacement} object is transferred to the zone;
+        in other words, this method does not store a copy of I{replacement}
+        at the node, it stores I{replacement} itself.
+
+        If the I{name} node does not exist, it is created.
+
+        @param name: the owner name
+        @type name: DNS.name.Name object or string
+        @param replacement: the replacement rdataset
+        @type replacement: dns.rdataset.Rdataset
+        """
+
+        if replacement.rdclass != self.rdclass:
+            raise ValueError('replacement.rdclass != zone.rdclass')
+        node = self.find_node(name, True)
+        node.replace_rdataset(replacement)
+
+    def find_rrset(self, name, rdtype, covers=dns.rdatatype.NONE):
+        """Look for rdata with the specified name and type in the zone,
+        and return an RRset encapsulating it.
+
+        The I{name}, I{rdtype}, and I{covers} parameters may be
+        strings, in which case they will be converted to their proper
+        type.
+
+        This method is less efficient than the similar
+        L{find_rdataset} because it creates an RRset instead of
+        returning the matching rdataset.  It may be more convenient
+        for some uses since it returns an object which binds the owner
+        name to the rdata.
+
+        This method may not be used to create new nodes or rdatasets;
+        use L{find_rdataset} instead.
+
+        KeyError is raised if the name or type are not found.
+        Use L{get_rrset} if you want to have None returned instead.
+
+        @param name: the owner name to look for
+        @type name: DNS.name.Name object or string
+        @param rdtype: the rdata type desired
+        @type rdtype: int or string
+        @param covers: the covered type (defaults to None)
+        @type covers: int or string
+        @raises KeyError: the node or rdata could not be found
+        @rtype: dns.rrset.RRset object
+        """
+
+        name = self._validate_name(name)
+        if isinstance(rdtype, str):
+            rdtype = dns.rdatatype.from_text(rdtype)
+        if isinstance(covers, str):
+            covers = dns.rdatatype.from_text(covers)
+        rdataset = self.nodes[name].find_rdataset(self.rdclass, rdtype, covers)
+        rrset = dns.rrset.RRset(name, self.rdclass, rdtype, covers)
+        rrset.update(rdataset)
+        return rrset
+
+    def get_rrset(self, name, rdtype, covers=dns.rdatatype.NONE):
+        """Look for rdata with the specified name and type in the zone,
+        and return an RRset encapsulating it.
+
+        The I{name}, I{rdtype}, and I{covers} parameters may be
+        strings, in which case they will be converted to their proper
+        type.
+
+        This method is less efficient than the similar L{get_rdataset}
+        because it creates an RRset instead of returning the matching
+        rdataset.  It may be more convenient for some uses since it
+        returns an object which binds the owner name to the rdata.
+
+        This method may not be used to create new nodes or rdatasets;
+        use L{find_rdataset} instead.
+
+        None is returned if the name or type are not found.
+        Use L{find_rrset} if you want to have KeyError raised instead.
+
+        @param name: the owner name to look for
+        @type name: DNS.name.Name object or string
+        @param rdtype: the rdata type desired
+        @type rdtype: int or string
+        @param covers: the covered type (defaults to None)
+        @type covers: int or string
+        @rtype: dns.rrset.RRset object
+        """
+
+        try:
+            rrset = self.find_rrset(name, rdtype, covers)
+        except KeyError:
+            rrset = None
+        return rrset
+
+    def iterate_rdatasets(self, rdtype=dns.rdatatype.ANY,
+                          covers=dns.rdatatype.NONE):
+        """Return a generator which yields (name, rdataset) tuples for
+        all rdatasets in the zone which have the specified I{rdtype}
+        and I{covers}.  If I{rdtype} is dns.rdatatype.ANY, the default,
+        then all rdatasets will be matched.
+
+        @param rdtype: int or string
+        @type rdtype: int or string
+        @param covers: the covered type (defaults to None)
+        @type covers: int or string
+        """
+
+        if isinstance(rdtype, str):
+            rdtype = dns.rdatatype.from_text(rdtype)
+        if isinstance(covers, str):
+            covers = dns.rdatatype.from_text(covers)
+        for (name, node) in self.iteritems():
+            for rds in node:
+                if rdtype == dns.rdatatype.ANY or \
+                   (rds.rdtype == rdtype and rds.covers == covers):
+                    yield (name, rds)
+
+    def iterate_rdatas(self, rdtype=dns.rdatatype.ANY,
+                       covers=dns.rdatatype.NONE):
+        """Return a generator which yields (name, ttl, rdata) tuples for
+        all rdatas in the zone which have the specified I{rdtype}
+        and I{covers}.  If I{rdtype} is dns.rdatatype.ANY, the default,
+        then all rdatas will be matched.
+
+        @param rdtype: int or string
+        @type rdtype: int or string
+        @param covers: the covered type (defaults to None)
+        @type covers: int or string
+        """
+
+        if isinstance(rdtype, str):
+            rdtype = dns.rdatatype.from_text(rdtype)
+        if isinstance(covers, str):
+            covers = dns.rdatatype.from_text(covers)
+        for (name, node) in self.iteritems():
+            for rds in node:
+                if rdtype == dns.rdatatype.ANY or \
+                   (rds.rdtype == rdtype and rds.covers == covers):
+                    for rdata in rds:
+                        yield (name, rds.ttl, rdata)
+
+    def to_file(self, f, sorted=True, relativize=True, nl=None):
+        """Write a zone to a file.
+
+        @param f: file or string.  If I{f} is a string, it is treated
+        as the name of a file to open.
+        @param sorted: if True, the file will be written with the
+        names sorted in DNSSEC order from least to greatest.  Otherwise
+        the names will be written in whatever order they happen to have
+        in the zone's dictionary.
+        @param relativize: if True, domain names in the output will be
+        relativized to the zone's origin (if possible).
+        @type relativize: bool
+        @param nl: The end of line string.  If not specified, the
+        output will use the platform's native end-of-line marker (i.e.
+        LF on POSIX, CRLF on Windows, CR on Macintosh).
+        @type nl: string or None
+        """
+
+        if sys.hexversion >= 0x02030000:
+            # allow Unicode filenames
+            str_type = basestring
+        else:
+            str_type = str
+        if nl is None:
+            opts = 'w'
+        else:
+            opts = 'wb'
+        if isinstance(f, str_type):
+            f = file(f, opts)
+            want_close = True
+        else:
+            want_close = False
+        try:
+            if sorted:
+                names = self.keys()
+                names.sort()
+            else:
+                names = self.iterkeys()
+            for n in names:
+                l = self[n].to_text(n, origin=self.origin,
+                                    relativize=relativize)
+                if nl is None:
+                    print >> f, l
+                else:
+                    f.write(l)
+                    f.write(nl)
+        finally:
+            if want_close:
+                f.close()
+
+    def check_origin(self):
+        """Do some simple checking of the zone's origin.
+
+        @raises dns.zone.NoSOA: there is no SOA RR
+        @raises dns.zone.NoNS: there is no NS RRset
+        @raises KeyError: there is no origin node
+        """
+        if self.relativize:
+            name = dns.name.empty
+        else:
+            name = self.origin
+        if self.get_rdataset(name, dns.rdatatype.SOA) is None:
+            raise NoSOA
+        if self.get_rdataset(name, dns.rdatatype.NS) is None:
+            raise NoNS
+
+
+class _MasterReader(object):
+    """Read a DNS master file
+
+    @ivar tok: The tokenizer
+    @type tok: dns.tokenizer.Tokenizer object
+    @ivar ttl: The default TTL
+    @type ttl: int
+    @ivar last_name: The last name read
+    @type last_name: dns.name.Name object
+    @ivar current_origin: The current origin
+    @type current_origin: dns.name.Name object
+    @ivar relativize: should names in the zone be relativized?
+    @type relativize: bool
+    @ivar zone: the zone
+    @type zone: dns.zone.Zone object
+    @ivar saved_state: saved reader state (used when processing $INCLUDE)
+    @type saved_state: list of (tokenizer, current_origin, last_name, file)
+    tuples.
+    @ivar current_file: the file object of the $INCLUDed file being parsed
+    (None if no $INCLUDE is active).
+    @ivar allow_include: is $INCLUDE allowed?
+    @type allow_include: bool
+    @ivar check_origin: should sanity checks of the origin node be done?
+    The default is True.
+    @type check_origin: bool
+    """
+
+    def __init__(self, tok, origin, rdclass, relativize, zone_factory=Zone,
+                 allow_include=False, check_origin=True):
+        if isinstance(origin, (str, unicode)):
+            origin = dns.name.from_text(origin)
+        self.tok = tok
+        self.current_origin = origin
+        self.relativize = relativize
+        self.ttl = 0
+        self.last_name = None
+        self.zone = zone_factory(origin, rdclass, relativize=relativize)
+        self.saved_state = []
+        self.current_file = None
+        self.allow_include = allow_include
+        self.check_origin = check_origin
+
+    def _eat_line(self):
+        while 1:
+            token = self.tok.get()
+            if token.is_eol_or_eof():
+                break
+
+    def _rr_line(self):
+        """Process one line from a DNS master file."""
+        # Name
+        if self.current_origin is None:
+            raise UnknownOrigin
+        token = self.tok.get(want_leading = True)
+        if not token.is_whitespace():
+            self.last_name = dns.name.from_text(token.value, self.current_origin)
+        else:
+            token = self.tok.get()
+            if token.is_eol_or_eof():
+                # treat leading WS followed by EOL/EOF as if they were EOL/EOF.
+                return
+            self.tok.unget(token)
+        name = self.last_name
+        if not name.is_subdomain(self.zone.origin):
+            self._eat_line()
+            return
+        if self.relativize:
+            name = name.relativize(self.zone.origin)
+        token = self.tok.get()
+        if not token.is_identifier():
+            raise dns.exception.SyntaxError
+        # TTL
+        try:
+            ttl = dns.ttl.from_text(token.value)
+            token = self.tok.get()
+            if not token.is_identifier():
+                raise dns.exception.SyntaxError
+        except dns.ttl.BadTTL:
+            ttl = self.ttl
+        # Class
+        try:
+            rdclass = dns.rdataclass.from_text(token.value)
+            token = self.tok.get()
+            if not token.is_identifier():
+                raise dns.exception.SyntaxError
+        except dns.exception.SyntaxError:
+            raise dns.exception.SyntaxError
+        except:
+            rdclass = self.zone.rdclass
+        if rdclass != self.zone.rdclass:
+            raise dns.exception.SyntaxError("RR class is not zone's class")
+        # Type
+        try:
+            rdtype = dns.rdatatype.from_text(token.value)
+        except:
+            raise dns.exception.SyntaxError("unknown rdatatype '%s'" % token.value)
+        n = self.zone.nodes.get(name)
+        if n is None:
+            n = self.zone.node_factory()
+            self.zone.nodes[name] = n
+        try:
+            rd = dns.rdata.from_text(rdclass, rdtype, self.tok,
+                                     self.current_origin, False)
+        except dns.exception.SyntaxError:
+            # Catch and reraise.
+            (ty, va) = sys.exc_info()[:2]
+            raise va
+        except:
+            # All exceptions that occur in the processing of rdata
+            # are treated as syntax errors.  This is not strictly
+            # correct, but it is correct almost all of the time.
+            # We convert them to syntax errors so that we can emit
+            # helpful filename:line info.
+            (ty, va) = sys.exc_info()[:2]
+            raise dns.exception.SyntaxError("caught exception %s: %s" % (str(ty), str(va)))
+
+        rd.choose_relativity(self.zone.origin, self.relativize)
+        covers = rd.covers()
+        rds = n.find_rdataset(rdclass, rdtype, covers, True)
+        rds.add(rd, ttl)
+
+    def read(self):
+        """Read a DNS master file and build a zone object.
+
+        @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
+        @raises dns.zone.NoNS: No NS RRset was found at the zone origin
+        """
+
+        try:
+            while 1:
+                token = self.tok.get(True, True).unescape()
+                if token.is_eof():
+                    if not self.current_file is None:
+                        self.current_file.close()
+                    if len(self.saved_state) > 0:
+                        (self.tok,
+                         self.current_origin,
+                         self.last_name,
+                         self.current_file,
+                         self.ttl) = self.saved_state.pop(-1)
+                        continue
+                    break
+                elif token.is_eol():
+                    continue
+                elif token.is_comment():
+                    self.tok.get_eol()
+                    continue
+                elif token.value[0] == '$':
+                    u = token.value.upper()
+                    if u == '$TTL':
+                        token = self.tok.get()
+                        if not token.is_identifier():
+                            raise dns.exception.SyntaxError("bad $TTL")
+                        self.ttl = dns.ttl.from_text(token.value)
+                        self.tok.get_eol()
+                    elif u == '$ORIGIN':
+                        self.current_origin = self.tok.get_name()
+                        self.tok.get_eol()
+                        if self.zone.origin is None:
+                            self.zone.origin = self.current_origin
+                    elif u == '$INCLUDE' and self.allow_include:
+                        token = self.tok.get()
+                        if not token.is_quoted_string():
+                            raise dns.exception.SyntaxError("bad filename in $INCLUDE")
+                        filename = token.value
+                        token = self.tok.get()
+                        if token.is_identifier():
+                            new_origin = dns.name.from_text(token.value, \
+                                                            self.current_origin)
+                            self.tok.get_eol()
+                        elif not token.is_eol_or_eof():
+                            raise dns.exception.SyntaxError("bad origin in $INCLUDE")
+                        else:
+                            new_origin = self.current_origin
+                        self.saved_state.append((self.tok,
+                                                 self.current_origin,
+                                                 self.last_name,
+                                                 self.current_file,
+                                                 self.ttl))
+                        self.current_file = file(filename, 'r')
+                        self.tok = dns.tokenizer.Tokenizer(self.current_file,
+                                                           filename)
+                        self.current_origin = new_origin
+                    else:
+                        raise dns.exception.SyntaxError("Unknown master file directive '" + u + "'")
+                    continue
+                self.tok.unget(token)
+                self._rr_line()
+        except dns.exception.SyntaxError, detail:
+            (filename, line_number) = self.tok.where()
+            if detail is None:
+                detail = "syntax error"
+            raise dns.exception.SyntaxError("%s:%d: %s" % (filename, line_number, detail))
+
+        # Now that we're done reading, do some basic checking of the zone.
+        if self.check_origin:
+            self.zone.check_origin()
+
+def from_text(text, origin = None, rdclass = dns.rdataclass.IN,
+              relativize = True, zone_factory=Zone, filename=None,
+              allow_include=False, check_origin=True):
+    """Build a zone object from a master file format string.
+
+    @param text: the master file format input
+    @type text: string.
+    @param origin: The origin of the zone; if not specified, the first
+    $ORIGIN statement in the master file will determine the origin of the
+    zone.
+    @type origin: dns.name.Name object or string
+    @param rdclass: The zone's rdata class; the default is class IN.
+    @type rdclass: int
+    @param relativize: should names be relativized?  The default is True
+    @type relativize: bool
+    @param zone_factory: The zone factory to use
+    @type zone_factory: function returning a Zone
+    @param filename: The filename to emit when describing where an error
+    occurred; the default is '<string>'.
+    @type filename: string
+    @param allow_include: is $INCLUDE allowed?
+    @type allow_include: bool
+    @param check_origin: should sanity checks of the origin node be done?
+    The default is True.
+    @type check_origin: bool
+    @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
+    @raises dns.zone.NoNS: No NS RRset was found at the zone origin
+    @rtype: dns.zone.Zone object
+    """
+
+    # 'text' can also be a file, but we don't publish that fact
+    # since it's an implementation detail.  The official file
+    # interface is from_file().
+
+    if filename is None:
+        filename = '<string>'
+    tok = dns.tokenizer.Tokenizer(text, filename)
+    reader = _MasterReader(tok, origin, rdclass, relativize, zone_factory,
+                           allow_include=allow_include,
+                           check_origin=check_origin)
+    reader.read()
+    return reader.zone
+
+def from_file(f, origin = None, rdclass = dns.rdataclass.IN,
+              relativize = True, zone_factory=Zone, filename=None,
+              allow_include=True, check_origin=True):
+    """Read a master file and build a zone object.
+
+    @param f: file or string.  If I{f} is a string, it is treated
+    as the name of a file to open.
+    @param origin: The origin of the zone; if not specified, the first
+    $ORIGIN statement in the master file will determine the origin of the
+    zone.
+    @type origin: dns.name.Name object or string
+    @param rdclass: The zone's rdata class; the default is class IN.
+    @type rdclass: int
+    @param relativize: should names be relativized?  The default is True
+    @type relativize: bool
+    @param zone_factory: The zone factory to use
+    @type zone_factory: function returning a Zone
+    @param filename: The filename to emit when describing where an error
+    occurred; the default is '<file>', or the value of I{f} if I{f} is a
+    string.
+    @type filename: string
+    @param allow_include: is $INCLUDE allowed?
+    @type allow_include: bool
+    @param check_origin: should sanity checks of the origin node be done?
+    The default is True.
+    @type check_origin: bool
+    @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
+    @raises dns.zone.NoNS: No NS RRset was found at the zone origin
+    @rtype: dns.zone.Zone object
+    """
+
+    if sys.hexversion >= 0x02030000:
+        # allow Unicode filenames; turn on universal newline support
+        str_type = basestring
+        opts = 'rU'
+    else:
+        str_type = str
+        opts = 'r'
+    if isinstance(f, str_type):
+        if filename is None:
+            filename = f
+        f = file(f, opts)
+        want_close = True
+    else:
+        if filename is None:
+            filename = '<file>'
+        want_close = False
+
+    try:
+        z = from_text(f, origin, rdclass, relativize, zone_factory,
+                      filename, allow_include, check_origin)
+    finally:
+        if want_close:
+            f.close()
+    return z
+
+def from_xfr(xfr, zone_factory=Zone, relativize=True):
+    """Convert the output of a zone transfer generator into a zone object.
+
+    @param xfr: The xfr generator
+    @type xfr: generator of dns.message.Message objects
+    @param relativize: should names be relativized?  The default is True.
+    It is essential that the relativize setting matches the one specified
+    to dns.query.xfr().
+    @type relativize: bool
+    @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
+    @raises dns.zone.NoNS: No NS RRset was found at the zone origin
+    @rtype: dns.zone.Zone object
+    """
+
+    z = None
+    for r in xfr:
+        if z is None:
+            if relativize:
+                origin = r.origin
+            else:
+                origin = r.answer[0].name
+            rdclass = r.answer[0].rdclass
+            z = zone_factory(origin, rdclass, relativize=relativize)
+        for rrset in r.answer:
+            znode = z.nodes.get(rrset.name)
+            if not znode:
+                znode = z.node_factory()
+                z.nodes[rrset.name] = znode
+            zrds = znode.find_rdataset(rrset.rdclass, rrset.rdtype,
+                                       rrset.covers, True)
+            zrds.update_ttl(rrset.ttl)
+            for rd in rrset:
+                rd.choose_relativity(z.origin, relativize)
+                zrds.add(rd)
+    z.check_origin()
+    return z
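
`dns.zone.from_text()` (and `from_file()`/`from_xfr()`, which share the same reader) produce a `Zone` that behaves like a dictionary of nodes keyed by relativized owner names, with `find_*` raising `KeyError` and `get_*` returning `None` for missing entries. A minimal sketch, assuming the vendored package imports as `dns`; the zone content is made up:

```
# A minimal sketch (Python 2); from_file() takes a file name or file object,
# and from_xfr() consumes a dns.query.xfr() generator, but both yield the
# same kind of Zone object.
import sys

import dns.rdatatype
import dns.zone

ZONE_TEXT = """
$TTL 300
@    IN SOA ns1 hostmaster 2016010100 7200 900 1209600 300
@    IN NS  ns1
ns1  IN A    192.0.2.53
app  IN A    192.0.2.20
app  IN AAAA 2001:db8::20
"""

zone = dns.zone.from_text(ZONE_TEXT, origin='example.org', relativize=True)

# Dict-style access by (relativized) owner name.
node = zone['app']
print('app has %d rdatasets' % len(node))
a_rrs = zone.find_rdataset('app', 'A')                   # KeyError if missing
missing = zone.get_rdataset('mail', dns.rdatatype.MX)    # None if missing
print('app A ttl=%d, mail MX=%r' % (a_rrs.ttl, missing))

# Walk every (name, ttl, rdata) triple of a given type.
for name, ttl, rdata in zone.iterate_rdatas('A'):
    print('%s %d IN A %s' % (name, ttl, rdata))

# Write the whole zone back out in master-file form, names in DNSSEC order.
zone.to_file(sys.stdout, sorted=True)
```
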
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/ipaddr/COPYING b/catapult/telemetry/third_party/webpagereplay/third_party/ipaddr/COPYING
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/ipaddr/COPYING
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/ipaddr/MANIFEST.in b/catapult/telemetry/third_party/webpagereplay/third_party/ipaddr/MANIFEST.in
new file mode 100644
index 0000000..f572804
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/ipaddr/MANIFEST.in
@@ -0,0 +1,3 @@
+include COPYING
+include ipaddr_test.py
+include RELEASENOTES
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/ipaddr/README b/catapult/telemetry/third_party/webpagereplay/third_party/ipaddr/README
new file mode 100644
index 0000000..1b54294
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/ipaddr/README
@@ -0,0 +1,8 @@
+ipaddr.py is a library for working with IP addresses, both IPv4 and IPv6.
+It was developed by Google for internal use, and is now open source.
+
+Project home page: http://code.google.com/p/ipaddr-py/
+
+Please send contributions to ipaddr-py-dev@googlegroups.com.  Code should
+include unit tests and follow the Google Python style guide:
+http://code.google.com/p/soc/wiki/PythonStyleGuide
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/ipaddr/README.web-page-replay b/catapult/telemetry/third_party/webpagereplay/third_party/ipaddr/README.web-page-replay
new file mode 100644
index 0000000..4b42084
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/ipaddr/README.web-page-replay
@@ -0,0 +1,12 @@
+Name: An IPv4/IPv6 manipulation library in Python.
+Short Name: ipaddr-py
+URL: https://code.google.com/p/ipaddr-py/
+Version: 2.1.10 (ipaddr.__version__)
+License: Apache (v2.0)
+License File: COPYING
+
+Description:
+Used by Web Page Replay to check if an IP address is private.
+
+Local Modifications:
+Cherry picked revision 728996d6b1d4 to add license boilerplate to test-2to3.sh.
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/ipaddr/ipaddr.py b/catapult/telemetry/third_party/webpagereplay/third_party/ipaddr/ipaddr.py
new file mode 100644
index 0000000..ad27ae9
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/ipaddr/ipaddr.py
@@ -0,0 +1,1897 @@
+#!/usr/bin/python
+#
+# Copyright 2007 Google Inc.
+#  Licensed to PSF under a Contributor Agreement.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing
+# permissions and limitations under the License.
+
+"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
+
+This library is used to create/poke/manipulate IPv4 and IPv6 addresses
+and networks.
+
+"""
+
+__version__ = '2.1.10'
+
+import struct
+
+IPV4LENGTH = 32
+IPV6LENGTH = 128
+
+
+class AddressValueError(ValueError):
+    """A Value Error related to the address."""
+
+
+class NetmaskValueError(ValueError):
+    """A Value Error related to the netmask."""
+
+
+def IPAddress(address, version=None):
+    """Take an IP string/int and return an object of the correct type.
+
+    Args:
+        address: A string or integer, the IP address.  Either IPv4 or
+          IPv6 addresses may be supplied; integers less than 2**32 will
+          be considered to be IPv4 by default.
+        version: An Integer, 4 or 6. If set, don't try to automatically
+          determine what the IP address type is. This is important for
+          things like IPAddress(1), which could be IPv4, '0.0.0.1', or
+          IPv6, '::1'.
+
+    Returns:
+        An IPv4Address or IPv6Address object.
+
+    Raises:
+        ValueError: if the string passed isn't either a v4 or a v6
+          address.
+
+    """
+    if version:
+        if version == 4:
+            return IPv4Address(address)
+        elif version == 6:
+            return IPv6Address(address)
+
+    try:
+        return IPv4Address(address)
+    except (AddressValueError, NetmaskValueError):
+        pass
+
+    try:
+        return IPv6Address(address)
+    except (AddressValueError, NetmaskValueError):
+        pass
+
+    raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
+                     address)
+
+
+def IPNetwork(address, version=None, strict=False):
+    """Take an IP string/int and return an object of the correct type.
+
+    Args:
+        address: A string or integer, the IP address.  Either IPv4 or
+          IPv6 addresses may be supplied; integers less than 2**32 will
+          be considered to be IPv4 by default.
+        version: An Integer, 4 or 6. If set, don't try to automatically
+          determine what the IP address type is. This is important for
+          things like IPNetwork(1), which could be IPv4, '0.0.0.1/32', or
+          IPv6, '::1/128'.
+
+    Returns:
+        An IPv4Network or IPv6Network object.
+
+    Raises:
+        ValueError: if the string passed isn't either a v4 or a v6
+          address, or if a strict network was requested and a strict
+          network wasn't given.
+
+    """
+    if version:
+        if version == 4:
+            return IPv4Network(address, strict)
+        elif version == 6:
+            return IPv6Network(address, strict)
+
+    try:
+        return IPv4Network(address, strict)
+    except (AddressValueError, NetmaskValueError):
+        pass
+
+    try:
+        return IPv6Network(address, strict)
+    except (AddressValueError, NetmaskValueError):
+        pass
+
+    raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
+                     address)
+
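+# Editor's note: a minimal, illustrative sketch of the two factory functions
+# above (not part of the upstream ipaddr.py source). It assumes the module is
+# importable as `ipaddr`:
+#
+#   >>> import ipaddr
+#   >>> ipaddr.IPAddress('192.0.2.1')
+#   IPv4Address('192.0.2.1')
+#   >>> ipaddr.IPAddress(1, version=6)
+#   IPv6Address('::1')
+#   >>> ipaddr.IPNetwork('192.0.2.1/24')    # strict defaults to False
+#   IPv4Network('192.0.2.1/24')
+#   >>> ipaddr.IPNetwork('192.0.2.1/24', strict=True)
+#   Traceback (most recent call last):
+#       ...
+#   ValueError: 192.0.2.1 has host bits set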
+
+def v4_int_to_packed(address):
+    """The binary representation of this address.
+
+    Args:
+        address: An integer representation of an IPv4 IP address.
+
+    Returns:
+        The binary representation of this address.
+
+    Raises:
+        ValueError: If the integer is too large to be an IPv4 IP
+          address.
+    """
+    if address > _BaseV4._ALL_ONES:
+        raise ValueError('Address too large for IPv4')
+    return Bytes(struct.pack('!I', address))
+
+
+def v6_int_to_packed(address):
+    """The binary representation of this address.
+
+    Args:
+        address: An integer representation of an IPv6 IP address.
+
+    Returns:
+        The binary representation of this address.
+    """
+    return Bytes(struct.pack('!QQ', address >> 64, address & (2**64 - 1)))
+
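+# Editor's note: an illustrative example of the packing helpers above (not
+# part of the upstream source); shown for Python 2, where Bytes wraps str:
+#
+#   >>> v4_int_to_packed(3221225985)   # 192.0.2.1 as an integer
+#   Bytes('\xc0\x00\x02\x01')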
+
+def _find_address_range(addresses):
+    """Find a sequence of addresses.
+
+    Args:
+        addresses: a list of IPv4 or IPv6 addresses.
+
+    Returns:
+        A tuple containing the first and last IP addresses in the sequence.
+
+    """
+    first = last = addresses[0]
+    for ip in addresses[1:]:
+        if ip._ip == last._ip + 1:
+            last = ip
+        else:
+            break
+    return (first, last)
+
+def _get_prefix_length(number1, number2, bits):
+    """Get the number of leading bits that are same for two numbers.
+
+    Args:
+        number1: an integer.
+        number2: another integer.
+        bits: the maximum number of bits to compare.
+
+    Returns:
+        The number of leading bits that are the same for two numbers.
+
+    """
+    for i in range(bits):
+        if number1 >> i == number2 >> i:
+            return bits - i
+    return 0
+
+def _count_righthand_zero_bits(number, bits):
+    """Count the number of zero bits on the right hand side.
+
+    Args:
+        number: an integer.
+        bits: maximum number of bits to count.
+
+    Returns:
+        The number of zero bits on the right hand side of the number.
+
+    """
+    if number == 0:
+        return bits
+    for i in range(bits):
+        if (number >> i) % 2:
+            return i
+
+def summarize_address_range(first, last):
+    """Summarize a network range given the first and last IP addresses.
+
+    Example:
+        >>> summarize_address_range(IPv4Address('1.1.1.0'),
+            IPv4Address('1.1.1.130'))
+        [IPv4Network('1.1.1.0/25'), IPv4Network('1.1.1.128/31'),
+        IPv4Network('1.1.1.130/32')]
+
+    Args:
+        first: the first IPv4Address or IPv6Address in the range.
+        last: the last IPv4Address or IPv6Address in the range.
+
+    Returns:
+        The address range collapsed to a list of IPv4Network's or
+        IPv6Network's.
+
+    Raises:
+        TypeError:
+            If the first and last objects are not IP addresses.
+            If the first and last objects are not the same version.
+        ValueError:
+            If the last object is not greater than the first.
+            If the version is not 4 or 6.
+
+    """
+    if not (isinstance(first, _BaseIP) and isinstance(last, _BaseIP)):
+        raise TypeError('first and last must be IP addresses, not networks')
+    if first.version != last.version:
+        raise TypeError("%s and %s are not of the same version" % (
+                str(first), str(last)))
+    if first > last:
+        raise ValueError('last IP address must be greater than first')
+
+    networks = []
+
+    if first.version == 4:
+        ip = IPv4Network
+    elif first.version == 6:
+        ip = IPv6Network
+    else:
+        raise ValueError('unknown IP version')
+
+    ip_bits = first._max_prefixlen
+    first_int = first._ip
+    last_int = last._ip
+    while first_int <= last_int:
+        nbits = _count_righthand_zero_bits(first_int, ip_bits)
+        current = None
+        while nbits >= 0:
+            addend = 2**nbits - 1
+            current = first_int + addend
+            nbits -= 1
+            if current <= last_int:
+                break
+        prefix = _get_prefix_length(first_int, current, ip_bits)
+        net = ip('%s/%d' % (str(first), prefix))
+        networks.append(net)
+        if current == ip._ALL_ONES:
+            break
+        first_int = current + 1
+        first = IPAddress(first_int, version=first._version)
+    return networks
+
+def _collapse_address_list_recursive(addresses):
+    """Loops through the addresses, collapsing concurrent netblocks.
+
+    Example:
+
+        ip1 = IPv4Network('1.1.0.0/24')
+        ip2 = IPv4Network('1.1.1.0/24')
+        ip3 = IPv4Network('1.1.2.0/24')
+        ip4 = IPv4Network('1.1.3.0/24')
+        ip5 = IPv4Network('1.1.4.0/24')
+        ip6 = IPv4Network('1.1.0.1/22')
+
+        _collapse_address_list_recursive([ip1, ip2, ip3, ip4, ip5, ip6]) ->
+          [IPv4Network('1.1.0.0/22'), IPv4Network('1.1.4.0/24')]
+
+        This shouldn't be called directly; it is called via
+          collapse_address_list([]).
+
+    Args:
+        addresses: A list of IPv4Network's or IPv6Network's
+
+    Returns:
+        A list of IPv4Network's or IPv6Network's depending on what we were
+        passed.
+
+    """
+    ret_array = []
+    optimized = False
+
+    for cur_addr in addresses:
+        if not ret_array:
+            ret_array.append(cur_addr)
+            continue
+        if cur_addr in ret_array[-1]:
+            optimized = True
+        elif cur_addr == ret_array[-1].supernet().subnet()[1]:
+            ret_array.append(ret_array.pop().supernet())
+            optimized = True
+        else:
+            ret_array.append(cur_addr)
+
+    if optimized:
+        return _collapse_address_list_recursive(ret_array)
+
+    return ret_array
+
+
+def collapse_address_list(addresses):
+    """Collapse a list of IP objects.
+
+    Example:
+        collapse_address_list([IPv4('1.1.0.0/24'), IPv4('1.1.1.0/24')]) ->
+          [IPv4('1.1.0.0/23')]
+
+    Args:
+        addresses: A list of IPv4Network or IPv6Network objects.
+
+    Returns:
+        A list of IPv4Network or IPv6Network objects depending on what we
+        were passed.
+
+    Raises:
+        TypeError: If passed a list of mixed version objects.
+
+    """
+    i = 0
+    addrs = []
+    ips = []
+    nets = []
+
+    # split IP addresses and networks
+    for ip in addresses:
+        if isinstance(ip, _BaseIP):
+            if ips and ips[-1]._version != ip._version:
+                raise TypeError("%s and %s are not of the same version" % (
+                        str(ip), str(ips[-1])))
+            ips.append(ip)
+        elif ip._prefixlen == ip._max_prefixlen:
+            if ips and ips[-1]._version != ip._version:
+                raise TypeError("%s and %s are not of the same version" % (
+                        str(ip), str(ips[-1])))
+            ips.append(ip.ip)
+        else:
+            if nets and nets[-1]._version != ip._version:
+                raise TypeError("%s and %s are not of the same version" % (
+                        str(ip), str(nets[-1])))
+            nets.append(ip)
+
+    # sort and dedup
+    ips = sorted(set(ips))
+    nets = sorted(set(nets))
+
+    while i < len(ips):
+        (first, last) = _find_address_range(ips[i:])
+        i = ips.index(last) + 1
+        addrs.extend(summarize_address_range(first, last))
+
+    return _collapse_address_list_recursive(sorted(
+        addrs + nets, key=_BaseNet._get_networks_key))
+
+# backwards compatibility
+CollapseAddrList = collapse_address_list
+
+# We need to distinguish between the string and packed-bytes representations
+# of an IP address.  For example, b'0::1' is the IPv4 address 48.58.58.49,
+# while '0::1' is an IPv6 address.
+#
+# In Python 3, the native 'bytes' type already provides this functionality,
+# so we use it directly.  For earlier implementations where bytes is not a
+# distinct type, we create a subclass of str to serve as a tag.
+#
+# Usage example (Python 2):
+#   ip = ipaddr.IPAddress(ipaddr.Bytes('xxxx'))
+#
+# Usage example (Python 3):
+#   ip = ipaddr.IPAddress(b'xxxx')
+try:
+    if bytes is str:
+        raise TypeError("bytes is not a distinct type")
+    Bytes = bytes
+except (NameError, TypeError):
+    class Bytes(str):
+        def __repr__(self):
+            return 'Bytes(%s)' % str.__repr__(self)
+
+def get_mixed_type_key(obj):
+    """Return a key suitable for sorting between networks and addresses.
+
+    Address and Network objects are not sortable by default; they're
+    fundamentally different so the expression
+
+        IPv4Address('1.1.1.1') <= IPv4Network('1.1.1.1/24')
+
+    doesn't make any sense.  There are times, however, when you may wish
+    to have ipaddr sort these for you anyway. If you need to do this, you
+    can use this function as the key= argument to sorted().
+
+    Args:
+      obj: either a Network or Address object.
+    Returns:
+      appropriate key.
+
+    """
+    if isinstance(obj, _BaseNet):
+        return obj._get_networks_key()
+    elif isinstance(obj, _BaseIP):
+        return obj._get_address_key()
+    return NotImplemented
+
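+# Editor's note: an illustrative use of get_mixed_type_key() (not part of the
+# upstream source). It lets sorted() order a mixed list of address and network
+# objects that are otherwise not comparable:
+#
+#   >>> sorted([IPv4Address('192.0.2.1'), IPv4Network('192.0.2.0/24')],
+#   ...        key=get_mixed_type_key)
+#   [IPv4Network('192.0.2.0/24'), IPv4Address('192.0.2.1')]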
+class _IPAddrBase(object):
+
+    """The mother class."""
+
+    def __index__(self):
+        return self._ip
+
+    def __int__(self):
+        return self._ip
+
+    def __hex__(self):
+        return hex(self._ip)
+
+    @property
+    def exploded(self):
+        """Return the longhand version of the IP address as a string."""
+        return self._explode_shorthand_ip_string()
+
+    @property
+    def compressed(self):
+        """Return the shorthand version of the IP address as a string."""
+        return str(self)
+
+
+class _BaseIP(_IPAddrBase):
+
+    """A generic IP object.
+
+    This IP class contains the version independent methods which are
+    used by single IP addresses.
+
+    """
+
+    def __eq__(self, other):
+        try:
+            return (self._ip == other._ip
+                    and self._version == other._version)
+        except AttributeError:
+            return NotImplemented
+
+    def __ne__(self, other):
+        eq = self.__eq__(other)
+        if eq is NotImplemented:
+            return NotImplemented
+        return not eq
+
+    def __le__(self, other):
+        gt = self.__gt__(other)
+        if gt is NotImplemented:
+            return NotImplemented
+        return not gt
+
+    def __ge__(self, other):
+        lt = self.__lt__(other)
+        if lt is NotImplemented:
+            return NotImplemented
+        return not lt
+
+    def __lt__(self, other):
+        if self._version != other._version:
+            raise TypeError('%s and %s are not of the same version' % (
+                    str(self), str(other)))
+        if not isinstance(other, _BaseIP):
+            raise TypeError('%s and %s are not of the same type' % (
+                    str(self), str(other)))
+        if self._ip != other._ip:
+            return self._ip < other._ip
+        return False
+
+    def __gt__(self, other):
+        if self._version != other._version:
+            raise TypeError('%s and %s are not of the same version' % (
+                    str(self), str(other)))
+        if not isinstance(other, _BaseIP):
+            raise TypeError('%s and %s are not of the same type' % (
+                    str(self), str(other)))
+        if self._ip != other._ip:
+            return self._ip > other._ip
+        return False
+
+    # Shorthand for Integer addition and subtraction. This is not
+    # meant to ever support addition/subtraction of addresses.
+    def __add__(self, other):
+        if not isinstance(other, int):
+            return NotImplemented
+        return IPAddress(int(self) + other, version=self._version)
+
+    def __sub__(self, other):
+        if not isinstance(other, int):
+            return NotImplemented
+        return IPAddress(int(self) - other, version=self._version)
+
+    def __repr__(self):
+        return '%s(%r)' % (self.__class__.__name__, str(self))
+
+    def __str__(self):
+        return  '%s' % self._string_from_ip_int(self._ip)
+
+    def __hash__(self):
+        return hash(hex(long(self._ip)))
+
+    def _get_address_key(self):
+        return (self._version, self)
+
+    @property
+    def version(self):
+        raise NotImplementedError('BaseIP has no version')
+
+
+class _BaseNet(_IPAddrBase):
+
+    """A generic IP object.
+
+    This IP class contains the version independent methods which are
+    used by networks.
+
+    """
+
+    def __init__(self, address):
+        self._cache = {}
+
+    def __repr__(self):
+        return '%s(%r)' % (self.__class__.__name__, str(self))
+
+    def iterhosts(self):
+        """Generate Iterator over usable hosts in a network.
+
+           This is like __iter__ except it doesn't return the network
+           or broadcast addresses.
+
+        """
+        cur = int(self.network) + 1
+        bcast = int(self.broadcast) - 1
+        while cur <= bcast:
+            cur += 1
+            yield IPAddress(cur - 1, version=self._version)
+
+    def __iter__(self):
+        cur = int(self.network)
+        bcast = int(self.broadcast)
+        while cur <= bcast:
+            cur += 1
+            yield IPAddress(cur - 1, version=self._version)
+
+    def __getitem__(self, n):
+        network = int(self.network)
+        broadcast = int(self.broadcast)
+        if n >= 0:
+            if network + n > broadcast:
+                raise IndexError
+            return IPAddress(network + n, version=self._version)
+        else:
+            n += 1
+            if broadcast + n < network:
+                raise IndexError
+            return IPAddress(broadcast + n, version=self._version)
+
+    def __lt__(self, other):
+        if self._version != other._version:
+            raise TypeError('%s and %s are not of the same version' % (
+                    str(self), str(other)))
+        if not isinstance(other, _BaseNet):
+            raise TypeError('%s and %s are not of the same type' % (
+                    str(self), str(other)))
+        if self.network != other.network:
+            return self.network < other.network
+        if self.netmask != other.netmask:
+            return self.netmask < other.netmask
+        return False
+
+    def __gt__(self, other):
+        if self._version != other._version:
+            raise TypeError('%s and %s are not of the same version' % (
+                    str(self), str(other)))
+        if not isinstance(other, _BaseNet):
+            raise TypeError('%s and %s are not of the same type' % (
+                    str(self), str(other)))
+        if self.network != other.network:
+            return self.network > other.network
+        if self.netmask != other.netmask:
+            return self.netmask > other.netmask
+        return False
+
+    def __le__(self, other):
+        gt = self.__gt__(other)
+        if gt is NotImplemented:
+            return NotImplemented
+        return not gt
+
+    def __ge__(self, other):
+        lt = self.__lt__(other)
+        if lt is NotImplemented:
+            return NotImplemented
+        return not lt
+
+    def __eq__(self, other):
+        try:
+            return (self._version == other._version
+                    and self.network == other.network
+                    and int(self.netmask) == int(other.netmask))
+        except AttributeError:
+            if isinstance(other, _BaseIP):
+                return (self._version == other._version
+                        and self._ip == other._ip)
+
+    def __ne__(self, other):
+        eq = self.__eq__(other)
+        if eq is NotImplemented:
+            return NotImplemented
+        return not eq
+
+    def __str__(self):
+        return  '%s/%s' % (str(self.ip),
+                           str(self._prefixlen))
+
+    def __hash__(self):
+        return hash(int(self.network) ^ int(self.netmask))
+
+    def __contains__(self, other):
+        # always false if one is v4 and the other is v6.
+        if self._version != other._version:
+            return False
+        # dealing with another network.
+        if isinstance(other, _BaseNet):
+            return (self.network <= other.network and
+                    self.broadcast >= other.broadcast)
+        # dealing with another address
+        else:
+            return (int(self.network) <= int(other._ip) <=
+                    int(self.broadcast))
+
+    def overlaps(self, other):
+        """Tell if self is partly contained in other."""
+        return self.network in other or self.broadcast in other or (
+            other.network in self or other.broadcast in self)
+
+    @property
+    def network(self):
+        x = self._cache.get('network')
+        if x is None:
+            x = IPAddress(self._ip & int(self.netmask), version=self._version)
+            self._cache['network'] = x
+        return x
+
+    @property
+    def broadcast(self):
+        x = self._cache.get('broadcast')
+        if x is None:
+            x = IPAddress(self._ip | int(self.hostmask), version=self._version)
+            self._cache['broadcast'] = x
+        return x
+
+    @property
+    def hostmask(self):
+        x = self._cache.get('hostmask')
+        if x is None:
+            x = IPAddress(int(self.netmask) ^ self._ALL_ONES,
+                          version=self._version)
+            self._cache['hostmask'] = x
+        return x
+
+    @property
+    def with_prefixlen(self):
+        return '%s/%d' % (str(self.ip), self._prefixlen)
+
+    @property
+    def with_netmask(self):
+        return '%s/%s' % (str(self.ip), str(self.netmask))
+
+    @property
+    def with_hostmask(self):
+        return '%s/%s' % (str(self.ip), str(self.hostmask))
+
+    @property
+    def numhosts(self):
+        """Number of hosts in the current subnet."""
+        return int(self.broadcast) - int(self.network) + 1
+
+    @property
+    def version(self):
+        raise NotImplementedError('BaseNet has no version')
+
+    @property
+    def prefixlen(self):
+        return self._prefixlen
+
+    def address_exclude(self, other):
+        """Remove an address from a larger block.
+
+        For example:
+
+            addr1 = IPNetwork('10.1.1.0/24')
+            addr2 = IPNetwork('10.1.1.0/26')
+            addr1.address_exclude(addr2) =
+                [IPNetwork('10.1.1.64/26'), IPNetwork('10.1.1.128/25')]
+
+        or IPv6:
+
+            addr1 = IPNetwork('::1/32')
+            addr2 = IPNetwork('::1/128')
+            addr1.address_exclude(addr2) = [IPNetwork('::0/128'),
+                IPNetwork('::2/127'),
+                IPNetwork('::4/126'),
+                IPNetwork('::8/125'),
+                ...
+                IPNetwork('0:0:8000::/33')]
+
+        Args:
+            other: An IPvXNetwork object of the same type.
+
+        Returns:
+            A sorted list of IPvXNetwork objects which is self
+            minus other.
+
+        Raises:
+            TypeError: If self and other are of differing address
+              versions, or if other is not a network object.
+            ValueError: If other is not completely contained by self.
+
+        """
+        if not self._version == other._version:
+            raise TypeError("%s and %s are not of the same version" % (
+                str(self), str(other)))
+
+        if not isinstance(other, _BaseNet):
+            raise TypeError("%s is not a network object" % str(other))
+
+        if other not in self:
+            raise ValueError('%s not contained in %s' % (str(other),
+                                                         str(self)))
+        if other == self:
+            return []
+
+        ret_addrs = []
+
+        # Make sure we're comparing the network of other.
+        other = IPNetwork('%s/%s' % (str(other.network), str(other.prefixlen)),
+                   version=other._version)
+
+        s1, s2 = self.subnet()
+        while s1 != other and s2 != other:
+            if other in s1:
+                ret_addrs.append(s2)
+                s1, s2 = s1.subnet()
+            elif other in s2:
+                ret_addrs.append(s1)
+                s1, s2 = s2.subnet()
+            else:
+                # If we got here, there's a bug somewhere.
+                assert True == False, ('Error performing exclusion: '
+                                       's1: %s s2: %s other: %s' %
+                                       (str(s1), str(s2), str(other)))
+        if s1 == other:
+            ret_addrs.append(s2)
+        elif s2 == other:
+            ret_addrs.append(s1)
+        else:
+            # If we got here, there's a bug somewhere.
+            assert True == False, ('Error performing exclusion: '
+                                   's1: %s s2: %s other: %s' %
+                                   (str(s1), str(s2), str(other)))
+
+        return sorted(ret_addrs, key=_BaseNet._get_networks_key)
+
+    def compare_networks(self, other):
+        """Compare two IP objects.
+
+        This is only concerned about the comparison of the integer
+        representation of the network addresses.  This means that the
+        host bits aren't considered at all in this method.  If you want
+        to compare host bits, you can easily enough do a
+        'HostA._ip < HostB._ip'
+
+        Args:
+            other: An IP object.
+
+        Returns:
+            If the IP versions of self and other are the same, returns:
+
+            -1 if self < other:
+              eg: IPv4('1.1.1.0/24') < IPv4('1.1.2.0/24')
+              IPv6('1080::200C:417A') < IPv6('1080::200B:417B')
+            0 if self == other
+              eg: IPv4('1.1.1.1/24') == IPv4('1.1.1.2/24')
+              IPv6('1080::200C:417A/96') == IPv6('1080::200C:417B/96')
+            1 if self > other
+              eg: IPv4('1.1.1.0/24') > IPv4('1.1.0.0/24')
+              IPv6('1080::1:200C:417A/112') >
+              IPv6('1080::0:200C:417A/112')
+
+            If the IP versions of self and other are different, returns:
+
+            -1 if self._version < other._version
+              eg: IPv4('10.0.0.1/24') < IPv6('::1/128')
+            1 if self._version > other._version
+              eg: IPv6('::1/128') > IPv4('255.255.255.0/24')
+
+        """
+        if self._version < other._version:
+            return -1
+        if self._version > other._version:
+            return 1
+        # self._version == other._version below here:
+        if self.network < other.network:
+            return -1
+        if self.network > other.network:
+            return 1
+        # self.network == other.network below here:
+        if self.netmask < other.netmask:
+            return -1
+        if self.netmask > other.netmask:
+            return 1
+        # self.network == other.network and self.netmask == other.netmask
+        return 0
+
+    def _get_networks_key(self):
+        """Network-only key function.
+
+        Returns an object that identifies this address' network and
+        netmask. This function is a suitable "key" argument for sorted()
+        and list.sort().
+
+        """
+        return (self._version, self.network, self.netmask)
+
+    def _ip_int_from_prefix(self, prefixlen=None):
+        """Turn the prefix length netmask into a int for comparison.
+
+        Args:
+            prefixlen: An integer, the prefix length.
+
+        Returns:
+            An integer.
+
+        """
+        if not prefixlen and prefixlen != 0:
+            prefixlen = self._prefixlen
+        return self._ALL_ONES ^ (self._ALL_ONES >> prefixlen)
+
+    def _prefix_from_ip_int(self, ip_int, mask=32):
+        """Return prefix length from the decimal netmask.
+
+        Args:
+            ip_int: An integer, the IP address.
+            mask: The netmask.  Defaults to 32.
+
+        Returns:
+            An integer, the prefix length.
+
+        """
+        while mask:
+            if ip_int & 1 == 1:
+                break
+            ip_int >>= 1
+            mask -= 1
+
+        return mask
+
+    def _ip_string_from_prefix(self, prefixlen=None):
+        """Turn a prefix length into a dotted decimal string.
+
+        Args:
+            prefixlen: An integer, the netmask prefix length.
+
+        Returns:
+            A string, the dotted decimal netmask string.
+
+        """
+        if not prefixlen:
+            prefixlen = self._prefixlen
+        return self._string_from_ip_int(self._ip_int_from_prefix(prefixlen))
+
+    def iter_subnets(self, prefixlen_diff=1, new_prefix=None):
+        """The subnets which join to make the current subnet.
+
+        In the case that self contains only one IP
+        (self._prefixlen == 32 for IPv4 or self._prefixlen == 128
+        for IPv6), return a list with just ourself.
+
+        Args:
+            prefixlen_diff: An integer, the amount the prefix length
+              should be increased by. This should not be set if
+              new_prefix is also set.
+            new_prefix: The desired new prefix length. This must be a
+              larger number (i.e. a smaller network) than the existing prefix.
+              This should not be set if prefixlen_diff is also set.
+
+        Returns:
+            An iterator of IPv(4|6) objects.
+
+        Raises:
+            ValueError: The prefixlen_diff is too small or too large.
+                OR
+            prefixlen_diff and new_prefix are both set or new_prefix
+              is a smaller number than the current prefix (smaller
+              number means a larger network)
+
+        """
+        if self._prefixlen == self._max_prefixlen:
+            yield self
+            return
+
+        if new_prefix is not None:
+            if new_prefix < self._prefixlen:
+                raise ValueError('new prefix must be longer')
+            if prefixlen_diff != 1:
+                raise ValueError('cannot set prefixlen_diff and new_prefix')
+            prefixlen_diff = new_prefix - self._prefixlen
+
+        if prefixlen_diff < 0:
+            raise ValueError('prefix length diff must be > 0')
+        new_prefixlen = self._prefixlen + prefixlen_diff
+
+        if not self._is_valid_netmask(str(new_prefixlen)):
+            raise ValueError(
+                'prefix length diff %d is invalid for netblock %s' % (
+                    new_prefixlen, str(self)))
+
+        first = IPNetwork('%s/%s' % (str(self.network),
+                                     str(self._prefixlen + prefixlen_diff)),
+                         version=self._version)
+
+        yield first
+        current = first
+        while True:
+            broadcast = current.broadcast
+            if broadcast == self.broadcast:
+                return
+            new_addr = IPAddress(int(broadcast) + 1, version=self._version)
+            current = IPNetwork('%s/%s' % (str(new_addr), str(new_prefixlen)),
+                                version=self._version)
+
+            yield current
+
+    def masked(self):
+        """Return the network object with the host bits masked out."""
+        return IPNetwork('%s/%d' % (self.network, self._prefixlen),
+                         version=self._version)
+
+    def subnet(self, prefixlen_diff=1, new_prefix=None):
+        """Return a list of subnets, rather than an iterator."""
+        return list(self.iter_subnets(prefixlen_diff, new_prefix))
+
+    def supernet(self, prefixlen_diff=1, new_prefix=None):
+        """The supernet containing the current network.
+
+        Args:
+            prefixlen_diff: An integer, the amount the prefix length of
+              the network should be decreased by.  For example, given a
+              /24 network and a prefixlen_diff of 3, a supernet with a
+              /21 netmask is returned.
+
+        Returns:
+            An IPv4Network or IPv6Network object.
+
+        Raises:
+            ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have a
+              negative prefix length.
+                OR
+            If prefixlen_diff and new_prefix are both set or new_prefix is a
+              larger number than the current prefix (larger number means a
+              smaller network)
+
+        """
+        if self._prefixlen == 0:
+            return self
+
+        if new_prefix is not None:
+            if new_prefix > self._prefixlen:
+                raise ValueError('new prefix must be shorter')
+            if prefixlen_diff != 1:
+                raise ValueError('cannot set prefixlen_diff and new_prefix')
+            prefixlen_diff = self._prefixlen - new_prefix
+
+
+        if self.prefixlen - prefixlen_diff < 0:
+            raise ValueError(
+                'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
+                (self.prefixlen, prefixlen_diff))
+        return IPNetwork('%s/%s' % (str(self.network),
+                                    str(self.prefixlen - prefixlen_diff)),
+                         version=self._version)
+
+    # backwards compatibility
+    Subnet = subnet
+    Supernet = supernet
+    AddressExclude = address_exclude
+    CompareNetworks = compare_networks
+    Contains = __contains__
+
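+# Editor's note: illustrative examples for the subnet()/supernet() helpers
+# defined in _BaseNet above (not part of the upstream source):
+#
+#   >>> net = IPv4Network('192.0.2.0/24')
+#   >>> net.subnet(prefixlen_diff=1)
+#   [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/25')]
+#   >>> net.supernet(new_prefix=22)
+#   IPv4Network('192.0.2.0/22')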
+
+class _BaseV4(object):
+
+    """Base IPv4 object.
+
+    The following methods are used by IPv4 objects in both single IP
+    addresses and networks.
+
+    """
+
+    # Equivalent to 255.255.255.255 or 32 bits of 1's.
+    _ALL_ONES = (2**IPV4LENGTH) - 1
+    _DECIMAL_DIGITS = frozenset('0123456789')
+
+    def __init__(self, address):
+        self._version = 4
+        self._max_prefixlen = IPV4LENGTH
+
+    def _explode_shorthand_ip_string(self):
+        return str(self)
+
+    def _ip_int_from_string(self, ip_str):
+        """Turn the given IP string into an integer for comparison.
+
+        Args:
+            ip_str: A string, the IP ip_str.
+
+        Returns:
+            The IP ip_str as an integer.
+
+        Raises:
+            AddressValueError: if ip_str isn't a valid IPv4 Address.
+
+        """
+        octets = ip_str.split('.')
+        if len(octets) != 4:
+            raise AddressValueError(ip_str)
+
+        packed_ip = 0
+        for oc in octets:
+            try:
+                packed_ip = (packed_ip << 8) | self._parse_octet(oc)
+            except ValueError:
+                raise AddressValueError(ip_str)
+        return packed_ip
+
+    def _parse_octet(self, octet_str):
+        """Convert a decimal octet into an integer.
+
+        Args:
+            octet_str: A string, the number to parse.
+
+        Returns:
+            The octet as an integer.
+
+        Raises:
+            ValueError: if the octet isn't strictly a decimal from [0..255].
+
+        """
+        # Whitelist the characters, since int() allows a lot of bizarre stuff.
+        if not self._DECIMAL_DIGITS.issuperset(octet_str):
+            raise ValueError
+        octet_int = int(octet_str, 10)
+        # Disallow leading zeroes, because no clear standard exists on
+        # whether these should be interpreted as decimal or octal.
+        if octet_int > 255 or (octet_str[0] == '0' and len(octet_str) > 1):
+            raise ValueError
+        return octet_int
+
+    def _string_from_ip_int(self, ip_int):
+        """Turns a 32-bit integer into dotted decimal notation.
+
+        Args:
+            ip_int: An integer, the IP address.
+
+        Returns:
+            The IP address as a string in dotted decimal notation.
+
+        """
+        octets = []
+        for _ in xrange(4):
+            octets.insert(0, str(ip_int & 0xFF))
+            ip_int >>= 8
+        return '.'.join(octets)
+
+    @property
+    def max_prefixlen(self):
+        return self._max_prefixlen
+
+    @property
+    def packed(self):
+        """The binary representation of this address."""
+        return v4_int_to_packed(self._ip)
+
+    @property
+    def version(self):
+        return self._version
+
+    @property
+    def is_reserved(self):
+       """Test if the address is otherwise IETF reserved.
+
+        Returns:
+            A boolean, True if the address is within the
+            reserved IPv4 Network range.
+
+       """
+       return self in IPv4Network('240.0.0.0/4')
+
+    @property
+    def is_private(self):
+        """Test if this address is allocated for private networks.
+
+        Returns:
+            A boolean, True if the address is reserved per RFC 1918.
+
+        """
+        return (self in IPv4Network('10.0.0.0/8') or
+                self in IPv4Network('172.16.0.0/12') or
+                self in IPv4Network('192.168.0.0/16'))
+
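+    # Editor's note: how a caller (e.g. Web Page Replay's private-address
+    # check) might use the property above; illustrative only, not part of the
+    # upstream source:
+    #
+    #   >>> IPv4Address('10.1.2.3').is_private
+    #   True
+    #   >>> IPv4Address('8.8.8.8').is_private
+    #   False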
+    @property
+    def is_multicast(self):
+        """Test if the address is reserved for multicast use.
+
+        Returns:
+            A boolean, True if the address is multicast.
+            See RFC 3171 for details.
+
+        """
+        return self in IPv4Network('224.0.0.0/4')
+
+    @property
+    def is_unspecified(self):
+        """Test if the address is unspecified.
+
+        Returns:
+            A boolean, True if this is the unspecified address as defined in
+            RFC 5735 3.
+
+        """
+        return self in IPv4Network('0.0.0.0')
+
+    @property
+    def is_loopback(self):
+        """Test if the address is a loopback address.
+
+        Returns:
+            A boolean, True if the address is a loopback per RFC 3330.
+
+        """
+        return self in IPv4Network('127.0.0.0/8')
+
+    @property
+    def is_link_local(self):
+        """Test if the address is reserved for link-local.
+
+        Returns:
+            A boolean, True if the address is link-local per RFC 3927.
+
+        """
+        return self in IPv4Network('169.254.0.0/16')
+
+
+class IPv4Address(_BaseV4, _BaseIP):
+
+    """Represent and manipulate single IPv4 Addresses."""
+
+    def __init__(self, address):
+
+        """
+        Args:
+            address: A string or integer representing the IP
+              '192.168.1.1'
+
+              Additionally, an integer can be passed, so
+              IPv4Address('192.168.1.1') == IPv4Address(3232235777),
+              or, more generally,
+              IPv4Address(int(IPv4Address('192.168.1.1'))) ==
+                IPv4Address('192.168.1.1')
+
+        Raises:
+            AddressValueError: If address isn't a valid IPv4 address.
+
+        """
+        _BaseV4.__init__(self, address)
+
+        # Efficient constructor from integer.
+        if isinstance(address, (int, long)):
+            self._ip = address
+            if address < 0 or address > self._ALL_ONES:
+                raise AddressValueError(address)
+            return
+
+        # Constructing from a packed address
+        if isinstance(address, Bytes):
+            try:
+                self._ip, = struct.unpack('!I', address)
+            except struct.error:
+                raise AddressValueError(address)  # Wrong length.
+            return
+
+        # Assume input argument to be string or any object representation
+        # which converts into a formatted IP string.
+        addr_str = str(address)
+        self._ip = self._ip_int_from_string(addr_str)
+
+
+class IPv4Network(_BaseV4, _BaseNet):
+
+    """This class represents and manipulates 32-bit IPv4 networks.
+
+    Attributes: [examples for IPv4Network('1.2.3.4/27')]
+        ._ip: 16909060
+        .ip: IPv4Address('1.2.3.4')
+        .network: IPv4Address('1.2.3.0')
+        .hostmask: IPv4Address('0.0.0.31')
+        .broadcast: IPv4Address('1.2.3.31')
+        .netmask: IPv4Address('255.255.255.224')
+        .prefixlen: 27
+
+    """
+
+    # the valid octets for host and netmasks. only useful for IPv4.
+    _valid_mask_octets = set((255, 254, 252, 248, 240, 224, 192, 128, 0))
+
+    def __init__(self, address, strict=False):
+        """Instantiate a new IPv4 network object.
+
+        Args:
+            address: A string or integer representing the IP [& network].
+              '192.168.1.1/24'
+              '192.168.1.1/255.255.255.0'
+              '192.168.1.1/0.0.0.255'
+              are all functionally the same in IPv4. Similarly,
+              '192.168.1.1'
+              '192.168.1.1/255.255.255.255'
+              '192.168.1.1/32'
+              are also functionally equivalent. That is to say, failing to
+              provide a subnetmask will create an object with a mask of /32.
+
+              If the mask (portion after the / in the argument) is given in
+              dotted quad form, it is treated as a netmask if it starts with a
+              non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
+              starts with a zero field (e.g. 0.255.255.255 == /8), with the
+              single exception of an all-zero mask which is treated as a
+              netmask == /0. If no mask is given, a default of /32 is used.
+
+              Additionally, an integer can be passed, so
+              IPv4Network('192.168.1.1') == IPv4Network(3232235777),
+              or, more generally,
+              IPv4Network(int(IPv4Network('192.168.1.1'))) ==
+                IPv4Network('192.168.1.1')
+
+            strict: A boolean. If true, ensure that we have been passed
+              a true network address, e.g., 192.168.1.0/24, and not an
+              IP address on a network, e.g., 192.168.1.1/24.
+
+        Raises:
+            AddressValueError: If address isn't a valid IPv4 address.
+            NetmaskValueError: If the netmask isn't valid for
+              an IPv4 address.
+            ValueError: If strict was True and a network address was not
+              supplied.
+
+        """
+        _BaseNet.__init__(self, address)
+        _BaseV4.__init__(self, address)
+
+        # Constructing from an integer or packed bytes.
+        if isinstance(address, (int, long, Bytes)):
+            self.ip = IPv4Address(address)
+            self._ip = self.ip._ip
+            self._prefixlen = self._max_prefixlen
+            self.netmask = IPv4Address(self._ALL_ONES)
+            return
+
+        # Assume input argument to be string or any object representation
+        # which converts into a formatted IP prefix string.
+        addr = str(address).split('/')
+
+        if len(addr) > 2:
+            raise AddressValueError(address)
+
+        self._ip = self._ip_int_from_string(addr[0])
+        self.ip = IPv4Address(self._ip)
+
+        if len(addr) == 2:
+            mask = addr[1].split('.')
+            if len(mask) == 4:
+                # We have dotted decimal netmask.
+                if self._is_valid_netmask(addr[1]):
+                    self.netmask = IPv4Address(self._ip_int_from_string(
+                            addr[1]))
+                elif self._is_hostmask(addr[1]):
+                    self.netmask = IPv4Address(
+                        self._ip_int_from_string(addr[1]) ^ self._ALL_ONES)
+                else:
+                    raise NetmaskValueError('%s is not a valid netmask'
+                                                     % addr[1])
+
+                self._prefixlen = self._prefix_from_ip_int(int(self.netmask))
+            else:
+                # We have a netmask in prefix length form.
+                if not self._is_valid_netmask(addr[1]):
+                    raise NetmaskValueError(addr[1])
+                self._prefixlen = int(addr[1])
+                self.netmask = IPv4Address(self._ip_int_from_prefix(
+                    self._prefixlen))
+        else:
+            self._prefixlen = self._max_prefixlen
+            self.netmask = IPv4Address(self._ip_int_from_prefix(
+                self._prefixlen))
+        if strict:
+            if self.ip != self.network:
+                raise ValueError('%s has host bits set' %
+                                 self.ip)
+        if self._prefixlen == (self._max_prefixlen - 1):
+            self.iterhosts = self.__iter__
+
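+    # Editor's note: the constructor above treats several mask notations as
+    # equivalent; an illustrative sketch (not part of the upstream source):
+    #
+    #   >>> IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/255.255.255.0')
+    #   True
+    #   >>> IPv4Network('192.0.2.0/0.0.0.255') == IPv4Network('192.0.2.0/24')
+    #   True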
+    def _is_hostmask(self, ip_str):
+        """Test if the IP string is a hostmask (rather than a netmask).
+
+        Args:
+            ip_str: A string, the potential hostmask.
+
+        Returns:
+            A boolean, True if the IP string is a hostmask.
+
+        """
+        bits = ip_str.split('.')
+        try:
+            parts = [int(x) for x in bits if int(x) in self._valid_mask_octets]
+        except ValueError:
+            return False
+        if len(parts) != len(bits):
+            return False
+        if parts[0] < parts[-1]:
+            return True
+        return False
+
+    def _is_valid_netmask(self, netmask):
+        """Verify that the netmask is valid.
+
+        Args:
+            netmask: A string, either a prefix or dotted decimal
+              netmask.
+
+        Returns:
+            A boolean, True if the prefix represents a valid IPv4
+            netmask.
+
+        """
+        mask = netmask.split('.')
+        if len(mask) == 4:
+            if [x for x in mask if int(x) not in self._valid_mask_octets]:
+                return False
+            if [y for idx, y in enumerate(mask) if idx > 0 and
+                y > mask[idx - 1]]:
+                return False
+            return True
+        try:
+            netmask = int(netmask)
+        except ValueError:
+            return False
+        return 0 <= netmask <= self._max_prefixlen
+
+    # backwards compatibility
+    IsRFC1918 = lambda self: self.is_private
+    IsMulticast = lambda self: self.is_multicast
+    IsLoopback = lambda self: self.is_loopback
+    IsLinkLocal = lambda self: self.is_link_local
+
+
+class _BaseV6(object):
+
+    """Base IPv6 object.
+
+    The following methods are used by IPv6 objects in both single IP
+    addresses and networks.
+
+    """
+
+    _ALL_ONES = (2**IPV6LENGTH) - 1
+    _HEXTET_COUNT = 8
+    _HEX_DIGITS = frozenset('0123456789ABCDEFabcdef')
+
+    def __init__(self, address):
+        self._version = 6
+        self._max_prefixlen = IPV6LENGTH
+
+    def _ip_int_from_string(self, ip_str):
+        """Turn an IPv6 ip_str into an integer.
+
+        Args:
+            ip_str: A string, the IPv6 ip_str.
+
+        Returns:
+            A long, the IPv6 ip_str.
+
+        Raises:
+            AddressValueError: if ip_str isn't a valid IPv6 Address.
+
+        """
+        parts = ip_str.split(':')
+
+        # An IPv6 address needs at least 2 colons (3 parts).
+        if len(parts) < 3:
+            raise AddressValueError(ip_str)
+
+        # If the address has an IPv4-style suffix, convert it to hexadecimal.
+        if '.' in parts[-1]:
+            ipv4_int = IPv4Address(parts.pop())._ip
+            parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
+            parts.append('%x' % (ipv4_int & 0xFFFF))
+
+        # An IPv6 address can't have more than 8 colons (9 parts).
+        if len(parts) > self._HEXTET_COUNT + 1:
+            raise AddressValueError(ip_str)
+
+        # Disregarding the endpoints, find '::' with nothing in between.
+        # This indicates that a run of zeroes has been skipped.
+        try:
+            skip_index, = (
+                [i for i in xrange(1, len(parts) - 1) if not parts[i]] or
+                [None])
+        except ValueError:
+            # Can't have more than one '::'
+            raise AddressValueError(ip_str)
+
+        # parts_hi is the number of parts to copy from above/before the '::'
+        # parts_lo is the number of parts to copy from below/after the '::'
+        if skip_index is not None:
+            # If we found a '::', then check if it also covers the endpoints.
+            parts_hi = skip_index
+            parts_lo = len(parts) - skip_index - 1
+            if not parts[0]:
+                parts_hi -= 1
+                if parts_hi:
+                    raise AddressValueError(ip_str)  # ^: requires ^::
+            if not parts[-1]:
+                parts_lo -= 1
+                if parts_lo:
+                    raise AddressValueError(ip_str)  # :$ requires ::$
+            parts_skipped = self._HEXTET_COUNT - (parts_hi + parts_lo)
+            if parts_skipped < 1:
+                raise AddressValueError(ip_str)
+        else:
+            # Otherwise, allocate the entire address to parts_hi.  The endpoints
+            # could still be empty, but _parse_hextet() will check for that.
+            if len(parts) != self._HEXTET_COUNT:
+                raise AddressValueError(ip_str)
+            parts_hi = len(parts)
+            parts_lo = 0
+            parts_skipped = 0
+
+        try:
+            # Now, parse the hextets into a 128-bit integer.
+            ip_int = 0L
+            for i in xrange(parts_hi):
+                ip_int <<= 16
+                ip_int |= self._parse_hextet(parts[i])
+            ip_int <<= 16 * parts_skipped
+            for i in xrange(-parts_lo, 0):
+                ip_int <<= 16
+                ip_int |= self._parse_hextet(parts[i])
+            return ip_int
+        except ValueError:
+            raise AddressValueError(ip_str)
+
+    def _parse_hextet(self, hextet_str):
+        """Convert an IPv6 hextet string into an integer.
+
+        Args:
+            hextet_str: A string, the number to parse.
+
+        Returns:
+            The hextet as an integer.
+
+        Raises:
+            ValueError: if the input isn't strictly a hex number from [0..FFFF].
+
+        """
+        # Whitelist the characters, since int() allows a lot of bizarre stuff.
+        if not self._HEX_DIGITS.issuperset(hextet_str):
+            raise ValueError
+        hextet_int = int(hextet_str, 16)
+        if hextet_int > 0xFFFF:
+            raise ValueError
+        return hextet_int
+
+    def _compress_hextets(self, hextets):
+        """Compresses a list of hextets.
+
+        Compresses a list of strings, replacing the longest continuous
+        sequence of "0" in the list with "" and adding empty strings at
+        the beginning or at the end of the string such that subsequently
+        calling ":".join(hextets) will produce the compressed version of
+        the IPv6 address.
+
+        Args:
+            hextets: A list of strings, the hextets to compress.
+
+        Returns:
+            A list of strings.
+
+        """
+        best_doublecolon_start = -1
+        best_doublecolon_len = 0
+        doublecolon_start = -1
+        doublecolon_len = 0
+        for index in range(len(hextets)):
+            if hextets[index] == '0':
+                doublecolon_len += 1
+                if doublecolon_start == -1:
+                    # Start of a sequence of zeros.
+                    doublecolon_start = index
+                if doublecolon_len > best_doublecolon_len:
+                    # This is the longest sequence of zeros so far.
+                    best_doublecolon_len = doublecolon_len
+                    best_doublecolon_start = doublecolon_start
+            else:
+                doublecolon_len = 0
+                doublecolon_start = -1
+
+        if best_doublecolon_len > 1:
+            best_doublecolon_end = (best_doublecolon_start +
+                                    best_doublecolon_len)
+            # For zeros at the end of the address.
+            if best_doublecolon_end == len(hextets):
+                hextets += ['']
+            hextets[best_doublecolon_start:best_doublecolon_end] = ['']
+            # For zeros at the beginning of the address.
+            if best_doublecolon_start == 0:
+                hextets = [''] + hextets
+
+        return hextets
+
+    def _string_from_ip_int(self, ip_int=None):
+        """Turns a 128-bit integer into hexadecimal notation.
+
+        Args:
+            ip_int: An integer, the IP address.
+
+        Returns:
+            A string, the hexadecimal representation of the address.
+
+        Raises:
+            ValueError: The address is bigger than 128 bits of all ones.
+
+        """
+        if not ip_int and ip_int != 0:
+            ip_int = int(self._ip)
+
+        if ip_int > self._ALL_ONES:
+            raise ValueError('IPv6 address is too large')
+
+        hex_str = '%032x' % ip_int
+        hextets = []
+        for x in range(0, 32, 4):
+            hextets.append('%x' % int(hex_str[x:x+4], 16))
+
+        hextets = self._compress_hextets(hextets)
+        return ':'.join(hextets)
+
+    def _explode_shorthand_ip_string(self):
+        """Expand a shortened IPv6 address.
+
+        Returns:
+            A string, the expanded IPv6 address.
+
+        """
+        if isinstance(self, _BaseNet):
+            ip_str = str(self.ip)
+        else:
+            ip_str = str(self)
+
+        ip_int = self._ip_int_from_string(ip_str)
+        parts = []
+        for i in xrange(self._HEXTET_COUNT):
+            parts.append('%04x' % (ip_int & 0xFFFF))
+            ip_int >>= 16
+        parts.reverse()
+        if isinstance(self, _BaseNet):
+            return '%s/%d' % (':'.join(parts), self.prefixlen)
+        return ':'.join(parts)
+
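+    # Editor's note: illustrative output of the compression/expansion helpers
+    # above (not part of the upstream source):
+    #
+    #   >>> IPv6Address('2001:db8::1').exploded
+    #   '2001:0db8:0000:0000:0000:0000:0000:0001'
+    #   >>> IPv6Address('2001:0db8:0:0:0:0:0:1').compressed
+    #   '2001:db8::1'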
+    @property
+    def max_prefixlen(self):
+        return self._max_prefixlen
+
+    @property
+    def packed(self):
+        """The binary representation of this address."""
+        return v6_int_to_packed(self._ip)
+
+    @property
+    def version(self):
+        return self._version
+
+    @property
+    def is_multicast(self):
+        """Test if the address is reserved for multicast use.
+
+        Returns:
+            A boolean, True if the address is a multicast address.
+            See RFC 2373 2.7 for details.
+
+        """
+        return self in IPv6Network('ff00::/8')
+
+    @property
+    def is_reserved(self):
+        """Test if the address is otherwise IETF reserved.
+
+        Returns:
+            A boolean, True if the address is within one of the
+            reserved IPv6 Network ranges.
+
+        """
+        return (self in IPv6Network('::/8') or
+                self in IPv6Network('100::/8') or
+                self in IPv6Network('200::/7') or
+                self in IPv6Network('400::/6') or
+                self in IPv6Network('800::/5') or
+                self in IPv6Network('1000::/4') or
+                self in IPv6Network('4000::/3') or
+                self in IPv6Network('6000::/3') or
+                self in IPv6Network('8000::/3') or
+                self in IPv6Network('A000::/3') or
+                self in IPv6Network('C000::/3') or
+                self in IPv6Network('E000::/4') or
+                self in IPv6Network('F000::/5') or
+                self in IPv6Network('F800::/6') or
+                self in IPv6Network('FE00::/9'))
+
+    @property
+    def is_unspecified(self):
+        """Test if the address is unspecified.
+
+        Returns:
+            A boolean, True if this is the unspecified address as defined in
+            RFC 2373 2.5.2.
+
+        """
+        return self._ip == 0 and getattr(self, '_prefixlen', 128) == 128
+
+    @property
+    def is_loopback(self):
+        """Test if the address is a loopback address.
+
+        Returns:
+            A boolean, True if the address is a loopback address as defined in
+            RFC 2373 2.5.3.
+
+        """
+        return self._ip == 1 and getattr(self, '_prefixlen', 128) == 128
+
+    @property
+    def is_link_local(self):
+        """Test if the address is reserved for link-local.
+
+        Returns:
+            A boolean, True if the address is reserved per RFC 4291.
+
+        """
+        return self in IPv6Network('fe80::/10')
+
+    @property
+    def is_site_local(self):
+        """Test if the address is reserved for site-local.
+
+        Note that the site-local address space has been deprecated by RFC 3879.
+        Use is_private to test if this address is in the space of unique local
+        addresses as defined by RFC 4193.
+
+        Returns:
+            A boolean, True if the address is reserved per RFC 3513 2.5.6.
+
+        """
+        return self in IPv6Network('fec0::/10')
+
+    @property
+    def is_private(self):
+        """Test if this address is allocated for private networks.
+
+        Returns:
+            A boolean, True if the address is reserved per RFC 4193.
+
+        """
+        return self in IPv6Network('fc00::/7')
+
+    @property
+    def ipv4_mapped(self):
+        """Return the IPv4 mapped address.
+
+        Returns:
+            If the IPv6 address is a v4 mapped address, return the
+            IPv4 mapped address. Return None otherwise.
+
+        """
+        if (self._ip >> 32) != 0xFFFF:
+            return None
+        return IPv4Address(self._ip & 0xFFFFFFFF)
+
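+    # Example of ipv4_mapped, mirroring testIpv4Mapped in ipaddr_test.py:
+    #     IPv6Address('::ffff:192.168.1.1').ipv4_mapped
+    #     -> IPv4Address('192.168.1.1')
+    #     IPv6Address('::c0a8:101').ipv4_mapped   # not ::ffff-prefixed
+    #     -> None
+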
+    @property
+    def teredo(self):
+        """Tuple of embedded teredo IPs.
+
+        Returns:
+            Tuple of the (server, client) IPs or None if the address
+            doesn't appear to be a teredo address (doesn't start with
+            2001::/32)
+
+        """
+        if (self._ip >> 96) != 0x20010000:
+            return None
+        return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
+                IPv4Address(~self._ip & 0xFFFFFFFF))
+
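+    # Example of teredo, mirroring testTeredo in ipaddr_test.py:
+    #     IPv6Address('2001:0000:4136:e378:8000:63bf:3fff:fdd2').teredo
+    #     -> (IPv4Address('65.54.227.120'), IPv4Address('192.0.2.45'))
+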
+    @property
+    def sixtofour(self):
+        """Return the IPv4 6to4 embedded address.
+
+        Returns:
+            The IPv4 6to4-embedded address if present or None if the
+            address doesn't appear to contain a 6to4 embedded address.
+
+        """
+        if (self._ip >> 112) != 0x2002:
+            return None
+        return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
+
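+    # Illustrative sketch for sixtofour (the address below is chosen for
+    # illustration and is not taken from the unit tests): the 6to4 prefix
+    # 2002::/16 is followed by the embedded IPv4 address, so
+    #     IPv6Address('2002:c000:0204::').sixtofour
+    #     -> IPv4Address('192.0.2.4')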
+
+class IPv6Address(_BaseV6, _BaseIP):
+
+    """Represent and manipulate single IPv6 Addresses.
+    """
+
+    def __init__(self, address):
+        """Instantiate a new IPv6 address object.
+
+        Args:
+            address: A string or integer representing the IP
+
+              Additionally, an integer can be passed, so
+              IPv6Address('2001:4860::') ==
+                IPv6Address(42541956101370907050197289607612071936L).
+              or, more generally
+              IPv6Address(IPv6Address('2001:4860::')._ip) ==
+                IPv6Address('2001:4860::')
+
+        Raises:
+            AddressValueError: If address isn't a valid IPv6 address.
+
+        """
+        _BaseV6.__init__(self, address)
+
+        # Efficient constructor from integer.
+        if isinstance(address, (int, long)):
+            self._ip = address
+            if address < 0 or address > self._ALL_ONES:
+                raise AddressValueError(address)
+            return
+
+        # Constructing from a packed address
+        if isinstance(address, Bytes):
+            try:
+                hi, lo = struct.unpack('!QQ', address)
+            except struct.error:
+                raise AddressValueError(address)  # Wrong length.
+            self._ip = (hi << 64) | lo
+            return
+
+        # Assume input argument to be string or any object representation
+        # which converts into a formatted IP string.
+        addr_str = str(address)
+        if not addr_str:
+            raise AddressValueError('')
+
+        self._ip = self._ip_int_from_string(addr_str)
+
+
+class IPv6Network(_BaseV6, _BaseNet):
+
+    """This class represents and manipulates 128-bit IPv6 networks.
+
+    Attributes: [examples for IPv6Network('2001:658:22A:CAFE:200::1/64')]
+        .ip: IPv6Address('2001:658:22a:cafe:200::1')
+        .network: IPv6Address('2001:658:22a:cafe::')
+        .hostmask: IPv6Address('::ffff:ffff:ffff:ffff')
+        .broadcast: IPv6Address('2001:658:22a:cafe:ffff:ffff:ffff:ffff')
+        .netmask: IPv6Address('ffff:ffff:ffff:ffff::')
+        .prefixlen: 64
+
+    """
+
+
+    def __init__(self, address, strict=False):
+        """Instantiate a new IPv6 Network object.
+
+        Args:
+            address: A string or integer representing the IPv6 network or the IP
+              and prefix/netmask.
+              '2001:4860::/128'
+              '2001:4860:0000:0000:0000:0000:0000:0000/128'
+              '2001:4860::'
+              are all functionally the same in IPv6.  That is to say,
+              failing to provide a subnetmask will create an object with
+              a mask of /128.
+
+              Additionally, an integer can be passed, so
+              IPv6Network('2001:4860::') ==
+                IPv6Network(42541956101370907050197289607612071936L).
+              or, more generally
+              IPv6Network(IPv6Network('2001:4860::')._ip) ==
+                IPv6Network('2001:4860::')
+
+            strict: A boolean. If true, ensure that we have been passed
+              a true network address, eg, 2001:658:22a:cafe::/64, and not an
+              IP address on a network, eg, 2001:658:22a:cafe:200::1/64.
+
+        Raises:
+            AddressValueError: If address isn't a valid IPv6 address.
+            NetmaskValueError: If the netmask isn't valid for
+              an IPv6 address.
+            ValueError: If strict was True and a network address was not
+              supplied.
+
+        """
+        _BaseNet.__init__(self, address)
+        _BaseV6.__init__(self, address)
+
+        # Constructing from an integer or packed bytes.
+        if isinstance(address, (int, long, Bytes)):
+            self.ip = IPv6Address(address)
+            self._ip = self.ip._ip
+            self._prefixlen = self._max_prefixlen
+            self.netmask = IPv6Address(self._ALL_ONES)
+            return
+
+        # Assume input argument to be string or any object representation
+        # which converts into a formatted IP prefix string.
+        addr = str(address).split('/')
+
+        if len(addr) > 2:
+            raise AddressValueError(address)
+
+        self._ip = self._ip_int_from_string(addr[0])
+        self.ip = IPv6Address(self._ip)
+
+        if len(addr) == 2:
+            if self._is_valid_netmask(addr[1]):
+                self._prefixlen = int(addr[1])
+            else:
+                raise NetmaskValueError(addr[1])
+        else:
+            self._prefixlen = self._max_prefixlen
+
+        self.netmask = IPv6Address(self._ip_int_from_prefix(self._prefixlen))
+
+        if strict:
+            if self.ip != self.network:
+                raise ValueError('%s has host bits set' %
+                                 self.ip)
+        if self._prefixlen == (self._max_prefixlen - 1):
+            self.iterhosts = self.__iter__
+
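+    # With strict=True the constructor rejects values whose host bits are set,
+    # matching testStrictNetworks in ipaddr_test.py:
+    #     IPv6Network('::1/120', strict=True)
+    #     -> ValueError: ::1 has host bits set
+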
+    def _is_valid_netmask(self, prefixlen):
+        """Verify that the netmask/prefixlen is valid.
+
+        Args:
+            prefixlen: A string, the netmask in prefix length format.
+
+        Returns:
+            A boolean, True if the prefix represents a valid IPv6
+            netmask.
+
+        """
+        try:
+            prefixlen = int(prefixlen)
+        except ValueError:
+            return False
+        return 0 <= prefixlen <= self._max_prefixlen
+
+    @property
+    def with_netmask(self):
+        return self.with_prefixlen
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/ipaddr/ipaddr_test.py b/catapult/telemetry/third_party/webpagereplay/third_party/ipaddr/ipaddr_test.py
new file mode 100755
index 0000000..9446889
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/ipaddr/ipaddr_test.py
@@ -0,0 +1,1105 @@
+#!/usr/bin/python
+#
+# Copyright 2007 Google Inc.
+#  Licensed to PSF under a Contributor Agreement.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittest for ipaddr module."""
+
+
+import unittest
+import time
+import ipaddr
+
+# Compatibility function to cast str to bytes objects
+if issubclass(ipaddr.Bytes, str):
+    _cb = ipaddr.Bytes
+else:
+    _cb = lambda bytestr: bytes(bytestr, 'charmap')
+
+class IpaddrUnitTest(unittest.TestCase):
+
+    def setUp(self):
+        self.ipv4 = ipaddr.IPv4Network('1.2.3.4/24')
+        self.ipv4_hostmask = ipaddr.IPv4Network('10.0.0.1/0.255.255.255')
+        self.ipv6 = ipaddr.IPv6Network('2001:658:22a:cafe:200:0:0:1/64')
+
+    def tearDown(self):
+        del(self.ipv4)
+        del(self.ipv4_hostmask)
+        del(self.ipv6)
+        del(self)
+
+    def testRepr(self):
+        self.assertEqual("IPv4Network('1.2.3.4/32')",
+                         repr(ipaddr.IPv4Network('1.2.3.4')))
+        self.assertEqual("IPv6Network('::1/128')",
+                         repr(ipaddr.IPv6Network('::1')))
+
+    def testAutoMasking(self):
+        addr1 = ipaddr.IPv4Network('1.1.1.255/24')
+        addr1_masked = ipaddr.IPv4Network('1.1.1.0/24')
+        self.assertEqual(addr1_masked, addr1.masked())
+
+        addr2 = ipaddr.IPv6Network('2000:cafe::efac:100/96')
+        addr2_masked = ipaddr.IPv6Network('2000:cafe::/96')
+        self.assertEqual(addr2_masked, addr2.masked())
+
+    # issue57
+    def testAddressIntMath(self):
+        self.assertEqual(ipaddr.IPv4Address('1.1.1.1') + 255,
+                         ipaddr.IPv4Address('1.1.2.0'))
+        self.assertEqual(ipaddr.IPv4Address('1.1.1.1') - 256,
+                         ipaddr.IPv4Address('1.1.0.1'))
+        self.assertEqual(ipaddr.IPv6Address('::1') + (2**16 - 2),
+                         ipaddr.IPv6Address('::ffff'))
+        self.assertEqual(ipaddr.IPv6Address('::ffff') - (2**16 - 2),
+                         ipaddr.IPv6Address('::1'))
+
+    def testInvalidStrings(self):
+        def AssertInvalidIP(ip_str):
+            self.assertRaises(ValueError, ipaddr.IPAddress, ip_str)
+        AssertInvalidIP("")
+        AssertInvalidIP("016.016.016.016")
+        AssertInvalidIP("016.016.016")
+        AssertInvalidIP("016.016")
+        AssertInvalidIP("016")
+        AssertInvalidIP("000.000.000.000")
+        AssertInvalidIP("000")
+        AssertInvalidIP("0x0a.0x0a.0x0a.0x0a")
+        AssertInvalidIP("0x0a.0x0a.0x0a")
+        AssertInvalidIP("0x0a.0x0a")
+        AssertInvalidIP("0x0a")
+        AssertInvalidIP("42.42.42.42.42")
+        AssertInvalidIP("42.42.42")
+        AssertInvalidIP("42.42")
+        AssertInvalidIP("42")
+        AssertInvalidIP("42..42.42")
+        AssertInvalidIP("42..42.42.42")
+        AssertInvalidIP("42.42.42.42.")
+        AssertInvalidIP("42.42.42.42...")
+        AssertInvalidIP(".42.42.42.42")
+        AssertInvalidIP("...42.42.42.42")
+        AssertInvalidIP("42.42.42.-0")
+        AssertInvalidIP("42.42.42.+0")
+        AssertInvalidIP(".")
+        AssertInvalidIP("...")
+        AssertInvalidIP("bogus")
+        AssertInvalidIP("bogus.com")
+        AssertInvalidIP("192.168.0.1.com")
+        AssertInvalidIP("12345.67899.-54321.-98765")
+        AssertInvalidIP("257.0.0.0")
+        AssertInvalidIP("42.42.42.-42")
+        AssertInvalidIP("3ffe::1.net")
+        AssertInvalidIP("3ffe::1::1")
+        AssertInvalidIP("1::2::3::4:5")
+        AssertInvalidIP("::7:6:5:4:3:2:")
+        AssertInvalidIP(":6:5:4:3:2:1::")
+        AssertInvalidIP("2001::db:::1")
+        AssertInvalidIP("FEDC:9878")
+        AssertInvalidIP("+1.+2.+3.4")
+        AssertInvalidIP("1.2.3.4e0")
+        AssertInvalidIP("::7:6:5:4:3:2:1:0")
+        AssertInvalidIP("7:6:5:4:3:2:1:0::")
+        AssertInvalidIP("9:8:7:6:5:4:3::2:1")
+        AssertInvalidIP("0:1:2:3::4:5:6:7")
+        AssertInvalidIP("3ffe:0:0:0:0:0:0:0:1")
+        AssertInvalidIP("3ffe::10000")
+        AssertInvalidIP("3ffe::goog")
+        AssertInvalidIP("3ffe::-0")
+        AssertInvalidIP("3ffe::+0")
+        AssertInvalidIP("3ffe::-1")
+        AssertInvalidIP(":")
+        AssertInvalidIP(":::")
+        AssertInvalidIP("::1.2.3")
+        AssertInvalidIP("::1.2.3.4.5")
+        AssertInvalidIP("::1.2.3.4:")
+        AssertInvalidIP("1.2.3.4::")
+        AssertInvalidIP("2001:db8::1:")
+        AssertInvalidIP(":2001:db8::1")
+        AssertInvalidIP(":1:2:3:4:5:6:7")
+        AssertInvalidIP("1:2:3:4:5:6:7:")
+        AssertInvalidIP(":1:2:3:4:5:6:")
+        AssertInvalidIP("192.0.2.1/32")
+        AssertInvalidIP("2001:db8::1/128")
+
+        self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv4Network, '')
+        self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv4Network,
+                          'google.com')
+        self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv4Network,
+                          '::1.2.3.4')
+        self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network, '')
+        self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network,
+                          'google.com')
+        self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network,
+                          '1.2.3.4')
+        self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network,
+                          'cafe:cafe::/128/190')
+        self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network,
+                          '1234:axy::b')
+        self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Address,
+                          '1234:axy::b')
+        self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Address,
+                          '2001:db8:::1')
+        self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Address,
+                          '2001:888888::1')
+        self.assertRaises(ipaddr.AddressValueError,
+                          ipaddr.IPv4Address(1)._ip_int_from_string,
+                          '1.a.2.3')
+        self.assertEqual(False, ipaddr.IPv4Network(1)._is_hostmask('1.a.2.3'))
+
+    def testGetNetwork(self):
+        self.assertEqual(int(self.ipv4.network), 16909056)
+        self.assertEqual(str(self.ipv4.network), '1.2.3.0')
+        self.assertEqual(str(self.ipv4_hostmask.network), '10.0.0.0')
+
+        self.assertEqual(int(self.ipv6.network),
+                         42540616829182469433403647294022090752)
+        self.assertEqual(str(self.ipv6.network),
+                         '2001:658:22a:cafe::')
+        self.assertEqual(str(self.ipv6.hostmask),
+                         '::ffff:ffff:ffff:ffff')
+
+    def testBadVersionComparison(self):
+        # These should always raise TypeError
+        v4addr = ipaddr.IPAddress('1.1.1.1')
+        v4net = ipaddr.IPNetwork('1.1.1.1')
+        v6addr = ipaddr.IPAddress('::1')
+        v6net = ipaddr.IPNetwork('::1')
+
+        self.assertRaises(TypeError, v4addr.__lt__, v6addr)
+        self.assertRaises(TypeError, v4addr.__gt__, v6addr)
+        self.assertRaises(TypeError, v4net.__lt__, v6net)
+        self.assertRaises(TypeError, v4net.__gt__, v6net)
+
+        self.assertRaises(TypeError, v6addr.__lt__, v4addr)
+        self.assertRaises(TypeError, v6addr.__gt__, v4addr)
+        self.assertRaises(TypeError, v6net.__lt__, v4net)
+        self.assertRaises(TypeError, v6net.__gt__, v4net)
+
+    def testMixedTypeComparison(self):
+        v4addr = ipaddr.IPAddress('1.1.1.1')
+        v4net = ipaddr.IPNetwork('1.1.1.1/32')
+        v6addr = ipaddr.IPAddress('::1')
+        v6net = ipaddr.IPNetwork('::1/128')
+
+        self.assertFalse(v4net.__contains__(v6net))
+        self.assertFalse(v6net.__contains__(v4net))
+
+        self.assertRaises(TypeError, lambda: v4addr < v4net)
+        self.assertRaises(TypeError, lambda: v4addr > v4net)
+        self.assertRaises(TypeError, lambda: v4net < v4addr)
+        self.assertRaises(TypeError, lambda: v4net > v4addr)
+
+        self.assertRaises(TypeError, lambda: v6addr < v6net)
+        self.assertRaises(TypeError, lambda: v6addr > v6net)
+        self.assertRaises(TypeError, lambda: v6net < v6addr)
+        self.assertRaises(TypeError, lambda: v6net > v6addr)
+
+        # with get_mixed_type_key, you can sort addresses and network.
+        self.assertEqual([v4addr, v4net], sorted([v4net, v4addr],
+                                                 key=ipaddr.get_mixed_type_key))
+        self.assertEqual([v6addr, v6net], sorted([v6net, v6addr],
+                                                 key=ipaddr.get_mixed_type_key))
+
+    def testIpFromInt(self):
+        self.assertEqual(self.ipv4.ip, ipaddr.IPv4Network(16909060).ip)
+        self.assertRaises(ipaddr.AddressValueError,
+                          ipaddr.IPv4Network, 2**32)
+        self.assertRaises(ipaddr.AddressValueError,
+                          ipaddr.IPv4Network, -1)
+
+        ipv4 = ipaddr.IPNetwork('1.2.3.4')
+        ipv6 = ipaddr.IPNetwork('2001:658:22a:cafe:200:0:0:1')
+        self.assertEqual(ipv4, ipaddr.IPNetwork(int(ipv4)))
+        self.assertEqual(ipv6, ipaddr.IPNetwork(int(ipv6)))
+
+        v6_int = 42540616829182469433547762482097946625
+        self.assertEqual(self.ipv6.ip, ipaddr.IPv6Network(v6_int).ip)
+        self.assertRaises(ipaddr.AddressValueError,
+                          ipaddr.IPv6Network, 2**128)
+        self.assertRaises(ipaddr.AddressValueError,
+                          ipaddr.IPv6Network, -1)
+
+        self.assertEqual(ipaddr.IPNetwork(self.ipv4.ip).version, 4)
+        self.assertEqual(ipaddr.IPNetwork(self.ipv6.ip).version, 6)
+
+    def testIpFromPacked(self):
+        ip = ipaddr.IPNetwork
+
+        self.assertEqual(self.ipv4.ip,
+                         ip(_cb('\x01\x02\x03\x04')).ip)
+        self.assertEqual(ip('255.254.253.252'),
+                         ip(_cb('\xff\xfe\xfd\xfc')))
+        self.assertRaises(ValueError, ipaddr.IPNetwork, _cb('\x00' * 3))
+        self.assertRaises(ValueError, ipaddr.IPNetwork, _cb('\x00' * 5))
+        self.assertEqual(self.ipv6.ip,
+                         ip(_cb('\x20\x01\x06\x58\x02\x2a\xca\xfe'
+                           '\x02\x00\x00\x00\x00\x00\x00\x01')).ip)
+        self.assertEqual(ip('ffff:2:3:4:ffff::'),
+                         ip(_cb('\xff\xff\x00\x02\x00\x03\x00\x04' +
+                               '\xff\xff' + '\x00' * 6)))
+        self.assertEqual(ip('::'),
+                         ip(_cb('\x00' * 16)))
+        self.assertRaises(ValueError, ip, _cb('\x00' * 15))
+        self.assertRaises(ValueError, ip, _cb('\x00' * 17))
+
+    def testGetIp(self):
+        self.assertEqual(int(self.ipv4.ip), 16909060)
+        self.assertEqual(str(self.ipv4.ip), '1.2.3.4')
+        self.assertEqual(str(self.ipv4_hostmask.ip), '10.0.0.1')
+
+        self.assertEqual(int(self.ipv6.ip),
+                         42540616829182469433547762482097946625)
+        self.assertEqual(str(self.ipv6.ip),
+                         '2001:658:22a:cafe:200::1')
+
+    def testGetNetmask(self):
+        self.assertEqual(int(self.ipv4.netmask), 4294967040L)
+        self.assertEqual(str(self.ipv4.netmask), '255.255.255.0')
+        self.assertEqual(str(self.ipv4_hostmask.netmask), '255.0.0.0')
+        self.assertEqual(int(self.ipv6.netmask),
+                         340282366920938463444927863358058659840)
+        self.assertEqual(self.ipv6.prefixlen, 64)
+
+    def testZeroNetmask(self):
+        ipv4_zero_netmask = ipaddr.IPv4Network('1.2.3.4/0')
+        self.assertEqual(int(ipv4_zero_netmask.netmask), 0)
+        self.assertTrue(ipv4_zero_netmask._is_valid_netmask(str(0)))
+
+        ipv6_zero_netmask = ipaddr.IPv6Network('::1/0')
+        self.assertEqual(int(ipv6_zero_netmask.netmask), 0)
+        self.assertTrue(ipv6_zero_netmask._is_valid_netmask(str(0)))
+
+    def testGetBroadcast(self):
+        self.assertEqual(int(self.ipv4.broadcast), 16909311L)
+        self.assertEqual(str(self.ipv4.broadcast), '1.2.3.255')
+
+        self.assertEqual(int(self.ipv6.broadcast),
+                         42540616829182469451850391367731642367)
+        self.assertEqual(str(self.ipv6.broadcast),
+                         '2001:658:22a:cafe:ffff:ffff:ffff:ffff')
+
+    def testGetPrefixlen(self):
+        self.assertEqual(self.ipv4.prefixlen, 24)
+
+        self.assertEqual(self.ipv6.prefixlen, 64)
+
+    def testGetSupernet(self):
+        self.assertEqual(self.ipv4.supernet().prefixlen, 23)
+        self.assertEqual(str(self.ipv4.supernet().network), '1.2.2.0')
+        self.assertEqual(ipaddr.IPv4Network('0.0.0.0/0').supernet(),
+                         ipaddr.IPv4Network('0.0.0.0/0'))
+
+        self.assertEqual(self.ipv6.supernet().prefixlen, 63)
+        self.assertEqual(str(self.ipv6.supernet().network),
+                         '2001:658:22a:cafe::')
+        self.assertEqual(ipaddr.IPv6Network('::0/0').supernet(),
+                         ipaddr.IPv6Network('::0/0'))
+
+    def testGetSupernet3(self):
+        self.assertEqual(self.ipv4.supernet(3).prefixlen, 21)
+        self.assertEqual(str(self.ipv4.supernet(3).network), '1.2.0.0')
+
+        self.assertEqual(self.ipv6.supernet(3).prefixlen, 61)
+        self.assertEqual(str(self.ipv6.supernet(3).network),
+                         '2001:658:22a:caf8::')
+
+    def testGetSupernet4(self):
+        self.assertRaises(ValueError, self.ipv4.supernet, prefixlen_diff=2,
+                          new_prefix=1)
+        self.assertRaises(ValueError, self.ipv4.supernet, new_prefix=25)
+        self.assertEqual(self.ipv4.supernet(prefixlen_diff=2),
+                         self.ipv4.supernet(new_prefix=22))
+
+        self.assertRaises(ValueError, self.ipv6.supernet, prefixlen_diff=2,
+                          new_prefix=1)
+        self.assertRaises(ValueError, self.ipv6.supernet, new_prefix=65)
+        self.assertEqual(self.ipv6.supernet(prefixlen_diff=2),
+                         self.ipv6.supernet(new_prefix=62))
+
+    def testIterSubnets(self):
+        self.assertEqual(self.ipv4.subnet(), list(self.ipv4.iter_subnets()))
+        self.assertEqual(self.ipv6.subnet(), list(self.ipv6.iter_subnets()))
+
+    def testIterHosts(self):
+        self.assertEqual([ipaddr.IPv4Address('2.0.0.0'),
+                          ipaddr.IPv4Address('2.0.0.1')],
+                         list(ipaddr.IPNetwork('2.0.0.0/31').iterhosts()))
+
+    def testFancySubnetting(self):
+        self.assertEqual(sorted(self.ipv4.subnet(prefixlen_diff=3)),
+                         sorted(self.ipv4.subnet(new_prefix=27)))
+        self.assertRaises(ValueError, self.ipv4.subnet, new_prefix=23)
+        self.assertRaises(ValueError, self.ipv4.subnet,
+                          prefixlen_diff=3, new_prefix=27)
+        self.assertEqual(sorted(self.ipv6.subnet(prefixlen_diff=4)),
+                         sorted(self.ipv6.subnet(new_prefix=68)))
+        self.assertRaises(ValueError, self.ipv6.subnet, new_prefix=63)
+        self.assertRaises(ValueError, self.ipv6.subnet,
+                          prefixlen_diff=4, new_prefix=68)
+
+    def testGetSubnet(self):
+        self.assertEqual(self.ipv4.subnet()[0].prefixlen, 25)
+        self.assertEqual(str(self.ipv4.subnet()[0].network), '1.2.3.0')
+        self.assertEqual(str(self.ipv4.subnet()[1].network), '1.2.3.128')
+
+        self.assertEqual(self.ipv6.subnet()[0].prefixlen, 65)
+
+    def testGetSubnetForSingle32(self):
+        ip = ipaddr.IPv4Network('1.2.3.4/32')
+        subnets1 = [str(x) for x in ip.subnet()]
+        subnets2 = [str(x) for x in ip.subnet(2)]
+        self.assertEqual(subnets1, ['1.2.3.4/32'])
+        self.assertEqual(subnets1, subnets2)
+
+    def testGetSubnetForSingle128(self):
+        ip = ipaddr.IPv6Network('::1/128')
+        subnets1 = [str(x) for x in ip.subnet()]
+        subnets2 = [str(x) for x in ip.subnet(2)]
+        self.assertEqual(subnets1, ['::1/128'])
+        self.assertEqual(subnets1, subnets2)
+
+    def testSubnet2(self):
+        ips = [str(x) for x in self.ipv4.subnet(2)]
+        self.assertEqual(
+            ips,
+            ['1.2.3.0/26', '1.2.3.64/26', '1.2.3.128/26', '1.2.3.192/26'])
+
+        ipsv6 = [str(x) for x in self.ipv6.subnet(2)]
+        self.assertEqual(
+            ipsv6,
+            ['2001:658:22a:cafe::/66',
+             '2001:658:22a:cafe:4000::/66',
+             '2001:658:22a:cafe:8000::/66',
+             '2001:658:22a:cafe:c000::/66'])
+
+    def testSubnetFailsForLargeCidrDiff(self):
+        self.assertRaises(ValueError, self.ipv4.subnet, 9)
+        self.assertRaises(ValueError, self.ipv6.subnet, 65)
+
+    def testSupernetFailsForLargeCidrDiff(self):
+        self.assertRaises(ValueError, self.ipv4.supernet, 25)
+        self.assertRaises(ValueError, self.ipv6.supernet, 65)
+
+    def testSubnetFailsForNegativeCidrDiff(self):
+        self.assertRaises(ValueError, self.ipv4.subnet, -1)
+        self.assertRaises(ValueError, self.ipv6.subnet, -1)
+
+    def testGetNumHosts(self):
+        self.assertEqual(self.ipv4.numhosts, 256)
+        self.assertEqual(self.ipv4.subnet()[0].numhosts, 128)
+        self.assertEqual(self.ipv4.supernet().numhosts, 512)
+
+        self.assertEqual(self.ipv6.numhosts, 18446744073709551616)
+        self.assertEqual(self.ipv6.subnet()[0].numhosts, 9223372036854775808)
+        self.assertEqual(self.ipv6.supernet().numhosts, 36893488147419103232)
+
+    def testContains(self):
+        self.assertTrue(ipaddr.IPv4Network('1.2.3.128/25') in self.ipv4)
+        self.assertFalse(ipaddr.IPv4Network('1.2.4.1/24') in self.ipv4)
+        self.assertTrue(self.ipv4 in self.ipv4)
+        self.assertTrue(self.ipv6 in self.ipv6)
+        # We can test addresses and string as well.
+        addr1 = ipaddr.IPv4Address('1.2.3.37')
+        self.assertTrue(addr1 in self.ipv4)
+        # issue 61, bad network comparison on like-ip'd network objects
+        # with identical broadcast addresses.
+        self.assertFalse(ipaddr.IPv4Network('1.1.0.0/16').__contains__(
+                ipaddr.IPv4Network('1.0.0.0/15')))
+
+    def testBadAddress(self):
+        self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv4Network,
+                          'poop')
+        self.assertRaises(ipaddr.AddressValueError,
+                          ipaddr.IPv4Network, '1.2.3.256')
+
+        self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network,
+                          'poopv6')
+        self.assertRaises(ipaddr.AddressValueError,
+                          ipaddr.IPv4Network, '1.2.3.4/32/24')
+        self.assertRaises(ipaddr.AddressValueError,
+                          ipaddr.IPv4Network, '10/8')
+        self.assertRaises(ipaddr.AddressValueError,
+                          ipaddr.IPv6Network, '10/8')
+
+
+    def testBadNetMask(self):
+        self.assertRaises(ipaddr.NetmaskValueError,
+                          ipaddr.IPv4Network, '1.2.3.4/')
+        self.assertRaises(ipaddr.NetmaskValueError,
+                          ipaddr.IPv4Network, '1.2.3.4/33')
+        self.assertRaises(ipaddr.NetmaskValueError,
+                          ipaddr.IPv4Network, '1.2.3.4/254.254.255.256')
+        self.assertRaises(ipaddr.NetmaskValueError,
+                          ipaddr.IPv4Network, '1.1.1.1/240.255.0.0')
+        self.assertRaises(ipaddr.NetmaskValueError,
+                          ipaddr.IPv6Network, '::1/')
+        self.assertRaises(ipaddr.NetmaskValueError,
+                          ipaddr.IPv6Network, '::1/129')
+
+    def testNth(self):
+        self.assertEqual(str(self.ipv4[5]), '1.2.3.5')
+        self.assertRaises(IndexError, self.ipv4.__getitem__, 256)
+
+        self.assertEqual(str(self.ipv6[5]),
+                         '2001:658:22a:cafe::5')
+
+    def testGetitem(self):
+        # http://code.google.com/p/ipaddr-py/issues/detail?id=15
+        addr = ipaddr.IPv4Network('172.31.255.128/255.255.255.240')
+        self.assertEqual(28, addr.prefixlen)
+        addr_list = list(addr)
+        self.assertEqual('172.31.255.128', str(addr_list[0]))
+        self.assertEqual('172.31.255.128', str(addr[0]))
+        self.assertEqual('172.31.255.143', str(addr_list[-1]))
+        self.assertEqual('172.31.255.143', str(addr[-1]))
+        self.assertEqual(addr_list[-1], addr[-1])
+
+    def testEqual(self):
+        self.assertTrue(self.ipv4 == ipaddr.IPv4Network('1.2.3.4/24'))
+        self.assertFalse(self.ipv4 == ipaddr.IPv4Network('1.2.3.4/23'))
+        self.assertFalse(self.ipv4 == ipaddr.IPv6Network('::1.2.3.4/24'))
+        self.assertFalse(self.ipv4 == '')
+        self.assertFalse(self.ipv4 == [])
+        self.assertFalse(self.ipv4 == 2)
+        self.assertTrue(ipaddr.IPNetwork('1.1.1.1/32') ==
+                        ipaddr.IPAddress('1.1.1.1'))
+        self.assertTrue(ipaddr.IPNetwork('1.1.1.1/24') ==
+                        ipaddr.IPAddress('1.1.1.1'))
+        self.assertFalse(ipaddr.IPNetwork('1.1.1.0/24') ==
+                         ipaddr.IPAddress('1.1.1.1'))
+
+        self.assertTrue(self.ipv6 ==
+            ipaddr.IPv6Network('2001:658:22a:cafe:200::1/64'))
+        self.assertTrue(ipaddr.IPNetwork('::1/128') ==
+                        ipaddr.IPAddress('::1'))
+        self.assertTrue(ipaddr.IPNetwork('::1/127') ==
+                        ipaddr.IPAddress('::1'))
+        self.assertFalse(ipaddr.IPNetwork('::0/127') ==
+                         ipaddr.IPAddress('::1'))
+        self.assertFalse(self.ipv6 ==
+            ipaddr.IPv6Network('2001:658:22a:cafe:200::1/63'))
+        self.assertFalse(self.ipv6 == ipaddr.IPv4Network('1.2.3.4/23'))
+        self.assertFalse(self.ipv6 == '')
+        self.assertFalse(self.ipv6 == [])
+        self.assertFalse(self.ipv6 == 2)
+
+    def testNotEqual(self):
+        self.assertFalse(self.ipv4 != ipaddr.IPv4Network('1.2.3.4/24'))
+        self.assertTrue(self.ipv4 != ipaddr.IPv4Network('1.2.3.4/23'))
+        self.assertTrue(self.ipv4 != ipaddr.IPv6Network('::1.2.3.4/24'))
+        self.assertTrue(self.ipv4 != '')
+        self.assertTrue(self.ipv4 != [])
+        self.assertTrue(self.ipv4 != 2)
+
+        addr2 = ipaddr.IPAddress('2001:658:22a:cafe:200::1')
+        self.assertFalse(self.ipv6 !=
+            ipaddr.IPv6Network('2001:658:22a:cafe:200::1/64'))
+        self.assertTrue(self.ipv6 !=
+            ipaddr.IPv6Network('2001:658:22a:cafe:200::1/63'))
+        self.assertTrue(self.ipv6 != ipaddr.IPv4Network('1.2.3.4/23'))
+        self.assertTrue(self.ipv6 != '')
+        self.assertTrue(self.ipv6 != [])
+        self.assertTrue(self.ipv6 != 2)
+
+    def testSlash32Constructor(self):
+        self.assertEqual(str(ipaddr.IPv4Network('1.2.3.4/255.255.255.255')),
+                          '1.2.3.4/32')
+
+    def testSlash128Constructor(self):
+        self.assertEqual(str(ipaddr.IPv6Network('::1/128')),
+                                  '::1/128')
+
+    def testSlash0Constructor(self):
+        self.assertEqual(str(ipaddr.IPv4Network('1.2.3.4/0.0.0.0')),
+                          '1.2.3.4/0')
+
+    def testCollapsing(self):
+        # test only IP addresses including some duplicates
+        ip1 = ipaddr.IPv4Address('1.1.1.0')
+        ip2 = ipaddr.IPv4Address('1.1.1.1')
+        ip3 = ipaddr.IPv4Address('1.1.1.2')
+        ip4 = ipaddr.IPv4Address('1.1.1.3')
+        ip5 = ipaddr.IPv4Address('1.1.1.4')
+        ip6 = ipaddr.IPv4Address('1.1.1.0')
+        # check that addresses are subsumed properly.
+        collapsed = ipaddr.collapse_address_list([ip1, ip2, ip3, ip4, ip5, ip6])
+        self.assertEqual(collapsed, [ipaddr.IPv4Network('1.1.1.0/30'),
+                                     ipaddr.IPv4Network('1.1.1.4/32')])
+
+        # test a mix of IP addresses and networks including some duplicates
+        ip1 = ipaddr.IPv4Address('1.1.1.0')
+        ip2 = ipaddr.IPv4Address('1.1.1.1')
+        ip3 = ipaddr.IPv4Address('1.1.1.2')
+        ip4 = ipaddr.IPv4Address('1.1.1.3')
+        ip5 = ipaddr.IPv4Network('1.1.1.4/30')
+        ip6 = ipaddr.IPv4Network('1.1.1.4/30')
+        # check that addresses are subsumed properly.
+        collapsed = ipaddr.collapse_address_list([ip5, ip1, ip2, ip3, ip4, ip6])
+        self.assertEqual(collapsed, [ipaddr.IPv4Network('1.1.1.0/29')])
+
+        # test only IP networks
+        ip1 = ipaddr.IPv4Network('1.1.0.0/24')
+        ip2 = ipaddr.IPv4Network('1.1.1.0/24')
+        ip3 = ipaddr.IPv4Network('1.1.2.0/24')
+        ip4 = ipaddr.IPv4Network('1.1.3.0/24')
+        ip5 = ipaddr.IPv4Network('1.1.4.0/24')
+        # stored in no particular order b/c we want CollapseAddr to call [].sort
+        ip6 = ipaddr.IPv4Network('1.1.0.0/22')
+        # check that addresses are subsumed properly.
+        collapsed = ipaddr.collapse_address_list([ip1, ip2, ip3, ip4, ip5, ip6])
+        self.assertEqual(collapsed, [ipaddr.IPv4Network('1.1.0.0/22'),
+                                     ipaddr.IPv4Network('1.1.4.0/24')])
+
+        # test that two addresses are supernet'ed properly
+        collapsed = ipaddr.collapse_address_list([ip1, ip2])
+        self.assertEqual(collapsed, [ipaddr.IPv4Network('1.1.0.0/23')])
+
+        # test same IP networks
+        ip_same1 = ip_same2 = ipaddr.IPv4Network('1.1.1.1/32')
+        self.assertEqual(ipaddr.collapse_address_list([ip_same1, ip_same2]),
+                         [ip_same1])
+
+        # test same IP addresses
+        ip_same1 = ip_same2 = ipaddr.IPv4Address('1.1.1.1')
+        self.assertEqual(ipaddr.collapse_address_list([ip_same1, ip_same2]),
+                         [ipaddr.IPNetwork('1.1.1.1/32')])
+        ip1 = ipaddr.IPv6Network('::2001:1/100')
+        ip2 = ipaddr.IPv6Network('::2002:1/120')
+        ip3 = ipaddr.IPv6Network('::2001:1/96')
+        # test that ipv6 addresses are subsumed properly.
+        collapsed = ipaddr.collapse_address_list([ip1, ip2, ip3])
+        self.assertEqual(collapsed, [ip3])
+
+        # the toejam test
+        ip1 = ipaddr.IPAddress('1.1.1.1')
+        ip2 = ipaddr.IPAddress('::1')
+        self.assertRaises(TypeError, ipaddr.collapse_address_list,
+                          [ip1, ip2])
+
+    def testSummarizing(self):
+        #ip = ipaddr.IPAddress
+        #ipnet = ipaddr.IPNetwork
+        summarize = ipaddr.summarize_address_range
+        ip1 = ipaddr.IPAddress('1.1.1.0')
+        ip2 = ipaddr.IPAddress('1.1.1.255')
+        # test a /24 is summarized properly
+        self.assertEqual(summarize(ip1, ip2)[0], ipaddr.IPNetwork('1.1.1.0/24'))
+        # test an  IPv4 range that isn't on a network byte boundary
+        ip2 = ipaddr.IPAddress('1.1.1.8')
+        self.assertEqual(summarize(ip1, ip2), [ipaddr.IPNetwork('1.1.1.0/29'),
+                                               ipaddr.IPNetwork('1.1.1.8')])
+
+        ip1 = ipaddr.IPAddress('1::')
+        ip2 = ipaddr.IPAddress('1:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
+        # test an IPv6 is summarized properly
+        self.assertEqual(summarize(ip1, ip2)[0], ipaddr.IPNetwork('1::/16'))
+        # test an IPv6 range that isn't on a network byte boundary
+        ip2 = ipaddr.IPAddress('2::')
+        self.assertEqual(summarize(ip1, ip2), [ipaddr.IPNetwork('1::/16'),
+                                               ipaddr.IPNetwork('2::/128')])
+
+        # test exception raised when first is greater than last
+        self.assertRaises(ValueError, summarize, ipaddr.IPAddress('1.1.1.0'),
+            ipaddr.IPAddress('1.1.0.0'))
+        # test exception raised when first and last aren't IP addresses
+        self.assertRaises(TypeError, summarize,
+                          ipaddr.IPNetwork('1.1.1.0'),
+                          ipaddr.IPNetwork('1.1.0.0'))
+        # test exception raised when first and last are not same version
+        self.assertRaises(TypeError, summarize, ipaddr.IPAddress('::'),
+            ipaddr.IPNetwork('1.1.0.0'))
+
+    def testAddressComparison(self):
+        self.assertTrue(ipaddr.IPAddress('1.1.1.1') <=
+                        ipaddr.IPAddress('1.1.1.1'))
+        self.assertTrue(ipaddr.IPAddress('1.1.1.1') <=
+                        ipaddr.IPAddress('1.1.1.2'))
+        self.assertTrue(ipaddr.IPAddress('::1') <= ipaddr.IPAddress('::1'))
+        self.assertTrue(ipaddr.IPAddress('::1') <= ipaddr.IPAddress('::2'))
+
+    def testNetworkComparison(self):
+        # ip1 and ip2 have the same network address
+        ip1 = ipaddr.IPv4Network('1.1.1.0/24')
+        ip2 = ipaddr.IPv4Network('1.1.1.1/24')
+        ip3 = ipaddr.IPv4Network('1.1.2.0/24')
+
+        self.assertTrue(ip1 < ip3)
+        self.assertTrue(ip3 > ip2)
+
+        self.assertEqual(ip1.compare_networks(ip2), 0)
+        self.assertTrue(ip1._get_networks_key() == ip2._get_networks_key())
+        self.assertEqual(ip1.compare_networks(ip3), -1)
+        self.assertTrue(ip1._get_networks_key() < ip3._get_networks_key())
+
+        ip1 = ipaddr.IPv6Network('2001::2000/96')
+        ip2 = ipaddr.IPv6Network('2001::2001/96')
+        ip3 = ipaddr.IPv6Network('2001:ffff::2000/96')
+
+        self.assertTrue(ip1 < ip3)
+        self.assertTrue(ip3 > ip2)
+        self.assertEqual(ip1.compare_networks(ip2), 0)
+        self.assertTrue(ip1._get_networks_key() == ip2._get_networks_key())
+        self.assertEqual(ip1.compare_networks(ip3), -1)
+        self.assertTrue(ip1._get_networks_key() < ip3._get_networks_key())
+
+        # Test comparing different protocols.
+        # Should always raise a TypeError.
+        ipv6 = ipaddr.IPv6Network('::/0')
+        ipv4 = ipaddr.IPv4Network('0.0.0.0/0')
+        self.assertRaises(TypeError, ipv4.__lt__, ipv6)
+        self.assertRaises(TypeError, ipv4.__gt__, ipv6)
+        self.assertRaises(TypeError, ipv6.__lt__, ipv4)
+        self.assertRaises(TypeError, ipv6.__gt__, ipv4)
+
+        # Regression test for issue 19.
+        ip1 = ipaddr.IPNetwork('10.1.2.128/25')
+        self.assertFalse(ip1 < ip1)
+        self.assertFalse(ip1 > ip1)
+        ip2 = ipaddr.IPNetwork('10.1.3.0/24')
+        self.assertTrue(ip1 < ip2)
+        self.assertFalse(ip2 < ip1)
+        self.assertFalse(ip1 > ip2)
+        self.assertTrue(ip2 > ip1)
+        ip3 = ipaddr.IPNetwork('10.1.3.0/25')
+        self.assertTrue(ip2 < ip3)
+        self.assertFalse(ip3 < ip2)
+        self.assertFalse(ip2 > ip3)
+        self.assertTrue(ip3 > ip2)
+
+        # Regression test for issue 28.
+        ip1 = ipaddr.IPNetwork('10.10.10.0/31')
+        ip2 = ipaddr.IPNetwork('10.10.10.0')
+        ip3 = ipaddr.IPNetwork('10.10.10.2/31')
+        ip4 = ipaddr.IPNetwork('10.10.10.2')
+        sorted = [ip1, ip2, ip3, ip4]
+        unsorted = [ip2, ip4, ip1, ip3]
+        unsorted.sort()
+        self.assertEqual(sorted, unsorted)
+        unsorted = [ip4, ip1, ip3, ip2]
+        unsorted.sort()
+        self.assertEqual(sorted, unsorted)
+        self.assertRaises(TypeError, ip1.__lt__, ipaddr.IPAddress('10.10.10.0'))
+        self.assertRaises(TypeError, ip2.__lt__, ipaddr.IPAddress('10.10.10.0'))
+
+        # <=, >=
+        self.assertTrue(ipaddr.IPNetwork('1.1.1.1') <=
+                        ipaddr.IPNetwork('1.1.1.1'))
+        self.assertTrue(ipaddr.IPNetwork('1.1.1.1') <=
+                        ipaddr.IPNetwork('1.1.1.2'))
+        self.assertFalse(ipaddr.IPNetwork('1.1.1.2') <=
+                        ipaddr.IPNetwork('1.1.1.1'))
+        self.assertTrue(ipaddr.IPNetwork('::1') <= ipaddr.IPNetwork('::1'))
+        self.assertTrue(ipaddr.IPNetwork('::1') <= ipaddr.IPNetwork('::2'))
+        self.assertFalse(ipaddr.IPNetwork('::2') <= ipaddr.IPNetwork('::1'))
+
+    def testStrictNetworks(self):
+        self.assertRaises(ValueError, ipaddr.IPNetwork, '192.168.1.1/24',
+                          strict=True)
+        self.assertRaises(ValueError, ipaddr.IPNetwork, '::1/120', strict=True)
+
+    def testOverlaps(self):
+        other = ipaddr.IPv4Network('1.2.3.0/30')
+        other2 = ipaddr.IPv4Network('1.2.2.0/24')
+        other3 = ipaddr.IPv4Network('1.2.2.64/26')
+        self.assertTrue(self.ipv4.overlaps(other))
+        self.assertFalse(self.ipv4.overlaps(other2))
+        self.assertTrue(other2.overlaps(other3))
+
+    def testEmbeddedIpv4(self):
+        ipv4_string = '192.168.0.1'
+        ipv4 = ipaddr.IPv4Network(ipv4_string)
+        v4compat_ipv6 = ipaddr.IPv6Network('::%s' % ipv4_string)
+        self.assertEqual(int(v4compat_ipv6.ip), int(ipv4.ip))
+        v4mapped_ipv6 = ipaddr.IPv6Network('::ffff:%s' % ipv4_string)
+        self.assertNotEqual(v4mapped_ipv6.ip, ipv4.ip)
+        self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network,
+                          '2001:1.1.1.1:1.1.1.1')
+
+    # Issue 67: IPv6 with embedded IPv4 address not recognized.
+    def testIPv6AddressTooLarge(self):
+        # RFC4291 2.5.5.2
+        self.assertEqual(ipaddr.IPAddress('::FFFF:192.0.2.1'),
+                          ipaddr.IPAddress('::FFFF:c000:201'))
+        # RFC4291 2.2 (part 3) x::d.d.d.d
+        self.assertEqual(ipaddr.IPAddress('FFFF::192.0.2.1'),
+                          ipaddr.IPAddress('FFFF::c000:201'))
+
+    def testIPVersion(self):
+        self.assertEqual(self.ipv4.version, 4)
+        self.assertEqual(self.ipv6.version, 6)
+
+    def testMaxPrefixLength(self):
+        self.assertEqual(self.ipv4.max_prefixlen, 32)
+        self.assertEqual(self.ipv6.max_prefixlen, 128)
+
+    def testPacked(self):
+        self.assertEqual(self.ipv4.packed,
+                         _cb('\x01\x02\x03\x04'))
+        self.assertEqual(ipaddr.IPv4Network('255.254.253.252').packed,
+                         _cb('\xff\xfe\xfd\xfc'))
+        self.assertEqual(self.ipv6.packed,
+                         _cb('\x20\x01\x06\x58\x02\x2a\xca\xfe'
+                             '\x02\x00\x00\x00\x00\x00\x00\x01'))
+        self.assertEqual(ipaddr.IPv6Network('ffff:2:3:4:ffff::').packed,
+                         _cb('\xff\xff\x00\x02\x00\x03\x00\x04\xff\xff'
+                            + '\x00' * 6))
+        self.assertEqual(ipaddr.IPv6Network('::1:0:0:0:0').packed,
+                         _cb('\x00' * 6 + '\x00\x01' + '\x00' * 8))
+
+    def testIpStrFromPrefixlen(self):
+        ipv4 = ipaddr.IPv4Network('1.2.3.4/24')
+        self.assertEqual(ipv4._ip_string_from_prefix(), '255.255.255.0')
+        self.assertEqual(ipv4._ip_string_from_prefix(28), '255.255.255.240')
+
+    def testIpType(self):
+        ipv4net = ipaddr.IPNetwork('1.2.3.4')
+        ipv4addr = ipaddr.IPAddress('1.2.3.4')
+        ipv6net = ipaddr.IPNetwork('::1.2.3.4')
+        ipv6addr = ipaddr.IPAddress('::1.2.3.4')
+        self.assertEqual(ipaddr.IPv4Network, type(ipv4net))
+        self.assertEqual(ipaddr.IPv4Address, type(ipv4addr))
+        self.assertEqual(ipaddr.IPv6Network, type(ipv6net))
+        self.assertEqual(ipaddr.IPv6Address, type(ipv6addr))
+
+    def testReservedIpv4(self):
+        # test networks
+        self.assertEqual(True, ipaddr.IPNetwork('224.1.1.1/31').is_multicast)
+        self.assertEqual(False, ipaddr.IPNetwork('240.0.0.0').is_multicast)
+
+        self.assertEqual(True, ipaddr.IPNetwork('192.168.1.1/17').is_private)
+        self.assertEqual(False, ipaddr.IPNetwork('192.169.0.0').is_private)
+        self.assertEqual(True, ipaddr.IPNetwork('10.255.255.255').is_private)
+        self.assertEqual(False, ipaddr.IPNetwork('11.0.0.0').is_private)
+        self.assertEqual(True, ipaddr.IPNetwork('172.31.255.255').is_private)
+        self.assertEqual(False, ipaddr.IPNetwork('172.32.0.0').is_private)
+
+        self.assertEqual(True,
+                          ipaddr.IPNetwork('169.254.100.200/24').is_link_local)
+        self.assertEqual(False,
+                          ipaddr.IPNetwork('169.255.100.200/24').is_link_local)
+
+        self.assertEqual(True,
+                          ipaddr.IPNetwork('127.100.200.254/32').is_loopback)
+        self.assertEqual(True, ipaddr.IPNetwork('127.42.0.0/16').is_loopback)
+        self.assertEqual(False, ipaddr.IPNetwork('128.0.0.0').is_loopback)
+
+        # test addresses
+        self.assertEqual(True, ipaddr.IPAddress('224.1.1.1').is_multicast)
+        self.assertEqual(False, ipaddr.IPAddress('240.0.0.0').is_multicast)
+
+        self.assertEqual(True, ipaddr.IPAddress('192.168.1.1').is_private)
+        self.assertEqual(False, ipaddr.IPAddress('192.169.0.0').is_private)
+        self.assertEqual(True, ipaddr.IPAddress('10.255.255.255').is_private)
+        self.assertEqual(False, ipaddr.IPAddress('11.0.0.0').is_private)
+        self.assertEqual(True, ipaddr.IPAddress('172.31.255.255').is_private)
+        self.assertEqual(False, ipaddr.IPAddress('172.32.0.0').is_private)
+
+        self.assertEqual(True,
+                          ipaddr.IPAddress('169.254.100.200').is_link_local)
+        self.assertEqual(False,
+                          ipaddr.IPAddress('169.255.100.200').is_link_local)
+
+        self.assertEqual(True,
+                          ipaddr.IPAddress('127.100.200.254').is_loopback)
+        self.assertEqual(True, ipaddr.IPAddress('127.42.0.0').is_loopback)
+        self.assertEqual(False, ipaddr.IPAddress('128.0.0.0').is_loopback)
+        self.assertEqual(True, ipaddr.IPNetwork('0.0.0.0').is_unspecified)
+
+    def testReservedIpv6(self):
+
+        self.assertEqual(True, ipaddr.IPNetwork('ffff::').is_multicast)
+        self.assertEqual(True, ipaddr.IPNetwork(2**128-1).is_multicast)
+        self.assertEqual(True, ipaddr.IPNetwork('ff00::').is_multicast)
+        self.assertEqual(False, ipaddr.IPNetwork('fdff::').is_multicast)
+
+        self.assertEqual(True, ipaddr.IPNetwork('fecf::').is_site_local)
+        self.assertEqual(True, ipaddr.IPNetwork(
+                'feff:ffff:ffff:ffff::').is_site_local)
+        self.assertEqual(False, ipaddr.IPNetwork('fbf:ffff::').is_site_local)
+        self.assertEqual(False, ipaddr.IPNetwork('ff00::').is_site_local)
+
+        self.assertEqual(True, ipaddr.IPNetwork('fc00::').is_private)
+        self.assertEqual(True, ipaddr.IPNetwork(
+                'fc00:ffff:ffff:ffff::').is_private)
+        self.assertEqual(False, ipaddr.IPNetwork('fbff:ffff::').is_private)
+        self.assertEqual(False, ipaddr.IPNetwork('fe00::').is_private)
+
+        self.assertEqual(True, ipaddr.IPNetwork('fea0::').is_link_local)
+        self.assertEqual(True, ipaddr.IPNetwork('febf:ffff::').is_link_local)
+        self.assertEqual(False, ipaddr.IPNetwork('fe7f:ffff::').is_link_local)
+        self.assertEqual(False, ipaddr.IPNetwork('fec0::').is_link_local)
+
+        self.assertEqual(True, ipaddr.IPNetwork('0:0::0:01').is_loopback)
+        self.assertEqual(False, ipaddr.IPNetwork('::1/127').is_loopback)
+        self.assertEqual(False, ipaddr.IPNetwork('::').is_loopback)
+        self.assertEqual(False, ipaddr.IPNetwork('::2').is_loopback)
+
+        self.assertEqual(True, ipaddr.IPNetwork('0::0').is_unspecified)
+        self.assertEqual(False, ipaddr.IPNetwork('::1').is_unspecified)
+        self.assertEqual(False, ipaddr.IPNetwork('::/127').is_unspecified)
+
+        # test addresses
+        self.assertEqual(True, ipaddr.IPAddress('ffff::').is_multicast)
+        self.assertEqual(True, ipaddr.IPAddress(2**128-1).is_multicast)
+        self.assertEqual(True, ipaddr.IPAddress('ff00::').is_multicast)
+        self.assertEqual(False, ipaddr.IPAddress('fdff::').is_multicast)
+
+        self.assertEqual(True, ipaddr.IPAddress('fecf::').is_site_local)
+        self.assertEqual(True, ipaddr.IPAddress(
+                'feff:ffff:ffff:ffff::').is_site_local)
+        self.assertEqual(False, ipaddr.IPAddress('fbf:ffff::').is_site_local)
+        self.assertEqual(False, ipaddr.IPAddress('ff00::').is_site_local)
+
+        self.assertEqual(True, ipaddr.IPAddress('fc00::').is_private)
+        self.assertEqual(True, ipaddr.IPAddress(
+                'fc00:ffff:ffff:ffff::').is_private)
+        self.assertEqual(False, ipaddr.IPAddress('fbff:ffff::').is_private)
+        self.assertEqual(False, ipaddr.IPAddress('fe00::').is_private)
+
+        self.assertEqual(True, ipaddr.IPAddress('fea0::').is_link_local)
+        self.assertEqual(True, ipaddr.IPAddress('febf:ffff::').is_link_local)
+        self.assertEqual(False, ipaddr.IPAddress('fe7f:ffff::').is_link_local)
+        self.assertEqual(False, ipaddr.IPAddress('fec0::').is_link_local)
+
+        self.assertEqual(True, ipaddr.IPAddress('0:0::0:01').is_loopback)
+        self.assertEqual(True, ipaddr.IPAddress('::1').is_loopback)
+        self.assertEqual(False, ipaddr.IPAddress('::2').is_loopback)
+
+        self.assertEqual(True, ipaddr.IPAddress('0::0').is_unspecified)
+        self.assertEqual(False, ipaddr.IPAddress('::1').is_unspecified)
+
+        # some generic IETF reserved addresses
+        self.assertEqual(True, ipaddr.IPAddress('100::').is_reserved)
+        self.assertEqual(True, ipaddr.IPNetwork('4000::1/128').is_reserved)
+
+    def testIpv4Mapped(self):
+        self.assertEqual(ipaddr.IPAddress('::ffff:192.168.1.1').ipv4_mapped,
+                         ipaddr.IPAddress('192.168.1.1'))
+        self.assertEqual(ipaddr.IPAddress('::c0a8:101').ipv4_mapped, None)
+        self.assertEqual(ipaddr.IPAddress('::ffff:c0a8:101').ipv4_mapped,
+                         ipaddr.IPAddress('192.168.1.1'))
+
+    def testAddrExclude(self):
+        addr1 = ipaddr.IPNetwork('10.1.1.0/24')
+        addr2 = ipaddr.IPNetwork('10.1.1.0/26')
+        addr3 = ipaddr.IPNetwork('10.2.1.0/24')
+        addr4 = ipaddr.IPAddress('10.1.1.0')
+        self.assertEqual(addr1.address_exclude(addr2),
+                         [ipaddr.IPNetwork('10.1.1.64/26'),
+                          ipaddr.IPNetwork('10.1.1.128/25')])
+        self.assertRaises(ValueError, addr1.address_exclude, addr3)
+        self.assertRaises(TypeError, addr1.address_exclude, addr4)
+        self.assertEqual(addr1.address_exclude(addr1), [])
+
+    def testHash(self):
+        self.assertEqual(hash(ipaddr.IPNetwork('10.1.1.0/24')),
+                          hash(ipaddr.IPNetwork('10.1.1.0/24')))
+        self.assertEqual(hash(ipaddr.IPAddress('10.1.1.0')),
+                          hash(ipaddr.IPAddress('10.1.1.0')))
+        # i70
+        self.assertEqual(hash(ipaddr.IPAddress('1.2.3.4')),
+                          hash(ipaddr.IPAddress(
+                    long(ipaddr.IPAddress('1.2.3.4')._ip))))
+        ip1 = ipaddr.IPAddress('10.1.1.0')
+        ip2 = ipaddr.IPAddress('1::')
+        dummy = {}
+        dummy[self.ipv4] = None
+        dummy[self.ipv6] = None
+        dummy[ip1] = None
+        dummy[ip2] = None
+        self.assertTrue(self.ipv4 in dummy)
+        self.assertTrue(ip2 in dummy)
+
+    def testCopyConstructor(self):
+        addr1 = ipaddr.IPNetwork('10.1.1.0/24')
+        addr2 = ipaddr.IPNetwork(addr1)
+        addr3 = ipaddr.IPNetwork('2001:658:22a:cafe:200::1/64')
+        addr4 = ipaddr.IPNetwork(addr3)
+        addr5 = ipaddr.IPv4Address('1.1.1.1')
+        addr6 = ipaddr.IPv6Address('2001:658:22a:cafe:200::1')
+
+        self.assertEqual(addr1, addr2)
+        self.assertEqual(addr3, addr4)
+        self.assertEqual(addr5, ipaddr.IPv4Address(addr5))
+        self.assertEqual(addr6, ipaddr.IPv6Address(addr6))
+
+    def testCompressIPv6Address(self):
+        test_addresses = {
+            '1:2:3:4:5:6:7:8': '1:2:3:4:5:6:7:8/128',
+            '2001:0:0:4:0:0:0:8': '2001:0:0:4::8/128',
+            '2001:0:0:4:5:6:7:8': '2001::4:5:6:7:8/128',
+            '2001:0:3:4:5:6:7:8': '2001:0:3:4:5:6:7:8/128',
+            '0:0:3:0:0:0:0:ffff': '0:0:3::ffff/128',
+            '0:0:0:4:0:0:0:ffff': '::4:0:0:0:ffff/128',
+            '0:0:0:0:5:0:0:ffff': '::5:0:0:ffff/128',
+            '1:0:0:4:0:0:7:8': '1::4:0:0:7:8/128',
+            '0:0:0:0:0:0:0:0': '::/128',
+            '0:0:0:0:0:0:0:0/0': '::/0',
+            '0:0:0:0:0:0:0:1': '::1/128',
+            '2001:0658:022a:cafe:0000:0000:0000:0000/66':
+            '2001:658:22a:cafe::/66',
+            '::1.2.3.4': '::102:304/128',
+            '1:2:3:4:5:ffff:1.2.3.4': '1:2:3:4:5:ffff:102:304/128',
+            '::7:6:5:4:3:2:1': '0:7:6:5:4:3:2:1/128',
+            '::7:6:5:4:3:2:0': '0:7:6:5:4:3:2:0/128',
+            '7:6:5:4:3:2:1::': '7:6:5:4:3:2:1:0/128',
+            '0:6:5:4:3:2:1::': '0:6:5:4:3:2:1:0/128',
+            }
+        for uncompressed, compressed in test_addresses.items():
+            self.assertEqual(compressed, str(ipaddr.IPv6Network(uncompressed)))
+
+    def testExplodeShortHandIpStr(self):
+        addr1 = ipaddr.IPv6Network('2001::1')
+        addr2 = ipaddr.IPv6Address('2001:0:5ef5:79fd:0:59d:a0e5:ba1')
+        self.assertEqual('2001:0000:0000:0000:0000:0000:0000:0001/128',
+                         addr1.exploded)
+        self.assertEqual('0000:0000:0000:0000:0000:0000:0000:0001/128',
+                         ipaddr.IPv6Network('::1/128').exploded)
+        # issue 77
+        self.assertEqual('2001:0000:5ef5:79fd:0000:059d:a0e5:0ba1',
+                         addr2.exploded)
+
+    def testIntRepresentation(self):
+        self.assertEqual(16909060, int(self.ipv4))
+        self.assertEqual(42540616829182469433547762482097946625, int(self.ipv6))
+
+    def testHexRepresentation(self):
+        self.assertEqual(hex(0x1020304),
+                         hex(self.ipv4))
+
+        self.assertEqual(hex(0x20010658022ACAFE0200000000000001),
+                         hex(self.ipv6))
+
+    # backwards compatibility
+    def testBackwardsCompatibility(self):
+        self.assertEqual(ipaddr.CollapseAddrList(
+            [ipaddr.IPNetwork('1.1.0.0/24'), ipaddr.IPNetwork('1.1.1.0/24')]),
+                         [ipaddr.IPNetwork('1.1.0.0/23')])
+
+        self.assertEqual(ipaddr.IPNetwork('::42:0/112').AddressExclude(
+            ipaddr.IPNetwork('::42:8000/113')),
+                         [ipaddr.IPNetwork('::42:0/113')])
+
+        self.assertTrue(ipaddr.IPNetwork('1::/8').CompareNetworks(
+            ipaddr.IPNetwork('2::/9')) < 0)
+
+        self.assertEqual(ipaddr.IPNetwork('1::/16').Contains(
+            ipaddr.IPNetwork('2::/16')), False)
+
+        self.assertEqual(ipaddr.IPNetwork('0.0.0.0/0').Subnet(),
+                         [ipaddr.IPNetwork('0.0.0.0/1'),
+                          ipaddr.IPNetwork('128.0.0.0/1')])
+        self.assertEqual(ipaddr.IPNetwork('::/127').Subnet(),
+                         [ipaddr.IPNetwork('::/128'),
+                          ipaddr.IPNetwork('::1/128')])
+
+        self.assertEqual(ipaddr.IPNetwork('1.0.0.0/32').Supernet(),
+                         ipaddr.IPNetwork('1.0.0.0/31'))
+        self.assertEqual(ipaddr.IPNetwork('::/121').Supernet(),
+                         ipaddr.IPNetwork('::/120'))
+
+        self.assertEqual(ipaddr.IPNetwork('10.0.0.2').IsRFC1918(), True)
+        self.assertEqual(ipaddr.IPNetwork('10.0.0.0').IsMulticast(), False)
+        self.assertEqual(ipaddr.IPNetwork('127.255.255.255').IsLoopback(), True)
+        self.assertEqual(ipaddr.IPNetwork('169.255.255.255').IsLinkLocal(),
+                         False)
+
+    def testForceVersion(self):
+        self.assertEqual(ipaddr.IPNetwork(1).version, 4)
+        self.assertEqual(ipaddr.IPNetwork(1, version=6).version, 6)
+
+    def testWithStar(self):
+        self.assertEqual(str(self.ipv4.with_prefixlen), "1.2.3.4/24")
+        self.assertEqual(str(self.ipv4.with_netmask), "1.2.3.4/255.255.255.0")
+        self.assertEqual(str(self.ipv4.with_hostmask), "1.2.3.4/0.0.0.255")
+
+        self.assertEqual(str(self.ipv6.with_prefixlen),
+                         '2001:658:22a:cafe:200::1/64')
+        # RFC 3513 sec 2.3 says that IPv6 only uses CIDR notation for
+        # subnets
+        self.assertEqual(str(self.ipv6.with_netmask),
+                         '2001:658:22a:cafe:200::1/64')
+        # this probably doesn't make much sense, but it's included for
+        # compatibility with ipv4
+        self.assertEqual(str(self.ipv6.with_hostmask),
+                         '2001:658:22a:cafe:200::1/::ffff:ffff:ffff:ffff')
+
+    def testNetworkElementCaching(self):
+        # V4 - make sure we're empty
+        self.assertFalse(self.ipv4._cache.has_key('network'))
+        self.assertFalse(self.ipv4._cache.has_key('broadcast'))
+        self.assertFalse(self.ipv4._cache.has_key('hostmask'))
+
+        # V4 - populate and test
+        self.assertEqual(self.ipv4.network, ipaddr.IPv4Address('1.2.3.0'))
+        self.assertEqual(self.ipv4.broadcast, ipaddr.IPv4Address('1.2.3.255'))
+        self.assertEqual(self.ipv4.hostmask, ipaddr.IPv4Address('0.0.0.255'))
+
+        # V4 - check we're cached
+        self.assertTrue(self.ipv4._cache.has_key('network'))
+        self.assertTrue(self.ipv4._cache.has_key('broadcast'))
+        self.assertTrue(self.ipv4._cache.has_key('hostmask'))
+
+        # V6 - make sure we're empty
+        self.assertFalse(self.ipv6._cache.has_key('network'))
+        self.assertFalse(self.ipv6._cache.has_key('broadcast'))
+        self.assertFalse(self.ipv6._cache.has_key('hostmask'))
+
+        # V6 - populate and test
+        self.assertEqual(self.ipv6.network,
+                         ipaddr.IPv6Address('2001:658:22a:cafe::'))
+        self.assertEqual(self.ipv6.broadcast, ipaddr.IPv6Address(
+            '2001:658:22a:cafe:ffff:ffff:ffff:ffff'))
+        self.assertEqual(self.ipv6.hostmask,
+                         ipaddr.IPv6Address('::ffff:ffff:ffff:ffff'))
+
+        # V6 - check we're cached
+        self.assertTrue(self.ipv6._cache.has_key('network'))
+        self.assertTrue(self.ipv6._cache.has_key('broadcast'))
+        self.assertTrue(self.ipv6._cache.has_key('hostmask'))
+
+    def testTeredo(self):
+        # stolen from wikipedia
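+        # A Teredo address (2001:0::/32) stores the server's IPv4 address in
+        # bits 32-63 and the client's IPv4 address, with every bit inverted,
+        # in the low 32 bits (RFC 4380).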
+        server = ipaddr.IPv4Address('65.54.227.120')
+        client = ipaddr.IPv4Address('192.0.2.45')
+        teredo_addr = '2001:0000:4136:e378:8000:63bf:3fff:fdd2'
+        self.assertEqual((server, client),
+                         ipaddr.IPAddress(teredo_addr).teredo)
+        bad_addr = '2000::4136:e378:8000:63bf:3fff:fdd2'
+        self.assertFalse(ipaddr.IPAddress(bad_addr).teredo)
+        bad_addr = '2001:0001:4136:e378:8000:63bf:3fff:fdd2'
+        self.assertFalse(ipaddr.IPAddress(bad_addr).teredo)
+
+        # i77
+        teredo_addr = ipaddr.IPv6Address('2001:0:5ef5:79fd:0:59d:a0e5:ba1')
+        self.assertEqual((ipaddr.IPv4Address('94.245.121.253'),
+                          ipaddr.IPv4Address('95.26.244.94')),
+                         teredo_addr.teredo)
+
+
+    def testsixtofour(self):
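+        # A 6to4 address (2002::/16) embeds the IPv4 address in bits 16-47,
+        # so 2002:ac1d:2d64::/48 corresponds to 172.29.45.100.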
+        sixtofouraddr = ipaddr.IPAddress('2002:ac1d:2d64::1')
+        bad_addr = ipaddr.IPAddress('2000:ac1d:2d64::1')
+        self.assertEqual(ipaddr.IPv4Address('172.29.45.100'),
+                         sixtofouraddr.sixtofour)
+        self.assertFalse(bad_addr.sixtofour)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/ipaddr/setup.py b/catapult/telemetry/third_party/webpagereplay/third_party/ipaddr/setup.py
new file mode 100755
index 0000000..3356432
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/ipaddr/setup.py
@@ -0,0 +1,36 @@
+#!/usr/bin/python
+#
+# Copyright 2008 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from distutils.core import setup
+
+import ipaddr
+
+
+setup(name='ipaddr',
+      maintainer='Google',
+      maintainer_email='ipaddr-py-dev@googlegroups.com',
+      version=ipaddr.__version__,
+      url='http://code.google.com/p/ipaddr-py/',
+      license='Apache License, Version 2.0',
+      classifiers=[
+          'Development Status :: 5 - Production/Stable',
+          'Intended Audience :: Developers',
+          'License :: OSI Approved :: Apache Software License',
+          'Operating System :: OS Independent',
+          'Topic :: Internet',
+          'Topic :: Software Development :: Libraries',
+          'Topic :: System :: Networking'],
+      py_modules=['ipaddr'])
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/ipaddr/test-2to3.sh b/catapult/telemetry/third_party/webpagereplay/third_party/ipaddr/test-2to3.sh
new file mode 100755
index 0000000..5196083
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/ipaddr/test-2to3.sh
@@ -0,0 +1,29 @@
+#!/bin/sh
+# Copyright 2007 Google Inc.
+#  Licensed to PSF under a Contributor Agreement.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing
+# permissions and limitations under the License.
+#
+# Converts the python2 ipaddr files to python3 and runs the unit tests
+# with both python versions.
+
+mkdir -p 2to3output && \
+cp -f *.py 2to3output && \
+( cd 2to3output && 2to3 . | patch -p0 ) && \
+py3version=$(python3 --version 2>&1) && \
+echo -e "\nTesting with ${py3version}" && \
+python3 2to3output/ipaddr_test.py && \
+rm -r 2to3output && \
+pyversion=$(python --version 2>&1) && \
+echo -e "\nTesting with ${pyversion}" && \
+./ipaddr_test.py
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/ipfw_win32/LICENSE b/catapult/telemetry/third_party/webpagereplay/third_party/ipfw_win32/LICENSE
new file mode 100644
index 0000000..c1df6fe
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/ipfw_win32/LICENSE
@@ -0,0 +1,25 @@
+/*-
+ * Copyright (c) 1998-2010 Luigi Rizzo, Universita` di Pisa
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/ipfw_win32/README.txt b/catapult/telemetry/third_party/webpagereplay/third_party/ipfw_win32/README.txt
new file mode 100644
index 0000000..3dae853
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/ipfw_win32/README.txt
@@ -0,0 +1,27 @@
+This directory contains the binaries to install and use IPFW and
+DUMMYNET on a Windows Machine. The kernel part is an NDIS module,
+whereas the user interface is a command line program.
+
+1. INSTALL THE NDIS DRIVER
+
+- open the configuration panel for the network card in use
+  (either right click on the icon on the SYSTRAY, or go to
+  Control Panel -> Network and select one card)
+
+- click on Properties->Install->Service->Add
+- click on 'Driver Disk' and select 'netipfw.inf' in this folder
+- select 'ipfw+dummynet' which is the only service you should see
+- click accept on the warnings for the installation of an unknown
+  driver (roughly twice per existing network card)
+
+Now you are ready to use the emulator. To configure it, open a 'cmd'
+window and use the ipfw command from the command line.
+Alternatively, run 'TESTME.bat', a batch program that runs various
+tests.
+
+2. UNINSTALL THE DRIVER
+
+- select a network card as above.
+- click on Properties
+- select 'ipfw+dummynet'
+- click on 'Remove'
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/ipfw_win32/README.web-page-replay b/catapult/telemetry/third_party/webpagereplay/third_party/ipfw_win32/README.web-page-replay
new file mode 100644
index 0000000..8bf15c6
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/ipfw_win32/README.web-page-replay
@@ -0,0 +1,12 @@
+Name: Windows XP NDIS module for Dummynet.
+Short Name: ipfw3
+URL: http://info.iet.unipi.it/~luigi/dummynet/
+Version: 20100322 v.3.0.0.2
+License: BSD
+License File: LICENSE
+
+Description:
+Used by Web Page Replay to simulate network delays and bandwidth throttling on Windows XP.
+
+Local Modifications:
+Dropped files: cyg-ipfw.exe, cygwin1.dll, testme.bat, wget.exe.
\ No newline at end of file
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/ipfw_win32/ipfw.exe b/catapult/telemetry/third_party/webpagereplay/third_party/ipfw_win32/ipfw.exe
new file mode 100644
index 0000000..2ab5c3f
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/ipfw_win32/ipfw.exe
Binary files differ
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/ipfw_win32/ipfw.sys b/catapult/telemetry/third_party/webpagereplay/third_party/ipfw_win32/ipfw.sys
new file mode 100644
index 0000000..d5e1b9f
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/ipfw_win32/ipfw.sys
Binary files differ
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/ipfw_win32/netipfw.inf b/catapult/telemetry/third_party/webpagereplay/third_party/ipfw_win32/netipfw.inf
new file mode 100644
index 0000000..11ec684
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/ipfw_win32/netipfw.inf
@@ -0,0 +1,79 @@
+; version section
+[Version]
+Signature  = "$Windows NT$"
+Class      = NetService
+ClassGUID  = {4D36E974-E325-11CE-BFC1-08002BE10318}
+Provider   = %Unipi%
+DriverVer  = 26/02/2010,3.0.0.2
+
+; manufacturer section
+[Manufacturer]
+%Unipi% = UNIPI,NTx86
+
+; control flags section
+; optional, unused in netipfw.inf, used in netipfw_m.inf
+[ControlFlags]
+
+; models section
+[UNIPI] ; Win2k
+%Desc% = Ipfw.ndi, unipi_ipfw
+[UNIPI.NTx86] ;For WinXP and later
+%Desc% = Ipfw.ndi, unipi_ipfw
+
+; ddinstall section
+[Ipfw.ndi]
+AddReg          = Ipfw.ndi.AddReg, Ipfw.AddReg
+Characteristics = 0x4410 ;  NCF_FILTER | NCF_NDIS_PROTOCOL !--Filter Specific--!!
+CopyFiles       = Ipfw.Files.Sys
+CopyInf         = netipfw_m.inf
+
+; remove section
+[Ipfw.ndi.Remove]
+DelFiles = Ipfw.Files.Sys
+
+;ddinstall.services section
+[Ipfw.ndi.Services]
+AddService = Ipfw,,Ipfw.AddService
+
+[Ipfw.AddService]
+DisplayName    = %ServiceDesc%
+ServiceType    = 1 ;SERVICE_KERNEL_DRIVER
+StartType      = 3 ;SERVICE_DEMAND_START
+ErrorControl   = 1 ;SERVICE_ERROR_NORMAL
+ServiceBinary  = %12%\ipfw.sys
+AddReg         = Ipfw.AddService.AddReg
+
+[Ipfw.AddService.AddReg]
+
+;file copy related sections
+[SourceDisksNames]
+1=%DiskDescription%,"",,
+
+[SourceDisksFiles]
+ipfw.sys=1
+
+[DestinationDirs]
+DefaultDestDir = 12
+Ipfw.Files.Sys   = 12   ; %windir%\System32\drivers
+
+; ddinstall->copyfiles points here
+[Ipfw.Files.Sys]
+ipfw.sys,,,2
+
+; ddinstall->addreg points here
+[Ipfw.ndi.AddReg]
+HKR, Ndi,            HelpText,            , %HELP% ; this is displayed at the bottom of the General page of the Connection Properties dialog box
+HKR, Ndi,            FilterClass,         , failover
+HKR, Ndi,            FilterDeviceInfId,   , unipi_ipfwmp
+HKR, Ndi,            Service,             , Ipfw
+HKR, Ndi\Interfaces, UpperRange,          , noupper
+HKR, Ndi\Interfaces, LowerRange,          , nolower
+HKR, Ndi\Interfaces, FilterMediaTypes,    , "ethernet, tokenring, fddi, wan"
+
+;strings section
+[Strings]
+Unipi = "Unipi"
+DiskDescription = "Ipfw Driver Disk"
+Desc = "ipfw+dummynet"
+HELP = "This is ipfw and dummynet network emulator, developed by unipi.it"
+ServiceDesc = "ipfw service"
diff --git a/catapult/telemetry/third_party/webpagereplay/third_party/ipfw_win32/netipfw_m.inf b/catapult/telemetry/third_party/webpagereplay/third_party/ipfw_win32/netipfw_m.inf
new file mode 100644
index 0000000..864559f
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/third_party/ipfw_win32/netipfw_m.inf
@@ -0,0 +1,54 @@
+; version section
+[Version]
+Signature  = "$Windows NT$"
+Class      = Net
+ClassGUID  = {4D36E972-E325-11CE-BFC1-08002BE10318}
+Provider   = %Unipi%
+DriverVer  = 26/02/2010,3.0.0.2
+
+; control flags section
+; optional, unused in netipfw.inf, used in netipfw_m.inf
+[ControlFlags]
+ExcludeFromSelect = unipi_ipfwmp
+
+; destinationdirs section, optional
+[DestinationDirs]
+DefaultDestDir=12
+; No files to copy 
+
+; manufacturer section
+[Manufacturer]
+%Unipi% = UNIPI,NTx86
+
+; models section
+[UNIPI] ; Win2k
+%Desc% = IpfwMP.ndi, unipi_ipfwmp
+[UNIPI.NTx86] ;For WinXP and later
+%Desc% = IpfwMP.ndi, unipi_ipfwmp
+
+; ddinstall section
+[IpfwMP.ndi]
+AddReg  = IpfwMP.ndi.AddReg
+Characteristics = 0x29 ;NCF_NOT_USER_REMOVABLE | NCF_VIRTUAL | NCF_HIDDEN
+
+; ddinstall->addreg points here
+[IpfwMP.ndi.AddReg]
+HKR, Ndi, Service,  0,  IpfwMP
+
+;ddinstall.services section
+[IpfwMP.ndi.Services]
+AddService = IpfwMP,0x2, IpfwMP.AddService
+
+[IpfwMP.AddService]
+ServiceType    = 1 ;SERVICE_KERNEL_DRIVER
+StartType      = 3 ;SERVICE_DEMAND_START
+ErrorControl   = 1 ;SERVICE_ERROR_NORMAL
+ServiceBinary  = %12%\ipfw.sys
+AddReg         = IpfwMP.AddService.AddReg
+
+[IpfwMP.AddService.AddReg]
+; None
+
+[Strings]
+Unipi = "Unipi"
+Desc = "Ipfw Miniport"
diff --git a/catapult/telemetry/third_party/webpagereplay/trafficshaper.py b/catapult/telemetry/third_party/webpagereplay/trafficshaper.py
new file mode 100644
index 0000000..0078218
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/trafficshaper.py
@@ -0,0 +1,186 @@
+#!/usr/bin/env python
+# Copyright 2010 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import platformsettings
+import re
+
+
+# Mac has broken bandwidth parsing, so double-check the values.
+# On Mac OS X 10.6, "KBit/s" actually uses "KByte/s".
+BANDWIDTH_PATTERN = r'0|\d+[KM]?(bit|Byte)/s'
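+# For illustration, values such as '0', '384Kbit/s', '5Mbit/s', and
+# '1MByte/s' all match this pattern.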
+
+
+class TrafficShaperException(Exception):
+  pass
+
+
+class BandwidthValueError(TrafficShaperException):
+  def __init__(self, value):  # pylint: disable=super-init-not-called
+    self.value = value
+
+  def __str__(self):
+    return 'Value, "%s", does not match regex: %s' % (
+        self.value, BANDWIDTH_PATTERN)
+
+
+class TrafficShaper(object):
+  """Manages network traffic shaping."""
+
+  # Pick webpagetest-compatible values (details: http://goo.gl/oghTg).
+  _UPLOAD_PIPE = '10'      # Enforces overall upload bandwidth.
+  _UPLOAD_QUEUE = '10'     # Shares upload bandwidth among source ports.
+  _UPLOAD_RULE = '5000'    # Specifies when the upload queue is used.
+  _DOWNLOAD_PIPE = '11'    # Enforces overall download bandwidth.
+  _DOWNLOAD_QUEUE = '11'   # Shares download bandwidth among destination ports.
+  _DOWNLOAD_RULE = '5100'  # Specifies when the download queue is used.
+  _QUEUE_SLOTS = 100       # Number of packets to queue.
+
+  _BANDWIDTH_RE = re.compile(BANDWIDTH_PATTERN)
+
+  def __init__(self,
+               dont_use=None,
+               host='127.0.0.1',
+               ports=None,
+               up_bandwidth='0',
+               down_bandwidth='0',
+               delay_ms='0',
+               packet_loss_rate='0',
+               init_cwnd='0',
+               use_loopback=True):
+    """Start shaping traffic.
+
+    Args:
+      host: a host string (name or IP) for the web proxy.
+      ports: a list of ports to shape traffic on.
+      up_bandwidth: Upload bandwidth
+      down_bandwidth: Download bandwidth
+           Bandwidths measured in [K|M]{bit/s|Byte/s}. '0' means unlimited.
+      delay_ms: Propagation delay in milliseconds. '0' means no delay.
+      packet_loss_rate: Packet loss rate in range [0..1]. '0' means no loss.
+      init_cwnd: the initial cwnd setting. '0' means no change.
+      use_loopback: True iff shaping is done on the loopback (or equiv) adapter.
+    """
+    assert dont_use is None  # Force args to be named.
+    self.host = host
+    self.ports = ports
+    self.up_bandwidth = up_bandwidth
+    self.down_bandwidth = down_bandwidth
+    self.delay_ms = delay_ms
+    self.packet_loss_rate = packet_loss_rate
+    self.init_cwnd = init_cwnd
+    self.use_loopback = use_loopback
+    if not self._BANDWIDTH_RE.match(self.up_bandwidth):
+      raise BandwidthValueError(self.up_bandwidth)
+    if not self._BANDWIDTH_RE.match(self.down_bandwidth):
+      raise BandwidthValueError(self.down_bandwidth)
+    self.is_shaping = False
+
+  def __enter__(self):
+    if self.use_loopback:
+      platformsettings.setup_temporary_loopback_config()
+    if self.init_cwnd != '0':
+      platformsettings.set_temporary_tcp_init_cwnd(self.init_cwnd)
+    try:
+      ipfw_list = platformsettings.ipfw('list')
+      if not ipfw_list.startswith('65535 '):
+        logging.warn('ipfw has existing rules:\n%s', ipfw_list)
+        self._delete_rules(ipfw_list)
+    except Exception:
+      pass
+    if (self.up_bandwidth == '0' and self.down_bandwidth == '0' and
+        self.delay_ms == '0' and self.packet_loss_rate == '0'):
+      logging.info('Skipped shaping traffic.')
+      return
+    if not self.ports:
+      raise TrafficShaperException('No ports on which to shape traffic.')
+
+    ports = ','.join(str(p) for p in self.ports)
+    half_delay_ms = int(self.delay_ms) / 2  # split over up/down links
+
+    try:
+      # Configure upload shaping.
+      platformsettings.ipfw(
+          'pipe', self._UPLOAD_PIPE,
+          'config',
+          'bw', self.up_bandwidth,
+          'delay', half_delay_ms,
+          )
+      platformsettings.ipfw(
+          'queue', self._UPLOAD_QUEUE,
+          'config',
+          'pipe', self._UPLOAD_PIPE,
+          'plr', self.packet_loss_rate,
+          'queue', self._QUEUE_SLOTS,
+          'mask', 'src-port', '0xffff',
+          )
+      platformsettings.ipfw(
+          'add', self._UPLOAD_RULE,
+          'queue', self._UPLOAD_QUEUE,
+          'ip',
+          'from', 'any',
+          'to', self.host,
+          self.use_loopback and 'out' or 'in',
+          'dst-port', ports,
+          )
+      self.is_shaping = True
+
+      # Configure download shaping.
+      platformsettings.ipfw(
+          'pipe', self._DOWNLOAD_PIPE,
+          'config',
+          'bw', self.down_bandwidth,
+          'delay', half_delay_ms,
+          )
+      platformsettings.ipfw(
+          'queue', self._DOWNLOAD_QUEUE,
+          'config',
+          'pipe', self._DOWNLOAD_PIPE,
+          'plr', self.packet_loss_rate,
+          'queue', self._QUEUE_SLOTS,
+          'mask', 'dst-port', '0xffff',
+          )
+      platformsettings.ipfw(
+          'add', self._DOWNLOAD_RULE,
+          'queue', self._DOWNLOAD_QUEUE,
+          'ip',
+          'from', self.host,
+          'to', 'any',
+          'out',
+          'src-port', ports,
+          )
+      logging.info('Started shaping traffic')
+    except Exception:
+      logging.error('Unable to shape traffic.')
+      raise
+
+  def __exit__(self, unused_exc_type, unused_exc_val, unused_exc_tb):
+    if self.is_shaping:
+      try:
+        self._delete_rules()
+        logging.info('Stopped shaping traffic')
+      except Exception:
+        logging.error('Unable to stop shaping traffic.')
+        raise
+
+  def _delete_rules(self, ipfw_list=None):
+    if ipfw_list is None:
+      ipfw_list = platformsettings.ipfw('list')
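+    # `ipfw list` typically prints one rule per line prefixed with a
+    # zero-padded rule number (e.g. '05000 queue 10 ...'), so strip the
+    # leading zeros before comparing against the unpadded rule constants
+    # above.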
+    existing_rules = set(
+        r.split()[0].lstrip('0') for r in ipfw_list.splitlines())
+    delete_rules = [r for r in (self._DOWNLOAD_RULE, self._UPLOAD_RULE)
+                    if r in existing_rules]
+    if delete_rules:
+      platformsettings.ipfw('delete', *delete_rules)
diff --git a/catapult/telemetry/third_party/webpagereplay/trafficshaper_test.py b/catapult/telemetry/third_party/webpagereplay/trafficshaper_test.py
new file mode 100755
index 0000000..525446d
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/trafficshaper_test.py
@@ -0,0 +1,277 @@
+#!/usr/bin/env python
+# Copyright 2011 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""System integration test for traffic shaping.
+
+Usage:
+$ sudo ./trafficshaper_test.py
+"""
+
+import daemonserver
+import logging
+import platformsettings
+import socket
+import SocketServer
+import trafficshaper
+import unittest
+
+RESPONSE_SIZE_KEY = 'response-size:'
+TEST_DNS_PORT = 5555
+TEST_HTTP_PORT = 8888
+TIMER = platformsettings.timer
+
+
+def GetElapsedMs(start_time, end_time):
+  """Return milliseconds elapsed between |start_time| and |end_time|.
+
+  Args:
+    start_time: seconds as a float (or string representation of float).
+    end_time: seconds as a float (or string representation of float).
+  Returns:
+    milliseconds elapsed as integer.
+  """
+  return int((float(end_time) - float(start_time)) * 1000)
+
+
+class TrafficShaperTest(unittest.TestCase):
+
+  def testBadBandwidthRaises(self):
+    self.assertRaises(trafficshaper.BandwidthValueError,
+                      trafficshaper.TrafficShaper,
+                      down_bandwidth='1KBit/s')
+
+
+class TimedUdpHandler(SocketServer.DatagramRequestHandler):
+  """UDP handler that returns the time when the request was handled."""
+
+  def handle(self):
+    data = self.rfile.read()
+    read_time = self.server.timer()
+    self.wfile.write(str(read_time))
+
+
+class TimedTcpHandler(SocketServer.StreamRequestHandler):
+  """Tcp handler that returns the time when the request was read.
+
+  It can respond with the number of bytes specified in the request.
+  The request looks like:
+    request_data -> RESPONSE_SIZE_KEY num_response_bytes '\n' ANY_DATA
+  """
+
+  def handle(self):
+    data = self.rfile.read()
+    read_time = self.server.timer()
+    contents = str(read_time)
+    if data.startswith(RESPONSE_SIZE_KEY):
+      num_response_bytes = int(data[len(RESPONSE_SIZE_KEY):data.index('\n')])
+      contents = '%s\n%s' % (contents,
+                             '\x00' * (num_response_bytes - len(contents) - 1))
+    self.wfile.write(contents)
+
+
+class TimedUdpServer(SocketServer.ThreadingUDPServer,
+                     daemonserver.DaemonServer):
+  """A simple UDP server similar to dnsproxy."""
+
+  # Override the default SocketServer setting to avoid intermittent errors.
+  allow_reuse_address = True
+
+  def __init__(self, host, port, timer=TIMER):
+    SocketServer.ThreadingUDPServer.__init__(
+        self, (host, port), TimedUdpHandler)
+    self.timer = timer
+
+  def cleanup(self):
+    pass
+
+
+class TimedTcpServer(SocketServer.ThreadingTCPServer,
+                     daemonserver.DaemonServer):
+  """A simple TCP server similar to httpproxy."""
+
+  # Override the SocketServer.TCPServer setting to avoid intermittent errors.
+  allow_reuse_address = True
+
+  def __init__(self, host, port, timer=TIMER):
+    SocketServer.ThreadingTCPServer.__init__(
+        self, (host, port), TimedTcpHandler)
+    self.timer = timer
+
+  def cleanup(self):
+    try:
+      self.shutdown()
+    except KeyboardInterrupt, e:
+      pass
+
+
+class TcpTestSocketCreator(object):
+  """A TCP socket creator suitable for with-statement."""
+
+  def __init__(self, host, port, timeout=1.0):
+    self.address = (host, port)
+    self.timeout = timeout
+
+  def __enter__(self):
+    self.socket = socket.create_connection(self.address, timeout=self.timeout)
+    return self.socket
+
+  def __exit__(self, *args):
+    self.socket.close()
+
+
+class TimedTestCase(unittest.TestCase):
+  def assertValuesAlmostEqual(self, expected, actual, tolerance=0.05):
+    """Like the following with nicer default message:
+           assertTrue(expected <= actual + tolerance &&
+                      expected >= actual - tolerance)
+    """
+    delta = tolerance * expected
+    if actual > expected + delta or actual < expected - delta:
+      self.fail('%s is not equal to expected %s +/- %s%%' % (
+              actual, expected, 100 * tolerance))
+
+
+class TcpTrafficShaperTest(TimedTestCase):
+
+  def setUp(self):
+    self.host = platformsettings.get_server_ip_address()
+    self.port = TEST_HTTP_PORT
+    self.tcp_socket_creator = TcpTestSocketCreator(self.host, self.port)
+    self.timer = TIMER
+
+  def TrafficShaper(self, **kwargs):
+    return trafficshaper.TrafficShaper(
+        host=self.host, ports=(self.port,), **kwargs)
+
+  def GetTcpSendTimeMs(self, num_bytes):
+    """Return time in milliseconds to send |num_bytes|."""
+
+    with self.tcp_socket_creator as s:
+      start_time = self.timer()
+      request_data = '\x00' * num_bytes
+
+      s.sendall(request_data)
+      # TODO(slamm): Figure out why a partial shutdown is needed to make it work.
+      s.shutdown(socket.SHUT_WR)
+      read_time = s.recv(1024)
+    return GetElapsedMs(start_time, read_time)
+
+  def GetTcpReceiveTimeMs(self, num_bytes):
+    """Return time in milliseconds to receive |num_bytes|."""
+
+    with self.tcp_socket_creator as s:
+      s.sendall('%s%s\n' % (RESPONSE_SIZE_KEY, num_bytes))
+      # TODO(slamm): Figure out why a partial shutdown is needed to make it work.
+      s.shutdown(socket.SHUT_WR)
+      num_remaining_bytes = num_bytes
+      read_time = None
+      while num_remaining_bytes > 0:
+        response_data = s.recv(4096)
+        num_remaining_bytes -= len(response_data)
+        if not read_time:
+          read_time, padding = response_data.split('\n')
+    return GetElapsedMs(read_time, self.timer())
+
+  def testTcpConnectToIp(self):
+    """Verify that it takes |delay_ms| to establish a TCP connection."""
+    if not platformsettings.has_ipfw():
+      logging.warning('ipfw is not available in PATH; skipping the test.')
+      return
+    with TimedTcpServer(self.host, self.port):
+      for delay_ms in (100, 175):
+        with self.TrafficShaper(delay_ms=delay_ms):
+          start_time = self.timer()
+          with self.tcp_socket_creator:
+            connect_time = GetElapsedMs(start_time, self.timer())
+        self.assertValuesAlmostEqual(delay_ms, connect_time, tolerance=0.12)
+
+  def testTcpUploadShaping(self):
+    """Verify that 'up' bandwidth is shaped on TCP connections."""
+    if not platformsettings.has_ipfw():
+      logging.warning('ipfw is not available in PATH; skipping the test.')
+      return
+    num_bytes = 1024 * 100
+    bandwidth_kbits = 2000
+    expected_ms = 8.0 * num_bytes / bandwidth_kbits
+    with TimedTcpServer(self.host, self.port):
+      with self.TrafficShaper(up_bandwidth='%sKbit/s' % bandwidth_kbits):
+        self.assertValuesAlmostEqual(expected_ms, self.GetTcpSendTimeMs(num_bytes))
+
+  def testTcpDownloadShaping(self):
+    """Verify that 'down' bandwidth is shaped on TCP connections."""
+    if not platformsettings.has_ipfw():
+      logging.warning('ipfw is not available in PATH; skipping the test.')
+      return
+    num_bytes = 1024 * 100
+    bandwidth_kbits = 2000
+    expected_ms = 8.0 * num_bytes / bandwidth_kbits
+    with TimedTcpServer(self.host, self.port):
+      with self.TrafficShaper(down_bandwidth='%sKbit/s' % bandwidth_kbits):
+        self.assertValuesAlmostEqual(expected_ms, self.GetTcpReceiveTimeMs(num_bytes))
+
+  def testTcpInterleavedDownloads(self):
+    # TODO(slamm): write tcp interleaved downloads test
+    pass
+
+
+class UdpTrafficShaperTest(TimedTestCase):
+
+  def setUp(self):
+    self.host = platformsettings.get_server_ip_address()
+    self.dns_port = TEST_DNS_PORT
+    self.timer = TIMER
+
+  def TrafficShaper(self, **kwargs):
+    return trafficshaper.TrafficShaper(
+        host=self.host, ports=(self.dns_port,), **kwargs)
+
+  def GetUdpSendReceiveTimesMs(self):
+    """Return time in milliseconds to send |num_bytes|."""
+    start_time = self.timer()
+    udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+    udp_socket.sendto('test data\n', (self.host, self.dns_port))
+    read_time = udp_socket.recv(1024)
+    return (GetElapsedMs(start_time, read_time),
+            GetElapsedMs(read_time, self.timer()))
+
+  def testUdpDelay(self):
+    if not platformsettings.has_ipfw():
+      logging.warning('ipfw is not available in PATH; skipping the test.')
+      return
+    for delay_ms in (100, 170):
+      expected_ms = delay_ms / 2
+      with TimedUdpServer(self.host, self.dns_port):
+        with self.TrafficShaper(delay_ms=delay_ms):
+          send_ms, receive_ms = self.GetUdpSendReceiveTimesMs()
+          self.assertValuesAlmostEqual(expected_ms, send_ms, tolerance=0.10)
+          self.assertValuesAlmostEqual(expected_ms, receive_ms, tolerance=0.10)
+
+
+  def testUdpInterleavedDelay(self):
+    # TODO(slamm): write udp interleaved udp delay test
+    pass
+
+
+class TcpAndUdpTrafficShaperTest(TimedTestCase):
+  # TODO(slamm): Test concurrent TCP and UDP traffic
+  pass
+
+
+# TODO(slamm): Packet loss rate (try different ports)
+
+
+if __name__ == '__main__':
+  #logging.getLogger().setLevel(logging.DEBUG)
+  unittest.main()
diff --git a/catapult/telemetry/third_party/webpagereplay/util.py b/catapult/telemetry/third_party/webpagereplay/util.py
new file mode 100644
index 0000000..419d232
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/util.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python
+# Copyright 2012 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Miscellaneous utility functions."""
+
+import inspect
+import logging
+import time
+
+try:
+  # pkg_resources (part of setuptools) is needed when WPR is
+  # distributed as a package. (Resources may need to be extracted from
+  # the package.)
+
+  import pkg_resources
+
+  def resource_exists(resource_name):
+    return pkg_resources.resource_exists(__name__, resource_name)
+
+  def resource_string(resource_name):
+    return pkg_resources.resource_string(__name__, resource_name)
+
+except ImportError:
+  # Import of pkg_resources failed, so fall back to getting resources
+  # from the file system.
+
+  import os
+
+  def _resource_path(resource_name):
+    _replay_dir = os.path.dirname(os.path.abspath(__file__))
+    return os.path.join(_replay_dir, resource_name)
+
+  def resource_exists(resource_name):
+    return os.path.exists(_resource_path(resource_name))
+
+  def resource_string(resource_name):
+    return open(_resource_path(resource_name)).read()
+
+
+class TimeoutException(Exception):
+  pass
+
+
+def WaitFor(condition, timeout):
+  """Waits for up to |timeout| secs for the function |condition| to return True.
+
+  Polling frequency is (elapsed_time / 10), with a min of .1s and max of 5s.
+
+  Returns:
+    Result of |condition| function (if present).
+  """
+  min_poll_interval = 0.1
+  max_poll_interval = 5
+  output_interval = 300
+
+  def GetConditionString():
+    if condition.__name__ == '<lambda>':
+      try:
+        return inspect.getsource(condition).strip()
+      except IOError:
+        pass
+    return condition.__name__
+
+  start_time = time.time()
+  last_output_time = start_time
+  while True:
+    res = condition()
+    if res:
+      return res
+    now = time.time()
+    elapsed_time = now - start_time
+    last_output_elapsed_time = now - last_output_time
+    if elapsed_time > timeout:
+      raise TimeoutException('Timed out while waiting %ds for %s.' %
+                                        (timeout, GetConditionString()))
+    if last_output_elapsed_time > output_interval:
+      logging.info('Continuing to wait %ds for %s. Elapsed: %ds.',
+                   timeout, GetConditionString(), elapsed_time)
+      last_output_time = time.time()
+    poll_interval = min(max(elapsed_time / 10., min_poll_interval),
+                        max_poll_interval)
+    time.sleep(poll_interval)
diff --git a/catapult/telemetry/third_party/webpagereplay/wpr_cert.pem b/catapult/telemetry/third_party/webpagereplay/wpr_cert.pem
new file mode 100644
index 0000000..773bfea
--- /dev/null
+++ b/catapult/telemetry/third_party/webpagereplay/wpr_cert.pem
@@ -0,0 +1,31 @@
+-----BEGIN PRIVATE KEY-----
+MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBALLOoY1ENN78dMvO
+33tSElddaaPFV4ooXJKbCqWsEurkKJjtoHxnri54rgNOLSU4O8aQytWE9zq6Ei/c
+qK7dl6UqZiZtwk4HVrBL786cVKlgpPkhcnG+EMdDxy/PyMD38jNtTNMJLiwbxiZq
+Mo3vjZ/4+1XLdNisAWTDLaWeXxmbAgMBAAECgYAadwLqScIZjvwqfkANnKQiUi0k
+lDzUsgyhllkJFGLoaUSo/eLXBvF851e6HYQJEj2msh+TYs7E3m16sAo3d4zOIdnz
+VwOF0SVuUveqJz6K1/k6nPxck+dPj8Mi+gBm3Fd0+0wcozjWaxhx3f462HCUb6b+
+ZpJRBsbyvzu6rn7iQQJBAOlWhtfL8r9+Kl0vxRD1XukaJwlxPv24JhfKOU4z8WlJ
+WX7Wr8ws+xKS+CtfFnjkf/iFJPpTb8jxpQyWMJzYZIkCQQDELE5hGnBFVQArMAOp
+VbwYordTrVY3AagO4tDJ6T3a7GEXE28ol16/i02+4FLd65vubL21IuX0exH/eRvZ
+Q4wDAkEAub/qyiEOFkjOWq5rd0uNiY0LJGYlWf7dPDT8l3ecJ09/0gv/mE76c9fR
+fV1N22EzSlhbjncbVuCenj11Z3aP2QJAILtfzJXzu63GHG6jfcKfYuDrg9u9Mepl
+1y4DNl1jg77DKG2Gs5gmKAGfVETrrrmcR/j+4lVTVyqdwym6+tJpbwJBAN3vixxc
+5N9pUMDfFnHrx/x9QPd0JgSAT21KSIB+PndlbD7QO6nwFhQNNcTYt2D4VWPVo1vg
+lOraHyFakb7NqEA=
+-----END PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIICWDCCAcGgAwIBAgIJAIt1sARz1phuMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV
+BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX
+aWRnaXRzIFB0eSBMdGQwHhcNMTIxMDIzMjA1NzA0WhcNMjIxMDIxMjA1NzA0WjBF
+MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50
+ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB
+gQCyzqGNRDTe/HTLzt97UhJXXWmjxVeKKFySmwqlrBLq5CiY7aB8Z64ueK4DTi0l
+ODvGkMrVhPc6uhIv3Kiu3ZelKmYmbcJOB1awS+/OnFSpYKT5IXJxvhDHQ8cvz8jA
+9/IzbUzTCS4sG8YmajKN742f+PtVy3TYrAFkwy2lnl8ZmwIDAQABo1AwTjAdBgNV
+HQ4EFgQUyihF4Nbabk9aBDOOxMVDLRLiqMEwHwYDVR0jBBgwFoAUyihF4Nbabk9a
+BDOOxMVDLRLiqMEwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOBgQCIrYif
+q4YQ7A8zy8PB1wKOyMy50r7IokO8hplbjRwVCQwbLnRM74LPp84caEdwaCJvAaP9
+QWjVz6p3K2sK9iHaidQZ/9xqJPoZrYttQCcQUweaASp04Y9WwTA4EPeDZLgp4PgU
+SV/mvKRGimmce6LMxFlPiZio/IDiUNXVJ7j/sg==
+-----END CERTIFICATE-----
diff --git a/catapult/telemetry/third_party/websocket-client/.gitignore b/catapult/telemetry/third_party/websocket-client/.gitignore
new file mode 100644
index 0000000..c7d73ad
--- /dev/null
+++ b/catapult/telemetry/third_party/websocket-client/.gitignore
@@ -0,0 +1,8 @@
+*.pyc
+*~
+*\#
+.\#*
+
+build
+dist
+websocket_client.egg-info
diff --git a/catapult/telemetry/third_party/websocket-client/LICENSE b/catapult/telemetry/third_party/websocket-client/LICENSE
new file mode 100644
index 0000000..c255f4a
--- /dev/null
+++ b/catapult/telemetry/third_party/websocket-client/LICENSE
@@ -0,0 +1,506 @@
+		  GNU LESSER GENERAL PUBLIC LICENSE
+		       Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+     59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL.  It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+  This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it.  You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+  When we speak of free software, we are referring to freedom of use,
+not price.  Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+  To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights.  These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+  For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you.  You must make sure that they, too, receive or can get the source
+code.  If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it.  And you must show them these terms so they know their rights.
+
+  We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+  To protect each distributor, we want to make it very clear that
+there is no warranty for the free library.  Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+
+
+  Finally, software patents pose a constant threat to the existence of
+any free program.  We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder.  Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+  Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License.  This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License.  We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+  When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library.  The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom.  The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+  We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License.  It also provides other free software developers Less
+of an advantage over competing non-free programs.  These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries.  However, the Lesser license provides advantages in certain
+special circumstances.
+
+  For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard.  To achieve this, non-free programs must be
+allowed to use the library.  A more frequent case is that a free
+library does the same job as widely used non-free libraries.  In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+  In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software.  For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+  Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.  Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library".  The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+
+
+		  GNU LESSER GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+  A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+  The "Library", below, refers to any such software library or work
+which has been distributed under these terms.  A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language.  (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+  "Source code" for a work means the preferred form of the work for
+making modifications to it.  For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+  Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it).  Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+  
+  1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+  You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+  2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) The modified work must itself be a software library.
+
+    b) You must cause the files modified to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    c) You must cause the whole of the work to be licensed at no
+    charge to all third parties under the terms of this License.
+
+    d) If a facility in the modified Library refers to a function or a
+    table of data to be supplied by an application program that uses
+    the facility, other than as an argument passed when the facility
+    is invoked, then you must make a good faith effort to ensure that,
+    in the event an application does not supply such function or
+    table, the facility still operates, and performs whatever part of
+    its purpose remains meaningful.
+
+    (For example, a function in a library to compute square roots has
+    a purpose that is entirely well-defined independent of the
+    application.  Therefore, Subsection 2d requires that any
+    application-supplied function or table used by this function must
+    be optional: if the application does not supply it, the square
+    root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library.  To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License.  (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.)  Do not make any other change in
+these notices.
+
+  Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+  This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+  4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+  If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library".  Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+  However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library".  The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+  When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library.  The
+threshold for this to be true is not precisely defined by law.
+
+  If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work.  (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+  Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+  6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+  You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License.  You must supply a copy of this License.  If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License.  Also, you must do one
+of these things:
+
+    a) Accompany the work with the complete corresponding
+    machine-readable source code for the Library including whatever
+    changes were used in the work (which must be distributed under
+    Sections 1 and 2 above); and, if the work is an executable linked
+    with the Library, with the complete machine-readable "work that
+    uses the Library", as object code and/or source code, so that the
+    user can modify the Library and then relink to produce a modified
+    executable containing the modified Library.  (It is understood
+    that the user who changes the contents of definitions files in the
+    Library will not necessarily be able to recompile the application
+    to use the modified definitions.)
+
+    b) Use a suitable shared library mechanism for linking with the
+    Library.  A suitable mechanism is one that (1) uses at run time a
+    copy of the library already present on the user's computer system,
+    rather than copying library functions into the executable, and (2)
+    will operate properly with a modified version of the library, if
+    the user installs one, as long as the modified version is
+    interface-compatible with the version that the work was made with.
+
+    c) Accompany the work with a written offer, valid for at
+    least three years, to give the same user the materials
+    specified in Subsection 6a, above, for a charge no more
+    than the cost of performing this distribution.
+
+    d) If distribution of the work is made by offering access to copy
+    from a designated place, offer equivalent access to copy the above
+    specified materials from the same place.
+
+    e) Verify that the user has already received a copy of these
+    materials or that you have already sent this user a copy.
+
+  For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it.  However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+  It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system.  Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+  7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+    a) Accompany the combined library with a copy of the same work
+    based on the Library, uncombined with any other library
+    facilities.  This must be distributed under the terms of the
+    Sections above.
+
+    b) Give prominent notice with the combined library of the fact
+    that part of it is a work based on the Library, and explaining
+    where to find the accompanying uncombined form of the same work.
+
+  8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License.  Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License.  However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+  9. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Library or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+  10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+  11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded.  In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+  13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation.  If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+  14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission.  For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this.  Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+			    NO WARRANTY
+
+  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
+
+           How to Apply These Terms to Your New Libraries
+
+  If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change.  You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+  To apply these terms, attach the following notices to the library.  It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the library's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This library is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Lesser General Public
+    License as published by the Free Software Foundation; either
+    version 2.1 of the License, or (at your option) any later version.
+
+    This library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+    Lesser General Public License for more details.
+
+    You should have received a copy of the GNU Lesser General Public
+    License along with this library; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the
+  library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+  <signature of Ty Coon>, 1 April 1990
+  Ty Coon, President of Vice
+
+That's all there is to it!
+
diff --git a/catapult/telemetry/third_party/websocket-client/README.chromium b/catapult/telemetry/third_party/websocket-client/README.chromium
new file mode 100644
index 0000000..0a6632a
--- /dev/null
+++ b/catapult/telemetry/third_party/websocket-client/README.chromium
@@ -0,0 +1,20 @@
+Name: Python websocket-client
+Short Name: websocket-client
+URL: https://github.com/liris/websocket-client
+Version: 0.13.0
+Revision: 7d3a2e7c2b2ebf534039988f4f7bcc599b9e4d0e
+Date: Tue Oct 15 07:44:01 2013 +0900
+License: LGPL-2.1
+License File: NOT_SHIPPED
+Security Critical: no
+
+Description:
+
+The websocket-client module is a WebSocket client for Python. It provides
+low-level APIs for WebSocket. All APIs are synchronous functions.
+
+Used by the python code in devtools-auto to communicate with a running Chrome instance.
+
+Local Modifications:
+None. However, test, example and packaging code from the upstream repository has
+not been copied downstream.
diff --git a/catapult/telemetry/third_party/websocket-client/README.rst b/catapult/telemetry/third_party/websocket-client/README.rst
new file mode 100644
index 0000000..320342b
--- /dev/null
+++ b/catapult/telemetry/third_party/websocket-client/README.rst
@@ -0,0 +1,189 @@
+=================
+websocket-client
+=================
+
+The websocket-client module is a WebSocket client for Python. It provides the low-level APIs for WebSocket. All APIs are synchronous functions.
+
+websocket-client supports only hybi-13.
+
+License
+============
+
+ - LGPL
+
+Installation
+=============
+
+This module is tested only on Python 2.7.
+
+Type "python setup.py install" or "pip install websocket-client" to install.
+
+This module does not depend on any other module.
+
+How about Python 3
+===========================
+
+The py3 branch ( https://github.com/liris/websocket-client/tree/py3 ) is for Python 3.3, and every test case passes there.
+If you are using Python 3, please check it out.
+
+Example
+============
+
+Low Level API example::
+
+    from websocket import create_connection
+    ws = create_connection("ws://echo.websocket.org/")
+    print "Sending 'Hello, World'..."
+    ws.send("Hello, World")
+    print "Sent"
+    print "Reeiving..."
+    result =  ws.recv()
+    print "Received '%s'" % result
+    ws.close()
+
+If you want to customize socket options, set sockopt.
+
+sockopt example::
+
+    import socket
+    from websocket import create_connection
+    ws = create_connection("ws://echo.websocket.org/",
+                           sockopt=((socket.IPPROTO_TCP, socket.TCP_NODELAY),))
+
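+A minimal sketch of configuring timeouts, based on the settimeout and
+setdefaulttimeout APIs in websocket.py (the timeout values here are
+illustrative only)::
+
+    import websocket
+    websocket.setdefaulttimeout(5)   # global default timeout, in seconds
+    ws = websocket.create_connection("ws://echo.websocket.org/", timeout=10)
+    ws.settimeout(2)                 # per-connection override
+    ws.close()
+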
+
+JavaScript websocket-like API example::
+
+  import websocket
+  import thread
+  import time
+  
+  def on_message(ws, message):
+      print message
+  
+  def on_error(ws, error):
+      print error
+  
+  def on_close(ws):
+      print "### closed ###"
+  
+  def on_open(ws):
+      def run(*args):
+          for i in range(3):
+              time.sleep(1)
+              ws.send("Hello %d" % i)
+          time.sleep(1)
+          ws.close()
+          print "thread terminating..."
+      thread.start_new_thread(run, ())
+  
+  
+  if __name__ == "__main__":
+      websocket.enableTrace(True)
+      ws = websocket.WebSocketApp("ws://echo.websocket.org/",
+                                  on_message = on_message,
+                                  on_error = on_error,
+                                  on_close = on_close)
+      ws.on_open = on_open
+      
+      ws.run_forever()
+
+
+wsdump.py
+============
+
+wsdump.py is a simple WebSocket test (debug) tool.
+
+sample for echo.websocket.org::
+
+  $ wsdump.py ws://echo.websocket.org/
+  Press Ctrl+C to quit
+  > Hello, WebSocket
+  < Hello, WebSocket
+  > How are you?
+  < How are you?
+
+Usage
+---------
+
+usage::
+
+  wsdump.py [-h] [-v [VERBOSE]] ws_url
+
+WebSocket Simple Dump Tool
+
+positional arguments:
+  ws_url                websocket url. ex. ws://echo.websocket.org/
+
+optional arguments:
+  -h, --help                           show this help message and exit
+
+  -v VERBOSE, --verbose VERBOSE    set verbose mode. If set to 1, show opcodes. If set to 2, enable tracing of the websocket module.
+
+example::
+
+  $ wsdump.py ws://echo.websocket.org/
+  $ wsdump.py ws://echo.websocket.org/ -v
+  $ wsdump.py ws://echo.websocket.org/ -vv
+
+ChangeLog
+============
+
+- v0.12.0
+
+  - support keep alive for WebSocketApp(ISSUE#34)
+  - fix some SSL bugs(ISSUE#35, #36)
+  - fix "Timing out leaves websocket library in bad state"(ISSUE#37)
+  - fix "WebSocketApp.run_with_no_err() silently eats all exceptions"(ISSUE#38)
+  - WebSocketTimeoutException will be raised for ws/wss timeout(ISSUE#40)
+  - improve wsdump message(ISSUE#42)
+  - support fragmentation message(ISSUE#43)
+  - fix some bugs
+
+- v0.11.0
+
+  - Only log non-normal close status(ISSUE#31)
+  - Fix default Origin isn't URI(ISSUE#32)
+  - fileno support(ISSUE#33)
+
+- v0.10.0
+
+  - allow to set HTTP Header to WebSocketApp(ISSUE#27)
+  - fix typo in pydoc(ISSUE#28)
+  - Passing a socketopt flag to the websocket constructor(ISSUE#29)
+  - websocket.send fails with long data(ISSUE#30)
+
+
+- v0.9.0
+
+  - allow to set opcode in WebSocketApp.send(ISSUE#25)
+  - allow to modify Origin(ISSUE#26)
+
+- v0.8.0
+
+  - many bug fix
+  - some performance improvement
+
+- v0.7.0
+
+  - fixed problem to read long data.(ISSUE#12)
+  - fix buffer size boundary violation
+
+- v0.6.0
+
+  - Patches: UUID4, self.keep_running, mask_key (ISSUE#11)
+  - add wsdump.py tool 
+
+- v0.5.2
+
+  - fix Echo App Demo Throw Error: 'NoneType' object has no attribute 'opcode  (ISSUE#10)
+
+- v0.5.1
+
+  - delete invalid print statement.
+
+- v0.5.0
+
+  - support hybi-13 protocol.
+
+- v0.4.1
+
+  - fix incorrect custom header order(ISSUE#1)
+   
diff --git a/catapult/telemetry/third_party/websocket-client/websocket.py b/catapult/telemetry/third_party/websocket-client/websocket.py
new file mode 100644
index 0000000..70f0260
--- /dev/null
+++ b/catapult/telemetry/third_party/websocket-client/websocket.py
@@ -0,0 +1,893 @@
+"""
+websocket - WebSocket client library for Python
+
+Copyright (C) 2010 Hiroki Ohtani(liris)
+
+    This library is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Lesser General Public
+    License as published by the Free Software Foundation; either
+    version 2.1 of the License, or (at your option) any later version.
+
+    This library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+    Lesser General Public License for more details.
+
+    You should have received a copy of the GNU Lesser General Public
+    License along with this library; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+"""
+
+
+import socket
+
+try:
+    import ssl
+    from ssl import SSLError
+    HAVE_SSL = True
+except ImportError:
+    # Dummy SSLError class for environments without ssl support.
+    class SSLError(Exception):
+        pass
+
+    HAVE_SSL = False
+
+from urlparse import urlparse
+import os
+import array
+import struct
+import uuid
+import hashlib
+import base64
+import threading
+import time
+import logging
+import traceback
+import sys
+
+"""
+websocket python client.
+=========================
+
+This version supports only hybi-13.
+Please see http://tools.ietf.org/html/rfc6455 for protocol.
+"""
+
+
+# websocket supported version.
+VERSION = 13
+
+# closing frame status codes.
+STATUS_NORMAL = 1000
+STATUS_GOING_AWAY = 1001
+STATUS_PROTOCOL_ERROR = 1002
+STATUS_UNSUPPORTED_DATA_TYPE = 1003
+STATUS_STATUS_NOT_AVAILABLE = 1005
+STATUS_ABNORMAL_CLOSED = 1006
+STATUS_INVALID_PAYLOAD = 1007
+STATUS_POLICY_VIOLATION = 1008
+STATUS_MESSAGE_TOO_BIG = 1009
+STATUS_INVALID_EXTENSION = 1010
+STATUS_UNEXPECTED_CONDITION = 1011
+STATUS_TLS_HANDSHAKE_ERROR = 1015
+
+logger = logging.getLogger()
+
+
+class WebSocketException(Exception):
+    """
+    websocket exception class.
+    """
+    pass
+
+
+class WebSocketConnectionClosedException(WebSocketException):
+    """
+    If remote host closed the connection or some network error happened,
+    this exception will be raised.
+    """
+    pass
+
+class WebSocketTimeoutException(WebSocketException):
+    """
+    WebSocketTimeoutException will be raised at socket timeout during read/write data.
+    """
+    pass
+
+default_timeout = None
+traceEnabled = False
+
+
+def enableTrace(tracable):
+    """
+    Turn on/off traceability.
+
+    tracable: boolean value. If set to True, traceability is enabled.
+    """
+    global traceEnabled
+    traceEnabled = tracable
+    if tracable:
+        if not logger.handlers:
+            logger.addHandler(logging.StreamHandler())
+        logger.setLevel(logging.DEBUG)
+
+
+def setdefaulttimeout(timeout):
+    """
+    Set the global timeout setting to connect.
+
+    timeout: default socket timeout time, in seconds.
+    """
+    global default_timeout
+    default_timeout = timeout
+
+
+def getdefaulttimeout():
+    """
+    Return the global connect timeout setting, in seconds.
+    """
+    return default_timeout
+
+
+def _parse_url(url):
+    """
+    Parse url and return a tuple of
+    (hostname, port, resource path, and the flag of secure mode).
+
+    url: url string.
+    """
+    if ":" not in url:
+        raise ValueError("url is invalid")
+
+    scheme, url = url.split(":", 1)
+
+    parsed = urlparse(url, scheme="http")
+    if parsed.hostname:
+        hostname = parsed.hostname
+    else:
+        raise ValueError("hostname is invalid")
+    port = 0
+    if parsed.port:
+        port = parsed.port
+
+    is_secure = False
+    if scheme == "ws":
+        if not port:
+            port = 80
+    elif scheme == "wss":
+        is_secure = True
+        if not port:
+            port = 443
+    else:
+        raise ValueError("scheme %s is invalid" % scheme)
+
+    if parsed.path:
+        resource = parsed.path
+    else:
+        resource = "/"
+
+    if parsed.query:
+        resource += "?" + parsed.query
+
+    return (hostname, port, resource, is_secure)
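+
+# Example results of _parse_url, a sketch based on the parsing logic above:
+#   _parse_url("ws://echo.websocket.org/")
+#     -> ("echo.websocket.org", 80, "/", False)
+#   _parse_url("wss://example.com:8443/chat?x=1")
+#     -> ("example.com", 8443, "/chat?x=1", True)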
+
+
+def create_connection(url, timeout=None, **options):
+    """
+    connect to url and return websocket object.
+
+    Connect to url and return the WebSocket object.
+    Passing optional timeout parameter will set the timeout on the socket.
+    If no timeout is supplied, the global default timeout setting returned by getdefaulttimeout() is used.
+    You can customize using 'options'.
+    If you set "header" list object, you can set your own custom header.
+
+    >>> conn = create_connection("ws://echo.websocket.org/",
+         ...     header=["User-Agent: MyProgram",
+         ...             "x-custom: header"])
+
+
+    timeout: socket timeout time. This value is integer.
+             if you set None for this value, it means "use default_timeout value"
+
+    options: current support option is only "header".
+             if you set header as dict value, the custom HTTP headers are added.
+    """
+    sockopt = options.get("sockopt", [])
+    sslopt = options.get("sslopt", {})
+    websock = WebSocket(sockopt=sockopt, sslopt=sslopt)
+    websock.settimeout(timeout if timeout is not None else default_timeout)
+    websock.connect(url, **options)
+    return websock
+
+_MAX_INTEGER = (1 << 32) -1
+_AVAILABLE_KEY_CHARS = range(0x21, 0x2f + 1) + range(0x3a, 0x7e + 1)
+_MAX_CHAR_BYTE = (1<<8) -1
+
+# ref. Websocket gets an update, and it breaks stuff.
+# http://axod.blogspot.com/2010/06/websocket-gets-update-and-it-breaks.html
+
+
+def _create_sec_websocket_key():
+    uid = uuid.uuid4()
+    return base64.encodestring(uid.bytes).strip()
+
+
+_HEADERS_TO_CHECK = {
+    "upgrade": "websocket",
+    "connection": "upgrade",
+    }
+
+
+class ABNF(object):
+    """
+    ABNF frame class.
+    see http://tools.ietf.org/html/rfc5234
+    and http://tools.ietf.org/html/rfc6455#section-5.2
+    """
+
+    # operation code values.
+    OPCODE_CONT   = 0x0
+    OPCODE_TEXT   = 0x1
+    OPCODE_BINARY = 0x2
+    OPCODE_CLOSE  = 0x8
+    OPCODE_PING   = 0x9
+    OPCODE_PONG   = 0xa
+
+    # available operation code value tuple
+    OPCODES = (OPCODE_CONT, OPCODE_TEXT, OPCODE_BINARY, OPCODE_CLOSE,
+                OPCODE_PING, OPCODE_PONG)
+
+    # opcode human readable string
+    OPCODE_MAP = {
+        OPCODE_CONT: "cont",
+        OPCODE_TEXT: "text",
+        OPCODE_BINARY: "binary",
+        OPCODE_CLOSE: "close",
+        OPCODE_PING: "ping",
+        OPCODE_PONG: "pong"
+        }
+
+    # data length thresholds.
+    LENGTH_7  = 0x7d
+    LENGTH_16 = 1 << 16
+    LENGTH_63 = 1 << 63
+
+    def __init__(self, fin=0, rsv1=0, rsv2=0, rsv3=0,
+                 opcode=OPCODE_TEXT, mask=1, data=""):
+        """
+        Constructor for ABNF.
+        please check RFC for arguments.
+        """
+        self.fin = fin
+        self.rsv1 = rsv1
+        self.rsv2 = rsv2
+        self.rsv3 = rsv3
+        self.opcode = opcode
+        self.mask = mask
+        self.data = data
+        self.get_mask_key = os.urandom
+
+    def __str__(self):
+        return "fin=" + str(self.fin) \
+                + " opcode=" + str(self.opcode) \
+                + " data=" + str(self.data)
+
+    @staticmethod
+    def create_frame(data, opcode):
+        """
+        create frame to send text, binary and other data.
+
+        data: data to send. This is a string value (byte array).
+            if opcode is OPCODE_TEXT and this value is unicode,
+            the data is encoded to a utf-8 string automatically.
+
+        opcode: operation code. please see OPCODE_XXX.
+        """
+        if opcode == ABNF.OPCODE_TEXT and isinstance(data, unicode):
+            data = data.encode("utf-8")
+        # mask must be set if send data from client
+        return ABNF(1, 0, 0, 0, opcode, 1, data)
+
+    def format(self):
+        """
+        format this object to string(byte array) to send data to server.
+        """
+        if any(x not in (0, 1) for x in [self.fin, self.rsv1, self.rsv2, self.rsv3]):
+            raise ValueError("not 0 or 1")
+        if self.opcode not in ABNF.OPCODES:
+            raise ValueError("Invalid OPCODE")
+        length = len(self.data)
+        if length >= ABNF.LENGTH_63:
+            raise ValueError("data is too long")
+
+        frame_header = chr(self.fin << 7
+                           | self.rsv1 << 6 | self.rsv2 << 5 | self.rsv3 << 4
+                           | self.opcode)
+        if length < ABNF.LENGTH_7:
+            frame_header += chr(self.mask << 7 | length)
+        elif length < ABNF.LENGTH_16:
+            frame_header += chr(self.mask << 7 | 0x7e)
+            frame_header += struct.pack("!H", length)
+        else:
+            frame_header += chr(self.mask << 7 | 0x7f)
+            frame_header += struct.pack("!Q", length)
+
+        if not self.mask:
+            return frame_header + self.data
+        else:
+            mask_key = self.get_mask_key(4)
+            return frame_header + self._get_masked(mask_key)
+
+    def _get_masked(self, mask_key):
+        s = ABNF.mask(mask_key, self.data)
+        return mask_key + "".join(s)
+
+    @staticmethod
+    def mask(mask_key, data):
+        """
+        mask or unmask data. Just do xor for each byte
+
+        mask_key: 4 byte string(byte).
+
+        data: data to mask/unmask.
+        """
+        _m = array.array("B", mask_key)
+        _d = array.array("B", data)
+        for i in xrange(len(_d)):
+            _d[i] ^= _m[i % 4]
+        return _d.tostring()
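+
+# Frame layout note (a sketch, following RFC 6455 and the format() method above):
+# ABNF.create_frame(u"hi", ABNF.OPCODE_TEXT).format() yields a client frame whose
+# first byte is 0x81 (FIN=1, opcode=text) and whose second byte is 0x82
+# (mask bit set, payload length 2), followed by the 4-byte mask key and the
+# 2 masked payload bytes.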
+
+
+class WebSocket(object):
+    """
+    Low level WebSocket interface.
+    This class is based on
+      The WebSocket protocol draft-hixie-thewebsocketprotocol-76
+      http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76
+
+    We can connect to the websocket server and send/receive data.
+    The following example is an echo client.
+
+    >>> import websocket
+    >>> ws = websocket.WebSocket()
+    >>> ws.connect("ws://echo.websocket.org")
+    >>> ws.send("Hello, Server")
+    >>> ws.recv()
+    'Hello, Server'
+    >>> ws.close()
+
+    get_mask_key: a callable to produce new mask keys, see the set_mask_key
+      function's docstring for more details
+    sockopt: values for socket.setsockopt.
+        sockopt must be a tuple and each element is an argument of sock.setsockopt.
+    sslopt: dict object for ssl socket option.
+    """
+
+    def __init__(self, get_mask_key=None, sockopt=None, sslopt=None):
+        """
+        Initialize the WebSocket object.
+        """
+        if sockopt is None:
+            sockopt = []
+        if sslopt is None:
+            sslopt = {}
+        self.connected = False
+        self.sock = socket.socket()
+        for opts in sockopt:
+            self.sock.setsockopt(*opts)
+        self.sslopt = sslopt
+        self.get_mask_key = get_mask_key
+        # Buffers the packets from the layer beneath until the desired number
+        # of bytes is received.
+        self._recv_buffer = []
+        # These buffer over the build-up of a single frame.
+        self._frame_header = None
+        self._frame_length = None
+        self._frame_mask = None
+        self._cont_data = None
+
+    def fileno(self):
+        return self.sock.fileno()
+
+    def set_mask_key(self, func):
+        """
+        Set the function used to create the mask key. You can customize the
+        mask key generator. Mainly, this is for testing purposes.
+
+        func: callable object. The func must take 1 integer argument, the
+              length of the mask key, and must return a string (byte array)
+              of that length.
+        """
+        self.get_mask_key = func
+
+    def gettimeout(self):
+        """
+        Get the websocket timeout(second).
+        """
+        return self.sock.gettimeout()
+
+    def settimeout(self, timeout):
+        """
+        Set the timeout to the websocket.
+
+        timeout: timeout time(second).
+        """
+        self.sock.settimeout(timeout)
+
+    timeout = property(gettimeout, settimeout)
+
+    def connect(self, url, **options):
+        """
+        Connect to url. url is a websocket url scheme, i.e. ws://host:port/resource
+        You can customize using 'options'.
+        If you set "header" dict object, you can set your own custom header.
+
+        >>> ws = WebSocket()
+        >>> ws.connect("ws://echo.websocket.org/",
+                ...     header={"User-Agent: MyProgram",
+                ...             "x-custom: header"})
+
+        timeout: socket timeout time. This value is integer.
+                 if you set None for this value,
+                 it means "use default_timeout value"
+
+        options: current support option is only "header".
+                 if you set header as dict value,
+                 the custom HTTP headers are added.
+
+        """
+        hostname, port, resource, is_secure = _parse_url(url)
+        # TODO: we need to support proxy
+        self.sock.connect((hostname, port))
+        if is_secure:
+            if HAVE_SSL:
+                if self.sslopt is None:
+                    sslopt = {}
+                else:
+                    sslopt = self.sslopt
+                self.sock = ssl.wrap_socket(self.sock, **sslopt)
+            else:
+                raise WebSocketException("SSL not available.")
+
+        self._handshake(hostname, port, resource, **options)
+
+    def _handshake(self, host, port, resource, **options):
+        sock = self.sock
+        headers = []
+        headers.append("GET %s HTTP/1.1" % resource)
+        headers.append("Upgrade: websocket")
+        headers.append("Connection: Upgrade")
+        if port == 80:
+            hostport = host
+        else:
+            hostport = "%s:%d" % (host, port)
+        headers.append("Host: %s" % hostport)
+
+        if "origin" in options:
+            headers.append("Origin: %s" % options["origin"])
+        else:
+            headers.append("Origin: http://%s" % hostport)
+
+        key = _create_sec_websocket_key()
+        headers.append("Sec-WebSocket-Key: %s" % key)
+        headers.append("Sec-WebSocket-Version: %s" % VERSION)
+        if "header" in options:
+            headers.extend(options["header"])
+
+        headers.append("")
+        headers.append("")
+
+        header_str = "\r\n".join(headers)
+        self._send(header_str)
+        if traceEnabled:
+            logger.debug("--- request header ---")
+            logger.debug(header_str)
+            logger.debug("-----------------------")
+
+        status, resp_headers = self._read_headers()
+        if status != 101:
+            self.close()
+            raise WebSocketException("Handshake Status %d" % status)
+
+        success = self._validate_header(resp_headers, key)
+        if not success:
+            self.close()
+            raise WebSocketException("Invalid WebSocket Header")
+
+        self.connected = True
+
+    def _validate_header(self, headers, key):
+        for k, v in _HEADERS_TO_CHECK.iteritems():
+            r = headers.get(k, None)
+            if not r:
+                return False
+            r = r.lower()
+            if v != r:
+                return False
+
+        result = headers.get("sec-websocket-accept", None)
+        if not result:
+            return False
+        result = result.lower()
+
+        value = key + "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
+        hashed = base64.encodestring(hashlib.sha1(value).digest()).strip().lower()
+        return hashed == result
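+
+    # Handshake sanity check (per the RFC 6455 example): a client key of
+    # "dGhlIHNhbXBsZSBub25jZQ==" concatenated with the GUID above, SHA-1
+    # hashed and base64 encoded, yields "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=", which
+    # is what the server must return in Sec-WebSocket-Accept (compared
+    # case-insensitively here).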
+
+    def _read_headers(self):
+        status = None
+        headers = {}
+        if traceEnabled:
+            logger.debug("--- response header ---")
+
+        while True:
+            line = self._recv_line()
+            if line == "\r\n":
+                break
+            line = line.strip()
+            if traceEnabled:
+                logger.debug(line)
+            if not status:
+                status_info = line.split(" ", 2)
+                status = int(status_info[1])
+            else:
+                kv = line.split(":", 1)
+                if len(kv) == 2:
+                    key, value = kv
+                    headers[key.lower()] = value.strip().lower()
+                else:
+                    raise WebSocketException("Invalid header")
+
+        if traceEnabled:
+            logger.debug("-----------------------")
+
+        return status, headers
+
+    def send(self, payload, opcode=ABNF.OPCODE_TEXT):
+        """
+        Send the data as string.
+
+        payload: Payload must be utf-8 string or unicode,
+                  if the opcode is OPCODE_TEXT.
+                  Otherwise, it must be string(byte array)
+
+        opcode: operation code to send. Please see OPCODE_XXX.
+        """
+        frame = ABNF.create_frame(payload, opcode)
+        if self.get_mask_key:
+            frame.get_mask_key = self.get_mask_key
+        data = frame.format()
+        length = len(data)
+        if traceEnabled:
+            logger.debug("send: " + repr(data))
+        while data:
+            l = self._send(data)
+            data = data[l:]
+        return length
+
+    def send_binary(self, payload):
+        return self.send(payload, ABNF.OPCODE_BINARY)
+
+    def ping(self, payload=""):
+        """
+        send ping data.
+
+        payload: data payload to send server.
+        """
+        self.send(payload, ABNF.OPCODE_PING)
+
+    def pong(self, payload):
+        """
+        send pong data.
+
+        payload: data payload to send server.
+        """
+        self.send(payload, ABNF.OPCODE_PONG)
+
+    def recv(self):
+        """
+        Receive string data(byte array) from the server.
+
+        return value: string(byte array) value.
+        """
+        opcode, data = self.recv_data()
+        return data
+
+    def recv_data(self):
+        """
+        Receive data with operation code.
+
+        return  value: tuple of operation code and string(byte array) value.
+        """
+        while True:
+            frame = self.recv_frame()
+            if not frame:
+                # handle error:
+                # 'NoneType' object has no attribute 'opcode'
+                raise WebSocketException("Not a valid frame %s" % frame)
+            elif frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY, ABNF.OPCODE_CONT):
+                if frame.opcode == ABNF.OPCODE_CONT and not self._cont_data:
+                    raise WebSocketException("Illegal frame")
+                if self._cont_data:
+                    self._cont_data[1] += frame.data
+                else:
+                    self._cont_data = [frame.opcode, frame.data]
+                
+                if frame.fin:
+                    data = self._cont_data
+                    self._cont_data = None
+                    return data
+            elif frame.opcode == ABNF.OPCODE_CLOSE:
+                self.send_close()
+                return (frame.opcode, None)
+            elif frame.opcode == ABNF.OPCODE_PING:
+                self.pong(frame.data)
+
+    def recv_frame(self):
+        """
+        Receive data as a frame from the server.
+
+        return value: ABNF frame object.
+        """
+        # Header
+        if self._frame_header is None:
+            self._frame_header = self._recv_strict(2)
+        b1 = ord(self._frame_header[0])
+        fin = b1 >> 7 & 1
+        rsv1 = b1 >> 6 & 1
+        rsv2 = b1 >> 5 & 1
+        rsv3 = b1 >> 4 & 1
+        opcode = b1 & 0xf
+        b2 = ord(self._frame_header[1])
+        has_mask = b2 >> 7 & 1
+        # Frame length
+        if self._frame_length is None:
+            length_bits = b2 & 0x7f
+            if length_bits == 0x7e:
+                length_data = self._recv_strict(2)
+                self._frame_length = struct.unpack("!H", length_data)[0]
+            elif length_bits == 0x7f:
+                length_data = self._recv_strict(8)
+                self._frame_length = struct.unpack("!Q", length_data)[0]
+            else:
+                self._frame_length = length_bits
+        # Mask
+        if self._frame_mask is None:
+            self._frame_mask = self._recv_strict(4) if has_mask else ""
+        # Payload
+        payload = self._recv_strict(self._frame_length)
+        if has_mask:
+            payload = ABNF.mask(self._frame_mask, payload)
+        # Reset for next frame
+        self._frame_header = None
+        self._frame_length = None
+        self._frame_mask = None
+        return ABNF(fin, rsv1, rsv2, rsv3, opcode, has_mask, payload)
+
+
+    def send_close(self, status=STATUS_NORMAL, reason=""):
+        """
+        send close data to the server.
+
+        status: status code to send. see STATUS_XXX.
+
+        reason: the reason to close. This must be string.
+        """
+        if status < 0 or status >= ABNF.LENGTH_16:
+            raise ValueError("code is invalid range")
+        self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE)
+
+    def close(self, status=STATUS_NORMAL, reason=""):
+        """
+        Close Websocket object
+
+        status: status code to send. see STATUS_XXX.
+
+        reason: the reason to close. This must be string.
+        """
+        if self.connected:
+            if status < 0 or status >= ABNF.LENGTH_16:
+                raise ValueError("code is invalid range")
+
+            try:
+                self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE)
+                timeout = self.sock.gettimeout()
+                self.sock.settimeout(3)
+                try:
+                    frame = self.recv_frame()
+                    if logger.isEnabledFor(logging.ERROR):
+                        recv_status = struct.unpack("!H", frame.data)[0]
+                        if recv_status != STATUS_NORMAL:
+                            logger.error("close status: " + repr(recv_status))
+                except:
+                    pass
+                self.sock.settimeout(timeout)
+                self.sock.shutdown(socket.SHUT_RDWR)
+            except:
+                pass
+        self._closeInternal()
+
+    def _closeInternal(self):
+        self.connected = False
+        self.sock.close()
+
+    def _send(self, data):
+        try:
+            return self.sock.send(data)
+        except socket.timeout as e:
+            raise WebSocketTimeoutException(e.message)
+        except Exception as e:
+            if "timed out" in e.message:
+                raise WebSocketTimeoutException(e.message)
+            else:
+                raise e
+
+    def _recv(self, bufsize):
+        try:
+            bytes = self.sock.recv(bufsize)
+        except socket.timeout as e:
+            raise WebSocketTimeoutException(e.message)
+        except SSLError as e:
+            if e.message == "The read operation timed out":
+                raise WebSocketTimeoutException(e.message)
+            else:
+                raise
+        if not bytes:
+            raise WebSocketConnectionClosedException()
+        return bytes
+
+
+    def _recv_strict(self, bufsize):
+        shortage = bufsize - sum(len(x) for x in self._recv_buffer)
+        while shortage > 0:
+            bytes = self._recv(shortage)
+            self._recv_buffer.append(bytes)
+            shortage -= len(bytes)
+        unified = "".join(self._recv_buffer)
+        if shortage == 0:
+            self._recv_buffer = []
+            return unified
+        else:
+            self._recv_buffer = [unified[bufsize:]]
+            return unified[:bufsize]
+
+
+    def _recv_line(self):
+        line = []
+        while True:
+            c = self._recv(1)
+            line.append(c)
+            if c == "\n":
+                break
+        return "".join(line)
+
+
+class WebSocketApp(object):
+    """
+    Higher-level APIs are provided.
+    The interface is like the JavaScript WebSocket object.
+    """
+    def __init__(self, url, header=[],
+                 on_open=None, on_message=None, on_error=None,
+                 on_close=None, keep_running=True, get_mask_key=None):
+        """
+        url: websocket url.
+        header: custom header for websocket handshake.
+        on_open: callable object which is called at opening of the websocket.
+          This function has one argument: this class object.
+        on_message: callable object which is called when data is received.
+          on_message has 2 arguments: this class object and the utf-8 string
+          we get from the server.
+        on_error: callable object which is called when we get an error.
+          on_error has 2 arguments: this class object and the exception object.
+        on_close: callable object which is called when the connection is closed.
+          This function has one argument: this class object.
+        keep_running: a boolean flag indicating whether the app's main loop
+          should keep running; defaults to True.
+        get_mask_key: a callable to produce new mask keys; see the
+          WebSocket.set_mask_key docstring for more information.
+        """
+        self.url = url
+        self.header = header
+        self.on_open = on_open
+        self.on_message = on_message
+        self.on_error = on_error
+        self.on_close = on_close
+        self.keep_running = keep_running
+        self.get_mask_key = get_mask_key
+        self.sock = None
+
+    def send(self, data, opcode=ABNF.OPCODE_TEXT):
+        """
+        send message.
+        data: message to send. If you set opcode to OPCODE_TEXT, data must be utf-8 string or unicode.
+        opcode: operation code of data. default is OPCODE_TEXT.
+        """
+        if self.sock.send(data, opcode) == 0:
+            raise WebSocketConnectionClosedException()
+
+    def close(self):
+        """
+        close websocket connection.
+        """
+        self.keep_running = False
+        self.sock.close()
+
+    def _send_ping(self, interval):
+        while True:
+            for i in range(interval):
+                time.sleep(1)
+                if not self.keep_running:
+                    return
+            self.sock.ping()
+
+    def run_forever(self, sockopt=None, sslopt=None, ping_interval=0):
+        """
+        Run the event loop for the WebSocket framework.
+        This loop is infinite and stays alive while the websocket is available.
+        sockopt: values for socket.setsockopt.
+            sockopt must be a tuple and each element is an argument of sock.setsockopt.
+        sslopt: ssl socket optional dict.
+        ping_interval: automatically send a "ping" command every specified period (in seconds).
+            If set to 0, pings are not sent automatically.
+        """
+        if sockopt is None:
+            sockopt = []
+        if sslopt is None:
+            sslopt = {}
+        if self.sock:
+            raise WebSocketException("socket is already opened")
+        thread = None
+
+        try:
+            self.sock = WebSocket(self.get_mask_key, sockopt=sockopt, sslopt=sslopt)
+            self.sock.settimeout(default_timeout)
+            self.sock.connect(self.url, header=self.header)
+            self._callback(self.on_open)
+
+            if ping_interval:
+                thread = threading.Thread(target=self._send_ping, args=(ping_interval,))
+                thread.setDaemon(True)
+                thread.start()
+
+            while self.keep_running:
+                data = self.sock.recv()
+                if data is None:
+                    break
+                self._callback(self.on_message, data)
+        except Exception, e:
+            self._callback(self.on_error, e)
+        finally:
+            if thread:
+                self.keep_running = False
+            self.sock.close()
+            self._callback(self.on_close)
+            self.sock = None
+
+    def _callback(self, callback, *args):
+        if callback:
+            try:
+                callback(self, *args)
+            except Exception, e:
+                logger.error(e)
+                if logger.isEnabledFor(logging.DEBUG):
+                    _, _, tb = sys.exc_info()
+                    traceback.print_tb(tb)
+
+
+if __name__ == "__main__":
+    enableTrace(True)
+    ws = create_connection("ws://echo.websocket.org/")
+    print("Sending 'Hello, World'...")
+    ws.send("Hello, World")
+    print("Sent")
+    print("Receiving...")
+    result = ws.recv()
+    print("Received '%s'" % result)
+    ws.close()
diff --git a/catapult/telemetry/update_docs b/catapult/telemetry/update_docs
new file mode 100755
index 0000000..f2f92ac
--- /dev/null
+++ b/catapult/telemetry/update_docs
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import os
+import sys
+
+from build import update_docs
+
+if __name__ == '__main__':
+  sys.exit(update_docs.Main(sys.argv[1:]))
diff --git a/catapult/telemetry/validate_binary_dependencies b/catapult/telemetry/validate_binary_dependencies
new file mode 100755
index 0000000..0294112
--- /dev/null
+++ b/catapult/telemetry/validate_binary_dependencies
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import argparse
+import json
+import os
+import sys
+
+from telemetry.core import util
+
+sys.path.insert(1, os.path.abspath(os.path.join(
+    util.GetCatapultDir(), 'catapult_base')))
+sys.path.insert(1, os.path.abspath(os.path.join(
+    util.GetCatapultDir(), 'dependency_manager')))
+from catapult_base import cloud_storage
+import dependency_manager
+
+
+def ValidateCloudStorageDependencies(file_path):
+  base_config = dependency_manager.BaseConfig(file_path)
+  cloud_storage_deps_not_exist = False
+  for dep_info in base_config.IterDependencyInfo():
+    if dep_info.has_cloud_storage_info:
+      if not dep_info.cloud_storage_info.DependencyExistsInCloudStorage():
+        print >> sys.stderr, (
+          '%s does not exist in cloud storage' % dep_info.cloud_storage_info)
+        cloud_storage_deps_not_exist = True
+      else:
+        print >> sys.stdout, (
+          '%s passes cloud storage validation' % dep_info.dependency)
+
+  if cloud_storage_deps_not_exist:
+    raise Exception(
+        "Some dependencies specify cloud storage locations that don't exist.")
+
+
+def Main(args):
+  parser = argparse.ArgumentParser(
+      description='Validate the dependencies in a binary dependency json file')
+  parser.add_argument('file_path', type=str,
+                      help='The path to binary dependency json file')
+  options = parser.parse_args(args)
+  ValidateCloudStorageDependencies(options.file_path)
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(Main(sys.argv[1:]))
diff --git a/catapult/third_party/Paste/README.chromium b/catapult/third_party/Paste/README.chromium
index 05b6698..8e2f957 100644
--- a/catapult/third_party/Paste/README.chromium
+++ b/catapult/third_party/Paste/README.chromium
@@ -6,3 +6,6 @@
 License: MIT
 License File: docs/license.txt
 Security Critical: no
+
+Local Modifications:
+Removed ".orig" files (paste/urlmap.py.orig and paste/util/template.py.orig).
diff --git a/catapult/third_party/Paste/paste/urlmap.py.orig b/catapult/third_party/Paste/paste/urlmap.py.orig
deleted file mode 100644
index f721f2d..0000000
--- a/catapult/third_party/Paste/paste/urlmap.py.orig
+++ /dev/null
@@ -1,263 +0,0 @@
-# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
-# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
-"""
-Map URL prefixes to WSGI applications.  See ``URLMap``
-"""
-
-import re
-import os
-import cgi
-try:
-    # Python 3
-    from collections import MutableMapping as DictMixin
-except ImportError:
-    # Python 2
-    from UserDict import DictMixin
-
-from paste import httpexceptions
-
-__all__ = ['URLMap', 'PathProxyURLMap']
-
-def urlmap_factory(loader, global_conf, **local_conf):
-    if 'not_found_app' in local_conf:
-        not_found_app = local_conf.pop('not_found_app')
-    else:
-        not_found_app = global_conf.get('not_found_app')
-    if not_found_app:
-        not_found_app = loader.get_app(not_found_app, global_conf=global_conf)
-    urlmap = URLMap(not_found_app=not_found_app)
-    for path, app_name in local_conf.items():
-        path = parse_path_expression(path)
-        app = loader.get_app(app_name, global_conf=global_conf)
-        urlmap[path] = app
-    return urlmap
-
-def parse_path_expression(path):
-    """
-    Parses a path expression like 'domain foobar.com port 20 /' or
-    just '/foobar' for a path alone.  Returns as an address that
-    URLMap likes.
-    """
-    parts = path.split()
-    domain = port = path = None
-    while parts:
-        if parts[0] == 'domain':
-            parts.pop(0)
-            if not parts:
-                raise ValueError("'domain' must be followed with a domain name")
-            if domain:
-                raise ValueError("'domain' given twice")
-            domain = parts.pop(0)
-        elif parts[0] == 'port':
-            parts.pop(0)
-            if not parts:
-                raise ValueError("'port' must be followed with a port number")
-            if port:
-                raise ValueError("'port' given twice")
-            port = parts.pop(0)
-        else:
-            if path:
-                raise ValueError("more than one path given (have %r, got %r)"
-                                 % (path, parts[0]))
-            path = parts.pop(0)
-    s = ''
-    if domain:
-        s = 'http://%s' % domain
-    if port:
-        if not domain:
-            raise ValueError("If you give a port, you must also give a domain")
-        s += ':' + port
-    if path:
-        if s:
-            s += '/'
-        s += path
-    return s
-
-class URLMap(DictMixin):
-
-    """
-    URLMap instances are dictionary-like object that dispatch to one
-    of several applications based on the URL.
-
-    The dictionary keys are URLs to match (like
-    ``PATH_INFO.startswith(url)``), and the values are applications to
-    dispatch to.  URLs are matched most-specific-first, i.e., longest
-    URL first.  The ``SCRIPT_NAME`` and ``PATH_INFO`` environmental
-    variables are adjusted to indicate the new context.
-
-    URLs can also include domains, like ``http://blah.com/foo``, or as
-    tuples ``('blah.com', '/foo')``.  This will match domain names; without
-    the ``http://domain`` or with a domain of ``None`` any domain will be
-    matched (so long as no other explicit domain matches).  """
-
-    def __init__(self, not_found_app=None):
-        self.applications = []
-        if not not_found_app:
-            not_found_app = self.not_found_app
-        self.not_found_application = not_found_app
-
-    def __len__(self):
-        return len(self.applications)
-
-    def __iter__(self):
-        for app_url, app in self.applications:
-            yield app_url
-
-    norm_url_re = re.compile('//+')
-    domain_url_re = re.compile('^(http|https)://')
-
-    def not_found_app(self, environ, start_response):
-        mapper = environ.get('paste.urlmap_object')
-        if mapper:
-            matches = [p for p, a in mapper.applications]
-            extra = 'defined apps: %s' % (
-                ',\n  '.join(map(repr, matches)))
-        else:
-            extra = ''
-        extra += '\nSCRIPT_NAME: %r' % cgi.escape(environ.get('SCRIPT_NAME'))
-        extra += '\nPATH_INFO: %r' % cgi.escape(environ.get('PATH_INFO'))
-        extra += '\nHTTP_HOST: %r' % cgi.escape(environ.get('HTTP_HOST'))
-        app = httpexceptions.HTTPNotFound(
-            environ['PATH_INFO'],
-            comment=cgi.escape(extra)).wsgi_application
-        return app(environ, start_response)
-
-    def normalize_url(self, url, trim=True):
-        if isinstance(url, (list, tuple)):
-            domain = url[0]
-            url = self.normalize_url(url[1])[1]
-            return domain, url
-        assert (not url or url.startswith('/')
-                or self.domain_url_re.search(url)), (
-            "URL fragments must start with / or http:// (you gave %r)" % url)
-        match = self.domain_url_re.search(url)
-        if match:
-            url = url[match.end():]
-            if '/' in url:
-                domain, url = url.split('/', 1)
-                url = '/' + url
-            else:
-                domain, url = url, ''
-        else:
-            domain = None
-        url = self.norm_url_re.sub('/', url)
-        if trim:
-            url = url.rstrip('/')
-        return domain, url
-
-    def sort_apps(self):
-        """
-        Make sure applications are sorted with longest URLs first
-        """
-        def key(app_desc):
-            (domain, url), app = app_desc
-            if not domain:
-                # Make sure empty domains sort last:
-                return '\xff', -len(url)
-            else:
-                return domain, -len(url)
-        apps = [(key(desc), desc) for desc in self.applications]
-        apps.sort()
-        self.applications = [desc for (sortable, desc) in apps]
-
-    def __setitem__(self, url, app):
-        if app is None:
-            try:
-                del self[url]
-            except KeyError:
-                pass
-            return
-        dom_url = self.normalize_url(url)
-        if dom_url in self:
-            del self[dom_url]
-        self.applications.append((dom_url, app))
-        self.sort_apps()
-
-    def __getitem__(self, url):
-        dom_url = self.normalize_url(url)
-        for app_url, app in self.applications:
-            if app_url == dom_url:
-                return app
-        raise KeyError(
-            "No application with the url %r (domain: %r; existing: %s)"
-            % (url[1], url[0] or '*', self.applications))
-
-    def __delitem__(self, url):
-        url = self.normalize_url(url)
-        for app_url, app in self.applications:
-            if app_url == url:
-                self.applications.remove((app_url, app))
-                break
-        else:
-            raise KeyError(
-                "No application with the url %r" % (url,))
-
-    def keys(self):
-        return [app_url for app_url, app in self.applications]
-
-    def __call__(self, environ, start_response):
-        host = environ.get('HTTP_HOST', environ.get('SERVER_NAME')).lower()
-        if ':' in host:
-            host, port = host.split(':', 1)
-        else:
-            if environ['wsgi.url_scheme'] == 'http':
-                port = '80'
-            else:
-                port = '443'
-        path_info = environ.get('PATH_INFO')
-        path_info = self.normalize_url(path_info, False)[1]
-        for (domain, app_url), app in self.applications:
-            if domain and domain != host and domain != host+':'+port:
-                continue
-            if (path_info == app_url
-                or path_info.startswith(app_url + '/')):
-                environ['SCRIPT_NAME'] += app_url
-                environ['PATH_INFO'] = path_info[len(app_url):]
-                return app(environ, start_response)
-        environ['paste.urlmap_object'] = self
-        return self.not_found_application(environ, start_response)
-
-
-class PathProxyURLMap(object):
-
-    """
-    This is a wrapper for URLMap that catches any strings that
-    are passed in as applications; these strings are treated as
-    filenames (relative to `base_path`) and are passed to the
-    callable `builder`, which will return an application.
-
-    This is intended for cases when configuration files can be
-    treated as applications.
-
-    `base_paste_url` is the URL under which all applications added through
-    this wrapper must go.  Use ``""`` if you want this to not
-    change incoming URLs.
-    """
-
-    def __init__(self, map, base_paste_url, base_path, builder):
-        self.map = map
-        self.base_paste_url = self.map.normalize_url(base_paste_url)
-        self.base_path = base_path
-        self.builder = builder
-
-    def __setitem__(self, url, app):
-        if isinstance(app, (str, unicode)):
-            app_fn = os.path.join(self.base_path, app)
-            app = self.builder(app_fn)
-        url = self.map.normalize_url(url)
-        # @@: This means http://foo.com/bar will potentially
-        # match foo.com, but /base_paste_url/bar, which is unintuitive
-        url = (url[0] or self.base_paste_url[0],
-               self.base_paste_url[1] + url[1])
-        self.map[url] = app
-
-    def __getattr__(self, attr):
-        return getattr(self.map, attr)
-
-    # This is really the only settable attribute
-    def not_found_application__get(self):
-        return self.map.not_found_application
-    def not_found_application__set(self, value):
-        self.map.not_found_application = value
-    not_found_application = property(not_found_application__get,
-                                     not_found_application__set)
diff --git a/catapult/third_party/Paste/paste/util/template.py.orig b/catapult/third_party/Paste/paste/util/template.py.orig
deleted file mode 100644
index 89466c3..0000000
--- a/catapult/third_party/Paste/paste/util/template.py.orig
+++ /dev/null
@@ -1,762 +0,0 @@
-"""
-A small templating language
-
-This implements a small templating language for use internally in
-Paste and Paste Script.  This language implements if/elif/else,
-for/continue/break, expressions, and blocks of Python code.  The
-syntax is::
-
-  {{any expression (function calls etc)}}
-  {{any expression | filter}}
-  {{for x in y}}...{{endfor}}
-  {{if x}}x{{elif y}}y{{else}}z{{endif}}
-  {{py:x=1}}
-  {{py:
-  def foo(bar):
-      return 'baz'
-  }}
-  {{default var = default_value}}
-  {{# comment}}
-
-You use this with the ``Template`` class or the ``sub`` shortcut.
-The ``Template`` class takes the template string and the name of
-the template (for errors) and a default namespace.  Then (like
-``string.Template``) you can call the ``tmpl.substitute(**kw)``
-method to make a substitution (or ``tmpl.substitute(a_dict)``).
-
-``sub(content, **kw)`` substitutes the template immediately.  You
-can use ``__name='tmpl.html'`` to set the name of the template.
-
-If there are syntax errors ``TemplateError`` will be raised.
-"""
-
-import re
-import six
-import sys
-import cgi
-from six.moves.urllib.parse import quote
-from paste.util.looper import looper
-
-__all__ = ['TemplateError', 'Template', 'sub', 'HTMLTemplate',
-           'sub_html', 'html', 'bunch']
-
-token_re = re.compile(r'\{\{|\}\}')
-in_re = re.compile(r'\s+in\s+')
-var_re = re.compile(r'^[a-z_][a-z0-9_]*$', re.I)
-
-class TemplateError(Exception):
-    """Exception raised while parsing a template
-    """
-
-    def __init__(self, message, position, name=None):
-        self.message = message
-        self.position = position
-        self.name = name
-
-    def __str__(self):
-        msg = '%s at line %s column %s' % (
-            self.message, self.position[0], self.position[1])
-        if self.name:
-            msg += ' in %s' % self.name
-        return msg
-
-class _TemplateContinue(Exception):
-    pass
-
-class _TemplateBreak(Exception):
-    pass
-
-class Template(object):
-
-    default_namespace = {
-        'start_braces': '{{',
-        'end_braces': '}}',
-        'looper': looper,
-        }
-
-    default_encoding = 'utf8'
-
-    def __init__(self, content, name=None, namespace=None):
-        self.content = content
-        self._unicode = isinstance(content, six.text_type)
-        self.name = name
-
-        if not self._unicode:
-            content = content.decode(self.default_encoding)
-            self._unicode = True
-
-        self._parsed = parse(content, name=name)
-        if namespace is None:
-            namespace = {}
-        self.namespace = namespace
-
-    def from_filename(cls, filename, namespace=None, encoding=None):
-        f = open(filename, 'rb')
-        c = f.read()
-        f.close()
-        if encoding:
-            c = c.decode(encoding)
-        return cls(content=c, name=filename, namespace=namespace)
-
-    from_filename = classmethod(from_filename)
-
-    def __repr__(self):
-        return '<%s %s name=%r>' % (
-            self.__class__.__name__,
-            hex(id(self))[2:], self.name)
-
-    def substitute(self, *args, **kw):
-        if args:
-            if kw:
-                raise TypeError(
-                    "You can only give positional *or* keyword arguments")
-            if len(args) > 1:
-                raise TypeError(
-                    "You can only give on positional argument")
-            kw = args[0]
-        ns = self.default_namespace.copy()
-        ns.update(self.namespace)
-        ns.update(kw)
-        result = self._interpret(ns)
-        return result
-
-    def _interpret(self, ns):
-        __traceback_hide__ = True
-        parts = []
-        self._interpret_codes(self._parsed, ns, out=parts)
-        return ''.join(parts)
-
-    def _interpret_codes(self, codes, ns, out):
-        __traceback_hide__ = True
-        for item in codes:
-            if isinstance(item, six.string_types):
-                out.append(item)
-            else:
-                self._interpret_code(item, ns, out)
-
-    def _interpret_code(self, code, ns, out):
-        __traceback_hide__ = True
-        name, pos = code[0], code[1]
-        if name == 'py':
-            self._exec(code[2], ns, pos)
-        elif name == 'continue':
-            raise _TemplateContinue()
-        elif name == 'break':
-            raise _TemplateBreak()
-        elif name == 'for':
-            vars, expr, content = code[2], code[3], code[4]
-            expr = self._eval(expr, ns, pos)
-            self._interpret_for(vars, expr, content, ns, out)
-        elif name == 'cond':
-            parts = code[2:]
-            self._interpret_if(parts, ns, out)
-        elif name == 'expr':
-            parts = code[2].split('|')
-            base = self._eval(parts[0], ns, pos)
-            for part in parts[1:]:
-                func = self._eval(part, ns, pos)
-                base = func(base)
-            out.append(self._repr(base, pos))
-        elif name == 'default':
-            var, expr = code[2], code[3]
-            if var not in ns:
-                result = self._eval(expr, ns, pos)
-                ns[var] = result
-        elif name == 'comment':
-            return
-        else:
-            assert 0, "Unknown code: %r" % name
-
-    def _interpret_for(self, vars, expr, content, ns, out):
-        __traceback_hide__ = True
-        for item in expr:
-            if len(vars) == 1:
-                ns[vars[0]] = item
-            else:
-                if len(vars) != len(item):
-                    raise ValueError(
-                        'Need %i items to unpack (got %i items)'
-                        % (len(vars), len(item)))
-                for name, value in zip(vars, item):
-                    ns[name] = value
-            try:
-                self._interpret_codes(content, ns, out)
-            except _TemplateContinue:
-                continue
-            except _TemplateBreak:
-                break
-
-    def _interpret_if(self, parts, ns, out):
-        __traceback_hide__ = True
-        # @@: if/else/else gets through
-        for part in parts:
-            assert not isinstance(part, six.string_types)
-            name, pos = part[0], part[1]
-            if name == 'else':
-                result = True
-            else:
-                result = self._eval(part[2], ns, pos)
-            if result:
-                self._interpret_codes(part[3], ns, out)
-                break
-
-    def _eval(self, code, ns, pos):
-        __traceback_hide__ = True
-        try:
-            value = eval(code, ns)
-            return value
-        except:
-            exc_info = sys.exc_info()
-            e = exc_info[1]
-            if getattr(e, 'args'):
-                arg0 = e.args[0]
-            else:
-                arg0 = str(e)
-            e.args = (self._add_line_info(arg0, pos),)
-            six.reraise(exc_info[0], e, exc_info[2])
-
-    def _exec(self, code, ns, pos):
-        __traceback_hide__ = True
-        try:
-            six.exec_(code, ns)
-        except:
-            exc_info = sys.exc_info()
-            e = exc_info[1]
-            e.args = (self._add_line_info(e.args[0], pos),)
-            six.reraise(exc_info[0], e, exc_info[2])
-
-    def _repr(self, value, pos):
-        __traceback_hide__ = True
-        try:
-            if value is None:
-                return ''
-            if self._unicode:
-                try:
-                    value = six.text_type(value)
-                except UnicodeDecodeError:
-                    value = str(value)
-            else:
-                value = str(value)
-        except:
-            exc_info = sys.exc_info()
-            e = exc_info[1]
-            e.args = (self._add_line_info(e.args[0], pos),)
-            six.reraise(exc_info[0], e, exc_info[2])
-        else:
-            if self._unicode and isinstance(value, six.binary_type):
-                if not self.default_encoding:
-                    raise UnicodeDecodeError(
-                        'Cannot decode str value %r into unicode '
-                        '(no default_encoding provided)' % value)
-                value = value.decode(self.default_encoding)
-            elif not self._unicode and isinstance(value, six.text_type):
-                if not self.default_encoding:
-                    raise UnicodeEncodeError(
-                        'Cannot encode unicode value %r into str '
-                        '(no default_encoding provided)' % value)
-                value = value.encode(self.default_encoding)
-            return value
-
-
-    def _add_line_info(self, msg, pos):
-        msg = "%s at line %s column %s" % (
-            msg, pos[0], pos[1])
-        if self.name:
-            msg += " in file %s" % self.name
-        return msg
-
-def sub(content, **kw):
-    name = kw.get('__name')
-    tmpl = Template(content, name=name)
-    return tmpl.substitute(kw)
-
-def paste_script_template_renderer(content, vars, filename=None):
-    tmpl = Template(content, name=filename)
-    return tmpl.substitute(vars)
-
-class bunch(dict):
-
-    def __init__(self, **kw):
-        for name, value in kw.items():
-            setattr(self, name, value)
-
-    def __setattr__(self, name, value):
-        self[name] = value
-
-    def __getattr__(self, name):
-        try:
-            return self[name]
-        except KeyError:
-            raise AttributeError(name)
-
-    def __getitem__(self, key):
-        if 'default' in self:
-            try:
-                return dict.__getitem__(self, key)
-            except KeyError:
-                return dict.__getitem__(self, 'default')
-        else:
-            return dict.__getitem__(self, key)
-
-    def __repr__(self):
-        items = [
-            (k, v) for k, v in self.items()]
-        items.sort()
-        return '<%s %s>' % (
-            self.__class__.__name__,
-            ' '.join(['%s=%r' % (k, v) for k, v in items]))
-
-############################################################
-## HTML Templating
-############################################################
-
-class html(object):
-    def __init__(self, value):
-        self.value = value
-    def __str__(self):
-        return self.value
-    def __repr__(self):
-        return '<%s %r>' % (
-            self.__class__.__name__, self.value)
-
-def html_quote(value):
-    if value is None:
-        return ''
-    if not isinstance(value, six.string_types):
-        if hasattr(value, '__unicode__'):
-            value = unicode(value)
-        else:
-            value = str(value)
-    value = cgi.escape(value, 1)
-    if isinstance(value, unicode):
-        value = value.encode('ascii', 'xmlcharrefreplace')
-    return value
-
-def url(v):
-    if not isinstance(v, six.string_types):
-        if hasattr(v, '__unicode__'):
-            v = unicode(v)
-        else:
-            v = str(v)
-    if isinstance(v, unicode):
-        v = v.encode('utf8')
-    return quote(v)
-
-def attr(**kw):
-    kw = kw.items()
-    kw.sort()
-    parts = []
-    for name, value in kw:
-        if value is None:
-            continue
-        if name.endswith('_'):
-            name = name[:-1]
-        parts.append('%s="%s"' % (html_quote(name), html_quote(value)))
-    return html(' '.join(parts))
-
-class HTMLTemplate(Template):
-
-    default_namespace = Template.default_namespace.copy()
-    default_namespace.update(dict(
-        html=html,
-        attr=attr,
-        url=url,
-        ))
-
-    def _repr(self, value, pos):
-        plain = Template._repr(self, value, pos)
-        if isinstance(value, html):
-            return plain
-        else:
-            return html_quote(plain)
-
-def sub_html(content, **kw):
-    name = kw.get('__name')
-    tmpl = HTMLTemplate(content, name=name)
-    return tmpl.substitute(kw)
-
-
-############################################################
-## Lexing and Parsing
-############################################################
-
-def lex(s, name=None, trim_whitespace=True):
-    """
-    Lex a string into chunks:
-
-        >>> lex('hey')
-        ['hey']
-        >>> lex('hey {{you}}')
-        ['hey ', ('you', (1, 7))]
-        >>> lex('hey {{')
-        Traceback (most recent call last):
-            ...
-        TemplateError: No }} to finish last expression at line 1 column 7
-        >>> lex('hey }}')
-        Traceback (most recent call last):
-            ...
-        TemplateError: }} outside expression at line 1 column 7
-        >>> lex('hey {{ {{')
-        Traceback (most recent call last):
-            ...
-        TemplateError: {{ inside expression at line 1 column 10
-
-    """
-    in_expr = False
-    chunks = []
-    last = 0
-    last_pos = (1, 1)
-    for match in token_re.finditer(s):
-        expr = match.group(0)
-        pos = find_position(s, match.end())
-        if expr == '{{' and in_expr:
-            raise TemplateError('{{ inside expression', position=pos,
-                                name=name)
-        elif expr == '}}' and not in_expr:
-            raise TemplateError('}} outside expression', position=pos,
-                                name=name)
-        if expr == '{{':
-            part = s[last:match.start()]
-            if part:
-                chunks.append(part)
-            in_expr = True
-        else:
-            chunks.append((s[last:match.start()], last_pos))
-            in_expr = False
-        last = match.end()
-        last_pos = pos
-    if in_expr:
-        raise TemplateError('No }} to finish last expression',
-                            name=name, position=last_pos)
-    part = s[last:]
-    if part:
-        chunks.append(part)
-    if trim_whitespace:
-        chunks = trim_lex(chunks)
-    return chunks
-
-statement_re = re.compile(r'^(?:if |elif |else |for |py:)')
-single_statements = ['endif', 'endfor', 'continue', 'break']
-trail_whitespace_re = re.compile(r'\n[\t ]*$')
-lead_whitespace_re = re.compile(r'^[\t ]*\n')
-
-def trim_lex(tokens):
-    r"""
-    Takes a lexed set of tokens, and removes whitespace when there is
-    a directive on a line by itself:
-
-       >>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False)
-       >>> tokens
-       [('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny']
-       >>> trim_lex(tokens)
-       [('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y']
-    """
-    for i in range(len(tokens)):
-        current = tokens[i]
-        if isinstance(tokens[i], six.string_types):
-            # we don't trim this
-            continue
-        item = current[0]
-        if not statement_re.search(item) and item not in single_statements:
-            continue
-        if not i:
-            prev = ''
-        else:
-            prev = tokens[i-1]
-        if i+1 >= len(tokens):
-            next = ''
-        else:
-            next = tokens[i+1]
-        if (not isinstance(next, six.string_types)
-            or not isinstance(prev, six.string_types)):
-            continue
-        if ((not prev or trail_whitespace_re.search(prev))
-            and (not next or lead_whitespace_re.search(next))):
-            if prev:
-                m = trail_whitespace_re.search(prev)
-                # +1 to leave the leading \n on:
-                prev = prev[:m.start()+1]
-                tokens[i-1] = prev
-            if next:
-                m = lead_whitespace_re.search(next)
-                next = next[m.end():]
-                tokens[i+1] = next
-    return tokens
-
-
-def find_position(string, index):
-    """Given a string and index, return (line, column)"""
-    leading = string[:index].splitlines()
-    return (len(leading), len(leading[-1])+1)
-
-def parse(s, name=None):
-    r"""
-    Parses a string into a kind of AST
-
-        >>> parse('{{x}}')
-        [('expr', (1, 3), 'x')]
-        >>> parse('foo')
-        ['foo']
-        >>> parse('{{if x}}test{{endif}}')
-        [('cond', (1, 3), ('if', (1, 3), 'x', ['test']))]
-        >>> parse('series->{{for x in y}}x={{x}}{{endfor}}')
-        ['series->', ('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])]
-        >>> parse('{{for x, y in z:}}{{continue}}{{endfor}}')
-        [('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])]
-        >>> parse('{{py:x=1}}')
-        [('py', (1, 3), 'x=1')]
-        >>> parse('{{if x}}a{{elif y}}b{{else}}c{{endif}}')
-        [('cond', (1, 3), ('if', (1, 3), 'x', ['a']), ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))]
-
-    Some exceptions::
-
-        >>> parse('{{continue}}')
-        Traceback (most recent call last):
-            ...
-        TemplateError: continue outside of for loop at line 1 column 3
-        >>> parse('{{if x}}foo')
-        Traceback (most recent call last):
-            ...
-        TemplateError: No {{endif}} at line 1 column 3
-        >>> parse('{{else}}')
-        Traceback (most recent call last):
-            ...
-        TemplateError: else outside of an if block at line 1 column 3
-        >>> parse('{{if x}}{{for x in y}}{{endif}}{{endfor}}')
-        Traceback (most recent call last):
-            ...
-        TemplateError: Unexpected endif at line 1 column 25
-        >>> parse('{{if}}{{endif}}')
-        Traceback (most recent call last):
-            ...
-        TemplateError: if with no expression at line 1 column 3
-        >>> parse('{{for x y}}{{endfor}}')
-        Traceback (most recent call last):
-            ...
-        TemplateError: Bad for (no "in") in 'x y' at line 1 column 3
-        >>> parse('{{py:x=1\ny=2}}')
-        Traceback (most recent call last):
-            ...
-        TemplateError: Multi-line py blocks must start with a newline at line 1 column 3
-    """
-    tokens = lex(s, name=name)
-    result = []
-    while tokens:
-        next, tokens = parse_expr(tokens, name)
-        result.append(next)
-    return result
-
-def parse_expr(tokens, name, context=()):
-    if isinstance(tokens[0], six.string_types):
-        return tokens[0], tokens[1:]
-    expr, pos = tokens[0]
-    expr = expr.strip()
-    if expr.startswith('py:'):
-        expr = expr[3:].lstrip(' \t')
-        if expr.startswith('\n'):
-            expr = expr[1:]
-        else:
-            if '\n' in expr:
-                raise TemplateError(
-                    'Multi-line py blocks must start with a newline',
-                    position=pos, name=name)
-        return ('py', pos, expr), tokens[1:]
-    elif expr in ('continue', 'break'):
-        if 'for' not in context:
-            raise TemplateError(
-                'continue outside of for loop',
-                position=pos, name=name)
-        return (expr, pos), tokens[1:]
-    elif expr.startswith('if '):
-        return parse_cond(tokens, name, context)
-    elif (expr.startswith('elif ')
-          or expr == 'else'):
-        raise TemplateError(
-            '%s outside of an if block' % expr.split()[0],
-            position=pos, name=name)
-    elif expr in ('if', 'elif', 'for'):
-        raise TemplateError(
-            '%s with no expression' % expr,
-            position=pos, name=name)
-    elif expr in ('endif', 'endfor'):
-        raise TemplateError(
-            'Unexpected %s' % expr,
-            position=pos, name=name)
-    elif expr.startswith('for '):
-        return parse_for(tokens, name, context)
-    elif expr.startswith('default '):
-        return parse_default(tokens, name, context)
-    elif expr.startswith('#'):
-        return ('comment', pos, tokens[0][0]), tokens[1:]
-    return ('expr', pos, tokens[0][0]), tokens[1:]
-
-def parse_cond(tokens, name, context):
-    start = tokens[0][1]
-    pieces = []
-    context = context + ('if',)
-    while 1:
-        if not tokens:
-            raise TemplateError(
-                'Missing {{endif}}',
-                position=start, name=name)
-        if (isinstance(tokens[0], tuple)
-            and tokens[0][0] == 'endif'):
-            return ('cond', start) + tuple(pieces), tokens[1:]
-        next, tokens = parse_one_cond(tokens, name, context)
-        pieces.append(next)
-
-def parse_one_cond(tokens, name, context):
-    (first, pos), tokens = tokens[0], tokens[1:]
-    content = []
-    if first.endswith(':'):
-        first = first[:-1]
-    if first.startswith('if '):
-        part = ('if', pos, first[3:].lstrip(), content)
-    elif first.startswith('elif '):
-        part = ('elif', pos, first[5:].lstrip(), content)
-    elif first == 'else':
-        part = ('else', pos, None, content)
-    else:
-        assert 0, "Unexpected token %r at %s" % (first, pos)
-    while 1:
-        if not tokens:
-            raise TemplateError(
-                'No {{endif}}',
-                position=pos, name=name)
-        if (isinstance(tokens[0], tuple)
-            and (tokens[0][0] == 'endif'
-                 or tokens[0][0].startswith('elif ')
-                 or tokens[0][0] == 'else')):
-            return part, tokens
-        next, tokens = parse_expr(tokens, name, context)
-        content.append(next)
-
-def parse_for(tokens, name, context):
-    first, pos = tokens[0]
-    tokens = tokens[1:]
-    context = ('for',) + context
-    content = []
-    assert first.startswith('for ')
-    if first.endswith(':'):
-        first = first[:-1]
-    first = first[3:].strip()
-    match = in_re.search(first)
-    if not match:
-        raise TemplateError(
-            'Bad for (no "in") in %r' % first,
-            position=pos, name=name)
-    vars = first[:match.start()]
-    if '(' in vars:
-        raise TemplateError(
-            'You cannot have () in the variable section of a for loop (%r)'
-            % vars, position=pos, name=name)
-    vars = tuple([
-        v.strip() for v in first[:match.start()].split(',')
-        if v.strip()])
-    expr = first[match.end():]
-    while 1:
-        if not tokens:
-            raise TemplateError(
-                'No {{endfor}}',
-                position=pos, name=name)
-        if (isinstance(tokens[0], tuple)
-            and tokens[0][0] == 'endfor'):
-            return ('for', pos, vars, expr, content), tokens[1:]
-        next, tokens = parse_expr(tokens, name, context)
-        content.append(next)
-
-def parse_default(tokens, name, context):
-    first, pos = tokens[0]
-    assert first.startswith('default ')
-    first = first.split(None, 1)[1]
-    parts = first.split('=', 1)
-    if len(parts) == 1:
-        raise TemplateError(
-            "Expression must be {{default var=value}}; no = found in %r" % first,
-            position=pos, name=name)
-    var = parts[0].strip()
-    if ',' in var:
-        raise TemplateError(
-            "{{default x, y = ...}} is not supported",
-            position=pos, name=name)
-    if not var_re.search(var):
-        raise TemplateError(
-            "Not a valid variable name for {{default}}: %r"
-            % var, position=pos, name=name)
-    expr = parts[1].strip()
-    return ('default', pos, var, expr), tokens[1:]
-
-_fill_command_usage = """\
-%prog [OPTIONS] TEMPLATE arg=value
-
-Use py:arg=value to set a Python value; otherwise all values are
-strings.
-"""
-
-def fill_command(args=None):
-    import sys, optparse, pkg_resources, os
-    if args is None:
-        args = sys.argv[1:]
-    dist = pkg_resources.get_distribution('Paste')
-    parser = optparse.OptionParser(
-        version=str(dist),
-        usage=_fill_command_usage)
-    parser.add_option(
-        '-o', '--output',
-        dest='output',
-        metavar="FILENAME",
-        help="File to write output to (default stdout)")
-    parser.add_option(
-        '--html',
-        dest='use_html',
-        action='store_true',
-        help="Use HTML style filling (including automatic HTML quoting)")
-    parser.add_option(
-        '--env',
-        dest='use_env',
-        action='store_true',
-        help="Put the environment in as top-level variables")
-    options, args = parser.parse_args(args)
-    if len(args) < 1:
-        print('You must give a template filename')
-        print(dir(parser))
-        assert 0
-    template_name = args[0]
-    args = args[1:]
-    vars = {}
-    if options.use_env:
-        vars.update(os.environ)
-    for value in args:
-        if '=' not in value:
-            print('Bad argument: %r' % value)
-            sys.exit(2)
-        name, value = value.split('=', 1)
-        if name.startswith('py:'):
-            name = name[:3]
-            value = eval(value)
-        vars[name] = value
-    if template_name == '-':
-        template_content = sys.stdin.read()
-        template_name = '<stdin>'
-    else:
-        f = open(template_name, 'rb')
-        template_content = f.read()
-        f.close()
-    if options.use_html:
-        TemplateClass = HTMLTemplate
-    else:
-        TemplateClass = Template
-    template = TemplateClass(template_content, name=template_name)
-    result = template.substitute(vars)
-    if options.output:
-        f = open(options.output, 'wb')
-        f.write(result)
-        f.close()
-    else:
-        sys.stdout.write(result)
-
-if __name__ == '__main__':
-    from paste.util.template import fill_command
-    fill_command()
-
-
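For context on the module removed above (a stale .orig backup of paste/util/template.py): its docstring documents a `Template` class, a `sub` shortcut, and an HTML-quoting variant. A minimal, hedged sketch of that documented API as it existed here (Python 2 era; the template strings below are made up):

    from paste.util.template import Template, sub, sub_html

    tmpl = Template('Hello {{name}}{{if excited}}!{{endif}}', name='greeting.txt')
    print(tmpl.substitute(name='world', excited=True))   # -> Hello world!

    # The sub() shortcut substitutes immediately; __name labels the template.
    print(sub('{{for x in items}}{{x}} {{endfor}}', items=[1, 2], __name='x.txt'))

    # The HTML variant quotes substituted values unless they are wrapped in html().
    print(sub_html('<p>{{content}}</p>', content='<b>unsafe</b>'))
    # -> <p>&lt;b&gt;unsafe&lt;/b&gt;</p>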
diff --git a/catapult/third_party/closure_linter/README b/catapult/third_party/closure_linter/README
index 4a21b2d..3755ae8 100644
--- a/catapult/third_party/closure_linter/README
+++ b/catapult/third_party/closure_linter/README
@@ -5,5 +5,5 @@
 
 After installing, you get two helper applications installed into /usr/local/bin:
 
-   gjslint.py - runs the linter and checks for errors
-   fixjsstyle.py - tries to fix errors automatically
+   gjslint - runs the linter and checks for errors
+   fixjsstyle - tries to fix errors automatically
diff --git a/catapult/third_party/closure_linter/README.chromium b/catapult/third_party/closure_linter/README.chromium
index bd972bb..085fa71 100644
--- a/catapult/third_party/closure_linter/README.chromium
+++ b/catapult/third_party/closure_linter/README.chromium
@@ -1,8 +1,8 @@
 Name: closure-linter
 URL: http://code.google.com/p/closure-linter/
-Version: 2.3.4
-Date: 28 Feb 2012
-Revision: 16
+Version: 2.3.19
+Date: 31 Jul 2015
+Revision: 339a468111ce1c94eecc20dc5109a8cbdf98b68e
 License: Apache 2.0
 Security Critical: no
 
@@ -12,6 +12,7 @@
 
 Local modifications:
   Removed closure_linter/testdata/
-  Added Apache License notice to closure_linter/common/tokens_test.py
+  Added Apache License notice to closure_linter/typeannotation.py
+  Added Apache License notice to closure_linter/typeannotation_test.py
 
-  Lines ending with @suppress longLineCheck do not warn.
\ No newline at end of file
+  Lines ending with @suppress longLineCheck do not warn.
diff --git a/catapult/third_party/closure_linter/closure_linter/__init__.py b/catapult/third_party/closure_linter/closure_linter/__init__.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/closure_linter/closure_linter/aliaspass.py b/catapult/third_party/closure_linter/closure_linter/aliaspass.py
new file mode 100644
index 0000000..bb37bfa
--- /dev/null
+++ b/catapult/third_party/closure_linter/closure_linter/aliaspass.py
@@ -0,0 +1,248 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Pass that scans for goog.scope aliases and lint/usage errors."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+__author__ = ('nnaze@google.com (Nathan Naze)')
+
+from closure_linter import ecmametadatapass
+from closure_linter import errors
+from closure_linter import javascripttokens
+from closure_linter import scopeutil
+from closure_linter import tokenutil
+from closure_linter.common import error
+
+
+# TODO(nnaze): Create a Pass interface and move this class, EcmaMetaDataPass,
+# and related classes onto it.
+
+
+def _GetAliasForIdentifier(identifier, alias_map):
+  """Returns the aliased_symbol name for an identifier.
+
+  Example usage:
+    >>> alias_map = {'MyClass': 'goog.foo.MyClass'}
+    >>> _GetAliasForIdentifier('MyClass.prototype.action', alias_map)
+    'goog.foo.MyClass.prototype.action'
+
+    >>> _GetAliasForIdentifier('MyClass.prototype.action', {})
+    None
+
+  Args:
+    identifier: The identifier.
+    alias_map: A dictionary mapping a symbol to an alias.
+
+  Returns:
+    The aliased symbol name or None if not found.
+  """
+  ns = identifier.split('.', 1)[0]
+  aliased_symbol = alias_map.get(ns)
+  if aliased_symbol:
+    return aliased_symbol + identifier[len(ns):]
+
+
+def _SetTypeAlias(js_type, alias_map):
+  """Updates the alias for identifiers in a type.
+
+  Args:
+    js_type: A typeannotation.TypeAnnotation instance.
+    alias_map: A dictionary mapping a symbol to an alias.
+  """
+  aliased_symbol = _GetAliasForIdentifier(js_type.identifier, alias_map)
+  if aliased_symbol:
+    js_type.alias = aliased_symbol
+  for sub_type in js_type.IterTypes():
+    _SetTypeAlias(sub_type, alias_map)
+
+
+class AliasPass(object):
+  """Pass to identify goog.scope() usages.
+
+  Identifies goog.scope() usages and finds lint/usage errors.  Notes any
+  aliases of symbols in Closurized namespaces (that is, reassignments
+  such as "var MyClass = goog.foo.MyClass;") and annotates identifiers
+  when they're using an alias (so they may be expanded to the full symbol
+  later -- e.g. "MyClass.prototype.action" refers to
+  "goog.foo.MyClass.prototype.action" when expanded).
+  """
+
+  def __init__(self, closurized_namespaces=None, error_handler=None):
+    """Creates a new pass.
+
+    Args:
+      closurized_namespaces: A set of Closurized namespaces (e.g. 'goog').
+      error_handler: An error handler to report lint errors to.
+    """
+
+    self._error_handler = error_handler
+
+    # If we have namespaces, freeze the set.
+    if closurized_namespaces:
+      closurized_namespaces = frozenset(closurized_namespaces)
+
+    self._closurized_namespaces = closurized_namespaces
+
+  def Process(self, start_token):
+    """Runs the pass on a token stream.
+
+    Args:
+      start_token: The first token in the stream.
+    """
+
+    if start_token is None:
+      return
+
+    # TODO(nnaze): Add more goog.scope usage checks.
+    self._CheckGoogScopeCalls(start_token)
+
+    # If we have closurized namespaces, identify aliased identifiers.
+    if self._closurized_namespaces:
+      context = start_token.metadata.context
+      root_context = context.GetRoot()
+      self._ProcessRootContext(root_context)
+
+  def _CheckGoogScopeCalls(self, start_token):
+    """Check goog.scope calls for lint/usage errors."""
+
+    def IsScopeToken(token):
+      return (token.type is javascripttokens.JavaScriptTokenType.IDENTIFIER and
+              token.string == 'goog.scope')
+
+    # Find all the goog.scope tokens in the file
+    scope_tokens = [t for t in start_token if IsScopeToken(t)]
+
+    for token in scope_tokens:
+      scope_context = token.metadata.context
+
+      if not (scope_context.type == ecmametadatapass.EcmaContext.STATEMENT and
+              scope_context.parent.type == ecmametadatapass.EcmaContext.ROOT):
+        self._MaybeReportError(
+            error.Error(errors.INVALID_USE_OF_GOOG_SCOPE,
+                        'goog.scope call not in global scope', token))
+
+    # There should be only one goog.scope reference.  Register errors for
+    # every instance after the first.
+    for token in scope_tokens[1:]:
+      self._MaybeReportError(
+          error.Error(errors.EXTRA_GOOG_SCOPE_USAGE,
+                      'More than one goog.scope call in file.', token))
+
+  def _MaybeReportError(self, err):
+    """Report an error to the handler (if registered)."""
+    if self._error_handler:
+      self._error_handler.HandleError(err)
+
+  @classmethod
+  def _YieldAllContexts(cls, context):
+    """Yields all contexts that are contained by the given context."""
+    yield context
+    for child_context in context.children:
+      for descendent_child in cls._YieldAllContexts(child_context):
+        yield descendent_child
+
+  @staticmethod
+  def _IsTokenInParentBlock(token, parent_block):
+    """Determines whether the given token is contained by the given block.
+
+    Args:
+      token: A token
+      parent_block: An EcmaContext.
+
+    Returns:
+      Whether the token is in a context that is or is a child of the given
+      parent_block context.
+    """
+    context = token.metadata.context
+
+    while context:
+      if context is parent_block:
+        return True
+      context = context.parent
+
+    return False
+
+  def _ProcessRootContext(self, root_context):
+    """Processes all goog.scope blocks under the root context."""
+
+    assert root_context.type is ecmametadatapass.EcmaContext.ROOT
+
+    # Process aliases in statements in the root scope for goog.module-style
+    # aliases.
+    global_alias_map = {}
+    for context in root_context.children:
+      if context.type == ecmametadatapass.EcmaContext.STATEMENT:
+        for statement_child in context.children:
+          if statement_child.type == ecmametadatapass.EcmaContext.VAR:
+            match = scopeutil.MatchModuleAlias(statement_child)
+            if match:
+              # goog.require aliases cannot use further aliases; the symbol is
+              # taken directly from the second part of the match.
+              symbol = match[1]
+              if scopeutil.IsInClosurizedNamespace(symbol,
+                                                   self._closurized_namespaces):
+                global_alias_map[match[0]] = symbol
+
+    # Process each block to find aliases.
+    for context in root_context.children:
+      self._ProcessBlock(context, global_alias_map)
+
+  def _ProcessBlock(self, context, global_alias_map):
+    """Scans a goog.scope block to find aliases and mark alias tokens."""
+    alias_map = global_alias_map.copy()
+
+    # Iterate over every token in the context. Each token points to one
+    # context, but multiple tokens may point to the same context. We only want
+    # to check each context once, so keep track of those we've seen.
+    seen_contexts = set()
+    token = context.start_token
+    while token and self._IsTokenInParentBlock(token, context):
+      token_context = token.metadata.context if token.metadata else None
+
+      # Check to see if this token is an alias.
+      if token_context and token_context not in seen_contexts:
+        seen_contexts.add(token_context)
+
+        # If this is an alias statement in the goog.scope block.
+        if (token_context.type == ecmametadatapass.EcmaContext.VAR and
+            scopeutil.IsGoogScopeBlock(token_context.parent.parent)):
+          match = scopeutil.MatchAlias(token_context)
+
+          # If this is an alias, remember it in the map.
+          if match:
+            alias, symbol = match
+            symbol = _GetAliasForIdentifier(symbol, alias_map) or symbol
+            if scopeutil.IsInClosurizedNamespace(symbol,
+                                                 self._closurized_namespaces):
+              alias_map[alias] = symbol
+
+      # If this token is an identifier that matches an alias,
+      # mark the token as an alias to the original symbol.
+      if (token.type is javascripttokens.JavaScriptTokenType.SIMPLE_LVALUE or
+          token.type is javascripttokens.JavaScriptTokenType.IDENTIFIER):
+        identifier = tokenutil.GetIdentifierForToken(token)
+        if identifier:
+          aliased_symbol = _GetAliasForIdentifier(identifier, alias_map)
+          if aliased_symbol:
+            token.metadata.aliased_symbol = aliased_symbol
+
+      elif token.type == javascripttokens.JavaScriptTokenType.DOC_FLAG:
+        flag = token.attached_object
+        if flag and flag.HasType() and flag.jstype:
+          _SetTypeAlias(flag.jstype, alias_map)
+
+      token = token.next  # Get next token
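The doctest on _GetAliasForIdentifier above already shows the expansion rule; as a slightly larger hedged illustration (the alias_map entries and identifiers below are invented), the mapping the pass records looks like this:

    from closure_linter.aliaspass import _GetAliasForIdentifier

    alias_map = {'MyClass': 'goog.foo.MyClass', 'Event': 'goog.events.Event'}
    for ident in ('MyClass.prototype.action', 'Event.Something', 'other.Thing'):
        # Only the first dotted segment is looked up; unknown prefixes yield None.
        print('%s -> %s' % (ident, _GetAliasForIdentifier(ident, alias_map)))
    # MyClass.prototype.action -> goog.foo.MyClass.prototype.action
    # Event.Something -> goog.events.Event.Something
    # other.Thing -> None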
diff --git a/catapult/third_party/closure_linter/closure_linter/aliaspass_test.py b/catapult/third_party/closure_linter/closure_linter/aliaspass_test.py
new file mode 100644
index 0000000..7042e53
--- /dev/null
+++ b/catapult/third_party/closure_linter/closure_linter/aliaspass_test.py
@@ -0,0 +1,191 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for the aliaspass module."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+__author__ = ('nnaze@google.com (Nathan Naze)')
+
+import unittest as googletest
+
+from closure_linter import aliaspass
+from closure_linter import errors
+from closure_linter import javascriptstatetracker
+from closure_linter import testutil
+from closure_linter.common import erroraccumulator
+
+
+def _GetTokenByLineAndString(start_token, string, line_number):
+  for token in start_token:
+    if token.line_number == line_number and token.string == string:
+      return token
+
+
+class AliasPassTest(googletest.TestCase):
+
+  def testInvalidGoogScopeCall(self):
+    start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCOPE_SCRIPT)
+
+    error_accumulator = erroraccumulator.ErrorAccumulator()
+    alias_pass = aliaspass.AliasPass(
+        error_handler=error_accumulator)
+    alias_pass.Process(start_token)
+
+    alias_errors = error_accumulator.GetErrors()
+    self.assertEquals(1, len(alias_errors))
+
+    alias_error = alias_errors[0]
+
+    self.assertEquals(errors.INVALID_USE_OF_GOOG_SCOPE, alias_error.code)
+    self.assertEquals('goog.scope', alias_error.token.string)
+
+  def testAliasedIdentifiers(self):
+    start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_ALIAS_SCRIPT)
+    alias_pass = aliaspass.AliasPass(set(['goog', 'myproject']))
+    alias_pass.Process(start_token)
+
+    alias_token = _GetTokenByLineAndString(start_token, 'Event', 4)
+    self.assertTrue(alias_token.metadata.is_alias_definition)
+
+    my_class_token = _GetTokenByLineAndString(start_token, 'myClass', 9)
+    self.assertIsNone(my_class_token.metadata.aliased_symbol)
+
+    component_token = _GetTokenByLineAndString(start_token, 'Component', 17)
+    self.assertEquals('goog.ui.Component',
+                      component_token.metadata.aliased_symbol)
+
+    event_token = _GetTokenByLineAndString(start_token, 'Event.Something', 17)
+    self.assertEquals('goog.events.Event.Something',
+                      event_token.metadata.aliased_symbol)
+
+    non_closurized_token = _GetTokenByLineAndString(
+        start_token, 'NonClosurizedClass', 18)
+    self.assertIsNone(non_closurized_token.metadata.aliased_symbol)
+
+    long_start_token = _GetTokenByLineAndString(start_token, 'Event', 24)
+    self.assertEquals('goog.events.Event.MultilineIdentifier.someMethod',
+                      long_start_token.metadata.aliased_symbol)
+
+  def testAliasedDoctypes(self):
+    """Tests that aliases are correctly expanded within type annotations."""
+    start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_ALIAS_SCRIPT)
+    tracker = javascriptstatetracker.JavaScriptStateTracker()
+    tracker.DocFlagPass(start_token, error_handler=None)
+
+    alias_pass = aliaspass.AliasPass(set(['goog', 'myproject']))
+    alias_pass.Process(start_token)
+
+    flag_token = _GetTokenByLineAndString(start_token, '@type', 22)
+    self.assertEquals(
+        'goog.events.Event.<goog.ui.Component,Array<myproject.foo.MyClass>>',
+        repr(flag_token.attached_object.jstype))
+
+  def testModuleAlias(self):
+    start_token = testutil.TokenizeSourceAndRunEcmaPass("""
+goog.module('goog.test');
+var Alias = goog.require('goog.Alias');
+Alias.use();
+""")
+    alias_pass = aliaspass.AliasPass(set(['goog']))
+    alias_pass.Process(start_token)
+    alias_token = _GetTokenByLineAndString(start_token, 'Alias', 3)
+    self.assertTrue(alias_token.metadata.is_alias_definition)
+
+  def testMultipleGoogScopeCalls(self):
+    start_token = testutil.TokenizeSourceAndRunEcmaPass(
+        _TEST_MULTIPLE_SCOPE_SCRIPT)
+
+    error_accumulator = erroraccumulator.ErrorAccumulator()
+
+    alias_pass = aliaspass.AliasPass(
+        set(['goog', 'myproject']),
+        error_handler=error_accumulator)
+    alias_pass.Process(start_token)
+
+    alias_errors = error_accumulator.GetErrors()
+
+    self.assertEquals(3, len(alias_errors))
+
+    error = alias_errors[0]
+    self.assertEquals(errors.INVALID_USE_OF_GOOG_SCOPE, error.code)
+    self.assertEquals(7, error.token.line_number)
+
+    error = alias_errors[1]
+    self.assertEquals(errors.EXTRA_GOOG_SCOPE_USAGE, error.code)
+    self.assertEquals(7, error.token.line_number)
+
+    error = alias_errors[2]
+    self.assertEquals(errors.EXTRA_GOOG_SCOPE_USAGE, error.code)
+    self.assertEquals(11, error.token.line_number)
+
+
+_TEST_ALIAS_SCRIPT = """
+goog.scope(function() {
+var events = goog.events; // scope alias
+var Event = events.
+    Event; // nested multiline scope alias
+
+// This should not be registered as an aliased identifier because
+// it appears before the alias.
+var myClass = new MyClass();
+
+var Component = goog.ui.Component; // scope alias
+var MyClass = myproject.foo.MyClass; // scope alias
+
+// Scope alias of non-Closurized namespace.
+var NonClosurizedClass = aaa.bbb.NonClosurizedClass;
+
+var component = new Component(Event.Something);
+var nonClosurized = NonClosurizedClass();
+
+/**
+ * A created namespace with a really long identifier.
+ * @type {events.Event.<Component,Array<MyClass>}
+ */
+Event.
+    MultilineIdentifier.
+        someMethod = function() {};
+});
+"""
+
+_TEST_SCOPE_SCRIPT = """
+function foo () {
+  // This goog.scope call is invalid.
+  goog.scope(function() {
+
+  });
+}
+"""
+
+_TEST_MULTIPLE_SCOPE_SCRIPT = """
+goog.scope(function() {
+  // do nothing
+});
+
+function foo() {
+  var test = goog.scope; // We should not see goog.scope mentioned.
+}
+
+// This goog.scope is invalid. There can be only one.
+goog.scope(function() {
+
+});
+"""
+
+
+if __name__ == '__main__':
+  googletest.main()
diff --git a/catapult/third_party/closure_linter/closure_linter/checker.py b/catapult/third_party/closure_linter/closure_linter/checker.py
old mode 100755
new mode 100644
index 5c0fa81..1c98417
--- a/catapult/third_party/closure_linter/closure_linter/checker.py
+++ b/catapult/third_party/closure_linter/closure_linter/checker.py
@@ -21,18 +21,12 @@
 
 import gflags as flags
 
+from closure_linter import aliaspass
 from closure_linter import checkerbase
 from closure_linter import closurizednamespacesinfo
-from closure_linter import ecmametadatapass
 from closure_linter import javascriptlintrules
-from closure_linter import javascriptstatetracker
-from closure_linter.common import lintrunner
 
-flags.DEFINE_list('limited_doc_files', ['dummy.js', 'externs.js'],
-                  'List of files with relaxed documentation checks. Will not '
-                  'report errors for missing documentation, some missing '
-                  'descriptions, or methods whose @return tags don\'t have a '
-                  'matching return statement.')
+
 flags.DEFINE_list('closurized_namespaces', '',
                   'Namespace prefixes, used for testing of'
                   'goog.provide/require')
@@ -44,65 +38,66 @@
 class JavaScriptStyleChecker(checkerbase.CheckerBase):
   """Checker that applies JavaScriptLintRules."""
 
-  def __init__(self, error_handler):
+  def __init__(self, state_tracker, error_handler):
     """Initialize an JavaScriptStyleChecker object.
 
     Args:
+      state_tracker: State tracker.
       error_handler: Error handler to pass all errors to.
     """
     self._namespaces_info = None
+    self._alias_pass = None
     if flags.FLAGS.closurized_namespaces:
       self._namespaces_info = (
           closurizednamespacesinfo.ClosurizedNamespacesInfo(
               flags.FLAGS.closurized_namespaces,
               flags.FLAGS.ignored_extra_namespaces))
 
+      self._alias_pass = aliaspass.AliasPass(
+          flags.FLAGS.closurized_namespaces, error_handler)
+
     checkerbase.CheckerBase.__init__(
         self,
         error_handler=error_handler,
         lint_rules=javascriptlintrules.JavaScriptLintRules(
             self._namespaces_info),
-        state_tracker=javascriptstatetracker.JavaScriptStateTracker(),
-        metadata_pass=ecmametadatapass.EcmaMetaDataPass(),
-        limited_doc_files=flags.FLAGS.limited_doc_files)
+        state_tracker=state_tracker)
 
-  def _CheckTokens(self, token, parse_error, debug_tokens):
+  def Check(self, start_token, limited_doc_checks=False, is_html=False,
+            stop_token=None):
     """Checks a token stream for lint warnings/errors.
 
     Adds a separate pass for computing dependency information based on
     goog.require and goog.provide statements prior to the main linting pass.
 
     Args:
-      token: The first token in the token stream.
-      parse_error: A ParseError if any errors occurred.
-      debug_tokens: Whether every token should be printed as it is encountered
-          during the pass.
-
-    Returns:
-      A boolean indicating whether the full token stream could be checked or if
-      checking failed prematurely.
+      start_token: The first token in the token stream.
+      limited_doc_checks: Whether to perform limited checks.
+      is_html: Whether this token stream is HTML.
+      stop_token: If given, checks should stop at this token.
     """
+    self._lint_rules.Initialize(self, limited_doc_checks, is_html)
+
+    self._state_tracker.DocFlagPass(start_token, self._error_handler)
+
+    if self._alias_pass:
+      self._alias_pass.Process(start_token)
+
     # To maximize the amount of errors that get reported before a parse error
     # is displayed, don't run the dependency pass if a parse error exists.
-    if self._namespaces_info and not parse_error:
+    if self._namespaces_info:
       self._namespaces_info.Reset()
-      result = (self._ExecutePass(token, self._DependencyPass) and
-                self._ExecutePass(token, self._LintPass,
-                                  debug_tokens=debug_tokens))
-    else:
-      result = self._ExecutePass(token, self._LintPass, parse_error,
-                                 debug_tokens)
+      self._ExecutePass(start_token, self._DependencyPass, stop_token)
 
-    if not result:
-      return False
+    self._ExecutePass(start_token, self._LintPass, stop_token)
 
-    self._lint_rules.Finalize(self._state_tracker, self._tokenizer.mode)
-
-    self._error_handler.FinishFile()
-    return True
+    # If we have a stop_token, we didn't end up reading the whole file and,
+    # thus, don't call Finalize to do end-of-file checks.
+    if not stop_token:
+      self._lint_rules.Finalize(self._state_tracker)
 
   def _DependencyPass(self, token):
-    """Processes an invidual token for dependency information.
+    """Processes an individual token for dependency information.
 
     Used to encapsulate the logic needed to process an individual token so that
     it can be passed to _ExecutePass.
@@ -111,20 +106,3 @@
       token: The token to process.
     """
     self._namespaces_info.ProcessToken(token, self._state_tracker)
-
-
-class GJsLintRunner(lintrunner.LintRunner):
-  """Wrapper class to run GJsLint."""
-
-  def Run(self, filenames, error_handler):
-    """Run GJsLint on the given filenames.
-
-    Args:
-      filenames: The filenames to check
-      error_handler: An ErrorHandler object.
-    """
-    checker = JavaScriptStyleChecker(error_handler)
-
-    # Check the list of files.
-    for filename in filenames:
-      checker.Check(filename)
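A hedged sketch of the calling convention after this refactor, pieced together from the new signatures above and the helpers used in aliaspass_test.py (testutil, ErrorAccumulator, JavaScriptStateTracker); it assumes gflags defaults are in effect and the one-line source is arbitrary:

    from closure_linter import checker, javascriptstatetracker, testutil
    from closure_linter.common import erroraccumulator

    start_token = testutil.TokenizeSourceAndRunEcmaPass('var x = 1;')
    accumulator = erroraccumulator.ErrorAccumulator()
    tracker = javascriptstatetracker.JavaScriptStateTracker()

    # The checker now takes the state tracker and error handler up front and
    # runs the doc-flag, alias (when closurized namespaces are configured),
    # dependency and lint passes from Check().
    style_checker = checker.JavaScriptStyleChecker(tracker, accumulator)
    style_checker.Check(start_token)
    print(accumulator.GetErrors())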
diff --git a/catapult/third_party/closure_linter/closure_linter/checkerbase.py b/catapult/third_party/closure_linter/closure_linter/checkerbase.py
old mode 100755
new mode 100644
index c7735a0..6679ded
--- a/catapult/third_party/closure_linter/closure_linter/checkerbase.py
+++ b/catapult/third_party/closure_linter/closure_linter/checkerbase.py
@@ -16,27 +16,15 @@
 
 """Base classes for writing checkers that operate on tokens."""
 
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
 __author__ = ('robbyw@google.com (Robert Walker)',
               'ajp@google.com (Andy Perelson)',
               'jacobr@google.com (Jacob Richman)')
 
-import StringIO
-import traceback
-
-import gflags as flags
-from closure_linter import ecmametadatapass
 from closure_linter import errorrules
-from closure_linter import errors
-from closure_linter import javascripttokenizer
 from closure_linter.common import error
-from closure_linter.common import htmlutil
-
-FLAGS = flags.FLAGS
-flags.DEFINE_boolean('debug_tokens', False,
-                     'Whether to print all tokens for debugging.')
-
-flags.DEFINE_boolean('error_trace', False,
-                     'Whether to show error exceptions.')
 
 
 class LintRulesBase(object):
@@ -83,12 +71,11 @@
     """
     raise TypeError('Abstract method CheckToken not implemented')
 
-  def Finalize(self, parser_state, tokenizer_mode):
+  def Finalize(self, parser_state):
     """Perform all checks that need to occur after all lines are processed.
 
     Args:
       parser_state: State of the parser after parsing all tokens
-      tokenizer_mode: Mode of the tokenizer after parsing the entire page
 
     Raises:
       TypeError: If not overridden.
@@ -99,8 +86,7 @@
 class CheckerBase(object):
   """This class handles checking a LintRules object against a file."""
 
-  def __init__(self, error_handler, lint_rules, state_tracker,
-               limited_doc_files=None, metadata_pass=None):
+  def __init__(self, error_handler, lint_rules, state_tracker):
     """Initialize a checker object.
 
     Args:
@@ -108,19 +94,11 @@
       lint_rules: LintRules object defining lint errors given a token
         and state_tracker object.
       state_tracker: Object that tracks the current state in the token stream.
-      limited_doc_files: List of filenames that are not required to have
-        documentation comments.
-      metadata_pass: Object that builds metadata about the token stream.
+
     """
     self._error_handler = error_handler
     self._lint_rules = lint_rules
     self._state_tracker = state_tracker
-    self._metadata_pass = metadata_pass
-    self._limited_doc_files = limited_doc_files
-
-    # TODO(user): Factor out. A checker does not need to know about the
-    # tokenizer, only the token stream.
-    self._tokenizer = javascripttokenizer.JavaScriptTokenizer()
 
     self._has_errors = False
 
@@ -148,103 +126,21 @@
     """
     return self._has_errors
 
-  def Check(self, filename, source=None):
-    """Checks the file, printing warnings and errors as they are found.
+  def Check(self, start_token, limited_doc_checks=False, is_html=False,
+            stop_token=None):
+    """Checks a token stream, reporting errors to the error reporter.
 
     Args:
-      filename: The name of the file to check.
-      source: Optional. The contents of the file.  Can be either a string or
-          file-like object.  If omitted, contents will be read from disk from
-          the given filename.
-    """
-
-    if source is None:
-      try:
-        f = open(filename)
-      except IOError:
-        self._error_handler.HandleFile(filename, None)
-        self.HandleError(errors.FILE_NOT_FOUND, 'File not found', None)
-        self._error_handler.FinishFile()
-        return
-    else:
-      if type(source) in [str, unicode]:
-        f = StringIO.StringIO(source)
-      else:
-        f = source
-
-    try:
-      if filename.endswith('.html') or filename.endswith('.htm'):
-        self.CheckLines(filename, htmlutil.GetScriptLines(f), True)
-      else:
-        self.CheckLines(filename, f, False)
-    finally:
-      f.close()
-
-  def CheckLines(self, filename, lines_iter, is_html):
-    """Checks a file, given as an iterable of lines, for warnings and errors.
-
-    Args:
-      filename: The name of the file to check.
-      lines_iter: An iterator that yields one line of the file at a time.
+      start_token: First token in token stream.
+      limited_doc_checks: Whether doc checking is relaxed for this file.
       is_html: Whether the file being checked is an HTML file with extracted
           contents.
-
-    Returns:
-      A boolean indicating whether the full file could be checked or if checking
-      failed prematurely.
+      stop_token: If given, check should stop at this token.
     """
-    limited_doc_checks = False
-    if self._limited_doc_files:
-      for limited_doc_filename in self._limited_doc_files:
-        if filename.endswith(limited_doc_filename):
-          limited_doc_checks = True
-          break
 
-    lint_rules = self._lint_rules
-    lint_rules.Initialize(self, limited_doc_checks, is_html)
-
-    token = self._tokenizer.TokenizeFile(lines_iter)
-
-    parse_error = None
-    if self._metadata_pass:
-      try:
-        self._metadata_pass.Reset()
-        self._metadata_pass.Process(token)
-      except ecmametadatapass.ParseError, caught_parse_error:
-        if FLAGS.error_trace:
-          traceback.print_exc()
-        parse_error = caught_parse_error
-      except Exception:
-        print 'Internal error in %s' % filename
-        traceback.print_exc()
-        return False
-
-    self._error_handler.HandleFile(filename, token)
-
-    return self._CheckTokens(token, parse_error=parse_error,
-                             debug_tokens=FLAGS.debug_tokens)
-
-  def _CheckTokens(self, token, parse_error, debug_tokens):
-    """Checks a token stream for lint warnings/errors.
-
-    Args:
-      token: The first token in the token stream to check.
-      parse_error: A ParseError if any errors occurred.
-      debug_tokens: Whether every token should be printed as it is encountered
-          during the pass.
-
-    Returns:
-      A boolean indicating whether the full token stream could be checked or if
-      checking failed prematurely.
-    """
-    result = self._ExecutePass(token, self._LintPass, parse_error, debug_tokens)
-
-    if not result:
-      return False
-
-    self._lint_rules.Finalize(self._state_tracker, self._tokenizer.mode)
-    self._error_handler.FinishFile()
-    return True
+    self._lint_rules.Initialize(self, limited_doc_checks, is_html)
+    self._ExecutePass(start_token, self._LintPass, stop_token=stop_token)
+    self._lint_rules.Finalize(self._state_tracker)
 
   def _LintPass(self, token):
     """Checks an individual token for lint warnings/errors.
@@ -257,8 +153,7 @@
     """
     self._lint_rules.CheckToken(token, self._state_tracker)
 
-  def _ExecutePass(self, token, pass_function, parse_error=None,
-                   debug_tokens=False):
+  def _ExecutePass(self, token, pass_function, stop_token=None):
     """Calls the given function for every token in the given token stream.
 
     As each token is passed to the given function, state is kept up to date and,
@@ -270,43 +165,28 @@
     Args:
       token: The first token in the token stream.
       pass_function: The function to call for each token in the token stream.
-      parse_error: A ParseError if any errors occurred.
-      debug_tokens: Whether every token should be printed as it is encountered
-          during the pass.
-
-    Returns:
-      A boolean indicating whether the full token stream could be checked or if
-      checking failed prematurely.
+      stop_token: The last token to check (if given).
 
     Raises:
       Exception: If any error occurred while calling the given function.
     """
+
     self._state_tracker.Reset()
     while token:
-      if debug_tokens:
-        print token
+      # When we are looking at a token and decided to delete the whole line, we
+      # will delete all of them in the "HandleToken()" below.  So the current
+      # token and subsequent ones may already be deleted here.  The way we
+      # delete a token does not wipe out the previous and next pointers of the
+      # deleted token.  So we need to check the token itself to make sure it is
+      # not deleted.
+      if not token.is_deleted:
+        # End the pass at the stop token
+        if stop_token and token is stop_token:
+          return
 
-      if parse_error and parse_error.token == token:
-        message = ('Error parsing file at token "%s". Unable to '
-                   'check the rest of file.' % token.string)
-        self.HandleError(errors.FILE_DOES_NOT_PARSE, message, token)
-        self._error_handler.FinishFile()
-        return
-
-      try:
         self._state_tracker.HandleToken(
             token, self._state_tracker.GetLastNonSpaceToken())
         pass_function(token)
         self._state_tracker.HandleAfterToken(token)
-      except:
-        if FLAGS.error_trace:
-          raise
-        else:
-          self.HandleError(errors.FILE_DOES_NOT_PARSE,
-                           ('Error parsing file at token "%s". Unable to '
-                            'check the rest of file.' % token.string),
-                           token)
-          self._error_handler.FinishFile()
-        return False
+
       token = token.next
-    return True
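
For readers skimming the checkerbase.py change above: the pass now walks the token list directly, honouring an optional stop_token and skipping tokens whose line has been deleted. The sketch below is illustrative only; FakeToken and walk_tokens are hypothetical stand-ins written for this note, not closure_linter APIs, and they mirror just the fields the loop touches (next, is_deleted).

class FakeToken(object):
  """Hypothetical stand-in for a linter token; only the fields the loop reads."""

  def __init__(self, string):
    self.string = string
    self.is_deleted = False
    self.next = None


def walk_tokens(start_token, visit, stop_token=None):
  """Visits live tokens in order, stopping at stop_token if given."""
  token = start_token
  while token:
    if not token.is_deleted:
      # Mirrors _ExecutePass: the stop check happens only for live tokens.
      if stop_token and token is stop_token:
        return
      visit(token)
    token = token.next


# Usage: 'b' is skipped because it was deleted; the walk stops at 'c'.
a, b, c = FakeToken('a'), FakeToken('b'), FakeToken('c')
a.next, b.next = b, c
b.is_deleted = True
visited = []
walk_tokens(a, visited.append, stop_token=c)
assert [t.string for t in visited] == ['a']
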
diff --git a/catapult/third_party/closure_linter/closure_linter/closurizednamespacesinfo.py b/catapult/third_party/closure_linter/closure_linter/closurizednamespacesinfo.py
old mode 100755
new mode 100644
index f2902dc..18b5a2a
--- a/catapult/third_party/closure_linter/closure_linter/closurizednamespacesinfo.py
+++ b/catapult/third_party/closure_linter/closure_linter/closurizednamespacesinfo.py
@@ -24,17 +24,46 @@
 
 
 
+import re
+
 from closure_linter import javascripttokens
 from closure_linter import tokenutil
 
-# pylint: disable-msg=C6409
+# pylint: disable=g-bad-name
 TokenType = javascripttokens.JavaScriptTokenType
 
 DEFAULT_EXTRA_NAMESPACES = [
-  'goog.testing.asserts',
-  'goog.testing.jsunit',
+    'goog.testing.asserts',
+    'goog.testing.jsunit',
 ]
 
+
+class UsedNamespace(object):
+  """A type for information about a used namespace."""
+
+  def __init__(self, namespace, identifier, token, alias_definition):
+    """Initializes the instance.
+
+    Args:
+      namespace: the namespace of an identifier used in the file
+      identifier: the complete identifier
+      token: the token that uses the namespace
+      alias_definition: a boolean stating whether the namespace is only used
+          for an alias definition and should not be required.
+    """
+    self.namespace = namespace
+    self.identifier = identifier
+    self.token = token
+    self.alias_definition = alias_definition
+
+  def GetLine(self):
+    return self.token.line_number
+
+  def __repr__(self):
+    return 'UsedNamespace(%s)' % ', '.join(
+        ['%s=%s' % (k, repr(v)) for k, v in self.__dict__.iteritems()])
+
+
 class ClosurizedNamespacesInfo(object):
   """Dependency information for closurized JavaScript files.
 
@@ -79,11 +108,11 @@
     # two lists would only have to contain namespaces.
 
     # A list of tuples where the first element is the namespace of an identifier
-    # created in the file and the second is the identifier itself.
+    # created in the file, the second is the identifier itself and the third is
+    # the line number where it's created.
     self._created_namespaces = []
 
-    # A list of tuples where the first element is the namespace of an identifier
-    # used in the file and the second is the identifier itself.
+    # A list of UsedNamespace instances.
     self._used_namespaces = []
 
     # A list of seemingly-unnecessary namespaces that are goog.required() and
@@ -111,7 +140,7 @@
       A list of strings where each string is a 'namespace' corresponding to an
       existing goog.provide statement in the file being checked.
     """
-    return list(self._provided_namespaces)
+    return set(self._provided_namespaces)
 
   def GetRequiredNamespaces(self):
     """Returns the namespaces which are already required by this file.
@@ -120,7 +149,7 @@
       A list of strings where each string is a 'namespace' corresponding to an
       existing goog.require statement in the file being checked.
     """
-    return list(self._required_namespaces)
+    return set(self._required_namespaces)
 
   def IsExtraProvide(self, token):
     """Returns whether the given goog.provide token is unnecessary.
@@ -132,20 +161,16 @@
       True if the given token corresponds to an unnecessary goog.provide
       statement, otherwise False.
     """
-    if self._scopified_file:
-      return False
+    namespace = tokenutil.GetStringAfterToken(token)
 
-    namespace = tokenutil.Search(token, TokenType.STRING_TEXT).string
-
-    base_namespace = namespace.split('.', 1)[0]
-    if base_namespace not in self._closurized_namespaces:
+    if self.GetClosurizedNamespace(namespace) is None:
       return False
 
     if token in self._duplicate_provide_tokens:
       return True
 
     # TODO(user): There's probably a faster way to compute this.
-    for created_namespace, created_identifier in self._created_namespaces:
+    for created_namespace, created_identifier, _ in self._created_namespaces:
       if namespace == created_namespace or namespace == created_identifier:
         return False
 
@@ -161,13 +186,9 @@
       True if the given token corresponds to an unnecessary goog.require
       statement, otherwise False.
     """
-    if self._scopified_file:
-      return False
+    namespace = tokenutil.GetStringAfterToken(token)
 
-    namespace = tokenutil.Search(token, TokenType.STRING_TEXT).string
-
-    base_namespace = namespace.split('.', 1)[0]
-    if base_namespace not in self._closurized_namespaces:
+    if self.GetClosurizedNamespace(namespace) is None:
       return False
 
     if namespace in self._ignored_extra_namespaces:
@@ -186,34 +207,34 @@
       return True
 
     # TODO(user): There's probably a faster way to compute this.
-    for used_namespace, used_identifier in self._used_namespaces:
-      if namespace == used_namespace or namespace == used_identifier:
+    for ns in self._used_namespaces:
+      if (not ns.alias_definition and (
+          namespace == ns.namespace or namespace == ns.identifier)):
         return False
 
     return True
 
   def GetMissingProvides(self):
-    """Returns the set of missing provided namespaces for the current file.
+    """Returns the dict of missing provided namespaces for the current file.
 
     Returns:
-      Returns a set of strings where each string is a namespace that should be
-      provided by this file, but is not.
+      Returns a dictionary whose keys (strings) are namespaces that should be
+      provided by this file but are not, and whose values (integers) are the
+      line numbers where each namespace is first defined.
     """
-    if self._scopified_file:
-      return set()
-
-    missing_provides = set()
-    for namespace, identifier in self._created_namespaces:
+    missing_provides = dict()
+    for namespace, identifier, line_number in self._created_namespaces:
       if (not self._IsPrivateIdentifier(identifier) and
           namespace not in self._provided_namespaces and
           identifier not in self._provided_namespaces and
-          namespace not in self._required_namespaces):
-        missing_provides.add(namespace)
+          namespace not in self._required_namespaces and
+          namespace not in missing_provides):
+        missing_provides[namespace] = line_number
 
     return missing_provides
 
   def GetMissingRequires(self):
-    """Returns the set of missing required namespaces for the current file.
+    """Returns the dict of missing required namespaces for the current file.
 
     For each non-private identifier used in the file, find either a
     goog.require, goog.provide or a created identifier that satisfies it.
@@ -227,34 +248,82 @@
     can't always detect the creation of the namespace.
 
     Returns:
-      Returns a set of strings where each string is a namespace that should be
-      required by this file, but is not.
+      Returns a tuple (missing_requires, illegal_alias_statements), where
+      missing_requires is a dictionary whose keys (strings) are namespaces that
+      should be required by this file but are not, and whose values (integers)
+      are the line numbers where each namespace is first used, and
+      illegal_alias_statements maps namespaces to the alias-definition tokens
+      that are not satisfied by any provided, created, or required namespace.
     """
-    if self._scopified_file:
-      return set()
-
     external_dependencies = set(self._required_namespaces)
 
     # Assume goog namespace is always available.
     external_dependencies.add('goog')
+    # goog.module is treated as a builtin, too (for goog.module.get).
+    external_dependencies.add('goog.module')
 
     created_identifiers = set()
-    for namespace, identifier in self._created_namespaces:
+    for unused_namespace, identifier, unused_line_number in (
+        self._created_namespaces):
       created_identifiers.add(identifier)
 
-    missing_requires = set()
-    for namespace, identifier in self._used_namespaces:
-      if (not self._IsPrivateIdentifier(identifier) and
+    missing_requires = dict()
+    illegal_alias_statements = dict()
+
+    def ShouldRequireNamespace(namespace, identifier):
+      """Checks if a namespace would normally be required."""
+      return (
+          not self._IsPrivateIdentifier(identifier) and
           namespace not in external_dependencies and
           namespace not in self._provided_namespaces and
           identifier not in external_dependencies and
-          identifier not in created_identifiers):
-        missing_requires.add(namespace)
+          identifier not in created_identifiers and
+          namespace not in missing_requires)
 
-    return missing_requires
+    # First check all the used identifiers where we know that their namespace
+    # needs to be provided (unless they are optional).
+    for ns in self._used_namespaces:
+      namespace = ns.namespace
+      identifier = ns.identifier
+      if (not ns.alias_definition and
+          ShouldRequireNamespace(namespace, identifier)):
+        missing_requires[namespace] = ns.GetLine()
+
+    # Now that all required namespaces are known, we can check if the alias
+    # definitions (that are likely being used for type annotations that don't
+    # need explicit goog.require statements) are already covered. If not,
+    # the user shouldn't use the alias.
+    for ns in self._used_namespaces:
+      if (not ns.alias_definition or
+          not ShouldRequireNamespace(ns.namespace, ns.identifier)):
+        continue
+      if self._FindNamespace(ns.identifier, self._provided_namespaces,
+                             created_identifiers, external_dependencies,
+                             missing_requires):
+        continue
+      namespace = ns.identifier.rsplit('.', 1)[0]
+      illegal_alias_statements[namespace] = ns.token
+
+    return missing_requires, illegal_alias_statements
+
+  def _FindNamespace(self, identifier, *namespaces_list):
+    """Finds the namespace of an identifier given a list of other namespaces.
+
+    Args:
+      identifier: An identifier whose parent needs to be defined.
+          e.g. for goog.bar.foo we search for something that provides
+          goog.bar.
+      *namespaces_list: varargs of iterables of namespace identifiers.
+    Returns:
+      The namespace that the given identifier is part of or None.
+    """
+    identifier = identifier.rsplit('.', 1)[0]
+    identifier_prefix = identifier + '.'
+    for namespaces in namespaces_list:
+      for namespace in namespaces:
+        if namespace == identifier or namespace.startswith(identifier_prefix):
+          return namespace
+    return None
 
   def _IsPrivateIdentifier(self, identifier):
-    """Returns whether the given identifer is private."""
+    """Returns whether the given identifier is private."""
     pieces = identifier.split('.')
     for piece in pieces:
       if piece.endswith('_'):
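
A small worked example of the _FindNamespace lookup documented above may help. The helper below restates the same prefix search under a hypothetical name, _prefix_satisfies, written purely for illustration; it is not part of closure_linter.

def _prefix_satisfies(identifier, namespaces):
  """Returns a namespace that covers the parent of identifier, or None."""
  parent = identifier.rsplit('.', 1)[0]
  prefix = parent + '.'
  for namespace in namespaces:
    if namespace == parent or namespace.startswith(prefix):
      return namespace
  return None


# For an alias target goog.bar.foo, the parent goog.bar must be available:
assert _prefix_satisfies('goog.bar.foo', ['goog.bar']) == 'goog.bar'
# A namespace nested under goog.bar. also covers it.
assert _prefix_satisfies('goog.bar.foo', ['goog.bar.baz.Qux']) == 'goog.bar.baz.Qux'
# An unrelated namespace does not.
assert _prefix_satisfies('goog.bar.foo', ['goog.events']) is None
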
@@ -295,7 +364,7 @@
 
     if token.type == TokenType.IDENTIFIER:
       # TODO(user): Consider saving the whole identifier in metadata.
-      whole_identifier_string = self._GetWholeIdentifierString(token)
+      whole_identifier_string = tokenutil.GetIdentifierForToken(token)
       if whole_identifier_string is None:
         # We only want to process the identifier one time. If the whole string
         # identifier is None, that means this token was part of a multi-token
@@ -306,7 +375,7 @@
       # just ignore it (e.g. dynamic loading in test runners).
       if token.string == 'goog.require' and not state_tracker.InFunction():
         self._require_tokens.append(token)
-        namespace = tokenutil.Search(token, TokenType.STRING_TEXT).string
+        namespace = tokenutil.GetStringAfterToken(token)
         if namespace in self._required_namespaces:
           self._duplicate_require_tokens.append(token)
         else:
@@ -314,14 +383,13 @@
 
         # If there is a suppression for the require, add a usage for it so it
         # gets treated as a regular goog.require (i.e. still gets sorted).
-        jsdoc = state_tracker.GetDocComment()
-        if jsdoc and ('extraRequire' in jsdoc.suppressions):
+        if self._HasSuppression(state_tracker, 'extraRequire'):
           self._suppressed_requires.append(namespace)
-          self._AddUsedNamespace(state_tracker, namespace)
+          self._AddUsedNamespace(state_tracker, namespace, token)
 
       elif token.string == 'goog.provide':
         self._provide_tokens.append(token)
-        namespace = tokenutil.Search(token, TokenType.STRING_TEXT).string
+        namespace = tokenutil.GetStringAfterToken(token)
         if namespace in self._provided_namespaces:
           self._duplicate_provide_tokens.append(token)
         else:
@@ -329,84 +397,88 @@
 
         # If there is a suppression for the provide, add a creation for it so it
         # gets treated as a regular goog.provide (i.e. still gets sorted).
-        jsdoc = state_tracker.GetDocComment()
-        if jsdoc and ('extraProvide' in jsdoc.suppressions):
-          self._AddCreatedNamespace(state_tracker, namespace)
+        if self._HasSuppression(state_tracker, 'extraProvide'):
+          self._AddCreatedNamespace(state_tracker, namespace, token.line_number)
 
       elif token.string == 'goog.scope':
         self._scopified_file = True
 
+      elif token.string == 'goog.setTestOnly':
+
+        # Since the message is optional, we don't want to scan into later lines.
+        for t in tokenutil.GetAllTokensInSameLine(token):
+          if t.type == TokenType.STRING_TEXT:
+            message = t.string
+
+            if re.match(r'^\w+(\.\w+)+$', message):
+              # This looks like a namespace. If it's a Closurized namespace,
+              # consider it created.
+              base_namespace = message.split('.', 1)[0]
+              if base_namespace in self._closurized_namespaces:
+                self._AddCreatedNamespace(state_tracker, message,
+                                          token.line_number)
+
+            break
       else:
         jsdoc = state_tracker.GetDocComment()
+        if token.metadata and token.metadata.aliased_symbol:
+          whole_identifier_string = token.metadata.aliased_symbol
+        elif (token.string == 'goog.module.get' and
+              not self._HasSuppression(state_tracker, 'extraRequire')):
+          # Cannot use _AddUsedNamespace as this is not an identifier, but
+          # already the entire namespace that's required.
+          namespace = tokenutil.GetStringAfterToken(token)
+          namespace = UsedNamespace(namespace, namespace, token,
+                                    alias_definition=False)
+          self._used_namespaces.append(namespace)
         if jsdoc and jsdoc.HasFlag('typedef'):
           self._AddCreatedNamespace(state_tracker, whole_identifier_string,
-                                    self.GetClosurizedNamespace(
+                                    token.line_number,
+                                    namespace=self.GetClosurizedNamespace(
                                         whole_identifier_string))
         else:
-          self._AddUsedNamespace(state_tracker, whole_identifier_string)
+          is_alias_definition = (token.metadata and
+                                 token.metadata.is_alias_definition)
+          self._AddUsedNamespace(state_tracker, whole_identifier_string,
+                                 token, is_alias_definition)
 
     elif token.type == TokenType.SIMPLE_LVALUE:
       identifier = token.values['identifier']
-      namespace = self.GetClosurizedNamespace(identifier)
-      if state_tracker.InFunction():
-        self._AddUsedNamespace(state_tracker, identifier)
-      elif namespace and namespace != 'goog':
-        self._AddCreatedNamespace(state_tracker, identifier, namespace)
+      start_token = tokenutil.GetIdentifierStart(token)
+      if start_token and start_token != token:
+        # Multi-line identifier being assigned. Get the whole identifier.
+        identifier = tokenutil.GetIdentifierForToken(start_token)
+      else:
+        start_token = token
+      # If an alias is defined on the start_token, use it instead.
+      if (start_token and
+          start_token.metadata and
+          start_token.metadata.aliased_symbol and
+          not start_token.metadata.is_alias_definition):
+        identifier = start_token.metadata.aliased_symbol
+
+      if identifier:
+        namespace = self.GetClosurizedNamespace(identifier)
+        if state_tracker.InFunction():
+          self._AddUsedNamespace(state_tracker, identifier, token)
+        elif namespace and namespace != 'goog':
+          self._AddCreatedNamespace(state_tracker, identifier,
+                                    token.line_number, namespace=namespace)
 
     elif token.type == TokenType.DOC_FLAG:
-      flag_type = token.attached_object.flag_type
-      is_interface = state_tracker.GetDocComment().HasFlag('interface')
-      if flag_type == 'implements' or (flag_type == 'extends' and is_interface):
-        # Interfaces should be goog.require'd.
-        doc_start = tokenutil.Search(token, TokenType.DOC_START_BRACE)
-        interface = tokenutil.Search(doc_start, TokenType.COMMENT)
-        self._AddUsedNamespace(state_tracker, interface.string)
+      flag = token.attached_object
+      flag_type = flag.flag_type
+      if flag and flag.HasType() and flag.jstype:
+        is_interface = state_tracker.GetDocComment().HasFlag('interface')
+        if flag_type == 'implements' or (flag_type == 'extends'
+                                         and is_interface):
+          identifier = flag.jstype.alias or flag.jstype.identifier
+          self._AddUsedNamespace(state_tracker, identifier, token)
+          # Since we process doctypes only for implements and extends, the
+          # type is a simple one and we don't need any iteration for subtypes.
 
-
-  def _GetWholeIdentifierString(self, token):
-    """Returns the whole identifier string for the given token.
-
-    Checks the tokens after the current one to see if the token is one in a
-    sequence of tokens which are actually just one identifier (i.e. a line was
-    wrapped in the middle of an identifier).
-
-    Args:
-      token: The token to check.
-
-    Returns:
-      The whole identifier string or None if this token is not the first token
-      in a multi-token identifier.
-    """
-    result = ''
-
-    # Search backward to determine if this token is the first token of the
-    # identifier. If it is not the first token, return None to signal that this
-    # token should be ignored.
-    prev_token = token.previous
-    while prev_token:
-      if (prev_token.IsType(TokenType.IDENTIFIER) or
-          prev_token.IsType(TokenType.NORMAL) and prev_token.string == '.'):
-        return None
-      elif (not prev_token.IsType(TokenType.WHITESPACE) and
-            not prev_token.IsAnyType(TokenType.COMMENT_TYPES)):
-        break
-      prev_token = prev_token.previous
-
-    # Search forward to find other parts of this identifier separated by white
-    # space.
-    next_token = token
-    while next_token:
-      if (next_token.IsType(TokenType.IDENTIFIER) or
-          next_token.IsType(TokenType.NORMAL) and next_token.string == '.'):
-        result += next_token.string
-      elif (not next_token.IsType(TokenType.WHITESPACE) and
-            not next_token.IsAnyType(TokenType.COMMENT_TYPES)):
-        break
-      next_token = next_token.next
-
-    return result
-
-  def _AddCreatedNamespace(self, state_tracker, identifier, namespace=None):
+  def _AddCreatedNamespace(self, state_tracker, identifier, line_number,
+                           namespace=None):
     """Adds the namespace of an identifier to the list of created namespaces.
 
     If the identifier is annotated with a 'missingProvide' suppression, it is
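
The goog.setTestOnly handling added earlier in this hunk decides, via a regular expression, whether the call's argument is a namespace or just a free-form message. The snippet below restates that check in isolation; looks_like_namespace is a hypothetical helper written only for this note, and the expected results match testSetTestOnlyNamespaces and testSetTestOnlyComment later in this patch.

import re

_NAMESPACE_RE = re.compile(r'^\w+(\.\w+)+$')


def looks_like_namespace(message, closurized_namespaces):
  """True if a goog.setTestOnly argument would be recorded as created."""
  if not _NAMESPACE_RE.match(message):
    return False
  base_namespace = message.split('.', 1)[0]
  return base_namespace in closurized_namespaces


# 'goog.foo.barTest' is treated as a created namespace; a prose message is not.
assert looks_like_namespace('goog.foo.barTest', set(['goog']))
assert not looks_like_namespace('this is a comment', set(['goog']))
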
@@ -415,19 +487,20 @@
     Args:
       state_tracker: The JavaScriptStateTracker instance.
       identifier: The identifier to add.
+      line_number: Line number where namespace is created.
       namespace: The namespace of the identifier or None if the identifier is
           also the namespace.
     """
     if not namespace:
       namespace = identifier
 
-    jsdoc = state_tracker.GetDocComment()
-    if jsdoc and 'missingProvide' in jsdoc.suppressions:
+    if self._HasSuppression(state_tracker, 'missingProvide'):
       return
 
-    self._created_namespaces.append([namespace, identifier])
+    self._created_namespaces.append([namespace, identifier, line_number])
 
-  def _AddUsedNamespace(self, state_tracker, identifier):
+  def _AddUsedNamespace(self, state_tracker, identifier, token,
+                        is_alias_definition=False):
     """Adds the namespace of an identifier to the list of used namespaces.
 
     If the identifier is annotated with a 'missingRequire' suppression, it is
@@ -436,14 +509,32 @@
     Args:
       state_tracker: The JavaScriptStateTracker instance.
       identifier: An identifier which has been used.
+      token: The token in which the namespace is used.
+      is_alias_definition: If the used namespace is part of an alias_definition.
+          Aliased symbols need their parent namespace to be available; if it is
+          not yet required through another symbol, an error will be thrown.
     """
-    jsdoc = state_tracker.GetDocComment()
-    if jsdoc and 'missingRequire' in jsdoc.suppressions:
+    if self._HasSuppression(state_tracker, 'missingRequire'):
       return
 
+    identifier = self._GetUsedIdentifier(identifier)
     namespace = self.GetClosurizedNamespace(identifier)
-    if namespace:
-      self._used_namespaces.append([namespace, identifier])
+    # b/5362203 If it's a variable in scope then it's not a required namespace.
+    if namespace and not state_tracker.IsVariableInScope(namespace):
+      namespace = UsedNamespace(namespace, identifier, token,
+                                is_alias_definition)
+      self._used_namespaces.append(namespace)
+
+  def _HasSuppression(self, state_tracker, suppression):
+    jsdoc = state_tracker.GetDocComment()
+    return jsdoc and suppression in jsdoc.suppressions
+
+  def _GetUsedIdentifier(self, identifier):
+    """Strips apply/call/inherit calls from the identifier."""
+    for suffix in ('.apply', '.call', '.inherit'):
+      if identifier.endswith(suffix):
+        return identifier[:-len(suffix)]
+    return identifier
 
   def GetClosurizedNamespace(self, identifier):
     """Given an identifier, returns the namespace that identifier is from.
@@ -464,11 +555,6 @@
       if not identifier.startswith(namespace + '.'):
         continue
 
-      last_part = parts[-1]
-      if not last_part:
-        # TODO(robbyw): Handle this: it's a multi-line identifier.
-        return None
-
       # The namespace for a class is the shortest prefix ending in a class
       # name, which starts with a capital letter but is not a capitalized word.
       #
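
Taken together, the reshaped ClosurizedNamespacesInfo API above now reports line numbers for missing provides and requires and returns illegal alias statements alongside the missing requires. Below is a hedged usage sketch, assuming a namespaces_info instance that has already processed a token stream (see the test fixture later in this patch); report_missing is a hypothetical caller written only for this note.

def report_missing(namespaces_info):
  """Prints one line per missing provide/require and per illegal alias."""
  missing_provides = namespaces_info.GetMissingProvides()
  missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
  for namespace, line in sorted(missing_provides.items()):
    print 'line %d: missing goog.provide(%r)' % (line, namespace)
  for namespace, line in sorted(missing_requires.items()):
    print 'line %d: missing goog.require(%r)' % (line, namespace)
  for namespace, token in illegal_alias_stmts.items():
    # Values here are the alias-definition tokens, not line numbers.
    print 'line %d: alias of unrequired namespace %r' % (
        token.line_number, namespace)
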
diff --git a/catapult/third_party/closure_linter/closure_linter/closurizednamespacesinfo_test.py b/catapult/third_party/closure_linter/closure_linter/closurizednamespacesinfo_test.py
old mode 100755
new mode 100644
index cec3376..7aeae21
--- a/catapult/third_party/closure_linter/closure_linter/closurizednamespacesinfo_test.py
+++ b/catapult/third_party/closure_linter/closure_linter/closurizednamespacesinfo_test.py
@@ -19,16 +19,23 @@
 
 
 import unittest as googletest
+from closure_linter import aliaspass
 from closure_linter import closurizednamespacesinfo
+from closure_linter import ecmametadatapass
 from closure_linter import javascriptstatetracker
-from closure_linter import javascripttokenizer
 from closure_linter import javascripttokens
+from closure_linter import testutil
 from closure_linter import tokenutil
 
-# pylint: disable-msg=C6409
+# pylint: disable=g-bad-name
 TokenType = javascripttokens.JavaScriptTokenType
 
 
+def _ToLineDict(illegal_alias_stmts):
+  """Replaces tokens with the respective line number."""
+  return {k: v.line_number for k, v in illegal_alias_stmts.iteritems()}
+
+
 class ClosurizedNamespacesInfoTest(googletest.TestCase):
   """Tests for ClosurizedNamespacesInfo."""
 
@@ -56,8 +63,6 @@
       'package.className.prototype.something.somethingElse': 'package.className'
   }
 
-  _tokenizer = javascripttokenizer.JavaScriptTokenizer()
-
   def testGetClosurizedNamespace(self):
     """Tests that the correct namespace is returned for various identifiers."""
     namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
@@ -93,8 +98,9 @@
         'goog.provide(\'package.Foo\');',
         'package.Foo = function() {};'
     ]
-    token = self._tokenizer.TokenizeFile(input_lines)
-    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+    token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+        input_lines, ['package'])
 
     self.assertFalse(namespaces_info.IsExtraProvide(token),
                      'Should not be extra since it is created.')
@@ -105,8 +111,9 @@
         'goog.provide(\'package.Foo.methodName\');',
         'package.Foo.methodName = function() {};'
     ]
-    token = self._tokenizer.TokenizeFile(input_lines)
-    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+    token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+        input_lines, ['package'])
 
     self.assertFalse(namespaces_info.IsExtraProvide(token),
                      'Should not be extra since it is created.')
@@ -114,8 +121,19 @@
   def testIsExtraProvide_notCreated(self):
     """Tests that provides for non-created namespaces are extra."""
     input_lines = ['goog.provide(\'package.Foo\');']
-    token = self._tokenizer.TokenizeFile(input_lines)
-    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+    token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+        input_lines, ['package'])
+
+    self.assertTrue(namespaces_info.IsExtraProvide(token),
+                    'Should be extra since it is not created.')
+
+  def testIsExtraProvide_notCreatedMultipartClosurizedNamespace(self):
+    """Tests that provides for non-created namespaces are extra."""
+    input_lines = ['goog.provide(\'multi.part.namespace.Foo\');']
+
+    token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+        input_lines, ['multi.part'])
 
     self.assertTrue(namespaces_info.IsExtraProvide(token),
                     'Should be extra since it is not created.')
@@ -127,8 +145,9 @@
         'goog.provide(\'package.Foo\');',
         'package.Foo = function() {};'
     ]
-    token = self._tokenizer.TokenizeFile(input_lines)
-    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+    token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+        input_lines, ['package'])
 
     # Advance to the second goog.provide token.
     token = tokenutil.Search(token.next, TokenType.IDENTIFIER)
@@ -139,8 +158,9 @@
   def testIsExtraProvide_notClosurized(self):
     """Tests that provides of non-closurized namespaces are not extra."""
     input_lines = ['goog.provide(\'notclosurized.Foo\');']
-    token = self._tokenizer.TokenizeFile(input_lines)
-    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+    token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+        input_lines, ['package'])
 
     self.assertFalse(namespaces_info.IsExtraProvide(token),
                      'Should not be extra since it is not closurized.')
@@ -151,8 +171,9 @@
         'goog.require(\'package.Foo\');',
         'var x = package.Foo.methodName();'
     ]
-    token = self._tokenizer.TokenizeFile(input_lines)
-    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+    token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+        input_lines, ['package'])
 
     self.assertFalse(namespaces_info.IsExtraRequire(token),
                      'Should not be extra since it is used.')
@@ -163,8 +184,9 @@
         'goog.require(\'package.Foo.methodName\');',
         'var x = package.Foo.methodName();'
     ]
-    token = self._tokenizer.TokenizeFile(input_lines)
-    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+    token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+        input_lines, ['package'])
 
     self.assertTrue(namespaces_info.IsExtraRequire(token),
                     'Should require the package, not the method specifically.')
@@ -172,8 +194,20 @@
   def testIsExtraRequire_notUsed(self):
     """Tests that requires for unused namespaces are extra."""
     input_lines = ['goog.require(\'package.Foo\');']
-    token = self._tokenizer.TokenizeFile(input_lines)
-    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+    token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+        input_lines, ['package'])
+
+    self.assertTrue(namespaces_info.IsExtraRequire(token),
+                    'Should be extra since it is not used.')
+
+  def testIsExtraRequire_notUsedMultiPartClosurizedNamespace(self):
+    """Tests unused require with multi-part closurized namespaces."""
+
+    input_lines = ['goog.require(\'multi.part.namespace.Foo\');']
+
+    token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+        input_lines, ['multi.part'])
 
     self.assertTrue(namespaces_info.IsExtraRequire(token),
                     'Should be extra since it is not used.')
@@ -181,8 +215,9 @@
   def testIsExtraRequire_notClosurized(self):
     """Tests that requires of non-closurized namespaces are not extra."""
     input_lines = ['goog.require(\'notclosurized.Foo\');']
-    token = self._tokenizer.TokenizeFile(input_lines)
-    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+    token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+        input_lines, ['package'])
 
     self.assertFalse(namespaces_info.IsExtraRequire(token),
                      'Should not be extra since it is not closurized.')
@@ -193,8 +228,9 @@
         'goog.require(\'package.Foo.Enum\');',
         'var x = package.Foo.Enum.VALUE1;',
     ]
-    token = self._tokenizer.TokenizeFile(input_lines)
-    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+    token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+        input_lines, ['package'])
 
     self.assertTrue(namespaces_info.IsExtraRequire(token),
                     'The whole class, not the object, should be required.');
@@ -205,8 +241,9 @@
         'goog.require(\'package.Foo.CONSTANT\');',
         'var x = package.Foo.CONSTANT',
     ]
-    token = self._tokenizer.TokenizeFile(input_lines)
-    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+    token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+        input_lines, ['package'])
 
     self.assertTrue(namespaces_info.IsExtraRequire(token),
                     'The class, not the constant, should be required.');
@@ -217,8 +254,9 @@
         'goog.require(\'package.subpackage.CONSTANT\');',
         'var x = package.subpackage.CONSTANT',
     ]
-    token = self._tokenizer.TokenizeFile(input_lines)
-    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+    token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+        input_lines, ['package'])
 
     self.assertFalse(namespaces_info.IsExtraRequire(token),
                     'Constants can be required except on classes.');
@@ -229,8 +267,9 @@
         'goog.require(\'package.subpackage.method\');',
         'var x = package.subpackage.method()',
     ]
-    token = self._tokenizer.TokenizeFile(input_lines)
-    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+    token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+        input_lines, ['package'])
 
     self.assertFalse(namespaces_info.IsExtraRequire(token),
                     'Methods can be required except on classes.');
@@ -238,8 +277,9 @@
   def testIsExtraRequire_defaults(self):
     """Tests that there are no warnings about extra requires for test utils"""
     input_lines = ['goog.require(\'goog.testing.jsunit\');']
-    token = self._tokenizer.TokenizeFile(input_lines)
-    namespaces_info = self._GetInitializedNamespacesInfo(token, ['goog'], [])
+
+    token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+        input_lines, ['goog'])
 
     self.assertFalse(namespaces_info.IsExtraRequire(token),
                      'Should not be extra since it is for testing.')
@@ -250,8 +290,9 @@
         'goog.provide(\'package.Foo\');',
         'package.Foo = function() {};'
     ]
-    token = self._tokenizer.TokenizeFile(input_lines)
-    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+    namespaces_info = self._GetNamespacesInfoForScript(
+        input_lines, ['package'])
 
     self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
 
@@ -261,9 +302,8 @@
         'goog.provide(\'package.Foo.methodName\');',
         'package.Foo.methodName = function() {};'
     ]
-    token = self._tokenizer.TokenizeFile(input_lines)
-    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
 
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
     self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
 
   def testGetMissingProvides_providedParentIdentifier(self):
@@ -274,26 +314,27 @@
         'package.foo.ClassName.methodName = function() {};',
         'package.foo.ClassName.ObjectName = 1;',
     ]
-    token = self._tokenizer.TokenizeFile(input_lines)
-    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
 
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
     self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
 
   def testGetMissingProvides_unprovided(self):
     """Tests that unprovided functions cause a missing provide."""
     input_lines = ['package.Foo = function() {};']
-    token = self._tokenizer.TokenizeFile(input_lines)
-    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
 
-    self.assertEquals(1, len(namespaces_info.GetMissingProvides()))
-    self.assertTrue('package.Foo' in namespaces_info.GetMissingProvides())
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+
+    missing_provides = namespaces_info.GetMissingProvides()
+    self.assertEquals(1, len(missing_provides))
+    missing_provide = missing_provides.popitem()
+    self.assertEquals('package.Foo', missing_provide[0])
+    self.assertEquals(1, missing_provide[1])
 
   def testGetMissingProvides_privatefunction(self):
     """Tests that unprovided private functions don't cause a missing provide."""
     input_lines = ['package.Foo_ = function() {};']
-    token = self._tokenizer.TokenizeFile(input_lines)
-    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
 
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
     self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
 
   def testGetMissingProvides_required(self):
@@ -302,9 +343,8 @@
         'goog.require(\'package.Foo\');',
         'package.Foo.methodName = function() {};'
     ]
-    token = self._tokenizer.TokenizeFile(input_lines)
-    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
 
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
     self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
 
   def testGetMissingRequires_required(self):
@@ -313,10 +353,10 @@
         'goog.require(\'package.Foo\');',
         'package.Foo();'
     ]
-    token = self._tokenizer.TokenizeFile(input_lines)
-    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
 
-    self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+    missing_requires, _ = namespaces_info.GetMissingRequires()
+    self.assertEquals(0, len(missing_requires))
 
   def testGetMissingRequires_requiredIdentifier(self):
     """Tests that required namespaces satisfy identifiers on that namespace."""
@@ -324,10 +364,21 @@
         'goog.require(\'package.Foo\');',
         'package.Foo.methodName();'
     ]
-    token = self._tokenizer.TokenizeFile(input_lines)
-    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
 
-    self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+    missing_requires, _ = namespaces_info.GetMissingRequires()
+    self.assertEquals(0, len(missing_requires))
+
+  def testGetMissingRequires_requiredNamespace(self):
+    """Tests that required namespaces satisfy the namespace."""
+    input_lines = [
+        'goog.require(\'package.soy.fooTemplate\');',
+        'render(package.soy.fooTemplate);'
+    ]
+
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+    missing_requires, _ = namespaces_info.GetMissingRequires()
+    self.assertEquals(0, len(missing_requires))
 
   def testGetMissingRequires_requiredParentClass(self):
     """Tests that requiring a parent class of an object is sufficient to prevent
@@ -337,19 +388,22 @@
         'package.Foo.methodName();',
         'package.Foo.methodName(package.Foo.ObjectName);'
     ]
-    token = self._tokenizer.TokenizeFile(input_lines)
-    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
 
-    self.assertEquals(0, len(namespaces_info.GetMissingRequires()))
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+    missing_requires, _ = namespaces_info.GetMissingRequires()
+    self.assertEquals(0, len(missing_requires))
 
   def testGetMissingRequires_unrequired(self):
     """Tests that unrequired namespaces cause a missing require."""
     input_lines = ['package.Foo();']
-    token = self._tokenizer.TokenizeFile(input_lines)
-    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
 
-    self.assertEquals(1, len(namespaces_info.GetMissingRequires()))
-    self.assertTrue('package.Foo' in namespaces_info.GetMissingRequires())
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+
+    missing_requires, _ = namespaces_info.GetMissingRequires()
+    self.assertEquals(1, len(missing_requires))
+    missing_req = missing_requires.popitem()
+    self.assertEquals('package.Foo', missing_req[0])
+    self.assertEquals(1, missing_req[1])
 
   def testGetMissingRequires_provided(self):
     """Tests that provided namespaces satisfy identifiers on that namespace."""
@@ -357,22 +411,28 @@
         'goog.provide(\'package.Foo\');',
         'package.Foo.methodName();'
     ]
-    token = self._tokenizer.TokenizeFile(input_lines)
-    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
 
-    self.assertEquals(0, len(namespaces_info.GetMissingRequires()))
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+    missing_requires, _ = namespaces_info.GetMissingRequires()
+    self.assertEquals(0, len(missing_requires))
 
   def testGetMissingRequires_created(self):
     """Tests that created namespaces do not satisfy usage of an identifier."""
     input_lines = [
         'package.Foo = function();',
-        'package.Foo.methodName();'
+        'package.Foo.methodName();',
+        'package.Foo.anotherMethodName1();',
+        'package.Foo.anotherMethodName2();'
     ]
-    token = self._tokenizer.TokenizeFile(input_lines)
-    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
 
-    self.assertEquals(1, len(namespaces_info.GetMissingRequires()))
-    self.assertTrue('package.Foo' in namespaces_info.GetMissingRequires())
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+
+    missing_requires, _ = namespaces_info.GetMissingRequires()
+    self.assertEquals(1, len(missing_requires))
+    missing_require = missing_requires.popitem()
+    self.assertEquals('package.Foo', missing_require[0])
+    # Make sure line number of first occurrence is reported
+    self.assertEquals(2, missing_require[1])
 
   def testGetMissingRequires_createdIdentifier(self):
     """Tests that created identifiers satisfy usage of the identifier."""
@@ -380,10 +440,21 @@
         'package.Foo.methodName = function();',
         'package.Foo.methodName();'
     ]
-    token = self._tokenizer.TokenizeFile(input_lines)
-    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
 
-    self.assertEquals(0, len(namespaces_info.GetMissingRequires()))
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+    missing_requires, _ = namespaces_info.GetMissingRequires()
+    self.assertEquals(0, len(missing_requires))
+
+  def testGetMissingRequires_implements(self):
+    """Tests that a parametrized type requires the correct identifier."""
+    input_lines = [
+        '/** @constructor @implements {package.Bar<T>} */',
+        'package.Foo = function();',
+    ]
+
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+    missing_requires, _ = namespaces_info.GetMissingRequires()
+    self.assertItemsEqual({'package.Bar': 1}, missing_requires)
 
   def testGetMissingRequires_objectOnClass(self):
     """Tests that we should require a class, not the object on the class."""
@@ -391,11 +462,50 @@
         'goog.require(\'package.Foo.Enum\');',
         'var x = package.Foo.Enum.VALUE1;',
     ]
-    token = self._tokenizer.TokenizeFile(input_lines)
-    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
 
-    self.assertEquals(1, len(namespaces_info.GetMissingRequires()),
-                    'The whole class, not the object, should be required.');
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+    missing_requires, _ = namespaces_info.GetMissingRequires()
+    self.assertEquals(1, len(missing_requires),
+                      'The whole class, not the object, should be required.')
+
+  def testGetMissingRequires_variableWithSameName(self):
+    """Tests that we should not goog.require variables and parameters.
+
+    b/5362203 Variables in scope are not missing namespaces.
+    """
+    input_lines = [
+        'goog.provide(\'Foo\');',
+        'Foo.A = function();',
+        'Foo.A.prototype.method = function(ab) {',
+        '  if (ab) {',
+        '    var docs;',
+        '    var lvalue = new Obj();',
+        '    // Variable in scope hence not goog.require here.',
+        '    docs.foo.abc = 1;',
+        '    lvalue.next();',
+        '  }',
+        '  // Since js is function scope this should also not goog.require.',
+        '  docs.foo.func();',
+        '  // Its not a variable in scope hence goog.require.',
+        '  dummy.xyz.reset();',
+        ' return this.method2();',
+        '};',
+        'Foo.A.prototype.method1 = function(docs, abcd, xyz) {',
+        '  // Parameter hence not goog.require.',
+        '  docs.nodes.length = 2;',
+        '  lvalue.abc.reset();',
+        '};'
+    ]
+
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['Foo',
+                                                                     'docs',
+                                                                     'lvalue',
+                                                                     'dummy'])
+    missing_requires, _ = namespaces_info.GetMissingRequires()
+    self.assertEquals(2, len(missing_requires))
+    self.assertItemsEqual(
+        {'dummy.xyz': 14,
+         'lvalue.abc': 20}, missing_requires)
 
   def testIsFirstProvide(self):
     """Tests operation of the isFirstProvide method."""
@@ -403,9 +513,9 @@
         'goog.provide(\'package.Foo\');',
         'package.Foo.methodName();'
     ]
-    token = self._tokenizer.TokenizeFile(input_lines)
-    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
 
+    token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+        input_lines, ['package'])
     self.assertTrue(namespaces_info.IsFirstProvide(token))
 
   def testGetWholeIdentifierString(self):
@@ -415,13 +525,315 @@
         '    veryLong.',
         '    identifier;'
     ]
-    token = self._tokenizer.TokenizeFile(input_lines)
-    namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo([], [])
+
+    token = testutil.TokenizeSource(input_lines)
 
     self.assertEquals('package.Foo.veryLong.identifier',
-                      namespaces_info._GetWholeIdentifierString(token))
+                      tokenutil.GetIdentifierForToken(token))
+
     self.assertEquals(None,
-                      namespaces_info._GetWholeIdentifierString(token.next))
+                      tokenutil.GetIdentifierForToken(token.next))
+
+  def testScopified(self):
+    """Tests that a goog.scope call is noticed."""
+    input_lines = [
+        'goog.scope(function() {',
+        '});'
+        ]
+
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+    self.assertTrue(namespaces_info._scopified_file)
+
+  def testScope_unusedAlias(self):
+    """Tests that an unused alias symbol is illegal."""
+    input_lines = [
+        'goog.scope(function() {',
+        'var Event = goog.events.Event;',
+        '});'
+        ]
+
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+    missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
+    self.assertEquals({}, missing_requires)
+    self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts))
+
+  def testScope_usedMultilevelAlias(self):
+    """Tests that an used alias symbol in a deep namespace is ok."""
+    input_lines = [
+        'goog.require(\'goog.Events\');',
+        'goog.scope(function() {',
+        'var Event = goog.Events.DeepNamespace.Event;',
+        'Event();',
+        '});'
+        ]
+
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+    missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
+    self.assertEquals({}, missing_requires)
+    self.assertEquals({}, illegal_alias_stmts)
+
+  def testScope_usedAlias(self):
+    """Tests that aliased symbols result in correct requires."""
+    input_lines = [
+        'goog.scope(function() {',
+        'var Event = goog.events.Event;',
+        'var dom = goog.dom;',
+        'Event(dom.classes.get);',
+        '});'
+        ]
+
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+    missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
+    self.assertEquals({}, illegal_alias_stmts)
+    self.assertEquals({'goog.dom.classes': 4, 'goog.events.Event': 4},
+                      missing_requires)
+
+  def testModule_alias(self):
+    """Tests that goog.module style aliases are supported."""
+    input_lines = [
+        'goog.module(\'test.module\');',
+        'var Unused = goog.require(\'goog.Unused\');',
+        'var AliasedClass = goog.require(\'goog.AliasedClass\');',
+        'var x = new AliasedClass();',
+        ]
+
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+    namespaceToken = self._GetRequireTokens('goog.AliasedClass')
+    self.assertFalse(namespaces_info.IsExtraRequire(namespaceToken),
+                     'AliasedClass should be marked as used')
+    unusedToken = self._GetRequireTokens('goog.Unused')
+    self.assertTrue(namespaces_info.IsExtraRequire(unusedToken),
+                    'Unused should be marked as not used')
+
+  def testModule_aliasInScope(self):
+    """Tests that goog.module style aliases are supported."""
+    input_lines = [
+        'goog.module(\'test.module\');',
+        'var AliasedClass = goog.require(\'goog.AliasedClass\');',
+        'goog.scope(function() {',
+        'var x = new AliasedClass();',
+        '});',
+        ]
+
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+    namespaceToken = self._GetRequireTokens('goog.AliasedClass')
+    self.assertFalse(namespaces_info.IsExtraRequire(namespaceToken),
+                     'AliasedClass should be marked as used')
+
+  def testModule_getAlwaysProvided(self):
+    """Tests that goog.module.get is recognized as a built-in."""
+    input_lines = [
+        'goog.provide(\'test.MyClass\');',
+        'goog.require(\'goog.someModule\');',
+        'goog.scope(function() {',
+        'var someModule = goog.module.get(\'goog.someModule\');',
+        'test.MyClass = function() {};',
+        '});',
+        ]
+
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+    self.assertEquals({}, namespaces_info.GetMissingRequires()[0])
+
+  def testModule_requireForGet(self):
+    """Tests that goog.module.get needs a goog.require call."""
+    input_lines = [
+        'goog.provide(\'test.MyClass\');',
+        'function foo() {',
+        '  var someModule = goog.module.get(\'goog.someModule\');',
+        '  someModule.doSth();',
+        '}',
+        ]
+
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+    self.assertEquals({'goog.someModule': 3},
+                      namespaces_info.GetMissingRequires()[0])
+
+  def testScope_usedTypeAlias(self):
+    """Tests aliased symbols in type annotations."""
+    input_lines = [
+        'goog.scope(function() {',
+        'var Event = goog.events.Event;',
+        '/** @type {Event} */;',
+        '});'
+        ]
+
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+    missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
+    self.assertEquals({}, missing_requires)
+    self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts))
+
+  def testScope_partialAlias_typeOnly(self):
+    """Tests a partial alias only used in type annotations.
+
+    In this example, some goog.events namespace would need to be required
+    so that evaluating goog.events.bar doesn't throw an error.
+    """
+    input_lines = [
+        'goog.scope(function() {',
+        'var bar = goog.events.bar;',
+        '/** @type {bar.Foo} */;',
+        '});'
+        ]
+
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+    missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
+    self.assertEquals({}, missing_requires)
+    self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts))
+
+  def testScope_partialAlias(self):
+    """Tests a partial alias in conjunction with a type annotation.
+
+    In this example, the partial alias is already defined by another type,
+    therefore the doc-only type doesn't need to be required.
+    """
+    input_lines = [
+        'goog.scope(function() {',
+        'var bar = goog.events.bar;',
+        '/** @type {bar.Event} */;',
+        'bar.EventType();'
+        '});'
+        ]
+
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+    missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
+    self.assertEquals({'goog.events.bar.EventType': 4}, missing_requires)
+    self.assertEquals({}, illegal_alias_stmts)
+
+  def testScope_partialAliasRequires(self):
+    """Tests partial aliases with correct requires."""
+    input_lines = [
+        'goog.require(\'goog.events.bar.EventType\');',
+        'goog.scope(function() {',
+        'var bar = goog.events.bar;',
+        '/** @type {bar.Event} */;',
+        'bar.EventType();'
+        '});'
+        ]
+
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+    missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
+    self.assertEquals({}, missing_requires)
+    self.assertEquals({}, illegal_alias_stmts)
+
+  def testScope_partialAliasRequiresBoth(self):
+    """Tests partial aliases with correct requires."""
+    input_lines = [
+        'goog.require(\'goog.events.bar.Event\');',
+        'goog.require(\'goog.events.bar.EventType\');',
+        'goog.scope(function() {',
+        'var bar = goog.events.bar;',
+        '/** @type {bar.Event} */;',
+        'bar.EventType();'
+        '});'
+        ]
+
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+    missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
+    self.assertEquals({}, missing_requires)
+    self.assertEquals({}, illegal_alias_stmts)
+    event_token = self._GetRequireTokens('goog.events.bar.Event')
+    self.assertTrue(namespaces_info.IsExtraRequire(event_token))
+
+  def testScope_partialAliasNoSubtypeRequires(self):
+    """Tests that partial aliases don't yield subtype requires (regression)."""
+    input_lines = [
+        'goog.provide(\'goog.events.Foo\');',
+        'goog.scope(function() {',
+        'goog.events.Foo = {};',
+        'var Foo = goog.events.Foo;'
+        'Foo.CssName_ = {};'
+        'var CssName_ = Foo.CssName_;'
+        '});'
+        ]
+
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+    missing_requires, _ = namespaces_info.GetMissingRequires()
+    self.assertEquals({}, missing_requires)
+
+  def testScope_aliasNamespace(self):
+    """Tests that an unused alias namespace is not required when available.
+
+    In the example goog.events.Bar does not need to be required, because the
+    namespace goog.events is already available via the required goog.events.Foo.
+    """
+    input_lines = [
+        'goog.require(\'goog.events.Foo\');',
+        'goog.scope(function() {',
+        'var Bar = goog.events.Bar;',
+        '/** @type {Bar} */;',
+        'goog.events.Foo;',
+        '});'
+        ]
+
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+    missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
+    self.assertEquals({}, missing_requires)
+    self.assertEquals({}, illegal_alias_stmts)
+
+  def testScope_aliasNamespaceIllegal(self):
+    """Tests that an unused alias namespace is not required when available."""
+    input_lines = [
+        'goog.scope(function() {',
+        'var Bar = goog.events.Bar;',
+        '/** @type {Bar} */;',
+        '});'
+        ]
+
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+    missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
+    self.assertEquals({}, missing_requires)
+    self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts))
+
+  def testScope_provides(self):
+    """Tests that aliased symbols result in correct provides."""
+    input_lines = [
+        'goog.scope(function() {',
+        'goog.bar = {};',
+        'var bar = goog.bar;',
+        'bar.Foo = {};',
+        '});'
+        ]
+
+    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+    missing_provides = namespaces_info.GetMissingProvides()
+    self.assertEquals({'goog.bar.Foo': 4}, missing_provides)
+    _, illegal_alias_stmts = namespaces_info.GetMissingRequires()
+    self.assertEquals({}, illegal_alias_stmts)
+
+  def testSetTestOnlyNamespaces(self):
+    """Tests that a namespace in setTestOnly makes it a valid provide."""
+    namespaces_info = self._GetNamespacesInfoForScript([
+        'goog.setTestOnly(\'goog.foo.barTest\');'
+        ], ['goog'])
+
+    token = self._GetProvideTokens('goog.foo.barTest')
+    self.assertFalse(namespaces_info.IsExtraProvide(token))
+
+    token = self._GetProvideTokens('goog.foo.bazTest')
+    self.assertTrue(namespaces_info.IsExtraProvide(token))
+
+  def testSetTestOnlyComment(self):
+    """Ensure a comment in setTestOnly does not cause a created namespace."""
+    namespaces_info = self._GetNamespacesInfoForScript([
+        'goog.setTestOnly(\'this is a comment\');'
+        ], ['goog'])
+
+    self.assertEquals(
+        [], namespaces_info._created_namespaces,
+        'A comment in setTestOnly should not modify created namespaces.')
+
+  def _GetNamespacesInfoForScript(self, script, closurized_namespaces=None):
+    _, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+        script, closurized_namespaces)
+
+    return namespaces_info
+
+  def _GetStartTokenAndNamespacesInfoForScript(
+      self, script, closurized_namespaces):
+
+    token = testutil.TokenizeSource(script)
+    return token, self._GetInitializedNamespacesInfo(
+        token, closurized_namespaces, [])
 
   def _GetInitializedNamespacesInfo(self, token, closurized_namespaces,
                                     ignored_extra_namespaces):
@@ -431,8 +843,18 @@
         ignored_extra_namespaces=ignored_extra_namespaces)
     state_tracker = javascriptstatetracker.JavaScriptStateTracker()
 
+    ecma_pass = ecmametadatapass.EcmaMetaDataPass()
+    ecma_pass.Process(token)
+
+    state_tracker.DocFlagPass(token, error_handler=None)
+
+    alias_pass = aliaspass.AliasPass(closurized_namespaces)
+    alias_pass.Process(token)
+
     while token:
+      state_tracker.HandleToken(token, state_tracker.GetLastNonSpaceToken())
       namespaces_info.ProcessToken(token, state_tracker)
+      state_tracker.HandleAfterToken(token)
       token = token.next
 
     return namespaces_info
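# --- Illustrative sketch, not part of the patch: the pass ordering the updated
# test fixture above relies on.  It assumes the closure_linter modules named in
# this hunk are importable; build_namespaces_info itself is a hypothetical
# helper, not an API of the linter.
from closure_linter import aliaspass
from closure_linter import closurizednamespacesinfo
from closure_linter import ecmametadatapass
from closure_linter import javascriptstatetracker
from closure_linter import testutil


def build_namespaces_info(source_lines, closurized_namespaces=('goog',)):
  """Hypothetical helper mirroring _GetInitializedNamespacesInfo above."""
  token = testutil.TokenizeSource(source_lines)
  namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
      closurized_namespaces=closurized_namespaces,
      ignored_extra_namespaces=[])
  state_tracker = javascriptstatetracker.JavaScriptStateTracker()

  # The passes must run in this order: metadata (contexts) first, then doc
  # flags, then alias resolution, before any per-token namespace bookkeeping.
  ecmametadatapass.EcmaMetaDataPass().Process(token)
  state_tracker.DocFlagPass(token, error_handler=None)
  aliaspass.AliasPass(closurized_namespaces).Process(token)

  while token:
    state_tracker.HandleToken(token, state_tracker.GetLastNonSpaceToken())
    namespaces_info.ProcessToken(token, state_tracker)
    state_tracker.HandleAfterToken(token)
    token = token.next
  return namespaces_info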
@@ -440,12 +862,12 @@
   def _GetProvideTokens(self, namespace):
     """Returns a list of tokens for a goog.require of the given namespace."""
     line_text = 'goog.require(\'' + namespace + '\');\n'
-    return javascripttokenizer.JavaScriptTokenizer().TokenizeFile([line_text])
+    return testutil.TokenizeSource([line_text])
 
   def _GetRequireTokens(self, namespace):
     """Returns a list of tokens for a goog.require of the given namespace."""
     line_text = 'goog.require(\'' + namespace + '\');\n'
-    return javascripttokenizer.JavaScriptTokenizer().TokenizeFile([line_text])
+    return testutil.TokenizeSource([line_text])
 
 if __name__ == '__main__':
   googletest.main()
diff --git a/catapult/third_party/closure_linter/closure_linter/common/__init__.py b/catapult/third_party/closure_linter/closure_linter/common/__init__.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/closure_linter/closure_linter/common/error.py b/catapult/third_party/closure_linter/closure_linter/common/error.py
old mode 100755
new mode 100644
index 0e3b476..4209c23
--- a/catapult/third_party/closure_linter/closure_linter/common/error.py
+++ b/catapult/third_party/closure_linter/closure_linter/common/error.py
@@ -23,7 +23,7 @@
 class Error(object):
   """Object representing a style error."""
 
-  def __init__(self, code, message, token, position, fix_data):
+  def __init__(self, code, message, token=None, position=None, fix_data=None):
     """Initialize the error object.
 
     Args:
diff --git a/catapult/third_party/closure_linter/closure_linter/common/erroraccumulator.py b/catapult/third_party/closure_linter/closure_linter/common/erroraccumulator.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/closure_linter/closure_linter/common/errorhandler.py b/catapult/third_party/closure_linter/closure_linter/common/errorhandler.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/closure_linter/closure_linter/common/filetestcase.py b/catapult/third_party/closure_linter/closure_linter/common/filetestcase.py
old mode 100755
new mode 100644
index 03b5ece..7cd83cd
--- a/catapult/third_party/closure_linter/closure_linter/common/filetestcase.py
+++ b/catapult/third_party/closure_linter/closure_linter/common/filetestcase.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-#
 # Copyright 2007 The Closure Linter Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -26,6 +25,7 @@
 
 import re
 
+import gflags as flags
 import unittest as googletest
 from closure_linter.common import erroraccumulator
 
@@ -41,21 +41,27 @@
   _EXPECTED_RE = re.compile(r'\s*//\s*(?:(?P<line>[+-]?[0-9]+):)?'
                             r'\s*(?P<msgs>%(msg)s(?:,\s*%(msg)s)*)' % _MESSAGE)
 
-  def __init__(self, filename, runner, converter):
+  def __init__(self, filename, lint_callable, converter):
     """Create a single file lint test case.
 
     Args:
       filename: Filename to test.
-      runner: Object implementing the LintRunner interface that lints a file.
+      lint_callable: Callable that lints a file.  This is usually runner.Run().
       converter: Function taking an error string and returning an error code.
     """
 
     googletest.TestCase.__init__(self, 'runTest')
     self._filename = filename
     self._messages = []
-    self._runner = runner
+    self._lint_callable = lint_callable
     self._converter = converter
 
+  def setUp(self):
+    flags.FLAGS.dot_on_next_line = True
+
+  def tearDown(self):
+    flags.FLAGS.dot_on_next_line = False
+
   def shortDescription(self):
     """Provides a description for the test."""
     return 'Run linter on %s' % self._filename
@@ -65,7 +71,7 @@
     try:
       filename = self._filename
       stream = open(filename)
-    except IOError, ex:
+    except IOError as ex:
       raise IOError('Could not find testdata resource for %s: %s' %
                     (self._filename, ex))
 
@@ -96,13 +102,14 @@
     return messages
 
   def _ProcessFileAndGetMessages(self, filename):
-    """Trap gpylint's output parse it to get messages added."""
-    errors = erroraccumulator.ErrorAccumulator()
-    self._runner.Run([filename], errors)
+    """Trap gjslint's output parse it to get messages added."""
+    error_accumulator = erroraccumulator.ErrorAccumulator()
+    self._lint_callable(filename, error_accumulator)
 
-    errors = errors.GetErrors()
+    errors = error_accumulator.GetErrors()
 
     # Convert to expected tuple format.
+
     error_msgs = [(error.token.line_number, error.code) for error in errors]
     error_msgs.sort()
     return error_msgs
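# --- Illustrative sketch, not part of the patch: the shape of the new
# lint_callable argument, i.e. anything accepting (filename, error_handler).
# The wrapper below and the runner.Run call are assumptions based on the
# docstring above ("This is usually runner.Run()"), not documented API.
from closure_linter import runner
from closure_linter.common import erroraccumulator


def lint_one_file(path):
  """Hypothetical wrapper with the (filename, error_handler) call shape."""
  accumulator = erroraccumulator.ErrorAccumulator()
  runner.Run(path, accumulator)  # assumed signature, see note above
  return accumulator.GetErrors()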
diff --git a/catapult/third_party/closure_linter/closure_linter/common/htmlutil.py b/catapult/third_party/closure_linter/closure_linter/common/htmlutil.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/closure_linter/closure_linter/common/lintrunner.py b/catapult/third_party/closure_linter/closure_linter/common/lintrunner.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/closure_linter/closure_linter/common/matcher.py b/catapult/third_party/closure_linter/closure_linter/common/matcher.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/closure_linter/closure_linter/common/position.py b/catapult/third_party/closure_linter/closure_linter/common/position.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/closure_linter/closure_linter/common/simplefileflags.py b/catapult/third_party/closure_linter/closure_linter/common/simplefileflags.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/closure_linter/closure_linter/common/tokenizer.py b/catapult/third_party/closure_linter/closure_linter/common/tokenizer.py
old mode 100755
new mode 100644
index 0234720..9420ea3
--- a/catapult/third_party/closure_linter/closure_linter/common/tokenizer.py
+++ b/catapult/third_party/closure_linter/closure_linter/common/tokenizer.py
@@ -90,7 +90,8 @@
     Returns:
       The newly created Token object.
     """
-    return tokens.Token(string, token_type, line, line_number, values)
+    return tokens.Token(string, token_type, line, line_number, values,
+                        line_number)
 
   def __TokenizeLine(self, line):
     """Tokenizes the given line.
diff --git a/catapult/third_party/closure_linter/closure_linter/common/tokens.py b/catapult/third_party/closure_linter/closure_linter/common/tokens.py
old mode 100755
new mode 100644
index 4c7d818..4703998
--- a/catapult/third_party/closure_linter/closure_linter/common/tokens.py
+++ b/catapult/third_party/closure_linter/closure_linter/common/tokens.py
@@ -47,7 +47,8 @@
         a separate metadata pass.
   """
 
-  def __init__(self, string, token_type, line, line_number, values=None):
+  def __init__(self, string, token_type, line, line_number, values=None,
+               orig_line_number=None):
     """Creates a new Token object.
 
     Args:
@@ -58,13 +59,18 @@
       values: A dict of named values within the token.  For instance, a
         function declaration may have a value called 'name' which captures the
         name of the function.
+      orig_line_number: The line number of the original file this token comes
+        from. This should only be set during tokenization; tokens created
+        later (for example, error-fix tokens) should leave it as None.
     """
     self.type = token_type
     self.string = string
     self.length = len(string)
     self.line = line
     self.line_number = line_number
+    self.orig_line_number = orig_line_number
     self.values = values
+    self.is_deleted = False
 
     # These parts can only be computed when the file is fully tokenized
     self.previous = None
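# --- Illustrative sketch, not part of the patch: how the new orig_line_number
# argument is meant to be used per the docstring above.  The token_type strings
# are placeholders, not real JavaScriptTokenType constants.
from closure_linter.common import tokens

# A token produced during tokenization records the line it came from.
tokenized = tokens.Token('foo', 'Identifier', 'foo.bar();', 3,
                         values=None, orig_line_number=3)

# A token fabricated later (e.g. by an automated fix) leaves it unset.
fabricated = tokens.Token(';', 'Semicolon', 'foo.bar();', 3)

assert tokenized.orig_line_number == 3
assert fabricated.orig_line_number is None and not fabricated.is_deleted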
diff --git a/catapult/third_party/closure_linter/closure_linter/common/tokens_test.py b/catapult/third_party/closure_linter/closure_linter/common/tokens_test.py
index 79ac0ae..01ec89d 100644
--- a/catapult/third_party/closure_linter/closure_linter/common/tokens_test.py
+++ b/catapult/third_party/closure_linter/closure_linter/common/tokens_test.py
@@ -14,7 +14,7 @@
 # limitations under the License.
 
 
-
+__author__ = 'nnaze@google.com (Nathan Naze)'
 
 import unittest as googletest
 from closure_linter.common import tokens
diff --git a/catapult/third_party/closure_linter/closure_linter/ecmalintrules.py b/catapult/third_party/closure_linter/closure_linter/ecmalintrules.py
old mode 100755
new mode 100644
index 263385f..5204a6e
--- a/catapult/third_party/closure_linter/closure_linter/ecmalintrules.py
+++ b/catapult/third_party/closure_linter/closure_linter/ecmalintrules.py
@@ -23,24 +23,28 @@
 
 import re
 
+import gflags as flags
+
 from closure_linter import checkerbase
 from closure_linter import ecmametadatapass
 from closure_linter import error_check
+from closure_linter import errorrules
 from closure_linter import errors
 from closure_linter import indentation
-from closure_linter import javascripttokens
 from closure_linter import javascripttokenizer
+from closure_linter import javascripttokens
 from closure_linter import statetracker
 from closure_linter import tokenutil
 from closure_linter.common import error
-from closure_linter.common import htmlutil
-from closure_linter.common import lintrunner
 from closure_linter.common import position
-from closure_linter.common import tokens
-import gflags as flags
+
 
 FLAGS = flags.FLAGS
 flags.DEFINE_list('custom_jsdoc_tags', '', 'Extra jsdoc tags to allow')
+# TODO(user): When flipping this to True, remove logic from unit tests
+# that overrides this flag.
+flags.DEFINE_boolean('dot_on_next_line', False, 'Require dots to be '
+                     'placed on the next line for wrapped expressions')
 
 # TODO(robbyw): Check for extra parens on return statements
 # TODO(robbyw): Check for 0px in strings
@@ -55,6 +59,7 @@
 Rule = error_check.Rule
 Type = javascripttokens.JavaScriptTokenType
 
+
 class EcmaScriptLintRules(checkerbase.LintRulesBase):
   """EmcaScript lint style checking rules.
 
@@ -67,14 +72,15 @@
   language.
   """
 
-  # Static constants.
-  MAX_LINE_LENGTH = 80
+  # Set in the constructor so that the flags have already been initialized.
+  max_line_length = -1
 
+  # Static constants.
   MISSING_PARAMETER_SPACE = re.compile(r',\S')
 
-  EXTRA_SPACE = re.compile('(\(\s|\s\))')
+  EXTRA_SPACE = re.compile(r'(\(\s|\s\))')
 
-  ENDS_WITH_SPACE = re.compile('\s$')
+  ENDS_WITH_SPACE = re.compile(r'\s$')
 
   ILLEGAL_TAB = re.compile(r'\t')
 
@@ -85,12 +91,18 @@
   AUTHOR_SPEC = re.compile(r'(\s*)[^\s]+@[^(\s]+(\s*)\(.+\)')
 
   # Acceptable tokens to remove for line too long testing.
-  LONG_LINE_IGNORE = frozenset(['*', '//', '@see'] +
+  LONG_LINE_IGNORE = frozenset(
+      ['*', '//', '@see'] +
       ['@%s' % tag for tag in statetracker.DocFlag.HAS_TYPE])
 
+  JSDOC_FLAGS_DESCRIPTION_NOT_REQUIRED = frozenset([
+      '@fileoverview', '@param', '@return', '@returns'])
+
   def __init__(self):
     """Initialize this lint rule object."""
     checkerbase.LintRulesBase.__init__(self)
+    if EcmaScriptLintRules.max_line_length == -1:
+      EcmaScriptLintRules.max_line_length = errorrules.GetMaxLineLength()
 
   def Initialize(self, checker, limited_doc_checks, is_html):
     """Initialize this lint rule object before parsing a new file."""
@@ -107,6 +119,7 @@
 
     Args:
       last_token: The last token in the line.
+      state: parser_state object that indicates the current state in the page
     """
     # Start from the last token so that we have the flag object attached to
     # and DOC_FLAG tokens.
@@ -119,8 +132,8 @@
     while token and token.line_number == line_number:
       if state.IsTypeToken(token):
         line.insert(0, 'x' * len(token.string))
-      elif token.type in (Type.IDENTIFIER, Type.NORMAL):
-        # Dots are acceptable places to wrap.
+      elif token.type in (Type.IDENTIFIER, Type.OPERATOR):
+        # Dots are acceptable places to wrap (may be tokenized as identifiers).
         line.insert(0, token.string.replace('.', ' '))
       else:
         line.insert(0, token.string)
@@ -130,7 +143,7 @@
     line = line.rstrip('\n\r\f')
     try:
       length = len(unicode(line, 'utf-8'))
-    except:
+    except (LookupError, UnicodeDecodeError):
       # Unknown encoding. The line length may be wrong, as was originally the
       # case for utf-8 (see bug 1735846). For now just accept the default
       # length, but as we find problems we can either add test for other
@@ -138,7 +151,7 @@
       # false positives at the cost of more false negatives.
       length = len(line)
 
-    if length > self.MAX_LINE_LENGTH:
+    if length > EcmaScriptLintRules.max_line_length:
 
       # If the line matches one of the exceptions, then it's ok.
       for long_line_regexp in self.GetLongLineExceptions():
@@ -150,44 +163,42 @@
       parts = set(line.split())
 
       # We allow two "words" (type and name) when the line contains @param
-      max = 1
+      max_parts = 1
       if '@param' in parts:
-        max = 2
+        max_parts = 2
 
       # Custom tags like @requires may have url like descriptions, so ignore
       # the tag, similar to how we handle @see.
       custom_tags = set(['@%s' % f for f in FLAGS.custom_jsdoc_tags])
-      if (len(parts.difference(self.LONG_LINE_IGNORE | custom_tags)) > max):
-        self._HandleError(errors.LINE_TOO_LONG,
+      if (len(parts.difference(self.LONG_LINE_IGNORE | custom_tags))
+          > max_parts):
+        self._HandleError(
+            errors.LINE_TOO_LONG,
             'Line too long (%d characters).' % len(line), last_token)
 
-  def _CheckJsDocType(self, token):
+  def _CheckJsDocType(self, token, js_type):
     """Checks the given type for style errors.
 
     Args:
       token: The DOC_FLAG token for the flag whose type to check.
+      js_type: The flag's typeannotation.TypeAnnotation instance.
     """
-    flag = token.attached_object
-    type = flag.type
-    if type and type is not None and not type.isspace():
-      pieces = self.TYPE_SPLIT.split(type)
-      if len(pieces) == 1 and type.count('|') == 1 and (
-           type.endswith('|null') or type.startswith('null|')):
-         self._HandleError(errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL,
-             'Prefer "?Type" to "Type|null": "%s"' % type, token)
+    if not js_type: return
 
-      for p in pieces:
-        if p.count('|') and p.count('?'):
-          # TODO(robbyw): We should do actual parsing of JsDoc types.  As is,
-          # this won't report an error for {number|Array.<string>?}, etc.
-          self._HandleError(errors.JSDOC_ILLEGAL_QUESTION_WITH_PIPE,
-              'JsDoc types cannot contain both "?" and "|": "%s"' % p, token)
+    if js_type.type_group and len(js_type.sub_types) == 2:
+      identifiers = [t.identifier for t in js_type.sub_types]
+      if 'null' in identifiers:
+        # Don't warn if the identifier is a template type (e.g. {TYPE|null}).
+        if not identifiers[0].isupper() and not identifiers[1].isupper():
+          self._HandleError(
+              errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL,
+              'Prefer "?Type" to "Type|null": "%s"' % js_type, token)
 
-      if error_check.ShouldCheck(Rule.BRACES_AROUND_TYPE) and (
-          flag.type_start_token.type != Type.DOC_START_BRACE or
-          flag.type_end_token.type != Type.DOC_END_BRACE):
-        self._HandleError(errors.MISSING_BRACES_AROUND_TYPE,
-            'Type must always be surrounded by curly braces.', token)
+    # TODO(user): We should report an error for wrong usage of '?' and '|'
+    # e.g. {?number|string|null} etc.
+
+    for sub_type in js_type.IterTypes():
+      self._CheckJsDocType(token, sub_type)
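# --- Illustrative sketch, not part of the patch: a standalone restatement of
# the "prefer ?Type over Type|null" rule above, using a minimal stand-in for
# the typeannotation interface it relies on (type_group, sub_types,
# identifier, IterTypes).  FakeType is a mock, not the real annotation class.
class FakeType(object):
  def __init__(self, identifier='', sub_types=None, type_group=False):
    self.identifier = identifier
    self.sub_types = sub_types or []
    self.type_group = type_group

  def IterTypes(self):
    return iter(self.sub_types)


def prefers_question_mark(js_type):
  """True when {A|null} should be written as {?A} (mirrors the check above)."""
  if js_type.type_group and len(js_type.sub_types) == 2:
    identifiers = [t.identifier for t in js_type.sub_types]
    if 'null' in identifiers:
      # Template types such as {TYPE|null} are exempt, as in the hunk above.
      return not (identifiers[0].isupper() or identifiers[1].isupper())
  return False


number_or_null = FakeType(sub_types=[FakeType('number'), FakeType('null')],
                          type_group=True)
template_or_null = FakeType(sub_types=[FakeType('TYPE'), FakeType('null')],
                            type_group=True)
assert prefers_question_mark(number_or_null)
assert not prefers_question_mark(template_or_null)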
 
   def _CheckForMissingSpaceBeforeToken(self, token):
     """Checks for a missing space at the beginning of a token.
@@ -207,7 +218,60 @@
           errors.MISSING_SPACE,
           'Missing space before "%s"' % token.string,
           token,
-          Position.AtBeginning())
+          position=Position.AtBeginning())
+
+  def _CheckOperator(self, token):
+    """Checks an operator for spacing and line style.
+
+    Args:
+      token: The operator token.
+    """
+    last_code = token.metadata.last_code
+
+    if not self._ExpectSpaceBeforeOperator(token):
+      if (token.previous and token.previous.type == Type.WHITESPACE and
+          last_code and last_code.type in (Type.NORMAL, Type.IDENTIFIER) and
+          last_code.line_number == token.line_number):
+        self._HandleError(
+            errors.EXTRA_SPACE, 'Extra space before "%s"' % token.string,
+            token.previous, position=Position.All(token.previous.string))
+
+    elif (token.previous and
+          not token.previous.IsComment() and
+          not tokenutil.IsDot(token) and
+          token.previous.type in Type.EXPRESSION_ENDER_TYPES):
+      self._HandleError(errors.MISSING_SPACE,
+                        'Missing space before "%s"' % token.string, token,
+                        position=Position.AtBeginning())
+
+    # Check wrapping of operators.
+    next_code = tokenutil.GetNextCodeToken(token)
+
+    is_dot = tokenutil.IsDot(token)
+    wrapped_before = last_code and last_code.line_number != token.line_number
+    wrapped_after = next_code and next_code.line_number != token.line_number
+
+    if FLAGS.dot_on_next_line and is_dot and wrapped_after:
+      self._HandleError(
+          errors.LINE_ENDS_WITH_DOT,
+          '"." must go on the following line',
+          token)
+    if (not is_dot and wrapped_before and
+        not token.metadata.IsUnaryOperator()):
+      self._HandleError(
+          errors.LINE_STARTS_WITH_OPERATOR,
+          'Binary operator must go on previous line "%s"' % token.string,
+          token)
+
+  def _IsLabel(self, token):
+    # A ':' token is considered part of a label if it occurs in a case
+    # statement, a plain label, or an object literal, i.e. is not part of a
+    # ternary.
+
+    return (token.string == ':' and
+            token.metadata.context.type in (Context.LITERAL_ELEMENT,
+                                            Context.CASE_BLOCK,
+                                            Context.STATEMENT))
 
   def _ExpectSpaceBeforeOperator(self, token):
     """Returns whether a space should appear before the given operator token.
@@ -221,13 +285,13 @@
     if token.string == ',' or token.metadata.IsUnaryPostOperator():
       return False
 
+    if tokenutil.IsDot(token):
+      return False
+
     # Colons should appear in labels, object literals, the case of a switch
     # statement, and ternary operator. Only want a space in the case of the
     # ternary operator.
-    if (token.string == ':' and
-        token.metadata.context.type in (Context.LITERAL_ELEMENT,
-                                        Context.CASE_BLOCK,
-                                        Context.STATEMENT)):
+    if self._IsLabel(token):
       return False
 
     if token.metadata.IsUnaryOperator() and token.IsFirstInLine():
@@ -247,7 +311,7 @@
     last_in_line = token.IsLastInLine()
     last_non_space_token = state.GetLastNonSpaceToken()
 
-    type = token.type
+    token_type = token.type
 
     # Process the line change.
     if not self._is_html and error_check.ShouldCheck(Rule.INDENTATION):
@@ -259,11 +323,12 @@
     if last_in_line:
       self._CheckLineLength(token, state)
 
-    if type == Type.PARAMETERS:
+    if token_type == Type.PARAMETERS:
       # Find missing spaces in parameter lists.
       if self.MISSING_PARAMETER_SPACE.search(token.string):
+        fix_data = ', '.join([s.strip() for s in token.string.split(',')])
         self._HandleError(errors.MISSING_SPACE, 'Missing space after ","',
-            token)
+                          token, position=None, fix_data=fix_data.strip())
 
       # Find extra spaces at the beginning of parameter lists.  Make sure
       # we aren't at the beginning of a continuing multi-line list.
@@ -271,54 +336,57 @@
         space_count = len(token.string) - len(token.string.lstrip())
         if space_count:
           self._HandleError(errors.EXTRA_SPACE, 'Extra space after "("',
-              token, Position(0, space_count))
+                            token, position=Position(0, space_count))
 
-    elif (type == Type.START_BLOCK and
+    elif (token_type == Type.START_BLOCK and
           token.metadata.context.type == Context.BLOCK):
       self._CheckForMissingSpaceBeforeToken(token)
 
-    elif type == Type.END_BLOCK:
-      # This check is for object literal end block tokens, but there is no need
-      # to test that condition since a comma at the end of any other kind of
-      # block is undoubtedly a parse error.
+    elif token_type == Type.END_BLOCK:
       last_code = token.metadata.last_code
-      if last_code.IsOperator(','):
-        self._HandleError(errors.COMMA_AT_END_OF_LITERAL,
-            'Illegal comma at end of object literal', last_code,
-            Position.All(last_code.string))
-
       if state.InFunction() and state.IsFunctionClose():
-        is_immediately_called = (token.next and
-                                 token.next.type == Type.START_PAREN)
         if state.InTopLevelFunction():
-          # When the function was top-level and not immediately called, check
-          # that it's terminated by a semi-colon.
-          if state.InAssignedFunction():
-            if not is_immediately_called and (last_in_line or
-                not token.next.type == Type.SEMICOLON):
-              self._HandleError(errors.MISSING_SEMICOLON_AFTER_FUNCTION,
-                  'Missing semicolon after function assigned to a variable',
-                  token, Position.AtEnd(token.string))
-          else:
+          # A semicolon should not be included at the end of a function
+          # declaration.
+          if not state.InAssignedFunction():
             if not last_in_line and token.next.type == Type.SEMICOLON:
-              self._HandleError(errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
+              self._HandleError(
+                  errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
                   'Illegal semicolon after function declaration',
-                  token.next, Position.All(token.next.string))
+                  token.next, position=Position.All(token.next.string))
 
-        if (state.InInterfaceMethod() and last_code.type != Type.START_BLOCK):
+        # A semicolon should be included at the end of a function expression
+        # that is not immediately called or used by a dot operator.
+        if (state.InAssignedFunction() and token.next
+            and token.next.type != Type.SEMICOLON):
+          next_token = tokenutil.GetNextCodeToken(token)
+          is_immediately_used = next_token and (
+              next_token.type == Type.START_PAREN or
+              tokenutil.IsDot(next_token))
+          if not is_immediately_used:
+            self._HandleError(
+                errors.MISSING_SEMICOLON_AFTER_FUNCTION,
+                'Missing semicolon after function assigned to a variable',
+                token, position=Position.AtEnd(token.string))
+
+        if state.InInterfaceMethod() and last_code.type != Type.START_BLOCK:
           self._HandleError(errors.INTERFACE_METHOD_CANNOT_HAVE_CODE,
-              'Interface methods cannot contain code', last_code)
+                            'Interface methods cannot contain code', last_code)
 
       elif (state.IsBlockClose() and
             token.next and token.next.type == Type.SEMICOLON):
-        self._HandleError(errors.REDUNDANT_SEMICOLON,
-            'No semicolon is required to end a code block',
-            token.next, Position.All(token.next.string))
+        if (last_code.metadata.context.parent.type != Context.OBJECT_LITERAL
+            and last_code.metadata.context.type != Context.OBJECT_LITERAL):
+          self._HandleError(
+              errors.REDUNDANT_SEMICOLON,
+              'No semicolon is required to end a code block',
+              token.next, position=Position.All(token.next.string))
 
-    elif type == Type.SEMICOLON:
+    elif token_type == Type.SEMICOLON:
       if token.previous and token.previous.type == Type.WHITESPACE:
-        self._HandleError(errors.EXTRA_SPACE, 'Extra space before ";"',
-            token.previous, Position.All(token.previous.string))
+        self._HandleError(
+            errors.EXTRA_SPACE, 'Extra space before ";"',
+            token.previous, position=Position.All(token.previous.string))
 
       if token.next and token.next.line_number == token.line_number:
         if token.metadata.context.type != Context.FOR_GROUP_BLOCK:
@@ -327,10 +395,11 @@
 
         elif token.next.type not in (
             Type.WHITESPACE, Type.SEMICOLON, Type.END_PAREN):
-          self._HandleError(errors.MISSING_SPACE,
+          self._HandleError(
+              errors.MISSING_SPACE,
               'Missing space after ";" in for statement',
               token.next,
-              Position.AtBeginning())
+              position=Position.AtBeginning())
 
       last_code = token.metadata.last_code
       if last_code and last_code.type == Type.SEMICOLON:
@@ -339,7 +408,8 @@
         # NOTE(user): This is not a perfect check, and will not throw an error
         # for cases like: for (var i = 0;; i < n; i++) {}, but then your code
         # probably won't work either.
-        for_token = tokenutil.CustomSearch(last_code,
+        for_token = tokenutil.CustomSearch(
+            last_code,
             lambda token: token.type == Type.KEYWORD and token.string == 'for',
             end_func=lambda token: token.type == Type.SEMICOLON,
             distance=None,
@@ -347,93 +417,83 @@
 
         if not for_token:
           self._HandleError(errors.REDUNDANT_SEMICOLON, 'Redundant semicolon',
-              token, Position.All(token.string))
+                            token, position=Position.All(token.string))
 
-    elif type == Type.START_PAREN:
-      if token.previous and token.previous.type == Type.KEYWORD:
+    elif token_type == Type.START_PAREN:
+      # Ensure there is a space between a keyword and an opening parenthesis,
+      # unless the keyword is being invoked like a member function.
+      if (token.previous and token.previous.type == Type.KEYWORD and
+          (not token.previous.metadata or
+           not token.previous.metadata.last_code or
+           not token.previous.metadata.last_code.string or
+           token.previous.metadata.last_code.string[-1:] != '.')):
         self._HandleError(errors.MISSING_SPACE, 'Missing space before "("',
-            token, Position.AtBeginning())
+                          token, position=Position.AtBeginning())
       elif token.previous and token.previous.type == Type.WHITESPACE:
         before_space = token.previous.previous
+        # Ensure that there is no extra space before a function invocation,
+        # even if the function being invoked happens to be a keyword.
         if (before_space and before_space.line_number == token.line_number and
-            before_space.type == Type.IDENTIFIER):
-          self._HandleError(errors.EXTRA_SPACE, 'Extra space before "("',
-              token.previous, Position.All(token.previous.string))
+            before_space.type == Type.IDENTIFIER or
+            (before_space.type == Type.KEYWORD and before_space.metadata and
+             before_space.metadata.last_code and
+             before_space.metadata.last_code.string and
+             before_space.metadata.last_code.string[-1:] == '.')):
+          self._HandleError(
+              errors.EXTRA_SPACE, 'Extra space before "("',
+              token.previous, position=Position.All(token.previous.string))
 
-    elif type == Type.START_BRACKET:
+    elif token_type == Type.START_BRACKET:
       self._HandleStartBracket(token, last_non_space_token)
-    elif type in (Type.END_PAREN, Type.END_BRACKET):
+    elif token_type in (Type.END_PAREN, Type.END_BRACKET):
       # Ensure there is no space before closing parentheses, except when
       # it's in a for statement with an omitted section, or when it's at the
       # beginning of a line.
       if (token.previous and token.previous.type == Type.WHITESPACE and
           not token.previous.IsFirstInLine() and
           not (last_non_space_token and last_non_space_token.line_number ==
-                   token.line_number and
+               token.line_number and
                last_non_space_token.type == Type.SEMICOLON)):
-        self._HandleError(errors.EXTRA_SPACE, 'Extra space before "%s"' %
-            token.string, token.previous, Position.All(token.previous.string))
+        self._HandleError(
+            errors.EXTRA_SPACE, 'Extra space before "%s"' %
+            token.string, token.previous,
+            position=Position.All(token.previous.string))
 
-      if token.type == Type.END_BRACKET:
-        last_code = token.metadata.last_code
-        if last_code.IsOperator(','):
-          self._HandleError(errors.COMMA_AT_END_OF_LITERAL,
-              'Illegal comma at end of array literal', last_code,
-              Position.All(last_code.string))
-
-    elif type == Type.WHITESPACE:
+    elif token_type == Type.WHITESPACE:
       if self.ILLEGAL_TAB.search(token.string):
         if token.IsFirstInLine():
           if token.next:
-            self._HandleError(errors.ILLEGAL_TAB,
+            self._HandleError(
+                errors.ILLEGAL_TAB,
                 'Illegal tab in whitespace before "%s"' % token.next.string,
-                token, Position.All(token.string))
+                token, position=Position.All(token.string))
           else:
-            self._HandleError(errors.ILLEGAL_TAB,
+            self._HandleError(
+                errors.ILLEGAL_TAB,
                 'Illegal tab in whitespace',
-                token, Position.All(token.string))
+                token, position=Position.All(token.string))
         else:
-          self._HandleError(errors.ILLEGAL_TAB,
+          self._HandleError(
+              errors.ILLEGAL_TAB,
               'Illegal tab in whitespace after "%s"' % token.previous.string,
-              token, Position.All(token.string))
+              token, position=Position.All(token.string))
 
       # Check whitespace length if it's not the first token of the line and
       # if it's not immediately before a comment.
       if last_in_line:
         # Check for extra whitespace at the end of a line.
         self._HandleError(errors.EXTRA_SPACE, 'Extra space at end of line',
-            token, Position.All(token.string))
+                          token, position=Position.All(token.string))
       elif not first_in_line and not token.next.IsComment():
         if token.length > 1:
-          self._HandleError(errors.EXTRA_SPACE, 'Extra space after "%s"' %
+          self._HandleError(
+              errors.EXTRA_SPACE, 'Extra space after "%s"' %
               token.previous.string, token,
-              Position(1, len(token.string) - 1))
+              position=Position(1, len(token.string) - 1))
 
-    elif type == Type.OPERATOR:
-      last_code = token.metadata.last_code
-
-      if not self._ExpectSpaceBeforeOperator(token):
-        if (token.previous and token.previous.type == Type.WHITESPACE and
-            last_code and last_code.type in (Type.NORMAL, Type.IDENTIFIER)):
-          self._HandleError(errors.EXTRA_SPACE,
-              'Extra space before "%s"' % token.string, token.previous,
-              Position.All(token.previous.string))
-
-      elif (token.previous and
-            not token.previous.IsComment() and
-            token.previous.type in Type.EXPRESSION_ENDER_TYPES):
-        self._HandleError(errors.MISSING_SPACE,
-                          'Missing space before "%s"' % token.string, token,
-                          Position.AtBeginning())
-
-      # Check that binary operators are not used to start lines.
-      if ((not last_code or last_code.line_number != token.line_number) and
-          not token.metadata.IsUnaryOperator()):
-        self._HandleError(errors.LINE_STARTS_WITH_OPERATOR,
-            'Binary operator should go on previous line "%s"' % token.string,
-            token)
-
-    elif type == Type.DOC_FLAG:
+    elif token_type == Type.OPERATOR:
+      self._CheckOperator(token)
+    elif token_type == Type.DOC_FLAG:
       flag = token.attached_object
 
       if flag.flag_type == 'bug':
@@ -443,21 +503,22 @@
 
         if not string.isdigit():
           self._HandleError(errors.NO_BUG_NUMBER_AFTER_BUG_TAG,
-              '@bug should be followed by a bug number', token)
+                            '@bug should be followed by a bug number', token)
 
       elif flag.flag_type == 'suppress':
         if flag.type is None:
           # A syntactically invalid suppress tag will get tokenized as a normal
           # flag, indicating an error.
-          self._HandleError(errors.INCORRECT_SUPPRESS_SYNTAX,
+          self._HandleError(
+              errors.INCORRECT_SUPPRESS_SYNTAX,
               'Invalid suppress syntax: should be @suppress {errortype}. '
               'Spaces matter.', token)
         else:
-          for suppress_type in flag.type.split('|'):
+          for suppress_type in flag.jstype.IterIdentifiers():
             if suppress_type not in state.GetDocFlag().SUPPRESS_TYPES:
-              self._HandleError(errors.INVALID_SUPPRESS_TYPE,
-                'Invalid suppression type: %s' % suppress_type,
-                token)
+              self._HandleError(
+                  errors.INVALID_SUPPRESS_TYPE,
+                  'Invalid suppression type: %s' % suppress_type, token)
 
       elif (error_check.ShouldCheck(Rule.WELL_FORMED_AUTHOR) and
             flag.flag_type == 'author'):
@@ -478,12 +539,12 @@
           if num_spaces < 1:
             self._HandleError(errors.MISSING_SPACE,
                               'Missing space after email address',
-                              token.next, Position(result.start(2), 0))
+                              token.next, position=Position(result.start(2), 0))
           elif num_spaces > 1:
-            self._HandleError(errors.EXTRA_SPACE,
-                              'Extra space after email address',
-                              token.next,
-                              Position(result.start(2) + 1, num_spaces - 1))
+            self._HandleError(
+                errors.EXTRA_SPACE, 'Extra space after email address',
+                token.next,
+                position=Position(result.start(2) + 1, num_spaces - 1))
 
           # Check for extra spaces before email address. Can't be too few, if
           # not at least one we wouldn't match @author tag.
@@ -491,81 +552,61 @@
           if num_spaces > 1:
             self._HandleError(errors.EXTRA_SPACE,
                               'Extra space before email address',
-                              token.next, Position(1, num_spaces - 1))
+                              token.next, position=Position(1, num_spaces - 1))
 
       elif (flag.flag_type in state.GetDocFlag().HAS_DESCRIPTION and
             not self._limited_doc_checks):
         if flag.flag_type == 'param':
           if flag.name is None:
             self._HandleError(errors.MISSING_JSDOC_PARAM_NAME,
-                'Missing name in @param tag', token)
+                              'Missing name in @param tag', token)
 
         if not flag.description or flag.description is None:
           flag_name = token.type
           if 'name' in token.values:
             flag_name = '@' + token.values['name']
-          self._HandleError(errors.MISSING_JSDOC_TAG_DESCRIPTION,
-              'Missing description in %s tag' % flag_name, token)
+
+          if flag_name not in self.JSDOC_FLAGS_DESCRIPTION_NOT_REQUIRED:
+            self._HandleError(
+                errors.MISSING_JSDOC_TAG_DESCRIPTION,
+                'Missing description in %s tag' % flag_name, token)
         else:
           self._CheckForMissingSpaceBeforeToken(flag.description_start_token)
 
-          # We want punctuation to be inside of any tags ending a description,
-          # so strip tags before checking description. See bug 1127192. Note
-          # that depending on how lines break, the real description end token
-          # may consist only of stripped html and the effective end token can
-          # be different.
-          end_token = flag.description_end_token
-          end_string = htmlutil.StripTags(end_token.string).strip()
-          while (end_string == '' and not
-                 end_token.type in Type.FLAG_ENDING_TYPES):
-            end_token = end_token.previous
-            if end_token.type in Type.FLAG_DESCRIPTION_TYPES:
-              end_string = htmlutil.StripTags(end_token.string).rstrip()
-
-          if not (end_string.endswith('.') or end_string.endswith('?') or
-              end_string.endswith('!')):
-            # Find the position for the missing punctuation, inside of any html
-            # tags.
-            desc_str = end_token.string.rstrip()
-            while desc_str.endswith('>'):
-              start_tag_index = desc_str.rfind('<')
-              if start_tag_index < 0:
-                break
-              desc_str = desc_str[:start_tag_index].rstrip()
-            end_position = Position(len(desc_str), 0)
-
-            self._HandleError(
-                errors.JSDOC_TAG_DESCRIPTION_ENDS_WITH_INVALID_CHARACTER,
-                ('%s descriptions must end with valid punctuation such as a '
-                 'period.' % token.string),
-                end_token, end_position)
-
-      if flag.flag_type in state.GetDocFlag().HAS_TYPE:
+      if flag.HasType():
         if flag.type_start_token is not None:
           self._CheckForMissingSpaceBeforeToken(
               token.attached_object.type_start_token)
 
-        if flag.type and flag.type != '' and not flag.type.isspace():
-          self._CheckJsDocType(token)
+        if flag.jstype and not flag.jstype.IsEmpty():
+          self._CheckJsDocType(token, flag.jstype)
 
-    if type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
-        if (token.values['name'] not in state.GetDocFlag().LEGAL_DOC and
-            token.values['name'] not in FLAGS.custom_jsdoc_tags):
-          self._HandleError(errors.INVALID_JSDOC_TAG,
-              'Invalid JsDoc tag: %s' % token.values['name'], token)
+          if error_check.ShouldCheck(Rule.BRACES_AROUND_TYPE) and (
+              flag.type_start_token.type != Type.DOC_START_BRACE or
+              flag.type_end_token.type != Type.DOC_END_BRACE):
+            self._HandleError(
+                errors.MISSING_BRACES_AROUND_TYPE,
+                'Type must always be surrounded by curly braces.', token)
 
-        if (error_check.ShouldCheck(Rule.NO_BRACES_AROUND_INHERIT_DOC) and
-            token.values['name'] == 'inheritDoc' and
-            type == Type.DOC_INLINE_FLAG):
-          self._HandleError(errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC,
-              'Unnecessary braces around @inheritDoc',
-              token)
+    if token_type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
+      if (token.values['name'] not in state.GetDocFlag().LEGAL_DOC and
+          token.values['name'] not in FLAGS.custom_jsdoc_tags):
+        self._HandleError(
+            errors.INVALID_JSDOC_TAG,
+            'Invalid JsDoc tag: %s' % token.values['name'], token)
 
-    elif type == Type.SIMPLE_LVALUE:
+      if (error_check.ShouldCheck(Rule.NO_BRACES_AROUND_INHERIT_DOC) and
+          token.values['name'] == 'inheritDoc' and
+          token_type == Type.DOC_INLINE_FLAG):
+        self._HandleError(errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC,
+                          'Unnecessary braces around @inheritDoc',
+                          token)
+
+    elif token_type == Type.SIMPLE_LVALUE:
       identifier = token.values['identifier']
 
       if ((not state.InFunction() or state.InConstructor()) and
-          not state.InParentheses() and not state.InObjectLiteralDescendant()):
+          state.InTopLevel() and not state.InObjectLiteralDescendant()):
         jsdoc = state.GetDocComment()
         if not state.HasDocComment(identifier):
           # Only test for documentation on identifiers with .s in them to
@@ -577,9 +618,10 @@
               self._limited_doc_checks):
             comment = state.GetLastComment()
             if not (comment and comment.lower().count('jsdoc inherited')):
-              self._HandleError(errors.MISSING_MEMBER_DOCUMENTATION,
+              self._HandleError(
+                  errors.MISSING_MEMBER_DOCUMENTATION,
                   "No docs found for member '%s'" % identifier,
-                  token);
+                  token)
         elif jsdoc and (not state.InConstructor() or
                         identifier.startswith('this.')):
           # We are at the top level and the function/member is documented.
@@ -589,43 +631,49 @@
             #
             # @inheritDoc is deprecated in favor of using @override, and they
             if (jsdoc.HasFlag('override') and not jsdoc.HasFlag('constructor')
-                and not ('accessControls' in jsdoc.suppressions)):
-              self._HandleError(errors.INVALID_OVERRIDE_PRIVATE,
+                and ('accessControls' not in jsdoc.suppressions)):
+              self._HandleError(
+                  errors.INVALID_OVERRIDE_PRIVATE,
                   '%s should not override a private member.' % identifier,
                   jsdoc.GetFlag('override').flag_token)
             if (jsdoc.HasFlag('inheritDoc') and not jsdoc.HasFlag('constructor')
-                and not ('accessControls' in jsdoc.suppressions)):
-              self._HandleError(errors.INVALID_INHERIT_DOC_PRIVATE,
+                and ('accessControls' not in jsdoc.suppressions)):
+              self._HandleError(
+                  errors.INVALID_INHERIT_DOC_PRIVATE,
                   '%s should not inherit from a private member.' % identifier,
                   jsdoc.GetFlag('inheritDoc').flag_token)
             if (not jsdoc.HasFlag('private') and
-                not ('underscore' in jsdoc.suppressions) and not
+                ('underscore' not in jsdoc.suppressions) and not
                 ((jsdoc.HasFlag('inheritDoc') or jsdoc.HasFlag('override')) and
                  ('accessControls' in jsdoc.suppressions))):
-              self._HandleError(errors.MISSING_PRIVATE,
+              self._HandleError(
+                  errors.MISSING_PRIVATE,
                   'Member "%s" must have @private JsDoc.' %
                   identifier, token)
             if jsdoc.HasFlag('private') and 'underscore' in jsdoc.suppressions:
-              self._HandleError(errors.UNNECESSARY_SUPPRESS,
+              self._HandleError(
+                  errors.UNNECESSARY_SUPPRESS,
                   '@suppress {underscore} is not necessary with @private',
                   jsdoc.suppressions['underscore'])
           elif (jsdoc.HasFlag('private') and
                 not self.InExplicitlyTypedLanguage()):
             # It is convention to hide public fields in some ECMA
             # implementations from documentation using the @private tag.
-            self._HandleError(errors.EXTRA_PRIVATE,
+            self._HandleError(
+                errors.EXTRA_PRIVATE,
                 'Member "%s" must not have @private JsDoc' %
                 identifier, token)
 
           # These flags are only legal on localizable message definitions;
           # such variables always begin with the prefix MSG_.
-          for f in ('desc', 'hidden', 'meaning'):
-            if (jsdoc.HasFlag(f)
-              and not identifier.startswith('MSG_')
-              and identifier.find('.MSG_') == -1):
-              self._HandleError(errors.INVALID_USE_OF_DESC_TAG,
-                  'Member "%s" should not have @%s JsDoc' % (identifier, f),
-                  token)
+          if not identifier.startswith('MSG_') and '.MSG_' not in identifier:
+            for f in ('desc', 'hidden', 'meaning'):
+              if jsdoc.HasFlag(f):
+                self._HandleError(
+                    errors.INVALID_USE_OF_DESC_TAG,
+                    'Member "%s" does not start with MSG_ and thus '
+                    'should not have @%s JsDoc' % (identifier, f),
+                    token)
 
       # Check for illegally assigning live objects as prototype property values.
       index = identifier.find('.prototype.')
@@ -636,28 +684,30 @@
         if next_code and (
             next_code.type in (Type.START_BRACKET, Type.START_BLOCK) or
             next_code.IsOperator('new')):
-          self._HandleError(errors.ILLEGAL_PROTOTYPE_MEMBER_VALUE,
+          self._HandleError(
+              errors.ILLEGAL_PROTOTYPE_MEMBER_VALUE,
               'Member %s cannot have a non-primitive value' % identifier,
               token)
 
-    elif type == Type.END_PARAMETERS:
+    elif token_type == Type.END_PARAMETERS:
       # Find extra space at the end of parameter lists.  We check the token
       # prior to the current one when it is a closing paren.
       if (token.previous and token.previous.type == Type.PARAMETERS
           and self.ENDS_WITH_SPACE.search(token.previous.string)):
         self._HandleError(errors.EXTRA_SPACE, 'Extra space before ")"',
-            token.previous)
+                          token.previous)
 
       jsdoc = state.GetDocComment()
       if state.GetFunction().is_interface:
         if token.previous and token.previous.type == Type.PARAMETERS:
-          self._HandleError(errors.INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS,
+          self._HandleError(
+              errors.INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS,
               'Interface constructor cannot have parameters',
               token.previous)
       elif (state.InTopLevel() and jsdoc and not jsdoc.HasFlag('see')
-          and not jsdoc.InheritsDocumentation()
-          and not state.InObjectLiteralDescendant() and not
-          jsdoc.IsInvalidated()):
+            and not jsdoc.InheritsDocumentation()
+            and not state.InObjectLiteralDescendant() and not
+            jsdoc.IsInvalidated()):
         distance, edit = jsdoc.CompareParameters(state.GetParams())
         if distance:
           params_iter = iter(state.GetParams())
@@ -678,12 +728,13 @@
             elif op == 'D':
               # Deletion
               self._HandleError(errors.EXTRA_PARAMETER_DOCUMENTATION,
-                  'Found docs for non-existing parameter: "%s"' %
-                  docs_iter.next(), token)
+                                'Found docs for non-existing parameter: "%s"' %
+                                docs_iter.next(), token)
             elif op == 'S':
               # Substitution
               if not self._limited_doc_checks:
-                self._HandleError(errors.WRONG_PARAMETER_DOCUMENTATION,
+                self._HandleError(
+                    errors.WRONG_PARAMETER_DOCUMENTATION,
                     'Parameter mismatch: got "%s", expected "%s"' %
                     (params_iter.next(), docs_iter.next()), token)
 
@@ -692,32 +743,33 @@
               params_iter.next()
               docs_iter.next()
 
-    elif type == Type.STRING_TEXT:
+    elif token_type == Type.STRING_TEXT:
       # If this is the first token after the start of the string, but it's at
       # the end of a line, we know we have a multi-line string.
-      if token.previous.type in (Type.SINGLE_QUOTE_STRING_START,
+      if token.previous.type in (
+          Type.SINGLE_QUOTE_STRING_START,
           Type.DOUBLE_QUOTE_STRING_START) and last_in_line:
         self._HandleError(errors.MULTI_LINE_STRING,
-            'Multi-line strings are not allowed', token)
-
+                          'Multi-line strings are not allowed', token)
 
     # This check is orthogonal to the ones above, and repeats some types, so
     # it is a plain if and not an elif.
     if token.type in Type.COMMENT_TYPES:
       if self.ILLEGAL_TAB.search(token.string):
         self._HandleError(errors.ILLEGAL_TAB,
-            'Illegal tab in comment "%s"' % token.string, token)
+                          'Illegal tab in comment "%s"' % token.string, token)
 
       trimmed = token.string.rstrip()
       if last_in_line and token.string != trimmed:
         # Check for extra whitespace at the end of a line.
-        self._HandleError(errors.EXTRA_SPACE, 'Extra space at end of line',
-            token, Position(len(trimmed), len(token.string) - len(trimmed)))
+        self._HandleError(
+            errors.EXTRA_SPACE, 'Extra space at end of line', token,
+            position=Position(len(trimmed), len(token.string) - len(trimmed)))
 
     # This check is also orthogonal since it is based on metadata.
     if token.metadata.is_implied_semicolon:
       self._HandleError(errors.MISSING_SEMICOLON,
-          'Missing semicolon at end of line', token)
+                        'Missing semicolon at end of line', token)
 
   def _HandleStartBracket(self, token, last_non_space_token):
     """Handles a token that is an open bracket.
@@ -729,8 +781,9 @@
     if (not token.IsFirstInLine() and token.previous.type == Type.WHITESPACE and
         last_non_space_token and
         last_non_space_token.type in Type.EXPRESSION_ENDER_TYPES):
-      self._HandleError(errors.EXTRA_SPACE, 'Extra space before "["',
-                        token.previous, Position.All(token.previous.string))
+      self._HandleError(
+          errors.EXTRA_SPACE, 'Extra space before "["',
+          token.previous, position=Position.All(token.previous.string))
     # If the [ token is the first token in a line we shouldn't complain
     # about a missing space before [.  This is because some Ecma script
     # languages allow syntax like:
@@ -746,29 +799,31 @@
     # should trigger a proper indentation warning message as [ is not indented
     # by four spaces.
     elif (not token.IsFirstInLine() and token.previous and
-          not token.previous.type in (
+          token.previous.type not in (
               [Type.WHITESPACE, Type.START_PAREN, Type.START_BRACKET] +
               Type.EXPRESSION_ENDER_TYPES)):
       self._HandleError(errors.MISSING_SPACE, 'Missing space before "["',
-                        token, Position.AtBeginning())
+                        token, position=Position.AtBeginning())
 
-  def Finalize(self, state, tokenizer_mode):
+  def Finalize(self, state):
+    """Perform all checks that need to occur after all lines are processed.
+
+    Args:
+      state: State of the parser after parsing all tokens.
+    """
     last_non_space_token = state.GetLastNonSpaceToken()
     # Check last line for ending with newline.
-    if False and state.GetLastLine() and not (state.GetLastLine().isspace() or
+    if state.GetLastLine() and not (
+        state.GetLastLine().isspace() or
         state.GetLastLine().rstrip('\n\r\f') != state.GetLastLine()):
       self._HandleError(
           errors.FILE_MISSING_NEWLINE,
           'File does not end with new line.  (%s)' % state.GetLastLine(),
           last_non_space_token)
 
-    # Check that the mode is not mid comment, argument list, etc.
-    if not tokenizer_mode == Modes.TEXT_MODE:
-      self._HandleError(
-          errors.FILE_IN_BLOCK,
-          'File ended in mode "%s".' % tokenizer_mode,
-          last_non_space_token)
-
     try:
       self._indentation.Finalize()
     except Exception, e:
@@ -778,7 +833,11 @@
           last_non_space_token)
 
   def GetLongLineExceptions(self):
-    """Gets a list of regexps for lines which can be longer than the limit."""
+    """Gets a list of regexps for lines which can be longer than the limit.
+
+    Returns:
+      A list of regexps, used as matches (rather than searches).
+    """
     return []
 
   def InExplicitlyTypedLanguage(self):
diff --git a/catapult/third_party/closure_linter/closure_linter/ecmametadatapass.py b/catapult/third_party/closure_linter/closure_linter/ecmametadatapass.py
old mode 100755
new mode 100644
index 2c797b3..5062161
--- a/catapult/third_party/closure_linter/closure_linter/ecmametadatapass.py
+++ b/catapult/third_party/closure_linter/closure_linter/ecmametadatapass.py
@@ -115,18 +115,30 @@
   BLOCK_TYPES = frozenset([
       ROOT, BLOCK, CASE_BLOCK, FOR_GROUP_BLOCK, IMPLIED_BLOCK])
 
-  def __init__(self, type, start_token, parent):
+  def __init__(self, context_type, start_token, parent=None):
     """Initializes the context object.
 
     Args:
-      type: The context type.
+      context_type: The context type.
       start_token: The token where this context starts.
       parent: The parent context.
+
+    Attributes:
+      type: The context type.
+      start_token: The token where this context starts.
+      end_token: The token where this context ends.
+      parent: The parent context.
+      children: The child contexts of this context, in order.
     """
-    self.type = type
+    self.type = context_type
     self.start_token = start_token
     self.end_token = None
-    self.parent = parent
+
+    self.parent = None
+    self.children = []
+
+    if parent:
+      parent.AddChild(self)
 
   def __repr__(self):
     """Returns a string representation of the context object."""
@@ -137,6 +149,32 @@
       context = context.parent
     return 'Context(%s)' % ' > '.join(stack)
 
+  def AddChild(self, child):
+    """Adds a child to this context and sets child's parent to this context.
+
+    Args:
+      child: A child EcmaContext.  The child's parent will be set to this
+          context.
+    """
+
+    child.parent = self
+
+    self.children.append(child)
+    self.children.sort(EcmaContext._CompareContexts)
+
+  def GetRoot(self):
+    """Get the root context that contains this context, if any."""
+    context = self
+    while context:
+      if context.type is EcmaContext.ROOT:
+        return context
+      context = context.parent
+
+  @staticmethod
+  def _CompareContexts(context1, context2):
+    """Sorts contexts 1 and 2 by start token document position."""
+    return tokenutil.Compare(context1.start_token, context2.start_token)
+
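# --- Illustrative sketch, not part of the patch: the parent/children wiring
# that AddChild and GetRoot introduce above, using a simplified stand-in.
# Real EcmaContext objects additionally keep children sorted by start-token
# position via tokenutil.Compare, which this mock omits.
class MiniContext(object):
  ROOT = 'root'

  def __init__(self, context_type, parent=None):
    self.type = context_type
    self.parent = None
    self.children = []
    if parent:
      parent.AddChild(self)

  def AddChild(self, child):
    child.parent = self
    self.children.append(child)

  def GetRoot(self):
    context = self
    while context:
      if context.type is MiniContext.ROOT:
        return context
      context = context.parent


root = MiniContext(MiniContext.ROOT)
block = MiniContext('block', parent=root)
statement = MiniContext('statement', parent=block)
assert statement.GetRoot() is root and block in root.children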
 
 class EcmaMetaData(object):
   """Token metadata for EcmaScript languages.
@@ -146,6 +184,11 @@
     context: The context this token appears in.
     operator_type: The operator type, will be one of the *_OPERATOR constants
         defined below.
+    aliased_symbol: The full symbol being identified, as a string (e.g. an
+        'XhrIo' alias for 'goog.net.XhrIo'). Only applicable to identifier
+        tokens. This is set in aliaspass.py and is a best guess.
+    is_alias_definition: True if the symbol is part of an alias definition.
+        If so, these symbols won't be counted towards goog.requires/provides.
   """
 
   UNARY_OPERATOR = 'unary'
@@ -164,6 +207,8 @@
     self.is_implied_semicolon = False
     self.is_implied_block = False
     self.is_implied_block_close = False
+    self.aliased_symbol = None
+    self.is_alias_definition = False
 
   def __repr__(self):
     """Returns a string representation of the context object."""
@@ -172,6 +217,8 @@
       parts.append('optype: %r' % self.operator_type)
     if self.is_implied_semicolon:
       parts.append('implied;')
+    if self.aliased_symbol:
+      parts.append('alias for: %s' % self.aliased_symbol)
     return 'MetaData(%s)' % ', '.join(parts)
 
   def IsUnaryOperator(self):
@@ -196,21 +243,21 @@
     self._AddContext(EcmaContext.ROOT)
     self._last_code = None
 
-  def _CreateContext(self, type):
+  def _CreateContext(self, context_type):
     """Overridable by subclasses to create the appropriate context type."""
-    return EcmaContext(type, self._token, self._context)
+    return EcmaContext(context_type, self._token, self._context)
 
   def _CreateMetaData(self):
     """Overridable by subclasses to create the appropriate metadata type."""
     return EcmaMetaData()
 
-  def _AddContext(self, type):
+  def _AddContext(self, context_type):
     """Adds a context of the given type to the context stack.
 
     Args:
-      type: The type of context to create
+      context_type: The type of context to create
     """
-    self._context  = self._CreateContext(type)
+    self._context = self._CreateContext(context_type)
 
   def _PopContext(self):
     """Moves up one level in the context stack.
@@ -233,7 +280,7 @@
     """Pops the context stack until a context of the given type is popped.
 
     Args:
-      stop_types: The types of context to pop to - stops at the first match.
+      *stop_types: The types of context to pop to - stops at the first match.
 
     Returns:
       The context object of the given type that was popped.
@@ -364,10 +411,14 @@
       self._AddContext(EcmaContext.SWITCH)
 
     elif (token_type == TokenType.KEYWORD and
-          token.string in ('case', 'default')):
+          token.string in ('case', 'default') and
+          self._context.type != EcmaContext.OBJECT_LITERAL):
       # Pop up to but not including the switch block.
       while self._context.parent.type != EcmaContext.SWITCH:
         self._PopContext()
+        if self._context.parent is None:
+          raise ParseError(token, 'Encountered case/default statement '
+                           'without switch statement')
 
     elif token.IsOperator('?'):
       self._AddContext(EcmaContext.TERNARY_TRUE)
@@ -386,9 +437,9 @@
       # ternary_false > ternary_true > statement > root
       elif (self._context.type == EcmaContext.TERNARY_FALSE and
             self._context.parent.type == EcmaContext.TERNARY_TRUE):
-           self._PopContext() # Leave current ternary false context.
-           self._PopContext() # Leave current parent ternary true
-           self._AddContext(EcmaContext.TERNARY_FALSE)
+        self._PopContext()  # Leave current ternary false context.
+        self._PopContext()  # Leave current parent ternary true
+        self._AddContext(EcmaContext.TERNARY_FALSE)
 
       elif self._context.parent.type == EcmaContext.SWITCH:
         self._AddContext(EcmaContext.CASE_BLOCK)
@@ -444,25 +495,27 @@
       is_implied_block = self._context == EcmaContext.IMPLIED_BLOCK
       is_last_code_in_line = token.IsCode() and (
           not next_code or next_code.line_number != token.line_number)
-      is_continued_identifier = (token.type == TokenType.IDENTIFIER and
-                                 token.string.endswith('.'))
       is_continued_operator = (token.type == TokenType.OPERATOR and
                                not token.metadata.IsUnaryPostOperator())
       is_continued_dot = token.string == '.'
       next_code_is_operator = next_code and next_code.type == TokenType.OPERATOR
-      next_code_is_dot = next_code and next_code.string == '.'
-      is_end_of_block = (token.type == TokenType.END_BLOCK and
+      is_end_of_block = (
+          token.type == TokenType.END_BLOCK and
           token.metadata.context.type != EcmaContext.OBJECT_LITERAL)
       is_multiline_string = token.type == TokenType.STRING_TEXT
+      is_continued_var_decl = (token.IsKeyword('var') and
+                               next_code and
+                               (next_code.type in [TokenType.IDENTIFIER,
+                                                   TokenType.SIMPLE_LVALUE]) and
+                               token.line_number < next_code.line_number)
       next_code_is_block = next_code and next_code.type == TokenType.START_BLOCK
       if (is_last_code_in_line and
           self._StatementCouldEndInContext() and
           not is_multiline_string and
           not is_end_of_block and
-          not is_continued_identifier and
+          not is_continued_var_decl and
           not is_continued_operator and
           not is_continued_dot and
-          not next_code_is_dot and
           not next_code_is_operator and
           not is_implied_block and
           not next_code_is_block):
@@ -470,7 +523,7 @@
         self._EndStatement()
 
   def _StatementCouldEndInContext(self):
-    """Returns whether the current statement (if any) may end in this context."""
+    """Returns if the current statement (if any) may end in this context."""
     # In the basic statement or variable declaration context, statement can
     # always end in this context.
     if self._context.type in (EcmaContext.STATEMENT, EcmaContext.VAR):
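
The EcmaContext changes above replace the flat parent pointer with a small tree: passing a parent to the constructor now registers the new context via parent.AddChild(self), children are kept sorted by start-token position, and GetRoot() walks up to the enclosing ROOT context. Below is a minimal standalone sketch of that registration pattern, using a toy MiniContext class with integer positions rather than the closure_linter API itself:

# Toy illustration of the parent/child registration pattern used by
# EcmaContext above; integer positions stand in for start tokens.
class MiniContext(object):
  ROOT = 'root'

  def __init__(self, context_type, start_pos, parent=None):
    self.type = context_type
    self.start_pos = start_pos
    self.parent = None
    self.children = []
    if parent:
      parent.AddChild(self)

  def AddChild(self, child):
    child.parent = self
    self.children.append(child)
    # The real EcmaContext sorts by start-token document position.
    self.children.sort(key=lambda c: c.start_pos)

  def GetRoot(self):
    context = self
    while context:
      if context.type == MiniContext.ROOT:
        return context
      context = context.parent

root = MiniContext(MiniContext.ROOT, 0)
block = MiniContext('block', 10, parent=root)
statement = MiniContext('statement', 12, parent=block)
assert statement.GetRoot() is root
assert root.children == [block] and block.children == [statement]
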
diff --git a/catapult/third_party/closure_linter/closure_linter/error_check.py b/catapult/third_party/closure_linter/closure_linter/error_check.py
old mode 100755
new mode 100644
index ed243e9..5fe2b92
--- a/catapult/third_party/closure_linter/closure_linter/error_check.py
+++ b/catapult/third_party/closure_linter/closure_linter/error_check.py
@@ -34,7 +34,9 @@
   NO_BRACES_AROUND_INHERIT_DOC = 'no_braces_around_inherit_doc'
   BRACES_AROUND_TYPE = 'braces_around_type'
   OPTIONAL_TYPE_MARKER = 'optional_type_marker'
+  VARIABLE_ARG_MARKER = 'variable_arg_marker'
   UNUSED_PRIVATE_MEMBERS = 'unused_private_members'
+  UNUSED_LOCAL_VARIABLES = 'unused_local_variables'
 
   # Rule to raise all known errors.
   ALL = 'all'
@@ -46,7 +48,8 @@
                              WELL_FORMED_AUTHOR,
                              NO_BRACES_AROUND_INHERIT_DOC,
                              BRACES_AROUND_TYPE,
-                             OPTIONAL_TYPE_MARKER])
+                             OPTIONAL_TYPE_MARKER,
+                             VARIABLE_ARG_MARKER])
 
 
 flags.DEFINE_boolean('strict', False,
@@ -69,7 +72,9 @@
                          ' - ' + Rule.OPTIONAL_TYPE_MARKER + ': checks correct '
                          'use of optional marker = in param types.\n'
                          ' - ' + Rule.UNUSED_PRIVATE_MEMBERS + ': checks for '
-                         'unused private variables.\n')
+                         'unused private variables.\n'
+                         ' - ' + Rule.UNUSED_LOCAL_VARIABLES + ': checks for '
+                         'unused local variables.\n')
 
 
 def ShouldCheck(rule):
@@ -84,6 +89,8 @@
   Returns:
     True if the rule should be checked according to the flags, otherwise False.
   """
+  if 'no_' + rule in FLAGS.jslint_error:
+    return False
   if rule in FLAGS.jslint_error or Rule.ALL in FLAGS.jslint_error:
     return True
   # Checks strict rules.
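
The early return added to ShouldCheck above means a rule can now be switched off explicitly by listing it with a 'no_' prefix in --jslint_error, and that override wins even when 'all' or --strict would otherwise enable it. A small standalone sketch of the same precedence, with plain lists standing in for the gflags values:

# Standalone sketch: an explicit 'no_<rule>' entry disables a rule even when
# 'all' (or the rule itself) is also listed.
def should_check(rule, jslint_error, strict=False, strict_rules=()):
  if 'no_' + rule in jslint_error:
    return False
  if rule in jslint_error or 'all' in jslint_error:
    return True
  return strict and rule in strict_rules

assert should_check('optional_type_marker', ['all'])
assert not should_check('optional_type_marker',
                        ['all', 'no_optional_type_marker'])
assert should_check('variable_arg_marker', [], strict=True,
                    strict_rules=('variable_arg_marker',))
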
diff --git a/catapult/third_party/closure_linter/closure_linter/error_fixer.py b/catapult/third_party/closure_linter/closure_linter/error_fixer.py
old mode 100755
new mode 100644
index 92e2221..221550a
--- a/catapult/third_party/closure_linter/closure_linter/error_fixer.py
+++ b/catapult/third_party/closure_linter/closure_linter/error_fixer.py
@@ -16,6 +16,9 @@
 
 """Main class responsible for automatically fixing simple style violations."""
 
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
 __author__ = 'robbyw@google.com (Robert Walker)'
 
 import re
@@ -37,16 +40,19 @@
 # Regex to represent common mistake inverting author name and email as
 # @author User Name (user@company)
 INVERTED_AUTHOR_SPEC = re.compile(r'(?P<leading_whitespace>\s*)'
-                                  '(?P<name>[^(]+)'
-                                  '(?P<whitespace_after_name>\s+)'
-                                  '\('
-                                  '(?P<email>[^\s]+@[^)\s]+)'
-                                  '\)'
-                                  '(?P<trailing_characters>.*)')
+                                  r'(?P<name>[^(]+)'
+                                  r'(?P<whitespace_after_name>\s+)'
+                                  r'\('
+                                  r'(?P<email>[^\s]+@[^)\s]+)'
+                                  r'\)'
+                                  r'(?P<trailing_characters>.*)')
 
 FLAGS = flags.FLAGS
 flags.DEFINE_boolean('disable_indentation_fixing', False,
                      'Whether to disable automatic fixing of indentation.')
+flags.DEFINE_list('fix_error_codes', [], 'A list of specific error codes to '
+                  'fix. Defaults to all supported error codes when empty. '
+                  'See errors.py for a list of error codes.')
 
 
 class ErrorFixer(errorhandler.ErrorHandler):
@@ -65,6 +71,12 @@
     self._file_token = None
     self._external_file = external_file
 
+    try:
+      self._fix_error_codes = set([errors.ByName(error.upper()) for error in
+                                   FLAGS.fix_error_codes])
+    except KeyError as ke:
+      raise ValueError('Unknown error code ' + ke.args[0])
+
   def HandleFile(self, filename, first_token):
     """Notifies this ErrorPrinter that subsequent errors are in filename.
 
@@ -73,6 +85,7 @@
       first_token: The first token in the file.
     """
     self._file_name = filename
+    self._file_is_html = filename.endswith('.html') or filename.endswith('.htm')
     self._file_token = first_token
     self._file_fix_count = 0
     self._file_changed_lines = set()
@@ -90,6 +103,46 @@
       for token in tokens:
         self._file_changed_lines.add(token.line_number)
 
+  def _FixJsDocPipeNull(self, js_type):
+    """Change number|null or null|number to ?number.
+
+    Args:
+      js_type: The typeannotation.TypeAnnotation instance to fix.
+    """
+
+    # Recurse into all sub_types if the error was at a deeper level.
+    map(self._FixJsDocPipeNull, js_type.IterTypes())
+
+    if js_type.type_group and len(js_type.sub_types) == 2:
+      # Find and remove the null sub_type:
+      sub_type = None
+      for sub_type in js_type.sub_types:
+        if sub_type.identifier == 'null':
+          map(tokenutil.DeleteToken, sub_type.tokens)
+          self._AddFix(sub_type.tokens)
+          break
+      else:
+        return
+
+      first_token = js_type.FirstToken()
+      question_mark = Token('?', Type.DOC_TYPE_MODIFIER, first_token.line,
+                            first_token.line_number)
+      tokenutil.InsertTokenBefore(question_mark, first_token)
+      js_type.tokens.insert(0, question_mark)
+      js_type.tokens.remove(sub_type)
+      js_type.sub_types.remove(sub_type)
+      js_type.or_null = True
+
+      # Now also remove the separator, which is in the parent's token list,
+      # either before or after the sub_type; there is exactly one. Scan for it.
+      for token in js_type.tokens:
+        if (token and isinstance(token, Token) and
+            token.type == Type.DOC_TYPE_MODIFIER and token.string == '|'):
+          tokenutil.DeleteToken(token)
+          js_type.tokens.remove(token)
+          self._AddFix(token)
+          break
+
   def HandleError(self, error):
     """Attempts to fix the error.
 
@@ -99,24 +152,11 @@
     code = error.code
     token = error.token
 
+    if self._fix_error_codes and code not in self._fix_error_codes:
+      return
+
     if code == errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL:
-      iterator = token.attached_object.type_start_token
-      if iterator.type == Type.DOC_START_BRACE or iterator.string.isspace():
-        iterator = iterator.next
-
-      leading_space = len(iterator.string) - len(iterator.string.lstrip())
-      iterator.string = '%s?%s' % (' ' * leading_space,
-                                   iterator.string.lstrip())
-
-      # Cover the no outer brace case where the end token is part of the type.
-      while iterator and iterator != token.attached_object.type_end_token.next:
-        iterator.string = iterator.string.replace(
-            'null|', '').replace('|null', '')
-        iterator = iterator.next
-
-      # Create a new flag object with updated type info.
-      token.attached_object = javascriptstatetracker.JsDocFlag(token)
-      self._AddFix(token)
+      self._FixJsDocPipeNull(token.attached_object.jstype)
 
     elif code == errors.JSDOC_MISSING_OPTIONAL_TYPE:
       iterator = token.attached_object.type_end_token
@@ -131,6 +171,19 @@
       token.attached_object = javascriptstatetracker.JsDocFlag(token)
       self._AddFix(token)
 
+    elif code == errors.JSDOC_MISSING_VAR_ARGS_TYPE:
+      iterator = token.attached_object.type_start_token
+      if iterator.type == Type.DOC_START_BRACE or iterator.string.isspace():
+        iterator = iterator.next
+
+      starting_space = len(iterator.string) - len(iterator.string.lstrip())
+      iterator.string = '%s...%s' % (' ' * starting_space,
+                                     iterator.string.lstrip())
+
+      # Create a new flag object with updated type info.
+      token.attached_object = javascriptstatetracker.JsDocFlag(token)
+      self._AddFix(token)
+
     elif code in (errors.MISSING_SEMICOLON_AFTER_FUNCTION,
                   errors.MISSING_SEMICOLON):
       semicolon_token = Token(';', Type.SEMICOLON, token.line,
@@ -143,7 +196,7 @@
     elif code in (errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
                   errors.REDUNDANT_SEMICOLON,
                   errors.COMMA_AT_END_OF_LITERAL):
-      tokenutil.DeleteToken(token)
+      self._DeleteToken(token)
       self._AddFix(token)
 
     elif code == errors.INVALID_JSDOC_TAG:
@@ -156,7 +209,10 @@
       self._AddFix(token)
 
     elif code == errors.MISSING_SPACE:
-      if error.position:
+      if error.fix_data:
+        token.string = error.fix_data
+        self._AddFix(token)
+      elif error.position:
         if error.position.IsAtBeginning():
           tokenutil.InsertSpaceTokenAfter(token.previous)
         elif error.position.IsAtEnd(token.string):
@@ -170,10 +226,6 @@
         token.string = error.position.Set(token.string, '')
         self._AddFix(token)
 
-    elif code == errors.JSDOC_TAG_DESCRIPTION_ENDS_WITH_INVALID_CHARACTER:
-      token.string = error.position.Set(token.string, '.')
-      self._AddFix(token)
-
     elif code == errors.MISSING_LINE:
       if error.position.IsAtBeginning():
         tokenutil.InsertBlankLineAfter(token.previous)
@@ -182,7 +234,7 @@
       self._AddFix(token)
 
     elif code == errors.EXTRA_LINE:
-      tokenutil.DeleteToken(token)
+      self._DeleteToken(token)
       self._AddFix(token)
 
     elif code == errors.WRONG_BLANK_LINE_COUNT:
@@ -197,10 +249,10 @@
         num_lines *= -1
         should_delete = True
 
-      for i in xrange(1, num_lines + 1):
+      for unused_i in xrange(1, num_lines + 1):
         if should_delete:
           # TODO(user): DeleteToken should update line numbers.
-          tokenutil.DeleteToken(token.previous)
+          self._DeleteToken(token.previous)
         else:
           tokenutil.InsertBlankLineAfter(token.previous)
         self._AddFix(token)
@@ -216,8 +268,8 @@
 
         tokenutil.InsertTokenAfter(single_quote_start, token)
         tokenutil.InsertTokenAfter(single_quote_end, end_quote)
-        tokenutil.DeleteToken(token)
-        tokenutil.DeleteToken(end_quote)
+        self._DeleteToken(token)
+        self._DeleteToken(end_quote)
         self._AddFix([token, end_quote])
 
     elif code == errors.MISSING_BRACES_AROUND_TYPE:
@@ -269,6 +321,43 @@
 
       self._AddFix(fixed_tokens)
 
+    elif code == errors.LINE_STARTS_WITH_OPERATOR:
+      # Remove whitespace following the operator so the line starts clean.
+      self._StripSpace(token, before=False)
+
+      # Remove the operator.
+      tokenutil.DeleteToken(token)
+      self._AddFix(token)
+
+      insertion_point = tokenutil.GetPreviousCodeToken(token)
+
+      # Insert a space between the previous token and the new operator.
+      space = Token(' ', Type.WHITESPACE, insertion_point.line,
+                    insertion_point.line_number)
+      tokenutil.InsertTokenAfter(space, insertion_point)
+
+      # Insert the operator on the end of the previous line.
+      new_token = Token(token.string, token.type, insertion_point.line,
+                        insertion_point.line_number)
+      tokenutil.InsertTokenAfter(new_token, space)
+      self._AddFix(new_token)
+
+    elif code == errors.LINE_ENDS_WITH_DOT:
+      # Remove whitespace preceding the operator to remove trailing whitespace.
+      self._StripSpace(token, before=True)
+
+      # Remove the dot.
+      tokenutil.DeleteToken(token)
+      self._AddFix(token)
+
+      insertion_point = tokenutil.GetNextCodeToken(token)
+
+      # Insert the dot at the beginning of the next line of code.
+      new_token = Token(token.string, token.type, insertion_point.line,
+                        insertion_point.line_number)
+      tokenutil.InsertTokenBefore(new_token, insertion_point)
+      self._AddFix(new_token)
+
     elif code == errors.GOOG_REQUIRES_NOT_ALPHABETIZED:
       require_start_token = error.fix_data
       sorter = requireprovidesorter.RequireProvideSorter()
@@ -285,8 +374,8 @@
 
     elif code == errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC:
       if token.previous.string == '{' and token.next.string == '}':
-        tokenutil.DeleteToken(token.previous)
-        tokenutil.DeleteToken(token.next)
+        self._DeleteToken(token.previous)
+        self._DeleteToken(token.next)
         self._AddFix([token])
 
     elif code == errors.INVALID_AUTHOR_TAG_DESCRIPTION:
@@ -305,6 +394,12 @@
       actual = error.position.start
       expected = error.position.length
 
+      # Cases where first token is param but with leading spaces.
+      if (len(token.string.lstrip()) == len(token.string) - actual and
+          token.string.lstrip()):
+        token.string = token.string.lstrip()
+        actual = 0
+
       if token.type in (Type.WHITESPACE, Type.PARAMETERS) and actual != 0:
         token.string = token.string.lstrip() + (' ' * expected)
         self._AddFix([token])
@@ -337,7 +432,7 @@
             return
 
         if removed_tokens:
-          tokenutil.DeleteTokens(removed_tokens[0], len(removed_tokens))
+          self._DeleteTokens(removed_tokens[0], len(removed_tokens))
 
         whitespace_token = Token('  ', Type.WHITESPACE, token.line,
                                  token.line_number)
@@ -353,42 +448,59 @@
 
     elif code in [errors.EXTRA_GOOG_PROVIDE, errors.EXTRA_GOOG_REQUIRE]:
       tokens_in_line = tokenutil.GetAllTokensInSameLine(token)
-      tokenutil.DeleteTokens(tokens_in_line[0], len(tokens_in_line))
+      num_delete_tokens = len(tokens_in_line)
+      # If the line being deleted is preceded and followed by blank lines,
+      # then delete one blank line as well.
+      if (tokens_in_line[0].previous and tokens_in_line[-1].next
+          and tokens_in_line[0].previous.type == Type.BLANK_LINE
+          and tokens_in_line[-1].next.type == Type.BLANK_LINE):
+        num_delete_tokens += 1
+      self._DeleteTokens(tokens_in_line[0], num_delete_tokens)
       self._AddFix(tokens_in_line)
 
     elif code in [errors.MISSING_GOOG_PROVIDE, errors.MISSING_GOOG_REQUIRE]:
-      is_provide = code == errors.MISSING_GOOG_PROVIDE
-      is_require = code == errors.MISSING_GOOG_REQUIRE
-
       missing_namespaces = error.fix_data[0]
-      need_blank_line = error.fix_data[1]
+      need_blank_line = error.fix_data[1] or (not token.previous)
 
-      if need_blank_line is None:
-        # TODO(user): This happens when there are no existing
-        # goog.provide or goog.require statements to position new statements
-        # relative to. Consider handling this case with a heuristic.
-        return
+      insert_location = Token('', Type.NORMAL, '', token.line_number - 1)
+      dummy_first_token = insert_location
+      tokenutil.InsertTokenBefore(insert_location, token)
 
-      insert_location = token.previous
-
-      # If inserting a missing require with no existing requires, insert a
-      # blank line first.
-      if need_blank_line and is_require:
+      # If inserting a blank line, check that a blank line does not already
+      # exist before the token, to avoid extra blank lines.
+      if (need_blank_line and insert_location.previous
+          and insert_location.previous.type != Type.BLANK_LINE):
         tokenutil.InsertBlankLineAfter(insert_location)
         insert_location = insert_location.next
 
       for missing_namespace in missing_namespaces:
         new_tokens = self._GetNewRequireOrProvideTokens(
-            is_provide, missing_namespace, insert_location.line_number + 1)
+            code == errors.MISSING_GOOG_PROVIDE,
+            missing_namespace, insert_location.line_number + 1)
         tokenutil.InsertLineAfter(insert_location, new_tokens)
         insert_location = new_tokens[-1]
         self._AddFix(new_tokens)
 
-      # If inserting a missing provide with no existing provides, insert a
-      # blank line after.
-      if need_blank_line and is_provide:
+      # If inserting a blank line, check that a blank line does not already
+      # exist after the token, to avoid extra blank lines.
+      if (need_blank_line and insert_location.next
+          and insert_location.next.type != Type.BLANK_LINE):
         tokenutil.InsertBlankLineAfter(insert_location)
 
+      tokenutil.DeleteToken(dummy_first_token)
+
+  def _StripSpace(self, token, before):
+    """Strip whitespace tokens either preceding or following the given token.
+
+    Args:
+      token: The token.
+      before: If true, strip space before the token, if false, after it.
+    """
+    token = token.previous if before else token.next
+    while token and token.type == Type.WHITESPACE:
+      tokenutil.DeleteToken(token)
+      token = token.previous if before else token.next
+
   def _GetNewRequireOrProvideTokens(self, is_provide, namespace, line_number):
     """Returns a list of tokens to create a goog.require/provide statement.
 
@@ -415,25 +527,86 @@
         Token(';', Type.SEMICOLON, line_text, line_number)
         ]
 
+  def _DeleteToken(self, token):
+    """Deletes the specified token from the linked list of tokens.
+
+    Updates instance variables pointing to tokens such as _file_token if
+    they reference the deleted token.
+
+    Args:
+      token: The token to delete.
+    """
+    if token == self._file_token:
+      self._file_token = token.next
+
+    tokenutil.DeleteToken(token)
+
+  def _DeleteTokens(self, token, token_count):
+    """Deletes the given number of tokens starting with the given token.
+
+    Updates instance variables pointing to tokens such as _file_token if
+    they reference the deleted token.
+
+    Args:
+      token: The first token to delete.
+      token_count: The total number of tokens to delete.
+    """
+    if token == self._file_token:
+      for unused_i in xrange(token_count):
+        self._file_token = self._file_token.next
+
+    tokenutil.DeleteTokens(token, token_count)
+
   def FinishFile(self):
     """Called when the current file has finished style checking.
 
-    Used to go back and fix any errors in the file.
+    Used to go back and fix any errors in the file. It currently supports both
+    js and html files. For js files it does a simple dump of all tokens, but in
+    order to support html files we need to merge the original file back
+    together with the new token set. This works because the tokenized html file
+    is the original html file with all non-js lines kept but blanked out, with
+    one blank line token per line of html.
     """
     if self._file_fix_count:
+      # Get the original file content for html.
+      if self._file_is_html:
+        f = open(self._file_name, 'r')
+        original_lines = f.readlines()
+        f.close()
+
       f = self._external_file
       if not f:
-        print 'Fixed %d errors in %s' % (self._file_fix_count, self._file_name)
+        error_noun = 'error' if self._file_fix_count == 1 else 'errors'
+        print 'Fixed %d %s in %s' % (
+            self._file_fix_count, error_noun, self._file_name)
         f = open(self._file_name, 'w')
 
       token = self._file_token
+      # Find the first token that has not been deleted.
+      while token.is_deleted:
+        token = token.next
+      # If something got inserted before first token (e.g. due to sorting)
+      # then move to start. Bug 8398202.
+      while token.previous:
+        token = token.previous
       char_count = 0
+      line = ''
       while token:
-        f.write(token.string)
+        line += token.string
         char_count += len(token.string)
 
         if token.IsLastInLine():
-          f.write('\n')
+          # We distinguish whether a blank line in html came from the stripped
+          # original file or from a newly added error fix by looking at the
+          # "orig_line_number" field on the token. It is only set in the
+          # tokenizer, so for all error fixes, the value should be None.
+          if (line or not self._file_is_html or
+              token.orig_line_number is None):
+            f.write(line)
+            f.write('\n')
+          else:
+            f.write(original_lines[token.orig_line_number - 1])
+          line = ''
           if char_count > 80 and token.line_number in self._file_changed_lines:
             print 'WARNING: Line %d of %s is now longer than 80 characters.' % (
                 token.line_number, self._file_name)
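
The new --fix_error_codes flag above restricts ErrorFixer to a whitelist: the flag's lower-case names are upper-cased, resolved to numeric codes via errors.ByName, and HandleError skips any error whose code is outside the resulting set (an empty flag keeps the old fix-everything behaviour). A standalone sketch of that filter, using an illustrative name-to-code mapping rather than the real errors module:

# Illustrative mapping only; the real codes live in errors.py.
ERROR_CODES_BY_NAME = {'MISSING_SEMICOLON': 10, 'EXTRA_LINE': 11}

def build_fix_set(flag_values):
  # Mirrors the constructor above: unknown names raise a ValueError.
  try:
    return set(ERROR_CODES_BY_NAME[name.upper()] for name in flag_values)
  except KeyError as ke:
    raise ValueError('Unknown error code ' + ke.args[0])

def should_fix(code, fix_set):
  # An empty set means no restriction was requested.
  return not fix_set or code in fix_set

fix_set = build_fix_set(['missing_semicolon'])
assert should_fix(10, fix_set)
assert not should_fix(11, fix_set)
assert should_fix(11, build_fix_set([]))
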
diff --git a/catapult/third_party/closure_linter/closure_linter/error_fixer_test.py b/catapult/third_party/closure_linter/closure_linter/error_fixer_test.py
new file mode 100644
index 0000000..0deeb3b
--- /dev/null
+++ b/catapult/third_party/closure_linter/closure_linter/error_fixer_test.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for the error_fixer module."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+
+
+import unittest as googletest
+from closure_linter import error_fixer
+from closure_linter import testutil
+from closure_linter import tokenutil
+
+
+class ErrorFixerTest(googletest.TestCase):
+  """Unit tests for error_fixer."""
+
+  def setUp(self):
+    self.error_fixer = error_fixer.ErrorFixer()
+
+  def testDeleteToken(self):
+    start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT)
+    second_token = start_token.next
+    self.error_fixer.HandleFile('test_file', start_token)
+
+    self.error_fixer._DeleteToken(start_token)
+
+    self.assertEqual(second_token, self.error_fixer._file_token)
+
+  def testDeleteTokens(self):
+    start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT)
+    fourth_token = start_token.next.next.next
+    self.error_fixer.HandleFile('test_file', start_token)
+
+    self.error_fixer._DeleteTokens(start_token, 3)
+
+    self.assertEqual(fourth_token, self.error_fixer._file_token)
+
+  def DoTestFixJsDocPipeNull(self, expected, original):
+    _, comments = testutil.ParseFunctionsAndComments(
+        '/** @param {%s} */' % original)
+    jstype = comments[0].GetDocFlags()[0].jstype
+    self.error_fixer.HandleFile('unittest', None)
+    self.error_fixer._FixJsDocPipeNull(jstype)
+    self.assertEquals(expected, repr(jstype))
+    result = tokenutil.TokensToString(jstype.FirstToken()).strip('} */')
+    self.assertEquals(expected, result)
+
+  def testFixJsDocPipeNull(self):
+    self.DoTestFixJsDocPipeNull('?Object', 'Object|null')
+    self.DoTestFixJsDocPipeNull('function(?Object)', 'function(Object|null)')
+    self.DoTestFixJsDocPipeNull('function(?Object=)',
+                                'function(Object|null=)')
+    self.DoTestFixJsDocPipeNull(
+        'function(?(Object)=,null=,?(Object)=):string',
+        'function((Object|null)=,null=,(Object|null)=):string')
+
+_TEST_SCRIPT = """\
+var x = 3;
+"""
+
+if __name__ == '__main__':
+  googletest.main()
diff --git a/catapult/third_party/closure_linter/closure_linter/errorrecord.py b/catapult/third_party/closure_linter/closure_linter/errorrecord.py
index 6b253eb..ce9fb90 100644
--- a/catapult/third_party/closure_linter/closure_linter/errorrecord.py
+++ b/catapult/third_party/closure_linter/closure_linter/errorrecord.py
@@ -16,7 +16,7 @@
 
 """A simple, pickle-serializable class to represent a lint error."""
 
-
+__author__ = 'nnaze@google.com (Nathan Naze)'
 
 import gflags as flags
 
@@ -58,8 +58,9 @@
   new_error = error.code in errors.NEW_ERRORS
 
   if FLAGS.unix_mode:
-    error_string = erroroutput.GetUnixErrorOutput(path, error, new_error)
+    error_string = erroroutput.GetUnixErrorOutput(
+        path, error, new_error=new_error)
   else:
-    error_string = erroroutput.GetErrorOutput(error, new_error)
+    error_string = erroroutput.GetErrorOutput(error, new_error=new_error)
 
   return ErrorRecord(path, error_string, new_error)
diff --git a/catapult/third_party/closure_linter/closure_linter/errorrules.py b/catapult/third_party/closure_linter/closure_linter/errorrules.py
old mode 100755
new mode 100644
index afb6fa9..b1b72aa
--- a/catapult/third_party/closure_linter/closure_linter/errorrules.py
+++ b/catapult/third_party/closure_linter/closure_linter/errorrules.py
@@ -25,18 +25,48 @@
 FLAGS = flags.FLAGS
 flags.DEFINE_boolean('jsdoc', True,
                      'Whether to report errors for missing JsDoc.')
+flags.DEFINE_list('disable', None,
+                  'Disable specific errors. Example: gjslint --disable 1,'
+                  '0011 foo.js.')
+flags.DEFINE_integer('max_line_length', 80, 'Maximum line length allowed '
+                     'without warning.', lower_bound=1)
+
+disabled_error_nums = None
+
+
+def GetMaxLineLength():
+  """Returns allowed maximum length of line.
+
+  Returns:
+    Length of line allowed without any warning.
+  """
+  return FLAGS.max_line_length
 
 
 def ShouldReportError(error):
   """Whether the given error should be reported.
-  
+
   Returns:
-    True for all errors except missing documentation errors.  For these,
-    it returns the value of the jsdoc flag.
+    True for all errors except missing documentation errors and disabled
+    errors.  For missing documentation, it returns the value of the
+    jsdoc flag.
   """
-  return FLAGS.jsdoc or error not in (
+  global disabled_error_nums
+  if disabled_error_nums is None:
+    disabled_error_nums = []
+    if FLAGS.disable:
+      for error_str in FLAGS.disable:
+        error_num = 0
+        try:
+          error_num = int(error_str)
+        except ValueError:
+          pass
+        disabled_error_nums.append(error_num)
+
+  return ((FLAGS.jsdoc or error not in (
       errors.MISSING_PARAMETER_DOCUMENTATION,
       errors.MISSING_RETURN_DOCUMENTATION,
       errors.MISSING_MEMBER_DOCUMENTATION,
       errors.MISSING_PRIVATE,
-      errors.MISSING_JSDOC_TAG_THIS)
+      errors.MISSING_JSDOC_TAG_THIS)) and
+          (not FLAGS.disable or error not in disabled_error_nums))
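
ShouldReportError above now also honours a --disable list of numeric error codes, parsed once into disabled_error_nums on first use. A standalone sketch of that parsing and lookup, with plain Python values in place of the gflags plumbing:

# Standalone sketch of the --disable handling: the flag carries error numbers
# as strings ('1', '0011', ...), parsed into integers; entries that fail to
# parse fall back to 0, which matches no real error code.
def parse_disabled(disable_flag):
  disabled = []
  for error_str in disable_flag or []:
    try:
      disabled.append(int(error_str))
    except ValueError:
      disabled.append(0)
  return disabled

def should_report(error, disabled):
  return not disabled or error not in disabled

disabled = parse_disabled(['1', '0011'])
assert disabled == [1, 11]
assert not should_report(11, disabled)
assert should_report(110, disabled)
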
diff --git a/catapult/third_party/closure_linter/closure_linter/errorrules_test.py b/catapult/third_party/closure_linter/closure_linter/errorrules_test.py
new file mode 100644
index 0000000..cb90378
--- /dev/null
+++ b/catapult/third_party/closure_linter/closure_linter/errorrules_test.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+# Copyright 2013 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Medium tests for the gjslint errorrules.
+
+Currently it's just verifying that warnings can't be disabled.
+"""
+
+
+
+import gflags as flags
+import unittest as googletest
+
+from closure_linter import errors
+from closure_linter import runner
+from closure_linter.common import erroraccumulator
+
+flags.FLAGS.strict = True
+flags.FLAGS.limited_doc_files = ('dummy.js', 'externs.js')
+flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
+
+
+class ErrorRulesTest(googletest.TestCase):
+  """Test case to for gjslint errorrules."""
+
+  def testNoMaxLineLengthFlagExists(self):
+    """Tests that --max_line_length flag does not exists."""
+    self.assertTrue('max_line_length' not in flags.FLAGS.FlagDict())
+
+  def testGetMaxLineLength(self):
+    """Tests warning are reported for line greater than 80.
+    """
+
+    # One line is > 100 and one line is > 80 and < 100, so this should
+    # produce two line-too-long errors.
+    original = [
+        'goog.require(\'dummy.aa\');',
+        '',
+        'function a() {',
+        '  dummy.aa.i = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13'
+        ' + 14 + 15 + 16 + 17 + 18 + 19 + 20;',
+        '  dummy.aa.j = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13'
+        ' + 14 + 15 + 16 + 17 + 18;',
+        '}',
+        ''
+        ]
+
+    # Expect line too long.
+    expected = [errors.LINE_TOO_LONG, errors.LINE_TOO_LONG]
+
+    self._AssertErrors(original, expected)
+
+  def testNoDisableFlagExists(self):
+    """Tests that --disable flag does not exists."""
+    self.assertTrue('disable' not in flags.FLAGS.FlagDict())
+
+  def testWarningsNotDisabled(self):
+    """Tests warnings are reported when nothing is disabled.
+    """
+    original = [
+        'goog.require(\'dummy.aa\');',
+        'goog.require(\'dummy.Cc\');',
+        'goog.require(\'dummy.Dd\');',
+        '',
+        'function a() {',
+        '  dummy.aa.i = 1;',
+        '  dummy.Cc.i = 1;',
+        '  dummy.Dd.i = 1;',
+        '}',
+        ]
+
+    expected = [errors.GOOG_REQUIRES_NOT_ALPHABETIZED,
+                errors.FILE_MISSING_NEWLINE]
+
+    self._AssertErrors(original, expected)
+
+  def _AssertErrors(self, original, expected_errors, include_header=True):
+    """Asserts that the error fixer corrects original to expected."""
+    if include_header:
+      original = self._GetHeader() + original
+
+    # Trap gjslint's output and parse it to get the messages added.
+    error_accumulator = erroraccumulator.ErrorAccumulator()
+    runner.Run('testing.js', error_accumulator, source=original)
+    error_nums = [e.code for e in error_accumulator.GetErrors()]
+
+    error_nums.sort()
+    expected_errors.sort()
+    self.assertListEqual(error_nums, expected_errors)
+
+  def _GetHeader(self):
+    """Returns a fake header for a JavaScript file."""
+    return [
+        '// Copyright 2011 Google Inc. All Rights Reserved.',
+        '',
+        '/**',
+        ' * @fileoverview Fake file overview.',
+        ' * @author fake@google.com (Fake Person)',
+        ' */',
+        ''
+        ]
+
+
+if __name__ == '__main__':
+  googletest.main()
diff --git a/catapult/third_party/closure_linter/closure_linter/errors.py b/catapult/third_party/closure_linter/closure_linter/errors.py
old mode 100755
new mode 100644
index 08c6dbe..46a957e
--- a/catapult/third_party/closure_linter/closure_linter/errors.py
+++ b/catapult/third_party/closure_linter/closure_linter/errors.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-#
 # Copyright 2007 The Closure Linter Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -56,9 +55,11 @@
 LINE_TOO_LONG = 110
 LINE_STARTS_WITH_OPERATOR = 120
 COMMA_AT_END_OF_LITERAL = 121
+LINE_ENDS_WITH_DOT = 122
 MULTI_LINE_STRING = 130
 UNNECESSARY_DOUBLE_QUOTED_STRING = 131
 UNUSED_PRIVATE_MEMBER = 132
+UNUSED_LOCAL_VARIABLE = 133
 
 # Requires, provides
 GOOG_REQUIRES_NOT_ALPHABETIZED = 140
@@ -67,6 +68,7 @@
 MISSING_GOOG_PROVIDE = 143
 EXTRA_GOOG_REQUIRE = 144
 EXTRA_GOOG_PROVIDE = 145
+ALIAS_STMT_NEEDS_GOOG_REQUIRE = 146
 
 # JsDoc
 INVALID_JSDOC_TAG = 200
@@ -94,7 +96,9 @@
 JSDOC_ILLEGAL_QUESTION_WITH_PIPE = 231
 JSDOC_MISSING_OPTIONAL_TYPE = 232
 JSDOC_MISSING_OPTIONAL_PREFIX = 233
-JSDOC_TAG_DESCRIPTION_ENDS_WITH_INVALID_CHARACTER = 240
+JSDOC_MISSING_VAR_ARGS_TYPE = 234
+JSDOC_MISSING_VAR_ARGS_NAME = 235
+JSDOC_DOES_NOT_PARSE = 236
 # TODO(robbyw): Split this in to more specific syntax problems.
 INCORRECT_SUPPRESS_SYNTAX = 250
 INVALID_SUPPRESS_TYPE = 251
@@ -112,6 +116,11 @@
 MISSING_END_OF_SCOPE_COMMENT = 500
 MALFORMED_END_OF_SCOPE_COMMENT = 501
 
+# goog.scope - Namespace aliasing
+# TODO(nnaze) Add additional errors here and in aliaspass.py
+INVALID_USE_OF_GOOG_SCOPE = 600
+EXTRA_GOOG_SCOPE_USAGE = 601
+
 # ActionScript specific errors:
 # TODO(user): move these errors to their own file and move all JavaScript
 # specific errors to their own file as well.
@@ -134,9 +143,12 @@
     # Errors added after 2.0.2:
     WRONG_INDENTATION,
     MISSING_SEMICOLON,
-    # Errors added after 2.3.4:
-    MISSING_END_OF_SCOPE_COMMENT,
-    MALFORMED_END_OF_SCOPE_COMMENT,
-    UNUSED_PRIVATE_MEMBER,
-    # Errors added after 2.3.5:
+    # Errors added after 2.3.9:
+    JSDOC_MISSING_VAR_ARGS_TYPE,
+    JSDOC_MISSING_VAR_ARGS_NAME,
+    # Errors added after 2.3.15:
+    ALIAS_STMT_NEEDS_GOOG_REQUIRE,
+    JSDOC_DOES_NOT_PARSE,
+    LINE_ENDS_WITH_DOT,
+    # Errors added after 2.3.19:
     ])
diff --git a/catapult/third_party/closure_linter/closure_linter/fixjsstyle.py b/catapult/third_party/closure_linter/closure_linter/fixjsstyle.py
old mode 100755
new mode 100644
index c23f6b7..2d65e03
--- a/catapult/third_party/closure_linter/closure_linter/fixjsstyle.py
+++ b/catapult/third_party/closure_linter/closure_linter/fixjsstyle.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-# python2.6 for command-line runs using p4lib.  pylint: disable-msg=C6301
 #
 # Copyright 2007 The Closure Linter Authors. All Rights Reserved.
 #
@@ -19,20 +18,23 @@
 
 __author__ = 'robbyw@google.com (Robert Walker)'
 
+import StringIO
 import sys
 
 import gflags as flags
-from closure_linter import checker
+
 from closure_linter import error_fixer
+from closure_linter import runner
 from closure_linter.common import simplefileflags as fileflags
 
 FLAGS = flags.FLAGS
 flags.DEFINE_list('additional_extensions', None, 'List of additional file '
                   'extensions (not js) that should be treated as '
                   'JavaScript files.')
+flags.DEFINE_boolean('dry_run', False, 'Do not modify the file, only print it.')
 
 
-def main(argv = None):
+def main(argv=None):
   """Main function.
 
   Args:
@@ -47,11 +49,18 @@
 
   files = fileflags.GetFileList(argv, 'JavaScript', suffixes)
 
-  style_checker = checker.JavaScriptStyleChecker(error_fixer.ErrorFixer())
+  output_buffer = None
+  if FLAGS.dry_run:
+    output_buffer = StringIO.StringIO()
+
+  fixer = error_fixer.ErrorFixer(output_buffer)
 
   # Check the list of files.
   for filename in files:
-    style_checker.Check(filename)
+    runner.Run(filename, fixer)
+    if FLAGS.dry_run:
+      print output_buffer.getvalue()
+
 
 if __name__ == '__main__':
   main()
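
With the --dry_run flag added above, fixjsstyle routes the fixer's output into a StringIO buffer and prints it instead of rewriting files on disk. A standalone sketch of that buffer-or-file decision follows; fix_file() is a hypothetical stand-in for runner.Run(filename, error_fixer.ErrorFixer(output_buffer)):

try:
  from StringIO import StringIO  # Python 2, as used by fixjsstyle.py
except ImportError:
  from io import StringIO        # Python 3

def fix_file(filename, output):
  # Hypothetical stand-in for running the fixer over one file.
  output.write('/* fixed contents of %s */\n' % filename)

def run(filenames, dry_run):
  for filename in filenames:
    if dry_run:
      output_buffer = StringIO()
      fix_file(filename, output_buffer)
      print(output_buffer.getvalue())
    else:
      with open(filename, 'w') as f:
        fix_file(filename, f)

run(['example.js'], dry_run=True)
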
diff --git a/catapult/third_party/closure_linter/closure_linter/fixjsstyle_test.py b/catapult/third_party/closure_linter/closure_linter/fixjsstyle_test.py
old mode 100755
new mode 100644
index 5096568..34de3f8
--- a/catapult/third_party/closure_linter/closure_linter/fixjsstyle_test.py
+++ b/catapult/third_party/closure_linter/closure_linter/fixjsstyle_test.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-#
 # Copyright 2008 The Closure Linter Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -22,8 +21,9 @@
 
 import gflags as flags
 import unittest as googletest
-from closure_linter import checker
 from closure_linter import error_fixer
+from closure_linter import runner
+
 
 _RESOURCE_PREFIX = 'closure_linter/testdata'
 
@@ -35,10 +35,20 @@
 class FixJsStyleTest(googletest.TestCase):
   """Test case to for gjslint auto-fixing."""
 
+  def setUp(self):
+    flags.FLAGS.dot_on_next_line = True
+
+  def tearDown(self):
+    flags.FLAGS.dot_on_next_line = False
+
   def testFixJsStyle(self):
-    test_cases = [['fixjsstyle.in.js', 'fixjsstyle.out.js'],
-                  ['indentation.js', 'fixjsstyle.indentation.out.js']]
+    test_cases = [
+        ['fixjsstyle.in.js', 'fixjsstyle.out.js'],
+        ['indentation.js', 'fixjsstyle.indentation.out.js'],
+        ['fixjsstyle.html.in.html', 'fixjsstyle.html.out.html'],
+        ['fixjsstyle.oplineend.in.js', 'fixjsstyle.oplineend.out.js']]
     for [running_input_file, running_output_file] in test_cases:
+      print 'Checking %s vs %s' % (running_input_file, running_output_file)
       input_filename = None
       golden_filename = None
       current_filename = None
@@ -48,7 +58,7 @@
 
         golden_filename = '%s/%s' % (_RESOURCE_PREFIX, running_output_file)
         current_filename = golden_filename
-      except IOError, ex:
+      except IOError as ex:
         raise IOError('Could not find testdata resource for %s: %s' %
                       (current_filename, ex))
 
@@ -62,60 +72,348 @@
 
       # Autofix the file, sending output to a fake file.
       actual = StringIO.StringIO()
-      style_checker = checker.JavaScriptStyleChecker(
-          error_fixer.ErrorFixer(actual))
-      style_checker.Check(input_filename)
+      runner.Run(input_filename, error_fixer.ErrorFixer(actual))
 
       # Now compare the files.
       actual.seek(0)
       expected = open(golden_filename, 'r')
 
+      # Uncomment to generate new golden files, then rerun the test.
+      # open('/'.join(golden_filename.split('/')[4:]), 'w').write(actual.read())
+      # actual.seek(0)
+
       self.assertEqual(actual.readlines(), expected.readlines())
 
+  def testAddProvideFirstLine(self):
+    """Tests handling of case where goog.provide is added."""
+    original = [
+        'dummy.bb.cc = 1;',
+        ]
+
+    expected = [
+        'goog.provide(\'dummy.bb\');',
+        '',
+        'dummy.bb.cc = 1;',
+        ]
+
+    self._AssertFixes(original, expected, include_header=False)
+
+    original = [
+        '',
+        'dummy.bb.cc = 1;',
+        ]
+
+    self._AssertFixes(original, expected, include_header=False)
+
+  def testAddRequireFirstLine(self):
+    """Tests handling of case where goog.require is added."""
+    original = [
+        'a = dummy.bb.cc;',
+        ]
+
+    expected = [
+        'goog.require(\'dummy.bb\');',
+        '',
+        'a = dummy.bb.cc;',
+        ]
+
+    self._AssertFixes(original, expected, include_header=False)
+
+    original = [
+        '',
+        'a = dummy.bb.cc;',
+        ]
+
+    self._AssertFixes(original, expected, include_header=False)
+
+  def testDeleteProvideAndAddProvideFirstLine(self):
+    """Tests handling of case where goog.provide is deleted and added.
+
+       Bug 14832597.
+    """
+    original = [
+        'goog.provide(\'dummy.aa\');',
+        '',
+        'dummy.bb.cc = 1;',
+        ]
+
+    expected = [
+        'goog.provide(\'dummy.bb\');',
+        '',
+        'dummy.bb.cc = 1;',
+        ]
+
+    self._AssertFixes(original, expected, include_header=False)
+
+    original = [
+        'goog.provide(\'dummy.aa\');',
+        'dummy.bb.cc = 1;',
+        ]
+
+    self._AssertFixes(original, expected, include_header=False)
+
+  def testDeleteProvideAndAddRequireFirstLine(self):
+    """Tests handling where goog.provide is deleted and goog.require added.
+
+       Bug 14832597.
+    """
+    original = [
+        'goog.provide(\'dummy.aa\');',
+        '',
+        'a = dummy.bb.cc;',
+        ]
+
+    expected = [
+        'goog.require(\'dummy.bb\');',
+        '',
+        'a = dummy.bb.cc;',
+        ]
+
+    self._AssertFixes(original, expected, include_header=False)
+
+    original = [
+        'goog.provide(\'dummy.aa\');',
+        'a = dummy.bb.cc;',
+        ]
+
+    self._AssertFixes(original, expected, include_header=False)
+
+  def testDeleteRequireAndAddRequireFirstLine(self):
+    """Tests handling of case where goog.require is deleted and added.
+
+       Bug 14832597.
+    """
+    original = [
+        'goog.require(\'dummy.aa\');',
+        '',
+        'a = dummy.bb.cc;',
+        ]
+
+    expected = [
+        'goog.require(\'dummy.bb\');',
+        '',
+        'a = dummy.bb.cc;',
+        ]
+
+    self._AssertFixes(original, expected, include_header=False)
+
+    original = [
+        'goog.require(\'dummy.aa\');',
+        'a = dummy.bb.cc;',
+        ]
+
+    self._AssertFixes(original, expected, include_header=False)
+
+  def testDeleteRequireAndAddProvideFirstLine(self):
+    """Tests handling where goog.require is deleted and goog.provide added.
+
+       Bug 14832597.
+    """
+    original = [
+        'goog.require(\'dummy.aa\');',
+        '',
+        'dummy.bb.cc = 1;',
+        ]
+
+    expected = [
+        'goog.provide(\'dummy.bb\');',
+        '',
+        'dummy.bb.cc = 1;',
+        ]
+
+    self._AssertFixes(original, expected, include_header=False)
+
+    original = [
+        'goog.require(\'dummy.aa\');',
+        'dummy.bb.cc = 1;',
+        ]
+
+    self._AssertFixes(original, expected, include_header=False)
+
+  def testMultipleProvideInsert(self):
+    original = [
+        'goog.provide(\'dummy.bb\');',
+        'goog.provide(\'dummy.dd\');',
+        '',
+        'dummy.aa.ff = 1;',
+        'dummy.bb.ff = 1;',
+        'dummy.cc.ff = 1;',
+        'dummy.dd.ff = 1;',
+        'dummy.ee.ff = 1;',
+        ]
+
+    expected = [
+        'goog.provide(\'dummy.aa\');',
+        'goog.provide(\'dummy.bb\');',
+        'goog.provide(\'dummy.cc\');',
+        'goog.provide(\'dummy.dd\');',
+        'goog.provide(\'dummy.ee\');',
+        '',
+        'dummy.aa.ff = 1;',
+        'dummy.bb.ff = 1;',
+        'dummy.cc.ff = 1;',
+        'dummy.dd.ff = 1;',
+        'dummy.ee.ff = 1;',
+        ]
+
+    self._AssertFixes(original, expected, include_header=False)
+
+  def testMultipleRequireInsert(self):
+    original = [
+        'goog.require(\'dummy.bb\');',
+        'goog.require(\'dummy.dd\');',
+        '',
+        'a = dummy.aa.ff;',
+        'b = dummy.bb.ff;',
+        'c = dummy.cc.ff;',
+        'd = dummy.dd.ff;',
+        'e = dummy.ee.ff;',
+        ]
+
+    expected = [
+        'goog.require(\'dummy.aa\');',
+        'goog.require(\'dummy.bb\');',
+        'goog.require(\'dummy.cc\');',
+        'goog.require(\'dummy.dd\');',
+        'goog.require(\'dummy.ee\');',
+        '',
+        'a = dummy.aa.ff;',
+        'b = dummy.bb.ff;',
+        'c = dummy.cc.ff;',
+        'd = dummy.dd.ff;',
+        'e = dummy.ee.ff;',
+        ]
+
+    self._AssertFixes(original, expected, include_header=False)
+
+  def testUnsortedRequires(self):
+    """Tests handling of unsorted goog.require statements without header.
+
+       Bug 8398202.
+    """
+    original = [
+        'goog.require(\'dummy.aa\');',
+        'goog.require(\'dummy.Cc\');',
+        'goog.require(\'dummy.Dd\');',
+        '',
+        'function a() {',
+        '  dummy.aa.i = 1;',
+        '  dummy.Cc.i = 1;',
+        '  dummy.Dd.i = 1;',
+        '}',
+        ]
+
+    expected = [
+        'goog.require(\'dummy.Cc\');',
+        'goog.require(\'dummy.Dd\');',
+        'goog.require(\'dummy.aa\');',
+        '',
+        'function a() {',
+        '  dummy.aa.i = 1;',
+        '  dummy.Cc.i = 1;',
+        '  dummy.Dd.i = 1;',
+        '}',
+        ]
+
+    self._AssertFixes(original, expected, include_header=False)
+
   def testMissingExtraAndUnsortedRequires(self):
     """Tests handling of missing extra and unsorted goog.require statements."""
     original = [
-        "goog.require('dummy.aa');",
-        "goog.require('dummy.Cc');",
-        "goog.require('dummy.Dd');",
-        "",
-        "var x = new dummy.Bb();",
-        "dummy.Cc.someMethod();",
-        "dummy.aa.someMethod();",
+        'goog.require(\'dummy.aa\');',
+        'goog.require(\'dummy.Cc\');',
+        'goog.require(\'dummy.Dd\');',
+        '',
+        'var x = new dummy.Bb();',
+        'dummy.Cc.someMethod();',
+        'dummy.aa.someMethod();',
         ]
 
     expected = [
-        "goog.require('dummy.Bb');",
-        "goog.require('dummy.Cc');",
-        "goog.require('dummy.aa');",
-        "",
-        "var x = new dummy.Bb();",
-        "dummy.Cc.someMethod();",
-        "dummy.aa.someMethod();",
+        'goog.require(\'dummy.Bb\');',
+        'goog.require(\'dummy.Cc\');',
+        'goog.require(\'dummy.aa\');',
+        '',
+        'var x = new dummy.Bb();',
+        'dummy.Cc.someMethod();',
+        'dummy.aa.someMethod();',
         ]
 
     self._AssertFixes(original, expected)
 
-  def testMissingExtraAndUnsortedProvides(self):
-    """Tests handling of missing extra and unsorted goog.provide statements."""
+  def testExtraRequireOnFirstLine(self):
+    """Tests handling of extra goog.require statement on the first line.
+
+       There was a bug where fixjsstyle quit with an exception. It happened if
+        - the first line of the file is an extra goog.require() statement,
+        - goog.require() statements are not sorted.
+    """
     original = [
-        "goog.provide('dummy.aa');",
-        "goog.provide('dummy.Cc');",
-        "goog.provide('dummy.Dd');",
-        "",
-        "dummy.Cc = function() {};",
-        "dummy.Bb = function() {};",
-        "dummy.aa.someMethod = function();",
+        'goog.require(\'dummy.aa\');',
+        'goog.require(\'dummy.cc\');',
+        'goog.require(\'dummy.bb\');',
+        '',
+        'var x = new dummy.bb();',
+        'var y = new dummy.cc();',
         ]
 
     expected = [
-        "goog.provide('dummy.Bb');",
-        "goog.provide('dummy.Cc');",
-        "goog.provide('dummy.aa');",
-        "",
-        "dummy.Cc = function() {};",
-        "dummy.Bb = function() {};",
-        "dummy.aa.someMethod = function();",
+        'goog.require(\'dummy.bb\');',
+        'goog.require(\'dummy.cc\');',
+        '',
+        'var x = new dummy.bb();',
+        'var y = new dummy.cc();',
+        ]
+
+    self._AssertFixes(original, expected, include_header=False)
+
+  def testUnsortedProvides(self):
+    """Tests handling of unsorted goog.provide statements without header.
+
+       Bug 8398202.
+    """
+    original = [
+        'goog.provide(\'dummy.aa\');',
+        'goog.provide(\'dummy.Cc\');',
+        'goog.provide(\'dummy.Dd\');',
+        '',
+        'dummy.aa = function() {};'
+        'dummy.Cc = function() {};'
+        'dummy.Dd = function() {};'
+        ]
+
+    expected = [
+        'goog.provide(\'dummy.Cc\');',
+        'goog.provide(\'dummy.Dd\');',
+        'goog.provide(\'dummy.aa\');',
+        '',
+        'dummy.aa = function() {};'
+        'dummy.Cc = function() {};'
+        'dummy.Dd = function() {};'
+        ]
+
+    self._AssertFixes(original, expected, include_header=False)
+
+  def testMissingExtraAndUnsortedProvides(self):
+    """Tests handling of missing extra and unsorted goog.provide statements."""
+    original = [
+        'goog.provide(\'dummy.aa\');',
+        'goog.provide(\'dummy.Cc\');',
+        'goog.provide(\'dummy.Dd\');',
+        '',
+        'dummy.Cc = function() {};',
+        'dummy.Bb = function() {};',
+        'dummy.aa.someMethod = function();',
+        ]
+
+    expected = [
+        'goog.provide(\'dummy.Bb\');',
+        'goog.provide(\'dummy.Cc\');',
+        'goog.provide(\'dummy.aa\');',
+        '',
+        'dummy.Cc = function() {};',
+        'dummy.Bb = function() {};',
+        'dummy.aa.someMethod = function();',
         ]
 
     self._AssertFixes(original, expected)
@@ -123,21 +421,21 @@
   def testNoRequires(self):
     """Tests positioning of missing requires without existing requires."""
     original = [
-        "goog.provide('dummy.Something');",
-        "",
-        "dummy.Something = function() {};",
-        "",
-        "var x = new dummy.Bb();",
+        'goog.provide(\'dummy.Something\');',
+        '',
+        'dummy.Something = function() {};',
+        '',
+        'var x = new dummy.Bb();',
         ]
 
     expected = [
-        "goog.provide('dummy.Something');",
-        "",
-        "goog.require('dummy.Bb');",
-        "",
-        "dummy.Something = function() {};",
-        "",
-        "var x = new dummy.Bb();",
+        'goog.provide(\'dummy.Something\');',
+        '',
+        'goog.require(\'dummy.Bb\');',
+        '',
+        'dummy.Something = function() {};',
+        '',
+        'var x = new dummy.Bb();',
         ]
 
     self._AssertFixes(original, expected)
@@ -145,25 +443,35 @@
   def testNoProvides(self):
     """Tests positioning of missing provides without existing provides."""
     original = [
-        "goog.require('dummy.Bb');",
-        "",
-        "dummy.Something = function() {};",
-        "",
-        "var x = new dummy.Bb();",
+        'goog.require(\'dummy.Bb\');',
+        '',
+        'dummy.Something = function() {};',
+        '',
+        'var x = new dummy.Bb();',
         ]
 
     expected = [
-        "goog.provide('dummy.Something');",
-        "",
-        "goog.require('dummy.Bb');",
-        "",
-        "dummy.Something = function() {};",
-        "",
-        "var x = new dummy.Bb();",
+        'goog.provide(\'dummy.Something\');',
+        '',
+        'goog.require(\'dummy.Bb\');',
+        '',
+        'dummy.Something = function() {};',
+        '',
+        'var x = new dummy.Bb();',
         ]
 
     self._AssertFixes(original, expected)
 
+  def testOutputOkayWhenFirstTokenIsDeleted(self):
+    """Tests that autofix output is is correct when first token is deleted.
+
+    Regression test for bug 4581567
+    """
+    original = ['"use strict";']
+    expected = ["'use strict';"]
+
+    self._AssertFixes(original, expected, include_header=False)
+
   def testGoogScopeIndentation(self):
     """Tests Handling a typical end-of-scope indentation fix."""
     original = [
@@ -222,15 +530,68 @@
 
     self._AssertFixes(original, expected)
 
-  def _AssertFixes(self, original, expected):
+  def testEndsWithIdentifier(self):
+    """Tests Handling case where script ends with identifier. Bug 7643404."""
+    original = [
+        'goog.provide(\'xyz\');',
+        '',
+        'abc'
+        ]
+
+    expected = [
+        'goog.provide(\'xyz\');',
+        '',
+        'abc;'
+        ]
+
+    self._AssertFixes(original, expected)
+
+  def testFileStartsWithSemicolon(self):
+    """Tests handling files starting with semicolon.
+
+      b/10062516
+    """
+    original = [
+        ';goog.provide(\'xyz\');',
+        '',
+        'abc;'
+        ]
+
+    expected = [
+        'goog.provide(\'xyz\');',
+        '',
+        'abc;'
+        ]
+
+    self._AssertFixes(original, expected, include_header=False)
+
+  def testCodeStartsWithSemicolon(self):
+    """Tests handling code in starting with semicolon after comments.
+
+      b/10062516
+    """
+    original = [
+        ';goog.provide(\'xyz\');',
+        '',
+        'abc;'
+        ]
+
+    expected = [
+        'goog.provide(\'xyz\');',
+        '',
+        'abc;'
+        ]
+
+    self._AssertFixes(original, expected)
+
+  def _AssertFixes(self, original, expected, include_header=True):
     """Asserts that the error fixer corrects original to expected."""
-    original = self._GetHeader() + original
-    expected = self._GetHeader() + expected
+    if include_header:
+      original = self._GetHeader() + original
+      expected = self._GetHeader() + expected
 
     actual = StringIO.StringIO()
-    style_checker = checker.JavaScriptStyleChecker(
-        error_fixer.ErrorFixer(actual))
-    style_checker.CheckLines('testing.js', original, False)
+    runner.Run('testing.js', error_fixer.ErrorFixer(actual), original)
     actual.seek(0)
 
     expected = [x + '\n' for x in expected]
@@ -240,13 +601,13 @@
   def _GetHeader(self):
     """Returns a fake header for a JavaScript file."""
     return [
-        "// Copyright 2011 Google Inc. All Rights Reserved.",
-        "",
-        "/**",
-        " * @fileoverview Fake file overview.",
-        " * @author fake@google.com (Fake Person)",
-        " */",
-        ""
+        '// Copyright 2011 Google Inc. All Rights Reserved.',
+        '',
+        '/**',
+        ' * @fileoverview Fake file overview.',
+        ' * @author fake@google.com (Fake Person)',
+        ' */',
+        ''
         ]
 
 
diff --git a/catapult/third_party/closure_linter/closure_linter/full_test.py b/catapult/third_party/closure_linter/closure_linter/full_test.py
old mode 100755
new mode 100644
index fde9c70..e69a8c0
--- a/catapult/third_party/closure_linter/closure_linter/full_test.py
+++ b/catapult/third_party/closure_linter/closure_linter/full_test.py
@@ -23,7 +23,6 @@
 __author__ = ('robbyw@google.com (Robert Walker)',
               'ajp@google.com (Andy Perelson)')
 
-import re
 import os
 import sys
 import unittest
@@ -31,9 +30,9 @@
 import gflags as flags
 import unittest as googletest
 
-from closure_linter import checker
-from closure_linter import errors
 from closure_linter import error_check
+from closure_linter import errors
+from closure_linter import runner
 from closure_linter.common import filetestcase
 
 _RESOURCE_PREFIX = 'closure_linter/testdata'
@@ -52,6 +51,7 @@
     'all_js_wrapped.js',
     'blank_lines.js',
     'ends_with_block.js',
+    'empty_file.js',
     'externs.js',
     'externs_jsdoc.js',
     'goog_scope.js',
@@ -65,6 +65,7 @@
     'provide_blank.js',
     'provide_extra.js',
     'provide_missing.js',
+    'require_alias.js',
     'require_all_caps.js',
     'require_blank.js',
     'require_extra.js',
@@ -73,20 +74,24 @@
     'require_function_through_both.js',
     'require_function_through_namespace.js',
     'require_interface.js',
+    'require_interface_alias.js',
     'require_interface_base.js',
     'require_lower_case.js',
     'require_missing.js',
     'require_numeric.js',
     'require_provide_blank.js',
-    'require_provide_ok.js',
     'require_provide_missing.js',
+    'require_provide_ok.js',
+    'semicolon_missing.js',
+    'semicolon_missing2.js',
     'simple.html',
     'spaces.js',
     'tokenizer.js',
     'unparseable.js',
+    'unused_local_variables.js',
     'unused_private_members.js',
-    'utf8.html'
-    ]
+    'utf8.html',
+]
 
 
 class GJsLintTestSuite(unittest.TestSuite):
@@ -106,8 +111,11 @@
       test_files = _TEST_FILES
     for test_file in test_files:
       resource_path = os.path.join(_RESOURCE_PREFIX, test_file)
-      self.addTest(filetestcase.AnnotatedFileTestCase(resource_path,
-          checker.GJsLintRunner(), errors.ByName))
+      self.addTest(
+          filetestcase.AnnotatedFileTestCase(
+              resource_path,
+              runner.Run,
+              errors.ByName))
 
 if __name__ == '__main__':
   # Don't let main parse args; it happens in the TestSuite.
diff --git a/catapult/third_party/closure_linter/closure_linter/gjslint.py b/catapult/third_party/closure_linter/closure_linter/gjslint.py
old mode 100755
new mode 100644
index dcfe09f..b67c5a9
--- a/catapult/third_party/closure_linter/closure_linter/gjslint.py
+++ b/catapult/third_party/closure_linter/closure_linter/gjslint.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python
-# python2.6 for command-line runs using p4lib.  pylint: disable-msg=C6301
-#
 # Copyright 2007 The Closure Linter Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -34,23 +32,27 @@
 """
 
 __author__ = ('robbyw@google.com (Robert Walker)',
-              'ajp@google.com (Andy Perelson)')
+              'ajp@google.com (Andy Perelson)',
+              'nnaze@google.com (Nathan Naze)',)
 
-import functools
+import errno
 import itertools
+import os
+import platform
+import re
 import sys
 import time
 
 import gflags as flags
 
-from closure_linter import checker
 from closure_linter import errorrecord
+from closure_linter import runner
 from closure_linter.common import erroraccumulator
 from closure_linter.common import simplefileflags as fileflags
 
 # Attempt import of multiprocessing (should be available in Python 2.6 and up).
 try:
-  # pylint: disable-msg=C6204
+  # pylint: disable=g-import-not-at-top
   import multiprocessing
 except ImportError:
   multiprocessing = None
@@ -60,6 +62,9 @@
                      'Whether to emit warnings in standard unix format.')
 flags.DEFINE_boolean('beep', True, 'Whether to beep when errors are found.')
 flags.DEFINE_boolean('time', False, 'Whether to emit timing statistics.')
+flags.DEFINE_boolean('quiet', False, 'Whether to minimize logged messages. '
+                     'Most useful for per-file linting, such as that performed '
+                     'by the presubmit linter service.')
 flags.DEFINE_boolean('check_html', False,
                      'Whether to check javascript in html files.')
 flags.DEFINE_boolean('summary', False,
@@ -67,13 +72,20 @@
 flags.DEFINE_list('additional_extensions', None, 'List of additional file '
                   'extensions (not js) that should be treated as '
                   'JavaScript files.')
-flags.DEFINE_boolean('multiprocess', False,
-                     'Whether to parallalize linting using the '
-                     'multiprocessing module.  Disabled by default.')
+flags.DEFINE_boolean('multiprocess',
+                     platform.system() == 'Linux' and bool(multiprocessing),
+                     'Whether to attempt parallelized linting using the '
+                     'multiprocessing module.  Enabled by default on Linux '
+                     'if the multiprocessing module is present (Python 2.6+). '
+                     'Otherwise disabled by default. '
+                     'Disabling may make debugging easier.')
+flags.ADOPT_module_key_flags(fileflags)
+flags.ADOPT_module_key_flags(runner)
 
 
 GJSLINT_ONLY_FLAGS = ['--unix_mode', '--beep', '--nobeep', '--time',
-                      '--check_html', '--summary']
+                      '--check_html', '--summary', '--quiet']
+
 
 
 def _MultiprocessCheckPaths(paths):
@@ -92,12 +104,20 @@
 
   pool = multiprocessing.Pool()
 
-  for results in pool.imap(_CheckPath, paths):
-    for record in results:
-      yield record
+  path_results = pool.imap(_CheckPath, paths)
+  for results in path_results:
+    for result in results:
+      yield result
 
-  pool.close()
-  pool.join()
+  # Force destruct before returning, as this can sometimes raise spurious
+  # "interrupted system call" (EINTR), which we can ignore.
+  try:
+    pool.close()
+    pool.join()
+    del pool
+  except OSError as err:
+    if err.errno is not errno.EINTR:
+      raise err
 
 
 def _CheckPaths(paths):
@@ -126,13 +146,11 @@
     A list of errorrecord.ErrorRecords for any found errors.
   """
 
-  error_accumulator = erroraccumulator.ErrorAccumulator()
-  style_checker = checker.JavaScriptStyleChecker(error_accumulator)
-  style_checker.Check(path)
+  error_handler = erroraccumulator.ErrorAccumulator()
+  runner.Run(path, error_handler)
 
-  # Return any errors as error records.
-  make_error_record = functools.partial(errorrecord.MakeErrorRecord, path)
-  return map(make_error_record, error_accumulator.GetErrors())
+  make_error_record = lambda err: errorrecord.MakeErrorRecord(path, err)
+  return map(make_error_record, error_handler.GetErrors())
 
 
 def _GetFilePaths(argv):
@@ -178,13 +196,20 @@
   error_paths_count = len(error_paths)
   no_error_paths_count = all_paths_count - error_paths_count
 
-  if error_count or new_error_count:
-    print ('Found %d errors, including %d new errors, in %d files '
-           '(%d files OK).' % (
-               error_count,
-               new_error_count,
-               error_paths_count,
-               no_error_paths_count))
+  if (error_count or new_error_count) and not FLAGS.quiet:
+    error_noun = 'error' if error_count == 1 else 'errors'
+    new_error_noun = 'error' if new_error_count == 1 else 'errors'
+    error_file_noun = 'file' if error_paths_count == 1 else 'files'
+    ok_file_noun = 'file' if no_error_paths_count == 1 else 'files'
+    print ('Found %d %s, including %d new %s, in %d %s (%d %s OK).' %
+           (error_count,
+            error_noun,
+            new_error_count,
+            new_error_noun,
+            error_paths_count,
+            error_file_noun,
+            no_error_paths_count,
+            ok_file_noun))
 
 
 def _PrintErrorRecords(error_records):
@@ -216,7 +241,9 @@
     return '%.2fs' % t
 
 
-def main(argv = None):
+
+
+def main(argv=None):
   """Main function.
 
   Args:
@@ -228,6 +255,14 @@
   if FLAGS.time:
     start_time = time.time()
 
+  # Emacs sets the environment variable INSIDE_EMACS in the subshell.
+  # Request Unix mode as emacs will expect output to be in Unix format
+  # for integration.
+  # See https://www.gnu.org/software/emacs/manual/html_node/emacs/
+  # Interactive-Shell.html
+  if 'INSIDE_EMACS' in os.environ:
+    FLAGS.unix_mode = True
+
   suffixes = ['.js']
   if FLAGS.additional_extensions:
     suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
@@ -274,7 +309,8 @@
       else:
         fix_args.append(flag)
 
-    print """
+    if not FLAGS.quiet:
+      print """
 Some of the errors reported by GJsLint may be auto-fixable using the script
 fixjsstyle. Please double check any changes it makes and report any bugs. The
 script can be run by executing:
diff --git a/catapult/third_party/closure_linter/closure_linter/indentation.py b/catapult/third_party/closure_linter/closure_linter/indentation.py
old mode 100755
new mode 100644
index cb97853..d48ad2b
--- a/catapult/third_party/closure_linter/closure_linter/indentation.py
+++ b/catapult/third_party/closure_linter/closure_linter/indentation.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-#
 # Copyright 2010 The Closure Linter Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -18,6 +17,8 @@
 
 __author__ = ('robbyw@google.com (Robert Walker)')
 
+import gflags as flags
+
 from closure_linter import ecmametadatapass
 from closure_linter import errors
 from closure_linter import javascripttokens
@@ -25,7 +26,6 @@
 from closure_linter.common import error
 from closure_linter.common import position
 
-import gflags as flags
 
 flags.DEFINE_boolean('debug_indentation', False,
                      'Whether to print debugging information for indentation.')
@@ -89,7 +89,7 @@
     self.overridden_by = None
     self.is_permanent_override = False
     self.is_block = is_block
-    self.is_transient = not is_block and not token.type in (
+    self.is_transient = not is_block and token.type not in (
         Type.START_PAREN, Type.START_PARAMETERS)
     self.line_number = token.line_number
 
@@ -121,7 +121,7 @@
     if self._stack:
       old_stack = self._stack
       self._stack = []
-      raise Exception("INTERNAL ERROR: indentation stack is not empty: %r" %
+      raise Exception('INTERNAL ERROR: indentation stack is not empty: %r' %
                       old_stack)
 
   def CheckToken(self, token, state):
@@ -155,7 +155,7 @@
       start_token = self._PopTo(Type.START_BLOCK)
       # Check for required goog.scope comment.
       if start_token:
-        goog_scope = self._GoogScopeOrNone(start_token.token)
+        goog_scope = tokenutil.GoogScopeOrNoneFromStartBlock(start_token.token)
         if goog_scope is not None:
           if not token.line.endswith(';  // goog.scope\n'):
             if (token.line.find('//') > -1 and
@@ -181,21 +181,11 @@
     elif token_type == Type.KEYWORD and token.string in ('case', 'default'):
       self._Add(self._PopTo(Type.START_BLOCK))
 
-    elif is_first and token.string == '.':
-      # This token should have been on the previous line, so treat it as if it
-      # was there.
-      info = TokenInfo(token)
-      info.line_number = token.line_number - 1
-      self._Add(info)
-
     elif token_type == Type.SEMICOLON:
       self._PopTransient()
 
-    not_binary_operator = (token_type != Type.OPERATOR or
-                           token.metadata.IsUnaryOperator())
-    not_dot = token.string != '.'
-    if is_first and not_binary_operator and not_dot and token.type not in (
-        Type.COMMENT, Type.DOC_PREFIX, Type.STRING_TEXT):
+    if (is_first and
+        token_type not in (Type.COMMENT, Type.DOC_PREFIX, Type.STRING_TEXT)):
       if flags.FLAGS.debug_indentation:
         print 'Line #%d: stack %r' % (token.line_number, stack)
 
@@ -222,15 +212,15 @@
         indentation_errors.append([
             errors.WRONG_INDENTATION,
             'Wrong indentation: expected any of {%s} but got %d' % (
-                ', '.join(
-                    ['%d' % x for x in expected]), actual),
+                ', '.join('%d' % x for x in expected if x < 80), actual),
             token,
             Position(actual, expected[0])])
         self._start_index_offset[token.line_number] = expected[0] - actual
 
     # Add tokens that could increase indentation.
     if token_type == Type.START_BRACKET:
-      self._Add(TokenInfo(token=token,
+      self._Add(TokenInfo(
+          token=token,
           is_block=token.metadata.context.type == Context.ARRAY_LITERAL))
 
     elif token_type == Type.START_BLOCK or token.metadata.is_implied_block:
@@ -253,12 +243,15 @@
     # Add some tokens only if they appear at the end of the line.
     is_last = self._IsLastCodeInLine(token)
     if is_last:
+      next_code_token = tokenutil.GetNextCodeToken(token)
+      # Increase required indentation if this is an overlong wrapped statement
+      # ending in an operator.
       if token_type == Type.OPERATOR:
         if token.string == ':':
-          if (stack and stack[-1].token.string == '?'):
+          if stack and stack[-1].token.string == '?':
             # When a ternary : is on a different line than its '?', it doesn't
             # add indentation.
-            if (token.line_number == stack[-1].token.line_number):
+            if token.line_number == stack[-1].token.line_number:
               self._Add(TokenInfo(token))
           elif token.metadata.context.type == Context.CASE_BLOCK:
             # Pop transient tokens from say, line continuations, e.g.,
@@ -273,7 +266,6 @@
             # When in an object literal, acts as operator indicating line
             # continuations.
             self._Add(TokenInfo(token))
-            pass
           else:
             # ':' might also be a statement label, no effect on indentation in
             # this case.
@@ -287,13 +279,16 @@
             self._Add(TokenInfo(token))
           elif token.metadata.context.type != Context.PARAMETERS:
             self._PopTransient()
-
-      elif (token.string.endswith('.')
-            and token_type in (Type.IDENTIFIER, Type.NORMAL)):
+      # Increase required indentation if this is the end of a statement that's
+      # continued with an operator on the next line (e.g. the '.').
+      elif (next_code_token and next_code_token.type == Type.OPERATOR and
+            not next_code_token.metadata.IsUnaryOperator()):
         self._Add(TokenInfo(token))
       elif token_type == Type.PARAMETERS and token.string.endswith(','):
         # Parameter lists.
         self._Add(TokenInfo(token))
+      elif token.IsKeyword('var'):
+        self._Add(TokenInfo(token))
       elif token.metadata.is_implied_semicolon:
         self._PopTransient()
     elif token.IsAssignment():
@@ -321,6 +316,12 @@
   def _IsHardStop(self, token):
     """Determines if the given token can have a hard stop after it.
 
+    Args:
+      token: token to examine
+
+    Returns:
+      Whether the token can have a hard stop after it.
+
     Hard stops are indentations defined by the position of another token as in
     indentation lined up with return, (, [, and ?.
     """
@@ -365,7 +366,15 @@
       # Handle hard stops after (, [, return, =, and ?
       if self._IsHardStop(token):
         override_is_hard_stop = (token_info.overridden_by and
-            self._IsHardStop(token_info.overridden_by.token))
+                                 self._IsHardStop(
+                                     token_info.overridden_by.token))
+        if token.type == Type.START_PAREN and token.previous:
+          # For someFunction(...) we allow indenting to the start of the
+          # identifier + 4.
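+          # Illustrative (assumed): with this extra hard stop,
+          #   someFunction(firstArg,
+          #       secondArg);
+          # is accepted, since the wrapped line starts four columns past the
+          # start of 'someFunction'.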
+          prev = token.previous
+          if (prev.type == Type.IDENTIFIER and
+              prev.line_number == token.line_number):
+            hard_stops.add(prev.start_index + 4)
         if not override_is_hard_stop:
           start_index = token.start_index
           if token.line_number in self._start_index_offset:
@@ -377,7 +386,7 @@
           elif token.string == 'return' and not token_info.overridden_by:
             hard_stops.add(start_index + 7)
 
-          elif (token.type == Type.START_BRACKET):
+          elif token.type == Type.START_BRACKET:
             hard_stops.add(start_index + 1)
 
           elif token.IsAssignment():
@@ -447,26 +456,30 @@
       if token.type not in Type.NON_CODE_TYPES:
         return False
 
-  def _GoogScopeOrNone(self, token):
-    """Determines if the given START_BLOCK is part of a goog.scope statement.
+  def _AllFunctionPropertyAssignTokens(self, start_token, end_token):
+    """Checks if tokens are (likely) a valid function property assignment.
 
     Args:
-      token: A token of type START_BLOCK.
+      start_token: Start of the token range.
+      end_token: End of the token range.
 
     Returns:
-      The goog.scope function call token, or None if such call doesn't exist.
+      True if all tokens between start_token and end_token are legal tokens
+      within a function declaration and assignment into a property.
     """
-    # Search for a goog.scope statement, which will be 5 tokens before the
-    # block. Illustration of the tokens found prior to the start block:
-    # goog.scope(function() {
-    #      5    4    3   21 ^
-
-    maybe_goog_scope = token
-    for unused_i in xrange(5):
-      maybe_goog_scope = (maybe_goog_scope.previous if maybe_goog_scope and
-                          maybe_goog_scope.previous else None)
-    if maybe_goog_scope and maybe_goog_scope.string == 'goog.scope':
-      return maybe_goog_scope
+    for token in tokenutil.GetTokenRange(start_token, end_token):
+      fn_decl_tokens = (Type.FUNCTION_DECLARATION,
+                        Type.PARAMETERS,
+                        Type.START_PARAMETERS,
+                        Type.END_PARAMETERS,
+                        Type.END_PAREN)
+      if (token.type not in fn_decl_tokens and
+          token.IsCode() and
+          not tokenutil.IsIdentifierOrDot(token) and
+          not token.IsAssignment() and
+          not (token.type == Type.OPERATOR and token.string == ',')):
+        return False
+    return True
 
   def _Add(self, token_info):
     """Adds the given token info to the stack.
@@ -479,10 +492,35 @@
       return
 
     if token_info.is_block or token_info.token.type == Type.START_PAREN:
-      token_info.overridden_by = self._GoogScopeOrNone(token_info.token)
-      index = 1
-      while index <= len(self._stack):
-        stack_info = self._stack[-index]
+      scope_token = tokenutil.GoogScopeOrNoneFromStartBlock(token_info.token)
+      token_info.overridden_by = TokenInfo(scope_token) if scope_token else None
+
+      if (token_info.token.type == Type.START_BLOCK and
+          token_info.token.metadata.context.type == Context.BLOCK):
+        # Handle function() {} assignments: their block contents get special
+        # treatment and are allowed to indent by just two spaces.
+        # For example
+        # long.long.name = function(
+        #     a) {
+        # In this case the { and the = are on different lines.  But the
+        # override should still apply for all previous stack tokens that are
+        # part of an assignment of a block.
+
+        has_assignment = any(x for x in self._stack if x.token.IsAssignment())
+        if has_assignment:
+          last_token = token_info.token.previous
+          for stack_info in reversed(self._stack):
+            if (last_token and
+                not self._AllFunctionPropertyAssignTokens(stack_info.token,
+                                                          last_token)):
+              break
+            stack_info.overridden_by = token_info
+            stack_info.is_permanent_override = True
+            last_token = stack_info.token
+
+      index = len(self._stack) - 1
+      while index >= 0:
+        stack_info = self._stack[index]
         stack_token = stack_info.token
 
         if stack_info.line_number == token_info.line_number:
@@ -497,24 +535,14 @@
             #   a: 10
             # },
             # 30);
+            # b/11450054. If a string is not closed properly then close_block
+            # could be None.
             close_block = token_info.token.metadata.context.end_token
-            stack_info.is_permanent_override = \
-                close_block.line_number != token_info.token.line_number
-        elif (token_info.token.type == Type.START_BLOCK and
-              token_info.token.metadata.context.type == Context.BLOCK and
-              (stack_token.IsAssignment() or
-               stack_token.type == Type.IDENTIFIER)):
-          # When starting a function block, the override can transcend lines.
-          # For example
-          # long.long.name = function(
-          #     a) {
-          # In this case the { and the = are on different lines.  But the
-          # override should still apply.
-          stack_info.overridden_by = token_info
-          stack_info.is_permanent_override = True
+            stack_info.is_permanent_override = close_block and (
+                close_block.line_number != token_info.token.line_number)
         else:
           break
-        index += 1
+        index -= 1
 
     self._stack.append(token_info)
 
diff --git a/catapult/third_party/closure_linter/closure_linter/javascriptlintrules.py b/catapult/third_party/closure_linter/closure_linter/javascriptlintrules.py
old mode 100755
new mode 100644
index 0a02a59..0b2d7cd
--- a/catapult/third_party/closure_linter/closure_linter/javascriptlintrules.py
+++ b/catapult/third_party/closure_linter/closure_linter/javascriptlintrules.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-#
 # Copyright 2011 The Closure Linter Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -25,7 +24,7 @@
               'jacobr@google.com (Jacob Richman)')
 
 import re
-from sets import Set
+
 from closure_linter import ecmalintrules
 from closure_linter import error_check
 from closure_linter import errors
@@ -51,30 +50,19 @@
     ecmalintrules.EcmaScriptLintRules.__init__(self)
     self._namespaces_info = namespaces_info
     self._declared_private_member_tokens = {}
-    self._declared_private_members = Set()
-    self._used_private_members = Set()
+    self._declared_private_members = set()
+    self._used_private_members = set()
+    # A stack of dictionaries, one for each function scope entered. Each
+    # dictionary is keyed by an identifier that defines a local variable and has
+    # a token as its value.
+    self._unused_local_variables_by_scope = []
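+    # Illustrative shape (assumed): with one function nested in another this
+    # might look like [{'outerVar': <token>}, {'innerVar': <token>}], with
+    # the innermost scope last.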
 
   def HandleMissingParameterDoc(self, token, param_name):
     """Handle errors associated with a parameter missing a param tag."""
     self._HandleError(errors.MISSING_PARAMETER_DOCUMENTATION,
                       'Missing docs for parameter: "%s"' % param_name, token)
 
-  def __ContainsRecordType(self, token):
-    """Check whether the given token contains a record type.
-
-    Args:
-      token: The token being checked
-
-    Returns:
-      True if the token contains a record type, False otherwise.
-    """
-    # If we see more than one left-brace in the string of an annotation token,
-    # then there's a record type in there.
-    return (
-        token and token.type == Type.DOC_FLAG and
-        token.attached_object.type is not None and
-        token.attached_object.type.find('{') != token.string.rfind('{'))
-
+  # pylint: disable=too-many-statements
   def CheckToken(self, token, state):
     """Checks a token, given the current parser_state, for warnings and errors.
 
@@ -82,11 +70,6 @@
       token: The current token under consideration
       state: parser_state object that indicates the current state in the page
     """
-    if self.__ContainsRecordType(token):
-      # We should bail out and not emit any warnings for this annotation.
-      # TODO(nicksantos): Support record types for real.
-      state.GetDocComment().Invalidate()
-      return
 
     # Call the base class's CheckToken function.
     super(JavaScriptLintRules, self).CheckToken(token, state)
@@ -94,21 +77,29 @@
     # Store some convenience variables
     namespaces_info = self._namespaces_info
 
+    if error_check.ShouldCheck(Rule.UNUSED_LOCAL_VARIABLES):
+      self._CheckUnusedLocalVariables(token, state)
+
     if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS):
       # Find all assignments to private members.
       if token.type == Type.SIMPLE_LVALUE:
         identifier = token.string
         if identifier.endswith('_') and not identifier.endswith('__'):
           doc_comment = state.GetDocComment()
-          suppressed = (doc_comment and doc_comment.HasFlag('suppress') and
-                        doc_comment.GetFlag('suppress').type == 'underscore')
+          suppressed = doc_comment and (
+              'underscore' in doc_comment.suppressions or
+              'unusedPrivateMembers' in doc_comment.suppressions)
           if not suppressed:
             # Look for static members defined on a provided namespace.
-            namespace = namespaces_info.GetClosurizedNamespace(identifier)
-            provided_namespaces = namespaces_info.GetProvidedNamespaces()
+            if namespaces_info:
+              namespace = namespaces_info.GetClosurizedNamespace(identifier)
+              provided_namespaces = namespaces_info.GetProvidedNamespaces()
+            else:
+              namespace = None
+              provided_namespaces = set()
 
             # Skip cases of this.something_.somethingElse_.
-            regex = re.compile('^this\.[a-zA-Z_]+$')
+            regex = re.compile(r'^this\.[a-zA-Z_]+$')
             if namespace in provided_namespaces or regex.match(identifier):
               variable = identifier.split('.')[-1]
               self._declared_private_member_tokens[variable] = token
@@ -132,28 +123,41 @@
         self._CheckForMissingSpaceBeforeToken(
             token.attached_object.name_token)
 
-        if (error_check.ShouldCheck(Rule.OPTIONAL_TYPE_MARKER) and
-            flag.type is not None and flag.name is not None):
-          # Check for optional marker in type.
-          if (flag.type.endswith('=') and
-              not flag.name.startswith('opt_')):
-            self._HandleError(errors.JSDOC_MISSING_OPTIONAL_PREFIX,
-                              'Optional parameter name %s must be prefixed '
-                              'with opt_.' % flag.name,
-                              token)
-          elif (not flag.type.endswith('=') and
-                flag.name.startswith('opt_')):
-            self._HandleError(errors.JSDOC_MISSING_OPTIONAL_TYPE,
-                              'Optional parameter %s type must end with =.' %
-                              flag.name,
-                              token)
+        if flag.type is not None and flag.name is not None:
+          if error_check.ShouldCheck(Rule.VARIABLE_ARG_MARKER):
+            # Check for variable arguments marker in type.
+            if flag.jstype.IsVarArgsType() and flag.name != 'var_args':
+              self._HandleError(errors.JSDOC_MISSING_VAR_ARGS_NAME,
+                                'Variable length argument %s must be renamed '
+                                'to var_args.' % flag.name,
+                                token)
+            elif not flag.jstype.IsVarArgsType() and flag.name == 'var_args':
+              self._HandleError(errors.JSDOC_MISSING_VAR_ARGS_TYPE,
+                                'Variable length argument %s type must start '
+                                'with \'...\'.' % flag.name,
+                                token)
+
+          if error_check.ShouldCheck(Rule.OPTIONAL_TYPE_MARKER):
+            # Check for optional marker in type.
+            if (flag.jstype.opt_arg and
+                not flag.name.startswith('opt_')):
+              self._HandleError(errors.JSDOC_MISSING_OPTIONAL_PREFIX,
+                                'Optional parameter name %s must be prefixed '
+                                'with opt_.' % flag.name,
+                                token)
+            elif (not flag.jstype.opt_arg and
+                  flag.name.startswith('opt_')):
+              self._HandleError(errors.JSDOC_MISSING_OPTIONAL_TYPE,
+                                'Optional parameter %s type must end with =.' %
+                                flag.name,
+                                token)
 
       if flag.flag_type in state.GetDocFlag().HAS_TYPE:
         # Check for both missing type token and empty type braces '{}'
-        # Missing suppress types are reported separately and we allow enums
-        # without types.
-        if (flag.flag_type not in ('suppress', 'enum') and
-            (not flag.type or flag.type.isspace())):
+        # Missing suppress types are reported separately and we allow enums,
+        # const, private, public and protected without types.
+        if (flag.flag_type not in state.GetDocFlag().CAN_OMIT_TYPE
+            and (not flag.jstype or flag.jstype.IsEmpty())):
           self._HandleError(errors.MISSING_JSDOC_TAG_TYPE,
                             'Missing type in %s tag' % token.string, token)
 
@@ -176,7 +180,7 @@
             errors.UNNECESSARY_DOUBLE_QUOTED_STRING,
             'Single-quoted string preferred over double-quoted string.',
             token,
-            Position.All(token.string))
+            position=Position.All(token.string))
 
     elif token.type == Type.END_DOC_COMMENT:
       doc_comment = state.GetDocComment()
@@ -187,13 +191,19 @@
         self._SetLimitedDocChecks(True)
 
       if (error_check.ShouldCheck(Rule.BLANK_LINES_AT_TOP_LEVEL) and
-          not self._is_html and state.InTopLevel() and not state.InBlock()):
+          not self._is_html and
+          state.InTopLevel() and
+          not state.InNonScopeBlock()):
 
         # Check if we're in a fileoverview or constructor JsDoc.
         is_constructor = (
             doc_comment.HasFlag('constructor') or
             doc_comment.HasFlag('interface'))
-        is_file_overview = doc_comment.HasFlag('fileoverview')
+        # @fileoverview is an optional tag, so if the docstring is the first
+        # token in the file, treat it as a file-level docstring.
+        is_file_level_comment = (
+            doc_comment.HasFlag('fileoverview') or
+            not doc_comment.start_token.previous)
 
         # If the comment is not a file overview, and it does not immediately
         # precede some code, skip it.
@@ -201,7 +211,8 @@
         # behavior at the top of a file.
         next_token = token.next
         if (not next_token or
-            (not is_file_overview and next_token.type in Type.NON_CODE_TYPES)):
+            (not is_file_level_comment and
+             next_token.type in Type.NON_CODE_TYPES)):
           return
 
         # Don't require extra blank lines around suppression of extra
@@ -214,7 +225,7 @@
         # Find the start of this block (include comments above the block, unless
         # this is a file overview).
         block_start = doc_comment.start_token
-        if not is_file_overview:
+        if not is_file_level_comment:
           token = block_start.previous
           while token and token.type in Type.COMMENT_TYPES:
             block_start = token
@@ -236,22 +247,25 @@
         error_message = False
         expected_blank_lines = 0
 
-        if is_file_overview and blank_lines == 0:
+        # Only need blank line before file overview if it is not the beginning
+        # of the file, e.g. copyright is first.
+        if is_file_level_comment and blank_lines == 0 and block_start.previous:
           error_message = 'Should have a blank line before a file overview.'
           expected_blank_lines = 1
         elif is_constructor and blank_lines != 3:
           error_message = (
               'Should have 3 blank lines before a constructor/interface.')
           expected_blank_lines = 3
-        elif not is_file_overview and not is_constructor and blank_lines != 2:
+        elif (not is_file_level_comment and not is_constructor and
+              blank_lines != 2):
           error_message = 'Should have 2 blank lines between top-level blocks.'
           expected_blank_lines = 2
 
         if error_message:
           self._HandleError(
               errors.WRONG_BLANK_LINE_COUNT, error_message,
-              block_start, Position.AtBeginning(),
-              expected_blank_lines - blank_lines)
+              block_start, position=Position.AtBeginning(),
+              fix_data=expected_blank_lines - blank_lines)
 
     elif token.type == Type.END_BLOCK:
       if state.InFunction() and state.IsFunctionClose():
@@ -269,37 +283,79 @@
             self._HandleError(
                 errors.MISSING_RETURN_DOCUMENTATION,
                 'Missing @return JsDoc in function with non-trivial return',
-                function.doc.end_token, Position.AtBeginning())
+                function.doc.end_token, position=Position.AtBeginning())
           elif (not function.has_return and
                 not function.has_throw and
                 function.doc and
                 function.doc.HasFlag('return') and
                 not state.InInterfaceMethod()):
-            return_flag = function.doc.GetFlag('return')
-            if (return_flag.type is None or (
-                'undefined' not in return_flag.type and
-                'void' not in return_flag.type and
-                '*' not in return_flag.type)):
+            flag = function.doc.GetFlag('return')
+            valid_no_return_names = ['undefined', 'void', '*']
+            invalid_return = flag.jstype is None or not any(
+                sub_type.identifier in valid_no_return_names
+                for sub_type in flag.jstype.IterTypeGroup())
+
+            if invalid_return:
               self._HandleError(
                   errors.UNNECESSARY_RETURN_DOCUMENTATION,
                   'Found @return JsDoc on function that returns nothing',
-                  return_flag.flag_token, Position.AtBeginning())
+                  flag.flag_token, position=Position.AtBeginning())
 
-      if state.InFunction() and state.IsFunctionClose():
-        is_immediately_called = (token.next and
-                                 token.next.type == Type.START_PAREN)
+        # b/4073735. Method in object literal definition of prototype can
+        # safely reference 'this'.
+        prototype_object_literal = False
+        block_start = None
+        previous_code = None
+        previous_previous_code = None
+
+        # Search for cases where prototype is defined as object literal.
+        #       previous_previous_code
+        #       |       previous_code
+        #       |       | block_start
+        #       |       | |
+        # a.b.prototype = {
+        #   c : function() {
+        #     this.d = 1;
+        #   }
+        # }
+
+        # If in an object literal, find the first token of the block so we can
+        # look at the previous tokens and check the condition above.
+        if state.InObjectLiteral():
+          block_start = state.GetCurrentBlockStart()
+
+        # If in an object literal, get the previous code token. For the case
+        # above it should be '='.
+        if block_start:
+          previous_code = tokenutil.SearchExcept(block_start,
+                                                 Type.NON_CODE_TYPES,
+                                                 reverse=True)
+
+        # If the token before the block is '=', get the token before that.
+        if previous_code and previous_code.IsOperator('='):
+          previous_previous_code = tokenutil.SearchExcept(previous_code,
+                                                          Type.NON_CODE_TYPES,
+                                                          reverse=True)
+
+        # If the variable/token before '=' ends with '.prototype', then this is
+        # the case above of a prototype defined with an object literal.
+        prototype_object_literal = (previous_previous_code and
+                                    previous_previous_code.string.endswith(
+                                        '.prototype'))
+
         if (function.has_this and function.doc and
             not function.doc.HasFlag('this') and
             not function.is_constructor and
             not function.is_interface and
-            '.prototype.' not in function.name):
+            '.prototype.' not in function.name and
+            not prototype_object_literal):
           self._HandleError(
               errors.MISSING_JSDOC_TAG_THIS,
               'Missing @this JsDoc in function referencing "this". ('
               'this usually means you are trying to reference "this" in '
               'a static function, or you have forgotten to mark a '
               'constructor with @constructor)',
-              function.doc.end_token, Position.AtBeginning())
+              function.doc.end_token, position=Position.AtBeginning())
 
     elif token.type == Type.IDENTIFIER:
       if token.string == 'goog.inherits' and not state.InFunction():
@@ -308,7 +364,7 @@
               errors.MISSING_LINE,
               'Missing newline between constructor and goog.inherits',
               token,
-              Position.AtBeginning())
+              position=Position.AtBeginning())
 
         extra_space = state.GetLastNonSpaceToken().next
         while extra_space != token:
@@ -325,13 +381,23 @@
       elif (token.string == 'goog.provide' and
             not state.InFunction() and
             namespaces_info is not None):
-        namespace = tokenutil.Search(token, Type.STRING_TEXT).string
+        namespace = tokenutil.GetStringAfterToken(token)
 
         # Report extra goog.provide statement.
-        if namespaces_info.IsExtraProvide(token):
+        if not namespace or namespaces_info.IsExtraProvide(token):
+          if not namespace:
+            msg = 'Empty namespace in goog.provide'
+          else:
+            msg = 'Unnecessary goog.provide: ' + namespace
+
+            # Hint to user if this is a Test namespace.
+            if namespace.endswith('Test'):
+              msg += (' *Test namespaces must be mentioned in the '
+                      'goog.setTestOnly() call')
+
           self._HandleError(
               errors.EXTRA_GOOG_PROVIDE,
-              'Unnecessary goog.provide: ' + namespace,
+              msg,
               token, position=Position.AtBeginning())
 
         if namespaces_info.IsLastProvide(token):
@@ -346,17 +412,20 @@
           # If there are no require statements, missing requires should be
           # reported after the last provide.
           if not namespaces_info.GetRequiredNamespaces():
-            missing_requires = namespaces_info.GetMissingRequires()
+            missing_requires, illegal_alias_statements = (
+                namespaces_info.GetMissingRequires())
             if missing_requires:
               self._ReportMissingRequires(
                   missing_requires,
                   tokenutil.GetLastTokenInSameLine(token).next,
                   True)
+            if illegal_alias_statements:
+              self._ReportIllegalAliasStatement(illegal_alias_statements)
 
       elif (token.string == 'goog.require' and
             not state.InFunction() and
             namespaces_info is not None):
-        namespace = tokenutil.Search(token, Type.STRING_TEXT).string
+        namespace = tokenutil.GetStringAfterToken(token)
 
         # If there are no provide statements, missing provides should be
         # reported before the first require.
@@ -370,20 +439,28 @@
                 True)
 
         # Report extra goog.require statement.
-        if namespaces_info.IsExtraRequire(token):
+        if not namespace or namespaces_info.IsExtraRequire(token):
+          if not namespace:
+            msg = 'Empty namespace in goog.require'
+          else:
+            msg = 'Unnecessary goog.require: ' + namespace
+
           self._HandleError(
               errors.EXTRA_GOOG_REQUIRE,
-              'Unnecessary goog.require: ' + namespace,
+              msg,
               token, position=Position.AtBeginning())
 
         # Report missing goog.require statements.
         if namespaces_info.IsLastRequire(token):
-          missing_requires = namespaces_info.GetMissingRequires()
+          missing_requires, illegal_alias_statements = (
+              namespaces_info.GetMissingRequires())
           if missing_requires:
             self._ReportMissingRequires(
                 missing_requires,
                 tokenutil.GetLastTokenInSameLine(token).next,
                 False)
+          if illegal_alias_statements:
+            self._ReportIllegalAliasStatement(illegal_alias_statements)
 
     elif token.type == Type.OPERATOR:
       last_in_line = token.IsLastInLine()
@@ -395,14 +472,15 @@
       if (not token.metadata.IsUnaryOperator() and not last_in_line
           and not token.next.IsComment()
           and not token.next.IsOperator(',')
-          and not token.next.type in (Type.WHITESPACE, Type.END_PAREN,
+          and not tokenutil.IsDot(token)
+          and token.next.type not in (Type.WHITESPACE, Type.END_PAREN,
                                       Type.END_BRACKET, Type.SEMICOLON,
                                       Type.START_BRACKET)):
         self._HandleError(
             errors.MISSING_SPACE,
             'Missing space after "%s"' % token.string,
             token,
-            Position.AtEnd(token.string))
+            position=Position.AtEnd(token.string))
     elif token.type == Type.WHITESPACE:
       first_in_line = token.IsFirstInLine()
       last_in_line = token.IsLastInLine()
@@ -417,52 +495,183 @@
               errors.EXTRA_SPACE,
               'Extra space after "%s"' % token.previous.string,
               token,
-              Position.All(token.string))
+              position=Position.All(token.string))
+    elif token.type == Type.SEMICOLON:
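+      # Illustrative (assumed): a ';' at the very start of a file hits the
+      # "no previous statement" case below; a ';' directly after a keyword
+      # such as 'else' hits the second case.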
+      previous_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES,
+                                              reverse=True)
+      if not previous_token:
+        self._HandleError(
+            errors.REDUNDANT_SEMICOLON,
+            'Semicolon without any statement',
+            token,
+            position=Position.AtEnd(token.string))
+      elif (previous_token.type == Type.KEYWORD and
+            previous_token.string not in ['break', 'continue', 'return']):
+        self._HandleError(
+            errors.REDUNDANT_SEMICOLON,
+            ('Semicolon after \'%s\' without any statement.'
+             ' Looks like an error.' % previous_token.string),
+            token,
+            position=Position.AtEnd(token.string))
+
+  def _CheckUnusedLocalVariables(self, token, state):
+    """Checks for unused local variables in function blocks.
+
+    Args:
+      token: The token to check.
+      state: The state tracker.
+    """
+    # We don't use state.InFunction because that disregards scope functions.
+    in_function = state.FunctionDepth() > 0
+    if token.type == Type.SIMPLE_LVALUE or token.type == Type.IDENTIFIER:
+      if in_function:
+        identifier = token.string
+        # Check whether the previous token was var.
+        previous_code_token = tokenutil.CustomSearch(
+            token,
+            lambda t: t.type not in Type.NON_CODE_TYPES,
+            reverse=True)
+        if previous_code_token and previous_code_token.IsKeyword('var'):
+          # Add local variable declaration to the top of the unused locals
+          # stack.
+          self._unused_local_variables_by_scope[-1][identifier] = token
+        elif token.type == Type.IDENTIFIER:
+          # This covers most cases where the variable is used as an identifier.
+          self._MarkLocalVariableUsed(token.string)
+        elif token.type == Type.SIMPLE_LVALUE and '.' in identifier:
+          # This covers cases where a value is assigned to a property of the
+          # variable.
+          self._MarkLocalVariableUsed(token.string)
+    elif token.type == Type.START_BLOCK:
+      if in_function and state.IsFunctionOpen():
+        # Push a new map onto the stack
+        self._unused_local_variables_by_scope.append({})
+    elif token.type == Type.END_BLOCK:
+      if state.IsFunctionClose():
+        # Pop the stack and report any remaining locals as unused.
+        unused_local_variables = self._unused_local_variables_by_scope.pop()
+        for unused_token in unused_local_variables.values():
+          self._HandleError(
+              errors.UNUSED_LOCAL_VARIABLE,
+              'Unused local variable: %s.' % unused_token.string,
+              unused_token)
+    elif token.type == Type.DOC_FLAG:
+      # Flags that use aliased symbols should be counted.
+      flag = token.attached_object
+      js_type = flag and flag.jstype
+      if flag and flag.flag_type in state.GetDocFlag().HAS_TYPE and js_type:
+        self._MarkAliasUsed(js_type)
+
+  def _MarkAliasUsed(self, js_type):
+    """Marks aliases in a type as used.
+
+    Recursively iterates over all subtypes in a jsdoc type annotation and
+    tracks usage of aliased symbols (which may be local variables).
+    Marks the local variable as used in the scope nearest to the current
+    scope that matches the given token.
+
+    Args:
+      js_type: The jsdoc type, a typeannotation.TypeAnnotation object.
+    """
+    if js_type.alias:
+      self._MarkLocalVariableUsed(js_type.identifier)
+    for sub_type in js_type.IterTypes():
+      self._MarkAliasUsed(sub_type)
+
+  def _MarkLocalVariableUsed(self, identifier):
+    """Marks the local variable as used in the relevant scope.
+
+    Marks the local variable in the scope nearest to the current scope that
+    matches the given identifier as used.
+
+    Args:
+      identifier: The identifier representing the potential usage of a local
+                  variable.
+    """
+    identifier = identifier.split('.', 1)[0]
+    # Find the first instance of the identifier in the stack of function scopes
+    # and mark it used.
+    for unused_local_variables in reversed(
+        self._unused_local_variables_by_scope):
+      if identifier in unused_local_variables:
+        del unused_local_variables[identifier]
+        break
 
   def _ReportMissingProvides(self, missing_provides, token, need_blank_line):
     """Reports missing provide statements to the error handler.
 
     Args:
-      missing_provides: A list of strings where each string is a namespace that
-          should be provided, but is not.
+      missing_provides: A dictionary mapping each namespace (string) that
+          should be provided, but is not, to the first line number (integer)
+          where it is required.
       token: The token where the error was detected (also where the new provides
           will be inserted.
       need_blank_line: Whether a blank line needs to be inserted after the new
           provides are inserted. May be True, False, or None, where None
           indicates that the insert location is unknown.
     """
+
+    missing_provides_msg = 'Missing the following goog.provide statements:\n'
+    missing_provides_msg += '\n'.join(['goog.provide(\'%s\');' % x for x in
+                                       sorted(missing_provides)])
+    missing_provides_msg += '\n'
+
+    missing_provides_msg += '\nFirst line where provided: \n'
+    missing_provides_msg += '\n'.join(
+        ['  %s : line %d' % (x, missing_provides[x]) for x in
+         sorted(missing_provides)])
+    missing_provides_msg += '\n'
+
     self._HandleError(
         errors.MISSING_GOOG_PROVIDE,
-        'Missing the following goog.provide statements:\n' +
-        '\n'.join(map(lambda x: 'goog.provide(\'%s\');' % x,
-                      sorted(missing_provides))),
+        missing_provides_msg,
         token, position=Position.AtBeginning(),
-        fix_data=(missing_provides, need_blank_line))
+        fix_data=(missing_provides.keys(), need_blank_line))
 
   def _ReportMissingRequires(self, missing_requires, token, need_blank_line):
     """Reports missing require statements to the error handler.
 
     Args:
-      missing_requires: A list of strings where each string is a namespace that
-          should be required, but is not.
+      missing_requires: A dictionary mapping each namespace (string) that
+          should be required, but is not, to the first line number (integer)
+          where it is required.
       token: The token where the error was detected (also where the new requires
           will be inserted.
       need_blank_line: Whether a blank line needs to be inserted before the new
           requires are inserted. May be True, False, or None, where None
           indicates that the insert location is unknown.
     """
+
+    missing_requires_msg = 'Missing the following goog.require statements:\n'
+    missing_requires_msg += '\n'.join(['goog.require(\'%s\');' % x for x in
+                                       sorted(missing_requires)])
+    missing_requires_msg += '\n'
+
+    missing_requires_msg += '\nFirst line where required: \n'
+    missing_requires_msg += '\n'.join(
+        ['  %s : line %d' % (x, missing_requires[x]) for x in
+         sorted(missing_requires)])
+    missing_requires_msg += '\n'
+
     self._HandleError(
         errors.MISSING_GOOG_REQUIRE,
-        'Missing the following goog.require statements:\n' +
-        '\n'.join(map(lambda x: 'goog.require(\'%s\');' % x,
-                      sorted(missing_requires))),
+        missing_requires_msg,
         token, position=Position.AtBeginning(),
-        fix_data=(missing_requires, need_blank_line))
+        fix_data=(missing_requires.keys(), need_blank_line))
 
-  def Finalize(self, state, tokenizer_mode):
+  def _ReportIllegalAliasStatement(self, illegal_alias_statements):
+    """Reports alias statements that would need a goog.require."""
+    for namespace, token in illegal_alias_statements.iteritems():
+      self._HandleError(
+          errors.ALIAS_STMT_NEEDS_GOOG_REQUIRE,
+          'The alias definition would need the namespace \'%s\' which is not '
+          'required through any other symbol.' % namespace,
+          token, position=Position.AtBeginning())
+
+  def Finalize(self, state):
     """Perform all checks that need to occur after all lines are processed."""
     # Call the base class's Finalize function.
-    super(JavaScriptLintRules, self).Finalize(state, tokenizer_mode)
+    super(JavaScriptLintRules, self).Finalize(state)
 
     if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS):
       # Report an error for any declared private member that was never used.
@@ -477,8 +686,8 @@
 
       # Clear state to prepare for the next file.
       self._declared_private_member_tokens = {}
-      self._declared_private_members = Set()
-      self._used_private_members = Set()
+      self._declared_private_members = set()
+      self._used_private_members = set()
 
     namespaces_info = self._namespaces_info
     if namespaces_info is not None:
@@ -491,10 +700,12 @@
           self._ReportMissingProvides(
               missing_provides, state.GetFirstToken(), None)
 
-        missing_requires = namespaces_info.GetMissingRequires()
+        missing_requires, illegal_alias = namespaces_info.GetMissingRequires()
         if missing_requires:
           self._ReportMissingRequires(
               missing_requires, state.GetFirstToken(), None)
+        if illegal_alias:
+          self._ReportIllegalAliasStatement(illegal_alias)
 
     self._CheckSortedRequiresProvides(state.GetFirstToken())
 
@@ -508,32 +719,38 @@
       token: The first token in the token stream.
     """
     sorter = requireprovidesorter.RequireProvideSorter()
-    provides_result = sorter.CheckProvides(token)
-    if provides_result:
+    first_provide_token = sorter.CheckProvides(token)
+    if first_provide_token:
+      new_order = sorter.GetFixedProvideString(first_provide_token)
       self._HandleError(
           errors.GOOG_PROVIDES_NOT_ALPHABETIZED,
           'goog.provide classes must be alphabetized.  The correct code is:\n' +
-          '\n'.join(
-              map(lambda x: 'goog.provide(\'%s\');' % x, provides_result[1])),
-          provides_result[0],
+          new_order,
+          first_provide_token,
           position=Position.AtBeginning(),
-          fix_data=provides_result[0])
+          fix_data=first_provide_token)
 
-    requires_result = sorter.CheckRequires(token)
-    if requires_result:
+    first_require_token = sorter.CheckRequires(token)
+    if first_require_token:
+      new_order = sorter.GetFixedRequireString(first_require_token)
       self._HandleError(
           errors.GOOG_REQUIRES_NOT_ALPHABETIZED,
           'goog.require classes must be alphabetized.  The correct code is:\n' +
-          '\n'.join(
-              map(lambda x: 'goog.require(\'%s\');' % x, requires_result[1])),
-          requires_result[0],
+          new_order,
+          first_require_token,
           position=Position.AtBeginning(),
-          fix_data=requires_result[0])
+          fix_data=first_require_token)
 
   def GetLongLineExceptions(self):
-    """Gets a list of regexps for lines which can be longer than the limit."""
+    """Gets a list of regexps for lines which can be longer than the limit.
+
+    Returns:
+      A list of regexps, used as matches (rather than searches).
+    """
     return [
-        re.compile('.*// @suppress longLineCheck$'),
-        re.compile('goog\.require\(.+\);?\s*$'),
-        re.compile('goog\.provide\(.+\);?\s*$')
+        re.compile(r'.*// @suppress longLineCheck$'),
+        re.compile(r'((var|let|const) .+\s*=\s*)?goog\.require\(.+\);?\s*$'),
+        re.compile(r'goog\.(forwardDeclare|module|provide|setTestOnly)'
+                   r'\(.+\);?\s*$'),
+        re.compile(r'[\s/*]*@visibility\s*{.*}[\s*/]*$'),
         ]
diff --git a/catapult/third_party/closure_linter/closure_linter/javascriptstatetracker.py b/catapult/third_party/closure_linter/closure_linter/javascriptstatetracker.py
old mode 100755
new mode 100644
index 2ce5c02..e0a42f6
--- a/catapult/third_party/closure_linter/closure_linter/javascriptstatetracker.py
+++ b/catapult/third_party/closure_linter/closure_linter/javascriptstatetracker.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-#
 # Copyright 2008 The Closure Linter Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -36,7 +35,8 @@
       including braces.
     type_end_token: The last token specifying the flag JS type,
       including braces.
-    type: The JavaScript type spec.
+    type: The type spec string.
+    jstype: The type spec, a TypeAnnotation instance.
     name_token: The token specifying the flag name.
     name: The flag name
     description_start_token: The first token in the description.
@@ -50,18 +50,10 @@
   # TODO(robbyw): determine which of these, if any, should be illegal.
   EXTENDED_DOC = frozenset([
       'class', 'code', 'desc', 'final', 'hidden', 'inheritDoc', 'link',
-      'meaning', 'protected', 'notypecheck', 'throws'])
+      'meaning', 'provideGoog', 'throws'])
 
   LEGAL_DOC = EXTENDED_DOC | statetracker.DocFlag.LEGAL_DOC
 
-  def __init__(self, flag_token):
-    """Creates the JsDocFlag object and attaches it to the given start token.
-
-    Args:
-      flag_token: The starting token of the flag.
-    """
-    statetracker.DocFlag.__init__(self, flag_token)
-
 
 class JavaScriptStateTracker(statetracker.StateTracker):
   """JavaScript state tracker.
@@ -74,6 +66,11 @@
     """Initializes a JavaScript token stream state tracker."""
     statetracker.StateTracker.__init__(self, JsDocFlag)
 
+  def Reset(self):
+    self._scope_depth = 0
+    self._block_stack = []
+    super(JavaScriptStateTracker, self).Reset()
+
   def InTopLevel(self):
     """Compute whether we are at the top level in the class.
 
@@ -85,7 +82,26 @@
     Returns:
       Whether we are at the top level in the class.
     """
-    return not self.InParentheses()
+    return self._scope_depth == self.ParenthesesDepth()
+
+  def InFunction(self):
+    """Returns true if the current token is within a function.
+
+    This js-specific override ignores goog.scope functions.
+
+    Returns:
+      True if the current token is within a function.
+    """
+    return self._scope_depth != self.FunctionDepth()
+
+  def InNonScopeBlock(self):
+    """Compute whether we are nested within a non-goog.scope block.
+
+    Returns:
+      True if the token is enclosed in a block that does not originate from
+      a goog.scope statement. False otherwise.
+    """
+    return self._scope_depth != self.BlockDepth()
 
   def GetBlockType(self, token):
     """Determine the block type given a START_BLOCK token.
@@ -97,20 +113,38 @@
     Returns:
       Code block type for current token.
     """
-    last_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, None,
-                                       True)
+    last_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, reverse=True)
     if last_code.type in (Type.END_PARAMETERS, Type.END_PAREN,
                           Type.KEYWORD) and not last_code.IsKeyword('return'):
       return self.CODE
     else:
       return self.OBJECT_LITERAL
 
+  def GetCurrentBlockStart(self):
+    """Gets the start token of current block.
+
+    Returns:
+      Starting token of current block. None if not in block.
+    """
+    if self._block_stack:
+      return self._block_stack[-1]
+    else:
+      return None
+
   def HandleToken(self, token, last_non_space_token):
     """Handles the given token and updates state.
 
     Args:
       token: The token to handle.
-      last_non_space_token:
+      last_non_space_token: The last non-space token encountered.
     """
+    if token.type == Type.START_BLOCK:
+      self._block_stack.append(token)
+    if token.type == Type.IDENTIFIER and token.string == 'goog.scope':
+      self._scope_depth += 1
+    if token.type == Type.END_BLOCK:
+      start_token = self._block_stack.pop()
+      if tokenutil.GoogScopeOrNoneFromStartBlock(start_token):
+        self._scope_depth -= 1
     super(JavaScriptStateTracker, self).HandleToken(token,
                                                     last_non_space_token)
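
The tracker above keeps a stack of START_BLOCK tokens plus a goog.scope depth counter, and the InTopLevel/InFunction/InNonScopeBlock predicates reduce to depth comparisons. A simplified, self-contained sketch of that bookkeeping, using plain strings in place of the linter's token objects; starts_goog_scope stands in for the tokenutil.GoogScopeOrNoneFromStartBlock() lookup:

    # Simplified sketch of the depth bookkeeping, not the tracker's real API.
    class ScopeDepthSketch(object):

      def __init__(self):
        self._scope_depth = 0   # number of enclosing goog.scope functions
        self._block_stack = []  # one entry per currently open block

      def HandleToken(self, token_type, string, starts_goog_scope=False):
        if token_type == 'START_BLOCK':
          self._block_stack.append(string)
        if token_type == 'IDENTIFIER' and string == 'goog.scope':
          self._scope_depth += 1
        if token_type == 'END_BLOCK':
          self._block_stack.pop()
          if starts_goog_scope:
            self._scope_depth -= 1

      def InNonScopeBlock(self):
        # In a non-goog.scope block iff more blocks are open than there are
        # enclosing goog.scope functions.
        return self._scope_depth != len(self._block_stack)
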
diff --git a/catapult/third_party/closure_linter/closure_linter/javascriptstatetracker_test.py b/catapult/third_party/closure_linter/closure_linter/javascriptstatetracker_test.py
new file mode 100644
index 0000000..76dabd2
--- /dev/null
+++ b/catapult/third_party/closure_linter/closure_linter/javascriptstatetracker_test.py
@@ -0,0 +1,278 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for the javascriptstatetracker module."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+__author__ = ('nnaze@google.com (Nathan Naze)')
+
+
+import unittest as googletest
+
+from closure_linter import javascripttokens
+from closure_linter import testutil
+from closure_linter import tokenutil
+
+
+_FUNCTION_SCRIPT = """\
+var a = 3;
+
+function foo(aaa, bbb, ccc) {
+  var b = 4;
+}
+
+
+/**
+ * JSDoc comment.
+ */
+var bar = function(ddd, eee, fff) {
+
+};
+
+
+/**
+ * Verify that nested functions get their proper parameters recorded.
+ */
+var baz = function(ggg, hhh, iii) {
+  var qux = function(jjj, kkk, lll) {
+  };
+  // make sure that entering a new block does not change baz' parameters.
+  {};
+};
+
+"""
+
+
+class FunctionTest(googletest.TestCase):
+
+  def testFunctionParse(self):
+    functions, _ = testutil.ParseFunctionsAndComments(_FUNCTION_SCRIPT)
+    self.assertEquals(4, len(functions))
+
+    # First function
+    function = functions[0]
+    self.assertEquals(['aaa', 'bbb', 'ccc'], function.parameters)
+
+    start_token = function.start_token
+    end_token = function.end_token
+
+    self.assertEquals(
+        javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
+        function.start_token.type)
+
+    self.assertEquals('function', start_token.string)
+    self.assertEquals(3, start_token.line_number)
+    self.assertEquals(0, start_token.start_index)
+
+    self.assertEquals('}', end_token.string)
+    self.assertEquals(5, end_token.line_number)
+    self.assertEquals(0, end_token.start_index)
+
+    self.assertEquals('foo', function.name)
+
+    self.assertIsNone(function.doc)
+
+    # Second function
+    function = functions[1]
+    self.assertEquals(['ddd', 'eee', 'fff'], function.parameters)
+
+    start_token = function.start_token
+    end_token = function.end_token
+
+    self.assertEquals(
+        javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
+        function.start_token.type)
+
+    self.assertEquals('function', start_token.string)
+    self.assertEquals(11, start_token.line_number)
+    self.assertEquals(10, start_token.start_index)
+
+    self.assertEquals('}', end_token.string)
+    self.assertEquals(13, end_token.line_number)
+    self.assertEquals(0, end_token.start_index)
+
+    self.assertEquals('bar', function.name)
+
+    self.assertIsNotNone(function.doc)
+
+    # Check function JSDoc
+    doc = function.doc
+    doc_tokens = tokenutil.GetTokenRange(doc.start_token, doc.end_token)
+
+    comment_type = javascripttokens.JavaScriptTokenType.COMMENT
+    comment_tokens = filter(lambda t: t.type is comment_type, doc_tokens)
+
+    self.assertEquals('JSDoc comment.',
+                      tokenutil.TokensToString(comment_tokens).strip())
+
+    # Third function
+    function = functions[2]
+    self.assertEquals(['ggg', 'hhh', 'iii'], function.parameters)
+
+    start_token = function.start_token
+    end_token = function.end_token
+
+    self.assertEquals(
+        javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
+        function.start_token.type)
+
+    self.assertEquals('function', start_token.string)
+    self.assertEquals(19, start_token.line_number)
+    self.assertEquals(10, start_token.start_index)
+
+    self.assertEquals('}', end_token.string)
+    self.assertEquals(24, end_token.line_number)
+    self.assertEquals(0, end_token.start_index)
+
+    self.assertEquals('baz', function.name)
+    self.assertIsNotNone(function.doc)
+
+    # Fourth function (inside third function)
+    function = functions[3]
+    self.assertEquals(['jjj', 'kkk', 'lll'], function.parameters)
+
+    start_token = function.start_token
+    end_token = function.end_token
+
+    self.assertEquals(
+        javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
+        function.start_token.type)
+
+    self.assertEquals('function', start_token.string)
+    self.assertEquals(20, start_token.line_number)
+    self.assertEquals(12, start_token.start_index)
+
+    self.assertEquals('}', end_token.string)
+    self.assertEquals(21, end_token.line_number)
+    self.assertEquals(2, end_token.start_index)
+
+    self.assertEquals('qux', function.name)
+    self.assertIsNone(function.doc)
+
+
+
+class CommentTest(googletest.TestCase):
+
+  def testGetDescription(self):
+    comment = self._ParseComment("""
+        /**
+         * Comment targeting goog.foo.
+         *
+         * This is the second line.
+         * @param {number} foo The count of foo.
+         */
+        target;""")
+
+    self.assertEqual(
+        'Comment targeting goog.foo.\n\nThis is the second line.',
+        comment.description)
+
+  def testCommentGetTarget(self):
+    self.assertCommentTarget('goog.foo', """
+        /**
+         * Comment targeting goog.foo.
+         */
+        goog.foo = 6;
+        """)
+
+    self.assertCommentTarget('bar', """
+        /**
+         * Comment targeting bar.
+         */
+        var bar = "Karate!";
+        """)
+
+    self.assertCommentTarget('doThing', """
+        /**
+         * Comment targeting doThing.
+         */
+        function doThing() {};
+        """)
+
+    self.assertCommentTarget('this.targetProperty', """
+        goog.bar.Baz = function() {
+          /**
+           * Comment targeting targetProperty.
+           */
+          this.targetProperty = 3;
+        };
+        """)
+
+    self.assertCommentTarget('goog.bar.prop', """
+        /**
+         * Comment targeting goog.bar.prop.
+         */
+        goog.bar.prop;
+        """)
+
+    self.assertCommentTarget('goog.aaa.bbb', """
+        /**
+         * Comment targeting goog.aaa.bbb.
+         */
+        (goog.aaa.bbb)
+        """)
+
+    self.assertCommentTarget('theTarget', """
+        /**
+         * Comment targeting symbol preceded by newlines, whitespace,
+         * and parens -- things we ignore.
+         */
+        (theTarget)
+        """)
+
+    self.assertCommentTarget(None, """
+        /**
+         * @fileoverview File overview.
+         */
+        (notATarget)
+        """)
+
+    self.assertCommentTarget(None, """
+        /**
+         * Comment that doesn't find a target.
+         */
+        """)
+
+    self.assertCommentTarget('theTarget.is.split.across.lines', """
+        /**
+         * Comment that addresses a symbol split across lines.
+         */
+        (theTarget.is.split
+             .across.lines)
+        """)
+
+    self.assertCommentTarget('theTarget.is.split.across.lines', """
+        /**
+         * Comment that addresses a symbol split across lines.
+         */
+        (theTarget.is.split.
+                across.lines)
+        """)
+
+  def _ParseComment(self, script):
+    """Parse a script that contains one comment and return it."""
+    _, comments = testutil.ParseFunctionsAndComments(script)
+    self.assertEquals(1, len(comments))
+    return comments[0]
+
+  def assertCommentTarget(self, target, script):
+    comment = self._ParseComment(script)
+    self.assertEquals(target, comment.GetTargetIdentifier())
+
+
+if __name__ == '__main__':
+  googletest.main()
diff --git a/catapult/third_party/closure_linter/closure_linter/javascripttokenizer.py b/catapult/third_party/closure_linter/closure_linter/javascripttokenizer.py
old mode 100755
new mode 100644
index 98f9184..4c6f3a1
--- a/catapult/third_party/closure_linter/closure_linter/javascripttokenizer.py
+++ b/catapult/third_party/closure_linter/closure_linter/javascripttokenizer.py
@@ -51,7 +51,7 @@
   """
 
   # Useful patterns for JavaScript parsing.
-  IDENTIFIER_CHAR = r'A-Za-z0-9_$.'
+  IDENTIFIER_CHAR = r'A-Za-z0-9_$'
 
   # Number patterns based on:
   # http://www.mozilla.org/js/language/js20-2000-07/formal/lexer-grammar.html
@@ -92,6 +92,9 @@
   # like in email addresses in the @author tag.
   DOC_COMMENT_TEXT = re.compile(r'([^*{}\s]@|[^*{}@]|\*(?!/))+')
   DOC_COMMENT_NO_SPACES_TEXT = re.compile(r'([^*{}\s]@|[^*{}@\s]|\*(?!/))+')
+  # Match anything that is allowed in a type definition, except for tokens
+  # needed to parse it (and the lookahead assertion for "*/").
+  DOC_COMMENT_TYPE_TEXT = re.compile(r'([^*|!?=<>(){}:,\s]|\*(?!/))+')
 
   # Match the prefix ' * ' that starts every line of jsdoc. Want to include
   # spaces after the '*', but nothing else that occurs after a '*', and don't
@@ -141,13 +144,25 @@
   #   delete, in, instanceof, new, typeof - included as operators.
   #   this - included in identifiers.
   #   null, undefined - not included, should go in some "special constant" list.
-  KEYWORD_LIST = ['break', 'case', 'catch', 'continue', 'default', 'do', 'else',
-      'finally', 'for', 'if', 'return', 'switch', 'throw', 'try', 'var',
-      'while', 'with']
-  # Match a keyword string followed by a non-identifier character in order to
-  # not match something like doSomething as do + Something.
-  KEYWORD = re.compile('(%s)((?=[^%s])|$)' % (
-      '|'.join(KEYWORD_LIST), IDENTIFIER_CHAR))
+  KEYWORD_LIST = [
+      'break',
+      'case',
+      'catch',
+      'continue',
+      'default',
+      'do',
+      'else',
+      'finally',
+      'for',
+      'if',
+      'return',
+      'switch',
+      'throw',
+      'try',
+      'var',
+      'while',
+      'with',
+  ]
 
   # List of regular expressions to match as operators.  Some notes: for our
   # purposes, the comma behaves similarly enough to a normal operator that we
@@ -155,19 +170,62 @@
   # characters - this may not match some very esoteric uses of the in operator.
   # Operators that are subsets of larger operators must come later in this list
   # for proper matching, e.g., '>>' must come AFTER '>>>'.
-  OPERATOR_LIST = [',', r'\+\+', '===', '!==', '>>>=', '>>>', '==', '>=', '<=',
-                   '!=', '<<=', '>>=', '<<', '>>', '>', '<', r'\+=', r'\+',
-                   '--', '\^=', '-=', '-', '/=', '/', r'\*=', r'\*', '%=', '%',
-                   '&&', r'\|\|', '&=', '&', r'\|=', r'\|', '=', '!', ':', '\?',
-                   r'\bdelete\b', r'\bin\b', r'\binstanceof\b', r'\bnew\b',
-                   r'\btypeof\b', r'\bvoid\b']
+  OPERATOR_LIST = [
+      ',',
+      r'\+\+',
+      '===',
+      '!==',
+      '>>>=',
+      '>>>',
+      '==',
+      '>=',
+      '<=',
+      '!=',
+      '<<=',
+      '>>=',
+      '<<',
+      '>>',
+      '=>',
+      '>',
+      '<',
+      r'\+=',
+      r'\+',
+      '--',
+      r'\^=',
+      '-=',
+      '-',
+      '/=',
+      '/',
+      r'\*=',
+      r'\*',
+      '%=',
+      '%',
+      '&&',
+      r'\|\|',
+      '&=',
+      '&',
+      r'\|=',
+      r'\|',
+      '=',
+      '!',
+      ':',
+      r'\?',
+      r'\^',
+      r'\bdelete\b',
+      r'\bin\b',
+      r'\binstanceof\b',
+      r'\bnew\b',
+      r'\btypeof\b',
+      r'\bvoid\b',
+      r'\.',
+  ]
   OPERATOR = re.compile('|'.join(OPERATOR_LIST))
 
   WHITESPACE = re.compile(r'\s+')
   SEMICOLON = re.compile(r';')
   # Technically JavaScript identifiers can't contain '.', but we treat a set of
-  # nested identifiers as a single identifier.
-  NESTED_IDENTIFIER = r'[a-zA-Z_$][%s.]*' % IDENTIFIER_CHAR
+  # nested identifiers as a single identifier, except for trailing dots.
+  NESTED_IDENTIFIER = r'[a-zA-Z_$]([%s]|\.[a-zA-Z_$])*' % IDENTIFIER_CHAR
   IDENTIFIER = re.compile(NESTED_IDENTIFIER)
 
   SIMPLE_LVALUE = re.compile(r"""
@@ -181,13 +239,35 @@
   # beginning of the line, after whitespace, or after a '{'.  The look-behind
   # check is necessary to not match someone@google.com as a flag.
   DOC_FLAG = re.compile(r'(^|(?<=\s))@(?P<name>[a-zA-Z]+)')
-  # To properly parse parameter names, we need to tokenize whitespace into a
-  # token.
-  DOC_FLAG_LEX_SPACES = re.compile(r'(^|(?<=\s))@(?P<name>%s)\b' %
-                                     '|'.join(['param']))
+  # To properly parse parameter names and complex doctypes containing
+  # whitespace, we need to tokenize whitespace into a token after certain
+  # doctags. All flags in statetracker.HAS_TYPE that are not listed here must
+  # not contain any whitespace in their types.
+  DOC_FLAG_LEX_SPACES = re.compile(
+      r'(^|(?<=\s))@(?P<name>%s)\b' %
+      '|'.join([
+          'const',
+          'enum',
+          'export',
+          'extends',
+          'final',
+          'implements',
+          'package',
+          'param',
+          'private',
+          'protected',
+          'public',
+          'return',
+          'type',
+          'typedef'
+      ]))
 
   DOC_INLINE_FLAG = re.compile(r'(?<={)@(?P<name>[a-zA-Z]+)')
 
+  DOC_TYPE_BLOCK_START = re.compile(r'[<(]')
+  DOC_TYPE_BLOCK_END = re.compile(r'[>)]')
+  DOC_TYPE_MODIFIERS = re.compile(r'[!?|,:=]')
+
   # Star followed by non-slash, i.e a star that does not end a comment.
   # This is used for TYPE_GROUP below.
   SAFE_STAR = r'(\*(?!/))'
@@ -208,136 +288,158 @@
       # Tokenize braces so we can find types.
       Matcher(START_BLOCK, Type.DOC_START_BRACE),
       Matcher(END_BLOCK, Type.DOC_END_BRACE),
+
+      # And some more to parse types.
+      Matcher(DOC_TYPE_BLOCK_START, Type.DOC_TYPE_START_BLOCK),
+      Matcher(DOC_TYPE_BLOCK_END, Type.DOC_TYPE_END_BLOCK),
+
+      Matcher(DOC_TYPE_MODIFIERS, Type.DOC_TYPE_MODIFIER),
+      Matcher(DOC_COMMENT_TYPE_TEXT, Type.COMMENT),
+
       Matcher(DOC_PREFIX, Type.DOC_PREFIX, None, True)]
 
-
-  # The token matcher groups work as follows: it is an list of  Matcher objects.
-  # The matchers will be tried in this order, and the first to match will be
-  # returned.  Hence the order is important because the matchers that come first
-  # overrule the matchers that come later.
-  JAVASCRIPT_MATCHERS = {
-      # Matchers for basic text mode.
-      JavaScriptModes.TEXT_MODE: [
-        # Check a big group - strings, starting comments, and regexes - all
-        # of which could be intertwined.  'string with /regex/',
-        # /regex with 'string'/, /* comment with /regex/ and string */ (and so
-        # on)
-        Matcher(START_DOC_COMMENT, Type.START_DOC_COMMENT,
-                JavaScriptModes.DOC_COMMENT_MODE),
-        Matcher(START_BLOCK_COMMENT, Type.START_BLOCK_COMMENT,
-                JavaScriptModes.BLOCK_COMMENT_MODE),
-        Matcher(END_OF_LINE_SINGLE_LINE_COMMENT,
-                Type.START_SINGLE_LINE_COMMENT),
-        Matcher(START_SINGLE_LINE_COMMENT, Type.START_SINGLE_LINE_COMMENT,
-                JavaScriptModes.LINE_COMMENT_MODE),
-        Matcher(SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_START,
-                JavaScriptModes.SINGLE_QUOTE_STRING_MODE),
-        Matcher(DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_START,
-                JavaScriptModes.DOUBLE_QUOTE_STRING_MODE),
-        Matcher(REGEX, Type.REGEX),
-
-        # Next we check for start blocks appearing outside any of the items
-        # above.
-        Matcher(START_BLOCK, Type.START_BLOCK),
-        Matcher(END_BLOCK, Type.END_BLOCK),
-
-        # Then we search for function declarations.
-        Matcher(FUNCTION_DECLARATION, Type.FUNCTION_DECLARATION,
-                JavaScriptModes.FUNCTION_MODE),
-
-        # Next, we convert non-function related parens to tokens.
-        Matcher(OPENING_PAREN, Type.START_PAREN),
-        Matcher(CLOSING_PAREN, Type.END_PAREN),
-
-        # Next, we convert brackets to tokens.
-        Matcher(OPENING_BRACKET, Type.START_BRACKET),
-        Matcher(CLOSING_BRACKET, Type.END_BRACKET),
-
-        # Find numbers.  This has to happen before operators because scientific
-        # notation numbers can have + and - in them.
-        Matcher(NUMBER, Type.NUMBER),
-
-        # Find operators and simple assignments
-        Matcher(SIMPLE_LVALUE, Type.SIMPLE_LVALUE),
-        Matcher(OPERATOR, Type.OPERATOR),
-
-        # Find key words and whitespace.
-        Matcher(KEYWORD, Type.KEYWORD),
-        Matcher(WHITESPACE, Type.WHITESPACE),
-
-        # Find identifiers.
-        Matcher(IDENTIFIER, Type.IDENTIFIER),
-
-        # Finally, we convert semicolons to tokens.
-        Matcher(SEMICOLON, Type.SEMICOLON)],
-
-      # Matchers for single quote strings.
-      JavaScriptModes.SINGLE_QUOTE_STRING_MODE: [
-          Matcher(SINGLE_QUOTE_TEXT, Type.STRING_TEXT),
-          Matcher(SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_END,
-              JavaScriptModes.TEXT_MODE)],
-
-      # Matchers for double quote strings.
-      JavaScriptModes.DOUBLE_QUOTE_STRING_MODE: [
-          Matcher(DOUBLE_QUOTE_TEXT, Type.STRING_TEXT),
-          Matcher(DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_END,
-              JavaScriptModes.TEXT_MODE)],
-
-      # Matchers for block comments.
-      JavaScriptModes.BLOCK_COMMENT_MODE: [
-        # First we check for exiting a block comment.
-        Matcher(END_BLOCK_COMMENT, Type.END_BLOCK_COMMENT,
-                JavaScriptModes.TEXT_MODE),
-
-        # Match non-comment-ending text..
-        Matcher(BLOCK_COMMENT_TEXT, Type.COMMENT)],
-
-      # Matchers for doc comments.
-      JavaScriptModes.DOC_COMMENT_MODE: COMMON_DOC_MATCHERS + [
-        Matcher(DOC_COMMENT_TEXT, Type.COMMENT)],
-
-      JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: COMMON_DOC_MATCHERS + [
-        Matcher(WHITESPACE, Type.COMMENT),
-        Matcher(DOC_COMMENT_NO_SPACES_TEXT, Type.COMMENT)],
-
-      # Matchers for single line comments.
-      JavaScriptModes.LINE_COMMENT_MODE: [
-        # We greedy match until the end of the line in line comment mode.
-        Matcher(ANYTHING, Type.COMMENT, JavaScriptModes.TEXT_MODE)],
-
-      # Matchers for code after the function keyword.
-      JavaScriptModes.FUNCTION_MODE: [
-        # Must match open paren before anything else and move into parameter
-        # mode, otherwise everything inside the parameter list is parsed
-        # incorrectly.
-        Matcher(OPENING_PAREN, Type.START_PARAMETERS,
-                JavaScriptModes.PARAMETER_MODE),
-        Matcher(WHITESPACE, Type.WHITESPACE),
-        Matcher(IDENTIFIER, Type.FUNCTION_NAME)],
-
-      # Matchers for function parameters
-      JavaScriptModes.PARAMETER_MODE: [
-        # When in function parameter mode, a closing paren is treated specially.
-        # Everything else is treated as lines of parameters.
-        Matcher(CLOSING_PAREN_WITH_SPACE, Type.END_PARAMETERS,
-                JavaScriptModes.TEXT_MODE),
-        Matcher(PARAMETERS, Type.PARAMETERS, JavaScriptModes.PARAMETER_MODE)]}
-
   # When text is not matched, it is given this default type based on mode.
   # If unspecified in this map, the default default is Type.NORMAL.
   JAVASCRIPT_DEFAULT_TYPES = {
-    JavaScriptModes.DOC_COMMENT_MODE: Type.COMMENT,
-    JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: Type.COMMENT
+      JavaScriptModes.DOC_COMMENT_MODE: Type.COMMENT,
+      JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: Type.COMMENT
   }
 
-  def __init__(self, parse_js_doc = True):
+  @classmethod
+  def BuildMatchers(cls):
+    """Builds the token matcher group.
+
+    The token matcher groups work as follows: it is a list of Matcher objects.
+    The matchers will be tried in this order, and the first to match will be
+    returned.  Hence the order is important because the matchers that come first
+    overrule the matchers that come later.
+
+    Returns:
+      The completed token matcher group.
+    """
+    # Match a keyword string followed by a non-identifier character in order to
+    # not match something like doSomething as do + Something.
+    keyword = re.compile('(%s)((?=[^%s])|$)' % (
+        '|'.join(cls.KEYWORD_LIST), cls.IDENTIFIER_CHAR))
+    return {
+
+        # Matchers for basic text mode.
+        JavaScriptModes.TEXT_MODE: [
+            # Check a big group - strings, starting comments, and regexes - all
+            # of which could be intertwined.  'string with /regex/',
+            # /regex with 'string'/, /* comment with /regex/ and string */ (and
+            # so on)
+            Matcher(cls.START_DOC_COMMENT, Type.START_DOC_COMMENT,
+                    JavaScriptModes.DOC_COMMENT_MODE),
+            Matcher(cls.START_BLOCK_COMMENT, Type.START_BLOCK_COMMENT,
+                    JavaScriptModes.BLOCK_COMMENT_MODE),
+            Matcher(cls.END_OF_LINE_SINGLE_LINE_COMMENT,
+                    Type.START_SINGLE_LINE_COMMENT),
+            Matcher(cls.START_SINGLE_LINE_COMMENT,
+                    Type.START_SINGLE_LINE_COMMENT,
+                    JavaScriptModes.LINE_COMMENT_MODE),
+            Matcher(cls.SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_START,
+                    JavaScriptModes.SINGLE_QUOTE_STRING_MODE),
+            Matcher(cls.DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_START,
+                    JavaScriptModes.DOUBLE_QUOTE_STRING_MODE),
+            Matcher(cls.REGEX, Type.REGEX),
+
+            # Next we check for start blocks appearing outside any of the items
+            # above.
+            Matcher(cls.START_BLOCK, Type.START_BLOCK),
+            Matcher(cls.END_BLOCK, Type.END_BLOCK),
+
+            # Then we search for function declarations.
+            Matcher(cls.FUNCTION_DECLARATION, Type.FUNCTION_DECLARATION,
+                    JavaScriptModes.FUNCTION_MODE),
+
+            # Next, we convert non-function related parens to tokens.
+            Matcher(cls.OPENING_PAREN, Type.START_PAREN),
+            Matcher(cls.CLOSING_PAREN, Type.END_PAREN),
+
+            # Next, we convert brackets to tokens.
+            Matcher(cls.OPENING_BRACKET, Type.START_BRACKET),
+            Matcher(cls.CLOSING_BRACKET, Type.END_BRACKET),
+
+            # Find numbers.  This has to happen before operators because
+            # scientific notation numbers can have + and - in them.
+            Matcher(cls.NUMBER, Type.NUMBER),
+
+            # Find operators and simple assignments
+            Matcher(cls.SIMPLE_LVALUE, Type.SIMPLE_LVALUE),
+            Matcher(cls.OPERATOR, Type.OPERATOR),
+
+            # Find key words and whitespace.
+            Matcher(keyword, Type.KEYWORD),
+            Matcher(cls.WHITESPACE, Type.WHITESPACE),
+
+            # Find identifiers.
+            Matcher(cls.IDENTIFIER, Type.IDENTIFIER),
+
+            # Finally, we convert semicolons to tokens.
+            Matcher(cls.SEMICOLON, Type.SEMICOLON)],
+
+        # Matchers for single quote strings.
+        JavaScriptModes.SINGLE_QUOTE_STRING_MODE: [
+            Matcher(cls.SINGLE_QUOTE_TEXT, Type.STRING_TEXT),
+            Matcher(cls.SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_END,
+                    JavaScriptModes.TEXT_MODE)],
+
+        # Matchers for double quote strings.
+        JavaScriptModes.DOUBLE_QUOTE_STRING_MODE: [
+            Matcher(cls.DOUBLE_QUOTE_TEXT, Type.STRING_TEXT),
+            Matcher(cls.DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_END,
+                    JavaScriptModes.TEXT_MODE)],
+
+        # Matchers for block comments.
+        JavaScriptModes.BLOCK_COMMENT_MODE: [
+            # First we check for exiting a block comment.
+            Matcher(cls.END_BLOCK_COMMENT, Type.END_BLOCK_COMMENT,
+                    JavaScriptModes.TEXT_MODE),
+
+            # Match non-comment-ending text..
+            Matcher(cls.BLOCK_COMMENT_TEXT, Type.COMMENT)],
+
+        # Matchers for doc comments.
+        JavaScriptModes.DOC_COMMENT_MODE: cls.COMMON_DOC_MATCHERS + [
+            Matcher(cls.DOC_COMMENT_TEXT, Type.COMMENT)],
+
+        JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: cls.COMMON_DOC_MATCHERS + [
+            Matcher(cls.WHITESPACE, Type.COMMENT),
+            Matcher(cls.DOC_COMMENT_NO_SPACES_TEXT, Type.COMMENT)],
+
+        # Matchers for single line comments.
+        JavaScriptModes.LINE_COMMENT_MODE: [
+            # We greedy match until the end of the line in line comment mode.
+            Matcher(cls.ANYTHING, Type.COMMENT, JavaScriptModes.TEXT_MODE)],
+
+        # Matchers for code after the function keyword.
+        JavaScriptModes.FUNCTION_MODE: [
+            # Must match open paren before anything else and move into parameter
+            # mode, otherwise everything inside the parameter list is parsed
+            # incorrectly.
+            Matcher(cls.OPENING_PAREN, Type.START_PARAMETERS,
+                    JavaScriptModes.PARAMETER_MODE),
+            Matcher(cls.WHITESPACE, Type.WHITESPACE),
+            Matcher(cls.IDENTIFIER, Type.FUNCTION_NAME)],
+
+        # Matchers for function parameters
+        JavaScriptModes.PARAMETER_MODE: [
+            # When in function parameter mode, a closing paren is treated
+            # specially. Everything else is treated as lines of parameters.
+            Matcher(cls.CLOSING_PAREN_WITH_SPACE, Type.END_PARAMETERS,
+                    JavaScriptModes.TEXT_MODE),
+            Matcher(cls.PARAMETERS, Type.PARAMETERS,
+                    JavaScriptModes.PARAMETER_MODE)]}
+
+  def __init__(self, parse_js_doc=True):
     """Create a tokenizer object.
 
     Args:
       parse_js_doc: Whether to do detailed parsing of javascript doc comments,
           or simply treat them as normal comments.  Defaults to parsing JsDoc.
     """
-    matchers = self.JAVASCRIPT_MATCHERS
+    matchers = self.BuildMatchers()
     if not parse_js_doc:
       # Make a copy so the original doesn't get modified.
       matchers = copy.deepcopy(matchers)
@@ -360,4 +462,4 @@
         name of the function.
     """
     return javascripttokens.JavaScriptToken(string, token_type, line,
-                                            line_number, values)
+                                            line_number, values, line_number)
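
BuildMatchers' docstring describes a first-match-wins scheme: the matchers are tried in order, and earlier entries overrule later ones (hence '>>>' before '>>', NUMBER before OPERATOR). A minimal sketch of that dispatch loop; the (pattern, token type) pairs are illustrative stand-ins, not the tokenizer's real matcher list:

    import re

    # Matchers are tried in order; the first pattern that matches at the
    # current position wins.
    _MATCHERS = [
        (re.compile(r'\d+'), 'NUMBER'),
        (re.compile(r'==|='), 'OPERATOR'),  # '==' listed before its subset '='
        (re.compile(r'[A-Za-z_$][A-Za-z0-9_$]*'), 'IDENTIFIER'),
        (re.compile(r'\s+'), 'WHITESPACE'),
    ]


    def _TokenizeLine(line):
      pos = 0
      out = []
      while pos < len(line):
        for regexp, token_type in _MATCHERS:
          match = regexp.match(line, pos)
          if match:
            out.append((token_type, match.group()))
            pos = match.end()
            break
        else:
          pos += 1  # no matcher applied; the real tokenizer emits a default type
      return out


    print(_TokenizeLine('answer == 42'))
    # [('IDENTIFIER', 'answer'), ('WHITESPACE', ' '), ('OPERATOR', '=='),
    #  ('WHITESPACE', ' '), ('NUMBER', '42')]
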
diff --git a/catapult/third_party/closure_linter/closure_linter/javascripttokens.py b/catapult/third_party/closure_linter/closure_linter/javascripttokens.py
old mode 100755
new mode 100644
index f46d4e1..f5815d2
--- a/catapult/third_party/closure_linter/closure_linter/javascripttokens.py
+++ b/catapult/third_party/closure_linter/closure_linter/javascripttokens.py
@@ -53,6 +53,9 @@
   DOC_START_BRACE = 'doc {'
   DOC_END_BRACE = 'doc }'
   DOC_PREFIX = 'comment prefix: * '
+  DOC_TYPE_START_BLOCK = 'Type <'
+  DOC_TYPE_END_BLOCK = 'Type >'
+  DOC_TYPE_MODIFIER = 'modifier'
   SIMPLE_LVALUE = 'lvalue='
   KEYWORD = 'keyword'
   OPERATOR = 'operator'
@@ -62,14 +65,17 @@
       SINGLE_QUOTE_STRING_START, SINGLE_QUOTE_STRING_END,
       DOUBLE_QUOTE_STRING_START, DOUBLE_QUOTE_STRING_END, STRING_TEXT])
 
-  COMMENT_TYPES = frozenset([START_SINGLE_LINE_COMMENT, COMMENT,
+  COMMENT_TYPES = frozenset([
+      START_SINGLE_LINE_COMMENT, COMMENT,
       START_BLOCK_COMMENT, START_DOC_COMMENT,
       END_BLOCK_COMMENT, END_DOC_COMMENT,
       DOC_START_BRACE, DOC_END_BRACE,
-      DOC_FLAG, DOC_INLINE_FLAG, DOC_PREFIX])
+      DOC_FLAG, DOC_INLINE_FLAG, DOC_PREFIX,
+      DOC_TYPE_START_BLOCK, DOC_TYPE_END_BLOCK, DOC_TYPE_MODIFIER])
 
   FLAG_DESCRIPTION_TYPES = frozenset([
-      DOC_INLINE_FLAG, COMMENT, DOC_START_BRACE, DOC_END_BRACE])
+      DOC_INLINE_FLAG, COMMENT, DOC_START_BRACE, DOC_END_BRACE,
+      DOC_TYPE_START_BLOCK, DOC_TYPE_END_BLOCK, DOC_TYPE_MODIFIER])
 
   FLAG_ENDING_TYPES = frozenset([DOC_FLAG, END_DOC_COMMENT])
 
diff --git a/catapult/third_party/closure_linter/closure_linter/not_strict_test.py b/catapult/third_party/closure_linter/closure_linter/not_strict_test.py
old mode 100755
new mode 100644
index 8df8efc..c92c13e
--- a/catapult/third_party/closure_linter/closure_linter/not_strict_test.py
+++ b/catapult/third_party/closure_linter/closure_linter/not_strict_test.py
@@ -28,8 +28,8 @@
 import gflags as flags
 import unittest as googletest
 
-from closure_linter import checker
 from closure_linter import errors
+from closure_linter import runner
 from closure_linter.common import filetestcase
 
 _RESOURCE_PREFIX = 'closure_linter/testdata'
@@ -66,7 +66,7 @@
     for test_file in test_files:
       resource_path = os.path.join(_RESOURCE_PREFIX, test_file)
       self.addTest(filetestcase.AnnotatedFileTestCase(resource_path,
-                                                      checker.GJsLintRunner(),
+                                                      runner.Run,
                                                       errors.ByName))
 
 if __name__ == '__main__':
diff --git a/catapult/third_party/closure_linter/closure_linter/requireprovidesorter.py b/catapult/third_party/closure_linter/closure_linter/requireprovidesorter.py
old mode 100755
new mode 100644
index 6dda3ae..e7e08a1
--- a/catapult/third_party/closure_linter/closure_linter/requireprovidesorter.py
+++ b/catapult/third_party/closure_linter/closure_linter/requireprovidesorter.py
@@ -54,10 +54,7 @@
       token: A token in the token stream before any goog.provide tokens.
 
     Returns:
-      A tuple containing the first provide token in the token stream and a list
-      of provided objects sorted alphabetically. For example:
-
-      (JavaScriptToken, ['object.a', 'object.b', ...])
+      The first provide token in the token stream.
 
       None is returned if all goog.provide statements are already sorted.
     """
@@ -65,7 +62,7 @@
     provide_strings = self._GetRequireOrProvideTokenStrings(provide_tokens)
     sorted_provide_strings = sorted(provide_strings)
     if provide_strings != sorted_provide_strings:
-      return [provide_tokens[0], sorted_provide_strings]
+      return provide_tokens[0]
     return None
 
   def CheckRequires(self, token):
@@ -79,10 +76,7 @@
       token: A token in the token stream before any goog.require tokens.
 
     Returns:
-      A tuple containing the first require token in the token stream and a list
-      of required dependencies sorted alphabetically. For example:
-
-      (JavaScriptToken, ['object.a', 'object.b', ...])
+      The first require token in the token stream.
 
       None is returned if all goog.require statements are already sorted.
     """
@@ -90,7 +84,7 @@
     require_strings = self._GetRequireOrProvideTokenStrings(require_tokens)
     sorted_require_strings = sorted(require_strings)
     if require_strings != sorted_require_strings:
-      return (require_tokens[0], sorted_require_strings)
+      return require_tokens[0]
     return None
 
   def FixProvides(self, token):
@@ -127,7 +121,7 @@
     first_token = tokens[0]
     last_token = tokens[-1]
     i = last_token
-    while i != first_token:
+    while i != first_token and i is not None:
       if i.type is Type.BLANK_LINE:
         tokenutil.DeleteToken(i)
       i = i.previous
@@ -143,12 +137,18 @@
       for i in tokens_to_delete:
         tokenutil.DeleteToken(i)
 
+    # Remember the start of the rest of the file; sorted tokens go before it.
+    rest_of_file = tokens_map[strings[-1]][-1].next
+
     # Re-add all tokens in the map in alphabetical order.
     insert_after = tokens[0].previous
     for string in sorted_strings:
       for i in tokens_map[string]:
-        tokenutil.InsertTokenAfter(i, insert_after)
-        insert_after = i
+        if rest_of_file:
+          tokenutil.InsertTokenBefore(i, rest_of_file)
+        else:
+          tokenutil.InsertTokenAfter(i, insert_after)
+          insert_after = i
 
   def _GetRequireOrProvideTokens(self, token, token_string):
     """Gets all goog.provide or goog.require tokens in the given token stream.
@@ -167,9 +167,13 @@
       if token.type == Type.IDENTIFIER:
         if token.string == token_string:
           tokens.append(token)
-        elif token.string not in ['goog.require', 'goog.provide']:
-          # The goog.provide and goog.require identifiers are at the top of the
-          # file. So if any other identifier is encountered, return.
+        elif token.string not in [
+            'goog.provide', 'goog.require', 'goog.setTestOnly']:
+          # These 3 identifiers are at the top of the file. So if any other
+          # identifier is encountered, return.
+          # TODO(user): Once it's decided what ordering goog.require
+          # should use, add 'goog.module' to the list above and implement the
+          # decision.
           break
       token = token.next
 
@@ -193,8 +197,9 @@
     """
     token_strings = []
     for token in tokens:
-      name = tokenutil.Search(token, Type.STRING_TEXT).string
-      token_strings.append(name)
+      if not token.is_deleted:
+        name = tokenutil.GetStringAfterToken(token)
+        token_strings.append(name)
     return token_strings
 
   def _GetTokensMap(self, tokens):
@@ -228,13 +233,14 @@
     """
     tokens_map = {}
     for token in tokens:
-      object_name = tokenutil.Search(token, Type.STRING_TEXT).string
+      object_name = tokenutil.GetStringAfterToken(token)
       # If the previous line starts with a comment, presume that the comment
       # relates to the goog.require or goog.provide and keep them together when
       # sorting.
       first_token = token
       previous_first_token = tokenutil.GetFirstTokenInPreviousLine(first_token)
-      while previous_first_token.IsAnyType(Type.COMMENT_TYPES):
+      while (previous_first_token and
+             previous_first_token.IsAnyType(Type.COMMENT_TYPES)):
         first_token = previous_first_token
         previous_first_token = tokenutil.GetFirstTokenInPreviousLine(
             first_token)
@@ -270,3 +276,54 @@
     token_list.append(last_token)
 
     return token_list
+
+  def GetFixedRequireString(self, token):
+    """Get fixed/sorted order of goog.require statements.
+
+    Args:
+      token: The first token in the token stream.
+
+    Returns:
+      A string with the goog.require statements in sorted order.
+    """
+    return self._GetFixedRequireOrProvideString(
+        self._GetRequireOrProvideTokens(token, 'goog.require'))
+
+  def GetFixedProvideString(self, token):
+    """Get fixed/sorted order of goog.provide statements.
+
+    Args:
+      token: The first token in the token stream.
+
+    Returns:
+      A string with the goog.provide statements in sorted order.
+    """
+    return self._GetFixedRequireOrProvideString(
+        self._GetRequireOrProvideTokens(token, 'goog.provide'))
+
+  def _GetFixedRequireOrProvideString(self, tokens):
+    """Sorts goog.provide or goog.require statements.
+
+    Args:
+      tokens: A list of goog.provide or goog.require tokens in the order they
+              appear in the token stream, i.e. the first token in this list
+              must be the first goog.provide or goog.require token.
+
+    Returns:
+      A string with the sorted goog.require or goog.provide statements.
+    """
+
+    # A map from required/provided object name to tokens that make up the line
+    # it was on, including any comments immediately before it or after it on the
+    # same line.
+    tokens_map = self._GetTokensMap(tokens)
+    sorted_strings = sorted(tokens_map.keys())
+
+    new_order = ''
+    for string in sorted_strings:
+      for i in tokens_map[string]:
+        new_order += i.string
+        if i.IsLastInLine():
+          new_order += '\n'
+
+    return new_order
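
A string-level sketch of what GetFixedRequireString/_GetFixedRequireOrProvideString produce: each goog.require line, together with a comment line directly above it, is keyed by the required name, the names are sorted, and the lines are re-emitted in that order. The real sorter works on the token stream; _SortedRequireBlock and its regex are illustrative stand-ins:

    import re

    _REQUIRE = re.compile(r"goog\.require\('([^']+)'\)")


    def _SortedRequireBlock(lines):
      # Group each require line (plus any comment immediately above it) by the
      # required name, then emit the groups in sorted-name order.
      groups = {}
      pending_comment = []
      for line in lines:
        match = _REQUIRE.search(line)
        if match:
          groups[match.group(1)] = pending_comment + [line]
          pending_comment = []
        elif line.lstrip().startswith('/**'):
          pending_comment.append(line)
      sorted_lines = []
      for name in sorted(groups):
        sorted_lines.extend(groups[name])
      return sorted_lines


    print(_SortedRequireBlock([
        "goog.require('package.xyz');",
        '/** This is needed for scope. **/',
        "goog.require('package.abcd');",
    ]))
    # ['/** This is needed for scope. **/', "goog.require('package.abcd');",
    #  "goog.require('package.xyz');"]
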
diff --git a/catapult/third_party/closure_linter/closure_linter/requireprovidesorter_test.py b/catapult/third_party/closure_linter/closure_linter/requireprovidesorter_test.py
index d1d61dc..fecb6d0 100644
--- a/catapult/third_party/closure_linter/closure_linter/requireprovidesorter_test.py
+++ b/catapult/third_party/closure_linter/closure_linter/requireprovidesorter_test.py
@@ -19,20 +19,58 @@
 
 
 import unittest as googletest
-from closure_linter import ecmametadatapass
-from closure_linter import javascripttokenizer
 from closure_linter import javascripttokens
 from closure_linter import requireprovidesorter
+from closure_linter import testutil
 
-# pylint: disable-msg=C6409
+# pylint: disable=g-bad-name
 TokenType = javascripttokens.JavaScriptTokenType
 
 
 class RequireProvideSorterTest(googletest.TestCase):
   """Tests for RequireProvideSorter."""
 
-  _tokenizer = javascripttokenizer.JavaScriptTokenizer()
-  _metadata_pass = ecmametadatapass.EcmaMetaDataPass()
+  def testGetFixedProvideString(self):
+    """Tests that fixed string constains proper comments also."""
+    input_lines = [
+        'goog.provide(\'package.xyz\');',
+        '/** @suppress {extraprovide} **/',
+        'goog.provide(\'package.abcd\');'
+    ]
+
+    expected_lines = [
+        '/** @suppress {extraprovide} **/',
+        'goog.provide(\'package.abcd\');',
+        'goog.provide(\'package.xyz\');'
+    ]
+
+    token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)
+
+    sorter = requireprovidesorter.RequireProvideSorter()
+    fixed_provide_string = sorter.GetFixedProvideString(token)
+
+    self.assertEquals(expected_lines, fixed_provide_string.splitlines())
+
+  def testGetFixedRequireString(self):
+    """Tests that fixed string constains proper comments also."""
+    input_lines = [
+        'goog.require(\'package.xyz\');',
+        '/** This is needed for scope. **/',
+        'goog.require(\'package.abcd\');'
+    ]
+
+    expected_lines = [
+        '/** This is needed for scope. **/',
+        'goog.require(\'package.abcd\');',
+        'goog.require(\'package.xyz\');'
+    ]
+
+    token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)
+
+    sorter = requireprovidesorter.RequireProvideSorter()
+    fixed_require_string = sorter.GetFixedRequireString(token)
+
+    self.assertEquals(expected_lines, fixed_require_string.splitlines())
 
   def testFixRequires_removeBlankLines(self):
     """Tests that blank lines are omitted in sorted goog.require statements."""
@@ -49,15 +87,58 @@
         'goog.require(\'package.subpackage.ClassA\');',
         'goog.require(\'package.subpackage.ClassB\');'
     ]
-    token = self._tokenizer.TokenizeFile(input_lines)
-    self._metadata_pass.Reset()
-    self._metadata_pass.Process(token)
+    token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)
 
     sorter = requireprovidesorter.RequireProvideSorter()
     sorter.FixRequires(token)
 
     self.assertEquals(expected_lines, self._GetLines(token))
 
+  def fixRequiresTest_withTestOnly(self, position):
+    """Regression-tests sorting even with a goog.setTestOnly statement.
+
+    Args:
+      position: The position in the list where to insert the goog.setTestOnly
+                statement. Will be used to test all possible combinations for
+                this test.
+    """
+    input_lines = [
+        'goog.provide(\'package.subpackage.Whatever\');',
+        '',
+        'goog.require(\'package.subpackage.ClassB\');',
+        'goog.require(\'package.subpackage.ClassA\');'
+    ]
+    expected_lines = [
+        'goog.provide(\'package.subpackage.Whatever\');',
+        '',
+        'goog.require(\'package.subpackage.ClassA\');',
+        'goog.require(\'package.subpackage.ClassB\');'
+    ]
+    input_lines.insert(position, 'goog.setTestOnly();')
+    expected_lines.insert(position, 'goog.setTestOnly();')
+
+    token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)
+
+    sorter = requireprovidesorter.RequireProvideSorter()
+    sorter.FixRequires(token)
+
+    self.assertEquals(expected_lines, self._GetLines(token))
+
+  def testFixRequires_withTestOnly(self):
+    """Regression-tests sorting even after a goog.setTestOnly statement."""
+
+    # goog.setTestOnly at first line.
+    self.fixRequiresTest_withTestOnly(position=0)
+
+    # goog.setTestOnly after goog.provide.
+    self.fixRequiresTest_withTestOnly(position=1)
+
+    # goog.setTestOnly before goog.require.
+    self.fixRequiresTest_withTestOnly(position=2)
+
+    # goog.setTestOnly after goog.require.
+    self.fixRequiresTest_withTestOnly(position=4)
+
   def _GetLines(self, token):
     """Returns an array of lines based on the specified token stream."""
     lines = []
diff --git a/catapult/third_party/closure_linter/closure_linter/runner.py b/catapult/third_party/closure_linter/closure_linter/runner.py
new file mode 100644
index 0000000..04e7fa4
--- /dev/null
+++ b/catapult/third_party/closure_linter/closure_linter/runner.py
@@ -0,0 +1,198 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Main lint function. Tokenizes file, runs passes, and feeds to checker."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+__author__ = 'nnaze@google.com (Nathan Naze)'
+
+import traceback
+
+import gflags as flags
+
+from closure_linter import checker
+from closure_linter import ecmalintrules
+from closure_linter import ecmametadatapass
+from closure_linter import error_check
+from closure_linter import errors
+from closure_linter import javascriptstatetracker
+from closure_linter import javascripttokenizer
+
+from closure_linter.common import error
+from closure_linter.common import htmlutil
+from closure_linter.common import tokens
+
+flags.DEFINE_list('limited_doc_files', ['dummy.js', 'externs.js'],
+                  'List of files with relaxed documentation checks. Will not '
+                  'report errors for missing documentation, some missing '
+                  'descriptions, or methods whose @return tags don\'t have a '
+                  'matching return statement.')
+flags.DEFINE_boolean('error_trace', False,
+                     'Whether to show error exceptions.')
+flags.ADOPT_module_key_flags(checker)
+flags.ADOPT_module_key_flags(ecmalintrules)
+flags.ADOPT_module_key_flags(error_check)
+
+
+def _GetLastNonWhiteSpaceToken(start_token):
+  """Get the last non-whitespace token in a token stream."""
+  ret_token = None
+
+  whitespace_tokens = frozenset([
+      tokens.TokenType.WHITESPACE, tokens.TokenType.BLANK_LINE])
+  for t in start_token:
+    if t.type not in whitespace_tokens:
+      ret_token = t
+
+  return ret_token
+
+
+def _IsHtml(filename):
+  return filename.endswith('.html') or filename.endswith('.htm')
+
+
+def _Tokenize(fileobj):
+  """Tokenize a file.
+
+  Args:
+    fileobj: file-like object (or iterable lines) with the source.
+
+  Returns:
+    The first token in the token stream and the ending mode of the tokenizer.
+  """
+  tokenizer = javascripttokenizer.JavaScriptTokenizer()
+  start_token = tokenizer.TokenizeFile(fileobj)
+  return start_token, tokenizer.mode
+
+
+def _IsLimitedDocCheck(filename, limited_doc_files):
+  """Whether this this a limited-doc file.
+
+  Args:
+    filename: The filename.
+    limited_doc_files: Iterable of strings. Suffixes of filenames that should
+      be limited doc check.
+
+  Returns:
+    Whether the file should be limited check.
+  """
+  for limited_doc_filename in limited_doc_files:
+    if filename.endswith(limited_doc_filename):
+      return True
+  return False
+
+
+def Run(filename, error_handler, source=None):
+  """Tokenize, run passes, and check the given file.
+
+  Args:
+    filename: The path of the file to check
+    error_handler: The error handler to report errors to.
+    source: A file-like object with the file source. If omitted, the file will
+      be read from the filename path.
+  """
+  if not source:
+    try:
+      source = open(filename)
+    except IOError:
+      error_handler.HandleFile(filename, None)
+      error_handler.HandleError(
+          error.Error(errors.FILE_NOT_FOUND, 'File not found'))
+      error_handler.FinishFile()
+      return
+
+  if _IsHtml(filename):
+    source_file = htmlutil.GetScriptLines(source)
+  else:
+    source_file = source
+
+  token, tokenizer_mode = _Tokenize(source_file)
+
+  error_handler.HandleFile(filename, token)
+
+  # If we did not end in the basic mode, this is a failed parse.
+  if tokenizer_mode is not javascripttokenizer.JavaScriptModes.TEXT_MODE:
+    error_handler.HandleError(
+        error.Error(errors.FILE_IN_BLOCK,
+                    'File ended in mode "%s".' % tokenizer_mode,
+                    _GetLastNonWhiteSpaceToken(token)))
+
+  # Run the ECMA pass
+  error_token = None
+
+  ecma_pass = ecmametadatapass.EcmaMetaDataPass()
+  error_token = RunMetaDataPass(token, ecma_pass, error_handler, filename)
+
+  is_limited_doc_check = (
+      _IsLimitedDocCheck(filename, flags.FLAGS.limited_doc_files))
+
+  _RunChecker(token, error_handler,
+              is_limited_doc_check,
+              is_html=_IsHtml(filename),
+              stop_token=error_token)
+
+  error_handler.FinishFile()
+
+
+def RunMetaDataPass(start_token, metadata_pass, error_handler, filename=''):
+  """Run a metadata pass over a token stream.
+
+  Args:
+    start_token: The first token in a token stream.
+    metadata_pass: Metadata pass to run.
+    error_handler: The error handler to report errors to.
+    filename: Filename of the source.
+
+  Returns:
+    The token where the error occurred (if any).
+  """
+
+  try:
+    metadata_pass.Process(start_token)
+  except ecmametadatapass.ParseError, parse_err:
+    if flags.FLAGS.error_trace:
+      traceback.print_exc()
+    error_token = parse_err.token
+    error_msg = str(parse_err)
+    error_handler.HandleError(
+        error.Error(errors.FILE_DOES_NOT_PARSE,
+                    ('Error parsing file at token "%s". Unable to '
+                     'check the rest of file.'
+                     '\nError "%s"' % (error_token, error_msg)), error_token))
+    return error_token
+  except Exception:  # pylint: disable=broad-except
+    traceback.print_exc()
+    error_handler.HandleError(
+        error.Error(
+            errors.FILE_DOES_NOT_PARSE,
+            'Internal error in %s' % filename))
+
+
+def _RunChecker(start_token, error_handler,
+                limited_doc_checks, is_html,
+                stop_token=None):
+
+  state_tracker = javascriptstatetracker.JavaScriptStateTracker()
+
+  style_checker = checker.JavaScriptStyleChecker(
+      state_tracker=state_tracker,
+      error_handler=error_handler)
+
+  style_checker.Check(start_token,
+                      is_html=is_html,
+                      limited_doc_checks=limited_doc_checks,
+                      stop_token=stop_token)
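
A minimal usage sketch for runner.Run. It relies only on the error-handler callbacks invoked in this file (HandleFile, HandleError, FinishFile); the print-based handler subclassing errorhandler.ErrorHandler is a stand-in for the project's real handlers, and the inline source is arbitrary:

    import StringIO

    from closure_linter import runner
    from closure_linter.common import errorhandler


    class PrintingErrorHandler(errorhandler.ErrorHandler):
      """Stand-in handler that just prints what the runner reports."""

      def HandleFile(self, filename, first_token):
        print('Checking %s' % filename)

      def HandleError(self, err):
        print('  %s' % err)

      def FinishFile(self):
        print('Done.')


    source = StringIO.StringIO("goog.provide('foo');\nvar x = 3;\n")
    runner.Run('foo.js', PrintingErrorHandler(), source)
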
diff --git a/catapult/third_party/closure_linter/closure_linter/runner_test.py b/catapult/third_party/closure_linter/closure_linter/runner_test.py
new file mode 100644
index 0000000..da5857d
--- /dev/null
+++ b/catapult/third_party/closure_linter/closure_linter/runner_test.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+#
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for the runner module."""
+
+__author__ = ('nnaze@google.com (Nathan Naze)')
+
+import StringIO
+
+
+import mox
+
+
+import unittest as googletest
+
+from closure_linter import errors
+from closure_linter import runner
+from closure_linter.common import error
+from closure_linter.common import errorhandler
+from closure_linter.common import tokens
+
+
+class LimitedDocTest(googletest.TestCase):
+
+  def testIsLimitedDocCheck(self):
+    self.assertTrue(runner._IsLimitedDocCheck('foo_test.js', ['_test.js']))
+    self.assertFalse(runner._IsLimitedDocCheck('foo_bar.js', ['_test.js']))
+
+    self.assertTrue(runner._IsLimitedDocCheck(
+        'foo_moo.js', ['moo.js', 'quack.js']))
+    self.assertFalse(runner._IsLimitedDocCheck(
+        'foo_moo.js', ['woof.js', 'quack.js']))
+
+
+class RunnerTest(googletest.TestCase):
+
+  def setUp(self):
+    self.mox = mox.Mox()
+
+  def testRunOnMissingFile(self):
+    mock_error_handler = self.mox.CreateMock(errorhandler.ErrorHandler)
+
+    def ValidateError(err):
+      return (isinstance(err, error.Error) and
+              err.code is errors.FILE_NOT_FOUND and
+              err.token is None)
+
+    mock_error_handler.HandleFile('does_not_exist.js', None)
+    mock_error_handler.HandleError(mox.Func(ValidateError))
+    mock_error_handler.FinishFile()
+
+    self.mox.ReplayAll()
+
+    runner.Run('does_not_exist.js', mock_error_handler)
+
+    self.mox.VerifyAll()
+
+  def testBadTokenization(self):
+    mock_error_handler = self.mox.CreateMock(errorhandler.ErrorHandler)
+
+    def ValidateError(err):
+      return (isinstance(err, error.Error) and
+              err.code is errors.FILE_IN_BLOCK and
+              err.token.string == '}')
+
+    mock_error_handler.HandleFile('foo.js', mox.IsA(tokens.Token))
+    mock_error_handler.HandleError(mox.Func(ValidateError))
+    mock_error_handler.HandleError(mox.IsA(error.Error))
+    mock_error_handler.FinishFile()
+
+    self.mox.ReplayAll()
+
+    source = StringIO.StringIO(_BAD_TOKENIZATION_SCRIPT)
+    runner.Run('foo.js', mock_error_handler, source)
+
+    self.mox.VerifyAll()
+
+
+_BAD_TOKENIZATION_SCRIPT = """
+function foo () {
+  var a = 3;
+  var b = 2;
+  return b + a; /* Comment not closed
+}
+"""
+
+
+if __name__ == '__main__':
+  googletest.main()
diff --git a/catapult/third_party/closure_linter/closure_linter/scopeutil.py b/catapult/third_party/closure_linter/closure_linter/scopeutil.py
new file mode 100644
index 0000000..a7ca9b6
--- /dev/null
+++ b/catapult/third_party/closure_linter/closure_linter/scopeutil.py
@@ -0,0 +1,206 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tools to match goog.scope alias statements."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+__author__ = ('nnaze@google.com (Nathan Naze)')
+
+import itertools
+
+from closure_linter import ecmametadatapass
+from closure_linter import tokenutil
+from closure_linter.javascripttokens import JavaScriptTokenType
+
+
+
+def IsGoogScopeBlock(context):
+  """Whether the given context is a goog.scope block.
+
+  This function only checks that the block is a function block inside
+  a goog.scope() call.
+
+  TODO(nnaze): Implement goog.scope checks that verify the call is
+  in the root context and contains only a single function literal.
+
+  Args:
+    context: An EcmaContext of type block.
+
+  Returns:
+    Whether the context is a goog.scope block.
+  """
+
+  if context.type != ecmametadatapass.EcmaContext.BLOCK:
+    return False
+
+  if not _IsFunctionLiteralBlock(context):
+    return False
+
+  # Check that this function is contained by a group
+  # of form "goog.scope(...)".
+  parent = context.parent
+  if parent and parent.type is ecmametadatapass.EcmaContext.GROUP:
+
+    last_code_token = parent.start_token.metadata.last_code
+
+    if (last_code_token and
+        last_code_token.type is JavaScriptTokenType.IDENTIFIER and
+        last_code_token.string == 'goog.scope'):
+      return True
+
+  return False
+
+
+def _IsFunctionLiteralBlock(block_context):
+  """Check if a context is a function literal block (without parameters).
+
+  Example function literal block: 'function() {}'
+
+  Args:
+    block_context: An EcmaContext of type block.
+
+  Returns:
+    Whether this context is a function literal block.
+  """
+
+  previous_code_tokens_iter = itertools.ifilter(
+      lambda token: token not in JavaScriptTokenType.NON_CODE_TYPES,
+      reversed(block_context.start_token))
+
+  # Ignore the current token
+  next(previous_code_tokens_iter, None)
+
+  # Grab the previous three tokens and put them in correct order.
+  previous_code_tokens = list(itertools.islice(previous_code_tokens_iter, 3))
+  previous_code_tokens.reverse()
+
+  # There aren't three previous tokens.
+  if len(previous_code_tokens) != 3:
+    return False
+
+  # Check that the previous three code tokens are "function ()"
+  previous_code_token_types = [token.type for token in previous_code_tokens]
+  if (previous_code_token_types == [
+      JavaScriptTokenType.FUNCTION_DECLARATION,
+      JavaScriptTokenType.START_PARAMETERS,
+      JavaScriptTokenType.END_PARAMETERS]):
+    return True
+
+  return False
+
+
+def IsInClosurizedNamespace(symbol, closurized_namespaces):
+  """Match a goog.scope alias.
+
+  Args:
+    symbol: An identifier like 'goog.events.Event'.
+    closurized_namespaces: Iterable of valid Closurized namespaces (strings).
+
+  Returns:
+    True if symbol is an identifier in a Closurized namespace, otherwise False.
+  """
+  for ns in closurized_namespaces:
+    if symbol.startswith(ns + '.'):
+      return True
+
+  return False
+
+
+def _GetVarAssignmentTokens(context):
+  """Returns the tokens from context if it is a var assignment.
+
+  Args:
+    context: An EcmaContext.
+
+  Returns:
+    If a var assignment, the tokens contained within it, without the
+    trailing semicolon.
+  """
+  if context.type != ecmametadatapass.EcmaContext.VAR:
+    return
+
+  # Get the tokens in this statement.
+  if context.start_token and context.end_token:
+    statement_tokens = tokenutil.GetTokenRange(context.start_token,
+                                               context.end_token)
+  else:
+    return
+
+  # And now just those tokens that are actually code.
+  is_code_token = lambda t: t.type not in JavaScriptTokenType.NON_CODE_TYPES
+  code_tokens = filter(is_code_token, statement_tokens)
+
+  # Pop off the semicolon if present.
+  if code_tokens and code_tokens[-1].IsType(JavaScriptTokenType.SEMICOLON):
+    code_tokens.pop()
+
+  if len(code_tokens) < 4:
+    return
+
+  if (code_tokens[0].IsKeyword('var') and
+      code_tokens[1].IsType(JavaScriptTokenType.SIMPLE_LVALUE) and
+      code_tokens[2].IsOperator('=')):
+    return code_tokens
+
+
+def MatchAlias(context):
+  """Match an alias statement (some identifier assigned to a variable).
+
+  Example alias: var MyClass = proj.longNamespace.MyClass.
+
+  Args:
+    context: An EcmaContext of type EcmaContext.VAR.
+
+  Returns:
+    If a valid alias, returns a tuple of alias and symbol, otherwise None.
+  """
+  code_tokens = _GetVarAssignmentTokens(context)
+  if code_tokens is None:
+    return
+
+  if all(tokenutil.IsIdentifierOrDot(t) for t in code_tokens[3:]):
+    # var Foo = bar.Foo;
+    alias, symbol = code_tokens[1], code_tokens[3]
+    # Mark both tokens as an alias definition to not count them as usages.
+    alias.metadata.is_alias_definition = True
+    symbol.metadata.is_alias_definition = True
+    return alias.string, tokenutil.GetIdentifierForToken(symbol)
+
+
+def MatchModuleAlias(context):
+  """Match an alias statement in a goog.module style import.
+
+  Example alias: var MyClass = goog.require('proj.longNamespace.MyClass').
+
+  Args:
+    context: An EcmaContext.
+
+  Returns:
+    If a valid alias, returns a tuple of alias and symbol, otherwise None.
+  """
+  code_tokens = _GetVarAssignmentTokens(context)
+  if code_tokens is None:
+    return
+
+  if (code_tokens[3].IsType(JavaScriptTokenType.IDENTIFIER) and
+      code_tokens[3].string == 'goog.require'):
+    # var Foo = goog.require('bar.Foo');
+    alias = code_tokens[1]
+    symbol = tokenutil.GetStringAfterToken(code_tokens[3])
+    if symbol:
+      alias.metadata.is_alias_definition = True
+      return alias.string, symbol
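A minimal usage sketch for the scopeutil API added above (not part of the patch itself): it drives MatchAlias and IsInClosurizedNamespace through testutil.TokenizeSourceAndRunEcmaPass, which this change also introduces, and assumes the linter's usual Python 2 environment.

from closure_linter import ecmametadatapass
from closure_linter import scopeutil
from closure_linter import testutil

start_token = testutil.TokenizeSourceAndRunEcmaPass(
    'var Component = goog.ui.Component;')

for token in start_token:
  context = token.metadata.context
  if context.type == ecmametadatapass.EcmaContext.VAR:
    match = scopeutil.MatchAlias(context)
    if match:
      alias, symbol = match
      # Expected output: Component goog.ui.Component True
      print alias, symbol, scopeutil.IsInClosurizedNamespace(
          symbol, frozenset(['goog']))
    break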
diff --git a/catapult/third_party/closure_linter/closure_linter/scopeutil_test.py b/catapult/third_party/closure_linter/closure_linter/scopeutil_test.py
new file mode 100644
index 0000000..722a953
--- /dev/null
+++ b/catapult/third_party/closure_linter/closure_linter/scopeutil_test.py
@@ -0,0 +1,222 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for the scopeutil module."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+__author__ = ('nnaze@google.com (Nathan Naze)')
+
+
+import unittest as googletest
+
+from closure_linter import ecmametadatapass
+from closure_linter import scopeutil
+from closure_linter import testutil
+
+
+def _FindContexts(start_token):
+  """Depth first search of all contexts referenced by a token stream.
+
+  Includes contexts' parents, which might not be directly referenced
+  by any token in the stream.
+
+  Args:
+    start_token: First token in the token stream.
+
+  Yields:
+    All contexts referenced by this token stream.
+  """
+
+  seen_contexts = set()
+
+  # For each token, yield the context if we haven't seen it before.
+  for token in start_token:
+
+    token_context = token.metadata.context
+    contexts = [token_context]
+
+    # Also grab all the context's ancestors.
+    parent = token_context.parent
+    while parent:
+      contexts.append(parent)
+      parent = parent.parent
+
+    # Yield each of these contexts if we've not seen them.
+    for context in contexts:
+      if context not in seen_contexts:
+        yield context
+
+      seen_contexts.add(context)
+
+
+def _FindFirstContextOfType(token, context_type):
+  """Returns the first statement context."""
+  for context in _FindContexts(token):
+    if context.type == context_type:
+      return context
+
+
+def _ParseAssignment(script):
+  start_token = testutil.TokenizeSourceAndRunEcmaPass(script)
+  statement = _FindFirstContextOfType(
+      start_token, ecmametadatapass.EcmaContext.VAR)
+  return statement
+
+
+class StatementTest(googletest.TestCase):
+
+  def assertAlias(self, expected_match, script):
+    statement = _ParseAssignment(script)
+    match = scopeutil.MatchAlias(statement)
+    self.assertEquals(expected_match, match)
+
+  def assertModuleAlias(self, expected_match, script):
+    statement = _ParseAssignment(script)
+    match = scopeutil.MatchModuleAlias(statement)
+    self.assertEquals(expected_match, match)
+
+  def testSimpleAliases(self):
+    self.assertAlias(
+        ('foo', 'goog.foo'),
+        'var foo = goog.foo;')
+
+    self.assertAlias(
+        ('foo', 'goog.foo'),
+        'var foo = goog.foo')  # No semicolon
+
+  def testAliasWithComment(self):
+    self.assertAlias(
+        ('Component', 'goog.ui.Component'),
+        'var Component = /* comment */ goog.ui.Component;')
+
+  def testMultilineAlias(self):
+    self.assertAlias(
+        ('Component', 'goog.ui.Component'),
+        'var Component = \n  goog.ui.\n  Component;')
+
+  def testNonSymbolAliasVarStatements(self):
+    self.assertAlias(None, 'var foo = 3;')
+    self.assertAlias(None, 'var foo = function() {};')
+    self.assertAlias(None, 'var foo = bar ? baz : qux;')
+
+  def testModuleAlias(self):
+    self.assertModuleAlias(
+        ('foo', 'goog.foo'),
+        'var foo = goog.require("goog.foo");')
+    self.assertModuleAlias(
+        None,
+        'var foo = goog.require(notastring);')
+
+
+class ScopeBlockTest(googletest.TestCase):
+
+  @staticmethod
+  def _GetBlocks(source):
+    start_token = testutil.TokenizeSourceAndRunEcmaPass(source)
+    for context in _FindContexts(start_token):
+      if context.type is ecmametadatapass.EcmaContext.BLOCK:
+        yield context
+
+  def assertNoBlocks(self, script):
+    blocks = list(self._GetBlocks(script))
+    self.assertEquals([], blocks)
+
+  def testNotBlocks(self):
+    # Ensure these are not considered blocks.
+    self.assertNoBlocks('goog.scope(if{});')
+    self.assertNoBlocks('goog.scope(for{});')
+    self.assertNoBlocks('goog.scope(switch{});')
+    self.assertNoBlocks('goog.scope(function foo{});')
+
+  def testNonScopeBlocks(self):
+
+    blocks = list(self._GetBlocks('goog.scope(try{});'))
+    self.assertEquals(1, len(blocks))
+    self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
+
+    blocks = list(self._GetBlocks('goog.scope(function(a,b){});'))
+    self.assertEquals(1, len(blocks))
+    self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
+
+    blocks = list(self._GetBlocks('goog.scope(try{} catch(){});'))
+    # Two blocks: try and catch.
+    self.assertEquals(2, len(blocks))
+    self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
+    self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
+
+    blocks = list(self._GetBlocks('goog.scope(try{} catch(){} finally {});'))
+    self.assertEquals(3, len(blocks))
+    self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
+    self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
+    self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
+
+
+class AliasTest(googletest.TestCase):
+
+  def setUp(self):
+    self.start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT)
+
+  def testMatchAliasStatement(self):
+    matches = set()
+    for context in _FindContexts(self.start_token):
+      match = scopeutil.MatchAlias(context)
+      if match:
+        matches.add(match)
+
+    self.assertEquals(
+        set([('bar', 'baz'),
+             ('foo', 'this.foo_'),
+             ('Component', 'goog.ui.Component'),
+             ('MyClass', 'myproject.foo.MyClass'),
+             ('NonClosurizedClass', 'aaa.bbb.NonClosurizedClass')]),
+        matches)
+
+  def testMatchAliasStatement_withClosurizedNamespaces(self):
+
+    closurized_namespaces = frozenset(['goog', 'myproject'])
+
+    matches = set()
+    for context in _FindContexts(self.start_token):
+      match = scopeutil.MatchAlias(context)
+      if match:
+        unused_alias, symbol = match
+        if scopeutil.IsInClosurizedNamespace(symbol, closurized_namespaces):
+          matches.add(match)
+
+    self.assertEquals(
+        set([('MyClass', 'myproject.foo.MyClass'),
+             ('Component', 'goog.ui.Component')]),
+        matches)
+
+_TEST_SCRIPT = """
+goog.scope(function() {
+  var Component = goog.ui.Component; // scope alias
+  var MyClass = myproject.foo.MyClass; // scope alias
+
+  // Scope alias of non-Closurized namespace.
+  var NonClosurizedClass = aaa.bbb.NonClosurizedClass;
+
+  var foo = this.foo_; // non-scope object property alias
+  var bar = baz; // variable alias
+
+  var component = new Component();
+});
+
+"""
+
+if __name__ == '__main__':
+  googletest.main()
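The tests above exercise MatchModuleAlias only through small wrappers; as a quick standalone reference (same assumptions: this patch's testutil helper and Python 2), the call pattern looks like this.

from closure_linter import ecmametadatapass
from closure_linter import scopeutil
from closure_linter import testutil

start_token = testutil.TokenizeSourceAndRunEcmaPass(
    "var MyClass = goog.require('proj.MyClass');")

for token in start_token:
  context = token.metadata.context
  if context.type == ecmametadatapass.EcmaContext.VAR:
    # Expected: ('MyClass', 'proj.MyClass')
    print scopeutil.MatchModuleAlias(context)
    break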
diff --git a/catapult/third_party/closure_linter/closure_linter/statetracker.py b/catapult/third_party/closure_linter/closure_linter/statetracker.py
old mode 100755
new mode 100644
index 52e86a9..0b8fdf3
--- a/catapult/third_party/closure_linter/closure_linter/statetracker.py
+++ b/catapult/third_party/closure_linter/closure_linter/statetracker.py
@@ -24,6 +24,7 @@
 from closure_linter import javascripttokenizer
 from closure_linter import javascripttokens
 from closure_linter import tokenutil
+from closure_linter import typeannotation
 
 # Shorthand
 Type = javascripttokens.JavaScriptTokenType
@@ -39,7 +40,8 @@
       including braces.
     type_end_token: The last token specifying the flag type,
       including braces.
-    type: The type spec.
+    type: The type spec string.
+    jstype: The type spec, a TypeAnnotation instance.
     name_token: The token specifying the flag name.
     name: The flag name
     description_start_token: The first token in the description.
@@ -53,35 +55,49 @@
   STANDARD_DOC = frozenset([
       'author',
       'bug',
+      'classTemplate',
+      'consistentIdGenerator',
       'const',
       'constructor',
       'define',
       'deprecated',
+      'dict',
       'enum',
       'export',
+      'expose',
       'extends',
       'externs',
       'fileoverview',
+      'idGenerator',
       'implements',
       'implicitCast',
       'interface',
       'lends',
       'license',
+      'ngInject',  # This annotation is specific to AngularJS.
       'noalias',
       'nocompile',
       'nosideeffects',
       'override',
       'owner',
+      'nocollapse',
+      'package',
       'param',
+      'polymerBehavior',  # This annotation is specific to Polymer.
       'preserve',
       'private',
+      'protected',
+      'public',
       'return',
       'see',
+      'stableIdGenerator',
+      'struct',
       'supported',
       'template',
       'this',
       'type',
       'typedef',
+      'unrestricted',
       ])
 
   ANNOTATION = frozenset(['preserveTry', 'suppress'])
@@ -99,7 +115,9 @@
   SUPPRESS_TYPES = frozenset([
       'accessControls',
       'ambiguousFunctionDecl',
+      'checkDebuggerStatement',
       'checkRegExp',
+      'checkStructDictInheritance',
       'checkTypes',
       'checkVars',
       'const',
@@ -117,44 +135,101 @@
       'missingProperties',
       'missingProvide',
       'missingRequire',
+      'missingReturn',
       'nonStandardJsDocs',
+      'reportUnknownTypes',
       'strictModuleDepCheck',
+      'suspiciousCode',
       'tweakValidation',
       'typeInvalidation',
       'undefinedNames',
       'undefinedVars',
       'underscore',
       'unknownDefines',
+      'unnecessaryCasts',
+      'unusedPrivateMembers',
       'uselessCode',
       'visibility',
-      'with'])
+      'with',
+  ])
 
   HAS_DESCRIPTION = frozenset([
-    'define', 'deprecated', 'desc', 'fileoverview', 'license', 'param',
-    'preserve', 'return', 'supported'])
+      'define',
+      'deprecated',
+      'desc',
+      'fileoverview',
+      'license',
+      'param',
+      'preserve',
+      'return',
+      'supported',
+  ])
 
+  # Docflags whose argument should be parsed using the typeannotation parser.
   HAS_TYPE = frozenset([
-      'define', 'enum', 'extends', 'implements', 'param', 'return', 'type',
-      'suppress'])
+      'const',
+      'define',
+      'enum',
+      'export',
+      'extends',
+      'final',
+      'implements',
+      'mods',
+      'package',
+      'param',
+      'private',
+      'protected',
+      'public',
+      'return',
+      'suppress',
+      'type',
+      'typedef',
+  ])
 
-  TYPE_ONLY = frozenset(['enum', 'extends', 'implements',  'suppress', 'type'])
+  # Docflags for which it's ok to omit the type (flag without an argument).
+  CAN_OMIT_TYPE = frozenset([
+      'const',
+      'enum',
+      'export',
+      'final',
+      'package',
+      'private',
+      'protected',
+      'public',
+      'suppress',  # We'll raise a separate INCORRECT_SUPPRESS_SYNTAX instead.
+  ])
+
+  # Docflags that only take a type as an argument and should not parse a
+  # following description.
+  TYPE_ONLY = frozenset([
+      'const',
+      'enum',
+      'extends',
+      'implements',
+      'package',
+      'suppress',
+      'type',
+  ])
 
   HAS_NAME = frozenset(['param'])
 
   EMPTY_COMMENT_LINE = re.compile(r'^\s*\*?\s*$')
   EMPTY_STRING = re.compile(r'^\s*$')
 
-  def __init__(self, flag_token):
+  def __init__(self, flag_token, error_handler=None):
     """Creates the DocFlag object and attaches it to the given start token.
 
     Args:
       flag_token: The starting token of the flag.
+      error_handler: An optional error handler for errors occurring while
+        parsing the doctype.
     """
     self.flag_token = flag_token
     self.flag_type = flag_token.string.strip().lstrip('@')
 
     # Extract type, if applicable.
     self.type = None
+    self.jstype = None
     self.type_start_token = None
     self.type_end_token = None
     if self.flag_type in self.HAS_TYPE:
@@ -163,28 +238,37 @@
       if brace:
         end_token, contents = _GetMatchingEndBraceAndContents(brace)
         self.type = contents
+        self.jstype = typeannotation.Parse(brace, end_token,
+                                           error_handler)
         self.type_start_token = brace
         self.type_end_token = end_token
       elif (self.flag_type in self.TYPE_ONLY and
-          flag_token.next.type not in Type.FLAG_ENDING_TYPES):
+            flag_token.next.type not in Type.FLAG_ENDING_TYPES and
+            flag_token.line_number == flag_token.next.line_number):
+        # b/10407058. If the flag is expected to be followed by a type then
+        # search for type in same line only. If no token after flag in same
+        # line then conclude that no type is specified.
         self.type_start_token = flag_token.next
         self.type_end_token, self.type = _GetEndTokenAndContents(
             self.type_start_token)
         if self.type is not None:
           self.type = self.type.strip()
+          self.jstype = typeannotation.Parse(flag_token, self.type_end_token,
+                                             error_handler)
 
     # Extract name, if applicable.
     self.name_token = None
     self.name = None
     if self.flag_type in self.HAS_NAME:
       # Handle bad case, name could be immediately after flag token.
-      self.name_token = _GetNextIdentifierToken(flag_token)
+      self.name_token = _GetNextPartialIdentifierToken(flag_token)
 
       # Handle good case, if found token is after type start, look for
-      # identifier after type end, since types contain identifiers.
+      # an identifier (substring to cover cases like [cnt] b/4197272) after
+      # type end, since types contain identifiers.
       if (self.type and self.name_token and
           tokenutil.Compare(self.name_token, self.type_start_token) > 0):
-        self.name_token = _GetNextIdentifierToken(self.type_end_token)
+        self.name_token = _GetNextPartialIdentifierToken(self.type_end_token)
 
       if self.name_token:
         self.name = self.name_token.string
@@ -212,6 +296,13 @@
         self.description_end_token, self.description = (
             _GetEndTokenAndContents(interesting_token))
 
+  def HasType(self):
+    """Returns whether this flag should have a type annotation."""
+    return self.flag_type in self.HAS_TYPE
+
+  def __repr__(self):
+    return '<Flag: %s, type:%s>' % (self.flag_type, repr(self.jstype))
+
 
 class DocComment(object):
   """JavaScript doc comment object.
@@ -228,14 +319,21 @@
     Args:
       start_token: The first token in the doc comment.
     """
-    self.__params = {}
-    self.ordered_params = []
-    self.__flags = {}
+    self.__flags = []
     self.start_token = start_token
     self.end_token = None
     self.suppressions = {}
     self.invalidated = False
 
+  @property
+  def ordered_params(self):
+    """Gives the list of parameter names as a list of strings."""
+    params = []
+    for flag in self.__flags:
+      if flag.flag_type == 'param' and flag.name:
+        params.append(flag.name)
+    return params
+
   def Invalidate(self):
     """Indicate that the JSDoc is well-formed but we had problems parsing it.
 
@@ -249,35 +347,26 @@
     """Test whether Invalidate() has been called."""
     return self.invalidated
 
-  def AddParam(self, name, param_type):
-    """Add a new documented parameter.
-
-    Args:
-      name: The name of the parameter to document.
-      param_type: The parameter's declared JavaScript type.
-    """
-    self.ordered_params.append(name)
-    self.__params[name] = param_type
-
   def AddSuppression(self, token):
     """Add a new error suppression flag.
 
     Args:
       token: The suppression flag token.
     """
-    #TODO(user): Error if no braces
-    brace = tokenutil.SearchUntil(token, [Type.DOC_START_BRACE],
-                                  [Type.DOC_FLAG])
-    if brace:
-      end_token, contents = _GetMatchingEndBraceAndContents(brace)
-      for suppression in contents.split('|'):
+    flag = token and token.attached_object
+    if flag and flag.jstype:
+      for suppression in flag.jstype.IterIdentifiers():
         self.suppressions[suppression] = token
 
   def SuppressionOnly(self):
     """Returns whether this comment contains only suppression flags."""
-    for flag_type in self.__flags.keys():
-      if flag_type != 'suppress':
+    if not self.__flags:
+      return False
+
+    for flag in self.__flags:
+      if flag.flag_type != 'suppress':
         return False
+
     return True
 
   def AddFlag(self, flag):
@@ -286,7 +375,7 @@
     Args:
       flag: DocFlag object.
     """
-    self.__flags[flag.flag_type] = flag
+    self.__flags.append(flag)
 
   def InheritsDocumentation(self):
     """Test if the jsdoc implies documentation inheritance.
@@ -305,7 +394,10 @@
     Returns:
       True if the flag is set.
     """
-    return flag_type in self.__flags
+    for flag in self.__flags:
+      if flag.flag_type == flag_type:
+        return True
+    return False
 
   def GetFlag(self, flag_type):
     """Gets the last flag of the given type.
@@ -316,7 +408,101 @@
     Returns:
       The last instance of the given flag type in this doc comment.
     """
-    return self.__flags[flag_type]
+    for flag in reversed(self.__flags):
+      if flag.flag_type == flag_type:
+        return flag
+
+  def GetDocFlags(self):
+    """Return the doc flags for this comment."""
+    return list(self.__flags)
+
+  def _YieldDescriptionTokens(self):
+    for token in self.start_token:
+
+      if (token is self.end_token or
+          token.type is javascripttokens.JavaScriptTokenType.DOC_FLAG or
+          token.type not in javascripttokens.JavaScriptTokenType.COMMENT_TYPES):
+        return
+
+      if token.type not in [
+          javascripttokens.JavaScriptTokenType.START_DOC_COMMENT,
+          javascripttokens.JavaScriptTokenType.END_DOC_COMMENT,
+          javascripttokens.JavaScriptTokenType.DOC_PREFIX]:
+        yield token
+
+  @property
+  def description(self):
+    return tokenutil.TokensToString(
+        self._YieldDescriptionTokens())
+
+  def GetTargetIdentifier(self):
+    """Returns the identifier (as a string) that this is a comment for.
+
+    Note that this method uses GetIdentifierForToken to get the full
+    identifier, even if broken up by whitespace, newlines, or comments,
+    and thus could be longer than GetTargetToken().string.
+
+    Returns:
+      The identifier for the token this comment is for.
+    """
+    token = self.GetTargetToken()
+    if token:
+      return tokenutil.GetIdentifierForToken(token)
+
+  def GetTargetToken(self):
+    """Get this comment's target token.
+
+    Returns:
+      The token that is the target of this comment, or None if there isn't one.
+    """
+
+    # File overviews describe the file, not a token.
+    if self.HasFlag('fileoverview'):
+      return
+
+    skip_types = frozenset([
+        Type.WHITESPACE,
+        Type.BLANK_LINE,
+        Type.START_PAREN])
+
+    target_types = frozenset([
+        Type.FUNCTION_NAME,
+        Type.IDENTIFIER,
+        Type.SIMPLE_LVALUE])
+
+    token = self.end_token.next
+    while token:
+      if token.type in target_types:
+        return token
+
+      # Handles the case of a comment on "var foo = ...'
+      if token.IsKeyword('var'):
+        next_code_token = tokenutil.CustomSearch(
+            token,
+            lambda t: t.type not in Type.NON_CODE_TYPES)
+
+        if (next_code_token and
+            next_code_token.IsType(Type.SIMPLE_LVALUE)):
+          return next_code_token
+
+        return
+
+      # Handles the case of a comment on "function foo () {}"
+      if token.type is Type.FUNCTION_DECLARATION:
+        next_code_token = tokenutil.CustomSearch(
+            token,
+            lambda t: t.type not in Type.NON_CODE_TYPES)
+
+        if next_code_token.IsType(Type.FUNCTION_NAME):
+          return next_code_token
+
+        return
+
+      # Any token not in skip_types ends the search.
+      if token.type not in skip_types:
+        return
+
+      token = token.next
 
   def CompareParameters(self, params):
     """Computes the edit distance and list from the function params to the docs.
@@ -386,7 +572,8 @@
     Returns:
       A string representation of this object.
     """
-    return '<DocComment: %s, %s>' % (str(self.__params), str(self.__flags))
+    return '<DocComment: %s, %s>' % (
+        str(self.ordered_params), str(self.__flags))
 
 
 #
@@ -435,28 +622,25 @@
   return token, ''.join(contents)
 
 
-def _GetNextIdentifierToken(start_token):
-  """Searches for and returns the first identifier at the beginning of a token.
+def _GetNextPartialIdentifierToken(start_token):
+  """Returns the first token having identifier as substring after a token.
 
-  Searches each token after the start to see if it starts with an identifier.
-  If found, will split the token into at most 3 piecies: leading whitespace,
-  identifier, rest of token, returning the identifier token. If no identifier is
-  found returns None and changes no tokens. Search is abandoned when a
-  FLAG_ENDING_TYPE token is found.
+  Searches each token after the start to see if it contains an identifier.
+  If found, that token is returned; if no identifier is found, returns None.
+  Search is abandoned when a FLAG_ENDING_TYPE token is found.
 
   Args:
     start_token: The token to start searching after.
 
   Returns:
-    The identifier token is found, None otherwise.
+    The first token containing an identifier, or None otherwise.
   """
   token = start_token.next
 
-  while token and not token.type in Type.FLAG_ENDING_TYPES:
-    match = javascripttokenizer.JavaScriptTokenizer.IDENTIFIER.match(
+  while token and token.type not in Type.FLAG_ENDING_TYPES:
+    match = javascripttokenizer.JavaScriptTokenizer.IDENTIFIER.search(
         token.string)
-    if (match is not None and token.type == Type.COMMENT and
-        len(token.string) == len(match.group(0))):
+    if match is not None and token.type == Type.COMMENT:
       return token
 
     token = token.next
@@ -539,6 +723,9 @@
     is_constructor: If the function is a constructor.
     name: The name of the function, whether given in the function keyword or
         as the lvalue the function is assigned to.
+    start_token: First token of the function (the 'function' keyword token).
+    end_token: Last token of the function (the closing '}' token).
+    parameters: List of parameter names.
   """
 
   def __init__(self, block_depth, is_assigned, doc, name):
@@ -551,6 +738,9 @@
     self.has_this = False
     self.name = name
     self.doc = doc
+    self.start_token = None
+    self.end_token = None
+    self.parameters = None
 
 
 class StateTracker(object):
@@ -577,7 +767,7 @@
     self._block_depth = 0
     self._is_block_close = False
     self._paren_depth = 0
-    self._functions = []
+    self._function_stack = []
     self._functions_by_name = {}
     self._last_comment = None
     self._doc_comment = None
@@ -587,6 +777,24 @@
     self._last_line = None
     self._first_token = None
     self._documented_identifiers = set()
+    self._variables_in_scope = []
+
+  def DocFlagPass(self, start_token, error_handler):
+    """Parses doc flags.
+
+    This pass needs to be executed before the aliaspass and we don't want to do
+    a full-blown statetracker dry run for these.
+
+    Args:
+      start_token: The token at which to start iterating
+      error_handler: An error handler for error reporting.
+    """
+    if not start_token:
+      return
+    doc_flag_types = (Type.DOC_FLAG, Type.DOC_INLINE_FLAG)
+    for token in start_token:
+      if token.type in doc_flag_types:
+        token.attached_object = self._doc_flag(token, error_handler)
 
   def InFunction(self):
     """Returns true if the current token is within a function.
@@ -594,7 +802,7 @@
     Returns:
       True if the current token is within a function.
     """
-    return bool(self._functions)
+    return bool(self._function_stack)
 
   def InConstructor(self):
     """Returns true if the current token is within a constructor.
@@ -602,7 +810,7 @@
     Returns:
       True if the current token is within a constructor.
     """
-    return self.InFunction() and self._functions[-1].is_constructor
+    return self.InFunction() and self._function_stack[-1].is_constructor
 
   def InInterfaceMethod(self):
     """Returns true if the current token is within an interface method.
@@ -611,10 +819,10 @@
       True if the current token is within an interface method.
     """
     if self.InFunction():
-      if self._functions[-1].is_interface:
+      if self._function_stack[-1].is_interface:
         return True
       else:
-        name = self._functions[-1].name
+        name = self._function_stack[-1].name
         prototype_index = name.find('.prototype.')
         if prototype_index != -1:
           class_function_name = name[0:prototype_index]
@@ -630,7 +838,7 @@
     Returns:
       True if the current token is within a top level function.
     """
-    return len(self._functions) == 1 and self.InTopLevel()
+    return len(self._function_stack) == 1 and self.InTopLevel()
 
   def InAssignedFunction(self):
     """Returns true if the current token is within a function variable.
@@ -638,7 +846,7 @@
     Returns:
       True if if the current token is within a function variable
     """
-    return self.InFunction() and self._functions[-1].is_assigned
+    return self.InFunction() and self._function_stack[-1].is_assigned
 
   def IsFunctionOpen(self):
     """Returns true if the current token is a function block open.
@@ -646,8 +854,8 @@
     Returns:
       True if the current token is a function block open.
     """
-    return (self._functions and
-            self._functions[-1].block_depth == self._block_depth - 1)
+    return (self._function_stack and
+            self._function_stack[-1].block_depth == self._block_depth - 1)
 
   def IsFunctionClose(self):
     """Returns true if the current token is a function block close.
@@ -655,8 +863,8 @@
     Returns:
       True if the current token is a function block close.
     """
-    return (self._functions and
-            self._functions[-1].block_depth == self._block_depth)
+    return (self._function_stack and
+            self._function_stack[-1].block_depth == self._block_depth)
 
   def InBlock(self):
     """Returns true if the current token is within a block.
@@ -698,6 +906,30 @@
     """
     return bool(self._paren_depth)
 
+  def ParenthesesDepth(self):
+    """Returns the number of parens surrounding the token.
+
+    Returns:
+      The number of parentheses surrounding the token.
+    """
+    return self._paren_depth
+
+  def BlockDepth(self):
+    """Returns the number of blocks in which the token is nested.
+
+    Returns:
+      The number of blocks in which the token is nested.
+    """
+    return self._block_depth
+
+  def FunctionDepth(self):
+    """Returns the number of functions in which the token is nested.
+
+    Returns:
+      The number of functions in which the token is nested.
+    """
+    return len(self._function_stack)
+
   def InTopLevel(self):
     """Whether we are at the top level in the class.
 
@@ -791,7 +1023,8 @@
         Type.DOC_FLAG, Type.DOC_INLINE_FLAG, Type.DOC_PREFIX):
       f = tokenutil.SearchUntil(t, [Type.DOC_FLAG], [Type.START_DOC_COMMENT],
                                 None, True)
-      if f and f.attached_object.type_start_token is not None:
+      if (f and f.attached_object.type_start_token is not None and
+          f.attached_object.type_end_token is not None):
         return (tokenutil.Compare(t, f.attached_object.type_start_token) > 0 and
                 tokenutil.Compare(t, f.attached_object.type_end_token) < 0)
     return False
@@ -802,8 +1035,8 @@
     Returns:
       The current Function object.
     """
-    if self._functions:
-      return self._functions[-1]
+    if self._function_stack:
+      return self._function_stack[-1]
 
   def GetBlockDepth(self):
     """Return the block depth.
@@ -825,6 +1058,29 @@
     """Return the very first token in the file."""
     return self._first_token
 
+  def IsVariableInScope(self, token_string):
+    """Checks if string is variable in current scope.
+
+    For given string it checks whether the string is a defined variable
+    (including function param) in current state.
+
+    E.g. if variables defined (variables in current scope) is docs
+    then docs, docs.length etc will be considered as variable in current
+    scope. This will help in avoding extra goog.require for variables.
+
+    Args:
+      token_string: String to check if its is a variable in current scope.
+
+    Returns:
+      true if given string is a variable in current scope.
+    """
+    for variable in self._variables_in_scope:
+      if (token_string == variable
+          or token_string.startswith(variable + '.')):
+        return True
+
+    return False
+
   def HandleToken(self, token, last_non_space_token):
     """Handles the given token and updates state.
 
@@ -847,6 +1103,12 @@
       # by language.
       self._block_types.append(self.GetBlockType(token))
 
+      # When entering a function body, record its parameters.
+      if self.InFunction():
+        function = self._function_stack[-1]
+        if self._block_depth == function.block_depth + 1:
+          function.parameters = self.GetParams()
+
     # Track block depth.
     elif type == Type.END_BLOCK:
       self._is_block_close = not self.InObjectLiteral()
@@ -872,21 +1134,23 @@
       self._doc_comment.end_token = token
 
     elif type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
-      flag = self._doc_flag(token)
-      token.attached_object = flag
+      # Don't overwrite flags if they were already parsed in a previous pass.
+      if token.attached_object is None:
+        flag = self._doc_flag(token)
+        token.attached_object = flag
+      else:
+        flag = token.attached_object
       self._doc_comment.AddFlag(flag)
 
-      if flag.flag_type == 'param' and flag.name:
-        self._doc_comment.AddParam(flag.name, flag.type)
-      elif flag.flag_type == 'suppress':
+      if flag.flag_type == 'suppress':
         self._doc_comment.AddSuppression(token)
 
     elif type == Type.FUNCTION_DECLARATION:
       last_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, None,
                                          True)
       doc = None
-      # Only functions outside of parens are eligible for documentation.
-      if not self._paren_depth:
+      # Only top-level functions are eligible for documentation.
+      if self.InTopLevel():
         doc = self._doc_comment
 
       name = ''
@@ -900,8 +1164,7 @@
         # my.function.foo.
         #   bar = function() ...
         identifier = tokenutil.Search(last_code, Type.SIMPLE_LVALUE, None, True)
-        while identifier and identifier.type in (
-            Type.IDENTIFIER, Type.SIMPLE_LVALUE):
+        while identifier and tokenutil.IsIdentifierOrDot(identifier):
           name = identifier.string + name
           # Traverse behind us, skipping whitespace and comments.
           while True:
@@ -916,14 +1179,22 @@
           next_token = tokenutil.Search(next_token, Type.FUNCTION_NAME, 2)
 
       function = Function(self._block_depth, is_assigned, doc, name)
-      self._functions.append(function)
+      function.start_token = token
+
+      self._function_stack.append(function)
       self._functions_by_name[name] = function
 
+      # Add a delimiter to the scope-variable stack to mark the start of
+      # this function, so its variables can be popped off when the
+      # function declaration ends.
+      self._variables_in_scope.append('')
+
     elif type == Type.START_PARAMETERS:
       self._cumulative_params = ''
 
     elif type == Type.PARAMETERS:
       self._cumulative_params += token.string
+      self._variables_in_scope.extend(self.GetParams())
 
     elif type == Type.KEYWORD and token.string == 'return':
       next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
@@ -937,6 +1208,17 @@
       if function:
         function.has_throw = True
 
+    elif type == Type.KEYWORD and token.string == 'var':
+      function = self.GetFunction()
+      next_token = tokenutil.Search(token, [Type.IDENTIFIER,
+                                            Type.SIMPLE_LVALUE])
+
+      if next_token:
+        if next_token.type == Type.SIMPLE_LVALUE:
+          self._variables_in_scope.append(next_token.values['identifier'])
+        else:
+          self._variables_in_scope.append(next_token.string)
+
     elif type == Type.SIMPLE_LVALUE:
       identifier = token.values['identifier']
       jsdoc = self.GetDocComment()
@@ -950,7 +1232,7 @@
 
       # Detect documented non-assignments.
       next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
-      if next_token.IsType(Type.SEMICOLON):
+      if next_token and next_token.IsType(Type.SEMICOLON):
         if (self._last_non_space_token and
             self._last_non_space_token.IsType(Type.END_DOC_COMMENT)):
           self._documented_identifiers.add(token.string)
@@ -970,7 +1252,6 @@
       if function:
         function.has_this = True
 
-
   def HandleAfterToken(self, token):
     """Handle updating state after a token has been checked.
 
@@ -996,7 +1277,17 @@
 
       if self.InFunction() and self.IsFunctionClose():
         # TODO(robbyw): Detect the function's name for better errors.
-        self._functions.pop()
+        function = self._function_stack.pop()
+        function.end_token = token
+
+        # Pop all variables up to the delimiter ('') that were defined in
+        # the function being closed, taking them out of scope.
+        while self._variables_in_scope and self._variables_in_scope[-1]:
+          self._variables_in_scope.pop()
+
+        # Pop delimiter
+        if self._variables_in_scope:
+          self._variables_in_scope.pop()
 
     elif type == Type.END_PARAMETERS and self._doc_comment:
       self._doc_comment = None
diff --git a/catapult/third_party/closure_linter/closure_linter/statetracker_test.py b/catapult/third_party/closure_linter/closure_linter/statetracker_test.py
new file mode 100644
index 0000000..494dc64
--- /dev/null
+++ b/catapult/third_party/closure_linter/closure_linter/statetracker_test.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for the statetracker module."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+__author__ = ('nnaze@google.com (Nathan Naze)')
+
+
+
+import unittest as googletest
+
+from closure_linter import javascripttokens
+from closure_linter import statetracker
+from closure_linter import testutil
+
+
+class _FakeDocFlag(object):
+
+  def __repr__(self):
+    return '@%s %s' % (self.flag_type, self.name)
+
+
+class IdentifierTest(googletest.TestCase):
+
+  def testJustIdentifier(self):
+    a = javascripttokens.JavaScriptToken(
+        'abc', javascripttokens.JavaScriptTokenType.IDENTIFIER, 'abc', 1)
+
+    st = statetracker.StateTracker()
+    st.HandleToken(a, None)
+
+
+class DocCommentTest(googletest.TestCase):
+
+  @staticmethod
+  def _MakeDocFlagFake(flag_type, name=None):
+    flag = _FakeDocFlag()
+    flag.flag_type = flag_type
+    flag.name = name
+    return flag
+
+  def testDocFlags(self):
+    comment = statetracker.DocComment(None)
+
+    a = self._MakeDocFlagFake('param', 'foo')
+    comment.AddFlag(a)
+
+    b = self._MakeDocFlagFake('param', '')
+    comment.AddFlag(b)
+
+    c = self._MakeDocFlagFake('param', 'bar')
+    comment.AddFlag(c)
+
+    self.assertEquals(
+        ['foo', 'bar'],
+        comment.ordered_params)
+
+    self.assertEquals(
+        [a, b, c],
+        comment.GetDocFlags())
+
+  def testInvalidate(self):
+    comment = statetracker.DocComment(None)
+
+    self.assertFalse(comment.invalidated)
+    self.assertFalse(comment.IsInvalidated())
+
+    comment.Invalidate()
+
+    self.assertTrue(comment.invalidated)
+    self.assertTrue(comment.IsInvalidated())
+
+  def testSuppressionOnly(self):
+    comment = statetracker.DocComment(None)
+
+    self.assertFalse(comment.SuppressionOnly())
+    comment.AddFlag(self._MakeDocFlagFake('suppress'))
+    self.assertTrue(comment.SuppressionOnly())
+    comment.AddFlag(self._MakeDocFlagFake('foo'))
+    self.assertFalse(comment.SuppressionOnly())
+
+  def testRepr(self):
+    comment = statetracker.DocComment(None)
+    comment.AddFlag(self._MakeDocFlagFake('param', 'foo'))
+    comment.AddFlag(self._MakeDocFlagFake('param', 'bar'))
+
+    self.assertEquals(
+        '<DocComment: [\'foo\', \'bar\'], [@param foo, @param bar]>',
+        repr(comment))
+
+  def testDocFlagParam(self):
+    comment = self._ParseComment("""
+    /**
+     * @param {string} [name] Name of customer.
+     */""")
+    flag = comment.GetFlag('param')
+    self.assertEquals('string', flag.type)
+    self.assertEquals('string', flag.jstype.ToString())
+    self.assertEquals('[name]', flag.name)
+
+  def _ParseComment(self, script):
+    """Parse a script that contains one comment and return it."""
+    _, comments = testutil.ParseFunctionsAndComments(script)
+    self.assertEquals(1, len(comments))
+    return comments[0]
+
+if __name__ == '__main__':
+  googletest.main()
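The new scope-variable tracking in statetracker.py is not covered by the tests above, so here is a hedged sketch of how IsVariableInScope behaves while a function body is still open (names taken from this patch; Python 2 assumed).

from closure_linter import javascriptstatetracker
from closure_linter import testutil

start_token = testutil.TokenizeSourceAndRunEcmaPass(
    'function f(docs) { var count = docs.length; }')

tracker = javascriptstatetracker.JavaScriptStateTracker()
for token in start_token:
  tracker.HandleToken(token, tracker.GetLastNonSpaceToken())
  if token.string == '}':
    # At the closing brace (before HandleAfterToken pops the scope) the
    # parameter, the local, and dotted member accesses on them are all
    # reported as in scope.
    print tracker.IsVariableInScope('docs')         # True
    print tracker.IsVariableInScope('docs.length')  # True
    print tracker.IsVariableInScope('elsewhere')    # False
  tracker.HandleAfterToken(token)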
diff --git a/catapult/third_party/closure_linter/closure_linter/strict_test.py b/catapult/third_party/closure_linter/closure_linter/strict_test.py
new file mode 100644
index 0000000..75044e8
--- /dev/null
+++ b/catapult/third_party/closure_linter/closure_linter/strict_test.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+# Copyright 2013 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for gjslint --strict.
+
+Tests errors that can be thrown by gjslint when in strict mode.
+"""
+
+
+
+import unittest
+
+import gflags as flags
+import unittest as googletest
+
+from closure_linter import errors
+from closure_linter import runner
+from closure_linter.common import erroraccumulator
+
+flags.FLAGS.strict = True
+
+
+class StrictTest(unittest.TestCase):
+  """Tests scenarios where strict generates warnings."""
+
+  def testUnclosedString(self):
+    """Tests warnings are reported when nothing is disabled.
+
+       b/11450054.
+    """
+    original = [
+        'bug = function() {',
+        '  (\'foo\'\');',
+        '};',
+        '',
+        ]
+
+    expected = [errors.FILE_DOES_NOT_PARSE, errors.MULTI_LINE_STRING,
+                errors.FILE_IN_BLOCK]
+    self._AssertErrors(original, expected)
+
+  def _AssertErrors(self, original, expected_errors):
+    """Asserts that the error fixer corrects original to expected."""
+
+    # Trap gjslint's output and parse it to get the messages added.
+    error_accumulator = erroraccumulator.ErrorAccumulator()
+    runner.Run('testing.js', error_accumulator, source=original)
+    error_nums = [e.code for e in error_accumulator.GetErrors()]
+
+    error_nums.sort()
+    expected_errors.sort()
+    self.assertListEqual(error_nums, expected_errors)
+
+if __name__ == '__main__':
+  googletest.main()
+
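For completeness, the accumulator pattern used by _AssertErrors above also works outside a test: a hedged sketch of linting an in-memory snippet with runner.Run (Python 2, same bundled gflags setup the tests rely on).

from closure_linter import runner
from closure_linter.common import erroraccumulator

accumulator = erroraccumulator.ErrorAccumulator()
# 'snippet.js' is only a display name here; the source list supplies the code.
runner.Run('snippet.js', accumulator, source=['var x = 1;', ''])
print [e.code for e in accumulator.GetErrors()]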
diff --git a/catapult/third_party/closure_linter/closure_linter/testutil.py b/catapult/third_party/closure_linter/closure_linter/testutil.py
new file mode 100644
index 0000000..f7084ee
--- /dev/null
+++ b/catapult/third_party/closure_linter/closure_linter/testutil.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utility functions for testing gjslint components."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+__author__ = ('nnaze@google.com (Nathan Naze)')
+
+import StringIO
+
+from closure_linter import ecmametadatapass
+from closure_linter import javascriptstatetracker
+from closure_linter import javascripttokenizer
+
+
+def TokenizeSource(source):
+  """Convert a source into a string of tokens.
+
+  Args:
+    source: A source file as a string or file-like object (iterates lines).
+
+  Returns:
+    The first token of the resulting token stream.
+  """
+
+  if isinstance(source, basestring):
+    source = StringIO.StringIO(source)
+
+  tokenizer = javascripttokenizer.JavaScriptTokenizer()
+  return tokenizer.TokenizeFile(source)
+
+
+def TokenizeSourceAndRunEcmaPass(source):
+  """Tokenize a source and run the EcmaMetaDataPass on it.
+
+  Args:
+    source: A source file as a string or file-like object (iterates lines).
+
+  Returns:
+    The first token of the resulting token stream.
+  """
+  start_token = TokenizeSource(source)
+  ecma_pass = ecmametadatapass.EcmaMetaDataPass()
+  ecma_pass.Process(start_token)
+  return start_token
+
+
+def ParseFunctionsAndComments(source, error_handler=None):
+  """Run the tokenizer and tracker and return comments and functions found.
+
+  Args:
+    source: A source file as a string or file-like object (iterates lines).
+    error_handler: An error handler.
+
+  Returns:
+    The functions and comments as a tuple.
+  """
+  start_token = TokenizeSourceAndRunEcmaPass(source)
+
+  tracker = javascriptstatetracker.JavaScriptStateTracker()
+  if error_handler is not None:
+    tracker.DocFlagPass(start_token, error_handler)
+
+  functions = []
+  comments = []
+  for token in start_token:
+    tracker.HandleToken(token, tracker.GetLastNonSpaceToken())
+
+    function = tracker.GetFunction()
+    if function and function not in functions:
+      functions.append(function)
+
+    comment = tracker.GetDocComment()
+    if comment and comment not in comments:
+      comments.append(comment)
+
+    tracker.HandleAfterToken(token)
+
+  return functions, comments
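ParseFunctionsAndComments is also a convenient way to observe the Function attributes (start_token, end_token, parameters) that this change adds in statetracker.py. A sketch under the same Python 2 assumptions; the exact parameters value depends on the tracker's existing GetParams helper.

from closure_linter import testutil

functions, _ = testutil.ParseFunctionsAndComments(
    'myproject.add = function(a, b) { return a + b; };')

f = functions[0]
print f.parameters                              # e.g. ['a', 'b']
print f.start_token.string, f.end_token.string  # 'function' '}'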
diff --git a/catapult/third_party/closure_linter/closure_linter/tokenutil.py b/catapult/third_party/closure_linter/closure_linter/tokenutil.py
old mode 100755
new mode 100644
index 92ff16b..11e3ccc
--- a/catapult/third_party/closure_linter/closure_linter/tokenutil.py
+++ b/catapult/third_party/closure_linter/closure_linter/tokenutil.py
@@ -20,12 +20,13 @@
               'ajp@google.com (Andy Perelson)')
 
 import copy
+import StringIO
 
-from closure_linter import javascripttokens
 from closure_linter.common import tokens
+from closure_linter.javascripttokens import JavaScriptToken
+from closure_linter.javascripttokens import JavaScriptTokenType
 
 # Shorthand
-JavaScriptToken = javascripttokens.JavaScriptToken
 Type = tokens.TokenType
 
 
@@ -214,6 +215,13 @@
   Args:
     token: The token to delete
   """
+  # When deleting a token, we do not update the deleted token itself to make
+  # sure the previous and next pointers are still pointing to tokens which are
+  # not deleted.  Also it is very hard to keep track of all previously deleted
+  # tokens to update them when their pointers become invalid.  So we add this
+  # flag so that any token linked-list iteration logic can safely skip
+  # deleted nodes when its current token is deleted.
+  token.is_deleted = True
   if token.previous:
     token.previous.next = token.next
 
@@ -238,6 +246,47 @@
   DeleteToken(token)
 
 
+def InsertTokenBefore(new_token, token):
+  """Insert new_token before token.
+
+  Args:
+    new_token: A token to be added to the stream
+    token: A token already in the stream
+  """
+  new_token.next = token
+  new_token.previous = token.previous
+
+  new_token.metadata = copy.copy(token.metadata)
+
+  if new_token.IsCode():
+    old_last_code = token.metadata.last_code
+    following_token = token
+    while (following_token and
+           following_token.metadata.last_code == old_last_code):
+      following_token.metadata.last_code = new_token
+      following_token = following_token.next
+
+  token.previous = new_token
+  if new_token.previous:
+    new_token.previous.next = new_token
+
+  if new_token.start_index is None:
+    if new_token.line_number == token.line_number:
+      new_token.start_index = token.start_index
+    else:
+      previous_token = new_token.previous
+      if previous_token:
+        new_token.start_index = (previous_token.start_index +
+                                 len(previous_token.string))
+      else:
+        new_token.start_index = 0
+
+    iterator = new_token.next
+    while iterator and iterator.line_number == new_token.line_number:
+      iterator.start_index += len(new_token.string)
+      iterator = iterator.next
+
+
 def InsertTokenAfter(new_token, token):
   """Insert new_token after token.
 
@@ -372,3 +421,277 @@
     return token1.line_number - token2.line_number
   else:
     return token1.start_index - token2.start_index
+
+
+def GoogScopeOrNoneFromStartBlock(token):
+  """Determines if the given START_BLOCK is part of a goog.scope statement.
+
+  Args:
+    token: A token of type START_BLOCK.
+
+  Returns:
+    The goog.scope function call token, or None if such call doesn't exist.
+  """
+  if token.type != JavaScriptTokenType.START_BLOCK:
+    return None
+
+  # Search for a goog.scope statement, which will be 5 tokens before the
+  # block. Illustration of the tokens found prior to the start block:
+  # goog.scope(function() {
+  #      5    4    3   21 ^
+
+  maybe_goog_scope = token
+  for unused_i in xrange(5):
+    maybe_goog_scope = (maybe_goog_scope.previous if maybe_goog_scope and
+                        maybe_goog_scope.previous else None)
+  if maybe_goog_scope and maybe_goog_scope.string == 'goog.scope':
+    return maybe_goog_scope
+
+
+def GetTokenRange(start_token, end_token):
+  """Returns a list of tokens between the two given, inclusive.
+
+  Args:
+    start_token: Start token in the range.
+    end_token: End token in the range.
+
+  Returns:
+    A list of tokens, in order, from start_token to end_token (including start
+    and end).  Returns None if the tokens do not describe a valid range.
+  """
+
+  token_range = []
+  token = start_token
+
+  while token:
+    token_range.append(token)
+
+    if token == end_token:
+      return token_range
+
+    token = token.next
+
+
+def TokensToString(token_iterable):
+  """Convert a number of tokens into a string.
+
+  Newlines will be inserted whenever the line_numbers of two neighboring
+  tokens differ.
+
+  Args:
+    token_iterable: The tokens to turn to a string.
+
+  Returns:
+    A string representation of the given tokens.
+  """
+
+  buf = StringIO.StringIO()
+  token_list = list(token_iterable)
+  if not token_list:
+    return ''
+
+  line_number = token_list[0].line_number
+
+  for token in token_list:
+
+    while line_number < token.line_number:
+      line_number += 1
+      buf.write('\n')
+
+    if line_number > token.line_number:
+      line_number = token.line_number
+      buf.write('\n')
+
+    buf.write(token.string)
+
+  return buf.getvalue()
+
+
+def GetPreviousCodeToken(token):
+  """Returns the code token before the specified token.
+
+  Args:
+    token: A token.
+
+  Returns:
+    The code token before the specified token or None if no such token
+    exists.
+  """
+
+  return CustomSearch(
+      token,
+      lambda t: t and t.type not in JavaScriptTokenType.NON_CODE_TYPES,
+      reverse=True)
+
+
+def GetNextCodeToken(token):
+  """Returns the next code token after the specified token.
+
+  Args:
+    token: A token.
+
+  Returns:
+    The next code token after the specified token or None if no such token
+    exists.
+  """
+
+  return CustomSearch(
+      token,
+      lambda t: t and t.type not in JavaScriptTokenType.NON_CODE_TYPES,
+      reverse=False)
+
+
+def GetIdentifierStart(token):
+  """Returns the first token in an identifier.
+
+  Given a token which is part of an identifier, returns the token at the start
+  of the identifier.
+
+  Args:
+    token: A token which is part of an identifier.
+
+  Returns:
+    The token at the start of the identifier or None if the identifier was not
+    of the form 'a.b.c' (e.g. "['a']['b'].c").
+  """
+
+  start_token = token
+  previous_code_token = GetPreviousCodeToken(token)
+
+  while (previous_code_token and (
+      previous_code_token.IsType(JavaScriptTokenType.IDENTIFIER) or
+      IsDot(previous_code_token))):
+    start_token = previous_code_token
+    previous_code_token = GetPreviousCodeToken(previous_code_token)
+
+  if IsDot(start_token):
+    return None
+
+  return start_token
+
+
+def GetIdentifierForToken(token):
+  """Get the symbol specified by a token.
+
+  Given a token, this function additionally concatenates any parts of the
+  symbol being identified that are split by whitespace or a newline
+  character.
+
+  The function will return None if the token is not the first token of an
+  identifier.
+
+  Args:
+    token: The first token of a symbol.
+
+  Returns:
+    The whole symbol, as a string.
+  """
+
+  # Search backward to determine if this token is the first token of the
+  # identifier. If it is not the first token, return None to signal that this
+  # token should be ignored.
+  prev_token = token.previous
+  while prev_token:
+    if (prev_token.IsType(JavaScriptTokenType.IDENTIFIER) or
+        IsDot(prev_token)):
+      return None
+
+    if (prev_token.IsType(tokens.TokenType.WHITESPACE) or
+        prev_token.IsAnyType(JavaScriptTokenType.COMMENT_TYPES)):
+      prev_token = prev_token.previous
+    else:
+      break
+
+  # A "function foo()" declaration.
+  if token.type is JavaScriptTokenType.FUNCTION_NAME:
+    return token.string
+
+  # A "var foo" declaration (if the previous token is 'var')
+  previous_code_token = GetPreviousCodeToken(token)
+
+  if previous_code_token and previous_code_token.IsKeyword('var'):
+    return token.string
+
+  # Otherwise, this is potentially a namespaced (goog.foo.bar) identifier that
+  # could span multiple lines or be broken up by whitespace.  We need
+  # to concatenate.
+  identifier_types = set([
+      JavaScriptTokenType.IDENTIFIER,
+      JavaScriptTokenType.SIMPLE_LVALUE
+      ])
+
+  assert token.type in identifier_types
+
+  # Start with the first token
+  symbol_tokens = [token]
+
+  if token.next:
+    for t in token.next:
+      last_symbol_token = symbol_tokens[-1]
+
+      # A dot is part of the previous symbol.
+      if IsDot(t):
+        symbol_tokens.append(t)
+        continue
+
+      # An identifier is part of the previous symbol if the previous one was a
+      # dot.
+      if t.type in identifier_types:
+        if IsDot(last_symbol_token):
+          symbol_tokens.append(t)
+          continue
+        else:
+          break
+
+      # Skip any whitespace
+      if t.type in JavaScriptTokenType.NON_CODE_TYPES:
+        continue
+
+      # This is the end of the identifier. Stop iterating.
+      break
+
+  if symbol_tokens:
+    return ''.join([t.string for t in symbol_tokens])
+
+
+def GetStringAfterToken(token):
+  """Get string after token.
+
+  Args:
+    token: Search will be done after this token.
+
+  Returns:
+    The string found after the token, or None if there is none (an empty
+    string also returns None).
+
+  The search is limited to the end of the current string because, for an empty
+  string, no Type.STRING_TEXT token exists and the next string must not be
+  returned instead.
+  E.g.
+  a = '';
+  b = 'test';
+  When searching for the string after 'a', an unbounded search would return
+  'test', which is not desirable because there is an empty string before it.
+
+  This returns None when the string is empty or no string is found, since in
+  both cases there is no Type.STRING_TEXT token.
+  """
+  string_token = SearchUntil(token, JavaScriptTokenType.STRING_TEXT,
+                             [JavaScriptTokenType.SINGLE_QUOTE_STRING_END,
+                              JavaScriptTokenType.DOUBLE_QUOTE_STRING_END])
+  if string_token:
+    return string_token.string
+  else:
+    return None
+
+
+def IsDot(token):
+  """Whether the token represents a "dot" operator (foo.bar)."""
+  return token.type is JavaScriptTokenType.OPERATOR and token.string == '.'
+
+
+def IsIdentifierOrDot(token):
+  """Whether the token is either an identifier or a '.'."""
+  return (token.type in [JavaScriptTokenType.IDENTIFIER,
+                         JavaScriptTokenType.SIMPLE_LVALUE] or
+          IsDot(token))
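Finally, a short sketch of the new string/identifier helpers added to tokenutil above (GetStringAfterToken and GetIdentifierForToken), again reusing testutil from this change under Python 2.

from closure_linter import testutil
from closure_linter import tokenutil

start_token = testutil.TokenizeSourceAndRunEcmaPass(
    "var Foo = goog.require('bar.Foo');")

for token in start_token:
  if token.string == 'goog.require':
    print tokenutil.GetStringAfterToken(token)    # 'bar.Foo'
    print tokenutil.GetIdentifierForToken(token)  # 'goog.require'
    break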
diff --git a/catapult/third_party/closure_linter/closure_linter/tokenutil_test.py b/catapult/third_party/closure_linter/closure_linter/tokenutil_test.py
new file mode 100644
index 0000000..c7d3854
--- /dev/null
+++ b/catapult/third_party/closure_linter/closure_linter/tokenutil_test.py
@@ -0,0 +1,297 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for the scopeutil module."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+__author__ = ('nnaze@google.com (Nathan Naze)')
+
+import unittest as googletest
+
+from closure_linter import ecmametadatapass
+from closure_linter import javascripttokens
+from closure_linter import testutil
+from closure_linter import tokenutil
+
+
+class FakeToken(object):
+  pass
+
+
+class TokenUtilTest(googletest.TestCase):
+
+  def testGetTokenRange(self):
+
+    a = FakeToken()
+    b = FakeToken()
+    c = FakeToken()
+    d = FakeToken()
+    e = FakeToken()
+
+    a.next = b
+    b.next = c
+    c.next = d
+
+    self.assertEquals([a, b, c, d], tokenutil.GetTokenRange(a, d))
+
+    # This is an error as e does not come after a in the token chain.
+    self.assertRaises(Exception, lambda: tokenutil.GetTokenRange(a, e))
+
+  def testTokensToString(self):
+
+    a = FakeToken()
+    b = FakeToken()
+    c = FakeToken()
+    d = FakeToken()
+    e = FakeToken()
+
+    a.string = 'aaa'
+    b.string = 'bbb'
+    c.string = 'ccc'
+    d.string = 'ddd'
+    e.string = 'eee'
+
+    a.line_number = 5
+    b.line_number = 6
+    c.line_number = 6
+    d.line_number = 10
+    e.line_number = 11
+
+    self.assertEquals(
+        'aaa\nbbbccc\n\n\n\nddd\neee',
+        tokenutil.TokensToString([a, b, c, d, e]))
+
+    self.assertEquals(
+        'ddd\neee\naaa\nbbbccc',
+        tokenutil.TokensToString([d, e, a, b, c]),
+        'Neighboring tokens not in line_number order should have a newline '
+        'between them.')
+
+  def testGetPreviousCodeToken(self):
+
+    tokens = testutil.TokenizeSource("""
+start1. // comment
+    /* another comment */
+    end1
+""")
+
+    def _GetTokenStartingWith(token_starts_with):
+      for t in tokens:
+        if t.string.startswith(token_starts_with):
+          return t
+
+    self.assertEquals(
+        None,
+        tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('start1')))
+
+    self.assertEquals(
+        '.',
+        tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('end1')).string)
+
+    self.assertEquals(
+        'start1',
+        tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('.')).string)
+
+  def testGetNextCodeToken(self):
+
+    tokens = testutil.TokenizeSource("""
+start1. // comment
+    /* another comment */
+    end1
+""")
+
+    def _GetTokenStartingWith(token_starts_with):
+      for t in tokens:
+        if t.string.startswith(token_starts_with):
+          return t
+
+    self.assertEquals(
+        '.',
+        tokenutil.GetNextCodeToken(_GetTokenStartingWith('start1')).string)
+
+    self.assertEquals(
+        'end1',
+        tokenutil.GetNextCodeToken(_GetTokenStartingWith('.')).string)
+
+    self.assertEquals(
+        None,
+        tokenutil.GetNextCodeToken(_GetTokenStartingWith('end1')))
+
+  def testGetIdentifierStart(self):
+
+    tokens = testutil.TokenizeSource("""
+start1 . // comment
+    prototype. /* another comment */
+    end1
+
+['edge'][case].prototype.
+    end2 = function() {}
+""")
+
+    def _GetTokenStartingWith(token_starts_with):
+      for t in tokens:
+        if t.string.startswith(token_starts_with):
+          return t
+
+    self.assertEquals(
+        'start1',
+        tokenutil.GetIdentifierStart(_GetTokenStartingWith('end1')).string)
+
+    self.assertEquals(
+        'start1',
+        tokenutil.GetIdentifierStart(_GetTokenStartingWith('start1')).string)
+
+    self.assertEquals(
+        None,
+        tokenutil.GetIdentifierStart(_GetTokenStartingWith('end2')))
+
+  def testInsertTokenBefore(self):
+
+    self.AssertInsertTokenAfterBefore(False)
+
+  def testInsertTokenAfter(self):
+
+    self.AssertInsertTokenAfterBefore(True)
+
+  def AssertInsertTokenAfterBefore(self, after):
+
+    new_token = javascripttokens.JavaScriptToken(
+        'a', javascripttokens.JavaScriptTokenType.IDENTIFIER, 1, 1)
+
+    existing_token1 = javascripttokens.JavaScriptToken(
+        'var', javascripttokens.JavaScriptTokenType.KEYWORD, 1, 1)
+    existing_token1.start_index = 0
+    existing_token1.metadata = ecmametadatapass.EcmaMetaData()
+
+    existing_token2 = javascripttokens.JavaScriptToken(
+        ' ', javascripttokens.JavaScriptTokenType.WHITESPACE, 1, 1)
+    existing_token2.start_index = 3
+    existing_token2.metadata = ecmametadatapass.EcmaMetaData()
+    existing_token2.metadata.last_code = existing_token1
+
+    existing_token1.next = existing_token2
+    existing_token2.previous = existing_token1
+
+    if after:
+      tokenutil.InsertTokenAfter(new_token, existing_token1)
+    else:
+      tokenutil.InsertTokenBefore(new_token, existing_token2)
+
+    self.assertEquals(existing_token1, new_token.previous)
+    self.assertEquals(existing_token2, new_token.next)
+
+    self.assertEquals(new_token, existing_token1.next)
+    self.assertEquals(new_token, existing_token2.previous)
+
+    self.assertEquals(existing_token1, new_token.metadata.last_code)
+    self.assertEquals(new_token, existing_token2.metadata.last_code)
+
+    self.assertEquals(0, existing_token1.start_index)
+    self.assertEquals(3, new_token.start_index)
+    self.assertEquals(4, existing_token2.start_index)
+
+  def testGetIdentifierForToken(self):
+
+    tokens = testutil.TokenizeSource("""
+start1.abc.def.prototype.
+  onContinuedLine
+
+(start2.abc.def
+  .hij.klm
+  .nop)
+
+start3.abc.def
+   .hij = function() {};
+
+// An absurd multi-liner.
+start4.abc.def.
+   hij.
+   klm = function() {};
+
+start5 . aaa . bbb . ccc
+  shouldntBePartOfThePreviousSymbol
+
+start6.abc.def ghi.shouldntBePartOfThePreviousSymbol
+
+var start7 = 42;
+
+function start8() {
+
+}
+
+start9.abc. // why is there a comment here?
+  def /* another comment */
+  shouldntBePart
+
+start10.abc // why is there a comment here?
+  .def /* another comment */
+  shouldntBePart
+
+start11.abc. middle1.shouldNotBeIdentifier
+""")
+
+    def _GetTokenStartingWith(token_starts_with):
+      for t in tokens:
+        if t.string.startswith(token_starts_with):
+          return t
+
+    self.assertEquals(
+        'start1.abc.def.prototype.onContinuedLine',
+        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start1')))
+
+    self.assertEquals(
+        'start2.abc.def.hij.klm.nop',
+        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start2')))
+
+    self.assertEquals(
+        'start3.abc.def.hij',
+        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start3')))
+
+    self.assertEquals(
+        'start4.abc.def.hij.klm',
+        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start4')))
+
+    self.assertEquals(
+        'start5.aaa.bbb.ccc',
+        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start5')))
+
+    self.assertEquals(
+        'start6.abc.def',
+        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start6')))
+
+    self.assertEquals(
+        'start7',
+        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start7')))
+
+    self.assertEquals(
+        'start8',
+        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start8')))
+
+    self.assertEquals(
+        'start9.abc.def',
+        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start9')))
+
+    self.assertEquals(
+        'start10.abc.def',
+        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start10')))
+
+    self.assertIsNone(
+        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('middle1')))
+
+
+if __name__ == '__main__':
+  googletest.main()
diff --git a/catapult/third_party/closure_linter/closure_linter/typeannotation.py b/catapult/third_party/closure_linter/closure_linter/typeannotation.py
new file mode 100644
index 0000000..5565134
--- /dev/null
+++ b/catapult/third_party/closure_linter/closure_linter/typeannotation.py
@@ -0,0 +1,424 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2016 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Closure typeannotation parsing and utilities."""
+
+
+
+from closure_linter import errors
+from closure_linter import javascripttokens
+from closure_linter.common import error
+
+# Shorthand
+TYPE = javascripttokens.JavaScriptTokenType
+
+
+class TypeAnnotation(object):
+  """Represents a structured view of a closure type annotation.
+
+  Attributes:
+    identifier: The name of the type.
+    key_type: The name part before a colon.
+    sub_types: The list of sub_types used e.g. for Array.<…>.
+    or_null: The '?' annotation.
+    not_null: The '!' annotation.
+    type_group: Whether this is a grouping (a|b); does not include function(a).
+    return_type: The return type of a function definition.
+    alias: The actual type set by closurizednamespaceinfo if the identifier uses
+        an alias to shorten the name.
+    tokens: An ordered list of tokens used for this type. May contain
+        TypeAnnotation instances for sub_types, key_type or return_type.
+  """
+
+  IMPLICIT_TYPE_GROUP = 2
+
+  NULLABILITY_UNKNOWN = 2
+
+  FUNCTION_TYPE = 'function'
+  NULL_TYPE = 'null'
+  VAR_ARGS_TYPE = '...'
+
+  # Frequently used known non-nullable types.
+  NON_NULLABLE = frozenset([
+      'boolean', FUNCTION_TYPE, 'number', 'string', 'undefined'])
+  # Frequently used known nullable types.
+  NULLABLE_TYPE_WHITELIST = frozenset([
+      'Array', 'Document', 'Element', 'Function', 'Node', 'NodeList',
+      'Object'])
+
+  def __init__(self):
+    self.identifier = ''
+    self.sub_types = []
+    self.or_null = False
+    self.not_null = False
+    self.type_group = False
+    self.alias = None
+    self.key_type = None
+    self.record_type = False
+    self.opt_arg = False
+    self.return_type = None
+    self.tokens = []
+
+  def IsFunction(self):
+    """Determines whether this is a function definition."""
+    return self.identifier == TypeAnnotation.FUNCTION_TYPE
+
+  def IsConstructor(self):
+    """Determines whether this is a function definition for a constructor."""
+    key_type = self.sub_types and self.sub_types[0].key_type
+    return self.IsFunction() and key_type.identifier == 'new'
+
+  def IsRecordType(self):
+    """Returns True if this type is a record type."""
+    return (self.record_type or
+            any(t.IsRecordType() for t in self.sub_types))
+
+  def IsVarArgsType(self):
+    """Determines if the type is a var_args type, i.e. starts with '...'."""
+    return self.identifier.startswith(TypeAnnotation.VAR_ARGS_TYPE) or (
+        self.type_group == TypeAnnotation.IMPLICIT_TYPE_GROUP and
+        self.sub_types[0].identifier.startswith(TypeAnnotation.VAR_ARGS_TYPE))
+
+  def IsEmpty(self):
+    """Returns True if the type is empty."""
+    return not self.tokens
+
+  def IsUnknownType(self):
+    """Returns True if this is the unknown type {?}."""
+    return (self.or_null
+            and not self.identifier
+            and not self.sub_types
+            and not self.return_type)
+
+  def Append(self, item):
+    """Adds a sub_type to this type and finalizes it.
+
+    Args:
+      item: The TypeAnnotation item to append.
+    """
+    # item is a TypeAnnotation instance, so pylint: disable=protected-access
+    self.sub_types.append(item._Finalize(self))
+
+  def __repr__(self):
+    """Reconstructs the type definition."""
+    append = ''
+    if self.sub_types:
+      separator = (',' if not self.type_group else '|')
+      if self.IsFunction():
+        surround = '(%s)'
+      else:
+        surround = {False: '{%s}' if self.record_type else '<%s>',
+                    True: '(%s)',
+                    TypeAnnotation.IMPLICIT_TYPE_GROUP: '%s'}[self.type_group]
+      append = surround % separator.join(repr(t) for t in self.sub_types)
+    if self.return_type:
+      append += ':%s' % repr(self.return_type)
+    append += '=' if self.opt_arg else ''
+    prefix = '' + ('?' if self.or_null else '') + ('!' if self.not_null else '')
+    keyword = '%s:' % repr(self.key_type) if self.key_type else ''
+    return keyword + prefix + '%s' % (self.alias or self.identifier) + append
+
+  def ToString(self):
+    """Concats the type's tokens to form a string again."""
+    ret = []
+    for token in self.tokens:
+      if not isinstance(token, TypeAnnotation):
+        ret.append(token.string)
+      else:
+        ret.append(token.ToString())
+    return ''.join(ret)
+
+  def Dump(self, indent=''):
+    """Dumps this type's structure for debugging purposes."""
+    result = []
+    for t in self.tokens:
+      if isinstance(t, TypeAnnotation):
+        result.append(indent + str(t) + ' =>\n' + t.Dump(indent + '  '))
+      else:
+        result.append(indent + str(t))
+    return '\n'.join(result)
+
+  def IterIdentifiers(self):
+    """Iterates over all identifiers in this type and its subtypes."""
+    if self.identifier:
+      yield self.identifier
+    for subtype in self.IterTypes():
+      for identifier in subtype.IterIdentifiers():
+        yield identifier
+
+  def IterTypeGroup(self):
+    """Iterates over all types in the type group including self.
+
+    Yields:
+      If this is an implicit or manual type-group: all sub_types.
+      Otherwise: self.
+      E.g. for @type {Foo.<Bar>} this will yield only Foo.<Bar>;
+      for @type {Foo|(Bar|Sample)} this will yield Foo, Bar and Sample.
+
+    """
+    if self.type_group:
+      for sub_type in self.sub_types:
+        for sub_type in sub_type.IterTypeGroup():
+          yield sub_type
+    else:
+      yield self
+
+  def IterTypes(self):
+    """Iterates over each subtype as well as return and key types."""
+    if self.return_type:
+      yield self.return_type
+
+    if self.key_type:
+      yield self.key_type
+
+    for sub_type in self.sub_types:
+      yield sub_type
+
+  def GetNullability(self, modifiers=True):
+    """Computes whether the type may be null.
+
+    Args:
+      modifiers: Whether the modifiers ? and ! should be considered in the
+                 evaluation.
+    Returns:
+      True if the type allows null, False if the type is strictly
+      non-nullable, and NULLABILITY_UNKNOWN if it cannot be determined.
+    """
+
+    # Explicitly marked nullable types or 'null' are nullable.
+    if ((modifiers and self.or_null) or
+        self.identifier == TypeAnnotation.NULL_TYPE):
+      return True
+
+    # Explicitly marked non-nullable types or non-nullable base types:
+    if ((modifiers and self.not_null) or self.record_type
+        or self.identifier in TypeAnnotation.NON_NULLABLE):
+      return False
+
+    # A type group is nullable if any of its elements are nullable.
+    if self.type_group:
+      maybe_nullable = False
+      for sub_type in self.sub_types:
+        nullability = sub_type.GetNullability()
+        if nullability == self.NULLABILITY_UNKNOWN:
+          maybe_nullable = nullability
+        elif nullability:
+          return True
+      return maybe_nullable
+
+    # Whitelisted types are nullable.
+    if self.identifier.rstrip('.') in TypeAnnotation.NULLABLE_TYPE_WHITELIST:
+      return True
+
+    # All other types are unknown (most should be nullable, but
+    # enums are not and typedefs might not be).
+    return TypeAnnotation.NULLABILITY_UNKNOWN
+
+  def WillAlwaysBeNullable(self):
+    """Computes whether the ! flag is illegal for this type.
+
+    This is the case if this type or any of the subtypes is marked as
+    explicitly nullable.
+
+    Returns:
+      True if the ! flag would be illegal.
+    """
+    if self.or_null or self.identifier == TypeAnnotation.NULL_TYPE:
+      return True
+
+    if self.type_group:
+      return any(t.WillAlwaysBeNullable() for t in self.sub_types)
+
+    return False
+
+  def _Finalize(self, parent):
+    """Fixes some parsing issues once the TypeAnnotation is complete."""
+
+    # Normalize functions whose definition ended up in the key type because
+    # they defined a return type after a colon.
+    if (self.key_type and
+        self.key_type.identifier == TypeAnnotation.FUNCTION_TYPE):
+      current = self.key_type
+      current.return_type = self
+      self.key_type = None
+      # opt_arg never refers to the return type but to the function itself.
+      current.opt_arg = self.opt_arg
+      self.opt_arg = False
+      return current
+
+    # If a typedef just specified the key, it will not end up in the key type.
+    if parent.record_type and not self.key_type:
+      current = TypeAnnotation()
+      current.key_type = self
+      current.tokens.append(self)
+      return current
+    return self
+
+  def FirstToken(self):
+    """Returns the first token used in this type or any of its subtypes."""
+    first = self.tokens[0]
+    return first.FirstToken() if isinstance(first, TypeAnnotation) else first
+
+
+def Parse(token, token_end, error_handler):
+  """Parses a type annotation and returns a TypeAnnotation object."""
+  return TypeAnnotationParser(error_handler).Parse(token.next, token_end)
+
+
+class TypeAnnotationParser(object):
+  """A parser for type annotations constructing the TypeAnnotation object."""
+
+  def __init__(self, error_handler):
+    self._stack = []
+    self._error_handler = error_handler
+    self._closing_error = False
+
+  def Parse(self, token, token_end):
+    """Parses a type annotation and returns a TypeAnnotation object."""
+    root = TypeAnnotation()
+    self._stack.append(root)
+    current = TypeAnnotation()
+    root.tokens.append(current)
+
+    while token and token != token_end:
+      if token.type in (TYPE.DOC_TYPE_START_BLOCK, TYPE.DOC_START_BRACE):
+        if token.string == '(':
+          if current.identifier and current.identifier not in [
+              TypeAnnotation.FUNCTION_TYPE, TypeAnnotation.VAR_ARGS_TYPE]:
+            self.Error(token,
+                       'Invalid identifier for (): "%s"' % current.identifier)
+          current.type_group = (
+              current.identifier != TypeAnnotation.FUNCTION_TYPE)
+        elif token.string == '{':
+          current.record_type = True
+        current.tokens.append(token)
+        self._stack.append(current)
+        current = TypeAnnotation()
+        self._stack[-1].tokens.append(current)
+
+      elif token.type in (TYPE.DOC_TYPE_END_BLOCK, TYPE.DOC_END_BRACE):
+        prev = self._stack.pop()
+        prev.Append(current)
+        current = prev
+
+        # If an implicit type group was created, close it as well.
+        if prev.type_group == TypeAnnotation.IMPLICIT_TYPE_GROUP:
+          prev = self._stack.pop()
+          prev.Append(current)
+          current = prev
+        current.tokens.append(token)
+
+      elif token.type == TYPE.DOC_TYPE_MODIFIER:
+        if token.string == '!':
+          current.tokens.append(token)
+          current.not_null = True
+        elif token.string == '?':
+          current.tokens.append(token)
+          current.or_null = True
+        elif token.string == ':':
+          current.tokens.append(token)
+          prev = current
+          current = TypeAnnotation()
+          prev.tokens.append(current)
+          current.key_type = prev
+        elif token.string == '=':
+          # For implicit type groups the '=' refers to the parent.
+          try:
+            if self._stack[-1].type_group == TypeAnnotation.IMPLICIT_TYPE_GROUP:
+              self._stack[-1].tokens.append(token)
+              self._stack[-1].opt_arg = True
+            else:
+              current.tokens.append(token)
+              current.opt_arg = True
+          except IndexError:
+            self.ClosingError(token)
+        elif token.string == '|':
+          # If a type group has explicitly been opened, do a normal append.
+          # Otherwise we have to open the type group and move the current
+          # type into it before appending.
+          if not self._stack[-1].type_group:
+            type_group = TypeAnnotation()
+            if (current.key_type and
+                current.key_type.identifier != TypeAnnotation.FUNCTION_TYPE):
+              type_group.key_type = current.key_type
+              current.key_type = None
+            type_group.type_group = TypeAnnotation.IMPLICIT_TYPE_GROUP
+            # Fix the token order
+            prev = self._stack[-1].tokens.pop()
+            self._stack[-1].tokens.append(type_group)
+            type_group.tokens.append(prev)
+            self._stack.append(type_group)
+          self._stack[-1].tokens.append(token)
+          self.Append(current, error_token=token)
+          current = TypeAnnotation()
+          self._stack[-1].tokens.append(current)
+        elif token.string == ',':
+          self.Append(current, error_token=token)
+          current = TypeAnnotation()
+          self._stack[-1].tokens.append(token)
+          self._stack[-1].tokens.append(current)
+        else:
+          current.tokens.append(token)
+          self.Error(token, 'Invalid token')
+
+      elif token.type == TYPE.COMMENT:
+        current.tokens.append(token)
+        current.identifier += token.string.strip()
+
+      elif token.type in [TYPE.DOC_PREFIX, TYPE.WHITESPACE]:
+        current.tokens.append(token)
+
+      else:
+        current.tokens.append(token)
+        self.Error(token, 'Unexpected token')
+
+      token = token.next
+
+    self.Append(current, error_token=token)
+    try:
+      ret = self._stack.pop()
+    except IndexError:
+      self.ClosingError(token)
+      # The type is screwed up, but let's return something.
+      return current
+
+    if self._stack and (len(self._stack) != 1 or
+                        ret.type_group != TypeAnnotation.IMPLICIT_TYPE_GROUP):
+      self.Error(token, 'Too many opening items.')
+
+    return ret if len(ret.sub_types) > 1 else ret.sub_types[0]
+
+  def Append(self, type_obj, error_token):
+    """Appends a new TypeAnnotation object to the current parent."""
+    if self._stack:
+      self._stack[-1].Append(type_obj)
+    else:
+      self.ClosingError(error_token)
+
+  def ClosingError(self, token):
+    """Reports an error about too many closing items, but only once."""
+    if not self._closing_error:
+      self._closing_error = True
+      self.Error(token, 'Too many closing items.')
+
+  def Error(self, token, message):
+    """Calls the error_handler to post an error message."""
+    if self._error_handler:
+      self._error_handler.HandleError(error.Error(
+          errors.JSDOC_DOES_NOT_PARSE,
+          'Error parsing jsdoc type at token "%s" (column: %d): %s' %
+          (token.string, token.start_index, message), token))
+
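
As a worked example of the parser and nullability rules above, here is a
minimal sketch that parses a single @type annotation and inspects the
resulting TypeAnnotation; it assumes closure_linter and its
testutil/erroraccumulator helpers are importable, and it mirrors the
_ParseType helper used in the tests that follow:

    from closure_linter import testutil
    from closure_linter.common import erroraccumulator

    accumulator = erroraccumulator.ErrorAccumulator()
    _, comments = testutil.ParseFunctionsAndComments(
        '/** @type {?(number|string)} **/', accumulator)

    # The parsed TypeAnnotation hangs off the @type doc flag.
    jstype = comments[0].GetDocFlags()[0].jstype

    print(repr(jstype))             # -> '?(number|string)' (reconstruction)
    print(jstype.GetNullability())  # -> True, because of the '?' modifier
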
diff --git a/catapult/third_party/closure_linter/closure_linter/typeannotation_test.py b/catapult/third_party/closure_linter/closure_linter/typeannotation_test.py
new file mode 100644
index 0000000..5e8e3cb
--- /dev/null
+++ b/catapult/third_party/closure_linter/closure_linter/typeannotation_test.py
@@ -0,0 +1,247 @@
+#!/usr/bin/env python
+# Copyright 2016 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for the typeannotation module."""
+
+
+
+
+import unittest as googletest
+
+from closure_linter import testutil
+from closure_linter.common import erroraccumulator
+
+CRAZY_TYPE = ('Array.<!function(new:X,{a:null},...(c|d)):'
+              'function(...(Object.<string>))>')
+
+
+class TypeErrorException(Exception):
+  """Exception for TypeErrors."""
+
+  def __init__(self, errors):
+    super(TypeErrorException, self).__init__()
+    self.errors = errors
+
+
+class TypeParserTest(googletest.TestCase):
+  """Tests for typeannotation parsing."""
+
+  def _ParseComment(self, script):
+    """Parse a script that contains one comment and return it."""
+    accumulator = erroraccumulator.ErrorAccumulator()
+    _, comments = testutil.ParseFunctionsAndComments(script, accumulator)
+    if accumulator.GetErrors():
+      raise TypeErrorException(accumulator.GetErrors())
+    self.assertEquals(1, len(comments))
+    return comments[0]
+
+  def _ParseType(self, type_str):
+    """Creates a comment to parse and returns the parsed type."""
+    comment = self._ParseComment('/** @type {%s} **/' % type_str)
+    return comment.GetDocFlags()[0].jstype
+
+  def assertProperReconstruction(self, type_str, matching_str=None):
+    """Parses the type and asserts the its repr matches the type.
+
+    If matching_str is specified, it will assert that the repr matches this
+    string instead.
+
+    Args:
+      type_str: The type string to parse.
+      matching_str: A string the __repr__ of the parsed type should match.
+    Returns:
+      The parsed js_type.
+    """
+    parsed_type = self._ParseType(type_str)
+    # Use listEqual assertion to more easily identify the difference
+    self.assertListEqual(list(matching_str or type_str),
+                         list(repr(parsed_type)))
+    self.assertEquals(matching_str or type_str, repr(parsed_type))
+
+    # Newlines will be inserted by the file writer.
+    self.assertEquals(type_str.replace('\n', ''), parsed_type.ToString())
+    return parsed_type
+
+  def assertNullable(self, type_str, nullable=True):
+    parsed_type = self.assertProperReconstruction(type_str)
+    self.assertEquals(nullable, parsed_type.GetNullability(),
+                      '"%s" should %sbe nullable' %
+                      (type_str, '' if nullable else 'not '))
+
+  def assertNotNullable(self, type_str):
+    return self.assertNullable(type_str, nullable=False)
+
+  def testReconstruction(self):
+    self.assertProperReconstruction('*')
+    self.assertProperReconstruction('number')
+    self.assertProperReconstruction('(((number)))')
+    self.assertProperReconstruction('!number')
+    self.assertProperReconstruction('?!number')
+    self.assertProperReconstruction('number=')
+    self.assertProperReconstruction('number=!?', '?!number=')
+    self.assertProperReconstruction('number|?string')
+    self.assertProperReconstruction('(number|string)')
+    self.assertProperReconstruction('?(number|string)')
+    self.assertProperReconstruction('Object.<number,string>')
+    self.assertProperReconstruction('function(new:Object)')
+    self.assertProperReconstruction('function(new:Object):number')
+    self.assertProperReconstruction('function(new:Object,Element):number')
+    self.assertProperReconstruction('function(this:T,...)')
+    self.assertProperReconstruction('{a:?number}')
+    self.assertProperReconstruction('{a:?number,b:(number|string)}')
+    self.assertProperReconstruction('{c:{nested_element:*}|undefined}')
+    self.assertProperReconstruction('{handleEvent:function(?):?}')
+    self.assertProperReconstruction('function():?|null')
+    self.assertProperReconstruction('null|function():?|bar')
+
+  def testOptargs(self):
+    self.assertProperReconstruction('number=')
+    self.assertProperReconstruction('number|string=')
+    self.assertProperReconstruction('(number|string)=')
+    self.assertProperReconstruction('(number|string=)')
+    self.assertProperReconstruction('(number=|string)')
+    self.assertProperReconstruction('function(...):number=')
+
+  def testIndepth(self):
+    # Do a deeper check of the crazy identifier.
+    crazy = self.assertProperReconstruction(CRAZY_TYPE)
+    self.assertEquals('Array.', crazy.identifier)
+    self.assertEquals(1, len(crazy.sub_types))
+    func1 = crazy.sub_types[0]
+    func2 = func1.return_type
+    self.assertEquals('function', func1.identifier)
+    self.assertEquals('function', func2.identifier)
+    self.assertEquals(3, len(func1.sub_types))
+    self.assertEquals(1, len(func2.sub_types))
+    self.assertEquals('Object.', func2.sub_types[0].sub_types[0].identifier)
+
+  def testIterIdentifiers(self):
+    nested_identifiers = self._ParseType('(a|{b:(c|function(new:d):e)})')
+    for identifier in ('a', 'b', 'c', 'd', 'e'):
+      self.assertIn(identifier, nested_identifiers.IterIdentifiers())
+
+  def testIsEmpty(self):
+    self.assertTrue(self._ParseType('').IsEmpty())
+    self.assertFalse(self._ParseType('?').IsEmpty())
+    self.assertFalse(self._ParseType('!').IsEmpty())
+    self.assertFalse(self._ParseType('<?>').IsEmpty())
+
+  def testIsConstructor(self):
+    self.assertFalse(self._ParseType('').IsConstructor())
+    self.assertFalse(self._ParseType('Array.<number>').IsConstructor())
+    self.assertTrue(self._ParseType('function(new:T)').IsConstructor())
+
+  def testIsVarArgsType(self):
+    self.assertTrue(self._ParseType('...number').IsVarArgsType())
+    self.assertTrue(self._ParseType('...Object|Array').IsVarArgsType())
+    self.assertTrue(self._ParseType('...(Object|Array)').IsVarArgsType())
+    self.assertFalse(self._ParseType('Object|...Array').IsVarArgsType())
+    self.assertFalse(self._ParseType('(...Object|Array)').IsVarArgsType())
+
+  def testIsUnknownType(self):
+    self.assertTrue(self._ParseType('?').IsUnknownType())
+    self.assertTrue(self._ParseType('Foo.<?>').sub_types[0].IsUnknownType())
+    self.assertFalse(self._ParseType('?|!').IsUnknownType())
+    self.assertTrue(self._ParseType('?|!').sub_types[0].IsUnknownType())
+    self.assertFalse(self._ParseType('!').IsUnknownType())
+
+    long_type = 'function():?|{handleEvent:function(?=):?,sample:?}|?='
+    record = self._ParseType(long_type)
+    # First check that there's not just one type with 3 return types, but three
+    # top-level types.
+    self.assertEquals(3, len(record.sub_types))
+
+    # Now extract all unknown type instances and verify that they really are.
+    handle_event, sample = record.sub_types[1].sub_types
+    for i, sub_type in enumerate([
+        record.sub_types[0].return_type,
+        handle_event.return_type,
+        handle_event.sub_types[0],
+        sample,
+        record.sub_types[2]]):
+      self.assertTrue(sub_type.IsUnknownType(),
+                      'Type %d should be the unknown type: %s\n%s' % (
+                          i, sub_type.tokens, record.Dump()))
+
+  def testTypedefNames(self):
+    easy = self._ParseType('{a}')
+    self.assertTrue(easy.record_type)
+
+    easy = self.assertProperReconstruction('{a}', '{a:}').sub_types[0]
+    self.assertEquals('a', easy.key_type.identifier)
+    self.assertEquals('', easy.identifier)
+
+    easy = self.assertProperReconstruction('{a:b}').sub_types[0]
+    self.assertEquals('a', easy.key_type.identifier)
+    self.assertEquals('b', easy.identifier)
+
+  def assertTypeError(self, type_str):
+    """Asserts that parsing the given type raises a linter error."""
+    self.assertRaises(TypeErrorException, self._ParseType, type_str)
+
+  def testParseBadTypes(self):
+    """Tests that several errors in types don't break the parser."""
+    self.assertTypeError('<')
+    self.assertTypeError('>')
+    self.assertTypeError('Foo.<Bar')
+    self.assertTypeError('Foo.Bar>=')
+    self.assertTypeError('Foo.<Bar>>=')
+    self.assertTypeError('(')
+    self.assertTypeError(')')
+    self.assertTypeError('Foo.<Bar)>')
+    self._ParseType(':')
+    self._ParseType(':foo')
+    self.assertTypeError(':)foo')
+    self.assertTypeError('(a|{b:(c|function(new:d):e')
+
+  def testNullable(self):
+    self.assertNullable('null')
+    self.assertNullable('Object')
+    self.assertNullable('?string')
+    self.assertNullable('?number')
+
+    self.assertNotNullable('string')
+    self.assertNotNullable('number')
+    self.assertNotNullable('boolean')
+    self.assertNotNullable('function(Object)')
+    self.assertNotNullable('function(Object):Object')
+    self.assertNotNullable('function(?Object):?Object')
+    self.assertNotNullable('!Object')
+
+    self.assertNotNullable('boolean|string')
+    self.assertNotNullable('(boolean|string)')
+
+    self.assertNullable('(boolean|string|null)')
+    self.assertNullable('(?boolean)')
+    self.assertNullable('?(boolean)')
+
+    self.assertNullable('(boolean|Object)')
+    self.assertNotNullable('(boolean|(string|{a:}))')
+
+  def testSpaces(self):
+    """Tests that spaces don't change the outcome."""
+    type_str = (' A < b | ( c | ? ! d e f ) > | '
+                'function ( x : . . . ) : { y : z = } ')
+    two_spaces = type_str.replace(' ', '  ')
+    no_spaces = type_str.replace(' ', '')
+    newlines = type_str.replace(' ', '\n * ')
+    self.assertProperReconstruction(no_spaces)
+    self.assertProperReconstruction(type_str, no_spaces)
+    self.assertProperReconstruction(two_spaces, no_spaces)
+    self.assertProperReconstruction(newlines, no_spaces)
+
+if __name__ == '__main__':
+  googletest.main()
+
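
A malformed annotation does not raise; TypeAnnotationParser reports it through
the error handler as a JSDOC_DOES_NOT_PARSE error, which the assertTypeError
cases above observe via the accumulator. A minimal sketch under the same
assumptions as the previous example:

    from closure_linter import testutil
    from closure_linter.common import erroraccumulator

    accumulator = erroraccumulator.ErrorAccumulator()
    # 'Foo.<Bar' is missing its closing '>', one of the bad types tested above.
    testutil.ParseFunctionsAndComments('/** @type {Foo.<Bar} **/', accumulator)

    if accumulator.GetErrors():
        print('annotation did not parse cleanly')
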
diff --git a/catapult/third_party/closure_linter/setup.py b/catapult/third_party/closure_linter/setup.py
old mode 100755
new mode 100644
index aa2ca4b..52bf566
--- a/catapult/third_party/closure_linter/setup.py
+++ b/catapult/third_party/closure_linter/setup.py
@@ -20,12 +20,12 @@
     from distutils.core import setup
 
 setup(name='closure_linter',
-      version='2.3.5',
+      version='2.3.19',
       description='Closure Linter',
       license='Apache',
       author='The Closure Linter Authors',
       author_email='opensource@google.com',
-      url='http://code.google.com/p/closure-linter',
+      url='https://github.com/google/closure-linter',
       install_requires=['python-gflags'],
       package_dir={'closure_linter': 'closure_linter'},
       packages=['closure_linter', 'closure_linter.common'],
diff --git a/catapult/third_party/coverage/.travis.yml b/catapult/third_party/coverage/.travis.yml
new file mode 100644
index 0000000..97679e1
--- /dev/null
+++ b/catapult/third_party/coverage/.travis.yml
@@ -0,0 +1,26 @@
+# Tell Travis what to do
+# https://travis-ci.org/nedbat/coveragepy
+
+language: python
+
+sudo: false
+
+python:
+  - 2.7
+
+env:
+  - TOXENV=py26
+  - TOXENV=py27
+  - TOXENV=py33
+  - TOXENV=py34
+  - TOXENV=pypy
+  - TOXENV=py27 COVERAGE_COVERAGE=yes
+
+install:
+  - pip install -r requirements/tox.pip
+
+script:
+  - tox
+  - if [ $COVERAGE_COVERAGE == 'yes' ]; then python igor.py combine_html; fi
+  - if [ $COVERAGE_COVERAGE == 'yes' ]; then pip install codecov; fi
+  - if [ $COVERAGE_COVERAGE == 'yes' ]; then codecov; fi
diff --git a/catapult/third_party/coverage/AUTHORS.txt b/catapult/third_party/coverage/AUTHORS.txt
new file mode 100644
index 0000000..de3d650
--- /dev/null
+++ b/catapult/third_party/coverage/AUTHORS.txt
@@ -0,0 +1,69 @@
+Coverage.py was originally written by Gareth Rees, and since 2004 has been
+extended and maintained by Ned Batchelder.
+
+Other contributions have been made by:
+
+Adi Roiban
+Alex Gaynor
+Alexander Todorov
+Anthony Sottile
+Arcadiy Ivanov
+Ben Finney
+Bill Hart
+Brandon Rhodes
+Brett Cannon
+Buck Evan
+Carl Gieringer
+Catherine Proulx
+Chris Adams
+Chris Rose
+Christian Heimes
+Christine Lytwynec
+Christoph Zwerschke
+Conrad Ho
+Danek Duvall
+Danny Allen
+David Christian
+David Stanek
+Detlev Offenbach
+Devin Jeanpierre
+Dmitry Trofimov
+Eduardo Schettino
+Edward Loper
+Geoff Bache
+George Paci
+George Song
+Greg Rogers
+Guillaume Chazarain
+Imri Goldberg
+Ionel Cristian Mărieș
+JT Olds
+Jessamyn Smith
+Jon Chappell
+Joseph Tate
+Julian Berman
+Krystian Kichewko
+Leonardo Pistone
+Lex Berezhny
+Marc Abramowitz
+Marcus Cobden
+Mark van der Wal
+Martin Fuzzey
+Matthew Desmarais
+Mickie Betz
+Noel O'Boyle
+Pablo Carballo
+Patrick Mezard
+Peter Portante
+Roger Hu
+Ross Lawley
+Sandra Martocchia
+Sigve Tjora
+Stan Hu
+Stefan Behnel
+Steve Leonard
+Steve Peak
+Ted Wexler
+Titus Brown
+Yury Selivanov
+Zooko Wilcox-O'Hearn
diff --git a/catapult/third_party/coverage/CHANGES.rst b/catapult/third_party/coverage/CHANGES.rst
new file mode 100644
index 0000000..7ff7334
--- /dev/null
+++ b/catapult/third_party/coverage/CHANGES.rst
@@ -0,0 +1,1500 @@
+.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+.. For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+==============================
+Change history for Coverage.py
+==============================
+
+
+Version 4.0.3, 24 November 2015
+-------------------------------
+
+- Fixed a mysterious problem that manifested in different ways: sometimes
+  hanging the process (`issue 420`_), sometimes making database connections
+  fail (`issue 445`_).
+
+- The XML report now has correct ``<source>`` elements when using a
+  ``--source=`` option somewhere besides the current directory.  This fixes
+  `issue 439`_. Thanks, Arcady Ivanov.
+
+- Fixed an unusual edge case of detecting source encodings, described in
+  `issue 443`_.
+
+- Help messages that mention the command to use now properly use the actual
+  command name, which might be different than "coverage".  Thanks to Ben Finney,
+  this closes `issue 438`_.
+
+.. _issue 420: https://bitbucket.org/ned/coveragepy/issues/420/coverage-40-hangs-indefinitely-on-python27
+.. _issue 438: https://bitbucket.org/ned/coveragepy/issues/438/parameterise-coverage-command-name
+.. _issue 439: https://bitbucket.org/ned/coveragepy/issues/439/incorrect-cobertura-file-sources-generated
+.. _issue 443: https://bitbucket.org/ned/coveragepy/issues/443/coverage-gets-confused-when-encoding
+.. _issue 445: https://bitbucket.org/ned/coveragepy/issues/445/django-app-cannot-connect-to-cassandra
+
+
+Version 4.0.2 --- 4 November 2015
+---------------------------------
+
+- More work on supporting unusually encoded source. Fixed `issue 431`_.
+
+- Files or directories with non-ASCII characters are now handled properly,
+  fixing `issue 432`_.
+
+- Setting a trace function with sys.settrace was broken by a change in 4.0.1,
+  as reported in `issue 436`_.  This is now fixed.
+
+- Officially support PyPy 4.0, which required no changes, just updates to the
+  docs.
+
+.. _issue 431: https://bitbucket.org/ned/coveragepy/issues/431/couldnt-parse-python-file-with-cp1252
+.. _issue 432: https://bitbucket.org/ned/coveragepy/issues/432/path-with-unicode-characters-various
+.. _issue 436: https://bitbucket.org/ned/coveragepy/issues/436/disabled-coverage-ctracer-may-rise-from
+
+
+Version 4.0.1 --- 13 October 2015
+---------------------------------
+
+- When combining data files, unreadable files will now generate a warning
+  instead of failing the command.  This is more in line with the older
+  coverage.py v3.7.1 behavior, which silently ignored unreadable files.
+  Prompted by `issue 418`_.
+
+- The --skip-covered option would skip reporting on 100% covered files, but
+  also skipped them when calculating total coverage.  This was wrong, it should
+  only remove lines from the report, not change the final answer.  This is now
+  fixed, closing `issue 423`_.
+
+- In 4.0, the data file recorded a summary of the system on which it was run.
+  Combined data files would keep all of those summaries.  This could lead to
+  enormous data files consisting of mostly repetitive useless information. That
+  summary is now gone, fixing `issue 415`_.  If you want summary information,
+  get in touch, and we'll figure out a better way to do it.
+
+- Test suites that mocked os.path.exists would experience strange failures, due
+  to coverage.py using their mock inadvertently.  This is now fixed, closing
+  `issue 416`_.
+
+- Importing a ``__init__`` module explicitly would lead to an error:
+  ``AttributeError: 'module' object has no attribute '__path__'``, as reported
+  in `issue 410`_.  This is now fixed.
+
+- Code that uses ``sys.settrace(sys.gettrace())`` used to incur a more than 2x
+  speed penalty.  Now there's no penalty at all. Fixes `issue 397`_.
+
+- Pyexpat C code will no longer be recorded as a source file, fixing
+  `issue 419`_.
+
+- The source kit now contains all of the files needed to have a complete source
+  tree, re-fixing `issue 137`_ and closing `issue 281`_.
+
+.. _issue 281: https://bitbucket.org/ned/coveragepy/issues/281/supply-scripts-for-testing-in-the
+.. _issue 397: https://bitbucket.org/ned/coveragepy/issues/397/stopping-and-resuming-coverage-with
+.. _issue 410: https://bitbucket.org/ned/coveragepy/issues/410/attributeerror-module-object-has-no
+.. _issue 415: https://bitbucket.org/ned/coveragepy/issues/415/repeated-coveragedataupdates-cause
+.. _issue 416: https://bitbucket.org/ned/coveragepy/issues/416/mocking-ospathexists-causes-failures
+.. _issue 418: https://bitbucket.org/ned/coveragepy/issues/418/json-parse-error
+.. _issue 419: https://bitbucket.org/ned/coveragepy/issues/419/nosource-no-source-for-code-path-to-c
+.. _issue 423: https://bitbucket.org/ned/coveragepy/issues/423/skip_covered-changes-reported-total
+
+
+Version 4.0 --- 20 September 2015
+---------------------------------
+
+No changes from 4.0b3
+
+
+Version 4.0b3 --- 7 September 2015
+----------------------------------
+
+- Reporting on an unmeasured file would fail with a traceback.  This is now
+  fixed, closing `issue 403`_.
+
+- The Jenkins ShiningPanda plugin looks for an obsolete file name to find the
+  HTML reports to publish, so it was failing under coverage.py 4.0.  Now we
+  create that file if we are running under Jenkins, to keep things working
+  smoothly. `issue 404`_.
+
+- Kits used to include tests and docs, but didn't install them anywhere, or
+  provide all of the supporting tools to make them useful.  Kits no longer
+  include tests and docs.  If you were using them from the older packages, get
+  in touch and help me understand how.
+
+.. _issue 403: https://bitbucket.org/ned/coveragepy/issues/403/hasherupdate-fails-with-typeerror-nonetype
+.. _issue 404: https://bitbucket.org/ned/coveragepy/issues/404/shiningpanda-jenkins-plugin-cant-find-html
+
+
+
+Version 4.0b2 --- 22 August 2015
+--------------------------------
+
+- 4.0b1 broke --append creating new data files.  This is now fixed, closing
+  `issue 392`_.
+
+- ``py.test --cov`` can write empty data, then touch files due to ``--source``,
+  which made coverage.py mistakenly force the data file to record lines instead
+  of arcs.  This would lead to a "Can't combine line data with arc data" error
+  message.  This is now fixed, and changed some method names in the
+  CoverageData interface.  Fixes `issue 399`_.
+
+- `CoverageData.read_fileobj` and `CoverageData.write_fileobj` replace the
+  `.read` and `.write` methods, and are now properly inverses of each other.
+
+- When using ``report --skip-covered``, a message will now be included in the
+  report output indicating how many files were skipped, and if all files are
+  skipped, coverage.py won't accidentally scold you for having no data to
+  report.  Thanks, Krystian Kichewko.
+
+- A new conversion utility has been added:  ``python -m coverage.pickle2json``
+  will convert v3.x pickle data files to v4.x JSON data files.  Thanks,
+  Alexander Todorov.  Closes `issue 395`_.
+
+- A new version identifier is available, `coverage.version_info`, a plain tuple
+  of values similar to `sys.version_info`_.
+
+.. _issue 392: https://bitbucket.org/ned/coveragepy/issues/392/run-append-doesnt-create-coverage-file
+.. _issue 395: https://bitbucket.org/ned/coveragepy/issues/395/rfe-read-pickled-files-as-well-for
+.. _issue 399: https://bitbucket.org/ned/coveragepy/issues/399/coverageexception-cant-combine-line-data
+.. _sys.version_info: https://docs.python.org/3/library/sys.html#sys.version_info
+
+
+Version 4.0b1 --- 2 August 2015
+-------------------------------
+
+- Coverage.py is now licensed under the Apache 2.0 license.  See NOTICE.txt for
+  details.  Closes `issue 313`_.
+
+- The data storage has been completely revamped.  The data file is now
+  JSON-based instead of a pickle, closing `issue 236`_.  The `CoverageData`
+  class is now a public supported documented API to the data file.
+
+- A new configuration option, ``[run] note``, lets you set a note that will be
+  stored in the `runs` section of the data file.  You can use this to annotate
+  the data file with any information you like.
+
+- Unrecognized configuration options will now print an error message and stop
+  coverage.py.  This should help prevent configuration mistakes from passing
+  silently.  Finishes `issue 386`_.
+
+- In parallel mode, ``coverage erase`` will now delete all of the data files,
+  fixing `issue 262`_.
+
+- Coverage.py now accepts a directory name for ``coverage run`` and will run a
+  ``__main__.py`` found there, just like Python will.  Fixes `issue 252`_.
+  Thanks, Dmitry Trofimov.
+
+- The XML report now includes a ``missing-branches`` attribute.  Thanks, Steve
+  Peak.  This is not a part of the Cobertura DTD, so the XML report no longer
+  references the DTD.
+
+- Missing branches in the HTML report now have a bit more information in the
+  right-hand annotations.  Hopefully this will make their meaning clearer.
+
+- All the reporting functions now behave the same if no data had been
+  collected, exiting with a status code of 1.  Fixed ``fail_under`` to be
+  applied even when the report is empty.  Thanks, Ionel Cristian Mărieș.
+
+- Plugins are now initialized differently.  Instead of looking for a class
+  called ``Plugin``, coverage.py looks for a function called ``coverage_init``.
+
+- A file-tracing plugin can now ask to have built-in Python reporting by
+  returning `"python"` from its `file_reporter()` method.
+
+- Code that was executed with `exec` would be mis-attributed to the file that
+  called it.  This is now fixed, closing `issue 380`_.
+
+- The ability to use item access on `Coverage.config` (introduced in 4.0a2) has
+  been changed to a more explicit `Coverage.get_option` and
+  `Coverage.set_option` API.
+
+- The ``Coverage.use_cache`` method is no longer supported.
+
+- The private method ``Coverage._harvest_data`` is now called
+  ``Coverage.get_data``, and returns the ``CoverageData`` containing the
+  collected data.
+
+- The project is consistently referred to as "coverage.py" throughout the code
+  and the documentation, closing `issue 275`_.
+
+- Combining data files with an explicit configuration file was broken in 4.0a6,
+  but now works again, closing `issue 385`_.
+
+- ``coverage combine`` now accepts files as well as directories.
+
+- The speed is back to 3.7.1 levels, after having slowed down due to plugin
+  support, finishing up `issue 387`_.
+
+.. _issue 236: https://bitbucket.org/ned/coveragepy/issues/236/pickles-are-bad-and-you-should-feel-bad
+.. _issue 252: https://bitbucket.org/ned/coveragepy/issues/252/coverage-wont-run-a-program-with
+.. _issue 262: https://bitbucket.org/ned/coveragepy/issues/262/when-parallel-true-erase-should-erase-all
+.. _issue 275: https://bitbucket.org/ned/coveragepy/issues/275/refer-consistently-to-project-as-coverage
+.. _issue 313: https://bitbucket.org/ned/coveragepy/issues/313/add-license-file-containing-2-3-or-4
+.. _issue 380: https://bitbucket.org/ned/coveragepy/issues/380/code-executed-by-exec-excluded-from
+.. _issue 385: https://bitbucket.org/ned/coveragepy/issues/385/coverage-combine-doesnt-work-with-rcfile
+.. _issue 386: https://bitbucket.org/ned/coveragepy/issues/386/error-on-unrecognised-configuration
+.. _issue 387: https://bitbucket.org/ned/coveragepy/issues/387/performance-degradation-from-371-to-40
+
+.. 40 issues closed in 4.0 below here
+
+
+Version 4.0a6 --- 21 June 2015
+------------------------------
+
+- Python 3.5b2 and PyPy 2.6.0 are supported.
+
+- The original module-level function interface to coverage.py is no longer
+  supported.  You must now create a ``coverage.Coverage`` object, and use
+  methods on it.
+
+- The ``coverage combine`` command now accepts any number of directories as
+  arguments, and will combine all the data files from those directories.  This
+  means you don't have to copy the files to one directory before combining.
+  Thanks, Christine Lytwynec.  Finishes `issue 354`_.
+
+- Branch coverage couldn't properly handle certain extremely long files. This
+  is now fixed (`issue 359`_).
+
+- Branch coverage didn't understand yield statements properly.  Mickie Betz
+  persisted in pursuing this despite Ned's pessimism.  Fixes `issue 308`_ and
+  `issue 324`_.
+
+- The COVERAGE_DEBUG environment variable can be used to set the ``[run] debug``
+  configuration option to control what internal operations are logged.
+
+- HTML reports were truncated at formfeed characters.  This is now fixed
+  (`issue 360`_).  It's always fun when the problem is due to a `bug in the
+  Python standard library <http://bugs.python.org/issue19035>`_.
+
+- Files with incorrect encoding declaration comments are no longer ignored by
+  the reporting commands, fixing `issue 351`_.
+
+- HTML reports now include a timestamp in the footer, closing `issue 299`_.
+  Thanks, Conrad Ho.
+
+- HTML reports now begrudgingly use double-quotes rather than single quotes,
+  because there are "software engineers" out there writing tools that read HTML
+  and somehow have no idea that single quotes exist.  Capitulates to the absurd
+  `issue 361`_.  Thanks, Jon Chappell.
+
+- The ``coverage annotate`` command now handles non-ASCII characters properly,
+  closing `issue 363`_.  Thanks, Leonardo Pistone.
+
+- Drive letters on Windows were not normalized correctly, now they are. Thanks,
+  Ionel Cristian Mărieș.
+
+- Plugin support had some bugs fixed, closing `issue 374`_ and `issue 375`_.
+  Thanks, Stefan Behnel.
+
+.. _issue 299: https://bitbucket.org/ned/coveragepy/issue/299/inserted-created-on-yyyy-mm-dd-hh-mm-in
+.. _issue 308: https://bitbucket.org/ned/coveragepy/issue/308/yield-lambda-branch-coverage
+.. _issue 324: https://bitbucket.org/ned/coveragepy/issue/324/yield-in-loop-confuses-branch-coverage
+.. _issue 351: https://bitbucket.org/ned/coveragepy/issue/351/files-with-incorrect-encoding-are-ignored
+.. _issue 354: https://bitbucket.org/ned/coveragepy/issue/354/coverage-combine-should-take-a-list-of
+.. _issue 359: https://bitbucket.org/ned/coveragepy/issue/359/xml-report-chunk-error
+.. _issue 360: https://bitbucket.org/ned/coveragepy/issue/360/html-reports-get-confused-by-l-in-the-code
+.. _issue 361: https://bitbucket.org/ned/coveragepy/issue/361/use-double-quotes-in-html-output-to
+.. _issue 363: https://bitbucket.org/ned/coveragepy/issue/363/annotate-command-hits-unicode-happy-fun
+.. _issue 374: https://bitbucket.org/ned/coveragepy/issue/374/c-tracer-lookups-fail-in
+.. _issue 375: https://bitbucket.org/ned/coveragepy/issue/375/ctracer_handle_return-reads-byte-code
+
+
+Version 4.0a5 --- 16 February 2015
+----------------------------------
+
+- Plugin support is now implemented in the C tracer instead of the Python
+  tracer. This greatly improves the speed of tracing projects using plugins.
+
+- Coverage.py now always adds the current directory to sys.path, so that
+  plugins can import files in the current directory (`issue 358`_).
+
+- If the `config_file` argument to the Coverage constructor is specified as
+  ".coveragerc", it is treated as if it were True.  This means setup.cfg is
+  also examined, and a missing file is not considered an error (`issue 357`_).
+
+- Wildly experimental: support for measuring processes started by the
+  multiprocessing module.  To use, set ``--concurrency=multiprocessing``,
+  either on the command line or in the .coveragerc file (`issue 117`_). Thanks,
+  Eduardo Schettino.  Currently, this does not work on Windows.
+
+- A new warning is possible, if a desired file isn't measured because it was
+  imported before coverage.py was started (`issue 353`_).
+
+- The `coverage.process_startup` function now will start coverage measurement
+  only once, no matter how many times it is called.  This fixes problems due
+  to unusual virtualenv configurations (`issue 340`_).
+
+- Added 3.5.0a1 to the list of supported CPython versions.
+
+.. _issue 117: https://bitbucket.org/ned/coveragepy/issue/117/enable-coverage-measurement-of-code-run-by
+.. _issue 340: https://bitbucket.org/ned/coveragepy/issue/340/keyerror-subpy
+.. _issue 353: https://bitbucket.org/ned/coveragepy/issue/353/40a3-introduces-an-unexpected-third-case
+.. _issue 357: https://bitbucket.org/ned/coveragepy/issue/357/behavior-changed-when-coveragerc-is
+.. _issue 358: https://bitbucket.org/ned/coveragepy/issue/358/all-coverage-commands-should-adjust
+
+
+Version 4.0a4 --- 25 January 2015
+---------------------------------
+
+- Plugins can now provide sys_info for debugging output.
+
+- Started plugins documentation.
+
+- Prepared to move the docs to readthedocs.org.
+
+
+Version 4.0a3 --- 20 January 2015
+---------------------------------
+
+- Reports now use file names with extensions.  Previously, a report would
+  describe a/b/c.py as "a/b/c".  Now it is shown as "a/b/c.py".  This allows
+  for better support of non-Python files, and also fixed `issue 69`_.
+
+- The XML report now reports each directory as a package again.  This was a bad
+  regression, I apologize.  This was reported in `issue 235`_, which is now
+  fixed.
+
+- A new configuration option for the XML report: ``[xml] package_depth``
+  controls which directories are identified as packages in the report.
+  Directories deeper than this depth are not reported as packages.
+  The default is that all directories are reported as packages.
+  Thanks, Lex Berezhny.
+
+- When looking for the source for a frame, check if the file exists. On
+  Windows, .pyw files are no longer recorded as .py files. Along the way, this
+  fixed `issue 290`_.
+
+- Empty files are now reported as 100% covered in the XML report, not 0%
+  covered (`issue 345`_).
+
+- Regexes in the configuration file are now compiled as soon as they are read,
+  to provide error messages earlier (`issue 349`_).
+
+.. _issue 69: https://bitbucket.org/ned/coveragepy/issue/69/coverage-html-overwrite-files-that-doesnt
+.. _issue 235: https://bitbucket.org/ned/coveragepy/issue/235/package-name-is-missing-in-xml-report
+.. _issue 290: https://bitbucket.org/ned/coveragepy/issue/290/running-programmatically-with-pyw-files
+.. _issue 345: https://bitbucket.org/ned/coveragepy/issue/345/xml-reports-line-rate-0-for-empty-files
+.. _issue 349: https://bitbucket.org/ned/coveragepy/issue/349/bad-regex-in-config-should-get-an-earlier
+
+
+Version 4.0a2 --- 14 January 2015
+---------------------------------
+
+- Officially support PyPy 2.4, and PyPy3 2.4.  Drop support for
+  CPython 3.2 and older versions of PyPy.  The code won't work on CPython 3.2.
+  It will probably still work on older versions of PyPy, but I'm not testing
+  against them.
+
+- Plugins!
+
+- The original command line switches (`-x` to run a program, etc) are no
+  longer supported.
+
+- A new option: `coverage report --skip-covered` will reduce the number of
+  files reported by skipping files with 100% coverage.  Thanks, Krystian
+  Kichewko.  This means that empty `__init__.py` files will be skipped, since
+  they are 100% covered, closing `issue 315`_.
+
+- You can now specify the ``--fail-under`` option in the ``.coveragerc`` file
+  as the ``[report] fail_under`` option.  This closes `issue 314`_.
+
+- The ``COVERAGE_OPTIONS`` environment variable is no longer supported.  It was
+  a hack for ``--timid`` before configuration files were available.
+
+- The HTML report now has filtering.  Type text into the Filter box on the
+  index page, and only modules with that text in the name will be shown.
+  Thanks, Danny Allen.
+
+- The textual report and the HTML report used to report partial branches
+  differently for no good reason.  Now the text report's "missing branches"
+  column is a "partial branches" column so that both reports show the same
+  numbers.  This closes `issue 342`_.
+
+- If you specify a ``--rcfile`` that cannot be read, you will get an error
+  message.  Fixes `issue 343`_.
+
+- The ``--debug`` switch can now be used on any command.
+
+- You can now programmatically adjust the configuration of coverage.py by
+  setting items on `Coverage.config` after construction.
+
+- A module run with ``-m`` can be used as the argument to ``--source``, fixing
+  `issue 328`_.  Thanks, Buck Evan.
+
+- The regex for matching exclusion pragmas has been fixed to allow more kinds
+  of whitespace, fixing `issue 334`_.
+
+- Made some PyPy-specific tweaks to improve speed under PyPy.  Thanks, Alex
+  Gaynor.
+
+- In some cases, with a source file missing a final newline, coverage.py would
+  count statements incorrectly.  This is now fixed, closing `issue 293`_.
+
+- The status.dat file that HTML reports use to avoid re-creating files that
+  haven't changed is now a JSON file instead of a pickle file.  This obviates
+  `issue 287`_ and `issue 237`_.
+
+.. _issue 237: https://bitbucket.org/ned/coveragepy/issue/237/htmlcov-with-corrupt-statusdat
+.. _issue 287: https://bitbucket.org/ned/coveragepy/issue/287/htmlpy-doesnt-specify-pickle-protocol
+.. _issue 293: https://bitbucket.org/ned/coveragepy/issue/293/number-of-statement-detection-wrong-if-no
+.. _issue 314: https://bitbucket.org/ned/coveragepy/issue/314/fail_under-param-not-working-in-coveragerc
+.. _issue 315: https://bitbucket.org/ned/coveragepy/issue/315/option-to-omit-empty-files-eg-__init__py
+.. _issue 328: https://bitbucket.org/ned/coveragepy/issue/328/misbehavior-in-run-source
+.. _issue 334: https://bitbucket.org/ned/coveragepy/issue/334/pragma-not-recognized-if-tab-character
+.. _issue 342: https://bitbucket.org/ned/coveragepy/issue/342/console-and-html-coverage-reports-differ
+.. _issue 343: https://bitbucket.org/ned/coveragepy/issue/343/an-explicitly-named-non-existent-config
+
+
+Version 4.0a1 --- 27 September 2014
+-----------------------------------
+
+- Python versions supported are now CPython 2.6, 2.7, 3.2, 3.3, and 3.4, and
+  PyPy 2.2.
+
+- Gevent, eventlet, and greenlet are now supported, closing `issue 149`_.
+  The ``concurrency`` setting specifies the concurrency library in use.  Huge
+  thanks to Peter Portante for initial implementation, and to Joe Jevnik for
+  the final insight that completed the work.
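+
+  As a sketch, the setting goes in the ``[run]`` section of the configuration
+  file; the value names whichever library your program actually uses::
+
+      [run]
+      concurrency = gevent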
+
+- Options are now also read from a setup.cfg file, if any.  Sections are
+  prefixed with "coverage:", so the ``[run]`` options will be read from the
+  ``[coverage:run]`` section of setup.cfg.  Finishes `issue 304`_.
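+
+  A minimal setup.cfg sketch (the ``branch`` option is just one example of a
+  ``[run]`` setting)::
+
+      [coverage:run]
+      branch = True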
+
+- The ``report -m`` command can now show missing branches when reporting on
+  branch coverage.  Thanks, Steve Leonard. Closes `issue 230`_.
+
+- The XML report now contains a <source> element, fixing `issue 94`_.  Thanks
+  Stan Hu.
+
+- The class defined in the coverage module is now called ``Coverage`` instead
+  of ``coverage``, though the old name still works, for backward compatibility.
+
+- The ``fail-under`` value is now rounded the same as reported results,
+  preventing paradoxical results, fixing `issue 284`_.
+
+- The XML report will now create the output directory if need be, fixing
+  `issue 285`_.  Thanks, Chris Rose.
+
+- HTML reports no longer raise UnicodeDecodeError if a Python file has
+  undecodable characters, fixing `issue 303`_ and `issue 331`_.
+
+- The annotate command will now annotate all files, not just ones relative to
+  the current directory, fixing `issue 57`_.
+
+- The coverage module no longer causes deprecation warnings on Python 3.4 by
+  importing the imp module, fixing `issue 305`_.
+
+- Encoding declarations in source files are only considered if they are truly
+  comments.  Thanks, Anthony Sottile.
+
+.. _issue 57: https://bitbucket.org/ned/coveragepy/issue/57/annotate-command-fails-to-annotate-many
+.. _issue 94: https://bitbucket.org/ned/coveragepy/issue/94/coverage-xml-doesnt-produce-sources
+.. _issue 149: https://bitbucket.org/ned/coveragepy/issue/149/coverage-gevent-looks-broken
+.. _issue 230: https://bitbucket.org/ned/coveragepy/issue/230/show-line-no-for-missing-branches-in
+.. _issue 284: https://bitbucket.org/ned/coveragepy/issue/284/fail-under-should-show-more-precision
+.. _issue 285: https://bitbucket.org/ned/coveragepy/issue/285/xml-report-fails-if-output-file-directory
+.. _issue 303: https://bitbucket.org/ned/coveragepy/issue/303/unicodedecodeerror
+.. _issue 304: https://bitbucket.org/ned/coveragepy/issue/304/attempt-to-get-configuration-from-setupcfg
+.. _issue 305: https://bitbucket.org/ned/coveragepy/issue/305/pendingdeprecationwarning-the-imp-module
+.. _issue 331: https://bitbucket.org/ned/coveragepy/issue/331/failure-of-encoding-detection-on-python2
+
+
+Version 3.7.1 --- 13 December 2013
+----------------------------------
+
+- Improved the speed of HTML report generation by about 20%.
+
+- Fixed the mechanism for finding OS-installed static files for the HTML report
+  so that it will actually find OS-installed static files.
+
+
+Version 3.7 --- 6 October 2013
+------------------------------
+
+- Added the ``--debug`` switch to ``coverage run``.  It accepts a list of
+  options indicating the type of internal activity to log to stderr.
+
+- Improved the branch coverage facility, fixing `issue 92`_ and `issue 175`_.
+
+- Running code with ``coverage run -m`` now behaves more like Python does,
+  setting sys.path properly, which fixes `issue 207`_ and `issue 242`_.
+
+- Coverage.py can now run .pyc files directly, closing `issue 264`_.
+
+- Coverage.py properly supports .pyw files, fixing `issue 261`_.
+
+- Omitting files within a tree specified with the ``source`` option would
+  cause them to be incorrectly marked as unexecuted, as described in
+  `issue 218`_.  This is now fixed.
+
+- When specifying paths to alias together during data combining, you can now
+  specify relative paths, fixing `issue 267`_.
+
+- Most file paths can now be specified with username expansion (``~/src``, or
+  ``~build/src``, for example), and with environment variable expansion
+  (``build/$BUILDNUM/src``).
+
+- Trying to create an XML report with no files to report on would cause a
+  ZeroDivisionError, but no longer does, fixing `issue 250`_.
+
+- When running a threaded program under the Python tracer, coverage.py no
+  longer issues a spurious warning about the trace function changing: "Trace
+  function changed, measurement is likely wrong: None."  This fixes `issue
+  164`_.
+
+- Static files necessary for HTML reports are found in system-installed places,
+  to ease OS-level packaging of coverage.py.  Closes `issue 259`_.
+
+- Source files with encoding declarations, but a blank first line, were not
+  decoded properly.  Now they are.  Thanks, Roger Hu.
+
+- The source kit now includes the ``__main__.py`` file in the root coverage
+  directory, fixing `issue 255`_.
+
+.. _issue 92: https://bitbucket.org/ned/coveragepy/issue/92/finally-clauses-arent-treated-properly-in
+.. _issue 164: https://bitbucket.org/ned/coveragepy/issue/164/trace-function-changed-warning-when-using
+.. _issue 175: https://bitbucket.org/ned/coveragepy/issue/175/branch-coverage-gets-confused-in-certain
+.. _issue 207: https://bitbucket.org/ned/coveragepy/issue/207/run-m-cannot-find-module-or-package-in
+.. _issue 242: https://bitbucket.org/ned/coveragepy/issue/242/running-a-two-level-package-doesnt-work
+.. _issue 218: https://bitbucket.org/ned/coveragepy/issue/218/run-command-does-not-respect-the-omit-flag
+.. _issue 250: https://bitbucket.org/ned/coveragepy/issue/250/uncaught-zerodivisionerror-when-generating
+.. _issue 255: https://bitbucket.org/ned/coveragepy/issue/255/directory-level-__main__py-not-included-in
+.. _issue 259: https://bitbucket.org/ned/coveragepy/issue/259/allow-use-of-system-installed-third-party
+.. _issue 261: https://bitbucket.org/ned/coveragepy/issue/261/pyw-files-arent-reported-properly
+.. _issue 264: https://bitbucket.org/ned/coveragepy/issue/264/coverage-wont-run-pyc-files
+.. _issue 267: https://bitbucket.org/ned/coveragepy/issue/267/relative-path-aliases-dont-work
+
+
+Version 3.6 --- 5 January 2013
+------------------------------
+
+- Added a page to the docs about troublesome situations, closing `issue 226`_,
+  and added some info to the TODO file, closing `issue 227`_.
+
+.. _issue 226: https://bitbucket.org/ned/coveragepy/issue/226/make-readme-section-to-describe-when
+.. _issue 227: https://bitbucket.org/ned/coveragepy/issue/227/update-todo
+
+
+Version 3.6b3 --- 29 December 2012
+----------------------------------
+
+- Beta 2 broke the nose plugin. It's fixed again, closing `issue 224`_.
+
+.. _issue 224: https://bitbucket.org/ned/coveragepy/issue/224/36b2-breaks-nosexcover
+
+
+Version 3.6b2 --- 23 December 2012
+----------------------------------
+
+- Coverage.py runs on Python 2.3 and 2.4 again. It was broken in 3.6b1.
+
+- The C extension is optionally compiled using a different, more widely-used
+  technique, taking another stab at fixing `issue 80`_ once and for all.
+
+- Combining data files would create entries for phantom files if used with
+  ``source`` and path aliases.  It no longer does.
+
+- ``debug sys`` now shows the configuration file path that was read.
+
+- If an oddly-behaved package claims that code came from an empty-string
+  file name, coverage.py no longer associates it with the directory name,
+  fixing `issue 221`_.
+
+.. _issue 221: https://bitbucket.org/ned/coveragepy/issue/221/coveragepy-incompatible-with-pyratemp
+
+
+Version 3.6b1 --- 28 November 2012
+----------------------------------
+
+- Wildcards in ``include=`` and ``omit=`` arguments were not handled properly
+  in reporting functions, though they were when running.  Now they are handled
+  uniformly, closing `issue 143`_ and `issue 163`_.  **NOTE**: it is possible
+  that your configurations may now be incorrect.  If you use ``include`` or
+  ``omit`` during reporting, whether on the command line, through the API, or
+  in a configuration file, please check carefully that you were not relying on
+  the old broken behavior.
+
+- The **report**, **html**, and **xml** commands now accept a ``--fail-under``
+  switch that indicates in the exit status whether the coverage percentage was
+  less than a particular value.  Closes `issue 139`_.
+
+- The reporting functions coverage.report(), coverage.html_report(), and
+  coverage.xml_report() now all return a float, the total percentage covered
+  measurement.
+
+- The HTML report's title can now be set in the configuration file, with the
+  ``--title`` switch on the command line, or via the API.
+
+- Configuration files now support substitution of environment variables, using
+  syntax like ``${WORD}``.  Closes `issue 97`_.
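+
+  A sketch of the syntax (the option and environment variable chosen here are
+  illustrative)::
+
+      [run]
+      data_file = ${BUILD_DIR}/.coverage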
+
+- Embarrassingly, the ``[xml] output=`` setting in the .coveragerc file simply
+  didn't work.  Now it does.
+
+- The XML report now consistently uses file names for the file name attribute,
+  rather than sometimes using module names.  Fixes `issue 67`_.
+  Thanks, Marcus Cobden.
+
+- Coverage percentage metrics are now computed slightly differently under
+  branch coverage.  This means that completely unexecuted files will now
+  correctly have 0% coverage, fixing `issue 156`_.  This also means that your
+  total coverage numbers will generally now be lower if you are measuring
+  branch coverage.
+
+- When installing, in addition to creating a "coverage" command, two new
+  aliases are now also installed.  A "coverage2" or "coverage3" command will be
+  created, depending on whether you are installing in Python 2.x or 3.x.
+  A "coverage-X.Y" command will also be created corresponding to your specific
+  version of Python.  Closes `issue 111`_.
+
+- The coverage.py installer no longer tries to bootstrap setuptools or
+  Distribute.  You must have one of them installed first, as `issue 202`_
+  recommended.
+
+- The coverage.py kit now includes docs (closing `issue 137`_) and tests.
+
+- On Windows, files are now reported in their correct case, fixing `issue 89`_
+  and `issue 203`_.
+
+- If a file is missing during reporting, the path shown in the error message
+  is now correct, rather than an incorrect path in the current directory.
+  Fixes `issue 60`_.
+
+- Running an HTML report in Python 3 in the same directory as an old Python 2
+  HTML report would fail with a UnicodeDecodeError. This issue (`issue 193`_)
+  is now fixed.
+
+- Fixed yet another error trying to parse non-Python files as Python, this
+  time an IndentationError, closing `issue 82`_ for the fourth time...
+
+- If `coverage xml` fails because there is no data to report, it used to
+  create a zero-length XML file.  Now it doesn't, fixing `issue 210`_.
+
+- Jython files now work with the ``--source`` option, fixing `issue 100`_.
+
+- Running coverage.py under a debugger is unlikely to work, but it shouldn't
+  fail with "TypeError: 'NoneType' object is not iterable".  Fixes `issue
+  201`_.
+
+- On some Linux distributions, when installed with the OS package manager,
+  coverage.py would report its own code as part of the results.  Now it won't,
+  fixing `issue 214`_, though this will take some time to be repackaged by the
+  operating systems.
+
+- Docstrings for the legacy singleton methods are more helpful.  Thanks Marius
+  Gedminas.  Closes `issue 205`_.
+
+- The pydoc tool can now show documentation for the class `coverage.coverage`.
+  Closes `issue 206`_.
+
+- Added a page to the docs about contributing to coverage.py, closing
+  `issue 171`_.
+
+- When coverage.py ended unsuccessfully, it may have reported odd errors like
+  ``'NoneType' object has no attribute 'isabs'``.  It no longer does,
+  so kiss `issue 153`_ goodbye.
+
+.. _issue 60: https://bitbucket.org/ned/coveragepy/issue/60/incorrect-path-to-orphaned-pyc-files
+.. _issue 67: https://bitbucket.org/ned/coveragepy/issue/67/xml-report-filenames-may-be-generated
+.. _issue 89: https://bitbucket.org/ned/coveragepy/issue/89/on-windows-all-packages-are-reported-in
+.. _issue 97: https://bitbucket.org/ned/coveragepy/issue/97/allow-environment-variables-to-be
+.. _issue 100: https://bitbucket.org/ned/coveragepy/issue/100/source-directive-doesnt-work-for-packages
+.. _issue 111: https://bitbucket.org/ned/coveragepy/issue/111/when-installing-coverage-with-pip-not
+.. _issue 137: https://bitbucket.org/ned/coveragepy/issue/137/provide-docs-with-source-distribution
+.. _issue 139: https://bitbucket.org/ned/coveragepy/issue/139/easy-check-for-a-certain-coverage-in-tests
+.. _issue 143: https://bitbucket.org/ned/coveragepy/issue/143/omit-doesnt-seem-to-work-in-coverage
+.. _issue 153: https://bitbucket.org/ned/coveragepy/issue/153/non-existent-filename-triggers
+.. _issue 156: https://bitbucket.org/ned/coveragepy/issue/156/a-completely-unexecuted-file-shows-14
+.. _issue 163: https://bitbucket.org/ned/coveragepy/issue/163/problem-with-include-and-omit-filename
+.. _issue 171: https://bitbucket.org/ned/coveragepy/issue/171/how-to-contribute-and-run-tests
+.. _issue 193: https://bitbucket.org/ned/coveragepy/issue/193/unicodedecodeerror-on-htmlpy
+.. _issue 201: https://bitbucket.org/ned/coveragepy/issue/201/coverage-using-django-14-with-pydb-on
+.. _issue 202: https://bitbucket.org/ned/coveragepy/issue/202/get-rid-of-ez_setuppy-and
+.. _issue 203: https://bitbucket.org/ned/coveragepy/issue/203/duplicate-filenames-reported-when-filename
+.. _issue 205: https://bitbucket.org/ned/coveragepy/issue/205/make-pydoc-coverage-more-friendly
+.. _issue 206: https://bitbucket.org/ned/coveragepy/issue/206/pydoc-coveragecoverage-fails-with-an-error
+.. _issue 210: https://bitbucket.org/ned/coveragepy/issue/210/if-theres-no-coverage-data-coverage-xml
+.. _issue 214: https://bitbucket.org/ned/coveragepy/issue/214/coveragepy-measures-itself-on-precise
+
+
+Version 3.5.3 --- 29 September 2012
+-----------------------------------
+
+- Line numbers in the HTML report line up better with the source lines, fixing
+  `issue 197`_, thanks Marius Gedminas.
+
+- When specifying a directory as the ``source=`` option, the directory itself
+  no longer needs an ``__init__.py`` file for its files to be considered as
+  source, though its sub-directories still do.
+
+- Files encoded as UTF-8 with a BOM are now properly handled, fixing
+  `issue 179`_.  Thanks, Pablo Carballo.
+
+- Fixed more cases of non-Python files being reported as Python source, and
+  then not being able to parse them as Python.  Closes `issue 82`_ (again).
+  Thanks, Julian Berman.
+
+- Fixed memory leaks under Python 3, thanks, Brett Cannon. Closes `issue 147`_.
+
+- Optimized .pyo files may not have been handled correctly, `issue 195`_.
+  Thanks, Marius Gedminas.
+
+- Certain unusually named file paths could have been mangled during reporting,
+  `issue 194`_.  Thanks, Marius Gedminas.
+
+- Try to do a better job of the impossible task of detecting when we can't
+  build the C extension, fixing `issue 183`_.
+
+- Testing is now done with `tox`_, thanks, Marc Abramowitz.
+
+.. _issue 147: https://bitbucket.org/ned/coveragepy/issue/147/massive-memory-usage-by-ctracer
+.. _issue 179: https://bitbucket.org/ned/coveragepy/issue/179/htmlreporter-fails-when-source-file-is
+.. _issue 183: https://bitbucket.org/ned/coveragepy/issue/183/install-fails-for-python-23
+.. _issue 194: https://bitbucket.org/ned/coveragepy/issue/194/filelocatorrelative_filename-could-mangle
+.. _issue 195: https://bitbucket.org/ned/coveragepy/issue/195/pyo-file-handling-in-codeunit
+.. _issue 197: https://bitbucket.org/ned/coveragepy/issue/197/line-numbers-in-html-report-do-not-align
+.. _tox: http://tox.readthedocs.org/
+
+
+Version 3.5.2 --- 4 May 2012
+----------------------------
+
+No changes since 3.5.2b1
+
+
+Version 3.5.2b1 --- 29 April 2012
+---------------------------------
+
+- The HTML report has slightly tweaked controls: the buttons at the top of
+  the page are color-coded to the source lines they affect.
+
+- Custom CSS can be applied to the HTML report by specifying a CSS file as
+  the ``extra_css`` configuration value in the ``[html]`` section.
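+
+  A minimal sketch (the file name is illustrative)::
+
+      [html]
+      extra_css = my_styles.css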
+
+- Source files with custom encodings declared in a comment at the top are now
+  properly handled during reporting on Python 2.  Python 3 always handled them
+  properly.  This fixes `issue 157`_.
+
+- Backup files left behind by editors are no longer collected by the source=
+  option, fixing `issue 168`_.
+
+- If a file doesn't parse properly as Python, we don't report it as an error
+  if the file name seems like maybe it wasn't meant to be Python.  This is a
+  pragmatic fix for `issue 82`_.
+
+- The ``-m`` switch on ``coverage report``, which includes missing line numbers
+  in the summary report, can now be specified as ``show_missing`` in the
+  config file.  Closes `issue 173`_.
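+
+  A sketch of the config-file form::
+
+      [report]
+      show_missing = True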
+
+- When running a module with ``coverage run -m <modulename>``, certain details
+  of the execution environment weren't the same as for
+  ``python -m <modulename>``.  This had the unfortunate side-effect of making
+  ``coverage run -m unittest discover`` not work if you had tests in a
+  directory named "test".  This fixes `issue 155`_ and `issue 142`_.
+
+- Now the exit status of your product code is properly used as the process
+  status when running ``python -m coverage run ...``.  Thanks, JT Olds.
+
+- When installing into pypy, we no longer attempt (and fail) to compile
+  the C tracer function, closing `issue 166`_.
+
+.. _issue 142: https://bitbucket.org/ned/coveragepy/issue/142/executing-python-file-syspath-is-replaced
+.. _issue 155: https://bitbucket.org/ned/coveragepy/issue/155/cant-use-coverage-run-m-unittest-discover
+.. _issue 157: https://bitbucket.org/ned/coveragepy/issue/157/chokes-on-source-files-with-non-utf-8
+.. _issue 166: https://bitbucket.org/ned/coveragepy/issue/166/dont-try-to-compile-c-extension-on-pypy
+.. _issue 168: https://bitbucket.org/ned/coveragepy/issue/168/dont-be-alarmed-by-emacs-droppings
+.. _issue 173: https://bitbucket.org/ned/coveragepy/issue/173/theres-no-way-to-specify-show-missing-in
+
+
+Version 3.5.1 --- 23 September 2011
+-----------------------------------
+
+- The ``[paths]`` feature unfortunately didn't work in real world situations
+  where you wanted to, you know, report on the combined data.  Now all paths
+  stored in the combined file are canonicalized properly.
+
+
+Version 3.5.1b1 --- 28 August 2011
+----------------------------------
+
+- When combining data files from parallel runs, you can now instruct
+  coverage.py about which directories are equivalent on different machines.  A
+  ``[paths]`` section in the configuration file lists paths that are to be
+  considered equivalent.  Finishes `issue 17`_.
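+
+  A sketch of such a section (the paths are illustrative; the first entry is
+  the canonical location that the others are mapped onto)::
+
+      [paths]
+      source =
+          src/
+          /jenkins/build/*/src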
+
+- for-else constructs are understood better, and don't cause erroneous partial
+  branch warnings.  Fixes `issue 122`_.
+
+- Branch coverage for ``with`` statements is improved, fixing `issue 128`_.
+
+- The number of partial branches reported on the HTML summary page was
+  different than the number reported on the individual file pages.  This is
+  now fixed.
+
+- An explicit include directive to measure files in the Python installation
+  wouldn't work because of the standard library exclusion.  Now the include
+  directive takes precedence, and the files will be measured.  Fixes
+  `issue 138`_.
+
+- The HTML report now handles Unicode characters in Python source files
+  properly.  This fixes `issue 124`_ and `issue 144`_. Thanks, Devin
+  Jeanpierre.
+
+- In order to help the core developers measure the test coverage of the
+  standard library, Brandon Rhodes devised an aggressive hack to trick Python
+  into running some coverage.py code before anything else in the process.
+  See the coverage/fullcoverage directory if you are interested.
+
+.. _issue 17: http://bitbucket.org/ned/coveragepy/issue/17/support-combining-coverage-data-from
+.. _issue 122: http://bitbucket.org/ned/coveragepy/issue/122/for-else-always-reports-missing-branch
+.. _issue 124: http://bitbucket.org/ned/coveragepy/issue/124/no-arbitrary-unicode-in-html-reports-in
+.. _issue 128: http://bitbucket.org/ned/coveragepy/issue/128/branch-coverage-of-with-statement-in-27
+.. _issue 138: http://bitbucket.org/ned/coveragepy/issue/138/include-should-take-precedence-over-is
+.. _issue 144: http://bitbucket.org/ned/coveragepy/issue/144/failure-generating-html-output-for
+
+
+Version 3.5 --- 29 June 2011
+----------------------------
+
+- The HTML report hotkeys now behave slightly differently when the current
+  chunk isn't visible at all:  a chunk on the screen will be selected,
+  instead of the old behavior of jumping to the literal next chunk.
+  The hotkeys now work in Google Chrome.  Thanks, Guido van Rossum.
+
+
+Version 3.5b1 --- 5 June 2011
+-----------------------------
+
+- The HTML report now has hotkeys.  Try ``n``, ``s``, ``m``, ``x``, ``b``,
+  ``p``, and ``c`` on the overview page to change the column sorting.
+  On a file page, ``r``, ``m``, ``x``, and ``p`` toggle the run, missing,
+  excluded, and partial line markings.  You can navigate the highlighted
+  sections of code by using the ``j`` and ``k`` keys for next and previous.
+  The ``1`` (one) key jumps to the first highlighted section in the file,
+  and ``0`` (zero) scrolls to the top of the file.
+
+- The ``--omit`` and ``--include`` switches now interpret their values more
+  usefully.  If the value starts with a wildcard character, it is used as-is.
+  If it does not, it is interpreted relative to the current directory.
+  Closes `issue 121`_.
+
+- Partial branch warnings can now be pragma'd away.  The configuration option
+  ``partial_branches`` is a list of regular expressions.  Lines matching any of
+  those expressions will never be marked as a partial branch.  In addition,
+  there's a built-in list of regular expressions for statements which should
+  never be marked as partial.  This list includes ``while True:``, ``while 1:``,
+  ``if 1:``, and ``if 0:``.
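+
+  A sketch of the configuration form (the regex shown is illustrative)::
+
+      [report]
+      partial_branches =
+          pragma: no branch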
+
+- The ``coverage()`` constructor accepts single strings for the ``omit=`` and
+  ``include=`` arguments, adapting to a common error in programmatic use.
+
+- Modules can now be run directly using ``coverage run -m modulename``, to
+  mirror Python's ``-m`` flag.  Closes `issue 95`_, thanks, Brandon Rhodes.
+
+- ``coverage run`` didn't emulate Python accurately in one small detail: the
+  current directory inserted into ``sys.path`` was relative rather than
+  absolute. This is now fixed.
+
+- HTML reporting is now incremental: a record is kept of the data that
+  produced the HTML reports, and only files whose data has changed will
+  be generated.  This should make most HTML reporting faster.
+
+- Pathological code execution could disable the trace function behind our
+  backs, leading to incorrect code measurement.  Now if this happens,
+  coverage.py will issue a warning, at least alerting you to the problem.
+  Closes `issue 93`_.  Thanks to Marius Gedminas for the idea.
+
+- The C-based trace function now behaves properly when saved and restored
+  with ``sys.gettrace()`` and ``sys.settrace()``.  This fixes `issue 125`_
+  and `issue 123`_.  Thanks, Devin Jeanpierre.
+
+- Source files are now opened with Python 3.2's ``tokenize.open()`` where
+  possible, to get the best handling of Python source files with encodings.
+  Closes `issue 107`_, thanks, Brett Cannon.
+
+- Syntax errors in supposed Python files can now be ignored during reporting
+  with the ``-i`` switch just like other source errors.  Closes `issue 115`_.
+
+- Installation from source now succeeds on machines without a C compiler,
+  closing `issue 80`_.
+
+- Coverage.py can now be run directly from a working tree by specifying
+  the directory name to python:  ``python coverage_py_working_dir run ...``.
+  Thanks, Brett Cannon.
+
+- A little bit of Jython support: `coverage run` can now measure Jython
+  execution by adapting when $py.class files are traced. Thanks, Adi Roiban.
+  Jython still doesn't provide the Python libraries needed to make
+  coverage reporting work, unfortunately.
+
+- Internally, files are now closed explicitly, fixing `issue 104`_.  Thanks,
+  Brett Cannon.
+
+.. _issue 80: https://bitbucket.org/ned/coveragepy/issue/80/is-there-a-duck-typing-way-to-know-we-cant
+.. _issue 93: http://bitbucket.org/ned/coveragepy/issue/93/copying-a-mock-object-breaks-coverage
+.. _issue 95: https://bitbucket.org/ned/coveragepy/issue/95/run-subcommand-should-take-a-module-name
+.. _issue 104: https://bitbucket.org/ned/coveragepy/issue/104/explicitly-close-files
+.. _issue 107: https://bitbucket.org/ned/coveragepy/issue/107/codeparser-not-opening-source-files-with
+.. _issue 115: https://bitbucket.org/ned/coveragepy/issue/115/fail-gracefully-when-reporting-on-file
+.. _issue 121: https://bitbucket.org/ned/coveragepy/issue/121/filename-patterns-are-applied-stupidly
+.. _issue 123: https://bitbucket.org/ned/coveragepy/issue/123/pyeval_settrace-used-in-way-that-breaks
+.. _issue 125: https://bitbucket.org/ned/coveragepy/issue/125/coverage-removes-decoratortoolss-tracing
+
+
+Version 3.4 --- 19 September 2010
+---------------------------------
+
+- The XML report is now sorted by package name, fixing `issue 88`_.
+
+- Programs that exited with ``sys.exit()`` with no argument weren't handled
+  properly, producing a coverage.py stack trace.  That is now fixed.
+
+.. _issue 88: http://bitbucket.org/ned/coveragepy/issue/88/xml-report-lists-packages-in-random-order
+
+
+Version 3.4b2 --- 6 September 2010
+----------------------------------
+
+- Completely unexecuted files can now be included in coverage results, reported
+  as 0% covered.  This only happens if the --source option is specified, since
+  coverage.py needs guidance about where to look for source files.
+
+- The XML report output now properly includes a percentage for branch coverage,
+  fixing `issue 65`_ and `issue 81`_.
+
+- Coverage percentages are now displayed uniformly across reporting methods.
+  Previously, different reports could round percentages differently.  Also,
+  percentages are only reported as 0% or 100% if they are truly 0 or 100, and
+  are rounded otherwise.  Fixes `issue 41`_ and `issue 70`_.
+
+- The precision of reported coverage percentages can be set with the
+  ``[report] precision`` config file setting.  Completes `issue 16`_.
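+
+  For example (the number of digits is illustrative)::
+
+      [report]
+      precision = 2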
+
+- Threads derived from ``threading.Thread`` with an overridden `run` method
+  would report no coverage for the `run` method.  This is now fixed, closing
+  `issue 85`_.
+
+.. _issue 16: http://bitbucket.org/ned/coveragepy/issue/16/allow-configuration-of-accuracy-of-percentage-totals
+.. _issue 41: http://bitbucket.org/ned/coveragepy/issue/41/report-says-100-when-it-isnt-quite-there
+.. _issue 65: http://bitbucket.org/ned/coveragepy/issue/65/branch-option-not-reported-in-cobertura
+.. _issue 70: http://bitbucket.org/ned/coveragepy/issue/70/text-report-and-html-report-disagree-on-coverage
+.. _issue 81: http://bitbucket.org/ned/coveragepy/issue/81/xml-report-does-not-have-condition-coverage-attribute-for-lines-with-a
+.. _issue 85: http://bitbucket.org/ned/coveragepy/issue/85/threadrun-isnt-measured
+
+
+Version 3.4b1 --- 21 August 2010
+--------------------------------
+
+- BACKWARD INCOMPATIBILITY: the ``--omit`` and ``--include`` switches now take
+  file patterns rather than file prefixes, closing `issue 34`_ and `issue 36`_.
+
+- BACKWARD INCOMPATIBILITY: the `omit_prefixes` argument is gone throughout
+  coverage.py, replaced with `omit`, a list of file name patterns suitable for
+  `fnmatch`.  A parallel argument `include` controls what files are included.
+
+- The run command now has a ``--source`` switch, a list of directories or
+  module names.  If provided, coverage.py will only measure execution in those
+  source files.
+
+- Various warnings are printed to stderr for problems encountered during data
+  measurement: if a ``--source`` module has no Python source to measure, or is
+  never encountered at all, or if no data is collected.
+
+- The reporting commands (report, annotate, html, and xml) now have an
+  ``--include`` switch to restrict reporting to modules matching those file
+  patterns, similar to the existing ``--omit`` switch. Thanks, Zooko.
+
+- The run command now supports ``--include`` and ``--omit`` to control what
+  modules it measures. This can speed execution and reduce the amount of data
+  during reporting. Thanks Zooko.
+
+- Since coverage.py 3.1, using the Python trace function has been slower than
+  it needs to be.  A cache of tracing decisions was broken, but has now been
+  fixed.
+
+- Python 2.7 and 3.2 have introduced new opcodes that are now supported.
+
+- Python files with no statements, for example, empty ``__init__.py`` files,
+  are now reported as having zero statements instead of one.  Fixes `issue 1`_.
+
+- Reports now have a column of missed line counts rather than executed line
+  counts, since developers should focus on reducing the missed lines to zero,
+  rather than increasing the executed lines to varying targets.  Once
+  suggested, this seemed blindingly obvious.
+
+- Line numbers in HTML source pages are clickable, linking directly to that
+  line, which is highlighted on arrival.  Added a link back to the index page
+  at the bottom of each HTML page.
+
+- Programs that call ``os.fork`` will properly collect data from both the child
+  and parent processes.  Use ``coverage run -p`` to get two data files that can
+  be combined with ``coverage combine``.  Fixes `issue 56`_.
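+
+  A sketch of the workflow (the program name is illustrative)::
+
+      coverage run -p forking_program.py
+      coverage combine
+      coverage report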
+
+- Coverage.py is now runnable as a module: ``python -m coverage``.  Thanks,
+  Brett Cannon.
+
+- When measuring code running in a virtualenv, most of the system library was
+  being measured when it shouldn't have been.  This is now fixed.
+
+- Doctest text files are no longer recorded in the coverage data, since they
+  can't be reported anyway.  Fixes `issue 52`_ and `issue 61`_.
+
+- Jinja HTML templates compile into Python code using the HTML file name,
+  which confused coverage.py.  Now these files are no longer traced, fixing
+  `issue 82`_.
+
+- Source files can have more than one dot in them (foo.test.py), and will be
+  treated properly while reporting.  Fixes `issue 46`_.
+
+- Source files with DOS line endings are now properly tokenized for syntax
+  coloring on non-DOS machines.  Fixes `issue 53`_.
+
+- Unusual code structure that confused exits from methods with exits from
+  classes is now properly analyzed.  See `issue 62`_.
+
+- Asking for an HTML report with no files now shows a nice error message rather
+  than a cryptic failure ('int' object is unsubscriptable). Fixes `issue 59`_.
+
+.. _issue 1:  http://bitbucket.org/ned/coveragepy/issue/1/empty-__init__py-files-are-reported-as-1-executable
+.. _issue 34: http://bitbucket.org/ned/coveragepy/issue/34/enhanced-omit-globbing-handling
+.. _issue 36: http://bitbucket.org/ned/coveragepy/issue/36/provide-regex-style-omit
+.. _issue 46: http://bitbucket.org/ned/coveragepy/issue/46
+.. _issue 53: http://bitbucket.org/ned/coveragepy/issue/53
+.. _issue 52: http://bitbucket.org/ned/coveragepy/issue/52/doctesttestfile-confuses-source-detection
+.. _issue 56: http://bitbucket.org/ned/coveragepy/issue/56
+.. _issue 61: http://bitbucket.org/ned/coveragepy/issue/61/annotate-i-doesnt-work
+.. _issue 62: http://bitbucket.org/ned/coveragepy/issue/62
+.. _issue 59: http://bitbucket.org/ned/coveragepy/issue/59/html-report-fails-with-int-object-is
+.. _issue 82: http://bitbucket.org/ned/coveragepy/issue/82/tokenerror-when-generating-html-report
+
+
+Version 3.3.1 --- 6 March 2010
+------------------------------
+
+- Using `parallel=True` in a .coveragerc file prevented reporting, but no
+  longer does, fixing `issue 49`_.
+
+- When running your code with "coverage run", if you call `sys.exit()`,
+  coverage.py will exit with that status code, fixing `issue 50`_.
+
+.. _issue 49: http://bitbucket.org/ned/coveragepy/issue/49
+.. _issue 50: http://bitbucket.org/ned/coveragepy/issue/50
+
+
+Version 3.3 --- 24 February 2010
+--------------------------------
+
+- Settings are now read from a .coveragerc file.  A specific file can be
+  specified on the command line with --rcfile=FILE.  The name of the file can
+  be programmatically set with the `config_file` argument to the coverage()
+  constructor, or reading a config file can be disabled with
+  `config_file=False`.
+
+- Fixed a problem with nested loops having their branch possibilities
+  mischaracterized: `issue 39`_.
+
+- Added coverage.process_start to enable coverage measurement when Python
+  starts.
+
+- Parallel data file names now have a random number appended to them in
+  addition to the machine name and process id.
+
+- Parallel data files combined with "coverage combine" are deleted after
+  they're combined, to clean up unneeded files.  Fixes `issue 40`_.
+
+- Exceptions thrown from product code run with "coverage run" are now displayed
+  without internal coverage.py frames, so the output is the same as when the
+  code is run without coverage.py.
+
+- The `data_suffix` argument to the coverage constructor is now appended with
+  an added dot rather than simply appended, so that .coveragerc files will not
+  be confused for data files.
+
+- Python source files that don't end with a newline can now be executed, fixing
+  `issue 47`_.
+
+- Added an AUTHORS.txt file.
+
+.. _issue 39: http://bitbucket.org/ned/coveragepy/issue/39
+.. _issue 40: http://bitbucket.org/ned/coveragepy/issue/40
+.. _issue 47: http://bitbucket.org/ned/coveragepy/issue/47
+
+
+Version 3.2 --- 5 December 2009
+-------------------------------
+
+- Added a ``--version`` option on the command line.
+
+
+Version 3.2b4 --- 1 December 2009
+---------------------------------
+
+- Branch coverage improvements:
+
+  - The XML report now includes branch information.
+
+- Click-to-sort HTML report columns are now persisted in a cookie.  Viewing a
+  report will initially sort it the way you last had a coverage report sorted.
+  Thanks, `Chris Adams`_.
+
+- On Python 3.x, setuptools has been replaced by `Distribute`_.
+
+.. _Distribute: http://packages.python.org/distribute/
+
+
+Version 3.2b3 --- 23 November 2009
+----------------------------------
+
+- Fixed a memory leak in the C tracer that was introduced in 3.2b1.
+
+- Branch coverage improvements:
+
+  - Branches to excluded code are ignored.
+
+- The table of contents in the HTML report is now sortable: click the headers
+  on any column.  Thanks, `Chris Adams`_.
+
+.. _Chris Adams: http://improbable.org/chris/
+
+
+Version 3.2b2 --- 19 November 2009
+----------------------------------
+
+- Branch coverage improvements:
+
+  - Classes are no longer incorrectly marked as branches: `issue 32`_.
+
+  - "except" clauses with types are no longer incorrectly marked as branches:
+    `issue 35`_.
+
+- Fixed some problems with syntax coloring of sources containing line
+  continuations and tabs: `issue 30`_ and `issue 31`_.
+
+- The --omit option now works much better than before, fixing `issue 14`_ and
+  `issue 33`_.  Thanks, Danek Duvall.
+
+.. _issue 14: http://bitbucket.org/ned/coveragepy/issue/14
+.. _issue 30: http://bitbucket.org/ned/coveragepy/issue/30
+.. _issue 31: http://bitbucket.org/ned/coveragepy/issue/31
+.. _issue 32: http://bitbucket.org/ned/coveragepy/issue/32
+.. _issue 33: http://bitbucket.org/ned/coveragepy/issue/33
+.. _issue 35: http://bitbucket.org/ned/coveragepy/issue/35
+
+
+Version 3.2b1 --- 10 November 2009
+----------------------------------
+
+- Branch coverage!
+
+- XML reporting has file paths that let Cobertura find the source code.
+
+- The tracer code has changed; it's a few percent faster.
+
+- Some exceptions reported by the command line interface have been cleaned up
+  so that tracebacks inside coverage.py aren't shown.  Fixes `issue 23`_.
+
+.. _issue 23: http://bitbucket.org/ned/coveragepy/issue/23
+
+
+Version 3.1 --- 4 October 2009
+------------------------------
+
+- Source code can now be read from eggs.  Thanks, Ross Lawley.  Fixes
+  `issue 25`_.
+
+.. _issue 25: http://bitbucket.org/ned/coveragepy/issue/25
+
+
+Version 3.1b1 --- 27 September 2009
+-----------------------------------
+
+- Python 3.1 is now supported.
+
+- Coverage.py has a new command line syntax with sub-commands.  This expands
+  the possibilities for adding features and options in the future.  The old
+  syntax is still supported.  Try "coverage help" to see the new commands.
+  Thanks to Ben Finney for early help.
+
+- Added an experimental "coverage xml" command for producing coverage reports
+  in a Cobertura-compatible XML format.  Thanks, Bill Hart.
+
+- Added the --timid option to enable a simpler slower trace function that works
+  for DecoratorTools projects, including TurboGears.  Fixed `issue 12`_ and
+  `issue 13`_.
+
+- HTML reports show modules from other directories.  Fixed `issue 11`_.
+
+- HTML reports now display syntax-colored Python source.
+
+- Programs that change directory will still write .coverage files in the
+  directory where execution started.  Fixed `issue 24`_.
+
+- Added a "coverage debug" command for getting diagnostic information about the
+  coverage.py installation.
+
+.. _issue 11: http://bitbucket.org/ned/coveragepy/issue/11
+.. _issue 12: http://bitbucket.org/ned/coveragepy/issue/12
+.. _issue 13: http://bitbucket.org/ned/coveragepy/issue/13
+.. _issue 24: http://bitbucket.org/ned/coveragepy/issue/24
+
+
+Version 3.0.1 --- 7 July 2009
+-----------------------------
+
+- Removed the recursion limit in the tracer function.  Previously, code that
+  ran more than 500 frames deep would crash. Fixed `issue 9`_.
+
+- Fixed a bizarre problem involving pyexpat, whereby lines following XML parser
+  invocations could be overlooked.  Fixed `issue 10`_.
+
+- On Python 2.3, coverage.py could mis-measure code with exceptions being
+  raised.  This is now fixed.
+
+- The coverage.py code itself will now not be measured by coverage.py, and no
+  coverage.py modules will be mentioned in the nose --with-cover plug-in.
+  Fixed `issue 8`_.
+
+- When running source files, coverage.py now opens them in universal newline
+  mode just like Python does.  This lets it run Windows files on Mac, for
+  example.
+
+.. _issue 9: http://bitbucket.org/ned/coveragepy/issue/9
+.. _issue 10: http://bitbucket.org/ned/coveragepy/issue/10
+.. _issue 8: http://bitbucket.org/ned/coveragepy/issue/8
+
+
+Version 3.0 --- 13 June 2009
+----------------------------
+
+- Fixed the way the Python library was ignored.  Too much code was being
+  excluded the old way.
+
+- Tabs are now properly converted in HTML reports.  Previously indentation was
+  lost.  Fixed `issue 6`_.
+
+- Nested modules now get a proper flat_rootname.  Thanks, Christian Heimes.
+
+.. _issue 6: http://bitbucket.org/ned/coveragepy/issue/6
+
+
+Version 3.0b3 --- 16 May 2009
+-----------------------------
+
+- Added parameters to coverage.__init__ for options that had been set on the
+  coverage object itself.
+
+- Added clear_exclude() and get_exclude_list() methods for programmatic
+  manipulation of the exclude regexes.
+
+- Added coverage.load() to read previously-saved data from the data file.
+
+- Improved the finding of code files.  For example, .pyc files that have been
+  installed after compiling are now located correctly.  Thanks, Detlev
+  Offenbach.
+
+- When using the object API (that is, constructing a coverage() object), data
+  is no longer saved automatically on process exit.  You can re-enable it with
+  the auto_data=True parameter on the coverage() constructor. The module-level
+  interface still uses automatic saving.
+
+
+Version 3.0b --- 30 April 2009
+------------------------------
+
+HTML reporting, and continued refactoring.
+
+- HTML reports and annotation of source files: use the new -b (browser) switch.
+  Thanks to George Song for code, inspiration and guidance.
+
+- Code in the Python standard library is not measured by default.  If you need
+  to measure standard library code, use the -L command-line switch during
+  execution, or the cover_pylib=True argument to the coverage() constructor.
+
+- Source annotation into a directory (-a -d) behaves differently.  The
+  annotated files are named with their hierarchy flattened so that same-named
+  files from different directories no longer collide.  Also, only files in the
+  current tree are included.
+
+- coverage.annotate_file is no longer available.
+
+- Programs executed with -x now behave more as they should, for example,
+  __file__ has the correct value.
+
+- .coverage data files have a new pickle-based format designed for better
+  extensibility.
+
+- Removed the undocumented cache_file argument to coverage.usecache().
+
+
+Version 3.0b1 --- 7 March 2009
+------------------------------
+
+Major overhaul.
+
+- Coverage.py is now a package rather than a module.  Functionality has been
+  split into classes.
+
+- The trace function is implemented in C for speed.  Coverage.py runs are now
+  much faster.  Thanks to David Christian for productive micro-sprints and
+  other encouragement.
+
+- Executable lines are identified by reading the line number tables in the
+  compiled code, removing a great deal of complicated analysis code.
+
+- Precisely which lines are considered executable has changed in some cases.
+  Therefore, your coverage stats may also change slightly.
+
+- The singleton coverage object is only created if the module-level functions
+  are used.  This maintains the old interface while allowing better
+  programmatic use of Coverage.py.
+
+- The minimum supported Python version is 2.3.
+
+
+Version 2.85 --- 14 September 2008
+----------------------------------
+
+- Add support for finding source files in eggs. Don't check for
+  morfs being instances of ModuleType; instead use duck typing so that
+  pseudo-modules can participate. Thanks, Imri Goldberg.
+
+- Use os.realpath as part of the fixing of file names so that symlinks won't
+  confuse things. Thanks, Patrick Mezard.
+
+
+Version 2.80 --- 25 May 2008
+----------------------------
+
+- Open files in rU mode to avoid line ending craziness. Thanks, Edward Loper.
+
+
+Version 2.78 --- 30 September 2007
+----------------------------------
+
+- Don't try to predict whether a file is Python source based on the extension.
+  Extension-less files are often Python scripts. Instead, simply parse the file
+  and catch the syntax errors. Hat tip to Ben Finney.
+
+
+Version 2.77 --- 29 July 2007
+-----------------------------
+
+- Better packaging.
+
+
+Version 2.76 --- 23 July 2007
+-----------------------------
+
+- Now Python 2.5 is *really* fully supported: the body of the new with
+  statement is counted as executable.
+
+
+Version 2.75 --- 22 July 2007
+-----------------------------
+
+- Python 2.5 now fully supported. The method of dealing with multi-line
+  statements is now less sensitive to the exact line that Python reports during
+  execution. Pass statements are handled specially so that their disappearance
+  during execution won't throw off the measurement.
+
+
+Version 2.7 --- 21 July 2007
+----------------------------
+
+- "#pragma: nocover" is excluded by default.
+
+- Properly ignore docstrings and other constant expressions that appear in the
+  middle of a function, a problem reported by Tim Leslie.
+
+- coverage.erase() shouldn't clobber the exclude regex. Change how parallel
+  mode is invoked, and fix erase() so that it erases the cache when called
+  programmatically.
+
+- In reports, ignore code executed from strings, since we can't do anything
+  useful with it anyway.
+
+- Better file handling on Linux, thanks Guillaume Chazarain.
+
+- Better shell support on Windows, thanks Noel O'Boyle.
+
+- Python 2.2 support maintained, thanks Catherine Proulx.
+
+- Minor changes to avoid lint warnings.
+
+
+Version 2.6 --- 23 August 2006
+------------------------------
+
+- Applied Joseph Tate's patch for function decorators.
+
+- Applied Sigve Tjora and Mark van der Wal's fixes for argument handling.
+
+- Applied Geoff Bache's parallel mode patch.
+
+- Refactorings to improve testability. Fixes to command-line logic for parallel
+  mode and collect.
+
+
+Version 2.5 --- 4 December 2005
+-------------------------------
+
+- Call threading.settrace so that all threads are measured. Thanks Martin
+  Fuzzey.
+
+- Add a file argument to report so that reports can be captured to a different
+  destination.
+
+- Coverage.py can now measure itself.
+
+- Adapted Greg Rogers' patch for using relative file names, and sorting and
+  omitting files to report on.
+
+
+Version 2.2 --- 31 December 2004
+--------------------------------
+
+- Allow for keyword arguments in the module global functions. Thanks, Allen.
+
+
+Version 2.1 --- 14 December 2004
+--------------------------------
+
+- Return 'analysis' to its original behavior and add 'analysis2'. Add a global
+  for 'annotate', and factor it, adding 'annotate_file'.
+
+
+Version 2.0 --- 12 December 2004
+--------------------------------
+
+Significant code changes.
+
+- Finding executable statements has been rewritten so that docstrings and
+  other quirks of Python execution aren't mistakenly identified as missing
+  lines.
+
+- Lines can be excluded from consideration, even entire suites of lines.
+
+- The file system cache of covered lines can be disabled programmatically.
+
+- Modernized the code.
+
+
+Earlier History
+---------------
+
+2001-12-04 GDR Created.
+
+2001-12-06 GDR Added command-line interface and source code annotation.
+
+2001-12-09 GDR Moved design and interface to separate documents.
+
+2001-12-10 GDR Open cache file as binary on Windows. Allow simultaneous -e and
+-x, or -a and -r.
+
+2001-12-12 GDR Added command-line help. Cache analysis so that it only needs to
+be done once when you specify -a and -r.
+
+2001-12-13 GDR Improved speed while recording. Portable between Python 1.5.2
+and 2.1.1.
+
+2002-01-03 GDR Module-level functions work correctly.
+
+2002-01-07 GDR Update sys.path when running a file with the -x option, so that
+it matches the value the program would get if it were run on its own.
diff --git a/catapult/third_party/coverage/LICENSE.txt b/catapult/third_party/coverage/LICENSE.txt
new file mode 100644
index 0000000..f433b1a
--- /dev/null
+++ b/catapult/third_party/coverage/LICENSE.txt
@@ -0,0 +1,177 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
diff --git a/catapult/third_party/coverage/MANIFEST.in b/catapult/third_party/coverage/MANIFEST.in
new file mode 100644
index 0000000..31e2230
--- /dev/null
+++ b/catapult/third_party/coverage/MANIFEST.in
@@ -0,0 +1,46 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+# MANIFEST.in file for coverage.py
+
+include AUTHORS.txt
+include CHANGES.rst
+include LICENSE.txt
+include MANIFEST.in
+include Makefile
+include NOTICE.txt
+include README.rst
+include TODO.txt
+include __main__.py
+include .travis.yml
+include appveyor.yml
+include circle.yml
+include howto.txt
+include igor.py
+include metacov.ini
+include pylintrc
+include setup.py
+include tox.ini
+include tox_wheels.ini
+
+recursive-include ci *.*
+exclude ci/appveyor.token
+
+recursive-include coverage/fullcoverage *.py
+recursive-include coverage/ctracer *.c *.h
+
+recursive-include doc conf.py *.pip *.rst *.txt
+recursive-include doc/_static *.*
+prune doc/_build
+
+recursive-include requirements *.pip
+
+recursive-include tests *.py *.tok
+recursive-include tests/farm */gold*/*.* */gold*/*/*.*
+recursive-include tests/farm/*/src * *.*
+recursive-include tests js/*.* qunit/*.*
+prune tests/eggsrc/build
+prune tests/eggsrc/dist
+prune tests/eggsrc/*.egg-info
+
+global-exclude *.py[co]
diff --git a/catapult/third_party/coverage/Makefile b/catapult/third_party/coverage/Makefile
new file mode 100644
index 0000000..ce26501
--- /dev/null
+++ b/catapult/third_party/coverage/Makefile
@@ -0,0 +1,112 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+# Makefile for utility work on coverage.py.
+
+default:
+	@echo "* No default action *"
+
+clean:
+	-rm -f *.pyd */*.pyd
+	-rm -f *.so */*.so
+	-PYTHONPATH=. python tests/test_farm.py clean
+	-rm -rf build coverage.egg-info dist htmlcov
+	-rm -f *.pyc */*.pyc */*/*.pyc */*/*/*.pyc */*/*/*/*.pyc */*/*/*/*/*.pyc
+	-rm -f *.pyo */*.pyo */*/*.pyo */*/*/*.pyo */*/*/*/*.pyo */*/*/*/*/*.pyo
+	-rm -f *.bak */*.bak */*/*.bak */*/*/*.bak */*/*/*/*.bak */*/*/*/*/*.bak
+	-rm -f *$$py.class */*$$py.class */*/*$$py.class */*/*/*$$py.class */*/*/*/*$$py.class */*/*/*/*/*$$py.class
+	-rm -rf __pycache__ */__pycache__ */*/__pycache__ */*/*/__pycache__ */*/*/*/__pycache__ */*/*/*/*/__pycache__
+	-rm -f coverage/*,cover
+	-rm -f MANIFEST
+	-rm -f .coverage .coverage.* coverage.xml .metacov* .noseids
+	-rm -f tests/zipmods.zip
+	-rm -rf tests/eggsrc/build tests/eggsrc/dist tests/eggsrc/*.egg-info
+	-rm -f setuptools-*.egg distribute-*.egg distribute-*.tar.gz
+	-rm -rf doc/_build doc/_spell
+
+sterile: clean
+	-rm -rf .tox*
+
+LINTABLE = coverage igor.py setup.py tests ci/*.py
+
+lint:
+	-pylint $(LINTABLE)
+	python -m tabnanny $(LINTABLE)
+	python igor.py check_eol
+
+spell:
+	-pylint --disable=all --enable=spelling $(LINTABLE)
+
+pep8:
+	pep8 --filename=*.py --repeat $(LINTABLE)
+
+test:
+	tox -e py27,py34 $(ARGS)
+
+metacov:
+	COVERAGE_COVERAGE=yes tox $(ARGS)
+
+metahtml:
+	python igor.py combine_html
+
+# Kitting
+
+kit:
+	python setup.py sdist --formats=gztar,zip
+
+wheel:
+	tox -c tox_wheels.ini $(ARGS)
+
+kit_upload:
+	twine upload dist/*
+
+kit_local:
+	cp -v dist/* `awk -F "=" '/find-links/ {print $$2}' ~/.pip/pip.conf`
+	# pip caches wheels of things it has installed. Clean them out so we
+	# don't go crazy trying to figure out why our new code isn't installing.
+	find ~/Library/Caches/pip/wheels -name 'coverage-*' -delete
+
+download_appveyor:
+	python ci/download_appveyor.py nedbat/coveragepy
+
+pypi:
+	python setup.py register
+
+build_ext:
+	python setup.py build_ext
+
+install:
+	python setup.py install
+
+uninstall:
+	-rm -rf $(PYHOME)/lib/site-packages/coverage*
+	-rm -rf $(PYHOME)/scripts/coverage*
+
+# Documentation
+
+SPHINXBUILD = sphinx-build
+SPHINXOPTS = -a -E doc
+WEBHOME = ~/web/stellated/
+WEBSAMPLE = $(WEBHOME)/files/sample_coverage_html
+WEBSAMPLEBETA = $(WEBHOME)/files/sample_coverage_html_beta
+
+docreqs:
+	pip install -r doc/requirements.pip
+
+dochtml:
+	$(SPHINXBUILD) -b html $(SPHINXOPTS) doc/_build/html
+	@echo
+	@echo "Build finished. The HTML pages are in doc/_build/html."
+
+docspell:
+	$(SPHINXBUILD) -b spelling $(SPHINXOPTS) doc/_spell
+
+publish:
+	rm -f $(WEBSAMPLE)/*.*
+	mkdir -p $(WEBSAMPLE)
+	cp doc/sample_html/*.* $(WEBSAMPLE)
+
+publishbeta:
+	rm -f $(WEBSAMPLEBETA)/*.*
+	mkdir -p $(WEBSAMPLEBETA)
+	cp doc/sample_html_beta/*.* $(WEBSAMPLEBETA)
diff --git a/catapult/third_party/coverage/NOTICE.txt b/catapult/third_party/coverage/NOTICE.txt
new file mode 100644
index 0000000..ff68ab8
--- /dev/null
+++ b/catapult/third_party/coverage/NOTICE.txt
@@ -0,0 +1,14 @@
+Copyright 2001 Gareth Rees.  All rights reserved.
+Copyright 2004-2015 Ned Batchelder.  All rights reserved.
+
+Except where noted otherwise, this software is licensed under the Apache
+License, Version 2.0 (the "License"); you may not use this work except in
+compliance with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/catapult/third_party/coverage/PKG-INFO b/catapult/third_party/coverage/PKG-INFO
new file mode 100644
index 0000000..d7bc35e
--- /dev/null
+++ b/catapult/third_party/coverage/PKG-INFO
@@ -0,0 +1,100 @@
+Metadata-Version: 1.1
+Name: coverage
+Version: 4.0.3
+Summary: Code coverage measurement for Python
+Home-page: https://coverage.readthedocs.org
+Author: Ned Batchelder and others
+Author-email: ned@nedbatchelder.com
+License: Apache 2.0
+Description: .. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+        .. For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+        
+        ===========
+        Coverage.py
+        ===========
+        
+        Code coverage testing for Python.
+        
+        |  |license| |versions| |status| |docs|
+        |  |ci-status| |win-ci-status| |codecov|
+        |  |kit| |format| |downloads|
+        
+        Coverage.py measures code coverage, typically during test execution. It uses
+        the code analysis tools and tracing hooks provided in the Python standard
+        library to determine which lines are executable, and which have been executed.
+        
+        Coverage.py runs on CPython 2.6, 2.7, 3.3, 3.4 and 3.5; PyPy 2.4, 2.6 and 4.0;
+        and PyPy3 2.4.
+        
+        Documentation is on `Read the Docs <https://coverage.readthedocs.org>`_.
+        Code repository and issue tracker are on `Bitbucket <http://bitbucket.org/ned/coveragepy>`_,
+        with a mirrored repository on `GitHub <https://github.com/nedbat/coveragepy>`_.
+        
+        **New in 4.0:** ``--concurrency``, plugins for non-Python files, setup.cfg
+        support, --skip-covered, HTML filtering, and more than 50 issues closed.
+        
+        
+        Getting Started
+        ---------------
+        
+        See the `quick start <https://coverage.readthedocs.org/#quick-start>`_
+        section of the docs.
+        
+        
+        License
+        -------
+        
+        Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0.
+        For details, see https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt.
+        
+        
+        .. |ci-status| image:: https://travis-ci.org/nedbat/coveragepy.svg?branch=master
+            :target: https://travis-ci.org/nedbat/coveragepy
+            :alt: Build status
+        .. |win-ci-status| image:: https://ci.appveyor.com/api/projects/status/bitbucket/ned/coveragepy?svg=true
+            :target: https://ci.appveyor.com/project/nedbat/coveragepy
+            :alt: Windows build status
+        .. |docs| image:: https://readthedocs.org/projects/coverage/badge/?version=latest&style=flat
+            :target: https://coverage.readthedocs.org
+            :alt: Documentation
+        .. |reqs| image:: https://requires.io/github/nedbat/coveragepy/requirements.svg?branch=master
+            :target: https://requires.io/github/nedbat/coveragepy/requirements/?branch=master
+            :alt: Requirements status
+        .. |kit| image:: https://badge.fury.io/py/coverage.svg
+            :target: https://pypi.python.org/pypi/coverage
+            :alt: PyPI status
+        .. |format| image:: https://img.shields.io/pypi/format/coverage.svg
+            :target: https://pypi.python.org/pypi/coverage
+            :alt: Kit format
+        .. |downloads| image:: https://img.shields.io/pypi/dd/coverage.svg
+            :target: https://pypi.python.org/pypi/coverage
+            :alt: Daily PyPI downloads
+        .. |versions| image:: https://img.shields.io/pypi/pyversions/coverage.svg
+            :target: https://pypi.python.org/pypi/coverage
+            :alt: Python versions supported
+        .. |status| image:: https://img.shields.io/pypi/status/coverage.svg
+            :target: https://pypi.python.org/pypi/coverage
+            :alt: Package stability
+        .. |license| image:: https://img.shields.io/pypi/l/coverage.svg
+            :target: https://pypi.python.org/pypi/coverage
+            :alt: License
+        .. |codecov| image:: http://codecov.io/github/nedbat/coveragepy/coverage.svg?branch=master
+            :target: http://codecov.io/github/nedbat/coveragepy?branch=master
+            :alt: Coverage!
+        
+Keywords: code coverage testing
+Platform: UNKNOWN
+Classifier: Environment :: Console
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Software Development :: Quality Assurance
+Classifier: Topic :: Software Development :: Testing
+Classifier: Development Status :: 5 - Production/Stable
diff --git a/catapult/third_party/coverage/README.chromium b/catapult/third_party/coverage/README.chromium
new file mode 100644
index 0000000..ff9cf5c
--- /dev/null
+++ b/catapult/third_party/coverage/README.chromium
@@ -0,0 +1,16 @@
+Name: coverage.py
+Short Name: coverage
+URL: https://pypi.python.org/pypi/coverage
+Version: 4.0.3
+Date: 2015-11-24
+License: Apache 2.0
+License File: LICENSE.txt
+Security Critical: no
+
+Description:
+Coverage.py measures code coverage, typically during test execution. It uses the
+code analysis tools and tracing hooks provided in the Python standard library to
+determine which lines are executable, and which have been executed.
+
+Local Modifications:
+Removed 'doc' and 'test' directories to reduce total file size.
diff --git a/catapult/third_party/coverage/README.rst b/catapult/third_party/coverage/README.rst
new file mode 100644
index 0000000..66a8f5e
--- /dev/null
+++ b/catapult/third_party/coverage/README.rst
@@ -0,0 +1,75 @@
+.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+.. For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+===========
+Coverage.py
+===========
+
+Code coverage testing for Python.
+
+|  |license| |versions| |status| |docs|
+|  |ci-status| |win-ci-status| |codecov|
+|  |kit| |format| |downloads|
+
+Coverage.py measures code coverage, typically during test execution. It uses
+the code analysis tools and tracing hooks provided in the Python standard
+library to determine which lines are executable, and which have been executed.
+
+Coverage.py runs on CPython 2.6, 2.7, 3.3, 3.4 and 3.5; PyPy 2.4, 2.6 and 4.0;
+and PyPy3 2.4.
+
+Documentation is on `Read the Docs <http://coverage.readthedocs.org>`_.
+Code repository and issue tracker are on `Bitbucket <http://bitbucket.org/ned/coveragepy>`_,
+with a mirrored repository on `GitHub <https://github.com/nedbat/coveragepy>`_.
+
+**New in 4.0:** ``--concurrency``, plugins for non-Python files, setup.cfg
+support, --skip-covered, HTML filtering, and more than 50 issues closed.
+
+
+Getting Started
+---------------
+
+See the `quick start <http://coverage.readthedocs.org/#quick-start>`_
+section of the docs.
+
+
+License
+-------
+
+Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0.
+For details, see https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt.
+
+
+.. |ci-status| image:: https://travis-ci.org/nedbat/coveragepy.svg?branch=master
+    :target: https://travis-ci.org/nedbat/coveragepy
+    :alt: Build status
+.. |win-ci-status| image:: https://ci.appveyor.com/api/projects/status/bitbucket/ned/coveragepy?svg=true
+    :target: https://ci.appveyor.com/project/nedbat/coveragepy
+    :alt: Windows build status
+.. |docs| image:: https://readthedocs.org/projects/coverage/badge/?version=latest&style=flat
+    :target: http://coverage.readthedocs.org
+    :alt: Documentation
+.. |reqs| image:: https://requires.io/github/nedbat/coveragepy/requirements.svg?branch=master
+    :target: https://requires.io/github/nedbat/coveragepy/requirements/?branch=master
+    :alt: Requirements status
+.. |kit| image:: https://badge.fury.io/py/coverage.svg
+    :target: https://pypi.python.org/pypi/coverage
+    :alt: PyPI status
+.. |format| image:: https://img.shields.io/pypi/format/coverage.svg
+    :target: https://pypi.python.org/pypi/coverage
+    :alt: Kit format
+.. |downloads| image:: https://img.shields.io/pypi/dd/coverage.svg
+    :target: https://pypi.python.org/pypi/coverage
+    :alt: Daily PyPI downloads
+.. |versions| image:: https://img.shields.io/pypi/pyversions/coverage.svg
+    :target: https://pypi.python.org/pypi/coverage
+    :alt: Python versions supported
+.. |status| image:: https://img.shields.io/pypi/status/coverage.svg
+    :target: https://pypi.python.org/pypi/coverage
+    :alt: Package stability
+.. |license| image:: https://img.shields.io/pypi/l/coverage.svg
+    :target: https://pypi.python.org/pypi/coverage
+    :alt: License
+.. |codecov| image:: http://codecov.io/github/nedbat/coveragepy/coverage.svg?branch=master
+    :target: http://codecov.io/github/nedbat/coveragepy?branch=master
+    :alt: Coverage!
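The README above describes what coverage.py measures but defers usage to the
online quick start. As a minimal sketch of the 4.x programmatic API it points
to (not part of the imported tree; the measured statement is a placeholder):

    # Measure a throwaway computation with the coverage.py API vendored above.
    import coverage

    cov = coverage.Coverage()
    cov.start()
    total = sum(i * i for i in range(100))   # stand-in for code under test
    cov.stop()
    cov.save()
    cov.report()       # line-coverage summary on stdout
    cov.html_report()  # writes htmlcov/index.html

The command-line equivalent is roughly "coverage run <program>" followed by
"coverage report -m".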
diff --git a/catapult/third_party/coverage/TODO.txt b/catapult/third_party/coverage/TODO.txt
new file mode 100644
index 0000000..f6036d2
--- /dev/null
+++ b/catapult/third_party/coverage/TODO.txt
@@ -0,0 +1,285 @@
+Coverage.py TODO
+
+Key:
+    * Heading
+    - Not done yet.
+    + Done.
+    x Not going to do.
+
+* 4.0
+
+- What defaults should change?
+    x --source = . ?
+    x --branch = True ?
+
+- Remove 2.3, 2.4, 2.5 limitations
+    + set, sorted, reversed, rpartition
+    + generator expressions
+    + decorators
+    + collections.defaultdict
+    + .startswith((,))
+    + "with" statements
+    - .format() ?
+    + try/except/finally
+    + with assertRaises
+    + addCleanup instead of tearDown
+    + exec statement can look like a function in py2 (since when?)
+    - runpy ?
+    + we can use "except ExcClass as e:"
+
+- Plugins
+    + Clean up
+    + implement plugin support in CTracer
+    + remove plugin support from PyTracer
+    x add services:
+        - filelocator
+        - warning
+    - dynamic_source_filename: return should be a canonical path
+        - update the omit test to use "quux*" instead of "*quux*"
+    + docs
++ Make reports use filenames, not module names
+- documentation
+    - test helpers
+    + cov.config["run:branch"] api (well, coverage.get_option etc)
+    + "added in 4.0"
+    - tweaks to theme?
+    - Plugins!
+        Once per process
+        Once per file
+            - create a file tracer
+            - call its has_dynamic_source_file()
+        Once per call
+        Once per line
+- build process
+    - don't publish to nedbat.com any more (but still need the sample html reports)
+        + don't need .px tooling
+        - write a new nedbat.com/code/coverage page.
+    - all doc links should point to rtfd
++ Remove code only run on <2.6
++ Change data file to json
++ Create data api
++ gevent, etc.
++ Remove the old command-line syntax
+    + A pain, b/c of the structure of the tests.
+    + BTW: make an easier way to write those tests.
+
+- tests
+    - test the kit has the right contents
+    - test the kit installs the right stuff
+
+
+* --source stuff:
+    + warn if a package is never found.
+    + warn if no data was collected
+    - tie --source into reporting
+
+* Soon
+
++ Better omit handling that ignores files during measurement.
+    - Deal with ~ in specified paths correctly.
++ while TRUE claims to be partial.
+    + A way to mark lines as partial branches, with a regex?
+        + Default to "while True:", "while 1:"
++ HTML keyboard short cuts
+
+
+* 3.2
+
++ Some kind of indication in the HTML where yellow lines aren't going.
+- Profile the reporting code: it's REALLY slow.
+    - parser is doing some redundant work.
++ Analysis class should do rolling up of stats also (actually Numbers)
++ Update docs for --branch.
+x self.coverage.data.has_arcs is ugly.
++ Branches that never jump to nocover lines shouldn't be marked as partial.
+    (see top of test_cogapp for examples)
++ Maybe turning off yellow lines should make those lines green?
++ A missing branch to leave the function shows an annotation of -1. Now "exit".
++ XML report needs to get branch information.
++ Add branch info to "coverage debug data"
++ Polish up the help, and double-check the docs.
+
+
+* Speed
+
++ C extension collector
+- bitvector in trace extension.
+- Ignore certain modules
++ Record linenos rather than (file,lineno) pairs in tracer.
+x Tricky swapping of collector like figleaf, pycov, et al. (Don't need to do
+    this with C collector).
+- Seems like there should be a faster way to manage all the line number sets in
+    CodeParser.raw_parse.
+- If tracing, canonical_filename_cache overlaps with should_trace_cache.  Skip
+    canonical_filename_cache. Maybe it isn't even worth it...
+- Would pre-allocating line number integers make the C tracer faster? It would
+    use less memory.
+
+
+* Accuracy
+
+- Record magic number of module to ensure code hasn't changed
+- Record version of coverage data file, so we can update what's stored there.
+- Record options in coverage data file, so multiple runs are certain to make
+    sense together.
+- Do I still need the lines in annotate_file that deal specially with "else"?
+
+
+* Power
+
++ Branch coverage
+    Titus' idea:
+        1: if a:
+        2:     b = 2
+        3: c = 3
+    if the coverage data shows 1,2,3, it was if-then.  if it's 1,3, then the
+    missing else was executed.
++ API for getting coverage data.
+- Instruction tracing instead of line tracing.
+- Path tracing (how does this even work?)
+- Count execution of lines
+- Track callers of functions (ala std module trace)
+- Method/Class/Module coverage reporting.
+- .coverage files that can be kept separate, rather than accumulated.
+- test/coverage map: http://rbtcollins.wordpress.com/2009/09/16/back-from-hiatus/
+    - Similar to figleaf's sections.
+
+
+* Convenience
+
+- Command line modules should also be directories, meaning all the modules in that
+    directory.
+- Why can't a morf also be a string, the name of a module?
+- ignore by module as well as file?
++ Use a .coveragerc file to control coverage.py without the programmatic API.
+- Add a --data switch to explicitly control the data file on the command line.
+x Why can't you specify execute (-x) and report (-r) in the same invocation?
+    Maybe just because -x needs the rest of the command line?
++ Support 2.3 - 3.1!
+    http://pythonology.blogspot.com/2009/02/making-code-run-on-python-20-through-30.html
+    http://www.rfk.id.au/blog/entry/preparing-pyenchant-for-python-3
+    http://pydev.blogspot.com/2008/11/making-code-work-in-python-2-and-3.html
+    + Explicitly set pickle protocol to 2.
+- An inference mode that marks lines as executed if they "must have been" executed:
+    class definitions, etc, when coverage is started after the class is defined.
+- Different categories of exclude pragma? So you can enable and disable them
+    from the command line, to reconsider exclusions.
++ Reporting on files never touched by coverage.py (package completeness)
+- A setup.py command? http://jeetworks.org/node/50
+- Deltas: indicate the change in coverage percentage from the last run.
++ Show lines missing rather than lines run in the reporting, since that's what
+  you need to focus on.
+
+
+* Beauty
+
++ HTML report
+    - Colored bars indicating coverage per file.
+    - Package navigation.
+    - Rolled-up statistics.
+    - Some way to focus in on red and yellow
+        - Show only lines near highlights?
+        + Jump to next highlight?
+            + Keyboard navigation: j and k.
+    - Cookie for changes to pyfile.html state.
+    + Clickable column headers on the index page.
+    + Syntax coloring in HTML report.
+    + Dynamic effects in HTML report.
+    + Footer in reports pointing to coverage.py home page.
+    + Baseline grid for linenumber font.
+    + Separate out css and HTML.
+    + Does it work right with utf-8 source files? http://www.python.org/dev/peps/pep-0263/
+    - Use vim modeline to determine tab width: http://vimdoc.sourceforge.net/htmldoc/options.html#modeline
+
+
+* Community
+
++ New docs, rather than pointing to Gareth's
+    + Min python version is 2.3.
+    - Three phases of work:
+        - Collection
+        - Analysis
+        - Reporting
+    - Distinction between:
+        - ignore (files not to collect)
+        - exclude (lines not to report as missed)
+        - omit (files not to report)
+    - Changes from coverage.py 2.x:
+        - Bare "except:" lines now count as executable code.
+        - Double function decorators: all decorator lines count as executable code.
+    x Document the .coverage file format.
+    + HTML reporting.
+        - Much more detail about what's in the report.
+    - References between pages are off:
+        - They have <em> tags around them.
+        - They use #anchors that don't survive the px->html conversion.
++ Be sure --help text is complete (-i is missing).
++ Host the project somewhere with a real bug tracker: bitbucket.org
++ Point discussion to TIP
+- PEP 8 compliance?
+
+
+* Programmability
+
++ Don't use sys.exit in CoverageScript.
++ Remove singleton
+    + Initialization of instance variables in the class.
+
+
+* Installation
+
+x How will coverage.py package install over coverage.py module?
+x pip can't install it: it reads the coverage.py html page, and finds the kit link,
+    but then can't handle the root-relative link.
+
+
+* Modernization
+
++ Decide on minimum supported version
+    + 2.3
+    + Get rid of the basestring protection
+    + Use enumerate
+    + Use sets instead of dicts
++ Switch from getopt to optparse.
++ Get rid of the recursive nonsense.
++ Docstrings.
++ Remove huge document-style comments.
+- Better names:
+    + self.cache -> self.cache_filename -> CoverageData.filename
+    + self.usecache -> CoverageData.use_file
+- More classes:
+    - Module munging
+    + Coverage data files
++ Why are some imports at the top of the file, and some in functions?
++ Get rid of sys.exitfunc use.
++ True and False (with no backward adaptation: the constants are new in 2.2.1)
++ Get rid of compiler module
+    + In analyzing code
+    + In test_coverage.py
++ Style:
+    + lineno
+    + filename
+
+
+* Correctness
+
+- What does -p (parallel mode) mean with -e (erase data)?
+
+
+* Tests
+
++ Switch to a real test runner, like nose.
++ Test both the C trace function and the Python trace function.
++ parser.py has no direct tests.
++ Tests about the .coverage file.
++ Tests about the --long-form of arguments.
++ Tests about overriding the .coverage filename.
+- Tests about parallel mode.
++ Tests about assigning a multi-line string.
+- Tests about tricky docstrings.
++ Coverage test coverage.py!
+- Tests that tracing stops after calling stop()
+- More intensive thread testing.
+x Tests about the "import __main__" in cmdline.py
++ What happens if the -x script raises an exception?
+- Test that the kit has all the proper contents.
diff --git a/catapult/third_party/coverage/__main__.py b/catapult/third_party/coverage/__main__.py
new file mode 100644
index 0000000..a44b820
--- /dev/null
+++ b/catapult/third_party/coverage/__main__.py
@@ -0,0 +1,18 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Be able to execute coverage.py by pointing Python at a working tree."""
+
+import runpy, os
+
+PKG = 'coverage'
+
+try:
+    run_globals = runpy.run_module(PKG, run_name='__main__', alter_sys=True)
+    executed = os.path.splitext(os.path.basename(run_globals['__file__']))[0]
+    if executed != '__main__':  # For Python 2.5 compatibility
+        raise ImportError(
+            'Incorrectly executed %s instead of __main__' % executed
+            )
+except ImportError:  # For Python 2.6 compatibility
+    runpy.run_module('%s.__main__' % PKG, run_name='__main__', alter_sys=True)
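The docstring above, "pointing Python at a working tree", means invoking the
directory that contains this __main__.py. A usage sketch with hypothetical
paths (not part of the imported tree):

    # Run a program under coverage straight from a checkout directory;
    # "python <dir>" executes <dir>/__main__.py with <dir> on sys.path.
    import subprocess

    subprocess.check_call(
        ['python', '/path/to/coveragepy', 'run', '/path/to/myprog.py'])
    subprocess.check_call(
        ['python', '/path/to/coveragepy', 'report', '-m'])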
diff --git a/catapult/third_party/coverage/appveyor.yml b/catapult/third_party/coverage/appveyor.yml
new file mode 100644
index 0000000..faa354f
--- /dev/null
+++ b/catapult/third_party/coverage/appveyor.yml
@@ -0,0 +1,130 @@
+# Appveyor, continuous integration for Windows
+# https://ci.appveyor.com/project/nedbat/coveragepy
+
+version: '{branch}-{build}'
+
+shallow_clone: true
+
+environment:
+
+  CMD_IN_ENV: "cmd /E:ON /V:ON /C .\\ci\\run_with_env.cmd"
+
+  matrix:
+    - JOB: "2.6 32-bit"
+      TOXENV: "py26"
+      PYTHON: "C:\\Python26.6"
+      PYTHON_VERSION: "2.6.6"
+      PYTHON_ARCH: "32"
+
+    - JOB: "2.6 64-bit"
+      TOXENV: "py26"
+      PYTHON: "C:\\Python26.6-x64"
+      PYTHON_VERSION: "2.6.6"
+      PYTHON_ARCH: "64"
+
+    - JOB: "2.7 32-bit"
+      TOXENV: "py27"
+      PYTHON: "C:\\Python27.10"
+      PYTHON_VERSION: "2.7.10"
+      PYTHON_ARCH: "32"
+
+    - JOB: "2.7 64-bit"
+      TOXENV: "py27"
+      PYTHON: "C:\\Python27.10-x64"
+      PYTHON_VERSION: "2.7.10"
+      PYTHON_ARCH: "64"
+
+    - JOB: "3.3 32-bit"
+      TOXENV: "py33"
+      PYTHON: "C:\\Python33"
+      PYTHON_VERSION: "3.3"
+      PYTHON_ARCH: "32"
+
+    - JOB: "3.3 64-bit"
+      TOXENV: "py33"
+      PYTHON: "C:\\Python33-x64"
+      PYTHON_VERSION: "3.3"
+      PYTHON_ARCH: "64"
+
+    - JOB: "3.4 32-bit"
+      TOXENV: "py34"
+      PYTHON: "C:\\Python34"
+      PYTHON_VERSION: "3.4"
+      PYTHON_ARCH: "32"
+
+    - JOB: "3.4 64-bit"
+      TOXENV: "py34"
+      PYTHON: "C:\\Python34-x64"
+      PYTHON_VERSION: "3.4"
+      PYTHON_ARCH: "64"
+
+    - JOB: "3.5 32-bit"
+      TOXENV: "py35"
+      PYTHON: "C:\\Python35"
+      PYTHON_VERSION: "3.5.0"
+      PYTHON_ARCH: "32"
+
+    - JOB: "3.5 64-bit"
+      TOXENV: "py35"
+      PYTHON: "C:\\Python35-x64"
+      PYTHON_VERSION: "3.5.0"
+      PYTHON_ARCH: "64"
+
+    # Meta coverage
+    - JOB: "Meta 2.7"
+      TOXENV: "py27"
+      PYTHON: "C:\\Python27"
+      PYTHON_VERSION: "2.7"
+      PYTHON_ARCH: "32"
+      COVERAGE_COVERAGE: "yes"
+
+    - JOB: "Meta 3.4"
+      TOXENV: "py34"
+      PYTHON: "C:\\Python34"
+      PYTHON_VERSION: "3.4"
+      PYTHON_ARCH: "32"
+      COVERAGE_COVERAGE: "yes"
+
+init:
+  - "ECHO %TOXENV%"
+
+install:
+  # Install Python (from the official .msi of http://python.org) and pip when
+  # not already installed.
+  - ps: if (-not(Test-Path($env:PYTHON))) { & ci\install.ps1 }
+
+  # Prepend newly installed Python to the PATH of this build (this cannot be
+  # done from inside the powershell script as it would require to restart
+  # the parent CMD process).
+  - "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%"
+
+  # Check that we have the expected version and architecture for Python
+  - "python --version"
+  - "python -c \"import struct; print(struct.calcsize('P') * 8)\""
+
+  # Upgrade to the latest version of pip to avoid it displaying warnings
+  # about it being out of date.
+  - "pip install --disable-pip-version-check --user --upgrade pip"
+
+  # Install requirements.
+  - "%CMD_IN_ENV% pip install -r requirements/tox.pip -r requirements/wheel.pip"
+
+  # Make a python3.4.bat file in the current directory so that tox will find it
+  # and python3.4 will mean what we want it to.
+  - "python -c \"import os; open('python{0}.{1}.bat'.format(*os.environ['TOXENV'][2:]), 'w').write('@{0}\\\\python \\x25*\\n'.format(os.environ['PYTHON']))\""
+
+build_script:
+  # If not a metacov job, then build wheels and .exe installers.
+  - if NOT "%COVERAGE_COVERAGE%" == "yes" %CMD_IN_ENV% %PYTHON%\python setup.py bdist_wheel bdist_wininst
+
+  # Push everything in dist\ as an artifact.
+  - ps: if ( Test-Path 'dist' -PathType Container ) { Get-ChildItem dist\*.* | % { Push-AppveyorArtifact $_.FullName -FileName ('dist\' + $_.Name) } }
+
+test_script:
+  - "%CMD_IN_ENV% %PYTHON%\\Scripts\\tox"
+
+after_test:
+  - if "%COVERAGE_COVERAGE%" == "yes" 7z a metacov-win-%TOXENV%.zip %APPVEYOR_BUILD_FOLDER%\.metacov*
+
+artifacts:
+  - path: "metacov-*.zip"
diff --git a/catapult/third_party/coverage/ci/README.txt b/catapult/third_party/coverage/ci/README.txt
new file mode 100644
index 0000000..a34d036
--- /dev/null
+++ b/catapult/third_party/coverage/ci/README.txt
@@ -0,0 +1 @@
+Files to support continuous integration systems.
diff --git a/catapult/third_party/coverage/ci/download_appveyor.py b/catapult/third_party/coverage/ci/download_appveyor.py
new file mode 100644
index 0000000..f640b41
--- /dev/null
+++ b/catapult/third_party/coverage/ci/download_appveyor.py
@@ -0,0 +1,95 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Use the Appveyor API to download Windows artifacts."""
+
+import os
+import os.path
+import sys
+import zipfile
+
+import requests
+
+
+def make_auth_headers():
+    """Make the authentication headers needed to use the Appveyor API."""
+    with open("ci/appveyor.token") as f:
+        token = f.read().strip()
+
+    headers = {
+        'Authorization': 'Bearer {0}'.format(token),
+    }
+    return headers
+
+
+def make_url(url, **kwargs):
+    """Build an Appveyor API url."""
+    return "https://ci.appveyor.com/api" + url.format(**kwargs)
+
+
+def get_project_build(account_project):
+    """Get the details of the latest Appveyor build."""
+    url = make_url("/projects/{account_project}", account_project=account_project)
+    response = requests.get(url, headers=make_auth_headers())
+    return response.json()
+
+
+def download_latest_artifacts(account_project):
+    """Download all the artifacts from the latest build."""
+    build = get_project_build(account_project)
+    jobs = build['build']['jobs']
+    print "Build {0[build][version]}, {1} jobs: {0[build][message]}".format(build, len(jobs))
+    for job in jobs:
+        name = job['name'].partition(':')[2].split(',')[0].strip()
+        print "  {0}: {1[status]}, {1[artifactsCount]} artifacts".format(name, job)
+
+        url = make_url("/buildjobs/{jobid}/artifacts", jobid=job['jobId'])
+        response = requests.get(url, headers=make_auth_headers())
+        artifacts = response.json()
+
+        for artifact in artifacts:
+            is_zip = artifact['type'] == "Zip"
+            filename = artifact['fileName']
+            print "    {0}, {1} bytes".format(filename, artifact['size'])
+
+            url = make_url(
+                "/buildjobs/{jobid}/artifacts/{filename}",
+                jobid=job['jobId'],
+                filename=filename
+            )
+            download_url(url, filename, make_auth_headers())
+
+            if is_zip:
+                unpack_zipfile(filename)
+                os.remove(filename)
+
+
+def ensure_dirs(filename):
+    """Make sure the directories exist for `filename`."""
+    dirname, _ = os.path.split(filename)
+    if dirname and not os.path.exists(dirname):
+        os.makedirs(dirname)
+
+
+def download_url(url, filename, headers):
+    """Download a file from `url` to `filename`."""
+    ensure_dirs(filename)
+    response = requests.get(url, headers=headers, stream=True)
+    if response.status_code == 200:
+        with open(filename, 'wb') as f:
+            for chunk in response.iter_content(16*1024):
+                f.write(chunk)
+
+
+def unpack_zipfile(filename):
+    """Unpack a zipfile, using the names in the zip."""
+    with open(filename, 'rb') as fzip:
+        z = zipfile.ZipFile(fzip)
+        for name in z.namelist():
+            print "      extracting {0}".format(name)
+            ensure_dirs(name)
+            z.extract(name)
+
+
+if __name__ == "__main__":
+    download_latest_artifacts(sys.argv[1])
diff --git a/catapult/third_party/coverage/ci/install.ps1 b/catapult/third_party/coverage/ci/install.ps1
new file mode 100644
index 0000000..f9934aa
--- /dev/null
+++ b/catapult/third_party/coverage/ci/install.ps1
@@ -0,0 +1,232 @@
+# From: https://github.com/ogrisel/python-appveyor-demo/blob/master/appveyor/install.ps1
+#
+#
+# Sample script to install Python and pip under Windows
+# Authors: Olivier Grisel, Jonathan Helmus, Kyle Kastner, and Alex Willmer
+# License: CC0 1.0 Universal: http://creativecommons.org/publicdomain/zero/1.0/
+
+$MINICONDA_URL = "http://repo.continuum.io/miniconda/"
+$BASE_URL = "https://www.python.org/ftp/python/"
+$GET_PIP_URL = "https://bootstrap.pypa.io/get-pip.py"
+$GET_PIP_PATH = "C:\get-pip.py"
+
+$PYTHON_PRERELEASE_REGEX = @"
+(?x)
+(?<major>\d+)
+\.
+(?<minor>\d+)
+\.
+(?<micro>\d+)
+(?<prerelease>[a-z]{1,2}\d+)
+"@
+
+
+function Download ($filename, $url) {
+    $webclient = New-Object System.Net.WebClient
+
+    $basedir = $pwd.Path + "\"
+    $filepath = $basedir + $filename
+    if (Test-Path $filename) {
+        Write-Host "Reusing" $filepath
+        return $filepath
+    }
+
+    # Download and retry up to 3 times in case of network transient errors.
+    Write-Host "Downloading" $filename "from" $url
+    $retry_attempts = 2
+    for ($i = 0; $i -lt $retry_attempts; $i++) {
+        try {
+            $webclient.DownloadFile($url, $filepath)
+            break
+        }
+        Catch [Exception]{
+            Start-Sleep 1
+        }
+    }
+    if (Test-Path $filepath) {
+        Write-Host "File saved at" $filepath
+    } else {
+        # Retry once to get the error message if any at the last try
+        $webclient.DownloadFile($url, $filepath)
+    }
+    return $filepath
+}
+
+
+function ParsePythonVersion ($python_version) {
+    if ($python_version -match $PYTHON_PRERELEASE_REGEX) {
+        return ([int]$matches.major, [int]$matches.minor, [int]$matches.micro,
+                $matches.prerelease)
+    }
+    $version_obj = [version]$python_version
+    return ($version_obj.major, $version_obj.minor, $version_obj.build, "")
+}
+
+
+function DownloadPython ($python_version, $platform_suffix) {
+    $major, $minor, $micro, $prerelease = ParsePythonVersion $python_version
+
+    if (($major -le 2 -and $micro -eq 0) `
+        -or ($major -eq 3 -and $minor -le 2 -and $micro -eq 0) `
+        ) {
+        $dir = "$major.$minor"
+        $python_version = "$major.$minor$prerelease"
+    } else {
+        $dir = "$major.$minor.$micro"
+    }
+
+    if ($prerelease) {
+        if (($major -le 2) `
+            -or ($major -eq 3 -and $minor -eq 1) `
+            -or ($major -eq 3 -and $minor -eq 2) `
+            -or ($major -eq 3 -and $minor -eq 3) `
+            ) {
+            $dir = "$dir/prev"
+        }
+    }
+
+    if (($major -le 2) -or ($major -le 3 -and $minor -le 4)) {
+        $ext = "msi"
+        if ($platform_suffix) {
+            $platform_suffix = ".$platform_suffix"
+        }
+    } else {
+        $ext = "exe"
+        if ($platform_suffix) {
+            $platform_suffix = "-$platform_suffix"
+        }
+    }
+
+    $filename = "python-$python_version$platform_suffix.$ext"
+    $url = "$BASE_URL$dir/$filename"
+    $filepath = Download $filename $url
+    return $filepath
+}
+
+
+function InstallPython ($python_version, $architecture, $python_home) {
+    Write-Host "Installing Python" $python_version "for" $architecture "bit architecture to" $python_home
+    if (Test-Path $python_home) {
+        Write-Host $python_home "already exists, skipping."
+        return $false
+    }
+    if ($architecture -eq "32") {
+        $platform_suffix = ""
+    } else {
+        $platform_suffix = "amd64"
+    }
+    $installer_path = DownloadPython $python_version $platform_suffix
+    $installer_ext = [System.IO.Path]::GetExtension($installer_path)
+    Write-Host "Installing $installer_path to $python_home"
+    $install_log = $python_home + ".log"
+    if ($installer_ext -eq '.msi') {
+        InstallPythonMSI $installer_path $python_home $install_log
+    } else {
+        InstallPythonEXE $installer_path $python_home $install_log
+    }
+    if (Test-Path $python_home) {
+        Write-Host "Python $python_version ($architecture) installation complete"
+    } else {
+        Write-Host "Failed to install Python in $python_home"
+        Get-Content -Path $install_log
+        Exit 1
+    }
+}
+
+
+function InstallPythonEXE ($exepath, $python_home, $install_log) {
+    $install_args = "/quiet InstallAllUsers=1 TargetDir=$python_home"
+    RunCommand $exepath $install_args
+}
+
+
+function InstallPythonMSI ($msipath, $python_home, $install_log) {
+    $install_args = "/qn /log $install_log /i $msipath TARGETDIR=$python_home"
+    $uninstall_args = "/qn /x $msipath"
+    RunCommand "msiexec.exe" $install_args
+    if (-not(Test-Path $python_home)) {
+        Write-Host "Python seems to be installed else-where, reinstalling."
+        RunCommand "msiexec.exe" $uninstall_args
+        RunCommand "msiexec.exe" $install_args
+    }
+}
+
+function RunCommand ($command, $command_args) {
+    Write-Host $command $command_args
+    Start-Process -FilePath $command -ArgumentList $command_args -Wait -Passthru
+}
+
+
+function InstallPip ($python_home) {
+    $pip_path = $python_home + "\Scripts\pip.exe"
+    $python_path = $python_home + "\python.exe"
+    if (-not(Test-Path $pip_path)) {
+        Write-Host "Installing pip..."
+        $webclient = New-Object System.Net.WebClient
+        $webclient.DownloadFile($GET_PIP_URL, $GET_PIP_PATH)
+        Write-Host "Executing:" $python_path $GET_PIP_PATH
+        & $python_path $GET_PIP_PATH
+    } else {
+        Write-Host "pip already installed."
+    }
+}
+
+
+function DownloadMiniconda ($python_version, $platform_suffix) {
+    if ($python_version -eq "3.4") {
+        $filename = "Miniconda3-3.5.5-Windows-" + $platform_suffix + ".exe"
+    } else {
+        $filename = "Miniconda-3.5.5-Windows-" + $platform_suffix + ".exe"
+    }
+    $url = $MINICONDA_URL + $filename
+    $filepath = Download $filename $url
+    return $filepath
+}
+
+
+function InstallMiniconda ($python_version, $architecture, $python_home) {
+    Write-Host "Installing Python" $python_version "for" $architecture "bit architecture to" $python_home
+    if (Test-Path $python_home) {
+        Write-Host $python_home "already exists, skipping."
+        return $false
+    }
+    if ($architecture -eq "32") {
+        $platform_suffix = "x86"
+    } else {
+        $platform_suffix = "x86_64"
+    }
+    $filepath = DownloadMiniconda $python_version $platform_suffix
+    Write-Host "Installing" $filepath "to" $python_home
+    $install_log = $python_home + ".log"
+    $args = "/S /D=$python_home"
+    Write-Host $filepath $args
+    Start-Process -FilePath $filepath -ArgumentList $args -Wait -Passthru
+    if (Test-Path $python_home) {
+        Write-Host "Python $python_version ($architecture) installation complete"
+    } else {
+        Write-Host "Failed to install Python in $python_home"
+        Get-Content -Path $install_log
+        Exit 1
+    }
+}
+
+
+function InstallMinicondaPip ($python_home) {
+    $pip_path = $python_home + "\Scripts\pip.exe"
+    $conda_path = $python_home + "\Scripts\conda.exe"
+    if (-not(Test-Path $pip_path)) {
+        Write-Host "Installing pip..."
+        $args = "install --yes pip"
+        Write-Host $conda_path $args
+        Start-Process -FilePath "$conda_path" -ArgumentList $args -Wait -Passthru
+    } else {
+        Write-Host "pip already installed."
+    }
+}
+
+function main () {
+    InstallPython $env:PYTHON_VERSION $env:PYTHON_ARCH $env:PYTHON
+    InstallPip $env:PYTHON
+}
+
+main
diff --git a/catapult/third_party/coverage/ci/run_with_env.cmd b/catapult/third_party/coverage/ci/run_with_env.cmd
new file mode 100644
index 0000000..66b9252
--- /dev/null
+++ b/catapult/third_party/coverage/ci/run_with_env.cmd
@@ -0,0 +1,91 @@
+:: From: https://github.com/ogrisel/python-appveyor-demo/blob/master/appveyor/run_with_env.cmd
+::
+::
+:: To build extensions for 64 bit Python 3, we need to configure environment
+:: variables to use the MSVC 2010 C++ compilers from GRMSDKX_EN_DVD.iso of:
+:: MS Windows SDK for Windows 7 and .NET Framework 4 (SDK v7.1)
+::
+:: To build extensions for 64 bit Python 2, we need to configure environment
+:: variables to use the MSVC 2008 C++ compilers from GRMSDKX_EN_DVD.iso of:
+:: MS Windows SDK for Windows 7 and .NET Framework 3.5 (SDK v7.0)
+::
+:: 32 bit builds, and 64-bit builds for 3.5 and beyond, do not require specific
+:: environment configurations.
+::
+:: Note: this script needs to be run with the /E:ON and /V:ON flags for the
+:: cmd interpreter, at least for (SDK v7.0)
+::
+:: More details at:
+:: https://github.com/cython/cython/wiki/64BitCythonExtensionsOnWindows
+:: http://stackoverflow.com/a/13751649/163740
+::
+:: Author: Olivier Grisel
+:: License: CC0 1.0 Universal: http://creativecommons.org/publicdomain/zero/1.0/
+::
+:: Notes about batch files for Python people:
+::
+:: Quotes in values are literally part of the values:
+::      SET FOO="bar"
+:: FOO is now five characters long: " b a r "
+:: If you don't want quotes, don't include them on the right-hand side.
+::
+:: The CALL lines at the end of this file look redundant, but if you move them
+:: outside of the IF clauses, they do not run properly in the SET_SDK_64==Y
+:: case, I don't know why.
+@ECHO OFF
+
+SET COMMAND_TO_RUN=%*
+SET WIN_SDK_ROOT=C:\Program Files\Microsoft SDKs\Windows
+SET WIN_WDK=c:\Program Files (x86)\Windows Kits\10\Include\wdf
+
+:: Extract the major and minor versions, and allow for the minor version to be
+:: more than 9.  This requires the version number to have two dots in it.
+SET MAJOR_PYTHON_VERSION=%PYTHON_VERSION:~0,1%
+IF "%PYTHON_VERSION:~3,1%" == "." (
+    SET MINOR_PYTHON_VERSION=%PYTHON_VERSION:~2,1%
+) ELSE (
+    SET MINOR_PYTHON_VERSION=%PYTHON_VERSION:~2,2%
+)
+
+:: Based on the Python version, determine what SDK version to use, and whether
+:: to set the SDK for 64-bit.
+IF %MAJOR_PYTHON_VERSION% == 2 (
+    SET WINDOWS_SDK_VERSION="v7.0"
+    SET SET_SDK_64=Y
+) ELSE (
+    IF %MAJOR_PYTHON_VERSION% == 3 (
+        SET WINDOWS_SDK_VERSION="v7.1"
+        IF %MINOR_PYTHON_VERSION% LEQ 4 (
+            SET SET_SDK_64=Y
+        ) ELSE (
+            SET SET_SDK_64=N
+            IF EXIST "%WIN_WDK%" (
+                :: See: https://connect.microsoft.com/VisualStudio/feedback/details/1610302/
+                REN "%WIN_WDK%" 0wdf
+            )
+        )
+    ) ELSE (
+        ECHO Unsupported Python version: "%MAJOR_PYTHON_VERSION%"
+        EXIT 1
+    )
+)
+
+IF %PYTHON_ARCH% == 64 (
+    IF %SET_SDK_64% == Y (
+        ECHO Configuring Windows SDK %WINDOWS_SDK_VERSION% for Python %MAJOR_PYTHON_VERSION% on a 64 bit architecture
+        SET DISTUTILS_USE_SDK=1
+        SET MSSdk=1
+        "%WIN_SDK_ROOT%\%WINDOWS_SDK_VERSION%\Setup\WindowsSdkVer.exe" -q -version:%WINDOWS_SDK_VERSION%
+        "%WIN_SDK_ROOT%\%WINDOWS_SDK_VERSION%\Bin\SetEnv.cmd" /x64 /release
+        ECHO Executing: %COMMAND_TO_RUN%
+        call %COMMAND_TO_RUN% || EXIT 1
+    ) ELSE (
+        ECHO Using default MSVC build environment for 64 bit architecture
+        ECHO Executing: %COMMAND_TO_RUN%
+        call %COMMAND_TO_RUN% || EXIT 1
+    )
+) ELSE (
+    ECHO Using default MSVC build environment for 32 bit architecture
+    ECHO Executing: %COMMAND_TO_RUN%
+    call %COMMAND_TO_RUN% || EXIT 1
+)
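For readers not fluent in cmd syntax, the SDK selection encoded above reduces
to a small rule set. A hedged Python restatement (function name illustrative,
not part of the imported file):

    # Mirror run_with_env.cmd: which Windows SDK the build would use, and
    # whether the SDK environment needs to be configured explicitly.
    def windows_sdk_for(python_version, python_arch):
        major, minor = (int(p) for p in python_version.split('.')[:2])
        if major == 2:
            return 'v7.0', python_arch == 64
        if major == 3:
            # 64-bit builds of 3.5+ use the default MSVC environment.
            return 'v7.1', python_arch == 64 and minor <= 4
        raise ValueError('Unsupported Python version: %s' % python_version)

    assert windows_sdk_for('2.7.10', 64) == ('v7.0', True)
    assert windows_sdk_for('3.5.0', 64) == ('v7.1', False)
    assert windows_sdk_for('3.4', 32) == ('v7.1', False)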
diff --git a/catapult/third_party/coverage/circle.yml b/catapult/third_party/coverage/circle.yml
new file mode 100644
index 0000000..a52959e
--- /dev/null
+++ b/catapult/third_party/coverage/circle.yml
@@ -0,0 +1,18 @@
+# Circle CI configuration for coverage.py.
+# https://circleci.com/gh/nedbat/coveragepy
+
+machine:
+  python:
+    version: 2.7.6
+  post:
+    - pyenv global pypy-2.4.0 2.6.8 2.7.9 3.3.3 3.4.2
+
+dependencies:
+  pre:
+    - pip install -U pip
+  override:
+    - pip install -r requirements/tox.pip
+
+test:
+  override:
+    - tox
diff --git a/catapult/third_party/coverage/coverage.egg-info/PKG-INFO b/catapult/third_party/coverage/coverage.egg-info/PKG-INFO
new file mode 100644
index 0000000..d7bc35e
--- /dev/null
+++ b/catapult/third_party/coverage/coverage.egg-info/PKG-INFO
@@ -0,0 +1,100 @@
+Metadata-Version: 1.1
+Name: coverage
+Version: 4.0.3
+Summary: Code coverage measurement for Python
+Home-page: https://coverage.readthedocs.org
+Author: Ned Batchelder and others
+Author-email: ned@nedbatchelder.com
+License: Apache 2.0
+Description: .. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+        .. For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+        
+        ===========
+        Coverage.py
+        ===========
+        
+        Code coverage testing for Python.
+        
+        |  |license| |versions| |status| |docs|
+        |  |ci-status| |win-ci-status| |codecov|
+        |  |kit| |format| |downloads|
+        
+        Coverage.py measures code coverage, typically during test execution. It uses
+        the code analysis tools and tracing hooks provided in the Python standard
+        library to determine which lines are executable, and which have been executed.
+        
+        Coverage.py runs on CPython 2.6, 2.7, 3.3, 3.4 and 3.5; PyPy 2.4, 2.6 and 4.0;
+        and PyPy3 2.4.
+        
+        Documentation is on `Read the Docs <https://coverage.readthedocs.org>`_.
+        Code repository and issue tracker are on `Bitbucket <http://bitbucket.org/ned/coveragepy>`_,
+        with a mirrored repository on `GitHub <https://github.com/nedbat/coveragepy>`_.
+        
+        **New in 4.0:** ``--concurrency``, plugins for non-Python files, setup.cfg
+        support, --skip-covered, HTML filtering, and more than 50 issues closed.
+        
+        
+        Getting Started
+        ---------------
+        
+        See the `quick start <https://coverage.readthedocs.org/#quick-start>`_
+        section of the docs.
+        
+        
+        License
+        -------
+        
+        Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0.
+        For details, see https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt.
+        
+        
+        .. |ci-status| image:: https://travis-ci.org/nedbat/coveragepy.svg?branch=master
+            :target: https://travis-ci.org/nedbat/coveragepy
+            :alt: Build status
+        .. |win-ci-status| image:: https://ci.appveyor.com/api/projects/status/bitbucket/ned/coveragepy?svg=true
+            :target: https://ci.appveyor.com/project/nedbat/coveragepy
+            :alt: Windows build status
+        .. |docs| image:: https://readthedocs.org/projects/coverage/badge/?version=latest&style=flat
+            :target: https://coverage.readthedocs.org
+            :alt: Documentation
+        .. |reqs| image:: https://requires.io/github/nedbat/coveragepy/requirements.svg?branch=master
+            :target: https://requires.io/github/nedbat/coveragepy/requirements/?branch=master
+            :alt: Requirements status
+        .. |kit| image:: https://badge.fury.io/py/coverage.svg
+            :target: https://pypi.python.org/pypi/coverage
+            :alt: PyPI status
+        .. |format| image:: https://img.shields.io/pypi/format/coverage.svg
+            :target: https://pypi.python.org/pypi/coverage
+            :alt: Kit format
+        .. |downloads| image:: https://img.shields.io/pypi/dd/coverage.svg
+            :target: https://pypi.python.org/pypi/coverage
+            :alt: Daily PyPI downloads
+        .. |versions| image:: https://img.shields.io/pypi/pyversions/coverage.svg
+            :target: https://pypi.python.org/pypi/coverage
+            :alt: Python versions supported
+        .. |status| image:: https://img.shields.io/pypi/status/coverage.svg
+            :target: https://pypi.python.org/pypi/coverage
+            :alt: Package stability
+        .. |license| image:: https://img.shields.io/pypi/l/coverage.svg
+            :target: https://pypi.python.org/pypi/coverage
+            :alt: License
+        .. |codecov| image:: http://codecov.io/github/nedbat/coveragepy/coverage.svg?branch=master
+            :target: http://codecov.io/github/nedbat/coveragepy?branch=master
+            :alt: Coverage!
+        
+Keywords: code coverage testing
+Platform: UNKNOWN
+Classifier: Environment :: Console
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Software Development :: Quality Assurance
+Classifier: Topic :: Software Development :: Testing
+Classifier: Development Status :: 5 - Production/Stable
diff --git a/catapult/third_party/coverage/coverage.egg-info/SOURCES.txt b/catapult/third_party/coverage/coverage.egg-info/SOURCES.txt
new file mode 100644
index 0000000..bba8938
--- /dev/null
+++ b/catapult/third_party/coverage/coverage.egg-info/SOURCES.txt
@@ -0,0 +1,280 @@
+.travis.yml
+AUTHORS.txt
+CHANGES.rst
+LICENSE.txt
+MANIFEST.in
+Makefile
+NOTICE.txt
+README.rst
+TODO.txt
+__main__.py
+appveyor.yml
+circle.yml
+howto.txt
+igor.py
+metacov.ini
+pylintrc
+setup.py
+tox.ini
+tox_wheels.ini
+ci/README.txt
+ci/download_appveyor.py
+ci/install.ps1
+ci/run_with_env.cmd
+coverage/__init__.py
+coverage/__main__.py
+coverage/annotate.py
+coverage/backunittest.py
+coverage/backward.py
+coverage/bytecode.py
+coverage/cmdline.py
+coverage/collector.py
+coverage/config.py
+coverage/control.py
+coverage/data.py
+coverage/debug.py
+coverage/env.py
+coverage/execfile.py
+coverage/files.py
+coverage/html.py
+coverage/misc.py
+coverage/monkey.py
+coverage/parser.py
+coverage/phystokens.py
+coverage/pickle2json.py
+coverage/plugin.py
+coverage/plugin_support.py
+coverage/python.py
+coverage/pytracer.py
+coverage/report.py
+coverage/results.py
+coverage/summary.py
+coverage/templite.py
+coverage/test_helpers.py
+coverage/version.py
+coverage/xmlreport.py
+coverage.egg-info/PKG-INFO
+coverage.egg-info/SOURCES.txt
+coverage.egg-info/dependency_links.txt
+coverage.egg-info/entry_points.txt
+coverage.egg-info/not-zip-safe
+coverage.egg-info/top_level.txt
+coverage/ctracer/datastack.c
+coverage/ctracer/datastack.h
+coverage/ctracer/filedisp.c
+coverage/ctracer/filedisp.h
+coverage/ctracer/module.c
+coverage/ctracer/stats.h
+coverage/ctracer/tracer.c
+coverage/ctracer/tracer.h
+coverage/ctracer/util.h
+coverage/fullcoverage/encodings.py
+coverage/htmlfiles/coverage_html.js
+coverage/htmlfiles/index.html
+coverage/htmlfiles/jquery.debounce.min.js
+coverage/htmlfiles/jquery.hotkeys.js
+coverage/htmlfiles/jquery.isonscreen.js
+coverage/htmlfiles/jquery.min.js
+coverage/htmlfiles/jquery.tablesorter.min.js
+coverage/htmlfiles/keybd_closed.png
+coverage/htmlfiles/keybd_open.png
+coverage/htmlfiles/pyfile.html
+coverage/htmlfiles/style.css
+doc/api.rst
+doc/api_coverage.rst
+doc/api_coveragedata.rst
+doc/api_plugin.rst
+doc/branch.rst
+doc/changes.rst
+doc/cmd.rst
+doc/conf.py
+doc/config.rst
+doc/contributing.rst
+doc/dict.txt
+doc/excluding.rst
+doc/faq.rst
+doc/howitworks.rst
+doc/index.rst
+doc/install.rst
+doc/plugins.rst
+doc/python-coverage.1.txt
+doc/requirements.pip
+doc/source.rst
+doc/subprocess.rst
+doc/trouble.rst
+doc/_static/coverage.css
+doc/_static/neds.css
+requirements/dev.pip
+requirements/tox.pip
+requirements/wheel.pip
+tests/__init__.py
+tests/backtest.py
+tests/coveragetest.py
+tests/covmodzip1.py
+tests/goldtest.py
+tests/helpers.py
+tests/osinfo.py
+tests/plugin1.py
+tests/plugin2.py
+tests/stress_phystoken.tok
+tests/stress_phystoken_dos.tok
+tests/test_api.py
+tests/test_arcs.py
+tests/test_backward.py
+tests/test_cmdline.py
+tests/test_collector.py
+tests/test_concurrency.py
+tests/test_config.py
+tests/test_coverage.py
+tests/test_data.py
+tests/test_debug.py
+tests/test_execfile.py
+tests/test_farm.py
+tests/test_filereporter.py
+tests/test_files.py
+tests/test_html.py
+tests/test_misc.py
+tests/test_oddball.py
+tests/test_parser.py
+tests/test_phystokens.py
+tests/test_pickle2json.py
+tests/test_plugins.py
+tests/test_process.py
+tests/test_python.py
+tests/test_results.py
+tests/test_summary.py
+tests/test_templite.py
+tests/test_testing.py
+tests/test_xml.py
+tests/try_execfile.py
+tests/eggsrc/setup.py
+tests/eggsrc/egg1/__init__.py
+tests/eggsrc/egg1/egg1.py
+tests/farm/annotate/annotate_dir.py
+tests/farm/annotate/run.py
+tests/farm/annotate/run_encodings.py
+tests/farm/annotate/run_multi.py
+tests/farm/annotate/gold/white.py,cover
+tests/farm/annotate/gold_anno_dir/a___init__.py,cover
+tests/farm/annotate/gold_anno_dir/a_a.py,cover
+tests/farm/annotate/gold_anno_dir/b___init__.py,cover
+tests/farm/annotate/gold_anno_dir/b_b.py,cover
+tests/farm/annotate/gold_anno_dir/multi.py,cover
+tests/farm/annotate/gold_encodings/utf8.py,cover
+tests/farm/annotate/gold_multi/multi.py,cover
+tests/farm/annotate/gold_multi/a/__init__.py,cover
+tests/farm/annotate/gold_multi/a/a.py,cover
+tests/farm/annotate/gold_multi/b/__init__.py,cover
+tests/farm/annotate/gold_multi/b/b.py,cover
+tests/farm/annotate/src/multi.py
+tests/farm/annotate/src/utf8.py
+tests/farm/annotate/src/white.py
+tests/farm/annotate/src/a/__init__.py
+tests/farm/annotate/src/a/a.py
+tests/farm/annotate/src/b/__init__.py
+tests/farm/annotate/src/b/b.py
+tests/farm/html/gold_a/a_py.html
+tests/farm/html/gold_a/index.html
+tests/farm/html/gold_b_branch/b_py.html
+tests/farm/html/gold_b_branch/index.html
+tests/farm/html/gold_bom/bom_py.html
+tests/farm/html/gold_bom/index.html
+tests/farm/html/gold_isolatin1/index.html
+tests/farm/html/gold_isolatin1/isolatin1_py.html
+tests/farm/html/gold_omit_1/index.html
+tests/farm/html/gold_omit_1/m1_py.html
+tests/farm/html/gold_omit_1/m2_py.html
+tests/farm/html/gold_omit_1/m3_py.html
+tests/farm/html/gold_omit_1/main_py.html
+tests/farm/html/gold_omit_2/index.html
+tests/farm/html/gold_omit_2/m2_py.html
+tests/farm/html/gold_omit_2/m3_py.html
+tests/farm/html/gold_omit_2/main_py.html
+tests/farm/html/gold_omit_3/index.html
+tests/farm/html/gold_omit_3/m3_py.html
+tests/farm/html/gold_omit_3/main_py.html
+tests/farm/html/gold_omit_4/index.html
+tests/farm/html/gold_omit_4/m1_py.html
+tests/farm/html/gold_omit_4/m3_py.html
+tests/farm/html/gold_omit_4/main_py.html
+tests/farm/html/gold_omit_5/index.html
+tests/farm/html/gold_omit_5/m1_py.html
+tests/farm/html/gold_omit_5/main_py.html
+tests/farm/html/gold_other/blah_blah_other_py.html
+tests/farm/html/gold_other/here_py.html
+tests/farm/html/gold_other/index.html
+tests/farm/html/gold_partial/index.html
+tests/farm/html/gold_partial/partial_py.html
+tests/farm/html/gold_styled/a_py.html
+tests/farm/html/gold_styled/extra.css
+tests/farm/html/gold_styled/index.html
+tests/farm/html/gold_styled/style.css
+tests/farm/html/gold_unicode/index.html
+tests/farm/html/gold_unicode/unicode_py.html
+tests/farm/html/gold_x_xml/coverage.xml
+tests/farm/html/gold_y_xml_branch/coverage.xml
+tests/farm/html/othersrc/other.py
+tests/farm/html/src/a.py
+tests/farm/html/src/b.py
+tests/farm/html/src/bom.py
+tests/farm/html/src/extra.css
+tests/farm/html/src/here.py
+tests/farm/html/src/isolatin1.py
+tests/farm/html/src/m1.py
+tests/farm/html/src/m2.py
+tests/farm/html/src/m3.py
+tests/farm/html/src/main.py
+tests/farm/html/src/omit4.ini
+tests/farm/html/src/omit5.ini
+tests/farm/html/src/partial.py
+tests/farm/html/src/run_a_xml_2.ini
+tests/farm/html/src/tabbed.py
+tests/farm/html/src/unicode.py
+tests/farm/html/src/y.py
+tests/farm/run/run_chdir.py
+tests/farm/run/run_timid.py
+tests/farm/run/run_xxx.py
+tests/farm/run/src/chdir.py
+tests/farm/run/src/showtrace.py
+tests/farm/run/src/xxx
+tests/farm/run/src/subdir/placeholder
+tests/js/index.html
+tests/js/tests.js
+tests/modules/covmod1.py
+tests/modules/runmod1.py
+tests/modules/usepkgs.py
+tests/modules/aa/__init__.py
+tests/modules/aa/afile.odd.py
+tests/modules/aa/afile.py
+tests/modules/aa/zfile.py
+tests/modules/aa/bb/__init__.py
+tests/modules/aa/bb/bfile.odd.py
+tests/modules/aa/bb/bfile.py
+tests/modules/aa/bb.odd/bfile.py
+tests/modules/aa/bb/cc/__init__.py
+tests/modules/aa/bb/cc/cfile.py
+tests/modules/pkg1/__init__.py
+tests/modules/pkg1/__main__.py
+tests/modules/pkg1/p1a.py
+tests/modules/pkg1/p1b.py
+tests/modules/pkg1/p1c.py
+tests/modules/pkg1/runmod2.py
+tests/modules/pkg1/sub/__init__.py
+tests/modules/pkg1/sub/__main__.py
+tests/modules/pkg1/sub/ps1a.py
+tests/modules/pkg1/sub/runmod3.py
+tests/modules/pkg2/__init__.py
+tests/modules/pkg2/p2a.py
+tests/modules/pkg2/p2b.py
+tests/modules/plugins/__init__.py
+tests/modules/plugins/a_plugin.py
+tests/modules/plugins/another.py
+tests/moremodules/othermods/__init__.py
+tests/moremodules/othermods/othera.py
+tests/moremodules/othermods/otherb.py
+tests/moremodules/othermods/sub/__init__.py
+tests/moremodules/othermods/sub/osa.py
+tests/moremodules/othermods/sub/osb.py
+tests/qunit/jquery.tmpl.min.js
+tests/qunit/qunit.css
+tests/qunit/qunit.js
\ No newline at end of file
diff --git a/catapult/third_party/six/six.egg-info/dependency_links.txt b/catapult/third_party/coverage/coverage.egg-info/dependency_links.txt
similarity index 100%
rename from catapult/third_party/six/six.egg-info/dependency_links.txt
rename to catapult/third_party/coverage/coverage.egg-info/dependency_links.txt
diff --git a/catapult/third_party/coverage/coverage.egg-info/entry_points.txt b/catapult/third_party/coverage/coverage.egg-info/entry_points.txt
new file mode 100644
index 0000000..a487c42
--- /dev/null
+++ b/catapult/third_party/coverage/coverage.egg-info/entry_points.txt
@@ -0,0 +1,5 @@
+[console_scripts]
+coverage = coverage.cmdline:main
+coverage-2.7 = coverage.cmdline:main
+coverage2 = coverage.cmdline:main
+
diff --git a/catapult/third_party/six/six.egg-info/dependency_links.txt b/catapult/third_party/coverage/coverage.egg-info/not-zip-safe
similarity index 100%
copy from catapult/third_party/six/six.egg-info/dependency_links.txt
copy to catapult/third_party/coverage/coverage.egg-info/not-zip-safe
diff --git a/catapult/third_party/coverage/coverage.egg-info/top_level.txt b/catapult/third_party/coverage/coverage.egg-info/top_level.txt
new file mode 100644
index 0000000..4ebc8ae
--- /dev/null
+++ b/catapult/third_party/coverage/coverage.egg-info/top_level.txt
@@ -0,0 +1 @@
+coverage
diff --git a/catapult/third_party/coverage/coverage/__init__.py b/catapult/third_party/coverage/coverage/__init__.py
new file mode 100644
index 0000000..d132e4a
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/__init__.py
@@ -0,0 +1,34 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Code coverage measurement for Python.
+
+Ned Batchelder
+http://nedbatchelder.com/code/coverage
+
+"""
+
+from coverage.version import __version__, __url__, version_info
+
+from coverage.control import Coverage, process_startup
+from coverage.data import CoverageData
+from coverage.misc import CoverageException
+from coverage.plugin import CoveragePlugin, FileTracer, FileReporter
+
+# Backward compatibility.
+coverage = Coverage
+
+# On Windows, we encode and decode deep enough that something goes wrong and
+# the encodings.utf_8 module is loaded and then unloaded, I don't know why.
+# Adding a reference here prevents it from being unloaded.  Yuk.
+import encodings.utf_8
+
+# Because of the "from coverage.control import fooey" lines at the top of the
+# file, there's an entry for coverage.coverage in sys.modules, mapped to None.
+# This makes some inspection tools (like pydoc) unable to find the class
+# coverage.coverage.  So remove that entry.
+import sys
+try:
+    del sys.modules['coverage.coverage']
+except KeyError:
+    pass
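
The hunk above defines the package's public surface: `Coverage` (with the lowercase `coverage` alias kept for backward compatibility), `CoverageData`, `CoverageException`, and the plugin base classes. A minimal sketch of driving that API programmatically, assuming it is run as a saved script with this vendored package importable:

    # Minimal sketch of the API re-exported by coverage/__init__.py.
    import coverage

    def measured():
        return sum(range(10))

    cov = coverage.Coverage()      # the lowercase `coverage` alias also works
    cov.start()
    measured()                     # lines executed here are recorded
    cov.stop()
    cov.save()                     # writes the .coverage data file
    cov.report()                   # prints a line-coverage summary to stdout
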
diff --git a/catapult/third_party/coverage/coverage/__main__.py b/catapult/third_party/coverage/coverage/__main__.py
new file mode 100644
index 0000000..35ab87a
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/__main__.py
@@ -0,0 +1,8 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Coverage.py's main entry point."""
+
+import sys
+from coverage.cmdline import main
+sys.exit(main())
diff --git a/catapult/third_party/coverage/coverage/annotate.py b/catapult/third_party/coverage/coverage/annotate.py
new file mode 100644
index 0000000..4060450
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/annotate.py
@@ -0,0 +1,103 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Source file annotation for coverage.py."""
+
+import io
+import os
+import re
+
+from coverage.files import flat_rootname
+from coverage.misc import isolate_module
+from coverage.report import Reporter
+
+os = isolate_module(os)
+
+
+class AnnotateReporter(Reporter):
+    """Generate annotated source files showing line coverage.
+
+    This reporter creates annotated copies of the measured source files. Each
+    .py file is copied as a .py,cover file, with a left-hand margin annotating
+    each line::
+
+        > def h(x):
+        -     if 0:   #pragma: no cover
+        -         pass
+        >     if x == 1:
+        !         a = 1
+        >     else:
+        >         a = 2
+
+        > h(2)
+
+    Executed lines use '>', lines not executed use '!', lines excluded from
+    consideration use '-'.
+
+    """
+
+    def __init__(self, coverage, config):
+        super(AnnotateReporter, self).__init__(coverage, config)
+        self.directory = None
+
+    blank_re = re.compile(r"\s*(#|$)")
+    else_re = re.compile(r"\s*else\s*:\s*(#|$)")
+
+    def report(self, morfs, directory=None):
+        """Run the report.
+
+        See `coverage.report()` for arguments.
+
+        """
+        self.report_files(self.annotate_file, morfs, directory)
+
+    def annotate_file(self, fr, analysis):
+        """Annotate a single file.
+
+        `fr` is the FileReporter for the file to annotate.
+
+        """
+        statements = sorted(analysis.statements)
+        missing = sorted(analysis.missing)
+        excluded = sorted(analysis.excluded)
+
+        if self.directory:
+            dest_file = os.path.join(self.directory, flat_rootname(fr.relative_filename()))
+            if dest_file.endswith("_py"):
+                dest_file = dest_file[:-3] + ".py"
+            dest_file += ",cover"
+        else:
+            dest_file = fr.filename + ",cover"
+
+        with io.open(dest_file, 'w', encoding='utf8') as dest:
+            i = 0
+            j = 0
+            covered = True
+            source = fr.source()
+            for lineno, line in enumerate(source.splitlines(True), start=1):
+                while i < len(statements) and statements[i] < lineno:
+                    i += 1
+                while j < len(missing) and missing[j] < lineno:
+                    j += 1
+                if i < len(statements) and statements[i] == lineno:
+                    covered = j >= len(missing) or missing[j] > lineno
+                if self.blank_re.match(line):
+                    dest.write(u'  ')
+                elif self.else_re.match(line):
+                    # Special logic for lines containing only 'else:'.
+                    if i >= len(statements) and j >= len(missing):
+                        dest.write(u'! ')
+                    elif i >= len(statements) or j >= len(missing):
+                        dest.write(u'> ')
+                    elif statements[i] == missing[j]:
+                        dest.write(u'! ')
+                    else:
+                        dest.write(u'> ')
+                elif lineno in excluded:
+                    dest.write(u'- ')
+                elif covered:
+                    dest.write(u'> ')
+                else:
+                    dest.write(u'! ')
+
+                dest.write(line)
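
The `>` / `!` / `-` margin described in AnnotateReporter's docstring is produced by the `annotate` command, or by the equivalent API call. A sketch, assuming a hypothetical local module `my_module` is what gets measured:

    # Sketch: writing ",cover" annotated copies of measured sources.
    import coverage

    cov = coverage.Coverage()
    cov.start()
    import my_module               # hypothetical module to measure
    cov.stop()
    cov.save()

    # Produces my_module.py,cover under ./annotated, with '>' for executed
    # lines, '!' for missed lines and '-' for excluded lines.
    cov.annotate(directory="annotated")
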
diff --git a/catapult/third_party/coverage/coverage/backunittest.py b/catapult/third_party/coverage/coverage/backunittest.py
new file mode 100644
index 0000000..09574cc
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/backunittest.py
@@ -0,0 +1,42 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Implementations of unittest features from the future."""
+
+# Use unittest2 if it's available, otherwise unittest.  This gives us
+# back-ported features for 2.6.
+try:
+    import unittest2 as unittest
+except ImportError:
+    import unittest
+
+
+def unittest_has(method):
+    """Does `unittest.TestCase` have `method` defined?"""
+    return hasattr(unittest.TestCase, method)
+
+
+class TestCase(unittest.TestCase):
+    """Just like unittest.TestCase, but with assert methods added.
+
+    Designed to be compatible with 3.1 unittest.  Methods are only defined if
+    `unittest` doesn't have them.
+
+    """
+    # pylint: disable=missing-docstring
+
+    # Many Pythons have this method defined.  But PyPy3 has a bug with it
+    # somehow (https://bitbucket.org/pypy/pypy/issues/2092), so always use our
+    # own implementation that works everywhere, at least for the ways we're
+    # calling it.
+    def assertCountEqual(self, s1, s2):
+        """Assert these have the same elements, regardless of order."""
+        self.assertEqual(sorted(s1), sorted(s2))
+
+    if not unittest_has('assertRaisesRegex'):
+        def assertRaisesRegex(self, *args, **kwargs):
+            return self.assertRaisesRegexp(*args, **kwargs)
+
+    if not unittest_has('assertRegex'):
+        def assertRegex(self, *args, **kwargs):
+            return self.assertRegexpMatches(*args, **kwargs)
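
The shimmed TestCase above only defines the newer assert names when the running unittest lacks them, so test code can use one spelling everywhere. A small usage sketch:

    # Sketch: a test relying on the backfilled assert methods.
    from coverage.backunittest import TestCase

    class ShimTest(TestCase):
        def test_count_equal(self):
            # Order-insensitive comparison, always backed by the shim above.
            self.assertCountEqual([3, 1, 2], [1, 2, 3])

        def test_raises_regex(self):
            with self.assertRaisesRegex(ValueError, "bad"):
                raise ValueError("bad value")
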
diff --git a/catapult/third_party/coverage/coverage/backward.py b/catapult/third_party/coverage/coverage/backward.py
new file mode 100644
index 0000000..4fc7221
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/backward.py
@@ -0,0 +1,180 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Add things to old Pythons so I can pretend they are newer."""
+
+# This file does lots of tricky stuff, so disable a bunch of pylint warnings.
+# pylint: disable=redefined-builtin
+# pylint: disable=unused-import
+# pylint: disable=no-name-in-module
+
+import sys
+
+from coverage import env
+
+
+# Pythons 2 and 3 differ on where to get StringIO.
+try:
+    from cStringIO import StringIO
+except ImportError:
+    from io import StringIO
+
+# In py3, ConfigParser was renamed to the more-standard configparser
+try:
+    import configparser
+except ImportError:
+    import ConfigParser as configparser
+
+# What's a string called?
+try:
+    string_class = basestring
+except NameError:
+    string_class = str
+
+# What's a Unicode string called?
+try:
+    unicode_class = unicode
+except NameError:
+    unicode_class = str
+
+# Where do pickles come from?
+try:
+    import cPickle as pickle
+except ImportError:
+    import pickle
+
+# range or xrange?
+try:
+    range = xrange
+except NameError:
+    range = range
+
+# shlex.quote is new, but there's an undocumented implementation in "pipes",
+# who knew!?
+try:
+    from shlex import quote as shlex_quote
+except ImportError:
+    # Useful function, available under a different (undocumented) name
+    # in Python versions earlier than 3.3.
+    from pipes import quote as shlex_quote
+
+# A function to iterate listlessly over a dict's items.
+try:
+    {}.iteritems
+except AttributeError:
+    def iitems(d):
+        """Produce the items from dict `d`."""
+        return d.items()
+else:
+    def iitems(d):
+        """Produce the items from dict `d`."""
+        return d.iteritems()
+
+# Getting the `next` function from an iterator is different in 2 and 3.
+try:
+    iter([]).next
+except AttributeError:
+    def iternext(seq):
+        """Get the `next` function for iterating over `seq`."""
+        return iter(seq).__next__
+else:
+    def iternext(seq):
+        """Get the `next` function for iterating over `seq`."""
+        return iter(seq).next
+
+# Python 3.x is picky about bytes and strings, so provide methods to
+# get them right, and make them no-ops in 2.x
+if env.PY3:
+    def to_bytes(s):
+        """Convert string `s` to bytes."""
+        return s.encode('utf8')
+
+    def binary_bytes(byte_values):
+        """Produce a byte string with the ints from `byte_values`."""
+        return bytes(byte_values)
+
+    def byte_to_int(byte_value):
+        """Turn an element of a bytes object into an int."""
+        return byte_value
+
+    def bytes_to_ints(bytes_value):
+        """Turn a bytes object into a sequence of ints."""
+        # In Python 3, iterating bytes gives ints.
+        return bytes_value
+
+else:
+    def to_bytes(s):
+        """Convert string `s` to bytes (no-op in 2.x)."""
+        return s
+
+    def binary_bytes(byte_values):
+        """Produce a byte string with the ints from `byte_values`."""
+        return "".join(chr(b) for b in byte_values)
+
+    def byte_to_int(byte_value):
+        """Turn an element of a bytes object into an int."""
+        return ord(byte_value)
+
+    def bytes_to_ints(bytes_value):
+        """Turn a bytes object into a sequence of ints."""
+        for byte in bytes_value:
+            yield ord(byte)
+
+
+try:
+    # In Python 2.x, the builtins were in __builtin__
+    BUILTINS = sys.modules['__builtin__']
+except KeyError:
+    # In Python 3.x, they're in builtins
+    BUILTINS = sys.modules['builtins']
+
+
+# imp was deprecated in Python 3.3
+try:
+    import importlib
+    import importlib.util
+    imp = None
+except ImportError:
+    importlib = None
+
+# We only want to use importlib if it has everything we need.
+try:
+    importlib_util_find_spec = importlib.util.find_spec
+except Exception:
+    import imp
+    importlib_util_find_spec = None
+
+# What is the .pyc magic number for this version of Python?
+try:
+    PYC_MAGIC_NUMBER = importlib.util.MAGIC_NUMBER
+except AttributeError:
+    PYC_MAGIC_NUMBER = imp.get_magic()
+
+
+def import_local_file(modname, modfile=None):
+    """Import a local file as a module.
+
+    Opens a file in the current directory named `modname`.py, imports it
+    as `modname`, and returns the module object.  `modfile` is the file to
+    import if it isn't in the current directory.
+
+    """
+    try:
+        from importlib.machinery import SourceFileLoader
+    except ImportError:
+        SourceFileLoader = None
+
+    if modfile is None:
+        modfile = modname + '.py'
+    if SourceFileLoader:
+        mod = SourceFileLoader(modname, modfile).load_module()
+    else:
+        for suff in imp.get_suffixes():                 # pragma: part covered
+            if suff[0] == '.py':
+                break
+
+        with open(modfile, 'r') as f:
+            # pylint: disable=undefined-loop-variable
+            mod = imp.load_module(modname, f, modfile, suff)
+
+    return mod
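
backward.py gives each Python 2/3 spelling difference a single importable name, so call sites elsewhere in the package stay free of version checks. A short sketch of typical usage:

    # Sketch: typical call sites for the shims defined in backward.py.
    from coverage.backward import iitems, to_bytes, string_class

    counts = {"a.py": 3, "b.py": 7}
    for filename, n in iitems(counts):       # dict iteration, no 2-vs-3 ifs
        print("%s: %d lines" % (filename, n))

    data = to_bytes("coverage")              # bytes on py3, plain str on py2
    assert isinstance("a.py", string_class)  # one name for str/basestring
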
diff --git a/catapult/third_party/coverage/coverage/bytecode.py b/catapult/third_party/coverage/coverage/bytecode.py
new file mode 100644
index 0000000..82929ce
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/bytecode.py
@@ -0,0 +1,87 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Bytecode manipulation for coverage.py"""
+
+import opcode
+import types
+
+from coverage.backward import byte_to_int
+
+
+class ByteCode(object):
+    """A single bytecode."""
+    def __init__(self):
+        # The offset of this bytecode in the code object.
+        self.offset = -1
+
+        # The opcode, defined in the `opcode` module.
+        self.op = -1
+
+        # The argument, a small integer, whose meaning depends on the opcode.
+        self.arg = -1
+
+        # The offset in the code object of the next bytecode.
+        self.next_offset = -1
+
+        # The offset to jump to.
+        self.jump_to = -1
+
+
+class ByteCodes(object):
+    """Iterator over byte codes in `code`.
+
+    This handles the logic of EXTENDED_ARG byte codes internally.  Those byte
+    codes are not returned by this iterator.
+
+    Returns `ByteCode` objects.
+
+    """
+    def __init__(self, code):
+        self.code = code
+
+    def __getitem__(self, i):
+        return byte_to_int(self.code[i])
+
+    def __iter__(self):
+        offset = 0
+        ext_arg = 0
+        while offset < len(self.code):
+            bc = ByteCode()
+            bc.op = self[offset]
+            bc.offset = offset
+
+            next_offset = offset+1
+            if bc.op >= opcode.HAVE_ARGUMENT:
+                bc.arg = ext_arg + self[offset+1] + 256*self[offset+2]
+                next_offset += 2
+
+                label = -1
+                if bc.op in opcode.hasjrel:
+                    label = next_offset + bc.arg
+                elif bc.op in opcode.hasjabs:
+                    label = bc.arg
+                bc.jump_to = label
+
+            bc.next_offset = offset = next_offset
+            if bc.op == opcode.EXTENDED_ARG:
+                ext_arg = bc.arg * 256*256
+            else:
+                ext_arg = 0
+                yield bc
+
+
+class CodeObjects(object):
+    """Iterate over all the code objects in `code`."""
+    def __init__(self, code):
+        self.stack = [code]
+
+    def __iter__(self):
+        while self.stack:
+            # We're going to return the code object on the stack, but first
+            # push its children for later returning.
+            code = self.stack.pop()
+            for c in code.co_consts:
+                if isinstance(c, types.CodeType):
+                    self.stack.append(c)
+            yield code
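
CodeObjects is how the rest of the package reaches every function, class body and comprehension inside a compiled module: it walks co_consts recursively and yields each code object. A minimal sketch:

    # Sketch: enumerating nested code objects with CodeObjects.
    from coverage.bytecode import CodeObjects

    src = (
        "def outer():\n"
        "    def inner():\n"
        "        return 1\n"
        "    return inner\n"
    )
    code = compile(src, "<example>", "exec")
    for co in CodeObjects(code):
        print(co.co_name)      # <module>, outer, inner

ByteCodes, by contrast, decodes each argument as the two bytes following the opcode (with EXTENDED_ARG folded in), so it is only meaningful on the pre-3.6 bytecode layout that this vendored copy targets.
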
diff --git a/catapult/third_party/coverage/coverage/cmdline.py b/catapult/third_party/coverage/coverage/cmdline.py
new file mode 100644
index 0000000..221c18d
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/cmdline.py
@@ -0,0 +1,758 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Command-line support for coverage.py."""
+
+import glob
+import optparse
+import os.path
+import sys
+import textwrap
+import traceback
+
+from coverage import env
+from coverage.execfile import run_python_file, run_python_module
+from coverage.misc import CoverageException, ExceptionDuringRun, NoSource
+from coverage.debug import info_formatter, info_header
+
+
+class Opts(object):
+    """A namespace class for individual options we'll build parsers from."""
+
+    append = optparse.make_option(
+        '-a', '--append', action='store_true',
+        help="Append coverage data to .coverage; otherwise it is started clean with each run.",
+    )
+    branch = optparse.make_option(
+        '', '--branch', action='store_true',
+        help="Measure branch coverage in addition to statement coverage.",
+    )
+    CONCURRENCY_CHOICES = [
+        "thread", "gevent", "greenlet", "eventlet", "multiprocessing",
+    ]
+    concurrency = optparse.make_option(
+        '', '--concurrency', action='store', metavar="LIB",
+        choices=CONCURRENCY_CHOICES,
+        help=(
+            "Properly measure code using a concurrency library. "
+            "Valid values are: %s."
+        ) % ", ".join(CONCURRENCY_CHOICES),
+    )
+    debug = optparse.make_option(
+        '', '--debug', action='store', metavar="OPTS",
+        help="Debug options, separated by commas",
+    )
+    directory = optparse.make_option(
+        '-d', '--directory', action='store', metavar="DIR",
+        help="Write the output files to DIR.",
+    )
+    fail_under = optparse.make_option(
+        '', '--fail-under', action='store', metavar="MIN", type="int",
+        help="Exit with a status of 2 if the total coverage is less than MIN.",
+    )
+    help = optparse.make_option(
+        '-h', '--help', action='store_true',
+        help="Get help on this command.",
+    )
+    ignore_errors = optparse.make_option(
+        '-i', '--ignore-errors', action='store_true',
+        help="Ignore errors while reading source files.",
+    )
+    include = optparse.make_option(
+        '', '--include', action='store',
+        metavar="PAT1,PAT2,...",
+        help=(
+            "Include only files whose paths match one of these patterns. "
+            "Accepts shell-style wildcards, which must be quoted."
+        ),
+    )
+    pylib = optparse.make_option(
+        '-L', '--pylib', action='store_true',
+        help=(
+            "Measure coverage even inside the Python installed library, "
+            "which isn't done by default."
+        ),
+    )
+    show_missing = optparse.make_option(
+        '-m', '--show-missing', action='store_true',
+        help="Show line numbers of statements in each module that weren't executed.",
+    )
+    skip_covered = optparse.make_option(
+        '--skip-covered', action='store_true',
+        help="Skip files with 100% coverage.",
+    )
+    omit = optparse.make_option(
+        '', '--omit', action='store',
+        metavar="PAT1,PAT2,...",
+        help=(
+            "Omit files whose paths match one of these patterns. "
+            "Accepts shell-style wildcards, which must be quoted."
+        ),
+    )
+    output_xml = optparse.make_option(
+        '-o', '', action='store', dest="outfile",
+        metavar="OUTFILE",
+        help="Write the XML report to this file. Defaults to 'coverage.xml'",
+    )
+    parallel_mode = optparse.make_option(
+        '-p', '--parallel-mode', action='store_true',
+        help=(
+            "Append the machine name, process id and random number to the "
+            ".coverage data file name to simplify collecting data from "
+            "many processes."
+        ),
+    )
+    module = optparse.make_option(
+        '-m', '--module', action='store_true',
+        help=(
+            "<pyfile> is an importable Python module, not a script path, "
+            "to be run as 'python -m' would run it."
+        ),
+    )
+    rcfile = optparse.make_option(
+        '', '--rcfile', action='store',
+        help="Specify configuration file.  Defaults to '.coveragerc'",
+    )
+    source = optparse.make_option(
+        '', '--source', action='store', metavar="SRC1,SRC2,...",
+        help="A list of packages or directories of code to be measured.",
+    )
+    timid = optparse.make_option(
+        '', '--timid', action='store_true',
+        help=(
+            "Use a simpler but slower trace method.  Try this if you get "
+            "seemingly impossible results!"
+        ),
+    )
+    title = optparse.make_option(
+        '', '--title', action='store', metavar="TITLE",
+        help="A text string to use as the title on the HTML.",
+    )
+    version = optparse.make_option(
+        '', '--version', action='store_true',
+        help="Display version information and exit.",
+    )
+
+
+class CoverageOptionParser(optparse.OptionParser, object):
+    """Base OptionParser for coverage.py.
+
+    Problems don't exit the program.
+    Defaults are initialized for all options.
+
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(CoverageOptionParser, self).__init__(
+            add_help_option=False, *args, **kwargs
+            )
+        self.set_defaults(
+            action=None,
+            append=None,
+            branch=None,
+            concurrency=None,
+            debug=None,
+            directory=None,
+            fail_under=None,
+            help=None,
+            ignore_errors=None,
+            include=None,
+            module=None,
+            omit=None,
+            parallel_mode=None,
+            pylib=None,
+            rcfile=True,
+            show_missing=None,
+            skip_covered=None,
+            source=None,
+            timid=None,
+            title=None,
+            version=None,
+            )
+
+        self.disable_interspersed_args()
+        self.help_fn = self.help_noop
+
+    def help_noop(self, error=None, topic=None, parser=None):
+        """No-op help function."""
+        pass
+
+    class OptionParserError(Exception):
+        """Used to stop the optparse error handler ending the process."""
+        pass
+
+    def parse_args_ok(self, args=None, options=None):
+        """Call optparse.parse_args, but return a triple:
+
+        (ok, options, args)
+
+        """
+        try:
+            options, args = \
+                super(CoverageOptionParser, self).parse_args(args, options)
+        except self.OptionParserError:
+            return False, None, None
+        return True, options, args
+
+    def error(self, msg):
+        """Override optparse.error so sys.exit doesn't get called."""
+        self.help_fn(msg)
+        raise self.OptionParserError
+
+
+class GlobalOptionParser(CoverageOptionParser):
+    """Command-line parser for coverage.py global option arguments."""
+
+    def __init__(self):
+        super(GlobalOptionParser, self).__init__()
+
+        self.add_options([
+            Opts.help,
+            Opts.version,
+        ])
+
+
+class CmdOptionParser(CoverageOptionParser):
+    """Parse one of the new-style commands for coverage.py."""
+
+    def __init__(self, action, options=None, defaults=None, usage=None, description=None):
+        """Create an OptionParser for a coverage.py command.
+
+        `action` is the slug to put into `options.action`.
+        `options` is a list of Option's for the command.
+        `defaults` is a dict of default value for options.
+        `usage` is the usage string to display in help.
+        `description` is the description of the command, for the help text.
+
+        """
+        if usage:
+            usage = "%prog " + usage
+        super(CmdOptionParser, self).__init__(
+            usage=usage,
+            description=description,
+        )
+        self.set_defaults(action=action, **(defaults or {}))
+        if options:
+            self.add_options(options)
+        self.cmd = action
+
+    def __eq__(self, other):
+        # A convenience equality, so that I can put strings in unit test
+        # results, and they will compare equal to objects.
+        return (other == "<CmdOptionParser:%s>" % self.cmd)
+
+    def get_prog_name(self):
+        """Override of an undocumented function in optparse.OptionParser."""
+        program_name = super(CmdOptionParser, self).get_prog_name()
+
+        # Include the sub-command for this parser as part of the command.
+        return "%(command)s %(subcommand)s" % {'command': program_name, 'subcommand': self.cmd}
+
+
+GLOBAL_ARGS = [
+    Opts.debug,
+    Opts.help,
+    Opts.rcfile,
+    ]
+
+CMDS = {
+    'annotate': CmdOptionParser(
+        "annotate",
+        [
+            Opts.directory,
+            Opts.ignore_errors,
+            Opts.include,
+            Opts.omit,
+            ] + GLOBAL_ARGS,
+        usage="[options] [modules]",
+        description=(
+            "Make annotated copies of the given files, marking statements that are executed "
+            "with > and statements that are missed with !."
+        ),
+    ),
+
+    'combine': CmdOptionParser(
+        "combine",
+        GLOBAL_ARGS,
+        usage="<path1> <path2> ... <pathN>",
+        description=(
+            "Combine data from multiple coverage files collected "
+            "with 'run -p'.  The combined results are written to a single "
+            "file representing the union of the data. The positional "
+            "arguments are data files or directories containing data files. "
+            "If no paths are provided, data files in the default data file's "
+            "directory are combined."
+        ),
+    ),
+
+    'debug': CmdOptionParser(
+        "debug", GLOBAL_ARGS,
+        usage="<topic>",
+        description=(
+            "Display information on the internals of coverage.py, "
+            "for diagnosing problems. "
+            "Topics are 'data' to show a summary of the collected data, "
+            "or 'sys' to show installation information."
+        ),
+    ),
+
+    'erase': CmdOptionParser(
+        "erase", GLOBAL_ARGS,
+        usage=" ",
+        description="Erase previously collected coverage data.",
+    ),
+
+    'help': CmdOptionParser(
+        "help", GLOBAL_ARGS,
+        usage="[command]",
+        description="Describe how to use coverage.py",
+    ),
+
+    'html': CmdOptionParser(
+        "html",
+        [
+            Opts.directory,
+            Opts.fail_under,
+            Opts.ignore_errors,
+            Opts.include,
+            Opts.omit,
+            Opts.title,
+            ] + GLOBAL_ARGS,
+        usage="[options] [modules]",
+        description=(
+            "Create an HTML report of the coverage of the files.  "
+            "Each file gets its own page, with the source decorated to show "
+            "executed, excluded, and missed lines."
+        ),
+    ),
+
+    'report': CmdOptionParser(
+        "report",
+        [
+            Opts.fail_under,
+            Opts.ignore_errors,
+            Opts.include,
+            Opts.omit,
+            Opts.show_missing,
+            Opts.skip_covered,
+            ] + GLOBAL_ARGS,
+        usage="[options] [modules]",
+        description="Report coverage statistics on modules."
+    ),
+
+    'run': CmdOptionParser(
+        "run",
+        [
+            Opts.append,
+            Opts.branch,
+            Opts.concurrency,
+            Opts.include,
+            Opts.module,
+            Opts.omit,
+            Opts.pylib,
+            Opts.parallel_mode,
+            Opts.source,
+            Opts.timid,
+            ] + GLOBAL_ARGS,
+        usage="[options] <pyfile> [program options]",
+        description="Run a Python program, measuring code execution."
+    ),
+
+    'xml': CmdOptionParser(
+        "xml",
+        [
+            Opts.fail_under,
+            Opts.ignore_errors,
+            Opts.include,
+            Opts.omit,
+            Opts.output_xml,
+            ] + GLOBAL_ARGS,
+        usage="[options] [modules]",
+        description="Generate an XML report of coverage results."
+    ),
+}
+
+
+OK, ERR, FAIL_UNDER = 0, 1, 2
+
+
+class CoverageScript(object):
+    """The command-line interface to coverage.py."""
+
+    def __init__(self, _covpkg=None, _run_python_file=None,
+                 _run_python_module=None, _help_fn=None, _path_exists=None):
+        # _covpkg is for dependency injection, so we can test this code.
+        if _covpkg:
+            self.covpkg = _covpkg
+        else:
+            import coverage
+            self.covpkg = coverage
+
+        # For dependency injection:
+        self.run_python_file = _run_python_file or run_python_file
+        self.run_python_module = _run_python_module or run_python_module
+        self.help_fn = _help_fn or self.help
+        self.path_exists = _path_exists or os.path.exists
+        self.global_option = False
+
+        self.coverage = None
+
+        self.program_name = os.path.basename(sys.argv[0])
+        if env.WINDOWS:
+            # entry_points={'console_scripts':...} on Windows makes files
+            # called coverage.exe, coverage3.exe, and coverage-3.5.exe. These
+            # invoke coverage-script.py, coverage3-script.py, and
+            # coverage-3.5-script.py.  argv[0] is the .py file, but we want to
+            # get back to the original form.
+            auto_suffix = "-script.py"
+            if self.program_name.endswith(auto_suffix):
+                self.program_name = self.program_name[:-len(auto_suffix)]
+
+    def command_line(self, argv):
+        """The bulk of the command line interface to coverage.py.
+
+        `argv` is the argument list to process.
+
+        Returns 0 if all is well, 1 if something went wrong.
+
+        """
+        # Collect the command-line options.
+        if not argv:
+            self.help_fn(topic='minimum_help')
+            return OK
+
+        # The command syntax we parse depends on the first argument.  Global
+        # switch syntax always starts with an option.
+        self.global_option = argv[0].startswith('-')
+        if self.global_option:
+            parser = GlobalOptionParser()
+        else:
+            parser = CMDS.get(argv[0])
+            if not parser:
+                self.help_fn("Unknown command: '%s'" % argv[0])
+                return ERR
+            argv = argv[1:]
+
+        parser.help_fn = self.help_fn
+        ok, options, args = parser.parse_args_ok(argv)
+        if not ok:
+            return ERR
+
+        # Handle help and version.
+        if self.do_help(options, args, parser):
+            return OK
+
+        # Check for conflicts and problems in the options.
+        if not self.args_ok(options, args):
+            return ERR
+
+        # We need to be able to import from the current directory, because
+        # plugins may try, for example, to read Django settings.
+        sys.path[0] = ''
+
+        # Listify the list options.
+        source = unshell_list(options.source)
+        omit = unshell_list(options.omit)
+        include = unshell_list(options.include)
+        debug = unshell_list(options.debug)
+
+        # Do something.
+        self.coverage = self.covpkg.coverage(
+            data_suffix=options.parallel_mode,
+            cover_pylib=options.pylib,
+            timid=options.timid,
+            branch=options.branch,
+            config_file=options.rcfile,
+            source=source,
+            omit=omit,
+            include=include,
+            debug=debug,
+            concurrency=options.concurrency,
+            )
+
+        if options.action == "debug":
+            return self.do_debug(args)
+
+        elif options.action == "erase":
+            self.coverage.erase()
+            return OK
+
+        elif options.action == "run":
+            return self.do_run(options, args)
+
+        elif options.action == "combine":
+            self.coverage.load()
+            data_dirs = args or None
+            self.coverage.combine(data_dirs)
+            self.coverage.save()
+            return OK
+
+        # Remaining actions are reporting, with some common options.
+        report_args = dict(
+            morfs=unglob_args(args),
+            ignore_errors=options.ignore_errors,
+            omit=omit,
+            include=include,
+            )
+
+        self.coverage.load()
+
+        total = None
+        if options.action == "report":
+            total = self.coverage.report(
+                show_missing=options.show_missing,
+                skip_covered=options.skip_covered, **report_args)
+        elif options.action == "annotate":
+            self.coverage.annotate(
+                directory=options.directory, **report_args)
+        elif options.action == "html":
+            total = self.coverage.html_report(
+                directory=options.directory, title=options.title,
+                **report_args)
+        elif options.action == "xml":
+            outfile = options.outfile
+            total = self.coverage.xml_report(outfile=outfile, **report_args)
+
+        if total is not None:
+            # Apply the command line fail-under options, and then use the config
+            # value, so we can get fail_under from the config file.
+            if options.fail_under is not None:
+                self.coverage.set_option("report:fail_under", options.fail_under)
+
+            if self.coverage.get_option("report:fail_under"):
+
+                # Total needs to be rounded, but be careful of 0 and 100.
+                if 0 < total < 1:
+                    total = 1
+                elif 99 < total < 100:
+                    total = 99
+                else:
+                    total = round(total)
+
+                if total >= self.coverage.get_option("report:fail_under"):
+                    return OK
+                else:
+                    return FAIL_UNDER
+
+        return OK
+
+    def help(self, error=None, topic=None, parser=None):
+        """Display an error message, or the named topic."""
+        assert error or topic or parser
+        if error:
+            print(error)
+            print("Use '%s help' for help." % (self.program_name,))
+        elif parser:
+            print(parser.format_help().strip())
+        else:
+            help_params = dict(self.covpkg.__dict__)
+            help_params['program_name'] = self.program_name
+            help_msg = textwrap.dedent(HELP_TOPICS.get(topic, '')).strip()
+            if help_msg:
+                print(help_msg % help_params)
+            else:
+                print("Don't know topic %r" % topic)
+
+    def do_help(self, options, args, parser):
+        """Deal with help requests.
+
+        Return True if it handled the request, False if not.
+
+        """
+        # Handle help.
+        if options.help:
+            if self.global_option:
+                self.help_fn(topic='help')
+            else:
+                self.help_fn(parser=parser)
+            return True
+
+        if options.action == "help":
+            if args:
+                for a in args:
+                    parser = CMDS.get(a)
+                    if parser:
+                        self.help_fn(parser=parser)
+                    else:
+                        self.help_fn(topic=a)
+            else:
+                self.help_fn(topic='help')
+            return True
+
+        # Handle version.
+        if options.version:
+            self.help_fn(topic='version')
+            return True
+
+        return False
+
+    def args_ok(self, options, args):
+        """Check for conflicts and problems in the options.
+
+        Returns True if everything is OK, or False if not.
+
+        """
+        if options.action == "run" and not args:
+            self.help_fn("Nothing to do.")
+            return False
+
+        return True
+
+    def do_run(self, options, args):
+        """Implementation of 'coverage run'."""
+
+        if options.append and self.coverage.get_option("run:parallel"):
+            self.help_fn("Can't append to data files in parallel mode.")
+            return ERR
+
+        if not self.coverage.get_option("run:parallel"):
+            if not options.append:
+                self.coverage.erase()
+
+        # Run the script.
+        self.coverage.start()
+        code_ran = True
+        try:
+            if options.module:
+                self.run_python_module(args[0], args)
+            else:
+                filename = args[0]
+                self.run_python_file(filename, args)
+        except NoSource:
+            code_ran = False
+            raise
+        finally:
+            self.coverage.stop()
+            if code_ran:
+                if options.append:
+                    data_file = self.coverage.get_option("run:data_file")
+                    if self.path_exists(data_file):
+                        self.coverage.combine(data_paths=[data_file])
+                self.coverage.save()
+
+        return OK
+
+    def do_debug(self, args):
+        """Implementation of 'coverage debug'."""
+
+        if not args:
+            self.help_fn("What information would you like: data, sys?")
+            return ERR
+
+        for info in args:
+            if info == 'sys':
+                sys_info = self.coverage.sys_info()
+                print(info_header("sys"))
+                for line in info_formatter(sys_info):
+                    print(" %s" % line)
+            elif info == 'data':
+                self.coverage.load()
+                data = self.coverage.data
+                print(info_header("data"))
+                print("path: %s" % self.coverage.data_files.filename)
+                if data:
+                    print("has_arcs: %r" % data.has_arcs())
+                    summary = data.line_counts(fullpath=True)
+                    filenames = sorted(summary.keys())
+                    print("\n%d files:" % len(filenames))
+                    for f in filenames:
+                        line = "%s: %d lines" % (f, summary[f])
+                        plugin = data.file_tracer(f)
+                        if plugin:
+                            line += " [%s]" % plugin
+                        print(line)
+                else:
+                    print("No data collected")
+            else:
+                self.help_fn("Don't know what you mean by %r" % info)
+                return ERR
+
+        return OK
+
+
+def unshell_list(s):
+    """Turn a command-line argument into a list."""
+    if not s:
+        return None
+    if env.WINDOWS:
+        # When running coverage.py as coverage.exe, some of the behavior
+        # of the shell is emulated: wildcards are expanded into a list of
+        # file names.  So you have to single-quote patterns on the command
+        # line, but (not) helpfully, the single quotes are included in the
+        # argument, so we have to strip them off here.
+        s = s.strip("'")
+    return s.split(',')
+
+
+def unglob_args(args):
+    """Interpret shell wildcards for platforms that need it."""
+    if env.WINDOWS:
+        globbed = []
+        for arg in args:
+            if '?' in arg or '*' in arg:
+                globbed.extend(glob.glob(arg))
+            else:
+                globbed.append(arg)
+        args = globbed
+    return args
+
+
+HELP_TOPICS = {
+    'help': """\
+    Coverage.py, version %(__version__)s
+    Measure, collect, and report on code coverage in Python programs.
+
+    usage: %(program_name)s <command> [options] [args]
+
+    Commands:
+        annotate    Annotate source files with execution information.
+        combine     Combine a number of data files.
+        erase       Erase previously collected coverage data.
+        help        Get help on using coverage.py.
+        html        Create an HTML report.
+        report      Report coverage stats on modules.
+        run         Run a Python program and measure code execution.
+        xml         Create an XML report of coverage results.
+
+    Use "%(program_name)s help <command>" for detailed help on any command.
+    For full documentation, see %(__url__)s
+    """,
+
+    'minimum_help': """\
+    Code coverage for Python.  Use '%(program_name)s help' for help.
+    """,
+
+    'version': """\
+    Coverage.py, version %(__version__)s.
+    Documentation at %(__url__)s
+    """,
+}
+
+
+def main(argv=None):
+    """The main entry point to coverage.py.
+
+    This is installed as the script entry point.
+
+    """
+    if argv is None:
+        argv = sys.argv[1:]
+    try:
+        status = CoverageScript().command_line(argv)
+    except ExceptionDuringRun as err:
+        # An exception was caught while running the product code.  The
+        # sys.exc_info() return tuple is packed into an ExceptionDuringRun
+        # exception.
+        traceback.print_exception(*err.args)
+        status = ERR
+    except CoverageException as err:
+        # A controlled error inside coverage.py: print the message to the user.
+        print(err)
+        status = ERR
+    except SystemExit as err:
+        # The user called `sys.exit()`.  Exit with their argument, if any.
+        if err.args:
+            status = err.args[0]
+        else:
+            status = None
+    return status
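
Because main() both serves as the console-script entry point and returns the OK/ERR/FAIL_UNDER status codes defined above, the command-line layer can be driven from Python as well as from a shell. A hedged sketch, assuming a .coverage data file already exists in the current directory:

    # Sketch: invoking the CLI layer programmatically; the argument list
    # mirrors the shell command `coverage report --fail-under=90`.
    import sys
    from coverage.cmdline import main

    status = main(["report", "--fail-under", "90"])
    if status == 2:            # FAIL_UNDER: total coverage was below 90%
        print("coverage below the required threshold")
    sys.exit(status)
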
diff --git a/catapult/third_party/coverage/coverage/collector.py b/catapult/third_party/coverage/coverage/collector.py
new file mode 100644
index 0000000..0a43d87
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/collector.py
@@ -0,0 +1,326 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Raw data collector for coverage.py."""
+
+import os
+import sys
+
+from coverage import env
+from coverage.backward import iitems
+from coverage.files import abs_file
+from coverage.misc import CoverageException, isolate_module
+from coverage.pytracer import PyTracer
+
+os = isolate_module(os)
+
+
+try:
+    # Use the C extension code when we can, for speed.
+    from coverage.tracer import CTracer, CFileDisposition   # pylint: disable=no-name-in-module
+except ImportError:
+    # Couldn't import the C extension, maybe it isn't built.
+    if os.getenv('COVERAGE_TEST_TRACER') == 'c':
+        # During testing, we use the COVERAGE_TEST_TRACER environment variable
+        # to indicate that we've fiddled with the environment to test this
+        # fallback code.  If we thought we had a C tracer, but couldn't import
+        # it, then exit quickly and clearly instead of dribbling confusing
+        # errors. I'm using sys.exit here instead of an exception because an
+        # exception here causes all sorts of other noise in unittest.
+        sys.stderr.write("*** COVERAGE_TEST_TRACER is 'c' but can't import CTracer!\n")
+        sys.exit(1)
+    CTracer = None
+
+
+class FileDisposition(object):
+    """A simple value type for recording what to do with a file."""
+    pass
+
+
+class Collector(object):
+    """Collects trace data.
+
+    Creates a Tracer object for each thread, since they track stack
+    information.  Each Tracer points to the same shared data, contributing
+    traced data points.
+
+    When the Collector is started, it creates a Tracer for the current thread,
+    and installs a function to create Tracers for each new thread started.
+    When the Collector is stopped, all active Tracers are stopped.
+
+    Threads started while the Collector is stopped will never have Tracers
+    associated with them.
+
+    """
+
+    # The stack of active Collectors.  Collectors are added here when started,
+    # and popped when stopped.  Collectors on the stack are paused when not
+    # the top, and resumed when they become the top again.
+    _collectors = []
+
+    def __init__(self, should_trace, check_include, timid, branch, warn, concurrency):
+        """Create a collector.
+
+        `should_trace` is a function, taking a file name, and returning a
+        `coverage.FileDisposition object`.
+
+        `check_include` is a function taking a file name and a frame. It returns
+        a boolean: True if the file should be traced, False if not.
+
+        If `timid` is true, then a slower, simpler trace function will be
+        used.  This is important for some environments where manipulation of
+        tracing functions makes the faster, more sophisticated trace function
+        not operate properly.
+
+        If `branch` is true, then branches will be measured.  This involves
+        collecting data on which statements followed each other (arcs).  Use
+        `get_arc_data` to get the arc data.
+
+        `warn` is a warning function, taking a single string message argument,
+        to be used if a warning needs to be issued.
+
+        `concurrency` is a string indicating the concurrency library in use.
+        Valid values are "greenlet", "eventlet", "gevent", or "thread" (the
+        default).
+
+        """
+        self.should_trace = should_trace
+        self.check_include = check_include
+        self.warn = warn
+        self.branch = branch
+        self.threading = None
+        self.concurrency = concurrency
+
+        self.concur_id_func = None
+
+        try:
+            if concurrency == "greenlet":
+                import greenlet
+                self.concur_id_func = greenlet.getcurrent
+            elif concurrency == "eventlet":
+                import eventlet.greenthread     # pylint: disable=import-error,useless-suppression
+                self.concur_id_func = eventlet.greenthread.getcurrent
+            elif concurrency == "gevent":
+                import gevent                   # pylint: disable=import-error,useless-suppression
+                self.concur_id_func = gevent.getcurrent
+            elif concurrency == "thread" or not concurrency:
+                # It's important to import threading only if we need it.  If
+                # it's imported early, and the program being measured uses
+                # gevent, then gevent's monkey-patching won't work properly.
+                import threading
+                self.threading = threading
+            else:
+                raise CoverageException("Don't understand concurrency=%s" % concurrency)
+        except ImportError:
+            raise CoverageException(
+                "Couldn't trace with concurrency=%s, the module isn't installed." % concurrency
+            )
+
+        self.reset()
+
+        if timid:
+            # Being timid: use the simple Python trace function.
+            self._trace_class = PyTracer
+        else:
+            # Being fast: use the C Tracer if it is available, else the Python
+            # trace function.
+            self._trace_class = CTracer or PyTracer
+
+        if self._trace_class is CTracer:
+            self.file_disposition_class = CFileDisposition
+            self.supports_plugins = True
+        else:
+            self.file_disposition_class = FileDisposition
+            self.supports_plugins = False
+
+    def __repr__(self):
+        return "<Collector at 0x%x: %s>" % (id(self), self.tracer_name())
+
+    def tracer_name(self):
+        """Return the class name of the tracer we're using."""
+        return self._trace_class.__name__
+
+    def reset(self):
+        """Clear collected data, and prepare to collect more."""
+        # A dictionary mapping file names to dicts with line number keys (if not
+        # branch coverage), or mapping file names to dicts with line number
+        # pairs as keys (if branch coverage).
+        self.data = {}
+
+        # A dictionary mapping file names to file tracer plugin names that will
+        # handle them.
+        self.file_tracers = {}
+
+        # The .should_trace_cache attribute is a cache from file names to
+        # coverage.FileDisposition objects, or None.  When a file is first
+        # considered for tracing, a FileDisposition is obtained from
+        # Coverage.should_trace.  Its .trace attribute indicates whether the
+        # file should be traced or not.  If it should be, a plugin with dynamic
+        # file names can decide not to trace it based on the dynamic file name
+        # being excluded by the inclusion rules, in which case the
+        # FileDisposition will be replaced by None in the cache.
+        if env.PYPY:
+            import __pypy__                     # pylint: disable=import-error
+            # Alex Gaynor said:
+            # should_trace_cache is a strictly growing key: once a key is in
+            # it, it never changes.  Further, the keys used to access it are
+            # generally constant, given sufficient context. That is to say, at
+            # any given point _trace() is called, pypy is able to know the key.
+            # This is because the key is determined by the physical source code
+            # line, and that's invariant with the call site.
+            #
+            # This property of a dict with immutable keys, combined with
+            # call-site-constant keys is a match for PyPy's module dict,
+            # which is optimized for such workloads.
+            #
+            # This gives a 20% benefit on the workload described at
+            # https://bitbucket.org/pypy/pypy/issue/1871/10x-slower-than-cpython-under-coverage
+            self.should_trace_cache = __pypy__.newdict("module")
+        else:
+            self.should_trace_cache = {}
+
+        # Our active Tracers.
+        self.tracers = []
+
+    def _start_tracer(self):
+        """Start a new Tracer object, and store it in self.tracers."""
+        tracer = self._trace_class()
+        tracer.data = self.data
+        tracer.trace_arcs = self.branch
+        tracer.should_trace = self.should_trace
+        tracer.should_trace_cache = self.should_trace_cache
+        tracer.warn = self.warn
+
+        if hasattr(tracer, 'concur_id_func'):
+            tracer.concur_id_func = self.concur_id_func
+        elif self.concur_id_func:
+            raise CoverageException(
+                "Can't support concurrency=%s with %s, only threads are supported" % (
+                    self.concurrency, self.tracer_name(),
+                )
+            )
+
+        if hasattr(tracer, 'file_tracers'):
+            tracer.file_tracers = self.file_tracers
+        if hasattr(tracer, 'threading'):
+            tracer.threading = self.threading
+        if hasattr(tracer, 'check_include'):
+            tracer.check_include = self.check_include
+
+        fn = tracer.start()
+        self.tracers.append(tracer)
+
+        return fn
+
+    # The trace function has to be set individually on each thread before
+    # execution begins.  Ironically, the only support the threading module has
+    # for running code before the thread main is the tracing function.  So we
+    # install this as a trace function, and the first time it's called, it does
+    # the real trace installation.
+
+    def _installation_trace(self, frame, event, arg):
+        """Called on new threads, installs the real tracer."""
+        # Remove ourselves as the trace function.
+        sys.settrace(None)
+        # Install the real tracer.
+        fn = self._start_tracer()
+        # Invoke the real trace function with the current event, to be sure
+        # not to lose an event.
+        if fn:
+            fn = fn(frame, event, arg)
+        # Return the new trace function to continue tracing in this scope.
+        return fn
+
+    def start(self):
+        """Start collecting trace information."""
+        if self._collectors:
+            self._collectors[-1].pause()
+
+        # Check to see whether we had a fullcoverage tracer installed. If so,
+        # get the stack frames it stashed away for us.
+        traces0 = []
+        fn0 = sys.gettrace()
+        if fn0:
+            tracer0 = getattr(fn0, '__self__', None)
+            if tracer0:
+                traces0 = getattr(tracer0, 'traces', [])
+
+        try:
+            # Install the tracer on this thread.
+            fn = self._start_tracer()
+        except:
+            if self._collectors:
+                self._collectors[-1].resume()
+            raise
+
+        # If _start_tracer succeeded, then we add ourselves to the global
+        # stack of collectors.
+        self._collectors.append(self)
+
+        # Replay all the events from fullcoverage into the new trace function.
+        for args in traces0:
+            (frame, event, arg), lineno = args
+            try:
+                fn(frame, event, arg, lineno=lineno)
+            except TypeError:
+                raise Exception("fullcoverage must be run with the C trace function.")
+
+        # Install our installation tracer in threading, to jump start other
+        # threads.
+        if self.threading:
+            self.threading.settrace(self._installation_trace)
+
+    def stop(self):
+        """Stop collecting trace information."""
+        assert self._collectors
+        assert self._collectors[-1] is self, (
+            "Expected current collector to be %r, but it's %r" % (self, self._collectors[-1])
+        )
+
+        self.pause()
+        self.tracers = []
+
+        # Remove this Collector from the stack, and resume the one underneath
+        # (if any).
+        self._collectors.pop()
+        if self._collectors:
+            self._collectors[-1].resume()
+
+    def pause(self):
+        """Pause tracing, but be prepared to `resume`."""
+        for tracer in self.tracers:
+            tracer.stop()
+            stats = tracer.get_stats()
+            if stats:
+                print("\nCoverage.py tracer stats:")
+                for k in sorted(stats.keys()):
+                    print("%16s: %s" % (k, stats[k]))
+        if self.threading:
+            self.threading.settrace(None)
+
+    def resume(self):
+        """Resume tracing after a `pause`."""
+        for tracer in self.tracers:
+            tracer.start()
+        if self.threading:
+            self.threading.settrace(self._installation_trace)
+        else:
+            self._start_tracer()
+
+    def save_data(self, covdata):
+        """Save the collected data to a `CoverageData`.
+
+        Also resets the collector.
+
+        """
+        def abs_file_dict(d):
+            """Return a dict like d, but with keys modified by `abs_file`."""
+            return dict((abs_file(k), v) for k, v in iitems(d))
+
+        if self.branch:
+            covdata.add_arcs(abs_file_dict(self.data))
+        else:
+            covdata.add_lines(abs_file_dict(self.data))
+        covdata.add_file_tracers(abs_file_dict(self.file_tracers))
+
+        self.reset()
diff --git a/catapult/third_party/coverage/coverage/config.py b/catapult/third_party/coverage/coverage/config.py
new file mode 100644
index 0000000..cd66697
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/config.py
@@ -0,0 +1,365 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Config file for coverage.py"""
+
+import collections
+import os
+import re
+import sys
+
+from coverage.backward import configparser, iitems, string_class
+from coverage.misc import CoverageException, isolate_module
+
+os = isolate_module(os)
+
+
+class HandyConfigParser(configparser.RawConfigParser):
+    """Our specialization of ConfigParser."""
+
+    def __init__(self, section_prefix):
+        configparser.RawConfigParser.__init__(self)
+        self.section_prefix = section_prefix
+
+    def read(self, filename):
+        """Read a file name as UTF-8 configuration data."""
+        kwargs = {}
+        if sys.version_info >= (3, 2):
+            kwargs['encoding'] = "utf-8"
+        return configparser.RawConfigParser.read(self, filename, **kwargs)
+
+    def has_option(self, section, option):
+        section = self.section_prefix + section
+        return configparser.RawConfigParser.has_option(self, section, option)
+
+    def has_section(self, section):
+        section = self.section_prefix + section
+        return configparser.RawConfigParser.has_section(self, section)
+
+    def options(self, section):
+        section = self.section_prefix + section
+        return configparser.RawConfigParser.options(self, section)
+
+    def get_section(self, section):
+        """Get the contents of a section, as a dictionary."""
+        d = {}
+        for opt in self.options(section):
+            d[opt] = self.get(section, opt)
+        return d
+
+    def get(self, section, *args, **kwargs):
+        """Get a value, replacing environment variables also.
+
+        The arguments are the same as `RawConfigParser.get`, but in the found
+        value, ``$WORD`` or ``${WORD}`` are replaced by the value of the
+        environment variable ``WORD``.
+
+        Returns the finished value.
+
+        """
+        section = self.section_prefix + section
+        v = configparser.RawConfigParser.get(self, section, *args, **kwargs)
+        def dollar_replace(m):
+            """Called for each $replacement."""
+            # Only one of the groups will have matched, just get its text.
+            word = next(w for w in m.groups() if w is not None)     # pragma: part covered
+            if word == "$":
+                return "$"
+            else:
+                return os.environ.get(word, '')
+
+        dollar_pattern = r"""(?x)   # Use extended regex syntax
+            \$(?:                   # A dollar sign, then
+            (?P<v1>\w+) |           #   a plain word,
+            {(?P<v2>\w+)} |         #   or a {-wrapped word,
+            (?P<char>[$])           #   or a dollar sign.
+            )
+            """
+        v = re.sub(dollar_pattern, dollar_replace, v)
+        return v
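+        # An illustrative sketch of the expansion above (the option values
+        # shown are hypothetical, not from any real config file):
+        #
+        #     [run]
+        #     data_file = ${HOME}/.coverage
+        #     note = price is $$5
+        #
+        # get("run", "data_file") replaces ${HOME} (or $HOME) with the HOME
+        # environment variable, and "$$" becomes a literal "$".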
+
+    def getlist(self, section, option):
+        """Read a list of strings.
+
+        The value of `section` and `option` is treated as a comma- and newline-
+        separated list of strings.  Each value is stripped of whitespace.
+
+        Returns the list of strings.
+
+        """
+        value_list = self.get(section, option)
+        values = []
+        for value_line in value_list.split('\n'):
+            for value in value_line.split(','):
+                value = value.strip()
+                if value:
+                    values.append(value)
+        return values
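+        # An illustrative sketch (the option value below is hypothetical):
+        #
+        #     [run]
+        #     omit =
+        #         */tests/*,
+        #         setup.py
+        #
+        # getlist("run", "omit") -> ["*/tests/*", "setup.py"]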
+
+    def getregexlist(self, section, option):
+        """Read a list of full-line regexes.
+
+        The value of `section` and `option` is treated as a newline-separated
+        list of regexes.  Each value is stripped of whitespace.
+
+        Returns the list of strings.
+
+        """
+        line_list = self.get(section, option)
+        value_list = []
+        for value in line_list.splitlines():
+            value = value.strip()
+            try:
+                re.compile(value)
+            except re.error as e:
+                raise CoverageException(
+                    "Invalid [%s].%s value %r: %s" % (section, option, value, e)
+                )
+            if value:
+                value_list.append(value)
+        return value_list
+
+
+# The default line exclusion regexes.
+DEFAULT_EXCLUDE = [
+    r'(?i)#\s*pragma[:\s]?\s*no\s*cover',
+]
+
+# The default partial branch regexes, to be modified by the user.
+DEFAULT_PARTIAL = [
+    r'(?i)#\s*pragma[:\s]?\s*no\s*branch',
+]
+
+# The default partial branch regexes, based on Python semantics.
+# These are any Python branching constructs that can't actually execute all
+# their branches.
+DEFAULT_PARTIAL_ALWAYS = [
+    'while (True|1|False|0):',
+    'if (True|1|False|0):',
+]
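+# An illustrative sketch of what the defaults above match; the code lines are
+# hypothetical:
+#
+#     x = 3  # pragma: no cover     <- excluded by DEFAULT_EXCLUDE
+#     if x:  # pragma: no branch    <- partial branch via DEFAULT_PARTIAL
+#     while True:                   <- partial branch via DEFAULT_PARTIAL_ALWAYS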
+
+
+class CoverageConfig(object):
+    """Coverage.py configuration.
+
+    The attributes of this class are the various settings that control the
+    operation of coverage.py.
+
+    """
+    def __init__(self):
+        """Initialize the configuration attributes to their defaults."""
+        # Metadata about the config.
+        self.attempted_config_files = []
+        self.config_files = []
+
+        # Defaults for [run]
+        self.branch = False
+        self.concurrency = None
+        self.cover_pylib = False
+        self.data_file = ".coverage"
+        self.debug = []
+        self.note = None
+        self.parallel = False
+        self.plugins = []
+        self.source = None
+        self.timid = False
+
+        # Defaults for [report]
+        self.exclude_list = DEFAULT_EXCLUDE[:]
+        self.fail_under = 0
+        self.ignore_errors = False
+        self.include = None
+        self.omit = None
+        self.partial_always_list = DEFAULT_PARTIAL_ALWAYS[:]
+        self.partial_list = DEFAULT_PARTIAL[:]
+        self.precision = 0
+        self.show_missing = False
+        self.skip_covered = False
+
+        # Defaults for [html]
+        self.extra_css = None
+        self.html_dir = "htmlcov"
+        self.html_title = "Coverage report"
+
+        # Defaults for [xml]
+        self.xml_output = "coverage.xml"
+        self.xml_package_depth = 99
+
+        # Defaults for [paths]
+        self.paths = {}
+
+        # Options for plugins
+        self.plugin_options = {}
+
+    MUST_BE_LIST = ["omit", "include", "debug", "plugins"]
+
+    def from_args(self, **kwargs):
+        """Read config values from `kwargs`."""
+        for k, v in iitems(kwargs):
+            if v is not None:
+                if k in self.MUST_BE_LIST and isinstance(v, string_class):
+                    v = [v]
+                setattr(self, k, v)
+
+    def from_file(self, filename, section_prefix=""):
+        """Read configuration from a .rc file.
+
+        `filename` is a file name to read.
+
+        Returns True or False, whether the file could be read.
+
+        """
+        self.attempted_config_files.append(filename)
+
+        cp = HandyConfigParser(section_prefix)
+        try:
+            files_read = cp.read(filename)
+        except configparser.Error as err:
+            raise CoverageException("Couldn't read config file %s: %s" % (filename, err))
+        if not files_read:
+            return False
+
+        self.config_files.extend(files_read)
+
+        try:
+            for option_spec in self.CONFIG_FILE_OPTIONS:
+                self._set_attr_from_config_option(cp, *option_spec)
+        except ValueError as err:
+            raise CoverageException("Couldn't read config file %s: %s" % (filename, err))
+
+        # Check that there are no unrecognized options.
+        all_options = collections.defaultdict(set)
+        for option_spec in self.CONFIG_FILE_OPTIONS:
+            section, option = option_spec[1].split(":")
+            all_options[section].add(option)
+
+        for section, options in iitems(all_options):
+            if cp.has_section(section):
+                for unknown in set(cp.options(section)) - options:
+                    if section_prefix:
+                        section = section_prefix + section
+                    raise CoverageException(
+                        "Unrecognized option '[%s] %s=' in config file %s" % (
+                            section, unknown, filename
+                        )
+                    )
+
+        # [paths] is special
+        if cp.has_section('paths'):
+            for option in cp.options('paths'):
+                self.paths[option] = cp.getlist('paths', option)
+
+        # plugins can have options
+        for plugin in self.plugins:
+            if cp.has_section(plugin):
+                self.plugin_options[plugin] = cp.get_section(plugin)
+
+        return True
+
+    CONFIG_FILE_OPTIONS = [
+        # These are *args for _set_attr_from_config_option:
+        #   (attr, where, type_="")
+        #
+        #   attr is the attribute to set on the CoverageConfig object.
+        #   where is the section:name to read from the configuration file.
+        #   type_ is the optional type to apply, by using .getTYPE to read the
+        #       configuration value from the file.
+
+        # [run]
+        ('branch', 'run:branch', 'boolean'),
+        ('concurrency', 'run:concurrency'),
+        ('cover_pylib', 'run:cover_pylib', 'boolean'),
+        ('data_file', 'run:data_file'),
+        ('debug', 'run:debug', 'list'),
+        ('include', 'run:include', 'list'),
+        ('note', 'run:note'),
+        ('omit', 'run:omit', 'list'),
+        ('parallel', 'run:parallel', 'boolean'),
+        ('plugins', 'run:plugins', 'list'),
+        ('source', 'run:source', 'list'),
+        ('timid', 'run:timid', 'boolean'),
+
+        # [report]
+        ('exclude_list', 'report:exclude_lines', 'regexlist'),
+        ('fail_under', 'report:fail_under', 'int'),
+        ('ignore_errors', 'report:ignore_errors', 'boolean'),
+        ('include', 'report:include', 'list'),
+        ('omit', 'report:omit', 'list'),
+        ('partial_always_list', 'report:partial_branches_always', 'regexlist'),
+        ('partial_list', 'report:partial_branches', 'regexlist'),
+        ('precision', 'report:precision', 'int'),
+        ('show_missing', 'report:show_missing', 'boolean'),
+        ('skip_covered', 'report:skip_covered', 'boolean'),
+
+        # [html]
+        ('extra_css', 'html:extra_css'),
+        ('html_dir', 'html:directory'),
+        ('html_title', 'html:title'),
+
+        # [xml]
+        ('xml_output', 'xml:output'),
+        ('xml_package_depth', 'xml:package_depth', 'int'),
+    ]
+
+    def _set_attr_from_config_option(self, cp, attr, where, type_=''):
+        """Set an attribute on self if it exists in the ConfigParser."""
+        section, option = where.split(":")
+        if cp.has_option(section, option):
+            method = getattr(cp, 'get' + type_)
+            setattr(self, attr, method(section, option))
+
+    def get_plugin_options(self, plugin):
+        """Get a dictionary of options for the plugin named `plugin`."""
+        return self.plugin_options.get(plugin, {})
+
+    def set_option(self, option_name, value):
+        """Set an option in the configuration.
+
+        `option_name` is a colon-separated string indicating the section and
+        option name.  For example, the ``branch`` option in the ``[run]``
+        section of the config file would be indicated with `"run:branch"`.
+
+        `value` is the new value for the option.
+
+        """
+
+        # Check all the hard-coded options.
+        for option_spec in self.CONFIG_FILE_OPTIONS:
+            attr, where = option_spec[:2]
+            if where == option_name:
+                setattr(self, attr, value)
+                return
+
+        # See if it's a plugin option.
+        plugin_name, _, key = option_name.partition(":")
+        if key and plugin_name in self.plugins:
+            self.plugin_options.setdefault(plugin_name, {})[key] = value
+            return
+
+        # If we get here, we didn't find the option.
+        raise CoverageException("No such option: %r" % option_name)
+
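+    # A minimal usage sketch for set_option/get_option; the plugin name
+    # "myplugin" is hypothetical and only valid if it appears in self.plugins:
+    #
+    #     config = CoverageConfig()
+    #     config.set_option("run:branch", True)
+    #     config.get_option("run:branch")            # -> True
+    #     config.set_option("myplugin:key", "v")     # stored in plugin_options
+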
+    def get_option(self, option_name):
+        """Get an option from the configuration.
+
+        `option_name` is a colon-separated string indicating the section and
+        option name.  For example, the ``branch`` option in the ``[run]``
+        section of the config file would be indicated with `"run:branch"`.
+
+        Returns the value of the option.
+
+        """
+
+        # Check all the hard-coded options.
+        for option_spec in self.CONFIG_FILE_OPTIONS:
+            attr, where = option_spec[:2]
+            if where == option_name:
+                return getattr(self, attr)
+
+        # See if it's a plugin option.
+        plugin_name, _, key = option_name.partition(":")
+        if key and plugin_name in self.plugins:
+            return self.plugin_options.get(plugin_name, {}).get(key)
+
+        # If we get here, we didn't find the option.
+        raise CoverageException("No such option: %r" % option_name)
diff --git a/catapult/third_party/coverage/coverage/control.py b/catapult/third_party/coverage/coverage/control.py
new file mode 100644
index 0000000..0a5ccae
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/control.py
@@ -0,0 +1,1194 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Core control stuff for coverage.py."""
+
+import atexit
+import inspect
+import os
+import platform
+import re
+import sys
+import traceback
+
+from coverage import env, files
+from coverage.annotate import AnnotateReporter
+from coverage.backward import string_class, iitems
+from coverage.collector import Collector
+from coverage.config import CoverageConfig
+from coverage.data import CoverageData, CoverageDataFiles
+from coverage.debug import DebugControl
+from coverage.files import TreeMatcher, FnmatchMatcher
+from coverage.files import PathAliases, find_python_files, prep_patterns
+from coverage.files import ModuleMatcher, abs_file
+from coverage.html import HtmlReporter
+from coverage.misc import CoverageException, bool_or_none, join_regex
+from coverage.misc import file_be_gone, isolate_module
+from coverage.monkey import patch_multiprocessing
+from coverage.plugin import FileReporter
+from coverage.plugin_support import Plugins
+from coverage.python import PythonFileReporter
+from coverage.results import Analysis, Numbers
+from coverage.summary import SummaryReporter
+from coverage.xmlreport import XmlReporter
+
+os = isolate_module(os)
+
+# Pypy has some unusual stuff in the "stdlib".  Consider those locations
+# when deciding where the stdlib is.
+try:
+    import _structseq
+except ImportError:
+    _structseq = None
+
+
+class Coverage(object):
+    """Programmatic access to coverage.py.
+
+    To use::
+
+        from coverage import Coverage
+
+        cov = Coverage()
+        cov.start()
+        #.. call your code ..
+        cov.stop()
+        cov.html_report(directory='covhtml')
+
+    """
+    def __init__(
+        self, data_file=None, data_suffix=None, cover_pylib=None,
+        auto_data=False, timid=None, branch=None, config_file=True,
+        source=None, omit=None, include=None, debug=None,
+        concurrency=None,
+    ):
+        """
+        `data_file` is the base name of the data file to use, defaulting to
+        ".coverage".  `data_suffix` is appended (with a dot) to `data_file` to
+        create the final file name.  If `data_suffix` is simply True, then a
+        suffix is created with the machine and process identity included.
+
+        `cover_pylib` is a boolean determining whether Python code installed
+        with the Python interpreter is measured.  This includes the Python
+        standard library and any packages installed with the interpreter.
+
+        If `auto_data` is true, then any existing data file will be read when
+        coverage measurement starts, and data will be saved automatically when
+        measurement stops.
+
+        If `timid` is true, then a slower and simpler trace function will be
+        used.  This is important for some environments where manipulation of
+        tracing functions breaks the faster trace function.
+
+        If `branch` is true, then branch coverage will be measured in addition
+        to the usual statement coverage.
+
+        `config_file` determines what configuration file to read:
+
+            * If it is ".coveragerc", it is interpreted as if it were True,
+              for backward compatibility.
+
+            * If it is a string, it is the name of the file to read.  If the
+              file can't be read, it is an error.
+
+            * If it is True, then a few standard file names are tried
+              (".coveragerc", "setup.cfg").  It is not an error for these files
+              to not be found.
+
+            * If it is False, then no configuration file is read.
+
+        `source` is a list of file paths or package names.  Only code located
+        in the trees indicated by the file paths or package names will be
+        measured.
+
+        `include` and `omit` are lists of file name patterns. Files that match
+        `include` will be measured, files that match `omit` will not.  Each
+        will also accept a single string argument.
+
+        `debug` is a list of strings indicating what debugging information is
+        desired.
+
+        `concurrency` is a string indicating the concurrency library being used
+        in the measured code.  Without this, coverage.py will get incorrect
+        results.  Valid strings are "greenlet", "eventlet", "gevent",
+        "multiprocessing", or "thread" (the default).
+
+        .. versionadded:: 4.0
+            The `concurrency` parameter.
+
+        """
+        # Build our configuration from a number of sources:
+        # 1: defaults:
+        self.config = CoverageConfig()
+
+        # 2: from the rcfile, .coveragerc or setup.cfg file:
+        if config_file:
+            did_read_rc = False
+            # Some API users were specifying ".coveragerc" to mean the same as
+            # True, so make it so.
+            if config_file == ".coveragerc":
+                config_file = True
+            specified_file = (config_file is not True)
+            if not specified_file:
+                config_file = ".coveragerc"
+
+            did_read_rc = self.config.from_file(config_file)
+
+            if not did_read_rc:
+                if specified_file:
+                    raise CoverageException(
+                        "Couldn't read '%s' as a config file" % config_file
+                        )
+                self.config.from_file("setup.cfg", section_prefix="coverage:")
+
+        # 3: from environment variables:
+        env_data_file = os.environ.get('COVERAGE_FILE')
+        if env_data_file:
+            self.config.data_file = env_data_file
+        debugs = os.environ.get('COVERAGE_DEBUG')
+        if debugs:
+            self.config.debug.extend(debugs.split(","))
+
+        # 4: from constructor arguments:
+        self.config.from_args(
+            data_file=data_file, cover_pylib=cover_pylib, timid=timid,
+            branch=branch, parallel=bool_or_none(data_suffix),
+            source=source, omit=omit, include=include, debug=debug,
+            concurrency=concurrency,
+            )
+
+        self._debug_file = None
+        self._auto_data = auto_data
+        self._data_suffix = data_suffix
+
+        # The matchers for _should_trace.
+        self.source_match = None
+        self.source_pkgs_match = None
+        self.pylib_match = self.cover_match = None
+        self.include_match = self.omit_match = None
+
+        # Is it ok for no data to be collected?
+        self._warn_no_data = True
+        self._warn_unimported_source = True
+
+        # A record of all the warnings that have been issued.
+        self._warnings = []
+
+        # Other instance attributes, set later.
+        self.omit = self.include = self.source = None
+        self.source_pkgs = None
+        self.data = self.data_files = self.collector = None
+        self.plugins = None
+        self.pylib_dirs = self.cover_dirs = None
+        self.data_suffix = self.run_suffix = None
+        self._exclude_re = None
+        self.debug = None
+
+        # State machine variables:
+        # Have we initialized everything?
+        self._inited = False
+        # Have we started collecting and not stopped it?
+        self._started = False
+        # Have we measured some data and not harvested it?
+        self._measured = False
+
+    def _init(self):
+        """Set all the initial state.
+
+        This is called by the public methods to initialize state. This lets us
+        construct a :class:`Coverage` object, then tweak its state before this
+        function is called.
+
+        """
+        if self._inited:
+            return
+
+        # Create and configure the debugging controller. COVERAGE_DEBUG_FILE
+        # is an environment variable, the name of a file to append debug logs
+        # to.
+        if self._debug_file is None:
+            debug_file_name = os.environ.get("COVERAGE_DEBUG_FILE")
+            if debug_file_name:
+                self._debug_file = open(debug_file_name, "a")
+            else:
+                self._debug_file = sys.stderr
+        self.debug = DebugControl(self.config.debug, self._debug_file)
+
+        # Load plugins
+        self.plugins = Plugins.load_plugins(self.config.plugins, self.config, self.debug)
+
+        # _exclude_re is a dict that maps exclusion list names to compiled
+        # regexes.
+        self._exclude_re = {}
+        self._exclude_regex_stale()
+
+        files.set_relative_directory()
+
+        # The source argument can be directories or package names.
+        self.source = []
+        self.source_pkgs = []
+        for src in self.config.source or []:
+            if os.path.exists(src):
+                self.source.append(files.canonical_filename(src))
+            else:
+                self.source_pkgs.append(src)
+
+        self.omit = prep_patterns(self.config.omit)
+        self.include = prep_patterns(self.config.include)
+
+        concurrency = self.config.concurrency
+        if concurrency == "multiprocessing":
+            patch_multiprocessing()
+            concurrency = None
+
+        self.collector = Collector(
+            should_trace=self._should_trace,
+            check_include=self._check_include_omit_etc,
+            timid=self.config.timid,
+            branch=self.config.branch,
+            warn=self._warn,
+            concurrency=concurrency,
+            )
+
+        # Early warning if we aren't going to be able to support plugins.
+        if self.plugins.file_tracers and not self.collector.supports_plugins:
+            self._warn(
+                "Plugin file tracers (%s) aren't supported with %s" % (
+                    ", ".join(
+                        plugin._coverage_plugin_name
+                            for plugin in self.plugins.file_tracers
+                        ),
+                    self.collector.tracer_name(),
+                    )
+                )
+            for plugin in self.plugins.file_tracers:
+                plugin._coverage_enabled = False
+
+        # Suffixes are a bit tricky.  We want to use the data suffix only when
+        # collecting data, not when combining data.  So we save it as
+        # `self.run_suffix` now, and promote it to `self.data_suffix` if we
+        # find that we are collecting data later.
+        if self._data_suffix or self.config.parallel:
+            if not isinstance(self._data_suffix, string_class):
+                # if data_suffix=True, use .machinename.pid.random
+                self._data_suffix = True
+        else:
+            self._data_suffix = None
+        self.data_suffix = None
+        self.run_suffix = self._data_suffix
+
+        # Create the data file.  We do this at construction time so that the
+        # data file will be written into the directory where the process
+        # started rather than wherever the process eventually chdir'd to.
+        self.data = CoverageData(debug=self.debug)
+        self.data_files = CoverageDataFiles(basename=self.config.data_file, warn=self._warn)
+
+        # The directories for files considered "installed with the interpreter".
+        self.pylib_dirs = set()
+        if not self.config.cover_pylib:
+            # Look at where some standard modules are located. That's the
+            # indication for "installed with the interpreter". In some
+            # environments (virtualenv, for example), these modules may be
+            # spread across a few locations. Look at all the candidate modules
+            # we've imported, and take all the different ones.
+            for m in (atexit, inspect, os, platform, re, _structseq, traceback):
+                if m is not None and hasattr(m, "__file__"):
+                    self.pylib_dirs.add(self._canonical_dir(m))
+            if _structseq and not hasattr(_structseq, '__file__'):
+                # PyPy 2.4 has no __file__ in the builtin modules, but the code
+                # objects still have the file names.  So dig into one to find
+                # the path to exclude.
+                structseq_new = _structseq.structseq_new
+                try:
+                    structseq_file = structseq_new.func_code.co_filename
+                except AttributeError:
+                    structseq_file = structseq_new.__code__.co_filename
+                self.pylib_dirs.add(self._canonical_dir(structseq_file))
+
+        # To avoid tracing the coverage.py code itself, we skip anything
+        # located where we are.
+        self.cover_dirs = [self._canonical_dir(__file__)]
+        if env.TESTING:
+            # When testing, we use PyContracts, which should be considered
+            # part of coverage.py, and it uses six. Exclude those directories
+            # just as we exclude ourselves.
+            import contracts, six
+            for mod in [contracts, six]:
+                self.cover_dirs.append(self._canonical_dir(mod))
+
+        # Set the reporting precision.
+        Numbers.set_precision(self.config.precision)
+
+        atexit.register(self._atexit)
+
+        self._inited = True
+
+        # Create the matchers we need for _should_trace
+        if self.source or self.source_pkgs:
+            self.source_match = TreeMatcher(self.source)
+            self.source_pkgs_match = ModuleMatcher(self.source_pkgs)
+        else:
+            if self.cover_dirs:
+                self.cover_match = TreeMatcher(self.cover_dirs)
+            if self.pylib_dirs:
+                self.pylib_match = TreeMatcher(self.pylib_dirs)
+        if self.include:
+            self.include_match = FnmatchMatcher(self.include)
+        if self.omit:
+            self.omit_match = FnmatchMatcher(self.omit)
+
+        # The user may want to debug things, show info if desired.
+        wrote_any = False
+        if self.debug.should('config'):
+            config_info = sorted(self.config.__dict__.items())
+            self.debug.write_formatted_info("config", config_info)
+            wrote_any = True
+
+        if self.debug.should('sys'):
+            self.debug.write_formatted_info("sys", self.sys_info())
+            for plugin in self.plugins:
+                header = "sys: " + plugin._coverage_plugin_name
+                info = plugin.sys_info()
+                self.debug.write_formatted_info(header, info)
+            wrote_any = True
+
+        if wrote_any:
+            self.debug.write_formatted_info("end", ())
+
+    def _canonical_dir(self, morf):
+        """Return the canonical directory of the module or file `morf`."""
+        morf_filename = PythonFileReporter(morf, self).filename
+        return os.path.split(morf_filename)[0]
+
+    def _source_for_file(self, filename):
+        """Return the source file for `filename`.
+
+        Given a file name being traced, return the best guess as to the source
+        file to attribute it to.
+
+        """
+        if filename.endswith(".py"):
+            # .py files are themselves source files.
+            return filename
+
+        elif filename.endswith((".pyc", ".pyo")):
+            # Bytecode files probably have source files near them.
+            py_filename = filename[:-1]
+            if os.path.exists(py_filename):
+                # Found a .py file, use that.
+                return py_filename
+            if env.WINDOWS:
+                # On Windows, it could be a .pyw file.
+                pyw_filename = py_filename + "w"
+                if os.path.exists(pyw_filename):
+                    return pyw_filename
+            # Didn't find source, but it's probably the .py file we want.
+            return py_filename
+
+        elif filename.endswith("$py.class"):
+            # Jython is easy to guess.
+            return filename[:-9] + ".py"
+
+        # No idea, just use the file name as-is.
+        return filename
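+        # Illustrative mappings made by the guesses above (the file names are
+        # hypothetical):
+        #
+        #     "pkg/mod.py"       -> "pkg/mod.py"
+        #     "pkg/mod.pyc"      -> "pkg/mod.py"   (or "pkg/mod.pyw" on Windows
+        #                                           when only the .pyw exists)
+        #     "pkg/mod$py.class" -> "pkg/mod.py"   (Jython)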
+
+    def _name_for_module(self, module_globals, filename):
+        """Get the name of the module for a set of globals and file name.
+
+        For configurability's sake, we allow __main__ modules to be matched by
+        their importable name.
+
+        If loaded via runpy (aka -m), we can usually recover the "original"
+        full dotted module name, otherwise, we resort to interpreting the
+        file name to get the module's name.  In the case that the module name
+        can't be determined, None is returned.
+
+        """
+        dunder_name = module_globals.get('__name__', None)
+
+        if isinstance(dunder_name, str) and dunder_name != '__main__':
+            # This is the usual case: an imported module.
+            return dunder_name
+
+        loader = module_globals.get('__loader__', None)
+        for attrname in ('fullname', 'name'):   # attribute renamed in py3.2
+            if hasattr(loader, attrname):
+                fullname = getattr(loader, attrname)
+            else:
+                continue
+
+            if isinstance(fullname, str) and fullname != '__main__':
+                # Module loaded via: runpy -m
+                return fullname
+
+        # Script as first argument to Python command line.
+        inspectedname = inspect.getmodulename(filename)
+        if inspectedname is not None:
+            return inspectedname
+        else:
+            return dunder_name
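+        # Illustrative outcomes (the module and file names are hypothetical):
+        #
+        #     import mypkg.mod          -> "mypkg.mod"  (from __name__)
+        #     python -m mypkg.mod       -> "mypkg.mod"  (from the loader)
+        #     python path/to/script.py  -> "script"     (from the file name)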
+
+    def _should_trace_internal(self, filename, frame):
+        """Decide whether to trace execution in `filename`, with a reason.
+
+        This function is called from the trace function.  As each new file name
+        is encountered, this function determines whether it is traced or not.
+
+        Returns a FileDisposition object.
+
+        """
+        original_filename = filename
+        disp = _disposition_init(self.collector.file_disposition_class, filename)
+
+        def nope(disp, reason):
+            """Simple helper to make it easy to return NO."""
+            disp.trace = False
+            disp.reason = reason
+            return disp
+
+        # Compiled Python files have two file names: frame.f_code.co_filename is
+        # the file name at the time the .pyc was compiled.  The second name is
+        # __file__, which is where the .pyc was actually loaded from.  Since
+        # .pyc files can be moved after compilation (for example, by being
+        # installed), we look for __file__ in the frame and prefer it to the
+        # co_filename value.
+        dunder_file = frame.f_globals.get('__file__')
+        if dunder_file:
+            filename = self._source_for_file(dunder_file)
+            if original_filename and not original_filename.startswith('<'):
+                orig = os.path.basename(original_filename)
+                if orig != os.path.basename(filename):
+                    # Files shouldn't be renamed when moved. This happens when
+                    # exec'ing code.  If it seems like something is wrong with
+                    # the frame's file name, then just use the original.
+                    filename = original_filename
+
+        if not filename:
+            # Empty string is pretty useless.
+            return nope(disp, "empty string isn't a file name")
+
+        if filename.startswith('memory:'):
+            return nope(disp, "memory isn't traceable")
+
+        if filename.startswith('<'):
+            # Lots of non-file execution is represented with artificial
+            # file names like "<string>", "<doctest readme.txt[0]>", or
+            # "<exec_function>".  Don't ever trace these executions, since we
+            # can't do anything with the data later anyway.
+            return nope(disp, "not a real file name")
+
+        # pyexpat does a dumb thing, calling the trace function explicitly from
+        # C code with a C file name.
+        if re.search(r"[/\\]Modules[/\\]pyexpat.c", filename):
+            return nope(disp, "pyexpat lies about itself")
+
+        # Jython reports the .class file to the tracer, use the source file.
+        if filename.endswith("$py.class"):
+            filename = filename[:-9] + ".py"
+
+        canonical = files.canonical_filename(filename)
+        disp.canonical_filename = canonical
+
+        # Try the plugins, see if they have an opinion about the file.
+        plugin = None
+        for plugin in self.plugins.file_tracers:
+            if not plugin._coverage_enabled:
+                continue
+
+            try:
+                file_tracer = plugin.file_tracer(canonical)
+                if file_tracer is not None:
+                    file_tracer._coverage_plugin = plugin
+                    disp.trace = True
+                    disp.file_tracer = file_tracer
+                    if file_tracer.has_dynamic_source_filename():
+                        disp.has_dynamic_filename = True
+                    else:
+                        disp.source_filename = files.canonical_filename(
+                            file_tracer.source_filename()
+                        )
+                    break
+            except Exception:
+                self._warn(
+                    "Disabling plugin %r due to an exception:" % (
+                        plugin._coverage_plugin_name
+                    )
+                )
+                traceback.print_exc()
+                plugin._coverage_enabled = False
+                continue
+        else:
+            # No plugin wanted it: it's Python.
+            disp.trace = True
+            disp.source_filename = canonical
+
+        if not disp.has_dynamic_filename:
+            if not disp.source_filename:
+                raise CoverageException(
+                    "Plugin %r didn't set source_filename for %r" %
+                    (plugin, disp.original_filename)
+                )
+            reason = self._check_include_omit_etc_internal(
+                disp.source_filename, frame,
+            )
+            if reason:
+                nope(disp, reason)
+
+        return disp
+
+    def _check_include_omit_etc_internal(self, filename, frame):
+        """Check a file name against the include, omit, etc, rules.
+
+        Returns a string or None.  String means, don't trace, and is the reason
+        why.  None means no reason found to not trace.
+
+        """
+        modulename = self._name_for_module(frame.f_globals, filename)
+
+        # If the user specified source or include, then that's authoritative
+        # about the outer bound of what to measure and we don't have to apply
+        # any canned exclusions. If they didn't, then we have to exclude the
+        # stdlib and coverage.py directories.
+        if self.source_match:
+            if self.source_pkgs_match.match(modulename):
+                if modulename in self.source_pkgs:
+                    self.source_pkgs.remove(modulename)
+                return None  # There's no reason to skip this file.
+
+            if not self.source_match.match(filename):
+                return "falls outside the --source trees"
+        elif self.include_match:
+            if not self.include_match.match(filename):
+                return "falls outside the --include trees"
+        else:
+            # If we aren't supposed to trace installed code, then check if this
+            # is near the Python standard library and skip it if so.
+            if self.pylib_match and self.pylib_match.match(filename):
+                return "is in the stdlib"
+
+            # We exclude the coverage.py code itself, since a little of it
+            # will be measured otherwise.
+            if self.cover_match and self.cover_match.match(filename):
+                return "is part of coverage.py"
+
+        # Check the file against the omit pattern.
+        if self.omit_match and self.omit_match.match(filename):
+            return "is inside an --omit pattern"
+
+        # No reason found to skip this file.
+        return None
+
+    def _should_trace(self, filename, frame):
+        """Decide whether to trace execution in `filename`.
+
+        Calls `_should_trace_internal`, and returns the FileDisposition.
+
+        """
+        disp = self._should_trace_internal(filename, frame)
+        if self.debug.should('trace'):
+            self.debug.write(_disposition_debug_msg(disp))
+        return disp
+
+    def _check_include_omit_etc(self, filename, frame):
+        """Check a file name against the include/omit/etc, rules, verbosely.
+
+        Returns a boolean: True if the file should be traced, False if not.
+
+        """
+        reason = self._check_include_omit_etc_internal(filename, frame)
+        if self.debug.should('trace'):
+            if not reason:
+                msg = "Including %r" % (filename,)
+            else:
+                msg = "Not including %r: %s" % (filename, reason)
+            self.debug.write(msg)
+
+        return not reason
+
+    def _warn(self, msg):
+        """Use `msg` as a warning."""
+        self._warnings.append(msg)
+        if self.debug.should('pid'):
+            msg = "[%d] %s" % (os.getpid(), msg)
+        sys.stderr.write("Coverage.py warning: %s\n" % msg)
+
+    def get_option(self, option_name):
+        """Get an option from the configuration.
+
+        `option_name` is a colon-separated string indicating the section and
+        option name.  For example, the ``branch`` option in the ``[run]``
+        section of the config file would be indicated with `"run:branch"`.
+
+        Returns the value of the option.
+
+        .. versionadded:: 4.0
+
+        """
+        return self.config.get_option(option_name)
+
+    def set_option(self, option_name, value):
+        """Set an option in the configuration.
+
+        `option_name` is a colon-separated string indicating the section and
+        option name.  For example, the ``branch`` option in the ``[run]``
+        section of the config file would be indicated with ``"run:branch"``.
+
+        `value` is the new value for the option.  This should be a Python
+        value where appropriate.  For example, use True for booleans, not the
+        string ``"True"``.
+
+        As an example, calling::
+
+            cov.set_option("run:branch", True)
+
+        has the same effect as this configuration file::
+
+            [run]
+            branch = True
+
+        .. versionadded:: 4.0
+
+        """
+        self.config.set_option(option_name, value)
+
+    def use_cache(self, usecache):
+        """Obsolete method."""
+        self._init()
+        if not usecache:
+            self._warn("use_cache(False) is no longer supported.")
+
+    def load(self):
+        """Load previously-collected coverage data from the data file."""
+        self._init()
+        self.collector.reset()
+        self.data_files.read(self.data)
+
+    def start(self):
+        """Start measuring code coverage.
+
+        Coverage measurement actually occurs in functions called after
+        :meth:`start` is invoked.  Statements in the same scope as
+        :meth:`start` won't be measured.
+
+        Once you invoke :meth:`start`, you must also call :meth:`stop`
+        eventually, or your process might not shut down cleanly.
+
+        """
+        self._init()
+        if self.run_suffix:
+            # Calling start() means we're running code, so use the run_suffix
+            # as the data_suffix when we eventually save the data.
+            self.data_suffix = self.run_suffix
+        if self._auto_data:
+            self.load()
+
+        self.collector.start()
+        self._started = True
+        self._measured = True
+
+    def stop(self):
+        """Stop measuring code coverage."""
+        if self._started:
+            self.collector.stop()
+        self._started = False
+
+    def _atexit(self):
+        """Clean up on process shutdown."""
+        if self._started:
+            self.stop()
+        if self._auto_data:
+            self.save()
+
+    def erase(self):
+        """Erase previously-collected coverage data.
+
+        This removes the in-memory data collected in this session as well as
+        discarding the data file.
+
+        """
+        self._init()
+        self.collector.reset()
+        self.data.erase()
+        self.data_files.erase(parallel=self.config.parallel)
+
+    def clear_exclude(self, which='exclude'):
+        """Clear the exclude list."""
+        self._init()
+        setattr(self.config, which + "_list", [])
+        self._exclude_regex_stale()
+
+    def exclude(self, regex, which='exclude'):
+        """Exclude source lines from execution consideration.
+
+        A number of lists of regular expressions are maintained.  Each list
+        selects lines that are treated differently during reporting.
+
+        `which` determines which list is modified.  The "exclude" list selects
+        lines that are not considered executable at all.  The "partial" list
+        indicates lines with branches that are not taken.
+
+        `regex` is a regular expression.  The regex is added to the specified
+        list.  If any of the regexes in the list is found in a line, the line
+        is marked for special treatment during reporting.
+
+        """
+        self._init()
+        excl_list = getattr(self.config, which + "_list")
+        excl_list.append(regex)
+        self._exclude_regex_stale()
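+        # A minimal usage sketch (the regexes here are hypothetical):
+        #
+        #     cov = Coverage()
+        #     cov.exclude(r'def __repr__')                # treated as not executable
+        #     cov.exclude(r'if debug:', which='partial')  # branches may be untaken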
+
+    def _exclude_regex_stale(self):
+        """Drop all the compiled exclusion regexes, a list was modified."""
+        self._exclude_re.clear()
+
+    def _exclude_regex(self, which):
+        """Return a compiled regex for the given exclusion list."""
+        if which not in self._exclude_re:
+            excl_list = getattr(self.config, which + "_list")
+            self._exclude_re[which] = join_regex(excl_list)
+        return self._exclude_re[which]
+
+    def get_exclude_list(self, which='exclude'):
+        """Return a list of excluded regex patterns.
+
+        `which` indicates which list is desired.  See :meth:`exclude` for the
+        lists that are available, and their meaning.
+
+        """
+        self._init()
+        return getattr(self.config, which + "_list")
+
+    def save(self):
+        """Save the collected coverage data to the data file."""
+        self._init()
+        self.get_data()
+        self.data_files.write(self.data, suffix=self.data_suffix)
+
+    def combine(self, data_paths=None):
+        """Combine together a number of similarly-named coverage data files.
+
+        All coverage data files whose name starts with `data_file` (from the
+        coverage() constructor) will be read, and combined together into the
+        current measurements.
+
+        `data_paths` is a list of files or directories from which data should
+        be combined. If no list is passed, then the data files from the
+        directory indicated by the current data file (probably the current
+        directory) will be combined.
+
+        .. versionadded:: 4.0
+            The `data_paths` parameter.
+
+        """
+        self._init()
+        self.get_data()
+
+        aliases = None
+        if self.config.paths:
+            aliases = PathAliases()
+            for paths in self.config.paths.values():
+                result = paths[0]
+                for pattern in paths[1:]:
+                    aliases.add(pattern, result)
+
+        self.data_files.combine_parallel_data(self.data, aliases=aliases, data_paths=data_paths)
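+        # An illustrative sketch: parallel runs write files named like
+        # ".coverage.myhost.1234.123456" (machine name, pid, random), and
+        # combine() folds them back into the current data (the paths below are
+        # hypothetical):
+        #
+        #     cov = Coverage()
+        #     cov.combine()                        # data files next to .coverage
+        #     cov.combine(["build/", "ci.data"])   # or explicit files/directories
+        #     cov.save()                           # then write the combined data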
+
+    def get_data(self):
+        """Get the collected data and reset the collector.
+
+        Also warn about various problems collecting data.
+
+        Returns a :class:`coverage.CoverageData`, the collected coverage data.
+
+        .. versionadded:: 4.0
+
+        """
+        self._init()
+        if not self._measured:
+            return self.data
+
+        self.collector.save_data(self.data)
+
+        # If there are still entries in the source_pkgs list, then we never
+        # encountered those packages.
+        if self._warn_unimported_source:
+            for pkg in self.source_pkgs:
+                if pkg not in sys.modules:
+                    self._warn("Module %s was never imported." % pkg)
+                elif not (
+                    hasattr(sys.modules[pkg], '__file__') and
+                    os.path.exists(sys.modules[pkg].__file__)
+                ):
+                    self._warn("Module %s has no Python source." % pkg)
+                else:
+                    self._warn("Module %s was previously imported, but not measured." % pkg)
+
+        # Find out if we got any data.
+        if not self.data and self._warn_no_data:
+            self._warn("No data was collected.")
+
+        # Find files that were never executed at all.
+        for src in self.source:
+            for py_file in find_python_files(src):
+                py_file = files.canonical_filename(py_file)
+
+                if self.omit_match and self.omit_match.match(py_file):
+                    # Turns out this file was omitted, so don't pull it back
+                    # in as unexecuted.
+                    continue
+
+                self.data.touch_file(py_file)
+
+        if self.config.note:
+            self.data.add_run_info(note=self.config.note)
+
+        self._measured = False
+        return self.data
+
+    # Backward compatibility with version 1.
+    def analysis(self, morf):
+        """Like `analysis2` but doesn't return excluded line numbers."""
+        f, s, _, m, mf = self.analysis2(morf)
+        return f, s, m, mf
+
+    def analysis2(self, morf):
+        """Analyze a module.
+
+        `morf` is a module or a file name.  It will be analyzed to determine
+        its coverage statistics.  The return value is a 5-tuple:
+
+        * The file name for the module.
+        * A list of line numbers of executable statements.
+        * A list of line numbers of excluded statements.
+        * A list of line numbers of statements not run (missing from
+          execution).
+        * A readable formatted string of the missing line numbers.
+
+        The analysis uses the source file itself and the current measured
+        coverage data.
+
+        """
+        self._init()
+        analysis = self._analyze(morf)
+        return (
+            analysis.filename,
+            sorted(analysis.statements),
+            sorted(analysis.excluded),
+            sorted(analysis.missing),
+            analysis.missing_formatted(),
+            )
+
+    def _analyze(self, it):
+        """Analyze a single morf or code unit.
+
+        Returns an `Analysis` object.
+
+        """
+        self.get_data()
+        if not isinstance(it, FileReporter):
+            it = self._get_file_reporter(it)
+
+        return Analysis(self.data, it)
+
+    def _get_file_reporter(self, morf):
+        """Get a FileReporter for a module or file name."""
+        plugin = None
+        file_reporter = "python"
+
+        if isinstance(morf, string_class):
+            abs_morf = abs_file(morf)
+            plugin_name = self.data.file_tracer(abs_morf)
+            if plugin_name:
+                plugin = self.plugins.get(plugin_name)
+
+        if plugin:
+            file_reporter = plugin.file_reporter(abs_morf)
+            if file_reporter is None:
+                raise CoverageException(
+                    "Plugin %r did not provide a file reporter for %r." % (
+                        plugin._coverage_plugin_name, morf
+                    )
+                )
+
+        if file_reporter == "python":
+            file_reporter = PythonFileReporter(morf, self)
+
+        return file_reporter
+
+    def _get_file_reporters(self, morfs=None):
+        """Get a list of FileReporters for a list of modules or file names.
+
+        For each module or file name in `morfs`, find a FileReporter.  Return
+        the list of FileReporters.
+
+        If `morfs` is a single module or file name, this returns a list of one
+        FileReporter.  If `morfs` is empty or None, then the list of all files
+        measured is used to find the FileReporters.
+
+        """
+        if not morfs:
+            morfs = self.data.measured_files()
+
+        # Be sure we have a list.
+        if not isinstance(morfs, (list, tuple)):
+            morfs = [morfs]
+
+        file_reporters = []
+        for morf in morfs:
+            file_reporter = self._get_file_reporter(morf)
+            file_reporters.append(file_reporter)
+
+        return file_reporters
+
+    def report(
+        self, morfs=None, show_missing=True, ignore_errors=None,
+        file=None,                  # pylint: disable=redefined-builtin
+        omit=None, include=None, skip_covered=False,
+    ):
+        """Write a summary report to `file`.
+
+        Each module in `morfs` is listed, with counts of statements, executed
+        statements, missing statements, and a list of lines missed.
+
+        `include` is a list of file name patterns.  Files that match will be
+        included in the report. Files matching `omit` will not be included in
+        the report.
+
+        Returns a float, the total percentage covered.
+
+        """
+        self.get_data()
+        self.config.from_args(
+            ignore_errors=ignore_errors, omit=omit, include=include,
+            show_missing=show_missing, skip_covered=skip_covered,
+            )
+        reporter = SummaryReporter(self, self.config)
+        return reporter.report(morfs, outfile=file)
+
+    def annotate(
+        self, morfs=None, directory=None, ignore_errors=None,
+        omit=None, include=None,
+    ):
+        """Annotate a list of modules.
+
+        Each module in `morfs` is annotated.  The source is written to a new
+        file, named with a ",cover" suffix, with each line prefixed with a
+        marker to indicate the coverage of the line.  Covered lines have ">",
+        excluded lines have "-", and missing lines have "!".
+
+        See :meth:`report` for other arguments.
+
+        """
+        self.get_data()
+        self.config.from_args(
+            ignore_errors=ignore_errors, omit=omit, include=include
+            )
+        reporter = AnnotateReporter(self, self.config)
+        reporter.report(morfs, directory=directory)
+
+    def html_report(self, morfs=None, directory=None, ignore_errors=None,
+                    omit=None, include=None, extra_css=None, title=None):
+        """Generate an HTML report.
+
+        The HTML is written to `directory`.  The file "index.html" is the
+        overview starting point, with links to more detailed pages for
+        individual modules.
+
+        `extra_css` is a path to a file of other CSS to apply on the page.
+        It will be copied into the HTML directory.
+
+        `title` is a text string (not HTML) to use as the title of the HTML
+        report.
+
+        See :meth:`report` for other arguments.
+
+        Returns a float, the total percentage covered.
+
+        """
+        self.get_data()
+        self.config.from_args(
+            ignore_errors=ignore_errors, omit=omit, include=include,
+            html_dir=directory, extra_css=extra_css, html_title=title,
+            )
+        reporter = HtmlReporter(self, self.config)
+        return reporter.report(morfs)
+
+    def xml_report(
+        self, morfs=None, outfile=None, ignore_errors=None,
+        omit=None, include=None,
+    ):
+        """Generate an XML report of coverage results.
+
+        The report is compatible with Cobertura reports.
+
+        Each module in `morfs` is included in the report.  `outfile` is the
+        path to write the file to, "-" will write to stdout.
+
+        See :meth:`report` for other arguments.
+
+        Returns a float, the total percentage covered.
+
+        """
+        self.get_data()
+        self.config.from_args(
+            ignore_errors=ignore_errors, omit=omit, include=include,
+            xml_output=outfile,
+            )
+        file_to_close = None
+        delete_file = False
+        if self.config.xml_output:
+            if self.config.xml_output == '-':
+                outfile = sys.stdout
+            else:
+                # Ensure that the output directory is created; done here
+                # because this report pre-opens the output file.
+                # HTMLReport does this using the Report plumbing because
+                # its task is more complex, being multiple files.
+                output_dir = os.path.dirname(self.config.xml_output)
+                if output_dir and not os.path.isdir(output_dir):
+                    os.makedirs(output_dir)
+                open_kwargs = {}
+                if env.PY3:
+                    open_kwargs['encoding'] = 'utf8'
+                outfile = open(self.config.xml_output, "w", **open_kwargs)
+                file_to_close = outfile
+        try:
+            reporter = XmlReporter(self, self.config)
+            return reporter.report(morfs, outfile=outfile)
+        except CoverageException:
+            delete_file = True
+            raise
+        finally:
+            if file_to_close:
+                file_to_close.close()
+                if delete_file:
+                    file_be_gone(self.config.xml_output)
+
+    def sys_info(self):
+        """Return a list of (key, value) pairs showing internal information."""
+
+        import coverage as covmod
+
+        self._init()
+
+        ft_plugins = []
+        for ft in self.plugins.file_tracers:
+            ft_name = ft._coverage_plugin_name
+            if not ft._coverage_enabled:
+                ft_name += " (disabled)"
+            ft_plugins.append(ft_name)
+
+        info = [
+            ('version', covmod.__version__),
+            ('coverage', covmod.__file__),
+            ('cover_dirs', self.cover_dirs),
+            ('pylib_dirs', self.pylib_dirs),
+            ('tracer', self.collector.tracer_name()),
+            ('plugins.file_tracers', ft_plugins),
+            ('config_files', self.config.attempted_config_files),
+            ('configs_read', self.config.config_files),
+            ('data_path', self.data_files.filename),
+            ('python', sys.version.replace('\n', '')),
+            ('platform', platform.platform()),
+            ('implementation', platform.python_implementation()),
+            ('executable', sys.executable),
+            ('cwd', os.getcwd()),
+            ('path', sys.path),
+            ('environment', sorted(
+                ("%s = %s" % (k, v))
+                for k, v in iitems(os.environ)
+                if k.startswith(("COV", "PY"))
+            )),
+            ('command_line', " ".join(getattr(sys, 'argv', ['???']))),
+            ]
+
+        matcher_names = [
+            'source_match', 'source_pkgs_match',
+            'include_match', 'omit_match',
+            'cover_match', 'pylib_match',
+            ]
+
+        for matcher_name in matcher_names:
+            matcher = getattr(self, matcher_name)
+            if matcher:
+                matcher_info = matcher.info()
+            else:
+                matcher_info = '-none-'
+            info.append((matcher_name, matcher_info))
+
+        return info
+
+
+# FileDisposition "methods": FileDisposition is a pure value object, so it can
+# be implemented in either C or Python.  Acting on them is done with these
+# functions.
+
+def _disposition_init(cls, original_filename):
+    """Construct and initialize a new FileDisposition object."""
+    disp = cls()
+    disp.original_filename = original_filename
+    disp.canonical_filename = original_filename
+    disp.source_filename = None
+    disp.trace = False
+    disp.reason = ""
+    disp.file_tracer = None
+    disp.has_dynamic_filename = False
+    return disp
+
+
+def _disposition_debug_msg(disp):
+    """Make a nice debug message of what the FileDisposition is doing."""
+    if disp.trace:
+        msg = "Tracing %r" % (disp.original_filename,)
+        if disp.file_tracer:
+            msg += ": will be traced by %r" % disp.file_tracer
+    else:
+        msg = "Not tracing %r: %s" % (disp.original_filename, disp.reason)
+    return msg
+
+
+def process_startup():
+    """Call this at Python start-up to perhaps measure coverage.
+
+    If the environment variable COVERAGE_PROCESS_START is defined, coverage
+    measurement is started.  The value of the variable is the config file
+    to use.
+
+    There are two ways to configure your Python installation to invoke this
+    function when Python starts:
+
+    #. Create or append to sitecustomize.py to add these lines::
+
+        import coverage
+        coverage.process_startup()
+
+    #. Create a .pth file in your Python installation containing::
+
+        import coverage; coverage.process_startup()
+
+    """
+    cps = os.environ.get("COVERAGE_PROCESS_START")
+    if not cps:
+        # No request for coverage, nothing to do.
+        return
+
+    # This function can be called more than once in a process. This happens
+    # because some virtualenv configurations make the same directory visible
+    # twice in sys.path.  This means that the .pth file will be found twice,
+    # and executed twice, executing this function twice.  We set a global
+    # flag (an attribute on this function) to indicate that coverage.py has
+    # already been started, so we can avoid doing it twice.
+    #
+    # https://bitbucket.org/ned/coveragepy/issue/340/keyerror-subpy has more
+    # details.
+
+    if hasattr(process_startup, "done"):
+        # The flag is already set on this function, so coverage.py must
+        # already have been started in this process.  Nothing to do.
+        return
+
+    process_startup.done = True
+    cov = Coverage(config_file=cps, auto_data=True)
+    cov.start()
+    cov._warn_no_data = False
+    cov._warn_unimported_source = False
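
The reporting entry points above (report, annotate, html_report, xml_report) all follow the same pattern: fold the keyword arguments into the configuration, then hand off to the matching reporter. A minimal sketch of driving them from Python, assuming the usual Coverage life-cycle calls (start/stop/save) alongside the methods shown in this hunk; the module and file names are made up for illustration:

    import coverage

    cov = coverage.Coverage()
    cov.start()
    import my_module                  # hypothetical code under measurement
    cov.stop()
    cov.save()

    # Textual summary; returns the total percentage covered.
    total = cov.report(show_missing=True)

    # The other reporters accept the same omit/include filtering arguments.
    cov.xml_report(outfile="coverage.xml")
    cov.html_report(directory="htmlcov", title="Example report")

process_startup() is the hook behind measuring subprocesses: point COVERAGE_PROCESS_START at a config file and import coverage from sitecustomize.py or a .pth file, exactly as its docstring describes, and measurement starts automatically at interpreter start-up.
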
diff --git a/catapult/third_party/coverage/coverage/ctracer/datastack.c b/catapult/third_party/coverage/coverage/ctracer/datastack.c
new file mode 100644
index 0000000..5a384e6
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/ctracer/datastack.c
@@ -0,0 +1,42 @@
+/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
+/* For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt */
+
+#include "util.h"
+#include "datastack.h"
+
+#define STACK_DELTA    100
+
+int
+DataStack_init(Stats *pstats, DataStack *pdata_stack)
+{
+    pdata_stack->depth = -1;
+    pdata_stack->stack = NULL;
+    pdata_stack->alloc = 0;
+    return RET_OK;
+}
+
+void
+DataStack_dealloc(Stats *pstats, DataStack *pdata_stack)
+{
+    PyMem_Free(pdata_stack->stack);
+}
+
+int
+DataStack_grow(Stats *pstats, DataStack *pdata_stack)
+{
+    pdata_stack->depth++;
+    if (pdata_stack->depth >= pdata_stack->alloc) {
+        /* We've outgrown our data_stack array: make it bigger. */
+        int bigger = pdata_stack->alloc + STACK_DELTA;
+        DataStackEntry * bigger_data_stack = PyMem_Realloc(pdata_stack->stack, bigger * sizeof(DataStackEntry));
+        STATS( pstats->stack_reallocs++; )
+        if (bigger_data_stack == NULL) {
+            PyErr_NoMemory();
+            pdata_stack->depth--;
+            return RET_ERROR;
+        }
+        pdata_stack->stack = bigger_data_stack;
+        pdata_stack->alloc = bigger;
+    }
+    return RET_OK;
+}
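
DataStack_grow above extends the backing array by a fixed STACK_DELTA entries whenever the depth reaches the current allocation, rolling the depth back if the realloc fails. A rough Python analogue of the same bookkeeping, purely for illustration:

    STACK_DELTA = 100

    class DataStack(object):
        def __init__(self):
            self.depth = -1      # index of the last-used entry
            self.alloc = 0       # number of entries allocated
            self.stack = []

        def grow(self):
            self.depth += 1
            if self.depth >= self.alloc:
                # Outgrown the array: make it bigger, as DataStack_grow does.
                self.stack.extend([None] * STACK_DELTA)
                self.alloc += STACK_DELTA
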
diff --git a/catapult/third_party/coverage/coverage/ctracer/datastack.h b/catapult/third_party/coverage/coverage/ctracer/datastack.h
new file mode 100644
index 0000000..78f85f7
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/ctracer/datastack.h
@@ -0,0 +1,45 @@
+/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
+/* For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt */
+
+#ifndef _COVERAGE_DATASTACK_H
+#define _COVERAGE_DATASTACK_H
+
+#include "util.h"
+#include "stats.h"
+
+/* An entry on the data stack.  For each call frame, we need to record all
+ * the information needed for CTracer_handle_line to operate as quickly as
+ * possible.  All PyObject* here are borrowed references.
+ */
+typedef struct DataStackEntry {
+    /* The current file_data dictionary.  Borrowed, owned by self->data. */
+    PyObject * file_data;
+
+    /* The disposition object for this frame. If collector.py and control.py
+     * are working properly, this will be an instance of CFileDisposition.
+     */
+    PyObject * disposition;
+
+    /* The FileTracer handling this frame, or None if it's Python. */
+    PyObject * file_tracer;
+
+    /* The line number of the last line recorded, for tracing arcs.
+        -1 means there was no previous line, as when entering a code object.
+    */
+    int last_line;
+} DataStackEntry;
+
+/* A data stack is a dynamically allocated vector of DataStackEntry's. */
+typedef struct DataStack {
+    int depth;      /* The index of the last-used entry in stack. */
+    int alloc;      /* number of entries allocated at stack. */
+    /* The file data at each level, or NULL if not recording. */
+    DataStackEntry * stack;
+} DataStack;
+
+
+int DataStack_init(Stats * pstats, DataStack *pdata_stack);
+void DataStack_dealloc(Stats * pstats, DataStack *pdata_stack);
+int DataStack_grow(Stats * pstats, DataStack *pdata_stack);
+
+#endif /* _COVERAGE_DATASTACK_H */
diff --git a/catapult/third_party/coverage/coverage/ctracer/filedisp.c b/catapult/third_party/coverage/coverage/ctracer/filedisp.c
new file mode 100644
index 0000000..479a2c9
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/ctracer/filedisp.c
@@ -0,0 +1,85 @@
+/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
+/* For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt */
+
+#include "util.h"
+#include "filedisp.h"
+
+void
+CFileDisposition_dealloc(CFileDisposition *self)
+{
+    Py_XDECREF(self->original_filename);
+    Py_XDECREF(self->canonical_filename);
+    Py_XDECREF(self->source_filename);
+    Py_XDECREF(self->trace);
+    Py_XDECREF(self->reason);
+    Py_XDECREF(self->file_tracer);
+    Py_XDECREF(self->has_dynamic_filename);
+}
+
+static PyMemberDef
+CFileDisposition_members[] = {
+    { "original_filename",      T_OBJECT, offsetof(CFileDisposition, original_filename), 0,
+            PyDoc_STR("") },
+
+    { "canonical_filename",     T_OBJECT, offsetof(CFileDisposition, canonical_filename), 0,
+            PyDoc_STR("") },
+
+    { "source_filename",        T_OBJECT, offsetof(CFileDisposition, source_filename), 0,
+            PyDoc_STR("") },
+
+    { "trace",                  T_OBJECT, offsetof(CFileDisposition, trace), 0,
+            PyDoc_STR("") },
+
+    { "reason",                 T_OBJECT, offsetof(CFileDisposition, reason), 0,
+            PyDoc_STR("") },
+
+    { "file_tracer",            T_OBJECT, offsetof(CFileDisposition, file_tracer), 0,
+            PyDoc_STR("") },
+
+    { "has_dynamic_filename",   T_OBJECT, offsetof(CFileDisposition, has_dynamic_filename), 0,
+            PyDoc_STR("") },
+
+    { NULL }
+};
+
+PyTypeObject
+CFileDispositionType = {
+    MyType_HEAD_INIT
+    "coverage.CFileDispositionType",        /*tp_name*/
+    sizeof(CFileDisposition),  /*tp_basicsize*/
+    0,                         /*tp_itemsize*/
+    (destructor)CFileDisposition_dealloc, /*tp_dealloc*/
+    0,                         /*tp_print*/
+    0,                         /*tp_getattr*/
+    0,                         /*tp_setattr*/
+    0,                         /*tp_compare*/
+    0,                         /*tp_repr*/
+    0,                         /*tp_as_number*/
+    0,                         /*tp_as_sequence*/
+    0,                         /*tp_as_mapping*/
+    0,                         /*tp_hash */
+    0,                         /*tp_call*/
+    0,                         /*tp_str*/
+    0,                         /*tp_getattro*/
+    0,                         /*tp_setattro*/
+    0,                         /*tp_as_buffer*/
+    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/
+    "CFileDisposition objects", /* tp_doc */
+    0,                         /* tp_traverse */
+    0,                         /* tp_clear */
+    0,                         /* tp_richcompare */
+    0,                         /* tp_weaklistoffset */
+    0,                         /* tp_iter */
+    0,                         /* tp_iternext */
+    0,                         /* tp_methods */
+    CFileDisposition_members,  /* tp_members */
+    0,                         /* tp_getset */
+    0,                         /* tp_base */
+    0,                         /* tp_dict */
+    0,                         /* tp_descr_get */
+    0,                         /* tp_descr_set */
+    0,                         /* tp_dictoffset */
+    0,                         /* tp_init */
+    0,                         /* tp_alloc */
+    0,                         /* tp_new */
+};
diff --git a/catapult/third_party/coverage/coverage/ctracer/filedisp.h b/catapult/third_party/coverage/coverage/ctracer/filedisp.h
new file mode 100644
index 0000000..ada68ea
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/ctracer/filedisp.h
@@ -0,0 +1,26 @@
+/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
+/* For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt */
+
+#ifndef _COVERAGE_FILEDISP_H
+#define _COVERAGE_FILEDISP_H
+
+#include "util.h"
+#include "structmember.h"
+
+typedef struct CFileDisposition {
+    PyObject_HEAD
+
+    PyObject * original_filename;
+    PyObject * canonical_filename;
+    PyObject * source_filename;
+    PyObject * trace;
+    PyObject * reason;
+    PyObject * file_tracer;
+    PyObject * has_dynamic_filename;
+} CFileDisposition;
+
+void CFileDisposition_dealloc(CFileDisposition *self);
+
+extern PyTypeObject CFileDispositionType;
+
+#endif /* _COVERAGE_FILEDISP_H */
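
CFileDisposition is the C counterpart of the pure-Python value object that _disposition_init() fills in earlier in this patch; both expose the same seven attributes, so the rest of coverage.py can treat them interchangeably. A hedged sketch, assuming the helpers live where this patch appears to put them (coverage/control.py and the compiled coverage.tracer module):

    from coverage.control import _disposition_init, _disposition_debug_msg
    from coverage.tracer import CFileDisposition

    disp = _disposition_init(CFileDisposition, "example.py")   # hypothetical file
    disp.trace = True
    disp.source_filename = "example.py"
    print(_disposition_debug_msg(disp))    # -> "Tracing 'example.py'"
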
diff --git a/catapult/third_party/coverage/coverage/ctracer/module.c b/catapult/third_party/coverage/coverage/ctracer/module.c
new file mode 100644
index 0000000..7623185
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/ctracer/module.c
@@ -0,0 +1,108 @@
+/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
+/* For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt */
+
+#include "util.h"
+#include "tracer.h"
+#include "filedisp.h"
+
+/* Module definition */
+
+#define MODULE_DOC PyDoc_STR("Fast coverage tracer.")
+
+#if PY_MAJOR_VERSION >= 3
+
+static PyModuleDef
+moduledef = {
+    PyModuleDef_HEAD_INIT,
+    "coverage.tracer",
+    MODULE_DOC,
+    -1,
+    NULL,       /* methods */
+    NULL,
+    NULL,       /* traverse */
+    NULL,       /* clear */
+    NULL
+};
+
+
+PyObject *
+PyInit_tracer(void)
+{
+    PyObject * mod = PyModule_Create(&moduledef);
+    if (mod == NULL) {
+        return NULL;
+    }
+
+    if (CTracer_intern_strings() < 0) {
+        return NULL;
+    }
+
+    /* Initialize CTracer */
+    CTracerType.tp_new = PyType_GenericNew;
+    if (PyType_Ready(&CTracerType) < 0) {
+        Py_DECREF(mod);
+        return NULL;
+    }
+
+    Py_INCREF(&CTracerType);
+    if (PyModule_AddObject(mod, "CTracer", (PyObject *)&CTracerType) < 0) {
+        Py_DECREF(mod);
+        Py_DECREF(&CTracerType);
+        return NULL;
+    }
+
+    /* Initialize CFileDisposition */
+    CFileDispositionType.tp_new = PyType_GenericNew;
+    if (PyType_Ready(&CFileDispositionType) < 0) {
+        Py_DECREF(mod);
+        Py_DECREF(&CTracerType);
+        return NULL;
+    }
+
+    Py_INCREF(&CFileDispositionType);
+    if (PyModule_AddObject(mod, "CFileDisposition", (PyObject *)&CFileDispositionType) < 0) {
+        Py_DECREF(mod);
+        Py_DECREF(&CTracerType);
+        Py_DECREF(&CFileDispositionType);
+        return NULL;
+    }
+
+    return mod;
+}
+
+#else
+
+void
+inittracer(void)
+{
+    PyObject * mod;
+
+    mod = Py_InitModule3("coverage.tracer", NULL, MODULE_DOC);
+    if (mod == NULL) {
+        return;
+    }
+
+    if (CTracer_intern_strings() < 0) {
+        return;
+    }
+
+    /* Initialize CTracer */
+    CTracerType.tp_new = PyType_GenericNew;
+    if (PyType_Ready(&CTracerType) < 0) {
+        return;
+    }
+
+    Py_INCREF(&CTracerType);
+    PyModule_AddObject(mod, "CTracer", (PyObject *)&CTracerType);
+
+    /* Initialize CFileDisposition */
+    CFileDispositionType.tp_new = PyType_GenericNew;
+    if (PyType_Ready(&CFileDispositionType) < 0) {
+        return;
+    }
+
+    Py_INCREF(&CFileDispositionType);
+    PyModule_AddObject(mod, "CFileDisposition", (PyObject *)&CFileDispositionType);
+}
+
+#endif /* Py3k */
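
module.c registers the extension as coverage.tracer and exposes exactly two names, CTracer and CFileDisposition. A guarded import is the conventional way for Python code to cope with a missing or unbuilt extension; a hedged sketch:

    try:
        from coverage.tracer import CTracer, CFileDisposition
        HAS_CTRACER = True                      # hypothetical flag name
    except ImportError:
        # Fall back to a pure-Python tracer (not part of this hunk).
        HAS_CTRACER = False
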
diff --git a/catapult/third_party/coverage/coverage/ctracer/stats.h b/catapult/third_party/coverage/coverage/ctracer/stats.h
new file mode 100644
index 0000000..ceba79b
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/ctracer/stats.h
@@ -0,0 +1,30 @@
+/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
+/* For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt */
+
+#ifndef _COVERAGE_STATS_H
+#define _COVERAGE_STATS_H
+
+#include "util.h"
+
+#if COLLECT_STATS
+#define STATS(x)        x
+#else
+#define STATS(x)
+#endif
+
+typedef struct Stats {
+    unsigned int calls;     /* Need at least one member, but the rest only if needed. */
+#if COLLECT_STATS
+    unsigned int lines;
+    unsigned int returns;
+    unsigned int exceptions;
+    unsigned int others;
+    unsigned int new_files;
+    unsigned int missed_returns;
+    unsigned int stack_reallocs;
+    unsigned int errors;
+    unsigned int pycalls;
+#endif
+} Stats;
+
+#endif /* _COVERAGE_STATS_H */
diff --git a/catapult/third_party/coverage/coverage/ctracer/tracer.c b/catapult/third_party/coverage/coverage/ctracer/tracer.c
new file mode 100644
index 0000000..25036f9
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/ctracer/tracer.c
@@ -0,0 +1,1073 @@
+/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
+/* For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt */
+
+/* C-based Tracer for coverage.py. */
+
+#include "util.h"
+#include "datastack.h"
+#include "filedisp.h"
+#include "tracer.h"
+
+/* Python C API helpers. */
+
+static int
+pyint_as_int(PyObject * pyint, int *pint)
+{
+    int the_int = MyInt_AsInt(pyint);
+    if (the_int == -1 && PyErr_Occurred()) {
+        return RET_ERROR;
+    }
+
+    *pint = the_int;
+    return RET_OK;
+}
+
+
+/* Interned strings to speed GetAttr etc. */
+
+static PyObject *str_trace;
+static PyObject *str_file_tracer;
+static PyObject *str__coverage_enabled;
+static PyObject *str__coverage_plugin;
+static PyObject *str__coverage_plugin_name;
+static PyObject *str_dynamic_source_filename;
+static PyObject *str_line_number_range;
+
+int
+CTracer_intern_strings(void)
+{
+    int ret = RET_ERROR;
+
+#define INTERN_STRING(v, s)                     \
+    v = MyText_InternFromString(s);             \
+    if (v == NULL) {                            \
+        goto error;                             \
+    }
+
+    INTERN_STRING(str_trace, "trace")
+    INTERN_STRING(str_file_tracer, "file_tracer")
+    INTERN_STRING(str__coverage_enabled, "_coverage_enabled")
+    INTERN_STRING(str__coverage_plugin, "_coverage_plugin")
+    INTERN_STRING(str__coverage_plugin_name, "_coverage_plugin_name")
+    INTERN_STRING(str_dynamic_source_filename, "dynamic_source_filename")
+    INTERN_STRING(str_line_number_range, "line_number_range")
+
+    ret = RET_OK;
+
+error:
+    return ret;
+}
+
+static void CTracer_disable_plugin(CTracer *self, PyObject * disposition);
+
+static int
+CTracer_init(CTracer *self, PyObject *args_unused, PyObject *kwds_unused)
+{
+    int ret = RET_ERROR;
+
+    if (DataStack_init(&self->stats, &self->data_stack) < 0) {
+        goto error;
+    }
+
+    self->pdata_stack = &self->data_stack;
+
+    self->cur_entry.last_line = -1;
+
+    ret = RET_OK;
+    goto ok;
+
+error:
+    STATS( self->stats.errors++; )
+
+ok:
+    return ret;
+}
+
+static void
+CTracer_dealloc(CTracer *self)
+{
+    int i;
+
+    if (self->started) {
+        PyEval_SetTrace(NULL, NULL);
+    }
+
+    Py_XDECREF(self->should_trace);
+    Py_XDECREF(self->check_include);
+    Py_XDECREF(self->warn);
+    Py_XDECREF(self->concur_id_func);
+    Py_XDECREF(self->data);
+    Py_XDECREF(self->file_tracers);
+    Py_XDECREF(self->should_trace_cache);
+
+    DataStack_dealloc(&self->stats, &self->data_stack);
+    if (self->data_stacks) {
+        for (i = 0; i < self->data_stacks_used; i++) {
+            DataStack_dealloc(&self->stats, self->data_stacks + i);
+        }
+        PyMem_Free(self->data_stacks);
+    }
+
+    Py_XDECREF(self->data_stack_index);
+
+    Py_TYPE(self)->tp_free((PyObject*)self);
+}
+
+#if TRACE_LOG
+static const char *
+indent(int n)
+{
+    static const char * spaces =
+        "                                                                    "
+        "                                                                    "
+        "                                                                    "
+        "                                                                    "
+        ;
+    return spaces + strlen(spaces) - n*2;
+}
+
+static int logging = 0;
+/* Set these constants to be a file substring and line number to start logging. */
+static const char * start_file = "tests/views";
+static int start_line = 27;
+
+static void
+showlog(int depth, int lineno, PyObject * filename, const char * msg)
+{
+    if (logging) {
+        printf("%s%3d ", indent(depth), depth);
+        if (lineno) {
+            printf("%4d", lineno);
+        }
+        else {
+            printf("    ");
+        }
+        if (filename) {
+            PyObject *ascii = MyText_AS_BYTES(filename);
+            printf(" %s", MyBytes_AS_STRING(ascii));
+            Py_DECREF(ascii);
+        }
+        if (msg) {
+            printf(" %s", msg);
+        }
+        printf("\n");
+    }
+}
+
+#define SHOWLOG(a,b,c,d)    showlog(a,b,c,d)
+#else
+#define SHOWLOG(a,b,c,d)
+#endif /* TRACE_LOG */
+
+#if WHAT_LOG
+static const char * what_sym[] = {"CALL", "EXC ", "LINE", "RET "};
+#endif
+
+/* Record a pair of integers in self->cur_entry.file_data. */
+static int
+CTracer_record_pair(CTracer *self, int l1, int l2)
+{
+    int ret = RET_ERROR;
+
+    PyObject * t = NULL;
+
+    t = Py_BuildValue("(ii)", l1, l2);
+    if (t == NULL) {
+        goto error;
+    }
+
+    if (PyDict_SetItem(self->cur_entry.file_data, t, Py_None) < 0) {
+        goto error;
+    }
+
+    ret = RET_OK;
+
+error:
+    Py_XDECREF(t);
+
+    return ret;
+}
+
+/* Set self->pdata_stack to the proper data_stack to use. */
+static int
+CTracer_set_pdata_stack(CTracer *self)
+{
+    int ret = RET_ERROR;
+    PyObject * co_obj = NULL;
+    PyObject * stack_index = NULL;
+
+    if (self->concur_id_func != Py_None) {
+        int the_index = 0;
+
+        if (self->data_stack_index == NULL) {
+            PyObject * weakref = NULL;
+
+            weakref = PyImport_ImportModule("weakref");
+            if (weakref == NULL) {
+                goto error;
+            }
+            STATS( self->stats.pycalls++; )
+            self->data_stack_index = PyObject_CallMethod(weakref, "WeakKeyDictionary", NULL);
+            Py_XDECREF(weakref);
+
+            if (self->data_stack_index == NULL) {
+                goto error;
+            }
+        }
+
+        STATS( self->stats.pycalls++; )
+        co_obj = PyObject_CallObject(self->concur_id_func, NULL);
+        if (co_obj == NULL) {
+            goto error;
+        }
+        stack_index = PyObject_GetItem(self->data_stack_index, co_obj);
+        if (stack_index == NULL) {
+            /* PyObject_GetItem sets an exception if it didn't find the thing. */
+            PyErr_Clear();
+
+            /* A new concurrency object.  Make a new data stack. */
+            the_index = self->data_stacks_used;
+            stack_index = MyInt_FromInt(the_index);
+            if (stack_index == NULL) {
+                goto error;
+            }
+            if (PyObject_SetItem(self->data_stack_index, co_obj, stack_index) < 0) {
+                goto error;
+            }
+            self->data_stacks_used++;
+            if (self->data_stacks_used >= self->data_stacks_alloc) {
+                int bigger = self->data_stacks_alloc + 10;
+                DataStack * bigger_stacks = PyMem_Realloc(self->data_stacks, bigger * sizeof(DataStack));
+                if (bigger_stacks == NULL) {
+                    PyErr_NoMemory();
+                    goto error;
+                }
+                self->data_stacks = bigger_stacks;
+                self->data_stacks_alloc = bigger;
+            }
+            DataStack_init(&self->stats, &self->data_stacks[the_index]);
+        }
+        else {
+            if (pyint_as_int(stack_index, &the_index) < 0) {
+                goto error;
+            }
+        }
+
+        self->pdata_stack = &self->data_stacks[the_index];
+    }
+    else {
+        self->pdata_stack = &self->data_stack;
+    }
+
+    ret = RET_OK;
+
+error:
+
+    Py_XDECREF(co_obj);
+    Py_XDECREF(stack_index);
+
+    return ret;
+}
+
+/*
+ * Parts of the trace function.
+ */
+
+static int
+CTracer_check_missing_return(CTracer *self, PyFrameObject *frame)
+{
+    int ret = RET_ERROR;
+
+    if (self->last_exc_back) {
+        if (frame == self->last_exc_back) {
+            /* Looks like someone forgot to send a return event. We'll clear
+               the exception state and do the RETURN code here.  Notice that the
+               frame we have in hand here is not the correct frame for the RETURN;
+               that frame is gone.  Our handling for RETURN doesn't need the
+               actual frame, but we do log it, so that will look a little off if
+               you're looking at the detailed log.
+
+               If someday we need to examine the frame when doing RETURN, then
+               we'll need to keep more of the missed frame's state.
+            */
+            STATS( self->stats.missed_returns++; )
+            if (CTracer_set_pdata_stack(self) < 0) {
+                goto error;
+            }
+            if (self->pdata_stack->depth >= 0) {
+                if (self->tracing_arcs && self->cur_entry.file_data) {
+                    if (CTracer_record_pair(self, self->cur_entry.last_line, -self->last_exc_firstlineno) < 0) {
+                        goto error;
+                    }
+                }
+                SHOWLOG(self->pdata_stack->depth, frame->f_lineno, frame->f_code->co_filename, "missedreturn");
+                self->cur_entry = self->pdata_stack->stack[self->pdata_stack->depth];
+                self->pdata_stack->depth--;
+            }
+        }
+        self->last_exc_back = NULL;
+    }
+
+    ret = RET_OK;
+
+error:
+
+    return ret;
+}
+
+static int
+CTracer_handle_call(CTracer *self, PyFrameObject *frame)
+{
+    int ret = RET_ERROR;
+    int ret2;
+
+    /* Owned references that we clean up at the very end of the function. */
+    PyObject * disposition = NULL;
+    PyObject * plugin = NULL;
+    PyObject * plugin_name = NULL;
+    PyObject * next_tracename = NULL;
+
+    /* Borrowed references. */
+    PyObject * filename = NULL;
+    PyObject * disp_trace = NULL;
+    PyObject * tracename = NULL;
+    PyObject * file_tracer = NULL;
+    PyObject * has_dynamic_filename = NULL;
+
+    CFileDisposition * pdisp = NULL;
+
+
+    STATS( self->stats.calls++; )
+    /* Grow the stack. */
+    if (CTracer_set_pdata_stack(self) < 0) {
+        goto error;
+    }
+    if (DataStack_grow(&self->stats, self->pdata_stack) < 0) {
+        goto error;
+    }
+
+    /* Push the current state on the stack. */
+    self->pdata_stack->stack[self->pdata_stack->depth] = self->cur_entry;
+
+    /* Check if we should trace this line. */
+    filename = frame->f_code->co_filename;
+    disposition = PyDict_GetItem(self->should_trace_cache, filename);
+    if (disposition == NULL) {
+        if (PyErr_Occurred()) {
+            goto error;
+        }
+        STATS( self->stats.new_files++; )
+
+        /* We've never considered this file before. */
+        /* Ask should_trace about it. */
+        STATS( self->stats.pycalls++; )
+        disposition = PyObject_CallFunctionObjArgs(self->should_trace, filename, frame, NULL);
+        if (disposition == NULL) {
+            /* An error occurred inside should_trace. */
+            goto error;
+        }
+        if (PyDict_SetItem(self->should_trace_cache, filename, disposition) < 0) {
+            goto error;
+        }
+    }
+    else {
+        Py_INCREF(disposition);
+    }
+
+    if (disposition == Py_None) {
+        /* A later check_include returned false, so don't trace it. */
+        disp_trace = Py_False;
+    }
+    else {
+        /* The object we got is a CFileDisposition, use it efficiently. */
+        pdisp = (CFileDisposition *) disposition;
+        disp_trace = pdisp->trace;
+        if (disp_trace == NULL) {
+            goto error;
+        }
+    }
+
+    if (disp_trace == Py_True) {
+        /* If tracename is a string, then we're supposed to trace. */
+        tracename = pdisp->source_filename;
+        if (tracename == NULL) {
+            goto error;
+        }
+        file_tracer = pdisp->file_tracer;
+        if (file_tracer == NULL) {
+            goto error;
+        }
+        if (file_tracer != Py_None) {
+            plugin = PyObject_GetAttr(file_tracer, str__coverage_plugin);
+            if (plugin == NULL) {
+                goto error;
+            }
+            plugin_name = PyObject_GetAttr(plugin, str__coverage_plugin_name);
+            if (plugin_name == NULL) {
+                goto error;
+            }
+        }
+        has_dynamic_filename = pdisp->has_dynamic_filename;
+        if (has_dynamic_filename == NULL) {
+            goto error;
+        }
+        if (has_dynamic_filename == Py_True) {
+            STATS( self->stats.pycalls++; )
+            next_tracename = PyObject_CallMethodObjArgs(
+                file_tracer, str_dynamic_source_filename,
+                tracename, frame, NULL
+                );
+            if (next_tracename == NULL) {
+                /* An exception from the function. Alert the user with a
+                 * warning and a traceback.
+                 */
+                CTracer_disable_plugin(self, disposition);
+                /* Because we handled the error, goto ok. */
+                goto ok;
+            }
+            tracename = next_tracename;
+
+            if (tracename != Py_None) {
+                /* Check the dynamic source filename against the include rules. */
+                PyObject * included = NULL;
+                int should_include;
+                included = PyDict_GetItem(self->should_trace_cache, tracename);
+                if (included == NULL) {
+                    PyObject * should_include_bool;
+                    if (PyErr_Occurred()) {
+                        goto error;
+                    }
+                    STATS( self->stats.new_files++; )
+                    STATS( self->stats.pycalls++; )
+                    should_include_bool = PyObject_CallFunctionObjArgs(self->check_include, tracename, frame, NULL);
+                    if (should_include_bool == NULL) {
+                        goto error;
+                    }
+                    should_include = (should_include_bool == Py_True);
+                    Py_DECREF(should_include_bool);
+                    if (PyDict_SetItem(self->should_trace_cache, tracename, should_include ? disposition : Py_None) < 0) {
+                        goto error;
+                    }
+                }
+                else {
+                    should_include = (included != Py_None);
+                }
+                if (!should_include) {
+                    tracename = Py_None;
+                }
+            }
+        }
+    }
+    else {
+        tracename = Py_None;
+    }
+
+    if (tracename != Py_None) {
+        PyObject * file_data = PyDict_GetItem(self->data, tracename);
+
+        if (file_data == NULL) {
+            if (PyErr_Occurred()) {
+                goto error;
+            }
+            file_data = PyDict_New();
+            if (file_data == NULL) {
+                goto error;
+            }
+            ret2 = PyDict_SetItem(self->data, tracename, file_data);
+            Py_DECREF(file_data);
+            if (ret2 < 0) {
+                goto error;
+            }
+
+            /* If the disposition mentions a plugin, record that. */
+            if (file_tracer != Py_None) {
+                ret2 = PyDict_SetItem(self->file_tracers, tracename, plugin_name);
+                if (ret2 < 0) {
+                    goto error;
+                }
+            }
+        }
+
+        self->cur_entry.file_data = file_data;
+        self->cur_entry.file_tracer = file_tracer;
+
+        /* Make the frame right in case settrace(gettrace()) happens. */
+        Py_INCREF(self);
+        frame->f_trace = (PyObject*)self;
+        SHOWLOG(self->pdata_stack->depth, frame->f_lineno, filename, "traced");
+    }
+    else {
+        self->cur_entry.file_data = NULL;
+        self->cur_entry.file_tracer = Py_None;
+        SHOWLOG(self->pdata_stack->depth, frame->f_lineno, filename, "skipped");
+    }
+
+    self->cur_entry.disposition = disposition;
+
+    /* A call event is really a "start frame" event, and can happen for
+     * re-entering a generator also.  f_lasti is -1 for a true call, and a
+     * real byte offset for a generator re-entry.
+     */
+    self->cur_entry.last_line = (frame->f_lasti < 0) ? -1 : frame->f_lineno;
+
+ok:
+    ret = RET_OK;
+
+error:
+    Py_XDECREF(next_tracename);
+    Py_XDECREF(disposition);
+    Py_XDECREF(plugin);
+    Py_XDECREF(plugin_name);
+
+    return ret;
+}
+
+
+static void
+CTracer_disable_plugin(CTracer *self, PyObject * disposition)
+{
+    PyObject * file_tracer = NULL;
+    PyObject * plugin = NULL;
+    PyObject * plugin_name = NULL;
+    PyObject * msg = NULL;
+    PyObject * ignored = NULL;
+
+    PyErr_Print();
+
+    file_tracer = PyObject_GetAttr(disposition, str_file_tracer);
+    if (file_tracer == NULL) {
+        goto error;
+    }
+    if (file_tracer == Py_None) {
+        /* This shouldn't happen... */
+        goto ok;
+    }
+    plugin = PyObject_GetAttr(file_tracer, str__coverage_plugin);
+    if (plugin == NULL) {
+        goto error;
+    }
+    plugin_name = PyObject_GetAttr(plugin, str__coverage_plugin_name);
+    if (plugin_name == NULL) {
+        goto error;
+    }
+    msg = MyText_FromFormat(
+        "Disabling plugin '%s' due to previous exception",
+        MyText_AsString(plugin_name)
+        );
+    if (msg == NULL) {
+        goto error;
+    }
+    STATS( self->stats.pycalls++; )
+    ignored = PyObject_CallFunctionObjArgs(self->warn, msg, NULL);
+    if (ignored == NULL) {
+        goto error;
+    }
+
+    /* Disable the plugin for future files, and stop tracing this file. */
+    if (PyObject_SetAttr(plugin, str__coverage_enabled, Py_False) < 0) {
+        goto error;
+    }
+    if (PyObject_SetAttr(disposition, str_trace, Py_False) < 0) {
+        goto error;
+    }
+
+    goto ok;
+
+error:
+    /* This function doesn't return a status, so if an error happens, print it,
+     * but don't interrupt the flow. */
+    /* PySys_WriteStderr is nicer, but is not in the public API. */
+    fprintf(stderr, "Error occurred while disabling plugin:\n");
+    PyErr_Print();
+
+ok:
+    Py_XDECREF(file_tracer);
+    Py_XDECREF(plugin);
+    Py_XDECREF(plugin_name);
+    Py_XDECREF(msg);
+    Py_XDECREF(ignored);
+}
+
+
+static int
+CTracer_unpack_pair(CTracer *self, PyObject *pair, int *p_one, int *p_two)
+{
+    int ret = RET_ERROR;
+    int the_int;
+    PyObject * pyint = NULL;
+    int index;
+
+    if (!PyTuple_Check(pair) || PyTuple_Size(pair) != 2) {
+        PyErr_SetString(
+            PyExc_TypeError,
+            "line_number_range must return 2-tuple"
+            );
+        goto error;
+    }
+
+    for (index = 0; index < 2; index++) {
+        pyint = PyTuple_GetItem(pair, index);
+        if (pyint == NULL) {
+            goto error;
+        }
+        if (pyint_as_int(pyint, &the_int) < 0) {
+            goto error;
+        }
+        *(index == 0 ? p_one : p_two) = the_int;
+    }
+
+    ret = RET_OK;
+
+error:
+    return ret;
+}
+
+static int
+CTracer_handle_line(CTracer *self, PyFrameObject *frame)
+{
+    int ret = RET_ERROR;
+    int ret2;
+
+    STATS( self->stats.lines++; )
+    if (self->pdata_stack->depth >= 0) {
+        SHOWLOG(self->pdata_stack->depth, frame->f_lineno, frame->f_code->co_filename, "line");
+        if (self->cur_entry.file_data) {
+            int lineno_from = -1;
+            int lineno_to = -1;
+
+            /* We're tracing in this frame: record something. */
+            if (self->cur_entry.file_tracer != Py_None) {
+                PyObject * from_to = NULL;
+                STATS( self->stats.pycalls++; )
+                from_to = PyObject_CallMethodObjArgs(self->cur_entry.file_tracer, str_line_number_range, frame, NULL);
+                if (from_to == NULL) {
+                    goto error;
+                }
+                ret2 = CTracer_unpack_pair(self, from_to, &lineno_from, &lineno_to);
+                Py_DECREF(from_to);
+                if (ret2 < 0) {
+                    CTracer_disable_plugin(self, self->cur_entry.disposition);
+                    goto ok;
+                }
+            }
+            else {
+                lineno_from = lineno_to = frame->f_lineno;
+            }
+
+            if (lineno_from != -1) {
+                for (; lineno_from <= lineno_to; lineno_from++) {
+                    if (self->tracing_arcs) {
+                        /* Tracing arcs: key is (last_line,this_line). */
+                        if (CTracer_record_pair(self, self->cur_entry.last_line, lineno_from) < 0) {
+                            goto error;
+                        }
+                    }
+                    else {
+                        /* Tracing lines: key is simply this_line. */
+                        PyObject * this_line = MyInt_FromInt(lineno_from);
+                        if (this_line == NULL) {
+                            goto error;
+                        }
+
+                        ret2 = PyDict_SetItem(self->cur_entry.file_data, this_line, Py_None);
+                        Py_DECREF(this_line);
+                        if (ret2 < 0) {
+                            goto error;
+                        }
+                    }
+
+                    self->cur_entry.last_line = lineno_from;
+                }
+            }
+        }
+    }
+
+ok:
+    ret = RET_OK;
+
+error:
+
+    return ret;
+}
+
+static int
+CTracer_handle_return(CTracer *self, PyFrameObject *frame)
+{
+    int ret = RET_ERROR;
+
+    STATS( self->stats.returns++; )
+    /* A near-copy of this code is above in the missing-return handler. */
+    if (CTracer_set_pdata_stack(self) < 0) {
+        goto error;
+    }
+    if (self->pdata_stack->depth >= 0) {
+        if (self->tracing_arcs && self->cur_entry.file_data) {
+            /* Need to distinguish between RETURN_VALUE and YIELD_VALUE. Read
+             * the current bytecode to see what it is.  In unusual circumstances
+             * (Cython code), co_code can be the empty string, so range-check
+             * f_lasti before reading the byte.
+             */
+            int bytecode = RETURN_VALUE;
+            PyObject * pCode = frame->f_code->co_code;
+            int lasti = frame->f_lasti;
+
+            if (lasti < MyBytes_GET_SIZE(pCode)) {
+                bytecode = MyBytes_AS_STRING(pCode)[lasti];
+            }
+            if (bytecode != YIELD_VALUE) {
+                int first = frame->f_code->co_firstlineno;
+                if (CTracer_record_pair(self, self->cur_entry.last_line, -first) < 0) {
+                    goto error;
+                }
+            }
+        }
+
+        SHOWLOG(self->pdata_stack->depth, frame->f_lineno, frame->f_code->co_filename, "return");
+        self->cur_entry = self->pdata_stack->stack[self->pdata_stack->depth];
+        self->pdata_stack->depth--;
+    }
+
+    ret = RET_OK;
+
+error:
+
+    return ret;
+}
+
+static int
+CTracer_handle_exception(CTracer *self, PyFrameObject *frame)
+{
+    /* Some code (Python 2.3, and pyexpat anywhere) fires an exception event
+        without a return event.  To detect that, we'll keep a copy of the
+        parent frame for an exception event.  If the next event is in that
+        frame, then we must have returned without a return event.  We can
+        synthesize the missing event then.
+
+        Python itself fixed this problem in 2.4.  Pyexpat still has the bug.
+        I've reported the problem with pyexpat as http://bugs.python.org/issue6359 .
+        If it gets fixed, this code should still work properly.  Maybe some day
+        the bug will be fixed everywhere coverage.py is supported, and we can
+        remove this missing-return detection.
+
+        More about this fix: http://nedbatchelder.com/blog/200907/a_nasty_little_bug.html
+    */
+    STATS( self->stats.exceptions++; )
+    self->last_exc_back = frame->f_back;
+    self->last_exc_firstlineno = frame->f_code->co_firstlineno;
+
+    return RET_OK;
+}
+
+/*
+ * The Trace Function
+ */
+static int
+CTracer_trace(CTracer *self, PyFrameObject *frame, int what, PyObject *arg_unused)
+{
+    int ret = RET_ERROR;
+
+    #if WHAT_LOG || TRACE_LOG
+    PyObject * ascii = NULL;
+    #endif
+
+    #if WHAT_LOG
+    if (what <= (int)(sizeof(what_sym)/sizeof(const char *))) {
+        ascii = MyText_AS_BYTES(frame->f_code->co_filename);
+        printf("trace: %s @ %s %d\n", what_sym[what], MyBytes_AS_STRING(ascii), frame->f_lineno);
+        Py_DECREF(ascii);
+    }
+    #endif
+
+    #if TRACE_LOG
+    ascii = MyText_AS_BYTES(frame->f_code->co_filename);
+    if (strstr(MyBytes_AS_STRING(ascii), start_file) && frame->f_lineno == start_line) {
+        logging = 1;
+    }
+    Py_DECREF(ascii);
+    #endif
+
+    /* See below for details on missing-return detection. */
+    if (CTracer_check_missing_return(self, frame) < 0) {
+        goto error;
+    }
+
+    switch (what) {
+    case PyTrace_CALL:
+        if (CTracer_handle_call(self, frame) < 0) {
+            goto error;
+        }
+        break;
+
+    case PyTrace_RETURN:
+        if (CTracer_handle_return(self, frame) < 0) {
+            goto error;
+        }
+        break;
+
+    case PyTrace_LINE:
+        if (CTracer_handle_line(self, frame) < 0) {
+            goto error;
+        }
+        break;
+
+    case PyTrace_EXCEPTION:
+        if (CTracer_handle_exception(self, frame) < 0) {
+            goto error;
+        }
+        break;
+
+    default:
+        STATS( self->stats.others++; )
+        break;
+    }
+
+    ret = RET_OK;
+    goto cleanup;
+
+error:
+    STATS( self->stats.errors++; )
+
+cleanup:
+    return ret;
+}
+
+
+/*
+ * Python has two ways to set the trace function: sys.settrace(fn), which
+ * takes a Python callable, and PyEval_SetTrace(func, obj), which takes
+ * a C function and a Python object.  The way these work together is that
+ * sys.settrace(pyfn) calls PyEval_SetTrace(builtin_func, pyfn), using the
+ * Python callable as the object in PyEval_SetTrace.  So sys.gettrace()
+ * simply returns the Python object used as the second argument to
+ * PyEval_SetTrace.  So sys.gettrace() will return our self parameter, which
+ * means it must be callable to be used in sys.settrace().
+ *
+ * So we make our self callable, equivalent to invoking our trace function.
+ *
+ * To help with the process of replaying stored frames, this function has an
+ * optional keyword argument:
+ *
+ *      def CTracer_call(frame, event, arg, lineno=0)
+ *
+ * If provided, the lineno argument is used as the line number, and the
+ * frame's f_lineno member is ignored.
+ */
+static PyObject *
+CTracer_call(CTracer *self, PyObject *args, PyObject *kwds)
+{
+    PyFrameObject *frame;
+    PyObject *what_str;
+    PyObject *arg;
+    int lineno = 0;
+    int what;
+    int orig_lineno;
+    PyObject *ret = NULL;
+    PyObject * ascii = NULL;
+
+    static char *what_names[] = {
+        "call", "exception", "line", "return",
+        "c_call", "c_exception", "c_return",
+        NULL
+        };
+
+    static char *kwlist[] = {"frame", "event", "arg", "lineno", NULL};
+
+    if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!O!O|i:Tracer_call", kwlist,
+            &PyFrame_Type, &frame, &MyText_Type, &what_str, &arg, &lineno)) {
+        goto done;
+    }
+
+    /* In Python, the what argument is a string, we need to find an int
+       for the C function. */
+    for (what = 0; what_names[what]; what++) {
+        int should_break;
+        ascii = MyText_AS_BYTES(what_str);
+        should_break = !strcmp(MyBytes_AS_STRING(ascii), what_names[what]);
+        Py_DECREF(ascii);
+        if (should_break) {
+            break;
+        }
+    }
+
+    #if WHAT_LOG
+    ascii = MyText_AS_BYTES(frame->f_code->co_filename);
+    printf("pytrace: %s @ %s %d\n", what_sym[what], MyBytes_AS_STRING(ascii), frame->f_lineno);
+    Py_DECREF(ascii);
+    #endif
+
+    /* Save off the frame's lineno, and use the forced one, if provided. */
+    orig_lineno = frame->f_lineno;
+    if (lineno > 0) {
+        frame->f_lineno = lineno;
+    }
+
+    /* Invoke the C function, and return ourselves. */
+    if (CTracer_trace(self, frame, what, arg) == RET_OK) {
+        Py_INCREF(self);
+        ret = (PyObject *)self;
+    }
+
+    /* Clean up. */
+    frame->f_lineno = orig_lineno;
+
+    /* For better speed, install ourselves the C way so that future calls go
+       directly to CTracer_trace, without this intermediate function.
+
+       Only do this if this is a CALL event, since new trace functions only
+       take effect then.  If we don't condition it on CALL, then we'll clobber
+       the new trace function before it has a chance to get called.  To
+       understand why, there are three internal values to track: frame.f_trace,
+       c_tracefunc, and c_traceobj.  They are explained here:
+       http://nedbatchelder.com/text/trace-function.html
+
+       Without the conditional on PyTrace_CALL, this is what happens:
+
+            def func():                 #   f_trace         c_tracefunc     c_traceobj
+                                        #   --------------  --------------  --------------
+                                        #   CTracer         CTracer.trace   CTracer
+                sys.settrace(my_func)
+                                        #   CTracer         trampoline      my_func
+                        # Now Python calls trampoline(CTracer), which calls this function
+                        # which calls PyEval_SetTrace below, setting us as the tracer again:
+                                        #   CTracer         CTracer.trace   CTracer
+                        # and it's as if the settrace never happened.
+        */
+    if (what == PyTrace_CALL) {
+        PyEval_SetTrace((Py_tracefunc)CTracer_trace, (PyObject*)self);
+    }
+
+done:
+    return ret;
+}
+
+static PyObject *
+CTracer_start(CTracer *self, PyObject *args_unused)
+{
+    PyEval_SetTrace((Py_tracefunc)CTracer_trace, (PyObject*)self);
+    self->started = 1;
+    self->tracing_arcs = self->trace_arcs && PyObject_IsTrue(self->trace_arcs);
+    self->cur_entry.last_line = -1;
+
+    /* start() returns a trace function usable with sys.settrace() */
+    Py_INCREF(self);
+    return (PyObject *)self;
+}
+
+static PyObject *
+CTracer_stop(CTracer *self, PyObject *args_unused)
+{
+    if (self->started) {
+        PyEval_SetTrace(NULL, NULL);
+        self->started = 0;
+    }
+
+    Py_RETURN_NONE;
+}
+
+static PyObject *
+CTracer_get_stats(CTracer *self)
+{
+#if COLLECT_STATS
+    return Py_BuildValue(
+        "{sI,sI,sI,sI,sI,sI,sI,sI,si,sI,sI}",
+        "calls", self->stats.calls,
+        "lines", self->stats.lines,
+        "returns", self->stats.returns,
+        "exceptions", self->stats.exceptions,
+        "others", self->stats.others,
+        "new_files", self->stats.new_files,
+        "missed_returns", self->stats.missed_returns,
+        "stack_reallocs", self->stats.stack_reallocs,
+        "stack_alloc", self->pdata_stack->alloc,
+        "errors", self->stats.errors,
+        "pycalls", self->stats.pycalls
+        );
+#else
+    Py_RETURN_NONE;
+#endif /* COLLECT_STATS */
+}
+
+static PyMemberDef
+CTracer_members[] = {
+    { "should_trace",       T_OBJECT, offsetof(CTracer, should_trace), 0,
+            PyDoc_STR("Function indicating whether to trace a file.") },
+
+    { "check_include",      T_OBJECT, offsetof(CTracer, check_include), 0,
+            PyDoc_STR("Function indicating whether to include a file.") },
+
+    { "warn",               T_OBJECT, offsetof(CTracer, warn), 0,
+            PyDoc_STR("Function for issuing warnings.") },
+
+    { "concur_id_func",     T_OBJECT, offsetof(CTracer, concur_id_func), 0,
+            PyDoc_STR("Function for determining concurrency context") },
+
+    { "data",               T_OBJECT, offsetof(CTracer, data), 0,
+            PyDoc_STR("The raw dictionary of trace data.") },
+
+    { "file_tracers",       T_OBJECT, offsetof(CTracer, file_tracers), 0,
+            PyDoc_STR("Mapping from file name to plugin name.") },
+
+    { "should_trace_cache", T_OBJECT, offsetof(CTracer, should_trace_cache), 0,
+            PyDoc_STR("Dictionary caching should_trace results.") },
+
+    { "trace_arcs",         T_OBJECT, offsetof(CTracer, trace_arcs), 0,
+            PyDoc_STR("Should we trace arcs, or just lines?") },
+
+    { NULL }
+};
+
+static PyMethodDef
+CTracer_methods[] = {
+    { "start",      (PyCFunction) CTracer_start,        METH_VARARGS,
+            PyDoc_STR("Start the tracer") },
+
+    { "stop",       (PyCFunction) CTracer_stop,         METH_VARARGS,
+            PyDoc_STR("Stop the tracer") },
+
+    { "get_stats",  (PyCFunction) CTracer_get_stats,    METH_VARARGS,
+            PyDoc_STR("Get statistics about the tracing") },
+
+    { NULL }
+};
+
+PyTypeObject
+CTracerType = {
+    MyType_HEAD_INIT
+    "coverage.CTracer",        /*tp_name*/
+    sizeof(CTracer),           /*tp_basicsize*/
+    0,                         /*tp_itemsize*/
+    (destructor)CTracer_dealloc, /*tp_dealloc*/
+    0,                         /*tp_print*/
+    0,                         /*tp_getattr*/
+    0,                         /*tp_setattr*/
+    0,                         /*tp_compare*/
+    0,                         /*tp_repr*/
+    0,                         /*tp_as_number*/
+    0,                         /*tp_as_sequence*/
+    0,                         /*tp_as_mapping*/
+    0,                         /*tp_hash */
+    (ternaryfunc)CTracer_call, /*tp_call*/
+    0,                         /*tp_str*/
+    0,                         /*tp_getattro*/
+    0,                         /*tp_setattro*/
+    0,                         /*tp_as_buffer*/
+    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/
+    "CTracer objects",         /* tp_doc */
+    0,                         /* tp_traverse */
+    0,                         /* tp_clear */
+    0,                         /* tp_richcompare */
+    0,                         /* tp_weaklistoffset */
+    0,                         /* tp_iter */
+    0,                         /* tp_iternext */
+    CTracer_methods,           /* tp_methods */
+    CTracer_members,           /* tp_members */
+    0,                         /* tp_getset */
+    0,                         /* tp_base */
+    0,                         /* tp_dict */
+    0,                         /* tp_descr_get */
+    0,                         /* tp_descr_set */
+    0,                         /* tp_dictoffset */
+    (initproc)CTracer_init,    /* tp_init */
+    0,                         /* tp_alloc */
+    0,                         /* tp_new */
+};
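
In normal use a collector object inside coverage.py (not part of this hunk) owns the CTracer and fills in every member listed in CTracer_members before calling start(). Purely to illustrate that contract, and not as a recommended way to drive coverage.py, here is a minimal hedged sketch of wiring the tracer up by hand:

    from coverage.tracer import CTracer, CFileDisposition

    def should_trace(filename, frame):
        # Build a disposition saying "trace this file as-is, no plugin".
        disp = CFileDisposition()
        disp.original_filename = filename
        disp.canonical_filename = filename
        disp.source_filename = filename
        disp.trace = True
        disp.reason = ""
        disp.file_tracer = None
        disp.has_dynamic_filename = False
        return disp

    tracer = CTracer()
    tracer.data = {}                              # filename -> {lineno: None}
    tracer.file_tracers = {}
    tracer.should_trace = should_trace
    tracer.should_trace_cache = {}
    tracer.check_include = lambda filename, frame: False
    tracer.warn = lambda msg: None
    tracer.concur_id_func = None
    tracer.trace_arcs = False

    fn = tracer.start()                           # also usable with sys.settrace(fn)

    def measured():
        x = 1
        return x

    measured()
    tracer.stop()
    print(tracer.data)                            # recorded line numbers per file
    print(tracer.get_stats())                     # None unless built with COLLECT_STATS
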
diff --git a/catapult/third_party/coverage/coverage/ctracer/tracer.h b/catapult/third_party/coverage/coverage/ctracer/tracer.h
new file mode 100644
index 0000000..053fbf6
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/ctracer/tracer.h
@@ -0,0 +1,68 @@
+/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
+/* For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt */
+
+#ifndef _COVERAGE_TRACER_H
+#define _COVERAGE_TRACER_H
+
+#include "util.h"
+#include "structmember.h"
+#include "frameobject.h"
+#include "opcode.h"
+
+#include "datastack.h"
+
+/* The CTracer type. */
+
+typedef struct CTracer {
+    PyObject_HEAD
+
+    /* Python objects manipulated directly by the Collector class. */
+    PyObject * should_trace;
+    PyObject * check_include;
+    PyObject * warn;
+    PyObject * concur_id_func;
+    PyObject * data;
+    PyObject * file_tracers;
+    PyObject * should_trace_cache;
+    PyObject * trace_arcs;
+
+    /* Has the tracer been started? */
+    int started;
+    /* Are we tracing arcs, or just lines? */
+    int tracing_arcs;
+
+    /*
+        The data stack is a stack of dictionaries.  Each dictionary collects
+        data for a single source file.  The data stack parallels the call stack:
+        each call pushes the new frame's file data onto the data stack, and each
+        return pops file data off.
+
+        The file data is a dictionary whose form depends on the tracing options.
+        If tracing arcs, the keys are line number pairs.  If not tracing arcs,
+        the keys are line numbers.  In both cases, the value is irrelevant
+        (None).
+    */
+
+    DataStack data_stack;           /* Used if we aren't doing concurrency. */
+
+    PyObject * data_stack_index;    /* Used if we are doing concurrency. */
+    DataStack * data_stacks;
+    int data_stacks_alloc;
+    int data_stacks_used;
+    DataStack * pdata_stack;
+
+    /* The current file's data stack entry, copied from the stack. */
+    DataStackEntry cur_entry;
+
+    /* The parent frame for the last exception event, to fix missing returns. */
+    PyFrameObject * last_exc_back;
+    int last_exc_firstlineno;
+
+    Stats stats;
+} CTracer;
+
+int CTracer_intern_strings(void);
+
+extern PyTypeObject CTracerType;
+
+#endif /* _COVERAGE_TRACER_H */
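
The concurrency fields above (data_stack_index, data_stacks, pdata_stack) only come into play when concur_id_func is set: CTracer_set_pdata_stack in tracer.c keys a separate DataStack off whatever object that callable returns, using a weakref.WeakKeyDictionary. Any zero-argument callable returning a weak-referenceable "current context" object should therefore work; a hedged example:

    import threading

    tracer.concur_id_func = threading.current_thread   # one data stack per thread
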
diff --git a/catapult/third_party/coverage/coverage/ctracer/util.h b/catapult/third_party/coverage/coverage/ctracer/util.h
new file mode 100644
index 0000000..bb3ad5a
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/ctracer/util.h
@@ -0,0 +1,52 @@
+/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
+/* For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt */
+
+#ifndef _COVERAGE_UTIL_H
+#define _COVERAGE_UTIL_H
+
+#include <Python.h>
+
+/* Compile-time debugging helpers */
+#undef WHAT_LOG         /* Define to log the WHAT params in the trace function. */
+#undef TRACE_LOG        /* Define to log our bookkeeping. */
+#undef COLLECT_STATS    /* Collect counters: stats are printed when tracer is stopped. */
+
+/* Py 2.x and 3.x compatibility */
+
+#if PY_MAJOR_VERSION >= 3
+
+#define MyText_Type         PyUnicode_Type
+#define MyText_AS_BYTES(o)  PyUnicode_AsASCIIString(o)
+#define MyBytes_GET_SIZE(o) PyBytes_GET_SIZE(o)
+#define MyBytes_AS_STRING(o) PyBytes_AS_STRING(o)
+#define MyText_AsString(o)  PyUnicode_AsUTF8(o)
+#define MyText_FromFormat   PyUnicode_FromFormat
+#define MyInt_FromInt(i)    PyLong_FromLong((long)i)
+#define MyInt_AsInt(o)      (int)PyLong_AsLong(o)
+#define MyText_InternFromString(s) \
+                            PyUnicode_InternFromString(s)
+
+#define MyType_HEAD_INIT    PyVarObject_HEAD_INIT(NULL, 0)
+
+#else
+
+#define MyText_Type         PyString_Type
+#define MyText_AS_BYTES(o)  (Py_INCREF(o), o)
+#define MyBytes_GET_SIZE(o) PyString_GET_SIZE(o)
+#define MyBytes_AS_STRING(o) PyString_AS_STRING(o)
+#define MyText_AsString(o)  PyString_AsString(o)
+#define MyText_FromFormat   PyUnicode_FromFormat
+#define MyInt_FromInt(i)    PyInt_FromLong((long)i)
+#define MyInt_AsInt(o)      (int)PyInt_AsLong(o)
+#define MyText_InternFromString(s) \
+                            PyString_InternFromString(s)
+
+#define MyType_HEAD_INIT    PyObject_HEAD_INIT(NULL)  0,
+
+#endif /* Py3k */
+
+/* The values returned to indicate ok or error. */
+#define RET_OK      0
+#define RET_ERROR   -1
+
+#endif /* _COVERAGE_UTIL_H */
diff --git a/catapult/third_party/coverage/coverage/data.py b/catapult/third_party/coverage/coverage/data.py
new file mode 100644
index 0000000..17cf73c
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/data.py
@@ -0,0 +1,766 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Coverage data for coverage.py."""
+
+import glob
+import json
+import optparse
+import os
+import os.path
+import random
+import re
+import socket
+
+from coverage import env
+from coverage.backward import iitems, string_class
+from coverage.debug import _TEST_NAME_FILE
+from coverage.files import PathAliases
+from coverage.misc import CoverageException, file_be_gone, isolate_module
+
+os = isolate_module(os)
+
+
+class CoverageData(object):
+    """Manages collected coverage data, including file storage.
+
+    This class is the public supported API to the data coverage.py collects
+    during program execution.  It includes information about what code was
+    executed. It does not include information from the analysis phase, to
+    determine what lines could have been executed, or what lines were not
+    executed.
+
+    .. note::
+
+        The file format is not documented or guaranteed.  It will change in
+        the future, in possibly complicated ways.  Do not read coverage.py
+        data files directly.  Use this API to avoid disruption.
+
+    There are a number of kinds of data that can be collected:
+
+    * **lines**: the line numbers of source lines that were executed.
+      These are always available.
+
+    * **arcs**: pairs of source and destination line numbers for transitions
+      between source lines.  These are only available if branch coverage was
+      used.
+
+    * **file tracer names**: the module names of the file tracer plugins that
+      handled each file in the data.
+
+    * **run information**: information about the program execution.  This is
+      written during "coverage run", and then accumulated during "coverage
+      combine".
+
+    Lines, arcs, and file tracer names are stored for each source file. File
+    names in this API are case-sensitive, even on platforms with
+    case-insensitive file systems.
+
+    To read a coverage.py data file, use :meth:`read_file`, or
+    :meth:`read_fileobj` if you have an already-opened file.  You can then
+    access the line, arc, or file tracer data with :meth:`lines`, :meth:`arcs`,
+    or :meth:`file_tracer`.  Run information is available with
+    :meth:`run_infos`.
+
+    The :meth:`has_arcs` method indicates whether arc data is available.  You
+    can get a list of the files in the data with :meth:`measured_files`.
+    A summary of the line data is available from :meth:`line_counts`.  As with
+    most Python containers, you can determine if there is any data at all by
+    using this object as a boolean value.
+
+
+    Most data files will be created by coverage.py itself, but you can use
+    methods here to create data files if you like.  The :meth:`add_lines`,
+    :meth:`add_arcs`, and :meth:`add_file_tracers` methods add data, in ways
+    that are convenient for coverage.py.  The :meth:`add_run_info` method adds
+    key-value pairs to the run information.
+
+    To add a file without any measured data, use :meth:`touch_file`.
+
+    You write to a named file with :meth:`write_file`, or to an already opened
+    file with :meth:`write_fileobj`.
+
+    You can clear the data in memory with :meth:`erase`.  Two data collections
+    can be combined by using :meth:`update` on one :class:`CoverageData`,
+    passing it the other.
+
+    """
+
+    # The data file format is JSON, with these keys:
+    #
+    #     * lines: a dict mapping file names to lists of line numbers
+    #       executed::
+    #
+    #         { "file1": [17,23,45], "file2": [1,2,3], ... }
+    #
+    #     * arcs: a dict mapping file names to lists of line number pairs::
+    #
+    #         { "file1": [[17,23], [17,25], [25,26]], ... }
+    #
+    #     * file_tracers: a dict mapping file names to plugin names::
+    #
+    #         { "file1": "django.coverage", ... }
+    #
+    #     * runs: a list of dicts of information about the coverage.py runs
+    #       contributing to the data::
+    #
+    #         [ { "brief_sys": "CPython 2.7.10 Darwin" }, ... ]
+    #
+    # Only one of `lines` or `arcs` will be present: with branch coverage, data
+    # is stored as arcs. Without branch coverage, it is stored as lines.  The
+    # line data is easily recovered from the arcs: it is all the first elements
+    # of the pairs that are greater than zero.
+
+    def __init__(self, debug=None):
+        """Create a CoverageData.
+
+        `debug` is a `DebugControl` object for writing debug messages.
+
+        """
+        self._debug = debug
+
+        # A map from canonical Python source file name to a dictionary in
+        # which there's an entry for each line number that has been
+        # executed:
+        #
+        #   { 'filename1.py': [12, 47, 1001], ... }
+        #
+        self._lines = None
+
+        # A map from canonical Python source file name to a dictionary with an
+        # entry for each pair of line numbers forming an arc:
+        #
+        #   { 'filename1.py': [(12,14), (47,48), ... ], ... }
+        #
+        self._arcs = None
+
+        # A map from canonical source file name to a plugin module name:
+        #
+        #   { 'filename1.py': 'django.coverage', ... }
+        #
+        self._file_tracers = {}
+
+        # A list of dicts of information about the coverage.py runs.
+        self._runs = []
+
+    def __repr__(self):
+        return "<{klass} lines={lines} arcs={arcs} tracers={tracers} runs={runs}>".format(
+            klass=self.__class__.__name__,
+            lines="None" if self._lines is None else "{{{0}}}".format(len(self._lines)),
+            arcs="None" if self._arcs is None else "{{{0}}}".format(len(self._arcs)),
+            tracers="{{{0}}}".format(len(self._file_tracers)),
+            runs="[{0}]".format(len(self._runs)),
+        )
+
+    ##
+    ## Reading data
+    ##
+
+    def has_arcs(self):
+        """Does this data have arcs?
+
+        Arc data is only available if branch coverage was used during
+        collection.
+
+        Returns a boolean.
+
+        """
+        return self._has_arcs()
+
+    def lines(self, filename):
+        """Get the list of lines executed for a file.
+
+        If the file was not measured, returns None.  A file might be measured,
+        and have no lines executed, in which case an empty list is returned.
+
+        If the file was executed, returns a list of integers, the line numbers
+        executed in the file. The list is in no particular order.
+
+        """
+        if self._arcs is not None:
+            if filename in self._arcs:
+                return [s for s, __ in self._arcs[filename] if s > 0]
+        elif self._lines is not None:
+            if filename in self._lines:
+                return self._lines[filename]
+        return None
+
+    def arcs(self, filename):
+        """Get the list of arcs executed for a file.
+
+        If the file was not measured, returns None.  A file might be measured,
+        and have no arcs executed, in which case an empty list is returned.
+
+        If the file was executed, returns a list of 2-tuples of integers. Each
+        pair is a starting line number and an ending line number for a
+        transition from one line to another. The list is in no particular
+        order.
+
+        Negative numbers have special meaning.  If the starting line number is
+        -N, it represents an entry to the code object that starts at line N.
+        If the ending line number is -N, it's an exit from the code object that
+        starts at line N.
+
+        """
+        if self._arcs is not None:
+            if filename in self._arcs:
+                return self._arcs[filename]
+        return None
+
+    def file_tracer(self, filename):
+        """Get the plugin name of the file tracer for a file.
+
+        Returns the name of the plugin that handles this file.  If the file was
+        measured, but didn't use a plugin, then "" is returned.  If the file
+        was not measured, then None is returned.
+
+        """
+        # Because the vast majority of files involve no plugin, we don't store
+        # them explicitly in self._file_tracers.  Check the measured data
+        # instead to see if it was a known file with no plugin.
+        if filename in (self._arcs or self._lines or {}):
+            return self._file_tracers.get(filename, "")
+        return None
+
+    def run_infos(self):
+        """Return the list of dicts of run information.
+
+        For data collected during a single run, this will be a one-element
+        list.  If data has been combined, there will be one element for each
+        original data file.
+
+        """
+        return self._runs
+
+    def measured_files(self):
+        """A list of all files that have been measured."""
+        return list(self._arcs or self._lines or {})
+
+    def line_counts(self, fullpath=False):
+        """Return a dict summarizing the line coverage data.
+
+        Keys are based on the file names, and values are the number of executed
+        lines.  If `fullpath` is true, then the keys are the full pathnames of
+        the files, otherwise they are the basenames of the files.
+
+        Returns a dict mapping file names to counts of lines.
+
+        """
+        summ = {}
+        if fullpath:
+            filename_fn = lambda f: f
+        else:
+            filename_fn = os.path.basename
+        for filename in self.measured_files():
+            summ[filename_fn(filename)] = len(self.lines(filename))
+        return summ
+
+    def __nonzero__(self):
+        return bool(self._lines or self._arcs)
+
+    __bool__ = __nonzero__
+
+    def read_fileobj(self, file_obj):
+        """Read the coverage data from the given file object.
+
+        Should only be used on an empty CoverageData object.
+
+        """
+        data = self._read_raw_data(file_obj)
+
+        self._lines = self._arcs = None
+
+        if 'lines' in data:
+            self._lines = data['lines']
+        if 'arcs' in data:
+            self._arcs = dict(
+                (fname, [tuple(pair) for pair in arcs])
+                for fname, arcs in iitems(data['arcs'])
+            )
+        self._file_tracers = data.get('file_tracers', {})
+        self._runs = data.get('runs', [])
+
+        self._validate()
+
+    def read_file(self, filename):
+        """Read the coverage data from `filename` into this object."""
+        if self._debug and self._debug.should('dataio'):
+            self._debug.write("Reading data from %r" % (filename,))
+        try:
+            with self._open_for_reading(filename) as f:
+                self.read_fileobj(f)
+        except Exception as exc:
+            raise CoverageException(
+                "Couldn't read data from '%s': %s: %s" % (
+                    filename, exc.__class__.__name__, exc,
+                )
+            )
+
+    _GO_AWAY = "!coverage.py: This is a private format, don't read it directly!"
+
+    @classmethod
+    def _open_for_reading(cls, filename):
+        """Open a file appropriately for reading data."""
+        return open(filename, "r")
+
+    @classmethod
+    def _read_raw_data(cls, file_obj):
+        """Read the raw data from a file object."""
+        go_away = file_obj.read(len(cls._GO_AWAY))
+        if go_away != cls._GO_AWAY:
+            raise CoverageException("Doesn't seem to be a coverage.py data file")
+        return json.load(file_obj)
+
+    @classmethod
+    def _read_raw_data_file(cls, filename):
+        """Read the raw data from a file, for debugging."""
+        with cls._open_for_reading(filename) as f:
+            return cls._read_raw_data(f)
+
+    ##
+    ## Writing data
+    ##
+
+    def add_lines(self, line_data):
+        """Add measured line data.
+
+        `line_data` is a dictionary mapping file names to dictionaries::
+
+            { filename: { lineno: None, ... }, ...}
+
+        """
+        if self._debug and self._debug.should('dataop'):
+            self._debug.write("Adding lines: %d files, %d lines total" % (
+                len(line_data), sum(len(lines) for lines in line_data.values())
+            ))
+        if self._has_arcs():
+            raise CoverageException("Can't add lines to existing arc data")
+
+        if self._lines is None:
+            self._lines = {}
+        for filename, linenos in iitems(line_data):
+            if filename in self._lines:
+                new_linenos = set(self._lines[filename])
+                new_linenos.update(linenos)
+                linenos = new_linenos
+            self._lines[filename] = list(linenos)
+
+        self._validate()
+
+    def add_arcs(self, arc_data):
+        """Add measured arc data.
+
+        `arc_data` is a dictionary mapping file names to dictionaries::
+
+            { filename: { (l1,l2): None, ... }, ...}
+
+        """
+        if self._debug and self._debug.should('dataop'):
+            self._debug.write("Adding arcs: %d files, %d arcs total" % (
+                len(arc_data), sum(len(arcs) for arcs in arc_data.values())
+            ))
+        if self._has_lines():
+            raise CoverageException("Can't add arcs to existing line data")
+
+        if self._arcs is None:
+            self._arcs = {}
+        for filename, arcs in iitems(arc_data):
+            if filename in self._arcs:
+                new_arcs = set(self._arcs[filename])
+                new_arcs.update(arcs)
+                arcs = new_arcs
+            self._arcs[filename] = list(arcs)
+
+        self._validate()
+
+    def add_file_tracers(self, file_tracers):
+        """Add per-file plugin information.
+
+        `file_tracers` is { filename: plugin_name, ... }
+
+        """
+        if self._debug and self._debug.should('dataop'):
+            self._debug.write("Adding file tracers: %d files" % (len(file_tracers),))
+
+        existing_files = self._arcs or self._lines or {}
+        for filename, plugin_name in iitems(file_tracers):
+            if filename not in existing_files:
+                raise CoverageException(
+                    "Can't add file tracer data for unmeasured file '%s'" % (filename,)
+                )
+            existing_plugin = self._file_tracers.get(filename)
+            if existing_plugin is not None and plugin_name != existing_plugin:
+                raise CoverageException(
+                    "Conflicting file tracer name for '%s': %r vs %r" % (
+                        filename, existing_plugin, plugin_name,
+                    )
+                )
+            self._file_tracers[filename] = plugin_name
+
+        self._validate()
+
+    def add_run_info(self, **kwargs):
+        """Add information about the run.
+
+        Keywords are arbitrary, and are stored in the run dictionary. Values
+        must be JSON serializable.  You may use this function more than once,
+        but repeated keywords overwrite each other.
+
+        """
+        if self._debug and self._debug.should('dataop'):
+            self._debug.write("Adding run info: %r" % (kwargs,))
+        if not self._runs:
+            self._runs = [{}]
+        self._runs[0].update(kwargs)
+        self._validate()
+
+    def touch_file(self, filename):
+        """Ensure that `filename` appears in the data, empty if needed."""
+        if self._debug and self._debug.should('dataop'):
+            self._debug.write("Touching %r" % (filename,))
+        if not self._has_arcs() and not self._has_lines():
+            raise CoverageException("Can't touch files in an empty CoverageData")
+
+        if self._has_arcs():
+            where = self._arcs
+        else:
+            where = self._lines
+        where.setdefault(filename, [])
+
+        self._validate()
+
+    def write_fileobj(self, file_obj):
+        """Write the coverage data to `file_obj`."""
+
+        # Create the file data.
+        file_data = {}
+
+        if self._has_arcs():
+            file_data['arcs'] = self._arcs
+
+        if self._has_lines():
+            file_data['lines'] = self._lines
+
+        if self._file_tracers:
+            file_data['file_tracers'] = self._file_tracers
+
+        if self._runs:
+            file_data['runs'] = self._runs
+
+        # Write the data to the file.
+        file_obj.write(self._GO_AWAY)
+        json.dump(file_data, file_obj)
+
+    def write_file(self, filename):
+        """Write the coverage data to `filename`."""
+        if self._debug and self._debug.should('dataio'):
+            self._debug.write("Writing data to %r" % (filename,))
+        with open(filename, 'w') as fdata:
+            self.write_fileobj(fdata)
+
+    def erase(self):
+        """Erase the data in this object."""
+        self._lines = None
+        self._arcs = None
+        self._file_tracers = {}
+        self._runs = []
+        self._validate()
+
+    def update(self, other_data, aliases=None):
+        """Update this data with data from another `CoverageData`.
+
+        If `aliases` is provided, it's a `PathAliases` object that is used to
+        re-map paths to match the local machine's.
+
+        """
+        if self._has_lines() and other_data._has_arcs():
+            raise CoverageException("Can't combine arc data with line data")
+        if self._has_arcs() and other_data._has_lines():
+            raise CoverageException("Can't combine line data with arc data")
+
+        aliases = aliases or PathAliases()
+
+        # _file_tracers: only have a string, so they have to agree.
+        # Have to do these first, so that our examination of self._arcs and
+        # self._lines won't be confused by data updated from other_data.
+        for filename in other_data.measured_files():
+            other_plugin = other_data.file_tracer(filename)
+            filename = aliases.map(filename)
+            this_plugin = self.file_tracer(filename)
+            if this_plugin is None:
+                if other_plugin:
+                    self._file_tracers[filename] = other_plugin
+            elif this_plugin != other_plugin:
+                raise CoverageException(
+                    "Conflicting file tracer name for '%s': %r vs %r" % (
+                        filename, this_plugin, other_plugin,
+                    )
+                )
+
+        # _runs: add the new runs to these runs.
+        self._runs.extend(other_data._runs)
+
+        # _lines: merge dicts.
+        if other_data._has_lines():
+            if self._lines is None:
+                self._lines = {}
+            for filename, file_lines in iitems(other_data._lines):
+                filename = aliases.map(filename)
+                if filename in self._lines:
+                    lines = set(self._lines[filename])
+                    lines.update(file_lines)
+                    file_lines = list(lines)
+                self._lines[filename] = file_lines
+
+        # _arcs: merge dicts.
+        if other_data._has_arcs():
+            if self._arcs is None:
+                self._arcs = {}
+            for filename, file_arcs in iitems(other_data._arcs):
+                filename = aliases.map(filename)
+                if filename in self._arcs:
+                    arcs = set(self._arcs[filename])
+                    arcs.update(file_arcs)
+                    file_arcs = list(arcs)
+                self._arcs[filename] = file_arcs
+
+        self._validate()
+
+    ##
+    ## Miscellaneous
+    ##
+
+    def _validate(self):
+        """If we are in paranoid mode, validate that everything is right."""
+        if env.TESTING:
+            self._validate_invariants()
+
+    def _validate_invariants(self):
+        """Validate internal invariants."""
+        # Only one of _lines or _arcs should exist.
+        assert not(self._has_lines() and self._has_arcs()), (
+            "Shouldn't have both _lines and _arcs"
+        )
+
+        # _lines should be a dict of lists of ints.
+        if self._has_lines():
+            for fname, lines in iitems(self._lines):
+                assert isinstance(fname, string_class), "Key in _lines shouldn't be %r" % (fname,)
+                assert all(isinstance(x, int) for x in lines), (
+                    "_lines[%r] shouldn't be %r" % (fname, lines)
+                )
+
+        # _arcs should be a dict of lists of pairs of ints.
+        if self._has_arcs():
+            for fname, arcs in iitems(self._arcs):
+                assert isinstance(fname, string_class), "Key in _arcs shouldn't be %r" % (fname,)
+                assert all(isinstance(x, int) and isinstance(y, int) for x, y in arcs), (
+                    "_arcs[%r] shouldn't be %r" % (fname, arcs)
+                )
+
+        # _file_tracers should have only non-empty strings as values.
+        for fname, plugin in iitems(self._file_tracers):
+            assert isinstance(fname, string_class), (
+                "Key in _file_tracers shouldn't be %r" % (fname,)
+            )
+            assert plugin and isinstance(plugin, string_class), (
+                "_file_tracers[%r] shouldn't be %r" % (fname, plugin)
+            )
+
+        # _runs should be a list of dicts.
+        for val in self._runs:
+            assert isinstance(val, dict)
+            for key in val:
+                assert isinstance(key, string_class), "Key in _runs shouldn't be %r" % (key,)
+
+    def add_to_hash(self, filename, hasher):
+        """Contribute `filename`'s data to the `hasher`.
+
+        `hasher` is a `coverage.misc.Hasher` instance to be updated with
+        the file's data.  It should only get the results data, not the run
+        data.
+
+        """
+        if self._has_arcs():
+            hasher.update(sorted(self.arcs(filename) or []))
+        else:
+            hasher.update(sorted(self.lines(filename) or []))
+        hasher.update(self.file_tracer(filename))
+
+    ##
+    ## Internal
+    ##
+
+    def _has_lines(self):
+        """Do we have data in self._lines?"""
+        return self._lines is not None
+
+    def _has_arcs(self):
+        """Do we have data in self._arcs?"""
+        return self._arcs is not None
+
+
+class CoverageDataFiles(object):
+    """Manage the use of coverage data files."""
+
+    def __init__(self, basename=None, warn=None):
+        """Create a CoverageDataFiles to manage data files.
+
+        `warn` is the warning function to use.
+
+        `basename` is the name of the file to use for storing data.
+
+        """
+        self.warn = warn
+        # Construct the file name that will be used for data storage.
+        self.filename = os.path.abspath(basename or ".coverage")
+
+    def erase(self, parallel=False):
+        """Erase the data from the file storage.
+
+        If `parallel` is true, then also deletes data files created from the
+        basename by parallel-mode.
+
+        """
+        file_be_gone(self.filename)
+        if parallel:
+            data_dir, local = os.path.split(self.filename)
+            localdot = local + '.*'
+            pattern = os.path.join(os.path.abspath(data_dir), localdot)
+            for filename in glob.glob(pattern):
+                file_be_gone(filename)
+
+    def read(self, data):
+        """Read the coverage data."""
+        if os.path.exists(self.filename):
+            data.read_file(self.filename)
+
+    def write(self, data, suffix=None):
+        """Write the collected coverage data to a file.
+
+        `suffix` is a suffix to append to the base file name. This can be used
+        for multiple or parallel execution, so that many coverage data files
+        can exist simultaneously.  A dot will be used to join the base name and
+        the suffix.
+
+        """
+        filename = self.filename
+        if suffix is True:
+            # If data_suffix was a simple true value, then make a suffix with
+            # plenty of distinguishing information.  We do this here in
+            # `save()` at the last minute so that the pid will be correct even
+            # if the process forks.
+            extra = ""
+            if _TEST_NAME_FILE:                             # pragma: debugging
+                with open(_TEST_NAME_FILE) as f:
+                    test_name = f.read()
+                extra = "." + test_name
+            suffix = "%s%s.%s.%06d" % (
+                socket.gethostname(), extra, os.getpid(),
+                random.randint(0, 999999)
+            )
+
+        if suffix:
+            filename += "." + suffix
+        data.write_file(filename)
+
+    def combine_parallel_data(self, data, aliases=None, data_paths=None):
+        """Combine a number of data files together.
+
+        Treat `self.filename` as a file prefix, and combine the data from all
+        of the data files starting with that prefix plus a dot.
+
+        If `aliases` is provided, it's a `PathAliases` object that is used to
+        re-map paths to match the local machine's.
+
+        If `data_paths` is provided, it is a list of directories or files to
+        combine.  Directories are searched for files that start with
+        `self.filename` plus dot as a prefix, and those files are combined.
+
+        If `data_paths` is not provided, then the directory portion of
+        `self.filename` is used as the directory to search for data files.
+
+        Every data file found and combined is then deleted from disk. If a file
+        cannot be read, a warning will be issued, and the file will not be
+        deleted.
+
+        """
+        # Because of the os.path.abspath in the constructor, data_dir will
+        # never be an empty string.
+        data_dir, local = os.path.split(self.filename)
+        localdot = local + '.*'
+
+        data_paths = data_paths or [data_dir]
+        files_to_combine = []
+        for p in data_paths:
+            if os.path.isfile(p):
+                files_to_combine.append(os.path.abspath(p))
+            elif os.path.isdir(p):
+                pattern = os.path.join(os.path.abspath(p), localdot)
+                files_to_combine.extend(glob.glob(pattern))
+            else:
+                raise CoverageException("Couldn't combine from non-existent path '%s'" % (p,))
+
+        for f in files_to_combine:
+            new_data = CoverageData()
+            try:
+                new_data.read_file(f)
+            except CoverageException as exc:
+                if self.warn:
+                    # The CoverageException has the file name in it, so just
+                    # use the message as the warning.
+                    self.warn(str(exc))
+            else:
+                data.update(new_data, aliases=aliases)
+                file_be_gone(f)
+
+
+def canonicalize_json_data(data):
+    """Canonicalize our JSON data so it can be compared."""
+    for fname, lines in iitems(data.get('lines', {})):
+        data['lines'][fname] = sorted(lines)
+    for fname, arcs in iitems(data.get('arcs', {})):
+        data['arcs'][fname] = sorted(arcs)
+
+
+def pretty_data(data):
+    """Format data as JSON, but as nicely as possible.
+
+    Returns a string.
+
+    """
+    # Start with a basic JSON dump.
+    out = json.dumps(data, indent=4, sort_keys=True)
+    # But pairs of numbers shouldn't be split across lines...
+    out = re.sub(r"\[\s+(-?\d+),\s+(-?\d+)\s+]", r"[\1, \2]", out)
+    # Trailing spaces mess with tests, get rid of them.
+    out = re.sub(r"(?m)\s+$", "", out)
+    return out
+
+
+def debug_main(args):
+    """Dump the raw data from data files.
+
+    Run this as::
+
+        $ python -m coverage.data [FILE]
+
+    """
+    parser = optparse.OptionParser()
+    parser.add_option(
+        "-c", "--canonical", action="store_true",
+        help="Sort data into a canonical order",
+    )
+    options, args = parser.parse_args(args)
+
+    for filename in (args or [".coverage"]):
+        print("--- {0} ------------------------------".format(filename))
+        data = CoverageData._read_raw_data_file(filename)
+        if options.canonical:
+            canonicalize_json_data(data)
+        print(pretty_data(data))
+
+
+if __name__ == '__main__':
+    import sys
+    debug_main(sys.argv[1:])
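
For orientation, a hedged usage sketch of the CoverageData and CoverageDataFiles APIs documented above; the data-file and source-file names are hypothetical:

    # A minimal sketch, assuming a ".coverage" file produced by "coverage run"
    # exists in the current directory.
    from coverage.data import CoverageData, CoverageDataFiles

    data = CoverageData()
    data.read_file(".coverage")                  # read a named data file
    print(data.measured_files())                 # all measured file names
    print(data.line_counts())                    # {basename: number of executed lines}
    if data.has_arcs() and data.measured_files():
        print(data.arcs(data.measured_files()[0]))

    # Building a data file by hand with the add_* methods.
    new_data = CoverageData()
    new_data.add_lines({"prog.py": {1: None, 2: None, 5: None}})
    new_data.add_run_info(note="hand-made data")
    new_data.write_file(".coverage.manual")

    # Combining parallel data files named ".coverage.*" from the current
    # directory; each file that is successfully combined is deleted.
    combined = CoverageData()
    CoverageDataFiles(basename=".coverage").combine_parallel_data(combined)
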
diff --git a/catapult/third_party/coverage/coverage/debug.py b/catapult/third_party/coverage/coverage/debug.py
new file mode 100644
index 0000000..4076b9b
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/debug.py
@@ -0,0 +1,104 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Control of and utilities for debugging."""
+
+import inspect
+import os
+import sys
+
+from coverage.misc import isolate_module
+
+os = isolate_module(os)
+
+
+# When debugging, it can be helpful to force some options, especially when
+# debugging the configuration mechanisms you usually use to control debugging!
+# This is a list of forced debugging options.
+FORCED_DEBUG = []
+
+# A hack for debugging testing in sub-processes.
+_TEST_NAME_FILE = ""    # "/tmp/covtest.txt"
+
+
+class DebugControl(object):
+    """Control and output for debugging."""
+
+    def __init__(self, options, output):
+        """Configure the options and output file for debugging."""
+        self.options = options
+        self.output = output
+
+    def __repr__(self):
+        return "<DebugControl options=%r output=%r>" % (self.options, self.output)
+
+    def should(self, option):
+        """Decide whether to output debug information in category `option`."""
+        return (option in self.options or option in FORCED_DEBUG)
+
+    def write(self, msg):
+        """Write a line of debug output."""
+        if self.should('pid'):
+            msg = "pid %5d: %s" % (os.getpid(), msg)
+        self.output.write(msg+"\n")
+        if self.should('callers'):
+            dump_stack_frames(self.output)
+        self.output.flush()
+
+    def write_formatted_info(self, header, info):
+        """Write a sequence of (label,data) pairs nicely."""
+        self.write(info_header(header))
+        for line in info_formatter(info):
+            self.write(" %s" % line)
+
+
+def info_header(label):
+    """Make a nice header string."""
+    return "--{0:-<60s}".format(" "+label+" ")
+
+
+def info_formatter(info):
+    """Produce a sequence of formatted lines from info.
+
+    `info` is a sequence of pairs (label, data).  The produced lines are
+    nicely formatted, ready to print.
+
+    """
+    info = list(info)
+    if not info:
+        return
+    label_len = max(len(l) for l, _d in info)
+    for label, data in info:
+        if data == []:
+            data = "-none-"
+        if isinstance(data, (list, set, tuple)):
+            prefix = "%*s:" % (label_len, label)
+            for e in data:
+                yield "%*s %s" % (label_len+1, prefix, e)
+                prefix = ""
+        else:
+            yield "%*s: %s" % (label_len, label, data)
+
+
+def short_stack():                                          # pragma: debugging
+    """Return a string summarizing the call stack.
+
+    The string is multi-line, with one line per stack frame. Each line shows
+    the function name, the file name, and the line number:
+
+        ...
+        start_import_stop : /Users/ned/coverage/trunk/tests/coveragetest.py @95
+        import_local_file : /Users/ned/coverage/trunk/tests/coveragetest.py @81
+        import_local_file : /Users/ned/coverage/trunk/coverage/backward.py @159
+        ...
+
+    """
+    stack = inspect.stack()[:0:-1]
+    return "\n".join("%30s : %s @%d" % (t[3], t[1], t[2]) for t in stack)
+
+
+def dump_stack_frames(out=None):                            # pragma: debugging
+    """Print a summary of the stack to stdout, or some place else."""
+    out = out or sys.stdout
+    out.write(short_stack())
+    out.write("\n")
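
A hedged sketch of wiring up DebugControl by hand; the option names 'dataio' and 'pid' are the ones consulted in data.py above, while the output stream is this sketch's own choice:

    import sys
    from coverage.debug import DebugControl

    debug = DebugControl(options=['dataio', 'pid'], output=sys.stderr)

    if debug.should('dataio'):
        debug.write("Reading data from '.coverage'")   # 'pid' adds a "pid NNNNN:" prefix

    # Nicely formatted (label, data) pairs, via info_header()/info_formatter().
    debug.write_formatted_info("environment", [
        ("python", sys.version.split()[0]),
        ("argv", sys.argv),
    ])
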
diff --git a/catapult/third_party/coverage/coverage/env.py b/catapult/third_party/coverage/coverage/env.py
new file mode 100644
index 0000000..4cd02c0
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/env.py
@@ -0,0 +1,32 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Determine facts about the environment."""
+
+import os
+import sys
+
+# Operating systems.
+WINDOWS = sys.platform == "win32"
+LINUX = sys.platform == "linux2"
+
+# Python implementations.
+PYPY = '__pypy__' in sys.builtin_module_names
+
+# Python versions.
+PYVERSION = sys.version_info
+PY2 = PYVERSION < (3, 0)
+PY3 = PYVERSION >= (3, 0)
+
+# Coverage.py specifics.
+
+# Are we using the C-implemented trace function?
+C_TRACER = os.getenv('COVERAGE_TEST_TRACER', 'c') == 'c'
+
+# Are we coverage-measuring ourselves?
+METACOV = os.getenv('COVERAGE_COVERAGE', '') != ''
+
+# Are we running our test suite?
+# Even when running tests, you can use COVERAGE_TESTING=0 to disable the
+# test-specific behavior like contracts.
+TESTING = os.getenv('COVERAGE_TESTING', '') == 'True'
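
A small hedged sketch of how these facts are consumed; for example, CoverageData._validate() above only runs its invariant checks when TESTING is true. Setting the variable from inside the process, before the import, is this sketch's own assumption:

    import os
    os.environ["COVERAGE_TESTING"] = "True"   # must be set before coverage.env is imported

    from coverage import env

    if env.TESTING:
        print("test-only behavior (invariant checks, contracts) is enabled")
    print("PY2:", env.PY2, "PYPY:", env.PYPY, "C_TRACER:", env.C_TRACER)
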
diff --git a/catapult/third_party/coverage/coverage/execfile.py b/catapult/third_party/coverage/coverage/execfile.py
new file mode 100644
index 0000000..3e20a52
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/execfile.py
@@ -0,0 +1,239 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Execute files of Python code."""
+
+import marshal
+import os
+import sys
+import types
+
+from coverage.backward import BUILTINS
+from coverage.backward import PYC_MAGIC_NUMBER, imp, importlib_util_find_spec
+from coverage.misc import ExceptionDuringRun, NoCode, NoSource, isolate_module
+from coverage.phystokens import compile_unicode
+from coverage.python import get_python_source
+
+os = isolate_module(os)
+
+
+class DummyLoader(object):
+    """A shim for the pep302 __loader__, emulating pkgutil.ImpLoader.
+
+    Currently only implements the .fullname attribute.
+    """
+    def __init__(self, fullname, *_args):
+        self.fullname = fullname
+
+
+if importlib_util_find_spec:
+    def find_module(modulename):
+        """Find the module named `modulename`.
+
+        Returns the file path of the module, and the name of the enclosing
+        package.
+        """
+        try:
+            spec = importlib_util_find_spec(modulename)
+        except ImportError as err:
+            raise NoSource(str(err))
+        if not spec:
+            raise NoSource("No module named %r" % (modulename,))
+        pathname = spec.origin
+        packagename = spec.name
+        if pathname.endswith("__init__.py") and not modulename.endswith("__init__"):
+            mod_main = modulename + ".__main__"
+            spec = importlib_util_find_spec(mod_main)
+            if not spec:
+                raise NoSource(
+                    "No module named %s; "
+                    "%r is a package and cannot be directly executed"
+                    % (mod_main, modulename)
+                )
+            pathname = spec.origin
+            packagename = spec.name
+        packagename = packagename.rpartition(".")[0]
+        return pathname, packagename
+else:
+    def find_module(modulename):
+        """Find the module named `modulename`.
+
+        Returns the file path of the module, and the name of the enclosing
+        package.
+        """
+        openfile = None
+        glo, loc = globals(), locals()
+        try:
+            # Search for the module - inside its parent package, if any - using
+            # standard import mechanics.
+            if '.' in modulename:
+                packagename, name = modulename.rsplit('.', 1)
+                package = __import__(packagename, glo, loc, ['__path__'])
+                searchpath = package.__path__
+            else:
+                packagename, name = None, modulename
+                searchpath = None  # "top-level search" in imp.find_module()
+            openfile, pathname, _ = imp.find_module(name, searchpath)
+
+            # Complain if this is a magic non-file module.
+            if openfile is None and pathname is None:
+                raise NoSource(
+                    "module does not live in a file: %r" % modulename
+                    )
+
+            # If `modulename` is actually a package, not a mere module, then we
+            # pretend to be Python 2.7 and try running its __main__.py script.
+            if openfile is None:
+                packagename = modulename
+                name = '__main__'
+                package = __import__(packagename, glo, loc, ['__path__'])
+                searchpath = package.__path__
+                openfile, pathname, _ = imp.find_module(name, searchpath)
+        except ImportError as err:
+            raise NoSource(str(err))
+        finally:
+            if openfile:
+                openfile.close()
+
+        return pathname, packagename
+
+
+def run_python_module(modulename, args):
+    """Run a Python module, as though with ``python -m name args...``.
+
+    `modulename` is the name of the module, possibly a dot-separated name.
+    `args` is the argument array to present as sys.argv, including the first
+    element naming the module being executed.
+
+    """
+    pathname, packagename = find_module(modulename)
+
+    pathname = os.path.abspath(pathname)
+    args[0] = pathname
+    run_python_file(pathname, args, package=packagename, modulename=modulename, path0="")
+
+
+def run_python_file(filename, args, package=None, modulename=None, path0=None):
+    """Run a Python file as if it were the main program on the command line.
+
+    `filename` is the path to the file to execute, it need not be a .py file.
+    `args` is the argument array to present as sys.argv, including the first
+    element naming the file being executed.  `package` is the name of the
+    enclosing package, if any.
+
+    `modulename` is the name of the module the file was run as.
+
+    `path0` is the value to put into sys.path[0].  If it's None, then this
+    function will decide on a value.
+
+    """
+    if modulename is None and sys.version_info >= (3, 3):
+        modulename = '__main__'
+
+    # Create a module to serve as __main__
+    old_main_mod = sys.modules['__main__']
+    main_mod = types.ModuleType('__main__')
+    sys.modules['__main__'] = main_mod
+    main_mod.__file__ = filename
+    if package:
+        main_mod.__package__ = package
+    if modulename:
+        main_mod.__loader__ = DummyLoader(modulename)
+
+    main_mod.__builtins__ = BUILTINS
+
+    # Set sys.argv properly.
+    old_argv = sys.argv
+    sys.argv = args
+
+    if os.path.isdir(filename):
+        # Running a directory means running the __main__.py file in that
+        # directory.
+        my_path0 = filename
+
+        for ext in [".py", ".pyc", ".pyo"]:
+            try_filename = os.path.join(filename, "__main__" + ext)
+            if os.path.exists(try_filename):
+                filename = try_filename
+                break
+        else:
+            raise NoSource("Can't find '__main__' module in '%s'" % filename)
+    else:
+        my_path0 = os.path.abspath(os.path.dirname(filename))
+
+    # Set sys.path correctly.
+    old_path0 = sys.path[0]
+    sys.path[0] = path0 if path0 is not None else my_path0
+
+    try:
+        # Make a code object somehow.
+        if filename.endswith((".pyc", ".pyo")):
+            code = make_code_from_pyc(filename)
+        else:
+            code = make_code_from_py(filename)
+
+        # Execute the code object.
+        try:
+            exec(code, main_mod.__dict__)
+        except SystemExit:
+            # The user called sys.exit().  Just pass it along to the upper
+            # layers, where it will be handled.
+            raise
+        except:
+            # Something went wrong while executing the user code.
+            # Get the exc_info, and pack them into an exception that we can
+            # throw up to the outer loop.  We peel one layer off the traceback
+            # so that the coverage.py code doesn't appear in the final printed
+            # traceback.
+            typ, err, tb = sys.exc_info()
+
+            # PyPy3 weirdness.  If I don't access __context__, then somehow it
+            # is non-None when the exception is reported at the upper layer,
+            # and a nested exception is shown to the user.  This getattr fixes
+            # it somehow? https://bitbucket.org/pypy/pypy/issue/1903
+            getattr(err, '__context__', None)
+
+            raise ExceptionDuringRun(typ, err, tb.tb_next)
+    finally:
+        # Restore the old __main__, argv, and path.
+        sys.modules['__main__'] = old_main_mod
+        sys.argv = old_argv
+        sys.path[0] = old_path0
+
+
+def make_code_from_py(filename):
+    """Get source from `filename` and make a code object of it."""
+    # Open the source file.
+    try:
+        source = get_python_source(filename)
+    except (IOError, NoSource):
+        raise NoSource("No file to run: '%s'" % filename)
+
+    code = compile_unicode(source, filename, "exec")
+    return code
+
+
+def make_code_from_pyc(filename):
+    """Get a code object from a .pyc file."""
+    try:
+        fpyc = open(filename, "rb")
+    except IOError:
+        raise NoCode("No file to run: '%s'" % filename)
+
+    with fpyc:
+        # First four bytes are a version-specific magic number.  It has to
+        # match or we won't run the file.
+        magic = fpyc.read(4)
+        if magic != PYC_MAGIC_NUMBER:
+            raise NoCode("Bad magic number in .pyc file")
+
+        # Skip the junk in the header that we don't need.
+        fpyc.read(4)            # Skip the moddate.
+        if sys.version_info >= (3, 3):
+            # 3.3 added another long to the header (size), skip it.
+            fpyc.read(4)
+
+        # The rest of the file is the code object we want.
+        code = marshal.load(fpyc)
+
+    return code
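
A hedged sketch of calling these helpers directly; the script and module names are hypothetical:

    from coverage.execfile import run_python_file, run_python_module
    from coverage.misc import ExceptionDuringRun, NoSource

    # As if run with "python prog.py --fast"; args[0] names the file being executed.
    try:
        run_python_file("prog.py", ["prog.py", "--fast"])
    except ExceptionDuringRun as exc:
        # The user code raised: the original exc_info triple is packed in exc.args.
        typ, err, tb = exc.args
        print("user code failed:", err)
    except NoSource as exc:
        print(exc)

    # As if run with "python -m mypkg.tool arg".
    run_python_module("mypkg.tool", ["mypkg.tool", "arg"])
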
diff --git a/catapult/third_party/coverage/coverage/files.py b/catapult/third_party/coverage/coverage/files.py
new file mode 100644
index 0000000..44997d1
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/files.py
@@ -0,0 +1,378 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""File wrangling."""
+
+import fnmatch
+import ntpath
+import os
+import os.path
+import posixpath
+import re
+import sys
+
+from coverage import env
+from coverage.backward import unicode_class
+from coverage.misc import contract, CoverageException, join_regex, isolate_module
+
+
+os = isolate_module(os)
+
+
+def set_relative_directory():
+    """Set the directory that `relative_filename` will be relative to."""
+    global RELATIVE_DIR, CANONICAL_FILENAME_CACHE
+
+    # The absolute path to our current directory.
+    RELATIVE_DIR = os.path.normcase(abs_file(os.curdir) + os.sep)
+
+    # Cache of results of calling the canonical_filename() method, to
+    # avoid duplicating work.
+    CANONICAL_FILENAME_CACHE = {}
+
+
+def relative_directory():
+    """Return the directory that `relative_filename` is relative to."""
+    return RELATIVE_DIR
+
+
+@contract(returns='unicode')
+def relative_filename(filename):
+    """Return the relative form of `filename`.
+
+    The file name will be relative to the directory that was current when
+    `set_relative_directory` was called.
+
+    """
+    fnorm = os.path.normcase(filename)
+    if fnorm.startswith(RELATIVE_DIR):
+        filename = filename[len(RELATIVE_DIR):]
+    return unicode_filename(filename)
+
+
+@contract(returns='unicode')
+def canonical_filename(filename):
+    """Return a canonical file name for `filename`.
+
+    An absolute path with no redundant components and normalized case.
+
+    """
+    if filename not in CANONICAL_FILENAME_CACHE:
+        if not os.path.isabs(filename):
+            for path in [os.curdir] + sys.path:
+                if path is None:
+                    continue
+                f = os.path.join(path, filename)
+                if os.path.exists(f):
+                    filename = f
+                    break
+        cf = abs_file(filename)
+        CANONICAL_FILENAME_CACHE[filename] = cf
+    return CANONICAL_FILENAME_CACHE[filename]
+
+
+def flat_rootname(filename):
+    """A base for a flat file name to correspond to this file.
+
+    Useful for writing files about the code where you want all the files in
+    the same directory, but need to differentiate same-named files from
+    different directories.
+
+    For example, the file a/b/c.py maps to 'a_b_c_py'.
+
+    """
+    name = ntpath.splitdrive(filename)[1]
+    return re.sub(r"[\\/.:]", "_", name)
+
+
+if env.WINDOWS:
+
+    _ACTUAL_PATH_CACHE = {}
+    _ACTUAL_PATH_LIST_CACHE = {}
+
+    def actual_path(path):
+        """Get the actual path of `path`, including the correct case."""
+        if env.PY2 and isinstance(path, unicode_class):
+            path = path.encode(sys.getfilesystemencoding())
+        if path in _ACTUAL_PATH_CACHE:
+            return _ACTUAL_PATH_CACHE[path]
+
+        head, tail = os.path.split(path)
+        if not tail:
+            # This means head is the drive spec: normalize it.
+            actpath = head.upper()
+        elif not head:
+            actpath = tail
+        else:
+            head = actual_path(head)
+            if head in _ACTUAL_PATH_LIST_CACHE:
+                files = _ACTUAL_PATH_LIST_CACHE[head]
+            else:
+                try:
+                    files = os.listdir(head)
+                except OSError:
+                    files = []
+                _ACTUAL_PATH_LIST_CACHE[head] = files
+            normtail = os.path.normcase(tail)
+            for f in files:
+                if os.path.normcase(f) == normtail:
+                    tail = f
+                    break
+            actpath = os.path.join(head, tail)
+        _ACTUAL_PATH_CACHE[path] = actpath
+        return actpath
+
+else:
+    def actual_path(filename):
+        """The actual path for non-Windows platforms."""
+        return filename
+
+
+if env.PY2:
+    @contract(returns='unicode')
+    def unicode_filename(filename):
+        """Return a Unicode version of `filename`."""
+        if isinstance(filename, str):
+            encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
+            filename = filename.decode(encoding, "replace")
+        return filename
+else:
+    @contract(filename='unicode', returns='unicode')
+    def unicode_filename(filename):
+        """Return a Unicode version of `filename`."""
+        return filename
+
+
+@contract(returns='unicode')
+def abs_file(filename):
+    """Return the absolute normalized form of `filename`."""
+    path = os.path.expandvars(os.path.expanduser(filename))
+    path = os.path.abspath(os.path.realpath(path))
+    path = actual_path(path)
+    path = unicode_filename(path)
+    return path
+
+
+RELATIVE_DIR = None
+CANONICAL_FILENAME_CACHE = None
+set_relative_directory()
+
+
+def isabs_anywhere(filename):
+    """Is `filename` an absolute path on any OS?"""
+    return ntpath.isabs(filename) or posixpath.isabs(filename)
+
+
+def prep_patterns(patterns):
+    """Prepare the file patterns for use in a `FnmatchMatcher`.
+
+    If a pattern starts with a wildcard, it is used as a pattern
+    as-is.  If it does not start with a wildcard, then it is made
+    absolute with the current directory.
+
+    If `patterns` is None, an empty list is returned.
+
+    """
+    prepped = []
+    for p in patterns or []:
+        if p.startswith(("*", "?")):
+            prepped.append(p)
+        else:
+            prepped.append(abs_file(p))
+    return prepped
+
+
+class TreeMatcher(object):
+    """A matcher for files in a tree."""
+    def __init__(self, directories):
+        self.dirs = list(directories)
+
+    def __repr__(self):
+        return "<TreeMatcher %r>" % self.dirs
+
+    def info(self):
+        """A list of strings for displaying when dumping state."""
+        return self.dirs
+
+    def match(self, fpath):
+        """Does `fpath` indicate a file in one of our trees?"""
+        for d in self.dirs:
+            if fpath.startswith(d):
+                if fpath == d:
+                    # This is the same file!
+                    return True
+                if fpath[len(d)] == os.sep:
+                    # This is a file in the directory
+                    return True
+        return False
+
+
+class ModuleMatcher(object):
+    """A matcher for modules in a tree."""
+    def __init__(self, module_names):
+        self.modules = list(module_names)
+
+    def __repr__(self):
+        return "<ModuleMatcher %r>" % (self.modules)
+
+    def info(self):
+        """A list of strings for displaying when dumping state."""
+        return self.modules
+
+    def match(self, module_name):
+        """Does `module_name` indicate a module in one of our packages?"""
+        if not module_name:
+            return False
+
+        for m in self.modules:
+            if module_name.startswith(m):
+                if module_name == m:
+                    return True
+                if module_name[len(m)] == '.':
+                    # This is a module in the package
+                    return True
+
+        return False
+
+
+class FnmatchMatcher(object):
+    """A matcher for files by file name pattern."""
+    def __init__(self, pats):
+        self.pats = pats[:]
+        # fnmatch is platform-specific. On Windows, it does the Windows thing
+        # of treating / and \ as equivalent. But on other platforms, we need to
+        # take care of that ourselves.
+        fnpats = (fnmatch.translate(p) for p in pats)
+        fnpats = (p.replace(r"\/", r"[\\/]") for p in fnpats)
+        if env.WINDOWS:
+            # Windows is also case-insensitive.  BTW: the regex docs say that
+            # flags like (?i) have to be at the beginning, but fnmatch puts
+            # them at the end, and having two there seems to work fine.
+            fnpats = (p + "(?i)" for p in fnpats)
+        self.re = re.compile(join_regex(fnpats))
+
+    def __repr__(self):
+        return "<FnmatchMatcher %r>" % self.pats
+
+    def info(self):
+        """A list of strings for displaying when dumping state."""
+        return self.pats
+
+    def match(self, fpath):
+        """Does `fpath` match one of our file name patterns?"""
+        return self.re.match(fpath) is not None
+
+
+def sep(s):
+    """Find the path separator used in this string, or os.sep if none."""
+    sep_match = re.search(r"[\\/]", s)
+    if sep_match:
+        the_sep = sep_match.group(0)
+    else:
+        the_sep = os.sep
+    return the_sep
+
+
+class PathAliases(object):
+    """A collection of aliases for paths.
+
+    When combining data files from remote machines, often the paths to source
+    code are different, for example, due to OS differences, or because of
+    serialized checkouts on continuous integration machines.
+
+    A `PathAliases` object tracks a list of pattern/result pairs, and can
+    map a path through those aliases to produce a unified path.
+
+    """
+    def __init__(self):
+        self.aliases = []
+
+    def add(self, pattern, result):
+        """Add the `pattern`/`result` pair to the list of aliases.
+
+        `pattern` is an `fnmatch`-style pattern.  `result` is a simple
+        string.  When mapping paths, if a path starts with a match against
+        `pattern`, then that match is replaced with `result`.  This models
+        isomorphic source trees being rooted at different places on two
+        different machines.
+
+        `pattern` can't end with a wildcard component, since that would
+        match an entire tree, and not just its root.
+
+        """
+        # The pattern can't end with a wildcard component.
+        pattern = pattern.rstrip(r"\/")
+        if pattern.endswith("*"):
+            raise CoverageException("Pattern must not end with wildcards.")
+        pattern_sep = sep(pattern)
+
+        # The pattern is meant to match a filepath.  Let's make it absolute
+        # unless it already is, or is meant to match any prefix.
+        if not pattern.startswith('*') and not isabs_anywhere(pattern):
+            pattern = abs_file(pattern)
+        pattern += pattern_sep
+
+        # Make a regex from the pattern.  fnmatch always adds a \Z to
+        # match the whole string, which we don't want.
+        regex_pat = fnmatch.translate(pattern).replace(r'\Z(', '(')
+
+        # We want */a/b.py to match on Windows too, so change slash to match
+        # either separator.
+        regex_pat = regex_pat.replace(r"\/", r"[\\/]")
+        # We want case-insensitive matching, so add that flag.
+        regex = re.compile(r"(?i)" + regex_pat)
+
+        # Normalize the result: it must end with a path separator.
+        result_sep = sep(result)
+        result = result.rstrip(r"\/") + result_sep
+        self.aliases.append((regex, result, pattern_sep, result_sep))
+
+    def map(self, path):
+        """Map `path` through the aliases.
+
+        `path` is checked against all of the patterns.  The first pattern to
+        match is used to replace the root of the path with the result root.
+        Only one pattern is ever used.  If no patterns match, `path` is
+        returned unchanged.
+
+        The separator style in the result is made to match that of the result
+        in the alias.
+
+        Returns the mapped path.  If a mapping has happened, this is a
+        canonical path.  If no mapping has happened, it is the original value
+        of `path` unchanged.
+
+        """
+        for regex, result, pattern_sep, result_sep in self.aliases:
+            m = regex.match(path)
+            if m:
+                new = path.replace(m.group(0), result)
+                if pattern_sep != result_sep:
+                    new = new.replace(pattern_sep, result_sep)
+                new = canonical_filename(new)
+                return new
+        return path
+
+
+def find_python_files(dirname):
+    """Yield all of the importable Python files in `dirname`, recursively.
+
+    To be importable, the files have to be in a directory with a __init__.py,
+    except for `dirname` itself, which isn't required to have one.  The
+    assumption is that `dirname` was specified directly, so the user knows
+    best, but sub-directories are checked for a __init__.py to be sure we only
+    find the importable files.
+
+    """
+    for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dirname)):
+        if i > 0 and '__init__.py' not in filenames:
+            # If a directory doesn't have __init__.py, then it isn't
+            # importable and neither are its files
+            del dirnames[:]
+            continue
+        for filename in filenames:
+            # We're only interested in files that look like reasonable Python
+            # files: Must end with .py or .pyw, and must not have certain funny
+            # characters that probably mean they are editor junk.
+            if re.match(r"^[^.#~!$@%^&*()+=,]+\.pyw?$", filename):
+                yield os.path.join(dirpath, filename)
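
A hedged sketch of the path utilities above, centered on PathAliases, which CoverageData.update() and combine_parallel_data() use to reconcile paths recorded on different machines; the example paths are made up:

    from coverage.files import FnmatchMatcher, PathAliases, flat_rootname, prep_patterns

    # Map a CI checkout root onto the local tree; the result is canonicalized.
    aliases = PathAliases()
    aliases.add("/jenkins/build/*/src", "./src")
    print(aliases.map("/jenkins/build/1234/src/pkg/mod.py"))

    # Flat, directory-free names for per-file report files.
    print(flat_rootname("a/b/c.py"))          # 'a_b_c_py'

    # fnmatch-style matching; patterns not starting with a wildcard are made absolute.
    matcher = FnmatchMatcher(prep_patterns(["*/tests/*", "src/*.py"]))
    print(matcher.match("/home/me/project/tests/test_x.py"))
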
diff --git a/catapult/third_party/coverage/coverage/fullcoverage/encodings.py b/catapult/third_party/coverage/coverage/fullcoverage/encodings.py
new file mode 100644
index 0000000..699f386
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/fullcoverage/encodings.py
@@ -0,0 +1,60 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Imposter encodings module that installs a coverage-style tracer.
+
+This is NOT the encodings module; it is an imposter that sets up tracing
+instrumentation and then replaces itself with the real encodings module.
+
+If the directory that holds this file is placed first in the PYTHONPATH when
+using "coverage" to run Python's tests, then this file will become the very
+first module imported by the internals of Python 3.  It installs a
+coverage.py-compatible trace function that can watch Standard Library modules
+execute from the very earliest stages of Python's own boot process.  This fixes
+a problem with coverage.py - that it starts too late to trace the coverage of
+many of the most fundamental modules in the Standard Library.
+
+"""
+
+import sys
+
+class FullCoverageTracer(object):
+    def __init__(self):
+        # `traces` is a list of trace events.  Frames are tricky: the same
+        # frame object is used for a whole scope, with new line numbers
+        # written into it.  So in one scope, all the frame objects are the
+        # same object, and will eventually all point to the last line
+        # executed.  So we keep the line numbers alongside the frames.
+        # The list looks like:
+        #
+        #   traces = [
+        #       ((frame, event, arg), lineno), ...
+        #       ]
+        #
+        self.traces = []
+
+    def fullcoverage_trace(self, *args):
+        frame, event, arg = args
+        self.traces.append((args, frame.f_lineno))
+        return self.fullcoverage_trace
+
+sys.settrace(FullCoverageTracer().fullcoverage_trace)
+
+# In coverage/files.py is actual_filename(), which uses glob.glob.  I don't
+# understand why, but that use of glob borks everything if fullcoverage is in
+# effect.  So here we make an ugly hail-mary pass to switch off glob.glob over
+# there.  This means when using fullcoverage, Windows path names will not be
+# their actual case.
+
+#sys.fullcoverage = True
+
+# Finally, remove our own directory from sys.path; remove ourselves from
+# sys.modules; and re-import "encodings", which will be the real package
+# this time.  Note that the delete from sys.modules dictionary has to
+# happen last, since all of the symbols in this module will become None
+# at that exact moment, including "sys".
+
+parentdir = max(filter(__file__.startswith, sys.path), key=len)
+sys.path.remove(parentdir)
+del sys.modules['encodings']
+import encodings
diff --git a/catapult/third_party/coverage/coverage/html.py b/catapult/third_party/coverage/coverage/html.py
new file mode 100644
index 0000000..8dca632
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/html.py
@@ -0,0 +1,448 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""HTML reporting for coverage.py."""
+
+import datetime
+import json
+import os
+import re
+import shutil
+
+import coverage
+from coverage import env
+from coverage.backward import iitems
+from coverage.files import flat_rootname
+from coverage.misc import CoverageException, Hasher, isolate_module
+from coverage.report import Reporter
+from coverage.results import Numbers
+from coverage.templite import Templite
+
+os = isolate_module(os)
+
+
+# Static files are looked for in a list of places.
+STATIC_PATH = [
+    # The place Debian puts system Javascript libraries.
+    "/usr/share/javascript",
+
+    # Our htmlfiles directory.
+    os.path.join(os.path.dirname(__file__), "htmlfiles"),
+]
+
+
+def data_filename(fname, pkgdir=""):
+    """Return the path to a data file of ours.
+
+    The file is searched for on `STATIC_PATH`, and the first place it's found
+    is returned.
+
+    Each directory in `STATIC_PATH` is searched as-is, and also, if `pkgdir`
+    is provided, at that sub-directory.
+
+    """
+    tried = []
+    for static_dir in STATIC_PATH:
+        static_filename = os.path.join(static_dir, fname)
+        if os.path.exists(static_filename):
+            return static_filename
+        else:
+            tried.append(static_filename)
+        if pkgdir:
+            static_filename = os.path.join(static_dir, pkgdir, fname)
+            if os.path.exists(static_filename):
+                return static_filename
+            else:
+                tried.append(static_filename)
+    raise CoverageException(
+        "Couldn't find static file %r from %r, tried: %r" % (fname, os.getcwd(), tried)
+    )
+
+
+def data(fname):
+    """Return the contents of a data file of ours."""
+    with open(data_filename(fname)) as data_file:
+        return data_file.read()
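+
+# Illustrative sketch (file names are taken from the STATIC_FILES list below;
+# which directory wins depends on what exists on the local system):
+#
+#     css_path = data_filename("style.css")               # searched on STATIC_PATH
+#     js_path = data_filename("jquery.min.js", "jquery")  # also tries a jquery/ sub-dir
+#     css_text = data("style.css")                        # contents of the found file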
+
+
+class HtmlReporter(Reporter):
+    """HTML reporting."""
+
+    # These files will be copied from the htmlfiles directory to the output
+    # directory.
+    STATIC_FILES = [
+        ("style.css", ""),
+        ("jquery.min.js", "jquery"),
+        ("jquery.debounce.min.js", "jquery-debounce"),
+        ("jquery.hotkeys.js", "jquery-hotkeys"),
+        ("jquery.isonscreen.js", "jquery-isonscreen"),
+        ("jquery.tablesorter.min.js", "jquery-tablesorter"),
+        ("coverage_html.js", ""),
+        ("keybd_closed.png", ""),
+        ("keybd_open.png", ""),
+    ]
+
+    def __init__(self, cov, config):
+        super(HtmlReporter, self).__init__(cov, config)
+        self.directory = None
+        title = self.config.html_title
+        if env.PY2:
+            title = title.decode("utf8")
+        self.template_globals = {
+            'escape': escape,
+            'pair': pair,
+            'title': title,
+            '__url__': coverage.__url__,
+            '__version__': coverage.__version__,
+        }
+        self.source_tmpl = Templite(
+            data("pyfile.html"), self.template_globals
+        )
+
+        self.coverage = cov
+
+        self.files = []
+        self.has_arcs = self.coverage.data.has_arcs()
+        self.status = HtmlStatus()
+        self.extra_css = None
+        self.totals = Numbers()
+        self.time_stamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
+
+    def report(self, morfs):
+        """Generate an HTML report for `morfs`.
+
+        `morfs` is a list of modules or file names.
+
+        """
+        assert self.config.html_dir, "must give a directory for html reporting"
+
+        # Read the status data.
+        self.status.read(self.config.html_dir)
+
+        # Check that this run used the same settings as the last run.
+        m = Hasher()
+        m.update(self.config)
+        these_settings = m.hexdigest()
+        if self.status.settings_hash() != these_settings:
+            self.status.reset()
+            self.status.set_settings_hash(these_settings)
+
+        # The user may have extra CSS they want copied.
+        if self.config.extra_css:
+            self.extra_css = os.path.basename(self.config.extra_css)
+
+        # Process all the files.
+        self.report_files(self.html_file, morfs, self.config.html_dir)
+
+        if not self.files:
+            raise CoverageException("No data to report.")
+
+        # Write the index file.
+        self.index_file()
+
+        self.make_local_static_report_files()
+        return self.totals.n_statements and self.totals.pc_covered
+
+    def make_local_static_report_files(self):
+        """Make local instances of static files for HTML report."""
+        # The files we provide must always be copied.
+        for static, pkgdir in self.STATIC_FILES:
+            shutil.copyfile(
+                data_filename(static, pkgdir),
+                os.path.join(self.directory, static)
+            )
+
+        # The user may have extra CSS they want copied.
+        if self.extra_css:
+            shutil.copyfile(
+                self.config.extra_css,
+                os.path.join(self.directory, self.extra_css)
+            )
+
+    def write_html(self, fname, html):
+        """Write `html` to `fname`, properly encoded."""
+        with open(fname, "wb") as fout:
+            fout.write(html.encode('ascii', 'xmlcharrefreplace'))
+
+    def file_hash(self, source, fr):
+        """Compute a hash that changes if the file needs to be re-reported."""
+        m = Hasher()
+        m.update(source)
+        self.coverage.data.add_to_hash(fr.filename, m)
+        return m.hexdigest()
+
+    def html_file(self, fr, analysis):
+        """Generate an HTML file for one source file."""
+        source = fr.source()
+
+        # Find out if the file on disk is already correct.
+        rootname = flat_rootname(fr.relative_filename())
+        this_hash = self.file_hash(source.encode('utf-8'), fr)
+        that_hash = self.status.file_hash(rootname)
+        if this_hash == that_hash:
+            # Nothing has changed to require the file to be reported again.
+            self.files.append(self.status.index_info(rootname))
+            return
+
+        self.status.set_file_hash(rootname, this_hash)
+
+        # Get the numbers for this file.
+        nums = analysis.numbers
+
+        if self.has_arcs:
+            missing_branch_arcs = analysis.missing_branch_arcs()
+
+        # These classes determine which lines are highlighted by default.
+        c_run = "run hide_run"
+        c_exc = "exc"
+        c_mis = "mis"
+        c_par = "par " + c_run
+
+        lines = []
+
+        for lineno, line in enumerate(fr.source_token_lines(), start=1):
+            # Figure out how to mark this line.
+            line_class = []
+            annotate_html = ""
+            annotate_title = ""
+            if lineno in analysis.statements:
+                line_class.append("stm")
+            if lineno in analysis.excluded:
+                line_class.append(c_exc)
+            elif lineno in analysis.missing:
+                line_class.append(c_mis)
+            elif self.has_arcs and lineno in missing_branch_arcs:
+                line_class.append(c_par)
+                shorts = []
+                longs = []
+                for b in missing_branch_arcs[lineno]:
+                    if b < 0:
+                        shorts.append("exit")
+                        longs.append("the function exit")
+                    else:
+                        shorts.append(b)
+                        longs.append("line %d" % b)
+                # 202F is NARROW NO-BREAK SPACE.
+                # 219B is RIGHTWARDS ARROW WITH STROKE.
+                short_fmt = "%s&#x202F;&#x219B;&#x202F;%s"
+                annotate_html = ",&nbsp;&nbsp; ".join(short_fmt % (lineno, d) for d in shorts)
+                annotate_html += " [?]"
+
+                annotate_title = "Line %d was executed, but never jumped to " % lineno
+                if len(longs) == 1:
+                    annotate_title += longs[0]
+                elif len(longs) == 2:
+                    annotate_title += longs[0] + " or " + longs[1]
+                else:
+                    annotate_title += ", ".join(longs[:-1]) + ", or " + longs[-1]
+            elif lineno in analysis.statements:
+                line_class.append(c_run)
+
+            # Build the HTML for the line.
+            html = []
+            for tok_type, tok_text in line:
+                if tok_type == "ws":
+                    html.append(escape(tok_text))
+                else:
+                    tok_html = escape(tok_text) or '&nbsp;'
+                    html.append(
+                        '<span class="%s">%s</span>' % (tok_type, tok_html)
+                    )
+
+            lines.append({
+                'html': ''.join(html),
+                'number': lineno,
+                'class': ' '.join(line_class) or "pln",
+                'annotate': annotate_html,
+                'annotate_title': annotate_title,
+            })
+
+        # Write the HTML page for this file.
+        template_values = {
+            'c_exc': c_exc, 'c_mis': c_mis, 'c_par': c_par, 'c_run': c_run,
+            'has_arcs': self.has_arcs, 'extra_css': self.extra_css,
+            'fr': fr, 'nums': nums, 'lines': lines,
+            'time_stamp': self.time_stamp,
+        }
+        html = spaceless(self.source_tmpl.render(template_values))
+
+        html_filename = rootname + ".html"
+        html_path = os.path.join(self.directory, html_filename)
+        self.write_html(html_path, html)
+
+        # Save this file's information for the index file.
+        index_info = {
+            'nums': nums,
+            'html_filename': html_filename,
+            'relative_filename': fr.relative_filename(),
+        }
+        self.files.append(index_info)
+        self.status.set_index_info(rootname, index_info)
+
+    def index_file(self):
+        """Write the index.html file for this report."""
+        index_tmpl = Templite(data("index.html"), self.template_globals)
+
+        self.totals = sum(f['nums'] for f in self.files)
+
+        html = index_tmpl.render({
+            'has_arcs': self.has_arcs,
+            'extra_css': self.extra_css,
+            'files': self.files,
+            'totals': self.totals,
+            'time_stamp': self.time_stamp,
+        })
+
+        self.write_html(os.path.join(self.directory, "index.html"), html)
+
+        # Write the latest hashes for next time.
+        self.status.write(self.directory)
+
+
+class HtmlStatus(object):
+    """The status information we keep to support incremental reporting."""
+
+    STATUS_FILE = "status.json"
+    STATUS_FORMAT = 1
+
+    #           pylint: disable=wrong-spelling-in-comment,useless-suppression
+    #  The data looks like:
+    #
+    #  {
+    #      'format': 1,
+    #      'settings': '540ee119c15d52a68a53fe6f0897346d',
+    #      'version': '4.0a1',
+    #      'files': {
+    #          'cogapp___init__': {
+    #              'hash': 'e45581a5b48f879f301c0f30bf77a50c',
+    #              'index': {
+    #                  'html_filename': 'cogapp___init__.html',
+    #                  'name': 'cogapp/__init__',
+    #                  'nums': <coverage.results.Numbers object at 0x10ab7ed0>,
+    #              }
+    #          },
+    #          ...
+    #          'cogapp_whiteutils': {
+    #              'hash': '8504bb427fc488c4176809ded0277d51',
+    #              'index': {
+    #                  'html_filename': 'cogapp_whiteutils.html',
+    #                  'name': 'cogapp/whiteutils',
+    #                  'nums': <coverage.results.Numbers object at 0x10ab7d90>,
+    #              }
+    #          },
+    #      },
+    #  }
+
+    def __init__(self):
+        self.reset()
+
+    def reset(self):
+        """Initialize to empty."""
+        self.settings = ''
+        self.files = {}
+
+    def read(self, directory):
+        """Read the last status in `directory`."""
+        usable = False
+        try:
+            status_file = os.path.join(directory, self.STATUS_FILE)
+            with open(status_file, "r") as fstatus:
+                status = json.load(fstatus)
+        except (IOError, ValueError):
+            usable = False
+        else:
+            usable = True
+            if status['format'] != self.STATUS_FORMAT:
+                usable = False
+            elif status['version'] != coverage.__version__:
+                usable = False
+
+        if usable:
+            self.files = {}
+            for filename, fileinfo in iitems(status['files']):
+                fileinfo['index']['nums'] = Numbers(*fileinfo['index']['nums'])
+                self.files[filename] = fileinfo
+            self.settings = status['settings']
+        else:
+            self.reset()
+
+    def write(self, directory):
+        """Write the current status to `directory`."""
+        status_file = os.path.join(directory, self.STATUS_FILE)
+        files = {}
+        for filename, fileinfo in iitems(self.files):
+            fileinfo['index']['nums'] = fileinfo['index']['nums'].init_args()
+            files[filename] = fileinfo
+
+        status = {
+            'format': self.STATUS_FORMAT,
+            'version': coverage.__version__,
+            'settings': self.settings,
+            'files': files,
+        }
+        with open(status_file, "w") as fout:
+            json.dump(status, fout)
+
+        # Older versions of ShiningPanda look for the old name, status.dat.
+        # Accommodate them if we are running under Jenkins.
+        # https://issues.jenkins-ci.org/browse/JENKINS-28428
+        if "JENKINS_URL" in os.environ:
+            with open(os.path.join(directory, "status.dat"), "w") as dat:
+                dat.write("https://issues.jenkins-ci.org/browse/JENKINS-28428\n")
+
+    def settings_hash(self):
+        """Get the hash of the coverage.py settings."""
+        return self.settings
+
+    def set_settings_hash(self, settings):
+        """Set the hash of the coverage.py settings."""
+        self.settings = settings
+
+    def file_hash(self, fname):
+        """Get the hash of `fname`'s contents."""
+        return self.files.get(fname, {}).get('hash', '')
+
+    def set_file_hash(self, fname, val):
+        """Set the hash of `fname`'s contents."""
+        self.files.setdefault(fname, {})['hash'] = val
+
+    def index_info(self, fname):
+        """Get the information for index.html for `fname`."""
+        return self.files.get(fname, {}).get('index', {})
+
+    def set_index_info(self, fname, info):
+        """Set the information for index.html for `fname`."""
+        self.files.setdefault(fname, {})['index'] = info
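+
+# A hedged usage sketch (the directory name and this_hash are hypothetical):
+# HtmlReporter uses HtmlStatus to skip re-rendering files whose hash has not
+# changed between runs.
+#
+#     status = HtmlStatus()
+#     status.read("htmlcov")                   # loads status.json if it is usable
+#     if status.file_hash("pkg___init__") != this_hash:
+#         status.set_file_hash("pkg___init__", this_hash)
+#     status.write("htmlcov")                  # persists hashes for the next run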
+
+
+# Helpers for templates and generating HTML
+
+def escape(t):
+    """HTML-escape the text in `t`."""
+    return (
+        t
+        # Convert HTML special chars into HTML entities.
+        .replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
+        .replace("'", "&#39;").replace('"', "&quot;")
+        # Convert runs of spaces: "......" -> "&nbsp;.&nbsp;.&nbsp;."
+        .replace("  ", "&nbsp; ")
+        # To deal with odd-length runs, convert the final pair of spaces
+        # so that "....." -> "&nbsp;.&nbsp;&nbsp;."
+        .replace("  ", "&nbsp; ")
+    )
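+
+# Example (illustrative):
+#
+#     escape('a < b  & "c"')   # -> 'a &lt; b&nbsp; &amp; &quot;c&quot;'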
+
+
+def spaceless(html):
+    """Squeeze out some annoying extra space from an HTML string.
+
+    Nicely-formatted templates mean lots of extra space in the result.
+    Get rid of some.
+
+    """
+    html = re.sub(r">\s+<p ", ">\n<p ", html)
+    return html
+
+
+def pair(ratio):
+    """Format a pair of numbers so JavaScript can read them in an attribute."""
+    return "%s %s" % ratio
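+
+# Example (illustrative): pair((9, 12)) returns "9 12"; index.html stores it in
+# a data-ratio attribute, which coverage_html.js splits and sums when filtering.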
diff --git a/catapult/third_party/coverage/coverage/htmlfiles/coverage_html.js b/catapult/third_party/coverage/coverage/htmlfiles/coverage_html.js
new file mode 100644
index 0000000..bd6a875
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/htmlfiles/coverage_html.js
@@ -0,0 +1,512 @@
+// Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+// For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+// Coverage.py HTML report browser code.
+/*jslint browser: true, sloppy: true, vars: true, plusplus: true, maxerr: 50, indent: 4 */
+/*global coverage: true, document, window, $ */
+
+coverage = {};
+
+// Find all the elements with shortkey_* class, and use them to assign a shortcut key.
+coverage.assign_shortkeys = function () {
+    $("*[class*='shortkey_']").each(function (i, e) {
+        $.each($(e).attr("class").split(" "), function (i, c) {
+            if (/^shortkey_/.test(c)) {
+                $(document).bind('keydown', c.substr(9), function () {
+                    $(e).click();
+                });
+            }
+        });
+    });
+};
+
+// Create the events for the help panel.
+coverage.wire_up_help_panel = function () {
+    $("#keyboard_icon").click(function () {
+        // Show the help panel, and position it so the keyboard icon in the
+        // panel is in the same place as the keyboard icon in the header.
+        $(".help_panel").show();
+        var koff = $("#keyboard_icon").offset();
+        var poff = $("#panel_icon").position();
+        $(".help_panel").offset({
+            top: koff.top-poff.top,
+            left: koff.left-poff.left
+        });
+    });
+    $("#panel_icon").click(function () {
+        $(".help_panel").hide();
+    });
+};
+
+// Create the events for the filter box.
+coverage.wire_up_filter = function () {
+    // Cache elements.
+    var table = $("table.index");
+    var table_rows = table.find("tbody tr");
+    var table_row_names = table_rows.find("td.name a");
+    var no_rows = $("#no_rows");
+
+    // Create a duplicate table footer that we can modify with dynamic summed values.
+    var table_footer = $("table.index tfoot tr");
+    var table_dynamic_footer = table_footer.clone();
+    table_dynamic_footer.attr('class', 'total_dynamic hidden');
+    table_footer.after(table_dynamic_footer);
+
+    // Observe filter keyevents.
+    $("#filter").on("keyup change", $.debounce(150, function (event) {
+        var filter_value = $(this).val();
+
+        if (filter_value === "") {
+            // Filter box is empty, remove all filtering.
+            table_rows.removeClass("hidden");
+
+            // Show standard footer, hide dynamic footer.
+            table_footer.removeClass("hidden");
+            table_dynamic_footer.addClass("hidden");
+
+            // Hide placeholder, show table.
+            if (no_rows.length > 0) {
+                no_rows.hide();
+            }
+            table.show();
+
+        }
+        else {
+            // Filter table items by value.
+            var hide = $([]);
+            var show = $([]);
+
+            // Compile elements to hide / show.
+            $.each(table_row_names, function () {
+                var element = $(this).parents("tr");
+
+                if ($(this).text().indexOf(filter_value) === -1) {
+                    // hide
+                    hide = hide.add(element);
+                }
+                else {
+                    // show
+                    show = show.add(element);
+                }
+            });
+
+            // Perform DOM manipulation.
+            hide.addClass("hidden");
+            show.removeClass("hidden");
+
+            // Show placeholder if no rows will be displayed.
+            if (no_rows.length > 0) {
+                if (show.length === 0) {
+                    // Show placeholder, hide table.
+                    no_rows.show();
+                    table.hide();
+                }
+                else {
+                    // Hide placeholder, show table.
+                    no_rows.hide();
+                    table.show();
+                }
+            }
+
+            // Manage dynamic header:
+            if (hide.length > 0) {
+                // Calculate new dynamic sum values based on visible rows.
+                for (var column = 2; column < 20; column++) {
+                    // Calculate summed value.
+                    var cells = table_rows.find('td:nth-child(' + column + ')');
+                    if (!cells.length) {
+                        // No more columns...!
+                        break;
+                    }
+
+                    var sum = 0, numer = 0, denom = 0;
+                    $.each(cells.filter(':visible'), function () {
+                        var ratio = $(this).data("ratio");
+                        if (ratio) {
+                            var splitted = ratio.split(" ");
+                            numer += parseInt(splitted[0], 10);
+                            denom += parseInt(splitted[1], 10);
+                        }
+                        else {
+                            sum += parseInt(this.innerHTML, 10);
+                        }
+                    });
+
+                    // Get footer cell element.
+                    var footer_cell = table_dynamic_footer.find('td:nth-child(' + column + ')');
+
+                    // Set value into dynamic footer cell element.
+                    if (cells[0].innerHTML.indexOf('%') > -1) {
+                        // Percentage columns use the numerator and denominator,
+                        // and adapt to the number of decimal places.
+                        var match = /\.([0-9]+)/.exec(cells[0].innerHTML);
+                        var places = 0;
+                        if (match) {
+                            places = match[1].length;
+                        }
+                        var pct = numer * 100 / denom;
+                        footer_cell.text(pct.toFixed(places) + '%');
+                    }
+                    else {
+                        footer_cell.text(sum);
+                    }
+                }
+
+                // Hide standard footer, show dynamic footer.
+                table_footer.addClass("hidden");
+                table_dynamic_footer.removeClass("hidden");
+            }
+            else {
+                // Show standard footer, hide dynamic footer.
+                table_footer.removeClass("hidden");
+                table_dynamic_footer.addClass("hidden");
+            }
+        }
+    }));
+
+    // Trigger change event on setup, to force filter on page refresh
+    // (filter value may still be present).
+    $("#filter").trigger("change");
+};
+
+// Loaded on index.html
+coverage.index_ready = function ($) {
+    // Look for a cookie containing previous sort settings:
+    var sort_list = [];
+    var cookie_name = "COVERAGE_INDEX_SORT";
+    var i;
+
+    // This almost makes it worth installing the jQuery cookie plugin:
+    if (document.cookie.indexOf(cookie_name) > -1) {
+        var cookies = document.cookie.split(";");
+        for (i = 0; i < cookies.length; i++) {
+            var parts = cookies[i].split("=");
+
+            if ($.trim(parts[0]) === cookie_name && parts[1]) {
+                sort_list = eval("[[" + parts[1] + "]]");
+                break;
+            }
+        }
+    }
+
+    // Create a new widget which exists only to save and restore
+    // the sort order:
+    $.tablesorter.addWidget({
+        id: "persistentSort",
+
+        // Format is called by the widget before displaying:
+        format: function (table) {
+            if (table.config.sortList.length === 0 && sort_list.length > 0) {
+                // This table hasn't been sorted before - we'll use
+                // our stored settings:
+                $(table).trigger('sorton', [sort_list]);
+            }
+            else {
+                // This is not the first load - something has
+                // already defined sorting so we'll just update
+                // our stored value to match:
+                sort_list = table.config.sortList;
+            }
+        }
+    });
+
+    // Configure our tablesorter to handle the variable number of
+    // columns produced depending on report options:
+    var headers = [];
+    var col_count = $("table.index > thead > tr > th").length;
+
+    headers[0] = { sorter: 'text' };
+    for (i = 1; i < col_count-1; i++) {
+        headers[i] = { sorter: 'digit' };
+    }
+    headers[col_count-1] = { sorter: 'percent' };
+
+    // Enable the table sorter:
+    $("table.index").tablesorter({
+        widgets: ['persistentSort'],
+        headers: headers
+    });
+
+    coverage.assign_shortkeys();
+    coverage.wire_up_help_panel();
+    coverage.wire_up_filter();
+
+    // Watch for page unload events so we can save the final sort settings:
+    $(window).unload(function () {
+        document.cookie = cookie_name + "=" + sort_list.toString() + "; path=/";
+    });
+};
+
+// -- pyfile stuff --
+
+coverage.pyfile_ready = function ($) {
+    // If we're directed to a particular line number, highlight the line.
+    var frag = location.hash;
+    if (frag.length > 2 && frag[1] === 'n') {
+        $(frag).addClass('highlight');
+        coverage.set_sel(parseInt(frag.substr(2), 10));
+    }
+    else {
+        coverage.set_sel(0);
+    }
+
+    $(document)
+        .bind('keydown', 'j', coverage.to_next_chunk_nicely)
+        .bind('keydown', 'k', coverage.to_prev_chunk_nicely)
+        .bind('keydown', '0', coverage.to_top)
+        .bind('keydown', '1', coverage.to_first_chunk)
+        ;
+
+    $(".button_toggle_run").click(function (evt) {coverage.toggle_lines(evt.target, "run");});
+    $(".button_toggle_exc").click(function (evt) {coverage.toggle_lines(evt.target, "exc");});
+    $(".button_toggle_mis").click(function (evt) {coverage.toggle_lines(evt.target, "mis");});
+    $(".button_toggle_par").click(function (evt) {coverage.toggle_lines(evt.target, "par");});
+
+    coverage.assign_shortkeys();
+    coverage.wire_up_help_panel();
+};
+
+coverage.toggle_lines = function (btn, cls) {
+    btn = $(btn);
+    var hide = "hide_"+cls;
+    if (btn.hasClass(hide)) {
+        $("#source ."+cls).removeClass(hide);
+        btn.removeClass(hide);
+    }
+    else {
+        $("#source ."+cls).addClass(hide);
+        btn.addClass(hide);
+    }
+};
+
+// Return the nth line div.
+coverage.line_elt = function (n) {
+    return $("#t" + n);
+};
+
+// Return the nth line number div.
+coverage.num_elt = function (n) {
+    return $("#n" + n);
+};
+
+// Return the container of all the code.
+coverage.code_container = function () {
+    return $(".linenos");
+};
+
+// Set the selection.  b and e are line numbers.
+coverage.set_sel = function (b, e) {
+    // The first line selected.
+    coverage.sel_begin = b;
+    // The next line not selected.
+    coverage.sel_end = (e === undefined) ? b+1 : e;
+};
+
+coverage.to_top = function () {
+    coverage.set_sel(0, 1);
+    coverage.scroll_window(0);
+};
+
+coverage.to_first_chunk = function () {
+    coverage.set_sel(0, 1);
+    coverage.to_next_chunk();
+};
+
+coverage.is_transparent = function (color) {
+    // Different browsers return different colors for "none".
+    return color === "transparent" || color === "rgba(0, 0, 0, 0)";
+};
+
+coverage.to_next_chunk = function () {
+    var c = coverage;
+
+    // Find the start of the next colored chunk.
+    var probe = c.sel_end;
+    var color, probe_line;
+    while (true) {
+        probe_line = c.line_elt(probe);
+        if (probe_line.length === 0) {
+            return;
+        }
+        color = probe_line.css("background-color");
+        if (!c.is_transparent(color)) {
+            break;
+        }
+        probe++;
+    }
+
+    // There's a next chunk, `probe` points to it.
+    var begin = probe;
+
+    // Find the end of this chunk.
+    var next_color = color;
+    while (next_color === color) {
+        probe++;
+        probe_line = c.line_elt(probe);
+        next_color = probe_line.css("background-color");
+    }
+    c.set_sel(begin, probe);
+    c.show_selection();
+};
+
+coverage.to_prev_chunk = function () {
+    var c = coverage;
+
+    // Find the end of the prev colored chunk.
+    var probe = c.sel_begin-1;
+    var probe_line = c.line_elt(probe);
+    if (probe_line.length === 0) {
+        return;
+    }
+    var color = probe_line.css("background-color");
+    while (probe > 0 && c.is_transparent(color)) {
+        probe--;
+        probe_line = c.line_elt(probe);
+        if (probe_line.length === 0) {
+            return;
+        }
+        color = probe_line.css("background-color");
+    }
+
+    // There's a prev chunk, `probe` points to its last line.
+    var end = probe+1;
+
+    // Find the beginning of this chunk.
+    var prev_color = color;
+    while (prev_color === color) {
+        probe--;
+        probe_line = c.line_elt(probe);
+        prev_color = probe_line.css("background-color");
+    }
+    c.set_sel(probe+1, end);
+    c.show_selection();
+};
+
+// Return the line number of the line nearest pixel position pos
+coverage.line_at_pos = function (pos) {
+    var l1 = coverage.line_elt(1),
+        l2 = coverage.line_elt(2),
+        result;
+    if (l1.length && l2.length) {
+        var l1_top = l1.offset().top,
+            line_height = l2.offset().top - l1_top,
+            nlines = (pos - l1_top) / line_height;
+        if (nlines < 1) {
+            result = 1;
+        }
+        else {
+            result = Math.ceil(nlines);
+        }
+    }
+    else {
+        result = 1;
+    }
+    return result;
+};
+
+// Returns 0, 1, or 2: how many of the two ends of the selection are on
+// the screen right now?
+coverage.selection_ends_on_screen = function () {
+    if (coverage.sel_begin === 0) {
+        return 0;
+    }
+
+    var top = coverage.line_elt(coverage.sel_begin);
+    var next = coverage.line_elt(coverage.sel_end-1);
+
+    return (
+        (top.isOnScreen() ? 1 : 0) +
+        (next.isOnScreen() ? 1 : 0)
+    );
+};
+
+coverage.to_next_chunk_nicely = function () {
+    coverage.finish_scrolling();
+    if (coverage.selection_ends_on_screen() === 0) {
+        // The selection is entirely off the screen: select the top line on
+        // the screen.
+        var win = $(window);
+        coverage.select_line_or_chunk(coverage.line_at_pos(win.scrollTop()));
+    }
+    coverage.to_next_chunk();
+};
+
+coverage.to_prev_chunk_nicely = function () {
+    coverage.finish_scrolling();
+    if (coverage.selection_ends_on_screen() === 0) {
+        var win = $(window);
+        coverage.select_line_or_chunk(coverage.line_at_pos(win.scrollTop() + win.height()));
+    }
+    coverage.to_prev_chunk();
+};
+
+// Select line number lineno, or if it is in a colored chunk, select the
+// entire chunk
+coverage.select_line_or_chunk = function (lineno) {
+    var c = coverage;
+    var probe_line = c.line_elt(lineno);
+    if (probe_line.length === 0) {
+        return;
+    }
+    var the_color = probe_line.css("background-color");
+    if (!c.is_transparent(the_color)) {
+        // The line is in a highlighted chunk.
+        // Search backward for the first line.
+        var probe = lineno;
+        var color = the_color;
+        while (probe > 0 && color === the_color) {
+            probe--;
+            probe_line = c.line_elt(probe);
+            if (probe_line.length === 0) {
+                break;
+            }
+            color = probe_line.css("background-color");
+        }
+        var begin = probe + 1;
+
+        // Search forward for the last line.
+        probe = lineno;
+        color = the_color;
+        while (color === the_color) {
+            probe++;
+            probe_line = c.line_elt(probe);
+            color = probe_line.css("background-color");
+        }
+
+        coverage.set_sel(begin, probe);
+    }
+    else {
+        coverage.set_sel(lineno);
+    }
+};
+
+coverage.show_selection = function () {
+    var c = coverage;
+
+    // Highlight the lines in the chunk
+    c.code_container().find(".highlight").removeClass("highlight");
+    for (var probe = c.sel_begin; probe > 0 && probe < c.sel_end; probe++) {
+        c.num_elt(probe).addClass("highlight");
+    }
+
+    c.scroll_to_selection();
+};
+
+coverage.scroll_to_selection = function () {
+    // Scroll the page if the chunk isn't fully visible.
+    if (coverage.selection_ends_on_screen() < 2) {
+        // Need to move the page. The html,body trick makes it scroll in all
+        // browsers, got it from http://stackoverflow.com/questions/3042651
+        var top = coverage.line_elt(coverage.sel_begin);
+        var top_pos = parseInt(top.offset().top, 10);
+        coverage.scroll_window(top_pos - 30);
+    }
+};
+
+coverage.scroll_window = function (to_pos) {
+    $("html,body").animate({scrollTop: to_pos}, 200);
+};
+
+coverage.finish_scrolling = function () {
+    $("html,body").stop(true, true);
+};
diff --git a/catapult/third_party/coverage/coverage/htmlfiles/index.html b/catapult/third_party/coverage/coverage/htmlfiles/index.html
new file mode 100644
index 0000000..ee2deab
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/htmlfiles/index.html
@@ -0,0 +1,118 @@
+{# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 #}
+{# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt #}
+
+<!DOCTYPE html>
+<html>
+<head>
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+    <title>{{ title|escape }}</title>
+    <link rel="stylesheet" href="style.css" type="text/css">
+    {% if extra_css %}
+        <link rel="stylesheet" href="{{ extra_css }}" type="text/css">
+    {% endif %}
+    <script type="text/javascript" src="jquery.min.js"></script>
+    <script type="text/javascript" src="jquery.debounce.min.js"></script>
+    <script type="text/javascript" src="jquery.tablesorter.min.js"></script>
+    <script type="text/javascript" src="jquery.hotkeys.js"></script>
+    <script type="text/javascript" src="coverage_html.js"></script>
+    <script type="text/javascript">
+        jQuery(document).ready(coverage.index_ready);
+    </script>
+</head>
+<body class="indexfile">
+
+<div id="header">
+    <div class="content">
+        <h1>{{ title|escape }}:
+            <span class="pc_cov">{{totals.pc_covered_str}}%</span>
+        </h1>
+
+        <img id="keyboard_icon" src="keybd_closed.png" alt="Show keyboard shortcuts" />
+
+        <form id="filter_container">
+            <input id="filter" type="text" value="" placeholder="filter..." />
+        </form>
+    </div>
+</div>
+
+<div class="help_panel">
+    <img id="panel_icon" src="keybd_open.png" alt="Hide keyboard shortcuts" />
+    <p class="legend">Hot-keys on this page</p>
+    <div>
+    <p class="keyhelp">
+        <span class="key">n</span>
+        <span class="key">s</span>
+        <span class="key">m</span>
+        <span class="key">x</span>
+        {% if has_arcs %}
+        <span class="key">b</span>
+        <span class="key">p</span>
+        {% endif %}
+        <span class="key">c</span> &nbsp; change column sorting
+    </p>
+    </div>
+</div>
+
+<div id="index">
+    <table class="index">
+        <thead>
+            {# The title="" attr doesn't work in Safari. #}
+            <tr class="tablehead" title="Click to sort">
+                <th class="name left headerSortDown shortkey_n">Module</th>
+                <th class="shortkey_s">statements</th>
+                <th class="shortkey_m">missing</th>
+                <th class="shortkey_x">excluded</th>
+                {% if has_arcs %}
+                <th class="shortkey_b">branches</th>
+                <th class="shortkey_p">partial</th>
+                {% endif %}
+                <th class="right shortkey_c">coverage</th>
+            </tr>
+        </thead>
+        {# HTML syntax requires thead, tfoot, tbody #}
+        <tfoot>
+            <tr class="total">
+                <td class="name left">Total</td>
+                <td>{{totals.n_statements}}</td>
+                <td>{{totals.n_missing}}</td>
+                <td>{{totals.n_excluded}}</td>
+                {% if has_arcs %}
+                <td>{{totals.n_branches}}</td>
+                <td>{{totals.n_partial_branches}}</td>
+                {% endif %}
+                <td class="right" data-ratio="{{totals.ratio_covered|pair}}">{{totals.pc_covered_str}}%</td>
+            </tr>
+        </tfoot>
+        <tbody>
+            {% for file in files %}
+            <tr class="file">
+                <td class="name left"><a href="{{file.html_filename}}">{{file.relative_filename}}</a></td>
+                <td>{{file.nums.n_statements}}</td>
+                <td>{{file.nums.n_missing}}</td>
+                <td>{{file.nums.n_excluded}}</td>
+                {% if has_arcs %}
+                <td>{{file.nums.n_branches}}</td>
+                <td>{{file.nums.n_partial_branches}}</td>
+                {% endif %}
+                <td class="right" data-ratio="{{file.nums.ratio_covered|pair}}">{{file.nums.pc_covered_str}}%</td>
+            </tr>
+            {% endfor %}
+        </tbody>
+    </table>
+
+    <p id="no_rows">
+        No items found using the specified filter.
+    </p>
+</div>
+
+<div id="footer">
+    <div class="content">
+        <p>
+            <a class="nav" href="{{__url__}}">coverage.py v{{__version__}}</a>,
+            created at {{ time_stamp }}
+        </p>
+    </div>
+</div>
+
+</body>
+</html>
diff --git a/catapult/third_party/coverage/coverage/htmlfiles/jquery.debounce.min.js b/catapult/third_party/coverage/coverage/htmlfiles/jquery.debounce.min.js
new file mode 100644
index 0000000..648fe5d
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/htmlfiles/jquery.debounce.min.js
@@ -0,0 +1,9 @@
+/*
+ * jQuery throttle / debounce - v1.1 - 3/7/2010
+ * http://benalman.com/projects/jquery-throttle-debounce-plugin/
+ *
+ * Copyright (c) 2010 "Cowboy" Ben Alman
+ * Dual licensed under the MIT and GPL licenses.
+ * http://benalman.com/about/license/
+ */
+(function(b,c){var $=b.jQuery||b.Cowboy||(b.Cowboy={}),a;$.throttle=a=function(e,f,j,i){var h,d=0;if(typeof f!=="boolean"){i=j;j=f;f=c}function g(){var o=this,m=+new Date()-d,n=arguments;function l(){d=+new Date();j.apply(o,n)}function k(){h=c}if(i&&!h){l()}h&&clearTimeout(h);if(i===c&&m>e){l()}else{if(f!==true){h=setTimeout(i?k:l,i===c?e-m:e)}}}if($.guid){g.guid=j.guid=j.guid||$.guid++}return g};$.debounce=function(d,e,f){return f===c?a(d,e,false):a(d,f,e!==false)}})(this);
diff --git a/catapult/third_party/coverage/coverage/htmlfiles/jquery.hotkeys.js b/catapult/third_party/coverage/coverage/htmlfiles/jquery.hotkeys.js
new file mode 100644
index 0000000..09b21e0
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/htmlfiles/jquery.hotkeys.js
@@ -0,0 +1,99 @@
+/*
+ * jQuery Hotkeys Plugin
+ * Copyright 2010, John Resig
+ * Dual licensed under the MIT or GPL Version 2 licenses.
+ *
+ * Based upon the plugin by Tzury Bar Yochay:
+ * http://github.com/tzuryby/hotkeys
+ *
+ * Original idea by:
+ * Binny V A, http://www.openjs.com/scripts/events/keyboard_shortcuts/
+*/
+
+(function(jQuery){
+
+	jQuery.hotkeys = {
+		version: "0.8",
+
+		specialKeys: {
+			8: "backspace", 9: "tab", 13: "return", 16: "shift", 17: "ctrl", 18: "alt", 19: "pause",
+			20: "capslock", 27: "esc", 32: "space", 33: "pageup", 34: "pagedown", 35: "end", 36: "home",
+			37: "left", 38: "up", 39: "right", 40: "down", 45: "insert", 46: "del",
+			96: "0", 97: "1", 98: "2", 99: "3", 100: "4", 101: "5", 102: "6", 103: "7",
+			104: "8", 105: "9", 106: "*", 107: "+", 109: "-", 110: ".", 111 : "/",
+			112: "f1", 113: "f2", 114: "f3", 115: "f4", 116: "f5", 117: "f6", 118: "f7", 119: "f8",
+			120: "f9", 121: "f10", 122: "f11", 123: "f12", 144: "numlock", 145: "scroll", 191: "/", 224: "meta"
+		},
+
+		shiftNums: {
+			"`": "~", "1": "!", "2": "@", "3": "#", "4": "$", "5": "%", "6": "^", "7": "&",
+			"8": "*", "9": "(", "0": ")", "-": "_", "=": "+", ";": ": ", "'": "\"", ",": "<",
+			".": ">",  "/": "?",  "\\": "|"
+		}
+	};
+
+	function keyHandler( handleObj ) {
+		// Only care when a possible input has been specified
+		if ( typeof handleObj.data !== "string" ) {
+			return;
+		}
+
+		var origHandler = handleObj.handler,
+			keys = handleObj.data.toLowerCase().split(" ");
+
+		handleObj.handler = function( event ) {
+			// Don't fire in text-accepting inputs that we didn't directly bind to
+			if ( this !== event.target && (/textarea|select/i.test( event.target.nodeName ) ||
+				 event.target.type === "text") ) {
+				return;
+			}
+
+			// Keypress represents characters, not special keys
+			var special = event.type !== "keypress" && jQuery.hotkeys.specialKeys[ event.which ],
+				character = String.fromCharCode( event.which ).toLowerCase(),
+				key, modif = "", possible = {};
+
+			// check combinations (alt|ctrl|shift+anything)
+			if ( event.altKey && special !== "alt" ) {
+				modif += "alt+";
+			}
+
+			if ( event.ctrlKey && special !== "ctrl" ) {
+				modif += "ctrl+";
+			}
+
+			// TODO: Need to make sure this works consistently across platforms
+			if ( event.metaKey && !event.ctrlKey && special !== "meta" ) {
+				modif += "meta+";
+			}
+
+			if ( event.shiftKey && special !== "shift" ) {
+				modif += "shift+";
+			}
+
+			if ( special ) {
+				possible[ modif + special ] = true;
+
+			} else {
+				possible[ modif + character ] = true;
+				possible[ modif + jQuery.hotkeys.shiftNums[ character ] ] = true;
+
+				// "$" can be triggered as "Shift+4" or "Shift+$" or just "$"
+				if ( modif === "shift+" ) {
+					possible[ jQuery.hotkeys.shiftNums[ character ] ] = true;
+				}
+			}
+
+			for ( var i = 0, l = keys.length; i < l; i++ ) {
+				if ( possible[ keys[i] ] ) {
+					return origHandler.apply( this, arguments );
+				}
+			}
+		};
+	}
+
+	jQuery.each([ "keydown", "keyup", "keypress" ], function() {
+		jQuery.event.special[ this ] = { add: keyHandler };
+	});
+
+})( jQuery );
diff --git a/catapult/third_party/coverage/coverage/htmlfiles/jquery.isonscreen.js b/catapult/third_party/coverage/coverage/htmlfiles/jquery.isonscreen.js
new file mode 100644
index 0000000..0182ebd
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/htmlfiles/jquery.isonscreen.js
@@ -0,0 +1,53 @@
+/* Copyright (c) 2010
+ * @author Laurence Wheway
+ * Dual licensed under the MIT (http://www.opensource.org/licenses/mit-license.php)
+ * and GPL (http://www.opensource.org/licenses/gpl-license.php) licenses.
+ *
+ * @version 1.2.0
+ */
+(function($) {
+	jQuery.extend({
+		isOnScreen: function(box, container) {
+			// Ensure numbers come in as integers (not strings) and remove 'px' if it's there.
+			for(var i in box){box[i] = parseFloat(box[i])};
+			for(var i in container){container[i] = parseFloat(container[i])};
+
+			if(!container){
+				container = {
+					left: $(window).scrollLeft(),
+					top: $(window).scrollTop(),
+					width: $(window).width(),
+					height: $(window).height()
+				}
+			}
+
+			if(	box.left+box.width-container.left > 0 &&
+				box.left < container.width+container.left &&
+				box.top+box.height-container.top > 0 &&
+				box.top < container.height+container.top
+			) return true;
+			return false;
+		}
+	})
+
+
+	jQuery.fn.isOnScreen = function (container) {
+		for(var i in container){container[i] = parseFloat(container[i])};
+
+		if(!container){
+			container = {
+				left: $(window).scrollLeft(),
+				top: $(window).scrollTop(),
+				width: $(window).width(),
+				height: $(window).height()
+			}
+		}
+
+		if(	$(this).offset().left+$(this).width()-container.left > 0 &&
+			$(this).offset().left < container.width+container.left &&
+			$(this).offset().top+$(this).height()-container.top > 0 &&
+			$(this).offset().top < container.height+container.top
+		) return true;
+		return false;
+	}
+})(jQuery);
diff --git a/catapult/third_party/coverage/coverage/htmlfiles/jquery.min.js b/catapult/third_party/coverage/coverage/htmlfiles/jquery.min.js
new file mode 100644
index 0000000..d1608e3
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/htmlfiles/jquery.min.js
@@ -0,0 +1,4 @@
+/*! jQuery v1.11.1 | (c) 2005, 2014 jQuery Foundation, Inc. | jquery.org/license */
+!function(a,b){"object"==typeof module&&"object"==typeof module.exports?module.exports=a.document?b(a,!0):function(a){if(!a.document)throw new Error("jQuery requires a window with a document");return b(a)}:b(a)}("undefined"!=typeof window?window:this,function(a,b){var c=[],d=c.slice,e=c.concat,f=c.push,g=c.indexOf,h={},i=h.toString,j=h.hasOwnProperty,k={},l="1.11.1",m=function(a,b){return new m.fn.init(a,b)},n=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,o=/^-ms-/,p=/-([\da-z])/gi,q=function(a,b){return b.toUpperCase()};m.fn=m.prototype={jquery:l,constructor:m,selector:"",length:0,toArray:function(){return d.call(this)},get:function(a){return null!=a?0>a?this[a+this.length]:this[a]:d.call(this)},pushStack:function(a){var b=m.merge(this.constructor(),a);return b.prevObject=this,b.context=this.context,b},each:function(a,b){return m.each(this,a,b)},map:function(a){return this.pushStack(m.map(this,function(b,c){return a.call(b,c,b)}))},slice:function(){return this.pushStack(d.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(a){var b=this.length,c=+a+(0>a?b:0);return this.pushStack(c>=0&&b>c?[this[c]]:[])},end:function(){return this.prevObject||this.constructor(null)},push:f,sort:c.sort,splice:c.splice},m.extend=m.fn.extend=function(){var a,b,c,d,e,f,g=arguments[0]||{},h=1,i=arguments.length,j=!1;for("boolean"==typeof g&&(j=g,g=arguments[h]||{},h++),"object"==typeof g||m.isFunction(g)||(g={}),h===i&&(g=this,h--);i>h;h++)if(null!=(e=arguments[h]))for(d in e)a=g[d],c=e[d],g!==c&&(j&&c&&(m.isPlainObject(c)||(b=m.isArray(c)))?(b?(b=!1,f=a&&m.isArray(a)?a:[]):f=a&&m.isPlainObject(a)?a:{},g[d]=m.extend(j,f,c)):void 0!==c&&(g[d]=c));return g},m.extend({expando:"jQuery"+(l+Math.random()).replace(/\D/g,""),isReady:!0,error:function(a){throw new Error(a)},noop:function(){},isFunction:function(a){return"function"===m.type(a)},isArray:Array.isArray||function(a){return"array"===m.type(a)},isWindow:function(a){return null!=a&&a==a.window},isNumeric:function(a){return!m.isArray(a)&&a-parseFloat(a)>=0},isEmptyObject:function(a){var b;for(b in a)return!1;return!0},isPlainObject:function(a){var b;if(!a||"object"!==m.type(a)||a.nodeType||m.isWindow(a))return!1;try{if(a.constructor&&!j.call(a,"constructor")&&!j.call(a.constructor.prototype,"isPrototypeOf"))return!1}catch(c){return!1}if(k.ownLast)for(b in a)return j.call(a,b);for(b in a);return void 0===b||j.call(a,b)},type:function(a){return null==a?a+"":"object"==typeof a||"function"==typeof a?h[i.call(a)]||"object":typeof a},globalEval:function(b){b&&m.trim(b)&&(a.execScript||function(b){a.eval.call(a,b)})(b)},camelCase:function(a){return a.replace(o,"ms-").replace(p,q)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toLowerCase()===b.toLowerCase()},each:function(a,b,c){var d,e=0,f=a.length,g=r(a);if(c){if(g){for(;f>e;e++)if(d=b.apply(a[e],c),d===!1)break}else for(e in a)if(d=b.apply(a[e],c),d===!1)break}else if(g){for(;f>e;e++)if(d=b.call(a[e],e,a[e]),d===!1)break}else for(e in a)if(d=b.call(a[e],e,a[e]),d===!1)break;return a},trim:function(a){return null==a?"":(a+"").replace(n,"")},makeArray:function(a,b){var c=b||[];return null!=a&&(r(Object(a))?m.merge(c,"string"==typeof a?[a]:a):f.call(c,a)),c},inArray:function(a,b,c){var d;if(b){if(g)return g.call(b,a,c);for(d=b.length,c=c?0>c?Math.max(0,d+c):c:0;d>c;c++)if(c in b&&b[c]===a)return c}return-1},merge:function(a,b){var c=+b.length,d=0,e=a.length;while(c>d)a[e++]=b[d++];if(c!==c)while(void 0!==b[d])a[e++]=b[d++];return 
a.length=e,a},grep:function(a,b,c){for(var d,e=[],f=0,g=a.length,h=!c;g>f;f++)d=!b(a[f],f),d!==h&&e.push(a[f]);return e},map:function(a,b,c){var d,f=0,g=a.length,h=r(a),i=[];if(h)for(;g>f;f++)d=b(a[f],f,c),null!=d&&i.push(d);else for(f in a)d=b(a[f],f,c),null!=d&&i.push(d);return e.apply([],i)},guid:1,proxy:function(a,b){var c,e,f;return"string"==typeof b&&(f=a[b],b=a,a=f),m.isFunction(a)?(c=d.call(arguments,2),e=function(){return a.apply(b||this,c.concat(d.call(arguments)))},e.guid=a.guid=a.guid||m.guid++,e):void 0},now:function(){return+new Date},support:k}),m.each("Boolean Number String Function Array Date RegExp Object Error".split(" "),function(a,b){h["[object "+b+"]"]=b.toLowerCase()});function r(a){var b=a.length,c=m.type(a);return"function"===c||m.isWindow(a)?!1:1===a.nodeType&&b?!0:"array"===c||0===b||"number"==typeof b&&b>0&&b-1 in a}var s=function(a){var b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u="sizzle"+-new Date,v=a.document,w=0,x=0,y=gb(),z=gb(),A=gb(),B=function(a,b){return a===b&&(l=!0),0},C="undefined",D=1<<31,E={}.hasOwnProperty,F=[],G=F.pop,H=F.push,I=F.push,J=F.slice,K=F.indexOf||function(a){for(var b=0,c=this.length;c>b;b++)if(this[b]===a)return b;return-1},L="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",M="[\\x20\\t\\r\\n\\f]",N="(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+",O=N.replace("w","w#"),P="\\["+M+"*("+N+")(?:"+M+"*([*^$|!~]?=)"+M+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+O+"))|)"+M+"*\\]",Q=":("+N+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+P+")*)|.*)\\)|)",R=new RegExp("^"+M+"+|((?:^|[^\\\\])(?:\\\\.)*)"+M+"+$","g"),S=new RegExp("^"+M+"*,"+M+"*"),T=new RegExp("^"+M+"*([>+~]|"+M+")"+M+"*"),U=new RegExp("="+M+"*([^\\]'\"]*?)"+M+"*\\]","g"),V=new RegExp(Q),W=new RegExp("^"+O+"$"),X={ID:new RegExp("^#("+N+")"),CLASS:new RegExp("^\\.("+N+")"),TAG:new RegExp("^("+N.replace("w","w*")+")"),ATTR:new RegExp("^"+P),PSEUDO:new RegExp("^"+Q),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+L+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/^(?:input|select|textarea|button)$/i,Z=/^h\d$/i,$=/^[^{]+\{\s*\[native \w/,_=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ab=/[+~]/,bb=/'|\\/g,cb=new RegExp("\\\\([\\da-f]{1,6}"+M+"?|("+M+")|.)","ig"),db=function(a,b,c){var d="0x"+b-65536;return d!==d||c?b:0>d?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)};try{I.apply(F=J.call(v.childNodes),v.childNodes),F[v.childNodes.length].nodeType}catch(eb){I={apply:F.length?function(a,b){H.apply(a,J.call(b))}:function(a,b){var c=a.length,d=0;while(a[c++]=b[d++]);a.length=c-1}}}function fb(a,b,d,e){var f,h,j,k,l,o,r,s,w,x;if((b?b.ownerDocument||b:v)!==n&&m(b),b=b||n,d=d||[],!a||"string"!=typeof a)return d;if(1!==(k=b.nodeType)&&9!==k)return[];if(p&&!e){if(f=_.exec(a))if(j=f[1]){if(9===k){if(h=b.getElementById(j),!h||!h.parentNode)return d;if(h.id===j)return d.push(h),d}else if(b.ownerDocument&&(h=b.ownerDocument.getElementById(j))&&t(b,h)&&h.id===j)return d.push(h),d}else{if(f[2])return I.apply(d,b.getElementsByTagName(a)),d;if((j=f[3])&&c.getElementsByClassName&&b.getElementsByClassName)return 
I.apply(d,b.getElementsByClassName(j)),d}if(c.qsa&&(!q||!q.test(a))){if(s=r=u,w=b,x=9===k&&a,1===k&&"object"!==b.nodeName.toLowerCase()){o=g(a),(r=b.getAttribute("id"))?s=r.replace(bb,"\\$&"):b.setAttribute("id",s),s="[id='"+s+"'] ",l=o.length;while(l--)o[l]=s+qb(o[l]);w=ab.test(a)&&ob(b.parentNode)||b,x=o.join(",")}if(x)try{return I.apply(d,w.querySelectorAll(x)),d}catch(y){}finally{r||b.removeAttribute("id")}}}return i(a.replace(R,"$1"),b,d,e)}function gb(){var a=[];function b(c,e){return a.push(c+" ")>d.cacheLength&&delete b[a.shift()],b[c+" "]=e}return b}function hb(a){return a[u]=!0,a}function ib(a){var b=n.createElement("div");try{return!!a(b)}catch(c){return!1}finally{b.parentNode&&b.parentNode.removeChild(b),b=null}}function jb(a,b){var c=a.split("|"),e=a.length;while(e--)d.attrHandle[c[e]]=b}function kb(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&(~b.sourceIndex||D)-(~a.sourceIndex||D);if(d)return d;if(c)while(c=c.nextSibling)if(c===b)return-1;return a?1:-1}function lb(a){return function(b){var c=b.nodeName.toLowerCase();return"input"===c&&b.type===a}}function mb(a){return function(b){var c=b.nodeName.toLowerCase();return("input"===c||"button"===c)&&b.type===a}}function nb(a){return hb(function(b){return b=+b,hb(function(c,d){var e,f=a([],c.length,b),g=f.length;while(g--)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function ob(a){return a&&typeof a.getElementsByTagName!==C&&a}c=fb.support={},f=fb.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return b?"HTML"!==b.nodeName:!1},m=fb.setDocument=function(a){var b,e=a?a.ownerDocument||a:v,g=e.defaultView;return e!==n&&9===e.nodeType&&e.documentElement?(n=e,o=e.documentElement,p=!f(e),g&&g!==g.top&&(g.addEventListener?g.addEventListener("unload",function(){m()},!1):g.attachEvent&&g.attachEvent("onunload",function(){m()})),c.attributes=ib(function(a){return a.className="i",!a.getAttribute("className")}),c.getElementsByTagName=ib(function(a){return a.appendChild(e.createComment("")),!a.getElementsByTagName("*").length}),c.getElementsByClassName=$.test(e.getElementsByClassName)&&ib(function(a){return a.innerHTML="<div class='a'></div><div class='a i'></div>",a.firstChild.className="i",2===a.getElementsByClassName("i").length}),c.getById=ib(function(a){return o.appendChild(a).id=u,!e.getElementsByName||!e.getElementsByName(u).length}),c.getById?(d.find.ID=function(a,b){if(typeof b.getElementById!==C&&p){var c=b.getElementById(a);return c&&c.parentNode?[c]:[]}},d.filter.ID=function(a){var b=a.replace(cb,db);return function(a){return a.getAttribute("id")===b}}):(delete d.find.ID,d.filter.ID=function(a){var b=a.replace(cb,db);return function(a){var c=typeof a.getAttributeNode!==C&&a.getAttributeNode("id");return c&&c.value===b}}),d.find.TAG=c.getElementsByTagName?function(a,b){return typeof b.getElementsByTagName!==C?b.getElementsByTagName(a):void 0}:function(a,b){var c,d=[],e=0,f=b.getElementsByTagName(a);if("*"===a){while(c=f[e++])1===c.nodeType&&d.push(c);return d}return f},d.find.CLASS=c.getElementsByClassName&&function(a,b){return typeof b.getElementsByClassName!==C&&p?b.getElementsByClassName(a):void 0},r=[],q=[],(c.qsa=$.test(e.querySelectorAll))&&(ib(function(a){a.innerHTML="<select msallowclip=''><option selected=''></option></select>",a.querySelectorAll("[msallowclip^='']").length&&q.push("[*^$]="+M+"*(?:''|\"\")"),a.querySelectorAll("[selected]").length||q.push("\\["+M+"*(?:value|"+L+")"),a.querySelectorAll(":checked").length||q.push(":checked")}),ib(function(a){var 
b=e.createElement("input");b.setAttribute("type","hidden"),a.appendChild(b).setAttribute("name","D"),a.querySelectorAll("[name=d]").length&&q.push("name"+M+"*[*^$|!~]?="),a.querySelectorAll(":enabled").length||q.push(":enabled",":disabled"),a.querySelectorAll("*,:x"),q.push(",.*:")})),(c.matchesSelector=$.test(s=o.matches||o.webkitMatchesSelector||o.mozMatchesSelector||o.oMatchesSelector||o.msMatchesSelector))&&ib(function(a){c.disconnectedMatch=s.call(a,"div"),s.call(a,"[s!='']:x"),r.push("!=",Q)}),q=q.length&&new RegExp(q.join("|")),r=r.length&&new RegExp(r.join("|")),b=$.test(o.compareDocumentPosition),t=b||$.test(o.contains)?function(a,b){var c=9===a.nodeType?a.documentElement:a,d=b&&b.parentNode;return a===d||!(!d||1!==d.nodeType||!(c.contains?c.contains(d):a.compareDocumentPosition&&16&a.compareDocumentPosition(d)))}:function(a,b){if(b)while(b=b.parentNode)if(b===a)return!0;return!1},B=b?function(a,b){if(a===b)return l=!0,0;var d=!a.compareDocumentPosition-!b.compareDocumentPosition;return d?d:(d=(a.ownerDocument||a)===(b.ownerDocument||b)?a.compareDocumentPosition(b):1,1&d||!c.sortDetached&&b.compareDocumentPosition(a)===d?a===e||a.ownerDocument===v&&t(v,a)?-1:b===e||b.ownerDocument===v&&t(v,b)?1:k?K.call(k,a)-K.call(k,b):0:4&d?-1:1)}:function(a,b){if(a===b)return l=!0,0;var c,d=0,f=a.parentNode,g=b.parentNode,h=[a],i=[b];if(!f||!g)return a===e?-1:b===e?1:f?-1:g?1:k?K.call(k,a)-K.call(k,b):0;if(f===g)return kb(a,b);c=a;while(c=c.parentNode)h.unshift(c);c=b;while(c=c.parentNode)i.unshift(c);while(h[d]===i[d])d++;return d?kb(h[d],i[d]):h[d]===v?-1:i[d]===v?1:0},e):n},fb.matches=function(a,b){return fb(a,null,null,b)},fb.matchesSelector=function(a,b){if((a.ownerDocument||a)!==n&&m(a),b=b.replace(U,"='$1']"),!(!c.matchesSelector||!p||r&&r.test(b)||q&&q.test(b)))try{var d=s.call(a,b);if(d||c.disconnectedMatch||a.document&&11!==a.document.nodeType)return d}catch(e){}return fb(b,n,null,[a]).length>0},fb.contains=function(a,b){return(a.ownerDocument||a)!==n&&m(a),t(a,b)},fb.attr=function(a,b){(a.ownerDocument||a)!==n&&m(a);var e=d.attrHandle[b.toLowerCase()],f=e&&E.call(d.attrHandle,b.toLowerCase())?e(a,b,!p):void 0;return void 0!==f?f:c.attributes||!p?a.getAttribute(b):(f=a.getAttributeNode(b))&&f.specified?f.value:null},fb.error=function(a){throw new Error("Syntax error, unrecognized expression: "+a)},fb.uniqueSort=function(a){var b,d=[],e=0,f=0;if(l=!c.detectDuplicates,k=!c.sortStable&&a.slice(0),a.sort(B),l){while(b=a[f++])b===a[f]&&(e=d.push(f));while(e--)a.splice(d[e],1)}return k=null,a},e=fb.getText=function(a){var b,c="",d=0,f=a.nodeType;if(f){if(1===f||9===f||11===f){if("string"==typeof a.textContent)return a.textContent;for(a=a.firstChild;a;a=a.nextSibling)c+=e(a)}else if(3===f||4===f)return a.nodeValue}else while(b=a[d++])c+=e(b);return c},d=fb.selectors={cacheLength:50,createPseudo:hb,match:X,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(a){return a[1]=a[1].replace(cb,db),a[3]=(a[3]||a[4]||a[5]||"").replace(cb,db),"~="===a[2]&&(a[3]=" "+a[3]+" "),a.slice(0,4)},CHILD:function(a){return a[1]=a[1].toLowerCase(),"nth"===a[1].slice(0,3)?(a[3]||fb.error(a[0]),a[4]=+(a[4]?a[5]+(a[6]||1):2*("even"===a[3]||"odd"===a[3])),a[5]=+(a[7]+a[8]||"odd"===a[3])):a[3]&&fb.error(a[0]),a},PSEUDO:function(a){var b,c=!a[6]&&a[2];return 
X.CHILD.test(a[0])?null:(a[3]?a[2]=a[4]||a[5]||"":c&&V.test(c)&&(b=g(c,!0))&&(b=c.indexOf(")",c.length-b)-c.length)&&(a[0]=a[0].slice(0,b),a[2]=c.slice(0,b)),a.slice(0,3))}},filter:{TAG:function(a){var b=a.replace(cb,db).toLowerCase();return"*"===a?function(){return!0}:function(a){return a.nodeName&&a.nodeName.toLowerCase()===b}},CLASS:function(a){var b=y[a+" "];return b||(b=new RegExp("(^|"+M+")"+a+"("+M+"|$)"))&&y(a,function(a){return b.test("string"==typeof a.className&&a.className||typeof a.getAttribute!==C&&a.getAttribute("class")||"")})},ATTR:function(a,b,c){return function(d){var e=fb.attr(d,a);return null==e?"!="===b:b?(e+="","="===b?e===c:"!="===b?e!==c:"^="===b?c&&0===e.indexOf(c):"*="===b?c&&e.indexOf(c)>-1:"$="===b?c&&e.slice(-c.length)===c:"~="===b?(" "+e+" ").indexOf(c)>-1:"|="===b?e===c||e.slice(0,c.length+1)===c+"-":!1):!0}},CHILD:function(a,b,c,d,e){var f="nth"!==a.slice(0,3),g="last"!==a.slice(-4),h="of-type"===b;return 1===d&&0===e?function(a){return!!a.parentNode}:function(b,c,i){var j,k,l,m,n,o,p=f!==g?"nextSibling":"previousSibling",q=b.parentNode,r=h&&b.nodeName.toLowerCase(),s=!i&&!h;if(q){if(f){while(p){l=b;while(l=l[p])if(h?l.nodeName.toLowerCase()===r:1===l.nodeType)return!1;o=p="only"===a&&!o&&"nextSibling"}return!0}if(o=[g?q.firstChild:q.lastChild],g&&s){k=q[u]||(q[u]={}),j=k[a]||[],n=j[0]===w&&j[1],m=j[0]===w&&j[2],l=n&&q.childNodes[n];while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if(1===l.nodeType&&++m&&l===b){k[a]=[w,n,m];break}}else if(s&&(j=(b[u]||(b[u]={}))[a])&&j[0]===w)m=j[1];else while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if((h?l.nodeName.toLowerCase()===r:1===l.nodeType)&&++m&&(s&&((l[u]||(l[u]={}))[a]=[w,m]),l===b))break;return m-=e,m===d||m%d===0&&m/d>=0}}},PSEUDO:function(a,b){var c,e=d.pseudos[a]||d.setFilters[a.toLowerCase()]||fb.error("unsupported pseudo: "+a);return e[u]?e(b):e.length>1?(c=[a,a,"",b],d.setFilters.hasOwnProperty(a.toLowerCase())?hb(function(a,c){var d,f=e(a,b),g=f.length;while(g--)d=K.call(a,f[g]),a[d]=!(c[d]=f[g])}):function(a){return e(a,0,c)}):e}},pseudos:{not:hb(function(a){var b=[],c=[],d=h(a.replace(R,"$1"));return d[u]?hb(function(a,b,c,e){var f,g=d(a,null,e,[]),h=a.length;while(h--)(f=g[h])&&(a[h]=!(b[h]=f))}):function(a,e,f){return b[0]=a,d(b,null,f,c),!c.pop()}}),has:hb(function(a){return function(b){return fb(a,b).length>0}}),contains:hb(function(a){return function(b){return(b.textContent||b.innerText||e(b)).indexOf(a)>-1}}),lang:hb(function(a){return W.test(a||"")||fb.error("unsupported lang: "+a),a=a.replace(cb,db).toLowerCase(),function(b){var c;do if(c=p?b.lang:b.getAttribute("xml:lang")||b.getAttribute("lang"))return c=c.toLowerCase(),c===a||0===c.indexOf(a+"-");while((b=b.parentNode)&&1===b.nodeType);return!1}}),target:function(b){var c=a.location&&a.location.hash;return c&&c.slice(1)===b.id},root:function(a){return a===o},focus:function(a){return a===n.activeElement&&(!n.hasFocus||n.hasFocus())&&!!(a.type||a.href||~a.tabIndex)},enabled:function(a){return a.disabled===!1},disabled:function(a){return a.disabled===!0},checked:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&!!a.checked||"option"===b&&!!a.selected},selected:function(a){return a.parentNode&&a.parentNode.selectedIndex,a.selected===!0},empty:function(a){for(a=a.firstChild;a;a=a.nextSibling)if(a.nodeType<6)return!1;return!0},parent:function(a){return!d.pseudos.empty(a)},header:function(a){return Z.test(a.nodeName)},input:function(a){return Y.test(a.nodeName)},button:function(a){var 
b=a.nodeName.toLowerCase();return"input"===b&&"button"===a.type||"button"===b},text:function(a){var b;return"input"===a.nodeName.toLowerCase()&&"text"===a.type&&(null==(b=a.getAttribute("type"))||"text"===b.toLowerCase())},first:nb(function(){return[0]}),last:nb(function(a,b){return[b-1]}),eq:nb(function(a,b,c){return[0>c?c+b:c]}),even:nb(function(a,b){for(var c=0;b>c;c+=2)a.push(c);return a}),odd:nb(function(a,b){for(var c=1;b>c;c+=2)a.push(c);return a}),lt:nb(function(a,b,c){for(var d=0>c?c+b:c;--d>=0;)a.push(d);return a}),gt:nb(function(a,b,c){for(var d=0>c?c+b:c;++d<b;)a.push(d);return a})}},d.pseudos.nth=d.pseudos.eq;for(b in{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})d.pseudos[b]=lb(b);for(b in{submit:!0,reset:!0})d.pseudos[b]=mb(b);function pb(){}pb.prototype=d.filters=d.pseudos,d.setFilters=new pb,g=fb.tokenize=function(a,b){var c,e,f,g,h,i,j,k=z[a+" "];if(k)return b?0:k.slice(0);h=a,i=[],j=d.preFilter;while(h){(!c||(e=S.exec(h)))&&(e&&(h=h.slice(e[0].length)||h),i.push(f=[])),c=!1,(e=T.exec(h))&&(c=e.shift(),f.push({value:c,type:e[0].replace(R," ")}),h=h.slice(c.length));for(g in d.filter)!(e=X[g].exec(h))||j[g]&&!(e=j[g](e))||(c=e.shift(),f.push({value:c,type:g,matches:e}),h=h.slice(c.length));if(!c)break}return b?h.length:h?fb.error(a):z(a,i).slice(0)};function qb(a){for(var b=0,c=a.length,d="";c>b;b++)d+=a[b].value;return d}function rb(a,b,c){var d=b.dir,e=c&&"parentNode"===d,f=x++;return b.first?function(b,c,f){while(b=b[d])if(1===b.nodeType||e)return a(b,c,f)}:function(b,c,g){var h,i,j=[w,f];if(g){while(b=b[d])if((1===b.nodeType||e)&&a(b,c,g))return!0}else while(b=b[d])if(1===b.nodeType||e){if(i=b[u]||(b[u]={}),(h=i[d])&&h[0]===w&&h[1]===f)return j[2]=h[2];if(i[d]=j,j[2]=a(b,c,g))return!0}}}function sb(a){return a.length>1?function(b,c,d){var e=a.length;while(e--)if(!a[e](b,c,d))return!1;return!0}:a[0]}function tb(a,b,c){for(var d=0,e=b.length;e>d;d++)fb(a,b[d],c);return c}function ub(a,b,c,d,e){for(var f,g=[],h=0,i=a.length,j=null!=b;i>h;h++)(f=a[h])&&(!c||c(f,d,e))&&(g.push(f),j&&b.push(h));return g}function vb(a,b,c,d,e,f){return d&&!d[u]&&(d=vb(d)),e&&!e[u]&&(e=vb(e,f)),hb(function(f,g,h,i){var j,k,l,m=[],n=[],o=g.length,p=f||tb(b||"*",h.nodeType?[h]:h,[]),q=!a||!f&&b?p:ub(p,m,a,h,i),r=c?e||(f?a:o||d)?[]:g:q;if(c&&c(q,r,h,i),d){j=ub(r,n),d(j,[],h,i),k=j.length;while(k--)(l=j[k])&&(r[n[k]]=!(q[n[k]]=l))}if(f){if(e||a){if(e){j=[],k=r.length;while(k--)(l=r[k])&&j.push(q[k]=l);e(null,r=[],j,i)}k=r.length;while(k--)(l=r[k])&&(j=e?K.call(f,l):m[k])>-1&&(f[j]=!(g[j]=l))}}else r=ub(r===g?r.splice(o,r.length):r),e?e(null,g,r,i):I.apply(g,r)})}function wb(a){for(var b,c,e,f=a.length,g=d.relative[a[0].type],h=g||d.relative[" "],i=g?1:0,k=rb(function(a){return a===b},h,!0),l=rb(function(a){return K.call(b,a)>-1},h,!0),m=[function(a,c,d){return!g&&(d||c!==j)||((b=c).nodeType?k(a,c,d):l(a,c,d))}];f>i;i++)if(c=d.relative[a[i].type])m=[rb(sb(m),c)];else{if(c=d.filter[a[i].type].apply(null,a[i].matches),c[u]){for(e=++i;f>e;e++)if(d.relative[a[e].type])break;return vb(i>1&&sb(m),i>1&&qb(a.slice(0,i-1).concat({value:" "===a[i-2].type?"*":""})).replace(R,"$1"),c,e>i&&wb(a.slice(i,e)),f>e&&wb(a=a.slice(e)),f>e&&qb(a))}m.push(c)}return sb(m)}function xb(a,b){var c=b.length>0,e=a.length>0,f=function(f,g,h,i,k){var 
l,m,o,p=0,q="0",r=f&&[],s=[],t=j,u=f||e&&d.find.TAG("*",k),v=w+=null==t?1:Math.random()||.1,x=u.length;for(k&&(j=g!==n&&g);q!==x&&null!=(l=u[q]);q++){if(e&&l){m=0;while(o=a[m++])if(o(l,g,h)){i.push(l);break}k&&(w=v)}c&&((l=!o&&l)&&p--,f&&r.push(l))}if(p+=q,c&&q!==p){m=0;while(o=b[m++])o(r,s,g,h);if(f){if(p>0)while(q--)r[q]||s[q]||(s[q]=G.call(i));s=ub(s)}I.apply(i,s),k&&!f&&s.length>0&&p+b.length>1&&fb.uniqueSort(i)}return k&&(w=v,j=t),r};return c?hb(f):f}return h=fb.compile=function(a,b){var c,d=[],e=[],f=A[a+" "];if(!f){b||(b=g(a)),c=b.length;while(c--)f=wb(b[c]),f[u]?d.push(f):e.push(f);f=A(a,xb(e,d)),f.selector=a}return f},i=fb.select=function(a,b,e,f){var i,j,k,l,m,n="function"==typeof a&&a,o=!f&&g(a=n.selector||a);if(e=e||[],1===o.length){if(j=o[0]=o[0].slice(0),j.length>2&&"ID"===(k=j[0]).type&&c.getById&&9===b.nodeType&&p&&d.relative[j[1].type]){if(b=(d.find.ID(k.matches[0].replace(cb,db),b)||[])[0],!b)return e;n&&(b=b.parentNode),a=a.slice(j.shift().value.length)}i=X.needsContext.test(a)?0:j.length;while(i--){if(k=j[i],d.relative[l=k.type])break;if((m=d.find[l])&&(f=m(k.matches[0].replace(cb,db),ab.test(j[0].type)&&ob(b.parentNode)||b))){if(j.splice(i,1),a=f.length&&qb(j),!a)return I.apply(e,f),e;break}}}return(n||h(a,o))(f,b,!p,e,ab.test(a)&&ob(b.parentNode)||b),e},c.sortStable=u.split("").sort(B).join("")===u,c.detectDuplicates=!!l,m(),c.sortDetached=ib(function(a){return 1&a.compareDocumentPosition(n.createElement("div"))}),ib(function(a){return a.innerHTML="<a href='#'></a>","#"===a.firstChild.getAttribute("href")})||jb("type|href|height|width",function(a,b,c){return c?void 0:a.getAttribute(b,"type"===b.toLowerCase()?1:2)}),c.attributes&&ib(function(a){return a.innerHTML="<input/>",a.firstChild.setAttribute("value",""),""===a.firstChild.getAttribute("value")})||jb("value",function(a,b,c){return c||"input"!==a.nodeName.toLowerCase()?void 0:a.defaultValue}),ib(function(a){return null==a.getAttribute("disabled")})||jb(L,function(a,b,c){var d;return c?void 0:a[b]===!0?b.toLowerCase():(d=a.getAttributeNode(b))&&d.specified?d.value:null}),fb}(a);m.find=s,m.expr=s.selectors,m.expr[":"]=m.expr.pseudos,m.unique=s.uniqueSort,m.text=s.getText,m.isXMLDoc=s.isXML,m.contains=s.contains;var t=m.expr.match.needsContext,u=/^<(\w+)\s*\/?>(?:<\/\1>|)$/,v=/^.[^:#\[\.,]*$/;function w(a,b,c){if(m.isFunction(b))return m.grep(a,function(a,d){return!!b.call(a,d,a)!==c});if(b.nodeType)return m.grep(a,function(a){return a===b!==c});if("string"==typeof b){if(v.test(b))return m.filter(b,a,c);b=m.filter(b,a)}return m.grep(a,function(a){return m.inArray(a,b)>=0!==c})}m.filter=function(a,b,c){var d=b[0];return c&&(a=":not("+a+")"),1===b.length&&1===d.nodeType?m.find.matchesSelector(d,a)?[d]:[]:m.find.matches(a,m.grep(b,function(a){return 1===a.nodeType}))},m.fn.extend({find:function(a){var b,c=[],d=this,e=d.length;if("string"!=typeof a)return this.pushStack(m(a).filter(function(){for(b=0;e>b;b++)if(m.contains(d[b],this))return!0}));for(b=0;e>b;b++)m.find(a,d[b],c);return c=this.pushStack(e>1?m.unique(c):c),c.selector=this.selector?this.selector+" "+a:a,c},filter:function(a){return this.pushStack(w(this,a||[],!1))},not:function(a){return this.pushStack(w(this,a||[],!0))},is:function(a){return!!w(this,"string"==typeof a&&t.test(a)?m(a):a||[],!1).length}});var x,y=a.document,z=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]*))$/,A=m.fn.init=function(a,b){var c,d;if(!a)return this;if("string"==typeof 
a){if(c="<"===a.charAt(0)&&">"===a.charAt(a.length-1)&&a.length>=3?[null,a,null]:z.exec(a),!c||!c[1]&&b)return!b||b.jquery?(b||x).find(a):this.constructor(b).find(a);if(c[1]){if(b=b instanceof m?b[0]:b,m.merge(this,m.parseHTML(c[1],b&&b.nodeType?b.ownerDocument||b:y,!0)),u.test(c[1])&&m.isPlainObject(b))for(c in b)m.isFunction(this[c])?this[c](b[c]):this.attr(c,b[c]);return this}if(d=y.getElementById(c[2]),d&&d.parentNode){if(d.id!==c[2])return x.find(a);this.length=1,this[0]=d}return this.context=y,this.selector=a,this}return a.nodeType?(this.context=this[0]=a,this.length=1,this):m.isFunction(a)?"undefined"!=typeof x.ready?x.ready(a):a(m):(void 0!==a.selector&&(this.selector=a.selector,this.context=a.context),m.makeArray(a,this))};A.prototype=m.fn,x=m(y);var B=/^(?:parents|prev(?:Until|All))/,C={children:!0,contents:!0,next:!0,prev:!0};m.extend({dir:function(a,b,c){var d=[],e=a[b];while(e&&9!==e.nodeType&&(void 0===c||1!==e.nodeType||!m(e).is(c)))1===e.nodeType&&d.push(e),e=e[b];return d},sibling:function(a,b){for(var c=[];a;a=a.nextSibling)1===a.nodeType&&a!==b&&c.push(a);return c}}),m.fn.extend({has:function(a){var b,c=m(a,this),d=c.length;return this.filter(function(){for(b=0;d>b;b++)if(m.contains(this,c[b]))return!0})},closest:function(a,b){for(var c,d=0,e=this.length,f=[],g=t.test(a)||"string"!=typeof a?m(a,b||this.context):0;e>d;d++)for(c=this[d];c&&c!==b;c=c.parentNode)if(c.nodeType<11&&(g?g.index(c)>-1:1===c.nodeType&&m.find.matchesSelector(c,a))){f.push(c);break}return this.pushStack(f.length>1?m.unique(f):f)},index:function(a){return a?"string"==typeof a?m.inArray(this[0],m(a)):m.inArray(a.jquery?a[0]:a,this):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(a,b){return this.pushStack(m.unique(m.merge(this.get(),m(a,b))))},addBack:function(a){return this.add(null==a?this.prevObject:this.prevObject.filter(a))}});function D(a,b){do a=a[b];while(a&&1!==a.nodeType);return a}m.each({parent:function(a){var b=a.parentNode;return b&&11!==b.nodeType?b:null},parents:function(a){return m.dir(a,"parentNode")},parentsUntil:function(a,b,c){return m.dir(a,"parentNode",c)},next:function(a){return D(a,"nextSibling")},prev:function(a){return D(a,"previousSibling")},nextAll:function(a){return m.dir(a,"nextSibling")},prevAll:function(a){return m.dir(a,"previousSibling")},nextUntil:function(a,b,c){return m.dir(a,"nextSibling",c)},prevUntil:function(a,b,c){return m.dir(a,"previousSibling",c)},siblings:function(a){return m.sibling((a.parentNode||{}).firstChild,a)},children:function(a){return m.sibling(a.firstChild)},contents:function(a){return m.nodeName(a,"iframe")?a.contentDocument||a.contentWindow.document:m.merge([],a.childNodes)}},function(a,b){m.fn[a]=function(c,d){var e=m.map(this,b,c);return"Until"!==a.slice(-5)&&(d=c),d&&"string"==typeof d&&(e=m.filter(d,e)),this.length>1&&(C[a]||(e=m.unique(e)),B.test(a)&&(e=e.reverse())),this.pushStack(e)}});var E=/\S+/g,F={};function G(a){var b=F[a]={};return m.each(a.match(E)||[],function(a,c){b[c]=!0}),b}m.Callbacks=function(a){a="string"==typeof a?F[a]||G(a):m.extend({},a);var b,c,d,e,f,g,h=[],i=!a.once&&[],j=function(l){for(c=a.memory&&l,d=!0,f=g||0,g=0,e=h.length,b=!0;h&&e>f;f++)if(h[f].apply(l[0],l[1])===!1&&a.stopOnFalse){c=!1;break}b=!1,h&&(i?i.length&&j(i.shift()):c?h=[]:k.disable())},k={add:function(){if(h){var d=h.length;!function f(b){m.each(b,function(b,c){var d=m.type(c);"function"===d?a.unique&&k.has(c)||h.push(c):c&&c.length&&"string"!==d&&f(c)})}(arguments),b?e=h.length:c&&(g=d,j(c))}return 
this},remove:function(){return h&&m.each(arguments,function(a,c){var d;while((d=m.inArray(c,h,d))>-1)h.splice(d,1),b&&(e>=d&&e--,f>=d&&f--)}),this},has:function(a){return a?m.inArray(a,h)>-1:!(!h||!h.length)},empty:function(){return h=[],e=0,this},disable:function(){return h=i=c=void 0,this},disabled:function(){return!h},lock:function(){return i=void 0,c||k.disable(),this},locked:function(){return!i},fireWith:function(a,c){return!h||d&&!i||(c=c||[],c=[a,c.slice?c.slice():c],b?i.push(c):j(c)),this},fire:function(){return k.fireWith(this,arguments),this},fired:function(){return!!d}};return k},m.extend({Deferred:function(a){var b=[["resolve","done",m.Callbacks("once memory"),"resolved"],["reject","fail",m.Callbacks("once memory"),"rejected"],["notify","progress",m.Callbacks("memory")]],c="pending",d={state:function(){return c},always:function(){return e.done(arguments).fail(arguments),this},then:function(){var a=arguments;return m.Deferred(function(c){m.each(b,function(b,f){var g=m.isFunction(a[b])&&a[b];e[f[1]](function(){var a=g&&g.apply(this,arguments);a&&m.isFunction(a.promise)?a.promise().done(c.resolve).fail(c.reject).progress(c.notify):c[f[0]+"With"](this===d?c.promise():this,g?[a]:arguments)})}),a=null}).promise()},promise:function(a){return null!=a?m.extend(a,d):d}},e={};return d.pipe=d.then,m.each(b,function(a,f){var g=f[2],h=f[3];d[f[1]]=g.add,h&&g.add(function(){c=h},b[1^a][2].disable,b[2][2].lock),e[f[0]]=function(){return e[f[0]+"With"](this===e?d:this,arguments),this},e[f[0]+"With"]=g.fireWith}),d.promise(e),a&&a.call(e,e),e},when:function(a){var b=0,c=d.call(arguments),e=c.length,f=1!==e||a&&m.isFunction(a.promise)?e:0,g=1===f?a:m.Deferred(),h=function(a,b,c){return function(e){b[a]=this,c[a]=arguments.length>1?d.call(arguments):e,c===i?g.notifyWith(b,c):--f||g.resolveWith(b,c)}},i,j,k;if(e>1)for(i=new Array(e),j=new Array(e),k=new Array(e);e>b;b++)c[b]&&m.isFunction(c[b].promise)?c[b].promise().done(h(b,k,c)).fail(g.reject).progress(h(b,j,i)):--f;return f||g.resolveWith(k,c),g.promise()}});var H;m.fn.ready=function(a){return m.ready.promise().done(a),this},m.extend({isReady:!1,readyWait:1,holdReady:function(a){a?m.readyWait++:m.ready(!0)},ready:function(a){if(a===!0?!--m.readyWait:!m.isReady){if(!y.body)return setTimeout(m.ready);m.isReady=!0,a!==!0&&--m.readyWait>0||(H.resolveWith(y,[m]),m.fn.triggerHandler&&(m(y).triggerHandler("ready"),m(y).off("ready")))}}});function I(){y.addEventListener?(y.removeEventListener("DOMContentLoaded",J,!1),a.removeEventListener("load",J,!1)):(y.detachEvent("onreadystatechange",J),a.detachEvent("onload",J))}function J(){(y.addEventListener||"load"===event.type||"complete"===y.readyState)&&(I(),m.ready())}m.ready.promise=function(b){if(!H)if(H=m.Deferred(),"complete"===y.readyState)setTimeout(m.ready);else if(y.addEventListener)y.addEventListener("DOMContentLoaded",J,!1),a.addEventListener("load",J,!1);else{y.attachEvent("onreadystatechange",J),a.attachEvent("onload",J);var c=!1;try{c=null==a.frameElement&&y.documentElement}catch(d){}c&&c.doScroll&&!function e(){if(!m.isReady){try{c.doScroll("left")}catch(a){return setTimeout(e,50)}I(),m.ready()}}()}return H.promise(b)};var K="undefined",L;for(L in m(k))break;k.ownLast="0"!==L,k.inlineBlockNeedsLayout=!1,m(function(){var a,b,c,d;c=y.getElementsByTagName("body")[0],c&&c.style&&(b=y.createElement("div"),d=y.createElement("div"),d.style.cssText="position:absolute;border:0;width:0;height:0;top:0;left:-9999px",c.appendChild(d).appendChild(b),typeof 
b.style.zoom!==K&&(b.style.cssText="display:inline;margin:0;border:0;padding:1px;width:1px;zoom:1",k.inlineBlockNeedsLayout=a=3===b.offsetWidth,a&&(c.style.zoom=1)),c.removeChild(d))}),function(){var a=y.createElement("div");if(null==k.deleteExpando){k.deleteExpando=!0;try{delete a.test}catch(b){k.deleteExpando=!1}}a=null}(),m.acceptData=function(a){var b=m.noData[(a.nodeName+" ").toLowerCase()],c=+a.nodeType||1;return 1!==c&&9!==c?!1:!b||b!==!0&&a.getAttribute("classid")===b};var M=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,N=/([A-Z])/g;function O(a,b,c){if(void 0===c&&1===a.nodeType){var d="data-"+b.replace(N,"-$1").toLowerCase();if(c=a.getAttribute(d),"string"==typeof c){try{c="true"===c?!0:"false"===c?!1:"null"===c?null:+c+""===c?+c:M.test(c)?m.parseJSON(c):c}catch(e){}m.data(a,b,c)}else c=void 0}return c}function P(a){var b;for(b in a)if(("data"!==b||!m.isEmptyObject(a[b]))&&"toJSON"!==b)return!1;return!0}function Q(a,b,d,e){if(m.acceptData(a)){var f,g,h=m.expando,i=a.nodeType,j=i?m.cache:a,k=i?a[h]:a[h]&&h;
+if(k&&j[k]&&(e||j[k].data)||void 0!==d||"string"!=typeof b)return k||(k=i?a[h]=c.pop()||m.guid++:h),j[k]||(j[k]=i?{}:{toJSON:m.noop}),("object"==typeof b||"function"==typeof b)&&(e?j[k]=m.extend(j[k],b):j[k].data=m.extend(j[k].data,b)),g=j[k],e||(g.data||(g.data={}),g=g.data),void 0!==d&&(g[m.camelCase(b)]=d),"string"==typeof b?(f=g[b],null==f&&(f=g[m.camelCase(b)])):f=g,f}}function R(a,b,c){if(m.acceptData(a)){var d,e,f=a.nodeType,g=f?m.cache:a,h=f?a[m.expando]:m.expando;if(g[h]){if(b&&(d=c?g[h]:g[h].data)){m.isArray(b)?b=b.concat(m.map(b,m.camelCase)):b in d?b=[b]:(b=m.camelCase(b),b=b in d?[b]:b.split(" ")),e=b.length;while(e--)delete d[b[e]];if(c?!P(d):!m.isEmptyObject(d))return}(c||(delete g[h].data,P(g[h])))&&(f?m.cleanData([a],!0):k.deleteExpando||g!=g.window?delete g[h]:g[h]=null)}}}m.extend({cache:{},noData:{"applet ":!0,"embed ":!0,"object ":"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000"},hasData:function(a){return a=a.nodeType?m.cache[a[m.expando]]:a[m.expando],!!a&&!P(a)},data:function(a,b,c){return Q(a,b,c)},removeData:function(a,b){return R(a,b)},_data:function(a,b,c){return Q(a,b,c,!0)},_removeData:function(a,b){return R(a,b,!0)}}),m.fn.extend({data:function(a,b){var c,d,e,f=this[0],g=f&&f.attributes;if(void 0===a){if(this.length&&(e=m.data(f),1===f.nodeType&&!m._data(f,"parsedAttrs"))){c=g.length;while(c--)g[c]&&(d=g[c].name,0===d.indexOf("data-")&&(d=m.camelCase(d.slice(5)),O(f,d,e[d])));m._data(f,"parsedAttrs",!0)}return e}return"object"==typeof a?this.each(function(){m.data(this,a)}):arguments.length>1?this.each(function(){m.data(this,a,b)}):f?O(f,a,m.data(f,a)):void 0},removeData:function(a){return this.each(function(){m.removeData(this,a)})}}),m.extend({queue:function(a,b,c){var d;return a?(b=(b||"fx")+"queue",d=m._data(a,b),c&&(!d||m.isArray(c)?d=m._data(a,b,m.makeArray(c)):d.push(c)),d||[]):void 0},dequeue:function(a,b){b=b||"fx";var c=m.queue(a,b),d=c.length,e=c.shift(),f=m._queueHooks(a,b),g=function(){m.dequeue(a,b)};"inprogress"===e&&(e=c.shift(),d--),e&&("fx"===b&&c.unshift("inprogress"),delete f.stop,e.call(a,g,f)),!d&&f&&f.empty.fire()},_queueHooks:function(a,b){var c=b+"queueHooks";return m._data(a,c)||m._data(a,c,{empty:m.Callbacks("once memory").add(function(){m._removeData(a,b+"queue"),m._removeData(a,c)})})}}),m.fn.extend({queue:function(a,b){var c=2;return"string"!=typeof a&&(b=a,a="fx",c--),arguments.length<c?m.queue(this[0],a):void 0===b?this:this.each(function(){var c=m.queue(this,a,b);m._queueHooks(this,a),"fx"===a&&"inprogress"!==c[0]&&m.dequeue(this,a)})},dequeue:function(a){return this.each(function(){m.dequeue(this,a)})},clearQueue:function(a){return this.queue(a||"fx",[])},promise:function(a,b){var c,d=1,e=m.Deferred(),f=this,g=this.length,h=function(){--d||e.resolveWith(f,[f])};"string"!=typeof a&&(b=a,a=void 0),a=a||"fx";while(g--)c=m._data(f[g],a+"queueHooks"),c&&c.empty&&(d++,c.empty.add(h));return h(),e.promise(b)}});var S=/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source,T=["Top","Right","Bottom","Left"],U=function(a,b){return a=b||a,"none"===m.css(a,"display")||!m.contains(a.ownerDocument,a)},V=m.access=function(a,b,c,d,e,f,g){var h=0,i=a.length,j=null==c;if("object"===m.type(c)){e=!0;for(h in c)m.access(a,b,h,c[h],!0,f,g)}else if(void 0!==d&&(e=!0,m.isFunction(d)||(g=!0),j&&(g?(b.call(a,d),b=null):(j=b,b=function(a,b,c){return j.call(m(a),c)})),b))for(;i>h;h++)b(a[h],c,g?d:d.call(a[h],h,b(a[h],c)));return e?a:j?b.call(a):i?b(a[0],c):f},W=/^(?:checkbox|radio)$/i;!function(){var 
a=y.createElement("input"),b=y.createElement("div"),c=y.createDocumentFragment();if(b.innerHTML="  <link/><table></table><a href='/a'>a</a><input type='checkbox'/>",k.leadingWhitespace=3===b.firstChild.nodeType,k.tbody=!b.getElementsByTagName("tbody").length,k.htmlSerialize=!!b.getElementsByTagName("link").length,k.html5Clone="<:nav></:nav>"!==y.createElement("nav").cloneNode(!0).outerHTML,a.type="checkbox",a.checked=!0,c.appendChild(a),k.appendChecked=a.checked,b.innerHTML="<textarea>x</textarea>",k.noCloneChecked=!!b.cloneNode(!0).lastChild.defaultValue,c.appendChild(b),b.innerHTML="<input type='radio' checked='checked' name='t'/>",k.checkClone=b.cloneNode(!0).cloneNode(!0).lastChild.checked,k.noCloneEvent=!0,b.attachEvent&&(b.attachEvent("onclick",function(){k.noCloneEvent=!1}),b.cloneNode(!0).click()),null==k.deleteExpando){k.deleteExpando=!0;try{delete b.test}catch(d){k.deleteExpando=!1}}}(),function(){var b,c,d=y.createElement("div");for(b in{submit:!0,change:!0,focusin:!0})c="on"+b,(k[b+"Bubbles"]=c in a)||(d.setAttribute(c,"t"),k[b+"Bubbles"]=d.attributes[c].expando===!1);d=null}();var X=/^(?:input|select|textarea)$/i,Y=/^key/,Z=/^(?:mouse|pointer|contextmenu)|click/,$=/^(?:focusinfocus|focusoutblur)$/,_=/^([^.]*)(?:\.(.+)|)$/;function ab(){return!0}function bb(){return!1}function cb(){try{return y.activeElement}catch(a){}}m.event={global:{},add:function(a,b,c,d,e){var f,g,h,i,j,k,l,n,o,p,q,r=m._data(a);if(r){c.handler&&(i=c,c=i.handler,e=i.selector),c.guid||(c.guid=m.guid++),(g=r.events)||(g=r.events={}),(k=r.handle)||(k=r.handle=function(a){return typeof m===K||a&&m.event.triggered===a.type?void 0:m.event.dispatch.apply(k.elem,arguments)},k.elem=a),b=(b||"").match(E)||[""],h=b.length;while(h--)f=_.exec(b[h])||[],o=q=f[1],p=(f[2]||"").split(".").sort(),o&&(j=m.event.special[o]||{},o=(e?j.delegateType:j.bindType)||o,j=m.event.special[o]||{},l=m.extend({type:o,origType:q,data:d,handler:c,guid:c.guid,selector:e,needsContext:e&&m.expr.match.needsContext.test(e),namespace:p.join(".")},i),(n=g[o])||(n=g[o]=[],n.delegateCount=0,j.setup&&j.setup.call(a,d,p,k)!==!1||(a.addEventListener?a.addEventListener(o,k,!1):a.attachEvent&&a.attachEvent("on"+o,k))),j.add&&(j.add.call(a,l),l.handler.guid||(l.handler.guid=c.guid)),e?n.splice(n.delegateCount++,0,l):n.push(l),m.event.global[o]=!0);a=null}},remove:function(a,b,c,d,e){var f,g,h,i,j,k,l,n,o,p,q,r=m.hasData(a)&&m._data(a);if(r&&(k=r.events)){b=(b||"").match(E)||[""],j=b.length;while(j--)if(h=_.exec(b[j])||[],o=q=h[1],p=(h[2]||"").split(".").sort(),o){l=m.event.special[o]||{},o=(d?l.delegateType:l.bindType)||o,n=k[o]||[],h=h[2]&&new RegExp("(^|\\.)"+p.join("\\.(?:.*\\.|)")+"(\\.|$)"),i=f=n.length;while(f--)g=n[f],!e&&q!==g.origType||c&&c.guid!==g.guid||h&&!h.test(g.namespace)||d&&d!==g.selector&&("**"!==d||!g.selector)||(n.splice(f,1),g.selector&&n.delegateCount--,l.remove&&l.remove.call(a,g));i&&!n.length&&(l.teardown&&l.teardown.call(a,p,r.handle)!==!1||m.removeEvent(a,o,r.handle),delete k[o])}else for(o in k)m.event.remove(a,o+b[j],c,d,!0);m.isEmptyObject(k)&&(delete r.handle,m._removeData(a,"events"))}},trigger:function(b,c,d,e){var f,g,h,i,k,l,n,o=[d||y],p=j.call(b,"type")?b.type:b,q=j.call(b,"namespace")?b.namespace.split("."):[];if(h=l=d=d||y,3!==d.nodeType&&8!==d.nodeType&&!$.test(p+m.event.triggered)&&(p.indexOf(".")>=0&&(q=p.split("."),p=q.shift(),q.sort()),g=p.indexOf(":")<0&&"on"+p,b=b[m.expando]?b:new m.Event(p,"object"==typeof b&&b),b.isTrigger=e?2:3,b.namespace=q.join("."),b.namespace_re=b.namespace?new 
RegExp("(^|\\.)"+q.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,b.result=void 0,b.target||(b.target=d),c=null==c?[b]:m.makeArray(c,[b]),k=m.event.special[p]||{},e||!k.trigger||k.trigger.apply(d,c)!==!1)){if(!e&&!k.noBubble&&!m.isWindow(d)){for(i=k.delegateType||p,$.test(i+p)||(h=h.parentNode);h;h=h.parentNode)o.push(h),l=h;l===(d.ownerDocument||y)&&o.push(l.defaultView||l.parentWindow||a)}n=0;while((h=o[n++])&&!b.isPropagationStopped())b.type=n>1?i:k.bindType||p,f=(m._data(h,"events")||{})[b.type]&&m._data(h,"handle"),f&&f.apply(h,c),f=g&&h[g],f&&f.apply&&m.acceptData(h)&&(b.result=f.apply(h,c),b.result===!1&&b.preventDefault());if(b.type=p,!e&&!b.isDefaultPrevented()&&(!k._default||k._default.apply(o.pop(),c)===!1)&&m.acceptData(d)&&g&&d[p]&&!m.isWindow(d)){l=d[g],l&&(d[g]=null),m.event.triggered=p;try{d[p]()}catch(r){}m.event.triggered=void 0,l&&(d[g]=l)}return b.result}},dispatch:function(a){a=m.event.fix(a);var b,c,e,f,g,h=[],i=d.call(arguments),j=(m._data(this,"events")||{})[a.type]||[],k=m.event.special[a.type]||{};if(i[0]=a,a.delegateTarget=this,!k.preDispatch||k.preDispatch.call(this,a)!==!1){h=m.event.handlers.call(this,a,j),b=0;while((f=h[b++])&&!a.isPropagationStopped()){a.currentTarget=f.elem,g=0;while((e=f.handlers[g++])&&!a.isImmediatePropagationStopped())(!a.namespace_re||a.namespace_re.test(e.namespace))&&(a.handleObj=e,a.data=e.data,c=((m.event.special[e.origType]||{}).handle||e.handler).apply(f.elem,i),void 0!==c&&(a.result=c)===!1&&(a.preventDefault(),a.stopPropagation()))}return k.postDispatch&&k.postDispatch.call(this,a),a.result}},handlers:function(a,b){var c,d,e,f,g=[],h=b.delegateCount,i=a.target;if(h&&i.nodeType&&(!a.button||"click"!==a.type))for(;i!=this;i=i.parentNode||this)if(1===i.nodeType&&(i.disabled!==!0||"click"!==a.type)){for(e=[],f=0;h>f;f++)d=b[f],c=d.selector+" ",void 0===e[c]&&(e[c]=d.needsContext?m(c,this).index(i)>=0:m.find(c,this,null,[i]).length),e[c]&&e.push(d);e.length&&g.push({elem:i,handlers:e})}return h<b.length&&g.push({elem:this,handlers:b.slice(h)}),g},fix:function(a){if(a[m.expando])return a;var b,c,d,e=a.type,f=a,g=this.fixHooks[e];g||(this.fixHooks[e]=g=Z.test(e)?this.mouseHooks:Y.test(e)?this.keyHooks:{}),d=g.props?this.props.concat(g.props):this.props,a=new m.Event(f),b=d.length;while(b--)c=d[b],a[c]=f[c];return a.target||(a.target=f.srcElement||y),3===a.target.nodeType&&(a.target=a.target.parentNode),a.metaKey=!!a.metaKey,g.filter?g.filter(a,f):a},props:"altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "),fixHooks:{},keyHooks:{props:"char charCode key keyCode".split(" "),filter:function(a,b){return null==a.which&&(a.which=null!=b.charCode?b.charCode:b.keyCode),a}},mouseHooks:{props:"button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "),filter:function(a,b){var c,d,e,f=b.button,g=b.fromElement;return null==a.pageX&&null!=b.clientX&&(d=a.target.ownerDocument||y,e=d.documentElement,c=d.body,a.pageX=b.clientX+(e&&e.scrollLeft||c&&c.scrollLeft||0)-(e&&e.clientLeft||c&&c.clientLeft||0),a.pageY=b.clientY+(e&&e.scrollTop||c&&c.scrollTop||0)-(e&&e.clientTop||c&&c.clientTop||0)),!a.relatedTarget&&g&&(a.relatedTarget=g===a.target?b.toElement:g),a.which||void 0===f||(a.which=1&f?1:2&f?3:4&f?2:0),a}},special:{load:{noBubble:!0},focus:{trigger:function(){if(this!==cb()&&this.focus)try{return this.focus(),!1}catch(a){}},delegateType:"focusin"},blur:{trigger:function(){return this===cb()&&this.blur?(this.blur(),!1):void 
0},delegateType:"focusout"},click:{trigger:function(){return m.nodeName(this,"input")&&"checkbox"===this.type&&this.click?(this.click(),!1):void 0},_default:function(a){return m.nodeName(a.target,"a")}},beforeunload:{postDispatch:function(a){void 0!==a.result&&a.originalEvent&&(a.originalEvent.returnValue=a.result)}}},simulate:function(a,b,c,d){var e=m.extend(new m.Event,c,{type:a,isSimulated:!0,originalEvent:{}});d?m.event.trigger(e,null,b):m.event.dispatch.call(b,e),e.isDefaultPrevented()&&c.preventDefault()}},m.removeEvent=y.removeEventListener?function(a,b,c){a.removeEventListener&&a.removeEventListener(b,c,!1)}:function(a,b,c){var d="on"+b;a.detachEvent&&(typeof a[d]===K&&(a[d]=null),a.detachEvent(d,c))},m.Event=function(a,b){return this instanceof m.Event?(a&&a.type?(this.originalEvent=a,this.type=a.type,this.isDefaultPrevented=a.defaultPrevented||void 0===a.defaultPrevented&&a.returnValue===!1?ab:bb):this.type=a,b&&m.extend(this,b),this.timeStamp=a&&a.timeStamp||m.now(),void(this[m.expando]=!0)):new m.Event(a,b)},m.Event.prototype={isDefaultPrevented:bb,isPropagationStopped:bb,isImmediatePropagationStopped:bb,preventDefault:function(){var a=this.originalEvent;this.isDefaultPrevented=ab,a&&(a.preventDefault?a.preventDefault():a.returnValue=!1)},stopPropagation:function(){var a=this.originalEvent;this.isPropagationStopped=ab,a&&(a.stopPropagation&&a.stopPropagation(),a.cancelBubble=!0)},stopImmediatePropagation:function(){var a=this.originalEvent;this.isImmediatePropagationStopped=ab,a&&a.stopImmediatePropagation&&a.stopImmediatePropagation(),this.stopPropagation()}},m.each({mouseenter:"mouseover",mouseleave:"mouseout",pointerenter:"pointerover",pointerleave:"pointerout"},function(a,b){m.event.special[a]={delegateType:b,bindType:b,handle:function(a){var c,d=this,e=a.relatedTarget,f=a.handleObj;return(!e||e!==d&&!m.contains(d,e))&&(a.type=f.origType,c=f.handler.apply(this,arguments),a.type=b),c}}}),k.submitBubbles||(m.event.special.submit={setup:function(){return m.nodeName(this,"form")?!1:void m.event.add(this,"click._submit keypress._submit",function(a){var b=a.target,c=m.nodeName(b,"input")||m.nodeName(b,"button")?b.form:void 0;c&&!m._data(c,"submitBubbles")&&(m.event.add(c,"submit._submit",function(a){a._submit_bubble=!0}),m._data(c,"submitBubbles",!0))})},postDispatch:function(a){a._submit_bubble&&(delete a._submit_bubble,this.parentNode&&!a.isTrigger&&m.event.simulate("submit",this.parentNode,a,!0))},teardown:function(){return m.nodeName(this,"form")?!1:void m.event.remove(this,"._submit")}}),k.changeBubbles||(m.event.special.change={setup:function(){return X.test(this.nodeName)?(("checkbox"===this.type||"radio"===this.type)&&(m.event.add(this,"propertychange._change",function(a){"checked"===a.originalEvent.propertyName&&(this._just_changed=!0)}),m.event.add(this,"click._change",function(a){this._just_changed&&!a.isTrigger&&(this._just_changed=!1),m.event.simulate("change",this,a,!0)})),!1):void m.event.add(this,"beforeactivate._change",function(a){var b=a.target;X.test(b.nodeName)&&!m._data(b,"changeBubbles")&&(m.event.add(b,"change._change",function(a){!this.parentNode||a.isSimulated||a.isTrigger||m.event.simulate("change",this.parentNode,a,!0)}),m._data(b,"changeBubbles",!0))})},handle:function(a){var b=a.target;return this!==b||a.isSimulated||a.isTrigger||"radio"!==b.type&&"checkbox"!==b.type?a.handleObj.handler.apply(this,arguments):void 0},teardown:function(){return 
m.event.remove(this,"._change"),!X.test(this.nodeName)}}),k.focusinBubbles||m.each({focus:"focusin",blur:"focusout"},function(a,b){var c=function(a){m.event.simulate(b,a.target,m.event.fix(a),!0)};m.event.special[b]={setup:function(){var d=this.ownerDocument||this,e=m._data(d,b);e||d.addEventListener(a,c,!0),m._data(d,b,(e||0)+1)},teardown:function(){var d=this.ownerDocument||this,e=m._data(d,b)-1;e?m._data(d,b,e):(d.removeEventListener(a,c,!0),m._removeData(d,b))}}}),m.fn.extend({on:function(a,b,c,d,e){var f,g;if("object"==typeof a){"string"!=typeof b&&(c=c||b,b=void 0);for(f in a)this.on(f,b,c,a[f],e);return this}if(null==c&&null==d?(d=b,c=b=void 0):null==d&&("string"==typeof b?(d=c,c=void 0):(d=c,c=b,b=void 0)),d===!1)d=bb;else if(!d)return this;return 1===e&&(g=d,d=function(a){return m().off(a),g.apply(this,arguments)},d.guid=g.guid||(g.guid=m.guid++)),this.each(function(){m.event.add(this,a,d,c,b)})},one:function(a,b,c,d){return this.on(a,b,c,d,1)},off:function(a,b,c){var d,e;if(a&&a.preventDefault&&a.handleObj)return d=a.handleObj,m(a.delegateTarget).off(d.namespace?d.origType+"."+d.namespace:d.origType,d.selector,d.handler),this;if("object"==typeof a){for(e in a)this.off(e,b,a[e]);return this}return(b===!1||"function"==typeof b)&&(c=b,b=void 0),c===!1&&(c=bb),this.each(function(){m.event.remove(this,a,c,b)})},trigger:function(a,b){return this.each(function(){m.event.trigger(a,b,this)})},triggerHandler:function(a,b){var c=this[0];return c?m.event.trigger(a,b,c,!0):void 0}});function db(a){var b=eb.split("|"),c=a.createDocumentFragment();if(c.createElement)while(b.length)c.createElement(b.pop());return c}var eb="abbr|article|aside|audio|bdi|canvas|data|datalist|details|figcaption|figure|footer|header|hgroup|mark|meter|nav|output|progress|section|summary|time|video",fb=/ jQuery\d+="(?:null|\d+)"/g,gb=new RegExp("<(?:"+eb+")[\\s/>]","i"),hb=/^\s+/,ib=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi,jb=/<([\w:]+)/,kb=/<tbody/i,lb=/<|&#?\w+;/,mb=/<(?:script|style|link)/i,nb=/checked\s*(?:[^=]|=\s*.checked.)/i,ob=/^$|\/(?:java|ecma)script/i,pb=/^true\/(.*)/,qb=/^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g,rb={option:[1,"<select multiple='multiple'>","</select>"],legend:[1,"<fieldset>","</fieldset>"],area:[1,"<map>","</map>"],param:[1,"<object>","</object>"],thead:[1,"<table>","</table>"],tr:[2,"<table><tbody>","</tbody></table>"],col:[2,"<table><tbody></tbody><colgroup>","</colgroup></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:k.htmlSerialize?[0,"",""]:[1,"X<div>","</div>"]},sb=db(y),tb=sb.appendChild(y.createElement("div"));rb.optgroup=rb.option,rb.tbody=rb.tfoot=rb.colgroup=rb.caption=rb.thead,rb.th=rb.td;function ub(a,b){var c,d,e=0,f=typeof a.getElementsByTagName!==K?a.getElementsByTagName(b||"*"):typeof a.querySelectorAll!==K?a.querySelectorAll(b||"*"):void 0;if(!f)for(f=[],c=a.childNodes||a;null!=(d=c[e]);e++)!b||m.nodeName(d,b)?f.push(d):m.merge(f,ub(d,b));return void 0===b||b&&m.nodeName(a,b)?m.merge([a],f):f}function vb(a){W.test(a.type)&&(a.defaultChecked=a.checked)}function wb(a,b){return m.nodeName(a,"table")&&m.nodeName(11!==b.nodeType?b:b.firstChild,"tr")?a.getElementsByTagName("tbody")[0]||a.appendChild(a.ownerDocument.createElement("tbody")):a}function xb(a){return a.type=(null!==m.find.attr(a,"type"))+"/"+a.type,a}function yb(a){var b=pb.exec(a.type);return b?a.type=b[1]:a.removeAttribute("type"),a}function zb(a,b){for(var c,d=0;null!=(c=a[d]);d++)m._data(c,"globalEval",!b||m._data(b[d],"globalEval"))}function 
Ab(a,b){if(1===b.nodeType&&m.hasData(a)){var c,d,e,f=m._data(a),g=m._data(b,f),h=f.events;if(h){delete g.handle,g.events={};for(c in h)for(d=0,e=h[c].length;e>d;d++)m.event.add(b,c,h[c][d])}g.data&&(g.data=m.extend({},g.data))}}function Bb(a,b){var c,d,e;if(1===b.nodeType){if(c=b.nodeName.toLowerCase(),!k.noCloneEvent&&b[m.expando]){e=m._data(b);for(d in e.events)m.removeEvent(b,d,e.handle);b.removeAttribute(m.expando)}"script"===c&&b.text!==a.text?(xb(b).text=a.text,yb(b)):"object"===c?(b.parentNode&&(b.outerHTML=a.outerHTML),k.html5Clone&&a.innerHTML&&!m.trim(b.innerHTML)&&(b.innerHTML=a.innerHTML)):"input"===c&&W.test(a.type)?(b.defaultChecked=b.checked=a.checked,b.value!==a.value&&(b.value=a.value)):"option"===c?b.defaultSelected=b.selected=a.defaultSelected:("input"===c||"textarea"===c)&&(b.defaultValue=a.defaultValue)}}m.extend({clone:function(a,b,c){var d,e,f,g,h,i=m.contains(a.ownerDocument,a);if(k.html5Clone||m.isXMLDoc(a)||!gb.test("<"+a.nodeName+">")?f=a.cloneNode(!0):(tb.innerHTML=a.outerHTML,tb.removeChild(f=tb.firstChild)),!(k.noCloneEvent&&k.noCloneChecked||1!==a.nodeType&&11!==a.nodeType||m.isXMLDoc(a)))for(d=ub(f),h=ub(a),g=0;null!=(e=h[g]);++g)d[g]&&Bb(e,d[g]);if(b)if(c)for(h=h||ub(a),d=d||ub(f),g=0;null!=(e=h[g]);g++)Ab(e,d[g]);else Ab(a,f);return d=ub(f,"script"),d.length>0&&zb(d,!i&&ub(a,"script")),d=h=e=null,f},buildFragment:function(a,b,c,d){for(var e,f,g,h,i,j,l,n=a.length,o=db(b),p=[],q=0;n>q;q++)if(f=a[q],f||0===f)if("object"===m.type(f))m.merge(p,f.nodeType?[f]:f);else if(lb.test(f)){h=h||o.appendChild(b.createElement("div")),i=(jb.exec(f)||["",""])[1].toLowerCase(),l=rb[i]||rb._default,h.innerHTML=l[1]+f.replace(ib,"<$1></$2>")+l[2],e=l[0];while(e--)h=h.lastChild;if(!k.leadingWhitespace&&hb.test(f)&&p.push(b.createTextNode(hb.exec(f)[0])),!k.tbody){f="table"!==i||kb.test(f)?"<table>"!==l[1]||kb.test(f)?0:h:h.firstChild,e=f&&f.childNodes.length;while(e--)m.nodeName(j=f.childNodes[e],"tbody")&&!j.childNodes.length&&f.removeChild(j)}m.merge(p,h.childNodes),h.textContent="";while(h.firstChild)h.removeChild(h.firstChild);h=o.lastChild}else p.push(b.createTextNode(f));h&&o.removeChild(h),k.appendChecked||m.grep(ub(p,"input"),vb),q=0;while(f=p[q++])if((!d||-1===m.inArray(f,d))&&(g=m.contains(f.ownerDocument,f),h=ub(o.appendChild(f),"script"),g&&zb(h),c)){e=0;while(f=h[e++])ob.test(f.type||"")&&c.push(f)}return h=null,o},cleanData:function(a,b){for(var d,e,f,g,h=0,i=m.expando,j=m.cache,l=k.deleteExpando,n=m.event.special;null!=(d=a[h]);h++)if((b||m.acceptData(d))&&(f=d[i],g=f&&j[f])){if(g.events)for(e in g.events)n[e]?m.event.remove(d,e):m.removeEvent(d,e,g.handle);j[f]&&(delete j[f],l?delete d[i]:typeof d.removeAttribute!==K?d.removeAttribute(i):d[i]=null,c.push(f))}}}),m.fn.extend({text:function(a){return V(this,function(a){return void 0===a?m.text(this):this.empty().append((this[0]&&this[0].ownerDocument||y).createTextNode(a))},null,a,arguments.length)},append:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=wb(this,a);b.appendChild(a)}})},prepend:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=wb(this,a);b.insertBefore(a,b.firstChild)}})},before:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this)})},after:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this.nextSibling)})},remove:function(a,b){for(var 
c,d=a?m.filter(a,this):this,e=0;null!=(c=d[e]);e++)b||1!==c.nodeType||m.cleanData(ub(c)),c.parentNode&&(b&&m.contains(c.ownerDocument,c)&&zb(ub(c,"script")),c.parentNode.removeChild(c));return this},empty:function(){for(var a,b=0;null!=(a=this[b]);b++){1===a.nodeType&&m.cleanData(ub(a,!1));while(a.firstChild)a.removeChild(a.firstChild);a.options&&m.nodeName(a,"select")&&(a.options.length=0)}return this},clone:function(a,b){return a=null==a?!1:a,b=null==b?a:b,this.map(function(){return m.clone(this,a,b)})},html:function(a){return V(this,function(a){var b=this[0]||{},c=0,d=this.length;if(void 0===a)return 1===b.nodeType?b.innerHTML.replace(fb,""):void 0;if(!("string"!=typeof a||mb.test(a)||!k.htmlSerialize&&gb.test(a)||!k.leadingWhitespace&&hb.test(a)||rb[(jb.exec(a)||["",""])[1].toLowerCase()])){a=a.replace(ib,"<$1></$2>");try{for(;d>c;c++)b=this[c]||{},1===b.nodeType&&(m.cleanData(ub(b,!1)),b.innerHTML=a);b=0}catch(e){}}b&&this.empty().append(a)},null,a,arguments.length)},replaceWith:function(){var a=arguments[0];return this.domManip(arguments,function(b){a=this.parentNode,m.cleanData(ub(this)),a&&a.replaceChild(b,this)}),a&&(a.length||a.nodeType)?this:this.remove()},detach:function(a){return this.remove(a,!0)},domManip:function(a,b){a=e.apply([],a);var c,d,f,g,h,i,j=0,l=this.length,n=this,o=l-1,p=a[0],q=m.isFunction(p);if(q||l>1&&"string"==typeof p&&!k.checkClone&&nb.test(p))return this.each(function(c){var d=n.eq(c);q&&(a[0]=p.call(this,c,d.html())),d.domManip(a,b)});if(l&&(i=m.buildFragment(a,this[0].ownerDocument,!1,this),c=i.firstChild,1===i.childNodes.length&&(i=c),c)){for(g=m.map(ub(i,"script"),xb),f=g.length;l>j;j++)d=i,j!==o&&(d=m.clone(d,!0,!0),f&&m.merge(g,ub(d,"script"))),b.call(this[j],d,j);if(f)for(h=g[g.length-1].ownerDocument,m.map(g,yb),j=0;f>j;j++)d=g[j],ob.test(d.type||"")&&!m._data(d,"globalEval")&&m.contains(h,d)&&(d.src?m._evalUrl&&m._evalUrl(d.src):m.globalEval((d.text||d.textContent||d.innerHTML||"").replace(qb,"")));i=c=null}return this}}),m.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){m.fn[a]=function(a){for(var c,d=0,e=[],g=m(a),h=g.length-1;h>=d;d++)c=d===h?this:this.clone(!0),m(g[d])[b](c),f.apply(e,c.get());return this.pushStack(e)}});var Cb,Db={};function Eb(b,c){var d,e=m(c.createElement(b)).appendTo(c.body),f=a.getDefaultComputedStyle&&(d=a.getDefaultComputedStyle(e[0]))?d.display:m.css(e[0],"display");return e.detach(),f}function Fb(a){var b=y,c=Db[a];return c||(c=Eb(a,b),"none"!==c&&c||(Cb=(Cb||m("<iframe frameborder='0' width='0' height='0'/>")).appendTo(b.documentElement),b=(Cb[0].contentWindow||Cb[0].contentDocument).document,b.write(),b.close(),c=Eb(a,b),Cb.detach()),Db[a]=c),c}!function(){var a;k.shrinkWrapBlocks=function(){if(null!=a)return a;a=!1;var b,c,d;return c=y.getElementsByTagName("body")[0],c&&c.style?(b=y.createElement("div"),d=y.createElement("div"),d.style.cssText="position:absolute;border:0;width:0;height:0;top:0;left:-9999px",c.appendChild(d).appendChild(b),typeof b.style.zoom!==K&&(b.style.cssText="-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;display:block;margin:0;border:0;padding:1px;width:1px;zoom:1",b.appendChild(y.createElement("div")).style.width="5px",a=3!==b.offsetWidth),c.removeChild(d),a):void 0}}();var Gb=/^margin/,Hb=new RegExp("^("+S+")(?!px)[a-z%]+$","i"),Ib,Jb,Kb=/^(top|right|bottom|left)$/;a.getComputedStyle?(Ib=function(a){return 
a.ownerDocument.defaultView.getComputedStyle(a,null)},Jb=function(a,b,c){var d,e,f,g,h=a.style;return c=c||Ib(a),g=c?c.getPropertyValue(b)||c[b]:void 0,c&&(""!==g||m.contains(a.ownerDocument,a)||(g=m.style(a,b)),Hb.test(g)&&Gb.test(b)&&(d=h.width,e=h.minWidth,f=h.maxWidth,h.minWidth=h.maxWidth=h.width=g,g=c.width,h.width=d,h.minWidth=e,h.maxWidth=f)),void 0===g?g:g+""}):y.documentElement.currentStyle&&(Ib=function(a){return a.currentStyle},Jb=function(a,b,c){var d,e,f,g,h=a.style;return c=c||Ib(a),g=c?c[b]:void 0,null==g&&h&&h[b]&&(g=h[b]),Hb.test(g)&&!Kb.test(b)&&(d=h.left,e=a.runtimeStyle,f=e&&e.left,f&&(e.left=a.currentStyle.left),h.left="fontSize"===b?"1em":g,g=h.pixelLeft+"px",h.left=d,f&&(e.left=f)),void 0===g?g:g+""||"auto"});function Lb(a,b){return{get:function(){var c=a();if(null!=c)return c?void delete this.get:(this.get=b).apply(this,arguments)}}}!function(){var b,c,d,e,f,g,h;if(b=y.createElement("div"),b.innerHTML="  <link/><table></table><a href='/a'>a</a><input type='checkbox'/>",d=b.getElementsByTagName("a")[0],c=d&&d.style){c.cssText="float:left;opacity:.5",k.opacity="0.5"===c.opacity,k.cssFloat=!!c.cssFloat,b.style.backgroundClip="content-box",b.cloneNode(!0).style.backgroundClip="",k.clearCloneStyle="content-box"===b.style.backgroundClip,k.boxSizing=""===c.boxSizing||""===c.MozBoxSizing||""===c.WebkitBoxSizing,m.extend(k,{reliableHiddenOffsets:function(){return null==g&&i(),g},boxSizingReliable:function(){return null==f&&i(),f},pixelPosition:function(){return null==e&&i(),e},reliableMarginRight:function(){return null==h&&i(),h}});function i(){var b,c,d,i;c=y.getElementsByTagName("body")[0],c&&c.style&&(b=y.createElement("div"),d=y.createElement("div"),d.style.cssText="position:absolute;border:0;width:0;height:0;top:0;left:-9999px",c.appendChild(d).appendChild(b),b.style.cssText="-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;display:block;margin-top:1%;top:1%;border:1px;padding:1px;width:4px;position:absolute",e=f=!1,h=!0,a.getComputedStyle&&(e="1%"!==(a.getComputedStyle(b,null)||{}).top,f="4px"===(a.getComputedStyle(b,null)||{width:"4px"}).width,i=b.appendChild(y.createElement("div")),i.style.cssText=b.style.cssText="-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;display:block;margin:0;border:0;padding:0",i.style.marginRight=i.style.width="0",b.style.width="1px",h=!parseFloat((a.getComputedStyle(i,null)||{}).marginRight)),b.innerHTML="<table><tr><td></td><td>t</td></tr></table>",i=b.getElementsByTagName("td"),i[0].style.cssText="margin:0;border:0;padding:0;display:none",g=0===i[0].offsetHeight,g&&(i[0].style.display="",i[1].style.display="none",g=0===i[0].offsetHeight),c.removeChild(d))}}}(),m.swap=function(a,b,c,d){var e,f,g={};for(f in b)g[f]=a.style[f],a.style[f]=b[f];e=c.apply(a,d||[]);for(f in b)a.style[f]=g[f];return e};var Mb=/alpha\([^)]*\)/i,Nb=/opacity\s*=\s*([^)]*)/,Ob=/^(none|table(?!-c[ea]).+)/,Pb=new RegExp("^("+S+")(.*)$","i"),Qb=new RegExp("^([+-])=("+S+")","i"),Rb={position:"absolute",visibility:"hidden",display:"block"},Sb={letterSpacing:"0",fontWeight:"400"},Tb=["Webkit","O","Moz","ms"];function Ub(a,b){if(b in a)return b;var c=b.charAt(0).toUpperCase()+b.slice(1),d=b,e=Tb.length;while(e--)if(b=Tb[e]+c,b in a)return b;return d}function Vb(a,b){for(var 
c,d,e,f=[],g=0,h=a.length;h>g;g++)d=a[g],d.style&&(f[g]=m._data(d,"olddisplay"),c=d.style.display,b?(f[g]||"none"!==c||(d.style.display=""),""===d.style.display&&U(d)&&(f[g]=m._data(d,"olddisplay",Fb(d.nodeName)))):(e=U(d),(c&&"none"!==c||!e)&&m._data(d,"olddisplay",e?c:m.css(d,"display"))));for(g=0;h>g;g++)d=a[g],d.style&&(b&&"none"!==d.style.display&&""!==d.style.display||(d.style.display=b?f[g]||"":"none"));return a}function Wb(a,b,c){var d=Pb.exec(b);return d?Math.max(0,d[1]-(c||0))+(d[2]||"px"):b}function Xb(a,b,c,d,e){for(var f=c===(d?"border":"content")?4:"width"===b?1:0,g=0;4>f;f+=2)"margin"===c&&(g+=m.css(a,c+T[f],!0,e)),d?("content"===c&&(g-=m.css(a,"padding"+T[f],!0,e)),"margin"!==c&&(g-=m.css(a,"border"+T[f]+"Width",!0,e))):(g+=m.css(a,"padding"+T[f],!0,e),"padding"!==c&&(g+=m.css(a,"border"+T[f]+"Width",!0,e)));return g}function Yb(a,b,c){var d=!0,e="width"===b?a.offsetWidth:a.offsetHeight,f=Ib(a),g=k.boxSizing&&"border-box"===m.css(a,"boxSizing",!1,f);if(0>=e||null==e){if(e=Jb(a,b,f),(0>e||null==e)&&(e=a.style[b]),Hb.test(e))return e;d=g&&(k.boxSizingReliable()||e===a.style[b]),e=parseFloat(e)||0}return e+Xb(a,b,c||(g?"border":"content"),d,f)+"px"}m.extend({cssHooks:{opacity:{get:function(a,b){if(b){var c=Jb(a,"opacity");return""===c?"1":c}}}},cssNumber:{columnCount:!0,fillOpacity:!0,flexGrow:!0,flexShrink:!0,fontWeight:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{"float":k.cssFloat?"cssFloat":"styleFloat"},style:function(a,b,c,d){if(a&&3!==a.nodeType&&8!==a.nodeType&&a.style){var e,f,g,h=m.camelCase(b),i=a.style;if(b=m.cssProps[h]||(m.cssProps[h]=Ub(i,h)),g=m.cssHooks[b]||m.cssHooks[h],void 0===c)return g&&"get"in g&&void 0!==(e=g.get(a,!1,d))?e:i[b];if(f=typeof c,"string"===f&&(e=Qb.exec(c))&&(c=(e[1]+1)*e[2]+parseFloat(m.css(a,b)),f="number"),null!=c&&c===c&&("number"!==f||m.cssNumber[h]||(c+="px"),k.clearCloneStyle||""!==c||0!==b.indexOf("background")||(i[b]="inherit"),!(g&&"set"in g&&void 0===(c=g.set(a,c,d)))))try{i[b]=c}catch(j){}}},css:function(a,b,c,d){var e,f,g,h=m.camelCase(b);return b=m.cssProps[h]||(m.cssProps[h]=Ub(a.style,h)),g=m.cssHooks[b]||m.cssHooks[h],g&&"get"in g&&(f=g.get(a,!0,c)),void 0===f&&(f=Jb(a,b,d)),"normal"===f&&b in Sb&&(f=Sb[b]),""===c||c?(e=parseFloat(f),c===!0||m.isNumeric(e)?e||0:f):f}}),m.each(["height","width"],function(a,b){m.cssHooks[b]={get:function(a,c,d){return c?Ob.test(m.css(a,"display"))&&0===a.offsetWidth?m.swap(a,Rb,function(){return Yb(a,b,d)}):Yb(a,b,d):void 0},set:function(a,c,d){var e=d&&Ib(a);return Wb(a,c,d?Xb(a,b,d,k.boxSizing&&"border-box"===m.css(a,"boxSizing",!1,e),e):0)}}}),k.opacity||(m.cssHooks.opacity={get:function(a,b){return Nb.test((b&&a.currentStyle?a.currentStyle.filter:a.style.filter)||"")?.01*parseFloat(RegExp.$1)+"":b?"1":""},set:function(a,b){var c=a.style,d=a.currentStyle,e=m.isNumeric(b)?"alpha(opacity="+100*b+")":"",f=d&&d.filter||c.filter||"";c.zoom=1,(b>=1||""===b)&&""===m.trim(f.replace(Mb,""))&&c.removeAttribute&&(c.removeAttribute("filter"),""===b||d&&!d.filter)||(c.filter=Mb.test(f)?f.replace(Mb,e):f+" "+e)}}),m.cssHooks.marginRight=Lb(k.reliableMarginRight,function(a,b){return b?m.swap(a,{display:"inline-block"},Jb,[a,"marginRight"]):void 0}),m.each({margin:"",padding:"",border:"Width"},function(a,b){m.cssHooks[a+b]={expand:function(c){for(var d=0,e={},f="string"==typeof c?c.split(" "):[c];4>d;d++)e[a+T[d]+b]=f[d]||f[d-2]||f[0];return e}},Gb.test(a)||(m.cssHooks[a+b].set=Wb)}),m.fn.extend({css:function(a,b){return V(this,function(a,b,c){var 
d,e,f={},g=0;if(m.isArray(b)){for(d=Ib(a),e=b.length;e>g;g++)f[b[g]]=m.css(a,b[g],!1,d);return f}return void 0!==c?m.style(a,b,c):m.css(a,b)},a,b,arguments.length>1)},show:function(){return Vb(this,!0)},hide:function(){return Vb(this)},toggle:function(a){return"boolean"==typeof a?a?this.show():this.hide():this.each(function(){U(this)?m(this).show():m(this).hide()})}});function Zb(a,b,c,d,e){return new Zb.prototype.init(a,b,c,d,e)}m.Tween=Zb,Zb.prototype={constructor:Zb,init:function(a,b,c,d,e,f){this.elem=a,this.prop=c,this.easing=e||"swing",this.options=b,this.start=this.now=this.cur(),this.end=d,this.unit=f||(m.cssNumber[c]?"":"px")
+},cur:function(){var a=Zb.propHooks[this.prop];return a&&a.get?a.get(this):Zb.propHooks._default.get(this)},run:function(a){var b,c=Zb.propHooks[this.prop];return this.pos=b=this.options.duration?m.easing[this.easing](a,this.options.duration*a,0,1,this.options.duration):a,this.now=(this.end-this.start)*b+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),c&&c.set?c.set(this):Zb.propHooks._default.set(this),this}},Zb.prototype.init.prototype=Zb.prototype,Zb.propHooks={_default:{get:function(a){var b;return null==a.elem[a.prop]||a.elem.style&&null!=a.elem.style[a.prop]?(b=m.css(a.elem,a.prop,""),b&&"auto"!==b?b:0):a.elem[a.prop]},set:function(a){m.fx.step[a.prop]?m.fx.step[a.prop](a):a.elem.style&&(null!=a.elem.style[m.cssProps[a.prop]]||m.cssHooks[a.prop])?m.style(a.elem,a.prop,a.now+a.unit):a.elem[a.prop]=a.now}}},Zb.propHooks.scrollTop=Zb.propHooks.scrollLeft={set:function(a){a.elem.nodeType&&a.elem.parentNode&&(a.elem[a.prop]=a.now)}},m.easing={linear:function(a){return a},swing:function(a){return.5-Math.cos(a*Math.PI)/2}},m.fx=Zb.prototype.init,m.fx.step={};var $b,_b,ac=/^(?:toggle|show|hide)$/,bc=new RegExp("^(?:([+-])=|)("+S+")([a-z%]*)$","i"),cc=/queueHooks$/,dc=[ic],ec={"*":[function(a,b){var c=this.createTween(a,b),d=c.cur(),e=bc.exec(b),f=e&&e[3]||(m.cssNumber[a]?"":"px"),g=(m.cssNumber[a]||"px"!==f&&+d)&&bc.exec(m.css(c.elem,a)),h=1,i=20;if(g&&g[3]!==f){f=f||g[3],e=e||[],g=+d||1;do h=h||".5",g/=h,m.style(c.elem,a,g+f);while(h!==(h=c.cur()/d)&&1!==h&&--i)}return e&&(g=c.start=+g||+d||0,c.unit=f,c.end=e[1]?g+(e[1]+1)*e[2]:+e[2]),c}]};function fc(){return setTimeout(function(){$b=void 0}),$b=m.now()}function gc(a,b){var c,d={height:a},e=0;for(b=b?1:0;4>e;e+=2-b)c=T[e],d["margin"+c]=d["padding"+c]=a;return b&&(d.opacity=d.width=a),d}function hc(a,b,c){for(var d,e=(ec[b]||[]).concat(ec["*"]),f=0,g=e.length;g>f;f++)if(d=e[f].call(c,b,a))return d}function ic(a,b,c){var d,e,f,g,h,i,j,l,n=this,o={},p=a.style,q=a.nodeType&&U(a),r=m._data(a,"fxshow");c.queue||(h=m._queueHooks(a,"fx"),null==h.unqueued&&(h.unqueued=0,i=h.empty.fire,h.empty.fire=function(){h.unqueued||i()}),h.unqueued++,n.always(function(){n.always(function(){h.unqueued--,m.queue(a,"fx").length||h.empty.fire()})})),1===a.nodeType&&("height"in b||"width"in b)&&(c.overflow=[p.overflow,p.overflowX,p.overflowY],j=m.css(a,"display"),l="none"===j?m._data(a,"olddisplay")||Fb(a.nodeName):j,"inline"===l&&"none"===m.css(a,"float")&&(k.inlineBlockNeedsLayout&&"inline"!==Fb(a.nodeName)?p.zoom=1:p.display="inline-block")),c.overflow&&(p.overflow="hidden",k.shrinkWrapBlocks()||n.always(function(){p.overflow=c.overflow[0],p.overflowX=c.overflow[1],p.overflowY=c.overflow[2]}));for(d in b)if(e=b[d],ac.exec(e)){if(delete b[d],f=f||"toggle"===e,e===(q?"hide":"show")){if("show"!==e||!r||void 0===r[d])continue;q=!0}o[d]=r&&r[d]||m.style(a,d)}else j=void 0;if(m.isEmptyObject(o))"inline"===("none"===j?Fb(a.nodeName):j)&&(p.display=j);else{r?"hidden"in r&&(q=r.hidden):r=m._data(a,"fxshow",{}),f&&(r.hidden=!q),q?m(a).show():n.done(function(){m(a).hide()}),n.done(function(){var b;m._removeData(a,"fxshow");for(b in o)m.style(a,b,o[b])});for(d in o)g=hc(q?r[d]:0,d,n),d in r||(r[d]=g.start,q&&(g.end=g.start,g.start="width"===d||"height"===d?1:0))}}function jc(a,b){var c,d,e,f,g;for(c in a)if(d=m.camelCase(c),e=b[d],f=a[c],m.isArray(f)&&(e=f[1],f=a[c]=f[0]),c!==d&&(a[d]=f,delete a[c]),g=m.cssHooks[d],g&&"expand"in g){f=g.expand(f),delete a[d];for(c in f)c in a||(a[c]=f[c],b[c]=e)}else b[d]=e}function kc(a,b,c){var 
d,e,f=0,g=dc.length,h=m.Deferred().always(function(){delete i.elem}),i=function(){if(e)return!1;for(var b=$b||fc(),c=Math.max(0,j.startTime+j.duration-b),d=c/j.duration||0,f=1-d,g=0,i=j.tweens.length;i>g;g++)j.tweens[g].run(f);return h.notifyWith(a,[j,f,c]),1>f&&i?c:(h.resolveWith(a,[j]),!1)},j=h.promise({elem:a,props:m.extend({},b),opts:m.extend(!0,{specialEasing:{}},c),originalProperties:b,originalOptions:c,startTime:$b||fc(),duration:c.duration,tweens:[],createTween:function(b,c){var d=m.Tween(a,j.opts,b,c,j.opts.specialEasing[b]||j.opts.easing);return j.tweens.push(d),d},stop:function(b){var c=0,d=b?j.tweens.length:0;if(e)return this;for(e=!0;d>c;c++)j.tweens[c].run(1);return b?h.resolveWith(a,[j,b]):h.rejectWith(a,[j,b]),this}}),k=j.props;for(jc(k,j.opts.specialEasing);g>f;f++)if(d=dc[f].call(j,a,k,j.opts))return d;return m.map(k,hc,j),m.isFunction(j.opts.start)&&j.opts.start.call(a,j),m.fx.timer(m.extend(i,{elem:a,anim:j,queue:j.opts.queue})),j.progress(j.opts.progress).done(j.opts.done,j.opts.complete).fail(j.opts.fail).always(j.opts.always)}m.Animation=m.extend(kc,{tweener:function(a,b){m.isFunction(a)?(b=a,a=["*"]):a=a.split(" ");for(var c,d=0,e=a.length;e>d;d++)c=a[d],ec[c]=ec[c]||[],ec[c].unshift(b)},prefilter:function(a,b){b?dc.unshift(a):dc.push(a)}}),m.speed=function(a,b,c){var d=a&&"object"==typeof a?m.extend({},a):{complete:c||!c&&b||m.isFunction(a)&&a,duration:a,easing:c&&b||b&&!m.isFunction(b)&&b};return d.duration=m.fx.off?0:"number"==typeof d.duration?d.duration:d.duration in m.fx.speeds?m.fx.speeds[d.duration]:m.fx.speeds._default,(null==d.queue||d.queue===!0)&&(d.queue="fx"),d.old=d.complete,d.complete=function(){m.isFunction(d.old)&&d.old.call(this),d.queue&&m.dequeue(this,d.queue)},d},m.fn.extend({fadeTo:function(a,b,c,d){return this.filter(U).css("opacity",0).show().end().animate({opacity:b},a,c,d)},animate:function(a,b,c,d){var e=m.isEmptyObject(a),f=m.speed(b,c,d),g=function(){var b=kc(this,m.extend({},a),f);(e||m._data(this,"finish"))&&b.stop(!0)};return g.finish=g,e||f.queue===!1?this.each(g):this.queue(f.queue,g)},stop:function(a,b,c){var d=function(a){var b=a.stop;delete a.stop,b(c)};return"string"!=typeof a&&(c=b,b=a,a=void 0),b&&a!==!1&&this.queue(a||"fx",[]),this.each(function(){var b=!0,e=null!=a&&a+"queueHooks",f=m.timers,g=m._data(this);if(e)g[e]&&g[e].stop&&d(g[e]);else for(e in g)g[e]&&g[e].stop&&cc.test(e)&&d(g[e]);for(e=f.length;e--;)f[e].elem!==this||null!=a&&f[e].queue!==a||(f[e].anim.stop(c),b=!1,f.splice(e,1));(b||!c)&&m.dequeue(this,a)})},finish:function(a){return a!==!1&&(a=a||"fx"),this.each(function(){var b,c=m._data(this),d=c[a+"queue"],e=c[a+"queueHooks"],f=m.timers,g=d?d.length:0;for(c.finish=!0,m.queue(this,a,[]),e&&e.stop&&e.stop.call(this,!0),b=f.length;b--;)f[b].elem===this&&f[b].queue===a&&(f[b].anim.stop(!0),f.splice(b,1));for(b=0;g>b;b++)d[b]&&d[b].finish&&d[b].finish.call(this);delete c.finish})}}),m.each(["toggle","show","hide"],function(a,b){var c=m.fn[b];m.fn[b]=function(a,d,e){return null==a||"boolean"==typeof a?c.apply(this,arguments):this.animate(gc(b,!0),a,d,e)}}),m.each({slideDown:gc("show"),slideUp:gc("hide"),slideToggle:gc("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(a,b){m.fn[a]=function(a,c,d){return this.animate(b,a,c,d)}}),m.timers=[],m.fx.tick=function(){var a,b=m.timers,c=0;for($b=m.now();c<b.length;c++)a=b[c],a()||b[c]!==a||b.splice(c--,1);b.length||m.fx.stop(),$b=void 
0},m.fx.timer=function(a){m.timers.push(a),a()?m.fx.start():m.timers.pop()},m.fx.interval=13,m.fx.start=function(){_b||(_b=setInterval(m.fx.tick,m.fx.interval))},m.fx.stop=function(){clearInterval(_b),_b=null},m.fx.speeds={slow:600,fast:200,_default:400},m.fn.delay=function(a,b){return a=m.fx?m.fx.speeds[a]||a:a,b=b||"fx",this.queue(b,function(b,c){var d=setTimeout(b,a);c.stop=function(){clearTimeout(d)}})},function(){var a,b,c,d,e;b=y.createElement("div"),b.setAttribute("className","t"),b.innerHTML="  <link/><table></table><a href='/a'>a</a><input type='checkbox'/>",d=b.getElementsByTagName("a")[0],c=y.createElement("select"),e=c.appendChild(y.createElement("option")),a=b.getElementsByTagName("input")[0],d.style.cssText="top:1px",k.getSetAttribute="t"!==b.className,k.style=/top/.test(d.getAttribute("style")),k.hrefNormalized="/a"===d.getAttribute("href"),k.checkOn=!!a.value,k.optSelected=e.selected,k.enctype=!!y.createElement("form").enctype,c.disabled=!0,k.optDisabled=!e.disabled,a=y.createElement("input"),a.setAttribute("value",""),k.input=""===a.getAttribute("value"),a.value="t",a.setAttribute("type","radio"),k.radioValue="t"===a.value}();var lc=/\r/g;m.fn.extend({val:function(a){var b,c,d,e=this[0];{if(arguments.length)return d=m.isFunction(a),this.each(function(c){var e;1===this.nodeType&&(e=d?a.call(this,c,m(this).val()):a,null==e?e="":"number"==typeof e?e+="":m.isArray(e)&&(e=m.map(e,function(a){return null==a?"":a+""})),b=m.valHooks[this.type]||m.valHooks[this.nodeName.toLowerCase()],b&&"set"in b&&void 0!==b.set(this,e,"value")||(this.value=e))});if(e)return b=m.valHooks[e.type]||m.valHooks[e.nodeName.toLowerCase()],b&&"get"in b&&void 0!==(c=b.get(e,"value"))?c:(c=e.value,"string"==typeof c?c.replace(lc,""):null==c?"":c)}}}),m.extend({valHooks:{option:{get:function(a){var b=m.find.attr(a,"value");return null!=b?b:m.trim(m.text(a))}},select:{get:function(a){for(var b,c,d=a.options,e=a.selectedIndex,f="select-one"===a.type||0>e,g=f?null:[],h=f?e+1:d.length,i=0>e?h:f?e:0;h>i;i++)if(c=d[i],!(!c.selected&&i!==e||(k.optDisabled?c.disabled:null!==c.getAttribute("disabled"))||c.parentNode.disabled&&m.nodeName(c.parentNode,"optgroup"))){if(b=m(c).val(),f)return b;g.push(b)}return g},set:function(a,b){var c,d,e=a.options,f=m.makeArray(b),g=e.length;while(g--)if(d=e[g],m.inArray(m.valHooks.option.get(d),f)>=0)try{d.selected=c=!0}catch(h){d.scrollHeight}else d.selected=!1;return c||(a.selectedIndex=-1),e}}}}),m.each(["radio","checkbox"],function(){m.valHooks[this]={set:function(a,b){return m.isArray(b)?a.checked=m.inArray(m(a).val(),b)>=0:void 0}},k.checkOn||(m.valHooks[this].get=function(a){return null===a.getAttribute("value")?"on":a.value})});var mc,nc,oc=m.expr.attrHandle,pc=/^(?:checked|selected)$/i,qc=k.getSetAttribute,rc=k.input;m.fn.extend({attr:function(a,b){return V(this,m.attr,a,b,arguments.length>1)},removeAttr:function(a){return this.each(function(){m.removeAttr(this,a)})}}),m.extend({attr:function(a,b,c){var d,e,f=a.nodeType;if(a&&3!==f&&8!==f&&2!==f)return typeof a.getAttribute===K?m.prop(a,b,c):(1===f&&m.isXMLDoc(a)||(b=b.toLowerCase(),d=m.attrHooks[b]||(m.expr.match.bool.test(b)?nc:mc)),void 0===c?d&&"get"in d&&null!==(e=d.get(a,b))?e:(e=m.find.attr(a,b),null==e?void 0:e):null!==c?d&&"set"in d&&void 0!==(e=d.set(a,c,b))?e:(a.setAttribute(b,c+""),c):void m.removeAttr(a,b))},removeAttr:function(a,b){var 
c,d,e=0,f=b&&b.match(E);if(f&&1===a.nodeType)while(c=f[e++])d=m.propFix[c]||c,m.expr.match.bool.test(c)?rc&&qc||!pc.test(c)?a[d]=!1:a[m.camelCase("default-"+c)]=a[d]=!1:m.attr(a,c,""),a.removeAttribute(qc?c:d)},attrHooks:{type:{set:function(a,b){if(!k.radioValue&&"radio"===b&&m.nodeName(a,"input")){var c=a.value;return a.setAttribute("type",b),c&&(a.value=c),b}}}}}),nc={set:function(a,b,c){return b===!1?m.removeAttr(a,c):rc&&qc||!pc.test(c)?a.setAttribute(!qc&&m.propFix[c]||c,c):a[m.camelCase("default-"+c)]=a[c]=!0,c}},m.each(m.expr.match.bool.source.match(/\w+/g),function(a,b){var c=oc[b]||m.find.attr;oc[b]=rc&&qc||!pc.test(b)?function(a,b,d){var e,f;return d||(f=oc[b],oc[b]=e,e=null!=c(a,b,d)?b.toLowerCase():null,oc[b]=f),e}:function(a,b,c){return c?void 0:a[m.camelCase("default-"+b)]?b.toLowerCase():null}}),rc&&qc||(m.attrHooks.value={set:function(a,b,c){return m.nodeName(a,"input")?void(a.defaultValue=b):mc&&mc.set(a,b,c)}}),qc||(mc={set:function(a,b,c){var d=a.getAttributeNode(c);return d||a.setAttributeNode(d=a.ownerDocument.createAttribute(c)),d.value=b+="","value"===c||b===a.getAttribute(c)?b:void 0}},oc.id=oc.name=oc.coords=function(a,b,c){var d;return c?void 0:(d=a.getAttributeNode(b))&&""!==d.value?d.value:null},m.valHooks.button={get:function(a,b){var c=a.getAttributeNode(b);return c&&c.specified?c.value:void 0},set:mc.set},m.attrHooks.contenteditable={set:function(a,b,c){mc.set(a,""===b?!1:b,c)}},m.each(["width","height"],function(a,b){m.attrHooks[b]={set:function(a,c){return""===c?(a.setAttribute(b,"auto"),c):void 0}}})),k.style||(m.attrHooks.style={get:function(a){return a.style.cssText||void 0},set:function(a,b){return a.style.cssText=b+""}});var sc=/^(?:input|select|textarea|button|object)$/i,tc=/^(?:a|area)$/i;m.fn.extend({prop:function(a,b){return V(this,m.prop,a,b,arguments.length>1)},removeProp:function(a){return a=m.propFix[a]||a,this.each(function(){try{this[a]=void 0,delete this[a]}catch(b){}})}}),m.extend({propFix:{"for":"htmlFor","class":"className"},prop:function(a,b,c){var d,e,f,g=a.nodeType;if(a&&3!==g&&8!==g&&2!==g)return f=1!==g||!m.isXMLDoc(a),f&&(b=m.propFix[b]||b,e=m.propHooks[b]),void 0!==c?e&&"set"in e&&void 0!==(d=e.set(a,c,b))?d:a[b]=c:e&&"get"in e&&null!==(d=e.get(a,b))?d:a[b]},propHooks:{tabIndex:{get:function(a){var b=m.find.attr(a,"tabindex");return b?parseInt(b,10):sc.test(a.nodeName)||tc.test(a.nodeName)&&a.href?0:-1}}}}),k.hrefNormalized||m.each(["href","src"],function(a,b){m.propHooks[b]={get:function(a){return a.getAttribute(b,4)}}}),k.optSelected||(m.propHooks.selected={get:function(a){var b=a.parentNode;return b&&(b.selectedIndex,b.parentNode&&b.parentNode.selectedIndex),null}}),m.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){m.propFix[this.toLowerCase()]=this}),k.enctype||(m.propFix.enctype="encoding");var uc=/[\t\r\n\f]/g;m.fn.extend({addClass:function(a){var b,c,d,e,f,g,h=0,i=this.length,j="string"==typeof a&&a;if(m.isFunction(a))return this.each(function(b){m(this).addClass(a.call(this,b,this.className))});if(j)for(b=(a||"").match(E)||[];i>h;h++)if(c=this[h],d=1===c.nodeType&&(c.className?(" "+c.className+" ").replace(uc," "):" ")){f=0;while(e=b[f++])d.indexOf(" "+e+" ")<0&&(d+=e+" ");g=m.trim(d),c.className!==g&&(c.className=g)}return this},removeClass:function(a){var b,c,d,e,f,g,h=0,i=this.length,j=0===arguments.length||"string"==typeof a&&a;if(m.isFunction(a))return 
this.each(function(b){m(this).removeClass(a.call(this,b,this.className))});if(j)for(b=(a||"").match(E)||[];i>h;h++)if(c=this[h],d=1===c.nodeType&&(c.className?(" "+c.className+" ").replace(uc," "):"")){f=0;while(e=b[f++])while(d.indexOf(" "+e+" ")>=0)d=d.replace(" "+e+" "," ");g=a?m.trim(d):"",c.className!==g&&(c.className=g)}return this},toggleClass:function(a,b){var c=typeof a;return"boolean"==typeof b&&"string"===c?b?this.addClass(a):this.removeClass(a):this.each(m.isFunction(a)?function(c){m(this).toggleClass(a.call(this,c,this.className,b),b)}:function(){if("string"===c){var b,d=0,e=m(this),f=a.match(E)||[];while(b=f[d++])e.hasClass(b)?e.removeClass(b):e.addClass(b)}else(c===K||"boolean"===c)&&(this.className&&m._data(this,"__className__",this.className),this.className=this.className||a===!1?"":m._data(this,"__className__")||"")})},hasClass:function(a){for(var b=" "+a+" ",c=0,d=this.length;d>c;c++)if(1===this[c].nodeType&&(" "+this[c].className+" ").replace(uc," ").indexOf(b)>=0)return!0;return!1}}),m.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error contextmenu".split(" "),function(a,b){m.fn[b]=function(a,c){return arguments.length>0?this.on(b,null,a,c):this.trigger(b)}}),m.fn.extend({hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)},bind:function(a,b,c){return this.on(a,null,b,c)},unbind:function(a,b){return this.off(a,null,b)},delegate:function(a,b,c,d){return this.on(b,a,c,d)},undelegate:function(a,b,c){return 1===arguments.length?this.off(a,"**"):this.off(b,a||"**",c)}});var vc=m.now(),wc=/\?/,xc=/(,)|(\[|{)|(}|])|"(?:[^"\\\r\n]|\\["\\\/bfnrt]|\\u[\da-fA-F]{4})*"\s*:?|true|false|null|-?(?!0\d)\d+(?:\.\d+|)(?:[eE][+-]?\d+|)/g;m.parseJSON=function(b){if(a.JSON&&a.JSON.parse)return a.JSON.parse(b+"");var c,d=null,e=m.trim(b+"");return e&&!m.trim(e.replace(xc,function(a,b,e,f){return c&&b&&(d=0),0===d?a:(c=e||b,d+=!f-!e,"")}))?Function("return "+e)():m.error("Invalid JSON: "+b)},m.parseXML=function(b){var c,d;if(!b||"string"!=typeof b)return null;try{a.DOMParser?(d=new DOMParser,c=d.parseFromString(b,"text/xml")):(c=new ActiveXObject("Microsoft.XMLDOM"),c.async="false",c.loadXML(b))}catch(e){c=void 0}return c&&c.documentElement&&!c.getElementsByTagName("parsererror").length||m.error("Invalid XML: "+b),c};var yc,zc,Ac=/#.*$/,Bc=/([?&])_=[^&]*/,Cc=/^(.*?):[ \t]*([^\r\n]*)\r?$/gm,Dc=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,Ec=/^(?:GET|HEAD)$/,Fc=/^\/\//,Gc=/^([\w.+-]+:)(?:\/\/(?:[^\/?#]*@|)([^\/?#:]*)(?::(\d+)|)|)/,Hc={},Ic={},Jc="*/".concat("*");try{zc=location.href}catch(Kc){zc=y.createElement("a"),zc.href="",zc=zc.href}yc=Gc.exec(zc.toLowerCase())||[];function Lc(a){return function(b,c){"string"!=typeof b&&(c=b,b="*");var d,e=0,f=b.toLowerCase().match(E)||[];if(m.isFunction(c))while(d=f[e++])"+"===d.charAt(0)?(d=d.slice(1)||"*",(a[d]=a[d]||[]).unshift(c)):(a[d]=a[d]||[]).push(c)}}function Mc(a,b,c,d){var e={},f=a===Ic;function g(h){var i;return e[h]=!0,m.each(a[h]||[],function(a,h){var j=h(b,c,d);return"string"!=typeof j||f||e[j]?f?!(i=j):void 0:(b.dataTypes.unshift(j),g(j),!1)}),i}return g(b.dataTypes[0])||!e["*"]&&g("*")}function Nc(a,b){var c,d,e=m.ajaxSettings.flatOptions||{};for(d in b)void 0!==b[d]&&((e[d]?a:c||(c={}))[d]=b[d]);return c&&m.extend(!0,a,c),a}function Oc(a,b,c){var d,e,f,g,h=a.contents,i=a.dataTypes;while("*"===i[0])i.shift(),void 
0===e&&(e=a.mimeType||b.getResponseHeader("Content-Type"));if(e)for(g in h)if(h[g]&&h[g].test(e)){i.unshift(g);break}if(i[0]in c)f=i[0];else{for(g in c){if(!i[0]||a.converters[g+" "+i[0]]){f=g;break}d||(d=g)}f=f||d}return f?(f!==i[0]&&i.unshift(f),c[f]):void 0}function Pc(a,b,c,d){var e,f,g,h,i,j={},k=a.dataTypes.slice();if(k[1])for(g in a.converters)j[g.toLowerCase()]=a.converters[g];f=k.shift();while(f)if(a.responseFields[f]&&(c[a.responseFields[f]]=b),!i&&d&&a.dataFilter&&(b=a.dataFilter(b,a.dataType)),i=f,f=k.shift())if("*"===f)f=i;else if("*"!==i&&i!==f){if(g=j[i+" "+f]||j["* "+f],!g)for(e in j)if(h=e.split(" "),h[1]===f&&(g=j[i+" "+h[0]]||j["* "+h[0]])){g===!0?g=j[e]:j[e]!==!0&&(f=h[0],k.unshift(h[1]));break}if(g!==!0)if(g&&a["throws"])b=g(b);else try{b=g(b)}catch(l){return{state:"parsererror",error:g?l:"No conversion from "+i+" to "+f}}}return{state:"success",data:b}}m.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:zc,type:"GET",isLocal:Dc.test(yc[1]),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":Jc,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/xml/,html:/html/,json:/json/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":m.parseJSON,"text xml":m.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(a,b){return b?Nc(Nc(a,m.ajaxSettings),b):Nc(m.ajaxSettings,a)},ajaxPrefilter:Lc(Hc),ajaxTransport:Lc(Ic),ajax:function(a,b){"object"==typeof a&&(b=a,a=void 0),b=b||{};var c,d,e,f,g,h,i,j,k=m.ajaxSetup({},b),l=k.context||k,n=k.context&&(l.nodeType||l.jquery)?m(l):m.event,o=m.Deferred(),p=m.Callbacks("once memory"),q=k.statusCode||{},r={},s={},t=0,u="canceled",v={readyState:0,getResponseHeader:function(a){var b;if(2===t){if(!j){j={};while(b=Cc.exec(f))j[b[1].toLowerCase()]=b[2]}b=j[a.toLowerCase()]}return null==b?null:b},getAllResponseHeaders:function(){return 2===t?f:null},setRequestHeader:function(a,b){var c=a.toLowerCase();return t||(a=s[c]=s[c]||a,r[a]=b),this},overrideMimeType:function(a){return t||(k.mimeType=a),this},statusCode:function(a){var b;if(a)if(2>t)for(b in a)q[b]=[q[b],a[b]];else v.always(a[v.status]);return this},abort:function(a){var b=a||u;return i&&i.abort(b),x(0,b),this}};if(o.promise(v).complete=p.add,v.success=v.done,v.error=v.fail,k.url=((a||k.url||zc)+"").replace(Ac,"").replace(Fc,yc[1]+"//"),k.type=b.method||b.type||k.method||k.type,k.dataTypes=m.trim(k.dataType||"*").toLowerCase().match(E)||[""],null==k.crossDomain&&(c=Gc.exec(k.url.toLowerCase()),k.crossDomain=!(!c||c[1]===yc[1]&&c[2]===yc[2]&&(c[3]||("http:"===c[1]?"80":"443"))===(yc[3]||("http:"===yc[1]?"80":"443")))),k.data&&k.processData&&"string"!=typeof k.data&&(k.data=m.param(k.data,k.traditional)),Mc(Hc,k,b,v),2===t)return v;h=k.global,h&&0===m.active++&&m.event.trigger("ajaxStart"),k.type=k.type.toUpperCase(),k.hasContent=!Ec.test(k.type),e=k.url,k.hasContent||(k.data&&(e=k.url+=(wc.test(e)?"&":"?")+k.data,delete 
k.data),k.cache===!1&&(k.url=Bc.test(e)?e.replace(Bc,"$1_="+vc++):e+(wc.test(e)?"&":"?")+"_="+vc++)),k.ifModified&&(m.lastModified[e]&&v.setRequestHeader("If-Modified-Since",m.lastModified[e]),m.etag[e]&&v.setRequestHeader("If-None-Match",m.etag[e])),(k.data&&k.hasContent&&k.contentType!==!1||b.contentType)&&v.setRequestHeader("Content-Type",k.contentType),v.setRequestHeader("Accept",k.dataTypes[0]&&k.accepts[k.dataTypes[0]]?k.accepts[k.dataTypes[0]]+("*"!==k.dataTypes[0]?", "+Jc+"; q=0.01":""):k.accepts["*"]);for(d in k.headers)v.setRequestHeader(d,k.headers[d]);if(k.beforeSend&&(k.beforeSend.call(l,v,k)===!1||2===t))return v.abort();u="abort";for(d in{success:1,error:1,complete:1})v[d](k[d]);if(i=Mc(Ic,k,b,v)){v.readyState=1,h&&n.trigger("ajaxSend",[v,k]),k.async&&k.timeout>0&&(g=setTimeout(function(){v.abort("timeout")},k.timeout));try{t=1,i.send(r,x)}catch(w){if(!(2>t))throw w;x(-1,w)}}else x(-1,"No Transport");function x(a,b,c,d){var j,r,s,u,w,x=b;2!==t&&(t=2,g&&clearTimeout(g),i=void 0,f=d||"",v.readyState=a>0?4:0,j=a>=200&&300>a||304===a,c&&(u=Oc(k,v,c)),u=Pc(k,u,v,j),j?(k.ifModified&&(w=v.getResponseHeader("Last-Modified"),w&&(m.lastModified[e]=w),w=v.getResponseHeader("etag"),w&&(m.etag[e]=w)),204===a||"HEAD"===k.type?x="nocontent":304===a?x="notmodified":(x=u.state,r=u.data,s=u.error,j=!s)):(s=x,(a||!x)&&(x="error",0>a&&(a=0))),v.status=a,v.statusText=(b||x)+"",j?o.resolveWith(l,[r,x,v]):o.rejectWith(l,[v,x,s]),v.statusCode(q),q=void 0,h&&n.trigger(j?"ajaxSuccess":"ajaxError",[v,k,j?r:s]),p.fireWith(l,[v,x]),h&&(n.trigger("ajaxComplete",[v,k]),--m.active||m.event.trigger("ajaxStop")))}return v},getJSON:function(a,b,c){return m.get(a,b,c,"json")},getScript:function(a,b){return m.get(a,void 0,b,"script")}}),m.each(["get","post"],function(a,b){m[b]=function(a,c,d,e){return m.isFunction(c)&&(e=e||d,d=c,c=void 0),m.ajax({url:a,type:b,dataType:e,data:c,success:d})}}),m.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(a,b){m.fn[b]=function(a){return this.on(b,a)}}),m._evalUrl=function(a){return m.ajax({url:a,type:"GET",dataType:"script",async:!1,global:!1,"throws":!0})},m.fn.extend({wrapAll:function(a){if(m.isFunction(a))return this.each(function(b){m(this).wrapAll(a.call(this,b))});if(this[0]){var b=m(a,this[0].ownerDocument).eq(0).clone(!0);this[0].parentNode&&b.insertBefore(this[0]),b.map(function(){var a=this;while(a.firstChild&&1===a.firstChild.nodeType)a=a.firstChild;return a}).append(this)}return this},wrapInner:function(a){return this.each(m.isFunction(a)?function(b){m(this).wrapInner(a.call(this,b))}:function(){var b=m(this),c=b.contents();c.length?c.wrapAll(a):b.append(a)})},wrap:function(a){var b=m.isFunction(a);return this.each(function(c){m(this).wrapAll(b?a.call(this,c):a)})},unwrap:function(){return this.parent().each(function(){m.nodeName(this,"body")||m(this).replaceWith(this.childNodes)}).end()}}),m.expr.filters.hidden=function(a){return a.offsetWidth<=0&&a.offsetHeight<=0||!k.reliableHiddenOffsets()&&"none"===(a.style&&a.style.display||m.css(a,"display"))},m.expr.filters.visible=function(a){return!m.expr.filters.hidden(a)};var Qc=/%20/g,Rc=/\[\]$/,Sc=/\r?\n/g,Tc=/^(?:submit|button|image|reset|file)$/i,Uc=/^(?:input|select|textarea|keygen)/i;function Vc(a,b,c,d){var e;if(m.isArray(b))m.each(b,function(b,e){c||Rc.test(a)?d(a,e):Vc(a+"["+("object"==typeof e?b:"")+"]",e,c,d)});else if(c||"object"!==m.type(b))d(a,b);else for(e in b)Vc(a+"["+e+"]",b[e],c,d)}m.param=function(a,b){var 
c,d=[],e=function(a,b){b=m.isFunction(b)?b():null==b?"":b,d[d.length]=encodeURIComponent(a)+"="+encodeURIComponent(b)};if(void 0===b&&(b=m.ajaxSettings&&m.ajaxSettings.traditional),m.isArray(a)||a.jquery&&!m.isPlainObject(a))m.each(a,function(){e(this.name,this.value)});else for(c in a)Vc(c,a[c],b,e);return d.join("&").replace(Qc,"+")},m.fn.extend({serialize:function(){return m.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var a=m.prop(this,"elements");return a?m.makeArray(a):this}).filter(function(){var a=this.type;return this.name&&!m(this).is(":disabled")&&Uc.test(this.nodeName)&&!Tc.test(a)&&(this.checked||!W.test(a))}).map(function(a,b){var c=m(this).val();return null==c?null:m.isArray(c)?m.map(c,function(a){return{name:b.name,value:a.replace(Sc,"\r\n")}}):{name:b.name,value:c.replace(Sc,"\r\n")}}).get()}}),m.ajaxSettings.xhr=void 0!==a.ActiveXObject?function(){return!this.isLocal&&/^(get|post|head|put|delete|options)$/i.test(this.type)&&Zc()||$c()}:Zc;var Wc=0,Xc={},Yc=m.ajaxSettings.xhr();a.ActiveXObject&&m(a).on("unload",function(){for(var a in Xc)Xc[a](void 0,!0)}),k.cors=!!Yc&&"withCredentials"in Yc,Yc=k.ajax=!!Yc,Yc&&m.ajaxTransport(function(a){if(!a.crossDomain||k.cors){var b;return{send:function(c,d){var e,f=a.xhr(),g=++Wc;if(f.open(a.type,a.url,a.async,a.username,a.password),a.xhrFields)for(e in a.xhrFields)f[e]=a.xhrFields[e];a.mimeType&&f.overrideMimeType&&f.overrideMimeType(a.mimeType),a.crossDomain||c["X-Requested-With"]||(c["X-Requested-With"]="XMLHttpRequest");for(e in c)void 0!==c[e]&&f.setRequestHeader(e,c[e]+"");f.send(a.hasContent&&a.data||null),b=function(c,e){var h,i,j;if(b&&(e||4===f.readyState))if(delete Xc[g],b=void 0,f.onreadystatechange=m.noop,e)4!==f.readyState&&f.abort();else{j={},h=f.status,"string"==typeof f.responseText&&(j.text=f.responseText);try{i=f.statusText}catch(k){i=""}h||!a.isLocal||a.crossDomain?1223===h&&(h=204):h=j.text?200:404}j&&d(h,i,j,f.getAllResponseHeaders())},a.async?4===f.readyState?setTimeout(b):f.onreadystatechange=Xc[g]=b:b()},abort:function(){b&&b(void 0,!0)}}}});function Zc(){try{return new a.XMLHttpRequest}catch(b){}}function $c(){try{return new a.ActiveXObject("Microsoft.XMLHTTP")}catch(b){}}m.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/(?:java|ecma)script/},converters:{"text script":function(a){return m.globalEval(a),a}}}),m.ajaxPrefilter("script",function(a){void 0===a.cache&&(a.cache=!1),a.crossDomain&&(a.type="GET",a.global=!1)}),m.ajaxTransport("script",function(a){if(a.crossDomain){var b,c=y.head||m("head")[0]||y.documentElement;return{send:function(d,e){b=y.createElement("script"),b.async=!0,a.scriptCharset&&(b.charset=a.scriptCharset),b.src=a.url,b.onload=b.onreadystatechange=function(a,c){(c||!b.readyState||/loaded|complete/.test(b.readyState))&&(b.onload=b.onreadystatechange=null,b.parentNode&&b.parentNode.removeChild(b),b=null,c||e(200,"success"))},c.insertBefore(b,c.firstChild)},abort:function(){b&&b.onload(void 0,!0)}}}});var _c=[],ad=/(=)\?(?=&|$)|\?\?/;m.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var a=_c.pop()||m.expando+"_"+vc++;return this[a]=!0,a}}),m.ajaxPrefilter("json jsonp",function(b,c,d){var e,f,g,h=b.jsonp!==!1&&(ad.test(b.url)?"url":"string"==typeof b.data&&!(b.contentType||"").indexOf("application/x-www-form-urlencoded")&&ad.test(b.data)&&"data");return 
h||"jsonp"===b.dataTypes[0]?(e=b.jsonpCallback=m.isFunction(b.jsonpCallback)?b.jsonpCallback():b.jsonpCallback,h?b[h]=b[h].replace(ad,"$1"+e):b.jsonp!==!1&&(b.url+=(wc.test(b.url)?"&":"?")+b.jsonp+"="+e),b.converters["script json"]=function(){return g||m.error(e+" was not called"),g[0]},b.dataTypes[0]="json",f=a[e],a[e]=function(){g=arguments},d.always(function(){a[e]=f,b[e]&&(b.jsonpCallback=c.jsonpCallback,_c.push(e)),g&&m.isFunction(f)&&f(g[0]),g=f=void 0}),"script"):void 0}),m.parseHTML=function(a,b,c){if(!a||"string"!=typeof a)return null;"boolean"==typeof b&&(c=b,b=!1),b=b||y;var d=u.exec(a),e=!c&&[];return d?[b.createElement(d[1])]:(d=m.buildFragment([a],b,e),e&&e.length&&m(e).remove(),m.merge([],d.childNodes))};var bd=m.fn.load;m.fn.load=function(a,b,c){if("string"!=typeof a&&bd)return bd.apply(this,arguments);var d,e,f,g=this,h=a.indexOf(" ");return h>=0&&(d=m.trim(a.slice(h,a.length)),a=a.slice(0,h)),m.isFunction(b)?(c=b,b=void 0):b&&"object"==typeof b&&(f="POST"),g.length>0&&m.ajax({url:a,type:f,dataType:"html",data:b}).done(function(a){e=arguments,g.html(d?m("<div>").append(m.parseHTML(a)).find(d):a)}).complete(c&&function(a,b){g.each(c,e||[a.responseText,b,a])}),this},m.expr.filters.animated=function(a){return m.grep(m.timers,function(b){return a===b.elem}).length};var cd=a.document.documentElement;function dd(a){return m.isWindow(a)?a:9===a.nodeType?a.defaultView||a.parentWindow:!1}m.offset={setOffset:function(a,b,c){var d,e,f,g,h,i,j,k=m.css(a,"position"),l=m(a),n={};"static"===k&&(a.style.position="relative"),h=l.offset(),f=m.css(a,"top"),i=m.css(a,"left"),j=("absolute"===k||"fixed"===k)&&m.inArray("auto",[f,i])>-1,j?(d=l.position(),g=d.top,e=d.left):(g=parseFloat(f)||0,e=parseFloat(i)||0),m.isFunction(b)&&(b=b.call(a,c,h)),null!=b.top&&(n.top=b.top-h.top+g),null!=b.left&&(n.left=b.left-h.left+e),"using"in b?b.using.call(a,n):l.css(n)}},m.fn.extend({offset:function(a){if(arguments.length)return void 0===a?this:this.each(function(b){m.offset.setOffset(this,a,b)});var b,c,d={top:0,left:0},e=this[0],f=e&&e.ownerDocument;if(f)return b=f.documentElement,m.contains(b,e)?(typeof e.getBoundingClientRect!==K&&(d=e.getBoundingClientRect()),c=dd(f),{top:d.top+(c.pageYOffset||b.scrollTop)-(b.clientTop||0),left:d.left+(c.pageXOffset||b.scrollLeft)-(b.clientLeft||0)}):d},position:function(){if(this[0]){var a,b,c={top:0,left:0},d=this[0];return"fixed"===m.css(d,"position")?b=d.getBoundingClientRect():(a=this.offsetParent(),b=this.offset(),m.nodeName(a[0],"html")||(c=a.offset()),c.top+=m.css(a[0],"borderTopWidth",!0),c.left+=m.css(a[0],"borderLeftWidth",!0)),{top:b.top-c.top-m.css(d,"marginTop",!0),left:b.left-c.left-m.css(d,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var a=this.offsetParent||cd;while(a&&!m.nodeName(a,"html")&&"static"===m.css(a,"position"))a=a.offsetParent;return a||cd})}}),m.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(a,b){var c=/Y/.test(b);m.fn[a]=function(d){return V(this,function(a,d,e){var f=dd(a);return void 0===e?f?b in f?f[b]:f.document.documentElement[d]:a[d]:void(f?f.scrollTo(c?m(f).scrollLeft():e,c?e:m(f).scrollTop()):a[d]=e)},a,d,arguments.length,null)}}),m.each(["top","left"],function(a,b){m.cssHooks[b]=Lb(k.pixelPosition,function(a,c){return c?(c=Jb(a,b),Hb.test(c)?m(a).position()[b]+"px":c):void 0})}),m.each({Height:"height",Width:"width"},function(a,b){m.each({padding:"inner"+a,content:b,"":"outer"+a},function(c,d){m.fn[d]=function(d,e){var f=arguments.length&&(c||"boolean"!=typeof 
d),g=c||(d===!0||e===!0?"margin":"border");return V(this,function(b,c,d){var e;return m.isWindow(b)?b.document.documentElement["client"+a]:9===b.nodeType?(e=b.documentElement,Math.max(b.body["scroll"+a],e["scroll"+a],b.body["offset"+a],e["offset"+a],e["client"+a])):void 0===d?m.css(b,c,g):m.style(b,c,d,g)},b,f?d:void 0,f,null)}})}),m.fn.size=function(){return this.length},m.fn.andSelf=m.fn.addBack,"function"==typeof define&&define.amd&&define("jquery",[],function(){return m});var ed=a.jQuery,fd=a.$;return m.noConflict=function(b){return a.$===m&&(a.$=fd),b&&a.jQuery===m&&(a.jQuery=ed),m},typeof b===K&&(a.jQuery=a.$=m),m});
\ No newline at end of file
diff --git a/catapult/third_party/coverage/coverage/htmlfiles/jquery.tablesorter.min.js b/catapult/third_party/coverage/coverage/htmlfiles/jquery.tablesorter.min.js
new file mode 100644
index 0000000..64c7007
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/htmlfiles/jquery.tablesorter.min.js
@@ -0,0 +1,2 @@
+
+(function($){$.extend({tablesorter:new function(){var parsers=[],widgets=[];this.defaults={cssHeader:"header",cssAsc:"headerSortUp",cssDesc:"headerSortDown",sortInitialOrder:"asc",sortMultiSortKey:"shiftKey",sortForce:null,sortAppend:null,textExtraction:"simple",parsers:{},widgets:[],widgetZebra:{css:["even","odd"]},headers:{},widthFixed:false,cancelSelection:true,sortList:[],headerList:[],dateFormat:"us",decimal:'.',debug:false};function benchmark(s,d){log(s+","+(new Date().getTime()-d.getTime())+"ms");}this.benchmark=benchmark;function log(s){if(typeof console!="undefined"&&typeof console.debug!="undefined"){console.log(s);}else{alert(s);}}function buildParserCache(table,$headers){if(table.config.debug){var parsersDebug="";}var rows=table.tBodies[0].rows;if(table.tBodies[0].rows[0]){var list=[],cells=rows[0].cells,l=cells.length;for(var i=0;i<l;i++){var p=false;if($.metadata&&($($headers[i]).metadata()&&$($headers[i]).metadata().sorter)){p=getParserById($($headers[i]).metadata().sorter);}else if((table.config.headers[i]&&table.config.headers[i].sorter)){p=getParserById(table.config.headers[i].sorter);}if(!p){p=detectParserForColumn(table,cells[i]);}if(table.config.debug){parsersDebug+="column:"+i+" parser:"+p.id+"\n";}list.push(p);}}if(table.config.debug){log(parsersDebug);}return list;};function detectParserForColumn(table,node){var l=parsers.length;for(var i=1;i<l;i++){if(parsers[i].is($.trim(getElementText(table.config,node)),table,node)){return parsers[i];}}return parsers[0];}function getParserById(name){var l=parsers.length;for(var i=0;i<l;i++){if(parsers[i].id.toLowerCase()==name.toLowerCase()){return parsers[i];}}return false;}function buildCache(table){if(table.config.debug){var cacheTime=new Date();}var totalRows=(table.tBodies[0]&&table.tBodies[0].rows.length)||0,totalCells=(table.tBodies[0].rows[0]&&table.tBodies[0].rows[0].cells.length)||0,parsers=table.config.parsers,cache={row:[],normalized:[]};for(var i=0;i<totalRows;++i){var c=table.tBodies[0].rows[i],cols=[];cache.row.push($(c));for(var j=0;j<totalCells;++j){cols.push(parsers[j].format(getElementText(table.config,c.cells[j]),table,c.cells[j]));}cols.push(i);cache.normalized.push(cols);cols=null;};if(table.config.debug){benchmark("Building cache for "+totalRows+" rows:",cacheTime);}return cache;};function getElementText(config,node){if(!node)return"";var t="";if(config.textExtraction=="simple"){if(node.childNodes[0]&&node.childNodes[0].hasChildNodes()){t=node.childNodes[0].innerHTML;}else{t=node.innerHTML;}}else{if(typeof(config.textExtraction)=="function"){t=config.textExtraction(node);}else{t=$(node).text();}}return t;}function appendToTable(table,cache){if(table.config.debug){var appendTime=new Date()}var c=cache,r=c.row,n=c.normalized,totalRows=n.length,checkCell=(n[0].length-1),tableBody=$(table.tBodies[0]),rows=[];for(var i=0;i<totalRows;i++){rows.push(r[n[i][checkCell]]);if(!table.config.appender){var o=r[n[i][checkCell]];var l=o.length;for(var j=0;j<l;j++){tableBody[0].appendChild(o[j]);}}}if(table.config.appender){table.config.appender(table,rows);}rows=null;if(table.config.debug){benchmark("Rebuilt table:",appendTime);}applyWidget(table);setTimeout(function(){$(table).trigger("sortEnd");},0);};function buildHeaders(table){if(table.config.debug){var time=new Date();}var meta=($.metadata)?true:false,tableHeadersRows=[];for(var i=0;i<table.tHead.rows.length;i++){tableHeadersRows[i]=0;};$tableHeaders=$("thead 
th",table);$tableHeaders.each(function(index){this.count=0;this.column=index;this.order=formatSortingOrder(table.config.sortInitialOrder);if(checkHeaderMetadata(this)||checkHeaderOptions(table,index))this.sortDisabled=true;if(!this.sortDisabled){$(this).addClass(table.config.cssHeader);}table.config.headerList[index]=this;});if(table.config.debug){benchmark("Built headers:",time);log($tableHeaders);}return $tableHeaders;};function checkCellColSpan(table,rows,row){var arr=[],r=table.tHead.rows,c=r[row].cells;for(var i=0;i<c.length;i++){var cell=c[i];if(cell.colSpan>1){arr=arr.concat(checkCellColSpan(table,headerArr,row++));}else{if(table.tHead.length==1||(cell.rowSpan>1||!r[row+1])){arr.push(cell);}}}return arr;};function checkHeaderMetadata(cell){if(($.metadata)&&($(cell).metadata().sorter===false)){return true;};return false;}function checkHeaderOptions(table,i){if((table.config.headers[i])&&(table.config.headers[i].sorter===false)){return true;};return false;}function applyWidget(table){var c=table.config.widgets;var l=c.length;for(var i=0;i<l;i++){getWidgetById(c[i]).format(table);}}function getWidgetById(name){var l=widgets.length;for(var i=0;i<l;i++){if(widgets[i].id.toLowerCase()==name.toLowerCase()){return widgets[i];}}};function formatSortingOrder(v){if(typeof(v)!="Number"){i=(v.toLowerCase()=="desc")?1:0;}else{i=(v==(0||1))?v:0;}return i;}function isValueInArray(v,a){var l=a.length;for(var i=0;i<l;i++){if(a[i][0]==v){return true;}}return false;}function setHeadersCss(table,$headers,list,css){$headers.removeClass(css[0]).removeClass(css[1]);var h=[];$headers.each(function(offset){if(!this.sortDisabled){h[this.column]=$(this);}});var l=list.length;for(var i=0;i<l;i++){h[list[i][0]].addClass(css[list[i][1]]);}}function fixColumnWidth(table,$headers){var c=table.config;if(c.widthFixed){var colgroup=$('<colgroup>');$("tr:first td",table.tBodies[0]).each(function(){colgroup.append($('<col>').css('width',$(this).width()));});$(table).prepend(colgroup);};}function updateHeaderSortCount(table,sortList){var c=table.config,l=sortList.length;for(var i=0;i<l;i++){var s=sortList[i],o=c.headerList[s[0]];o.count=s[1];o.count++;}}function multisort(table,sortList,cache){if(table.config.debug){var sortTime=new Date();}var dynamicExp="var sortWrapper = function(a,b) {",l=sortList.length;for(var i=0;i<l;i++){var c=sortList[i][0];var order=sortList[i][1];var s=(getCachedSortType(table.config.parsers,c)=="text")?((order==0)?"sortText":"sortTextDesc"):((order==0)?"sortNumeric":"sortNumericDesc");var e="e"+i;dynamicExp+="var "+e+" = "+s+"(a["+c+"],b["+c+"]); ";dynamicExp+="if("+e+") { return "+e+"; } ";dynamicExp+="else { ";}var orgOrderCol=cache.normalized[0].length-1;dynamicExp+="return a["+orgOrderCol+"]-b["+orgOrderCol+"];";for(var i=0;i<l;i++){dynamicExp+="}; ";}dynamicExp+="return 0; ";dynamicExp+="}; ";eval(dynamicExp);cache.normalized.sort(sortWrapper);if(table.config.debug){benchmark("Sorting on "+sortList.toString()+" and dir "+order+" time:",sortTime);}return cache;};function sortText(a,b){return((a<b)?-1:((a>b)?1:0));};function sortTextDesc(a,b){return((b<a)?-1:((b>a)?1:0));};function sortNumeric(a,b){return a-b;};function sortNumericDesc(a,b){return b-a;};function getCachedSortType(parsers,i){return parsers[i].type;};this.construct=function(settings){return this.each(function(){if(!this.tHead||!this.tBodies)return;var 
$this,$document,$headers,cache,config,shiftDown=0,sortOrder;this.config={};config=$.extend(this.config,$.tablesorter.defaults,settings);$this=$(this);$headers=buildHeaders(this);this.config.parsers=buildParserCache(this,$headers);cache=buildCache(this);var sortCSS=[config.cssDesc,config.cssAsc];fixColumnWidth(this);$headers.click(function(e){$this.trigger("sortStart");var totalRows=($this[0].tBodies[0]&&$this[0].tBodies[0].rows.length)||0;if(!this.sortDisabled&&totalRows>0){var $cell=$(this);var i=this.column;this.order=this.count++%2;if(!e[config.sortMultiSortKey]){config.sortList=[];if(config.sortForce!=null){var a=config.sortForce;for(var j=0;j<a.length;j++){if(a[j][0]!=i){config.sortList.push(a[j]);}}}config.sortList.push([i,this.order]);}else{if(isValueInArray(i,config.sortList)){for(var j=0;j<config.sortList.length;j++){var s=config.sortList[j],o=config.headerList[s[0]];if(s[0]==i){o.count=s[1];o.count++;s[1]=o.count%2;}}}else{config.sortList.push([i,this.order]);}};setTimeout(function(){setHeadersCss($this[0],$headers,config.sortList,sortCSS);appendToTable($this[0],multisort($this[0],config.sortList,cache));},1);return false;}}).mousedown(function(){if(config.cancelSelection){this.onselectstart=function(){return false};return false;}});$this.bind("update",function(){this.config.parsers=buildParserCache(this,$headers);cache=buildCache(this);}).bind("sorton",function(e,list){$(this).trigger("sortStart");config.sortList=list;var sortList=config.sortList;updateHeaderSortCount(this,sortList);setHeadersCss(this,$headers,sortList,sortCSS);appendToTable(this,multisort(this,sortList,cache));}).bind("appendCache",function(){appendToTable(this,cache);}).bind("applyWidgetId",function(e,id){getWidgetById(id).format(this);}).bind("applyWidgets",function(){applyWidget(this);});if($.metadata&&($(this).metadata()&&$(this).metadata().sortlist)){config.sortList=$(this).metadata().sortlist;}if(config.sortList.length>0){$this.trigger("sorton",[config.sortList]);}applyWidget(this);});};this.addParser=function(parser){var l=parsers.length,a=true;for(var i=0;i<l;i++){if(parsers[i].id.toLowerCase()==parser.id.toLowerCase()){a=false;}}if(a){parsers.push(parser);};};this.addWidget=function(widget){widgets.push(widget);};this.formatFloat=function(s){var i=parseFloat(s);return(isNaN(i))?0:i;};this.formatInt=function(s){var i=parseInt(s);return(isNaN(i))?0:i;};this.isDigit=function(s,config){var DECIMAL='\\'+config.decimal;var exp='/(^[+]?0('+DECIMAL+'0+)?$)|(^([-+]?[1-9][0-9]*)$)|(^([-+]?((0?|[1-9][0-9]*)'+DECIMAL+'(0*[1-9][0-9]*)))$)|(^[-+]?[1-9]+[0-9]*'+DECIMAL+'0+$)/';return RegExp(exp).test($.trim(s));};this.clearTableBody=function(table){if($.browser.msie){function empty(){while(this.firstChild)this.removeChild(this.firstChild);}empty.apply(table.tBodies[0]);}else{table.tBodies[0].innerHTML="";}};}});$.fn.extend({tablesorter:$.tablesorter.construct});var ts=$.tablesorter;ts.addParser({id:"text",is:function(s){return true;},format:function(s){return $.trim(s.toLowerCase());},type:"text"});ts.addParser({id:"digit",is:function(s,table){var c=table.config;return $.tablesorter.isDigit(s,c);},format:function(s){return $.tablesorter.formatFloat(s);},type:"numeric"});ts.addParser({id:"currency",is:function(s){return/^[£$€?.]/.test(s);},format:function(s){return $.tablesorter.formatFloat(s.replace(new RegExp(/[^0-9.]/g),""));},type:"numeric"});ts.addParser({id:"ipAddress",is:function(s){return/^\d{2,3}[\.]\d{2,3}[\.]\d{2,3}[\.]\d{2,3}$/.test(s);},format:function(s){var a=s.split("."),r="",l=a.length;for(var 
i=0;i<l;i++){var item=a[i];if(item.length==2){r+="0"+item;}else{r+=item;}}return $.tablesorter.formatFloat(r);},type:"numeric"});ts.addParser({id:"url",is:function(s){return/^(https?|ftp|file):\/\/$/.test(s);},format:function(s){return jQuery.trim(s.replace(new RegExp(/(https?|ftp|file):\/\//),''));},type:"text"});ts.addParser({id:"isoDate",is:function(s){return/^\d{4}[\/-]\d{1,2}[\/-]\d{1,2}$/.test(s);},format:function(s){return $.tablesorter.formatFloat((s!="")?new Date(s.replace(new RegExp(/-/g),"/")).getTime():"0");},type:"numeric"});ts.addParser({id:"percent",is:function(s){return/\%$/.test($.trim(s));},format:function(s){return $.tablesorter.formatFloat(s.replace(new RegExp(/%/g),""));},type:"numeric"});ts.addParser({id:"usLongDate",is:function(s){return s.match(new RegExp(/^[A-Za-z]{3,10}\.? [0-9]{1,2}, ([0-9]{4}|'?[0-9]{2}) (([0-2]?[0-9]:[0-5][0-9])|([0-1]?[0-9]:[0-5][0-9]\s(AM|PM)))$/));},format:function(s){return $.tablesorter.formatFloat(new Date(s).getTime());},type:"numeric"});ts.addParser({id:"shortDate",is:function(s){return/\d{1,2}[\/\-]\d{1,2}[\/\-]\d{2,4}/.test(s);},format:function(s,table){var c=table.config;s=s.replace(/\-/g,"/");if(c.dateFormat=="us"){s=s.replace(/(\d{1,2})[\/\-](\d{1,2})[\/\-](\d{4})/,"$3/$1/$2");}else if(c.dateFormat=="uk"){s=s.replace(/(\d{1,2})[\/\-](\d{1,2})[\/\-](\d{4})/,"$3/$2/$1");}else if(c.dateFormat=="dd/mm/yy"||c.dateFormat=="dd-mm-yy"){s=s.replace(/(\d{1,2})[\/\-](\d{1,2})[\/\-](\d{2})/,"$1/$2/$3");}return $.tablesorter.formatFloat(new Date(s).getTime());},type:"numeric"});ts.addParser({id:"time",is:function(s){return/^(([0-2]?[0-9]:[0-5][0-9])|([0-1]?[0-9]:[0-5][0-9]\s(am|pm)))$/.test(s);},format:function(s){return $.tablesorter.formatFloat(new Date("2000/01/01 "+s).getTime());},type:"numeric"});ts.addParser({id:"metadata",is:function(s){return false;},format:function(s,table,cell){var c=table.config,p=(!c.parserMetadataName)?'sortValue':c.parserMetadataName;return $(cell).metadata()[p];},type:"numeric"});ts.addWidget({id:"zebra",format:function(table){if(table.config.debug){var time=new Date();}$("tr:visible",table.tBodies[0]).filter(':even').removeClass(table.config.widgetZebra.css[1]).addClass(table.config.widgetZebra.css[0]).end().filter(':odd').removeClass(table.config.widgetZebra.css[0]).addClass(table.config.widgetZebra.css[1]);if(table.config.debug){$.tablesorter.benchmark("Applying Zebra widget",time);}}});})(jQuery);
\ No newline at end of file
diff --git a/catapult/third_party/coverage/coverage/htmlfiles/keybd_closed.png b/catapult/third_party/coverage/coverage/htmlfiles/keybd_closed.png
new file mode 100644
index 0000000..f2b0418
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/htmlfiles/keybd_closed.png
Binary files differ
diff --git a/catapult/third_party/coverage/coverage/htmlfiles/keybd_open.png b/catapult/third_party/coverage/coverage/htmlfiles/keybd_open.png
new file mode 100644
index 0000000..a77961d
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/htmlfiles/keybd_open.png
Binary files differ
diff --git a/catapult/third_party/coverage/coverage/htmlfiles/pyfile.html b/catapult/third_party/coverage/coverage/htmlfiles/pyfile.html
new file mode 100644
index 0000000..ad7969d
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/htmlfiles/pyfile.html
@@ -0,0 +1,97 @@
+{# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 #}
+{# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt #}
+
+<!DOCTYPE html>
+<html>
+<head>
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+    {# IE8 rounds line-height incorrectly, and adding this emulateIE7 line makes it right! #}
+    {# http://social.msdn.microsoft.com/Forums/en-US/iewebdevelopment/thread/7684445e-f080-4d8f-8529-132763348e21 #}
+    <meta http-equiv="X-UA-Compatible" content="IE=emulateIE7" />
+    <title>Coverage for {{fr.relative_filename|escape}}: {{nums.pc_covered_str}}%</title>
+    <link rel="stylesheet" href="style.css" type="text/css">
+    {% if extra_css %}
+        <link rel="stylesheet" href="{{ extra_css }}" type="text/css">
+    {% endif %}
+    <script type="text/javascript" src="jquery.min.js"></script>
+    <script type="text/javascript" src="jquery.hotkeys.js"></script>
+    <script type="text/javascript" src="jquery.isonscreen.js"></script>
+    <script type="text/javascript" src="coverage_html.js"></script>
+    <script type="text/javascript">
+        jQuery(document).ready(coverage.pyfile_ready);
+    </script>
+</head>
+<body class="pyfile">
+
+<div id="header">
+    <div class="content">
+        <h1>Coverage for <b>{{fr.relative_filename|escape}}</b> :
+            <span class="pc_cov">{{nums.pc_covered_str}}%</span>
+        </h1>
+
+        <img id="keyboard_icon" src="keybd_closed.png" alt="Show keyboard shortcuts" />
+
+        <h2 class="stats">
+            {{nums.n_statements}} statements &nbsp;
+            <span class="{{c_run}} shortkey_r button_toggle_run">{{nums.n_executed}} run</span>
+            <span class="{{c_mis}} shortkey_m button_toggle_mis">{{nums.n_missing}} missing</span>
+            <span class="{{c_exc}} shortkey_x button_toggle_exc">{{nums.n_excluded}} excluded</span>
+
+            {% if has_arcs %}
+                <span class="{{c_par}} shortkey_p button_toggle_par">{{nums.n_partial_branches}} partial</span>
+            {% endif %}
+        </h2>
+    </div>
+</div>
+
+<div class="help_panel">
+    <img id="panel_icon" src="keybd_open.png" alt="Hide keyboard shortcuts" />
+    <p class="legend">Hot-keys on this page</p>
+    <div>
+    <p class="keyhelp">
+        <span class="key">r</span>
+        <span class="key">m</span>
+        <span class="key">x</span>
+        <span class="key">p</span> &nbsp; toggle line displays
+    </p>
+    <p class="keyhelp">
+        <span class="key">j</span>
+        <span class="key">k</span> &nbsp; next/prev highlighted chunk
+    </p>
+    <p class="keyhelp">
+        <span class="key">0</span> &nbsp; (zero) top of page
+    </p>
+    <p class="keyhelp">
+        <span class="key">1</span> &nbsp; (one) first highlighted chunk
+    </p>
+    </div>
+</div>
+
+<div id="source">
+    <table>
+        <tr>
+            <td class="linenos">
+                {% for line in lines %}
+                    <p id="n{{line.number}}" class="{{line.class}}"><a href="#n{{line.number}}">{{line.number}}</a></p>
+                {% endfor %}
+            </td>
+            <td class="text">
+                {% for line in lines %}
+                    <p id="t{{line.number}}" class="{{line.class}}">{% if line.annotate %}<span class="annotate" title="{{line.annotate_title}}">{{line.annotate}}</span>{% endif %}{{line.html}}<span class="strut">&nbsp;</span></p>
+                {% endfor %}
+            </td>
+        </tr>
+    </table>
+</div>
+
+<div id="footer">
+    <div class="content">
+        <p>
+            <a class="nav" href="index.html">&#xab; index</a> &nbsp; &nbsp; <a class="nav" href="{{__url__}}">coverage.py v{{__version__}}</a>,
+            created at {{ time_stamp }}
+        </p>
+    </div>
+</div>
+
+</body>
+</html>
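
The template above is filled in by coverage.py's HTML reporter, which is not part of this hunk, but the shape of the data it expects can be read off the placeholders. A rough Python sketch of such a context follows; the key names come from the template, while the dict-based representation and the sample values are illustrative assumptions rather than the reporter's real objects:

# Hypothetical context for pyfile.html -- names taken from the template above,
# values invented for illustration.  The real reporter passes richer objects
# (file reporter, numbers, analyzed lines), not plain dicts.
pyfile_context = {
    "fr": {"relative_filename": "coverage/misc.py"},
    "nums": {
        "pc_covered_str": "87",     # shown next to the file name in the header
        "n_statements": 120,
        "n_executed": 104,
        "n_missing": 16,
        "n_excluded": 0,
        "n_partial_branches": 3,
    },
    "has_arcs": True,               # branch coverage: show the "partial" counter
    "c_run": "run",                 # CSS classes from style.css for the toggles
    "c_mis": "mis",
    "c_exc": "exc",
    "c_par": "par",
    "extra_css": None,
    "lines": [                      # one entry per source line
        {"number": 1, "class": "run", "html": "import os",
         "annotate": None, "annotate_title": None},
        {"number": 2, "class": "mis", "html": "import sys",
         "annotate": None, "annotate_title": None},
    ],
    "time_stamp": "2016-01-29 12:00",
    "__url__": "https://example.com/coverage",   # footer link target (placeholder)
    "__version__": "x.y",                        # placeholder version string
}
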
diff --git a/catapult/third_party/coverage/coverage/htmlfiles/style.css b/catapult/third_party/coverage/coverage/htmlfiles/style.css
new file mode 100644
index 0000000..15b0890
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/htmlfiles/style.css
@@ -0,0 +1,326 @@
+/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
+/* For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt */
+
+/* CSS styles for coverage.py. */
+/* Page-wide styles */
+html, body, h1, h2, h3, p, table, td, th {
+    margin: 0;
+    padding: 0;
+    border: 0;
+    outline: 0;
+    font-weight: inherit;
+    font-style: inherit;
+    font-size: 100%;
+    font-family: inherit;
+    vertical-align: baseline;
+    }
+
+/* Set baseline grid to 16 pt. */
+body {
+    font-family: georgia, serif;
+    font-size: 1em;
+    }
+
+html>body {
+    font-size: 16px;
+    }
+
+/* Set base font size to 12/16 */
+p {
+    font-size: .75em;           /* 12/16 */
+    line-height: 1.33333333em;  /* 16/12 */
+    }
+
+table {
+    border-collapse: collapse;
+    }
+td {
+    vertical-align: top;
+}
+table tr.hidden {
+    display: none !important;
+    }
+
+p#no_rows {
+    display: none;
+    font-size: 1.2em;
+    }
+
+a.nav {
+    text-decoration: none;
+    color: inherit;
+    }
+a.nav:hover {
+    text-decoration: underline;
+    color: inherit;
+    }
+
+/* Page structure */
+#header {
+    background: #f8f8f8;
+    width: 100%;
+    border-bottom: 1px solid #eee;
+    }
+
+#source {
+    padding: 1em;
+    font-family: "courier new", monospace;
+    }
+
+.indexfile #footer {
+    margin: 1em 3em;
+    }
+
+.pyfile #footer {
+    margin: 1em 1em;
+    }
+
+#footer .content {
+    padding: 0;
+    font-size: 85%;
+    font-family: verdana, sans-serif;
+    color: #666666;
+    font-style: italic;
+    }
+
+#index {
+    margin: 1em 0 0 3em;
+    }
+
+/* Header styles */
+#header .content {
+    padding: 1em 3em;
+    }
+
+h1 {
+    font-size: 1.25em;
+    display: inline-block;
+}
+
+#filter_container {
+    display: inline-block;
+    float: right;
+    margin: 0 2em 0 0;
+}
+#filter_container input {
+    width: 10em;
+}
+
+h2.stats {
+    margin-top: .5em;
+    font-size: 1em;
+}
+.stats span {
+    border: 1px solid;
+    padding: .1em .25em;
+    margin: 0 .1em;
+    cursor: pointer;
+    border-color: #999 #ccc #ccc #999;
+}
+.stats span.hide_run, .stats span.hide_exc,
+.stats span.hide_mis, .stats span.hide_par,
+.stats span.par.hide_run.hide_par {
+    border-color: #ccc #999 #999 #ccc;
+}
+.stats span.par.hide_run {
+    border-color: #999 #ccc #ccc #999;
+}
+
+.stats span.run {
+    background: #ddffdd;
+}
+.stats span.exc {
+    background: #eeeeee;
+}
+.stats span.mis {
+    background: #ffdddd;
+}
+.stats span.hide_run {
+    background: #eeffee;
+}
+.stats span.hide_exc {
+    background: #f5f5f5;
+}
+.stats span.hide_mis {
+    background: #ffeeee;
+}
+.stats span.par {
+    background: #ffffaa;
+}
+.stats span.hide_par {
+    background: #ffffcc;
+}
+
+/* Help panel */
+#keyboard_icon {
+    float: right;
+    margin: 5px;
+    cursor: pointer;
+}
+
+.help_panel {
+    position: absolute;
+    background: #ffc;
+    padding: .5em;
+    border: 1px solid #883;
+    display: none;
+}
+
+.indexfile .help_panel {
+    width: 20em; height: 4em;
+}
+
+.pyfile .help_panel {
+    width: 16em; height: 8em;
+}
+
+.help_panel .legend {
+    font-style: italic;
+    margin-bottom: 1em;
+}
+
+#panel_icon {
+    float: right;
+    cursor: pointer;
+}
+
+.keyhelp {
+    margin: .75em;
+}
+
+.keyhelp .key {
+    border: 1px solid black;
+    border-color: #888 #333 #333 #888;
+    padding: .1em .35em;
+    font-family: monospace;
+    font-weight: bold;
+    background: #eee;
+}
+
+/* Source file styles */
+.linenos p {
+    text-align: right;
+    margin: 0;
+    padding: 0 .5em;
+    color: #999999;
+    font-family: verdana, sans-serif;
+    font-size: .625em;   /* 10/16 */
+    line-height: 1.6em;  /* 16/10 */
+    }
+.linenos p.highlight {
+    background: #ffdd00;
+    }
+.linenos p a {
+    text-decoration: none;
+    color: #999999;
+    }
+.linenos p a:hover {
+    text-decoration: underline;
+    color: #999999;
+    }
+
+td.text {
+    width: 100%;
+    }
+.text p {
+    margin: 0;
+    padding: 0 0 0 .5em;
+    border-left: 2px solid #ffffff;
+    white-space: nowrap;
+    }
+
+.text p.mis {
+    background: #ffdddd;
+    border-left: 2px solid #ff0000;
+    }
+.text p.run, .text p.run.hide_par {
+    background: #ddffdd;
+    border-left: 2px solid #00ff00;
+    }
+.text p.exc {
+    background: #eeeeee;
+    border-left: 2px solid #808080;
+    }
+.text p.par, .text p.par.hide_run {
+    background: #ffffaa;
+    border-left: 2px solid #eeee99;
+    }
+.text p.hide_run, .text p.hide_exc, .text p.hide_mis, .text p.hide_par,
+.text p.hide_run.hide_par {
+    background: inherit;
+    }
+
+.text span.annotate {
+    font-family: georgia;
+    color: #666;
+    float: right;
+    padding-right: .5em;
+    }
+.text p.hide_par span.annotate {
+    display: none;
+    }
+
+/* Syntax coloring */
+.text .com {
+    color: green;
+    font-style: italic;
+    line-height: 1px;
+    }
+.text .key {
+    font-weight: bold;
+    line-height: 1px;
+    }
+.text .str {
+    color: #000080;
+    }
+
+/* index styles */
+#index td, #index th {
+    text-align: right;
+    width: 5em;
+    padding: .25em .5em;
+    border-bottom: 1px solid #eee;
+    }
+#index th {
+    font-style: italic;
+    color: #333;
+    border-bottom: 1px solid #ccc;
+    cursor: pointer;
+    }
+#index th:hover {
+    background: #eee;
+    border-bottom: 1px solid #999;
+    }
+#index td.left, #index th.left {
+    padding-left: 0;
+    }
+#index td.right, #index th.right {
+    padding-right: 0;
+    }
+#index th.headerSortDown, #index th.headerSortUp {
+    border-bottom: 1px solid #000;
+    }
+#index td.name, #index th.name {
+    text-align: left;
+    width: auto;
+    }
+#index td.name a {
+    text-decoration: none;
+    color: #000;
+    }
+#index td.name a:hover {
+    text-decoration: underline;
+    color: #000;
+    }
+#index tr.total,
+#index tr.total_dynamic {
+    }
+#index tr.total td,
+#index tr.total_dynamic td {
+    font-weight: bold;
+    border-top: 1px solid #ccc;
+    border-bottom: none;
+    }
+#index tr.file:hover {
+    background: #eeeeee;
+    }
diff --git a/catapult/third_party/coverage/coverage/misc.py b/catapult/third_party/coverage/coverage/misc.py
new file mode 100644
index 0000000..db6298b
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/misc.py
@@ -0,0 +1,248 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Miscellaneous stuff for coverage.py."""
+
+import errno
+import hashlib
+import inspect
+import locale
+import os
+import sys
+import types
+
+from coverage import env
+from coverage.backward import string_class, to_bytes, unicode_class
+
+ISOLATED_MODULES = {}
+
+
+def isolate_module(mod):
+    """Copy a module so that we are isolated from aggressive mocking.
+
+    If a test suite mocks os.path.exists (for example), and then we need to use
+    it during the test, everything will get tangled up if we use their mock.
+    Making a copy of the module when we import it will isolate coverage.py from
+    those complications.
+    """
+    if mod not in ISOLATED_MODULES:
+        new_mod = types.ModuleType(mod.__name__)
+        ISOLATED_MODULES[mod] = new_mod
+        for name in dir(mod):
+            value = getattr(mod, name)
+            if isinstance(value, types.ModuleType):
+                value = isolate_module(value)
+            setattr(new_mod, name, value)
+    return ISOLATED_MODULES[mod]
+
+os = isolate_module(os)
+
+
+# Use PyContracts for assertion testing on parameters and returns, but only if
+# we are running our own test suite.
+if env.TESTING:
+    from contracts import contract              # pylint: disable=unused-import
+    from contracts import new_contract
+
+    try:
+        # Define contract words that PyContract doesn't have.
+        new_contract('bytes', lambda v: isinstance(v, bytes))
+        if env.PY3:
+            new_contract('unicode', lambda v: isinstance(v, unicode_class))
+    except ValueError:
+        # During meta-coverage, this module is imported twice, and PyContracts
+        # doesn't like redefining contracts. It's OK.
+        pass
+else:                                           # pragma: not covered
+    # We aren't using real PyContracts, so just define a no-op decorator as a
+    # stunt double.
+    def contract(**unused):
+        """Dummy no-op implementation of `contract`."""
+        return lambda func: func
+
+
+def nice_pair(pair):
+    """Make a nice string representation of a pair of numbers.
+
+    If the numbers are equal, just return the number, otherwise return the pair
+    with a dash between them, indicating the range.
+
+    """
+    start, end = pair
+    if start == end:
+        return "%d" % start
+    else:
+        return "%d-%d" % (start, end)
+
+
+def format_lines(statements, lines):
+    """Nicely format a list of line numbers.
+
+    Format a list of line numbers for printing by coalescing groups of lines as
+    long as the lines represent consecutive statements.  This will coalesce
+    even if there are gaps between statements.
+
+    For example, if `statements` is [1,2,3,4,5,10,11,12,13,14] and
+    `lines` is [1,2,5,10,11,13,14] then the result will be "1-2, 5-11, 13-14".
+
+    """
+    pairs = []
+    i = 0
+    j = 0
+    start = None
+    statements = sorted(statements)
+    lines = sorted(lines)
+    while i < len(statements) and j < len(lines):
+        if statements[i] == lines[j]:
+            if start is None:
+                start = lines[j]
+            end = lines[j]
+            j += 1
+        elif start:
+            pairs.append((start, end))
+            start = None
+        i += 1
+    if start:
+        pairs.append((start, end))
+    ret = ', '.join(map(nice_pair, pairs))
+    return ret
+
+
+def expensive(fn):
+    """A decorator to indicate that a method shouldn't be called more than once.
+
+    Normally, this does nothing.  During testing, this raises an exception if
+    called more than once.
+
+    """
+    if env.TESTING:
+        attr = "_once_" + fn.__name__
+
+        def _wrapped(self):
+            """Inner function that checks the cache."""
+            if hasattr(self, attr):
+                raise Exception("Shouldn't have called %s more than once" % fn.__name__)
+            setattr(self, attr, True)
+            return fn(self)
+        return _wrapped
+    else:
+        return fn
+
+
+def bool_or_none(b):
+    """Return bool(b), but preserve None."""
+    if b is None:
+        return None
+    else:
+        return bool(b)
+
+
+def join_regex(regexes):
+    """Combine a list of regexes into one that matches any of them."""
+    return "|".join("(?:%s)" % r for r in regexes)
+
+
+def file_be_gone(path):
+    """Remove a file, and don't get annoyed if it doesn't exist."""
+    try:
+        os.remove(path)
+    except OSError as e:
+        if e.errno != errno.ENOENT:
+            raise
+
+
+def output_encoding(outfile=None):
+    """Determine the encoding to use for output written to `outfile` or stdout."""
+    if outfile is None:
+        outfile = sys.stdout
+    encoding = (
+        getattr(outfile, "encoding", None) or
+        getattr(sys.__stdout__, "encoding", None) or
+        locale.getpreferredencoding()
+    )
+    return encoding
+
+
+class Hasher(object):
+    """Hashes Python data into md5."""
+    def __init__(self):
+        self.md5 = hashlib.md5()
+
+    def update(self, v):
+        """Add `v` to the hash, recursively if needed."""
+        self.md5.update(to_bytes(str(type(v))))
+        if isinstance(v, string_class):
+            self.md5.update(to_bytes(v))
+        elif isinstance(v, bytes):
+            self.md5.update(v)
+        elif v is None:
+            pass
+        elif isinstance(v, (int, float)):
+            self.md5.update(to_bytes(str(v)))
+        elif isinstance(v, (tuple, list)):
+            for e in v:
+                self.update(e)
+        elif isinstance(v, dict):
+            keys = v.keys()
+            for k in sorted(keys):
+                self.update(k)
+                self.update(v[k])
+        else:
+            for k in dir(v):
+                if k.startswith('__'):
+                    continue
+                a = getattr(v, k)
+                if inspect.isroutine(a):
+                    continue
+                self.update(k)
+                self.update(a)
+
+    def hexdigest(self):
+        """Retrieve the hex digest of the hash."""
+        return self.md5.hexdigest()
+
+
+def _needs_to_implement(that, func_name):
+    """Helper to raise NotImplementedError in interface stubs."""
+    if hasattr(that, "_coverage_plugin_name"):
+        thing = "Plugin"
+        name = that._coverage_plugin_name
+    else:
+        thing = "Class"
+        klass = that.__class__
+        name = "{klass.__module__}.{klass.__name__}".format(klass=klass)
+
+    raise NotImplementedError(
+        "{thing} {name!r} needs to implement {func_name}()".format(
+            thing=thing, name=name, func_name=func_name
+            )
+        )
+
+
+class CoverageException(Exception):
+    """An exception specific to coverage.py."""
+    pass
+
+
+class NoSource(CoverageException):
+    """We couldn't find the source for a module."""
+    pass
+
+
+class NoCode(NoSource):
+    """We couldn't find any code at all."""
+    pass
+
+
+class NotPython(CoverageException):
+    """A source file turned out not to be parsable Python."""
+    pass
+
+
+class ExceptionDuringRun(CoverageException):
+    """An exception happened while running customer code.
+
+    Construct it with three arguments, the values from `sys.exc_info`.
+
+    """
+    pass
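
Since misc.py is all small helpers, their behavior is easiest to see from a few concrete calls. The sketch below uses only the functions defined in the file above; the inputs are arbitrary illustrations, and the results in the comments follow from the code (the format_lines call reproduces the example in its docstring):

from coverage.misc import Hasher, format_lines, join_regex, nice_pair

nice_pair((3, 3))    # -> "3"
nice_pair((3, 7))    # -> "3-7"

# Executed lines are coalesced into ranges bounded by the known statements.
format_lines([1, 2, 3, 4, 5, 10, 11, 12, 13, 14], [1, 2, 5, 10, 11, 13, 14])
# -> "1-2, 5-11, 13-14"

# Combine several exclusion patterns into a single alternation.
join_regex([r"#\s*pragma: no cover", r"raise NotImplementedError"])
# -> "(?:#\s*pragma: no cover)|(?:raise NotImplementedError)"

# Hasher walks nested data (dicts are hashed key-by-key in sorted order) and
# produces a stable md5 fingerprint for it.
h = Hasher()
h.update({"branch": True, "timid": False})
fingerprint = h.hexdigest()
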
diff --git a/catapult/third_party/coverage/coverage/monkey.py b/catapult/third_party/coverage/coverage/monkey.py
new file mode 100644
index 0000000..c4ec68c
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/monkey.py
@@ -0,0 +1,50 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Monkey-patching to make coverage.py work right in some cases."""
+
+import multiprocessing
+import multiprocessing.process
+import sys
+
+# An attribute that will be set on modules to indicate that they have been
+# monkey-patched.
+PATCHED_MARKER = "_coverage$patched"
+
+
+def patch_multiprocessing():
+    """Monkey-patch the multiprocessing module.
+
+    This enables coverage measurement of processes started by multiprocessing.
+    This is wildly experimental!
+
+    """
+    if hasattr(multiprocessing, PATCHED_MARKER):
+        return
+
+    if sys.version_info >= (3, 4):
+        klass = multiprocessing.process.BaseProcess
+    else:
+        klass = multiprocessing.Process
+
+    original_bootstrap = klass._bootstrap
+
+    class ProcessWithCoverage(klass):
+        """A replacement for multiprocessing.Process that starts coverage."""
+        def _bootstrap(self):
+            """Wrapper around _bootstrap to start coverage."""
+            from coverage import Coverage
+            cov = Coverage(data_suffix=True)
+            cov.start()
+            try:
+                return original_bootstrap(self)
+            finally:
+                cov.stop()
+                cov.save()
+
+    if sys.version_info >= (3, 4):
+        klass._bootstrap = ProcessWithCoverage._bootstrap
+    else:
+        multiprocessing.Process = ProcessWithCoverage
+
+    setattr(multiprocessing, PATCHED_MARKER, True)
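+
+
+# Illustrative sketch only, not part of the upstream module: one way the patch
+# could be exercised.  `_example_worker` is a made-up function, and the sketch
+# assumes the coverage package is importable in the child process.
+def _example_worker(n):
+    """A trivial function to run in a child process (example only)."""
+    return n * n
+
+
+if __name__ == "__main__":      # pragma: no cover - example only
+    patch_multiprocessing()     # patch before starting any children
+    proc = multiprocessing.Process(target=_example_worker, args=(3,))
+    proc.start()
+    proc.join()                 # the child saves its own .coverage.* data file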
diff --git a/catapult/third_party/coverage/coverage/parser.py b/catapult/third_party/coverage/coverage/parser.py
new file mode 100644
index 0000000..a5e9623
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/parser.py
@@ -0,0 +1,671 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Code parsing for coverage.py."""
+
+import collections
+import dis
+import re
+import token
+import tokenize
+
+from coverage.backward import range    # pylint: disable=redefined-builtin
+from coverage.backward import bytes_to_ints
+from coverage.bytecode import ByteCodes, CodeObjects
+from coverage.misc import contract, nice_pair, expensive, join_regex
+from coverage.misc import CoverageException, NoSource, NotPython
+from coverage.phystokens import compile_unicode, generate_tokens
+
+
+class PythonParser(object):
+    """Parse code to find executable lines, excluded lines, etc."""
+
+    @contract(text='unicode|None')
+    def __init__(self, text=None, filename=None, exclude=None):
+        """
+        Source can be provided as `text`, the text itself, or `filename`, from
+        which the text will be read.  Excluded lines are those that match
+        `exclude`, a regex.
+
+        """
+        assert text or filename, "PythonParser needs either text or filename"
+        self.filename = filename or "<code>"
+        self.text = text
+        if not self.text:
+            from coverage.python import get_python_source
+            try:
+                self.text = get_python_source(self.filename)
+            except IOError as err:
+                raise NoSource(
+                    "No source for code: '%s': %s" % (self.filename, err)
+                )
+
+        self.exclude = exclude
+
+        self.show_tokens = False
+
+        # The text lines of the parsed code.
+        self.lines = self.text.split('\n')
+
+        # The line numbers of excluded lines of code.
+        self.excluded = set()
+
+        # The line numbers of docstring lines.
+        self.docstrings = set()
+
+        # The line numbers of class definitions.
+        self.classdefs = set()
+
+        # A dict mapping line numbers of multi-line statements to their first line.
+        self.multiline = {}
+
+        # The line numbers that start statements.
+        self.statement_starts = set()
+
+        # Lazily-created ByteParser and arc data.
+        self._byte_parser = None
+        self._all_arcs = None
+
+    @property
+    def byte_parser(self):
+        """Create a ByteParser on demand."""
+        if not self._byte_parser:
+            self._byte_parser = ByteParser(self.text, filename=self.filename)
+        return self._byte_parser
+
+    def lines_matching(self, *regexes):
+        """Find the lines matching one of a list of regexes.
+
+        Returns a set of line numbers, the lines that contain a match for one
+        of the regexes in `regexes`.  The entire line needn't match, just a
+        part of it.
+
+        """
+        regex_c = re.compile(join_regex(regexes))
+        matches = set()
+        for i, ltext in enumerate(self.lines, start=1):
+            if regex_c.search(ltext):
+                matches.add(i)
+        return matches
+
+    def _raw_parse(self):
+        """Parse the source to find the interesting facts about its lines.
+
+        A handful of member fields are updated.
+
+        """
+        # Find lines which match an exclusion pattern.
+        if self.exclude:
+            self.excluded = self.lines_matching(self.exclude)
+
+        # Tokenize, to find excluded suites, to find docstrings, and to find
+        # multi-line statements.
+        indent = 0
+        exclude_indent = 0
+        excluding = False
+        prev_toktype = token.INDENT
+        first_line = None
+        empty = True
+
+        tokgen = generate_tokens(self.text)
+        for toktype, ttext, (slineno, _), (elineno, _), ltext in tokgen:
+            if self.show_tokens:                # pragma: not covered
+                print("%10s %5s %-20r %r" % (
+                    tokenize.tok_name.get(toktype, toktype),
+                    nice_pair((slineno, elineno)), ttext, ltext
+                ))
+            if toktype == token.INDENT:
+                indent += 1
+            elif toktype == token.DEDENT:
+                indent -= 1
+            elif toktype == token.NAME and ttext == 'class':
+                # Class definitions look like branches in the byte code, so
+                # we need to exclude them.  The simplest way is to note the
+                # lines with the 'class' keyword.
+                self.classdefs.add(slineno)
+            elif toktype == token.OP and ttext == ':':
+                if not excluding and elineno in self.excluded:
+                    # Start excluding a suite.  We trigger off of the colon
+                    # token so that the #pragma comment will be recognized on
+                    # the same line as the colon.
+                    exclude_indent = indent
+                    excluding = True
+            elif toktype == token.STRING and prev_toktype == token.INDENT:
+                # Strings that are first on an indented line are docstrings.
+                # (a trick from trace.py in the stdlib.) This works for
+                # 99.9999% of cases.  For the rest (!) see:
+                # http://stackoverflow.com/questions/1769332/x/1769794#1769794
+                self.docstrings.update(range(slineno, elineno+1))
+            elif toktype == token.NEWLINE:
+                if first_line is not None and elineno != first_line:
+                    # We're at the end of a line, and we've ended on a
+                    # different line than the first line of the statement,
+                    # so record a multi-line range.
+                    for l in range(first_line, elineno+1):
+                        self.multiline[l] = first_line
+                first_line = None
+
+            if ttext.strip() and toktype != tokenize.COMMENT:
+                # A non-whitespace token.
+                empty = False
+                if first_line is None:
+                    # The token is not whitespace, and is the first in a
+                    # statement.
+                    first_line = slineno
+                    # Check whether to end an excluded suite.
+                    if excluding and indent <= exclude_indent:
+                        excluding = False
+                    if excluding:
+                        self.excluded.add(elineno)
+
+            prev_toktype = toktype
+
+        # Find the starts of the executable statements.
+        if not empty:
+            self.statement_starts.update(self.byte_parser._find_statements())
+
+    def first_line(self, line):
+        """Return the first line number of the statement including `line`."""
+        first_line = self.multiline.get(line)
+        if first_line:
+            return first_line
+        else:
+            return line
+
+    def first_lines(self, lines):
+        """Map the line numbers in `lines` to the correct first line of the
+        statement.
+
+        Returns a set of the first lines.
+
+        """
+        return set(self.first_line(l) for l in lines)
+
+    def translate_lines(self, lines):
+        """Implement `FileReporter.translate_lines`."""
+        return self.first_lines(lines)
+
+    def translate_arcs(self, arcs):
+        """Implement `FileReporter.translate_arcs`."""
+        return [
+            (self.first_line(a), self.first_line(b))
+            for (a, b) in arcs
+        ]
+
+    @expensive
+    def parse_source(self):
+        """Parse source text to find executable lines, excluded lines, etc.
+
+        Return values are 1) a set of executable line numbers, and 2) a set of
+        excluded line numbers.
+
+        Reported line numbers are normalized to the first line of multi-line
+        statements.
+
+        """
+        try:
+            self._raw_parse()
+        except (tokenize.TokenError, IndentationError) as err:
+            if hasattr(err, "lineno"):
+                lineno = err.lineno         # IndentationError
+            else:
+                lineno = err.args[1][0]     # TokenError
+            raise NotPython(
+                u"Couldn't parse '%s' as Python source: '%s' at line %d" % (
+                    self.filename, err.args[0], lineno
+                )
+            )
+
+        excluded_lines = self.first_lines(self.excluded)
+        ignore = set()
+        ignore.update(excluded_lines)
+        ignore.update(self.docstrings)
+        starts = self.statement_starts - ignore
+        lines = self.first_lines(starts)
+        lines -= ignore
+
+        return lines, excluded_lines
+
+    def arcs(self):
+        """Get information about the arcs available in the code.
+
+        Returns a set of line number pairs.  Line numbers have been normalized
+        to the first line of multi-line statements.
+
+        """
+        if self._all_arcs is None:
+            self._all_arcs = set()
+            for l1, l2 in self.byte_parser._all_arcs():
+                fl1 = self.first_line(l1)
+                fl2 = self.first_line(l2)
+                if fl1 != fl2:
+                    self._all_arcs.add((fl1, fl2))
+        return self._all_arcs
+
+    def exit_counts(self):
+        """Get a count of exits from each line.
+
+        Excluded lines are not counted.
+
+        """
+        excluded_lines = self.first_lines(self.excluded)
+        exit_counts = collections.defaultdict(int)
+        for l1, l2 in self.arcs():
+            if l1 < 0:
+                # Don't ever report -1 as a line number
+                continue
+            if l1 in excluded_lines:
+                # Don't report excluded lines as line numbers.
+                continue
+            if l2 in excluded_lines:
+                # Arcs to excluded lines shouldn't count.
+                continue
+            exit_counts[l1] += 1
+
+        # Class definitions have one extra exit, so remove one for each:
+        for l in self.classdefs:
+            # Ensure key is there: class definitions can include excluded lines.
+            if l in exit_counts:
+                exit_counts[l] -= 1
+
+        return exit_counts
+
+
+## Opcodes that guide the ByteParser.
+
+def _opcode(name):
+    """Return the opcode by name from the dis module."""
+    return dis.opmap[name]
+
+
+def _opcode_set(*names):
+    """Return a set of opcodes by the names in `names`."""
+    s = set()
+    for name in names:
+        try:
+            s.add(_opcode(name))
+        except KeyError:
+            pass
+    return s
+
+# Opcodes that leave the code object.
+OPS_CODE_END = _opcode_set('RETURN_VALUE')
+
+# Opcodes that unconditionally end the code chunk.
+OPS_CHUNK_END = _opcode_set(
+    'JUMP_ABSOLUTE', 'JUMP_FORWARD', 'RETURN_VALUE', 'RAISE_VARARGS',
+    'BREAK_LOOP', 'CONTINUE_LOOP',
+)
+
+# Opcodes that unconditionally begin a new code chunk.  By starting new chunks
+# with unconditional jump instructions, we neatly deal with jumps to jumps
+# properly.
+OPS_CHUNK_BEGIN = _opcode_set('JUMP_ABSOLUTE', 'JUMP_FORWARD')
+
+# Opcodes that push a block on the block stack.
+OPS_PUSH_BLOCK = _opcode_set(
+    'SETUP_LOOP', 'SETUP_EXCEPT', 'SETUP_FINALLY', 'SETUP_WITH'
+)
+
+# Block types for exception handling.
+OPS_EXCEPT_BLOCKS = _opcode_set('SETUP_EXCEPT', 'SETUP_FINALLY')
+
+# Opcodes that pop a block from the block stack.
+OPS_POP_BLOCK = _opcode_set('POP_BLOCK')
+
+# Opcodes that have a jump destination, but aren't really a jump.
+OPS_NO_JUMP = OPS_PUSH_BLOCK
+
+# Individual opcodes we need below.
+OP_BREAK_LOOP = _opcode('BREAK_LOOP')
+OP_END_FINALLY = _opcode('END_FINALLY')
+OP_COMPARE_OP = _opcode('COMPARE_OP')
+COMPARE_EXCEPTION = 10  # just have to get this constant from the code.
+OP_LOAD_CONST = _opcode('LOAD_CONST')
+OP_RETURN_VALUE = _opcode('RETURN_VALUE')
+
+
+class ByteParser(object):
+    """Parse byte codes to understand the structure of code."""
+
+    @contract(text='unicode')
+    def __init__(self, text, code=None, filename=None):
+        self.text = text
+        if code:
+            self.code = code
+        else:
+            try:
+                self.code = compile_unicode(text, filename, "exec")
+            except SyntaxError as synerr:
+                raise NotPython(
+                    u"Couldn't parse '%s' as Python source: '%s' at line %d" % (
+                        filename, synerr.msg, synerr.lineno
+                    )
+                )
+
+        # Alternative Python implementations don't always provide all the
+        # attributes on code objects that we need to do the analysis.
+        for attr in ['co_lnotab', 'co_firstlineno', 'co_consts', 'co_code']:
+            if not hasattr(self.code, attr):
+                raise CoverageException(
+                    "This implementation of Python doesn't support code analysis.\n"
+                    "Run coverage.py under CPython for this command."
+                )
+
+    def child_parsers(self):
+        """Iterate over all the code objects nested within this one.
+
+        The iteration includes `self` as its first value.
+
+        """
+        children = CodeObjects(self.code)
+        return (ByteParser(self.text, code=c) for c in children)
+
+    def _bytes_lines(self):
+        """Map byte offsets to line numbers in `code`.
+
+        Uses co_lnotab described in Python/compile.c to map byte offsets to
+        line numbers.  Produces a sequence: (b0, l0), (b1, l1), ...
+
+        Only byte offsets that correspond to line numbers are included in the
+        results.
+
+        """
+        # Adapted from dis.py in the standard library.
+        byte_increments = bytes_to_ints(self.code.co_lnotab[0::2])
+        line_increments = bytes_to_ints(self.code.co_lnotab[1::2])
+
+        last_line_num = None
+        line_num = self.code.co_firstlineno
+        byte_num = 0
+        for byte_incr, line_incr in zip(byte_increments, line_increments):
+            if byte_incr:
+                if line_num != last_line_num:
+                    yield (byte_num, line_num)
+                    last_line_num = line_num
+                byte_num += byte_incr
+            line_num += line_incr
+        if line_num != last_line_num:
+            yield (byte_num, line_num)
+
+    def _find_statements(self):
+        """Find the statements in `self.code`.
+
+        Produce a sequence of line numbers that start statements.  Recurses
+        into all code objects reachable from `self.code`.
+
+        """
+        for bp in self.child_parsers():
+            # Get all of the lineno information from this code.
+            for _, l in bp._bytes_lines():
+                yield l
+
+    def _block_stack_repr(self, block_stack):               # pragma: debugging
+        """Get a string version of `block_stack`, for debugging."""
+        blocks = ", ".join(
+            "(%s, %r)" % (dis.opname[b[0]], b[1]) for b in block_stack
+        )
+        return "[" + blocks + "]"
+
+    def _split_into_chunks(self):
+        """Split the code object into a list of `Chunk` objects.
+
+        Each chunk is only entered at its first instruction, though there can
+        be many exits from a chunk.
+
+        Returns a list of `Chunk` objects.
+
+        """
+        # The list of chunks so far, and the one we're working on.
+        chunks = []
+        chunk = None
+
+        # A dict mapping byte offsets of line starts to the line numbers.
+        bytes_lines_map = dict(self._bytes_lines())
+
+        # The block stack: loops and try blocks get pushed here for the
+        # implicit jumps that can occur.
+        # Each entry is a tuple: (block type, destination)
+        block_stack = []
+
+        # Some op codes are followed by branches that should be ignored.  This
+        # is a count of how many ignores are left.
+        ignore_branch = 0
+
+        # We have to handle the last two bytecodes specially.
+        ult = penult = None
+
+        # Get a set of all of the jump-to points.
+        jump_to = set()
+        bytecodes = list(ByteCodes(self.code.co_code))
+        for bc in bytecodes:
+            if bc.jump_to >= 0:
+                jump_to.add(bc.jump_to)
+
+        chunk_lineno = 0
+
+        # Walk the byte codes building chunks.
+        for bc in bytecodes:
+            # Maybe have to start a new chunk.
+            start_new_chunk = False
+            first_chunk = False
+            if bc.offset in bytes_lines_map:
+                # Start a new chunk for each source line number.
+                start_new_chunk = True
+                chunk_lineno = bytes_lines_map[bc.offset]
+                first_chunk = True
+            elif bc.offset in jump_to:
+                # To make chunks have a single entrance, we have to make a new
+                # chunk when we get to a place some bytecode jumps to.
+                start_new_chunk = True
+            elif bc.op in OPS_CHUNK_BEGIN:
+                # Jumps deserve their own unnumbered chunk.  This fixes
+                # problems with jumps to jumps getting confused.
+                start_new_chunk = True
+
+            if not chunk or start_new_chunk:
+                if chunk:
+                    chunk.exits.add(bc.offset)
+                chunk = Chunk(bc.offset, chunk_lineno, first_chunk)
+                if not chunks:
+                    # The very first chunk of a code object is always an
+                    # entrance.
+                    chunk.entrance = True
+                chunks.append(chunk)
+
+            # Look at the opcode.
+            if bc.jump_to >= 0 and bc.op not in OPS_NO_JUMP:
+                if ignore_branch:
+                    # Someone earlier wanted us to ignore this branch.
+                    ignore_branch -= 1
+                else:
+                    # The opcode has a jump, it's an exit for this chunk.
+                    chunk.exits.add(bc.jump_to)
+
+            if bc.op in OPS_CODE_END:
+                # The opcode can exit the code object.
+                chunk.exits.add(-self.code.co_firstlineno)
+            if bc.op in OPS_PUSH_BLOCK:
+                # The opcode adds a block to the block_stack.
+                block_stack.append((bc.op, bc.jump_to))
+            if bc.op in OPS_POP_BLOCK:
+                # The opcode pops a block from the block stack.
+                block_stack.pop()
+            if bc.op in OPS_CHUNK_END:
+                # This opcode forces the end of the chunk.
+                if bc.op == OP_BREAK_LOOP:
+                    # A break is implicit: jump where the top of the
+                    # block_stack points.
+                    chunk.exits.add(block_stack[-1][1])
+                chunk = None
+            if bc.op == OP_END_FINALLY:
+                # For the finally clause we need to find the closest exception
+                # block, and use its jump target as an exit.
+                for block in reversed(block_stack):
+                    if block[0] in OPS_EXCEPT_BLOCKS:
+                        chunk.exits.add(block[1])
+                        break
+            if bc.op == OP_COMPARE_OP and bc.arg == COMPARE_EXCEPTION:
+                # This is an except clause.  We want to overlook the next
+                # branch, so that except clauses don't count as branches.
+                ignore_branch += 1
+
+            penult = ult
+            ult = bc
+
+        if chunks:
+            # The last two bytecodes could be a dummy "return None" that
+            # shouldn't be counted as real code. Every Python code object seems
+            # to end with a return, and a "return None" is inserted if there
+            # isn't an explicit return in the source.
+            if ult and penult:
+                if penult.op == OP_LOAD_CONST and ult.op == OP_RETURN_VALUE:
+                    if self.code.co_consts[penult.arg] is None:
+                        # This is "return None", but is it dummy?  A real line
+                        # would be a last chunk all by itself.
+                        if chunks[-1].byte != penult.offset:
+                            ex = -self.code.co_firstlineno
+                            # Split the last chunk
+                            last_chunk = chunks[-1]
+                            last_chunk.exits.remove(ex)
+                            last_chunk.exits.add(penult.offset)
+                            chunk = Chunk(
+                                penult.offset, last_chunk.line, False
+                            )
+                            chunk.exits.add(ex)
+                            chunks.append(chunk)
+
+            # Give all the chunks a length.
+            chunks[-1].length = bc.next_offset - chunks[-1].byte
+            for i in range(len(chunks)-1):
+                chunks[i].length = chunks[i+1].byte - chunks[i].byte
+
+        #self.validate_chunks(chunks)
+        return chunks
+
+    def validate_chunks(self, chunks):                      # pragma: debugging
+        """Validate the rule that chunks have a single entrance."""
+        # starts is the entrances to the chunks
+        starts = set(ch.byte for ch in chunks)
+        for ch in chunks:
+            assert all((ex in starts or ex < 0) for ex in ch.exits)
+
+    def _arcs(self):
+        """Find the executable arcs in the code.
+
+        Yields pairs: (from,to).  From and to are integer line numbers.  If
+        from is < 0, then the arc is an entrance into the code object.  If to
+        is < 0, the arc is an exit from the code object.
+
+        """
+        chunks = self._split_into_chunks()
+
+        # A map from byte offsets to the chunk starting at that offset.
+        byte_chunks = dict((c.byte, c) for c in chunks)
+
+        # Traverse from the first chunk in each line, and yield arcs where
+        # the trace function will be invoked.
+        for chunk in chunks:
+            if chunk.entrance:
+                yield (-1, chunk.line)
+
+            if not chunk.first:
+                continue
+
+            chunks_considered = set()
+            chunks_to_consider = [chunk]
+            while chunks_to_consider:
+                # Get the chunk we're considering, and make sure we don't
+                # consider it again.
+                this_chunk = chunks_to_consider.pop()
+                chunks_considered.add(this_chunk)
+
+                # For each exit, add the line number if the trace function
+                # would be triggered, or add the chunk to those being
+                # considered if not.
+                for ex in this_chunk.exits:
+                    if ex < 0:
+                        yield (chunk.line, ex)
+                    else:
+                        next_chunk = byte_chunks[ex]
+                        if next_chunk in chunks_considered:
+                            continue
+
+                        # The trace function is invoked if visiting the first
+                        # bytecode in a line, or if the transition is a
+                        # backward jump.
+                        backward_jump = next_chunk.byte < this_chunk.byte
+                        if next_chunk.first or backward_jump:
+                            if next_chunk.line != chunk.line:
+                                yield (chunk.line, next_chunk.line)
+                        else:
+                            chunks_to_consider.append(next_chunk)
+
+    def _all_chunks(self):
+        """Returns a list of `Chunk` objects for this code and its children.
+
+        See `_split_into_chunks` for details.
+
+        """
+        chunks = []
+        for bp in self.child_parsers():
+            chunks.extend(bp._split_into_chunks())
+
+        return chunks
+
+    def _all_arcs(self):
+        """Get the set of all arcs in this code object and its children.
+
+        See `_arcs` for details.
+
+        """
+        arcs = set()
+        for bp in self.child_parsers():
+            arcs.update(bp._arcs())
+
+        return arcs
+
+
+class Chunk(object):
+    """A sequence of byte codes with a single entrance.
+
+    To analyze byte code, we have to divide it into chunks, sequences of byte
+    codes such that each chunk has only one entrance, the first instruction in
+    the block.
+
+    This is almost the CS concept of `basic block`_, except that we're willing
+    to have many exits from a chunk, and "basic block" is a more cumbersome
+    term.
+
+    .. _basic block: http://en.wikipedia.org/wiki/Basic_block
+
+    `byte` is the offset to the bytecode starting this chunk.
+
+    `line` is the source line number containing this chunk.
+
+    `first` is true if this is the first chunk in the source line.
+
+    An exit < 0 means the chunk can leave the code (return).  The exit is
+    the negative of the starting line number of the code block.
+
+    The `entrance` attribute is a boolean indicating whether the code object
+    can be entered at this chunk.
+
+    """
+    def __init__(self, byte, line, first):
+        self.byte = byte
+        self.line = line
+        self.first = first
+        self.length = 0
+        self.entrance = False
+        self.exits = set()
+
+    def __repr__(self):
+        return "<%d+%d @%d%s%s %r>" % (
+            self.byte,
+            self.length,
+            self.line,
+            "!" if self.first else "",
+            "v" if self.entrance else "",
+            list(self.exits),
+        )
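+
+
+# Illustrative sketch only, not part of the upstream module: running the
+# parser over a tiny made-up source.  Assumes the coverage package is
+# importable so the module-level imports above resolve.
+if __name__ == "__main__":      # pragma: no cover - example only
+    example_src = (
+        u"def f(x):\n"
+        u"    if x:  # pragma: no cover\n"
+        u"        return 1\n"
+        u"    return 2\n"
+    )
+    example_parser = PythonParser(
+        text=example_src, exclude=r"#\s*pragma: no cover",
+    )
+    statements, excluded = example_parser.parse_source()
+    print("statements:", sorted(statements))    # expected: [1, 4]
+    print("excluded:", sorted(excluded))        # expected: [2, 3]
+    print("exit counts:", dict(example_parser.exit_counts()))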
diff --git a/catapult/third_party/coverage/coverage/phystokens.py b/catapult/third_party/coverage/coverage/phystokens.py
new file mode 100644
index 0000000..b34b1c3
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/phystokens.py
@@ -0,0 +1,295 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Better tokenizing for coverage.py."""
+
+import codecs
+import keyword
+import re
+import sys
+import token
+import tokenize
+
+from coverage import env
+from coverage.backward import iternext
+from coverage.misc import contract
+
+
+def phys_tokens(toks):
+    """Return all physical tokens, even line continuations.
+
+    tokenize.generate_tokens() doesn't return a token for the backslash that
+    continues lines.  This wrapper provides those tokens so that we can
+    re-create a faithful representation of the original source.
+
+    Returns the same values as generate_tokens()
+
+    """
+    last_line = None
+    last_lineno = -1
+    last_ttype = None
+    for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks:
+        if last_lineno != elineno:
+            if last_line and last_line.endswith("\\\n"):
+                # We are at the beginning of a new line, and the last line
+                # ended with a backslash.  We probably have to inject a
+                # backslash token into the stream. Unfortunately, there's more
+                # to figure out.  This code::
+                #
+                #   usage = """\
+                #   HEY THERE
+                #   """
+                #
+                # triggers this condition, but the token text is::
+                #
+                #   '"""\\\nHEY THERE\n"""'
+                #
+                # so we need to figure out if the backslash is already in the
+                # string token or not.
+                inject_backslash = True
+                if last_ttype == tokenize.COMMENT:
+                    # Comments like this \
+                    # should never result in a new token.
+                    inject_backslash = False
+                elif ttype == token.STRING:
+                    if "\n" in ttext and ttext.split('\n', 1)[0][-1] == '\\':
+                        # It's a multi-line string and the first line ends with
+                        # a backslash, so we don't need to inject another.
+                        inject_backslash = False
+                if inject_backslash:
+                    # Figure out what column the backslash is in.
+                    ccol = len(last_line.split("\n")[-2]) - 1
+                    # Yield the token, with a fake token type.
+                    yield (
+                        99999, "\\\n",
+                        (slineno, ccol), (slineno, ccol+2),
+                        last_line
+                        )
+            last_line = ltext
+            last_ttype = ttype
+        yield ttype, ttext, (slineno, scol), (elineno, ecol), ltext
+        last_lineno = elineno
+
+
+@contract(source='unicode')
+def source_token_lines(source):
+    """Generate a series of lines, one for each line in `source`.
+
+    Each line is a list of pairs, each pair is a token::
+
+        [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ]
+
+    Each pair has a token class, and the token text.
+
+    If you concatenate all the token texts, and then join them with newlines,
+    you should have your original `source` back, with two differences:
+    trailing whitespace is not preserved, and a final line with no newline
+    is indistinguishable from a final line with a newline.
+
+    """
+
+    ws_tokens = set([token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL])
+    line = []
+    col = 0
+
+    # The \f is because of http://bugs.python.org/issue19035
+    source = source.expandtabs(8).replace('\r\n', '\n').replace('\f', ' ')
+    tokgen = generate_tokens(source)
+
+    for ttype, ttext, (_, scol), (_, ecol), _ in phys_tokens(tokgen):
+        mark_start = True
+        for part in re.split('(\n)', ttext):
+            if part == '\n':
+                yield line
+                line = []
+                col = 0
+                mark_end = False
+            elif part == '':
+                mark_end = False
+            elif ttype in ws_tokens:
+                mark_end = False
+            else:
+                if mark_start and scol > col:
+                    line.append(("ws", u" " * (scol - col)))
+                    mark_start = False
+                tok_class = tokenize.tok_name.get(ttype, 'xx').lower()[:3]
+                if ttype == token.NAME and keyword.iskeyword(ttext):
+                    tok_class = "key"
+                line.append((tok_class, part))
+                mark_end = True
+            scol = 0
+        if mark_end:
+            col = ecol
+
+    if line:
+        yield line
+
+
+class CachedTokenizer(object):
+    """A one-element cache around tokenize.generate_tokens.
+
+    When reporting, coverage.py tokenizes files twice, once to find the
+    structure of the file, and once to syntax-color it.  Tokenizing is
+    expensive, and easily cached.
+
+    This is a one-element cache so that our twice-in-a-row tokenizing doesn't
+    actually tokenize twice.
+
+    """
+    def __init__(self):
+        self.last_text = None
+        self.last_tokens = None
+
+    @contract(text='unicode')
+    def generate_tokens(self, text):
+        """A stand-in for `tokenize.generate_tokens`."""
+        if text != self.last_text:
+            self.last_text = text
+            readline = iternext(text.splitlines(True))
+            self.last_tokens = list(tokenize.generate_tokens(readline))
+        return self.last_tokens
+
+# Create our generate_tokens cache as a callable replacement function.
+generate_tokens = CachedTokenizer().generate_tokens
+
+
+COOKIE_RE = re.compile(r"^[ \t]*#.*coding[:=][ \t]*([-\w.]+)", flags=re.MULTILINE)
+
+@contract(source='bytes')
+def _source_encoding_py2(source):
+    """Determine the encoding for `source`, according to PEP 263.
+
+    `source` is a byte string, the text of the program.
+
+    Returns a string, the name of the encoding.
+
+    """
+    assert isinstance(source, bytes)
+
+    # Do this so the detect_encoding code we copied will work.
+    readline = iternext(source.splitlines(True))
+
+    # This is mostly code adapted from Py3.2's tokenize module.
+
+    def _get_normal_name(orig_enc):
+        """Imitates get_normal_name in tokenizer.c."""
+        # Only care about the first 12 characters.
+        enc = orig_enc[:12].lower().replace("_", "-")
+        if re.match(r"^utf-8($|-)", enc):
+            return "utf-8"
+        if re.match(r"^(latin-1|iso-8859-1|iso-latin-1)($|-)", enc):
+            return "iso-8859-1"
+        return orig_enc
+
+    # From detect_encoding():
+    # It detects the encoding from the presence of a UTF-8 BOM or an encoding
+    # cookie as specified in PEP-0263.  If both a BOM and a cookie are present,
+    # but disagree, a SyntaxError will be raised.  If the encoding cookie is an
+    # invalid charset, raise a SyntaxError.  Note that if a UTF-8 BOM is found,
+    # 'utf-8-sig' is returned.
+
+    # If no encoding is specified, then the default will be returned.
+    default = 'ascii'
+
+    bom_found = False
+    encoding = None
+
+    def read_or_stop():
+        """Get the next source line, or ''."""
+        try:
+            return readline()
+        except StopIteration:
+            return ''
+
+    def find_cookie(line):
+        """Find an encoding cookie in `line`."""
+        try:
+            line_string = line.decode('ascii')
+        except UnicodeDecodeError:
+            return None
+
+        matches = COOKIE_RE.findall(line_string)
+        if not matches:
+            return None
+        encoding = _get_normal_name(matches[0])
+        try:
+            codec = codecs.lookup(encoding)
+        except LookupError:
+            # This behavior mimics the Python interpreter
+            raise SyntaxError("unknown encoding: " + encoding)
+
+        if bom_found:
+            # codecs in Python 2.3 were raw tuples of functions, assume the best.
+            codec_name = getattr(codec, 'name', encoding)
+            if codec_name != 'utf-8':
+                # This behavior mimics the Python interpreter
+                raise SyntaxError('encoding problem: utf-8')
+            encoding += '-sig'
+        return encoding
+
+    first = read_or_stop()
+    if first.startswith(codecs.BOM_UTF8):
+        bom_found = True
+        first = first[3:]
+        default = 'utf-8-sig'
+    if not first:
+        return default
+
+    encoding = find_cookie(first)
+    if encoding:
+        return encoding
+
+    second = read_or_stop()
+    if not second:
+        return default
+
+    encoding = find_cookie(second)
+    if encoding:
+        return encoding
+
+    return default
+
+
+@contract(source='bytes')
+def _source_encoding_py3(source):
+    """Determine the encoding for `source`, according to PEP 263.
+
+    `source` is a byte string: the text of the program.
+
+    Returns a string, the name of the encoding.
+
+    """
+    readline = iternext(source.splitlines(True))
+    return tokenize.detect_encoding(readline)[0]
+
+
+if env.PY3:
+    source_encoding = _source_encoding_py3
+else:
+    source_encoding = _source_encoding_py2
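+
+# Example (illustrative only, not in the upstream source): a PEP 263 cookie in
+# the first two lines is reported as the declared encoding.
+#
+#     source_encoding(b"# -*- coding: iso-8859-1 -*-\nx = 1\n")   # 'iso-8859-1'
+#
+# Without a cookie or BOM, the default is returned ('ascii' on Python 2,
+# 'utf-8' on Python 3).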
+
+
+@contract(source='unicode')
+def compile_unicode(source, filename, mode):
+    """Just like the `compile` builtin, but works on any Unicode string.
+
+    Python 2's compile() builtin has a stupid restriction: if the source string
+    is Unicode, then it may not have an encoding declaration in it.  Why not?
+    Who knows!  It also decodes to utf8, and then tries to interpret those utf8
+    bytes according to the encoding declaration.  Why? Who knows!
+
+    This function neuters the coding declaration, and compiles it.
+
+    """
+    source = neuter_encoding_declaration(source)
+    if env.PY2 and isinstance(filename, unicode):
+        filename = filename.encode(sys.getfilesystemencoding(), "replace")
+    code = compile(source, filename, mode)
+    return code
+
+
+@contract(source='unicode', returns='unicode')
+def neuter_encoding_declaration(source):
+    """Return `source`, with any encoding declaration neutered."""
+    source = COOKIE_RE.sub("# (deleted declaration)", source, count=1)
+    return source
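+
+
+# Illustrative sketch only, not part of the upstream module: what
+# source_token_lines() yields for a tiny made-up program.  Assumes the
+# coverage package is importable so the module-level imports above resolve.
+if __name__ == "__main__":      # pragma: no cover - example only
+    example_source = u"def hello():\n    return 42\n"
+    for line_tokens in source_token_lines(example_source):
+        # Each line is a list of (class, text) pairs, e.g.
+        # [('key', u'def'), ('ws', u' '), ('nam', u'hello'), ('op', u'('), ...]
+        print(line_tokens)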
diff --git a/catapult/third_party/coverage/coverage/pickle2json.py b/catapult/third_party/coverage/coverage/pickle2json.py
new file mode 100644
index 0000000..95b42ef
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/pickle2json.py
@@ -0,0 +1,47 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Convert pickle to JSON for coverage.py."""
+
+from coverage.backward import pickle
+from coverage.data import CoverageData
+
+
+def pickle_read_raw_data(cls_unused, file_obj):
+    """Replacement for CoverageData._read_raw_data."""
+    return pickle.load(file_obj)
+
+
+def pickle2json(infile, outfile):
+    """Convert a coverage.py 3.x pickle data file to a 4.x JSON data file."""
+    try:
+        old_read_raw_data = CoverageData._read_raw_data
+        CoverageData._read_raw_data = pickle_read_raw_data
+
+        covdata = CoverageData()
+
+        with open(infile, 'rb') as inf:
+            covdata.read_fileobj(inf)
+
+        covdata.write_file(outfile)
+    finally:
+        CoverageData._read_raw_data = old_read_raw_data
+
+
+if __name__ == "__main__":
+    from optparse import OptionParser
+
+    parser = OptionParser(usage="usage: %s [options]" % __file__)
+    parser.description = "Convert .coverage files from pickle to JSON format"
+    parser.add_option(
+        "-i", "--input-file", action="store", default=".coverage",
+        help="Name of input file. Default .coverage",
+    )
+    parser.add_option(
+        "-o", "--output-file", action="store", default=".coverage",
+        help="Name of output file. Default .coverage",
+    )
+
+    (options, args) = parser.parse_args()
+
+    pickle2json(options.input_file, options.output_file)
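+
+    # Example invocation (illustrative only, file names are made up):
+    #
+    #     python pickle2json.py -i .coverage.pickled -o .coverage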
diff --git a/catapult/third_party/coverage/coverage/plugin.py b/catapult/third_party/coverage/coverage/plugin.py
new file mode 100644
index 0000000..f870c25
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/plugin.py
@@ -0,0 +1,381 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Plugin interfaces for coverage.py"""
+
+from coverage import files
+from coverage.misc import contract, _needs_to_implement
+
+
+class CoveragePlugin(object):
+    """Base class for coverage.py plugins.
+
+    To write a coverage.py plugin, create a module with a subclass of
+    :class:`CoveragePlugin`.  You will override methods in your class to
+    participate in various aspects of coverage.py's processing.
+
+    Currently the only plugin type is a file tracer, for implementing
+    measurement support for non-Python files.  File tracer plugins implement
+    the :meth:`file_tracer` method to claim files and the :meth:`file_reporter`
+    method to report on those files.
+
+    Any plugin can optionally implement :meth:`sys_info` to provide debugging
+    information about their operation.
+
+    Coverage.py will store its own information on your plugin object, using
+    attributes whose names start with ``_coverage_``.  Don't be startled.
+
+    To register your plugin, define a function called `coverage_init` in your
+    module::
+
+        def coverage_init(reg, options):
+            reg.add_file_tracer(MyPlugin())
+
+    You use the `reg` parameter passed to your `coverage_init` function to
+    register your plugin object.  It has one method, `add_file_tracer`, which
+    takes a newly created instance of your plugin.
+
+    If your plugin takes options, the `options` parameter is a dictionary of
+    your plugin's options from the coverage.py configuration file.  Use them
+    however you want to configure your object before registering it.
+
+    """
+
+    def file_tracer(self, filename):        # pylint: disable=unused-argument
+        """Get a :class:`FileTracer` object for a file.
+
+        Every Python source file is offered to the plugin to give it a chance
+        to take responsibility for tracing the file.  If your plugin can handle
+        the file, then return a :class:`FileTracer` object.  Otherwise return
+        None.
+
+        There is no way to register your plugin for particular files.  Instead,
+        this method is invoked for all files, and the plugin decides whether it
+        can trace the file or not.  Be prepared for `filename` to refer to all
+        kinds of files that have nothing to do with your plugin.
+
+        The file name will be that of a Python file being executed.  There are two
+        broad categories of behavior for a plugin, depending on the kind of
+        files your plugin supports:
+
+        * Static file names: each of your original source files has been
+          converted into a distinct Python file.  Your plugin is invoked with
+          the Python file name, and it maps it back to its original source
+          file.
+
+        * Dynamic file names: all of your source files are executed by the same
+          Python file.  In this case, your plugin implements
+          :meth:`FileTracer.dynamic_source_filename` to provide the actual
+          source file for each execution frame.
+
+        `filename` is a string, the path to the file being considered.  This is
+        the absolute real path to the file.  If you are comparing to other
+        paths, be sure to take this into account.
+
+        Returns a :class:`FileTracer` object to use to trace `filename`, or
+        None if this plugin cannot trace this file.
+
+        """
+        return None
+
+    def file_reporter(self, filename):      # pylint: disable=unused-argument
+        """Get the :class:`FileReporter` class to use for a file.
+
+        This will only be invoked if :meth:`file_tracer` returned non-None
+        for `filename`.  It's an error to return None from this method.
+
+        Returns a :class:`FileReporter` object to use to report on `filename`.
+
+        """
+        _needs_to_implement(self, "file_reporter")
+
+    def sys_info(self):
+        """Get a list of information useful for debugging.
+
+        This method will be invoked for ``--debug=sys``.  Your
+        plugin can return any information it wants to be displayed.
+
+        Returns a list of pairs: `[(name, value), ...]`.
+
+        """
+        return []
+
+
+class FileTracer(object):
+    """Support needed for files during the execution phase.
+
+    You may construct this object from :meth:`CoveragePlugin.file_tracer` any
+    way you like.  A natural choice would be to pass the file name given to
+    `file_tracer`.
+
+    `FileTracer` objects should only be created in the
+    :meth:`CoveragePlugin.file_tracer` method.
+
+    See :ref:`howitworks` for details of the different coverage.py phases.
+
+    """
+
+    def source_filename(self):
+        """The source file name for this file.
+
+        This may be any file name you like.  A key responsibility of a plugin
+        is to own the mapping from Python execution back to whatever source
+        file name was originally the source of the code.
+
+        See :meth:`CoveragePlugin.file_tracer` for details about static and
+        dynamic file names.
+
+        Returns the file name to credit with this execution.
+
+        """
+        _needs_to_implement(self, "source_filename")
+
+    def has_dynamic_source_filename(self):
+        """Does this FileTracer have dynamic source file names?
+
+        FileTracers can provide dynamically determined file names by
+        implementing :meth:`dynamic_source_filename`.  Invoking that method is
+        expensive, so coverage.py uses the result of this method to decide
+        whether :meth:`dynamic_source_filename` needs to be called at all.
+
+        See :meth:`CoveragePlugin.file_tracer` for details about static and
+        dynamic file names.
+
+        Returns True if :meth:`dynamic_source_filename` should be called to get
+        dynamic source file names.
+
+        """
+        return False
+
+    def dynamic_source_filename(self, filename, frame):     # pylint: disable=unused-argument
+        """Get a dynamically computed source file name.
+
+        Some plugins need to compute the source file name dynamically for each
+        frame.
+
+        This function will not be invoked if
+        :meth:`has_dynamic_source_filename` returns False.
+
+        Returns the source file name for this frame, or None if this frame
+        shouldn't be measured.
+
+        """
+        return None
+
+    def line_number_range(self, frame):
+        """Get the range of source line numbers for a given call frame.
+
+        The call frame is examined, and the source line number in the original
+        file is returned.  The return value is a pair of numbers, the starting
+        line number and the ending line number, both inclusive.  For example,
+        returning (5, 7) means that lines 5, 6, and 7 should be considered
+        executed.
+
+        This function might decide that the frame doesn't indicate any lines
+        from the source file were executed.  Return (-1, -1) in this case to
+        tell coverage.py that no lines should be recorded for this frame.
+
+        """
+        lineno = frame.f_lineno
+        return lineno, lineno
+
+
+class FileReporter(object):
+    """Support needed for files during the analysis and reporting phases.
+
+    See :ref:`howitworks` for details of the different coverage.py phases.
+
+    `FileReporter` objects should only be created in the
+    :meth:`CoveragePlugin.file_reporter` method.
+
+    There are many methods here, but only :meth:`lines` is required, to provide
+    the set of executable lines in the file.
+
+    """
+
+    def __init__(self, filename):
+        """Simple initialization of a `FileReporter`.
+
+        The `filename` argument is the path to the file being reported.  This
+        will be available as the `.filename` attribute on the object.  Other
+        method implementations on this base class rely on this attribute.
+
+        """
+        self.filename = filename
+
+    def __repr__(self):
+        return "<{0.__class__.__name__} filename={0.filename!r}>".format(self)
+
+    def relative_filename(self):
+        """Get the relative file name for this file.
+
+        This file path will be displayed in reports.  The default
+        implementation will supply the actual project-relative file path.  You
+        only need to supply this method if you have an unusual syntax for file
+        paths.
+
+        """
+        return files.relative_filename(self.filename)
+
+    @contract(returns='unicode')
+    def source(self):
+        """Get the source for the file.
+
+        Returns a Unicode string.
+
+        The base implementation simply reads the `self.filename` file and
+        decodes it as UTF8.  Override this method if your file isn't readable
+        as a text file, or if you need other encoding support.
+
+        """
+        with open(self.filename, "rb") as f:
+            return f.read().decode("utf8")
+
+    def lines(self):
+        """Get the executable lines in this file.
+
+        Your plugin must determine which lines in the file were possibly
+        executable.  This method returns a set of those line numbers.
+
+        Returns a set of line numbers.
+
+        """
+        _needs_to_implement(self, "lines")
+
+    def excluded_lines(self):
+        """Get the excluded executable lines in this file.
+
+        Your plugin can use any method it likes to allow the user to exclude
+        executable lines from consideration.
+
+        Returns a set of line numbers.
+
+        The base implementation returns the empty set.
+
+        """
+        return set()
+
+    def translate_lines(self, lines):
+        """Translate recorded lines into reported lines.
+
+        Some file formats will want to report lines slightly differently than
+        they are recorded.  For example, Python records the last line of a
+        multi-line statement, but reports are nicer if they mention the first
+        line.
+
+        Your plugin can optionally define this method to perform these kinds of
+        adjustment.
+
+        `lines` is a sequence of integers, the recorded line numbers.
+
+        Returns a set of integers, the adjusted line numbers.
+
+        The base implementation returns the numbers unchanged.
+
+        """
+        return set(lines)
+
+    def arcs(self):
+        """Get the executable arcs in this file.
+
+        To support branch coverage, your plugin needs to be able to indicate
+        possible execution paths, as a set of line number pairs.  Each pair is
+        a `(prev, next)` pair indicating that execution can transition from the
+        `prev` line number to the `next` line number.
+
+        Returns a set of pairs of line numbers.  The default implementation
+        returns an empty set.
+
+        """
+        return set()
+
+    def no_branch_lines(self):
+        """Get the lines excused from branch coverage in this file.
+
+        Your plugin can use any method it likes to allow the user to exclude
+        lines from consideration of branch coverage.
+
+        Returns a set of line numbers.
+
+        The base implementation returns the empty set.
+
+        """
+        return set()
+
+    def translate_arcs(self, arcs):
+        """Translate recorded arcs into reported arcs.
+
+        Similar to :meth:`translate_lines`, but for arcs.  `arcs` is a set of
+        line number pairs.
+
+        Returns a set of line number pairs.
+
+        The default implementation returns `arcs` unchanged.
+
+        """
+        return arcs
+
+    def exit_counts(self):
+        """Get a count of exits from each line.
+
+        To determine which lines are branches, coverage.py looks for lines that
+        have more than one exit.  This function creates a dict mapping each
+        executable line number to a count of how many exits it has.
+
+        To be honest, this feels wrong, and should be refactored.  Let me know
+        if you attempt to implement this...
+
+        """
+        return {}
+
+    def source_token_lines(self):
+        """Generate a series of tokenized lines, one for each line in `source`.
+
+        These tokens are used for syntax-colored reports.
+
+        Each line is a list of pairs, each pair is a token::
+
+            [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ]
+
+        Each pair has a token class, and the token text.  The token classes
+        are:
+
+        * ``'com'``: a comment
+        * ``'key'``: a keyword
+        * ``'nam'``: a name, or identifier
+        * ``'num'``: a number
+        * ``'op'``: an operator
+        * ``'str'``: a string literal
+        * ``'txt'``: some other kind of text
+
+        If you concatenate all the token texts, and then join them with
+        newlines, you should have your original source back.
+
+        The default implementation simply returns each line tagged as
+        ``'txt'``.
+
+        """
+        for line in self.source().splitlines():
+            yield [('txt', line)]
+
+    # Annoying comparison operators. Py3k wants __lt__ etc, and Py2k needs all
+    # of them defined.
+
+    def __eq__(self, other):
+        return isinstance(other, FileReporter) and self.filename == other.filename
+
+    def __ne__(self, other):
+        return not (self == other)
+
+    def __lt__(self, other):
+        return self.filename < other.filename
+
+    def __le__(self, other):
+        return self.filename <= other.filename
+
+    def __gt__(self, other):
+        return self.filename > other.filename
+
+    def __ge__(self, other):
+        return self.filename >= other.filename
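+
+
+# Illustrative sketch only, not part of the upstream module: a minimal file
+# tracer plugin built on the interfaces above.  The ".xyz" suffix and class
+# names are invented; a real plugin lives in its own module and defines
+# coverage_init(reg, options) there, as described in CoveragePlugin's
+# docstring.
+#
+#     class XyzPlugin(CoveragePlugin):
+#         def file_tracer(self, filename):
+#             if filename.endswith(".xyz"):
+#                 return XyzTracer(filename)
+#             return None
+#
+#         def file_reporter(self, filename):
+#             return XyzReporter(filename)
+#
+#     class XyzTracer(FileTracer):
+#         def __init__(self, filename):
+#             self._filename = filename
+#
+#         def source_filename(self):
+#             return self._filename
+#
+#     class XyzReporter(FileReporter):
+#         def lines(self):
+#             # Treat every non-blank line of the source as executable.
+#             return set(
+#                 num for num, text in enumerate(self.source().splitlines(), 1)
+#                 if text.strip()
+#             )
+#
+#     def coverage_init(reg, options):
+#         reg.add_file_tracer(XyzPlugin())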
diff --git a/catapult/third_party/coverage/coverage/plugin_support.py b/catapult/third_party/coverage/coverage/plugin_support.py
new file mode 100644
index 0000000..8a4fbec
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/plugin_support.py
@@ -0,0 +1,247 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Support for plugins."""
+
+import os
+import os.path
+import sys
+
+from coverage.misc import CoverageException, isolate_module
+from coverage.plugin import CoveragePlugin, FileTracer, FileReporter
+
+os = isolate_module(os)
+
+
+class Plugins(object):
+    """The currently loaded collection of coverage.py plugins."""
+
+    def __init__(self):
+        self.order = []
+        self.names = {}
+        self.file_tracers = []
+
+        self.current_module = None
+        self.debug = None
+
+    @classmethod
+    def load_plugins(cls, modules, config, debug=None):
+        """Load plugins from `modules`.
+
+        Returns a list of loaded and configured plugins.
+
+        """
+        plugins = cls()
+        plugins.debug = debug
+
+        for module in modules:
+            plugins.current_module = module
+            __import__(module)
+            mod = sys.modules[module]
+
+            coverage_init = getattr(mod, "coverage_init", None)
+            if not coverage_init:
+                raise CoverageException(
+                    "Plugin module %r didn't define a coverage_init function" % module
+                )
+
+            options = config.get_plugin_options(module)
+            coverage_init(plugins, options)
+
+        plugins.current_module = None
+        return plugins
+
+    def add_file_tracer(self, plugin):
+        """Add a file tracer plugin.
+
+        `plugin` is an instance of a third-party plugin class.  It must
+        implement the :meth:`CoveragePlugin.file_tracer` method.
+
+        """
+        self._add_plugin(plugin, self.file_tracers)
+
+    def add_noop(self, plugin):
+        """Add a plugin that does nothing.
+
+        This is only useful for testing the plugin support.
+
+        """
+        self._add_plugin(plugin, None)
+
+    def _add_plugin(self, plugin, specialized):
+        """Add a plugin object.
+
+        `plugin` is a :class:`CoveragePlugin` instance to add.  `specialized`
+        is a list to append the plugin to.
+
+        """
+        plugin_name = "%s.%s" % (self.current_module, plugin.__class__.__name__)
+        if self.debug and self.debug.should('plugin'):
+            self.debug.write("Loaded plugin %r: %r" % (self.current_module, plugin))
+            labelled = LabelledDebug("plugin %r" % (self.current_module,), self.debug)
+            plugin = DebugPluginWrapper(plugin, labelled)
+
+        # pylint: disable=attribute-defined-outside-init
+        plugin._coverage_plugin_name = plugin_name
+        plugin._coverage_enabled = True
+        self.order.append(plugin)
+        self.names[plugin_name] = plugin
+        if specialized is not None:
+            specialized.append(plugin)
+
+    def __nonzero__(self):
+        return bool(self.order)
+
+    __bool__ = __nonzero__
+
+    def __iter__(self):
+        return iter(self.order)
+
+    def get(self, plugin_name):
+        """Return a plugin by name."""
+        return self.names[plugin_name]
+
+
+class LabelledDebug(object):
+    """A Debug writer, but with labels for prepending to the messages."""
+
+    def __init__(self, label, debug, prev_labels=()):
+        self.labels = list(prev_labels) + [label]
+        self.debug = debug
+
+    def add_label(self, label):
+        """Add a label to the writer, and return a new `LabelledDebug`."""
+        return LabelledDebug(label, self.debug, self.labels)
+
+    def message_prefix(self):
+        """The prefix to use on messages, combining the labels."""
+        prefixes = self.labels + ['']
+        return ":\n".join("  "*i+label for i, label in enumerate(prefixes))
+
+    def write(self, message):
+        """Write `message`, but with the labels prepended."""
+        self.debug.write("%s%s" % (self.message_prefix(), message))
+
+
+class DebugPluginWrapper(CoveragePlugin):
+    """Wrap a plugin, and use debug to report on what it's doing."""
+
+    def __init__(self, plugin, debug):
+        super(DebugPluginWrapper, self).__init__()
+        self.plugin = plugin
+        self.debug = debug
+
+    def file_tracer(self, filename):
+        tracer = self.plugin.file_tracer(filename)
+        self.debug.write("file_tracer(%r) --> %r" % (filename, tracer))
+        if tracer:
+            debug = self.debug.add_label("file %r" % (filename,))
+            tracer = DebugFileTracerWrapper(tracer, debug)
+        return tracer
+
+    def file_reporter(self, filename):
+        reporter = self.plugin.file_reporter(filename)
+        self.debug.write("file_reporter(%r) --> %r" % (filename, reporter))
+        if reporter:
+            debug = self.debug.add_label("file %r" % (filename,))
+            reporter = DebugFileReporterWrapper(filename, reporter, debug)
+        return reporter
+
+    def sys_info(self):
+        return self.plugin.sys_info()
+
+
+class DebugFileTracerWrapper(FileTracer):
+    """A debugging `FileTracer`."""
+
+    def __init__(self, tracer, debug):
+        self.tracer = tracer
+        self.debug = debug
+
+    def _show_frame(self, frame):
+        """A short string identifying a frame, for debug messages."""
+        return "%s@%d" % (
+            os.path.basename(frame.f_code.co_filename),
+            frame.f_lineno,
+        )
+
+    def source_filename(self):
+        sfilename = self.tracer.source_filename()
+        self.debug.write("source_filename() --> %r" % (sfilename,))
+        return sfilename
+
+    def has_dynamic_source_filename(self):
+        has = self.tracer.has_dynamic_source_filename()
+        self.debug.write("has_dynamic_source_filename() --> %r" % (has,))
+        return has
+
+    def dynamic_source_filename(self, filename, frame):
+        dyn = self.tracer.dynamic_source_filename(filename, frame)
+        self.debug.write("dynamic_source_filename(%r, %s) --> %r" % (
+            filename, self._show_frame(frame), dyn,
+        ))
+        return dyn
+
+    def line_number_range(self, frame):
+        pair = self.tracer.line_number_range(frame)
+        self.debug.write("line_number_range(%s) --> %r" % (self._show_frame(frame), pair))
+        return pair
+
+
+class DebugFileReporterWrapper(FileReporter):
+    """A debugging `FileReporter`."""
+
+    def __init__(self, filename, reporter, debug):
+        super(DebugFileReporterWrapper, self).__init__(filename)
+        self.reporter = reporter
+        self.debug = debug
+
+    def relative_filename(self):
+        ret = self.reporter.relative_filename()
+        self.debug.write("relative_filename() --> %r" % (ret,))
+        return ret
+
+    def lines(self):
+        ret = self.reporter.lines()
+        self.debug.write("lines() --> %r" % (ret,))
+        return ret
+
+    def excluded_lines(self):
+        ret = self.reporter.excluded_lines()
+        self.debug.write("excluded_lines() --> %r" % (ret,))
+        return ret
+
+    def translate_lines(self, lines):
+        ret = self.reporter.translate_lines(lines)
+        self.debug.write("translate_lines(%r) --> %r" % (lines, ret))
+        return ret
+
+    def translate_arcs(self, arcs):
+        ret = self.reporter.translate_arcs(arcs)
+        self.debug.write("translate_arcs(%r) --> %r" % (arcs, ret))
+        return ret
+
+    def no_branch_lines(self):
+        ret = self.reporter.no_branch_lines()
+        self.debug.write("no_branch_lines() --> %r" % (ret,))
+        return ret
+
+    def exit_counts(self):
+        ret = self.reporter.exit_counts()
+        self.debug.write("exit_counts() --> %r" % (ret,))
+        return ret
+
+    def arcs(self):
+        ret = self.reporter.arcs()
+        self.debug.write("arcs() --> %r" % (ret,))
+        return ret
+
+    def source(self):
+        ret = self.reporter.source()
+        self.debug.write("source() --> %d chars" % (len(ret),))
+        return ret
+
+    def source_token_lines(self):
+        ret = list(self.reporter.source_token_lines())
+        self.debug.write("source_token_lines() --> %d tokens" % (len(ret),))
+        return ret
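
For orientation, a minimal sketch of the registration protocol that Plugins.load_plugins expects from a plugin module; the module and class names here (my_plugin, MyPlugin) are hypothetical, and only coverage_init and add_file_tracer come from the code above::

    # my_plugin.py -- a hypothetical module named in the [run] plugins setting.
    from coverage.plugin import CoveragePlugin

    class MyPlugin(CoveragePlugin):
        """A do-nothing plugin: returning None from file_tracer declines a file."""
        def file_tracer(self, filename):
            return None

    def coverage_init(reg, options):
        # `reg` is the Plugins collection; load_plugins() calls this once per
        # configured plugin module, passing that module's own options.
        reg.add_file_tracer(MyPlugin())
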
diff --git a/catapult/third_party/coverage/coverage/python.py b/catapult/third_party/coverage/coverage/python.py
new file mode 100644
index 0000000..4f58973
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/python.py
@@ -0,0 +1,198 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Python source expertise for coverage.py"""
+
+import os.path
+import zipimport
+
+from coverage import env, files
+from coverage.misc import contract, expensive, NoSource, join_regex, isolate_module
+from coverage.parser import PythonParser
+from coverage.phystokens import source_token_lines, source_encoding
+from coverage.plugin import FileReporter
+
+os = isolate_module(os)
+
+
+@contract(returns='bytes')
+def read_python_source(filename):
+    """Read the Python source text from `filename`.
+
+    Returns bytes.
+
+    """
+    with open(filename, "rb") as f:
+        return f.read().replace(b"\r\n", b"\n").replace(b"\r", b"\n")
+
+
+@contract(returns='unicode')
+def get_python_source(filename):
+    """Return the source code, as unicode."""
+    base, ext = os.path.splitext(filename)
+    if ext == ".py" and env.WINDOWS:
+        exts = [".py", ".pyw"]
+    else:
+        exts = [ext]
+
+    for ext in exts:
+        try_filename = base + ext
+        if os.path.exists(try_filename):
+            # A regular text file: open it.
+            source = read_python_source(try_filename)
+            break
+
+        # Maybe it's in a zip file?
+        source = get_zip_bytes(try_filename)
+        if source is not None:
+            break
+    else:
+        # Couldn't find source.
+        raise NoSource("No source for code: '%s'." % filename)
+
+    source = source.decode(source_encoding(source), "replace")
+
+    # Python code should always end with a line with a newline.
+    if source and source[-1] != '\n':
+        source += '\n'
+
+    return source
+
+
+@contract(returns='bytes|None')
+def get_zip_bytes(filename):
+    """Get data from `filename` if it is a zip file path.
+
+    Returns the bytestring data read from the zip file, or None if no zip file
+    could be found or `filename` isn't in it.  The data returned will be
+    an empty string if the file is empty.
+
+    """
+    markers = ['.zip'+os.sep, '.egg'+os.sep]
+    for marker in markers:
+        if marker in filename:
+            parts = filename.split(marker)
+            try:
+                zi = zipimport.zipimporter(parts[0]+marker[:-1])
+            except zipimport.ZipImportError:
+                continue
+            try:
+                data = zi.get_data(parts[1])
+            except IOError:
+                continue
+            return data
+    return None
+
+
+class PythonFileReporter(FileReporter):
+    """Report support for a Python file."""
+
+    def __init__(self, morf, coverage=None):
+        self.coverage = coverage
+
+        if hasattr(morf, '__file__'):
+            filename = morf.__file__
+        else:
+            filename = morf
+
+        filename = files.unicode_filename(filename)
+
+        # .pyc files should always refer to a .py instead.
+        if filename.endswith(('.pyc', '.pyo')):
+            filename = filename[:-1]
+        elif filename.endswith('$py.class'):   # Jython
+            filename = filename[:-9] + ".py"
+
+        super(PythonFileReporter, self).__init__(files.canonical_filename(filename))
+
+        if hasattr(morf, '__name__'):
+            name = morf.__name__
+            name = name.replace(".", os.sep) + ".py"
+            name = files.unicode_filename(name)
+        else:
+            name = files.relative_filename(filename)
+        self.relname = name
+
+        self._source = None
+        self._parser = None
+        self._statements = None
+        self._excluded = None
+
+    @contract(returns='unicode')
+    def relative_filename(self):
+        return self.relname
+
+    @property
+    def parser(self):
+        """Lazily create a :class:`PythonParser`."""
+        if self._parser is None:
+            self._parser = PythonParser(
+                filename=self.filename,
+                exclude=self.coverage._exclude_regex('exclude'),
+            )
+        return self._parser
+
+    @expensive
+    def lines(self):
+        """Return the line numbers of statements in the file."""
+        if self._statements is None:
+            self._statements, self._excluded = self.parser.parse_source()
+        return self._statements
+
+    @expensive
+    def excluded_lines(self):
+        """Return the line numbers of statements in the file."""
+        if self._excluded is None:
+            self._statements, self._excluded = self.parser.parse_source()
+        return self._excluded
+
+    def translate_lines(self, lines):
+        return self.parser.translate_lines(lines)
+
+    def translate_arcs(self, arcs):
+        return self.parser.translate_arcs(arcs)
+
+    @expensive
+    def no_branch_lines(self):
+        no_branch = self.parser.lines_matching(
+            join_regex(self.coverage.config.partial_list),
+            join_regex(self.coverage.config.partial_always_list)
+            )
+        return no_branch
+
+    @expensive
+    def arcs(self):
+        return self.parser.arcs()
+
+    @expensive
+    def exit_counts(self):
+        return self.parser.exit_counts()
+
+    @contract(returns='unicode')
+    def source(self):
+        if self._source is None:
+            self._source = get_python_source(self.filename)
+        return self._source
+
+    def should_be_python(self):
+        """Does it seem like this file should contain Python?
+
+        This is used to decide if a file reported as part of the execution of
+        a program was really likely to have contained Python in the first
+        place.
+
+        """
+        # Get the file extension.
+        _, ext = os.path.splitext(self.filename)
+
+        # Anything named *.py* should be Python.
+        if ext.startswith('.py'):
+            return True
+        # A file with no extension should be Python.
+        if not ext:
+            return True
+        # Everything else is probably not Python.
+        return False
+
+    def source_token_lines(self):
+        return source_token_lines(self.source())
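
A small sketch of the guarantees get_python_source provides (assuming this coverage package is importable): line endings are normalized and a trailing newline is appended when missing::

    from coverage.python import get_python_source

    src = get_python_source(__file__)   # any readable .py path will do
    assert src.endswith(u"\n")          # always newline-terminated
    assert u"\r" not in src             # \r\n and bare \r become \n
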
diff --git a/catapult/third_party/coverage/coverage/pytracer.py b/catapult/third_party/coverage/coverage/pytracer.py
new file mode 100644
index 0000000..cdb3ae7
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/pytracer.py
@@ -0,0 +1,152 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Raw data collector for coverage.py."""
+
+import dis
+import sys
+
+from coverage import env
+
+# We need the YIELD_VALUE opcode below, in a comparison-friendly form.
+YIELD_VALUE = dis.opmap['YIELD_VALUE']
+if env.PY2:
+    YIELD_VALUE = chr(YIELD_VALUE)
+
+
+class PyTracer(object):
+    """Python implementation of the raw data tracer."""
+
+    # Because of poor implementations of trace-function-manipulating tools,
+    # the Python trace function must be kept very simple.  In particular, there
+    # must be only one function ever set as the trace function, both through
+    # sys.settrace, and as the return value from the trace function.  Put
+    # another way, the trace function must always return itself.  It cannot
+    # swap in other functions, or return None to avoid tracing a particular
+    # frame.
+    #
+    # The trace manipulator that introduced this restriction is DecoratorTools,
+    # which sets a trace function, and then later restores the pre-existing one
+    # by calling sys.settrace with a function it found in the current frame.
+    #
+    # Systems that use DecoratorTools (or similar trace manipulations) must use
+    # PyTracer to get accurate results.  The command-line --timid argument is
+    # used to force the use of this tracer.
+
+    def __init__(self):
+        # Attributes set from the collector:
+        self.data = None
+        self.trace_arcs = False
+        self.should_trace = None
+        self.should_trace_cache = None
+        self.warn = None
+        # The threading module to use, if any.
+        self.threading = None
+
+        self.cur_file_dict = []
+        self.last_line = [0]
+
+        self.data_stack = []
+        self.last_exc_back = None
+        self.last_exc_firstlineno = 0
+        self.thread = None
+        self.stopped = False
+
+    def __repr__(self):
+        return "<PyTracer at 0x{0:0x}: {1} lines in {2} files>".format(
+            id(self),
+            sum(len(v) for v in self.data.values()),
+            len(self.data),
+        )
+
+    def _trace(self, frame, event, arg_unused):
+        """The trace function passed to sys.settrace."""
+
+        if self.stopped:
+            return
+
+        if self.last_exc_back:
+            if frame == self.last_exc_back:
+                # Someone forgot a return event.
+                if self.trace_arcs and self.cur_file_dict:
+                    pair = (self.last_line, -self.last_exc_firstlineno)
+                    self.cur_file_dict[pair] = None
+                self.cur_file_dict, self.last_line = self.data_stack.pop()
+            self.last_exc_back = None
+
+        if event == 'call':
+            # Entering a new function context.  Decide if we should trace
+            # in this file.
+            self.data_stack.append((self.cur_file_dict, self.last_line))
+            filename = frame.f_code.co_filename
+            disp = self.should_trace_cache.get(filename)
+            if disp is None:
+                disp = self.should_trace(filename, frame)
+                self.should_trace_cache[filename] = disp
+
+            self.cur_file_dict = None
+            if disp.trace:
+                tracename = disp.source_filename
+                if tracename not in self.data:
+                    self.data[tracename] = {}
+                self.cur_file_dict = self.data[tracename]
+            # The call event is really a "start frame" event, and happens for
+            # function calls and re-entering generators.  The f_lasti field is
+            # -1 for calls, and a real offset for generators.  Use -1 as the
+            # line number for calls, and the real line number for generators.
+            self.last_line = -1 if (frame.f_lasti < 0) else frame.f_lineno
+        elif event == 'line':
+            # Record an executed line.
+            if self.cur_file_dict is not None:
+                lineno = frame.f_lineno
+                if self.trace_arcs:
+                    self.cur_file_dict[(self.last_line, lineno)] = None
+                else:
+                    self.cur_file_dict[lineno] = None
+                self.last_line = lineno
+        elif event == 'return':
+            if self.trace_arcs and self.cur_file_dict:
+                # Record an arc leaving the function, but beware that a
+                # "return" event might just mean yielding from a generator.
+                bytecode = frame.f_code.co_code[frame.f_lasti]
+                if bytecode != YIELD_VALUE:
+                    first = frame.f_code.co_firstlineno
+                    self.cur_file_dict[(self.last_line, -first)] = None
+            # Leaving this function, pop the filename stack.
+            self.cur_file_dict, self.last_line = self.data_stack.pop()
+        elif event == 'exception':
+            self.last_exc_back = frame.f_back
+            self.last_exc_firstlineno = frame.f_code.co_firstlineno
+        return self._trace
+
+    def start(self):
+        """Start this Tracer.
+
+        Return a Python function suitable for use with sys.settrace().
+
+        """
+        if self.threading:
+            self.thread = self.threading.currentThread()
+        sys.settrace(self._trace)
+        self.stopped = False
+        return self._trace
+
+    def stop(self):
+        """Stop this Tracer."""
+        self.stopped = True
+        if self.threading and self.thread != self.threading.currentThread():
+            # Called on a different thread than started us: we can't unhook
+            # ourselves, but we've set the flag that we should stop, so we
+            # won't do any more tracing.
+            return
+
+        if self.warn:
+            if sys.gettrace() != self._trace:
+                msg = "Trace function changed, measurement is likely wrong: %r"
+                self.warn(msg % (sys.gettrace(),))
+
+        sys.settrace(None)
+
+    def get_stats(self):
+        """Return a dictionary of statistics, or None."""
+        return None
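
A hedged sketch of how a collector drives PyTracer; the real Collector supplies these attributes itself, and FakeDisposition is only an illustrative stand-in for the disposition objects that should_trace returns (assumes this vendored coverage package runs on the interpreter in use)::

    import collections
    from coverage.pytracer import PyTracer

    FakeDisposition = collections.namedtuple(
        "FakeDisposition", "trace source_filename")

    def work():
        total = 0
        for n in range(3):
            total += n
        return total

    tracer = PyTracer()
    tracer.data = {}
    tracer.trace_arcs = False
    tracer.should_trace = lambda filename, frame: FakeDisposition(
        filename == __file__, filename)
    tracer.should_trace_cache = {}
    tracer.warn = lambda msg: None
    tracer.start()
    work()                  # only frames entered after start() are traced
    tracer.stop()
    print(tracer.data)      # {<this file>: {lineno: None, ...}} for work()'s lines
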
diff --git a/catapult/third_party/coverage/coverage/report.py b/catapult/third_party/coverage/coverage/report.py
new file mode 100644
index 0000000..df34e43
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/report.py
@@ -0,0 +1,85 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Reporter foundation for coverage.py."""
+
+import os
+
+from coverage.files import prep_patterns, FnmatchMatcher
+from coverage.misc import CoverageException, NoSource, NotPython, isolate_module
+
+os = isolate_module(os)
+
+
+class Reporter(object):
+    """A base class for all reporters."""
+
+    def __init__(self, coverage, config):
+        """Create a reporter.
+
+        `coverage` is the coverage instance. `config` is an instance  of
+        CoverageConfig, for controlling all sorts of behavior.
+
+        """
+        self.coverage = coverage
+        self.config = config
+
+        # The FileReporters to report on.  Set by find_file_reporters.
+        self.file_reporters = []
+
+        # The directory into which to place the report, used by some derived
+        # classes.
+        self.directory = None
+
+    def find_file_reporters(self, morfs):
+        """Find the FileReporters we'll report on.
+
+        `morfs` is a list of modules or file names.
+
+        """
+        reporters = self.coverage._get_file_reporters(morfs)
+
+        if self.config.include:
+            matcher = FnmatchMatcher(prep_patterns(self.config.include))
+            reporters = [fr for fr in reporters if matcher.match(fr.filename)]
+
+        if self.config.omit:
+            matcher = FnmatchMatcher(prep_patterns(self.config.omit))
+            reporters = [fr for fr in reporters if not matcher.match(fr.filename)]
+
+        self.file_reporters = sorted(reporters)
+
+    def report_files(self, report_fn, morfs, directory=None):
+        """Run a reporting function on a number of morfs.
+
+        `report_fn` is called for each relative morf in `morfs`.  It is called
+        as::
+
+            report_fn(file_reporter, analysis)
+
+        where `file_reporter` is the `FileReporter` for the morf, and
+        `analysis` is the `Analysis` for the morf.
+
+        """
+        self.find_file_reporters(morfs)
+
+        if not self.file_reporters:
+            raise CoverageException("No data to report.")
+
+        self.directory = directory
+        if self.directory and not os.path.exists(self.directory):
+            os.makedirs(self.directory)
+
+        for fr in self.file_reporters:
+            try:
+                report_fn(fr, self.coverage._analyze(fr))
+            except NoSource:
+                if not self.config.ignore_errors:
+                    raise
+            except NotPython:
+                # Only report errors for .py files, and only if we didn't
+                # explicitly suppress those errors.
+                # NotPython is only raised by PythonFileReporter, which has a
+                # should_be_python() method.
+                if fr.should_be_python() and not self.config.ignore_errors:
+                    raise
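
The report_files callback contract in one small sketch; line_count_fn is a hypothetical reporting function, not part of coverage.py::

    def line_count_fn(file_reporter, analysis):
        # Called once per measured file with its FileReporter and Analysis.
        print("%-40s %4d statements, %4d missing" % (
            file_reporter.relative_filename(),
            analysis.numbers.n_statements,
            analysis.numbers.n_missing,
        ))

    # Inside a Reporter subclass:
    #     self.report_files(line_count_fn, morfs, directory)
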
diff --git a/catapult/third_party/coverage/coverage/results.py b/catapult/third_party/coverage/coverage/results.py
new file mode 100644
index 0000000..9627373
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/results.py
@@ -0,0 +1,273 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Results of coverage measurement."""
+
+import collections
+
+from coverage.backward import iitems
+from coverage.misc import format_lines
+
+
+class Analysis(object):
+    """The results of analyzing a FileReporter."""
+
+    def __init__(self, data, file_reporter):
+        self.data = data
+        self.file_reporter = file_reporter
+        self.filename = self.file_reporter.filename
+        self.statements = self.file_reporter.lines()
+        self.excluded = self.file_reporter.excluded_lines()
+
+        # Identify missing statements.
+        executed = self.data.lines(self.filename) or []
+        executed = self.file_reporter.translate_lines(executed)
+        self.missing = self.statements - executed
+
+        if self.data.has_arcs():
+            self._arc_possibilities = sorted(self.file_reporter.arcs())
+            self.exit_counts = self.file_reporter.exit_counts()
+            self.no_branch = self.file_reporter.no_branch_lines()
+            n_branches = self.total_branches()
+            mba = self.missing_branch_arcs()
+            n_partial_branches = sum(
+                len(v) for k,v in iitems(mba) if k not in self.missing
+                )
+            n_missing_branches = sum(len(v) for k,v in iitems(mba))
+        else:
+            self._arc_possibilities = []
+            self.exit_counts = {}
+            self.no_branch = set()
+            n_branches = n_partial_branches = n_missing_branches = 0
+
+        self.numbers = Numbers(
+            n_files=1,
+            n_statements=len(self.statements),
+            n_excluded=len(self.excluded),
+            n_missing=len(self.missing),
+            n_branches=n_branches,
+            n_partial_branches=n_partial_branches,
+            n_missing_branches=n_missing_branches,
+            )
+
+    def missing_formatted(self):
+        """The missing line numbers, formatted nicely.
+
+        Returns a string like "1-2, 5-11, 13-14".
+
+        """
+        return format_lines(self.statements, self.missing)
+
+    def has_arcs(self):
+        """Were arcs measured in this result?"""
+        return self.data.has_arcs()
+
+    def arc_possibilities(self):
+        """Returns a sorted list of the arcs in the code."""
+        return self._arc_possibilities
+
+    def arcs_executed(self):
+        """Returns a sorted list of the arcs actually executed in the code."""
+        executed = self.data.arcs(self.filename) or []
+        executed = self.file_reporter.translate_arcs(executed)
+        return sorted(executed)
+
+    def arcs_missing(self):
+        """Returns a sorted list of the arcs in the code not executed."""
+        possible = self.arc_possibilities()
+        executed = self.arcs_executed()
+        missing = (
+            p for p in possible
+                if p not in executed
+                    and p[0] not in self.no_branch
+        )
+        return sorted(missing)
+
+    def arcs_missing_formatted(self):
+        """ The missing branch arcs, formatted nicely.
+
+        Returns a string like "1->2, 1->3, 16->20". Omits any mention of
+        branches from missing lines, so if line 17 is missing, then 17->18
+        won't be included.
+
+        """
+        arcs = self.missing_branch_arcs()
+        missing = self.missing
+        line_exits = sorted(iitems(arcs))
+        pairs = []
+        for line, exits in line_exits:
+            for ex in sorted(exits):
+                if line not in missing:
+                    pairs.append('%d->%d' % (line, ex))
+        return ', '.join(pairs)
+
+    def arcs_unpredicted(self):
+        """Returns a sorted list of the executed arcs missing from the code."""
+        possible = self.arc_possibilities()
+        executed = self.arcs_executed()
+        # Exclude arcs here which connect a line to itself.  They can occur
+        # in executed data in some cases.  This is where they can cause
+        # trouble, and here is where it's the least burden to remove them.
+        # Also, generators can somehow cause arcs from "enter" to "exit", so
+        # make sure we have at least one positive value.
+        unpredicted = (
+            e for e in executed
+                if e not in possible
+                    and e[0] != e[1]
+                    and (e[0] > 0 or e[1] > 0)
+        )
+        return sorted(unpredicted)
+
+    def branch_lines(self):
+        """Returns a list of line numbers that have more than one exit."""
+        return [l1 for l1,count in iitems(self.exit_counts) if count > 1]
+
+    def total_branches(self):
+        """How many total branches are there?"""
+        return sum(count for count in self.exit_counts.values() if count > 1)
+
+    def missing_branch_arcs(self):
+        """Return arcs that weren't executed from branch lines.
+
+        Returns {l1:[l2a,l2b,...], ...}
+
+        """
+        missing = self.arcs_missing()
+        branch_lines = set(self.branch_lines())
+        mba = collections.defaultdict(list)
+        for l1, l2 in missing:
+            if l1 in branch_lines:
+                mba[l1].append(l2)
+        return mba
+
+    def branch_stats(self):
+        """Get stats about branches.
+
+        Returns a dict mapping line numbers to a tuple:
+        (total_exits, taken_exits).
+        """
+
+        missing_arcs = self.missing_branch_arcs()
+        stats = {}
+        for lnum in self.branch_lines():
+            exits = self.exit_counts[lnum]
+            try:
+                missing = len(missing_arcs[lnum])
+            except KeyError:
+                missing = 0
+            stats[lnum] = (exits, exits - missing)
+        return stats
+
+
+class Numbers(object):
+    """The numerical results of measuring coverage.
+
+    This holds the basic statistics from `Analysis`, and is used to roll
+    up statistics across files.
+
+    """
+    # A global to determine the precision on coverage percentages, the number
+    # of decimal places.
+    _precision = 0
+    _near0 = 1.0              # These will change when _precision is changed.
+    _near100 = 99.0
+
+    def __init__(self, n_files=0, n_statements=0, n_excluded=0, n_missing=0,
+                    n_branches=0, n_partial_branches=0, n_missing_branches=0
+                    ):
+        self.n_files = n_files
+        self.n_statements = n_statements
+        self.n_excluded = n_excluded
+        self.n_missing = n_missing
+        self.n_branches = n_branches
+        self.n_partial_branches = n_partial_branches
+        self.n_missing_branches = n_missing_branches
+
+    def init_args(self):
+        """Return a list for __init__(*args) to recreate this object."""
+        return [
+            self.n_files, self.n_statements, self.n_excluded, self.n_missing,
+            self.n_branches, self.n_partial_branches, self.n_missing_branches,
+        ]
+
+    @classmethod
+    def set_precision(cls, precision):
+        """Set the number of decimal places used to report percentages."""
+        assert 0 <= precision < 10
+        cls._precision = precision
+        cls._near0 = 1.0 / 10**precision
+        cls._near100 = 100.0 - cls._near0
+
+    @property
+    def n_executed(self):
+        """Returns the number of executed statements."""
+        return self.n_statements - self.n_missing
+
+    @property
+    def n_executed_branches(self):
+        """Returns the number of executed branches."""
+        return self.n_branches - self.n_missing_branches
+
+    @property
+    def pc_covered(self):
+        """Returns a single percentage value for coverage."""
+        if self.n_statements > 0:
+            numerator, denominator = self.ratio_covered
+            pc_cov = (100.0 * numerator) / denominator
+        else:
+            pc_cov = 100.0
+        return pc_cov
+
+    @property
+    def pc_covered_str(self):
+        """Returns the percent covered, as a string, without a percent sign.
+
+        Note that "0" is only returned when the value is truly zero, and "100"
+        is only returned when the value is truly 100.  Rounding can never
+        result in either "0" or "100".
+
+        """
+        pc = self.pc_covered
+        if 0 < pc < self._near0:
+            pc = self._near0
+        elif self._near100 < pc < 100:
+            pc = self._near100
+        else:
+            pc = round(pc, self._precision)
+        return "%.*f" % (self._precision, pc)
+
+    @classmethod
+    def pc_str_width(cls):
+        """How many characters wide can pc_covered_str be?"""
+        width = 3   # "100"
+        if cls._precision > 0:
+            width += 1 + cls._precision
+        return width
+
+    @property
+    def ratio_covered(self):
+        """Return a numerator and denominator for the coverage ratio."""
+        numerator = self.n_executed + self.n_executed_branches
+        denominator = self.n_statements + self.n_branches
+        return numerator, denominator
+
+    def __add__(self, other):
+        nums = Numbers()
+        nums.n_files = self.n_files + other.n_files
+        nums.n_statements = self.n_statements + other.n_statements
+        nums.n_excluded = self.n_excluded + other.n_excluded
+        nums.n_missing = self.n_missing + other.n_missing
+        nums.n_branches = self.n_branches + other.n_branches
+        nums.n_partial_branches = (
+            self.n_partial_branches + other.n_partial_branches
+            )
+        nums.n_missing_branches = (
+            self.n_missing_branches + other.n_missing_branches
+            )
+        return nums
+
+    def __radd__(self, other):
+        # Implementing 0+Numbers allows us to sum() a list of Numbers.
+        if other == 0:
+            return self
+        return NotImplemented
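
Numbers is built to roll up across files via __add__/__radd__; a quick sketch with made-up values::

    from coverage.results import Numbers

    a = Numbers(n_files=1, n_statements=10, n_missing=2)
    b = Numbers(n_files=1, n_statements=40, n_missing=0)
    total = sum([a, b])              # __radd__ lets sum() start from 0
    print(total.n_executed)          # 48
    print(total.pc_covered_str)      # "96" at the default precision of 0
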
diff --git a/catapult/third_party/coverage/coverage/summary.py b/catapult/third_party/coverage/coverage/summary.py
new file mode 100644
index 0000000..5ddbb38
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/summary.py
@@ -0,0 +1,121 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Summary reporting"""
+
+import sys
+
+from coverage import env
+from coverage.report import Reporter
+from coverage.results import Numbers
+from coverage.misc import NotPython, CoverageException, output_encoding
+
+
+class SummaryReporter(Reporter):
+    """A reporter for writing the summary report."""
+
+    def __init__(self, coverage, config):
+        super(SummaryReporter, self).__init__(coverage, config)
+        self.branches = coverage.data.has_arcs()
+
+    def report(self, morfs, outfile=None):
+        """Writes a report summarizing coverage statistics per module.
+
+        `outfile` is a file object to write the summary to. It must be opened
+        for native strings (bytes on Python 2, Unicode on Python 3).
+
+        """
+        self.find_file_reporters(morfs)
+
+        # Prepare the formatting strings
+        max_name = max([len(fr.relative_filename()) for fr in self.file_reporters] + [5])
+        fmt_name = u"%%- %ds  " % max_name
+        fmt_err = u"%s   %s: %s\n"
+        fmt_skip_covered = u"\n%s file%s skipped due to complete coverage.\n"
+
+        header = (fmt_name % "Name") + u" Stmts   Miss"
+        fmt_coverage = fmt_name + u"%6d %6d"
+        if self.branches:
+            header += u" Branch BrPart"
+            fmt_coverage += u" %6d %6d"
+        width100 = Numbers.pc_str_width()
+        header += u"%*s" % (width100+4, "Cover")
+        fmt_coverage += u"%%%ds%%%%" % (width100+3,)
+        if self.config.show_missing:
+            header += u"   Missing"
+            fmt_coverage += u"   %s"
+        rule = u"-" * len(header) + u"\n"
+        header += u"\n"
+        fmt_coverage += u"\n"
+
+        if outfile is None:
+            outfile = sys.stdout
+
+        if env.PY2:
+            writeout = lambda u: outfile.write(u.encode(output_encoding()))
+        else:
+            writeout = outfile.write
+
+        # Write the header
+        writeout(header)
+        writeout(rule)
+
+        total = Numbers()
+        skipped_count = 0
+
+        for fr in self.file_reporters:
+            try:
+                analysis = self.coverage._analyze(fr)
+                nums = analysis.numbers
+                total += nums
+
+                if self.config.skip_covered:
+                    # Don't report on 100% files.
+                    no_missing_lines = (nums.n_missing == 0)
+                    no_missing_branches = (nums.n_partial_branches == 0)
+                    if no_missing_lines and no_missing_branches:
+                        skipped_count += 1
+                        continue
+
+                args = (fr.relative_filename(), nums.n_statements, nums.n_missing)
+                if self.branches:
+                    args += (nums.n_branches, nums.n_partial_branches)
+                args += (nums.pc_covered_str,)
+                if self.config.show_missing:
+                    missing_fmtd = analysis.missing_formatted()
+                    if self.branches:
+                        branches_fmtd = analysis.arcs_missing_formatted()
+                        if branches_fmtd:
+                            if missing_fmtd:
+                                missing_fmtd += ", "
+                            missing_fmtd += branches_fmtd
+                    args += (missing_fmtd,)
+                writeout(fmt_coverage % args)
+            except Exception:
+                report_it = not self.config.ignore_errors
+                if report_it:
+                    typ, msg = sys.exc_info()[:2]
+                    # NotPython is only raised by PythonFileReporter, which has a
+                    # should_be_python() method.
+                    if typ is NotPython and not fr.should_be_python():
+                        report_it = False
+                if report_it:
+                    writeout(fmt_err % (fr.relative_filename(), typ.__name__, msg))
+
+        if total.n_files > 1:
+            writeout(rule)
+            args = ("TOTAL", total.n_statements, total.n_missing)
+            if self.branches:
+                args += (total.n_branches, total.n_partial_branches)
+            args += (total.pc_covered_str,)
+            if self.config.show_missing:
+                args += ("",)
+            writeout(fmt_coverage % args)
+
+        if not total.n_files and not skipped_count:
+            raise CoverageException("No data to report.")
+
+        if self.config.skip_covered and skipped_count:
+            writeout(fmt_skip_covered % (skipped_count, 's' if skipped_count > 1 else ''))
+
+        return total.n_statements and total.pc_covered
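
This reporter is normally reached through the public Coverage.report() call rather than constructed directly; a minimal end-to-end sketch, where work() is just something measurable::

    import coverage

    def work():
        return sum(n * n for n in range(5))

    cov = coverage.Coverage()
    cov.start()
    work()                                # only frames entered after start() count
    cov.stop()
    pct = cov.report(show_missing=True)   # prints the Name/Stmts/Miss/Cover table
    print("total: %.0f%%" % pct)
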
diff --git a/catapult/third_party/coverage/coverage/templite.py b/catapult/third_party/coverage/coverage/templite.py
new file mode 100644
index 0000000..f131f74
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/templite.py
@@ -0,0 +1,276 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""A simple Python template renderer, for a nano-subset of Django syntax.
+
+For a detailed discussion of this code, see this chapter from 500 Lines:
+http://aosabook.org/en/500L/a-template-engine.html
+
+"""
+
+# Coincidentally named the same as http://code.activestate.com/recipes/496702/
+
+import re
+
+from coverage import env
+
+
+class TempliteSyntaxError(ValueError):
+    """Raised when a template has a syntax error."""
+    pass
+
+
+class TempliteValueError(ValueError):
+    """Raised when an expression won't evaluate in a template."""
+    pass
+
+
+class CodeBuilder(object):
+    """Build source code conveniently."""
+
+    def __init__(self, indent=0):
+        self.code = []
+        self.indent_level = indent
+
+    def __str__(self):
+        return "".join(str(c) for c in self.code)
+
+    def add_line(self, line):
+        """Add a line of source to the code.
+
+        Indentation and newline will be added for you, don't provide them.
+
+        """
+        self.code.extend([" " * self.indent_level, line, "\n"])
+
+    def add_section(self):
+        """Add a section, a sub-CodeBuilder."""
+        section = CodeBuilder(self.indent_level)
+        self.code.append(section)
+        return section
+
+    INDENT_STEP = 4      # PEP8 says so!
+
+    def indent(self):
+        """Increase the current indent for following lines."""
+        self.indent_level += self.INDENT_STEP
+
+    def dedent(self):
+        """Decrease the current indent for following lines."""
+        self.indent_level -= self.INDENT_STEP
+
+    def get_globals(self):
+        """Execute the code, and return a dict of globals it defines."""
+        # A check that the caller really finished all the blocks they started.
+        assert self.indent_level == 0
+        # Get the Python source as a single string.
+        python_source = str(self)
+        # Execute the source, defining globals, and return them.
+        global_namespace = {}
+        exec(python_source, global_namespace)
+        return global_namespace
+
+
+class Templite(object):
+    """A simple template renderer, for a nano-subset of Django syntax.
+
+    Supported constructs are extended variable access::
+
+        {{var.modifier.modifier|filter|filter}}
+
+    loops::
+
+        {% for var in list %}...{% endfor %}
+
+    and ifs::
+
+        {% if var %}...{% endif %}
+
+    Comments are within curly-hash markers::
+
+        {# This will be ignored #}
+
+    Construct a Templite with the template text, then use `render` against a
+    dictionary context to create a finished string::
+
+        templite = Templite('''
+            <h1>Hello {{name|upper}}!</h1>
+            {% for topic in topics %}
+                <p>You are interested in {{topic}}.</p>
+            {% endfor %}
+            ''',
+            {'upper': str.upper},
+        )
+        text = templite.render({
+            'name': "Ned",
+            'topics': ['Python', 'Geometry', 'Juggling'],
+        })
+
+    """
+    def __init__(self, text, *contexts):
+        """Construct a Templite with the given `text`.
+
+        `contexts` are dictionaries of values to use for future renderings.
+        These are good for filters and global values.
+
+        """
+        self.context = {}
+        for context in contexts:
+            self.context.update(context)
+
+        self.all_vars = set()
+        self.loop_vars = set()
+
+        # We construct a function in source form, then compile it and hold onto
+        # it, and execute it to render the template.
+        code = CodeBuilder()
+
+        code.add_line("def render_function(context, do_dots):")
+        code.indent()
+        vars_code = code.add_section()
+        code.add_line("result = []")
+        code.add_line("append_result = result.append")
+        code.add_line("extend_result = result.extend")
+        if env.PY2:
+            code.add_line("to_str = unicode")
+        else:
+            code.add_line("to_str = str")
+
+        buffered = []
+
+        def flush_output():
+            """Force `buffered` to the code builder."""
+            if len(buffered) == 1:
+                code.add_line("append_result(%s)" % buffered[0])
+            elif len(buffered) > 1:
+                code.add_line("extend_result([%s])" % ", ".join(buffered))
+            del buffered[:]
+
+        ops_stack = []
+
+        # Split the text to form a list of tokens.
+        tokens = re.split(r"(?s)({{.*?}}|{%.*?%}|{#.*?#})", text)
+
+        for token in tokens:
+            if token.startswith('{#'):
+                # Comment: ignore it and move on.
+                continue
+            elif token.startswith('{{'):
+                # An expression to evaluate.
+                expr = self._expr_code(token[2:-2].strip())
+                buffered.append("to_str(%s)" % expr)
+            elif token.startswith('{%'):
+                # Action tag: split into words and parse further.
+                flush_output()
+                words = token[2:-2].strip().split()
+                if words[0] == 'if':
+                    # An if statement: evaluate the expression to determine if.
+                    if len(words) != 2:
+                        self._syntax_error("Don't understand if", token)
+                    ops_stack.append('if')
+                    code.add_line("if %s:" % self._expr_code(words[1]))
+                    code.indent()
+                elif words[0] == 'for':
+                    # A loop: iterate over expression result.
+                    if len(words) != 4 or words[2] != 'in':
+                        self._syntax_error("Don't understand for", token)
+                    ops_stack.append('for')
+                    self._variable(words[1], self.loop_vars)
+                    code.add_line(
+                        "for c_%s in %s:" % (
+                            words[1],
+                            self._expr_code(words[3])
+                        )
+                    )
+                    code.indent()
+                elif words[0].startswith('end'):
+                    # Endsomething.  Pop the ops stack.
+                    if len(words) != 1:
+                        self._syntax_error("Don't understand end", token)
+                    end_what = words[0][3:]
+                    if not ops_stack:
+                        self._syntax_error("Too many ends", token)
+                    start_what = ops_stack.pop()
+                    if start_what != end_what:
+                        self._syntax_error("Mismatched end tag", end_what)
+                    code.dedent()
+                else:
+                    self._syntax_error("Don't understand tag", words[0])
+            else:
+                # Literal content.  If it isn't empty, output it.
+                if token:
+                    buffered.append(repr(token))
+
+        if ops_stack:
+            self._syntax_error("Unmatched action tag", ops_stack[-1])
+
+        flush_output()
+
+        for var_name in self.all_vars - self.loop_vars:
+            vars_code.add_line("c_%s = context[%r]" % (var_name, var_name))
+
+        code.add_line('return "".join(result)')
+        code.dedent()
+        self._render_function = code.get_globals()['render_function']
+
+    def _expr_code(self, expr):
+        """Generate a Python expression for `expr`."""
+        if "|" in expr:
+            pipes = expr.split("|")
+            code = self._expr_code(pipes[0])
+            for func in pipes[1:]:
+                self._variable(func, self.all_vars)
+                code = "c_%s(%s)" % (func, code)
+        elif "." in expr:
+            dots = expr.split(".")
+            code = self._expr_code(dots[0])
+            args = ", ".join(repr(d) for d in dots[1:])
+            code = "do_dots(%s, %s)" % (code, args)
+        else:
+            self._variable(expr, self.all_vars)
+            code = "c_%s" % expr
+        return code
+
+    def _syntax_error(self, msg, thing):
+        """Raise a syntax error using `msg`, and showing `thing`."""
+        raise TempliteSyntaxError("%s: %r" % (msg, thing))
+
+    def _variable(self, name, vars_set):
+        """Track that `name` is used as a variable.
+
+        Adds the name to `vars_set`, a set of variable names.
+
+        Raises a syntax error if `name` is not a valid name.
+
+        """
+        if not re.match(r"[_a-zA-Z][_a-zA-Z0-9]*$", name):
+            self._syntax_error("Not a valid name", name)
+        vars_set.add(name)
+
+    def render(self, context=None):
+        """Render this template by applying it to `context`.
+
+        `context` is a dictionary of values to use in this rendering.
+
+        """
+        # Make the complete context we'll use.
+        render_context = dict(self.context)
+        if context:
+            render_context.update(context)
+        return self._render_function(render_context, self._do_dots)
+
+    def _do_dots(self, value, *dots):
+        """Evaluate dotted expressions at run-time."""
+        for dot in dots:
+            try:
+                value = getattr(value, dot)
+            except AttributeError:
+                try:
+                    value = value[dot]
+                except (TypeError, KeyError):
+                    raise TempliteValueError(
+                        "Couldn't evaluate %r.%s" % (value, dot)
+                    )
+            if callable(value):
+                value = value()
+        return value
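
A self-contained usage sketch, mirroring the class docstring above with the loop closed by {% endfor %}::

    from coverage.templite import Templite

    templite = Templite(
        u"<h1>Hello {{name|upper}}!</h1>\n"
        u"{% for topic in topics %}<p>{{topic}}</p>{% endfor %}\n",
        {'upper': str.upper},
    )
    print(templite.render({'name': "Ned", 'topics': ['Python', 'Geometry']}))
    # <h1>Hello NED!</h1>
    # <p>Python</p><p>Geometry</p>
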
diff --git a/catapult/third_party/coverage/coverage/test_helpers.py b/catapult/third_party/coverage/coverage/test_helpers.py
new file mode 100644
index 0000000..50cc329
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/test_helpers.py
@@ -0,0 +1,337 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Mixin classes to help make good tests."""
+
+import atexit
+import collections
+import contextlib
+import os
+import random
+import shutil
+import sys
+import tempfile
+import textwrap
+
+from coverage.backunittest import TestCase
+from coverage.backward import StringIO, to_bytes
+
+
+class Tee(object):
+    """A file-like that writes to all the file-likes it has."""
+
+    def __init__(self, *files):
+        """Make a Tee that writes to all the files in `files.`"""
+        self._files = files
+        if hasattr(files[0], "encoding"):
+            self.encoding = files[0].encoding
+
+    def write(self, data):
+        """Write `data` to all the files."""
+        for f in self._files:
+            f.write(data)
+
+    def flush(self):
+        """Flush the data on all the files."""
+        for f in self._files:
+            f.flush()
+
+    if 0:
+        # Use this if you need to use a debugger, though it makes some tests
+        # fail, I'm not sure why...
+        def __getattr__(self, name):
+            return getattr(self._files[0], name)
+
+
+@contextlib.contextmanager
+def change_dir(new_dir):
+    """Change directory, and then change back.
+
+    Use as a context manager, it will give you the new directory, and later
+    restore the old one.
+
+    """
+    old_dir = os.getcwd()
+    os.chdir(new_dir)
+    try:
+        yield os.getcwd()
+    finally:
+        os.chdir(old_dir)
+
+
+@contextlib.contextmanager
+def saved_sys_path():
+    """Save sys.path, and restore it later."""
+    old_syspath = sys.path[:]
+    try:
+        yield
+    finally:
+        sys.path = old_syspath
+
+
+def setup_with_context_manager(testcase, cm):
+    """Use a contextmanager to setUp a test case.
+
+    If you have a context manager you like::
+
+        with ctxmgr(a, b, c) as v:
+            # do something with v
+
+    and you want to have that effect for a test case, call this function from
+    your setUp, and it will start the context manager for your test, and end it
+    when the test is done::
+
+        def setUp(self):
+            self.v = setup_with_context_manager(self, ctxmgr(a, b, c))
+
+        def test_foo(self):
+            # do something with self.v
+
+    """
+    val = cm.__enter__()
+    testcase.addCleanup(cm.__exit__, None, None, None)
+    return val
+
+
+class ModuleAwareMixin(TestCase):
+    """A test case mixin that isolates changes to sys.modules."""
+
+    def setUp(self):
+        super(ModuleAwareMixin, self).setUp()
+
+        # Record sys.modules here so we can restore it in cleanup_modules.
+        self.old_modules = list(sys.modules)
+        self.addCleanup(self.cleanup_modules)
+
+    def cleanup_modules(self):
+        """Remove any new modules imported during the test run.
+
+        This lets us import the same source files for more than one test.
+
+        """
+        for m in [m for m in sys.modules if m not in self.old_modules]:
+            del sys.modules[m]
+
+
+class SysPathAwareMixin(TestCase):
+    """A test case mixin that isolates changes to sys.path."""
+
+    def setUp(self):
+        super(SysPathAwareMixin, self).setUp()
+        setup_with_context_manager(self, saved_sys_path())
+
+
+class EnvironmentAwareMixin(TestCase):
+    """A test case mixin that isolates changes to the environment."""
+
+    def setUp(self):
+        super(EnvironmentAwareMixin, self).setUp()
+
+        # Record environment variables that we changed with set_environ.
+        self.environ_undos = {}
+
+        self.addCleanup(self.cleanup_environ)
+
+    def set_environ(self, name, value):
+        """Set an environment variable `name` to be `value`.
+
+        The environment variable is set, and a record is kept that it was set,
+        so that `cleanup_environ` can restore its original value.
+
+        """
+        if name not in self.environ_undos:
+            self.environ_undos[name] = os.environ.get(name)
+        os.environ[name] = value
+
+    def cleanup_environ(self):
+        """Undo all the changes made by `set_environ`."""
+        for name, value in self.environ_undos.items():
+            if value is None:
+                del os.environ[name]
+            else:
+                os.environ[name] = value
+
+
+class StdStreamCapturingMixin(TestCase):
+    """A test case mixin that captures stdout and stderr."""
+
+    def setUp(self):
+        super(StdStreamCapturingMixin, self).setUp()
+
+        # Capture stdout and stderr so we can examine them in tests.
+        # nose keeps stdout from littering the screen, so we can safely Tee it,
+        # but it doesn't capture stderr, so we don't want to Tee stderr to the
+        # real stderr, since it will interfere with our nice field of dots.
+        self.old_stdout = sys.stdout
+        self.captured_stdout = StringIO()
+        sys.stdout = Tee(sys.stdout, self.captured_stdout)
+
+        self.old_stderr = sys.stderr
+        self.captured_stderr = StringIO()
+        sys.stderr = self.captured_stderr
+
+        self.addCleanup(self.cleanup_std_streams)
+
+    def cleanup_std_streams(self):
+        """Restore stdout and stderr."""
+        sys.stdout = self.old_stdout
+        sys.stderr = self.old_stderr
+
+    def stdout(self):
+        """Return the data written to stdout during the test."""
+        return self.captured_stdout.getvalue()
+
+    def stderr(self):
+        """Return the data written to stderr during the test."""
+        return self.captured_stderr.getvalue()
+
+
+class TempDirMixin(SysPathAwareMixin, ModuleAwareMixin, TestCase):
+    """A test case mixin that creates a temp directory and files in it.
+
+    Includes SysPathAwareMixin and ModuleAwareMixin, because making and using
+    temp directories like this will also need that kind of isolation.
+
+    """
+
+    # Our own setting: most of these tests run in their own temp directory.
+    # Set this to False in your subclass if you don't want a temp directory
+    # created.
+    run_in_temp_dir = True
+
+    # Set this if you aren't creating any files with make_file, but still want
+    # the temp directory.  This will stop the test behavior checker from
+    # complaining.
+    no_files_in_temp_dir = False
+
+    def setUp(self):
+        super(TempDirMixin, self).setUp()
+
+        if self.run_in_temp_dir:
+            # Create a temporary directory.
+            self.temp_dir = self.make_temp_dir("test_cover")
+            self.chdir(self.temp_dir)
+
+            # Modules should be importable from this temp directory.  We don't
+            # use '' because we make lots of different temp directories and
+            # nose's caching importer can get confused.  The full path prevents
+            # problems.
+            sys.path.insert(0, os.getcwd())
+
+        class_behavior = self.class_behavior()
+        class_behavior.tests += 1
+        class_behavior.temp_dir = self.run_in_temp_dir
+        class_behavior.no_files_ok = self.no_files_in_temp_dir
+
+        self.addCleanup(self.check_behavior)
+
+    def make_temp_dir(self, slug="test_cover"):
+        """Make a temp directory that is cleaned up when the test is done."""
+        name = "%s_%08d" % (slug, random.randint(0, 99999999))
+        temp_dir = os.path.join(tempfile.gettempdir(), name)
+        os.makedirs(temp_dir)
+        self.addCleanup(shutil.rmtree, temp_dir)
+        return temp_dir
+
+    def chdir(self, new_dir):
+        """Change directory, and change back when the test is done."""
+        old_dir = os.getcwd()
+        os.chdir(new_dir)
+        self.addCleanup(os.chdir, old_dir)
+
+    def check_behavior(self):
+        """Check that we did the right things."""
+
+        class_behavior = self.class_behavior()
+        if class_behavior.test_method_made_any_files:
+            class_behavior.tests_making_files += 1
+
+    def make_file(self, filename, text="", newline=None):
+        """Create a file for testing.
+
+        `filename` is the relative path to the file, including directories if
+        desired, which will be created if need be.
+
+        `text` is the content to create in the file, a native string (bytes in
+        Python 2, unicode in Python 3).
+
+        If `newline` is provided, it is a string that will be used as the line
+        endings in the created file, otherwise the line endings are as provided
+        in `text`.
+
+        Returns `filename`.
+
+        """
+        # Tests that call `make_file` should be run in a temp environment.
+        assert self.run_in_temp_dir
+        self.class_behavior().test_method_made_any_files = True
+
+        text = textwrap.dedent(text)
+        if newline:
+            text = text.replace("\n", newline)
+
+        # Make sure the directories are available.
+        dirs, _ = os.path.split(filename)
+        if dirs and not os.path.exists(dirs):
+            os.makedirs(dirs)
+
+        # Create the file.
+        with open(filename, 'wb') as f:
+            f.write(to_bytes(text))
+
+        return filename
+
+    # We run some tests in temporary directories, because they may need to make
+    # files for the tests. But this is expensive, so we can change per-class
+    # whether a temp directory is used or not.  It's easy to forget to set that
+    # option properly, so we track information about what the tests did, and
+    # then report at the end of the process on test classes that were set
+    # wrong.
+
+    class ClassBehavior(object):
+        """A value object to store per-class."""
+        def __init__(self):
+            self.tests = 0
+            self.skipped = 0
+            self.temp_dir = True
+            self.no_files_ok = False
+            self.tests_making_files = 0
+            self.test_method_made_any_files = False
+
+    # Map from class to info about how it ran.
+    class_behaviors = collections.defaultdict(ClassBehavior)
+
+    @classmethod
+    def report_on_class_behavior(cls):
+        """Called at process exit to report on class behavior."""
+        for test_class, behavior in cls.class_behaviors.items():
+            bad = ""
+            if behavior.tests <= behavior.skipped:
+                bad = ""
+            elif behavior.temp_dir and behavior.tests_making_files == 0:
+                if not behavior.no_files_ok:
+                    bad = "Inefficient"
+            elif not behavior.temp_dir and behavior.tests_making_files > 0:
+                bad = "Unsafe"
+
+            if bad:
+                if behavior.temp_dir:
+                    where = "in a temp directory"
+                else:
+                    where = "without a temp directory"
+                print(
+                    "%s: %s ran %d tests, %d made files %s" % (
+                        bad,
+                        test_class.__name__,
+                        behavior.tests,
+                        behavior.tests_making_files,
+                        where,
+                    )
+                )
+
+    def class_behavior(self):
+        """Get the ClassBehavior instance for this test."""
+        return self.class_behaviors[self.__class__]
+
+# When the process ends, find out about bad classes.
+atexit.register(TempDirMixin.report_on_class_behavior)
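
A minimal usage sketch for the mixin above (illustrative, not part of the patch; it assumes the mixin's setUp, earlier in this file, creates and switches into the per-test temp directory, and the class and fixture names are made up):

    import unittest

    class FileTests(TempDirMixin, unittest.TestCase):
        run_in_temp_dir = True   # make_file() asserts this flag is set

        def test_round_trip(self):
            # Written relative to the temp dir the mixin switched into.
            self.make_file("pkg/mod.py", "x = 1\n")
            with open("pkg/mod.py") as f:
                self.assertEqual(f.read(), "x = 1\n")
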
diff --git a/catapult/third_party/coverage/coverage/version.py b/catapult/third_party/coverage/coverage/version.py
new file mode 100644
index 0000000..9897319
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/version.py
@@ -0,0 +1,33 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""The version and URL for coverage.py"""
+# This file is exec'ed in setup.py, don't import anything!
+
+# Same semantics as sys.version_info.
+version_info = (4, 0, 3, 'final', 0)
+
+
+def _make_version(major, minor, micro, releaselevel, serial):
+    """Create a readable version string from version_info tuple components."""
+    assert releaselevel in ['alpha', 'beta', 'candidate', 'final']
+    version = "%d.%d" % (major, minor)
+    if micro:
+        version += ".%d" % (micro,)
+    if releaselevel != 'final':
+        short = {'alpha': 'a', 'beta': 'b', 'candidate': 'rc'}[releaselevel]
+        version += "%s%d" % (short, serial)
+    return version
+
+
+def _make_url(major, minor, micro, releaselevel, serial):
+    """Make the URL people should start at for this version of coverage.py."""
+    url = "https://coverage.readthedocs.org"
+    if releaselevel != 'final':
+        # For pre-releases, use a version-specific URL.
+        url += "/en/coverage-" + _make_version(major, minor, micro, releaselevel, serial)
+    return url
+
+
+__version__ = _make_version(*version_info)
+__url__ = _make_url(*version_info)
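
A quick worked example of the two helpers above (illustration only; the values follow directly from the code):

    >>> _make_version(4, 0, 3, 'final', 0)
    '4.0.3'
    >>> _make_version(4, 1, 0, 'beta', 2)   # micro == 0 is dropped
    '4.1b2'
    >>> _make_url(4, 1, 0, 'beta', 2)       # pre-releases get a versioned URL
    'https://coverage.readthedocs.org/en/coverage-4.1b2'
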
diff --git a/catapult/third_party/coverage/coverage/xmlreport.py b/catapult/third_party/coverage/coverage/xmlreport.py
new file mode 100644
index 0000000..50a4684
--- /dev/null
+++ b/catapult/third_party/coverage/coverage/xmlreport.py
@@ -0,0 +1,214 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""XML reporting for coverage.py"""
+
+import os
+import os.path
+import sys
+import time
+import xml.dom.minidom
+
+from coverage import env
+from coverage import __url__, __version__, files
+from coverage.misc import isolate_module
+from coverage.report import Reporter
+
+os = isolate_module(os)
+
+
+DTD_URL = (
+    'https://raw.githubusercontent.com/cobertura/web/'
+    'f0366e5e2cf18f111cbd61fc34ef720a6584ba02'
+    '/htdocs/xml/coverage-03.dtd'
+)
+
+
+def rate(hit, num):
+    """Return the fraction of `hit`/`num`, as a string."""
+    if num == 0:
+        return "1"
+    else:
+        return "%.4g" % (float(hit) / num)
+
+
+class XmlReporter(Reporter):
+    """A reporter for writing Cobertura-style XML coverage results."""
+
+    def __init__(self, coverage, config):
+        super(XmlReporter, self).__init__(coverage, config)
+
+        self.source_paths = set()
+        if config.source:
+            for src in config.source:
+                if os.path.exists(src):
+                    self.source_paths.add(files.canonical_filename(src))
+        self.packages = {}
+        self.xml_out = None
+        self.has_arcs = coverage.data.has_arcs()
+
+    def report(self, morfs, outfile=None):
+        """Generate a Cobertura-compatible XML report for `morfs`.
+
+        `morfs` is a list of modules or file names.
+
+        `outfile` is a file object to write the XML to.
+
+        """
+        # Initial setup.
+        outfile = outfile or sys.stdout
+
+        # Create the DOM that will store the data.
+        impl = xml.dom.minidom.getDOMImplementation()
+        self.xml_out = impl.createDocument(None, "coverage", None)
+
+        # Write header stuff.
+        xcoverage = self.xml_out.documentElement
+        xcoverage.setAttribute("version", __version__)
+        xcoverage.setAttribute("timestamp", str(int(time.time()*1000)))
+        xcoverage.appendChild(self.xml_out.createComment(
+            " Generated by coverage.py: %s " % __url__
+            ))
+        xcoverage.appendChild(self.xml_out.createComment(" Based on %s " % DTD_URL))
+
+        # Call xml_file for each file in the data.
+        self.report_files(self.xml_file, morfs)
+
+        xsources = self.xml_out.createElement("sources")
+        xcoverage.appendChild(xsources)
+
+        # Populate the XML DOM with the source info.
+        for path in sorted(self.source_paths):
+            xsource = self.xml_out.createElement("source")
+            xsources.appendChild(xsource)
+            txt = self.xml_out.createTextNode(path)
+            xsource.appendChild(txt)
+
+        lnum_tot, lhits_tot = 0, 0
+        bnum_tot, bhits_tot = 0, 0
+
+        xpackages = self.xml_out.createElement("packages")
+        xcoverage.appendChild(xpackages)
+
+        # Populate the XML DOM with the package info.
+        for pkg_name in sorted(self.packages.keys()):
+            pkg_data = self.packages[pkg_name]
+            class_elts, lhits, lnum, bhits, bnum = pkg_data
+            xpackage = self.xml_out.createElement("package")
+            xpackages.appendChild(xpackage)
+            xclasses = self.xml_out.createElement("classes")
+            xpackage.appendChild(xclasses)
+            for class_name in sorted(class_elts.keys()):
+                xclasses.appendChild(class_elts[class_name])
+            xpackage.setAttribute("name", pkg_name.replace(os.sep, '.'))
+            xpackage.setAttribute("line-rate", rate(lhits, lnum))
+            if self.has_arcs:
+                branch_rate = rate(bhits, bnum)
+            else:
+                branch_rate = "0"
+            xpackage.setAttribute("branch-rate", branch_rate)
+            xpackage.setAttribute("complexity", "0")
+
+            lnum_tot += lnum
+            lhits_tot += lhits
+            bnum_tot += bnum
+            bhits_tot += bhits
+
+        xcoverage.setAttribute("line-rate", rate(lhits_tot, lnum_tot))
+        if self.has_arcs:
+            branch_rate = rate(bhits_tot, bnum_tot)
+        else:
+            branch_rate = "0"
+        xcoverage.setAttribute("branch-rate", branch_rate)
+
+        # Use the DOM to write the output file.
+        out = self.xml_out.toprettyxml()
+        if env.PY2:
+            out = out.encode("utf8")
+        outfile.write(out)
+
+        # Return the total percentage.
+        denom = lnum_tot + bnum_tot
+        if denom == 0:
+            pct = 0.0
+        else:
+            pct = 100.0 * (lhits_tot + bhits_tot) / denom
+        return pct
+
+    def xml_file(self, fr, analysis):
+        """Add to the XML report for a single file."""
+
+        # Create the 'lines' and 'package' XML elements, which
+        # are populated later.  Note that a package == a directory.
+        filename = fr.relative_filename()
+        filename = filename.replace("\\", "/")
+        dirname = os.path.dirname(filename) or "."
+        parts = dirname.split("/")
+        dirname = "/".join(parts[:self.config.xml_package_depth])
+        package_name = dirname.replace("/", ".")
+        rel_name = fr.relative_filename()
+
+        if rel_name != fr.filename:
+            self.source_paths.add(fr.filename[:-len(rel_name)].rstrip(r"\/"))
+        package = self.packages.setdefault(package_name, [{}, 0, 0, 0, 0])
+
+        xclass = self.xml_out.createElement("class")
+
+        xclass.appendChild(self.xml_out.createElement("methods"))
+
+        xlines = self.xml_out.createElement("lines")
+        xclass.appendChild(xlines)
+
+        xclass.setAttribute("name", os.path.relpath(filename, dirname))
+        xclass.setAttribute("filename", filename)
+        xclass.setAttribute("complexity", "0")
+
+        branch_stats = analysis.branch_stats()
+        missing_branch_arcs = analysis.missing_branch_arcs()
+
+        # For each statement, create an XML 'line' element.
+        for line in sorted(analysis.statements):
+            xline = self.xml_out.createElement("line")
+            xline.setAttribute("number", str(line))
+
+            # Q: can we get info about the number of times a statement is
+            # executed?  If so, that should be recorded here.
+            xline.setAttribute("hits", str(int(line not in analysis.missing)))
+
+            if self.has_arcs:
+                if line in branch_stats:
+                    total, taken = branch_stats[line]
+                    xline.setAttribute("branch", "true")
+                    xline.setAttribute(
+                        "condition-coverage",
+                        "%d%% (%d/%d)" % (100*taken/total, taken, total)
+                        )
+                if line in missing_branch_arcs:
+                    annlines = ["exit" if b < 0 else str(b) for b in missing_branch_arcs[line]]
+                    xline.setAttribute("missing-branches", ",".join(annlines))
+            xlines.appendChild(xline)
+
+        class_lines = len(analysis.statements)
+        class_hits = class_lines - len(analysis.missing)
+
+        if self.has_arcs:
+            class_branches = sum(t for t, k in branch_stats.values())
+            missing_branches = sum(t - k for t, k in branch_stats.values())
+            class_br_hits = class_branches - missing_branches
+        else:
+            class_branches = 0.0
+            class_br_hits = 0.0
+
+        # Finalize the statistics that are collected in the XML DOM.
+        xclass.setAttribute("line-rate", rate(class_hits, class_lines))
+        if self.has_arcs:
+            branch_rate = rate(class_br_hits, class_branches)
+        else:
+            branch_rate = "0"
+        xclass.setAttribute("branch-rate", branch_rate)
+
+        package[0][rel_name] = xclass
+        package[1] += class_hits
+        package[2] += class_lines
+        package[3] += class_br_hits
+        package[4] += class_branches
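
A sketch of how this reporter is normally reached through coverage.py's public API (illustration only; `my_module` is a hypothetical module under measurement). Measuring with branch=True is what makes `has_arcs` true above and fills in the branch-rate attributes:

    import coverage

    cov = coverage.Coverage(branch=True)
    cov.start()
    import my_module                        # hypothetical code under test
    cov.stop()
    cov.save()
    cov.xml_report(outfile="coverage.xml")  # handled by XmlReporter.report()
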
diff --git a/catapult/third_party/coverage/howto.txt b/catapult/third_party/coverage/howto.txt
new file mode 100644
index 0000000..8fe691c
--- /dev/null
+++ b/catapult/third_party/coverage/howto.txt
@@ -0,0 +1,99 @@
+* Release checklist
+
+- Version number in coverage/version.py
+        version_info = (4, 0, 2, 'alpha', 1)
+        version_info = (4, 0, 2, 'beta', 1)
+        version_info = (4, 0, 2, 'candidate', 1)
+        version_info = (4, 0, 2, 'final', 0)
+- Python version number in classifiers in setup.py
+- Copyright date in NOTICE.txt
+- Update CHANGES.rst, including release date.
+- Update README.rst, including "New in x.y:"
+- Update docs
+    - Version, date, and changes in doc/changes.rst
+    - Version and date in doc/index.rst
+    - Version and copyright date in doc/conf.py
+    - Don't forget the man page: doc/python-coverage.1.txt
+    - Done with changes to source files, check them in.
+    - Generate new sample_html to get the latest, incl footer version number:
+        pip install -e .
+        cd ~/cog/trunk
+        rm -rf htmlcov
+        coverage run --branch --source=cogapp -m nose cogapp/test_cogapp.py:CogTestsInMemory
+        coverage combine
+        coverage html
+        - IF BETA:
+            rm -f ~/coverage/trunk/doc/sample_html_beta/*.*
+            cp -r htmlcov/ ~/coverage/trunk/doc/sample_html_beta/
+        - ELSE:
+            rm -f ~/coverage/trunk/doc/sample_html/*.*
+            cp -r htmlcov/ ~/coverage/trunk/doc/sample_html/
+        cd ~/coverage/trunk
+        check in the new sample html
+    - IF BETA:
+        - Build and publish docs:
+            $ make publishbeta
+    - ELSE:
+        - Build and publish docs:
+            $ make publish
+- Kits:
+    - Source kit:
+        - $ make clean kit
+    - Wheels
+        - $ make wheel
+    - Windows kits
+        - $ hg push
+        - wait about an hour for Appveyor to build kits.
+        - $ make download_appveyor
+    - examine the dist directory, and remove anything that looks malformed.
+- Update PyPi:
+    - $ make pypi
+    - upload kits:
+        - $ make kit_upload
+    - Visit http://pypi.python.org/pypi?%3Aaction=pkg_edit&name=coverage :
+        - show/hide the proper versions.
+- Tag the tree
+    - hg tag -m "Coverage 3.0.1" coverage-3.0.1
+- Update nedbatchelder.com
+    - Blog post?
+- Update readthedocs
+    - visit https://readthedocs.org/projects/coverage/versions/
+        - find the latest tag in the inactive list, edit it, make it active.
+    - IF NOT BETA:
+        - visit https://readthedocs.org/dashboard/coverage/advanced/
+        - change the default version to the new version
+- Update bitbucket:
+    - Issue tracker should get new version number in picker.
+    # Note: don't delete old version numbers: it marks changes on the tickets
+    # with that number.
+- Announce on coveragepy-announce@googlegroups.com .
+- Announce on TIP.
+
+
+* Building
+
+- Create PythonXX\Lib\distutils\distutils.cfg::
+    [build]
+    compiler = mingw32
+
+* Testing
+
+- Testing of Python code is handled by tox.
+    - Create and activate a virtualenv
+    - pip install -r requirements/dev.pip
+    - $ tox
+
+- For complete coverage testing:
+
+    $ make metacov
+
+    This will run coverage.py under its own measurement.  You can do this in
+    different environments (Linux vs. Windows, for example), then copy the data
+    files (.metacov.*) to one machine for combination and reporting.  To
+    combine and report:
+
+    $ make metahtml
+
+- To run the Javascript tests:
+
+    open tests/js/index.html in a variety of browsers.
diff --git a/catapult/third_party/coverage/igor.py b/catapult/third_party/coverage/igor.py
new file mode 100644
index 0000000..409fdc9
--- /dev/null
+++ b/catapult/third_party/coverage/igor.py
@@ -0,0 +1,390 @@
+# coding: utf-8
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Helper for building, testing, and linting coverage.py.
+
+To get portability, all these operations are written in Python here instead
+of in shell scripts, batch files, or Makefiles.
+
+"""
+
+import contextlib
+import fnmatch
+import glob
+import inspect
+import os
+import platform
+import sys
+import textwrap
+import warnings
+import zipfile
+
+
+# We want to see all warnings while we are running tests.  But we also need to
+# disable warnings for some of the more complex test setup.
+warnings.simplefilter("default")
+
+
+@contextlib.contextmanager
+def ignore_warnings():
+    """Context manager to ignore warning within the with statement."""
+    with warnings.catch_warnings():
+        warnings.simplefilter("ignore")
+        yield
+
+
+# Functions named do_* are executable from the command line: do_blah is run
+# by "python igor.py blah".
+
+
+def do_show_env():
+    """Show the environment variables."""
+    print("Environment:")
+    for env in sorted(os.environ):
+        print("  %s = %r" % (env, os.environ[env]))
+
+
+def do_remove_extension():
+    """Remove the compiled C extension, no matter what its name."""
+
+    so_patterns = """
+        tracer.so
+        tracer.*.so
+        tracer.pyd
+        tracer.*.pyd
+        """.split()
+
+    for pattern in so_patterns:
+        pattern = os.path.join("coverage", pattern)
+        for filename in glob.glob(pattern):
+            try:
+                os.remove(filename)
+            except OSError:
+                pass
+
+
+def label_for_tracer(tracer):
+    """Get the label for these tests."""
+    if tracer == "py":
+        label = "with Python tracer"
+    else:
+        label = "with C tracer"
+
+    return label
+
+
+def should_skip(tracer):
+    """Is there a reason to skip these tests?"""
+    if tracer == "py":
+        skipper = os.environ.get("COVERAGE_NO_PYTRACER")
+    else:
+        skipper = (
+            os.environ.get("COVERAGE_NO_EXTENSION") or
+            os.environ.get("COVERAGE_NO_CTRACER")
+        )
+
+    if skipper:
+        msg = "Skipping tests " + label_for_tracer(tracer)
+        if len(skipper) > 1:
+            msg += ": " + skipper
+    else:
+        msg = ""
+
+    return msg
+
+
+def run_tests(tracer, *nose_args):
+    """The actual running of tests."""
+    with ignore_warnings():
+        import nose.core
+
+    if 'COVERAGE_TESTING' not in os.environ:
+        os.environ['COVERAGE_TESTING'] = "True"
+    print_banner(label_for_tracer(tracer))
+    nose_args = ["nosetests"] + list(nose_args)
+    nose.core.main(argv=nose_args)
+
+
+def run_tests_with_coverage(tracer, *nose_args):
+    """Run tests, but with coverage."""
+
+    # Need to define this early enough that the first import of env.py sees it.
+    os.environ['COVERAGE_TESTING'] = "True"
+    os.environ['COVERAGE_PROCESS_START'] = os.path.abspath('metacov.ini')
+    os.environ['COVERAGE_HOME'] = os.getcwd()
+
+    # Create the .pth file that will let us measure coverage in sub-processes.
+    # The .pth file seems to have to be alphabetically after easy-install.pth
+    # or the sys.path entries aren't created right?
+    import nose
+    pth_dir = os.path.dirname(os.path.dirname(nose.__file__))
+    pth_path = os.path.join(pth_dir, "zzz_metacov.pth")
+    with open(pth_path, "w") as pth_file:
+        pth_file.write("import coverage; coverage.process_startup()\n")
+
+    # Make names for the data files that keep all the test runs distinct.
+    impl = platform.python_implementation().lower()
+    version = "%s%s" % sys.version_info[:2]
+    if '__pypy__' in sys.builtin_module_names:
+        version += "_%s%s" % sys.pypy_version_info[:2]
+    suffix = "%s%s_%s_%s" % (impl, version, tracer, platform.platform())
+
+    os.environ['COVERAGE_METAFILE'] = os.path.abspath(".metacov."+suffix)
+
+    import coverage
+    cov = coverage.Coverage(config_file="metacov.ini", data_suffix=False)
+    # Cheap trick: the coverage.py code itself is excluded from measurement,
+    # but if we clobber the cover_prefix in the coverage object, we can defeat
+    # the self-detection.
+    cov.cover_prefix = "Please measure coverage.py!"
+    cov._warn_unimported_source = False
+    cov.start()
+
+    try:
+        # Re-import coverage to get it coverage tested!  I don't understand all
+        # the mechanics here, but if I don't carry over the imported modules
+        # (in covmods), then things go haywire (os == None, eventually).
+        covmods = {}
+        covdir = os.path.split(coverage.__file__)[0]
+        # We have to make a list since we'll be deleting in the loop.
+        modules = list(sys.modules.items())
+        for name, mod in modules:
+            if name.startswith('coverage'):
+                if getattr(mod, '__file__', "??").startswith(covdir):
+                    covmods[name] = mod
+                    del sys.modules[name]
+        import coverage                         # pylint: disable=reimported
+        sys.modules.update(covmods)
+
+        # Run nosetests, with the arguments from our command line.
+        try:
+            run_tests(tracer, *nose_args)
+        except SystemExit:
+            # nose3 seems to raise SystemExit, not sure why?
+            pass
+    finally:
+        cov.stop()
+        os.remove(pth_path)
+
+    cov.combine()
+    cov.save()
+
+
+def do_combine_html():
+    """Combine data from a meta-coverage run, and make the HTML and XML reports."""
+    import coverage
+    os.environ['COVERAGE_HOME'] = os.getcwd()
+    os.environ['COVERAGE_METAFILE'] = os.path.abspath(".metacov")
+    cov = coverage.Coverage(config_file="metacov.ini")
+    cov.load()
+    cov.combine()
+    cov.save()
+    cov.html_report()
+    cov.xml_report()
+
+
+def do_test_with_tracer(tracer, *noseargs):
+    """Run nosetests with a particular tracer."""
+    # If we should skip these tests, skip them.
+    skip_msg = should_skip(tracer)
+    if skip_msg:
+        print(skip_msg)
+        return
+
+    os.environ["COVERAGE_TEST_TRACER"] = tracer
+    if os.environ.get("COVERAGE_COVERAGE", ""):
+        return run_tests_with_coverage(tracer, *noseargs)
+    else:
+        return run_tests(tracer, *noseargs)
+
+
+def do_zip_mods():
+    """Build the zipmods.zip file."""
+    zf = zipfile.ZipFile("tests/zipmods.zip", "w")
+
+    # Take one file from disk.
+    zf.write("tests/covmodzip1.py", "covmodzip1.py")
+
+    # The others will be various encodings.
+    source = textwrap.dedent(u"""\
+        # coding: {encoding}
+        text = u"{text}"
+        ords = {ords}
+        assert [ord(c) for c in text] == ords
+        print(u"All OK with {encoding}")
+        """)
+    # These encodings should match the list in tests/test_python.py
+    details = [
+        (u'utf8', u'ⓗⓔⓛⓛⓞ, ⓦⓞⓡⓛⓓ'),
+        (u'gb2312', u'你好,世界'),
+        (u'hebrew', u'שלום, עולם'),
+        (u'shift_jis', u'こんにちは世界'),
+        (u'cp1252', u'“hi”'),
+    ]
+    for encoding, text in details:
+        filename = 'encoded_{0}.py'.format(encoding)
+        ords = [ord(c) for c in text]
+        source_text = source.format(encoding=encoding, text=text, ords=ords)
+        zf.writestr(filename, source_text.encode(encoding))
+
+    zf.close()
+
+
+def do_install_egg():
+    """Install the egg1 egg for tests."""
+    # I am pretty certain there are easier ways to install eggs...
+    # pylint: disable=import-error,no-name-in-module
+    cur_dir = os.getcwd()
+    os.chdir("tests/eggsrc")
+    with ignore_warnings():
+        import distutils.core
+        distutils.core.run_setup("setup.py", ["--quiet", "bdist_egg"])
+        egg = glob.glob("dist/*.egg")[0]
+        distutils.core.run_setup(
+            "setup.py", ["--quiet", "easy_install", "--no-deps", "--zip-ok", egg]
+        )
+    os.chdir(cur_dir)
+
+
+def do_check_eol():
+    """Check files for incorrect newlines and trailing whitespace."""
+
+    ignore_dirs = [
+        '.svn', '.hg', '.git',
+        '.tox*',
+        '*.egg-info',
+        '_build',
+    ]
+    checked = set()
+
+    def check_file(fname, crlf=True, trail_white=True):
+        """Check a single file for whitespace abuse."""
+        fname = os.path.relpath(fname)
+        if fname in checked:
+            return
+        checked.add(fname)
+
+        line = None
+        with open(fname, "rb") as f:
+            for n, line in enumerate(f, start=1):
+                if crlf:
+                    if "\r" in line:
+                        print("%s@%d: CR found" % (fname, n))
+                        return
+                if trail_white:
+                    line = line[:-1]
+                    if not crlf:
+                        line = line.rstrip('\r')
+                    if line.rstrip() != line:
+                        print("%s@%d: trailing whitespace found" % (fname, n))
+                        return
+
+        if line is not None and not line.strip():
+            print("%s: final blank line" % (fname,))
+
+    def check_files(root, patterns, **kwargs):
+        """Check a number of files for whitespace abuse."""
+        for root, dirs, files in os.walk(root):
+            for f in files:
+                fname = os.path.join(root, f)
+                for p in patterns:
+                    if fnmatch.fnmatch(fname, p):
+                        check_file(fname, **kwargs)
+                        break
+            for ignore_dir in ignore_dirs:
+                ignored = []
+                for dir_name in dirs:
+                    if fnmatch.fnmatch(dir_name, ignore_dir):
+                        ignored.append(dir_name)
+                for dir_name in ignored:
+                    dirs.remove(dir_name)
+
+    check_files("coverage", ["*.py"])
+    check_files("coverage/ctracer", ["*.c", "*.h"])
+    check_files("coverage/htmlfiles", ["*.html", "*.css", "*.js"])
+    check_file("tests/farm/html/src/bom.py", crlf=False)
+    check_files("tests", ["*.py"])
+    check_files("tests", ["*,cover"], trail_white=False)
+    check_files("tests/js", ["*.js", "*.html"])
+    check_file("setup.py")
+    check_file("igor.py")
+    check_file("Makefile")
+    check_file(".hgignore")
+    check_file(".travis.yml")
+    check_files(".", ["*.rst", "*.txt"])
+    check_files(".", ["*.pip"])
+
+
+def print_banner(label):
+    """Print the version of Python."""
+    try:
+        impl = platform.python_implementation()
+    except AttributeError:
+        impl = "Python"
+
+    version = platform.python_version()
+
+    if '__pypy__' in sys.builtin_module_names:
+        version += " (pypy %s)" % ".".join(str(v) for v in sys.pypy_version_info)
+
+    which_python = os.path.relpath(sys.executable)
+    print('=== %s %s %s (%s) ===' % (impl, version, label, which_python))
+    sys.stdout.flush()
+
+
+def do_help():
+    """List the available commands"""
+    items = list(globals().items())
+    items.sort()
+    for name, value in items:
+        if name.startswith('do_'):
+            print("%-20s%s" % (name[3:], value.__doc__))
+
+
+def analyze_args(function):
+    """What kind of args does `function` expect?
+
+    Returns:
+        star, num_args:
+            star(boolean): Does `function` accept *args?
+            num_args(int): How many positional arguments does `function` have?
+    """
+    try:
+        getargspec = inspect.getfullargspec
+    except AttributeError:
+        getargspec = inspect.getargspec
+    argspec = getargspec(function)
+    return bool(argspec[1]), len(argspec[0])
+
+
+def main(args):
+    """Main command-line execution for igor.
+
+    Verbs are taken from the command line, and extra words taken as directed
+    by the arguments needed by the handler.
+
+    """
+    while args:
+        verb = args.pop(0)
+        handler = globals().get('do_'+verb)
+        if handler is None:
+            print("*** No handler for %r" % verb)
+            return 1
+        star, num_args = analyze_args(handler)
+        if star:
+            # Handler has *args, give it all the rest of the command line.
+            handler_args = args
+            args = []
+        else:
+            # Handler has specific arguments, give it only what it needs.
+            handler_args = args[:num_args]
+            args = args[num_args:]
+        ret = handler(*handler_args)
+        # If a handler returns a failure-like value, stop.
+        if ret:
+            return ret
+
+
+if __name__ == '__main__':
+    sys.exit(main(sys.argv[1:]))
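
For orientation, the igor.py invocations used by tox.ini later in this patch, spelled out as the direct calls main() ends up making (illustration only; the trailing "-v" stands in for whatever nose arguments are passed through):

    import igor

    # "python igor.py zip_mods install_egg remove_extension" walks three
    # zero-argument handlers in order:
    igor.do_zip_mods()
    igor.do_install_egg()
    igor.do_remove_extension()

    # "python igor.py test_with_tracer py -v" reaches a *args handler, which
    # takes the tracer name plus everything left on the line as nose arguments:
    igor.do_test_with_tracer("py", "-v")
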
diff --git a/catapult/third_party/coverage/metacov.ini b/catapult/third_party/coverage/metacov.ini
new file mode 100644
index 0000000..a356cb7
--- /dev/null
+++ b/catapult/third_party/coverage/metacov.ini
@@ -0,0 +1,35 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+# Settings to use when using coverage.py to measure itself.
+[run]
+branch = true
+data_file = $COVERAGE_METAFILE
+parallel = true
+source =
+    $COVERAGE_HOME/coverage
+    $COVERAGE_HOME/tests
+
+[report]
+# We set a different pragma so our code won't be confused with test code.
+exclude_lines =
+    # pragma: not covered
+    # pragma: nested
+    def __repr__
+    raise AssertionError
+    # pragma: debugging
+    # pragma: only failure
+
+partial_branches =
+    # pragma: part covered
+    if env.TESTING:
+
+ignore_errors = true
+precision = 1
+
+[paths]
+source =
+    .
+    *\coverage\trunk
+    */coverage/trunk
+    *\coveragepy
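
The $COVERAGE_* placeholders above are supplied through the environment before the configuration is read; this sketch mirrors do_combine_html() in igor.py earlier in this patch (illustration only):

    import os
    import coverage

    os.environ['COVERAGE_HOME'] = os.getcwd()
    os.environ['COVERAGE_METAFILE'] = os.path.abspath(".metacov")

    cov = coverage.Coverage(config_file="metacov.ini")  # picks up the values above
    cov.load()
    cov.combine()
    cov.save()
    cov.html_report()
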
diff --git a/catapult/third_party/coverage/pylintrc b/catapult/third_party/coverage/pylintrc
new file mode 100644
index 0000000..09ac141
--- /dev/null
+++ b/catapult/third_party/coverage/pylintrc
@@ -0,0 +1,345 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+# lint Python modules using external checkers.
+# 
+# This is the main checker controlling the other ones and the reports
+# generation. It is itself both a raw checker and an astng checker in order
+# to:
+# * handle message activation / deactivation at the module level
+# * handle some basic but necessary stats data (number of classes, methods...)
+# 
+[MASTER]
+
+# Specify a configuration file.
+#rcfile=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Profiled execution.
+profile=no
+
+# Add <file or directory> to the black list. It should be a base name, not a
+# path. You may set this option multiple times.
+ignore=
+
+# Pickle collected data for later comparisons.
+persistent=no
+
+# Set the cache size for astng objects.
+cache-size=500
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+
+[MESSAGES CONTROL]
+
+# Enable only checker(s) with the given id(s). This option conflicts with the
+# disable-checker option
+#enable-checker=
+
+# Enable all checker(s) except those with the given id(s). This option
+# conflicts with the enable-checker option
+#disable-checker=
+
+# Enable all messages in the listed categories.
+#enable-msg-cat=
+
+# Disable all messages in the listed categories.
+#disable-msg-cat=
+
+# Enable the message(s) with the given id(s).
+enable=
+#   I0021: Useless suppression
+    I0021
+
+# Disable the message(s) with the given id(s).
+disable= 
+    spelling,
+# Messages that are just silly:
+#   I0011:106: Locally disabling E1101
+#   W0122: 30:run_python_file: Use of the exec statement
+#   W0142: 31:call_singleton_method: Used * or ** magic
+#   W0232:  6:AnyOldObject: Class has no __init__ method
+#   C0323:311:coverage.report: Operator not followed by a space
+#   C0324: 15: Comma not followed by a space
+#   W0603: 28:call_singleton_method: Using the global statement
+#   W0703:133:CoverageData._read_file: Catch "Exception"
+    I0011,W0122,W0142,W0232,C0323,C0324,W0603,W0703,
+# Messages that may be silly:
+#   R0201: 42:Tracer.stop: Method could be a function
+#   E1103: 26:RunTests.test_run_python_file: Instance of 'file' has no 'getvalue' member (but some types could not be inferred)
+    R0201,E1103,
+# formatting stuff
+    superfluous-parens,bad-continuation,
+# Messages that are noisy for now, eventually maybe we'll turn them on:
+#   C0103:256:coverage.morf_filename: Invalid name "f" (should match [a-z_][a-z0-9_]{2,30}$)
+#   W0212: 86:Reporter.report_files: Access to a protected member _analyze of a client class     
+    C0103,W0212,
+    duplicate-code,
+    cyclic-import
+
+msg-template={path}:{line}: {msg} ({symbol})
+
+[REPORTS]
+
+# set the output format. Available formats are text, parseable, colorized, msvs
+# (visual studio) and html
+output-format=text
+
+# Put messages in a separate file for each module / package specified on the
+# command line instead of printing them on stdout. Reports (if any) will be
+# written in a file named "pylint_global.[txt|html]".
+files-output=no
+
+# Tells whether to display a full report or only the messages
+reports=no
+
+# Python expression which should return a note less than 10 (10 is the highest
+# note). You have access to the variables errors, warning, statement, which
+# respectively contain the number of errors / warnings messages and the total
+# number of statements analyzed. This is used by the global evaluation report
+# (R0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Add a comment according to your evaluation note. This is used by the global
+# evaluation report (R0004).
+comment=no
+
+# Enable the report(s) with the given id(s).
+#enable-report=
+
+# Disable the report(s) with the given id(s).
+#disable-report=
+
+
+# checks for :
+# * doc strings
+# * modules / classes / functions / methods / arguments / variables name
+# * number of arguments, local variables, branchs, returns and statements in
+# functions, methods
+# * required module attributes
+# * dangerous default values as arguments
+# * redefinition of function / method / class
+# * uses of the global statement
+# 
+[BASIC]
+
+# Required attributes for module, separated by a comma
+required-attributes=
+
+# Regular expression which should only match functions or classes name which do
+# not require a docstring
+no-docstring-rgx=__.*__|test[A-Z_].*|setUp|tearDown
+
+# Regular expression which should only match correct module names
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Regular expression which should only match correct module level names
+const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Regular expression which should only match correct class names
+class-rgx=[A-Z_][a-zA-Z0-9]+$
+
+# Regular expression which should only match correct function names
+function-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct method names
+method-rgx=[a-z_][a-z0-9_]{2,30}$|setUp|tearDown|test_.*
+
+# Regular expression which should only match correct instance attribute names
+attr-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct argument names
+argument-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct variable names
+variable-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct list comprehension /
+# generator expression variable names
+inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,j,k,ex,Run,_
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,bar,baz,toto,tutu,tata
+
+# List of builtins function names that should not be used, separated by a comma
+bad-functions=
+
+
+# try to find bugs in the code using type inference
+# 
+[TYPECHECK]
+
+# Tells whether missing members accessed in a mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# List of classes names for which member attributes should not be checked
+# (useful for classes with attributes dynamically set).
+ignored-classes=SQLObject
+
+# When zope mode is activated, consider the acquired-members option to ignore
+# access to some undefined attributes.
+zope=no
+
+# List of members which are usually obtained through zope's acquisition mechanism and
+# so shouldn't trigger E0201 when accessed (need zope=yes to be considered).
+acquired-members=REQUEST,acl_users,aq_parent
+
+
+# checks for
+# * unused variables / imports
+# * undefined variables
+# * redefinition of variable from builtins or from an outer scope
+# * use of variable before assignment
+# 
+[VARIABLES]
+
+# Tells whether we should check for unused imports in __init__ files.
+init-import=no
+
+# A regular expression matching names used for dummy variables (i.e. not used).
+dummy-variables-rgx=_|dummy|unused|.*_unused
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid defining new builtins when possible.
+additional-builtins=
+
+
+# checks for :
+# * methods without self as first argument
+# * overridden methods signature
+# * access only to existent members via self
+# * attributes not defined in the __init__ method
+# * supported interfaces implementation
+# * unreachable code
+# 
+[CLASSES]
+
+# List of interface methods to ignore, separated by a comma. This is used for
+# instance not to check methods defined in Zope's Interface base class.
+ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp,reset
+
+
+# checks for signs of poor design / misdesign:
+# * number of methods, attributes, local variables...
+# * size, complexity of functions, methods
+# 
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=15
+
+# Maximum number of locals for function / method body
+max-locals=50
+
+# Maximum number of return / yield for function / method body
+max-returns=20
+
+# Maximum number of branch for function / method body
+max-branches=50
+
+# Maximum number of statements in function / method body
+max-statements=150
+
+# Maximum number of parents for a class (see R0901).
+max-parents=12
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=40
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=0
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=500
+
+
+# checks for
+# * external modules dependencies
+# * relative / wildcard imports
+# * cyclic imports
+# * uses of deprecated modules
+# 
+[IMPORTS]
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=regsub,string,TERMIOS,Bastion,rexec
+
+# Create a graph of all (i.e. internal and external) dependencies in the
+# given file (report R0402 must not be disabled)
+import-graph=
+
+# Create a graph of external dependencies in the given file (report R0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of internal dependencies in the given file (report R0402 must
+# not be disabled)
+int-import-graph=
+
+
+# checks for :
+# * unauthorized constructions
+# * strict indentation
+# * line length
+# * use of <> instead of !=
+# 
+[FORMAT]
+
+# Maximum number of characters on a single line.
+max-line-length=100
+
+# Maximum number of lines in a module
+max-module-lines=10000
+
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# tab).
+indent-string='    '
+
+
+# checks for:
+# * warning notes in the code like FIXME, XXX
+# * PEP 263: source code with non ascii character but no encoding declaration
+# 
+[MISCELLANEOUS]
+
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,XXX,TODO
+
+
+# checks for similarities and duplicated code. This computation may be
+# memory / CPU intensive, so you should disable it if you experience
+# problems.
+# 
+[SIMILARITIES]
+
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+#
+# SPELLING
+#
+
+spelling-dict=en_US
+# pylint doesn't strip the words, so insert a dummy x at the beginning to make
+# the other words work properly.
+# https://bitbucket.org/logilab/pylint/issue/398/spelling-words-need-to-be-stripped-or-the
+spelling-private-dict-file=doc/dict.txt
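
A hedged usage sketch (not part of the patch): pylint of this vintage can be driven from Python as well as from the shell, pointing it at this rcfile; the `coverage` target matches the package directory linted elsewhere in this patch:

    from pylint import lint

    # Equivalent to "pylint --rcfile=pylintrc coverage" on the command line.
    # Run() prints its report and then exits with pylint's status code.
    lint.Run(["--rcfile=pylintrc", "coverage"])
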
diff --git a/catapult/third_party/coverage/requirements/dev.pip b/catapult/third_party/coverage/requirements/dev.pip
new file mode 100644
index 0000000..503601f
--- /dev/null
+++ b/catapult/third_party/coverage/requirements/dev.pip
@@ -0,0 +1,20 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+# Requirements for doing local development work on coverage.py.
+# https://requires.io/github/nedbat/coveragepy/requirements/
+
+# PyPI requirements for running tests.
+nose==1.3.7
+-r tox.pip
+
+# for linting.
+greenlet==0.4.9
+mock==1.3.0
+PyContracts==1.7.6
+pyenchant==1.6.6
+pylint==1.4.4
+
+# for kitting.
+requests==2.8.1
+twine==1.6.4
diff --git a/catapult/third_party/coverage/requirements/tox.pip b/catapult/third_party/coverage/requirements/tox.pip
new file mode 100644
index 0000000..9080d82
--- /dev/null
+++ b/catapult/third_party/coverage/requirements/tox.pip
@@ -0,0 +1,2 @@
+# The version of tox used by coverage.py
+tox==2.1.1
diff --git a/catapult/third_party/coverage/requirements/wheel.pip b/catapult/third_party/coverage/requirements/wheel.pip
new file mode 100644
index 0000000..3b683e4
--- /dev/null
+++ b/catapult/third_party/coverage/requirements/wheel.pip
@@ -0,0 +1,3 @@
+# Things needed to make wheels for coverage.py
+setuptools==18.4
+wheel==0.26.0
diff --git a/catapult/third_party/coverage/setup.cfg b/catapult/third_party/coverage/setup.cfg
new file mode 100644
index 0000000..861a9f5
--- /dev/null
+++ b/catapult/third_party/coverage/setup.cfg
@@ -0,0 +1,5 @@
+[egg_info]
+tag_build = 
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/catapult/third_party/coverage/setup.py b/catapult/third_party/coverage/setup.py
new file mode 100644
index 0000000..010b5b7
--- /dev/null
+++ b/catapult/third_party/coverage/setup.py
@@ -0,0 +1,197 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+"""Code coverage measurement for Python"""
+
+# Distutils setup for coverage.py
+# This file is used unchanged under all versions of Python, 2.x and 3.x.
+
+import os
+import sys
+
+from setuptools import setup
+from distutils.core import Extension                # pylint: disable=no-name-in-module, import-error
+from distutils.command.build_ext import build_ext   # pylint: disable=no-name-in-module, import-error
+from distutils import errors                        # pylint: disable=no-name-in-module
+
+# Get or massage our metadata.  We exec coverage/version.py so we can avoid
+# importing the product code into setup.py.
+
+classifiers = """\
+Environment :: Console
+Intended Audience :: Developers
+License :: OSI Approved :: Apache Software License
+Operating System :: OS Independent
+Programming Language :: Python :: 2.6
+Programming Language :: Python :: 2.7
+Programming Language :: Python :: 3.3
+Programming Language :: Python :: 3.4
+Programming Language :: Python :: 3.5
+Programming Language :: Python :: Implementation :: CPython
+Programming Language :: Python :: Implementation :: PyPy
+Topic :: Software Development :: Quality Assurance
+Topic :: Software Development :: Testing
+"""
+
+cov_ver_py = os.path.join(os.path.split(__file__)[0], "coverage/version.py")
+with open(cov_ver_py) as version_file:
+    # __doc__ will be overwritten by version.py.
+    doc = __doc__
+    # Keep pylint happy.
+    __version__ = __url__ = version_info = ""
+    # Execute the code in version.py.
+    exec(compile(version_file.read(), cov_ver_py, 'exec'))
+
+with open("README.rst") as readme:
+    long_description = readme.read().replace("http://coverage.readthedocs.org", __url__)
+
+classifier_list = classifiers.splitlines()
+
+if version_info[3] == 'alpha':
+    devstat = "3 - Alpha"
+elif version_info[3] in ['beta', 'candidate']:
+    devstat = "4 - Beta"
+else:
+    assert version_info[3] == 'final'
+    devstat = "5 - Production/Stable"
+classifier_list.append("Development Status :: " + devstat)
+
+# Create the keyword arguments for setup()
+
+setup_args = dict(
+    name='coverage',
+    version=__version__,
+
+    packages=[
+        'coverage',
+    ],
+
+    package_data={
+        'coverage': [
+            'htmlfiles/*.*',
+        ]
+    },
+
+    entry_points={
+        # Install a script as "coverage", and as "coverage[23]", and as
+        # "coverage-2.7" (or whatever).
+        'console_scripts': [
+            'coverage = coverage.cmdline:main',
+            'coverage%d = coverage.cmdline:main' % sys.version_info[:1],
+            'coverage-%d.%d = coverage.cmdline:main' % sys.version_info[:2],
+        ],
+    },
+
+    # We need to get HTML assets from our htmlfiles directory.
+    zip_safe=False,
+
+    author='Ned Batchelder and others',
+    author_email='ned@nedbatchelder.com',
+    description=doc,
+    long_description=long_description,
+    keywords='code coverage testing',
+    license='Apache 2.0',
+    classifiers=classifier_list,
+    url=__url__,
+)
+
+# A replacement for the build_ext command which raises a single exception
+# if the build fails, so we can fall back nicely.
+
+ext_errors = (
+    errors.CCompilerError,
+    errors.DistutilsExecError,
+    errors.DistutilsPlatformError,
+)
+if sys.platform == 'win32':
+    # distutils.msvc9compiler can raise an IOError when failing to
+    # find the compiler
+    ext_errors += (IOError,)
+
+
+class BuildFailed(Exception):
+    """Raise this to indicate the C extension wouldn't build."""
+    def __init__(self):
+        Exception.__init__(self)
+        self.cause = sys.exc_info()[1]      # work around py 2/3 different syntax
+
+
+class ve_build_ext(build_ext):
+    """Build C extensions, but fail with a straightforward exception."""
+
+    def run(self):
+        """Wrap `run` with `BuildFailed`."""
+        try:
+            build_ext.run(self)
+        except errors.DistutilsPlatformError:
+            raise BuildFailed()
+
+    def build_extension(self, ext):
+        """Wrap `build_extension` with `BuildFailed`."""
+        try:
+            # Uncomment to test compile failure handling:
+            #   raise errors.CCompilerError("OOPS")
+            build_ext.build_extension(self, ext)
+        except ext_errors:
+            raise BuildFailed()
+        except ValueError as err:
+            # this can happen on Windows 64 bit, see Python issue 7511
+            if "'path'" in str(err):    # works with both py 2/3
+                raise BuildFailed()
+            raise
+
+# There are a few reasons we might not be able to compile the C extension.
+# Figure out if we should attempt the C extension or not.
+
+compile_extension = True
+
+if sys.platform.startswith('java'):
+    # Jython can't compile C extensions
+    compile_extension = False
+
+if '__pypy__' in sys.builtin_module_names:
+    # Pypy can't compile C extensions
+    compile_extension = False
+
+if compile_extension:
+    setup_args.update(dict(
+        ext_modules=[
+            Extension(
+                "coverage.tracer",
+                sources=[
+                    "coverage/ctracer/datastack.c",
+                    "coverage/ctracer/filedisp.c",
+                    "coverage/ctracer/module.c",
+                    "coverage/ctracer/tracer.c",
+                ],
+            ),
+        ],
+        cmdclass={
+            'build_ext': ve_build_ext,
+        },
+    ))
+
+# Py3.x-specific details.
+
+if sys.version_info >= (3, 0):
+    setup_args.update(dict(
+        use_2to3=False,
+    ))
+
+
+def main():
+    """Actually invoke setup() with the arguments we built above."""
+    # For a variety of reasons, it might not be possible to install the C
+    # extension.  Try it with, and if it fails, try it without.
+    try:
+        setup(**setup_args)
+    except BuildFailed as exc:
+        msg = "Couldn't install with extension module, trying without it..."
+        exc_msg = "%s: %s" % (exc.__class__.__name__, exc.cause)
+        print("**\n** %s\n** %s\n**" % (msg, exc_msg))
+
+        del setup_args['ext_modules']
+        setup(**setup_args)
+
+if __name__ == '__main__':
+    main()
diff --git a/catapult/third_party/coverage/tox.ini b/catapult/third_party/coverage/tox.ini
new file mode 100644
index 0000000..009fa79
--- /dev/null
+++ b/catapult/third_party/coverage/tox.ini
@@ -0,0 +1,73 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+[tox]
+envlist = py{26,27,33,34,35}, pypy{24,26,40,3_24}, doc
+skip_missing_interpreters = True
+
+[testenv]
+usedevelop = True
+
+deps =
+    # https://requires.io/github/nedbat/coveragepy/requirements/
+    nose==1.3.7
+    mock==1.3.0
+    PyContracts==1.7.6
+    py26: unittest2==1.1.0
+    py{26,27,33,34,35}: gevent==1.1b6
+    py{26,27,33,34}: eventlet==0.17.4
+    py{26,27,33,34,35}: greenlet==0.4.9
+
+passenv = *
+setenv =
+    pypy,pypy{24,26,40,3_24}: COVERAGE_NO_EXTENSION=no C extension under PyPy
+
+commands =
+    python setup.py --quiet clean develop
+
+    # Create tests/zipmods.zip
+    # Install the egg1 egg
+    # Remove the C extension so that we can test the PyTracer
+    python igor.py zip_mods install_egg remove_extension
+
+    # Test with the PyTracer
+    python igor.py test_with_tracer py {posargs}
+
+    # Build the C extension and test with the CTracer
+    python setup.py --quiet build_ext --inplace
+    python igor.py test_with_tracer c {posargs}
+
+[testenv:pypy]
+basepython = pypy
+
+[testenv:pypy24]
+basepython = pypy2.4
+
+[testenv:pypy26]
+basepython = pypy2.6
+
+[testenv:pypy3_24]
+basepython = pypy3-2.4
+
+[testenv:pypy40]
+basepython = pypy4.0
+
+[testenv:doc]
+# Build the docs so we know if they are successful.  We build twice: once with
+# -q to get all warnings, and once with -QW to get a success/fail status
+# return.
+deps = -rdoc/requirements.pip
+commands =
+    sphinx-build -aEnq doc doc/_build/html
+    sphinx-build -aEnQW doc doc/_build/html
+    rst2html.py --strict CHANGES.rst doc/_build/trash
+    rst2html.py --strict README.rst doc/_build/trash
+
+# Yes, pep8 will read its settings from tox.ini!
+[pep8]
+# E265: block comment should start with '# '
+# E301 expected 1 blank line, found 0
+# E401 multiple imports on one line
+# The rest are the default ignored warnings.
+ignore = E265,E123,E133,E226,E241,E242,E301,E401
+max-line-length = 100
diff --git a/catapult/third_party/coverage/tox_wheels.ini b/catapult/third_party/coverage/tox_wheels.ini
new file mode 100644
index 0000000..f956183
--- /dev/null
+++ b/catapult/third_party/coverage/tox_wheels.ini
@@ -0,0 +1,13 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
+
+[tox]
+envlist = py{26,27,33,34,35}
+toxworkdir = {toxinidir}/.tox_kits
+
+[testenv]
+deps =
+    -rrequirements/wheel.pip
+
+commands = 
+    python setup.py bdist_wheel {posargs}
diff --git a/catapult/third_party/gsutil/CHANGES.md b/catapult/third_party/gsutil/CHANGES.md
index ed3d6e4..ccd153a 100644
--- a/catapult/third_party/gsutil/CHANGES.md
+++ b/catapult/third_party/gsutil/CHANGES.md
@@ -1,3 +1,104 @@
+Release 4.15 (release date: 2015-09-08)
+=======================================
+Bug Fixes
+---------
+- Fixed an OverflowError in apitools that caused download
+  failures for large files on 32-bit machines.
+- Removed unnecessary sending of range headers for downloads when
+  using the XML API.
+- Fixed a bug that caused perfdiag to report extremely high throughput
+  when the -p flag was unspecified and exactly one of the -c or -k flags
+  was specified.
+- Fixed a ValueError that occurred on Python 2.6 with sliced object downloads.
+
+Other Changes
+-------------
+- HTTP connections for downloads and uploads in the JSON API are now
+  re-used per-thread.
+- When gsutil's automatic update feature prompts and the user
+  chooses to update, gsutil will now exit with status code 1 after
+  updating (because the original command was not executed).
+- The cp -A flag is disabled when using gsutil -m to ensure that
+  ordering is preserved when copying between versioned buckets.
+
+Release 4.14 (release date: 2015-08-24)
+=======================================
+New Features
+------------
+- Implemented Sliced Object Download feature.
+  This breaks up a single large object into multiple pieces and
+  downloads them in parallel, improving performance. The gsutil cp, mv
+  and rsync commands now use this by default when compiled crcmod
+  is available for performing fast end-to-end integrity checks.
+  If compiled crcmod is not available, normal object download will
+  be used. Sliced download can be used in conjunction with the global -m
+  flag for maximum performance to download multiple objects in
+  parallel while additionally slicing each object.
+  See the "SLICED OBJECT DOWNLOAD" section of "gsutil help cp" for
+  details.
+  Note: sliced download may cause performance degradation for disks
+  with very slow seek times. You can disable this feature by setting
+  sliced_object_download_threshold = 0 in your .boto configuration file.
+- Added rthru_file and wthru_file test modes to perfdiag, allowing
+  measurement of reads and writes from a disk. This also allows
+  measurement of transferring objects too large to fit in memory.
+  The size restriction of 20GiB has been lifted.
+- perfdiag now supports a -p flag to choose a parallelism strategy
+  (slice, fan, or both) when using multiple threads and/or processes.
+
+Bug Fixes
+---------
+- Fixed an IOError that could occur in apitools when acquiring credentials
+  using multiple threads and/or processes on Google Compute Engine.
+- Fixed a bug where rm -r would attempt to delete a nonexistent bucket.
+- Fixed a bug where a default object ACL could not be set or changed to empty.
+- Fixed a bug where cached credentials corresponding to an old account could
+  be used (for example, credentials associated with a prior .boto
+  configuration file).
+- Fixed a bug in apitools for retrieving byte ranges of size 1 (for example,
+  "cat -r 1-1 ...")
+- Fixed a bug that caused the main gsutil process to perform all work leaving
+  all gsutil child processes idle.
+- Fixed a bug that caused multiple threads not to be used when
+  multiprocessing was unavailable.
+- Fixed a bug that caused rsync to skip files that start with "." when the
+  -r option was not used.
+- Fixed a bug that caused rsync -C to bail out when it failed to read
+  a source file.
+- Fixed a bug where gsutil stat printed unwanted output to stderr.
+- Fixed a bug where a parallel composite upload could return a nonzero exit
+  code even though the upload completed successfully. This occurred if
+  temporary component deletion triggered a retry but the original request
+  succeeded.
+- Fixed a bug where gsutil would exit with code 0 when both running in
+  debug mode and encountering an unhandled exception.
+- Fixed a bug where gsutil would suggest using parallel composite uploads
+  multiple times.
+
+Other Changes
+-------------
+- Bucket removal is now supported even if billing is disabled for that bucket.
+- Refactored Windows installs to no longer use any multiprocessing module
+  functions, as gsutil has never supported multiple processes on Windows.
+  Multithreading is unaffected and still available on Windows.
+- All downloads are now written to a temporary file with a "_.gstmp" suffix
+  while the download is still in progress.
+- Re-hashing of existing bytes when resuming downloads now displays progress.
+- Reduced the total number of multiprocessing.Manager processes to two.
+- The rm command now correctly counts the number of objects that could
+  not be removed.
+- Increased the default retries to match the Google Cloud Storage SLA.
+  By default, gsutil will now retry 23 times with exponential backoff up
+  to 32 seconds, for a total timespan of ~10 minutes.
+- Improved bucket subdirectory checks to a single HTTP call. Detection of
+  _$folder$ placeholder objects is now eventually consistent.
+- Eliminated two unnecessary HTTP calls when performing uploads via
+  the cp, mv, or rsync commands.
+- Updated documentation for several topics including acl, cache-control,
+  crcmod, cp, mb, rsync, and subdirs.
+- Added a warning about using parallel composite upload with NEARLINE
+  storage-class buckets.
+
 Release 4.13 (release date: 2015-06-03)
 =======================================
 New Features
diff --git a/catapult/third_party/gsutil/CHECKSUM b/catapult/third_party/gsutil/CHECKSUM
index 69650dd..5af8847 100644
--- a/catapult/third_party/gsutil/CHECKSUM
+++ b/catapult/third_party/gsutil/CHECKSUM
@@ -1 +1 @@
-141a3e09b42e1b0b6033108aa24c2286
+94ba5be034230bd88a03f0006e59034c
diff --git a/catapult/third_party/gsutil/README.chromium b/catapult/third_party/gsutil/README.chromium
index 01d176a..78648e4 100644
--- a/catapult/third_party/gsutil/README.chromium
+++ b/catapult/third_party/gsutil/README.chromium
@@ -1,8 +1,8 @@
 Name: gsutil
 Short Name: gsutil
 URL: https://cloud.google.com/storage/docs/gsutil
-Version: 4.13
-Date: 2015-07-28
+Version: 4.15
+Date: 2015-10-01
 License: Apache 2.0
 License File: no
 Security Critical: no
@@ -18,7 +18,4 @@
 * Editing object and bucket ACLs.
 
 Local Modifications:
-_ Flip executable bits of file with shebang to pass the checkperm script.
-_ Temporarily remove gsutilz/third_party/ to avoid overloading build bots.
-_ Temporarily rename top level naming from gsutil to gsutilz so patch can apply
-cleanly on the buildbots.
+_ Added this file.
diff --git a/catapult/third_party/gsutil/VERSION b/catapult/third_party/gsutil/VERSION
index 8c3a015..6ae0dcb 100644
--- a/catapult/third_party/gsutil/VERSION
+++ b/catapult/third_party/gsutil/VERSION
@@ -1 +1 @@
-4.13
+4.15
diff --git a/catapult/third_party/gsutil/gslib/__main__.py b/catapult/third_party/gsutil/gslib/__main__.py
old mode 100755
new mode 100644
index 89511bc..92c03c1
--- a/catapult/third_party/gsutil/gslib/__main__.py
+++ b/catapult/third_party/gsutil/gslib/__main__.py
@@ -103,6 +103,14 @@
 ***************************** WARNING *****************************
 """.lstrip()
 
+TRACE_WARNING = """
+***************************** WARNING *****************************
+*** You are running gsutil with trace output enabled.
+*** Be aware that trace output includes authentication credentials
+*** and may include the contents of any files accessed during the trace.
+***************************** WARNING *****************************
+""".lstrip()
+
 HTTP_WARNING = """
 ***************************** WARNING *****************************
 *** You are running gsutil with the "https_validate_certificates" config
@@ -187,14 +195,16 @@
   from gslib.util import CERTIFICATE_VALIDATION_ENABLED
   # pylint: disable=unused-variable
   from gcs_oauth2_boto_plugin import oauth2_client
+  from apitools.base.py import credentials_lib
   # pylint: enable=unused-variable
-  from gslib.util import MultiprocessingIsAvailable
-  if MultiprocessingIsAvailable()[0]:
+  from gslib.util import CheckMultiprocessingAvailableAndInit
+  if CheckMultiprocessingAvailableAndInit().is_available:
     # These setup methods must be called, and, on Windows, they can only be
     # called from within an "if __name__ == '__main__':" block.
-    gslib.util.InitializeMultiprocessingVariables()
     gslib.command.InitializeMultiprocessingVariables()
     gslib.boto_translation.InitializeMultiprocessingVariables()
+  else:
+    gslib.command.InitializeThreadingVariables()
 
   # This needs to be done after gslib.util.InitializeMultiprocessingVariables(),
   # since otherwise we can't call gslib.util.CreateLock.
@@ -204,6 +214,7 @@
     gcs_oauth2_boto_plugin.oauth2_helper.SetFallbackClientIdAndSecret(
         GSUTIL_CLIENT_ID, GSUTIL_CLIENT_NOTSOSECRET)
     gcs_oauth2_boto_plugin.oauth2_helper.SetLock(CreateLock())
+    credentials_lib.SetCredentialsCacheFileLock(CreateLock())
   except ImportError:
     pass
 
@@ -230,6 +241,7 @@
   quiet = False
   version = False
   debug = 0
+  trace_token = None
   test_exception_traces = False
 
   # If user enters no commands just print the usage info.
@@ -253,7 +265,7 @@
       opts, args = getopt.getopt(sys.argv[1:], 'dDvo:h:mq',
                                  ['debug', 'detailedDebug', 'version', 'option',
                                   'help', 'header', 'multithreaded', 'quiet',
-                                  'testexceptiontraces'])
+                                  'testexceptiontraces', 'trace-token='])
     except getopt.GetoptError as e:
       _HandleCommandException(gslib.exception.CommandException(e.msg))
     for o, a in opts:
@@ -282,6 +294,8 @@
         quiet = True
       elif o in ('-v', '--version'):
         version = True
+      elif o == '--trace-token':
+        trace_token = a
       elif o == '--testexceptiontraces':  # Hidden flag for integration tests.
         test_exception_traces = True
       elif o in ('-o', '--option'):
@@ -295,6 +309,8 @@
           boto.config.add_section(opt_section)
         boto.config.set(opt_section, opt_name, opt_value)
     httplib2.debuglevel = debug
+    if trace_token:
+      sys.stderr.write(TRACE_WARNING)
     if debug > 1:
       sys.stderr.write(DEBUG_WARNING)
     if debug >= 2:
@@ -339,7 +355,8 @@
 
     return _RunNamedCommandAndHandleExceptions(
         command_runner, command_name, args=args[1:], headers=headers,
-        debug_level=debug, parallel_operations=parallel_operations)
+        debug_level=debug, trace_token=trace_token,
+        parallel_operations=parallel_operations)
   finally:
     _Cleanup()
 
@@ -389,14 +406,8 @@
 
 
 def _HandleUnknownFailure(e):
-  # Called if we fall through all known/handled exceptions. Allows us to
-  # print a stacktrace if -D option used.
-  if debug >= 2:
-    stack_trace = traceback.format_exc()
-    sys.stderr.write('DEBUG: Exception stack trace:\n    %s\n' %
-                     re.sub('\\n', '\n    ', stack_trace))
-  else:
-    _OutputAndExit('Failure: %s.' % e)
+  # Called if we fall through all known/handled exceptions.
+  _OutputAndExit('Failure: %s.' % e)
 
 
 def _HandleCommandException(e):
@@ -506,6 +517,7 @@
 
 def _RunNamedCommandAndHandleExceptions(command_runner, command_name, args=None,
                                         headers=None, debug_level=0,
+                                        trace_token=None,
                                         parallel_operations=False):
   """Runs the command with the given command runner and arguments."""
   # pylint: disable=g-import-not-at-top
@@ -521,7 +533,8 @@
     if not IS_WINDOWS:
       RegisterSignalHandler(signal.SIGQUIT, _HandleSigQuit)
     return command_runner.RunNamedCommand(command_name, args, headers,
-                                          debug_level, parallel_operations)
+                                          debug_level, trace_token,
+                                          parallel_operations)
   except AttributeError as e:
     if str(e).find('secret_access_key') != -1:
       _OutputAndExit('Missing credentials for the given URI(s). Does your '
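
The __main__.py hunks above add a --trace-token flag and print a warning to stderr when it is supplied. Below is a small, self-contained sketch of that getopt pattern (a long option taking a required value plus a stderr warning); the option name comes from the diff, but the warning text and function name are illustrative only:

```
import getopt
import sys

TRACE_WARNING = 'WARNING: trace output may include credentials.\n'


def parse_trace_token(argv):
  """Parses a '--trace-token=<value>' style flag and warns if present."""
  trace_token = None
  opts, remaining = getopt.getopt(argv, 'd', ['debug', 'trace-token='])
  for opt, val in opts:
    if opt == '--trace-token':
      trace_token = val
  if trace_token:
    sys.stderr.write(TRACE_WARNING)
  return trace_token, remaining


print(parse_trace_token(['--trace-token', 'token123', 'ls']))
# -> ('token123', ['ls'])
```
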
diff --git a/catapult/third_party/gsutil/gslib/addlhelp/acls.py b/catapult/third_party/gsutil/gslib/addlhelp/acls.py
index b9ac443..12c43c0 100644
--- a/catapult/third_party/gsutil/gslib/addlhelp/acls.py
+++ b/catapult/third_party/gsutil/gslib/addlhelp/acls.py
@@ -34,22 +34,23 @@
 <B>BUCKET VS OBJECT ACLS</B>
   In Google Cloud Storage, the bucket ACL works as follows:
 
-  - Users granted READ access are allowed to list the bucket contents.
+  - Users granted READ access are allowed to list the bucket contents and read
+    bucket metadata other than its ACL.
 
-  - Users granted WRITE access are allowed READ access and also are
-    allowed to write and delete objects in that bucket -- including
-    overwriting previously written objects.
+  - Users granted WRITE access are allowed READ access and also are allowed to
+    write and delete objects in that bucket, including overwriting previously
+    written objects.
 
-  - Users granted OWNER access are allowed WRITE access and also
-    are allowed to read and write the bucket's ACL.
+  - Users granted OWNER access are allowed WRITE access and also are allowed to
+    read and write the bucket's ACL.
 
   The object ACL works as follows:
 
   - Users granted READ access are allowed to read the object's data and
     metadata.
 
-  - Users granted OWNER access are allowed READ access and also
-    are allowed to read and write the object's ACL.
+  - Users granted OWNER access are allowed READ access and also are allowed to
+    read and write the object's ACL.
 
   A couple of points are worth noting, that sometimes surprise users:
 
@@ -60,13 +61,13 @@
      object ACL matters for that purpose. This is different from how things
      work in Linux file systems, where both the file and directory permission
      control file read access. It also means, for example, that someone with
-     OWNER over the bucket may not have read access to objects in
-     the bucket.  This is by design, and supports useful cases. For example,
-     you might want to set up bucket ownership so that a small group of
-     administrators have OWNER on the bucket (with the ability to
-     delete data to control storage costs), but not grant those users read
-     access to the object data (which might be sensitive data that should
-     only be accessed by a different specific group of users).
+     OWNER over the bucket may not have read access to objects in the bucket.
+     This is by design, and supports useful cases. For example, you might want
+     to set up bucket ownership so that a small group of administrators have
+     OWNER on the bucket (with the ability to delete data to control storage
+     costs), but not grant those users read access to the object data (which
+     might be sensitive data that should only be accessed by a different
+     specific group of users).
 
 
 <B>CANNED ACLS</B>
diff --git a/catapult/third_party/gsutil/gslib/addlhelp/command_opts.py b/catapult/third_party/gsutil/gslib/addlhelp/command_opts.py
index 2421aee..215458c 100644
--- a/catapult/third_party/gsutil/gslib/addlhelp/command_opts.py
+++ b/catapult/third_party/gsutil/gslib/addlhelp/command_opts.py
@@ -106,12 +106,6 @@
               threads or processes, and the number of failed operations (if any)
               will be reported at the end of the command's execution.
 
-              WARNING: If you use the gsutil -m option when copying data
-              between versioned buckets, object version ordering will not be
-              preserved. For more information see the
-              "COPYING VERSIONED BUCKETS" section under
-              'gsutil help versions'.
-
   -o          Set/override values in the boto configuration value, in the format
               <section>:<name>=<value>, e.g. gsutil -o "Boto:proxy=host" ...
               This will not pass the option to gsutil integration tests, which
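
The -o option described above takes overrides in the form <section>:<name>=<value>. A rough sketch of parsing that form (the helper name is illustrative, not gsutil's):

```
def parse_boto_override(arg):
  """Splits '<section>:<name>=<value>' into its three parts."""
  section_and_name, _, value = arg.partition('=')
  section, _, name = section_and_name.partition(':')
  return section, name, value


print(parse_boto_override('Boto:proxy=host'))
# -> ('Boto', 'proxy', 'host')
```
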
diff --git a/catapult/third_party/gsutil/gslib/addlhelp/crc32c.py b/catapult/third_party/gsutil/gslib/addlhelp/crc32c.py
index d2befe2..ed2bd8d 100644
--- a/catapult/third_party/gsutil/gslib/addlhelp/crc32c.py
+++ b/catapult/third_party/gsutil/gslib/addlhelp/crc32c.py
@@ -110,7 +110,19 @@
   https://pypi.python.org/pypi/crcmod/1.7
 
   MSI installers are available for the 32-bit versions of Python 2.6 and 2.7.
+  Make sure to install to a 32-bit Python directory. 32-bit crcmod will not
+  work with 64-bit Python; to use crcmod you'll need to install 32-bit
+  Python.
 
+  Note: If you have installed crcmod and gsutil hasn't detected it, it may have
+  been installed to the wrong directory. It should be located at
+  <python_dir>\\files\\Lib\\site-packages\\crcmod\\
+
+  In some cases the installer will incorrectly install to
+  <python_dir>\\Lib\\site-packages\\crcmod\\
+
+  Manually copying the crcmod directory to the correct location should resolve
+  the issue.
 """)
 
 
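Since the crc32c help text above is about getting a compiled crcmod onto the import path, here is a hedged check for whether the C extension actually loaded. It assumes crcmod is installed and peeks at the private _usingExtension flag, which may not exist in every crcmod build:

```
import crcmod
import crcmod.predefined

# Compute a CRC32C to confirm crcmod itself imports and works.
crc32c = crcmod.predefined.mkPredefinedCrcFun('crc-32c')
print('CRC32C of b"hello": %#010x' % crc32c(b'hello'))

# Best-effort check of the private flag; falls back to False if missing.
using_extension = bool(getattr(getattr(crcmod, 'crcmod', None),
                               '_usingExtension', False))
print('Compiled crcmod extension in use: %s' % using_extension)
```
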
diff --git a/catapult/third_party/gsutil/gslib/addlhelp/metadata.py b/catapult/third_party/gsutil/gslib/addlhelp/metadata.py
index f0b547f..60f6471 100644
--- a/catapult/third_party/gsutil/gslib/addlhelp/metadata.py
+++ b/catapult/third_party/gsutil/gslib/addlhelp/metadata.py
@@ -85,7 +85,7 @@
   allowed to cache your objects. Cache-Control only applies to objects with
   a public-read ACL. Non-public data are not cacheable.
 
-  Here's an example of uploading an object set to allow caching:
+  Here's an example of uploading a set of objects to allow caching:
 
     gsutil -h "Cache-Control:public,max-age=3600" cp -a public-read \\
            -r html gs://bucket/html
@@ -98,7 +98,11 @@
   of objects after uploading a newer replacement object. Note also that because
   objects can be cached at various places on the Internet there is no way to
   force a cached object to expire globally (unlike the way you can force your
-  browser to refresh its cache).
+  browser to refresh its cache). If you want to prevent caching of publicly
+  readable objects you should set a Cache-Control:private header on the object.
+  You can do this with a command such as:
+
+    gsutil -h Cache-Control:private cp -a public-read file.png gs://your-bucket
 
   Another use of the Cache-Control header is through the "no-transform" value,
   which instructs Google Cloud Storage to not apply any content transformations
@@ -107,14 +111,6 @@
   respected by the XML API. The Google Cloud Storage JSON API respects only the
   no-cache and max-age Cache-Control parameters.
 
-  Note that if you upload an object with a public-read ACL and don't include a
-  Cache-Control header, it will be served with a Cache-Control header allowing
-  the object to be cached for 3600 seconds. This will not happen if the object
-  is uploaded with a non-public ACL and then changed to public. Moreover, if you
-  upload an object with a public-read ACL and later change the ACL not to be
-  public-read, the object will no longer be served with the default
-  Cache-Control header noted above (so will be served as not cacheable).
-  
   For details about how to set the Cache-Control header see
   "gsutil help setmeta".
 
diff --git a/catapult/third_party/gsutil/gslib/addlhelp/prod.py b/catapult/third_party/gsutil/gslib/addlhelp/prod.py
index df09014..58524b9 100644
--- a/catapult/third_party/gsutil/gslib/addlhelp/prod.py
+++ b/catapult/third_party/gsutil/gslib/addlhelp/prod.py
@@ -35,7 +35,7 @@
   threshold is 2 MiB). When a transfer fails partway through (e.g., because of
   an intermittent network problem), gsutil uses a truncated randomized binary
   exponential backoff-and-retry strategy that by default will retry transfers up
-  to 6 times over a 63 second period of time (see "gsutil help retries" for
+  to 23 times over a 10 minute period of time (see "gsutil help retries" for
   details). If the transfer fails each of these attempts with no intervening
   progress, gsutil gives up on the transfer, but keeps a "tracker" file for
   it in a configurable location (the default location is ~/.gsutil/, in a file
@@ -53,8 +53,8 @@
   we offer a number of suggestions about how this type of scripting should
   be implemented:
 
-  1. When resumable transfers fail without any progress 6 times in a row
-     over the course of up to 63 seconds, it probably won't work to simply
+  1. When resumable transfers fail without any progress 23 times in a row
+     over the course of up to 10 minutes, it probably won't work to simply
      retry the transfer immediately. A more successful strategy would be to
      have a cron job that runs every 30 minutes, determines which transfers
      need to be run, and runs them. If the network experiences intermittent
@@ -107,15 +107,11 @@
      your periodic download script by querying the database locally instead
      of performing a bucket listing.
 
-  5. Make sure you don't delete partially downloaded files after a transfer
-     fails: gsutil picks up where it left off (and performs an MD5 check of
-     the final downloaded content to ensure data integrity), so deleting
+  5. Make sure you don't delete partially downloaded temporary files after a
+     transfer fails: gsutil picks up where it left off (and performs a hash
+     of the final downloaded content to ensure data integrity), so deleting
      partially transferred files will cause you to lose progress and make
-     more wasteful use of your network. You should also make sure whatever
-     process is waiting to consume the downloaded data doesn't get pointed
-     at the partially downloaded files. One way to do this is to download
-     into a staging directory and then move successfully downloaded files to
-     a directory where consumer processes will read them.
+     more wasteful use of your network.
 
   6. If you have a fast network connection, you can speed up the transfer of
      large numbers of files by using the gsutil -m (multi-threading /
@@ -134,17 +130,6 @@
      speed, available memory, CPU load, and other conditions, this may or may
      not be optimal. Try experimenting with higher or lower numbers of threads
      to find the best number of threads for your environment.
-
-<B>RUNNING GSUTIL ON MULTIPLE MACHINES</B>
-  When running gsutil on multiple machines that are all attempting to use the
-  same OAuth2 refresh token, it is possible to encounter rate limiting errors
-  for the refresh requests (especially if all of these machines are likely to
-  start running gsutil at the same time). To account for this, gsutil will
-  automatically retry OAuth2 refresh requests with a truncated randomized
-  exponential backoff strategy like that which is described in the
-  "BACKGROUND ON RESUMABLE TRANSFERS" section above. The number of retries
-  attempted for OAuth2 refresh requests can be controlled via the
-  "oauth2_refresh_retries" variable in the .boto config file.
 """)
 
 
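Both the release note about retries and the prod help text above describe a truncated randomized binary exponential backoff (23 retries, delays capped at 32 seconds, roughly ten minutes in total). A minimal sketch of that strategy, assuming a retryable operation that raises IOError on transient failure:

```
import random
import time


def retry_with_backoff(operation, num_retries=23, max_delay=32):
  """Retries operation with truncated randomized binary exponential backoff."""
  for attempt in range(num_retries + 1):
    try:
      return operation()
    except IOError:
      if attempt == num_retries:
        raise
      delay = min(2 ** attempt, max_delay)
      time.sleep(random.uniform(0, delay))

# Worst-case total sleep with these defaults is about
# 1 + 2 + 4 + 8 + 16 + 18 * 32 = 607 seconds, i.e. roughly ten minutes,
# matching the "~10 minutes" figure in the release notes.
```
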
diff --git a/catapult/third_party/gsutil/gslib/addlhelp/security.py b/catapult/third_party/gsutil/gslib/addlhelp/security.py
index da651e1..8c9cad8 100644
--- a/catapult/third_party/gsutil/gslib/addlhelp/security.py
+++ b/catapult/third_party/gsutil/gslib/addlhelp/security.py
@@ -187,6 +187,10 @@
   before sending this debug output to anyone during troubleshooting/tech support
   interactions.
 
+  If you run gsutil --trace-token (to send a trace directly to Google),
+  sensitive information like OAuth2 tokens and the contents of any files
+  accessed during the trace may be included in the content of the trace.
+
   The proxy configuration information in the .boto configuration is
   security-sensitive, especially if your proxy setup requires user and
   password information. Even if your proxy setup doesn't require user and
diff --git a/catapult/third_party/gsutil/gslib/addlhelp/subdirs.py b/catapult/third_party/gsutil/gslib/addlhelp/subdirs.py
index 69bcda4..1d43fef 100644
--- a/catapult/third_party/gsutil/gslib/addlhelp/subdirs.py
+++ b/catapult/third_party/gsutil/gslib/addlhelp/subdirs.py
@@ -33,13 +33,13 @@
   name space supported by the Google Cloud Storage service. To the service,
   the object gs://your-bucket/abc/def/ghi.txt is just an object that happens to
   have "/" characters in its name. There are no "abc" or "abc/def" directories;
-  just a single object with the given name. This
-  `diagram <https://cloud.google.com/storage/images/gsutil-subdirectories-thumb.png>`
+  just a single object with the given name. This diagram:
+  https://cloud.google.com/storage/images/gsutil-subdirectories-thumb.png
   illustrates how gsutil provides a hierarchical view of objects in a bucket.
 
   gsutil achieves the hierarchical file tree illusion by applying a variety of
   rules, to try to make naming work the way users would expect. For example, in
-  order to determine whether to treat a destination URI as an object name or the
+  order to determine whether to treat a destination URL as an object name or the
   root of a directory under which objects should be copied gsutil uses these
   rules:
 
@@ -48,7 +48,7 @@
 
        gsutil cp your-file gs://your-bucket/abc/
 
-     gsutil will create the object gs://your-bucket/abc/file.
+     gsutil will create the object gs://your-bucket/abc/your-file.
 
   2. If the destination object is XYZ and an object exists called XYZ_$folder$
      gsutil treats XYZ as a directory. For example, if you run the command:
@@ -56,16 +56,16 @@
        gsutil cp your-file gs://your-bucket/abc
 
      and there exists an object called abc_$folder$, gsutil will create the
-     object gs://your-bucket/abc/file.
+     object gs://your-bucket/abc/your-file.
 
-  3. If you attempt to copy multiple source files to a destination URI, gsutil
-     treats the destination URI as a directory. For example, if you run
+  3. If you attempt to copy multiple source files to a destination URL, gsutil
+     treats the destination URL as a directory. For example, if you run
      the command:
 
        gsutil cp -r your-dir gs://your-bucket/abc
 
      gsutil will create objects like gs://your-bucket/abc/your-dir/file1, etc.
-     (assuming file1 is a file under the source directory).
+     (assuming file1 is a file under the source directory your-dir).
 
   4. If none of the above rules applies, gsutil performs a bucket listing to
      determine if the target of the operation is a prefix match to the
@@ -79,8 +79,8 @@
      starts with gs://your-bucket/abc/, to determine whether to treat the target
      as an object name or a directory name. In turn this impacts the name of the
      object you create: If the above check indicates there is an "abc" directory
-     you will end up with the object gs://your-bucket/abc/file; otherwise you
-     will end up with the object gs://your-bucket/abc. (See
+     you will end up with the object gs://your-bucket/abc/your-file; otherwise
+     you will end up with the object gs://your-bucket/abc. (See
      "HOW NAMES ARE CONSTRUCTED" under "gsutil help cp" for more details.)
 
   This rule-based approach stands in contrast to the way many tools work, which
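
As a toy illustration of rules 1 and 3 from the subdirs help text above (a trailing slash or multiple sources make the destination act like a directory), under the assumption that no _$folder$ placeholder or bucket-listing checks are involved:

```
def destination_object_names(dest_url, source_names):
  """Applies the trailing-slash and multiple-source rules described above."""
  treat_as_dir = dest_url.endswith('/') or len(source_names) > 1
  if treat_as_dir:
    prefix = dest_url.rstrip('/')
    return ['%s/%s' % (prefix, name) for name in source_names]
  return [dest_url for _ in source_names]


print(destination_object_names('gs://your-bucket/abc/', ['your-file']))
# -> ['gs://your-bucket/abc/your-file']
print(destination_object_names('gs://your-bucket/abc', ['file1', 'file2']))
# -> ['gs://your-bucket/abc/file1', 'gs://your-bucket/abc/file2']
```
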
diff --git a/catapult/third_party/gsutil/gslib/addlhelp/throttling.py b/catapult/third_party/gsutil/gslib/addlhelp/throttling.py
index 3e228fa..df36363 100644
--- a/catapult/third_party/gsutil/gslib/addlhelp/throttling.py
+++ b/catapult/third_party/gsutil/gslib/addlhelp/throttling.py
@@ -23,7 +23,7 @@
   Particularly when used with the -m (multi-threading) option, gsutil can
   consume a significant amount of network bandwidth. In some cases this can
   cause problems, for example if you start a large rsync operation over a
-  network connection used by a number of other important production tasks.
+  network link that's also used by a number of other important jobs.
 
   While gsutil has no built-in support for throttling requests, there are
   various tools available on Linux and MacOS that can be used to throttle
@@ -43,7 +43,7 @@
   example, the following command would reduce I/O priority of gsutil so it
   doesn't monopolize your local disk:
 
-      ionice -c 2 -n 7 gsutil gsutil -m rsync -r ./dir gs://some bucket
+      ionice -c 2 -n 7 gsutil -m rsync -r ./dir gs://some-bucket
 """)
 
 
diff --git a/catapult/third_party/gsutil/gslib/addlhelp/versions.py b/catapult/third_party/gsutil/gslib/addlhelp/versions.py
index 715125b..5c11566 100644
--- a/catapult/third_party/gsutil/gslib/addlhelp/versions.py
+++ b/catapult/third_party/gsutil/gslib/addlhelp/versions.py
@@ -143,7 +143,7 @@
 <B>COPYING VERSIONED BUCKETS</B>
   You can copy data between two versioned buckets, using a command like:
 
-    gsutil cp -r gs://bucket1/* gs://bucket2
+    gsutil cp -r -A gs://bucket1/* gs://bucket2
 
   When run using versioned buckets, this command will cause every object version
   to be copied. The copies made in gs://bucket2 will have different generation
@@ -167,12 +167,6 @@
   same sequence of sizes in both listings), but the generation numbers (and
   timestamps) are newer in gs://bucket2.
 
-  WARNING: If you use the gsutil -m option when copying the objects (to parallel
-  copy the data), object version ordering will NOT be preserved. All object
-  versions will be copied, but (for example) the latest/live version in the
-  destination bucket might be from one of the earlier versions in the source
-  bucket (and similarly, other versions may be out of order). When copying
-  versioned data it is advisable not to use the gsutil -m option.
 
 
 <B>CONCURRENCY CONTROL</B>
diff --git a/catapult/third_party/gsutil/gslib/addlhelp/wildcards.py b/catapult/third_party/gsutil/gslib/addlhelp/wildcards.py
index ae2e74d..6ffae29 100644
--- a/catapult/third_party/gsutil/gslib/addlhelp/wildcards.py
+++ b/catapult/third_party/gsutil/gslib/addlhelp/wildcards.py
@@ -98,6 +98,22 @@
     gs://bucket/[a-m]??.j*g
 
 
+<B>DIFFERENT BEHAVIOR FOR "DOT" FILES IN LOCAL FILE SYSTEM</B>
+  Per standard Unix behavior, the wildcard "*" only matches files that don't
+  start with a "." character (to avoid confusion with the "." and ".."
+  directories present in all Unix directories). gsutil provides this same
+  behavior when using wildcards over a file system URI, but does not provide
+  this behavior over cloud URIs. For example, the following command will copy
+  all objects from gs://bucket1 to gs://bucket2:
+
+    gsutil cp gs://bucket1/* gs://bucket2
+
+  but the following command will copy only files that don't start with a "."
+  from the directory "dir" to gs://bucket1:
+
+    gsutil cp dir/* gs://bucket1
+
+
 <B>EFFICIENCY CONSIDERATION: USING WILDCARDS OVER MANY OBJECTS</B>
   It is more efficient, faster, and less network traffic-intensive
   to use wildcards that have a non-wildcard object-name prefix, like:
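
The wildcard help added above notes that "*" skips dot files on the local file system. A quick standard-library demonstration of that Unix convention (the temporary directory and file names are arbitrary):

```
import glob
import os
import tempfile

tmp_dir = tempfile.mkdtemp()
for name in ('visible.txt', '.hidden.txt'):
  open(os.path.join(tmp_dir, name), 'w').close()

print(sorted(os.path.basename(p)
             for p in glob.glob(os.path.join(tmp_dir, '*'))))
# -> ['visible.txt']  ("*" does not match names starting with ".")
print(sorted(os.path.basename(p)
             for p in glob.glob(os.path.join(tmp_dir, '.*'))))
# -> ['.hidden.txt']
```
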
diff --git a/catapult/third_party/gsutil/gslib/boto_translation.py b/catapult/third_party/gsutil/gslib/boto_translation.py
index 9201390..edb69dd 100644
--- a/catapult/third_party/gsutil/gslib/boto_translation.py
+++ b/catapult/third_party/gsutil/gslib/boto_translation.py
@@ -30,6 +30,7 @@
 import socket
 import tempfile
 import textwrap
+import threading
 import time
 import xml
 from xml.dom.minidom import parseString as XmlParseString
@@ -74,6 +75,7 @@
 from gslib.translation_helper import AddS3MarkerAclToObjectMetadata
 from gslib.translation_helper import CorsTranslation
 from gslib.translation_helper import CreateBucketNotFoundException
+from gslib.translation_helper import CreateNotFoundExceptionForObjectWrite
 from gslib.translation_helper import CreateObjectNotFoundException
 from gslib.translation_helper import DEFAULT_CONTENT_TYPE
 from gslib.translation_helper import EncodeStringAsLong
@@ -84,10 +86,8 @@
 from gslib.translation_helper import S3MarkerAclFromObjectMetadata
 from gslib.util import ConfigureNoOpAuthIfNeeded
 from gslib.util import DEFAULT_FILE_BUFFER_SIZE
-from gslib.util import GetFileSize
 from gslib.util import GetMaxRetryDelay
 from gslib.util import GetNumRetries
-from gslib.util import MultiprocessingIsAvailable
 from gslib.util import S3_DELETE_MARKER_GUID
 from gslib.util import TWO_MIB
 from gslib.util import UnaryDictToXml
@@ -101,8 +101,11 @@
                                 boto.exception.StorageCreateError,
                                 boto.exception.StorageResponseError)
 
-# If multiprocessing is available, this will be overridden to a (thread-safe)
-# multiprocessing.Value in a call to InitializeMultiprocessingVariables.
+# pylint: disable=global-at-module-level
+global boto_auth_initialized, boto_auth_initialized_lock
+# If multiprocessing is available, these will be overridden to process-safe
+# variables in InitializeMultiprocessingVariables.
+boto_auth_initialized_lock = threading.Lock()
 boto_auth_initialized = False
 
 NON_EXISTENT_OBJECT_REGEX = re.compile(r'.*non-\s*existent\s*object',
@@ -111,16 +114,36 @@
 MD5_REGEX = re.compile(r'^"*[a-fA-F0-9]{32}"*$')
 
 
-def InitializeMultiprocessingVariables():
+def InitializeMultiprocessingVariables():  # pylint: disable=invalid-name
   """Perform necessary initialization for multiprocessing.
 
     See gslib.command.InitializeMultiprocessingVariables for an explanation
     of why this is necessary.
   """
-  global boto_auth_initialized  # pylint: disable=global-variable-undefined
+  # pylint: disable=global-variable-undefined
+  global boto_auth_initialized, boto_auth_initialized_lock
+  boto_auth_initialized_lock = gslib.util.CreateLock()
   boto_auth_initialized = multiprocessing.Value('i', 0)
 
 
+class DownloadProxyCallbackHandler(object):
+  """Intermediary callback to keep track of the number of bytes downloaded."""
+
+  def __init__(self, start_byte, callback):
+    self._start_byte = start_byte
+    self._callback = callback
+
+  def call(self, bytes_downloaded, total_size):
+    """Saves necessary data and then calls the given Cloud API callback.
+
+    Args:
+      bytes_downloaded: Number of bytes processed so far.
+      total_size: Total size of the ongoing operation.
+    """
+    if self._callback:
+      self._callback(self._start_byte + bytes_downloaded, total_size)
+
+
 class BotoTranslation(CloudApi):
   """Boto-based XML translation implementation of gsutil Cloud API.
 
@@ -130,7 +153,7 @@
   """
 
   def __init__(self, bucket_storage_uri_class, logger, provider=None,
-               credentials=None, debug=0):
+               credentials=None, debug=0, trace_token=None):
     """Performs necessary setup for interacting with the cloud storage provider.
 
     Args:
@@ -142,17 +165,19 @@
                 the provider argument and use this one instead.
       credentials: Unused.
       debug: Debug level for the API implementation (0..3).
+      trace_token: Unused in this subclass.
     """
     super(BotoTranslation, self).__init__(bucket_storage_uri_class, logger,
                                           provider=provider, debug=debug)
     _ = credentials
-    global boto_auth_initialized  # pylint: disable=global-variable-undefined
-    if MultiprocessingIsAvailable()[0] and not boto_auth_initialized.value:
+    # pylint: disable=global-variable-undefined, global-variable-not-assigned
+    global boto_auth_initialized, boto_auth_initialized_lock
+    with boto_auth_initialized_lock:
       ConfigureNoOpAuthIfNeeded()
-      boto_auth_initialized.value = 1
-    elif not boto_auth_initialized:
-      ConfigureNoOpAuthIfNeeded()
-      boto_auth_initialized = True
+      if isinstance(boto_auth_initialized, bool):
+        boto_auth_initialized = True
+      else:
+        boto_auth_initialized.value = 1
     self.api_version = boto.config.get_value(
         'GSUtil', 'default_api_version', '1')
 
@@ -318,8 +343,10 @@
     _ = provider
     get_fields = self._ListToGetFields(list_fields=fields)
     bucket_uri = self._StorageUriForBucket(bucket_name)
-    prefix_list = []
     headers = {}
+    yield_prefixes = fields is None or 'prefixes' in fields
+    yield_objects = fields is None or any(
+        field.startswith('items/') for field in fields)
     self._AddApiVersionToHeaders(headers)
     try:
       objects_iter = bucket_uri.list_bucket(prefix=prefix or '',
@@ -331,11 +358,10 @@
 
     try:
       for key in objects_iter:
-        if isinstance(key, Prefix):
-          prefix_list.append(key.name)
+        if yield_prefixes and isinstance(key, Prefix):
           yield CloudApi.CsObjectOrPrefix(key.name,
                                           CloudApi.CsObjectOrPrefixType.PREFIX)
-        else:
+        elif yield_objects:
           key_to_convert = key
 
           # Listed keys are populated with these fields during bucket listing.
@@ -407,11 +433,11 @@
     self._AddApiVersionToHeaders(headers)
     if 'accept-encoding' not in headers:
       headers['accept-encoding'] = 'gzip'
-    if end_byte:
+    if end_byte is not None:
       headers['range'] = 'bytes=%s-%s' % (start_byte, end_byte)
     elif start_byte > 0:
       headers['range'] = 'bytes=%s-' % (start_byte)
-    else:
+    elif start_byte < 0:
       headers['range'] = 'bytes=%s' % (start_byte)
 
     # Since in most cases we already made a call to get the object metadata,
@@ -443,7 +469,8 @@
     try:
       if download_strategy is CloudApi.DownloadStrategy.RESUMABLE:
         self._PerformResumableDownload(
-            download_stream, key, headers=headers, callback=progress_callback,
+            download_stream, start_byte, end_byte, key,
+            headers=headers, callback=progress_callback,
             num_callbacks=num_progress_callbacks, hash_algs=hash_algs)
       elif download_strategy is CloudApi.DownloadStrategy.ONE_SHOT:
         self._PerformSimpleDownload(
@@ -505,13 +532,16 @@
       key.get_contents_to_file(download_stream, cb=progress_callback,
                                num_cb=num_progress_callbacks, headers=headers)
 
-  def _PerformResumableDownload(self, fp, key, headers=None, callback=None,
+  def _PerformResumableDownload(self, fp, start_byte, end_byte, key,
+                                headers=None, callback=None,
                                 num_callbacks=XML_PROGRESS_CALLBACKS,
                                 hash_algs=None):
     """Downloads bytes from key to fp, resuming as needed.
 
     Args:
-      fp: File pointer into which data should be downloaded
+      fp: File pointer into which data should be downloaded.
+      start_byte: Start byte of the download.
+      end_byte: End byte of the download.
       key: Key object from which data is to be downloaded
       headers: Headers to send when retrieving the file
       callback: (optional) a callback function that will be called to report
@@ -540,37 +570,22 @@
 
     num_retries = GetNumRetries()
     progress_less_iterations = 0
+    last_progress_byte = start_byte
 
     while True:  # Retry as long as we're making progress.
-      had_file_bytes_before_attempt = GetFileSize(fp)
       try:
-        cur_file_size = GetFileSize(fp, position_to_eof=True)
-
-        def DownloadProxyCallback(total_bytes_downloaded, total_size):
-          """Translates a boto callback into a gsutil Cloud API callback.
-
-          Callbacks are originally made by boto.s3.Key.get_file(); here we take
-          into account that we're resuming a download.
-
-          Args:
-            total_bytes_downloaded: Actual bytes downloaded so far, not
-                                    including the point we resumed from.
-            total_size: Total size of the download.
-          """
-          if callback:
-            callback(cur_file_size + total_bytes_downloaded, total_size)
-
+        cb_handler = DownloadProxyCallbackHandler(start_byte, callback)
         headers = headers.copy()
-        headers['Range'] = 'bytes=%d-%d' % (cur_file_size, key.size - 1)
-        cb = DownloadProxyCallback
+        headers['Range'] = 'bytes=%d-%d' % (start_byte, end_byte)
 
         # Disable AWSAuthConnection-level retry behavior, since that would
         # cause downloads to restart from scratch.
         try:
-          key.get_file(fp, headers, cb, num_callbacks, override_num_retries=0,
-                       hash_algs=hash_algs)
+          key.get_file(fp, headers, cb_handler.call, num_callbacks,
+                       override_num_retries=0, hash_algs=hash_algs)
         except TypeError:
-          key.get_file(fp, headers, cb, num_callbacks, override_num_retries=0)
+          key.get_file(fp, headers, cb_handler.call, num_callbacks,
+                       override_num_retries=0)
         fp.flush()
         # Download succeeded.
         return
@@ -583,9 +598,10 @@
           # so we need to close and reopen the key before resuming
           # the download.
           if self.provider == 's3':
-            key.get_file(fp, headers, cb, num_callbacks, override_num_retries=0)
+            key.get_file(fp, headers, cb_handler.call, num_callbacks,
+                         override_num_retries=0)
           else:  # self.provider == 'gs'
-            key.get_file(fp, headers, cb, num_callbacks,
+            key.get_file(fp, headers, cb_handler.call, num_callbacks,
                          override_num_retries=0, hash_algs=hash_algs)
       except BotoResumableDownloadException, e:
         if (e.disposition ==
@@ -597,7 +613,9 @@
                              'retry', e.message)
 
       # At this point we had a re-tryable failure; see if made progress.
-      if GetFileSize(fp) > had_file_bytes_before_attempt:
+      start_byte = fp.tell()
+      if start_byte > last_progress_byte:
+        last_progress_byte = start_byte
         progress_less_iterations = 0
       else:
         progress_less_iterations += 1
@@ -819,8 +837,11 @@
       return self._HandleSuccessfulUpload(dst_uri, object_metadata,
                                           fields=fields)
     except TRANSLATABLE_BOTO_EXCEPTIONS, e:
+      not_found_exception = CreateNotFoundExceptionForObjectWrite(
+          self.provider, object_metadata.bucket)
       self._TranslateExceptionAndRaise(e, bucket_name=object_metadata.bucket,
-                                       object_name=object_metadata.name)
+                                       object_name=object_metadata.name,
+                                       not_found_exception=not_found_exception)
 
   def UploadObjectStreaming(self, upload_stream, object_metadata,
                             canned_acl=None, progress_callback=None,
@@ -836,8 +857,11 @@
       return self._HandleSuccessfulUpload(dst_uri, object_metadata,
                                           fields=fields)
     except TRANSLATABLE_BOTO_EXCEPTIONS, e:
+      not_found_exception = CreateNotFoundExceptionForObjectWrite(
+          self.provider, object_metadata.bucket)
       self._TranslateExceptionAndRaise(e, bucket_name=object_metadata.bucket,
-                                       object_name=object_metadata.name)
+                                       object_name=object_metadata.name,
+                                       not_found_exception=not_found_exception)
 
   def UploadObject(self, upload_stream, object_metadata, canned_acl=None,
                    preconditions=None, size=None, progress_callback=None,
@@ -860,8 +884,11 @@
       return self._HandleSuccessfulUpload(dst_uri, object_metadata,
                                           fields=fields)
     except TRANSLATABLE_BOTO_EXCEPTIONS, e:
+      not_found_exception = CreateNotFoundExceptionForObjectWrite(
+          self.provider, object_metadata.bucket)
       self._TranslateExceptionAndRaise(e, bucket_name=object_metadata.bucket,
-                                       object_name=object_metadata.name)
+                                       object_name=object_metadata.name,
+                                       not_found_exception=not_found_exception)
 
   def DeleteObject(self, bucket_name, object_name, preconditions=None,
                    generation=None, provider=None):
@@ -920,8 +947,13 @@
 
       return self._BotoKeyToObject(new_key, fields=fields)
     except TRANSLATABLE_BOTO_EXCEPTIONS, e:
-      self._TranslateExceptionAndRaise(e, dst_obj_metadata.bucket,
-                                       dst_obj_metadata.name)
+      not_found_exception = CreateNotFoundExceptionForObjectWrite(
+          self.provider, dst_obj_metadata.bucket, src_provider=self.provider,
+          src_bucket_name=src_obj_metadata.bucket,
+          src_object_name=src_obj_metadata.name, src_generation=src_generation)
+      self._TranslateExceptionAndRaise(e, bucket_name=dst_obj_metadata.bucket,
+                                       object_name=dst_obj_metadata.name,
+                                       not_found_exception=not_found_exception)
 
   def ComposeObject(self, src_objs_metadata, dst_obj_metadata,
                     preconditions=None, provider=None, fields=None):
@@ -1382,7 +1414,7 @@
         raise
 
   def _TranslateExceptionAndRaise(self, e, bucket_name=None, object_name=None,
-                                  generation=None):
+                                  generation=None, not_found_exception=None):
     """Translates a Boto exception and raises the translated or original value.
 
     Args:
@@ -1390,6 +1422,7 @@
       bucket_name: Optional bucket name in request that caused the exception.
       object_name: Optional object name in request that caused the exception.
       generation: Optional generation in request that caused the exception.
+      not_found_exception: Optional exception to raise in the not-found case.
 
     Raises:
       Translated CloudApi exception, or the original exception if it was not
@@ -1397,14 +1430,14 @@
     """
     translated_exception = self._TranslateBotoException(
         e, bucket_name=bucket_name, object_name=object_name,
-        generation=generation)
+        generation=generation, not_found_exception=not_found_exception)
     if translated_exception:
       raise translated_exception
     else:
       raise
 
   def _TranslateBotoException(self, e, bucket_name=None, object_name=None,
-                              generation=None):
+                              generation=None, not_found_exception=None):
     """Translates boto exceptions into their gsutil Cloud API equivalents.
 
     Args:
@@ -1412,6 +1445,7 @@
       bucket_name: Optional bucket name in request that caused the exception.
       object_name: Optional object name in request that caused the exception.
       generation: Optional generation in request that caused the exception.
+      not_found_exception: Optional exception to raise in the not-found case.
 
     Returns:
       CloudStorageApiServiceException for translatable exceptions, None
@@ -1425,14 +1459,20 @@
       elif e.status == 401 or e.status == 403:
         return AccessDeniedException(e.code, status=e.status, body=e.body)
       elif e.status == 404:
-        if bucket_name:
+        if not_found_exception:
+          # The exception is pre-constructed prior to translation; the HTTP
+          # status code isn't available at that time.
+          setattr(not_found_exception, 'status', e.status)
+          return not_found_exception
+        elif bucket_name:
           if object_name:
             return CreateObjectNotFoundException(e.status, self.provider,
                                                  bucket_name, object_name,
                                                  generation=generation)
           return CreateBucketNotFoundException(e.status, self.provider,
                                                bucket_name)
-        return NotFoundException(e.code, status=e.status, body=e.body)
+        return NotFoundException(e.message, status=e.status, body=e.body)
+
       elif e.status == 409 and e.code and 'BucketNotEmpty' in e.code:
         return NotEmptyException('BucketNotEmpty (%s)' % bucket_name,
                                  status=e.status, body=e.body)
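
The boto_translation.py hunks above build inclusive HTTP Range headers from start_byte/end_byte and resume retries from the file's current offset. A small standalone sketch of just the header logic, following the same branch structure as the GetObjectMedia code above:

```
def build_range_header(start_byte, end_byte=None):
  """Returns an HTTP Range header value (inclusive ends), or None if the
  whole object should be downloaded."""
  if end_byte is not None:
    return 'bytes=%d-%d' % (start_byte, end_byte)
  if start_byte > 0:
    return 'bytes=%d-' % start_byte   # from start_byte to end of object
  if start_byte < 0:
    return 'bytes=%d' % start_byte    # negative start: suffix range
  return None                         # start at 0, no end: whole object


assert build_range_header(0, 99) == 'bytes=0-99'   # first 100 bytes
assert build_range_header(100) == 'bytes=100-'     # resume at offset 100
assert build_range_header(-3) == 'bytes=-3'        # last 3 bytes
assert build_range_header(0) is None                # whole object
```
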
diff --git a/catapult/third_party/gsutil/gslib/cloud_api.py b/catapult/third_party/gsutil/gslib/cloud_api.py
index b7af6b6..79d592d 100644
--- a/catapult/third_party/gsutil/gslib/cloud_api.py
+++ b/catapult/third_party/gsutil/gslib/cloud_api.py
@@ -26,7 +26,8 @@
   a separate instance of the gsutil Cloud API should be instantiated per-thread.
   """
 
-  def __init__(self, bucket_storage_uri_class, logger, provider=None, debug=0):
+  def __init__(self, bucket_storage_uri_class, logger, provider=None,
+               debug=0, trace_token=None):
     """Performs necessary setup for interacting with the cloud storage provider.
 
     Args:
@@ -36,11 +37,14 @@
       provider: Default provider prefix describing cloud storage provider to
                 connect to.
       debug: Debug level for the API implementation (0..3).
+      trace_token: Google internal trace token to pass to the API
+                   implementation (string).
     """
     self.bucket_storage_uri_class = bucket_storage_uri_class
     self.logger = logger
     self.provider = provider
     self.debug = debug
+    self.trace_token = trace_token
 
   def GetBucket(self, bucket_name, provider=None, fields=None):
     """Gets Bucket metadata.
@@ -269,12 +273,13 @@
       start_byte: Starting point for download (for resumable downloads and
                   range requests). Can be set to negative to request a range
                   of bytes (python equivalent of [:-3])
-      end_byte: Ending point for download (for range requests).
+      end_byte: Ending byte number, inclusive, for download (for range
+                requests). If None, download the rest of the object.
       progress_callback: Optional callback function for progress notifications.
                          Receives calls with arguments
                          (bytes_transferred, total_size).
-      serialization_data: Implementation-specific dict containing serialization
-                          information for the download.
+      serialization_data: Implementation-specific JSON string of a dict
+                          containing serialization information for the download.
       digesters: Dict of {string : digester}, where string is a name of a hash
                  algorithm, and digester is a validation digester that supports
                  update(bytes) and digest() using that algorithm.
@@ -602,6 +607,15 @@
   """Exception raised when a resource is not found (404)."""
 
 
+class BucketNotFoundException(NotFoundException):
+  """Exception raised when a bucket resource is not found (404)."""
+
+  def __init__(self, reason, bucket_name, status=None, body=None):
+    super(BucketNotFoundException, self).__init__(reason, status=status,
+                                                  body=body)
+    self.bucket_name = bucket_name
+
+
 class NotEmptyException(ServiceException):
   """Exception raised when trying to delete a bucket is not empty."""
 
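Mirroring the BucketNotFoundException added above, here is a minimal, generic analogue showing why carrying the bucket name on the exception is useful to callers. The class names are illustrative, not gsutil's:

```
class NotFoundError(Exception):
  """Generic 404-style error."""

  def __init__(self, reason, status=None):
    super(NotFoundError, self).__init__(reason)
    self.status = status


class BucketNotFoundError(NotFoundError):
  """Not-found error that remembers which bucket was missing."""

  def __init__(self, reason, bucket_name, status=None):
    super(BucketNotFoundError, self).__init__(reason, status=status)
    self.bucket_name = bucket_name


try:
  raise BucketNotFoundError('bucket does not exist', 'my-bucket', status=404)
except BucketNotFoundError as e:
  print('%s (status %s): %s' % (e.bucket_name, e.status, e))
```
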
diff --git a/catapult/third_party/gsutil/gslib/cloud_api_delegator.py b/catapult/third_party/gsutil/gslib/cloud_api_delegator.py
index 05c8732..3fc3aa5 100644
--- a/catapult/third_party/gsutil/gslib/cloud_api_delegator.py
+++ b/catapult/third_party/gsutil/gslib/cloud_api_delegator.py
@@ -41,7 +41,7 @@
   """
 
   def __init__(self, bucket_storage_uri_class, gsutil_api_map, logger,
-               provider=None, debug=0):
+               provider=None, debug=0, trace_token=None):
     """Performs necessary setup for delegating cloud storage requests.
 
     This function has different arguments than the gsutil Cloud API __init__
@@ -56,9 +56,11 @@
       provider: Default provider prefix describing cloud storage provider to
                 connect to.
       debug: Debug level for the API implementation (0..3).
+      trace_token: Apiary trace token to pass to API.
     """
     super(CloudApiDelegator, self).__init__(bucket_storage_uri_class, logger,
-                                            provider=provider, debug=debug)
+                                            provider=provider, debug=debug,
+                                            trace_token=trace_token)
     self.api_map = gsutil_api_map
     self.prefer_api = boto.config.get('GSUtil', 'prefer_api', '').upper()
     self.loaded_apis = {}
@@ -116,7 +118,8 @@
             self.bucket_storage_uri_class,
             self.logger,
             provider=provider,
-            debug=self.debug))
+            debug=self.debug,
+            trace_token=self.trace_token))
 
   def GetApiSelector(self, provider=None):
     """Returns a cs_api_map.ApiSelector based on input and configuration.
diff --git a/catapult/third_party/gsutil/gslib/cloud_api_helper.py b/catapult/third_party/gsutil/gslib/cloud_api_helper.py
index c570bb1..f7fc930 100644
--- a/catapult/third_party/gsutil/gslib/cloud_api_helper.py
+++ b/catapult/third_party/gsutil/gslib/cloud_api_helper.py
@@ -16,6 +16,8 @@
 
 from __future__ import absolute_import
 
+import json
+
 from gslib.cloud_api import ArgumentException
 
 
@@ -39,13 +41,12 @@
         'Object metadata supplied for destination object had no bucket name.')
 
 
-def GetDownloadSerializationDict(src_obj_metadata):
-  """Returns a baseline serialization dict from the source object metadata.
+def GetDownloadSerializationData(src_obj_metadata, progress=0):
+  """Returns download serialization data.
 
   There are four entries:
     auto_transfer: JSON-specific field, always False.
-    progress: How much of the download has already been completed. Caller
-              should override this value if the download is being resumed.
+    progress: How much of the download has already been completed.
     total_size: Total object size.
     url: Implementation-specific field used for saving a metadata get call.
          For JSON, this the download URL of the object.
@@ -53,13 +54,17 @@
 
   Args:
     src_obj_metadata: Object to be downloaded.
+    progress: See above.
 
   Returns:
-    Serialization dict for use with Cloud API GetObjectMedia.
+    Serialization data for use with Cloud API GetObjectMedia.
   """
-  return {
+
+  serialization_dict = {
       'auto_transfer': 'False',
-      'progress': 0,
+      'progress': progress,
       'total_size': src_obj_metadata.size,
       'url': src_obj_metadata.mediaLink
   }
+
+  return json.dumps(serialization_dict)
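
GetDownloadSerializationData above now returns a JSON string rather than a dict. A quick round-trip sketch using the same field names (the media link URL below is a placeholder):

```
import json


def make_serialization_data(total_size, media_link, progress=0):
  """Builds the JSON string consumed when resuming a download."""
  return json.dumps({
      'auto_transfer': 'False',
      'progress': progress,
      'total_size': total_size,
      'url': media_link,
  })


data = make_serialization_data(1024, 'https://example.invalid/obj',
                               progress=256)
resumed = json.loads(data)
assert resumed['progress'] == 256 and resumed['total_size'] == 1024
```
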
diff --git a/catapult/third_party/gsutil/gslib/command.py b/catapult/third_party/gsutil/gslib/command.py
index 3823b91..1a1da96 100644
--- a/catapult/third_party/gsutil/gslib/command.py
+++ b/catapult/third_party/gsutil/gslib/command.py
@@ -50,20 +50,19 @@
 from gslib.help_provider import HelpProvider
 from gslib.name_expansion import NameExpansionIterator
 from gslib.name_expansion import NameExpansionResult
-from gslib.parallelism_framework_util import AtomicIncrementDict
-from gslib.parallelism_framework_util import BasicIncrementDict
-from gslib.parallelism_framework_util import ThreadAndProcessSafeDict
+from gslib.parallelism_framework_util import AtomicDict
 from gslib.plurality_checkable_iterator import PluralityCheckableIterator
 from gslib.sig_handling import RegisterSignalHandler
 from gslib.storage_url import StorageUrlFromString
 from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
 from gslib.translation_helper import AclTranslation
+from gslib.translation_helper import PRIVATE_DEFAULT_OBJ_ACL
+from gslib.util import CheckMultiprocessingAvailableAndInit
 from gslib.util import GetConfigFilePath
 from gslib.util import GsutilStreamHandler
 from gslib.util import HaveFileUrls
 from gslib.util import HaveProviderUrls
 from gslib.util import IS_WINDOWS
-from gslib.util import MultiprocessingIsAvailable
 from gslib.util import NO_MAX
 from gslib.util import UrlsAreForSingleProvider
 from gslib.util import UTF8
@@ -221,14 +220,14 @@
   caller_id_counter = multiprocessing.Value('i', 0)
 
   # Map from caller_id to total number of tasks to be completed for that ID.
-  total_tasks = ThreadAndProcessSafeDict(manager)
+  total_tasks = AtomicDict(manager=manager)
 
   # Map from caller_id to a boolean which is True iff all its tasks are
   # finished.
-  call_completed_map = ThreadAndProcessSafeDict(manager)
+  call_completed_map = AtomicDict(manager=manager)
 
   # Used to keep track of the set of return values for each caller ID.
-  global_return_values_map = AtomicIncrementDict(manager)
+  global_return_values_map = AtomicDict(manager=manager)
 
   # Condition used to notify any waiting threads that a task has finished or
   # that a call to Apply needs a new set of consumer processes.
@@ -239,7 +238,7 @@
   worker_checking_level_lock = manager.Lock()
 
   # Map from caller_id to the current number of completed tasks for that ID.
-  caller_id_finished_count = AtomicIncrementDict(manager)
+  caller_id_finished_count = AtomicDict(manager=manager)
 
   # Used as a way for the main thread to distinguish between being woken up
   # by another call finishing and being woken up by a call that needs a new set
@@ -249,8 +248,8 @@
   current_max_recursive_level = multiprocessing.Value('i', 0)
 
   # Map from (caller_id, name) to the value of that shared variable.
-  shared_vars_map = AtomicIncrementDict(manager)
-  shared_vars_list_map = ThreadAndProcessSafeDict(manager)
+  shared_vars_map = AtomicDict(manager=manager)
+  shared_vars_list_map = AtomicDict(manager=manager)
 
   # Map from caller_id to calling class.
   class_map = manager.dict()
@@ -259,6 +258,33 @@
   failure_count = multiprocessing.Value('i', 0)
 
 
+def InitializeThreadingVariables():
+  """Initializes module-level variables used when running multi-threaded.
+
+  When multiprocessing is not available (or on Windows where only 1 process
+  is used), thread-safe analogs to the multiprocessing global variables
+  must be initialized. This function is the thread-safe analog to
+  InitializeMultiprocessingVariables.
+  """
+  # pylint: disable=global-variable-undefined
+  global global_return_values_map, shared_vars_map, failure_count
+  global caller_id_finished_count, shared_vars_list_map, total_tasks
+  global need_pool_or_done_cond, call_completed_map, class_map
+  global task_queues, caller_id_lock, caller_id_counter
+  caller_id_counter = 0
+  caller_id_finished_count = AtomicDict()
+  caller_id_lock = threading.Lock()
+  call_completed_map = AtomicDict()
+  class_map = AtomicDict()
+  failure_count = 0
+  global_return_values_map = AtomicDict()
+  need_pool_or_done_cond = threading.Condition()
+  shared_vars_list_map = AtomicDict()
+  shared_vars_map = AtomicDict()
+  task_queues = []
+  total_tasks = AtomicDict()
+
+
 # Each subclass of Command must define a property named 'command_spec' that is
 # an instance of the following class.
 CommandSpec = namedtuple('CommandSpec', [
@@ -359,9 +385,9 @@
            'command_name': self.command_name})))
     return args
 
-  def __init__(self, command_runner, args, headers, debug, parallel_operations,
-               bucket_storage_uri_class, gsutil_api_class_map_factory,
-               test_method=None, logging_filters=None,
+  def __init__(self, command_runner, args, headers, debug, trace_token,
+               parallel_operations, bucket_storage_uri_class,
+               gsutil_api_class_map_factory, logging_filters=None,
                command_alias_used=None):
     """Instantiates a Command.
 
@@ -370,15 +396,13 @@
       args: Command-line args (arg0 = actual arg, not command name ala bash).
       headers: Dictionary containing optional HTTP headers to pass to boto.
       debug: Debug level to pass in to boto connection (range 0..3).
+      trace_token: Trace token to pass to the API implementation.
       parallel_operations: Should command operations be executed in parallel?
       bucket_storage_uri_class: Class to instantiate for cloud StorageUris.
                                 Settable for testing/mocking.
       gsutil_api_class_map_factory: Creates map of cloud storage interfaces.
                                     Settable for testing/mocking.
-      test_method: Optional general purpose method for testing purposes.
-                   Application and semantics of this method will vary by
-                   command and test type.
-      logging_filters: Optional list of logging.Filters to apply to this
+      logging_filters: Optional list of logging. Filters to apply to this
                        command's logger.
       command_alias_used: The alias that was actually used when running this
                           command (as opposed to the "official" command name,
@@ -395,10 +419,10 @@
     self.unparsed_args = args
     self.headers = headers
     self.debug = debug
+    self.trace_token = trace_token
     self.parallel_operations = parallel_operations
     self.bucket_storage_uri_class = bucket_storage_uri_class
     self.gsutil_api_class_map_factory = gsutil_api_class_map_factory
-    self.test_method = test_method
     self.exclude_symlinks = False
     self.recursion_requested = False
     self.all_versions = False
@@ -445,7 +469,7 @@
     self.project_id = None
     self.gsutil_api = CloudApiDelegator(
         bucket_storage_uri_class, self.gsutil_api_map,
-        self.logger, debug=self.debug)
+        self.logger, debug=self.debug, trace_token=self.trace_token)
 
     # Cross-platform path to run gsutil binary.
     self.gsutil_cmd = ''
@@ -464,7 +488,8 @@
           self.recursion_requested = True
           break
 
-    self.multiprocessing_is_available = MultiprocessingIsAvailable()[0]
+    self.multiprocessing_is_available = (
+        CheckMultiprocessingAvailableAndInit().is_available)
 
   def RaiseWrongNumberOfArgumentsException(self):
     """Raises exception for wrong number of arguments supplied to command."""
@@ -689,6 +714,10 @@
           else:
             def_obj_acl = AclTranslation.JsonToMessage(
                 self.acl_arg, apitools_messages.ObjectAccessControl)
+            if not def_obj_acl:
+              # Use a sentinel value to indicate a private (no entries) default
+              # object ACL.
+              def_obj_acl.append(PRIVATE_DEFAULT_OBJ_ACL)
             bucket_metadata = apitools_messages.Bucket(
                 defaultObjectAcl=def_obj_acl)
             gsutil_api.PatchBucket(url.bucket_name, bucket_metadata,
@@ -749,8 +778,6 @@
       self.canned = False
     else:
       # No file exists, so expect a canned ACL string.
-      # Canned ACLs are not supported in JSON and we need to use the XML API
-      # to set them.
       # validate=False because we allow wildcard urls.
       storage_uri = boto.storage_uri(
           url_args[0], debug=self.debug, validate=False,
@@ -866,8 +893,7 @@
           'command' % (url_str, self.command_name))
     return list(plurality_iter)[0]
 
-  def _HandleMultiProcessingSigs(self, unused_signal_num,
-                                 unused_cur_stack_frame):
+  def _HandleMultiProcessingSigs(self, signal_num, unused_cur_stack_frame):
     """Handles signals INT AND TERM during a multi-process/multi-thread request.
 
     Kills subprocesses.
@@ -880,7 +906,8 @@
     # https://github.com/GoogleCloudPlatform/gsutil/issues/99 for details
     # about why making it work correctly across OS's is harder and still open.
     ShutDownGsutil()
-    sys.stderr.write('Caught ^C - exiting\n')
+    if signal_num == signal.SIGINT:
+      sys.stderr.write('Caught ^C - exiting\n')
     # Simply calling sys.exit(1) doesn't work - see above bug for details.
     KillProcess(os.getpid())
 
@@ -1002,10 +1029,21 @@
 
   def _SetUpPerCallerState(self):
     """Set up the state for a caller id, corresponding to one Apply call."""
+    # pylint: disable=global-variable-undefined,global-variable-not-assigned
+    # These variables are initialized in InitializeMultiprocessingVariables or
+    # InitializeThreadingVariables
+    global global_return_values_map, shared_vars_map, failure_count
+    global caller_id_finished_count, shared_vars_list_map, total_tasks
+    global need_pool_or_done_cond, call_completed_map, class_map
+    global task_queues, caller_id_lock, caller_id_counter
     # Get a new caller ID.
     with caller_id_lock:
-      caller_id_counter.value += 1
-      caller_id = caller_id_counter.value
+      if isinstance(caller_id_counter, int):
+        caller_id_counter += 1
+        caller_id = caller_id_counter
+      else:
+        caller_id_counter.value += 1
+        caller_id = caller_id_counter.value
 
     # Create a copy of self with an incremented recursive level. This allows
     # the class to report its level correctly if the function called from it
@@ -1025,8 +1063,8 @@
     class_map[caller_id] = cls
     total_tasks[caller_id] = -1  # -1 => the producer hasn't finished yet.
     call_completed_map[caller_id] = False
-    caller_id_finished_count.Put(caller_id, 0)
-    global_return_values_map.Put(caller_id, [])
+    caller_id_finished_count[caller_id] = 0
+    global_return_values_map[caller_id] = []
     return caller_id
 
   def _CreateNewConsumerPool(self, num_processes, num_threads):
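The _SetUpPerCallerState change above has to bump a caller-id counter that is either a plain int (threading-only initialization) or a multiprocessing.Value (when process-based parallelism is available). A minimal standalone sketch of that dual-mode increment, using illustrative names rather than gsutil's module globals:

    import multiprocessing
    import threading

    counter_lock = threading.Lock()  # stand-in for caller_id_lock; in real
                                     # multi-process use it must be shared too
    counter = 0                      # threading mode: plain int
    # counter = multiprocessing.Value('i', 0)  # multiprocessing mode

    def next_caller_id():
        """Returns a new unique caller id for either counter type."""
        global counter
        with counter_lock:
            if isinstance(counter, int):
                counter += 1
                return counter
            counter.value += 1
            return counter.value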
@@ -1103,32 +1141,16 @@
     # fact that it's  wasteful to try this multiple times in general, it also
     # will never work when called from a subprocess since we use daemon
     # processes, and daemons can't create other processes.
-    if is_main_thread:
-      if ((not self.multiprocessing_is_available)
-          and thread_count * process_count > 1):
-        # Run the check again and log the appropriate warnings. This was run
-        # before, when the Command object was created, in order to calculate
-        # self.multiprocessing_is_available, but we don't want to print the
-        # warning until we're sure the user actually tried to use multiple
-        # threads or processes.
-        MultiprocessingIsAvailable(logger=self.logger)
+    if (is_main_thread and not self.multiprocessing_is_available and
+        process_count > 1):
+      # Run the check again and log the appropriate warnings. This was run
+      # before, when the Command object was created, in order to calculate
+      # self.multiprocessing_is_available, but we don't want to print the
+      # warning until we're sure the user actually tried to use multiple
+      # threads or processes.
+      CheckMultiprocessingAvailableAndInit(logger=self.logger)
 
-    if self.multiprocessing_is_available:
-      caller_id = self._SetUpPerCallerState()
-    else:
-      self.sequential_caller_id += 1
-      caller_id = self.sequential_caller_id
-
-      if is_main_thread:
-        # pylint: disable=global-variable-undefined
-        global global_return_values_map, shared_vars_map, failure_count
-        global caller_id_finished_count, shared_vars_list_map
-        global_return_values_map = BasicIncrementDict()
-        global_return_values_map.Put(caller_id, [])
-        shared_vars_map = BasicIncrementDict()
-        caller_id_finished_count = BasicIncrementDict()
-        shared_vars_list_map = {}
-        failure_count = 0
+    caller_id = self._SetUpPerCallerState()
 
     # If any shared attributes passed by caller, create a dictionary of
     # shared memory variables for every element in the list of shared
@@ -1136,12 +1158,14 @@
     if shared_attrs:
       shared_vars_list_map[caller_id] = shared_attrs
       for name in shared_attrs:
-        shared_vars_map.Put((caller_id, name), 0)
+        shared_vars_map[(caller_id, name)] = 0
 
     # Make all of the requested function calls.
-    if self.multiprocessing_is_available and thread_count * process_count > 1:
+    usable_processes_count = (process_count if self.multiprocessing_is_available
+                              else 1)
+    if thread_count * usable_processes_count > 1:
       self._ParallelApply(func, args_iterator, exception_handler, caller_id,
-                          arg_checker, process_count, thread_count,
+                          arg_checker, usable_processes_count, thread_count,
                           should_return_results, fail_on_error)
     else:
       self._SequentialApply(func, args_iterator, exception_handler, caller_id,
@@ -1153,11 +1177,11 @@
         # and simply apply the delta after what was done during the call to
         # apply.
         final_value = (original_shared_vars_values[name] +
-                       shared_vars_map.Get((caller_id, name)))
+                       shared_vars_map.get((caller_id, name)))
         setattr(self, name, final_value)
 
     if should_return_results:
-      return global_return_values_map.Get(caller_id)
+      return global_return_values_map.get(caller_id)
 
   def _MaybeSuggestGsutilDashM(self):
     """Outputs a sugestion to the user to use gsutil -m."""
@@ -1222,6 +1246,10 @@
       # start and it scrolled off-screen.
       self._MaybeSuggestGsutilDashM()
 
+    # If the final iterated argument results in an exception, and that
+    # exception modifies shared_attrs, we need to publish the results.
+    worker_thread.shared_vars_updater.Update(caller_id, self)
+
   # pylint: disable=g-doc-args
   def _ParallelApply(self, func, args_iterator, exception_handler, caller_id,
                      arg_checker, process_count, thread_count,
@@ -1283,7 +1311,10 @@
       # of task queues if it makes a call to Apply, so we always keep around
       # one more queue than we know we need. OTOH, if we don't create a new
       # process, the existing process still needs a task queue to use.
-      task_queues.append(_NewMultiprocessingQueue())
+      if process_count > 1:
+        task_queues.append(_NewMultiprocessingQueue())
+      else:
+        task_queues.append(_NewThreadsafeQueue())
 
     if process_count > 1:  # Handle process pool creation.
       # Check whether this call will need a new set of workers.
@@ -1315,8 +1346,10 @@
     # be consumer pools trying to use our processes.
     if process_count > 1:
       task_queue = task_queues[self.recursive_apply_level]
-    else:
+    elif self.multiprocessing_is_available:
       task_queue = _NewMultiprocessingQueue()
+    else:
+      task_queue = _NewThreadsafeQueue()
 
     # Kick off a producer thread to throw tasks in the global task queue. We
     # do this asynchronously so that the main thread can be free to create new
@@ -1394,16 +1427,25 @@
 
     task_queue = task_queue or task_queues[recursive_apply_level]
 
+    # Ensure fairness across processes by filling our WorkerPool only with
+    # as many tasks as it has WorkerThreads. This semaphore is acquired each
+    # time that a task is retrieved from the queue and released each time
+    # a task is completed by a WorkerThread.
+    worker_semaphore = threading.BoundedSemaphore(thread_count)
+
     assert thread_count * process_count > 1, (
         'Invalid state, calling command._ApplyThreads with only one thread '
         'and process.')
+    # TODO: Presently, this pool gets recreated with each call to Apply. We
+    # should be able to do it just once, at process creation time.
     worker_pool = WorkerPool(
-        thread_count, self.logger,
+        thread_count, self.logger, worker_semaphore,
         bucket_storage_uri_class=self.bucket_storage_uri_class,
         gsutil_api_map=self.gsutil_api_map, debug=self.debug)
 
     num_enqueued = 0
     while True:
+      worker_semaphore.acquire()
       task = task_queue.get()
       if task.args != ZERO_TASKS_TO_DO_ARGUMENT:
         # If we have no tasks to do and we're performing a blocking call, we
@@ -1411,6 +1453,9 @@
         # the call to task_queue.get() forever.
         worker_pool.AddTask(task)
         num_enqueued += 1
+      else:
+        # No tasks remain; don't block the semaphore on WorkerThread completion.
+        worker_semaphore.release()
 
       if is_blocking_call:
         num_to_do = total_tasks[task.caller_id]
@@ -1588,19 +1633,19 @@
       # It's possible that the workers finished before we updated total_tasks,
       # so we need to check here as well.
       _NotifyIfDone(self.caller_id,
-                    caller_id_finished_count.Get(self.caller_id))
+                    caller_id_finished_count.get(self.caller_id))
 
 
 class WorkerPool(object):
   """Pool of worker threads to which tasks can be added."""
 
-  def __init__(self, thread_count, logger, bucket_storage_uri_class=None,
-               gsutil_api_map=None, debug=0):
+  def __init__(self, thread_count, logger, worker_semaphore,
+               bucket_storage_uri_class=None, gsutil_api_map=None, debug=0):
     self.task_queue = _NewThreadsafeQueue()
     self.threads = []
     for _ in range(thread_count):
       worker_thread = WorkerThread(
-          self.task_queue, logger,
+          self.task_queue, logger, worker_semaphore=worker_semaphore,
           bucket_storage_uri_class=bucket_storage_uri_class,
           gsutil_api_map=gsutil_api_map, debug=debug)
       self.threads.append(worker_thread)
@@ -1620,14 +1665,16 @@
   calling logic is also used in the single-threaded case.
   """
 
-  def __init__(self, task_queue, logger, bucket_storage_uri_class=None,
-               gsutil_api_map=None, debug=0):
+  def __init__(self, task_queue, logger, worker_semaphore=None,
+               bucket_storage_uri_class=None, gsutil_api_map=None, debug=0):
     """Initializes the worker thread.
 
     Args:
       task_queue: The thread-safe queue from which this thread should obtain
                   its work.
       logger: Logger to use for this thread.
+      worker_semaphore: threading.BoundedSemaphore to be released each time a
+          task is completed, or None for single-threaded execution.
       bucket_storage_uri_class: Class to instantiate for cloud StorageUris.
                                 Settable for testing/mocking.
       gsutil_api_map: Map of providers and API selector tuples to api classes
@@ -1637,6 +1684,7 @@
     """
     super(WorkerThread, self).__init__()
     self.task_queue = task_queue
+    self.worker_semaphore = worker_semaphore
     self.daemon = True
     self.cached_classes = {}
     self.shared_vars_updater = _SharedVariablesUpdater()
@@ -1658,7 +1706,8 @@
     try:
       results = task.func(cls, task.args, thread_state=self.thread_gsutil_api)
       if task.should_return_results:
-        global_return_values_map.Update(caller_id, [results], default_value=[])
+        global_return_values_map.Increment(caller_id, [results],
+                                           default_value=[])
     except Exception, e:  # pylint: disable=broad-except
       _IncrementFailureCount()
       if task.fail_on_error:
@@ -1673,15 +1722,15 @@
               'Caught exception while handling exception for %s:\n%s',
               task, traceback.format_exc())
     finally:
+      if self.worker_semaphore:
+        self.worker_semaphore.release()
       self.shared_vars_updater.Update(caller_id, cls)
 
       # Even if we encounter an exception, we still need to claim that
       # the function finished executing. Otherwise, we won't know when to
       # stop waiting and return results.
-      num_done = caller_id_finished_count.Update(caller_id, 1)
-
-      if cls.multiprocessing_is_available:
-        _NotifyIfDone(caller_id, num_done)
+      num_done = caller_id_finished_count.Increment(caller_id, 1)
+      _NotifyIfDone(caller_id, num_done)
 
   def run(self):
     while True:
@@ -1741,7 +1790,7 @@
 
         # Update the globally-consistent value by simply increasing it by the
         # computed delta.
-        shared_vars_map.Update(key, delta)
+        shared_vars_map.Increment(key, delta)
 
 
 def _NotifyIfDone(caller_id, num_done):
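Several hunks above add a threading.BoundedSemaphore so the task-feeding loop never puts more work into a WorkerPool than it has WorkerThreads: the semaphore is acquired before each task is pulled from the queue and released when a WorkerThread finishes (or immediately, for the "no more tasks" sentinel). A self-contained sketch of that fairness pattern, assuming a plain callable instead of gsutil's Task objects and omitting the completion bookkeeping (_NotifyIfDone, caller_id_finished_count):

    try:
        import Queue as queue_module   # Python 2, as used in this codebase
    except ImportError:
        import queue as queue_module   # Python 3
    import threading

    _NO_MORE_TASKS = object()

    def run_bounded(tasks, thread_count, handle):
        """Runs handle(task) on thread_count workers, at most thread_count queued."""
        task_queue = queue_module.Queue()
        semaphore = threading.BoundedSemaphore(thread_count)
        threads = []

        def worker():
            while True:
                item = task_queue.get()
                if item is _NO_MORE_TASKS:
                    return
                try:
                    handle(item)
                finally:
                    # Lets the producer enqueue the next task.
                    semaphore.release()

        for _ in range(thread_count):
            thread = threading.Thread(target=worker)
            thread.daemon = True
            thread.start()
            threads.append(thread)

        for item in tasks:
            # Blocks once thread_count tasks are in flight, so one caller
            # cannot flood the shared queue.
            semaphore.acquire()
            task_queue.put(item)

        for _ in threads:
            task_queue.put(_NO_MORE_TASKS)
        for thread in threads:
            thread.join()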
diff --git a/catapult/third_party/gsutil/gslib/command_runner.py b/catapult/third_party/gsutil/gslib/command_runner.py
index e3f4bda..5ca1628 100644
--- a/catapult/third_party/gsutil/gslib/command_runner.py
+++ b/catapult/third_party/gsutil/gslib/command_runner.py
@@ -41,13 +41,13 @@
 from gslib.gcs_json_api import GcsJsonApi
 from gslib.no_op_credentials import NoOpCredentials
 from gslib.tab_complete import MakeCompleter
+from gslib.util import CheckMultiprocessingAvailableAndInit
 from gslib.util import CompareVersions
 from gslib.util import GetGsutilVersionModifiedTime
 from gslib.util import GSUTIL_PUB_TARBALL
 from gslib.util import IsRunningInteractively
 from gslib.util import LAST_CHECKED_FOR_GSUTIL_UPDATE_TIMESTAMP_FILE
 from gslib.util import LookUpGsutilVersion
-from gslib.util import MultiprocessingIsAvailable
 from gslib.util import RELEASE_NOTES_URL
 from gslib.util import SECONDS_PER_DAY
 from gslib.util import UTF8
@@ -199,7 +199,7 @@
             command_parser, command.command_spec.argparse_arguments, gsutil_api)
 
   def RunNamedCommand(self, command_name, args=None, headers=None, debug=0,
-                      parallel_operations=False, test_method=None,
+                      trace_token=None, parallel_operations=False,
                       skip_update_check=False, logging_filters=None,
                       do_shutdown=True):
     """Runs the named command.
@@ -211,10 +211,8 @@
       args: Command-line args (arg0 = actual arg, not command name ala bash).
       headers: Dictionary containing optional HTTP headers to pass to boto.
       debug: Debug level to pass in to boto connection (range 0..3).
+      trace_token: Trace token to pass to the underlying API.
       parallel_operations: Should command operations be executed in parallel?
-      test_method: Optional general purpose method for testing purposes.
-                   Application and semantics of this method will vary by
-                   command and test type.
       skip_update_check: Set to True to disable checking for gsutil updates.
       logging_filters: Optional list of logging.Filters to apply to this
                        command's logger.
@@ -226,9 +224,11 @@
     Returns:
       Return value(s) from Command that was run.
     """
+    command_changed_to_update = False
     if (not skip_update_check and
         self.MaybeCheckForAndOfferSoftwareUpdate(command_name, debug)):
       command_name = 'update'
+      command_changed_to_update = True
       args = ['-n']
 
     if not args:
@@ -271,15 +271,22 @@
 
     command_class = self.command_map[command_name]
     command_inst = command_class(
-        self, args, headers, debug, parallel_operations,
+        self, args, headers, debug, trace_token, parallel_operations,
         self.bucket_storage_uri_class, self.gsutil_api_class_map_factory,
-        test_method, logging_filters, command_alias_used=command_name)
+        logging_filters, command_alias_used=command_name)
     return_code = command_inst.RunCommand()
 
-    if MultiprocessingIsAvailable()[0] and do_shutdown:
+    if CheckMultiprocessingAvailableAndInit().is_available and do_shutdown:
       ShutDownGsutil()
     if GetFailureCount() > 0:
       return_code = 1
+    if command_changed_to_update:
+      # If the command changed to update, the user's original command was
+      # not executed.
+      return_code = 1
+      print '\n'.join(textwrap.wrap(
+          'Update was successful. Exiting with code 1 as the original command '
+          'issued prior to the update was not executed and should be re-run.'))
     return return_code
 
   def MaybeCheckForAndOfferSoftwareUpdate(self, command_name, debug):
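The RunNamedCommand change above drops the old test_method hook and instead threads a trace_token keyword through to the Cloud API layer. Assuming an already-constructed CommandRunner (its constructor is outside this hunk), the new keyword could be exercised roughly like this:

    def run_ls_with_trace(runner, bucket_url, token):
        """Runs 'ls' via an existing gslib CommandRunner, tagging API calls.

        runner is assumed to be a configured CommandRunner instance; only the
        keyword arguments shown in the hunk above are relied on here.
        """
        return runner.RunNamedCommand(
            'ls', args=[bucket_url], trace_token=token,
            parallel_operations=False, skip_update_check=True)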
diff --git a/catapult/third_party/gsutil/gslib/commands/acl.py b/catapult/third_party/gsutil/gslib/commands/acl.py
index a63dcfe..096d946 100644
--- a/catapult/third_party/gsutil/gslib/commands/acl.py
+++ b/catapult/third_party/gsutil/gslib/commands/acl.py
@@ -96,8 +96,9 @@
     gsutil -m acl set acl.txt gs://bucket/*.jpg
 
   Note that multi-threading/multi-processing is only done when the named URLs
-  refer to objects. gsutil -m acl set gs://bucket1 gs://bucket2 will run the
-  acl set operations sequentially.
+  refer to objects, which happens either if you name specific objects or 
+  if you enumerate objects by using an object wildcard or specifying
+  the acl -r flag.
 
 
 <B>SET OPTIONS</B>
diff --git a/catapult/third_party/gsutil/gslib/commands/config.py b/catapult/third_party/gsutil/gslib/commands/config.py
index 386af15..17cedbf 100644
--- a/catapult/third_party/gsutil/gslib/commands/config.py
+++ b/catapult/third_party/gsutil/gslib/commands/config.py
@@ -194,6 +194,9 @@
       json_api_version
       parallel_composite_upload_component_size
       parallel_composite_upload_threshold
+      sliced_object_download_component_size
+      sliced_object_download_max_components
+      sliced_object_download_threshold
       parallel_process_count
       parallel_thread_count
       prefer_api
@@ -298,6 +301,9 @@
 # revert DEFAULT_PARALLEL_COMPOSITE_UPLOAD_THRESHOLD value to '150M'.
 DEFAULT_PARALLEL_COMPOSITE_UPLOAD_THRESHOLD = '0'
 DEFAULT_PARALLEL_COMPOSITE_UPLOAD_COMPONENT_SIZE = '50M'
+DEFAULT_SLICED_OBJECT_DOWNLOAD_THRESHOLD = '150M'
+DEFAULT_SLICED_OBJECT_DOWNLOAD_COMPONENT_SIZE = '200M'
+DEFAULT_SLICED_OBJECT_DOWNLOAD_MAX_COMPONENTS = 4
 
 CONFIG_BOTO_SECTION_CONTENT = """
 [Boto]
@@ -433,9 +439,23 @@
 # Linux distributions to get crcmod included with the stock distribution. Once
 # that is done we will re-enable parallel composite uploads by default in
 # gsutil.
+#
+# Note: Parallel composite uploads should not be used with NEARLINE storage
+# class buckets, as doing this would incur an early deletion charge for each
+# component object.
 #parallel_composite_upload_threshold = %(parallel_composite_upload_threshold)s
 #parallel_composite_upload_component_size = %(parallel_composite_upload_component_size)s
 
+# 'sliced_object_download_threshold' and
+# 'sliced_object_download_component_size' have analogous functionality to
+# their respective parallel_composite_upload config values.
+# 'sliced_object_download_max_components' specifies the maximum number of 
+# slices to be used when performing a sliced object download. It is not
+# restricted by MAX_COMPONENT_COUNT.
+#sliced_object_download_threshold = %(sliced_object_download_threshold)s
+#sliced_object_download_component_size = %(sliced_object_download_component_size)s
+#sliced_object_download_max_components = %(sliced_object_download_max_components)s
+
 # 'use_magicfile' specifies if the 'file --mime-type <filename>' command should
 # be used to guess content types instead of the default filename extension-based
 # mechanism. Available on UNIX and MacOS (and possibly on Windows, if you're
@@ -493,6 +513,12 @@
            DEFAULT_PARALLEL_COMPOSITE_UPLOAD_THRESHOLD),
        'parallel_composite_upload_component_size': (
            DEFAULT_PARALLEL_COMPOSITE_UPLOAD_COMPONENT_SIZE),
+       'sliced_object_download_threshold': (
+           DEFAULT_PARALLEL_COMPOSITE_UPLOAD_THRESHOLD),
+       'sliced_object_download_component_size': (
+           DEFAULT_PARALLEL_COMPOSITE_UPLOAD_COMPONENT_SIZE),
+       'sliced_object_download_max_components': (
+           DEFAULT_SLICED_OBJECT_DOWNLOAD_MAX_COMPONENTS),
        'max_component_count': MAX_COMPONENT_COUNT}
 
 CONFIG_OAUTH2_CONFIG_CONTENT = """
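The three new sliced_object_download_* settings above work together: the threshold decides whether a download is sliced at all (a threshold of 0 disables slicing, as the cp help text added later in this patch states), while the component size and maximum component count bound how many slices are used. A hedged sketch of that calculation with a local helper, not gsutil's internal logic:

    def plan_sliced_download(object_size, threshold, component_size,
                             max_components):
        """Returns the number of slices to use for a download (1 = unsliced)."""
        if threshold == 0 or object_size < threshold:
            return 1
        components = -(-object_size // component_size)   # ceiling division
        return max(1, min(components, max_components))

    # With defaults resembling the ones above (threshold 150M, component size
    # 200M, max components 4), a 1 GiB object would be split into 4 slices:
    # plan_sliced_download(1024**3, 150 * 1024**2, 200 * 1024**2, 4) -> 4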
diff --git a/catapult/third_party/gsutil/gslib/commands/cp.py b/catapult/third_party/gsutil/gslib/commands/cp.py
index 34636dc..5bed77a 100644
--- a/catapult/third_party/gsutil/gslib/commands/cp.py
+++ b/catapult/third_party/gsutil/gslib/commands/cp.py
@@ -23,8 +23,6 @@
 
 from gslib import copy_helper
 from gslib.cat_helper import CatHelper
-from gslib.cloud_api import AccessDeniedException
-from gslib.cloud_api import NotFoundException
 from gslib.command import Command
 from gslib.command_argument import CommandArgument
 from gslib.commands.compose import MAX_COMPONENT_COUNT
@@ -209,12 +207,19 @@
   option (see OPTIONS below).
 
   One additional note about copying in the cloud: If the destination bucket has
-  versioning enabled, gsutil cp will copy all versions of the source object(s).
-  For example:
+  versioning enabled, gsutil cp will by default copy only live versions of the
+  source object(s). For example:
 
     gsutil cp gs://bucket1/obj gs://bucket2
 
-  will cause all versions of gs://bucket1/obj to be copied to gs://bucket2.
+  will cause only the single live version of gs://bucket1/obj to be copied
+  to gs://bucket2, even if there are archived versions of gs://bucket1/obj. To
+  also copy archived versions, use the -A flag:
+
+    gsutil cp -A gs://bucket1/obj gs://bucket2
+
+  The gsutil -m flag is disallowed when using the cp -A flag, to ensure that
+  version ordering is preserved.
 """
 
 _CHECKSUM_VALIDATION_TEXT = """
@@ -300,18 +305,11 @@
 
   Similarly, gsutil automatically performs resumable downloads (using HTTP
   standard Range GET operations) whenever you use the cp command, unless the
-  destination is a stream or null. In this case the partially downloaded file
-  will be visible as soon as it starts being written. Thus, before you attempt
-  to use any files downloaded by gsutil you should make sure the download
-  completed successfully, by checking the exit status from the gsutil command.
-  This can be done in a bash script, for example, by doing:
+  destination is a stream or null. In this case, a partially downloaded
+  temporary file will be visible in the destination directory. Upon completion,
+  the original file is deleted and overwritten with the downloaded contents.
 
-     gsutil cp gs://your-bucket/your-object ./local-file
-     if [ "$status" -ne "0" ] ; then
-       << Code that handles failures >>
-     fi
-
-  Resumable uploads and downloads store some state information in a file
+  Resumable uploads and downloads store some state information in files
   in ~/.gsutil named by the destination object or file. If you attempt to
   resume a transfer from a machine with a different directory, the transfer
   will start over from scratch.
@@ -342,6 +340,31 @@
   transfers (which perform integrity checking automatically).
 """
 
+_SLICED_OBJECT_DOWNLOADS_TEXT = """
+<B>SLICED OBJECT DOWNLOADS</B>
+  gsutil automatically uses HTTP Range GET requests to perform "sliced"
+  downloads in parallel for downloads of large objects. This means that, if
+  enabled, disk space for the temporary download destination file will be
+  pre-allocated and byte ranges (slices) within the file will be downloaded in
+  parallel. Once all slices have completed downloading, the temporary file will
+  be renamed to the destination file. No additional local disk space is
+  required for this operation.
+
+  This feature is only available for Google Cloud Storage objects because it
+  requires a fast composable checksum that can be used to verify the data
+  integrity of the slices. Thus, using sliced object downloads also requires a
+  compiled crcmod (see "gsutil help crcmod") on the machine performing the
+  download. If compiled crcmod is not available, normal download will instead
+  be used.
+
+  Note: since sliced object downloads cause multiple writes to occur at various
+  locations on disk, this can degrade performance for disks with slow seek
+  times, especially for large numbers of slices. While the default number of
+  slices is small to avoid this, sliced object download can be completely
+  disabled by setting the "sliced_object_download_threshold" variable in the
+  .boto config file to 0.
+"""
+
 _PARALLEL_COMPOSITE_UPLOADS_TEXT = """
 <B>PARALLEL COMPOSITE UPLOADS</B>
   gsutil can automatically use
@@ -364,6 +387,10 @@
   distributions to get crcmod included with the stock distribution. Once that is
   done we will re-enable parallel composite uploads by default in gsutil.
 
+  Parallel composite uploads should not be used with NEARLINE storage
+  class buckets, as doing this would incur an early deletion charge for each
+  component object.
+
   To try parallel composite uploads you can run the command:
 
     gsutil -o GSUtil:parallel_composite_upload_threshold=150M cp bigfile gs://your-bucket
@@ -464,8 +491,13 @@
 
 _OPTIONS_TEXT = """
 <B>OPTIONS</B>
-  -a canned_acl   Sets named canned_acl when uploaded objects created. See
-                  'gsutil help acls' for further details.
+  -a canned_acl  Sets named canned_acl when uploaded objects created. See
+                 'gsutil help acls' for further details.
+
+  -A             Copy all source versions from source buckets/folders.
+                 If not set, only the live version of each source object is
+                 copied. Note: this option is only useful when the destination
+                 bucket has versioning enabled.
 
   -c             If an error occurs, continue to attempt to copy the remaining
                  files. If any copies were unsuccessful, gsutil's exit status
@@ -573,7 +605,8 @@
                  directory level, and skip any subdirectories.
 
   -U             Skip objects with unsupported object types instead of failing.
-                 Unsupported object types are s3 glacier objects.
+                 Unsupported object types are Amazon S3 Objects in the GLACIER
+                 storage class.
 
   -v             Requests that the version-specific URL for each uploaded object
                  be printed. Given this URL you can make future upload requests
@@ -626,12 +659,13 @@
                                    _RETRY_HANDLING_TEXT,
                                    _RESUMABLE_TRANSFERS_TEXT,
                                    _STREAMING_TRANSFERS_TEXT,
+                                   _SLICED_OBJECT_DOWNLOADS_TEXT,
                                    _PARALLEL_COMPOSITE_UPLOADS_TEXT,
                                    _CHANGING_TEMP_DIRECTORIES_TEXT,
                                    _OPTIONS_TEXT])
 
 
-CP_SUB_ARGS = 'a:cDeIL:MNnprRtUvz:'
+CP_SUB_ARGS = 'a:AcDeIL:MNnprRtUvz:'
 
 
 def _CopyFuncWrapper(cls, args, thread_state=None):
@@ -732,10 +766,8 @@
     # (e.g., trying to download an object called "mydata/" where the local
     # directory "mydata" exists).
     if IsCloudSubdirPlaceholder(exp_src_url):
-      self.logger.info('Skipping cloud sub-directory placeholder object (%s) '
-                       'because such objects aren\'t needed in (and would '
-                       'interfere with) directories in the local file system',
-                       exp_src_url)
+      # We used to output the message 'Skipping cloud sub-directory placeholder
+      # object...' but we no longer do so because it caused customer confusion.
       return
 
     if copy_helper_opts.use_manifest and self.manifest.WasSuccessful(
@@ -789,7 +821,7 @@
               self.logger, exp_src_url, dst_url, gsutil_api,
               self, _CopyExceptionHandler, allow_splitting=True,
               headers=self.headers, manifest=self.manifest,
-              gzip_exts=self.gzip_exts, test_method=self.test_method))
+              gzip_exts=self.gzip_exts))
       if copy_helper_opts.use_manifest:
         if md5:
           self.manifest.Set(exp_src_url.url_string, 'md5', md5)
@@ -872,33 +904,11 @@
         copy_helper.ExpandUrlToSingleBlr(self.args[-1], self.gsutil_api,
                                          self.debug, self.project_id))
 
-    # If the destination bucket has versioning enabled iterate with
-    # all_versions=True. That way we'll copy all versions if the source bucket
-    # is versioned; and by leaving all_versions=False if the destination bucket
-    # has versioning disabled we will avoid copying old versions all to the same
-    # un-versioned destination object.
-    all_versions = False
-    try:
-      bucket = self._GetBucketWithVersioningConfig(self.exp_dst_url)
-      if bucket and bucket.versioning and bucket.versioning.enabled:
-        all_versions = True
-    except AccessDeniedException:
-      # This happens (in the XML API only) if the user doesn't have OWNER access
-      # on the bucket (needed to check if versioning is enabled). In this case
-      # fall back to copying all versions (which can be inefficient for the
-      # reason noted in the comment above). We don't try to warn the user
-      # because that would result in false positive warnings (since we can't
-      # check if versioning is enabled on the destination bucket).
-      #
-      # For JSON, we will silently not return versioning if we don't have
-      # access.
-      all_versions = True
-
     name_expansion_iterator = NameExpansionIterator(
         self.command_name, self.debug,
         self.logger, self.gsutil_api, url_strs,
         self.recursion_requested or copy_helper_opts.perform_mv,
-        project_id=self.project_id, all_versions=all_versions,
+        project_id=self.project_id, all_versions=self.all_versions,
         continue_on_error=self.continue_on_error or self.parallel_operations)
 
     # Use a lock to ensure accurate statistics in the face of
@@ -948,7 +958,7 @@
             self.total_bytes_transferred, self.total_elapsed_time,
             MakeHumanReadable(self.total_bytes_per_second))
     if self.op_failure_count:
-      plural_str = 's' if self.op_failure_count else ''
+      plural_str = 's' if self.op_failure_count > 1 else ''
       raise CommandException('%d file%s/object%s could not be transferred.' % (
           self.op_failure_count, plural_str, plural_str))
 
@@ -973,6 +983,8 @@
     # Command class, so save in Command state rather than CopyHelperOpts.
     self.canned = None
 
+    self.all_versions = False
+
     self.skip_unsupported_objects = False
 
     # Files matching these extensions should be gzipped before uploading.
@@ -988,6 +1000,8 @@
         if o == '-a':
           canned_acl = a
           self.canned = True
+        if o == '-A':
+          self.all_versions = True
         if o == '-c':
           self.continue_on_error = True
         elif o == '-D':
@@ -1024,6 +1038,11 @@
     if preserve_acl and canned_acl:
       raise CommandException(
           'Specifying both the -p and -a options together is invalid.')
+    if self.all_versions and self.parallel_operations:
+      raise CommandException(
+          'The gsutil -m option is not supported with the cp -A flag, to '
+          'ensure that object version ordering is preserved. Please re-run '
+          'the command without the -m option.')
     return CreateCopyHelperOpts(
         perform_mv=perform_mv,
         no_clobber=no_clobber,
@@ -1035,33 +1054,3 @@
         canned_acl=canned_acl,
         skip_unsupported_objects=self.skip_unsupported_objects,
         test_callback_file=test_callback_file)
-
-  def _GetBucketWithVersioningConfig(self, exp_dst_url):
-    """Gets versioning config for a bucket and ensures that it exists.
-
-    Args:
-      exp_dst_url: Wildcard-expanded destination StorageUrl.
-
-    Raises:
-      AccessDeniedException: if there was a permissions problem accessing the
-                             bucket or its versioning config.
-      CommandException: if URL refers to a cloud bucket that does not exist.
-
-    Returns:
-      apitools Bucket with versioning configuration.
-    """
-    bucket = None
-    if exp_dst_url.IsCloudUrl() and exp_dst_url.IsBucket():
-      try:
-        bucket = self.gsutil_api.GetBucket(
-            exp_dst_url.bucket_name, provider=exp_dst_url.scheme,
-            fields=['versioning'])
-      except AccessDeniedException, e:
-        raise
-      except NotFoundException, e:
-        raise CommandException('Destination bucket %s does not exist.' %
-                               exp_dst_url)
-      except Exception, e:
-        raise CommandException('Error retrieving destination bucket %s: %s' %
-                               (exp_dst_url, e.message))
-      return bucket
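The revised download text above (resumable and sliced) describes writing to a visible temporary file in the destination directory and only replacing the destination once the transfer has completed. A minimal sketch of that download-to-temp-then-rename pattern; the temp suffix and the fetch callback are hypothetical, since gsutil's actual temporary-file naming is not part of this hunk:

    import os

    def download_via_temp_file(fetch, dest_path, temp_suffix='.partial'):
        """Downloads into dest_path + temp_suffix, then swaps it into place.

        fetch: callable that takes a writable file object and fills it with the
               object's bytes (e.g. via one or more HTTP Range GET requests).
        """
        temp_path = dest_path + temp_suffix
        with open(temp_path, 'wb') as fp:
            fetch(fp)
        # Only replace an existing destination after the download finished.
        if os.path.exists(dest_path):
            os.remove(dest_path)
        os.rename(temp_path, dest_path)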
diff --git a/catapult/third_party/gsutil/gslib/commands/defacl.py b/catapult/third_party/gsutil/gslib/commands/defacl.py
index 24c6e25..87648a8 100644
--- a/catapult/third_party/gsutil/gslib/commands/defacl.py
+++ b/catapult/third_party/gsutil/gslib/commands/defacl.py
@@ -30,6 +30,7 @@
 from gslib.help_provider import CreateHelpText
 from gslib.storage_url import StorageUrlFromString
 from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
+from gslib.translation_helper import PRIVATE_DEFAULT_OBJ_ACL
 from gslib.util import NO_MAX
 from gslib.util import Retry
 from gslib.util import UrlsAreForSingleProvider
@@ -101,10 +102,15 @@
 
     gsutil defacl ch -g admins@example.com:O gs://example-bucket
 
-  Grant the owners of project example-project-123 READ access to new objects
-  created in the bucket example-bucket:
+  Remove the group admins@example.com from the default object ACL on bucket
+  example-bucket:
 
-    gsutil acl ch -p owners-example-project-123:R gs://example-bucket
+    gsutil defacl ch -d admins@example.com gs://example-bucket
+
+  Add the owners of project example-project-123 to the default object ACL on
+  bucket example-bucket with READ access:
+
+    gsutil defacl ch -p owners-example-project-123:R gs://example-bucket
 
   NOTE: You can replace 'owners' with 'viewers' or 'editors' to grant access
   to a project's viewers/editors respectively.
@@ -268,6 +274,11 @@
       self.logger.info('No changes to %s', url)
       return
 
+    if not current_acl:
+      # Use a sentinel value to indicate a private (no entries) default
+      # object ACL.
+      current_acl.append(PRIVATE_DEFAULT_OBJ_ACL)
+
     try:
       preconditions = Preconditions(meta_gen_match=bucket.metageneration)
       bucket_metadata = apitools_messages.Bucket(defaultObjectAcl=current_acl)
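Both the acl and defacl hunks above append PRIVATE_DEFAULT_OBJ_ACL when the computed default object ACL would otherwise be an empty list, so that "set the default ACL to private" is distinguishable from "nothing to send" in a patch-style API call. A generic sketch of that sentinel idea (the marker object here is illustrative, not gsutil's PRIVATE_DEFAULT_OBJ_ACL):

    # Unique marker meaning "explicitly clear all default object ACL entries".
    PRIVATE_SENTINEL = object()

    def prepare_default_acl_patch(entries):
        """Returns the ACL list to include in a bucket patch request."""
        if not entries:
            # An empty list would look like "no update" to a patch API, so send
            # a single sentinel entry the API layer can translate to "private".
            return [PRIVATE_SENTINEL]
        return entries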
diff --git a/catapult/third_party/gsutil/gslib/commands/mb.py b/catapult/third_party/gsutil/gslib/commands/mb.py
index 591cb14..2cdd349 100644
--- a/catapult/third_party/gsutil/gslib/commands/mb.py
+++ b/catapult/third_party/gsutil/gslib/commands/mb.py
@@ -98,15 +98,15 @@
 
   If you specify the Durable Reduced Availability storage class (-c DRA), you
   can specify one of the continental locations above or one of the regional
-  locations below:
+  locations below: [1]_
 
   - ASIA-EAST1 (Eastern Asia-Pacific)
-  - US-EAST1 (Eastern United States) [1]_
-  - US-EAST2 (Eastern United States) [1]_
-  - US-EAST3 (Eastern United States) [1]_
-  - US-CENTRAL1 (Central United States) [1]_
-  - US-CENTRAL2 (Central United States) [1]_
-  - US-WEST1 (Western United States) [1]_
+  - US-EAST1 (Eastern United States)
+  - US-EAST2 (Eastern United States)
+  - US-EAST3 (Eastern United States)
+  - US-CENTRAL1 (Central United States)
+  - US-CENTRAL2 (Central United States)
+  - US-WEST1 (Western United States)
 
   Example:
     gsutil mb -c DRA -l US-CENTRAL1 gs://some-bucket
diff --git a/catapult/third_party/gsutil/gslib/commands/perfdiag.py b/catapult/third_party/gsutil/gslib/commands/perfdiag.py
index d88eae7..f95545b 100644
--- a/catapult/third_party/gsutil/gslib/commands/perfdiag.py
+++ b/catapult/third_party/gsutil/gslib/commands/perfdiag.py
@@ -18,6 +18,7 @@
 
 import calendar
 from collections import defaultdict
+from collections import namedtuple
 import contextlib
 import cStringIO
 import datetime
@@ -41,17 +42,21 @@
 import gslib
 from gslib.cloud_api import NotFoundException
 from gslib.cloud_api import ServiceException
-from gslib.cloud_api_helper import GetDownloadSerializationDict
+from gslib.cloud_api_helper import GetDownloadSerializationData
 from gslib.command import Command
 from gslib.command import DummyArgChecker
 from gslib.command_argument import CommandArgument
 from gslib.commands import config
 from gslib.cs_api_map import ApiSelector
 from gslib.exception import CommandException
+from gslib.file_part import FilePart
 from gslib.hashing_helper import CalculateB64EncodedMd5FromContents
 from gslib.storage_url import StorageUrlFromString
 from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
+from gslib.util import CheckFreeSpace
+from gslib.util import DivideAndCeil
 from gslib.util import GetCloudApiInstance
+from gslib.util import GetFileSize
 from gslib.util import GetMaxRetryDelay
 from gslib.util import HumanReadableToBytes
 from gslib.util import IS_LINUX
@@ -60,11 +65,11 @@
 from gslib.util import Percentile
 from gslib.util import ResumableThreshold
 
-
 _SYNOPSIS = """
   gsutil perfdiag [-i in.json]
-  gsutil perfdiag [-o out.json] [-n iterations] [-c processes]
-      [-k threads] [-s size] [-t tests] url...
+  gsutil perfdiag [-o out.json] [-n objects] [-c processes]
+      [-k threads] [-p parallelism type] [-y slices] [-s size] [-d directory]
+      [-t tests] url...
 """
 
 _DETAILED_HELP_TEXT = ("""
@@ -99,9 +104,8 @@
 
 
 <B>OPTIONS</B>
-  -n          Sets the number of iterations performed when downloading and
-              uploading files during latency and throughput tests. Defaults to
-              5.
+  -n          Sets the number of objects to use when downloading and uploading
+              files during tests. Defaults to 5.
 
   -c          Sets the number of processes to use while running throughput
               experiments. The default value is 1.
@@ -110,20 +114,55 @@
               throughput experiments. Each process will receive an equal number
               of threads. The default value is 1.
 
-  -s          Sets the size (in bytes) of the test file used to perform read
-              and write throughput tests. The default is 1 MiB. This can also
-              be specified using byte suffixes such as 500K or 1M. Note: these
-              values are interpreted as multiples of 1024 (K=1024, M=1024*1024,
-              etc.)
+              Note: All specified threads and processes will be created, but may
+              not be saturated with work if too few objects (specified with -n)
+              and too few components (specified with -y) are specified.
+
+  -p          Sets the type of parallelism to be used (only applicable when
+              threads or processes are specified and threads * processes > 1).
+              The default is to use fan. Must be one of the following:
+
+              fan
+                 Use one thread per object. This is akin to using gsutil -m cp,
+                 with sliced object download / parallel composite upload
+                 disabled.
+
+              slice
+                 Use Y (specified with -y) threads for each object, transferring
+                 one object at a time. This is akin to using parallel object
+                 download / parallel composite upload, without -m. Sliced
+                 uploads not supported for s3.
+
+              both
+                 Use Y (specified with -y) threads for each object, transferring
+                 multiple objects at a time. This is akin to simultaneously
+                 using sliced object download / parallel composite upload and
+                 gsutil -m cp. Sliced uploads not supported for s3.
+
+  -y          Sets the number of slices to divide each file/object into while
+              transferring data. Only applicable with the slice (or both)
+              parallelism type. The default is 4 slices.
+
+  -s          Sets the size (in bytes) for each of the N (set with -n) objects
+              used in the read and write throughput tests. The default is 1 MiB.
+              This can also be specified using byte suffixes such as 500K or 1M.
+              Note: these values are interpreted as multiples of 1024 (K=1024,
+              M=1024*1024, etc.)
+              Note: If rthru_file or wthru_file are performed, N (set with -n)
+              times as much disk space as specified will be required for the
+              operation.
+
+  -d          Sets the directory to store temporary local files in. If not
+              specified, a default temporary directory will be used.
 
   -t          Sets the list of diagnostic tests to perform. The default is to
-              run all diagnostic tests. Must be a comma-separated list
-              containing one or more of the following:
+              run the lat, rthru, and wthru diagnostic tests. Must be a
+              comma-separated list containing one or more of the following:
 
               lat
-                 Runs N iterations (set with -n) of writing the file,
-                 retrieving its metadata, reading the file, and deleting
-                 the file. Records the latency of each operation.
+                 For N (set with -n) objects, write the object, retrieve its
+                 metadata, read the object, and finally delete the object.
+                 Record the latency of each operation.
 
               list
                  Write N (set with -n) objects to the bucket, record how long
@@ -136,15 +175,22 @@
                  Runs N (set with -n) read operations, with at most C
                  (set with -c) reads outstanding at any given time.
 
+              rthru_file
+                 The same as rthru, but simultaneously writes data to the disk,
+                 to gauge the performance impact of the local disk on downloads.
+
               wthru
                  Runs N (set with -n) write operations, with at most C
                  (set with -c) writes outstanding at any given time.
 
+              wthru_file
+                 The same as wthru, but simultaneously reads data from the disk,
+                 to gauge the performance impact of the local disk on uploads.
+
   -m          Adds metadata to the result JSON file. Multiple -m values can be
               specified. Example:
 
-                  gsutil perfdiag -m "key1:value1" -m "key2:value2" \
-                                  gs://bucketname/
+                  gsutil perfdiag -m "key1:val1" -m "key2:val2" gs://bucketname
 
               Each metadata key will be added to the top-level "metadata"
               dictionary in the output JSON file.
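The -p option above distinguishes fan parallelism (one task per object) from slice parallelism (one task per byte range of a single object), with "both" combining the two. A sketch of how the two basic strategies expand into download tasks, using simple tuples rather than the FanDownloadTuple/SliceDownloadTuple types defined in the next hunk:

    def fan_tasks(object_names):
        """One whole-object download task per object (the 'fan' strategy)."""
        return [(name, None, None) for name in object_names]

    def slice_tasks(object_name, object_size, num_slices):
        """One byte-range task per slice of one object (the 'slice' strategy)."""
        slice_size = -(-object_size // num_slices)  # ceiling division
        tasks = []
        for start in range(0, object_size, slice_size):
            end = min(start + slice_size, object_size) - 1  # inclusive end byte
            tasks.append((object_name, start, end))
        return tasks

    # slice_tasks('obj', 10, 4) ->
    #     [('obj', 0, 2), ('obj', 3, 5), ('obj', 6, 8), ('obj', 9, 9)]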
@@ -178,6 +224,41 @@
   this information will be sent to Google unless you choose to send it.
 """)
 
+FileDataTuple = namedtuple(
+    'FileDataTuple',
+    'size md5 data')
+
+# Describes one object in a fanned download. If need_to_slice is specified as
+# True, the object should be downloaded with the slice strategy. Other field
+# names are the same as documented in PerfDiagCommand.Download.
+FanDownloadTuple = namedtuple(
+    'FanDownloadTuple',
+    'need_to_slice object_name file_name serialization_data')
+
+# Describes one slice in a sliced download.
+# Field names are the same as documented in PerfDiagCommand.Download.
+SliceDownloadTuple = namedtuple(
+    'SliceDownloadTuple',
+    'object_name file_name serialization_data start_byte end_byte')
+
+# Describes one file in a fanned upload. If need_to_slice is specified as
+# True, the file should be uploaded with the slice strategy. Other field
+# names are the same as documented in PerfDiagCommand.Upload.
+FanUploadTuple = namedtuple(
+    'FanUploadTuple',
+    'need_to_slice file_name object_name use_file')
+
+# Describes one slice in a sliced upload.
+# Field names are the same as documented in PerfDiagCommand.Upload.
+SliceUploadTuple = namedtuple(
+    'SliceUploadTuple',
+    'file_name object_name use_file file_start file_size')
+
+# Dict storing file_path:FileDataTuple for each temporary file used by
+# perfdiag. This data should be kept outside of the PerfDiagCommand class
+# since calls to Apply will make copies of all member data.
+temp_file_dict = {}
+
 
 class Error(Exception):
   """Base exception class for this module."""
@@ -189,16 +270,73 @@
   pass
 
 
-def _DownloadWrapper(cls, arg, thread_state=None):
-  cls.Download(arg, thread_state=thread_state)
+def _DownloadObject(cls, args, thread_state=None):
+  """Function argument to apply for performing fanned parallel downloads.
+
+  Args:
+    cls: The calling PerfDiagCommand class instance.
+    args: A FanDownloadTuple object describing this download.
+    thread_state: gsutil Cloud API instance to use for the operation.
+  """
+  cls.gsutil_api = GetCloudApiInstance(cls, thread_state)
+  if args.need_to_slice:
+    cls.PerformSlicedDownload(args.object_name, args.file_name,
+                              args.serialization_data)
+  else:
+    cls.Download(args.object_name, args.file_name, args.serialization_data)
 
 
-def _UploadWrapper(cls, arg, thread_state=None):
-  cls.Upload(arg, thread_state=thread_state)
+def _DownloadSlice(cls, args, thread_state=None):
+  """Function argument to apply for performing sliced downloads.
+
+  Args:
+    cls: The calling PerfDiagCommand class instance.
+    args: A SliceDownloadTuple object describing this download.
+    thread_state: gsutil Cloud API instance to use for the operation.
+  """
+  cls.gsutil_api = GetCloudApiInstance(cls, thread_state)
+  cls.Download(args.object_name, args.file_name, args.serialization_data,
+               args.start_byte, args.end_byte)
 
 
-def _DeleteWrapper(cls, arg, thread_state=None):
-  cls.Delete(arg, thread_state=thread_state)
+def _UploadObject(cls, args, thread_state=None):
+  """Function argument to apply for performing fanned parallel uploads.
+
+  Args:
+    cls: The calling PerfDiagCommand class instance.
+    args: A FanUploadTuple object describing this upload.
+    thread_state: gsutil Cloud API instance to use for the operation.
+  """
+  cls.gsutil_api = GetCloudApiInstance(cls, thread_state)
+  if args.need_to_slice:
+    cls.PerformSlicedUpload(args.file_name, args.object_name, args.use_file)
+  else:
+    cls.Upload(args.file_name, args.object_name, args.use_file)
+
+
+def _UploadSlice(cls, args, thread_state=None):
+  """Function argument to apply for performing sliced parallel uploads.
+
+  Args:
+    cls: The calling PerfDiagCommand class instance.
+    args: A SliceUploadTuple object describing this upload.
+    thread_state: gsutil Cloud API instance to use for the operation.
+  """
+  cls.gsutil_api = GetCloudApiInstance(cls, thread_state)
+  cls.Upload(args.file_name, args.object_name, args.use_file,
+             args.file_start, args.file_size)
+
+
+def _DeleteWrapper(cls, object_name, thread_state=None):
+  """Function argument to apply for performing parallel object deletions.
+
+  Args:
+    cls: The calling PerfDiagCommand class instance.
+    object_name: The object name to delete from the test bucket.
+    thread_state: gsutil Cloud API instance to use for the operation.
+  """
+  cls.gsutil_api = GetCloudApiInstance(cls, thread_state)
+  cls.Delete(object_name)
 
 
 def _PerfdiagExceptionHandler(cls, e):
@@ -216,6 +354,9 @@
   def write(self, *args, **kwargs):  # pylint: disable=invalid-name
     pass
 
+  def close(self):  # pylint: disable=invalid-name
+    pass
+
 
 # Many functions in perfdiag re-define a temporary function based on a
 # variable from a loop, resulting in a false positive from the linter.
@@ -230,7 +371,7 @@
       usage_synopsis=_SYNOPSIS,
       min_args=0,
       max_args=1,
-      supported_sub_args='n:c:k:s:t:m:i:o:',
+      supported_sub_args='n:c:k:p:y:s:d:t:m:i:o:',
       file_url_ok=False,
       provider_url_ok=False,
       urls_start_arg=0,
@@ -253,7 +394,7 @@
   # Byte sizes to use for latency testing files.
   # TODO: Consider letting the user specify these sizes with a configuration
   # parameter.
-  test_file_sizes = (
+  test_lat_file_sizes = (
       0,  # 0 bytes
       1024,  # 1 KiB
       102400,  # 100 KiB
@@ -262,15 +403,26 @@
 
   # Test names.
   RTHRU = 'rthru'
+  RTHRU_FILE = 'rthru_file'
   WTHRU = 'wthru'
+  WTHRU_FILE = 'wthru_file'
   LAT = 'lat'
   LIST = 'list'
 
+  # Parallelism strategies.
+  FAN = 'fan'
+  SLICE = 'slice'
+  BOTH = 'both'
+
   # List of all diagnostic tests.
-  ALL_DIAG_TESTS = (RTHRU, WTHRU, LAT, LIST)
+  ALL_DIAG_TESTS = (RTHRU, RTHRU_FILE, WTHRU, WTHRU_FILE, LAT, LIST)
+
   # List of diagnostic tests to run by default.
   DEFAULT_DIAG_TESTS = (RTHRU, WTHRU, LAT)
 
+  # List of parallelism strategies.
+  PARALLEL_STRATEGIES = (FAN, SLICE, BOTH)
+
   # Google Cloud Storage XML API endpoint host.
   XML_API_HOST = boto.config.get(
       'Credentials', 'gs_host', boto.gs.connection.GSConnection.DefaultHost)
@@ -324,21 +476,58 @@
                              "subprocess '%s'." % (p.returncode, ' '.join(cmd)))
     return stdoutdata if return_output else p.returncode
 
+  def _WarnIfLargeData(self):
+    """Outputs a warning message if a large amount of data is being used."""
+    if self.num_objects * self.thru_filesize > HumanReadableToBytes('2GiB'):
+      self.logger.info('This is a large operation, and could take a while.')
+
+  def _MakeTempFile(self, file_size=0, mem_metadata=False,
+                    mem_data=False, prefix='gsutil_test_file'):
+    """Creates a temporary file of the given size and returns its path.
+
+    Args:
+      file_size: The size of the temporary file to create.
+      mem_metadata: If true, store md5 and file size in memory at
+                    temp_file_dict[fpath].md5 and temp_file_dict[fpath].size.
+      mem_data: If true, store the file data in memory at
+                temp_file_dict[fpath].data
+      prefix: The prefix to use for the temporary file. Defaults to
+              gsutil_test_file.
+
+    Returns:
+      The file path of the created temporary file.
+    """
+    fd, fpath = tempfile.mkstemp(suffix='.bin', prefix=prefix,
+                                 dir=self.directory, text=False)
+    with os.fdopen(fd, 'wb') as fp:
+      random_bytes = os.urandom(min(file_size,
+                                    self.MAX_UNIQUE_RANDOM_BYTES))
+      total_bytes_written = 0
+      while total_bytes_written < file_size:
+        num_bytes = min(self.MAX_UNIQUE_RANDOM_BYTES,
+                        file_size - total_bytes_written)
+        fp.write(random_bytes[:num_bytes])
+        total_bytes_written += num_bytes
+
+    if mem_metadata or mem_data:
+      with open(fpath, 'rb') as fp:
+        file_size = GetFileSize(fp) if mem_metadata else None
+        md5 = CalculateB64EncodedMd5FromContents(fp) if mem_metadata else None
+        data = fp.read() if mem_data else None
+        temp_file_dict[fpath] = FileDataTuple(file_size, md5, data)
+
+    self.temporary_files.add(fpath)
+    return fpath
+
   def _SetUp(self):
     """Performs setup operations needed before diagnostics can be run."""
 
     # Stores test result data.
     self.results = {}
-    # List of test files in a temporary location on disk for latency ops.
-    self.latency_files = []
-    # List of test objects to clean up in the test bucket.
-    self.test_object_names = set()
-    # Maps each test file path to its size in bytes.
-    self.file_sizes = {}
-    # Maps each test file to its contents as a string.
-    self.file_contents = {}
-    # Maps each test file to its MD5 hash.
-    self.file_md5s = {}
+    # Set of file paths for local temporary files.
+    self.temporary_files = set()
+    # Set of names for test objects that exist in the test bucket.
+    self.temporary_objects = set()
     # Total number of HTTP requests made.
     self.total_requests = 0
     # Total number of HTTP 5xx errors.
@@ -347,63 +536,86 @@
     self.error_responses_by_code = defaultdict(int)
     # Total number of socket errors.
     self.connection_breaks = 0
+    # Boolean to prevent doing cleanup twice.
+    self.teardown_completed = False
 
-    def _MakeFile(file_size):
-      """Creates a temporary file of the given size and returns its path."""
-      fd, fpath = tempfile.mkstemp(suffix='.bin', prefix='gsutil_test_file',
-                                   text=False)
-      self.file_sizes[fpath] = file_size
-      random_bytes = os.urandom(min(file_size, self.MAX_UNIQUE_RANDOM_BYTES))
-      total_bytes = 0
-      file_contents = ''
-      while total_bytes < file_size:
-        num_bytes = min(self.MAX_UNIQUE_RANDOM_BYTES, file_size - total_bytes)
-        file_contents += random_bytes[:num_bytes]
-        total_bytes += num_bytes
-      self.file_contents[fpath] = file_contents
-      with os.fdopen(fd, 'wb') as f:
-        f.write(self.file_contents[fpath])
-      with open(fpath, 'rb') as f:
-        self.file_md5s[fpath] = CalculateB64EncodedMd5FromContents(f)
-      return fpath
+    # Create files for latency test.
+    if self.LAT in self.diag_tests:
+      self.latency_files = []
+      for file_size in self.test_lat_file_sizes:
+        fpath = self._MakeTempFile(file_size, mem_metadata=True, mem_data=True)
+        self.latency_files.append(fpath)
 
-    # Create files for latency tests.
-    for file_size in self.test_file_sizes:
-      fpath = _MakeFile(file_size)
-      self.latency_files.append(fpath)
+    # Create files for throughput tests.
+    if self.diag_tests.intersection(
+        (self.RTHRU, self.WTHRU, self.RTHRU_FILE, self.WTHRU_FILE)):
+      # Create a file for warming up the TCP connection.
+      self.tcp_warmup_file = self._MakeTempFile(
+          5 * 1024 * 1024, mem_metadata=True, mem_data=True)
 
-    # Creating a file for warming up the TCP connection.
-    self.tcp_warmup_file = _MakeFile(5 * 1024 * 1024)  # 5 Mebibytes.
-    # Remote file to use for TCP warmup.
-    self.tcp_warmup_remote_file = (str(self.bucket_url) +
-                                   os.path.basename(self.tcp_warmup_file))
+      # For in memory tests, throughput tests transfer the same object N times
+      # instead of creating N objects, in order to avoid excessive memory usage.
+      if self.diag_tests.intersection((self.RTHRU, self.WTHRU)):
+        self.mem_thru_file_name = self._MakeTempFile(
+            self.thru_filesize, mem_metadata=True, mem_data=True)
+        self.mem_thru_object_name = os.path.basename(self.mem_thru_file_name)
 
-    # Local file on disk for write throughput tests.
-    self.thru_local_file = _MakeFile(self.thru_filesize)
+      # For tests that use disk I/O, it is necessary to create N objects
+      # in order to properly measure the performance impact of seeks.
+      if self.diag_tests.intersection((self.RTHRU_FILE, self.WTHRU_FILE)):
+        # List of file names and corresponding object names to use for file
+        # throughput tests.
+        self.thru_file_names = []
+        self.thru_object_names = []
+
+        free_disk_space = CheckFreeSpace(self.directory)
+        if free_disk_space >= self.thru_filesize * self.num_objects:
+          self.logger.info('\nCreating %d local files each of size %s.'
+                           % (self.num_objects,
+                              MakeHumanReadable(self.thru_filesize)))
+          self._WarnIfLargeData()
+          for _ in range(self.num_objects):
+            file_name = self._MakeTempFile(self.thru_filesize,
+                                           mem_metadata=True)
+            self.thru_file_names.append(file_name)
+            self.thru_object_names.append(os.path.basename(file_name))
+        else:
+          raise CommandException(
+              'Not enough free disk space for throughput files: '
+              '%s of disk space required, but only %s available.'
+              % (MakeHumanReadable(self.thru_filesize * self.num_objects),
+                 MakeHumanReadable(free_disk_space)))
 
     # Dummy file buffer to use for downloading that goes nowhere.
     self.discard_sink = DummyFile()
 
+    # Filter out misleading progress callback output and the incorrect
+    # suggestion to use gsutil -m perfdiag.
+    self.logger.addFilter(self._PerfdiagFilter())
+
   def _TearDown(self):
     """Performs operations to clean things up after performing diagnostics."""
-    for fpath in self.latency_files + [self.thru_local_file,
-                                       self.tcp_warmup_file]:
+    if not self.teardown_completed:
+      temp_file_dict.clear()
+
       try:
-        os.remove(fpath)
+        for fpath in self.temporary_files:
+          os.remove(fpath)
+        if self.delete_directory:
+          os.rmdir(self.directory)
       except OSError:
         pass
 
-    for object_name in self.test_object_names:
-
-      def _Delete():
-        try:
-          self.gsutil_api.DeleteObject(self.bucket_url.bucket_name,
-                                       object_name,
-                                       provider=self.provider)
-        except NotFoundException:
-          pass
-
-      self._RunOperation(_Delete)
+      if self.threads > 1 or self.processes > 1:
+        args = [obj for obj in self.temporary_objects]
+        self.Apply(_DeleteWrapper, args, _PerfdiagExceptionHandler,
+                   arg_checker=DummyArgChecker,
+                   parallel_operations_override=True,
+                   process_count=self.processes, thread_count=self.threads)
+      else:
+        for object_name in self.temporary_objects:
+          self.Delete(object_name)
+    self.teardown_completed = True
 
   @contextlib.contextmanager
   def _Time(self, key, bucket):
@@ -478,12 +690,13 @@
     # Stores timing information for each category of operation.
     self.results['latency'] = defaultdict(list)
 
-    for i in range(self.num_iterations):
+    for i in range(self.num_objects):
       self.logger.info('\nRunning latency iteration %d...', i+1)
       for fpath in self.latency_files:
+        file_data = temp_file_dict[fpath]
         url = self.bucket_url.Clone()
         url.object_name = os.path.basename(fpath)
-        file_size = self.file_sizes[fpath]
+        file_size = file_data.size
         readable_file_size = MakeHumanReadable(file_size)
 
         self.logger.info(
@@ -493,7 +706,7 @@
         upload_target = StorageUrlToUploadObjectMetadata(url)
 
         def _Upload():
-          io_fp = cStringIO.StringIO(self.file_contents[fpath])
+          io_fp = cStringIO.StringIO(file_data.data)
           with self._Time('UPLOAD_%d' % file_size, self.results['latency']):
             self.gsutil_api.UploadObject(
                 io_fp, upload_target, size=file_size, provider=self.provider,
@@ -508,8 +721,7 @@
                                                 'mediaLink', 'size'])
         # Download will get the metadata first if we don't pass it in.
         download_metadata = self._RunOperation(_Metadata)
-        serialization_dict = GetDownloadSerializationDict(download_metadata)
-        serialization_data = json.dumps(serialization_dict)
+        serialization_data = GetDownloadSerializationData(download_metadata)
 
         def _Download():
           with self._Time('DOWNLOAD_%d' % file_size, self.results['latency']):
@@ -524,213 +736,308 @@
                                          provider=self.provider)
         self._RunOperation(_Delete)
 
-  class _CpFilter(logging.Filter):
+  class _PerfdiagFilter(logging.Filter):
 
     def filter(self, record):
-      # Used to prevent cp._LogCopyOperation from spewing output from
-      # subprocesses about every iteration.
+      # Used to prevent unnecessary output when using multiprocessing.
       msg = record.getMessage()
       return not (('Copying file:///' in msg) or ('Copying gs://' in msg) or
-                  ('Computing CRC' in msg))
+                  ('Computing CRC' in msg) or ('gsutil -m perfdiag' in msg))
 
   def _PerfdiagExceptionHandler(self, e):
     """Simple exception handler to allow post-completion status."""
     self.logger.error(str(e))
 
-  def _RunReadThruTests(self):
+  def PerformFannedDownload(self, need_to_slice, object_names, file_names,
+                            serialization_data):
+    """Performs a parallel download of multiple objects using the fan strategy.
+
+    Args:
+      need_to_slice: If True, additionally apply the slice strategy to each
+                     object in object_names.
+      object_names: A list of object names to be downloaded. Each object must
+                    already exist in the test bucket.
+      file_names: A list, corresponding by index to object_names, of file names
+                  for downloaded data. If None, discard downloaded data.
+      serialization_data: A list, corresponding by index to object_names,
+                          of serialization data for each object.
+    """
+    args = []
+    for i in range(len(object_names)):
+      file_name = file_names[i] if file_names else None
+      args.append(FanDownloadTuple(
+          need_to_slice, object_names[i], file_name,
+          serialization_data[i]))
+    self.Apply(_DownloadObject, args, _PerfdiagExceptionHandler,
+               ('total_requests', 'request_errors'),
+               arg_checker=DummyArgChecker, parallel_operations_override=True,
+               process_count=self.processes, thread_count=self.threads)
+
+  def PerformSlicedDownload(self, object_name, file_name, serialization_data):
+    """Performs a download of an object using the slice strategy.
+
+    Args:
+      object_name: The name of the object to download.
+      file_name: The name of the file to download data to, or None if data
+                 should be discarded.
+      serialization_data: The serialization data for the object.
+    """
+    if file_name:
+      with open(file_name, 'ab') as fp:
+        fp.truncate(self.thru_filesize)
+    component_size = DivideAndCeil(self.thru_filesize, self.num_slices)
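+    # Each slice covers a contiguous byte range of the object; the slices are
+    # downloaded in parallel below.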
+    args = []
+    for i in range(self.num_slices):
+      start_byte = i * component_size
+      end_byte = min((i + 1) * (component_size) - 1, self.thru_filesize - 1)
+      args.append(SliceDownloadTuple(object_name, file_name, serialization_data,
+                                     start_byte, end_byte))
+    self.Apply(_DownloadSlice, args, _PerfdiagExceptionHandler,
+               ('total_requests', 'request_errors'),
+               arg_checker=DummyArgChecker, parallel_operations_override=True,
+               process_count=self.processes, thread_count=self.threads)
+
+  def PerformFannedUpload(self, need_to_slice, file_names, object_names,
+                          use_file):
+    """Performs a parallel upload of multiple files using the fan strategy.
+
+    The metadata for file_name should be present in temp_file_dict prior
+    to calling. Also, the data for file_name should be present in temp_file_dict
+    if use_file is specified as False.
+
+    Args:
+      need_to_slice: If True, additionally apply the slice strategy to each
+                     file in file_names.
+      file_names: A list of file names to be uploaded.
+      object_names: A list, corresponding by index to file_names, of object
+                    names to upload data to.
+      use_file: If true, use disk I/O, otherwise read upload data from memory.
+    """
+    args = []
+    for i in range(len(file_names)):
+      args.append(FanUploadTuple(
+          need_to_slice, file_names[i], object_names[i], use_file))
+    self.Apply(_UploadObject, args, _PerfdiagExceptionHandler,
+               ('total_requests', 'request_errors'),
+               arg_checker=DummyArgChecker, parallel_operations_override=True,
+               process_count=self.processes, thread_count=self.threads)
+
+  def PerformSlicedUpload(self, file_name, object_name, use_file):
+    """Performs a parallel upload of a file using the slice strategy.
+
+    The metadata for file_name should be present in temp_file_dict prior
+    to calling. Also, the data for file_name should be present in
+    temp_file_dict if use_file is specified as False.
+
+    Args:
+      file_name: The name of the file to upload.
+      object_name: The name of the object to upload to.
+      use_file: If true, use disk I/O, otherwise read upload data from memory.
+    """
+    # Divide the file into components.
+    component_size = DivideAndCeil(self.thru_filesize, self.num_slices)
+    component_object_names = (
+        [object_name + str(i) for i in range(self.num_slices)])
+
+    args = []
+    for i in range(self.num_slices):
+      component_start = i * component_size
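+      # Clamp the final component to the remaining bytes in the file.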
+      component_size = min(component_size,
+                           temp_file_dict[file_name].size - component_start)
+      args.append(SliceUploadTuple(file_name, component_object_names[i],
+                                   use_file, component_start, component_size))
+
+    # Upload the components in parallel.
+    try:
+      self.Apply(_UploadSlice, args, _PerfdiagExceptionHandler,
+                 ('total_requests', 'request_errors'),
+                 arg_checker=DummyArgChecker, parallel_operations_override=True,
+                 process_count=self.processes, thread_count=self.threads)
+
+      # Compose the components into an object.
+      request_components = []
+      for i in range(self.num_slices):
+        src_obj_metadata = (
+            apitools_messages.ComposeRequest.SourceObjectsValueListEntry(
+                name=component_object_names[i]))
+        request_components.append(src_obj_metadata)
+
+      dst_obj_metadata = apitools_messages.Object()
+      dst_obj_metadata.name = object_name
+      dst_obj_metadata.bucket = self.bucket_url.bucket_name
+      def _Compose():
+        self.gsutil_api.ComposeObject(request_components, dst_obj_metadata,
+                                      provider=self.provider)
+      self._RunOperation(_Compose)
+    finally:
+      # Delete the temporary components.
+      self.Apply(_DeleteWrapper, component_object_names,
+                 _PerfdiagExceptionHandler,
+                 ('total_requests', 'request_errors'),
+                 arg_checker=DummyArgChecker, parallel_operations_override=True,
+                 process_count=self.processes, thread_count=self.threads)
+
+  def _RunReadThruTests(self, use_file=False):
     """Runs read throughput tests."""
+    test_name = 'read_throughput_file' if use_file else 'read_throughput'
+    file_io_string = 'with file I/O' if use_file else ''
     self.logger.info(
-        '\nRunning read throughput tests (%s iterations of size %s)' %
-        (self.num_iterations, MakeHumanReadable(self.thru_filesize)))
+        '\nRunning read throughput tests %s (%s objects of size %s)' %
+        (file_io_string, self.num_objects,
+         MakeHumanReadable(self.thru_filesize)))
+    self._WarnIfLargeData()
 
-    self.results['read_throughput'] = {'file_size': self.thru_filesize,
-                                       'num_times': self.num_iterations,
-                                       'processes': self.processes,
-                                       'threads': self.threads}
+    self.results[test_name] = {'file_size': self.thru_filesize,
+                               'processes': self.processes,
+                               'threads': self.threads,
+                               'parallelism': self.parallel_strategy
+                              }
 
-    # Copy the TCP warmup file.
-    warmup_url = self.bucket_url.Clone()
-    warmup_url.object_name = os.path.basename(self.tcp_warmup_file)
-    warmup_target = StorageUrlToUploadObjectMetadata(warmup_url)
-    self.test_object_names.add(warmup_url.object_name)
+    # Copy the file(s) to the test bucket, and also get the serialization data
+    # so that we can pass it to download.
+    if use_file:
+      # For tests with file I/O, use N files on disk to preserve seek performance.
+      file_names = self.thru_file_names
+      object_names = self.thru_object_names
+      serialization_data = []
+      for i in range(self.num_objects):
+        self.temporary_objects.add(self.thru_object_names[i])
+        if self.WTHRU_FILE in self.diag_tests:
+          # If we ran the WTHRU_FILE test, then the objects already exist.
+          obj_metadata = self.gsutil_api.GetObjectMetadata(
+              self.bucket_url.bucket_name, self.thru_object_names[i],
+              fields=['size', 'mediaLink'], provider=self.bucket_url.scheme)
+        else:
+          obj_metadata = self.Upload(self.thru_file_names[i],
+                                     self.thru_object_names[i], use_file)
 
-    def _Upload1():
-      self.gsutil_api.UploadObject(
-          cStringIO.StringIO(self.file_contents[self.tcp_warmup_file]),
-          warmup_target, provider=self.provider, fields=['name'])
-    self._RunOperation(_Upload1)
-
-    # Copy the file to remote location before reading.
-    thru_url = self.bucket_url.Clone()
-    thru_url.object_name = os.path.basename(self.thru_local_file)
-    thru_target = StorageUrlToUploadObjectMetadata(thru_url)
-    thru_target.md5Hash = self.file_md5s[self.thru_local_file]
-    self.test_object_names.add(thru_url.object_name)
-
-    # Get the mediaLink here so that we can pass it to download.
-    def _Upload2():
-      return self.gsutil_api.UploadObject(
-          cStringIO.StringIO(self.file_contents[self.thru_local_file]),
-          thru_target, provider=self.provider, size=self.thru_filesize,
-          fields=['name', 'mediaLink', 'size'])
-
-    # Get the metadata for the object so that we are just measuring performance
-    # on the actual bytes transfer.
-    download_metadata = self._RunOperation(_Upload2)
-    serialization_dict = GetDownloadSerializationDict(download_metadata)
-    serialization_data = json.dumps(serialization_dict)
-
-    if self.processes == 1 and self.threads == 1:
-
-      # Warm up the TCP connection.
-      def _Warmup():
-        self.gsutil_api.GetObjectMedia(warmup_url.bucket_name,
-                                       warmup_url.object_name,
-                                       self.discard_sink,
-                                       provider=self.provider)
-      self._RunOperation(_Warmup)
-
-      times = []
-
-      def _Download():
-        t0 = time.time()
-        self.gsutil_api.GetObjectMedia(
-            thru_url.bucket_name, thru_url.object_name, self.discard_sink,
-            provider=self.provider, serialization_data=serialization_data)
-        t1 = time.time()
-        times.append(t1 - t0)
-      for _ in range(self.num_iterations):
-        self._RunOperation(_Download)
-      time_took = sum(times)
+        # Overwriting an existing file causes performance issues with sliced
+        # downloads. Delete the file and recreate it empty before downloading,
+        # which matches what a real download would do.
+        os.unlink(self.thru_file_names[i])
+        open(self.thru_file_names[i], 'ab').close()
+        serialization_data.append(GetDownloadSerializationData(obj_metadata))
     else:
-      args = ([(thru_url.bucket_name, thru_url.object_name, serialization_data)]
-              * self.num_iterations)
-      self.logger.addFilter(self._CpFilter())
+      # For the in-memory test, use only one file but copy it num_objects
+      # times, to allow scalability in num_objects.
+      self.temporary_objects.add(self.mem_thru_object_name)
+      obj_metadata = self.Upload(self.mem_thru_file_name,
+                                 self.mem_thru_object_name, use_file)
+      file_names = None
+      object_names = [self.mem_thru_object_name] * self.num_objects
+      serialization_data = (
+          [GetDownloadSerializationData(obj_metadata)] * self.num_objects)
 
-      t0 = time.time()
-      self.Apply(_DownloadWrapper,
-                 args,
-                 _PerfdiagExceptionHandler,
-                 arg_checker=DummyArgChecker,
-                 parallel_operations_override=True,
-                 process_count=self.processes,
-                 thread_count=self.threads)
-      t1 = time.time()
-      time_took = t1 - t0
+    # Warm up the TCP connection.
+    warmup_obj_name = os.path.basename(self.tcp_warmup_file)
+    self.temporary_objects.add(warmup_obj_name)
+    self.Upload(self.tcp_warmup_file, warmup_obj_name)
+    self.Download(warmup_obj_name)
 
-    total_bytes_copied = self.thru_filesize * self.num_iterations
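+    # Time the downloads: sequentially when using a single thread and process,
+    # otherwise using the selected parallel strategy (fan, slice, or both).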
+    t0 = time.time()
+    if self.processes == 1 and self.threads == 1:
+      for i in range(self.num_objects):
+        file_name = file_names[i] if use_file else None
+        self.Download(object_names[i], file_name, serialization_data[i])
+    else:
+      if self.parallel_strategy in (self.FAN, self.BOTH):
+        need_to_slice = (self.parallel_strategy == self.BOTH)
+        self.PerformFannedDownload(need_to_slice, object_names, file_names,
+                                   serialization_data)
+      elif self.parallel_strategy == self.SLICE:
+        for i in range(self.num_objects):
+          file_name = file_names[i] if use_file else None
+          self.PerformSlicedDownload(
+              object_names[i], file_name, serialization_data[i])
+    t1 = time.time()
+
+    time_took = t1 - t0
+    total_bytes_copied = self.thru_filesize * self.num_objects
     bytes_per_second = total_bytes_copied / time_took
 
-    self.results['read_throughput']['time_took'] = time_took
-    self.results['read_throughput']['total_bytes_copied'] = total_bytes_copied
-    self.results['read_throughput']['bytes_per_second'] = bytes_per_second
+    self.results[test_name]['time_took'] = time_took
+    self.results[test_name]['total_bytes_copied'] = total_bytes_copied
+    self.results[test_name]['bytes_per_second'] = bytes_per_second
 
-  def _RunWriteThruTests(self):
+  def _RunWriteThruTests(self, use_file=False):
     """Runs write throughput tests."""
+    test_name = 'write_throughput_file' if use_file else 'write_throughput'
+    file_io_string = 'with file I/O' if use_file else ''
     self.logger.info(
-        '\nRunning write throughput tests (%s iterations of size %s)' %
-        (self.num_iterations, MakeHumanReadable(self.thru_filesize)))
+        '\nRunning write throughput tests %s (%s objects of size %s)' %
+        (file_io_string, self.num_objects,
+         MakeHumanReadable(self.thru_filesize)))
+    self._WarnIfLargeData()
 
-    self.results['write_throughput'] = {'file_size': self.thru_filesize,
-                                        'num_copies': self.num_iterations,
-                                        'processes': self.processes,
-                                        'threads': self.threads}
+    self.results[test_name] = {'file_size': self.thru_filesize,
+                               'processes': self.processes,
+                               'threads': self.threads,
+                               'parallelism': self.parallel_strategy}
 
-    warmup_url = self.bucket_url.Clone()
-    warmup_url.object_name = os.path.basename(self.tcp_warmup_file)
-    warmup_target = StorageUrlToUploadObjectMetadata(warmup_url)
-    self.test_object_names.add(warmup_url.object_name)
+    # Warm up the TCP connection.
+    warmup_obj_name = os.path.basename(self.tcp_warmup_file)
+    self.temporary_objects.add(warmup_obj_name)
+    self.Upload(self.tcp_warmup_file, warmup_obj_name)
 
-    thru_url = self.bucket_url.Clone()
-    thru_url.object_name = os.path.basename(self.thru_local_file)
-    thru_target = StorageUrlToUploadObjectMetadata(thru_url)
-    thru_tuples = []
-    for i in xrange(self.num_iterations):
-      # Create a unique name for each uploaded object.  Otherwise,
-      # the XML API would fail when trying to non-atomically get metadata
-      # for the object that gets blown away by the overwrite.
-      remote_object_name = thru_target.name + str(i)
-      self.test_object_names.add(remote_object_name)
-      thru_tuples.append(UploadObjectTuple(thru_target.bucket,
-                                           remote_object_name,
-                                           filepath=self.thru_local_file))
-
-    if self.processes == 1 and self.threads == 1:
-      # Warm up the TCP connection.
-      def _Warmup():
-        self.gsutil_api.UploadObject(
-            cStringIO.StringIO(self.file_contents[self.tcp_warmup_file]),
-            warmup_target, provider=self.provider, size=self.thru_filesize,
-            fields=['name'])
-      self._RunOperation(_Warmup)
-
-      times = []
-
-      for i in xrange(self.num_iterations):
-        thru_tuple = thru_tuples[i]
-        def _Upload():
-          """Uploads the write throughput measurement object."""
-          upload_target = apitools_messages.Object(
-              bucket=thru_tuple.bucket_name, name=thru_tuple.object_name,
-              md5Hash=thru_tuple.md5)
-          io_fp = cStringIO.StringIO(self.file_contents[self.thru_local_file])
-          t0 = time.time()
-          if self.thru_filesize < ResumableThreshold():
-            self.gsutil_api.UploadObject(
-                io_fp, upload_target, provider=self.provider,
-                size=self.thru_filesize, fields=['name'])
-          else:
-            self.gsutil_api.UploadObjectResumable(
-                io_fp, upload_target, provider=self.provider,
-                size=self.thru_filesize, fields=['name'],
-                tracker_callback=_DummyTrackerCallback)
-
-          t1 = time.time()
-          times.append(t1 - t0)
-
-        self._RunOperation(_Upload)
-      time_took = sum(times)
-
+    if use_file:
+      # For tests with file I/O, use N files on disk to preserve seek performance.
+      file_names = self.thru_file_names
+      object_names = self.thru_object_names
     else:
-      args = thru_tuples
-      t0 = time.time()
-      self.Apply(_UploadWrapper,
-                 args,
-                 _PerfdiagExceptionHandler,
-                 arg_checker=DummyArgChecker,
-                 parallel_operations_override=True,
-                 process_count=self.processes,
-                 thread_count=self.threads)
-      t1 = time.time()
-      time_took = t1 - t0
+      # For the in-memory test, use only one file but copy it num_objects
+      # times, to allow for scalability in num_objects.
+      file_names = [self.mem_thru_file_name] * self.num_objects
+      object_names = (
+          [self.mem_thru_object_name + str(i) for i in range(self.num_objects)])
 
-    total_bytes_copied = self.thru_filesize * self.num_iterations
+    for object_name in object_names:
+      self.temporary_objects.add(object_name)
+
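+    # Time the uploads: sequentially when using a single thread and process,
+    # otherwise using the selected parallel strategy (fan, slice, or both).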
+    t0 = time.time()
+    if self.processes == 1 and self.threads == 1:
+      for i in range(self.num_objects):
+        self.Upload(file_names[i], object_names[i], use_file)
+    else:
+      if self.parallel_strategy in (self.FAN, self.BOTH):
+        need_to_slice = (self.parallel_strategy == self.BOTH)
+        self.PerformFannedUpload(need_to_slice, file_names, object_names,
+                                 use_file)
+      elif self.parallel_strategy == self.SLICE:
+        for i in range(self.num_objects):
+          self.PerformSlicedUpload(file_names[i], object_names[i], use_file)
+    t1 = time.time()
+
+    time_took = t1 - t0
+    total_bytes_copied = self.thru_filesize * self.num_objects
     bytes_per_second = total_bytes_copied / time_took
 
-    self.results['write_throughput']['time_took'] = time_took
-    self.results['write_throughput']['total_bytes_copied'] = total_bytes_copied
-    self.results['write_throughput']['bytes_per_second'] = bytes_per_second
+    self.results[test_name]['time_took'] = time_took
+    self.results[test_name]['total_bytes_copied'] = total_bytes_copied
+    self.results[test_name]['bytes_per_second'] = bytes_per_second
 
   def _RunListTests(self):
     """Runs eventual consistency listing latency tests."""
-    self.results['listing'] = {'num_files': self.num_iterations}
+    self.results['listing'] = {'num_files': self.num_objects}
 
-    # Generate N random object names to put in the bucket.
+    # Generate N random objects to put into the bucket.
     list_prefix = 'gsutil-perfdiag-list-'
+    list_fpaths = []
     list_objects = []
-    for _ in xrange(self.num_iterations):
-      list_object_name = u'%s%s' % (list_prefix, os.urandom(20).encode('hex'))
-      self.test_object_names.add(list_object_name)
-      list_objects.append(list_object_name)
+    args = []
+    for _ in xrange(self.num_objects):
+      fpath = self._MakeTempFile(0, mem_data=True, mem_metadata=True,
+                                 prefix=list_prefix)
+      list_fpaths.append(fpath)
+      object_name = os.path.basename(fpath)
+      list_objects.append(object_name)
+      args.append(FanUploadTuple(False, fpath, object_name, False))
+      self.temporary_objects.add(object_name)
 
     # Add the objects to the bucket.
     self.logger.info(
-        '\nWriting %s objects for listing test...', self.num_iterations)
-    empty_md5 = CalculateB64EncodedMd5FromContents(cStringIO.StringIO(''))
-    args = [
-        UploadObjectTuple(self.bucket_url.bucket_name, name, md5=empty_md5,
-                          contents='') for name in list_objects]
-    self.Apply(_UploadWrapper, args, _PerfdiagExceptionHandler,
+        '\nWriting %s objects for listing test...', self.num_objects)
+
+    self.Apply(_UploadObject, args, _PerfdiagExceptionHandler,
                arg_checker=DummyArgChecker)
 
     list_latencies = []
@@ -743,7 +1050,7 @@
       """Lists and returns objects in the bucket. Also records latency."""
       t0 = time.time()
       objects = list(self.gsutil_api.ListObjects(
-          self.bucket_url.bucket_name, prefix=list_prefix, delimiter='/',
+          self.bucket_url.bucket_name, delimiter='/',
           provider=self.provider, fields=['items/name']))
       t1 = time.time()
       list_latencies.append(t1 - t0)
@@ -751,7 +1058,7 @@
 
     self.logger.info(
         'Listing bucket %s waiting for %s objects to appear...',
-        self.bucket_url.bucket_name, self.num_iterations)
+        self.bucket_url.bucket_name, self.num_objects)
     while expected_objects - found_objects:
       def _ListAfterUpload():
         names = _List()
@@ -771,14 +1078,15 @@
         'time_took': total_end_time - total_start_time,
     }
 
+    args = [object_name for object_name in list_objects]
     self.logger.info(
-        'Deleting %s objects for listing test...', self.num_iterations)
+        'Deleting %s objects for listing test...', self.num_objects)
     self.Apply(_DeleteWrapper, args, _PerfdiagExceptionHandler,
                arg_checker=DummyArgChecker)
 
     self.logger.info(
         'Listing bucket %s waiting for %s objects to disappear...',
-        self.bucket_url.bucket_name, self.num_iterations)
+        self.bucket_url.bucket_name, self.num_objects)
     list_latencies = []
     files_seen = []
     total_start_time = time.time()
@@ -802,45 +1110,102 @@
         'time_took': total_end_time - total_start_time,
     }
 
-  def Upload(self, thru_tuple, thread_state=None):
-    gsutil_api = GetCloudApiInstance(self, thread_state)
+  def Upload(self, file_name, object_name, use_file=False, file_start=0,
+             file_size=None):
+    """Performs an upload to the test bucket.
 
-    md5hash = thru_tuple.md5
-    contents = thru_tuple.contents
-    if thru_tuple.filepath:
-      md5hash = self.file_md5s[thru_tuple.filepath]
-      contents = self.file_contents[thru_tuple.filepath]
-
-    upload_target = apitools_messages.Object(
-        bucket=thru_tuple.bucket_name, name=thru_tuple.object_name,
-        md5Hash=md5hash)
-    file_size = len(contents)
-    if file_size < ResumableThreshold():
-      gsutil_api.UploadObject(
-          cStringIO.StringIO(contents), upload_target,
-          provider=self.provider, size=file_size, fields=['name'])
-    else:
-      gsutil_api.UploadObjectResumable(
-          cStringIO.StringIO(contents), upload_target,
-          provider=self.provider, size=file_size, fields=['name'],
-          tracker_callback=_DummyTrackerCallback)
-
-  def Download(self, download_tuple, thread_state=None):
-    """Downloads a file.
+    The file is uploaded to the bucket referred to by self.bucket_url, and has
+    name object_name.
 
     Args:
-      download_tuple: (bucket name, object name, serialization data for object).
-      thread_state: gsutil Cloud API instance to use for the download.
-    """
-    gsutil_api = GetCloudApiInstance(self, thread_state)
-    gsutil_api.GetObjectMedia(
-        download_tuple[0], download_tuple[1], self.discard_sink,
-        provider=self.provider, serialization_data=download_tuple[2])
+      file_name: The path to the local file, and the key to its entry in
+                 temp_file_dict.
+      object_name: The name of the remote object.
+      use_file: If true, use disk I/O, otherwise read everything from memory.
+      file_start: The first byte in the file to upload to the object.
+                  (should only be specified for sliced uploads)
+      file_size: The size of the file to upload.
+                 (should only be specified for sliced uploads)
 
-  def Delete(self, thru_tuple, thread_state=None):
-    gsutil_api = thread_state or self.gsutil_api
-    gsutil_api.DeleteObject(
-        thru_tuple.bucket_name, thru_tuple.object_name, provider=self.provider)
+    Returns:
+      Uploaded Object Metadata.
+    """
+    fp = None
+    if file_size is None:
+      file_size = temp_file_dict[file_name].size
+
+    upload_url = self.bucket_url.Clone()
+    upload_url.object_name = object_name
+    upload_target = StorageUrlToUploadObjectMetadata(upload_url)
+
+    try:
+      if use_file:
+        fp = FilePart(file_name, file_start, file_size)
+      else:
+        data = temp_file_dict[file_name].data[file_start:file_start+file_size]
+        fp = cStringIO.StringIO(data)
+
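+      # Files below the resumable threshold use a single-request upload;
+      # larger files use a resumable upload.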
+      def _InnerUpload():
+        if file_size < ResumableThreshold():
+          return self.gsutil_api.UploadObject(
+              fp, upload_target, provider=self.provider, size=file_size,
+              fields=['name', 'mediaLink', 'size'])
+        else:
+          return self.gsutil_api.UploadObjectResumable(
+              fp, upload_target, provider=self.provider, size=file_size,
+              fields=['name', 'mediaLink', 'size'],
+              tracker_callback=_DummyTrackerCallback)
+      return self._RunOperation(_InnerUpload)
+    finally:
+      if fp:
+        fp.close()
+
+  def Download(self, object_name, file_name=None, serialization_data=None,
+               start_byte=0, end_byte=None):
+    """Downloads an object from the test bucket.
+
+    Args:
+      object_name: The name of the object (in the test bucket) to download.
+      file_name: Optional file name to write downloaded data to. If None,
+                 downloaded data is discarded immediately.
+      serialization_data: Optional serialization data, used so that we don't
+                          have to get the metadata before downloading.
+      start_byte: The first byte in the object to download.
+                  (should only be specified for sliced downloads)
+      end_byte: The last byte in the object to download.
+                (should only be specified for sliced downloads)
+    """
+    fp = None
+    try:
+      if file_name is not None:
+        fp = open(file_name, 'r+b')
+        fp.seek(start_byte)
+      else:
+        fp = self.discard_sink
+
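+      # GetObjectMedia writes the (possibly ranged) object data into fp.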
+      def _InnerDownload():
+        self.gsutil_api.GetObjectMedia(
+            self.bucket_url.bucket_name, object_name, fp,
+            provider=self.provider, start_byte=start_byte, end_byte=end_byte,
+            serialization_data=serialization_data)
+      self._RunOperation(_InnerDownload)
+    finally:
+      if fp:
+        fp.close()
+
+  def Delete(self, object_name):
+    """Deletes an object from the test bucket.
+
+    Args:
+      object_name: The name of the object to delete.
+    """
+    try:
+      def _InnerDelete():
+        self.gsutil_api.DeleteObject(self.bucket_url.bucket_name, object_name,
+                                     provider=self.provider)
+      self._RunOperation(_InnerDelete)
+    except NotFoundException:
+      pass
 
   def _GetDiskCounters(self):
     """Retrieves disk I/O statistics for all disks.
@@ -966,7 +1331,7 @@
       sysinfo['ip_address'] = ''
     # Record the temporary directory used since it can affect performance, e.g.
     # when on a networked filesystem.
-    sysinfo['tempdir'] = tempfile.gettempdir()
+    sysinfo['tempdir'] = self.directory
 
     # Produces an RFC 2822 compliant GMT timestamp.
     sysinfo['gmt_timestamp'] = time.strftime('%a, %d %b %Y %H:%M:%S +0000',
@@ -1178,12 +1543,27 @@
       print 'Write Throughput'.center(78)
       print '-' * 78
       write_thru = self.results['write_throughput']
-      print 'Copied a %s file %d times for a total transfer size of %s.' % (
+      print 'Copied %s %s file(s) for a total transfer size of %s.' % (
+          self.num_objects,
           MakeHumanReadable(write_thru['file_size']),
-          write_thru['num_copies'],
           MakeHumanReadable(write_thru['total_bytes_copied']))
       print 'Write throughput: %s/s.' % (
           MakeBitsHumanReadable(write_thru['bytes_per_second'] * 8))
+      print 'Parallelism strategy: %s' % write_thru['parallelism']
+
+    if 'write_throughput_file' in self.results:
+      print
+      print '-' * 78
+      print 'Write Throughput With File I/O'.center(78)
+      print '-' * 78
+      write_thru_file = self.results['write_throughput_file']
+      print 'Copied %s %s file(s) for a total transfer size of %s.' % (
+          self.num_objects,
+          MakeHumanReadable(write_thru_file['file_size']),
+          MakeHumanReadable(write_thru_file['total_bytes_copied']))
+      print 'Write throughput: %s/s.' % (
+          MakeBitsHumanReadable(write_thru_file['bytes_per_second'] * 8))
+      print 'Parallelism strategy: %s' % write_thru_file['parallelism']
 
     if 'read_throughput' in self.results:
       print
@@ -1191,12 +1571,27 @@
       print 'Read Throughput'.center(78)
       print '-' * 78
       read_thru = self.results['read_throughput']
-      print 'Copied a %s file %d times for a total transfer size of %s.' % (
+      print 'Copied %s %s file(s) for a total transfer size of %s.' % (
+          self.num_objects,
           MakeHumanReadable(read_thru['file_size']),
-          read_thru['num_times'],
           MakeHumanReadable(read_thru['total_bytes_copied']))
       print 'Read throughput: %s/s.' % (
           MakeBitsHumanReadable(read_thru['bytes_per_second'] * 8))
+      print 'Parallelism strategy: %s' % read_thru['parallelism']
+
+    if 'read_throughput_file' in self.results:
+      print
+      print '-' * 78
+      print 'Read Throughput With File I/O'.center(78)
+      print '-' * 78
+      read_thru_file = self.results['read_throughput_file']
+      print 'Copied %s %s file(s) for a total transfer size of %s.' % (
+          self.num_objects,
+          MakeHumanReadable(read_thru_file['file_size']),
+          MakeHumanReadable(read_thru_file['total_bytes_copied']))
+      print 'Read throughput: %s/s.' % (
+          MakeBitsHumanReadable(read_thru_file['bytes_per_second'] * 8))
+      print 'Parallelism strategy: %s' % read_thru_file['parallelism']
 
     if 'listing' in self.results:
       print
@@ -1389,15 +1784,23 @@
   def _ParseArgs(self):
     """Parses arguments for perfdiag command."""
     # From -n.
-    self.num_iterations = 5
+    self.num_objects = 5
     # From -c.
     self.processes = 1
     # From -k.
     self.threads = 1
+    # From -p.
+    self.parallel_strategy = None
+    # From -y.
+    self.num_slices = 4
     # From -s.
     self.thru_filesize = 1048576
+    # From -d.
+    self.directory = tempfile.gettempdir()
+    # Keep track of whether or not to delete the directory upon completion.
+    self.delete_directory = False
     # From -t.
-    self.diag_tests = self.DEFAULT_DIAG_TESTS
+    self.diag_tests = set(self.DEFAULT_DIAG_TESTS)
     # From -o.
     self.output_file = None
     # From -i.
@@ -1408,7 +1811,7 @@
     if self.sub_opts:
       for o, a in self.sub_opts:
         if o == '-n':
-          self.num_iterations = self._ParsePositiveInteger(
+          self.num_objects = self._ParsePositiveInteger(
               a, 'The -n parameter must be a positive integer.')
         if o == '-c':
           self.processes = self._ParsePositiveInteger(
@@ -1416,21 +1819,32 @@
         if o == '-k':
           self.threads = self._ParsePositiveInteger(
               a, 'The -k parameter must be a positive integer.')
+        if o == '-p':
+          if a.lower() in self.PARALLEL_STRATEGIES:
+            self.parallel_strategy = a.lower()
+          else:
+            raise CommandException(
+                "'%s' is not a valid parallelism strategy." % a)
+        if o == '-y':
+          self.num_slices = self._ParsePositiveInteger(
+              a, 'The -y parameter must be a positive integer.')
         if o == '-s':
           try:
             self.thru_filesize = HumanReadableToBytes(a)
           except ValueError:
             raise CommandException('Invalid -s parameter.')
-          if self.thru_filesize > (20 * 1024 ** 3):  # Max 20 GiB.
-            raise CommandException(
-                'Maximum throughput file size parameter (-s) is 20 GiB.')
+        if o == '-d':
+          self.directory = a
+          if not os.path.exists(self.directory):
+            self.delete_directory = True
+            os.makedirs(self.directory)
         if o == '-t':
-          self.diag_tests = []
+          self.diag_tests = set()
           for test_name in a.strip().split(','):
             if test_name.lower() not in self.ALL_DIAG_TESTS:
               raise CommandException("List of test names (-t) contains invalid "
                                      "test name '%s'." % test_name)
-            self.diag_tests.append(test_name)
+            self.diag_tests.add(test_name)
         if o == '-m':
           pieces = a.split(':')
           if len(pieces) != 2:
@@ -1452,15 +1866,39 @@
             raise CommandException("Could not decode input file (-i): '%s'." %
                                    a)
           return
+
+    # If parallelism is specified, default parallelism strategy to fan.
+    if (self.processes > 1 or self.threads > 1) and not self.parallel_strategy:
+      self.parallel_strategy = self.FAN
+    elif self.processes == 1 and self.threads == 1 and self.parallel_strategy:
+      raise CommandException(
+          'Cannot specify parallelism strategy (-p) without also specifying '
+          'multiple threads and/or processes (-c and/or -k).')
+
     if not self.args:
       self.RaiseWrongNumberOfArgumentsException()
 
     self.bucket_url = StorageUrlFromString(self.args[0])
     self.provider = self.bucket_url.scheme
-    if not (self.bucket_url.IsCloudUrl() and self.bucket_url.IsBucket()):
+    if not self.bucket_url.IsCloudUrl() or not self.bucket_url.IsBucket():
       raise CommandException('The perfdiag command requires a URL that '
                              'specifies a bucket.\n"%s" is not '
                              'valid.' % self.args[0])
+
+    if (self.thru_filesize > HumanReadableToBytes('2GiB') and
+        (self.RTHRU in self.diag_tests or self.WTHRU in self.diag_tests)):
+      raise CommandException(
+          'For in-memory tests maximum file size is 2GiB. For larger file '
+          'sizes, specify rthru_file and/or wthru_file with the -t option.')
+
+    perform_slice = self.parallel_strategy in (self.SLICE, self.BOTH)
+    slice_not_available = (
+        self.provider == 's3' and
+        self.diag_tests.intersection((self.WTHRU, self.WTHRU_FILE)))
+    if perform_slice and slice_not_available:
+      raise CommandException('Sliced uploads are not available for s3. '
+                             'Use -p fan or sequential uploads for s3.')
+
     # Ensure the bucket exists.
     self.gsutil_api.GetBucket(self.bucket_url.bucket_name,
                               provider=self.bucket_url.scheme,
@@ -1487,12 +1925,14 @@
         'Base bucket URI: %s\n'
         'Number of processes: %d\n'
         'Number of threads: %d\n'
+        'Parallelism strategy: %s\n'
         'Throughput file size: %s\n'
         'Diagnostics to run: %s',
-        self.num_iterations,
+        self.num_objects,
         self.bucket_url,
         self.processes,
         self.threads,
+        self.parallel_strategy,
         MakeHumanReadable(self.thru_filesize),
         (', '.join(self.diag_tests)))
 
@@ -1516,6 +1956,12 @@
         self._RunLatencyTests()
       if self.RTHRU in self.diag_tests:
         self._RunReadThruTests()
+      # Run WTHRU_FILE before RTHRU_FILE. If data is created in WTHRU_FILE it
+      # will be used in RTHRU_FILE to save time and bandwidth.
+      if self.WTHRU_FILE in self.diag_tests:
+        self._RunWriteThruTests(use_file=True)
+      if self.RTHRU_FILE in self.diag_tests:
+        self._RunReadThruTests(use_file=True)
       if self.WTHRU in self.diag_tests:
         self._RunWriteThruTests()
       if self.LIST in self.diag_tests:
@@ -1535,6 +1981,7 @@
       self.results['gsutil_version'] = gslib.VERSION
       self.results['boto_version'] = boto.__version__
 
+      self._TearDown()
       self._DisplayResults()
     finally:
       # TODO: Install signal handlers so this is performed in response to a
@@ -1545,40 +1992,6 @@
     return 0
 
 
-class UploadObjectTuple(object):
-  """Picklable tuple with necessary metadata for an insert object call."""
-
-  def __init__(self, bucket_name, object_name, filepath=None, md5=None,
-               contents=None):
-    """Create an upload tuple.
-
-    Args:
-      bucket_name: Name of the bucket to upload to.
-      object_name: Name of the object to upload to.
-      filepath: A file path located in self.file_contents and self.file_md5s.
-      md5: The MD5 hash of the object being uploaded.
-      contents: The contents of the file to be uploaded.
-
-    Note: (contents + md5) and filepath are mutually exlusive. You may specify
-          one or the other, but not both.
-    Note: If one of contents or md5 are specified, they must both be specified.
-
-    Raises:
-      InvalidArgument: if the arguments are invalid.
-    """
-    self.bucket_name = bucket_name
-    self.object_name = object_name
-    self.filepath = filepath
-    self.md5 = md5
-    self.contents = contents
-    if filepath and (md5 or contents is not None):
-      raise InvalidArgument(
-          'Only one of filepath or (md5 + contents) may be specified.')
-    if not filepath and (not md5 or contents is None):
-      raise InvalidArgument(
-          'Both md5 and contents must be specified.')
-
-
 def StorageUrlToUploadObjectMetadata(storage_url):
   if storage_url.IsCloudUrl() and storage_url.IsObject():
     upload_target = apitools_messages.Object()
diff --git a/catapult/third_party/gsutil/gslib/commands/rb.py b/catapult/third_party/gsutil/gslib/commands/rb.py
index 9dbc7d6..6893896 100644
--- a/catapult/third_party/gsutil/gslib/commands/rb.py
+++ b/catapult/third_party/gsutil/gslib/commands/rb.py
@@ -102,10 +102,14 @@
       # with -f option if a non-existent URL listed, permission denial happens
       # while listing, etc.
       try:
-        # Need to materialize iterator results into a list to catch exceptions.
+        # Materialize iterator results into a list to catch exceptions.
         # Since this is listing buckets this list shouldn't be too large to fit
         # in memory at once.
-        blrs = list(self.WildcardIterator(url_str).IterBuckets())
+        # Also, avoid listing all fields to avoid performing unnecessary bucket
+        # metadata GETs. These would also be problematic when billing is
+        # disabled, as deletes are allowed but GetBucket is not.
+        blrs = list(
+            self.WildcardIterator(url_str).IterBuckets(bucket_fields=['id']))
       except:  # pylint: disable=bare-except
         some_failed = True
         if self.continue_on_error:
@@ -118,6 +122,7 @@
         try:
           self.gsutil_api.DeleteBucket(url.bucket_name, provider=url.scheme)
         except NotEmptyException as e:
+          some_failed = True
           if self.continue_on_error:
             continue
           elif 'VersionedBucketNotEmpty' in e.reason:
@@ -128,6 +133,7 @@
           else:
             raise
         except:  # pylint: disable=bare-except
+          some_failed = True
           if not self.continue_on_error:
             raise
         did_some_work = True
diff --git a/catapult/third_party/gsutil/gslib/commands/rm.py b/catapult/third_party/gsutil/gslib/commands/rm.py
index 721314a..3bd5e41 100644
--- a/catapult/third_party/gsutil/gslib/commands/rm.py
+++ b/catapult/third_party/gsutil/gslib/commands/rm.py
@@ -16,7 +16,9 @@
 
 from __future__ import absolute_import
 
+from gslib.cloud_api import BucketNotFoundException
 from gslib.cloud_api import NotEmptyException
+from gslib.cloud_api import NotFoundException
 from gslib.cloud_api import ServiceException
 from gslib.command import Command
 from gslib.command import GetFailureCount
@@ -139,14 +141,21 @@
   """Simple exception handler to allow post-completion status."""
   if not cls.continue_on_error:
     cls.logger.error(str(e))
-  cls.everything_removed_okay = False
+  # TODO: Use shared state to track missing bucket names when we get a
+  # BucketNotFoundException. Then improve bucket removal logic and exception
+  # messages.
+  if isinstance(e, BucketNotFoundException):
+    cls.bucket_not_found_count += 1
+    cls.logger.error(str(e))
+  else:
+    cls.op_failure_count += 1
 
 
 # pylint: disable=unused-argument
 def _RemoveFoldersExceptionHandler(cls, e):
   """When removing folders, we don't mind if none exist."""
   if (isinstance(e, CommandException.__class__) and
-      'No URLs matched' in e.message):
+      'No URLs matched' in e.message) or isinstance(e, NotFoundException):
     pass
   else:
     raise e
@@ -190,7 +199,7 @@
     """Command entry point for the rm command."""
     # self.recursion_requested is initialized in command.py (so it can be
     # checked in parent class for all commands).
-    self.continue_on_error = False
+    self.continue_on_error = self.parallel_operations
     self.read_args_from_stdin = False
     self.all_versions = False
     if self.sub_opts:
@@ -215,6 +224,12 @@
                                'least one URL.')
       url_strs = self.args
 
+    # Tracks if any deletes failed.
+    self.op_failure_count = 0
+
+    # Tracks if any buckets were missing.
+    self.bucket_not_found_count = 0
+
     bucket_urls_to_delete = []
     bucket_strings_to_delete = []
     if self.recursion_requested:
@@ -229,9 +244,6 @@
 
     self.preconditions = PreconditionsFromHeaders(self.headers or {})
 
-    # Used to track if any files failed to be removed.
-    self.everything_removed_okay = True
-
     try:
       # Expand wildcards, dirs, buckets, and bucket subdirs in URLs.
       name_expansion_iterator = NameExpansionIterator(
@@ -245,7 +257,8 @@
       # perform requests with sequential function calls in current process.
       self.Apply(_RemoveFuncWrapper, name_expansion_iterator,
                  _RemoveExceptionHandler,
-                 fail_on_error=(not self.continue_on_error))
+                 fail_on_error=(not self.continue_on_error),
+                 shared_attrs=['op_failure_count', 'bucket_not_found_count'])
 
     # Assuming the bucket has versioning enabled, url's that don't map to
     # objects should throw an error even with all_versions, since the prior
@@ -265,11 +278,16 @@
         parts = str(e).split(msg)
         if len(parts) == 2 and parts[1] in bucket_strings_to_delete:
           ResetFailureCount()
+        else:
+          raise
     except ServiceException, e:
       if not self.continue_on_error:
         raise
 
-    if not self.everything_removed_okay and not self.continue_on_error:
+    if self.bucket_not_found_count:
+      raise CommandException('Encountered non-existent bucket during listing')
+
+    if self.op_failure_count and not self.continue_on_error:
       raise CommandException('Some files could not be removed.')
 
     # If this was a gsutil rm -r command covering any bucket subdirs,
@@ -312,6 +330,11 @@
 
       BucketDeleteWithRetry()
 
+    if self.op_failure_count:
+      plural_str = 's' if self.op_failure_count > 1 else ''
+      raise CommandException('%d file%s/object%s could not be removed.' % (
+          self.op_failure_count, plural_str, plural_str))
+
     return 0
 
   def RemoveFunc(self, name_expansion_result, thread_state=None):
diff --git a/catapult/third_party/gsutil/gslib/commands/rsync.py b/catapult/third_party/gsutil/gslib/commands/rsync.py
index 4eb9b92..902cc21 100644
--- a/catapult/third_party/gsutil/gslib/commands/rsync.py
+++ b/catapult/third_party/gsutil/gslib/commands/rsync.py
@@ -31,6 +31,7 @@
 import crcmod
 
 from gslib import copy_helper
+from gslib.bucket_listing_ref import BucketListingObject
 from gslib.cloud_api import NotFoundException
 from gslib.command import Command
 from gslib.command import DummyArgChecker
@@ -253,8 +254,13 @@
      match those of the source object (it can't; timestamp setting is not
      allowed by the GCS API).
 
-  2. The gsutil rsync command ignores versioning, synchronizing only the live
-     object versions in versioned buckets.
+  2. The gsutil rsync command considers only the current object generations in
+     the source and destination buckets when deciding what to copy or delete.
+     If versioning is enabled in the destination bucket, objects that gsutil
+     rsync overwrites or deletes become archived generations, but the command
+     doesn't try to make the archived generations match in the source and
+     destination buckets.
+
 
 
 <B>OPTIONS</B>
@@ -303,7 +309,8 @@
                 and destination URLs match, skipping any sub-directories.
 
   -U            Skip objects with unsupported object types instead of failing.
-                Unsupported object types are s3 glacier objects.
+                Unsupported object types are Amazon S3 Objects in the GLACIER
+                storage class.
 
   -x pattern    Causes files/objects matching pattern to be excluded, i.e., any
                 matching files/objects will not be copied or deleted. Note that
@@ -472,6 +479,26 @@
   out_file.close()
 
 
+def _LocalDirIterator(base_url):
+  """A generator that yields a BLR for each file in a local directory.
+
+     We use this function instead of WildcardIterator for listing a local
+     directory without recursion, because the glob.iglob implementation called
+     by WildcardIterator skips "dot" files (which we don't want to do when
+     synchronizing to or from a local directory).
+
+  Args:
+    base_url: URL for the directory over which to iterate.
+
+  Yields:
+    BucketListingObject for each file in the directory.
+  """
+  for filename in os.listdir(base_url.object_name):
+    filename = os.path.join(base_url.object_name, filename)
+    if os.path.isfile(filename):
+      yield BucketListingObject(StorageUrlFromString(filename), None)
+
+
 def _FieldedListingIterator(cls, gsutil_api, base_url_str, desc):
   """Iterator over base_url_str formatting output per _BuildTmpOutputLine.
 
@@ -484,16 +511,22 @@
   Yields:
     Output line formatted per _BuildTmpOutputLine.
   """
-  if cls.recursion_requested:
-    wildcard = '%s/**' % base_url_str.rstrip('/\\')
+  base_url = StorageUrlFromString(base_url_str)
+  if base_url.scheme == 'file' and not cls.recursion_requested:
+    iterator = _LocalDirIterator(base_url)
   else:
-    wildcard = '%s/*' % base_url_str.rstrip('/\\')
+    if cls.recursion_requested:
+      wildcard = '%s/**' % base_url_str.rstrip('/\\')
+    else:
+      wildcard = '%s/*' % base_url_str.rstrip('/\\')
+    iterator = CreateWildcardIterator(
+        wildcard, gsutil_api, debug=cls.debug,
+        project_id=cls.project_id).IterObjects(
+            # Request just the needed fields, to reduce bandwidth usage.
+            bucket_listing_fields=['crc32c', 'md5Hash', 'name', 'size'])
+
   i = 0
-  for blr in CreateWildcardIterator(
-      wildcard, gsutil_api, debug=cls.debug,
-      project_id=cls.project_id).IterObjects(
-          # Request just the needed fields, to reduce bandwidth usage.
-          bucket_listing_fields=['crc32c', 'md5Hash', 'name', 'size']):
+  for blr in iterator:
     # Various GUI tools (like the GCS web console) create placeholder objects
     # ending with '/' when the user creates an empty directory. Normally these
     # tools should delete those placeholders once objects have been written
@@ -504,10 +537,8 @@
     # local directory "mydata" exists).
     url = blr.storage_url
     if IsCloudSubdirPlaceholder(url, blr=blr):
-      cls.logger.info('Skipping cloud sub-directory placeholder object (%s) '
-                      'because such objects aren\'t needed in (and would '
-                      'interfere with) directories in the local file system',
-                      url)
+      # We used to output the message 'Skipping cloud sub-directory placeholder
+      # object...' but we no longer do so because it caused customer confusion.
       continue
     if (cls.exclude_symlinks and url.IsFileUrl()
         and os.path.islink(url.object_name)):
diff --git a/catapult/third_party/gsutil/gslib/commands/stat.py b/catapult/third_party/gsutil/gslib/commands/stat.py
index 499e9af..4d215f7 100644
--- a/catapult/third_party/gsutil/gslib/commands/stat.py
+++ b/catapult/third_party/gsutil/gslib/commands/stat.py
@@ -17,6 +17,7 @@
 from __future__ import absolute_import
 
 import logging
+import sys
 
 from gslib.bucket_listing_ref import BucketListingObject
 from gslib.cloud_api import AccessDeniedException
@@ -138,14 +139,14 @@
             if logging.getLogger().isEnabledFor(logging.INFO):
               PrintFullInfoAboutObject(blr, incl_acl=False)
       except AccessDeniedException:
-        logging.info('You aren\'t authorized to read %s - skipping', url_str)
+        sys.stderr.write('You aren\'t authorized to read %s - skipping' %
+                         url_str)
       except InvalidUrlError:
         raise
       except NotFoundException:
         pass
       if not arg_matches:
-        if logging.getLogger().isEnabledFor(logging.INFO):
-          logging.info('No URLs matched %s', url_str)
+        sys.stderr.write('No URLs matched %s' % url_str)
         found_nonmatching_arg = True
     if found_nonmatching_arg:
       return 1
diff --git a/catapult/third_party/gsutil/gslib/commands/version.py b/catapult/third_party/gsutil/gslib/commands/version.py
index f07be03..90a3c4d 100644
--- a/catapult/third_party/gsutil/gslib/commands/version.py
+++ b/catapult/third_party/gsutil/gslib/commands/version.py
@@ -26,8 +26,8 @@
 import crcmod
 import gslib
 from gslib.command import Command
+from gslib.util import CheckMultiprocessingAvailableAndInit
 from gslib.util import GetConfigFilePath
-from gslib.util import MultiprocessingIsAvailable
 from gslib.util import UsingCrcmodExtension
 
 
@@ -120,7 +120,8 @@
           boto_version=boto.__version__,
           python_version=sys.version.replace('\n', ''),
           os_version='%s %s' % (platform.system(), platform.release()),
-          multiprocessing_available=MultiprocessingIsAvailable()[0],
+          multiprocessing_available=(
+              CheckMultiprocessingAvailableAndInit().is_available),
           cloud_sdk=(os.environ.get('CLOUDSDK_WRAPPER') == '1'),
           config_path=config_path,
           gsutil_path=gslib.GSUTIL_PATH,
diff --git a/catapult/third_party/gsutil/gslib/commands/web.py b/catapult/third_party/gsutil/gslib/commands/web.py
index 971cb8b..aa7708a 100644
--- a/catapult/third_party/gsutil/gslib/commands/web.py
+++ b/catapult/third_party/gsutil/gslib/commands/web.py
@@ -16,7 +16,6 @@
 
 from __future__ import absolute_import
 
-import getopt
 import sys
 
 from apitools.base.py import encoding
@@ -90,16 +89,16 @@
 
   3. Configure the bucket to have website behavior using the command:
 
-       gsutil web set -m index.html -e 404.html gs://example.com
+       gsutil web set -m index.html -e 404.html gs://www.example.com
 
   4. Add a DNS CNAME record for example.com pointing to c.storage.googleapis.com
      (ask your DNS administrator for help with this).
 
-  Now if you open a browser and navigate to http://example.com, it will display
-  the main page instead of the default bucket listing. Note: It can take time
-  for DNS updates to propagate because of caching used by the DNS, so it may
-  take up to a day for the domain-named bucket website to work after you create
-  the CNAME DNS record.
+  Now if you open a browser and navigate to http://www.example.com, it will
+  display the main page instead of the default bucket listing. Note: It can
+  take time for DNS updates to propagate because of caching used by the DNS,
+  so it may take up to a day for the domain-named bucket website to work after
+  you create the CNAME DNS record.
 
   Additional notes:
 
@@ -109,14 +108,15 @@
 
   2. The main_page_suffix applies to each subdirectory of the bucket. For
      example, with the main_page_suffix configured to be index.html, a GET
-     request for http://example.com would retrieve
-     http://example.com/index.html, and a GET request for
-     http://example.com/photos would retrieve
-     http://example.com/photos/index.html.
+     request for http://www.example.com would retrieve
+     http://www.example.com/index.html, and a GET request for
+     http://www.example.com/photos would retrieve
+     http://www.example.com/photos/index.html.
 
   3. There is just one 404.html page: For example, a GET request for
-     http://example.com/photos/missing would retrieve
-     http://example.com/404.html, not http://example.com/photos/404.html.
+     http://www.example.com/photos/missing would retrieve
+     http://www.example.com/404.html, not
+     http://www.example.com/photos/404.html.
 
   4. For additional details see
      https://developers.google.com/storage/docs/website-configuration.
diff --git a/catapult/third_party/gsutil/gslib/copy_helper.py b/catapult/third_party/gsutil/gslib/copy_helper.py
index 456b15c..aea4195 100644
--- a/catapult/third_party/gsutil/gslib/copy_helper.py
+++ b/catapult/third_party/gsutil/gslib/copy_helper.py
@@ -27,7 +27,7 @@
 import json
 import logging
 import mimetypes
-import multiprocessing
+from operator import attrgetter
 import os
 import pickle
 import random
@@ -53,10 +53,13 @@
 from gslib.cloud_api import ResumableUploadAbortException
 from gslib.cloud_api import ResumableUploadException
 from gslib.cloud_api import ResumableUploadStartOverException
-from gslib.cloud_api_helper import GetDownloadSerializationDict
+from gslib.cloud_api_helper import GetDownloadSerializationData
 from gslib.commands.compose import MAX_COMPOSE_ARITY
 from gslib.commands.config import DEFAULT_PARALLEL_COMPOSITE_UPLOAD_COMPONENT_SIZE
 from gslib.commands.config import DEFAULT_PARALLEL_COMPOSITE_UPLOAD_THRESHOLD
+from gslib.commands.config import DEFAULT_SLICED_OBJECT_DOWNLOAD_COMPONENT_SIZE
+from gslib.commands.config import DEFAULT_SLICED_OBJECT_DOWNLOAD_MAX_COMPONENTS
+from gslib.commands.config import DEFAULT_SLICED_OBJECT_DOWNLOAD_THRESHOLD
 from gslib.cs_api_map import ApiSelector
 from gslib.daisy_chain_wrapper import DaisyChainWrapper
 from gslib.exception import CommandException
@@ -65,11 +68,13 @@
 from gslib.hashing_helper import Base64EncodeHash
 from gslib.hashing_helper import CalculateB64EncodedMd5FromContents
 from gslib.hashing_helper import CalculateHashesFromContents
+from gslib.hashing_helper import CHECK_HASH_IF_FAST_ELSE_FAIL
+from gslib.hashing_helper import CHECK_HASH_NEVER
+from gslib.hashing_helper import ConcatCrc32c
 from gslib.hashing_helper import GetDownloadHashAlgs
 from gslib.hashing_helper import GetUploadHashAlgs
 from gslib.hashing_helper import HashingFileUploadWrapper
-from gslib.parallelism_framework_util import ThreadAndProcessSafeDict
-from gslib.parallelism_framework_util import ThreadSafeDict
+from gslib.parallelism_framework_util import AtomicDict
 from gslib.progress_callback import ConstructAnnounceText
 from gslib.progress_callback import FileProgressCallbackHandler
 from gslib.progress_callback import ProgressCallbackWithBackoff
@@ -77,11 +82,13 @@
 from gslib.storage_url import ContainsWildcard
 from gslib.storage_url import StorageUrlFromString
 from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
+from gslib.tracker_file import DeleteDownloadTrackerFiles
 from gslib.tracker_file import DeleteTrackerFile
 from gslib.tracker_file import GetTrackerFilePath
 from gslib.tracker_file import RaiseUnwritableTrackerFileException
 from gslib.tracker_file import ReadOrCreateDownloadTrackerFile
 from gslib.tracker_file import TrackerFileType
+from gslib.tracker_file import WriteDownloadComponentTrackerFile
 from gslib.translation_helper import AddS3MarkerAclToObjectMetadata
 from gslib.translation_helper import CopyObjectMetadata
 from gslib.translation_helper import DEFAULT_CONTENT_TYPE
@@ -89,8 +96,11 @@
 from gslib.translation_helper import ObjectMetadataFromHeaders
 from gslib.translation_helper import PreconditionsFromHeaders
 from gslib.translation_helper import S3MarkerAclFromObjectMetadata
+from gslib.util import CheckFreeSpace
+from gslib.util import CheckMultiprocessingAvailableAndInit
 from gslib.util import CreateLock
 from gslib.util import DEFAULT_FILE_BUFFER_SIZE
+from gslib.util import DivideAndCeil
 from gslib.util import GetCloudApiInstance
 from gslib.util import GetFileSize
 from gslib.util import GetJsonResumableChunkSize
@@ -102,39 +112,34 @@
 from gslib.util import IsCloudSubdirPlaceholder
 from gslib.util import MakeHumanReadable
 from gslib.util import MIN_SIZE_COMPUTE_LOGGING
-from gslib.util import MultiprocessingIsAvailable
 from gslib.util import ResumableThreshold
 from gslib.util import TEN_MIB
+from gslib.util import UsingCrcmodExtension
 from gslib.util import UTF8
 from gslib.wildcard_iterator import CreateWildcardIterator
 
 # pylint: disable=g-import-not-at-top
 if IS_WINDOWS:
   import msvcrt
-  from ctypes import c_int
-  from ctypes import c_uint64
-  from ctypes import c_char_p
-  from ctypes import c_wchar_p
-  from ctypes import windll
-  from ctypes import POINTER
-  from ctypes import WINFUNCTYPE
-  from ctypes import WinError
 
 # Declare copy_helper_opts as a global because namedtuple isn't aware of
 # assigning to a class member (which breaks pickling done by multiprocessing).
 # For details see
 # http://stackoverflow.com/questions/16377215/how-to-pickle-a-namedtuple-instance-correctly
-# Similarly can't pickle logger.
 # pylint: disable=global-at-module-level
-global global_copy_helper_opts, global_logger
+global global_copy_helper_opts
 
 # In-memory map of local files that are currently opened for write. Used to
 # ensure that if we write to the same file twice (say, for example, because the
 # user specified two identical source URLs), the writes occur serially.
-global open_files_map
+global open_files_map, open_files_lock
 open_files_map = (
-    ThreadSafeDict() if (IS_WINDOWS or not MultiprocessingIsAvailable()[0])
-    else ThreadAndProcessSafeDict(multiprocessing.Manager()))
+    AtomicDict() if not CheckMultiprocessingAvailableAndInit().is_available
+    else AtomicDict(manager=gslib.util.manager))
+
+# We don't allow multiple processes on Windows, so using a process-safe lock
+# would be unnecessary.
+open_files_lock = CreateLock()
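
The `open_files_map`/`open_files_lock` pair above is a check-and-mark guard so that two workers given identical destination files never write concurrently. Below is a minimal sketch of the same pattern using only the standard library; `AtomicDict`, `CreateLock`, and `FileConcurrencySkipError` are the gsutil names, while `_open_files`, `claim_download_file`, and `DuplicateDestinationSkip` are illustrative stand-ins.

```
# Minimal sketch of serializing writes to a shared destination file, using a
# plain dict guarded by a threading.Lock (stand-ins for AtomicDict /
# CreateLock / FileConcurrencySkipError).
import threading

_open_files = {}                    # file name -> True while a download owns it
_open_files_lock = threading.Lock()


class DuplicateDestinationSkip(Exception):
  """Raised when another worker is already writing this file."""


def claim_download_file(download_file_name):
  """Marks a destination file as in use, or raises if already claimed."""
  with _open_files_lock:
    if _open_files.get(download_file_name, False):
      raise DuplicateDestinationSkip(download_file_name)
    _open_files[download_file_name] = True


def release_download_file(download_file_name):
  """Releases the destination file once the download is finished."""
  with _open_files_lock:
    _open_files.pop(download_file_name, None)
```

A thread-level lock suffices here for the same reason the comment above gives: when only a thread-safe lock is available, multiple processes are not used for this map.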
 
 # For debugging purposes; if True, files and objects that fail hash validation
 # will be saved with the below suffix appended.
@@ -178,6 +183,15 @@
     'filename file_start file_length src_url dst_url canned_acl '
     'content_type tracker_file tracker_file_lock')
 
+PerformSlicedDownloadObjectToFileArgs = namedtuple(
+    'PerformSlicedDownloadObjectToFileArgs',
+    'component_num src_url src_obj_metadata dst_url download_file_name '
+    'start_byte end_byte')
+
+PerformSlicedDownloadReturnValues = namedtuple(
+    'PerformSlicedDownloadReturnValues',
+    'component_num crc32c bytes_transferred server_encoding')
+
 ObjectFromTracker = namedtuple('ObjectFromTracker',
                                'object_name generation')
 
@@ -188,13 +202,23 @@
 # Chunk size to use while zipping/unzipping gzip files.
 GZIP_CHUNK_SIZE = 8192
 
+# Number of bytes to wait before updating a sliced download component tracker
+# file.
+TRACKERFILE_UPDATE_THRESHOLD = TEN_MIB
+
 PARALLEL_COMPOSITE_SUGGESTION_THRESHOLD = 150 * 1024 * 1024
 
 # S3 requires special Multipart upload logic (that we currently don't implement)
 # for files > 5GiB in size.
 S3_MAX_UPLOAD_SIZE = 5 * 1024 * 1024 * 1024
 
-suggested_parallel_composites = False
+# TODO: Create a multiprocessing framework value allocator, then use it instead
+# of a dict.
+global suggested_sliced_transfers, suggested_sliced_transfers_lock
+suggested_sliced_transfers = (
+    AtomicDict() if not CheckMultiprocessingAvailableAndInit().is_available
+    else AtomicDict(manager=gslib.util.manager))
+suggested_sliced_transfers_lock = CreateLock()
 
 
 class FileConcurrencySkipError(Exception):
@@ -206,7 +230,7 @@
   cls.logger.error(str(e))
 
 
-def _ParallelUploadCopyExceptionHandler(cls, e):
+def _ParallelCopyExceptionHandler(cls, e):
   """Simple exception handler to allow post-completion status."""
   cls.logger.error(str(e))
   cls.op_failure_count += 1
@@ -248,7 +272,7 @@
       ret = _UploadFileToObject(args.src_url, fp, args.file_length,
                                 args.dst_url, dst_object_metadata,
                                 preconditions, gsutil_api, cls.logger, cls,
-                                _ParallelUploadCopyExceptionHandler,
+                                _ParallelCopyExceptionHandler,
                                 gzip_exts=None, allow_splitting=False)
     finally:
       if global_copy_helper_opts.canned_acl:
@@ -641,23 +665,23 @@
   return digests
 
 
-def _CreateDigestsFromLocalFile(logger, algs, file_name, src_obj_metadata):
+def _CreateDigestsFromLocalFile(logger, algs, file_name, final_file_name,
+                                src_obj_metadata):
   """Creates a base64 CRC32C and/or MD5 digest from file_name.
 
   Args:
-    logger: for outputting log messages.
-    algs: list of algorithms to compute.
-    file_name: file to digest.
-    src_obj_metadata: metadta of source object.
+    logger: For outputting log messages.
+    algs: List of algorithms to compute.
+    file_name: File to digest.
+    final_file_name: Permanent location to be used for the downloaded file
+                     after validation (used for logging).
+    src_obj_metadata: Metadata of source object.
 
   Returns:
     Dict of algorithm name : base 64 encoded digest
   """
   hash_dict = {}
   if 'md5' in algs:
-    if src_obj_metadata.size and src_obj_metadata.size > TEN_MIB:
-      logger.info(
-          'Computing MD5 for %s...', file_name)
     hash_dict['md5'] = md5()
   if 'crc32c' in algs:
     hash_dict['crc32c'] = crcmod.predefined.Crc('crc-32c')
@@ -666,7 +690,8 @@
         fp, hash_dict, ProgressCallbackWithBackoff(
             src_obj_metadata.size,
             FileProgressCallbackHandler(
-                ConstructAnnounceText('Hashing', file_name), logger).call))
+                ConstructAnnounceText('Hashing', final_file_name),
+                logger).call))
   digests = {}
   for alg_name, digest in hash_dict.iteritems():
     digests[alg_name] = Base64EncodeHash(digest.hexdigest())
@@ -730,7 +755,8 @@
     logger: for outputting log messages.
     obj_url: CloudUrl for cloud object.
     obj_metadata: Cloud Object being downloaded from or uploaded to.
-    file_name: Local file name on disk being downloaded to or uploaded from.
+    file_name: Local file name on disk being downloaded to or uploaded from
+               (used only for logging).
     digests: Computed Digests for the object.
     is_upload: If true, comparing for an uploaded object (controls logging).
 
@@ -985,9 +1011,10 @@
       # those that were uploaded by a previous, failed run and have since
       # changed (but still have an old generation lying around).
       objects_to_delete = components + existing_objects_to_delete
-      command_obj.Apply(_DeleteObjectFn, objects_to_delete, _RmExceptionHandler,
-                        arg_checker=gslib.command.DummyArgChecker,
-                        parallel_operations_override=True)
+      command_obj.Apply(
+          _DeleteTempComponentObjectFn, objects_to_delete, _RmExceptionHandler,
+          arg_checker=gslib.command.DummyArgChecker,
+          parallel_operations_override=True)
     except Exception:  # pylint: disable=broad-except
       # If some of the delete calls fail, don't cause the whole command to
       # fail. The copy was successful iff the compose call succeeded, so
@@ -1025,7 +1052,7 @@
   Returns:
     True iff a parallel upload should be performed on the source file.
   """
-  global suggested_parallel_composites
+  global suggested_sliced_transfers, suggested_sliced_transfers_lock
   parallel_composite_upload_threshold = HumanReadableToBytes(config.get(
       'GSUtil', 'parallel_composite_upload_threshold',
       DEFAULT_PARALLEL_COMPOSITE_UPLOAD_THRESHOLD))
@@ -1041,17 +1068,18 @@
   # TODO: Once compiled crcmod is being distributed by major Linux distributions
   # remove this check.
   if (all_factors_but_size and parallel_composite_upload_threshold == 0
-      and file_size >= PARALLEL_COMPOSITE_SUGGESTION_THRESHOLD
-      and not suggested_parallel_composites):
-    logger.info('\n'.join(textwrap.wrap(
-        '==> NOTE: You are uploading one or more large file(s), which would '
-        'run significantly faster if you enable parallel composite uploads. '
-        'This feature can be enabled by editing the '
-        '"parallel_composite_upload_threshold" value in your .boto '
-        'configuration file. However, note that if you do this you and any '
-        'users that download such composite files will need to have a compiled '
-        'crcmod installed (see "gsutil help crcmod").')) + '\n')
-    suggested_parallel_composites = True
+      and file_size >= PARALLEL_COMPOSITE_SUGGESTION_THRESHOLD):
+    with suggested_sliced_transfers_lock:
+      if not suggested_sliced_transfers.get('suggested'):
+        logger.info('\n'.join(textwrap.wrap(
+            '==> NOTE: You are uploading one or more large file(s), which '
+            'would run significantly faster if you enable parallel composite '
+            'uploads. This feature can be enabled by editing the '
+            '"parallel_composite_upload_threshold" value in your .boto '
+            'configuration file. However, note that if you do this you and any '
+            'users that download such composite files will need to have a '
+            'compiled crcmod installed (see "gsutil help crcmod").')) + '\n')
+        suggested_sliced_transfers['suggested'] = True
 
   return (all_factors_but_size
           and parallel_composite_upload_threshold > 0
@@ -1104,34 +1132,45 @@
   if storage_url.IsBucket():
     return (storage_url, True)
 
-  # For object/prefix URLs check 3 cases: (a) if the name ends with '/' treat
-  # as a subdir; otherwise, use the wildcard iterator with url to
-  # find if (b) there's a Prefix matching url, or (c) name is of form
-  # dir_$folder$ (and in both these cases also treat dir as a subdir).
-  # Cloud subdirs are always considered to be an existing container.
+  # For object/prefix URLs, there are four cases that indicate the destination
+  # is a cloud subdirectory; these are always considered to be an existing
+  # container. Checking each case allows gsutil to provide Unix-like
+  # destination folder semantics, but requires up to three HTTP calls, noted
+  # below.
+
+  # Case 1: If a placeholder object ending with '/' exists.
   if IsCloudSubdirPlaceholder(storage_url):
     return (storage_url, True)
 
-  # Check for the special case where we have a folder marker object.
-  folder_expansion = CreateWildcardIterator(
-      storage_url.versionless_url_string + '_$folder$', gsutil_api,
-      debug=debug, project_id=project_id).IterAll(
-          bucket_listing_fields=['name'])
-  for blr in folder_expansion:
-    return (storage_url, True)
-
-  blr_expansion = CreateWildcardIterator(url_str, gsutil_api,
-                                         debug=debug,
-                                         project_id=project_id).IterAll(
-                                             bucket_listing_fields=['name'])
+  # HTTP call to make an eventually consistent check for a matching prefix,
+  # _$folder$, or empty listing.
   expansion_empty = True
-  for blr in blr_expansion:
+  list_iterator = gsutil_api.ListObjects(
+      storage_url.bucket_name, prefix=storage_url.object_name, delimiter='/',
+      provider=storage_url.scheme, fields=['prefixes', 'items/name'])
+  for obj_or_prefix in list_iterator:
+    # To conserve HTTP calls for the common case, we make a single listing
+    # that covers prefixes and object names. Listing object names covers the
+    # _$folder$ case and the nonexistent-object-as-subdir case. However, if
+    # there are many existing objects for which the target URL is an exact
+    # prefix, this listing could be paginated and span multiple HTTP calls.
+    # If this case becomes common, we could heuristically abort the
+    # listing operation after the first page of results and just query for the
+    # _$folder$ object directly using GetObjectMetadata.
     expansion_empty = False
-    if blr.IsPrefix():
+
+    if obj_or_prefix.datatype == CloudApi.CsObjectOrPrefixType.PREFIX:
+      # Case 2: If there is a matching prefix when listing the destination URL.
+      return (storage_url, True)
+    elif (obj_or_prefix.datatype == CloudApi.CsObjectOrPrefixType.OBJECT and
+          obj_or_prefix.data.name == storage_url.object_name + '_$folder$'):
+      # Case 3: If a placeholder object matching destination + _$folder$
+      # exists.
       return (storage_url, True)
 
-  return (storage_url,
-          expansion_empty and treat_nonexistent_object_as_subdir)
+  # Case 4: If no objects/prefixes matched, and nonexistent objects should be
+  # treated as subdirectories.
+  return (storage_url, expansion_empty and treat_nonexistent_object_as_subdir)
 
 
 def FixWindowsNaming(src_url, dst_url):
@@ -1242,46 +1281,6 @@
           dst_obj.md5Hash)
 
 
-def _CheckFreeSpace(path):
-  """Return path/drive free space (in bytes)."""
-  if IS_WINDOWS:
-    # pylint: disable=g-import-not-at-top
-    try:
-      # pylint: disable=invalid-name
-      get_disk_free_space_ex = WINFUNCTYPE(c_int, c_wchar_p,
-                                           POINTER(c_uint64),
-                                           POINTER(c_uint64),
-                                           POINTER(c_uint64))
-      get_disk_free_space_ex = get_disk_free_space_ex(
-          ('GetDiskFreeSpaceExW', windll.kernel32), (
-              (1, 'lpszPathName'),
-              (2, 'lpFreeUserSpace'),
-              (2, 'lpTotalSpace'),
-              (2, 'lpFreeSpace'),))
-    except AttributeError:
-      get_disk_free_space_ex = WINFUNCTYPE(c_int, c_char_p,
-                                           POINTER(c_uint64),
-                                           POINTER(c_uint64),
-                                           POINTER(c_uint64))
-      get_disk_free_space_ex = get_disk_free_space_ex(
-          ('GetDiskFreeSpaceExA', windll.kernel32), (
-              (1, 'lpszPathName'),
-              (2, 'lpFreeUserSpace'),
-              (2, 'lpTotalSpace'),
-              (2, 'lpFreeSpace'),))
-
-    def GetDiskFreeSpaceExErrCheck(result, unused_func, args):
-      if not result:
-        raise WinError()
-      return args[1].value
-    get_disk_free_space_ex.errcheck = GetDiskFreeSpaceExErrCheck
-
-    return get_disk_free_space_ex(os.getenv('SystemDrive'))
-  else:
-    (_, f_frsize, _, _, f_bavail, _, _, _, _, _) = os.statvfs(path)
-    return f_frsize * f_bavail
-
-
 def _SetContentTypeFromFile(src_url, dst_obj_metadata):
   """Detects and sets Content-Type if src_url names a local file.
 
@@ -1509,7 +1508,7 @@
     # Check for temp space. Assume the compressed object is at most 2x
     # the size of the object (normally should compress to smaller than
     # the object)
-    if _CheckFreeSpace(gzip_path) < 2*int(src_obj_size):
+    if CheckFreeSpace(gzip_path) < 2*int(src_obj_size):
       raise CommandException('Inadequate temp space available to compress '
                              '%s. See the CHANGING TEMP DIRECTORIES section '
                              'of "gsutil help cp" for more info.' % src_url)
@@ -1657,30 +1656,26 @@
           uploaded_object.md5Hash)
 
 
-# TODO: Refactor this long function into smaller pieces.
-# pylint: disable=too-many-statements
-def _DownloadObjectToFile(src_url, src_obj_metadata, dst_url,
-                          gsutil_api, logger, test_method=None):
-  """Downloads an object to a local file.
+def _GetDownloadFile(dst_url, src_obj_metadata, logger):
+  """Creates a new download file, and deletes the file that will be replaced.
+
+  Names and creates a temporary file for this download. Also, if there is an
+  existing file at the path where this file will be placed after the download
+  is completed, that file will be deleted.
 
   Args:
-    src_url: Source CloudUrl.
-    src_obj_metadata: Metadata from the source object.
     dst_url: Destination FileUrl.
-    gsutil_api: gsutil Cloud API instance to use for the download.
+    src_obj_metadata: Metadata from the source object.
     logger: for outputting log messages.
-    test_method: Optional test method for modifying the file before validation
-                 during unit tests.
-  Returns:
-    (elapsed_time, bytes_transferred, dst_url, md5), excluding overhead like
-    initial GET.
 
-  Raises:
-    CommandException: if errors encountered.
+  Returns:
+    (download_file_name, need_to_unzip)
+    download_file_name: The name of the temporary file to which the object will
+                        be downloaded.
+    need_to_unzip: If true, a temporary zip file was used and must be
+                   uncompressed as part of validation.
   """
-  global open_files_map
-  file_name = dst_url.object_name
-  dir_name = os.path.dirname(file_name)
+  dir_name = os.path.dirname(dst_url.object_name)
   if dir_name and not os.path.exists(dir_name):
     # Do dir creation in try block so can ignore case where dir already
     # exists. This is needed to avoid a race condition when running gsutil
@@ -1690,120 +1685,489 @@
     except OSError, e:
       if e.errno != errno.EEXIST:
         raise
-  api_selector = gsutil_api.GetApiSelector(provider=src_url.scheme)
+
+  need_to_unzip = False
   # For gzipped objects download to a temp file and unzip. For the XML API,
-  # the represents the result of a HEAD request. For the JSON API, this is
+  # this represents the result of a HEAD request. For the JSON API, this is
   # the stored encoding which the service may not respect. However, if the
   # server sends decompressed bytes for a file that is stored compressed
   # (double compressed case), there is no way we can validate the hash and
   # we will fail our hash check for the object.
   if (src_obj_metadata.contentEncoding and
       src_obj_metadata.contentEncoding.lower().endswith('gzip')):
-    # We can't use tempfile.mkstemp() here because we need a predictable
-    # filename for resumable downloads.
-    download_file_name = _GetDownloadZipFileName(file_name)
+    need_to_unzip = True
+    download_file_name = _GetDownloadTempZipFileName(dst_url)
     logger.info(
         'Downloading to temp gzip filename %s', download_file_name)
-    need_to_unzip = True
   else:
-    download_file_name = file_name
-    need_to_unzip = False
+    download_file_name = _GetDownloadTempFileName(dst_url)
 
-  if download_file_name.endswith(dst_url.delim):
-    logger.warn('\n'.join(textwrap.wrap(
-        'Skipping attempt to download to filename ending with slash (%s). This '
-        'typically happens when using gsutil to download from a subdirectory '
-        'created by the Cloud Console (https://cloud.google.com/console)'
-        % download_file_name)))
-    return (0, 0, dst_url, '')
+  # If a file exists at the permanent destination (where the file will be moved
+  # after the download is completed), delete it here to reduce disk space
+  # requirements.
+  if os.path.exists(dst_url.object_name):
+    os.unlink(dst_url.object_name)
 
-  # Set up hash digesters.
+  # Downloads open the temporary download file in r+b mode, which requires it
+  # to already exist, so we create it here if it doesn't exist already.
+  fp = open(download_file_name, 'ab')
+  fp.close()
+  return download_file_name, need_to_unzip
+
+
+def _ShouldDoSlicedDownload(download_strategy, src_obj_metadata,
+                            allow_splitting, logger):
+  """Determines whether the sliced download strategy should be used.
+
+  Args:
+    download_strategy: CloudApi download strategy.
+    src_obj_metadata: Metadata from the source object.
+    allow_splitting: If false, then this function returns false.
+    logger: logging.Logger for log message output.
+
+  Returns:
+    True iff a sliced download should be performed on the source file.
+  """
+  sliced_object_download_threshold = HumanReadableToBytes(config.get(
+      'GSUtil', 'sliced_object_download_threshold',
+      DEFAULT_SLICED_OBJECT_DOWNLOAD_THRESHOLD))
+
+  max_components = config.getint(
+      'GSUtil', 'sliced_object_download_max_components',
+      DEFAULT_SLICED_OBJECT_DOWNLOAD_MAX_COMPONENTS)
+
+  # Don't use sliced download if it will prevent us from performing an
+  # integrity check.
+  check_hashes_config = config.get(
+      'GSUtil', 'check_hashes', CHECK_HASH_IF_FAST_ELSE_FAIL)
+  parallel_hashing = src_obj_metadata.crc32c and UsingCrcmodExtension(crcmod)
+  hashing_okay = parallel_hashing or check_hashes_config == CHECK_HASH_NEVER
+
+  use_slice = (
+      allow_splitting
+      and download_strategy is not CloudApi.DownloadStrategy.ONE_SHOT
+      and max_components > 1
+      and hashing_okay
+      and sliced_object_download_threshold > 0
+      and src_obj_metadata.size >= sliced_object_download_threshold)
+
+  if (not use_slice
+      and src_obj_metadata.size >= PARALLEL_COMPOSITE_SUGGESTION_THRESHOLD
+      and not UsingCrcmodExtension(crcmod)
+      and check_hashes_config != CHECK_HASH_NEVER):
+    with suggested_sliced_transfers_lock:
+      if not suggested_sliced_transfers.get('suggested'):
+        logger.info('\n'.join(textwrap.wrap(
+            '==> NOTE: You are downloading one or more large file(s), which '
+            'would run significantly faster if you enabled sliced object '
+            'uploads. This feature is enabled by default but requires that '
+            'compiled crcmod be installed (see "gsutil help crcmod").')) + '\n')
+        suggested_sliced_transfers['suggested'] = True
+
+  return use_slice
+
+
+def _PerformSlicedDownloadObjectToFile(cls, args, thread_state=None):
+  """Function argument to Apply for performing sliced downloads.
+
+  Args:
+    cls: Calling Command class.
+    args: PerformSlicedDownloadObjectToFileArgs tuple describing the target.
+    thread_state: gsutil Cloud API instance to use for the operation.
+
+  Returns:
+    PerformSlicedDownloadReturnValues named-tuple filled with:
+    component_num: The component number for this download.
+    crc32c: CRC32C hash value (integer) of the downloaded bytes.
+    bytes_transferred: The number of bytes transferred, potentially less
+                       than the component size if the download was resumed.
+    server_encoding: Content-encoding string if it was detected that the server
+                     sent encoded bytes during transfer, None otherwise.
+  """
+  gsutil_api = GetCloudApiInstance(cls, thread_state=thread_state)
   hash_algs = GetDownloadHashAlgs(
-      logger, src_has_md5=src_obj_metadata.md5Hash,
-      src_has_crc32c=src_obj_metadata.crc32c)
+      cls.logger, consider_crc32c=args.src_obj_metadata.crc32c)
   digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
 
-  fp = None
-  # Tracks whether the server used a gzip encoding.
-  server_encoding = None
-  download_complete = False
-  download_strategy = _SelectDownloadStrategy(dst_url)
-  download_start_point = 0
-  # This is used for resuming downloads, but also for passing the mediaLink
-  # and size into the download for new downloads so that we can avoid
-  # making an extra HTTP call.
-  serialization_data = None
-  serialization_dict = GetDownloadSerializationDict(src_obj_metadata)
-  open_files = []
+  (bytes_transferred, server_encoding) = (
+      _DownloadObjectToFileResumable(args.src_url, args.src_obj_metadata,
+                                     args.dst_url, args.download_file_name,
+                                     gsutil_api, cls.logger, digesters,
+                                     component_num=args.component_num,
+                                     start_byte=args.start_byte,
+                                     end_byte=args.end_byte))
+
+  crc32c_val = None
+  if 'crc32c' in digesters:
+    crc32c_val = digesters['crc32c'].crcValue
+  return PerformSlicedDownloadReturnValues(
+      args.component_num, crc32c_val, bytes_transferred, server_encoding)
+
+
+def _MaintainSlicedDownloadTrackerFiles(src_obj_metadata, dst_url,
+                                        download_file_name, logger,
+                                        api_selector, num_components):
+  """Maintains sliced download tracker files in order to permit resumability.
+
+  Reads or creates a sliced download tracker file representing this object
+  download. Upon an attempt at cross-process resumption, the contents of the
+  sliced download tracker file are verified to make sure a resumption is
+  possible and appropriate. In the case that a resumption should not be
+  attempted, existing component tracker files are deleted (to prevent child
+  processes from attempting resumption), and a new sliced download tracker
+  file is created.
+
+  Args:
+    src_obj_metadata: Metadata from the source object. Must include etag and
+                      generation.
+    dst_url: Destination FileUrl.
+    download_file_name: Temporary file name to be used for the download.
+    logger: for outputting log messages.
+    api_selector: The Cloud API implementation used.
+    num_components: The number of components to perform this download with.
+  """
+  assert src_obj_metadata.etag
+  tracker_file = None
+
+  # This can only happen if the resumable threshold is set higher than the
+  # parallel transfer threshold.
+  if src_obj_metadata.size < ResumableThreshold():
+    return
+
+  tracker_file_name = GetTrackerFilePath(dst_url,
+                                         TrackerFileType.SLICED_DOWNLOAD,
+                                         api_selector)
+
+  # Check to see if we should attempt resuming the download.
   try:
-    if download_strategy is CloudApi.DownloadStrategy.ONE_SHOT:
-      fp = open(download_file_name, 'wb')
-    elif download_strategy is CloudApi.DownloadStrategy.RESUMABLE:
-      # If this is a resumable download, we need to open the file for append and
-      # manage a tracker file.
-      if open_files_map.get(download_file_name, False):
-        # Ensure another process/thread is not already writing to this file.
-        raise FileConcurrencySkipError
-      open_files.append(download_file_name)
-      open_files_map[download_file_name] = True
-      fp = open(download_file_name, 'ab')
-
-      resuming = ReadOrCreateDownloadTrackerFile(
-          src_obj_metadata, dst_url, api_selector)
-      if resuming:
-        # Find out how far along we are so we can request the appropriate
-        # remaining range of the object.
-        existing_file_size = GetFileSize(fp, position_to_eof=True)
-        if existing_file_size > src_obj_metadata.size:
-          DeleteTrackerFile(GetTrackerFilePath(
-              dst_url, TrackerFileType.DOWNLOAD, api_selector))
-          raise CommandException(
-              '%s is larger (%d) than %s (%d).\nDeleting tracker file, so '
-              'if you re-try this download it will start from scratch' %
-              (download_file_name, existing_file_size, src_url.object_name,
-               src_obj_metadata.size))
-        else:
-          if existing_file_size == src_obj_metadata.size:
-            logger.info(
-                'Download already complete for file %s, skipping download but '
-                'will run integrity checks.', download_file_name)
-            download_complete = True
-          else:
-            download_start_point = existing_file_size
-            serialization_dict['progress'] = download_start_point
-            logger.info('Resuming download for %s', src_url.url_string)
-          # Catch up our digester with the hash data.
-          if existing_file_size > TEN_MIB:
-            for alg_name in digesters:
-              logger.info(
-                  'Catching up %s for %s', alg_name, download_file_name)
-          with open(download_file_name, 'rb') as hash_fp:
-            while True:
-              data = hash_fp.read(DEFAULT_FILE_BUFFER_SIZE)
-              if not data:
-                break
-              for alg_name in digesters:
-                digesters[alg_name].update(data)
+    fp = open(download_file_name, 'rb')
+    existing_file_size = GetFileSize(fp)
+    # A parallel resumption should be attempted only if the destination file
+    # size is exactly the same as the source size and the tracker file matches.
+    if existing_file_size == src_obj_metadata.size:
+      tracker_file = open(tracker_file_name, 'r')
+      tracker_file_data = json.load(tracker_file)
+      if (tracker_file_data['etag'] == src_obj_metadata.etag and
+          tracker_file_data['generation'] == src_obj_metadata.generation and
+          tracker_file_data['num_components'] == num_components):
+        return
       else:
-        # Starting a new download, blow away whatever is already there.
-        fp.truncate(0)
-        fp.seek(0)
+        tracker_file.close()
+        logger.warn('Sliced download tracker file doesn\'t match for '
+                    'download of %s. Restarting download from scratch.' %
+                    dst_url.object_name)
 
+  except (IOError, ValueError) as e:
+    # Ignore non-existent file (happens first time a download
+    # is attempted on an object), but warn user for other errors.
+    if isinstance(e, ValueError) or e.errno != errno.ENOENT:
+      logger.warn('Couldn\'t read sliced download tracker file (%s): %s. '
+                  'Restarting download from scratch.' %
+                  (tracker_file_name, str(e)))
+  finally:
+    if fp:
+      fp.close()
+    if tracker_file:
+      tracker_file.close()
+
+  # Delete component tracker files to guarantee download starts from scratch.
+  DeleteDownloadTrackerFiles(dst_url, api_selector)
+
+  # Create a new sliced download tracker file to represent this download.
+  try:
+    with open(tracker_file_name, 'w') as tracker_file:
+      tracker_file_data = {'etag': src_obj_metadata.etag,
+                           'generation': src_obj_metadata.generation,
+                           'num_components': num_components}
+      tracker_file.write(json.dumps(tracker_file_data))
+  except IOError as e:
+    RaiseUnwritableTrackerFileException(tracker_file_name, e.strerror)
+
+
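As a condensed illustration of the resume-or-restart decision described in `_MaintainSlicedDownloadTrackerFiles` above, the sketch below checks a JSON tracker against the current etag, generation, and component count. It uses plain file paths in place of `GetTrackerFilePath` and omits the existing-file-size comparison that the real code performs first, so treat it as an outline rather than the actual helper.

```
# Sketch of the sliced-download tracker check (assumed, simplified form).
import json


def should_resume_sliced_download(tracker_path, etag, generation,
                                  num_components):
  """Returns True only when the saved tracker matches the current request."""
  try:
    with open(tracker_path, 'r') as fp:
      saved = json.load(fp)
  except (IOError, OSError, ValueError):
    return False              # missing or unreadable tracker: start over
  return (saved.get('etag') == etag and
          saved.get('generation') == generation and
          saved.get('num_components') == num_components)


def write_sliced_download_tracker(tracker_path, etag, generation,
                                  num_components):
  """Records the parameters of a fresh sliced download."""
  with open(tracker_path, 'w') as fp:
    json.dump({'etag': etag,
               'generation': generation,
               'num_components': num_components}, fp)
```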
+class SlicedDownloadFileWrapper(object):
+  """Wraps a file object to be used in GetObjectMedia for sliced downloads.
+
+  In order to allow resumability, the file object used by each thread in a
+  sliced object download should be wrapped using SlicedDownloadFileWrapper.
+  Passing a SlicedDownloadFileWrapper object to GetObjectMedia will allow the
+  download component tracker file for this component to be updated periodically,
+  while the downloaded bytes are normally written to file.
+  """
+
+  def __init__(self, fp, tracker_file_name, src_obj_metadata, start_byte,
+               end_byte):
+    """Initializes the SlicedDownloadFileWrapper.
+
+    Args:
+      fp: The already-open file object to be used for writing in
+          GetObjectMedia. Data will be written to file starting at the current
+          seek position.
+      tracker_file_name: The name of the tracker file for this component.
+      src_obj_metadata: Metadata from the source object. Must include etag and
+                        generation.
+      start_byte: The first byte to be downloaded for this parallel component.
+      end_byte: The last byte to be downloaded for this parallel component.
+    """
+    self._orig_fp = fp
+    self._tracker_file_name = tracker_file_name
+    self._src_obj_metadata = src_obj_metadata
+    self._last_tracker_file_byte = None
+    self._start_byte = start_byte
+    self._end_byte = end_byte
+
+  def write(self, data):  # pylint: disable=invalid-name
+    current_file_pos = self._orig_fp.tell()
+    assert (self._start_byte <= current_file_pos and
+            current_file_pos + len(data) <= self._end_byte + 1)
+
+    self._orig_fp.write(data)
+    current_file_pos = self._orig_fp.tell()
+
+    threshold = TRACKERFILE_UPDATE_THRESHOLD
+    if (self._last_tracker_file_byte is None or
+        current_file_pos - self._last_tracker_file_byte > threshold or
+        current_file_pos == self._end_byte + 1):
+      WriteDownloadComponentTrackerFile(
+          self._tracker_file_name, self._src_obj_metadata, current_file_pos)
+      self._last_tracker_file_byte = current_file_pos
+
+  def seek(self, offset, whence=os.SEEK_SET):  # pylint: disable=invalid-name
+    if whence == os.SEEK_END:
+      self._orig_fp.seek(offset + self._end_byte + 1)
     else:
-      raise CommandException('Invalid download strategy %s chosen for'
-                             'file %s' % (download_strategy, fp.name))
+      self._orig_fp.seek(offset, whence)
+    assert self._start_byte <= self._orig_fp.tell() <= self._end_byte + 1
 
-    if not dst_url.IsStream():
-      serialization_data = json.dumps(serialization_dict)
+  def tell(self):  # pylint: disable=invalid-name
+    return self._orig_fp.tell()
+
+  def flush(self):  # pylint: disable=invalid-name
+    self._orig_fp.flush()
+
+  def close(self):  # pylint: disable=invalid-name
+    if self._orig_fp:
+      self._orig_fp.close()
+
+
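The essential behavior of `SlicedDownloadFileWrapper` above is "write through, and persist progress roughly every ten MiB (TRACKERFILE_UPDATE_THRESHOLD) and at the final byte". A stripped-down sketch of that idea, with a plain callback standing in for `WriteDownloadComponentTrackerFile`:

```
# Simplified checkpointing writer; `checkpoint` stands in for
# WriteDownloadComponentTrackerFile and receives the current file offset.
import io


class CheckpointingWriter(object):

  def __init__(self, fp, checkpoint, end_byte, threshold=10 * 1024 * 1024):
    self._fp = fp
    self._checkpoint = checkpoint     # callable(current_offset)
    self._end_byte = end_byte         # last byte of this component (inclusive)
    self._threshold = threshold       # bytes written between checkpoints
    self._last_checkpointed = None

  def write(self, data):
    self._fp.write(data)
    pos = self._fp.tell()
    if (self._last_checkpointed is None or
        pos - self._last_checkpointed > self._threshold or
        pos == self._end_byte + 1):
      self._checkpoint(pos)
      self._last_checkpointed = pos


# Example: record every checkpointed offset while writing 16 bytes.
progress = []
writer = CheckpointingWriter(io.BytesIO(), progress.append, end_byte=15)
writer.write(b'0123456789abcdef')
assert progress == [16]
```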
+def _PartitionObject(src_url, src_obj_metadata, dst_url,
+                     download_file_name):
+  """Partitions an object into components to be downloaded.
+
+  Each component is a byte range of the object. The byte ranges
+  of the returned components are mutually exclusive and collectively
+  exhaustive. The byte ranges are inclusive at both end points.
+
+  Args:
+    src_url: Source CloudUrl.
+    src_obj_metadata: Metadata from the source object.
+    dst_url: Destination FileUrl.
+    download_file_name: Temporary file name to be used for the download.
+
+  Returns:
+    components_to_download: A list of PerformSlicedDownloadObjectToFileArgs
+                            to be used in Apply for the sliced download.
+  """
+  sliced_download_component_size = HumanReadableToBytes(
+      config.get('GSUtil', 'sliced_object_download_component_size',
+                 DEFAULT_SLICED_OBJECT_DOWNLOAD_COMPONENT_SIZE))
+
+  max_components = config.getint(
+      'GSUtil', 'sliced_object_download_max_components',
+      DEFAULT_SLICED_OBJECT_DOWNLOAD_MAX_COMPONENTS)
+
+  num_components, component_size = _GetPartitionInfo(
+      src_obj_metadata.size, max_components, sliced_download_component_size)
+
+  components_to_download = []
+  component_lengths = []
+  for i in range(num_components):
+    start_byte = i * component_size
+    end_byte = min((i + 1) * (component_size) - 1, src_obj_metadata.size - 1)
+    component_lengths.append(end_byte - start_byte + 1)
+    components_to_download.append(
+        PerformSlicedDownloadObjectToFileArgs(
+            i, src_url, src_obj_metadata, dst_url, download_file_name,
+            start_byte, end_byte))
+  return components_to_download, component_lengths
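
To make the partitioning above concrete: the ranges are inclusive on both ends and tile the object exactly once. `_GetPartitionInfo` itself is not shown in this hunk, so `_get_partition_info` below is an assumed stand-in (ceil-divide the size into at most `max_components` roughly equal slices); only the range arithmetic mirrors the loop above.

```
# Worked example of the sliced-download byte-range math.
def _get_partition_info(file_size, max_components, default_component_size):
  # Assumed behavior: cap the component count, then split as evenly as possible.
  num_components = min(
      max_components,
      (file_size + default_component_size - 1) // default_component_size)
  num_components = max(num_components, 1)
  component_size = (file_size + num_components - 1) // num_components
  return num_components, component_size


def partition_byte_ranges(file_size, max_components, default_component_size):
  """Returns inclusive (start_byte, end_byte) pairs covering the object."""
  num_components, component_size = _get_partition_info(
      file_size, max_components, default_component_size)
  ranges = []
  for i in range(num_components):
    start_byte = i * component_size
    end_byte = min((i + 1) * component_size - 1, file_size - 1)
    ranges.append((start_byte, end_byte))
  return ranges


# A 250 MiB object with 50 MiB components and room for 8 slices gives 5 ranges:
# (0, 52428799), (52428800, 104857599), ..., (209715200, 262143999).
MIB = 1024 * 1024
print(partition_byte_ranges(250 * MIB, 8, 50 * MIB))
```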
+
+
+def _DoSlicedDownload(src_url, src_obj_metadata, dst_url, download_file_name,
+                      command_obj, logger, copy_exception_handler,
+                      api_selector):
+  """Downloads a cloud object to a local file using sliced download.
+
+  Byte ranges are decided for each thread/process, and then the parts are
+  downloaded in parallel.
+
+  Args:
+    src_url: Source CloudUrl.
+    src_obj_metadata: Metadata from the source object.
+    dst_url: Destination FileUrl.
+    download_file_name: Temporary file name to be used for download.
+    command_obj: command object for use in Apply in sliced downloads.
+    logger: for outputting log messages.
+    copy_exception_handler: For handling copy exceptions during Apply.
+    api_selector: The Cloud API implementation used.
+
+  Returns:
+    (bytes_transferred, crc32c)
+    bytes_transferred: Number of bytes transferred from server this call.
+    crc32c: a crc32c hash value (integer) for the downloaded bytes, or None if
+            crc32c hashing wasn't performed.
+  """
+  components_to_download, component_lengths = _PartitionObject(
+      src_url, src_obj_metadata, dst_url, download_file_name)
+
+  num_components = len(components_to_download)
+  _MaintainSlicedDownloadTrackerFiles(src_obj_metadata, dst_url,
+                                      download_file_name, logger,
+                                      api_selector, num_components)
+
+  # Resize the download file so each child process can seek to its start byte.
+  with open(download_file_name, 'ab') as fp:
+    fp.truncate(src_obj_metadata.size)
+
+  cp_results = command_obj.Apply(
+      _PerformSlicedDownloadObjectToFile, components_to_download,
+      copy_exception_handler, arg_checker=gslib.command.DummyArgChecker,
+      parallel_operations_override=True, should_return_results=True)
+
+  if len(cp_results) < num_components:
+    raise CommandException(
+        'Some components of %s were not downloaded successfully. '
+        'Please retry this download.' % dst_url.object_name)
+
+  # Crc32c hashes have to be concatenated in the correct order.
+  cp_results = sorted(cp_results, key=attrgetter('component_num'))
+  crc32c = cp_results[0].crc32c
+  if crc32c is not None:
+    for i in range(1, num_components):
+      crc32c = ConcatCrc32c(crc32c, cp_results[i].crc32c,
+                            component_lengths[i])
+
+  bytes_transferred = 0
+  expect_gzip = (src_obj_metadata.contentEncoding and
+                 src_obj_metadata.contentEncoding.lower().endswith('gzip'))
+  for cp_result in cp_results:
+    bytes_transferred += cp_result.bytes_transferred
+    server_gzip = (cp_result.server_encoding and
+                   cp_result.server_encoding.lower().endswith('gzip'))
+    # If the server gzipped any components on the fly, we will have no chance of
+    # properly reconstructing the file.
+    if server_gzip and not expect_gzip:
+      raise CommandException(
+          'Download of %s failed because the server sent back data with an '
+          'unexpected encoding.' % dst_url.object_name)
+
+  return bytes_transferred, crc32c
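
`ConcatCrc32c` lives in `hashing_helper` and is not shown in this diff, so the snippet below only demonstrates the property the sort above relies on: a CRC-32C digest depends on byte order, so per-component checksums must be folded together in component order to reproduce the digest of the whole object. Here the ordering is shown by streaming ordered chunks into a single crcmod digester; the real code instead combines independently computed component values.

```
# Why cp_results is sorted by component_num before combining CRC-32C values.
import crcmod.predefined

chunks = [(u'component %d|' % i).encode('ascii') * 1000 for i in range(4)]
data = b''.join(chunks)

whole = crcmod.predefined.Crc('crc-32c')
whole.update(data)

in_order = crcmod.predefined.Crc('crc-32c')
for chunk in chunks:                      # component order preserved
  in_order.update(chunk)

# Streaming the components in order reproduces the whole-object digest.
assert in_order.crcValue == whole.crcValue
print('crc32c of %d bytes: %s' % (len(data), whole.hexdigest()))
```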
+
+
+def _DownloadObjectToFileResumable(src_url, src_obj_metadata, dst_url,
+                                   download_file_name, gsutil_api, logger,
+                                   digesters, component_num=None, start_byte=0,
+                                   end_byte=None):
+  """Downloads an object to a local file using the resumable strategy.
+
+  Args:
+    src_url: Source CloudUrl.
+    src_obj_metadata: Metadata from the source object.
+    dst_url: Destination FileUrl.
+    download_file_name: Temporary file name to be used for download.
+    gsutil_api: gsutil Cloud API instance to use for the download.
+    logger: for outputting log messages.
+    digesters: Digesters corresponding to the hash algorithms that will be used
+               for validation.
+    component_num: Which component of a sliced download this call is for, or
+                   None if this is not a sliced download.
+    start_byte: The first byte of a byte range for a sliced download.
+    end_byte: The last byte of a byte range for a sliced download.
+
+  Returns:
+    (bytes_transferred, server_encoding)
+    bytes_transferred: Number of bytes transferred from server this call.
+    server_encoding: Content-encoding string if it was detected that the server
+                     sent encoded bytes during transfer, None otherwise.
+  """
+  if end_byte is None:
+    end_byte = src_obj_metadata.size - 1
+  download_size = end_byte - start_byte + 1
+
+  is_sliced = component_num is not None
+  api_selector = gsutil_api.GetApiSelector(provider=src_url.scheme)
+  server_encoding = None
+
+  # Used for logging
+  download_name = dst_url.object_name
+  if is_sliced:
+    download_name += ' component %d' % component_num
+
+  try:
+    fp = open(download_file_name, 'r+b')
+    fp.seek(start_byte)
+    api_selector = gsutil_api.GetApiSelector(provider=src_url.scheme)
+    existing_file_size = GetFileSize(fp)
+
+    tracker_file_name, download_start_byte = (
+        ReadOrCreateDownloadTrackerFile(src_obj_metadata, dst_url, logger,
+                                        api_selector, start_byte,
+                                        existing_file_size, component_num))
+
+    if download_start_byte < start_byte or download_start_byte > end_byte + 1:
+      DeleteTrackerFile(tracker_file_name)
+      raise CommandException(
+          'Resumable download start point for %s is not in the correct byte '
+          'range. Deleting tracker file, so if you re-try this download it '
+          'will start from scratch' % download_name)
+
+    download_complete = (download_start_byte == start_byte + download_size)
+    resuming = (download_start_byte != start_byte) and not download_complete
+    if resuming:
+      logger.info('Resuming download for %s', download_name)
+    elif download_complete:
+      logger.info(
+          'Download already complete for %s, skipping download but '
+          'will run integrity checks.', download_name)
+
+    # This is used for resuming downloads, but also for passing the mediaLink
+    # and size into the download for new downloads so that we can avoid
+    # making an extra HTTP call.
+    serialization_data = GetDownloadSerializationData(
+        src_obj_metadata, progress=download_start_byte)
+
+    if resuming or download_complete:
+      # Catch up our digester with the hash data.
+      bytes_digested = 0
+      total_bytes_to_digest = download_start_byte - start_byte
+      hash_callback = ProgressCallbackWithBackoff(
+          total_bytes_to_digest,
+          FileProgressCallbackHandler(
+              ConstructAnnounceText('Hashing',
+                                    dst_url.url_string), logger).call)
+
+      while bytes_digested < total_bytes_to_digest:
+        bytes_to_read = min(DEFAULT_FILE_BUFFER_SIZE,
+                            total_bytes_to_digest - bytes_digested)
+        data = fp.read(bytes_to_read)
+        bytes_digested += bytes_to_read
+        for alg_name in digesters:
+          digesters[alg_name].update(data)
+        hash_callback.Progress(len(data))
+
+    elif not is_sliced:
+      # Delete file contents and start entire object download from scratch.
+      fp.truncate(0)
+      existing_file_size = 0
 
     progress_callback = FileProgressCallbackHandler(
-        ConstructAnnounceText('Downloading', dst_url.url_string),
-        logger).call
+        ConstructAnnounceText('Downloading', dst_url.url_string), logger,
+        start_byte, download_size).call
+
     if global_copy_helper_opts.test_callback_file:
       with open(global_copy_helper_opts.test_callback_file, 'rb') as test_fp:
         progress_callback = pickle.loads(test_fp.read()).call
 
-    start_time = time.time()
+    if is_sliced and src_obj_metadata.size >= ResumableThreshold():
+      fp = SlicedDownloadFileWrapper(fp, tracker_file_name, src_obj_metadata,
+                                     start_byte, end_byte)
+
     # TODO: With gzip encoding (which may occur on-the-fly and not be part of
     # the object's metadata), when we request a range to resume, it's possible
     # that the server will just resend the entire object, which means our
@@ -1811,58 +2175,186 @@
     # the local file in the case of a failed gzip hash anyway, but it would
     # be better if we actively detected this case.
     if not download_complete:
+      fp.seek(download_start_byte)
       server_encoding = gsutil_api.GetObjectMedia(
           src_url.bucket_name, src_url.object_name, fp,
-          start_byte=download_start_point, generation=src_url.generation,
-          object_size=src_obj_metadata.size,
-          download_strategy=download_strategy, provider=src_url.scheme,
-          serialization_data=serialization_data, digesters=digesters,
-          progress_callback=progress_callback)
-
-    end_time = time.time()
-
-    # If a custom test method is defined, call it here. For the copy command,
-    # test methods are expected to take one argument: an open file pointer,
-    # and are used to perturb the open file during download to exercise
-    # download error detection.
-    if test_method:
-      test_method(fp)
+          start_byte=download_start_byte, end_byte=end_byte,
+          generation=src_url.generation, object_size=src_obj_metadata.size,
+          download_strategy=CloudApi.DownloadStrategy.RESUMABLE,
+          provider=src_url.scheme, serialization_data=serialization_data,
+          digesters=digesters, progress_callback=progress_callback)
 
   except ResumableDownloadException as e:
-    logger.warning('Caught ResumableDownloadException (%s) for file %s.',
-                   e.reason, file_name)
+    logger.warning('Caught ResumableDownloadException (%s) for download of %s.',
+                   e.reason, download_name)
     raise
-
   finally:
     if fp:
       fp.close()
-    for file_name in open_files:
-      open_files_map.delete(file_name)
 
-  # If we decompressed a content-encoding gzip file on the fly, this may not
-  # be accurate, but it is the best we can do without going deep into the
-  # underlying HTTP libraries. Note that this value is only used for
-  # reporting in log messages; inaccuracy doesn't impact the integrity of the
-  # download.
-  bytes_transferred = src_obj_metadata.size - download_start_point
+  bytes_transferred = end_byte - download_start_byte + 1
+  return bytes_transferred, server_encoding
+
+
+def _DownloadObjectToFileNonResumable(src_url, src_obj_metadata, dst_url,
+                                      download_file_name, gsutil_api, logger,
+                                      digesters):
+  """Downloads an object to a local file using the non-resumable strategy.
+
+  Args:
+    src_url: Source CloudUrl.
+    src_obj_metadata: Metadata from the source object.
+    dst_url: Destination FileUrl.
+    download_file_name: Temporary file name to be used for download.
+    gsutil_api: gsutil Cloud API instance to use for the download.
+    logger: for outputting log messages.
+    digesters: Digesters corresponding to the hash algorithms that will be used
+               for validation.
+  Returns:
+    (bytes_transferred, server_encoding)
+    bytes_transferred: Number of bytes transferred from server this call.
+    server_encoding: Content-encoding string if it was detected that the server
+                     sent encoded bytes during transfer, None otherwise.
+  """
+  try:
+    fp = open(download_file_name, 'w')
+
+    # This is used to pass the mediaLink and the size into the download so that
+    # we can avoid making an extra HTTP call.
+    serialization_data = GetDownloadSerializationData(src_obj_metadata)
+
+    progress_callback = FileProgressCallbackHandler(
+        ConstructAnnounceText('Downloading', dst_url.url_string), logger).call
+
+    if global_copy_helper_opts.test_callback_file:
+      with open(global_copy_helper_opts.test_callback_file, 'rb') as test_fp:
+        progress_callback = pickle.loads(test_fp.read()).call
+
+    server_encoding = gsutil_api.GetObjectMedia(
+        src_url.bucket_name, src_url.object_name, fp,
+        generation=src_url.generation, object_size=src_obj_metadata.size,
+        download_strategy=CloudApi.DownloadStrategy.ONE_SHOT,
+        provider=src_url.scheme, serialization_data=serialization_data,
+        digesters=digesters, progress_callback=progress_callback)
+  finally:
+    if fp:
+      fp.close()
+
+  return src_obj_metadata.size, server_encoding
+
+
+def _DownloadObjectToFile(src_url, src_obj_metadata, dst_url,
+                          gsutil_api, logger, command_obj,
+                          copy_exception_handler, allow_splitting=True):
+  """Downloads an object to a local file.
+
+  Args:
+    src_url: Source CloudUrl.
+    src_obj_metadata: Metadata from the source object.
+    dst_url: Destination FileUrl.
+    gsutil_api: gsutil Cloud API instance to use for the download.
+    logger: for outputting log messages.
+    command_obj: command object for use in Apply in sliced downloads.
+    copy_exception_handler: For handling copy exceptions during Apply.
+    allow_splitting: Whether or not to allow sliced download.
+  Returns:
+    (elapsed_time, bytes_transferred, dst_url, md5), where time elapsed
+    excludes initial GET.
+
+  Raises:
+    FileConcurrencySkipError: if this download is already in progress.
+    CommandException: if other errors encountered.
+  """
+  global open_files_map, open_files_lock
+  if dst_url.object_name.endswith(dst_url.delim):
+    logger.warn('\n'.join(textwrap.wrap(
+        'Skipping attempt to download to filename ending with slash (%s). This '
+        'typically happens when using gsutil to download from a subdirectory '
+        'created by the Cloud Console (https://cloud.google.com/console)'
+        % dst_url.object_name)))
+    return (0, 0, dst_url, '')
+
+  api_selector = gsutil_api.GetApiSelector(provider=src_url.scheme)
+  download_strategy = _SelectDownloadStrategy(dst_url)
+  sliced_download = _ShouldDoSlicedDownload(
+      download_strategy, src_obj_metadata, allow_splitting, logger)
+
+  download_file_name, need_to_unzip = _GetDownloadFile(
+      dst_url, src_obj_metadata, logger)
+
+  # Ensure another process/thread is not already writing to this file.
+  with open_files_lock:
+    if open_files_map.get(download_file_name, False):
+      raise FileConcurrencySkipError
+    open_files_map[download_file_name] = True
+
+  # Set up hash digesters.
+  consider_md5 = src_obj_metadata.md5Hash and not sliced_download
+  hash_algs = GetDownloadHashAlgs(logger, consider_md5=consider_md5,
+                                  consider_crc32c=src_obj_metadata.crc32c)
+  digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
+
+  # Tracks whether the server used a gzip encoding.
+  server_encoding = None
+  download_complete = (src_obj_metadata.size == 0)
+  bytes_transferred = 0
+
+  start_time = time.time()
+  if not download_complete:
+    if sliced_download:
+      (bytes_transferred, crc32c) = (
+          _DoSlicedDownload(src_url, src_obj_metadata, dst_url,
+                            download_file_name, command_obj, logger,
+                            copy_exception_handler, api_selector))
+      if 'crc32c' in digesters:
+        digesters['crc32c'].crcValue = crc32c
+    elif download_strategy is CloudApi.DownloadStrategy.ONE_SHOT:
+      (bytes_transferred, server_encoding) = (
+          _DownloadObjectToFileNonResumable(src_url, src_obj_metadata, dst_url,
+                                            download_file_name, gsutil_api,
+                                            logger, digesters))
+    elif download_strategy is CloudApi.DownloadStrategy.RESUMABLE:
+      (bytes_transferred, server_encoding) = (
+          _DownloadObjectToFileResumable(src_url, src_obj_metadata, dst_url,
+                                         download_file_name, gsutil_api, logger,
+                                         digesters))
+    else:
+      raise CommandException('Invalid download strategy %s chosen for '
+                             'file %s' % (download_strategy,
+                                          download_file_name))
+  end_time = time.time()
+
   server_gzip = server_encoding and server_encoding.lower().endswith('gzip')
-  local_md5 = _ValidateDownloadHashes(logger, src_url, src_obj_metadata,
-                                      dst_url, need_to_unzip, server_gzip,
-                                      digesters, hash_algs, api_selector,
-                                      bytes_transferred)
+  local_md5 = _ValidateAndCompleteDownload(
+      logger, src_url, src_obj_metadata, dst_url, need_to_unzip, server_gzip,
+      digesters, hash_algs, download_file_name, api_selector, bytes_transferred)
+
+  with open_files_lock:
+    open_files_map.delete(download_file_name)
 
   return (end_time - start_time, bytes_transferred, dst_url, local_md5)
 
 
-def _GetDownloadZipFileName(file_name):
-  """Returns the file name for a temporarily compressed downloaded file."""
-  return '%s_.gztmp' % file_name
+def _GetDownloadTempZipFileName(dst_url):
+  """Returns temporary file name for a temporarily compressed download."""
+  return '%s_.gztmp' % dst_url.object_name
 
 
-def _ValidateDownloadHashes(logger, src_url, src_obj_metadata, dst_url,
-                            need_to_unzip, server_gzip, digesters, hash_algs,
-                            api_selector, bytes_transferred):
-  """Validates a downloaded file's integrity.
+def _GetDownloadTempFileName(dst_url):
+  """Returns temporary download file name for uncompressed downloads."""
+  return '%s_.gstmp' % dst_url.object_name
+
+
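The `_.gstmp` and `_.gztmp` helpers above name the temporary file, and `_ValidateAndCompleteDownload` below only moves it to its permanent path once the hashes check out. Here is a minimal sketch of that download-to-temp-then-finalize flow, assuming a caller-supplied hex CRC-32C (`expected_crc32c_hex`) and a simple delete on mismatch; the real code compares base64 digests from the object metadata and may rename rather than delete when `_RENAME_ON_HASH_MISMATCH` is set.

```
# Sketch: validate the temporary download, then expose the final name.
import os
import crcmod.predefined


def finalize_download(final_name, expected_crc32c_hex):
  temp_name = '%s_.gstmp' % final_name       # same suffix as the helper above
  digest = crcmod.predefined.Crc('crc-32c')
  with open(temp_name, 'rb') as fp:
    for block in iter(lambda: fp.read(8192), b''):
      digest.update(block)
  if digest.hexdigest().lower() != expected_crc32c_hex.lower():
    os.unlink(temp_name)                     # discard data that failed the check
    raise ValueError('crc32c mismatch for %s' % final_name)
  os.rename(temp_name, final_name)           # final path appears only when valid
```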
+def _ValidateAndCompleteDownload(logger, src_url, src_obj_metadata, dst_url,
+                                 need_to_unzip, server_gzip, digesters,
+                                 hash_algs, download_file_name,
+                                 api_selector, bytes_transferred):
+  """Validates and performs necessary operations on a downloaded file.
+
+  Validates the integrity of the downloaded file using hash_algs. If the file
+  was compressed (temporarily), the file will be decompressed. Then, if the
+  integrity of the file was successfully validated, the file will be moved
+  from its temporary download location to its permanent location on disk.
 
   Args:
     logger: For outputting log messages.
@@ -1880,6 +2372,7 @@
                hash must be recomputed from the local file.
     hash_algs: dict of {string, hash algorithm} that can be used if digesters
                don't have up-to-date digests.
+    download_file_name: Temporary file name that was used for download.
     api_selector: The Cloud API implementation used (used tracker file naming).
     bytes_transferred: Number of bytes downloaded (used for logging).
 
@@ -1887,10 +2380,10 @@
     An MD5 of the local file, if one was calculated as part of the integrity
     check.
   """
-  file_name = dst_url.object_name
-  download_file_name = (_GetDownloadZipFileName(file_name) if need_to_unzip else
-                        file_name)
+  final_file_name = dst_url.object_name
+  file_name = download_file_name
   digesters_succeeded = True
+
   for alg in digesters:
     # If we get a digester with a None algorithm, the underlying
     # implementation failed to calculate a digest, so we will need to
@@ -1903,15 +2396,14 @@
     local_hashes = _CreateDigestsFromDigesters(digesters)
   else:
     local_hashes = _CreateDigestsFromLocalFile(
-        logger, hash_algs, download_file_name, src_obj_metadata)
+        logger, hash_algs, file_name, final_file_name, src_obj_metadata)
 
   digest_verified = True
   hash_invalid_exception = None
   try:
-    _CheckHashes(logger, src_url, src_obj_metadata, download_file_name,
+    _CheckHashes(logger, src_url, src_obj_metadata, final_file_name,
                  local_hashes)
-    DeleteTrackerFile(GetTrackerFilePath(
-        dst_url, TrackerFileType.DOWNLOAD, api_selector))
+    DeleteDownloadTrackerFiles(dst_url, api_selector)
   except HashMismatchException, e:
     # If an non-gzipped object gets sent with gzip content encoding, the hash
     # we calculate will match the gzipped bytes, not the original object. Thus,
@@ -1929,34 +2421,27 @@
       hash_invalid_exception = e
       digest_verified = False
     else:
-      DeleteTrackerFile(GetTrackerFilePath(
-          dst_url, TrackerFileType.DOWNLOAD, api_selector))
+      DeleteDownloadTrackerFiles(dst_url, api_selector)
       if _RENAME_ON_HASH_MISMATCH:
-        os.rename(download_file_name,
-                  download_file_name + _RENAME_ON_HASH_MISMATCH_SUFFIX)
+        os.rename(file_name,
+                  final_file_name + _RENAME_ON_HASH_MISMATCH_SUFFIX)
       else:
-        os.unlink(download_file_name)
+        os.unlink(file_name)
       raise
 
-  if server_gzip and not need_to_unzip:
-    # Server compressed bytes on-the-fly, thus we need to rename and decompress.
-    # We can't decompress on-the-fly because prior to Python 3.2 the gzip
-    # module makes a bunch of seek calls on the stream.
-    download_file_name = _GetDownloadZipFileName(file_name)
-    os.rename(file_name, download_file_name)
-
   if need_to_unzip or server_gzip:
     # Log that we're uncompressing if the file is big enough that
     # decompressing would make it look like the transfer "stalled" at the end.
     if bytes_transferred > TEN_MIB:
       logger.info(
-          'Uncompressing downloaded tmp file to %s...', file_name)
+          'Uncompressing temporarily gzipped file to %s...', final_file_name)
 
-    # Downloaded gzipped file to a filename w/o .gz extension, so unzip.
     gzip_fp = None
     try:
-      gzip_fp = gzip.open(download_file_name, 'rb')
-      with open(file_name, 'wb') as f_out:
+      # Downloaded temporarily gzipped file, unzip to file without '_.gztmp'
+      # suffix.
+      gzip_fp = gzip.open(file_name, 'rb')
+      with open(final_file_name, 'wb') as f_out:
         data = gzip_fp.read(GZIP_CHUNK_SIZE)
         while data:
           f_out.write(data)
@@ -1972,19 +2457,19 @@
       if gzip_fp:
         gzip_fp.close()
 
-    os.unlink(download_file_name)
+    os.unlink(file_name)
+    file_name = final_file_name
 
   if not digest_verified:
     try:
       # Recalculate hashes on the unzipped local file.
-      local_hashes = _CreateDigestsFromLocalFile(logger, hash_algs, file_name,
-                                                 src_obj_metadata)
-      _CheckHashes(logger, src_url, src_obj_metadata, file_name, local_hashes)
-      DeleteTrackerFile(GetTrackerFilePath(
-          dst_url, TrackerFileType.DOWNLOAD, api_selector))
+      local_hashes = _CreateDigestsFromLocalFile(
+          logger, hash_algs, file_name, final_file_name, src_obj_metadata)
+      _CheckHashes(logger, src_url, src_obj_metadata, final_file_name,
+                   local_hashes)
+      DeleteDownloadTrackerFiles(dst_url, api_selector)
     except HashMismatchException:
-      DeleteTrackerFile(GetTrackerFilePath(
-          dst_url, TrackerFileType.DOWNLOAD, api_selector))
+      DeleteDownloadTrackerFiles(dst_url, api_selector)
       if _RENAME_ON_HASH_MISMATCH:
         os.rename(file_name,
                   file_name + _RENAME_ON_HASH_MISMATCH_SUFFIX)
@@ -1992,6 +2477,13 @@
         os.unlink(file_name)
       raise
 
+  if file_name != final_file_name:
+    # Data is still in a temporary file, so move it to a permanent location.
+    if os.path.exists(final_file_name):
+      os.unlink(final_file_name)
+    os.rename(file_name,
+              final_file_name)
+
   if 'md5' in local_hashes:
     return local_hashes['md5']
 
@@ -2123,7 +2615,7 @@
 # pylint: disable=too-many-statements
 def PerformCopy(logger, src_url, dst_url, gsutil_api, command_obj,
                 copy_exception_handler, allow_splitting=True,
-                headers=None, manifest=None, gzip_exts=None, test_method=None):
+                headers=None, manifest=None, gzip_exts=None):
   """Performs copy from src_url to dst_url, handling various special cases.
 
   Args:
@@ -2131,14 +2623,14 @@
     src_url: Source StorageUrl.
     dst_url: Destination StorageUrl.
     gsutil_api: gsutil Cloud API instance to use for the copy.
-    command_obj: command object for use in Apply in parallel composite uploads.
+    command_obj: command object for use in Apply in parallel composite uploads
+        and sliced object downloads.
     copy_exception_handler: for handling copy exceptions during Apply.
     allow_splitting: Whether to allow the file to be split into component
-                     pieces for an parallel composite upload.
+                     pieces for a parallel composite upload or download.
     headers: optional headers to use for the copy operation.
     manifest: optional manifest for tracking copy operations.
     gzip_exts: List of file extensions to gzip for uploads, if any.
-    test_method: optional test method for modifying files during unit tests.
 
   Returns:
     (elapsed_time, bytes_transferred, version-specific dst_url) excluding
@@ -2194,7 +2686,7 @@
     else:
       # Just get the fields needed to validate the download.
       src_obj_fields = ['crc32c', 'contentEncoding', 'contentType', 'etag',
-                        'mediaLink', 'md5Hash', 'size']
+                        'mediaLink', 'md5Hash', 'size', 'generation']
 
     if (src_url.scheme == 's3' and
         global_copy_helper_opts.skip_unsupported_objects):
@@ -2231,8 +2723,14 @@
     try:
       src_obj_filestream = GetStreamFromFileUrl(src_url)
     except Exception, e:  # pylint: disable=broad-except
-      raise CommandException('Error opening file "%s": %s.' % (src_url,
-                                                               e.message))
+      if command_obj.continue_on_error:
+        message = 'Error copying %s: %s' % (src_url, str(e))
+        command_obj.op_failure_count += 1
+        logger.error(message)
+        return
+      else:
+        raise CommandException('Error opening file "%s": %s.' % (src_url,
+                                                                 e.message))
     if src_url.IsStream():
       src_obj_size = None
     else:
@@ -2308,7 +2806,9 @@
   if src_url.IsCloudUrl():
     if dst_url.IsFileUrl():
       return _DownloadObjectToFile(src_url, src_obj_metadata, dst_url,
-                                   gsutil_api, logger, test_method=test_method)
+                                   gsutil_api, logger, command_obj,
+                                   copy_exception_handler,
+                                   allow_splitting=allow_splitting)
     elif copy_in_the_cloud:
       return _CopyObjToObjInTheCloud(src_url, src_obj_metadata, dst_url,
                                      dst_obj_metadata, preconditions,
@@ -2499,27 +2999,8 @@
   return url.url_string.rstrip(sep).rpartition(sep)[0]
 
 
-def _DivideAndCeil(dividend, divisor):
-  """Returns ceil(dividend / divisor).
-
-  Takes care to avoid the pitfalls of floating point arithmetic that could
-  otherwise yield the wrong result for large numbers.
-
-  Args:
-    dividend: Dividend for the operation.
-    divisor: Divisor for the operation.
-
-  Returns:
-    Quotient.
-  """
-  quotient = dividend // divisor
-  if (dividend % divisor) != 0:
-    quotient += 1
-  return quotient
-
-
 def _GetPartitionInfo(file_size, max_components, default_component_size):
-  """Gets info about a file partition for parallel composite uploads.
+  """Gets info about a file partition for parallel file/object transfers.
 
   Args:
     file_size: The number of bytes in the file to be partitioned.
@@ -2532,22 +3013,29 @@
     file_size != 0 (mod num_components)).
   """
   # num_components = ceil(file_size / default_component_size)
-  num_components = _DivideAndCeil(file_size, default_component_size)
+  num_components = DivideAndCeil(file_size, default_component_size)
 
   # num_components must be in the range [2, max_components]
   num_components = max(min(num_components, max_components), 2)
 
   # component_size = ceil(file_size / num_components)
-  component_size = _DivideAndCeil(file_size, num_components)
+  component_size = DivideAndCeil(file_size, num_components)
   return (num_components, component_size)
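
A rough illustration of the partition arithmetic above (a minimal sketch, not part of the change; it assumes _GetPartitionInfo is importable from gslib.copy_helper and uses made-up sizes):

from gslib.copy_helper import _GetPartitionInfo

ONE_MIB = 1024 * 1024

# 150 MiB file, 50 MiB default component size, at most 4 components:
# ceil(150/50) = 3 components of ceil(150/3) = 50 MiB each.
print(_GetPartitionInfo(150 * ONE_MIB, 4, 50 * ONE_MIB))   # (3, 52428800)

# 30 MiB file: ceil(30/50) = 1 is clamped up to the 2-component minimum,
# giving 2 components of ceil(30/2) = 15 MiB each.
print(_GetPartitionInfo(30 * ONE_MIB, 32, 50 * ONE_MIB))   # (2, 15728640)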
 
 
-def _DeleteObjectFn(cls, url_to_delete, thread_state=None):
-  """Wrapper function to be used with command.Apply()."""
+def _DeleteTempComponentObjectFn(cls, url_to_delete, thread_state=None):
+  """Wrapper func to be used with command.Apply to delete temporary objects."""
   gsutil_api = GetCloudApiInstance(cls, thread_state)
-  gsutil_api.DeleteObject(
-      url_to_delete.bucket_name, url_to_delete.object_name,
-      generation=url_to_delete.generation, provider=url_to_delete.scheme)
+  try:
+    gsutil_api.DeleteObject(
+        url_to_delete.bucket_name, url_to_delete.object_name,
+        generation=url_to_delete.generation, provider=url_to_delete.scheme)
+  except NotFoundException:
+    # The temporary object could already be gone if a retry was
+    # issued at a lower layer but the original request succeeded.
+    # Barring other errors, the top-level command should still report success,
+    # so don't raise here.
+    pass
 
 
 def _ParseParallelUploadTrackerFile(tracker_file, tracker_file_lock):
diff --git a/catapult/third_party/gsutil/gslib/gcs_json_api.py b/catapult/third_party/gsutil/gslib/gcs_json_api.py
index e57671d..d16fae9 100644
--- a/catapult/third_party/gsutil/gslib/gcs_json_api.py
+++ b/catapult/third_party/gsutil/gslib/gcs_json_api.py
@@ -36,6 +36,7 @@
 from boto import config
 from gcs_oauth2_boto_plugin import oauth2_helper
 import httplib2
+import oauth2client
 from oauth2client import devshell
 from oauth2client import multistore_file
 
@@ -73,8 +74,10 @@
 from gslib.tracker_file import ReadRewriteTrackerFile
 from gslib.tracker_file import WriteRewriteTrackerFile
 from gslib.translation_helper import CreateBucketNotFoundException
+from gslib.translation_helper import CreateNotFoundExceptionForObjectWrite
 from gslib.translation_helper import CreateObjectNotFoundException
 from gslib.translation_helper import DEFAULT_CONTENT_TYPE
+from gslib.translation_helper import PRIVATE_DEFAULT_OBJ_ACL
 from gslib.translation_helper import REMOVE_CORS_CONFIG
 from gslib.util import GetBotoConfigFileList
 from gslib.util import GetCertsFile
@@ -132,7 +135,7 @@
   """Google Cloud Storage JSON implementation of gsutil Cloud API."""
 
   def __init__(self, bucket_storage_uri_class, logger, provider=None,
-               credentials=None, debug=0):
+               credentials=None, debug=0, trace_token=None):
     """Performs necessary setup for interacting with Google Cloud Storage.
 
     Args:
@@ -142,6 +145,7 @@
       credentials: Credentials to be used for interacting with Google Cloud
                    Storage.
       debug: Debug level for the API implementation (0..3).
+      trace_token: Trace token to pass to the API implementation.
     """
     # TODO: Plumb host_header for perfdiag / test_perfdiag.
     # TODO: Add jitter to apitools' http_wrapper retry mechanism.
@@ -163,6 +167,22 @@
     self.certs_file = GetCertsFile()
 
     self.http = GetNewHttp()
+
+    # Re-use download and upload connections. Requests on this class are made
+    # sequentially, but we can share TCP warmed-up connections across calls.
+    self.download_http = self._GetNewDownloadHttp()
+    self.upload_http = self._GetNewUploadHttp()
+    if self.credentials:
+      self.authorized_download_http = self.credentials.authorize(
+          self.download_http)
+      self.authorized_upload_http = self.credentials.authorize(self.upload_http)
+    else:
+      self.authorized_download_http = self.download_http
+      self.authorized_upload_http = self.upload_http
+
+    WrapDownloadHttpRequest(self.authorized_download_http)
+    WrapUploadHttpRequest(self.authorized_upload_http)
+
     self.http_base = 'https://'
     gs_json_host = config.get('Credentials', 'gs_json_host', None)
     self.host_base = gs_json_host or 'www.googleapis.com'
@@ -195,19 +215,27 @@
     self.url_base = (self.http_base + self.host_base + self.host_port + '/' +
                      'storage/' + self.api_version + '/')
 
+    credential_store_key_dict = self._GetCredentialStoreKeyDict(
+        self.credentials)
+
     self.credentials.set_store(
-        multistore_file.get_credential_storage_custom_string_key(
-            GetCredentialStoreFilename(), self.api_version))
+        multistore_file.get_credential_storage_custom_key(
+            GetCredentialStoreFilename(), credential_store_key_dict))
 
     self.num_retries = GetNumRetries()
+    self.max_retry_wait = GetMaxRetryDelay()
 
     log_request = (debug >= 3)
     log_response = (debug >= 3)
 
+    self.global_params = apitools_messages.StandardQueryParameters(
+        trace='token:%s' % trace_token) if trace_token else None
+
     self.api_client = apitools_client.StorageV1(
         url=self.url_base, http=self.http, log_request=log_request,
         log_response=log_response, credentials=self.credentials,
-        version=self.api_version)
+        version=self.api_version, default_global_params=self.global_params)
+    self.api_client.max_retry_wait = self.max_retry_wait
     self.api_client.num_retries = self.num_retries
 
     if no_op_credentials:
@@ -309,8 +337,40 @@
     except:
       raise
 
-  def _GetNewDownloadHttp(self, download_stream):
-    return GetNewHttp(http_class=HttpWithDownloadStream, stream=download_stream)
+  def _GetCredentialStoreKeyDict(self, credentials):
+    """Disambiguates a credential for caching in a credential store.
+
+    Different credential types have different fields that identify them.
+    This function assembles relevant information in a dict and returns it.
+
+    Args:
+      credentials: An OAuth2Credentials object.
+
+    Returns:
+      Dict of relevant identifiers for credentials.
+    """
+    # TODO: If scopes ever become available in the credentials themselves,
+    # include them in the key dict.
+    key_dict = {'api_version': self.api_version}
+    # pylint: disable=protected-access
+    if isinstance(credentials, devshell.DevshellCredentials):
+      key_dict['user_email'] = credentials.user_email
+    elif isinstance(credentials,
+                    oauth2client.service_account._ServiceAccountCredentials):
+      key_dict['_service_account_email'] = credentials._service_account_email
+    elif isinstance(credentials,
+                    oauth2client.client.SignedJwtAssertionCredentials):
+      key_dict['service_account_name'] = credentials.service_account_name
+    elif isinstance(credentials, oauth2client.client.OAuth2Credentials):
+      if credentials.client_id and credentials.client_id != 'null':
+        key_dict['client_id'] = credentials.client_id
+      key_dict['refresh_token'] = credentials.refresh_token
+    # pylint: enable=protected-access
+
+    return key_dict
+
+  def _GetNewDownloadHttp(self):
+    return GetNewHttp(http_class=HttpWithDownloadStream)
 
   def _GetNewUploadHttp(self):
     """Returns an upload-safe Http object (by disabling httplib2 retries)."""
@@ -360,6 +420,11 @@
       bucket_metadata.cors = []
       apitools_include_fields.append('cors')
 
+    if (bucket_metadata.defaultObjectAcl and
+        bucket_metadata.defaultObjectAcl[0] == PRIVATE_DEFAULT_OBJ_ACL):
+      bucket_metadata.defaultObjectAcl = []
+      apitools_include_fields.append('defaultObjectAcl')
+
     predefined_acl = None
     if canned_acl:
       # Must null out existing ACLs to apply a canned ACL.
@@ -528,14 +593,16 @@
         yield object_or_prefix
 
   def _YieldObjectsAndPrefixes(self, object_list):
-    if object_list.items:
-      for cloud_obj in object_list.items:
-        yield CloudApi.CsObjectOrPrefix(cloud_obj,
-                                        CloudApi.CsObjectOrPrefixType.OBJECT)
+    # Yield prefixes first so that checking for the presence of a subdirectory
+    # is fast.
     if object_list.prefixes:
       for prefix in object_list.prefixes:
         yield CloudApi.CsObjectOrPrefix(prefix,
                                         CloudApi.CsObjectOrPrefixType.PREFIX)
+    if object_list.items:
+      for cloud_obj in object_list.items:
+        yield CloudApi.CsObjectOrPrefix(cloud_obj,
+                                        CloudApi.CsObjectOrPrefixType.OBJECT)
 
   def GetObjectMetadata(self, bucket_name, object_name, generation=None,
                         provider=None, fields=None):
@@ -573,8 +640,14 @@
     if generation:
       generation = long(generation)
 
+    # 'outer_total_size' is only used for formatting user output, and is
+    # expected to be one higher than the last byte that should be downloaded.
+    # TODO: Change DownloadCallbackConnectionClassFactory and progress callbacks
+    # to more elegantly handle total size for components of files.
     outer_total_size = object_size
-    if serialization_data:
+    if end_byte:
+      outer_total_size = end_byte + 1
+    elif serialization_data:
       outer_total_size = json.loads(serialization_data)['total_size']
 
     if progress_callback:
@@ -582,7 +655,7 @@
         raise ArgumentException('Download size is required when callbacks are '
                                 'requested for a download, but no size was '
                                 'provided.')
-      progress_callback(0, outer_total_size)
+      progress_callback(start_byte, outer_total_size)
 
     bytes_downloaded_container = BytesTransferredContainer()
     bytes_downloaded_container.bytes_transferred = start_byte
@@ -592,10 +665,9 @@
         progress_callback=progress_callback, digesters=digesters)
     download_http_class = callback_class_factory.GetConnectionClass()
 
-    download_http = self._GetNewDownloadHttp(download_stream)
-    download_http.connections = {'https': download_http_class}
-    authorized_download_http = self.credentials.authorize(download_http)
-    WrapDownloadHttpRequest(authorized_download_http)
+    # Point our download HTTP at our download stream.
+    self.download_http.stream = download_stream
+    self.download_http.connections = {'https': download_http_class}
 
     if serialization_data:
       apitools_download = apitools_transfer.Download.FromData(
@@ -606,7 +678,7 @@
           download_stream, auto_transfer=False, total_size=object_size,
           num_retries=self.num_retries)
 
-    apitools_download.bytes_http = authorized_download_http
+    apitools_download.bytes_http = self.authorized_download_http
     apitools_request = apitools_messages.StorageObjectsGetRequest(
         bucket=bucket_name, object=object_name, generation=generation)
 
@@ -654,7 +726,7 @@
           raise ResumableDownloadException(
               'Transfer failed after %d retries. Final exception: %s' %
               (self.num_retries, unicode(e).encode(UTF8)))
-        time.sleep(CalculateWaitForRetry(retries, max_wait=GetMaxRetryDelay()))
+        time.sleep(CalculateWaitForRetry(retries, max_wait=self.max_retry_wait))
         if self.logger.isEnabledFor(logging.DEBUG):
           self.logger.debug(
               'Retrying download from byte %s after exception: %s. Trace: %s',
@@ -694,7 +766,7 @@
         'accept-encoding': 'gzip',
         'user-agent': self.api_client.user_agent
     }
-    if start_byte or end_byte:
+    if start_byte or end_byte is not None:
       apitools_download.GetRange(additional_headers=additional_headers,
                                  start=start_byte, end=end_byte,
                                  use_chunks=False)
@@ -781,13 +853,10 @@
         bytes_uploaded_container, total_size=total_size,
         progress_callback=progress_callback)
 
-    upload_http = self._GetNewUploadHttp()
     upload_http_class = callback_class_factory.GetConnectionClass()
-    upload_http.connections = {'http': upload_http_class,
-                               'https': upload_http_class}
+    self.upload_http.connections = {'http': upload_http_class,
+                                    'https': upload_http_class}
 
-    authorized_upload_http = self.credentials.authorize(upload_http)
-    WrapUploadHttpRequest(authorized_upload_http)
     # Since bytes_http is created in this function, we don't get the
     # user-agent header from api_client's http automatically.
     additional_headers = {
@@ -822,7 +891,7 @@
             upload_stream, content_type, total_size=size, auto_transfer=True,
             num_retries=self.num_retries)
         apitools_upload.strategy = apitools_strategy
-        apitools_upload.bytes_http = authorized_upload_http
+        apitools_upload.bytes_http = self.authorized_upload_http
 
         return self.api_client.objects.Insert(
             apitools_request,
@@ -830,13 +899,16 @@
             global_params=global_params)
       else:  # Resumable upload.
         return self._PerformResumableUpload(
-            upload_stream, authorized_upload_http, content_type, size,
+            upload_stream, self.authorized_upload_http, content_type, size,
             serialization_data, apitools_strategy, apitools_request,
             global_params, bytes_uploaded_container, tracker_callback,
             additional_headers, progress_callback)
     except TRANSLATABLE_APITOOLS_EXCEPTIONS, e:
+      not_found_exception = CreateNotFoundExceptionForObjectWrite(
+          self.provider, object_metadata.bucket)
       self._TranslateExceptionAndRaise(e, bucket_name=object_metadata.bucket,
-                                       object_name=object_metadata.name)
+                                       object_name=object_metadata.name,
+                                       not_found_exception=not_found_exception)
 
   def _PerformResumableUpload(
       self, upload_stream, authorized_upload_http, content_type, size,
@@ -929,7 +1001,7 @@
                     'Transfer failed after %d retries. Final exception: %s' %
                     (self.num_retries, e2))
               time.sleep(
-                  CalculateWaitForRetry(retries, max_wait=GetMaxRetryDelay()))
+                  CalculateWaitForRetry(retries, max_wait=self.max_retry_wait))
           if start_byte > last_progress_byte:
             # We've made progress, so allow a fresh set of retries.
             last_progress_byte = start_byte
@@ -941,7 +1013,7 @@
                   'Transfer failed after %d retries. Final exception: %s' %
                   (self.num_retries, unicode(e).encode(UTF8)))
             time.sleep(
-                CalculateWaitForRetry(retries, max_wait=GetMaxRetryDelay()))
+                CalculateWaitForRetry(retries, max_wait=self.max_retry_wait))
           if self.logger.isEnabledFor(logging.DEBUG):
             self.logger.debug(
                 'Retrying upload from byte %s after exception: %s. Trace: %s',
@@ -1069,8 +1141,13 @@
       DeleteTrackerFile(tracker_file_name)
       return rewrite_response.resource
     except TRANSLATABLE_APITOOLS_EXCEPTIONS, e:
+      not_found_exception = CreateNotFoundExceptionForObjectWrite(
+          self.provider, dst_obj_metadata.bucket, src_provider=self.provider,
+          src_bucket_name=src_obj_metadata.bucket,
+          src_object_name=src_obj_metadata.name, src_generation=src_generation)
       self._TranslateExceptionAndRaise(e, bucket_name=dst_obj_metadata.bucket,
-                                       object_name=dst_obj_metadata.name)
+                                       object_name=dst_obj_metadata.name,
+                                       not_found_exception=not_found_exception)
 
   def DeleteObject(self, bucket_name, object_name, preconditions=None,
                    generation=None, provider=None):
@@ -1210,7 +1287,7 @@
     raise ArgumentException('Invalid canned ACL %s' % canned_acl_string)
 
   def _TranslateExceptionAndRaise(self, e, bucket_name=None, object_name=None,
-                                  generation=None):
+                                  generation=None, not_found_exception=None):
     """Translates an HTTP exception and raises the translated or original value.
 
     Args:
@@ -1218,6 +1295,7 @@
       bucket_name: Optional bucket name in request that caused the exception.
       object_name: Optional object name in request that caused the exception.
       generation: Optional generation in request that caused the exception.
+      not_found_exception: Optional exception to raise in the not-found case.
 
     Raises:
       Translated CloudApi exception, or the original exception if it was not
@@ -1225,7 +1303,7 @@
     """
     translated_exception = self._TranslateApitoolsException(
         e, bucket_name=bucket_name, object_name=object_name,
-        generation=generation)
+        generation=generation, not_found_exception=not_found_exception)
     if translated_exception:
       raise translated_exception
     else:
@@ -1242,8 +1320,7 @@
           # If we couldn't decode anything, just leave the message as None.
           pass
 
-  def _TranslateApitoolsResumableUploadException(
-      self, e, bucket_name=None, object_name=None, generation=None):
+  def _TranslateApitoolsResumableUploadException(self, e):
     if isinstance(e, apitools_exceptions.HttpError):
       message = self._GetMessageFromHttpError(e)
       if (e.status_code == 503 and
@@ -1274,7 +1351,7 @@
       return ResumableUploadAbortException(e.message)
 
   def _TranslateApitoolsException(self, e, bucket_name=None, object_name=None,
-                                  generation=None):
+                                  generation=None, not_found_exception=None):
     """Translates apitools exceptions into their gsutil Cloud Api equivalents.
 
     Args:
@@ -1282,6 +1359,7 @@
       bucket_name: Optional bucket name in request that caused the exception.
       object_name: Optional object name in request that caused the exception.
       generation: Optional generation in request that caused the exception.
+      not_found_exception: Optional exception to raise in the not-found case.
 
     Returns:
       CloudStorageApiServiceException for translatable exceptions, None
@@ -1333,7 +1411,12 @@
           return AccessDeniedException(message or e.message,
                                        status=e.status_code)
       elif e.status_code == 404:
-        if bucket_name:
+        if not_found_exception:
+          # The exception is pre-constructed prior to translation; the HTTP
+          # status code isn't available at that time.
+          setattr(not_found_exception, 'status', e.status_code)
+          return not_found_exception
+        elif bucket_name:
           if object_name:
             return CreateObjectNotFoundException(e.status_code, self.provider,
                                                  bucket_name, object_name,
@@ -1341,6 +1424,7 @@
           return CreateBucketNotFoundException(e.status_code, self.provider,
                                                bucket_name)
         return NotFoundException(e.message, status=e.status_code)
+
       elif e.status_code == 409 and bucket_name:
         if 'The bucket you tried to delete was not empty.' in str(e):
           return NotEmptyException('BucketNotEmpty (%s)' % bucket_name,
diff --git a/catapult/third_party/gsutil/gslib/gcs_json_media.py b/catapult/third_party/gsutil/gslib/gcs_json_media.py
index 45440f9..9968489 100644
--- a/catapult/third_party/gsutil/gslib/gcs_json_media.py
+++ b/catapult/third_party/gsutil/gslib/gcs_json_media.py
@@ -454,11 +454,8 @@
   class doc).
   """
 
-  def __init__(self, stream=None, *args, **kwds):
-    if stream is None:
-      raise apitools_exceptions.InvalidUserInputError(
-          'Cannot create HttpWithDownloadStream with no stream')
-    self._stream = stream
+  def __init__(self, *args, **kwds):
+    self._stream = None
     self._logger = logging.getLogger()
     super(HttpWithDownloadStream, self).__init__(*args, **kwds)
 
@@ -466,6 +463,10 @@
   def stream(self):
     return self._stream
 
+  @stream.setter
+  def stream(self, value):
+    self._stream = value
+
   def _conn_request(self, conn, request_uri, method, body, headers):  # pylint: disable=too-many-statements
     try:
       if hasattr(conn, 'sock') and conn.sock is None:
@@ -513,6 +514,9 @@
           while True:
             new_data = http_stream.read(TRANSFER_BUFFER_SIZE)
             if new_data:
+              if self.stream is None:
+                raise apitools_exceptions.InvalidUserInputError(
+                    'Cannot exercise HttpWithDownloadStream with no stream')
               self.stream.write(new_data)
               bytes_read += len(new_data)
             else:
diff --git a/catapult/third_party/gsutil/gslib/hashing_helper.py b/catapult/third_party/gsutil/gslib/hashing_helper.py
index dee2f96..c26831f 100644
--- a/catapult/third_party/gsutil/gslib/hashing_helper.py
+++ b/catapult/third_party/gsutil/gslib/hashing_helper.py
@@ -79,6 +79,87 @@
 CHECK_HASH_ALWAYS = 'always'
 CHECK_HASH_NEVER = 'never'
 
+# Table storing polynomial values of x^(2^k) mod CASTAGNOLI_POLY for all k < 31,
+# where x^(2^k) and CASTAGNOLI_POLY are both considered polynomials. This is
+# sufficient since x^(2^31) mod CASTAGNOLI_POLY = x.
+X_POW_2K_TABLE = [2, 4, 16, 256, 65536, 517762881, 984302966,
+                  408362264, 1503875210, 2862076957, 3884826397, 1324787473,
+                  621200174, 1758783527, 1416537776, 1180494764, 648569364,
+                  2521473789, 994858823, 1728245375, 3498467999, 4059169852,
+                  3345064394, 2828422810, 2429203150, 3336788029, 860151998,
+                  2102628683, 1033187991, 4243778976, 1123580069]
+# Castagnoli polynomial and its degree.
+CASTAGNOLI_POLY = 4812730177
+DEGREE = 32
+
+
+def ConcatCrc32c(crc_a, crc_b, num_bytes_in_b):
+  """Computes CRC32C for concat(A, B) given crc(A), crc(B) and len(B).
+
+  An explanation of the algorithm can be found at
+  crcutil.googlecode.com/files/crc-doc.1.0.pdf.
+
+  Args:
+    crc_a: A 32-bit integer representing crc(A) with least-significant
+           coefficient first.
+    crc_b: Same as crc_a.
+    num_bytes_in_b: Length of B in bytes.
+
+  Returns:
+    CRC32C for concat(A, B)
+  """
+  if not num_bytes_in_b:
+    return crc_a
+
+  return _ExtendByZeros(crc_a, 8 * num_bytes_in_b) ^ crc_b
+
+
+def _CrcMultiply(p, q):
+  """Multiplies two polynomials together modulo CASTAGNOLI_POLY.
+
+  Args:
+    p: The first polynomial.
+    q: The second polynomial.
+
+  Returns:
+    Result of the multiplication.
+  """
+
+  result = 0
+  top_bit = 1 << DEGREE
+  for _ in range(DEGREE):
+    if p & 1:
+      result ^= q
+    q <<= 1
+    if q & top_bit:
+      q ^= CASTAGNOLI_POLY
+    p >>= 1
+  return result
+
+
+def _ExtendByZeros(crc, num_bits):
+  """Given crc representing polynomial P(x), compute P(x)*x^num_bits.
+
+  Args:
+    crc: crc representing polynomial P(x).
+    num_bits: number of bits to extend the polynomial by.
+
+  Returns:
+    P(x)*x^num_bits
+  """
+  def _ReverseBits32(crc):
+    return int('{0:032b}'.format(crc)[::-1], 2)
+  crc = _ReverseBits32(crc)
+  i = 0
+
+  while num_bits != 0:
+    if num_bits & 1:
+      crc = _CrcMultiply(crc, X_POW_2K_TABLE[i % len(X_POW_2K_TABLE)])
+    i += 1
+    num_bits >>= 1
+  crc = _ReverseBits32(crc)
+  return crc
+
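The concatenation identity implemented above can be sanity-checked against crcmod's predefined CRC-32C (a minimal sketch, not part of the change; it assumes crcmod is installed and that ConcatCrc32c is importable from gslib.hashing_helper, mirroring how sliced downloads combine per-component crcValue integers):

import crcmod

from gslib.hashing_helper import ConcatCrc32c

# CRC-32C of two adjacent slices of an object.
crc_a = crcmod.predefined.Crc('crc-32c')
crc_a.update(b'slice one,')
crc_b = crcmod.predefined.Crc('crc-32c')
crc_b.update(b'slice two')

# Combine the two digests without re-reading any data.
combined = ConcatCrc32c(crc_a.crcValue, crc_b.crcValue, len(b'slice two'))

# The result matches the CRC-32C of the concatenated bytes.
whole = crcmod.predefined.Crc('crc-32c')
whole.update(b'slice one,slice two')
assert combined == whole.crcValue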
 
 def _CalculateHashFromContents(fp, hash_alg):
   """Calculates a base64 digest of the contents of a seekable stream.
@@ -212,13 +293,13 @@
   return {'md5': md5}
 
 
-def GetDownloadHashAlgs(logger, src_has_md5=False, src_has_crc32c=False):
+def GetDownloadHashAlgs(logger, consider_md5=False, consider_crc32c=False):
   """Returns a dict of hash algorithms for validating an object.
 
   Args:
     logger: logging.Logger for outputting log messages.
-    src_has_md5: If True, source object has an md5 hash.
-    src_has_crc32c: If True, source object has a crc32c hash.
+    consider_md5: If True, consider using a md5 hash.
+    consider_crc32c: If True, consider using a crc32c hash.
 
   Returns:
     Dict of (string, hash algorithm).
@@ -233,9 +314,9 @@
     return {}
 
   hash_algs = {}
-  if src_has_md5:
+  if consider_md5:
     hash_algs['md5'] = md5
-  elif src_has_crc32c:
+  elif consider_crc32c:
     # If the cloud provider supplies a CRC, we'll compute a checksum to
     # validate if we're using a native crcmod installation and MD5 isn't
     # offered as an alternative.
diff --git a/catapult/third_party/gsutil/gslib/name_expansion.py b/catapult/third_party/gsutil/gslib/name_expansion.py
index 0d8b6ca..ee42cde 100644
--- a/catapult/third_party/gsutil/gslib/name_expansion.py
+++ b/catapult/third_party/gsutil/gslib/name_expansion.py
@@ -26,10 +26,10 @@
 
 from __future__ import absolute_import
 
-import multiprocessing
 import os
 import sys
 
+import gslib
 from gslib.exception import CommandException
 from gslib.plurality_checkable_iterator import PluralityCheckableIterator
 import gslib.wildcard_iterator
@@ -70,7 +70,7 @@
     self.expanded_storage_url = expanded_storage_url
 
   def __repr__(self):
-    return '%s' % self._expanded_storage_url
+    return '%s' % self.expanded_storage_url
 
 
 class _NameExpansionIterator(object):
@@ -355,7 +355,7 @@
   def __init__(self, name_expansion_iterator, final_value):
     self.name_expansion_iterator = name_expansion_iterator
     self.final_value = final_value
-    self.lock = multiprocessing.Manager().Lock()
+    self.lock = gslib.util.manager.Lock()
 
   def qsize(self):
     raise NotImplementedError(
diff --git a/catapult/third_party/gsutil/gslib/parallelism_framework_util.py b/catapult/third_party/gsutil/gslib/parallelism_framework_util.py
index 63c781a..ae4daa1 100644
--- a/catapult/third_party/gsutil/gslib/parallelism_framework_util.py
+++ b/catapult/third_party/gsutil/gslib/parallelism_framework_util.py
@@ -16,82 +16,28 @@
 
 from __future__ import absolute_import
 
-import multiprocessing
 import threading
 
 
-class BasicIncrementDict(object):
-  """Dictionary meant for storing values for which increment is defined.
+class AtomicDict(object):
+  """Thread-safe (and optionally process-safe) dictionary protected by a lock.
 
-  This handles any values for which the "+" operation is defined (e.g., floats,
-  lists, etc.). This class is neither thread- nor process-safe.
+  If a multiprocessing.Manager is supplied on init, the dictionary is
+  both process- and thread-safe. Otherwise, it is only thread-safe.
   """
 
-  def __init__(self):
-    self.dict = {}
-
-  def Get(self, key, default_value=None):
-    return self.dict.get(key, default_value)
-
-  def Put(self, key, value):
-    self.dict[key] = value
-
-  def Update(self, key, inc, default_value=0):
-    """Update the stored value associated with the given key.
-
-    Performs the equivalent of
-    self.put(key, self.get(key, default_value) + inc).
+  def __init__(self, manager=None):
+    """Initializes the dict.
 
     Args:
-      key: lookup key for the value of the first operand of the "+" operation.
-      inc: Second operand of the "+" operation.
-      default_value: Default value if there is no existing value for the key.
-
-    Returns:
-      Incremented value.
+      manager: multiprocessing.Manager instance (required for process safety).
     """
-    val = self.dict.get(key, default_value) + inc
-    self.dict[key] = val
-    return val
-
-
-class AtomicIncrementDict(BasicIncrementDict):
-  """Dictionary meant for storing values for which increment is defined.
-
-  This handles any values for which the "+" operation is defined (e.g., floats,
-  lists, etc.) in a thread- and process-safe way that allows for atomic get,
-  put, and update.
-  """
-
-  def __init__(self, manager):  # pylint: disable=super-init-not-called
-    self.dict = ThreadAndProcessSafeDict(manager)
-    self.lock = multiprocessing.Lock()
-
-  def Update(self, key, inc, default_value=0):
-    """Atomically update the stored value associated with the given key.
-
-    Performs the atomic equivalent of
-    self.put(key, self.get(key, default_value) + inc).
-
-    Args:
-      key: lookup key for the value of the first operand of the "+" operation.
-      inc: Second operand of the "+" operation.
-      default_value: Default value if there is no existing value for the key.
-
-    Returns:
-      Incremented value.
-    """
-    with self.lock:
-      return super(AtomicIncrementDict, self).Update(key, inc, default_value)
-
-
-class ThreadSafeDict(object):
-  """Provides a thread-safe dictionary (protected by a lock)."""
-
-  def __init__(self):
-    """Initializes the thread-safe dict."""
-    self.lock = threading.Lock()
-    self.dict = {}
+    if manager:
+      self.lock = manager.Lock()
+      self.dict = manager.dict()
+    else:
+      self.lock = threading.Lock()
+      self.dict = {}
 
   def __getitem__(self, key):
     with self.lock:
@@ -110,21 +56,21 @@
     with self.lock:
       del self.dict[key]
 
+  def Increment(self, key, inc, default_value=0):
+    """Atomically updates the stored value associated with the given key.
 
-class ThreadAndProcessSafeDict(ThreadSafeDict):
-  """Wraps a multiprocessing.Manager's proxy objects for thread-safety.
-
-  The proxy objects returned by a manager are process-safe but not necessarily
-  thread-safe, so this class simply wraps their access with a lock for ease of
-  use. Since the objects are process-safe, we can use the more efficient
-  threading Lock.
-  """
-
-  def __init__(self, manager):
-    """Initializes the thread and process safe dict.
+    Performs the atomic equivalent of
+    dict[key] = dict.get(key, default_value) + inc.
 
     Args:
-      manager: Multiprocessing.manager object.
+      key: lookup key for the value of the first operand of the "+" operation.
+      inc: Second operand of the "+" operation.
+      default_value: Default value if there is no existing value for the key.
+
+    Returns:
+      Incremented value.
     """
-    super(ThreadAndProcessSafeDict, self).__init__()
-    self.dict = manager.dict()
+    with self.lock:
+      val = self.dict.get(key, default_value) + inc
+      self.dict[key] = val
+      return val
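
As a usage sketch for the consolidated dictionary (not part of the change; it assumes AtomicDict is importable from gslib.parallelism_framework_util and the key name is made up):

from gslib.parallelism_framework_util import AtomicDict

# Thread-safe only; pass manager=multiprocessing.Manager() for process safety.
progress = AtomicDict()

# Increment is the atomic read-modify-write used for shared counters.
print(progress.Increment('bytes_downloaded', 1024))   # 1024 (default_value=0)
print(progress.Increment('bytes_downloaded', 2048))   # 3072
print(progress['bytes_downloaded'])                   # 3072
progress.delete('bytes_downloaded')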
diff --git a/catapult/third_party/gsutil/gslib/progress_callback.py b/catapult/third_party/gsutil/gslib/progress_callback.py
index 73ee490..69ee3ed 100644
--- a/catapult/third_party/gsutil/gslib/progress_callback.py
+++ b/catapult/third_party/gsutil/gslib/progress_callback.py
@@ -120,32 +120,41 @@
 class FileProgressCallbackHandler(object):
   """Outputs progress info for large operations like file copy or hash."""
 
-  def __init__(self, announce_text, logger):
+  def __init__(self, announce_text, logger, start_byte=0,
+               override_total_size=None):
     """Initializes the callback handler.
 
     Args:
       announce_text: String describing the operation.
       logger: For outputting log messages.
+      start_byte: The beginning of the file component, if one is being used.
+      override_total_size: The size of the file component, if one is being used.
     """
     self._announce_text = announce_text
     self._logger = logger
+    self._start_byte = start_byte
+    self._override_total_size = override_total_size
     # Ensures final newline is written once even if we get multiple callbacks.
     self._last_byte_written = False
 
   # Function signature is in boto callback format, which cannot be changed.
   def call(self,  # pylint: disable=invalid-name
-           total_bytes_processed,
+           last_byte_processed,
            total_size):
     """Prints an overwriting line to stderr describing the operation progress.
 
     Args:
-      total_bytes_processed: Number of bytes processed so far.
+      last_byte_processed: The last byte processed in the file. For file
+                           components, this number should be in the range
+                           [start_byte:start_byte + override_total_size].
       total_size: Total size of the ongoing operation.
     """
     if not self._logger.isEnabledFor(logging.INFO) or self._last_byte_written:
       return
 
-    # Handle streaming case specially where we don't know the total size:
+    if self._override_total_size:
+      total_size = self._override_total_size
+
     if total_size:
       total_size_string = '/%s' % MakeHumanReadable(total_size)
     else:
@@ -155,8 +164,8 @@
     # TODO: Make this work with logging.Logger.
     sys.stderr.write('%s%s%s    \r' % (
         self._announce_text,
-        MakeHumanReadable(total_bytes_processed),
+        MakeHumanReadable(last_byte_processed - self._start_byte),
         total_size_string))
-    if total_size and total_bytes_processed == total_size:
+    if total_size and last_byte_processed - self._start_byte == total_size:
       self._last_byte_written = True
       sys.stderr.write('\n')
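
The new start_byte/override_total_size parameters can be exercised roughly as follows (a minimal sketch, not part of the change; it assumes FileProgressCallbackHandler and ONE_MIB are importable as shown elsewhere in this diff and that INFO logging is enabled):

import logging

from gslib.progress_callback import FileProgressCallbackHandler
from gslib.util import ONE_MIB

logging.basicConfig(level=logging.INFO)

# Progress for a 5 MiB component that begins at byte offset 10 MiB.
handler = FileProgressCallbackHandler(
    'Downloading component: ', logging.getLogger(),
    start_byte=10 * ONE_MIB, override_total_size=5 * ONE_MIB)

# Callbacks report absolute offsets; the handler subtracts start_byte, so
# this writes roughly "Downloading component: 2 MiB/5 MiB" to stderr.
handler.call(12 * ONE_MIB, 0)
# Reaching start_byte + override_total_size emits the final newline.
handler.call(15 * ONE_MIB, 0)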
diff --git a/catapult/third_party/gsutil/gslib/storage_url.py b/catapult/third_party/gsutil/gslib/storage_url.py
index 657883c..07a22a1 100644
--- a/catapult/third_party/gsutil/gslib/storage_url.py
+++ b/catapult/third_party/gsutil/gslib/storage_url.py
@@ -105,7 +105,7 @@
     raise NotImplementedError('versionless_url_string not overridden')
 
   def __eq__(self, other):
-    return self.url_string == other.url_string
+    return isinstance(other, StorageUrl) and self.url_string == other.url_string
 
   def __hash__(self):
     return hash(self.url_string)
diff --git a/catapult/third_party/gsutil/gslib/tests/test_Doption.py b/catapult/third_party/gsutil/gslib/tests/test_Doption.py
index ded088c..b5612a8 100644
--- a/catapult/third_party/gsutil/gslib/tests/test_Doption.py
+++ b/catapult/third_party/gsutil/gslib/tests/test_Doption.py
@@ -16,6 +16,8 @@
 
 from __future__ import absolute_import
 
+import platform
+
 import gslib
 from gslib.cs_api_map import ApiSelector
 import gslib.tests.testcase as testcase
@@ -88,8 +90,9 @@
       self.assertIn('header: x-goog-hash: md5=eB5eJF1ptWaXm4bijSPyxw==', stderr)
     elif self.test_api == ApiSelector.JSON:
       self.assertRegexpMatches(
-          stderr, '.*GET.*b/%s/o/%s.*user-agent:.*gsutil/%s' %
-          (key_uri.bucket_name, key_uri.object_name, gslib.VERSION))
+          stderr, '.*GET.*b/%s/o/%s.*user-agent:.*gsutil/%s.Python/%s' %
+          (key_uri.bucket_name, key_uri.object_name, gslib.VERSION,
+           platform.python_version()))
       self.assertIn(('header: Cache-Control: no-cache, no-store, max-age=0, '
                      'must-revalidate'), stderr)
       self.assertIn("md5Hash: u'eB5eJF1ptWaXm4bijSPyxw=='", stderr)
diff --git a/catapult/third_party/gsutil/gslib/tests/test_cat.py b/catapult/third_party/gsutil/gslib/tests/test_cat.py
index 43b0ca5..fdc2c98 100644
--- a/catapult/third_party/gsutil/gslib/tests/test_cat.py
+++ b/catapult/third_party/gsutil/gslib/tests/test_cat.py
@@ -48,6 +48,9 @@
     stdout = self.RunGsUtil(['cat', '-r 8-', suri(key_uri)],
                             return_stdout=True)
     self.assertEqual('89', stdout)
+    stdout = self.RunGsUtil(['cat', '-r 0-0', suri(key_uri)],
+                            return_stdout=True)
+    self.assertEqual('0', stdout)
     stdout = self.RunGsUtil(['cat', '-r -3', suri(key_uri)],
                             return_stdout=True)
     self.assertEqual('789', stdout)
diff --git a/catapult/third_party/gsutil/gslib/tests/test_cp.py b/catapult/third_party/gsutil/gslib/tests/test_cp.py
index 7c44c36..c216fb6 100644
--- a/catapult/third_party/gsutil/gslib/tests/test_cp.py
+++ b/catapult/third_party/gsutil/gslib/tests/test_cp.py
@@ -33,13 +33,14 @@
 import boto
 from boto import storage_uri
 from boto.exception import ResumableTransferDisposition
-from boto.exception import ResumableUploadException
 from boto.exception import StorageResponseError
 from boto.storage_uri import BucketStorageUri
+import crcmod
 
 from gslib.cloud_api import ResumableDownloadException
 from gslib.cloud_api import ResumableUploadException
 from gslib.cloud_api import ResumableUploadStartOverException
+from gslib.commands.config import DEFAULT_SLICED_OBJECT_DOWNLOAD_THRESHOLD
 from gslib.copy_helper import GetTrackerFilePath
 from gslib.copy_helper import TrackerFileType
 from gslib.cs_api_map import ApiSelector
@@ -52,19 +53,22 @@
 from gslib.tests.util import GenerationFromURI as urigen
 from gslib.tests.util import HAS_S3_CREDS
 from gslib.tests.util import ObjectToURI as suri
-from gslib.tests.util import PerformsFileToObjectUpload
+from gslib.tests.util import SequentialAndParallelTransfer
 from gslib.tests.util import SetBotoConfigForTest
 from gslib.tests.util import unittest
 from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
 from gslib.tracker_file import DeleteTrackerFile
 from gslib.tracker_file import GetRewriteTrackerFilePath
+from gslib.tracker_file import GetSlicedDownloadTrackerFilePaths
 from gslib.util import EIGHT_MIB
+from gslib.util import HumanReadableToBytes
 from gslib.util import IS_WINDOWS
 from gslib.util import MakeHumanReadable
 from gslib.util import ONE_KIB
 from gslib.util import ONE_MIB
 from gslib.util import Retry
 from gslib.util import START_CALLBACK_PER_BYTES
+from gslib.util import UsingCrcmodExtension
 from gslib.util import UTF8
 
 
@@ -139,6 +143,24 @@
           ResumableTransferDisposition.START_OVER)
 
 
+class _HaltOneComponentCopyCallbackHandler(object):
+  """Test callback handler for stopping part of a sliced download."""
+
+  def __init__(self, halt_at_byte):
+    self._last_progress_byte = None
+    self._halt_at_byte = halt_at_byte
+
+  # pylint: disable=invalid-name
+  # pylint: disable=unused-argument
+  def call(self, current_progress_byte, total_size_unused):
+    """Forcibly exits if the passed the halting point since the last call."""
+    if (self._last_progress_byte is not None and
+        self._last_progress_byte < self._halt_at_byte < current_progress_byte):
+      sys.stderr.write('Halting transfer.\r\n')
+      raise ResumableDownloadException('Artificially halting download.')
+    self._last_progress_byte = current_progress_byte
+
+
 class _DeleteBucketThenStartOverCopyCallbackHandler(object):
   """Test callback handler that deletes bucket then raises start-over."""
 
@@ -239,7 +261,7 @@
     contents = pkgutil.get_data('gslib', 'tests/test_data/%s' % name)
     return self.CreateTempFile(file_name=name, contents=contents)
 
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   def test_noclobber(self):
     key_uri = self.CreateObject(contents='foo')
     fpath = self.CreateTempFile(contents='bar')
@@ -259,7 +281,7 @@
         '%s://%s' % (self.default_provider, self.nonexistent_bucket_name))
     stderr = self.RunGsUtil(['cp', fpath, invalid_bucket_uri],
                             expected_status=1, return_stderr=True)
-    self.assertIn('does not exist.', stderr)
+    self.assertIn('does not exist', stderr)
 
   def test_copy_in_cloud_noclobber(self):
     bucket1_uri = self.CreateBucket()
@@ -275,7 +297,7 @@
     self.assertIn('Skipping existing item: %s' %
                   suri(bucket2_uri, key_uri.object_name), stderr)
 
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   def test_streaming(self):
     bucket_uri = self.CreateBucket()
     stderr = self.RunGsUtil(['cp', '-', '%s' % suri(bucket_uri, 'foo')],
@@ -293,7 +315,7 @@
 
   # TODO: Implement a way to test both with and without using magic file.
 
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   def test_detect_content_type(self):
     """Tests local detection of content type."""
     bucket_uri = self.CreateBucket()
@@ -375,7 +397,7 @@
     _Check2()
 
   @unittest.skipIf(IS_WINDOWS, 'magicfile is not available on Windows.')
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   def test_magicfile_override(self):
     """Tests content type override with magicfile value."""
     bucket_uri = self.CreateBucket()
@@ -393,7 +415,7 @@
       self.assertRegexpMatches(stdout, r'Content-Type:\s+%s' % content_type)
     _Check1()
 
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   def test_content_type_mismatches(self):
     """Tests overriding content type when it does not match the file type."""
     bucket_uri = self.CreateBucket()
@@ -429,7 +451,7 @@
       self.assertRegexpMatches(stdout, r'Content-Type:\s+image/gif')
     _Check3()
 
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   def test_content_type_header_case_insensitive(self):
     """Tests that content type header is treated with case insensitivity."""
     bucket_uri = self.CreateBucket()
@@ -459,7 +481,7 @@
       self.assertNotRegexpMatches(stdout, r'image/gif,\s*image/gif')
     _Check2()
 
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   def test_other_headers(self):
     """Tests that non-content-type headers are applied successfully on copy."""
     bucket_uri = self.CreateBucket()
@@ -481,7 +503,7 @@
     self.assertRegexpMatches(stdout, r'Cache-Control\s*:\s*public,max-age=12')
     self.assertRegexpMatches(stdout, r'Metadata:\s*1:\s*abcd')
 
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   def test_versioning(self):
     """Tests copy with versioning."""
     bucket_uri = self.CreateVersionedBucket()
@@ -524,11 +546,20 @@
                             expected_status=1)
     self.assertIn('cannot be the destination for gsutil cp', stderr)
 
+  def test_versioning_no_parallelism(self):
+    """Tests that copy all-versions errors when parallelism is enabled."""
+    stderr = self.RunGsUtil(
+        ['-m', 'cp', '-A', suri(self.nonexistent_bucket_name, 'foo'),
+         suri(self.nonexistent_bucket_name, 'bar')],
+        expected_status=1, return_stderr=True)
+    self.assertIn('-m option is not supported with the cp -A flag', stderr)
+
   @SkipForS3('S3 lists versioned objects in reverse timestamp order.')
   def test_recursive_copying_versioned_bucket(self):
-    """Tests that cp -R with versioned buckets copies all versions in order."""
+    """Tests cp -R with versioned buckets."""
     bucket1_uri = self.CreateVersionedBucket()
     bucket2_uri = self.CreateVersionedBucket()
+    bucket3_uri = self.CreateVersionedBucket()
 
     # Write two versions of an object to the bucket1.
     self.CreateObject(bucket_uri=bucket1_uri, object_name='k', contents='data0')
@@ -537,9 +568,12 @@
 
     self.AssertNObjectsInBucket(bucket1_uri, 2, versioned=True)
     self.AssertNObjectsInBucket(bucket2_uri, 0, versioned=True)
+    self.AssertNObjectsInBucket(bucket3_uri, 0, versioned=True)
 
     # Recursively copy to second versioned bucket.
-    self.RunGsUtil(['cp', '-R', suri(bucket1_uri, '*'), suri(bucket2_uri)])
+    # -A flag should copy all versions in order.
+    self.RunGsUtil(['cp', '-R', '-A', suri(bucket1_uri, '*'),
+                    suri(bucket2_uri)])
 
     # Use @Retry as hedge against bucket listing eventual consistency.
     @Retry(AssertionError, tries=3, timeout_secs=1)
@@ -570,7 +604,31 @@
       self.assertEquals(storage_uri(uri_str2).object_name, 'k')
     _Check2()
 
-  @PerformsFileToObjectUpload
+    # Recursively copy to the third versioned bucket with no -A flag.
+    # This should copy only the live object.
+    self.RunGsUtil(['cp', '-R', suri(bucket1_uri, '*'),
+                    suri(bucket3_uri)])
+
+    # Use @Retry as hedge against bucket listing eventual consistency.
+    @Retry(AssertionError, tries=3, timeout_secs=1)
+    def _Check3():
+      """Validates the results of the cp -R."""
+      listing1 = self.RunGsUtil(['ls', '-la', suri(bucket1_uri)],
+                                return_stdout=True).split('\n')
+      listing2 = self.RunGsUtil(['ls', '-la', suri(bucket3_uri)],
+                                return_stdout=True).split('\n')
+      # 2 lines of listing output, 1 summary line, 1 empty line from \n split.
+      self.assertEquals(len(listing1), 4)
+      # 1 line of listing output, 1 summary line, 1 empty line from \n split.
+      self.assertEquals(len(listing2), 3)
+
+      # Live (second) object in bucket 1 should match the single live object.
+      size1, _, uri_str1, _ = listing2[0].split()
+      self.assertEquals(size1, str(len('longer_data1')))
+      self.assertEquals(storage_uri(uri_str1).object_name, 'k')
+    _Check3()
+
+  @SequentialAndParallelTransfer
   @SkipForS3('Preconditions not supported for S3.')
   def test_cp_generation_zero_match(self):
     """Tests that cp handles an object-not-exists precondition header."""
@@ -591,7 +649,7 @@
                             return_stderr=True, expected_status=1)
     self.assertIn('PreconditionException', stderr)
 
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   @SkipForS3('Preconditions not supported for S3.')
   def test_cp_v_generation_match(self):
     """Tests that cp -v option handles the if-generation-match header."""
@@ -623,7 +681,7 @@
     self.assertIn('Specifying x-goog-if-generation-match is not supported '
                   'with cp -n', stderr)
 
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   def test_cp_nv(self):
     """Tests that cp -nv works when skipping existing file."""
     bucket_uri = self.CreateVersionedBucket()
@@ -640,7 +698,7 @@
                             return_stderr=True)
     self.assertIn('Skipping existing item:', stderr)
 
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   @SkipForS3('S3 lists versioned objects in reverse timestamp order.')
   def test_cp_v_option(self):
     """"Tests that cp -v returns the created object's version-specific URI."""
@@ -699,7 +757,7 @@
       self.assertEqual(created_uri, lines[-2])
     _Check1()
 
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   def test_stdin_args(self):
     """Tests cp with the -I option."""
     tmpdir = self.CreateTempDir()
@@ -844,7 +902,7 @@
       self.assertEqual(public_read_acl, new_acl_json)
     _Check()
 
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   def test_canned_acl_upload(self):
     """Tests uploading a file with a canned ACL."""
     bucket1_uri = self.CreateBucket()
@@ -889,7 +947,7 @@
     stdout = self.RunGsUtil(['cp', fpath, '-'], return_stdout=True)
     self.assertIn(contents, stdout)
 
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   def test_cp_zero_byte_file(self):
     dst_bucket_uri = self.CreateBucket()
     src_dir = self.CreateTempDir()
@@ -909,7 +967,7 @@
     self.assertTrue(os.stat(download_path))
 
   def test_copy_bucket_to_bucket(self):
-    """Tests that recursively copying from bucket to bucket.
+    """Tests recursively copying from bucket to bucket.
 
     This should produce identically named objects (and not, in particular,
     destination objects named by the version-specific URI from source objects).
@@ -978,9 +1036,7 @@
     key_uri.set_contents_from_string('')
     self.AssertNObjectsInBucket(src_bucket_uri, 3)
 
-    stderr = self.RunGsUtil(['cp', '-R', suri(src_bucket_uri), dst_dir],
-                            return_stderr=True)
-    self.assertIn('Skipping cloud sub-directory placeholder object', stderr)
+    self.RunGsUtil(['cp', '-R', suri(src_bucket_uri), dst_dir])
     dir_list = []
     for dirname, _, filenames in os.walk(dst_dir):
       for filename in filenames:
@@ -1025,12 +1081,12 @@
 
   @unittest.skipIf(IS_WINDOWS,
                    'Unicode handling on Windows requires mods to site-packages')
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   def test_cp_manifest_upload_unicode(self):
     return self._ManifestUpload('foo-unicöde', 'bar-unicöde',
                                 'manifest-unicöde')
 
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   def test_cp_manifest_upload(self):
     """Tests uploading with a mnifest file."""
     return self._ManifestUpload('foo', 'bar', 'manifest')
@@ -1071,7 +1127,7 @@
     self.assertEqual(int(results[7]), 3)  # Bytes Transferred
     self.assertEqual(results[8], 'OK')  # Result
 
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   def test_cp_manifest_download(self):
     """Tests downloading with a manifest file."""
     key_uri = self.CreateObject(contents='foo')
@@ -1097,14 +1153,13 @@
     start_date = datetime.datetime.strptime(results[2], date_format)
     end_date = datetime.datetime.strptime(results[3], date_format)
     self.assertEqual(end_date > start_date, True)
-    self.assertEqual(results[4], 'rL0Y20zC+Fzt72VPzMSk2A==')  # md5
     self.assertEqual(int(results[6]), 3)  # Source Size
     # Bytes transferred might be more than 3 if the file was gzipped, since
     # the minimum gzip header is 10 bytes.
     self.assertGreaterEqual(int(results[7]), 3)  # Bytes Transferred
     self.assertEqual(results[8], 'OK')  # Result
 
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   def test_copy_unicode_non_ascii_filename(self):
     key_uri = self.CreateObject(contents='foo')
     # Make file large enough to cause a resumable upload (which hashes filename
@@ -1122,6 +1177,7 @@
   # such files (so, failed that test). Given that, we decided to remove the
   # test.
 
+  @SequentialAndParallelTransfer
   def test_gzip_upload_and_download(self):
     bucket_uri = self.CreateBucket()
     contents = 'x' * 10000
@@ -1158,6 +1214,7 @@
     self.assertIn('Copying file:', stderr)
     self.AssertNObjectsInBucket(bucket_uri, 1)
 
+  @SequentialAndParallelTransfer
   def test_cp_object_ending_with_slash(self):
     """Tests that cp works with object names ending with slash."""
     tmpdir = self.CreateTempDir()
@@ -1214,6 +1271,7 @@
     self.RunGsUtil(['-m', 'cp', wildcard_uri, suri(bucket_uri)])
     self.AssertNObjectsInBucket(bucket_uri, num_test_files)
 
+  @SequentialAndParallelTransfer
   def test_cp_duplicate_source_args(self):
     """Tests that cp -m works when a source argument is provided twice."""
     object_contents = 'edge'
@@ -1416,7 +1474,7 @@
   @NotParallelizable
   @SkipForS3('No resumable upload support for S3.')
   @unittest.skipIf(IS_WINDOWS, 'chmod on dir unsupported on Windows.')
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   def test_cp_unwritable_tracker_file(self):
     """Tests a resumable upload with an unwritable tracker file."""
     bucket_uri = self.CreateBucket()
@@ -1443,6 +1501,7 @@
   # interferes with any parallel running tests that use the tracker directory.
   @NotParallelizable
   @unittest.skipIf(IS_WINDOWS, 'chmod on dir unsupported on Windows.')
+  @SequentialAndParallelTransfer
   def test_cp_unwritable_tracker_file_download(self):
     """Tests downloads with an unwritable tracker file."""
     object_uri = self.CreateObject(contents='foo' * ONE_KIB)
@@ -1491,6 +1550,7 @@
                               return_stderr=True)
       self.assertIn('Resuming download', stderr)
 
+  @SequentialAndParallelTransfer
   def test_cp_resumable_download_etag_differs(self):
     """Tests that download restarts the file when the source object changes.
 
@@ -1498,7 +1558,7 @@
     """
     bucket_uri = self.CreateBucket()
     object_uri = self.CreateObject(bucket_uri=bucket_uri, object_name='foo',
-                                   contents='a' * self.halt_size)
+                                   contents='abc' * self.halt_size)
     fpath = self.CreateTempFile()
     test_callback_file = self.CreateTempFile(
         contents=pickle.dumps(_HaltingCopyCallbackHandler(False, 5)))
@@ -1517,6 +1577,44 @@
                               return_stderr=True)
       self.assertNotIn('Resuming download', stderr)
 
+  # TODO: Enable this test for sequential downloads when their tracker files are
+  # modified to contain the source object generation.
+  @unittest.skipUnless(UsingCrcmodExtension(crcmod),
+                       'Sliced download requires fast crcmod.')
+  @SkipForS3('No sliced download support for S3.')
+  def test_cp_resumable_download_generation_differs(self):
+    """Tests that a resumable download restarts if the generation differs."""
+    bucket_uri = self.CreateBucket()
+    file_contents = 'abcd' * self.halt_size
+    object_uri = self.CreateObject(bucket_uri=bucket_uri, object_name='foo',
+                                   contents=file_contents)
+    fpath = self.CreateTempFile()
+
+    test_callback_file = self.CreateTempFile(
+        contents=pickle.dumps(_HaltingCopyCallbackHandler(False, 5)))
+
+    boto_config_for_test = [
+        ('GSUtil', 'resumable_threshold', str(self.halt_size)),
+        ('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
+        ('GSUtil', 'sliced_object_download_max_components', '3')]
+
+    with SetBotoConfigForTest(boto_config_for_test):
+      stderr = self.RunGsUtil(['cp', '--testcallbackfile', test_callback_file,
+                               suri(object_uri), suri(fpath)],
+                              return_stderr=True, expected_status=1)
+      self.assertIn('Artifically halting download.', stderr)
+
+      # Overwrite the object with an identical object, increasing
+      # the generation but leaving other metadata the same.
+      identical_file = self.CreateTempFile(contents=file_contents)
+      self.RunGsUtil(['cp', suri(identical_file), suri(object_uri)])
+
+      stderr = self.RunGsUtil(['cp', suri(object_uri), suri(fpath)],
+                              return_stderr=True)
+      self.assertIn('Restarting download from scratch', stderr)
+      with open(fpath, 'r') as f:
+        self.assertEqual(f.read(), file_contents, 'File contents differ')
+
   def test_cp_resumable_download_file_larger(self):
     """Tests download deletes the tracker file when existing file is larger."""
     bucket_uri = self.CreateBucket()
@@ -1531,13 +1629,12 @@
                                suri(object_uri), fpath],
                               expected_status=1, return_stderr=True)
       self.assertIn('Artifically halting download.', stderr)
-      with open(fpath, 'w') as larger_file:
+      with open(fpath + '_.gstmp', 'w') as larger_file:
         for _ in range(self.halt_size * 2):
           larger_file.write('a')
       stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
                               expected_status=1, return_stderr=True)
       self.assertNotIn('Resuming download', stderr)
-      self.assertIn('is larger', stderr)
       self.assertIn('Deleting tracker file', stderr)
 
   def test_cp_resumable_download_content_differs(self):
@@ -1550,7 +1647,11 @@
     """
     bucket_uri = self.CreateBucket()
     tmp_dir = self.CreateTempDir()
-    fpath = self.CreateTempFile(tmpdir=tmp_dir, contents='abcd' * ONE_KIB)
+    fpath = self.CreateTempFile(tmpdir=tmp_dir)
+    temp_download_file = fpath + '_.gstmp'
+    with open(temp_download_file, 'w') as fp:
+      fp.write('abcd' * ONE_KIB)
+
     object_uri = self.CreateObject(bucket_uri=bucket_uri, object_name='foo',
                                    contents='efgh' * ONE_KIB)
     stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)], return_stdout=True)
@@ -1569,11 +1670,13 @@
       with SetBotoConfigForTest([boto_config_for_test]):
         stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
                                 return_stderr=True, expected_status=1)
-        self.assertIn('Download already complete for file', stderr)
+        self.assertIn('Download already complete', stderr)
         self.assertIn('doesn\'t match cloud-supplied digest', stderr)
         # File and tracker file should be deleted.
-        self.assertFalse(os.path.isfile(fpath))
+        self.assertFalse(os.path.isfile(temp_download_file))
         self.assertFalse(os.path.isfile(tracker_filename))
+        # Permanent file should not have been created.
+        self.assertFalse(os.path.isfile(fpath))
     finally:
       if os.path.exists(tracker_filename):
         os.unlink(tracker_filename)
@@ -1582,8 +1685,12 @@
     """Tests download no-ops when tracker file matches existing file."""
     bucket_uri = self.CreateBucket()
     tmp_dir = self.CreateTempDir()
+    fpath = self.CreateTempFile(tmpdir=tmp_dir)
     matching_contents = 'abcd' * ONE_KIB
-    fpath = self.CreateTempFile(tmpdir=tmp_dir, contents=matching_contents)
+    temp_download_file = fpath + '_.gstmp'
+    with open(temp_download_file, 'w') as fp:
+      fp.write(matching_contents)
+
     object_uri = self.CreateObject(bucket_uri=bucket_uri, object_name='foo',
                                    contents=matching_contents)
     stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)], return_stdout=True)
@@ -1601,7 +1708,7 @@
       with SetBotoConfigForTest([boto_config_for_test]):
         stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
                                 return_stderr=True)
-        self.assertIn('Download already complete for file', stderr)
+        self.assertIn('Download already complete', stderr)
         # Tracker file should be removed after successful hash validation.
         self.assertFalse(os.path.isfile(tracker_filename))
     finally:
@@ -1643,6 +1750,7 @@
       if os.path.exists(tracker_filename):
         os.unlink(tracker_filename)
 
+  @SequentialAndParallelTransfer
   def test_cp_resumable_download_gzip(self):
     """Tests that download can be resumed successfully with a gzipped file."""
     # Generate some reasonably incompressible data.  This compresses to a bit
@@ -1683,12 +1791,26 @@
                                suri(object_uri), suri(fpath2)],
                               return_stderr=True, expected_status=1)
       self.assertIn('Artifically halting download.', stderr)
-      tracker_filename = GetTrackerFilePath(
-          StorageUrlFromString(fpath2), TrackerFileType.DOWNLOAD, self.test_api)
-      self.assertTrue(os.path.isfile(tracker_filename))
       self.assertIn('Downloading to temp gzip filename', stderr)
+
+      # Tracker files will have different names depending on whether we are
+      # downloading sequentially or in parallel.
+      sliced_download_threshold = HumanReadableToBytes(
+          boto.config.get('GSUtil', 'sliced_object_download_threshold',
+                          DEFAULT_SLICED_OBJECT_DOWNLOAD_THRESHOLD))
+      sliced_download = (len(contents) > sliced_download_threshold
+                         and sliced_download_threshold > 0
+                         and UsingCrcmodExtension(crcmod))
+      if sliced_download:
+        trackerfile_type = TrackerFileType.SLICED_DOWNLOAD
+      else:
+        trackerfile_type = TrackerFileType.DOWNLOAD
+      tracker_filename = GetTrackerFilePath(
+          StorageUrlFromString(fpath2), trackerfile_type, self.test_api)
+
       # We should have a temporary gzipped file, a tracker file, and no
       # final file yet.
+      self.assertTrue(os.path.isfile(tracker_filename))
       self.assertTrue(os.path.isfile('%s_.gztmp' % fpath2))
       stderr = self.RunGsUtil(['cp', suri(object_uri), suri(fpath2)],
                               return_stderr=True)
@@ -1698,6 +1820,31 @@
       self.assertFalse(os.path.isfile(tracker_filename))
       self.assertFalse(os.path.isfile('%s_.gztmp' % fpath2))
 
+  @SequentialAndParallelTransfer
+  def test_cp_resumable_download_check_hashes_never(self):
+    """Tests that resumble downloads work with check_hashes = never."""
+    bucket_uri = self.CreateBucket()
+    contents = 'abcd' * self.halt_size
+    object_uri = self.CreateObject(bucket_uri=bucket_uri, object_name='foo',
+                                   contents=contents)
+    fpath = self.CreateTempFile()
+    test_callback_file = self.CreateTempFile(
+        contents=pickle.dumps(_HaltingCopyCallbackHandler(False, 5)))
+
+    boto_config_for_test = [('GSUtil', 'resumable_threshold', str(ONE_KIB)),
+                            ('GSUtil', 'check_hashes', 'never')]
+    with SetBotoConfigForTest(boto_config_for_test):
+      stderr = self.RunGsUtil(['cp', '--testcallbackfile', test_callback_file,
+                               suri(object_uri), fpath],
+                              expected_status=1, return_stderr=True)
+      self.assertIn('Artifically halting download.', stderr)
+      stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
+                              return_stderr=True)
+      self.assertIn('Resuming download', stderr)
+      self.assertIn('Found no hashes to validate object downloaded', stderr)
+      with open(fpath, 'r') as f:
+        self.assertEqual(f.read(), contents, 'File contents did not match.')
+
   @SkipForS3('No resumable upload support for S3.')
   def test_cp_resumable_upload_bucket_deleted(self):
     """Tests that a not found exception is raised if bucket no longer exists."""
@@ -1715,6 +1862,312 @@
     self.assertIn('Deleting bucket', stderr)
     self.assertIn('bucket does not exist', stderr)
 
+  @SkipForS3('No sliced download support for S3.')
+  def test_cp_sliced_download(self):
+    """Tests that sliced object download works in the general case."""
+    bucket_uri = self.CreateBucket()
+    object_uri = self.CreateObject(bucket_uri=bucket_uri, object_name='foo',
+                                   contents='abc' * ONE_KIB)
+    fpath = self.CreateTempFile()
+
+    # Force fast crcmod to return True to test the basic sliced download
+    # scenario, ensuring that if the user installs crcmod, it will work.
+    boto_config_for_test = [
+        ('GSUtil', 'resumable_threshold', str(ONE_KIB)),
+        ('GSUtil', 'test_assume_fast_crcmod', 'True'),
+        ('GSUtil', 'sliced_object_download_threshold', str(ONE_KIB)),
+        ('GSUtil', 'sliced_object_download_max_components', '3')]
+
+    with SetBotoConfigForTest(boto_config_for_test):
+      self.RunGsUtil(['cp', suri(object_uri), fpath])
+
+      # Each tracker file should have been deleted.
+      tracker_filenames = GetSlicedDownloadTrackerFilePaths(
+          StorageUrlFromString(fpath), self.test_api)
+      for tracker_filename in tracker_filenames:
+        self.assertFalse(os.path.isfile(tracker_filename))
+
+      with open(fpath, 'r') as f:
+        self.assertEqual(f.read(), 'abc' * ONE_KIB, 'File contents differ')
+
+  @unittest.skipUnless(UsingCrcmodExtension(crcmod),
+                       'Sliced download requires fast crcmod.')
+  @SkipForS3('No sliced download support for S3.')
+  def test_cp_unresumable_sliced_download(self):
+    """Tests sliced download works when resumability is disabled."""
+    bucket_uri = self.CreateBucket()
+    object_uri = self.CreateObject(bucket_uri=bucket_uri, object_name='foo',
+                                   contents='abcd' * self.halt_size)
+    fpath = self.CreateTempFile()
+    test_callback_file = self.CreateTempFile(
+        contents=pickle.dumps(_HaltingCopyCallbackHandler(False, 5)))
+
+    boto_config_for_test = [
+        ('GSUtil', 'resumable_threshold', str(self.halt_size*5)),
+        ('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
+        ('GSUtil', 'sliced_object_download_max_components', '4')]
+
+    with SetBotoConfigForTest(boto_config_for_test):
+      stderr = self.RunGsUtil(['cp', '--testcallbackfile', test_callback_file,
+                               suri(object_uri), suri(fpath)],
+                              return_stderr=True, expected_status=1)
+      self.assertIn('not downloaded successfully', stderr)
+      # Temporary download file should exist.
+      self.assertTrue(os.path.isfile(fpath + '_.gstmp'))
+
+      # No tracker files should exist.
+      tracker_filenames = GetSlicedDownloadTrackerFilePaths(
+          StorageUrlFromString(fpath), self.test_api)
+      for tracker_filename in tracker_filenames:
+        self.assertFalse(os.path.isfile(tracker_filename))
+
+    # Perform the entire download, without resuming.
+    with SetBotoConfigForTest(boto_config_for_test):
+      stderr = self.RunGsUtil(['cp', suri(object_uri), suri(fpath)],
+                              return_stderr=True)
+      self.assertNotIn('Resuming download', stderr)
+      # Temporary download file should have been deleted.
+      self.assertFalse(os.path.isfile(fpath + '_.gstmp'))
+      with open(fpath, 'r') as f:
+        self.assertEqual(f.read(), 'abcd' * self.halt_size,
+                         'File contents differ')
+
+  @unittest.skipUnless(UsingCrcmodExtension(crcmod),
+                       'Sliced download requires fast crcmod.')
+  @SkipForS3('No sliced download support for S3.')
+  def test_cp_sliced_download_resume(self):
+    """Tests that sliced object download is resumable."""
+    bucket_uri = self.CreateBucket()
+    object_uri = self.CreateObject(bucket_uri=bucket_uri, object_name='foo',
+                                   contents='abc' * self.halt_size)
+    fpath = self.CreateTempFile()
+    test_callback_file = self.CreateTempFile(
+        contents=pickle.dumps(_HaltingCopyCallbackHandler(False, 5)))
+
+    boto_config_for_test = [
+        ('GSUtil', 'resumable_threshold', str(self.halt_size)),
+        ('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
+        ('GSUtil', 'sliced_object_download_max_components', '3')]
+
+    with SetBotoConfigForTest(boto_config_for_test):
+      stderr = self.RunGsUtil(['cp', '--testcallbackfile', test_callback_file,
+                               suri(object_uri), suri(fpath)],
+                              return_stderr=True, expected_status=1)
+      self.assertIn('not downloaded successfully', stderr)
+
+      # Each tracker file should exist.
+      tracker_filenames = GetSlicedDownloadTrackerFilePaths(
+          StorageUrlFromString(fpath), self.test_api)
+      for tracker_filename in tracker_filenames:
+        self.assertTrue(os.path.isfile(tracker_filename))
+
+      stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
+                              return_stderr=True)
+      self.assertIn('Resuming download', stderr)
+
+      # Each tracker file should have been deleted.
+      tracker_filenames = GetSlicedDownloadTrackerFilePaths(
+          StorageUrlFromString(fpath), self.test_api)
+      for tracker_filename in tracker_filenames:
+        self.assertFalse(os.path.isfile(tracker_filename))
+
+      with open(fpath, 'r') as f:
+        self.assertEqual(f.read(), 'abc' * self.halt_size,
+                         'File contents differ')
+
+  @unittest.skipUnless(UsingCrcmodExtension(crcmod),
+                       'Sliced download requires fast crcmod.')
+  @SkipForS3('No sliced download support for S3.')
+  def test_cp_sliced_download_partial_resume(self):
+    """Test sliced download resumability when some components are finished."""
+    bucket_uri = self.CreateBucket()
+    object_uri = self.CreateObject(bucket_uri=bucket_uri, object_name='foo',
+                                   contents='abc' * self.halt_size)
+    fpath = self.CreateTempFile()
+    test_callback_file = self.CreateTempFile(
+        contents=pickle.dumps(_HaltOneComponentCopyCallbackHandler(5)))
+
+    boto_config_for_test = [
+        ('GSUtil', 'resumable_threshold', str(self.halt_size)),
+        ('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
+        ('GSUtil', 'sliced_object_download_max_components', '3')]
+
+    with SetBotoConfigForTest(boto_config_for_test):
+      stderr = self.RunGsUtil(['cp', '--testcallbackfile', test_callback_file,
+                               suri(object_uri), suri(fpath)],
+                              return_stderr=True, expected_status=1)
+      self.assertIn('not downloaded successfully', stderr)
+
+      # Each tracker file should exist.
+      tracker_filenames = GetSlicedDownloadTrackerFilePaths(
+          StorageUrlFromString(fpath), self.test_api)
+      for tracker_filename in tracker_filenames:
+        self.assertTrue(os.path.isfile(tracker_filename))
+
+      stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
+                              return_stderr=True)
+      self.assertIn('Resuming download', stderr)
+      self.assertIn('Download already complete', stderr)
+
+      # Each tracker file should have been deleted.
+      tracker_filenames = GetSlicedDownloadTrackerFilePaths(
+          StorageUrlFromString(fpath), self.test_api)
+      for tracker_filename in tracker_filenames:
+        self.assertFalse(os.path.isfile(tracker_filename))
+
+      with open(fpath, 'r') as f:
+        self.assertEqual(f.read(), 'abc' * self.halt_size,
+                         'File contents differ')
+
+  @unittest.skipUnless(UsingCrcmodExtension(crcmod),
+                       'Sliced download requires fast crcmod.')
+  @SkipForS3('No sliced download support for S3.')
+  def test_cp_sliced_download_resume_content_differs(self):
+    """Tests differing file contents are detected by sliced downloads."""
+    bucket_uri = self.CreateBucket()
+    object_uri = self.CreateObject(bucket_uri=bucket_uri, object_name='foo',
+                                   contents='abc' * self.halt_size)
+    fpath = self.CreateTempFile(contents='')
+    test_callback_file = self.CreateTempFile(
+        contents=pickle.dumps(_HaltingCopyCallbackHandler(False, 5)))
+
+    boto_config_for_test = [
+        ('GSUtil', 'resumable_threshold', str(self.halt_size)),
+        ('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
+        ('GSUtil', 'sliced_object_download_max_components', '3')]
+
+    with SetBotoConfigForTest(boto_config_for_test):
+      stderr = self.RunGsUtil(['cp', '--testcallbackfile', test_callback_file,
+                               suri(object_uri), suri(fpath)],
+                              return_stderr=True, expected_status=1)
+      self.assertIn('not downloaded successfully', stderr)
+
+      # Temporary download file should exist.
+      self.assertTrue(os.path.isfile(fpath + '_.gstmp'))
+
+      # Each tracker file should exist.
+      tracker_filenames = GetSlicedDownloadTrackerFilePaths(
+          StorageUrlFromString(fpath), self.test_api)
+      for tracker_filename in tracker_filenames:
+        self.assertTrue(os.path.isfile(tracker_filename))
+
+      with open(fpath + '_.gstmp', 'r+b') as f:
+        f.write('altered file contents')
+
+      stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
+                              return_stderr=True, expected_status=1)
+      self.assertIn('Resuming download', stderr)
+      self.assertIn('doesn\'t match cloud-supplied digest', stderr)
+      self.assertIn('HashMismatchException: crc32c', stderr)
+
+      # Each tracker file should have been deleted.
+      tracker_filenames = GetSlicedDownloadTrackerFilePaths(
+          StorageUrlFromString(fpath), self.test_api)
+      for tracker_filename in tracker_filenames:
+        self.assertFalse(os.path.isfile(tracker_filename))
+
+      # Temporary file should have been deleted due to hash mismatch.
+      self.assertFalse(os.path.isfile(fpath + '_.gstmp'))
+      # Final file should not exist.
+      self.assertFalse(os.path.isfile(fpath))
+
+  @unittest.skipUnless(UsingCrcmodExtension(crcmod),
+                       'Sliced download requires fast crcmod.')
+  @SkipForS3('No sliced download support for S3.')
+  def test_cp_sliced_download_component_size_changed(self):
+    """Tests sliced download doesn't break when the boto config changes.
+
+    If the number of components used changes cross-process, the download should
+    be restarted.
+    """
+    bucket_uri = self.CreateBucket()
+    object_uri = self.CreateObject(bucket_uri=bucket_uri, object_name='foo',
+                                   contents='abcd' * self.halt_size)
+    fpath = self.CreateTempFile()
+    test_callback_file = self.CreateTempFile(
+        contents=pickle.dumps(_HaltingCopyCallbackHandler(False, 5)))
+
+    boto_config_for_test = [
+        ('GSUtil', 'resumable_threshold', str(self.halt_size)),
+        ('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
+        ('GSUtil', 'sliced_object_download_component_size',
+         str(self.halt_size//4)),
+        ('GSUtil', 'sliced_object_download_max_components', '4')]
+
+    with SetBotoConfigForTest(boto_config_for_test):
+      stderr = self.RunGsUtil(['cp', '--testcallbackfile', test_callback_file,
+                               suri(object_uri), suri(fpath)],
+                              return_stderr=True, expected_status=1)
+      self.assertIn('not downloaded successfully', stderr)
+
+    boto_config_for_test = [
+        ('GSUtil', 'resumable_threshold', str(self.halt_size)),
+        ('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
+        ('GSUtil', 'sliced_object_download_component_size',
+         str(self.halt_size//2)),
+        ('GSUtil', 'sliced_object_download_max_components', '2')]
+
+    with SetBotoConfigForTest(boto_config_for_test):
+      stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
+                              return_stderr=True)
+      self.assertIn('Sliced download tracker file doesn\'t match ', stderr)
+      self.assertIn('Restarting download from scratch', stderr)
+      self.assertNotIn('Resuming download', stderr)
+
+  @unittest.skipUnless(UsingCrcmodExtension(crcmod),
+                       'Sliced download requires fast crcmod.')
+  @SkipForS3('No sliced download support for S3.')
+  def test_cp_sliced_download_disabled_cross_process(self):
+    """Tests temporary files are not orphaned if sliced download is disabled.
+
+    Specifically, temporary files should be deleted when the corresponding
+    non-sliced download is completed.
+    """
+    bucket_uri = self.CreateBucket()
+    object_uri = self.CreateObject(bucket_uri=bucket_uri, object_name='foo',
+                                   contents='abcd' * self.halt_size)
+    fpath = self.CreateTempFile()
+    test_callback_file = self.CreateTempFile(
+        contents=pickle.dumps(_HaltingCopyCallbackHandler(False, 5)))
+
+    boto_config_for_test = [
+        ('GSUtil', 'resumable_threshold', str(self.halt_size)),
+        ('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
+        ('GSUtil', 'sliced_object_download_max_components', '4')]
+
+    with SetBotoConfigForTest(boto_config_for_test):
+      stderr = self.RunGsUtil(['cp', '--testcallbackfile', test_callback_file,
+                               suri(object_uri), suri(fpath)],
+                              return_stderr=True, expected_status=1)
+      self.assertIn('not downloaded successfully', stderr)
+      # Temporary download file should exist.
+      self.assertTrue(os.path.isfile(fpath + '_.gstmp'))
+
+      # Each tracker file should exist.
+      tracker_filenames = GetSlicedDownloadTrackerFilePaths(
+          StorageUrlFromString(fpath), self.test_api)
+      for tracker_filename in tracker_filenames:
+        self.assertTrue(os.path.isfile(tracker_filename))
+
+    # Disable sliced downloads by increasing the threshold.
+    boto_config_for_test = [
+        ('GSUtil', 'resumable_threshold', str(self.halt_size)),
+        ('GSUtil', 'sliced_object_download_threshold', str(self.halt_size*5)),
+        ('GSUtil', 'sliced_object_download_max_components', '4')]
+
+    with SetBotoConfigForTest(boto_config_for_test):
+      stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
+                              return_stderr=True)
+      self.assertNotIn('Resuming download', stderr)
+      # Temporary download file should have been deleted.
+      self.assertFalse(os.path.isfile(fpath + '_.gstmp'))
+
+      # Each tracker file should have been deleted.
+      for tracker_filename in tracker_filenames:
+        self.assertFalse(os.path.isfile(tracker_filename))
+      with open(fpath, 'r') as f:
+        self.assertEqual(f.read(), 'abcd' * self.halt_size)
+
   @SkipForS3('No resumable upload support for S3.')
   def test_cp_resumable_upload_start_over_http_error(self):
     for start_over_error in (404, 410):
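
The gzip resume hunk above chooses between a sliced-download and a plain-download tracker file at runtime. A minimal sketch of that decision, reusing names that already appear in the hunk (boto.config, crcmod, HumanReadableToBytes, UsingCrcmodExtension, TrackerFileType, DEFAULT_SLICED_OBJECT_DOWNLOAD_THRESHOLD); the helper function wrapping them is hypothetical and not part of the patch:

def _ExpectedTrackerFileType(content_length):
  # Sliced downloads apply only when the object is larger than the configured
  # threshold, the threshold is nonzero, and the compiled crcmod is available;
  # otherwise a plain download (and its tracker file) is used.
  threshold = HumanReadableToBytes(
      boto.config.get('GSUtil', 'sliced_object_download_threshold',
                      DEFAULT_SLICED_OBJECT_DOWNLOAD_THRESHOLD))
  if (content_length > threshold and threshold > 0
      and UsingCrcmodExtension(crcmod)):
    return TrackerFileType.SLICED_DOWNLOAD
  return TrackerFileType.DOWNLOAD
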
diff --git a/catapult/third_party/gsutil/gslib/tests/test_defacl.py b/catapult/third_party/gsutil/gslib/tests/test_defacl.py
index f933955..49cafe7 100644
--- a/catapult/third_party/gsutil/gslib/tests/test_defacl.py
+++ b/catapult/third_party/gsutil/gslib/tests/test_defacl.py
@@ -16,8 +16,9 @@
 
 from __future__ import absolute_import
 
-import os
 import re
+
+from gslib.cs_api_map import ApiSelector
 import gslib.tests.testcase as case
 from gslib.tests.testcase.integration_testcase import SkipForS3
 from gslib.tests.util import ObjectToURI as suri
@@ -68,20 +69,37 @@
                             return_stderr=True, expected_status=1)
     self.assertIn('WRITER cannot be set as a default object ACL', stderr)
 
-  def testChangeDefaultAclPrivate(self):
+  def testChangeDefaultAclEmpty(self):
+    """Tests adding and removing an entry from an empty default object ACL."""
+
     bucket = self.CreateBucket()
-    test_regex = self._MakeScopeRegex(
-        'READER', 'group', self.GROUP_TEST_ADDRESS)
+
+    # First, clear out the default object ACL on the bucket.
     self.RunGsUtil(self._defacl_set_prefix + ['private', suri(bucket)])
     json_text = self.RunGsUtil(self._defacl_get_prefix +
                                [suri(bucket)], return_stdout=True)
-    self.assertRegexpMatches(json_text, r'\[\]\s*')
+    empty_regex = r'\[\]\s*'
+    self.assertRegexpMatches(json_text, empty_regex)
 
+    group_regex = self._MakeScopeRegex(
+        'READER', 'group', self.GROUP_TEST_ADDRESS)
     self.RunGsUtil(self._defacl_ch_prefix +
                    ['-g', self.GROUP_TEST_ADDRESS+':READ', suri(bucket)])
     json_text2 = self.RunGsUtil(self._defacl_get_prefix +
                                 [suri(bucket)], return_stdout=True)
-    self.assertRegexpMatches(json_text2, test_regex)
+    self.assertRegexpMatches(json_text2, group_regex)
+
+    if self.test_api == ApiSelector.JSON:
+      # TODO: Enable when JSON service respects creating a private (no entries)
+      # default object ACL via PATCH. For now, only supported in XML.
+      return
+
+    # After adding and removing a group, the default object ACL should be empty.
+    self.RunGsUtil(self._defacl_ch_prefix +
+                   ['-d', self.GROUP_TEST_ADDRESS, suri(bucket)])
+    json_text3 = self.RunGsUtil(self._defacl_get_prefix +
+                                [suri(bucket)], return_stdout=True)
+    self.assertRegexpMatches(json_text3, empty_regex)
 
   def testChangeMultipleBuckets(self):
     """Tests defacl ch on multiple buckets."""
diff --git a/catapult/third_party/gsutil/gslib/tests/test_hash.py b/catapult/third_party/gsutil/gslib/tests/test_hash.py
index 8d1da4f..033c5f9 100644
--- a/catapult/third_party/gsutil/gslib/tests/test_hash.py
+++ b/catapult/third_party/gsutil/gslib/tests/test_hash.py
@@ -41,7 +41,8 @@
       self.RunCommand('hash', args=['non-existent-file'])
       self.fail('Did not get expected CommandException')
     except CommandException, e:
-      self.assertRaisesRegexp(e, r'No files matched')
+      # assertRaisesRegexp causes issues with python 2.6.
+      self.assertIn('No files matched', e.reason)
 
   def testHashCloudObject(self):
     try:
diff --git a/catapult/third_party/gsutil/gslib/tests/test_mv.py b/catapult/third_party/gsutil/gslib/tests/test_mv.py
index 4c33d1e..035a900 100644
--- a/catapult/third_party/gsutil/gslib/tests/test_mv.py
+++ b/catapult/third_party/gsutil/gslib/tests/test_mv.py
@@ -20,7 +20,7 @@
 
 import gslib.tests.testcase as testcase
 from gslib.tests.util import ObjectToURI as suri
-from gslib.tests.util import PerformsFileToObjectUpload
+from gslib.tests.util import SequentialAndParallelTransfer
 from gslib.util import Retry
 
 
@@ -77,7 +77,7 @@
     self.RunGsUtil(['mv', dir_to_move, suri(bucket_uri)])
     self.AssertNObjectsInBucket(bucket_uri, 2)
 
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   def test_stdin_args(self):
     """Tests mv with the -I option."""
     tmpdir = self.CreateTempDir()
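
The PerformsFileToObjectUpload to SequentialAndParallelTransfer renames here and in the surrounding test modules point at a decorator that reruns the wrapped test under more than one transfer configuration. A hypothetical sketch of that shape only, not the actual gslib.tests.util implementation:

import functools

def _SequentialAndParallelTransferSketch(func):
  """Hypothetical stand-in: run a test once per transfer configuration."""
  @functools.wraps(func)
  def Wrapper(*args, **kwargs):
    # A real implementation would switch boto config values between runs;
    # this sketch only shows the run-the-test-per-configuration shape.
    for _ in ('sequential', 'parallel'):
      func(*args, **kwargs)
  return Wrapper
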
diff --git a/catapult/third_party/gsutil/gslib/tests/test_naming.py b/catapult/third_party/gsutil/gslib/tests/test_naming.py
index b486608..e228d0e 100644
--- a/catapult/third_party/gsutil/gslib/tests/test_naming.py
+++ b/catapult/third_party/gsutil/gslib/tests/test_naming.py
@@ -39,7 +39,6 @@
 from gslib.cloud_api import NotFoundException
 from gslib.cloud_api import ServiceException
 from gslib.exception import CommandException
-from gslib.exception import HashMismatchException
 from gslib.storage_url import StorageUrlFromString
 import gslib.tests.testcase as testcase
 from gslib.tests.util import ObjectToURI as suri
@@ -61,7 +60,7 @@
   fp.flush()
 
 
-# TODO: Re-enable PerformsFileToObjectUpload decorator on tests in this file
+# TODO: Re-enable SequentialAndParallelTransfer decorator on tests in this file
 # once we refactor to a thread-safe mock storage service implementation.
 class GsutilNamingTests(testcase.GsUtilUnitTestCase):
   """Unit tests for gsutil naming logic."""
@@ -87,7 +86,7 @@
                      copy_helper.GetPathBeforeFinalDir(
                          StorageUrlFromString(suri(subdir))))
 
-  # @PerformsFileToObjectUpload
+  # @SequentialAndParallelTransfer
   def testCopyingTopLevelFileToBucket(self):
     """Tests copying one top-level file to a bucket."""
     src_file = self.CreateTempFile(file_name='f0')
@@ -98,7 +97,7 @@
     self.assertEqual(1, len(actual))
     self.assertEqual('f0', actual[0].root_object.name)
 
-  # @PerformsFileToObjectUpload
+  # @SequentialAndParallelTransfer
   def testCopyingMultipleFilesToBucket(self):
     """Tests copying multiple files to a bucket."""
     src_file0 = self.CreateTempFile(file_name='f0')
@@ -113,7 +112,7 @@
     ])
     self.assertEqual(expected, actual)
 
-  # @PerformsFileToObjectUpload
+  # @SequentialAndParallelTransfer
   def testCopyingNestedFileToBucketSubdir(self):
     """Tests copying a nested file to a bucket subdir.
 
@@ -136,7 +135,7 @@
     ])
     self.assertEqual(expected, actual)
 
-  # @PerformsFileToObjectUpload
+  # @SequentialAndParallelTransfer
   def testCopyingAbsolutePathDirToBucket(self):
     """Tests recursively copying absolute path directory to a bucket."""
     dst_bucket_uri = self.CreateBucket()
@@ -153,7 +152,7 @@
         suri(dst_bucket_uri, src_tmpdir, 'dir0', 'dir1', 'nested')])
     self.assertEqual(expected, actual)
 
-  # @PerformsFileToObjectUpload
+  # @SequentialAndParallelTransfer
   def testCopyingRelativePathDirToBucket(self):
     """Tests recursively copying relative directory to a bucket."""
     dst_bucket_uri = self.CreateBucket()
@@ -164,15 +163,15 @@
     expected = set([suri(dst_bucket_uri, 'dir0', 'f1')])
     self.assertEqual(expected, actual)
 
-  # @PerformsFileToObjectUpload
+  # @SequentialAndParallelTransfer
   def testCopyingRelPathSubDirToBucketSubdirWithDollarFolderObj(self):
     """Tests recursively copying relative sub-directory to bucket subdir.
 
     Subdir is signified by a $folder$ object.
     """
-    # Create a $folder$ object to simulate a folder created by GCS manager (or
-    # various other tools), which gsutil understands to mean there is a folder
-    # into which the object is being copied.
+    # Create a $folder$ object to simulate a folder created by the legacy GCS
+    # console (or various other tools), which gsutil understands to mean there
+    # is a folder into which the object is being copied.
     dst_bucket_uri = self.CreateBucket()
     self.CreateObject(bucket_uri=dst_bucket_uri, object_name='abc_$folder$',
                       contents='')
@@ -185,7 +184,7 @@
                     suri(dst_bucket_uri, 'abc', 'dir1', 'f1')])
     self.assertEqual(expected, actual)
 
-  # @PerformsFileToObjectUpload
+  # @SequentialAndParallelTransfer
   def testCopyingRelativePathSubDirToBucketSubdirSignifiedBySlash(self):
     """Tests recursively copying relative sub-directory to bucket subdir.
 
@@ -200,7 +199,7 @@
     expected = set([suri(dst_bucket_uri, 'abc', 'dir1', 'f1')])
     self.assertEqual(expected, actual)
 
-  # @PerformsFileToObjectUpload
+  # @SequentialAndParallelTransfer
   def testCopyingRelativePathSubDirToBucket(self):
     """Tests recursively copying relative sub-directory to a bucket."""
     dst_bucket_uri = self.CreateBucket()
@@ -212,7 +211,7 @@
     expected = set([suri(dst_bucket_uri, 'dir1', 'f1')])
     self.assertEqual(expected, actual)
 
-  # @PerformsFileToObjectUpload
+  # @SequentialAndParallelTransfer
   def testCopyingDotSlashToBucket(self):
     """Tests copying ./ to a bucket produces expected naming."""
     # When running a command like gsutil cp -r . gs://dest we expect the dest
@@ -227,7 +226,7 @@
       expected = set([suri(dst_bucket_uri, 'foo')])
       self.assertEqual(expected, actual)
 
-  # @PerformsFileToObjectUpload
+  # @SequentialAndParallelTransfer
   def testCopyingDirContainingOneFileToBucket(self):
     """Tests copying a directory containing 1 file to a bucket.
 
@@ -301,7 +300,7 @@
     self.assertEqual(1, len(actual))
     self.assertEqual(suri(dst_dir, 'foo'), str(actual[0]))
 
-  # @PerformsFileToObjectUpload
+  # @SequentialAndParallelTransfer
   def testCopyingFileToObjectWithConsecutiveSlashes(self):
     """Tests copying a file to an object containing consecutive slashes."""
     src_file = self.CreateTempFile(file_name='f0')
@@ -386,7 +385,7 @@
       expected = set([os.path.join(dst_dir, 'f1')])
       self.assertEqual(expected, actual)
 
-  # @PerformsFileToObjectUpload
+  # @SequentialAndParallelTransfer
   def testCopyingObjsAndFilesToBucket(self):
     """Tests copying objects and files to a bucket."""
     src_bucket_uri = self.CreateBucket(test_objects=['f1'])
@@ -399,7 +398,7 @@
     expected = set([suri(dst_bucket_uri, 'f1'), suri(dst_bucket_uri, 'f2')])
     self.assertEqual(expected, actual)
 
-  # @PerformsFileToObjectUpload
+  # @SequentialAndParallelTransfer
   def testCopyingSubdirRecursiveToNonexistentSubdir(self):
     """Tests copying a directory with a single file recursively to a bucket.
 
@@ -639,7 +638,7 @@
     except CommandException, e:
       self.assertIn('URL must name a bucket', e.reason)
 
-  # @PerformsFileToObjectUpload
+  # @SequentialAndParallelTransfer
   def testMinusDOptionWorks(self):
     """Tests using gsutil -D option."""
     src_file = self.CreateTempFile(file_name='f0')
@@ -650,47 +649,7 @@
     self.assertEqual(1, len(actual))
     self.assertEqual('f0', actual[0].root_object.name)
 
-  def DownloadTestHelper(self, func):
-    """Test resumable download with custom test function.
-
-    The custom function distorts downloaded data. We expect an exception to be
-    raised and the dest file to be removed.
-
-    Args:
-      func: Custom test function used to distort the downloaded data.
-    """
-    object_uri = self.CreateObject(contents='foo')
-    # Need to explicitly tell the key to populate its etag so that hash
-    # validation will be performed.
-    object_uri.get_key().set_etag()
-    dst_dir = self.CreateTempDir()
-    got_expected_exception = False
-    try:
-      self.RunCommand('cp', [suri(object_uri), dst_dir], test_method=func)
-      self.fail('Did not get expected CommandException')
-    except HashMismatchException:
-      self.assertFalse(os.listdir(dst_dir))
-      got_expected_exception = True
-    except Exception, e:
-      self.fail('Unexpected exception raised: %s' % e)
-    if not got_expected_exception:
-      self.fail('Did not get expected CommandException')
-
-  def testDownloadWithObjectSizeChange(self):
-    """Test resumable download on an object that changes size.
-
-    Size change occurs before the downloaded file's checksum is validated.
-    """
-    self.DownloadTestHelper(_Append)
-
-  def testDownloadWithFileContentChange(self):
-    """Tests resumable download on an object that changes content.
-
-    Content change occurs before the downloaded file's checksum is validated.
-    """
-    self.DownloadTestHelper(_Overwrite)
-
-  # @PerformsFileToObjectUpload
+  # @SequentialAndParallelTransfer
   def testFlatCopyingObjsAndFilesToBucketSubDir(self):
     """Tests copying flatly listed objects and files to bucket subdir."""
     src_bucket_uri = self.CreateBucket(test_objects=['f0', 'd0/f1', 'd1/d2/f2'])
@@ -713,7 +672,7 @@
         expected.add(suri(dst_bucket_uri, 'dst_subdir%d' % i, 'f%d' % j))
     self.assertEqual(expected, actual)
 
-  # @PerformsFileToObjectUpload
+  # @SequentialAndParallelTransfer
   def testRecursiveCopyObjsAndFilesToExistingBucketSubDir(self):
     """Tests recursive copy of objects and files to existing bucket subdir."""
     src_bucket_uri = self.CreateBucket(test_objects=['f0', 'nested/f1'])
@@ -739,7 +698,7 @@
       ])
       self.assertEqual(expected, actual)
 
-  # @PerformsFileToObjectUpload
+  # @SequentialAndParallelTransfer
   def testRecursiveCopyObjsAndFilesToNonExistentBucketSubDir(self):
     """Tests recursive copy of objs + files to non-existent bucket subdir."""
     src_bucket_uri = self.CreateBucket(test_objects=['f0', 'nested/f1'])
@@ -891,7 +850,7 @@
                       suri(dst_bucket_uri, 'dir%d' % i, 'existing')])
       self.assertEqual(expected, actual)
 
-  # @PerformsFileToObjectUpload
+  # @SequentialAndParallelTransfer
   def testCopyingWildcardedFilesToBucketSubDir(self):
     """Tests copying wildcarded files to a bucket subdir."""
     dst_bucket_uri = self.CreateBucket(test_objects=['subdir0/existing',
@@ -911,7 +870,7 @@
                       suri(dst_bucket_uri, 'subdir%d' % i, 'f2')])
       self.assertEqual(expected, actual)
 
-  # @PerformsFileToObjectUpload
+  # @SequentialAndParallelTransfer
   def testCopyingOneNestedFileToBucketSubDir(self):
     """Tests copying one nested file to a bucket subdir."""
     dst_bucket_uri = self.CreateBucket(test_objects=['d0/placeholder',
@@ -1130,16 +1089,6 @@
     except ServiceException, e:
       self.assertEqual(e.status, 409)
 
-  def testRemoveBucketsCommand(self):
-    """Test rb on non-existent bucket."""
-    dst_bucket_uri = self.CreateBucket()
-    try:
-      self.RunCommand(
-          'rb', ['gs://no_exist_%s' % dst_bucket_uri.bucket_name])
-      self.fail('Did not get expected NotFoundException')
-    except NotFoundException, e:
-      self.assertEqual(e.status, 404)
-
   def testRemoveObjsCommand(self):
     """Test rm command on non-existent object."""
     dst_bucket_uri = self.CreateBucket()
diff --git a/catapult/third_party/gsutil/gslib/tests/test_parallel_cp.py b/catapult/third_party/gsutil/gslib/tests/test_parallel_cp.py
index f1958c4..4563a17 100644
--- a/catapult/third_party/gsutil/gslib/tests/test_parallel_cp.py
+++ b/catapult/third_party/gsutil/gslib/tests/test_parallel_cp.py
@@ -36,14 +36,14 @@
 
 import gslib.tests.testcase as testcase
 from gslib.tests.util import ObjectToURI as suri
-from gslib.tests.util import PerformsFileToObjectUpload
+from gslib.tests.util import SequentialAndParallelTransfer
 from gslib.util import Retry
 
 
 class TestParallelCp(testcase.GsUtilIntegrationTestCase):
   """Unit tests for gsutil naming logic."""
 
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   def testCopyingTopLevelFileToBucket(self):
     """Tests copying one top-level file to a bucket."""
     src_file = self.CreateTempFile(file_name='f0')
@@ -53,7 +53,7 @@
     lines = self.AssertNObjectsInBucket(dst_bucket_uri, 1)
     self.assertEqual(suri(dst_bucket_uri, 'f0'), lines[0])
 
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   def testCopyingMultipleFilesToBucket(self):
     """Tests copying multiple files to a bucket."""
     src_file0 = self.CreateTempFile(file_name='f0')
@@ -65,7 +65,7 @@
     self.assertEqual(suri(dst_bucket_uri, 'f0'), lines[0])
     self.assertEqual(suri(dst_bucket_uri, 'f1'), lines[1])
 
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   def testCopyingNestedFileToBucketSubdir(self):
     """Tests copying a nested file to a bucket subdir.
 
@@ -85,7 +85,7 @@
     self.assertEqual(suri(dst_bucket_uri, 'subdir/a'), lines[0])
     self.assertEqual(suri(dst_bucket_uri, 'subdir/obj'), lines[1])
 
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   def testCopyingAbsolutePathDirToBucket(self):
     """Tests recursively copying absolute path directory to a bucket."""
     dst_bucket_uri = self.CreateBucket()
@@ -101,7 +101,7 @@
     self.assertEqual(suri(dst_bucket_uri, src_tmpdir, 'f1'), lines[2])
     self.assertEqual(suri(dst_bucket_uri, src_tmpdir, 'f2.txt'), lines[3])
 
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   def testCopyingDirContainingOneFileToBucket(self):
     """Tests copying a directory containing 1 file to a bucket.
 
@@ -116,7 +116,7 @@
     lines = self.AssertNObjectsInBucket(dst_bucket_uri, 1)
     self.assertEqual(suri(dst_bucket_uri, 'dir1', 'foo'), lines[0])
 
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   def testCopyingFileToObjectWithConsecutiveSlashes(self):
     """Tests copying a file to an object containing consecutive slashes."""
     src_file = self.CreateTempFile(file_name='f0')
@@ -126,7 +126,7 @@
     lines = self.AssertNObjectsInBucket(dst_bucket_uri, 1)
     self.assertEqual(suri(dst_bucket_uri) + '//obj', lines[0])
 
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   def testCopyingObjsAndFilesToBucket(self):
     """Tests copying objects and files to a bucket."""
     src_bucket_uri = self.CreateBucket()
@@ -140,7 +140,7 @@
     self.assertEqual(suri(dst_bucket_uri, 'f1'), lines[0])
     self.assertEqual(suri(dst_bucket_uri, 'f2'), lines[1])
 
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   def testCopyingSubdirRecursiveToNonexistentSubdir(self):
     """Tests copying a directory with a single file recursively to a bucket.
 
@@ -161,7 +161,7 @@
     lines = self.AssertNObjectsInBucket(dst_bucket_uri, 1)
     self.assertEqual(suri(dst_bucket_uri, 'dir3/dir2/foo'), lines[0])
 
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   def testCopyingWildcardedFilesToBucketSubDir(self):
     """Tests copying wildcarded files to a bucket subdir."""
     # Test with and without final slash on dest subdir.
@@ -193,7 +193,7 @@
           self.assertEqual(suri(dst_bucket_uri, 'subdir%d' % i, 'f2'), lines[3])
         _Check1()
 
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   def testCopyingOneNestedFileToBucketSubDir(self):
     """Tests copying one nested file to a bucket subdir."""
     # Test with and without final slash on dest subdir.
diff --git a/catapult/third_party/gsutil/gslib/tests/test_parallelism_framework.py b/catapult/third_party/gsutil/gslib/tests/test_parallelism_framework.py
index e895c16..b3b9f46 100644
--- a/catapult/third_party/gsutil/gslib/tests/test_parallelism_framework.py
+++ b/catapult/third_party/gsutil/gslib/tests/test_parallelism_framework.py
@@ -24,7 +24,10 @@
 from __future__ import absolute_import
 
 import functools
+import os
 import signal
+import threading
+import time
 
 from boto.storage_uri import BucketStorageUri
 from gslib import cs_api_map
@@ -34,8 +37,8 @@
 import gslib.tests.testcase as testcase
 from gslib.tests.testcase.base import RequiresIsolation
 from gslib.tests.util import unittest
+from gslib.util import CheckMultiprocessingAvailableAndInit
 from gslib.util import IS_WINDOWS
-from gslib.util import MultiprocessingIsAvailable
 
 
 # Amount of time for an individual test to run before timing out. We need a
@@ -74,6 +77,18 @@
   return 1
 
 
+def _ReturnProcAndThreadId(cls, args, thread_state=None):
+  return (os.getpid(), threading.currentThread().ident)
+
+
+def _SleepThenReturnProcAndThreadId(cls, args, thread_state=None):
+  # This can fail if the total time to spawn new processes and threads takes
+  # longer than 5 seconds, but if that occurs, then we have a performance
+  # problem that needs to be addressed.
+  time.sleep(5)
+  return _ReturnProcAndThreadId(cls, args, thread_state=thread_state)
+
+
 def _FailureFunc(cls, args, thread_state=None):
   raise CustomException('Failing on purpose.')
 
@@ -214,7 +229,8 @@
     self.logger = CreateGsutilLogger('FakeCommand')
     self.parallel_operations = do_parallel
     self.failure_count = 0
-    self.multiprocessing_is_available = MultiprocessingIsAvailable()[0]
+    self.multiprocessing_is_available = (
+        CheckMultiprocessingAvailableAndInit().is_available)
     self.debug = 0
 
 
@@ -272,6 +288,75 @@
     self.assertEqual(len(args), len(results))
 
   @RequiresIsolation
+  def testNoTasksSingleProcessSingleThread(self):
+    self._TestApplyWithNoTasks(1, 1)
+
+  @RequiresIsolation
+  def testNoTasksSingleProcessMultiThread(self):
+    self._TestApplyWithNoTasks(1, 3)
+
+  @RequiresIsolation
+  @unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
+  def testNoTasksMultiProcessSingleThread(self):
+    self._TestApplyWithNoTasks(3, 1)
+
+  @RequiresIsolation
+  @unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
+  def testNoTasksMultiProcessMultiThread(self):
+    self._TestApplyWithNoTasks(3, 3)
+
+  @Timeout
+  def _TestApplyWithNoTasks(self, process_count, thread_count):
+    """Tests that calling Apply with no tasks releases locks/semaphores."""
+    empty_args = [()]
+
+    for _ in range(process_count * thread_count + 1):
+      self._RunApply(_ReturnOneValue, empty_args, process_count, thread_count)
+
+    # Ensure that work can still be performed.
+    self._TestBasicApply(process_count, thread_count)
+
+  @RequiresIsolation
+  @unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
+  def testApplySaturatesMultiProcessSingleThread(self):
+    self._TestApplySaturatesAvailableProcessesAndThreads(3, 1)
+
+  @RequiresIsolation
+  def testApplySaturatesSingleProcessMultiThread(self):
+    self._TestApplySaturatesAvailableProcessesAndThreads(1, 3)
+
+  @RequiresIsolation
+  @unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
+  def testApplySaturatesMultiProcessMultiThread(self):
+    self._TestApplySaturatesAvailableProcessesAndThreads(3, 3)
+
+  @RequiresIsolation
+  def _TestApplySaturatesAvailableProcessesAndThreads(self, process_count,
+                                                      thread_count):
+    """Tests that created processes and threads evenly share tasks."""
+    calls_per_thread = 2
+    args = [()] * (process_count * thread_count * calls_per_thread)
+    expected_calls_per_thread = calls_per_thread
+
+    if not self.command_class(True).multiprocessing_is_available:
+      # When multiprocessing is unavailable, only a single process is used.
+      # Calls should be evenly distributed across threads.
+      expected_calls_per_thread = calls_per_thread * process_count
+
+    results = self._RunApply(_SleepThenReturnProcAndThreadId, args,
+                             process_count, thread_count)
+    usage_dict = {}  # (process_id, thread_id): number of tasks performed
+    for (process_id, thread_id) in results:
+      usage_dict[(process_id, thread_id)] = (
+          usage_dict.get((process_id, thread_id), 0) + 1)
+
+    for (id_tuple, num_tasks_completed) in usage_dict.iteritems():
+      self.assertEqual(num_tasks_completed, expected_calls_per_thread,
+                       'Process %s thread %s completed %s tasks. Expected: %s' %
+                       (id_tuple[0], id_tuple[1], num_tasks_completed,
+                        expected_calls_per_thread))
+
+  @RequiresIsolation
   def testIteratorFailureSingleProcessSingleThread(self):
     self._TestIteratorFailure(1, 1)
 
@@ -353,12 +438,22 @@
       expected_sum += len(arg)
     self.assertEqual(expected_sum, command_inst.arg_length_sum)
 
-    # Test that shared variables work when the iterator fails.
-    command_inst = self.command_class(True)
-    args = FailingIterator(10, [1, 3, 5])
-    self._RunApply(_ReturnOneValue, args, process_count, thread_count,
-                   command_inst=command_inst, shared_attrs=['failure_count'])
-    self.assertEqual(3, command_inst.failure_count)
+    # Test that shared variables work when the iterator fails at the beginning,
+    # middle, and end.
+    for (failing_iterator, expected_failure_count) in (
+        (FailingIterator(5, [0]), 1),
+        (FailingIterator(10, [1, 3, 5]), 3),
+        (FailingIterator(5, [4]), 1)):
+      command_inst = self.command_class(True)
+      args = failing_iterator
+      self._RunApply(_ReturnOneValue, args, process_count, thread_count,
+                     command_inst=command_inst, shared_attrs=['failure_count'])
+      self.assertEqual(
+          expected_failure_count, command_inst.failure_count,
+          msg='Failure count did not match. Expected: %s, actual: %s '
+          'for failing iterator of size %s, failing indices %s' %
+          (expected_failure_count, command_inst.failure_count,
+           failing_iterator.size, failing_iterator.failure_indices))
 
   @RequiresIsolation
   def testThreadsSurviveExceptionsInFuncSingleProcessSingleThread(self):
@@ -564,11 +659,11 @@
   """Tests parallelism framework works with multiprocessing module unavailable.
 
   Notably, this test has no way to override previous calls
-  to gslib.util.MultiprocessingIsAvailable to prevent the initialization of
-  all of the global variables in command.py, so this still behaves slightly
-  differently than the behavior one would see on a machine where the
-  multiprocessing functionality is actually not available (in particular, it
-  will not catch the case where a global variable that is not available for
-  the sequential path is referenced before initialization).
+  to gslib.util.CheckMultiprocessingAvailableAndInit to prevent the
+  initialization of all of the global variables in command.py, so this still
+  behaves slightly differently than the behavior one would see on a machine
+  where the multiprocessing functionality is actually not available (in
+  particular, it will not catch the case where a global variable that is not
+  available for the sequential path is referenced before initialization).
   """
   command_class = FakeCommandWithoutMultiprocessingModule
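
The saturation test added above asserts that work is spread evenly by tallying the (process id, thread id) tuples returned from _SleepThenReturnProcAndThreadId. A self-contained sketch of that tallying pattern, with made-up sample results:

import collections

# Hypothetical results: one (process_id, thread_id) tuple per completed task.
results = [(4001, 1), (4001, 2), (4002, 1), (4001, 1), (4001, 2), (4002, 1)]

usage = collections.Counter(results)  # (process_id, thread_id) -> task count
for (process_id, thread_id), num_tasks in usage.items():
  print('Process %s thread %s completed %s tasks' %
        (process_id, thread_id, num_tasks))
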
diff --git a/catapult/third_party/gsutil/gslib/tests/test_perfdiag.py b/catapult/third_party/gsutil/gslib/tests/test_perfdiag.py
index 0f0409d..a60155c 100644
--- a/catapult/third_party/gsutil/gslib/tests/test_perfdiag.py
+++ b/catapult/third_party/gsutil/gslib/tests/test_perfdiag.py
@@ -19,8 +19,11 @@
 import os
 import socket
 
+import boto
+
 import gslib.tests.testcase as testcase
 from gslib.tests.util import ObjectToURI as suri
+from gslib.tests.util import RUN_S3_TESTS
 from gslib.tests.util import unittest
 from gslib.util import IS_WINDOWS
 
@@ -30,21 +33,23 @@
 
   # We want to test that perfdiag works both when connecting to the standard gs
   # endpoint, and when connecting to a specific IP or host while setting the
-  # host header. For the 2nd case we resolve storage.googleapis.com to a
-  # specific IP and connect to that explicitly.
-  _gs_ip = socket.gethostbyname('storage.googleapis.com')
+  # host header. For the 2nd case we resolve gs_host (normally
+  # storage.googleapis.com) to a specific IP and connect to that explicitly.
+  _gs_host = boto.config.get(
+      'Credentials', 'gs_host', boto.gs.connection.GSConnection.DefaultHost)
+  _gs_ip = socket.gethostbyname(_gs_host)
   _custom_endpoint_flags = [
       '-o', 'Credentials:gs_host=' + _gs_ip,
-      '-o', 'Credentials:gs_host_header=storage.googleapis.com',
+      '-o', 'Credentials:gs_host_header=' + _gs_host,
       # TODO: gsutil-beta: Add host header support for JSON
       '-o', 'Boto:https_validate_certificates=False']
 
   def _should_run_with_custom_endpoints(self):
     # Host headers are only supported for XML, and not when
     # using environment variables for proxies.
-    return self.test_api == 'XML' and not (os.environ.get('http_proxy') or
-                                           os.environ.get('https_proxy') or
-                                           os.environ.get('HTTPS_PROXY'))
+    return (self.test_api == 'XML' and not RUN_S3_TESTS and not
+            (os.environ.get('http_proxy') or os.environ.get('https_proxy') or
+             os.environ.get('HTTPS_PROXY')))
 
   def test_latency(self):
     bucket_uri = self.CreateBucket()
@@ -54,43 +59,73 @@
       self.RunGsUtil(self._custom_endpoint_flags + cmd)
     self.AssertNObjectsInBucket(bucket_uri, 0, versioned=True)
 
-  def _run_basic_wthru_or_rthru(self, test_name, num_processes, num_threads):
+  def _run_throughput_test(self, test_name, num_processes, num_threads,
+                           parallelism_strategy=None):
     bucket_uri = self.CreateBucket()
+
     cmd = ['perfdiag', '-n', str(num_processes * num_threads),
-           '-s', '1024', '-c', str(num_processes),
-           '-k', str(num_threads), '-t', test_name, suri(bucket_uri)]
+           '-s', '1024', '-c', str(num_processes), '-k', str(num_threads),
+           '-t', test_name]
+    if parallelism_strategy:
+      cmd += ['-p', parallelism_strategy]
+    cmd += [suri(bucket_uri)]
+
     self.RunGsUtil(cmd)
     if self._should_run_with_custom_endpoints():
       self.RunGsUtil(self._custom_endpoint_flags + cmd)
     self.AssertNObjectsInBucket(bucket_uri, 0, versioned=True)
 
+  def _run_each_parallel_throughput_test(self, test_name, num_processes,
+                                         num_threads):
+    self._run_throughput_test(test_name, num_processes, num_threads, 'fan')
+    if not RUN_S3_TESTS:
+      self._run_throughput_test(test_name, num_processes, num_threads, 'slice')
+      self._run_throughput_test(test_name, num_processes, num_threads, 'both')
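As an illustration, a call such as self._run_throughput_test('wthru', 2, 2, 'fan') assembles a perfdiag invocation along these lines (the bucket URI below is a hypothetical placeholder; the real tests use a bucket from self.CreateBucket()):

cmd = ['perfdiag',
       '-n', '4',      # num_processes * num_threads requests
       '-s', '1024',   # object size in bytes
       '-c', '2',      # processes
       '-k', '2',      # threads per process
       '-t', 'wthru',  # test name
       '-p', 'fan',    # parallelism strategy: 'fan', 'slice', or 'both'
       'gs://my-test-bucket']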
+
   def test_write_throughput_single_process_single_thread(self):
-    self._run_basic_wthru_or_rthru('wthru', 1, 1)
+    self._run_throughput_test('wthru', 1, 1)
+    self._run_throughput_test('wthru_file', 1, 1)
 
   def test_write_throughput_single_process_multi_thread(self):
-    self._run_basic_wthru_or_rthru('wthru', 1, 2)
+    self._run_each_parallel_throughput_test('wthru', 1, 2)
+    self._run_each_parallel_throughput_test('wthru_file', 1, 2)
 
   @unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
   def test_write_throughput_multi_process_single_thread(self):
-    self._run_basic_wthru_or_rthru('wthru', 2, 1)
+    self._run_each_parallel_throughput_test('wthru', 2, 1)
+    self._run_each_parallel_throughput_test('wthru_file', 2, 1)
 
   @unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
   def test_write_throughput_multi_process_multi_thread(self):
-    self._run_basic_wthru_or_rthru('wthru', 2, 2)
+    self._run_each_parallel_throughput_test('wthru', 2, 2)
+    self._run_each_parallel_throughput_test('wthru_file', 2, 2)
 
   def test_read_throughput_single_process_single_thread(self):
-    self._run_basic_wthru_or_rthru('rthru', 1, 1)
+    self._run_throughput_test('rthru', 1, 1)
+    self._run_throughput_test('rthru_file', 1, 1)
 
   def test_read_throughput_single_process_multi_thread(self):
-    self._run_basic_wthru_or_rthru('rthru', 1, 2)
+    self._run_each_parallel_throughput_test('rthru', 1, 2)
+    self._run_each_parallel_throughput_test('rthru_file', 1, 2)
 
   @unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
   def test_read_throughput_multi_process_single_thread(self):
-    self._run_basic_wthru_or_rthru('rthru', 2, 1)
+    self._run_each_parallel_throughput_test('rthru', 2, 1)
+    self._run_each_parallel_throughput_test('rthru_file', 2, 1)
 
   @unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
   def test_read_throughput_multi_process_multi_thread(self):
-    self._run_basic_wthru_or_rthru('rthru', 2, 2)
+    self._run_each_parallel_throughput_test('rthru', 2, 2)
+    self._run_each_parallel_throughput_test('rthru_file', 2, 2)
+
+  @unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
+  def test_read_and_write_file_ordering(self):
+    """Tests that rthru_file and wthru_file work when run together."""
+    self._run_throughput_test('rthru_file,wthru_file', 1, 1)
+    self._run_throughput_test('rthru_file,wthru_file', 2, 2, 'fan')
+    if not RUN_S3_TESTS:
+      self._run_throughput_test('rthru_file,wthru_file', 2, 2, 'slice')
+      self._run_throughput_test('rthru_file,wthru_file', 2, 2, 'both')
 
   def test_input_output(self):
     outpath = self.CreateTempFile()
@@ -109,7 +144,7 @@
     stderr = self.RunGsUtil(
         ['perfdiag', '-n', '1', '-s', '3pb', '-t', 'wthru', 'gs://foobar'],
         expected_status=1, return_stderr=True)
-    self.assertIn('Maximum throughput file size', stderr)
+    self.assertIn('in-memory tests maximum file size', stderr)
 
   def test_listing(self):
     bucket_uri = self.CreateBucket()
diff --git a/catapult/third_party/gsutil/gslib/tests/test_rb.py b/catapult/third_party/gsutil/gslib/tests/test_rb.py
index 245f4e4..046084e 100644
--- a/catapult/third_party/gsutil/gslib/tests/test_rb.py
+++ b/catapult/third_party/gsutil/gslib/tests/test_rb.py
@@ -44,6 +44,11 @@
     self.assertIn('Bucket is not empty. Note: this is a versioned bucket',
                   stderr)
 
+  def test_rb_nonexistent_bucket(self):
+    stderr = self.RunGsUtil(['rb', 'gs://%s' % self.nonexistent_bucket_name],
+                            return_stderr=True, expected_status=1)
+    self.assertIn('does not exist.', stderr)
+
   def test_rb_minus_f(self):
     bucket_uri = self.CreateBucket()
     stderr = self.RunGsUtil([
diff --git a/catapult/third_party/gsutil/gslib/tests/test_rm.py b/catapult/third_party/gsutil/gslib/tests/test_rm.py
index d4fc411..06651a4 100644
--- a/catapult/third_party/gsutil/gslib/tests/test_rm.py
+++ b/catapult/third_party/gsutil/gslib/tests/test_rm.py
@@ -435,9 +435,8 @@
     ouri1 = self.CreateObject(bucket_uri=buri1,
                               object_name='foo',
                               contents='foocontents')
-    ouri2 = self.CreateObject(bucket_uri=buri1,
-                              object_name='bar',
-                              contents='barcontents')
+    self.CreateObject(bucket_uri=buri1, object_name='bar',
+                      contents='barcontents')
     ouri3 = self.CreateObject(bucket_uri=buri1,
                               object_name='baz',
                               contents='bazcontents')
@@ -457,3 +456,19 @@
                                    stdin=stdin)
     self.AssertNObjectsInBucket(buri1, 1, versioned=True)
     self.AssertNObjectsInBucket(buri2, 0, versioned=True)
+
+  def test_rm_nonexistent_bucket_recursive(self):
+    stderr = self.RunGsUtil(
+        ['rm', '-rf', '%s://%s' % (self.default_provider,
+                                   self.nonexistent_bucket_name)],
+        return_stderr=True, expected_status=1)
+    self.assertIn('Encountered non-existent bucket', stderr)
+
+  def test_rm_multiple_nonexistent_objects(self):
+    bucket_uri = self.CreateBucket()
+    nonexistent_object1 = suri(bucket_uri, 'nonexistent1')
+    nonexistent_object2 = suri(bucket_uri, 'nonexistent2')
+    stderr = self.RunGsUtil(
+        ['rm', '-rf', nonexistent_object1, nonexistent_object2],
+        return_stderr=True, expected_status=1)
+    self.assertIn('2 files/objects could not be removed.', stderr)
diff --git a/catapult/third_party/gsutil/gslib/tests/test_rsync.py b/catapult/third_party/gsutil/gslib/tests/test_rsync.py
index 0bf6c5f..40108b8 100644
--- a/catapult/third_party/gsutil/gslib/tests/test_rsync.py
+++ b/catapult/third_party/gsutil/gslib/tests/test_rsync.py
@@ -21,7 +21,7 @@
 import gslib.tests.testcase as testcase
 from gslib.tests.testcase.integration_testcase import SkipForS3
 from gslib.tests.util import ObjectToURI as suri
-from gslib.tests.util import PerformsFileToObjectUpload
+from gslib.tests.util import SequentialAndParallelTransfer
 from gslib.tests.util import SetBotoConfigForTest
 from gslib.tests.util import unittest
 from gslib.util import IS_WINDOWS
@@ -50,6 +50,7 @@
   """
   return set(l[len(start_point):] for l in listing.strip().split('\n'))
 
+
 # TODO: Add inspection to the retry wrappers in this test suite where the state
 # at the end of a retry block is depended upon by subsequent tests (since
 # listing content can vary depending on which backend server is reached until
@@ -112,19 +113,20 @@
   def test_bucket_to_bucket(self):
     """Tests that flat and recursive rsync between 2 buckets works correctly."""
     # Create 2 buckets with 1 overlapping object, 1 extra object at root level
-    # in each, and 1 extra object 1 level down in each. Make the overlapping
-    # objects named the same but with different content, to test that we detect
-    # and properly copy in that case.
+    # in each, and 1 extra object 1 level down in each, where one of the objects
+    # starts with "." to test that we don't skip those objects. Make the
+    # overlapping objects named the same but with different content, to test
+    # that we detect and properly copy in that case.
     bucket1_uri = self.CreateBucket()
     bucket2_uri = self.CreateBucket()
     self.CreateObject(bucket_uri=bucket1_uri, object_name='obj1',
                       contents='obj1')
-    self.CreateObject(bucket_uri=bucket1_uri, object_name='obj2',
-                      contents='obj2')
+    self.CreateObject(bucket_uri=bucket1_uri, object_name='.obj2',
+                      contents='.obj2')
     self.CreateObject(bucket_uri=bucket1_uri, object_name='subdir/obj3',
                       contents='subdir/obj3')
-    self.CreateObject(bucket_uri=bucket2_uri, object_name='obj2',
-                      contents='OBJ2')
+    self.CreateObject(bucket_uri=bucket2_uri, object_name='.obj2',
+                      contents='.OBJ2')
     self.CreateObject(bucket_uri=bucket2_uri, object_name='obj4',
                       contents='obj4')
     self.CreateObject(bucket_uri=bucket2_uri, object_name='subdir/obj5',
@@ -138,19 +140,19 @@
       listing1 = _TailSet(suri(bucket1_uri), self._FlatListBucket(bucket1_uri))
       listing2 = _TailSet(suri(bucket2_uri), self._FlatListBucket(bucket2_uri))
       # First bucket should have un-altered content.
-      self.assertEquals(listing1, set(['/obj1', '/obj2', '/subdir/obj3']))
+      self.assertEquals(listing1, set(['/obj1', '/.obj2', '/subdir/obj3']))
       # Second bucket should have new objects added from source bucket (without
       # removing extraneous object found in dest bucket), and without the
       # subdir objects synchronized.
       self.assertEquals(listing2,
-                        set(['/obj1', '/obj2', '/obj4', '/subdir/obj5']))
+                        set(['/obj1', '/.obj2', '/obj4', '/subdir/obj5']))
       # Assert that the src/dest objects that had same length but different
       # content were correctly synchronized (bucket to bucket sync uses
       # checksums).
-      self.assertEquals('obj2', self.RunGsUtil(
-          ['cat', suri(bucket1_uri, 'obj2')], return_stdout=True))
-      self.assertEquals('obj2', self.RunGsUtil(
-          ['cat', suri(bucket2_uri, 'obj2')], return_stdout=True))
+      self.assertEquals('.obj2', self.RunGsUtil(
+          ['cat', suri(bucket1_uri, '.obj2')], return_stdout=True))
+      self.assertEquals('.obj2', self.RunGsUtil(
+          ['cat', suri(bucket2_uri, '.obj2')], return_stdout=True))
     _Check1()
 
     # Use @Retry as hedge against bucket listing eventual consistency.
@@ -167,7 +169,7 @@
     self.CreateObject(bucket_uri=bucket2_uri, object_name='obj7',
                       contents='obj7')
     self.RunGsUtil(['rm', suri(bucket1_uri, 'obj1')])
-    self.RunGsUtil(['rm', suri(bucket2_uri, 'obj2')])
+    self.RunGsUtil(['rm', suri(bucket2_uri, '.obj2')])
 
     # Use @Retry as hedge against bucket listing eventual consistency.
     @Retry(AssertionError, tries=3, timeout_secs=1)
@@ -176,11 +178,11 @@
       listing1 = _TailSet(suri(bucket1_uri), self._FlatListBucket(bucket1_uri))
       listing2 = _TailSet(suri(bucket2_uri), self._FlatListBucket(bucket2_uri))
       # First bucket should have un-altered content.
-      self.assertEquals(listing1, set(['/obj2', '/obj6', '/subdir/obj3']))
+      self.assertEquals(listing1, set(['/.obj2', '/obj6', '/subdir/obj3']))
       # Second bucket should have objects that were newly added to first bucket
       # (without removing extraneous dest bucket objects), and without the
       # subdir objects synchronized.
-      self.assertEquals(listing2, set(['/obj1', '/obj2', '/obj4', '/obj6',
+      self.assertEquals(listing2, set(['/obj1', '/.obj2', '/obj4', '/obj6',
                                        '/obj7', '/subdir/obj3',
                                        '/subdir/obj5']))
     _Check3()
@@ -197,19 +199,20 @@
   def test_bucket_to_bucket_minus_d(self):
     """Tests that flat and recursive rsync between 2 buckets works correctly."""
     # Create 2 buckets with 1 overlapping object, 1 extra object at root level
-    # in each, and 1 extra object 1 level down in each. Make the overlapping
-    # objects named the same but with different content, to test that we detect
-    # and properly copy in that case.
+    # in each, and 1 extra object 1 level down in each, where one of the objects
+    # starts with "." to test that we don't skip those objects. Make the
+    # overlapping objects named the same but with different content, to test
+    # that we detect and properly copy in that case.
     bucket1_uri = self.CreateBucket()
     bucket2_uri = self.CreateBucket()
     self.CreateObject(bucket_uri=bucket1_uri, object_name='obj1',
                       contents='obj1')
-    self.CreateObject(bucket_uri=bucket1_uri, object_name='obj2',
-                      contents='obj2')
+    self.CreateObject(bucket_uri=bucket1_uri, object_name='.obj2',
+                      contents='.obj2')
     self.CreateObject(bucket_uri=bucket1_uri, object_name='subdir/obj3',
                       contents='subdir/obj3')
-    self.CreateObject(bucket_uri=bucket2_uri, object_name='obj2',
-                      contents='OBJ2')
+    self.CreateObject(bucket_uri=bucket2_uri, object_name='.obj2',
+                      contents='.OBJ2')
     self.CreateObject(bucket_uri=bucket2_uri, object_name='obj4',
                       contents='obj4')
     self.CreateObject(bucket_uri=bucket2_uri, object_name='subdir/obj5',
@@ -223,17 +226,17 @@
       listing1 = _TailSet(suri(bucket1_uri), self._FlatListBucket(bucket1_uri))
       listing2 = _TailSet(suri(bucket2_uri), self._FlatListBucket(bucket2_uri))
       # First bucket should have un-altered content.
-      self.assertEquals(listing1, set(['/obj1', '/obj2', '/subdir/obj3']))
+      self.assertEquals(listing1, set(['/obj1', '/.obj2', '/subdir/obj3']))
       # Second bucket should have content like first bucket but without the
       # subdir objects synchronized.
-      self.assertEquals(listing2, set(['/obj1', '/obj2', '/subdir/obj5']))
+      self.assertEquals(listing2, set(['/obj1', '/.obj2', '/subdir/obj5']))
       # Assert that the src/dest objects that had same length but different
       # content were correctly synchronized (bucket to bucket sync uses
       # checksums).
-      self.assertEquals('obj2', self.RunGsUtil(
-          ['cat', suri(bucket1_uri, 'obj2')], return_stdout=True))
-      self.assertEquals('obj2', self.RunGsUtil(
-          ['cat', suri(bucket2_uri, 'obj2')], return_stdout=True))
+      self.assertEquals('.obj2', self.RunGsUtil(
+          ['cat', suri(bucket1_uri, '.obj2')], return_stdout=True))
+      self.assertEquals('.obj2', self.RunGsUtil(
+          ['cat', suri(bucket2_uri, '.obj2')], return_stdout=True))
     _Check1()
 
     # Use @Retry as hedge against bucket listing eventual consistency.
@@ -251,7 +254,7 @@
     self.CreateObject(bucket_uri=bucket2_uri, object_name='obj7',
                       contents='obj7')
     self.RunGsUtil(['rm', suri(bucket1_uri, 'obj1')])
-    self.RunGsUtil(['rm', suri(bucket2_uri, 'obj2')])
+    self.RunGsUtil(['rm', suri(bucket2_uri, '.obj2')])
 
     # Use @Retry as hedge against bucket listing eventual consistency.
     @Retry(AssertionError, tries=3, timeout_secs=1)
@@ -261,10 +264,10 @@
       listing1 = _TailSet(suri(bucket1_uri), self._FlatListBucket(bucket1_uri))
       listing2 = _TailSet(suri(bucket2_uri), self._FlatListBucket(bucket2_uri))
       # First bucket should have un-altered content.
-      self.assertEquals(listing1, set(['/obj2', '/obj6', '/subdir/obj3']))
+      self.assertEquals(listing1, set(['/.obj2', '/obj6', '/subdir/obj3']))
       # Second bucket should have content like first bucket but without the
       # subdir objects synchronized.
-      self.assertEquals(listing2, set(['/obj2', '/obj6', '/subdir/obj3']))
+      self.assertEquals(listing2, set(['/.obj2', '/obj6', '/subdir/obj3']))
     _Check3()
 
     # Use @Retry as hedge against bucket listing eventual consistency.
@@ -277,13 +280,14 @@
     _Check4()
 
   # Test sequential upload as well as parallel composite upload case.
-  @PerformsFileToObjectUpload
+  @SequentialAndParallelTransfer
   @unittest.skipUnless(UsingCrcmodExtension(crcmod),
                        'Test requires fast crcmod.')
   def test_dir_to_bucket_minus_d(self):
     """Tests that flat and recursive rsync dir to bucket works correctly."""
     # Create dir and bucket with 1 overlapping object, 1 extra object at root
-    # level in each, and 1 extra object 1 level down in each. Make the
+    # level in each, and 1 extra object 1 level down in each, where one of the
+    # objects starts with "." to test that we don't skip those objects. Make the
     # overlapping objects named the same but with different content, to test
     # that we detect and properly copy in that case.
     tmpdir = self.CreateTempDir()
@@ -291,17 +295,17 @@
     os.mkdir(subdir)
     bucket_uri = self.CreateBucket()
     self.CreateTempFile(tmpdir=tmpdir, file_name='obj1', contents='obj1')
-    self.CreateTempFile(tmpdir=tmpdir, file_name='obj2', contents='obj2')
+    self.CreateTempFile(tmpdir=tmpdir, file_name='.obj2', contents='.obj2')
     self.CreateTempFile(tmpdir=subdir, file_name='obj3', contents='subdir/obj3')
-    self.CreateObject(bucket_uri=bucket_uri, object_name='obj2',
-                      contents='OBJ2')
+    self.CreateObject(bucket_uri=bucket_uri, object_name='.obj2',
+                      contents='.OBJ2')
     self.CreateObject(bucket_uri=bucket_uri, object_name='obj4',
                       contents='obj4')
     self.CreateObject(bucket_uri=bucket_uri, object_name='subdir/obj5',
                       contents='subdir/obj5')
 
     # Need to make sure the bucket listing is caught-up, otherwise the
-    # first rsync may not see obj2 and overwrite it.
+    # first rsync may not see .obj2 and overwrite it.
     self.AssertNObjectsInBucket(bucket_uri, 3)
 
     # Use @Retry as hedge against bucket listing eventual consistency.
@@ -312,17 +316,17 @@
       listing1 = _TailSet(tmpdir, self._FlatListDir(tmpdir))
       listing2 = _TailSet(suri(bucket_uri), self._FlatListBucket(bucket_uri))
       # Dir should have un-altered content.
-      self.assertEquals(listing1, set(['/obj1', '/obj2', '/subdir/obj3']))
+      self.assertEquals(listing1, set(['/obj1', '/.obj2', '/subdir/obj3']))
       # Bucket should have content like dir but without the subdir objects
       # synchronized.
-      self.assertEquals(listing2, set(['/obj1', '/obj2', '/subdir/obj5']))
+      self.assertEquals(listing2, set(['/obj1', '/.obj2', '/subdir/obj5']))
       # Assert that the src/dest objects that had same length but different
       # content were not synchronized (dir to bucket sync doesn't use checksums
       # unless you specify -c).
-      with open(os.path.join(tmpdir, 'obj2')) as f:
-        self.assertEquals('obj2', '\n'.join(f.readlines()))
-      self.assertEquals('OBJ2', self.RunGsUtil(
-          ['cat', suri(bucket_uri, 'obj2')], return_stdout=True))
+      with open(os.path.join(tmpdir, '.obj2')) as f:
+        self.assertEquals('.obj2', '\n'.join(f.readlines()))
+      self.assertEquals('.OBJ2', self.RunGsUtil(
+          ['cat', suri(bucket_uri, '.obj2')], return_stdout=True))
     _Check1()
 
     # Use @Retry as hedge against bucket listing eventual consistency.
@@ -342,16 +346,16 @@
       listing1 = _TailSet(tmpdir, self._FlatListDir(tmpdir))
       listing2 = _TailSet(suri(bucket_uri), self._FlatListBucket(bucket_uri))
       # Dir should have un-altered content.
-      self.assertEquals(listing1, set(['/obj1', '/obj2', '/subdir/obj3']))
+      self.assertEquals(listing1, set(['/obj1', '/.obj2', '/subdir/obj3']))
       # Bucket should have content like dir but without the subdir objects
       # synchronized.
-      self.assertEquals(listing2, set(['/obj1', '/obj2', '/subdir/obj5']))
+      self.assertEquals(listing2, set(['/obj1', '/.obj2', '/subdir/obj5']))
       # Assert that the src/dest objects that had same length but different
       # content were synchronized (dir to bucket sync with -c uses checksums).
-      with open(os.path.join(tmpdir, 'obj2')) as f:
-        self.assertEquals('obj2', '\n'.join(f.readlines()))
-      self.assertEquals('obj2', self.RunGsUtil(
-          ['cat', suri(bucket_uri, 'obj2')], return_stdout=True))
+      with open(os.path.join(tmpdir, '.obj2')) as f:
+        self.assertEquals('.obj2', '\n'.join(f.readlines()))
+      self.assertEquals('.obj2', self.RunGsUtil(
+          ['cat', suri(bucket_uri, '.obj2')], return_stdout=True))
     _Check3()
 
     # Use @Retry as hedge against bucket listing eventual consistency.
@@ -367,7 +371,7 @@
     self.CreateObject(bucket_uri=bucket_uri, object_name='obj7',
                       contents='obj7')
     os.unlink(os.path.join(tmpdir, 'obj1'))
-    self.RunGsUtil(['rm', suri(bucket_uri, 'obj2')])
+    self.RunGsUtil(['rm', suri(bucket_uri, '.obj2')])
 
     # Use @Retry as hedge against bucket listing eventual consistency.
     @Retry(AssertionError, tries=3, timeout_secs=1)
@@ -376,10 +380,10 @@
       listing1 = _TailSet(tmpdir, self._FlatListDir(tmpdir))
       listing2 = _TailSet(suri(bucket_uri), self._FlatListBucket(bucket_uri))
       # Dir should have un-altered content.
-      self.assertEquals(listing1, set(['/obj2', '/obj6', '/subdir/obj3']))
+      self.assertEquals(listing1, set(['/.obj2', '/obj6', '/subdir/obj3']))
       # Bucket should have content like dir but without the subdir objects
       # synchronized.
-      self.assertEquals(listing2, set(['/obj2', '/obj6', '/subdir/obj3']))
+      self.assertEquals(listing2, set(['/.obj2', '/obj6', '/subdir/obj3']))
     _Check5()
 
     # Use @Retry as hedge against bucket listing eventual consistency.
@@ -395,7 +399,8 @@
   def test_dir_to_dir_minus_d(self):
     """Tests that flat and recursive rsync dir to dir works correctly."""
     # Create 2 dirs with 1 overlapping file, 1 extra file at root
-    # level in each, and 1 extra file 1 level down in each. Make the
+    # level in each, and 1 extra file 1 level down in each, where one of the
+    # objects starts with "." to test that we don't skip those objects. Make the
     # overlapping files named the same but with different content, to test
     # that we detect and properly copy in that case.
     tmpdir1 = self.CreateTempDir()
@@ -405,10 +410,10 @@
     os.mkdir(subdir1)
     os.mkdir(subdir2)
     self.CreateTempFile(tmpdir=tmpdir1, file_name='obj1', contents='obj1')
-    self.CreateTempFile(tmpdir=tmpdir1, file_name='obj2', contents='obj2')
+    self.CreateTempFile(tmpdir=tmpdir1, file_name='.obj2', contents='.obj2')
     self.CreateTempFile(
         tmpdir=subdir1, file_name='obj3', contents='subdir1/obj3')
-    self.CreateTempFile(tmpdir=tmpdir2, file_name='obj2', contents='OBJ2')
+    self.CreateTempFile(tmpdir=tmpdir2, file_name='.obj2', contents='.OBJ2')
     self.CreateTempFile(tmpdir=tmpdir2, file_name='obj4', contents='obj4')
     self.CreateTempFile(
         tmpdir=subdir2, file_name='obj5', contents='subdir2/obj5')
@@ -417,17 +422,17 @@
     listing1 = _TailSet(tmpdir1, self._FlatListDir(tmpdir1))
     listing2 = _TailSet(tmpdir2, self._FlatListDir(tmpdir2))
     # dir1 should have un-altered content.
-    self.assertEquals(listing1, set(['/obj1', '/obj2', '/subdir1/obj3']))
+    self.assertEquals(listing1, set(['/obj1', '/.obj2', '/subdir1/obj3']))
     # dir2 should have content like dir1 but without the subdir1 objects
     # synchronized.
-    self.assertEquals(listing2, set(['/obj1', '/obj2', '/subdir2/obj5']))
+    self.assertEquals(listing2, set(['/obj1', '/.obj2', '/subdir2/obj5']))
     # Assert that the src/dest objects that had same length but different
     # checksums were not synchronized (dir to dir sync doesn't use checksums
     # unless you specify -c).
-    with open(os.path.join(tmpdir1, 'obj2')) as f:
-      self.assertEquals('obj2', '\n'.join(f.readlines()))
-    with open(os.path.join(tmpdir2, 'obj2')) as f:
-      self.assertEquals('OBJ2', '\n'.join(f.readlines()))
+    with open(os.path.join(tmpdir1, '.obj2')) as f:
+      self.assertEquals('.obj2', '\n'.join(f.readlines()))
+    with open(os.path.join(tmpdir2, '.obj2')) as f:
+      self.assertEquals('.OBJ2', '\n'.join(f.readlines()))
 
     # Use @Retry as hedge against bucket listing eventual consistency.
     @Retry(AssertionError, tries=3, timeout_secs=1)
@@ -442,16 +447,16 @@
     listing1 = _TailSet(tmpdir1, self._FlatListDir(tmpdir1))
     listing2 = _TailSet(tmpdir2, self._FlatListDir(tmpdir2))
     # dir1 should have un-altered content.
-    self.assertEquals(listing1, set(['/obj1', '/obj2', '/subdir1/obj3']))
+    self.assertEquals(listing1, set(['/obj1', '/.obj2', '/subdir1/obj3']))
     # dir2 should have content like dir but without the subdir objects
     # synchronized.
-    self.assertEquals(listing2, set(['/obj1', '/obj2', '/subdir2/obj5']))
+    self.assertEquals(listing2, set(['/obj1', '/.obj2', '/subdir2/obj5']))
     # Assert that the src/dest objects that had same length but different
     # content were synchronized (dir to dir sync with -c uses checksums).
-    with open(os.path.join(tmpdir1, 'obj2')) as f:
-      self.assertEquals('obj2', '\n'.join(f.readlines()))
-    with open(os.path.join(tmpdir1, 'obj2')) as f:
-      self.assertEquals('obj2', '\n'.join(f.readlines()))
+    with open(os.path.join(tmpdir1, '.obj2')) as f:
+      self.assertEquals('.obj2', '\n'.join(f.readlines()))
+    with open(os.path.join(tmpdir2, '.obj2')) as f:
+      self.assertEquals('.obj2', '\n'.join(f.readlines()))
 
     # Use @Retry as hedge against bucket listing eventual consistency.
     @Retry(AssertionError, tries=3, timeout_secs=1)
@@ -465,16 +470,16 @@
     self.CreateTempFile(tmpdir=tmpdir1, file_name='obj6', contents='obj6')
     self.CreateTempFile(tmpdir=tmpdir2, file_name='obj7', contents='obj7')
     os.unlink(os.path.join(tmpdir1, 'obj1'))
-    os.unlink(os.path.join(tmpdir2, 'obj2'))
+    os.unlink(os.path.join(tmpdir2, '.obj2'))
 
     self.RunGsUtil(['rsync', '-d', '-r', tmpdir1, tmpdir2])
     listing1 = _TailSet(tmpdir1, self._FlatListDir(tmpdir1))
     listing2 = _TailSet(tmpdir2, self._FlatListDir(tmpdir2))
     # dir1 should have un-altered content.
-    self.assertEquals(listing1, set(['/obj2', '/obj6', '/subdir1/obj3']))
+    self.assertEquals(listing1, set(['/.obj2', '/obj6', '/subdir1/obj3']))
     # dir2 should have content like dir but without the subdir objects
     # synchronized.
-    self.assertEquals(listing2, set(['/obj2', '/obj6', '/subdir1/obj3']))
+    self.assertEquals(listing2, set(['/.obj2', '/obj6', '/subdir1/obj3']))
 
     # Use @Retry as hedge against bucket listing eventual consistency.
     @Retry(AssertionError, tries=3, timeout_secs=1)
@@ -518,7 +523,8 @@
   def test_bucket_to_dir_minus_d(self):
     """Tests that flat and recursive rsync bucket to dir works correctly."""
     # Create bucket and dir with 1 overlapping object, 1 extra object at root
-    # level in each, and 1 extra object 1 level down in each. Make the
+    # level in each, and 1 extra object 1 level down in each, where one of the
+    # objects starts with "." to test that we don't skip those objects. Make the
     # overlapping objects named the same but with different content, to test
     # that we detect and properly copy in that case.
     bucket_uri = self.CreateBucket()
@@ -527,11 +533,11 @@
     os.mkdir(subdir)
     self.CreateObject(bucket_uri=bucket_uri, object_name='obj1',
                       contents='obj1')
-    self.CreateObject(bucket_uri=bucket_uri, object_name='obj2',
-                      contents='obj2')
+    self.CreateObject(bucket_uri=bucket_uri, object_name='.obj2',
+                      contents='.obj2')
     self.CreateObject(bucket_uri=bucket_uri, object_name='subdir/obj3',
                       contents='subdir/obj3')
-    self.CreateTempFile(tmpdir=tmpdir, file_name='obj2', contents='OBJ2')
+    self.CreateTempFile(tmpdir=tmpdir, file_name='.obj2', contents='.OBJ2')
     self.CreateTempFile(tmpdir=tmpdir, file_name='obj4', contents='obj4')
     self.CreateTempFile(tmpdir=subdir, file_name='obj5', contents='subdir/obj5')
 
@@ -543,17 +549,17 @@
       listing1 = _TailSet(suri(bucket_uri), self._FlatListBucket(bucket_uri))
       listing2 = _TailSet(tmpdir, self._FlatListDir(tmpdir))
       # Bucket should have un-altered content.
-      self.assertEquals(listing1, set(['/obj1', '/obj2', '/subdir/obj3']))
+      self.assertEquals(listing1, set(['/obj1', '/.obj2', '/subdir/obj3']))
       # Dir should have content like bucket but without the subdir objects
       # synchronized.
-      self.assertEquals(listing2, set(['/obj1', '/obj2', '/subdir/obj5']))
+      self.assertEquals(listing2, set(['/obj1', '/.obj2', '/subdir/obj5']))
       # Assert that the src/dest objects that had same length but different
       # content were not synchronized (bucket to dir sync doesn't use checksums
       # unless you specify -c).
-      self.assertEquals('obj2', self.RunGsUtil(
-          ['cat', suri(bucket_uri, 'obj2')], return_stdout=True))
-      with open(os.path.join(tmpdir, 'obj2')) as f:
-        self.assertEquals('OBJ2', '\n'.join(f.readlines()))
+      self.assertEquals('.obj2', self.RunGsUtil(
+          ['cat', suri(bucket_uri, '.obj2')], return_stdout=True))
+      with open(os.path.join(tmpdir, '.obj2')) as f:
+        self.assertEquals('.OBJ2', '\n'.join(f.readlines()))
     _Check1()
 
     # Use @Retry as hedge against bucket listing eventual consistency.
@@ -573,16 +579,16 @@
       listing1 = _TailSet(suri(bucket_uri), self._FlatListBucket(bucket_uri))
       listing2 = _TailSet(tmpdir, self._FlatListDir(tmpdir))
       # Bucket should have un-altered content.
-      self.assertEquals(listing1, set(['/obj1', '/obj2', '/subdir/obj3']))
+      self.assertEquals(listing1, set(['/obj1', '/.obj2', '/subdir/obj3']))
       # Dir should have content like bucket but without the subdir objects
       # synchronized.
-      self.assertEquals(listing2, set(['/obj1', '/obj2', '/subdir/obj5']))
+      self.assertEquals(listing2, set(['/obj1', '/.obj2', '/subdir/obj5']))
       # Assert that the src/dest objects that had same length but different
       # content were synchronized (bucket to dir sync with -c uses checksums).
-      self.assertEquals('obj2', self.RunGsUtil(
-          ['cat', suri(bucket_uri, 'obj2')], return_stdout=True))
-      with open(os.path.join(tmpdir, 'obj2')) as f:
-        self.assertEquals('obj2', '\n'.join(f.readlines()))
+      self.assertEquals('.obj2', self.RunGsUtil(
+          ['cat', suri(bucket_uri, '.obj2')], return_stdout=True))
+      with open(os.path.join(tmpdir, '.obj2')) as f:
+        self.assertEquals('.obj2', '\n'.join(f.readlines()))
     _Check3()
 
     # Use @Retry as hedge against bucket listing eventual consistency.
@@ -598,7 +604,7 @@
                       contents='obj6')
     self.CreateTempFile(tmpdir=tmpdir, file_name='obj7', contents='obj7')
     self.RunGsUtil(['rm', suri(bucket_uri, 'obj1')])
-    os.unlink(os.path.join(tmpdir, 'obj2'))
+    os.unlink(os.path.join(tmpdir, '.obj2'))
 
     # Use @Retry as hedge against bucket listing eventual consistency.
     @Retry(AssertionError, tries=3, timeout_secs=1)
@@ -607,10 +613,10 @@
       listing1 = _TailSet(suri(bucket_uri), self._FlatListBucket(bucket_uri))
       listing2 = _TailSet(tmpdir, self._FlatListDir(tmpdir))
       # Bucket should have un-altered content.
-      self.assertEquals(listing1, set(['/obj2', '/obj6', '/subdir/obj3']))
+      self.assertEquals(listing1, set(['/.obj2', '/obj6', '/subdir/obj3']))
       # Dir should have content like bucket but without the subdir objects
       # synchronized.
-      self.assertEquals(listing2, set(['/obj2', '/obj6', '/subdir/obj3']))
+      self.assertEquals(listing2, set(['/.obj2', '/obj6', '/subdir/obj3']))
     _Check5()
 
     # Use @Retry as hedge against bucket listing eventual consistency.
@@ -683,8 +689,6 @@
       self.assertEquals(listing1, set(['/obj1', '//']))
       # Bucket should not have the placeholder object.
       self.assertEquals(listing2, set(['/obj1']))
-      # Stdout should report what happened.
-      self.assertRegexpMatches(output, r'.*Skipping cloud sub-directory.*')
     _Check1()
 
   @unittest.skipIf(IS_WINDOWS, 'os.symlink() is not available on Windows.')
@@ -696,7 +700,7 @@
     bucket_uri = self.CreateBucket()
     fpath1 = self.CreateTempFile(
         tmpdir=tmpdir, file_name='obj1', contents='obj1')
-    self.CreateTempFile(tmpdir=tmpdir, file_name='obj2', contents='obj2')
+    self.CreateTempFile(tmpdir=tmpdir, file_name='.obj2', contents='.obj2')
     self.CreateTempFile(tmpdir=subdir, file_name='obj3', contents='subdir/obj3')
     good_symlink_path = os.path.join(tmpdir, 'symlink1')
     os.symlink(fpath1, good_symlink_path)
@@ -704,8 +708,8 @@
     # handles that case.
     bad_symlink_path = os.path.join(tmpdir, 'symlink2')
     os.symlink(os.path.join('/', 'non-existent'), bad_symlink_path)
-    self.CreateObject(bucket_uri=bucket_uri, object_name='obj2',
-                      contents='OBJ2')
+    self.CreateObject(bucket_uri=bucket_uri, object_name='.obj2',
+                      contents='.OBJ2')
     self.CreateObject(bucket_uri=bucket_uri, object_name='obj4',
                       contents='obj4')
     self.CreateObject(bucket_uri=bucket_uri, object_name='subdir/obj5',
@@ -721,10 +725,10 @@
       # Dir should have un-altered content.
       self.assertEquals(
           listing1,
-          set(['/obj1', '/obj2', '/subdir/obj3', '/symlink1', '/symlink2']))
+          set(['/obj1', '/.obj2', '/subdir/obj3', '/symlink1', '/symlink2']))
       # Bucket should have content like dir but without the symlink, and
       # without subdir objects synchronized.
-      self.assertEquals(listing2, set(['/obj1', '/obj2', '/subdir/obj5']))
+      self.assertEquals(listing2, set(['/obj1', '/.obj2', '/subdir/obj5']))
     _Check1()
 
     # Now remove invalid symlink and run without -e, and see that symlink gets
@@ -739,11 +743,11 @@
       listing2 = _TailSet(suri(bucket_uri), self._FlatListBucket(bucket_uri))
       # Dir should have un-altered content.
       self.assertEquals(
-          listing1, set(['/obj1', '/obj2', '/subdir/obj3', '/symlink1']))
+          listing1, set(['/obj1', '/.obj2', '/subdir/obj3', '/symlink1']))
       # Bucket should have content like dir but without the symlink, and
       # without subdir objects synchronized.
       self.assertEquals(
-          listing2, set(['/obj1', '/obj2', '/subdir/obj5', '/symlink1']))
+          listing2, set(['/obj1', '/.obj2', '/subdir/obj5', '/symlink1']))
       self.assertEquals('obj1', self.RunGsUtil(
           ['cat', suri(bucket_uri, 'symlink1')], return_stdout=True))
     _Check2()
@@ -763,13 +767,13 @@
     bucket2_uri = self.CreateBucket()
     self.CreateObject(bucket_uri=bucket1_uri, object_name='obj1',
                       contents='obj1')
-    self.CreateObject(bucket_uri=bucket1_uri, object_name='obj2',
-                      contents='obj2')
+    self.CreateObject(bucket_uri=bucket1_uri, object_name='.obj2',
+                      contents='.obj2')
     self.RunGsUtil(
-        ['compose', suri(bucket1_uri, 'obj1'), suri(bucket1_uri, 'obj2'),
+        ['compose', suri(bucket1_uri, 'obj1'), suri(bucket1_uri, '.obj2'),
          suri(bucket1_uri, 'obj3')])
-    self.CreateObject(bucket_uri=bucket2_uri, object_name='obj2',
-                      contents='OBJ2')
+    self.CreateObject(bucket_uri=bucket2_uri, object_name='.obj2',
+                      contents='.OBJ2')
     self.CreateObject(bucket_uri=bucket2_uri, object_name='obj4',
                       contents='obj4')
 
@@ -780,10 +784,10 @@
       listing1 = _TailSet(suri(bucket1_uri), self._FlatListBucket(bucket1_uri))
       listing2 = _TailSet(suri(bucket2_uri), self._FlatListBucket(bucket2_uri))
       # First bucket should have un-altered content.
-      self.assertEquals(listing1, set(['/obj1', '/obj2', '/obj3']))
+      self.assertEquals(listing1, set(['/obj1', '/.obj2', '/obj3']))
       # Second bucket should have content like first bucket but without the
       # subdir objects synchronized.
-      self.assertEquals(listing2, set(['/obj1', '/obj2', '/obj3']))
+      self.assertEquals(listing2, set(['/obj1', '/.obj2', '/obj3']))
     _Check1()
 
     # Use @Retry as hedge against bucket listing eventual consistency.
@@ -801,8 +805,8 @@
     bucket2_uri = self.CreateBucket()
     self.CreateObject(bucket_uri=bucket1_uri, object_name='obj1',
                       contents='obj1')
-    self.CreateObject(bucket_uri=bucket1_uri, object_name='obj2',
-                      contents='obj2')
+    self.CreateObject(bucket_uri=bucket1_uri, object_name='.obj2',
+                      contents='.obj2')
 
     # Use @Retry as hedge against bucket listing eventual consistency.
     @Retry(AssertionError, tries=3, timeout_secs=1)
@@ -810,8 +814,8 @@
       self.RunGsUtil(['rsync', '-d', suri(bucket1_uri), suri(bucket2_uri)])
       listing1 = _TailSet(suri(bucket1_uri), self._FlatListBucket(bucket1_uri))
       listing2 = _TailSet(suri(bucket2_uri), self._FlatListBucket(bucket2_uri))
-      self.assertEquals(listing1, set(['/obj1', '/obj2']))
-      self.assertEquals(listing2, set(['/obj1', '/obj2']))
+      self.assertEquals(listing1, set(['/obj1', '/.obj2']))
+      self.assertEquals(listing2, set(['/obj1', '/.obj2']))
     _Check1()
 
     # Use @Retry as hedge against bucket listing eventual consistency.
@@ -829,8 +833,8 @@
     bucket2_uri = self.CreateBucket()
     self.CreateObject(bucket_uri=bucket2_uri, object_name='obj1',
                       contents='obj1')
-    self.CreateObject(bucket_uri=bucket2_uri, object_name='obj2',
-                      contents='obj2')
+    self.CreateObject(bucket_uri=bucket2_uri, object_name='.obj2',
+                      contents='.obj2')
 
     # Use @Retry as hedge against bucket listing eventual consistency.
     @Retry(AssertionError, tries=3, timeout_secs=1)
@@ -896,7 +900,7 @@
     os.mkdir(subdir)
     bucket_url = self.CreateBucket()
     self.CreateTempFile(tmpdir=tmpdir, file_name='obj1', contents='obj1')
-    self.CreateTempFile(tmpdir=tmpdir, file_name='obj2', contents='obj2')
+    self.CreateTempFile(tmpdir=tmpdir, file_name='.obj2', contents='.obj2')
     self.CreateTempFile(tmpdir=subdir, file_name='obj3', contents='subdir/obj3')
 
     # Use @Retry as hedge against bucket listing eventual consistency.
@@ -909,9 +913,9 @@
           suri(bucket_url, 'subdir'),
           self._FlatListBucket(bucket_url.clone_replace_name('subdir')))
       # Dir should have un-altered content.
-      self.assertEquals(listing1, set(['/obj1', '/obj2', '/subdir/obj3']))
+      self.assertEquals(listing1, set(['/obj1', '/.obj2', '/subdir/obj3']))
       # Bucket subdir should have content like dir.
-      self.assertEquals(listing2, set(['/obj1', '/obj2', '/subdir/obj3']))
+      self.assertEquals(listing2, set(['/obj1', '/.obj2', '/subdir/obj3']))
     _Check1()
 
     # Use @Retry as hedge against bucket listing eventual consistency.
@@ -927,7 +931,7 @@
     """Tests that rsync from a non-existent bucket subdir fails gracefully."""
     tmpdir = self.CreateTempDir()
     self.CreateTempFile(tmpdir=tmpdir, file_name='obj1', contents='obj1')
-    self.CreateTempFile(tmpdir=tmpdir, file_name='obj2', contents='obj2')
+    self.CreateTempFile(tmpdir=tmpdir, file_name='.obj2', contents='.obj2')
     bucket_url_str = '%s://%s' % (
         self.default_provider, self.nonexistent_bucket_name)
     stderr = self.RunGsUtil(['rsync', '-d', bucket_url_str, tmpdir],
@@ -935,13 +939,13 @@
     self.assertIn('Caught non-retryable exception', stderr)
     listing = _TailSet(tmpdir, self._FlatListDir(tmpdir))
     # Dir should have un-altered content.
-    self.assertEquals(listing, set(['/obj1', '/obj2']))
+    self.assertEquals(listing, set(['/obj1', '/.obj2']))
 
   def test_rsync_to_nonexistent_bucket(self):
     """Tests that rsync from a non-existent bucket subdir fails gracefully."""
     tmpdir = self.CreateTempDir()
     self.CreateTempFile(tmpdir=tmpdir, file_name='obj1', contents='obj1')
-    self.CreateTempFile(tmpdir=tmpdir, file_name='obj2', contents='obj2')
+    self.CreateTempFile(tmpdir=tmpdir, file_name='.obj2', contents='.obj2')
     bucket_url_str = '%s://%s' % (
         self.default_provider, self.nonexistent_bucket_name)
     stderr = self.RunGsUtil(['rsync', '-d', bucket_url_str, tmpdir],
@@ -949,7 +953,7 @@
     self.assertIn('Caught non-retryable exception', stderr)
     listing = _TailSet(tmpdir, self._FlatListDir(tmpdir))
     # Dir should have un-altered content.
-    self.assertEquals(listing, set(['/obj1', '/obj2']))
+    self.assertEquals(listing, set(['/obj1', '/.obj2']))
 
   def test_bucket_to_bucket_minus_d_with_overwrite_and_punc_chars(self):
     """Tests that punc chars in filenames don't confuse sort order."""
@@ -960,12 +964,12 @@
     # compared without encoding.
     self.CreateObject(bucket_uri=bucket1_uri, object_name='e/obj1',
                       contents='obj1')
-    self.CreateObject(bucket_uri=bucket1_uri, object_name='e-1/obj2',
-                      contents='obj2')
+    self.CreateObject(bucket_uri=bucket1_uri, object_name='e-1/.obj2',
+                      contents='.obj2')
     self.CreateObject(bucket_uri=bucket2_uri, object_name='e/obj1',
                       contents='OBJ1')
-    self.CreateObject(bucket_uri=bucket2_uri, object_name='e-1/obj2',
-                      contents='obj2')
+    self.CreateObject(bucket_uri=bucket2_uri, object_name='e-1/.obj2',
+                      contents='.obj2')
     # Need to make sure the bucket listings are caught-up, otherwise the
     # rsync may not see all objects and fail to synchronize correctly.
     self.AssertNObjectsInBucket(bucket1_uri, 2)
@@ -979,13 +983,13 @@
       listing1 = _TailSet(suri(bucket1_uri), self._FlatListBucket(bucket1_uri))
       listing2 = _TailSet(suri(bucket2_uri), self._FlatListBucket(bucket2_uri))
       # First bucket should have un-altered content.
-      self.assertEquals(listing1, set(['/e/obj1', '/e-1/obj2']))
-      self.assertEquals(listing2, set(['/e/obj1', '/e-1/obj2']))
+      self.assertEquals(listing1, set(['/e/obj1', '/e-1/.obj2']))
+      self.assertEquals(listing2, set(['/e/obj1', '/e-1/.obj2']))
       # Assert correct contents.
       self.assertEquals('obj1', self.RunGsUtil(
           ['cat', suri(bucket2_uri, 'e/obj1')], return_stdout=True))
-      self.assertEquals('obj2', self.RunGsUtil(
-          ['cat', suri(bucket2_uri, 'e-1/obj2')], return_stdout=True))
+      self.assertEquals('.obj2', self.RunGsUtil(
+          ['cat', suri(bucket2_uri, 'e-1/.obj2')], return_stdout=True))
     _Check1()
 
     # Use @Retry as hedge against bucket listing eventual consistency.
@@ -1003,17 +1007,17 @@
     tmpdir = self.CreateTempDir()
     bucket_uri = self.CreateBucket()
     self.CreateTempFile(tmpdir=tmpdir, file_name='obj1', contents='obj1')
-    self.CreateTempFile(tmpdir=tmpdir, file_name='obj2', contents='obj2')
+    self.CreateTempFile(tmpdir=tmpdir, file_name='.obj2', contents='.obj2')
     self.CreateTempFile(tmpdir=tmpdir, file_name='obj3', contents='obj3')
-    self.CreateObject(bucket_uri=bucket_uri, object_name='obj2',
-                      contents='obj2')
+    self.CreateObject(bucket_uri=bucket_uri, object_name='.obj2',
+                      contents='.obj2')
     self.CreateObject(bucket_uri=bucket_uri, object_name='obj4',
                       contents='obj4')
     self.CreateObject(bucket_uri=bucket_uri, object_name='obj5',
                       contents='obj5')
 
     # Need to make sure the bucket listing is caught-up, otherwise the
-    # first rsync may not see obj2 and overwrite it.
+    # first rsync may not see .obj2 and overwrite it.
     self.AssertNObjectsInBucket(bucket_uri, 3)
 
     # Use @Retry as hedge against bucket listing eventual consistency.
@@ -1024,10 +1028,10 @@
       listing1 = _TailSet(tmpdir, self._FlatListDir(tmpdir))
       listing2 = _TailSet(suri(bucket_uri), self._FlatListBucket(bucket_uri))
       # Dir should have un-altered content.
-      self.assertEquals(listing1, set(['/obj1', '/obj2', '/obj3']))
+      self.assertEquals(listing1, set(['/obj1', '/.obj2', '/obj3']))
       # Bucket should have content like dir but ignoring obj3 from dir and not
       # deleting obj4 from bucket (per exclude regex).
-      self.assertEquals(listing2, set(['/obj1', '/obj2', '/obj4']))
+      self.assertEquals(listing2, set(['/obj1', '/.obj2', '/obj4']))
     _Check1()
 
     # Use @Retry as hedge against bucket listing eventual consistency.
@@ -1038,3 +1042,30 @@
           ['rsync', '-d', '-x', 'obj[34]', tmpdir, suri(bucket_uri)],
           return_stderr=True))
     _Check2()
+
+  @unittest.skipIf(IS_WINDOWS,
+                   "os.chmod() won't make file unreadable on Windows.")
+  def test_dir_to_bucket_minus_C(self):
+    """Tests that rsync -C option works correctly."""
+    # Create dir with 3 objects, the middle of which is unreadable.
+    tmpdir = self.CreateTempDir()
+    bucket_uri = self.CreateBucket()
+    self.CreateTempFile(tmpdir=tmpdir, file_name='obj1', contents='obj1')
+    path = self.CreateTempFile(tmpdir=tmpdir, file_name='obj2', contents='obj2')
+    os.chmod(path, 0)
+    self.CreateTempFile(tmpdir=tmpdir, file_name='obj3', contents='obj3')
+
+    # Use @Retry as hedge against bucket listing eventual consistency.
+    @Retry(AssertionError, tries=3, timeout_secs=1)
+    def _Check():
+      """Tests rsync works as expected."""
+      stderr = self.RunGsUtil(['rsync', '-C', tmpdir, suri(bucket_uri)],
+                              expected_status=1, return_stderr=True)
+      self.assertIn('1 files/objects could not be copied/removed.', stderr)
+      listing1 = _TailSet(tmpdir, self._FlatListDir(tmpdir))
+      listing2 = _TailSet(suri(bucket_uri), self._FlatListBucket(bucket_uri))
+      # Dir should have un-altered content.
+      self.assertEquals(listing1, set(['/obj1', '/obj2', '/obj3']))
+      # Bucket should have obj1 and obj3 even though obj2 was unreadable.
+      self.assertEquals(listing2, set(['/obj1', '/obj3']))
+    _Check()
diff --git a/catapult/third_party/gsutil/gslib/tests/test_trace.py b/catapult/third_party/gsutil/gslib/tests/test_trace.py
new file mode 100644
index 0000000..673b0cf
--- /dev/null
+++ b/catapult/third_party/gsutil/gslib/tests/test_trace.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Integration tests for gsutil --trace-token option."""
+
+from __future__ import absolute_import
+
+from gslib.cs_api_map import ApiSelector
+import gslib.tests.testcase as testcase
+from gslib.tests.testcase.integration_testcase import SkipForS3
+from gslib.tests.util import ObjectToURI as suri
+
+
+@SkipForS3('--trace-token is supported only on GCS JSON API.')
+class TestTraceTokenOption(testcase.GsUtilIntegrationTestCase):
+  """Integration tests for gsutil --trace-token option."""
+
+  def test_minus_tracetoken_cat(self):
+    """Tests cat command with trace-token option."""
+    key_uri = self.CreateObject(contents='0123456789')
+    (_, stderr) = self.RunGsUtil(
+        ['-D', '--trace-token=THISISATOKEN', 'cat', suri(key_uri)],
+        return_stdout=True, return_stderr=True)
+    if self.test_api == ApiSelector.JSON:
+      self.assertIn('You are running gsutil with trace output enabled.', stderr)
+      self.assertRegexpMatches(
+          stderr, r'.*GET.*b/%s/o/%s\?.*&trace=token%%3ATHISISATOKEN' %
+          (key_uri.bucket_name, key_uri.object_name))
diff --git a/catapult/third_party/gsutil/gslib/tests/testcase/integration_testcase.py b/catapult/third_party/gsutil/gslib/tests/testcase/integration_testcase.py
index 0979c45..e7f01a3 100644
--- a/catapult/third_party/gsutil/gslib/tests/testcase/integration_testcase.py
+++ b/catapult/third_party/gsutil/gslib/tests/testcase/integration_testcase.py
@@ -86,9 +86,9 @@
   GROUP_TEST_ADDRESS = 'gs-discussion@googlegroups.com'
   GROUP_TEST_ID = (
       '00b4903a97d097895ab58ef505d535916a712215b79c3e54932c2eb502ad97f5')
-  USER_TEST_ADDRESS = 'gs-team@google.com'
+  USER_TEST_ADDRESS = 'gsutiltestuser@gmail.com'
   USER_TEST_ID = (
-      '00b4903a9703325c6bfc98992d72e75600387a64b3b6bee9ef74613ef8842080')
+      '00b4903a97b201e40d2a5a3ddfe044bb1ab79c75b2e817cbe350297eccc81c84')
   DOMAIN_TEST = 'google.com'
   # No one can create this bucket without owning the gmail.com domain, and we
   # won't create this bucket, so it shouldn't exist.
@@ -391,8 +391,8 @@
       arguments.
     """
     cmd = ([gslib.GSUTIL_PATH] + ['--testexceptiontraces'] +
-          ['-o', 'GSUtil:default_project_id=' + PopulateProjectId()] +
-          cmd)
+           ['-o', 'GSUtil:default_project_id=' + PopulateProjectId()] +
+           cmd)
     if IS_WINDOWS:
       cmd = [sys.executable] + cmd
     p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
diff --git a/catapult/third_party/gsutil/gslib/tests/testcase/unit_testcase.py b/catapult/third_party/gsutil/gslib/tests/testcase/unit_testcase.py
index e36bd28..9c38084 100644
--- a/catapult/third_party/gsutil/gslib/tests/testcase/unit_testcase.py
+++ b/catapult/third_party/gsutil/gslib/tests/testcase/unit_testcase.py
@@ -133,7 +133,7 @@
       sys.stderr.write('==== end log output ====\n')
 
   def RunCommand(self, command_name, args=None, headers=None, debug=0,
-                 test_method=None, return_stdout=False, return_stderr=False,
+                 return_stdout=False, return_stderr=False,
                  return_log_handler=False, cwd=None):
     """Method for calling gslib.command_runner.CommandRunner.
 
@@ -148,9 +148,6 @@
       args: Command-line args (arg0 = actual arg, not command name ala bash).
       headers: Dictionary containing optional HTTP headers to pass to boto.
       debug: Debug level to pass in to boto connection (range 0..3).
-      test_method: Optional general purpose method for testing purposes.
-                   Application and semantics of this method will vary by
-                   command and test type.
       return_stdout: If True, will save and return stdout produced by command.
       return_stderr: If True, will save and return stderr produced by command.
       return_log_handler: If True, will return a MockLoggingHandler instance
@@ -193,8 +190,7 @@
       with WorkingDirectory(cwd):
         self.command_runner.RunNamedCommand(
             command_name, args=args, headers=headers, debug=debug,
-            parallel_operations=False, test_method=test_method,
-            do_shutdown=False)
+            parallel_operations=False, do_shutdown=False)
     finally:
       sys.stdout.seek(0)
       stdout = sys.stdout.read()
diff --git a/catapult/third_party/gsutil/gslib/tests/util.py b/catapult/third_party/gsutil/gslib/tests/util.py
index 06da870..7b1e204 100644
--- a/catapult/third_party/gsutil/gslib/tests/util.py
+++ b/catapult/third_party/gsutil/gslib/tests/util.py
@@ -26,7 +26,9 @@
 import urlparse
 
 import boto
+import crcmod
 import gslib.tests as gslib_tests
+from gslib.util import UsingCrcmodExtension
 
 if not hasattr(unittest.TestCase, 'assertIsNone'):
   # external dependency unittest2 required for Python <= 2.6
@@ -226,12 +228,12 @@
     boto.config.remove_section(section)
 
 
-def PerformsFileToObjectUpload(func):
-  """Decorator indicating that a test uploads from a local file to an object.
+def SequentialAndParallelTransfer(func):
+  """Decorator for tests that perform file to object transfers, or vice versa.
 
   This forces the test to run once normally, and again with special boto
   config settings that will ensure that the test follows the parallel composite
-  upload code path.
+  upload and/or sliced object download code paths.
 
   Args:
     func: Function to wrap.
@@ -244,11 +246,14 @@
     # Run the test normally once.
     func(*args, **kwargs)
 
-    # Try again, forcing parallel composite uploads.
-    with SetBotoConfigForTest([
-        ('GSUtil', 'parallel_composite_upload_threshold', '1'),
-        ('GSUtil', 'check_hashes', 'always')]):
-      func(*args, **kwargs)
+    if not RUN_S3_TESTS and UsingCrcmodExtension(crcmod):
+      # Try again, forcing parallel upload and sliced download.
+      with SetBotoConfigForTest([
+          ('GSUtil', 'parallel_composite_upload_threshold', '1'),
+          ('GSUtil', 'sliced_object_download_threshold', '1'),
+          ('GSUtil', 'sliced_object_download_max_components', '3'),
+          ('GSUtil', 'check_hashes', 'always')]):
+        func(*args, **kwargs)
 
   return Wrapper
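The renamed decorator above runs the wrapped test twice: once normally and, when the crcmod C extension is available and S3 tests are not being run, again with boto overrides that force the parallel composite upload and sliced object download paths. A minimal usage sketch (the test class and method here are hypothetical, not part of this patch):

import gslib.tests.testcase as testcase
from gslib.tests.util import ObjectToURI as suri
from gslib.tests.util import SequentialAndParallelTransfer


class TestExampleTransfer(testcase.GsUtilIntegrationTestCase):
  """Hypothetical test illustrating the decorator."""

  @SequentialAndParallelTransfer
  def test_upload_round_trip(self):
    # Runs twice: sequentially, then with the 1-byte thresholds set above so
    # the parallel upload / sliced download code paths are exercised.
    bucket_uri = self.CreateBucket()
    fpath = self.CreateTempFile(contents='data')
    self.RunGsUtil(['cp', fpath, suri(bucket_uri)])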
 
diff --git a/catapult/third_party/gsutil/gslib/third_party/storage_apitools/storage_v1_client.py b/catapult/third_party/gsutil/gslib/third_party/storage_apitools/storage_v1_client.py
index bbffce3..3389b69 100644
--- a/catapult/third_party/gsutil/gslib/third_party/storage_apitools/storage_v1_client.py
+++ b/catapult/third_party/gsutil/gslib/third_party/storage_apitools/storage_v1_client.py
@@ -14,6 +14,7 @@
 """Generated client library for storage version v1."""
 
 import os
+import platform
 import sys
 
 from apitools.base.py import base_api
@@ -32,7 +33,8 @@
   _VERSION = u'v1'
   _CLIENT_ID = 'nomatter'
   _CLIENT_SECRET = 'nomatter'
-  _USER_AGENT = 'apitools gsutil/%s (%s)' % (gslib.VERSION, sys.platform)
+  _USER_AGENT = 'apitools gsutil/%s Python/%s (%s)' % (
+      gslib.VERSION, platform.python_version(), sys.platform)
   if os.environ.get('CLOUDSDK_WRAPPER') == '1':
     _USER_AGENT += ' Cloud SDK Command Line Tool'
     if os.environ.get('CLOUDSDK_VERSION'):
diff --git a/catapult/third_party/gsutil/gslib/tracker_file.py b/catapult/third_party/gsutil/gslib/tracker_file.py
index 4fddc8a..55541c1 100644
--- a/catapult/third_party/gsutil/gslib/tracker_file.py
+++ b/catapult/third_party/gsutil/gslib/tracker_file.py
@@ -16,6 +16,7 @@
 
 import errno
 import hashlib
+import json
 import os
 import re
 
@@ -40,7 +41,9 @@
 class TrackerFileType(object):
   UPLOAD = 'upload'
   DOWNLOAD = 'download'
+  DOWNLOAD_COMPONENT = 'download_component'
   PARALLEL_UPLOAD = 'parallel_upload'
+  SLICED_DOWNLOAD = 'sliced_download'
   REWRITE = 'rewrite'
 
 
@@ -110,7 +113,8 @@
   return _HashAndReturnPath(res_tracker_file_name, TrackerFileType.REWRITE)
 
 
-def GetTrackerFilePath(dst_url, tracker_file_type, api_selector, src_url=None):
+def GetTrackerFilePath(dst_url, tracker_file_type, api_selector, src_url=None,
+                       component_num=None):
   """Gets the tracker file name described by the arguments.
 
   Args:
@@ -118,6 +122,7 @@
     tracker_file_type: TrackerFileType for this operation.
     api_selector: API to use for this operation.
     src_url: Source URL for the source file name for parallel uploads.
+    component_num: Component number if this is a download component, else None.
 
   Returns:
     File path to tracker file.
@@ -132,6 +137,13 @@
     res_tracker_file_name = (
         re.sub('[/\\\\]', '_', 'resumable_download__%s__%s.etag' %
                (os.path.realpath(dst_url.object_name), api_selector)))
+  elif tracker_file_type == TrackerFileType.DOWNLOAD_COMPONENT:
+    # Encode the fully-qualified dest file name and the component number
+    # into the tracker file name.
+    res_tracker_file_name = (
+        re.sub('[/\\\\]', '_', 'resumable_download__%s__%s__%d.etag' %
+               (os.path.realpath(dst_url.object_name), api_selector,
+                component_num)))
   elif tracker_file_type == TrackerFileType.PARALLEL_UPLOAD:
     # Encode the dest bucket and object names as well as the source file name
     # into the tracker file name.
@@ -139,6 +151,11 @@
         re.sub('[/\\\\]', '_', 'parallel_upload__%s__%s__%s__%s.url' %
                (dst_url.bucket_name, dst_url.object_name,
                 src_url, api_selector)))
+  elif tracker_file_type == TrackerFileType.SLICED_DOWNLOAD:
+    # Encode the fully-qualified dest file name into the tracker file name.
+    res_tracker_file_name = (
+        re.sub('[/\\\\]', '_', 'sliced_download__%s__%s.etag' %
+               (os.path.realpath(dst_url.object_name), api_selector)))
   elif tracker_file_type == TrackerFileType.REWRITE:
     # Should use GetRewriteTrackerFilePath instead.
     raise NotImplementedError()
@@ -146,7 +163,73 @@
   return _HashAndReturnPath(res_tracker_file_name, tracker_file_type)
 
 
+def DeleteDownloadTrackerFiles(dst_url, api_selector):
+  """Deletes all tracker files corresponding to an object download.
+
+  Args:
+    dst_url: StorageUrl describing the destination file.
+    api_selector: The Cloud API implementation used.
+  """
+  # Delete non-sliced download tracker file.
+  DeleteTrackerFile(GetTrackerFilePath(dst_url, TrackerFileType.DOWNLOAD,
+                                       api_selector))
+
+  # Delete all sliced download tracker files.
+  tracker_files = GetSlicedDownloadTrackerFilePaths(dst_url, api_selector)
+  for tracker_file in tracker_files:
+    DeleteTrackerFile(tracker_file)
+
+
+def GetSlicedDownloadTrackerFilePaths(dst_url, api_selector,
+                                      num_components=None):
+  """Gets a list of sliced download tracker file paths.
+
+  The list consists of the parent tracker file path in index 0, and then
+  any existing component tracker files in [1:].
+
+  Args:
+    dst_url: Destination URL for tracker file.
+    api_selector: API to use for this operation.
+    num_components: The number of component tracker files, if already known.
+                    If not known, the number will be retrieved from the parent
+                    tracker file on disk.
+  Returns:
+    List of sliced download tracker file paths.
+  """
+  parallel_tracker_file_path = GetTrackerFilePath(
+      dst_url, TrackerFileType.SLICED_DOWNLOAD, api_selector)
+  tracker_file_paths = [parallel_tracker_file_path]
+
+  # If we don't know the number of components, check the tracker file.
+  if num_components is None:
+    tracker_file = None
+    try:
+      tracker_file = open(parallel_tracker_file_path, 'r')
+      num_components = json.load(tracker_file)['num_components']
+    except (IOError, ValueError):
+      return tracker_file_paths
+    finally:
+      if tracker_file:
+        tracker_file.close()
+
+  for i in range(num_components):
+    tracker_file_paths.append(GetTrackerFilePath(
+        dst_url, TrackerFileType.DOWNLOAD_COMPONENT, api_selector,
+        component_num=i))
+
+  return tracker_file_paths
+
+
 def _HashAndReturnPath(res_tracker_file_name, tracker_file_type):
+  """Hashes and returns a tracker file path.
+
+  Args:
+    res_tracker_file_name: The tracker file name prior to it being hashed.
+    tracker_file_type: The TrackerFileType of res_tracker_file_name.
+
+  Returns:
+    Final (hashed) tracker file path.
+  """
   resumable_tracker_dir = CreateTrackerDirIfNeeded()
   hashed_tracker_file_name = _HashFilename(res_tracker_file_name)
   tracker_file_name = '%s_%s' % (str(tracker_file_type).lower(),
@@ -256,49 +339,104 @@
                                                      rewrite_token))
 
 
-def ReadOrCreateDownloadTrackerFile(src_obj_metadata, dst_url,
-                                    api_selector):
+def ReadOrCreateDownloadTrackerFile(src_obj_metadata, dst_url, logger,
+                                    api_selector, start_byte,
+                                    existing_file_size, component_num=None):
   """Checks for a download tracker file and creates one if it does not exist.
 
+  The methodology for determining the download start point differs between
+  normal and sliced downloads. For normal downloads, the existing bytes in
+  the file are presumed to be correct and have been previously downloaded from
+  the server (if a tracker file exists). In this case, the existing file size
+  is used to determine the download start point. For sliced downloads, the
+  number of bytes previously retrieved from the server cannot be determined
+  from the existing file size, and so the number of bytes known to have been
+  previously downloaded is retrieved from the tracker file.
+
   Args:
-    src_obj_metadata: Metadata for the source object. Must include
-                      etag and size.
-    dst_url: Destination file StorageUrl.
-    api_selector: API mode to use (for tracker file naming).
+    src_obj_metadata: Metadata for the source object. Must include etag and
+                      generation.
+    dst_url: Destination URL for tracker file.
+    logger: For outputting log messages.
+    api_selector: API to use for this operation.
+    start_byte: The start byte of the byte range for this download.
+    existing_file_size: Size of existing file for this download on disk.
+    component_num: The component number, if this is a component of a parallel
+                   download, else None.
 
   Returns:
-    True if the tracker file already exists (resume existing download),
-    False if we created a new tracker file (new download).
+    tracker_file_name: The name of the tracker file, if one was used.
+    download_start_byte: The first byte that still needs to be downloaded.
   """
+  assert src_obj_metadata.etag
+
+  tracker_file_name = None
   if src_obj_metadata.size < ResumableThreshold():
     # Don't create a tracker file for small downloads; cross-process resumes
     # won't work, but restarting a small download is inexpensive.
-    return False
+    return tracker_file_name, start_byte
 
-  assert src_obj_metadata.etag
-  tracker_file_name = GetTrackerFilePath(
-      dst_url, TrackerFileType.DOWNLOAD, api_selector)
+  download_name = dst_url.object_name
+  if component_num is None:
+    tracker_file_type = TrackerFileType.DOWNLOAD
+  else:
+    tracker_file_type = TrackerFileType.DOWNLOAD_COMPONENT
+    download_name += ' component %d' % component_num
+
+  tracker_file_name = GetTrackerFilePath(dst_url, tracker_file_type,
+                                         api_selector,
+                                         component_num=component_num)
   tracker_file = None
-
   # Check to see if we already have a matching tracker file.
   try:
     tracker_file = open(tracker_file_name, 'r')
-    etag_value = tracker_file.readline().rstrip('\n')
-    if etag_value == src_obj_metadata.etag:
-      return True
-  except IOError as e:
+    if tracker_file_type is TrackerFileType.DOWNLOAD:
+      etag_value = tracker_file.readline().rstrip('\n')
+      if etag_value == src_obj_metadata.etag:
+        return tracker_file_name, existing_file_size
+    elif tracker_file_type is TrackerFileType.DOWNLOAD_COMPONENT:
+      component_data = json.loads(tracker_file.read())
+      if (component_data['etag'] == src_obj_metadata.etag and
+          component_data['generation'] == src_obj_metadata.generation):
+        return tracker_file_name, component_data['download_start_byte']
+
+    logger.warn('Tracker file doesn\'t match for download of %s. Restarting '
+                'download from scratch.' % download_name)
+
+  except (IOError, ValueError) as e:
     # Ignore non-existent file (happens first time a download
     # is attempted on an object), but warn user for other errors.
-    if e.errno != errno.ENOENT:
-      print('Couldn\'t read URL tracker file (%s): %s. Restarting '
-            'download from scratch.' %
-            (tracker_file_name, e.strerror))
+    if isinstance(e, ValueError) or e.errno != errno.ENOENT:
+      logger.warn('Couldn\'t read download tracker file (%s): %s. Restarting '
+                  'download from scratch.' % (tracker_file_name, str(e)))
   finally:
     if tracker_file:
       tracker_file.close()
 
-  # Otherwise, create a new tracker file and start from scratch.
-  _WriteTrackerFile(tracker_file_name, '%s\n' % src_obj_metadata.etag)
+  # There wasn't a matching tracker file, so create one and then start the
+  # download from scratch.
+  if tracker_file_type is TrackerFileType.DOWNLOAD:
+    _WriteTrackerFile(tracker_file_name, '%s\n' % src_obj_metadata.etag)
+  elif tracker_file_type is TrackerFileType.DOWNLOAD_COMPONENT:
+    WriteDownloadComponentTrackerFile(tracker_file_name, src_obj_metadata,
+                                      start_byte)
+  return tracker_file_name, start_byte
+
+
+def WriteDownloadComponentTrackerFile(tracker_file_name, src_obj_metadata,
+                                      current_file_pos):
+  """Updates or creates a download component tracker file on disk.
+
+  Args:
+    tracker_file_name: The name of the tracker file.
+    src_obj_metadata: Metadata for the source object. Must include etag.
+    current_file_pos: The current position in the file.
+  """
+  component_data = {'etag': src_obj_metadata.etag,
+                    'generation': src_obj_metadata.generation,
+                    'download_start_byte': current_file_pos}
+
+  _WriteTrackerFile(tracker_file_name, json.dumps(component_data))
 
 
 def _WriteTrackerFile(tracker_file_name, data):
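
To make the two tracker formats above concrete, here is a small self-contained sketch of the JSON payload a component tracker file carries, versus the single etag line of a plain download tracker. The path, etag, and generation values are invented for illustration; real paths come from GetTrackerFilePath and real values from the source object metadata.

import json
import os
import tempfile

# Invented values for illustration only.
etag = 'example-etag'
generation = 1234567890
tracker_path = os.path.join(tempfile.gettempdir(), 'example_component.etag')

# A component tracker stores a small JSON dict
# (see WriteDownloadComponentTrackerFile above).
with open(tracker_path, 'w') as f:
    f.write(json.dumps({'etag': etag,
                        'generation': generation,
                        'download_start_byte': 8 * 1024 * 1024}))

# On resume, ReadOrCreateDownloadTrackerFile only trusts download_start_byte
# if both etag and generation still match the source object.
with open(tracker_path) as f:
    data = json.loads(f.read())
if data['etag'] == etag and data['generation'] == generation:
    start_byte = data['download_start_byte']
else:
    start_byte = 0   # restart this component from scratch
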
diff --git a/catapult/third_party/gsutil/gslib/translation_helper.py b/catapult/third_party/gsutil/gslib/translation_helper.py
index 91adc83..a1b06f7 100644
--- a/catapult/third_party/gsutil/gslib/translation_helper.py
+++ b/catapult/third_party/gsutil/gslib/translation_helper.py
@@ -36,6 +36,7 @@
 from boto.gs.acl import USER_BY_ID
 
 from gslib.cloud_api import ArgumentException
+from gslib.cloud_api import BucketNotFoundException
 from gslib.cloud_api import NotFoundException
 from gslib.cloud_api import Preconditions
 from gslib.exception import CommandException
@@ -74,7 +75,7 @@
 
 # Because CORS is just a list in apitools, we need special handling or blank
 # CORS lists will get sent with other configuration commands such as lifecycle,
-# commands, which would cause CORS configuration to be unintentionally removed.
+# which would cause CORS configuration to be unintentionally removed.
 # Protorpc defaults list values to an empty list, and won't allow us to set the
 # value to None like other configuration fields, so there is no way to
 # distinguish the default value from when we actually want to remove the CORS
@@ -85,6 +86,14 @@
 REMOVE_CORS_CONFIG = [apitools_messages.Bucket.CorsValueListEntry(
     maxAgeSeconds=-1, method=['REMOVE_CORS_CONFIG'])]
 
+# Similar to CORS above, we need a sentinel value allowing us to specify
+# when a default object ACL should be private (containing no entries).
+# A defaultObjectAcl value of [] means don't modify the default object ACL.
+# A value of [PRIVATE_DEFAULT_OBJ_ACL] means create an empty/private default
+# object ACL.
+PRIVATE_DEFAULT_OBJ_ACL = apitools_messages.ObjectAccessControl(
+    id='PRIVATE_DEFAULT_OBJ_ACL')
+
 
 def ObjectMetadataFromHeaders(headers):
   """Creates object metadata according to the provided headers.
@@ -312,9 +321,44 @@
   return return_preconditions
 
 
+def CreateNotFoundExceptionForObjectWrite(
+    dst_provider, dst_bucket_name, src_provider=None,
+    src_bucket_name=None, src_object_name=None, src_generation=None):
+  """Creates a NotFoundException for an object upload or copy.
+
+  This is necessary because 404s don't necessarily specify which resource
+  does not exist.
+
+  Args:
+    dst_provider: String abbreviation of destination provider, e.g., 'gs'.
+    dst_bucket_name: Destination bucket name for the write operation.
+    src_provider: String abbreviation of source provider, i.e. 'gs', if any.
+    src_bucket_name: Source bucket name, if any (for the copy case).
+    src_object_name: Source object name, if any (for the copy case).
+    src_generation: Source object generation, if any (for the copy case).
+
+  Returns:
+    NotFoundException with appropriate message.
+  """
+  dst_url_string = '%s://%s' % (dst_provider, dst_bucket_name)
+  if src_bucket_name and src_object_name:
+    src_url_string = '%s://%s/%s' % (src_provider, src_bucket_name,
+                                     src_object_name)
+    if src_generation:
+      src_url_string += '#%s' % str(src_generation)
+    return NotFoundException(
+        'The source object %s or the destination bucket %s does not exist.' %
+        (src_url_string, dst_url_string))
+
+  return NotFoundException(
+      'The destination bucket %s does not exist or the write to the '
+      'destination must be restarted' % dst_url_string)
+
+
 def CreateBucketNotFoundException(code, provider, bucket_name):
-  return NotFoundException('%s://%s bucket does not exist.' %
-                           (provider, bucket_name), status=code)
+  return BucketNotFoundException('%s://%s bucket does not exist.' %
+                                 (provider, bucket_name), bucket_name,
+                                 status=code)
 
 
 def CreateObjectNotFoundException(code, provider, bucket_name, object_name,
@@ -638,6 +682,10 @@
   def BotoAclFromMessage(cls, acl_message):
     acl_dicts = []
     for message in acl_message:
+      if message == PRIVATE_DEFAULT_OBJ_ACL:
+        # Sentinel value indicating acl_dicts should be an empty list to create
+        # a private (no entries) default object ACL.
+        break
       acl_dicts.append(encoding.MessageToDict(message))
     return cls.BotoAclFromJson(acl_dicts)
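
A small sketch of the sentinel idea described in the comment block above: [] means "leave the default object ACL alone", while a list containing the sentinel means "write an explicitly empty (private) ACL". The sentinel here is a plain object standing in for the real ObjectAccessControl message with id='PRIVATE_DEFAULT_OBJ_ACL'.

# Stand-in for the apitools ObjectAccessControl sentinel message.
PRIVATE_DEFAULT_OBJ_ACL = object()

def AclDictsFromMessages(acl_messages):
    # Mirrors the loop in BotoAclFromMessage above: hitting the sentinel
    # stops conversion and yields an empty (private) ACL on purpose.
    acl_dicts = []
    for message in acl_messages:
        if message is PRIVATE_DEFAULT_OBJ_ACL:
            break
        acl_dicts.append(message)  # real code converts with MessageToDict
    return acl_dicts

AclDictsFromMessages([])                         # [] -> caller leaves the ACL as-is
AclDictsFromMessages([PRIVATE_DEFAULT_OBJ_ACL])  # [] -> caller writes an empty ACL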
 
diff --git a/catapult/third_party/gsutil/gslib/util.py b/catapult/third_party/gsutil/gslib/util.py
index ece5112..df07671 100644
--- a/catapult/third_party/gsutil/gslib/util.py
+++ b/catapult/third_party/gsutil/gslib/util.py
@@ -16,6 +16,7 @@
 
 from __future__ import absolute_import
 
+import collections
 import errno
 import logging
 import math
@@ -51,6 +52,24 @@
 from gslib.translation_helper import S3_DELETE_MARKER_GUID
 from gslib.translation_helper import S3_MARKER_GUIDS
 
+# Detect platform types.
+PLATFORM = str(sys.platform).lower()
+IS_WINDOWS = 'win32' in PLATFORM
+IS_CYGWIN = 'cygwin' in PLATFORM
+IS_LINUX = 'linux' in PLATFORM
+IS_OSX = 'darwin' in PLATFORM
+
+# pylint: disable=g-import-not-at-top
+if IS_WINDOWS:
+  from ctypes import c_int
+  from ctypes import c_uint64
+  from ctypes import c_char_p
+  from ctypes import c_wchar_p
+  from ctypes import windll
+  from ctypes import POINTER
+  from ctypes import WINFUNCTYPE
+  from ctypes import WinError
+
 # pylint: disable=g-import-not-at-top
 try:
   # This module doesn't necessarily exist on Windows.
@@ -114,16 +133,6 @@
 configured_certs_files = []
 
 
-def InitializeMultiprocessingVariables():
-  """Perform necessary initialization for multiprocessing.
-
-    See gslib.command.InitializeMultiprocessingVariables for an explanation
-    of why this is necessary.
-  """
-  global manager  # pylint: disable=global-variable-undefined
-  manager = multiprocessing.Manager()
-
-
 def _GenerateSuffixRegex():
   """Creates a suffix regex for human-readable byte counts."""
   human_bytes_re = r'(?P<num>\d*\.\d+|\d+)\s*(?P<suffix>%s)?'
@@ -142,13 +151,6 @@
 
 SECONDS_PER_DAY = 3600 * 24
 
-# Detect platform types.
-PLATFORM = str(sys.platform).lower()
-IS_WINDOWS = 'win32' in PLATFORM
-IS_CYGWIN = 'cygwin' in PLATFORM
-IS_LINUX = 'linux' in PLATFORM
-IS_OSX = 'darwin' in PLATFORM
-
 # On Unix-like systems, we will set the maximum number of open files to avoid
 # hitting the limit imposed by the OS. This number was obtained experimentally.
 MIN_ACCEPTABLE_OPEN_FILES_LIMIT = 1000
@@ -173,8 +175,48 @@
 
 
 def UsingCrcmodExtension(crcmod):
-  return (getattr(crcmod, 'crcmod', None) and
-          getattr(crcmod.crcmod, '_usingExtension', None))
+  return (boto.config.get('GSUtil', 'test_assume_fast_crcmod', None) or
+          (getattr(crcmod, 'crcmod', None) and
+           getattr(crcmod.crcmod, '_usingExtension', None)))
+
+
+def CheckFreeSpace(path):
+  """Return path/drive free space (in bytes)."""
+  if IS_WINDOWS:
+    try:
+      # pylint: disable=invalid-name
+      get_disk_free_space_ex = WINFUNCTYPE(c_int, c_wchar_p,
+                                           POINTER(c_uint64),
+                                           POINTER(c_uint64),
+                                           POINTER(c_uint64))
+      get_disk_free_space_ex = get_disk_free_space_ex(
+          ('GetDiskFreeSpaceExW', windll.kernel32), (
+              (1, 'lpszPathName'),
+              (2, 'lpFreeUserSpace'),
+              (2, 'lpTotalSpace'),
+              (2, 'lpFreeSpace'),))
+    except AttributeError:
+      get_disk_free_space_ex = WINFUNCTYPE(c_int, c_char_p,
+                                           POINTER(c_uint64),
+                                           POINTER(c_uint64),
+                                           POINTER(c_uint64))
+      get_disk_free_space_ex = get_disk_free_space_ex(
+          ('GetDiskFreeSpaceExA', windll.kernel32), (
+              (1, 'lpszPathName'),
+              (2, 'lpFreeUserSpace'),
+              (2, 'lpTotalSpace'),
+              (2, 'lpFreeSpace'),))
+
+    def GetDiskFreeSpaceExErrCheck(result, unused_func, args):
+      if not result:
+        raise WinError()
+      return args[1].value
+    get_disk_free_space_ex.errcheck = GetDiskFreeSpaceExErrCheck
+
+    return get_disk_free_space_ex(os.getenv('SystemDrive'))
+  else:
+    (_, f_frsize, _, _, f_bavail, _, _, _, _, _) = os.statvfs(path)
+    return f_frsize * f_bavail
 
 
 def CreateDirIfNeeded(dir_path, mode=0777):
@@ -192,6 +234,25 @@
         raise
 
 
+def DivideAndCeil(dividend, divisor):
+  """Returns ceil(dividend / divisor).
+
+  Takes care to avoid the pitfalls of floating point arithmetic that could
+  otherwise yield the wrong result for large numbers.
+
+  Args:
+    dividend: Dividend for the operation.
+    divisor: Divisor for the operation.
+
+  Returns:
+    Quotient.
+  """
+  quotient = dividend // divisor
+  if (dividend % divisor) != 0:
+    quotient += 1
+  return quotient
+
+
 def GetGsutilStateDir():
   """Returns the location of the directory for gsutil state files.
 
@@ -439,12 +500,15 @@
   return http
 
 
+# Retry for 10 minutes with exponential backoff, which corresponds to
+# the maximum Downtime Period specified in the GCS SLA
+# (https://cloud.google.com/storage/sla)
 def GetNumRetries():
-  return config.getint('Boto', 'num_retries', 6)
+  return config.getint('Boto', 'num_retries', 23)
 
 
 def GetMaxRetryDelay():
-  return config.getint('Boto', 'max_retry_delay', 60)
+  return config.getint('Boto', 'max_retry_delay', 32)
 
 
 # Resumable downloads and uploads make one HTTP call per chunk (and must be
@@ -890,8 +954,12 @@
       return True
   return False
 
+# This must be defined at the module level for pickling across processes.
+MultiprocessingIsAvailableResult = collections.namedtuple(
+    'MultiprocessingIsAvailableResult', ['is_available', 'stack_trace'])
 
-def MultiprocessingIsAvailable(logger=None):
+
+def CheckMultiprocessingAvailableAndInit(logger=None):
   """Checks if multiprocessing is available.
 
   There are some environments in which there is no way to use multiprocessing
@@ -900,6 +968,10 @@
   needed to make sure the environment can support the pieces of the
   multiprocessing module that we need.
 
+  If multiprocessing is available, this performs necessary initialization for
+  multiprocessing.  See gslib.command.InitializeMultiprocessingVariables for
+  an explanation of why this is necessary.
+
   Args:
     logger: logging.logger to use for debug output.
 
@@ -917,16 +989,27 @@
     if logger:
       logger.debug(cached_multiprocessing_check_stack_trace)
       logger.warn(cached_multiprocessing_is_available_message)
-    return (cached_multiprocessing_is_available,
-            cached_multiprocessing_check_stack_trace)
+    return MultiprocessingIsAvailableResult(
+        is_available=cached_multiprocessing_is_available,
+        stack_trace=cached_multiprocessing_check_stack_trace)
+
+  if IS_WINDOWS:
+    message = """
+Multiple processes are not supported on Windows. Operations requesting
+parallelism will be executed with multiple threads in a single process only.
+"""
+    if logger:
+      logger.warn(message)
+    return MultiprocessingIsAvailableResult(is_available=False,
+                                            stack_trace=None)
 
   stack_trace = None
   multiprocessing_is_available = True
   message = """
-You have requested multiple threads or processes for an operation, but the
+You have requested multiple processes for an operation, but the
 required functionality of Python\'s multiprocessing module is not available.
-Your operations will be performed sequentially, and any requests for
-parallelism will be ignored.
+Operations requesting parallelism will be executed with multiple threads in a
+single process only.
 """
   try:
     # Fails if /dev/shm (or some equivalent thereof) is not available for use
@@ -934,8 +1017,7 @@
     try:
       multiprocessing.Value('i', 0)
     except:
-      if not IS_WINDOWS:
-        message += """
+      message += """
 Please ensure that you have write access to both /dev/shm and /run/shm.
 """
       raise  # We'll handle this in one place below.
@@ -944,7 +1026,9 @@
     # out as a sanity check. This definitely works on some versions of Windows,
     # but it's certainly possible that there is some unknown configuration for
     # which it won't.
-    multiprocessing.Manager()
+    global manager  # pylint: disable=global-variable-undefined
+
+    manager = multiprocessing.Manager()
 
     # Check that the max number of open files is reasonable. Always check this
     # after we're sure that the basic multiprocessing functionality is
@@ -968,7 +1052,7 @@
       except AttributeError:
         pass
 
-    if limit < MIN_ACCEPTABLE_OPEN_FILES_LIMIT and not IS_WINDOWS:
+    if limit < MIN_ACCEPTABLE_OPEN_FILES_LIMIT:
       message += ("""
 Your max number of open files, %s, is too low to allow safe multiprocessing.
 On Linux you can fix this by adding something like "ulimit -n 10000" to your
@@ -995,7 +1079,9 @@
   cached_multiprocessing_is_available = multiprocessing_is_available
   cached_multiprocessing_check_stack_trace = stack_trace
   cached_multiprocessing_is_available_message = message
-  return (multiprocessing_is_available, stack_trace)
+  return MultiprocessingIsAvailableResult(
+      is_available=cached_multiprocessing_is_available,
+      stack_trace=cached_multiprocessing_check_stack_trace)
 
 
 def CreateLock():
@@ -1007,7 +1093,7 @@
   Returns:
     Multiprocessing or threading lock.
   """
-  if MultiprocessingIsAvailable()[0]:
+  if CheckMultiprocessingAvailableAndInit().is_available:
     return manager.Lock()
   else:
     return threading.Lock()
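
As a quick illustration of DivideAndCeil above, which, for example, a sliced download can use to work out how many components an object needs; the 32 MiB component size below is a made-up figure, not a gsutil default.

def DivideAndCeil(dividend, divisor):
    # Same integer-only ceiling division as gslib.util.DivideAndCeil above.
    quotient = dividend // divisor
    if dividend % divisor != 0:
        quotient += 1
    return quotient

object_size = 100 * 1024 * 1024      # 100 MiB
component_size = 32 * 1024 * 1024    # made-up slice size
num_components = DivideAndCeil(object_size, component_size)
assert num_components == 4           # 3 full slices plus one partial slice
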
diff --git a/catapult/third_party/gsutil/setup.py b/catapult/third_party/gsutil/setup.py
index 187a721..61df66c 100755
--- a/catapult/third_party/gsutil/setup.py
+++ b/catapult/third_party/gsutil/setup.py
@@ -38,14 +38,14 @@
     'boto==2.38.0',
     'crcmod>=1.7',
     'gcs-oauth2-boto-plugin>=1.9',
-    'google-apitools==0.4.8',
+    'google-apitools==0.4.10',
     'httplib2>=0.8',
     'oauth2client>=1.4.11',
     'protorpc>=0.10.0',
     'pyOpenSSL>=0.13',
     'python-gflags>=2.0',
     'retry_decorator>=1.0.0',
-    'six>=1.8.0',
+    'six>=1.9.0',
     # Not using 1.02 because of:
     #   https://code.google.com/p/socksipy-branch/issues/detail?id=3
     'SocksiPy-branch==1.01',
diff --git a/catapult/third_party/gsutil/third_party/apitools/.gitignore b/catapult/third_party/gsutil/third_party/apitools/.gitignore
index b51fd87..02eda25 100644
--- a/catapult/third_party/gsutil/third_party/apitools/.gitignore
+++ b/catapult/third_party/gsutil/third_party/apitools/.gitignore
@@ -7,9 +7,12 @@
 
 # Test files
 .tox/
+nosetests.xml
+
+# Coverage related
 .coverage
 coverage.xml
-nosetests.xml
+htmlcov/
 
 # Make sure a generated file isn't accidentally committed.
 reduced.pylintrc
diff --git a/catapult/third_party/gsutil/third_party/apitools/.travis.yml b/catapult/third_party/gsutil/third_party/apitools/.travis.yml
index 61f21e0..5bbdd50 100644
--- a/catapult/third_party/gsutil/third_party/apitools/.travis.yml
+++ b/catapult/third_party/gsutil/third_party/apitools/.travis.yml
@@ -1,4 +1,5 @@
 language: python
+sudo: false
 env:
   - TOX_ENV=py26
   - TOX_ENV=py27
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/__init__.py b/catapult/third_party/gsutil/third_party/apitools/apitools/__init__.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/base/__init__.py b/catapult/third_party/gsutil/third_party/apitools/apitools/base/__init__.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/__init__.py b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/__init__.py
old mode 100755
new mode 100644
index 53b34d0..0bbcf9f
--- a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/__init__.py
+++ b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/__init__.py
@@ -12,3 +12,9 @@
 from apitools.base.py.list_pager import *
 from apitools.base.py.transfer import *
 from apitools.base.py.util import *
+
+try:
+    # pylint:disable=no-name-in-module
+    from apitools.base.py.internal import *
+except ImportError:
+    pass
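
The try/except above is the usual optional-package pattern: if the internal add-on module is present, its names are re-exported; otherwise the public API is used unchanged. A trimmed standalone version, with a hypothetical module name:

try:
    # Optional add-on; not shipped with the open-source package.
    from apitools_internal_extras import *   # hypothetical module name
except ImportError:
    # Fall back silently to the public surface only.
    pass
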
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/app2.py b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/app2.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/base_api.py b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/base_api.py
old mode 100755
new mode 100644
index d10314b..97bbb6f
--- a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/base_api.py
+++ b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/base_api.py
@@ -1,6 +1,7 @@
 #!/usr/bin/env python
 """Base class for api services."""
 
+import base64
 import contextlib
 import datetime
 import logging
@@ -126,12 +127,29 @@
     return api_endpoint
 
 
+def _urljoin(base, url):  # pylint: disable=invalid-name
+    """Custom urljoin replacement supporting : before / in url."""
+    # In general, it's unsafe to simply join base and url. However, for
+    # the case of discovery documents, we know:
+    #  * base will never contain params, query, or fragment
+    #  * url will never contain a scheme or net_loc.
+    # In general, this means we can safely join on /; we just need to
+    # ensure we end up with precisely one / joining base and url. The
+    # exception here is the case of media uploads, where url will be an
+    # absolute url.
+    if url.startswith('http://') or url.startswith('https://'):
+        return urllib.parse.urljoin(base, url)
+    new_base = base if base.endswith('/') else base + '/'
+    new_url = url[1:] if url.startswith('/') else url
+    return new_base + new_url
+
+
 class _UrlBuilder(object):
 
     """Convenient container for url data."""
 
     def __init__(self, base_url, relative_path=None, query_params=None):
-        components = urllib.parse.urlsplit(urllib.parse.urljoin(
+        components = urllib.parse.urlsplit(_urljoin(
             base_url, relative_path or ''))
         if components.fragment:
             raise exceptions.ConfigurationValueError(
@@ -184,6 +202,11 @@
             self.__scheme, self.__netloc, self.relative_path, self.query, ''))
 
 
+def _SkipGetCredentials():
+    """Hook for skipping credentials. For internal use."""
+    return False
+
+
 class BaseApiClient(object):
 
     """Base class for client libraries."""
@@ -198,7 +221,7 @@
 
     def __init__(self, url, credentials=None, get_credentials=True, http=None,
                  model=None, log_request=False, log_response=False,
-                 num_retries=5, credentials_args=None,
+                 num_retries=5, max_retry_wait=60, credentials_args=None,
                  default_global_params=None, additional_http_headers=None):
         _RequireClassAttrs(self, ('_package', '_scopes', 'messages_module'))
         if default_global_params is not None:
@@ -207,9 +230,12 @@
         self.log_request = log_request
         self.log_response = log_response
         self.__num_retries = 5
+        self.__max_retry_wait = 60
         # We let the @property machinery below do our validation.
         self.num_retries = num_retries
+        self.max_retry_wait = max_retry_wait
         self._credentials = credentials
+        get_credentials = get_credentials and not _SkipGetCredentials()
         if get_credentials and not credentials:
             credentials_args = credentials_args or {}
             self._SetCredentials(**credentials_args)
@@ -334,6 +360,18 @@
                 'Cannot have negative value for num_retries')
         self.__num_retries = value
 
+    @property
+    def max_retry_wait(self):
+        return self.__max_retry_wait
+
+    @max_retry_wait.setter
+    def max_retry_wait(self, value):
+        util.Typecheck(value, six.integer_types)
+        if value <= 0:
+            raise exceptions.InvalidDataError(
+                'max_retry_wait must be a positive integer')
+        self.__max_retry_wait = value
+
     @contextlib.contextmanager
     def WithRetries(self, num_retries):
         old_num_retries = self.num_retries
@@ -452,6 +490,18 @@
             query_info['pp'] = 0
         return query_info
 
+    def __FinalUrlValue(self, value, field):
+        """Encode value for the URL, using field to skip encoding for bytes."""
+        if isinstance(field, messages.BytesField) and value is not None:
+            return base64.urlsafe_b64encode(value)
+        elif isinstance(value, six.text_type):
+            return value.encode('utf8')
+        elif isinstance(value, six.binary_type):
+            return value.decode('utf8')
+        elif isinstance(value, datetime.datetime):
+            return value.isoformat()
+        return value
+
     def __ConstructQueryParams(self, query_params, request, global_params):
         """Construct a dictionary of query parameters for this request."""
         # First, handle the global params.
@@ -460,23 +510,24 @@
         global_param_names = util.MapParamNames(
             [x.name for x in self.__client.params_type.all_fields()],
             self.__client.params_type)
-        query_info = dict((param, getattr(global_params, param))
-                          for param in global_param_names)
+        global_params_type = type(global_params)
+        query_info = dict(
+            (param,
+             self.__FinalUrlValue(getattr(global_params, param),
+                                  getattr(global_params_type, param)))
+            for param in global_param_names)
         # Next, add the query params.
         query_param_names = util.MapParamNames(query_params, type(request))
-        query_info.update((param, getattr(request, param, None))
-                          for param in query_param_names)
+        request_type = type(request)
+        query_info.update(
+            (param,
+             self.__FinalUrlValue(getattr(request, param, None),
+                                  getattr(request_type, param)))
+            for param in query_param_names)
         query_info = dict((k, v) for k, v in query_info.items()
                           if v is not None)
         query_info = self.__EncodePrettyPrint(query_info)
         query_info = util.MapRequestParams(query_info, type(request))
-        for k, v in query_info.items():
-            if isinstance(v, six.text_type):
-                query_info[k] = v.encode('utf8')
-            elif isinstance(v, str):
-                query_info[k] = v.decode('utf8')
-            elif isinstance(v, datetime.datetime):
-                query_info[k] = v.isoformat()
         return query_info
 
     def __ConstructRelativePath(self, method_config, request,
@@ -547,6 +598,9 @@
             util.Typecheck(body_field, messages.MessageField)
             body_type = body_field.type
 
+        # If there was no body provided, we use an empty message of the
+        # appropriate type.
+        body_value = body_value or body_type()
         if upload and not body_value:
             # We're going to fill in the body later.
             return
@@ -617,7 +671,8 @@
             if upload and upload.bytes_http:
                 http = upload.bytes_http
             http_response = http_wrapper.MakeRequest(
-                http, http_request, retries=self.__client.num_retries)
+                http, http_request, retries=self.__client.num_retries,
+                max_retry_wait=self.__client.max_retry_wait)
 
         return self.ProcessHttpResponse(method_config, http_response)
 
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/base_api_test.py b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/base_api_test.py
index 86141a3..7758c43 100644
--- a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/base_api_test.py
+++ b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/base_api_test.py
@@ -1,8 +1,10 @@
+import base64
 import datetime
 import sys
 
 from protorpc import message_types
 from protorpc import messages
+import six
 from six.moves import urllib_parse
 import unittest2
 
@@ -13,6 +15,7 @@
 
 class SimpleMessage(messages.Message):
     field = messages.StringField(1)
+    bytes_field = messages.BytesField(2)
 
 
 class MessageWithTime(messages.Message):
@@ -40,6 +43,7 @@
     prettyPrint = messages.BooleanField(
         5, default=True)  # pylint: disable=invalid-name
     pp = messages.BooleanField(6, default=True)
+    nextPageToken = messages.BytesField(7)  # pylint:disable=invalid-name
 
 
 class FakeCredentials(object):
@@ -147,6 +151,37 @@
         self.assertTrue('prettyPrint=0' in http_request.url)
         self.assertTrue('pp=0' in http_request.url)
 
+    def testQueryBytesRequest(self):
+        method_config = base_api.ApiMethodInfo(
+            request_type_name='SimpleMessage', query_params=['bytes_field'])
+        service = FakeService()
+        non_unicode_message = b''.join((six.int2byte(100),
+                                        six.int2byte(200)))
+        request = SimpleMessage(bytes_field=non_unicode_message)
+        global_params = StandardQueryParameters()
+        http_request = service.PrepareHttpRequest(method_config, request,
+                                                  global_params=global_params)
+        want = urllib_parse.urlencode({
+            'bytes_field': base64.urlsafe_b64encode(non_unicode_message),
+        })
+        self.assertIn(want, http_request.url)
+
+    def testQueryBytesGlobalParams(self):
+        method_config = base_api.ApiMethodInfo(
+            request_type_name='SimpleMessage', query_params=['bytes_field'])
+        service = FakeService()
+        non_unicode_message = b''.join((six.int2byte(100),
+                                        six.int2byte(200)))
+        request = SimpleMessage()
+        global_params = StandardQueryParameters(
+            nextPageToken=non_unicode_message)
+        http_request = service.PrepareHttpRequest(method_config, request,
+                                                  global_params=global_params)
+        want = urllib_parse.urlencode({
+            'nextPageToken': base64.urlsafe_b64encode(non_unicode_message),
+        })
+        self.assertIn(want, http_request.url)
+
     def testQueryRemapping(self):
         method_config = base_api.ApiMethodInfo(
             request_type_name='MessageWithRemappings',
@@ -177,3 +212,13 @@
         expected_url = service.client.url + 'parameters/gonna/remap/ONE/TWO'
         http_request = service.PrepareHttpRequest(method_config, request)
         self.assertEqual(expected_url, http_request.url)
+
+    def testColonInRelativePath(self):
+        method_config = base_api.ApiMethodInfo(
+            relative_path='path:withJustColon',
+            request_type_name='SimpleMessage')
+        service = FakeService()
+        request = SimpleMessage()
+        http_request = service.PrepareHttpRequest(method_config, request)
+        self.assertEqual('http://www.example.com/path:withJustColon',
+                         http_request.url)
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/base_cli.py b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/base_cli.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/batch.py b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/batch.py
old mode 100755
new mode 100644
index 4910d4a..2cf03a4
--- a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/batch.py
+++ b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/batch.py
@@ -321,11 +321,6 @@
         gen.flatten(msg, unixfrom=False)
         body = str_io.getvalue()
 
-        # Strip off the \n\n that the MIME lib tacks onto the end of the
-        # payload.
-        if request.body is None:
-            body = body[:-2]
-
         return status_line + body
 
     def _DeserializeResponse(self, payload):
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/batch_test.py b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/batch_test.py
index cf77364..7c20171 100644
--- a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/batch_test.py
+++ b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/batch_test.py
@@ -332,6 +332,8 @@
             'Content-Type: protocol/version',
             'MIME-Version: 1.0',
             'Host: ',
+            '',
+            '',
         ])
         batch_request = batch.BatchHttpRequest('https://www.example.com')
         self.assertEqual(expected_serialized_request,
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/buffered_stream.py b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/buffered_stream.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/cli.py b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/cli.py
old mode 100755
new mode 100644
index 6f7aa32..eccd66b
--- a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/cli.py
+++ b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/cli.py
@@ -12,3 +12,9 @@
 
 from apitools.base.py.app2 import *
 from apitools.base.py.base_cli import *
+
+try:
+    # pylint:disable=no-name-in-module
+    from apitools.base.py.internal.cli import *
+except ImportError:
+    pass
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/credentials_lib.py b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/credentials_lib.py
old mode 100755
new mode 100644
index 3078943..9a8f36e
--- a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/credentials_lib.py
+++ b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/credentials_lib.py
@@ -5,6 +5,7 @@
 import datetime
 import json
 import os
+import threading
 
 import httplib2
 import oauth2client
@@ -38,63 +39,70 @@
 ]
 
 
-# TODO(craigcitro): Expose the extra args here somewhere higher up,
-# possibly as flags in the generated CLI.
+# Lock when accessing the cache file to avoid resource contention.
+cache_file_lock = threading.Lock()
+
+
+def SetCredentialsCacheFileLock(lock):
+    global cache_file_lock  # pylint: disable=global-statement
+    cache_file_lock = lock
+
+
+# List of additional methods we use when attempting to construct
+# credentials. Users can register their own methods here, which we try
+# before the defaults.
+_CREDENTIALS_METHODS = []
+
+
+def _RegisterCredentialsMethod(method, position=None):
+    """Register a new method for fetching credentials.
+
+    This new method should be a function with signature:
+      client_info, **kwds -> Credentials or None
+    This method can be used as a decorator, unless position needs to
+    be supplied.
+
+    Note that method must *always* accept arbitrary keyword arguments.
+
+    Args:
+      method: New credential-fetching method.
+      position: (default: None) Where in the list of methods to
+        add this; if None, we append. In all but rare cases,
+        this should be either 0 or None.
+    Returns:
+      method, for use as a decorator.
+
+    """
+    if position is None:
+        position = len(_CREDENTIALS_METHODS)
+    else:
+        position = min(position, len(_CREDENTIALS_METHODS))
+    _CREDENTIALS_METHODS.insert(position, method)
+    return method
+
+
 def GetCredentials(package_name, scopes, client_id, client_secret, user_agent,
                    credentials_filename=None,
-                   service_account_name=None, service_account_keyfile=None,
-                   service_account_json_keyfile=None,
                    api_key=None,  # pylint: disable=unused-argument
-                   client=None):  # pylint: disable=unused-argument
+                   client=None,  # pylint: disable=unused-argument
+                   oauth2client_args=None,
+                   **kwds):
     """Attempt to get credentials, using an oauth dance as the last resort."""
     scopes = util.NormalizeScopes(scopes)
-    if ((service_account_name and not service_account_keyfile) or
-            (service_account_keyfile and not service_account_name)):
-        raise exceptions.CredentialsError(
-            'Service account name or keyfile provided without the other')
-    # TODO(craigcitro): Error checking.
     client_info = {
         'client_id': client_id,
         'client_secret': client_secret,
-        'scope': ' '.join(sorted(util.NormalizeScopes(scopes))),
+        'scope': ' '.join(sorted(scopes)),
         'user_agent': user_agent or '%s-generated/0.1' % package_name,
     }
-    service_account_kwargs = {
-        'user_agent': client_info['user_agent'],
-    }
-    if service_account_json_keyfile:
-        with open(service_account_json_keyfile) as keyfile:
-            service_account_info = json.load(keyfile)
-        account_type = service_account_info.get('type')
-        if account_type != oauth2client.client.SERVICE_ACCOUNT:
-            raise exceptions.CredentialsError(
-                'Invalid service account credentials: %s' % (
-                    service_account_json_keyfile,))
-        # pylint: disable=protected-access
-        credentials = oauth2client.service_account._ServiceAccountCredentials(
-            service_account_id=service_account_info['client_id'],
-            service_account_email=service_account_info['client_email'],
-            private_key_id=service_account_info['private_key_id'],
-            private_key_pkcs8_text=service_account_info['private_key'],
-            scopes=scopes,
-            **service_account_kwargs)
-        # pylint: enable=protected-access
-        return credentials
-    if service_account_name is not None:
-        credentials = ServiceAccountCredentialsFromFile(
-            service_account_name, service_account_keyfile, scopes,
-            service_account_kwargs=service_account_kwargs)
+    for method in _CREDENTIALS_METHODS:
+        credentials = method(client_info, **kwds)
         if credentials is not None:
             return credentials
-    credentials = GaeAssertionCredentials.Get(scopes)
-    if credentials is not None:
-        return credentials
-    credentials = GceAssertionCredentials.Get(scopes)
-    if credentials is not None:
-        return credentials
     credentials_filename = credentials_filename or os.path.expanduser(
         '~/.apitools.token')
-    credentials = CredentialsFromFile(credentials_filename, client_info)
+    credentials = CredentialsFromFile(credentials_filename, client_info,
+                                      oauth2client_args=oauth2client_args)
     if credentials is not None:
         return credentials
     raise exceptions.CredentialsError('Could not create valid credentials')
@@ -130,15 +138,26 @@
     return True
 
 
-def _OpenNoProxy(request):
-    """Wrapper around urllib2.open that ignores proxies."""
+def _GceMetadataRequest(relative_url, use_metadata_ip=False):
+    """Request the given url from the GCE metadata service."""
+    if use_metadata_ip:
+        base_url = 'http://169.254.169.254/'
+    else:
+        base_url = 'http://metadata.google.internal/'
+    url = base_url + 'computeMetadata/v1/' + relative_url
+    # Extra header requirement can be found here:
+    # https://developers.google.com/compute/docs/metadata
+    headers = {'Metadata-Flavor': 'Google'}
+    request = urllib.request.Request(url, headers=headers)
     opener = urllib.request.build_opener(urllib.request.ProxyHandler({}))
-    return opener.open(request)
+    try:
+        response = opener.open(request)
+    except urllib.error.URLError as e:
+        raise exceptions.CommunicationError(
+            'Could not reach metadata service: %s' % e.reason)
+    return response
 
 
-# TODO(craigcitro): We override to add some utility code, and to
-# update the old refresh implementation. Push this code into
-# oauth2client.
 class GceAssertionCredentials(oauth2client.gce.AppAssertionCredentials):
 
     """Assertion credentials for GCE instances."""
@@ -159,13 +178,10 @@
         # identified these scopes in the same execution. However, the
         # available scopes don't change once an instance is created,
         # so there is no reason to perform more than one query.
-        #
-        # TODO(craigcitro): Move this into oauth2client.
         self.__service_account_name = service_account_name
-        cache_filename = None
         cached_scopes = None
-        if 'cache_filename' in kwds:
-            cache_filename = kwds['cache_filename']
+        cache_filename = kwds.get('cache_filename')
+        if cache_filename:
             cached_scopes = self._CheckCacheFileForMatch(
                 cache_filename, scopes)
 
@@ -197,20 +213,23 @@
             'scopes': sorted(list(scopes)) if scopes else None,
             'svc_acct_name': self.__service_account_name,
         }
-        if _EnsureFileExists(cache_filename):
-            locked_file = oauth2client.locked_file.LockedFile(
-                cache_filename, 'r+b', 'rb')
-            try:
-                locked_file.open_and_lock()
-                cached_creds_str = locked_file.file_handle().read()
-                if cached_creds_str:
-                    # Cached credentials metadata dict.
-                    cached_creds = json.loads(cached_creds_str)
-                    if creds['svc_acct_name'] == cached_creds['svc_acct_name']:
-                        if creds['scopes'] in (None, cached_creds['scopes']):
-                            scopes = cached_creds['scopes']
-            finally:
-                locked_file.unlock_and_close()
+        with cache_file_lock:
+            if _EnsureFileExists(cache_filename):
+                locked_file = oauth2client.locked_file.LockedFile(
+                    cache_filename, 'r+b', 'rb')
+                try:
+                    locked_file.open_and_lock()
+                    cached_creds_str = locked_file.file_handle().read()
+                    if cached_creds_str:
+                        # Cached credentials metadata dict.
+                        cached_creds = json.loads(cached_creds_str)
+                        if (creds['svc_acct_name'] ==
+                                cached_creds['svc_acct_name']):
+                            if (creds['scopes'] in
+                                    (None, cached_creds['scopes'])):
+                                scopes = cached_creds['scopes']
+                finally:
+                    locked_file.unlock_and_close()
         return scopes
 
     def _WriteCacheFile(self, cache_filename, scopes):
@@ -223,22 +242,23 @@
           cache_filename: Cache filename to check.
           scopes: Scopes for the desired credentials.
         """
-        if _EnsureFileExists(cache_filename):
-            locked_file = oauth2client.locked_file.LockedFile(
-                cache_filename, 'r+b', 'rb')
-            try:
-                locked_file.open_and_lock()
-                if locked_file.is_locked():
-                    creds = {  # Credentials metadata dict.
-                        'scopes': sorted(list(scopes)),
-                        'svc_acct_name': self.__service_account_name}
-                    locked_file.file_handle().write(
-                        json.dumps(creds, encoding='ascii'))
-                    # If it's not locked, the locking process will
-                    # write the same data to the file, so just
-                    # continue.
-            finally:
-                locked_file.unlock_and_close()
+        with cache_file_lock:
+            if _EnsureFileExists(cache_filename):
+                locked_file = oauth2client.locked_file.LockedFile(
+                    cache_filename, 'r+b', 'rb')
+                try:
+                    locked_file.open_and_lock()
+                    if locked_file.is_locked():
+                        creds = {  # Credentials metadata dict.
+                            'scopes': sorted(list(scopes)),
+                            'svc_acct_name': self.__service_account_name}
+                        locked_file.file_handle().write(
+                            json.dumps(creds, encoding='ascii'))
+                        # If it's not locked, the locking process will
+                        # write the same data to the file, so just
+                        # continue.
+                finally:
+                    locked_file.unlock_and_close()
 
     def _ScopesFromMetadataServer(self, scopes):
         if not util.DetectGce():
@@ -260,35 +280,16 @@
         return scopes
 
     def GetServiceAccount(self, account):
-        account_uri = (
-            'http://metadata.google.internal/computeMetadata/'
-            'v1/instance/service-accounts')
-        additional_headers = {'X-Google-Metadata-Request': 'True'}
-        request = urllib.request.Request(
-            account_uri, headers=additional_headers)
-        try:
-            response = _OpenNoProxy(request)
-        except urllib.error.URLError as e:
-            raise exceptions.CommunicationError(
-                'Could not reach metadata service: %s' % e.reason)
+        relative_url = 'instance/service-accounts'
+        response = _GceMetadataRequest(relative_url)
         response_lines = [line.rstrip('/\n\r')
                           for line in response.readlines()]
         return account in response_lines
 
     def GetInstanceScopes(self):
-        # Extra header requirement can be found here:
-        # https://developers.google.com/compute/docs/metadata
-        scopes_uri = (
-            'http://metadata.google.internal/computeMetadata/v1/instance/'
-            'service-accounts/%s/scopes') % self.__service_account_name
-        additional_headers = {'X-Google-Metadata-Request': 'True'}
-        request = urllib.request.Request(
-            scopes_uri, headers=additional_headers)
-        try:
-            response = _OpenNoProxy(request)
-        except urllib.error.URLError as e:
-            raise exceptions.CommunicationError(
-                'Could not reach metadata service: %s' % e.reason)
+        relative_url = 'instance/service-accounts/{0}/scopes'.format(
+            self.__service_account_name)
+        response = _GceMetadataRequest(relative_url)
         return util.NormalizeScopes(scope.strip()
                                     for scope in response.readlines())
 
@@ -312,24 +313,21 @@
 
         If self.store is initialized, store acquired credentials there.
         """
-        token_uri = (
-            'http://metadata.google.internal/computeMetadata/v1/instance/'
-            'service-accounts/%s/token') % self.__service_account_name
-        extra_headers = {'X-Google-Metadata-Request': 'True'}
-        request = urllib.request.Request(token_uri, headers=extra_headers)
+        relative_url = 'instance/service-accounts/{0}/token'.format(
+            self.__service_account_name)
         try:
-            content = _OpenNoProxy(request).read()
-        except urllib.error.URLError as e:
+            response = _GceMetadataRequest(relative_url)
+        except exceptions.CommunicationError:
             self.invalid = True
             if self.store:
                 self.store.locked_put(self)
-            raise exceptions.CommunicationError(
-                'Could not reach metadata service: %s' % e.reason)
+            raise
+        content = response.read()
         try:
             credential_info = json.loads(content)
         except ValueError:
             raise exceptions.CredentialsError(
-                'Invalid credentials response: uri %s' % token_uri)
+                'Could not parse response as JSON: %s' % content)
 
         self.access_token = credential_info['access_token']
         if 'expires_in' in credential_info:
@@ -346,7 +344,11 @@
     @classmethod
     def from_json(cls, json_data):
         data = json.loads(json_data)
-        credentials = GceAssertionCredentials(scopes=[data['scope']])
+        kwargs = {}
+        if 'cache_filename' in data.get('kwargs', []):
+            kwargs['cache_filename'] = data['kwargs']['cache_filename']
+        credentials = GceAssertionCredentials(scopes=[data['scope']],
+                                              **kwargs)
         if 'access_token' in data:
             credentials.access_token = data['access_token']
         if 'token_expiry' in data:
@@ -415,7 +417,7 @@
 
     parser = argparse.ArgumentParser(parents=[tools.argparser])
     # Get command line argparse flags.
-    flags = parser.parse_args(args=args)
+    flags, _ = parser.parse_known_args(args=args)
 
     # Allow `gflags` and `argparse` to be used side-by-side.
     if hasattr(FLAGS, 'auth_host_name'):
@@ -428,7 +430,7 @@
 
 
 # TODO(craigcitro): Switch this from taking a path to taking a stream.
-def CredentialsFromFile(path, client_info):
+def CredentialsFromFile(path, client_info, oauth2client_args=None):
     """Read credentials from a file."""
     credential_store = oauth2client.multistore_file.get_credential_storage(
         path,
@@ -440,19 +442,19 @@
     credentials = credential_store.get()
     if credentials is None or credentials.invalid:
         print('Generating new OAuth credentials ...')
-        while True:
+        for _ in range(20):
             # If authorization fails, we want to retry, rather than let this
             # cascade up and get caught elsewhere. If users want out of the
             # retry loop, they can ^C.
             try:
                 flow = oauth2client.client.OAuth2WebServerFlow(**client_info)
-                flags = _GetRunFlowFlags()
+                flags = _GetRunFlowFlags(args=oauth2client_args)
                 credentials = tools.run_flow(flow, credential_store, flags)
                 break
             except (oauth2client.client.FlowExchangeError, SystemExit) as e:
                 # Here SystemExit is "no credential at all", and the
-                # FlowExchangeError is "invalid" -- usually because you reused
-                # a token.
+                # FlowExchangeError is "invalid" -- usually because
+                # you reused a token.
                 print('Invalid authorization: %s' % (e,))
             except httplib2.HttpLib2Error as e:
                 print('Communication error: %s' % (e,))
@@ -487,3 +489,74 @@
         credentials.refresh(http)
         response, content = http.request(url)
     return json.loads(content or '{}')  # Save ourselves from an empty reply.
+
+
+@_RegisterCredentialsMethod
+def _GetServiceAccountCredentials(
+        client_info, service_account_name=None, service_account_keyfile=None,
+        service_account_json_keyfile=None, **unused_kwds):
+    if ((service_account_name and not service_account_keyfile) or
+            (service_account_keyfile and not service_account_name)):
+        raise exceptions.CredentialsError(
+            'Service account name or keyfile provided without the other')
+    scopes = client_info['scope'].split()
+    user_agent = client_info['user_agent']
+    if service_account_json_keyfile:
+        with open(service_account_json_keyfile) as keyfile:
+            service_account_info = json.load(keyfile)
+        account_type = service_account_info.get('type')
+        if account_type != oauth2client.client.SERVICE_ACCOUNT:
+            raise exceptions.CredentialsError(
+                'Invalid service account credentials: %s' % (
+                    service_account_json_keyfile,))
+        # pylint: disable=protected-access
+        credentials = oauth2client.service_account._ServiceAccountCredentials(
+            service_account_id=service_account_info['client_id'],
+            service_account_email=service_account_info['client_email'],
+            private_key_id=service_account_info['private_key_id'],
+            private_key_pkcs8_text=service_account_info['private_key'],
+            scopes=scopes, user_agent=user_agent)
+        # pylint: enable=protected-access
+        return credentials
+    if service_account_name is not None:
+        credentials = ServiceAccountCredentialsFromFile(
+            service_account_name, service_account_keyfile, scopes,
+            service_account_kwargs={'user_agent': user_agent})
+        if credentials is not None:
+            return credentials
+
+
+@_RegisterCredentialsMethod
+def _GetGaeServiceAccount(unused_client_info, scopes, **unused_kwds):
+    return GaeAssertionCredentials.Get(scopes=scopes)
+
+
+@_RegisterCredentialsMethod
+def _GetGceServiceAccount(unused_client_info, scopes, **unused_kwds):
+    return GceAssertionCredentials.Get(scopes=scopes)
+
+
+@_RegisterCredentialsMethod
+def _GetApplicationDefaultCredentials(
+        unused_client_info, scopes, skip_application_default_credentials=False,
+        **unused_kwds):
+    if skip_application_default_credentials:
+        return None
+    gc = oauth2client.client.GoogleCredentials
+    with cache_file_lock:
+        try:
+            # pylint: disable=protected-access
+            # We've already done our own check for GAE/GCE
+            # credentials, we don't want to pay for checking again.
+            credentials = gc._implicit_credentials_from_files()
+        except oauth2client.client.ApplicationDefaultCredentialsError:
+            return None
+    # If we got back a non-service account credential, we need to use
+    # a heuristic to decide whether or not the application default
+    # credential will work for us. We assume that if we're requesting
+    # cloud-platform, our scopes are a subset of cloud scopes, and the
+    # ADC will work.
+    cp = 'https://www.googleapis.com/auth/cloud-platform'
+    if not isinstance(credentials, gc) or cp in scopes:
+        return credentials
+    return None
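
When the credential found on disk is a plain GoogleCredentials, the helper above only returns it if the broad cloud-platform scope was requested; otherwise it returns None so the next registered method can be tried. A minimal standalone sketch of that scope heuristic (the helper name is illustrative, not part of apitools):

    CLOUD_PLATFORM = 'https://www.googleapis.com/auth/cloud-platform'

    def _AdcCoversScopes(scopes):
        # Mirrors the check above: assume the application default
        # credential works whenever cloud-platform is requested.
        return CLOUD_PLATFORM in scopes

    assert _AdcCoversScopes([CLOUD_PLATFORM])
    assert not _AdcCoversScopes(['https://www.googleapis.com/auth/bigquery'])
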
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/credentials_lib_test.py b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/credentials_lib_test.py
index cf4e5df..067b874 100644
--- a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/credentials_lib_test.py
+++ b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/credentials_lib_test.py
@@ -26,16 +26,15 @@
 class CredentialsLibTest(unittest2.TestCase):
 
     def _GetServiceCreds(self, service_account_name=None, scopes=None):
-        scopes = scopes or ['scope1']
         kwargs = {}
         if service_account_name is not None:
             kwargs['service_account_name'] = service_account_name
         service_account_name = service_account_name or 'default'
 
-        def MockMetadataCalls(request):
-            request_url = request.get_full_url()
+        def MockMetadataCalls(request_url):
+            default_scopes = scopes or ['scope1']
             if request_url.endswith('scopes'):
-                return six.StringIO(''.join(scopes))
+                return six.StringIO(''.join(default_scopes))
             elif request_url.endswith('service-accounts'):
                 return six.StringIO(service_account_name)
             elif request_url.endswith(
@@ -43,7 +42,7 @@
                 return six.StringIO('{"access_token": "token"}')
             self.fail('Unexpected HTTP request to %s' % request_url)
 
-        with mock.patch.object(credentials_lib, '_OpenNoProxy',
+        with mock.patch.object(credentials_lib, '_GceMetadataRequest',
                                side_effect=MockMetadataCalls,
                                autospec=True) as opener_mock:
             with mock.patch.object(util, 'DetectGce',
@@ -58,8 +57,11 @@
             self.assertEqual(3, opener_mock.call_count)
 
     def testGceServiceAccounts(self):
+        scopes = ['scope1']
         self._GetServiceCreds()
-        self._GetServiceCreds(service_account_name='my_service_account')
+        self._GetServiceCreds(scopes=scopes)
+        self._GetServiceCreds(service_account_name='my_service_account',
+                              scopes=scopes)
 
 
 class TestGetRunFlowFlags(unittest2.TestCase):
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/encoding.py b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/encoding.py
old mode 100755
new mode 100644
index c35e4cf..972a95f
--- a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/encoding.py
+++ b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/encoding.py
@@ -6,7 +6,8 @@
 import datetime
 import json
 import logging
-
+import os
+import sys
 
 from protorpc import message_types
 from protorpc import messages
@@ -265,10 +266,12 @@
         # remove this later.
         old_level = logging.getLogger().level
         logging.getLogger().setLevel(logging.ERROR)
-        result = _DecodeCustomFieldNames(message_type, encoded_message)
-        result = super(_ProtoJsonApiTools, self).decode_message(
-            message_type, result)
-        logging.getLogger().setLevel(old_level)
+        try:
+            result = _DecodeCustomFieldNames(message_type, encoded_message)
+            result = super(_ProtoJsonApiTools, self).decode_message(
+                message_type, result)
+        finally:
+            logging.getLogger().setLevel(old_level)
         result = _ProcessUnknownEnums(result, encoded_message)
         result = _ProcessUnknownMessages(result, encoded_message)
         return _DecodeUnknownFields(result, encoded_message)
@@ -382,6 +385,8 @@
         if name in all_field_names:
             continue
         value = PyValueToMessage(field_type, value_dict)
+        if pair_type.value.repeated:
+            value = _AsMessageList(value)
         new_pair = pair_type(key=name, value=value)
         new_values.append(new_pair)
     return new_values
@@ -399,6 +404,8 @@
         value_type = pair_type.field_by_name('value')
         if isinstance(value_type, messages.MessageField):
             decoded_value = DictToMessage(value, pair_type.value.message_type)
+        elif isinstance(value_type, messages.EnumField):
+            decoded_value = pair_type.value.type(value)
         else:
             decoded_value = value
         new_pair = pair_type(key=str(unknown_field), value=decoded_value)
@@ -520,7 +527,30 @@
 _JSON_FIELD_MAPPINGS = {}
 
 
-def AddCustomJsonEnumMapping(enum_type, python_name, json_name):
+def _GetTypeKey(message_type, package):
+    """Get the prefix for this message type in mapping dicts."""
+    key = message_type.definition_name()
+    if package and key.startswith(package + '.'):
+        module_name = message_type.__module__
+        # We normalize '__main__' to something unique, if possible.
+        if module_name == '__main__':
+            try:
+                file_name = sys.modules[module_name].__file__
+            except (AttributeError, KeyError):
+                pass
+            else:
+                base_name = os.path.basename(file_name)
+                split_name = os.path.splitext(base_name)
+                if len(split_name) == 1:
+                    module_name = unicode(base_name)
+                else:
+                    module_name = u'.'.join(split_name[:-1])
+        key = module_name + '.' + key.partition('.')[2]
+    return key
+
+
+def AddCustomJsonEnumMapping(enum_type, python_name, json_name,
+                             package=''):
     """Add a custom wire encoding for a given enum value.
 
     This is primarily used in generated code, to handle enum values
@@ -530,11 +560,14 @@
       enum_type: (messages.Enum) An enum type
       python_name: (string) Python name for this value.
       json_name: (string) JSON name to be used on the wire.
+      package: (basestring, optional) Package prefix for this enum, if
+          present. We strip this off the enum name in order to generate
+          unique keys.
     """
     if not issubclass(enum_type, messages.Enum):
         raise exceptions.TypecheckError(
             'Cannot set JSON enum mapping for non-enum "%s"' % enum_type)
-    enum_name = enum_type.definition_name()
+    enum_name = _GetTypeKey(enum_type, package)
     if python_name not in enum_type.names():
         raise exceptions.InvalidDataError(
             'Enum value %s not a value for type %s' % (python_name, enum_type))
@@ -543,7 +576,8 @@
     field_mappings[python_name] = json_name
 
 
-def AddCustomJsonFieldMapping(message_type, python_name, json_name):
+def AddCustomJsonFieldMapping(message_type, python_name, json_name,
+                              package=''):
     """Add a custom wire encoding for a given message field.
 
     This is primarily used in generated code, to handle enum values
@@ -553,12 +587,15 @@
       message_type: (messages.Message) A message type
       python_name: (string) Python name for this value.
       json_name: (string) JSON name to be used on the wire.
+      package: (basestring, optional) Package prefix for this message, if
+          present. We strip this off the message name in order to generate
+          unique keys.
     """
     if not issubclass(message_type, messages.Message):
         raise exceptions.TypecheckError(
             'Cannot set JSON field mapping for '
             'non-message "%s"' % message_type)
-    message_name = message_type.definition_name()
+    message_name = _GetTypeKey(message_type, package)
     try:
         _ = message_type.field_by_name(python_name)
     except KeyError:
@@ -647,3 +684,28 @@
                 decoded_message[python_name] = decoded_message.pop(json_name)
         encoded_message = json.dumps(decoded_message)
     return encoded_message
+
+
+def _AsMessageList(msg):
+    """Convert the provided list-as-JsonValue to a list."""
+    # This really needs to live in extra_types, but extra_types needs
+    # to import this file to be able to register codecs.
+    # TODO(craigcitro): Split out a codecs module and fix this ugly
+    # import.
+    from apitools.base.py import extra_types
+
+    def _IsRepeatedJsonValue(msg):
+        """Return True if msg is a repeated value as a JsonValue."""
+        if isinstance(msg, extra_types.JsonArray):
+            return True
+        if isinstance(msg, extra_types.JsonValue) and msg.array_value:
+            return True
+        return False
+
+    if not _IsRepeatedJsonValue(msg):
+        raise ValueError('invalid argument to _AsMessageList')
+    if isinstance(msg, extra_types.JsonValue):
+        msg = msg.array_value
+    if isinstance(msg, extra_types.JsonArray):
+        msg = msg.entries
+    return msg
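
The package= parameter added to the mapping registrars above changes how mappings are keyed: with no package the key is the type's full definition_name(), while a matching package prefix is swapped for the defining module's name via _GetTypeKey, keeping keys unique across generated modules. A minimal usage sketch, assuming the patched apitools and protorpc are importable (the Color enum is illustrative):

    from protorpc import messages
    from apitools.base.py import encoding

    class Color(messages.Enum):
        RED = 1
        BLUE = 2

    # Register a wire name for Color.RED. With package='' the registry
    # key is Color's full definition_name(); passing the matching
    # package prefix instead keys it by the defining module's name.
    encoding.AddCustomJsonEnumMapping(Color, 'RED', 'red', package='')
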
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/encoding_test.py b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/encoding_test.py
index 0d10d8b..0389af1 100644
--- a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/encoding_test.py
+++ b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/encoding_test.py
@@ -1,6 +1,7 @@
 import base64
 import datetime
 import json
+import sys
 
 from protorpc import message_types
 from protorpc import messages
@@ -9,6 +10,7 @@
 
 from apitools.base.py import encoding
 from apitools.base.py import exceptions
+from apitools.base.py import extra_types
 
 
 class SimpleMessage(messages.Message):
@@ -33,6 +35,21 @@
         value = messages.StringField(2)
 
     additional_properties = messages.MessageField(
+        'AdditionalProperty', 1, repeated=True)
+
+
+@encoding.MapUnrecognizedFields('additional_properties')
+class UnrecognizedEnumMessage(messages.Message):
+
+    class ThisEnum(messages.Enum):
+        VALUE_ONE = 1
+        VALUE_TWO = 2
+
+    class AdditionalProperty(messages.Message):
+        key = messages.StringField(1)
+        value = messages.EnumField('UnrecognizedEnumMessage.ThisEnum', 2)
+
+    additional_properties = messages.MessageField(
         AdditionalProperty, 1, repeated=True)
 
 
@@ -85,6 +102,17 @@
     repeated_field = messages.StringField(5, repeated=True)
 
 
+@encoding.MapUnrecognizedFields('additional_properties')
+class RepeatedJsonValueMessage(messages.Message):
+
+    class AdditionalProperty(messages.Message):
+        key = messages.StringField(1)
+        value = messages.MessageField(extra_types.JsonValue, 2, repeated=True)
+
+    additional_properties = messages.MessageField('AdditionalProperty', 1,
+                                                  repeated=True)
+
+
 encoding.AddCustomJsonEnumMapping(MessageWithRemappings.SomeEnum,
                                   'enum_value', 'wire_name')
 encoding.AddCustomJsonFieldMapping(MessageWithRemappings,
@@ -187,6 +215,14 @@
         self.assertEqual(1, len(result.additional_properties))
         self.assertEqual(0, result.additional_properties[0].value.index)
 
+    def testUnrecognizedEnum(self):
+        json_msg = '{"input": "VALUE_ONE"}'
+        result = encoding.JsonToMessage(
+            UnrecognizedEnumMessage, json_msg)
+        self.assertEqual(1, len(result.additional_properties))
+        self.assertEqual(UnrecognizedEnumMessage.ThisEnum.VALUE_ONE,
+                         result.additional_properties[0].value)
+
     def testNestedFieldMapping(self):
         nested_msg = AdditionalPropertiesMessage()
         nested_msg.additional_properties = [
@@ -342,3 +378,33 @@
             'TimeMessage(\n    '
             'timefield=datetime.datetime(2014, 7, 2, 23, 33, 25, 541000, '
             'tzinfo=TimeZoneOffset(datetime.timedelta(0))),\n)')
+
+    def testPackageMappingsNoPackage(self):
+        this_module_name = util.get_package_for_module(__name__)
+        full_type_name = 'MessageWithEnum.ThisEnum'
+        full_key = '%s.%s' % (this_module_name, full_type_name)
+        self.assertEqual(full_key,
+                         encoding._GetTypeKey(MessageWithEnum.ThisEnum, ''))
+
+    def testPackageMappingsWithPackage(self):
+        this_module_name = util.get_package_for_module(__name__)
+        full_type_name = 'MessageWithEnum.ThisEnum'
+        full_key = '%s.%s' % (this_module_name, full_type_name)
+        this_module = sys.modules[__name__]
+        new_package = 'new_package'
+        try:
+            setattr(this_module, 'package', new_package)
+            new_key = '%s.%s' % (new_package, full_type_name)
+            self.assertEqual(
+                new_key,
+                encoding._GetTypeKey(MessageWithEnum.ThisEnum, ''))
+            self.assertEqual(
+                full_key,
+                encoding._GetTypeKey(MessageWithEnum.ThisEnum, new_package))
+        finally:
+            delattr(this_module, 'package')
+
+    def testRepeatedJsonValuesAsRepeatedProperty(self):
+        encoded_msg = '{"a": [{"one": 1}]}'
+        msg = encoding.JsonToMessage(RepeatedJsonValueMessage, encoded_msg)
+        self.assertEqual(encoded_msg, encoding.MessageToJson(msg))
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/exceptions.py b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/exceptions.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/extra_types.py b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/extra_types.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/http_wrapper.py b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/http_wrapper.py
old mode 100755
new mode 100644
index 94c7e32..03a094d
--- a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/http_wrapper.py
+++ b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/http_wrapper.py
@@ -47,7 +47,8 @@
 # exc: Exception being raised.
 # num_retries: Number of retries consumed; used for exponential backoff.
 ExceptionRetryArgs = collections.namedtuple(
-    'ExceptionRetryArgs', ['http', 'http_request', 'exc', 'num_retries'])
+    'ExceptionRetryArgs', ['http', 'http_request', 'exc', 'num_retries',
+                           'max_retry_wait'])
 
 
 @contextlib.contextmanager
@@ -276,10 +277,12 @@
     logging.debug('Retrying request to url %s after exception %s',
                   retry_args.http_request.url, retry_args.exc)
     time.sleep(
-        retry_after or util.CalculateWaitForRetry(retry_args.num_retries))
+        retry_after or util.CalculateWaitForRetry(
+            retry_args.num_retries, max_wait=retry_args.max_retry_wait))
 
 
-def MakeRequest(http, http_request, retries=7, redirections=5,
+def MakeRequest(http, http_request, retries=7, max_retry_wait=60,
+                redirections=5,
                 retry_func=HandleExceptionsAndRebuildHttpConnections,
                 check_response_func=CheckResponse):
     """Send http_request via the given http, performing error/retry handling.
@@ -288,7 +291,10 @@
       http: An httplib2.Http instance, or a http multiplexer that delegates to
           an underlying http, for example, HTTPMultiplexer.
       http_request: A Request to send.
-      retries: (int, default 5) Number of retries to attempt on 5XX replies.
+      retries: (int, default 7) Number of retries to attempt on retryable
+          replies (such as 429 or 5XX).
+      max_retry_wait: (int, default 60) Maximum number of seconds to wait
+          when retrying.
       redirections: (int, default 5) Number of redirects to follow.
       retry_func: Function to handle retries on exceptions. Arguments are
           (Httplib2.Http, Request, Exception, int num_retries).
@@ -315,7 +321,8 @@
             if retry >= retries:
                 raise
             else:
-                retry_func(ExceptionRetryArgs(http, http_request, e, retry))
+                retry_func(ExceptionRetryArgs(
+                    http, http_request, e, retry, max_retry_wait))
 
 
 def _MakeRequestNoRetry(http, http_request, redirections=5,
@@ -365,5 +372,16 @@
     return response
 
 
+_HTTP_FACTORIES = []
+
+
+def _RegisterHttpFactory(factory):
+    _HTTP_FACTORIES.append(factory)
+
+
 def GetHttp(**kwds):
+    for factory in _HTTP_FACTORIES:
+        http = factory(**kwds)
+        if http is not None:
+            return http
     return httplib2.Http(**kwds)
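
GetHttp now consults a list of registered factories before falling back to a plain httplib2.Http; a factory signals "not mine" by returning None. A small sketch of that hook, assuming httplib2 is installed (the timeout factory is illustrative, not part of apitools):

    import httplib2

    from apitools.base.py import http_wrapper

    def _TimeoutHttpFactory(**kwds):
        # Return a configured client, or None to decline and let GetHttp
        # fall through to the default httplib2.Http construction.
        kwds.setdefault('timeout', 30)
        return httplib2.Http(**kwds)

    http_wrapper._RegisterHttpFactory(_TimeoutHttpFactory)
    http = http_wrapper.GetHttp()  # built by the factory registered above
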
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/list_pager.py b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/list_pager.py
old mode 100755
new mode 100644
index cf90389..85c2594
--- a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/list_pager.py
+++ b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/list_pager.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 """A helper function that executes a series of List queries for many APIs."""
 
-import copy
+from apitools.base.py import encoding
 
 __all__ = [
     'YieldFromList',
@@ -41,7 +41,7 @@
       protorpc.message.Message, The resources listed by the service.
 
     """
-    request = copy.deepcopy(request)
+    request = encoding.CopyProtoMessage(request)
     setattr(request, batch_size_attribute, batch_size)
     setattr(request, current_token_attribute, None)
     while limit is None or limit:
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/stream_slice.py b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/stream_slice.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/testing/testclient/fusiontables_v1_messages.py b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/testing/testclient/fusiontables_v1_messages.py
index fd727a0..68284b5 100644
--- a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/testing/testclient/fusiontables_v1_messages.py
+++ b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/testing/testclient/fusiontables_v1_messages.py
@@ -4,13 +4,13 @@
 """
 # NOTE: This file is autogenerated and should not be edited by hand.
 
-from protorpc import messages
+from protorpc import messages as _messages
 
 
 package = 'fusiontables'
 
 
-class Column(messages.Message):
+class Column(_messages.Message):
 
     """Specifies the id, name and type of a column in a table.
 
@@ -33,7 +33,7 @@
 
     """
 
-    class BaseColumnValue(messages.Message):
+    class BaseColumnValue(_messages.Message):
 
         """Optional identifier of the base column. If present, this column is
         derived from the specified base column.
@@ -46,19 +46,19 @@
 
         """
 
-        columnId = messages.IntegerField(1, variant=messages.Variant.INT32)
-        tableIndex = messages.IntegerField(2, variant=messages.Variant.INT32)
+        columnId = _messages.IntegerField(1, variant=_messages.Variant.INT32)
+        tableIndex = _messages.IntegerField(2, variant=_messages.Variant.INT32)
 
-    baseColumn = messages.MessageField('BaseColumnValue', 1)
-    columnId = messages.IntegerField(2, variant=messages.Variant.INT32)
-    description = messages.StringField(3)
-    graph_predicate = messages.StringField(4)
-    kind = messages.StringField(5, default=u'fusiontables#column')
-    name = messages.StringField(6)
-    type = messages.StringField(7)
+    baseColumn = _messages.MessageField('BaseColumnValue', 1)
+    columnId = _messages.IntegerField(2, variant=_messages.Variant.INT32)
+    description = _messages.StringField(3)
+    graph_predicate = _messages.StringField(4)
+    kind = _messages.StringField(5, default=u'fusiontables#column')
+    name = _messages.StringField(6)
+    type = _messages.StringField(7)
 
 
-class ColumnList(messages.Message):
+class ColumnList(_messages.Message):
 
     """Represents a list of columns in a table.
 
@@ -71,13 +71,13 @@
 
     """
 
-    items = messages.MessageField('Column', 1, repeated=True)
-    kind = messages.StringField(2, default=u'fusiontables#columnList')
-    nextPageToken = messages.StringField(3)
-    totalItems = messages.IntegerField(4, variant=messages.Variant.INT32)
+    items = _messages.MessageField('Column', 1, repeated=True)
+    kind = _messages.StringField(2, default=u'fusiontables#columnList')
+    nextPageToken = _messages.StringField(3)
+    totalItems = _messages.IntegerField(4, variant=_messages.Variant.INT32)
 
 
-class FusiontablesColumnListRequest(messages.Message):
+class FusiontablesColumnListRequest(_messages.Message):
 
     """A FusiontablesColumnListRequest object.
 
@@ -88,12 +88,12 @@
       tableId: Table whose columns are being listed.
     """
 
-    maxResults = messages.IntegerField(1, variant=messages.Variant.UINT32)
-    pageToken = messages.StringField(2)
-    tableId = messages.StringField(3, required=True)
+    maxResults = _messages.IntegerField(1, variant=_messages.Variant.UINT32)
+    pageToken = _messages.StringField(2)
+    tableId = _messages.StringField(3, required=True)
 
 
-class FusiontablesColumnListAlternateRequest(messages.Message):
+class FusiontablesColumnListAlternateRequest(_messages.Message):
 
     """A FusiontablesColumnListRequest object.
 
@@ -104,12 +104,12 @@
       tableId: Table whose columns are being listed.
     """
 
-    pageSize = messages.IntegerField(1, variant=messages.Variant.UINT32)
-    pageToken = messages.StringField(2)
-    tableId = messages.StringField(3, required=True)
+    pageSize = _messages.IntegerField(1, variant=_messages.Variant.UINT32)
+    pageToken = _messages.StringField(2)
+    tableId = _messages.StringField(3, required=True)
 
 
-class ColumnListAlternate(messages.Message):
+class ColumnListAlternate(_messages.Message):
 
     """Represents a list of columns in a table.
 
@@ -122,7 +122,7 @@
 
     """
 
-    columns = messages.MessageField('Column', 1, repeated=True)
-    kind = messages.StringField(2, default=u'fusiontables#columnList')
-    nextPageToken = messages.StringField(3)
-    totalItems = messages.IntegerField(4, variant=messages.Variant.INT32)
+    columns = _messages.MessageField('Column', 1, repeated=True)
+    kind = _messages.StringField(2, default=u'fusiontables#columnList')
+    nextPageToken = _messages.StringField(3)
+    totalItems = _messages.IntegerField(4, variant=_messages.Variant.INT32)
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/transfer.py b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/transfer.py
old mode 100755
new mode 100644
index 35a4774..aba55ca
--- a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/transfer.py
+++ b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/transfer.py
@@ -331,7 +331,7 @@
         else:
             if start < 0:
                 start = max(0, start + self.total_size)
-            return start, self.total_size
+            return start, self.total_size - 1
 
     def __SetRangeHeader(self, request, start, end=None):
         if start < 0:
@@ -364,12 +364,24 @@
 
         """
         end_byte = end
+
+        if start < 0 and not self.total_size:
+            return end_byte
+
         if use_chunks:
             alternate = start + self.chunksize - 1
-            end_byte = min(end_byte, alternate) if end_byte else alternate
+            if end_byte is not None:
+                end_byte = min(end_byte, alternate)
+            else:
+                end_byte = alternate
+
         if self.total_size:
             alternate = self.total_size - 1
-            end_byte = min(end_byte, alternate) if end_byte else alternate
+            if end_byte is not None:
+                end_byte = min(end_byte, alternate)
+            else:
+                end_byte = alternate
+
         return end_byte
 
     def __GetChunk(self, start, end, additional_headers=None):
@@ -434,22 +446,24 @@
         self.EnsureInitialized()
         progress_end_normalized = False
         if self.total_size is not None:
-            progress, end = self.__NormalizeStartEnd(start, end)
+            progress, end_byte = self.__NormalizeStartEnd(start, end)
             progress_end_normalized = True
         else:
             progress = start
-        while not progress_end_normalized or progress < end:
-            end_byte = self.__ComputeEndByte(progress, end=end,
+            end_byte = end
+        while (not progress_end_normalized or end_byte is None or
+               progress <= end_byte):
+            end_byte = self.__ComputeEndByte(progress, end=end_byte,
                                              use_chunks=use_chunks)
             response = self.__GetChunk(progress, end_byte,
                                        additional_headers=additional_headers)
             if not progress_end_normalized:
                 self.__SetTotal(response.info)
-                progress, end = self.__NormalizeStartEnd(start, end)
+                progress, end_byte = self.__NormalizeStartEnd(start, end)
                 progress_end_normalized = True
             response = self.__ProcessResponse(response)
             progress += response.length
-            if not response:
+            if response.length == 0:
                 raise exceptions.TransferRetryError(
                     'Zero bytes unexpectedly returned in download response')
 
@@ -659,6 +673,8 @@
         Returns:
           None.
         """
+        if upload_config.resumable_path is None:
+            self.strategy = SIMPLE_UPLOAD
         if self.strategy is not None:
             return
         strategy = SIMPLE_UPLOAD
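
The reworked __ComputeEndByte above clamps the requested end byte first to a single chunk and then to the last byte of the object, treating None as "no bound" at each step instead of the old falsy check that mishandled an explicit end of byte 0. A standalone sketch of the same calculation (function name illustrative):

    def _ComputeEndByteSketch(start, end, chunksize, total_size,
                              use_chunks=True):
        # Mirror of the logic above: clamp to one chunk, then to EOF;
        # None means unbounded at that step.
        end_byte = end
        if start < 0 and not total_size:
            return end_byte
        if use_chunks:
            alternate = start + chunksize - 1
            end_byte = alternate if end_byte is None else min(end_byte, alternate)
        if total_size:
            alternate = total_size - 1
            end_byte = alternate if end_byte is None else min(end_byte, alternate)
        return end_byte

    # First chunk of a 26-byte object with a 10-byte chunksize: bytes 0-9.
    assert _ComputeEndByteSketch(0, None, chunksize=10, total_size=26) == 9
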
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/transfer_test.py b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/transfer_test.py
index 9d58b1e..8e29f12 100644
--- a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/transfer_test.py
+++ b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/transfer_test.py
@@ -76,6 +76,36 @@
                              download._Download__ComputeEndByte(start),
                              msg='Failed on start={0}'.format(start))
 
+    def testGetRange(self):
+        for (start_byte, end_byte) in [(0, 25), (5, 15), (0, 0), (25, 25)]:
+            bytes_http = object()
+            http = object()
+            download_stream = six.StringIO()
+            download = transfer.Download.FromStream(download_stream,
+                                                    total_size=26,
+                                                    auto_transfer=False)
+            download.bytes_http = bytes_http
+            base_url = 'https://part.one/'
+            with mock.patch.object(http_wrapper, 'MakeRequest',
+                                   autospec=True) as make_request:
+                make_request.return_value = http_wrapper.Response(
+                    info={
+                        'content-range': 'bytes %d-%d/26' %
+                                         (start_byte, end_byte),
+                        'status': http_client.OK,
+                    },
+                    content=string.ascii_lowercase[start_byte:end_byte+1],
+                    request_url=base_url,
+                )
+                request = http_wrapper.Request(url='https://part.one/')
+                download.InitializeDownload(request, http=http)
+                download.GetRange(start_byte, end_byte)
+                self.assertEqual(1, make_request.call_count)
+                received_request = make_request.call_args[0][1]
+                self.assertEqual(base_url, received_request.url)
+                self.assertRangeAndContentRangeCompatible(
+                    received_request, make_request.return_value)
+
     def testNonChunkedDownload(self):
         bytes_http = object()
         http = object()
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/util.py b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/util.py
old mode 100755
new mode 100644
index 779dd97..b92c9f8
--- a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/util.py
+++ b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/util.py
@@ -126,20 +126,17 @@
 
     Args:
       retry_attempt: Retry attempt counter.
-      max_wait: Upper bound for wait time.
+      max_wait: Upper bound for wait time [seconds].
 
     Returns:
-      Amount of time to wait before retrying request.
+      Number of seconds to wait before retrying request.
 
     """
 
     wait_time = 2 ** retry_attempt
-    # randrange requires a nonzero interval, so we want to drop it if
-    # the range is too small for jitter.
-    if retry_attempt:
-        max_jitter = (2 ** retry_attempt) / 2
-        wait_time += random.randrange(-max_jitter, max_jitter)
-    return min(wait_time, max_wait)
+    max_jitter = wait_time / 4.0
+    wait_time += random.uniform(-max_jitter, max_jitter)
+    return max(1, min(wait_time, max_wait))
 
 
 def AcceptableMimeType(accept_patterns, mime_type):
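
The new backoff keeps the 2**attempt base but draws jitter uniformly from +/-25% of it and clamps the result to [1, max_wait] seconds, which is what the reworked bounds in util_test.py below assert. A self-contained sketch of the same formula:

    import random

    def calculate_wait_sketch(retry_attempt, max_wait=60):
        # Same formula as the patched CalculateWaitForRetry: 2**attempt
        # seconds, +/-25% uniform jitter, clamped to [1, max_wait].
        wait_time = 2 ** retry_attempt
        max_jitter = wait_time / 4.0
        wait_time += random.uniform(-max_jitter, max_jitter)
        return max(1, min(wait_time, max_wait))

    # Attempt 3: 8s +/- 2s, so always inside the test's [4.0, 12.0] bounds.
    assert 4.0 <= calculate_wait_sketch(3) <= 12.0
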
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/util_test.py b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/util_test.py
index 54dc3e1..6cb551d 100644
--- a/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/util_test.py
+++ b/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/util_test.py
@@ -56,14 +56,23 @@
             method_config_no_reserved, {'x': 'foo/:bar:'}))
 
     def testCalculateWaitForRetry(self):
-        self.assertTrue(util.CalculateWaitForRetry(1) in range(1, 4))
-        self.assertTrue(util.CalculateWaitForRetry(2) in range(2, 7))
-        self.assertTrue(util.CalculateWaitForRetry(3) in range(4, 13))
-        self.assertTrue(util.CalculateWaitForRetry(4) in range(8, 25))
+        try0 = util.CalculateWaitForRetry(0)
+        self.assertTrue(try0 >= 1.0)
+        self.assertTrue(try0 <= 1.5)
+        try1 = util.CalculateWaitForRetry(1)
+        self.assertTrue(try1 >= 1.0)
+        self.assertTrue(try1 <= 3.0)
+        try2 = util.CalculateWaitForRetry(2)
+        self.assertTrue(try2 >= 2.0)
+        self.assertTrue(try2 <= 6.0)
+        try3 = util.CalculateWaitForRetry(3)
+        self.assertTrue(try3 >= 4.0)
+        self.assertTrue(try3 <= 12.0)
+        try4 = util.CalculateWaitForRetry(4)
+        self.assertTrue(try4 >= 8.0)
+        self.assertTrue(try4 <= 24.0)
 
-        self.assertEquals(10, util.CalculateWaitForRetry(5, max_wait=10))
-
-        self.assertGreater(util.CalculateWaitForRetry(0), 0)
+        self.assertAlmostEqual(10, util.CalculateWaitForRetry(5, max_wait=10))
 
     def testTypecheck(self):
 
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/data/__init__.py b/catapult/third_party/gsutil/third_party/apitools/apitools/data/__init__.py
new file mode 100644
index 0000000..54fa3d5
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/apitools/apitools/data/__init__.py
@@ -0,0 +1,5 @@
+#!/usr/bin/env python
+"""Shared __init__.py for apitools."""
+
+from pkgutil import extend_path
+__path__ = extend_path(__path__, __name__)
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/data/apitools_client_secrets.json b/catapult/third_party/gsutil/third_party/apitools/apitools/data/apitools_client_secrets.json
new file mode 100644
index 0000000..5761d14
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/apitools/apitools/data/apitools_client_secrets.json
@@ -0,0 +1,15 @@
+{
+  "installed": {
+    "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
+    "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+    "client_email": "",
+    "client_id": "1042881264118.apps.googleusercontent.com",
+    "client_secret": "x_Tw5K8nnjoRAqULM9PFAC2b",
+    "client_x509_cert_url": "",
+    "redirect_uris": [
+      "urn:ietf:wg:oauth:2.0:oob",
+      "oob"
+    ],
+    "token_uri": "https://accounts.google.com/o/oauth2/token"
+  }
+}
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/gen/__init__.py b/catapult/third_party/gsutil/third_party/apitools/apitools/gen/__init__.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/gen/command_registry.py b/catapult/third_party/gsutil/third_party/apitools/apitools/gen/command_registry.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/gen/extended_descriptor.py b/catapult/third_party/gsutil/third_party/apitools/apitools/gen/extended_descriptor.py
old mode 100755
new mode 100644
index abff599..5274100
--- a/catapult/third_party/gsutil/third_party/apitools/apitools/gen/extended_descriptor.py
+++ b/catapult/third_party/gsutil/third_party/apitools/apitools/gen/extended_descriptor.py
@@ -149,9 +149,11 @@
     proto_printer.PrintPreamble(package, version, file_descriptor)
     _PrintEnums(proto_printer, file_descriptor.enum_types)
     _PrintMessages(proto_printer, file_descriptor.message_types)
-    custom_json_mappings = _FetchCustomMappings(file_descriptor.enum_types)
+    custom_json_mappings = _FetchCustomMappings(
+        file_descriptor.enum_types, file_descriptor.package)
     custom_json_mappings.extend(
-        _FetchCustomMappings(file_descriptor.message_types))
+        _FetchCustomMappings(
+            file_descriptor.message_types, file_descriptor.package))
     for mapping in custom_json_mappings:
         proto_printer.PrintCustomJsonMapping(mapping)
 
@@ -183,29 +185,31 @@
                         printer(line)
 
 
-def _FetchCustomMappings(descriptor_ls):
+def _FetchCustomMappings(descriptor_ls, package):
     """Find and return all custom mappings for descriptors in descriptor_ls."""
     custom_mappings = []
     for descriptor in descriptor_ls:
         if isinstance(descriptor, ExtendedEnumDescriptor):
             custom_mappings.extend(
-                _FormatCustomJsonMapping('Enum', m, descriptor)
+                _FormatCustomJsonMapping('Enum', m, descriptor, package)
                 for m in descriptor.enum_mappings)
         elif isinstance(descriptor, ExtendedMessageDescriptor):
             custom_mappings.extend(
-                _FormatCustomJsonMapping('Field', m, descriptor)
+                _FormatCustomJsonMapping('Field', m, descriptor, package)
                 for m in descriptor.field_mappings)
-            custom_mappings.extend(_FetchCustomMappings(descriptor.enum_types))
             custom_mappings.extend(
-                _FetchCustomMappings(descriptor.message_types))
+                _FetchCustomMappings(descriptor.enum_types, package))
+            custom_mappings.extend(
+                _FetchCustomMappings(descriptor.message_types, package))
     return custom_mappings
 
 
-def _FormatCustomJsonMapping(mapping_type, mapping, descriptor):
+def _FormatCustomJsonMapping(mapping_type, mapping, descriptor, package):
     return '\n'.join((
         'encoding.AddCustomJson%sMapping(' % mapping_type,
-        "    %s, '%s', '%s')" % (descriptor.full_name, mapping.python_name,
-                                 mapping.json_name)
+        "    %s, '%s', '%s'," % (descriptor.full_name, mapping.python_name,
+                                 mapping.json_name),
+        '    package=%r)' % package,
     ))
 
 
@@ -364,7 +368,7 @@
         self.__printer('"""')
 
     def PrintEnum(self, enum_type):
-        self.__printer('class %s(messages.Enum):', enum_type.name)
+        self.__printer('class %s(_messages.Enum):', enum_type.name)
         with self.__printer.Indent():
             self.__PrintEnumDocstringLines(enum_type)
             enum_values = sorted(
@@ -440,7 +444,7 @@
             return
         for decorator in message_type.decorators:
             self.__printer('@%s', decorator)
-        self.__printer('class %s(messages.Message):', message_type.name)
+        self.__printer('class %s(_messages.Message):', message_type.name)
         with self.__printer.Indent():
             self.__PrintMessageDocstringLines(message_type)
             _PrintEnums(self, message_type.enum_types)
@@ -476,7 +480,7 @@
         field = extended_field.field_descriptor
         printed_field_info = {
             'name': field.name,
-            'module': 'messages',
+            'module': '_messages',
             'type_name': '',
             'type_format': '',
             'number': field.number,
@@ -487,7 +491,7 @@
 
         message_field = _MESSAGE_FIELD_MAP.get(field.type_name)
         if message_field:
-            printed_field_info['module'] = 'message_types'
+            printed_field_info['module'] = '_message_types'
             field_type = message_field
         elif field.type_name == 'extra_types.DateField':
             printed_field_info['module'] = 'extra_types'
@@ -506,7 +510,7 @@
 
         if field_type.DEFAULT_VARIANT != field.variant:
             printed_field_info['variant_format'] = (
-                ', variant=messages.Variant.%s' % field.variant)
+                ', variant=_messages.Variant.%s' % field.variant)
 
         if field.default_value:
             if field_type in [messages.BytesField, messages.StringField]:
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/gen/gen_client.py b/catapult/third_party/gsutil/third_party/apitools/apitools/gen/gen_client.py
old mode 100755
new mode 100644
index 7b7b648..a8e32f7
--- a/catapult/third_party/gsutil/third_party/apitools/apitools/gen/gen_client.py
+++ b/catapult/third_party/gsutil/third_party/apitools/apitools/gen/gen_client.py
@@ -103,16 +103,29 @@
         out.write(src_data)
 
 
+_DISCOVERY_DOC = None
+
+
+def _GetDiscoveryDocFromFlags():
+    """Get the discovery doc from flags."""
+    global _DISCOVERY_DOC  # pylint: disable=global-statement
+    if _DISCOVERY_DOC is None:
+        if FLAGS.discovery_url:
+            try:
+                discovery_doc = util.FetchDiscoveryDoc(FLAGS.discovery_url)
+            except exceptions.CommunicationError:
+                raise exceptions.GeneratedClientError(
+                    'Could not fetch discovery doc')
+        else:
+            infile = os.path.expanduser(FLAGS.infile) or '/dev/stdin'
+            discovery_doc = json.load(open(infile))
+        _DISCOVERY_DOC = discovery_doc
+    return _DISCOVERY_DOC
+
+
 def _GetCodegenFromFlags():
     """Create a codegen object from flags."""
-    if FLAGS.discovery_url:
-        try:
-            discovery_doc = util.FetchDiscoveryDoc(FLAGS.discovery_url)
-        except exceptions.CommunicationError:
-            return None
-    else:
-        infile = os.path.expanduser(FLAGS.infile) or '/dev/stdin'
-        discovery_doc = json.load(open(infile))
+    discovery_doc = _GetDiscoveryDocFromFlags()
     names = util.Names(
         FLAGS.strip_prefix,
         FLAGS.experimental_name_convention,
@@ -122,7 +135,7 @@
         try:
             with open(FLAGS.client_json) as client_json:
                 f = json.loads(client_json.read())
-                web = f.get('web', {})
+                web = f.get('installed', f.get('web', {}))
                 client_id = web.get('client_id')
                 client_secret = web.get('client_secret')
         except IOError:
@@ -148,9 +161,10 @@
         raise exceptions.ConfigurationValueError(
             'Output directory exists, pass --overwrite to replace '
             'the existing files.')
+    if not os.path.exists(outdir):
+        os.makedirs(outdir)
 
-    root_package = FLAGS.root_package or util.GetPackage(
-        outdir)  # pylint: disable=line-too-long
+    root_package = FLAGS.root_package or util.GetPackage(outdir)
     return gen_client_lib.DescriptorGenerator(
         discovery_doc, client_info, names, root_package, outdir,
         base_package=FLAGS.base_package,
@@ -169,6 +183,11 @@
         _CopyLocalFile('exceptions.py')
 
 
+def _WriteIntermediateInit(codegen):
+    with open('__init__.py', 'w') as out:
+        codegen.WriteIntermediateInit(out)
+
+
 def _WriteProtoFiles(codegen):
     with util.Chdir(codegen.outdir):
         with open(codegen.client_info.messages_proto_file_name, 'w') as out:
@@ -197,6 +216,11 @@
             codegen.WriteInit(out)
 
 
+def _WriteSetupPy(codegen):
+    with open('setup.py', 'w') as out:
+        codegen.WriteSetupPy(out)
+
+
 class GenerateClient(appcommands.Cmd):
 
     """Driver for client code generation."""
@@ -211,6 +235,33 @@
         _WriteInit(codegen)
 
 
+class GeneratePipPackage(appcommands.Cmd):
+
+    """Generate a client as a pip-installable tarball."""
+
+    def Run(self, _):
+        """Create a client in a pip package."""
+        discovery_doc = _GetDiscoveryDocFromFlags()
+        package = discovery_doc['name']
+        original_outdir = os.path.expanduser(FLAGS.outdir)
+        FLAGS.outdir = os.path.join(
+            FLAGS.outdir, 'apitools/clients/%s' % package)
+        FLAGS.root_package = 'apitools.clients.%s' % package
+        FLAGS.generate_cli = False
+        codegen = _GetCodegenFromFlags()
+        if codegen is None:
+            logging.error('Failed to create codegen, exiting.')
+            return 1
+        _WriteGeneratedFiles(codegen)
+        _WriteInit(codegen)
+        with util.Chdir(original_outdir):
+            _WriteSetupPy(codegen)
+            with util.Chdir('apitools'):
+                _WriteIntermediateInit(codegen)
+                with util.Chdir('clients'):
+                    _WriteIntermediateInit(codegen)
+
+
 class GenerateProto(appcommands.Cmd):
 
     """Generate just the two proto files for a given API."""
@@ -247,6 +298,7 @@
 
 def main(_):
     appcommands.AddCmd('client', GenerateClient)
+    appcommands.AddCmd('pip_package', GeneratePipPackage)
     appcommands.AddCmd('proto', GenerateProto)
 
 
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/gen/gen_client_lib.py b/catapult/third_party/gsutil/third_party/apitools/apitools/gen/gen_client_lib.py
old mode 100755
new mode 100644
index f9feb77..c7f4bee
--- a/catapult/third_party/gsutil/third_party/apitools/apitools/gen/gen_client_lib.py
+++ b/catapult/third_party/gsutil/third_party/apitools/apitools/gen/gen_client_lib.py
@@ -5,6 +5,8 @@
   https://developers.google.com/discovery/v1/reference/apis#resource
 """
 
+import datetime
+
 from six.moves import urllib_parse
 
 from apitools.base.py import base_cli
@@ -56,6 +58,7 @@
             self.__discovery_doc.get('description', ''))
         self.__package = self.__client_info.package
         self.__version = self.__client_info.version
+        self.__revision = discovery_doc.get('revision', '1')
         self.__generate_cli = generate_cli
         self.__root_package = root_package
         self.__base_files_package = base_package
@@ -133,6 +136,10 @@
         return self.__outdir
 
     @property
+    def package(self):
+        return self.__package
+
+    @property
     def use_proto2(self):
         return self.__use_proto2
 
@@ -150,16 +157,88 @@
         printer('import pkgutil')
         printer()
         printer('from %s import *', self.__base_files_package)
+        if self.__root_package == '.':
+            import_prefix = ''
+        else:
+            import_prefix = '%s.' % self.__root_package
         if self.__generate_cli:
-            printer('from %s.%s import *',
-                    self.__root_package, self.__client_info.cli_rule_name)
-        printer('from %s.%s import *',
-                self.__root_package, self.__client_info.client_rule_name)
-        printer('from %s.%s import *',
-                self.__root_package, self.__client_info.messages_rule_name)
+            printer('from %s%s import *',
+                    import_prefix, self.__client_info.cli_rule_name)
+        printer('from %s%s import *',
+                import_prefix, self.__client_info.client_rule_name)
+        printer('from %s%s import *',
+                import_prefix, self.__client_info.messages_rule_name)
         printer()
         printer('__path__ = pkgutil.extend_path(__path__, __name__)')
 
+    def WriteIntermediateInit(self, out):
+        """Write a simple __init__.py for an intermediate directory."""
+        printer = self._GetPrinter(out)
+        printer('#!/usr/bin/env python')
+        printer('"""Shared __init__.py for apitools."""')
+        printer()
+        printer('from pkgutil import extend_path')
+        printer('__path__ = extend_path(__path__, __name__)')
+
+    def WriteSetupPy(self, out):
+        """Write a setup.py for upload to PyPI."""
+        printer = self._GetPrinter(out)
+        year = datetime.datetime.now().year
+        printer('# Copyright %s Google Inc. All Rights Reserved.' % year)
+        printer('#')
+        printer('# Licensed under the Apache License, Version 2.0 (the'
+                '"License");')
+        printer('# you may not use this file except in compliance with '
+                'the License.')
+        printer('# You may obtain a copy of the License at')
+        printer('#')
+        printer('#   http://www.apache.org/licenses/LICENSE-2.0')
+        printer('#')
+        printer('# Unless required by applicable law or agreed to in writing, '
+                'software')
+        printer('# distributed under the License is distributed on an "AS IS" '
+                'BASIS,')
+        printer('# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either '
+                'express or implied.')
+        printer('# See the License for the specific language governing '
+                'permissions and')
+        printer('# limitations under the License.')
+        printer()
+        printer('import setuptools')
+        printer('REQUIREMENTS = [')
+        with printer.Indent(indent='    '):
+            # TODO(craigcitro): Have this track apitools' version.
+            printer('"google-apitools>=0.4.8",')
+            printer('"httplib2>=0.9",')
+            printer('"oauth2client>=1.4.12",')
+            printer('"protorpc>=0.10.0",')
+        printer(']')
+        printer('_PACKAGE = "apitools.clients.%s"' % self.__package)
+        printer()
+        printer('setuptools.setup(')
+        # TODO(craigcitro): Allow customization of these options.
+        with printer.Indent(indent='    '):
+            printer('name="google-apitools-%s-%s",',
+                    self.__package, self.__version)
+            printer('version="0.4.%s",', self.__revision)
+            printer('description="Autogenerated apitools library for %s",' % (
+                self.__package,))
+            printer('url="https://github.com/google/apitools",')
+            printer('author="Craig Citro",')
+            printer('author_email="craigcitro@google.com",')
+            printer('packages=setuptools.find_packages(),')
+            printer('install_requires=REQUIREMENTS,')
+            printer('classifiers=[')
+            with printer.Indent(indent='    '):
+                printer('"Programming Language :: Python :: 2.7",')
+                printer('"License :: OSI Approved :: Apache Software '
+                        'License",')
+            printer('],')
+            printer('license="Apache 2.0",')
+            printer('keywords="apitools apitools-%s %s",' % (
+                self.__package, self.__package))
+        printer(')')
+
     def WriteMessagesFile(self, out):
         self.__message_registry.WriteFile(self._GetPrinter(out))
 
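WriteSetupPy above emits a small PyPI-ready setup.py whose package name combines the API name and version, and whose version string tracks the discovery document's revision. For a hypothetical 'storage' API at version v1 and revision 20150320, the emitted file would look roughly like this (Apache license header omitted; all concrete names here are illustrative):

    import setuptools
    REQUIREMENTS = [
        "google-apitools>=0.4.8",
        "httplib2>=0.9",
        "oauth2client>=1.4.12",
        "protorpc>=0.10.0",
    ]
    _PACKAGE = "apitools.clients.storage"

    setuptools.setup(
        name="google-apitools-storage-v1",
        version="0.4.20150320",
        description="Autogenerated apitools library for storage",
        url="https://github.com/google/apitools",
        author="Craig Citro",
        author_email="craigcitro@google.com",
        packages=setuptools.find_packages(),
        install_requires=REQUIREMENTS,
        classifiers=[
            "Programming Language :: Python :: 2.7",
            "License :: OSI Approved :: Apache Software License",
        ],
        license="Apache 2.0",
        keywords="apitools apitools-storage storage",
    )
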
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/gen/message_registry.py b/catapult/third_party/gsutil/third_party/apitools/apitools/gen/message_registry.py
old mode 100755
new mode 100644
index 4a7a3e9..d467e9b
--- a/catapult/third_party/gsutil/third_party/apitools/apitools/gen/message_registry.py
+++ b/catapult/third_party/gsutil/third_party/apitools/apitools/gen/message_registry.py
@@ -73,7 +73,7 @@
             package=self.__package, description=self.__description)
         # Add required imports
         self.__file_descriptor.additional_imports = [
-            'from protorpc import messages',
+            'from protorpc import messages as _messages',
         ]
         # Map from scoped names (i.e. Foo.Bar) to MessageDescriptors.
         self.__message_registry = collections.OrderedDict()
@@ -394,7 +394,8 @@
                     attrs['format'], type_name))
             if (type_info.type_name.startswith('protorpc.message_types.') or
                     type_info.type_name.startswith('message_types.')):
-                self.__AddImport('from protorpc import message_types')
+                self.__AddImport(
+                    'from protorpc import message_types as _message_types')
             if type_info.type_name.startswith('extra_types.'):
                 self.__AddImport(
                     'from %s import extra_types' % self.__base_files_package)
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/gen/service_registry.py b/catapult/third_party/gsutil/third_party/apitools/apitools/gen/service_registry.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/gen/util.py b/catapult/third_party/gsutil/third_party/apitools/apitools/gen/util.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/scripts/__init__.py b/catapult/third_party/gsutil/third_party/apitools/apitools/scripts/__init__.py
new file mode 100644
index 0000000..54fa3d5
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/apitools/apitools/scripts/__init__.py
@@ -0,0 +1,5 @@
+#!/usr/bin/env python
+"""Shared __init__.py for apitools."""
+
+from pkgutil import extend_path
+__path__ = extend_path(__path__, __name__)
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/scripts/oauth2l.py b/catapult/third_party/gsutil/third_party/apitools/apitools/scripts/oauth2l.py
new file mode 100755
index 0000000..44bb9bc
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/apitools/apitools/scripts/oauth2l.py
@@ -0,0 +1,309 @@
+"""Command-line utility for fetching/inspecting credentials.
+
+oauth2l (pronounced "oauthtool") is a small utility for fetching
+credentials, or inspecting existing credentials. Here we demonstrate
+some sample use:
+
+    $ oauth2l fetch userinfo.email bigquery compute
+    Fetched credentials of type:
+      oauth2client.client.OAuth2Credentials
+    Access token:
+      ya29.abcdefghijklmnopqrstuvwxyz123yessirree
+    $ oauth2l header userinfo.email
+    Authorization: Bearer ya29.zyxwvutsrqpnmolkjihgfedcba
+    $ oauth2l validate thisisnotatoken
+    <exit status: 1>
+    $ oauth2l validate ya29.zyxwvutsrqpnmolkjihgfedcba
+    $ oauth2l scopes ya29.abcdefghijklmnopqrstuvwxyz123yessirree
+    https://www.googleapis.com/auth/bigquery
+    https://www.googleapis.com/auth/compute
+    https://www.googleapis.com/auth/userinfo.email
+
+The `header` command is designed to be easy to use with `curl`:
+
+    $ curl -H "$(oauth2l header bigquery)" \
+           'https://www.googleapis.com/bigquery/v2/projects'
+
+The token can also be printed in other formats, for easy chaining
+into other programs:
+
+    $ oauth2l fetch -f json_compact userinfo.email
+    <one-line JSON object with credential information>
+    $ oauth2l fetch -f bare drive
+    ya29.suchT0kenManyCredentialsW0Wokyougetthepoint
+
+"""
+
+import httplib
+import json
+import logging
+import os
+import pkgutil
+import sys
+import textwrap
+
+import gflags as flags
+from google.apputils import appcommands
+import oauth2client.client
+
+import apitools.base.py as apitools_base
+from apitools.base.py import cli as apitools_cli
+
+FLAGS = flags.FLAGS
+# We could use a generated client here, but it's used for precisely
+# one URL, with one parameter and no worries about URL encoding. Let's
+# go with simple.
+_OAUTH2_TOKENINFO_TEMPLATE = (
+    'https://www.googleapis.com/oauth2/v2/tokeninfo'
+    '?access_token={access_token}'
+)
+
+
+flags.DEFINE_string(
+    'client_secrets', '',
+    'If specified, use the client ID/secret from the named '
+    'file, which should be a client_secrets.json file as downloaded '
+    'from the Developer Console.')
+flags.DEFINE_string(
+    'credentials_filename', '',
+    '(optional) Filename for fetching/storing credentials.')
+flags.DEFINE_string(
+    'service_account_json_keyfile', '',
+    'Filename for a JSON service account key downloaded from the Developer '
+    'Console.')
+
+
+def GetDefaultClientInfo():
+    client_secrets = json.loads(pkgutil.get_data(
+        'apitools.data', 'apitools_client_secrets.json'))['installed']
+    return {
+        'client_id': client_secrets['client_id'],
+        'client_secret': client_secrets['client_secret'],
+        'user_agent': 'apitools/0.2 oauth2l/0.1',
+    }
+
+
+def GetClientInfoFromFlags():
+    """Fetch client info from FLAGS."""
+    if FLAGS.client_secrets:
+        client_secrets_path = os.path.expanduser(FLAGS.client_secrets)
+        if not os.path.exists(client_secrets_path):
+            raise ValueError('Cannot find file: %s' % FLAGS.client_secrets)
+        with open(client_secrets_path) as client_secrets_file:
+            client_secrets = json.load(client_secrets_file)
+        if 'installed' not in client_secrets:
+            raise ValueError('Provided client ID must be for an installed app')
+        client_secrets = client_secrets['installed']
+        return {
+            'client_id': client_secrets['client_id'],
+            'client_secret': client_secrets['client_secret'],
+            'user_agent': 'apitools/0.2 oauth2l/0.1',
+        }
+    else:
+        return GetDefaultClientInfo()
+
+
+def _ExpandScopes(scopes):
+    scope_prefix = 'https://www.googleapis.com/auth/'
+    return [s if s.startswith('https://') else scope_prefix + s
+            for s in scopes]
+
+
+def _PrettyJson(data):
+    return json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))
+
+
+def _CompactJson(data):
+    return json.dumps(data, sort_keys=True, separators=(',', ':'))
+
+
+def _Format(fmt, credentials):
+    """Format credentials according to fmt."""
+    if fmt == 'bare':
+        return credentials.access_token
+    elif fmt == 'header':
+        return 'Authorization: Bearer %s' % credentials.access_token
+    elif fmt == 'json':
+        return _PrettyJson(json.loads(credentials.to_json()))
+    elif fmt == 'json_compact':
+        return _CompactJson(json.loads(credentials.to_json()))
+    elif fmt == 'pretty':
+        format_str = textwrap.dedent('\n'.join([
+            'Fetched credentials of type:',
+            '  {credentials_type.__module__}.{credentials_type.__name__}',
+            'Access token:',
+            '  {credentials.access_token}',
+        ]))
+        return format_str.format(credentials=credentials,
+                                 credentials_type=type(credentials))
+    raise ValueError('Unknown format: {}'.format(fmt))
+
+_FORMATS = set(('bare', 'header', 'json', 'json_compact', 'pretty'))
+
+
+def _GetTokenScopes(access_token):
+    """Return the list of valid scopes for the given token as a list."""
+    url = _OAUTH2_TOKENINFO_TEMPLATE.format(access_token=access_token)
+    response = apitools_base.MakeRequest(
+        apitools_base.GetHttp(), apitools_base.Request(url))
+    if response.status_code not in [httplib.OK, httplib.BAD_REQUEST]:
+        raise apitools_base.HttpError.FromResponse(response)
+    if response.status_code == httplib.BAD_REQUEST:
+        return []
+    return json.loads(response.content)['scope'].split(' ')
+
+
+def _ValidateToken(access_token):
+    """Return True iff the provided access token is valid."""
+    return bool(_GetTokenScopes(access_token))
+
+
+def FetchCredentials(scopes, client_info=None, credentials_filename=None):
+    """Fetch a credential for the given client_info and scopes."""
+    client_info = client_info or GetClientInfoFromFlags()
+    scopes = _ExpandScopes(scopes)
+    if not scopes:
+        raise ValueError('No scopes provided')
+    credentials_filename = credentials_filename or FLAGS.credentials_filename
+    # TODO(craigcitro): Remove this logging nonsense once we quiet the
+    # spurious logging in oauth2client.
+    old_level = logging.getLogger().level
+    logging.getLogger().setLevel(logging.ERROR)
+    credentials = apitools_base.GetCredentials(
+        'oauth2l', scopes, credentials_filename=credentials_filename,
+        service_account_json_keyfile=FLAGS.service_account_json_keyfile,
+        oauth2client_args='', **client_info)
+    logging.getLogger().setLevel(old_level)
+    if not _ValidateToken(credentials.access_token):
+        credentials.refresh(apitools_base.GetHttp())
+    return credentials
+
+
+class _Email(apitools_cli.NewCmd):
+
+    """Get user email."""
+
+    usage = 'email <access_token>'
+
+    def RunWithArgs(self, access_token):
+        """Print the email address for this token, if possible."""
+        userinfo = apitools_base.GetUserinfo(
+            oauth2client.client.AccessTokenCredentials(access_token,
+                                                       'oauth2l/1.0'))
+        user_email = userinfo.get('email')
+        if user_email:
+            print user_email
+
+
+class _Fetch(apitools_cli.NewCmd):
+
+    """Fetch credentials."""
+
+    usage = 'fetch <scope> [<scope> ...]'
+
+    def __init__(self, name, flag_values):
+        super(_Fetch, self).__init__(name, flag_values)
+        flags.DEFINE_enum(
+            'credentials_format', 'pretty', sorted(_FORMATS),
+            'Output format for token.',
+            short_name='f', flag_values=flag_values)
+
+    def RunWithArgs(self, *scopes):
+        """Fetch a valid access token and display it."""
+        credentials = FetchCredentials(scopes)
+        print _Format(FLAGS.credentials_format.lower(), credentials)
+
+
+class _Header(apitools_cli.NewCmd):
+
+    """Print credentials for a header."""
+
+    usage = 'header <scope> [<scope> ...]'
+
+    def RunWithArgs(self, *scopes):
+        """Fetch a valid access token and display it formatted for a header."""
+        print _Format('header', FetchCredentials(scopes))
+
+
+class _Scopes(apitools_cli.NewCmd):
+
+    """Get the list of scopes for a token."""
+
+    usage = 'scopes <access_token>'
+
+    def RunWithArgs(self, access_token):
+        """Print the list of scopes for a valid token."""
+        scopes = _GetTokenScopes(access_token)
+        if not scopes:
+            return 1
+        for scope in sorted(scopes):
+            print scope
+
+
+class _Userinfo(apitools_cli.NewCmd):
+
+    """Get userinfo."""
+
+    usage = 'userinfo <access_token>'
+
+    def __init__(self, name, flag_values):
+        super(_Userinfo, self).__init__(name, flag_values)
+        flags.DEFINE_enum(
+            'format', 'json', sorted(('json', 'json_compact')),
+            'Output format for userinfo.',
+            short_name='f', flag_values=flag_values)
+
+    def RunWithArgs(self, access_token):
+        """Print the userinfo for this token (if we have the right scopes)."""
+        userinfo = apitools_base.GetUserinfo(
+            oauth2client.client.AccessTokenCredentials(access_token,
+                                                       'oauth2l/1.0'))
+        if FLAGS.format == 'json':
+            print _PrettyJson(userinfo)
+        else:
+            print _CompactJson(userinfo)
+
+
+class _Validate(apitools_cli.NewCmd):
+
+    """Validate a token."""
+
+    usage = 'validate <access_token>'
+
+    def RunWithArgs(self, access_token):
+        """Validate an access token. Exits with 0 if valid, 1 otherwise."""
+        return 1 - _ValidateToken(access_token)
+
+
+def run_main():  # pylint:disable=invalid-name
+    """Function to be used as setuptools script entry point."""
+    # Put the flags for this module somewhere the flags module will look
+    # for them.
+
+    # pylint:disable=protected-access
+    new_name = flags._GetMainModule()
+    sys.modules[new_name] = sys.modules['__main__']
+    for flag in FLAGS.FlagsByModuleDict().get(__name__, []):
+        FLAGS._RegisterFlagByModule(new_name, flag)
+        for key_flag in FLAGS.KeyFlagsByModuleDict().get(__name__, []):
+            FLAGS._RegisterKeyFlagForModule(new_name, key_flag)
+    # pylint:enable=protected-access
+
+    # Now set __main__ appropriately so that appcommands will be
+    # happy.
+    sys.modules['__main__'] = sys.modules[__name__]
+    appcommands.Run()
+    sys.modules['__main__'] = sys.modules.pop(new_name)
+
+
+def main(unused_argv):
+    appcommands.AddCmd('email', _Email)
+    appcommands.AddCmd('fetch', _Fetch)
+    appcommands.AddCmd('header', _Header)
+    appcommands.AddCmd('scopes', _Scopes)
+    appcommands.AddCmd('userinfo', _Userinfo)
+    appcommands.AddCmd('validate', _Validate)
+
+
+if __name__ == '__main__':
+    appcommands.Run()
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/scripts/oauth2l_test.py b/catapult/third_party/gsutil/third_party/apitools/apitools/scripts/oauth2l_test.py
new file mode 100755
index 0000000..8f25b35
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/apitools/apitools/scripts/oauth2l_test.py
@@ -0,0 +1,356 @@
+"""Tests for oauth2l."""
+
+import json
+import os
+import sys
+
+import mock
+import oauth2client.client
+import six
+from six.moves import http_client
+import unittest2
+
+import apitools.base.py as apitools_base
+
+_OAUTH2L_MAIN_RUN = False
+
+if six.PY2:
+    import gflags as flags
+    from google.apputils import appcommands
+    from apitools.scripts import oauth2l
+    FLAGS = flags.FLAGS
+
+
+class _FakeResponse(object):
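+    """Stub response exposing the fields the oauth2l commands read."""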
+
+    def __init__(self, status_code, scopes=None):
+        self.status_code = status_code
+        if self.status_code == http_client.OK:
+            self.content = json.dumps({'scope': ' '.join(scopes or [])})
+        else:
+            self.content = 'Error'
+            self.info = str(http_client.responses[self.status_code])
+            self.request_url = 'some-url'
+
+
+def _GetCommandOutput(t, command_name, command_argv):
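+    """Run command_name with command_argv and return the captured stdout."""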
+    global _OAUTH2L_MAIN_RUN  # pylint: disable=global-statement
+    if not _OAUTH2L_MAIN_RUN:
+        oauth2l.main(None)
+        _OAUTH2L_MAIN_RUN = True
+    command = appcommands.GetCommandByName(command_name)
+    if command is None:
+        t.fail('Unknown command: %s' % command_name)
+    orig_stdout = sys.stdout
+    new_stdout = six.StringIO()
+    try:
+        sys.stdout = new_stdout
+        command.CommandRun([command_name] + command_argv)
+    finally:
+        sys.stdout = orig_stdout
+        FLAGS.Reset()
+    new_stdout.seek(0)
+    return new_stdout.getvalue().rstrip()
+
+
+@unittest2.skipIf(six.PY3, 'oauth2l unsupported in python3')
+class TestTest(unittest2.TestCase):
+
+    def testOutput(self):
+        self.assertRaises(AssertionError,
+                          _GetCommandOutput, self, 'foo', [])
+
+
+@unittest2.skipIf(six.PY3, 'oauth2l unsupported in python3')
+class Oauth2lFormattingTest(unittest2.TestCase):
+
+    def setUp(self):
+        # Set up an access token to use
+        self.access_token = 'ya29.abdefghijklmnopqrstuvwxyz'
+        self.user_agent = 'oauth2l/1.0'
+        self.credentials = oauth2client.client.AccessTokenCredentials(
+            self.access_token, self.user_agent)
+
+    def _Args(self, credentials_format):
+        return ['--credentials_format=' + credentials_format, 'userinfo.email']
+
+    def testFormatBare(self):
+        with mock.patch.object(oauth2l, 'FetchCredentials',
+                               return_value=self.credentials,
+                               autospec=True) as mock_credentials:
+            output = _GetCommandOutput(self, 'fetch', self._Args('bare'))
+            self.assertEqual(self.access_token, output)
+            self.assertEqual(1, mock_credentials.call_count)
+
+    def testFormatHeader(self):
+        with mock.patch.object(oauth2l, 'FetchCredentials',
+                               return_value=self.credentials,
+                               autospec=True) as mock_credentials:
+            output = _GetCommandOutput(self, 'fetch', self._Args('header'))
+            header = 'Authorization: Bearer %s' % self.access_token
+            self.assertEqual(header, output)
+            self.assertEqual(1, mock_credentials.call_count)
+
+    def testHeaderCommand(self):
+        with mock.patch.object(oauth2l, 'FetchCredentials',
+                               return_value=self.credentials,
+                               autospec=True) as mock_credentials:
+            output = _GetCommandOutput(self, 'header', ['userinfo.email'])
+            header = 'Authorization: Bearer %s' % self.access_token
+            self.assertEqual(header, output)
+            self.assertEqual(1, mock_credentials.call_count)
+
+    def testFormatJson(self):
+        with mock.patch.object(oauth2l, 'FetchCredentials',
+                               return_value=self.credentials,
+                               autospec=True) as mock_credentials:
+            output = _GetCommandOutput(self, 'fetch', self._Args('json'))
+            output_lines = [l.strip() for l in output.splitlines()]
+            expected_lines = [
+                '"_class": "AccessTokenCredentials",',
+                '"access_token": "%s",' % self.access_token,
+            ]
+            for line in expected_lines:
+                self.assertIn(line, output_lines)
+            self.assertEqual(1, mock_credentials.call_count)
+
+    def testFormatJsonCompact(self):
+        with mock.patch.object(oauth2l, 'FetchCredentials',
+                               return_value=self.credentials,
+                               autospec=True) as mock_credentials:
+            output = _GetCommandOutput(self, 'fetch',
+                                       self._Args('json_compact'))
+            expected_clauses = [
+                '"_class":"AccessTokenCredentials",',
+                '"access_token":"%s",' % self.access_token,
+            ]
+            for clause in expected_clauses:
+                self.assertIn(clause, output)
+            self.assertEqual(1, len(output.splitlines()))
+            self.assertEqual(1, mock_credentials.call_count)
+
+    def testFormatPretty(self):
+        with mock.patch.object(oauth2l, 'FetchCredentials',
+                               return_value=self.credentials,
+                               autospec=True) as mock_credentials:
+            output = _GetCommandOutput(self, 'fetch', self._Args('pretty'))
+            expecteds = ['oauth2client.client.AccessTokenCredentials',
+                         self.access_token]
+            for expected in expecteds:
+                self.assertIn(expected, output)
+            self.assertEqual(1, mock_credentials.call_count)
+
+    def testFakeFormat(self):
+        self.assertRaises(ValueError,
+                          oauth2l._Format, 'xml', self.credentials)
+
+
+@unittest2.skipIf(six.PY3, 'oauth2l unsupported in python3')
+class TestFetch(unittest2.TestCase):
+
+    def setUp(self):
+        # Set up an access token to use
+        self.access_token = 'ya29.abdefghijklmnopqrstuvwxyz'
+        self.user_agent = 'oauth2l/1.0'
+        self.credentials = oauth2client.client.AccessTokenCredentials(
+            self.access_token, self.user_agent)
+
+    def testNoScopes(self):
+        output = _GetCommandOutput(self, 'fetch', [])
+        self.assertEqual(
+            'Exception raised in fetch operation: No scopes provided',
+            output)
+
+    def testScopes(self):
+        expected_scopes = [
+            'https://www.googleapis.com/auth/userinfo.email',
+            'https://www.googleapis.com/auth/cloud-platform',
+        ]
+        with mock.patch.object(apitools_base, 'GetCredentials',
+                               return_value=self.credentials,
+                               autospec=True) as mock_fetch:
+            with mock.patch.object(oauth2l, '_GetTokenScopes',
+                                   return_value=expected_scopes,
+                                   autospec=True) as mock_get_scopes:
+                output = _GetCommandOutput(
+                    self, 'fetch', ['userinfo.email', 'cloud-platform'])
+                self.assertIn(self.access_token, output)
+                self.assertEqual(1, mock_fetch.call_count)
+                args, _ = mock_fetch.call_args
+                self.assertEqual(expected_scopes, args[-1])
+                self.assertEqual(1, mock_get_scopes.call_count)
+                self.assertEqual((self.access_token,),
+                                 mock_get_scopes.call_args[0])
+
+    def testCredentialsRefreshed(self):
+        with mock.patch.object(apitools_base, 'GetCredentials',
+                               return_value=self.credentials,
+                               autospec=True) as mock_fetch:
+            with mock.patch.object(oauth2l, '_ValidateToken',
+                                   return_value=False,
+                                   autospec=True) as mock_validate:
+                with mock.patch.object(self.credentials, 'refresh',
+                                       return_value=None,
+                                       autospec=True) as mock_refresh:
+                    output = _GetCommandOutput(self, 'fetch',
+                                               ['userinfo.email'])
+                    self.assertIn(self.access_token, output)
+                    self.assertEqual(1, mock_fetch.call_count)
+                    self.assertEqual(1, mock_validate.call_count)
+                    self.assertEqual(1, mock_refresh.call_count)
+
+    def testDefaultClientInfo(self):
+        with mock.patch.object(apitools_base, 'GetCredentials',
+                               return_value=self.credentials,
+                               autospec=True) as mock_fetch:
+            with mock.patch.object(oauth2l, '_ValidateToken',
+                                   return_value=True,
+                                   autospec=True) as mock_validate:
+                output = _GetCommandOutput(self, 'fetch', ['userinfo.email'])
+                self.assertIn(self.access_token, output)
+                self.assertEqual(1, mock_fetch.call_count)
+                _, kwargs = mock_fetch.call_args
+                self.assertEqual(
+                    '1042881264118.apps.googleusercontent.com',
+                    kwargs['client_id'])
+                self.assertEqual(1, mock_validate.call_count)
+
+    def testMissingClientSecrets(self):
+        try:
+            FLAGS.client_secrets = '/non/existent/file'
+            self.assertRaises(
+                ValueError,
+                oauth2l.GetClientInfoFromFlags)
+        finally:
+            FLAGS.Reset()
+
+    def testWrongClientSecretsFormat(self):
+        client_secrets_path = os.path.join(
+            os.path.dirname(__file__),
+            'testdata/noninstalled_client_secrets.json')
+        try:
+            FLAGS.client_secrets = client_secrets_path
+            self.assertRaises(
+                ValueError,
+                oauth2l.GetClientInfoFromFlags)
+        finally:
+            FLAGS.Reset()
+
+    def testCustomClientInfo(self):
+        client_secrets_path = os.path.join(
+            os.path.dirname(__file__), 'testdata/fake_client_secrets.json')
+        with mock.patch.object(apitools_base, 'GetCredentials',
+                               return_value=self.credentials,
+                               autospec=True) as mock_fetch:
+            with mock.patch.object(oauth2l, '_ValidateToken',
+                                   return_value=True,
+                                   autospec=True) as mock_validate:
+                fetch_args = [
+                    '--client_secrets=' + client_secrets_path,
+                    'userinfo.email']
+                output = _GetCommandOutput(self, 'fetch', fetch_args)
+                self.assertIn(self.access_token, output)
+                self.assertEqual(1, mock_fetch.call_count)
+                _, kwargs = mock_fetch.call_args
+                self.assertEqual('144169.apps.googleusercontent.com',
+                                 kwargs['client_id'])
+                self.assertEqual('awesomesecret',
+                                 kwargs['client_secret'])
+                self.assertEqual(1, mock_validate.call_count)
+
+
+@unittest2.skipIf(six.PY3, 'oauth2l unsupported in python3')
+class TestOtherCommands(unittest2.TestCase):
+
+    def setUp(self):
+        # Set up an access token to use
+        self.access_token = 'ya29.abdefghijklmnopqrstuvwxyz'
+        self.user_agent = 'oauth2l/1.0'
+        self.credentials = oauth2client.client.AccessTokenCredentials(
+            self.access_token, self.user_agent)
+
+    def testEmail(self):
+        user_info = {'email': 'foo@example.com'}
+        with mock.patch.object(apitools_base, 'GetUserinfo',
+                               return_value=user_info,
+                               autospec=True) as mock_get_userinfo:
+            output = _GetCommandOutput(self, 'email', [self.access_token])
+            self.assertEqual(user_info['email'], output)
+            self.assertEqual(1, mock_get_userinfo.call_count)
+            self.assertEqual(self.access_token,
+                             mock_get_userinfo.call_args[0][0].access_token)
+
+    def testNoEmail(self):
+        with mock.patch.object(apitools_base, 'GetUserinfo',
+                               return_value={},
+                               autospec=True) as mock_get_userinfo:
+            output = _GetCommandOutput(self, 'email', [self.access_token])
+            self.assertEqual('', output)
+            self.assertEqual(1, mock_get_userinfo.call_count)
+
+    def testUserinfo(self):
+        user_info = {'email': 'foo@example.com'}
+        with mock.patch.object(apitools_base, 'GetUserinfo',
+                               return_value=user_info,
+                               autospec=True) as mock_get_userinfo:
+            output = _GetCommandOutput(self, 'userinfo', [self.access_token])
+            self.assertEqual(json.dumps(user_info, indent=4), output)
+            self.assertEqual(1, mock_get_userinfo.call_count)
+            self.assertEqual(self.access_token,
+                             mock_get_userinfo.call_args[0][0].access_token)
+
+    def testUserinfoCompact(self):
+        user_info = {'email': 'foo@example.com'}
+        with mock.patch.object(apitools_base, 'GetUserinfo',
+                               return_value=user_info,
+                               autospec=True) as mock_get_userinfo:
+            output = _GetCommandOutput(
+                self, 'userinfo', ['--format=json_compact', self.access_token])
+            self.assertEqual(json.dumps(user_info, separators=(',', ':')),
+                             output)
+            self.assertEqual(1, mock_get_userinfo.call_count)
+            self.assertEqual(self.access_token,
+                             mock_get_userinfo.call_args[0][0].access_token)
+
+    def testScopes(self):
+        scopes = [u'https://www.googleapis.com/auth/userinfo.email',
+                  u'https://www.googleapis.com/auth/cloud-platform']
+        response = _FakeResponse(http_client.OK, scopes=scopes)
+        with mock.patch.object(apitools_base, 'MakeRequest',
+                               return_value=response,
+                               autospec=True) as mock_make_request:
+            output = _GetCommandOutput(self, 'scopes', [self.access_token])
+            self.assertEqual(sorted(scopes), output.splitlines())
+            self.assertEqual(1, mock_make_request.call_count)
+
+    def testValidate(self):
+        scopes = [u'https://www.googleapis.com/auth/userinfo.email',
+                  u'https://www.googleapis.com/auth/cloud-platform']
+        response = _FakeResponse(http_client.OK, scopes=scopes)
+        with mock.patch.object(apitools_base, 'MakeRequest',
+                               return_value=response,
+                               autospec=True) as mock_make_request:
+            output = _GetCommandOutput(self, 'validate', [self.access_token])
+            self.assertEqual('', output)
+            self.assertEqual(1, mock_make_request.call_count)
+
+    def testBadResponseCode(self):
+        response = _FakeResponse(http_client.BAD_REQUEST)
+        with mock.patch.object(apitools_base, 'MakeRequest',
+                               return_value=response,
+                               autospec=True) as mock_make_request:
+            output = _GetCommandOutput(self, 'scopes', [self.access_token])
+            self.assertEqual('', output)
+            self.assertEqual(1, mock_make_request.call_count)
+
+    def testUnexpectedResponseCode(self):
+        response = _FakeResponse(http_client.INTERNAL_SERVER_ERROR)
+        with mock.patch.object(apitools_base, 'MakeRequest',
+                               return_value=response,
+                               autospec=True) as mock_make_request:
+            output = _GetCommandOutput(self, 'scopes', [self.access_token])
+            self.assertIn(str(http_client.responses[response.status_code]),
+                          output)
+            self.assertIn('Exception raised in scopes operation: HttpError',
+                          output)
+            self.assertEqual(1, mock_make_request.call_count)
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/scripts/testdata/fake_client_secrets.json b/catapult/third_party/gsutil/third_party/apitools/apitools/scripts/testdata/fake_client_secrets.json
new file mode 100644
index 0000000..f1fabe6
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/apitools/apitools/scripts/testdata/fake_client_secrets.json
@@ -0,0 +1,15 @@
+{
+  "installed": {
+    "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
+    "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+    "client_email": "",
+    "client_id": "144169.apps.googleusercontent.com",
+    "client_secret": "awesomesecret",
+    "client_x509_cert_url": "",
+    "redirect_uris": [
+      "urn:ietf:wg:oauth:2.0:oob",
+      "oob"
+    ],
+    "token_uri": "https://accounts.google.com/o/oauth2/token"
+  }
+}
diff --git a/catapult/third_party/gsutil/third_party/apitools/apitools/scripts/testdata/noninstalled_client_secrets.json b/catapult/third_party/gsutil/third_party/apitools/apitools/scripts/testdata/noninstalled_client_secrets.json
new file mode 100644
index 0000000..6e67027
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/apitools/apitools/scripts/testdata/noninstalled_client_secrets.json
@@ -0,0 +1,3 @@
+{
+  "webapp": {}
+}
diff --git a/catapult/third_party/gsutil/third_party/apitools/default.pylintrc b/catapult/third_party/gsutil/third_party/apitools/default.pylintrc
index bdea83a..eb0266f 100644
--- a/catapult/third_party/gsutil/third_party/apitools/default.pylintrc
+++ b/catapult/third_party/gsutil/third_party/apitools/default.pylintrc
@@ -44,7 +44,9 @@
 
 [MESSAGES CONTROL]
 
+# TODO: remove cyclic-import.
 disable =
+    cyclic-import,
     fixme,
     import-error,
     locally-disabled,
@@ -55,12 +57,14 @@
     no-member,
     no-self-use,
     redefined-builtin,
+    redundant-keyword-arg,
     similarities,
     star-args,
     super-on-old-class,
     too-few-public-methods,
     too-many-arguments,
     too-many-branches,
+    too-many-function-args,
     too-many-instance-attributes,
     too-many-locals,
     too-many-public-methods,
diff --git a/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/downloads_test.py b/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/downloads_test.py
index 2276d98..72a4821 100644
--- a/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/downloads_test.py
+++ b/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/downloads_test.py
@@ -37,7 +37,9 @@
             self.__buffer, auto_transfer=auto_transfer)
 
     def __GetTestdataFileContents(self, filename):
-        file_contents = open('testdata/%s' % filename).read()
+        file_path = os.path.join(
+            os.path.dirname(__file__), self._TESTDATA_PREFIX, filename)
+        file_contents = open(file_path).read()
         self.assertIsNotNone(
             file_contents, msg=('Could not read file %s' % filename))
         return file_contents
diff --git a/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/generate_clients.sh b/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/generate_clients.sh
new file mode 100644
index 0000000..5744480
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/generate_clients.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+gen_client --discovery_url=storage.v1 --overwrite --outdir=storage --root_package=. client
diff --git a/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/storage/__init__.py b/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/storage/__init__.py
index 6426fac..2c8e598 100644
--- a/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/storage/__init__.py
+++ b/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/storage/__init__.py
@@ -4,7 +4,6 @@
 import pkgutil
 
 from apitools.base.py import *
-from storage_v1 import *
 from storage_v1_client import *
 from storage_v1_messages import *
 
diff --git a/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/storage/storage_v1.py b/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/storage/storage_v1.py
old mode 100755
new mode 100644
index 97627e2..5b9e5a2
--- a/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/storage/storage_v1.py
+++ b/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/storage/storage_v1.py
@@ -1,7 +1,9 @@
 #!/usr/bin/env python
 """CLI for storage, version v1."""
+# NOTE: This file is autogenerated and should not be edited by hand.
 
 import code
+import os
 import platform
 import sys
 
@@ -33,8 +35,12 @@
       'File with interactive shell history.')
   flags.DEFINE_multistring(
       'add_header', [],
-      'Additional http headers (as key=value strings). Can be '
-      'specified multiple times.')
+      'Additional http headers (as key=value strings). '
+      'Can be specified multiple times.')
+  flags.DEFINE_string(
+      'service_account_json_keyfile', '',
+      'Filename for a JSON service account key downloaded'
+      ' from the Developer Console.')
   flags.DEFINE_enum(
       'alt',
       u'json',
@@ -109,10 +115,14 @@
   log_response = FLAGS.log_response or FLAGS.log_request_response
   api_endpoint = apitools_base.NormalizeApiEndpoint(FLAGS.api_endpoint)
   additional_http_headers = dict(x.split('=', 1) for x in FLAGS.add_header)
+  credentials_args = {
+      'service_account_json_keyfile': os.path.expanduser(FLAGS.service_account_json_keyfile)
+  }
   try:
     client = client_lib.StorageV1(
         api_endpoint, log_request=log_request,
         log_response=log_response,
+        credentials_args=credentials_args,
         additional_http_headers=additional_http_headers)
   except apitools_base.CredentialsError as e:
     print 'Error creating credentials: %s' % e
@@ -153,6 +163,948 @@
       return e.code
 
 
+class BucketAccessControlsDelete(apitools_base_cli.NewCmd):
+  """Command wrapping bucketAccessControls.Delete."""
+
+  usage = """bucketAccessControls_delete <bucket> <entity>"""
+
+  def __init__(self, name, fv):
+    super(BucketAccessControlsDelete, self).__init__(name, fv)
+
+  def RunWithArgs(self, bucket, entity):
+    """Permanently deletes the ACL entry for the specified entity on the
+    specified bucket.
+
+    Args:
+      bucket: Name of a bucket.
+      entity: The entity holding the permission. Can be user-userId, user-
+        emailAddress, group-groupId, group-emailAddress, allUsers, or
+        allAuthenticatedUsers.
+    """
+    client = GetClientFromFlags()
+    global_params = GetGlobalParamsFromFlags()
+    request = messages.StorageBucketAccessControlsDeleteRequest(
+        bucket=bucket.decode('utf8'),
+        entity=entity.decode('utf8'),
+        )
+    result = client.bucketAccessControls.Delete(
+        request, global_params=global_params)
+    print apitools_base_cli.FormatOutput(result)
+
+
+class BucketAccessControlsGet(apitools_base_cli.NewCmd):
+  """Command wrapping bucketAccessControls.Get."""
+
+  usage = """bucketAccessControls_get <bucket> <entity>"""
+
+  def __init__(self, name, fv):
+    super(BucketAccessControlsGet, self).__init__(name, fv)
+
+  def RunWithArgs(self, bucket, entity):
+    """Returns the ACL entry for the specified entity on the specified bucket.
+
+    Args:
+      bucket: Name of a bucket.
+      entity: The entity holding the permission. Can be user-userId, user-
+        emailAddress, group-groupId, group-emailAddress, allUsers, or
+        allAuthenticatedUsers.
+    """
+    client = GetClientFromFlags()
+    global_params = GetGlobalParamsFromFlags()
+    request = messages.StorageBucketAccessControlsGetRequest(
+        bucket=bucket.decode('utf8'),
+        entity=entity.decode('utf8'),
+        )
+    result = client.bucketAccessControls.Get(
+        request, global_params=global_params)
+    print apitools_base_cli.FormatOutput(result)
+
+
+class BucketAccessControlsInsert(apitools_base_cli.NewCmd):
+  """Command wrapping bucketAccessControls.Insert."""
+
+  usage = """bucketAccessControls_insert <bucket>"""
+
+  def __init__(self, name, fv):
+    super(BucketAccessControlsInsert, self).__init__(name, fv)
+    flags.DEFINE_string(
+        'domain',
+        None,
+        u'The domain associated with the entity, if any.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'email',
+        None,
+        u'The email address associated with the entity, if any.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'entity',
+        None,
+        u'The entity holding the permission, in one of the following forms:  '
+        u'- user-userId  - user-email  - group-groupId  - group-email  - '
+        u'domain-domain  - project-team-projectId  - allUsers  - '
+        u'allAuthenticatedUsers Examples:  - The user liz@example.com would '
+        u'be user-liz@example.com.  - The group example@googlegroups.com '
+        u'would be group-example@googlegroups.com.  - To refer to all members'
+        u' of the Google Apps for Business domain example.com, the entity '
+        u'would be domain-example.com.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'entityId',
+        None,
+        u'The ID for the entity, if any.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'etag',
+        None,
+        u'HTTP 1.1 Entity tag for the access-control entry.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'id',
+        None,
+        u'The ID of the access-control entry.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'kind',
+        u'storage#bucketAccessControl',
+        u'The kind of item this is. For bucket access control entries, this '
+        u'is always storage#bucketAccessControl.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'projectTeam',
+        None,
+        u'The project team associated with the entity, if any.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'role',
+        None,
+        u'The access permission for the entity. Can be READER, WRITER, or '
+        u'OWNER.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'selfLink',
+        None,
+        u'The link to this access-control entry.',
+        flag_values=fv)
+
+  def RunWithArgs(self, bucket):
+    """Creates a new ACL entry on the specified bucket.
+
+    Args:
+      bucket: The name of the bucket.
+
+    Flags:
+      domain: The domain associated with the entity, if any.
+      email: The email address associated with the entity, if any.
+      entity: The entity holding the permission, in one of the following
+        forms:  - user-userId  - user-email  - group-groupId  - group-email  -
+        domain-domain  - project-team-projectId  - allUsers  -
+        allAuthenticatedUsers Examples:  - The user liz@example.com would be
+        user-liz@example.com.  - The group example@googlegroups.com would be
+        group-example@googlegroups.com.  - To refer to all members of the
+        Google Apps for Business domain example.com, the entity would be
+        domain-example.com.
+      entityId: The ID for the entity, if any.
+      etag: HTTP 1.1 Entity tag for the access-control entry.
+      id: The ID of the access-control entry.
+      kind: The kind of item this is. For bucket access control entries, this
+        is always storage#bucketAccessControl.
+      projectTeam: The project team associated with the entity, if any.
+      role: The access permission for the entity. Can be READER, WRITER, or
+        OWNER.
+      selfLink: The link to this access-control entry.
+    """
+    client = GetClientFromFlags()
+    global_params = GetGlobalParamsFromFlags()
+    request = messages.BucketAccessControl(
+        bucket=bucket.decode('utf8'),
+        )
+    if FLAGS['domain'].present:
+      request.domain = FLAGS.domain.decode('utf8')
+    if FLAGS['email'].present:
+      request.email = FLAGS.email.decode('utf8')
+    if FLAGS['entity'].present:
+      request.entity = FLAGS.entity.decode('utf8')
+    if FLAGS['entityId'].present:
+      request.entityId = FLAGS.entityId.decode('utf8')
+    if FLAGS['etag'].present:
+      request.etag = FLAGS.etag.decode('utf8')
+    if FLAGS['id'].present:
+      request.id = FLAGS.id.decode('utf8')
+    if FLAGS['kind'].present:
+      request.kind = FLAGS.kind.decode('utf8')
+    if FLAGS['projectTeam'].present:
+      request.projectTeam = apitools_base.JsonToMessage(messages.BucketAccessControl.ProjectTeamValue, FLAGS.projectTeam)
+    if FLAGS['role'].present:
+      request.role = FLAGS.role.decode('utf8')
+    if FLAGS['selfLink'].present:
+      request.selfLink = FLAGS.selfLink.decode('utf8')
+    result = client.bucketAccessControls.Insert(
+        request, global_params=global_params)
+    print apitools_base_cli.FormatOutput(result)
+
+
+class BucketAccessControlsList(apitools_base_cli.NewCmd):
+  """Command wrapping bucketAccessControls.List."""
+
+  usage = """bucketAccessControls_list <bucket>"""
+
+  def __init__(self, name, fv):
+    super(BucketAccessControlsList, self).__init__(name, fv)
+
+  def RunWithArgs(self, bucket):
+    """Retrieves ACL entries on the specified bucket.
+
+    Args:
+      bucket: Name of a bucket.
+    """
+    client = GetClientFromFlags()
+    global_params = GetGlobalParamsFromFlags()
+    request = messages.StorageBucketAccessControlsListRequest(
+        bucket=bucket.decode('utf8'),
+        )
+    result = client.bucketAccessControls.List(
+        request, global_params=global_params)
+    print apitools_base_cli.FormatOutput(result)
+
+
+class BucketAccessControlsPatch(apitools_base_cli.NewCmd):
+  """Command wrapping bucketAccessControls.Patch."""
+
+  usage = """bucketAccessControls_patch <bucket> <entity>"""
+
+  def __init__(self, name, fv):
+    super(BucketAccessControlsPatch, self).__init__(name, fv)
+    flags.DEFINE_string(
+        'domain',
+        None,
+        u'The domain associated with the entity, if any.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'email',
+        None,
+        u'The email address associated with the entity, if any.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'entityId',
+        None,
+        u'The ID for the entity, if any.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'etag',
+        None,
+        u'HTTP 1.1 Entity tag for the access-control entry.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'id',
+        None,
+        u'The ID of the access-control entry.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'kind',
+        u'storage#bucketAccessControl',
+        u'The kind of item this is. For bucket access control entries, this '
+        u'is always storage#bucketAccessControl.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'projectTeam',
+        None,
+        u'The project team associated with the entity, if any.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'role',
+        None,
+        u'The access permission for the entity. Can be READER, WRITER, or '
+        u'OWNER.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'selfLink',
+        None,
+        u'The link to this access-control entry.',
+        flag_values=fv)
+
+  def RunWithArgs(self, bucket, entity):
+    """Updates an ACL entry on the specified bucket. This method supports
+    patch semantics.
+
+    Args:
+      bucket: The name of the bucket.
+      entity: The entity holding the permission, in one of the following
+        forms:  - user-userId  - user-email  - group-groupId  - group-email  -
+        domain-domain  - project-team-projectId  - allUsers  -
+        allAuthenticatedUsers Examples:  - The user liz@example.com would be
+        user-liz@example.com.  - The group example@googlegroups.com would be
+        group-example@googlegroups.com.  - To refer to all members of the
+        Google Apps for Business domain example.com, the entity would be
+        domain-example.com.
+
+    Flags:
+      domain: The domain associated with the entity, if any.
+      email: The email address associated with the entity, if any.
+      entityId: The ID for the entity, if any.
+      etag: HTTP 1.1 Entity tag for the access-control entry.
+      id: The ID of the access-control entry.
+      kind: The kind of item this is. For bucket access control entries, this
+        is always storage#bucketAccessControl.
+      projectTeam: The project team associated with the entity, if any.
+      role: The access permission for the entity. Can be READER, WRITER, or
+        OWNER.
+      selfLink: The link to this access-control entry.
+    """
+    client = GetClientFromFlags()
+    global_params = GetGlobalParamsFromFlags()
+    request = messages.BucketAccessControl(
+        bucket=bucket.decode('utf8'),
+        entity=entity.decode('utf8'),
+        )
+    if FLAGS['domain'].present:
+      request.domain = FLAGS.domain.decode('utf8')
+    if FLAGS['email'].present:
+      request.email = FLAGS.email.decode('utf8')
+    if FLAGS['entityId'].present:
+      request.entityId = FLAGS.entityId.decode('utf8')
+    if FLAGS['etag'].present:
+      request.etag = FLAGS.etag.decode('utf8')
+    if FLAGS['id'].present:
+      request.id = FLAGS.id.decode('utf8')
+    if FLAGS['kind'].present:
+      request.kind = FLAGS.kind.decode('utf8')
+    if FLAGS['projectTeam'].present:
+      request.projectTeam = apitools_base.JsonToMessage(messages.BucketAccessControl.ProjectTeamValue, FLAGS.projectTeam)
+    if FLAGS['role'].present:
+      request.role = FLAGS.role.decode('utf8')
+    if FLAGS['selfLink'].present:
+      request.selfLink = FLAGS.selfLink.decode('utf8')
+    result = client.bucketAccessControls.Patch(
+        request, global_params=global_params)
+    print apitools_base_cli.FormatOutput(result)
+
+
+class BucketAccessControlsUpdate(apitools_base_cli.NewCmd):
+  """Command wrapping bucketAccessControls.Update."""
+
+  usage = """bucketAccessControls_update <bucket> <entity>"""
+
+  def __init__(self, name, fv):
+    super(BucketAccessControlsUpdate, self).__init__(name, fv)
+    flags.DEFINE_string(
+        'domain',
+        None,
+        u'The domain associated with the entity, if any.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'email',
+        None,
+        u'The email address associated with the entity, if any.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'entityId',
+        None,
+        u'The ID for the entity, if any.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'etag',
+        None,
+        u'HTTP 1.1 Entity tag for the access-control entry.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'id',
+        None,
+        u'The ID of the access-control entry.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'kind',
+        u'storage#bucketAccessControl',
+        u'The kind of item this is. For bucket access control entries, this '
+        u'is always storage#bucketAccessControl.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'projectTeam',
+        None,
+        u'The project team associated with the entity, if any.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'role',
+        None,
+        u'The access permission for the entity. Can be READER, WRITER, or '
+        u'OWNER.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'selfLink',
+        None,
+        u'The link to this access-control entry.',
+        flag_values=fv)
+
+  def RunWithArgs(self, bucket, entity):
+    """Updates an ACL entry on the specified bucket.
+
+    Args:
+      bucket: The name of the bucket.
+      entity: The entity holding the permission, in one of the following
+        forms:  - user-userId  - user-email  - group-groupId  - group-email  -
+        domain-domain  - project-team-projectId  - allUsers  -
+        allAuthenticatedUsers Examples:  - The user liz@example.com would be
+        user-liz@example.com.  - The group example@googlegroups.com would be
+        group-example@googlegroups.com.  - To refer to all members of the
+        Google Apps for Business domain example.com, the entity would be
+        domain-example.com.
+
+    Flags:
+      domain: The domain associated with the entity, if any.
+      email: The email address associated with the entity, if any.
+      entityId: The ID for the entity, if any.
+      etag: HTTP 1.1 Entity tag for the access-control entry.
+      id: The ID of the access-control entry.
+      kind: The kind of item this is. For bucket access control entries, this
+        is always storage#bucketAccessControl.
+      projectTeam: The project team associated with the entity, if any.
+      role: The access permission for the entity. Can be READER, WRITER, or
+        OWNER.
+      selfLink: The link to this access-control entry.
+    """
+    client = GetClientFromFlags()
+    global_params = GetGlobalParamsFromFlags()
+    request = messages.BucketAccessControl(
+        bucket=bucket.decode('utf8'),
+        entity=entity.decode('utf8'),
+        )
+    if FLAGS['domain'].present:
+      request.domain = FLAGS.domain.decode('utf8')
+    if FLAGS['email'].present:
+      request.email = FLAGS.email.decode('utf8')
+    if FLAGS['entityId'].present:
+      request.entityId = FLAGS.entityId.decode('utf8')
+    if FLAGS['etag'].present:
+      request.etag = FLAGS.etag.decode('utf8')
+    if FLAGS['id'].present:
+      request.id = FLAGS.id.decode('utf8')
+    if FLAGS['kind'].present:
+      request.kind = FLAGS.kind.decode('utf8')
+    if FLAGS['projectTeam'].present:
+      request.projectTeam = apitools_base.JsonToMessage(messages.BucketAccessControl.ProjectTeamValue, FLAGS.projectTeam)
+    if FLAGS['role'].present:
+      request.role = FLAGS.role.decode('utf8')
+    if FLAGS['selfLink'].present:
+      request.selfLink = FLAGS.selfLink.decode('utf8')
+    result = client.bucketAccessControls.Update(
+        request, global_params=global_params)
+    print apitools_base_cli.FormatOutput(result)
+
+
+class BucketsDelete(apitools_base_cli.NewCmd):
+  """Command wrapping buckets.Delete."""
+
+  usage = """buckets_delete <bucket>"""
+
+  def __init__(self, name, fv):
+    super(BucketsDelete, self).__init__(name, fv)
+    flags.DEFINE_string(
+        'ifMetagenerationMatch',
+        None,
+        u'If set, only deletes the bucket if its metageneration matches this '
+        u'value.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifMetagenerationNotMatch',
+        None,
+        u'If set, only deletes the bucket if its metageneration does not '
+        u'match this value.',
+        flag_values=fv)
+
+  def RunWithArgs(self, bucket):
+    """Permanently deletes an empty bucket.
+
+    Args:
+      bucket: Name of a bucket.
+
+    Flags:
+      ifMetagenerationMatch: If set, only deletes the bucket if its
+        metageneration matches this value.
+      ifMetagenerationNotMatch: If set, only deletes the bucket if its
+        metageneration does not match this value.
+    """
+    client = GetClientFromFlags()
+    global_params = GetGlobalParamsFromFlags()
+    request = messages.StorageBucketsDeleteRequest(
+        bucket=bucket.decode('utf8'),
+        )
+    if FLAGS['ifMetagenerationMatch'].present:
+      request.ifMetagenerationMatch = int(FLAGS.ifMetagenerationMatch)
+    if FLAGS['ifMetagenerationNotMatch'].present:
+      request.ifMetagenerationNotMatch = int(FLAGS.ifMetagenerationNotMatch)
+    result = client.buckets.Delete(
+        request, global_params=global_params)
+    print apitools_base_cli.FormatOutput(result)
+
+
+class BucketsGet(apitools_base_cli.NewCmd):
+  """Command wrapping buckets.Get."""
+
+  usage = """buckets_get <bucket>"""
+
+  def __init__(self, name, fv):
+    super(BucketsGet, self).__init__(name, fv)
+    flags.DEFINE_string(
+        'ifMetagenerationMatch',
+        None,
+        u'Makes the return of the bucket metadata conditional on whether the '
+        u"bucket's current metageneration matches the given value.",
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifMetagenerationNotMatch',
+        None,
+        u'Makes the return of the bucket metadata conditional on whether the '
+        u"bucket's current metageneration does not match the given value.",
+        flag_values=fv)
+    flags.DEFINE_enum(
+        'projection',
+        u'full',
+        [u'full', u'noAcl'],
+        u'Set of properties to return. Defaults to noAcl.',
+        flag_values=fv)
+
+  def RunWithArgs(self, bucket):
+    """Returns metadata for the specified bucket.
+
+    Args:
+      bucket: Name of a bucket.
+
+    Flags:
+      ifMetagenerationMatch: Makes the return of the bucket metadata
+        conditional on whether the bucket's current metageneration matches the
+        given value.
+      ifMetagenerationNotMatch: Makes the return of the bucket metadata
+        conditional on whether the bucket's current metageneration does not
+        match the given value.
+      projection: Set of properties to return. Defaults to noAcl.
+    """
+    client = GetClientFromFlags()
+    global_params = GetGlobalParamsFromFlags()
+    request = messages.StorageBucketsGetRequest(
+        bucket=bucket.decode('utf8'),
+        )
+    if FLAGS['ifMetagenerationMatch'].present:
+      request.ifMetagenerationMatch = int(FLAGS.ifMetagenerationMatch)
+    if FLAGS['ifMetagenerationNotMatch'].present:
+      request.ifMetagenerationNotMatch = int(FLAGS.ifMetagenerationNotMatch)
+    if FLAGS['projection'].present:
+      request.projection = messages.StorageBucketsGetRequest.ProjectionValueValuesEnum(FLAGS.projection)
+    result = client.buckets.Get(
+        request, global_params=global_params)
+    print apitools_base_cli.FormatOutput(result)
+
+
+class BucketsInsert(apitools_base_cli.NewCmd):
+  """Command wrapping buckets.Insert."""
+
+  usage = """buckets_insert <project>"""
+
+  def __init__(self, name, fv):
+    super(BucketsInsert, self).__init__(name, fv)
+    flags.DEFINE_string(
+        'bucket',
+        None,
+        u'A Bucket resource to be passed as the request body.',
+        flag_values=fv)
+    flags.DEFINE_enum(
+        'predefinedAcl',
+        u'authenticatedRead',
+        [u'authenticatedRead', u'private', u'projectPrivate', u'publicRead', u'publicReadWrite'],
+        u'Apply a predefined set of access controls to this bucket.',
+        flag_values=fv)
+    flags.DEFINE_enum(
+        'predefinedDefaultObjectAcl',
+        u'authenticatedRead',
+        [u'authenticatedRead', u'bucketOwnerFullControl', u'bucketOwnerRead', u'private', u'projectPrivate', u'publicRead'],
+        u'Apply a predefined set of default object access controls to this '
+        u'bucket.',
+        flag_values=fv)
+    flags.DEFINE_enum(
+        'projection',
+        u'full',
+        [u'full', u'noAcl'],
+        u'Set of properties to return. Defaults to noAcl, unless the bucket '
+        u'resource specifies acl or defaultObjectAcl properties, when it '
+        u'defaults to full.',
+        flag_values=fv)
+
+  def RunWithArgs(self, project):
+    """Creates a new bucket.
+
+    Args:
+      project: A valid API project identifier.
+
+    Flags:
+      bucket: A Bucket resource to be passed as the request body.
+      predefinedAcl: Apply a predefined set of access controls to this bucket.
+      predefinedDefaultObjectAcl: Apply a predefined set of default object
+        access controls to this bucket.
+      projection: Set of properties to return. Defaults to noAcl, unless the
+        bucket resource specifies acl or defaultObjectAcl properties, when it
+        defaults to full.
+    """
+    client = GetClientFromFlags()
+    global_params = GetGlobalParamsFromFlags()
+    request = messages.StorageBucketsInsertRequest(
+        project=project.decode('utf8'),
+        )
+    if FLAGS['bucket'].present:
+      request.bucket = apitools_base.JsonToMessage(messages.Bucket, FLAGS.bucket)
+    if FLAGS['predefinedAcl'].present:
+      request.predefinedAcl = messages.StorageBucketsInsertRequest.PredefinedAclValueValuesEnum(FLAGS.predefinedAcl)
+    if FLAGS['predefinedDefaultObjectAcl'].present:
+      request.predefinedDefaultObjectAcl = messages.StorageBucketsInsertRequest.PredefinedDefaultObjectAclValueValuesEnum(FLAGS.predefinedDefaultObjectAcl)
+    if FLAGS['projection'].present:
+      request.projection = messages.StorageBucketsInsertRequest.ProjectionValueValuesEnum(FLAGS.projection)
+    result = client.buckets.Insert(
+        request, global_params=global_params)
+    print apitools_base_cli.FormatOutput(result)
+
+
+class BucketsList(apitools_base_cli.NewCmd):
+  """Command wrapping buckets.List."""
+
+  usage = """buckets_list <project>"""
+
+  def __init__(self, name, fv):
+    super(BucketsList, self).__init__(name, fv)
+    flags.DEFINE_integer(
+        'maxResults',
+        None,
+        u'Maximum number of buckets to return.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'pageToken',
+        None,
+        u'A previously-returned page token representing part of the larger '
+        u'set of results to view.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'prefix',
+        None,
+        u'Filter results to buckets whose names begin with this prefix.',
+        flag_values=fv)
+    flags.DEFINE_enum(
+        'projection',
+        u'full',
+        [u'full', u'noAcl'],
+        u'Set of properties to return. Defaults to noAcl.',
+        flag_values=fv)
+
+  def RunWithArgs(self, project):
+    """Retrieves a list of buckets for a given project.
+
+    Args:
+      project: A valid API project identifier.
+
+    Flags:
+      maxResults: Maximum number of buckets to return.
+      pageToken: A previously-returned page token representing part of the
+        larger set of results to view.
+      prefix: Filter results to buckets whose names begin with this prefix.
+      projection: Set of properties to return. Defaults to noAcl.
+    """
+    client = GetClientFromFlags()
+    global_params = GetGlobalParamsFromFlags()
+    request = messages.StorageBucketsListRequest(
+        project=project.decode('utf8'),
+        )
+    if FLAGS['maxResults'].present:
+      request.maxResults = FLAGS.maxResults
+    if FLAGS['pageToken'].present:
+      request.pageToken = FLAGS.pageToken.decode('utf8')
+    if FLAGS['prefix'].present:
+      request.prefix = FLAGS.prefix.decode('utf8')
+    if FLAGS['projection'].present:
+      request.projection = messages.StorageBucketsListRequest.ProjectionValueValuesEnum(FLAGS.projection)
+    result = client.buckets.List(
+        request, global_params=global_params)
+    print apitools_base_cli.FormatOutput(result)
+
+
+class BucketsPatch(apitools_base_cli.NewCmd):
+  """Command wrapping buckets.Patch."""
+
+  usage = """buckets_patch <bucket>"""
+
+  def __init__(self, name, fv):
+    super(BucketsPatch, self).__init__(name, fv)
+    flags.DEFINE_string(
+        'bucketResource',
+        None,
+        u'A Bucket resource to be passed as the request body.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifMetagenerationMatch',
+        None,
+        u'Makes the return of the bucket metadata conditional on whether the '
+        u"bucket's current metageneration matches the given value.",
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifMetagenerationNotMatch',
+        None,
+        u'Makes the return of the bucket metadata conditional on whether the '
+        u"bucket's current metageneration does not match the given value.",
+        flag_values=fv)
+    flags.DEFINE_enum(
+        'predefinedAcl',
+        u'authenticatedRead',
+        [u'authenticatedRead', u'private', u'projectPrivate', u'publicRead', u'publicReadWrite'],
+        u'Apply a predefined set of access controls to this bucket.',
+        flag_values=fv)
+    flags.DEFINE_enum(
+        'predefinedDefaultObjectAcl',
+        u'authenticatedRead',
+        [u'authenticatedRead', u'bucketOwnerFullControl', u'bucketOwnerRead', u'private', u'projectPrivate', u'publicRead'],
+        u'Apply a predefined set of default object access controls to this '
+        u'bucket.',
+        flag_values=fv)
+    flags.DEFINE_enum(
+        'projection',
+        u'full',
+        [u'full', u'noAcl'],
+        u'Set of properties to return. Defaults to full.',
+        flag_values=fv)
+
+  def RunWithArgs(self, bucket):
+    """Updates a bucket. This method supports patch semantics.
+
+    Args:
+      bucket: Name of a bucket.
+
+    Flags:
+      bucketResource: A Bucket resource to be passed as the request body.
+      ifMetagenerationMatch: Makes the return of the bucket metadata
+        conditional on whether the bucket's current metageneration matches the
+        given value.
+      ifMetagenerationNotMatch: Makes the return of the bucket metadata
+        conditional on whether the bucket's current metageneration does not
+        match the given value.
+      predefinedAcl: Apply a predefined set of access controls to this bucket.
+      predefinedDefaultObjectAcl: Apply a predefined set of default object
+        access controls to this bucket.
+      projection: Set of properties to return. Defaults to full.
+    """
+    client = GetClientFromFlags()
+    global_params = GetGlobalParamsFromFlags()
+    request = messages.StorageBucketsPatchRequest(
+        bucket=bucket.decode('utf8'),
+        )
+    if FLAGS['bucketResource'].present:
+      request.bucketResource = apitools_base.JsonToMessage(messages.Bucket, FLAGS.bucketResource)
+    if FLAGS['ifMetagenerationMatch'].present:
+      request.ifMetagenerationMatch = int(FLAGS.ifMetagenerationMatch)
+    if FLAGS['ifMetagenerationNotMatch'].present:
+      request.ifMetagenerationNotMatch = int(FLAGS.ifMetagenerationNotMatch)
+    if FLAGS['predefinedAcl'].present:
+      request.predefinedAcl = messages.StorageBucketsPatchRequest.PredefinedAclValueValuesEnum(FLAGS.predefinedAcl)
+    if FLAGS['predefinedDefaultObjectAcl'].present:
+      request.predefinedDefaultObjectAcl = messages.StorageBucketsPatchRequest.PredefinedDefaultObjectAclValueValuesEnum(FLAGS.predefinedDefaultObjectAcl)
+    if FLAGS['projection'].present:
+      request.projection = messages.StorageBucketsPatchRequest.ProjectionValueValuesEnum(FLAGS.projection)
+    result = client.buckets.Patch(
+        request, global_params=global_params)
+    print apitools_base_cli.FormatOutput(result)
+
+
+class BucketsUpdate(apitools_base_cli.NewCmd):
+  """Command wrapping buckets.Update."""
+
+  usage = """buckets_update <bucket>"""
+
+  def __init__(self, name, fv):
+    super(BucketsUpdate, self).__init__(name, fv)
+    flags.DEFINE_string(
+        'bucketResource',
+        None,
+        u'A Bucket resource to be passed as the request body.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifMetagenerationMatch',
+        None,
+        u'Makes the return of the bucket metadata conditional on whether the '
+        u"bucket's current metageneration matches the given value.",
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifMetagenerationNotMatch',
+        None,
+        u'Makes the return of the bucket metadata conditional on whether the '
+        u"bucket's current metageneration does not match the given value.",
+        flag_values=fv)
+    flags.DEFINE_enum(
+        'predefinedAcl',
+        u'authenticatedRead',
+        [u'authenticatedRead', u'private', u'projectPrivate', u'publicRead', u'publicReadWrite'],
+        u'Apply a predefined set of access controls to this bucket.',
+        flag_values=fv)
+    flags.DEFINE_enum(
+        'predefinedDefaultObjectAcl',
+        u'authenticatedRead',
+        [u'authenticatedRead', u'bucketOwnerFullControl', u'bucketOwnerRead', u'private', u'projectPrivate', u'publicRead'],
+        u'Apply a predefined set of default object access controls to this '
+        u'bucket.',
+        flag_values=fv)
+    flags.DEFINE_enum(
+        'projection',
+        u'full',
+        [u'full', u'noAcl'],
+        u'Set of properties to return. Defaults to full.',
+        flag_values=fv)
+
+  def RunWithArgs(self, bucket):
+    """Updates a bucket.
+
+    Args:
+      bucket: Name of a bucket.
+
+    Flags:
+      bucketResource: A Bucket resource to be passed as the request body.
+      ifMetagenerationMatch: Makes the return of the bucket metadata
+        conditional on whether the bucket's current metageneration matches the
+        given value.
+      ifMetagenerationNotMatch: Makes the return of the bucket metadata
+        conditional on whether the bucket's current metageneration does not
+        match the given value.
+      predefinedAcl: Apply a predefined set of access controls to this bucket.
+      predefinedDefaultObjectAcl: Apply a predefined set of default object
+        access controls to this bucket.
+      projection: Set of properties to return. Defaults to full.
+    """
+    client = GetClientFromFlags()
+    global_params = GetGlobalParamsFromFlags()
+    request = messages.StorageBucketsUpdateRequest(
+        bucket=bucket.decode('utf8'),
+        )
+    if FLAGS['bucketResource'].present:
+      request.bucketResource = apitools_base.JsonToMessage(messages.Bucket, FLAGS.bucketResource)
+    if FLAGS['ifMetagenerationMatch'].present:
+      request.ifMetagenerationMatch = int(FLAGS.ifMetagenerationMatch)
+    if FLAGS['ifMetagenerationNotMatch'].present:
+      request.ifMetagenerationNotMatch = int(FLAGS.ifMetagenerationNotMatch)
+    if FLAGS['predefinedAcl'].present:
+      request.predefinedAcl = messages.StorageBucketsUpdateRequest.PredefinedAclValueValuesEnum(FLAGS.predefinedAcl)
+    if FLAGS['predefinedDefaultObjectAcl'].present:
+      request.predefinedDefaultObjectAcl = messages.StorageBucketsUpdateRequest.PredefinedDefaultObjectAclValueValuesEnum(FLAGS.predefinedDefaultObjectAcl)
+    if FLAGS['projection'].present:
+      request.projection = messages.StorageBucketsUpdateRequest.ProjectionValueValuesEnum(FLAGS.projection)
+    result = client.buckets.Update(
+        request, global_params=global_params)
+    print apitools_base_cli.FormatOutput(result)
+
+
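(Illustrative aside: both buckets_patch and buckets_update accept the request body as a JSON string via --bucketResource, which RunWithArgs decodes with apitools_base.JsonToMessage(messages.Bucket, ...); per its docstring, buckets_patch applies patch semantics, so only the supplied fields change. A hedged sketch of assembling such an invocation; the program name 'storage', the bucket name, and the 'versioning' field are assumptions for illustration, not taken from this diff.)

import json

# Hypothetical bucket metadata to patch in; field names are assumptions about
# the Bucket message schema, not taken from this diff.
bucket_resource = json.dumps({
    'versioning': {'enabled': True},
})

# Illustrative command line for the generated CLI (program name assumed).
argv = [
    'storage', 'buckets_patch', 'my-bucket',
    '--bucketResource', bucket_resource,
    '--ifMetagenerationMatch', '3',   # converted with int(...) by RunWithArgs
]
print ' '.join(argv)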
+class ChannelsStop(apitools_base_cli.NewCmd):
+  """Command wrapping channels.Stop."""
+
+  usage = """channels_stop"""
+
+  def __init__(self, name, fv):
+    super(ChannelsStop, self).__init__(name, fv)
+    flags.DEFINE_string(
+        'address',
+        None,
+        u'The address where notifications are delivered for this channel.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'expiration',
+        None,
+        u'Date and time of notification channel expiration, expressed as a '
+        u'Unix timestamp, in milliseconds. Optional.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'id',
+        None,
+        u'A UUID or similar unique string that identifies this channel.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'kind',
+        u'api#channel',
+        u'Identifies this as a notification channel used to watch for changes'
+        u' to a resource. Value: the fixed string "api#channel".',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'params',
+        None,
+        u'Additional parameters controlling delivery channel behavior. '
+        u'Optional.',
+        flag_values=fv)
+    flags.DEFINE_boolean(
+        'payload',
+        None,
+        u'A Boolean value to indicate whether payload is wanted. Optional.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'resourceId',
+        None,
+        u'An opaque ID that identifies the resource being watched on this '
+        u'channel. Stable across different API versions.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'resourceUri',
+        None,
+        u'A version-specific identifier for the watched resource.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'token',
+        None,
+        u'An arbitrary string delivered to the target address with each '
+        u'notification delivered over this channel. Optional.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'type',
+        None,
+        u'The type of delivery mechanism used for this channel.',
+        flag_values=fv)
+
+  def RunWithArgs(self):
+    """Stop watching resources through this channel
+
+    Flags:
+      address: The address where notifications are delivered for this channel.
+      expiration: Date and time of notification channel expiration, expressed
+        as a Unix timestamp, in milliseconds. Optional.
+      id: A UUID or similar unique string that identifies this channel.
+      kind: Identifies this as a notification channel used to watch for
+        changes to a resource. Value: the fixed string "api#channel".
+      params: Additional parameters controlling delivery channel behavior.
+        Optional.
+      payload: A Boolean value to indicate whether payload is wanted.
+        Optional.
+      resourceId: An opaque ID that identifies the resource being watched on
+        this channel. Stable across different API versions.
+      resourceUri: A version-specific identifier for the watched resource.
+      token: An arbitrary string delivered to the target address with each
+        notification delivered over this channel. Optional.
+      type: The type of delivery mechanism used for this channel.
+    """
+    client = GetClientFromFlags()
+    global_params = GetGlobalParamsFromFlags()
+    request = messages.Channel(
+        )
+    if FLAGS['address'].present:
+      request.address = FLAGS.address.decode('utf8')
+    if FLAGS['expiration'].present:
+      request.expiration = int(FLAGS.expiration)
+    if FLAGS['id'].present:
+      request.id = FLAGS.id.decode('utf8')
+    if FLAGS['kind'].present:
+      request.kind = FLAGS.kind.decode('utf8')
+    if FLAGS['params'].present:
+      request.params = apitools_base.JsonToMessage(messages.Channel.ParamsValue, FLAGS.params)
+    if FLAGS['payload'].present:
+      request.payload = FLAGS.payload
+    if FLAGS['resourceId'].present:
+      request.resourceId = FLAGS.resourceId.decode('utf8')
+    if FLAGS['resourceUri'].present:
+      request.resourceUri = FLAGS.resourceUri.decode('utf8')
+    if FLAGS['token'].present:
+      request.token = FLAGS.token.decode('utf8')
+    if FLAGS['type'].present:
+      request.type = FLAGS.type.decode('utf8')
+    result = client.channels.Stop(
+        request, global_params=global_params)
+    print apitools_base_cli.FormatOutput(result)
+
+
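(Illustrative aside: channels_stop builds a Channel message directly from flags; --expiration is a Unix timestamp in milliseconds and is converted with int(...), while --params is a JSON object string decoded with apitools_base.JsonToMessage(messages.Channel.ParamsValue, ...). A small sketch of preparing those values; the program name, channel id, and params key are made up for the example.)

import json
import time

# Expire the (hypothetical) notification channel one hour from now, expressed
# as a Unix timestamp in milliseconds, matching int(FLAGS.expiration) above.
expiration_ms = int(time.time() * 1000) + 60 * 60 * 1000

# --params is passed as a JSON object string; the key below is purely
# illustrative and not taken from this diff.
params = json.dumps({'ttl': '3600'})

argv = ['storage', 'channels_stop',
        '--id', '01234567-89ab-cdef-0123-456789abcdef',  # hypothetical channel UUID
        '--expiration', str(expiration_ms),
        '--params', params]
print ' '.join(argv)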
 class DefaultObjectAccessControlsDelete(apitools_base_cli.NewCmd):
   """Command wrapping defaultObjectAccessControls.Delete."""
 
@@ -646,1554 +1598,6 @@
     print apitools_base_cli.FormatOutput(result)
 
 
-class BucketAccessControlsDelete(apitools_base_cli.NewCmd):
-  """Command wrapping bucketAccessControls.Delete."""
-
-  usage = """bucketAccessControls_delete <bucket> <entity>"""
-
-  def __init__(self, name, fv):
-    super(BucketAccessControlsDelete, self).__init__(name, fv)
-
-  def RunWithArgs(self, bucket, entity):
-    """Permanently deletes the ACL entry for the specified entity on the
-    specified bucket.
-
-    Args:
-      bucket: Name of a bucket.
-      entity: The entity holding the permission. Can be user-userId, user-
-        emailAddress, group-groupId, group-emailAddress, allUsers, or
-        allAuthenticatedUsers.
-    """
-    client = GetClientFromFlags()
-    global_params = GetGlobalParamsFromFlags()
-    request = messages.StorageBucketAccessControlsDeleteRequest(
-        bucket=bucket.decode('utf8'),
-        entity=entity.decode('utf8'),
-        )
-    result = client.bucketAccessControls.Delete(
-        request, global_params=global_params)
-    print apitools_base_cli.FormatOutput(result)
-
-
-class BucketAccessControlsGet(apitools_base_cli.NewCmd):
-  """Command wrapping bucketAccessControls.Get."""
-
-  usage = """bucketAccessControls_get <bucket> <entity>"""
-
-  def __init__(self, name, fv):
-    super(BucketAccessControlsGet, self).__init__(name, fv)
-
-  def RunWithArgs(self, bucket, entity):
-    """Returns the ACL entry for the specified entity on the specified bucket.
-
-    Args:
-      bucket: Name of a bucket.
-      entity: The entity holding the permission. Can be user-userId, user-
-        emailAddress, group-groupId, group-emailAddress, allUsers, or
-        allAuthenticatedUsers.
-    """
-    client = GetClientFromFlags()
-    global_params = GetGlobalParamsFromFlags()
-    request = messages.StorageBucketAccessControlsGetRequest(
-        bucket=bucket.decode('utf8'),
-        entity=entity.decode('utf8'),
-        )
-    result = client.bucketAccessControls.Get(
-        request, global_params=global_params)
-    print apitools_base_cli.FormatOutput(result)
-
-
-class BucketAccessControlsInsert(apitools_base_cli.NewCmd):
-  """Command wrapping bucketAccessControls.Insert."""
-
-  usage = """bucketAccessControls_insert <bucket>"""
-
-  def __init__(self, name, fv):
-    super(BucketAccessControlsInsert, self).__init__(name, fv)
-    flags.DEFINE_string(
-        'domain',
-        None,
-        u'The domain associated with the entity, if any.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'email',
-        None,
-        u'The email address associated with the entity, if any.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'entity',
-        None,
-        u'The entity holding the permission, in one of the following forms:  '
-        u'- user-userId  - user-email  - group-groupId  - group-email  - '
-        u'domain-domain  - project-team-projectId  - allUsers  - '
-        u'allAuthenticatedUsers Examples:  - The user liz@example.com would '
-        u'be user-liz@example.com.  - The group example@googlegroups.com '
-        u'would be group-example@googlegroups.com.  - To refer to all members'
-        u' of the Google Apps for Business domain example.com, the entity '
-        u'would be domain-example.com.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'entityId',
-        None,
-        u'The ID for the entity, if any.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'etag',
-        None,
-        u'HTTP 1.1 Entity tag for the access-control entry.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'id',
-        None,
-        u'The ID of the access-control entry.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'kind',
-        u'storage#bucketAccessControl',
-        u'The kind of item this is. For bucket access control entries, this '
-        u'is always storage#bucketAccessControl.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'projectTeam',
-        None,
-        u'The project team associated with the entity, if any.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'role',
-        None,
-        u'The access permission for the entity. Can be READER, WRITER, or '
-        u'OWNER.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'selfLink',
-        None,
-        u'The link to this access-control entry.',
-        flag_values=fv)
-
-  def RunWithArgs(self, bucket):
-    """Creates a new ACL entry on the specified bucket.
-
-    Args:
-      bucket: The name of the bucket.
-
-    Flags:
-      domain: The domain associated with the entity, if any.
-      email: The email address associated with the entity, if any.
-      entity: The entity holding the permission, in one of the following
-        forms:  - user-userId  - user-email  - group-groupId  - group-email  -
-        domain-domain  - project-team-projectId  - allUsers  -
-        allAuthenticatedUsers Examples:  - The user liz@example.com would be
-        user-liz@example.com.  - The group example@googlegroups.com would be
-        group-example@googlegroups.com.  - To refer to all members of the
-        Google Apps for Business domain example.com, the entity would be
-        domain-example.com.
-      entityId: The ID for the entity, if any.
-      etag: HTTP 1.1 Entity tag for the access-control entry.
-      id: The ID of the access-control entry.
-      kind: The kind of item this is. For bucket access control entries, this
-        is always storage#bucketAccessControl.
-      projectTeam: The project team associated with the entity, if any.
-      role: The access permission for the entity. Can be READER, WRITER, or
-        OWNER.
-      selfLink: The link to this access-control entry.
-    """
-    client = GetClientFromFlags()
-    global_params = GetGlobalParamsFromFlags()
-    request = messages.BucketAccessControl(
-        bucket=bucket.decode('utf8'),
-        )
-    if FLAGS['domain'].present:
-      request.domain = FLAGS.domain.decode('utf8')
-    if FLAGS['email'].present:
-      request.email = FLAGS.email.decode('utf8')
-    if FLAGS['entity'].present:
-      request.entity = FLAGS.entity.decode('utf8')
-    if FLAGS['entityId'].present:
-      request.entityId = FLAGS.entityId.decode('utf8')
-    if FLAGS['etag'].present:
-      request.etag = FLAGS.etag.decode('utf8')
-    if FLAGS['id'].present:
-      request.id = FLAGS.id.decode('utf8')
-    if FLAGS['kind'].present:
-      request.kind = FLAGS.kind.decode('utf8')
-    if FLAGS['projectTeam'].present:
-      request.projectTeam = apitools_base.JsonToMessage(messages.BucketAccessControl.ProjectTeamValue, FLAGS.projectTeam)
-    if FLAGS['role'].present:
-      request.role = FLAGS.role.decode('utf8')
-    if FLAGS['selfLink'].present:
-      request.selfLink = FLAGS.selfLink.decode('utf8')
-    result = client.bucketAccessControls.Insert(
-        request, global_params=global_params)
-    print apitools_base_cli.FormatOutput(result)
-
-
-class BucketAccessControlsList(apitools_base_cli.NewCmd):
-  """Command wrapping bucketAccessControls.List."""
-
-  usage = """bucketAccessControls_list <bucket>"""
-
-  def __init__(self, name, fv):
-    super(BucketAccessControlsList, self).__init__(name, fv)
-
-  def RunWithArgs(self, bucket):
-    """Retrieves ACL entries on the specified bucket.
-
-    Args:
-      bucket: Name of a bucket.
-    """
-    client = GetClientFromFlags()
-    global_params = GetGlobalParamsFromFlags()
-    request = messages.StorageBucketAccessControlsListRequest(
-        bucket=bucket.decode('utf8'),
-        )
-    result = client.bucketAccessControls.List(
-        request, global_params=global_params)
-    print apitools_base_cli.FormatOutput(result)
-
-
-class BucketAccessControlsPatch(apitools_base_cli.NewCmd):
-  """Command wrapping bucketAccessControls.Patch."""
-
-  usage = """bucketAccessControls_patch <bucket> <entity>"""
-
-  def __init__(self, name, fv):
-    super(BucketAccessControlsPatch, self).__init__(name, fv)
-    flags.DEFINE_string(
-        'domain',
-        None,
-        u'The domain associated with the entity, if any.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'email',
-        None,
-        u'The email address associated with the entity, if any.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'entityId',
-        None,
-        u'The ID for the entity, if any.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'etag',
-        None,
-        u'HTTP 1.1 Entity tag for the access-control entry.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'id',
-        None,
-        u'The ID of the access-control entry.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'kind',
-        u'storage#bucketAccessControl',
-        u'The kind of item this is. For bucket access control entries, this '
-        u'is always storage#bucketAccessControl.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'projectTeam',
-        None,
-        u'The project team associated with the entity, if any.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'role',
-        None,
-        u'The access permission for the entity. Can be READER, WRITER, or '
-        u'OWNER.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'selfLink',
-        None,
-        u'The link to this access-control entry.',
-        flag_values=fv)
-
-  def RunWithArgs(self, bucket, entity):
-    """Updates an ACL entry on the specified bucket. This method supports
-    patch semantics.
-
-    Args:
-      bucket: The name of the bucket.
-      entity: The entity holding the permission, in one of the following
-        forms:  - user-userId  - user-email  - group-groupId  - group-email  -
-        domain-domain  - project-team-projectId  - allUsers  -
-        allAuthenticatedUsers Examples:  - The user liz@example.com would be
-        user-liz@example.com.  - The group example@googlegroups.com would be
-        group-example@googlegroups.com.  - To refer to all members of the
-        Google Apps for Business domain example.com, the entity would be
-        domain-example.com.
-
-    Flags:
-      domain: The domain associated with the entity, if any.
-      email: The email address associated with the entity, if any.
-      entityId: The ID for the entity, if any.
-      etag: HTTP 1.1 Entity tag for the access-control entry.
-      id: The ID of the access-control entry.
-      kind: The kind of item this is. For bucket access control entries, this
-        is always storage#bucketAccessControl.
-      projectTeam: The project team associated with the entity, if any.
-      role: The access permission for the entity. Can be READER, WRITER, or
-        OWNER.
-      selfLink: The link to this access-control entry.
-    """
-    client = GetClientFromFlags()
-    global_params = GetGlobalParamsFromFlags()
-    request = messages.BucketAccessControl(
-        bucket=bucket.decode('utf8'),
-        entity=entity.decode('utf8'),
-        )
-    if FLAGS['domain'].present:
-      request.domain = FLAGS.domain.decode('utf8')
-    if FLAGS['email'].present:
-      request.email = FLAGS.email.decode('utf8')
-    if FLAGS['entityId'].present:
-      request.entityId = FLAGS.entityId.decode('utf8')
-    if FLAGS['etag'].present:
-      request.etag = FLAGS.etag.decode('utf8')
-    if FLAGS['id'].present:
-      request.id = FLAGS.id.decode('utf8')
-    if FLAGS['kind'].present:
-      request.kind = FLAGS.kind.decode('utf8')
-    if FLAGS['projectTeam'].present:
-      request.projectTeam = apitools_base.JsonToMessage(messages.BucketAccessControl.ProjectTeamValue, FLAGS.projectTeam)
-    if FLAGS['role'].present:
-      request.role = FLAGS.role.decode('utf8')
-    if FLAGS['selfLink'].present:
-      request.selfLink = FLAGS.selfLink.decode('utf8')
-    result = client.bucketAccessControls.Patch(
-        request, global_params=global_params)
-    print apitools_base_cli.FormatOutput(result)
-
-
-class BucketAccessControlsUpdate(apitools_base_cli.NewCmd):
-  """Command wrapping bucketAccessControls.Update."""
-
-  usage = """bucketAccessControls_update <bucket> <entity>"""
-
-  def __init__(self, name, fv):
-    super(BucketAccessControlsUpdate, self).__init__(name, fv)
-    flags.DEFINE_string(
-        'domain',
-        None,
-        u'The domain associated with the entity, if any.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'email',
-        None,
-        u'The email address associated with the entity, if any.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'entityId',
-        None,
-        u'The ID for the entity, if any.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'etag',
-        None,
-        u'HTTP 1.1 Entity tag for the access-control entry.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'id',
-        None,
-        u'The ID of the access-control entry.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'kind',
-        u'storage#bucketAccessControl',
-        u'The kind of item this is. For bucket access control entries, this '
-        u'is always storage#bucketAccessControl.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'projectTeam',
-        None,
-        u'The project team associated with the entity, if any.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'role',
-        None,
-        u'The access permission for the entity. Can be READER, WRITER, or '
-        u'OWNER.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'selfLink',
-        None,
-        u'The link to this access-control entry.',
-        flag_values=fv)
-
-  def RunWithArgs(self, bucket, entity):
-    """Updates an ACL entry on the specified bucket.
-
-    Args:
-      bucket: The name of the bucket.
-      entity: The entity holding the permission, in one of the following
-        forms:  - user-userId  - user-email  - group-groupId  - group-email  -
-        domain-domain  - project-team-projectId  - allUsers  -
-        allAuthenticatedUsers Examples:  - The user liz@example.com would be
-        user-liz@example.com.  - The group example@googlegroups.com would be
-        group-example@googlegroups.com.  - To refer to all members of the
-        Google Apps for Business domain example.com, the entity would be
-        domain-example.com.
-
-    Flags:
-      domain: The domain associated with the entity, if any.
-      email: The email address associated with the entity, if any.
-      entityId: The ID for the entity, if any.
-      etag: HTTP 1.1 Entity tag for the access-control entry.
-      id: The ID of the access-control entry.
-      kind: The kind of item this is. For bucket access control entries, this
-        is always storage#bucketAccessControl.
-      projectTeam: The project team associated with the entity, if any.
-      role: The access permission for the entity. Can be READER, WRITER, or
-        OWNER.
-      selfLink: The link to this access-control entry.
-    """
-    client = GetClientFromFlags()
-    global_params = GetGlobalParamsFromFlags()
-    request = messages.BucketAccessControl(
-        bucket=bucket.decode('utf8'),
-        entity=entity.decode('utf8'),
-        )
-    if FLAGS['domain'].present:
-      request.domain = FLAGS.domain.decode('utf8')
-    if FLAGS['email'].present:
-      request.email = FLAGS.email.decode('utf8')
-    if FLAGS['entityId'].present:
-      request.entityId = FLAGS.entityId.decode('utf8')
-    if FLAGS['etag'].present:
-      request.etag = FLAGS.etag.decode('utf8')
-    if FLAGS['id'].present:
-      request.id = FLAGS.id.decode('utf8')
-    if FLAGS['kind'].present:
-      request.kind = FLAGS.kind.decode('utf8')
-    if FLAGS['projectTeam'].present:
-      request.projectTeam = apitools_base.JsonToMessage(messages.BucketAccessControl.ProjectTeamValue, FLAGS.projectTeam)
-    if FLAGS['role'].present:
-      request.role = FLAGS.role.decode('utf8')
-    if FLAGS['selfLink'].present:
-      request.selfLink = FLAGS.selfLink.decode('utf8')
-    result = client.bucketAccessControls.Update(
-        request, global_params=global_params)
-    print apitools_base_cli.FormatOutput(result)
-
-
-class ChannelsStop(apitools_base_cli.NewCmd):
-  """Command wrapping channels.Stop."""
-
-  usage = """channels_stop"""
-
-  def __init__(self, name, fv):
-    super(ChannelsStop, self).__init__(name, fv)
-    flags.DEFINE_string(
-        'address',
-        None,
-        u'The address where notifications are delivered for this channel.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'expiration',
-        None,
-        u'Date and time of notification channel expiration, expressed as a '
-        u'Unix timestamp, in milliseconds. Optional.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'id',
-        None,
-        u'A UUID or similar unique string that identifies this channel.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'kind',
-        u'api#channel',
-        u'Identifies this as a notification channel used to watch for changes'
-        u' to a resource. Value: the fixed string "api#channel".',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'params',
-        None,
-        u'Additional parameters controlling delivery channel behavior. '
-        u'Optional.',
-        flag_values=fv)
-    flags.DEFINE_boolean(
-        'payload',
-        None,
-        u'A Boolean value to indicate whether payload is wanted. Optional.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'resourceId',
-        None,
-        u'An opaque ID that identifies the resource being watched on this '
-        u'channel. Stable across different API versions.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'resourceUri',
-        None,
-        u'A version-specific identifier for the watched resource.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'token',
-        None,
-        u'An arbitrary string delivered to the target address with each '
-        u'notification delivered over this channel. Optional.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'type',
-        None,
-        u'The type of delivery mechanism used for this channel.',
-        flag_values=fv)
-
-  def RunWithArgs(self):
-    """Stop watching resources through this channel
-
-    Flags:
-      address: The address where notifications are delivered for this channel.
-      expiration: Date and time of notification channel expiration, expressed
-        as a Unix timestamp, in milliseconds. Optional.
-      id: A UUID or similar unique string that identifies this channel.
-      kind: Identifies this as a notification channel used to watch for
-        changes to a resource. Value: the fixed string "api#channel".
-      params: Additional parameters controlling delivery channel behavior.
-        Optional.
-      payload: A Boolean value to indicate whether payload is wanted.
-        Optional.
-      resourceId: An opaque ID that identifies the resource being watched on
-        this channel. Stable across different API versions.
-      resourceUri: A version-specific identifier for the watched resource.
-      token: An arbitrary string delivered to the target address with each
-        notification delivered over this channel. Optional.
-      type: The type of delivery mechanism used for this channel.
-    """
-    client = GetClientFromFlags()
-    global_params = GetGlobalParamsFromFlags()
-    request = messages.Channel(
-        )
-    if FLAGS['address'].present:
-      request.address = FLAGS.address.decode('utf8')
-    if FLAGS['expiration'].present:
-      request.expiration = int(FLAGS.expiration)
-    if FLAGS['id'].present:
-      request.id = FLAGS.id.decode('utf8')
-    if FLAGS['kind'].present:
-      request.kind = FLAGS.kind.decode('utf8')
-    if FLAGS['params'].present:
-      request.params = apitools_base.JsonToMessage(messages.Channel.ParamsValue, FLAGS.params)
-    if FLAGS['payload'].present:
-      request.payload = FLAGS.payload
-    if FLAGS['resourceId'].present:
-      request.resourceId = FLAGS.resourceId.decode('utf8')
-    if FLAGS['resourceUri'].present:
-      request.resourceUri = FLAGS.resourceUri.decode('utf8')
-    if FLAGS['token'].present:
-      request.token = FLAGS.token.decode('utf8')
-    if FLAGS['type'].present:
-      request.type = FLAGS.type.decode('utf8')
-    result = client.channels.Stop(
-        request, global_params=global_params)
-    print apitools_base_cli.FormatOutput(result)
-
-
-class ObjectsCompose(apitools_base_cli.NewCmd):
-  """Command wrapping objects.Compose."""
-
-  usage = """objects_compose <destinationBucket> <destinationObject>"""
-
-  def __init__(self, name, fv):
-    super(ObjectsCompose, self).__init__(name, fv)
-    flags.DEFINE_string(
-        'composeRequest',
-        None,
-        u'A ComposeRequest resource to be passed as the request body.',
-        flag_values=fv)
-    flags.DEFINE_enum(
-        'destinationPredefinedAcl',
-        u'authenticatedRead',
-        [u'authenticatedRead', u'bucketOwnerFullControl', u'bucketOwnerRead', u'private', u'projectPrivate', u'publicRead'],
-        u'Apply a predefined set of access controls to the destination '
-        u'object.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'ifGenerationMatch',
-        None,
-        u"Makes the operation conditional on whether the object's current "
-        u'generation matches the given value.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'ifMetagenerationMatch',
-        None,
-        u"Makes the operation conditional on whether the object's current "
-        u'metageneration matches the given value.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'download_filename',
-        '',
-        'Filename to use for download.',
-        flag_values=fv)
-    flags.DEFINE_boolean(
-        'overwrite',
-        'False',
-        'If True, overwrite the existing file when downloading.',
-        flag_values=fv)
-
-  def RunWithArgs(self, destinationBucket, destinationObject):
-    """Concatenates a list of existing objects into a new object in the same
-    bucket.
-
-    Args:
-      destinationBucket: Name of the bucket in which to store the new object.
-      destinationObject: Name of the new object.
-
-    Flags:
-      composeRequest: A ComposeRequest resource to be passed as the request
-        body.
-      destinationPredefinedAcl: Apply a predefined set of access controls to
-        the destination object.
-      ifGenerationMatch: Makes the operation conditional on whether the
-        object's current generation matches the given value.
-      ifMetagenerationMatch: Makes the operation conditional on whether the
-        object's current metageneration matches the given value.
-      download_filename: Filename to use for download.
-      overwrite: If True, overwrite the existing file when downloading.
-    """
-    client = GetClientFromFlags()
-    global_params = GetGlobalParamsFromFlags()
-    request = messages.StorageObjectsComposeRequest(
-        destinationBucket=destinationBucket.decode('utf8'),
-        destinationObject=destinationObject.decode('utf8'),
-        )
-    if FLAGS['composeRequest'].present:
-      request.composeRequest = apitools_base.JsonToMessage(messages.ComposeRequest, FLAGS.composeRequest)
-    if FLAGS['destinationPredefinedAcl'].present:
-      request.destinationPredefinedAcl = messages.StorageObjectsComposeRequest.DestinationPredefinedAclValueValuesEnum(FLAGS.destinationPredefinedAcl)
-    if FLAGS['ifGenerationMatch'].present:
-      request.ifGenerationMatch = int(FLAGS.ifGenerationMatch)
-    if FLAGS['ifMetagenerationMatch'].present:
-      request.ifMetagenerationMatch = int(FLAGS.ifMetagenerationMatch)
-    download = None
-    if FLAGS.download_filename:
-      download = apitools_base.Download.FromFile(FLAGS.download_filename, overwrite=FLAGS.overwrite)
-    result = client.objects.Compose(
-        request, global_params=global_params, download=download)
-    print apitools_base_cli.FormatOutput(result)
-
-
-class ObjectsCopy(apitools_base_cli.NewCmd):
-  """Command wrapping objects.Copy."""
-
-  usage = """objects_copy <sourceBucket> <sourceObject> <destinationBucket> <destinationObject>"""
-
-  def __init__(self, name, fv):
-    super(ObjectsCopy, self).__init__(name, fv)
-    flags.DEFINE_enum(
-        'destinationPredefinedAcl',
-        u'authenticatedRead',
-        [u'authenticatedRead', u'bucketOwnerFullControl', u'bucketOwnerRead', u'private', u'projectPrivate', u'publicRead'],
-        u'Apply a predefined set of access controls to the destination '
-        u'object.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'ifGenerationMatch',
-        None,
-        u"Makes the operation conditional on whether the destination object's"
-        u' current generation matches the given value.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'ifGenerationNotMatch',
-        None,
-        u"Makes the operation conditional on whether the destination object's"
-        u' current generation does not match the given value.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'ifMetagenerationMatch',
-        None,
-        u"Makes the operation conditional on whether the destination object's"
-        u' current metageneration matches the given value.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'ifMetagenerationNotMatch',
-        None,
-        u"Makes the operation conditional on whether the destination object's"
-        u' current metageneration does not match the given value.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'ifSourceGenerationMatch',
-        None,
-        u"Makes the operation conditional on whether the source object's "
-        u'generation matches the given value.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'ifSourceGenerationNotMatch',
-        None,
-        u"Makes the operation conditional on whether the source object's "
-        u'generation does not match the given value.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'ifSourceMetagenerationMatch',
-        None,
-        u"Makes the operation conditional on whether the source object's "
-        u'current metageneration matches the given value.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'ifSourceMetagenerationNotMatch',
-        None,
-        u"Makes the operation conditional on whether the source object's "
-        u'current metageneration does not match the given value.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'object',
-        None,
-        u'A Object resource to be passed as the request body.',
-        flag_values=fv)
-    flags.DEFINE_enum(
-        'projection',
-        u'full',
-        [u'full', u'noAcl'],
-        u'Set of properties to return. Defaults to noAcl, unless the object '
-        u'resource specifies the acl property, when it defaults to full.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'sourceGeneration',
-        None,
-        u'If present, selects a specific revision of the source object (as '
-        u'opposed to the latest version, the default).',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'download_filename',
-        '',
-        'Filename to use for download.',
-        flag_values=fv)
-    flags.DEFINE_boolean(
-        'overwrite',
-        'False',
-        'If True, overwrite the existing file when downloading.',
-        flag_values=fv)
-
-  def RunWithArgs(self, sourceBucket, sourceObject, destinationBucket, destinationObject):
-    """Copies an object to a specified location. Optionally overrides
-    metadata.
-
-    Args:
-      sourceBucket: Name of the bucket in which to find the source object.
-      sourceObject: Name of the source object.
-      destinationBucket: Name of the bucket in which to store the new object.
-        Overrides the provided object metadata's bucket value, if any.
-      destinationObject: Name of the new object. Required when the object
-        metadata is not otherwise provided. Overrides the object metadata's
-        name value, if any.
-
-    Flags:
-      destinationPredefinedAcl: Apply a predefined set of access controls to
-        the destination object.
-      ifGenerationMatch: Makes the operation conditional on whether the
-        destination object's current generation matches the given value.
-      ifGenerationNotMatch: Makes the operation conditional on whether the
-        destination object's current generation does not match the given
-        value.
-      ifMetagenerationMatch: Makes the operation conditional on whether the
-        destination object's current metageneration matches the given value.
-      ifMetagenerationNotMatch: Makes the operation conditional on whether the
-        destination object's current metageneration does not match the given
-        value.
-      ifSourceGenerationMatch: Makes the operation conditional on whether the
-        source object's generation matches the given value.
-      ifSourceGenerationNotMatch: Makes the operation conditional on whether
-        the source object's generation does not match the given value.
-      ifSourceMetagenerationMatch: Makes the operation conditional on whether
-        the source object's current metageneration matches the given value.
-      ifSourceMetagenerationNotMatch: Makes the operation conditional on
-        whether the source object's current metageneration does not match the
-        given value.
-      object: A Object resource to be passed as the request body.
-      projection: Set of properties to return. Defaults to noAcl, unless the
-        object resource specifies the acl property, when it defaults to full.
-      sourceGeneration: If present, selects a specific revision of the source
-        object (as opposed to the latest version, the default).
-      download_filename: Filename to use for download.
-      overwrite: If True, overwrite the existing file when downloading.
-    """
-    client = GetClientFromFlags()
-    global_params = GetGlobalParamsFromFlags()
-    request = messages.StorageObjectsCopyRequest(
-        sourceBucket=sourceBucket.decode('utf8'),
-        sourceObject=sourceObject.decode('utf8'),
-        destinationBucket=destinationBucket.decode('utf8'),
-        destinationObject=destinationObject.decode('utf8'),
-        )
-    if FLAGS['destinationPredefinedAcl'].present:
-      request.destinationPredefinedAcl = messages.StorageObjectsCopyRequest.DestinationPredefinedAclValueValuesEnum(FLAGS.destinationPredefinedAcl)
-    if FLAGS['ifGenerationMatch'].present:
-      request.ifGenerationMatch = int(FLAGS.ifGenerationMatch)
-    if FLAGS['ifGenerationNotMatch'].present:
-      request.ifGenerationNotMatch = int(FLAGS.ifGenerationNotMatch)
-    if FLAGS['ifMetagenerationMatch'].present:
-      request.ifMetagenerationMatch = int(FLAGS.ifMetagenerationMatch)
-    if FLAGS['ifMetagenerationNotMatch'].present:
-      request.ifMetagenerationNotMatch = int(FLAGS.ifMetagenerationNotMatch)
-    if FLAGS['ifSourceGenerationMatch'].present:
-      request.ifSourceGenerationMatch = int(FLAGS.ifSourceGenerationMatch)
-    if FLAGS['ifSourceGenerationNotMatch'].present:
-      request.ifSourceGenerationNotMatch = int(FLAGS.ifSourceGenerationNotMatch)
-    if FLAGS['ifSourceMetagenerationMatch'].present:
-      request.ifSourceMetagenerationMatch = int(FLAGS.ifSourceMetagenerationMatch)
-    if FLAGS['ifSourceMetagenerationNotMatch'].present:
-      request.ifSourceMetagenerationNotMatch = int(FLAGS.ifSourceMetagenerationNotMatch)
-    if FLAGS['object'].present:
-      request.object = apitools_base.JsonToMessage(messages.Object, FLAGS.object)
-    if FLAGS['projection'].present:
-      request.projection = messages.StorageObjectsCopyRequest.ProjectionValueValuesEnum(FLAGS.projection)
-    if FLAGS['sourceGeneration'].present:
-      request.sourceGeneration = int(FLAGS.sourceGeneration)
-    download = None
-    if FLAGS.download_filename:
-      download = apitools_base.Download.FromFile(FLAGS.download_filename, overwrite=FLAGS.overwrite)
-    result = client.objects.Copy(
-        request, global_params=global_params, download=download)
-    print apitools_base_cli.FormatOutput(result)
-
-
-class ObjectsDelete(apitools_base_cli.NewCmd):
-  """Command wrapping objects.Delete."""
-
-  usage = """objects_delete <bucket> <object>"""
-
-  def __init__(self, name, fv):
-    super(ObjectsDelete, self).__init__(name, fv)
-    flags.DEFINE_string(
-        'generation',
-        None,
-        u'If present, permanently deletes a specific revision of this object '
-        u'(as opposed to the latest version, the default).',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'ifGenerationMatch',
-        None,
-        u"Makes the operation conditional on whether the object's current "
-        u'generation matches the given value.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'ifGenerationNotMatch',
-        None,
-        u"Makes the operation conditional on whether the object's current "
-        u'generation does not match the given value.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'ifMetagenerationMatch',
-        None,
-        u"Makes the operation conditional on whether the object's current "
-        u'metageneration matches the given value.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'ifMetagenerationNotMatch',
-        None,
-        u"Makes the operation conditional on whether the object's current "
-        u'metageneration does not match the given value.',
-        flag_values=fv)
-
-  def RunWithArgs(self, bucket, object):
-    """Deletes an object and its metadata. Deletions are permanent if
-    versioning is not enabled for the bucket, or if the generation parameter
-    is used.
-
-    Args:
-      bucket: Name of the bucket in which the object resides.
-      object: Name of the object.
-
-    Flags:
-      generation: If present, permanently deletes a specific revision of this
-        object (as opposed to the latest version, the default).
-      ifGenerationMatch: Makes the operation conditional on whether the
-        object's current generation matches the given value.
-      ifGenerationNotMatch: Makes the operation conditional on whether the
-        object's current generation does not match the given value.
-      ifMetagenerationMatch: Makes the operation conditional on whether the
-        object's current metageneration matches the given value.
-      ifMetagenerationNotMatch: Makes the operation conditional on whether the
-        object's current metageneration does not match the given value.
-    """
-    client = GetClientFromFlags()
-    global_params = GetGlobalParamsFromFlags()
-    request = messages.StorageObjectsDeleteRequest(
-        bucket=bucket.decode('utf8'),
-        object=object.decode('utf8'),
-        )
-    if FLAGS['generation'].present:
-      request.generation = int(FLAGS.generation)
-    if FLAGS['ifGenerationMatch'].present:
-      request.ifGenerationMatch = int(FLAGS.ifGenerationMatch)
-    if FLAGS['ifGenerationNotMatch'].present:
-      request.ifGenerationNotMatch = int(FLAGS.ifGenerationNotMatch)
-    if FLAGS['ifMetagenerationMatch'].present:
-      request.ifMetagenerationMatch = int(FLAGS.ifMetagenerationMatch)
-    if FLAGS['ifMetagenerationNotMatch'].present:
-      request.ifMetagenerationNotMatch = int(FLAGS.ifMetagenerationNotMatch)
-    result = client.objects.Delete(
-        request, global_params=global_params)
-    print apitools_base_cli.FormatOutput(result)
-
-
-class ObjectsGet(apitools_base_cli.NewCmd):
-  """Command wrapping objects.Get."""
-
-  usage = """objects_get <bucket> <object>"""
-
-  def __init__(self, name, fv):
-    super(ObjectsGet, self).__init__(name, fv)
-    flags.DEFINE_string(
-        'generation',
-        None,
-        u'If present, selects a specific revision of this object (as opposed '
-        u'to the latest version, the default).',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'ifGenerationMatch',
-        None,
-        u"Makes the operation conditional on whether the object's generation "
-        u'matches the given value.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'ifGenerationNotMatch',
-        None,
-        u"Makes the operation conditional on whether the object's generation "
-        u'does not match the given value.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'ifMetagenerationMatch',
-        None,
-        u"Makes the operation conditional on whether the object's current "
-        u'metageneration matches the given value.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'ifMetagenerationNotMatch',
-        None,
-        u"Makes the operation conditional on whether the object's current "
-        u'metageneration does not match the given value.',
-        flag_values=fv)
-    flags.DEFINE_enum(
-        'projection',
-        u'full',
-        [u'full', u'noAcl'],
-        u'Set of properties to return. Defaults to noAcl.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'download_filename',
-        '',
-        'Filename to use for download.',
-        flag_values=fv)
-    flags.DEFINE_boolean(
-        'overwrite',
-        'False',
-        'If True, overwrite the existing file when downloading.',
-        flag_values=fv)
-
-  def RunWithArgs(self, bucket, object):
-    """Retrieves an object or its metadata.
-
-    Args:
-      bucket: Name of the bucket in which the object resides.
-      object: Name of the object.
-
-    Flags:
-      generation: If present, selects a specific revision of this object (as
-        opposed to the latest version, the default).
-      ifGenerationMatch: Makes the operation conditional on whether the
-        object's generation matches the given value.
-      ifGenerationNotMatch: Makes the operation conditional on whether the
-        object's generation does not match the given value.
-      ifMetagenerationMatch: Makes the operation conditional on whether the
-        object's current metageneration matches the given value.
-      ifMetagenerationNotMatch: Makes the operation conditional on whether the
-        object's current metageneration does not match the given value.
-      projection: Set of properties to return. Defaults to noAcl.
-      download_filename: Filename to use for download.
-      overwrite: If True, overwrite the existing file when downloading.
-    """
-    client = GetClientFromFlags()
-    global_params = GetGlobalParamsFromFlags()
-    request = messages.StorageObjectsGetRequest(
-        bucket=bucket.decode('utf8'),
-        object=object.decode('utf8'),
-        )
-    if FLAGS['generation'].present:
-      request.generation = int(FLAGS.generation)
-    if FLAGS['ifGenerationMatch'].present:
-      request.ifGenerationMatch = int(FLAGS.ifGenerationMatch)
-    if FLAGS['ifGenerationNotMatch'].present:
-      request.ifGenerationNotMatch = int(FLAGS.ifGenerationNotMatch)
-    if FLAGS['ifMetagenerationMatch'].present:
-      request.ifMetagenerationMatch = int(FLAGS.ifMetagenerationMatch)
-    if FLAGS['ifMetagenerationNotMatch'].present:
-      request.ifMetagenerationNotMatch = int(FLAGS.ifMetagenerationNotMatch)
-    if FLAGS['projection'].present:
-      request.projection = messages.StorageObjectsGetRequest.ProjectionValueValuesEnum(FLAGS.projection)
-    download = None
-    if FLAGS.download_filename:
-      download = apitools_base.Download.FromFile(FLAGS.download_filename, overwrite=FLAGS.overwrite)
-    result = client.objects.Get(
-        request, global_params=global_params, download=download)
-    print apitools_base_cli.FormatOutput(result)
-
-
-class ObjectsInsert(apitools_base_cli.NewCmd):
-  """Command wrapping objects.Insert."""
-
-  usage = """objects_insert <bucket>"""
-
-  def __init__(self, name, fv):
-    super(ObjectsInsert, self).__init__(name, fv)
-    flags.DEFINE_string(
-        'contentEncoding',
-        None,
-        u'If set, sets the contentEncoding property of the final object to '
-        u'this value. Setting this parameter is equivalent to setting the '
-        u'contentEncoding metadata property. This can be useful when '
-        u'uploading an object with uploadType=media to indicate the encoding '
-        u'of the content being uploaded.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'ifGenerationMatch',
-        None,
-        u"Makes the operation conditional on whether the object's current "
-        u'generation matches the given value.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'ifGenerationNotMatch',
-        None,
-        u"Makes the operation conditional on whether the object's current "
-        u'generation does not match the given value.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'ifMetagenerationMatch',
-        None,
-        u"Makes the operation conditional on whether the object's current "
-        u'metageneration matches the given value.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'ifMetagenerationNotMatch',
-        None,
-        u"Makes the operation conditional on whether the object's current "
-        u'metageneration does not match the given value.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'name',
-        None,
-        u'Name of the object. Required when the object metadata is not '
-        u"otherwise provided. Overrides the object metadata's name value, if "
-        u'any.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'object',
-        None,
-        u'A Object resource to be passed as the request body.',
-        flag_values=fv)
-    flags.DEFINE_enum(
-        'predefinedAcl',
-        u'authenticatedRead',
-        [u'authenticatedRead', u'bucketOwnerFullControl', u'bucketOwnerRead', u'private', u'projectPrivate', u'publicRead'],
-        u'Apply a predefined set of access controls to this object.',
-        flag_values=fv)
-    flags.DEFINE_enum(
-        'projection',
-        u'full',
-        [u'full', u'noAcl'],
-        u'Set of properties to return. Defaults to noAcl, unless the object '
-        u'resource specifies the acl property, when it defaults to full.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'upload_filename',
-        '',
-        'Filename to use for upload.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'upload_mime_type',
-        '',
-        'MIME type to use for the upload. Only needed if the extension on '
-        '--upload_filename does not determine the correct (or any) MIME '
-        'type.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'download_filename',
-        '',
-        'Filename to use for download.',
-        flag_values=fv)
-    flags.DEFINE_boolean(
-        'overwrite',
-        'False',
-        'If True, overwrite the existing file when downloading.',
-        flag_values=fv)
-
-  def RunWithArgs(self, bucket):
-    """Stores a new object and metadata.
-
-    Args:
-      bucket: Name of the bucket in which to store the new object. Overrides
-        the provided object metadata's bucket value, if any.
-
-    Flags:
-      contentEncoding: If set, sets the contentEncoding property of the final
-        object to this value. Setting this parameter is equivalent to setting
-        the contentEncoding metadata property. This can be useful when
-        uploading an object with uploadType=media to indicate the encoding of
-        the content being uploaded.
-      ifGenerationMatch: Makes the operation conditional on whether the
-        object's current generation matches the given value.
-      ifGenerationNotMatch: Makes the operation conditional on whether the
-        object's current generation does not match the given value.
-      ifMetagenerationMatch: Makes the operation conditional on whether the
-        object's current metageneration matches the given value.
-      ifMetagenerationNotMatch: Makes the operation conditional on whether the
-        object's current metageneration does not match the given value.
-      name: Name of the object. Required when the object metadata is not
-        otherwise provided. Overrides the object metadata's name value, if
-        any.
-      object: A Object resource to be passed as the request body.
-      predefinedAcl: Apply a predefined set of access controls to this object.
-      projection: Set of properties to return. Defaults to noAcl, unless the
-        object resource specifies the acl property, when it defaults to full.
-      upload_filename: Filename to use for upload.
-      upload_mime_type: MIME type to use for the upload. Only needed if the
-        extension on --upload_filename does not determine the correct (or any)
-        MIME type.
-      download_filename: Filename to use for download.
-      overwrite: If True, overwrite the existing file when downloading.
-    """
-    client = GetClientFromFlags()
-    global_params = GetGlobalParamsFromFlags()
-    request = messages.StorageObjectsInsertRequest(
-        bucket=bucket.decode('utf8'),
-        )
-    if FLAGS['contentEncoding'].present:
-      request.contentEncoding = FLAGS.contentEncoding.decode('utf8')
-    if FLAGS['ifGenerationMatch'].present:
-      request.ifGenerationMatch = int(FLAGS.ifGenerationMatch)
-    if FLAGS['ifGenerationNotMatch'].present:
-      request.ifGenerationNotMatch = int(FLAGS.ifGenerationNotMatch)
-    if FLAGS['ifMetagenerationMatch'].present:
-      request.ifMetagenerationMatch = int(FLAGS.ifMetagenerationMatch)
-    if FLAGS['ifMetagenerationNotMatch'].present:
-      request.ifMetagenerationNotMatch = int(FLAGS.ifMetagenerationNotMatch)
-    if FLAGS['name'].present:
-      request.name = FLAGS.name.decode('utf8')
-    if FLAGS['object'].present:
-      request.object = apitools_base.JsonToMessage(messages.Object, FLAGS.object)
-    if FLAGS['predefinedAcl'].present:
-      request.predefinedAcl = messages.StorageObjectsInsertRequest.PredefinedAclValueValuesEnum(FLAGS.predefinedAcl)
-    if FLAGS['projection'].present:
-      request.projection = messages.StorageObjectsInsertRequest.ProjectionValueValuesEnum(FLAGS.projection)
-    upload = None
-    if FLAGS.upload_filename:
-      upload = apitools_base.Upload.FromFile(
-          FLAGS.upload_filename, FLAGS.upload_mime_type)
-    download = None
-    if FLAGS.download_filename:
-      download = apitools_base.Download.FromFile(FLAGS.download_filename, overwrite=FLAGS.overwrite)
-    result = client.objects.Insert(
-        request, global_params=global_params, upload=upload, download=download)
-    print apitools_base_cli.FormatOutput(result)
-
-
-class ObjectsList(apitools_base_cli.NewCmd):
-  """Command wrapping objects.List."""
-
-  usage = """objects_list <bucket>"""
-
-  def __init__(self, name, fv):
-    super(ObjectsList, self).__init__(name, fv)
-    flags.DEFINE_string(
-        'delimiter',
-        None,
-        u'Returns results in a directory-like mode. items will contain only '
-        u'objects whose names, aside from the prefix, do not contain '
-        u'delimiter. Objects whose names, aside from the prefix, contain '
-        u'delimiter will have their name, truncated after the delimiter, '
-        u'returned in prefixes. Duplicate prefixes are omitted.',
-        flag_values=fv)
-    flags.DEFINE_integer(
-        'maxResults',
-        None,
-        u'Maximum number of items plus prefixes to return. As duplicate '
-        u'prefixes are omitted, fewer total results may be returned than '
-        u'requested.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'pageToken',
-        None,
-        u'A previously-returned page token representing part of the larger '
-        u'set of results to view.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'prefix',
-        None,
-        u'Filter results to objects whose names begin with this prefix.',
-        flag_values=fv)
-    flags.DEFINE_enum(
-        'projection',
-        u'full',
-        [u'full', u'noAcl'],
-        u'Set of properties to return. Defaults to noAcl.',
-        flag_values=fv)
-    flags.DEFINE_boolean(
-        'versions',
-        None,
-        u'If true, lists all versions of a file as distinct results.',
-        flag_values=fv)
-
-  def RunWithArgs(self, bucket):
-    """Retrieves a list of objects matching the criteria.
-
-    Args:
-      bucket: Name of the bucket in which to look for objects.
-
-    Flags:
-      delimiter: Returns results in a directory-like mode. items will contain
-        only objects whose names, aside from the prefix, do not contain
-        delimiter. Objects whose names, aside from the prefix, contain
-        delimiter will have their name, truncated after the delimiter,
-        returned in prefixes. Duplicate prefixes are omitted.
-      maxResults: Maximum number of items plus prefixes to return. As
-        duplicate prefixes are omitted, fewer total results may be returned
-        than requested.
-      pageToken: A previously-returned page token representing part of the
-        larger set of results to view.
-      prefix: Filter results to objects whose names begin with this prefix.
-      projection: Set of properties to return. Defaults to noAcl.
-      versions: If true, lists all versions of a file as distinct results.
-    """
-    client = GetClientFromFlags()
-    global_params = GetGlobalParamsFromFlags()
-    request = messages.StorageObjectsListRequest(
-        bucket=bucket.decode('utf8'),
-        )
-    if FLAGS['delimiter'].present:
-      request.delimiter = FLAGS.delimiter.decode('utf8')
-    if FLAGS['maxResults'].present:
-      request.maxResults = FLAGS.maxResults
-    if FLAGS['pageToken'].present:
-      request.pageToken = FLAGS.pageToken.decode('utf8')
-    if FLAGS['prefix'].present:
-      request.prefix = FLAGS.prefix.decode('utf8')
-    if FLAGS['projection'].present:
-      request.projection = messages.StorageObjectsListRequest.ProjectionValueValuesEnum(FLAGS.projection)
-    if FLAGS['versions'].present:
-      request.versions = FLAGS.versions
-    result = client.objects.List(
-        request, global_params=global_params)
-    print apitools_base_cli.FormatOutput(result)
-
-
-class ObjectsPatch(apitools_base_cli.NewCmd):
-  """Command wrapping objects.Patch."""
-
-  usage = """objects_patch <bucket> <object>"""
-
-  def __init__(self, name, fv):
-    super(ObjectsPatch, self).__init__(name, fv)
-    flags.DEFINE_string(
-        'generation',
-        None,
-        u'If present, selects a specific revision of this object (as opposed '
-        u'to the latest version, the default).',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'ifGenerationMatch',
-        None,
-        u"Makes the operation conditional on whether the object's current "
-        u'generation matches the given value.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'ifGenerationNotMatch',
-        None,
-        u"Makes the operation conditional on whether the object's current "
-        u'generation does not match the given value.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'ifMetagenerationMatch',
-        None,
-        u"Makes the operation conditional on whether the object's current "
-        u'metageneration matches the given value.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'ifMetagenerationNotMatch',
-        None,
-        u"Makes the operation conditional on whether the object's current "
-        u'metageneration does not match the given value.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'objectResource',
-        None,
-        u'A Object resource to be passed as the request body.',
-        flag_values=fv)
-    flags.DEFINE_enum(
-        'predefinedAcl',
-        u'authenticatedRead',
-        [u'authenticatedRead', u'bucketOwnerFullControl', u'bucketOwnerRead', u'private', u'projectPrivate', u'publicRead'],
-        u'Apply a predefined set of access controls to this object.',
-        flag_values=fv)
-    flags.DEFINE_enum(
-        'projection',
-        u'full',
-        [u'full', u'noAcl'],
-        u'Set of properties to return. Defaults to full.',
-        flag_values=fv)
-
-  def RunWithArgs(self, bucket, object):
-    """Updates an object's metadata. This method supports patch semantics.
-
-    Args:
-      bucket: Name of the bucket in which the object resides.
-      object: Name of the object.
-
-    Flags:
-      generation: If present, selects a specific revision of this object (as
-        opposed to the latest version, the default).
-      ifGenerationMatch: Makes the operation conditional on whether the
-        object's current generation matches the given value.
-      ifGenerationNotMatch: Makes the operation conditional on whether the
-        object's current generation does not match the given value.
-      ifMetagenerationMatch: Makes the operation conditional on whether the
-        object's current metageneration matches the given value.
-      ifMetagenerationNotMatch: Makes the operation conditional on whether the
-        object's current metageneration does not match the given value.
-      objectResource: A Object resource to be passed as the request body.
-      predefinedAcl: Apply a predefined set of access controls to this object.
-      projection: Set of properties to return. Defaults to full.
-    """
-    client = GetClientFromFlags()
-    global_params = GetGlobalParamsFromFlags()
-    request = messages.StorageObjectsPatchRequest(
-        bucket=bucket.decode('utf8'),
-        object=object.decode('utf8'),
-        )
-    if FLAGS['generation'].present:
-      request.generation = int(FLAGS.generation)
-    if FLAGS['ifGenerationMatch'].present:
-      request.ifGenerationMatch = int(FLAGS.ifGenerationMatch)
-    if FLAGS['ifGenerationNotMatch'].present:
-      request.ifGenerationNotMatch = int(FLAGS.ifGenerationNotMatch)
-    if FLAGS['ifMetagenerationMatch'].present:
-      request.ifMetagenerationMatch = int(FLAGS.ifMetagenerationMatch)
-    if FLAGS['ifMetagenerationNotMatch'].present:
-      request.ifMetagenerationNotMatch = int(FLAGS.ifMetagenerationNotMatch)
-    if FLAGS['objectResource'].present:
-      request.objectResource = apitools_base.JsonToMessage(messages.Object, FLAGS.objectResource)
-    if FLAGS['predefinedAcl'].present:
-      request.predefinedAcl = messages.StorageObjectsPatchRequest.PredefinedAclValueValuesEnum(FLAGS.predefinedAcl)
-    if FLAGS['projection'].present:
-      request.projection = messages.StorageObjectsPatchRequest.ProjectionValueValuesEnum(FLAGS.projection)
-    result = client.objects.Patch(
-        request, global_params=global_params)
-    print apitools_base_cli.FormatOutput(result)
-
-
-class ObjectsUpdate(apitools_base_cli.NewCmd):
-  """Command wrapping objects.Update."""
-
-  usage = """objects_update <bucket> <object>"""
-
-  def __init__(self, name, fv):
-    super(ObjectsUpdate, self).__init__(name, fv)
-    flags.DEFINE_string(
-        'generation',
-        None,
-        u'If present, selects a specific revision of this object (as opposed '
-        u'to the latest version, the default).',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'ifGenerationMatch',
-        None,
-        u"Makes the operation conditional on whether the object's current "
-        u'generation matches the given value.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'ifGenerationNotMatch',
-        None,
-        u"Makes the operation conditional on whether the object's current "
-        u'generation does not match the given value.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'ifMetagenerationMatch',
-        None,
-        u"Makes the operation conditional on whether the object's current "
-        u'metageneration matches the given value.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'ifMetagenerationNotMatch',
-        None,
-        u"Makes the operation conditional on whether the object's current "
-        u'metageneration does not match the given value.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'objectResource',
-        None,
-        u'A Object resource to be passed as the request body.',
-        flag_values=fv)
-    flags.DEFINE_enum(
-        'predefinedAcl',
-        u'authenticatedRead',
-        [u'authenticatedRead', u'bucketOwnerFullControl', u'bucketOwnerRead', u'private', u'projectPrivate', u'publicRead'],
-        u'Apply a predefined set of access controls to this object.',
-        flag_values=fv)
-    flags.DEFINE_enum(
-        'projection',
-        u'full',
-        [u'full', u'noAcl'],
-        u'Set of properties to return. Defaults to full.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'download_filename',
-        '',
-        'Filename to use for download.',
-        flag_values=fv)
-    flags.DEFINE_boolean(
-        'overwrite',
-        'False',
-        'If True, overwrite the existing file when downloading.',
-        flag_values=fv)
-
-  def RunWithArgs(self, bucket, object):
-    """Updates an object's metadata.
-
-    Args:
-      bucket: Name of the bucket in which the object resides.
-      object: Name of the object.
-
-    Flags:
-      generation: If present, selects a specific revision of this object (as
-        opposed to the latest version, the default).
-      ifGenerationMatch: Makes the operation conditional on whether the
-        object's current generation matches the given value.
-      ifGenerationNotMatch: Makes the operation conditional on whether the
-        object's current generation does not match the given value.
-      ifMetagenerationMatch: Makes the operation conditional on whether the
-        object's current metageneration matches the given value.
-      ifMetagenerationNotMatch: Makes the operation conditional on whether the
-        object's current metageneration does not match the given value.
-      objectResource: A Object resource to be passed as the request body.
-      predefinedAcl: Apply a predefined set of access controls to this object.
-      projection: Set of properties to return. Defaults to full.
-      download_filename: Filename to use for download.
-      overwrite: If True, overwrite the existing file when downloading.
-    """
-    client = GetClientFromFlags()
-    global_params = GetGlobalParamsFromFlags()
-    request = messages.StorageObjectsUpdateRequest(
-        bucket=bucket.decode('utf8'),
-        object=object.decode('utf8'),
-        )
-    if FLAGS['generation'].present:
-      request.generation = int(FLAGS.generation)
-    if FLAGS['ifGenerationMatch'].present:
-      request.ifGenerationMatch = int(FLAGS.ifGenerationMatch)
-    if FLAGS['ifGenerationNotMatch'].present:
-      request.ifGenerationNotMatch = int(FLAGS.ifGenerationNotMatch)
-    if FLAGS['ifMetagenerationMatch'].present:
-      request.ifMetagenerationMatch = int(FLAGS.ifMetagenerationMatch)
-    if FLAGS['ifMetagenerationNotMatch'].present:
-      request.ifMetagenerationNotMatch = int(FLAGS.ifMetagenerationNotMatch)
-    if FLAGS['objectResource'].present:
-      request.objectResource = apitools_base.JsonToMessage(messages.Object, FLAGS.objectResource)
-    if FLAGS['predefinedAcl'].present:
-      request.predefinedAcl = messages.StorageObjectsUpdateRequest.PredefinedAclValueValuesEnum(FLAGS.predefinedAcl)
-    if FLAGS['projection'].present:
-      request.projection = messages.StorageObjectsUpdateRequest.ProjectionValueValuesEnum(FLAGS.projection)
-    download = None
-    if FLAGS.download_filename:
-      download = apitools_base.Download.FromFile(FLAGS.download_filename, overwrite=FLAGS.overwrite)
-    result = client.objects.Update(
-        request, global_params=global_params, download=download)
-    print apitools_base_cli.FormatOutput(result)
-
-
-class ObjectsWatchAll(apitools_base_cli.NewCmd):
-  """Command wrapping objects.WatchAll."""
-
-  usage = """objects_watchAll <bucket>"""
-
-  def __init__(self, name, fv):
-    super(ObjectsWatchAll, self).__init__(name, fv)
-    flags.DEFINE_string(
-        'channel',
-        None,
-        u'A Channel resource to be passed as the request body.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'delimiter',
-        None,
-        u'Returns results in a directory-like mode. items will contain only '
-        u'objects whose names, aside from the prefix, do not contain '
-        u'delimiter. Objects whose names, aside from the prefix, contain '
-        u'delimiter will have their name, truncated after the delimiter, '
-        u'returned in prefixes. Duplicate prefixes are omitted.',
-        flag_values=fv)
-    flags.DEFINE_integer(
-        'maxResults',
-        None,
-        u'Maximum number of items plus prefixes to return. As duplicate '
-        u'prefixes are omitted, fewer total results may be returned than '
-        u'requested.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'pageToken',
-        None,
-        u'A previously-returned page token representing part of the larger '
-        u'set of results to view.',
-        flag_values=fv)
-    flags.DEFINE_string(
-        'prefix',
-        None,
-        u'Filter results to objects whose names begin with this prefix.',
-        flag_values=fv)
-    flags.DEFINE_enum(
-        'projection',
-        u'full',
-        [u'full', u'noAcl'],
-        u'Set of properties to return. Defaults to noAcl.',
-        flag_values=fv)
-    flags.DEFINE_boolean(
-        'versions',
-        None,
-        u'If true, lists all versions of a file as distinct results.',
-        flag_values=fv)
-
-  def RunWithArgs(self, bucket):
-    """Watch for changes on all objects in a bucket.
-
-    Args:
-      bucket: Name of the bucket in which to look for objects.
-
-    Flags:
-      channel: A Channel resource to be passed as the request body.
-      delimiter: Returns results in a directory-like mode. items will contain
-        only objects whose names, aside from the prefix, do not contain
-        delimiter. Objects whose names, aside from the prefix, contain
-        delimiter will have their name, truncated after the delimiter,
-        returned in prefixes. Duplicate prefixes are omitted.
-      maxResults: Maximum number of items plus prefixes to return. As
-        duplicate prefixes are omitted, fewer total results may be returned
-        than requested.
-      pageToken: A previously-returned page token representing part of the
-        larger set of results to view.
-      prefix: Filter results to objects whose names begin with this prefix.
-      projection: Set of properties to return. Defaults to noAcl.
-      versions: If true, lists all versions of a file as distinct results.
-    """
-    client = GetClientFromFlags()
-    global_params = GetGlobalParamsFromFlags()
-    request = messages.StorageObjectsWatchAllRequest(
-        bucket=bucket.decode('utf8'),
-        )
-    if FLAGS['channel'].present:
-      request.channel = apitools_base.JsonToMessage(messages.Channel, FLAGS.channel)
-    if FLAGS['delimiter'].present:
-      request.delimiter = FLAGS.delimiter.decode('utf8')
-    if FLAGS['maxResults'].present:
-      request.maxResults = FLAGS.maxResults
-    if FLAGS['pageToken'].present:
-      request.pageToken = FLAGS.pageToken.decode('utf8')
-    if FLAGS['prefix'].present:
-      request.prefix = FLAGS.prefix.decode('utf8')
-    if FLAGS['projection'].present:
-      request.projection = messages.StorageObjectsWatchAllRequest.ProjectionValueValuesEnum(FLAGS.projection)
-    if FLAGS['versions'].present:
-      request.versions = FLAGS.versions
-    result = client.objects.WatchAll(
-        request, global_params=global_params)
-    print apitools_base_cli.FormatOutput(result)
-
-
 class ObjectAccessControlsDelete(apitools_base_cli.NewCmd):
   """Command wrapping objectAccessControls.Delete."""
 
@@ -2467,70 +1871,384 @@
     print apitools_base_cli.FormatOutput(result)
 
 
-class BucketsDelete(apitools_base_cli.NewCmd):
-  """Command wrapping buckets.Delete."""
+class ObjectsCompose(apitools_base_cli.NewCmd):
+  """Command wrapping objects.Compose."""
 
-  usage = """buckets_delete <bucket>"""
+  usage = """objects_compose <destinationBucket> <destinationObject>"""
 
   def __init__(self, name, fv):
-    super(BucketsDelete, self).__init__(name, fv)
+    super(ObjectsCompose, self).__init__(name, fv)
+    flags.DEFINE_string(
+        'composeRequest',
+        None,
+        u'A ComposeRequest resource to be passed as the request body.',
+        flag_values=fv)
+    flags.DEFINE_enum(
+        'destinationPredefinedAcl',
+        u'authenticatedRead',
+        [u'authenticatedRead', u'bucketOwnerFullControl', u'bucketOwnerRead', u'private', u'projectPrivate', u'publicRead'],
+        u'Apply a predefined set of access controls to the destination '
+        u'object.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifGenerationMatch',
+        None,
+        u"Makes the operation conditional on whether the object's current "
+        u'generation matches the given value.',
+        flag_values=fv)
     flags.DEFINE_string(
         'ifMetagenerationMatch',
         None,
-        u'If set, only deletes the bucket if its metageneration matches this '
-        u'value.',
+        u"Makes the operation conditional on whether the object's current "
+        u'metageneration matches the given value.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'download_filename',
+        '',
+        'Filename to use for download.',
+        flag_values=fv)
+    flags.DEFINE_boolean(
+        'overwrite',
+        'False',
+        'If True, overwrite the existing file when downloading.',
+        flag_values=fv)
+
+  def RunWithArgs(self, destinationBucket, destinationObject):
+    """Concatenates a list of existing objects into a new object in the same
+    bucket.
+
+    Args:
+      destinationBucket: Name of the bucket in which to store the new object.
+      destinationObject: Name of the new object.
+
+    Flags:
+      composeRequest: A ComposeRequest resource to be passed as the request
+        body.
+      destinationPredefinedAcl: Apply a predefined set of access controls to
+        the destination object.
+      ifGenerationMatch: Makes the operation conditional on whether the
+        object's current generation matches the given value.
+      ifMetagenerationMatch: Makes the operation conditional on whether the
+        object's current metageneration matches the given value.
+      download_filename: Filename to use for download.
+      overwrite: If True, overwrite the existing file when downloading.
+    """
+    client = GetClientFromFlags()
+    global_params = GetGlobalParamsFromFlags()
+    request = messages.StorageObjectsComposeRequest(
+        destinationBucket=destinationBucket.decode('utf8'),
+        destinationObject=destinationObject.decode('utf8'),
+        )
+    if FLAGS['composeRequest'].present:
+      request.composeRequest = apitools_base.JsonToMessage(messages.ComposeRequest, FLAGS.composeRequest)
+    if FLAGS['destinationPredefinedAcl'].present:
+      request.destinationPredefinedAcl = messages.StorageObjectsComposeRequest.DestinationPredefinedAclValueValuesEnum(FLAGS.destinationPredefinedAcl)
+    if FLAGS['ifGenerationMatch'].present:
+      request.ifGenerationMatch = int(FLAGS.ifGenerationMatch)
+    if FLAGS['ifMetagenerationMatch'].present:
+      request.ifMetagenerationMatch = int(FLAGS.ifMetagenerationMatch)
+    download = None
+    if FLAGS.download_filename:
+      download = apitools_base.Download.FromFile(FLAGS.download_filename, overwrite=FLAGS.overwrite,
+          progress_callback=apitools_base.DownloadProgressPrinter,
+          finish_callback=apitools_base.DownloadCompletePrinter)
+    result = client.objects.Compose(
+        request, global_params=global_params, download=download)
+    print apitools_base_cli.FormatOutput(result)
+
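+# Illustrative sketch (not part of the generated command): the same compose
+# call can be issued directly against the apitools client. The JSON shape of
+# ComposeRequest (a "sourceObjects" list of names) is assumed from the
+# storage v1 schema; bucket and object names are placeholders.
+#
+#   compose_body = apitools_base.JsonToMessage(
+#       messages.ComposeRequest,
+#       '{"sourceObjects": [{"name": "part-1"}, {"name": "part-2"}]}')
+#   request = messages.StorageObjectsComposeRequest(
+#       destinationBucket='my-bucket',
+#       destinationObject='merged',
+#       composeRequest=compose_body)
+#   GetClientFromFlags().objects.Compose(request)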
+
+class ObjectsCopy(apitools_base_cli.NewCmd):
+  """Command wrapping objects.Copy."""
+
+  usage = """objects_copy <sourceBucket> <sourceObject> <destinationBucket> <destinationObject>"""
+
+  def __init__(self, name, fv):
+    super(ObjectsCopy, self).__init__(name, fv)
+    flags.DEFINE_enum(
+        'destinationPredefinedAcl',
+        u'authenticatedRead',
+        [u'authenticatedRead', u'bucketOwnerFullControl', u'bucketOwnerRead', u'private', u'projectPrivate', u'publicRead'],
+        u'Apply a predefined set of access controls to the destination '
+        u'object.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifGenerationMatch',
+        None,
+        u"Makes the operation conditional on whether the destination object's"
+        u' current generation matches the given value.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifGenerationNotMatch',
+        None,
+        u"Makes the operation conditional on whether the destination object's"
+        u' current generation does not match the given value.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifMetagenerationMatch',
+        None,
+        u"Makes the operation conditional on whether the destination object's"
+        u' current metageneration matches the given value.',
         flag_values=fv)
     flags.DEFINE_string(
         'ifMetagenerationNotMatch',
         None,
-        u'If set, only deletes the bucket if its metageneration does not '
-        u'match this value.',
+        u"Makes the operation conditional on whether the destination object's"
+        u' current metageneration does not match the given value.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifSourceGenerationMatch',
+        None,
+        u"Makes the operation conditional on whether the source object's "
+        u'generation matches the given value.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifSourceGenerationNotMatch',
+        None,
+        u"Makes the operation conditional on whether the source object's "
+        u'generation does not match the given value.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifSourceMetagenerationMatch',
+        None,
+        u"Makes the operation conditional on whether the source object's "
+        u'current metageneration matches the given value.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifSourceMetagenerationNotMatch',
+        None,
+        u"Makes the operation conditional on whether the source object's "
+        u'current metageneration does not match the given value.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'object',
+        None,
+        u'A Object resource to be passed as the request body.',
+        flag_values=fv)
+    flags.DEFINE_enum(
+        'projection',
+        u'full',
+        [u'full', u'noAcl'],
+        u'Set of properties to return. Defaults to noAcl, unless the object '
+        u'resource specifies the acl property, when it defaults to full.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'sourceGeneration',
+        None,
+        u'If present, selects a specific revision of the source object (as '
+        u'opposed to the latest version, the default).',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'download_filename',
+        '',
+        'Filename to use for download.',
+        flag_values=fv)
+    flags.DEFINE_boolean(
+        'overwrite',
+        'False',
+        'If True, overwrite the existing file when downloading.',
         flag_values=fv)
 
-  def RunWithArgs(self, bucket):
-    """Permanently deletes an empty bucket.
+  def RunWithArgs(self, sourceBucket, sourceObject, destinationBucket, destinationObject):
+    """Copies a source object to a destination object. Optionally overrides
+    metadata.
 
     Args:
-      bucket: Name of a bucket.
+      sourceBucket: Name of the bucket in which to find the source object.
+      sourceObject: Name of the source object.
+      destinationBucket: Name of the bucket in which to store the new object.
+        Overrides the provided object metadata's bucket value, if any.
+      destinationObject: Name of the new object. Required when the object
+        metadata is not otherwise provided. Overrides the object metadata's
+        name value, if any.
 
     Flags:
-      ifMetagenerationMatch: If set, only deletes the bucket if its
-        metageneration matches this value.
-      ifMetagenerationNotMatch: If set, only deletes the bucket if its
-        metageneration does not match this value.
+      destinationPredefinedAcl: Apply a predefined set of access controls to
+        the destination object.
+      ifGenerationMatch: Makes the operation conditional on whether the
+        destination object's current generation matches the given value.
+      ifGenerationNotMatch: Makes the operation conditional on whether the
+        destination object's current generation does not match the given
+        value.
+      ifMetagenerationMatch: Makes the operation conditional on whether the
+        destination object's current metageneration matches the given value.
+      ifMetagenerationNotMatch: Makes the operation conditional on whether the
+        destination object's current metageneration does not match the given
+        value.
+      ifSourceGenerationMatch: Makes the operation conditional on whether the
+        source object's generation matches the given value.
+      ifSourceGenerationNotMatch: Makes the operation conditional on whether
+        the source object's generation does not match the given value.
+      ifSourceMetagenerationMatch: Makes the operation conditional on whether
+        the source object's current metageneration matches the given value.
+      ifSourceMetagenerationNotMatch: Makes the operation conditional on
+        whether the source object's current metageneration does not match the
+        given value.
+      object: A Object resource to be passed as the request body.
+      projection: Set of properties to return. Defaults to noAcl, unless the
+        object resource specifies the acl property, when it defaults to full.
+      sourceGeneration: If present, selects a specific revision of the source
+        object (as opposed to the latest version, the default).
+      download_filename: Filename to use for download.
+      overwrite: If True, overwrite the existing file when downloading.
     """
     client = GetClientFromFlags()
     global_params = GetGlobalParamsFromFlags()
-    request = messages.StorageBucketsDeleteRequest(
-        bucket=bucket.decode('utf8'),
+    request = messages.StorageObjectsCopyRequest(
+        sourceBucket=sourceBucket.decode('utf8'),
+        sourceObject=sourceObject.decode('utf8'),
+        destinationBucket=destinationBucket.decode('utf8'),
+        destinationObject=destinationObject.decode('utf8'),
         )
+    if FLAGS['destinationPredefinedAcl'].present:
+      request.destinationPredefinedAcl = messages.StorageObjectsCopyRequest.DestinationPredefinedAclValueValuesEnum(FLAGS.destinationPredefinedAcl)
+    if FLAGS['ifGenerationMatch'].present:
+      request.ifGenerationMatch = int(FLAGS.ifGenerationMatch)
+    if FLAGS['ifGenerationNotMatch'].present:
+      request.ifGenerationNotMatch = int(FLAGS.ifGenerationNotMatch)
     if FLAGS['ifMetagenerationMatch'].present:
       request.ifMetagenerationMatch = int(FLAGS.ifMetagenerationMatch)
     if FLAGS['ifMetagenerationNotMatch'].present:
       request.ifMetagenerationNotMatch = int(FLAGS.ifMetagenerationNotMatch)
-    result = client.buckets.Delete(
-        request, global_params=global_params)
+    if FLAGS['ifSourceGenerationMatch'].present:
+      request.ifSourceGenerationMatch = int(FLAGS.ifSourceGenerationMatch)
+    if FLAGS['ifSourceGenerationNotMatch'].present:
+      request.ifSourceGenerationNotMatch = int(FLAGS.ifSourceGenerationNotMatch)
+    if FLAGS['ifSourceMetagenerationMatch'].present:
+      request.ifSourceMetagenerationMatch = int(FLAGS.ifSourceMetagenerationMatch)
+    if FLAGS['ifSourceMetagenerationNotMatch'].present:
+      request.ifSourceMetagenerationNotMatch = int(FLAGS.ifSourceMetagenerationNotMatch)
+    if FLAGS['object'].present:
+      request.object = apitools_base.JsonToMessage(messages.Object, FLAGS.object)
+    if FLAGS['projection'].present:
+      request.projection = messages.StorageObjectsCopyRequest.ProjectionValueValuesEnum(FLAGS.projection)
+    if FLAGS['sourceGeneration'].present:
+      request.sourceGeneration = int(FLAGS.sourceGeneration)
+    download = None
+    if FLAGS.download_filename:
+      download = apitools_base.Download.FromFile(FLAGS.download_filename, overwrite=FLAGS.overwrite,
+          progress_callback=apitools_base.DownloadProgressPrinter,
+          finish_callback=apitools_base.DownloadCompletePrinter)
+    result = client.objects.Copy(
+        request, global_params=global_params, download=download)
     print apitools_base_cli.FormatOutput(result)
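+# Illustrative sketch: a copy that pins a specific source revision and, via
+# ifGenerationMatch=0, requires that no live destination object already
+# exists. The precondition semantics and the placeholder names/generation
+# number are assumptions, not taken from this file.
+#
+#   request = messages.StorageObjectsCopyRequest(
+#       sourceBucket='src-bucket', sourceObject='src-obj',
+#       destinationBucket='dst-bucket', destinationObject='dst-obj',
+#       sourceGeneration=1234567890, ifGenerationMatch=0)
+#   GetClientFromFlags().objects.Copy(request)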
 
 
-class BucketsGet(apitools_base_cli.NewCmd):
-  """Command wrapping buckets.Get."""
+class ObjectsDelete(apitools_base_cli.NewCmd):
+  """Command wrapping objects.Delete."""
 
-  usage = """buckets_get <bucket>"""
+  usage = """objects_delete <bucket> <object>"""
 
   def __init__(self, name, fv):
-    super(BucketsGet, self).__init__(name, fv)
+    super(ObjectsDelete, self).__init__(name, fv)
+    flags.DEFINE_string(
+        'generation',
+        None,
+        u'If present, permanently deletes a specific revision of this object '
+        u'(as opposed to the latest version, the default).',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifGenerationMatch',
+        None,
+        u"Makes the operation conditional on whether the object's current "
+        u'generation matches the given value.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifGenerationNotMatch',
+        None,
+        u"Makes the operation conditional on whether the object's current "
+        u'generation does not match the given value.',
+        flag_values=fv)
     flags.DEFINE_string(
         'ifMetagenerationMatch',
         None,
-        u'Makes the return of the bucket metadata conditional on whether the '
-        u"bucket's current metageneration matches the given value.",
+        u"Makes the operation conditional on whether the object's current "
+        u'metageneration matches the given value.',
         flag_values=fv)
     flags.DEFINE_string(
         'ifMetagenerationNotMatch',
         None,
-        u'Makes the return of the bucket metadata conditional on whether the '
-        u"bucket's current metageneration does not match the given value.",
+        u"Makes the operation conditional on whether the object's current "
+        u'metageneration does not match the given value.',
+        flag_values=fv)
+
+  def RunWithArgs(self, bucket, object):
+    """Deletes an object and its metadata. Deletions are permanent if
+    versioning is not enabled for the bucket, or if the generation parameter
+    is used.
+
+    Args:
+      bucket: Name of the bucket in which the object resides.
+      object: Name of the object.
+
+    Flags:
+      generation: If present, permanently deletes a specific revision of this
+        object (as opposed to the latest version, the default).
+      ifGenerationMatch: Makes the operation conditional on whether the
+        object's current generation matches the given value.
+      ifGenerationNotMatch: Makes the operation conditional on whether the
+        object's current generation does not match the given value.
+      ifMetagenerationMatch: Makes the operation conditional on whether the
+        object's current metageneration matches the given value.
+      ifMetagenerationNotMatch: Makes the operation conditional on whether the
+        object's current metageneration does not match the given value.
+    """
+    client = GetClientFromFlags()
+    global_params = GetGlobalParamsFromFlags()
+    request = messages.StorageObjectsDeleteRequest(
+        bucket=bucket.decode('utf8'),
+        object=object.decode('utf8'),
+        )
+    if FLAGS['generation'].present:
+      request.generation = int(FLAGS.generation)
+    if FLAGS['ifGenerationMatch'].present:
+      request.ifGenerationMatch = int(FLAGS.ifGenerationMatch)
+    if FLAGS['ifGenerationNotMatch'].present:
+      request.ifGenerationNotMatch = int(FLAGS.ifGenerationNotMatch)
+    if FLAGS['ifMetagenerationMatch'].present:
+      request.ifMetagenerationMatch = int(FLAGS.ifMetagenerationMatch)
+    if FLAGS['ifMetagenerationNotMatch'].present:
+      request.ifMetagenerationNotMatch = int(FLAGS.ifMetagenerationNotMatch)
+    result = client.objects.Delete(
+        request, global_params=global_params)
+    print apitools_base_cli.FormatOutput(result)
+
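+# Illustrative sketch: as the docstring above notes, a delete is only
+# recoverable when bucket versioning is enabled and no generation is given;
+# supplying a generation permanently removes that revision. Names and the
+# generation number are placeholders.
+#
+#   request = messages.StorageObjectsDeleteRequest(
+#       bucket='my-bucket', object='obsolete.txt', generation=1234567890)
+#   GetClientFromFlags().objects.Delete(request)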
+
+class ObjectsGet(apitools_base_cli.NewCmd):
+  """Command wrapping objects.Get."""
+
+  usage = """objects_get <bucket> <object>"""
+
+  def __init__(self, name, fv):
+    super(ObjectsGet, self).__init__(name, fv)
+    flags.DEFINE_string(
+        'generation',
+        None,
+        u'If present, selects a specific revision of this object (as opposed '
+        u'to the latest version, the default).',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifGenerationMatch',
+        None,
+        u"Makes the operation conditional on whether the object's generation "
+        u'matches the given value.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifGenerationNotMatch',
+        None,
+        u"Makes the operation conditional on whether the object's generation "
+        u'does not match the given value.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifMetagenerationMatch',
+        None,
+        u"Makes the operation conditional on whether the object's current "
+        u'metageneration matches the given value.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifMetagenerationNotMatch',
+        None,
+        u"Makes the operation conditional on whether the object's current "
+        u'metageneration does not match the given value.',
         flag_values=fv)
     flags.DEFINE_enum(
         'projection',
@@ -2538,116 +2256,251 @@
         [u'full', u'noAcl'],
         u'Set of properties to return. Defaults to noAcl.',
         flag_values=fv)
+    flags.DEFINE_string(
+        'download_filename',
+        '',
+        'Filename to use for download.',
+        flag_values=fv)
+    flags.DEFINE_boolean(
+        'overwrite',
+        'False',
+        'If True, overwrite the existing file when downloading.',
+        flag_values=fv)
 
-  def RunWithArgs(self, bucket):
-    """Returns metadata for the specified bucket.
+  def RunWithArgs(self, bucket, object):
+    """Retrieves an object or its metadata.
 
     Args:
-      bucket: Name of a bucket.
+      bucket: Name of the bucket in which the object resides.
+      object: Name of the object.
 
     Flags:
-      ifMetagenerationMatch: Makes the return of the bucket metadata
-        conditional on whether the bucket's current metageneration matches the
-        given value.
-      ifMetagenerationNotMatch: Makes the return of the bucket metadata
-        conditional on whether the bucket's current metageneration does not
-        match the given value.
+      generation: If present, selects a specific revision of this object (as
+        opposed to the latest version, the default).
+      ifGenerationMatch: Makes the operation conditional on whether the
+        object's generation matches the given value.
+      ifGenerationNotMatch: Makes the operation conditional on whether the
+        object's generation does not match the given value.
+      ifMetagenerationMatch: Makes the operation conditional on whether the
+        object's current metageneration matches the given value.
+      ifMetagenerationNotMatch: Makes the operation conditional on whether the
+        object's current metageneration does not match the given value.
       projection: Set of properties to return. Defaults to noAcl.
+      download_filename: Filename to use for download.
+      overwrite: If True, overwrite the existing file when downloading.
     """
     client = GetClientFromFlags()
     global_params = GetGlobalParamsFromFlags()
-    request = messages.StorageBucketsGetRequest(
+    request = messages.StorageObjectsGetRequest(
         bucket=bucket.decode('utf8'),
+        object=object.decode('utf8'),
         )
+    if FLAGS['generation'].present:
+      request.generation = int(FLAGS.generation)
+    if FLAGS['ifGenerationMatch'].present:
+      request.ifGenerationMatch = int(FLAGS.ifGenerationMatch)
+    if FLAGS['ifGenerationNotMatch'].present:
+      request.ifGenerationNotMatch = int(FLAGS.ifGenerationNotMatch)
     if FLAGS['ifMetagenerationMatch'].present:
       request.ifMetagenerationMatch = int(FLAGS.ifMetagenerationMatch)
     if FLAGS['ifMetagenerationNotMatch'].present:
       request.ifMetagenerationNotMatch = int(FLAGS.ifMetagenerationNotMatch)
     if FLAGS['projection'].present:
-      request.projection = messages.StorageBucketsGetRequest.ProjectionValueValuesEnum(FLAGS.projection)
-    result = client.buckets.Get(
-        request, global_params=global_params)
+      request.projection = messages.StorageObjectsGetRequest.ProjectionValueValuesEnum(FLAGS.projection)
+    download = None
+    if FLAGS.download_filename:
+      download = apitools_base.Download.FromFile(FLAGS.download_filename, overwrite=FLAGS.overwrite,
+          progress_callback=apitools_base.DownloadProgressPrinter,
+          finish_callback=apitools_base.DownloadCompletePrinter)
+    result = client.objects.Get(
+        request, global_params=global_params, download=download)
     print apitools_base_cli.FormatOutput(result)
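+# Illustrative sketch of fetching object media rather than metadata: a
+# Download built with apitools_base.Download.FromFile is passed to
+# client.objects.Get, which streams the content into the named local file.
+# Filenames and object names are placeholders.
+#
+#   download = apitools_base.Download.FromFile('/tmp/photo.jpg', overwrite=True)
+#   request = messages.StorageObjectsGetRequest(
+#       bucket='my-bucket', object='photo.jpg')
+#   GetClientFromFlags().objects.Get(request, download=download)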
 
 
-class BucketsInsert(apitools_base_cli.NewCmd):
-  """Command wrapping buckets.Insert."""
+class ObjectsInsert(apitools_base_cli.NewCmd):
+  """Command wrapping objects.Insert."""
 
-  usage = """buckets_insert <project>"""
+  usage = """objects_insert <bucket>"""
 
   def __init__(self, name, fv):
-    super(BucketsInsert, self).__init__(name, fv)
+    super(ObjectsInsert, self).__init__(name, fv)
     flags.DEFINE_string(
-        'bucket',
+        'contentEncoding',
         None,
-        u'A Bucket resource to be passed as the request body.',
+        u'If set, sets the contentEncoding property of the final object to '
+        u'this value. Setting this parameter is equivalent to setting the '
+        u'contentEncoding metadata property. This can be useful when '
+        u'uploading an object with uploadType=media to indicate the encoding '
+        u'of the content being uploaded.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifGenerationMatch',
+        None,
+        u"Makes the operation conditional on whether the object's current "
+        u'generation matches the given value.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifGenerationNotMatch',
+        None,
+        u"Makes the operation conditional on whether the object's current "
+        u'generation does not match the given value.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifMetagenerationMatch',
+        None,
+        u"Makes the operation conditional on whether the object's current "
+        u'metageneration matches the given value.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifMetagenerationNotMatch',
+        None,
+        u"Makes the operation conditional on whether the object's current "
+        u'metageneration does not match the given value.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'name',
+        None,
+        u'Name of the object. Required when the object metadata is not '
+        u"otherwise provided. Overrides the object metadata's name value, if "
+        u'any.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'object',
+        None,
+        u'A Object resource to be passed as the request body.',
         flag_values=fv)
     flags.DEFINE_enum(
         'predefinedAcl',
         u'authenticatedRead',
-        [u'authenticatedRead', u'private', u'projectPrivate', u'publicRead', u'publicReadWrite'],
-        u'Apply a predefined set of access controls to this bucket.',
-        flag_values=fv)
-    flags.DEFINE_enum(
-        'predefinedDefaultObjectAcl',
-        u'authenticatedRead',
         [u'authenticatedRead', u'bucketOwnerFullControl', u'bucketOwnerRead', u'private', u'projectPrivate', u'publicRead'],
-        u'Apply a predefined set of default object access controls to this '
-        u'bucket.',
+        u'Apply a predefined set of access controls to this object.',
         flag_values=fv)
     flags.DEFINE_enum(
         'projection',
         u'full',
         [u'full', u'noAcl'],
-        u'Set of properties to return. Defaults to noAcl, unless the bucket '
-        u'resource specifies acl or defaultObjectAcl properties, when it '
-        u'defaults to full.',
+        u'Set of properties to return. Defaults to noAcl, unless the object '
+        u'resource specifies the acl property, when it defaults to full.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'upload_filename',
+        '',
+        'Filename to use for upload.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'upload_mime_type',
+        '',
+        'MIME type to use for the upload. Only needed if the extension on '
+        '--upload_filename does not determine the correct (or any) MIME '
+        'type.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'download_filename',
+        '',
+        'Filename to use for download.',
+        flag_values=fv)
+    flags.DEFINE_boolean(
+        'overwrite',
+        'False',
+        'If True, overwrite the existing file when downloading.',
         flag_values=fv)
 
-  def RunWithArgs(self, project):
-    """Creates a new bucket.
+  def RunWithArgs(self, bucket):
+    """Stores a new object and metadata.
 
     Args:
-      project: A valid API project identifier.
+      bucket: Name of the bucket in which to store the new object. Overrides
+        the provided object metadata's bucket value, if any.
 
     Flags:
-      bucket: A Bucket resource to be passed as the request body.
-      predefinedAcl: Apply a predefined set of access controls to this bucket.
-      predefinedDefaultObjectAcl: Apply a predefined set of default object
-        access controls to this bucket.
+      contentEncoding: If set, sets the contentEncoding property of the final
+        object to this value. Setting this parameter is equivalent to setting
+        the contentEncoding metadata property. This can be useful when
+        uploading an object with uploadType=media to indicate the encoding of
+        the content being uploaded.
+      ifGenerationMatch: Makes the operation conditional on whether the
+        object's current generation matches the given value.
+      ifGenerationNotMatch: Makes the operation conditional on whether the
+        object's current generation does not match the given value.
+      ifMetagenerationMatch: Makes the operation conditional on whether the
+        object's current metageneration matches the given value.
+      ifMetagenerationNotMatch: Makes the operation conditional on whether the
+        object's current metageneration does not match the given value.
+      name: Name of the object. Required when the object metadata is not
+        otherwise provided. Overrides the object metadata's name value, if
+        any.
+      object: A Object resource to be passed as the request body.
+      predefinedAcl: Apply a predefined set of access controls to this object.
       projection: Set of properties to return. Defaults to noAcl, unless the
-        bucket resource specifies acl or defaultObjectAcl properties, when it
-        defaults to full.
+        object resource specifies the acl property, when it defaults to full.
+      upload_filename: Filename to use for upload.
+      upload_mime_type: MIME type to use for the upload. Only needed if the
+        extension on --upload_filename does not determine the correct (or any)
+        MIME type.
+      download_filename: Filename to use for download.
+      overwrite: If True, overwrite the existing file when downloading.
     """
     client = GetClientFromFlags()
     global_params = GetGlobalParamsFromFlags()
-    request = messages.StorageBucketsInsertRequest(
-        project=project.decode('utf8'),
+    request = messages.StorageObjectsInsertRequest(
+        bucket=bucket.decode('utf8'),
         )
-    if FLAGS['bucket'].present:
-      request.bucket = apitools_base.JsonToMessage(messages.Bucket, FLAGS.bucket)
+    if FLAGS['contentEncoding'].present:
+      request.contentEncoding = FLAGS.contentEncoding.decode('utf8')
+    if FLAGS['ifGenerationMatch'].present:
+      request.ifGenerationMatch = int(FLAGS.ifGenerationMatch)
+    if FLAGS['ifGenerationNotMatch'].present:
+      request.ifGenerationNotMatch = int(FLAGS.ifGenerationNotMatch)
+    if FLAGS['ifMetagenerationMatch'].present:
+      request.ifMetagenerationMatch = int(FLAGS.ifMetagenerationMatch)
+    if FLAGS['ifMetagenerationNotMatch'].present:
+      request.ifMetagenerationNotMatch = int(FLAGS.ifMetagenerationNotMatch)
+    if FLAGS['name'].present:
+      request.name = FLAGS.name.decode('utf8')
+    if FLAGS['object'].present:
+      request.object = apitools_base.JsonToMessage(messages.Object, FLAGS.object)
     if FLAGS['predefinedAcl'].present:
-      request.predefinedAcl = messages.StorageBucketsInsertRequest.PredefinedAclValueValuesEnum(FLAGS.predefinedAcl)
-    if FLAGS['predefinedDefaultObjectAcl'].present:
-      request.predefinedDefaultObjectAcl = messages.StorageBucketsInsertRequest.PredefinedDefaultObjectAclValueValuesEnum(FLAGS.predefinedDefaultObjectAcl)
+      request.predefinedAcl = messages.StorageObjectsInsertRequest.PredefinedAclValueValuesEnum(FLAGS.predefinedAcl)
     if FLAGS['projection'].present:
-      request.projection = messages.StorageBucketsInsertRequest.ProjectionValueValuesEnum(FLAGS.projection)
-    result = client.buckets.Insert(
-        request, global_params=global_params)
+      request.projection = messages.StorageObjectsInsertRequest.ProjectionValueValuesEnum(FLAGS.projection)
+    upload = None
+    if FLAGS.upload_filename:
+      upload = apitools_base.Upload.FromFile(
+          FLAGS.upload_filename, FLAGS.upload_mime_type,
+          progress_callback=apitools_base.UploadProgressPrinter,
+          finish_callback=apitools_base.UploadCompletePrinter)
+    download = None
+    if FLAGS.download_filename:
+      download = apitools_base.Download.FromFile(FLAGS.download_filename, overwrite=FLAGS.overwrite,
+          progress_callback=apitools_base.DownloadProgressPrinter,
+          finish_callback=apitools_base.DownloadCompletePrinter)
+    result = client.objects.Insert(
+        request, global_params=global_params, upload=upload, download=download)
     print apitools_base_cli.FormatOutput(result)
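+# Illustrative sketch of a simple upload: an Upload built from a local file is
+# handed to client.objects.Insert along with the target bucket and object
+# name. The explicit MIME type mirrors --upload_mime_type and is only needed
+# when it cannot be inferred from the filename; all names are placeholders.
+#
+#   upload = apitools_base.Upload.FromFile('notes.txt', 'text/plain')
+#   request = messages.StorageObjectsInsertRequest(
+#       bucket='my-bucket', name='notes.txt')
+#   GetClientFromFlags().objects.Insert(request, upload=upload)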
 
 
-class BucketsList(apitools_base_cli.NewCmd):
-  """Command wrapping buckets.List."""
+class ObjectsList(apitools_base_cli.NewCmd):
+  """Command wrapping objects.List."""
 
-  usage = """buckets_list <project>"""
+  usage = """objects_list <bucket>"""
 
   def __init__(self, name, fv):
-    super(BucketsList, self).__init__(name, fv)
+    super(ObjectsList, self).__init__(name, fv)
+    flags.DEFINE_string(
+        'delimiter',
+        None,
+        u'Returns results in a directory-like mode. items will contain only '
+        u'objects whose names, aside from the prefix, do not contain '
+        u'delimiter. Objects whose names, aside from the prefix, contain '
+        u'delimiter will have their name, truncated after the delimiter, '
+        u'returned in prefixes. Duplicate prefixes are omitted.',
+        flag_values=fv)
     flags.DEFINE_integer(
         'maxResults',
         None,
-        u'Maximum number of buckets to return.',
+        u'Maximum number of items plus prefixes to return. As duplicate '
+        u'prefixes are omitted, fewer total results may be returned than '
+        u'requested. The default value of this parameter is 1,000 items.',
         flag_values=fv)
     flags.DEFINE_string(
         'pageToken',
@@ -2658,7 +2511,7 @@
     flags.DEFINE_string(
         'prefix',
         None,
-        u'Filter results to buckets whose names begin with this prefix.',
+        u'Filter results to objects whose names begin with this prefix.',
         flag_values=fv)
     flags.DEFINE_enum(
         'projection',
@@ -2666,25 +2519,42 @@
         [u'full', u'noAcl'],
         u'Set of properties to return. Defaults to noAcl.',
         flag_values=fv)
+    flags.DEFINE_boolean(
+        'versions',
+        None,
+        u'If true, lists all versions of an object as distinct results. The '
+        u'default is false. For more information, see Object Versioning.',
+        flag_values=fv)
 
-  def RunWithArgs(self, project):
-    """Retrieves a list of buckets for a given project.
+  def RunWithArgs(self, bucket):
+    """Retrieves a list of objects matching the criteria.
 
     Args:
-      project: A valid API project identifier.
+      bucket: Name of the bucket in which to look for objects.
 
     Flags:
-      maxResults: Maximum number of buckets to return.
+      delimiter: Returns results in a directory-like mode. items will contain
+        only objects whose names, aside from the prefix, do not contain
+        delimiter. Objects whose names, aside from the prefix, contain
+        delimiter will have their name, truncated after the delimiter,
+        returned in prefixes. Duplicate prefixes are omitted.
+      maxResults: Maximum number of items plus prefixes to return. As
+        duplicate prefixes are omitted, fewer total results may be returned
+        than requested. The default value of this parameter is 1,000 items.
       pageToken: A previously-returned page token representing part of the
         larger set of results to view.
-      prefix: Filter results to buckets whose names begin with this prefix.
+      prefix: Filter results to objects whose names begin with this prefix.
       projection: Set of properties to return. Defaults to noAcl.
+      versions: If true, lists all versions of an object as distinct results.
+        The default is false. For more information, see Object Versioning.
     """
     client = GetClientFromFlags()
     global_params = GetGlobalParamsFromFlags()
-    request = messages.StorageBucketsListRequest(
-        project=project.decode('utf8'),
+    request = messages.StorageObjectsListRequest(
+        bucket=bucket.decode('utf8'),
         )
+    if FLAGS['delimiter'].present:
+      request.delimiter = FLAGS.delimiter.decode('utf8')
     if FLAGS['maxResults'].present:
       request.maxResults = FLAGS.maxResults
     if FLAGS['pageToken'].present:
@@ -2692,48 +2562,61 @@
     if FLAGS['prefix'].present:
       request.prefix = FLAGS.prefix.decode('utf8')
     if FLAGS['projection'].present:
-      request.projection = messages.StorageBucketsListRequest.ProjectionValueValuesEnum(FLAGS.projection)
-    result = client.buckets.List(
+      request.projection = messages.StorageObjectsListRequest.ProjectionValueValuesEnum(FLAGS.projection)
+    if FLAGS['versions'].present:
+      request.versions = FLAGS.versions
+    result = client.objects.List(
         request, global_params=global_params)
     print apitools_base_cli.FormatOutput(result)
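+# Illustrative sketch of the directory-like listing described above: with
+# prefix='photos/' and delimiter='/', the response's items hold only objects
+# directly under photos/, while deeper names are collapsed into prefixes.
+# Bucket and prefix values are placeholders.
+#
+#   request = messages.StorageObjectsListRequest(
+#       bucket='my-bucket', prefix='photos/', delimiter='/')
+#   result = GetClientFromFlags().objects.List(request)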
 
 
-class BucketsPatch(apitools_base_cli.NewCmd):
-  """Command wrapping buckets.Patch."""
+class ObjectsPatch(apitools_base_cli.NewCmd):
+  """Command wrapping objects.Patch."""
 
-  usage = """buckets_patch <bucket>"""
+  usage = """objects_patch <bucket> <object>"""
 
   def __init__(self, name, fv):
-    super(BucketsPatch, self).__init__(name, fv)
+    super(ObjectsPatch, self).__init__(name, fv)
     flags.DEFINE_string(
-        'bucketResource',
+        'generation',
         None,
-        u'A Bucket resource to be passed as the request body.',
+        u'If present, selects a specific revision of this object (as opposed '
+        u'to the latest version, the default).',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifGenerationMatch',
+        None,
+        u"Makes the operation conditional on whether the object's current "
+        u'generation matches the given value.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifGenerationNotMatch',
+        None,
+        u"Makes the operation conditional on whether the object's current "
+        u'generation does not match the given value.',
         flag_values=fv)
     flags.DEFINE_string(
         'ifMetagenerationMatch',
         None,
-        u'Makes the return of the bucket metadata conditional on whether the '
-        u"bucket's current metageneration matches the given value.",
+        u"Makes the operation conditional on whether the object's current "
+        u'metageneration matches the given value.',
         flag_values=fv)
     flags.DEFINE_string(
         'ifMetagenerationNotMatch',
         None,
-        u'Makes the return of the bucket metadata conditional on whether the '
-        u"bucket's current metageneration does not match the given value.",
+        u"Makes the operation conditional on whether the object's current "
+        u'metageneration does not match the given value.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'objectResource',
+        None,
+        u'A Object resource to be passed as the request body.',
         flag_values=fv)
     flags.DEFINE_enum(
         'predefinedAcl',
         u'authenticatedRead',
-        [u'authenticatedRead', u'private', u'projectPrivate', u'publicRead', u'publicReadWrite'],
-        u'Apply a predefined set of access controls to this bucket.',
-        flag_values=fv)
-    flags.DEFINE_enum(
-        'predefinedDefaultObjectAcl',
-        u'authenticatedRead',
         [u'authenticatedRead', u'bucketOwnerFullControl', u'bucketOwnerRead', u'private', u'projectPrivate', u'publicRead'],
-        u'Apply a predefined set of default object access controls to this '
-        u'bucket.',
+        u'Apply a predefined set of access controls to this object.',
         flag_values=fv)
     flags.DEFINE_enum(
         'projection',
@@ -2742,83 +2625,299 @@
         u'Set of properties to return. Defaults to full.',
         flag_values=fv)
 
-  def RunWithArgs(self, bucket):
-    """Updates a bucket. This method supports patch semantics.
+  def RunWithArgs(self, bucket, object):
+    """Updates an object's metadata. This method supports patch semantics.
 
     Args:
-      bucket: Name of a bucket.
+      bucket: Name of the bucket in which the object resides.
+      object: Name of the object.
 
     Flags:
-      bucketResource: A Bucket resource to be passed as the request body.
-      ifMetagenerationMatch: Makes the return of the bucket metadata
-        conditional on whether the bucket's current metageneration matches the
-        given value.
-      ifMetagenerationNotMatch: Makes the return of the bucket metadata
-        conditional on whether the bucket's current metageneration does not
-        match the given value.
-      predefinedAcl: Apply a predefined set of access controls to this bucket.
-      predefinedDefaultObjectAcl: Apply a predefined set of default object
-        access controls to this bucket.
+      generation: If present, selects a specific revision of this object (as
+        opposed to the latest version, the default).
+      ifGenerationMatch: Makes the operation conditional on whether the
+        object's current generation matches the given value.
+      ifGenerationNotMatch: Makes the operation conditional on whether the
+        object's current generation does not match the given value.
+      ifMetagenerationMatch: Makes the operation conditional on whether the
+        object's current metageneration matches the given value.
+      ifMetagenerationNotMatch: Makes the operation conditional on whether the
+        object's current metageneration does not match the given value.
+      objectResource: An Object resource to be passed as the request body.
+      predefinedAcl: Apply a predefined set of access controls to this object.
       projection: Set of properties to return. Defaults to full.
     """
     client = GetClientFromFlags()
     global_params = GetGlobalParamsFromFlags()
-    request = messages.StorageBucketsPatchRequest(
+    request = messages.StorageObjectsPatchRequest(
         bucket=bucket.decode('utf8'),
+        object=object.decode('utf8'),
         )
-    if FLAGS['bucketResource'].present:
-      request.bucketResource = apitools_base.JsonToMessage(messages.Bucket, FLAGS.bucketResource)
+    if FLAGS['generation'].present:
+      request.generation = int(FLAGS.generation)
+    if FLAGS['ifGenerationMatch'].present:
+      request.ifGenerationMatch = int(FLAGS.ifGenerationMatch)
+    if FLAGS['ifGenerationNotMatch'].present:
+      request.ifGenerationNotMatch = int(FLAGS.ifGenerationNotMatch)
     if FLAGS['ifMetagenerationMatch'].present:
       request.ifMetagenerationMatch = int(FLAGS.ifMetagenerationMatch)
     if FLAGS['ifMetagenerationNotMatch'].present:
       request.ifMetagenerationNotMatch = int(FLAGS.ifMetagenerationNotMatch)
+    if FLAGS['objectResource'].present:
+      request.objectResource = apitools_base.JsonToMessage(messages.Object, FLAGS.objectResource)
     if FLAGS['predefinedAcl'].present:
-      request.predefinedAcl = messages.StorageBucketsPatchRequest.PredefinedAclValueValuesEnum(FLAGS.predefinedAcl)
-    if FLAGS['predefinedDefaultObjectAcl'].present:
-      request.predefinedDefaultObjectAcl = messages.StorageBucketsPatchRequest.PredefinedDefaultObjectAclValueValuesEnum(FLAGS.predefinedDefaultObjectAcl)
+      request.predefinedAcl = messages.StorageObjectsPatchRequest.PredefinedAclValueValuesEnum(FLAGS.predefinedAcl)
     if FLAGS['projection'].present:
-      request.projection = messages.StorageBucketsPatchRequest.ProjectionValueValuesEnum(FLAGS.projection)
-    result = client.buckets.Patch(
+      request.projection = messages.StorageObjectsPatchRequest.ProjectionValueValuesEnum(FLAGS.projection)
+    result = client.objects.Patch(
         request, global_params=global_params)
     print apitools_base_cli.FormatOutput(result)
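A minimal sketch of the flag-to-request mapping that ObjectsPatch.RunWithArgs performs, assuming the sample's storage_v1_messages module and an already-constructed StorageV1 client; the bucket name, object name, and metadata JSON below are placeholders:

from apitools.base import py as apitools_base
import storage_v1_messages as messages

def patch_object_metadata(client, metadata_json):
  # Mirrors RunWithArgs above: path arguments become message fields and the
  # --objectResource JSON is decoded into an Object message for the body.
  request = messages.StorageObjectsPatchRequest(
      bucket='my-bucket',       # placeholder bucket name
      object='my-object',       # placeholder object name
      objectResource=apitools_base.JsonToMessage(messages.Object, metadata_json),
      ifMetagenerationMatch=1,  # only apply if the metageneration is still 1
  )
  return client.objects.Patch(request)  # returns the updated Object metadata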
 
 
-class BucketsUpdate(apitools_base_cli.NewCmd):
-  """Command wrapping buckets.Update."""
+class ObjectsRewrite(apitools_base_cli.NewCmd):
+  """Command wrapping objects.Rewrite."""
 
-  usage = """buckets_update <bucket>"""
+  usage = """objects_rewrite <sourceBucket> <sourceObject> <destinationBucket> <destinationObject>"""
 
   def __init__(self, name, fv):
-    super(BucketsUpdate, self).__init__(name, fv)
+    super(ObjectsRewrite, self).__init__(name, fv)
+    flags.DEFINE_enum(
+        'destinationPredefinedAcl',
+        u'authenticatedRead',
+        [u'authenticatedRead', u'bucketOwnerFullControl', u'bucketOwnerRead', u'private', u'projectPrivate', u'publicRead'],
+        u'Apply a predefined set of access controls to the destination '
+        u'object.',
+        flag_values=fv)
     flags.DEFINE_string(
-        'bucketResource',
+        'ifGenerationMatch',
         None,
-        u'A Bucket resource to be passed as the request body.',
+        u"Makes the operation conditional on whether the destination object's"
+        u' current generation matches the given value.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifGenerationNotMatch',
+        None,
+        u"Makes the operation conditional on whether the destination object's"
+        u' current generation does not match the given value.',
         flag_values=fv)
     flags.DEFINE_string(
         'ifMetagenerationMatch',
         None,
-        u'Makes the return of the bucket metadata conditional on whether the '
-        u"bucket's current metageneration matches the given value.",
+        u"Makes the operation conditional on whether the destination object's"
+        u' current metageneration matches the given value.',
         flag_values=fv)
     flags.DEFINE_string(
         'ifMetagenerationNotMatch',
         None,
-        u'Makes the return of the bucket metadata conditional on whether the '
-        u"bucket's current metageneration does not match the given value.",
+        u"Makes the operation conditional on whether the destination object's"
+        u' current metageneration does not match the given value.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifSourceGenerationMatch',
+        None,
+        u"Makes the operation conditional on whether the source object's "
+        u'generation matches the given value.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifSourceGenerationNotMatch',
+        None,
+        u"Makes the operation conditional on whether the source object's "
+        u'generation does not match the given value.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifSourceMetagenerationMatch',
+        None,
+        u"Makes the operation conditional on whether the source object's "
+        u'current metageneration matches the given value.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifSourceMetagenerationNotMatch',
+        None,
+        u"Makes the operation conditional on whether the source object's "
+        u'current metageneration does not match the given value.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'maxBytesRewrittenPerCall',
+        None,
+        u'The maximum number of bytes that will be rewritten per rewrite '
+        u"request. Most callers shouldn't need to specify this parameter - it"
+        u' is primarily in place to support testing. If specified the value '
+        u'must be an integral multiple of 1 MiB (1048576). Also, this only '
+        u'applies to requests where the source and destination span locations'
+        u' and/or storage classes. Finally, this value must not change across'
+        u" rewrite calls else you'll get an error that the rewriteToken is "
+        u'invalid.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'object',
+        None,
+        u'An Object resource to be passed as the request body.',
+        flag_values=fv)
+    flags.DEFINE_enum(
+        'projection',
+        u'full',
+        [u'full', u'noAcl'],
+        u'Set of properties to return. Defaults to noAcl, unless the object '
+        u'resource specifies the acl property, when it defaults to full.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'rewriteToken',
+        None,
+        u'Include this field (from the previous rewrite response) on each '
+        u'rewrite request after the first one, until the rewrite response '
+        u"'done' flag is true. Calls that provide a rewriteToken can omit all"
+        u' other request fields, but if included those fields must match the '
+        u'values provided in the first rewrite request.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'sourceGeneration',
+        None,
+        u'If present, selects a specific revision of the source object (as '
+        u'opposed to the latest version, the default).',
+        flag_values=fv)
+
+  def RunWithArgs(self, sourceBucket, sourceObject, destinationBucket, destinationObject):
+    """Rewrites a source object to a destination object. Optionally overrides
+    metadata.
+
+    Args:
+      sourceBucket: Name of the bucket in which to find the source object.
+      sourceObject: Name of the source object.
+      destinationBucket: Name of the bucket in which to store the new object.
+        Overrides the provided object metadata's bucket value, if any.
+      destinationObject: Name of the new object. Required when the object
+        metadata is not otherwise provided. Overrides the object metadata's
+        name value, if any.
+
+    Flags:
+      destinationPredefinedAcl: Apply a predefined set of access controls to
+        the destination object.
+      ifGenerationMatch: Makes the operation conditional on whether the
+        destination object's current generation matches the given value.
+      ifGenerationNotMatch: Makes the operation conditional on whether the
+        destination object's current generation does not match the given
+        value.
+      ifMetagenerationMatch: Makes the operation conditional on whether the
+        destination object's current metageneration matches the given value.
+      ifMetagenerationNotMatch: Makes the operation conditional on whether the
+        destination object's current metageneration does not match the given
+        value.
+      ifSourceGenerationMatch: Makes the operation conditional on whether the
+        source object's generation matches the given value.
+      ifSourceGenerationNotMatch: Makes the operation conditional on whether
+        the source object's generation does not match the given value.
+      ifSourceMetagenerationMatch: Makes the operation conditional on whether
+        the source object's current metageneration matches the given value.
+      ifSourceMetagenerationNotMatch: Makes the operation conditional on
+        whether the source object's current metageneration does not match the
+        given value.
+      maxBytesRewrittenPerCall: The maximum number of bytes that will be
+        rewritten per rewrite request. Most callers shouldn't need to specify
+        this parameter - it is primarily in place to support testing. If
+        specified the value must be an integral multiple of 1 MiB (1048576).
+        Also, this only applies to requests where the source and destination
+        span locations and/or storage classes. Finally, this value must not
+        change across rewrite calls else you'll get an error that the
+        rewriteToken is invalid.
+      object: An Object resource to be passed as the request body.
+      projection: Set of properties to return. Defaults to noAcl, unless the
+        object resource specifies the acl property, when it defaults to full.
+      rewriteToken: Include this field (from the previous rewrite response) on
+        each rewrite request after the first one, until the rewrite response
+        'done' flag is true. Calls that provide a rewriteToken can omit all
+        other request fields, but if included those fields must match the
+        values provided in the first rewrite request.
+      sourceGeneration: If present, selects a specific revision of the source
+        object (as opposed to the latest version, the default).
+    """
+    client = GetClientFromFlags()
+    global_params = GetGlobalParamsFromFlags()
+    request = messages.StorageObjectsRewriteRequest(
+        sourceBucket=sourceBucket.decode('utf8'),
+        sourceObject=sourceObject.decode('utf8'),
+        destinationBucket=destinationBucket.decode('utf8'),
+        destinationObject=destinationObject.decode('utf8'),
+        )
+    if FLAGS['destinationPredefinedAcl'].present:
+      request.destinationPredefinedAcl = messages.StorageObjectsRewriteRequest.DestinationPredefinedAclValueValuesEnum(FLAGS.destinationPredefinedAcl)
+    if FLAGS['ifGenerationMatch'].present:
+      request.ifGenerationMatch = int(FLAGS.ifGenerationMatch)
+    if FLAGS['ifGenerationNotMatch'].present:
+      request.ifGenerationNotMatch = int(FLAGS.ifGenerationNotMatch)
+    if FLAGS['ifMetagenerationMatch'].present:
+      request.ifMetagenerationMatch = int(FLAGS.ifMetagenerationMatch)
+    if FLAGS['ifMetagenerationNotMatch'].present:
+      request.ifMetagenerationNotMatch = int(FLAGS.ifMetagenerationNotMatch)
+    if FLAGS['ifSourceGenerationMatch'].present:
+      request.ifSourceGenerationMatch = int(FLAGS.ifSourceGenerationMatch)
+    if FLAGS['ifSourceGenerationNotMatch'].present:
+      request.ifSourceGenerationNotMatch = int(FLAGS.ifSourceGenerationNotMatch)
+    if FLAGS['ifSourceMetagenerationMatch'].present:
+      request.ifSourceMetagenerationMatch = int(FLAGS.ifSourceMetagenerationMatch)
+    if FLAGS['ifSourceMetagenerationNotMatch'].present:
+      request.ifSourceMetagenerationNotMatch = int(FLAGS.ifSourceMetagenerationNotMatch)
+    if FLAGS['maxBytesRewrittenPerCall'].present:
+      request.maxBytesRewrittenPerCall = int(FLAGS.maxBytesRewrittenPerCall)
+    if FLAGS['object'].present:
+      request.object = apitools_base.JsonToMessage(messages.Object, FLAGS.object)
+    if FLAGS['projection'].present:
+      request.projection = messages.StorageObjectsRewriteRequest.ProjectionValueValuesEnum(FLAGS.projection)
+    if FLAGS['rewriteToken'].present:
+      request.rewriteToken = FLAGS.rewriteToken.decode('utf8')
+    if FLAGS['sourceGeneration'].present:
+      request.sourceGeneration = int(FLAGS.sourceGeneration)
+    result = client.objects.Rewrite(
+        request, global_params=global_params)
+    print apitools_base_cli.FormatOutput(result)
+
+
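The rewriteToken/done contract described in the flag help above amounts to a simple loop. A hedged sketch, assuming the sample client and messages modules and a rewrite response carrying done, rewriteToken, and resource fields; the bucket and object names are placeholders:

def rewrite_until_done(client, messages):
  request = messages.StorageObjectsRewriteRequest(
      sourceBucket='src-bucket',         # placeholder source bucket
      sourceObject='large-object',       # placeholder source object
      destinationBucket='dst-bucket',    # placeholder destination bucket
      destinationObject='large-object',  # placeholder destination object
  )
  response = client.objects.Rewrite(request)
  while not response.done:
    # Later calls must echo the token from the previous response; any other
    # fields supplied must match the first request.
    request.rewriteToken = response.rewriteToken
    response = client.objects.Rewrite(request)
  return response.resource  # the rewritten destination Object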
+class ObjectsUpdate(apitools_base_cli.NewCmd):
+  """Command wrapping objects.Update."""
+
+  usage = """objects_update <bucket> <object>"""
+
+  def __init__(self, name, fv):
+    super(ObjectsUpdate, self).__init__(name, fv)
+    flags.DEFINE_string(
+        'generation',
+        None,
+        u'If present, selects a specific revision of this object (as opposed '
+        u'to the latest version, the default).',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifGenerationMatch',
+        None,
+        u"Makes the operation conditional on whether the object's current "
+        u'generation matches the given value.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifGenerationNotMatch',
+        None,
+        u"Makes the operation conditional on whether the object's current "
+        u'generation does not match the given value.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifMetagenerationMatch',
+        None,
+        u"Makes the operation conditional on whether the object's current "
+        u'metageneration matches the given value.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'ifMetagenerationNotMatch',
+        None,
+        u"Makes the operation conditional on whether the object's current "
+        u'metageneration does not match the given value.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'objectResource',
+        None,
+        u'An Object resource to be passed as the request body.',
         flag_values=fv)
     flags.DEFINE_enum(
         'predefinedAcl',
         u'authenticatedRead',
-        [u'authenticatedRead', u'private', u'projectPrivate', u'publicRead', u'publicReadWrite'],
-        u'Apply a predefined set of access controls to this bucket.',
-        flag_values=fv)
-    flags.DEFINE_enum(
-        'predefinedDefaultObjectAcl',
-        u'authenticatedRead',
         [u'authenticatedRead', u'bucketOwnerFullControl', u'bucketOwnerRead', u'private', u'projectPrivate', u'publicRead'],
-        u'Apply a predefined set of default object access controls to this '
-        u'bucket.',
+        u'Apply a predefined set of access controls to this object.',
         flag_values=fv)
     flags.DEFINE_enum(
         'projection',
@@ -2826,63 +2925,199 @@
         [u'full', u'noAcl'],
         u'Set of properties to return. Defaults to full.',
         flag_values=fv)
+    flags.DEFINE_string(
+        'download_filename',
+        '',
+        'Filename to use for download.',
+        flag_values=fv)
+    flags.DEFINE_boolean(
+        'overwrite',
+        'False',
+        'If True, overwrite the existing file when downloading.',
+        flag_values=fv)
 
-  def RunWithArgs(self, bucket):
-    """Updates a bucket.
+  def RunWithArgs(self, bucket, object):
+    """Updates an object's metadata.
 
     Args:
-      bucket: Name of a bucket.
+      bucket: Name of the bucket in which the object resides.
+      object: Name of the object.
 
     Flags:
-      bucketResource: A Bucket resource to be passed as the request body.
-      ifMetagenerationMatch: Makes the return of the bucket metadata
-        conditional on whether the bucket's current metageneration matches the
-        given value.
-      ifMetagenerationNotMatch: Makes the return of the bucket metadata
-        conditional on whether the bucket's current metageneration does not
-        match the given value.
-      predefinedAcl: Apply a predefined set of access controls to this bucket.
-      predefinedDefaultObjectAcl: Apply a predefined set of default object
-        access controls to this bucket.
+      generation: If present, selects a specific revision of this object (as
+        opposed to the latest version, the default).
+      ifGenerationMatch: Makes the operation conditional on whether the
+        object's current generation matches the given value.
+      ifGenerationNotMatch: Makes the operation conditional on whether the
+        object's current generation does not match the given value.
+      ifMetagenerationMatch: Makes the operation conditional on whether the
+        object's current metageneration matches the given value.
+      ifMetagenerationNotMatch: Makes the operation conditional on whether the
+        object's current metageneration does not match the given value.
+      objectResource: An Object resource to be passed as the request body.
+      predefinedAcl: Apply a predefined set of access controls to this object.
       projection: Set of properties to return. Defaults to full.
+      download_filename: Filename to use for download.
+      overwrite: If True, overwrite the existing file when downloading.
     """
     client = GetClientFromFlags()
     global_params = GetGlobalParamsFromFlags()
-    request = messages.StorageBucketsUpdateRequest(
+    request = messages.StorageObjectsUpdateRequest(
         bucket=bucket.decode('utf8'),
+        object=object.decode('utf8'),
         )
-    if FLAGS['bucketResource'].present:
-      request.bucketResource = apitools_base.JsonToMessage(messages.Bucket, FLAGS.bucketResource)
+    if FLAGS['generation'].present:
+      request.generation = int(FLAGS.generation)
+    if FLAGS['ifGenerationMatch'].present:
+      request.ifGenerationMatch = int(FLAGS.ifGenerationMatch)
+    if FLAGS['ifGenerationNotMatch'].present:
+      request.ifGenerationNotMatch = int(FLAGS.ifGenerationNotMatch)
     if FLAGS['ifMetagenerationMatch'].present:
       request.ifMetagenerationMatch = int(FLAGS.ifMetagenerationMatch)
     if FLAGS['ifMetagenerationNotMatch'].present:
       request.ifMetagenerationNotMatch = int(FLAGS.ifMetagenerationNotMatch)
+    if FLAGS['objectResource'].present:
+      request.objectResource = apitools_base.JsonToMessage(messages.Object, FLAGS.objectResource)
     if FLAGS['predefinedAcl'].present:
-      request.predefinedAcl = messages.StorageBucketsUpdateRequest.PredefinedAclValueValuesEnum(FLAGS.predefinedAcl)
-    if FLAGS['predefinedDefaultObjectAcl'].present:
-      request.predefinedDefaultObjectAcl = messages.StorageBucketsUpdateRequest.PredefinedDefaultObjectAclValueValuesEnum(FLAGS.predefinedDefaultObjectAcl)
+      request.predefinedAcl = messages.StorageObjectsUpdateRequest.PredefinedAclValueValuesEnum(FLAGS.predefinedAcl)
     if FLAGS['projection'].present:
-      request.projection = messages.StorageBucketsUpdateRequest.ProjectionValueValuesEnum(FLAGS.projection)
-    result = client.buckets.Update(
+      request.projection = messages.StorageObjectsUpdateRequest.ProjectionValueValuesEnum(FLAGS.projection)
+    download = None
+    if FLAGS.download_filename:
+      download = apitools_base.Download.FromFile(FLAGS.download_filename, overwrite=FLAGS.overwrite,
+          progress_callback=apitools_base.DownloadProgressPrinter,
+          finish_callback=apitools_base.DownloadCompletePrinter)
+    result = client.objects.Update(
+        request, global_params=global_params, download=download)
+    print apitools_base_cli.FormatOutput(result)
+
+
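A short sketch of the --download_filename path wired up above: apitools' Download helper streams the response body into a local file while Update runs. The file path and names are placeholders, and a real call would normally also set objectResource:

from apitools.base import py as apitools_base

def update_and_download(client, messages):
  request = messages.StorageObjectsUpdateRequest(
      bucket='my-bucket', object='my-object')  # placeholder names
  download = apitools_base.Download.FromFile('/tmp/my-object.bin', overwrite=True)
  # The Download object is threaded through to the HTTP layer; the return value
  # is still the updated Object metadata, as in RunWithArgs above.
  return client.objects.Update(request, download=download)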
+class ObjectsWatchAll(apitools_base_cli.NewCmd):
+  """Command wrapping objects.WatchAll."""
+
+  usage = """objects_watchAll <bucket>"""
+
+  def __init__(self, name, fv):
+    super(ObjectsWatchAll, self).__init__(name, fv)
+    flags.DEFINE_string(
+        'channel',
+        None,
+        u'A Channel resource to be passed as the request body.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'delimiter',
+        None,
+        u'Returns results in a directory-like mode. items will contain only '
+        u'objects whose names, aside from the prefix, do not contain '
+        u'delimiter. Objects whose names, aside from the prefix, contain '
+        u'delimiter will have their name, truncated after the delimiter, '
+        u'returned in prefixes. Duplicate prefixes are omitted.',
+        flag_values=fv)
+    flags.DEFINE_integer(
+        'maxResults',
+        None,
+        u'Maximum number of items plus prefixes to return. As duplicate '
+        u'prefixes are omitted, fewer total results may be returned than '
+        u'requested. The default value of this parameter is 1,000 items.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'pageToken',
+        None,
+        u'A previously-returned page token representing part of the larger '
+        u'set of results to view.',
+        flag_values=fv)
+    flags.DEFINE_string(
+        'prefix',
+        None,
+        u'Filter results to objects whose names begin with this prefix.',
+        flag_values=fv)
+    flags.DEFINE_enum(
+        'projection',
+        u'full',
+        [u'full', u'noAcl'],
+        u'Set of properties to return. Defaults to noAcl.',
+        flag_values=fv)
+    flags.DEFINE_boolean(
+        'versions',
+        None,
+        u'If true, lists all versions of an object as distinct results. The '
+        u'default is false. For more information, see Object Versioning.',
+        flag_values=fv)
+
+  def RunWithArgs(self, bucket):
+    """Watch for changes on all objects in a bucket.
+
+    Args:
+      bucket: Name of the bucket in which to look for objects.
+
+    Flags:
+      channel: A Channel resource to be passed as the request body.
+      delimiter: Returns results in a directory-like mode. items will contain
+        only objects whose names, aside from the prefix, do not contain
+        delimiter. Objects whose names, aside from the prefix, contain
+        delimiter will have their name, truncated after the delimiter,
+        returned in prefixes. Duplicate prefixes are omitted.
+      maxResults: Maximum number of items plus prefixes to return. As
+        duplicate prefixes are omitted, fewer total results may be returned
+        than requested. The default value of this parameter is 1,000 items.
+      pageToken: A previously-returned page token representing part of the
+        larger set of results to view.
+      prefix: Filter results to objects whose names begin with this prefix.
+      projection: Set of properties to return. Defaults to noAcl.
+      versions: If true, lists all versions of an object as distinct results.
+        The default is false. For more information, see Object Versioning.
+    """
+    client = GetClientFromFlags()
+    global_params = GetGlobalParamsFromFlags()
+    request = messages.StorageObjectsWatchAllRequest(
+        bucket=bucket.decode('utf8'),
+        )
+    if FLAGS['channel'].present:
+      request.channel = apitools_base.JsonToMessage(messages.Channel, FLAGS.channel)
+    if FLAGS['delimiter'].present:
+      request.delimiter = FLAGS.delimiter.decode('utf8')
+    if FLAGS['maxResults'].present:
+      request.maxResults = FLAGS.maxResults
+    if FLAGS['pageToken'].present:
+      request.pageToken = FLAGS.pageToken.decode('utf8')
+    if FLAGS['prefix'].present:
+      request.prefix = FLAGS.prefix.decode('utf8')
+    if FLAGS['projection'].present:
+      request.projection = messages.StorageObjectsWatchAllRequest.ProjectionValueValuesEnum(FLAGS.projection)
+    if FLAGS['versions'].present:
+      request.versions = FLAGS.versions
+    result = client.objects.WatchAll(
         request, global_params=global_params)
     print apitools_base_cli.FormatOutput(result)
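A rough sketch of what objects_watchAll posts: a Channel resource sent to b/{bucket}/o/watch, with the API returning a Channel describing the subscription. The channel id, type, and push address below are assumptions/placeholders based on the storage notification docs:

def watch_bucket(client, messages):
  channel = messages.Channel(
      id='my-channel-id',                    # caller-chosen channel identifier
      type='WEB_HOOK',                       # webhook-style notification channel
      address='https://example.com/notify',  # HTTPS endpoint that receives events
  )
  request = messages.StorageObjectsWatchAllRequest(
      bucket='my-bucket', channel=channel)   # placeholder bucket name
  return client.objects.WatchAll(request)    # returns the created Channel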
 
 
 def main(_):
   appcommands.AddCmd('pyshell', PyShell)
-  appcommands.AddCmd('defaultObjectAccessControls_delete', DefaultObjectAccessControlsDelete)
-  appcommands.AddCmd('defaultObjectAccessControls_get', DefaultObjectAccessControlsGet)
-  appcommands.AddCmd('defaultObjectAccessControls_insert', DefaultObjectAccessControlsInsert)
-  appcommands.AddCmd('defaultObjectAccessControls_list', DefaultObjectAccessControlsList)
-  appcommands.AddCmd('defaultObjectAccessControls_patch', DefaultObjectAccessControlsPatch)
-  appcommands.AddCmd('defaultObjectAccessControls_update', DefaultObjectAccessControlsUpdate)
   appcommands.AddCmd('bucketAccessControls_delete', BucketAccessControlsDelete)
   appcommands.AddCmd('bucketAccessControls_get', BucketAccessControlsGet)
   appcommands.AddCmd('bucketAccessControls_insert', BucketAccessControlsInsert)
   appcommands.AddCmd('bucketAccessControls_list', BucketAccessControlsList)
   appcommands.AddCmd('bucketAccessControls_patch', BucketAccessControlsPatch)
   appcommands.AddCmd('bucketAccessControls_update', BucketAccessControlsUpdate)
+  appcommands.AddCmd('buckets_delete', BucketsDelete)
+  appcommands.AddCmd('buckets_get', BucketsGet)
+  appcommands.AddCmd('buckets_insert', BucketsInsert)
+  appcommands.AddCmd('buckets_list', BucketsList)
+  appcommands.AddCmd('buckets_patch', BucketsPatch)
+  appcommands.AddCmd('buckets_update', BucketsUpdate)
   appcommands.AddCmd('channels_stop', ChannelsStop)
+  appcommands.AddCmd('defaultObjectAccessControls_delete', DefaultObjectAccessControlsDelete)
+  appcommands.AddCmd('defaultObjectAccessControls_get', DefaultObjectAccessControlsGet)
+  appcommands.AddCmd('defaultObjectAccessControls_insert', DefaultObjectAccessControlsInsert)
+  appcommands.AddCmd('defaultObjectAccessControls_list', DefaultObjectAccessControlsList)
+  appcommands.AddCmd('defaultObjectAccessControls_patch', DefaultObjectAccessControlsPatch)
+  appcommands.AddCmd('defaultObjectAccessControls_update', DefaultObjectAccessControlsUpdate)
+  appcommands.AddCmd('objectAccessControls_delete', ObjectAccessControlsDelete)
+  appcommands.AddCmd('objectAccessControls_get', ObjectAccessControlsGet)
+  appcommands.AddCmd('objectAccessControls_insert', ObjectAccessControlsInsert)
+  appcommands.AddCmd('objectAccessControls_list', ObjectAccessControlsList)
+  appcommands.AddCmd('objectAccessControls_patch', ObjectAccessControlsPatch)
+  appcommands.AddCmd('objectAccessControls_update', ObjectAccessControlsUpdate)
   appcommands.AddCmd('objects_compose', ObjectsCompose)
   appcommands.AddCmd('objects_copy', ObjectsCopy)
   appcommands.AddCmd('objects_delete', ObjectsDelete)
@@ -2890,20 +3125,9 @@
   appcommands.AddCmd('objects_insert', ObjectsInsert)
   appcommands.AddCmd('objects_list', ObjectsList)
   appcommands.AddCmd('objects_patch', ObjectsPatch)
+  appcommands.AddCmd('objects_rewrite', ObjectsRewrite)
   appcommands.AddCmd('objects_update', ObjectsUpdate)
   appcommands.AddCmd('objects_watchAll', ObjectsWatchAll)
-  appcommands.AddCmd('objectAccessControls_delete', ObjectAccessControlsDelete)
-  appcommands.AddCmd('objectAccessControls_get', ObjectAccessControlsGet)
-  appcommands.AddCmd('objectAccessControls_insert', ObjectAccessControlsInsert)
-  appcommands.AddCmd('objectAccessControls_list', ObjectAccessControlsList)
-  appcommands.AddCmd('objectAccessControls_patch', ObjectAccessControlsPatch)
-  appcommands.AddCmd('objectAccessControls_update', ObjectAccessControlsUpdate)
-  appcommands.AddCmd('buckets_delete', BucketsDelete)
-  appcommands.AddCmd('buckets_get', BucketsGet)
-  appcommands.AddCmd('buckets_insert', BucketsInsert)
-  appcommands.AddCmd('buckets_list', BucketsList)
-  appcommands.AddCmd('buckets_patch', BucketsPatch)
-  appcommands.AddCmd('buckets_update', BucketsUpdate)
 
   apitools_base_cli.SetupLogger()
   if hasattr(appcommands, 'SetDefaultCommand'):
diff --git a/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/storage/storage_v1_client.py b/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/storage/storage_v1_client.py
index 4d5024d..18bd33d 100644
--- a/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/storage/storage_v1_client.py
+++ b/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/storage/storage_v1_client.py
@@ -1,4 +1,5 @@
 """Generated client library for storage version v1."""
+# NOTE: This file is autogenerated and should not be edited by hand.
 from apitools.base.py import base_api
 import storage_v1_messages as messages
 
@@ -32,175 +33,12 @@
         credentials_args=credentials_args,
         default_global_params=default_global_params,
         additional_http_headers=additional_http_headers)
-    self.defaultObjectAccessControls = self.DefaultObjectAccessControlsService(self)
     self.bucketAccessControls = self.BucketAccessControlsService(self)
-    self.channels = self.ChannelsService(self)
-    self.objects = self.ObjectsService(self)
-    self.objectAccessControls = self.ObjectAccessControlsService(self)
     self.buckets = self.BucketsService(self)
-
-  class DefaultObjectAccessControlsService(base_api.BaseApiService):
-    """Service class for the defaultObjectAccessControls resource."""
-
-    _NAME = u'defaultObjectAccessControls'
-
-    def __init__(self, client):
-      super(StorageV1.DefaultObjectAccessControlsService, self).__init__(client)
-      self._method_configs = {
-          'Delete': base_api.ApiMethodInfo(
-              http_method=u'DELETE',
-              method_id=u'storage.defaultObjectAccessControls.delete',
-              ordered_params=[u'bucket', u'entity'],
-              path_params=[u'bucket', u'entity'],
-              query_params=[],
-              relative_path=u'b/{bucket}/defaultObjectAcl/{entity}',
-              request_field='',
-              request_type_name=u'StorageDefaultObjectAccessControlsDeleteRequest',
-              response_type_name=u'StorageDefaultObjectAccessControlsDeleteResponse',
-              supports_download=False,
-          ),
-          'Get': base_api.ApiMethodInfo(
-              http_method=u'GET',
-              method_id=u'storage.defaultObjectAccessControls.get',
-              ordered_params=[u'bucket', u'entity'],
-              path_params=[u'bucket', u'entity'],
-              query_params=[],
-              relative_path=u'b/{bucket}/defaultObjectAcl/{entity}',
-              request_field='',
-              request_type_name=u'StorageDefaultObjectAccessControlsGetRequest',
-              response_type_name=u'ObjectAccessControl',
-              supports_download=False,
-          ),
-          'Insert': base_api.ApiMethodInfo(
-              http_method=u'POST',
-              method_id=u'storage.defaultObjectAccessControls.insert',
-              ordered_params=[u'bucket'],
-              path_params=[u'bucket'],
-              query_params=[],
-              relative_path=u'b/{bucket}/defaultObjectAcl',
-              request_field='<request>',
-              request_type_name=u'ObjectAccessControl',
-              response_type_name=u'ObjectAccessControl',
-              supports_download=False,
-          ),
-          'List': base_api.ApiMethodInfo(
-              http_method=u'GET',
-              method_id=u'storage.defaultObjectAccessControls.list',
-              ordered_params=[u'bucket'],
-              path_params=[u'bucket'],
-              query_params=[u'ifMetagenerationMatch', u'ifMetagenerationNotMatch'],
-              relative_path=u'b/{bucket}/defaultObjectAcl',
-              request_field='',
-              request_type_name=u'StorageDefaultObjectAccessControlsListRequest',
-              response_type_name=u'ObjectAccessControls',
-              supports_download=False,
-          ),
-          'Patch': base_api.ApiMethodInfo(
-              http_method=u'PATCH',
-              method_id=u'storage.defaultObjectAccessControls.patch',
-              ordered_params=[u'bucket', u'entity'],
-              path_params=[u'bucket', u'entity'],
-              query_params=[],
-              relative_path=u'b/{bucket}/defaultObjectAcl/{entity}',
-              request_field='<request>',
-              request_type_name=u'ObjectAccessControl',
-              response_type_name=u'ObjectAccessControl',
-              supports_download=False,
-          ),
-          'Update': base_api.ApiMethodInfo(
-              http_method=u'PUT',
-              method_id=u'storage.defaultObjectAccessControls.update',
-              ordered_params=[u'bucket', u'entity'],
-              path_params=[u'bucket', u'entity'],
-              query_params=[],
-              relative_path=u'b/{bucket}/defaultObjectAcl/{entity}',
-              request_field='<request>',
-              request_type_name=u'ObjectAccessControl',
-              response_type_name=u'ObjectAccessControl',
-              supports_download=False,
-          ),
-          }
-
-      self._upload_configs = {
-          }
-
-    def Delete(self, request, global_params=None):
-      """Permanently deletes the default object ACL entry for the specified entity on the specified bucket.
-
-      Args:
-        request: (StorageDefaultObjectAccessControlsDeleteRequest) input message
-        global_params: (StandardQueryParameters, default: None) global arguments
-      Returns:
-        (StorageDefaultObjectAccessControlsDeleteResponse) The response message.
-      """
-      config = self.GetMethodConfig('Delete')
-      return self._RunMethod(
-          config, request, global_params=global_params)
-
-    def Get(self, request, global_params=None):
-      """Returns the default object ACL entry for the specified entity on the specified bucket.
-
-      Args:
-        request: (StorageDefaultObjectAccessControlsGetRequest) input message
-        global_params: (StandardQueryParameters, default: None) global arguments
-      Returns:
-        (ObjectAccessControl) The response message.
-      """
-      config = self.GetMethodConfig('Get')
-      return self._RunMethod(
-          config, request, global_params=global_params)
-
-    def Insert(self, request, global_params=None):
-      """Creates a new default object ACL entry on the specified bucket.
-
-      Args:
-        request: (ObjectAccessControl) input message
-        global_params: (StandardQueryParameters, default: None) global arguments
-      Returns:
-        (ObjectAccessControl) The response message.
-      """
-      config = self.GetMethodConfig('Insert')
-      return self._RunMethod(
-          config, request, global_params=global_params)
-
-    def List(self, request, global_params=None):
-      """Retrieves default object ACL entries on the specified bucket.
-
-      Args:
-        request: (StorageDefaultObjectAccessControlsListRequest) input message
-        global_params: (StandardQueryParameters, default: None) global arguments
-      Returns:
-        (ObjectAccessControls) The response message.
-      """
-      config = self.GetMethodConfig('List')
-      return self._RunMethod(
-          config, request, global_params=global_params)
-
-    def Patch(self, request, global_params=None):
-      """Updates a default object ACL entry on the specified bucket. This method supports patch semantics.
-
-      Args:
-        request: (ObjectAccessControl) input message
-        global_params: (StandardQueryParameters, default: None) global arguments
-      Returns:
-        (ObjectAccessControl) The response message.
-      """
-      config = self.GetMethodConfig('Patch')
-      return self._RunMethod(
-          config, request, global_params=global_params)
-
-    def Update(self, request, global_params=None):
-      """Updates a default object ACL entry on the specified bucket.
-
-      Args:
-        request: (ObjectAccessControl) input message
-        global_params: (StandardQueryParameters, default: None) global arguments
-      Returns:
-        (ObjectAccessControl) The response message.
-      """
-      config = self.GetMethodConfig('Update')
-      return self._RunMethod(
-          config, request, global_params=global_params)
+    self.channels = self.ChannelsService(self)
+    self.defaultObjectAccessControls = self.DefaultObjectAccessControlsService(self)
+    self.objectAccessControls = self.ObjectAccessControlsService(self)
+    self.objects = self.ObjectsService(self)
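The constructor now wires the service attributes in alphabetical order; caller code is unchanged. A minimal usage sketch, assuming default credential discovery succeeds and with a placeholder bucket name:

import storage_v1_client
import storage_v1_messages as messages

def list_default_object_acls(bucket_name):
  client = storage_v1_client.StorageV1()  # relies on default credential discovery
  request = messages.StorageDefaultObjectAccessControlsListRequest(
      bucket=bucket_name)
  return client.defaultObjectAccessControls.List(request)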
 
   class BucketAccessControlsService(base_api.BaseApiService):
     """Service class for the bucketAccessControls resource."""
@@ -365,6 +203,169 @@
       return self._RunMethod(
           config, request, global_params=global_params)
 
+  class BucketsService(base_api.BaseApiService):
+    """Service class for the buckets resource."""
+
+    _NAME = u'buckets'
+
+    def __init__(self, client):
+      super(StorageV1.BucketsService, self).__init__(client)
+      self._method_configs = {
+          'Delete': base_api.ApiMethodInfo(
+              http_method=u'DELETE',
+              method_id=u'storage.buckets.delete',
+              ordered_params=[u'bucket'],
+              path_params=[u'bucket'],
+              query_params=[u'ifMetagenerationMatch', u'ifMetagenerationNotMatch'],
+              relative_path=u'b/{bucket}',
+              request_field='',
+              request_type_name=u'StorageBucketsDeleteRequest',
+              response_type_name=u'StorageBucketsDeleteResponse',
+              supports_download=False,
+          ),
+          'Get': base_api.ApiMethodInfo(
+              http_method=u'GET',
+              method_id=u'storage.buckets.get',
+              ordered_params=[u'bucket'],
+              path_params=[u'bucket'],
+              query_params=[u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'projection'],
+              relative_path=u'b/{bucket}',
+              request_field='',
+              request_type_name=u'StorageBucketsGetRequest',
+              response_type_name=u'Bucket',
+              supports_download=False,
+          ),
+          'Insert': base_api.ApiMethodInfo(
+              http_method=u'POST',
+              method_id=u'storage.buckets.insert',
+              ordered_params=[u'project'],
+              path_params=[],
+              query_params=[u'predefinedAcl', u'predefinedDefaultObjectAcl', u'project', u'projection'],
+              relative_path=u'b',
+              request_field=u'bucket',
+              request_type_name=u'StorageBucketsInsertRequest',
+              response_type_name=u'Bucket',
+              supports_download=False,
+          ),
+          'List': base_api.ApiMethodInfo(
+              http_method=u'GET',
+              method_id=u'storage.buckets.list',
+              ordered_params=[u'project'],
+              path_params=[],
+              query_params=[u'maxResults', u'pageToken', u'prefix', u'project', u'projection'],
+              relative_path=u'b',
+              request_field='',
+              request_type_name=u'StorageBucketsListRequest',
+              response_type_name=u'Buckets',
+              supports_download=False,
+          ),
+          'Patch': base_api.ApiMethodInfo(
+              http_method=u'PATCH',
+              method_id=u'storage.buckets.patch',
+              ordered_params=[u'bucket'],
+              path_params=[u'bucket'],
+              query_params=[u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'predefinedAcl', u'predefinedDefaultObjectAcl', u'projection'],
+              relative_path=u'b/{bucket}',
+              request_field=u'bucketResource',
+              request_type_name=u'StorageBucketsPatchRequest',
+              response_type_name=u'Bucket',
+              supports_download=False,
+          ),
+          'Update': base_api.ApiMethodInfo(
+              http_method=u'PUT',
+              method_id=u'storage.buckets.update',
+              ordered_params=[u'bucket'],
+              path_params=[u'bucket'],
+              query_params=[u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'predefinedAcl', u'predefinedDefaultObjectAcl', u'projection'],
+              relative_path=u'b/{bucket}',
+              request_field=u'bucketResource',
+              request_type_name=u'StorageBucketsUpdateRequest',
+              response_type_name=u'Bucket',
+              supports_download=False,
+          ),
+          }
+
+      self._upload_configs = {
+          }
+
+    def Delete(self, request, global_params=None):
+      """Permanently deletes an empty bucket.
+
+      Args:
+        request: (StorageBucketsDeleteRequest) input message
+        global_params: (StandardQueryParameters, default: None) global arguments
+      Returns:
+        (StorageBucketsDeleteResponse) The response message.
+      """
+      config = self.GetMethodConfig('Delete')
+      return self._RunMethod(
+          config, request, global_params=global_params)
+
+    def Get(self, request, global_params=None):
+      """Returns metadata for the specified bucket.
+
+      Args:
+        request: (StorageBucketsGetRequest) input message
+        global_params: (StandardQueryParameters, default: None) global arguments
+      Returns:
+        (Bucket) The response message.
+      """
+      config = self.GetMethodConfig('Get')
+      return self._RunMethod(
+          config, request, global_params=global_params)
+
+    def Insert(self, request, global_params=None):
+      """Creates a new bucket.
+
+      Args:
+        request: (StorageBucketsInsertRequest) input message
+        global_params: (StandardQueryParameters, default: None) global arguments
+      Returns:
+        (Bucket) The response message.
+      """
+      config = self.GetMethodConfig('Insert')
+      return self._RunMethod(
+          config, request, global_params=global_params)
+
+    def List(self, request, global_params=None):
+      """Retrieves a list of buckets for a given project.
+
+      Args:
+        request: (StorageBucketsListRequest) input message
+        global_params: (StandardQueryParameters, default: None) global arguments
+      Returns:
+        (Buckets) The response message.
+      """
+      config = self.GetMethodConfig('List')
+      return self._RunMethod(
+          config, request, global_params=global_params)
+
+    def Patch(self, request, global_params=None):
+      """Updates a bucket. This method supports patch semantics.
+
+      Args:
+        request: (StorageBucketsPatchRequest) input message
+        global_params: (StandardQueryParameters, default: None) global arguments
+      Returns:
+        (Bucket) The response message.
+      """
+      config = self.GetMethodConfig('Patch')
+      return self._RunMethod(
+          config, request, global_params=global_params)
+
+    def Update(self, request, global_params=None):
+      """Updates a bucket.
+
+      Args:
+        request: (StorageBucketsUpdateRequest) input message
+        global_params: (StandardQueryParameters, default: None) global arguments
+      Returns:
+        (Bucket) The response message.
+      """
+      config = self.GetMethodConfig('Update')
+      return self._RunMethod(
+          config, request, global_params=global_params)
+
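Each ApiMethodInfo above tells the base client how to build the HTTP call: relative_path b/{bucket} is filled in from the request's path params and the listed query params are appended. From caller code that is just a typed request; a sketch with a placeholder bucket and metageneration:

def get_bucket_if_unchanged(client, messages):
  request = messages.StorageBucketsGetRequest(
      bucket='my-bucket',       # substituted into relative_path b/{bucket}
      ifMetagenerationMatch=3,  # sent as the ifMetagenerationMatch query param
      projection=messages.StorageBucketsGetRequest.ProjectionValueValuesEnum.full,
  )
  return client.buckets.Get(request)  # returns a Bucket message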
   class ChannelsService(base_api.BaseApiService):
     """Service class for the channels resource."""
 
@@ -403,269 +404,167 @@
       return self._RunMethod(
           config, request, global_params=global_params)
 
-  class ObjectsService(base_api.BaseApiService):
-    """Service class for the objects resource."""
+  class DefaultObjectAccessControlsService(base_api.BaseApiService):
+    """Service class for the defaultObjectAccessControls resource."""
 
-    _NAME = u'objects'
+    _NAME = u'defaultObjectAccessControls'
 
     def __init__(self, client):
-      super(StorageV1.ObjectsService, self).__init__(client)
+      super(StorageV1.DefaultObjectAccessControlsService, self).__init__(client)
       self._method_configs = {
-          'Compose': base_api.ApiMethodInfo(
-              http_method=u'POST',
-              method_id=u'storage.objects.compose',
-              ordered_params=[u'destinationBucket', u'destinationObject'],
-              path_params=[u'destinationBucket', u'destinationObject'],
-              query_params=[u'destinationPredefinedAcl', u'ifGenerationMatch', u'ifMetagenerationMatch'],
-              relative_path=u'b/{destinationBucket}/o/{destinationObject}/compose',
-              request_field=u'composeRequest',
-              request_type_name=u'StorageObjectsComposeRequest',
-              response_type_name=u'Object',
-              supports_download=True,
-          ),
-          'Copy': base_api.ApiMethodInfo(
-              http_method=u'POST',
-              method_id=u'storage.objects.copy',
-              ordered_params=[u'sourceBucket', u'sourceObject', u'destinationBucket', u'destinationObject'],
-              path_params=[u'destinationBucket', u'destinationObject', u'sourceBucket', u'sourceObject'],
-              query_params=[u'destinationPredefinedAcl', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'ifSourceGenerationMatch', u'ifSourceGenerationNotMatch', u'ifSourceMetagenerationMatch', u'ifSourceMetagenerationNotMatch', u'projection', u'sourceGeneration'],
-              relative_path=u'b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}',
-              request_field=u'object',
-              request_type_name=u'StorageObjectsCopyRequest',
-              response_type_name=u'Object',
-              supports_download=True,
-          ),
           'Delete': base_api.ApiMethodInfo(
               http_method=u'DELETE',
-              method_id=u'storage.objects.delete',
-              ordered_params=[u'bucket', u'object'],
-              path_params=[u'bucket', u'object'],
-              query_params=[u'generation', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch'],
-              relative_path=u'b/{bucket}/o/{object}',
+              method_id=u'storage.defaultObjectAccessControls.delete',
+              ordered_params=[u'bucket', u'entity'],
+              path_params=[u'bucket', u'entity'],
+              query_params=[],
+              relative_path=u'b/{bucket}/defaultObjectAcl/{entity}',
               request_field='',
-              request_type_name=u'StorageObjectsDeleteRequest',
-              response_type_name=u'StorageObjectsDeleteResponse',
+              request_type_name=u'StorageDefaultObjectAccessControlsDeleteRequest',
+              response_type_name=u'StorageDefaultObjectAccessControlsDeleteResponse',
               supports_download=False,
           ),
           'Get': base_api.ApiMethodInfo(
               http_method=u'GET',
-              method_id=u'storage.objects.get',
-              ordered_params=[u'bucket', u'object'],
-              path_params=[u'bucket', u'object'],
-              query_params=[u'generation', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'projection'],
-              relative_path=u'b/{bucket}/o/{object}',
+              method_id=u'storage.defaultObjectAccessControls.get',
+              ordered_params=[u'bucket', u'entity'],
+              path_params=[u'bucket', u'entity'],
+              query_params=[],
+              relative_path=u'b/{bucket}/defaultObjectAcl/{entity}',
               request_field='',
-              request_type_name=u'StorageObjectsGetRequest',
-              response_type_name=u'Object',
-              supports_download=True,
+              request_type_name=u'StorageDefaultObjectAccessControlsGetRequest',
+              response_type_name=u'ObjectAccessControl',
+              supports_download=False,
           ),
           'Insert': base_api.ApiMethodInfo(
               http_method=u'POST',
-              method_id=u'storage.objects.insert',
+              method_id=u'storage.defaultObjectAccessControls.insert',
               ordered_params=[u'bucket'],
               path_params=[u'bucket'],
-              query_params=[u'contentEncoding', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'name', u'predefinedAcl', u'projection'],
-              relative_path=u'b/{bucket}/o',
-              request_field=u'object',
-              request_type_name=u'StorageObjectsInsertRequest',
-              response_type_name=u'Object',
-              supports_download=True,
+              query_params=[],
+              relative_path=u'b/{bucket}/defaultObjectAcl',
+              request_field='<request>',
+              request_type_name=u'ObjectAccessControl',
+              response_type_name=u'ObjectAccessControl',
+              supports_download=False,
           ),
           'List': base_api.ApiMethodInfo(
               http_method=u'GET',
-              method_id=u'storage.objects.list',
+              method_id=u'storage.defaultObjectAccessControls.list',
               ordered_params=[u'bucket'],
               path_params=[u'bucket'],
-              query_params=[u'delimiter', u'maxResults', u'pageToken', u'prefix', u'projection', u'versions'],
-              relative_path=u'b/{bucket}/o',
+              query_params=[u'ifMetagenerationMatch', u'ifMetagenerationNotMatch'],
+              relative_path=u'b/{bucket}/defaultObjectAcl',
               request_field='',
-              request_type_name=u'StorageObjectsListRequest',
-              response_type_name=u'Objects',
+              request_type_name=u'StorageDefaultObjectAccessControlsListRequest',
+              response_type_name=u'ObjectAccessControls',
               supports_download=False,
           ),
           'Patch': base_api.ApiMethodInfo(
               http_method=u'PATCH',
-              method_id=u'storage.objects.patch',
-              ordered_params=[u'bucket', u'object'],
-              path_params=[u'bucket', u'object'],
-              query_params=[u'generation', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'predefinedAcl', u'projection'],
-              relative_path=u'b/{bucket}/o/{object}',
-              request_field=u'objectResource',
-              request_type_name=u'StorageObjectsPatchRequest',
-              response_type_name=u'Object',
+              method_id=u'storage.defaultObjectAccessControls.patch',
+              ordered_params=[u'bucket', u'entity'],
+              path_params=[u'bucket', u'entity'],
+              query_params=[],
+              relative_path=u'b/{bucket}/defaultObjectAcl/{entity}',
+              request_field='<request>',
+              request_type_name=u'ObjectAccessControl',
+              response_type_name=u'ObjectAccessControl',
               supports_download=False,
           ),
           'Update': base_api.ApiMethodInfo(
               http_method=u'PUT',
-              method_id=u'storage.objects.update',
-              ordered_params=[u'bucket', u'object'],
-              path_params=[u'bucket', u'object'],
-              query_params=[u'generation', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'predefinedAcl', u'projection'],
-              relative_path=u'b/{bucket}/o/{object}',
-              request_field=u'objectResource',
-              request_type_name=u'StorageObjectsUpdateRequest',
-              response_type_name=u'Object',
-              supports_download=True,
-          ),
-          'WatchAll': base_api.ApiMethodInfo(
-              http_method=u'POST',
-              method_id=u'storage.objects.watchAll',
-              ordered_params=[u'bucket'],
-              path_params=[u'bucket'],
-              query_params=[u'delimiter', u'maxResults', u'pageToken', u'prefix', u'projection', u'versions'],
-              relative_path=u'b/{bucket}/o/watch',
-              request_field=u'channel',
-              request_type_name=u'StorageObjectsWatchAllRequest',
-              response_type_name=u'Channel',
+              method_id=u'storage.defaultObjectAccessControls.update',
+              ordered_params=[u'bucket', u'entity'],
+              path_params=[u'bucket', u'entity'],
+              query_params=[],
+              relative_path=u'b/{bucket}/defaultObjectAcl/{entity}',
+              request_field='<request>',
+              request_type_name=u'ObjectAccessControl',
+              response_type_name=u'ObjectAccessControl',
               supports_download=False,
           ),
           }
 
       self._upload_configs = {
-          'Insert': base_api.ApiUploadInfo(
-              accept=['*/*'],
-              max_size=None,
-              resumable_multipart=True,
-              resumable_path=u'/resumable/upload/storage/v1/b/{bucket}/o',
-              simple_multipart=True,
-              simple_path=u'/upload/storage/v1/b/{bucket}/o',
-          ),
           }
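The ApiUploadInfo removed here (it moves with ObjectsService later in the file) is what lets objects.Insert carry media via the simple or resumable path. A hedged sketch, with placeholder file and bucket names:

from apitools.base import py as apitools_base

def upload_object(client, messages):
  upload = apitools_base.Upload.FromFile('photo.jpg', mime_type='image/jpeg')
  request = messages.StorageObjectsInsertRequest(
      bucket='my-bucket',  # placeholder bucket
      name='photo.jpg')    # object name, sent as a query parameter
  # The upload config decides between the simple and resumable endpoints.
  return client.objects.Insert(request, upload=upload)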
 
-    def Compose(self, request, global_params=None, download=None):
-      """Concatenates a list of existing objects into a new object in the same bucket.
-
-      Args:
-        request: (StorageObjectsComposeRequest) input message
-        global_params: (StandardQueryParameters, default: None) global arguments
-        download: (Download, default: None) If present, download
-            data from the request via this stream.
-      Returns:
-        (Object) The response message.
-      """
-      config = self.GetMethodConfig('Compose')
-      return self._RunMethod(
-          config, request, global_params=global_params,
-          download=download)
-
-    def Copy(self, request, global_params=None, download=None):
-      """Copies an object to a specified location. Optionally overrides metadata.
-
-      Args:
-        request: (StorageObjectsCopyRequest) input message
-        global_params: (StandardQueryParameters, default: None) global arguments
-        download: (Download, default: None) If present, download
-            data from the request via this stream.
-      Returns:
-        (Object) The response message.
-      """
-      config = self.GetMethodConfig('Copy')
-      return self._RunMethod(
-          config, request, global_params=global_params,
-          download=download)
-
     def Delete(self, request, global_params=None):
-      """Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.
+      """Permanently deletes the default object ACL entry for the specified entity on the specified bucket.
 
       Args:
-        request: (StorageObjectsDeleteRequest) input message
+        request: (StorageDefaultObjectAccessControlsDeleteRequest) input message
         global_params: (StandardQueryParameters, default: None) global arguments
       Returns:
-        (StorageObjectsDeleteResponse) The response message.
+        (StorageDefaultObjectAccessControlsDeleteResponse) The response message.
       """
       config = self.GetMethodConfig('Delete')
       return self._RunMethod(
           config, request, global_params=global_params)
 
-    def Get(self, request, global_params=None, download=None):
-      """Retrieves an object or its metadata.
+    def Get(self, request, global_params=None):
+      """Returns the default object ACL entry for the specified entity on the specified bucket.
 
       Args:
-        request: (StorageObjectsGetRequest) input message
+        request: (StorageDefaultObjectAccessControlsGetRequest) input message
         global_params: (StandardQueryParameters, default: None) global arguments
-        download: (Download, default: None) If present, download
-            data from the request via this stream.
       Returns:
-        (Object) The response message.
+        (ObjectAccessControl) The response message.
       """
       config = self.GetMethodConfig('Get')
       return self._RunMethod(
-          config, request, global_params=global_params,
-          download=download)
+          config, request, global_params=global_params)
 
-    def Insert(self, request, global_params=None, upload=None, download=None):
-      """Stores a new object and metadata.
+    def Insert(self, request, global_params=None):
+      """Creates a new default object ACL entry on the specified bucket.
 
       Args:
-        request: (StorageObjectsInsertRequest) input message
+        request: (ObjectAccessControl) input message
         global_params: (StandardQueryParameters, default: None) global arguments
-        upload: (Upload, default: None) If present, upload
-            this stream with the request.
-        download: (Download, default: None) If present, download
-            data from the request via this stream.
       Returns:
-        (Object) The response message.
+        (ObjectAccessControl) The response message.
       """
       config = self.GetMethodConfig('Insert')
-      upload_config = self.GetUploadConfig('Insert')
       return self._RunMethod(
-          config, request, global_params=global_params,
-          upload=upload, upload_config=upload_config,
-          download=download)
+          config, request, global_params=global_params)
 
     def List(self, request, global_params=None):
-      """Retrieves a list of objects matching the criteria.
+      """Retrieves default object ACL entries on the specified bucket.
 
       Args:
-        request: (StorageObjectsListRequest) input message
+        request: (StorageDefaultObjectAccessControlsListRequest) input message
         global_params: (StandardQueryParameters, default: None) global arguments
       Returns:
-        (Objects) The response message.
+        (ObjectAccessControls) The response message.
       """
       config = self.GetMethodConfig('List')
       return self._RunMethod(
           config, request, global_params=global_params)
 
     def Patch(self, request, global_params=None):
-      """Updates an object's metadata. This method supports patch semantics.
+      """Updates a default object ACL entry on the specified bucket. This method supports patch semantics.
 
       Args:
-        request: (StorageObjectsPatchRequest) input message
+        request: (ObjectAccessControl) input message
         global_params: (StandardQueryParameters, default: None) global arguments
       Returns:
-        (Object) The response message.
+        (ObjectAccessControl) The response message.
       """
       config = self.GetMethodConfig('Patch')
       return self._RunMethod(
           config, request, global_params=global_params)
 
-    def Update(self, request, global_params=None, download=None):
-      """Updates an object's metadata.
+    def Update(self, request, global_params=None):
+      """Updates a default object ACL entry on the specified bucket.
 
       Args:
-        request: (StorageObjectsUpdateRequest) input message
+        request: (ObjectAccessControl) input message
         global_params: (StandardQueryParameters, default: None) global arguments
-        download: (Download, default: None) If present, download
-            data from the request via this stream.
       Returns:
-        (Object) The response message.
+        (ObjectAccessControl) The response message.
       """
       config = self.GetMethodConfig('Update')
       return self._RunMethod(
-          config, request, global_params=global_params,
-          download=download)
-
-    def WatchAll(self, request, global_params=None):
-      """Watch for changes on all objects in a bucket.
-
-      Args:
-        request: (StorageObjectsWatchAllRequest) input message
-        global_params: (StandardQueryParameters, default: None) global arguments
-      Returns:
-        (Channel) The response message.
-      """
-      config = self.GetMethodConfig('WatchAll')
-      return self._RunMethod(
           config, request, global_params=global_params)
 
   class ObjectAccessControlsService(base_api.BaseApiService):
@@ -831,165 +730,292 @@
       return self._RunMethod(
           config, request, global_params=global_params)
 
-  class BucketsService(base_api.BaseApiService):
-    """Service class for the buckets resource."""
+  class ObjectsService(base_api.BaseApiService):
+    """Service class for the objects resource."""
 
-    _NAME = u'buckets'
+    _NAME = u'objects'
 
     def __init__(self, client):
-      super(StorageV1.BucketsService, self).__init__(client)
+      super(StorageV1.ObjectsService, self).__init__(client)
       self._method_configs = {
+          'Compose': base_api.ApiMethodInfo(
+              http_method=u'POST',
+              method_id=u'storage.objects.compose',
+              ordered_params=[u'destinationBucket', u'destinationObject'],
+              path_params=[u'destinationBucket', u'destinationObject'],
+              query_params=[u'destinationPredefinedAcl', u'ifGenerationMatch', u'ifMetagenerationMatch'],
+              relative_path=u'b/{destinationBucket}/o/{destinationObject}/compose',
+              request_field=u'composeRequest',
+              request_type_name=u'StorageObjectsComposeRequest',
+              response_type_name=u'Object',
+              supports_download=True,
+          ),
+          'Copy': base_api.ApiMethodInfo(
+              http_method=u'POST',
+              method_id=u'storage.objects.copy',
+              ordered_params=[u'sourceBucket', u'sourceObject', u'destinationBucket', u'destinationObject'],
+              path_params=[u'destinationBucket', u'destinationObject', u'sourceBucket', u'sourceObject'],
+              query_params=[u'destinationPredefinedAcl', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'ifSourceGenerationMatch', u'ifSourceGenerationNotMatch', u'ifSourceMetagenerationMatch', u'ifSourceMetagenerationNotMatch', u'projection', u'sourceGeneration'],
+              relative_path=u'b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}',
+              request_field=u'object',
+              request_type_name=u'StorageObjectsCopyRequest',
+              response_type_name=u'Object',
+              supports_download=True,
+          ),
           'Delete': base_api.ApiMethodInfo(
               http_method=u'DELETE',
-              method_id=u'storage.buckets.delete',
-              ordered_params=[u'bucket'],
-              path_params=[u'bucket'],
-              query_params=[u'ifMetagenerationMatch', u'ifMetagenerationNotMatch'],
-              relative_path=u'b/{bucket}',
+              method_id=u'storage.objects.delete',
+              ordered_params=[u'bucket', u'object'],
+              path_params=[u'bucket', u'object'],
+              query_params=[u'generation', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch'],
+              relative_path=u'b/{bucket}/o/{object}',
               request_field='',
-              request_type_name=u'StorageBucketsDeleteRequest',
-              response_type_name=u'StorageBucketsDeleteResponse',
+              request_type_name=u'StorageObjectsDeleteRequest',
+              response_type_name=u'StorageObjectsDeleteResponse',
               supports_download=False,
           ),
           'Get': base_api.ApiMethodInfo(
               http_method=u'GET',
-              method_id=u'storage.buckets.get',
-              ordered_params=[u'bucket'],
-              path_params=[u'bucket'],
-              query_params=[u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'projection'],
-              relative_path=u'b/{bucket}',
+              method_id=u'storage.objects.get',
+              ordered_params=[u'bucket', u'object'],
+              path_params=[u'bucket', u'object'],
+              query_params=[u'generation', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'projection'],
+              relative_path=u'b/{bucket}/o/{object}',
               request_field='',
-              request_type_name=u'StorageBucketsGetRequest',
-              response_type_name=u'Bucket',
-              supports_download=False,
+              request_type_name=u'StorageObjectsGetRequest',
+              response_type_name=u'Object',
+              supports_download=True,
           ),
           'Insert': base_api.ApiMethodInfo(
               http_method=u'POST',
-              method_id=u'storage.buckets.insert',
-              ordered_params=[u'project'],
-              path_params=[],
-              query_params=[u'predefinedAcl', u'predefinedDefaultObjectAcl', u'project', u'projection'],
-              relative_path=u'b',
-              request_field=u'bucket',
-              request_type_name=u'StorageBucketsInsertRequest',
-              response_type_name=u'Bucket',
-              supports_download=False,
+              method_id=u'storage.objects.insert',
+              ordered_params=[u'bucket'],
+              path_params=[u'bucket'],
+              query_params=[u'contentEncoding', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'name', u'predefinedAcl', u'projection'],
+              relative_path=u'b/{bucket}/o',
+              request_field=u'object',
+              request_type_name=u'StorageObjectsInsertRequest',
+              response_type_name=u'Object',
+              supports_download=True,
           ),
           'List': base_api.ApiMethodInfo(
               http_method=u'GET',
-              method_id=u'storage.buckets.list',
-              ordered_params=[u'project'],
-              path_params=[],
-              query_params=[u'maxResults', u'pageToken', u'prefix', u'project', u'projection'],
-              relative_path=u'b',
+              method_id=u'storage.objects.list',
+              ordered_params=[u'bucket'],
+              path_params=[u'bucket'],
+              query_params=[u'delimiter', u'maxResults', u'pageToken', u'prefix', u'projection', u'versions'],
+              relative_path=u'b/{bucket}/o',
               request_field='',
-              request_type_name=u'StorageBucketsListRequest',
-              response_type_name=u'Buckets',
+              request_type_name=u'StorageObjectsListRequest',
+              response_type_name=u'Objects',
               supports_download=False,
           ),
           'Patch': base_api.ApiMethodInfo(
               http_method=u'PATCH',
-              method_id=u'storage.buckets.patch',
-              ordered_params=[u'bucket'],
-              path_params=[u'bucket'],
-              query_params=[u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'predefinedAcl', u'predefinedDefaultObjectAcl', u'projection'],
-              relative_path=u'b/{bucket}',
-              request_field=u'bucketResource',
-              request_type_name=u'StorageBucketsPatchRequest',
-              response_type_name=u'Bucket',
+              method_id=u'storage.objects.patch',
+              ordered_params=[u'bucket', u'object'],
+              path_params=[u'bucket', u'object'],
+              query_params=[u'generation', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'predefinedAcl', u'projection'],
+              relative_path=u'b/{bucket}/o/{object}',
+              request_field=u'objectResource',
+              request_type_name=u'StorageObjectsPatchRequest',
+              response_type_name=u'Object',
+              supports_download=False,
+          ),
+          'Rewrite': base_api.ApiMethodInfo(
+              http_method=u'POST',
+              method_id=u'storage.objects.rewrite',
+              ordered_params=[u'sourceBucket', u'sourceObject', u'destinationBucket', u'destinationObject'],
+              path_params=[u'destinationBucket', u'destinationObject', u'sourceBucket', u'sourceObject'],
+              query_params=[u'destinationPredefinedAcl', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'ifSourceGenerationMatch', u'ifSourceGenerationNotMatch', u'ifSourceMetagenerationMatch', u'ifSourceMetagenerationNotMatch', u'maxBytesRewrittenPerCall', u'projection', u'rewriteToken', u'sourceGeneration'],
+              relative_path=u'b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}',
+              request_field=u'object',
+              request_type_name=u'StorageObjectsRewriteRequest',
+              response_type_name=u'RewriteResponse',
               supports_download=False,
           ),
           'Update': base_api.ApiMethodInfo(
               http_method=u'PUT',
-              method_id=u'storage.buckets.update',
+              method_id=u'storage.objects.update',
+              ordered_params=[u'bucket', u'object'],
+              path_params=[u'bucket', u'object'],
+              query_params=[u'generation', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'predefinedAcl', u'projection'],
+              relative_path=u'b/{bucket}/o/{object}',
+              request_field=u'objectResource',
+              request_type_name=u'StorageObjectsUpdateRequest',
+              response_type_name=u'Object',
+              supports_download=True,
+          ),
+          'WatchAll': base_api.ApiMethodInfo(
+              http_method=u'POST',
+              method_id=u'storage.objects.watchAll',
               ordered_params=[u'bucket'],
               path_params=[u'bucket'],
-              query_params=[u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'predefinedAcl', u'predefinedDefaultObjectAcl', u'projection'],
-              relative_path=u'b/{bucket}',
-              request_field=u'bucketResource',
-              request_type_name=u'StorageBucketsUpdateRequest',
-              response_type_name=u'Bucket',
+              query_params=[u'delimiter', u'maxResults', u'pageToken', u'prefix', u'projection', u'versions'],
+              relative_path=u'b/{bucket}/o/watch',
+              request_field=u'channel',
+              request_type_name=u'StorageObjectsWatchAllRequest',
+              response_type_name=u'Channel',
               supports_download=False,
           ),
           }
 
       self._upload_configs = {
+          'Insert': base_api.ApiUploadInfo(
+              accept=['*/*'],
+              max_size=None,
+              resumable_multipart=True,
+              resumable_path=u'/resumable/upload/storage/v1/b/{bucket}/o',
+              simple_multipart=True,
+              simple_path=u'/upload/storage/v1/b/{bucket}/o',
+          ),
           }
 
-    def Delete(self, request, global_params=None):
-      """Permanently deletes an empty bucket.
+    def Compose(self, request, global_params=None, download=None):
+      """Concatenates a list of existing objects into a new object in the same bucket.
 
       Args:
-        request: (StorageBucketsDeleteRequest) input message
+        request: (StorageObjectsComposeRequest) input message
+        global_params: (StandardQueryParameters, default: None) global arguments
+        download: (Download, default: None) If present, download
+            data from the request via this stream.
+      Returns:
+        (Object) The response message.
+      """
+      config = self.GetMethodConfig('Compose')
+      return self._RunMethod(
+          config, request, global_params=global_params,
+          download=download)
+
+    def Copy(self, request, global_params=None, download=None):
+      """Copies a source object to a destination object. Optionally overrides metadata.
+
+      Args:
+        request: (StorageObjectsCopyRequest) input message
+        global_params: (StandardQueryParameters, default: None) global arguments
+        download: (Download, default: None) If present, download
+            data from the request via this stream.
+      Returns:
+        (Object) The response message.
+      """
+      config = self.GetMethodConfig('Copy')
+      return self._RunMethod(
+          config, request, global_params=global_params,
+          download=download)
+
+    def Delete(self, request, global_params=None):
+      """Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.
+
+      Args:
+        request: (StorageObjectsDeleteRequest) input message
         global_params: (StandardQueryParameters, default: None) global arguments
       Returns:
-        (StorageBucketsDeleteResponse) The response message.
+        (StorageObjectsDeleteResponse) The response message.
       """
       config = self.GetMethodConfig('Delete')
       return self._RunMethod(
           config, request, global_params=global_params)
 
-    def Get(self, request, global_params=None):
-      """Returns metadata for the specified bucket.
+    def Get(self, request, global_params=None, download=None):
+      """Retrieves an object or its metadata.
 
       Args:
-        request: (StorageBucketsGetRequest) input message
+        request: (StorageObjectsGetRequest) input message
         global_params: (StandardQueryParameters, default: None) global arguments
+        download: (Download, default: None) If present, download
+            data from the request via this stream.
       Returns:
-        (Bucket) The response message.
+        (Object) The response message.
       """
       config = self.GetMethodConfig('Get')
       return self._RunMethod(
-          config, request, global_params=global_params)
+          config, request, global_params=global_params,
+          download=download)
 
-    def Insert(self, request, global_params=None):
-      """Creates a new bucket.
+    def Insert(self, request, global_params=None, upload=None, download=None):
+      """Stores a new object and metadata.
 
       Args:
-        request: (StorageBucketsInsertRequest) input message
+        request: (StorageObjectsInsertRequest) input message
         global_params: (StandardQueryParameters, default: None) global arguments
+        upload: (Upload, default: None) If present, upload
+            this stream with the request.
+        download: (Download, default: None) If present, download
+            data from the request via this stream.
       Returns:
-        (Bucket) The response message.
+        (Object) The response message.
       """
       config = self.GetMethodConfig('Insert')
+      upload_config = self.GetUploadConfig('Insert')
       return self._RunMethod(
-          config, request, global_params=global_params)
+          config, request, global_params=global_params,
+          upload=upload, upload_config=upload_config,
+          download=download)
 
     def List(self, request, global_params=None):
-      """Retrieves a list of buckets for a given project.
+      """Retrieves a list of objects matching the criteria.
 
       Args:
-        request: (StorageBucketsListRequest) input message
+        request: (StorageObjectsListRequest) input message
         global_params: (StandardQueryParameters, default: None) global arguments
       Returns:
-        (Buckets) The response message.
+        (Objects) The response message.
       """
       config = self.GetMethodConfig('List')
       return self._RunMethod(
           config, request, global_params=global_params)
 
     def Patch(self, request, global_params=None):
-      """Updates a bucket. This method supports patch semantics.
+      """Updates an object's metadata. This method supports patch semantics.
 
       Args:
-        request: (StorageBucketsPatchRequest) input message
+        request: (StorageObjectsPatchRequest) input message
         global_params: (StandardQueryParameters, default: None) global arguments
       Returns:
-        (Bucket) The response message.
+        (Object) The response message.
       """
       config = self.GetMethodConfig('Patch')
       return self._RunMethod(
           config, request, global_params=global_params)
 
-    def Update(self, request, global_params=None):
-      """Updates a bucket.
+    def Rewrite(self, request, global_params=None):
+      """Rewrites a source object to a destination object. Optionally overrides metadata.
 
       Args:
-        request: (StorageBucketsUpdateRequest) input message
+        request: (StorageObjectsRewriteRequest) input message
         global_params: (StandardQueryParameters, default: None) global arguments
       Returns:
-        (Bucket) The response message.
+        (RewriteResponse) The response message.
+      """
+      config = self.GetMethodConfig('Rewrite')
+      return self._RunMethod(
+          config, request, global_params=global_params)
+
+    def Update(self, request, global_params=None, download=None):
+      """Updates an object's metadata.
+
+      Args:
+        request: (StorageObjectsUpdateRequest) input message
+        global_params: (StandardQueryParameters, default: None) global arguments
+        download: (Download, default: None) If present, download
+            data from the request via this stream.
+      Returns:
+        (Object) The response message.
       """
       config = self.GetMethodConfig('Update')
       return self._RunMethod(
+          config, request, global_params=global_params,
+          download=download)
+
+    def WatchAll(self, request, global_params=None):
+      """Watch for changes on all objects in a bucket.
+
+      Args:
+        request: (StorageObjectsWatchAllRequest) input message
+        global_params: (StandardQueryParameters, default: None) global arguments
+      Returns:
+        (Channel) The response message.
+      """
+      config = self.GetMethodConfig('WatchAll')
+      return self._RunMethod(
           config, request, global_params=global_params)
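
A minimal usage sketch for the regenerated ObjectsService.Rewrite method above, which this roll introduces. This is illustrative only and hedged on several assumptions: that the sample package is importable as `storage`, that the generated StorageV1 client exposes the service as `client.objects` and can discover default credentials when constructed with no arguments, and that the StorageObjectsRewriteRequest field names mirror the parameters listed in the Rewrite method configuration (sourceBucket, sourceObject, destinationBucket, destinationObject, rewriteToken).

# Illustrative sketch only (assumptions noted above; not part of the patch).
from storage import storage_v1_client
from storage import storage_v1_messages as messages

client = storage_v1_client.StorageV1()  # assumes default credential discovery

request = messages.StorageObjectsRewriteRequest(
    sourceBucket='source-bucket',
    sourceObject='large-object',
    destinationBucket='destination-bucket',
    destinationObject='large-object-copy',
)

# Per the RewriteResponse docstring, done stays False while data remains and
# rewriteToken must be echoed back on each follow-up call until completion.
response = client.objects.Rewrite(request)
while not response.done:
    request.rewriteToken = response.rewriteToken
    response = client.objects.Rewrite(request)

print('Rewrote %d bytes' % response.totalBytesRewritten)

Looping on rewriteToken, rather than relying on a single Copy call, is what lets the new method handle objects larger than the per-call limit suggested by the maxBytesRewrittenPerCall query parameter in the method configuration.
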
diff --git a/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/storage/storage_v1_messages.py b/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/storage/storage_v1_messages.py
index 0e7b585..7148d0a 100644
--- a/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/storage/storage_v1_messages.py
+++ b/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/storage/storage_v1_messages.py
@@ -2,17 +2,18 @@
 
 Lets you store and retrieve potentially-large, immutable data objects.
 """
+# NOTE: This file is autogenerated and should not be edited by hand.
 
 from apitools.base.py import encoding
 from apitools.base.py import extra_types
-from protorpc import message_types
-from protorpc import messages
+from protorpc import message_types as _message_types
+from protorpc import messages as _messages
 
 
 package = 'storage'
 
 
-class Bucket(messages.Message):
+class Bucket(_messages.Message):
   """A bucket.
 
   Messages:
@@ -50,15 +51,15 @@
     projectNumber: The project number of the project the bucket belongs to.
     selfLink: The URI of this bucket.
     storageClass: The bucket's storage class. This defines how objects in the
-      bucket are stored and determines the SLA and the cost of storage.
-      Typical values are STANDARD and DURABLE_REDUCED_AVAILABILITY. Defaults
-      to STANDARD. See the developer's guide for the authoritative list.
+      bucket are stored and determines the SLA and the cost of storage. Values
+      include STANDARD, NEARLINE and DURABLE_REDUCED_AVAILABILITY. Defaults to
+      STANDARD. For more information, see storage classes.
     timeCreated: Creation time of the bucket in RFC 3339 format.
     versioning: The bucket's versioning configuration.
     website: The bucket's website configuration.
   """
 
-  class CorsValueListEntry(messages.Message):
+  class CorsValueListEntry(_messages.Message):
     """A CorsValueListEntry object.
 
     Fields:
@@ -73,12 +74,12 @@
         headers to give permission for the user-agent to share across domains.
     """
 
-    maxAgeSeconds = messages.IntegerField(1, variant=messages.Variant.INT32)
-    method = messages.StringField(2, repeated=True)
-    origin = messages.StringField(3, repeated=True)
-    responseHeader = messages.StringField(4, repeated=True)
+    maxAgeSeconds = _messages.IntegerField(1, variant=_messages.Variant.INT32)
+    method = _messages.StringField(2, repeated=True)
+    origin = _messages.StringField(3, repeated=True)
+    responseHeader = _messages.StringField(4, repeated=True)
 
-  class LifecycleValue(messages.Message):
+  class LifecycleValue(_messages.Message):
     """The bucket's lifecycle configuration. See lifecycle management for more
     information.
 
@@ -90,7 +91,7 @@
         and the condition(s) under which the action will be taken.
     """
 
-    class RuleValueListEntry(messages.Message):
+    class RuleValueListEntry(_messages.Message):
       """A RuleValueListEntry object.
 
       Messages:
@@ -102,16 +103,16 @@
         condition: The condition(s) under which the action will be taken.
       """
 
-      class ActionValue(messages.Message):
+      class ActionValue(_messages.Message):
         """The action to take.
 
         Fields:
           type: Type of the action. Currently, only Delete is supported.
         """
 
-        type = messages.StringField(1)
+        type = _messages.StringField(1)
 
-      class ConditionValue(messages.Message):
+      class ConditionValue(_messages.Message):
         """The condition(s) under which the action will be taken.
 
         Fields:
@@ -129,17 +130,17 @@
             the object.
         """
 
-        age = messages.IntegerField(1, variant=messages.Variant.INT32)
+        age = _messages.IntegerField(1, variant=_messages.Variant.INT32)
         createdBefore = extra_types.DateField(2)
-        isLive = messages.BooleanField(3)
-        numNewerVersions = messages.IntegerField(4, variant=messages.Variant.INT32)
+        isLive = _messages.BooleanField(3)
+        numNewerVersions = _messages.IntegerField(4, variant=_messages.Variant.INT32)
 
-      action = messages.MessageField('ActionValue', 1)
-      condition = messages.MessageField('ConditionValue', 2)
+      action = _messages.MessageField('ActionValue', 1)
+      condition = _messages.MessageField('ConditionValue', 2)
 
-    rule = messages.MessageField('RuleValueListEntry', 1, repeated=True)
+    rule = _messages.MessageField('RuleValueListEntry', 1, repeated=True)
 
-  class LoggingValue(messages.Message):
+  class LoggingValue(_messages.Message):
     """The bucket's logging configuration, which defines the destination
     bucket and optional name prefix for the current bucket's logs.
 
@@ -149,10 +150,10 @@
       logObjectPrefix: A prefix for log object names.
     """
 
-    logBucket = messages.StringField(1)
-    logObjectPrefix = messages.StringField(2)
+    logBucket = _messages.StringField(1)
+    logObjectPrefix = _messages.StringField(2)
 
-  class OwnerValue(messages.Message):
+  class OwnerValue(_messages.Message):
     """The owner of the bucket. This is always the project team's owner group.
 
     Fields:
@@ -160,19 +161,19 @@
       entityId: The ID for the entity.
     """
 
-    entity = messages.StringField(1)
-    entityId = messages.StringField(2)
+    entity = _messages.StringField(1)
+    entityId = _messages.StringField(2)
 
-  class VersioningValue(messages.Message):
+  class VersioningValue(_messages.Message):
     """The bucket's versioning configuration.
 
     Fields:
       enabled: While set to true, versioning is fully enabled for this bucket.
     """
 
-    enabled = messages.BooleanField(1)
+    enabled = _messages.BooleanField(1)
 
-  class WebsiteValue(messages.Message):
+  class WebsiteValue(_messages.Message):
     """The bucket's website configuration.
 
     Fields:
@@ -182,30 +183,30 @@
         not found.
     """
 
-    mainPageSuffix = messages.StringField(1)
-    notFoundPage = messages.StringField(2)
+    mainPageSuffix = _messages.StringField(1)
+    notFoundPage = _messages.StringField(2)
 
-  acl = messages.MessageField('BucketAccessControl', 1, repeated=True)
-  cors = messages.MessageField('CorsValueListEntry', 2, repeated=True)
-  defaultObjectAcl = messages.MessageField('ObjectAccessControl', 3, repeated=True)
-  etag = messages.StringField(4)
-  id = messages.StringField(5)
-  kind = messages.StringField(6, default=u'storage#bucket')
-  lifecycle = messages.MessageField('LifecycleValue', 7)
-  location = messages.StringField(8)
-  logging = messages.MessageField('LoggingValue', 9)
-  metageneration = messages.IntegerField(10)
-  name = messages.StringField(11)
-  owner = messages.MessageField('OwnerValue', 12)
-  projectNumber = messages.IntegerField(13, variant=messages.Variant.UINT64)
-  selfLink = messages.StringField(14)
-  storageClass = messages.StringField(15)
-  timeCreated = message_types.DateTimeField(16)
-  versioning = messages.MessageField('VersioningValue', 17)
-  website = messages.MessageField('WebsiteValue', 18)
+  acl = _messages.MessageField('BucketAccessControl', 1, repeated=True)
+  cors = _messages.MessageField('CorsValueListEntry', 2, repeated=True)
+  defaultObjectAcl = _messages.MessageField('ObjectAccessControl', 3, repeated=True)
+  etag = _messages.StringField(4)
+  id = _messages.StringField(5)
+  kind = _messages.StringField(6, default=u'storage#bucket')
+  lifecycle = _messages.MessageField('LifecycleValue', 7)
+  location = _messages.StringField(8)
+  logging = _messages.MessageField('LoggingValue', 9)
+  metageneration = _messages.IntegerField(10)
+  name = _messages.StringField(11)
+  owner = _messages.MessageField('OwnerValue', 12)
+  projectNumber = _messages.IntegerField(13, variant=_messages.Variant.UINT64)
+  selfLink = _messages.StringField(14)
+  storageClass = _messages.StringField(15)
+  timeCreated = _message_types.DateTimeField(16)
+  versioning = _messages.MessageField('VersioningValue', 17)
+  website = _messages.MessageField('WebsiteValue', 18)
 
 
-class BucketAccessControl(messages.Message):
+class BucketAccessControl(_messages.Message):
   """An access-control entry.
 
   Messages:
@@ -233,7 +234,7 @@
     selfLink: The link to this access-control entry.
   """
 
-  class ProjectTeamValue(messages.Message):
+  class ProjectTeamValue(_messages.Message):
     """The project team associated with the entity, if any.
 
     Fields:
@@ -241,23 +242,23 @@
       team: The team. Can be owners, editors, or viewers.
     """
 
-    projectNumber = messages.StringField(1)
-    team = messages.StringField(2)
+    projectNumber = _messages.StringField(1)
+    team = _messages.StringField(2)
 
-  bucket = messages.StringField(1)
-  domain = messages.StringField(2)
-  email = messages.StringField(3)
-  entity = messages.StringField(4)
-  entityId = messages.StringField(5)
-  etag = messages.StringField(6)
-  id = messages.StringField(7)
-  kind = messages.StringField(8, default=u'storage#bucketAccessControl')
-  projectTeam = messages.MessageField('ProjectTeamValue', 9)
-  role = messages.StringField(10)
-  selfLink = messages.StringField(11)
+  bucket = _messages.StringField(1)
+  domain = _messages.StringField(2)
+  email = _messages.StringField(3)
+  entity = _messages.StringField(4)
+  entityId = _messages.StringField(5)
+  etag = _messages.StringField(6)
+  id = _messages.StringField(7)
+  kind = _messages.StringField(8, default=u'storage#bucketAccessControl')
+  projectTeam = _messages.MessageField('ProjectTeamValue', 9)
+  role = _messages.StringField(10)
+  selfLink = _messages.StringField(11)
 
 
-class BucketAccessControls(messages.Message):
+class BucketAccessControls(_messages.Message):
   """An access-control list.
 
   Fields:
@@ -266,11 +267,11 @@
       entries, this is always storage#bucketAccessControls.
   """
 
-  items = messages.MessageField('BucketAccessControl', 1, repeated=True)
-  kind = messages.StringField(2, default=u'storage#bucketAccessControls')
+  items = _messages.MessageField('BucketAccessControl', 1, repeated=True)
+  kind = _messages.StringField(2, default=u'storage#bucketAccessControls')
 
 
-class Buckets(messages.Message):
+class Buckets(_messages.Message):
   """A list of buckets.
 
   Fields:
@@ -282,12 +283,12 @@
       of results.
   """
 
-  items = messages.MessageField('Bucket', 1, repeated=True)
-  kind = messages.StringField(2, default=u'storage#buckets')
-  nextPageToken = messages.StringField(3)
+  items = _messages.MessageField('Bucket', 1, repeated=True)
+  kind = _messages.StringField(2, default=u'storage#buckets')
+  nextPageToken = _messages.StringField(3)
 
 
-class Channel(messages.Message):
+class Channel(_messages.Message):
   """An notification channel used to watch for resource changes.
 
   Messages:
@@ -313,7 +314,7 @@
   """
 
   @encoding.MapUnrecognizedFields('additionalProperties')
-  class ParamsValue(messages.Message):
+  class ParamsValue(_messages.Message):
     """Additional parameters controlling delivery channel behavior. Optional.
 
     Messages:
@@ -323,7 +324,7 @@
       additionalProperties: Declares a new parameter by name.
     """
 
-    class AdditionalProperty(messages.Message):
+    class AdditionalProperty(_messages.Message):
       """An additional property for a ParamsValue object.
 
       Fields:
@@ -331,24 +332,24 @@
         value: A string attribute.
       """
 
-      key = messages.StringField(1)
-      value = messages.StringField(2)
+      key = _messages.StringField(1)
+      value = _messages.StringField(2)
 
-    additionalProperties = messages.MessageField('AdditionalProperty', 1, repeated=True)
+    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
 
-  address = messages.StringField(1)
-  expiration = messages.IntegerField(2)
-  id = messages.StringField(3)
-  kind = messages.StringField(4, default=u'api#channel')
-  params = messages.MessageField('ParamsValue', 5)
-  payload = messages.BooleanField(6)
-  resourceId = messages.StringField(7)
-  resourceUri = messages.StringField(8)
-  token = messages.StringField(9)
-  type = messages.StringField(10)
+  address = _messages.StringField(1)
+  expiration = _messages.IntegerField(2)
+  id = _messages.StringField(3)
+  kind = _messages.StringField(4, default=u'api#channel')
+  params = _messages.MessageField('ParamsValue', 5)
+  payload = _messages.BooleanField(6)
+  resourceId = _messages.StringField(7)
+  resourceUri = _messages.StringField(8)
+  token = _messages.StringField(9)
+  type = _messages.StringField(10)
 
 
-class ComposeRequest(messages.Message):
+class ComposeRequest(_messages.Message):
   """A Compose request.
 
   Messages:
@@ -361,7 +362,7 @@
       single object.
   """
 
-  class SourceObjectsValueListEntry(messages.Message):
+  class SourceObjectsValueListEntry(_messages.Message):
     """A SourceObjectsValueListEntry object.
 
     Messages:
@@ -376,7 +377,7 @@
         execute.
     """
 
-    class ObjectPreconditionsValue(messages.Message):
+    class ObjectPreconditionsValue(_messages.Message):
       """Conditions that must be met for this operation to execute.
 
       Fields:
@@ -386,18 +387,18 @@
           value or the call will fail.
       """
 
-      ifGenerationMatch = messages.IntegerField(1)
+      ifGenerationMatch = _messages.IntegerField(1)
 
-    generation = messages.IntegerField(1)
-    name = messages.StringField(2)
-    objectPreconditions = messages.MessageField('ObjectPreconditionsValue', 3)
+    generation = _messages.IntegerField(1)
+    name = _messages.StringField(2)
+    objectPreconditions = _messages.MessageField('ObjectPreconditionsValue', 3)
 
-  destination = messages.MessageField('Object', 1)
-  kind = messages.StringField(2, default=u'storage#composeRequest')
-  sourceObjects = messages.MessageField('SourceObjectsValueListEntry', 3, repeated=True)
+  destination = _messages.MessageField('Object', 1)
+  kind = _messages.StringField(2, default=u'storage#composeRequest')
+  sourceObjects = _messages.MessageField('SourceObjectsValueListEntry', 3, repeated=True)
 
 
-class Object(messages.Message):
+class Object(_messages.Message):
   """An object.
 
   Messages:
@@ -416,7 +417,7 @@
     contentLanguage: Content-Language of the object data.
     contentType: Content-Type of the object data.
     crc32c: CRC32c checksum, as described in RFC 4960, Appendix B; encoded
-      using base64.
+      using base64 in big-endian byte order.
     etag: HTTP 1.1 Entity tag for the object.
     generation: The content generation of this object. Used for object
       versioning.
@@ -444,7 +445,7 @@
   """
 
   @encoding.MapUnrecognizedFields('additionalProperties')
-  class MetadataValue(messages.Message):
+  class MetadataValue(_messages.Message):
     """User-provided metadata, in key/value pairs.
 
     Messages:
@@ -454,7 +455,7 @@
       additionalProperties: An individual metadata entry.
     """
 
-    class AdditionalProperty(messages.Message):
+    class AdditionalProperty(_messages.Message):
       """An additional property for a MetadataValue object.
 
       Fields:
@@ -462,12 +463,12 @@
         value: A string attribute.
       """
 
-      key = messages.StringField(1)
-      value = messages.StringField(2)
+      key = _messages.StringField(1)
+      value = _messages.StringField(2)
 
-    additionalProperties = messages.MessageField('AdditionalProperty', 1, repeated=True)
+    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
 
-  class OwnerValue(messages.Message):
+  class OwnerValue(_messages.Message):
     """The owner of the object. This will always be the uploader of the
     object.
 
@@ -476,36 +477,36 @@
       entityId: The ID for the entity.
     """
 
-    entity = messages.StringField(1)
-    entityId = messages.StringField(2)
+    entity = _messages.StringField(1)
+    entityId = _messages.StringField(2)
 
-  acl = messages.MessageField('ObjectAccessControl', 1, repeated=True)
-  bucket = messages.StringField(2)
-  cacheControl = messages.StringField(3)
-  componentCount = messages.IntegerField(4, variant=messages.Variant.INT32)
-  contentDisposition = messages.StringField(5)
-  contentEncoding = messages.StringField(6)
-  contentLanguage = messages.StringField(7)
-  contentType = messages.StringField(8)
-  crc32c = messages.StringField(9)
-  etag = messages.StringField(10)
-  generation = messages.IntegerField(11)
-  id = messages.StringField(12)
-  kind = messages.StringField(13, default=u'storage#object')
-  md5Hash = messages.StringField(14)
-  mediaLink = messages.StringField(15)
-  metadata = messages.MessageField('MetadataValue', 16)
-  metageneration = messages.IntegerField(17)
-  name = messages.StringField(18)
-  owner = messages.MessageField('OwnerValue', 19)
-  selfLink = messages.StringField(20)
-  size = messages.IntegerField(21, variant=messages.Variant.UINT64)
-  storageClass = messages.StringField(22)
-  timeDeleted = message_types.DateTimeField(23)
-  updated = message_types.DateTimeField(24)
+  acl = _messages.MessageField('ObjectAccessControl', 1, repeated=True)
+  bucket = _messages.StringField(2)
+  cacheControl = _messages.StringField(3)
+  componentCount = _messages.IntegerField(4, variant=_messages.Variant.INT32)
+  contentDisposition = _messages.StringField(5)
+  contentEncoding = _messages.StringField(6)
+  contentLanguage = _messages.StringField(7)
+  contentType = _messages.StringField(8)
+  crc32c = _messages.StringField(9)
+  etag = _messages.StringField(10)
+  generation = _messages.IntegerField(11)
+  id = _messages.StringField(12)
+  kind = _messages.StringField(13, default=u'storage#object')
+  md5Hash = _messages.StringField(14)
+  mediaLink = _messages.StringField(15)
+  metadata = _messages.MessageField('MetadataValue', 16)
+  metageneration = _messages.IntegerField(17)
+  name = _messages.StringField(18)
+  owner = _messages.MessageField('OwnerValue', 19)
+  selfLink = _messages.StringField(20)
+  size = _messages.IntegerField(21, variant=_messages.Variant.UINT64)
+  storageClass = _messages.StringField(22)
+  timeDeleted = _message_types.DateTimeField(23)
+  updated = _message_types.DateTimeField(24)
 
 
-class ObjectAccessControl(messages.Message):
+class ObjectAccessControl(_messages.Message):
   """An access-control entry.
 
   Messages:
@@ -534,7 +535,7 @@
     selfLink: The link to this access-control entry.
   """
 
-  class ProjectTeamValue(messages.Message):
+  class ProjectTeamValue(_messages.Message):
     """The project team associated with the entity, if any.
 
     Fields:
@@ -542,25 +543,25 @@
       team: The team. Can be owners, editors, or viewers.
     """
 
-    projectNumber = messages.StringField(1)
-    team = messages.StringField(2)
+    projectNumber = _messages.StringField(1)
+    team = _messages.StringField(2)
 
-  bucket = messages.StringField(1)
-  domain = messages.StringField(2)
-  email = messages.StringField(3)
-  entity = messages.StringField(4)
-  entityId = messages.StringField(5)
-  etag = messages.StringField(6)
-  generation = messages.IntegerField(7)
-  id = messages.StringField(8)
-  kind = messages.StringField(9, default=u'storage#objectAccessControl')
-  object = messages.StringField(10)
-  projectTeam = messages.MessageField('ProjectTeamValue', 11)
-  role = messages.StringField(12)
-  selfLink = messages.StringField(13)
+  bucket = _messages.StringField(1)
+  domain = _messages.StringField(2)
+  email = _messages.StringField(3)
+  entity = _messages.StringField(4)
+  entityId = _messages.StringField(5)
+  etag = _messages.StringField(6)
+  generation = _messages.IntegerField(7)
+  id = _messages.StringField(8)
+  kind = _messages.StringField(9, default=u'storage#objectAccessControl')
+  object = _messages.StringField(10)
+  projectTeam = _messages.MessageField('ProjectTeamValue', 11)
+  role = _messages.StringField(12)
+  selfLink = _messages.StringField(13)
 
 
-class ObjectAccessControls(messages.Message):
+class ObjectAccessControls(_messages.Message):
   """An access-control list.
 
   Fields:
@@ -569,11 +570,11 @@
       entries, this is always storage#objectAccessControls.
   """
 
-  items = messages.MessageField('extra_types.JsonValue', 1, repeated=True)
-  kind = messages.StringField(2, default=u'storage#objectAccessControls')
+  items = _messages.MessageField('extra_types.JsonValue', 1, repeated=True)
+  kind = _messages.StringField(2, default=u'storage#objectAccessControls')
 
 
-class Objects(messages.Message):
+class Objects(_messages.Message):
   """A list of objects.
 
   Fields:
@@ -587,13 +588,40 @@
       and including the requested delimiter.
   """
 
-  items = messages.MessageField('Object', 1, repeated=True)
-  kind = messages.StringField(2, default=u'storage#objects')
-  nextPageToken = messages.StringField(3)
-  prefixes = messages.StringField(4, repeated=True)
+  items = _messages.MessageField('Object', 1, repeated=True)
+  kind = _messages.StringField(2, default=u'storage#objects')
+  nextPageToken = _messages.StringField(3)
+  prefixes = _messages.StringField(4, repeated=True)
 
 
-class StandardQueryParameters(messages.Message):
+class RewriteResponse(_messages.Message):
+  """A rewrite response.
+
+  Fields:
+    done: true if the copy is finished; otherwise, false if the copy is in
+      progress. This property is always present in the response.
+    kind: The kind of item this is.
+    objectSize: The total size of the object being copied in bytes. This
+      property is always present in the response.
+    resource: A resource containing the metadata for the copied-to object.
+      This property is present in the response only when copying completes.
+    rewriteToken: A token to use in subsequent requests to continue copying
+      data. This token is present in the response only when there is more data
+      to copy.
+    totalBytesRewritten: The total bytes written so far, which can be used to
+      provide a waiting user with a progress indicator. This property is
+      always present in the response.
+  """
+
+  done = _messages.BooleanField(1)
+  kind = _messages.StringField(2, default=u'storage#rewriteResponse')
+  objectSize = _messages.IntegerField(3, variant=_messages.Variant.UINT64)
+  resource = _messages.MessageField('Object', 4)
+  rewriteToken = _messages.StringField(5)
+  totalBytesRewritten = _messages.IntegerField(6, variant=_messages.Variant.UINT64)
+
+
+class StandardQueryParameters(_messages.Message):
   """Query parameters accepted by all methods.
 
   Enums:
@@ -616,7 +644,7 @@
       you want to enforce per-user limits.
   """
 
-  class AltValueValuesEnum(messages.Enum):
+  class AltValueValuesEnum(_messages.Enum):
     """Data format for the response.
 
     Values:
@@ -624,17 +652,17 @@
     """
     json = 0
 
-  alt = messages.EnumField('AltValueValuesEnum', 1, default=u'json')
-  fields = messages.StringField(2)
-  key = messages.StringField(3)
-  oauth_token = messages.StringField(4)
-  prettyPrint = messages.BooleanField(5, default=True)
-  quotaUser = messages.StringField(6)
-  trace = messages.StringField(7)
-  userIp = messages.StringField(8)
+  alt = _messages.EnumField('AltValueValuesEnum', 1, default=u'json')
+  fields = _messages.StringField(2)
+  key = _messages.StringField(3)
+  oauth_token = _messages.StringField(4)
+  prettyPrint = _messages.BooleanField(5, default=True)
+  quotaUser = _messages.StringField(6)
+  trace = _messages.StringField(7)
+  userIp = _messages.StringField(8)
 
 
-class StorageBucketAccessControlsDeleteRequest(messages.Message):
+class StorageBucketAccessControlsDeleteRequest(_messages.Message):
   """A StorageBucketAccessControlsDeleteRequest object.
 
   Fields:
@@ -644,15 +672,15 @@
       allAuthenticatedUsers.
   """
 
-  bucket = messages.StringField(1, required=True)
-  entity = messages.StringField(2, required=True)
+  bucket = _messages.StringField(1, required=True)
+  entity = _messages.StringField(2, required=True)
 
 
-class StorageBucketAccessControlsDeleteResponse(messages.Message):
+class StorageBucketAccessControlsDeleteResponse(_messages.Message):
   """An empty StorageBucketAccessControlsDelete response."""
 
 
-class StorageBucketAccessControlsGetRequest(messages.Message):
+class StorageBucketAccessControlsGetRequest(_messages.Message):
   """A StorageBucketAccessControlsGetRequest object.
 
   Fields:
@@ -662,21 +690,21 @@
       allAuthenticatedUsers.
   """
 
-  bucket = messages.StringField(1, required=True)
-  entity = messages.StringField(2, required=True)
+  bucket = _messages.StringField(1, required=True)
+  entity = _messages.StringField(2, required=True)
 
 
-class StorageBucketAccessControlsListRequest(messages.Message):
+class StorageBucketAccessControlsListRequest(_messages.Message):
   """A StorageBucketAccessControlsListRequest object.
 
   Fields:
     bucket: Name of a bucket.
   """
 
-  bucket = messages.StringField(1, required=True)
+  bucket = _messages.StringField(1, required=True)
 
 
-class StorageBucketsDeleteRequest(messages.Message):
+class StorageBucketsDeleteRequest(_messages.Message):
   """A StorageBucketsDeleteRequest object.
 
   Fields:
@@ -687,16 +715,16 @@
       metageneration does not match this value.
   """
 
-  bucket = messages.StringField(1, required=True)
-  ifMetagenerationMatch = messages.IntegerField(2)
-  ifMetagenerationNotMatch = messages.IntegerField(3)
+  bucket = _messages.StringField(1, required=True)
+  ifMetagenerationMatch = _messages.IntegerField(2)
+  ifMetagenerationNotMatch = _messages.IntegerField(3)
 
 
-class StorageBucketsDeleteResponse(messages.Message):
+class StorageBucketsDeleteResponse(_messages.Message):
   """An empty StorageBucketsDelete response."""
 
 
-class StorageBucketsGetRequest(messages.Message):
+class StorageBucketsGetRequest(_messages.Message):
   """A StorageBucketsGetRequest object.
 
   Enums:
@@ -712,7 +740,7 @@
     projection: Set of properties to return. Defaults to noAcl.
   """
 
-  class ProjectionValueValuesEnum(messages.Enum):
+  class ProjectionValueValuesEnum(_messages.Enum):
     """Set of properties to return. Defaults to noAcl.
 
     Values:
@@ -722,13 +750,13 @@
     full = 0
     noAcl = 1
 
-  bucket = messages.StringField(1, required=True)
-  ifMetagenerationMatch = messages.IntegerField(2)
-  ifMetagenerationNotMatch = messages.IntegerField(3)
-  projection = messages.EnumField('ProjectionValueValuesEnum', 4)
+  bucket = _messages.StringField(1, required=True)
+  ifMetagenerationMatch = _messages.IntegerField(2)
+  ifMetagenerationNotMatch = _messages.IntegerField(3)
+  projection = _messages.EnumField('ProjectionValueValuesEnum', 4)
 
 
-class StorageBucketsInsertRequest(messages.Message):
+class StorageBucketsInsertRequest(_messages.Message):
   """A StorageBucketsInsertRequest object.
 
   Enums:
@@ -751,7 +779,7 @@
       defaults to full.
   """
 
-  class PredefinedAclValueValuesEnum(messages.Enum):
+  class PredefinedAclValueValuesEnum(_messages.Enum):
     """Apply a predefined set of access controls to this bucket.
 
     Values:
@@ -771,7 +799,7 @@
     publicRead = 3
     publicReadWrite = 4
 
-  class PredefinedDefaultObjectAclValueValuesEnum(messages.Enum):
+  class PredefinedDefaultObjectAclValueValuesEnum(_messages.Enum):
     """Apply a predefined set of default object access controls to this
     bucket.
 
@@ -795,7 +823,7 @@
     projectPrivate = 4
     publicRead = 5
 
-  class ProjectionValueValuesEnum(messages.Enum):
+  class ProjectionValueValuesEnum(_messages.Enum):
     """Set of properties to return. Defaults to noAcl, unless the bucket
     resource specifies acl or defaultObjectAcl properties, when it defaults to
     full.
@@ -807,14 +835,14 @@
     full = 0
     noAcl = 1
 
-  bucket = messages.MessageField('Bucket', 1)
-  predefinedAcl = messages.EnumField('PredefinedAclValueValuesEnum', 2)
-  predefinedDefaultObjectAcl = messages.EnumField('PredefinedDefaultObjectAclValueValuesEnum', 3)
-  project = messages.StringField(4, required=True)
-  projection = messages.EnumField('ProjectionValueValuesEnum', 5)
+  bucket = _messages.MessageField('Bucket', 1)
+  predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 2)
+  predefinedDefaultObjectAcl = _messages.EnumField('PredefinedDefaultObjectAclValueValuesEnum', 3)
+  project = _messages.StringField(4, required=True)
+  projection = _messages.EnumField('ProjectionValueValuesEnum', 5)
 
 
-class StorageBucketsListRequest(messages.Message):
+class StorageBucketsListRequest(_messages.Message):
   """A StorageBucketsListRequest object.
 
   Enums:
@@ -829,7 +857,7 @@
     projection: Set of properties to return. Defaults to noAcl.
   """
 
-  class ProjectionValueValuesEnum(messages.Enum):
+  class ProjectionValueValuesEnum(_messages.Enum):
     """Set of properties to return. Defaults to noAcl.
 
     Values:
@@ -839,14 +867,14 @@
     full = 0
     noAcl = 1
 
-  maxResults = messages.IntegerField(1, variant=messages.Variant.UINT32)
-  pageToken = messages.StringField(2)
-  prefix = messages.StringField(3)
-  project = messages.StringField(4, required=True)
-  projection = messages.EnumField('ProjectionValueValuesEnum', 5)
+  maxResults = _messages.IntegerField(1, variant=_messages.Variant.UINT32)
+  pageToken = _messages.StringField(2)
+  prefix = _messages.StringField(3)
+  project = _messages.StringField(4, required=True)
+  projection = _messages.EnumField('ProjectionValueValuesEnum', 5)
 
 
-class StorageBucketsPatchRequest(messages.Message):
+class StorageBucketsPatchRequest(_messages.Message):
   """A StorageBucketsPatchRequest object.
 
   Enums:
@@ -870,7 +898,7 @@
     projection: Set of properties to return. Defaults to full.
   """
 
-  class PredefinedAclValueValuesEnum(messages.Enum):
+  class PredefinedAclValueValuesEnum(_messages.Enum):
     """Apply a predefined set of access controls to this bucket.
 
     Values:
@@ -890,7 +918,7 @@
     publicRead = 3
     publicReadWrite = 4
 
-  class PredefinedDefaultObjectAclValueValuesEnum(messages.Enum):
+  class PredefinedDefaultObjectAclValueValuesEnum(_messages.Enum):
     """Apply a predefined set of default object access controls to this
     bucket.
 
@@ -914,7 +942,7 @@
     projectPrivate = 4
     publicRead = 5
 
-  class ProjectionValueValuesEnum(messages.Enum):
+  class ProjectionValueValuesEnum(_messages.Enum):
     """Set of properties to return. Defaults to full.
 
     Values:
@@ -924,16 +952,16 @@
     full = 0
     noAcl = 1
 
-  bucket = messages.StringField(1, required=True)
-  bucketResource = messages.MessageField('Bucket', 2)
-  ifMetagenerationMatch = messages.IntegerField(3)
-  ifMetagenerationNotMatch = messages.IntegerField(4)
-  predefinedAcl = messages.EnumField('PredefinedAclValueValuesEnum', 5)
-  predefinedDefaultObjectAcl = messages.EnumField('PredefinedDefaultObjectAclValueValuesEnum', 6)
-  projection = messages.EnumField('ProjectionValueValuesEnum', 7)
+  bucket = _messages.StringField(1, required=True)
+  bucketResource = _messages.MessageField('Bucket', 2)
+  ifMetagenerationMatch = _messages.IntegerField(3)
+  ifMetagenerationNotMatch = _messages.IntegerField(4)
+  predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 5)
+  predefinedDefaultObjectAcl = _messages.EnumField('PredefinedDefaultObjectAclValueValuesEnum', 6)
+  projection = _messages.EnumField('ProjectionValueValuesEnum', 7)
 
 
-class StorageBucketsUpdateRequest(messages.Message):
+class StorageBucketsUpdateRequest(_messages.Message):
   """A StorageBucketsUpdateRequest object.
 
   Enums:
@@ -957,7 +985,7 @@
     projection: Set of properties to return. Defaults to full.
   """
 
-  class PredefinedAclValueValuesEnum(messages.Enum):
+  class PredefinedAclValueValuesEnum(_messages.Enum):
     """Apply a predefined set of access controls to this bucket.
 
     Values:
@@ -977,7 +1005,7 @@
     publicRead = 3
     publicReadWrite = 4
 
-  class PredefinedDefaultObjectAclValueValuesEnum(messages.Enum):
+  class PredefinedDefaultObjectAclValueValuesEnum(_messages.Enum):
     """Apply a predefined set of default object access controls to this
     bucket.
 
@@ -1001,7 +1029,7 @@
     projectPrivate = 4
     publicRead = 5
 
-  class ProjectionValueValuesEnum(messages.Enum):
+  class ProjectionValueValuesEnum(_messages.Enum):
     """Set of properties to return. Defaults to full.
 
     Values:
@@ -1011,20 +1039,20 @@
     full = 0
     noAcl = 1
 
-  bucket = messages.StringField(1, required=True)
-  bucketResource = messages.MessageField('Bucket', 2)
-  ifMetagenerationMatch = messages.IntegerField(3)
-  ifMetagenerationNotMatch = messages.IntegerField(4)
-  predefinedAcl = messages.EnumField('PredefinedAclValueValuesEnum', 5)
-  predefinedDefaultObjectAcl = messages.EnumField('PredefinedDefaultObjectAclValueValuesEnum', 6)
-  projection = messages.EnumField('ProjectionValueValuesEnum', 7)
+  bucket = _messages.StringField(1, required=True)
+  bucketResource = _messages.MessageField('Bucket', 2)
+  ifMetagenerationMatch = _messages.IntegerField(3)
+  ifMetagenerationNotMatch = _messages.IntegerField(4)
+  predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 5)
+  predefinedDefaultObjectAcl = _messages.EnumField('PredefinedDefaultObjectAclValueValuesEnum', 6)
+  projection = _messages.EnumField('ProjectionValueValuesEnum', 7)
 
 
-class StorageChannelsStopResponse(messages.Message):
+class StorageChannelsStopResponse(_messages.Message):
   """An empty StorageChannelsStop response."""
 
 
-class StorageDefaultObjectAccessControlsDeleteRequest(messages.Message):
+class StorageDefaultObjectAccessControlsDeleteRequest(_messages.Message):
   """A StorageDefaultObjectAccessControlsDeleteRequest object.
 
   Fields:
@@ -1034,15 +1062,15 @@
       allAuthenticatedUsers.
   """
 
-  bucket = messages.StringField(1, required=True)
-  entity = messages.StringField(2, required=True)
+  bucket = _messages.StringField(1, required=True)
+  entity = _messages.StringField(2, required=True)
 
 
-class StorageDefaultObjectAccessControlsDeleteResponse(messages.Message):
+class StorageDefaultObjectAccessControlsDeleteResponse(_messages.Message):
   """An empty StorageDefaultObjectAccessControlsDelete response."""
 
 
-class StorageDefaultObjectAccessControlsGetRequest(messages.Message):
+class StorageDefaultObjectAccessControlsGetRequest(_messages.Message):
   """A StorageDefaultObjectAccessControlsGetRequest object.
 
   Fields:
@@ -1052,11 +1080,11 @@
       allAuthenticatedUsers.
   """
 
-  bucket = messages.StringField(1, required=True)
-  entity = messages.StringField(2, required=True)
+  bucket = _messages.StringField(1, required=True)
+  entity = _messages.StringField(2, required=True)
 
 
-class StorageDefaultObjectAccessControlsListRequest(messages.Message):
+class StorageDefaultObjectAccessControlsListRequest(_messages.Message):
   """A StorageDefaultObjectAccessControlsListRequest object.
 
   Fields:
@@ -1067,12 +1095,12 @@
       the bucket's current metageneration does not match the given value.
   """
 
-  bucket = messages.StringField(1, required=True)
-  ifMetagenerationMatch = messages.IntegerField(2)
-  ifMetagenerationNotMatch = messages.IntegerField(3)
+  bucket = _messages.StringField(1, required=True)
+  ifMetagenerationMatch = _messages.IntegerField(2)
+  ifMetagenerationNotMatch = _messages.IntegerField(3)
 
 
-class StorageObjectAccessControlsDeleteRequest(messages.Message):
+class StorageObjectAccessControlsDeleteRequest(_messages.Message):
   """A StorageObjectAccessControlsDeleteRequest object.
 
   Fields:
@@ -1085,17 +1113,17 @@
     object: Name of the object.
   """
 
-  bucket = messages.StringField(1, required=True)
-  entity = messages.StringField(2, required=True)
-  generation = messages.IntegerField(3)
-  object = messages.StringField(4, required=True)
+  bucket = _messages.StringField(1, required=True)
+  entity = _messages.StringField(2, required=True)
+  generation = _messages.IntegerField(3)
+  object = _messages.StringField(4, required=True)
 
 
-class StorageObjectAccessControlsDeleteResponse(messages.Message):
+class StorageObjectAccessControlsDeleteResponse(_messages.Message):
   """An empty StorageObjectAccessControlsDelete response."""
 
 
-class StorageObjectAccessControlsGetRequest(messages.Message):
+class StorageObjectAccessControlsGetRequest(_messages.Message):
   """A StorageObjectAccessControlsGetRequest object.
 
   Fields:
@@ -1108,13 +1136,13 @@
     object: Name of the object.
   """
 
-  bucket = messages.StringField(1, required=True)
-  entity = messages.StringField(2, required=True)
-  generation = messages.IntegerField(3)
-  object = messages.StringField(4, required=True)
+  bucket = _messages.StringField(1, required=True)
+  entity = _messages.StringField(2, required=True)
+  generation = _messages.IntegerField(3)
+  object = _messages.StringField(4, required=True)
 
 
-class StorageObjectAccessControlsInsertRequest(messages.Message):
+class StorageObjectAccessControlsInsertRequest(_messages.Message):
   """A StorageObjectAccessControlsInsertRequest object.
 
   Fields:
@@ -1126,13 +1154,13 @@
       request body.
   """
 
-  bucket = messages.StringField(1, required=True)
-  generation = messages.IntegerField(2)
-  object = messages.StringField(3, required=True)
-  objectAccessControl = messages.MessageField('ObjectAccessControl', 4)
+  bucket = _messages.StringField(1, required=True)
+  generation = _messages.IntegerField(2)
+  object = _messages.StringField(3, required=True)
+  objectAccessControl = _messages.MessageField('ObjectAccessControl', 4)
 
 
-class StorageObjectAccessControlsListRequest(messages.Message):
+class StorageObjectAccessControlsListRequest(_messages.Message):
   """A StorageObjectAccessControlsListRequest object.
 
   Fields:
@@ -1142,12 +1170,12 @@
     object: Name of the object.
   """
 
-  bucket = messages.StringField(1, required=True)
-  generation = messages.IntegerField(2)
-  object = messages.StringField(3, required=True)
+  bucket = _messages.StringField(1, required=True)
+  generation = _messages.IntegerField(2)
+  object = _messages.StringField(3, required=True)
 
 
-class StorageObjectAccessControlsPatchRequest(messages.Message):
+class StorageObjectAccessControlsPatchRequest(_messages.Message):
   """A StorageObjectAccessControlsPatchRequest object.
 
   Fields:
@@ -1162,14 +1190,14 @@
       request body.
   """
 
-  bucket = messages.StringField(1, required=True)
-  entity = messages.StringField(2, required=True)
-  generation = messages.IntegerField(3)
-  object = messages.StringField(4, required=True)
-  objectAccessControl = messages.MessageField('ObjectAccessControl', 5)
+  bucket = _messages.StringField(1, required=True)
+  entity = _messages.StringField(2, required=True)
+  generation = _messages.IntegerField(3)
+  object = _messages.StringField(4, required=True)
+  objectAccessControl = _messages.MessageField('ObjectAccessControl', 5)
 
 
-class StorageObjectAccessControlsUpdateRequest(messages.Message):
+class StorageObjectAccessControlsUpdateRequest(_messages.Message):
   """A StorageObjectAccessControlsUpdateRequest object.
 
   Fields:
@@ -1184,14 +1212,14 @@
       request body.
   """
 
-  bucket = messages.StringField(1, required=True)
-  entity = messages.StringField(2, required=True)
-  generation = messages.IntegerField(3)
-  object = messages.StringField(4, required=True)
-  objectAccessControl = messages.MessageField('ObjectAccessControl', 5)
+  bucket = _messages.StringField(1, required=True)
+  entity = _messages.StringField(2, required=True)
+  generation = _messages.IntegerField(3)
+  object = _messages.StringField(4, required=True)
+  objectAccessControl = _messages.MessageField('ObjectAccessControl', 5)
 
 
-class StorageObjectsComposeRequest(messages.Message):
+class StorageObjectsComposeRequest(_messages.Message):
   """A StorageObjectsComposeRequest object.
 
   Enums:
@@ -1211,7 +1239,7 @@
       object's current metageneration matches the given value.
   """
 
-  class DestinationPredefinedAclValueValuesEnum(messages.Enum):
+  class DestinationPredefinedAclValueValuesEnum(_messages.Enum):
     """Apply a predefined set of access controls to the destination object.
 
     Values:
@@ -1234,15 +1262,15 @@
     projectPrivate = 4
     publicRead = 5
 
-  composeRequest = messages.MessageField('ComposeRequest', 1)
-  destinationBucket = messages.StringField(2, required=True)
-  destinationObject = messages.StringField(3, required=True)
-  destinationPredefinedAcl = messages.EnumField('DestinationPredefinedAclValueValuesEnum', 4)
-  ifGenerationMatch = messages.IntegerField(5)
-  ifMetagenerationMatch = messages.IntegerField(6)
+  composeRequest = _messages.MessageField('ComposeRequest', 1)
+  destinationBucket = _messages.StringField(2, required=True)
+  destinationObject = _messages.StringField(3, required=True)
+  destinationPredefinedAcl = _messages.EnumField('DestinationPredefinedAclValueValuesEnum', 4)
+  ifGenerationMatch = _messages.IntegerField(5)
+  ifMetagenerationMatch = _messages.IntegerField(6)
 
 
-class StorageObjectsCopyRequest(messages.Message):
+class StorageObjectsCopyRequest(_messages.Message):
   """A StorageObjectsCopyRequest object.
 
   Enums:
@@ -1287,7 +1315,7 @@
     sourceObject: Name of the source object.
   """
 
-  class DestinationPredefinedAclValueValuesEnum(messages.Enum):
+  class DestinationPredefinedAclValueValuesEnum(_messages.Enum):
     """Apply a predefined set of access controls to the destination object.
 
     Values:
@@ -1310,7 +1338,7 @@
     projectPrivate = 4
     publicRead = 5
 
-  class ProjectionValueValuesEnum(messages.Enum):
+  class ProjectionValueValuesEnum(_messages.Enum):
     """Set of properties to return. Defaults to noAcl, unless the object
     resource specifies the acl property, when it defaults to full.
 
@@ -1321,25 +1349,25 @@
     full = 0
     noAcl = 1
 
-  destinationBucket = messages.StringField(1, required=True)
-  destinationObject = messages.StringField(2, required=True)
-  destinationPredefinedAcl = messages.EnumField('DestinationPredefinedAclValueValuesEnum', 3)
-  ifGenerationMatch = messages.IntegerField(4)
-  ifGenerationNotMatch = messages.IntegerField(5)
-  ifMetagenerationMatch = messages.IntegerField(6)
-  ifMetagenerationNotMatch = messages.IntegerField(7)
-  ifSourceGenerationMatch = messages.IntegerField(8)
-  ifSourceGenerationNotMatch = messages.IntegerField(9)
-  ifSourceMetagenerationMatch = messages.IntegerField(10)
-  ifSourceMetagenerationNotMatch = messages.IntegerField(11)
-  object = messages.MessageField('Object', 12)
-  projection = messages.EnumField('ProjectionValueValuesEnum', 13)
-  sourceBucket = messages.StringField(14, required=True)
-  sourceGeneration = messages.IntegerField(15)
-  sourceObject = messages.StringField(16, required=True)
+  destinationBucket = _messages.StringField(1, required=True)
+  destinationObject = _messages.StringField(2, required=True)
+  destinationPredefinedAcl = _messages.EnumField('DestinationPredefinedAclValueValuesEnum', 3)
+  ifGenerationMatch = _messages.IntegerField(4)
+  ifGenerationNotMatch = _messages.IntegerField(5)
+  ifMetagenerationMatch = _messages.IntegerField(6)
+  ifMetagenerationNotMatch = _messages.IntegerField(7)
+  ifSourceGenerationMatch = _messages.IntegerField(8)
+  ifSourceGenerationNotMatch = _messages.IntegerField(9)
+  ifSourceMetagenerationMatch = _messages.IntegerField(10)
+  ifSourceMetagenerationNotMatch = _messages.IntegerField(11)
+  object = _messages.MessageField('Object', 12)
+  projection = _messages.EnumField('ProjectionValueValuesEnum', 13)
+  sourceBucket = _messages.StringField(14, required=True)
+  sourceGeneration = _messages.IntegerField(15)
+  sourceObject = _messages.StringField(16, required=True)
 
 
-class StorageObjectsDeleteRequest(messages.Message):
+class StorageObjectsDeleteRequest(_messages.Message):
   """A StorageObjectsDeleteRequest object.
 
   Fields:
@@ -1357,20 +1385,20 @@
     object: Name of the object.
   """
 
-  bucket = messages.StringField(1, required=True)
-  generation = messages.IntegerField(2)
-  ifGenerationMatch = messages.IntegerField(3)
-  ifGenerationNotMatch = messages.IntegerField(4)
-  ifMetagenerationMatch = messages.IntegerField(5)
-  ifMetagenerationNotMatch = messages.IntegerField(6)
-  object = messages.StringField(7, required=True)
+  bucket = _messages.StringField(1, required=True)
+  generation = _messages.IntegerField(2)
+  ifGenerationMatch = _messages.IntegerField(3)
+  ifGenerationNotMatch = _messages.IntegerField(4)
+  ifMetagenerationMatch = _messages.IntegerField(5)
+  ifMetagenerationNotMatch = _messages.IntegerField(6)
+  object = _messages.StringField(7, required=True)
 
 
-class StorageObjectsDeleteResponse(messages.Message):
+class StorageObjectsDeleteResponse(_messages.Message):
   """An empty StorageObjectsDelete response."""
 
 
-class StorageObjectsGetRequest(messages.Message):
+class StorageObjectsGetRequest(_messages.Message):
   """A StorageObjectsGetRequest object.
 
   Enums:
@@ -1392,7 +1420,7 @@
     projection: Set of properties to return. Defaults to noAcl.
   """
 
-  class ProjectionValueValuesEnum(messages.Enum):
+  class ProjectionValueValuesEnum(_messages.Enum):
     """Set of properties to return. Defaults to noAcl.
 
     Values:
@@ -1402,17 +1430,17 @@
     full = 0
     noAcl = 1
 
-  bucket = messages.StringField(1, required=True)
-  generation = messages.IntegerField(2)
-  ifGenerationMatch = messages.IntegerField(3)
-  ifGenerationNotMatch = messages.IntegerField(4)
-  ifMetagenerationMatch = messages.IntegerField(5)
-  ifMetagenerationNotMatch = messages.IntegerField(6)
-  object = messages.StringField(7, required=True)
-  projection = messages.EnumField('ProjectionValueValuesEnum', 8)
+  bucket = _messages.StringField(1, required=True)
+  generation = _messages.IntegerField(2)
+  ifGenerationMatch = _messages.IntegerField(3)
+  ifGenerationNotMatch = _messages.IntegerField(4)
+  ifMetagenerationMatch = _messages.IntegerField(5)
+  ifMetagenerationNotMatch = _messages.IntegerField(6)
+  object = _messages.StringField(7, required=True)
+  projection = _messages.EnumField('ProjectionValueValuesEnum', 8)
 
 
-class StorageObjectsInsertRequest(messages.Message):
+class StorageObjectsInsertRequest(_messages.Message):
   """A StorageObjectsInsertRequest object.
 
   Enums:
@@ -1446,7 +1474,7 @@
       object resource specifies the acl property, when it defaults to full.
   """
 
-  class PredefinedAclValueValuesEnum(messages.Enum):
+  class PredefinedAclValueValuesEnum(_messages.Enum):
     """Apply a predefined set of access controls to this object.
 
     Values:
@@ -1469,7 +1497,7 @@
     projectPrivate = 4
     publicRead = 5
 
-  class ProjectionValueValuesEnum(messages.Enum):
+  class ProjectionValueValuesEnum(_messages.Enum):
     """Set of properties to return. Defaults to noAcl, unless the object
     resource specifies the acl property, when it defaults to full.
 
@@ -1480,19 +1508,19 @@
     full = 0
     noAcl = 1
 
-  bucket = messages.StringField(1, required=True)
-  contentEncoding = messages.StringField(2)
-  ifGenerationMatch = messages.IntegerField(3)
-  ifGenerationNotMatch = messages.IntegerField(4)
-  ifMetagenerationMatch = messages.IntegerField(5)
-  ifMetagenerationNotMatch = messages.IntegerField(6)
-  name = messages.StringField(7)
-  object = messages.MessageField('Object', 8)
-  predefinedAcl = messages.EnumField('PredefinedAclValueValuesEnum', 9)
-  projection = messages.EnumField('ProjectionValueValuesEnum', 10)
+  bucket = _messages.StringField(1, required=True)
+  contentEncoding = _messages.StringField(2)
+  ifGenerationMatch = _messages.IntegerField(3)
+  ifGenerationNotMatch = _messages.IntegerField(4)
+  ifMetagenerationMatch = _messages.IntegerField(5)
+  ifMetagenerationNotMatch = _messages.IntegerField(6)
+  name = _messages.StringField(7)
+  object = _messages.MessageField('Object', 8)
+  predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 9)
+  projection = _messages.EnumField('ProjectionValueValuesEnum', 10)
 
 
-class StorageObjectsListRequest(messages.Message):
+class StorageObjectsListRequest(_messages.Message):
   """A StorageObjectsListRequest object.
 
   Enums:
@@ -1507,15 +1535,16 @@
       prefixes. Duplicate prefixes are omitted.
     maxResults: Maximum number of items plus prefixes to return. As duplicate
       prefixes are omitted, fewer total results may be returned than
-      requested.
+      requested. The default value of this parameter is 1,000 items.
     pageToken: A previously-returned page token representing part of the
       larger set of results to view.
     prefix: Filter results to objects whose names begin with this prefix.
     projection: Set of properties to return. Defaults to noAcl.
-    versions: If true, lists all versions of a file as distinct results.
+    versions: If true, lists all versions of an object as distinct results.
+      The default is false. For more information, see Object Versioning.
   """
 
-  class ProjectionValueValuesEnum(messages.Enum):
+  class ProjectionValueValuesEnum(_messages.Enum):
     """Set of properties to return. Defaults to noAcl.
 
     Values:
@@ -1525,16 +1554,16 @@
     full = 0
     noAcl = 1
 
-  bucket = messages.StringField(1, required=True)
-  delimiter = messages.StringField(2)
-  maxResults = messages.IntegerField(3, variant=messages.Variant.UINT32)
-  pageToken = messages.StringField(4)
-  prefix = messages.StringField(5)
-  projection = messages.EnumField('ProjectionValueValuesEnum', 6)
-  versions = messages.BooleanField(7)
+  bucket = _messages.StringField(1, required=True)
+  delimiter = _messages.StringField(2)
+  maxResults = _messages.IntegerField(3, variant=_messages.Variant.UINT32)
+  pageToken = _messages.StringField(4)
+  prefix = _messages.StringField(5)
+  projection = _messages.EnumField('ProjectionValueValuesEnum', 6)
+  versions = _messages.BooleanField(7)
 
 
-class StorageObjectsPatchRequest(messages.Message):
+class StorageObjectsPatchRequest(_messages.Message):
   """A StorageObjectsPatchRequest object.
 
   Enums:
@@ -1560,7 +1589,7 @@
     projection: Set of properties to return. Defaults to full.
   """
 
-  class PredefinedAclValueValuesEnum(messages.Enum):
+  class PredefinedAclValueValuesEnum(_messages.Enum):
     """Apply a predefined set of access controls to this object.
 
     Values:
@@ -1583,7 +1612,7 @@
     projectPrivate = 4
     publicRead = 5
 
-  class ProjectionValueValuesEnum(messages.Enum):
+  class ProjectionValueValuesEnum(_messages.Enum):
     """Set of properties to return. Defaults to full.
 
     Values:
@@ -1593,19 +1622,131 @@
     full = 0
     noAcl = 1
 
-  bucket = messages.StringField(1, required=True)
-  generation = messages.IntegerField(2)
-  ifGenerationMatch = messages.IntegerField(3)
-  ifGenerationNotMatch = messages.IntegerField(4)
-  ifMetagenerationMatch = messages.IntegerField(5)
-  ifMetagenerationNotMatch = messages.IntegerField(6)
-  object = messages.StringField(7, required=True)
-  objectResource = messages.MessageField('Object', 8)
-  predefinedAcl = messages.EnumField('PredefinedAclValueValuesEnum', 9)
-  projection = messages.EnumField('ProjectionValueValuesEnum', 10)
+  bucket = _messages.StringField(1, required=True)
+  generation = _messages.IntegerField(2)
+  ifGenerationMatch = _messages.IntegerField(3)
+  ifGenerationNotMatch = _messages.IntegerField(4)
+  ifMetagenerationMatch = _messages.IntegerField(5)
+  ifMetagenerationNotMatch = _messages.IntegerField(6)
+  object = _messages.StringField(7, required=True)
+  objectResource = _messages.MessageField('Object', 8)
+  predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 9)
+  projection = _messages.EnumField('ProjectionValueValuesEnum', 10)
 
 
-class StorageObjectsUpdateRequest(messages.Message):
+class StorageObjectsRewriteRequest(_messages.Message):
+  """A StorageObjectsRewriteRequest object.
+
+  Enums:
+    DestinationPredefinedAclValueValuesEnum: Apply a predefined set of access
+      controls to the destination object.
+    ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl,
+      unless the object resource specifies the acl property, when it defaults
+      to full.
+
+  Fields:
+    destinationBucket: Name of the bucket in which to store the new object.
+      Overrides the provided object metadata's bucket value, if any.
+    destinationObject: Name of the new object. Required when the object
+      metadata is not otherwise provided. Overrides the object metadata's name
+      value, if any.
+    destinationPredefinedAcl: Apply a predefined set of access controls to the
+      destination object.
+    ifGenerationMatch: Makes the operation conditional on whether the
+      destination object's current generation matches the given value.
+    ifGenerationNotMatch: Makes the operation conditional on whether the
+      destination object's current generation does not match the given value.
+    ifMetagenerationMatch: Makes the operation conditional on whether the
+      destination object's current metageneration matches the given value.
+    ifMetagenerationNotMatch: Makes the operation conditional on whether the
+      destination object's current metageneration does not match the given
+      value.
+    ifSourceGenerationMatch: Makes the operation conditional on whether the
+      source object's generation matches the given value.
+    ifSourceGenerationNotMatch: Makes the operation conditional on whether the
+      source object's generation does not match the given value.
+    ifSourceMetagenerationMatch: Makes the operation conditional on whether
+      the source object's current metageneration matches the given value.
+    ifSourceMetagenerationNotMatch: Makes the operation conditional on whether
+      the source object's current metageneration does not match the given
+      value.
+    maxBytesRewrittenPerCall: The maximum number of bytes that will be
+      rewritten per rewrite request. Most callers shouldn't need to specify
+      this parameter - it is primarily in place to support testing. If
+      specified the value must be an integral multiple of 1 MiB (1048576).
+      Also, this only applies to requests where the source and destination
+      span locations and/or storage classes. Finally, this value must not
+      change across rewrite calls else you'll get an error that the
+      rewriteToken is invalid.
+    object: An Object resource to be passed as the request body.
+    projection: Set of properties to return. Defaults to noAcl, unless the
+      object resource specifies the acl property, when it defaults to full.
+    rewriteToken: Include this field (from the previous rewrite response) on
+      each rewrite request after the first one, until the rewrite response
+      'done' flag is true. Calls that provide a rewriteToken can omit all
+      other request fields, but if included those fields must match the values
+      provided in the first rewrite request.
+    sourceBucket: Name of the bucket in which to find the source object.
+    sourceGeneration: If present, selects a specific revision of the source
+      object (as opposed to the latest version, the default).
+    sourceObject: Name of the source object.
+  """
+
+  class DestinationPredefinedAclValueValuesEnum(_messages.Enum):
+    """Apply a predefined set of access controls to the destination object.
+
+    Values:
+      authenticatedRead: Object owner gets OWNER access, and
+        allAuthenticatedUsers get READER access.
+      bucketOwnerFullControl: Object owner gets OWNER access, and project team
+        owners get OWNER access.
+      bucketOwnerRead: Object owner gets OWNER access, and project team owners
+        get READER access.
+      private: Object owner gets OWNER access.
+      projectPrivate: Object owner gets OWNER access, and project team members
+        get access according to their roles.
+      publicRead: Object owner gets OWNER access, and allUsers get READER
+        access.
+    """
+    authenticatedRead = 0
+    bucketOwnerFullControl = 1
+    bucketOwnerRead = 2
+    private = 3
+    projectPrivate = 4
+    publicRead = 5
+
+  class ProjectionValueValuesEnum(_messages.Enum):
+    """Set of properties to return. Defaults to noAcl, unless the object
+    resource specifies the acl property, when it defaults to full.
+
+    Values:
+      full: Include all properties.
+      noAcl: Omit the acl property.
+    """
+    full = 0
+    noAcl = 1
+
+  destinationBucket = _messages.StringField(1, required=True)
+  destinationObject = _messages.StringField(2, required=True)
+  destinationPredefinedAcl = _messages.EnumField('DestinationPredefinedAclValueValuesEnum', 3)
+  ifGenerationMatch = _messages.IntegerField(4)
+  ifGenerationNotMatch = _messages.IntegerField(5)
+  ifMetagenerationMatch = _messages.IntegerField(6)
+  ifMetagenerationNotMatch = _messages.IntegerField(7)
+  ifSourceGenerationMatch = _messages.IntegerField(8)
+  ifSourceGenerationNotMatch = _messages.IntegerField(9)
+  ifSourceMetagenerationMatch = _messages.IntegerField(10)
+  ifSourceMetagenerationNotMatch = _messages.IntegerField(11)
+  maxBytesRewrittenPerCall = _messages.IntegerField(12)
+  object = _messages.MessageField('Object', 13)
+  projection = _messages.EnumField('ProjectionValueValuesEnum', 14)
+  rewriteToken = _messages.StringField(15)
+  sourceBucket = _messages.StringField(16, required=True)
+  sourceGeneration = _messages.IntegerField(17)
+  sourceObject = _messages.StringField(18, required=True)
+
+
+class StorageObjectsUpdateRequest(_messages.Message):
   """A StorageObjectsUpdateRequest object.
 
   Enums:
@@ -1631,7 +1772,7 @@
     projection: Set of properties to return. Defaults to full.
   """
 
-  class PredefinedAclValueValuesEnum(messages.Enum):
+  class PredefinedAclValueValuesEnum(_messages.Enum):
     """Apply a predefined set of access controls to this object.
 
     Values:
@@ -1654,7 +1795,7 @@
     projectPrivate = 4
     publicRead = 5
 
-  class ProjectionValueValuesEnum(messages.Enum):
+  class ProjectionValueValuesEnum(_messages.Enum):
     """Set of properties to return. Defaults to full.
 
     Values:
@@ -1664,19 +1805,19 @@
     full = 0
     noAcl = 1
 
-  bucket = messages.StringField(1, required=True)
-  generation = messages.IntegerField(2)
-  ifGenerationMatch = messages.IntegerField(3)
-  ifGenerationNotMatch = messages.IntegerField(4)
-  ifMetagenerationMatch = messages.IntegerField(5)
-  ifMetagenerationNotMatch = messages.IntegerField(6)
-  object = messages.StringField(7, required=True)
-  objectResource = messages.MessageField('Object', 8)
-  predefinedAcl = messages.EnumField('PredefinedAclValueValuesEnum', 9)
-  projection = messages.EnumField('ProjectionValueValuesEnum', 10)
+  bucket = _messages.StringField(1, required=True)
+  generation = _messages.IntegerField(2)
+  ifGenerationMatch = _messages.IntegerField(3)
+  ifGenerationNotMatch = _messages.IntegerField(4)
+  ifMetagenerationMatch = _messages.IntegerField(5)
+  ifMetagenerationNotMatch = _messages.IntegerField(6)
+  object = _messages.StringField(7, required=True)
+  objectResource = _messages.MessageField('Object', 8)
+  predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 9)
+  projection = _messages.EnumField('ProjectionValueValuesEnum', 10)
 
 
-class StorageObjectsWatchAllRequest(messages.Message):
+class StorageObjectsWatchAllRequest(_messages.Message):
   """A StorageObjectsWatchAllRequest object.
 
   Enums:
@@ -1692,15 +1833,16 @@
       prefixes. Duplicate prefixes are omitted.
     maxResults: Maximum number of items plus prefixes to return. As duplicate
       prefixes are omitted, fewer total results may be returned than
-      requested.
+      requested. The default value of this parameter is 1,000 items.
     pageToken: A previously-returned page token representing part of the
       larger set of results to view.
     prefix: Filter results to objects whose names begin with this prefix.
     projection: Set of properties to return. Defaults to noAcl.
-    versions: If true, lists all versions of a file as distinct results.
+    versions: If true, lists all versions of an object as distinct results.
+      The default is false. For more information, see Object Versioning.
   """
 
-  class ProjectionValueValuesEnum(messages.Enum):
+  class ProjectionValueValuesEnum(_messages.Enum):
     """Set of properties to return. Defaults to noAcl.
 
     Values:
@@ -1710,13 +1852,13 @@
     full = 0
     noAcl = 1
 
-  bucket = messages.StringField(1, required=True)
-  channel = messages.MessageField('Channel', 2)
-  delimiter = messages.StringField(3)
-  maxResults = messages.IntegerField(4, variant=messages.Variant.UINT32)
-  pageToken = messages.StringField(5)
-  prefix = messages.StringField(6)
-  projection = messages.EnumField('ProjectionValueValuesEnum', 7)
-  versions = messages.BooleanField(8)
+  bucket = _messages.StringField(1, required=True)
+  channel = _messages.MessageField('Channel', 2)
+  delimiter = _messages.StringField(3)
+  maxResults = _messages.IntegerField(4, variant=_messages.Variant.UINT32)
+  pageToken = _messages.StringField(5)
+  prefix = _messages.StringField(6)
+  projection = _messages.EnumField('ProjectionValueValuesEnum', 7)
+  versions = _messages.BooleanField(8)
 
 
diff --git a/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/uploads_test.py b/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/uploads_test.py
index ad4416f..4eb5aba 100644
--- a/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/uploads_test.py
+++ b/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/uploads_test.py
@@ -103,6 +103,20 @@
         response = self.__InsertFile(filename, request=request)
         self.assertEqual(size, response.size)
 
+    def testStreamMedia(self):
+        filename = 'ten_meg_file'
+        size = 10 << 20
+        self.__ResetUpload(size, auto_transfer=False)
+        self.__upload.strategy = 'resumable'
+        self.__upload.total_size = size
+        request = self.__InsertRequest(filename)
+        initial_response = self.__client.objects.Insert(
+            request, upload=self.__upload)
+        self.assertIsNotNone(initial_response)
+        self.assertEqual(0, self.__buffer.tell())
+        self.__upload.StreamMedia()
+        self.assertEqual(size, self.__buffer.tell())
+
     def testBreakAndResumeUpload(self):
         filename = ('ten_meg_file_' +
                     ''.join(random.sample(string.ascii_letters, 5)))
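The new testStreamMedia case above drives a resumable upload by hand: auto_transfer is disabled so objects.Insert only opens the upload session, and upload.StreamMedia() then pushes the bytes. A rough equivalent outside the test harness, as a sketch (the client is assumed to be an already-constructed generated storage client, and the bucket name is a placeholder):

    import six
    from apitools.base.py import transfer
    import storage_v1_messages as messages

    size = 10 << 20  # same size the test uses; the stream contents are arbitrary
    stream = six.BytesIO(b'\0' * size)

    # Build the Upload explicitly and keep auto_transfer off so Insert only
    # initiates the resumable session instead of sending the payload.
    upload = transfer.Upload.FromStream(
        stream, 'application/octet-stream', total_size=size, auto_transfer=False)
    upload.strategy = 'resumable'

    request = messages.StorageObjectsInsertRequest(
        bucket='some-bucket', name='ten_meg_file')
    client.objects.Insert(request, upload=upload)  # opens the session
    upload.StreamMedia()  # streams the remaining bytes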
diff --git a/catapult/third_party/gsutil/third_party/apitools/setup.py b/catapult/third_party/gsutil/third_party/apitools/setup.py
index 0da6c70..19a6d58 100755
--- a/catapult/third_party/gsutil/third_party/apitools/setup.py
+++ b/catapult/third_party/gsutil/third_party/apitools/setup.py
@@ -31,7 +31,7 @@
     'httplib2>=0.8',
     'oauth2client>=1.4.8',
     'protorpc>=0.9.1',
-    'six>=1.8.0',
+    'six>=1.9.0',
     ]
 
 CLI_PACKAGES = [
@@ -47,14 +47,15 @@
 
 CONSOLE_SCRIPTS = [
     'gen_client = apitools.gen.gen_client:run_main',
-    ]
+    'oauth2l = apitools.scripts.oauth2l:run_main [cli]',
+]
 
 py_version = platform.python_version()
 
 if py_version < '2.7':
     REQUIRED_PACKAGES.append('argparse>=1.2.1')
 
-_APITOOLS_VERSION = '0.4.8'
+_APITOOLS_VERSION = '0.4.10'
 
 with open('README.rst') as fileobj:
     README = fileobj.read()
@@ -78,6 +79,11 @@
         'cli': CLI_PACKAGES,
         'testing': TESTING_PACKAGES,
         },
+    # Add in any packaged data.
+    include_package_data=True,
+    package_data={
+        'apitools.data': ['*'],
+    },
     # PyPI package information.
     classifiers=[
         'License :: OSI Approved :: Apache Software License',
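A packaging note on the setup.py changes above: the new oauth2l console script carries the [cli] extra marker, so it is only installed when that extra's dependencies are requested. Assuming the distribution is published under the name google-apitools (the name= argument is outside this hunk), the same entry point can be resolved programmatically through setuptools, for example:

    # Sketch only; 'google-apitools' is the assumed distribution name and the
    # cli extra must already be installed for this lookup to succeed.
    import pkg_resources

    main = pkg_resources.load_entry_point(
        'google-apitools', 'console_scripts', 'oauth2l')
    main()  # same function the oauth2l command maps to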
diff --git a/catapult/third_party/gsutil/third_party/apitools/tox.ini b/catapult/third_party/gsutil/third_party/apitools/tox.ini
index 5cd3c74..8baa1c8 100644
--- a/catapult/third_party/gsutil/third_party/apitools/tox.ini
+++ b/catapult/third_party/gsutil/third_party/apitools/tox.ini
@@ -61,3 +61,18 @@
     {[testenv:cover]deps}
     coveralls
 passenv = TRAVIS*
+
+[testenv:transfer_coverage]
+basepython =
+    python2.7
+deps =
+    mock
+    nose
+    unittest2
+    coverage
+commands =
+    coverage run --branch -p samples/storage_sample/downloads_test.py
+    coverage run --branch -p samples/storage_sample/uploads_test.py
+    coverage run --branch -p apitools/base/py/transfer_test.py
+    coverage combine
+    coverage html
diff --git a/catapult/third_party/gsutil/third_party/boto/boto/pyami/launch_ami.py b/catapult/third_party/gsutil/third_party/boto/boto/pyami/launch_ami.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/boto/services/bs.py b/catapult/third_party/gsutil/third_party/boto/boto/services/bs.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/boto/services/result.py b/catapult/third_party/gsutil/third_party/boto/boto/services/result.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/scripts/git-release-notes.py b/catapult/third_party/gsutil/third_party/boto/scripts/git-release-notes.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/devpay/test_s3.py b/catapult/third_party/gsutil/third_party/boto/tests/devpay/test_s3.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/fps/test.py b/catapult/third_party/gsutil/third_party/boto/tests/fps/test.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/fps/test_verify_signature.py b/catapult/third_party/gsutil/third_party/boto/tests/fps/test_verify_signature.py
index e89abd5..efc037f 100644
--- a/catapult/third_party/gsutil/third_party/boto/tests/fps/test_verify_signature.py
+++ b/catapult/third_party/gsutil/third_party/boto/tests/fps/test_verify_signature.py
@@ -1,12 +1,12 @@
-from boto.fps.connection import FPSConnection
-
-def test():
-    conn = FPSConnection()
-    # example response from the docs
-    params = 'expiry=08%2F2015&signature=ynDukZ9%2FG77uSJVb5YM0cadwHVwYKPMKOO3PNvgADbv6VtymgBxeOWEhED6KGHsGSvSJnMWDN%2FZl639AkRe9Ry%2F7zmn9CmiM%2FZkp1XtshERGTqi2YL10GwQpaH17MQqOX3u1cW4LlyFoLy4celUFBPq1WM2ZJnaNZRJIEY%2FvpeVnCVK8VIPdY3HMxPAkNi5zeF2BbqH%2BL2vAWef6vfHkNcJPlOuOl6jP4E%2B58F24ni%2B9ek%2FQH18O4kw%2FUJ7ZfKwjCCI13%2BcFybpofcKqddq8CuUJj5Ii7Pdw1fje7ktzHeeNhF0r9siWcYmd4JaxTP3NmLJdHFRq2T%2FgsF3vK9m3gw%3D%3D&signatureVersion=2&signatureMethod=RSA-SHA1&certificateUrl=https%3A%2F%2Ffps.sandbox.amazonaws.com%2Fcerts%2F090909%2FPKICert.pem&tokenID=A5BB3HUNAZFJ5CRXIPH72LIODZUNAUZIVP7UB74QNFQDSQ9MN4HPIKISQZWPLJXF&status=SC&callerReference=callerReferenceMultiUse1'
-    endpoint = 'http://vamsik.desktop.amazon.com:8080/ipn.jsp'
-    conn.verify_signature(endpoint, params)
-
-
-if __name__ == '__main__':
-    test()
+from boto.fps.connection import FPSConnection
+
+def test():
+    conn = FPSConnection()
+    # example response from the docs
+    params = 'expiry=08%2F2015&signature=ynDukZ9%2FG77uSJVb5YM0cadwHVwYKPMKOO3PNvgADbv6VtymgBxeOWEhED6KGHsGSvSJnMWDN%2FZl639AkRe9Ry%2F7zmn9CmiM%2FZkp1XtshERGTqi2YL10GwQpaH17MQqOX3u1cW4LlyFoLy4celUFBPq1WM2ZJnaNZRJIEY%2FvpeVnCVK8VIPdY3HMxPAkNi5zeF2BbqH%2BL2vAWef6vfHkNcJPlOuOl6jP4E%2B58F24ni%2B9ek%2FQH18O4kw%2FUJ7ZfKwjCCI13%2BcFybpofcKqddq8CuUJj5Ii7Pdw1fje7ktzHeeNhF0r9siWcYmd4JaxTP3NmLJdHFRq2T%2FgsF3vK9m3gw%3D%3D&signatureVersion=2&signatureMethod=RSA-SHA1&certificateUrl=https%3A%2F%2Ffps.sandbox.amazonaws.com%2Fcerts%2F090909%2FPKICert.pem&tokenID=A5BB3HUNAZFJ5CRXIPH72LIODZUNAUZIVP7UB74QNFQDSQ9MN4HPIKISQZWPLJXF&status=SC&callerReference=callerReferenceMultiUse1'
+    endpoint = 'http://vamsik.desktop.amazon.com:8080/ipn.jsp'
+    conn.verify_signature(endpoint, params)
+
+
+if __name__ == '__main__':
+    test()
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/integration/cloudformation/test_connection.py b/catapult/third_party/gsutil/third_party/boto/tests/integration/cloudformation/test_connection.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/integration/datapipeline/test_layer1.py b/catapult/third_party/gsutil/third_party/boto/tests/integration/datapipeline/test_layer1.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/integration/ec2/vpc/test_connection.py b/catapult/third_party/gsutil/third_party/boto/tests/integration/ec2/vpc/test_connection.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/integration/mws/test.py b/catapult/third_party/gsutil/third_party/boto/tests/integration/mws/test.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/mturk/.gitignore b/catapult/third_party/gsutil/third_party/boto/tests/mturk/.gitignore
index 039e4d4..8917c2c 100644
--- a/catapult/third_party/gsutil/third_party/boto/tests/mturk/.gitignore
+++ b/catapult/third_party/gsutil/third_party/boto/tests/mturk/.gitignore
@@ -1 +1 @@
-local.py
+local.py
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/mturk/_init_environment.py b/catapult/third_party/gsutil/third_party/boto/tests/mturk/_init_environment.py
index a930697..3ca5cf6 100644
--- a/catapult/third_party/gsutil/third_party/boto/tests/mturk/_init_environment.py
+++ b/catapult/third_party/gsutil/third_party/boto/tests/mturk/_init_environment.py
@@ -1,28 +1,28 @@
-import os
-import functools
-
-live_connection = False
-mturk_host = 'mechanicalturk.sandbox.amazonaws.com'
-external_url = 'http://www.example.com/'
-
-
-SetHostMTurkConnection = None
-
-def config_environment():
-    global SetHostMTurkConnection
-    try:
-            local = os.path.join(os.path.dirname(__file__), 'local.py')
-            execfile(local)
-    except:
-            pass
-
-    if live_connection:
-            #TODO: you must set the auth credentials to something valid
-            from boto.mturk.connection import MTurkConnection
-    else:
-            # Here the credentials must be set, but it doesn't matter what
-            #  they're set to.
-            os.environ.setdefault('AWS_ACCESS_KEY_ID', 'foo')
-            os.environ.setdefault('AWS_SECRET_ACCESS_KEY', 'bar')
-            from mocks import MTurkConnection
-    SetHostMTurkConnection = functools.partial(MTurkConnection, host=mturk_host)
+import os
+import functools
+
+live_connection = False
+mturk_host = 'mechanicalturk.sandbox.amazonaws.com'
+external_url = 'http://www.example.com/'
+
+
+SetHostMTurkConnection = None
+
+def config_environment():
+    global SetHostMTurkConnection
+    try:
+            local = os.path.join(os.path.dirname(__file__), 'local.py')
+            execfile(local)
+    except:
+            pass
+
+    if live_connection:
+            #TODO: you must set the auth credentials to something valid
+            from boto.mturk.connection import MTurkConnection
+    else:
+            # Here the credentials must be set, but it doesn't matter what
+            #  they're set to.
+            os.environ.setdefault('AWS_ACCESS_KEY_ID', 'foo')
+            os.environ.setdefault('AWS_SECRET_ACCESS_KEY', 'bar')
+            from mocks import MTurkConnection
+    SetHostMTurkConnection = functools.partial(MTurkConnection, host=mturk_host)
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/mturk/common.py b/catapult/third_party/gsutil/third_party/boto/tests/mturk/common.py
index 7fb705e..151714a 100644
--- a/catapult/third_party/gsutil/third_party/boto/tests/mturk/common.py
+++ b/catapult/third_party/gsutil/third_party/boto/tests/mturk/common.py
@@ -1,45 +1,45 @@
-import unittest
-import uuid
-import datetime
-
-from boto.mturk.question import (
-        Question, QuestionContent, AnswerSpecification, FreeTextAnswer,
-)
-from _init_environment import SetHostMTurkConnection, config_environment
-
-class MTurkCommon(unittest.TestCase):
-        def setUp(self):
-                config_environment()
-                self.conn = SetHostMTurkConnection()
-
-        @staticmethod
-        def get_question():
-                # create content for a question
-                qn_content = QuestionContent()
-                qn_content.append_field('Title', 'Boto no hit type question content')
-                qn_content.append_field('Text', 'What is a boto no hit type?')
-
-                # create the question specification
-                qn = Question(identifier=str(uuid.uuid4()),
-                        content=qn_content,
-                        answer_spec=AnswerSpecification(FreeTextAnswer()))
-                return qn
-
-        @staticmethod
-        def get_hit_params():
-                return dict(
-                        lifetime=datetime.timedelta(minutes=65),
-                        max_assignments=2,
-                        title='Boto create_hit title',
-                        description='Boto create_hit description',
-                        keywords=['boto', 'test'],
-                        reward=0.23,
-                        duration=datetime.timedelta(minutes=6),
-                        approval_delay=60*60,
-                        annotation='An annotation from boto create_hit test',
-                        response_groups=['Minimal',
-                                'HITDetail',
-                                'HITQuestion',
-                                'HITAssignmentSummary',],
-                        )
-
+import unittest
+import uuid
+import datetime
+
+from boto.mturk.question import (
+        Question, QuestionContent, AnswerSpecification, FreeTextAnswer,
+)
+from _init_environment import SetHostMTurkConnection, config_environment
+
+class MTurkCommon(unittest.TestCase):
+        def setUp(self):
+                config_environment()
+                self.conn = SetHostMTurkConnection()
+
+        @staticmethod
+        def get_question():
+                # create content for a question
+                qn_content = QuestionContent()
+                qn_content.append_field('Title', 'Boto no hit type question content')
+                qn_content.append_field('Text', 'What is a boto no hit type?')
+
+                # create the question specification
+                qn = Question(identifier=str(uuid.uuid4()),
+                        content=qn_content,
+                        answer_spec=AnswerSpecification(FreeTextAnswer()))
+                return qn
+
+        @staticmethod
+        def get_hit_params():
+                return dict(
+                        lifetime=datetime.timedelta(minutes=65),
+                        max_assignments=2,
+                        title='Boto create_hit title',
+                        description='Boto create_hit description',
+                        keywords=['boto', 'test'],
+                        reward=0.23,
+                        duration=datetime.timedelta(minutes=6),
+                        approval_delay=60*60,
+                        annotation='An annotation from boto create_hit test',
+                        response_groups=['Minimal',
+                                'HITDetail',
+                                'HITQuestion',
+                                'HITAssignmentSummary',],
+                        )
+
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/mturk/create_hit_test.py b/catapult/third_party/gsutil/third_party/boto/tests/mturk/create_hit_test.py
index a690d80..ea134b4 100644
--- a/catapult/third_party/gsutil/third_party/boto/tests/mturk/create_hit_test.py
+++ b/catapult/third_party/gsutil/third_party/boto/tests/mturk/create_hit_test.py
@@ -1,21 +1,21 @@
-import unittest
-import os
-from boto.mturk.question import QuestionForm
-
-from common import MTurkCommon
-
-class TestHITCreation(MTurkCommon):
-	def testCallCreateHitWithOneQuestion(self):
-		create_hit_rs = self.conn.create_hit(
-			question=self.get_question(),
-			**self.get_hit_params()
-			)
-
-	def testCallCreateHitWithQuestionForm(self):
-		create_hit_rs = self.conn.create_hit(
-			questions=QuestionForm([self.get_question()]),
-			**self.get_hit_params()
-			)
-
-if __name__ == '__main__':
-	unittest.main()
+import unittest
+import os
+from boto.mturk.question import QuestionForm
+
+from common import MTurkCommon
+
+class TestHITCreation(MTurkCommon):
+	def testCallCreateHitWithOneQuestion(self):
+		create_hit_rs = self.conn.create_hit(
+			question=self.get_question(),
+			**self.get_hit_params()
+			)
+
+	def testCallCreateHitWithQuestionForm(self):
+		create_hit_rs = self.conn.create_hit(
+			questions=QuestionForm([self.get_question()]),
+			**self.get_hit_params()
+			)
+
+if __name__ == '__main__':
+	unittest.main()
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/mturk/hit_persistence.py b/catapult/third_party/gsutil/third_party/boto/tests/mturk/hit_persistence.py
index 6991856..04ebd0c 100644
--- a/catapult/third_party/gsutil/third_party/boto/tests/mturk/hit_persistence.py
+++ b/catapult/third_party/gsutil/third_party/boto/tests/mturk/hit_persistence.py
@@ -1,27 +1,27 @@
-import unittest
-import pickle
-
-from common import MTurkCommon
-
-class TestHITPersistence(MTurkCommon):
-	def create_hit_result(self):
-		return self.conn.create_hit(
-			question=self.get_question(), **self.get_hit_params()
-			)
-
-	def test_pickle_hit_result(self):
-		result = self.create_hit_result()
-		new_result = pickle.loads(pickle.dumps(result))
-
-	def test_pickle_deserialized_version(self):
-		"""
-		It seems the technique used to store and reload the object must
-		result in an equivalent object, or subsequent pickles may fail.
-		This tests a double-pickle to elicit that error.
-		"""
-		result = self.create_hit_result()
-		new_result = pickle.loads(pickle.dumps(result))
-		pickle.dumps(new_result)
-
-if __name__ == '__main__':
-	unittest.main()
+import unittest
+import pickle
+
+from common import MTurkCommon
+
+class TestHITPersistence(MTurkCommon):
+	def create_hit_result(self):
+		return self.conn.create_hit(
+			question=self.get_question(), **self.get_hit_params()
+			)
+
+	def test_pickle_hit_result(self):
+		result = self.create_hit_result()
+		new_result = pickle.loads(pickle.dumps(result))
+
+	def test_pickle_deserialized_version(self):
+		"""
+		It seems the technique used to store and reload the object must
+		result in an equivalent object, or subsequent pickles may fail.
+		This tests a double-pickle to elicit that error.
+		"""
+		result = self.create_hit_result()
+		new_result = pickle.loads(pickle.dumps(result))
+		pickle.dumps(new_result)
+
+if __name__ == '__main__':
+	unittest.main()
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/mturk/mocks.py b/catapult/third_party/gsutil/third_party/boto/tests/mturk/mocks.py
index d3f0f2e..0b2c52c 100644
--- a/catapult/third_party/gsutil/third_party/boto/tests/mturk/mocks.py
+++ b/catapult/third_party/gsutil/third_party/boto/tests/mturk/mocks.py
@@ -1,11 +1,11 @@
-from boto.mturk.connection import MTurkConnection as RealMTurkConnection
-
-class MTurkConnection(RealMTurkConnection):
-	"""
-	Mock MTurkConnection that doesn't connect, but instead just prepares
-	the request and captures information about its usage.
-	"""
-	
-	def _process_request(self, *args, **kwargs):
-		saved_args = self.__dict__.setdefault('_mock_saved_args', dict())
-		saved_args['_process_request'] = (args, kwargs)
+from boto.mturk.connection import MTurkConnection as RealMTurkConnection
+
+class MTurkConnection(RealMTurkConnection):
+	"""
+	Mock MTurkConnection that doesn't connect, but instead just prepares
+	the request and captures information about its usage.
+	"""
+	
+	def _process_request(self, *args, **kwargs):
+		saved_args = self.__dict__.setdefault('_mock_saved_args', dict())
+		saved_args['_process_request'] = (args, kwargs)
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/mturk/run-doctest.py b/catapult/third_party/gsutil/third_party/boto/tests/mturk/run-doctest.py
index e6d9f83..802b773 100644
--- a/catapult/third_party/gsutil/third_party/boto/tests/mturk/run-doctest.py
+++ b/catapult/third_party/gsutil/third_party/boto/tests/mturk/run-doctest.py
@@ -1,13 +1,13 @@
-import argparse
-import doctest
-
-parser = argparse.ArgumentParser(
-	description="Run a test by name"
-	)
-parser.add_argument('test_name')
-args = parser.parse_args()
-
-doctest.testfile(
-	args.test_name,
-	optionflags=doctest.REPORT_ONLY_FIRST_FAILURE
-	)
+import argparse
+import doctest
+
+parser = argparse.ArgumentParser(
+	description="Run a test by name"
+	)
+parser.add_argument('test_name')
+args = parser.parse_args()
+
+doctest.testfile(
+	args.test_name,
+	optionflags=doctest.REPORT_ONLY_FIRST_FAILURE
+	)
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/mturk/test_disable_hit.py b/catapult/third_party/gsutil/third_party/boto/tests/mturk/test_disable_hit.py
index 67c68e5..2d9bd9b 100644
--- a/catapult/third_party/gsutil/third_party/boto/tests/mturk/test_disable_hit.py
+++ b/catapult/third_party/gsutil/third_party/boto/tests/mturk/test_disable_hit.py
@@ -1,11 +1,11 @@
-from tests.mturk.support import unittest
-
-from common import MTurkCommon
-from boto.mturk.connection import MTurkRequestError
-
-class TestDisableHITs(MTurkCommon):
-	def test_disable_invalid_hit(self):
-		self.assertRaises(MTurkRequestError, self.conn.disable_hit, 'foo')
-
-if __name__ == '__main__':
-	unittest.main()
+from tests.mturk.support import unittest
+
+from common import MTurkCommon
+from boto.mturk.connection import MTurkRequestError
+
+class TestDisableHITs(MTurkCommon):
+	def test_disable_invalid_hit(self):
+		self.assertRaises(MTurkRequestError, self.conn.disable_hit, 'foo')
+
+if __name__ == '__main__':
+	unittest.main()
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/test.py b/catapult/third_party/gsutil/third_party/boto/tests/test.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/beanstalk/test_layer1.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/beanstalk/test_layer1.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/cloudformation/test_connection.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/cloudformation/test_connection.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/cloudformation/test_stack.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/cloudformation/test_stack.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/cloudfront/test_invalidation_list.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/cloudfront/test_invalidation_list.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/cloudsearch/test_connection.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/cloudsearch/test_connection.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/cloudsearch/test_document.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/cloudsearch/test_document.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/cloudsearch/test_search.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/cloudsearch/test_search.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/cloudsearch2/test_connection.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/cloudsearch2/test_connection.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/cloudsearch2/test_document.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/cloudsearch2/test_document.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/cloudsearch2/test_search.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/cloudsearch2/test_search.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/cloudsearchdomain/test_cloudsearchdomain.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/cloudsearchdomain/test_cloudsearchdomain.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/cloudtrail/test_layer1.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/cloudtrail/test_layer1.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/dynamodb/test_batch.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/dynamodb/test_batch.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/dynamodb/test_layer2.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/dynamodb/test_layer2.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/dynamodb/test_types.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/dynamodb/test_types.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/ec2/autoscale/test_group.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/ec2/autoscale/test_group.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/ec2/cloudwatch/test_connection.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/ec2/cloudwatch/test_connection.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/ec2/elb/test_listener.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/ec2/elb/test_listener.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/ec2/elb/test_loadbalancer.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/ec2/elb/test_loadbalancer.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/ec2/test_connection.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/ec2/test_connection.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/ec2/test_ec2object.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/ec2/test_ec2object.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/ec2/test_instance.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/ec2/test_instance.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/ec2/test_instancestatus.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/ec2/test_instancestatus.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/ec2/test_instancetype.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/ec2/test_instancetype.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/ec2/test_networkinterface.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/ec2/test_networkinterface.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/ec2/test_securitygroup.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/ec2/test_securitygroup.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/emr/test_instance_group_args.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/emr/test_instance_group_args.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/glacier/test_concurrent.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/glacier/test_concurrent.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/glacier/test_vault.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/glacier/test_vault.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/iam/test_connection.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/iam/test_connection.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/logs/test_layer1.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/logs/test_layer1.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/manage/test_ssh.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/manage/test_ssh.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/mws/test_connection.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/mws/test_connection.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/mws/test_response.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/mws/test_response.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/provider/test_provider.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/provider/test_provider.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/rds/test_connection.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/rds/test_connection.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/rds2/test_connection.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/rds2/test_connection.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/route53/test_connection.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/route53/test_connection.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/route53/test_zone.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/route53/test_zone.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/s3/test_cors_configuration.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/s3/test_cors_configuration.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/s3/test_key.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/s3/test_key.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/s3/test_uri.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/s3/test_uri.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/ses/test_identity.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/ses/test_identity.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/sns/test_connection.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/sns/test_connection.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/sqs/test_connection.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/sqs/test_connection.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/boto/tests/unit/sts/test_connection.py b/catapult/third_party/gsutil/third_party/boto/tests/unit/sts/test_connection.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/crcmod/.hgeol b/catapult/third_party/gsutil/third_party/crcmod/.hgeol
index 40fd184..3bce4ef 100644
--- a/catapult/third_party/gsutil/third_party/crcmod/.hgeol
+++ b/catapult/third_party/gsutil/third_party/crcmod/.hgeol
@@ -1,14 +1,14 @@
-[patterns]
-MANIFEST.in = native
-README = native
-LICENSE = native
-changelog = native
-**.c = native
-**.h = native
-**.py = native
-**.txt = native
-**.rst = native
-
-**.bat = CRLF
-
-Makefile = LF
+[patterns]
+MANIFEST.in = native
+README = native
+LICENSE = native
+changelog = native
+**.c = native
+**.h = native
+**.py = native
+**.txt = native
+**.rst = native
+
+**.bat = CRLF
+
+Makefile = LF
diff --git a/catapult/third_party/gsutil/third_party/crcmod/docs/source/Makefile b/catapult/third_party/gsutil/third_party/crcmod/docs/source/Makefile
index 2afff0e..3bf748e 100644
--- a/catapult/third_party/gsutil/third_party/crcmod/docs/source/Makefile
+++ b/catapult/third_party/gsutil/third_party/crcmod/docs/source/Makefile
@@ -1,118 +1,118 @@
-# Makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line.
-SPHINXOPTS    =
-SPHINXBUILD   = sphinx-build
-PAPER         =
-BUILDDIR      = _build
-
-# Internal variables.
-PAPEROPT_a4     = -D latex_paper_size=a4
-PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
-
-.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp epub latex changes linkcheck doctest
-
-help:
-	@echo "Please use \`make <target>' where <target> is one of"
-	@echo "  html       to make standalone HTML files"
-	@echo "  dirhtml    to make HTML files named index.html in directories"
-	@echo "  singlehtml to make a single large HTML file"
-	@echo "  pickle     to make pickle files"
-	@echo "  json       to make JSON files"
-	@echo "  htmlhelp   to make HTML files and a HTML help project"
-	@echo "  qthelp     to make HTML files and a qthelp project"
-	@echo "  devhelp    to make HTML files and a Devhelp project"
-	@echo "  epub       to make an epub"
-	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
-	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
-	@echo "  changes    to make an overview of all changed/added/deprecated items"
-	@echo "  linkcheck  to check all external links for integrity"
-	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
-
-clean:
-	-rm -rf $(BUILDDIR)/*
-
-html:
-	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
-	@echo
-	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
-
-dirhtml:
-	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
-	@echo
-	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
-
-singlehtml:
-	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
-	@echo
-	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
-
-pickle:
-	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
-	@echo
-	@echo "Build finished; now you can process the pickle files."
-
-json:
-	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
-	@echo
-	@echo "Build finished; now you can process the JSON files."
-
-htmlhelp:
-	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
-	@echo
-	@echo "Build finished; now you can run HTML Help Workshop with the" \
-	      ".hhp project file in $(BUILDDIR)/htmlhelp."
-
-qthelp:
-	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
-	@echo
-	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
-	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
-	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/crcmod.qhcp"
-	@echo "To view the help file:"
-	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/crcmod.qhc"
-
-devhelp:
-	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) _build/devhelp
-	@echo
-	@echo "Build finished."
-	@echo "To view the help file:"
-	@echo "# mkdir -p $$HOME/.local/share/devhelp/crcmod"
-	@echo "# ln -s _build/devhelp $$HOME/.local/share/devhelp/crcmod"
-	@echo "# devhelp"
-
-epub:
-	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
-	@echo
-	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
-
-latex:
-	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
-	@echo
-	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
-	@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
-	      "run these through (pdf)latex."
-
-latexpdf: latex
-	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) _build/latex
-	@echo "Running LaTeX files through pdflatex..."
-	make -C _build/latex all-pdf
-	@echo "pdflatex finished; the PDF files are in _build/latex."
-
-changes:
-	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
-	@echo
-	@echo "The overview file is in $(BUILDDIR)/changes."
-
-linkcheck:
-	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
-	@echo
-	@echo "Link check complete; look for any errors in the above output " \
-	      "or in $(BUILDDIR)/linkcheck/output.txt."
-
-doctest:
-	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
-	@echo "Testing of doctests in the sources finished, look at the " \
-	      "results in $(BUILDDIR)/doctest/output.txt."
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+BUILDDIR      = _build
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp epub latex changes linkcheck doctest
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html       to make standalone HTML files"
+	@echo "  dirhtml    to make HTML files named index.html in directories"
+	@echo "  singlehtml to make a single large HTML file"
+	@echo "  pickle     to make pickle files"
+	@echo "  json       to make JSON files"
+	@echo "  htmlhelp   to make HTML files and a HTML help project"
+	@echo "  qthelp     to make HTML files and a qthelp project"
+	@echo "  devhelp    to make HTML files and a Devhelp project"
+	@echo "  epub       to make an epub"
+	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
+	@echo "  changes    to make an overview of all changed/added/deprecated items"
+	@echo "  linkcheck  to check all external links for integrity"
+	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+	-rm -rf $(BUILDDIR)/*
+
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+	@echo
+	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+json:
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+	@echo
+	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
+	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/crcmod.qhcp"
+	@echo "To view the help file:"
+	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/crcmod.qhc"
+
+devhelp:
+	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) _build/devhelp
+	@echo
+	@echo "Build finished."
+	@echo "To view the help file:"
+	@echo "# mkdir -p $$HOME/.local/share/devhelp/crcmod"
+	@echo "# ln -s _build/devhelp $$HOME/.local/share/devhelp/crcmod"
+	@echo "# devhelp"
+
+epub:
+	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+	@echo
+	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+	@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
+	      "run these through (pdf)latex."
+
+latexpdf: latex
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) _build/latex
+	@echo "Running LaTeX files through pdflatex..."
+	make -C _build/latex all-pdf
+	@echo "pdflatex finished; the PDF files are in _build/latex."
+
+changes:
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+	@echo
+	@echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in $(BUILDDIR)/doctest/output.txt."
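For orientation, the Makefile above (and the make.bat below) are thin wrappers around sphinx-build. A minimal Python sketch of the `html` target, assuming sphinx-build is installed and on PATH and the working directory is the crcmod docs/source tree (neither is provided by this patch), might look like:

    import subprocess

    # Rough equivalent of the `html` target above:
    #   sphinx-build -b html -d _build/doctrees . _build/html
    # Assumes sphinx-build is on PATH; adjust BUILDDIR as needed.
    BUILDDIR = "_build"
    subprocess.run(
        ["sphinx-build", "-b", "html",
         "-d", BUILDDIR + "/doctrees", ".", BUILDDIR + "/html"],
        check=True,
    )
    print("Build finished. The HTML pages are in %s/html." % BUILDDIR)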
diff --git a/catapult/third_party/gsutil/third_party/crcmod/docs/source/make.bat b/catapult/third_party/gsutil/third_party/crcmod/docs/source/make.bat
old mode 100755
new mode 100644
index f484c98..2a1895e
--- a/catapult/third_party/gsutil/third_party/crcmod/docs/source/make.bat
+++ b/catapult/third_party/gsutil/third_party/crcmod/docs/source/make.bat
@@ -1,137 +1,137 @@
-@ECHO OFF
-
-REM Command file for Sphinx documentation
-
-set SPHINXBUILD=sphinx-build
-set BUILDDIR=_build
-set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
-if NOT "%PAPER%" == "" (
-	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
-)
-
-if "%1" == "" goto help
-
-if "%1" == "help" (
-	:help
-	echo.Please use `make ^<target^>` where ^<target^> is one of
-	echo.  html       to make standalone HTML files
-	echo.  dirhtml    to make HTML files named index.html in directories
-	echo.  singlehtml to make a single large HTML file
-	echo.  pickle     to make pickle files
-	echo.  json       to make JSON files
-	echo.  htmlhelp   to make HTML files and a HTML help project
-	echo.  qthelp     to make HTML files and a qthelp project
-	echo.  devhelp    to make HTML files and a Devhelp project
-	echo.  epub       to make an epub
-	echo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter
-	echo.  changes    to make an overview over all changed/added/deprecated items
-	echo.  linkcheck  to check all external links for integrity
-	echo.  doctest    to run all doctests embedded in the documentation if enabled
-	goto end
-)
-
-if "%1" == "clean" (
-	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
-	del /q /s %BUILDDIR%\*
-	goto end
-)
-
-if "%1" == "html" (
-	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
-	echo.
-	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
-	goto end
-)
-
-if "%1" == "dirhtml" (
-	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
-	echo.
-	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
-	goto end
-)
-
-if "%1" == "singlehtml" (
-	%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
-	echo.
-	echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
-	goto end
-)
-
-if "%1" == "pickle" (
-	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
-	echo.
-	echo.Build finished; now you can process the pickle files.
-	goto end
-)
-
-if "%1" == "json" (
-	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
-	echo.
-	echo.Build finished; now you can process the JSON files.
-	goto end
-)
-
-if "%1" == "htmlhelp" (
-	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
-	echo.
-	echo.Build finished; now you can run HTML Help Workshop with the ^
-.hhp project file in %BUILDDIR%/htmlhelp.
-	goto end
-)
-
-if "%1" == "qthelp" (
-	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
-	echo.
-	echo.Build finished; now you can run "qcollectiongenerator" with the ^
-.qhcp project file in %BUILDDIR%/qthelp, like this:
-	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\crcmod.qhcp
-	echo.To view the help file:
-	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\crcmod.ghc
-	goto end
-)
-
-if "%1" == "devhelp" (
-	%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% _build/devhelp
-	echo.
-	echo.Build finished.
-	goto end
-)
-
-if "%1" == "epub" (
-	%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
-	echo.
-	echo.Build finished. The epub file is in %BUILDDIR%/epub.
-	goto end
-)
-
-if "%1" == "latex" (
-	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
-	echo.
-	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
-	goto end
-)
-
-if "%1" == "changes" (
-	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
-	echo.
-	echo.The overview file is in %BUILDDIR%/changes.
-	goto end
-)
-
-if "%1" == "linkcheck" (
-	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
-	echo.
-	echo.Link check complete; look for any errors in the above output ^
-or in %BUILDDIR%/linkcheck/output.txt.
-	goto end
-)
-
-if "%1" == "doctest" (
-	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
-	echo.
-	echo.Testing of doctests in the sources finished, look at the ^
-results in %BUILDDIR%/doctest/output.txt.
-	goto end
-)
-
-:end
+@ECHO OFF
+
+REM Command file for Sphinx documentation
+
+set SPHINXBUILD=sphinx-build
+set BUILDDIR=_build
+set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
+if NOT "%PAPER%" == "" (
+	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+)
+
+if "%1" == "" goto help
+
+if "%1" == "help" (
+	:help
+	echo.Please use `make ^<target^>` where ^<target^> is one of
+	echo.  html       to make standalone HTML files
+	echo.  dirhtml    to make HTML files named index.html in directories
+	echo.  singlehtml to make a single large HTML file
+	echo.  pickle     to make pickle files
+	echo.  json       to make JSON files
+	echo.  htmlhelp   to make HTML files and a HTML help project
+	echo.  qthelp     to make HTML files and a qthelp project
+	echo.  devhelp    to make HTML files and a Devhelp project
+	echo.  epub       to make an epub
+	echo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+	echo.  changes    to make an overview over all changed/added/deprecated items
+	echo.  linkcheck  to check all external links for integrity
+	echo.  doctest    to run all doctests embedded in the documentation if enabled
+	goto end
+)
+
+if "%1" == "clean" (
+	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
+	del /q /s %BUILDDIR%\*
+	goto end
+)
+
+if "%1" == "html" (
+	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
+	goto end
+)
+
+if "%1" == "dirhtml" (
+	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
+	goto end
+)
+
+if "%1" == "singlehtml" (
+	%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
+	goto end
+)
+
+if "%1" == "pickle" (
+	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
+	echo.
+	echo.Build finished; now you can process the pickle files.
+	goto end
+)
+
+if "%1" == "json" (
+	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
+	echo.
+	echo.Build finished; now you can process the JSON files.
+	goto end
+)
+
+if "%1" == "htmlhelp" (
+	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
+	echo.
+	echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in %BUILDDIR%/htmlhelp.
+	goto end
+)
+
+if "%1" == "qthelp" (
+	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
+	echo.
+	echo.Build finished; now you can run "qcollectiongenerator" with the ^
+.qhcp project file in %BUILDDIR%/qthelp, like this:
+	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\crcmod.qhcp
+	echo.To view the help file:
+	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\crcmod.ghc
+	goto end
+)
+
+if "%1" == "devhelp" (
+	%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% _build/devhelp
+	echo.
+	echo.Build finished.
+	goto end
+)
+
+if "%1" == "epub" (
+	%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
+	echo.
+	echo.Build finished. The epub file is in %BUILDDIR%/epub.
+	goto end
+)
+
+if "%1" == "latex" (
+	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+	echo.
+	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
+	goto end
+)
+
+if "%1" == "changes" (
+	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
+	echo.
+	echo.The overview file is in %BUILDDIR%/changes.
+	goto end
+)
+
+if "%1" == "linkcheck" (
+	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
+	echo.
+	echo.Link check complete; look for any errors in the above output ^
+or in %BUILDDIR%/linkcheck/output.txt.
+	goto end
+)
+
+if "%1" == "doctest" (
+	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
+	echo.
+	echo.Testing of doctests in the sources finished, look at the ^
+results in %BUILDDIR%/doctest/output.txt.
+	goto end
+)
+
+:end
diff --git a/catapult/third_party/gsutil/third_party/crcmod/setup.py b/catapult/third_party/gsutil/third_party/crcmod/setup.py
old mode 100644
new mode 100755
diff --git a/catapult/third_party/gsutil/third_party/httplib2/README.chromium b/catapult/third_party/gsutil/third_party/httplib2/README.chromium
deleted file mode 100644
index bfa3f3d..0000000
--- a/catapult/third_party/gsutil/third_party/httplib2/README.chromium
+++ /dev/null
@@ -1,15 +0,0 @@
-Name: httplib2
-Short Name: httplib2
-URL: https://github.com/jcgregorio/httplib2
-Version: 0.8
-Revision: 0197ec868a4fc638c08358b94200ffd6ddb1bf50
-License: MIT License
-
-Description:
-A comprehensive HTTP client library in Python.
-
-Local Modifications:
-ref.tex and ref/ folder is removed.
-
-Notes:
-Required by oauth2client library.
diff --git a/catapult/third_party/gsutil/third_party/httplib2/python2/httplib2test.py b/catapult/third_party/gsutil/third_party/httplib2/python2/httplib2test.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/httplib2/python3/README b/catapult/third_party/gsutil/third_party/httplib2/python3/README
index 5e09bc5..f4b4409 100644
--- a/catapult/third_party/gsutil/third_party/httplib2/python3/README
+++ b/catapult/third_party/gsutil/third_party/httplib2/python3/README
@@ -1,68 +1,68 @@
-httplib2 for Python 3
-
-This directory contains a port of httplib2 to Python 3. As you may
-know, Python 3 is not backward-compatible with Python 2. The biggest
-change in Python 3 (that affects httplib2) is the distinction between
-bytes and strings.
-
-To successfully use http2lib for Python 3, you absolutely must
-understand the following sentence:
-
-** THE RESPONSE HEADERS ARE STRINGS, BUT THE CONTENT BODY IS BYTES **
-
-
-Example:
-
->>> import httplib2, pprint
->>> h = httplib2.Http(".cache")
->>> (resp_headers, content) = h.request("http://example.org/", "GET")
->>> pprint.pprint(resp_headers)
-{'accept-ranges': 'bytes',
- 'connection': 'close',
- 'content-length': '438',
- 'content-location': 'http://example.org/',
- 'content-type': 'text/html; charset=UTF-8',
- 'date': 'Fri, 29 May 2009 03:57:29 GMT',
- 'etag': '"b80f4-1b6-80bfd280"',
- 'last-modified': 'Tue, 15 Nov 2005 13:24:10 GMT',
- 'server': 'Apache/2.2.3 (CentOS)',
- 'status': '200'}
->>> type(content)
-<class 'bytes'>
->>> content[:49]
-b'<HTML>\r\n<HEAD>\r\n  <TITLE>Example Web Page</TITLE>'
-
-
-Further reading:
-
-  * http://diveintopython3.org/strings.html
-  * http://docs.python.org/3.0/whatsnew/3.0.html#text-vs-data-instead-of-unicode-vs-8-bit
-  * http://docs.python.org/3.0/howto/unicode.html
-
-
---------------------------------------------------------------------
-Httplib2 Software License
-
-Copyright (c) 2006 by Joe Gregorio
-Copyright (c) 2009 by Mark Pilgrim
-
-Permission is hereby granted, free of charge, to any person 
-obtaining a copy of this software and associated documentation 
-files (the "Software"), to deal in the Software without restriction, 
-including without limitation the rights to use, copy, modify, merge, 
-publish, distribute, sublicense, and/or sell copies of the Software, 
-and to permit persons to whom the Software is furnished to do so, 
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be 
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES 
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 
-BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 
-ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 
-SOFTWARE.
-
+httplib2 for Python 3
+
+This directory contains a port of httplib2 to Python 3. As you may
+know, Python 3 is not backward-compatible with Python 2. The biggest
+change in Python 3 (that affects httplib2) is the distinction between
+bytes and strings.
+
+To successfully use http2lib for Python 3, you absolutely must
+understand the following sentence:
+
+** THE RESPONSE HEADERS ARE STRINGS, BUT THE CONTENT BODY IS BYTES **
+
+
+Example:
+
+>>> import httplib2, pprint
+>>> h = httplib2.Http(".cache")
+>>> (resp_headers, content) = h.request("http://example.org/", "GET")
+>>> pprint.pprint(resp_headers)
+{'accept-ranges': 'bytes',
+ 'connection': 'close',
+ 'content-length': '438',
+ 'content-location': 'http://example.org/',
+ 'content-type': 'text/html; charset=UTF-8',
+ 'date': 'Fri, 29 May 2009 03:57:29 GMT',
+ 'etag': '"b80f4-1b6-80bfd280"',
+ 'last-modified': 'Tue, 15 Nov 2005 13:24:10 GMT',
+ 'server': 'Apache/2.2.3 (CentOS)',
+ 'status': '200'}
+>>> type(content)
+<class 'bytes'>
+>>> content[:49]
+b'<HTML>\r\n<HEAD>\r\n  <TITLE>Example Web Page</TITLE>'
+
+
+Further reading:
+
+  * http://diveintopython3.org/strings.html
+  * http://docs.python.org/3.0/whatsnew/3.0.html#text-vs-data-instead-of-unicode-vs-8-bit
+  * http://docs.python.org/3.0/howto/unicode.html
+
+
+--------------------------------------------------------------------
+Httplib2 Software License
+
+Copyright (c) 2006 by Joe Gregorio
+Copyright (c) 2009 by Mark Pilgrim
+
+Permission is hereby granted, free of charge, to any person 
+obtaining a copy of this software and associated documentation 
+files (the "Software"), to deal in the Software without restriction, 
+including without limitation the rights to use, copy, modify, merge, 
+publish, distribute, sublicense, and/or sell copies of the Software, 
+and to permit persons to whom the Software is furnished to do so, 
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be 
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES 
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 
+BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 
+ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 
+SOFTWARE.
+
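To make the headers-are-strings / body-is-bytes rule from the README above concrete, here is a minimal sketch; it assumes the Python 3 httplib2 package is importable and that example.org is reachable, and the charset parsing is illustrative only:

    import httplib2

    h = httplib2.Http(".cache")
    resp_headers, content = h.request("http://example.org/", "GET")

    # Header values are str; pull the advertised charset out of content-type.
    content_type = resp_headers.get("content-type", "")
    charset = "utf-8"
    if "charset=" in content_type:
        charset = content_type.split("charset=")[-1].strip()

    # The body arrives as bytes and must be decoded explicitly.
    body = content.decode(charset, errors="replace")
    print(type(content), type(body))   # <class 'bytes'> <class 'str'>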
diff --git a/catapult/third_party/gsutil/third_party/httplib2/python3/httplib2/iri2uri.py b/catapult/third_party/gsutil/third_party/httplib2/python3/httplib2/iri2uri.py
index 711377c..98985f8 100644
--- a/catapult/third_party/gsutil/third_party/httplib2/python3/httplib2/iri2uri.py
+++ b/catapult/third_party/gsutil/third_party/httplib2/python3/httplib2/iri2uri.py
@@ -1,110 +1,110 @@
-"""
-iri2uri
-
-Converts an IRI to a URI.
-
-"""
-__author__ = "Joe Gregorio (joe@bitworking.org)"
-__copyright__ = "Copyright 2006, Joe Gregorio"
-__contributors__ = []
-__version__ = "1.0.0"
-__license__ = "MIT"
-__history__ = """
-"""
-
-import urllib.parse
-
-
-# Convert an IRI to a URI following the rules in RFC 3987
-#
-# The characters we need to enocde and escape are defined in the spec:
-#
-# iprivate =  %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD
-# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF
-#         / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD
-#         / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD
-#         / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD
-#         / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD
-#         / %xD0000-DFFFD / %xE1000-EFFFD
-
-escape_range = [
-    (0xA0, 0xD7FF),
-    (0xE000, 0xF8FF),
-    (0xF900, 0xFDCF),
-    (0xFDF0, 0xFFEF),
-    (0x10000, 0x1FFFD),
-    (0x20000, 0x2FFFD),
-    (0x30000, 0x3FFFD),
-    (0x40000, 0x4FFFD),
-    (0x50000, 0x5FFFD),
-    (0x60000, 0x6FFFD),
-    (0x70000, 0x7FFFD),
-    (0x80000, 0x8FFFD),
-    (0x90000, 0x9FFFD),
-    (0xA0000, 0xAFFFD),
-    (0xB0000, 0xBFFFD),
-    (0xC0000, 0xCFFFD),
-    (0xD0000, 0xDFFFD),
-    (0xE1000, 0xEFFFD),
-    (0xF0000, 0xFFFFD),
-    (0x100000, 0x10FFFD),
-]
-
-def encode(c):
-    retval = c
-    i = ord(c)
-    for low, high in escape_range:
-        if i < low:
-            break
-        if i >= low and i <= high:
-            retval = "".join(["%%%2X" % o for o in c.encode('utf-8')])
-            break
-    return retval
-
-
-def iri2uri(uri):
-    """Convert an IRI to a URI. Note that IRIs must be
-    passed in a unicode strings. That is, do not utf-8 encode
-    the IRI before passing it into the function."""
-    if isinstance(uri ,str):
-        (scheme, authority, path, query, fragment) = urllib.parse.urlsplit(uri)
-        authority = authority.encode('idna').decode('utf-8')
-        # For each character in 'ucschar' or 'iprivate'
-        #  1. encode as utf-8
-        #  2. then %-encode each octet of that utf-8
-        uri = urllib.parse.urlunsplit((scheme, authority, path, query, fragment))
-        uri = "".join([encode(c) for c in uri])
-    return uri
-
-if __name__ == "__main__":
-    import unittest
-
-    class Test(unittest.TestCase):
-
-        def test_uris(self):
-            """Test that URIs are invariant under the transformation."""
-            invariant = [
-                "ftp://ftp.is.co.za/rfc/rfc1808.txt",
-                "http://www.ietf.org/rfc/rfc2396.txt",
-                "ldap://[2001:db8::7]/c=GB?objectClass?one",
-                "mailto:John.Doe@example.com",
-                "news:comp.infosystems.www.servers.unix",
-                "tel:+1-816-555-1212",
-                "telnet://192.0.2.16:80/",
-                "urn:oasis:names:specification:docbook:dtd:xml:4.1.2" ]
-            for uri in invariant:
-                self.assertEqual(uri, iri2uri(uri))
-
-        def test_iri(self):
-            """ Test that the right type of escaping is done for each part of the URI."""
-            self.assertEqual("http://xn--o3h.com/%E2%98%84", iri2uri("http://\N{COMET}.com/\N{COMET}"))
-            self.assertEqual("http://bitworking.org/?fred=%E2%98%84", iri2uri("http://bitworking.org/?fred=\N{COMET}"))
-            self.assertEqual("http://bitworking.org/#%E2%98%84", iri2uri("http://bitworking.org/#\N{COMET}"))
-            self.assertEqual("#%E2%98%84", iri2uri("#\N{COMET}"))
-            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri("/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"))
-            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(iri2uri("/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")))
-            self.assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri("/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode('utf-8')))
-
-    unittest.main()
-
-
+"""

+iri2uri

+

+Converts an IRI to a URI.

+

+"""

+__author__ = "Joe Gregorio (joe@bitworking.org)"

+__copyright__ = "Copyright 2006, Joe Gregorio"

+__contributors__ = []

+__version__ = "1.0.0"

+__license__ = "MIT"

+__history__ = """

+"""

+

+import urllib.parse

+

+

+# Convert an IRI to a URI following the rules in RFC 3987

+#

+# The characters we need to enocde and escape are defined in the spec:

+#

+# iprivate =  %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD

+# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF

+#         / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD

+#         / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD

+#         / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD

+#         / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD

+#         / %xD0000-DFFFD / %xE1000-EFFFD

+

+escape_range = [

+    (0xA0, 0xD7FF),

+    (0xE000, 0xF8FF),

+    (0xF900, 0xFDCF),

+    (0xFDF0, 0xFFEF),

+    (0x10000, 0x1FFFD),

+    (0x20000, 0x2FFFD),

+    (0x30000, 0x3FFFD),

+    (0x40000, 0x4FFFD),

+    (0x50000, 0x5FFFD),

+    (0x60000, 0x6FFFD),

+    (0x70000, 0x7FFFD),

+    (0x80000, 0x8FFFD),

+    (0x90000, 0x9FFFD),

+    (0xA0000, 0xAFFFD),

+    (0xB0000, 0xBFFFD),

+    (0xC0000, 0xCFFFD),

+    (0xD0000, 0xDFFFD),

+    (0xE1000, 0xEFFFD),

+    (0xF0000, 0xFFFFD),

+    (0x100000, 0x10FFFD),

+]

+

+def encode(c):

+    retval = c

+    i = ord(c)

+    for low, high in escape_range:

+        if i < low:

+            break

+        if i >= low and i <= high:

+            retval = "".join(["%%%2X" % o for o in c.encode('utf-8')])

+            break

+    return retval

+

+

+def iri2uri(uri):

+    """Convert an IRI to a URI. Note that IRIs must be

+    passed in a unicode strings. That is, do not utf-8 encode

+    the IRI before passing it into the function."""

+    if isinstance(uri ,str):

+        (scheme, authority, path, query, fragment) = urllib.parse.urlsplit(uri)

+        authority = authority.encode('idna').decode('utf-8')

+        # For each character in 'ucschar' or 'iprivate'

+        #  1. encode as utf-8

+        #  2. then %-encode each octet of that utf-8

+        uri = urllib.parse.urlunsplit((scheme, authority, path, query, fragment))

+        uri = "".join([encode(c) for c in uri])

+    return uri

+

+if __name__ == "__main__":

+    import unittest

+

+    class Test(unittest.TestCase):

+

+        def test_uris(self):

+            """Test that URIs are invariant under the transformation."""

+            invariant = [

+                "ftp://ftp.is.co.za/rfc/rfc1808.txt",

+                "http://www.ietf.org/rfc/rfc2396.txt",

+                "ldap://[2001:db8::7]/c=GB?objectClass?one",

+                "mailto:John.Doe@example.com",

+                "news:comp.infosystems.www.servers.unix",

+                "tel:+1-816-555-1212",

+                "telnet://192.0.2.16:80/",

+                "urn:oasis:names:specification:docbook:dtd:xml:4.1.2" ]

+            for uri in invariant:

+                self.assertEqual(uri, iri2uri(uri))

+

+        def test_iri(self):

+            """ Test that the right type of escaping is done for each part of the URI."""

+            self.assertEqual("http://xn--o3h.com/%E2%98%84", iri2uri("http://\N{COMET}.com/\N{COMET}"))

+            self.assertEqual("http://bitworking.org/?fred=%E2%98%84", iri2uri("http://bitworking.org/?fred=\N{COMET}"))

+            self.assertEqual("http://bitworking.org/#%E2%98%84", iri2uri("http://bitworking.org/#\N{COMET}"))

+            self.assertEqual("#%E2%98%84", iri2uri("#\N{COMET}"))

+            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri("/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"))

+            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(iri2uri("/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")))

+            self.assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri("/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode('utf-8')))

+

+    unittest.main()

+

+

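A short usage sketch for the iri2uri module above; the expected output is taken from the module's own test_iri case, and the import path assumes the Python 3 httplib2 package layout (httplib2/iri2uri.py):

    from httplib2.iri2uri import iri2uri

    # Characters outside the URI character set are %-encoded as UTF-8 octets
    # and the authority is IDNA-encoded, per the RFC 3987 ranges in the module.
    print(iri2uri("http://\N{COMET}.com/\N{COMET}"))      # http://xn--o3h.com/%E2%98%84

    # Plain ASCII URIs are invariant under the transformation.
    print(iri2uri("http://www.ietf.org/rfc/rfc2396.txt"))  # unchanged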
diff --git a/catapult/third_party/gsutil/third_party/httplib2/python3/httplib2test.py b/catapult/third_party/gsutil/third_party/httplib2/python3/httplib2test.py
old mode 100755
new mode 100644
index 8884a5a..480d28e
--- a/catapult/third_party/gsutil/third_party/httplib2/python3/httplib2test.py
+++ b/catapult/third_party/gsutil/third_party/httplib2/python3/httplib2test.py
@@ -1,1580 +1,1580 @@
-#!/usr/bin/env python3
-"""
-httplib2test
-
-A set of unit tests for httplib2.py.
-
-Requires Python 3.0 or later
-"""
-
-__author__ = "Joe Gregorio (joe@bitworking.org)"
-__copyright__ = "Copyright 2006, Joe Gregorio"
-__contributors__ = ["Mark Pilgrim"]
-__license__ = "MIT"
-__history__ = """ """
-__version__ = "0.2 ($Rev: 118 $)"
-
-import base64
-import http.client
-import httplib2
-import io
-import os
-import pickle
-import socket
-import ssl
-import sys
-import time
-import unittest
-import urllib.parse
-
-# The test resources base uri
-base = 'http://bitworking.org/projects/httplib2/test/'
-#base = 'http://localhost/projects/httplib2/test/'
-cacheDirName = ".cache"
-
-
-class CredentialsTest(unittest.TestCase):
-    def test(self):
-        c = httplib2.Credentials()
-        c.add("joe", "password")
-        self.assertEqual(("joe", "password"), list(c.iter("bitworking.org"))[0])
-        self.assertEqual(("joe", "password"), list(c.iter(""))[0])
-        c.add("fred", "password2", "wellformedweb.org")
-        self.assertEqual(("joe", "password"), list(c.iter("bitworking.org"))[0])
-        self.assertEqual(1, len(list(c.iter("bitworking.org"))))
-        self.assertEqual(2, len(list(c.iter("wellformedweb.org"))))
-        self.assertTrue(("fred", "password2") in list(c.iter("wellformedweb.org")))
-        c.clear()
-        self.assertEqual(0, len(list(c.iter("bitworking.org"))))
-        c.add("fred", "password2", "wellformedweb.org")
-        self.assertTrue(("fred", "password2") in list(c.iter("wellformedweb.org")))
-        self.assertEqual(0, len(list(c.iter("bitworking.org"))))
-        self.assertEqual(0, len(list(c.iter(""))))
-
-
-class ParserTest(unittest.TestCase):
-    def testFromStd66(self):
-        self.assertEqual( ('http', 'example.com', '', None, None ), httplib2.parse_uri("http://example.com"))
-        self.assertEqual( ('https', 'example.com', '', None, None ), httplib2.parse_uri("https://example.com"))
-        self.assertEqual( ('https', 'example.com:8080', '', None, None ), httplib2.parse_uri("https://example.com:8080"))
-        self.assertEqual( ('http', 'example.com', '/', None, None ), httplib2.parse_uri("http://example.com/"))
-        self.assertEqual( ('http', 'example.com', '/path', None, None ), httplib2.parse_uri("http://example.com/path"))
-        self.assertEqual( ('http', 'example.com', '/path', 'a=1&b=2', None ), httplib2.parse_uri("http://example.com/path?a=1&b=2"))
-        self.assertEqual( ('http', 'example.com', '/path', 'a=1&b=2', 'fred' ), httplib2.parse_uri("http://example.com/path?a=1&b=2#fred"))
-        self.assertEqual( ('http', 'example.com', '/path', 'a=1&b=2', 'fred' ), httplib2.parse_uri("http://example.com/path?a=1&b=2#fred"))
-
-
-class UrlNormTest(unittest.TestCase):
-    def test(self):
-        self.assertEqual( "http://example.org/", httplib2.urlnorm("http://example.org")[-1])
-        self.assertEqual( "http://example.org/", httplib2.urlnorm("http://EXAMple.org")[-1])
-        self.assertEqual( "http://example.org/?=b", httplib2.urlnorm("http://EXAMple.org?=b")[-1])
-        self.assertEqual( "http://example.org/mypath?a=b", httplib2.urlnorm("http://EXAMple.org/mypath?a=b")[-1])
-        self.assertEqual( "http://localhost:80/", httplib2.urlnorm("http://localhost:80")[-1])
-        self.assertEqual( httplib2.urlnorm("http://localhost:80/"), httplib2.urlnorm("HTTP://LOCALHOST:80"))
-        try:
-            httplib2.urlnorm("/")
-            self.fail("Non-absolute URIs should raise an exception")
-        except httplib2.RelativeURIError:
-            pass
-
-class UrlSafenameTest(unittest.TestCase):
-    def test(self):
-        # Test that different URIs end up generating different safe names
-        self.assertEqual( "example.org,fred,a=b,58489f63a7a83c3b7794a6a398ee8b1f", httplib2.safename("http://example.org/fred/?a=b"))
-        self.assertEqual( "example.org,fred,a=b,8c5946d56fec453071f43329ff0be46b", httplib2.safename("http://example.org/fred?/a=b"))
-        self.assertEqual( "www.example.org,fred,a=b,499c44b8d844a011b67ea2c015116968", httplib2.safename("http://www.example.org/fred?/a=b"))
-        self.assertEqual( httplib2.safename(httplib2.urlnorm("http://www")[-1]), httplib2.safename(httplib2.urlnorm("http://WWW")[-1]))
-        self.assertEqual( "www.example.org,fred,a=b,692e843a333484ce0095b070497ab45d", httplib2.safename("https://www.example.org/fred?/a=b"))
-        self.assertNotEqual( httplib2.safename("http://www"), httplib2.safename("https://www"))
-        # Test the max length limits
-        uri = "http://" + ("w" * 200) + ".org"
-        uri2 = "http://" + ("w" * 201) + ".org"
-        self.assertNotEqual( httplib2.safename(uri2), httplib2.safename(uri))
-        # Max length should be 200 + 1 (",") + 32
-        self.assertEqual(233, len(httplib2.safename(uri2)))
-        self.assertEqual(233, len(httplib2.safename(uri)))
-        # Unicode
-        if sys.version_info >= (2,3):
-            self.assertEqual( "xn--http,-4y1d.org,fred,a=b,579924c35db315e5a32e3d9963388193", httplib2.safename("http://\u2304.org/fred/?a=b"))
-
-class _MyResponse(io.BytesIO):
-    def __init__(self, body, **kwargs):
-        io.BytesIO.__init__(self, body)
-        self.headers = kwargs
-
-    def items(self):
-        return self.headers.items()
-
-    def iteritems(self):
-        return iter(self.headers.items())
-
-
-class _MyHTTPConnection(object):
-    "This class is just a mock of httplib.HTTPConnection used for testing"
-
-    def __init__(self, host, port=None, key_file=None, cert_file=None,
-                 strict=None, timeout=None, proxy_info=None):
-        self.host = host
-        self.port = port
-        self.timeout = timeout
-        self.log = ""
-        self.sock = None
-
-    def set_debuglevel(self, level):
-        pass
-
-    def connect(self):
-        "Connect to a host on a given port."
-        pass
-
-    def close(self):
-        pass
-
-    def request(self, method, request_uri, body, headers):
-        pass
-
-    def getresponse(self):
-        return _MyResponse(b"the body", status="200")
-
-
-class HttpTest(unittest.TestCase):
-    def setUp(self):
-        if os.path.exists(cacheDirName):
-            [os.remove(os.path.join(cacheDirName, file)) for file in os.listdir(cacheDirName)]
-        self.http = httplib2.Http(cacheDirName)
-        self.http.clear_credentials()
-
-    def testIPv6NoSSL(self):
-        try:
-          self.http.request("http://[::1]/")
-        except socket.gaierror:
-          self.fail("should get the address family right for IPv6")
-        except socket.error:
-          # Even if IPv6 isn't installed on a machine it should just raise socket.error
-          pass
-
-    def testIPv6SSL(self):
-        try:
-          self.http.request("https://[::1]/")
-        except socket.gaierror:
-          self.fail("should get the address family right for IPv6")
-        except socket.error:
-          # Even if IPv6 isn't installed on a machine it should just raise socket.error
-          pass
-
-    def testConnectionType(self):
-        self.http.force_exception_to_status_code = False
-        response, content = self.http.request("http://bitworking.org", connection_type=_MyHTTPConnection)
-        self.assertEqual(response['content-location'], "http://bitworking.org")
-        self.assertEqual(content, b"the body")
-
-    def testGetUnknownServer(self):
-        self.http.force_exception_to_status_code = False
-        try:
-            self.http.request("http://fred.bitworking.org/")
-            self.fail("An httplib2.ServerNotFoundError Exception must be thrown on an unresolvable server.")
-        except httplib2.ServerNotFoundError:
-            pass
-
-        # Now test with exceptions turned off
-        self.http.force_exception_to_status_code = True
-
-        (response, content) = self.http.request("http://fred.bitworking.org/")
-        self.assertEqual(response['content-type'], 'text/plain')
-        self.assertTrue(content.startswith(b"Unable to find"))
-        self.assertEqual(response.status, 400)
-
-    def testGetConnectionRefused(self):
-        self.http.force_exception_to_status_code = False
-        try:
-            self.http.request("http://localhost:7777/")
-            self.fail("An socket.error exception must be thrown on Connection Refused.")
-        except socket.error:
-            pass
-
-        # Now test with exceptions turned off
-        self.http.force_exception_to_status_code = True
-
-        (response, content) = self.http.request("http://localhost:7777/")
-        self.assertEqual(response['content-type'], 'text/plain')
-        self.assertTrue(b"Connection refused" in content)
-        self.assertEqual(response.status, 400)
-
-    def testGetIRI(self):
-        if sys.version_info >= (2,3):
-            uri = urllib.parse.urljoin(base, "reflector/reflector.cgi?d=\N{CYRILLIC CAPITAL LETTER DJE}")
-            (response, content) = self.http.request(uri, "GET")
-            d = self.reflector(content)
-            self.assertTrue('QUERY_STRING' in d)
-            self.assertTrue(d['QUERY_STRING'].find('%D0%82') > 0)
-
-    def testGetIsDefaultMethod(self):
-        # Test that GET is the default method
-        uri = urllib.parse.urljoin(base, "methods/method_reflector.cgi")
-        (response, content) = self.http.request(uri)
-        self.assertEqual(response['x-method'], "GET")
-
-    def testDifferentMethods(self):
-        # Test that all methods can be used
-        uri = urllib.parse.urljoin(base, "methods/method_reflector.cgi")
-        for method in ["GET", "PUT", "DELETE", "POST"]:
-            (response, content) = self.http.request(uri, method, body=b" ")
-            self.assertEqual(response['x-method'], method)
-
-    def testHeadRead(self):
-        # Test that we don't try to read the response of a HEAD request
-        # since httplib blocks response.read() for HEAD requests.
-        # Oddly enough this doesn't appear as a problem when doing HEAD requests
-        # against Apache servers.
-        uri = "http://www.google.com/"
-        (response, content) = self.http.request(uri, "HEAD")
-        self.assertEqual(response.status, 200)
-        self.assertEqual(content, b"")
-
-    def testGetNoCache(self):
-        # Test that can do a GET w/o the cache turned on.
-        http = httplib2.Http()
-        uri = urllib.parse.urljoin(base, "304/test_etag.txt")
-        (response, content) = http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response.previous, None)
-
-    def testGetOnlyIfCachedCacheHit(self):
-        # Test that can do a GET with cache and 'only-if-cached'
-        uri = urllib.parse.urljoin(base, "304/test_etag.txt")
-        (response, content) = self.http.request(uri, "GET")
-        (response, content) = self.http.request(uri, "GET", headers={'cache-control': 'only-if-cached'})
-        self.assertEqual(response.fromcache, True)
-        self.assertEqual(response.status, 200)
-
-    def testGetOnlyIfCachedCacheMiss(self):
-        # Test that can do a GET with no cache with 'only-if-cached'
-        uri = urllib.parse.urljoin(base, "304/test_etag.txt")
-        (response, content) = self.http.request(uri, "GET", headers={'cache-control': 'only-if-cached'})
-        self.assertEqual(response.fromcache, False)
-        self.assertEqual(response.status, 504)
-
-    def testGetOnlyIfCachedNoCacheAtAll(self):
-        # Test that can do a GET with no cache with 'only-if-cached'
-        # Of course, there might be an intermediary beyond us
-        # that responds to the 'only-if-cached', so this
-        # test can't really be guaranteed to pass.
-        http = httplib2.Http()
-        uri = urllib.parse.urljoin(base, "304/test_etag.txt")
-        (response, content) = http.request(uri, "GET", headers={'cache-control': 'only-if-cached'})
-        self.assertEqual(response.fromcache, False)
-        self.assertEqual(response.status, 504)
-
-    def testUserAgent(self):
-        # Test that we provide a default user-agent
-        uri = urllib.parse.urljoin(base, "user-agent/test.cgi")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-        self.assertTrue(content.startswith(b"Python-httplib2/"))
-
-    def testUserAgentNonDefault(self):
-        # Test that the default user-agent can be over-ridden
-
-        uri = urllib.parse.urljoin(base, "user-agent/test.cgi")
-        (response, content) = self.http.request(uri, "GET", headers={'User-Agent': 'fred/1.0'})
-        self.assertEqual(response.status, 200)
-        self.assertTrue(content.startswith(b"fred/1.0"))
-
-    def testGet300WithLocation(self):
-        # Test the we automatically follow 300 redirects if a Location: header is provided
-        uri = urllib.parse.urljoin(base, "300/with-location-header.asis")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-        self.assertEqual(content, b"This is the final destination.\n")
-        self.assertEqual(response.previous.status, 300)
-        self.assertEqual(response.previous.fromcache, False)
-
-        # Confirm that the intermediate 300 is not cached
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-        self.assertEqual(content, b"This is the final destination.\n")
-        self.assertEqual(response.previous.status, 300)
-        self.assertEqual(response.previous.fromcache, False)
-
-    def testGet300WithLocationNoRedirect(self):
-        # Test the we automatically follow 300 redirects if a Location: header is provided
-        self.http.follow_redirects = False
-        uri = urllib.parse.urljoin(base, "300/with-location-header.asis")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 300)
-
-    def testGet300WithoutLocation(self):
-        # Not giving a Location: header in a 300 response is acceptable
-        # In which case we just return the 300 response
-        uri = urllib.parse.urljoin(base, "300/without-location-header.asis")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 300)
-        self.assertTrue(response['content-type'].startswith("text/html"))
-        self.assertEqual(response.previous, None)
-
-    def testGet301(self):
-        # Test that we automatically follow 301 redirects
-        # and that we cache the 301 response
-        uri = urllib.parse.urljoin(base, "301/onestep.asis")
-        destination = urllib.parse.urljoin(base, "302/final-destination.txt")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-        self.assertTrue('content-location' in response)
-        self.assertEqual(response['content-location'], destination)
-        self.assertEqual(content, b"This is the final destination.\n")
-        self.assertEqual(response.previous.status, 301)
-        self.assertEqual(response.previous.fromcache, False)
-
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response['content-location'], destination)
-        self.assertEqual(content, b"This is the final destination.\n")
-        self.assertEqual(response.previous.status, 301)
-        self.assertEqual(response.previous.fromcache, True)
-
-    def testHead301(self):
-        # Test that we automatically follow 301 redirects
-        uri = urllib.parse.urljoin(base, "301/onestep.asis")
-        (response, content) = self.http.request(uri, "HEAD")
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response.previous.status, 301)
-        self.assertEqual(response.previous.fromcache, False)
-
-    def testGet301NoRedirect(self):
-        # Test that we automatically follow 301 redirects
-        # and that we cache the 301 response
-        self.http.follow_redirects = False
-        uri = urllib.parse.urljoin(base, "301/onestep.asis")
-        destination = urllib.parse.urljoin(base, "302/final-destination.txt")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 301)
-
-
-    def testGet302(self):
-        # Test that we automatically follow 302 redirects
-        # and that we DO NOT cache the 302 response
-        uri = urllib.parse.urljoin(base, "302/onestep.asis")
-        destination = urllib.parse.urljoin(base, "302/final-destination.txt")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response['content-location'], destination)
-        self.assertEqual(content, b"This is the final destination.\n")
-        self.assertEqual(response.previous.status, 302)
-        self.assertEqual(response.previous.fromcache, False)
-
-        uri = urllib.parse.urljoin(base, "302/onestep.asis")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response.fromcache, True)
-        self.assertEqual(response['content-location'], destination)
-        self.assertEqual(content, b"This is the final destination.\n")
-        self.assertEqual(response.previous.status, 302)
-        self.assertEqual(response.previous.fromcache, False)
-        self.assertEqual(response.previous['content-location'], uri)
-
-        uri = urllib.parse.urljoin(base, "302/twostep.asis")
-
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response.fromcache, True)
-        self.assertEqual(content, b"This is the final destination.\n")
-        self.assertEqual(response.previous.status, 302)
-        self.assertEqual(response.previous.fromcache, False)
-
-    def testGet302RedirectionLimit(self):
-        # Test that we can set a lower redirection limit
-        # and that we raise an exception when we exceed
-        # that limit.
-        self.http.force_exception_to_status_code = False
-
-        uri = urllib.parse.urljoin(base, "302/twostep.asis")
-        try:
-            (response, content) = self.http.request(uri, "GET", redirections = 1)
-            self.fail("This should not happen")
-        except httplib2.RedirectLimit:
-            pass
-        except Exception as e:
-            self.fail("Threw wrong kind of exception ")
-
-        # Re-run the test with out the exceptions
-        self.http.force_exception_to_status_code = True
-
-        (response, content) = self.http.request(uri, "GET", redirections = 1)
-        self.assertEqual(response.status, 500)
-        self.assertTrue(response.reason.startswith("Redirected more"))
-        self.assertEqual("302", response['status'])
-        self.assertTrue(content.startswith(b"<html>"))
-        self.assertTrue(response.previous != None)
-
-    def testGet302NoLocation(self):
-        # Test that we throw an exception when we get
-        # a 302 with no Location: header.
-        self.http.force_exception_to_status_code = False
-        uri = urllib.parse.urljoin(base, "302/no-location.asis")
-        try:
-            (response, content) = self.http.request(uri, "GET")
-            self.fail("Should never reach here")
-        except httplib2.RedirectMissingLocation:
-            pass
-        except Exception as e:
-            self.fail("Threw wrong kind of exception ")
-
-        # Re-run the test with out the exceptions
-        self.http.force_exception_to_status_code = True
-
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 500)
-        self.assertTrue(response.reason.startswith("Redirected but"))
-        self.assertEqual("302", response['status'])
-        self.assertTrue(content.startswith(b"This is content"))
-
-    def testGet301ViaHttps(self):
-        # Google always redirects to http://google.com
-        (response, content) = self.http.request("https://code.google.com/apis/", "GET")
-        self.assertEqual(200, response.status)
-        self.assertEqual(301, response.previous.status)
-
-    def testGetViaHttps(self):
-        # Test that we can handle HTTPS
-        (response, content) = self.http.request("https://google.com/adsense/", "GET")
-        self.assertEqual(200, response.status)
-
-    def testGetViaHttpsSpecViolationOnLocation(self):
-        # Test that we follow redirects through HTTPS
-        # even if they violate the spec by including
-        # a relative Location: header instead of an
-        # absolute one.
-        (response, content) = self.http.request("https://google.com/adsense", "GET")
-        self.assertEqual(200, response.status)
-        self.assertNotEqual(None, response.previous)
-
-
-    def testGetViaHttpsKeyCert(self):
-        #  At this point I can only test
-        #  that the key and cert files are passed in
-        #  correctly to httplib. It would be nice to have
-        #  a real https endpoint to test against.
-        http = httplib2.Http(timeout=2)
-
-        http.add_certificate("akeyfile", "acertfile", "bitworking.org")
-        try:
-          (response, content) = http.request("https://bitworking.org", "GET")
-        except AttributeError:
-          self.assertEqual(http.connections["https:bitworking.org"].key_file, "akeyfile")
-          self.assertEqual(http.connections["https:bitworking.org"].cert_file, "acertfile")
-        except IOError:
-          # Skip on 3.2
-          pass
-
-        try:
-            (response, content) = http.request("https://notthere.bitworking.org", "GET")
-        except httplib2.ServerNotFoundError:
-          self.assertEqual(http.connections["https:notthere.bitworking.org"].key_file, None)
-          self.assertEqual(http.connections["https:notthere.bitworking.org"].cert_file, None)
-        except IOError:
-          # Skip on 3.2
-          pass
-
-    def testSslCertValidation(self):
-          # Test that we get an ssl.SSLError when specifying a non-existent CA
-          # certs file.
-          http = httplib2.Http(ca_certs='/nosuchfile')
-          self.assertRaises(IOError,
-                  http.request, "https://www.google.com/", "GET")
-
-          # Test that we get a SSLHandshakeError if we try to access
-          # https://www.google.com, using a CA cert file that doesn't contain
-          # the CA Gogole uses (i.e., simulating a cert that's not signed by a
-          # trusted CA).
-          other_ca_certs = os.path.join(
-                  os.path.dirname(os.path.abspath(httplib2.__file__ )),
-                  "test", "other_cacerts.txt")
-          http = httplib2.Http(ca_certs=other_ca_certs)
-          self.assertRaises(ssl.SSLError,
-            http.request,"https://www.google.com/", "GET")
-
-    def testSniHostnameValidation(self):
-        self.http.request("https://google.com/", method="GET")
-
-    def testGet303(self):
-        # Do a follow-up GET on a Location: header
-        # returned from a POST that gave a 303.
-        uri = urllib.parse.urljoin(base, "303/303.cgi")
-        (response, content) = self.http.request(uri, "POST", " ")
-        self.assertEqual(response.status, 200)
-        self.assertEqual(content, b"This is the final destination.\n")
-        self.assertEqual(response.previous.status, 303)
-
-    def testGet303NoRedirect(self):
-        # Do a follow-up GET on a Location: header
-        # returned from a POST that gave a 303.
-        self.http.follow_redirects = False
-        uri = urllib.parse.urljoin(base, "303/303.cgi")
-        (response, content) = self.http.request(uri, "POST", " ")
-        self.assertEqual(response.status, 303)
-
-    def test303ForDifferentMethods(self):
-        # Test that all methods can be used
-        uri = urllib.parse.urljoin(base, "303/redirect-to-reflector.cgi")
-        for (method, method_on_303) in [("PUT", "GET"), ("DELETE", "GET"), ("POST", "GET"), ("GET", "GET"), ("HEAD", "GET")]:
-            (response, content) = self.http.request(uri, method, body=b" ")
-            self.assertEqual(response['x-method'], method_on_303)
-
-    def testGet304(self):
-        # Test that we use ETags properly to validate our cache
-        uri = urllib.parse.urljoin(base, "304/test_etag.txt")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertNotEqual(response['etag'], "")
-
-        (response, content) = self.http.request(uri, "GET")
-        (response, content) = self.http.request(uri, "GET", headers = {'cache-control': 'must-revalidate'})
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response.fromcache, True)
-
-        cache_file_name = os.path.join(cacheDirName, httplib2.safename(httplib2.urlnorm(uri)[-1]))
-        f = open(cache_file_name, "r")
-        status_line = f.readline()
-        f.close()
-
-        self.assertTrue(status_line.startswith("status:"))
-
-        (response, content) = self.http.request(uri, "HEAD")
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response.fromcache, True)
-
-        (response, content) = self.http.request(uri, "GET", headers = {'range': 'bytes=0-0'})
-        self.assertEqual(response.status, 206)
-        self.assertEqual(response.fromcache, False)
-
-    def testGetIgnoreEtag(self):
-        # Test that we can forcibly ignore ETags
-        uri = urllib.parse.urljoin(base, "reflector/reflector.cgi")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertNotEqual(response['etag'], "")
-
-        (response, content) = self.http.request(uri, "GET", headers = {'cache-control': 'max-age=0'})
-        d = self.reflector(content)
-        self.assertTrue('HTTP_IF_NONE_MATCH' in d)
-
-        self.http.ignore_etag = True
-        (response, content) = self.http.request(uri, "GET", headers = {'cache-control': 'max-age=0'})
-        d = self.reflector(content)
-        self.assertEqual(response.fromcache, False)
-        self.assertFalse('HTTP_IF_NONE_MATCH' in d)
-
-    def testOverrideEtag(self):
-        # Test that we can forcibly ignore ETags
-        uri = urllib.parse.urljoin(base, "reflector/reflector.cgi")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertNotEqual(response['etag'], "")
-
-        (response, content) = self.http.request(uri, "GET", headers = {'cache-control': 'max-age=0'})
-        d = self.reflector(content)
-        self.assertTrue('HTTP_IF_NONE_MATCH' in d)
-        self.assertNotEqual(d['HTTP_IF_NONE_MATCH'], "fred")
-
-        (response, content) = self.http.request(uri, "GET", headers = {'cache-control': 'max-age=0', 'if-none-match': 'fred'})
-        d = self.reflector(content)
-        self.assertTrue('HTTP_IF_NONE_MATCH' in d)
-        self.assertEqual(d['HTTP_IF_NONE_MATCH'], "fred")
-
-#MAP-commented this out because it consistently fails
-#    def testGet304EndToEnd(self):
-#       # Test that end to end headers get overwritten in the cache
-#        uri = urllib.parse.urljoin(base, "304/end2end.cgi")
-#        (response, content) = self.http.request(uri, "GET")
-#        self.assertNotEqual(response['etag'], "")
-#        old_date = response['date']
-#        time.sleep(2)
-#
-#        (response, content) = self.http.request(uri, "GET", headers = {'Cache-Control': 'max-age=0'})
-#        # The response should be from the cache, but the Date: header should be updated.
-#        new_date = response['date']
-#        self.assertNotEqual(new_date, old_date)
-#        self.assertEqual(response.status, 200)
-#        self.assertEqual(response.fromcache, True)
-
-    def testGet304LastModified(self):
-        # Test that we can still handle a 304
-        # by only using the last-modified cache validator.
-        uri = urllib.parse.urljoin(base, "304/last-modified-only/last-modified-only.txt")
-        (response, content) = self.http.request(uri, "GET")
-
-        self.assertNotEqual(response['last-modified'], "")
-        (response, content) = self.http.request(uri, "GET")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response.fromcache, True)
-
-    def testGet307(self):
-        # Test that we do follow 307 redirects but
-        # do not cache the 307
-        uri = urllib.parse.urljoin(base, "307/onestep.asis")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-        self.assertEqual(content, b"This is the final destination.\n")
-        self.assertEqual(response.previous.status, 307)
-        self.assertEqual(response.previous.fromcache, False)
-
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response.fromcache, True)
-        self.assertEqual(content, b"This is the final destination.\n")
-        self.assertEqual(response.previous.status, 307)
-        self.assertEqual(response.previous.fromcache, False)
-
-    def testGet410(self):
-        # Test that we pass 410's through
-        uri = urllib.parse.urljoin(base, "410/410.asis")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 410)
-
-    def testVaryHeaderSimple(self):
-        """
-        RFC 2616 13.6
-        When the cache receives a subsequent request whose Request-URI
-        specifies one or more cache entries including a Vary header field,
-        the cache MUST NOT use such a cache entry to construct a response
-        to the new request unless all of the selecting request-headers
-        present in the new request match the corresponding stored
-        request-headers in the original request.
-        """
-        # test that the vary header is sent
-        uri = urllib.parse.urljoin(base, "vary/accept.asis")
-        (response, content) = self.http.request(uri, "GET", headers={'Accept': 'text/plain'})
-        self.assertEqual(response.status, 200)
-        self.assertTrue('vary' in response)
-
-        # get the resource again, from the cache since accept header in this
-        # request is the same as the request
-        (response, content) = self.http.request(uri, "GET", headers={'Accept': 'text/plain'})
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response.fromcache, True, msg="Should be from cache")
-
-        # get the resource again, not from cache since the Accept header does not match
-        (response, content) = self.http.request(uri, "GET", headers={'Accept': 'text/html'})
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response.fromcache, False, msg="Should not be from cache")
-
-        # get the resource again, without any Accept header, so again no match
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response.fromcache, False, msg="Should not be from cache")
-
-    def testNoVary(self):
-        pass
-        # when there is no vary, a different Accept header (e.g.) should not
-        # impact if the cache is used
-        # test that the vary header is not sent
-        # uri = urllib.parse.urljoin(base, "vary/no-vary.asis")
-        # (response, content) = self.http.request(uri, "GET", headers={'Accept': 'text/plain'})
-        # self.assertEqual(response.status, 200)
-        # self.assertFalse('vary' in response)
-        #
-        # (response, content) = self.http.request(uri, "GET", headers={'Accept': 'text/plain'})
-        # self.assertEqual(response.status, 200)
-        # self.assertEqual(response.fromcache, True, msg="Should be from cache")
-        #
-        # (response, content) = self.http.request(uri, "GET", headers={'Accept': 'text/html'})
-        # self.assertEqual(response.status, 200)
-        # self.assertEqual(response.fromcache, True, msg="Should be from cache")
-
-    def testVaryHeaderDouble(self):
-        uri = urllib.parse.urljoin(base, "vary/accept-double.asis")
-        (response, content) = self.http.request(uri, "GET", headers={
-            'Accept': 'text/plain', 'Accept-Language': 'da, en-gb;q=0.8, en;q=0.7'})
-        self.assertEqual(response.status, 200)
-        self.assertTrue('vary' in response)
-
-        # we are from cache
-        (response, content) = self.http.request(uri, "GET", headers={
-            'Accept': 'text/plain', 'Accept-Language': 'da, en-gb;q=0.8, en;q=0.7'})
-        self.assertEqual(response.fromcache, True, msg="Should be from cache")
-
-        (response, content) = self.http.request(uri, "GET", headers={'Accept': 'text/plain'})
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response.fromcache, False)
-
-        # get the resource again, not from cache, varied headers don't match exactly
-        (response, content) = self.http.request(uri, "GET", headers={'Accept-Language': 'da'})
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response.fromcache, False, msg="Should not be from cache")
-
-    def testVaryUnusedHeader(self):
-        # A header's value is not considered to vary if it's not used at all.
-        uri = urllib.parse.urljoin(base, "vary/unused-header.asis")
-        (response, content) = self.http.request(uri, "GET", headers={
-            'Accept': 'text/plain'})
-        self.assertEqual(response.status, 200)
-        self.assertTrue('vary' in response)
-
-        # we are from cache
-        (response, content) = self.http.request(uri, "GET", headers={
-            'Accept': 'text/plain',})
-        self.assertEqual(response.fromcache, True, msg="Should be from cache")
-
-    def testHeadGZip(self):
-        # Test that we don't try to decompress a HEAD response
-        uri = urllib.parse.urljoin(base, "gzip/final-destination.txt")
-        (response, content) = self.http.request(uri, "HEAD")
-        self.assertEqual(response.status, 200)
-        self.assertNotEqual(int(response['content-length']), 0)
-        self.assertEqual(content, b"")
-
-    def testGetGZip(self):
-        # Test that we support gzip compression
-        uri = urllib.parse.urljoin(base, "gzip/final-destination.txt")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-        self.assertFalse('content-encoding' in response)
-        self.assertTrue('-content-encoding' in response)
-        self.assertEqual(int(response['content-length']), len(b"This is the final destination.\n"))
-        self.assertEqual(content, b"This is the final destination.\n")
-
-    def testPostAndGZipResponse(self):
-        uri = urllib.parse.urljoin(base, "gzip/post.cgi")
-        (response, content) = self.http.request(uri, "POST", body=" ")
-        self.assertEqual(response.status, 200)
-        self.assertFalse('content-encoding' in response)
-        self.assertTrue('-content-encoding' in response)
-
-    def testGetGZipFailure(self):
-        # Test that we raise a good exception when the gzip fails
-        self.http.force_exception_to_status_code = False
-        uri = urllib.parse.urljoin(base, "gzip/failed-compression.asis")
-        try:
-            (response, content) = self.http.request(uri, "GET")
-            self.fail("Should never reach here")
-        except httplib2.FailedToDecompressContent:
-            pass
-        except Exception:
-            self.fail("Threw wrong kind of exception")
-
-        # Re-run the test without the exceptions
-        self.http.force_exception_to_status_code = True
-
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 500)
-        self.assertTrue(response.reason.startswith("Content purported"))
-
-    def testIndividualTimeout(self):
-        uri = urllib.parse.urljoin(base, "timeout/timeout.cgi")
-        http = httplib2.Http(timeout=1)
-        http.force_exception_to_status_code = True
-
-        (response, content) = http.request(uri)
-        self.assertEqual(response.status, 408)
-        self.assertTrue(response.reason.startswith("Request Timeout"))
-        self.assertTrue(content.startswith(b"Request Timeout"))
-
-
-    def testGetDeflate(self):
-        # Test that we support deflate compression
-        uri = urllib.parse.urljoin(base, "deflate/deflated.asis")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-        self.assertFalse('content-encoding' in response)
-        self.assertEqual(int(response['content-length']), len("This is the final destination."))
-        self.assertEqual(content, b"This is the final destination.")
-
-    def testGetDeflateFailure(self):
-        # Test that we raise a good exception when the deflate fails
-        self.http.force_exception_to_status_code = False
-
-        uri = urllib.parse.urljoin(base, "deflate/failed-compression.asis")
-        try:
-            (response, content) = self.http.request(uri, "GET")
-            self.fail("Should never reach here")
-        except httplib2.FailedToDecompressContent:
-            pass
-        except Exception:
-            self.fail("Threw wrong kind of exception")
-
-        # Re-run the test without the exceptions
-        self.http.force_exception_to_status_code = True
-
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 500)
-        self.assertTrue(response.reason.startswith("Content purported"))
-
-    def testGetDuplicateHeaders(self):
-        # Test that duplicate headers get concatenated via ','
-        uri = urllib.parse.urljoin(base, "duplicate-headers/multilink.asis")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-        self.assertEqual(content, b"This is content\n")
-        self.assertEqual(response['link'].split(",")[0], '<http://bitworking.org>; rel="home"; title="BitWorking"')
-
-    def testGetCacheControlNoCache(self):
-        # Test Cache-Control: no-cache on requests
-        uri = urllib.parse.urljoin(base, "304/test_etag.txt")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertNotEqual(response['etag'], "")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response.fromcache, True)
-
-        (response, content) = self.http.request(uri, "GET", headers={'Cache-Control': 'no-cache'})
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response.fromcache, False)
-
-    def testGetCacheControlPragmaNoCache(self):
-        # Test Pragma: no-cache on requests
-        uri = urllib.parse.urljoin(base, "304/test_etag.txt")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertNotEqual(response['etag'], "")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response.fromcache, True)
-
-        (response, content) = self.http.request(uri, "GET", headers={'Pragma': 'no-cache'})
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response.fromcache, False)
-
-    def testGetCacheControlNoStoreRequest(self):
-        # A no-store request means that the response should not be stored.
-        uri = urllib.parse.urljoin(base, "304/test_etag.txt")
-
-        (response, content) = self.http.request(uri, "GET", headers={'Cache-Control': 'no-store'})
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response.fromcache, False)
-
-        (response, content) = self.http.request(uri, "GET", headers={'Cache-Control': 'no-store'})
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response.fromcache, False)
-
-    def testGetCacheControlNoStoreResponse(self):
-        # A no-store response means that the response should not be stored.
-        uri = urllib.parse.urljoin(base, "no-store/no-store.asis")
-
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response.fromcache, False)
-
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response.fromcache, False)
-
-    def testGetCacheControlNoCacheNoStoreRequest(self):
-        # Test that a no-store, no-cache clears the entry from the cache
-        # even if it was cached previously.
-        uri = urllib.parse.urljoin(base, "304/test_etag.txt")
-
-        (response, content) = self.http.request(uri, "GET")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.fromcache, True)
-        (response, content) = self.http.request(uri, "GET", headers={'Cache-Control': 'no-store, no-cache'})
-        (response, content) = self.http.request(uri, "GET", headers={'Cache-Control': 'no-store, no-cache'})
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response.fromcache, False)
-
-    def testUpdateInvalidatesCache(self):
-        # Test that calling PUT or DELETE on a
-        # URI that is cached invalidates that cache entry.
-        uri = urllib.parse.urljoin(base, "304/test_etag.txt")
-
-        (response, content) = self.http.request(uri, "GET")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.fromcache, True)
-        (response, content) = self.http.request(uri, "DELETE")
-        self.assertEqual(response.status, 405)
-
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.fromcache, False)
-
-    def testUpdateUsesCachedETag(self):
-        # Test that we natively support http://www.w3.org/1999/04/Editing/
-        uri = urllib.parse.urljoin(base, "conditional-updates/test.cgi")
-
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response.fromcache, False)
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response.fromcache, True)
-        (response, content) = self.http.request(uri, "PUT", body="foo")
-        self.assertEqual(response.status, 200)
-        (response, content) = self.http.request(uri, "PUT", body="foo")
-        self.assertEqual(response.status, 412)
-
-
-    def testUpdatePatchUsesCachedETag(self):
-        # Test that we natively support http://www.w3.org/1999/04/Editing/
-        uri = urllib.parse.urljoin(base, "conditional-updates/test.cgi")
-
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response.fromcache, False)
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response.fromcache, True)
-        (response, content) = self.http.request(uri, "PATCH", body="foo")
-        self.assertEqual(response.status, 200)
-        (response, content) = self.http.request(uri, "PATCH", body="foo")
-        self.assertEqual(response.status, 412)
-
-    def testUpdateUsesCachedETagAndOCMethod(self):
-        # Test that we natively support http://www.w3.org/1999/04/Editing/
-        uri = urllib.parse.urljoin(base, "conditional-updates/test.cgi")
-
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response.fromcache, False)
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response.fromcache, True)
-        self.http.optimistic_concurrency_methods.append("DELETE")
-        (response, content) = self.http.request(uri, "DELETE")
-        self.assertEqual(response.status, 200)
-
-
-    def testUpdateUsesCachedETagOverridden(self):
-        # Test that we natively support http://www.w3.org/1999/04/Editing/
-        uri = urllib.parse.urljoin(base, "conditional-updates/test.cgi")
-
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response.fromcache, False)
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-        self.assertEqual(response.fromcache, True)
-        (response, content) = self.http.request(uri, "PUT", body="foo", headers={'if-match': 'fred'})
-        self.assertEqual(response.status, 412)
-
-    def testBasicAuth(self):
-        # Test Basic Authentication
-        uri = urllib.parse.urljoin(base, "basic/file.txt")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 401)
-
-        uri = urllib.parse.urljoin(base, "basic/")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 401)
-
-        self.http.add_credentials('joe', 'password')
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-
-        uri = urllib.parse.urljoin(base, "basic/file.txt")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-
-    def testBasicAuthWithDomain(self):
-        # Test Basic Authentication
-        uri = urllib.parse.urljoin(base, "basic/file.txt")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 401)
-
-        uri = urllib.parse.urljoin(base, "basic/")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 401)
-
-        self.http.add_credentials('joe', 'password', "example.org")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 401)
-
-        uri = urllib.parse.urljoin(base, "basic/file.txt")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 401)
-
-        domain = urllib.parse.urlparse(base)[1]
-        self.http.add_credentials('joe', 'password', domain)
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-
-        uri = urllib.parse.urljoin(base, "basic/file.txt")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-
-
-
-
-
-
-    def testBasicAuthTwoDifferentCredentials(self):
-        # Test Basic Authentication with multiple sets of credentials
-        uri = urllib.parse.urljoin(base, "basic2/file.txt")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 401)
-
-        uri = urllib.parse.urljoin(base, "basic2/")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 401)
-
-        self.http.add_credentials('fred', 'barney')
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-
-        uri = urllib.parse.urljoin(base, "basic2/file.txt")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-
-    def testBasicAuthNested(self):
-        # Test Basic Authentication with resources
-        # that are nested
-        uri = urllib.parse.urljoin(base, "basic-nested/")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 401)
-
-        uri = urllib.parse.urljoin(base, "basic-nested/subdir")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 401)
-
-        # Now add in credentials one at a time and test.
-        self.http.add_credentials('joe', 'password')
-
-        uri = urllib.parse.urljoin(base, "basic-nested/")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-
-        uri = urllib.parse.urljoin(base, "basic-nested/subdir")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 401)
-
-        self.http.add_credentials('fred', 'barney')
-
-        uri = urllib.parse.urljoin(base, "basic-nested/")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-
-        uri = urllib.parse.urljoin(base, "basic-nested/subdir")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-
-    def testDigestAuth(self):
-        # Test that we support Digest Authentication
-        uri = urllib.parse.urljoin(base, "digest/")
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 401)
-
-        self.http.add_credentials('joe', 'password')
-        (response, content) = self.http.request(uri, "GET")
-        self.assertEqual(response.status, 200)
-
-        uri = urllib.parse.urljoin(base, "digest/file.txt")
-        (response, content) = self.http.request(uri, "GET")
-
-    def testDigestAuthNextNonceAndNC(self):
-        # Test that if the server sets nextnonce, we reset
-        # the nonce count back to 1
-        uri = urllib.parse.urljoin(base, "digest/file.txt")
-        self.http.add_credentials('joe', 'password')
-        (response, content) = self.http.request(uri, "GET", headers = {"cache-control":"no-cache"})
-        info = httplib2._parse_www_authenticate(response, 'authentication-info')
-        self.assertEqual(response.status, 200)
-        (response, content) = self.http.request(uri, "GET", headers = {"cache-control":"no-cache"})
-        info2 = httplib2._parse_www_authenticate(response, 'authentication-info')
-        self.assertEqual(response.status, 200)
-
-        if 'nextnonce' in info:
-            self.assertEqual(info2['nc'], 1)
-
-    def testDigestAuthStale(self):
-        # Test that we can handle a nonce becoming stale
-        uri = urllib.parse.urljoin(base, "digest-expire/file.txt")
-        self.http.add_credentials('joe', 'password')
-        (response, content) = self.http.request(uri, "GET", headers = {"cache-control":"no-cache"})
-        info = httplib2._parse_www_authenticate(response, 'authentication-info')
-        self.assertEqual(response.status, 200)
-
-        time.sleep(3)
-        # Sleep long enough that the nonce becomes stale
-
-        (response, content) = self.http.request(uri, "GET", headers = {"cache-control":"no-cache"})
-        self.assertFalse(response.fromcache)
-        self.assertTrue(response._stale_digest)
-        info3 = httplib2._parse_www_authenticate(response, 'authentication-info')
-        self.assertEqual(response.status, 200)
-
-    def reflector(self, content):
-        return  dict( [tuple(x.split("=", 1)) for x in content.decode('utf-8').strip().split("\n")] )
-
-    def testReflector(self):
-        uri = urllib.parse.urljoin(base, "reflector/reflector.cgi")
-        (response, content) = self.http.request(uri, "GET")
-        d = self.reflector(content)
-        self.assertTrue('HTTP_USER_AGENT' in d)
-
-
-    def testConnectionClose(self):
-        uri = "http://www.google.com/"
-        (response, content) = self.http.request(uri, "GET")
-        for c in self.http.connections.values():
-            self.assertNotEqual(None, c.sock)
-        (response, content) = self.http.request(uri, "GET", headers={"connection": "close"})
-        for c in self.http.connections.values():
-            self.assertEqual(None, c.sock)
-
-    def testPickleHttp(self):
-        pickled_http = pickle.dumps(self.http)
-        new_http = pickle.loads(pickled_http)
-
-        self.assertEqual(sorted(new_http.__dict__.keys()),
-                         sorted(self.http.__dict__.keys()))
-        for key in new_http.__dict__:
-            if key in ('certificates', 'credentials'):
-                self.assertEqual(new_http.__dict__[key].credentials,
-                                 self.http.__dict__[key].credentials)
-            elif key == 'cache':
-                self.assertEqual(new_http.__dict__[key].cache,
-                                 self.http.__dict__[key].cache)
-            else:
-                self.assertEqual(new_http.__dict__[key],
-                                 self.http.__dict__[key])
-
-    def testPickleHttpWithConnection(self):
-        self.http.request('http://bitworking.org',
-                          connection_type=_MyHTTPConnection)
-        pickled_http = pickle.dumps(self.http)
-        new_http = pickle.loads(pickled_http)
-
-        self.assertEqual(list(self.http.connections.keys()),
-                         ['http:bitworking.org'])
-        self.assertEqual(new_http.connections, {})
-
-    def testPickleCustomRequestHttp(self):
-        def dummy_request(*args, **kwargs):
-            return new_request(*args, **kwargs)
-        dummy_request.dummy_attr = 'dummy_value'
-
-        self.http.request = dummy_request
-        pickled_http = pickle.dumps(self.http)
-        self.assertFalse(b"S'request'" in pickled_http)
-
-try:
-    import memcache
-    class HttpTestMemCached(HttpTest):
-        def setUp(self):
-            self.cache = memcache.Client(['127.0.0.1:11211'], debug=0)
-            #self.cache = memcache.Client(['10.0.0.4:11211'], debug=1)
-            self.http = httplib2.Http(self.cache)
-            self.cache.flush_all()
-            # Not exactly sure why the sleep is needed here, but
-            # if not present then some unit tests that rely on caching
-            # fail. Memcached seems to lose some sets immediately
-            # after a flush_all if the set is to a value that
-            # was previously cached. (Maybe the flush is handled async?)
-            time.sleep(1)
-            self.http.clear_credentials()
-except:
-    pass
-
-
-
-# ------------------------------------------------------------------------
-
-class HttpPrivateTest(unittest.TestCase):
-
-    def testParseCacheControl(self):
-        # Test that we can parse the Cache-Control header
-        self.assertEqual({}, httplib2._parse_cache_control({}))
-        self.assertEqual({'no-cache': 1}, httplib2._parse_cache_control({'cache-control': ' no-cache'}))
-        cc = httplib2._parse_cache_control({'cache-control': ' no-cache, max-age = 7200'})
-        self.assertEqual(cc['no-cache'], 1)
-        self.assertEqual(cc['max-age'], '7200')
-        cc = httplib2._parse_cache_control({'cache-control': ' , '})
-        self.assertEqual(cc[''], 1)
-
-        try:
-            cc = httplib2._parse_cache_control({'cache-control': 'Max-age=3600;post-check=1800,pre-check=3600'})
-            self.assertTrue("max-age" in cc)
-        except:
-            self.fail("Should not throw exception")
-
-
-
-
-    def testNormalizeHeaders(self):
-        # Test that we normalize headers to lowercase
-        h = httplib2._normalize_headers({'Cache-Control': 'no-cache', 'Other': 'Stuff'})
-        self.assertTrue('cache-control' in h)
-        self.assertTrue('other' in h)
-        self.assertEqual('Stuff', h['other'])
-
-    def testExpirationModelTransparent(self):
-        # Test that no-cache makes our request TRANSPARENT
-        response_headers = {
-            'cache-control': 'max-age=7200'
-        }
-        request_headers = {
-            'cache-control': 'no-cache'
-        }
-        self.assertEqual("TRANSPARENT", httplib2._entry_disposition(response_headers, request_headers))
-
-    def testMaxAgeNonNumeric(self):
-        # Test that a non-numeric max-age results in a STALE entry
-        response_headers = {
-            'cache-control': 'max-age=fred, min-fresh=barney'
-        }
-        request_headers = {
-        }
-        self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers))
-
-
-    def testExpirationModelNoCacheResponse(self):
-        # The date and expires point to an entry that should be
-        # FRESH, but the no-cache overrides that.
-        now = time.time()
-        response_headers = {
-            'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now)),
-            'expires': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now+4)),
-            'cache-control': 'no-cache'
-        }
-        request_headers = {
-        }
-        self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers))
-
-    def testExpirationModelStaleRequestMustReval(self):
-        # must-revalidate forces STALE
-        self.assertEqual("STALE", httplib2._entry_disposition({}, {'cache-control': 'must-revalidate'}))
-
-    def testExpirationModelStaleResponseMustReval(self):
-        # must-revalidate forces STALE
-        self.assertEqual("STALE", httplib2._entry_disposition({'cache-control': 'must-revalidate'}, {}))
-
-    def testExpirationModelFresh(self):
-        response_headers = {
-            'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()),
-            'cache-control': 'max-age=2'
-        }
-        request_headers = {
-        }
-        self.assertEqual("FRESH", httplib2._entry_disposition(response_headers, request_headers))
-        time.sleep(3)
-        self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers))
-
-    def testExpirationMaxAge0(self):
-        response_headers = {
-            'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()),
-            'cache-control': 'max-age=0'
-        }
-        request_headers = {
-        }
-        self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers))
-
-    def testExpirationModelDateAndExpires(self):
-        now = time.time()
-        response_headers = {
-            'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now)),
-            'expires': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now+2)),
-        }
-        request_headers = {
-        }
-        self.assertEqual("FRESH", httplib2._entry_disposition(response_headers, request_headers))
-        time.sleep(3)
-        self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers))
-
-    def testExpiresZero(self):
-        now = time.time()
-        response_headers = {
-            'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now)),
-            'expires': "0",
-        }
-        request_headers = {
-        }
-        self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers))
-
-    def testExpirationModelDateOnly(self):
-        now = time.time()
-        response_headers = {
-            'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now+3)),
-        }
-        request_headers = {
-        }
-        self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers))
-
-    def testExpirationModelOnlyIfCached(self):
-        response_headers = {
-        }
-        request_headers = {
-            'cache-control': 'only-if-cached',
-        }
-        self.assertEqual("FRESH", httplib2._entry_disposition(response_headers, request_headers))
-
-    def testExpirationModelMaxAgeBoth(self):
-        now = time.time()
-        response_headers = {
-            'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now)),
-            'cache-control': 'max-age=2'
-        }
-        request_headers = {
-            'cache-control': 'max-age=0'
-        }
-        self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers))
-
-    def testExpirationModelDateAndExpiresMinFresh1(self):
-        now = time.time()
-        response_headers = {
-            'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now)),
-            'expires': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now+2)),
-        }
-        request_headers = {
-            'cache-control': 'min-fresh=2'
-        }
-        self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers))
-
-    def testExpirationModelDateAndExpiresMinFresh2(self):
-        now = time.time()
-        response_headers = {
-            'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now)),
-            'expires': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now+4)),
-        }
-        request_headers = {
-            'cache-control': 'min-fresh=2'
-        }
-        self.assertEqual("FRESH", httplib2._entry_disposition(response_headers, request_headers))
-
-    def testParseWWWAuthenticateEmpty(self):
-        res = httplib2._parse_www_authenticate({})
-        self.assertEqual(len(list(res.keys())), 0)
-
-    def testParseWWWAuthenticate(self):
-        # different uses of spaces around commas
-        res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Test realm="test realm" , foo=foo ,bar="bar", baz=baz,qux=qux'})
-        self.assertEqual(len(list(res.keys())), 1)
-        self.assertEqual(len(list(res['test'].keys())), 5)
-
-        # tokens with non-alphanum
-        res = httplib2._parse_www_authenticate({ 'www-authenticate': 'T*!%#st realm=to*!%#en, to*!%#en="quoted string"'})
-        self.assertEqual(len(list(res.keys())), 1)
-        self.assertEqual(len(list(res['t*!%#st'].keys())), 2)
-
-        # quoted string with quoted pairs
-        res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Test realm="a \\"test\\" realm"'})
-        self.assertEqual(len(list(res.keys())), 1)
-        self.assertEqual(res['test']['realm'], 'a "test" realm')
-
-    def testParseWWWAuthenticateStrict(self):
-        httplib2.USE_WWW_AUTH_STRICT_PARSING = 1;
-        self.testParseWWWAuthenticate();
-        httplib2.USE_WWW_AUTH_STRICT_PARSING = 0;
-
-    def testParseWWWAuthenticateBasic(self):
-        res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Basic realm="me"'})
-        basic = res['basic']
-        self.assertEqual('me', basic['realm'])
-
-        res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Basic realm="me", algorithm="MD5"'})
-        basic = res['basic']
-        self.assertEqual('me', basic['realm'])
-        self.assertEqual('MD5', basic['algorithm'])
-
-        res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Basic realm="me", algorithm=MD5'})
-        basic = res['basic']
-        self.assertEqual('me', basic['realm'])
-        self.assertEqual('MD5', basic['algorithm'])
-
-    def testParseWWWAuthenticateBasic2(self):
-        res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Basic realm="me",other="fred" '})
-        basic = res['basic']
-        self.assertEqual('me', basic['realm'])
-        self.assertEqual('fred', basic['other'])
-
-    def testParseWWWAuthenticateBasic3(self):
-        res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Basic REAlm="me" '})
-        basic = res['basic']
-        self.assertEqual('me', basic['realm'])
-
-
-    def testParseWWWAuthenticateDigest(self):
-        res = httplib2._parse_www_authenticate({ 'www-authenticate':
-                'Digest realm="testrealm@host.com", qop="auth,auth-int", nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", opaque="5ccc069c403ebaf9f0171e9517f40e41"'})
-        digest = res['digest']
-        self.assertEqual('testrealm@host.com', digest['realm'])
-        self.assertEqual('auth,auth-int', digest['qop'])
-
-
-    def testParseWWWAuthenticateMultiple(self):
-        res = httplib2._parse_www_authenticate({ 'www-authenticate':
-                'Digest realm="testrealm@host.com", qop="auth,auth-int", nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", opaque="5ccc069c403ebaf9f0171e9517f40e41" Basic REAlm="me" '})
-        digest = res['digest']
-        self.assertEqual('testrealm@host.com', digest['realm'])
-        self.assertEqual('auth,auth-int', digest['qop'])
-        self.assertEqual('dcd98b7102dd2f0e8b11d0f600bfb0c093', digest['nonce'])
-        self.assertEqual('5ccc069c403ebaf9f0171e9517f40e41', digest['opaque'])
-        basic = res['basic']
-        self.assertEqual('me', basic['realm'])
-
-    def testParseWWWAuthenticateMultiple2(self):
-        # Handle an added comma between challenges, which might get thrown in if the challenges were
-        # originally sent in separate www-authenticate headers.
-        res = httplib2._parse_www_authenticate({ 'www-authenticate':
-                'Digest realm="testrealm@host.com", qop="auth,auth-int", nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", opaque="5ccc069c403ebaf9f0171e9517f40e41", Basic REAlm="me" '})
-        digest = res['digest']
-        self.assertEqual('testrealm@host.com', digest['realm'])
-        self.assertEqual('auth,auth-int', digest['qop'])
-        self.assertEqual('dcd98b7102dd2f0e8b11d0f600bfb0c093', digest['nonce'])
-        self.assertEqual('5ccc069c403ebaf9f0171e9517f40e41', digest['opaque'])
-        basic = res['basic']
-        self.assertEqual('me', basic['realm'])
-
-    def testParseWWWAuthenticateMultiple3(self):
-        # Handle an added comma between challenges, which might get thrown in if the challenges were
-        # originally sent in separate www-authenticate headers.
-        res = httplib2._parse_www_authenticate({ 'www-authenticate':
-                'Digest realm="testrealm@host.com", qop="auth,auth-int", nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", opaque="5ccc069c403ebaf9f0171e9517f40e41", Basic REAlm="me", WSSE realm="foo", profile="UsernameToken"'})
-        digest = res['digest']
-        self.assertEqual('testrealm@host.com', digest['realm'])
-        self.assertEqual('auth,auth-int', digest['qop'])
-        self.assertEqual('dcd98b7102dd2f0e8b11d0f600bfb0c093', digest['nonce'])
-        self.assertEqual('5ccc069c403ebaf9f0171e9517f40e41', digest['opaque'])
-        basic = res['basic']
-        self.assertEqual('me', basic['realm'])
-        wsse = res['wsse']
-        self.assertEqual('foo', wsse['realm'])
-        self.assertEqual('UsernameToken', wsse['profile'])
-
-    def testParseWWWAuthenticateMultiple4(self):
-        res = httplib2._parse_www_authenticate({ 'www-authenticate':
-                'Digest realm="test-real.m@host.com", qop \t=\t"\tauth,auth-int", nonce="(*)&^&$%#",opaque="5ccc069c403ebaf9f0171e9517f40e41", Basic REAlm="me", WSSE realm="foo", profile="UsernameToken"'})
-        digest = res['digest']
-        self.assertEqual('test-real.m@host.com', digest['realm'])
-        self.assertEqual('\tauth,auth-int', digest['qop'])
-        self.assertEqual('(*)&^&$%#', digest['nonce'])
-
-    def testParseWWWAuthenticateMoreQuoteCombos(self):
-        res = httplib2._parse_www_authenticate({'www-authenticate':'Digest realm="myrealm", nonce="Ygk86AsKBAA=3516200d37f9a3230352fde99977bd6d472d4306", algorithm=MD5, qop="auth", stale=true'})
-        digest = res['digest']
-        self.assertEqual('myrealm', digest['realm'])
-
-    def testParseWWWAuthenticateMalformed(self):
-        try:
-          res = httplib2._parse_www_authenticate({'www-authenticate':'OAuth "Facebook Platform" "invalid_token" "Invalid OAuth access token."'})
-          self.fail("should raise an exception")
-        except httplib2.MalformedHeader:
-          pass
-
-    def testDigestObject(self):
-        credentials = ('joe', 'password')
-        host = None
-        request_uri = '/projects/httplib2/test/digest/'
-        headers = {}
-        response = {
-            'www-authenticate': 'Digest realm="myrealm", nonce="Ygk86AsKBAA=3516200d37f9a3230352fde99977bd6d472d4306", algorithm=MD5, qop="auth"'
-        }
-        content = b""
-
-        d = httplib2.DigestAuthentication(credentials, host, request_uri, headers, response, content, None)
-        d.request("GET", request_uri, headers, content, cnonce="33033375ec278a46")
-        our_request = "authorization: %s" % headers['authorization']
-        working_request = 'authorization: Digest username="joe", realm="myrealm", nonce="Ygk86AsKBAA=3516200d37f9a3230352fde99977bd6d472d4306", uri="/projects/httplib2/test/digest/", algorithm=MD5, response="97ed129401f7cdc60e5db58a80f3ea8b", qop=auth, nc=00000001, cnonce="33033375ec278a46"'
-        self.assertEqual(our_request, working_request)
-
-    def testDigestObjectWithOpaque(self):
-        credentials = ('joe', 'password')
-        host = None
-        request_uri = '/projects/httplib2/test/digest/'
-        headers = {}
-        response = {
-            'www-authenticate': 'Digest realm="myrealm", nonce="Ygk86AsKBAA=3516200d37f9a3230352fde99977bd6d472d4306", algorithm=MD5, qop="auth", opaque="atestopaque"'
-        }
-        content = ""
-
-        d = httplib2.DigestAuthentication(credentials, host, request_uri, headers, response, content, None)
-        d.request("GET", request_uri, headers, content, cnonce="33033375ec278a46")
-        our_request = "authorization: %s" % headers['authorization']
-        working_request = 'authorization: Digest username="joe", realm="myrealm", nonce="Ygk86AsKBAA=3516200d37f9a3230352fde99977bd6d472d4306", uri="/projects/httplib2/test/digest/", algorithm=MD5, response="97ed129401f7cdc60e5db58a80f3ea8b", qop=auth, nc=00000001, cnonce="33033375ec278a46", opaque="atestopaque"'
-        self.assertEqual(our_request, working_request)
-
-    def testDigestObjectStale(self):
-        credentials = ('joe', 'password')
-        host = None
-        request_uri = '/projects/httplib2/test/digest/'
-        headers = {}
-        response = httplib2.Response({ })
-        response['www-authenticate'] = 'Digest realm="myrealm", nonce="Ygk86AsKBAA=3516200d37f9a3230352fde99977bd6d472d4306", algorithm=MD5, qop="auth", stale=true'
-        response.status = 401
-        content = b""
-        d = httplib2.DigestAuthentication(credentials, host, request_uri, headers, response, content, None)
-        # Returns true to force a retry
-        self.assertTrue( d.response(response, content) )
-
-    def testDigestObjectAuthInfo(self):
-        credentials = ('joe', 'password')
-        host = None
-        request_uri = '/projects/httplib2/test/digest/'
-        headers = {}
-        response = httplib2.Response({ })
-        response['www-authenticate'] = 'Digest realm="myrealm", nonce="Ygk86AsKBAA=3516200d37f9a3230352fde99977bd6d472d4306", algorithm=MD5, qop="auth", stale=true'
-        response['authentication-info'] = 'nextnonce="fred"'
-        content = b""
-        d = httplib2.DigestAuthentication(credentials, host, request_uri, headers, response, content, None)
-        # Returns False; the nextnonce in Authentication-Info is absorbed instead of forcing a retry
-        self.assertFalse( d.response(response, content) )
-        self.assertEqual('fred', d.challenge['nonce'])
-        self.assertEqual(1, d.challenge['nc'])
-
-    def testWsseAlgorithm(self):
-        digest = httplib2._wsse_username_token("d36e316282959a9ed4c89851497a717f", "2003-12-15T14:43:07Z", "taadtaadpstcsm")
-        expected = b"quR/EWLAV4xLf9Zqyw4pDmfV9OY="
-        self.assertEqual(expected, digest)
-
-    def testEnd2End(self):
-        # one end to end header
-        response = {'content-type': 'application/atom+xml', 'te': 'deflate'}
-        end2end = httplib2._get_end2end_headers(response)
-        self.assertTrue('content-type' in end2end)
-        self.assertTrue('te' not in end2end)
-        self.assertTrue('connection' not in end2end)
-
-        # one end to end header that gets eliminated
-        response = {'connection': 'content-type', 'content-type': 'application/atom+xml', 'te': 'deflate'}
-        end2end = httplib2._get_end2end_headers(response)
-        self.assertTrue('content-type' not in end2end)
-        self.assertTrue('te' not in end2end)
-        self.assertTrue('connection' not in end2end)
-
-        # Degenerate case of no headers
-        response = {}
-        end2end = httplib2._get_end2end_headers(response)
-        self.assertEqual(0, len(end2end))
-
-        # Degenerate case of connection referring to a header not passed in
-        response = {'connection': 'content-type'}
-        end2end = httplib2._get_end2end_headers(response)
-        self.assertEqual(0, len(end2end))
-
-
-class TestProxyInfo(unittest.TestCase):
-    def setUp(self):
-        self.orig_env = dict(os.environ)
-
-    def tearDown(self):
-        os.environ.clear()
-        os.environ.update(self.orig_env)
-
-    def test_from_url(self):
-        pi = httplib2.proxy_info_from_url('http://myproxy.example.com')
-        self.assertEqual(pi.proxy_host, 'myproxy.example.com')
-        self.assertEqual(pi.proxy_port, 80)
-        self.assertEqual(pi.proxy_user, None)
-
-    def test_from_url_ident(self):
-        pi = httplib2.proxy_info_from_url('http://zoidberg:fish@someproxy:99')
-        self.assertEqual(pi.proxy_host, 'someproxy')
-        self.assertEqual(pi.proxy_port, 99)
-        self.assertEqual(pi.proxy_user, 'zoidberg')
-        self.assertEqual(pi.proxy_pass, 'fish')
-
-    def test_from_env(self):
-        os.environ['http_proxy'] = 'http://myproxy.example.com:8080'
-        pi = httplib2.proxy_info_from_environment()
-        self.assertEqual(pi.proxy_host, 'myproxy.example.com')
-        self.assertEqual(pi.proxy_port, 8080)
-
-    def test_from_env_no_proxy(self):
-        os.environ['http_proxy'] = 'http://myproxy.example.com:80'
-        os.environ['https_proxy'] = 'http://myproxy.example.com:81'
-        pi = httplib2.proxy_info_from_environment('https')
-        self.assertEqual(pi.proxy_host, 'myproxy.example.com')
-        self.assertEqual(pi.proxy_port, 81)
-
-    def test_from_env_none(self):
-        os.environ.clear()
-        pi = httplib2.proxy_info_from_environment()
-        self.assertEqual(pi, None)
-
-
-if __name__ == '__main__':
-    unittest.main()
+#!/usr/bin/env python3

+"""

+httplib2test

+

+A set of unit tests for httplib2.py.

+

+Requires Python 3.0 or later

+"""

+

+__author__ = "Joe Gregorio (joe@bitworking.org)"

+__copyright__ = "Copyright 2006, Joe Gregorio"

+__contributors__ = ["Mark Pilgrim"]

+__license__ = "MIT"

+__history__ = """ """

+__version__ = "0.2 ($Rev: 118 $)"

+

+import base64

+import http.client

+import httplib2

+import io

+import os

+import pickle

+import socket

+import ssl

+import sys

+import time

+import unittest

+import urllib.parse

+

+# The test resources base uri

+base = 'http://bitworking.org/projects/httplib2/test/'

+#base = 'http://localhost/projects/httplib2/test/'

+cacheDirName = ".cache"

+

+

+class CredentialsTest(unittest.TestCase):

+    def test(self):

+        c = httplib2.Credentials()

+        c.add("joe", "password")

+        self.assertEqual(("joe", "password"), list(c.iter("bitworking.org"))[0])

+        self.assertEqual(("joe", "password"), list(c.iter(""))[0])

+        c.add("fred", "password2", "wellformedweb.org")

+        self.assertEqual(("joe", "password"), list(c.iter("bitworking.org"))[0])

+        self.assertEqual(1, len(list(c.iter("bitworking.org"))))

+        self.assertEqual(2, len(list(c.iter("wellformedweb.org"))))

+        self.assertTrue(("fred", "password2") in list(c.iter("wellformedweb.org")))

+        c.clear()

+        self.assertEqual(0, len(list(c.iter("bitworking.org"))))

+        c.add("fred", "password2", "wellformedweb.org")

+        self.assertTrue(("fred", "password2") in list(c.iter("wellformedweb.org")))

+        self.assertEqual(0, len(list(c.iter("bitworking.org"))))

+        self.assertEqual(0, len(list(c.iter(""))))

+

+

+class ParserTest(unittest.TestCase):

+    def testFromStd66(self):

+        self.assertEqual( ('http', 'example.com', '', None, None ), httplib2.parse_uri("http://example.com"))

+        self.assertEqual( ('https', 'example.com', '', None, None ), httplib2.parse_uri("https://example.com"))

+        self.assertEqual( ('https', 'example.com:8080', '', None, None ), httplib2.parse_uri("https://example.com:8080"))

+        self.assertEqual( ('http', 'example.com', '/', None, None ), httplib2.parse_uri("http://example.com/"))

+        self.assertEqual( ('http', 'example.com', '/path', None, None ), httplib2.parse_uri("http://example.com/path"))

+        self.assertEqual( ('http', 'example.com', '/path', 'a=1&b=2', None ), httplib2.parse_uri("http://example.com/path?a=1&b=2"))

+        self.assertEqual( ('http', 'example.com', '/path', 'a=1&b=2', 'fred' ), httplib2.parse_uri("http://example.com/path?a=1&b=2#fred"))

+        self.assertEqual( ('http', 'example.com', '/path', 'a=1&b=2', 'fred' ), httplib2.parse_uri("http://example.com/path?a=1&b=2#fred"))

+

+

+class UrlNormTest(unittest.TestCase):

+    def test(self):

+        self.assertEqual( "http://example.org/", httplib2.urlnorm("http://example.org")[-1])

+        self.assertEqual( "http://example.org/", httplib2.urlnorm("http://EXAMple.org")[-1])

+        self.assertEqual( "http://example.org/?=b", httplib2.urlnorm("http://EXAMple.org?=b")[-1])

+        self.assertEqual( "http://example.org/mypath?a=b", httplib2.urlnorm("http://EXAMple.org/mypath?a=b")[-1])

+        self.assertEqual( "http://localhost:80/", httplib2.urlnorm("http://localhost:80")[-1])

+        self.assertEqual( httplib2.urlnorm("http://localhost:80/"), httplib2.urlnorm("HTTP://LOCALHOST:80"))

+        try:

+            httplib2.urlnorm("/")

+            self.fail("Non-absolute URIs should raise an exception")

+        except httplib2.RelativeURIError:

+            pass

+

+class UrlSafenameTest(unittest.TestCase):

+    def test(self):

+        # Test that different URIs end up generating different safe names

+        self.assertEqual( "example.org,fred,a=b,58489f63a7a83c3b7794a6a398ee8b1f", httplib2.safename("http://example.org/fred/?a=b"))

+        self.assertEqual( "example.org,fred,a=b,8c5946d56fec453071f43329ff0be46b", httplib2.safename("http://example.org/fred?/a=b"))

+        self.assertEqual( "www.example.org,fred,a=b,499c44b8d844a011b67ea2c015116968", httplib2.safename("http://www.example.org/fred?/a=b"))

+        self.assertEqual( httplib2.safename(httplib2.urlnorm("http://www")[-1]), httplib2.safename(httplib2.urlnorm("http://WWW")[-1]))

+        self.assertEqual( "www.example.org,fred,a=b,692e843a333484ce0095b070497ab45d", httplib2.safename("https://www.example.org/fred?/a=b"))

+        self.assertNotEqual( httplib2.safename("http://www"), httplib2.safename("https://www"))

+        # Test the max length limits

+        uri = "http://" + ("w" * 200) + ".org"

+        uri2 = "http://" + ("w" * 201) + ".org"

+        self.assertNotEqual( httplib2.safename(uri2), httplib2.safename(uri))

+        # Max length should be 200 + 1 (",") + 32

+        self.assertEqual(233, len(httplib2.safename(uri2)))

+        self.assertEqual(233, len(httplib2.safename(uri)))

+        # Unicode

+        if sys.version_info >= (2,3):

+            self.assertEqual( "xn--http,-4y1d.org,fred,a=b,579924c35db315e5a32e3d9963388193", httplib2.safename("http://\u2304.org/fred/?a=b"))

+

+class _MyResponse(io.BytesIO):

+    def __init__(self, body, **kwargs):

+        io.BytesIO.__init__(self, body)

+        self.headers = kwargs

+

+    def items(self):

+        return self.headers.items()

+

+    def iteritems(self):

+        return iter(self.headers.items())

+

+

+class _MyHTTPConnection(object):

+    "This class is just a mock of httplib.HTTPConnection used for testing"

+

+    def __init__(self, host, port=None, key_file=None, cert_file=None,

+                 strict=None, timeout=None, proxy_info=None):

+        self.host = host

+        self.port = port

+        self.timeout = timeout

+        self.log = ""

+        self.sock = None

+

+    def set_debuglevel(self, level):

+        pass

+

+    def connect(self):

+        "Connect to a host on a given port."

+        pass

+

+    def close(self):

+        pass

+

+    def request(self, method, request_uri, body, headers):

+        pass

+

+    def getresponse(self):

+        return _MyResponse(b"the body", status="200")

+

+

+class HttpTest(unittest.TestCase):

+    def setUp(self):

+        if os.path.exists(cacheDirName):

+            [os.remove(os.path.join(cacheDirName, file)) for file in os.listdir(cacheDirName)]

+        self.http = httplib2.Http(cacheDirName)

+        self.http.clear_credentials()

+

+    def testIPv6NoSSL(self):

+        try:

+          self.http.request("http://[::1]/")

+        except socket.gaierror:

+          self.fail("should get the address family right for IPv6")

+        except socket.error:

+          # Even if IPv6 isn't installed on a machine it should just raise socket.error

+          pass

+

+    def testIPv6SSL(self):

+        try:

+          self.http.request("https://[::1]/")

+        except socket.gaierror:

+          self.fail("should get the address family right for IPv6")

+        except socket.error:

+          # Even if IPv6 isn't installed on a machine it should just raise socket.error

+          pass

+

+    def testConnectionType(self):
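+        # Test that a custom connection class can be supplied via connection_type.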

+        self.http.force_exception_to_status_code = False

+        response, content = self.http.request("http://bitworking.org", connection_type=_MyHTTPConnection)

+        self.assertEqual(response['content-location'], "http://bitworking.org")

+        self.assertEqual(content, b"the body")

+

+    def testGetUnknownServer(self):

+        self.http.force_exception_to_status_code = False

+        try:

+            self.http.request("http://fred.bitworking.org/")

+            self.fail("An httplib2.ServerNotFoundError Exception must be thrown on an unresolvable server.")

+        except httplib2.ServerNotFoundError:

+            pass

+

+        # Now test with exceptions turned off

+        self.http.force_exception_to_status_code = True

+

+        (response, content) = self.http.request("http://fred.bitworking.org/")

+        self.assertEqual(response['content-type'], 'text/plain')

+        self.assertTrue(content.startswith(b"Unable to find"))

+        self.assertEqual(response.status, 400)

+

+    def testGetConnectionRefused(self):

+        self.http.force_exception_to_status_code = False

+        try:

+            self.http.request("http://localhost:7777/")

+            self.fail("An socket.error exception must be thrown on Connection Refused.")

+        except socket.error:

+            pass

+

+        # Now test with exceptions turned off

+        self.http.force_exception_to_status_code = True

+

+        (response, content) = self.http.request("http://localhost:7777/")

+        self.assertEqual(response['content-type'], 'text/plain')

+        self.assertTrue(b"Connection refused" in content)

+        self.assertEqual(response.status, 400)

+

+    def testGetIRI(self):

+        if sys.version_info >= (2,3):

+            uri = urllib.parse.urljoin(base, "reflector/reflector.cgi?d=\N{CYRILLIC CAPITAL LETTER DJE}")

+            (response, content) = self.http.request(uri, "GET")

+            d = self.reflector(content)

+            self.assertTrue('QUERY_STRING' in d)

+            self.assertTrue(d['QUERY_STRING'].find('%D0%82') > 0)

+

+    def testGetIsDefaultMethod(self):

+        # Test that GET is the default method

+        uri = urllib.parse.urljoin(base, "methods/method_reflector.cgi")

+        (response, content) = self.http.request(uri)

+        self.assertEqual(response['x-method'], "GET")

+

+    def testDifferentMethods(self):

+        # Test that all methods can be used

+        uri = urllib.parse.urljoin(base, "methods/method_reflector.cgi")

+        for method in ["GET", "PUT", "DELETE", "POST"]:

+            (response, content) = self.http.request(uri, method, body=b" ")

+            self.assertEqual(response['x-method'], method)

+

+    def testHeadRead(self):

+        # Test that we don't try to read the response of a HEAD request

+        # since httplib blocks response.read() for HEAD requests.

+        # Oddly enough this doesn't appear as a problem when doing HEAD requests

+        # against Apache servers.

+        uri = "http://www.google.com/"

+        (response, content) = self.http.request(uri, "HEAD")

+        self.assertEqual(response.status, 200)

+        self.assertEqual(content, b"")

+

+    def testGetNoCache(self):

+        # Test that we can do a GET without the cache turned on.

+        http = httplib2.Http()

+        uri = urllib.parse.urljoin(base, "304/test_etag.txt")

+        (response, content) = http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response.previous, None)

+

+    def testGetOnlyIfCachedCacheHit(self):

+        # Test that we can do a GET with the cache and 'only-if-cached'

+        uri = urllib.parse.urljoin(base, "304/test_etag.txt")

+        (response, content) = self.http.request(uri, "GET")

+        (response, content) = self.http.request(uri, "GET", headers={'cache-control': 'only-if-cached'})

+        self.assertEqual(response.fromcache, True)

+        self.assertEqual(response.status, 200)

+

+    def testGetOnlyIfCachedCacheMiss(self):

+        # Test a GET with 'only-if-cached' when the resource is not yet in the cache

+        uri = urllib.parse.urljoin(base, "304/test_etag.txt")

+        (response, content) = self.http.request(uri, "GET", headers={'cache-control': 'only-if-cached'})

+        self.assertEqual(response.fromcache, False)

+        self.assertEqual(response.status, 504)

+

+    def testGetOnlyIfCachedNoCacheAtAll(self):

+        # Test a GET with 'only-if-cached' when no cache is configured at all

+        # Of course, there might be an intermediary beyond us

+        # that responds to the 'only-if-cached', so this

+        # test can't really be guaranteed to pass.

+        http = httplib2.Http()

+        uri = urllib.parse.urljoin(base, "304/test_etag.txt")

+        (response, content) = http.request(uri, "GET", headers={'cache-control': 'only-if-cached'})

+        self.assertEqual(response.fromcache, False)

+        self.assertEqual(response.status, 504)

+

+    def testUserAgent(self):

+        # Test that we provide a default user-agent

+        uri = urllib.parse.urljoin(base, "user-agent/test.cgi")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+        self.assertTrue(content.startswith(b"Python-httplib2/"))

+

+    def testUserAgentNonDefault(self):

+        # Test that the default user-agent can be overridden

+

+        uri = urllib.parse.urljoin(base, "user-agent/test.cgi")

+        (response, content) = self.http.request(uri, "GET", headers={'User-Agent': 'fred/1.0'})

+        self.assertEqual(response.status, 200)

+        self.assertTrue(content.startswith(b"fred/1.0"))

+

+    def testGet300WithLocation(self):

+        # Test that we automatically follow 300 redirects if a Location: header is provided

+        uri = urllib.parse.urljoin(base, "300/with-location-header.asis")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+        self.assertEqual(content, b"This is the final destination.\n")

+        self.assertEqual(response.previous.status, 300)

+        self.assertEqual(response.previous.fromcache, False)

+

+        # Confirm that the intermediate 300 is not cached

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+        self.assertEqual(content, b"This is the final destination.\n")

+        self.assertEqual(response.previous.status, 300)

+        self.assertEqual(response.previous.fromcache, False)

+

+    def testGet300WithLocationNoRedirect(self):

+        # Test that we do not follow a 300 redirect when follow_redirects is False

+        self.http.follow_redirects = False

+        uri = urllib.parse.urljoin(base, "300/with-location-header.asis")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 300)

+

+    def testGet300WithoutLocation(self):

+        # Not giving a Location: header in a 300 response is acceptable

+        # In which case we just return the 300 response

+        uri = urllib.parse.urljoin(base, "300/without-location-header.asis")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 300)

+        self.assertTrue(response['content-type'].startswith("text/html"))

+        self.assertEqual(response.previous, None)

+

+    def testGet301(self):

+        # Test that we automatically follow 301 redirects

+        # and that we cache the 301 response

+        uri = urllib.parse.urljoin(base, "301/onestep.asis")

+        destination = urllib.parse.urljoin(base, "302/final-destination.txt")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+        self.assertTrue('content-location' in response)

+        self.assertEqual(response['content-location'], destination)

+        self.assertEqual(content, b"This is the final destination.\n")

+        self.assertEqual(response.previous.status, 301)

+        self.assertEqual(response.previous.fromcache, False)

+

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response['content-location'], destination)

+        self.assertEqual(content, b"This is the final destination.\n")

+        self.assertEqual(response.previous.status, 301)

+        self.assertEqual(response.previous.fromcache, True)

+

+    def testHead301(self):

+        # Test that we automatically follow 301 redirects

+        uri = urllib.parse.urljoin(base, "301/onestep.asis")

+        (response, content) = self.http.request(uri, "HEAD")

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response.previous.status, 301)

+        self.assertEqual(response.previous.fromcache, False)

+

+    def testGet301NoRedirect(self):

+        # Test that we do not follow 301 redirects

+        # when follow_redirects is False

+        self.http.follow_redirects = False

+        uri = urllib.parse.urljoin(base, "301/onestep.asis")

+        destination = urllib.parse.urljoin(base, "302/final-destination.txt")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 301)

+

+

+    def testGet302(self):

+        # Test that we automatically follow 302 redirects

+        # and that we DO NOT cache the 302 response

+        uri = urllib.parse.urljoin(base, "302/onestep.asis")

+        destination = urllib.parse.urljoin(base, "302/final-destination.txt")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response['content-location'], destination)

+        self.assertEqual(content, b"This is the final destination.\n")

+        self.assertEqual(response.previous.status, 302)

+        self.assertEqual(response.previous.fromcache, False)

+

+        uri = urllib.parse.urljoin(base, "302/onestep.asis")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response.fromcache, True)

+        self.assertEqual(response['content-location'], destination)

+        self.assertEqual(content, b"This is the final destination.\n")

+        self.assertEqual(response.previous.status, 302)

+        self.assertEqual(response.previous.fromcache, False)

+        self.assertEqual(response.previous['content-location'], uri)

+

+        uri = urllib.parse.urljoin(base, "302/twostep.asis")

+

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response.fromcache, True)

+        self.assertEqual(content, b"This is the final destination.\n")

+        self.assertEqual(response.previous.status, 302)

+        self.assertEqual(response.previous.fromcache, False)

+

+    def testGet302RedirectionLimit(self):

+        # Test that we can set a lower redirection limit

+        # and that we raise an exception when we exceed

+        # that limit.

+        self.http.force_exception_to_status_code = False

+

+        uri = urllib.parse.urljoin(base, "302/twostep.asis")

+        try:

+            (response, content) = self.http.request(uri, "GET", redirections = 1)

+            self.fail("This should not happen")

+        except httplib2.RedirectLimit:

+            pass

+        except Exception:

+            self.fail("Threw wrong kind of exception")

+

+        # Re-run the test without the exceptions

+        self.http.force_exception_to_status_code = True

+

+        (response, content) = self.http.request(uri, "GET", redirections = 1)

+        self.assertEqual(response.status, 500)

+        self.assertTrue(response.reason.startswith("Redirected more"))

+        self.assertEqual("302", response['status'])

+        self.assertTrue(content.startswith(b"<html>"))

+        self.assertTrue(response.previous != None)

+

+    def testGet302NoLocation(self):

+        # Test that we throw an exception when we get

+        # a 302 with no Location: header.

+        self.http.force_exception_to_status_code = False

+        uri = urllib.parse.urljoin(base, "302/no-location.asis")

+        try:

+            (response, content) = self.http.request(uri, "GET")

+            self.fail("Should never reach here")

+        except httplib2.RedirectMissingLocation:

+            pass

+        except Exception:

+            self.fail("Threw wrong kind of exception")

+

+        # Re-run the test without the exceptions

+        self.http.force_exception_to_status_code = True

+

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 500)

+        self.assertTrue(response.reason.startswith("Redirected but"))

+        self.assertEqual("302", response['status'])

+        self.assertTrue(content.startswith(b"This is content"))

+

+    def testGet301ViaHttps(self):

+        # code.google.com/apis/ issues a 301 redirect that we should follow over HTTPS

+        (response, content) = self.http.request("https://code.google.com/apis/", "GET")

+        self.assertEqual(200, response.status)

+        self.assertEqual(301, response.previous.status)

+

+    def testGetViaHttps(self):

+        # Test that we can handle HTTPS

+        (response, content) = self.http.request("https://google.com/adsense/", "GET")

+        self.assertEqual(200, response.status)

+

+    def testGetViaHttpsSpecViolationOnLocation(self):

+        # Test that we follow redirects through HTTPS

+        # even if they violate the spec by including

+        # a relative Location: header instead of an

+        # absolute one.

+        (response, content) = self.http.request("https://google.com/adsense", "GET")

+        self.assertEqual(200, response.status)

+        self.assertNotEqual(None, response.previous)

+

+

+    def testGetViaHttpsKeyCert(self):

+        #  At this point I can only test

+        #  that the key and cert files are passed in

+        #  correctly to httplib. It would be nice to have

+        #  a real https endpoint to test against.

+        http = httplib2.Http(timeout=2)

+

+        http.add_certificate("akeyfile", "acertfile", "bitworking.org")

+        try:

+          (response, content) = http.request("https://bitworking.org", "GET")

+        except AttributeError:

+          self.assertEqual(http.connections["https:bitworking.org"].key_file, "akeyfile")

+          self.assertEqual(http.connections["https:bitworking.org"].cert_file, "acertfile")

+        except IOError:

+          # Skip on 3.2

+          pass

+

+        try:

+            (response, content) = http.request("https://notthere.bitworking.org", "GET")

+        except httplib2.ServerNotFoundError:

+          self.assertEqual(http.connections["https:notthere.bitworking.org"].key_file, None)

+          self.assertEqual(http.connections["https:notthere.bitworking.org"].cert_file, None)

+        except IOError:

+          # Skip on 3.2

+          pass

+

+    def testSslCertValidation(self):

+        # Test that we get an ssl.SSLError when specifying a non-existent CA

+        # certs file.

+        http = httplib2.Http(ca_certs='/nosuchfile')

+        self.assertRaises(IOError,

+                http.request, "https://www.google.com/", "GET")

+

+        # Test that we get an ssl.SSLError if we try to access

+        # https://www.google.com using a CA cert file that doesn't contain

+        # the CA Google uses (i.e., simulating a cert that's not signed by a

+        # trusted CA).

+        other_ca_certs = os.path.join(

+                os.path.dirname(os.path.abspath(httplib2.__file__)),

+                "test", "other_cacerts.txt")

+        http = httplib2.Http(ca_certs=other_ca_certs)

+        self.assertRaises(ssl.SSLError,

+                http.request, "https://www.google.com/", "GET")

+

+    def testSniHostnameValidation(self):
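+        # Smoke test that an HTTPS request completes when SNI-based hostname validation is exercised.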

+        self.http.request("https://google.com/", method="GET")

+

+    def testGet303(self):

+        # Do a follow-up GET on a Location: header

+        # returned from a POST that gave a 303.

+        uri = urllib.parse.urljoin(base, "303/303.cgi")

+        (response, content) = self.http.request(uri, "POST", " ")

+        self.assertEqual(response.status, 200)

+        self.assertEqual(content, b"This is the final destination.\n")

+        self.assertEqual(response.previous.status, 303)

+

+    def testGet303NoRedirect(self):

+        # Test that we do not follow the 303 returned from a POST

+        # when follow_redirects is False

+        self.http.follow_redirects = False

+        uri = urllib.parse.urljoin(base, "303/303.cgi")

+        (response, content) = self.http.request(uri, "POST", " ")

+        self.assertEqual(response.status, 303)

+

+    def test303ForDifferentMethods(self):

+        # Test that all methods can be used

+        uri = urllib.parse.urljoin(base, "303/redirect-to-reflector.cgi")

+        for (method, method_on_303) in [("PUT", "GET"), ("DELETE", "GET"), ("POST", "GET"), ("GET", "GET"), ("HEAD", "GET")]:

+            (response, content) = self.http.request(uri, method, body=b" ")

+            self.assertEqual(response['x-method'], method_on_303)

+

+    def testGet304(self):

+        # Test that we use ETags properly to validate our cache

+        uri = urllib.parse.urljoin(base, "304/test_etag.txt")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertNotEqual(response['etag'], "")

+

+        (response, content) = self.http.request(uri, "GET")

+        (response, content) = self.http.request(uri, "GET", headers = {'cache-control': 'must-revalidate'})

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response.fromcache, True)

+

+        cache_file_name = os.path.join(cacheDirName, httplib2.safename(httplib2.urlnorm(uri)[-1]))

+        f = open(cache_file_name, "r")

+        status_line = f.readline()

+        f.close()

+

+        self.assertTrue(status_line.startswith("status:"))

+

+        (response, content) = self.http.request(uri, "HEAD")

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response.fromcache, True)

+

+        (response, content) = self.http.request(uri, "GET", headers = {'range': 'bytes=0-0'})

+        self.assertEqual(response.status, 206)

+        self.assertEqual(response.fromcache, False)

+

+    def testGetIgnoreEtag(self):

+        # Test that we can forcibly ignore ETags

+        uri = urllib.parse.urljoin(base, "reflector/reflector.cgi")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertNotEqual(response['etag'], "")

+

+        (response, content) = self.http.request(uri, "GET", headers = {'cache-control': 'max-age=0'})

+        d = self.reflector(content)

+        self.assertTrue('HTTP_IF_NONE_MATCH' in d)

+

+        self.http.ignore_etag = True

+        (response, content) = self.http.request(uri, "GET", headers = {'cache-control': 'max-age=0'})

+        d = self.reflector(content)

+        self.assertEqual(response.fromcache, False)

+        self.assertFalse('HTTP_IF_NONE_MATCH' in d)

+

+    def testOverrideEtag(self):

+        # Test that a caller-supplied if-none-match header overrides the cached ETag

+        uri = urllib.parse.urljoin(base, "reflector/reflector.cgi")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertNotEqual(response['etag'], "")

+

+        (response, content) = self.http.request(uri, "GET", headers = {'cache-control': 'max-age=0'})

+        d = self.reflector(content)

+        self.assertTrue('HTTP_IF_NONE_MATCH' in d)

+        self.assertNotEqual(d['HTTP_IF_NONE_MATCH'], "fred")

+

+        (response, content) = self.http.request(uri, "GET", headers = {'cache-control': 'max-age=0', 'if-none-match': 'fred'})

+        d = self.reflector(content)

+        self.assertTrue('HTTP_IF_NONE_MATCH' in d)

+        self.assertEqual(d['HTTP_IF_NONE_MATCH'], "fred")

+

+# MAP: commented this out because it consistently fails

+#    def testGet304EndToEnd(self):

+#       # Test that end to end headers get overwritten in the cache

+#        uri = urllib.parse.urljoin(base, "304/end2end.cgi")

+#        (response, content) = self.http.request(uri, "GET")

+#        self.assertNotEqual(response['etag'], "")

+#        old_date = response['date']

+#        time.sleep(2)

+#

+#        (response, content) = self.http.request(uri, "GET", headers = {'Cache-Control': 'max-age=0'})

+#        # The response should be from the cache, but the Date: header should be updated.

+#        new_date = response['date']

+#        self.assertNotEqual(new_date, old_date)

+#        self.assertEqual(response.status, 200)

+#        self.assertEqual(response.fromcache, True)

+

+    def testGet304LastModified(self):

+        # Test that we can still handle a 304

+        # by only using the last-modified cache validator.

+        uri = urllib.parse.urljoin(base, "304/last-modified-only/last-modified-only.txt")

+        (response, content) = self.http.request(uri, "GET")

+

+        self.assertNotEqual(response['last-modified'], "")

+        (response, content) = self.http.request(uri, "GET")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response.fromcache, True)

+

+    def testGet307(self):

+        # Test that we do follow 307 redirects but

+        # do not cache the 307

+        uri = urllib.parse.urljoin(base, "307/onestep.asis")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+        self.assertEqual(content, b"This is the final destination.\n")

+        self.assertEqual(response.previous.status, 307)

+        self.assertEqual(response.previous.fromcache, False)

+

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response.fromcache, True)

+        self.assertEqual(content, b"This is the final destination.\n")

+        self.assertEqual(response.previous.status, 307)

+        self.assertEqual(response.previous.fromcache, False)

+

+    def testGet410(self):

+        # Test that we pass 410's through

+        uri = urllib.parse.urljoin(base, "410/410.asis")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 410)

+

+    def testVaryHeaderSimple(self):

+        """

+        RFC 2616 13.6

+        When the cache receives a subsequent request whose Request-URI

+        specifies one or more cache entries including a Vary header field,

+        the cache MUST NOT use such a cache entry to construct a response

+        to the new request unless all of the selecting request-headers

+        present in the new request match the corresponding stored

+        request-headers in the original request.

+        """

+        # test that the vary header is sent

+        uri = urllib.parse.urljoin(base, "vary/accept.asis")

+        (response, content) = self.http.request(uri, "GET", headers={'Accept': 'text/plain'})

+        self.assertEqual(response.status, 200)

+        self.assertTrue('vary' in response)

+

+        # get the resource again, from the cache since the Accept header in this

+        # request is the same as in the previous request

+        (response, content) = self.http.request(uri, "GET", headers={'Accept': 'text/plain'})

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response.fromcache, True, msg="Should be from cache")

+

+        # get the resource again, not from cache since the Accept header does not match

+        (response, content) = self.http.request(uri, "GET", headers={'Accept': 'text/html'})

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response.fromcache, False, msg="Should not be from cache")

+

+        # get the resource again, without any Accept header, so again no match

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response.fromcache, False, msg="Should not be from cache")

+

+    def testNoVary(self):

+        pass

+        # when there is no vary, a different Accept header (e.g.) should not

+        # impact if the cache is used

+        # test that the vary header is not sent

+        # uri = urllib.parse.urljoin(base, "vary/no-vary.asis")

+        # (response, content) = self.http.request(uri, "GET", headers={'Accept': 'text/plain'})

+        # self.assertEqual(response.status, 200)

+        # self.assertFalse('vary' in response)

+        #

+        # (response, content) = self.http.request(uri, "GET", headers={'Accept': 'text/plain'})

+        # self.assertEqual(response.status, 200)

+        # self.assertEqual(response.fromcache, True, msg="Should be from cache")

+        #

+        # (response, content) = self.http.request(uri, "GET", headers={'Accept': 'text/html'})

+        # self.assertEqual(response.status, 200)

+        # self.assertEqual(response.fromcache, True, msg="Should be from cache")

+

+    def testVaryHeaderDouble(self):
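+        # Test caching when the Vary header covers two request headers (Accept and Accept-Language)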

+        uri = urllib.parse.urljoin(base, "vary/accept-double.asis")

+        (response, content) = self.http.request(uri, "GET", headers={

+            'Accept': 'text/plain', 'Accept-Language': 'da, en-gb;q=0.8, en;q=0.7'})

+        self.assertEqual(response.status, 200)

+        self.assertTrue('vary' in response)

+

+        # we are from cache

+        (response, content) = self.http.request(uri, "GET", headers={

+            'Accept': 'text/plain', 'Accept-Language': 'da, en-gb;q=0.8, en;q=0.7'})

+        self.assertEqual(response.fromcache, True, msg="Should be from cache")

+

+        (response, content) = self.http.request(uri, "GET", headers={'Accept': 'text/plain'})

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response.fromcache, False)

+

+        # get the resource again, not from cache since the varied headers don't match exactly

+        (response, content) = self.http.request(uri, "GET", headers={'Accept-Language': 'da'})

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response.fromcache, False, msg="Should not be from cache")

+

+    def testVaryUnusedHeader(self):

+        # A header's value is not considered to vary if it's not used at all.

+        uri = urllib.parse.urljoin(base, "vary/unused-header.asis")

+        (response, content) = self.http.request(uri, "GET", headers={

+            'Accept': 'text/plain'})

+        self.assertEqual(response.status, 200)

+        self.assertTrue('vary' in response)

+

+        # we are from cache

+        (response, content) = self.http.request(uri, "GET", headers={

+            'Accept': 'text/plain',})

+        self.assertEqual(response.fromcache, True, msg="Should be from cache")

+

+    def testHeadGZip(self):

+        # Test that we don't try to decompress a HEAD response

+        uri = urllib.parse.urljoin(base, "gzip/final-destination.txt")

+        (response, content) = self.http.request(uri, "HEAD")

+        self.assertEqual(response.status, 200)

+        self.assertNotEqual(int(response['content-length']), 0)

+        self.assertEqual(content, b"")

+

+    def testGetGZip(self):

+        # Test that we support gzip compression

+        uri = urllib.parse.urljoin(base, "gzip/final-destination.txt")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+        self.assertFalse('content-encoding' in response)

+        self.assertTrue('-content-encoding' in response)

+        self.assertEqual(int(response['content-length']), len(b"This is the final destination.\n"))

+        self.assertEqual(content, b"This is the final destination.\n")

+

+    def testPostAndGZipResponse(self):
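+        # Test that a gzip-compressed response to a POST is transparently decompressed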

+        uri = urllib.parse.urljoin(base, "gzip/post.cgi")

+        (response, content) = self.http.request(uri, "POST", body=" ")

+        self.assertEqual(response.status, 200)

+        self.assertFalse('content-encoding' in response)

+        self.assertTrue('-content-encoding' in response)

+

+    def testGetGZipFailure(self):

+        # Test that we raise a good exception when the gzip fails

+        self.http.force_exception_to_status_code = False

+        uri = urllib.parse.urljoin(base, "gzip/failed-compression.asis")

+        try:

+            (response, content) = self.http.request(uri, "GET")

+            self.fail("Should never reach here")

+        except httplib2.FailedToDecompressContent:

+            pass

+        except Exception:

+            self.fail("Threw wrong kind of exception")

+

+        # Re-run the test without the exceptions

+        self.http.force_exception_to_status_code = True

+

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 500)

+        self.assertTrue(response.reason.startswith("Content purported"))

+

+    def testIndividualTimeout(self):
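+        # Test that a per-instance timeout surfaces as a 408 Request Timeout status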

+        uri = urllib.parse.urljoin(base, "timeout/timeout.cgi")

+        http = httplib2.Http(timeout=1)

+        http.force_exception_to_status_code = True

+

+        (response, content) = http.request(uri)

+        self.assertEqual(response.status, 408)

+        self.assertTrue(response.reason.startswith("Request Timeout"))

+        self.assertTrue(content.startswith(b"Request Timeout"))

+

+

+    def testGetDeflate(self):

+        # Test that we support deflate compression

+        uri = urllib.parse.urljoin(base, "deflate/deflated.asis")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+        self.assertFalse('content-encoding' in response)

+        self.assertEqual(int(response['content-length']), len("This is the final destination."))

+        self.assertEqual(content, b"This is the final destination.")

+

+    def testGetDeflateFailure(self):

+        # Test that we raise a good exception when the deflate fails

+        self.http.force_exception_to_status_code = False

+

+        uri = urllib.parse.urljoin(base, "deflate/failed-compression.asis")

+        try:

+            (response, content) = self.http.request(uri, "GET")

+            self.fail("Should never reach here")

+        except httplib2.FailedToDecompressContent:

+            pass

+        except Exception:

+            self.fail("Threw wrong kind of exception")

+

+        # Re-run the test without the exceptions

+        self.http.force_exception_to_status_code = True

+

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 500)

+        self.assertTrue(response.reason.startswith("Content purported"))

+

+    def testGetDuplicateHeaders(self):

+        # Test that duplicate headers get concatenated via ','

+        uri = urllib.parse.urljoin(base, "duplicate-headers/multilink.asis")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+        self.assertEqual(content, b"This is content\n")

+        self.assertEqual(response['link'].split(",")[0], '<http://bitworking.org>; rel="home"; title="BitWorking"')

+

+    def testGetCacheControlNoCache(self):

+        # Test Cache-Control: no-cache on requests

+        uri = urllib.parse.urljoin(base, "304/test_etag.txt")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertNotEqual(response['etag'], "")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response.fromcache, True)

+

+        (response, content) = self.http.request(uri, "GET", headers={'Cache-Control': 'no-cache'})

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response.fromcache, False)

+

+    def testGetCacheControlPragmaNoCache(self):

+        # Test Pragma: no-cache on requests

+        uri = urllib.parse.urljoin(base, "304/test_etag.txt")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertNotEqual(response['etag'], "")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response.fromcache, True)

+

+        (response, content) = self.http.request(uri, "GET", headers={'Pragma': 'no-cache'})

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response.fromcache, False)

+

+    def testGetCacheControlNoStoreRequest(self):

+        # A no-store request means that the response should not be stored.

+        uri = urllib.parse.urljoin(base, "304/test_etag.txt")

+

+        (response, content) = self.http.request(uri, "GET", headers={'Cache-Control': 'no-store'})

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response.fromcache, False)

+

+        (response, content) = self.http.request(uri, "GET", headers={'Cache-Control': 'no-store'})

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response.fromcache, False)

+

+    def testGetCacheControlNoStoreResponse(self):

+        # A no-store response means that the response should not be stored.

+        uri = urllib.parse.urljoin(base, "no-store/no-store.asis")

+

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response.fromcache, False)

+

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response.fromcache, False)

+

+    def testGetCacheControlNoCacheNoStoreRequest(self):

+        # Test that a no-store, no-cache clears the entry from the cache

+        # even if it was cached previously.

+        uri = urllib.parse.urljoin(base, "304/test_etag.txt")

+

+        (response, content) = self.http.request(uri, "GET")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.fromcache, True)

+        (response, content) = self.http.request(uri, "GET", headers={'Cache-Control': 'no-store, no-cache'})

+        (response, content) = self.http.request(uri, "GET", headers={'Cache-Control': 'no-store, no-cache'})

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response.fromcache, False)

+

+    def testUpdateInvalidatesCache(self):

+        # Test that calling PUT or DELETE on a

+        # URI that is cached invalidates that cache entry.

+        uri = urllib.parse.urljoin(base, "304/test_etag.txt")

+

+        (response, content) = self.http.request(uri, "GET")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.fromcache, True)

+        (response, content) = self.http.request(uri, "DELETE")

+        self.assertEqual(response.status, 405)

+

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.fromcache, False)

+

+    def testUpdateUsesCachedETag(self):

+        # Test that we natively support http://www.w3.org/1999/04/Editing/

+        uri = urllib.parse.urljoin(base, "conditional-updates/test.cgi")

+

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response.fromcache, False)

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response.fromcache, True)

+        (response, content) = self.http.request(uri, "PUT", body="foo")

+        self.assertEqual(response.status, 200)

+        (response, content) = self.http.request(uri, "PUT", body="foo")

+        self.assertEqual(response.status, 412)

+

+

+    def testUpdatePatchUsesCachedETag(self):

+        # Test that we natively support http://www.w3.org/1999/04/Editing/

+        uri = urllib.parse.urljoin(base, "conditional-updates/test.cgi")

+

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response.fromcache, False)

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response.fromcache, True)

+        (response, content) = self.http.request(uri, "PATCH", body="foo")

+        self.assertEqual(response.status, 200)

+        (response, content) = self.http.request(uri, "PATCH", body="foo")

+        self.assertEqual(response.status, 412)

+

+    def testUpdateUsesCachedETagAndOCMethod(self):

+        # Test that we natively support http://www.w3.org/1999/04/Editing/

+        uri = urllib.parse.urljoin(base, "conditional-updates/test.cgi")

+

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response.fromcache, False)

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response.fromcache, True)

+        self.http.optimistic_concurrency_methods.append("DELETE")

+        (response, content) = self.http.request(uri, "DELETE")

+        self.assertEqual(response.status, 200)

+

+

+    def testUpdateUsesCachedETagOverridden(self):

+        # Test that we natively support http://www.w3.org/1999/04/Editing/

+        uri = urllib.parse.urljoin(base, "conditional-updates/test.cgi")

+

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response.fromcache, False)

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+        self.assertEqual(response.fromcache, True)

+        (response, content) = self.http.request(uri, "PUT", body="foo", headers={'if-match': 'fred'})

+        self.assertEqual(response.status, 412)

+

+    def testBasicAuth(self):

+        # Test Basic Authentication

+        uri = urllib.parse.urljoin(base, "basic/file.txt")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 401)

+

+        uri = urllib.parse.urljoin(base, "basic/")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 401)

+

+        self.http.add_credentials('joe', 'password')

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+

+        uri = urllib.parse.urljoin(base, "basic/file.txt")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+

+    def testBasicAuthWithDomain(self):

+        # Test Basic Authentication

+        uri = urllib.parse.urljoin(base, "basic/file.txt")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 401)

+

+        uri = urllib.parse.urljoin(base, "basic/")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 401)

+

+        self.http.add_credentials('joe', 'password', "example.org")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 401)

+

+        uri = urllib.parse.urljoin(base, "basic/file.txt")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 401)

+

+        domain = urllib.parse.urlparse(base)[1]

+        self.http.add_credentials('joe', 'password', domain)

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+

+        uri = urllib.parse.urljoin(base, "basic/file.txt")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+

+    def testBasicAuthTwoDifferentCredentials(self):

+        # Test Basic Authentication with multiple sets of credentials

+        uri = urllib.parse.urljoin(base, "basic2/file.txt")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 401)

+

+        uri = urllib.parse.urljoin(base, "basic2/")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 401)

+

+        self.http.add_credentials('fred', 'barney')

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+

+        uri = urllib.parse.urljoin(base, "basic2/file.txt")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+

+    def testBasicAuthNested(self):

+        # Test Basic Authentication with resources

+        # that are nested

+        uri = urllib.parse.urljoin(base, "basic-nested/")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 401)

+

+        uri = urllib.parse.urljoin(base, "basic-nested/subdir")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 401)

+

+        # Now add in credentials one at a time and test.

+        self.http.add_credentials('joe', 'password')

+

+        uri = urllib.parse.urljoin(base, "basic-nested/")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+

+        uri = urllib.parse.urljoin(base, "basic-nested/subdir")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 401)

+

+        self.http.add_credentials('fred', 'barney')

+

+        uri = urllib.parse.urljoin(base, "basic-nested/")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+

+        uri = urllib.parse.urljoin(base, "basic-nested/subdir")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+

+    def testDigestAuth(self):

+        # Test that we support Digest Authentication

+        uri = urllib.parse.urljoin(base, "digest/")

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 401)

+

+        self.http.add_credentials('joe', 'password')

+        (response, content) = self.http.request(uri, "GET")

+        self.assertEqual(response.status, 200)

+

+        uri = urllib.parse.urljoin(base, "digest/file.txt")

+        (response, content) = self.http.request(uri, "GET")

+

+    def testDigestAuthNextNonceAndNC(self):

+        # Test that if the server sets nextnonce, we reset

+        # the nonce count back to 1

+        uri = urllib.parse.urljoin(base, "digest/file.txt")

+        self.http.add_credentials('joe', 'password')

+        (response, content) = self.http.request(uri, "GET", headers = {"cache-control":"no-cache"})

+        info = httplib2._parse_www_authenticate(response, 'authentication-info')

+        self.assertEqual(response.status, 200)

+        (response, content) = self.http.request(uri, "GET", headers = {"cache-control":"no-cache"})

+        info2 = httplib2._parse_www_authenticate(response, 'authentication-info')

+        self.assertEqual(response.status, 200)

+

+        if 'nextnonce' in info:

+            self.assertEqual(info2['nc'], 1)

+

+    def testDigestAuthStale(self):

+        # Test that we can handle a nonce becoming stale

+        uri = urllib.parse.urljoin(base, "digest-expire/file.txt")

+        self.http.add_credentials('joe', 'password')

+        (response, content) = self.http.request(uri, "GET", headers = {"cache-control":"no-cache"})

+        info = httplib2._parse_www_authenticate(response, 'authentication-info')

+        self.assertEqual(response.status, 200)

+

+        # Sleep long enough that the nonce becomes stale

+        time.sleep(3)

+

+        (response, content) = self.http.request(uri, "GET", headers = {"cache-control":"no-cache"})

+        self.assertFalse(response.fromcache)

+        self.assertTrue(response._stale_digest)

+        info3 = httplib2._parse_www_authenticate(response, 'authentication-info')

+        self.assertEqual(response.status, 200)

+

+    def reflector(self, content):

+        return  dict( [tuple(x.split("=", 1)) for x in content.decode('utf-8').strip().split("\n")] )

+

+    def testReflector(self):
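+        # Sanity-check the reflector CGI helper used by several other tests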

+        uri = urllib.parse.urljoin(base, "reflector/reflector.cgi")

+        (response, content) = self.http.request(uri, "GET")

+        d = self.reflector(content)

+        self.assertTrue('HTTP_USER_AGENT' in d)

+

+

+    def testConnectionClose(self):
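+        # Test that sending 'connection: close' closes the underlying socket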

+        uri = "http://www.google.com/"

+        (response, content) = self.http.request(uri, "GET")

+        for c in self.http.connections.values():

+            self.assertNotEqual(None, c.sock)

+        (response, content) = self.http.request(uri, "GET", headers={"connection": "close"})

+        for c in self.http.connections.values():

+            self.assertEqual(None, c.sock)

+

+    def testPickleHttp(self):
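+        # Test that an Http object survives a pickle round trip with its state intact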

+        pickled_http = pickle.dumps(self.http)

+        new_http = pickle.loads(pickled_http)

+

+        self.assertEqual(sorted(new_http.__dict__.keys()),

+                         sorted(self.http.__dict__.keys()))

+        for key in new_http.__dict__:

+            if key in ('certificates', 'credentials'):

+                self.assertEqual(new_http.__dict__[key].credentials,

+                                 self.http.__dict__[key].credentials)

+            elif key == 'cache':

+                self.assertEqual(new_http.__dict__[key].cache,

+                                 self.http.__dict__[key].cache)

+            else:

+                self.assertEqual(new_http.__dict__[key],

+                                 self.http.__dict__[key])

+

+    def testPickleHttpWithConnection(self):
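+        # Test that open connections are dropped when an Http object is pickled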

+        self.http.request('http://bitworking.org',

+                          connection_type=_MyHTTPConnection)

+        pickled_http = pickle.dumps(self.http)

+        new_http = pickle.loads(pickled_http)

+

+        self.assertEqual(list(self.http.connections.keys()),

+                         ['http:bitworking.org'])

+        self.assertEqual(new_http.connections, {})

+

+    def testPickleCustomRequestHttp(self):
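+        # Test that a request method patched onto the instance is not included in the pickle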

+        def dummy_request(*args, **kwargs):

+            return new_request(*args, **kwargs)

+        dummy_request.dummy_attr = 'dummy_value'

+

+        self.http.request = dummy_request

+        pickled_http = pickle.dumps(self.http)

+        self.assertFalse(b"S'request'" in pickled_http)

+

+try:

+    import memcache

+    class HttpTestMemCached(HttpTest):

+        def setUp(self):

+            self.cache = memcache.Client(['127.0.0.1:11211'], debug=0)

+            #self.cache = memcache.Client(['10.0.0.4:11211'], debug=1)

+            self.http = httplib2.Http(self.cache)

+            self.cache.flush_all()

+            # Not exactly sure why the sleep is needed here, but

+            # if not present then some unit tests that rely on caching

+            # fail. Memcached seems to lose some sets immediately

+            # after a flush_all if the set is to a value that

+            # was previously cached. (Maybe the flush is handled async?)

+            time.sleep(1)

+            self.http.clear_credentials()

+except ImportError:

+    pass

+

+

+

+# ------------------------------------------------------------------------

+

+class HttpPrivateTest(unittest.TestCase):
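+    # Tests for httplib2's private helper functions; no network access required.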

+

+    def testParseCacheControl(self):

+        # Test that we can parse the Cache-Control header

+        self.assertEqual({}, httplib2._parse_cache_control({}))

+        self.assertEqual({'no-cache': 1}, httplib2._parse_cache_control({'cache-control': ' no-cache'}))

+        cc = httplib2._parse_cache_control({'cache-control': ' no-cache, max-age = 7200'})

+        self.assertEqual(cc['no-cache'], 1)

+        self.assertEqual(cc['max-age'], '7200')

+        cc = httplib2._parse_cache_control({'cache-control': ' , '})

+        self.assertEqual(cc[''], 1)

+

+        try:

+            cc = httplib2._parse_cache_control({'cache-control': 'Max-age=3600;post-check=1800,pre-check=3600'})

+            self.assertTrue("max-age" in cc)

+        except Exception:

+            self.fail("Should not throw exception")

+

+

+

+

+    def testNormalizeHeaders(self):

+        # Test that we normalize headers to lowercase

+        h = httplib2._normalize_headers({'Cache-Control': 'no-cache', 'Other': 'Stuff'})

+        self.assertTrue('cache-control' in h)

+        self.assertTrue('other' in h)

+        self.assertEqual('Stuff', h['other'])

+

+    def testExpirationModelTransparent(self):

+        # Test that no-cache makes our request TRANSPARENT

+        response_headers = {

+            'cache-control': 'max-age=7200'

+        }

+        request_headers = {

+            'cache-control': 'no-cache'

+        }

+        self.assertEqual("TRANSPARENT", httplib2._entry_disposition(response_headers, request_headers))

+

+    def testMaxAgeNonNumeric(self):

+        # Test that a non-numeric max-age value makes the entry STALE

+        response_headers = {

+            'cache-control': 'max-age=fred, min-fresh=barney'

+        }

+        request_headers = {

+        }

+        self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers))

+

+

+    def testExpirationModelNoCacheResponse(self):

+        # The date and expires point to an entry that should be

+        # FRESH, but the no-cache overrides that.

+        now = time.time()

+        response_headers = {

+            'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now)),

+            'expires': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now+4)),

+            'cache-control': 'no-cache'

+        }

+        request_headers = {

+        }

+        self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers))

+

+    def testExpirationModelStaleRequestMustReval(self):

+        # must-revalidate forces STALE

+        self.assertEqual("STALE", httplib2._entry_disposition({}, {'cache-control': 'must-revalidate'}))

+

+    def testExpirationModelStaleResponseMustReval(self):

+        # must-revalidate forces STALE

+        self.assertEqual("STALE", httplib2._entry_disposition({'cache-control': 'must-revalidate'}, {}))

+

+    def testExpirationModelFresh(self):
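+        # A response with max-age=2 is FRESH until the two seconds elapse, then STALE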

+        response_headers = {

+            'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()),

+            'cache-control': 'max-age=2'

+        }

+        request_headers = {

+        }

+        self.assertEqual("FRESH", httplib2._entry_disposition(response_headers, request_headers))

+        time.sleep(3)

+        self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers))

+

+    def testExpirationMaxAge0(self):
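+        # max-age=0 makes the entry immediately STALE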

+        response_headers = {

+            'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()),

+            'cache-control': 'max-age=0'

+        }

+        request_headers = {

+        }

+        self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers))

+

+    def testExpirationModelDateAndExpires(self):
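+        # Freshness is derived from the Date and Expires headers when no max-age is given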

+        now = time.time()

+        response_headers = {

+            'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now)),

+            'expires': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now+2)),

+        }

+        request_headers = {

+        }

+        self.assertEqual("FRESH", httplib2._entry_disposition(response_headers, request_headers))

+        time.sleep(3)

+        self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers))

+

+    def testExpiresZero(self):

+        now = time.time()

+        response_headers = {

+            'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now)),

+            'expires': "0",

+        }

+        request_headers = {

+        }

+        self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers))

+

+    def testExpirationModelDateOnly(self):
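+        # With only a Date header there is no freshness lifetime, so the entry is STALE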

+        now = time.time()

+        response_headers = {

+            'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now+3)),

+        }

+        request_headers = {

+        }

+        self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers))

+

+    def testExpirationModelOnlyIfCached(self):
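+        # 'only-if-cached' on the request means any cached entry is treated as FRESH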

+        response_headers = {

+        }

+        request_headers = {

+            'cache-control': 'only-if-cached',

+        }

+        self.assertEqual("FRESH", httplib2._entry_disposition(response_headers, request_headers))

+

+    def testExpirationModelMaxAgeBoth(self):
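+        # A request max-age of 0 wins over the response max-age, so the entry is STALE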

+        now = time.time()

+        response_headers = {

+            'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now)),

+            'cache-control': 'max-age=2'

+        }

+        request_headers = {

+            'cache-control': 'max-age=0'

+        }

+        self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers))

+

+    def testExpirationModelDateAndExpiresMinFresh1(self):

+        now = time.time()

+        response_headers = {

+            'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now)),

+            'expires': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now+2)),

+        }

+        request_headers = {

+            'cache-control': 'min-fresh=2'

+        }

+        self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers))

+

+    def testExpirationModelDateAndExpiresMinFresh2(self):

+        now = time.time()

+        response_headers = {

+            'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now)),

+            'expires': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now+4)),

+        }

+        request_headers = {

+            'cache-control': 'min-fresh=2'

+        }

+        self.assertEqual("FRESH", httplib2._entry_disposition(response_headers, request_headers))

+

+    def testParseWWWAuthenticateEmpty(self):

+        res = httplib2._parse_www_authenticate({})

+        self.assertEqual(len(list(res.keys())), 0)

+

+    def testParseWWWAuthenticate(self):

+        # different uses of spaces around commas

+        res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Test realm="test realm" , foo=foo ,bar="bar", baz=baz,qux=qux'})

+        self.assertEqual(len(list(res.keys())), 1)

+        self.assertEqual(len(list(res['test'].keys())), 5)

+

+        # tokens with non-alphanum

+        res = httplib2._parse_www_authenticate({ 'www-authenticate': 'T*!%#st realm=to*!%#en, to*!%#en="quoted string"'})

+        self.assertEqual(len(list(res.keys())), 1)

+        self.assertEqual(len(list(res['t*!%#st'].keys())), 2)

+

+        # quoted string with quoted pairs

+        res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Test realm="a \\"test\\" realm"'})

+        self.assertEqual(len(list(res.keys())), 1)

+        self.assertEqual(res['test']['realm'], 'a "test" realm')

+

+    def testParseWWWAuthenticateStrict(self):

+        httplib2.USE_WWW_AUTH_STRICT_PARSING = 1

+        self.testParseWWWAuthenticate()

+        httplib2.USE_WWW_AUTH_STRICT_PARSING = 0

+

+    def testParseWWWAuthenticateBasic(self):

+        res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Basic realm="me"'})

+        basic = res['basic']

+        self.assertEqual('me', basic['realm'])

+

+        res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Basic realm="me", algorithm="MD5"'})

+        basic = res['basic']

+        self.assertEqual('me', basic['realm'])

+        self.assertEqual('MD5', basic['algorithm'])

+

+        res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Basic realm="me", algorithm=MD5'})

+        basic = res['basic']

+        self.assertEqual('me', basic['realm'])

+        self.assertEqual('MD5', basic['algorithm'])

+

+    def testParseWWWAuthenticateBasic2(self):

+        res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Basic realm="me",other="fred" '})

+        basic = res['basic']

+        self.assertEqual('me', basic['realm'])

+        self.assertEqual('fred', basic['other'])

+

+    def testParseWWWAuthenticateBasic3(self):

+        res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Basic REAlm="me" '})

+        basic = res['basic']

+        self.assertEqual('me', basic['realm'])

+

+

+    def testParseWWWAuthenticateDigest(self):

+        res = httplib2._parse_www_authenticate({ 'www-authenticate':

+                'Digest realm="testrealm@host.com", qop="auth,auth-int", nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", opaque="5ccc069c403ebaf9f0171e9517f40e41"'})

+        digest = res['digest']

+        self.assertEqual('testrealm@host.com', digest['realm'])

+        self.assertEqual('auth,auth-int', digest['qop'])

+

+

+    def testParseWWWAuthenticateMultiple(self):

+        res = httplib2._parse_www_authenticate({ 'www-authenticate':

+                'Digest realm="testrealm@host.com", qop="auth,auth-int", nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", opaque="5ccc069c403ebaf9f0171e9517f40e41" Basic REAlm="me" '})

+        digest = res['digest']

+        self.assertEqual('testrealm@host.com', digest['realm'])

+        self.assertEqual('auth,auth-int', digest['qop'])

+        self.assertEqual('dcd98b7102dd2f0e8b11d0f600bfb0c093', digest['nonce'])

+        self.assertEqual('5ccc069c403ebaf9f0171e9517f40e41', digest['opaque'])

+        basic = res['basic']

+        self.assertEqual('me', basic['realm'])

+

+    def testParseWWWAuthenticateMultiple2(self):

+        # Handle an added comma between challenges, which might get thrown in if the challenges were

+        # originally sent in separate www-authenticate headers.

+        res = httplib2._parse_www_authenticate({ 'www-authenticate':

+                'Digest realm="testrealm@host.com", qop="auth,auth-int", nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", opaque="5ccc069c403ebaf9f0171e9517f40e41", Basic REAlm="me" '})

+        digest = res['digest']

+        self.assertEqual('testrealm@host.com', digest['realm'])

+        self.assertEqual('auth,auth-int', digest['qop'])

+        self.assertEqual('dcd98b7102dd2f0e8b11d0f600bfb0c093', digest['nonce'])

+        self.assertEqual('5ccc069c403ebaf9f0171e9517f40e41', digest['opaque'])

+        basic = res['basic']

+        self.assertEqual('me', basic['realm'])

+

+    def testParseWWWAuthenticateMultiple3(self):

+        # Handle an added comma between challenges, which might get thrown in if the challenges were

+        # originally sent in separate www-authenticate headers.

+        res = httplib2._parse_www_authenticate({ 'www-authenticate':

+                'Digest realm="testrealm@host.com", qop="auth,auth-int", nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", opaque="5ccc069c403ebaf9f0171e9517f40e41", Basic REAlm="me", WSSE realm="foo", profile="UsernameToken"'})

+        digest = res['digest']

+        self.assertEqual('testrealm@host.com', digest['realm'])

+        self.assertEqual('auth,auth-int', digest['qop'])

+        self.assertEqual('dcd98b7102dd2f0e8b11d0f600bfb0c093', digest['nonce'])

+        self.assertEqual('5ccc069c403ebaf9f0171e9517f40e41', digest['opaque'])

+        basic = res['basic']

+        self.assertEqual('me', basic['realm'])

+        wsse = res['wsse']

+        self.assertEqual('foo', wsse['realm'])

+        self.assertEqual('UsernameToken', wsse['profile'])

+

+    def testParseWWWAuthenticateMultiple4(self):

+        res = httplib2._parse_www_authenticate({ 'www-authenticate':

+                'Digest realm="test-real.m@host.com", qop \t=\t"\tauth,auth-int", nonce="(*)&^&$%#",opaque="5ccc069c403ebaf9f0171e9517f40e41", Basic REAlm="me", WSSE realm="foo", profile="UsernameToken"'})

+        digest = res['digest']

+        self.assertEqual('test-real.m@host.com', digest['realm'])

+        self.assertEqual('\tauth,auth-int', digest['qop'])

+        self.assertEqual('(*)&^&$%#', digest['nonce'])

+

+    def testParseWWWAuthenticateMoreQuoteCombos(self):

+        res = httplib2._parse_www_authenticate({'www-authenticate':'Digest realm="myrealm", nonce="Ygk86AsKBAA=3516200d37f9a3230352fde99977bd6d472d4306", algorithm=MD5, qop="auth", stale=true'})

+        digest = res['digest']

+        self.assertEqual('myrealm', digest['realm'])

+

+    def testParseWWWAuthenticateMalformed(self):

+        try:

+          res = httplib2._parse_www_authenticate({'www-authenticate':'OAuth "Facebook Platform" "invalid_token" "Invalid OAuth access token."'})

+          self.fail("should raise an exception")

+        except httplib2.MalformedHeader:

+          pass

+

+    def testDigestObject(self):

+        credentials = ('joe', 'password')

+        host = None

+        request_uri = '/projects/httplib2/test/digest/'

+        headers = {}

+        response = {

+            'www-authenticate': 'Digest realm="myrealm", nonce="Ygk86AsKBAA=3516200d37f9a3230352fde99977bd6d472d4306", algorithm=MD5, qop="auth"'

+        }

+        content = b""

+

+        d = httplib2.DigestAuthentication(credentials, host, request_uri, headers, response, content, None)

+        d.request("GET", request_uri, headers, content, cnonce="33033375ec278a46")

+        our_request = "authorization: %s" % headers['authorization']

+        working_request = 'authorization: Digest username="joe", realm="myrealm", nonce="Ygk86AsKBAA=3516200d37f9a3230352fde99977bd6d472d4306", uri="/projects/httplib2/test/digest/", algorithm=MD5, response="97ed129401f7cdc60e5db58a80f3ea8b", qop=auth, nc=00000001, cnonce="33033375ec278a46"'

+        self.assertEqual(our_request, working_request)

+

+    def testDigestObjectWithOpaque(self):

+        credentials = ('joe', 'password')

+        host = None

+        request_uri = '/projects/httplib2/test/digest/'

+        headers = {}

+        response = {

+            'www-authenticate': 'Digest realm="myrealm", nonce="Ygk86AsKBAA=3516200d37f9a3230352fde99977bd6d472d4306", algorithm=MD5, qop="auth", opaque="atestopaque"'

+        }

+        content = ""

+

+        d = httplib2.DigestAuthentication(credentials, host, request_uri, headers, response, content, None)

+        d.request("GET", request_uri, headers, content, cnonce="33033375ec278a46")

+        our_request = "authorization: %s" % headers['authorization']

+        working_request = 'authorization: Digest username="joe", realm="myrealm", nonce="Ygk86AsKBAA=3516200d37f9a3230352fde99977bd6d472d4306", uri="/projects/httplib2/test/digest/", algorithm=MD5, response="97ed129401f7cdc60e5db58a80f3ea8b", qop=auth, nc=00000001, cnonce="33033375ec278a46", opaque="atestopaque"'

+        self.assertEqual(our_request, working_request)

+

+    def testDigestObjectStale(self):

+        credentials = ('joe', 'password')

+        host = None

+        request_uri = '/projects/httplib2/test/digest/'

+        headers = {}

+        response = httplib2.Response({ })

+        response['www-authenticate'] = 'Digest realm="myrealm", nonce="Ygk86AsKBAA=3516200d37f9a3230352fde99977bd6d472d4306", algorithm=MD5, qop="auth", stale=true'

+        response.status = 401

+        content = b""

+        d = httplib2.DigestAuthentication(credentials, host, request_uri, headers, response, content, None)

+        # Returns true to force a retry

+        self.assertTrue( d.response(response, content) )

+

+    def testDigestObjectAuthInfo(self):

+        credentials = ('joe', 'password')

+        host = None

+        request_uri = '/projects/httplib2/test/digest/'

+        headers = {}

+        response = httplib2.Response({ })

+        response['www-authenticate'] = 'Digest realm="myrealm", nonce="Ygk86AsKBAA=3516200d37f9a3230352fde99977bd6d472d4306", algorithm=MD5, qop="auth", stale=true'

+        response['authentication-info'] = 'nextnonce="fred"'

+        content = b""

+        d = httplib2.DigestAuthentication(credentials, host, request_uri, headers, response, content, None)

+        # Returns true to force a retry

+        self.assertFalse( d.response(response, content) )

+        self.assertEqual('fred', d.challenge['nonce'])

+        self.assertEqual(1, d.challenge['nc'])

+

+    def testWsseAlgorithm(self):

+        digest = httplib2._wsse_username_token("d36e316282959a9ed4c89851497a717f", "2003-12-15T14:43:07Z", "taadtaadpstcsm")

+        expected = b"quR/EWLAV4xLf9Zqyw4pDmfV9OY="

+        self.assertEqual(expected, digest)

+

+    def testEnd2End(self):

+        # one end to end header

+        response = {'content-type': 'application/atom+xml', 'te': 'deflate'}

+        end2end = httplib2._get_end2end_headers(response)

+        self.assertTrue('content-type' in end2end)

+        self.assertTrue('te' not in end2end)

+        self.assertTrue('connection' not in end2end)

+

+        # one end to end header that gets eliminated

+        response = {'connection': 'content-type', 'content-type': 'application/atom+xml', 'te': 'deflate'}

+        end2end = httplib2._get_end2end_headers(response)

+        self.assertTrue('content-type' not in end2end)

+        self.assertTrue('te' not in end2end)

+        self.assertTrue('connection' not in end2end)

+

+        # Degenerate case of no headers

+        response = {}

+        end2end = httplib2._get_end2end_headers(response)

+        self.assertEqual(0, len(end2end))

+

+        # Degenerate case of connection referrring to a header not passed in

+        response = {'connection': 'content-type'}

+        end2end = httplib2._get_end2end_headers(response)

+        self.assertEqual(0, len(end2end))

+

+

+class TestProxyInfo(unittest.TestCase):

+    def setUp(self):

+        self.orig_env = dict(os.environ)

+

+    def tearDown(self):

+        os.environ.clear()

+        os.environ.update(self.orig_env)

+

+    def test_from_url(self):

+        pi = httplib2.proxy_info_from_url('http://myproxy.example.com')

+        self.assertEqual(pi.proxy_host, 'myproxy.example.com')

+        self.assertEqual(pi.proxy_port, 80)

+        self.assertEqual(pi.proxy_user, None)

+

+    def test_from_url_ident(self):

+        pi = httplib2.proxy_info_from_url('http://zoidberg:fish@someproxy:99')

+        self.assertEqual(pi.proxy_host, 'someproxy')

+        self.assertEqual(pi.proxy_port, 99)

+        self.assertEqual(pi.proxy_user, 'zoidberg')

+        self.assertEqual(pi.proxy_pass, 'fish')

+

+    def test_from_env(self):

+        os.environ['http_proxy'] = 'http://myproxy.example.com:8080'

+        pi = httplib2.proxy_info_from_environment()

+        self.assertEqual(pi.proxy_host, 'myproxy.example.com')

+        self.assertEqual(pi.proxy_port, 8080)

+

+    def test_from_env_no_proxy(self):

+        os.environ['http_proxy'] = 'http://myproxy.example.com:80'

+        os.environ['https_proxy'] = 'http://myproxy.example.com:81'

+        pi = httplib2.proxy_info_from_environment('https')

+        self.assertEqual(pi.proxy_host, 'myproxy.example.com')

+        self.assertEqual(pi.proxy_port, 81)

+

+    def test_from_env_none(self):

+        os.environ.clear()

+        pi = httplib2.proxy_info_from_environment()

+        self.assertEqual(pi, None)

+

+

+if __name__ == '__main__':

+    unittest.main()

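The tests above target two private helpers in the vendored httplib2: the cache-freshness check _entry_disposition and the challenge parser _parse_www_authenticate. As a rough illustration only (this sketch is not part of the vendored sources, and the header values are invented), they behave roughly as follows:

# Illustrative sketch, not from the vendored sources; header values are made up.
import time
import httplib2

now = time.time()
response_headers = {
    'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now)),
    'cache-control': 'max-age=300',
}
# "FRESH" while the cached entry is inside its max-age window, "STALE" afterwards.
print(httplib2._entry_disposition(response_headers, {}))

# Challenges are keyed by lower-cased scheme, with parameters in a nested dict.
res = httplib2._parse_www_authenticate({'www-authenticate': 'Basic realm="me"'})
print(res['basic']['realm'])  # -> me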
diff --git a/catapult/third_party/gsutil/third_party/httplib2/ref.tex b/catapult/third_party/gsutil/third_party/httplib2/ref.tex
new file mode 100644
index 0000000..8093e3b
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/httplib2/ref.tex
@@ -0,0 +1,91 @@
+% Complete documentation on the extended LaTeX markup used for Python
+% documentation is available in ``Documenting Python'', which is part
+% of the standard documentation for Python.  It may be found online
+% at:
+%
+%     http://www.python.org/doc/current/doc/doc.html
+
+\documentclass{manual}
+
+\title{The httplib2 Library}
+
+\author{Joe Gregorio}
+
+% Please at least include a long-lived email address;
+% the rest is at your discretion.
+\authoraddress{
+%   Organization name, if applicable \\
+%   Street address, if you want to use it \\
+    Email: \email{joe@bitworking.org}
+}
+
+\date{Mar 8, 2007}       % update before release!
+
+\release{0.3}     % release version; this is used to define the
+                  % \version macro
+
+\makeindex          % tell \index to actually write the .idx file
+\makemodindex       % If this contains a lot of module sections.
+
+
+\begin{document}
+
+\maketitle
+
+% This makes the contents more accessible from the front page of the HTML.
+%\ifhtml
+%\chapter*{Front Matter\label{front}}
+%\fi
+
+%\input{copyright}
+
+\begin{abstract}
+\noindent
+
+The \module{httplib2} module is a comprehensive HTTP client library
+that handles caching, keep-alive, compression, redirects and
+many kinds of authentication.
+
+
+\end{abstract}
+
+\tableofcontents
+
+\chapter{Reference}
+
+\input{libhttplib2.tex}
+
+%\appendix
+%\chapter{...}
+
+%My appendix.
+
+%The \code{\e appendix} markup need not be repeated for additional
+%appendices.
+
+
+
+
+
+
+
+
+%
+%  The ugly "%begin{latexonly}" pseudo-environments are really just to
+%  keep LaTeX2HTML quiet during the \renewcommand{} macros; they're
+%  not really valuable.
+%
+%  If you don't want the Module Index, you can remove all of this up
+%  until the second \input line.
+%
+%begin{latexonly}
+\renewcommand{\indexname}{Module Index}
+%end{latexonly}
+\input{mod\jobname.ind}     % Module Index
+
+%begin{latexonly}
+\renewcommand{\indexname}{Index}
+%end{latexonly}
+\input{\jobname.ind}        % Index
+
+\end{document}
diff --git a/catapult/third_party/gsutil/third_party/httplib2/ref/about.html b/catapult/third_party/gsutil/third_party/httplib2/ref/about.html
new file mode 100644
index 0000000..b452de2
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/httplib2/ref/about.html
@@ -0,0 +1,112 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html>
+<head>
+<link rel="STYLESHEET" href="ref.css" type='text/css' />
+<link rel="first" href="ref.html" title='The httplib2 Library' />
+<link rel='contents' href='contents.html' title="Contents" />
+<link rel='last' href='about.html' title='About this document...' />
+<link rel='help' href='about.html' title='About this document...' />
+<link rel="prev" href="node2.html" />
+<link rel="parent" href="ref.html" />
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+<meta name='aesop' content='information' />
+<title>About this document ...</title>
+</head>
+<body>
+<div class="navigation">
+<div id='top-navigation-panel' xml:id='top-navigation-panel'>
+<table align="center" width="100%" cellpadding="0" cellspacing="2">
+<tr>
+<td class='online-navigation'><a rel="prev" title="1.1.4 Examples"
+  href="httplib2-example.html"><img src='previous.png'
+  border='0' height='32'  alt='Previous Page' width='32' /></a></td>
+<td class='online-navigation'><a rel="parent" title="The httplib2 Library"
+  href="ref.html"><img src='up.png'
+  border='0' height='32'  alt='Up one Level' width='32' /></a></td>
+<td class='online-navigation'><img src='next.png'
+  border='0' height='32'  alt='Next Page' width='32' /></td>
+<td align="center" width="100%">The httplib2 Library</td>
+<td class='online-navigation'><a rel="contents" title="Table of Contents"
+  href="contents.html"><img src='contents.png'
+  border='0' height='32'  alt='Contents' width='32' /></a></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+</tr></table>
+<div class='online-navigation'>
+<b class="navlabel">Previous:</b>
+<a class="sectref" rel="prev" href="httplib2-example.html">1.1.4 Examples</a>
+<b class="navlabel">Up:</b>
+<a class="sectref" rel="parent" href="ref.html">The httplib2 Library</a>
+</div>
+<hr /></div>
+</div>
+<!--End of Navigation Panel-->
+
+<h1><a name="SECTION003000000000000000000">
+About this document ...</a>
+</h1>
+ <strong>The httplib2 Library</strong>,
+Mar 8, 2007, Release 0.3
+<p> This document was generated using the <a
+    href="http://saftsack.fs.uni-bayreuth.de/~latex2ht/">
+    <strong>LaTeX</strong>2<tt>HTML</tt></a> translator.
+</p>
+
+<p> <a
+    href="http://saftsack.fs.uni-bayreuth.de/~latex2ht/">
+    <strong>LaTeX</strong>2<tt>HTML</tt></a> is Copyright &copy;
+  1993, 1994, 1995, 1996, 1997, <a
+    href="http://cbl.leeds.ac.uk/nikos/personal.html">Nikos
+    Drakos</a>, Computer Based Learning Unit, University of
+  Leeds, and Copyright &copy; 1997, 1998, <a
+    href="http://www.maths.mq.edu.au/~ross/">Ross
+    Moore</a>, Mathematics Department, Macquarie University,
+  Sydney.
+</p>
+
+<p> The application of <a
+    href="http://saftsack.fs.uni-bayreuth.de/~latex2ht/">
+    <strong>LaTeX</strong>2<tt>HTML</tt></a> to the Python
+  documentation has been heavily tailored by Fred L. Drake,
+  Jr.  Original navigation icons were contributed by Christopher
+  Petrilli.
+</p>
+
+<div class="navigation">
+<div class='online-navigation'>
+<p></p><hr />
+<table align="center" width="100%" cellpadding="0" cellspacing="2">
+<tr>
+<td class='online-navigation'><a rel="prev" title="1.1.4 Examples"
+  href="httplib2-example.html"><img src='previous.png'
+  border='0' height='32'  alt='Previous Page' width='32' /></a></td>
+<td class='online-navigation'><a rel="parent" title="The httplib2 Library"
+  href="ref.html"><img src='up.png'
+  border='0' height='32'  alt='Up one Level' width='32' /></a></td>
+<td class='online-navigation'><img src='next.png'
+  border='0' height='32'  alt='Next Page' width='32' /></td>
+<td align="center" width="100%">The httplib2 Library</td>
+<td class='online-navigation'><a rel="contents" title="Table of Contents"
+  href="contents.html"><img src='contents.png'
+  border='0' height='32'  alt='Contents' width='32' /></a></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+</tr></table>
+<div class='online-navigation'>
+<b class="navlabel">Previous:</b>
+<a class="sectref" rel="prev" href="httplib2-example.html">1.1.4 Examples</a>
+<b class="navlabel">Up:</b>
+<a class="sectref" rel="parent" href="ref.html">The httplib2 Library</a>
+</div>
+</div>
+<hr />
+<span class="release-info">Release 0.3, documentation updated on Mar 8, 2007.</span>
+</div>
+<!--End of Navigation Panel-->
+
+</body>
+</html>
diff --git a/catapult/third_party/gsutil/third_party/httplib2/ref/blank.png b/catapult/third_party/gsutil/third_party/httplib2/ref/blank.png
new file mode 100644
index 0000000..2af5639
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/httplib2/ref/blank.png
Binary files differ
diff --git a/catapult/third_party/gsutil/third_party/httplib2/ref/cache-objects.html b/catapult/third_party/gsutil/third_party/httplib2/ref/cache-objects.html
new file mode 100644
index 0000000..9baa7c2
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/httplib2/ref/cache-objects.html
@@ -0,0 +1,129 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html>
+<head>
+<link rel="STYLESHEET" href="ref.css" type='text/css' />
+<link rel="first" href="ref.html" title='The httplib2 Library' />
+<link rel='contents' href='contents.html' title="Contents" />
+<link rel='last' href='about.html' title='About this document...' />
+<link rel='help' href='about.html' title='About this document...' />
+<link rel="next" href="response-objects.html" />
+<link rel="prev" href="http-objects.html" />
+<link rel="parent" href="module-httplib2.html" />
+<link rel="next" href="response-objects.html" />
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+<meta name='aesop' content='information' />
+<title>1.1.2 Cache Objects</title>
+</head>
+<body>
+<div class="navigation">
+<div id='top-navigation-panel' xml:id='top-navigation-panel'>
+<table align="center" width="100%" cellpadding="0" cellspacing="2">
+<tr>
+<td class='online-navigation'><a rel="prev" title="1.1.1 http Objects"
+  href="http-objects.html"><img src='previous.png'
+  border='0' height='32'  alt='Previous Page' width='32' /></a></td>
+<td class='online-navigation'><a rel="parent" title="1.1 httplib2 A comprehensive"
+  href="module-httplib2.html"><img src='up.png'
+  border='0' height='32'  alt='Up one Level' width='32' /></a></td>
+<td class='online-navigation'><a rel="next" title="1.1.3 response Objects"
+  href="response-objects.html"><img src='next.png'
+  border='0' height='32'  alt='Next Page' width='32' /></a></td>
+<td align="center" width="100%">The httplib2 Library</td>
+<td class='online-navigation'><a rel="contents" title="Table of Contents"
+  href="contents.html"><img src='contents.png'
+  border='0' height='32'  alt='Contents' width='32' /></a></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+</tr></table>
+<div class='online-navigation'>
+<b class="navlabel">Previous:</b>
+<a class="sectref" rel="prev" href="http-objects.html">1.1.1 Http Objects</a>
+<b class="navlabel">Up:</b>
+<a class="sectref" rel="parent" href="module-httplib2.html">1.1 httplib2 A comprehensive</a>
+<b class="navlabel">Next:</b>
+<a class="sectref" rel="next" href="response-objects.html">1.1.3 Response Objects</a>
+</div>
+<hr /></div>
+</div>
+<!--End of Navigation Panel-->
+
+<h2><a name="SECTION002120000000000000000"></a>
+<a name="cache-objects"></a>
+<br>
+1.1.2 Cache Objects
+</h2>
+
+<p>
+If you wish to supply your own caching implementation
+then you will need to pass in an object that supports the
+following methods. Note that the <tt class="module">memcache</tt> module
+supports this interface natively.
+
+<p>
+<dl><dt><table cellpadding="0" cellspacing="0"><tr valign="baseline">
+  <td><nobr><b><tt id='l2h-23' xml:id='l2h-23' class="method">get</tt></b>(</nobr></td>
+  <td><var>key</var>)</td></tr></table></dt>
+<dd>
+Takes a string <var>key</var> and returns the value as a string.
+</dl>
+
+<p>
+<dl><dt><table cellpadding="0" cellspacing="0"><tr valign="baseline">
+  <td><nobr><b><tt id='l2h-24' xml:id='l2h-24' class="method">set</tt></b>(</nobr></td>
+  <td><var>key, value</var>)</td></tr></table></dt>
+<dd>
+Takes a string <var>key</var> and <var>value</var> and stores it in the cache.
+</dl>
+
+<p>
+<dl><dt><table cellpadding="0" cellspacing="0"><tr valign="baseline">
+  <td><nobr><b><tt id='l2h-25' xml:id='l2h-25' class="method">delete</tt></b>(</nobr></td>
+  <td><var>key</var>)</td></tr></table></dt>
+<dd>
+Deletes the cached value stored at <var>key</var>. The value
+of <var>key</var> is a string.
+</dl>
+
+<p>
+
+<div class="navigation">
+<div class='online-navigation'>
+<p></p><hr />
+<table align="center" width="100%" cellpadding="0" cellspacing="2">
+<tr>
+<td class='online-navigation'><a rel="prev" title="1.1.1 http Objects"
+  href="http-objects.html"><img src='previous.png'
+  border='0' height='32'  alt='Previous Page' width='32' /></a></td>
+<td class='online-navigation'><a rel="parent" title="1.1 httplib2 A comprehensive"
+  href="module-httplib2.html"><img src='up.png'
+  border='0' height='32'  alt='Up one Level' width='32' /></a></td>
+<td class='online-navigation'><a rel="next" title="1.1.3 response Objects"
+  href="response-objects.html"><img src='next.png'
+  border='0' height='32'  alt='Next Page' width='32' /></a></td>
+<td align="center" width="100%">The httplib2 Library</td>
+<td class='online-navigation'><a rel="contents" title="Table of Contents"
+  href="contents.html"><img src='contents.png'
+  border='0' height='32'  alt='Contents' width='32' /></a></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+</tr></table>
+<div class='online-navigation'>
+<b class="navlabel">Previous:</b>
+<a class="sectref" rel="prev" href="http-objects.html">1.1.1 Http Objects</a>
+<b class="navlabel">Up:</b>
+<a class="sectref" rel="parent" href="module-httplib2.html">1.1 httplib2 A comprehensive</a>
+<b class="navlabel">Next:</b>
+<a class="sectref" rel="next" href="response-objects.html">1.1.3 Response Objects</a>
+</div>
+</div>
+<hr />
+<span class="release-info">Release 0.3, documentation updated on Mar 8, 2007.</span>
+</div>
+<!--End of Navigation Panel-->
+
+</body>
+</html>
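The page above documents the get/set/delete interface a custom cache must expose. As a hedged sketch (not part of the vendored files; the class name DictCache is invented for the example), a plain in-memory wrapper is enough to satisfy it:

# Sketch of a custom cache object for httplib2, per the get/set/delete
# interface described in cache-objects.html above.
import httplib2

class DictCache(object):
    """In-memory cache; keys and values are strings."""
    def __init__(self):
        self._store = {}

    def get(self, key):
        # Return the cached value for key, or None when absent.
        return self._store.get(key)

    def set(self, key, value):
        self._store[key] = value

    def delete(self, key):
        self._store.pop(key, None)

# Per the documentation above, an object supporting this interface can be
# supplied as the cache instead of a directory name.
h = httplib2.Http(DictCache())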
diff --git a/catapult/third_party/gsutil/third_party/httplib2/ref/contents.html b/catapult/third_party/gsutil/third_party/httplib2/ref/contents.html
new file mode 100644
index 0000000..cfee565
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/httplib2/ref/contents.html
@@ -0,0 +1,105 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html>
+<head>
+<link rel="STYLESHEET" href="ref.css" type='text/css' />
+<link rel="first" href="ref.html" title='The httplib2 Library' />
+<link rel='contents' href='contents.html' title="Contents" />
+<link rel='last' href='about.html' title='About this document...' />
+<link rel='help' href='about.html' title='About this document...' />
+<link rel="next" href="node2.html" />
+<link rel="prev" href="ref.html" />
+<link rel="parent" href="ref.html" />
+<link rel="next" href="node2.html" />
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+<meta name='aesop' content='information' />
+<title>Contents</title>
+</head>
+<body>
+<div class="navigation">
+<div id='top-navigation-panel' xml:id='top-navigation-panel'>
+<table align="center" width="100%" cellpadding="0" cellspacing="2">
+<tr>
+<td class='online-navigation'><a rel="prev" title="The httplib2 Library"
+  href="ref.html"><img src='previous.png'
+  border='0' height='32'  alt='Previous Page' width='32' /></a></td>
+<td class='online-navigation'><a rel="parent" title="The httplib2 Library"
+  href="ref.html"><img src='up.png'
+  border='0' height='32'  alt='Up one Level' width='32' /></a></td>
+<td class='online-navigation'><a rel="next" title="1. Reference"
+  href="node2.html"><img src='next.png'
+  border='0' height='32'  alt='Next Page' width='32' /></a></td>
+<td align="center" width="100%">The httplib2 Library</td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+</tr></table>
+<div class='online-navigation'>
+<b class="navlabel">Previous:</b>
+<a class="sectref" rel="prev" href="ref.html">The httplib2 Library</a>
+<b class="navlabel">Up:</b>
+<a class="sectref" rel="parent" href="ref.html">The httplib2 Library</a>
+<b class="navlabel">Next:</b>
+<a class="sectref" rel="next" href="node2.html">1. Reference</a>
+</div>
+<hr /></div>
+</div>
+<!--End of Navigation Panel-->
+<br><h2><a name="SECTION001000000000000000000">
+Contents</a>
+</h2>
+<!--Table of Contents-->
+
+<ul class="TofC">
+<li><a href="node2.html">1. Reference</a>
+<ul>
+<li><a href="module-httplib2.html">1.1 httplib2 A comprehensive HTTP client library.</a>
+<ul>
+<li><a href="http-objects.html">1.1.1 Http Objects</a>
+<li><a href="cache-objects.html">1.1.2 Cache Objects</a>
+<li><a href="response-objects.html">1.1.3 Response Objects</a>
+<li><a href="httplib2-example.html">1.1.4 Examples</a>
+</ul></ul></ul>
+<!--End of Table of Contents-->
+<p>
+
+<div class="navigation">
+<div class='online-navigation'>
+<p></p><hr />
+<table align="center" width="100%" cellpadding="0" cellspacing="2">
+<tr>
+<td class='online-navigation'><a rel="prev" title="The httplib2 Library"
+  href="ref.html"><img src='previous.png'
+  border='0' height='32'  alt='Previous Page' width='32' /></a></td>
+<td class='online-navigation'><a rel="parent" title="The httplib2 Library"
+  href="ref.html"><img src='up.png'
+  border='0' height='32'  alt='Up one Level' width='32' /></a></td>
+<td class='online-navigation'><a rel="next" title="1. Reference"
+  href="node2.html"><img src='next.png'
+  border='0' height='32'  alt='Next Page' width='32' /></a></td>
+<td align="center" width="100%">The httplib2 Library</td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+</tr></table>
+<div class='online-navigation'>
+<b class="navlabel">Previous:</b>
+<a class="sectref" rel="prev" href="ref.html">The httplib2 Library</a>
+<b class="navlabel">Up:</b>
+<a class="sectref" rel="parent" href="ref.html">The httplib2 Library</a>
+<b class="navlabel">Next:</b>
+<a class="sectref" rel="next" href="node2.html">1. Reference</a>
+</div>
+</div>
+<hr />
+<span class="release-info">Release 0.3, documentation updated on Mar 8, 2007.</span>
+</div>
+<!--End of Navigation Panel-->
+
+</body>
+</html>
diff --git a/catapult/third_party/gsutil/third_party/httplib2/ref/contents.png b/catapult/third_party/gsutil/third_party/httplib2/ref/contents.png
new file mode 100644
index 0000000..3429be0
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/httplib2/ref/contents.png
Binary files differ
diff --git a/catapult/third_party/gsutil/third_party/httplib2/ref/http-objects.html b/catapult/third_party/gsutil/third_party/httplib2/ref/http-objects.html
new file mode 100644
index 0000000..603e52a
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/httplib2/ref/http-objects.html
@@ -0,0 +1,205 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html>
+<head>
+<link rel="STYLESHEET" href="ref.css" type='text/css' />
+<link rel="first" href="ref.html" title='The httplib2 Library' />
+<link rel='contents' href='contents.html' title="Contents" />
+<link rel='last' href='about.html' title='About this document...' />
+<link rel='help' href='about.html' title='About this document...' />
+<link rel="next" href="cache-objects.html" />
+<link rel="prev" href="module-httplib2.html" />
+<link rel="parent" href="module-httplib2.html" />
+<link rel="next" href="cache-objects.html" />
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+<meta name='aesop' content='information' />
+<title>1.1.1 Http Objects</title>
+</head>
+<body>
+<div class="navigation">
+<div id='top-navigation-panel' xml:id='top-navigation-panel'>
+<table align="center" width="100%" cellpadding="0" cellspacing="2">
+<tr>
+<td class='online-navigation'><a rel="prev" title="1.1 httplib2 A comprehensive"
+  href="module-httplib2.html"><img src='previous.png'
+  border='0' height='32'  alt='Previous Page' width='32' /></a></td>
+<td class='online-navigation'><a rel="parent" title="1.1 httplib2 A comprehensive"
+  href="module-httplib2.html"><img src='up.png'
+  border='0' height='32'  alt='Up one Level' width='32' /></a></td>
+<td class='online-navigation'><a rel="next" title="1.1.2 cache Objects"
+  href="cache-objects.html"><img src='next.png'
+  border='0' height='32'  alt='Next Page' width='32' /></a></td>
+<td align="center" width="100%">The httplib2 Library</td>
+<td class='online-navigation'><a rel="contents" title="Table of Contents"
+  href="contents.html"><img src='contents.png'
+  border='0' height='32'  alt='Contents' width='32' /></a></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+</tr></table>
+<div class='online-navigation'>
+<b class="navlabel">Previous:</b>
+<a class="sectref" rel="prev" href="module-httplib2.html">1.1 httplib2 A comprehensive</a>
+<b class="navlabel">Up:</b>
+<a class="sectref" rel="parent" href="module-httplib2.html">1.1 httplib2 A comprehensive</a>
+<b class="navlabel">Next:</b>
+<a class="sectref" rel="next" href="cache-objects.html">1.1.2 Cache Objects</a>
+</div>
+<hr /></div>
+</div>
+<!--End of Navigation Panel-->
+
+<h2><a name="SECTION002110000000000000000"></a>
+<a name="http-objects"></a>
+<br>
+1.1.1 Http Objects
+</h2>
+
+<p>
+Http objects have the following methods:
+
+<p>
+<dl><dt><table cellpadding="0" cellspacing="0"><tr valign="baseline">
+  <td><nobr><b><tt id='l2h-15' xml:id='l2h-15' class="method">request</tt></b>(</nobr></td>
+  <td><var>uri, </var><big>[</big><var>method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None</var><big>]</big><var></var>)</td></tr></table></dt>
+<dd>
+Performs a single HTTP request.
+The <var>uri</var> is the URI of the HTTP resource and can begin with either <code>http</code> or <code>https</code>. The value of <var>uri</var> must be an absolute URI.
+
+<p>
+The <var>method</var> is the HTTP method to perform, such as <code>GET</code>, <code>POST</code>, <code>DELETE</code>, etc. There is no restriction
+on the methods allowed.
+
+<p>
+The <var>body</var> is the entity body to be sent with the request. It is a string
+object.
+
+<p>
+Any extra headers that are to be sent with the request should be provided in the
+<var>headers</var> dictionary.
+
+<p>
+The maximum number of redirects to follow before raising an exception is <var>redirections</var>. The default is 5.
+
+<p>
+The <var>connection_type</var> is the type of connection object to use. The supplied class
+should implement the interface of httplib.HTTPConnection.
+
+<p>
+The return value is a tuple of (response, content), the first being an instance of the
+<tt class="class">Response</tt> class, the second being a string that contains the response entity body.
+</dl>
+
+<p>
+<dl><dt><table cellpadding="0" cellspacing="0"><tr valign="baseline">
+  <td><nobr><b><tt id='l2h-16' xml:id='l2h-16' class="method">add_credentials</tt></b>(</nobr></td>
+  <td><var>name, password, </var><big>[</big><var>domain=None</var><big>]</big><var></var>)</td></tr></table></dt>
+<dd>
+Adds a name and password that will be used when a request
+requires authentication. Supplying the optional <var>domain</var> name will
+restrict these credentials to only be sent to the specified
+domain. If <var>domain</var> is not specified then the given credentials will
+be used to try to satisfy every HTTP 401 challenge.
+</dl>
+
+<p>
+<dl><dt><table cellpadding="0" cellspacing="0"><tr valign="baseline">
+  <td><nobr><b><tt id='l2h-17' xml:id='l2h-17' class="method">add_certificate</tt></b>(</nobr></td>
+  <td><var>key, cert, domain</var>)</td></tr></table></dt>
+<dd>
+Add a <var>key</var> and <var>cert</var> that will be used for an SSL connection
+to the specified domain. <var>key</var> is the name of a PEM formatted
+file that contains your private key. <var>cert</var> is a PEM formatted certificate chain file.
+</dl>
+
+<p>
+<dl><dt><table cellpadding="0" cellspacing="0"><tr valign="baseline">
+  <td><nobr><b><tt id='l2h-18' xml:id='l2h-18' class="method">clear_credentials</tt></b>(</nobr></td>
+  <td><var></var>)</td></tr></table></dt>
+<dd>
+Remove all the names and passwords used for authentication.
+</dl>
+
+<p>
+<dl><dt><b><tt id='l2h-19' xml:id='l2h-19' class="member">follow_redirects</tt></b></dt>
+<dd>
+If <code>True</code>, which is the default, safe redirects are followed, where
+safe means that the client is only doing a <code>GET</code> or <code>HEAD</code> on the
+URI to which it is being redirected. If <code>False</code> then no redirects are followed.
+Note that a False 'follow_redirects' takes precedence over a True 'follow_all_redirects'.
+Put another way, for 'follow_all_redirects' to have any effect, 'follow_redirects'
+must be True.
+</dl>
+
+<p>
+<dl><dt><b><tt id='l2h-20' xml:id='l2h-20' class="member">follow_all_redirects</tt></b></dt>
+<dd>
+If <code>False</code>, which is the default, only safe redirects are followed, where
+safe means that the client is only doing a <code>GET</code> or <code>HEAD</code> on the
+URI to which it is being redirected. If <code>True</code> then all redirects are followed.
+Note that a False 'follow_redirects' takes precedence over a True 'follow_all_redirects'.
+Put another way, for 'follow_all_redirects' to have any effect, 'follow_redirects'
+must be True.
+</dl>
+
+<p>
+<dl><dt><b><tt id='l2h-21' xml:id='l2h-21' class="member">force_exception_to_status_code</tt></b></dt>
+<dd>
+If <code>True</code>, which is the default, then no <tt class="module">httplib2</tt> exceptions will be thrown. Instead,
+those error conditions will be turned into <tt class="class">Response</tt> objects
+that will be returned normally.
+
+<p>
+If <code>False</code>, then exceptions will be thrown.
+</dl>
+
+<p>
+<dl><dt><b><tt id='l2h-22' xml:id='l2h-22' class="member">ignore_etag</tt></b></dt>
+<dd>
+Defaults to <code>False</code>. If <code>True</code>, then any etags present in the cached response
+are ignored when processing the current request, i.e. httplib2 does <strong>not</strong> use
+'if-match' for PUT or 'if-none-match' when GET or HEAD requests are made. This
+is mainly to deal with broken servers which supply an etag, but change it capriciously.
+</dl>
+
+<p>
+
+<div class="navigation">
+<div class='online-navigation'>
+<p></p><hr />
+<table align="center" width="100%" cellpadding="0" cellspacing="2">
+<tr>
+<td class='online-navigation'><a rel="prev" title="1.1 httplib2 A comprehensive"
+  href="module-httplib2.html"><img src='previous.png'
+  border='0' height='32'  alt='Previous Page' width='32' /></a></td>
+<td class='online-navigation'><a rel="parent" title="1.1 httplib2 A comprehensive"
+  href="module-httplib2.html"><img src='up.png'
+  border='0' height='32'  alt='Up one Level' width='32' /></a></td>
+<td class='online-navigation'><a rel="next" title="1.1.2 cache Objects"
+  href="cache-objects.html"><img src='next.png'
+  border='0' height='32'  alt='Next Page' width='32' /></a></td>
+<td align="center" width="100%">The httplib2 Library</td>
+<td class='online-navigation'><a rel="contents" title="Table of Contents"
+  href="contents.html"><img src='contents.png'
+  border='0' height='32'  alt='Contents' width='32' /></a></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+</tr></table>
+<div class='online-navigation'>
+<b class="navlabel">Previous:</b>
+<a class="sectref" rel="prev" href="module-httplib2.html">1.1 httplib2 A comprehensive</a>
+<b class="navlabel">Up:</b>
+<a class="sectref" rel="parent" href="module-httplib2.html">1.1 httplib2 A comprehensive</a>
+<b class="navlabel">Next:</b>
+<a class="sectref" rel="next" href="cache-objects.html">1.1.2 Cache Objects</a>
+</div>
+</div>
+<hr />
+<span class="release-info">Release 0.3, documentation updated on Mar 8, 2007.</span>
+</div>
+<!--End of Navigation Panel-->
+
+</body>
+</html>
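Tying the attributes documented above together, here is a hedged sketch (not taken from the vendored docs; the URL and credential values are placeholders) of configuring an Http object before issuing a request:

# Sketch only: exercising the Http attributes described in http-objects.html.
import httplib2

h = httplib2.Http('.cache')
h.add_credentials('name', 'password')     # offered in response to 401 challenges
h.follow_redirects = True                 # follow safe (GET/HEAD) redirects; documented default
h.follow_all_redirects = False            # set True to also follow redirects on other methods
h.force_exception_to_status_code = True   # report errors as Response objects instead of raising
h.ignore_etag = False                     # keep if-match / if-none-match validation enabled

resp, content = h.request('http://example.org/', 'GET')
print(resp.status)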
diff --git a/catapult/third_party/gsutil/third_party/httplib2/ref/httplib2-example.html b/catapult/third_party/gsutil/third_party/httplib2/ref/httplib2-example.html
new file mode 100644
index 0000000..6607efe
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/httplib2/ref/httplib2-example.html
@@ -0,0 +1,188 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html>
+<head>
+<link rel="STYLESHEET" href="ref.css" type='text/css' />
+<link rel="first" href="ref.html" title='The httplib2 Library' />
+<link rel='contents' href='contents.html' title="Contents" />
+<link rel='last' href='about.html' title='About this document...' />
+<link rel='help' href='about.html' title='About this document...' />
+<link rel="prev" href="response-objects.html" />
+<link rel="parent" href="module-httplib2.html" />
+<link rel="next" href="about.html" />
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+<meta name='aesop' content='information' />
+<title>1.1.4 Examples </title>
+</head>
+<body>
+<div class="navigation">
+<div id='top-navigation-panel' xml:id='top-navigation-panel'>
+<table align="center" width="100%" cellpadding="0" cellspacing="2">
+<tr>
+<td class='online-navigation'><a rel="prev" title="1.1.3 response Objects"
+  href="response-objects.html"><img src='previous.png'
+  border='0' height='32'  alt='Previous Page' width='32' /></a></td>
+<td class='online-navigation'><a rel="parent" title="1.1 httplib2 A comprehensive"
+  href="module-httplib2.html"><img src='up.png'
+  border='0' height='32'  alt='Up one Level' width='32' /></a></td>
+<td class='online-navigation'><a rel="next" title="About this document ..."
+  href="about.html"><img src='next.png'
+  border='0' height='32'  alt='Next Page' width='32' /></a></td>
+<td align="center" width="100%">The httplib2 Library</td>
+<td class='online-navigation'><a rel="contents" title="Table of Contents"
+  href="contents.html"><img src='contents.png'
+  border='0' height='32'  alt='Contents' width='32' /></a></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+</tr></table>
+<div class='online-navigation'>
+<b class="navlabel">Previous:</b>
+<a class="sectref" rel="prev" href="response-objects.html">1.1.3 Response Objects</a>
+<b class="navlabel">Up:</b>
+<a class="sectref" rel="parent" href="module-httplib2.html">1.1 httplib2 A comprehensive</a>
+<b class="navlabel">Next:</b>
+<a class="sectref" rel="next" href="about.html">About this document ...</a>
+</div>
+<hr /></div>
+</div>
+<!--End of Navigation Panel-->
+
+<h2><a name="SECTION002140000000000000000"></a><a name="httplib2-example"></a>
+<br>
+1.1.4 Examples
+</h2>
+
+<p>
+To do a simple <code>GET</code> request just supply the absolute URI
+of the resource:
+
+<p>
+<div class="verbatim"><pre>
+import httplib2
+h = httplib2.Http()
+resp, content = h.request("http://bitworking.org/")
+assert resp.status == 200
+assert resp['content-type'] == 'text/html'
+</pre></div>
+
+<p>
+Here is a more complex example that does a PUT
+of some text to a resource that requires authentication.
+The Http instance also uses a file cache
+in the directory <code>.cache</code>.
+
+<p>
+<div class="verbatim"><pre>
+import httplib2
+h = httplib2.Http(".cache")
+h.add_credentials('name', 'password')
+resp, content = h.request("https://example.org/chap/2",
+    "PUT", body="This is text",
+    headers={'content-type':'text/plain'} )
+</pre></div>
+
+<p>
+Here is an example that connects to a server that
+supports the Atom Publishing Protocol.
+
+<p>
+<div class="verbatim"><pre>
+import httplib2
+h = httplib2.Http()
+h.add_credentials(myname, mypasswd)
+h.follow_all_redirects = True
+headers = {'Content-Type': 'application/atom+xml'}
+body    = """&lt;?xml version="1.0" ?&gt;
+    &lt;entry xmlns="http://www.w3.org/2005/Atom"&gt;
+      &lt;title&gt;Atom-Powered Robots Run Amok&lt;/title&gt;
+      &lt;id&gt;urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a&lt;/id&gt;
+      &lt;updated&gt;2003-12-13T18:30:02Z&lt;/updated&gt;
+      &lt;author&gt;&lt;name&gt;John Doe&lt;/name&gt;&lt;/author&gt;
+      &lt;content&gt;Some text.&lt;/content&gt;
+&lt;/entry&gt;
+"""
+uri     = "http://www.example.com/collection/"
+resp, content = h.request(uri, "POST", body=body, headers=headers)
+</pre></div>
+
+<p>
+Here is an example of providing data to an HTML form processor.
+In this case we presume this is a POST form. We need to take our
+data and format it as "application/x-www-form-urlencoded" data and use that as a
+body for a POST request.
+
+<p>
+<div class="verbatim"><pre>
+&gt;&gt;&gt; import httplib2
+&gt;&gt;&gt; import urllib
+&gt;&gt;&gt; data = {'name': 'fred', 'address': '123 shady lane'}
+&gt;&gt;&gt; body = urllib.urlencode(data)
+&gt;&gt;&gt; body
+'name=fred&amp;address=123+shady+lane'
+&gt;&gt;&gt; h = httplib2.Http()
+&gt;&gt;&gt; resp, content = h.request("http://example.com", method="POST", body=body)
+</pre></div>
+
+<p>
+Here is an example of using a proxy server:
+<div class="verbatim"><pre>
+import httplib2
+import socks
+
+httplib2.debuglevel=4
+h = httplib2.Http(proxy_info = httplib2.ProxyInfo(socks.PROXY_TYPE_HTTP, 'localhost', 8000))
+r,c = h.request("http://bitworking.org/news/")
+</pre></div>
+
+<p>
+
+<p>
+<IMG
+ WIDTH="556" HEIGHT="20" ALIGN="BOTTOM" BORDER="0"
+ SRC="img1.png"
+ ALT="\begin{center}\vbox{\input{modref.ind}
+}\end{center}">
+<p>
+
+<p>
+
+<div class="navigation">
+<div class='online-navigation'>
+<p></p><hr />
+<table align="center" width="100%" cellpadding="0" cellspacing="2">
+<tr>
+<td class='online-navigation'><a rel="prev" title="1.1.3 response Objects"
+  href="response-objects.html"><img src='previous.png'
+  border='0' height='32'  alt='Previous Page' width='32' /></a></td>
+<td class='online-navigation'><a rel="parent" title="1.1 httplib2 A comprehensive"
+  href="module-httplib2.html"><img src='up.png'
+  border='0' height='32'  alt='Up one Level' width='32' /></a></td>
+<td class='online-navigation'><a rel="next" title="About this document ..."
+  href="about.html"><img src='next.png'
+  border='0' height='32'  alt='Next Page' width='32' /></a></td>
+<td align="center" width="100%">The httplib2 Library</td>
+<td class='online-navigation'><a rel="contents" title="Table of Contents"
+  href="contents.html"><img src='contents.png'
+  border='0' height='32'  alt='Contents' width='32' /></a></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+</tr></table>
+<div class='online-navigation'>
+<b class="navlabel">Previous:</b>
+<a class="sectref" rel="prev" href="response-objects.html">1.1.3 Response Objects</a>
+<b class="navlabel">Up:</b>
+<a class="sectref" rel="parent" href="module-httplib2.html">1.1 httplib2 A comprehensive</a>
+<b class="navlabel">Next:</b>
+<a class="sectref" rel="next" href="about.html">About this document ...</a>
+</div>
+</div>
+<hr />
+<span class="release-info">Release 0.3, documentation updated on Mar 8, 2007.</span>
+</div>
+<!--End of Navigation Panel-->
+
+</body>
+</html>
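A small variation on the proxy example above (a sketch, not from the vendored docs): instead of constructing ProxyInfo by hand, the vendored copy also provides proxy_info_from_environment, exercised by the tests earlier in this patch, which reads the http_proxy/https_proxy variables:

# Sketch: pick up proxy settings from the environment. Assumes a variable such
# as http_proxy=http://localhost:8000 is set; otherwise the helper returns None.
import httplib2

pi = httplib2.proxy_info_from_environment()
h = httplib2.Http(proxy_info=pi)
resp, content = h.request("http://bitworking.org/news/")
print(resp.status)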
diff --git a/catapult/perf_insights/third_party/__init__.py b/catapult/third_party/gsutil/third_party/httplib2/ref/images.idx
similarity index 100%
copy from catapult/perf_insights/third_party/__init__.py
copy to catapult/third_party/gsutil/third_party/httplib2/ref/images.idx
diff --git a/catapult/third_party/gsutil/third_party/httplib2/ref/img1.old b/catapult/third_party/gsutil/third_party/httplib2/ref/img1.old
new file mode 100644
index 0000000..c9ce471
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/httplib2/ref/img1.old
Binary files differ
diff --git a/catapult/third_party/gsutil/third_party/httplib2/ref/img1.png b/catapult/third_party/gsutil/third_party/httplib2/ref/img1.png
new file mode 100644
index 0000000..c9ce471
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/httplib2/ref/img1.png
Binary files differ
diff --git a/catapult/perf_insights/third_party/__init__.py b/catapult/third_party/gsutil/third_party/httplib2/ref/img2.old
similarity index 100%
copy from catapult/perf_insights/third_party/__init__.py
copy to catapult/third_party/gsutil/third_party/httplib2/ref/img2.old
diff --git a/catapult/perf_insights/third_party/__init__.py b/catapult/third_party/gsutil/third_party/httplib2/ref/img2.png
similarity index 100%
copy from catapult/perf_insights/third_party/__init__.py
copy to catapult/third_party/gsutil/third_party/httplib2/ref/img2.png
diff --git a/catapult/third_party/gsutil/third_party/httplib2/ref/index.html b/catapult/third_party/gsutil/third_party/httplib2/ref/index.html
new file mode 100644
index 0000000..839d65e
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/httplib2/ref/index.html
@@ -0,0 +1,129 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html>
+<head>
+<link rel="STYLESHEET" href="ref.css" type='text/css' />
+<link rel="first" href="ref.html" title='The httplib2 Library' />
+<link rel='contents' href='contents.html' title="Contents" />
+<link rel='last' href='about.html' title='About this document...' />
+<link rel='help' href='about.html' title='About this document...' />
+<link rel="next" href="contents.html" />
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+<meta name='aesop' content='information' />
+<title>The httplib2 Library</title>
+</head>
+<body>
+<div class="navigation">
+<div id='top-navigation-panel' xml:id='top-navigation-panel'>
+<table align="center" width="100%" cellpadding="0" cellspacing="2">
+<tr>
+<td class='online-navigation'><img src='previous.png'
+  border='0' height='32'  alt='Previous Page' width='32' /></td>
+<td class='online-navigation'><img src='up.png'
+  border='0' height='32'  alt='Up one Level' width='32' /></td>
+<td class='online-navigation'><a rel="next" title="Contents"
+  href="contents.html"><img src='next.png'
+  border='0' height='32'  alt='Next Page' width='32' /></a></td>
+<td align="center" width="100%">The httplib2 Library</td>
+<td class='online-navigation'><a rel="contents" title="Table of Contents"
+  href="contents.html"><img src='contents.png'
+  border='0' height='32'  alt='Contents' width='32' /></a></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+</tr></table>
+<div class='online-navigation'>
+<b class="navlabel">Next:</b>
+<a class="sectref" rel="next" href="contents.html">Contents</a>
+</div>
+<hr /></div>
+</div>
+<!--End of Navigation Panel-->
+
+<p>
+
+<div class="titlepage">
+<div class='center'>
+<h1>The httplib2 Library</h1>
+<p><b><font size="+2">Joe Gregorio</font></b></p>
+<p>
+Email: <span class="email">joe@bitworking.org</span>
+</p>
+<p><strong>Release 0.3</strong><br />
+<strong>Mar 8, 2007</strong></p>
+<p></p>
+</div>
+</div>
+
+<p>
+
+<h3>Abstract:</h3>
+<div class="ABSTRACT">
+
+<p>
+The <tt class="module">httplib2</tt> module is a comprehensive HTTP client library
+that handles caching, keep-alive, compression, redirects and
+many kinds of authentication.
+
+<p>
+</div>
+<p>
+
+<p>
+
+<p><br /></p><hr class='online-navigation' />
+<div class='online-navigation'>
+<!--Table of Child-Links-->
+<a name="CHILD_LINKS"></a>
+
+<ul class="ChildLinks">
+<li><a href="contents.html">Contents</a>
+<li><a href="node2.html">1. Reference</a>
+<ul>
+<li><a href="module-httplib2.html">1.1 <tt class="module">httplib2</tt>
+A comprehensive HTTP client library.</a>
+<ul>
+<li><a href="http-objects.html">1.1.1 Http Objects</a>
+<li><a href="cache-objects.html">1.1.2 Cache Objects</a>
+<li><a href="response-objects.html">1.1.3 Response Objects</a>
+<li><a href="httplib2-example.html">1.1.4 Examples</a>
+</ul>
+</ul>
+<li><a href="about.html">About this document ...</a>
+</ul>
+<!--End of Table of Child-Links-->
+</div>
+
+<div class="navigation">
+<div class='online-navigation'>
+<p></p><hr />
+<table align="center" width="100%" cellpadding="0" cellspacing="2">
+<tr>
+<td class='online-navigation'><img src='previous.png'
+  border='0' height='32'  alt='Previous Page' width='32' /></td>
+<td class='online-navigation'><img src='up.png'
+  border='0' height='32'  alt='Up one Level' width='32' /></td>
+<td class='online-navigation'><a rel="next" title="Contents"
+  href="contents.html"><img src='next.png'
+  border='0' height='32'  alt='Next Page' width='32' /></a></td>
+<td align="center" width="100%">The httplib2 Library</td>
+<td class='online-navigation'><a rel="contents" title="Table of Contents"
+  href="contents.html"><img src='contents.png'
+  border='0' height='32'  alt='Contents' width='32' /></a></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+</tr></table>
+<div class='online-navigation'>
+<b class="navlabel">Next:</b>
+<a class="sectref" rel="next" href="contents.html">Contents</a>
+</div>
+</div>
+<hr />
+<span class="release-info">Release 0.3, documentation updated on Mar 8, 2007.</span>
+</div>
+<!--End of Navigation Panel-->
+
+</body>
+</html>
diff --git a/catapult/third_party/gsutil/third_party/httplib2/ref/index.png b/catapult/third_party/gsutil/third_party/httplib2/ref/index.png
new file mode 100644
index 0000000..cd918af
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/httplib2/ref/index.png
Binary files differ
diff --git a/catapult/perf_insights/third_party/__init__.py b/catapult/third_party/gsutil/third_party/httplib2/ref/modimages.idx
similarity index 100%
copy from catapult/perf_insights/third_party/__init__.py
copy to catapult/third_party/gsutil/third_party/httplib2/ref/modimages.idx
diff --git a/catapult/third_party/gsutil/third_party/httplib2/ref/module-httplib2.html b/catapult/third_party/gsutil/third_party/httplib2/ref/module-httplib2.html
new file mode 100644
index 0000000..155592b
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/httplib2/ref/module-httplib2.html
@@ -0,0 +1,280 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html>
+<head>
+<link rel="STYLESHEET" href="ref.css" type='text/css' />
+<link rel="first" href="ref.html" title='The httplib2 Library' />
+<link rel='contents' href='contents.html' title="Contents" />
+<link rel='last' href='about.html' title='About this document...' />
+<link rel='help' href='about.html' title='About this document...' />
+<link rel="prev" href="node2.html" />
+<link rel="parent" href="node2.html" />
+<link rel="next" href="http-objects.html" />
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+<meta name='aesop' content='information' />
+<title>1.1 httplib2 A comprehensive HTTP client library. </title>
+</head>
+<body>
+<div class="navigation">
+<div id='top-navigation-panel' xml:id='top-navigation-panel'>
+<table align="center" width="100%" cellpadding="0" cellspacing="2">
+<tr>
+<td class='online-navigation'><a rel="prev" title="1. Reference"
+  href="node2.html"><img src='previous.png'
+  border='0' height='32'  alt='Previous Page' width='32' /></a></td>
+<td class='online-navigation'><a rel="parent" title="1. Reference"
+  href="node2.html"><img src='up.png'
+  border='0' height='32'  alt='Up one Level' width='32' /></a></td>
+<td class='online-navigation'><a rel="next" title="1.1.1 http Objects"
+  href="http-objects.html"><img src='next.png'
+  border='0' height='32'  alt='Next Page' width='32' /></a></td>
+<td align="center" width="100%">The httplib2 Library</td>
+<td class='online-navigation'><a rel="contents" title="Table of Contents"
+  href="contents.html"><img src='contents.png'
+  border='0' height='32'  alt='Contents' width='32' /></a></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+</tr></table>
+<div class='online-navigation'>
+<b class="navlabel">Previous:</b>
+<a class="sectref" rel="prev" href="node2.html">1. Reference</a>
+<b class="navlabel">Up:</b>
+<a class="sectref" rel="parent" href="node2.html">1. Reference</a>
+<b class="navlabel">Next:</b>
+<a class="sectref" rel="next" href="http-objects.html">1.1.1 Http Objects</a>
+</div>
+<hr /></div>
+</div>
+<!--End of Navigation Panel-->
+
+<h1><a name="SECTION002100000000000000000">
+1.1 <tt class="module">httplib2</tt>
+         A comprehensive HTTP client library.  </a>
+</h1>
+
+<p>
+<a name="module-httplib2"></a>
+<p>
+
+<p>
+
+<p>
+The <tt class="module">httplib2</tt> module is a comprehensive HTTP client library with the following features:
+
+<p>
+<dl>
+<dt><strong>HTTP and HTTPS</strong></dt>
+<dd>HTTPS support is only available if the socket module was compiled with SSL support.
+</dd>
+<dt><strong>Keep-Alive</strong></dt>
+<dd>Supports HTTP 1.1 Keep-Alive, keeping the socket open and performing multiple requests over the same connection if possible.
+</dd>
+<dt><strong>Authentication</strong></dt>
+<dd>The following three types of HTTP Authentication are supported. These can be used over both HTTP and HTTPS.
+
+<ul>
+<li>Digest
+</li>
+<li>Basic
+</li>
+<li>WSSE
+
+</li>
+</ul>
+</dd>
+<dt><strong>Caching</strong></dt>
+<dd>The module can optionally operate with a private cache that understands the Cache-Control: header and uses both the ETag and Last-Modified cache validators.
+</dd>
+<dt><strong>All Methods</strong></dt>
+<dd>The module can handle any HTTP request method, not just GET and POST.
+</dd>
+<dt><strong>Redirects</strong></dt>
+<dd>Automatically follows 3XX redirects on GETs.
+</dd>
+<dt><strong>Compression</strong></dt>
+<dd>Handles both 'deflate' and 'gzip' types of compression.
+</dd>
+<dt><strong>Proxies</strong></dt>
+<dd>If the Socksipy module is installed then httplib2 can handle SOCKS4, SOCKS5 and HTTP proxies.
+</dd>
+<dt><strong>Lost update support</strong></dt>
+<dd>Automatically adds back ETags into PUT requests to resources we have already cached. This implements Section 3.2 of "Detecting the Lost Update Problem Using Unreserved Checkout".
+</dd>
+</dl>
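+
+<p>
+A minimal usage sketch tying these features together (the URI and the
+cache directory name below are illustrative):
+
+<div class="verbatim"><pre>
+import httplib2
+
+# Create a client with a flat-file cache in the ".cache" directory.
+h = httplib2.Http(".cache")
+
+# Perform a GET; request() returns a (Response, content) tuple.  A repeated
+# request may be answered from the cache, subject to Cache-Control headers.
+resp, content = h.request("http://example.org/", "GET")
+</pre></div>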
+
+<p>
+The <tt class="module">httplib2</tt> module defines the following variables:
+
+<p>
+<dl><dt><b><tt id='l2h-2' xml:id='l2h-2'>debuglevel</tt></b></dt>
+<dd>
+The amount of debugging information to print. The default is 0.
+</dd></dl>
+
+<p>
+The <tt class="module">httplib2</tt> module may raise the following exceptions. Note that
+there is an option that turns exceptions into
+normal responses with an HTTP status code indicating that
+an error occurred. See <tt class="member">Http.force_exception_to_status_code</tt>.
+
+<p>
+<dl><dt><b><span class="typelabel">exception</span>&nbsp;<tt id='l2h-3' xml:id='l2h-3' class="exception">HttpLib2Error</tt></b></dt>
+<dd>
+The Base Exception for all exceptions raised by httplib2.
+</dd></dl>
+
+<p>
+<dl><dt><b><span class="typelabel">exception</span>&nbsp;<tt id='l2h-4' xml:id='l2h-4' class="exception">RedirectMissingLocation</tt></b></dt>
+<dd>
+A 3xx redirect response code was provided but no Location: header
+was provided to point to the new location.
+</dd></dl>
+
+<p>
+<dl><dt><b><span class="typelabel">exception</span>&nbsp;<tt id='l2h-5' xml:id='l2h-5' class="exception">RedirectLimit</tt></b></dt>
+<dd>
+The maximum number of redirections was reached without coming to a final URI.
+</dd></dl>
+
+<p>
+<dl><dt><b><span class="typelabel">exception</span>&nbsp;<tt id='l2h-6' xml:id='l2h-6' class="exception">ServerNotFoundError</tt></b></dt>
+<dd>
+Unable to resolve the host name given.
+</dd></dl>
+
+<p>
+<dl><dt><b><span class="typelabel">exception</span>&nbsp;<tt id='l2h-7' xml:id='l2h-7' class="exception">RelativeURIError</tt></b></dt>
+<dd>
+A relative URI, as opposed to an absolute URI, was passed into request().
+</dd></dl>
+
+<p>
+<dl><dt><b><span class="typelabel">exception</span>&nbsp;<tt id='l2h-8' xml:id='l2h-8' class="exception">FailedToDecompressContent</tt></b></dt>
+<dd>
+The headers claimed that the content of the response was compressed but the
+decompression algorithm applied to the content failed.
+</dd></dl>
+
+<p>
+<dl><dt><b><span class="typelabel">exception</span>&nbsp;<tt id='l2h-9' xml:id='l2h-9' class="exception">UnimplementedDigestAuthOptionError</tt></b></dt>
+<dd>
+The server requested a type of Digest authentication that we
+are unfamiliar with.
+</dd></dl>
+
+<p>
+<dl><dt><b><span class="typelabel">exception</span>&nbsp;<tt id='l2h-10' xml:id='l2h-10' class="exception">UnimplementedHmacDigestAuthOptionError</tt></b></dt>
+<dd>
+The server requested a type of HMACDigest authentication that we
+are unfamiliar with.
+</dd></dl>
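+
+<p>
+These exceptions live in the <tt class="module">httplib2</tt> namespace and can be
+caught as usual; a brief sketch (the host name below is made up):
+
+<div class="verbatim"><pre>
+import httplib2
+
+h = httplib2.Http()
+try:
+    resp, content = h.request("http://no-such-host.invalid/")
+except httplib2.ServerNotFoundError:
+    # The host name could not be resolved.
+    resp, content = None, None
+except httplib2.HttpLib2Error:
+    # Base class of every other exception raised by httplib2.
+    resp, content = None, None
+</pre></div>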
+
+<p>
+<dl><dt><table cellpadding="0" cellspacing="0"><tr valign="baseline">
+  <td><nobr><b><span class="typelabel">class</span>&nbsp;<tt id='l2h-11' xml:id='l2h-11' class="class">Http</tt></b>(</nobr></td>
+  <td><var></var><big>[</big><var>cache=None</var><big>]</big><var>, </var><big>[</big><var>timeout=None</var><big>]</big><var>, </var><big>[</big><var>proxy_info=None</var><big>]</big><var></var>)</td></tr></table></dt>
+<dd>
+The class that represents a client HTTP interface.
+The <var>cache</var> parameter is either the name of a directory
+to be used as a flat file cache, or it must be an object that
+implements the required caching interface.
+The <var>timeout</var> parameter is the socket level timeout.
+The <var>proxy_info</var> is an instance of <tt class="class">ProxyInfo</tt> and is supplied
+if a proxy is to be used. Note that the Socksipy module must be
+installed for proxy support to work.
+</dl>
+
+<p>
+<dl><dt><table cellpadding="0" cellspacing="0"><tr valign="baseline">
+  <td><nobr><b><span class="typelabel">class</span>&nbsp;<tt id='l2h-12' xml:id='l2h-12' class="class">Response</tt></b>(</nobr></td>
+  <td><var>info</var>)</td></tr></table></dt>
+<dd>
+Response is a subclass of <tt class="class">dict</tt> and instances of this
+class are returned from calls
+to Http.request. The <var>info</var> parameter is either
+an <tt class="class">rfc822.Message</tt> or an <tt class="class">httplib.HTTPResponse</tt> object.
+</dl>
+
+<p>
+<dl><dt><table cellpadding="0" cellspacing="0"><tr valign="baseline">
+  <td><nobr><b><span class="typelabel">class</span>&nbsp;<tt id='l2h-13' xml:id='l2h-13' class="class">FileCache</tt></b>(</nobr></td>
+  <td><var>dir_name, </var><big>[</big><var>safe=safename</var><big>]</big><var></var>)</td></tr></table></dt>
+<dd>
+FileCache implements a Cache as a directory of files.
+The <var>dir_name</var> parameter is
+the name of the directory to use. If the directory does
+not exist then FileCache attempts to create the directory.
+The optional <var>safe</var> parameter is a function that generates
+the cache filename for each URI. A FileCache object is
+constructed and used for caching when you pass a directory name
+into the constructor of <tt class="class">Http</tt>.
+</dl>
+
+<p>
+<dl><dt><table cellpadding="0" cellspacing="0"><tr valign="baseline">
+  <td><nobr><b><span class="typelabel">class</span>&nbsp;<tt id='l2h-14' xml:id='l2h-14' class="class">ProxyInfo</tt></b>(</nobr></td>
+  <td><var>proxy_type, proxy_host, proxy_port, </var><big>[</big><var>proxy_rdns=None</var><big>]</big><var>, </var><big>[</big><var>proxy_user=None</var><big>]</big><var>, </var><big>[</big><var>proxy_pass=None</var><big>]</big><var></var>)</td></tr></table></dt>
+<dd>
+The parameter <var>proxy_type</var> must be set to one of the socks.PROXY_TYPE_XXX
+constants. The <var>proxy_host</var> and <var>proxy_port</var> must be set to the location
+of the proxy. The optional <var>proxy_rdns</var> should be set to True if
+the DNS server on the proxy should be used. The <var>proxy_user</var> and
+<var>proxy_pass</var> are supplied when the proxy is protected by authentication.
+</dl>
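+
+<p>
+For illustration, a <tt class="class">ProxyInfo</tt> instance might be combined with a
+file cache and a timeout like this (assumes the Socksipy <tt class="module">socks</tt>
+module is installed; the proxy address is illustrative):
+
+<div class="verbatim"><pre>
+import httplib2
+import socks  # provided by the Socksipy module
+
+proxy = httplib2.ProxyInfo(socks.PROXY_TYPE_HTTP, "proxy.example.com", 8080)
+h = httplib2.Http(cache=".cache", timeout=10, proxy_info=proxy)
+resp, content = h.request("http://example.org/")
+</pre></div>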
+
+<p>
+
+<p><br /></p><hr class='online-navigation' />
+<div class='online-navigation'>
+<!--Table of Child-Links-->
+<a name="CHILD_LINKS"><strong>Subsections</strong></a>
+
+<ul class="ChildLinks">
+<li><a href="http-objects.html">1.1.1 Http Objects</a>
+<li><a href="cache-objects.html">1.1.2 Cache Objects</a>
+<li><a href="response-objects.html">1.1.3 Response Objects</a>
+<li><a href="httplib2-example.html">1.1.4 Examples</a>
+</ul>
+<!--End of Table of Child-Links-->
+</div>
+
+<div class="navigation">
+<div class='online-navigation'>
+<p></p><hr />
+<table align="center" width="100%" cellpadding="0" cellspacing="2">
+<tr>
+<td class='online-navigation'><a rel="prev" title="1. Reference"
+  href="node2.html"><img src='previous.png'
+  border='0' height='32'  alt='Previous Page' width='32' /></a></td>
+<td class='online-navigation'><a rel="parent" title="1. Reference"
+  href="node2.html"><img src='up.png'
+  border='0' height='32'  alt='Up one Level' width='32' /></a></td>
+<td class='online-navigation'><a rel="next" title="1.1.1 http Objects"
+  href="http-objects.html"><img src='next.png'
+  border='0' height='32'  alt='Next Page' width='32' /></a></td>
+<td align="center" width="100%">The httplib2 Library</td>
+<td class='online-navigation'><a rel="contents" title="Table of Contents"
+  href="contents.html"><img src='contents.png'
+  border='0' height='32'  alt='Contents' width='32' /></a></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+</tr></table>
+<div class='online-navigation'>
+<b class="navlabel">Previous:</b>
+<a class="sectref" rel="prev" href="node2.html">1. Reference</a>
+<b class="navlabel">Up:</b>
+<a class="sectref" rel="parent" href="node2.html">1. Reference</a>
+<b class="navlabel">Next:</b>
+<a class="sectref" rel="next" href="http-objects.html">1.1.1 Http Objects</a>
+</div>
+</div>
+<hr />
+<span class="release-info">Release 0.3, documentation updated on Mar 8, 2007.</span>
+</div>
+<!--End of Navigation Panel-->
+
+</body>
+</html>
diff --git a/catapult/third_party/gsutil/third_party/httplib2/ref/modules.png b/catapult/third_party/gsutil/third_party/httplib2/ref/modules.png
new file mode 100644
index 0000000..8fa8b75
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/httplib2/ref/modules.png
Binary files differ
diff --git a/catapult/third_party/gsutil/third_party/httplib2/ref/next.png b/catapult/third_party/gsutil/third_party/httplib2/ref/next.png
new file mode 100644
index 0000000..cfe5e51
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/httplib2/ref/next.png
Binary files differ
diff --git a/catapult/third_party/gsutil/third_party/httplib2/ref/node2.html b/catapult/third_party/gsutil/third_party/httplib2/ref/node2.html
new file mode 100644
index 0000000..408be41
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/httplib2/ref/node2.html
@@ -0,0 +1,115 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html>
+<head>
+<link rel="STYLESHEET" href="ref.css" type='text/css' />
+<link rel="first" href="ref.html" title='The httplib2 Library' />
+<link rel='contents' href='contents.html' title="Contents" />
+<link rel='last' href='about.html' title='About this document...' />
+<link rel='help' href='about.html' title='About this document...' />
+<link rel="next" href="about.html" />
+<link rel="prev" href="contents.html" />
+<link rel="parent" href="ref.html" />
+<link rel="next" href="module-httplib2.html" />
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+<meta name='aesop' content='information' />
+<title>1. Reference</title>
+</head>
+<body>
+<div class="navigation">
+<div id='top-navigation-panel' xml:id='top-navigation-panel'>
+<table align="center" width="100%" cellpadding="0" cellspacing="2">
+<tr>
+<td class='online-navigation'><a rel="prev" title="Contents"
+  href="contents.html"><img src='previous.png'
+  border='0' height='32'  alt='Previous Page' width='32' /></a></td>
+<td class='online-navigation'><a rel="parent" title="The httplib2 Library"
+  href="ref.html"><img src='up.png'
+  border='0' height='32'  alt='Up one Level' width='32' /></a></td>
+<td class='online-navigation'><a rel="next" title="1.1 httplib2 A comprehensive"
+  href="module-httplib2.html"><img src='next.png'
+  border='0' height='32'  alt='Next Page' width='32' /></a></td>
+<td align="center" width="100%">The httplib2 Library</td>
+<td class='online-navigation'><a rel="contents" title="Table of Contents"
+  href="contents.html"><img src='contents.png'
+  border='0' height='32'  alt='Contents' width='32' /></a></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+</tr></table>
+<div class='online-navigation'>
+<b class="navlabel">Previous:</b>
+<a class="sectref" rel="prev" href="contents.html">Contents</a>
+<b class="navlabel">Up:</b>
+<a class="sectref" rel="parent" href="ref.html">The httplib2 Library</a>
+<b class="navlabel">Next:</b>
+<a class="sectref" rel="next" href="module-httplib2.html">1.1 httplib2 A comprehensive</a>
+</div>
+<hr /></div>
+</div>
+<!--End of Navigation Panel-->
+
+<h1><a name="SECTION002000000000000000000">
+1. Reference</a>
+</h1>
+
+<p>
+
+<p>
+
+<p><br /></p><hr class='online-navigation' />
+<div class='online-navigation'>
+<!--Table of Child-Links-->
+<a name="CHILD_LINKS"><strong>Subsections</strong></a>
+
+<ul class="ChildLinks">
+<li><a href="module-httplib2.html">1.1 <tt class="module">httplib2</tt>
+A comprehensive HTTP client library.</a>
+<ul>
+<li><a href="http-objects.html">1.1.1 Http Objects</a>
+<li><a href="cache-objects.html">1.1.2 Cache Objects</a>
+<li><a href="response-objects.html">1.1.3 Response Objects</a>
+<li><a href="httplib2-example.html">1.1.4 Examples</a>
+</ul></ul>
+<!--End of Table of Child-Links-->
+</div>
+
+<div class="navigation">
+<div class='online-navigation'>
+<p></p><hr />
+<table align="center" width="100%" cellpadding="0" cellspacing="2">
+<tr>
+<td class='online-navigation'><a rel="prev" title="Contents"
+  href="contents.html"><img src='previous.png'
+  border='0' height='32'  alt='Previous Page' width='32' /></a></td>
+<td class='online-navigation'><a rel="parent" title="The httplib2 Library"
+  href="ref.html"><img src='up.png'
+  border='0' height='32'  alt='Up one Level' width='32' /></a></td>
+<td class='online-navigation'><a rel="next" title="1.1 httplib2 A comprehensive"
+  href="module-httplib2.html"><img src='next.png'
+  border='0' height='32'  alt='Next Page' width='32' /></a></td>
+<td align="center" width="100%">The httplib2 Library</td>
+<td class='online-navigation'><a rel="contents" title="Table of Contents"
+  href="contents.html"><img src='contents.png'
+  border='0' height='32'  alt='Contents' width='32' /></a></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+</tr></table>
+<div class='online-navigation'>
+<b class="navlabel">Previous:</b>
+<a class="sectref" rel="prev" href="contents.html">Contents</a>
+<b class="navlabel">Up:</b>
+<a class="sectref" rel="parent" href="ref.html">The httplib2 Library</a>
+<b class="navlabel">Next:</b>
+<a class="sectref" rel="next" href="module-httplib2.html">1.1 httplib2 A comprehensive</a>
+</div>
+</div>
+<hr />
+<span class="release-info">Release 0.3, documentation updated on Mar 8, 2007.</span>
+</div>
+<!--End of Navigation Panel-->
+
+</body>
+</html>
diff --git a/catapult/third_party/gsutil/third_party/httplib2/ref/previous.png b/catapult/third_party/gsutil/third_party/httplib2/ref/previous.png
new file mode 100644
index 0000000..497def4
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/httplib2/ref/previous.png
Binary files differ
diff --git a/catapult/third_party/gsutil/third_party/httplib2/ref/pyfav.png b/catapult/third_party/gsutil/third_party/httplib2/ref/pyfav.png
new file mode 100644
index 0000000..d2d8669
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/httplib2/ref/pyfav.png
Binary files differ
diff --git a/catapult/third_party/gsutil/third_party/httplib2/ref/ref.css b/catapult/third_party/gsutil/third_party/httplib2/ref/ref.css
new file mode 100644
index 0000000..06a613c
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/httplib2/ref/ref.css
@@ -0,0 +1,243 @@
+/*
+ * The first part of this is the standard CSS generated by LaTeX2HTML,
+ * with the "empty" declarations removed.
+ */
+
+/* Century Schoolbook font is very similar to Computer Modern Math: cmmi */
+.math                   { font-family: "Century Schoolbook", serif; }
+.math i                 { font-family: "Century Schoolbook", serif;
+                          font-weight: bold }
+.boldmath               { font-family: "Century Schoolbook", serif;
+                          font-weight: bold }
+
+/*
+ * Implement both fixed-size and relative sizes.
+ *
+ * I think these can be safely removed, as it doesn't appear that
+ * LaTeX2HTML ever generates these, even though these are carried
+ * over from the LaTeX2HTML stylesheet.
+ */
+small.xtiny             { font-size : xx-small; }
+small.tiny              { font-size : x-small; }
+small.scriptsize        { font-size : smaller; }
+small.footnotesize      { font-size : small; }
+big.xlarge              { font-size : large; }
+big.xxlarge             { font-size : x-large; }
+big.huge                { font-size : larger; }
+big.xhuge               { font-size : xx-large; }
+
+/*
+ * Document-specific styles come next;
+ * these are added for the Python documentation.
+ *
+ * Note that the size specifications for the H* elements are because
+ * Netscape on Solaris otherwise doesn't get it right; they all end up
+ * the normal text size.
+ */
+
+body                    { color: #000000;
+                          background-color: #ffffff; }
+
+a:link:active           { color: #ff0000; }
+a:link:hover            { background-color: #bbeeff; }
+a:visited:hover         { background-color: #bbeeff; }
+a:visited               { color: #551a8b; }
+a:link                  { color: #0000bb; }
+
+h1, h2, h3, h4, h5, h6  { font-family: avantgarde, sans-serif;
+                          font-weight: bold; }
+h1                      { font-size: 180%; }
+h2                      { font-size: 150%; }
+h3, h4                  { font-size: 120%; }
+
+/* These are section titles used in navigation links, so make sure we
+ * match the section header font here, even it not the weight.
+ */
+.sectref                { font-family: avantgarde, sans-serif; }
+/* And the label before the titles in navigation: */
+.navlabel               { font-size: 85%; }
+
+
+/* LaTeX2HTML insists on inserting <br> elements into headers which
+ * are marked with \label.  This little bit of CSS magic ensures that
+ * these elements don't cause spurious whitespace to be added.
+ */
+h1>br, h2>br, h3>br,
+h4>br, h5>br, h6>br     { display: none; }
+
+code, tt                { font-family: "lucida typewriter", lucidatypewriter,
+                                       monospace; }
+var                     { font-family: times, serif;
+                          font-style: italic;
+                          font-weight: normal; }
+
+.Unix                   { font-variant: small-caps; }
+
+.typelabel              { font-family: lucida, sans-serif; }
+
+.navigation td          { background-color: #99ccff;
+                          font-weight: bold;
+                          font-family: avantgarde, sans-serif;
+                          font-size: 110%; }
+
+div.warning             { background-color: #fffaf0;
+                          border: thin solid black;
+                          padding: 1em;
+                          margin-left: 2em;
+                          margin-right: 2em; }
+
+div.warning .label      { font-family: sans-serif;
+                          font-size: 110%;
+                          margin-right: 0.5em; }
+
+div.note                { background-color: #fffaf0;
+                          border: thin solid black;
+                          padding: 1em;
+                          margin-left: 2em;
+                          margin-right: 2em; }
+
+div.note .label         { margin-right: 0.5em;
+                          font-family: sans-serif; }
+
+address                 { font-size: 80%; }
+.release-info           { font-style: italic;
+                          font-size: 80%; }
+
+.titlegraphic           { vertical-align: top; }
+
+.verbatim pre           { color: #00008b;
+                          font-family: "lucida typewriter", lucidatypewriter,
+                                       monospace;
+                          font-size: 90%; }
+.verbatim               { margin-left: 2em; }
+.verbatim .footer       { padding: 0.05in;
+                          font-size: 85%;
+                          background-color: #99ccff;
+                          margin-right: 0.5in; }
+
+.grammar                { background-color: #99ccff;
+                          margin-right: 0.5in;
+                          padding: 0.05in; }
+.grammar-footer         { padding: 0.05in;
+                          font-size: 85%; }
+.grammartoken           { font-family: "lucida typewriter", lucidatypewriter,
+                                       monospace; }
+
+.productions                  { background-color: #bbeeff; }
+.productions a:active         { color: #ff0000; }
+.productions a:link:hover     { background-color: #99ccff; }
+.productions a:visited:hover  { background-color: #99ccff; }
+.productions a:visited        { color: #551a8b; }
+.productions a:link           { color: #0000bb; }
+.productions table            { vertical-align: baseline;
+                                empty-cells: show; }
+.productions > table td,
+.productions > table th       { padding: 2px; }
+.productions > table td:first-child,
+.productions > table td:last-child {
+                                font-family: "lucida typewriter",
+                                             lucidatypewriter,
+                                             monospace;
+                                }
+/* same as the second selector above, but expressed differently for Opera */
+.productions > table td:first-child + td + td {
+                                font-family: "lucida typewriter",
+                                             lucidatypewriter,
+                                             monospace;
+                                vertical-align: baseline;
+                                }
+.productions > table td:first-child + td {
+                                padding-left: 1em;
+                                padding-right: 1em;
+                                }
+.productions > table tr       { vertical-align: baseline; }
+
+.email                  { font-family: avantgarde, sans-serif; }
+.mailheader             { font-family: avantgarde, sans-serif; }
+.mimetype               { font-family: avantgarde, sans-serif; }
+.newsgroup              { font-family: avantgarde, sans-serif; }
+.url                    { font-family: avantgarde, sans-serif; }
+.file                   { font-family: avantgarde, sans-serif; }
+.guilabel               { font-family: avantgarde, sans-serif; }
+
+.realtable              { border-collapse: collapse;
+                          border-color: black;
+                          border-style: solid;
+                          border-width: 0px 0px 2px 0px;
+                          empty-cells: show;
+                          margin-left: auto;
+                          margin-right: auto;
+                          padding-left: 0.4em;
+                          padding-right: 0.4em;
+                          }
+.realtable tbody        { vertical-align: baseline; }
+.realtable tfoot        { display: table-footer-group; }
+.realtable thead        { background-color: #99ccff;
+                          border-width: 0px 0px 2px 1px;
+                          display: table-header-group;
+                          font-family: avantgarde, sans-serif;
+                          font-weight: bold;
+                          vertical-align: baseline;
+                          }
+.realtable thead :first-child {
+                          border-width: 0px 0px 2px 0px;
+                          }
+.realtable thead th     { border-width: 0px 0px 2px 1px }
+.realtable td,
+.realtable th           { border-color: black;
+                          border-style: solid;
+                          border-width: 0px 0px 1px 1px;
+                          padding-left: 0.4em;
+                          padding-right: 0.4em;
+                          }
+.realtable td:first-child,
+.realtable th:first-child {
+                          border-left-width: 0px;
+                          vertical-align: baseline;
+                          }
+.center                 { text-align: center; }
+.left                   { text-align: left; }
+.right                  { text-align: right; }
+
+.refcount-info          { font-style: italic; }
+.refcount-info .value   { font-weight: bold;
+                          color: #006600; }
+
+/*
+ * Some decoration for the "See also:" blocks, in part inspired by some of
+ * the styling on Lars Marius Garshol's XSA pages.
+ * (The blue in the navigation bars is #99CCFF.)
+ */
+.seealso                { background-color: #fffaf0;
+                          border: thin solid black;
+                          padding: 0pt 1em 4pt 1em; }
+
+.seealso > .heading     { font-size: 110%;
+                          font-weight: bold; }
+
+/*
+ * Class 'availability' is used for module availability statements at
+ * the top of modules.
+ */
+.availability .platform { font-weight: bold; }
+
+
+/*
+ * Additional styles for the distutils package.
+ */
+.du-command             { font-family: monospace; }
+.du-option              { font-family: avantgarde, sans-serif; }
+.du-filevar             { font-family: avantgarde, sans-serif;
+                          font-style: italic; }
+.du-xxx:before          { content: "** ";
+                          font-weight: bold; }
+.du-xxx:after           { content: " **";
+                          font-weight: bold; }
+
+
+/*
+ * Some specialization for printed output.
+ */
+@media print {
+  .online-navigation    { display: none; }
+  }
diff --git a/catapult/third_party/gsutil/third_party/httplib2/ref/ref.html b/catapult/third_party/gsutil/third_party/httplib2/ref/ref.html
new file mode 100644
index 0000000..839d65e
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/httplib2/ref/ref.html
@@ -0,0 +1,129 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html>
+<head>
+<link rel="STYLESHEET" href="ref.css" type='text/css' />
+<link rel="first" href="ref.html" title='The httplib2 Library' />
+<link rel='contents' href='contents.html' title="Contents" />
+<link rel='last' href='about.html' title='About this document...' />
+<link rel='help' href='about.html' title='About this document...' />
+<link rel="next" href="contents.html" />
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+<meta name='aesop' content='information' />
+<title>The httplib2 Library</title>
+</head>
+<body>
+<div class="navigation">
+<div id='top-navigation-panel' xml:id='top-navigation-panel'>
+<table align="center" width="100%" cellpadding="0" cellspacing="2">
+<tr>
+<td class='online-navigation'><img src='previous.png'
+  border='0' height='32'  alt='Previous Page' width='32' /></td>
+<td class='online-navigation'><img src='up.png'
+  border='0' height='32'  alt='Up one Level' width='32' /></td>
+<td class='online-navigation'><a rel="next" title="Contents"
+  href="contents.html"><img src='next.png'
+  border='0' height='32'  alt='Next Page' width='32' /></a></td>
+<td align="center" width="100%">The httplib2 Library</td>
+<td class='online-navigation'><a rel="contents" title="Table of Contents"
+  href="contents.html"><img src='contents.png'
+  border='0' height='32'  alt='Contents' width='32' /></a></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+</tr></table>
+<div class='online-navigation'>
+<b class="navlabel">Next:</b>
+<a class="sectref" rel="next" href="contents.html">Contents</a>
+</div>
+<hr /></div>
+</div>
+<!--End of Navigation Panel-->
+
+<p>
+
+<div class="titlepage">
+<div class='center'>
+<h1>The httplib2 Library</h1>
+<p><b><font size="+2">Joe Gregorio</font></b></p>
+<p>
+Email: <span class="email">joe@bitworking.org</span>
+</p>
+<p><strong>Release 0.3</strong><br />
+<strong>Mar 8, 2007</strong></p>
+<p></p>
+</div>
+</div>
+
+<p>
+
+<h3>Abstract:</h3>
+<div class="ABSTRACT">
+
+<p>
+The <tt class="module">httplib2</tt> module is a comprehensive HTTP client library
+that handles caching, keep-alive, compression, redirects and
+many kinds of authentication.
+
+<p>
+</div>
+<p>
+
+<p>
+
+<p><br /></p><hr class='online-navigation' />
+<div class='online-navigation'>
+<!--Table of Child-Links-->
+<a name="CHILD_LINKS"></a>
+
+<ul class="ChildLinks">
+<li><a href="contents.html">Contents</a>
+<li><a href="node2.html">1. Reference</a>
+<ul>
+<li><a href="module-httplib2.html">1.1 <tt class="module">httplib2</tt>
+A comprehensive HTTP client library.</a>
+<ul>
+<li><a href="http-objects.html">1.1.1 Http Objects</a>
+<li><a href="cache-objects.html">1.1.2 Cache Objects</a>
+<li><a href="response-objects.html">1.1.3 Response Objects</a>
+<li><a href="httplib2-example.html">1.1.4 Examples</a>
+</ul>
+</ul>
+<li><a href="about.html">About this document ...</a>
+</ul>
+<!--End of Table of Child-Links-->
+</div>
+
+<div class="navigation">
+<div class='online-navigation'>
+<p></p><hr />
+<table align="center" width="100%" cellpadding="0" cellspacing="2">
+<tr>
+<td class='online-navigation'><img src='previous.png'
+  border='0' height='32'  alt='Previous Page' width='32' /></td>
+<td class='online-navigation'><img src='up.png'
+  border='0' height='32'  alt='Up one Level' width='32' /></td>
+<td class='online-navigation'><a rel="next" title="Contents"
+  href="contents.html"><img src='next.png'
+  border='0' height='32'  alt='Next Page' width='32' /></a></td>
+<td align="center" width="100%">The httplib2 Library</td>
+<td class='online-navigation'><a rel="contents" title="Table of Contents"
+  href="contents.html"><img src='contents.png'
+  border='0' height='32'  alt='Contents' width='32' /></a></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+</tr></table>
+<div class='online-navigation'>
+<b class="navlabel">Next:</b>
+<a class="sectref" rel="next" href="contents.html">Contents</a>
+</div>
+</div>
+<hr />
+<span class="release-info">Release 0.3, documentation updated on Mar 8, 2007.</span>
+</div>
+<!--End of Navigation Panel-->
+
+</body>
+</html>
diff --git a/catapult/third_party/gsutil/third_party/httplib2/ref/response-objects.html b/catapult/third_party/gsutil/third_party/httplib2/ref/response-objects.html
new file mode 100644
index 0000000..74b79f5
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/httplib2/ref/response-objects.html
@@ -0,0 +1,151 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<html>
+<head>
+<link rel="STYLESHEET" href="ref.css" type='text/css' />
+<link rel="first" href="ref.html" title='The httplib2 Library' />
+<link rel='contents' href='contents.html' title="Contents" />
+<link rel='last' href='about.html' title='About this document...' />
+<link rel='help' href='about.html' title='About this document...' />
+<link rel="next" href="httplib2-example.html" />
+<link rel="prev" href="cache-objects.html" />
+<link rel="parent" href="module-httplib2.html" />
+<link rel="next" href="httplib2-example.html" />
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+<meta name='aesop' content='information' />
+<title>1.1.3 Response Objects</title>
+</head>
+<body>
+<div class="navigation">
+<div id='top-navigation-panel' xml:id='top-navigation-panel'>
+<table align="center" width="100%" cellpadding="0" cellspacing="2">
+<tr>
+<td class='online-navigation'><a rel="prev" title="1.1.2 cache Objects"
+  href="cache-objects.html"><img src='previous.png'
+  border='0' height='32'  alt='Previous Page' width='32' /></a></td>
+<td class='online-navigation'><a rel="parent" title="1.1 httplib2 A comprehensive"
+  href="module-httplib2.html"><img src='up.png'
+  border='0' height='32'  alt='Up one Level' width='32' /></a></td>
+<td class='online-navigation'><a rel="next" title="1.1.4 Examples"
+  href="httplib2-example.html"><img src='next.png'
+  border='0' height='32'  alt='Next Page' width='32' /></a></td>
+<td align="center" width="100%">The httplib2 Library</td>
+<td class='online-navigation'><a rel="contents" title="Table of Contents"
+  href="contents.html"><img src='contents.png'
+  border='0' height='32'  alt='Contents' width='32' /></a></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+</tr></table>
+<div class='online-navigation'>
+<b class="navlabel">Previous:</b>
+<a class="sectref" rel="prev" href="cache-objects.html">1.1.2 Cache Objects</a>
+<b class="navlabel">Up:</b>
+<a class="sectref" rel="parent" href="module-httplib2.html">1.1 httplib2 A comprehensive</a>
+<b class="navlabel">Next:</b>
+<a class="sectref" rel="next" href="httplib2-example.html">1.1.4 Examples</a>
+</div>
+<hr /></div>
+</div>
+<!--End of Navigation Panel-->
+
+<h2><a name="SECTION002130000000000000000"></a>
+<a name="response-objects"></a>
+<br>
+1.1.3 Response Objects
+</h2>
+
+<p>
+Response objects are derived from <tt class="class">dict</tt> and map
+header names (lower case with the trailing colon removed)
+to header values. In addition to the dict methods,
+a Response object also has:
+
+<p>
+<dl><dt><b><tt id='l2h-26' xml:id='l2h-26' class="member">fromcache</tt></b></dt>
+<dd>
+If <code>true</code> then the response was returned from the cache.
+</dl>
+
+<p>
+<dl><dt><b><tt id='l2h-27' xml:id='l2h-27' class="member">version</tt></b></dt>
+<dd>
+The version of HTTP that the server supports. A value
+of 11 means '1.1'.
+</dl>
+
+<p>
+<dl><dt><b><tt id='l2h-28' xml:id='l2h-28' class="member">status</tt></b></dt>
+<dd>
+The numerical HTTP status code returned in the response.
+</dl>
+
+<p>
+<dl><dt><b><tt id='l2h-29' xml:id='l2h-29' class="member">reason</tt></b></dt>
+<dd>
+The human readable component of the HTTP response status code.
+</dl>
+
+<p>
+<dl><dt><b><tt id='l2h-30' xml:id='l2h-30' class="member">previous</tt></b></dt>
+<dd>
+If redirects are followed then the <tt class="class">Response</tt> object returned
+is just for the very last HTTP request and <var>previous</var> points to
+the previous <tt class="class">Response</tt> object. In this manner they form a chain
+going back through the responses to the very first response.
+Will be <code>None</code> if there are no previous responses.
+</dl>
+
+<p>
+The Response object also populates the header <code>content-location</code>, which
+contains the URI that was ultimately requested. This is useful when
+redirects were encountered: you can determine the ultimate URI that
+the request was sent to. All Response objects contain this key value,
+including <code>previous</code> responses, so you can determine the entire
+chain of redirects. If <tt class="member">Http.force_exception_to_status_code</tt> is <code>True</code>
+and the number of redirects has exceeded the allowed number of
+redirects, then the <tt class="class">Response</tt> object will report the error
+in the status code, but the complete chain of previous responses will
+still be intact.
+
+<p>
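+For illustration, the members above can be read from a returned
+<tt class="class">Response</tt> object like this (the URI is illustrative):
+
+<div class="verbatim"><pre>
+import httplib2
+
+h = httplib2.Http(".cache")
+resp, content = h.request("http://example.org/")
+
+print(resp.status)                 # numerical status code, e.g. 200
+print(resp.reason)                 # human readable component, e.g. "OK"
+print(resp.fromcache)              # True if the response came from the cache
+print(resp['content-location'])    # the URI that was ultimately requested
+
+if resp.previous is not None:
+    # Walk the redirect chain back towards the first response.
+    print(resp.previous.status)
+</pre></div>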
+
+<div class="navigation">
+<div class='online-navigation'>
+<p></p><hr />
+<table align="center" width="100%" cellpadding="0" cellspacing="2">
+<tr>
+<td class='online-navigation'><a rel="prev" title="1.1.2 cache Objects"
+  href="cache-objects.html"><img src='previous.png'
+  border='0' height='32'  alt='Previous Page' width='32' /></a></td>
+<td class='online-navigation'><a rel="parent" title="1.1 httplib2 A comprehensive"
+  href="module-httplib2.html"><img src='up.png'
+  border='0' height='32'  alt='Up one Level' width='32' /></a></td>
+<td class='online-navigation'><a rel="next" title="1.1.4 Examples"
+  href="httplib2-example.html"><img src='next.png'
+  border='0' height='32'  alt='Next Page' width='32' /></a></td>
+<td align="center" width="100%">The httplib2 Library</td>
+<td class='online-navigation'><a rel="contents" title="Table of Contents"
+  href="contents.html"><img src='contents.png'
+  border='0' height='32'  alt='Contents' width='32' /></a></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+<td class='online-navigation'><img src='blank.png'
+  border='0' height='32'  alt='' width='32' /></td>
+</tr></table>
+<div class='online-navigation'>
+<b class="navlabel">Previous:</b>
+<a class="sectref" rel="prev" href="cache-objects.html">1.1.2 Cache Objects</a>
+<b class="navlabel">Up:</b>
+<a class="sectref" rel="parent" href="module-httplib2.html">1.1 httplib2 A comprehensive</a>
+<b class="navlabel">Next:</b>
+<a class="sectref" rel="next" href="httplib2-example.html">1.1.4 Examples</a>
+</div>
+</div>
+<hr />
+<span class="release-info">Release 0.3, documentation updated on Mar 8, 2007.</span>
+</div>
+<!--End of Navigation Panel-->
+
+</body>
+</html>
diff --git a/catapult/third_party/gsutil/third_party/httplib2/ref/up.png b/catapult/third_party/gsutil/third_party/httplib2/ref/up.png
new file mode 100644
index 0000000..a90e028
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/httplib2/ref/up.png
Binary files differ
diff --git a/catapult/third_party/gsutil/third_party/httplib2/setup.py b/catapult/third_party/gsutil/third_party/httplib2/setup.py
old mode 100644
new mode 100755
diff --git a/catapult/third_party/gsutil/third_party/httplib2/upload-diffs.py b/catapult/third_party/gsutil/third_party/httplib2/upload-diffs.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/oauth2client/oauth2client/util.py b/catapult/third_party/gsutil/third_party/oauth2client/oauth2client/util.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/oauth2client/scripts/build-docs b/catapult/third_party/gsutil/third_party/oauth2client/scripts/build-docs
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/oauth2client/scripts/fetch_gae_sdk.py b/catapult/third_party/gsutil/third_party/oauth2client/scripts/fetch_gae_sdk.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/oauth2client/scripts/push-docs b/catapult/third_party/gsutil/third_party/oauth2client/scripts/push-docs
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/oauth2client/setup.py b/catapult/third_party/gsutil/third_party/oauth2client/setup.py
old mode 100644
new mode 100755
diff --git a/catapult/third_party/gsutil/third_party/oauth2client/tests/test_appengine.py b/catapult/third_party/gsutil/third_party/oauth2client/tests/test_appengine.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/oauth2client/tests/test_django_orm.py b/catapult/third_party/gsutil/third_party/oauth2client/tests/test_django_orm.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/oauth2client/tests/test_file.py b/catapult/third_party/gsutil/third_party/oauth2client/tests/test_file.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/oauth2client/tests/test_jwt.py b/catapult/third_party/gsutil/third_party/oauth2client/tests/test_jwt.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/oauth2client/tests/test_oauth2client.py b/catapult/third_party/gsutil/third_party/oauth2client/tests/test_oauth2client.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/oauth2client/tests/test_service_account.py b/catapult/third_party/gsutil/third_party/oauth2client/tests/test_service_account.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/appstats/protorpc_appstats/__init__.py b/catapult/third_party/gsutil/third_party/protorpc/demos/appstats/protorpc_appstats/__init__.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/appstats/protorpc_appstats/main.py b/catapult/third_party/gsutil/third_party/protorpc/demos/appstats/protorpc_appstats/main.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/echo/app.yaml b/catapult/third_party/gsutil/third_party/protorpc/demos/echo/app.yaml
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/echo/appengine_config.py b/catapult/third_party/gsutil/third_party/protorpc/demos/echo/appengine_config.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/echo/main.py b/catapult/third_party/gsutil/third_party/protorpc/demos/echo/main.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/echo/services.py b/catapult/third_party/gsutil/third_party/protorpc/demos/echo/services.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/guestbook/client/appengine_config.py b/catapult/third_party/gsutil/third_party/protorpc/demos/guestbook/client/appengine_config.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/guestbook/client/guestbook.py b/catapult/third_party/gsutil/third_party/protorpc/demos/guestbook/client/guestbook.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/guestbook/client/main.py b/catapult/third_party/gsutil/third_party/protorpc/demos/guestbook/client/main.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/guestbook/server/appengine_config.py b/catapult/third_party/gsutil/third_party/protorpc/demos/guestbook/server/appengine_config.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/guestbook/server/guestbook.py b/catapult/third_party/gsutil/third_party/protorpc/demos/guestbook/server/guestbook.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/hello/server/app.yaml b/catapult/third_party/gsutil/third_party/protorpc/demos/hello/server/app.yaml
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/hello/server/appengine_config.py b/catapult/third_party/gsutil/third_party/protorpc/demos/hello/server/appengine_config.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/hello/server/services.py b/catapult/third_party/gsutil/third_party/protorpc/demos/hello/server/services.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/quotas/backend/app.yaml b/catapult/third_party/gsutil/third_party/protorpc/demos/quotas/backend/app.yaml
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/quotas/backend/backends.yaml b/catapult/third_party/gsutil/third_party/protorpc/demos/quotas/backend/backends.yaml
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/quotas/backend/quotas/main.py b/catapult/third_party/gsutil/third_party/protorpc/demos/quotas/backend/quotas/main.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/quotas/backend/quotas/services.py b/catapult/third_party/gsutil/third_party/protorpc/demos/quotas/backend/quotas/services.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/quotas/backend/quotas/services_test.py b/catapult/third_party/gsutil/third_party/protorpc/demos/quotas/backend/quotas/services_test.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/tunes_db/client/app.yaml b/catapult/third_party/gsutil/third_party/protorpc/demos/tunes_db/client/app.yaml
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/tunes_db/client/appengine_config.py b/catapult/third_party/gsutil/third_party/protorpc/demos/tunes_db/client/appengine_config.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/tunes_db/client/fetch_descriptor.py b/catapult/third_party/gsutil/third_party/protorpc/demos/tunes_db/client/fetch_descriptor.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/tunes_db/client/main.py b/catapult/third_party/gsutil/third_party/protorpc/demos/tunes_db/client/main.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/tunes_db/client/tunes_db.py b/catapult/third_party/gsutil/third_party/protorpc/demos/tunes_db/client/tunes_db.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/tunes_db/server/app.yaml b/catapult/third_party/gsutil/third_party/protorpc/demos/tunes_db/server/app.yaml
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/tunes_db/server/datastore_test_util.py b/catapult/third_party/gsutil/third_party/protorpc/demos/tunes_db/server/datastore_test_util.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/tunes_db/server/index.yaml b/catapult/third_party/gsutil/third_party/protorpc/demos/tunes_db/server/index.yaml
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/tunes_db/server/main.py b/catapult/third_party/gsutil/third_party/protorpc/demos/tunes_db/server/main.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/tunes_db/server/model.py b/catapult/third_party/gsutil/third_party/protorpc/demos/tunes_db/server/model.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/tunes_db/server/model_test.py b/catapult/third_party/gsutil/third_party/protorpc/demos/tunes_db/server/model_test.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/tunes_db/server/services.py b/catapult/third_party/gsutil/third_party/protorpc/demos/tunes_db/server/services.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/tunes_db/server/tunes_db.py b/catapult/third_party/gsutil/third_party/protorpc/demos/tunes_db/server/tunes_db.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/demos/tunes_db/server/tunes_db_test.py b/catapult/third_party/gsutil/third_party/protorpc/demos/tunes_db/server/tunes_db_test.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/gen_protorpc.py b/catapult/third_party/gsutil/third_party/protorpc/gen_protorpc.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/__init__.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/__init__.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/definition.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/definition.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/definition_test.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/definition_test.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/descriptor.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/descriptor.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/descriptor_test.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/descriptor_test.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/end2end_test.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/end2end_test.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/experimental/__init__.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/experimental/__init__.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/generate.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/generate.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/generate_proto.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/generate_proto.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/generate_proto_test.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/generate_proto_test.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/generate_python.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/generate_python.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/generate_python_test.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/generate_python_test.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/generate_test.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/generate_test.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/message_types.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/message_types.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/message_types_test.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/message_types_test.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/messages.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/messages.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/messages_test.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/messages_test.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/protobuf.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/protobuf.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/protobuf_test.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/protobuf_test.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/protojson.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/protojson.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/protojson_test.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/protojson_test.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/protourlencode.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/protourlencode.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/protourlencode_test.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/protourlencode_test.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/registry.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/registry.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/registry_test.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/registry_test.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/remote.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/remote.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/remote_test.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/remote_test.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/test_util.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/test_util.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/transport.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/transport.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/transport_test.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/transport_test.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/util.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/util.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/util_test.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/util_test.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/webapp/__init__.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/webapp/__init__.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/webapp/forms.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/webapp/forms.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/webapp/forms_test.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/webapp/forms_test.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/webapp/service_handlers.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/webapp/service_handlers.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/webapp/service_handlers_test.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/webapp/service_handlers_test.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/webapp_test_util.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/webapp_test_util.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/wsgi/__init__.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/wsgi/__init__.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/wsgi/service.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/wsgi/service.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/wsgi/service_test.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/wsgi/service_test.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/wsgi/util.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/wsgi/util.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/protorpc/protorpc/wsgi/util_test.py b/catapult/third_party/gsutil/third_party/protorpc/protorpc/wsgi/util_test.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/pyasn1-modules/tools/cmpdump.py b/catapult/third_party/gsutil/third_party/pyasn1-modules/tools/cmpdump.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/pyasn1-modules/tools/crldump.py b/catapult/third_party/gsutil/third_party/pyasn1-modules/tools/crldump.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/pyasn1-modules/tools/crmfdump.py b/catapult/third_party/gsutil/third_party/pyasn1-modules/tools/crmfdump.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/pyasn1-modules/tools/ocspclient.py b/catapult/third_party/gsutil/third_party/pyasn1-modules/tools/ocspclient.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/pyasn1-modules/tools/ocspreqdump.py b/catapult/third_party/gsutil/third_party/pyasn1-modules/tools/ocspreqdump.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/pyasn1-modules/tools/ocsprspdump.py b/catapult/third_party/gsutil/third_party/pyasn1-modules/tools/ocsprspdump.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/pyasn1-modules/tools/ocspserver.py b/catapult/third_party/gsutil/third_party/pyasn1-modules/tools/ocspserver.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/pyasn1-modules/tools/pkcs10dump.py b/catapult/third_party/gsutil/third_party/pyasn1-modules/tools/pkcs10dump.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/pyasn1-modules/tools/pkcs1dump.py b/catapult/third_party/gsutil/third_party/pyasn1-modules/tools/pkcs1dump.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/pyasn1-modules/tools/pkcs7dump.py b/catapult/third_party/gsutil/third_party/pyasn1-modules/tools/pkcs7dump.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/pyasn1-modules/tools/pkcs8dump.py b/catapult/third_party/gsutil/third_party/pyasn1-modules/tools/pkcs8dump.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/pyasn1-modules/tools/snmpget.py b/catapult/third_party/gsutil/third_party/pyasn1-modules/tools/snmpget.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/pyasn1-modules/tools/x509dump.py b/catapult/third_party/gsutil/third_party/pyasn1-modules/tools/x509dump.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/python-gflags/debian/rules b/catapult/third_party/gsutil/third_party/python-gflags/debian/rules
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/python-gflags/gflags.py b/catapult/third_party/gsutil/third_party/python-gflags/gflags.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/python-gflags/gflags2man.py b/catapult/third_party/gsutil/third_party/python-gflags/gflags2man.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/python-gflags/gflags_validators.py b/catapult/third_party/gsutil/third_party/python-gflags/gflags_validators.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/retry-decorator/retry_decorator/retry_decorator.py b/catapult/third_party/gsutil/third_party/retry-decorator/retry_decorator/retry_decorator.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/rsa/.pydevproject b/catapult/third_party/gsutil/third_party/rsa/.pydevproject
new file mode 100644
index 0000000..af1e4ee
--- /dev/null
+++ b/catapult/third_party/gsutil/third_party/rsa/.pydevproject
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<?eclipse-pydev version="1.0"?>
+
+<pydev_project>
+<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
+<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
+<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
+<path>/python-rsa</path>
+</pydev_pathproperty>
+<pydev_pathproperty name="org.python.pydev.PROJECT_EXTERNAL_SOURCE_PATH">
+<path>C:\cygwin\home\Sybren\python-rsa-venv-py27\Lib\site-packages\pyasn1-0.1.3-py2.7.egg</path>
+</pydev_pathproperty>
+</pydev_project>
diff --git a/catapult/third_party/gsutil/third_party/rsa/create_timing_table.py b/catapult/third_party/gsutil/third_party/rsa/create_timing_table.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/rsa/doc/make.bat b/catapult/third_party/gsutil/third_party/rsa/doc/make.bat
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/rsa/run_tests.py b/catapult/third_party/gsutil/third_party/rsa/run_tests.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/rsa/speed.sh b/catapult/third_party/gsutil/third_party/rsa/speed.sh
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/rsa/tests/test_common.py b/catapult/third_party/gsutil/third_party/rsa/tests/test_common.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/rsa/tests/test_pem.py b/catapult/third_party/gsutil/third_party/rsa/tests/test_pem.py
old mode 100755
new mode 100644
diff --git a/catapult/third_party/gsutil/third_party/six/.hgignore b/catapult/third_party/gsutil/third_party/six/.hgignore
index 2a22acf..8815294 100644
--- a/catapult/third_party/gsutil/third_party/six/.hgignore
+++ b/catapult/third_party/gsutil/third_party/six/.hgignore
@@ -4,3 +4,4 @@
 MANIFEST
 documentation/_build
 .tox
+six.egg-info
diff --git a/catapult/third_party/gsutil/third_party/six/.hgtags b/catapult/third_party/gsutil/third_party/six/.hgtags
index 619c287..dcaeaa0 100644
--- a/catapult/third_party/gsutil/third_party/six/.hgtags
+++ b/catapult/third_party/gsutil/third_party/six/.hgtags
@@ -14,3 +14,4 @@
 307b88b684ecd1cdefb7fa7b0afec5a055d74a81 1.7.1
 cb0ed571b64a7fd87d2062b2f7350b671d842095 1.7.2
 31d9088544784ca0dd3bffc7e14df4f80327016d 1.7.3
+fa79f187d6f288d698f2486358d70df688074a94 1.8.0
diff --git a/catapult/third_party/gsutil/third_party/six/CHANGES b/catapult/third_party/gsutil/third_party/six/CHANGES
index 87315e3..4b9425f 100644
--- a/catapult/third_party/gsutil/third_party/six/CHANGES
+++ b/catapult/third_party/gsutil/third_party/six/CHANGES
@@ -3,38 +3,61 @@
 
 This file lists the changes in each six version.
 
+1.9.0
+-----
+
+- Issue #106: Support the `flush` parameter to `six.print_`.
+
+- Pull request #48 and issue #15: Add the `python_2_unicode_compatible`
+  decorator.
+
+- Pull request #57 and issue #50: Add several compatibility methods for unittest
+  assertions that were renamed between Python 2 and 3.
+
+- Issue #105 and pull request #58: Ensure `six.wraps` respects the *updated* and
+  *assigned* arguments.
+
+- Issue #102: Add `raise_from` to abstract out Python 3's raise from syntax.
+
+- Issue #97: Optimize `six.iterbytes` on Python 2.
+
+- Issue #98: Fix `six.moves` race condition in multi-threaded code.
+
+- Pull request #51: Add `six.view(keys|values|items)`, which provide dictionary
+  views on Python 2.7+.
+
 1.8.0
 -----
 
-- Issue #90: Add six.moves.shlex_quote.
+- Issue #90: Add `six.moves.shlex_quote`.
 
-- Issue #59: Add six.moves.intern.
+- Issue #59: Add `six.moves.intern`.
 
-- Add six.urllib.parse.uses_(fragment|netloc|params|query|relative).
+- Add `six.urllib.parse.uses_(fragment|netloc|params|query|relative)`.
 
-- Issue #88: Fix add_metaclass when the class has __slots__ containing
-  "__weakref__" or "__dict__".
+- Issue #88: Fix add_metaclass when the class has `__slots__` containing
+  `__weakref__` or `__dict__`.
 
 - Issue #89: Make six use absolute imports.
 
-- Issue #85: Always accept *updated* and *assigned* arguments for wraps().
+- Issue #85: Always accept *updated* and *assigned* arguments for `wraps()`.
 
-- Issue #86: In reraise(), instantiate the exception if the second argument is
-  None.
+- Issue #86: In `reraise()`, instantiate the exception if the second argument is
+  `None`.
 
-- Pull request #45: Add six.moves.email_mime_nonmultipart.
+- Pull request #45: Add `six.moves.email_mime_nonmultipart`.
 
-- Issue #81: Add six.urllib.request.splittag mapping.
+- Issue #81: Add `six.urllib.request.splittag` mapping.
 
-- Issue #80: Add six.urllib.request.splituser mapping.
+- Issue #80: Add `six.urllib.request.splituser` mapping.
 
 1.7.3
 -----
 
 - Issue #77: Fix import six on Python 3.4 with a custom loader.
 
-- Issue #74: six.moves.xmlrpc_server should map to SimpleXMLRPCServer on Python
-  2 as documented not xmlrpclib.
+- Issue #74: `six.moves.xmlrpc_server` should map to `SimpleXMLRPCServer` on Python
+  2 as documented not `xmlrpclib`.
 
 1.7.2
 -----
diff --git a/catapult/third_party/gsutil/third_party/six/CONTRIBUTORS b/catapult/third_party/gsutil/third_party/six/CONTRIBUTORS
index 29b0f6a..81c2eae 100644
--- a/catapult/third_party/gsutil/third_party/six/CONTRIBUTORS
+++ b/catapult/third_party/gsutil/third_party/six/CONTRIBUTORS
@@ -3,12 +3,15 @@
 otherwise worked to improve six:
 
 Marc Abramowitz
+Alexander Artemenko
 Aymeric Augustin
 Ned Batchelder
 Jason R. Coombs
 Julien Danjou
 Ben Darnell
 Ben Davis
+Tim Graham
+Thomas Grainger
 Joshua Harlow
 Anselm Kruis
 Alexander Lukanin
diff --git a/catapult/third_party/gsutil/third_party/six/LICENSE b/catapult/third_party/gsutil/third_party/six/LICENSE
index d76e024..e558f9d 100644
--- a/catapult/third_party/gsutil/third_party/six/LICENSE
+++ b/catapult/third_party/gsutil/third_party/six/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2010-2014 Benjamin Peterson
+Copyright (c) 2010-2015 Benjamin Peterson
 
 Permission is hereby granted, free of charge, to any person obtaining a copy of
 this software and associated documentation files (the "Software"), to deal in
diff --git a/catapult/third_party/gsutil/third_party/six/README b/catapult/third_party/gsutil/third_party/six/README
index 4de73fa..32bab7c 100644
--- a/catapult/third_party/gsutil/third_party/six/README
+++ b/catapult/third_party/gsutil/third_party/six/README
@@ -9,8 +9,8 @@
 
 Online documentation is at http://pythonhosted.org/six/.
 
-Bugs can be reported to http://bitbucket.org/gutworth/six.  The code can also be
-found there.
+Bugs can be reported to https://bitbucket.org/gutworth/six.  The code can also
+be found there.
 
 For questions about six or porting in general, email the python-porting mailing
 list: http://mail.python.org/mailman/listinfo/python-porting
diff --git a/catapult/third_party/gsutil/third_party/six/documentation/conf.py b/catapult/third_party/gsutil/third_party/six/documentation/conf.py
index fd285c7..0215bdd 100644
--- a/catapult/third_party/gsutil/third_party/six/documentation/conf.py
+++ b/catapult/third_party/gsutil/third_party/six/documentation/conf.py
@@ -33,7 +33,7 @@
 
 # General information about the project.
 project = u"six"
-copyright = u"2010-2014, Benjamin Peterson"
+copyright = u"2010-2015, Benjamin Peterson"
 
 sys.path.append(os.path.abspath(os.path.join(".", "..")))
 from six import __version__ as six_version
@@ -213,5 +213,5 @@
 
 # -- Intersphinx ---------------------------------------------------------------
 
-intersphinx_mapping = {"py2" : ("http://docs.python.org/2/", None),
-                       "py3" : ("http://docs.python.org/3/", None)}
+intersphinx_mapping = {"py2" : ("https://docs.python.org/2/", None),
+                       "py3" : ("https://docs.python.org/3/", None)}
diff --git a/catapult/third_party/gsutil/third_party/six/documentation/index.rst b/catapult/third_party/gsutil/third_party/six/documentation/index.rst
index 0adadc2..1f27400 100644
--- a/catapult/third_party/gsutil/third_party/six/documentation/index.rst
+++ b/catapult/third_party/gsutil/third_party/six/documentation/index.rst
@@ -18,7 +18,7 @@
 
 The name, "six", comes from the fact that 2*3 equals 6.  Why not addition?
 Multiplication is more powerful, and, anyway, "five" has already been snatched
-away by the Zope Five project.
+away by the (admittedly now moribund) Zope Five project.
 
 
 Indices and tables
@@ -160,7 +160,7 @@
 
 
 .. function:: next(it)
-.. function:: advance_iterator(it)
+              advance_iterator(it)
 
    Get the next item of iterator *it*.  :exc:`py3:StopIteration` is raised if
    the iterator is exhausted.  This is a replacement for calling ``it.next()``
@@ -203,6 +203,27 @@
    *kwargs* are passed through to the underlying method.
 
 
+.. function:: viewkeys(dictionary)
+
+   Return a view over *dictionary*\'s keys. This replaces
+   :meth:`py2:dict.viewkeys` on Python 2.7 and :meth:`py3:dict.keys` on
+   Python 3.
+
+
+.. function:: viewvalues(dictionary)
+
+   Return a view over *dictionary*\'s values. This replaces
+   :meth:`py2:dict.viewvalues` on Python 2.7 and :meth:`py3:dict.values` on
+   Python 3.
+
+
+.. function:: viewitems(dictionary)
+
+   Return a view over *dictionary*\'s items. This replaces
+   :meth:`py2:dict.viewitems` on Python 2.7 and :meth:`py3:dict.items` on
+   Python 3.
+
+
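A minimal usage sketch for the view helpers above, assuming six 1.9.0+ is importable (the dictionary contents are illustrative):

    import six

    d = {"a": 1, "b": 2}
    keys = six.viewkeys(d)        # live dict view, not a copied list
    six.print_(sorted(keys))      # ['a', 'b']
    d["c"] = 3
    six.print_("c" in keys)       # True -- the view reflects later mutations
    assert set(six.viewvalues(d)) == {1, 2, 3}
    assert set(six.viewitems(d)) == {("a", 1), ("b", 2), ("c", 3)}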
 .. function:: create_bound_method(func, obj)
 
    Return a method object wrapping *func* and bound to *obj*.  On both Python 2
@@ -222,7 +243,7 @@
    aliased to :class:`py3:object`.)
 
 
-.. function:: wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES)
+.. decorator:: wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES)
 
    This is exactly the :func:`py3:functools.wraps` decorator, but it sets the
    ``__wrapped__`` attribute on what it decorates as :func:`py3:functools.wraps`
@@ -249,10 +270,11 @@
       :func:`exec` with them should be avoided.
 
 
-.. function:: print_(*args, *, file=sys.stdout, end="\\n", sep=" ")
+.. function:: print_(*args, *, file=sys.stdout, end="\\n", sep=" ", flush=False)
 
    Print *args* into *file*.  Each argument will be separated with *sep* and
-   *end* will be written to the file after the last argument is printed.
+   *end* will be written to the file after the last argument is printed.  If
+   *flush* is true, ``file.flush()`` will be called after all data is written.
 
    .. note::
 
@@ -261,6 +283,13 @@
       ok. :)
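A small sketch of the *flush* parameter in use, assuming six 1.9.0+; the in-memory buffer is only there to keep the snippet self-contained:

    import sys
    import six

    # Flush immediately so progress output is not held in the stream buffer.
    six.print_("progress: 42%", file=sys.stderr, flush=True)

    # Any object with write()/flush() works, e.g. six.StringIO.
    buf = six.StringIO()
    six.print_("a", "b", sep="-", end="!\n", file=buf, flush=True)
    assert buf.getvalue() == "a-b!\n"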
 
 
+.. function:: raise_from(exc_value, exc_value_from)
+
+   Raise an exception from a context.  On Python 3, this is equivalent to
+   ``raise exc_value from exc_value_from``.  On Python 2, which does not support
+   exception chaining, it is equivalent to ``raise exc_value``.
+
+
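A short sketch of raise_from wrapping a lower-level error (names invented for illustration); on Python 2 only the new exception propagates:

    import six

    def parse_port(text):
        try:
            return int(text)
        except ValueError as exc:
            # On Python 3 this behaves like: raise ValueError(...) from exc
            six.raise_from(ValueError("invalid port: %r" % text), exc)

    try:
        parse_port("eighty")
    except ValueError as err:
        six.print_(err)                              # invalid port: 'eighty'
        six.print_(getattr(err, "__cause__", None))  # original ValueError on Python 3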
 .. function:: reraise(exc_type, exc_value, exc_traceback=None)
 
    Reraise an exception, possibly with a different traceback.  In the simple
@@ -292,7 +321,7 @@
    decorator.
 
 
-.. function:: add_metaclass(metaclass)
+.. decorator:: add_metaclass(metaclass)
 
    Class decorator that replaces a normally-constructed class with a
    metaclass-constructed one.  Example usage: ::
@@ -409,6 +438,48 @@
    :class:`py3:io.BytesIO`.
 
 
+.. decorator:: python_2_unicode_compatible
+
+   A class decorator that takes a class defining a ``__str__`` method.  On
+   Python 3, the decorator does nothing.  On Python 2, it aliases the
+   ``__str__`` method to ``__unicode__`` and creates a new ``__str__`` method
+   that returns the result of ``__unicode__()`` encoded with UTF-8.
+
+
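A minimal sketch of the decorator in use, with an invented class: define a single __str__ that returns text and the decorator wires up __unicode__/__str__ on Python 2:

    import six

    @six.python_2_unicode_compatible
    class Trace(object):
        def __init__(self, name):
            self.name = name

        def __str__(self):
            return u"trace: %s" % self.name

    t = Trace(u"startup")
    six.print_(t)          # "trace: startup" on both Python 2 and Python 3
    if six.PY2:
        assert unicode(t) == u"trace: startup"  # __unicode__ added by the decorator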
+unittest assertions
+>>>>>>>>>>>>>>>>>>>
+
+Six contains compatibility shims for unittest assertions that have been renamed.
+The parameters are the same as their aliases, but you must pass the test method
+as the first argument. For example::
+
+    import six
+    import unittest
+
+    class TestAssertCountEqual(unittest.TestCase):
+        def test(self):
+            six.assertCountEqual(self, (1, 2), [2, 1])
+
+Note these functions are only available on Python 2.7 or later.
+
+.. function:: assertCountEqual()
+
+   Alias for :meth:`~py3:unittest.TestCase.assertCountEqual` on Python 3 and
+   :meth:`~py2:unittest.TestCase.assertItemsEqual` on Python 2.
+
+
+.. function:: assertRaisesRegex()
+
+   Alias for :meth:`~py3:unittest.TestCase.assertRaisesRegex` on Python 3 and
+   :meth:`~py2:unittest.TestCase.assertRaisesRegexp` on Python 2.
+
+
+.. function:: assertRegex()
+
+   Alias for :meth:`~py3:unittest.TestCase.assertRegex` on Python 3 and
+   :meth:`~py2:unittest.TestCase.assertRegexpMatches` on Python 2.
+
+
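To complement the assertCountEqual example above, a sketch of the other two shims (Python 2.7+ only; the test case is invented for illustration):

    import unittest

    import six

    class ShimExamples(unittest.TestCase):
        def test_regex_shims(self):
            six.assertRegex(self, "catapult", r"^cat")
            with six.assertRaisesRegex(self, ValueError, "bad value"):
                raise ValueError("bad value indeed")

    if __name__ == "__main__":
        unittest.main()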
 Renamed modules and attributes compatibility
 >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
 
diff --git a/catapult/third_party/gsutil/third_party/six/setup.py b/catapult/third_party/gsutil/third_party/six/setup.py
old mode 100644
new mode 100755
diff --git a/catapult/third_party/gsutil/third_party/six/six.py b/catapult/third_party/gsutil/third_party/six/six.py
index 21b0e80..ffa3fe1 100644
--- a/catapult/third_party/gsutil/third_party/six/six.py
+++ b/catapult/third_party/gsutil/third_party/six/six.py
@@ -1,6 +1,6 @@
 """Utilities for writing code that runs on Python 2 and 3"""
 
-# Copyright (c) 2010-2014 Benjamin Peterson
+# Copyright (c) 2010-2015 Benjamin Peterson
 #
 # Permission is hereby granted, free of charge, to any person obtaining a copy
 # of this software and associated documentation files (the "Software"), to deal
@@ -23,12 +23,13 @@
 from __future__ import absolute_import
 
 import functools
+import itertools
 import operator
 import sys
 import types
 
 __author__ = "Benjamin Peterson <benjamin@python.org>"
-__version__ = "1.8.0"
+__version__ = "1.9.0"
 
 
 # Useful for very coarse version differentiation.
@@ -88,8 +89,12 @@
     def __get__(self, obj, tp):
         result = self._resolve()
         setattr(obj, self.name, result) # Invokes __set__.
-        # This is a bit ugly, but it avoids running this again.
-        delattr(obj.__class__, self.name)
+        try:
+            # This is a bit ugly, but it avoids running this again by
+            # removing this descriptor.
+            delattr(obj.__class__, self.name)
+        except AttributeError:
+            pass
         return result
 
 
@@ -554,6 +559,12 @@
 
     def iterlists(d, **kw):
         return iter(d.lists(**kw))
+
+    viewkeys = operator.methodcaller("keys")
+
+    viewvalues = operator.methodcaller("values")
+
+    viewitems = operator.methodcaller("items")
 else:
     def iterkeys(d, **kw):
         return iter(d.iterkeys(**kw))
@@ -567,6 +578,12 @@
     def iterlists(d, **kw):
         return iter(d.iterlists(**kw))
 
+    viewkeys = operator.methodcaller("viewkeys")
+
+    viewvalues = operator.methodcaller("viewvalues")
+
+    viewitems = operator.methodcaller("viewitems")
+
 _add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
 _add_doc(itervalues, "Return an iterator over the values of a dictionary.")
 _add_doc(iteritems,
@@ -593,6 +610,9 @@
     import io
     StringIO = io.StringIO
     BytesIO = io.BytesIO
+    _assertCountEqual = "assertCountEqual"
+    _assertRaisesRegex = "assertRaisesRegex"
+    _assertRegex = "assertRegex"
 else:
     def b(s):
         return s
@@ -605,14 +625,28 @@
         return ord(bs[0])
     def indexbytes(buf, i):
         return ord(buf[i])
-    def iterbytes(buf):
-        return (ord(byte) for byte in buf)
+    iterbytes = functools.partial(itertools.imap, ord)
     import StringIO
     StringIO = BytesIO = StringIO.StringIO
+    _assertCountEqual = "assertItemsEqual"
+    _assertRaisesRegex = "assertRaisesRegexp"
+    _assertRegex = "assertRegexpMatches"
 _add_doc(b, """Byte literal""")
 _add_doc(u, """Text literal""")
 
 
+def assertCountEqual(self, *args, **kwargs):
+    return getattr(self, _assertCountEqual)(*args, **kwargs)
+
+
+def assertRaisesRegex(self, *args, **kwargs):
+    return getattr(self, _assertRaisesRegex)(*args, **kwargs)
+
+
+def assertRegex(self, *args, **kwargs):
+    return getattr(self, _assertRegex)(*args, **kwargs)
+
+
 if PY3:
     exec_ = getattr(moves.builtins, "exec")
 
@@ -643,6 +677,21 @@
 """)
 
 
+if sys.version_info[:2] == (3, 2):
+    exec_("""def raise_from(value, from_value):
+    if from_value is None:
+        raise value
+    raise value from from_value
+""")
+elif sys.version_info[:2] > (3, 2):
+    exec_("""def raise_from(value, from_value):
+    raise value from from_value
+""")
+else:
+    def raise_from(value, from_value):
+        raise value
+
+
 print_ = getattr(moves.builtins, "print", None)
 if print_ is None:
     def print_(*args, **kwargs):
@@ -697,6 +746,14 @@
                 write(sep)
             write(arg)
         write(end)
+if sys.version_info[:2] < (3, 3):
+    _print = print_
+    def print_(*args, **kwargs):
+        fp = kwargs.get("file", sys.stdout)
+        flush = kwargs.pop("flush", False)
+        _print(*args, **kwargs)
+        if flush and fp is not None:
+            fp.flush()
 
 _add_doc(reraise, """Reraise an exception.""")
 
@@ -704,7 +761,7 @@
     def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
               updated=functools.WRAPPER_UPDATES):
         def wrapper(f):
-            f = functools.wraps(wrapped)(f)
+            f = functools.wraps(wrapped, assigned, updated)(f)
             f.__wrapped__ = wrapped
             return f
         return wrapper
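A brief sketch of why forwarding *assigned* and *updated* matters: with the fix above, six.wraps can copy only the docstring and merge, rather than overwrite, a dict attribute (names invented for illustration):

    import six

    def original():
        """Original docstring."""

    original.tags = {"speed": "fast"}

    def replacement():
        return 42
    replacement.tags = {"color": "blue"}

    # Copy only __doc__; merge (update) the tags dict instead of replacing it.
    wrapped = six.wraps(original, assigned=("__doc__",), updated=("tags",))(replacement)

    assert wrapped.__doc__ == "Original docstring."
    assert wrapped.tags == {"color": "blue", "speed": "fast"}
    assert wrapped.__wrapped__ is original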
@@ -737,6 +794,25 @@
         return metaclass(cls.__name__, cls.__bases__, orig_vars)
     return wrapper
 
+
+def python_2_unicode_compatible(klass):
+    """
+    A decorator that defines __unicode__ and __str__ methods under Python 2.
+    Under Python 3 it does nothing.
+
+    To support Python 2 and 3 with a single code base, define a __str__ method
+    returning text and apply this decorator to the class.
+    """
+    if PY2:
+        if '__str__' not in klass.__dict__:
+            raise ValueError("@python_2_unicode_compatible cannot be applied "
+                             "to %s because it doesn't define __str__()." %
+                             klass.__name__)
+        klass.__unicode__ = klass.__str__
+        klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
+    return klass
+
+
 # Complete the moves implementation.
 # This code is at the end of this module to speed up module loading.
 # Turn this module into a package.
diff --git a/catapult/third_party/gsutil/third_party/six/test_six.py b/catapult/third_party/gsutil/third_party/six/test_six.py
index 0125d6b..76a8ccb 100644
--- a/catapult/third_party/gsutil/third_party/six/test_six.py
+++ b/catapult/third_party/gsutil/third_party/six/test_six.py
@@ -1,6 +1,7 @@
 import operator
 import sys
 import types
+import unittest
 
 import py
 
@@ -389,6 +390,24 @@
         monkeypatch.undo()
 
 
+@py.test.mark.skipif(sys.version_info[:2] < (2, 7),
+                reason="view methods on dictionaries only available on 2.7+")
+def test_dictionary_views():
+    def stock_method_name(viewwhat):
+        """Given a method suffix like "keys" or "values", return the name
+        of the dict method that delivers those on the version of Python
+        we're running in."""
+        if six.PY3:
+            return viewwhat
+        return 'view' + viewwhat
+
+    d = dict(zip(range(10), (range(11, 20))))
+    for name in "keys", "values", "items":
+        meth = getattr(six, "view" + name)
+        view = meth(d)
+        assert set(view) == set(getattr(d, name)())
+
+
 def test_advance_iterator():
     assert six.next is six.advance_iterator
     l = [1, 2]
@@ -570,6 +589,27 @@
         assert tb is get_next(tb2)
 
 
+def test_raise_from():
+    try:
+        try:
+            raise Exception("blah")
+        except Exception:
+            ctx = sys.exc_info()[1]
+            f = Exception("foo")
+            six.raise_from(f, None)
+    except Exception:
+        tp, val, tb = sys.exc_info()
+    if sys.version_info[:2] > (3, 0):
+        # We should have done a raise f from None equivalent.
+        assert val.__cause__ is None
+        assert val.__context__ is ctx
+    if sys.version_info[:2] >= (3, 3):
+        # And that should suppress the context on the exception.
+        assert val.__suppress_context__
+    # For all versions the outer exception should have raised successfully.
+    assert str(val) == "foo"
+
+
 def test_print_():
     save = sys.stdout
     out = sys.stdout = six.moves.StringIO()
@@ -596,6 +636,17 @@
     out = six.StringIO()
     six.print_(None, file=out)
     assert out.getvalue() == "None\n"
+    class FlushableStringIO(six.StringIO):
+        def __init__(self):
+            six.StringIO.__init__(self)
+            self.flushed = False
+        def flush(self):
+            self.flushed = True
+    out = FlushableStringIO()
+    six.print_("Hello", file=out)
+    assert not out.flushed
+    six.print_("Hello", file=out, flush=True)
+    assert out.flushed
 
 
 @py.test.mark.skipif("sys.version_info[:2] >= (2, 6)")
@@ -662,6 +713,18 @@
     assert k is original_k
     assert not hasattr(k, '__wrapped__')
 
+    def f(g, assign, update):
+        def w():
+            return 42
+        w.glue = {"foo" : "bar"}
+        return six.wraps(g, assign, update)(w)
+    k.glue = {"melon" : "egg"}
+    k.turnip = 43
+    k = f(k, ["turnip"], ["glue"])
+    assert k.__name__ == "w"
+    assert k.turnip == 43
+    assert k.glue == {"melon" : "egg", "foo" : "bar"}
+
 
 def test_add_metaclass():
     class Meta(type):
@@ -734,3 +797,62 @@
         __slots__ = "__weakref__",
     MySlotsWeakref = six.add_metaclass(Meta)(MySlotsWeakref)
     assert type(MySlotsWeakref) is Meta
+
+
+@py.test.mark.skipif("sys.version_info[:2] < (2, 7)")
+def test_assertCountEqual():
+    class TestAssertCountEqual(unittest.TestCase):
+        def test(self):
+            with self.assertRaises(AssertionError):
+                six.assertCountEqual(self, (1, 2), [3, 4, 5])
+
+            six.assertCountEqual(self, (1, 2), [2, 1])
+
+    TestAssertCountEqual('test').test()
+
+
+@py.test.mark.skipif("sys.version_info[:2] < (2, 7)")
+def test_assertRegex():
+    class TestAssertRegex(unittest.TestCase):
+        def test(self):
+            with self.assertRaises(AssertionError):
+                six.assertRegex(self, 'test', r'^a')
+
+            six.assertRegex(self, 'test', r'^t')
+
+    TestAssertRegex('test').test()
+
+
+@py.test.mark.skipif("sys.version_info[:2] < (2, 7)")
+def test_assertRaisesRegex():
+    class TestAssertRaisesRegex(unittest.TestCase):
+        def test(self):
+            with six.assertRaisesRegex(self, AssertionError, '^Foo'):
+                raise AssertionError('Foo')
+
+            with self.assertRaises(AssertionError):
+                with six.assertRaisesRegex(self, AssertionError, r'^Foo'):
+                    raise AssertionError('Bar')
+
+    TestAssertRaisesRegex('test').test()
+
+
+def test_python_2_unicode_compatible():
+    @six.python_2_unicode_compatible
+    class MyTest(object):
+        def __str__(self):
+            return six.u('hello')
+
+        def __bytes__(self):
+            return six.b('hello')
+
+    my_test = MyTest()
+
+    if six.PY2:
+        assert str(my_test) == six.b("hello")
+        assert unicode(my_test) == six.u("hello")
+    elif six.PY3:
+        assert bytes(my_test) == six.b("hello")
+        assert str(my_test) == six.u("hello")
+
+    assert getattr(six.moves.builtins, 'bytes', str)(my_test) == six.b("hello")
diff --git a/catapult/third_party/mox3/.gitignore b/catapult/third_party/mox3/.gitignore
new file mode 100644
index 0000000..f8f74ec
--- /dev/null
+++ b/catapult/third_party/mox3/.gitignore
@@ -0,0 +1,71 @@
+*.py[co]
+
+# Packages
+*.egg
+*.egg-info
+dist
+build
+eggs
+parts
+bin
+var
+sdist
+develop-eggs
+.installed.cfg
+
+# Installer logs
+pip-log.txt
+
+# Unit test / coverage reports
+.coverage
+.tox
+.testrepository
+
+#Translations
+*.mo
+
+# virtualenv
+.venv
+
+#Mr Developer
+.mr.developer.cfg
+
+# https://github.com/h5bp/html5-boilerplate/blob/master/.gitignore
+# Numerous always-ignore extensions
+*.diff
+*.err
+*.orig
+*.log
+*.rej
+*.swo
+*.swp
+*.vi
+*~
+
+# OS or Editor folders
+.DS_Store
+Thumbs.db
+.cache
+.project
+.settings
+.tmproj
+nbproject
+*.sublime-project
+*.sublime-workspace
+*.komodoproject
+.komodotools
+
+# Folders to ignore
+.hg
+.svn
+.CVS
+intermediate
+publish
+.idea
+
+# PyDev
+.pydevproject
+
+# pbr
+AUTHORS
+ChangeLog
diff --git a/catapult/third_party/mox3/.gitreview b/catapult/third_party/mox3/.gitreview
new file mode 100644
index 0000000..3653540
--- /dev/null
+++ b/catapult/third_party/mox3/.gitreview
@@ -0,0 +1,4 @@
+[gerrit]
+host=review.openstack.org
+port=29418
+project=openstack/mox3.git
diff --git a/catapult/third_party/mox3/.mailmap b/catapult/third_party/mox3/.mailmap
new file mode 100644
index 0000000..6a6d090
--- /dev/null
+++ b/catapult/third_party/mox3/.mailmap
@@ -0,0 +1 @@
+Przemysław Gajda <quermit@gmail.com> <quermit@gmail.com>
diff --git a/catapult/third_party/mox3/.testr.conf b/catapult/third_party/mox3/.testr.conf
new file mode 100644
index 0000000..6c1541e
--- /dev/null
+++ b/catapult/third_party/mox3/.testr.conf
@@ -0,0 +1,4 @@
+[DEFAULT]
+test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_TEST_TIMEOUT=60 ${PYTHON:-python} -m subunit.run discover -t ./ ./ $LISTOPT $IDOPTION
+test_id_option=--load-list $IDFILE
+test_list_option=--list
diff --git a/catapult/third_party/mox3/CONTRIBUTING.rst b/catapult/third_party/mox3/CONTRIBUTING.rst
new file mode 100644
index 0000000..8121da2
--- /dev/null
+++ b/catapult/third_party/mox3/CONTRIBUTING.rst
@@ -0,0 +1,17 @@
+If you would like to contribute to the development of OpenStack,
+you must follow the steps in the "If you're a developer, start here"
+section of this page:
+
+   http://wiki.openstack.org/HowToContribute
+
+Once those steps have been completed, changes to OpenStack
+should be submitted for review via the Gerrit tool, following
+the workflow documented at:
+
+   http://wiki.openstack.org/GerritWorkflow
+
+Pull requests submitted through GitHub will be ignored.
+
+Bugs should be filed on Launchpad, not GitHub:
+
+   https://bugs.launchpad.net/mox3
diff --git a/catapult/third_party/mox3/COPYING.txt b/catapult/third_party/mox3/COPYING.txt
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/catapult/third_party/mox3/COPYING.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/catapult/third_party/mox3/MANIFEST.in b/catapult/third_party/mox3/MANIFEST.in
new file mode 100644
index 0000000..c978a52
--- /dev/null
+++ b/catapult/third_party/mox3/MANIFEST.in
@@ -0,0 +1,6 @@
+include AUTHORS
+include ChangeLog
+exclude .gitignore
+exclude .gitreview
+
+global-exclude *.pyc
diff --git a/catapult/third_party/mox3/README.chromium b/catapult/third_party/mox3/README.chromium
new file mode 100644
index 0000000..cecbc0a
--- /dev/null
+++ b/catapult/third_party/mox3/README.chromium
@@ -0,0 +1,19 @@
+Name: mox3
+Short Name: mox3
+URL: https://github.com/openstack/mox3
+Version: 60dd893a8095f9d7957bf6635dc1620a7908d86b (commit hash)
+License: Apache License 2.0
+License File: NOT_SHIPPED
+Security Critical: no
+
+Local modification:
+Remove doc/source/conf.py because it's not needed and causes checklicense.py
+to fail.
+
+Description:
+Mox3 is an unofficial port of the Google mox framework
+(http://code.google.com/p/pymox/) to Python 3. It was meant to be as compatible
+with mox as possible, but small enhancements have been made. The library was
+tested on Python versions 3.2, 2.7 and 2.6.
+
+This library is added since pyfakefs depends on it.
diff --git a/catapult/third_party/mox3/README.rst b/catapult/third_party/mox3/README.rst
new file mode 100644
index 0000000..7f9e9db
--- /dev/null
+++ b/catapult/third_party/mox3/README.rst
@@ -0,0 +1,60 @@
+Mox3 - Mock object framework for Python 3
+=========================================
+
+Mox3 is an unofficial port of the Google mox framework
+(http://code.google.com/p/pymox/) to Python 3. It was meant to be as compatible
+with mox as possible, but small enhancements have been made. The library was
+tested on Python versions 3.2, 2.7 and 2.6.
+
+Use at your own risk ;) 
+
+To install:
+
+  $ python setup.py install
+
+Running Tests
+-------------
+The testing system is based on a combination of tox and testr. The canonical
+approach to running tests is to simply run the command `tox`. This will
+create virtual environments, populate them with dependencies and run all of
+the tests that OpenStack CI systems run. Behind the scenes, tox is running
+`testr run --parallel`, but is set up such that you can supply any additional
+testr arguments that are needed to tox. For example, you can run:
+`tox -- --analyze-isolation` to cause tox to tell testr to add
+--analyze-isolation to its argument list.
+
+It is also possible to run the tests inside of a virtual environment
+you have created, or it is possible that you have all of the dependencies
+installed locally already. In this case, you can interact with the testr
+command directly. Running `testr run` will run the entire test suite. `testr
+run --parallel` will run it in parallel (this is the default incantation tox
+uses.) More information about testr can be found at:
+http://wiki.openstack.org/testr
+
+Basic Usage
+-----------
+  
+The basic usage of mox3 is the same as with mox, but the initial import should
+be made from the mox3 module:
+
+  from mox3 import mox
+
+To learn how to use mox3 you may check the documentation of the original mox
+framework:
+
+  http://code.google.com/p/pymox/wiki/MoxDocumentation
+
+Original Copyright
+------------------
+
+Mox is Copyright 2008 Google Inc, and licensed under the Apache
+License, Version 2.0; see the file COPYING.txt for details.  If you would
+like to help us improve Mox, join the group.
+
+OpenStack Fork
+--------------
+
+* Free software: Apache license
+* Documentation: http://docs.openstack.org/developer/mox3
+* Source: http://git.openstack.org/cgit/openstack/mox3
+* Bugs: http://bugs.launchpad.net/python-mox3
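A concrete record-replay-verify sketch to go with the Basic Usage section above; the Dao class and the key are invented for illustration and are not part of mox3:

    from mox3 import mox

    class Dao(object):
        def get(self, key):
            raise NotImplementedError("would hit a real database")

    m = mox.Mox()
    mock_dao = m.CreateMock(Dao)

    # Record phase: declare the expected call and its canned return value.
    mock_dao.get("user:1").AndReturn({"name": "alice"})

    m.ReplayAll()                    # switch every created mock to replay mode
    assert mock_dao.get("user:1") == {"name": "alice"}
    m.VerifyAll()                    # raises if an expected call never happened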
diff --git a/catapult/third_party/mox3/doc/source/contributing.rst b/catapult/third_party/mox3/doc/source/contributing.rst
new file mode 100644
index 0000000..2ca75d1
--- /dev/null
+++ b/catapult/third_party/mox3/doc/source/contributing.rst
@@ -0,0 +1,5 @@
+==============
+ Contributing
+==============
+
+.. include:: ../../CONTRIBUTING.rst
diff --git a/catapult/third_party/mox3/doc/source/index.rst b/catapult/third_party/mox3/doc/source/index.rst
new file mode 100644
index 0000000..2df4863
--- /dev/null
+++ b/catapult/third_party/mox3/doc/source/index.rst
@@ -0,0 +1,21 @@
+mox3
+====
+
+A fork of mox with Python 3 support.
+
+Contents
+========
+
+.. toctree::
+   :maxdepth: 2
+
+   readme
+   contributing
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/catapult/third_party/mox3/doc/source/readme.rst b/catapult/third_party/mox3/doc/source/readme.rst
new file mode 100644
index 0000000..a6210d3
--- /dev/null
+++ b/catapult/third_party/mox3/doc/source/readme.rst
@@ -0,0 +1 @@
+.. include:: ../../README.rst
diff --git a/catapult/perf_insights/third_party/__init__.py b/catapult/third_party/mox3/mox3/__init__.py
similarity index 100%
copy from catapult/perf_insights/third_party/__init__.py
copy to catapult/third_party/mox3/mox3/__init__.py
diff --git a/catapult/third_party/mox3/mox3/fixture.py b/catapult/third_party/mox3/mox3/fixture.py
new file mode 100644
index 0000000..f6e39d8
--- /dev/null
+++ b/catapult/third_party/mox3/mox3/fixture.py
@@ -0,0 +1,33 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+from mox3 import mox
+from mox3 import stubout
+
+
+class MoxStubout(fixtures.Fixture):
+    """Deal with code around mox and stubout as a fixture."""
+
+    def setUp(self):
+        super(MoxStubout, self).setUp()
+        self.mox = mox.Mox()
+        self.stubs = stubout.StubOutForTesting()
+        self.addCleanup(self.mox.UnsetStubs)
+        self.addCleanup(self.stubs.UnsetAll)
+        self.addCleanup(self.stubs.SmartUnsetAll)
+        self.addCleanup(self.mox.VerifyAll)
diff --git a/catapult/third_party/mox3/mox3/mox.py b/catapult/third_party/mox3/mox3/mox.py
new file mode 100644
index 0000000..3c10cc8
--- /dev/null
+++ b/catapult/third_party/mox3/mox3/mox.py
@@ -0,0 +1,2168 @@
+# Copyright 2008 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This is a fork of the pymox library intended to work with Python 3.
+# The file was modified by quermit@gmail.com and dawid.fatyga@gmail.com
+
+"""Mox, an object-mocking framework for Python.
+
+Mox works in the record-replay-verify paradigm.  When you first create
+a mock object, it is in record mode.  You then programmatically set
+the expected behavior of the mock object (what methods are to be
+called on it, with what parameters, what they should return, and in
+what order).
+
+Once you have set up the expected mock behavior, you put it in replay
+mode.  Now the mock responds to method calls just as you told it to.
+If an unexpected method (or an expected method with unexpected
+parameters) is called, then an exception will be raised.
+
+Once you are done interacting with the mock, you need to verify that
+all the expected interactions occurred.  (Maybe your code exited
+prematurely without calling some cleanup method!)  The verify phase
+ensures that every expected method was called; otherwise, an exception
+will be raised.
+
+WARNING! Mock objects created by Mox are not thread-safe.  If you
+call a mock from multiple threads, it should be guarded by a mutex.
+
+TODO(stevepm): Add the option to make mocks thread-safe!
+
+Suggested usage / workflow:
+
+    # Create Mox factory
+    my_mox = Mox()
+
+    # Create a mock data access object
+    mock_dao = my_mox.CreateMock(DAOClass)
+
+    # Set up expected behavior
+    mock_dao.RetrievePersonWithIdentifier('1').AndReturn(person)
+    mock_dao.DeletePerson(person)
+
+    # Put mocks in replay mode
+    my_mox.ReplayAll()
+
+    # Inject mock object and run test
+    controller.SetDao(mock_dao)
+    controller.DeletePersonById('1')
+
+    # Verify all methods were called as expected
+    my_mox.VerifyAll()
+"""
+
+import collections
+import difflib
+import inspect
+import re
+import types
+import unittest
+
+from mox3 import stubout
+
+
+class Error(AssertionError):
+    """Base exception for this module."""
+
+    pass
+
+
+class ExpectedMethodCallsError(Error):
+    """Raised when an expected method wasn't called.
+
+    This can occur if Verify() is called before all expected methods have been
+    called.
+    """
+
+    def __init__(self, expected_methods):
+        """Init exception.
+
+        Args:
+            # expected_methods: A sequence of MockMethod objects that should
+            #                   have been called.
+            expected_methods: [MockMethod]
+
+        Raises:
+            ValueError: if expected_methods contains no methods.
+        """
+
+        if not expected_methods:
+            raise ValueError("There must be at least one expected method")
+        Error.__init__(self)
+        self._expected_methods = expected_methods
+
+    def __str__(self):
+        calls = "\n".join(["%3d.  %s" % (i, m)
+                          for i, m in enumerate(self._expected_methods)])
+        return "Verify: Expected methods never called:\n%s" % (calls,)
+
+
+class UnexpectedMethodCallError(Error):
+    """Raised when an unexpected method is called.
+
+    This can occur if a method is called with incorrect parameters, or out of
+    the specified order.
+    """
+
+    def __init__(self, unexpected_method, expected):
+        """Init exception.
+
+        Args:
+            # unexpected_method: MockMethod that was called but was not at the
+            #     head of the expected_method queue.
+            # expected: MockMethod or UnorderedGroup the method should have
+            #     been in.
+            unexpected_method: MockMethod
+            expected: MockMethod or UnorderedGroup
+        """
+
+        Error.__init__(self)
+        if expected is None:
+            self._str = "Unexpected method call %s" % (unexpected_method,)
+        else:
+            differ = difflib.Differ()
+            diff = differ.compare(str(unexpected_method).splitlines(True),
+                                  str(expected).splitlines(True))
+            self._str = ("Unexpected method call."
+                         "  unexpected:-  expected:+\n%s"
+                         % ("\n".join(line.rstrip() for line in diff),))
+
+    def __str__(self):
+        return self._str
+
+
+class UnknownMethodCallError(Error):
+    """Raised if an unknown method is requested of the mock object."""
+
+    def __init__(self, unknown_method_name):
+        """Init exception.
+
+        Args:
+            # unknown_method_name: Method call that is not part of the mocked
+            #     class's public interface.
+            unknown_method_name: str
+        """
+
+        Error.__init__(self)
+        self._unknown_method_name = unknown_method_name
+
+    def __str__(self):
+        return ("Method called is not a member of the object: %s" %
+                self._unknown_method_name)
+
+
+class PrivateAttributeError(Error):
+    """Raised if a MockObject is passed a private additional attribute name."""
+
+    def __init__(self, attr):
+        Error.__init__(self)
+        self._attr = attr
+
+    def __str__(self):
+        return ("Attribute '%s' is private and should not be available"
+                "in a mock object." % self._attr)
+
+
+class ExpectedMockCreationError(Error):
+    """Raised if mocks should have been created by StubOutClassWithMocks."""
+
+    def __init__(self, expected_mocks):
+        """Init exception.
+
+        Args:
+            # expected_mocks: A sequence of MockObjects that should have been
+            #     created
+
+        Raises:
+            ValueError: if expected_mocks contains no mocks.
+        """
+
+        if not expected_mocks:
+            raise ValueError("There must be at least one expected method")
+        Error.__init__(self)
+        self._expected_mocks = expected_mocks
+
+    def __str__(self):
+        mocks = "\n".join(["%3d.  %s" % (i, m)
+                          for i, m in enumerate(self._expected_mocks)])
+        return "Verify: Expected mocks never created:\n%s" % (mocks,)
+
+
+class UnexpectedMockCreationError(Error):
+    """Raised if too many mocks were created by StubOutClassWithMocks."""
+
+    def __init__(self, instance, *params, **named_params):
+        """Init exception.
+
+        Args:
+            # instance: the type of object that was created
+            # params: parameters given during instantiation
+            # named_params: named parameters given during instantiation
+        """
+
+        Error.__init__(self)
+        self._instance = instance
+        self._params = params
+        self._named_params = named_params
+
+    def __str__(self):
+        args = ", ".join(["%s" % v for i, v in enumerate(self._params)])
+        error = "Unexpected mock creation: %s(%s" % (self._instance, args)
+
+        if self._named_params:
+            error += ", " + ", ".join(["%s=%s" % (k, v) for k, v in
+                                      self._named_params.items()])
+
+        error += ")"
+        return error
+
+
+class Mox(object):
+    """Mox: a factory for creating mock objects."""
+
+    # A list of types that should be stubbed out with MockObjects (as
+    # opposed to MockAnythings).
+    _USE_MOCK_OBJECT = [types.FunctionType, types.ModuleType, types.MethodType]
+
+    def __init__(self):
+        """Initialize a new Mox."""
+
+        self._mock_objects = []
+        self.stubs = stubout.StubOutForTesting()
+
+    def CreateMock(self, class_to_mock, attrs=None, bounded_to=None):
+        """Create a new mock object.
+
+        Args:
+            # class_to_mock: the class to be mocked
+            class_to_mock: class
+            attrs: dict of attribute names to values that will be
+                   set on the mock object. Only public attributes may be set.
+            bounded_to: optionally, when class_to_mock is not a class,
+                        it points to a real class object, to which
+                        attribute is bound
+
+        Returns:
+            MockObject that can be used as the class_to_mock would be.
+        """
+        if attrs is None:
+            attrs = {}
+        new_mock = MockObject(class_to_mock, attrs=attrs,
+                              class_to_bind=bounded_to)
+        self._mock_objects.append(new_mock)
+        return new_mock
+
+    def CreateMockAnything(self, description=None):
+        """Create a mock that will accept any method calls.
+
+        This does not enforce an interface.
+
+        Args:
+        description: str. Optionally, a descriptive name for the mock object
+        being created, for debugging output purposes.
+        """
+        new_mock = MockAnything(description=description)
+        self._mock_objects.append(new_mock)
+        return new_mock
+
+    def ReplayAll(self):
+        """Set all mock objects to replay mode."""
+
+        for mock_obj in self._mock_objects:
+            mock_obj._Replay()
+
+    def VerifyAll(self):
+        """Call verify on all mock objects created."""
+
+        for mock_obj in self._mock_objects:
+            mock_obj._Verify()
+
+    def ResetAll(self):
+        """Call reset on all mock objects.    This does not unset stubs."""
+
+        for mock_obj in self._mock_objects:
+            mock_obj._Reset()
+
+    def StubOutWithMock(self, obj, attr_name, use_mock_anything=False):
+        """Replace a method, attribute, etc. with a Mock.
+
+        This will replace a class or module with a MockObject, and everything
+        else (method, function, etc) with a MockAnything. This can be
+        overridden to always use a MockAnything by setting use_mock_anything
+        to True.
+
+        Args:
+            obj: A Python object (class, module, instance, callable).
+            attr_name: str. The name of the attribute to replace with a mock.
+            use_mock_anything: bool. True if a MockAnything should be used
+                               regardless of the type of attribute.
+        """
+
+        if inspect.isclass(obj):
+            class_to_bind = obj
+        else:
+            class_to_bind = None
+
+        attr_to_replace = getattr(obj, attr_name)
+        attr_type = type(attr_to_replace)
+
+        if attr_type == MockAnything or attr_type == MockObject:
+            raise TypeError('Cannot mock a MockAnything! Did you remember to '
+                            'call UnsetStubs in your previous test?')
+
+        type_check = (
+            attr_type in self._USE_MOCK_OBJECT or
+            inspect.isclass(attr_to_replace) or
+            isinstance(attr_to_replace, object))
+        if type_check and not use_mock_anything:
+            stub = self.CreateMock(attr_to_replace, bounded_to=class_to_bind)
+        else:
+            stub = self.CreateMockAnything(
+                description='Stub for %s' % attr_to_replace)
+            stub.__name__ = attr_name
+
+        self.stubs.Set(obj, attr_name, stub)
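+
+    # Illustrative sketch: stubbing out a module-level function; `os.path`
+    # is only a familiar example, any object attribute can be stubbed.
+    #
+    #     mox = Mox()
+    #     mox.StubOutWithMock(os.path, 'exists')
+    #     os.path.exists('/tmp/data').AndReturn(True)
+    #     mox.ReplayAll()
+    #     ...                  # code under test calls os.path.exists()
+    #     mox.VerifyAll()
+    #     mox.UnsetStubs()     # always restore the original attribute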
+
+    def StubOutClassWithMocks(self, obj, attr_name):
+        """Replace a class with a "mock factory" that will create mock objects.
+
+        This is useful if the code-under-test directly instantiates
+        dependencies. Previously, some boilerplate was necessary to
+        create a mock that would act as a factory. Using
+        StubOutClassWithMocks, once you've stubbed out the class you may
+        use the stubbed class as you would any other mock created by mox:
+        during the record phase, new mock instances will be created, and
+        during replay, the recorded mocks will be returned.
+
+        # Example using StubOutWithMock (the old, clunky way):
+
+        mock1 = mox.CreateMock(my_import.FooClass)
+        mock2 = mox.CreateMock(my_import.FooClass)
+        foo_factory = mox.StubOutWithMock(my_import, 'FooClass',
+                                          use_mock_anything=True)
+        foo_factory(1, 2).AndReturn(mock1)
+        foo_factory(9, 10).AndReturn(mock2)
+        mox.ReplayAll()
+
+        my_import.FooClass(1, 2)     # Returns mock1.
+        my_import.FooClass(9, 10)    # Returns mock2.
+        mox.VerifyAll()
+
+        # Example using StubOutClassWithMocks:
+
+        mox.StubOutClassWithMocks(my_import, 'FooClass')
+        mock1 = my_import.FooClass(1, 2)     # Returns a new mock of FooClass
+        mock2 = my_import.FooClass(9, 10)    # Returns another mock instance
+        mox.ReplayAll()
+
+        my_import.FooClass(1, 2)     # Returns mock1 again.
+        my_import.FooClass(9, 10)    # Returns mock2 again.
+        mox.VerifyAll()
+        """
+        attr_to_replace = getattr(obj, attr_name)
+        attr_type = type(attr_to_replace)
+
+        if attr_type == MockAnything or attr_type == MockObject:
+            raise TypeError('Cannot mock a MockAnything! Did you remember to '
+                            'call UnsetStubs in your previous test?')
+
+        if not inspect.isclass(attr_to_replace):
+            raise TypeError('Given attr is not a Class. Use StubOutWithMock.')
+
+        factory = _MockObjectFactory(attr_to_replace, self)
+        self._mock_objects.append(factory)
+        self.stubs.Set(obj, attr_name, factory)
+
+    def UnsetStubs(self):
+        """Restore stubs to their original state."""
+
+        self.stubs.UnsetAll()
+
+
+def Replay(*args):
+    """Put mocks into Replay mode.
+
+    Args:
+        # args is any number of mocks to put into replay mode.
+    """
+
+    for mock in args:
+        mock._Replay()
+
+
+def Verify(*args):
+    """Verify mocks.
+
+    Args:
+        # args is any number of mocks to be verified.
+    """
+
+    for mock in args:
+        mock._Verify()
+
+
+def Reset(*args):
+    """Reset mocks.
+
+    Args:
+        # args is any number of mocks to be reset.
+    """
+
+    for mock in args:
+        mock._Reset()
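+
+# Illustrative sketch: these module-level helpers act on individual mocks
+# rather than a Mox factory instance; `mock_a` and `mock_b` are hypothetical.
+#
+#     mock_a = MockAnything()
+#     mock_b = MockAnything()
+#     mock_a.Ping()                  # record an expected call
+#     Replay(mock_a, mock_b)
+#     mock_a.Ping()                  # replay it
+#     Verify(mock_a, mock_b)
+#     Reset(mock_a, mock_b)          # back to record mode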
+
+
+class MockAnything(object):
+    """A mock that can be used to mock anything.
+
+    This is helpful for mocking classes that do not provide a public interface.
+    """
+
+    def __init__(self, description=None):
+        """Initialize a new MockAnything.
+
+        Args:
+            description: str. Optionally, a descriptive name for the mock
+                         object being created, for debugging output purposes.
+        """
+        self._description = description
+        self._Reset()
+
+    def __repr__(self):
+        if self._description:
+            return '<MockAnything instance of %s>' % self._description
+        else:
+            return '<MockAnything instance>'
+
+    def __getattr__(self, method_name):
+        """Intercept method calls on this object.
+
+         A new MockMethod is returned that is aware of the MockAnything's
+         state (record or replay).    The call will be recorded or replayed
+         by the MockMethod's __call__.
+
+        Args:
+            # method name: the name of the method being called.
+            method_name: str
+
+        Returns:
+            A new MockMethod aware of MockAnything's state (record or replay).
+        """
+        if method_name == '__dir__':
+            return self.__class__.__dir__.__get__(self, self.__class__)
+
+        return self._CreateMockMethod(method_name)
+
+    def __str__(self):
+        return self._CreateMockMethod('__str__')()
+
+    def __call__(self, *args, **kwargs):
+        return self._CreateMockMethod('__call__')(*args, **kwargs)
+
+    def __getitem__(self, i):
+        return self._CreateMockMethod('__getitem__')(i)
+
+    def _CreateMockMethod(self, method_name, method_to_mock=None,
+                          class_to_bind=object):
+        """Create a new mock method call and return it.
+
+        Args:
+            # method_name: the name of the method being called.
+            # method_to_mock: The actual method being mocked, used for
+            #                 introspection.
+            # class_to_bind: Class to which the method is bound
+            #                (object by default)
+            method_name: str
+            method_to_mock: a method object
+
+        Returns:
+            A new MockMethod aware of MockAnything's state (record or replay).
+        """
+
+        return MockMethod(method_name, self._expected_calls_queue,
+                          self._replay_mode, method_to_mock=method_to_mock,
+                          description=self._description,
+                          class_to_bind=class_to_bind)
+
+    def __nonzero__(self):
+        """Return 1 for nonzero so the mock can be used as a conditional."""
+
+        return 1
+
+    def __bool__(self):
+        """Return True for nonzero so the mock can be used as a conditional."""
+        return True
+
+    def __eq__(self, rhs):
+        """Provide custom logic to compare objects."""
+
+        return (isinstance(rhs, MockAnything) and
+                self._replay_mode == rhs._replay_mode and
+                self._expected_calls_queue == rhs._expected_calls_queue)
+
+    def __ne__(self, rhs):
+        """Provide custom logic to compare objects."""
+
+        return not self == rhs
+
+    def _Replay(self):
+        """Start replaying expected method calls."""
+
+        self._replay_mode = True
+
+    def _Verify(self):
+        """Verify that all of the expected calls have been made.
+
+        Raises:
+            ExpectedMethodCallsError: if there are still more method calls in
+                                      the expected queue.
+        """
+
+        # If the list of expected calls is not empty, raise an exception
+        if self._expected_calls_queue:
+            # The last MultipleTimesGroup is not popped from the queue.
+            if (len(self._expected_calls_queue) == 1 and
+                    isinstance(self._expected_calls_queue[0],
+                               MultipleTimesGroup) and
+                    self._expected_calls_queue[0].IsSatisfied()):
+                pass
+            else:
+                raise ExpectedMethodCallsError(self._expected_calls_queue)
+
+    def _Reset(self):
+        """Reset the state of this mock to record mode with an empty queue."""
+
+        # Maintain a list of method calls we are expecting
+        self._expected_calls_queue = collections.deque()
+
+        # Make sure we are in setup mode, not replay mode
+        self._replay_mode = False
+
+
+class MockObject(MockAnything):
+    """Mock object that simulates the public/protected interface of a class."""
+
+    def __init__(self, class_to_mock, attrs=None, class_to_bind=None):
+        """Initialize a mock object.
+
+        Determines the methods and properties of the class and stores them.
+
+        Args:
+            # class_to_mock: class to be mocked
+            class_to_mock: class
+            attrs: dict of attribute names to values that will be set on the
+                   mock object. Only public attributes may be set.
+            class_to_bind: optionally, when class_to_mock is not a class at
+                           all, it points to a real class
+
+        Raises:
+            PrivateAttributeError: if a supplied attribute is not public.
+            ValueError: if an attribute would mask an existing method.
+        """
+        if attrs is None:
+            attrs = {}
+
+        # Used to hack around the mixin/inheritance of MockAnything, which
+        # is not a proper object (it can be anything. :-)
+        MockAnything.__dict__['__init__'](self)
+
+        # Get a list of all the public and special methods we should mock.
+        self._known_methods = set()
+        self._known_vars = set()
+        self._class_to_mock = class_to_mock
+
+        if inspect.isclass(class_to_mock):
+            self._class_to_bind = self._class_to_mock
+        else:
+            self._class_to_bind = class_to_bind
+
+        try:
+            if inspect.isclass(self._class_to_mock):
+                self._description = class_to_mock.__name__
+            else:
+                self._description = type(class_to_mock).__name__
+        except Exception:
+            pass
+
+        for method in dir(class_to_mock):
+            attr = getattr(class_to_mock, method)
+            if callable(attr):
+                self._known_methods.add(method)
+            elif type(attr) is not property:
+                # treating properties as class vars makes little sense.
+                self._known_vars.add(method)
+
+        # Set additional attributes at instantiation time; this is quicker
+        # than manually setting attributes that are normally created in
+        # __init__.
+        for attr, value in attrs.items():
+            if attr.startswith("_"):
+                raise PrivateAttributeError(attr)
+            elif attr in self._known_methods:
+                raise ValueError("'%s' is a method of '%s' objects." % (attr,
+                                 class_to_mock))
+            else:
+                setattr(self, attr, value)
+
+    def _CreateMockMethod(self, *args, **kwargs):
+        """Overridden to provide self._class_to_mock to class_to_bind."""
+        kwargs.setdefault("class_to_bind", self._class_to_bind)
+        return super(MockObject, self)._CreateMockMethod(*args, **kwargs)
+
+    def __getattr__(self, name):
+        """Intercept attribute request on this object.
+
+        If the attribute is a public class variable, it will be returned and
+        not recorded as a call.
+
+        If the attribute is not a variable, it is handled like a method
+        call. The method name is checked against the set of mockable
+        methods, and a new MockMethod is returned that is aware of the
+        MockObject's state (record or replay).    The call will be recorded
+        or replayed by the MockMethod's __call__.
+
+        Args:
+            # name: the name of the attribute being requested.
+            name: str
+
+        Returns:
+            Either a class variable or a new MockMethod that is aware of the
+            state of the mock (record or replay).
+
+        Raises:
+            UnknownMethodCallError if the MockObject does not mock the
+            requested method.
+        """
+
+        if name in self._known_vars:
+            return getattr(self._class_to_mock, name)
+
+        if name in self._known_methods:
+            return self._CreateMockMethod(
+                name,
+                method_to_mock=getattr(self._class_to_mock, name))
+
+        raise UnknownMethodCallError(name)
+
+    def __eq__(self, rhs):
+        """Provide custom logic to compare objects."""
+
+        return (isinstance(rhs, MockObject) and
+                self._class_to_mock == rhs._class_to_mock and
+                self._replay_mode == rhs._replay_mode and
+                self._expected_calls_queue == rhs._expected_calls_queue)
+
+    def __setitem__(self, key, value):
+        """Custom logic for mocking classes that support item assignment.
+
+        Args:
+            key: Key to set the value for.
+            value: Value to set.
+
+        Returns:
+            Expected return value in replay mode. A MockMethod object for the
+            __setitem__ method that has already been called if not in replay
+            mode.
+
+        Raises:
+            TypeError if the underlying class does not support item assignment.
+            UnexpectedMethodCallError if the object does not expect the call to
+                __setitem__.
+
+        """
+        # Verify the class supports item assignment.
+        if '__setitem__' not in dir(self._class_to_mock):
+            raise TypeError('object does not support item assignment')
+
+        # If we are in replay mode then simply call the mock __setitem__ method
+        if self._replay_mode:
+            return MockMethod('__setitem__', self._expected_calls_queue,
+                              self._replay_mode)(key, value)
+
+        # Otherwise, create a mock method __setitem__.
+        return self._CreateMockMethod('__setitem__')(key, value)
+
+    def __getitem__(self, key):
+        """Provide custom logic for mocking classes that are subscriptable.
+
+        Args:
+            key: Key to return the value for.
+
+        Returns:
+            Expected return value in replay mode. A MockMethod object for the
+            __getitem__ method that has already been called if not in replay
+            mode.
+
+        Raises:
+            TypeError if the underlying class is not subscriptable.
+            UnexpectedMethodCallError if the object does not expect the call to
+                __getitem__.
+
+        """
+        # Verify the class is subscriptable.
+        if '__getitem__' not in dir(self._class_to_mock):
+            raise TypeError('unsubscriptable object')
+
+        # If we are in replay mode then simply call the mock __getitem__ method
+        if self._replay_mode:
+            return MockMethod('__getitem__', self._expected_calls_queue,
+                              self._replay_mode)(key)
+
+        # Otherwise, create a mock method __getitem__.
+        return self._CreateMockMethod('__getitem__')(key)
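+
+    # Illustrative sketch: item access is recorded and replayed like any
+    # other call when the mocked class supports it; `dict` is just a
+    # convenient example and `mox` is a Mox instance.
+    #
+    #     mock_cfg = mox.CreateMock(dict)
+    #     mock_cfg['timeout'] = 30               # expect __setitem__
+    #     mock_cfg['timeout'].AndReturn(30)      # expect __getitem__
+    #     mox.ReplayAll()
+    #     mock_cfg['timeout'] = 30
+    #     assert mock_cfg['timeout'] == 30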
+
+    def __iter__(self):
+        """Provide custom logic for mocking classes that are iterable.
+
+        Returns:
+            Expected return value in replay mode. A MockMethod object for the
+            __iter__ method that has already been called if not in replay mode.
+
+        Raises:
+            TypeError if the underlying class is not iterable.
+            UnexpectedMethodCallError if the object does not expect the call to
+                __iter__.
+
+        """
+        methods = dir(self._class_to_mock)
+
+        # Verify the class supports iteration.
+        if '__iter__' not in methods:
+            # If it doesn't have an __iter__ method and we are in replay mode,
+            # then try to iterate using subscripts.
+            if '__getitem__' not in methods or not self._replay_mode:
+                raise TypeError('not iterable object')
+            else:
+                results = []
+                index = 0
+                try:
+                    while True:
+                        results.append(self[index])
+                        index += 1
+                except IndexError:
+                    return iter(results)
+
+        # If we are in replay mode then simply call the mock __iter__ method.
+        if self._replay_mode:
+            return MockMethod('__iter__', self._expected_calls_queue,
+                              self._replay_mode)()
+
+        # Otherwise, create a mock method __iter__.
+        return self._CreateMockMethod('__iter__')()
+
+    def __contains__(self, key):
+        """Provide custom logic for mocking classes that contain items.
+
+        Args:
+            key: Key to look in container for.
+
+        Returns:
+            Expected return value in replay mode. A MockMethod object for the
+            __contains__ method that has already been called if not in replay
+            mode.
+
+        Raises:
+            TypeError if the underlying class does not implement __contains__.
+            UnexpectedMethodCallError if the object does not expect the call
+                to __contains__.
+
+        """
+        contains = self._class_to_mock.__dict__.get('__contains__', None)
+
+        if contains is None:
+            raise TypeError('unsubscriptable object')
+
+        if self._replay_mode:
+            return MockMethod('__contains__', self._expected_calls_queue,
+                              self._replay_mode)(key)
+
+        return self._CreateMockMethod('__contains__')(key)
+
+    def __call__(self, *params, **named_params):
+        """Provide custom logic for mocking classes that are callable."""
+
+        # Verify the class we are mocking is callable.
+        is_callable = hasattr(self._class_to_mock, '__call__')
+        if not is_callable:
+            raise TypeError('Not callable')
+
+        # Because the call is happening directly on this object instead of
+        # a method, the call on the mock method is made right here
+
+        # If we are mocking a Function, then use the function, and not the
+        # __call__ method
+        method = None
+        if type(self._class_to_mock) in (types.FunctionType, types.MethodType):
+            method = self._class_to_mock
+        else:
+            method = getattr(self._class_to_mock, '__call__')
+        mock_method = self._CreateMockMethod('__call__', method_to_mock=method)
+
+        return mock_method(*params, **named_params)
+
+    @property
+    def __name__(self):
+        """Return the name that is being mocked."""
+        return self._description
+
+    # TODO(dejw): this property stopped working after I introduced changes
+    #     with binding classes. Fortunately I found a solution in the form of
+    #     the __getattribute__ method below, but this issue should be
+    #     investigated.
+    @property
+    def __class__(self):
+        return self._class_to_mock
+
+    def __dir__(self):
+        """Return only attributes of a class to mock."""
+        return dir(self._class_to_mock)
+
+    def __getattribute__(self, name):
+        """Return _class_to_mock on __class__ attribute."""
+        if name == "__class__":
+            return super(MockObject, self).__getattribute__("_class_to_mock")
+
+        return super(MockObject, self).__getattribute__(name)
+
+
+class _MockObjectFactory(MockObject):
+    """A MockObjectFactory creates mocks and verifies __init__ params.
+
+    A MockObjectFactory removes the boilerplate code that was previously
+    necessary to stub out direct instantiation of a class.
+
+    The MockObjectFactory creates new MockObjects when called and verifies the
+    __init__ params are correct when in record mode. When replaying,
+    existing mocks are returned, and the __init__ params are verified.
+
+    See StubOutWithMock vs StubOutClassWithMocks for more detail.
+    """
+
+    def __init__(self, class_to_mock, mox_instance):
+        MockObject.__init__(self, class_to_mock)
+        self._mox = mox_instance
+        self._instance_queue = collections.deque()
+
+    def __call__(self, *params, **named_params):
+        """Instantiate and record that a new mock has been created."""
+
+        method = getattr(self._class_to_mock, '__init__')
+        mock_method = self._CreateMockMethod('__init__', method_to_mock=method)
+        # Note: calling mock_method() is deferred in order to catch the
+        # empty instance_queue first.
+
+        if self._replay_mode:
+            if not self._instance_queue:
+                raise UnexpectedMockCreationError(self._class_to_mock, *params,
+                                                  **named_params)
+
+            mock_method(*params, **named_params)
+
+            return self._instance_queue.pop()
+        else:
+            mock_method(*params, **named_params)
+
+            instance = self._mox.CreateMock(self._class_to_mock)
+            self._instance_queue.appendleft(instance)
+            return instance
+
+    def _Verify(self):
+        """Verify that all mocks have been created."""
+        if self._instance_queue:
+            raise ExpectedMockCreationError(self._instance_queue)
+        super(_MockObjectFactory, self)._Verify()
+
+
+class MethodSignatureChecker(object):
+    """Ensures that methods are called correctly."""
+
+    _NEEDED, _DEFAULT, _GIVEN = range(3)
+
+    def __init__(self, method, class_to_bind=None):
+        """Creates a checker.
+
+        Args:
+            # method: A method to check.
+            # class_to_bind: optionally, a class used to type check first
+            #                method parameter, only used with unbound methods
+            method: function
+            class_to_bind: type or None
+
+        Raises:
+            ValueError: method could not be inspected, so checks aren't
+                        possible. Some methods and functions like built-ins
+                        can't be inspected.
+        """
+        try:
+            self._args, varargs, varkw, defaults = inspect.getargspec(method)
+        except TypeError:
+            raise ValueError('Could not get argument specification for %r'
+                             % (method,))
+        if inspect.ismethod(method) or class_to_bind:
+            self._args = self._args[1:]    # Skip 'self'.
+        self._method = method
+        # May contain the instance this method is bound to.
+        self._instance = getattr(method, "__self__", None)
+
+        # _bounded_to is the class the method is bound to, if any.
+        if self._instance:
+            self._bounded_to = self._instance.__class__
+        else:
+            self._bounded_to = class_to_bind or getattr(method, "im_class",
+                                                        None)
+
+        self._has_varargs = varargs is not None
+        self._has_varkw = varkw is not None
+        if defaults is None:
+            self._required_args = self._args
+            self._default_args = []
+        else:
+            self._required_args = self._args[:-len(defaults)]
+            self._default_args = self._args[-len(defaults):]
+
+    def _RecordArgumentGiven(self, arg_name, arg_status):
+        """Mark an argument as being given.
+
+        Args:
+            # arg_name: The name of the argument to mark in arg_status.
+            # arg_status: Maps argument names to one of
+            #             _NEEDED, _DEFAULT, _GIVEN.
+            arg_name: string
+            arg_status: dict
+
+        Raises:
+            AttributeError: arg_name is already marked as _GIVEN.
+        """
+        if arg_status.get(arg_name, None) == MethodSignatureChecker._GIVEN:
+            raise AttributeError('%s provided more than once' % (arg_name,))
+        arg_status[arg_name] = MethodSignatureChecker._GIVEN
+
+    def Check(self, params, named_params):
+        """Ensures that the parameters used while recording a call are valid.
+
+        Args:
+            # params: A list of positional parameters.
+            # named_params: A dict of named parameters.
+            params: list
+            named_params: dict
+
+        Raises:
+            AttributeError: the given parameters don't work with the given
+                            method.
+        """
+        arg_status = dict((a, MethodSignatureChecker._NEEDED)
+                          for a in self._required_args)
+        for arg in self._default_args:
+            arg_status[arg] = MethodSignatureChecker._DEFAULT
+
+        # WARNING: Suspect hack ahead.
+        #
+        # Check to see if this is an unbound method, where the instance
+        # should be bound as the first argument. We try to determine if
+        # the first argument (params[0]) is an instance of the class, or it
+        # is equivalent to the class (used to account for Comparators).
+        #
+        # NOTE: If a Func() comparator is used, and the signature is not
+        # correct, this will cause extra executions of the function.
+        if inspect.ismethod(self._method) or self._bounded_to:
+            # The extra param accounts for the bound instance.
+            if len(params) > len(self._required_args):
+                expected = self._bounded_to
+
+                # Check if the param is an instance of the expected class,
+                # or check equality (useful for checking Comparators).
+
+                # This is a hack to work around the fact that the first
+                # parameter can be a Comparator, and the comparison may raise
+                # an exception during this comparison, which is OK.
+                try:
+                    param_equality = (params[0] == expected)
+                except Exception:
+                    param_equality = False
+
+                if isinstance(params[0], expected) or param_equality:
+                    params = params[1:]
+                # If the IsA() comparator is being used, we need to check the
+                # inverse of the usual case - that the given instance is a
+                # subclass of the expected class. For example, the code under
+                # test does late binding to a subclass.
+                elif (isinstance(params[0], IsA) and
+                      params[0]._IsSubClass(expected)):
+                    params = params[1:]
+
+        # Check that each positional param is valid.
+        for i in range(len(params)):
+            try:
+                arg_name = self._args[i]
+            except IndexError:
+                if not self._has_varargs:
+                    raise AttributeError(
+                        '%s does not take %d or more positional '
+                        'arguments' % (self._method.__name__, i))
+            else:
+                self._RecordArgumentGiven(arg_name, arg_status)
+
+        # Check each keyword argument.
+        for arg_name in named_params:
+            if arg_name not in arg_status and not self._has_varkw:
+                raise AttributeError('%s is not expecting keyword argument %s'
+                                     % (self._method.__name__, arg_name))
+            self._RecordArgumentGiven(arg_name, arg_status)
+
+        # Ensure all the required arguments have been given.
+        still_needed = [k for k, v in arg_status.items()
+                        if v == MethodSignatureChecker._NEEDED]
+        if still_needed:
+            raise AttributeError('No values given for arguments: %s'
+                                 % (' '.join(sorted(still_needed))))
+
+
+class MockMethod(object):
+    """Callable mock method.
+
+    A MockMethod should act exactly like the method it mocks, accepting
+    parameters and returning a value, or throwing an exception (as specified).
+    When this method is called, it can optionally verify whether the called
+    method (name and signature) matches the expected method.
+    """
+
+    def __init__(self, method_name, call_queue, replay_mode,
+                 method_to_mock=None, description=None, class_to_bind=None):
+        """Construct a new mock method.
+
+        Args:
+            # method_name: the name of the method
+            # call_queue: deque of calls, verify this call against the head,
+            #             or add this call to the queue.
+            # replay_mode: False if we are recording, True if we are verifying
+            #              calls against the call queue.
+            # method_to_mock: The actual method being mocked, used for
+            #                 introspection.
+            # description: optionally, a descriptive name for this method.
+            #              Typically this is equal to the descriptive name of
+            #              the method's class.
+            # class_to_bind: optionally, a class that is used for unbound
+            #                methods (or functions in Python 3) to which the
+            #                method is bound, in order not to lose binding
+            #                information. If given, it will be used for
+            #                checking the type of the first method parameter.
+            method_name: str
+            call_queue: list or deque
+            replay_mode: bool
+            method_to_mock: a method object
+            description: str or None
+            class_to_bind: type or None
+        """
+
+        self._name = method_name
+        self.__name__ = method_name
+        self._call_queue = call_queue
+        if not isinstance(call_queue, collections.deque):
+            self._call_queue = collections.deque(self._call_queue)
+        self._replay_mode = replay_mode
+        self._description = description
+
+        self._params = None
+        self._named_params = None
+        self._return_value = None
+        self._exception = None
+        self._side_effects = None
+
+        try:
+            self._checker = MethodSignatureChecker(method_to_mock,
+                                                   class_to_bind=class_to_bind)
+        except ValueError:
+            self._checker = None
+
+    def __call__(self, *params, **named_params):
+        """Log parameters and return the specified return value.
+
+        If the Mock(Anything/Object) associated with this call is in record
+        mode, this MockMethod will be pushed onto the expected call queue.
+        If the mock is in replay mode, this will pop a MockMethod off the
+        top of the queue and verify this call is equal to the expected call.
+
+        Raises:
+            UnexpectedMethodCallError if this call is supposed to match an
+                expected method call and it does not.
+        """
+
+        self._params = params
+        self._named_params = named_params
+
+        if not self._replay_mode:
+            if self._checker is not None:
+                self._checker.Check(params, named_params)
+            self._call_queue.append(self)
+            return self
+
+        expected_method = self._VerifyMethodCall()
+
+        if expected_method._side_effects:
+            result = expected_method._side_effects(*params, **named_params)
+            if expected_method._return_value is None:
+                expected_method._return_value = result
+
+        if expected_method._exception:
+            raise expected_method._exception
+
+        return expected_method._return_value
+
+    def __getattr__(self, name):
+        """Raise an AttributeError with a helpful message."""
+
+        raise AttributeError(
+            'MockMethod has no attribute "%s". '
+            'Did you remember to put your mocks in replay mode?' % name)
+
+    def __iter__(self):
+        """Raise a TypeError with a helpful message."""
+        raise TypeError(
+            'MockMethod cannot be iterated. '
+            'Did you remember to put your mocks in replay mode?')
+
+    def next(self):
+        """Raise a TypeError with a helpful message."""
+        raise TypeError(
+            'MockMethod cannot be iterated. '
+            'Did you remember to put your mocks in replay mode?')
+
+    def __next__(self):
+        """Raise a TypeError with a helpful message."""
+        raise TypeError(
+            'MockMethod cannot be iterated. '
+            'Did you remember to put your mocks in replay mode?')
+
+    def _PopNextMethod(self):
+        """Pop the next method from our call queue."""
+        try:
+            return self._call_queue.popleft()
+        except IndexError:
+            raise UnexpectedMethodCallError(self, None)
+
+    def _VerifyMethodCall(self):
+        """Verify the called method is expected.
+
+        This can be an ordered method, or part of an unordered set.
+
+        Returns:
+            The expected mock method.
+
+        Raises:
+            UnexpectedMethodCallError if the method called was not expected.
+        """
+
+        expected = self._PopNextMethod()
+
+        # Loop here, because we might have a MethodGroup followed by another
+        # group.
+        while isinstance(expected, MethodGroup):
+            expected, method = expected.MethodCalled(self)
+            if method is not None:
+                return method
+
+        # This is a mock method, so just check equality.
+        if expected != self:
+            raise UnexpectedMethodCallError(self, expected)
+
+        return expected
+
+    def __str__(self):
+        params = ', '.join(
+            [repr(p) for p in self._params or []] +
+            ['%s=%r' % x for x in sorted((self._named_params or {}).items())])
+        full_desc = "%s(%s) -> %r" % (self._name, params, self._return_value)
+        if self._description:
+            full_desc = "%s.%s" % (self._description, full_desc)
+        return full_desc
+
+    def __hash__(self):
+        return id(self)
+
+    def __eq__(self, rhs):
+        """Test whether this MockMethod is equivalent to another MockMethod.
+
+        Args:
+            # rhs: the right hand side of the test
+            rhs: MockMethod
+        """
+
+        return (isinstance(rhs, MockMethod) and
+                self._name == rhs._name and
+                self._params == rhs._params and
+                self._named_params == rhs._named_params)
+
+    def __ne__(self, rhs):
+        """Test if this MockMethod is not equivalent to another MockMethod.
+
+        Args:
+            # rhs: the right hand side of the test
+            rhs: MockMethod
+        """
+
+        return not self == rhs
+
+    def GetPossibleGroup(self):
+        """Returns a possible group from the end of the call queue.
+
+        Returns None if no other methods are in the queue.
+        """
+
+        # Remove this method from the tail of the queue so we can add it
+        # to a group.
+        this_method = self._call_queue.pop()
+        assert this_method == self
+
+        # Determine if the tail of the queue is a group, or just a regular
+        # ordered mock method.
+        group = None
+        try:
+            group = self._call_queue[-1]
+        except IndexError:
+            pass
+
+        return group
+
+    def _CheckAndCreateNewGroup(self, group_name, group_class):
+        """Checks if the last method (a possible group) is an instance of our
+        group_class. Adds the current method to this group or creates a
+        new one.
+
+        Args:
+            group_name: the name of the group.
+            group_class: the class used to create an instance of this new
+                         group.
+        """
+        group = self.GetPossibleGroup()
+
+        # If this is a group, and it is the correct group, add the method.
+        if isinstance(group, group_class) and group.group_name() == group_name:
+            group.AddMethod(self)
+            return self
+
+        # Create a new group and add the method.
+        new_group = group_class(group_name)
+        new_group.AddMethod(self)
+        self._call_queue.append(new_group)
+        return self
+
+    def InAnyOrder(self, group_name="default"):
+        """Move this method into a group of unordered calls.
+
+        A group of unordered calls must be defined together, and must be
+        executed in full before the next expected method can be called.
+        There can be multiple groups that are expected serially, if they are
+        given different group names. The same group name can be reused if there
+        is a standard method call, or a group with a different name, spliced
+        between usages.
+
+        Args:
+            group_name: the name of the unordered group.
+
+        Returns:
+            self
+        """
+        return self._CheckAndCreateNewGroup(group_name, UnorderedGroup)
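+
+    # Illustrative sketch: calls recorded in the same unordered group may be
+    # replayed in either order (`mock_dao` and `mox` are hypothetical).
+    #
+    #     mock_dao.Load(1).InAnyOrder().AndReturn('one')
+    #     mock_dao.Load(2).InAnyOrder().AndReturn('two')
+    #     mox.ReplayAll()
+    #     mock_dao.Load(2)    # order within the group does not matter
+    #     mock_dao.Load(1)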
+
+    def MultipleTimes(self, group_name="default"):
+        """Move method into group of calls which may be called multiple times.
+
+        A group of repeating calls must be defined together, and must be
+        executed in full before the next expected method can be called.
+
+        Args:
+            group_name: the name of the group of repeating calls.
+
+        Returns:
+            self
+        """
+        return self._CheckAndCreateNewGroup(group_name, MultipleTimesGroup)
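+
+    # Illustrative sketch: a call expected one or more times (`mock_conn`
+    # and `mox` are hypothetical).
+    #
+    #     mock_conn.Flush().MultipleTimes().AndReturn(True)
+    #     mox.ReplayAll()
+    #     mock_conn.Flush()    # may be called any number of times (>= 1)
+    #     mock_conn.Flush()
+    #     mox.VerifyAll()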
+
+    def AndReturn(self, return_value):
+        """Set the value to return when this method is called.
+
+        Args:
+            # return_value can be anything.
+        """
+
+        self._return_value = return_value
+        return return_value
+
+    def AndRaise(self, exception):
+        """Set the exception to raise when this method is called.
+
+        Args:
+            # exception: the exception to raise when this method is called.
+            exception: Exception
+        """
+
+        self._exception = exception
+
+    def WithSideEffects(self, side_effects):
+        """Set the side effects that are simulated when this method is called.
+
+        Args:
+            side_effects: A callable which modifies the parameters or other
+                          relevant state which a given test case depends on.
+
+        Returns:
+            Self for chaining with AndReturn and AndRaise.
+        """
+        self._side_effects = side_effects
+        return self
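+
+    # Illustrative sketch: capture the argument the code under test passes
+    # (`mock_log`, `Write` and `captured` are hypothetical).
+    #
+    #     captured = []
+    #     mock_log.Write(IgnoreArg()).WithSideEffects(captured.append)
+    #     mox.ReplayAll()
+    #     mock_log.Write('hello')    # appends 'hello' to captured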
+
+
+class Comparator:
+    """Base class for all Mox comparators.
+
+    A Comparator can be used as a parameter to a mocked method when the exact
+    value is not known. For example, the code you are testing might build up
+    a long SQL string that is passed to your mock DAO. You're only interested
+    in whether the IN clause contains the proper primary keys, so you can set
+    your mock up as follows:
+
+    mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
+
+    Now whatever query is passed in must contain the string 'IN (1, 2, 4, 5)'.
+
+    A Comparator may replace one or more parameters, for example:
+    # return at most 10 rows
+    mock_dao.RunQuery(StrContains('SELECT'), 10)
+
+    or
+
+    # Return some non-deterministic number of rows
+    mock_dao.RunQuery(StrContains('SELECT'), IsA(int))
+    """
+
+    def equals(self, rhs):
+        """Special equals method that all comparators must implement.
+
+        Args:
+            rhs: any python object
+        """
+
+        raise NotImplementedError('method must be implemented by a subclass.')
+
+    def __eq__(self, rhs):
+        return self.equals(rhs)
+
+    def __ne__(self, rhs):
+        return not self.equals(rhs)
+
+
+class Is(Comparator):
+    """Comparison class used to check identity, instead of equality."""
+
+    def __init__(self, obj):
+        self._obj = obj
+
+    def equals(self, rhs):
+        return rhs is self._obj
+
+    def __repr__(self):
+        return "<is %r (%s)>" % (self._obj, id(self._obj))
+
+
+class IsA(Comparator):
+    """This class wraps a basic Python type or class.    It is used to verify
+    that a parameter is of the given type or class.
+
+    Example:
+    mock_dao.Connect(IsA(DbConnectInfo))
+    """
+
+    def __init__(self, class_name):
+        """Initialize IsA
+
+        Args:
+            class_name: basic python type or a class
+        """
+
+        self._class_name = class_name
+
+    def equals(self, rhs):
+        """Check to see if the RHS is an instance of class_name.
+
+        Args:
+            # rhs: the right hand side of the test
+            rhs: object
+
+        Returns:
+            bool
+        """
+
+        try:
+            return isinstance(rhs, self._class_name)
+        except TypeError:
+            # Check raw types if there was a type error.    This is helpful for
+            # things like cStringIO.StringIO.
+            return type(rhs) == type(self._class_name)
+
+    def _IsSubClass(self, clazz):
+        """Check to see if the IsA comparators class is a subclass of clazz.
+
+        Args:
+            # clazz: a class object
+
+        Returns:
+            bool
+        """
+
+        try:
+            return issubclass(self._class_name, clazz)
+        except TypeError:
+            # Check raw types if there was a type error.    This is helpful for
+            # things like cStringIO.StringIO.
+            return type(clazz) == type(self._class_name)
+
+    def __repr__(self):
+        return 'mox.IsA(%s) ' % str(self._class_name)
+
+
+class IsAlmost(Comparator):
+    """Comparison class used to check whether a parameter is nearly equal
+    to a given value. Generally useful for floating point numbers.
+
+    Example:
+    mock_dao.SetTimeout(IsAlmost(3.9))
+    """
+
+    def __init__(self, float_value, places=7):
+        """Initialize IsAlmost.
+
+        Args:
+            float_value: The value for making the comparison.
+            places: The number of decimal places to round to.
+        """
+
+        self._float_value = float_value
+        self._places = places
+
+    def equals(self, rhs):
+        """Check to see if RHS is almost equal to float_value
+
+        Args:
+            rhs: the value to compare to float_value
+
+        Returns:
+            bool
+        """
+
+        try:
+            return round(rhs - self._float_value, self._places) == 0
+        except Exception:
+            # Probably because either float_value or rhs is not a number.
+            return False
+
+    def __repr__(self):
+        return str(self._float_value)
+
+
+class StrContains(Comparator):
+    """Comparison class used to check whether a substring exists in a
+    string parameter. This can be useful when mocking a database, for
+    example, with SQL queries passed in as strings.
+
+    Example:
+    mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
+    """
+
+    def __init__(self, search_string):
+        """Initialize.
+
+        Args:
+            # search_string: the string you are searching for
+            search_string: str
+        """
+
+        self._search_string = search_string
+
+    def equals(self, rhs):
+        """Check to see if the search_string is contained in the rhs string.
+
+        Args:
+            # rhs: the right hand side of the test
+            rhs: object
+
+        Returns:
+            bool
+        """
+
+        try:
+            return rhs.find(self._search_string) > -1
+        except Exception:
+            return False
+
+    def __repr__(self):
+        return '<str containing \'%s\'>' % self._search_string
+
+
+class Regex(Comparator):
+    """Checks if a string matches a regular expression.
+
+    This uses a given regular expression to determine equality.
+    """
+
+    def __init__(self, pattern, flags=0):
+        """Initialize.
+
+        Args:
+            # pattern is the regular expression to search for
+            pattern: str
+            # flags passed to re.compile function as the second argument
+            flags: int
+        """
+        self.flags = flags
+        self.regex = re.compile(pattern, flags=flags)
+
+    def equals(self, rhs):
+        """Check to see if rhs matches regular expression pattern.
+
+        Returns:
+            bool
+        """
+
+        try:
+            return self.regex.search(rhs) is not None
+        except Exception:
+            return False
+
+    def __repr__(self):
+        s = '<regular expression \'%s\'' % self.regex.pattern
+        if self.flags:
+            s += ', flags=%d' % self.flags
+        s += '>'
+        return s
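+
+    # Illustrative sketch (`mock_log` and `mox` are hypothetical):
+    #
+    #     mock_log.Write(Regex(r'^ERROR \d+:'))
+    #     mox.ReplayAll()
+    #     mock_log.Write('ERROR 42: out of cheese')    # matches the pattern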
+
+
+class In(Comparator):
+    """Checks whether an item (or key) is in a list (or dict) parameter.
+
+    Example:
+    mock_dao.GetUsersInfo(In('expectedUserName')).AndReturn(mock_result)
+    """
+
+    def __init__(self, key):
+        """Initialize.
+
+        Args:
+            # key is anything that could be in a list or a key in a dict
+        """
+
+        self._key = key
+
+    def equals(self, rhs):
+        """Check to see whether key is in rhs.
+
+        Args:
+            rhs: a sequence or mapping
+
+        Returns:
+            bool
+        """
+
+        try:
+            return self._key in rhs
+        except Exception:
+            return False
+
+    def __repr__(self):
+        return '<sequence or map containing \'%s\'>' % str(self._key)
+
+
+class Not(Comparator):
+    """Checks whether a predicates is False.
+
+    Example:
+        mock_dao.UpdateUsers(Not(ContainsKeyValue('stevepm',
+                                                  stevepm_user_info)))
+    """
+
+    def __init__(self, predicate):
+        """Initialize.
+
+        Args:
+            # predicate: a Comparator instance.
+        """
+
+        assert isinstance(predicate, Comparator), ("predicate %r must be a"
+                                                   " Comparator." % predicate)
+        self._predicate = predicate
+
+    def equals(self, rhs):
+        """Check to see whether the predicate is False.
+
+        Args:
+            rhs: A value that will be passed as the argument to the predicate.
+
+        Returns:
+            bool
+        """
+
+        try:
+            return not self._predicate.equals(rhs)
+        except Exception:
+            return False
+
+    def __repr__(self):
+        return '<not \'%s\'>' % self._predicate
+
+
+class ContainsKeyValue(Comparator):
+    """Checks whether a key/value pair is in a dict parameter.
+
+    Example:
+    mock_dao.UpdateUsers(ContainsKeyValue('stevepm', stevepm_user_info))
+    """
+
+    def __init__(self, key, value):
+        """Initialize.
+
+        Args:
+            # key: a key in a dict
+            # value: the corresponding value
+        """
+
+        self._key = key
+        self._value = value
+
+    def equals(self, rhs):
+        """Check whether the given key/value pair is in the rhs dict.
+
+        Returns:
+            bool
+        """
+
+        try:
+            return rhs[self._key] == self._value
+        except Exception:
+            return False
+
+    def __repr__(self):
+        return '<map containing the entry \'%s: %s\'>' % (str(self._key),
+                                                          str(self._value))
+
+
+class ContainsAttributeValue(Comparator):
+    """Checks whether passed parameter contains attributes with a given value.
+
+    Example:
+    mock_dao.UpdateSomething(
+        ContainsAttributeValue('stevepm', stevepm_user_info))
+    """
+
+    def __init__(self, key, value):
+        """Initialize.
+
+        Args:
+            # key: an attribute name of an object
+            # value: the corresponding value
+        """
+
+        self._key = key
+        self._value = value
+
+    def equals(self, rhs):
+        """Check if the given attribute has a matching value in the rhs object.
+
+        Returns:
+            bool
+        """
+
+        try:
+            return getattr(rhs, self._key) == self._value
+        except Exception:
+            return False
+
+
+class SameElementsAs(Comparator):
+    """Checks whether sequences contain the same elements (ignoring order).
+
+    Example:
+    mock_dao.ProcessUsers(SameElementsAs(['stevepm', 'salomaki']))
+    """
+
+    def __init__(self, expected_seq):
+        """Initialize.
+
+        Args:
+            expected_seq: a sequence
+        """
+        # Store in case expected_seq is an iterator.
+        self._expected_list = list(expected_seq)
+
+    def equals(self, actual_seq):
+        """Check to see whether actual_seq has same elements as expected_seq.
+
+        Args:
+            actual_seq: sequence
+
+        Returns:
+            bool
+        """
+        try:
+            # Store in case actual_seq is an iterator. We potentially iterate
+            # twice: once to make the set, once in the list fallback.
+            actual_list = list(actual_seq)
+        except TypeError:
+            # actual_seq cannot be read as a sequence.
+            #
+            # This happens because Mox uses __eq__ both to check object
+            # equality (in MethodSignatureChecker) and to invoke Comparators.
+            return False
+
+        try:
+            return set(self._expected_list) == set(actual_list)
+        except TypeError:
+            # Fall back to slower list-compare if any of the objects
+            # are unhashable.
+            if len(self._expected_list) != len(actual_list):
+                return False
+            for el in actual_list:
+                if el not in self._expected_list:
+                    return False
+        return True
+
+    def __repr__(self):
+        return '<sequence with same elements as \'%s\'>' % self._expected_list
+
+
+class And(Comparator):
+    """Evaluates one or more Comparators on RHS, returns an AND of the results.
+    """
+
+    def __init__(self, *args):
+        """Initialize.
+
+        Args:
+            *args: One or more Comparator
+        """
+
+        self._comparators = args
+
+    def equals(self, rhs):
+        """Checks whether all Comparators are equal to rhs.
+
+        Args:
+            # rhs: can be anything
+
+        Returns:
+            bool
+        """
+
+        for comparator in self._comparators:
+            if not comparator.equals(rhs):
+                return False
+
+        return True
+
+    def __repr__(self):
+        return '<AND %s>' % str(self._comparators)
+
+
+class Or(Comparator):
+    """Evaluates one or more Comparators on RHS; returns OR of the results."""
+
+    def __init__(self, *args):
+        """Initialize.
+
+        Args:
+            *args: One or more Mox comparators
+        """
+
+        self._comparators = args
+
+    def equals(self, rhs):
+        """Checks whether any Comparator is equal to rhs.
+
+        Args:
+            # rhs: can be anything
+
+        Returns:
+            bool
+        """
+
+        for comparator in self._comparators:
+            if comparator.equals(rhs):
+                return True
+
+        return False
+
+    def __repr__(self):
+        return '<OR %s>' % str(self._comparators)
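+
+    # Illustrative sketch combining comparators (`mock_dao` is hypothetical):
+    #
+    #     mock_dao.RunQuery(And(StrContains('SELECT'), StrContains('WHERE')))
+    #     mock_dao.RunQuery(Or(StrContains('INSERT'), StrContains('UPDATE')))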
+
+
+class Func(Comparator):
+    """Call a function that should verify the parameter passed in is correct.
+
+    You may need the ability to perform more advanced operations on the
+    parameter in order to validate it. You can use this to have a callable
+    validate any parameter. The callable should return either True or False.
+
+    Example:
+
+    def myParamValidator(param):
+        # Advanced logic here
+        return True
+
+    mock_dao.DoSomething(Func(myParamValidator), True)
+    """
+
+    def __init__(self, func):
+        """Initialize.
+
+        Args:
+            func: callable that takes one parameter and returns a bool
+        """
+
+        self._func = func
+
+    def equals(self, rhs):
+        """Test whether rhs passes the function test.
+
+        rhs is passed into func.
+
+        Args:
+            rhs: any python object
+
+        Returns:
+            the result of func(rhs)
+        """
+
+        return self._func(rhs)
+
+    def __repr__(self):
+        return str(self._func)
+
+
+class IgnoreArg(Comparator):
+    """Ignore an argument.
+
+    This can be used when we don't care about an argument of a method call.
+
+    Example:
+    # Check if CastMagic is called with 3 as first arg and
+    # 'disappear' as third.
+    mymock.CastMagic(3, IgnoreArg(), 'disappear')
+    """
+
+    def equals(self, unused_rhs):
+        """Ignores arguments and returns True.
+
+        Args:
+            unused_rhs: any python object
+
+        Returns:
+            always returns True
+        """
+
+        return True
+
+    def __repr__(self):
+        return '<IgnoreArg>'
+
+
+class Value(Comparator):
+    """Compares argument against a remembered value.
+
+    To be used in conjunction with the Remember comparator. See Remember()
+    for an example.
+    """
+
+    def __init__(self):
+        self._value = None
+        self._has_value = False
+
+    def store_value(self, rhs):
+        self._value = rhs
+        self._has_value = True
+
+    def equals(self, rhs):
+        if not self._has_value:
+            return False
+        else:
+            return rhs == self._value
+
+    def __repr__(self):
+        if self._has_value:
+            return "<Value %r>" % self._value
+        else:
+            return "<Value>"
+
+
+class Remember(Comparator):
+    """Remembers the argument to a value store.
+
+    To be used in conjunction with Value comparator.
+
+    Example:
+    # Remember the argument for one method call.
+    users_list = Value()
+    mock_dao.ProcessUsers(Remember(users_list))
+
+    # Check argument against remembered value.
+    mock_dao.ReportUsers(users_list)
+    """
+
+    def __init__(self, value_store):
+        if not isinstance(value_store, Value):
+            raise TypeError(
+                "value_store is not an instance of the Value class")
+        self._value_store = value_store
+
+    def equals(self, rhs):
+        self._value_store.store_value(rhs)
+        return True
+
+    def __repr__(self):
+        return "<Remember %d>" % id(self._value_store)
+
+
+class MethodGroup(object):
+    """Base class containing common behaviour for MethodGroups."""
+
+    def __init__(self, group_name):
+        self._group_name = group_name
+
+    def group_name(self):
+        return self._group_name
+
+    def __str__(self):
+        return '<%s "%s">' % (self.__class__.__name__, self._group_name)
+
+    def AddMethod(self, mock_method):
+        raise NotImplementedError
+
+    def MethodCalled(self, mock_method):
+        raise NotImplementedError
+
+    def IsSatisfied(self):
+        raise NotImplementedError
+
+
+class UnorderedGroup(MethodGroup):
+    """UnorderedGroup holds a set of method calls that may occur in any order.
+
+    This construct is helpful for non-deterministic events, such as iterating
+    over the keys of a dict.
+    """
+
+    def __init__(self, group_name):
+        super(UnorderedGroup, self).__init__(group_name)
+        self._methods = []
+
+    def __str__(self):
+        return '%s "%s" pending calls:\n%s' % (
+            self.__class__.__name__,
+            self._group_name,
+            "\n".join(str(method) for method in self._methods))
+
+    def AddMethod(self, mock_method):
+        """Add a method to this group.
+
+        Args:
+            mock_method: A mock method to be added to this group.
+        """
+
+        self._methods.append(mock_method)
+
+    def MethodCalled(self, mock_method):
+        """Remove a method call from the group.
+
+        If the method is not in the set, an UnexpectedMethodCallError will be
+        raised.
+
+        Args:
+            mock_method: a mock method that should be equal to a method in the
+                         group.
+
+        Returns:
+            The mock method from the group
+
+        Raises:
+            UnexpectedMethodCallError if the mock_method was not in the group.
+        """
+
+        # Check to see if this method exists, and if so, remove it from the set
+        # and return it.
+        for method in self._methods:
+            if method == mock_method:
+                # Remove the called mock_method instead of the method in the
+                # group. The called method will match any comparators when
+                # equality is checked during removal. The method in the group
+                # could pass a comparator to another comparator during the
+                # equality check.
+                self._methods.remove(mock_method)
+
+                # If group is not empty, put it back at the head of the queue.
+                if not self.IsSatisfied():
+                    mock_method._call_queue.appendleft(self)
+
+                return self, method
+
+        raise UnexpectedMethodCallError(mock_method, self)
+
+    def IsSatisfied(self):
+        """Return True if there are not any methods in this group."""
+
+        return len(self._methods) == 0
+
+
+class MultipleTimesGroup(MethodGroup):
+    """MultipleTimesGroup holds methods that may be called any number of times.
+
+    Note: Each method must be called at least once.
+
+    This is helpful if you don't know or care how many times a method is
+    called.
+    """
+
+    def __init__(self, group_name):
+        super(MultipleTimesGroup, self).__init__(group_name)
+        self._methods = set()
+        self._methods_left = set()
+
+    def AddMethod(self, mock_method):
+        """Add a method to this group.
+
+        Args:
+            mock_method: A mock method to be added to this group.
+        """
+
+        self._methods.add(mock_method)
+        self._methods_left.add(mock_method)
+
+    def MethodCalled(self, mock_method):
+        """Remove a method call from the group.
+
+        If the method is not in the set, an UnexpectedMethodCallError will be
+        raised.
+
+        Args:
+            mock_method: a mock method that should be equal to a method in the
+                         group.
+
+        Returns:
+            The mock method from the group
+
+        Raises:
+            UnexpectedMethodCallError if the mock_method was not in the group.
+        """
+
+        # Check to see if this method exists, and if so, remove it from the
+        # set of methods still waiting to be called.
+        for method in self._methods:
+            if method == mock_method:
+                self._methods_left.discard(method)
+                # Always put this group back on top of the queue,
+                # because we don't know when we are done.
+                mock_method._call_queue.appendleft(self)
+                return self, method
+
+        if self.IsSatisfied():
+            next_method = mock_method._PopNextMethod()
+            return next_method, None
+        else:
+            raise UnexpectedMethodCallError(mock_method, self)
+
+    def IsSatisfied(self):
+        """Return True if all methods in group are called at least once."""
+        return len(self._methods_left) == 0
+
+
+class MoxMetaTestBase(type):
+    """Metaclass to add mox cleanup and verification to every test.
+
+    As the mox unit testing class is being constructed (MoxTestBase or a
+    subclass), this metaclass will modify all test functions to call the
+    CleanUpMox method of the test class after they finish. This means that
+    unstubbing and verifying will happen for every test with no additional
+    code, and any failures will result in test failures as opposed to errors.
+    """
+
+    def __init__(cls, name, bases, d):
+        type.__init__(cls, name, bases, d)
+
+        # also get all the attributes from the base classes to account
+        # for a case when test class is not the immediate child of MoxTestBase
+        for base in bases:
+            for attr_name in dir(base):
+                if attr_name not in d:
+                    d[attr_name] = getattr(base, attr_name)
+
+        for func_name, func in d.items():
+            if func_name.startswith('test') and callable(func):
+
+                setattr(cls, func_name, MoxMetaTestBase.CleanUpTest(cls, func))
+
+    @staticmethod
+    def CleanUpTest(cls, func):
+        """Adds Mox cleanup code to any MoxTestBase method.
+
+        Always unsets stubs after a test. Will verify all mocks for tests that
+        otherwise pass.
+
+        Args:
+            cls: MoxTestBase or subclass; the class whose method we are
+                                          altering.
+            func: method; the method of the MoxTestBase test class we wish to
+                          alter.
+
+        Returns:
+            The modified method.
+        """
+        def new_method(self, *args, **kwargs):
+            mox_obj = getattr(self, 'mox', None)
+            stubout_obj = getattr(self, 'stubs', None)
+            cleanup_mox = False
+            cleanup_stubout = False
+            if mox_obj and isinstance(mox_obj, Mox):
+                cleanup_mox = True
+            if stubout_obj and isinstance(stubout_obj,
+                                          stubout.StubOutForTesting):
+                cleanup_stubout = True
+            try:
+                func(self, *args, **kwargs)
+            finally:
+                if cleanup_mox:
+                    mox_obj.UnsetStubs()
+                if cleanup_stubout:
+                    stubout_obj.UnsetAll()
+                    stubout_obj.SmartUnsetAll()
+            if cleanup_mox:
+                mox_obj.VerifyAll()
+        new_method.__name__ = func.__name__
+        new_method.__doc__ = func.__doc__
+        new_method.__module__ = func.__module__
+        return new_method
+
+
+_MoxTestBase = MoxMetaTestBase('_MoxTestBase', (unittest.TestCase, ), {})
+
+
+class MoxTestBase(_MoxTestBase):
+    """Convenience test class to make stubbing easier.
+
+    Sets up a "mox" attribute which is an instance of Mox (any mox tests will
+    want this), and a "stubs" attribute that is an instance of
+    StubOutForTesting (needed at times). Also automatically unsets any stubs
+    and verifies that all mock methods have been called at the end of each
+    test, eliminating boilerplate code.
+    """
+
+    def setUp(self):
+        super(MoxTestBase, self).setUp()
+        self.mox = Mox()
+        self.stubs = stubout.StubOutForTesting()
diff --git a/catapult/third_party/mox3/mox3/stubout.py b/catapult/third_party/mox3/mox3/stubout.py
new file mode 100644
index 0000000..a02ed40
--- /dev/null
+++ b/catapult/third_party/mox3/mox3/stubout.py
@@ -0,0 +1,152 @@
+# Copyright 2008 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This is a fork of the pymox library intended to work with Python 3.
+# The file was modified by quermit@gmail.com and dawid.fatyga@gmail.com
+
+import inspect
+
+
+class StubOutForTesting(object):
+    """Sample Usage:
+
+       You want os.path.exists() to always return true during testing.
+
+       stubs = StubOutForTesting()
+       stubs.Set(os.path, 'exists', lambda x: 1)
+           ...
+       stubs.UnsetAll()
+
+       The above changes os.path.exists into a lambda that returns 1. Once
+       the ... part of the code finishes, the UnsetAll() looks up the old value
+       of os.path.exists and restores it.
+
+    """
+    def __init__(self):
+        self.cache = []
+        self.stubs = []
+
+    def __del__(self):
+        self.SmartUnsetAll()
+        self.UnsetAll()
+
+    def SmartSet(self, obj, attr_name, new_attr):
+        """Replace obj.attr_name with new_attr.
+
+        This method is smart and works at the module, class, and instance level
+        while preserving proper inheritance. It will not stub out C types,
+        however, unless that has been explicitly allowed by the type.
+
+        This method supports the case where attr_name is a staticmethod or a
+        classmethod of obj.
+
+        Notes:
+          - If obj is an instance, then it is its class that will actually be
+            stubbed. Note that the method Set() does not do that: if obj is
+            an instance, it (and not its class) will be stubbed.
+          - The stubbing is using the builtin getattr and setattr. So, the
+            __get__ and __set__ will be called when stubbing (TODO: A better
+            idea would probably be to manipulate obj.__dict__ instead of
+            getattr() and setattr()).
+
+        Raises AttributeError if the attribute cannot be found.
+        """
+        if (inspect.ismodule(obj) or
+                (not inspect.isclass(obj) and attr_name in obj.__dict__)):
+            orig_obj = obj
+            orig_attr = getattr(obj, attr_name)
+
+        else:
+            if not inspect.isclass(obj):
+                mro = list(inspect.getmro(obj.__class__))
+            else:
+                mro = list(inspect.getmro(obj))
+
+            mro.reverse()
+
+            orig_attr = None
+
+            for cls in mro:
+                try:
+                    orig_obj = cls
+                    orig_attr = getattr(obj, attr_name)
+                except AttributeError:
+                    continue
+
+        if orig_attr is None:
+            raise AttributeError("Attribute not found.")
+
+        # Calling getattr() on a staticmethod transforms it to a 'normal'
+        # function. We need to ensure that we put it back as a staticmethod.
+        old_attribute = obj.__dict__.get(attr_name)
+        if (old_attribute is not None
+                and isinstance(old_attribute, staticmethod)):
+            orig_attr = staticmethod(orig_attr)
+
+        self.stubs.append((orig_obj, attr_name, orig_attr))
+        setattr(orig_obj, attr_name, new_attr)
+
+    def SmartUnsetAll(self):
+        """Reverses all the SmartSet() calls.
+
+        Restores things to their original definition. It's okay to call
+        SmartUnsetAll() repeatedly, as later calls have no effect if no
+        SmartSet() calls have been made.
+        """
+        self.stubs.reverse()
+
+        for args in self.stubs:
+            setattr(*args)
+
+        self.stubs = []
+
+    def Set(self, parent, child_name, new_child):
+        """Replace child_name's old definition with new_child.
+
+        Replace definition in the context of the given parent. The parent could
+        be a module when the child is a function at module scope. Or the parent
+        could be a class when a class' method is being replaced. The named
+        child is set to new_child, while the prior definition is saved away
+        for later, when UnsetAll() is called.
+
+        This method supports the case where child_name is a staticmethod or a
+        classmethod of parent.
+        """
+        old_child = getattr(parent, child_name)
+
+        old_attribute = parent.__dict__.get(child_name)
+        if old_attribute is not None:
+            if isinstance(old_attribute, staticmethod):
+                old_child = staticmethod(old_child)
+            elif isinstance(old_attribute, classmethod):
+                old_child = classmethod(old_child.__func__)
+
+        self.cache.append((parent, old_child, child_name))
+        setattr(parent, child_name, new_child)
+
+    def UnsetAll(self):
+        """Reverses all the Set() calls.
+
+        Restores things to their original definition. It's okay to call
+        UnsetAll() repeatedly, as later calls have no effect if no Set()
+        calls have been made.
+        """
+        # Undo calls to Set() in reverse order, in case Set() was called on the
+        # same arguments repeatedly (want the original call to be last one
+        # undone)
+        self.cache.reverse()
+
+        for (parent, old_child, child_name) in self.cache:
+            setattr(parent, child_name, old_child)
+        self.cache = []
diff --git a/catapult/perf_insights/third_party/__init__.py b/catapult/third_party/mox3/mox3/tests/__init__.py
similarity index 100%
copy from catapult/perf_insights/third_party/__init__.py
copy to catapult/third_party/mox3/mox3/tests/__init__.py
diff --git a/catapult/third_party/mox3/mox3/tests/mox_helper.py b/catapult/third_party/mox3/mox3/tests/mox_helper.py
new file mode 100644
index 0000000..67843a9
--- /dev/null
+++ b/catapult/third_party/mox3/mox3/tests/mox_helper.py
@@ -0,0 +1,145 @@
+# Copyright 2008 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+# This is a fork of the pymox library intended to work with Python 3.
+# The file was modified by quermit@gmail.com and dawid.fatyga@gmail.com
+
+"""A very basic test class derived from mox.MoxTestBase, used by test_mox.py.
+
+The class defined in this module is used to test the features of
+MoxTestBase and is not intended to be a standalone test.  It needs to
+be in a separate module, because otherwise the tests in this class
+(which should not all pass) would be executed as part of the
+test_mox.py test suite.
+
+See test_mox.MoxTestBaseTest for how this class is actually used.
+"""
+
+import os
+
+from mox3 import mox
+
+
+class ExampleMoxTestMixin(object):
+    """Mix-in class for mox test case class.
+
+    It stubs out the same function as one of the test methods in
+    the example test case. Both tests must pass, as the metaclass wraps
+    test methods in all base classes.
+    """
+
+    def testStat(self):
+        self.mox.StubOutWithMock(os, 'stat')
+        os.stat(self.DIR_PATH)
+        self.mox.ReplayAll()
+        os.stat(self.DIR_PATH)
+
+
+class ExampleMoxTest(mox.MoxTestBase, ExampleMoxTestMixin):
+
+    DIR_PATH = '/path/to/some/directory'
+
+    def testSuccess(self):
+        self.mox.StubOutWithMock(os, 'listdir')
+        os.listdir(self.DIR_PATH)
+        self.mox.ReplayAll()
+        os.listdir(self.DIR_PATH)
+
+    def testExpectedNotCalled(self):
+        self.mox.StubOutWithMock(os, 'listdir')
+        os.listdir(self.DIR_PATH)
+        self.mox.ReplayAll()
+
+    def testUnexpectedCall(self):
+        self.mox.StubOutWithMock(os, 'listdir')
+        os.listdir(self.DIR_PATH)
+        self.mox.ReplayAll()
+        os.listdir('/path/to/some/other/directory')
+        os.listdir(self.DIR_PATH)
+
+    def testFailure(self):
+        self.assertTrue(False)
+
+    def testStatOther(self):
+        self.mox.StubOutWithMock(os, 'stat')
+        os.stat(self.DIR_PATH)
+        self.mox.ReplayAll()
+        os.stat(self.DIR_PATH)
+
+    def testHasStubs(self):
+        listdir_list = []
+
+        def MockListdir(directory):
+            listdir_list.append(directory)
+
+        self.stubs.Set(os, 'listdir', MockListdir)
+        os.listdir(self.DIR_PATH)
+        self.assertEqual([self.DIR_PATH], listdir_list)
+
+
+class TestClassFromAnotherModule(object):
+
+    def __init__(self):
+        return None
+
+    def Value(self):
+        return 'Not mock'
+
+
+class ChildClassFromAnotherModule(TestClassFromAnotherModule):
+    """A child class of TestClassFromAnotherModule.
+
+    Used to test stubbing out unbound methods, where child classes
+    are eventually bound.
+    """
+
+    def __init__(self):
+        TestClassFromAnotherModule.__init__(self)
+
+
+class CallableClass(object):
+
+    def __init__(self, one, two, nine=None):
+        pass
+
+    def __call__(self, one):
+        return 'Not mock'
+
+    def Value(self):
+        return 'Not mock'
+
+
+def MyTestFunction(one, two, nine=None):
+    pass
+
+
+class ExampleClass(object):
+    def __init__(self, foo='bar'):
+        pass
+
+    def TestMethod(self, one, two, nine=None):
+        pass
+
+    def NamedParams(self, ignore, foo='bar', baz='qux'):
+        pass
+
+    def SpecialArgs(self, *args, **kwargs):
+        pass
+
+
+# This class is used to test stubbing out __init__ of a parent class.
+class ChildExampleClass(ExampleClass):
+    def __init__(self):
+        ExampleClass.__init__(self)
diff --git a/catapult/third_party/mox3/mox3/tests/stubout_helper.py b/catapult/third_party/mox3/mox3/tests/stubout_helper.py
new file mode 100644
index 0000000..7a6b266
--- /dev/null
+++ b/catapult/third_party/mox3/mox3/tests/stubout_helper.py
@@ -0,0 +1,20 @@
+# Copyright 2008 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This is a fork of the pymox library intended to work with Python 3.
+# The file was modified by quermit@gmail.com and dawid.fatyga@gmail.com
+
+
+def SampleFunction():
+    raise Exception('I should never be called!')
diff --git a/catapult/third_party/mox3/mox3/tests/test_mox.py b/catapult/third_party/mox3/mox3/tests/test_mox.py
new file mode 100644
index 0000000..48d1ecf
--- /dev/null
+++ b/catapult/third_party/mox3/mox3/tests/test_mox.py
@@ -0,0 +1,2408 @@
+# Unit tests for Mox.
+#
+# Copyright 2008 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This is a fork of the pymox library intended to work with Python 3.
+# The file was modified by quermit@gmail.com and dawid.fatyga@gmail.com
+
+import io
+import re
+import sys
+
+from mox3 import mox
+from mox3.tests import mox_helper
+
+import six
+import testtools
+
+
+OS_LISTDIR = mox_helper.os.listdir
+
+
+class ExpectedMethodCallsErrorTest(testtools.TestCase):
+    """Test creation and string conversion of ExpectedMethodCallsError."""
+
+    def testAtLeastOneMethod(self):
+        self.assertRaises(ValueError, mox.ExpectedMethodCallsError, [])
+
+    def testOneError(self):
+        method = mox.MockMethod("testMethod", [], False)
+        method(1, 2).AndReturn('output')
+        e = mox.ExpectedMethodCallsError([method])
+        self.assertEqual(
+            "Verify: Expected methods never called:\n"
+            "  0.  testMethod(1, 2) -> 'output'",
+            str(e))
+
+    def testManyErrors(self):
+        method1 = mox.MockMethod("testMethod", [], False)
+        method1(1, 2).AndReturn('output')
+        method2 = mox.MockMethod("testMethod", [], False)
+        method2(a=1, b=2, c="only named")
+        method3 = mox.MockMethod("testMethod2", [], False)
+        method3().AndReturn(44)
+        method4 = mox.MockMethod("testMethod", [], False)
+        method4(1, 2).AndReturn('output')
+        e = mox.ExpectedMethodCallsError([method1, method2, method3, method4])
+        self.assertEqual(
+            "Verify: Expected methods never called:\n"
+            "  0.  testMethod(1, 2) -> 'output'\n"
+            "  1.  testMethod(a=1, b=2, c='only named') -> None\n"
+            "  2.  testMethod2() -> 44\n"
+            "  3.  testMethod(1, 2) -> 'output'",
+            str(e))
+
+
+class OrTest(testtools.TestCase):
+    """Test Or correctly chains Comparators."""
+
+    def testValidOr(self):
+        """Or should be True if either Comparator returns True."""
+        self.assertTrue(mox.Or(mox.IsA(dict), mox.IsA(str)) == {})
+        self.assertTrue(mox.Or(mox.IsA(dict), mox.IsA(str)) == 'test')
+        self.assertTrue(mox.Or(mox.IsA(str), mox.IsA(str)) == 'test')
+
+    def testInvalidOr(self):
+        """Or should be False if both Comparators return False."""
+        self.assertFalse(mox.Or(mox.IsA(dict), mox.IsA(str)) == 0)
+
+
+class AndTest(testtools.TestCase):
+    """Test And correctly chains Comparators."""
+
+    def testValidAnd(self):
+        """And should be True if both Comparators return True."""
+        self.assertTrue(mox.And(mox.IsA(str), mox.IsA(str)) == '1')
+
+    def testClauseOneFails(self):
+        """And should be False if the first Comparator returns False."""
+
+        self.assertFalse(mox.And(mox.IsA(dict), mox.IsA(str)) == '1')
+
+    def testAdvancedUsage(self):
+        """And should work with other Comparators.
+
+        Note: this test is reliant on In and ContainsKeyValue.
+        """
+        test_dict = {"mock": "obj", "testing": "isCOOL"}
+        self.assertTrue(mox.And(mox.In("testing"),
+                        mox.ContainsKeyValue("mock", "obj")) == test_dict)
+
+    def testAdvancedUsageFails(self):
+        """Note: this test is reliant on In and ContainsKeyValue."""
+        test_dict = {"mock": "obj", "testing": "isCOOL"}
+        self.assertFalse(mox.And(mox.In("NOTFOUND"),
+                         mox.ContainsKeyValue("mock", "obj")) == test_dict)
+
+
+class FuncTest(testtools.TestCase):
+    """Test Func correctly evaluates based upon true-false return."""
+
+    def testFuncTrueFalseEvaluation(self):
+        """Should return True if the validating function returns True."""
+        equals_one = lambda x: x == 1
+        always_none = lambda x: None
+
+        self.assertTrue(mox.Func(equals_one) == 1)
+        self.assertFalse(mox.Func(equals_one) == 0)
+
+        self.assertFalse(mox.Func(always_none) == 1)
+        self.assertFalse(mox.Func(always_none) == 0)
+        self.assertFalse(mox.Func(always_none) is None)
+
+    def testFuncExceptionPropagation(self):
+        """Exceptions within the validating function should propagate."""
+        class TestException(Exception):
+            pass
+
+        def raiseExceptionOnNotOne(value):
+            if value != 1:
+                raise TestException
+            else:
+                return True
+
+        self.assertTrue(mox.Func(raiseExceptionOnNotOne) == 1)
+        self.assertRaises(
+            TestException, mox.Func(raiseExceptionOnNotOne).__eq__, 2)
+
+
+class SameElementsAsTest(testtools.TestCase):
+    """SameElementsAs correctly identifies sequences with same elements."""
+
+    def testSortedLists(self):
+        """Should return True if two lists are exactly equal."""
+        self.assertTrue(mox.SameElementsAs([1, 2.0, 'c']) == [1, 2.0, 'c'])
+
+    def testUnsortedLists(self):
+        """Should return True if lists are unequal but have same elements."""
+        self.assertTrue(mox.SameElementsAs([1, 2.0, 'c']) == [2.0, 'c', 1])
+
+    def testUnhashableLists(self):
+        """Should return True if lists have the same unhashable elements."""
+        self.assertTrue(mox.SameElementsAs([{'a': 1}, {2: 'b'}]) ==
+                        [{2: 'b'}, {'a': 1}])
+
+    def testEmptyLists(self):
+        """Should return True for two empty lists."""
+        self.assertTrue(mox.SameElementsAs([]) == [])
+
+    def testUnequalLists(self):
+        """Should return False if the lists are not equal."""
+        self.assertFalse(mox.SameElementsAs([1, 2.0, 'c']) == [2.0, 'c'])
+
+    def testUnequalUnhashableLists(self):
+        """Should return False if lists with unhashable items are unequal."""
+        self.assertFalse(mox.SameElementsAs(
+            [{'a': 1}, {2: 'b'}]) == [{2: 'b'}])
+
+    def testActualIsNotASequence(self):
+        """Should return False if the actual object is not a sequence."""
+        self.assertFalse(mox.SameElementsAs([1]) == object())
+
+    def testOneUnhashableObjectInActual(self):
+        """Store the entire iterator for a correct comparison.
+
+        In a previous version of SameElementsAs, iteration stopped when an
+        unhashable object was encountered and then was restarted, so the actual
+        list appeared smaller than it was.
+        """
+        self.assertFalse(mox.SameElementsAs([1, 2]) == iter([{}, 1, 2]))
+
+
+class ContainsKeyValueTest(testtools.TestCase):
+    """Test ContainsKeyValue correctly identifies key/value pairs in a dict.
+    """
+
+    def testValidPair(self):
+        """Should return True if the key value is in the dict."""
+        self.assertTrue(mox.ContainsKeyValue("key", 1) == {"key": 1})
+
+    def testInvalidValue(self):
+        """Should return False if the value is not correct."""
+        self.assertFalse(mox.ContainsKeyValue("key", 1) == {"key": 2})
+
+    def testInvalidKey(self):
+        """Should return False if they key is not in the dict."""
+        self.assertFalse(mox.ContainsKeyValue("qux", 1) == {"key": 2})
+
+
+class ContainsAttributeValueTest(testtools.TestCase):
+    """Test ContainsAttributeValue identifies properties in an object."""
+
+    def setUp(self):
+        """Create an object to test with."""
+
+        class TestObject(object):
+            key = 1
+
+        super(ContainsAttributeValueTest, self).setUp()
+        self.test_object = TestObject()
+
+    def testValidPair(self):
+        """Return True if the object has the key attribute that matches."""
+        self.assertTrue(mox.ContainsAttributeValue("key", 1)
+                        == self.test_object)
+
+    def testInvalidValue(self):
+        """Should return False if the value is not correct."""
+        self.assertFalse(mox.ContainsKeyValue("key", 2) == self.test_object)
+
+    def testInvalidKey(self):
+        """Should return False if they the object doesn't have the property."""
+        self.assertFalse(mox.ContainsKeyValue("qux", 1) == self.test_object)
+
+
+class InTest(testtools.TestCase):
+    """Test In correctly identifies a key in a list/dict."""
+
+    def testItemInList(self):
+        """Should return True if the item is in the list."""
+        self.assertTrue(mox.In(1) == [1, 2, 3])
+
+    def testKeyInDict(self):
+        """Should return True if the item is a key in a dict."""
+        self.assertTrue(mox.In("test") == {"test": "module"})
+
+    def testItemInTuple(self):
+        """Should return True if the item is in the list."""
+        self.assertTrue(mox.In(1) == (1, 2, 3))
+
+    def testTupleInTupleOfTuples(self):
+        self.assertTrue(mox.In((1, 2, 3)) == ((1, 2, 3), (1, 2)))
+
+    def testItemNotInList(self):
+        self.assertFalse(mox.In(1) == [2, 3])
+
+    def testTupleNotInTupleOfTuples(self):
+        self.assertFalse(mox.In((1, 2)) == ((1, 2, 3), (4, 5)))
+
+
+class NotTest(testtools.TestCase):
+    """Test Not correctly identifies False predicates."""
+
+    def testItemInList(self):
+        """Should return True if the item is NOT in the list."""
+        self.assertTrue(mox.Not(mox.In(42)) == [1, 2, 3])
+
+    def testKeyInDict(self):
+        """Should return True if the item is NOT a key in a dict."""
+        self.assertTrue(mox.Not(mox.In("foo")) == {"key": 42})
+
+    def testInvalidKeyWithNot(self):
+        """Should return False if they key is NOT in the dict."""
+        self.assertTrue(mox.Not(mox.ContainsKeyValue("qux", 1)) == {"key": 2})
+
+
+class StrContainsTest(testtools.TestCase):
+    """Test StrContains checks for substring occurrence of a parameter."""
+
+    def testValidSubstringAtStart(self):
+        """Should return True if substring is at the start of the string."""
+        self.assertTrue(mox.StrContains("hello") == "hello world")
+
+    def testValidSubstringInMiddle(self):
+        """Should return True if substring is in the middle of the string."""
+        self.assertTrue(mox.StrContains("lo wo") == "hello world")
+
+    def testValidSubstringAtEnd(self):
+        """Should return True if the substring is at the end of the string."""
+        self.assertTrue(mox.StrContains("ld") == "hello world")
+
+    def testInvaildSubstring(self):
+        """Should return False if the substring is not in the string."""
+        self.assertFalse(mox.StrContains("AAA") == "hello world")
+
+    def testMultipleMatches(self):
+        """Should return True if there are multiple occurances of substring."""
+        self.assertTrue(mox.StrContains("abc") == "ababcabcabcababc")
+
+
+class RegexTest(testtools.TestCase):
+    """Test Regex correctly matches regular expressions."""
+
+    def testIdentifyBadSyntaxDuringInit(self):
+        """The user should know immediately if a regex has bad syntax."""
+        self.assertRaises(re.error, mox.Regex, '(a|b')
+
+    def testPatternInMiddle(self):
+        """Return True if the pattern matches at the middle of the string.
+
+        This ensures that re.search is used (instead of re.match).
+        """
+        self.assertTrue(mox.Regex(r"a\s+b") == "x y z a b c")
+
+    def testNonMatchPattern(self):
+        """Should return False if the pattern does not match the string."""
+        self.assertFalse(mox.Regex(r"a\s+b") == "x y z")
+
+    def testFlagsPassedCorrectly(self):
+        """Should return True as we pass IGNORECASE flag."""
+        self.assertTrue(mox.Regex(r"A", re.IGNORECASE) == "a")
+
+    def testReprWithoutFlags(self):
+        """repr should return the regular expression pattern."""
+        self.assertTrue(
+            repr(mox.Regex(r"a\s+b")) == "<regular expression 'a\s+b'>")
+
+    def testReprWithFlags(self):
+        """repr should return the regular expression pattern and flags."""
+        self.assertTrue(repr(mox.Regex(r"a\s+b", flags=4)) ==
+                        "<regular expression 'a\s+b', flags=4>")
+
+
+class IsTest(testtools.TestCase):
+    """Verify Is correctly checks equality based upon identity, not value."""
+
+    class AlwaysComparesTrue(object):
+        def __eq__(self, other):
+            return True
+
+        def __cmp__(self, other):
+            return 0
+
+        def __ne__(self, other):
+            return False
+
+    def testEqualityValid(self):
+        o1 = self.AlwaysComparesTrue()
+        self.assertTrue(mox.Is(o1) == o1)
+
+    def testEqualityInvalid(self):
+        o1 = self.AlwaysComparesTrue()
+        o2 = self.AlwaysComparesTrue()
+        self.assertTrue(o1 == o2)
+        # but...
+        self.assertFalse(mox.Is(o1) == o2)
+
+    def testInequalityValid(self):
+        o1 = self.AlwaysComparesTrue()
+        o2 = self.AlwaysComparesTrue()
+        self.assertTrue(mox.Is(o1) != o2)
+
+    def testInequalityInvalid(self):
+        o1 = self.AlwaysComparesTrue()
+        self.assertFalse(mox.Is(o1) != o1)
+
+    def testEqualityInListValid(self):
+        o1 = self.AlwaysComparesTrue()
+        o2 = self.AlwaysComparesTrue()
+        isa_list = [mox.Is(o1), mox.Is(o2)]
+        str_list = [o1, o2]
+        self.assertTrue(isa_list == str_list)
+
+    def testEquailtyInListInvalid(self):
+        o1 = self.AlwaysComparesTrue()
+        o2 = self.AlwaysComparesTrue()
+        isa_list = [mox.Is(o1), mox.Is(o2)]
+        mixed_list = [o2, o1]
+        self.assertFalse(isa_list == mixed_list)
+
+
+class IsATest(testtools.TestCase):
+    """Verify IsA correctly checks equality based upon class type not value."""
+
+    def testEqualityValid(self):
+        """Verify that == correctly identifies objects of the same type."""
+        self.assertTrue(mox.IsA(str) == 'test')
+
+    def testEqualityInvalid(self):
+        """Verify that == correctly identifies objects of different types."""
+        self.assertFalse(mox.IsA(str) == 10)
+
+    def testInequalityValid(self):
+        """Verify that != identifies objects of different type."""
+        self.assertTrue(mox.IsA(str) != 10)
+
+    def testInequalityInvalid(self):
+        """Verify that != correctly identifies objects of the same type."""
+        self.assertFalse(mox.IsA(str) != "test")
+
+    def testEqualityInListValid(self):
+        """Verify list contents are properly compared."""
+        isa_list = [mox.IsA(str), mox.IsA(str)]
+        str_list = ["abc", "def"]
+        self.assertTrue(isa_list == str_list)
+
+    def testEquailtyInListInvalid(self):
+        """Verify list contents are properly compared."""
+        isa_list = [mox.IsA(str), mox.IsA(str)]
+        mixed_list = ["abc", 123]
+        self.assertFalse(isa_list == mixed_list)
+
+    def testSpecialTypes(self):
+        """Verify that IsA can handle objects like io.StringIO."""
+        isA = mox.IsA(io.StringIO())
+        stringIO = io.StringIO()
+        self.assertTrue(isA == stringIO)
+
+
+class IsAlmostTest(testtools.TestCase):
+    """Verify IsAlmost correctly checks equality of floating point numbers."""
+
+    def testEqualityValid(self):
+        """Verify that == correctly identifies nearly equivalent floats."""
+        self.assertEqual(mox.IsAlmost(1.8999999999), 1.9)
+
+    def testEqualityInvalid(self):
+        """Verify that == correctly identifies non-equivalent floats."""
+        self.assertNotEqual(mox.IsAlmost(1.899), 1.9)
+
+    def testEqualityWithPlaces(self):
+        """Verify that specifying places has the desired effect."""
+        self.assertNotEqual(mox.IsAlmost(1.899), 1.9)
+        self.assertEqual(mox.IsAlmost(1.899, places=2), 1.9)
+
+    def testNonNumericTypes(self):
+        """Verify that IsAlmost handles non-numeric types properly."""
+
+        self.assertNotEqual(mox.IsAlmost(1.8999999999), '1.9')
+        self.assertNotEqual(mox.IsAlmost('1.8999999999'), 1.9)
+        self.assertNotEqual(mox.IsAlmost('1.8999999999'), '1.9')
+
+
+class ValueRememberTest(testtools.TestCase):
+    """Verify comparing argument against remembered value."""
+
+    def testValueEquals(self):
+        """Verify that value will compare to stored value."""
+        value = mox.Value()
+        value.store_value('hello world')
+        self.assertEqual(value, 'hello world')
+
+    def testNoValue(self):
+        """Verify that uninitialized value does not compare to empty values."""
+        value = mox.Value()
+        self.assertNotEqual(value, None)
+        self.assertNotEqual(value, False)
+        self.assertNotEqual(value, 0)
+        self.assertNotEqual(value, '')
+        self.assertNotEqual(value, ())
+        self.assertNotEqual(value, [])
+        self.assertNotEqual(value, {})
+        self.assertNotEqual(value, object())
+        self.assertNotEqual(value, set())
+
+    def testRememberValue(self):
+        """Verify that comparing against remember will store argument."""
+        value = mox.Value()
+        remember = mox.Remember(value)
+        self.assertNotEqual(value, 'hello world')  # value not yet stored.
+        self.assertEqual(remember, 'hello world')  # store value here.
+        self.assertEqual(value, 'hello world')  # compare against stored value.
+
+
+class MockMethodTest(testtools.TestCase):
+    """Test class to verify that the MockMethod class is working correctly."""
+
+    def setUp(self):
+        super(MockMethodTest, self).setUp()
+        self.expected_method = mox.MockMethod(
+            "testMethod", [], False)(['original'])
+        self.mock_method = mox.MockMethod(
+            "testMethod", [self.expected_method], True)
+
+    def testNameAttribute(self):
+        """Should provide a __name__ attribute."""
+        self.assertEqual('testMethod', self.mock_method.__name__)
+
+    def testAndReturnNoneByDefault(self):
+        """Should return None by default."""
+        return_value = self.mock_method(['original'])
+        self.assertTrue(return_value is None)
+
+    def testAndReturnValue(self):
+        """Should return a specificed return value."""
+        expected_return_value = "test"
+        self.expected_method.AndReturn(expected_return_value)
+        return_value = self.mock_method(['original'])
+        self.assertTrue(return_value == expected_return_value)
+
+    def testAndRaiseException(self):
+        """Should raise a specified exception."""
+        class TestException(Exception):
+            pass
+
+        expected_exception = TestException('test exception')
+        self.expected_method.AndRaise(expected_exception)
+        self.assertRaises(TestException, self.mock_method, ['original'])
+
+    def testWithSideEffects(self):
+        """Should call state modifier."""
+        local_list = ['original']
+
+        def modifier(mutable_list):
+            self.assertTrue(local_list is mutable_list)
+            mutable_list[0] = 'mutation'
+
+        self.expected_method.WithSideEffects(modifier).AndReturn(1)
+        self.mock_method(local_list)
+        self.assertEqual('mutation', local_list[0])
+
+    def testWithReturningSideEffects(self):
+        """Should call state modifier and propagate its return value."""
+        local_list = ['original']
+        expected_return = 'expected_return'
+
+        def modifier_with_return(mutable_list):
+            self.assertTrue(local_list is mutable_list)
+            mutable_list[0] = 'mutation'
+            return expected_return
+
+        self.expected_method.WithSideEffects(modifier_with_return)
+        actual_return = self.mock_method(local_list)
+        self.assertEqual('mutation', local_list[0])
+        self.assertEqual(expected_return, actual_return)
+
+    def testWithReturningSideEffectsWithAndReturn(self):
+        """Should call state modifier and ignore its return value."""
+        local_list = ['original']
+        expected_return = 'expected_return'
+        unexpected_return = 'unexpected_return'
+
+        def modifier_with_return(mutable_list):
+            self.assertTrue(local_list is mutable_list)
+            mutable_list[0] = 'mutation'
+            return unexpected_return
+
+        self.expected_method.WithSideEffects(modifier_with_return).AndReturn(
+            expected_return)
+        actual_return = self.mock_method(local_list)
+        self.assertEqual('mutation', local_list[0])
+        self.assertEqual(expected_return, actual_return)
+
+    def testEqualityNoParamsEqual(self):
+        """Methods with the same name and without params should be equal."""
+        expected_method = mox.MockMethod("testMethod", [], False)
+        self.assertEqual(self.mock_method, expected_method)
+
+    def testEqualityNoParamsNotEqual(self):
+        """Methods with different names without params should not be equal."""
+        expected_method = mox.MockMethod("otherMethod", [], False)
+        self.assertNotEqual(self.mock_method, expected_method)
+
+    def testEqualityParamsEqual(self):
+        """Methods with the same name and parameters should be equal."""
+        params = [1, 2, 3]
+        expected_method = mox.MockMethod("testMethod", [], False)
+        expected_method._params = params
+
+        self.mock_method._params = params
+        self.assertEqual(self.mock_method, expected_method)
+
+    def testEqualityParamsNotEqual(self):
+        """Methods with same name and different params should not be equal."""
+        expected_method = mox.MockMethod("testMethod", [], False)
+        expected_method._params = [1, 2, 3]
+
+        self.mock_method._params = ['a', 'b', 'c']
+        self.assertNotEqual(self.mock_method, expected_method)
+
+    def testEqualityNamedParamsEqual(self):
+        """Methods with the same name and same named params should be equal."""
+        named_params = {"input1": "test", "input2": "params"}
+        expected_method = mox.MockMethod("testMethod", [], False)
+        expected_method._named_params = named_params
+
+        self.mock_method._named_params = named_params
+        self.assertEqual(self.mock_method, expected_method)
+
+    def testEqualityNamedParamsNotEqual(self):
+        """Methods with same name and diffnamed params should not be equal."""
+        expected_method = mox.MockMethod("testMethod", [], False)
+        expected_method._named_params = {"input1": "test", "input2": "params"}
+
+        self.mock_method._named_params = {
+            "input1": "test2", "input2": "params2"}
+        self.assertNotEqual(self.mock_method, expected_method)
+
+    def testEqualityWrongType(self):
+        """Method should not be equal to an object of a different type."""
+        self.assertNotEqual(self.mock_method, "string?")
+
+    def testObjectEquality(self):
+        """Equality of objects should work without a Comparator"""
+        instA = TestClass()
+        instB = TestClass()
+
+        params = [instA, ]
+        expected_method = mox.MockMethod("testMethod", [], False)
+        expected_method._params = params
+
+        self.mock_method._params = [instB, ]
+        self.assertEqual(self.mock_method, expected_method)
+
+    def testStrConversion(self):
+        method = mox.MockMethod("f", [], False)
+        method(1, 2, "st", n1=8, n2="st2")
+        self.assertEqual(str(method),
+                         ("f(1, 2, 'st', n1=8, n2='st2') -> None"))
+
+        method = mox.MockMethod("testMethod", [], False)
+        method(1, 2, "only positional")
+        self.assertEqual(str(method),
+                         "testMethod(1, 2, 'only positional') -> None")
+
+        method = mox.MockMethod("testMethod", [], False)
+        method(a=1, b=2, c="only named")
+        self.assertEqual(str(method),
+                         "testMethod(a=1, b=2, c='only named') -> None")
+
+        method = mox.MockMethod("testMethod", [], False)
+        method()
+        self.assertEqual(str(method), "testMethod() -> None")
+
+        method = mox.MockMethod("testMethod", [], False)
+        method(x="only 1 parameter")
+        self.assertEqual(str(method),
+                         "testMethod(x='only 1 parameter') -> None")
+
+        method = mox.MockMethod("testMethod", [], False)
+        method().AndReturn('return_value')
+        self.assertEqual(str(method), "testMethod() -> 'return_value'")
+
+        method = mox.MockMethod("testMethod", [], False)
+        method().AndReturn(('a', {1: 2}))
+        self.assertEqual(str(method), "testMethod() -> ('a', {1: 2})")
+
+
+class MockAnythingTest(testtools.TestCase):
+    """Verify that the MockAnything class works as expected."""
+
+    def setUp(self):
+        super(MockAnythingTest, self).setUp()
+        self.mock_object = mox.MockAnything()
+
+    def testRepr(self):
+        """Calling repr on a MockAnything instance must work."""
+        self.assertEqual('<MockAnything instance>', repr(self.mock_object))
+
+    def testCanMockStr(self):
+        self.mock_object.__str__().AndReturn("foo")
+        self.mock_object._Replay()
+        actual = str(self.mock_object)
+        self.mock_object._Verify()
+        self.assertEqual("foo", actual)
+
+    def testSetupMode(self):
+        """Verify the mock will accept any call."""
+        self.mock_object.NonsenseCall()
+        self.assertTrue(len(self.mock_object._expected_calls_queue) == 1)
+
+    def testReplayWithExpectedCall(self):
+        """Verify the mock replays method calls as expected."""
+        self.mock_object.ValidCall()                    # setup method call
+        self.mock_object._Replay()                        # start replay mode
+        self.mock_object.ValidCall()                    # make method call
+
+    def testReplayWithUnexpectedCall(self):
+        """Unexpected method calls should raise UnexpectedMethodCallError."""
+        self.mock_object.ValidCall()                    # setup method call
+        self.mock_object._Replay()                         # start replay mode
+        self.assertRaises(mox.UnexpectedMethodCallError,
+                          self.mock_object.OtherValidCall)
+
+    def testVerifyWithCompleteReplay(self):
+        """Verify should not raise an exception for a valid replay."""
+        self.mock_object.ValidCall()                    # setup method call
+        self.mock_object._Replay()                         # start replay mode
+        self.mock_object.ValidCall()                    # make method call
+        self.mock_object._Verify()
+
+    def testVerifyWithIncompleteReplay(self):
+        """Verify should raise an exception if the replay was not complete."""
+        self.mock_object.ValidCall()                    # setup method call
+        self.mock_object._Replay()                         # start replay mode
+        # ValidCall() is never made
+        self.assertRaises(
+            mox.ExpectedMethodCallsError, self.mock_object._Verify)
+
+    def testSpecialClassMethod(self):
+        """Verify should not raise exception when special methods are used."""
+        self.mock_object[1].AndReturn(True)
+        self.mock_object._Replay()
+        returned_val = self.mock_object[1]
+        self.assertTrue(returned_val)
+        self.mock_object._Verify()
+
+    def testNonzero(self):
+        """You should be able to use the mock object in an if."""
+        self.mock_object._Replay()
+        if self.mock_object:
+            pass
+
+    def testNotNone(self):
+        """Mock should be comparable to None."""
+        self.mock_object._Replay()
+        if self.mock_object is not None:
+            pass
+
+        if self.mock_object is None:
+            pass
+
+    def testEquals(self):
+        """A mock should be able to compare itself to another object."""
+        self.mock_object._Replay()
+        self.assertEqual(self.mock_object, self.mock_object)
+
+    def testEqualsMockFailure(self):
+        """Verify equals identifies unequal objects."""
+        self.mock_object.SillyCall()
+        self.mock_object._Replay()
+        self.assertNotEqual(self.mock_object, mox.MockAnything())
+
+    def testEqualsInstanceFailure(self):
+        """Verify equals identifies that objects are different instances."""
+        self.mock_object._Replay()
+        self.assertNotEqual(self.mock_object, TestClass())
+
+    def testNotEquals(self):
+        """Verify not equals works."""
+        self.mock_object._Replay()
+        self.assertFalse(self.mock_object != self.mock_object)
+
+    def testNestedMockCallsRecordedSerially(self):
+        """Test that nested calls work when recorded serially."""
+        self.mock_object.CallInner().AndReturn(1)
+        self.mock_object.CallOuter(1)
+        self.mock_object._Replay()
+
+        self.mock_object.CallOuter(self.mock_object.CallInner())
+
+        self.mock_object._Verify()
+
+    def testNestedMockCallsRecordedNested(self):
+        """Test that nested cals work when recorded in a nested fashion."""
+        self.mock_object.CallOuter(self.mock_object.CallInner().AndReturn(1))
+        self.mock_object._Replay()
+
+        self.mock_object.CallOuter(self.mock_object.CallInner())
+
+        self.mock_object._Verify()
+
+    def testIsCallable(self):
+        """Test that MockAnything can even mock a simple callable.
+
+        This is handy for "stubbing out" a method in a module with a mock, and
+        verifying that it was called.
+        """
+        self.mock_object().AndReturn('mox0rd')
+        self.mock_object._Replay()
+
+        self.assertEqual('mox0rd', self.mock_object())
+
+        self.mock_object._Verify()
+
+    def testIsReprable(self):
+        """Test that MockAnythings can be repr'd without causing a failure."""
+        self.assertTrue('MockAnything' in repr(self.mock_object))
+
+
+class MethodCheckerTest(testtools.TestCase):
+    """Tests MockMethod's use of MethodChecker method."""
+
+    def testUnboundMethodsRequiresInstance(self):
+        # SKIP TEST IN PYTHON 2.x (Ugly hack for python 2.6)
+        # REASON: semantics for unbound methods have changed only in Python 3
+        #     so this test is invalid in earlier versions
+        if sys.version_info < (3, 0):
+            return
+
+        instance = CheckCallTestClass()
+        method = mox.MockMethod('NoParameters', [], False,
+                                CheckCallTestClass.NoParameters)
+
+        self.assertRaises(AttributeError, method)
+        method(instance)
+        self.assertRaises(AttributeError, method, instance, 1)
+
+    def testNoParameters(self):
+        method = mox.MockMethod('NoParameters', [], False,
+                                CheckCallTestClass.NoParameters,
+                                class_to_bind=CheckCallTestClass)
+        method()
+        self.assertRaises(AttributeError, method, 1)
+        self.assertRaises(AttributeError, method, 1, 2)
+        self.assertRaises(AttributeError, method, a=1)
+        self.assertRaises(AttributeError, method, 1, b=2)
+
+    def testOneParameter(self):
+        method = mox.MockMethod('OneParameter', [], False,
+                                CheckCallTestClass.OneParameter,
+                                class_to_bind=CheckCallTestClass)
+        self.assertRaises(AttributeError, method)
+        method(1)
+        method(a=1)
+        self.assertRaises(AttributeError, method, b=1)
+        self.assertRaises(AttributeError, method, 1, 2)
+        self.assertRaises(AttributeError, method, 1, a=2)
+        self.assertRaises(AttributeError, method, 1, b=2)
+
+    def testTwoParameters(self):
+        method = mox.MockMethod('TwoParameters', [], False,
+                                CheckCallTestClass.TwoParameters,
+                                class_to_bind=CheckCallTestClass)
+        self.assertRaises(AttributeError, method)
+        self.assertRaises(AttributeError, method, 1)
+        self.assertRaises(AttributeError, method, a=1)
+        self.assertRaises(AttributeError, method, b=1)
+        method(1, 2)
+        method(1, b=2)
+        method(a=1, b=2)
+        method(b=2, a=1)
+        self.assertRaises(AttributeError, method, b=2, c=3)
+        self.assertRaises(AttributeError, method, a=1, b=2, c=3)
+        self.assertRaises(AttributeError, method, 1, 2, 3)
+        self.assertRaises(AttributeError, method, 1, 2, 3, 4)
+        self.assertRaises(AttributeError, method, 3, a=1, b=2)
+
+    def testOneDefaultValue(self):
+        method = mox.MockMethod('OneDefaultValue', [], False,
+                                CheckCallTestClass.OneDefaultValue,
+                                class_to_bind=CheckCallTestClass)
+        method()
+        method(1)
+        method(a=1)
+        self.assertRaises(AttributeError, method, b=1)
+        self.assertRaises(AttributeError, method, 1, 2)
+        self.assertRaises(AttributeError, method, 1, a=2)
+        self.assertRaises(AttributeError, method, 1, b=2)
+
+    def testTwoDefaultValues(self):
+        method = mox.MockMethod('TwoDefaultValues', [], False,
+                                CheckCallTestClass.TwoDefaultValues,
+                                class_to_bind=CheckCallTestClass)
+        self.assertRaises(AttributeError, method)
+        self.assertRaises(AttributeError, method, c=3)
+        self.assertRaises(AttributeError, method, 1)
+        self.assertRaises(AttributeError, method, 1, d=4)
+        self.assertRaises(AttributeError, method, 1, d=4, c=3)
+        method(1, 2)
+        method(a=1, b=2)
+        method(1, 2, 3)
+        method(1, 2, 3, 4)
+        method(1, 2, c=3)
+        method(1, 2, c=3, d=4)
+        method(1, 2, d=4, c=3)
+        method(d=4, c=3, a=1, b=2)
+        self.assertRaises(AttributeError, method, 1, 2, 3, 4, 5)
+        self.assertRaises(AttributeError, method, 1, 2, e=9)
+        self.assertRaises(AttributeError, method, a=1, b=2, e=9)
+
+    def testArgs(self):
+        method = mox.MockMethod('Args', [], False, CheckCallTestClass.Args,
+                                class_to_bind=CheckCallTestClass)
+        self.assertRaises(AttributeError, method)
+        self.assertRaises(AttributeError, method, 1)
+        method(1, 2)
+        method(a=1, b=2)
+        method(1, 2, 3)
+        method(1, 2, 3, 4)
+        self.assertRaises(AttributeError, method, 1, 2, a=3)
+        self.assertRaises(AttributeError, method, 1, 2, c=3)
+
+    def testKwargs(self):
+        method = mox.MockMethod('Kwargs', [], False, CheckCallTestClass.Kwargs,
+                                class_to_bind=CheckCallTestClass)
+        self.assertRaises(AttributeError, method)
+        method(1)
+        method(1, 2)
+        method(a=1, b=2)
+        method(b=2, a=1)
+        self.assertRaises(AttributeError, method, 1, 2, 3)
+        self.assertRaises(AttributeError, method, 1, 2, a=3)
+        method(1, 2, c=3)
+        method(a=1, b=2, c=3)
+        method(c=3, a=1, b=2)
+        method(a=1, b=2, c=3, d=4)
+        self.assertRaises(AttributeError, method, 1, 2, 3, 4)
+
+    def testArgsAndKwargs(self):
+        method = mox.MockMethod('ArgsAndKwargs', [], False,
+                                CheckCallTestClass.ArgsAndKwargs,
+                                class_to_bind=CheckCallTestClass)
+        self.assertRaises(AttributeError, method)
+        method(1)
+        method(1, 2)
+        method(1, 2, 3)
+        method(a=1)
+        method(1, b=2)
+        self.assertRaises(AttributeError, method, 1, a=2)
+        method(b=2, a=1)
+        method(c=3, b=2, a=1)
+        method(1, 2, c=3)
+
+
+class CheckCallTestClass(object):
+    def NoParameters(self):
+        pass
+
+    def OneParameter(self, a):
+        pass
+
+    def TwoParameters(self, a, b):
+        pass
+
+    def OneDefaultValue(self, a=1):
+        pass
+
+    def TwoDefaultValues(self, a, b, c=1, d=2):
+        pass
+
+    def Args(self, a, b, *args):
+        pass
+
+    def Kwargs(self, a, b=2, **kwargs):
+        pass
+
+    def ArgsAndKwargs(self, a, *args, **kwargs):
+        pass
+
+
+class MockObjectTest(testtools.TestCase):
+    """Verify that the MockObject class works as exepcted."""
+
+    def setUp(self):
+        super(MockObjectTest, self).setUp()
+        self.mock_object = mox.MockObject(TestClass)
+
+    def testSetupModeWithValidCall(self):
+        """Verify the mock object properly mocks a basic method call."""
+        self.mock_object.ValidCall()
+        self.assertTrue(len(self.mock_object._expected_calls_queue) == 1)
+
+    def testSetupModeWithInvalidCall(self):
+        """Rase UnknownMethodCallError for a non-member method call.
+        """
+        # Note: assertRaises does not catch exceptions thrown by MockObject's
+        # __getattr__
+        try:
+            self.mock_object.InvalidCall()
+            self.fail("No exception thrown, expected UnknownMethodCallError")
+        except mox.UnknownMethodCallError:
+            pass
+        except Exception:
+            self.fail("Wrong exception type thrown,"
+                      " expected UnknownMethodCallError")
+
+    def testReplayWithInvalidCall(self):
+        """Rase UnknownMethodCallError for a non-member method call.
+        """
+        self.mock_object.ValidCall()  # setup method call
+        self.mock_object._Replay()  # start replay mode
+        # Note: assertRaises does not catch exceptions thrown by MockObject's
+        # __getattr__
+        try:
+            self.mock_object.InvalidCall()
+            self.fail("No exception thrown, expected UnknownMethodCallError")
+        except mox.UnknownMethodCallError:
+            pass
+        except Exception:
+            self.fail("Wrong exception type thrown,"
+                      " expected UnknownMethodCallError")
+
+    def testIsInstance(self):
+        """Mock should be able to pass as an instance of the mocked class."""
+        self.assertTrue(isinstance(self.mock_object, TestClass))
+
+    def testFindValidMethods(self):
+        """Mock should be able to mock all public methods."""
+        self.assertTrue('ValidCall' in self.mock_object._known_methods)
+        self.assertTrue('OtherValidCall' in self.mock_object._known_methods)
+        self.assertTrue('MyClassMethod' in self.mock_object._known_methods)
+        self.assertTrue('MyStaticMethod' in self.mock_object._known_methods)
+        self.assertTrue('_ProtectedCall' in self.mock_object._known_methods)
+        self.assertTrue('__PrivateCall' not in self.mock_object._known_methods)
+        self.assertTrue(
+            '_TestClass__PrivateCall' in self.mock_object._known_methods)
+
+    def testFindsSuperclassMethods(self):
+        """Mock should be able to mock superclasses methods."""
+        self.mock_object = mox.MockObject(ChildClass)
+        self.assertTrue('ValidCall' in self.mock_object._known_methods)
+        self.assertTrue('OtherValidCall' in self.mock_object._known_methods)
+        self.assertTrue('MyClassMethod' in self.mock_object._known_methods)
+        self.assertTrue('ChildValidCall' in self.mock_object._known_methods)
+
+    def testAccessClassVariables(self):
+        """Class variables should be accessible through the mock."""
+        self.assertTrue('SOME_CLASS_VAR' in self.mock_object._known_vars)
+        self.assertTrue('_PROTECTED_CLASS_VAR' in self.mock_object._known_vars)
+        self.assertEqual('test_value', self.mock_object.SOME_CLASS_VAR)
+
+    def testEquals(self):
+        """A mock should be able to compare itself to another object."""
+        self.mock_object._Replay()
+        self.assertEqual(self.mock_object, self.mock_object)
+
+    def testEqualsMockFailure(self):
+        """Verify equals identifies unequal objects."""
+        self.mock_object.ValidCall()
+        self.mock_object._Replay()
+        self.assertNotEqual(self.mock_object, mox.MockObject(TestClass))
+
+    def testEqualsInstanceFailure(self):
+        """Verify equals identifies that objects are different instances."""
+        self.mock_object._Replay()
+        self.assertNotEqual(self.mock_object, TestClass())
+
+    def testNotEquals(self):
+        """Verify not equals works."""
+        self.mock_object._Replay()
+        self.assertFalse(self.mock_object != self.mock_object)
+
+    def testMockSetItem_ExpectedSetItem_Success(self):
+        """Test that __setitem__() gets mocked in Dummy.
+
+        In this test, _Verify() succeeds.
+        """
+        dummy = mox.MockObject(TestClass)
+        dummy['X'] = 'Y'
+
+        dummy._Replay()
+
+        dummy['X'] = 'Y'
+
+        dummy._Verify()
+
+    def testMockSetItem_ExpectedSetItem_NoSuccess(self):
+        """Test that __setitem__() gets mocked in Dummy.
+
+        In this test, _Verify() fails.
+        """
+        dummy = mox.MockObject(TestClass)
+        dummy['X'] = 'Y'
+
+        dummy._Replay()
+
+        # NOT doing dummy['X'] = 'Y'
+
+        self.assertRaises(mox.ExpectedMethodCallsError, dummy._Verify)
+
+    def testMockSetItem_ExpectedNoSetItem_Success(self):
+        """Test that __setitem__() gets mocked in Dummy."""
+        dummy = mox.MockObject(TestClass)
+        # NOT doing dummy['X'] = 'Y'
+
+        dummy._Replay()
+
+        def call():
+            dummy['X'] = 'Y'
+
+        self.assertRaises(mox.UnexpectedMethodCallError, call)
+
+    def testMockSetItem_ExpectedNoSetItem_NoSuccess(self):
+        """Test that __setitem__() gets mocked in Dummy.
+
+        In this test, _Verify() fails.
+        """
+        dummy = mox.MockObject(TestClass)
+        # NOT doing dummy['X'] = 'Y'
+
+        dummy._Replay()
+
+        # NOT doing dummy['X'] = 'Y'
+
+        dummy._Verify()
+
+    def testMockSetItem_ExpectedSetItem_NonmatchingParameters(self):
+        """Test that __setitem__() fails if other parameters are expected."""
+        dummy = mox.MockObject(TestClass)
+        dummy['X'] = 'Y'
+
+        dummy._Replay()
+
+        def call():
+            dummy['wrong'] = 'Y'
+
+        self.assertRaises(mox.UnexpectedMethodCallError, call)
+
+        dummy._Verify()
+
+    def testMockSetItem_WithSubClassOfNewStyleClass(self):
+        class NewStyleTestClass(object):
+            def __init__(self):
+                self.my_dict = {}
+
+            def __setitem__(self, key, value):
+                self.my_dict[key] = value
+
+        class TestSubClass(NewStyleTestClass):
+            pass
+
+        dummy = mox.MockObject(TestSubClass)
+        dummy[1] = 2
+        dummy._Replay()
+        dummy[1] = 2
+        dummy._Verify()
+
+    def testMockGetItem_ExpectedGetItem_Success(self):
+        """Test that __getitem__() gets mocked in Dummy.
+
+        In this test, _Verify() succeeds.
+        """
+        dummy = mox.MockObject(TestClass)
+        dummy['X'].AndReturn('value')
+
+        dummy._Replay()
+
+        self.assertEqual(dummy['X'], 'value')
+
+        dummy._Verify()
+
+    def testMockGetItem_ExpectedGetItem_NoSuccess(self):
+        """Test that __getitem__() gets mocked in Dummy.
+
+        In this test, _Verify() fails.
+        """
+        dummy = mox.MockObject(TestClass)
+        dummy['X'].AndReturn('value')
+
+        dummy._Replay()
+
+        # NOT doing dummy['X']
+
+        self.assertRaises(mox.ExpectedMethodCallsError, dummy._Verify)
+
+    def testMockGetItem_ExpectedNoGetItem_NoSuccess(self):
+        """Test that __getitem__() gets mocked in Dummy."""
+        dummy = mox.MockObject(TestClass)
+        # NOT doing dummy['X']
+
+        dummy._Replay()
+
+        def call():
+            return dummy['X']
+
+        self.assertRaises(mox.UnexpectedMethodCallError, call)
+
+    def testMockGetItem_ExpectedGetItem_NonmatchingParameters(self):
+        """Test that __getitem__() fails if other parameters are expected."""
+        dummy = mox.MockObject(TestClass)
+        dummy['X'].AndReturn('value')
+
+        dummy._Replay()
+
+        def call():
+            return dummy['wrong']
+
+        self.assertRaises(mox.UnexpectedMethodCallError, call)
+
+        dummy._Verify()
+
+    def testMockGetItem_WithSubClassOfNewStyleClass(self):
+        class NewStyleTestClass(object):
+            def __getitem__(self, key):
+                return {1: '1', 2: '2'}[key]
+
+        class TestSubClass(NewStyleTestClass):
+            pass
+
+        dummy = mox.MockObject(TestSubClass)
+        dummy[1].AndReturn('3')
+
+        dummy._Replay()
+        self.assertEqual('3', dummy.__getitem__(1))
+        dummy._Verify()
+
+    def testMockIter_ExpectedIter_Success(self):
+        """Test that __iter__() gets mocked in Dummy.
+
+        In this test, _Verify() succeeds.
+        """
+        dummy = mox.MockObject(TestClass)
+        iter(dummy).AndReturn(iter(['X', 'Y']))
+
+        dummy._Replay()
+
+        self.assertEqual([x for x in dummy], ['X', 'Y'])
+
+        dummy._Verify()
+
+    def testMockContains_ExpectedContains_Success(self):
+        """Test that __contains__ gets mocked in Dummy.
+
+        In this test, _Verify() succeeds.
+        """
+        dummy = mox.MockObject(TestClass)
+        dummy.__contains__('X').AndReturn(True)
+
+        dummy._Replay()
+
+        self.assertTrue('X' in dummy)
+
+        dummy._Verify()
+
+    def testMockContains_ExpectedContains_NoSuccess(self):
+        """Test that __contains__() gets mocked in Dummy.
+
+        In this test, _Verify() fails.
+        """
+        dummy = mox.MockObject(TestClass)
+        dummy.__contains__('X').AndReturn('True')
+
+        dummy._Replay()
+
+        # NOT doing 'X' in dummy
+
+        self.assertRaises(mox.ExpectedMethodCallsError, dummy._Verify)
+
+    def testMockContains_ExpectedContains_NonmatchingParameter(self):
+        """Test that __contains__ fails if other parameters are expected."""
+        dummy = mox.MockObject(TestClass)
+        dummy.__contains__('X').AndReturn(True)
+
+        dummy._Replay()
+
+        def call():
+            return 'Y' in dummy
+
+        self.assertRaises(mox.UnexpectedMethodCallError, call)
+
+        dummy._Verify()
+
+    def testMockIter_ExpectedIter_NoSuccess(self):
+        """Test that __iter__() gets mocked in Dummy.
+
+        In this test, _Verify() fails.
+        """
+        dummy = mox.MockObject(TestClass)
+        iter(dummy).AndReturn(iter(['X', 'Y']))
+
+        dummy._Replay()
+
+        # NOT doing self.assertEqual([x for x in dummy], ['X', 'Y'])
+
+        self.assertRaises(mox.ExpectedMethodCallsError, dummy._Verify)
+
+    def testMockIter_ExpectedNoIter_NoSuccess(self):
+        """Test that __iter__() gets mocked in Dummy."""
+        dummy = mox.MockObject(TestClass)
+        # NOT doing iter(dummy)
+
+        dummy._Replay()
+
+        def call():
+            return [x for x in dummy]
+        self.assertRaises(mox.UnexpectedMethodCallError, call)
+
+    def testMockIter_ExpectedGetItem_Success(self):
+        """Test that __iter__() gets mocked in Dummy using getitem."""
+        dummy = mox.MockObject(SubscribtableNonIterableClass)
+        dummy[0].AndReturn('a')
+        dummy[1].AndReturn('b')
+        dummy[2].AndRaise(IndexError)
+
+        dummy._Replay()
+        self.assertEqual(['a', 'b'], [x for x in dummy])
+        dummy._Verify()
+
+    def testMockIter_ExpectedNoGetItem_NoSuccess(self):
+        """Test that __iter__() gets mocked in Dummy using getitem."""
+        dummy = mox.MockObject(SubscribtableNonIterableClass)
+        # NOT doing dummy[index]
+
+        dummy._Replay()
+        function = lambda: [x for x in dummy]
+        self.assertRaises(mox.UnexpectedMethodCallError, function)
+
+    def testMockGetIter_WithSubClassOfNewStyleClass(self):
+        class NewStyleTestClass(object):
+            def __iter__(self):
+                return iter([1, 2, 3])
+
+        class TestSubClass(NewStyleTestClass):
+            pass
+
+        dummy = mox.MockObject(TestSubClass)
+        iter(dummy).AndReturn(iter(['a', 'b']))
+        dummy._Replay()
+        self.assertEqual(['a', 'b'], [x for x in dummy])
+        dummy._Verify()
+
+    def testInstantiationWithAdditionalAttributes(self):
+        mock_object = mox.MockObject(TestClass, attrs={"attr1": "value"})
+        self.assertEqual(mock_object.attr1, "value")
+
+    def testCantOverrideMethodsWithAttributes(self):
+        self.assertRaises(ValueError, mox.MockObject, TestClass,
+                          attrs={"ValidCall": "value"})
+
+    def testCantMockNonPublicAttributes(self):
+        self.assertRaises(mox.PrivateAttributeError, mox.MockObject, TestClass,
+                          attrs={"_protected": "value"})
+        self.assertRaises(mox.PrivateAttributeError, mox.MockObject, TestClass,
+                          attrs={"__private": "value"})
+
+
+class MoxTest(testtools.TestCase):
+    """Verify Mox works correctly."""
+
+    def setUp(self):
+        super(MoxTest, self).setUp()
+        self.mox = mox.Mox()
+
+    def testCreateObject(self):
+        """Mox should create a mock object."""
+        self.mox.CreateMock(TestClass)
+
+    def testVerifyObjectWithCompleteReplay(self):
+        """Mox should replay and verify all objects it created."""
+        mock_obj = self.mox.CreateMock(TestClass)
+        mock_obj.ValidCall()
+        mock_obj.ValidCallWithArgs(mox.IsA(TestClass))
+        self.mox.ReplayAll()
+        mock_obj.ValidCall()
+        mock_obj.ValidCallWithArgs(TestClass("some_value"))
+        self.mox.VerifyAll()
+
+    def testVerifyObjectWithIncompleteReplay(self):
+        """Mox should raise an exception if a mock didn't replay completely."""
+        mock_obj = self.mox.CreateMock(TestClass)
+        mock_obj.ValidCall()
+        self.mox.ReplayAll()
+        # ValidCall() is never made
+        self.assertRaises(mox.ExpectedMethodCallsError, self.mox.VerifyAll)
+
+    def testEntireWorkflow(self):
+        """Test the whole work flow."""
+        mock_obj = self.mox.CreateMock(TestClass)
+        mock_obj.ValidCall().AndReturn("yes")
+        self.mox.ReplayAll()
+
+        ret_val = mock_obj.ValidCall()
+        self.assertEqual("yes", ret_val)
+        self.mox.VerifyAll()
+
+    def testSignatureMatchingWithComparatorAsFirstArg(self):
+        """Test that the first argument can be a comparator."""
+
+        def VerifyLen(val):
+            """This will raise an exception when not given a list.
+
+            This exception will be raised when trying to infer/validate the
+            method signature.
+            """
+            return len(val) != 1
+
+        mock_obj = self.mox.CreateMock(TestClass)
+        # This intentionally does not name the 'nine' param so it triggers
+        # deeper inspection.
+        mock_obj.MethodWithArgs(mox.Func(VerifyLen), mox.IgnoreArg(), None)
+        self.mox.ReplayAll()
+
+        mock_obj.MethodWithArgs([1, 2], "foo", None)
+
+        self.mox.VerifyAll()
+
+    def testCallableObject(self):
+        """Test recording calls to a callable object works."""
+        mock_obj = self.mox.CreateMock(CallableClass)
+        mock_obj("foo").AndReturn("qux")
+        self.mox.ReplayAll()
+
+        ret_val = mock_obj("foo")
+        self.assertEqual("qux", ret_val)
+        self.mox.VerifyAll()
+
+    def testInheritedCallableObject(self):
+        """Recording calls to an object inheriting from a callable object."""
+        mock_obj = self.mox.CreateMock(InheritsFromCallable)
+        mock_obj("foo").AndReturn("qux")
+        self.mox.ReplayAll()
+
+        ret_val = mock_obj("foo")
+        self.assertEqual("qux", ret_val)
+        self.mox.VerifyAll()
+
+    def testCallOnNonCallableObject(self):
+        """Test that you cannot call a non-callable object."""
+        mock_obj = self.mox.CreateMock("string is not callable")
+        self.assertRaises(TypeError, mock_obj)
+
+    def testCallableObjectWithBadCall(self):
+        """Test verifying calls to a callable object works."""
+        mock_obj = self.mox.CreateMock(CallableClass)
+        mock_obj("foo").AndReturn("qux")
+        self.mox.ReplayAll()
+
+        self.assertRaises(mox.UnexpectedMethodCallError, mock_obj, "ZOOBAZ")
+
+    def testCallableObjectVerifiesSignature(self):
+        mock_obj = self.mox.CreateMock(CallableClass)
+        # Too many arguments
+        self.assertRaises(AttributeError, mock_obj, "foo", "bar")
+
+    def testUnorderedGroup(self):
+        """Test that using one unordered group works."""
+        mock_obj = self.mox.CreateMockAnything()
+        mock_obj.Method(1).InAnyOrder()
+        mock_obj.Method(2).InAnyOrder()
+        self.mox.ReplayAll()
+
+        mock_obj.Method(2)
+        mock_obj.Method(1)
+
+        self.mox.VerifyAll()
+
+    def testUnorderedGroupsInline(self):
+        """Unordered groups should work in the context of ordered calls."""
+        mock_obj = self.mox.CreateMockAnything()
+        mock_obj.Open()
+        mock_obj.Method(1).InAnyOrder()
+        mock_obj.Method(2).InAnyOrder()
+        mock_obj.Close()
+        self.mox.ReplayAll()
+
+        mock_obj.Open()
+        mock_obj.Method(2)
+        mock_obj.Method(1)
+        mock_obj.Close()
+
+        self.mox.VerifyAll()
+
+    def testMultipleUnorderdGroups(self):
+        """Multiple unoreded groups should work."""
+        mock_obj = self.mox.CreateMockAnything()
+        mock_obj.Method(1).InAnyOrder()
+        mock_obj.Method(2).InAnyOrder()
+        mock_obj.Foo().InAnyOrder('group2')
+        mock_obj.Bar().InAnyOrder('group2')
+        self.mox.ReplayAll()
+
+        mock_obj.Method(2)
+        mock_obj.Method(1)
+        mock_obj.Bar()
+        mock_obj.Foo()
+
+        self.mox.VerifyAll()
+
+    def testMultipleUnorderdGroupsOutOfOrder(self):
+        """Multiple unordered groups should maintain external order"""
+        mock_obj = self.mox.CreateMockAnything()
+        mock_obj.Method(1).InAnyOrder()
+        mock_obj.Method(2).InAnyOrder()
+        mock_obj.Foo().InAnyOrder('group2')
+        mock_obj.Bar().InAnyOrder('group2')
+        self.mox.ReplayAll()
+
+        mock_obj.Method(2)
+        self.assertRaises(mox.UnexpectedMethodCallError, mock_obj.Bar)
+
+    def testUnorderedGroupWithReturnValue(self):
+        """Unordered groups should work with return values."""
+        mock_obj = self.mox.CreateMockAnything()
+        mock_obj.Open()
+        mock_obj.Method(1).InAnyOrder().AndReturn(9)
+        mock_obj.Method(2).InAnyOrder().AndReturn(10)
+        mock_obj.Close()
+        self.mox.ReplayAll()
+
+        mock_obj.Open()
+        actual_two = mock_obj.Method(2)
+        actual_one = mock_obj.Method(1)
+        mock_obj.Close()
+
+        self.assertEqual(9, actual_one)
+        self.assertEqual(10, actual_two)
+
+        self.mox.VerifyAll()
+
+    def testUnorderedGroupWithComparator(self):
+        """Unordered groups should work with comparators."""
+
+        def VerifyOne(cmd):
+            if not isinstance(cmd, str):
+                self.fail('Unexpected type passed to comparator: ' + str(cmd))
+            return cmd == 'test'
+
+        def VerifyTwo(cmd):
+            return True
+
+        mock_obj = self.mox.CreateMockAnything()
+        mock_obj.Foo(['test'], mox.Func(VerifyOne), bar=1).InAnyOrder().\
+            AndReturn('yes test')
+        mock_obj.Foo(['test'], mox.Func(VerifyTwo), bar=1).InAnyOrder().\
+            AndReturn('anything')
+
+        self.mox.ReplayAll()
+
+        mock_obj.Foo(['test'], 'anything', bar=1)
+        mock_obj.Foo(['test'], 'test', bar=1)
+
+        self.mox.VerifyAll()
+
+    def testMultipleTimes(self):
+        """Test if MultipleTimesGroup works."""
+        mock_obj = self.mox.CreateMockAnything()
+        mock_obj.Method(1).MultipleTimes().AndReturn(9)
+        mock_obj.Method(2).AndReturn(10)
+        mock_obj.Method(3).MultipleTimes().AndReturn(42)
+        self.mox.ReplayAll()
+
+        actual_one = mock_obj.Method(1)
+        second_one = mock_obj.Method(1)    # This tests MultipleTimes.
+        actual_two = mock_obj.Method(2)
+        actual_three = mock_obj.Method(3)
+        mock_obj.Method(3)
+        mock_obj.Method(3)
+
+        self.mox.VerifyAll()
+
+        self.assertEqual(9, actual_one)
+        # Repeated calls should return same number.
+        self.assertEqual(9, second_one)
+        self.assertEqual(10, actual_two)
+        self.assertEqual(42, actual_three)
+
+    def testMultipleTimesUsingIsAParameter(self):
+        """Test if MultipleTimesGroup works with a IsA parameter."""
+        mock_obj = self.mox.CreateMockAnything()
+        mock_obj.Open()
+        mock_obj.Method(mox.IsA(str)).MultipleTimes("IsA").AndReturn(9)
+        mock_obj.Close()
+        self.mox.ReplayAll()
+
+        mock_obj.Open()
+        actual_one = mock_obj.Method("1")
+        second_one = mock_obj.Method("2")    # This tests MultipleTimes.
+        mock_obj.Close()
+
+        self.mox.VerifyAll()
+
+        self.assertEqual(9, actual_one)
+        # Repeated calls should return same number.
+        self.assertEqual(9, second_one)
+
+    def testMutlipleTimesUsingFunc(self):
+        """Test that the Func is not evaluated more times than necessary.
+
+        If a Func() has side effects, it can cause a passing test to fail.
+        """
+
+        self.counter = 0
+
+        def MyFunc(actual_str):
+            """Increment the counter if actual_str == 'foo'."""
+            if actual_str == 'foo':
+                self.counter += 1
+            return True
+
+        mock_obj = self.mox.CreateMockAnything()
+        mock_obj.Open()
+        mock_obj.Method(mox.Func(MyFunc)).MultipleTimes()
+        mock_obj.Close()
+        self.mox.ReplayAll()
+
+        mock_obj.Open()
+        mock_obj.Method('foo')
+        mock_obj.Method('foo')
+        mock_obj.Method('not-foo')
+        mock_obj.Close()
+
+        self.mox.VerifyAll()
+
+        self.assertEqual(2, self.counter)
+
+    def testMultipleTimesThreeMethods(self):
+        """Test if MultipleTimesGroup works with three or more methods."""
+        mock_obj = self.mox.CreateMockAnything()
+        mock_obj.Open()
+        mock_obj.Method(1).MultipleTimes().AndReturn(9)
+        mock_obj.Method(2).MultipleTimes().AndReturn(8)
+        mock_obj.Method(3).MultipleTimes().AndReturn(7)
+        mock_obj.Method(4).AndReturn(10)
+        mock_obj.Close()
+        self.mox.ReplayAll()
+
+        mock_obj.Open()
+        actual_three = mock_obj.Method(3)
+        mock_obj.Method(1)
+        actual_two = mock_obj.Method(2)
+        mock_obj.Method(3)
+        actual_one = mock_obj.Method(1)
+        actual_four = mock_obj.Method(4)
+        mock_obj.Close()
+
+        self.assertEqual(9, actual_one)
+        self.assertEqual(8, actual_two)
+        self.assertEqual(7, actual_three)
+        self.assertEqual(10, actual_four)
+
+        self.mox.VerifyAll()
+
+    def testMultipleTimesMissingOne(self):
+        """Test if MultipleTimesGroup fails if one method is missing."""
+        mock_obj = self.mox.CreateMockAnything()
+        mock_obj.Open()
+        mock_obj.Method(1).MultipleTimes().AndReturn(9)
+        mock_obj.Method(2).MultipleTimes().AndReturn(8)
+        mock_obj.Method(3).MultipleTimes().AndReturn(7)
+        mock_obj.Method(4).AndReturn(10)
+        mock_obj.Close()
+        self.mox.ReplayAll()
+
+        mock_obj.Open()
+        mock_obj.Method(3)
+        mock_obj.Method(2)
+        mock_obj.Method(3)
+        mock_obj.Method(3)
+        mock_obj.Method(2)
+
+        self.assertRaises(mox.UnexpectedMethodCallError, mock_obj.Method, 4)
+
+    def testMultipleTimesTwoGroups(self):
+        """Test if MultipleTimesGroup works with a group after a
+        MultipleTimesGroup.
+        """
+        mock_obj = self.mox.CreateMockAnything()
+        mock_obj.Open()
+        mock_obj.Method(1).MultipleTimes().AndReturn(9)
+        mock_obj.Method(3).MultipleTimes("nr2").AndReturn(42)
+        mock_obj.Close()
+        self.mox.ReplayAll()
+
+        mock_obj.Open()
+        actual_one = mock_obj.Method(1)
+        mock_obj.Method(1)
+        actual_three = mock_obj.Method(3)
+        mock_obj.Method(3)
+        mock_obj.Close()
+
+        self.assertEqual(9, actual_one)
+        self.assertEqual(42, actual_three)
+
+        self.mox.VerifyAll()
+
+    def testMultipleTimesTwoGroupsFailure(self):
+        """Test if MultipleTimesGroup fails with a group after a
+        MultipleTimesGroup.
+        """
+        mock_obj = self.mox.CreateMockAnything()
+        mock_obj.Open()
+        mock_obj.Method(1).MultipleTimes().AndReturn(9)
+        mock_obj.Method(3).MultipleTimes("nr2").AndReturn(42)
+        mock_obj.Close()
+        self.mox.ReplayAll()
+
+        mock_obj.Open()
+        mock_obj.Method(1)
+        mock_obj.Method(1)
+        mock_obj.Method(3)
+
+        self.assertRaises(mox.UnexpectedMethodCallError, mock_obj.Method, 1)
+
+    def testWithSideEffects(self):
+        """Test side effect operations actually modify their target objects."""
+        def modifier(mutable_list):
+            mutable_list[0] = 'mutated'
+        mock_obj = self.mox.CreateMockAnything()
+        mock_obj.ConfigureInOutParameter(
+            ['original']).WithSideEffects(modifier)
+        mock_obj.WorkWithParameter(['mutated'])
+        self.mox.ReplayAll()
+
+        local_list = ['original']
+        mock_obj.ConfigureInOutParameter(local_list)
+        mock_obj.WorkWithParameter(local_list)
+
+        self.mox.VerifyAll()
+
+    def testWithSideEffectsException(self):
+        """Test side effect operations actually modify their target objects."""
+        class TestException(Exception):
+            pass
+
+        def modifier(mutable_list):
+            mutable_list[0] = 'mutated'
+        mock_obj = self.mox.CreateMockAnything()
+        method = mock_obj.ConfigureInOutParameter(['original'])
+        method.WithSideEffects(modifier).AndRaise(TestException('exception'))
+        mock_obj.WorkWithParameter(['mutated'])
+        self.mox.ReplayAll()
+
+        local_list = ['original']
+        self.assertRaises(TestException,
+                          mock_obj.ConfigureInOutParameter,
+                          local_list)
+        mock_obj.WorkWithParameter(local_list)
+
+        self.mox.VerifyAll()
+
+    def testStubOutMethod(self):
+        """Test that a method is replaced with a MockObject."""
+        test_obj = TestClass()
+        method_type = type(test_obj.OtherValidCall)
+        # Replace OtherValidCall with a mock.
+        self.mox.StubOutWithMock(test_obj, 'OtherValidCall')
+        self.assertTrue(isinstance(test_obj.OtherValidCall, mox.MockObject))
+        self.assertFalse(type(test_obj.OtherValidCall) is method_type)
+
+        test_obj.OtherValidCall().AndReturn('foo')
+        self.mox.ReplayAll()
+
+        actual = test_obj.OtherValidCall()
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+        self.assertEqual('foo', actual)
+        self.assertTrue(type(test_obj.OtherValidCall) is method_type)
+
+    def testStubOutMethod_Unbound_Comparator(self):
+        instance = TestClass()
+        self.mox.StubOutWithMock(TestClass, 'OtherValidCall')
+
+        TestClass.OtherValidCall(mox.IgnoreArg()).AndReturn('foo')
+        self.mox.ReplayAll()
+
+        actual = TestClass.OtherValidCall(instance)
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+        self.assertEqual('foo', actual)
+
+    def testStubOutMethod_Unbound_Subclass_Comparator(self):
+        self.mox.StubOutWithMock(
+            mox_helper.TestClassFromAnotherModule, 'Value')
+        mox_helper.TestClassFromAnotherModule.Value(
+            mox.IsA(mox_helper.ChildClassFromAnotherModule)).AndReturn('foo')
+        self.mox.ReplayAll()
+
+        instance = mox_helper.ChildClassFromAnotherModule()
+        actual = mox_helper.TestClassFromAnotherModule.Value(instance)
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+        self.assertEqual('foo', actual)
+
+    def testStubOuMethod_Unbound_WithOptionalParams(self):
+        self.mox = mox.Mox()
+        self.mox.StubOutWithMock(TestClass, 'OptionalArgs')
+        TestClass.OptionalArgs(mox.IgnoreArg(), foo=2)
+        self.mox.ReplayAll()
+
+        t = TestClass()
+        TestClass.OptionalArgs(t, foo=2)
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+
+    def testStubOutMethod_Unbound_ActualInstance(self):
+        instance = TestClass()
+        self.mox.StubOutWithMock(TestClass, 'OtherValidCall')
+
+        TestClass.OtherValidCall(instance).AndReturn('foo')
+        self.mox.ReplayAll()
+
+        actual = TestClass.OtherValidCall(instance)
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+        self.assertEqual('foo', actual)
+
+    def testStubOutMethod_Unbound_DifferentInstance(self):
+        instance = TestClass()
+        self.mox.StubOutWithMock(TestClass, 'OtherValidCall')
+
+        TestClass.OtherValidCall(instance).AndReturn('foo')
+        self.mox.ReplayAll()
+
+        # This should fail, since the instances are different
+        self.assertRaises(mox.UnexpectedMethodCallError,
+                          TestClass.OtherValidCall, "wrong self")
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+
+    def testStubOutMethod_Unbound_NamedUsingPositional(self):
+        """Check positional parameters can be matched to keyword arguments."""
+        self.mox.StubOutWithMock(mox_helper.ExampleClass, 'NamedParams')
+        instance = mox_helper.ExampleClass()
+        mox_helper.ExampleClass.NamedParams(instance, 'foo', baz=None)
+        self.mox.ReplayAll()
+
+        mox_helper.ExampleClass.NamedParams(instance, 'foo', baz=None)
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+
+    def testStubOutMethod_Unbound_NamedUsingPositional_SomePositional(self):
+        """Check positional parameters can be matched to keyword arguments."""
+        self.mox.StubOutWithMock(mox_helper.ExampleClass, 'TestMethod')
+        instance = mox_helper.ExampleClass()
+        mox_helper.ExampleClass.TestMethod(instance, 'one', 'two', 'nine')
+        self.mox.ReplayAll()
+
+        mox_helper.ExampleClass.TestMethod(instance, 'one', 'two', 'nine')
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+
+    def testStubOutMethod_Unbound_SpecialArgs(self):
+        self.mox.StubOutWithMock(mox_helper.ExampleClass, 'SpecialArgs')
+        instance = mox_helper.ExampleClass()
+        mox_helper.ExampleClass.SpecialArgs(instance, 'foo', None, bar='bar')
+        self.mox.ReplayAll()
+
+        mox_helper.ExampleClass.SpecialArgs(instance, 'foo', None, bar='bar')
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+
+    def testStubOutMethod_Bound_SimpleTest(self):
+        t = self.mox.CreateMock(TestClass)
+
+        t.MethodWithArgs(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn('foo')
+        self.mox.ReplayAll()
+
+        actual = t.MethodWithArgs(None, None)
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+        self.assertEqual('foo', actual)
+
+    def testStubOutMethod_Bound_NamedUsingPositional(self):
+        """Check positional parameters can be matched to keyword arguments."""
+        self.mox.StubOutWithMock(mox_helper.ExampleClass, 'NamedParams')
+        instance = mox_helper.ExampleClass()
+        instance.NamedParams('foo', baz=None)
+        self.mox.ReplayAll()
+
+        instance.NamedParams('foo', baz=None)
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+
+    def testStubOutMethod_Bound_NamedUsingPositional_SomePositional(self):
+        """Check positional parameters can be matched to keyword arguments."""
+        self.mox.StubOutWithMock(mox_helper.ExampleClass, 'TestMethod')
+        instance = mox_helper.ExampleClass()
+        instance.TestMethod(instance, 'one', 'two', 'nine')
+        self.mox.ReplayAll()
+
+        instance.TestMethod(instance, 'one', 'two', 'nine')
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+
+    def testStubOutMethod_Bound_SpecialArgs(self):
+        self.mox.StubOutWithMock(mox_helper.ExampleClass, 'SpecialArgs')
+        instance = mox_helper.ExampleClass()
+        instance.SpecialArgs(instance, 'foo', None, bar='bar')
+        self.mox.ReplayAll()
+
+        instance.SpecialArgs(instance, 'foo', None, bar='bar')
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+
+    def testStubOutMethod_Func_PropgatesExceptions(self):
+        """Errors in Func comparator should propagate to the calling method."""
+        class TestException(Exception):
+            pass
+
+        def raiseExceptionOnNotOne(value):
+            if value == 1:
+                return True
+            else:
+                raise TestException
+
+        test_obj = TestClass()
+        self.mox.StubOutWithMock(test_obj, 'MethodWithArgs')
+        test_obj.MethodWithArgs(
+            mox.IgnoreArg(), mox.Func(raiseExceptionOnNotOne)).AndReturn(1)
+        test_obj.MethodWithArgs(
+            mox.IgnoreArg(), mox.Func(raiseExceptionOnNotOne)).AndReturn(1)
+        self.mox.ReplayAll()
+
+        self.assertEqual(test_obj.MethodWithArgs('ignored', 1), 1)
+        self.assertRaises(TestException,
+                          test_obj.MethodWithArgs, 'ignored', 2)
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+
+    def testStubOut_SignatureMatching_init_(self):
+        self.mox.StubOutWithMock(mox_helper.ExampleClass, '__init__')
+        mox_helper.ExampleClass.__init__(mox.IgnoreArg())
+        self.mox.ReplayAll()
+
+        # Create an instance of a child class, which calls the parent
+        # __init__
+        mox_helper.ChildExampleClass()
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+
+    # FIXME(dhellmann): Skip this test until someone can debug why it
+    # fails on python 3.4.
+
+    @testtools.skipIf(six.PY3, "This test needs to be fixed for python 3")
+    def testStubOutClass_OldStyle(self):
+        """Test a mocked class whose __init__ returns a Mock."""
+        self.mox.StubOutWithMock(mox_helper, 'TestClassFromAnotherModule')
+        self.assertTrue(isinstance(mox_helper.TestClassFromAnotherModule,
+                                   mox.MockObject))
+
+        mock_instance = self.mox.CreateMock(
+            mox_helper.TestClassFromAnotherModule)
+        mox_helper.TestClassFromAnotherModule().AndReturn(mock_instance)
+        mock_instance.Value().AndReturn('mock instance')
+
+        self.mox.ReplayAll()
+
+        a_mock = mox_helper.TestClassFromAnotherModule()
+        actual = a_mock.Value()
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+        self.assertEqual('mock instance', actual)
+
+    def testStubOutClass(self):
+        self.mox.StubOutClassWithMocks(mox_helper, 'CallableClass')
+
+        # Instance one
+        mock_one = mox_helper.CallableClass(1, 2)
+        mock_one.Value().AndReturn('mock')
+
+        # Instance two
+        mock_two = mox_helper.CallableClass(8, 9)
+        mock_two('one').AndReturn('called mock')
+
+        self.mox.ReplayAll()
+
+        one = mox_helper.CallableClass(1, 2)
+        actual_one = one.Value()
+
+        two = mox_helper.CallableClass(8, 9)
+        actual_two = two('one')
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+
+        # Verify the correct mocks were returned
+        self.assertEqual(mock_one, one)
+        self.assertEqual(mock_two, two)
+
+        # Verify
+        self.assertEqual('mock', actual_one)
+        self.assertEqual('called mock', actual_two)
+
+    def testStubOutClass_NotAClass(self):
+        self.assertRaises(TypeError, self.mox.StubOutClassWithMocks,
+                          mox_helper, 'MyTestFunction')
+
+    def testStubOutClassNotEnoughCreated(self):
+        self.mox.StubOutClassWithMocks(mox_helper, 'CallableClass')
+
+        mox_helper.CallableClass(1, 2)
+        mox_helper.CallableClass(8, 9)
+
+        self.mox.ReplayAll()
+        mox_helper.CallableClass(1, 2)
+
+        self.assertRaises(mox.ExpectedMockCreationError, self.mox.VerifyAll)
+        self.mox.UnsetStubs()
+
+    def testStubOutClassWrongSignature(self):
+        self.mox.StubOutClassWithMocks(mox_helper, 'CallableClass')
+
+        self.assertRaises(AttributeError, mox_helper.CallableClass)
+
+        self.mox.UnsetStubs()
+
+    def testStubOutClassWrongParameters(self):
+        self.mox.StubOutClassWithMocks(mox_helper, 'CallableClass')
+
+        mox_helper.CallableClass(1, 2)
+
+        self.mox.ReplayAll()
+
+        self.assertRaises(mox.UnexpectedMethodCallError,
+                          mox_helper.CallableClass, 8, 9)
+        self.mox.UnsetStubs()
+
+    def testStubOutClassTooManyCreated(self):
+        self.mox.StubOutClassWithMocks(mox_helper, 'CallableClass')
+
+        mox_helper.CallableClass(1, 2)
+
+        self.mox.ReplayAll()
+        mox_helper.CallableClass(1, 2)
+        self.assertRaises(mox.UnexpectedMockCreationError,
+                          mox_helper.CallableClass, 8, 9)
+
+        self.mox.UnsetStubs()
+
+    def testWarnsUserIfMockingMock(self):
+        """Test that user is warned if they try to stub out a MockAnything."""
+        self.mox.StubOutWithMock(TestClass, 'MyStaticMethod')
+        self.assertRaises(TypeError, self.mox.StubOutWithMock, TestClass,
+                          'MyStaticMethod')
+
+    def testStubOutFirstClassMethodVerifiesSignature(self):
+        self.mox.StubOutWithMock(mox_helper, 'MyTestFunction')
+
+        # Wrong number of arguments
+        self.assertRaises(AttributeError, mox_helper.MyTestFunction, 1)
+        self.mox.UnsetStubs()
+
+    def _testMethodSignatureVerification(self, stubClass):
+        # If stubClass is true, the test is run against a stubbed out class,
+        # else the test is run against a stubbed out instance.
+        if stubClass:
+            self.mox.StubOutWithMock(mox_helper.ExampleClass, "TestMethod")
+            obj = mox_helper.ExampleClass()
+        else:
+            obj = mox_helper.ExampleClass()
+            self.mox.StubOutWithMock(mox_helper.ExampleClass, "TestMethod")
+        self.assertRaises(AttributeError, obj.TestMethod)
+        self.assertRaises(AttributeError, obj.TestMethod, 1)
+        self.assertRaises(AttributeError, obj.TestMethod, nine=2)
+        obj.TestMethod(1, 2)
+        obj.TestMethod(1, 2, 3)
+        obj.TestMethod(1, 2, nine=3)
+        self.assertRaises(AttributeError, obj.TestMethod, 1, 2, 3, 4)
+        self.mox.UnsetStubs()
+
+    def testStubOutClassMethodVerifiesSignature(self):
+        self._testMethodSignatureVerification(stubClass=True)
+
+    def testStubOutObjectMethodVerifiesSignature(self):
+        self._testMethodSignatureVerification(stubClass=False)
+
+    def testStubOutObject(self):
+        """Test than object is replaced with a Mock."""
+
+        class Foo(object):
+            def __init__(self):
+                self.obj = TestClass()
+
+        foo = Foo()
+        self.mox.StubOutWithMock(foo, "obj")
+        self.assertTrue(isinstance(foo.obj, mox.MockObject))
+        foo.obj.ValidCall()
+        self.mox.ReplayAll()
+
+        foo.obj.ValidCall()
+
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+        self.assertFalse(isinstance(foo.obj, mox.MockObject))
+
+    def testForgotReplayHelpfulMessage(self):
+        """If there is an AttributeError on a MockMethod, give helpful msg."""
+        foo = self.mox.CreateMockAnything()
+        bar = self.mox.CreateMockAnything()
+        foo.GetBar().AndReturn(bar)
+        bar.ShowMeTheMoney()
+        # Forgot to replay!
+        try:
+            foo.GetBar().ShowMeTheMoney()
+        except AttributeError as e:
+            self.assertEqual(
+                'MockMethod has no attribute "ShowMeTheMoney". '
+                'Did you remember to put your mocks in replay mode?', str(e))
+
+
+class ReplayTest(testtools.TestCase):
+    """Verify Replay works properly."""
+
+    def testReplay(self):
+        """Replay should put objects into replay mode."""
+        mock_obj = mox.MockObject(TestClass)
+        self.assertFalse(mock_obj._replay_mode)
+        mox.Replay(mock_obj)
+        self.assertTrue(mock_obj._replay_mode)
+
+
+class MoxTestBaseTest(testtools.TestCase):
+    """Verify that all tests in class derived from MoxTestBase are wrapped."""
+
+    def setUp(self):
+        super(MoxTestBaseTest, self).setUp()
+        self.mox = mox.Mox()
+        self.addCleanup(self.mox.UnsetStubs)
+        self.test_mox = mox.Mox()
+        self.addCleanup(self.test_mox.UnsetStubs)
+        self.test_stubs = mox.stubout.StubOutForTesting()
+        self.addCleanup(self.test_stubs.UnsetAll)
+        self.addCleanup(self.test_stubs.SmartUnsetAll)
+        self.result = testtools.TestResult()
+
+    def _setUpTestClass(self):
+        """Replacement for setUp in the test class instance.
+
+        Assigns a mox.Mox instance as the mox attribute of the test instance.
+        The replacement Mox instance is under our control before setUp is
+        called in the test class instance.
+        """
+        self.test.mox = self.test_mox
+        self.test.stubs = self.test_stubs
+
+    def _CreateTest(self, test_name):
+        """Create a test from our example mox class.
+
+        The created test instance is assigned to this instance's test attribute.
+        """
+        self.test = mox_helper.ExampleMoxTest(test_name)
+        self.mox.stubs.Set(self.test, 'setUp', self._setUpTestClass)
+
+    def _VerifySuccess(self):
+        """Run the checks to confirm test method completed successfully."""
+        self.mox.StubOutWithMock(self.test_mox, 'UnsetStubs')
+        self.mox.StubOutWithMock(self.test_mox, 'VerifyAll')
+        self.mox.StubOutWithMock(self.test_stubs, 'UnsetAll')
+        self.mox.StubOutWithMock(self.test_stubs, 'SmartUnsetAll')
+        self.test_mox.UnsetStubs()
+        self.test_mox.VerifyAll()
+        self.test_stubs.UnsetAll()
+        self.test_stubs.SmartUnsetAll()
+        self.mox.ReplayAll()
+        self.test.run(result=self.result)
+        self.assertTrue(self.result.wasSuccessful())
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()    # Needed to call the real VerifyAll() below.
+        self.test_mox.VerifyAll()
+
+    def testSuccess(self):
+        """Successful test method execution test."""
+        self._CreateTest('testSuccess')
+        self._VerifySuccess()
+
+    def testSuccessNoMocks(self):
+        """testSuccess() unsets all the mocks. Vverify they've been unset."""
+        self._CreateTest('testSuccess')
+        self.test.run(result=self.result)
+        self.assertTrue(self.result.wasSuccessful())
+        self.assertEqual(OS_LISTDIR, mox_helper.os.listdir)
+
+    def testStubs(self):
+        """Test that "self.stubs" is provided as is useful."""
+        self._CreateTest('testHasStubs')
+        self._VerifySuccess()
+
+    def testStubsNoMocks(self):
+        """Let testHasStubs() unset the stubs by itself."""
+        self._CreateTest('testHasStubs')
+        self.test.run(result=self.result)
+        self.assertTrue(self.result.wasSuccessful())
+        self.assertEqual(OS_LISTDIR, mox_helper.os.listdir)
+
+    def testExpectedNotCalled(self):
+        """Stubbed out method is not called."""
+        self._CreateTest('testExpectedNotCalled')
+        self.mox.StubOutWithMock(self.test_mox, 'UnsetStubs')
+        self.mox.StubOutWithMock(self.test_stubs, 'UnsetAll')
+        self.mox.StubOutWithMock(self.test_stubs, 'SmartUnsetAll')
+        # Don't stub out VerifyAll - that's what causes the test to fail
+        self.test_mox.UnsetStubs()
+        self.test_stubs.UnsetAll()
+        self.test_stubs.SmartUnsetAll()
+        self.mox.ReplayAll()
+        self.test.run(result=self.result)
+        self.assertFalse(self.result.wasSuccessful())
+        self.mox.VerifyAll()
+
+    def testExpectedNotCalledNoMocks(self):
+        """Let testExpectedNotCalled() unset all the mocks by itself."""
+        self._CreateTest('testExpectedNotCalled')
+        self.test.run(result=self.result)
+        self.assertFalse(self.result.wasSuccessful())
+        self.assertEqual(OS_LISTDIR, mox_helper.os.listdir)
+
+    def testUnexpectedCall(self):
+        """Stubbed out method is called with unexpected arguments."""
+        self._CreateTest('testUnexpectedCall')
+        self.mox.StubOutWithMock(self.test_mox, 'UnsetStubs')
+        self.mox.StubOutWithMock(self.test_stubs, 'UnsetAll')
+        self.mox.StubOutWithMock(self.test_stubs, 'SmartUnsetAll')
+        # Ensure no calls are made to VerifyAll()
+        self.mox.StubOutWithMock(self.test_mox, 'VerifyAll')
+        self.test_mox.UnsetStubs()
+        self.test_stubs.UnsetAll()
+        self.test_stubs.SmartUnsetAll()
+        self.mox.ReplayAll()
+        self.test.run(result=self.result)
+        self.assertFalse(self.result.wasSuccessful())
+        self.mox.VerifyAll()
+
+    def testFailure(self):
+        """Failing assertion in test method."""
+        self._CreateTest('testFailure')
+        self.mox.StubOutWithMock(self.test_mox, 'UnsetStubs')
+        self.mox.StubOutWithMock(self.test_stubs, 'UnsetAll')
+        self.mox.StubOutWithMock(self.test_stubs, 'SmartUnsetAll')
+        # Ensure no calls are made to VerifyAll()
+        self.mox.StubOutWithMock(self.test_mox, 'VerifyAll')
+        self.test_mox.UnsetStubs()
+        self.test_stubs.UnsetAll()
+        self.test_stubs.SmartUnsetAll()
+        self.mox.ReplayAll()
+        self.test.run(result=self.result)
+        self.assertFalse(self.result.wasSuccessful())
+        self.mox.VerifyAll()
+
+    def testMixin(self):
+        """Run test from mix-in test class, ensure it passes."""
+        self._CreateTest('testStat')
+        self._VerifySuccess()
+
+    def testMixinAgain(self):
+        """Run same test as above but from the current test class.
+
+        Ensures metaclass properly wrapped test methods from all base classes.
+        If unsetting of stubs doesn't happen, this will fail.
+        """
+        self._CreateTest('testStatOther')
+        self._VerifySuccess()
+
+
+class VerifyTest(testtools.TestCase):
+    """Verify Verify works properly."""
+
+    def testVerify(self):
+        """Verify should be called for all objects.
+
+        Should throw an exception because the expected behavior did not occur.
+        """
+        mock_obj = mox.MockObject(TestClass)
+        mock_obj.ValidCall()
+        mock_obj._Replay()
+        self.assertRaises(mox.ExpectedMethodCallsError, mox.Verify, mock_obj)
+
+
+class ResetTest(testtools.TestCase):
+    """Verify Reset works properly."""
+
+    def testReset(self):
+        """Should empty all queues and put mocks in record mode."""
+        mock_obj = mox.MockObject(TestClass)
+        mock_obj.ValidCall()
+        self.assertFalse(mock_obj._replay_mode)
+        mock_obj._Replay()
+        self.assertTrue(mock_obj._replay_mode)
+        self.assertEqual(1, len(mock_obj._expected_calls_queue))
+
+        mox.Reset(mock_obj)
+        self.assertFalse(mock_obj._replay_mode)
+        self.assertEqual(0, len(mock_obj._expected_calls_queue))
+
+
+class MyTestCase(testtools.TestCase):
+    """Simulate the use of a fake wrapper around Python's unittest library."""
+
+    def setUp(self):
+        super(MyTestCase, self).setUp()
+        self.critical_variable = 42
+        self.another_critical_variable = 42
+
+    def testMethodOverride(self):
+        """Should be properly overriden in a derived class."""
+        self.assertEqual(42, self.another_critical_variable)
+        self.another_critical_variable += 1
+
+
+class MoxTestBaseMultipleInheritanceTest(mox.MoxTestBase, MyTestCase):
+    """Test that multiple inheritance can be used with MoxTestBase."""
+
+    def setUp(self):
+        super(MoxTestBaseMultipleInheritanceTest, self).setUp()
+        self.another_critical_variable = 99
+
+    def testMultipleInheritance(self):
+        """Should be able to access members created by all parent setUp()."""
+        self.assertTrue(isinstance(self.mox, mox.Mox))
+        self.assertEqual(42, self.critical_variable)
+
+    def testMethodOverride(self):
+        """Should run before MyTestCase.testMethodOverride."""
+        self.assertEqual(99, self.another_critical_variable)
+        self.another_critical_variable = 42
+        super(MoxTestBaseMultipleInheritanceTest, self).testMethodOverride()
+        self.assertEqual(43, self.another_critical_variable)
+
+
+class MoxTestDontMockProperties(MoxTestBaseTest):
+    def testPropertiesArentMocked(self):
+        mock_class = self.mox.CreateMock(ClassWithProperties)
+        self.assertRaises(mox.UnknownMethodCallError,
+                          lambda: mock_class.prop_attr)
+
+
+class TestClass(object):
+    """This class is used only for testing the mock framework."""
+
+    SOME_CLASS_VAR = "test_value"
+    _PROTECTED_CLASS_VAR = "protected value"
+
+    def __init__(self, ivar=None):
+        self.__ivar = ivar
+
+    def __eq__(self, rhs):
+        return self.__ivar == rhs
+
+    def __ne__(self, rhs):
+        return not self.__eq__(rhs)
+
+    def ValidCall(self):
+        pass
+
+    def MethodWithArgs(self, one, two, nine=None):
+        pass
+
+    def OtherValidCall(self):
+        pass
+
+    def OptionalArgs(self, foo='boom'):
+        pass
+
+    def ValidCallWithArgs(self, *args, **kwargs):
+        pass
+
+    @classmethod
+    def MyClassMethod(cls):
+        pass
+
+    @staticmethod
+    def MyStaticMethod():
+        pass
+
+    def _ProtectedCall(self):
+        pass
+
+    def __PrivateCall(self):
+        pass
+
+    def __DoNotMock(self):
+        pass
+
+    def __getitem__(self, key):
+        """Return the value for key."""
+        return self.d[key]
+
+    def __setitem__(self, key, value):
+        """Set the value for key to value."""
+        self.d[key] = value
+
+    def __contains__(self, key):
+        """Returns True if d contains the key."""
+        return key in self.d
+
+    def __iter__(self):
+        pass
+
+
+class ChildClass(TestClass):
+    """This inherits from TestClass."""
+    def __init__(self):
+        TestClass.__init__(self)
+
+    def ChildValidCall(self):
+        pass
+
+
+class CallableClass(object):
+    """This class is callable, and that should be mockable!"""
+
+    def __init__(self):
+        pass
+
+    def __call__(self, param):
+        return param
+
+
+class ClassWithProperties(object):
+    def setter_attr(self, value):
+        pass
+
+    def getter_attr(self):
+        pass
+
+    prop_attr = property(getter_attr, setter_attr)
+
+
+class SubscribtableNonIterableClass(object):
+    def __getitem__(self, index):
+        raise IndexError
+
+
+class InheritsFromCallable(CallableClass):
+    """This class should be mockable; it inherits from a callable class."""
+
+    pass
+
+
+if __name__ == '__main__':
+    testtools.main()
diff --git a/catapult/third_party/mox3/mox3/tests/test_stubout.py b/catapult/third_party/mox3/mox3/tests/test_stubout.py
new file mode 100644
index 0000000..4a04170
--- /dev/null
+++ b/catapult/third_party/mox3/mox3/tests/test_stubout.py
@@ -0,0 +1,49 @@
+# Unit tests for stubout.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This is a fork of the pymox library intended to work with Python 3.
+# The file was modified by quermit@gmail.com and dawid.fatyga@gmail.com
+
+import fixtures
+import testtools
+
+from mox3 import mox
+from mox3 import stubout
+from mox3.tests import stubout_helper
+
+
+class StubOutForTestingTest(testtools.TestCase):
+    def setUp(self):
+        super(StubOutForTestingTest, self).setUp()
+        self.mox = mox.Mox()
+        self.useFixture(fixtures.MonkeyPatch(
+            'mox3.tests.stubout_helper.SampleFunction',
+            stubout_helper.SampleFunction))
+
+    def testSmartSetOnModule(self):
+        mock_function = self.mox.CreateMockAnything()
+        mock_function()
+
+        stubber = stubout.StubOutForTesting()
+        stubber.SmartSet(stubout_helper, 'SampleFunction', mock_function)
+
+        self.mox.ReplayAll()
+
+        stubout_helper.SampleFunction()
+
+        self.mox.VerifyAll()
+
+
+if __name__ == '__main__':
+    testtools.main()
diff --git a/catapult/third_party/mox3/requirements.txt b/catapult/third_party/mox3/requirements.txt
new file mode 100644
index 0000000..d52427f
--- /dev/null
+++ b/catapult/third_party/mox3/requirements.txt
@@ -0,0 +1,6 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+pbr<2.0,>=1.6
+
+fixtures>=1.3.1
diff --git a/catapult/third_party/mox3/setup.cfg b/catapult/third_party/mox3/setup.cfg
new file mode 100644
index 0000000..4a3de06
--- /dev/null
+++ b/catapult/third_party/mox3/setup.cfg
@@ -0,0 +1,27 @@
+[metadata]
+name = mox3
+summary = Mock object framework for Python
+description-file =
+    README.rst
+author = OpenStack
+author-email = openstack-dev@lists.openstack.org
+home-page = http://www.openstack.org/
+classifiers =
+    Environment :: OpenStack
+    Programming Language :: Python
+    License :: OSI Approved :: Apache Software License
+    Programming Language :: Python :: 2.6
+    Programming Language :: Python :: 2.7
+    Programming Language :: Python :: 3
+    Operating System :: OS Independent
+    Development Status :: 4 - Beta
+    Intended Audience :: Developers
+    Topic :: Software Development :: Testing
+
+[files]
+packages =
+    mox3
+
+[global]
+setup-hooks =
+    pbr.hooks.setup_hook
diff --git a/catapult/third_party/mox3/setup.py b/catapult/third_party/mox3/setup.py
new file mode 100644
index 0000000..d8080d0
--- /dev/null
+++ b/catapult/third_party/mox3/setup.py
@@ -0,0 +1,29 @@
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
+import setuptools
+
+# In python < 2.7.4, a lazy loading of package `pbr` will break
+# setuptools if some other modules registered functions in `atexit`.
+# solution from: http://bugs.python.org/issue15881#msg170215
+try:
+    import multiprocessing  # noqa
+except ImportError:
+    pass
+
+setuptools.setup(
+    setup_requires=['pbr>=1.3'],
+    pbr=True)
diff --git a/catapult/third_party/mox3/test-requirements.txt b/catapult/third_party/mox3/test-requirements.txt
new file mode 100644
index 0000000..22f6480
--- /dev/null
+++ b/catapult/third_party/mox3/test-requirements.txt
@@ -0,0 +1,22 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+# this file lists dependencies required for the testing of mox3
+
+# Install bounded pep8/pyflakes first, then let flake8 install
+pep8==1.5.7
+pyflakes==0.8.1
+flake8<=2.4.1,>=2.2.4
+
+coverage>=3.6
+discover
+python-subunit>=0.0.18
+testrepository>=0.0.18
+testtools>=1.4.0
+
+six>=1.9.0
+
+# this is required for the docs build jobs
+sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2
+oslosphinx>=2.5.0 # Apache-2.0
+
diff --git a/catapult/third_party/mox3/tox.ini b/catapult/third_party/mox3/tox.ini
new file mode 100644
index 0000000..eea97fc
--- /dev/null
+++ b/catapult/third_party/mox3/tox.ini
@@ -0,0 +1,28 @@
+[tox]
+envlist = py34,py27,pep8
+
+[testenv]
+setenv = VIRTUAL_ENV={envdir}
+deps = -r{toxinidir}/requirements.txt
+       -r{toxinidir}/test-requirements.txt
+commands =
+  python setup.py testr --slowest --testr-args='{posargs}'
+
+[testenv:docs]
+commands = python setup.py build_sphinx
+
+[testenv:pep8]
+commands = flake8
+
+[testenv:venv]
+commands = {posargs}
+
+[testenv:cover]
+setenv = VIRTUAL_ENV={envdir}
+commands =
+  python setup.py testr --coverage
+
+[flake8]
+show-source = true
+builtins = _
+exclude=.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg
diff --git a/catapult/third_party/py_vulcanize/bin/run_py_tests b/catapult/third_party/py_vulcanize/bin/run_py_tests
index b9b2f33..2584fcd 100755
--- a/catapult/third_party/py_vulcanize/bin/run_py_tests
+++ b/catapult/third_party/py_vulcanize/bin/run_py_tests
@@ -24,16 +24,6 @@
   else:
     install.InstallHooks()
 
-  # This flag is added only temporarily so that the tests can
-  # be re-enabled on the waterfall and fixed using try jobs.
-  # See https://github.com/catapult-project/catapult/issues/1584
-  if '--really-run' in sys.argv:
-    sys.argv.remove('--really-run')
-  else:
-    print 'Skipping test!'
-    sys.exit(0)
-
   from catapult_build import run_with_typ
   sys.exit(run_with_typ.Run(
       os.path.join(_CATAPULT, 'third_party', 'py_vulcanize')))
-
diff --git a/catapult/third_party/py_vulcanize/py_vulcanize/parse_html_deps.py b/catapult/third_party/py_vulcanize/py_vulcanize/parse_html_deps.py
index 99b21f1..6fbe31d 100644
--- a/catapult/third_party/py_vulcanize/py_vulcanize/parse_html_deps.py
+++ b/catapult/third_party/py_vulcanize/py_vulcanize/parse_html_deps.py
@@ -25,6 +25,9 @@
   html5lib_path = os.path.join(catapult_path, 'third_party', 'html5lib-python')
   _AddToPathIfNeeded(html5lib_path)
 
+  six_path = os.path.join(catapult_path, 'third_party', 'six')
+  _AddToPathIfNeeded(six_path)
+
 
 _InitBeautifulSoup()
 import bs4
diff --git a/catapult/third_party/pyfakefs/COPYING b/catapult/third_party/pyfakefs/COPYING
new file mode 100644
index 0000000..67db858
--- /dev/null
+++ b/catapult/third_party/pyfakefs/COPYING
@@ -0,0 +1,175 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
diff --git a/catapult/third_party/pyfakefs/README.chromium b/catapult/third_party/pyfakefs/README.chromium
new file mode 100644
index 0000000..90826ad
--- /dev/null
+++ b/catapult/third_party/pyfakefs/README.chromium
@@ -0,0 +1,16 @@
+Name: pyfakefs
+URL: https://pypi.python.org/pypi/pyfakefs
+Version: 2.7
+Date: 2016-01-04
+License: Apache 2.0
+License File: COPYING
+Security Critical: no
+
+Description:
+pyfakefs implements a fake file system that mocks the Python file system
+modules. Using pyfakefs, your tests operate on a fake file system in memory
+without touching the real disk. The software under test requires no modification
+to work with pyfakefs.
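+
+A minimal usage sketch (illustrative only; the class and method names below
+come from pyfakefs/fake_filesystem.py in this package):
+  filesystem = fake_filesystem.FakeFilesystem()
+  os_module = fake_filesystem.FakeOsModule(filesystem)
+  filesystem.CreateFile('/tmp/example.txt', contents='hello')
+  os_module.path.exists('/tmp/example.txt')  # True, without touching the disk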
+
+Local Modifications:
+Remove everything except pyfakefs/ and COPYING.
diff --git a/catapult/perf_insights/third_party/__init__.py b/catapult/third_party/pyfakefs/pyfakefs/__init__.py
old mode 100644
new mode 100755
similarity index 100%
copy from catapult/perf_insights/third_party/__init__.py
copy to catapult/third_party/pyfakefs/pyfakefs/__init__.py
diff --git a/catapult/third_party/pyfakefs/pyfakefs/fake_filesystem.py b/catapult/third_party/pyfakefs/pyfakefs/fake_filesystem.py
new file mode 100644
index 0000000..4026781
--- /dev/null
+++ b/catapult/third_party/pyfakefs/pyfakefs/fake_filesystem.py
@@ -0,0 +1,2227 @@
+# Copyright 2009 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# pylint: disable-msg=W0612,W0613,C6409
+
+"""A fake filesystem implementation for unit testing.
+
+Includes:
+  FakeFile:  Provides the appearance of a real file.
+  FakeDirectory: Provides the appearance of a real dir.
+  FakeFilesystem:  Provides the appearance of a real directory hierarchy.
+  FakeOsModule:  Uses FakeFilesystem to provide a fake os module replacement.
+  FakePathModule:  Faked os.path module replacement.
+  FakeFileOpen:  Faked file() and open() function replacements.
+
+Usage:
+>>> from pyfakefs import fake_filesystem
+>>> filesystem = fake_filesystem.FakeFilesystem()
+>>> os_module = fake_filesystem.FakeOsModule(filesystem)
+>>> pathname = '/a/new/dir/new-file'
+
+Create a new file object, creating parent directory objects as needed:
+>>> os_module.path.exists(pathname)
+False
+>>> new_file = filesystem.CreateFile(pathname)
+
+File objects can't be overwritten:
+>>> os_module.path.exists(pathname)
+True
+>>> try:
+...   filesystem.CreateFile(pathname)
+... except IOError as e:
+...   assert e.errno == errno.EEXIST, 'unexpected errno: %d' % e.errno
+...   assert e.strerror == 'File already exists in fake filesystem'
+
+Remove a file object:
+>>> filesystem.RemoveObject(pathname)
+>>> os_module.path.exists(pathname)
+False
+
+Create a new file object at the previous path:
+>>> beatles_file = filesystem.CreateFile(pathname,
+...     contents='Dear Prudence\\nWon\\'t you come out to play?\\n')
+>>> os_module.path.exists(pathname)
+True
+
+Use the FakeFileOpen class to read fake file objects:
+>>> file_module = fake_filesystem.FakeFileOpen(filesystem)
+>>> for line in file_module(pathname):
+...     print line.rstrip()
+...
+Dear Prudence
+Won't you come out to play?
+
+File objects cannot be treated like directory objects:
+>>> os_module.listdir(pathname)  #doctest: +NORMALIZE_WHITESPACE
+Traceback (most recent call last):
+  File "fake_filesystem.py", line 291, in listdir
+    raise OSError(errno.ENOTDIR,
+OSError: [Errno 20] Fake os module: not a directory: '/a/new/dir/new-file'
+
+The FakeOsModule can list fake directory objects:
+>>> os_module.listdir(os_module.path.dirname(pathname))
+['new-file']
+
+The FakeOsModule also supports stat operations:
+>>> import stat
+>>> stat.S_ISREG(os_module.stat(pathname).st_mode)
+True
+>>> stat.S_ISDIR(os_module.stat(os_module.path.dirname(pathname)).st_mode)
+True
+"""
+
+import errno
+import heapq
+import os
+import stat
+import sys
+import time
+import warnings
+import binascii
+
+try:
+  import cStringIO as io  # pylint: disable-msg=C6204
+except ImportError:
+  import io  # pylint: disable-msg=C6204
+
+__pychecker__ = 'no-reimportself'
+
+__version__ = '2.7'
+
+PERM_READ = 0o400      # Read permission bit.
+PERM_WRITE = 0o200     # Write permission bit.
+PERM_EXE = 0o100       # Execute permission bit.
+PERM_DEF = 0o777       # Default permission bits.
+PERM_DEF_FILE = 0o666  # Default permission bits (regular file)
+PERM_ALL = 0o7777      # All permission bits.
+
+_OPEN_MODE_MAP = {
+    # mode name:(file must exist, need read, need write,
+    #            truncate [implies need write], append)
+    'r': (True, True, False, False, False),
+    'w': (False, False, True, True, False),
+    'a': (False, False, True, False, True),
+    'r+': (True, True, True, False, False),
+    'w+': (False, True, True, True, False),
+    'a+': (False, True, True, False, True),
+    }
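+
+# For example, the 'w+' entry above reads: the file does not have to exist,
+# both reading and writing are needed, the file is truncated on open, and
+# writes are not forced to append.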
+
+_MAX_LINK_DEPTH = 20
+
+FAKE_PATH_MODULE_DEPRECATION = ('Do not instantiate a FakePathModule directly; '
+                                'let FakeOsModule instantiate it.  See the '
+                                'FakeOsModule docstring for details.')
+
+
+class Error(Exception):
+  pass
+
+_is_windows = sys.platform.startswith('win')
+_is_cygwin = sys.platform == 'cygwin'
+
+if _is_windows:
+  # On Windows, raise WindowsError instead of OSError if available
+  OSError = WindowsError  # pylint: disable-msg=E0602,W0622
+
+
+class FakeLargeFileIoException(Error):
+  def __init__(self, file_path):
+    Error.__init__(self,
+                   'Read and write operations not supported for '
+                   'fake large file: %s' % file_path)
+
+
+def CopyModule(old):
+  """Recompiles and creates new module object."""
+  saved = sys.modules.pop(old.__name__, None)
+  new = __import__(old.__name__)
+  sys.modules[old.__name__] = saved
+  return new
+
+
+class Hexlified(object):
+  """Wraps binary data in non-binary string"""
+  def __init__(self, contents):
+    self.contents = binascii.hexlify(contents).decode('utf-8')
+
+  def __len__(self):
+    return len(self.contents)//2
+
+  def recover(self, binary):
+    if binary:
+      return binascii.unhexlify(bytearray(self.contents, 'utf-8'))
+    else:
+      return binascii.unhexlify(bytearray(self.contents, 'utf-8')).decode(sys.getdefaultencoding())
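+
+  # For example, Hexlified(b'ab') stores the hex string '6162'; len() then
+  # reports 2 (the original byte count) and recover(True) returns b'ab'.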
+
+
+class FakeFile(object):
+  """Provides the appearance of a real file.
+
+     Attributes currently faked out:
+       st_mode: user-specified, otherwise S_IFREG
+       st_ctime: the time.time() timestamp when the file is created.
+       st_size: the size of the file
+
+     Other attributes needed by os.stat are assigned a default value of None;
+      these include: st_ino, st_dev, st_nlink, st_uid, st_gid, st_atime,
+      st_mtime
+  """
+
+  def __init__(self, name, st_mode=stat.S_IFREG | PERM_DEF_FILE,
+               contents=None):
+    """init.
+
+    Args:
+      name:  name of the file/directory, without parent path information
+      st_mode:  the stat.S_IF* constant representing the file type (e.g.
+        stat.S_IFREG, stat.S_IFDIR)
+      contents:  the contents of the filesystem object; should be a string for
+        regular files, and a list of other FakeFile or FakeDirectory objects
+        for FakeDirectory objects
+    """
+    self.name = name
+    self.st_mode = st_mode
+    self.contents = contents
+    self.epoch = 0
+    self.st_ctime = int(time.time())
+    self.st_atime = self.st_ctime
+    self.st_mtime = self.st_ctime
+    if contents:
+      self.st_size = len(contents)
+    else:
+      self.st_size = 0
+    # Non-faked features; write setter methods for faking them
+    self.st_ino = None
+    self.st_dev = None
+    self.st_nlink = None
+    self.st_uid = None
+    self.st_gid = None
+
+  def SetLargeFileSize(self, st_size):
+    """Sets the self.st_size attribute and replaces self.content with None.
+
+    Provided specifically to simulate very large files without regard
+    to their content (which wouldn't fit in memory).
+
+    Args:
+      st_size: The desired file size
+
+    Raises:
+      IOError: if the st_size is not a non-negative integer
+    """
+    # st_size should be a non-negative integer value
+    if not isinstance(st_size, int) or st_size < 0:
+      raise IOError(errno.ENOSPC,
+                    'Fake file object: can not create non negative integer '
+                    'size=%r fake file' % st_size,
+                    self.name)
+
+    self.st_size = st_size
+    self.contents = None
+
+  def IsLargeFile(self):
+    """Return True if this file was initialized with size but no contents."""
+    return self.contents is None
+
+  def SetContents(self, contents):
+    """Sets the file contents and size.
+
+    Args:
+      contents: string, new content of file.
+    """
+    # Wrap byte arrays into a safe format
+    if sys.version_info >= (3, 0) and isinstance(contents, bytes):
+      contents = Hexlified(contents)
+      
+    self.st_size = len(contents)
+    self.contents = contents
+    self.epoch += 1
+
+  def SetSize(self, st_size):
+    """Resizes file content, padding with nulls if new size exceeds the old.
+
+    Args:
+      st_size: The desired size for the file.
+
+    Raises:
+      IOError: if the st_size arg is not a non-negative integer
+    """
+
+    if not isinstance(st_size, int) or st_size < 0:
+      raise IOError(errno.ENOSPC,
+                    'Fake file object: can not create non negative integer '
+                    'size=%r fake file' % st_size,
+                    self.name)
+
+    current_size = len(self.contents)
+    if st_size < current_size:
+      self.contents = self.contents[:st_size]
+    else:
+      self.contents = '%s%s' % (self.contents, '\0' * (st_size - current_size))
+    self.st_size = len(self.contents)
+    self.epoch += 1
+
+  def SetATime(self, st_atime):
+    """Set the self.st_atime attribute.
+
+    Args:
+      st_atime: The desired atime.
+    """
+    self.st_atime = st_atime
+
+  def SetMTime(self, st_mtime):
+    """Set the self.st_mtime attribute.
+
+    Args:
+      st_mtime: The desired mtime.
+    """
+    self.st_mtime = st_mtime
+
+  def __str__(self):
+    return '%s(%o)' % (self.name, self.st_mode)
+
+  def SetIno(self, st_ino):
+    """Set the self.st_ino attribute.
+
+    Args:
+      st_ino: The desired inode.
+    """
+    self.st_ino = st_ino
+
+
+class FakeDirectory(FakeFile):
+  """Provides the appearance of a real dir."""
+
+  def __init__(self, name, perm_bits=PERM_DEF):
+    """init.
+
+    Args:
+      name:  name of the file/directory, without parent path information
+      perm_bits: permission bits. Defaults to 0o777.
+    """
+    FakeFile.__init__(self, name, stat.S_IFDIR | perm_bits, {})
+
+  def AddEntry(self, pathname):
+    """Adds a child FakeFile to this directory.
+
+    Args:
+      pathname:  FakeFile instance to add as a child of this directory
+    """
+    self.contents[pathname.name] = pathname
+
+  def GetEntry(self, pathname_name):
+    """Retrieves the specified child file or directory.
+
+    Args:
+      pathname_name: basename of the child object to retrieve
+    Returns:
+      the child FakeFile or FakeDirectory object
+    Raises:
+      KeyError: if no child exists by the specified name
+    """
+    return self.contents[pathname_name]
+
+  def RemoveEntry(self, pathname_name):
+    """Removes the specified child file or directory.
+
+    Args:
+      pathname_name: basename of the child object to remove
+
+    Raises:
+      KeyError: if no child exists by the specified name
+    """
+    del self.contents[pathname_name]
+
+  def __str__(self):
+    rc = super(FakeDirectory, self).__str__() + ':\n'
+    for item in self.contents:
+      item_desc = self.contents[item].__str__()
+      for line in item_desc.split('\n'):
+        if line:
+          rc = rc + '  ' + line + '\n'
+    return rc
+
+
+class FakeFilesystem(object):
+  """Provides the appearance of a real directory tree for unit testing."""
+
+  def __init__(self, path_separator=os.path.sep):
+    """init.
+
+    Args:
+      path_separator:  optional substitute for os.path.sep
+    """
+    self.path_separator = path_separator
+    self.root = FakeDirectory(self.path_separator)
+    self.cwd = self.root.name
+    # We can't query the current value without changing it:
+    self.umask = os.umask(0o22)
+    os.umask(self.umask)
+    # A list of open file objects. Their position in the list is their
+    # file descriptor number
+    self.open_files = []
+    # A heap containing all free positions in self.open_files list
+    self.free_fd_heap = []
+
+  def SetIno(self, path, st_ino):
+    """Set the self.st_ino attribute of file at 'path'.
+
+    Args:
+      path: Path to file.
+      st_ino: The desired inode.
+    """
+    self.GetObject(path).SetIno(st_ino)
+
+  def AddOpenFile(self, file_obj):
+    """Adds file_obj to the list of open files on the filesystem.
+
+    The position in the self.open_files array is the file descriptor number
+
+    Args:
+      file_obj:  file object to be added to open files list.
+
+    Returns:
+      File descriptor number for the file object.
+    """
+    if self.free_fd_heap:
+      open_fd = heapq.heappop(self.free_fd_heap)
+      self.open_files[open_fd] = file_obj
+      return open_fd
+
+    self.open_files.append(file_obj)
+    return len(self.open_files) - 1
+
+  def CloseOpenFile(self, file_obj):
+    """Removes file_obj from the list of open files on the filesystem.
+
+    Sets the entry in open_files to None.
+
+    Args:
+      file_obj:  file object to be removed from the open files list.
+    """
+    self.open_files[file_obj.filedes] = None
+    heapq.heappush(self.free_fd_heap, file_obj.filedes)
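+
+  # Descriptor-reuse sketch: AddOpenFile hands out list indices 0, 1, 2, ...
+  # as file descriptors; CloseOpenFile pushes a freed index onto
+  # free_fd_heap, so the next AddOpenFile call reuses the lowest free
+  # descriptor first, much as a real POSIX open() would.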
+
+  def GetOpenFile(self, file_des):
+    """Returns an open file.
+
+    Args:
+      file_des:  file descriptor of the open file.
+
+    Raises:
+      OSError: an invalid file descriptor.
+      TypeError: file_des is not an integer.
+
+    Returns:
+      Open file object.
+    """
+    if not isinstance(file_des, int):
+      raise TypeError('an integer is required')
+    if (file_des >= len(self.open_files) or
+        self.open_files[file_des] is None):
+      raise OSError(errno.EBADF, 'Bad file descriptor', file_des)
+    return self.open_files[file_des]
+
+  def CollapsePath(self, path):
+    """Mimics os.path.normpath using the specified path_separator.
+
+    Mimics os.path.normpath using the path_separator that was specified
+    for this FakeFilesystem.  Normalizes the path, but unlike the method
+    NormalizePath, does not make it absolute.  Eliminates dot components
+    (. and ..) and combines repeated path separators (//).  Initial ..
+    components are left in place for relative paths.  If the result is an empty
+    path, '.' is returned instead.  Unlike the real os.path.normpath, this does
+    not replace '/' with '\\' on Windows.
+
+    Args:
+      path:  (str) The path to normalize.
+
+    Returns:
+      (str) A copy of path with empty components and dot components removed.
+    """
+    is_absolute_path = path.startswith(self.path_separator)
+    path_components = path.split(self.path_separator)
+    collapsed_path_components = []
+    for component in path_components:
+      if (not component) or (component == '.'):
+        continue
+      if component == '..':
+        if collapsed_path_components and (
+            collapsed_path_components[-1] != '..'):
+          # Remove an up-reference: directory/..
+          collapsed_path_components.pop()
+          continue
+        elif is_absolute_path:
+          # Ignore leading .. components if starting from the root directory.
+          continue
+      collapsed_path_components.append(component)
+    collapsed_path = self.path_separator.join(collapsed_path_components)
+    if is_absolute_path:
+      collapsed_path = self.path_separator + collapsed_path
+    return collapsed_path or '.'
+
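+  # A few illustrative cases for CollapsePath above (expected results follow
+  # from the algorithm described in its docstring):
+  #   CollapsePath('/a/./b//../c')  ->  '/a/c'
+  #   CollapsePath('../x/./')       ->  '../x'
+  #   CollapsePath('')              ->  '.'
+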
+  def NormalizePath(self, path):
+    """Absolutize and minimalize the given path.
+
+    Forces all relative paths to be absolute, and normalizes the path to
+    eliminate dot and empty components.
+
+    Args:
+      path:  path to normalize
+
+    Returns:
+      The normalized path relative to the current working directory, or the root
+        directory if path is empty.
+    """
+    if not path:
+      path = self.path_separator
+    elif not path.startswith(self.path_separator):
+      # Prefix relative paths with cwd, if cwd is not root.
+      path = self.path_separator.join(
+          (self.cwd != self.root.name and self.cwd or '',
+           path))
+    if path == '.':
+      path = self.cwd
+    return self.CollapsePath(path)
+
+  def SplitPath(self, path):
+    """Mimics os.path.split using the specified path_separator.
+
+    Mimics os.path.split using the path_separator that was specified
+    for this FakeFilesystem.
+
+    Args:
+      path:  (str) The path to split.
+
+    Returns:
+      (str) A duple (pathname, basename) for which pathname does not
+          end with a slash, and basename does not contain a slash.
+    """
+    path_components = path.split(self.path_separator)
+    if not path_components:
+      return ('', '')
+    basename = path_components.pop()
+    if not path_components:
+      return ('', basename)
+    for component in path_components:
+      if component:
+        # The path is not the root; it contains a non-separator component.
+        # Strip all trailing separators.
+        while not path_components[-1]:
+          path_components.pop()
+        return (self.path_separator.join(path_components), basename)
+    # Root path.  Collapse all leading separators.
+    return (self.path_separator, basename)
+
+  def JoinPaths(self, *paths):
+    """Mimics os.path.join using the specified path_separator.
+
+    Mimics os.path.join using the path_separator that was specified
+    for this FakeFilesystem.
+
+    Args:
+      *paths:  (str) Zero or more paths to join.
+
+    Returns:
+      (str) The paths joined by the path separator, starting with the last
+          absolute path in paths.
+    """
+    if len(paths) == 1:
+      return paths[0]
+    joined_path_segments = []
+    for path_segment in paths:
+      if path_segment.startswith(self.path_separator):
+        # An absolute path
+        joined_path_segments = [path_segment]
+      else:
+        if (joined_path_segments and
+            not joined_path_segments[-1].endswith(self.path_separator)):
+          joined_path_segments.append(self.path_separator)
+        if path_segment:
+          joined_path_segments.append(path_segment)
+    return ''.join(joined_path_segments)
+
+  def GetPathComponents(self, path):
+    """Breaks the path into a list of component names.
+
+    Does not include the root directory as a component, as all paths
+    are considered relative to the root directory for the FakeFilesystem.
+    Callers should basically follow this pattern:
+
+      file_path = self.NormalizePath(file_path)
+      path_components = self.GetPathComponents(file_path)
+      current_dir = self.root
+      for component in path_components:
+        if component not in current_dir.contents:
+          raise IOError
+        DoStuffWithComponent(current_dir, component)
+        current_dir = current_dir.GetEntry(component)
+
+    Args:
+      path:  path to tokenize
+
+    Returns:
+      The list of names split from path
+    """
+    if not path or path == self.root.name:
+      return []
+    path_components = path.split(self.path_separator)
+    assert path_components
+    if not path_components[0]:
+      # This is an absolute path.
+      path_components = path_components[1:]
+    return path_components
+
+  def Exists(self, file_path):
+    """True if a path points to an existing file system object.
+
+    Args:
+      file_path:  path to examine
+
+    Returns:
+      bool (if object exists)
+
+    Raises:
+      TypeError: if file_path is None
+    """
+    if file_path is None:
+      raise TypeError
+    if not file_path:
+      return False
+    try:
+      file_path = self.ResolvePath(file_path)
+    except IOError:
+      return False
+    if file_path == self.root.name:
+      return True
+    path_components = self.GetPathComponents(file_path)
+    current_dir = self.root
+    for component in path_components:
+      if component not in current_dir.contents:
+        return False
+      current_dir = current_dir.contents[component]
+    return True
+
+  def ResolvePath(self, file_path):
+    """Follow a path, resolving symlinks.
+
+    ResolvePath traverses the filesystem along the specified file path,
+    resolving file names and symbolic links until all elements of the path are
+    exhausted, or we reach a file which does not exist.  If all the elements
+    are not consumed, they just get appended to the path resolved so far.
+    This gives us the path which is as resolved as it can be, even if the file
+    does not exist.
+
+    This behavior mimics Unix semantics, and is best shown by example.  Given a
+    file system that looks like this:
+
+          /a/b/
+          /a/b/c -> /a/b2          c is a symlink to /a/b2
+          /a/b2/x
+          /a/c   -> ../d
+          /a/x   -> y
+     Then:
+          /a/b/x      =>  /a/b/x
+          /a/c        =>  /d
+          /a/x        =>  /a/y
+          /a/b/c/d/e  =>  /a/b2/d/e
+
+    Args:
+      file_path:  path to examine
+
+    Returns:
+      resolved_path (string) or None
+
+    Raises:
+      TypeError: if file_path is None
+      IOError: if file_path is '' or a part of the path doesn't exist
+    """
+
+    def _ComponentsToPath(component_folders):
+      return '%s%s' % (self.path_separator,
+                       self.path_separator.join(component_folders))
+
+    def _ValidRelativePath(file_path):
+      while file_path and '/..' in file_path:
+        file_path = file_path[:file_path.rfind('/..')]
+        if not self.Exists(self.NormalizePath(file_path)):
+          return False
+      return True
+
+    def _FollowLink(link_path_components, link):
+      """Follow a link w.r.t. a path resolved so far.
+
+      The component is either a real file, which is a no-op, or a symlink.
+      In the case of a symlink, we have to modify the path as built up so far
+        /a/b => ../c   should yield /a/../c (which will normalize to /c)
+        /a/b => x      should yield /a/x
+        /a/b => /x/y/z should yield /x/y/z
+      The modified path may land us in a new spot which is itself a
+      link, so we may repeat the process.
+
+      Args:
+        link_path_components: The resolved path built up to the link so far.
+        link: The link object itself.
+
+      Returns:
+        (string) the updated path resolved after following the link.
+
+      Raises:
+        IOError: if there are too many levels of symbolic link
+      """
+      link_path = link.contents
+      # For links to absolute paths, we want to throw out everything in the
+      # path built so far and replace with the link.  For relative links, we
+      # have to append the link to what we have so far.
+      if not link_path.startswith(self.path_separator):
+        # Relative path.  Append remainder of path to what we have processed
+        # so far, excluding the name of the link itself.
+        # /a/b => ../c   should yield /a/../c (which will normalize to /c)
+        # /a/b => d should yield a/d
+        components = link_path_components[:-1]
+        components.append(link_path)
+        link_path = self.path_separator.join(components)
+      # Don't call self.NormalizePath(), as we don't want to prepend self.cwd.
+      return self.CollapsePath(link_path)
+
+    if file_path is None:
+      # file.open(None) raises TypeError, so mimic that.
+      raise TypeError('Expected file system path string, received None')
+    if not file_path or not _ValidRelativePath(file_path):
+      # file.open('') raises IOError, so mimic that, and validate that all
+      # parts of a relative path exist.
+      raise IOError(errno.ENOENT,
+                    'No such file or directory: \'%s\'' % file_path)
+    file_path = self.NormalizePath(file_path)
+    if file_path == self.root.name:
+      return file_path
+
+    current_dir = self.root
+    path_components = self.GetPathComponents(file_path)
+
+    resolved_components = []
+    link_depth = 0
+    while path_components:
+      component = path_components.pop(0)
+      resolved_components.append(component)
+      if component not in current_dir.contents:
+        # The component of the path at this point does not actually exist in
+        # the folder.   We can't resolve the path any more.  It is legal to link
+        # to a file that does not yet exist, so rather than raise an error, we
+        # just append the remaining components to the path resolved so far
+        # and return that.
+        resolved_components.extend(path_components)
+        break
+      current_dir = current_dir.contents[component]
+
+      # Resolve any possible symlinks in the current path component.
+      if stat.S_ISLNK(current_dir.st_mode):
+        # This link_depth check is not really meant to be an accurate check.
+        # It is just a quick hack to prevent us from looping forever on
+        # cycles.
+        link_depth += 1
+        if link_depth > _MAX_LINK_DEPTH:
+          raise IOError(errno.EMLINK,
+                        'Too many levels of symbolic links: \'%s\'' %
+                        _ComponentsToPath(resolved_components))
+        link_path = _FollowLink(resolved_components, current_dir)
+
+        # Following the link might result in the complete replacement of the
+        # current_dir, so we evaluate the entire resulting path.
+        target_components = self.GetPathComponents(link_path)
+        path_components = target_components + path_components
+        resolved_components = []
+        current_dir = self.root
+    return _ComponentsToPath(resolved_components)
+
+  def GetObjectFromNormalizedPath(self, file_path):
+    """Searches for the specified filesystem object within the fake filesystem.
+
+    Args:
+      file_path: specifies target FakeFile object to retrieve, with a
+          path that has already been normalized/resolved
+
+    Returns:
+      the FakeFile object corresponding to file_path
+
+    Raises:
+      IOError: if the object is not found
+    """
+    if file_path == self.root.name:
+      return self.root
+    path_components = self.GetPathComponents(file_path)
+    target_object = self.root
+    try:
+      for component in path_components:
+        if not isinstance(target_object, FakeDirectory):
+          raise IOError(errno.ENOENT,
+                        'No such file or directory in fake filesystem',
+                        file_path)
+        target_object = target_object.GetEntry(component)
+    except KeyError:
+      raise IOError(errno.ENOENT,
+                    'No such file or directory in fake filesystem',
+                    file_path)
+    return target_object
+
+  def GetObject(self, file_path):
+    """Searches for the specified filesystem object within the fake filesystem.
+
+    Args:
+      file_path: specifies target FakeFile object to retrieve
+
+    Returns:
+      the FakeFile object corresponding to file_path
+
+    Raises:
+      IOError: if the object is not found
+    """
+    file_path = self.NormalizePath(file_path)
+    return self.GetObjectFromNormalizedPath(file_path)
+
+  def ResolveObject(self, file_path):
+    """Searches for the specified filesystem object, resolving all links.
+
+    Args:
+      file_path: specifies target FakeFile object to retrieve
+
+    Returns:
+      the FakeFile object corresponding to file_path
+
+    Raises:
+      IOError: if the object is not found
+    """
+    return self.GetObjectFromNormalizedPath(self.ResolvePath(file_path))
+
+  def LResolveObject(self, path):
+    """Searches for the specified object, resolving only parent links.
+
+    This is analogous to the stat/lstat difference.  This resolves links *to*
+    the object but not of the final object itself.
+
+    Args:
+      path: specifies target FakeFile object to retrieve
+
+    Returns:
+      the FakeFile object corresponding to path
+
+    Raises:
+      IOError: if the object is not found
+    """
+    if path == self.root.name:
+      # The root directory will never be a link
+      return self.root
+    parent_directory, child_name = self.SplitPath(path)
+    if not parent_directory:
+      parent_directory = self.cwd
+    try:
+      parent_obj = self.ResolveObject(parent_directory)
+      assert parent_obj
+      if not isinstance(parent_obj, FakeDirectory):
+        raise IOError(errno.ENOENT,
+                      'No such file or directory in fake filesystem',
+                      path)
+      return parent_obj.GetEntry(child_name)
+    except KeyError:
+      raise IOError(errno.ENOENT,
+                    'No such file or directory in the fake filesystem',
+                    path)
+
+  def AddObject(self, file_path, file_object):
+    """Add a fake file or directory into the filesystem at file_path.
+
+    Args:
+      file_path: path of the directory to which file_object will be added
+      file_object: file or directory to add
+
+    Raises:
+      IOError: if file_path does not correspond to a directory
+    """
+    try:
+      target_directory = self.GetObject(file_path)
+      target_directory.AddEntry(file_object)
+    except AttributeError:
+      raise IOError(errno.ENOTDIR,
+                    'Not a directory in the fake filesystem',
+                    file_path)
+
+  def RemoveObject(self, file_path):
+    """Remove an existing file or directory.
+
+    Args:
+      file_path: the path to the file relative to self
+
+    Raises:
+      IOError: if file_path does not correspond to an existing file, or if part
+        of the path refers to something other than a directory
+      OSError: if the directory is in use (eg, if it is '/')
+    """
+    if file_path == self.root.name:
+      raise OSError(errno.EBUSY, 'Fake device or resource busy',
+                    file_path)
+    try:
+      dirname, basename = self.SplitPath(file_path)
+      target_directory = self.GetObject(dirname)
+      target_directory.RemoveEntry(basename)
+    except KeyError:
+      raise IOError(errno.ENOENT,
+                    'No such file or directory in the fake filesystem',
+                    file_path)
+    except AttributeError:
+      raise IOError(errno.ENOTDIR,
+                    'Not a directory in the fake filesystem',
+                    file_path)
+
+  def CreateDirectory(self, directory_path, perm_bits=PERM_DEF, inode=None):
+    """Creates directory_path, and all the parent directories.
+
+    Helper method to set up your test faster
+
+    Args:
+      directory_path:  directory to create
+      perm_bits: permission bits
+      inode: inode of directory
+
+    Returns:
+      the newly created FakeDirectory object
+
+    Raises:
+      OSError:  if the directory already exists
+    """
+    directory_path = self.NormalizePath(directory_path)
+    if self.Exists(directory_path):
+      raise OSError(errno.EEXIST,
+                    'Directory exists in fake filesystem',
+                    directory_path)
+    path_components = self.GetPathComponents(directory_path)
+    current_dir = self.root
+
+    for component in path_components:
+      if component not in current_dir.contents:
+        new_dir = FakeDirectory(component, perm_bits)
+        current_dir.AddEntry(new_dir)
+        current_dir = new_dir
+      else:
+        current_dir = current_dir.contents[component]
+
+    current_dir.SetIno(inode)
+    return current_dir
+
+  def CreateFile(self, file_path, st_mode=stat.S_IFREG | PERM_DEF_FILE,
+                 contents='', st_size=None, create_missing_dirs=True,
+                 apply_umask=False, inode=None):
+    """Creates file_path, including all the parent directories along the way.
+
+    Helper method to set up your test faster.
+
+    Args:
+      file_path: path to the file to create
+      st_mode: the stat.S_IF constant representing the file type
+      contents: the contents of the file
+      st_size: file size; only valid if contents=None
+      create_missing_dirs: if True, auto create missing directories
+      apply_umask: whether or not the current umask must be applied on st_mode
+      inode: inode of the file
+
+    Returns:
+      the newly created FakeFile object
+
+    Raises:
+      IOError: if the file already exists
+      IOError: if the containing directory is required and missing
+    """
+    file_path = self.NormalizePath(file_path)
+    if self.Exists(file_path):
+      raise IOError(errno.EEXIST,
+                    'File already exists in fake filesystem',
+                    file_path)
+    parent_directory, new_file = self.SplitPath(file_path)
+    if not parent_directory:
+      parent_directory = self.cwd
+    if not self.Exists(parent_directory):
+      if not create_missing_dirs:
+        raise IOError(errno.ENOENT, 'No such fake directory', parent_directory)
+      self.CreateDirectory(parent_directory)
+    if apply_umask:
+      st_mode &= ~self.umask
+    file_object = FakeFile(new_file, st_mode, contents)
+    file_object.SetIno(inode)
+    self.AddObject(parent_directory, file_object)
+
+    # set the size if st_size is given
+    if not contents and st_size is not None:
+      try:
+        file_object.SetLargeFileSize(st_size)
+      except IOError:
+        self.RemoveObject(file_path)
+        raise
+
+    return file_object
+
+  def CreateLink(self, file_path, link_target):
+    """Creates the specified symlink, pointed at the specified link target.
+
+    Args:
+      file_path:  path to the symlink to create
+      link_target:  the target of the symlink
+
+    Returns:
+      the newly created FakeFile object
+
+    Raises:
+      IOError:  if the file already exists
+    """
+    resolved_file_path = self.ResolvePath(file_path)
+    return self.CreateFile(resolved_file_path, st_mode=stat.S_IFLNK | PERM_DEF,
+                           contents=link_target)
+
+  def __str__(self):
+    return str(self.root)
+
+
+class FakePathModule(object):
+  """Faked os.path module replacement.
+
+  FakePathModule should *only* be instantiated by FakeOsModule.  See the
+  FakeOsModule docstring for details.
+  """
+  _OS_PATH_COPY = CopyModule(os.path)
+
+  def __init__(self, filesystem, os_module=None):
+    """Init.
+
+    Args:
+      filesystem:  FakeFilesystem used to provide file system information
+      os_module: (deprecated) FakeOsModule to assign to self.os
+    """
+    self.filesystem = filesystem
+    self._os_path = self._OS_PATH_COPY
+    if os_module is None:
+      warnings.warn(FAKE_PATH_MODULE_DEPRECATION, DeprecationWarning,
+                    stacklevel=2)
+    self._os_path.os = self.os = os_module
+    self.sep = self.filesystem.path_separator
+
+  def exists(self, path):
+    """Determines whether the file object exists within the fake filesystem.
+
+    Args:
+      path:  path to the file object
+
+    Returns:
+      bool (if file exists)
+    """
+    return self.filesystem.Exists(path)
+
+  def lexists(self, path):
+    """Test whether a path exists.  Returns True for broken symbolic links.
+
+    Args:
+      path:  path to the symlink object
+
+    Returns:
+      bool (if file exists)
+    """
+    return self.exists(path) or self.islink(path)
+
+  def getsize(self, path):
+    """Return the file object size in bytes.
+
+    Args:
+      path:  path to the file object
+
+    Returns:
+      file size in bytes
+    """
+    file_obj = self.filesystem.GetObject(path)
+    return file_obj.st_size
+
+  def _istype(self, path, st_flag):
+    """Helper function to implement isdir(), islink(), etc.
+
+    See the stat(2) man page for valid stat.S_I* flag values
+
+    Args:
+      path:  path to file to stat and test
+      st_flag:  the stat.S_I* flag checked for the file's st_mode
+
+    Returns:
+      boolean (the st_flag is set in path's st_mode)
+
+    Raises:
+      TypeError: if path is None
+    """
+    if path is None:
+      raise TypeError
+    try:
+      obj = self.filesystem.ResolveObject(path)
+      if obj:
+        return stat.S_IFMT(obj.st_mode) == st_flag
+    except IOError:
+      return False
+    return False
+
+  def isabs(self, path):
+    if self.filesystem.path_separator == os.path.sep:
+      # Pass through to os.path.isabs, which on Windows has special
+      # handling for a leading drive letter.
+      return self._os_path.isabs(path)
+    else:
+      return path.startswith(self.filesystem.path_separator)
+
+  def isdir(self, path):
+    """Determines if path identifies a directory."""
+    return self._istype(path, stat.S_IFDIR)
+
+  def isfile(self, path):
+    """Determines if path identifies a regular file."""
+    return self._istype(path, stat.S_IFREG)
+
+  def islink(self, path):
+    """Determines if path identifies a symbolic link.
+
+    Args:
+      path: path to filesystem object.
+
+    Returns:
+      boolean (the st_flag is set in path's st_mode)
+
+    Raises:
+      TypeError: if path is None
+    """
+    if path is None:
+      raise TypeError
+    try:
+      link_obj = self.filesystem.LResolveObject(path)
+      return stat.S_IFMT(link_obj.st_mode) == stat.S_IFLNK
+    except IOError:
+      return False
+    except KeyError:
+      return False
+    return False
+
+  def getmtime(self, path):
+    """Returns the mtime of the file."""
+    try:
+      file_obj = self.filesystem.GetObject(path)
+    except IOError as e:
+      raise OSError(errno.ENOENT, str(e))
+    return file_obj.st_mtime
+
+  def abspath(self, path):
+    """Return the absolute version of a path."""
+    if not self.isabs(path):
+      if sys.version_info < (3, 0) and isinstance(path, unicode):
+        cwd = self.os.getcwdu()
+      else:
+        cwd = self.os.getcwd()
+      path = self.join(cwd, path)
+    return self.normpath(path)
+
+  def join(self, *p):
+    """Returns the completed path with a separator of the parts."""
+    return self.filesystem.JoinPaths(*p)
+
+  def normpath(self, path):
+    """Normalize path, eliminating double slashes, etc."""
+    return self.filesystem.CollapsePath(path)
+
+  if _is_windows:
+
+    def relpath(self, path, start=None):
+      """ntpath.relpath() needs the cwd passed in the start argument."""
+      if start is None:
+        start = self.filesystem.cwd
+      path = self._os_path.relpath(path, start)
+      return path.replace(self._os_path.sep, self.filesystem.path_separator)
+
+    realpath = abspath
+
+  def __getattr__(self, name):
+    """Forwards any non-faked calls to os.path."""
+    return self._os_path.__dict__[name]
+
+
+class FakeOsModule(object):
+  """Uses FakeFilesystem to provide a fake os module replacement.
+
+  Do not create os.path separately from os, as there is a necessary circular
+  dependency between os and os.path to replicate the behavior of the standard
+  Python modules.  What you want to do is to just let FakeOsModule take care of
+  os.path setup itself.
+
+  # You always want to do this.
+  filesystem = fake_filesystem.FakeFilesystem()
+  my_os_module = fake_filesystem.FakeOsModule(filesystem)
+  """
+
+  def __init__(self, filesystem, os_path_module=None):
+    """Also exposes self.path (to fake os.path).
+
+    Args:
+      filesystem:  FakeFilesystem used to provide file system information
+      os_path_module: (deprecated) optional FakePathModule instance
+    """
+    self.filesystem = filesystem
+    self.sep = filesystem.path_separator
+    self._os_module = os
+    if os_path_module is None:
+      self.path = FakePathModule(self.filesystem, self)
+    else:
+      warnings.warn(FAKE_PATH_MODULE_DEPRECATION, DeprecationWarning,
+                    stacklevel=2)
+      self.path = os_path_module
+    if sys.version_info < (3, 0):
+      self.fdopen = self._fdopen_ver2
+    else:
+      self.fdopen = self._fdopen
+
+  def _fdopen(self, *args, **kwargs):
+    """Redirector to open() builtin function.
+
+    Args:
+      *args: pass through args
+      **kwargs: pass through kwargs
+
+    Returns:
+      File object corresponding to file_des.
+
+    Raises:
+      TypeError: if file descriptor is not an integer.
+    """
+    if not isinstance(args[0], int):
+      raise TypeError('an integer is required')
+    return FakeFileOpen(self.filesystem)(*args, **kwargs)
+
+  def _fdopen_ver2(self, file_des, mode='r', bufsize=None):
+    """Returns an open file object connected to the file descriptor file_des.
+
+    Args:
+      file_des: An integer file descriptor for the file object requested.
+      mode: additional file flags. Currently checks to see if the mode matches
+        the mode of the requested file object.
+      bufsize: ignored. (Used for signature compliance with __builtin__.fdopen)
+
+    Returns:
+      File object corresponding to file_des.
+
+    Raises:
+      OSError: if bad file descriptor or incompatible mode is given.
+      TypeError: if file descriptor is not an integer.
+    """
+    if not isinstance(file_des, int):
+      raise TypeError('an integer is required')
+
+    try:
+      return FakeFileOpen(self.filesystem).Call(file_des, mode=mode)
+    except IOError as e:
+      raise OSError(e)
+
+  def open(self, file_path, flags, mode=None):
+    """Returns the file descriptor for a FakeFile.
+
+    WARNING: This implementation only implements creating a file. Please fill
+    out the remainder for your needs.
+
+    Args:
+      file_path: the path to the file
+      flags: low-level bits to indicate io operation
+      mode: bits to define default permissions
+
+    Returns:
+      A file descriptor.
+
+    Raises:
+      OSError: if the path cannot be found
+      ValueError: if invalid mode is given
+      NotImplementedError: if an unsupported flag is passed in
+    """
+    if flags & os.O_CREAT:
+      fake_file = FakeFileOpen(self.filesystem)(file_path, 'w')
+      if mode:
+        self.chmod(file_path, mode)
+      return fake_file.fileno()
+    else:
+      raise NotImplementedError('FakeOsModule.open')
+
+  def close(self, file_des):
+    """Closes a file descriptor.
+
+    Args:
+      file_des: An integer file descriptor for the file object requested.
+
+    Raises:
+      OSError: bad file descriptor.
+      TypeError: if file descriptor is not an integer.
+    """
+    fh = self.filesystem.GetOpenFile(file_des)
+    fh.close()
+
+  def read(self, file_des, num_bytes):
+    """Reads number of bytes from a file descriptor, returns bytes read.
+
+    Args:
+      file_des: An integer file descriptor for the file object requested.
+      num_bytes: Number of bytes to read from file.
+
+    Returns:
+      Bytes read from file.
+
+    Raises:
+      OSError: bad file descriptor.
+      TypeError: if file descriptor is not an integer.
+    """
+    fh = self.filesystem.GetOpenFile(file_des)
+    return fh.read(num_bytes)
+
+  def write(self, file_des, contents):
+    """Writes string to file descriptor, returns number of bytes written.
+
+    Args:
+      file_des: An integer file descriptor for the file object requested.
+      contents: String of bytes to write to file.
+
+    Returns:
+      Number of bytes written.
+
+    Raises:
+      OSError: bad file descriptor.
+      TypeError: if file descriptor is not an integer.
+    """
+    fh = self.filesystem.GetOpenFile(file_des)
+    fh.write(contents)
+    fh.flush()
+    return len(contents)
+
+  def fstat(self, file_des):
+    """Returns the os.stat-like tuple for the FakeFile object of file_des.
+
+    Args:
+      file_des:  file descriptor of filesystem object to retrieve
+
+    Returns:
+      the os.stat_result object corresponding to entry_path
+
+    Raises:
+      OSError: if the filesystem object doesn't exist.
+    """
+    # stat should return the tuple representing the return value of os.stat
+    stats = self.filesystem.GetOpenFile(file_des).GetObject()
+    st_obj = os.stat_result((stats.st_mode, stats.st_ino, stats.st_dev,
+                             stats.st_nlink, stats.st_uid, stats.st_gid,
+                             stats.st_size, stats.st_atime,
+                             stats.st_mtime, stats.st_ctime))
+    return st_obj
+
+  def _ConfirmDir(self, target_directory):
+    """Tests that the target is actually a directory, raising OSError if not.
+
+    Args:
+      target_directory:  path to the target directory within the fake
+        filesystem
+
+    Returns:
+      the FakeFile object corresponding to target_directory
+
+    Raises:
+      OSError:  if the target is not a directory
+    """
+    try:
+      directory = self.filesystem.GetObject(target_directory)
+    except IOError as e:
+      raise OSError(e.errno, e.strerror, target_directory)
+    if not directory.st_mode & stat.S_IFDIR:
+      raise OSError(errno.ENOTDIR,
+                    'Fake os module: not a directory',
+                    target_directory)
+    return directory
+
+  def umask(self, new_mask):
+    """Change the current umask.
+
+    Args:
+      new_mask: An integer.
+
+    Returns:
+      The old mask.
+
+    Raises:
+      TypeError: new_mask is of an invalid type.
+    """
+    if not isinstance(new_mask, int):
+      raise TypeError('an integer is required')
+    old_umask = self.filesystem.umask
+    self.filesystem.umask = new_mask
+    return old_umask
+
+  def chdir(self, target_directory):
+    """Change current working directory to target directory.
+
+    Args:
+      target_directory:  path to new current working directory
+
+    Raises:
+      OSError: if user lacks permission to enter the argument directory or if
+               the target is not a directory
+    """
+    target_directory = self.filesystem.ResolvePath(target_directory)
+    self._ConfirmDir(target_directory)
+    directory = self.filesystem.GetObject(target_directory)
+    # A full implementation would check permissions all the way up the tree.
+    if not directory.st_mode & PERM_EXE:
+      raise OSError(errno.EACCES, 'Fake os module: permission denied',
+                    directory)
+    self.filesystem.cwd = target_directory
+
+  def getcwd(self):
+    """Return current working directory."""
+    return self.filesystem.cwd
+
+  def getcwdu(self):
+    """Return current working directory. Deprecated in Python 3."""
+    if sys.version_info >= (3, 0):
+      raise AttributeError('no attribute getcwdu')
+    return unicode(self.filesystem.cwd)
+
+  def listdir(self, target_directory):
+    """Returns a sorted list of filenames in target_directory.
+
+    Args:
+      target_directory:  path to the target directory within the fake
+        filesystem
+
+    Returns:
+      a sorted list of file names within the target directory
+
+    Raises:
+      OSError:  if the target is not a directory
+    """
+    target_directory = self.filesystem.ResolvePath(target_directory)
+    directory = self._ConfirmDir(target_directory)
+    return sorted(directory.contents)
+
+  def _ClassifyDirectoryContents(self, root):
+    """Classify contents of a directory as files/directories.
+
+    Args:
+      root: (str) Directory to examine.
+
+    Returns:
+      (tuple) A tuple consisting of three values: the directory examined, a
+      list containing all of the directory entries, and a list containing all
+      of the non-directory entries.  (This is the same format as returned by
+      the os.walk generator.)
+
+    Raises:
+      Nothing on its own, but be ready to catch exceptions generated by
+      underlying mechanisms like os.listdir.
+    """
+    dirs = []
+    files = []
+    for entry in self.listdir(root):
+      if self.path.isdir(self.path.join(root, entry)):
+        dirs.append(entry)
+      else:
+        files.append(entry)
+    return (root, dirs, files)
+
+  def walk(self, top, topdown=True, onerror=None):
+    """Performs an os.walk operation over the fake filesystem.
+
+    Args:
+      top:  root directory from which to begin walk
+      topdown:  determines whether to return the tuples with the root as the
+        first entry (True) or as the last, after all the child directory
+        tuples (False)
+      onerror:  if not None, function which will be called to handle the
+        os.error instance provided when os.listdir() fails
+
+    Yields:
+      (path, directories, nondirectories) for top and each of its
+      subdirectories.  See the documentation for the builtin os module for
+      further details.
+    """
+    top = self.path.normpath(top)
+    try:
+      top_contents = self._ClassifyDirectoryContents(top)
+    except OSError as e:
+      top_contents = None
+      if onerror is not None:
+        onerror(e)
+
+    if top_contents is not None:
+      if topdown:
+        yield top_contents
+
+      for directory in top_contents[1]:
+        for contents in self.walk(self.path.join(top, directory),
+                                  topdown=topdown, onerror=onerror):
+          yield contents
+
+      if not topdown:
+        yield top_contents
+
+  def readlink(self, path):
+    """Reads the target of a symlink.
+
+    Args:
+      path:  symlink to read the target of
+
+    Returns:
+      the string representing the path to which the symbolic link points.
+
+    Raises:
+      TypeError: if path is None
+      OSError: (with errno=ENOENT) if path is not a valid path, or
+               (with errno=EINVAL) if path is valid, but is not a symlink
+    """
+    if path is None:
+      raise TypeError
+    try:
+      link_obj = self.filesystem.LResolveObject(path)
+    except IOError:
+      raise OSError(errno.ENOENT, 'Fake os module: path does not exist', path)
+    if stat.S_IFMT(link_obj.st_mode) != stat.S_IFLNK:
+      raise OSError(errno.EINVAL, 'Fake os module: not a symlink', path)
+    return link_obj.contents
+
+  def stat(self, entry_path):
+    """Returns the os.stat-like tuple for the FakeFile object of entry_path.
+
+    Args:
+      entry_path:  path to filesystem object to retrieve
+
+    Returns:
+      the os.stat_result object corresponding to entry_path
+
+    Raises:
+      OSError: if the filesystem object doesn't exist.
+    """
+    # stat should return the tuple representing return value of os.stat
+    try:
+      stats = self.filesystem.ResolveObject(entry_path)
+      st_obj = os.stat_result((stats.st_mode, stats.st_ino, stats.st_dev,
+                               stats.st_nlink, stats.st_uid, stats.st_gid,
+                               stats.st_size, stats.st_atime,
+                               stats.st_mtime, stats.st_ctime))
+      return st_obj
+    except IOError as io_error:
+      raise OSError(io_error.errno, io_error.strerror, entry_path)
+
+  def lstat(self, entry_path):
+    """Returns the os.stat-like tuple for entry_path, not following symlinks.
+
+    Args:
+      entry_path:  path to filesystem object to retrieve
+
+    Returns:
+      the os.stat_result object corresponding to entry_path
+
+    Raises:
+      OSError: if the filesystem object doesn't exist.
+    """
+    # stat should return the tuple representing return value of os.stat
+    try:
+      stats = self.filesystem.LResolveObject(entry_path)
+      st_obj = os.stat_result((stats.st_mode, stats.st_ino, stats.st_dev,
+                               stats.st_nlink, stats.st_uid, stats.st_gid,
+                               stats.st_size, stats.st_atime,
+                               stats.st_mtime, stats.st_ctime))
+      return st_obj
+    except IOError as io_error:
+      raise OSError(io_error.errno, io_error.strerror, entry_path)
+
+  def remove(self, path):
+    """Removes the FakeFile object representing the specified file."""
+    path = self.filesystem.NormalizePath(path)
+    if self.path.isdir(path) and not self.path.islink(path):
+      raise OSError(errno.EISDIR, "Is a directory: '%s'" % path)
+    try:
+      self.filesystem.RemoveObject(path)
+    except IOError as e:
+      raise OSError(e.errno, e.strerror, e.filename)
+
+  # As per the documentation unlink = remove.
+  unlink = remove
+
+  def rename(self, old_file, new_file):
+    """Adds a FakeFile object at new_file containing contents of old_file.
+
+    Also removes the FakeFile object for old_file, and replaces existing
+    new_file object, if one existed.
+
+    Args:
+      old_file:  path to filesystem object to rename
+      new_file:  path to where the filesystem object will live after this call
+
+    Raises:
+      OSError:  if old_file does not exist.
+      IOError:  if dirname(new_file) does not exist
+    """
+    old_file = self.filesystem.NormalizePath(old_file)
+    new_file = self.filesystem.NormalizePath(new_file)
+    if not self.filesystem.Exists(old_file):
+      raise OSError(errno.ENOENT,
+                    'Fake os object: can not rename nonexistent file '
+                    'with name',
+                    old_file)
+    if self.filesystem.Exists(new_file):
+      if old_file == new_file:
+        return None  # Nothing to do here.
+      else:
+        self.remove(new_file)
+    old_dir, old_name = self.path.split(old_file)
+    new_dir, new_name = self.path.split(new_file)
+    if not self.filesystem.Exists(new_dir):
+      raise IOError(errno.ENOENT, 'No such fake directory', new_dir)
+    old_dir_object = self.filesystem.ResolveObject(old_dir)
+    old_object = old_dir_object.GetEntry(old_name)
+    old_object_mtime = old_object.st_mtime
+    new_dir_object = self.filesystem.ResolveObject(new_dir)
+    if old_object.st_mode & stat.S_IFDIR:
+      old_object.name = new_name
+      new_dir_object.AddEntry(old_object)
+      old_dir_object.RemoveEntry(old_name)
+    else:
+      self.filesystem.CreateFile(new_file,
+                                 st_mode=old_object.st_mode,
+                                 contents=old_object.contents,
+                                 create_missing_dirs=False)
+      self.remove(old_file)
+    new_object = self.filesystem.GetObject(new_file)
+    new_object.SetMTime(old_object_mtime)
+    self.chown(new_file, old_object.st_uid, old_object.st_gid)
+
+  def rmdir(self, target_directory):
+    """Remove a leaf Fake directory.
+
+    Args:
+      target_directory: (str) Name of directory to remove.
+
+    Raises:
+      OSError: if target_directory does not exist or is not a directory,
+      or as per FakeFilesystem.RemoveObject. Cannot remove '.'.
+    """
+    if target_directory == '.':
+      raise OSError(errno.EINVAL, 'Invalid argument: \'.\'')
+    target_directory = self.filesystem.NormalizePath(target_directory)
+    if self._ConfirmDir(target_directory):
+      if self.listdir(target_directory):
+        raise OSError(errno.ENOTEMPTY, 'Fake Directory not empty',
+                      target_directory)
+      try:
+        self.filesystem.RemoveObject(target_directory)
+      except IOError as e:
+        raise OSError(e.errno, e.strerror, e.filename)
+
+  def removedirs(self, target_directory):
+    """Remove a leaf Fake directory and all empty intermediate ones."""
+    target_directory = self.filesystem.NormalizePath(target_directory)
+    directory = self._ConfirmDir(target_directory)
+    if directory.contents:
+      raise OSError(errno.ENOTEMPTY, 'Fake Directory not empty',
+                    self.path.basename(target_directory))
+    else:
+      self.rmdir(target_directory)
+    head, tail = self.path.split(target_directory)
+    if not tail:
+      head, tail = self.path.split(head)
+    while head and tail:
+      head_dir = self._ConfirmDir(head)
+      if head_dir.contents:
+        break
+      self.rmdir(head)
+      head, tail = self.path.split(head)
+
+  def mkdir(self, dir_name, mode=PERM_DEF):
+    """Create a leaf Fake directory.
+
+    Args:
+      dir_name: (str) Name of directory to create.  Relative paths are assumed
+        to be relative to '/'.
+      mode: (int) Mode to create directory with.  This argument defaults to
+        0o777.  The umask is applied to this mode.
+
+    Raises:
+      OSError: if the directory name is invalid or parent directory is read only
+      or as per FakeFilesystem.AddObject.
+    """
+    if dir_name.endswith(self.sep):
+      dir_name = dir_name[:-1]
+
+    parent_dir, _ = self.path.split(dir_name)
+    if parent_dir:
+      base_dir = self.path.normpath(parent_dir)
+      if parent_dir.endswith(self.sep + '..'):
+        base_dir, unused_dotdot, _ = parent_dir.partition(self.sep + '..')
+      if not self.filesystem.Exists(base_dir):
+        raise OSError(errno.ENOENT, 'No such fake directory', base_dir)
+
+    dir_name = self.filesystem.NormalizePath(dir_name)
+    if self.filesystem.Exists(dir_name):
+      raise OSError(errno.EEXIST, 'Fake object already exists', dir_name)
+    head, tail = self.path.split(dir_name)
+    directory_object = self.filesystem.GetObject(head)
+    if not directory_object.st_mode & PERM_WRITE:
+      raise OSError(errno.EACCES, 'Permission Denied', dir_name)
+
+    self.filesystem.AddObject(
+        head, FakeDirectory(tail, mode & ~self.filesystem.umask))
+
+  def makedirs(self, dir_name, mode=PERM_DEF):
+    """Create a leaf Fake directory + create any non-existent parent dirs.
+
+    Args:
+      dir_name: (str) Name of directory to create.
+      mode: (int) Mode to create directory (and any necessary parent
+        directories) with. This argument defaults to 0o777.  The umask is
+        applied to this mode.
+
+    Raises:
+      OSError: if the directory already exists or as per
+      FakeFilesystem.CreateDirectory
+    """
+    dir_name = self.filesystem.NormalizePath(dir_name)
+    path_components = self.filesystem.GetPathComponents(dir_name)
+
+    # Raise a permission denied error if the first existing directory is not
+    # writeable.
+    current_dir = self.filesystem.root
+    for component in path_components:
+      if component not in current_dir.contents:
+        if not current_dir.st_mode & PERM_WRITE:
+          raise OSError(errno.EACCES, 'Permission Denied', dir_name)
+        else:
+          break
+      else:
+        current_dir = current_dir.contents[component]
+
+    self.filesystem.CreateDirectory(dir_name, mode & ~self.filesystem.umask)
+
+  def access(self, path, mode):
+    """Check if a file exists and has the specified permissions.
+
+    Args:
+      path: (str) Path to the file.
+      mode: (int) Permissions represented as a bitwise-OR combination of
+          os.F_OK, os.R_OK, os.W_OK, and os.X_OK.
+    Returns:
+      boolean, True if file is accessible, False otherwise
+    """
+    try:
+      st = self.stat(path)
+    except OSError as os_error:
+      if os_error.errno == errno.ENOENT:
+        return False
+      raise
+    return (mode & ((st.st_mode >> 6) & 7)) == mode
+
+  def chmod(self, path, mode):
+    """Change the permissions of a file as encoded in integer mode.
+
+    Args:
+      path: (str) Path to the file.
+      mode: (int) Permissions
+    """
+    try:
+      file_object = self.filesystem.GetObject(path)
+    except IOError as io_error:
+      if io_error.errno == errno.ENOENT:
+        raise OSError(errno.ENOENT,
+                      'No such file or directory in fake filesystem',
+                      path)
+      raise
+    file_object.st_mode = ((file_object.st_mode & ~PERM_ALL) |
+                           (mode & PERM_ALL))
+    file_object.st_ctime = int(time.time())
+
+  def utime(self, path, times):
+    """Change the access and modified times of a file.
+
+    Args:
+      path: (str) Path to the file.
+      times: 2-tuple of numbers, of the form (atime, mtime) which is used to set
+          the access and modified times, respectively. If None, file's access
+          and modified times are set to the current time.
+
+    Raises:
+      TypeError: If anything other than integers is specified in passed tuple or
+          number of elements in the tuple is not equal to 2.
+    """
+    try:
+      file_object = self.filesystem.ResolveObject(path)
+    except IOError as io_error:
+      if io_error.errno == errno.ENOENT:
+        raise OSError(errno.ENOENT,
+                      'No such file or directory in fake filesystem',
+                      path)
+      raise
+    if times is None:
+      file_object.st_atime = int(time.time())
+      file_object.st_mtime = int(time.time())
+    else:
+      if len(times) != 2:
+        raise TypeError('utime() arg 2 must be a tuple (atime, mtime)')
+      for t in times:
+        if not isinstance(t, (int, float)):
+          raise TypeError('atime and mtime must be numbers')
+
+      file_object.st_atime = times[0]
+      file_object.st_mtime = times[1]
+
+  def chown(self, path, uid, gid):
+    """Set ownership of a faked file.
+
+    Args:
+      path: (str) Path to the file or directory.
+      uid: (int) Numeric uid to set the file or directory to.
+      gid: (int) Numeric gid to set the file or directory to.
+
+    `None` is also allowed for `uid` and `gid`.  This permits `os.rename` to
+    use `os.chown` even when the source file `uid` and `gid` are `None` (unset).
+    """
+    try:
+      file_object = self.filesystem.GetObject(path)
+    except IOError as io_error:
+      if io_error.errno == errno.ENOENT:
+        raise OSError(errno.ENOENT,
+                      'No such file or directory in fake filesystem',
+                      path)
+      raise
+    if not ((isinstance(uid, int) or uid is None) and
+            (isinstance(gid, int) or gid is None)):
+      raise TypeError("An integer is required")
+    if uid != -1:
+      file_object.st_uid = uid
+    if gid != -1:
+      file_object.st_gid = gid
+
+  def mknod(self, filename, mode=None, device=None):
+    """Create a filesystem node named 'filename'.
+
+    Does not support device special files or named pipes as the real os
+    module does.
+
+    Args:
+      filename: (str) Name of the file to create
+      mode: (int) permissions to use and type of file to be created.
+        Default permissions are 0o666.  Only the stat.S_IFREG file type
+        is supported by the fake implementation.  The umask is applied
+        to this mode.
+      device: not supported in fake implementation
+
+    Raises:
+      OSError: if called with unsupported options or the file can not be
+      created.
+    """
+    if mode is None:
+      mode = stat.S_IFREG | PERM_DEF_FILE
+    if device or not mode & stat.S_IFREG:
+      raise OSError(errno.EINVAL,
+                    'Fake os mknod implementation only supports '
+                    'regular files.')
+
+    head, tail = self.path.split(filename)
+    if not tail:
+      if self.filesystem.Exists(head):
+        raise OSError(errno.EEXIST, 'Fake filesystem: %s: %s' % (
+            os.strerror(errno.EEXIST), filename))
+      raise OSError(errno.ENOENT, 'Fake filesystem: %s: %s' % (
+          os.strerror(errno.ENOENT), filename))
+    if tail == '.' or tail == '..' or self.filesystem.Exists(filename):
+      raise OSError(errno.EEXIST, 'Fake filesystem: %s: %s' % (
+          os.strerror(errno.EEXIST), filename))
+    try:
+      self.filesystem.AddObject(head, FakeFile(tail,
+                                               mode & ~self.filesystem.umask))
+    except IOError:
+      raise OSError(errno.ENOTDIR, 'Fake filesystem: %s: %s' % (
+          os.strerror(errno.ENOTDIR), filename))
+
+  def symlink(self, link_target, path):
+    """Creates the specified symlink, pointed at the specified link target.
+
+    Args:
+      link_target:  the target of the symlink
+      path:  path to the symlink to create
+
+    Returns:
+      None
+
+    Raises:
+      IOError:  if the file already exists
+    """
+    self.filesystem.CreateLink(path, link_target)
+
+  # pylint: disable-msg=C6002
+  # TODO: Link doesn't behave like os.link, this needs to be fixed properly.
+  link = symlink
+
+  def __getattr__(self, name):
+    """Forwards any unfaked calls to the standard os module."""
+    return getattr(self._os_module, name)
+
+
+class FakeFileOpen(object):
+  """Faked file() and open() function replacements.
+
+  Returns FakeFile objects in a FakeFilesystem in place of the file()
+  or open() function.
+  """
+
+  def __init__(self, filesystem, delete_on_close=False):
+    """init.
+
+    Args:
+      filesystem:  FakeFilesystem used to provide file system information
+      delete_on_close:  optional boolean, deletes file on close()
+    """
+    self.filesystem = filesystem
+    self._delete_on_close = delete_on_close
+
+  def __call__(self, *args, **kwargs):
+    """Redirects calls to file() or open() to appropriate method."""
+    if sys.version_info < (3, 0):
+      return self._call_ver2(*args, **kwargs)
+    else:
+      return self.Call(*args, **kwargs)
+
+  def _call_ver2(self, file_path, mode='r', buffering=-1, flags=None):
+    """Limits args of open() or file() for Python 2.x versions."""
+    # Backwards compatibility, mode arg used to be named flags
+    mode = flags or mode
+    return self.Call(file_path, mode, buffering)
+
+  def Call(self, file_, mode='r', buffering=-1, encoding=None,
+           errors=None, newline=None, closefd=True, opener=None):
+    """Returns a StringIO object with the contents of the target file object.
+
+    Args:
+      file_: path to target file or a file descriptor
+      mode: additional file modes. All r/w/a r+/w+/a+ modes are supported.
+        't', and 'U' are ignored, e.g., 'wU' is treated as 'w'. 'b' sets
+        binary mode, no end of line translations in StringIO.
+      buffering: ignored. (Used for signature compliance with __builtin__.open)
+      encoding: ignored, strings have no encoding
+      errors: ignored, this relates to encoding
+      newline: controls universal newlines, passed to StringIO object
+      closefd: if a file descriptor rather than file name is passed, and set
+        to false, then the file descriptor is kept open when file is closed
+      opener: not supported
+
+    Returns:
+      a StringIO object containing the contents of the target file
+
+    Raises:
+      IOError: if the target object is a directory, the path is invalid or
+        permission is denied.
+    """
+    orig_modes = mode  # Save original modes for error messages.
+    # Binary mode for non 3.x or set by mode
+    binary = sys.version_info < (3, 0) or 'b' in mode
+    # Normalize modes. Ignore 't' and 'U'.
+    mode = mode.replace('t', '').replace('b', '')
+    mode = mode.replace('rU', 'r').replace('U', 'r')
+
+    if mode not in _OPEN_MODE_MAP:
+      raise IOError('Invalid mode: %r' % orig_modes)
+
+    must_exist, need_read, need_write, truncate, append = _OPEN_MODE_MAP[mode]
+
+    file_object = None
+    filedes = None
+    # opening a file descriptor
+    if isinstance(file_, int):
+      filedes = file_
+      file_object = self.filesystem.GetOpenFile(filedes).GetObject()
+      file_path = file_object.name
+    else:
+      file_path = file_
+      real_path = self.filesystem.ResolvePath(file_path)
+      if self.filesystem.Exists(file_path):
+        file_object = self.filesystem.GetObjectFromNormalizedPath(real_path)
+      closefd = True
+
+    if file_object:
+      if ((need_read and not file_object.st_mode & PERM_READ) or
+          (need_write and not file_object.st_mode & PERM_WRITE)):
+        raise IOError(errno.EACCES, 'Permission denied', file_path)
+      if need_write:
+        file_object.st_ctime = int(time.time())
+        if truncate:
+          file_object.SetContents('')
+    else:
+      if must_exist:
+        raise IOError(errno.ENOENT, 'No such file or directory', file_path)
+      file_object = self.filesystem.CreateFile(
+          real_path, create_missing_dirs=False, apply_umask=True)
+
+    if file_object.st_mode & stat.S_IFDIR:
+      raise IOError(errno.EISDIR, 'Fake file object: is a directory', file_path)
+
+    class FakeFileWrapper(object):
+      """Wrapper for a StringIO object for use by a FakeFile object.
+
+      If the wrapper has any data written to it, it will propagate to
+      the FakeFile object on close() or flush().
+      """
+      if sys.version_info < (3, 0):
+        _OPERATION_ERROR = IOError
+      else:
+        _OPERATION_ERROR = io.UnsupportedOperation
+
+      def __init__(self, file_object, update=False, read=False, append=False,
+                   delete_on_close=False, filesystem=None, newline=None,
+                   binary=True, closefd=True):
+        self._file_object = file_object
+        self._append = append
+        self._read = read
+        self._update = update
+        self._closefd = closefd
+        self._file_epoch = file_object.epoch
+        contents = file_object.contents
+        newline_arg = {} if binary else {'newline': newline}
+        io_class = io.StringIO
+        if contents and isinstance(contents, Hexlified):
+          contents = contents.recover(binary)
+        # For Python 3, files opened as binary only read/write byte contents.
+        if sys.version_info >= (3, 0) and binary:
+          io_class = io.BytesIO
+          if contents and isinstance(contents, str):
+            contents = bytes(contents, 'ascii')
+        if contents:
+          if update:
+            self._io = io_class(**newline_arg)
+            self._io.write(contents)
+            if not append:
+              self._io.seek(0)
+            else:
+              self._read_whence = 0
+              if read:
+                self._read_seek = 0
+              else:
+                self._read_seek = self._io.tell()
+          else:
+            self._io = io_class(contents, **newline_arg)
+        else:
+          self._io = io_class(**newline_arg)
+          self._read_whence = 0
+          self._read_seek = 0
+        if delete_on_close:
+          assert filesystem, 'delete_on_close=True requires filesystem='
+        self._filesystem = filesystem
+        self._delete_on_close = delete_on_close
+        # override, don't modify FakeFile.name, as FakeFilesystem expects
+        # it to be the file name only, no directories.
+        self.name = file_object.opened_as
+
+      def __enter__(self):
+        """To support usage of this fake file with the 'with' statement."""
+        return self
+
+      def __exit__(self, type, value, traceback):  # pylint: disable-msg=W0622
+        """To support usage of this fake file with the 'with' statement."""
+        self.close()
+
+      def GetObject(self):
+        """Returns FakeFile object that is wrapped by current class."""
+        return self._file_object
+
+      def fileno(self):
+        """Returns file descriptor of file object."""
+        return self.filedes
+
+      def close(self):
+        """File close."""
+        if self._update:
+          self._file_object.SetContents(self._io.getvalue())
+        if self._closefd:
+          self._filesystem.CloseOpenFile(self)
+        if self._delete_on_close:
+          self._filesystem.RemoveObject(self.name)
+
+      def flush(self):
+        """Flush file contents to 'disk'."""
+        if self._update:
+          self._file_object.SetContents(self._io.getvalue())
+          self._file_epoch = self._file_object.epoch
+
+      def seek(self, offset, whence=0):
+        """Move read/write pointer in 'file'."""
+        if not self._append:
+          self._io.seek(offset, whence)
+        else:
+          self._read_seek = offset
+          self._read_whence = whence
+
+      def tell(self):
+        """Return the file's current position.
+
+        Returns:
+          int, file's current position in bytes.
+        """
+        if not self._append:
+          return self._io.tell()
+        if self._read_whence:
+          write_seek = self._io.tell()
+          self._io.seek(self._read_seek, self._read_whence)
+          self._read_seek = self._io.tell()
+          self._read_whence = 0
+          self._io.seek(write_seek)
+        return self._read_seek
+
+      def _UpdateStringIO(self):
+        """Updates the StringIO with changes to the file object contents."""
+        if self._file_epoch == self._file_object.epoch:
+          return
+        whence = self._io.tell()
+        self._io.seek(0)
+        self._io.truncate()
+        self._io.write(self._file_object.contents)
+        self._io.seek(whence)
+        self._file_epoch = self._file_object.epoch
+
+      def _ReadWrappers(self, name):
+        """Wrap a StringIO attribute in a read wrapper.
+
+        Returns a read_wrapper which tracks our own read pointer since the
+        StringIO object has no concept of a different read and write pointer.
+
+        Args:
+          name: the name StringIO attribute to wrap.  Should be a read call.
+
+        Returns:
+          either a read_error or read_wrapper function.
+        """
+        io_attr = getattr(self._io, name)
+
+        def read_wrapper(*args, **kwargs):
+          """Wrap all read calls to the StringIO Object.
+
+          We do this to track the read pointer separate from the write
+          pointer.  Anything that wants to read from the StringIO object
+          while we're in append mode goes through this.
+
+          Args:
+            *args: pass through args
+            **kwargs: pass through kwargs
+          Returns:
+            Wrapped StringIO object method
+          """
+          self._io.seek(self._read_seek, self._read_whence)
+          ret_value = io_attr(*args, **kwargs)
+          self._read_seek = self._io.tell()
+          self._read_whence = 0
+          self._io.seek(0, 2)
+          return ret_value
+        return read_wrapper
+
+      def _OtherWrapper(self, name):
+        """Wrap a StringIO attribute in an other_wrapper.
+
+        Args:
+          name: the name of the StringIO attribute to wrap.
+
+        Returns:
+          other_wrapper which is described below.
+        """
+        io_attr = getattr(self._io, name)
+
+        def other_wrapper(*args, **kwargs):
+          """Wrap all other calls to the StringIO Object.
+
+          We do this to track changes to the write pointer.  Anything that
+          moves the write pointer in a file open for appending should move
+          the read pointer as well.
+
+          Args:
+            *args: pass through args
+            **kwargs: pass through kwargs
+          Returns:
+            Wrapped StringIO object method
+          """
+          write_seek = self._io.tell()
+          ret_value = io_attr(*args, **kwargs)
+          if write_seek != self._io.tell():
+            self._read_seek = self._io.tell()
+            self._read_whence = 0
+            self._file_object.st_size += (self._read_seek - write_seek)
+          return ret_value
+        return other_wrapper
+
+      def Size(self):
+        return self._file_object.st_size
+
+      def __getattr__(self, name):
+        if self._file_object.IsLargeFile():
+          raise FakeLargeFileIoException(file_path)
+
+        # errors on called method vs. open mode
+        if not self._read and name.startswith('read'):
+          def read_error(*args, **kwargs):
+            """Throw an error unless the argument is zero."""
+            if args and args[0] == 0:
+              return ''
+            raise self._OPERATION_ERROR('File is not open for reading.')
+          return read_error
+        if not self._update and (name.startswith('write')
+                                 or name == 'truncate'):
+          def write_error(*args, **kwargs):
+            """Throw an error."""
+            raise self._OPERATION_ERROR('File is not open for writing.')
+          return write_error
+
+        if name.startswith('read'):
+          self._UpdateStringIO()
+        if self._append:
+          if name.startswith('read'):
+            return self._ReadWrappers(name)
+          else:
+            return self._OtherWrapper(name)
+        return getattr(self._io, name)
+
+      def __iter__(self):
+        if not self._read:
+          raise self._OPERATION_ERROR('File is not open for reading')
+        return self._io.__iter__()
+
+    # if you print obj.name, the argument to open() must be printed. Not the
+    # abspath, not the filename, but the actual argument.
+    file_object.opened_as = file_path
+
+    fakefile = FakeFileWrapper(file_object,
+                               update=need_write,
+                               read=need_read,
+                               append=append,
+                               delete_on_close=self._delete_on_close,
+                               filesystem=self.filesystem,
+                               newline=newline,
+                               binary=binary,
+                               closefd=closefd)
+    if filedes is not None:
+      fakefile.filedes = filedes
+    else:
+      fakefile.filedes = self.filesystem.AddOpenFile(fakefile)
+    return fakefile
+
+
+def _RunDoctest():
+  # pylint: disable-msg=C6204
+  import doctest
+  from pyfakefs import fake_filesystem  # pylint: disable-msg=W0406
+  return doctest.testmod(fake_filesystem)
+
+
+if __name__ == '__main__':
+  _RunDoctest()
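
For orientation, the FakeOsModule and FakeFileOpen classes added above are normally pointed at one shared FakeFilesystem. A minimal doctest-style sketch (not part of the vendored file; the '/work' paths are purely illustrative):

>>> from pyfakefs import fake_filesystem
>>> fs = fake_filesystem.FakeFilesystem()
>>> os_module = fake_filesystem.FakeOsModule(fs)
>>> open_fn = fake_filesystem.FakeFileOpen(fs)
>>> os_module.makedirs('/work/logs')          # creates fake directories only
>>> with open_fn('/work/logs/out.txt', 'w') as f:
...     _ = f.write('hello')                  # written to the fake file on close()
>>> os_module.listdir('/work/logs')
['out.txt']
>>> os_module.stat('/work/logs/out.txt').st_size
5
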
diff --git a/catapult/third_party/pyfakefs/pyfakefs/fake_filesystem_glob.py b/catapult/third_party/pyfakefs/pyfakefs/fake_filesystem_glob.py
new file mode 100755
index 0000000..4ac5c8e
--- /dev/null
+++ b/catapult/third_party/pyfakefs/pyfakefs/fake_filesystem_glob.py
@@ -0,0 +1,120 @@
+# Copyright 2009 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A fake glob module implementation that uses fake_filesystem for unit tests.
+
+Includes:
+  FakeGlob: Uses a FakeFilesystem to provide a fake replacement for the
+    glob module.
+
+Usage:
+>>> from pyfakefs import fake_filesystem
+>>> from pyfakefs import fake_filesystem_glob
+>>> filesystem = fake_filesystem.FakeFilesystem()
+>>> glob_module = fake_filesystem_glob.FakeGlobModule(filesystem)
+
+>>> file = filesystem.CreateFile('new-file')
+>>> glob_module.glob('*')
+['new-file']
+>>> glob_module.glob('???-file')
+['new-file']
+"""
+
+import fnmatch
+import glob
+import os
+
+from pyfakefs import fake_filesystem
+
+
+class FakeGlobModule(object):
+  """Uses a FakeFilesystem to provide a fake replacement for glob module."""
+
+  def __init__(self, filesystem):
+    """Construct fake glob module using the fake filesystem.
+
+    Args:
+      filesystem:  FakeFilesystem used to provide file system information
+    """
+    self._glob_module = glob
+    self._os_module = fake_filesystem.FakeOsModule(filesystem)
+    self._path_module = self._os_module.path
+
+  def glob(self, pathname):  # pylint: disable-msg=C6409
+    """Return a list of paths matching a pathname pattern.
+
+    The pattern may contain shell-style wildcards a la fnmatch.
+
+    Args:
+      pathname: the pattern with which to find a list of paths
+
+    Returns:
+      List of strings matching the glob pattern.
+    """
+    if not self.has_magic(pathname):
+      if self._path_module.exists(pathname):
+        return [pathname]
+      else:
+        return []
+
+    dirname, basename = self._path_module.split(pathname)
+
+    if not dirname:
+      return self.glob1(self._path_module.curdir, basename)
+    elif self.has_magic(dirname):
+      path_list = self.glob(dirname)
+    else:
+      path_list = [dirname]
+
+    if not self.has_magic(basename):
+      result = []
+      for dirname in path_list:
+        if basename or self._path_module.isdir(dirname):
+          name = self._path_module.join(dirname, basename)
+          if self._path_module.exists(name):
+            result.append(name)
+    else:
+      result = []
+      for dirname in path_list:
+        sublist = self.glob1(dirname, basename)
+        for name in sublist:
+          result.append(self._path_module.join(dirname, name))
+
+    return result
+
+  def glob1(self, dirname, pattern):  # pylint: disable-msg=C6409
+    if not dirname:
+      dirname = self._path_module.curdir
+    try:
+      names = self._os_module.listdir(dirname)
+    except os.error:
+      return []
+    if pattern[0] != '.':
+      names = filter(lambda x: x[0] != '.', names)
+    return fnmatch.filter(names, pattern)
+
+  def __getattr__(self, name):
+    """Forwards any non-faked calls to the standard glob module."""
+    return getattr(self._glob_module, name)
+
+
+def _RunDoctest():
+  # pylint: disable-msg=C6111,C6204,W0406
+  import doctest
+  from pyfakefs import fake_filesystem_glob
+  return doctest.testmod(fake_filesystem_glob)
+
+
+if __name__ == '__main__':
+  _RunDoctest()
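
Complementing the doctest in the module docstring above, a small sketch (illustrative paths, not part of the vendored file) of globbing inside a subdirectory of the fake filesystem:

>>> from pyfakefs import fake_filesystem
>>> from pyfakefs import fake_filesystem_glob
>>> fs = fake_filesystem.FakeFilesystem()
>>> glob_module = fake_filesystem_glob.FakeGlobModule(fs)
>>> for name in ['/logs/a.txt', '/logs/b.txt', '/logs/c.csv']:
...     _ = fs.CreateFile(name)               # parent dirs are created implicitly
>>> glob_module.glob('/logs/*.txt')
['/logs/a.txt', '/logs/b.txt']
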
diff --git a/catapult/third_party/pyfakefs/pyfakefs/fake_filesystem_shutil.py b/catapult/third_party/pyfakefs/pyfakefs/fake_filesystem_shutil.py
new file mode 100755
index 0000000..d4efa93
--- /dev/null
+++ b/catapult/third_party/pyfakefs/pyfakefs/fake_filesystem_shutil.py
@@ -0,0 +1,220 @@
+#!/usr/bin/env python
+#
+# Copyright 2009 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# pylint: disable-msg=W0612,W0613,C6409
+
+"""A fake shutil module implementation that uses fake_filesystem for unit tests.
+
+Includes:
+  FakeShutil: Uses a FakeFilesystem to provide a fake replacement for the
+    shutil module.
+
+Usage:
+>>> from pyfakefs import fake_filesystem
+>>> from pyfakefs import fake_filesystem_shutil
+>>> filesystem = fake_filesystem.FakeFilesystem()
+>>> shutil_module = fake_filesystem_shutil.FakeShutilModule(filesystem)
+
+Copy a fake_filesystem directory tree:
+>>> new_file = filesystem.CreateFile('/src/new-file')
+>>> shutil_module.copytree('/src', '/dst')
+>>> filesystem.Exists('/dst/new-file')
+True
+
+Remove a fake_filesystem directory tree:
+>>> shutil_module.rmtree('/src')
+>>> filesystem.Exists('/src/new-file')
+False
+"""
+
+import errno
+import os
+import shutil
+import stat
+
+__pychecker__ = 'no-reimportself'
+
+_PERM_WRITE = 0o200  # Write permission bit.
+_PERM_READ = 0o400   # Read permission bit.
+_PERM_ALL = 0o7777   # All permission bits.
+
+
+class FakeShutilModule(object):
+  """Uses a FakeFilesystem to provide a fake replacement for shutil module."""
+
+  def __init__(self, filesystem):
+    """Construct fake shutil module using the fake filesystem.
+
+    Args:
+      filesystem:  FakeFilesystem used to provide file system information
+    """
+    self.filesystem = filesystem
+    self._shutil_module = shutil
+
+  def rmtree(self, path, ignore_errors=False, onerror=None):
+    """Remove a directory and all its contents.
+
+    Args:
+      path: (str) Directory tree to remove.
+      ignore_errors: (bool) unimplemented
+      onerror: (func) unimplemented
+    """
+    self.filesystem.RemoveObject(path)
+
+  def copy(self, src, dst):
+    """Copy data and mode bits ("cp src dst").
+
+    Args:
+      src: (str) source file
+      dst: (str) destination, may be a directory
+    """
+    if self.filesystem.Exists(dst):
+      if stat.S_ISDIR(self.filesystem.GetObject(dst).st_mode):
+        dst = self.filesystem.JoinPaths(dst, os.path.basename(src))
+    self.copyfile(src, dst)
+    src_object = self.filesystem.GetObject(src)
+    dst_object = self.filesystem.GetObject(dst)
+    dst_object.st_mode = ((dst_object.st_mode & ~_PERM_ALL) |
+                          (src_object.st_mode & _PERM_ALL))
+
+  def copyfile(self, src, dst):
+    """Copy data from src to dst.
+
+    Args:
+      src: (str) source file
+      dst: (str) destination file
+
+    Raises:
+      IOError: if the file can't be copied
+      shutil.Error: if the src and dst files are the same
+    """
+    src_file_object = self.filesystem.GetObject(src)
+    if not src_file_object.st_mode & _PERM_READ:
+      raise IOError(errno.EACCES, 'Permission denied', src)
+    if stat.S_ISDIR(src_file_object.st_mode):
+      raise IOError(errno.EISDIR, 'Is a directory', src)
+
+    dst_dir = os.path.dirname(dst)
+    if dst_dir:
+      if not self.filesystem.Exists(dst_dir):
+        raise IOError(errno.ENOTDIR, 'Not a directory', dst)
+      dst_dir_object = self.filesystem.GetObject(dst_dir)
+      if not dst_dir_object.st_mode & _PERM_WRITE:
+        raise IOError(errno.EACCES, 'Permission denied', dst_dir)
+
+    abspath_src = self.filesystem.NormalizePath(
+        self.filesystem.ResolvePath(src))
+    abspath_dst = self.filesystem.NormalizePath(
+        self.filesystem.ResolvePath(dst))
+    if abspath_src == abspath_dst:
+      raise shutil.Error('`%s` and `%s` are the same file' % (src, dst))
+
+    if self.filesystem.Exists(dst):
+      dst_file_object = self.filesystem.GetObject(dst)
+      if stat.S_ISDIR(dst_file_object.st_mode):
+        raise IOError(errno.EISDIR, 'Is a directory', dst)
+      if not dst_file_object.st_mode & _PERM_WRITE:
+        raise IOError(errno.EACCES, 'Permission denied', dst)
+      dst_file_object.SetContents(src_file_object.contents)
+
+    else:
+      self.filesystem.CreateFile(dst, contents=src_file_object.contents)
+
+  def copystat(self, src, dst):
+    """Copy all stat info (mode bits, atime, and mtime) from src to dst.
+
+    Args:
+      src: (str) source file
+      dst: (str) destination file
+    """
+    src_object = self.filesystem.GetObject(src)
+    dst_object = self.filesystem.GetObject(dst)
+    dst_object.st_mode = ((dst_object.st_mode & ~_PERM_ALL) |
+                          (src_object.st_mode & _PERM_ALL))
+    dst_object.st_uid = src_object.st_uid
+    dst_object.st_gid = src_object.st_gid
+    dst_object.st_atime = src_object.st_atime
+    dst_object.st_mtime = src_object.st_mtime
+
+  def copy2(self, src, dst):
+    """Copy data and all stat info ("cp -p src dst").
+
+    Args:
+      src: (str) source file
+      dst: (str) destination, may be a directory
+    """
+    if self.filesystem.Exists(dst):
+      if stat.S_ISDIR(self.filesystem.GetObject(dst).st_mode):
+        dst = self.filesystem.JoinPaths(dst, os.path.basename(src))
+    self.copyfile(src, dst)
+    self.copystat(src, dst)
+
+  def copytree(self, src, dst, symlinks=False):
+    """Recursively copy a directory tree.
+
+    Args:
+      src: (str) source directory
+      dst: (str) destination directory, must not already exist
+      symlinks: (bool) copy symlinks as symlinks instead of copying the
+                contents of the linked files. Currently unused.
+
+    Raises:
+      OSError: if src is missing or isn't a directory
+    """
+    self.filesystem.CreateDirectory(dst)
+    try:
+      directory = self.filesystem.GetObject(src)
+    except IOError as e:
+      raise OSError(e.errno, e.strerror)
+    if not stat.S_ISDIR(directory.st_mode):
+      raise OSError(errno.ENOTDIR,
+                    'Fake os module: %r not a directory' % src)
+    for name in directory.contents:
+      srcname = self.filesystem.JoinPaths(src, name)
+      dstname = self.filesystem.JoinPaths(dst, name)
+      src_mode = self.filesystem.GetObject(srcname).st_mode
+      if stat.S_ISDIR(src_mode):
+        self.copytree(srcname, dstname, symlinks)
+      else:
+        self.copy2(srcname, dstname)
+
+  def move(self, src, dst):
+    """Rename a file or directory.
+
+    Args:
+      src: (str) source file or directory
+      dst: (str) if the src is a directory, the dst must not already exist
+    """
+    if stat.S_ISDIR(self.filesystem.GetObject(src).st_mode):
+      self.copytree(src, dst, symlinks=True)
+    else:
+      self.copy2(src, dst)
+    self.filesystem.RemoveObject(src)
+
+  def __getattr__(self, name):
+    """Forwards any non-faked calls to the standard shutil module."""
+    return getattr(self._shutil_module, name)
+
+
+def _RunDoctest():
+  # pylint: disable-msg=C6111,C6204,W0406
+  import doctest
+  from pyfakefs import fake_filesystem_shutil
+  return doctest.testmod(fake_filesystem_shutil)
+
+
+if __name__ == '__main__':
+  _RunDoctest()
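
A brief sketch of how FakeShutilModule composes with the fake filesystem. The paths are illustrative and the block is not part of the vendored file, but the expected outputs follow from copytree() and move() as defined above:

>>> from pyfakefs import fake_filesystem
>>> from pyfakefs import fake_filesystem_shutil
>>> fs = fake_filesystem.FakeFilesystem()
>>> shutil_module = fake_filesystem_shutil.FakeShutilModule(fs)
>>> _ = fs.CreateFile('/src/a.txt', contents='data')
>>> shutil_module.copytree('/src', '/backup')   # copy the tree
>>> shutil_module.move('/src', '/archive')      # copy then remove the source
>>> fs.Exists('/backup/a.txt'), fs.Exists('/archive/a.txt'), fs.Exists('/src')
(True, True, False)
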
diff --git a/catapult/third_party/pyfakefs/pyfakefs/fake_filesystem_unittest.py b/catapult/third_party/pyfakefs/pyfakefs/fake_filesystem_unittest.py
new file mode 100644
index 0000000..33824c4
--- /dev/null
+++ b/catapult/third_party/pyfakefs/pyfakefs/fake_filesystem_unittest.py
@@ -0,0 +1,236 @@
+# Copyright 2014 Altera Corporation. All Rights Reserved.
+# Copyright 2015 John McGehee
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A base class for unit tests using the :py:class:`pyfakefs` module.
+
+This class searches `sys.modules` for modules that import the `os`, `glob`,
+`shutil`, and `tempfile` modules.
+
+The `setUp()` method binds these modules to the corresponding fake
+modules from `pyfakefs`.  Further, the built-in functions `file()` and
+`open()` are bound to fake functions.
+
+The `tearDownPyfakefs()` method returns the module bindings to their original
+state.
+
+It is expected that `setUpPyfakefs()` be invoked at the beginning of the
+derived class' `setUp()` method, and `tearDownPyfakefs()` be invoked at the
+end of the derived class' `tearDown()` method.
+
+During the test, everything uses the fake file system and modules.  This means
+that even in your test, you can use familiar functions like `open()` and
+`os.makedirs()` to manipulate the fake file system.
+
+This also means existing unit tests that use the real file system can be
+retrofitted to use `pyfakefs` by simply changing their base class from
+:py:class:`unittest.TestCase` to
+:py:class:`pyfakefs.fake_filesystem_unittest.TestCase`.
+"""
+
+import sys
+if sys.version_info < (2, 7):
+    import unittest2 as unittest
+else:
+    import unittest
+import doctest
+import inspect
+from pyfakefs import fake_filesystem
+from pyfakefs import fake_filesystem_glob
+from pyfakefs import fake_filesystem_shutil
+from pyfakefs import fake_tempfile
+if sys.version_info < (3,):
+    import __builtin__ as builtins
+else:
+    import builtins
+
+import mox3.stubout
+
+def load_doctests(loader, tests, ignore, module):
+    '''Load the doctest tests for the specified module into unittest.'''
+    _patcher = Patcher()
+    globs = _patcher.replaceGlobs(vars(module))
+    tests.addTests(doctest.DocTestSuite(module,
+                                        globs=globs,
+                                        setUp=_patcher.setUp,
+                                        tearDown=_patcher.tearDown))
+    return tests
+
+
+class TestCase(unittest.TestCase):
+    def __init__(self, methodName='runTest'):
+        super(TestCase, self).__init__(methodName)
+        self._stubber = Patcher()
+
+    @property
+    def fs(self):
+        return self._stubber.fs
+
+    @property
+    def patches(self):
+        return self._stubber.patches
+
+    def setUpPyfakefs(self):
+        '''Bind the file-related modules to the :py:class:`pyfakefs` fake file
+        system instead of the real file system.  Also bind the fake `file()` and
+        `open()` functions.
+
+        Invoke this at the beginning of the `setUp()` method in your unit test
+        class.
+        '''
+        self._stubber.setUp()
+        self.addCleanup(self._stubber.tearDown)
+
+
+    def tearDownPyfakefs(self):
+        ''':meth:`pyfakefs.fake_filesystem_unittest.setUpPyfakefs` registers the
+        tear down procedure using :py:meth:`unittest.TestCase.addCleanup`.  Thus this
+        method is deprecated, and remains just for backward compatibility.
+        '''
+        pass
+
+class Patcher(object):
+    '''
+    Instantiate a stub creator to bind and un-bind the file-related modules to
+    the :py:mod:`pyfakefs` fake modules.
+    '''
+    SKIPMODULES = set([None, fake_filesystem, fake_filesystem_glob,
+                      fake_filesystem_shutil, fake_tempfile, sys])
+    '''Stub nothing that is imported within these modules.
+    `sys` is included to prevent `sys.path` from being stubbed with the fake
+    `os.path`.
+    '''
+    assert None in SKIPMODULES, "sys.modules contains 'None' values; must skip them."
+
+    # To add py.test support per issue https://github.com/jmcgeheeiv/pyfakefs/issues/43,
+    # it appears that adding  'py', 'pytest', '_pytest' to SKIPNAMES will help
+    SKIPNAMES = set(['os', 'glob', 'path', 'shutil', 'tempfile'])
+
+    def __init__(self):
+        # Attributes set by _findModules()
+        self._osModules = None
+        self._globModules = None
+        self._pathModules = None
+        self._shutilModules = None
+        self._tempfileModules = None
+        self._findModules()
+        assert None not in vars(self).values(), \
+                "_findModules() missed the initialization of an instance variable"
+
+        # Attributes set by _refresh()
+        self._stubs = None
+        self.fs = None
+        self.fake_os = None
+        self.fake_glob = None
+        self.fake_path = None
+        self.fake_shutil = None
+        self.fake_tempfile_ = None
+        self.fake_open = None
+        # _isStale is set by tearDown(), reset by _refresh()
+        self._isStale = True
+        self._refresh()
+        assert None not in vars(self).values(), \
+                "_refresh() missed the initialization of an instance variable"
+        assert not self._isStale, "_refresh() did not reset _isStale"
+
+    def _findModules(self):
+        '''Find and cache all modules that import file system modules.
+        Later, `setUp()` will stub these with the fake file system
+        modules.
+        '''
+        self._osModules = set()
+        self._globModules = set()
+        self._pathModules = set()
+        self._shutilModules = set()
+        self._tempfileModules = set()
+        for name, module in set(sys.modules.items()):
+            if (module in self.SKIPMODULES or
+                (not inspect.ismodule(module)) or
+                name.split('.')[0] in self.SKIPNAMES):
+                continue
+            if 'os' in module.__dict__:
+                self._osModules.add(module)
+            if 'glob' in module.__dict__:
+                self._globModules.add(module)
+            if 'path' in module.__dict__:
+                self._pathModules.add(module)
+            if 'shutil' in module.__dict__:
+                self._shutilModules.add(module)
+            if 'tempfile' in module.__dict__:
+                self._tempfileModules.add(module)
+
+    def _refresh(self):
+        '''Renew the fake file system and set the _isStale flag to `False`.'''
+        if self._stubs is not None:
+            self._stubs.SmartUnsetAll()
+        self._stubs = mox3.stubout.StubOutForTesting()
+
+        self.fs = fake_filesystem.FakeFilesystem()
+        self.fake_os = fake_filesystem.FakeOsModule(self.fs)
+        self.fake_glob = fake_filesystem_glob.FakeGlobModule(self.fs)
+        self.fake_path = self.fake_os.path
+        self.fake_shutil = fake_filesystem_shutil.FakeShutilModule(self.fs)
+        self.fake_tempfile_ = fake_tempfile.FakeTempfileModule(self.fs)
+        self.fake_open = fake_filesystem.FakeFileOpen(self.fs)
+
+        self._isStale = False
+
+    def setUp(self, doctester=None):
+        '''Bind the file-related modules to the :py:mod:`pyfakefs` fake
+        modules in place of the real ones.  Also bind the fake `file()` and
+        `open()` functions.
+        '''
+        if self._isStale:
+            self._refresh()
+
+        if doctester is not None:
+            doctester.globs = self.replaceGlobs(doctester.globs)
+
+        if sys.version_info < (3,):
+            # file() was eliminated in Python3
+            self._stubs.SmartSet(builtins, 'file', self.fake_open)
+        self._stubs.SmartSet(builtins, 'open', self.fake_open)
+
+        for module in self._osModules:
+            self._stubs.SmartSet(module,  'os', self.fake_os)
+        for module in self._globModules:
+            self._stubs.SmartSet(module,  'glob', self.fake_glob)
+        for module in self._pathModules:
+            self._stubs.SmartSet(module,  'path', self.fake_path)
+        for module in self._shutilModules:
+            self._stubs.SmartSet(module,  'shutil', self.fake_shutil)
+        for module in self._tempfileModules:
+            self._stubs.SmartSet(module,  'tempfile', self.fake_tempfile_)
+
+    def replaceGlobs(self, globs_):
+        globs = globs_.copy()
+        if self._isStale:
+            self._refresh()
+        if 'os' in globs:
+            globs['os'] = fake_filesystem.FakeOsModule(self.fs)
+        if 'glob' in globs:
+            globs['glob'] = fake_filesystem_glob.FakeGlobModule(self.fs)
+        if 'path' in globs:
+            fake_os = globs['os'] if 'os' in globs \
+                else fake_filesystem.FakeOsModule(self.fs)
+            globs['path'] = fake_os.path
+        if 'shutil' in globs:
+            globs['shutil'] = fake_filesystem_shutil.FakeShutilModule(self.fs)
+        if 'tempfile' in globs:
+            globs['tempfile'] = fake_tempfile.FakeTempfileModule(self.fs)
+        return globs
+
+    def tearDown(self, doctester=None):
+        '''Clear the fake filesystem bindings created by `setUp()`.'''
+        self._isStale = True
+        self._stubs.SmartUnsetAll()
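
To make the intended workflow concrete, here is a minimal sketch of a test built on the TestCase class above. It is not part of the vendored file and assumes only that pyfakefs (and its mox3.stubout dependency) is importable; the paths and test names are illustrative:

import os
import unittest

from pyfakefs import fake_filesystem_unittest


class FakeFsExampleTest(fake_filesystem_unittest.TestCase):

    def setUp(self):
        # Routes os, glob, shutil, tempfile and the open() built-in to the
        # fake filesystem; tearDown is registered via addCleanup().
        self.setUpPyfakefs()

    def test_create_and_read(self):
        # self.fs is the FakeFilesystem instance owned by the Patcher.
        self.fs.CreateFile('/data/input.txt', contents='42')
        with open('/data/input.txt') as stream:
            self.assertEqual(stream.read(), '42')
        self.assertEqual(os.listdir('/data'), ['input.txt'])


if __name__ == '__main__':
    unittest.main()
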
diff --git a/catapult/third_party/pyfakefs/pyfakefs/fake_tempfile.py b/catapult/third_party/pyfakefs/pyfakefs/fake_tempfile.py
new file mode 100644
index 0000000..283f976
--- /dev/null
+++ b/catapult/third_party/pyfakefs/pyfakefs/fake_tempfile.py
@@ -0,0 +1,369 @@
+# Copyright 2010 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Fake tempfile module.
+
+Fake implementation of the python2.4.1 tempfile built-in module that works with
+a FakeFilesystem object.
+"""
+#pylint: disable-all
+
+import errno
+import logging
+import os
+import stat
+import tempfile
+import warnings
+
+from pyfakefs import fake_filesystem
+
+try:
+  import StringIO as io  # pylint: disable-msg=C6204
+except ImportError:
+  import io  # pylint: disable-msg=C6204
+
+
+class FakeTempfileModule(object):
+  """Uses a FakeFilesystem to provide a mock for the tempfile 2.4.1 module.
+
+  Common usage:
+  filesystem = fake_filesystem.FakeFilesystem()
+  my_tempfile_module = mock_tempfile.FakeTempfileModule(filesystem)
+
+  See also: default keyword arguments for Dependency Injection on
+  http://go/tott-episode-12
+  """
+
+  def __init__(self, filesystem):
+    self._filesystem = filesystem
+    self._tempfile = tempfile
+    self.tempdir = None  # initialized by mktemp(), others
+    self._temp_prefix = 'tmp'
+    self._mktemp_retvals = []
+
+  # pylint: disable-msg=W0622
+  def _TempFilename(self, suffix='', prefix=None, dir=None):
+    """Create a temporary filename that does not exist.
+
+    This is a re-implementation of how tempfile creates random filenames,
+    and is probably different.
+
+    Does not modify self._filesystem; that's your job.
+
+    Output: self.tempdir is initialized if unset
+    Args:
+      suffix: filename suffix
+      prefix: filename prefix
+      dir: dir to put filename in
+    Returns:
+      string, temp filename that does not exist
+    """
+    if dir is None:
+      dir = self._filesystem.JoinPaths(self._filesystem.root.name, 'tmp')
+    filename = None
+    if prefix is None:
+      prefix = self._temp_prefix
+    while not filename or self._filesystem.Exists(filename):
+      # pylint: disable-msg=W0212
+      filename = self._filesystem.JoinPaths(dir, '%s%s%s' % (
+          prefix,
+          next(self._tempfile._RandomNameSequence()),
+          suffix))
+    return filename
+
+  # pylint: disable-msg=W0622,W0613
+  def TemporaryFile(self, mode='w+b', bufsize=-1,
+                    suffix='', prefix=None, dir=None):
+    """Return a file-like object deleted on close().
+
+    Python 2.4.1 tempfile.TemporaryFile.__doc__ =
+    >Return a file (or file-like) object that can be used as a temporary
+    >storage area. The file is created using mkstemp. It will be destroyed as
+    >soon as it is closed (including an implicit close when the object is
+    >garbage collected). Under Unix, the directory entry for the file is
+    >removed immediately after the file is created. Other platforms do not
+    >support this; your code should not rely on a temporary file created using
+    >this function having or not having a visible name in the file system.
+    >
+    >The mode parameter defaults to 'w+b' so that the file created can be read
+    >and written without being closed. Binary mode is used so that it behaves
+    >consistently on all platforms without regard for the data that is stored.
+    >bufsize defaults to -1, meaning that the operating system default is used.
+    >
+    >The dir, prefix and suffix parameters are passed to mkstemp()
+
+    Args:
+      mode: optional string, see above
+      bufsize: optional int, see above
+      suffix: optional string, see above
+      prefix: optional string, see above
+      dir: optional string, see above
+    Returns:
+      a file-like object.
+    """
+    # pylint: disable-msg=C6002
+    # TODO: prefix, suffix, bufsize, dir, mode unused?
+    # cannot be cStringIO due to .name requirement below
+    retval = io.StringIO()
+    retval.name = '<fdopen>'  # as seen on 2.4.3
+    return retval
+
+  # pylint: disable-msg=W0622,W0613
+  def NamedTemporaryFile(self, mode='w+b', bufsize=-1,
+                         suffix='', prefix=None, dir=None, delete=True):
+    """Return a file-like object with name that is deleted on close().
+
+    Python 2.4.1 tempfile.NamedTemporaryFile.__doc__ =
+    >This function operates exactly as TemporaryFile() does, except that
+    >the file is guaranteed to have a visible name in the file system. That
+    >name can be retrieved from the name member of the file object.
+
+    Args:
+      mode: optional string, see above
+      bufsize: optional int, see above
+      suffix: optional string, see above
+      prefix: optional string, see above
+      dir: optional string, see above
+      delete: optional bool, see above
+    Returns:
+      a file-like object including obj.name
+    """
+    # pylint: disable-msg=C6002
+    # TODO: bufsiz unused?
+    temp = self.mkstemp(suffix=suffix, prefix=prefix, dir=dir)
+    filename = temp[1]
+    mock_open = fake_filesystem.FakeFileOpen(
+        self._filesystem, delete_on_close=delete)
+    obj = mock_open(filename, mode)
+    obj.name = filename
+    return obj
+
+  # pylint: disable-msg=C6409
+  def mkstemp(self, suffix='', prefix=None, dir=None, text=False):
+    """Create temp file, returning a 2-tuple: (9999, filename).
+
+    Important: Returns 9999 instead of a real file descriptor!
+
+    Python 2.4.1 tempfile.mkstemp.__doc__ =
+    >mkstemp([suffix, [prefix, [dir, [text]]]])
+    >
+    >User-callable function to create and return a unique temporary file.
+    >The return value is a pair (fd, name) where fd is the file descriptor
+    >returned by os.open, and name is the filename.
+    >
+    >...[snip args]...
+    >
+    >The file is readable and writable only by the creating user ID.
+    >If the operating system uses permission bits to indicate whether
+    >a file is executable, the file is executable by no one. The file
+    >descriptor is not inherited by children of this process.
+    >
+    >Caller is responsible for deleting the file when done with it.
+
+    NOTE: if dir is unspecified, this call creates a directory.
+
+    Output: self.tempdir is initialized if unset
+    Args:
+      suffix: optional string, filename suffix
+      prefix: optional string, filename prefix
+      dir: optional string, directory for temp file; must exist before call
+      text: optional boolean, True = open file in text mode.
+          default False = open file in binary mode.
+    Returns:
+      2-tuple containing
+      [0] = int, file descriptor number for the file object
+      [1] = string, absolute pathname of a file
+    Raises:
+      OSError: when dir= is specified but does not exist
+    """
+    # pylint: disable-msg=C6002
+    # TODO: optional boolean text is unused?
+    # default dir affected by "global"
+    filename = self._TempEntryname(suffix, prefix, dir)
+    fh = self._filesystem.CreateFile(filename, st_mode=stat.S_IFREG|0o600)
+    fd = self._filesystem.AddOpenFile(fh)
+
+    self._mktemp_retvals.append(filename)
+    return (fd, filename)
+
+  # pylint: disable-msg=C6409
+  def mkdtemp(self, suffix='', prefix=None, dir=None):
+    """Create temp directory, returns string, absolute pathname.
+
+    Python 2.4.1 tempfile.mkdtemp.__doc__ =
+    >mkdtemp([suffix[, prefix[, dir]]])
+    >Creates a temporary directory in the most secure manner
+    >possible. [...]
+    >
+    >The user of mkdtemp() is responsible for deleting the temporary
+    >directory and its contents when done with it.
+    > [...]
+    >mkdtemp() returns the absolute pathname of the new directory. [...]
+
+    Args:
+      suffix: optional string, filename suffix
+      prefix: optional string, filename prefix
+      dir: optional string, directory for temp dir. Must exist before call
+    Returns:
+      string, directory name
+    """
+    dirname = self._TempEntryname(suffix, prefix, dir)
+    self._filesystem.CreateDirectory(dirname, perm_bits=0o700)
+
+    self._mktemp_retvals.append(dirname)
+    return dirname
+
+  def _TempEntryname(self, suffix, prefix, dir):
+    """Helper function for mk[ds]temp.
+
+    Args:
+      suffix: string, filename suffix
+      prefix: string, filename prefix
+      dir: string, directory for temp dir. Must exist before call
+    Returns:
+      string, entry name
+    """
+    # default dir affected by "global"
+    if dir is None:
+      call_mkdir = True
+      dir = self.gettempdir()
+    else:
+      call_mkdir = False
+
+    entryname = None
+    while not entryname or self._filesystem.Exists(entryname):
+      entryname = self._TempFilename(suffix=suffix, prefix=prefix, dir=dir)
+    if not call_mkdir:
+      # This is simplistic. A bad input such as suffix='/f' would make the
+      # real tempfile blow up where this mock won't, but that is already a
+      # broken corner case.
+      parent_dir = os.path.dirname(entryname)
+      try:
+        self._filesystem.GetObject(parent_dir)
+      except IOError as err:
+        assert 'No such file or directory' in str(err)
+        # python -c 'import tempfile; tempfile.mkstemp(dir="/no/such/dr")'
+        # OSError: [Errno 2] No such file or directory: '/no/such/dr/tmpFBuqjO'
+        raise OSError(
+            errno.ENOENT,
+            'No such directory in mock filesystem',
+            parent_dir)
+    return entryname
+
+  # pylint: disable-msg=C6409
+  def gettempdir(self):
+    """Get default temp dir.  Sets default if unset."""
+    if self.tempdir:
+      return self.tempdir
+    # pylint: disable-msg=C6002
+    # TODO: environment variables TMPDIR TEMP TMP, or other dirs?
+    self.tempdir = '/tmp'
+    return self.tempdir
+
+  # pylint: disable-msg=C6409
+  def gettempprefix(self):
+    """Get temp filename prefix.
+
+    NOTE: This has no effect on py2.4
+
+    Returns:
+      string, prefix to use in temporary filenames
+    """
+    return self._temp_prefix
+
+  # pylint: disable-msg=C6409
+  def mktemp(self, suffix=''):
+    """mktemp is deprecated in 2.4.1, and is thus unimplemented."""
+    raise NotImplementedError
+
+  def _SetTemplate(self, template):
+    """Setter for 'template' property."""
+    self._temp_prefix = template
+    logging.error('tempfile.template= is a NOP in python2.4')
+
+  def __SetTemplate(self, template):
+    """Indirect setter for 'template' property."""
+    self._SetTemplate(template)
+
+  def __DeprecatedTemplate(self):
+    """template property implementation."""
+    raise NotImplementedError
+
+  # reading from template is deprecated, setting is ok.
+  template = property(__DeprecatedTemplate, __SetTemplate,
+                      doc="""Set the prefix for temp filenames""")
+
+  def FakeReturnedMktempValues(self):
+    """For validation purposes, mktemp()'s return values are stored."""
+    return self._mktemp_retvals
+
+  def FakeMktempReset(self):
+    """Clear the stored mktemp() values."""
+    self._mktemp_retvals = []
+
+  def TemporaryDirectory(self, suffix='', prefix='tmp', dir=None):
+    """Return a file-like object deleted on close().
+  
+    Python 3.4 tempfile.TemporaryDirectory.__doc__ =
+    >Create and return a temporary directory.  This has the same
+    >behavior as mkdtemp but can be used as a context manager.  For
+    >example:
+    >
+    >    with TemporaryDirectory() as tmpdir:
+    >        ...
+    >
+    >Upon exiting the context, the directory and everything contained
+    >in it are removed.
+  
+    Args:
+      suffix: optional string, see above
+      prefix: optional string, see above
+      dir: optional string, see above
+    Returns:
+      a context manager
+    """
+    
+    class FakeTemporaryDirectory(object):
+      def __init__(self, filesystem, tempfile, suffix=None, prefix=None, dir=None):
+        self.closed = False
+        self.filesystem = filesystem
+        self.name = tempfile.mkdtemp(suffix, prefix, dir)
+
+      def __repr__(self):
+        return "<{} {!r}>".format(self.__class__.__name__, self.name)
+    
+      def __enter__(self):
+        return self.name
+    
+      def __exit__(self, exc, value, tb):
+        self.cleanup()
+    
+      def cleanup(self, warn=False):
+        if self.name and not self.closed:
+          self.filesystem.RemoveObject(self.name)
+          self.closed = True
+          if warn:
+            warnings.warn("Implicitly cleaning up {!r}".format(self),
+                         ResourceWarning)
+          
+      def __del__(self):
+        # Issue a ResourceWarning if implicit cleanup needed
+        self.cleanup(warn=True)
+
+
+    return FakeTemporaryDirectory(self._filesystem, self, suffix, prefix, dir)
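
A minimal usage sketch for the fake tempfile module added above, following the class docstring's "Common usage" lines. The `fake_filesystem` and `mock_tempfile` import names come from that docstring; the exact import paths in a real pyfakefs checkout may differ.

```python
# Hedged sketch: module names follow the docstring above and are assumptions.
import fake_filesystem
import mock_tempfile

filesystem = fake_filesystem.FakeFilesystem()
fake_tempfile = mock_tempfile.FakeTempfileModule(filesystem)

# mkstemp() hands back a (fake fd, path) pair; the path exists only in the
# fake filesystem, never on disk.
fd, path = fake_tempfile.mkstemp(suffix='.log')
assert filesystem.Exists(path)

# mkdtemp() creates a directory in the fake filesystem.
tmpdir = fake_tempfile.mkdtemp(prefix='work')
assert filesystem.Exists(tmpdir)

# The paths handed out so far are recorded for validation in tests.
print(fake_tempfile.FakeReturnedMktempValues())
fake_tempfile.FakeMktempReset()
```
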
diff --git a/catapult/third_party/six/CHANGES b/catapult/third_party/six/CHANGES
index 4b9425f..6e9df6d 100644
--- a/catapult/third_party/six/CHANGES
+++ b/catapult/third_party/six/CHANGES
@@ -3,6 +3,19 @@
 
 This file lists the changes in each six version.
 
+1.10.0
+------
+
+- Issue #122: Improve the performance of `six.int2byte` on Python 3.
+
+- Pull request #55 and issue #99: Don't add the `winreg` module to `six.moves`
+  on non-Windows platforms.
+
+- Pull request #60 and issue #108: Add `six.moves.getcwd` and
+  `six.moves.getcwdu`.
+
+- Pull request #64: Add `create_unbound_method` to create unbound methods.
+
 1.9.0
 -----
 
@@ -26,6 +39,9 @@
 - Pull request #51: Add `six.view(keys|values|itmes)`, which provide dictionary
   views on Python 2.7+.
 
+- Issue #112: `six.moves.reload_module` now uses the importlib module on
+  Python 3.4+.
+
 1.8.0
 -----
 
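
A hedged sketch of two of the 1.10.0 additions listed above, `create_unbound_method` and `six.moves.getcwd`; it assumes six 1.10.0 or newer is importable and runs on both Python 2 and 3.

```python
# Sketch of six 1.10.0 helpers; assumes six >= 1.10.0 is installed.
import six
from six.moves import getcwd


class Greeter(object):
    pass


def hello(self):
    return 'hello from %r' % (self,)


# On Python 2 this returns a types.MethodType unbound method; on Python 3,
# where unbound methods no longer exist, it simply returns the function.
unbound = six.create_unbound_method(hello, Greeter)
print(unbound(Greeter()))

# six.moves.getcwd maps to os.getcwdu on Python 2 and os.getcwd on Python 3.
print(getcwd())
```
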
diff --git a/catapult/third_party/six/PKG-INFO b/catapult/third_party/six/PKG-INFO
deleted file mode 100644
index 6fddf85..0000000
--- a/catapult/third_party/six/PKG-INFO
+++ /dev/null
@@ -1,32 +0,0 @@
-Metadata-Version: 1.1
-Name: six
-Version: 1.9.0
-Summary: Python 2 and 3 compatibility utilities
-Home-page: http://pypi.python.org/pypi/six/
-Author: Benjamin Peterson
-Author-email: benjamin@python.org
-License: MIT
-Description: Six is a Python 2 and 3 compatibility library.  It provides utility functions
-        for smoothing over the differences between the Python versions with the goal of
-        writing Python code that is compatible on both Python versions.  See the
-        documentation for more information on what is provided.
-        
-        Six supports every Python version since 2.5.  It is contained in only one Python
-        file, so it can be easily copied into your project. (The copyright and license
-        notice must be retained.)
-        
-        Online documentation is at http://pythonhosted.org/six/.
-        
-        Bugs can be reported to https://bitbucket.org/gutworth/six.  The code can also
-        be found there.
-        
-        For questions about six or porting in general, email the python-porting mailing
-        list: http://mail.python.org/mailman/listinfo/python-porting
-        
-Platform: UNKNOWN
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 3
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Topic :: Software Development :: Libraries
-Classifier: Topic :: Utilities
diff --git a/catapult/third_party/six/README b/catapult/third_party/six/README
index 32bab7c..ee628a9 100644
--- a/catapult/third_party/six/README
+++ b/catapult/third_party/six/README
@@ -3,14 +3,14 @@
 writing Python code that is compatible on both Python versions.  See the
 documentation for more information on what is provided.
 
-Six supports every Python version since 2.5.  It is contained in only one Python
+Six supports every Python version since 2.6.  It is contained in only one Python
 file, so it can be easily copied into your project. (The copyright and license
 notice must be retained.)
 
-Online documentation is at http://pythonhosted.org/six/.
+Online documentation is at https://pythonhosted.org/six/.
 
 Bugs can be reported to https://bitbucket.org/gutworth/six.  The code can also
 be found there.
 
 For questions about six or porting in general, email the python-porting mailing
-list: http://mail.python.org/mailman/listinfo/python-porting
+list: https://mail.python.org/mailman/listinfo/python-porting
diff --git a/catapult/third_party/six/README.chromium b/catapult/third_party/six/README.chromium
index e26d279..4606beb 100644
--- a/catapult/third_party/six/README.chromium
+++ b/catapult/third_party/six/README.chromium
@@ -1,6 +1,7 @@
 Name: six
-URL: https://pypi.python.org/pypi/six
-Version: 1.9.0
+URL: https://bitbucket.org/gutworth/six/
+Version: 1991f8b5b654f077e773f05695a08e0506b7367f
+Date: 2016-01-13
 License: MIT
 License File: LICENSE
 Security Critical: no
diff --git a/catapult/third_party/six/documentation/index.rst b/catapult/third_party/six/documentation/index.rst
index 1f27400..1391bac 100644
--- a/catapult/third_party/six/documentation/index.rst
+++ b/catapult/third_party/six/documentation/index.rst
@@ -232,6 +232,13 @@
    requires the *obj*'s class to be passed.
 
 
+.. function:: create_unbound_method(func, cls)
+
+   Return an unbound method object wrapping *func*.  In Python 2, this will
+   return a :func:`py2:types.MethodType` object.  In Python 3, unbound methods
+   do not exist and this wrapper will simply return *func*.
+
+
 .. class:: Iterator
 
    A class for making portable iterators. The intention is that it be subclassed
@@ -383,7 +390,7 @@
    .. note::
 
       In Python 3.3, the ``u`` prefix has been reintroduced. Code that only
-      supports Python 3 versions greater than 3.3 thus does not need
+      supports Python 3 versions 3.3 and higher thus does not need
       :func:`u`.
 
    .. note::
@@ -570,6 +577,10 @@
 +------------------------------+-------------------------------------+-------------------------------------+
 | ``filterfalse``              | :func:`py2:itertools.ifilterfalse`  | :func:`py3:itertools.filterfalse`   |
 +------------------------------+-------------------------------------+-------------------------------------+
+| ``getcwd``                   | :func:`py2:os.getcwdu`              | :func:`py3:os.getcwd`               |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``getcwdb``                  | :func:`py2:os.getcwd`               | :func:`py3:os.getcwdb`              |
++------------------------------+-------------------------------------+-------------------------------------+
 | ``http_cookiejar``           | :mod:`py2:cookielib`                | :mod:`py3:http.cookiejar`           |
 +------------------------------+-------------------------------------+-------------------------------------+
 | ``http_cookies``             | :mod:`py2:Cookie`                   | :mod:`py3:http.cookies`             |
@@ -598,7 +609,9 @@
 +------------------------------+-------------------------------------+-------------------------------------+
 | ``reduce``                   | :func:`py2:reduce`                  | :func:`py3:functools.reduce`        |
 +------------------------------+-------------------------------------+-------------------------------------+
-| ``reload_module``            | :func:`py2:reload`                  | :func:`py3:imp.reload`              |
+| ``reload_module``            | :func:`py2:reload`                  | :func:`py3:imp.reload`,             |
+|                              |                                     | :func:`py3:importlib.reload`        |
+|                              |                                     | on Python 3.4+                      |
 +------------------------------+-------------------------------------+-------------------------------------+
 | ``reprlib``                  | :mod:`py2:repr`                     | :mod:`py3:reprlib`                  |
 +------------------------------+-------------------------------------+-------------------------------------+
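
A small sketch of the updated `reload_module` and new `getcwdb` rows from the table above; the dispatch described there (builtin `reload` on Python 2, `imp.reload` on 3.0-3.3, `importlib.reload` on 3.4+) happens inside six, so callers only import the move.

```python
# Sketch of the six.moves rows documented above; assumes six >= 1.10.0.
import json

from six.moves import getcwdb, reload_module

# Dispatches to reload() on Python 2, imp.reload() on Python 3.0-3.3 and
# importlib.reload() on Python 3.4+.
reload_module(json)

# Maps to os.getcwd() on Python 2 (a byte string there) and os.getcwdb() on
# Python 3.
print(getcwdb())
```
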
diff --git a/catapult/third_party/six/setup.cfg b/catapult/third_party/six/setup.cfg
index fbb3ca0..4a5b847 100644
--- a/catapult/third_party/six/setup.cfg
+++ b/catapult/third_party/six/setup.cfg
@@ -1,8 +1,18 @@
 [wheel]
 universal = 1
 
-[egg_info]
-tag_build = 
-tag_svn_revision = 0
-tag_date = 0
+[flake8]
+max-line-length = 100
+ignore = F821
+
+[pytest]
+minversion=2.2.0
+pep8ignore =
+    documentation/*.py ALL
+    test_six.py ALL
+
+flakes-ignore =
+    documentation/*.py ALL
+    test_six.py ALL
+    six.py UndefinedName
 
diff --git a/catapult/third_party/six/setup.py b/catapult/third_party/six/setup.py
index b0cca52..31d4625 100644
--- a/catapult/third_party/six/setup.py
+++ b/catapult/third_party/six/setup.py
@@ -1,3 +1,23 @@
+# Copyright (c) 2010-2015 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
 from __future__ import with_statement
 
 try:
diff --git a/catapult/third_party/six/six.egg-info/PKG-INFO b/catapult/third_party/six/six.egg-info/PKG-INFO
deleted file mode 100644
index 6fddf85..0000000
--- a/catapult/third_party/six/six.egg-info/PKG-INFO
+++ /dev/null
@@ -1,32 +0,0 @@
-Metadata-Version: 1.1
-Name: six
-Version: 1.9.0
-Summary: Python 2 and 3 compatibility utilities
-Home-page: http://pypi.python.org/pypi/six/
-Author: Benjamin Peterson
-Author-email: benjamin@python.org
-License: MIT
-Description: Six is a Python 2 and 3 compatibility library.  It provides utility functions
-        for smoothing over the differences between the Python versions with the goal of
-        writing Python code that is compatible on both Python versions.  See the
-        documentation for more information on what is provided.
-        
-        Six supports every Python version since 2.5.  It is contained in only one Python
-        file, so it can be easily copied into your project. (The copyright and license
-        notice must be retained.)
-        
-        Online documentation is at http://pythonhosted.org/six/.
-        
-        Bugs can be reported to https://bitbucket.org/gutworth/six.  The code can also
-        be found there.
-        
-        For questions about six or porting in general, email the python-porting mailing
-        list: http://mail.python.org/mailman/listinfo/python-porting
-        
-Platform: UNKNOWN
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 3
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Topic :: Software Development :: Libraries
-Classifier: Topic :: Utilities
diff --git a/catapult/third_party/six/six.egg-info/SOURCES.txt b/catapult/third_party/six/six.egg-info/SOURCES.txt
deleted file mode 100644
index 9aa7c20..0000000
--- a/catapult/third_party/six/six.egg-info/SOURCES.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-CHANGES
-LICENSE
-MANIFEST.in
-README
-setup.cfg
-setup.py
-six.py
-test_six.py
-documentation/Makefile
-documentation/conf.py
-documentation/index.rst
-six.egg-info/PKG-INFO
-six.egg-info/SOURCES.txt
-six.egg-info/dependency_links.txt
-six.egg-info/top_level.txt
\ No newline at end of file
diff --git a/catapult/third_party/six/six.egg-info/top_level.txt b/catapult/third_party/six/six.egg-info/top_level.txt
deleted file mode 100644
index ffe2fce..0000000
--- a/catapult/third_party/six/six.egg-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-six
diff --git a/catapult/third_party/six/six.py b/catapult/third_party/six/six.py
index ffa3fe1..56e4272 100644
--- a/catapult/third_party/six/six.py
+++ b/catapult/third_party/six/six.py
@@ -1,5 +1,3 @@
-"""Utilities for writing code that runs on Python 2 and 3"""
-
 # Copyright (c) 2010-2015 Benjamin Peterson
 #
 # Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -20,6 +18,8 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 # SOFTWARE.
 
+"""Utilities for writing code that runs on Python 2 and 3"""
+
 from __future__ import absolute_import
 
 import functools
@@ -29,12 +29,13 @@
 import types
 
 __author__ = "Benjamin Peterson <benjamin@python.org>"
-__version__ = "1.9.0"
+__version__ = "1.10.0"
 
 
 # Useful for very coarse version differentiation.
 PY2 = sys.version_info[0] == 2
 PY3 = sys.version_info[0] == 3
+PY34 = sys.version_info[0:2] >= (3, 4)
 
 if PY3:
     string_types = str,
@@ -57,6 +58,7 @@
     else:
         # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
         class X(object):
+
             def __len__(self):
                 return 1 << 31
         try:
@@ -88,7 +90,7 @@
 
     def __get__(self, obj, tp):
         result = self._resolve()
-        setattr(obj, self.name, result) # Invokes __set__.
+        setattr(obj, self.name, result)  # Invokes __set__.
         try:
             # This is a bit ugly, but it avoids running this again by
             # removing this descriptor.
@@ -160,12 +162,14 @@
 
 
 class _SixMetaPathImporter(object):
+
     """
     A meta path importer to import six.moves and its submodules.
 
     This class implements a PEP302 finder and loader. It should be compatible
     with Python 2.5 and all existing versions of Python3
     """
+
     def __init__(self, six_module_name):
         self.name = six_module_name
         self.known_modules = {}
@@ -223,6 +227,7 @@
 
 
 class _MovedItems(_LazyModule):
+
     """Lazy loading of moved objects"""
     __path__ = []  # mark as package
 
@@ -234,8 +239,10 @@
     MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
     MovedAttribute("intern", "__builtin__", "sys"),
     MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+    MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
+    MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
     MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
-    MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
+    MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
     MovedAttribute("reduce", "__builtin__", "functools"),
     MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
     MovedAttribute("StringIO", "StringIO", "io"),
@@ -245,7 +252,6 @@
     MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
     MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
     MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
-
     MovedModule("builtins", "__builtin__"),
     MovedModule("configparser", "ConfigParser"),
     MovedModule("copyreg", "copy_reg"),
@@ -292,8 +298,13 @@
     MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
     MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
     MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
-    MovedModule("winreg", "_winreg"),
 ]
+# Add windows specific modules.
+if sys.platform == "win32":
+    _moved_attributes += [
+        MovedModule("winreg", "_winreg"),
+    ]
+
 for attr in _moved_attributes:
     setattr(_MovedItems, attr.name, attr)
     if isinstance(attr, MovedModule):
@@ -307,6 +318,7 @@
 
 
 class Module_six_moves_urllib_parse(_LazyModule):
+
     """Lazy loading of moved objects in six.moves.urllib_parse"""
 
 
@@ -346,6 +358,7 @@
 
 
 class Module_six_moves_urllib_error(_LazyModule):
+
     """Lazy loading of moved objects in six.moves.urllib_error"""
 
 
@@ -365,6 +378,7 @@
 
 
 class Module_six_moves_urllib_request(_LazyModule):
+
     """Lazy loading of moved objects in six.moves.urllib_request"""
 
 
@@ -414,6 +428,7 @@
 
 
 class Module_six_moves_urllib_response(_LazyModule):
+
     """Lazy loading of moved objects in six.moves.urllib_response"""
 
 
@@ -434,6 +449,7 @@
 
 
 class Module_six_moves_urllib_robotparser(_LazyModule):
+
     """Lazy loading of moved objects in six.moves.urllib_robotparser"""
 
 
@@ -451,6 +467,7 @@
 
 
 class Module_six_moves_urllib(types.ModuleType):
+
     """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
     __path__ = []  # mark as package
     parse = _importer._get_module("moves.urllib_parse")
@@ -521,6 +538,9 @@
 
     create_bound_method = types.MethodType
 
+    def create_unbound_method(func, cls):
+        return func
+
     Iterator = object
 else:
     def get_unbound_function(unbound):
@@ -529,6 +549,9 @@
     def create_bound_method(func, obj):
         return types.MethodType(func, obj, obj.__class__)
 
+    def create_unbound_method(func, cls):
+        return types.MethodType(func, None, cls)
+
     class Iterator(object):
 
         def next(self):
@@ -567,16 +590,16 @@
     viewitems = operator.methodcaller("items")
 else:
     def iterkeys(d, **kw):
-        return iter(d.iterkeys(**kw))
+        return d.iterkeys(**kw)
 
     def itervalues(d, **kw):
-        return iter(d.itervalues(**kw))
+        return d.itervalues(**kw)
 
     def iteritems(d, **kw):
-        return iter(d.iteritems(**kw))
+        return d.iteritems(**kw)
 
     def iterlists(d, **kw):
-        return iter(d.iterlists(**kw))
+        return d.iterlists(**kw)
 
     viewkeys = operator.methodcaller("viewkeys")
 
@@ -595,15 +618,13 @@
 if PY3:
     def b(s):
         return s.encode("latin-1")
+
     def u(s):
         return s
     unichr = chr
-    if sys.version_info[1] <= 1:
-        def int2byte(i):
-            return bytes((i,))
-    else:
-        # This is about 2x faster than the implementation above on 3.2+
-        int2byte = operator.methodcaller("to_bytes", 1, "big")
+    import struct
+    int2byte = struct.Struct(">B").pack
+    del struct
     byte2int = operator.itemgetter(0)
     indexbytes = operator.getitem
     iterbytes = iter
@@ -611,18 +632,25 @@
     StringIO = io.StringIO
     BytesIO = io.BytesIO
     _assertCountEqual = "assertCountEqual"
-    _assertRaisesRegex = "assertRaisesRegex"
-    _assertRegex = "assertRegex"
+    if sys.version_info[1] <= 1:
+        _assertRaisesRegex = "assertRaisesRegexp"
+        _assertRegex = "assertRegexpMatches"
+    else:
+        _assertRaisesRegex = "assertRaisesRegex"
+        _assertRegex = "assertRegex"
 else:
     def b(s):
         return s
     # Workaround for standalone backslash
+
     def u(s):
         return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
     unichr = unichr
     int2byte = chr
+
     def byte2int(bs):
         return ord(bs[0])
+
     def indexbytes(buf, i):
         return ord(buf[i])
     iterbytes = functools.partial(itertools.imap, ord)
@@ -650,7 +678,6 @@
 if PY3:
     exec_ = getattr(moves.builtins, "exec")
 
-
     def reraise(tp, value, tb=None):
         if value is None:
             value = tp()
@@ -671,7 +698,6 @@
             _locs_ = _globs_
         exec("""exec _code_ in _globs_, _locs_""")
 
-
     exec_("""def reraise(tp, value, tb=None):
     raise tp, value, tb
 """)
@@ -699,13 +725,14 @@
         fp = kwargs.pop("file", sys.stdout)
         if fp is None:
             return
+
         def write(data):
             if not isinstance(data, basestring):
                 data = str(data)
             # If the file has an encoding, encode unicode with it.
             if (isinstance(fp, file) and
-                isinstance(data, unicode) and
-                fp.encoding is not None):
+                    isinstance(data, unicode) and
+                    fp.encoding is not None):
                 errors = getattr(fp, "errors", None)
                 if errors is None:
                     errors = "strict"
@@ -748,6 +775,7 @@
         write(end)
 if sys.version_info[:2] < (3, 3):
     _print = print_
+
     def print_(*args, **kwargs):
         fp = kwargs.get("file", sys.stdout)
         flush = kwargs.pop("flush", False)
@@ -768,12 +796,14 @@
 else:
     wraps = functools.wraps
 
+
 def with_metaclass(meta, *bases):
     """Create a base class with a metaclass."""
     # This requires a bit of explanation: the basic idea is to make a dummy
     # metaclass for one level of class instantiation that replaces itself with
     # the actual metaclass.
     class metaclass(meta):
+
         def __new__(cls, name, this_bases, d):
             return meta(name, bases, d)
     return type.__new__(metaclass, 'temporary_class', (), {})
@@ -830,7 +860,7 @@
         # the six meta path importer, since the other six instance will have
         # inserted an importer with different class.
         if (type(importer).__name__ == "_SixMetaPathImporter" and
-            importer.name == __name__):
+                importer.name == __name__):
             del sys.meta_path[i]
             break
     del i, importer
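
The `int2byte` change in the hunk above replaces the minor-version check with a pre-compiled struct packer. A standalone sketch of the same construction and of the out-of-range behavior that the relaxed `test_int2byte` assertion accounts for:

```python
# Sketch of the struct-based int2byte from the six change above.
import struct

int2byte = struct.Struct(">B").pack

assert int2byte(3) == b"\x03"
assert int2byte(255) == b"\xff"

# Values outside 0..255 raise struct.error rather than OverflowError or
# ValueError, which is why the test now only expects *some* Exception for 256.
try:
    int2byte(256)
except struct.error:
    print("out of range, as expected")
```
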
diff --git a/catapult/third_party/six/test_six.py b/catapult/third_party/six/test_six.py
index 76a8ccb..b68e006 100644
--- a/catapult/third_party/six/test_six.py
+++ b/catapult/third_party/six/test_six.py
@@ -1,3 +1,23 @@
+# Copyright (c) 2010-2015 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
 import operator
 import sys
 import types
@@ -390,7 +410,7 @@
         monkeypatch.undo()
 
 
-@py.test.mark.skipif(sys.version_info[:2] < (2, 7),
+@py.test.mark.skipif("sys.version_info[:2] < (2, 7)",
                 reason="view methods on dictionaries only available on 2.7+")
 def test_dictionary_views():
     def stock_method_name(viewwhat):
@@ -456,6 +476,20 @@
     assert b() is x
 
 
+def test_create_unbound_method():
+    class X(object):
+        pass
+
+    def f(self):
+        return self
+    u = six.create_unbound_method(f, X)
+    py.test.raises(TypeError, u)
+    if six.PY2:
+        assert isinstance(u, types.MethodType)
+    x = X()
+    assert f(x) is x
+
+
 if six.PY3:
 
     def test_b():
@@ -497,7 +531,7 @@
 
 def test_int2byte():
     assert six.int2byte(3) == six.b("\x03")
-    py.test.raises((OverflowError, ValueError), six.int2byte, 256)
+    py.test.raises(Exception, six.int2byte, 256)
 
 
 def test_byte2int():
@@ -799,7 +833,7 @@
     assert type(MySlotsWeakref) is Meta
 
 
-@py.test.mark.skipif("sys.version_info[:2] < (2, 7)")
+@py.test.mark.skipif("sys.version_info[:2] < (2, 7) or sys.version_info[:2] in ((3, 0), (3, 1))")
 def test_assertCountEqual():
     class TestAssertCountEqual(unittest.TestCase):
         def test(self):
diff --git a/catapult/third_party/vinn/bin/vinn b/catapult/third_party/vinn/bin/vinn
index 5c2e6d9..89e48f7 100755
--- a/catapult/third_party/vinn/bin/vinn
+++ b/catapult/third_party/vinn/bin/vinn
@@ -7,8 +7,8 @@
 
 sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
 
-from vinn import d8_runner
+from vinn import _vinn
 
 
 if __name__ == '__main__':
-  sys.exit(d8_runner.main())
+  sys.exit(_vinn.main())
diff --git a/catapult/third_party/vinn/run_test b/catapult/third_party/vinn/run_test
index 5c2f8f9..12c9145 100755
--- a/catapult/third_party/vinn/run_test
+++ b/catapult/third_party/vinn/run_test
@@ -2,12 +2,43 @@
 # Copyright (c) 2015 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
+import argparse
 import os
+import sys
 import unittest
 
-if __name__ == '__main__':
+
+def _IterateTestCase(test):
+  if isinstance(test, unittest.TestCase):
+    yield test
+  else:
+    for t in test:
+      for u in _IterateTestCase(t):
+        yield u
+
+
+def main(args):
+  parser = argparse.ArgumentParser(description='Run all vinn tests')
+  parser.add_argument('test_name', type=str, nargs='?',
+                      help=('Specify a specific test to run. If this is empty, '
+                            'all tests are run. (The name can be a substring '
+                            'of test names.)'))
+  options = parser.parse_args(args)
+  def _IsTestMatched(test):
+    if not options.test_name:
+      return True
+    return options.test_name in test.id()
+
   suite = unittest.TestSuite()
   vinn_dir = os.path.join(os.path.dirname(__file__), 'vinn')
-  suite.addTest(unittest.TestLoader().discover(
-    start_dir=vinn_dir, pattern='*test.py'))
-  unittest.TextTestRunner(verbosity=2).run(suite)
+  discover_tests = unittest.TestLoader().discover(
+    start_dir=vinn_dir, pattern='*test.py')
+  for t in _IterateTestCase(discover_tests):
+    if _IsTestMatched(t):
+      suite.addTest(t)
+  results = unittest.TextTestRunner(verbosity=2).run(suite)
+  return len(results.failures)
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv[1:]))
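
The reworked run_test above flattens the discovered suite with `_IterateTestCase` and keeps only tests whose `id()` contains the optional `test_name` substring. A self-contained sketch of that pattern (the test case below is a made-up stand-in, not a real vinn test):

```python
# Sketch of the flatten-and-filter pattern used by run_test above; the example
# test case and the 'html_imports' filter string are illustrative.
import unittest


class _ExampleTest(unittest.TestCase):

    def test_html_imports_loader(self):
        pass

    def test_js_execution(self):
        pass


def iterate_test_cases(test):
    """Yield every TestCase inside a (possibly nested) suite."""
    if isinstance(test, unittest.TestCase):
        yield test
    else:
        for sub in test:
            for case in iterate_test_cases(sub):
                yield case


discovered = unittest.TestLoader().loadTestsFromTestCase(_ExampleTest)
suite = unittest.TestSuite(
    t for t in iterate_test_cases(discovered) if 'html_imports' in t.id())
unittest.TextTestRunner(verbosity=2).run(suite)
```
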
diff --git a/catapult/third_party/vinn/third_party/v8/README.chromium b/catapult/third_party/vinn/third_party/v8/README.chromium
index 2e028c9..a9eec70 100644
--- a/catapult/third_party/vinn/third_party/v8/README.chromium
+++ b/catapult/third_party/vinn/third_party/v8/README.chromium
@@ -1,11 +1,11 @@
-Name: Chrome d8 for executing javascript
-Short Name: d8
-URL: https://developers.google.com/v8/
-Version: 4.5.103.29 (linux), 4.5.103.29 (mac)
-License: BSD
-License File: LICENSE, LICENSE.v8, LICENSE.strongtalk, LICENSE.valgrind
-Security Critical: no
-Description: d8 binaries is used for executing trace-viewer's trace analyzing
-code.
-Local modifications: We ignore the source code & only keep the d8 binaries. Use
-tracing/bin/update_v8 to update the binary on your current OS.
+Name: Chrome d8 for executing javascript
+Short Name: d8
+URL: https://developers.google.com/v8/
+Version: 4.5.103.29 (linux), 4.5.103.29 (mac), 4.8.271.20 (win)
+License: BSD
+License File: LICENSE, LICENSE.v8, LICENSE.strongtalk, LICENSE.valgrind
+Security Critical: no
+Description: d8 binaries is used for executing trace-viewer's trace analyzing
+code.
+Local modifications: We ignore the source code & only keep the d8 binaries. Use
+tracing/bin/update_v8 to update the binary on your current OS.
diff --git a/catapult/third_party/vinn/third_party/v8/win/AMD64/d8.exe b/catapult/third_party/vinn/third_party/v8/win/AMD64/d8.exe
new file mode 100644
index 0000000..b1f88cd
--- /dev/null
+++ b/catapult/third_party/vinn/third_party/v8/win/AMD64/d8.exe
Binary files differ
diff --git a/catapult/third_party/vinn/third_party/v8/win/AMD64/icudt.dll b/catapult/third_party/vinn/third_party/v8/win/AMD64/icudt.dll
new file mode 100644
index 0000000..0e0fff1
--- /dev/null
+++ b/catapult/third_party/vinn/third_party/v8/win/AMD64/icudt.dll
Binary files differ
diff --git a/catapult/third_party/vinn/third_party/v8/win/AMD64/msvcp120.dll b/catapult/third_party/vinn/third_party/v8/win/AMD64/msvcp120.dll
new file mode 100644
index 0000000..a237d2d
--- /dev/null
+++ b/catapult/third_party/vinn/third_party/v8/win/AMD64/msvcp120.dll
Binary files differ
diff --git a/catapult/third_party/vinn/third_party/v8/win/AMD64/msvcr120.dll b/catapult/third_party/vinn/third_party/v8/win/AMD64/msvcr120.dll
new file mode 100644
index 0000000..8c36149
--- /dev/null
+++ b/catapult/third_party/vinn/third_party/v8/win/AMD64/msvcr120.dll
Binary files differ
diff --git a/catapult/third_party/vinn/third_party/v8/win/AMD64/pgort120.dll b/catapult/third_party/vinn/third_party/v8/win/AMD64/pgort120.dll
new file mode 100644
index 0000000..0a86bb4
--- /dev/null
+++ b/catapult/third_party/vinn/third_party/v8/win/AMD64/pgort120.dll
Binary files differ
diff --git a/catapult/third_party/vinn/vinn/_vinn.py b/catapult/third_party/vinn/vinn/_vinn.py
index 95857a9..3f8bfc8 100644
--- a/catapult/third_party/vinn/vinn/_vinn.py
+++ b/catapult/third_party/vinn/vinn/_vinn.py
@@ -10,6 +10,7 @@
 import shutil
 import subprocess
 import sys
+import re
 import json
 import tempfile
 
@@ -26,9 +27,15 @@
 _BOOTSTRAP_JS_DIR = os.path.abspath(
     os.path.join(os.path.dirname(__file__), 'd8_bootstrap.js'))
 
+_BASE64_COMPAT_DIR = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), 'base64_compat.js'))
+
 _PATH_UTILS_JS_DIR = os.path.abspath(
     os.path.join(os.path.dirname(__file__), 'path_utils.js'))
 
+_HTML_IMPORTS_LOADER_JS_DIR = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), 'html_imports_loader.js'))
+
 _HTML_TO_JS_GENERATOR_JS_DIR = os.path.abspath(
     os.path.join(os.path.dirname(__file__), 'html_to_js_generator.js'))
 
@@ -45,7 +52,16 @@
     assert os.path.isabs(x)
 
 
+def _EscapeJsString(s):
+  assert isinstance(s, str)
+  return json.dumps(s)
+
+def _RenderTemplateStringForJsSource(source, template, replacement_string):
+  return source.replace(template, _EscapeJsString(replacement_string))
+
+
 def _GetBootStrapJsContent(source_paths):
+  assert isinstance(source_paths, list)
   global _BOOTSTRAP_JS_CONTENT
   if not _BOOTSTRAP_JS_CONTENT:
     with open(_BOOTSTRAP_JS_DIR, 'r') as f:
@@ -53,15 +69,24 @@
 
   bsc = _BOOTSTRAP_JS_CONTENT
 
+
   # Ensure that source paths are unique.
   source_paths = list(set(source_paths))
-  source_path_string = json.dumps(source_paths)
-  bsc = bsc.replace('<%source_paths%>', source_path_string)
-  bsc = bsc.replace('<%current_working_directory%>', os.getcwd())
-  bsc = bsc.replace('<%path_utils_js_path%>', _PATH_UTILS_JS_DIR)
-  bsc = bsc.replace('<%html_to_js_generator_js_path%>',
-                    _HTML_TO_JS_GENERATOR_JS_DIR)
-  bsc = bsc.replace('<%js_parser_path%>', _JS_PARSER_DIR)
+  source_paths_string = '[%s]' % (
+      ','.join(_EscapeJsString(s) for s in source_paths))
+  bsc = bsc.replace('<%source_paths%>', source_paths_string)
+  bsc = _RenderTemplateStringForJsSource(
+      bsc, '<%current_working_directory%>', os.getcwd())
+  bsc = _RenderTemplateStringForJsSource(
+      bsc, '<%path_utils_js_path%>', _PATH_UTILS_JS_DIR)
+  bsc = _RenderTemplateStringForJsSource(
+      bsc, '<%html_imports_loader_js_path%>', _HTML_IMPORTS_LOADER_JS_DIR)
+  bsc = _RenderTemplateStringForJsSource(
+      bsc, '<%html_to_js_generator_js_path%>', _HTML_TO_JS_GENERATOR_JS_DIR)
+  bsc = _RenderTemplateStringForJsSource(
+      bsc, '<%js_parser_path%>', _JS_PARSER_DIR)
+  bsc = _RenderTemplateStringForJsSource(
+      bsc, '<%base64_compat_path%>', _BASE64_COMPAT_DIR)
   bsc += '\n//@ sourceURL=%s\n' % _BOOTSTRAP_JS_DIR
   return bsc
 
@@ -80,9 +105,12 @@
     return os.path.join(_V8_DIR, 'linux', 'x86_64', 'd8')
   elif platform.system() == 'Darwin' and platform.machine() == 'x86_64':
     return os.path.join(_V8_DIR, 'mac', 'x86_64', 'd8')
+  elif platform.system() == 'Windows' and platform.machine() == 'AMD64':
+    return os.path.join(_V8_DIR, 'win', 'AMD64', 'd8.exe')
   else:
     raise NotImplementedError(
-        'd8 binary for this platform and architecture is not yet supported')
+        'd8 binary for this platform (%s) and architecture (%s) is not yet'
+        ' supported' % (platform.system(), platform.machine()))
 
 
 class RunResult(object):
@@ -129,7 +157,7 @@
   if source_paths is None:
     source_paths = [os.path.dirname(file_path)]
 
-  abs_file_path = os.path.abspath(file_path)
+  abs_file_path_str = _EscapeJsString(os.path.abspath(file_path))
 
   try:
     temp_dir = tempfile.mkdtemp()
@@ -137,9 +165,10 @@
     with open(temp_boostrap_file, 'w') as f:
       f.write(_GetBootStrapJsContent(source_paths))
       if extension == '.html':
-        f.write('\nloadHTMLFile("%s", "%s");' % (abs_file_path, abs_file_path))
+        f.write('\nHTMLImportsLoader.loadHTMLFile(%s, %s);' %
+                (abs_file_path_str, abs_file_path_str))
       else:
-        f.write('\nloadFile("%s");' % abs_file_path)
+        f.write('\nHTMLImportsLoader.loadFile(%s);' % abs_file_path_str)
     return _RunFileWithD8(temp_boostrap_file, js_args, v8_args, stdout, stdin)
   finally:
     shutil.rmtree(temp_dir)
@@ -196,10 +225,17 @@
     full_js_args += js_args
 
   args += ['--js_arguments'] + full_js_args
+
   # Set stderr=None since d8 doesn't write into stderr anyway.
   sp = subprocess.Popen(args, stdout=stdout, stderr=None, stdin=stdin)
   out, _ = sp.communicate()
 
+  # On Windows, d8's print() method adds carriage return characters (\r)
+  # before newlines, which makes the output differ from d8 on posix. We remove
+  # the extra \r's to keep the output consistent with posix platforms.
+  if platform.system() == 'Windows' and out:
+    out = re.sub('\r+\n', '\n', out)
+
   # d8 uses returncode 1 to indicate an uncaught exception, but
  # _RunFileWithD8 needs to distinguish between that and quit(1).
   #
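
The `_EscapeJsString` helper introduced above leans on `json.dumps` to produce a double-quoted, backslash-escaped literal that is also a valid JavaScript string, which is what makes Windows paths safe to substitute into d8_bootstrap.js. A small sketch (the `<%path%>` placeholder and the path are illustrative, not the real template keys):

```python
# Sketch of escaping a path for embedding in generated JS via json.dumps; the
# placeholder name and path are illustrative.
import json


def escape_js_string(s):
    assert isinstance(s, str)
    return json.dumps(s)


template = 'load(<%path%>);'
win_path = 'C:\\catapult\\vinn\\d8_bootstrap.js'
print(template.replace('<%path%>', escape_js_string(win_path)))
# -> load("C:\\catapult\\vinn\\d8_bootstrap.js");
```
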
diff --git a/catapult/third_party/vinn/vinn/base64_compat.js b/catapult/third_party/vinn/vinn/base64_compat.js
new file mode 100644
index 0000000..c794c03
--- /dev/null
+++ b/catapult/third_party/vinn/vinn/base64_compat.js
@@ -0,0 +1,73 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+/* This is a Base64 Polyfill adapted from
+ * https://github.com/davidchambers/Base64.js/blob/0.3.0/,
+ * which has a "do whatever you want" license,
+ * https://github.com/davidchambers/Base64.js/blob/0.3.0/LICENSE.
+ */
+(function(global) {
+  var chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' +
+      '0123456789+/=';
+
+  function InvalidCharacterError(message) {
+    this.message = message;
+  }
+  InvalidCharacterError.prototype = new Error;
+  InvalidCharacterError.prototype.name = 'InvalidCharacterError';
+
+  // encoder
+  // [https://gist.github.com/999166] by [https://github.com/nignag]
+  global.btoa = function(input) {
+    var str = String(input);
+    for (
+        // Initialize result and counter.
+        var block, charCode, idx = 0, map = chars, output = '';
+        // If the next str index does not exist:
+        //   change the mapping table to "="
+        //   check if d has no fractional digits
+        str.charAt(idx | 0) || (map = '=', idx % 1);
+        // "8 - idx % 1 * 8" generates the sequence 2, 4, 6, 8.
+        output += map.charAt(63 & block >> 8 - idx % 1 * 8)) {
+      charCode = str.charCodeAt(idx += 3 / 4);
+      if (charCode > 0xFF) {
+        throw new InvalidCharacterError(
+            '\'btoa\' failed: The string to be encoded contains characters ' +
+            'outside of the Latin1 range.');
+      }
+      block = block << 8 | charCode;
+    }
+    return output;
+  };
+
+  // decoder
+  // [https://gist.github.com/1020396] by [https://github.com/atk]
+  global.atob = function(input) {
+    var str = String(input).replace(/=+$/, '');
+    if (str.length % 4 == 1) {
+      throw new InvalidCharacterError(
+          '\'atob\' failed: The string to be decoded is not ' +
+          'correctly encoded.');
+    }
+    for (
+        // Initialize result and counters.
+        var bc = 0, bs, buffer, idx = 0, output = '';
+        // Get next character.
+        buffer = str.charAt(idx++);
+        // Character found in table? initialize bit storage and add its
+        // ascii value;
+        ~buffer && (bs = bc % 4 ? bs * 64 + buffer : buffer,
+            // And if not first of each 4 characters,
+            // convert the first 8 bits to one ascii character.
+            bc++ % 4) ? output += String.fromCharCode(
+                  255 & bs >> (-2 * bc & 6)) : 0) {
+      // Try to find character in table (0-63, not found => -1).
+      buffer = chars.indexOf(buffer);
+    }
+    return output;
+  };
+
+})(this);
diff --git a/catapult/third_party/vinn/vinn/d8_bootstrap.js b/catapult/third_party/vinn/vinn/d8_bootstrap.js
index 1293f8b..15ca295 100644
--- a/catapult/third_party/vinn/vinn/d8_bootstrap.js
+++ b/catapult/third_party/vinn/vinn/d8_bootstrap.js
@@ -104,210 +104,43 @@
     }
   }
 
-  /* This is a Base64 Polyfill adapted from
-   * https://github.com/davidchambers/Base64.js/blob/0.3.0/,
-   * which has a "do whatever you want" license,
-   * https://github.com/davidchambers/Base64.js/blob/0.3.0/LICENSE.
-   */
-  (function() {
-    var chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' +
-        '0123456789+/=';
+  var path_to_base64_compat = <%base64_compat_path%>;
+  load(path_to_base64_compat);
 
-    function InvalidCharacterError(message) {
-      this.message = message;
-    }
-    InvalidCharacterError.prototype = new Error;
-    InvalidCharacterError.prototype.name = 'InvalidCharacterError';
-
-
-    // encoder
-    // [https://gist.github.com/999166] by [https://github.com/nignag]
-    global.btoa = function(input) {
-      var str = String(input);
-      for (
-          // Initialize result and counter.
-          var block, charCode, idx = 0, map = chars, output = '';
-          // If the next str index does not exist:
-          //   change the mapping table to "="
-          //   check if d has no fractional digits
-          str.charAt(idx | 0) || (map = '=', idx % 1);
-          // "8 - idx % 1 * 8" generates the sequence 2, 4, 6, 8.
-          output += map.charAt(63 & block >> 8 - idx % 1 * 8)) {
-        charCode = str.charCodeAt(idx += 3 / 4);
-        if (charCode > 0xFF) {
-          throw new InvalidCharacterError(
-              '\'btoa\' failed: The string to be encoded contains characters ' +
-              'outside of the Latin1 range.');
-        }
-        block = block << 8 | charCode;
-      }
-      return output;
-    };
-
-    // decoder
-    // [https://gist.github.com/1020396] by [https://github.com/atk]
-    global.atob = function(input) {
-      var str = String(input).replace(/=+$/, '');
-      if (str.length % 4 == 1) {
-        throw new InvalidCharacterError(
-            '\'atob\' failed: The string to be decoded is not ' +
-            'correctly encoded.');
-      }
-      for (
-          // Initialize result and counters.
-          var bc = 0, bs, buffer, idx = 0, output = '';
-          // Get next character.
-          buffer = str.charAt(idx++);
-          // Character found in table? initialize bit storage and add its
-          // ascii value;
-          ~buffer && (bs = bc % 4 ? bs * 64 + buffer : buffer,
-              // And if not first of each 4 characters,
-              // convert the first 8 bits to one ascii character.
-              bc++ % 4) ? output += String.fromCharCode(
-                    255 & bs >> (-2 * bc & 6)) : 0) {
-        // Try to find character in table (0-63, not found => -1).
-        buffer = chars.indexOf(buffer);
-      }
-      return output;
-    };
-
-  })();
+  // We deliberately call eval() on the content of parse5.js instead of using
+  // load() because load() does not hoist the |global| variable in this method
+  // to parse5.js (which exports its modules to |global|).
+  //
+  // This is because d8's load('xyz.js') does not hoist non-global variables in
+  // the caller's environment to xyz.js, no matter where load() is called.
+  global.path_to_js_parser = <%js_parser_path%>;
+  eval(read(global.path_to_js_parser));
 
   // Bring in html_to_js_generator.
-  global.path_to_js_parser = '<%js_parser_path%>';
-  load('<%html_to_js_generator_js_path%>');
+  global.path_to_js_parser = <%js_parser_path%>;
+  load(<%html_to_js_generator_js_path%>);
+
+  // Bring in html imports loader.
+  load(<%html_imports_loader_js_path%>);
+  global.HTMLImportsLoader.addArrayToSourcePath(<%source_paths%>);
 
   // Bring in path utils.
-  load('<%path_utils_js_path%>');
-
-  var d8_path_utils = new PathUtils(
+  load(<%path_utils_js_path%>);
+  var pathUtils = new PathUtils(
       {
-        currentWorkingDirectory: '<%current_working_directory%>',
+        currentWorkingDirectory: <%current_working_directory%>,
         exists: function(fileName) {
           try {
             // Try a dummy read to check whether file_path exists.
             // TODO(nednguyen): find a more efficient way to check whether
             // some file path exists in d8.
-            read(fileName);
+            readbuffer(fileName);
             return true;
           } catch (err) {
             return false;
           }
         }
       });
+  global.HTMLImportsLoader.setPathUtils(pathUtils);
 
-  /**
-   * Strips the starting '/' in file_path if |file_path| is meant to be a
-   * relative path.
-   *
-   * @param {string} file_path path to some file, can be relative or absolute
-   * path.
-   * @return {string} the file_path with starting '/' removed if |file_path|
-   * does not exist or the original |file_path| otherwise.
-   */
-  function _stripStartingSlashIfNeeded(file_path) {
-    if (file_path.substring(0, 1) !== '/') {
-      return file_path;
-    }
-    if (d8_path_utils.exists(file_path))
-      return file_path;
-    return file_path.substring(1);
-  }
-
-  var sourcePaths = JSON.parse('<%source_paths%>');
-
-  global.hrefToAbsolutePath = function(href) {
-    var pathPart;
-    if (!d8_path_utils.isAbs(href)) {
-      throw new Error('Found a non absolute import and thats not supported: ' +
-                      href);
-    } else {
-      pathPart = href.substring(1);
-    }
-
-    var candidates = [];
-    for (var i = 0; i < sourcePaths.length; i++) {
-      var candidate = d8_path_utils.join(sourcePaths[i], pathPart);
-      if (d8_path_utils.exists(candidate))
-        candidates.push(candidate);
-    }
-    if (candidates.length > 1) {
-      throw new Error('Multiple candidates found for ' + href + ': ' +
-          candidates + '\nSource paths:\n' + sourcePaths.join(',\n'));
-    }
-    if (candidates.length === 0) {
-      throw new Error(href + ' not found!' +
-          '\nSource paths:\n' + sourcePaths.join(',\n'));
-    }
-    return candidates[0];
-  }
-
-  var loadedModulesByFilePath = {};
-
-  /**
-   * Load a HTML file, which absolute path or path relative to <%search-path%>.
-   * Unlike the native load() method of d8, variables declared in |file_path|
-   * will not be hoisted to the caller environment. For example:
-   *
-   * a.html:
-   * <script>
-   *   var x = 1;
-   * </script>
-   *
-   * test.js:
-   * loadHTML("a.html");
-   * print(x);  // <- ReferenceError is thrown because x is not defined.
-   *
-   * @param {string} file_path path to the HTML file to be loaded.
-   */
-  global.loadHTML = function(href) {
-    var absPath = global.hrefToAbsolutePath(href);
-    global.loadHTMLFile(absPath, href);
-  };
-
-  global.loadScript = function(href) {
-    var absPath = global.hrefToAbsolutePath(href);
-    global.loadFile(absPath, href);
-  };
-
-  global.loadHTMLFile = function(absPath, opt_href) {
-    var href = opt_href || absPath;
-    if (loadedModulesByFilePath[absPath])
-      return;
-    loadedModulesByFilePath[absPath] = true;
-    try {
-      var html_content = read(absPath);
-    } catch (err) {
-      throw new Error('Error in loading html file ' + href +
-          ': File does not exist');
-    }
-
-    try {
-      var stripped_js = generateJsFromHTML(html_content);
-    } catch (err) {
-      throw new Error('Error in loading html file ' + href + ': ' + err);
-    }
-
-    // If there is blank line at the beginning of generated js, we add
-    // "//@ sourceURL=|file_path|" to the beginning of generated source so
-    // the stack trace show the source file even in case of syntax error.
-    // If not, we add it to the end of generated source to preserve the line
-    // number.
-    if (stripped_js.startsWith('\n')) {
-      stripped_js = '//@ sourceURL=' + href + stripped_js;
-    } else {
-      stripped_js = stripped_js + '\n//@ sourceURL=' + href;
-    }
-    eval(stripped_js);
-  };
-
-  global.loadFile = function(absPath, opt_href) {
-    var href = opt_href || absPath;
-    var relPath = d8_path_utils.relPath(absPath);
-    try {
-      load(relPath);
-    } catch (err) {
-      throw new Error('Error in loading script file ' + href + ': ' + err);
-    }
-  };
 })(this, arguments);
diff --git a/catapult/third_party/vinn/vinn/html_imports_loader.js b/catapult/third_party/vinn/vinn/html_imports_loader.js
new file mode 100644
index 0000000..1a42bec
--- /dev/null
+++ b/catapult/third_party/vinn/vinn/html_imports_loader.js
@@ -0,0 +1,168 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+/**
+ * @fileoverview Provides tools for loading an HTML import file into D8.
+ */
+(function(global) {
+  var isNode = global.process && global.process.versions.node;
+
+  var fs;
+  if (isNode)
+    fs = require('fs');
+
+  var pathUtils = undefined;
+  function setPathUtils(newPathUtils) {
+    if (pathUtils !== undefined)
+      throw new Error('Already set');
+    pathUtils = newPathUtils;
+  }
+
+  function readFileContents(fileName) {
+    if (!isNode)
+      return read(fileName);
+    return fs.readFileSync(fileName, 'utf8');
+  }
+
+  /**
+   * Strips the starting '/' in file_path if |file_path| is meant to be a
+   * relative path.
+   *
+   * @param {string} file_path path to some file, can be relative or absolute
+   * path.
+   * @return {string} the file_path with starting '/' removed if |file_path|
+   * does not exist or the original |file_path| otherwise.
+   */
+  function _stripStartingSlashIfNeeded(file_path) {
+    if (file_path.substring(0, 1) !== '/') {
+      return file_path;
+    }
+    if (pathUtils.exists(file_path))
+      return file_path;
+    return file_path.substring(1);
+  }
+
+  var sourcePaths = [];
+
+  function addArrayToSourcePath(paths) {
+    for (var i = 0; i < paths.length; i++) {
+      if (sourcePaths.indexOf(paths[i]) !== -1)
+        continue;
+      sourcePaths.push(paths[i]);
+    }
+  }
+
+  function hrefToAbsolutePath(href) {
+    var pathPart;
+    if (!pathUtils.isAbs(href)) {
+      throw new Error('Found a non absolute import and thats not supported: ' +
+                      href);
+    } else {
+      pathPart = href.substring(1);
+    }
+
+    var candidates = [];
+    for (var i = 0; i < sourcePaths.length; i++) {
+      var candidate = pathUtils.join(sourcePaths[i], pathPart);
+      if (pathUtils.exists(candidate))
+        candidates.push(candidate);
+    }
+    if (candidates.length > 1) {
+      throw new Error('Multiple candidates found for ' + href + ': ' +
+          candidates + '\nSource paths:\n' + sourcePaths.join(',\n'));
+    }
+    if (candidates.length === 0) {
+      throw new Error(href + ' not found!' +
+          '\nSource paths:\n' + sourcePaths.join(',\n'));
+    }
+    return candidates[0];
+  }
+
+  var loadedModulesByFilePath = {};
+
+  /**
+   * Load an HTML file by absolute path or by a path relative to <%search-path%>.
+   * Unlike the native load() method of d8, variables declared in |file_path|
+   * will not be hoisted to the caller environment. For example:
+   *
+   * a.html:
+   * <script>
+   *   var x = 1;
+   * </script>
+   *
+   * test.js:
+   * loadHTML("a.html");
+   * print(x);  // <- ReferenceError is thrown because x is not defined.
+   *
+   * @param {string} file_path path to the HTML file to be loaded.
+   */
+  function loadHTML(href) {
+    var absPath = hrefToAbsolutePath(href);
+    loadHTMLFile.call(global, absPath, href);
+  };
+
+  function loadScript(href) {
+    var absPath = hrefToAbsolutePath(href);
+    loadFile.call(global, absPath, href);
+  };
+
+  function loadHTMLFile(absPath, opt_href) {
+    var href = opt_href || absPath;
+    if (loadedModulesByFilePath[absPath])
+      return;
+    loadedModulesByFilePath[absPath] = true;
+    try {
+      var html_content = readFileContents(absPath);
+    } catch (err) {
+      throw new Error('Error in loading html file ' + href +
+          ': File does not exist');
+    }
+
+    try {
+      var stripped_js = generateJsFromHTML(html_content);
+    } catch (err) {
+      throw new Error('Error in loading html file ' + href + ': ' + err);
+    }
+
+    // If there is a blank line at the beginning of the generated js, we add
+    // "//@ sourceURL=|href|" to the beginning of the generated source so that
+    // the stack trace shows the source file even in case of a syntax error.
+    // If not, we add it to the end of the generated source to preserve the
+    // line numbers.
+    if (stripped_js.startsWith('\n')) {
+      stripped_js = '//@ sourceURL=' + href + stripped_js;
+    } else {
+      stripped_js = stripped_js + '\n//@ sourceURL=' + href;
+    }
+    eval(stripped_js);
+  };
+
+  function loadFile(absPath, opt_href) {
+    var href = opt_href || absPath;
+    try {
+      if (!isNode) {
+        load(absPath);
+      } else {
+        var relPath = pathUtils.relPath(absPath);
+        var contents = readFileContents(relPath);
+        eval(contents);
+      }
+    } catch (err) {
+      throw new Error('Error in loading script file ' + href + ': ' + err);
+    }
+  };
+
+  global.HTMLImportsLoader = {
+    setPathUtils: setPathUtils,
+    sourcePaths: sourcePaths,
+    addArrayToSourcePath: addArrayToSourcePath,
+    hrefToAbsolutePath: hrefToAbsolutePath,
+    loadHTML: loadHTML,
+    loadScript: loadScript,
+    loadHTMLFile: loadHTMLFile,
+    loadFile: loadFile
+  };
+})(this);
diff --git a/catapult/third_party/vinn/vinn/html_to_js_generator.js b/catapult/third_party/vinn/vinn/html_to_js_generator.js
index 889b94c..3e8d21a 100644
--- a/catapult/third_party/vinn/vinn/html_to_js_generator.js
+++ b/catapult/third_party/vinn/vinn/html_to_js_generator.js
@@ -6,18 +6,13 @@
 /**
  * @fileoverview Provides tools for parsing HTML to generate javascript, needed
  * for d8 bootstrapping process.
+ *
+ * This file depends on parse5.js, which must be loaded into the global scope
+ * before this file executes.
  */
 
 (function(global) {
 
-  // We deliberately call eval() on content of parse5.js instead of using load()
-  // because load() does not hoist the |global| variable in this method to
-  // parse5.js (which export its modules to |global|).
-  //
-  // This is because d8's load('xyz.js') does not hoist non global varibles in
-  // the caller's environment to xyz.js, no matter where load() is called.
-  eval(read(global.path_to_js_parser));
-
   var adapter = parse5.TreeAdapters.default;
   var parser = new parse5.Parser(adapter, {locationInfo: true});
   var serializer = new parse5.Serializer(adapter);
@@ -115,7 +110,7 @@
       if (!is_import_link)
         return [];
       var chunk = this.createJsChunk(
-          'loadHTML(\'' + href + '\');',
+          'global.HTMLImportsLoader.loadHTML(\'' + href + '\');',
           this.startLocation(node),
           this.startLineNumber(node));
       return [chunk];
@@ -130,7 +125,7 @@
       if (!src)
         return [];
       var chunk = this.createJsChunk(
-          'loadScript(\'' + src + '\');',
+          'global.HTMLImportsLoader.loadScript(\'' + src + '\');',
           this.startLocation(node),
           this.startLineNumber(node));
       return [chunk];
diff --git a/catapult/third_party/vinn/vinn/path_utils.js b/catapult/third_party/vinn/vinn/path_utils.js
index 6b0b0df..4d00700 100644
--- a/catapult/third_party/vinn/vinn/path_utils.js
+++ b/catapult/third_party/vinn/vinn/path_utils.js
@@ -12,7 +12,6 @@
  * be included directly by the boostrap.
  */
 (function(global) {
-
   /**
    *  Class provides common operations on pathnames.
    *  @param {object} os_client An object that defines:
@@ -43,7 +42,7 @@
     },
 
     isAbs: function(a) {
-      return a[0] === '/';
+      return a[0] === '/' || a[1] === ':';
     },
 
     join: function(a, b) {
@@ -64,6 +63,7 @@
       return a;
     },
 
+    /* TODO(nednguyen): fix this implementation on windows */
     absPath: function(a) {
       if (this.isAbs(a))
         return a;
diff --git a/catapult/third_party/vinn/vinn/test_data/error_stack_test.js b/catapult/third_party/vinn/vinn/test_data/error_stack_test.js
index b289e07..7d6b977 100644
--- a/catapult/third_party/vinn/vinn/test_data/error_stack_test.js
+++ b/catapult/third_party/vinn/vinn/test_data/error_stack_test.js
@@ -10,5 +10,5 @@
 this.throw_error = true;
 
 
-loadHTML('/load_simple_html.html');
+HTMLImportsLoader.loadHTML('/load_simple_html.html');
 maybeRaiseExceptionInFoo();
diff --git a/catapult/third_party/vinn/vinn/test_data/foo.html b/catapult/third_party/vinn/vinn/test_data/foo.html
index d932864..40fe1f2 100644
--- a/catapult/third_party/vinn/vinn/test_data/foo.html
+++ b/catapult/third_party/vinn/vinn/test_data/foo.html
@@ -18,7 +18,7 @@
 <script>
   'use strict';
   var x = 1;
-  loadScript('/error.js');
+  HTMLImportsLoader.loadScript('/error.js');
   print('File foo.html is loaded');
   print('x = ' + x);
 </script>
diff --git a/catapult/third_party/vinn/vinn/test_data/load_error_2.html b/catapult/third_party/vinn/vinn/test_data/load_error_2.html
index c5630de..044f9c2 100644
--- a/catapult/third_party/vinn/vinn/test_data/load_error_2.html
+++ b/catapult/third_party/vinn/vinn/test_data/load_error_2.html
@@ -18,7 +18,7 @@
   print('File load_error2.html is loaded');
 
   // The import below should raise a loading exception for d8.
-  loadHTML('/does_not_exist.html');
+  HTMLImportsLoader.loadHTML('/does_not_exist.html');
 </script>
 
 
diff --git a/catapult/third_party/vinn/vinn/test_data/load_js_error_2.html b/catapult/third_party/vinn/vinn/test_data/load_js_error_2.html
index fb974c0..e0e1376 100644
--- a/catapult/third_party/vinn/vinn/test_data/load_js_error_2.html
+++ b/catapult/third_party/vinn/vinn/test_data/load_js_error_2.html
@@ -17,7 +17,7 @@
   print('File load_error2.html is loaded');
 
   // The script src below should raise a loading exception for d8.
-  loadScript('/does_not_exist.js');
+  HTMLImportsLoader.loadScript('/does_not_exist.js');
 </script>
 
 <div>
diff --git a/catapult/third_party/vinn/vinn/test_data/load_simple_html.js b/catapult/third_party/vinn/vinn/test_data/load_simple_html.js
index 4a2b9ba..224e54d 100644
--- a/catapult/third_party/vinn/vinn/test_data/load_simple_html.js
+++ b/catapult/third_party/vinn/vinn/test_data/load_simple_html.js
@@ -1,5 +1,5 @@
 // Copyright (c) 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
-loadHTML('/foo.html');
+HTMLImportsLoader.loadHTML('/foo.html');
 print('load_simple_html.js is loaded');
diff --git a/catapult/third_party/vinn/vinn/test_data/load_simple_js.js b/catapult/third_party/vinn/vinn/test_data/load_simple_js.js
index 8662f90..98df67d 100644
--- a/catapult/third_party/vinn/vinn/test_data/load_simple_js.js
+++ b/catapult/third_party/vinn/vinn/test_data/load_simple_js.js
@@ -1,5 +1,5 @@
 // Copyright (c) 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
-loadScript('/bar.js');
+HTMLImportsLoader.loadScript('/bar.js');
 print('load_simple_js.js is loaded');
diff --git a/catapult/third_party/vinn/vinn/vinn_unittest.py b/catapult/third_party/vinn/vinn/vinn_unittest.py
index e9657de..922e68c 100644
--- a/catapult/third_party/vinn/vinn/vinn_unittest.py
+++ b/catapult/third_party/vinn/vinn/vinn_unittest.py
@@ -2,6 +2,7 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+import json
 import os
 import re
 import sys
@@ -15,9 +16,10 @@
 
 import vinn
 
+def _EscapeJsString(s):
+  return json.dumps(s)
 
-@unittest.skipIf(sys.platform.startswith('win'),
-                 'd8 not yet supported on Windows.')
+
 class VinnUnittest(unittest.TestCase):
 
   @classmethod
@@ -109,7 +111,7 @@
 
   def testDuplicateSourcePaths(self):
     output = vinn.ExecuteJsString(
-      "loadHTML('/load_simple_html.html');",
+      "HTMLImportsLoader.loadHTML('/load_simple_html.html');",
       source_paths=[self.test_data_dir]*100)
     self.assertIn(
         'load_simple_html.html is loaded', output)
@@ -311,8 +313,6 @@
     self.assertTrue(b_duration > c_duration)
 
 
-@unittest.skipIf(sys.platform.startswith('win'),
-                 'd8 not yet supported on Windows.')
 class PathUtilUnittest(unittest.TestCase):
   def testPathUtil(self):
     path_util_js_test = os.path.abspath(os.path.join(
@@ -321,10 +321,11 @@
     os.path.join(os.path.dirname(__file__), 'path_utils.js'))
 
     test_loading_js = """
-    load('%s');
-    load('%s');
+    load(%s);
+    load(%s);
     runTests();
-    """ % (path_utils_js_dir, path_util_js_test)
+    """ % (_EscapeJsString(path_utils_js_dir),
+           _EscapeJsString(path_util_js_test))
 
     res = vinn.RunJsString(test_loading_js)
     self.assertEquals(res.returncode, 0)
@@ -355,8 +356,6 @@
   return '\n'.join(results)
 
 
-@unittest.skipIf(sys.platform.startswith('win'),
-                 'd8 not yet supported on Windows.')
 class HTMLGeneratorTest(unittest.TestCase):
 
   def AssertStringEquals(self, actual, expected):
@@ -372,26 +371,27 @@
       with open(temp_file_name, 'w') as f:
         f.write(html_text)
       return vinn.ExecuteJsString(
-          'write(generateJsFromHTML(read("%s")));' % temp_file_name)
+          'write(generateJsFromHTML(read(%s)));' %
+          _EscapeJsString(temp_file_name))
     finally:
       shutil.rmtree(tmp_dir)
 
   def testGenerateJsForD8RunnerSimpleHTMLImport(self):
     html = '<link rel="import" href="/base/math.html">'
-    expected_js = "loadHTML('/base/math.html');"
+    expected_js = "global.HTMLImportsLoader.loadHTML('/base/math.html');"
     self.AssertStringEquals(self.GetGeneratedJs(html), expected_js)
 
   def testGenerateJSForD8RunnerImportMultilineHTMLImport(self):
     html = """
           <link rel="import"
           href="/base/math.html">"""
-    expected_js = "\nloadHTML('/base/math.html');"
+    expected_js = "\nglobal.HTMLImportsLoader.loadHTML('/base/math.html');"
     self.AssertStringEquals(self.GetGeneratedJs(html),
                             expected_js)
 
   def testGenerateJsForD8RunnerImportSimpleScriptWithSrc(self):
     html = '<script src="/base/math.js"></script>'
-    expected_js = "loadScript('/base/math.js');"
+    expected_js = "global.HTMLImportsLoader.loadScript('/base/math.js');"
     self.AssertStringEquals(self.GetGeneratedJs(html),
                             expected_js)
 
@@ -400,7 +400,7 @@
                   type="text/javascript"
                   src="/base/math.js">
                   </script>"""
-    expected_js = """loadScript('/base/math.js');
+    expected_js = """global.HTMLImportsLoader.loadScript('/base/math.js');
 
 
                   """
@@ -423,17 +423,17 @@
   href="/base/random.html">
 """
     expected_js = ("""
-loadHTML('/base.html');loadHTML('/base64.html');
-loadHTML('/base/math.html');
-loadScript('/base/3d.js');
+global.HTMLImportsLoader.loadHTML('/base.html');global.HTMLImportsLoader.loadHTML('/base64.html');
+global.HTMLImportsLoader.loadHTML('/base/math.html');
+global.HTMLImportsLoader.loadScript('/base/3d.js');
 
 
 
             """ + """
 
-loadScript('/base/math.js');
+global.HTMLImportsLoader.loadScript('/base/math.js');
 
-loadHTML('/base/random.html');""")
+global.HTMLImportsLoader.loadHTML('/base/random.html');""")
     self.AssertStringEquals(self.GetGeneratedJs(html),
                             expected_js)
 
@@ -475,7 +475,7 @@
                ];
     </script>
     """
-    expected_js = """loadScript('/base.js');
+    expected_js = """global.HTMLImportsLoader.loadScript('/base.js');
 var html_lines = [
                 '<script>',
                 '< /script>',
@@ -523,11 +523,11 @@
 
 
 
-loadHTML('/base/math.html');var x = 1;
-loadScript('/base/computer.js');
+global.HTMLImportsLoader.loadHTML('/base/math.html');var x = 1;
+global.HTMLImportsLoader.loadScript('/base/computer.js');
           var linux = os.system;  // line number of this is 9
         """ + """
-loadHTML('/base/physics.html');
+global.HTMLImportsLoader.loadHTML('/base/physics.html');
 
 
               var html_lines = [
@@ -544,7 +544,7 @@
               }
         """ + """
 
-loadHTML('/base/this_is_line_28.html');
+global.HTMLImportsLoader.loadHTML('/base/this_is_line_28.html');
 
           var i = '<link rel="import" href="/base/math.html">';
          """
diff --git a/catapult/tracing/.npmignore b/catapult/tracing/.npmignore
new file mode 100644
index 0000000..70d51d2
--- /dev/null
+++ b/catapult/tracing/.npmignore
@@ -0,0 +1,17 @@
+test_data
+skp_data
+third_party
+
+images
+tracing_build
+build
+
+app.yaml
+*_test.html
+
+*.gyp
+*.gypi
+*.gn
+
+*.py
+*.pyc
diff --git a/catapult/tracing/OWNERS b/catapult/tracing/OWNERS
index e34da9b..6e5d380 100644
--- a/catapult/tracing/OWNERS
+++ b/catapult/tracing/OWNERS
@@ -5,6 +5,10 @@
 nduca@chromium.org
 petrcermak@chromium.org
 dsinclair@chromium.org
+fmeawad@chromium.org
+charliea@chromium.org
+eakuefner@chromium.org
+
 
 # Tracing, in general, doesn't require an owners stamp to commit. If you
 # don't think the complexity requires the review of the above you can use
diff --git a/catapult/tracing/PRESUBMIT.py b/catapult/tracing/PRESUBMIT.py
index 9f7bc33..381e06d 100644
--- a/catapult/tracing/PRESUBMIT.py
+++ b/catapult/tracing/PRESUBMIT.py
@@ -5,27 +5,39 @@
 import sys
 
 
-def RunChecks(input_api, output_api):  # pylint: disable=unused-argument
-  results = []
-  from tracing_build import check_gypi
-  err = check_gypi.GypiCheck()
-  if err:
-    results += [err]
-  return map(output_api.PresubmitError, results)
-
-
-def CheckChange(input_api, output_api):
-  original_sys_path = sys.path
-  try:
-    sys.path += [input_api.PresubmitLocalPath()]
-    return RunChecks(input_api, output_api)
-  finally:
-    sys.path = original_sys_path
-
-
 def CheckChangeOnUpload(input_api, output_api):
-  return CheckChange(input_api, output_api)
+  return _CheckChange(input_api, output_api)
 
 
 def CheckChangeOnCommit(input_api, output_api):
-  return CheckChange(input_api, output_api)
+  return _CheckChange(input_api, output_api)
+
+
+def _CheckChange(input_api, output_api):
+  results = []
+
+  original_sys_path = sys.path
+  try:
+    sys.path += [input_api.PresubmitLocalPath()]
+    from tracing_build import check_gypi
+    error = check_gypi.GypiCheck()
+    if error:
+      results.append(output_api.PresubmitError(error))
+  finally:
+    sys.path = original_sys_path
+
+  results += input_api.RunTests(input_api.canned_checks.GetPylint(
+      input_api, output_api, extra_paths_list=_GetPathsToPrepend(input_api),
+      pylintrc='../pylintrc'))
+
+  return results
+
+
+def _GetPathsToPrepend(input_api):
+  project_dir = input_api.PresubmitLocalPath()
+  catapult_dir = input_api.os_path.join(project_dir, '..')
+  return [
+      project_dir,
+
+      input_api.os_path.join(catapult_dir, 'third_party', 'mock'),
+  ]
diff --git a/catapult/tracing/README.md b/catapult/tracing/README.md
index 9721bad..46dbe82 100644
--- a/catapult/tracing/README.md
+++ b/catapult/tracing/README.md
@@ -12,31 +12,40 @@
 files. Its particularly good at viewing linux kernel traces (aka [ftrace](https://www.kernel.org/doc/Documentation/trace/ftrace.txt)) and Chrome's
 [trace_event format](https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview). Trace viewer can be [embedded](https://github.com/catapult-project/catapult/wiki/Embedding-Trace-Viewer) as a component in your own code, or used from a plain checkout to turn trace files into standalone, emailable HTML files from the commandline:
 
-    ./tracing/trace2html my_trace.json --output=my_trace.html && open my_trace.html
+```
+$CATAPULT/tracing/bin/trace2html my_trace.json --output=my_trace.html && open my_trace.html
+```
 
 Its easy to [extend trace viewer](https://github.com/catapult-project/catapult/wiki/Extending-and-Customizing-Trace-Viewer) to support your favorite trace format, or add domain specific visualizations to the UI to simplify drilling down into complex data.
 
 Contributing, quick version
-===========================================================================
-We welcome contributions! To hack on this code, from toplevel:
-  ./bin/run_dev_server
+===
+We welcome contributions! To hack on this code, start by running the tests.
 
-In any browser, navigate to
-  http://localhost:8003/
+There are two types of tests.
 
-To run all python unittests:
-  ./tracing/run_py_tests
+### In the browser
 
-To run all tracing unittests in d8 environment:
- ./tracing/run_d8_tests
+Run the HTTP server `$CATAPULT/bin/run_dev_server`. In any browser, navigate to `http://localhost:8003/`.
 
-To run all the unittests, you can also do:
+**Unit tests** | **Description**
+--- | ---
+All tests | http://localhost:8003/tests.html
+All tests with short format | http://localhost:8003/tracing/tests.html?shortFormat
+An individual test suite (such as ui/foo_test.js) | http://localhost:8003/tests.html?testSuiteName=ui.foo
+Tests named foo | http://localhost:8003/tests.html?testFilterString=foo
 
- ./tracing/run_tests
+### On the command line
+
+**Unit tests**| **Description**
+--- | ---
+All python tests | `$CATAPULT/tracing/bin/run_py_tests`
+All tracing tests in d8 environment | `$CATAPULT/tracing/bin/run_vinn_tests`
+All tests | `$CATAPULT/tracing/bin/run_tests`
 
 Make sure tests pass before sending us changelist. **We use rietveld for codereview**. For more details, esp on rietveld, [read our contributing guide](https://github.com/catapult-project/catapult/blob/master/CONTRIBUTING.md) or check out the [trace viewer wiki](https://github.com/catapult-project/catapult/wiki/Trace-Viewer-Getting-Started).
 
 Contact Us
-===========================================================================
+===
 Join our Google Group:
 * [tracing@chromium.org](https://groups.google.com/a/chromium.org/forum/#!forum/tracing)
diff --git a/catapult/tracing/bin/PRESUBMIT.py b/catapult/tracing/bin/PRESUBMIT.py
index 7527192..799215d 100644
--- a/catapult/tracing/bin/PRESUBMIT.py
+++ b/catapult/tracing/bin/PRESUBMIT.py
@@ -2,7 +2,6 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 import os
-import sys
 
 def CheckChange(input_api, output_api):
   init_py_path = os.path.join(input_api.PresubmitLocalPath(), '__init__.py')
diff --git a/catapult/tracing/bin/run_metric b/catapult/tracing/bin/run_metric
new file mode 100755
index 0000000..76d8c66
--- /dev/null
+++ b/catapult/tracing/bin/run_metric
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import json
+import os
+import sys
+
+sys.path.insert(1, os.path.join(os.path.dirname(__file__), '..'))
+from tracing.metrics import metric_runner
+
+def Main(argv):
+  parser = argparse.ArgumentParser(
+      description='Runs metrics on local traces')
+  parser.add_argument('metric',
+                      help='A metric from tracing/metrics/')
+  parser.add_argument('trace_file_or_dir',
+                      help='A trace file, or a dir containing trace files')
+
+  args = parser.parse_args(argv[1:])
+  metric = args.metric
+
+  if os.path.isdir(args.trace_file_or_dir):
+    trace_dir = args.trace_file_or_dir
+    traces = [os.path.join(trace_dir, trace) for trace in os.listdir(trace_dir)]
+  else:
+    traces = [args.trace_file_or_dir]
+
+  results = {}
+  for trace in traces:
+    results[trace] = metric_runner.RunMetric(trace, metric).AsDict()
+
+  print json.dumps(results, indent=2, sort_keys=True, separators=(',', ': '))
+
+
+if __name__ == '__main__':
+  sys.exit(Main(sys.argv))
diff --git a/catapult/tracing/bin/run_node_tests b/catapult/tracing/bin/run_node_tests
new file mode 100755
index 0000000..d044e0f
--- /dev/null
+++ b/catapult/tracing/bin/run_node_tests
@@ -0,0 +1,23 @@
+#!/usr/bin/env node
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+'use strict';
+
+var fs = require('fs');
+var path = require('path');
+
+var catapultPath = fs.realpathSync(path.join(__dirname, '..', '..'));
+var catapultBuildPath = path.join(catapultPath, 'catapult_build');
+
+var node_bootstrap = require(path.join(catapultBuildPath, 'node_bootstrap.js'));
+
+HTMLImportsLoader.addArrayToSourcePath(
+    node_bootstrap.getSourcePathsForProject('tracing'));
+
+// Go!
+var headless_test_module_filenames =
+    node_bootstrap.getHeadlessTestModuleFilenamesForProject('tracing');
+
+HTMLImportsLoader.loadHTML('/tracing/base/headless_tests.html');
+tr.b.unittest.loadAndRunTests(headless_test_module_filenames);
\ No newline at end of file
diff --git a/catapult/tracing/bin/run_py_tests b/catapult/tracing/bin/run_py_tests
index 07af929..d5cf781 100755
--- a/catapult/tracing/bin/run_py_tests
+++ b/catapult/tracing/bin/run_py_tests
@@ -4,6 +4,7 @@
 # found in the LICENSE file.
 
 import os
+import platform
 import sys
 
 _CATAPULT_PATH = os.path.abspath(os.path.join(
@@ -32,5 +33,8 @@
     install.InstallHooks()
 
   from catapult_build import run_with_typ
+  # https://github.com/catapult-project/catapult/issues/2050
+  if platform.system() != 'Windows':
+    _RunTestsOrDie(os.path.join(_TRACING_PATH, 'tracing'))
   _RunTestsOrDie(os.path.join(_TRACING_PATH, 'tracing_build'))
   sys.exit(0)
diff --git a/catapult/tracing/bin/why_imported b/catapult/tracing/bin/why_imported
index e4ffef9..a8c7822 100755
--- a/catapult/tracing/bin/why_imported
+++ b/catapult/tracing/bin/why_imported
@@ -15,12 +15,10 @@
 $ dot -Grankdir=LR -Tpng ~/analysis_view.dot -o ~/analysis_view.png
 """
 
+import os
 import sys
 import argparse
 
-import tracing_project
-tracing_project.UpdateSysPathIfNeeded()
-
 
 def Main():
   project = tracing_project.TracingProject()
@@ -42,4 +40,9 @@
 
 
 if __name__ == '__main__':
+  tracing_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                 os.path.pardir))
+  sys.path.append(tracing_path)
+  import tracing_project
+  tracing_project.UpdateSysPathIfNeeded()
   sys.exit(Main())
diff --git a/catapult/tracing/docs/coordinate-systems.md b/catapult/tracing/docs/coordinate-systems.md
new file mode 100644
index 0000000..011fed4
--- /dev/null
+++ b/catapult/tracing/docs/coordinate-systems.md
@@ -0,0 +1,46 @@
+# Trace-Viewer Coordinate Systems
+
+## Coordinate Systems
+
+To represent browser content in trace-viewer we need to draw boxes and
+pictures created in one browser in the DOM of another browser window.
+How does a pixel in the output relate to a pixel in the original browser view?
+
+### Scaling
+
+The snapshot view lives in a quad-stack-viewer DOM element. This is an area
+of pixels in trace-viewer, for example 685x342 px.
+
+The quad-stack-viewer contains a view-container with a CSS transform. The
+transform will zoom (CSS scale), pan (CSS translateX, translateY),
+orient (CSS rotateX, rotateY) its contents, a canvas. Common scale factors
+range from 0.1 to 2.0. The transformation is controlled by user inputs.
+
+Internally the canvas has the _world_ coordinates.
+
+The _world_ coordinates completely enclose the boxes we may draw, plus some
+padding so the edges of boxes do not sit against the edge of the world. For
+example, padding space of 0.75 times the minimum of the width and height may be
+added. Since the original browser view has a few thousand pixels, the padded
+world may be 5,000-6,000 pixels on a side.
+
+The _world_ coordinates are scaled by several factors, which combine as
+sketched below:
+ * _quad_stack_scale_ adjusts the size of the canvas (e.g. 0.5).
+ * _devicePixelRatio_ adjusts for high-res devices (e.g. 1 or 2).
+ * _ui.RASTER_SCALE_ adjusts the size of the canvas (e.g. 0.75).
+
+*Do we still need RASTER_SCALE?*
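+
+As a rough illustration of how these factors combine (the variable names below
+are placeholders for this sketch, not the actual fields in the code):
+
+```
+// Illustrative only: names are made up for this sketch.
+var quadStackScale = 0.5;        // quad_stack_scale
+var devicePixelRatio = 2;        // window.devicePixelRatio on a high-res device
+var rasterScale = 0.75;          // ui.RASTER_SCALE
+
+var worldWidth = 5500;           // padded world size, in world pixels
+var canvasWidth = worldWidth * quadStackScale * devicePixelRatio * rasterScale;
+// canvasWidth === 4125 canvas pixels backing the 685px-wide viewer element.
+```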
+
+### Translation (origins)
+
+The quad-stack-viewer DOM element is positioned by CSS at some offset in the
+document. All of our origins are relative to the top left corner of the
+quad-stack-viewer.
+
+The CSS transforms move us from the DOM coordinate system to the world system.
+*What is the origin of the canvas in the DOM coordinate system
+when the final size of the canvas is less than the element?*
+
+The _deviceViewportRect_ is the visible browser window in _world_ coordinates.
+Typically it will be at X,Y = 0,0. Thus the _world_ origin will be, e.g.,
+-0.75\*3000px, -0.75\*2500px, due to the world padding.
diff --git a/catapult/tracing/docs/embedding-trace-viewer.md b/catapult/tracing/docs/embedding-trace-viewer.md
new file mode 100644
index 0000000..1d2d532
--- /dev/null
+++ b/catapult/tracing/docs/embedding-trace-viewer.md
@@ -0,0 +1,54 @@
+# Making standalone HTML files
+
+If you have a trace file that you want to turn into an HTML file with a viewer, then:
+
+```
+import os
+import sys
+
+sys.path.append(os.path.join(path_to_catapult, 'tracing'))
+from tracing_build import trace2html
+with open('my_trace.html', 'w') as new_file:
+  trace2html.WriteHTMLForTracesToFile(['my_trace.json'], new_file)
+```
+
+This will produce a standalone trace viewer with my_trace packed inside.
+
+# Embedding the Easy Way
+Running `$CATAPULT/tracing/bin/vulcanize_trace_viewer` will create `$CATAPULT/tracing/bin/trace_viewer_full.html`. That file has all the js, css and html-templates that you need for a standalone trace viewer instance.
+
+In your code, add `<link rel="import" href="trace_viewer_full.html">`. Then, to get a trace viewer up, you need to do two things: make the timeline viewer, then make a model and give it to the viewer:
+```
+    var container = document.createElement('track-view-container');
+    container.id = 'track_view_container';
+
+    viewer = document.createElement('tr-ui-timeline-view');
+    viewer.track_view_container = container;
+    viewer.appendChild(container);
+
+    viewer.id = 'trace-viewer';
+    viewer.globalMode = true;
+    document.body.appendChild(viewer);
+```
+
+With the viewer created, you need to then make a TraceModel:
+```
+    var model = new tr.Model();
+    var i = new tr.importer.Import(model);
+    var p = i.importTracesWithProgressDialog([result]);  // 'result' holds the raw trace data
+    p.then(function() {
+      viewer.model = model;
+    }, onImportFail);
+
+```
+
+The importer has a variety of options, from synchronous import (sketched
+below) to importTracesWithProgressDialog, and it lets you customize the
+postprocessing done on the model before it is displayed by the view.
+
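+If you do not need the progress dialog, a minimal synchronous sketch looks
+roughly like this (this assumes an importTraces() method exists alongside
+importTracesWithProgressDialog(), and that `result` holds your raw trace data):
+```
+    // Minimal sketch, under the assumptions stated above.
+    var model = new tr.Model();
+    var importer = new tr.importer.Import(model);
+    importer.importTraces([result]);
+    viewer.model = model;
+```
+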
+# Configs
+Trace viewer has a lot of extra pieces, for domain-specific use cases. By default, trace2html and vulcanize take everything and combine them together. We call this the "full" config. Passing --help to
+vulcanize or trace2html will show the current set of configs we support, which maps to
+`tracing/extras/*_config.html`. Some of the other configs are smaller, leading to a more compact redistributable.
+
+# Customizing
+For more information on how to customize and extend trace viewer, see [Extending-and-Customizing-Trace-Viewer](extending-and-customizing-trace-viewer.md).
+
+# Example
+See bin/index.html for an example of using the embedding system.
diff --git a/catapult/tracing/docs/extending-and-customizing-trace-viewer.md b/catapult/tracing/docs/extending-and-customizing-trace-viewer.md
new file mode 100644
index 0000000..aa493ce
--- /dev/null
+++ b/catapult/tracing/docs/extending-and-customizing-trace-viewer.md
@@ -0,0 +1,26 @@
+Though some concepts hold the same across all the trace formats we've encountered, there are always plenty of domain-specific details in a given expertise area that defy standard treatment.
+
+In trace-viewer, we distinguish between "core" pieces, which are domain-neutral and live in `tracing/core`, and domain-specific pieces, which live in `tracing/extras`. As such, core/ has a variety of extension points that extras/ then pulls in.
+
+# Importers
+TraceViewer is not tied to one specific trace file format: everyone has their own ideal way of getting performance data, storing it, and eventually getting it into the HTML file for viewing. And, since trace-viewer tries to be able to view traces from multiple systems all together, it may not even be possible to get traces into a single file format. That's fine, as we see it.
+
+The main unit of extension here is the Importer object, `tracing/importer/importer.html`. To teach trace viewer about a new file format, subclass that importer, then hook it up to `default_importers.html`. Voila, you have the beginnings of support for your format.
+
+When you call TraceModel.import, you pass an array of objects. We then run over this array one at a time and walk through the registered importers looking for one that `.canHandle` that trace. Once one is found, we assign that trace to the importer.
+
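+A very rough sketch of what such a subclass might look like. The registration
+and method names here (other than the `.canHandle`-style dispatch described
+above) are assumptions for illustration, not the real contract; check
+`tracing/importer/importer.html` and `default_importers.html` for the actual
+API:
+
+```
+<link rel="import" href="/tracing/importer/importer.html">
+<script>
+'use strict';
+// Hypothetical importer sketch; exact base-class hooks are assumptions.
+tr.exportTo('my.tracing', function() {
+  function MyFormatImporter(model, eventData) {
+    this.model_ = model;
+    this.eventData_ = eventData;
+  }
+
+  // Consulted while walking the registered importers to find one that can
+  // handle this chunk of trace data.
+  MyFormatImporter.canImport = function(eventData) {
+    return typeof eventData === 'string' && eventData.indexOf('MYFORMAT') === 0;
+  };
+
+  MyFormatImporter.prototype = {
+    __proto__: tr.importer.Importer.prototype,
+
+    importEvents: function() {
+      // Parse this.eventData_ and add processes/threads/slices to this.model_.
+    }
+  };
+
+  return {MyFormatImporter: MyFormatImporter};
+});
+</script>
+```
+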
+Because some trace formats are container formats, we support sub-traces, where an importer does a bit of processing, then yields another trace that needs more importing. This is, for instance, how we import gzip files.
+
+# Slice Views
+The display and storage of slices can be overridden based on their model-level name and category. This allows domain-specific customization of that particular type of data. Some keywords to search for are `SliceView.register` and `AsyncSlice.register`.
+
+One way this is used is to customize the display title of a slice. In the trace files and the model, slices with the "net" category are traced with titles that correspond to their probe point. And, the URL of a request is just one of many events in the trace, discovered quite late in the overall sequence of events. But, when viewing a network trace, the most interesting thing to see is the URL to which a trace corresponds. This transformation is accomplished by registering a custom net async slice which overrides the `displayTitle` property: this leaves the model intact (i.e. exactly as it was traced) but improves on the display.
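+
+A rough sketch of that pattern (the registration options and the
+`displayTitle` override below follow the prose here and are assumptions, not
+necessarily the exact current API):
+
+```
+'use strict';
+// Hypothetical domain-specific async slice that rewrites its display title.
+tr.exportTo('my.net', function() {
+  function MyNetAsyncSlice() {
+    tr.model.AsyncSlice.apply(this, arguments);
+  }
+
+  MyNetAsyncSlice.prototype = {
+    __proto__: tr.model.AsyncSlice.prototype,
+
+    // Show the request URL (assumed to be stored in the slice args) instead
+    // of the low-level probe-point title, without mutating the model.
+    get displayTitle() {
+      return (this.args && this.args.url) ? this.args.url : this.title;
+    }
+  };
+
+  tr.model.AsyncSlice.register(MyNetAsyncSlice, {categoryParts: ['net']});
+
+  return {MyNetAsyncSlice: MyNetAsyncSlice};
+});
+```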
+
+# Object Views and Types
+In Chrome, some of our traces have a complex and massive JSON dump from our graphics subsystem that, when
+interpreted in exactly the right way, lets us reconstruct a view of the page just from the trace.
+
+There are two extension points that make this possible:
+- We allow subtypes to be registered for ObjectSnapshots and ObjectInstances. This way you can build up a domain-specific model of the trace instead of having to parse the trace yourself after the fact. See `extras/cc/layer_tree_host_impl.html` for an example.
+
+- We allow custom viewer objects to be registered for Snapshots and Instances. When a user clicks on one, we look for a viewer and use that object instead. See `extras/cc/layer_tree_host_impl_view.html` as an example.
diff --git a/catapult/tracing/docs/getting-started.md b/catapult/tracing/docs/getting-started.md
new file mode 100644
index 0000000..6af9365
--- /dev/null
+++ b/catapult/tracing/docs/getting-started.md
@@ -0,0 +1,21 @@
+Using Trace Viewer Casually
+==================================
+ * How to [[embed|Embedding-Trace-Viewer]] the trace-viewer in your own app.
+ * How to [[extend and customize|Extending-and-Customizing-Trace-Viewer]] the trace-viewer to suit your domain.
+
+Making Traces
+=============
+ * [Trace Event Format](https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/edit?usp=sharing) if you want to generate traces yourself
+ * [py-trace-event](https://github.com/natduca/py_trace_event) for generating traces from python
+ * [Chrome's trace_event.h](http://src.chromium.org/chrome/trunk/src/base/debug/trace_event.h) if you're in Chrome's ecosystem
+ * [ftrace](https://www.kernel.org/doc/Documentation/trace/ftrace.txt) for generating traces on Linux
+
+Note: trace-viewer supports custom trace file formats. Just [[add an importer|Extending-and-Customizing-Trace-Viewer]] to trace viewer for your favorite file format.
+
+Contributing New Stuff
+======================
+ * Join our Google Groups: [trace-viewer](https://groups.google.com/forum/#!forum/trace-viewer), [trace-viewer-bugs](https://groups.google.com/forum/#!forum/trace-viewer-bugs)
+ * Learn how to start: [Contributing](https://github.com/catapult-project/catapult/blob/master/CONTRIBUTING.md)
+ * Read the [Trace Viewer style guide](https://docs.google.com/document/d/1MMOfywou2Oaho4jOttUk-ZSJcHVd5G5BTsD48rPrBtQ/edit)
+ * Pick a feature from the [tracing wish list](https://docs.google.com/a/chromium.org/document/d/1T1UJHIgImSEPSugCt2TFrkNsraBFITPHpYFGDJStePc/preview).
+ * Familiarize yourself with the [[Trace-Viewer's-Internals]] and our [[Trace-Viewer-Components]].
diff --git a/catapult/tracing/docs/trace-viewer-internals.md b/catapult/tracing/docs/trace-viewer-internals.md
new file mode 100644
index 0000000..13c23e7
--- /dev/null
+++ b/catapult/tracing/docs/trace-viewer-internals.md
@@ -0,0 +1,129 @@
+# TraceViewer’s Internals
+
+## Module system
+
+ * Tracing currently uses html imports for modules.
+ * We aspire to one class per file. Feel free to break up big files as you
+encounter them; they exist purely for legacy reasons.
+
+## Tests
+
+ * See unittest.html -- mostly compatible with closure tests
+ * See [[/docs/dev-server-tests.md]] for more information
+
+## Components
+
+ * New UI elements should be Polymer components.
+ * You will see some old references to tvcm.ui.define('x'). This is our old
+approach for building components. It's like Polymer, in that you can subclass
+the element, but it doesn't use shadow DOM or have any templating or data
+binding.
+
+## Rough module breakdown
+
+ * *Importers:* load files, produce a model
+ * *Model:* stateless, just the data that came from different trace formats
+ * *TimelineTrackView:* shows the data in gantt-chart form
+ * *Tracks:* visualize a particular part of the model
+ * *Selection:* a vector of things in the tracks that the user has selected (counter samples, slices)
+ * *Analysis:* provides summary of selection
+ * *TimelineView:* glues everything together
+ * *ProfilingView:* chrome-specific UI and glue
+
+## Importer notes
+
+ * The importer-to-model abstraction is meant to allow us to support multiple trace formats.
+
+## Model notes
+
+ * The most important concept in the model is a slice. A slice is a range of time, and some metadata about that range, e.g. title, arguments, etc.
+ * Model has
+     * Processes
+         * Counters
+             * Counter samples (at ts=0.2s, we had 10mb allocated and 3mb free)
+         * Threads
+             * Slices (the FFT::compute function ran from 0.7s to 0.9s)
+             * AsyncSlices (at 0.2s we started a file read in the background and it finished at 0.5s)
+             * CpuSlices (at ts=0.2s we were running on cpu2)
+         * CPUs
+             * Slices (at ts=0.2 to 0.4 we were running "top")
+             * Counters (the clock frequency was 1.2ghz at ts=0.1s)
+
+## Slice
+A slice is something which consumes time synchronously on a CPU or a thread. The
+canonical example of this would be a B/E event pair. An async operation is also
+considered a slice. Things get a bit more murky when looking at instant events.
+A thread-scoped instant event is a zero-duration slice. Other instant events,
+process- or global-scoped, don't correlate to a thread or CPU and aren't
+considered slices.
+
+A flow event, on the other hand, is not a slice. It doesn't consume time and is,
+conceptually, a graph of data flow in the system.
+
+
+## Slice groups
+
+ * When you see the tracing UI, you see lots of things like this:
+
+```
+Thread 7:     [  a     ]   [    c   ]
+                        [ b ]
+```
+
+ * This kind of visualization starts as a *flat* array of slices:
+
+```
+ [{title: 'a', start: 0, end: 1}, {title: 'c', start: 1.5, end: 3.5}, {title: 'b', start: 0.5, end: 0.8}]
+```
+
+ * We call this a slice group. A slice group can be decomposed into subrows -- a subrow is an array of slices that are all non-overlapping. E.g. in the Thread 7 example above, there are two subrows:
+
+```
+subrow 1:     [  a     ]   [    c   ]
+subrow 2:     [ b ]
+```
+
+ * The SliceTrack is built around the idea of visualizing a single subrow. So when you see a thread like Thread 7, you’re really looking at 2 SliceTracks, each of which has its own subrow.
+
+ * We have two slice group types:
+     * SliceGroup, for nested data. Used for threads.
+         * e.g.  like ( (0,2), (0.1,0.3) )
+         * We convert the slices into subrows based on containment.
+         * b is considered contained by a if b.start >= a.start && b.end <= a.end
+     * AsyncSliceGroup, for overlapping data. Used for async operations.
+         * e.g. ( (0, 2), (1, 3) )
+         * We convert the slices into subrows by greedily packing them into rows, adding rows as needed when there’s no room on an existing subrow (see the sketch below).
+
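+A minimal sketch of the greedy subrow packing described above (plain JS for
+illustration, not the actual AsyncSliceGroup code):
+
+```
+// Pack possibly-overlapping slices into subrows: each subrow holds only
+// non-overlapping slices; a new subrow is added when none has room.
+function packIntoSubRows(slices) {
+  var sorted = slices.slice().sort(function(a, b) { return a.start - b.start; });
+  var subRows = [];
+  sorted.forEach(function(slice) {
+    for (var i = 0; i < subRows.length; i++) {
+      var row = subRows[i];
+      if (row[row.length - 1].end <= slice.start) {
+        row.push(slice);  // fits on an existing subrow
+        return;
+      }
+    }
+    subRows.push([slice]);  // no existing subrow has room
+  });
+  return subRows;
+}
+
+// The Thread 7 example: a and c share subrow 1; b overlaps a, so it gets subrow 2.
+packIntoSubRows([{title: 'a', start: 0, end: 1},
+                 {title: 'c', start: 1.5, end: 3.5},
+                 {title: 'b', start: 0.5, end: 0.8}]);
+```
+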
+## Timeline notes
+
+ * Timeline is just an array of tracks. A track is one of the rows in the UI. A single thread of data may turn into 5+ tracks, one track for each row of squares.
+ * The core of the Timeline is Slice
+ * Panning/zooming state is on the TimelineViewport, as is the grid and user defined markers
+
+## Tracks
+### There are three broad types of tracks
+
+ * Building blocks
+     * *Container track*
+         * A track that is itself made up of more tracks. Just a div plus logic to delegate overall track interface calls down to its children.
+     * *CanvasBasedTrack*
+         * A track that renders its content using HTML5 canvas
+ * Visualizations
+     * *SliceTrack:* visualizes an array of non-overlapping, monotonically-increasing slices. Has some simple but critical logic to efficiently render even with thousands (or more) slices by merging small slices together when they are really close together.
+     * *CounterTrack:* visualizes an array of sample values over time. Has support for stacked area charts. Tries to merge samples together when they are not perceptually significant to reduce canvas drawing overhead.
+     * *Model tracks:* e.g. ThreadTrack
+         * Derives from a container track, takes a timeline model object, e.g. a thread, and creates the appropriate child tracks that visualize that thread
+
+## Selection notes
+
+ * When you drag to select, that creates a selection object by asking every track to append things that intersect the dragged-box to the selection
+ * A selection object is an array of hits.
+ * A hit is the pairing of the track-level entity that was selected with the model-level entity that that visual thing represents.
+ * E.g. a thread has a bunch of slices in it. That gets turned into a bunch of subrows that we then turn into SliceTracks. When you click on a slice in a thread in the UI, the hit is {slice: <the slice you clicked>, thread: <the thread it came from>}.
+ * The hit concept exists partly because slices can’t know their parent (see model section for why). Yet, analysis code wants to know parentage in order to do things like group-by-thread.
+
+## Analysis code
+
+ * Takes as input a selection
+ * Does the numeric analysis and dumps the numeric results to a builder
+ * The builder is responsible for creating HTML (or any other textual representation of the results)
diff --git a/catapult/tracing/package.json b/catapult/tracing/package.json
new file mode 100644
index 0000000..7153679
--- /dev/null
+++ b/catapult/tracing/package.json
@@ -0,0 +1,22 @@
+{
+  "name": "traceviewer",
+  "version": "1.0.5",
+  "description": "Trace-Viewer is the javascript frontend for Chrome about:tracing and Android systrace.",
+  "main": "tracing/index.js",
+  "scripts": {
+    "test": "echo \"Error: no test specified\" && exit 1"
+  },
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/catapult-project/catapult/tree/master/tracing"
+  },
+  "keywords": [
+    "tracing",
+    "traceviewer",
+    "trace",
+    "events"
+  ],
+  "author": "The Chromium Authors",
+  "license": "BSD-2-Clause",
+  "gypfile": false
+}
diff --git a/catapult/tracing/test_data/battor.zip b/catapult/tracing/test_data/battor.zip
index 0b850fe..697a8e2 100644
--- a/catapult/tracing/test_data/battor.zip
+++ b/catapult/tracing/test_data/battor.zip
Binary files differ
diff --git a/catapult/tracing/trace_viewer.gypi b/catapult/tracing/trace_viewer.gypi
index 70452bc..8dbfe98 100644
--- a/catapult/tracing/trace_viewer.gypi
+++ b/catapult/tracing/trace_viewer.gypi
@@ -20,9 +20,6 @@
       'tracing/ui/extras/chrome/gpu/state_view.css',
       'tracing/ui/extras/system_stats/system_stats_instance_track.css',
       'tracing/ui/extras/system_stats/system_stats_snapshot_view.css',
-      'tracing/ui/extras/tcmalloc/heap_instance_track.css',
-      'tracing/ui/extras/tcmalloc/tcmalloc_instance_view.css',
-      'tracing/ui/extras/tcmalloc/tcmalloc_snapshot_view.css',
       'tracing/ui/tracks/drawing_container.css',
       'tracing/ui/tracks/object_instance_track.css',
       'tracing/ui/tracks/process_track_base.css',
@@ -49,6 +46,7 @@
       'tracing/base/interval_tree.html',
       'tracing/base/iteration_helpers.html',
       'tracing/base/math.html',
+      'tracing/base/multi_dimensional_view.html',
       'tracing/base/quad.html',
       'tracing/base/raf.html',
       'tracing/base/range.html',
@@ -58,22 +56,13 @@
       'tracing/base/sorted_array_utils.html',
       'tracing/base/statistics.html',
       'tracing/base/task.html',
-      'tracing/base/units/generic_table.html',
-      'tracing/base/units/histogram.html',
-      'tracing/base/units/scalar.html',
-      'tracing/base/units/time_display_mode.html',
-      'tracing/base/units/time_duration.html',
-      'tracing/base/units/time_stamp.html',
-      'tracing/base/units/units.html',
+      'tracing/base/timing.html',
       'tracing/base/utils.html',
       'tracing/core/auditor.html',
       'tracing/core/filter.html',
       'tracing/core/scripting_controller.html',
       'tracing/core/scripting_object.html',
-      'tracing/extras/android/android_app.html',
       'tracing/extras/android/android_auditor.html',
-      'tracing/extras/android/android_model_helper.html',
-      'tracing/extras/android/android_surface_flinger.html',
       'tracing/extras/chrome/cc/cc.html',
       'tracing/extras/chrome/cc/constants.html',
       'tracing/extras/chrome/cc/debug_colors.html',
@@ -91,15 +80,13 @@
       'tracing/extras/chrome/cc/tile_coverage_rect.html',
       'tracing/extras/chrome/cc/util.html',
       'tracing/extras/chrome/chrome_auditor.html',
-      'tracing/extras/chrome/chrome_browser_helper.html',
-      'tracing/extras/chrome/chrome_gpu_helper.html',
-      'tracing/extras/chrome/chrome_model_helper.html',
-      'tracing/extras/chrome/chrome_process_helper.html',
-      'tracing/extras/chrome/chrome_renderer_helper.html',
       'tracing/extras/chrome/chrome_user_friendly_category_driver.html',
+      'tracing/extras/chrome/frame_tree_node.html',
       'tracing/extras/chrome/gpu/gpu_async_slice.html',
       'tracing/extras/chrome/gpu/state.html',
       'tracing/extras/chrome/layout_object.html',
+      'tracing/extras/chrome/layout_tree.html',
+      'tracing/extras/chrome/render_frame.html',
       'tracing/extras/chrome_config.html',
       'tracing/extras/importer/android/event_log_importer.html',
       'tracing/extras/importer/battor_importer.html',
@@ -147,17 +134,8 @@
       'tracing/extras/measure/measure_async_slice.html',
       'tracing/extras/net/net.html',
       'tracing/extras/net/net_async_slice.html',
-      'tracing/extras/rail/animation_interaction_record.html',
-      'tracing/extras/rail/idle_interaction_record.html',
-      'tracing/extras/rail/load_interaction_record.html',
-      'tracing/extras/rail/proto_ir.html',
-      'tracing/extras/rail/rail_interaction_record.html',
-      'tracing/extras/rail/rail_ir_finder.html',
-      'tracing/extras/rail/rail_score.html',
-      'tracing/extras/rail/response_interaction_record.html',
       'tracing/extras/system_stats/system_stats_snapshot.html',
       'tracing/extras/systrace_config.html',
-      'tracing/extras/tcmalloc/heap.html',
       'tracing/extras/tquery/context.html',
       'tracing/extras/tquery/filter.html',
       'tracing/extras/tquery/filter_all_of.html',
@@ -166,20 +144,38 @@
       'tracing/extras/tquery/filter_has_duration.html',
       'tracing/extras/tquery/filter_has_title.html',
       'tracing/extras/tquery/filter_is_top_level.html',
+      'tracing/extras/tquery/filter_not.html',
       'tracing/extras/tquery/tquery.html',
       'tracing/extras/vsync/vsync_auditor.html',
       'tracing/importer/empty_importer.html',
+      'tracing/importer/find_input_expectations.html',
+      'tracing/importer/find_load_expectations.html',
       'tracing/importer/import.html',
       'tracing/importer/importer.html',
+      'tracing/importer/proto_expectation.html',
       'tracing/importer/simple_line_reader.html',
+      'tracing/importer/user_model_builder.html',
+      'tracing/metrics/all_metrics.html',
+      'tracing/metrics/metric_registry.html',
+      'tracing/metrics/sample_metric.html',
+      'tracing/metrics/system_health/animation_smoothness_metric.html',
+      'tracing/metrics/system_health/animation_throughput_metric.html',
+      'tracing/metrics/system_health/efficiency_metric.html',
+      'tracing/metrics/system_health/responsiveness_metric.html',
+      'tracing/metrics/system_health/system_health_metrics.html',
+      'tracing/metrics/system_health/utils.html',
+      'tracing/metrics/tracing_metric.html',
+      'tracing/metrics/value_list.html',
       'tracing/model/activity.html',
       'tracing/model/alert.html',
       'tracing/model/annotation.html',
       'tracing/model/async_slice.html',
       'tracing/model/async_slice_group.html',
-      'tracing/model/attribute.html',
+      'tracing/model/clock_sync_manager.html',
+      'tracing/model/clock_sync_record.html',
       'tracing/model/comment_box_annotation.html',
       'tracing/model/compound_event_selection_state.html',
+      'tracing/model/constants.html',
       'tracing/model/container_memory_dump.html',
       'tracing/model/counter.html',
       'tracing/model/counter_sample.html',
@@ -196,8 +192,15 @@
       'tracing/model/frame.html',
       'tracing/model/global_memory_dump.html',
       'tracing/model/heap_dump.html',
+      'tracing/model/helpers/android_app.html',
+      'tracing/model/helpers/android_model_helper.html',
+      'tracing/model/helpers/android_surface_flinger.html',
+      'tracing/model/helpers/chrome_browser_helper.html',
+      'tracing/model/helpers/chrome_gpu_helper.html',
+      'tracing/model/helpers/chrome_model_helper.html',
+      'tracing/model/helpers/chrome_process_helper.html',
+      'tracing/model/helpers/chrome_renderer_helper.html',
       'tracing/model/instant_event.html',
-      'tracing/model/interaction_record.html',
       'tracing/model/ir_coverage.html',
       'tracing/model/kernel.html',
       'tracing/model/location.html',
@@ -205,6 +208,7 @@
       'tracing/model/model.html',
       'tracing/model/model_indices.html',
       'tracing/model/model_settings.html',
+      'tracing/model/model_stats.html',
       'tracing/model/object_collection.html',
       'tracing/model/object_instance.html',
       'tracing/model/object_snapshot.html',
@@ -216,6 +220,7 @@
       'tracing/model/proxy_selectable_item.html',
       'tracing/model/rect_annotation.html',
       'tracing/model/sample.html',
+      'tracing/model/scoped_id.html',
       'tracing/model/selectable_item.html',
       'tracing/model/selection_state.html',
       'tracing/model/slice.html',
@@ -228,6 +233,13 @@
       'tracing/model/thread_time_slice.html',
       'tracing/model/time_to_object_instance_map.html',
       'tracing/model/timed_event.html',
+      'tracing/model/user_model/animation_expectation.html',
+      'tracing/model/user_model/idle_expectation.html',
+      'tracing/model/user_model/load_expectation.html',
+      'tracing/model/user_model/response_expectation.html',
+      'tracing/model/user_model/user_expectation.html',
+      'tracing/model/user_model/user_model.html',
+      'tracing/model/vm_region.html',
       'tracing/model/x_marker_annotation.html',
       'tracing/ui/analysis/alert_sub_view.html',
       'tracing/ui/analysis/analysis_link.html',
@@ -238,6 +250,7 @@
       'tracing/ui/analysis/flow_classifier.html',
       'tracing/ui/analysis/frame_power_usage_chart.html',
       'tracing/ui/analysis/generic_object_view.html',
+      'tracing/ui/analysis/layout_tree_sub_view.html',
       'tracing/ui/analysis/memory_dump_allocator_details_pane.html',
       'tracing/ui/analysis/memory_dump_header_pane.html',
       'tracing/ui/analysis/memory_dump_heap_details_pane.html',
@@ -253,12 +266,12 @@
       'tracing/ui/analysis/multi_flow_event_sub_view.html',
       'tracing/ui/analysis/multi_frame_sub_view.html',
       'tracing/ui/analysis/multi_instant_event_sub_view.html',
-      'tracing/ui/analysis/multi_interaction_record_sub_view.html',
       'tracing/ui/analysis/multi_object_sub_view.html',
       'tracing/ui/analysis/multi_power_sample_sub_view.html',
       'tracing/ui/analysis/multi_sample_sub_view.html',
       'tracing/ui/analysis/multi_thread_slice_sub_view.html',
       'tracing/ui/analysis/multi_thread_time_slice_sub_view.html',
+      'tracing/ui/analysis/multi_user_expectation_sub_view.html',
       'tracing/ui/analysis/object_instance_view.html',
       'tracing/ui/analysis/object_snapshot_view.html',
       'tracing/ui/analysis/power_sample_summary_table.html',
@@ -271,18 +284,16 @@
       'tracing/ui/analysis/single_flow_event_sub_view.html',
       'tracing/ui/analysis/single_frame_sub_view.html',
       'tracing/ui/analysis/single_instant_event_sub_view.html',
-      'tracing/ui/analysis/single_interaction_record_sub_view.html',
       'tracing/ui/analysis/single_object_instance_sub_view.html',
       'tracing/ui/analysis/single_object_snapshot_sub_view.html',
       'tracing/ui/analysis/single_power_sample_sub_view.html',
       'tracing/ui/analysis/single_sample_sub_view.html',
       'tracing/ui/analysis/single_thread_slice_sub_view.html',
       'tracing/ui/analysis/single_thread_time_slice_sub_view.html',
+      'tracing/ui/analysis/single_user_expectation_sub_view.html',
       'tracing/ui/analysis/stack_frame.html',
-      'tracing/ui/analysis/stack_frame_tree.html',
       'tracing/ui/analysis/stacked_pane.html',
       'tracing/ui/analysis/stacked_pane_view.html',
-      'tracing/ui/analysis/tab_view.html',
       'tracing/ui/annotations/annotation_view.html',
       'tracing/ui/annotations/comment_box_annotation_view.html',
       'tracing/ui/annotations/rect_annotation_view.html',
@@ -306,6 +317,8 @@
       'tracing/ui/base/event_presenter.html',
       'tracing/ui/base/fast_rect_renderer.html',
       'tracing/ui/base/favicons.html',
+      'tracing/ui/base/grouping_table.html',
+      'tracing/ui/base/grouping_table_groupby_picker.html',
       'tracing/ui/base/heading.html',
       'tracing/ui/base/hot_key.html',
       'tracing/ui/base/hotkey_controller.html',
@@ -321,6 +334,7 @@
       'tracing/ui/base/pie_chart.html',
       'tracing/ui/base/polymer_utils.html',
       'tracing/ui/base/quad_stack_view.html',
+      'tracing/ui/base/tab_view.html',
       'tracing/ui/base/table.html',
       'tracing/ui/base/timing_tool.html',
       'tracing/ui/base/toolbar_button.html',
@@ -333,12 +347,13 @@
       'tracing/ui/extras/about_tracing/inspector_connection.html',
       'tracing/ui/extras/about_tracing/inspector_tracing_controller_client.html',
       'tracing/ui/extras/about_tracing/profiling_view.html',
-      'tracing/ui/extras/about_tracing/record_and_capture_controller.html',
+      'tracing/ui/extras/about_tracing/record_controller.html',
       'tracing/ui/extras/about_tracing/record_selection_dialog.html',
       'tracing/ui/extras/about_tracing/tracing_controller_client.html',
       'tracing/ui/extras/about_tracing/xhr_based_tracing_controller_client.html',
       'tracing/ui/extras/chrome/cc/cc.html',
       'tracing/ui/extras/chrome/cc/display_item_debugger.html',
+      'tracing/ui/extras/chrome/cc/display_item_list_item.html',
       'tracing/ui/extras/chrome/cc/display_item_list_view.html',
       'tracing/ui/extras/chrome/cc/layer_picker.html',
       'tracing/ui/extras/chrome/cc/layer_tree_host_impl_view.html',
@@ -359,23 +374,17 @@
       'tracing/ui/extras/full_config.html',
       'tracing/ui/extras/highlighter/vsync_highlighter.html',
       'tracing/ui/extras/lean_config.html',
-      'tracing/ui/extras/rail/rail_score_side_panel.html',
-      'tracing/ui/extras/rail/rail_score_span.html',
       'tracing/ui/extras/side_panel/alerts_side_panel.html',
-      'tracing/ui/extras/side_panel/category_summary_side_panel.html',
       'tracing/ui/extras/side_panel/input_latency_side_panel.html',
       'tracing/ui/extras/side_panel/time_summary_side_panel.html',
       'tracing/ui/extras/system_stats/system_stats.html',
       'tracing/ui/extras/system_stats/system_stats_instance_track.html',
       'tracing/ui/extras/system_stats/system_stats_snapshot_view.html',
       'tracing/ui/extras/systrace_config.html',
-      'tracing/ui/extras/tcmalloc/heap_instance_track.html',
-      'tracing/ui/extras/tcmalloc/tcmalloc.html',
-      'tracing/ui/extras/tcmalloc/tcmalloc_instance_view.html',
-      'tracing/ui/extras/tcmalloc/tcmalloc_snapshot_view.html',
       'tracing/ui/find_control.html',
       'tracing/ui/find_controller.html',
       'tracing/ui/scripting_control.html',
+      'tracing/ui/side_panel/file_size_stats_side_panel.html',
       'tracing/ui/side_panel/side_panel.html',
       'tracing/ui/side_panel/side_panel_container.html',
       'tracing/ui/timeline_display_transform.html',
@@ -425,19 +434,20 @@
       'tracing/ui/tracks/stacked_bars_track.html',
       'tracing/ui/tracks/thread_track.html',
       'tracing/ui/tracks/track.html',
-      'tracing/ui/units/array_of_numbers_span.html',
-      'tracing/ui/units/generic_table_view.html',
-      'tracing/ui/units/preferred_display_unit.html',
-      'tracing/ui/units/scalar_span.html',
-      'tracing/ui/units/time_duration_span.html',
-      'tracing/ui/units/time_stamp_span.html',
       'tracing/ui/view_specific_brushing_state.html',
+      'tracing/value/generic_table.html',
+      'tracing/value/numeric.html',
+      'tracing/value/time_display_mode.html',
+      'tracing/value/ui/array_of_numbers_span.html',
+      'tracing/value/ui/generic_table_view.html',
+      'tracing/value/ui/preferred_display_unit.html',
+      'tracing/value/ui/scalar_span.html',
+      'tracing/value/unit.html',
+      'tracing/value/value.html'
     ],
     'tracing_img_files': [
       'tracing/ui/extras/chrome/cc/images/input-event.png',
       'tracing/ui/extras/chrome/gpu/images/checkerboard.png',
-      'tracing/ui/extras/tcmalloc/images/collapse.png',
-      'tracing/ui/extras/tcmalloc/images/expand.png',
       'tracing/ui/images/chrome-left.png',
       'tracing/ui/images/chrome-mid.png',
       'tracing/ui/images/chrome-right.png',
diff --git a/catapult/tracing/tracing/__init__.py b/catapult/tracing/tracing/__init__.py
new file mode 100644
index 0000000..76063aa
--- /dev/null
+++ b/catapult/tracing/tracing/__init__.py
@@ -0,0 +1,6 @@
+# Copyright (c) 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import tracing_project
+tracing_project.UpdateSysPathIfNeeded()
diff --git a/catapult/tracing/tracing/base/base64.html b/catapult/tracing/tracing/base/base64.html
index d7e758f..bceb821 100644
--- a/catapult/tracing/tracing/base/base64.html
+++ b/catapult/tracing/tracing/base/base64.html
@@ -29,7 +29,7 @@
 
   Base64.getDecodedBufferLength = function(input) {
     return input.length * 3 + 1 >> 2;
-  }
+  };
 
   Base64.EncodeArrayBufferToString = function(input) {
     // http://stackoverflow.com/questions/9267899/
@@ -39,7 +39,7 @@
     for (var i = 0; i < len; i++)
       binary += String.fromCharCode(bytes[i]);
     return btoa(binary);
-  }
+  };
 
   Base64.DecodeToTypedArray = function(input, output) {
 
@@ -64,7 +64,27 @@
       }
     }
     return nOutIdx - 1;
-  }
+  };
+
+  /*
+   * Wrapper around btoa.
+   * The window object has a built-in btoa, but we also want btoa to be
+   * available when running headless, for example in a mapper.
+   */
+  Base64.btoa = function(input) {
+    return btoa(input);
+  };
+
+  /*
+   * Wrapper around atob.
+   * The window object has a built-in atob, but we also want atob to be
+   * available when running headless, for example in a mapper.
+   */
+  Base64.atob = function(input) {
+    return atob(input);
+  };
 
   return {
     Base64: Base64
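
The two wrappers added above simply delegate to the environment's btoa/atob so that callers can stay inside the tr.b.Base64 namespace in both browser and headless runs. A minimal usage sketch (assuming, as the wrappers themselves do, that the headless environment exposes global btoa/atob); the expected strings match the tests added in the test file below:

  // Round-trip a string through the namespaced wrappers.
  var encoded = tr.b.Base64.btoa('hello world');  // 'aGVsbG8gd29ybGQ='
  var decoded = tr.b.Base64.atob(encoded);        // 'hello world'
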
diff --git a/catapult/tracing/tracing/base/base64_test.html b/catapult/tracing/tracing/base/base64_test.html
index 218c050..a2b46d8 100644
--- a/catapult/tracing/tracing/base/base64_test.html
+++ b/catapult/tracing/tracing/base/base64_test.html
@@ -34,5 +34,15 @@
     var len = tr.b.Base64.DecodeToTypedArray(btoa('hello'), buffer);
     assert.equal(len, 5);
   });
+
+  test('Base64.atob', function() {
+    var output = tr.b.Base64.atob('aGVsbG8gd29ybGQ=');
+    assert.equal(output, 'hello world');
+  });
+
+  test('Base64.btoa', function() {
+    var output = tr.b.Base64.btoa('hello world');
+    assert.equal(output, 'aGVsbG8gd29ybGQ=');
+  });
 });
 </script>
diff --git a/catapult/tracing/tracing/base/color_scheme.html b/catapult/tracing/tracing/base/color_scheme.html
index c6ceaf9..c95ba81 100644
--- a/catapult/tracing/tracing/base/color_scheme.html
+++ b/catapult/tracing/tracing/base/color_scheme.html
@@ -67,7 +67,7 @@
     black: new tr.b.Color(0, 0, 0),
 
     rail_response: new tr.b.Color(67, 135, 253),
-    rail_animate: new tr.b.Color(244, 74, 63),
+    rail_animation: new tr.b.Color(244, 74, 63),
     rail_idle: new tr.b.Color(238, 142, 0),
     rail_load: new tr.b.Color(13, 168, 97),
 
@@ -75,6 +75,9 @@
     older_used_memory_column: new tr.b.Color(153, 204, 255),
     tracing_memory_column: new tr.b.Color(153, 153, 153),
 
+    heap_dump_stack_frame: new tr.b.Color(128, 128, 128),
+    heap_dump_object_type: new tr.b.Color(0, 0, 255),
+
     cq_build_running: new tr.b.Color(255, 255, 119),
     cq_build_passed: new tr.b.Color(153, 238, 102),
     cq_build_failed: new tr.b.Color(238, 136, 136),
diff --git a/catapult/tracing/tracing/base/d8_tests.html b/catapult/tracing/tracing/base/d8_tests.html
deleted file mode 100644
index e201511..0000000
--- a/catapult/tracing/tracing/base/d8_tests.html
+++ /dev/null
@@ -1,75 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/base/base.html">
-<link rel="import" href="/tracing/base/unittest.html">
-<link rel="import" href="/tracing/base/unittest/test_runner.html">
-<link rel="import" href="/tracing/base/unittest/text_test_results.html">
-<script>
-'use strict';
-
-(function() {
-  if (!tr.isHeadless) {
-    throw new Error('d8_tests.html only works in headless mode');
-  }
-
-  var results = new tr.b.unittest.TextTestResults();
-
-  function printSpacer() {
-    console.log('\n\n------------------------------------------------------' +
-                '----------------');
-  }
-  function loadAndRunTests() {
-    var args = sys.argv.slice(1);
-    var suiteRelpathsToLoad = args.map(function(x) {
-      return x;
-    });
-
-    var loader = new tr.b.unittest.SuiteLoader(suiteRelpathsToLoad);
-
-    var p = loader.allSuitesLoadedPromise;
-
-    p = p.then(
-      function didAllSuitesLoad() {
-        var tests = loader.getAllTests().filter(function(testCase) {
-          if (testCase instanceof tr.b.unittest.PerfTestCase)
-            return false;
-          return true;
-        });
-        if (tests.length === 0) {
-          printSpacer();
-          console.log('FAILED: No tests to run.');
-          console.log(err.stack);
-          quit(1);
-        }
-        var runner = new tr.b.unittest.TestRunner(results, tests);
-        return runner.beginRunning();
-      },
-      function suiteLoadingFailure(err) {
-        printSpacer();
-        console.log('FAILED: A test suite failed to load.');
-        console.log(err.stack);
-        quit(1);
-      });
-
-    p = p.then(
-      function didAllTestRun() {
-        if (results.numTestsThatFailed > 0)
-          quit(1);
-        else
-          quit(0);
-      },
-      function testHarnessError(e) {
-        console.log('FAILED: A test harness error has ocurred.');
-        console.log(e.stack);
-        quit(1);
-      });
-    return p;
-  }
-
-  loadAndRunTests();
-})();
-</script>
diff --git a/catapult/tracing/tracing/base/extension_registry_basic.html b/catapult/tracing/tracing/base/extension_registry_basic.html
index d086cda..d21d394 100644
--- a/catapult/tracing/tracing/base/extension_registry_basic.html
+++ b/catapult/tracing/tracing/base/extension_registry_basic.html
@@ -4,8 +4,8 @@
 Use of this source code is governed by a BSD-style license that can be
 found in the LICENSE file.
 -->
-<link rel="import" href="/tracing/base/extension_registry_base.html">
 <link rel="import" href="/tracing/base/event.html">
+<link rel="import" href="/tracing/base/extension_registry_base.html">
 <script>
 'use strict';
 
@@ -100,6 +100,17 @@
       }
       return extensionRegistryOptions.defaultTypeInfo;
     };
+
+    registry.findTypeInfoWithName = function(name) {
+      if (typeof(name) !== 'string')
+        throw new Error('Name is not a string.');
+      var typeInfo = registry.findTypeInfoMatching(function(ti) {
+        return ti.constructor.name === name;
+      });
+      if (typeInfo)
+        return typeInfo;
+      return undefined;
+    };
   }
 
   return {
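
A short sketch of how the new findTypeInfoWithName helper might be called. The registry and subtype names are hypothetical; only findTypeInfoWithName itself, its string argument, and the typeInfo.constructor field are taken from the code above:

  // Assume SomeRegistry is an object decorated as an extension registry
  // and ExampleSubtype was registered on it (both names are made up).
  var typeInfo = SomeRegistry.findTypeInfoWithName('ExampleSubtype');
  if (typeInfo !== undefined) {
    var instance = new typeInfo.constructor();
  }
  // Passing a non-string argument throws ('Name is not a string.').
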
diff --git a/catapult/tracing/tracing/base/headless_tests.html b/catapult/tracing/tracing/base/headless_tests.html
new file mode 100644
index 0000000..6c3f5f4
--- /dev/null
+++ b/catapult/tracing/tracing/base/headless_tests.html
@@ -0,0 +1,78 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<link rel="import" href="/tracing/base/base.html">
+<link rel="import" href="/tracing/base/unittest.html">
+<link rel="import" href="/tracing/base/unittest/test_runner.html">
+<link rel="import" href="/tracing/base/unittest/text_test_results.html">
+<script>
+'use strict';
+
+tr.exportTo('tr.b.unittest', function() {
+  if (!tr.isHeadless) {
+    throw new Error('headless_tests.html only works in headless mode');
+  }
+  function quit(errCode) {
+    if (tr.isVinn)
+      global.quit(errCode);
+    else
+      process.exit(errCode);
+  }
+
+  function printSpacer() {
+    console.log('\n\n------------------------------------------------------' +
+                '----------------');
+  }
+  function loadAndRunTests(suiteRelpathsToLoad) {
+    var results = new tr.b.unittest.TextTestResults();
+
+    var loader = new tr.b.unittest.SuiteLoader(suiteRelpathsToLoad);
+
+    var p = loader.allSuitesLoadedPromise;
+
+    p = p.then(
+      function didAllSuitesLoad() {
+        var tests = loader.getAllTests().filter(function(testCase) {
+          if (testCase instanceof tr.b.unittest.PerfTestCase)
+            return false;
+          return true;
+        });
+        if (tests.length === 0) {
+          printSpacer();
+          console.log('FAILED: No tests to run.');
+          // No error object is available here; the failure message suffices.
+          quit(1);
+        }
+        var runner = new tr.b.unittest.TestRunner(results, tests);
+        return runner.beginRunning();
+      },
+      function suiteLoadingFailure(err) {
+        printSpacer();
+        console.log('FAILED: A test suite failed to load.');
+        console.log(err.stack);
+        quit(1);
+      });
+
+    p = p.then(
+      function didAllTestRun() {
+        if (results.numTestsThatFailed > 0)
+          quit(1);
+        else
+          quit(0);
+      },
+      function testHarnessError(e) {
+        console.log('FAILED: A test harness error has occurred.');
+        console.log(e.stack);
+        quit(1);
+      });
+    return p;
+  }
+
+  return {
+    loadAndRunTests: loadAndRunTests
+  };
+});
+</script>
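
For reference, a hedged sketch of how a headless driver might invoke the helper exported above. The suite relpath is illustrative (it reuses the base64 test suite touched earlier in this patch), and the exact relpath format expected by SuiteLoader is an assumption here:

  // Only valid in headless mode (the import throws otherwise). The helper
  // loads the given suites, runs every non-perf test, and exits the
  // process/shell with status 0 or 1 based on the results.
  tr.b.unittest.loadAndRunTests(['/tracing/base/base64_test.html']);
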
diff --git a/catapult/tracing/tracing/base/math.html b/catapult/tracing/tracing/base/math.html
index b6f7fd5..1a1ba99 100644
--- a/catapult/tracing/tracing/base/math.html
+++ b/catapult/tracing/tracing/base/math.html
@@ -10,6 +10,26 @@
 <script>
 'use strict';
 
+// In Node, the <script src> for gl-matrix-min above loads glMatrix into
+// a module rather than into the global scope, whereas Tracing code
+// assumes that glMatrix is in the global scope. So, in Node only, we
+// require() the module and copy all of its exports into the global
+// scope by hand.
+(function(global) {
+  if (tr.isNode) {
+    var glMatrixAbsPath = HTMLImportsLoader.hrefToAbsolutePath(
+        '/gl-matrix-min.js');
+    var glMatrixModule = require(glMatrixAbsPath);
+    for (var exportName in glMatrixModule) {
+      global[exportName] = glMatrixModule[exportName];
+    }
+  }
+})(this);
+</script>
+
+<script>
+'use strict';
+
 tr.exportTo('tr.b', function() {
   function clamp(x, lo, hi) {
     return Math.min(Math.max(x, lo), hi);
@@ -78,17 +98,17 @@
 
   vec3.toString = function(a) {
     return 'vec3(' + a[0] + ', ' + a[1] + ', ' + a[2] + ')';
-  }
+  };
 
   mat2d.translateXY = function(out, x, y) {
     vec2.set(tmp_vec2, x, y);
     mat2d.translate(out, out, tmp_vec2);
-  }
+  };
 
   mat2d.scaleXY = function(out, x, y) {
     vec2.set(tmp_vec2, x, y);
     mat2d.scale(out, out, tmp_vec2);
-  }
+  };
 
   vec4.unitize = function(out, a) {
     out[0] = a[0] / a[3];
@@ -96,12 +116,12 @@
     out[2] = a[2] / a[3];
     out[3] = 1;
     return out;
-  }
+  };
 
   vec2.copyFromVec4 = function(out, a) {
     vec4.unitize(tmp_vec4, a);
     vec2.copy(out, tmp_vec4);
-  }
+  };
 
   return {
     clamp: clamp,
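
The new script block at the top of math.html handles the difference between browser and Node loading of gl-matrix. Shown standalone as a sketch (the path string is hypothetical; only the require-then-copy-to-global pattern is taken from the code above):

  // Under Node, require() returns the library's exports on a module object
  // instead of defining vec2/mat2d/etc. globally, so each export is copied
  // onto the global object by hand.
  var glMatrixModule = require('/absolute/path/to/gl-matrix-min.js');
  for (var exportName in glMatrixModule)
    global[exportName] = glMatrixModule[exportName];
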
diff --git a/catapult/tracing/tracing/base/multi_dimensional_view.html b/catapult/tracing/tracing/base/multi_dimensional_view.html
new file mode 100644
index 0000000..640c55b
--- /dev/null
+++ b/catapult/tracing/tracing/base/multi_dimensional_view.html
@@ -0,0 +1,1070 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/base/base.html">
+
+<script>
+'use strict';
+
+/**
+ * @fileoverview Multi-dimensional view data structure.
+ *
+ * A multi-dimensional view provides a hierarchical representation of a
+ * collection of multi-dimensional paths with associated scalar values. Unlike
+ * separate single-dimensional views (e.g. one tree for each dimension),
+ * multi-dimensional views facilitate aggregation over combinations of
+ * substrings of the path dimensions (rather than just substrings of a single
+ * path dimension).
+ *
+ * Every view consists of multi-dimensional nodes (see MultiDimensionalViewNode
+ * for more details). This file also provides a builder class for constructing
+ * top-down and bottom-up representations of arbitrary collections of
+ * multi-dimensional paths (see MultiDimensionalViewBuilder for more details).
+ *
+ * Example: Given the following collection of two dimensional paths:
+ *
+ *    <----------- dimension 0 ------------>  <-- dimension 1 -->
+ *   [['Loop::Run()', 'Execute()', 'Call()'], ['Object', 'View']]:   total=1
+ *   [['Loop::Run()', 'Execute()', 'Call()'], ['Object', 'Widget']]: total=2
+ *   [['Loop::Run()', 'Execute()', 'Load()'], ['Object']]:           total=4
+ *   [['Loop::Run()', 'Execute()'],           ['int']]:              total=8
+ *   [['Loop::Run()'],                        ['Object', 'Window']]: total=16
+ *   [['Loop::Stop()'],                       ['Object']]:           total=32
+ *
+ * a multi-dimensional view provides a recursive breakdown of the aggregated
+ * values, e.g.:
+ *
+ *   (root):      total=63
+ *     |
+ *     | break down by 0th dimension
+ *     v
+ *   Loop::Run(): total=31
+ *     |
+ *     | break down by 0th dimension
+ *     v
+ *   Execute():   total=15
+ *     |
+ *     | break down by 1st dimension
+ *     v
+ *   Object:      total=7
+ *     |
+ *     | break down by 0th dimension again
+ *     v
+ *   Call():      total=3
+ *     |
+ *     | break down by 1st dimension again
+ *     v
+ *   View:        total=1
+ *
+ * Observe that the recursive breakdown above is over both dimensions.
+ * Furthermore, the underlying single-dimension paths (Loop::Run() -> Execute()
+ * -> Call() and Object -> View) can be arbitrarily interleaved in the
+ * breakdown.
+ */
+tr.exportTo('tr.b', function() {
+
+  /**
+   * Node of a multi-dimensional view.
+   *
+   * The structure of a view is encoded in the nodes using links to their
+   * children wrt each dimension. The diagram below shows how the nodes
+   * corresponding to the following four two-dimensional view paths:
+   *
+   *   1. [['A', 'B'], ['1', '2']]
+   *   2. [['A', 'C'], ['1', '2']]
+   *   3. [['A', 'B'], ['1', '3']]
+   *   4. [['A', 'C'], ['1', '3']]
+   *
+   * can be reached from the root of a two-dimensional view using these links
+   * ('*' stands for undefined):
+   *
+   *                       +---------------------+
+   *                       | title: [*,*] (root) |
+   *                       +---------------------+
+   *                     children wrt    children wrt
+   *                    0th dimension    1st dimension
+   *                              |        :
+   *              _______A________|        :........1.........
+   *             |                                           :
+   *             v                                           v
+   *         +--------------+                     +--------------+
+   *         | title: [A,*] |                     | title: [*,1] |
+   *         +--------------+                     +--------------+
+   *    children wrt   children wrt         children wrt   children wrt
+   *   0th dimension   1st dimension       0th dimension   1st dimension
+   *           | |       :.....1......    _____A_____|       : :
+   *        _B_| |__C__              :   |             ...2..: :.3..
+   *       |           |             :   |             :           :
+   *       v           v             v   v             v           v
+   *   +-------+   +-------+       +-------+       +-------+   +-------+
+   *   | [B,*] |   | [C,*] |       | [A,1] |       | [*,2] |   | [*,3] |
+   *   +-------+   +-------+       +-------+       +-------+   +-------+
+   *       :        ___:_____B______| | : :......3.....|....       |
+   *       :.1..   |   :.1..    __C___| :...2...    _A_|   :    _A_|
+   *           :   |       :   |               :   |       :   |
+   *           v   v       v   v               v   v       v   v
+   *         +-------+   +-------+           +-------+   +-------+
+   *         | [B,1] |   | [C,1] |           | [A,2] |   | [A,3] |
+   *         +-------+   +-------+           +-------+   +-------+
+   *           :   :       :   :.......3.......||..........   ||
+   *           :   :..3....:................   BC         :   BC
+   *           :     ______:_______________:___||         :   ||
+   *           2    |      2        _______:____|   ______:___||
+   *           :    |      :       |       :       |      :    |
+   *           v    v      v       v       v       v      v    v
+   *       +----------+   +----------+   +----------+   +----------+
+   *       |  [B,2]   |   |  [C,2]   |   |  [B,3]   |   |  [C,3]   |
+   *       | (node 1) |   | (node 2) |   | (node 3) |   | (node 4) |
+   *       +----------+   +----------+   +----------+   +----------+
+   *
+   * The self/total value of a node represents the aggregated value of all
+   * paths (in the collection from which the view was built) matching the node
+   * excluding/including the node's descendants.
+   *
+   * Terminology examples:
+   *
+   *   - Children of [A,*] wrt 0th dimension: [B,*], [C,*]
+   *   - Children of [A,*] (wrt all dimensions): [B,*], [C,*], [A,1]
+   *   - Descendants of [A,*] wrt 1st dimension: [A,1], [A,2], [A,3]
+   *   - Single-dimensional descendants of [A,*]: [A,1], [A,2], [A,3], [B,*],
+   *     [C,*]
+   *   - Descendants of [A,*] (wrt all dimensions): [A,1], [A,2], [A,3], [B,*],
+   *     [C,*], [B,1], [C,1], [B,2], [C,2], [B,3], [C,3]
+   *
+   * @constructor
+   */
+  function MultiDimensionalViewNode(title, isLowerBound) {
+    // List of titles of this node wrt each dimension.
+    this.title = title;
+
+    // Map from child name to child node for each dimension.
+    var dimensions = title.length;
+    this.children = new Array(dimensions);
+    for (var i = 0; i < dimensions; i++)
+      this.children[i] = new Map();
+
+    this.total = 0;
+    this.self = 0;
+
+    // Flag whether the values of this node are only lower bounds (i.e.
+    // aggregated from children rather than provided directly).
+    this.isLowerBound = !!isLowerBound;
+  }
+
+  MultiDimensionalViewNode.prototype = {
+    /** Duck type <tr-ui-b-table> rows. */
+    get subRows() {
+      return tr.b.mapValues(this.children[0]);
+    }
+  };
+
+  /**
+   * Types of multi-dimensional views provided by MultiDimensionalViewBuilder.
+   *
+   * @enum
+   */
+  var MultiDimensionalViewType = {
+    TOP_DOWN_TREE_VIEW: 0,
+    TOP_DOWN_HEAVY_VIEW: 1,
+    BOTTOM_UP_HEAVY_VIEW: 2
+  };
+
+  /**
+   * Builder for multi-dimensional views.
+   *
+   * Given a collection of multi-dimensional paths, a builder can be used to
+   * construct the following three representations of the paths:
+   *
+   *   1. Top-down tree view
+   *      A multi-dimensional path in the view corresponds to all paths in the
+   *      collection that have it as their prefix.
+   *
+   *   2. Top-down heavy view
+   *      A multi-dimensional path in the view corresponds to all paths in the
+   *      collection that have it as their substring.
+   *
+   *   3. Bottom-up heavy view
+   *      A multi-dimensional path in the view corresponds to all paths in the
+   *      collection that have it as their substring reversed.
+   *
+   * For example, the following collection of 2-dimensional paths:
+   *
+   *                  2-dimensional path                | self
+   *    Time (0th dimension) | Activity (1st dimension) | value
+   *   ========================+========================+=======
+   *    Saturday             | Cooking                  |   1 h
+   *    Saturday             | Sports -> Football       |   2 h
+   *    Sunday               | Sports -> Basketball     |   3 h
+   *
+   * gives rise to the following top-down tree view, which aggregates the
+   * scalar values over prefixes of the given paths:
+   *
+   *                              +---------+
+   *                              |    *    |
+   *                              |    *    |
+   *                              | self=0  |
+   *                              | total=6 |
+   *                              +---------+
+   *                                | : | :
+   *         _________Cooking_______| : | :............Sunday............
+   *        |                         : |                               :
+   *        |            ...Saturday..: |_Sports_                       :
+   *        |            :                       |                      :
+   *        v            v                       v                      v
+   *   +---------+  +---------+            +---------+             +---------+
+   *   |    *    |  |   Sat   |            |    *    |             |   Sun   |
+   *   | Cooking |  |    *    |            | Sports  |             |    *    |
+   *   | self=0  |  | self=0  |            | self=0  |             | self=0  |
+   *   | total=1 |  | total=3 |            | total=5 |             | total=3 |
+   *   +---------+  +---------+            +---------+             +---------+
+   *      :          |   |                   : | | :                     |
+   *    Saturday     | Sports                : | | :                  Sports
+   *      :          |   |  .....Saturday....: | | :.....Sunday.......   |
+   *      :    _Cook_|   |  :            _Foot_| |_Bask_             :   |
+   *      :   |          |  :           |               |            :   |
+   *      v   v          v  v           v               v            v   v
+   *   +---------+  +---------+  +------------+  +--------------+  +---------+
+   *   |   Sat   |  |   Sat   |  |     *      |  |      *       |  |   Sun   |
+   *   | Cooking |  | Sports  |  | S/Football |  | S/Basketball |  | Sports  |
+   *   | self=1  |  | self=0  |  | self=0     |  | self=0       |  | self=0  |
+   *   | total=1 |  | total=2 |  | total=2    |  | total=3      |  | total=3 |
+   *   +---------+  +---------+  +------------+  +--------------+  +---------+
+   *                    |              :                 :               |
+   *                    |_Foot_  ..Sat.:                 :.Sun..   _Bask_|
+   *                           | :                             :  |
+   *                           v v                             v  v
+   *                     +------------+                   +--------------+
+   *                     |    Sat     |                   |     Sun      |
+   *                     | S/Football |                   | S/Basketball |
+   *                     | self=2     |                   | self=3       |
+   *                     | total=2    |                   | total=3      |
+   *                     +------------+                   +--------------+
+   *
+   * To build a multi-dimensional view of a collection of multi-dimensional
+   * paths, you create a builder, add the paths to it and then use it to
+   * construct the view. For example, the following code generates the
+   * 2-dimensional top-down tree view shown above:
+   *
+   *   var builder = new MultiDimensionalViewBuilder(2);
+   *   builder.addPath([['Saturday'], ['Cooking']], 1, SELF);
+   *   builder.addPath([['Saturday'], ['Sports', 'Football']], 2, SELF);
+   *   builder.addPath([['Sunday'], ['Sports', 'Basketball']], 3, SELF);
+   *   var treeViewRoot = builder.buildTopDownTreeView();
+   *
+   * The heavy views can be constructed analogously (by calling
+   * buildTopDownHeavyView() or buildBottomUpHeavyView() at the end instead).
+   *
+   * Note that the same builder can be used to construct both the tree and
+   * heavy views (for the same collection of paths). However, no more paths can
+   * be added once either view has been built.
+   *
+   * @constructor
+   */
+  function MultiDimensionalViewBuilder(dimensions) {
+    if (dimensions < 0)
+      throw new Error('Dimensions must be non-negative');
+    this.dimensions_ = dimensions;
+
+    this.buildRoot_ = this.createRootNode_();
+    this.topDownTreeViewRoot_ = undefined;
+    this.topDownHeavyViewRoot_ = undefined;
+    this.bottomUpHeavyViewNode_ = undefined;
+
+    this.maxDimensionDepths_ = new Array(dimensions);
+    for (var d = 0; d < dimensions; d++)
+      this.maxDimensionDepths_[d] = 0;
+  }
+
+  /** @enum */
+  MultiDimensionalViewBuilder.ValueKind = {
+    SELF: 0,
+    TOTAL: 1
+  };
+
+  MultiDimensionalViewBuilder.prototype = {
+    /**
+     * Add a value associated with a multi-dimensional path to the tree.
+     *
+     * The path must have the same number of dimensions as the builder. Its
+     * elements must be single-dimension paths (lists of strings) of arbitrary
+     * length (empty for the root of the given dimension). Starting from the
+     * root of the tree, each single-dimension path is traversed from left to
+     * right to reach the node corresponding to the whole path.
+     *
+     * The builder supports adding both kinds of values (self/total) for an
+     * arbitrary multi-dimensional path. The rationale for adding a total value
+     * (in addition to/instead of a self value) is to cater for missing
+     * sub-paths. Example: Consider the following collection of
+     * single-dimensional paths:
+     *
+     *   [['Loop::Run()', 'Execute()', 'FunctionBig']]:       self=99000
+     *   [['Loop::Run()', 'Execute()', 'FunctionSmall1']]:    self=1
+     *   [['Loop::Run()', 'Execute()', 'FunctionSmall2']]:    self=1
+     *   ...
+     *   [['Loop::Run()', 'Execute()', 'FunctionSmall1000']]: self=1
+     *
+     * If we required that only self values could be added to the builder, then
+     * all of the 1001 paths would need to be provided (most likely in a trace)
+     * to obtain the correct total of [['Loop::Run()', 'Execute()']]. However,
+     * since we allow adding total values as well, only the following 2 paths
+     * need to be provided to get the correct numbers explaining 99% of the
+     * aggregated total value:
+     *
+     *   [['Loop::Run()', 'Execute()']]:                total=100000
+     *   [['Loop::Run()', 'Execute()', 'FunctionBig']]: self=99000
+     *
+     * In other words, the long tail containing 1000 small paths need not be
+     * dumped (greatly reducing the size of a trace where applicable).
+     *
+     * Important: No paths can be added to a builder once either view has been
+     * built!
+     */
+    addPath: function(path, value, valueKind) {
+      if (this.buildRoot_ === undefined) {
+        throw new Error(
+            'Paths cannot be added after either view has been built');
+      }
+      if (path.length !== this.dimensions_)
+        throw new Error('Path must be ' + this.dimensions_ + '-dimensional');
+
+      var node = this.buildRoot_;
+
+      for (var d = 0; d < path.length; d++) {
+        var singleDimensionPath = path[d];
+        var singleDimensionPathLength = singleDimensionPath.length;
+        this.maxDimensionDepths_[d] =
+            Math.max(this.maxDimensionDepths_[d], singleDimensionPathLength);
+        for (var i = 0; i < singleDimensionPathLength; i++)
+          node = this.getOrCreateChildNode_(node, d, singleDimensionPath[i]);
+      }
+
+      switch (valueKind) {
+        case MultiDimensionalViewBuilder.ValueKind.SELF:
+          node.self += value;
+          break;
+
+        case MultiDimensionalViewBuilder.ValueKind.TOTAL:
+          node.total += value;
+          break;
+
+        default:
+          throw new Error('Invalid value kind: ' + valueKind);
+      }
+
+      node.isLowerBound = false;
+    },
+
+    buildView: function(viewType) {
+      switch (viewType) {
+        case MultiDimensionalViewType.TOP_DOWN_TREE_VIEW:
+          return this.buildTopDownTreeView();
+        case MultiDimensionalViewType.TOP_DOWN_HEAVY_VIEW:
+          return this.buildTopDownHeavyView();
+        case MultiDimensionalViewType.BOTTOM_UP_HEAVY_VIEW:
+          return this.buildBottomUpHeavyView();
+        default:
+          throw new Error('Unknown multi-dimensional view type: ' + viewType);
+      }
+    },
+
+    /**
+     * Build the top-down tree view of the multi-dimensional view.
+     *
+     * Note that no more paths can be added to the builder once either view has
+     * been built.
+     */
+    buildTopDownTreeView: function() {
+      if (this.topDownTreeViewRoot_ === undefined) {
+        var treeViewRoot = this.buildRoot_;
+        this.buildRoot_ = undefined;
+
+        this.setUpMissingChildRelationships_(treeViewRoot,
+            0 /* firstDimensionToSetUp */);
+        this.finalizeTotalValues_(treeViewRoot,
+            0 /* firstDimensionToFinalize */,
+            new WeakMap() /* dimensionalSelfSumsMap */);
+
+        this.topDownTreeViewRoot_ = treeViewRoot;
+      }
+
+      return this.topDownTreeViewRoot_;
+    },
+
+    /**
+     * Build the top-down heavy view of the multi-dimensional view.
+     *
+     * Note that no more paths can be added to the builder once either view has
+     * been built.
+     */
+    buildTopDownHeavyView: function() {
+      if (this.topDownHeavyViewRoot_ === undefined) {
+        this.topDownHeavyViewRoot_ = this.buildGenericHeavyView_(
+            this.addDimensionToTopDownHeavyViewNode_.bind(this));
+      }
+      return this.topDownHeavyViewRoot_;
+    },
+
+    /**
+     * Build the bottom-up heavy view of the multi-dimensional view.
+     *
+     * Note that no more paths can be added to the builder once either view has
+     * been built.
+     */
+    buildBottomUpHeavyView: function() {
+      if (this.bottomUpHeavyViewNode_ === undefined) {
+        this.bottomUpHeavyViewNode_ = this.buildGenericHeavyView_(
+            this.addDimensionToBottomUpHeavyViewNode_.bind(this));
+      }
+      return this.bottomUpHeavyViewNode_;
+    },
+
+    createRootNode_: function() {
+      return new MultiDimensionalViewNode(
+          new Array(this.dimensions_) /* title */, true /* isLowerBound */);
+    },
+
+    getOrCreateChildNode_: function(
+        parentNode, dimension, childDimensionTitle) {
+      if (dimension < 0 || dimension >= this.dimensions_)
+        throw new Error('Invalid dimension');
+
+      var dimensionChildren = parentNode.children[dimension];
+
+      var childNode = dimensionChildren.get(childDimensionTitle);
+      if (childNode !== undefined)
+        return childNode;
+
+      var childTitle = parentNode.title.slice();
+      childTitle[dimension] = childDimensionTitle;
+      childNode = new MultiDimensionalViewNode(
+          childTitle, true /* isLowerBound */);
+      dimensionChildren.set(childDimensionTitle, childNode);
+
+      return childNode;
+    },
+
+    /**
+     * Set up missing child relationships.
+     *
+     * When an arbitrary multi-dimensional path [path1, path2, ..., pathN] is
+     * added to the build tree (see addPath), only the nodes on the path1 ->
+     * path2 -> ... -> pathN chain are created (i.e. no interleavings of the
+     * single-dimensional paths are added to the tree). This method recursively
+     * adds all the missing paths.
+     *
+     * Two-dimensional example:
+     *
+     *    Initial build tree   .       After path      .  After missing child
+     *        (root only)      .    [[A, B], [1, 2]]   .   relationships were
+     *                         .       was added       .        set up
+     *                         .                       .
+     *           +---+         .         +---+         .         +---+
+     *           |*,*|         .         |*,*|         .         |*,*|
+     *           +---+         .         +---+         .         +---+
+     *                         .         A             .         A   1
+     *                         .         |             .         |   :
+     *                         .         v             .         v   V
+     *                         .     +---+             .     +---+   +---+
+     *                         .     |A,*|             .     |A,*|   |*,1|
+     *                         .     +---+             .     +---+   +---+
+     *                         .     B                 .     B   1   A   2
+     *                         .     |                 .     |   :   |   :
+     *                         .     v                 .     v   v   v   v
+     *                         . +---+                 . +---+   +---+   +---+
+     *                         . |B,*|                 . |B,*|   |A,1|   |*,2|
+     *                         . +---+                 . +---+   +---+   +---+
+     *                         .     1                 .     1   B   2   A
+     *                         .     :                 .     :   |   :   |
+     *                         .     v                 .     v   v   v   v
+     *                         .     +---+             .     +---+   +---+
+     *                         .     |B,1|             .     |B,1|   |A,2|
+     *                         .     +---+             .     +---+   +---+
+     *                         .         2             .         2   B
+     *                         .         :             .         :   |
+     *                         .         v             .         v   V
+     *                         .         +---+         .         +---+
+     *                         .         |B,2|         .         |B,2|
+     *                         .         +---+         .         +---+
+     */
+    setUpMissingChildRelationships_: function(node, firstDimensionToSetUp) {
+      // Missing child relationships of this node wrt dimensions 0, ...,
+      // (firstDimensionToSetUp - 1) and all descendants of the associated
+      // children have already been set up. Now we do the same for dimensions
+      // firstDimensionToSetUp, ..., (this.dimensions_ - 1).
+      for (var d = firstDimensionToSetUp; d < this.dimensions_; d++) {
+        // Step 1. Gather the names of all children wrt the current dimension.
+        var currentDimensionChildTitles = new Set(node.children[d].keys());
+        for (var i = 0; i < d; i++) {
+          for (var previousDimensionChildNode of node.children[i].values()) {
+            for (var previousDimensionGrandChildTitle of
+                 previousDimensionChildNode.children[d].keys()) {
+              currentDimensionChildTitles.add(previousDimensionGrandChildTitle);
+            }
+          }
+        }
+
+        // Step 2. Add missing children wrt the current dimension and
+        // recursively set up its missing child relationships.
+        for (var currentDimensionChildTitle of currentDimensionChildTitles) {
+          // Add a missing child (if it doesn't exist).
+          var currentDimensionChildNode =
+              this.getOrCreateChildNode_(node, d, currentDimensionChildTitle);
+
+          // Set-up child relationships (of the child node) wrt dimensions 0,
+          // ..., d - 1.
+          for (var i = 0; i < d; i++) {
+            for (var previousDimensionChildNode of node.children[i].values()) {
+              var previousDimensionGrandChildNode =
+                  previousDimensionChildNode.children[d].get(
+                      currentDimensionChildTitle);
+              if (previousDimensionGrandChildNode !== undefined) {
+                currentDimensionChildNode.children[i].set(
+                    previousDimensionChildNode.title[i],
+                    previousDimensionGrandChildNode);
+              }
+            }
+          }
+
+          // Set-up child relationships (of the child node) wrt dimensions d,
+          // ..., (this.dimensions_ - 1).
+          this.setUpMissingChildRelationships_(currentDimensionChildNode, d);
+        }
+      }
+    },
+
+    /**
+     * Finalize the total values of a multi-dimensional tree.
+     *
+     * The intermediate builder tree, a node of which we want to finalize
+     * recursively, already has the right shape. The only thing that needs to
+     * be done is to propagate self and total values from subsumed child nodes
+     * in each dimension.
+     *
+     * To derive the expression for the lower bound on the total value, we rely
+     * on the following assumptions:
+     *
+     *   1. Each node's self value does NOT overlap with the self or total value
+     *      of any other node.
+     *
+     *   2. The total values of a node's children wrt a single dimension (e.g.
+     *      [path1/A, path2] and [path1/B, path2]) do NOT overlap.
+     *
+     *   3. The total values of a node's children wrt different dimensions
+     *      (e.g. [path1/A, path2] and [path1, path2/1]) MIGHT overlap.
+     *
+     * As a consequence of assumptions 1 and 3, the total value of a node can
+     * be split into the part that cannot overlap (so-called "self-sum") and
+     * the part that can overlap (so-called "residual"):
+     *
+     *   total(N) = selfSum(N) + residual(N)                            (A)
+     *
+     * where the self-sum is calculated as the sum of the node's self value
+     * plus the sum of its descendants' self values (summed over all
+     * dimensions):
+     *
+     *   selfSum(N) = self(N) + sum over all descendants C of N {
+     *       self (C)                                                   (B)
+     *   }
+     *
+     * Observe that the residual of a node does not include any self value (of
+     * any node in the view). Furthermore, by assumption 2, we derive that the
+     * residuals of a node's children wrt a single dimension don't overlap. On
+     * the other hand, the residuals of a node's children wrt different
+     * dimensions might overlap. This gives us the following lower bound on the
+     * residual of a node:
+     *
+     *   residual(N) >= minResidual(N) = max over dimensions D {
+     *       sum over children C of N at dimension D {
+     *           residual(C)                                            (C)
+     *       }
+     *   }
+     *
+     * By combining equation (A) and inequality (C), we get a lower bound on
+     * the total value of a node:
+     *
+     *   total(N) >= selfSum(N) + minResidual(N)
+     *
+     * For example, given a two-dimensional node [path1, path2] with self value
+     * 10 and four children (2 wrt each dimension):
+     *
+     *    Child            | Self value | Total value
+     *   ==================+============+=============
+     *    [path1/A, path2] |         21 |          30
+     *    [path1/B, path2] |         25 |          32
+     *    [path1, path2/1] |         3  |          15
+     *    [path1, path2/2] |         40 |          41
+     *
+     * and assuming that the children have no further descendants (i.e. their
+     * residual values are equal to the differences between their total and
+     * self values), the lower bound on the total value of [path1, path2] is:
+     *
+     *   total([path1, path2])
+     *       >= selfSum([path1, path2]) +
+     *          minResidual([path1, path2])
+     *        = self([path1, path2]) +
+     *          sum over all descendants C of [path1, path2] {
+     *              self (C)
+     *          } +
+     *          max over dimensions D {
+     *              sum over children C of [path1, path2] at dimension D {
+     *                  residual(C)
+     *              }
+     *          }
+     *        = self([path1, path2]) +
+     *          ((self([path1/A, path2]) + self([path1/B, path2])) +
+     *           (self([path1, path2/1]) + self([path1, path2/2]))) +
+     *          max(residual([path1/A, path2]) + residual([path1/B, path2]),
+     *              residual([path1, path2/1]) + residual([path1, path2/2]))
+     *        = 10 +
+     *          ((21 + 25) + (3 + 40)) +
+     *          max((30 - 21) + (32 - 25), (15 - 3) + (41 - 40))
+     *        = 115
+     *
+     * To reduce the complexity of the calculation, we keep a temporary list of
+     * dimensional self-sums for each node that we have already visited. For a
+     * given node, the Kth element in the list is equal to the self size of the
+     * node plus the sum of self sizes of all its descendants wrt dimensions 0
+     * to K (inclusive). The list has two important properties:
+     *
+     *   1. The last element in the list is equal to the self-sum of the
+     *      associated node (equation (B)).
+     *
+     *   2. The calculation of the list can be performed recursively using the
+     *      lists of the associated node's children (avoids square complexity
+     *      in the size of the graph):
+     *
+     *        dimensionalSelfSum(N)[D] =
+     *            self(N) +
+     *            sum I = 0 to D {
+     *                sum over children C of N at dimension I {
+     *                    dimensionalSelfSum(C)[I]
+     *                }
+     *            }
+     */
+    finalizeTotalValues_: function(
+        node, firstDimensionToFinalize, dimensionalSelfSumsMap) {
+      var dimensionalSelfSums = new Array(this.dimensions_);
+      var maxChildResidualSum = 0;
+      var nodeSelfSum = node.self;
+
+      for (var d = 0; d < this.dimensions_; d++) {
+        var childResidualSum = 0;
+        for (var childNode of node.children[d].values()) {
+          if (d >= firstDimensionToFinalize)
+            this.finalizeTotalValues_(childNode, d, dimensionalSelfSumsMap);
+          var childNodeSelfSums = dimensionalSelfSumsMap.get(childNode);
+          nodeSelfSum += childNodeSelfSums[d];
+          var residual =
+              childNode.total - childNodeSelfSums[this.dimensions_ - 1];
+          childResidualSum += residual;
+        }
+        dimensionalSelfSums[d] = nodeSelfSum;
+        maxChildResidualSum = Math.max(maxChildResidualSum, childResidualSum);
+      }
+
+      node.total = Math.max(node.total, nodeSelfSum + maxChildResidualSum);
+
+      if (dimensionalSelfSumsMap.has(node))
+        throw new Error('Internal error: Node finalized more than once');
+      dimensionalSelfSumsMap.set(node, dimensionalSelfSums);
+    },
+
+    /**
+     * Build a generic heavy view of the multi-dimensional view.
+     */
+    buildGenericHeavyView_: function(treeViewNodeHandler) {
+      // 1. Clone the root node of the top-down tree view node (except
+      // children).
+      var treeViewRoot = this.buildTopDownTreeView();
+      var heavyViewRoot = this.createRootNode_();
+      heavyViewRoot.total = treeViewRoot.total;
+      heavyViewRoot.self = treeViewRoot.self;
+      heavyViewRoot.isLowerBound = treeViewRoot.isLowerBound;
+
+      // 2. Create recursion depth trackers (to avoid total value
+      // double-counting).
+      var recursionDepthTrackers = new Array(this.dimensions_);
+      for (var d = 0; d < this.dimensions_; d++) {
+        recursionDepthTrackers[d] =
+            new RecursionDepthTracker(this.maxDimensionDepths_[d], d);
+      }
+
+      // 3. Add all paths associated with the single-dimensional descendants of
+      // the top-down tree view root node to the heavy view root node
+      // (depending on the type of the target heavy view).
+      this.addDimensionsToGenericHeavyViewNode_(treeViewRoot, heavyViewRoot,
+          0 /* startDimension */, recursionDepthTrackers,
+          false /* previousDimensionsRecursive */, treeViewNodeHandler);
+
+      // 4. Set up missing child relationships.
+      this.setUpMissingChildRelationships_(heavyViewRoot,
+          0 /* firstDimensionToSetUp */);
+
+      return heavyViewRoot;
+    },
+
+    /**
+     * Add all paths associated with the single-dimensional descendants of a
+     * top-down tree-view node wrt multiple dimensions to a generic heavy-view
+     * node (depending on the type of the target heavy view).
+     */
+    addDimensionsToGenericHeavyViewNode_: function(treeViewParentNode,
+        heavyViewParentNode, startDimension, recursionDepthTrackers,
+        previousDimensionsRecursive, treeViewNodeHandler) {
+      for (var d = startDimension; d < this.dimensions_; d++) {
+        this.addDimensionDescendantsToGenericHeavyViewNode_(treeViewParentNode,
+            heavyViewParentNode, d, recursionDepthTrackers,
+            previousDimensionsRecursive, treeViewNodeHandler);
+      }
+    },
+
+    /**
+     * Add all paths associated with the descendants of a top-down tree-view
+     * node wrt a single dimension to a generic heavy-view node (depending on
+     * the type of the target heavy view).
+     */
+    addDimensionDescendantsToGenericHeavyViewNode_: function(treeViewParentNode,
+        heavyViewParentNode, currentDimension, recursionDepthTrackers,
+        previousDimensionsRecursive, treeViewNodeHandler) {
+      var treeViewChildren = treeViewParentNode.children[currentDimension];
+      var recursionDepthTracker = recursionDepthTrackers[currentDimension];
+      for (var treeViewChildNode of treeViewChildren.values()) {
+        recursionDepthTracker.push(treeViewChildNode);
+
+        // Add all paths associated with the child node to the heavy view-node
+        // parent node.
+        treeViewNodeHandler(
+            treeViewChildNode, heavyViewParentNode, currentDimension,
+            recursionDepthTrackers, previousDimensionsRecursive);
+
+        // Recursively add all paths associated with the descendants of the
+        // tree view child node wrt the current dimension to the heavy-view
+        // parent node.
+        this.addDimensionDescendantsToGenericHeavyViewNode_(treeViewChildNode,
+            heavyViewParentNode, currentDimension, recursionDepthTrackers,
+            previousDimensionsRecursive, treeViewNodeHandler);
+
+        recursionDepthTracker.pop();
+      }
+    },
+
+    /**
+     * Add a top-down tree-view child node together with its single-dimensional
+     * subtree to a top-down heavy-view parent node (tree-view node handler for
+     * top-down heavy view).
+     *
+     * Sample resulting top-down heavy view:
+     *
+     *       +----------------+                    +-----------------+
+     *       |     source     |                    |   destination   |
+     *       | tree-view root |  ===============>  | heavy-view root |
+     *       |     self=0     |                    |     self=0      |
+     *       |    total=48    |                    |    total=48     |
+     *       +----------------+                    +-----------------+
+     *         |            |                  ______|      |      |______
+     *         v            v                 v             v             v
+     *    +----------+ +----------+      +----------+ +----------+ +----------+
+     *    |    A*    | |    B     |      |    A***  | |    B     | |    C     |
+     *    | self=10  | | self=12  |      | self=13  | | self=13  | | self=2   |
+     *    | total=30 | | total=18 |      | total=30 | | total=34 | | total=7  |
+     *    +----------+ +----------+      +----------+ +----------+ +----------+
+     *         |                              :            :   :.........
+     *         v                              v            v            v
+     *    +----------+                   ............ ............ ............
+     *    |    B     |                   :    B     : :    A     : :    C     :
+     *    | self=1   |                   : self=1   : : self=3   : : self=2   :
+     *    | total=16 |                   : total=16 : : total=8  : : total=7  :
+     *    +----------+                   ............ ............ ............
+     *         |   |________                  :   :.........
+     *         v            v                 v            v
+     *    +----------+ +----------+      ............ ............
+     *    |    A**   | |    C     |      :    A     : :    C     :
+     *    | self=3   | | self=2   |      : self=3   : : self=2   :
+     *    | total=8  | | total=7  |      : total=8  : : total=7  :
+     *    +----------+ +----------+      ............ ............
+     *
+     * Observe that care needs to be taken when dealing with recursion to avoid
+     * double-counting, e.g. the total value of A** (8) was not added to the
+     * total value of A*** (30) because it is already included in the total
+     * value of A* (30) (which was also added to A***). That is why we need to
+     * keep track of the path we traversed along the current dimension (to
+     * determine whether total value should be added or not).
+     */
+    addDimensionToTopDownHeavyViewNode_: function(
+        treeViewChildNode, heavyViewParentNode, currentDimension,
+        recursionDepthTrackers, previousDimensionsRecursive) {
+      this.addDimensionToTopDownHeavyViewNodeRecursively_(treeViewChildNode,
+          heavyViewParentNode, currentDimension, recursionDepthTrackers,
+          previousDimensionsRecursive, 1 /* subTreeDepth */);
+    },
+
+    addDimensionToTopDownHeavyViewNodeRecursively_: function(
+        treeViewChildNode, heavyViewParentNode, currentDimension,
+        recursionDepthTrackers, previousDimensionsRecursive, subTreeDepth) {
+      var recursionDepthTracker = recursionDepthTrackers[currentDimension];
+      var currentDimensionRecursive =
+          subTreeDepth <= recursionDepthTracker.recursionDepth;
+      var currentOrPreviousDimensionsRecursive =
+          currentDimensionRecursive || previousDimensionsRecursive;
+
+      var dimensionTitle = treeViewChildNode.title[currentDimension];
+      var heavyViewChildNode = this.getOrCreateChildNode_(
+          heavyViewParentNode, currentDimension, dimensionTitle);
+
+      heavyViewChildNode.self += treeViewChildNode.self;
+      if (!currentOrPreviousDimensionsRecursive)
+        heavyViewChildNode.total += treeViewChildNode.total;
+
+      // Add the descendants of the tree-view child node wrt the next
+      // dimensions as children of the heavy-view child node.
+      this.addDimensionsToGenericHeavyViewNode_(treeViewChildNode,
+          heavyViewChildNode, currentDimension + 1, recursionDepthTrackers,
+          currentOrPreviousDimensionsRecursive,
+          this.addDimensionToTopDownHeavyViewNode_.bind(this));
+
+      for (var treeViewGrandChildNode of
+           treeViewChildNode.children[currentDimension].values()) {
+        recursionDepthTracker.push(treeViewGrandChildNode);
+
+        // Recursively add the tree-view grandchild node to the heavy-view
+        // child node.
+        this.addDimensionToTopDownHeavyViewNodeRecursively_(
+            treeViewGrandChildNode, heavyViewChildNode, currentDimension,
+            recursionDepthTrackers, previousDimensionsRecursive,
+            subTreeDepth + 1);
+
+        recursionDepthTracker.pop();
+      }
+    },
+
+    /**
+     * Add a top-down tree-view child node together with all its ancestors wrt
+     * the given dimension as descendants of a bottom-up heavy-view parent node
+     * in the reverse order (tree-view node handler for bottom-up heavy view).
+     *
+     * Sample resulting bottom-up heavy view:
+     *
+     *       +----------------+                    +-----------------+
+     *       |     source     |                    |   destination   |
+     *       | tree-view root |  ===============>  | heavy-view root |
+     *       |     self=0     |                    |     self=0      |
+     *       |    total=48    |                    |    total=48     |
+     *       +----------------+                    +-----------------+
+     *         |            |                  ______|      |      |______
+     *         v            v                 v             v             v
+     *    +----------+ +----------+      +----------+ +----------+ +----------+
+     *    |    A*    | |    B     |      |    A***  | |    B     | |    C     |
+     *    | self=10  | | self=12  |      | self=13  | | self=13  | | self=2   |
+     *    | total=30 | | total=18 |      | total=30 | | total=34 | | total=7  |
+     *    +----------+ +----------+      +----------+ +----------+ +----------+
+     *         |                              :            :            :
+     *         v                              v            v            v
+     *    +----------+                   ............ ............ ............
+     *    |    B#    |                   :    B     : :    A     : :    B##   :
+     *    | self=1   |                   : self=3   : : self=1   : : self=2   :
+     *    | total=16 |                   : total=8  : : total=16 : : total=7  :
+     *    +----------+                   ............ ............ ............
+     *         |   |________                  :                         :
+     *         v            v                 v                         v
+     *    +----------+ +----------+      ............              ............
+     *    |    A**   | |    C     |      :    A     :              :    A     :
+     *    | self=3   | | self=2   |      : self=3   :              : self=2   :
+     *    | total=8  | | total=7  |      : total=8  :              : total=7  :
+     *    +----------+ +----------+      ............              ............
+     *
+     * Similarly to the construction of the top-down heavy view, care needs to
+     * be taken when dealing with recursion to avoid double-counting, e.g. the
+     * total value of A** (8) was not added to the total value of A*** (30)
+     * because it is already included in the total value of A* (30) (which was
+     * also added to A***). That is why we need to keep track of the path we
+     * traversed along the current dimension (to determine whether total value
+     * should be added or not).
+     *
+     * Note that when we add an ancestor (B#) of a top-down tree-view node (C)
+     * to the bottom-up heavy view, the values of the original tree-view node
+     * (C) (rather than the ancestor's values) are added to the corresponding
+     * heavy-view node (B##).
+     */
+    addDimensionToBottomUpHeavyViewNode_: function(
+        treeViewChildNode, heavyViewParentNode, currentDimension,
+        recursionDepthTrackers, previousDimensionsRecursive) {
+      var recursionDepthTracker = recursionDepthTrackers[currentDimension];
+      var bottomIndex = recursionDepthTracker.bottomIndex;
+      var topIndex = recursionDepthTracker.topIndex;
+      var firstNonRecursiveIndex =
+          bottomIndex + recursionDepthTracker.recursionDepth;
+      var viewNodePath = recursionDepthTracker.viewNodePath;
+
+      var trackerAncestorNode = recursionDepthTracker.trackerAncestorNode;
+      var heavyViewDescendantNode = heavyViewParentNode;
+      for (var i = bottomIndex; i < topIndex; i++) {
+        var treeViewAncestorNode = viewNodePath[i];
+        var dimensionTitle = treeViewAncestorNode.title[currentDimension];
+        heavyViewDescendantNode = this.getOrCreateChildNode_(
+            heavyViewDescendantNode, currentDimension, dimensionTitle);
+
+        var currentDimensionRecursive = i < firstNonRecursiveIndex;
+        var currentOrPreviousDimensionsRecursive =
+            currentDimensionRecursive || previousDimensionsRecursive;
+
+        // The self and total values are taken from the original top-down tree
+        // view child node (rather than the ancestor node).
+        heavyViewDescendantNode.self += treeViewChildNode.self;
+        if (!currentOrPreviousDimensionsRecursive)
+          heavyViewDescendantNode.total += treeViewChildNode.total;
+
+        // Add the descendants of the tree-view child node wrt the next
+        // dimensions as children of the heavy-view child node.
+        this.addDimensionsToGenericHeavyViewNode_(treeViewChildNode,
+            heavyViewDescendantNode, currentDimension + 1,
+            recursionDepthTrackers, currentOrPreviousDimensionsRecursive,
+            this.addDimensionToBottomUpHeavyViewNode_.bind(this));
+      }
+    }
+  };
+
+  /**
+   * Recursion depth tracker.
+   *
+   * This class tracks the recursion depth of the current stack (updated via
+   * the push and pop methods). The recursion depth of a stack is the length of
+   * its longest leaf suffix that is repeated within the stack itself.
+   *
+   * For example, the recursion depth of the stack A -> B -> C -> A -> B -> B
+   * -> C (where C is the leaf node) is 2 because the suffix B -> C is repeated
+   * within it.
+   *
+   * @constructor
+   */
+  function RecursionDepthTracker(maxDepth, dimension) {
+    this.titlePath = new Array(maxDepth);
+    this.viewNodePath = new Array(maxDepth);
+    this.bottomIndex = this.topIndex = maxDepth;
+
+    this.dimension_ = dimension;
+    this.currentTrackerNode_ =
+        this.createNode_(0 /* recursionDepth */, undefined /* parent */);
+  }
+
+  RecursionDepthTracker.prototype = {
+    push: function(viewNode) {
+      if (this.bottomIndex === 0)
+        throw new Error('Cannot push to a full tracker');
+      var title = viewNode.title[this.dimension_];
+      this.bottomIndex--;
+      this.titlePath[this.bottomIndex] = title;
+      this.viewNodePath[this.bottomIndex] = viewNode;
+
+      var childTrackerNode = this.currentTrackerNode_.children.get(title);
+      if (childTrackerNode !== undefined) {
+        // Child node already exists, so we don't need to calculate anything.
+        this.currentTrackerNode_ = childTrackerNode;
+        return;
+      }
+
+      // Child node doesn't exist yet, so we need to calculate its recursion
+      // depth.
+      var maxLengths = zFunction(this.titlePath, this.bottomIndex);
+      var recursionDepth = 0;
+      for (var i = 0; i < maxLengths.length; i++)
+        recursionDepth = Math.max(recursionDepth, maxLengths[i]);
+
+      childTrackerNode =
+          this.createNode_(recursionDepth, this.currentTrackerNode_);
+      this.currentTrackerNode_.children.set(title, childTrackerNode);
+      this.currentTrackerNode_ = childTrackerNode;
+    },
+
+    pop: function() {
+      if (this.bottomIndex === this.topIndex)
+        throw new Error('Cannot pop from an empty tracker');
+
+      this.titlePath[this.bottomIndex] = undefined;
+      this.viewNodePath[this.bottomIndex] = undefined;
+      this.bottomIndex++;
+
+      this.currentTrackerNode_ = this.currentTrackerNode_.parent;
+    },
+
+    get recursionDepth() {
+      return this.currentTrackerNode_.recursionDepth;
+    },
+
+    createNode_: function(recursionDepth, parent) {
+      return {
+        recursionDepth: recursionDepth,
+        parent: parent,
+        children: new Map()
+      };
+    }
+  };
+
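+  // Illustrative usage sketch (this example is not part of the original
+  // change): tracking the recursion depth of the single-dimensional stack
+  // A -> B -> A -> B, whose leaf suffix A -> B is repeated within the stack.
+  //
+  //   var tracker = new RecursionDepthTracker(4 /* maxDepth */, 0);
+  //   tracker.push({ title: ['A'] });  // Stack: A. Depth: 0.
+  //   tracker.push({ title: ['B'] });  // Stack: A -> B. Depth: 0.
+  //   tracker.push({ title: ['A'] });  // Stack: A -> B -> A. Depth: 1.
+  //   tracker.push({ title: ['B'] });  // Stack: A -> B -> A -> B. Depth: 2.
+  //   tracker.recursionDepth;          // 2 (the suffix A -> B is repeated).
+  //   tracker.pop();                   // Back to A -> B -> A, depth 1.
+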
+  /**
+   * Calculate the Z-function of (a suffix of) a list.
+   *
+   * Z-function: Given a list (or a string) of length n, for each index i from
+   * 1 to n - 1, find the length z[i] of the longest substring starting at
+   * position i which is also a prefix of the list. This function returns the
+   * list of maximum lengths z.
+   *
+   * Mathematically, for each i from 1 to n - 1, z[i] is the maximum value
+   * such that [list[0], ..., list[z[i] - 1]] =
+   * [list[i], ..., list[i + z[i] - 1]]. z[0] is defined to be zero for
+   * convenience.
+   *
+   * Example:
+   *
+   *   Input (list): ['A', 'B', 'A', 'C', 'A', 'B', 'A']
+   *   Output (z):   [ 0 ,  0 ,  1 ,  0 ,  3 ,  0 ,  1 ]
+   *
+   * Unlike the brute-force approach (which is O(n^2) in the worst case), the
+   * complexity of this implementation is linear in the size of the list, i.e.
+   * O(n).
+   *
+   * Source: http://e-maxx-eng.github.io/string/z-function.html
+   */
+  function zFunction(list, startIndex) {
+    var n = list.length - startIndex;
+    if (n === 0)
+      return [];
+
+    var z = new Array(n);
+    z[0] = 0;
+
+    for (var i = 1, left = 0, right = 0; i < n; ++i) {
+      var maxLength;
+      if (i <= right)
+        maxLength = Math.min(right - i + 1, z[i - left]);
+      else
+        maxLength = 0;
+
+      while (i + maxLength < n && list[startIndex + maxLength] ===
+             list[startIndex + i + maxLength]) {
+        ++maxLength;
+      }
+
+      if (i + maxLength - 1 > right) {
+        left = i;
+        right = i + maxLength - 1;
+      }
+
+      z[i] = maxLength;
+    }
+
+    return z;
+  }
+
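+  // Illustrative sketch (not part of the original change): the example from
+  // the comment above, plus a call that only considers the suffix of the
+  // list starting at startIndex.
+  //
+  //   zFunction(['A', 'B', 'A', 'C', 'A', 'B', 'A'], 0);
+  //       // -> [0, 0, 1, 0, 3, 0, 1]
+  //   zFunction(['X', 'A', 'B', 'A', 'C', 'A', 'B', 'A'], 1);
+  //       // -> [0, 0, 1, 0, 3, 0, 1] (the leading 'X' is ignored)
+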
+  return {
+    MultiDimensionalViewBuilder: MultiDimensionalViewBuilder,
+    MultiDimensionalViewType: MultiDimensionalViewType,
+
+    // Exports below are for testing only.
+    MultiDimensionalViewNode: MultiDimensionalViewNode,
+    RecursionDepthTracker: RecursionDepthTracker,
+    zFunction: zFunction
+  };
+});
+</script>
diff --git a/catapult/tracing/tracing/base/multi_dimensional_view_test.html b/catapult/tracing/tracing/base/multi_dimensional_view_test.html
new file mode 100644
index 0000000..38b69a1
--- /dev/null
+++ b/catapult/tracing/tracing/base/multi_dimensional_view_test.html
@@ -0,0 +1,9598 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/base/iteration_helpers.html">
+<link rel="import" href="/tracing/base/multi_dimensional_view.html">
+<link rel="import" href="/tracing/core/test_utils.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  var MultiDimensionalViewBuilder = tr.b.MultiDimensionalViewBuilder;
+  var MultiDimensionalViewType = tr.b.MultiDimensionalViewType;
+  var MultiDimensionalViewNode = tr.b.MultiDimensionalViewNode;
+  var SELF = MultiDimensionalViewBuilder.ValueKind.SELF;
+  var TOTAL = MultiDimensionalViewBuilder.ValueKind.TOTAL;
+  var RecursionDepthTracker = tr.b.RecursionDepthTracker;
+  var zFunction = tr.b.zFunction;
+
+  function assertListStrictEqual(a, b) {
+    assert.lengthOf(a, b.length);
+    for (var i = 0; i < a.length; i++)
+      assert.strictEqual(a[i], b[i]);
+  }
+
+  function checkTree(actualTreeRootNode, expectedStructureRootNode) {
+    // Build map from expected structure node IDs to expected structure nodes.
+    var expectedStructureNodesById = new Map();
+    addExpectedStructureToMap(
+        expectedStructureRootNode, expectedStructureNodesById);
+
+    // Recursively check the structure of the actual tree.
+    var actualTreeNodesById = new Map();
+    checkTreeStructure(actualTreeRootNode, expectedStructureRootNode,
+        actualTreeNodesById, expectedStructureNodesById);
+
+    // Test sanity check.
+    assert.strictEqual(
+        actualTreeNodesById.size, expectedStructureNodesById.size);
+  }
+
+  function addExpectedStructureToMap(expectedStructureNode, map) {
+    if (typeof expectedStructureNode === 'string')
+      return;  // Reference to another expected structure node.
+
+    var expectedStructureNodeId = expectedStructureNode.id;
+    if (expectedStructureNodeId !== undefined) {
+      assert.isFalse(map.has(expectedStructureNodeId));
+      map.set(expectedStructureNodeId, expectedStructureNode);
+    }
+
+    var expectedStructureChildren = expectedStructureNode.children;
+    for (var d = 0; d < expectedStructureChildren.length; d++) {
+      var expectedStructureDimensionChildren = expectedStructureChildren[d];
+      for (var i = 0; i < expectedStructureDimensionChildren.length; i++)
+        addExpectedStructureToMap(expectedStructureDimensionChildren[i], map);
+    }
+  }
+
+  function checkTreeStructure(actualTreeNode, expectedStructureNode,
+      actualTreeNodesById, expectedStructureNodesById) {
+    // Check the multi-dimensional title.
+    assert.deepEqual(
+        tr.b.asArray(actualTreeNode.title), expectedStructureNode.title);
+
+    // Check the values.
+    assert.strictEqual(actualTreeNode.total, expectedStructureNode.total);
+    assert.strictEqual(actualTreeNode.self, expectedStructureNode.self);
+    assert.strictEqual(
+        actualTreeNode.isLowerBound, expectedStructureNode.isLowerBound);
+
+    // Check the children.
+    var expectedStructureChildNodes = expectedStructureNode.children;
+    var actualTreeChildNodes = actualTreeNode.children;
+    assert.lengthOf(actualTreeChildNodes, expectedStructureChildNodes.length);
+
+    for (var d = 0; d < expectedStructureChildNodes.length; d++) {
+      var expectedStructureDimensionChildNodes = expectedStructureChildNodes[d];
+      var actualTreeDimensionChildNodes = actualTreeChildNodes[d];
+      assert.strictEqual(actualTreeDimensionChildNodes.size,
+          expectedStructureDimensionChildNodes.length);
+
+      var expectedStructureDimensionChildNodeTitles = new Set();
+
+      for (var i = 0; i < expectedStructureDimensionChildNodes.length; i++) {
+        var expectedStructureDimensionChildNode =
+            expectedStructureDimensionChildNodes[i];
+        var isReference = false;
+
+        // If the expected structure child node is a reference to another
+        // expected structure node, resolve it.
+        if (typeof expectedStructureDimensionChildNode === 'string') {
+          expectedStructureDimensionChildNode = expectedStructureNodesById.get(
+              expectedStructureDimensionChildNode);
+          assert.isDefined(expectedStructureDimensionChildNode);
+          isReference = true;
+        }
+
+        // Check that the expected structure doesn't contain two children with
+        // the same title.
+        var childTitle = expectedStructureDimensionChildNode.title[d];
+        assert.isFalse(
+            expectedStructureDimensionChildNodeTitles.has(childTitle));
+        expectedStructureDimensionChildNodeTitles.add(childTitle);
+
+        // Get the associated child node of the actual tree.
+        var actualTreeDimensionChildNode =
+            actualTreeDimensionChildNodes.get(childTitle);
+        assert.isDefined(actualTreeDimensionChildNode);
+
+        // Check that all expected structure nodes with the same ID correspond
+        // to the same actual tree node.
+        var childId = expectedStructureDimensionChildNode.id;
+        if (childId !== undefined) {
+          if (actualTreeNodesById.has(childId)) {
+            assert.strictEqual(actualTreeDimensionChildNode,
+                actualTreeNodesById.get(childId));
+          } else {
+            actualTreeNodesById.set(childId, actualTreeDimensionChildNode);
+          }
+        }
+
+        // Recursively check the structure of the actual tree child node
+        // (unless the expected structure child node was a reference).
+        if (!isReference) {
+          checkTreeStructure(actualTreeDimensionChildNode,
+              expectedStructureDimensionChildNode, actualTreeNodesById,
+              expectedStructureNodesById);
+        }
+      }
+
+      // Test sanity check (all child titles should be unique).
+      assert.strictEqual(expectedStructureDimensionChildNodeTitles.size,
+          expectedStructureDimensionChildNodes.length);
+    }
+  }
+
+  function createBuilderWithEntries(dimensions, pathEntries) {
+    var builder = new MultiDimensionalViewBuilder(dimensions);
+    pathEntries.forEach(function(pathEntry) {
+      builder.addPath(pathEntry.path, pathEntry.value, pathEntry.kind);
+    });
+    return builder;
+  }
+
+  function builderTest(testName, dimensions, pathEntries,
+      expectedTopDownTreeViewStructure, expectedTopDownHeavyViewStructure,
+      expectedBottomUpHeavyViewStructure) {
+    test('builder_' + testName, function() {
+      // Create a multi-dimensional tree builder and add all paths to it.
+      var builder = createBuilderWithEntries(dimensions, pathEntries);
+
+      // Build and check the views.
+      checkTree(
+          builder.buildView(MultiDimensionalViewType.TOP_DOWN_TREE_VIEW),
+          expectedTopDownTreeViewStructure);
+      checkTree(
+          builder.buildView(MultiDimensionalViewType.TOP_DOWN_HEAVY_VIEW),
+          expectedTopDownHeavyViewStructure);
+      checkTree(
+          builder.buildView(MultiDimensionalViewType.BOTTOM_UP_HEAVY_VIEW),
+          expectedBottomUpHeavyViewStructure);
+    });
+  }
+
+  /**
+   * Calculate the sum of binary powers.
+   *
+   * Each exponent can either be (1) a single number corresponding to a single
+   * power of two (2**exponent), or (2) a two-element list for a sum over a
+   * range of exponents (2**exponent[0] + 2**(exponent[0] + 1) + ... +
+   * 2**exponent[1]).
+   */
+  function b(/* exponent1, ..., exponentN */) {
+
+    var sum = 0;
+    for (var i = 0; i < arguments.length; i++) {
+      var exponent = arguments[i];
+      if (typeof exponent === 'number') {
+        sum += 1 << exponent;
+      } else {
+        assert.lengthOf(exponent, 2);  // Test sanity check.
+        // We use the fact that 2**A + 2**(A + 1) ... + 2**B =
+        // (2**0 + 2**1 + ... 2**B) - (2**0 + 2**1 + ... + 2**(A - 1)) =
+        // (2**(B + 1) - 1) - (2**A - 1) = 2**(B + 1) - 2**A.
+        sum += (1 << (exponent[1] + 1)) - (1 << exponent[0]);
+      }
+    }
+    return sum;
+  }
+
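+  // Illustrative sketch (not part of the original change): a few worked
+  // values of the helper above.
+  //
+  //   b(3)          // 2**3                       === 8
+  //   b(0, 3)       // 2**0 + 2**3                === 9
+  //   b([1, 3])     // 2**1 + 2**2 + 2**3         === 14
+  //   b(0, [2, 4])  // 2**0 + 2**2 + 2**3 + 2**4  === 29
+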
+  function checkZFunction(list, expectedResult) {
+    if (typeof list === 'string') {
+      assert.deepEqual(zFunction(list, 0), expectedResult);
+      assert.deepEqual(zFunction(list[0] + list, 1), expectedResult);
+      assert.deepEqual(zFunction(list + list, list.length), expectedResult);
+    } else {
+      assert.deepEqual(zFunction([].concat(list), 0), expectedResult);
+      assert.deepEqual(zFunction([list[0]].concat(list), 1), expectedResult);
+      assert.deepEqual(
+          zFunction(list.concat(list), list.length), expectedResult);
+    }
+  }
+
+  /**
+   * Helper function for generating builder tests. Given a number of dimensions
+   * and a list of path entries, this function generates the source code of
+   * the corresponding builder test with expected top-down tree view, top-down
+   * heavy view and bottom-up heavy view structures.
+   *
+   * This avoids the need to write such tests manually, which is very tedious.
+   * However, the correctness of the generated structures needs to be verified
+   * by the developer! Maximum line length must also be enforced manually.
+   */
+  function generateBuilderTest(targetTestName, dimensions, pathEntries) {
+    test('builderTestGenerator_' + targetTestName, function() {
+      // Create the builder.
+      var builder = createBuilderWithEntries(dimensions, pathEntries);
+
+      // Generate the test case source code.
+      var generator = new tr.c.TestUtils.SourceGenerator();
+      generator.indentBlock(2, false /* don't break line */, function() {
+        // Test name and number of dimensions (first line).
+        generator.push('builderTest(\'', targetTestName, '\', ',
+            String(dimensions), ' /* dimensions */,');
+
+        generator.indentBlock(4, true /* break line */, function() {
+          // Path entries.
+          generator.formatMultiLineList(pathEntries, function(pathEntry) {
+            generator.push('{ path: ');
+            generator.formatSingleLineList(
+                pathEntry.path,
+                function(singleDimensionPath) {
+                  generator.formatSingleLineList(
+                      singleDimensionPath, generator.formatString, generator);
+                });
+            generator.push(', value: ', String(pathEntry.value));
+            var kind = pathEntry.kind === SELF ? 'SELF' : 'TOTAL';
+            generator.push(', kind: ', kind, ' }');
+          });
+          generator.push(',');
+          generator.breakLine();
+
+          function formatExpectedTreeStructure(root, label) {
+            var nextNodeId = 0;
+            var nodeInfos = new WeakMap();
+
+            function assignNodeIdsToRepeatedNodes(node) {
+              if (nodeInfos.has(node)) {
+                // We have already visited the node (one or more times), so
+                // there is no need to visit its children.
+                if (nodeInfos.get(node) === undefined) {
+                  // This is the second time we visited the node: Change the
+                  // undefined entry to a defined node info entry.
+                  nodeInfos.set(node, { id: undefined });
+                }
+                return;
+              }
+
+              // This is the first time we visited the node: Add an undefined
+              // entry to the node info map and recursively visit all its
+              // children.
+              nodeInfos.set(node, undefined);
+              node.children.forEach(function(singleDimensionChildren) {
+                for (var child of singleDimensionChildren.values())
+                  assignNodeIdsToRepeatedNodes(child);
+              });
+            }
+            assignNodeIdsToRepeatedNodes(root);
+
+            // Track the multi-dimensional path to the current node to generate
+            // comments.
+            var paths = new Array(dimensions);
+            for (var i = 0; i < paths.length; i++)
+              paths[i] = [];
+            function withChild(childNode, dimension, callback) {
+              paths[dimension].push(childNode.title[dimension]);
+              callback();
+              paths[dimension].pop();
+            }
+            function appendPathComment(opt_label) {
+              if (opt_label) {
+                generator.pushComment(opt_label);
+                return;
+              }
+
+              paths.forEach(function(dimensionPath, dimensionIndex) {
+                if (dimensionIndex > 0)
+                  generator.pushComment(', ');
+                if (dimensionPath.length === 0) {
+                  generator.pushComment('*');
+                  return;
+                }
+                dimensionPath.forEach(function(ancestorTitle, ancestorIndex) {
+                  if (ancestorIndex > 0)
+                    generator.pushComment(' -> ');
+                  generator.pushComment(ancestorTitle);
+                });
+              });
+            }
+
+            function formatExpectedTreeStructureRecursively(node, opt_label) {
+              var nodeId = undefined;
+              var nodeInfo = nodeInfos.get(node);
+              if (nodeInfo !== undefined) {
+                // This node is referenced multiple times in the expected tree
+                // structure.
+                nodeId = nodeInfo.id;
+                if (nodeId === undefined) {
+                  // This is the first time we visited the node: Assign it a
+                  // unique node id and then format it and its descendants
+                  // recursively.
+                  nodeId = '#' + (nextNodeId++);
+                  nodeInfo.id = nodeId;
+                } else {
+                  // We have already visited this node: Just insert the node's
+                  // id (instead of formatting it and its descendants
+                  // recursively again).
+                  generator.push('\'', nodeId, '\'');
+                  appendPathComment();
+                  return;
+                }
+              }
+
+              generator.push('{');
+              appendPathComment(opt_label);
+
+              generator.indentBlock(2, true /* break line */, function() {
+                // Node id (if defined).
+                if (nodeId !== undefined) {
+                  generator.push('id: \'', nodeId, '\',');
+                  generator.breakLine();
+                }
+
+                // Node title.
+                generator.push('title: ');
+                generator.formatSingleLineList(
+                    node.title, generator.formatString, generator);
+                generator.push(',');
+                generator.breakLine();
+
+                // Node values.
+                generator.push('total: ', String(node.total), ',');
+                generator.breakLine();
+                generator.push('self: ', String(node.self), ',');
+                generator.breakLine();
+                generator.push(
+                    'isLowerBound: ', String(node.isLowerBound), ',');
+                generator.breakLine();
+
+                // Node children.
+                var children = node.children;
+                generator.push('children: ');
+                generator.formatMultiLineList(
+                    children,
+                    function(singleDimensionChildren, dimension) {
+                      generator.formatMultiLineList(
+                          tr.b.mapValues(singleDimensionChildren),
+                          function(child, childIndex) {
+                            withChild(child, dimension, function() {
+                              formatExpectedTreeStructureRecursively(child);
+                            });
+                          });
+                    });
+              });
+              generator.breakLine();
+              generator.push('}');
+            }
+
+            formatExpectedTreeStructureRecursively(root, label);
+          }
+
+          // Build and format the three multi-dimensional views.
+          formatExpectedTreeStructure(
+              builder.buildTopDownTreeView(), 'Top-down tree view');
+          generator.push(',');
+          generator.breakLine();
+          formatExpectedTreeStructure(
+              builder.buildTopDownHeavyView(), 'Top-down heavy view');
+          generator.push(',');
+          generator.breakLine();
+          formatExpectedTreeStructure(
+              builder.buildBottomUpHeavyView(), 'Bottom-up heavy view');
+          generator.push(');');
+        });
+      });
+
+      tr.c.TestUtils.addSourceListing(this, generator.build());
+
+      throw new Error('This error is thrown to prevent accidentally ' +
+          'checking in a test generator instead of an actual test.');
+    });
+  }
+
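+  // Illustrative usage sketch (not part of the original change; the test
+  // name and path entries below are hypothetical): a developer would
+  // temporarily add a call such as the following, run the suite, copy the
+  // generated source listing into this file as a builderTest(...) call,
+  // verify it by hand and then delete the generator call again (the
+  // deliberate throw above keeps generator calls from being checked in).
+  //
+  //   generateBuilderTest('twoDimensions_example', 2 /* dimensions */, [
+  //     { path: [['A'], ['X', 'Y']], value: 10, kind: SELF },
+  //     { path: [['A', 'B'], ['X']], value: 20, kind: TOTAL }
+  //   ]);
+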
+  builderTest('zeroDimensions_noPaths', 0 /* dimensions */,
+      [],
+      {  // Top-down tree view.
+        title: [],
+        total: 0,
+        self: 0,
+        isLowerBound: true,
+        children: []
+      },
+      {  // Top-down heavy view.
+        title: [],
+        total: 0,
+        self: 0,
+        isLowerBound: true,
+        children: []
+      },
+      {  // Bottom-up heavy view.
+        title: [],
+        total: 0,
+        self: 0,
+        isLowerBound: true,
+        children: []
+      });
+
+  builderTest('zeroDimensions_withPaths', 0 /* dimensions */,
+      [
+        { path: [], value: 2, kind: SELF },
+        { path: [], value: 3, kind: TOTAL },
+        { path: [], value: 4, kind: SELF },
+        { path: [], value: 5, kind: TOTAL }
+      ],
+      {  // Top-down tree view.
+        title: [],
+        total: 8,
+        self: 6,
+        isLowerBound: false,
+        children: []
+      },
+      {  // Top-down heavy view.
+        title: [],
+        total: 8,
+        self: 6,
+        isLowerBound: false,
+        children: []
+      },
+      {  // Bottom-up heavy view.
+        title: [],
+        total: 8,
+        self: 6,
+        isLowerBound: false,
+        children: []
+      });
+
+  builderTest('oneDimension_noPaths', 1 /* dimensions */,
+      [],
+      {  // Top-down tree view.
+        title: [undefined],
+        total: 0,
+        self: 0,
+        isLowerBound: true,
+        children: [
+          []
+        ]
+      },
+      {  // Top-down heavy view.
+        title: [undefined],
+        total: 0,
+        self: 0,
+        isLowerBound: true,
+        children: [
+          []
+        ]
+      },
+      {  // Bottom-up heavy view.
+        title: [undefined],
+        total: 0,
+        self: 0,
+        isLowerBound: true,
+        children: [
+          []
+        ]
+      });
+
+  builderTest('oneDimension_zeroLengthPath', 1 /* dimensions */,
+      [
+        { path: [[]], value: 42, kind: SELF }
+      ],
+      {  // Top-down tree view.
+        title: [undefined],
+        total: 42,
+        self: 42,
+        isLowerBound: false,
+        children: [
+          []
+        ]
+      },
+      {  // Top-down heavy view.
+        title: [undefined],
+        total: 42,
+        self: 42,
+        isLowerBound: false,
+        children: [
+          []
+        ]
+      },
+      {  // Bottom-up heavy view.
+        title: [undefined],
+        total: 42,
+        self: 42,
+        isLowerBound: false,
+        children: [
+          []
+        ]
+      });
+
+  builderTest('oneDimension_noRecursion', 1 /* dimensions */,
+      [
+        { path: [['A', 'B', 'C']], value: 10, kind: SELF },
+        { path: [['A', 'B']], value: 20, kind: SELF },
+        { path: [['B', 'D']], value: 30, kind: SELF },
+        { path: [['A', 'B', 'D']], value: 40, kind: SELF },
+        { path: [['A', 'C']], value: 50, kind: SELF },
+        { path: [[]], value: 60, kind: SELF }
+      ],
+      {  // Top-down tree view.
+        title: [undefined],
+        total: 210,
+        self: 60,
+        isLowerBound: false,
+        children: [
+          [
+            {  // A.
+              title: ['A'],
+              total: 120,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // A -> B.
+                    title: ['B'],
+                    total: 70,
+                    self: 20,
+                    isLowerBound: false,
+                    children: [
+                      [
+                        {  // A -> B -> C.
+                          title: ['C'],
+                          total: 10,
+                          self: 10,
+                          isLowerBound: false,
+                          children: [
+                            []
+                          ]
+                        },
+                        {  // A -> B -> D.
+                          title: ['D'],
+                          total: 40,
+                          self: 40,
+                          isLowerBound: false,
+                          children: [
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // A -> C.
+                    title: ['C'],
+                    total: 50,
+                    self: 50,
+                    isLowerBound: false,
+                    children: [
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // B.
+              title: ['B'],
+              total: 30,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // B -> D.
+                    title: ['D'],
+                    total: 30,
+                    self: 30,
+                    isLowerBound: false,
+                    children: [
+                      []
+                    ]
+                  }
+                ]
+              ]
+            }
+          ]
+        ]
+      },
+      {  // Top-down heavy view.
+        title: [undefined],
+        total: 210,
+        self: 60,
+        isLowerBound: false,
+        children: [
+          [
+            {  // A.
+              title: ['A'],
+              total: 120,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // A -> B.
+                    title: ['B'],
+                    total: 70,
+                    self: 20,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // A -> B -> C.
+                          title: ['C'],
+                          total: 10,
+                          self: 10,
+                          isLowerBound: true,
+                          children: [
+                            []
+                          ]
+                        },
+                        {  // A -> B -> D.
+                          title: ['D'],
+                          total: 40,
+                          self: 40,
+                          isLowerBound: true,
+                          children: [
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // A -> C.
+                    title: ['C'],
+                    total: 50,
+                    self: 50,
+                    isLowerBound: true,
+                    children: [
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // B.
+              title: ['B'],
+              total: 100,
+              self: 20,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // B -> C.
+                    title: ['C'],
+                    total: 10,
+                    self: 10,
+                    isLowerBound: true,
+                    children: [
+                      []
+                    ]
+                  },
+                  {  // B -> D.
+                    title: ['D'],
+                    total: 70,
+                    self: 70,
+                    isLowerBound: true,
+                    children: [
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // C.
+              title: ['C'],
+              total: 60,
+              self: 60,
+              isLowerBound: true,
+              children: [
+                []
+              ]
+            },
+            {  // D.
+              title: ['D'],
+              total: 70,
+              self: 70,
+              isLowerBound: true,
+              children: [
+                []
+              ]
+            }
+          ]
+        ]
+      },
+      {  // Bottom-up heavy view.
+        title: [undefined],
+        total: 210,
+        self: 60,
+        isLowerBound: false,
+        children: [
+          [
+            {  // A.
+              title: ['A'],
+              total: 120,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                []
+              ]
+            },
+            {  // B.
+              title: ['B'],
+              total: 100,
+              self: 20,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // B -> A.
+                    title: ['A'],
+                    total: 70,
+                    self: 20,
+                    isLowerBound: true,
+                    children: [
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // C.
+              title: ['C'],
+              total: 60,
+              self: 60,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // C -> B.
+                    title: ['B'],
+                    total: 10,
+                    self: 10,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // C -> B -> A.
+                          title: ['A'],
+                          total: 10,
+                          self: 10,
+                          isLowerBound: true,
+                          children: [
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // C -> A.
+                    title: ['A'],
+                    total: 50,
+                    self: 50,
+                    isLowerBound: true,
+                    children: [
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // D.
+              title: ['D'],
+              total: 70,
+              self: 70,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // D -> B.
+                    title: ['B'],
+                    total: 70,
+                    self: 70,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // D -> B -> A.
+                          title: ['A'],
+                          total: 40,
+                          self: 40,
+                          isLowerBound: true,
+                          children: [
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            }
+          ]
+        ]
+      });
+
+  builderTest('oneDimension_simpleRecursion', 1 /* dimensions */,
+      [
+        { path: [['A']], value: 10, kind: SELF },
+        { path: [['A', 'A', 'A']], value: 20, kind: SELF },
+        { path: [['A', 'A']], value: 30, kind: SELF },
+        { path: [['A', 'A', 'A', 'A']], value: 40, kind: SELF }
+      ],
+      {  // Top-down tree view.
+        title: [undefined],
+        total: 100,
+        self: 0,
+        isLowerBound: true,
+        children: [
+          [
+            {  // A.
+              title: ['A'],
+              total: 100,
+              self: 10,
+              isLowerBound: false,
+              children: [
+                [
+                  {  // A -> A.
+                    title: ['A'],
+                    total: 90,
+                    self: 30,
+                    isLowerBound: false,
+                    children: [
+                      [
+                        {  // A -> A -> A.
+                          title: ['A'],
+                          total: 60,
+                          self: 20,
+                          isLowerBound: false,
+                          children: [
+                            [
+                              {  // A -> A -> A -> A.
+                                title: ['A'],
+                                total: 40,
+                                self: 40,
+                                isLowerBound: false,
+                                children: [
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            }
+          ]
+        ]
+      },
+      {  // Top-down heavy view.
+        title: [undefined],
+        total: 100,
+        self: 0,
+        isLowerBound: true,
+        children: [
+          [
+            {  // A.
+              title: ['A'],
+              total: 100,
+              self: 100,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // A -> A.
+                    title: ['A'],
+                    total: 90,
+                    self: 90,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // A -> A -> A.
+                          title: ['A'],
+                          total: 60,
+                          self: 60,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              {  // A -> A -> A -> A.
+                                title: ['A'],
+                                total: 40,
+                                self: 40,
+                                isLowerBound: true,
+                                children: [
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            }
+          ]
+        ]
+      },
+      {  // Bottom-up heavy view.
+        title: [undefined],
+        total: 100,
+        self: 0,
+        isLowerBound: true,
+        children: [
+          [
+            {  // A.
+              title: ['A'],
+              total: 100,
+              self: 100,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // A -> A.
+                    title: ['A'],
+                    total: 90,
+                    self: 90,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // A -> A -> A.
+                          title: ['A'],
+                          total: 60,
+                          self: 60,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              {  // A -> A -> A -> A.
+                                title: ['A'],
+                                total: 40,
+                                self: 40,
+                                isLowerBound: true,
+                                children: [
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            }
+          ]
+        ]
+      });
+
+  builderTest('oneDimension_complexRecursion', 1 /* dimensions */,
+      [
+        { path: [['A', 'B', 'C']], value: 10, kind: SELF },
+        { path: [['A', 'D', 'B', 'C', 'A', 'B', 'C']], value: 20, kind: SELF },
+        { path: [['A', 'D', 'B', 'C', 'A', 'B', 'D']], value: 30, kind: SELF },
+        { path: [['C', 'B', 'C']], value: 40, kind: SELF },
+        { path: [['C', 'B', 'C', 'B', 'C']], value: 50, kind: SELF }
+      ],
+      {  // Top-down tree view.
+        title: [undefined],
+        total: 150,
+        self: 0,
+        isLowerBound: true,
+        children: [
+          [
+            {  // A.
+              title: ['A'],
+              total: 60,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // A -> B.
+                    title: ['B'],
+                    total: 10,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // A -> B -> C.
+                          title: ['C'],
+                          total: 10,
+                          self: 10,
+                          isLowerBound: false,
+                          children: [
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // A -> D.
+                    title: ['D'],
+                    total: 50,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // A -> D -> B.
+                          title: ['B'],
+                          total: 50,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              {  // A -> D -> B -> C.
+                                title: ['C'],
+                                total: 50,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    {  // A -> D -> B -> C -> A.
+                                      title: ['A'],
+                                      total: 50,
+                                      self: 0,
+                                      isLowerBound: true,
+                                      children: [
+                                        [
+                                          {  // A -> D -> B -> C -> A -> B.
+                                            title: ['B'],
+                                            total: 50,
+                                            self: 0,
+                                            isLowerBound: true,
+                                            children: [
+                                              [
+                                                {  // A -> D -> B -> C -> A ->
+                                                   // B -> C.
+                                                  title: ['C'],
+                                                  total: 20,
+                                                  self: 20,
+                                                  isLowerBound: false,
+                                                  children: [
+                                                    []
+                                                  ]
+                                                },
+                                                {  // A -> D -> B -> C -> A ->
+                                                   // B -> D.
+                                                  title: ['D'],
+                                                  total: 30,
+                                                  self: 30,
+                                                  isLowerBound: false,
+                                                  children: [
+                                                    []
+                                                  ]
+                                                }
+                                              ]
+                                            ]
+                                          }
+                                        ]
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // C.
+              title: ['C'],
+              total: 90,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // C -> B.
+                    title: ['B'],
+                    total: 90,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // C -> B -> C.
+                          title: ['C'],
+                          total: 90,
+                          self: 40,
+                          isLowerBound: false,
+                          children: [
+                            [
+                              {  // C -> B -> C -> B.
+                                title: ['B'],
+                                total: 50,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    {  // C -> B -> C -> B -> C.
+                                      title: ['C'],
+                                      total: 50,
+                                      self: 50,
+                                      isLowerBound: false,
+                                      children: [
+                                        []
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            }
+          ]
+        ]
+      },
+      {  // Top-down heavy view.
+        title: [undefined],
+        total: 150,
+        self: 0,
+        isLowerBound: true,
+        children: [
+          [
+            {  // A.
+              title: ['A'],
+              total: 60,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // A -> B.
+                    title: ['B'],
+                    total: 60,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // A -> B -> C.
+                          title: ['C'],
+                          total: 30,
+                          self: 30,
+                          isLowerBound: true,
+                          children: [
+                            []
+                          ]
+                        },
+                        {  // A -> B -> D.
+                          title: ['D'],
+                          total: 30,
+                          self: 30,
+                          isLowerBound: true,
+                          children: [
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // A -> D.
+                    title: ['D'],
+                    total: 50,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // A -> D -> B.
+                          title: ['B'],
+                          total: 50,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              {  // A -> D -> B -> C.
+                                title: ['C'],
+                                total: 50,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    {  // A -> D -> B -> C -> A.
+                                      title: ['A'],
+                                      total: 50,
+                                      self: 0,
+                                      isLowerBound: true,
+                                      children: [
+                                        [
+                                          {  // A -> D -> B -> C -> A -> B.
+                                            title: ['B'],
+                                            total: 50,
+                                            self: 0,
+                                            isLowerBound: true,
+                                            children: [
+                                              [
+                                                {  // A -> D -> B -> C -> A ->
+                                                   // B -> C.
+                                                  title: ['C'],
+                                                  total: 20,
+                                                  self: 20,
+                                                  isLowerBound: true,
+                                                  children: [
+                                                    []
+                                                  ]
+                                                },
+                                                {  // A -> D -> B -> C -> A ->
+                                                   // B -> D.
+                                                  title: ['D'],
+                                                  total: 30,
+                                                  self: 30,
+                                                  isLowerBound: true,
+                                                  children: [
+                                                    []
+                                                  ]
+                                                }
+                                              ]
+                                            ]
+                                          }
+                                        ]
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // B.
+              title: ['B'],
+              total: 150,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // B -> C.
+                    title: ['C'],
+                    total: 150,
+                    self: 120,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // B -> C -> A.
+                          title: ['A'],
+                          total: 50,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              {  // B -> C -> A -> B.
+                                title: ['B'],
+                                total: 50,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    {  // B -> C -> A -> B -> C.
+                                      title: ['C'],
+                                      total: 20,
+                                      self: 20,
+                                      isLowerBound: true,
+                                      children: [
+                                        []
+                                      ]
+                                    },
+                                    {  // B -> C -> A -> B -> D.
+                                      title: ['D'],
+                                      total: 30,
+                                      self: 30,
+                                      isLowerBound: true,
+                                      children: [
+                                        []
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ]
+                          ]
+                        },
+                        {  // B -> C -> B.
+                          title: ['B'],
+                          total: 50,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              {  // B -> C -> B -> C.
+                                title: ['C'],
+                                total: 50,
+                                self: 50,
+                                isLowerBound: true,
+                                children: [
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // B -> D.
+                    title: ['D'],
+                    total: 30,
+                    self: 30,
+                    isLowerBound: true,
+                    children: [
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // C.
+              title: ['C'],
+              total: 150,
+              self: 120,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // C -> A.
+                    title: ['A'],
+                    total: 50,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // C -> A -> B.
+                          title: ['B'],
+                          total: 50,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              {  // C -> A -> B -> C.
+                                title: ['C'],
+                                total: 20,
+                                self: 20,
+                                isLowerBound: true,
+                                children: [
+                                  []
+                                ]
+                              },
+                              {  // C -> A -> B -> D.
+                                title: ['D'],
+                                total: 30,
+                                self: 30,
+                                isLowerBound: true,
+                                children: [
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // C -> B.
+                    title: ['B'],
+                    total: 90,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // C -> B -> C.
+                          title: ['C'],
+                          total: 90,
+                          self: 90,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              {  // C -> B -> C -> B.
+                                title: ['B'],
+                                total: 50,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    {  // C -> B -> C -> B -> C.
+                                      title: ['C'],
+                                      total: 50,
+                                      self: 50,
+                                      isLowerBound: true,
+                                      children: [
+                                        []
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // D.
+              title: ['D'],
+              total: 50,
+              self: 30,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // D -> B.
+                    title: ['B'],
+                    total: 50,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // D -> B -> C.
+                          title: ['C'],
+                          total: 50,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              {  // D -> B -> C -> A.
+                                title: ['A'],
+                                total: 50,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    {  // D -> B -> C -> A -> B.
+                                      title: ['B'],
+                                      total: 50,
+                                      self: 0,
+                                      isLowerBound: true,
+                                      children: [
+                                        [
+                                          {  // D -> B -> C -> A -> B -> C.
+                                            title: ['C'],
+                                            total: 20,
+                                            self: 20,
+                                            isLowerBound: true,
+                                            children: [
+                                              []
+                                            ]
+                                          },
+                                          {  // D -> B -> C -> A -> B -> D.
+                                            title: ['D'],
+                                            total: 30,
+                                            self: 30,
+                                            isLowerBound: true,
+                                            children: [
+                                              []
+                                            ]
+                                          }
+                                        ]
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            }
+          ]
+        ]
+      },
+      {  // Bottom-up heavy view.
+        title: [undefined],
+        total: 150,
+        self: 0,
+        isLowerBound: true,
+        children: [
+          [
+            {  // A.
+              title: ['A'],
+              total: 60,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // A -> C.
+                    title: ['C'],
+                    total: 50,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // A -> C -> B.
+                          title: ['B'],
+                          total: 50,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              {  // A -> C -> B -> D.
+                                title: ['D'],
+                                total: 50,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    {  // A -> C -> B -> D -> A.
+                                      title: ['A'],
+                                      total: 50,
+                                      self: 0,
+                                      isLowerBound: true,
+                                      children: [
+                                        []
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // B.
+              title: ['B'],
+              total: 150,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // B -> A.
+                    title: ['A'],
+                    total: 60,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // B -> A -> C.
+                          title: ['C'],
+                          total: 50,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              {  // B -> A -> C -> B.
+                                title: ['B'],
+                                total: 50,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    {  // B -> A -> C -> B -> D.
+                                      title: ['D'],
+                                      total: 50,
+                                      self: 0,
+                                      isLowerBound: true,
+                                      children: [
+                                        [
+                                          {  // B -> A -> C -> B -> D -> A.
+                                            title: ['A'],
+                                            total: 50,
+                                            self: 0,
+                                            isLowerBound: true,
+                                            children: [
+                                              []
+                                            ]
+                                          }
+                                        ]
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // B -> D.
+                    title: ['D'],
+                    total: 50,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // B -> D -> A.
+                          title: ['A'],
+                          total: 50,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // B -> C.
+                    title: ['C'],
+                    total: 90,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // B -> C -> B.
+                          title: ['B'],
+                          total: 50,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              {  // B -> C -> B -> C.
+                                title: ['C'],
+                                total: 50,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // C.
+              title: ['C'],
+              total: 150,
+              self: 120,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // C -> B.
+                    title: ['B'],
+                    total: 150,
+                    self: 120,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // C -> B -> A.
+                          title: ['A'],
+                          total: 30,
+                          self: 30,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              {  // C -> B -> A -> C.
+                                title: ['C'],
+                                total: 20,
+                                self: 20,
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    {  // C -> B -> A -> C -> B.
+                                      title: ['B'],
+                                      total: 20,
+                                      self: 20,
+                                      isLowerBound: true,
+                                      children: [
+                                        [
+                                          {  // C -> B -> A -> C -> B -> D.
+                                            title: ['D'],
+                                            total: 20,
+                                            self: 20,
+                                            isLowerBound: true,
+                                            children: [
+                                              [
+                                                {  // C -> B -> A -> C -> B ->
+                                                   // D -> A.
+                                                  title: ['A'],
+                                                  total: 20,
+                                                  self: 20,
+                                                  isLowerBound: true,
+                                                  children: [
+                                                    []
+                                                  ]
+                                                }
+                                              ]
+                                            ]
+                                          }
+                                        ]
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ]
+                          ]
+                        },
+                        {  // C -> B -> D.
+                          title: ['D'],
+                          total: 50,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              {  // C -> B -> D -> A.
+                                title: ['A'],
+                                total: 50,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        },
+                        {  // C -> B -> C.
+                          title: ['C'],
+                          total: 90,
+                          self: 90,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              {  // C -> B -> C -> B.
+                                title: ['B'],
+                                total: 50,
+                                self: 50,
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    {  // C -> B -> C -> B -> C.
+                                      title: ['C'],
+                                      total: 50,
+                                      self: 50,
+                                      isLowerBound: true,
+                                      children: [
+                                        []
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // D.
+              title: ['D'],
+              total: 50,
+              self: 30,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // D -> A.
+                    title: ['A'],
+                    total: 50,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      []
+                    ]
+                  },
+                  {  // D -> B.
+                    title: ['B'],
+                    total: 30,
+                    self: 30,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // D -> B -> A.
+                          title: ['A'],
+                          total: 30,
+                          self: 30,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              {  // D -> B -> A -> C.
+                                title: ['C'],
+                                total: 30,
+                                self: 30,
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    {  // D -> B -> A -> C -> B.
+                                      title: ['B'],
+                                      total: 30,
+                                      self: 30,
+                                      isLowerBound: true,
+                                      children: [
+                                        [
+                                          {  // D -> B -> A -> C -> B -> D.
+                                            title: ['D'],
+                                            total: 30,
+                                            self: 30,
+                                            isLowerBound: true,
+                                            children: [
+                                              [
+                                                {  // D -> B -> A -> C -> B ->
+                                                   // D -> A.
+                                                  title: ['A'],
+                                                  total: 30,
+                                                  self: 30,
+                                                  isLowerBound: true,
+                                                  children: [
+                                                    []
+                                                  ]
+                                                }
+                                              ]
+                                            ]
+                                          }
+                                        ]
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            }
+          ]
+        ]
+      });
+
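+  // Mixes SELF and TOTAL values on overlapping paths (e.g. both a SELF of
+  // 15 and a TOTAL of 20 for [['B']]) and checks the resulting totals and
+  // isLowerBound flags in all three views.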
+  builderTest('oneDimension_withTotalSizes', 1 /* dimensions */,
+      [
+        { path: [['B', 'C']], value: 10, kind: TOTAL },
+        { path: [['B', 'C', 'D']], value: 5, kind: TOTAL },
+        { path: [['B']], value: 15, kind: SELF },
+        { path: [['B']], value: 20, kind: TOTAL },
+        { path: [['B', 'D']], value: 40, kind: SELF },
+        { path: [['C']], value: 50, kind: TOTAL }
+      ],
+      {  // Top-down tree view.
+        title: [undefined],
+        total: 115,
+        self: 0,
+        isLowerBound: true,
+        children: [
+          [
+            {  // B.
+              title: ['B'],
+              total: 65,
+              self: 15,
+              isLowerBound: false,
+              children: [
+                [
+                  {  // B -> C.
+                    title: ['C'],
+                    total: 10,
+                    self: 0,
+                    isLowerBound: false,
+                    children: [
+                      [
+                        {  // B -> C -> D.
+                          title: ['D'],
+                          total: 5,
+                          self: 0,
+                          isLowerBound: false,
+                          children: [
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // B -> D.
+                    title: ['D'],
+                    total: 40,
+                    self: 40,
+                    isLowerBound: false,
+                    children: [
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // C.
+              title: ['C'],
+              total: 50,
+              self: 0,
+              isLowerBound: false,
+              children: [
+                []
+              ]
+            }
+          ]
+        ]
+      },
+      {  // Top-down heavy view.
+        title: [undefined],
+        total: 115,
+        self: 0,
+        isLowerBound: true,
+        children: [
+          [
+            {  // B.
+              title: ['B'],
+              total: 65,
+              self: 15,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // B -> C.
+                    title: ['C'],
+                    total: 10,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // B -> C -> D.
+                          title: ['D'],
+                          total: 5,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // B -> D.
+                    title: ['D'],
+                    total: 40,
+                    self: 40,
+                    isLowerBound: true,
+                    children: [
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // C.
+              title: ['C'],
+              total: 60,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // C -> D.
+                    title: ['D'],
+                    total: 5,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // D.
+              title: ['D'],
+              total: 45,
+              self: 40,
+              isLowerBound: true,
+              children: [
+                []
+              ]
+            }
+          ]
+        ]
+      },
+      {  // Bottom-up heavy view.
+        title: [undefined],
+        total: 115,
+        self: 0,
+        isLowerBound: true,
+        children: [
+          [
+            {  // B.
+              title: ['B'],
+              total: 65,
+              self: 15,
+              isLowerBound: true,
+              children: [
+                []
+              ]
+            },
+            {  // C.
+              title: ['C'],
+              total: 60,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // C -> B.
+                    title: ['B'],
+                    total: 10,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // D.
+              title: ['D'],
+              total: 45,
+              self: 40,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // D -> C.
+                    title: ['C'],
+                    total: 5,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // D -> C -> B.
+                          title: ['B'],
+                          total: 5,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // D -> B.
+                    title: ['B'],
+                    total: 40,
+                    self: 40,
+                    isLowerBound: true,
+                    children: [
+                      []
+                    ]
+                  }
+                ]
+              ]
+            }
+          ]
+        ]
+      });
+
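+  // Uses '__proto__' as a title, presumably to check that titles colliding
+  // with Object.prototype property names are treated like any other string.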
+  builderTest('oneDimension_protoTitle', 1 /* dimensions */,
+      [
+        { path: [['__proto__']], value: 45, kind: SELF },
+        { path: [['A']], value: 18, kind: SELF },
+        { path: [['A', '__proto__']], value: 89, kind: TOTAL }
+      ],
+      {  // Top-down tree view.
+        title: [undefined],
+        total: 152,
+        self: 0,
+        isLowerBound: true,
+        children: [
+          [
+            {  // __proto__.
+              title: ['__proto__'],
+              total: 45,
+              self: 45,
+              isLowerBound: false,
+              children: [
+                []
+              ]
+            },
+            {  // A.
+              title: ['A'],
+              total: 107,
+              self: 18,
+              isLowerBound: false,
+              children: [
+                [
+                  {  // A -> __proto__.
+                    title: ['__proto__'],
+                    total: 89,
+                    self: 0,
+                    isLowerBound: false,
+                    children: [
+                      []
+                    ]
+                  }
+                ]
+              ]
+            }
+          ]
+        ]
+      },
+      {  // Top-down heavy view.
+        title: [undefined],
+        total: 152,
+        self: 0,
+        isLowerBound: true,
+        children: [
+          [
+            {  // __proto__.
+              title: ['__proto__'],
+              total: 134,
+              self: 45,
+              isLowerBound: true,
+              children: [
+                []
+              ]
+            },
+            {  // A.
+              title: ['A'],
+              total: 107,
+              self: 18,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // A -> __proto__.
+                    title: ['__proto__'],
+                    total: 89,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      []
+                    ]
+                  }
+                ]
+              ]
+            }
+          ]
+        ]
+      },
+      {  // Bottom-up heavy view.
+        title: [undefined],
+        total: 152,
+        self: 0,
+        isLowerBound: true,
+        children: [
+          [
+            {  // __proto__.
+              title: ['__proto__'],
+              total: 134,
+              self: 45,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // __proto__ -> A.
+                    title: ['A'],
+                    total: 89,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // A.
+              title: ['A'],
+              total: 107,
+              self: 18,
+              isLowerBound: true,
+              children: [
+                []
+              ]
+            }
+          ]
+        ]
+      });
+
+  // See tracing/tracing/base/multi_dimensional_view.html
+  // (MultiDimensionalViewBuilder.addDimensionToTopDownHeavyViewNode_ and
+  // MultiDimensionalViewBuilder.addDimensionToBottomUpHeavyViewNode_
+  // documentation).
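+  // Note that the expected root total of 48 matches the sum of the two
+  // top-level TOTAL samples below (30 for [['A']] and 18 for [['B']]).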
+  builderTest('oneDimension_documentationExample', 1 /* dimensions */,
+      [
+        { path: [['A']], value: 10, kind: SELF },
+        { path: [['A']], value: 30, kind: TOTAL },
+        { path: [['A', 'B']], value: 1, kind: SELF },
+        { path: [['A', 'B', 'A']], value: 3, kind: SELF },
+        { path: [['A', 'B', 'A']], value: 8, kind: TOTAL },
+        { path: [['A', 'B', 'C']], value: 2, kind: SELF },
+        { path: [['A', 'B', 'C']], value: 7, kind: TOTAL },
+        { path: [['B']], value: 12, kind: SELF },
+        { path: [['B']], value: 18, kind: TOTAL }
+      ],
+      {  // Top-down tree view.
+        title: [undefined],
+        total: 48,
+        self: 0,
+        isLowerBound: true,
+        children: [
+          [
+            {  // A.
+              title: ['A'],
+              total: 30,
+              self: 10,
+              isLowerBound: false,
+              children: [
+                [
+                  {  // A -> B.
+                    title: ['B'],
+                    total: 16,
+                    self: 1,
+                    isLowerBound: false,
+                    children: [
+                      [
+                        {  // A -> B -> A.
+                          title: ['A'],
+                          total: 8,
+                          self: 3,
+                          isLowerBound: false,
+                          children: [
+                            []
+                          ]
+                        },
+                        {  // A -> B -> C.
+                          title: ['C'],
+                          total: 7,
+                          self: 2,
+                          isLowerBound: false,
+                          children: [
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // B.
+              title: ['B'],
+              total: 18,
+              self: 12,
+              isLowerBound: false,
+              children: [
+                []
+              ]
+            }
+          ]
+        ]
+      },
+      {  // Top-down heavy view.
+        title: [undefined],
+        total: 48,
+        self: 0,
+        isLowerBound: true,
+        children: [
+          [
+            {  // A.
+              title: ['A'],
+              total: 30,
+              self: 13,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // A -> B.
+                    title: ['B'],
+                    total: 16,
+                    self: 1,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // A -> B -> A.
+                          title: ['A'],
+                          total: 8,
+                          self: 3,
+                          isLowerBound: true,
+                          children: [
+                            []
+                          ]
+                        },
+                        {  // A -> B -> C.
+                          title: ['C'],
+                          total: 7,
+                          self: 2,
+                          isLowerBound: true,
+                          children: [
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // B.
+              title: ['B'],
+              total: 34,
+              self: 13,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // B -> A.
+                    title: ['A'],
+                    total: 8,
+                    self: 3,
+                    isLowerBound: true,
+                    children: [
+                      []
+                    ]
+                  },
+                  {  // B -> C.
+                    title: ['C'],
+                    total: 7,
+                    self: 2,
+                    isLowerBound: true,
+                    children: [
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // C.
+              title: ['C'],
+              total: 7,
+              self: 2,
+              isLowerBound: true,
+              children: [
+                []
+              ]
+            }
+          ]
+        ]
+      },
+      {  // Bottom-up heavy view.
+        title: [undefined],
+        total: 48,
+        self: 0,
+        isLowerBound: true,
+        children: [
+          [
+            {  // A.
+              title: ['A'],
+              total: 30,
+              self: 13,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // A -> B.
+                    title: ['B'],
+                    total: 8,
+                    self: 3,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // A -> B -> A.
+                          title: ['A'],
+                          total: 8,
+                          self: 3,
+                          isLowerBound: true,
+                          children: [
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // B.
+              title: ['B'],
+              total: 34,
+              self: 13,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // B -> A.
+                    title: ['A'],
+                    total: 16,
+                    self: 1,
+                    isLowerBound: true,
+                    children: [
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // C.
+              title: ['C'],
+              total: 7,
+              self: 2,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // C -> B.
+                    title: ['B'],
+                    total: 7,
+                    self: 2,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // C -> B -> A.
+                          title: ['A'],
+                          total: 7,
+                          self: 2,
+                          isLowerBound: true,
+                          children: [
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            }
+          ]
+        ]
+      });
+
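+  // With two dimensions and no paths, each view is expected to reduce to an
+  // empty root with one (empty) child list per dimension.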
+  builderTest('twoDimensions_noPaths', 2 /* dimensions */,
+      [],
+      {  // Top-down tree view.
+        title: [undefined, undefined],
+        total: 0,
+        self: 0,
+        isLowerBound: true,
+        children: [
+          [],
+          []
+        ]
+      },
+      {  // Top-down heavy view.
+        title: [undefined, undefined],
+        total: 0,
+        self: 0,
+        isLowerBound: true,
+        children: [
+          [],
+          []
+        ]
+      },
+      {  // Bottom-up heavy view.
+        title: [undefined, undefined],
+        total: 0,
+        self: 0,
+        isLowerBound: true,
+        children: [
+          [],
+          []
+        ]
+      });
+
+  // See tracing/tracing/base/multi_dimensional_view.html
+  // (MultiDimensionalViewNode.finalizeTotalValues_ documentation).
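+  // The root receives a direct SELF value here (path [[], []]) and, unlike
+  // in the other tests, its expected total (115) is not marked as a lower
+  // bound.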
+  builderTest('twoDimensions_totalCalculation', 2 /* dimensions */,
+      [
+        { path: [[], []], value: 10, kind: SELF },
+        { path: [['A'], []], value: 21, kind: SELF },
+        { path: [['A'], []], value: 30, kind: TOTAL },
+        { path: [['B'], []], value: 25, kind: SELF },
+        { path: [['B'], []], value: 32, kind: TOTAL },
+        { path: [[], ['1']], value: 3, kind: SELF },
+        { path: [[], ['1']], value: 15, kind: TOTAL },
+        { path: [[], ['2']], value: 40, kind: SELF },
+        { path: [[], ['2']], value: 41, kind: TOTAL }
+      ],
+      {  // Top-down tree view.
+        title: [undefined, undefined],
+        total: 115,
+        self: 10,
+        isLowerBound: false,
+        children: [
+          [
+            {  // A, *.
+              title: ['A', undefined],
+              total: 30,
+              self: 21,
+              isLowerBound: false,
+              children: [
+                [],
+                []
+              ]
+            },
+            {  // B, *.
+              title: ['B', undefined],
+              total: 32,
+              self: 25,
+              isLowerBound: false,
+              children: [
+                [],
+                []
+              ]
+            }
+          ],
+          [
+            {  // *, 1.
+              title: [undefined, '1'],
+              total: 15,
+              self: 3,
+              isLowerBound: false,
+              children: [
+                [],
+                []
+              ]
+            },
+            {  // *, 2.
+              title: [undefined, '2'],
+              total: 41,
+              self: 40,
+              isLowerBound: false,
+              children: [
+                [],
+                []
+              ]
+            }
+          ]
+        ]
+      },
+      {  // Top-down heavy view.
+        title: [undefined, undefined],
+        total: 115,
+        self: 10,
+        isLowerBound: false,
+        children: [
+          [
+            {  // A, *.
+              title: ['A', undefined],
+              total: 30,
+              self: 21,
+              isLowerBound: true,
+              children: [
+                [],
+                []
+              ]
+            },
+            {  // B, *.
+              title: ['B', undefined],
+              total: 32,
+              self: 25,
+              isLowerBound: true,
+              children: [
+                [],
+                []
+              ]
+            }
+          ],
+          [
+            {  // *, 1.
+              title: [undefined, '1'],
+              total: 15,
+              self: 3,
+              isLowerBound: true,
+              children: [
+                [],
+                []
+              ]
+            },
+            {  // *, 2.
+              title: [undefined, '2'],
+              total: 41,
+              self: 40,
+              isLowerBound: true,
+              children: [
+                [],
+                []
+              ]
+            }
+          ]
+        ]
+      },
+      {  // Bottom-up heavy view.
+        title: [undefined, undefined],
+        total: 115,
+        self: 10,
+        isLowerBound: false,
+        children: [
+          [
+            {  // A, *.
+              title: ['A', undefined],
+              total: 30,
+              self: 21,
+              isLowerBound: true,
+              children: [
+                [],
+                []
+              ]
+            },
+            {  // B, *.
+              title: ['B', undefined],
+              total: 32,
+              self: 25,
+              isLowerBound: true,
+              children: [
+                [],
+                []
+              ]
+            }
+          ],
+          [
+            {  // *, 1.
+              title: [undefined, '1'],
+              total: 15,
+              self: 3,
+              isLowerBound: true,
+              children: [
+                [],
+                []
+              ]
+            },
+            {  // *, 2.
+              title: [undefined, '2'],
+              total: 41,
+              self: 40,
+              isLowerBound: true,
+              children: [
+                [],
+                []
+              ]
+            }
+          ]
+        ]
+      });
+
+  // See tracing/tracing/base/multi_dimensional_view.html
+  // (MultiDimensionalViewNode documentation).
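+  // In the two-dimensional expectations below, nodes are given an id (e.g.
+  // '#0') the first time they are written out and are then referenced by
+  // that id string wherever they recur in other children lists.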
+  builderTest('twoDimensions_documentationExample1', 2 /* dimensions */,
+      [
+        { path: [['A', 'B'], ['T1', 'T2']], value: 1, kind: TOTAL },
+        { path: [['A', 'B'], ['T1']], value: 2, kind: TOTAL },
+        { path: [['A', 'B'], []], value: 4, kind: TOTAL },
+        { path: [['A'], ['T1', 'T2']], value: 10, kind: TOTAL },
+        { path: [['A'], ['T1']], value: 20, kind: TOTAL },
+        { path: [['A'], []], value: 40, kind: TOTAL },
+        { path: [[], ['T1', 'T2']], value: 100, kind: TOTAL },
+        { path: [[], ['T1']], value: 200, kind: TOTAL },
+        { path: [[], []], value: 400, kind: TOTAL }
+      ],
+      {  // Top-down tree view.
+        title: [undefined, undefined],
+        total: 400,
+        self: 0,
+        isLowerBound: false,
+        children: [
+          [
+            {  // A, *.
+              title: ['A', undefined],
+              total: 40,
+              self: 0,
+              isLowerBound: false,
+              children: [
+                [
+                  {  // A -> B, *.
+                    title: ['B', undefined],
+                    total: 4,
+                    self: 0,
+                    isLowerBound: false,
+                    children: [
+                      [],
+                      [
+                        {  // A -> B, T1.
+                          id: '#0',
+                          title: ['B', 'T1'],
+                          total: 2,
+                          self: 0,
+                          isLowerBound: false,
+                          children: [
+                            [],
+                            [
+                              {  // A -> B, T1 -> T2.
+                                id: '#1',
+                                title: ['B', 'T2'],
+                                total: 1,
+                                self: 0,
+                                isLowerBound: false,
+                                children: [
+                                  [],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // A, T1.
+                    id: '#2',
+                    title: ['A', 'T1'],
+                    total: 20,
+                    self: 0,
+                    isLowerBound: false,
+                    children: [
+                      [
+                        '#0'  // A -> B, T1.
+                      ],
+                      [
+                        {  // A, T1 -> T2.
+                          id: '#3',
+                          title: ['A', 'T2'],
+                          total: 10,
+                          self: 0,
+                          isLowerBound: false,
+                          children: [
+                            [
+                              '#1'  // A -> B, T1 -> T2.
+                            ],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            }
+          ],
+          [
+            {  // *, T1.
+              title: [undefined, 'T1'],
+              total: 200,
+              self: 0,
+              isLowerBound: false,
+              children: [
+                [
+                  '#2'  // A, T1.
+                ],
+                [
+                  {  // *, T1 -> T2.
+                    title: [undefined, 'T2'],
+                    total: 100,
+                    self: 0,
+                    isLowerBound: false,
+                    children: [
+                      [
+                        '#3'  // A, T1 -> T2.
+                      ],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            }
+          ]
+        ]
+      },
+      {  // Top-down heavy view.
+        title: [undefined, undefined],
+        total: 400,
+        self: 0,
+        isLowerBound: false,
+        children: [
+          [
+            {  // A, *.
+              title: ['A', undefined],
+              total: 40,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // A -> B, *.
+                    title: ['B', undefined],
+                    total: 4,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [
+                        {  // A -> B, T1.
+                          id: '#0',
+                          title: ['B', 'T1'],
+                          total: 2,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [
+                              {  // A -> B, T1 -> T2.
+                                id: '#1',
+                                title: ['B', 'T2'],
+                                total: 1,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        },
+                        {  // A -> B, T2.
+                          id: '#2',
+                          title: ['B', 'T2'],
+                          total: 1,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // A, T1.
+                    id: '#3',
+                    title: ['A', 'T1'],
+                    total: 20,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#0'  // A -> B, T1.
+                      ],
+                      [
+                        {  // A, T1 -> T2.
+                          id: '#4',
+                          title: ['A', 'T2'],
+                          total: 10,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#1'  // A -> B, T1 -> T2.
+                            ],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // A, T2.
+                    id: '#5',
+                    title: ['A', 'T2'],
+                    total: 10,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#2'  // A -> B, T2.
+                      ],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // B, *.
+              title: ['B', undefined],
+              total: 4,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [],
+                [
+                  {  // B, T1.
+                    id: '#6',
+                    title: ['B', 'T1'],
+                    total: 2,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [
+                        {  // B, T1 -> T2.
+                          id: '#7',
+                          title: ['B', 'T2'],
+                          total: 1,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // B, T2.
+                    id: '#8',
+                    title: ['B', 'T2'],
+                    total: 1,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            }
+          ],
+          [
+            {  // *, T1.
+              title: [undefined, 'T1'],
+              total: 200,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  '#3',  // A, T1.
+                  '#6'  // B, T1.
+                ],
+                [
+                  {  // *, T1 -> T2.
+                    title: [undefined, 'T2'],
+                    total: 100,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#4',  // A, T1 -> T2.
+                        '#7'  // B, T1 -> T2.
+                      ],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // *, T2.
+              title: [undefined, 'T2'],
+              total: 100,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  '#5',  // A, T2.
+                  '#8'  // B, T2.
+                ],
+                []
+              ]
+            }
+          ]
+        ]
+      },
+      {  // Bottom-up heavy view.
+        title: [undefined, undefined],
+        total: 400,
+        self: 0,
+        isLowerBound: false,
+        children: [
+          [
+            {  // A, *.
+              title: ['A', undefined],
+              total: 40,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [],
+                [
+                  {  // A, T1.
+                    id: '#0',
+                    title: ['A', 'T1'],
+                    total: 20,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      []
+                    ]
+                  },
+                  {  // A, T2.
+                    id: '#1',
+                    title: ['A', 'T2'],
+                    total: 10,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [
+                        {  // A, T2 -> T1.
+                          id: '#2',
+                          title: ['A', 'T1'],
+                          total: 10,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // B, *.
+              title: ['B', undefined],
+              total: 4,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // B -> A, *.
+                    title: ['A', undefined],
+                    total: 4,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [
+                        {  // B -> A, T1.
+                          id: '#3',
+                          title: ['A', 'T1'],
+                          total: 2,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            []
+                          ]
+                        },
+                        {  // B -> A, T2.
+                          id: '#4',
+                          title: ['A', 'T2'],
+                          total: 1,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [
+                              {  // B -> A, T2 -> T1.
+                                id: '#5',
+                                title: ['A', 'T1'],
+                                total: 1,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // B, T1.
+                    id: '#6',
+                    title: ['B', 'T1'],
+                    total: 2,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#3'  // B -> A, T1.
+                      ],
+                      []
+                    ]
+                  },
+                  {  // B, T2.
+                    id: '#7',
+                    title: ['B', 'T2'],
+                    total: 1,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#4'  // B -> A, T2.
+                      ],
+                      [
+                        {  // B, T2 -> T1.
+                          id: '#8',
+                          title: ['B', 'T1'],
+                          total: 1,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#5'  // B -> A, T2 -> T1.
+                            ],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            }
+          ],
+          [
+            {  // *, T1.
+              title: [undefined, 'T1'],
+              total: 200,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  '#0',  // A, T1.
+                  '#6'  // B, T1.
+                ],
+                []
+              ]
+            },
+            {  // *, T2.
+              title: [undefined, 'T2'],
+              total: 100,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  '#1',  // A, T2.
+                  '#7'  // B, T2.
+                ],
+                [
+                  {  // *, T2 -> T1.
+                    title: [undefined, 'T1'],
+                    total: 100,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#2',  // A, T2 -> T1.
+                        '#8'  // B, T2 -> T1.
+                      ],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            }
+          ]
+        ]
+      });
+
+  // See tracing/tracing/base/multi_dimensional_view.html
+  // (MultiDimensionalViewBuilder documentation).
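+  // Builds the weekday/activity example from the documentation: three SELF
+  // samples spread across two dimensions (day and activity).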
+  builderTest('twoDimensions_documentationExample2', 2 /* dimensions */,
+      [
+        { path: [['Saturday'], ['Cooking']], value: 1, kind: SELF },
+        { path: [['Saturday'], ['Sports', 'Football']], value: 2, kind: SELF },
+        { path: [['Sunday'], ['Sports', 'Basketball']], value: 3, kind: SELF }
+      ],
+      {  // Top-down tree view.
+        title: [undefined, undefined],
+        total: 6,
+        self: 0,
+        isLowerBound: true,
+        children: [
+          [
+            {  // Saturday, *.
+              title: ['Saturday', undefined],
+              total: 3,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [],
+                [
+                  {  // Saturday, Cooking.
+                    id: '#0',
+                    title: ['Saturday', 'Cooking'],
+                    total: 1,
+                    self: 1,
+                    isLowerBound: false,
+                    children: [
+                      [],
+                      []
+                    ]
+                  },
+                  {  // Saturday, Sports.
+                    id: '#1',
+                    title: ['Saturday', 'Sports'],
+                    total: 2,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [
+                        {  // Saturday, Sports -> Football.
+                          id: '#2',
+                          title: ['Saturday', 'Football'],
+                          total: 2,
+                          self: 2,
+                          isLowerBound: false,
+                          children: [
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // Sunday, *.
+              title: ['Sunday', undefined],
+              total: 3,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [],
+                [
+                  {  // Sunday, Sports.
+                    id: '#3',
+                    title: ['Sunday', 'Sports'],
+                    total: 3,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [
+                        {  // Sunday, Sports -> Basketball.
+                          id: '#4',
+                          title: ['Sunday', 'Basketball'],
+                          total: 3,
+                          self: 3,
+                          isLowerBound: false,
+                          children: [
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            }
+          ],
+          [
+            {  // *, Cooking.
+              title: [undefined, 'Cooking'],
+              total: 1,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  '#0'  // Saturday, Cooking.
+                ],
+                []
+              ]
+            },
+            {  // *, Sports.
+              title: [undefined, 'Sports'],
+              total: 5,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  '#1',  // Saturday, Sports.
+                  '#3'  // Sunday, Sports.
+                ],
+                [
+                  {  // *, Sports -> Football.
+                    title: [undefined, 'Football'],
+                    total: 2,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#2'  // Saturday, Sports -> Football.
+                      ],
+                      []
+                    ]
+                  },
+                  {  // *, Sports -> Basketball.
+                    title: [undefined, 'Basketball'],
+                    total: 3,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#4'  // Sunday, Sports -> Basketball.
+                      ],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            }
+          ]
+        ]
+      },
+      {  // Top-down heavy view.
+        title: [undefined, undefined],
+        total: 6,
+        self: 0,
+        isLowerBound: true,
+        children: [
+          [
+            {  // Saturday, *.
+              title: ['Saturday', undefined],
+              total: 3,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [],
+                [
+                  {  // Saturday, Cooking.
+                    id: '#0',
+                    title: ['Saturday', 'Cooking'],
+                    total: 1,
+                    self: 1,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      []
+                    ]
+                  },
+                  {  // Saturday, Sports.
+                    id: '#1',
+                    title: ['Saturday', 'Sports'],
+                    total: 2,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [
+                        {  // Saturday, Sports -> Football.
+                          id: '#2',
+                          title: ['Saturday', 'Football'],
+                          total: 2,
+                          self: 2,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // Saturday, Football.
+                    id: '#3',
+                    title: ['Saturday', 'Football'],
+                    total: 2,
+                    self: 2,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // Sunday, *.
+              title: ['Sunday', undefined],
+              total: 3,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [],
+                [
+                  {  // Sunday, Sports.
+                    id: '#4',
+                    title: ['Sunday', 'Sports'],
+                    total: 3,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [
+                        {  // Sunday, Sports -> Basketball.
+                          id: '#5',
+                          title: ['Sunday', 'Basketball'],
+                          total: 3,
+                          self: 3,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // Sunday, Basketball.
+                    id: '#6',
+                    title: ['Sunday', 'Basketball'],
+                    total: 3,
+                    self: 3,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            }
+          ],
+          [
+            {  // *, Cooking.
+              title: [undefined, 'Cooking'],
+              total: 1,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  '#0'  // Saturday, Cooking.
+                ],
+                []
+              ]
+            },
+            {  // *, Sports.
+              title: [undefined, 'Sports'],
+              total: 5,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  '#1',  // Saturday, Sports.
+                  '#4'  // Sunday, Sports.
+                ],
+                [
+                  {  // *, Sports -> Football.
+                    title: [undefined, 'Football'],
+                    total: 2,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#2'  // Saturday, Sports -> Football.
+                      ],
+                      []
+                    ]
+                  },
+                  {  // *, Sports -> Basketball.
+                    title: [undefined, 'Basketball'],
+                    total: 3,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#5'  // Sunday, Sports -> Basketball.
+                      ],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // *, Football.
+              title: [undefined, 'Football'],
+              total: 2,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  '#3'  // Saturday, Football.
+                ],
+                []
+              ]
+            },
+            {  // *, Basketball.
+              title: [undefined, 'Basketball'],
+              total: 3,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  '#6'  // Sunday, Basketball.
+                ],
+                []
+              ]
+            }
+          ]
+        ]
+      },
+      {  // Bottom-up heavy view.
+        title: [undefined, undefined],
+        total: 6,
+        self: 0,
+        isLowerBound: true,
+        children: [
+          [
+            {  // Saturday, *.
+              title: ['Saturday', undefined],
+              total: 3,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [],
+                [
+                  {  // Saturday, Cooking.
+                    id: '#0',
+                    title: ['Saturday', 'Cooking'],
+                    total: 1,
+                    self: 1,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      []
+                    ]
+                  },
+                  {  // Saturday, Sports.
+                    id: '#1',
+                    title: ['Saturday', 'Sports'],
+                    total: 2,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      []
+                    ]
+                  },
+                  {  // Saturday, Football.
+                    id: '#2',
+                    title: ['Saturday', 'Football'],
+                    total: 2,
+                    self: 2,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [
+                        {  // Saturday, Football -> Sports.
+                          id: '#3',
+                          title: ['Saturday', 'Sports'],
+                          total: 2,
+                          self: 2,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // Sunday, *.
+              title: ['Sunday', undefined],
+              total: 3,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [],
+                [
+                  {  // Sunday, Sports.
+                    id: '#4',
+                    title: ['Sunday', 'Sports'],
+                    total: 3,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      []
+                    ]
+                  },
+                  {  // Sunday, Basketball.
+                    id: '#5',
+                    title: ['Sunday', 'Basketball'],
+                    total: 3,
+                    self: 3,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [
+                        {  // Sunday, Basketball -> Sports.
+                          id: '#6',
+                          title: ['Sunday', 'Sports'],
+                          total: 3,
+                          self: 3,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            }
+          ],
+          [
+            {  // *, Cooking.
+              title: [undefined, 'Cooking'],
+              total: 1,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  '#0'  // Saturday, Cooking.
+                ],
+                []
+              ]
+            },
+            {  // *, Sports.
+              title: [undefined, 'Sports'],
+              total: 5,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  '#1',  // Saturday, Sports.
+                  '#4'  // Sunday, Sports.
+                ],
+                []
+              ]
+            },
+            {  // *, Football.
+              title: [undefined, 'Football'],
+              total: 2,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  '#2'  // Saturday, Football.
+                ],
+                [
+                  {  // *, Football -> Sports.
+                    title: [undefined, 'Sports'],
+                    total: 2,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#3'  // Saturday, Football -> Sports.
+                      ],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // *, Basketball.
+              title: [undefined, 'Basketball'],
+              total: 3,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  '#5'  // Sunday, Basketball.
+                ],
+                [
+                  {  // *, Basketball -> Sports.
+                    title: [undefined, 'Sports'],
+                    total: 3,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#6'  // Sunday, Basketball -> Sports.
+                      ],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            }
+          ]
+        ]
+      });
+
+  // See https://goo.gl/KY7zVE.
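+  // Each input row has the form { path: [[dim0 frames], [dim1 frames]],
+  // value, kind }; an empty path component means the value applies across
+  // all frames of that dimension (e.g. [[], []] carries the overall total
+  // of 1538).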
+  builderTest('twoDimensions_heapDumpExample', 2 /* dimensions */,
+      [
+        { path: [['BrMain', 'Init'], ['T']], value: 151, kind: TOTAL },
+        { path: [['BrMain', 'Init'], ['W']], value: 83, kind: TOTAL },
+        { path: [['BrMain', 'Init'], []], value: 242, kind: TOTAL },
+        { path: [['BrMain', 'MsgLp'], ['T']], value: 307, kind: TOTAL },
+        { path: [['BrMain', 'MsgLp'], ['V']], value: 281, kind: TOTAL },
+        { path: [['BrMain', 'MsgLp'], []], value: 601, kind: TOTAL },
+        { path: [['RdMain', 'RTask'], ['T']], value: 211, kind: TOTAL },
+        { path: [['RdMain', 'RTask'], ['W']], value: 337, kind: TOTAL },
+        { path: [['RdMain', 'RTask'], []], value: 556, kind: TOTAL },
+        { path: [[], ['T']], value: 698, kind: TOTAL },
+        { path: [[], ['V']], value: 340, kind: TOTAL },
+        { path: [[], ['W']], value: 461, kind: TOTAL },
+        { path: [[], []], value: 1538, kind: TOTAL },
+        { path: [['BrMain'], ['T']], value: 465, kind: TOTAL },
+        { path: [['BrMain'], ['V']], value: 297, kind: TOTAL },
+        { path: [['BrMain'], ['W']], value: 96, kind: TOTAL },
+        { path: [['BrMain'], []], value: 876, kind: TOTAL },
+        { path: [['RdMain'], ['T']], value: 229, kind: TOTAL },
+        { path: [['RdMain'], ['W']], value: 355, kind: TOTAL },
+        { path: [['RdMain'], []], value: 628, kind: TOTAL }
+      ],
+      {  // Top-down tree view.
+        title: [undefined, undefined],
+        total: 1538,
+        self: 0,
+        isLowerBound: false,
+        children: [
+          [
+            {  // BrMain, *.
+              title: ['BrMain', undefined],
+              total: 876,
+              self: 0,
+              isLowerBound: false,
+              children: [
+                [
+                  {  // BrMain -> Init, *.
+                    title: ['Init', undefined],
+                    total: 242,
+                    self: 0,
+                    isLowerBound: false,
+                    children: [
+                      [],
+                      [
+                        {  // BrMain -> Init, T.
+                          id: '#0',
+                          title: ['Init', 'T'],
+                          total: 151,
+                          self: 0,
+                          isLowerBound: false,
+                          children: [
+                            [],
+                            []
+                          ]
+                        },
+                        {  // BrMain -> Init, W.
+                          id: '#1',
+                          title: ['Init', 'W'],
+                          total: 83,
+                          self: 0,
+                          isLowerBound: false,
+                          children: [
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // BrMain -> MsgLp, *.
+                    title: ['MsgLp', undefined],
+                    total: 601,
+                    self: 0,
+                    isLowerBound: false,
+                    children: [
+                      [],
+                      [
+                        {  // BrMain -> MsgLp, T.
+                          id: '#2',
+                          title: ['MsgLp', 'T'],
+                          total: 307,
+                          self: 0,
+                          isLowerBound: false,
+                          children: [
+                            [],
+                            []
+                          ]
+                        },
+                        {  // BrMain -> MsgLp, V.
+                          id: '#3',
+                          title: ['MsgLp', 'V'],
+                          total: 281,
+                          self: 0,
+                          isLowerBound: false,
+                          children: [
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // BrMain, T.
+                    id: '#4',
+                    title: ['BrMain', 'T'],
+                    total: 465,
+                    self: 0,
+                    isLowerBound: false,
+                    children: [
+                      [
+                        '#0',  // BrMain -> Init, T.
+                        '#2'  // BrMain -> MsgLp, T.
+                      ],
+                      []
+                    ]
+                  },
+                  {  // BrMain, V.
+                    id: '#5',
+                    title: ['BrMain', 'V'],
+                    total: 297,
+                    self: 0,
+                    isLowerBound: false,
+                    children: [
+                      [
+                        '#3'  // BrMain -> MsgLp, V.
+                      ],
+                      []
+                    ]
+                  },
+                  {  // BrMain, W.
+                    id: '#6',
+                    title: ['BrMain', 'W'],
+                    total: 96,
+                    self: 0,
+                    isLowerBound: false,
+                    children: [
+                      [
+                        '#1'  // BrMain -> Init, W.
+                      ],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // RdMain, *.
+              title: ['RdMain', undefined],
+              total: 628,
+              self: 0,
+              isLowerBound: false,
+              children: [
+                [
+                  {  // RdMain -> RTask, *.
+                    title: ['RTask', undefined],
+                    total: 556,
+                    self: 0,
+                    isLowerBound: false,
+                    children: [
+                      [],
+                      [
+                        {  // RdMain -> RTask, T.
+                          id: '#7',
+                          title: ['RTask', 'T'],
+                          total: 211,
+                          self: 0,
+                          isLowerBound: false,
+                          children: [
+                            [],
+                            []
+                          ]
+                        },
+                        {  // RdMain -> RTask, W.
+                          id: '#8',
+                          title: ['RTask', 'W'],
+                          total: 337,
+                          self: 0,
+                          isLowerBound: false,
+                          children: [
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // RdMain, T.
+                    id: '#9',
+                    title: ['RdMain', 'T'],
+                    total: 229,
+                    self: 0,
+                    isLowerBound: false,
+                    children: [
+                      [
+                        '#7'  // RdMain -> RTask, T.
+                      ],
+                      []
+                    ]
+                  },
+                  {  // RdMain, W.
+                    id: '#10',
+                    title: ['RdMain', 'W'],
+                    total: 355,
+                    self: 0,
+                    isLowerBound: false,
+                    children: [
+                      [
+                        '#8'  // RdMain -> RTask, W.
+                      ],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            }
+          ],
+          [
+            {  // *, T.
+              title: [undefined, 'T'],
+              total: 698,
+              self: 0,
+              isLowerBound: false,
+              children: [
+                [
+                  '#4',  // BrMain, T.
+                  '#9'  // RdMain, T.
+                ],
+                []
+              ]
+            },
+            {  // *, V.
+              title: [undefined, 'V'],
+              total: 340,
+              self: 0,
+              isLowerBound: false,
+              children: [
+                [
+                  '#5'  // BrMain, V.
+                ],
+                []
+              ]
+            },
+            {  // *, W.
+              title: [undefined, 'W'],
+              total: 461,
+              self: 0,
+              isLowerBound: false,
+              children: [
+                [
+                  '#6',  // BrMain, W.
+                  '#10'  // RdMain, W.
+                ],
+                []
+              ]
+            }
+          ]
+        ]
+      },
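+      // In the heavy views below, every dimension-0 frame (including the
+      // nested Init, MsgLp and RTask) appears as a top-level node that
+      // aggregates all paths containing it.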
+      {  // Top-down heavy view.
+        title: [undefined, undefined],
+        total: 1538,
+        self: 0,
+        isLowerBound: false,
+        children: [
+          [
+            {  // BrMain, *.
+              title: ['BrMain', undefined],
+              total: 876,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // BrMain -> Init, *.
+                    title: ['Init', undefined],
+                    total: 242,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [
+                        {  // BrMain -> Init, T.
+                          id: '#0',
+                          title: ['Init', 'T'],
+                          total: 151,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            []
+                          ]
+                        },
+                        {  // BrMain -> Init, W.
+                          id: '#1',
+                          title: ['Init', 'W'],
+                          total: 83,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // BrMain -> MsgLp, *.
+                    title: ['MsgLp', undefined],
+                    total: 601,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [
+                        {  // BrMain -> MsgLp, T.
+                          id: '#2',
+                          title: ['MsgLp', 'T'],
+                          total: 307,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            []
+                          ]
+                        },
+                        {  // BrMain -> MsgLp, V.
+                          id: '#3',
+                          title: ['MsgLp', 'V'],
+                          total: 281,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // BrMain, T.
+                    id: '#4',
+                    title: ['BrMain', 'T'],
+                    total: 465,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#0',  // BrMain -> Init, T.
+                        '#2'  // BrMain -> MsgLp, T.
+                      ],
+                      []
+                    ]
+                  },
+                  {  // BrMain, V.
+                    id: '#5',
+                    title: ['BrMain', 'V'],
+                    total: 297,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#3'  // BrMain -> MsgLp, V.
+                      ],
+                      []
+                    ]
+                  },
+                  {  // BrMain, W.
+                    id: '#6',
+                    title: ['BrMain', 'W'],
+                    total: 96,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#1'  // BrMain -> Init, W.
+                      ],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // Init, *.
+              title: ['Init', undefined],
+              total: 242,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [],
+                [
+                  {  // Init, T.
+                    id: '#7',
+                    title: ['Init', 'T'],
+                    total: 151,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      []
+                    ]
+                  },
+                  {  // Init, W.
+                    id: '#8',
+                    title: ['Init', 'W'],
+                    total: 83,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // MsgLp, *.
+              title: ['MsgLp', undefined],
+              total: 601,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [],
+                [
+                  {  // MsgLp, T.
+                    id: '#9',
+                    title: ['MsgLp', 'T'],
+                    total: 307,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      []
+                    ]
+                  },
+                  {  // MsgLp, V.
+                    id: '#10',
+                    title: ['MsgLp', 'V'],
+                    total: 281,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // RdMain, *.
+              title: ['RdMain', undefined],
+              total: 628,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // RdMain -> RTask, *.
+                    title: ['RTask', undefined],
+                    total: 556,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [
+                        {  // RdMain -> RTask, T.
+                          id: '#11',
+                          title: ['RTask', 'T'],
+                          total: 211,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            []
+                          ]
+                        },
+                        {  // RdMain -> RTask, W.
+                          id: '#12',
+                          title: ['RTask', 'W'],
+                          total: 337,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // RdMain, T.
+                    id: '#13',
+                    title: ['RdMain', 'T'],
+                    total: 229,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#11'  // RdMain -> RTask, T.
+                      ],
+                      []
+                    ]
+                  },
+                  {  // RdMain, W.
+                    id: '#14',
+                    title: ['RdMain', 'W'],
+                    total: 355,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#12'  // RdMain -> RTask, W.
+                      ],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // RTask, *.
+              title: ['RTask', undefined],
+              total: 556,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [],
+                [
+                  {  // RTask, T.
+                    id: '#15',
+                    title: ['RTask', 'T'],
+                    total: 211,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      []
+                    ]
+                  },
+                  {  // RTask, W.
+                    id: '#16',
+                    title: ['RTask', 'W'],
+                    total: 337,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            }
+          ],
+          [
+            {  // *, T.
+              title: [undefined, 'T'],
+              total: 698,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  '#4',  // BrMain, T.
+                  '#7',  // Init, T.
+                  '#9',  // MsgLp, T.
+                  '#13',  // RdMain, T.
+                  '#15'  // RTask, T.
+                ],
+                []
+              ]
+            },
+            {  // *, V.
+              title: [undefined, 'V'],
+              total: 340,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  '#5',  // BrMain, V.
+                  '#10'  // MsgLp, V.
+                ],
+                []
+              ]
+            },
+            {  // *, W.
+              title: [undefined, 'W'],
+              total: 461,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  '#6',  // BrMain, W.
+                  '#8',  // Init, W.
+                  '#14',  // RdMain, W.
+                  '#16'  // RTask, W.
+                ],
+                []
+              ]
+            }
+          ]
+        ]
+      },
+      {  // Bottom-up heavy view.
+        title: [undefined, undefined],
+        total: 1538,
+        self: 0,
+        isLowerBound: false,
+        children: [
+          [
+            {  // BrMain, *.
+              title: ['BrMain', undefined],
+              total: 876,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [],
+                [
+                  {  // BrMain, T.
+                    id: '#0',
+                    title: ['BrMain', 'T'],
+                    total: 465,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      []
+                    ]
+                  },
+                  {  // BrMain, V.
+                    id: '#1',
+                    title: ['BrMain', 'V'],
+                    total: 297,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      []
+                    ]
+                  },
+                  {  // BrMain, W.
+                    id: '#2',
+                    title: ['BrMain', 'W'],
+                    total: 96,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // Init, *.
+              title: ['Init', undefined],
+              total: 242,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // Init -> BrMain, *.
+                    title: ['BrMain', undefined],
+                    total: 242,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [
+                        {  // Init -> BrMain, T.
+                          id: '#3',
+                          title: ['BrMain', 'T'],
+                          total: 151,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            []
+                          ]
+                        },
+                        {  // Init -> BrMain, W.
+                          id: '#4',
+                          title: ['BrMain', 'W'],
+                          total: 83,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // Init, T.
+                    id: '#5',
+                    title: ['Init', 'T'],
+                    total: 151,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#3'  // Init -> BrMain, T.
+                      ],
+                      []
+                    ]
+                  },
+                  {  // Init, W.
+                    id: '#6',
+                    title: ['Init', 'W'],
+                    total: 83,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#4'  // Init -> BrMain, W.
+                      ],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // MsgLp, *.
+              title: ['MsgLp', undefined],
+              total: 601,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // MsgLp -> BrMain, *.
+                    title: ['BrMain', undefined],
+                    total: 601,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [
+                        {  // MsgLp -> BrMain, T.
+                          id: '#7',
+                          title: ['BrMain', 'T'],
+                          total: 307,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            []
+                          ]
+                        },
+                        {  // MsgLp -> BrMain, V.
+                          id: '#8',
+                          title: ['BrMain', 'V'],
+                          total: 281,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // MsgLp, T.
+                    id: '#9',
+                    title: ['MsgLp', 'T'],
+                    total: 307,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#7'  // MsgLp -> BrMain, T.
+                      ],
+                      []
+                    ]
+                  },
+                  {  // MsgLp, V.
+                    id: '#10',
+                    title: ['MsgLp', 'V'],
+                    total: 281,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#8'  // MsgLp -> BrMain, V.
+                      ],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // RdMain, *.
+              title: ['RdMain', undefined],
+              total: 628,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [],
+                [
+                  {  // RdMain, T.
+                    id: '#11',
+                    title: ['RdMain', 'T'],
+                    total: 229,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      []
+                    ]
+                  },
+                  {  // RdMain, W.
+                    id: '#12',
+                    title: ['RdMain', 'W'],
+                    total: 355,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // RTask, *.
+              title: ['RTask', undefined],
+              total: 556,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // RTask -> RdMain, *.
+                    title: ['RdMain', undefined],
+                    total: 556,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [
+                        {  // RTask -> RdMain, T.
+                          id: '#13',
+                          title: ['RdMain', 'T'],
+                          total: 211,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            []
+                          ]
+                        },
+                        {  // RTask -> RdMain, W.
+                          id: '#14',
+                          title: ['RdMain', 'W'],
+                          total: 337,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // RTask, T.
+                    id: '#15',
+                    title: ['RTask', 'T'],
+                    total: 211,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#13'  // RTask -> RdMain, T.
+                      ],
+                      []
+                    ]
+                  },
+                  {  // RTask, W.
+                    id: '#16',
+                    title: ['RTask', 'W'],
+                    total: 337,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#14'  // RTask -> RdMain, W.
+                      ],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            }
+          ],
+          [
+            {  // *, T.
+              title: [undefined, 'T'],
+              total: 698,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  '#0',  // BrMain, T.
+                  '#5',  // Init, T.
+                  '#9',  // MsgLp, T.
+                  '#11',  // RdMain, T.
+                  '#15'  // RTask, T.
+                ],
+                []
+              ]
+            },
+            {  // *, V.
+              title: [undefined, 'V'],
+              total: 340,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  '#1',  // BrMain, V.
+                  '#10'  // MsgLp, V.
+                ],
+                []
+              ]
+            },
+            {  // *, W.
+              title: [undefined, 'W'],
+              total: 461,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  '#2',  // BrMain, W.
+                  '#6',  // Init, W.
+                  '#12',  // RdMain, W.
+                  '#16'  // RTask, W.
+                ],
+                []
+              ]
+            }
+          ]
+        ]
+      });
+
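+  // Dimension 0 paths contain repeated frames (A -> B -> A, A -> B -> B),
+  // exercising how the builder handles recursion within a single dimension.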
+  builderTest('twoDimensions_oneRecursiveDimension', 2 /* dimensions */,
+      [
+        { path: [['A', 'B'], []], value: 1500, kind: TOTAL },
+        { path: [['A', 'B', 'A'], []], value: 200, kind: TOTAL },
+        { path: [['A', 'B', 'B'], []], value: 300, kind: TOTAL },
+        { path: [['A', 'B', 'C'], []], value: 700, kind: TOTAL },
+        { path: [['A', 'B'], ['T1']], value: 15, kind: TOTAL },
+        { path: [['A', 'B', 'A'], ['T1']], value: 2, kind: TOTAL },
+        { path: [['A', 'B', 'B'], ['T1']], value: 3, kind: TOTAL },
+        { path: [['A', 'B', 'C'], ['T1']], value: 7, kind: TOTAL },
+        { path: [['B', 'A'], ['T1']], value: 30000, kind: TOTAL },
+        { path: [['B', 'A'], []], value: 40000, kind: TOTAL }
+      ],
+      {  // Top-down tree view.
+        title: [undefined, undefined],
+        total: 41500,
+        self: 0,
+        isLowerBound: true,
+        children: [
+          [
+            {  // A, *.
+              title: ['A', undefined],
+              total: 1500,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // A -> B, *.
+                    title: ['B', undefined],
+                    total: 1500,
+                    self: 0,
+                    isLowerBound: false,
+                    children: [
+                      [
+                        {  // A -> B -> A, *.
+                          title: ['A', undefined],
+                          total: 200,
+                          self: 0,
+                          isLowerBound: false,
+                          children: [
+                            [],
+                            [
+                              {  // A -> B -> A, T1.
+                                id: '#0',
+                                title: ['A', 'T1'],
+                                total: 2,
+                                self: 0,
+                                isLowerBound: false,
+                                children: [
+                                  [],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        },
+                        {  // A -> B -> B, *.
+                          title: ['B', undefined],
+                          total: 300,
+                          self: 0,
+                          isLowerBound: false,
+                          children: [
+                            [],
+                            [
+                              {  // A -> B -> B, T1.
+                                id: '#1',
+                                title: ['B', 'T1'],
+                                total: 3,
+                                self: 0,
+                                isLowerBound: false,
+                                children: [
+                                  [],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        },
+                        {  // A -> B -> C, *.
+                          title: ['C', undefined],
+                          total: 700,
+                          self: 0,
+                          isLowerBound: false,
+                          children: [
+                            [],
+                            [
+                              {  // A -> B -> C, T1.
+                                id: '#2',
+                                title: ['C', 'T1'],
+                                total: 7,
+                                self: 0,
+                                isLowerBound: false,
+                                children: [
+                                  [],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ],
+                      [
+                        {  // A -> B, T1.
+                          id: '#3',
+                          title: ['B', 'T1'],
+                          total: 15,
+                          self: 0,
+                          isLowerBound: false,
+                          children: [
+                            [
+                              '#0',  // A -> B -> A, T1.
+                              '#1',  // A -> B -> B, T1.
+                              '#2'  // A -> B -> C, T1.
+                            ],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // A, T1.
+                    id: '#4',
+                    title: ['A', 'T1'],
+                    total: 15,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#3'  // A -> B, T1.
+                      ],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // B, *.
+              title: ['B', undefined],
+              total: 40000,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // B -> A, *.
+                    title: ['A', undefined],
+                    total: 40000,
+                    self: 0,
+                    isLowerBound: false,
+                    children: [
+                      [],
+                      [
+                        {  // B -> A, T1.
+                          id: '#5',
+                          title: ['A', 'T1'],
+                          total: 30000,
+                          self: 0,
+                          isLowerBound: false,
+                          children: [
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // B, T1.
+                    id: '#6',
+                    title: ['B', 'T1'],
+                    total: 30000,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#5'  // B -> A, T1.
+                      ],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            }
+          ],
+          [
+            {  // *, T1.
+              title: [undefined, 'T1'],
+              total: 30015,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  '#4',  // A, T1.
+                  '#6'  // B, T1.
+                ],
+                []
+              ]
+            }
+          ]
+        ]
+      },
+      {  // Top-down heavy view.
+        title: [undefined, undefined],
+        total: 41500,
+        self: 0,
+        isLowerBound: true,
+        children: [
+          [
+            {  // A, *.
+              title: ['A', undefined],
+              total: 41500,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // A -> B, *.
+                    title: ['B', undefined],
+                    total: 1500,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // A -> B -> A, *.
+                          title: ['A', undefined],
+                          total: 200,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [
+                              {  // A -> B -> A, T1.
+                                id: '#0',
+                                title: ['A', 'T1'],
+                                total: 2,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        },
+                        {  // A -> B -> B, *.
+                          title: ['B', undefined],
+                          total: 300,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [
+                              {  // A -> B -> B, T1.
+                                id: '#1',
+                                title: ['B', 'T1'],
+                                total: 3,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        },
+                        {  // A -> B -> C, *.
+                          title: ['C', undefined],
+                          total: 700,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [
+                              {  // A -> B -> C, T1.
+                                id: '#2',
+                                title: ['C', 'T1'],
+                                total: 7,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ],
+                      [
+                        {  // A -> B, T1.
+                          id: '#3',
+                          title: ['B', 'T1'],
+                          total: 15,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#0',  // A -> B -> A, T1.
+                              '#1',  // A -> B -> B, T1.
+                              '#2'  // A -> B -> C, T1.
+                            ],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // A, T1.
+                    id: '#4',
+                    title: ['A', 'T1'],
+                    total: 30015,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#3'  // A -> B, T1.
+                      ],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // B, *.
+              title: ['B', undefined],
+              total: 41500,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // B -> A, *.
+                    title: ['A', undefined],
+                    total: 40200,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [
+                        {  // B -> A, T1.
+                          id: '#5',
+                          title: ['A', 'T1'],
+                          total: 30002,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // B -> B, *.
+                    title: ['B', undefined],
+                    total: 300,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [
+                        {  // B -> B, T1.
+                          id: '#6',
+                          title: ['B', 'T1'],
+                          total: 3,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // B -> C, *.
+                    title: ['C', undefined],
+                    total: 700,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [
+                        {  // B -> C, T1.
+                          id: '#7',
+                          title: ['C', 'T1'],
+                          total: 7,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // B, T1.
+                    id: '#8',
+                    title: ['B', 'T1'],
+                    total: 30015,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#5',  // B -> A, T1.
+                        '#6',  // B -> B, T1.
+                        '#7'  // B -> C, T1.
+                      ],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // C, *.
+              title: ['C', undefined],
+              total: 700,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [],
+                [
+                  {  // C, T1.
+                    id: '#9',
+                    title: ['C', 'T1'],
+                    total: 7,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            }
+          ],
+          [
+            {  // *, T1.
+              title: [undefined, 'T1'],
+              total: 30015,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  '#4',  // A, T1.
+                  '#8',  // B, T1.
+                  '#9'  // C, T1.
+                ],
+                []
+              ]
+            }
+          ]
+        ]
+      },
+      {  // Bottom-up heavy view.
+        title: [undefined, undefined],
+        total: 41500,
+        self: 0,
+        isLowerBound: true,
+        children: [
+          [
+            {  // A, *.
+              title: ['A', undefined],
+              total: 41500,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // A -> B, *.
+                    title: ['B', undefined],
+                    total: 40200,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // A -> B -> A, *.
+                          title: ['A', undefined],
+                          total: 200,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [
+                              {  // A -> B -> A, T1.
+                                id: '#0',
+                                title: ['A', 'T1'],
+                                total: 2,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ],
+                      [
+                        {  // A -> B, T1.
+                          id: '#1',
+                          title: ['B', 'T1'],
+                          total: 30002,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#0'  // A -> B -> A, T1.
+                            ],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // A, T1.
+                    id: '#2',
+                    title: ['A', 'T1'],
+                    total: 30015,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#1'  // A -> B, T1.
+                      ],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // B, *.
+              title: ['B', undefined],
+              total: 41500,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // B -> A, *.
+                    title: ['A', undefined],
+                    total: 1500,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [
+                        {  // B -> A, T1.
+                          id: '#3',
+                          title: ['A', 'T1'],
+                          total: 15,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // B -> B, *.
+                    title: ['B', undefined],
+                    total: 300,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // B -> B -> A, *.
+                          title: ['A', undefined],
+                          total: 300,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [
+                              {  // B -> B -> A, T1.
+                                id: '#4',
+                                title: ['A', 'T1'],
+                                total: 3,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ],
+                      [
+                        {  // B -> B, T1.
+                          id: '#5',
+                          title: ['B', 'T1'],
+                          total: 3,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#4'  // B -> B -> A, T1.
+                            ],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // B, T1.
+                    id: '#6',
+                    title: ['B', 'T1'],
+                    total: 30015,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#3',  // B -> A, T1.
+                        '#5'  // B -> B, T1.
+                      ],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // C, *.
+              title: ['C', undefined],
+              total: 700,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // C -> B, *.
+                    title: ['B', undefined],
+                    total: 700,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // C -> B -> A, *.
+                          title: ['A', undefined],
+                          total: 700,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [
+                              {  // C -> B -> A, T1.
+                                id: '#7',
+                                title: ['A', 'T1'],
+                                total: 7,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ],
+                      [
+                        {  // C -> B, T1.
+                          id: '#8',
+                          title: ['B', 'T1'],
+                          total: 7,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#7'  // C -> B -> A, T1.
+                            ],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // C, T1.
+                    id: '#9',
+                    title: ['C', 'T1'],
+                    total: 7,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#8'  // C -> B, T1.
+                      ],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            }
+          ],
+          [
+            {  // *, T1.
+              title: [undefined, 'T1'],
+              total: 30015,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  '#2',  // A, T1.
+                  '#6',  // B, T1.
+                  '#9'  // C, T1.
+                ],
+                []
+              ]
+            }
+          ]
+        ]
+      });
+
+  builderTest('twoDimensions_twoRecursiveDimensions', 2 /* dimensions */,
+      [
+        { path: [['A', 'A', 'B'], ['1', '2', '2']], value: 10, kind: SELF },
+        { path: [['A', 'A'], ['1', '2']], value: 40, kind: TOTAL },
+        { path: [['A', 'B', 'B'], ['1', '1', '2']], value: 20, kind: TOTAL },
+        { path: [['A', 'B'], ['1', '1']], value: 5, kind: SELF }
+      ],
+      {  // Top-down tree view.
+        title: [undefined, undefined],
+        total: 65,
+        self: 0,
+        isLowerBound: true,
+        children: [
+          [
+            {  // A, *.
+              title: ['A', undefined],
+              total: 65,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // A -> A, *.
+                    title: ['A', undefined],
+                    total: 40,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // A -> A -> B, *.
+                          title: ['B', undefined],
+                          total: 10,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [
+                              {  // A -> A -> B, 1.
+                                id: '#0',
+                                title: ['B', '1'],
+                                total: 10,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  [
+                                    {  // A -> A -> B, 1 -> 2.
+                                      id: '#1',
+                                      title: ['B', '2'],
+                                      total: 10,
+                                      self: 0,
+                                      isLowerBound: true,
+                                      children: [
+                                        [],
+                                        [
+                                          {  // A -> A -> B, 1 -> 2 -> 2.
+                                            id: '#2',
+                                            title: ['B', '2'],
+                                            total: 10,
+                                            self: 10,
+                                            isLowerBound: false,
+                                            children: [
+                                              [],
+                                              []
+                                            ]
+                                          }
+                                        ]
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ],
+                      [
+                        {  // A -> A, 1.
+                          id: '#3',
+                          title: ['A', '1'],
+                          total: 40,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#0'  // A -> A -> B, 1.
+                            ],
+                            [
+                              {  // A -> A, 1 -> 2.
+                                id: '#4',
+                                title: ['A', '2'],
+                                total: 40,
+                                self: 0,
+                                isLowerBound: false,
+                                children: [
+                                  [
+                                    '#1'  // A -> A -> B, 1 -> 2.
+                                  ],
+                                  [
+                                    {  // A -> A, 1 -> 2 -> 2.
+                                      id: '#5',
+                                      title: ['A', '2'],
+                                      total: 10,
+                                      self: 0,
+                                      isLowerBound: true,
+                                      children: [
+                                        [
+                                          '#2'  // A -> A -> B, 1 -> 2 -> 2.
+                                        ],
+                                        []
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // A -> B, *.
+                    title: ['B', undefined],
+                    total: 25,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // A -> B -> B, *.
+                          title: ['B', undefined],
+                          total: 20,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [
+                              {  // A -> B -> B, 1.
+                                id: '#6',
+                                title: ['B', '1'],
+                                total: 20,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  [
+                                    {  // A -> B -> B, 1 -> 1.
+                                      id: '#7',
+                                      title: ['B', '1'],
+                                      total: 20,
+                                      self: 0,
+                                      isLowerBound: true,
+                                      children: [
+                                        [],
+                                        [
+                                          {  // A -> B -> B, 1 -> 1 -> 2.
+                                            id: '#8',
+                                            title: ['B', '2'],
+                                            total: 20,
+                                            self: 0,
+                                            isLowerBound: false,
+                                            children: [
+                                              [],
+                                              []
+                                            ]
+                                          }
+                                        ]
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ],
+                      [
+                        {  // A -> B, 1.
+                          id: '#9',
+                          title: ['B', '1'],
+                          total: 25,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#6'  // A -> B -> B, 1.
+                            ],
+                            [
+                              {  // A -> B, 1 -> 1.
+                                id: '#10',
+                                title: ['B', '1'],
+                                total: 25,
+                                self: 5,
+                                isLowerBound: false,
+                                children: [
+                                  [
+                                    '#7'  // A -> B -> B, 1 -> 1.
+                                  ],
+                                  [
+                                    {  // A -> B, 1 -> 1 -> 2.
+                                      id: '#11',
+                                      title: ['B', '2'],
+                                      total: 20,
+                                      self: 0,
+                                      isLowerBound: true,
+                                      children: [
+                                        [
+                                          '#8'  // A -> B -> B, 1 -> 1 -> 2.
+                                        ],
+                                        []
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // A, 1.
+                    id: '#12',
+                    title: ['A', '1'],
+                    total: 65,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#3',  // A -> A, 1.
+                        '#9'  // A -> B, 1.
+                      ],
+                      [
+                        {  // A, 1 -> 2.
+                          id: '#13',
+                          title: ['A', '2'],
+                          total: 40,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#4'  // A -> A, 1 -> 2.
+                            ],
+                            [
+                              {  // A, 1 -> 2 -> 2.
+                                id: '#14',
+                                title: ['A', '2'],
+                                total: 10,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    '#5'  // A -> A, 1 -> 2 -> 2.
+                                  ],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        },
+                        {  // A, 1 -> 1.
+                          id: '#15',
+                          title: ['A', '1'],
+                          total: 25,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#10'  // A -> B, 1 -> 1.
+                            ],
+                            [
+                              {  // A, 1 -> 1 -> 2.
+                                id: '#16',
+                                title: ['A', '2'],
+                                total: 20,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    '#11'  // A -> B, 1 -> 1 -> 2.
+                                  ],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            }
+          ],
+          [
+            {  // *, 1.
+              title: [undefined, '1'],
+              total: 65,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  '#12'  // A, 1.
+                ],
+                [
+                  {  // *, 1 -> 2.
+                    title: [undefined, '2'],
+                    total: 40,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#13'  // A, 1 -> 2.
+                      ],
+                      [
+                        {  // *, 1 -> 2 -> 2.
+                          title: [undefined, '2'],
+                          total: 10,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#14'  // A, 1 -> 2 -> 2.
+                            ],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // *, 1 -> 1.
+                    title: [undefined, '1'],
+                    total: 25,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#15'  // A, 1 -> 1.
+                      ],
+                      [
+                        {  // *, 1 -> 1 -> 2.
+                          title: [undefined, '2'],
+                          total: 20,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#16'  // A, 1 -> 1 -> 2.
+                            ],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            }
+          ]
+        ]
+      },
+      {  // Top-down heavy view.
+        title: [undefined, undefined],
+        total: 65,
+        self: 0,
+        isLowerBound: true,
+        children: [
+          [
+            {  // A, *.
+              title: ['A', undefined],
+              total: 65,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // A -> A, *.
+                    title: ['A', undefined],
+                    total: 40,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // A -> A -> B, *.
+                          title: ['B', undefined],
+                          total: 10,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [
+                              {  // A -> A -> B, 1.
+                                id: '#0',
+                                title: ['B', '1'],
+                                total: 10,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  [
+                                    {  // A -> A -> B, 1 -> 2.
+                                      id: '#1',
+                                      title: ['B', '2'],
+                                      total: 10,
+                                      self: 0,
+                                      isLowerBound: true,
+                                      children: [
+                                        [],
+                                        [
+                                          {  // A -> A -> B, 1 -> 2 -> 2.
+                                            id: '#2',
+                                            title: ['B', '2'],
+                                            total: 10,
+                                            self: 10,
+                                            isLowerBound: true,
+                                            children: [
+                                              [],
+                                              []
+                                            ]
+                                          }
+                                        ]
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              },
+                              {  // A -> A -> B, 2.
+                                id: '#3',
+                                title: ['B', '2'],
+                                total: 10,
+                                self: 10,
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  [
+                                    {  // A -> A -> B, 2 -> 2.
+                                      id: '#4',
+                                      title: ['B', '2'],
+                                      total: 10,
+                                      self: 10,
+                                      isLowerBound: true,
+                                      children: [
+                                        [],
+                                        []
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ],
+                      [
+                        {  // A -> A, 1.
+                          id: '#5',
+                          title: ['A', '1'],
+                          total: 40,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#0'  // A -> A -> B, 1.
+                            ],
+                            [
+                              {  // A -> A, 1 -> 2.
+                                id: '#6',
+                                title: ['A', '2'],
+                                total: 40,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    '#1'  // A -> A -> B, 1 -> 2.
+                                  ],
+                                  [
+                                    {  // A -> A, 1 -> 2 -> 2.
+                                      id: '#7',
+                                      title: ['A', '2'],
+                                      total: 10,
+                                      self: 0,
+                                      isLowerBound: true,
+                                      children: [
+                                        [
+                                          '#2'  // A -> A -> B, 1 -> 2 -> 2.
+                                        ],
+                                        []
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ]
+                          ]
+                        },
+                        {  // A -> A, 2.
+                          id: '#8',
+                          title: ['A', '2'],
+                          total: 40,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#3'  // A -> A -> B, 2.
+                            ],
+                            [
+                              {  // A -> A, 2 -> 2.
+                                id: '#9',
+                                title: ['A', '2'],
+                                total: 10,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    '#4'  // A -> A -> B, 2 -> 2.
+                                  ],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // A -> B, *.
+                    title: ['B', undefined],
+                    total: 35,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // A -> B -> B, *.
+                          title: ['B', undefined],
+                          total: 20,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [
+                              {  // A -> B -> B, 1.
+                                id: '#10',
+                                title: ['B', '1'],
+                                total: 20,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  [
+                                    {  // A -> B -> B, 1 -> 1.
+                                      id: '#11',
+                                      title: ['B', '1'],
+                                      total: 20,
+                                      self: 0,
+                                      isLowerBound: true,
+                                      children: [
+                                        [],
+                                        [
+                                          {  // A -> B -> B, 1 -> 1 -> 2.
+                                            id: '#12',
+                                            title: ['B', '2'],
+                                            total: 20,
+                                            self: 0,
+                                            isLowerBound: true,
+                                            children: [
+                                              [],
+                                              []
+                                            ]
+                                          }
+                                        ]
+                                      ]
+                                    },
+                                    {  // A -> B -> B, 1 -> 2.
+                                      id: '#13',
+                                      title: ['B', '2'],
+                                      total: 20,
+                                      self: 0,
+                                      isLowerBound: true,
+                                      children: [
+                                        [],
+                                        []
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              },
+                              {  // A -> B -> B, 2.
+                                id: '#14',
+                                title: ['B', '2'],
+                                total: 20,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ],
+                      [
+                        {  // A -> B, 1.
+                          id: '#15',
+                          title: ['B', '1'],
+                          total: 35,
+                          self: 5,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#10'  // A -> B -> B, 1.
+                            ],
+                            [
+                              {  // A -> B, 1 -> 1.
+                                id: '#16',
+                                title: ['B', '1'],
+                                total: 25,
+                                self: 5,
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    '#11'  // A -> B -> B, 1 -> 1.
+                                  ],
+                                  [
+                                    {  // A -> B, 1 -> 1 -> 2.
+                                      id: '#17',
+                                      title: ['B', '2'],
+                                      total: 20,
+                                      self: 0,
+                                      isLowerBound: true,
+                                      children: [
+                                        [
+                                          '#12'  // A -> B -> B, 1 -> 1 -> 2.
+                                        ],
+                                        []
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              },
+                              {  // A -> B, 1 -> 2.
+                                id: '#18',
+                                title: ['B', '2'],
+                                total: 30,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    '#13'  // A -> B -> B, 1 -> 2.
+                                  ],
+                                  [
+                                    {  // A -> B, 1 -> 2 -> 2.
+                                      id: '#19',
+                                      title: ['B', '2'],
+                                      total: 10,
+                                      self: 10,
+                                      isLowerBound: true,
+                                      children: [
+                                        [],
+                                        []
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ]
+                          ]
+                        },
+                        {  // A -> B, 2.
+                          id: '#20',
+                          title: ['B', '2'],
+                          total: 30,
+                          self: 10,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#14'  // A -> B -> B, 2.
+                            ],
+                            [
+                              {  // A -> B, 2 -> 2.
+                                id: '#21',
+                                title: ['B', '2'],
+                                total: 10,
+                                self: 10,
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // A, 1.
+                    id: '#22',
+                    title: ['A', '1'],
+                    total: 65,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#5',  // A -> A, 1.
+                        '#15'  // A -> B, 1.
+                      ],
+                      [
+                        {  // A, 1 -> 2.
+                          id: '#23',
+                          title: ['A', '2'],
+                          total: 60,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#6',  // A -> A, 1 -> 2.
+                              '#18'  // A -> B, 1 -> 2.
+                            ],
+                            [
+                              {  // A, 1 -> 2 -> 2.
+                                id: '#24',
+                                title: ['A', '2'],
+                                total: 10,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    '#7',  // A -> A, 1 -> 2 -> 2.
+                                    '#19'  // A -> B, 1 -> 2 -> 2.
+                                  ],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        },
+                        {  // A, 1 -> 1.
+                          id: '#25',
+                          title: ['A', '1'],
+                          total: 25,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#16'  // A -> B, 1 -> 1.
+                            ],
+                            [
+                              {  // A, 1 -> 1 -> 2.
+                                id: '#26',
+                                title: ['A', '2'],
+                                total: 20,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    '#17'  // A -> B, 1 -> 1 -> 2.
+                                  ],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // A, 2.
+                    id: '#27',
+                    title: ['A', '2'],
+                    total: 60,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#8',  // A -> A, 2.
+                        '#20'  // A -> B, 2.
+                      ],
+                      [
+                        {  // A, 2 -> 2.
+                          id: '#28',
+                          title: ['A', '2'],
+                          total: 10,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#9',  // A -> A, 2 -> 2.
+                              '#21'  // A -> B, 2 -> 2.
+                            ],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // B, *.
+              title: ['B', undefined],
+              total: 35,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // B -> B, *.
+                    title: ['B', undefined],
+                    total: 20,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [
+                        {  // B -> B, 1.
+                          id: '#29',
+                          title: ['B', '1'],
+                          total: 20,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [
+                              {  // B -> B, 1 -> 1.
+                                id: '#30',
+                                title: ['B', '1'],
+                                total: 20,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  [
+                                    {  // B -> B, 1 -> 1 -> 2.
+                                      id: '#31',
+                                      title: ['B', '2'],
+                                      total: 20,
+                                      self: 0,
+                                      isLowerBound: true,
+                                      children: [
+                                        [],
+                                        []
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              },
+                              {  // B -> B, 1 -> 2.
+                                id: '#32',
+                                title: ['B', '2'],
+                                total: 20,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        },
+                        {  // B -> B, 2.
+                          id: '#33',
+                          title: ['B', '2'],
+                          total: 20,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // B, 1.
+                    id: '#34',
+                    title: ['B', '1'],
+                    total: 35,
+                    self: 5,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#29'  // B -> B, 1.
+                      ],
+                      [
+                        {  // B, 1 -> 2.
+                          id: '#35',
+                          title: ['B', '2'],
+                          total: 30,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#32'  // B -> B, 1 -> 2.
+                            ],
+                            [
+                              {  // B, 1 -> 2 -> 2.
+                                id: '#36',
+                                title: ['B', '2'],
+                                total: 10,
+                                self: 10,
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        },
+                        {  // B, 1 -> 1.
+                          id: '#37',
+                          title: ['B', '1'],
+                          total: 25,
+                          self: 5,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#30'  // B -> B, 1 -> 1.
+                            ],
+                            [
+                              {  // B, 1 -> 1 -> 2.
+                                id: '#38',
+                                title: ['B', '2'],
+                                total: 20,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    '#31'  // B -> B, 1 -> 1 -> 2.
+                                  ],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // B, 2.
+                    id: '#39',
+                    title: ['B', '2'],
+                    total: 30,
+                    self: 10,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#33'  // B -> B, 2.
+                      ],
+                      [
+                        {  // B, 2 -> 2.
+                          id: '#40',
+                          title: ['B', '2'],
+                          total: 10,
+                          self: 10,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            }
+          ],
+          [
+            {  // *, 1.
+              title: [undefined, '1'],
+              total: 65,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  '#22',  // A, 1.
+                  '#34'  // B, 1.
+                ],
+                [
+                  {  // *, 1 -> 2.
+                    title: [undefined, '2'],
+                    total: 60,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#23',  // A, 1 -> 2.
+                        '#35'  // B, 1 -> 2.
+                      ],
+                      [
+                        {  // *, 1 -> 2 -> 2.
+                          title: [undefined, '2'],
+                          total: 10,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#24',  // A, 1 -> 2 -> 2.
+                              '#36'  // B, 1 -> 2 -> 2.
+                            ],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // *, 1 -> 1.
+                    title: [undefined, '1'],
+                    total: 25,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#25',  // A, 1 -> 1.
+                        '#37'  // B, 1 -> 1.
+                      ],
+                      [
+                        {  // *, 1 -> 1 -> 2.
+                          title: [undefined, '2'],
+                          total: 20,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#26',  // A, 1 -> 1 -> 2.
+                              '#38'  // B, 1 -> 1 -> 2.
+                            ],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // *, 2.
+              title: [undefined, '2'],
+              total: 60,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  '#27',  // A, 2.
+                  '#39'  // B, 2.
+                ],
+                [
+                  {  // *, 2 -> 2.
+                    title: [undefined, '2'],
+                    total: 10,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#28',  // A, 2 -> 2.
+                        '#40'  // B, 2 -> 2.
+                      ],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            }
+          ]
+        ]
+      },
+      {  // Bottom-up heavy view.
+        title: [undefined, undefined],
+        total: 65,
+        self: 0,
+        isLowerBound: true,
+        children: [
+          [
+            {  // A, *.
+              title: ['A', undefined],
+              total: 65,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // A -> A, *.
+                    title: ['A', undefined],
+                    total: 40,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [
+                        {  // A -> A, 1.
+                          id: '#0',
+                          title: ['A', '1'],
+                          total: 40,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            []
+                          ]
+                        },
+                        {  // A -> A, 2.
+                          id: '#1',
+                          title: ['A', '2'],
+                          total: 40,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [
+                              {  // A -> A, 2 -> 1.
+                                id: '#2',
+                                title: ['A', '1'],
+                                total: 40,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  []
+                                ]
+                              },
+                              {  // A -> A, 2 -> 2.
+                                id: '#3',
+                                title: ['A', '2'],
+                                total: 10,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  [
+                                    {  // A -> A, 2 -> 2 -> 1.
+                                      id: '#4',
+                                      title: ['A', '1'],
+                                      total: 10,
+                                      self: 0,
+                                      isLowerBound: true,
+                                      children: [
+                                        [],
+                                        []
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // A, 1.
+                    id: '#5',
+                    title: ['A', '1'],
+                    total: 65,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#0'  // A -> A, 1.
+                      ],
+                      [
+                        {  // A, 1 -> 1.
+                          id: '#6',
+                          title: ['A', '1'],
+                          total: 25,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // A, 2.
+                    id: '#7',
+                    title: ['A', '2'],
+                    total: 60,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#1'  // A -> A, 2.
+                      ],
+                      [
+                        {  // A, 2 -> 1.
+                          id: '#8',
+                          title: ['A', '1'],
+                          total: 60,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#2'  // A -> A, 2 -> 1.
+                            ],
+                            [
+                              {  // A, 2 -> 1 -> 1.
+                                id: '#9',
+                                title: ['A', '1'],
+                                total: 20,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        },
+                        {  // A, 2 -> 2.
+                          id: '#10',
+                          title: ['A', '2'],
+                          total: 10,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#3'  // A -> A, 2 -> 2.
+                            ],
+                            [
+                              {  // A, 2 -> 2 -> 1.
+                                id: '#11',
+                                title: ['A', '1'],
+                                total: 10,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    '#4'  // A -> A, 2 -> 2 -> 1.
+                                  ],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // B, *.
+              title: ['B', undefined],
+              total: 35,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  {  // B -> A, *.
+                    title: ['A', undefined],
+                    total: 35,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // B -> A -> A, *.
+                          title: ['A', undefined],
+                          total: 10,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [
+                              {  // B -> A -> A, 1.
+                                id: '#12',
+                                title: ['A', '1'],
+                                total: 10,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  []
+                                ]
+                              },
+                              {  // B -> A -> A, 2.
+                                id: '#13',
+                                title: ['A', '2'],
+                                total: 10,
+                                self: 10,
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  [
+                                    {  // B -> A -> A, 2 -> 1.
+                                      id: '#14',
+                                      title: ['A', '1'],
+                                      total: 10,
+                                      self: 0,
+                                      isLowerBound: true,
+                                      children: [
+                                        [],
+                                        []
+                                      ]
+                                    },
+                                    {  // B -> A -> A, 2 -> 2.
+                                      id: '#15',
+                                      title: ['A', '2'],
+                                      total: 10,
+                                      self: 10,
+                                      isLowerBound: true,
+                                      children: [
+                                        [],
+                                        [
+                                          {  // B -> A -> A, 2 -> 2 -> 1.
+                                            id: '#16',
+                                            title: ['A', '1'],
+                                            total: 10,
+                                            self: 10,
+                                            isLowerBound: true,
+                                            children: [
+                                              [],
+                                              []
+                                            ]
+                                          }
+                                        ]
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ],
+                      [
+                        {  // B -> A, 1.
+                          id: '#17',
+                          title: ['A', '1'],
+                          total: 35,
+                          self: 5,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#12'  // B -> A -> A, 1.
+                            ],
+                            [
+                              {  // B -> A, 1 -> 1.
+                                id: '#18',
+                                title: ['A', '1'],
+                                total: 25,
+                                self: 5,
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        },
+                        {  // B -> A, 2.
+                          id: '#19',
+                          title: ['A', '2'],
+                          total: 30,
+                          self: 10,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#13'  // B -> A -> A, 2.
+                            ],
+                            [
+                              {  // B -> A, 2 -> 1.
+                                id: '#20',
+                                title: ['A', '1'],
+                                total: 30,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    '#14'  // B -> A -> A, 2 -> 1.
+                                  ],
+                                  [
+                                    {  // B -> A, 2 -> 1 -> 1.
+                                      id: '#21',
+                                      title: ['A', '1'],
+                                      total: 20,
+                                      self: 0,
+                                      isLowerBound: true,
+                                      children: [
+                                        [],
+                                        []
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              },
+                              {  // B -> A, 2 -> 2.
+                                id: '#22',
+                                title: ['A', '2'],
+                                total: 10,
+                                self: 10,
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    '#15'  // B -> A -> A, 2 -> 2.
+                                  ],
+                                  [
+                                    {  // B -> A, 2 -> 2 -> 1.
+                                      id: '#23',
+                                      title: ['A', '1'],
+                                      total: 10,
+                                      self: 10,
+                                      isLowerBound: true,
+                                      children: [
+                                        [
+                                          '#16'  // B -> A -> A, 2 -> 2 -> 1.
+                                        ],
+                                        []
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // B -> B, *.
+                    title: ['B', undefined],
+                    total: 20,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        {  // B -> B -> A, *.
+                          title: ['A', undefined],
+                          total: 20,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [
+                              {  // B -> B -> A, 1.
+                                id: '#24',
+                                title: ['A', '1'],
+                                total: 20,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  [
+                                    {  // B -> B -> A, 1 -> 1.
+                                      id: '#25',
+                                      title: ['A', '1'],
+                                      total: 20,
+                                      self: 0,
+                                      isLowerBound: true,
+                                      children: [
+                                        [],
+                                        []
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              },
+                              {  // B -> B -> A, 2.
+                                id: '#26',
+                                title: ['A', '2'],
+                                total: 20,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  [
+                                    {  // B -> B -> A, 2 -> 1.
+                                      id: '#27',
+                                      title: ['A', '1'],
+                                      total: 20,
+                                      self: 0,
+                                      isLowerBound: true,
+                                      children: [
+                                        [],
+                                        [
+                                          {  // B -> B -> A, 2 -> 1 -> 1.
+                                            id: '#28',
+                                            title: ['A', '1'],
+                                            total: 20,
+                                            self: 0,
+                                            isLowerBound: true,
+                                            children: [
+                                              [],
+                                              []
+                                            ]
+                                          }
+                                        ]
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ],
+                      [
+                        {  // B -> B, 1.
+                          id: '#29',
+                          title: ['B', '1'],
+                          total: 20,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#24'  // B -> B -> A, 1.
+                            ],
+                            [
+                              {  // B -> B, 1 -> 1.
+                                id: '#30',
+                                title: ['B', '1'],
+                                total: 20,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    '#25'  // B -> B -> A, 1 -> 1.
+                                  ],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        },
+                        {  // B -> B, 2.
+                          id: '#31',
+                          title: ['B', '2'],
+                          total: 20,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#26'  // B -> B -> A, 2.
+                            ],
+                            [
+                              {  // B -> B, 2 -> 1.
+                                id: '#32',
+                                title: ['B', '1'],
+                                total: 20,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    '#27'  // B -> B -> A, 2 -> 1.
+                                  ],
+                                  [
+                                    {  // B -> B, 2 -> 1 -> 1.
+                                      id: '#33',
+                                      title: ['B', '1'],
+                                      total: 20,
+                                      self: 0,
+                                      isLowerBound: true,
+                                      children: [
+                                        [
+                                          '#28'  // B -> B -> A, 2 -> 1 -> 1.
+                                        ],
+                                        []
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // B, 1.
+                    id: '#34',
+                    title: ['B', '1'],
+                    total: 35,
+                    self: 5,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#17',  // B -> A, 1.
+                        '#29'  // B -> B, 1.
+                      ],
+                      [
+                        {  // B, 1 -> 1.
+                          id: '#35',
+                          title: ['B', '1'],
+                          total: 25,
+                          self: 5,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#18',  // B -> A, 1 -> 1.
+                              '#30'  // B -> B, 1 -> 1.
+                            ],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // B, 2.
+                    id: '#36',
+                    title: ['B', '2'],
+                    total: 30,
+                    self: 10,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#19',  // B -> A, 2.
+                        '#31'  // B -> B, 2.
+                      ],
+                      [
+                        {  // B, 2 -> 1.
+                          id: '#37',
+                          title: ['B', '1'],
+                          total: 30,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#20',  // B -> A, 2 -> 1.
+                              '#32'  // B -> B, 2 -> 1.
+                            ],
+                            [
+                              {  // B, 2 -> 1 -> 1.
+                                id: '#38',
+                                title: ['B', '1'],
+                                total: 20,
+                                self: 0,
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    '#21',  // B -> A, 2 -> 1 -> 1.
+                                    '#33'  // B -> B, 2 -> 1 -> 1.
+                                  ],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        },
+                        {  // B, 2 -> 2.
+                          id: '#39',
+                          title: ['B', '2'],
+                          total: 10,
+                          self: 10,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#22'  // B -> A, 2 -> 2.
+                            ],
+                            [
+                              {  // B, 2 -> 2 -> 1.
+                                id: '#40',
+                                title: ['B', '1'],
+                                total: 10,
+                                self: 10,
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    '#23'  // B -> A, 2 -> 2 -> 1.
+                                  ],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            }
+          ],
+          [
+            {  // *, 1.
+              title: [undefined, '1'],
+              total: 65,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  '#5',  // A, 1.
+                  '#34'  // B, 1.
+                ],
+                [
+                  {  // *, 1 -> 1.
+                    title: [undefined, '1'],
+                    total: 25,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#6',  // A, 1 -> 1.
+                        '#35'  // B, 1 -> 1.
+                      ],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // *, 2.
+              title: [undefined, '2'],
+              total: 60,
+              self: 0,
+              isLowerBound: true,
+              children: [
+                [
+                  '#7',  // A, 2.
+                  '#36'  // B, 2.
+                ],
+                [
+                  {  // *, 2 -> 1.
+                    title: [undefined, '1'],
+                    total: 60,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#8',  // A, 2 -> 1.
+                        '#37'  // B, 2 -> 1.
+                      ],
+                      [
+                        {  // *, 2 -> 1 -> 1.
+                          title: [undefined, '1'],
+                          total: 20,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#9',  // A, 2 -> 1 -> 1.
+                              '#38'  // B, 2 -> 1 -> 1.
+                            ],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // *, 2 -> 2.
+                    title: [undefined, '2'],
+                    total: 10,
+                    self: 0,
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#10',  // A, 2 -> 2.
+                        '#39'  // B, 2 -> 2.
+                      ],
+                      [
+                        {  // *, 2 -> 2 -> 1.
+                          title: [undefined, '1'],
+                          total: 10,
+                          self: 0,
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#11',  // A, 2 -> 2 -> 1.
+                              '#40'  // B, 2 -> 2 -> 1.
+                            ],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            }
+          ]
+        ]
+      });
+
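(For readability at this size: every expected tree in these fixtures uses the same node shape. The sketch below is inferred from the fixture data itself, using the 'A, 1' node above as the example; it is illustrative only and is not part of the diff, and the test's real builder helpers may differ.)

// One expected node, as written throughout these fixtures. The 2D test above
// uses plain numbers for total/self; the 3D test below uses b(...) builders.
const exampleExpectedNode = {
  id: '#5',            // Optional; present when a later node refers back to it.
  title: ['A', '1'],   // One entry per dimension; undefined acts as a wildcard (*).
  total: 65,           // Aggregate value for the node.
  self: 0,             // Value attributed to the node itself.
  isLowerBound: true,  // True when the total is only a lower bound.
  children: [          // One child list per dimension, in dimension order.
    ['#0'],            // A string id refers to a node defined earlier (shared sub-tree).
    [ /* inline child node objects follow this same shape */ ]
  ]
};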
+  builderTest('threeDimensions', 3 /* dimensions */,
+      [
+        { path: [['A', 'B'], ['C', 'D'], ['E', 'F']], value: b(0), kind: SELF },
+        { path: [['A', 'B'], ['C', 'D'], ['E']], value: b(1), kind: SELF },
+        { path: [['A', 'B'], ['C', 'D'], []], value: b(2), kind: SELF },
+        { path: [['A', 'B'], ['C'], ['E', 'F']], value: b(3), kind: SELF },
+        { path: [['A', 'B'], ['C'], ['E']], value: b(4), kind: SELF },
+        { path: [['A', 'B'], ['C'], []], value: b(5), kind: SELF },
+        { path: [['A', 'B'], [], ['E', 'F']], value: b(6), kind: SELF },
+        { path: [['A', 'B'], [], ['E']], value: b(7), kind: SELF },
+        { path: [['A', 'B'], [], []], value: b(8), kind: SELF },
+
+        { path: [['A'], ['C', 'D'], ['E', 'F']], value: b(9), kind: SELF },
+        { path: [['A'], ['C', 'D'], ['E']], value: b(10), kind: SELF },
+        { path: [['A'], ['C', 'D'], []], value: b(11), kind: SELF },
+        { path: [['A'], ['C'], ['E', 'F']], value: b(12), kind: SELF },
+        { path: [['A'], ['C'], ['E']], value: b(13), kind: SELF },
+        { path: [['A'], ['C'], []], value: b(14), kind: SELF },
+        { path: [['A'], [], ['E', 'F']], value: b(15), kind: SELF },
+        { path: [['A'], [], ['E']], value: b(16), kind: SELF },
+        { path: [['A'], [], []], value: b(17), kind: SELF },
+
+        { path: [[], ['C', 'D'], ['E', 'F']], value: b(18), kind: SELF },
+        { path: [[], ['C', 'D'], ['E']], value: b(19), kind: SELF },
+        { path: [[], ['C', 'D'], []], value: b(20), kind: SELF },
+        { path: [[], ['C'], ['E', 'F']], value: b(21), kind: SELF },
+        { path: [[], ['C'], ['E']], value: b(22), kind: SELF },
+        { path: [[], ['C'], []], value: b(23), kind: SELF },
+        { path: [[], [], ['E', 'F']], value: b(24), kind: SELF },
+        { path: [[], [], ['E']], value: b(25), kind: SELF },
+        { path: [[], [], []], value: b(26), kind: SELF }
+      ],
+      {  // Top-down tree view.
+        title: [undefined, undefined, undefined],
+        total: b([0, 26]),
+        self: b(26),
+        isLowerBound: false,
+        children: [
+          [
+            {  // A, *, *.
+              title: ['A', undefined, undefined],
+              total: b([0, 17]),
+              self: b(17),
+              isLowerBound: false,
+              children: [
+                [
+                  {  // A -> B, *, *.
+                    title: ['B', undefined, undefined],
+                    total: b([0, 8]),
+                    self: b(8),
+                    isLowerBound: false,
+                    children: [
+                      [],
+                      [
+                        {  // A -> B, C, *.
+                          id: '#0',
+                          title: ['B', 'C', undefined],
+                          total: b([0, 5]),
+                          self: b(5),
+                          isLowerBound: false,
+                          children: [
+                            [],
+                            [
+                              {  // A -> B, C -> D, *.
+                                id: '#1',
+                                title: ['B', 'D', undefined],
+                                total: b([0, 2]),
+                                self: b(2),
+                                isLowerBound: false,
+                                children: [
+                                  [],
+                                  [],
+                                  [
+                                    {  // A -> B, C -> D, E.
+                                      id: '#2',
+                                      title: ['B', 'D', 'E'],
+                                      total: b(0, 1),
+                                      self: b(1),
+                                      isLowerBound: false,
+                                      children: [
+                                        [],
+                                        [],
+                                        [
+                                          {  // A -> B, C -> D, E -> F.
+                                            id: '#3',
+                                            title: ['B', 'D', 'F'],
+                                            total: b(0),
+                                            self: b(0),
+                                            isLowerBound: false,
+                                            children: [
+                                              [],
+                                              [],
+                                              []
+                                            ]
+                                          }
+                                        ]
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ],
+                            [
+                              {  // A -> B, C, E.
+                                id: '#4',
+                                title: ['B', 'C', 'E'],
+                                total: b(0, 1, 3, 4),
+                                self: b(4),
+                                isLowerBound: false,
+                                children: [
+                                  [],
+                                  [
+                                    '#2'  // A -> B, C -> D, E.
+                                  ],
+                                  [
+                                    {  // A -> B, C, E -> F.
+                                      id: '#5',
+                                      title: ['B', 'C', 'F'],
+                                      total: b(0, 3),
+                                      self: b(3),
+                                      isLowerBound: false,
+                                      children: [
+                                        [],
+                                        [
+                                          '#3'  // A -> B, C -> D, E -> F.
+                                        ],
+                                        []
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ],
+                      [
+                        {  // A -> B, *, E.
+                          id: '#6',
+                          title: ['B', undefined, 'E'],
+                          total: b(0, 1, 3, 4, 6, 7),
+                          self: b(7),
+                          isLowerBound: false,
+                          children: [
+                            [],
+                            [
+                              '#4'  // A -> B, C, E.
+                            ],
+                            [
+                              {  // A -> B, *, E -> F.
+                                id: '#7',
+                                title: ['B', undefined, 'F'],
+                                total: b(0, 3, 6),
+                                self: b(6),
+                                isLowerBound: false,
+                                children: [
+                                  [],
+                                  [
+                                    '#5'  // A -> B, C, E -> F.
+                                  ],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // A, C, *.
+                    id: '#8',
+                    title: ['A', 'C', undefined],
+                    total: b([0, 5], [9, 14]),
+                    self: b(14),
+                    isLowerBound: false,
+                    children: [
+                      [
+                        '#0'  // A -> B, C, *.
+                      ],
+                      [
+                        {  // A, C -> D, *.
+                          id: '#9',
+                          title: ['A', 'D', undefined],
+                          total: b([0, 2], [9, 11]),
+                          self: b(11),
+                          isLowerBound: false,
+                          children: [
+                            [
+                              '#1'  // A -> B, C -> D, *.
+                            ],
+                            [],
+                            [
+                              {  // A, C -> D, E.
+                                id: '#10',
+                                title: ['A', 'D', 'E'],
+                                total: b(0, 1, 9, 10),
+                                self: b(10),
+                                isLowerBound: false,
+                                children: [
+                                  [
+                                    '#2'  // A -> B, C -> D, E.
+                                  ],
+                                  [],
+                                  [
+                                    {  // A, C -> D, E -> F.
+                                      id: '#11',
+                                      title: ['A', 'D', 'F'],
+                                      total: b(0, 9),
+                                      self: b(9),
+                                      isLowerBound: false,
+                                      children: [
+                                        [
+                                          '#3'  // A -> B, C -> D, E -> F.
+                                        ],
+                                        [],
+                                        []
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ],
+                      [
+                        {  // A, C, E.
+                          id: '#12',
+                          title: ['A', 'C', 'E'],
+                          total: b(0, 1, 3, 4, 9, 10, 12, 13),
+                          self: b(13),
+                          isLowerBound: false,
+                          children: [
+                            [
+                              '#4'  // A -> B, C, E.
+                            ],
+                            [
+                              '#10'  // A, C -> D, E.
+                            ],
+                            [
+                              {  // A, C, E -> F.
+                                id: '#13',
+                                title: ['A', 'C', 'F'],
+                                total: b(0, 3, 9, 12),
+                                self: b(12),
+                                isLowerBound: false,
+                                children: [
+                                  [
+                                    '#5'  // A -> B, C, E -> F.
+                                  ],
+                                  [
+                                    '#11'  // A, C -> D, E -> F.
+                                  ],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // A, *, E.
+                    id: '#14',
+                    title: ['A', undefined, 'E'],
+                    total: b(0, 1, 3, 4, 6, 7, 9, 10, 12, 13, 15, 16),
+                    self: b(16),
+                    isLowerBound: false,
+                    children: [
+                      [
+                        '#6'  // A -> B, *, E.
+                      ],
+                      [
+                        '#12'  // A, C, E.
+                      ],
+                      [
+                        {  // A, *, E -> F.
+                          id: '#15',
+                          title: ['A', undefined, 'F'],
+                          total: b(0, 3, 6, 9, 12, 15),
+                          self: b(15),
+                          isLowerBound: false,
+                          children: [
+                            [
+                              '#7'  // A -> B, *, E -> F.
+                            ],
+                            [
+                              '#13'  // A, C, E -> F.
+                            ],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            }
+          ],
+          [
+            {  // *, C, *.
+              title: [undefined, 'C', undefined],
+              total: b([0, 5], [9, 14], [18, 23]),
+              self: b(23),
+              isLowerBound: false,
+              children: [
+                [
+                  '#8'  // A, C, *.
+                ],
+                [
+                  {  // *, C -> D, *.
+                    title: [undefined, 'D', undefined],
+                    total: b([0, 2], [9, 11], [18, 20]),
+                    self: b(20),
+                    isLowerBound: false,
+                    children: [
+                      [
+                        '#9'  // A, C -> D, *.
+                      ],
+                      [],
+                      [
+                        {  // *, C -> D, E.
+                          id: '#16',
+                          title: [undefined, 'D', 'E'],
+                          total: b(0, 1, 9, 10, 18, 19),
+                          self: b(19),
+                          isLowerBound: false,
+                          children: [
+                            [
+                              '#10'  // A, C -> D, E.
+                            ],
+                            [],
+                            [
+                              {  // *, C -> D, E -> F.
+                                id: '#17',
+                                title: [undefined, 'D', 'F'],
+                                total: b(0, 9, 18),
+                                self: b(18),
+                                isLowerBound: false,
+                                children: [
+                                  [
+                                    '#11'  // A, C -> D, E -> F.
+                                  ],
+                                  [],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // *, C, E.
+                    id: '#18',
+                    title: [undefined, 'C', 'E'],
+                    total: b(0, 1, 3, 4, 9, 10, 12, 13, 18, 19, 21, 22),
+                    self: b(22),
+                    isLowerBound: false,
+                    children: [
+                      [
+                        '#12'  // A, C, E.
+                      ],
+                      [
+                        '#16'  // *, C -> D, E.
+                      ],
+                      [
+                        {  // *, C, E -> F.
+                          id: '#19',
+                          title: [undefined, 'C', 'F'],
+                          total: b(0, 3, 9, 12, 18, 21),
+                          self: b(21),
+                          isLowerBound: false,
+                          children: [
+                            [
+                              '#13'  // A, C, E -> F.
+                            ],
+                            [
+                              '#17'  // *, C -> D, E -> F.
+                            ],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            }
+          ],
+          [
+            {  // *, *, E.
+              title: [undefined, undefined, 'E'],
+              total: b(0, 1, 3, 4, 6, 7, 9, 10, 12, 13, 15, 16, 18, 19, 21, 22,
+                  24, 25),
+              self: b(25),
+              isLowerBound: false,
+              children: [
+                [
+                  '#14'  // A, *, E.
+                ],
+                [
+                  '#18'  // *, C, E.
+                ],
+                [
+                  {  // *, *, E -> F.
+                    title: [undefined, undefined, 'F'],
+                    total: b(0, 3, 6, 9, 12, 15, 18, 21, 24),
+                    self: b(24),
+                    isLowerBound: false,
+                    children: [
+                      [
+                        '#15'  // A, *, E -> F.
+                      ],
+                      [
+                        '#19'  // *, C, E -> F.
+                      ],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            }
+          ]
+        ]
+      },
+      {  // Top-down heavy view.
+        title: [undefined, undefined, undefined],
+        total: b([0, 26]),
+        self: b(26),
+        isLowerBound: false,
+        children: [
+          [
+            {  // A, *, *.
+              title: ['A', undefined, undefined],
+              total: b([0, 17]),
+              self: b(17),
+              isLowerBound: true,
+              children: [
+                [
+                  {  // A -> B, *, *.
+                    title: ['B', undefined, undefined],
+                    total: b([0, 8]),
+                    self: b(8),
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [
+                        {  // A -> B, C, *.
+                          id: '#0',
+                          title: ['B', 'C', undefined],
+                          total: b([0, 5]),
+                          self: b(5),
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [
+                              {  // A -> B, C -> D, *.
+                                id: '#1',
+                                title: ['B', 'D', undefined],
+                                total: b([0, 2]),
+                                self: b(2),
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  [],
+                                  [
+                                    {  // A -> B, C -> D, E.
+                                      id: '#2',
+                                      title: ['B', 'D', 'E'],
+                                      total: b(0, 1),
+                                      self: b(1),
+                                      isLowerBound: true,
+                                      children: [
+                                        [],
+                                        [],
+                                        [
+                                          {  // A -> B, C -> D, E -> F.
+                                            id: '#3',
+                                            title: ['B', 'D', 'F'],
+                                            total: b(0),
+                                            self: b(0),
+                                            isLowerBound: true,
+                                            children: [
+                                              [],
+                                              [],
+                                              []
+                                            ]
+                                          }
+                                        ]
+                                      ]
+                                    },
+                                    {  // A -> B, C -> D, F.
+                                      id: '#4',
+                                      title: ['B', 'D', 'F'],
+                                      total: b(0),
+                                      self: b(0),
+                                      isLowerBound: true,
+                                      children: [
+                                        [],
+                                        [],
+                                        []
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ],
+                            [
+                              {  // A -> B, C, E.
+                                id: '#5',
+                                title: ['B', 'C', 'E'],
+                                total: b(0, 1, 3, 4),
+                                self: b(4),
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  [
+                                    '#2'  // A -> B, C -> D, E.
+                                  ],
+                                  [
+                                    {  // A -> B, C, E -> F.
+                                      id: '#6',
+                                      title: ['B', 'C', 'F'],
+                                      total: b(0, 3),
+                                      self: b(3),
+                                      isLowerBound: true,
+                                      children: [
+                                        [],
+                                        [
+                                          '#3'  // A -> B, C -> D, E -> F.
+                                        ],
+                                        []
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              },
+                              {  // A -> B, C, F.
+                                id: '#7',
+                                title: ['B', 'C', 'F'],
+                                total: b(0, 3),
+                                self: b(3),
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  [
+                                    '#4'  // A -> B, C -> D, F.
+                                  ],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        },
+                        {  // A -> B, D, *.
+                          id: '#8',
+                          title: ['B', 'D', undefined],
+                          total: b([0, 2]),
+                          self: b(2),
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [],
+                            [
+                              {  // A -> B, D, E.
+                                id: '#9',
+                                title: ['B', 'D', 'E'],
+                                total: b(0, 1),
+                                self: b(1),
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  [],
+                                  [
+                                    {  // A -> B, D, E -> F.
+                                      id: '#10',
+                                      title: ['B', 'D', 'F'],
+                                      total: b(0),
+                                      self: b(0),
+                                      isLowerBound: true,
+                                      children: [
+                                        [],
+                                        [],
+                                        []
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              },
+                              {  // A -> B, D, F.
+                                id: '#11',
+                                title: ['B', 'D', 'F'],
+                                total: b(0),
+                                self: b(0),
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  [],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ],
+                      [
+                        {  // A -> B, *, E.
+                          id: '#12',
+                          title: ['B', undefined, 'E'],
+                          total: b(0, 1, 3, 4, 6, 7),
+                          self: b(7),
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [
+                              '#5',  // A -> B, C, E.
+                              '#9'  // A -> B, D, E.
+                            ],
+                            [
+                              {  // A -> B, *, E -> F.
+                                id: '#13',
+                                title: ['B', undefined, 'F'],
+                                total: b(0, 3, 6),
+                                self: b(6),
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  [
+                                    '#6',  // A -> B, C, E -> F.
+                                    '#10'  // A -> B, D, E -> F.
+                                  ],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        },
+                        {  // A -> B, *, F.
+                          id: '#14',
+                          title: ['B', undefined, 'F'],
+                          total: b(0, 3, 6),
+                          self: b(6),
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [
+                              '#7',  // A -> B, C, F.
+                              '#11'  // A -> B, D, F.
+                            ],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // A, C, *.
+                    id: '#15',
+                    title: ['A', 'C', undefined],
+                    total: b([0, 5], [9, 14]),
+                    self: b(14),
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#0'  // A -> B, C, *.
+                      ],
+                      [
+                        {  // A, C -> D, *.
+                          id: '#16',
+                          title: ['A', 'D', undefined],
+                          total: b([0, 2], [9, 11]),
+                          self: b(11),
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#1'  // A -> B, C -> D, *.
+                            ],
+                            [],
+                            [
+                              {  // A, C -> D, E.
+                                id: '#17',
+                                title: ['A', 'D', 'E'],
+                                total: b(0, 1, 9, 10),
+                                self: b(10),
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    '#2'  // A -> B, C -> D, E.
+                                  ],
+                                  [],
+                                  [
+                                    {  // A, C -> D, E -> F.
+                                      id: '#18',
+                                      title: ['A', 'D', 'F'],
+                                      total: b(0, 9),
+                                      self: b(9),
+                                      isLowerBound: true,
+                                      children: [
+                                        [
+                                          '#3'  // A -> B, C -> D, E -> F.
+                                        ],
+                                        [],
+                                        []
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              },
+                              {  // A, C -> D, F.
+                                id: '#19',
+                                title: ['A', 'D', 'F'],
+                                total: b(0, 9),
+                                self: b(9),
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    '#4'  // A -> B, C -> D, F.
+                                  ],
+                                  [],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ],
+                      [
+                        {  // A, C, E.
+                          id: '#20',
+                          title: ['A', 'C', 'E'],
+                          total: b(0, 1, 3, 4, 9, 10, 12, 13),
+                          self: b(13),
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#5'  // A -> B, C, E.
+                            ],
+                            [
+                              '#17'  // A, C -> D, E.
+                            ],
+                            [
+                              {  // A, C, E -> F.
+                                id: '#21',
+                                title: ['A', 'C', 'F'],
+                                total: b(0, 3, 9, 12),
+                                self: b(12),
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    '#6'  // A -> B, C, E -> F.
+                                  ],
+                                  [
+                                    '#18'  // A, C -> D, E -> F.
+                                  ],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        },
+                        {  // A, C, F.
+                          id: '#22',
+                          title: ['A', 'C', 'F'],
+                          total: b(0, 3, 9, 12),
+                          self: b(12),
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#7'  // A -> B, C, F.
+                            ],
+                            [
+                              '#19'  // A, C -> D, F.
+                            ],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // A, D, *.
+                    id: '#23',
+                    title: ['A', 'D', undefined],
+                    total: b([0, 2], [9, 11]),
+                    self: b(11),
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#8'  // A -> B, D, *.
+                      ],
+                      [],
+                      [
+                        {  // A, D, E.
+                          id: '#24',
+                          title: ['A', 'D', 'E'],
+                          total: b(0, 1, 9, 10),
+                          self: b(10),
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#9'  // A -> B, D, E.
+                            ],
+                            [],
+                            [
+                              {  // A, D, E -> F.
+                                id: '#25',
+                                title: ['A', 'D', 'F'],
+                                total: b(0, 9),
+                                self: b(9),
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    '#10'  // A -> B, D, E -> F.
+                                  ],
+                                  [],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        },
+                        {  // A, D, F.
+                          id: '#26',
+                          title: ['A', 'D', 'F'],
+                          total: b(0, 9),
+                          self: b(9),
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#11'  // A -> B, D, F.
+                            ],
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // A, *, E.
+                    id: '#27',
+                    title: ['A', undefined, 'E'],
+                    total: b(0, 1, 3, 4, 6, 7, 9, 10, 12, 13, 15, 16),
+                    self: b(16),
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#12'  // A -> B, *, E.
+                      ],
+                      [
+                        '#20',  // A, C, E.
+                        '#24'  // A, D, E.
+                      ],
+                      [
+                        {  // A, *, E -> F.
+                          id: '#28',
+                          title: ['A', undefined, 'F'],
+                          total: b(0, 3, 6, 9, 12, 15),
+                          self: b(15),
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#13'  // A -> B, *, E -> F.
+                            ],
+                            [
+                              '#21',  // A, C, E -> F.
+                              '#25'  // A, D, E -> F.
+                            ],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // A, *, F.
+                    id: '#29',
+                    title: ['A', undefined, 'F'],
+                    total: b(0, 3, 6, 9, 12, 15),
+                    self: b(15),
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#14'  // A -> B, *, F.
+                      ],
+                      [
+                        '#22',  // A, C, F.
+                        '#26'  // A, D, F.
+                      ],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // B, *, *.
+              title: ['B', undefined, undefined],
+              total: b([0, 8]),
+              self: b(8),
+              isLowerBound: true,
+              children: [
+                [],
+                [
+                  {  // B, C, *.
+                    id: '#30',
+                    title: ['B', 'C', undefined],
+                    total: b([0, 5]),
+                    self: b(5),
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [
+                        {  // B, C -> D, *.
+                          id: '#31',
+                          title: ['B', 'D', undefined],
+                          total: b([0, 2]),
+                          self: b(2),
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [],
+                            [
+                              {  // B, C -> D, E.
+                                id: '#32',
+                                title: ['B', 'D', 'E'],
+                                total: b(0, 1),
+                                self: b(1),
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  [],
+                                  [
+                                    {  // B, C -> D, E -> F.
+                                      id: '#33',
+                                      title: ['B', 'D', 'F'],
+                                      total: b(0),
+                                      self: b(0),
+                                      isLowerBound: true,
+                                      children: [
+                                        [],
+                                        [],
+                                        []
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              },
+                              {  // B, C -> D, F.
+                                id: '#34',
+                                title: ['B', 'D', 'F'],
+                                total: b(0),
+                                self: b(0),
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  [],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ],
+                      [
+                        {  // B, C, E.
+                          id: '#35',
+                          title: ['B', 'C', 'E'],
+                          total: b(0, 1, 3, 4),
+                          self: b(4),
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [
+                              '#32'  // B, C -> D, E.
+                            ],
+                            [
+                              {  // B, C, E -> F.
+                                id: '#36',
+                                title: ['B', 'C', 'F'],
+                                total: b(0, 3),
+                                self: b(3),
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  [
+                                    '#33'  // B, C -> D, E -> F.
+                                  ],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        },
+                        {  // B, C, F.
+                          id: '#37',
+                          title: ['B', 'C', 'F'],
+                          total: b(0, 3),
+                          self: b(3),
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [
+                              '#34'  // B, C -> D, F.
+                            ],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // B, D, *.
+                    id: '#38',
+                    title: ['B', 'D', undefined],
+                    total: b([0, 2]),
+                    self: b(2),
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [],
+                      [
+                        {  // B, D, E.
+                          id: '#39',
+                          title: ['B', 'D', 'E'],
+                          total: b(0, 1),
+                          self: b(1),
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [],
+                            [
+                              {  // B, D, E -> F.
+                                id: '#40',
+                                title: ['B', 'D', 'F'],
+                                total: b(0),
+                                self: b(0),
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  [],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        },
+                        {  // B, D, F.
+                          id: '#41',
+                          title: ['B', 'D', 'F'],
+                          total: b(0),
+                          self: b(0),
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // B, *, E.
+                    id: '#42',
+                    title: ['B', undefined, 'E'],
+                    total: b(0, 1, 3, 4, 6, 7),
+                    self: b(7),
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [
+                        '#35',  // B, C, E.
+                        '#39'  // B, D, E.
+                      ],
+                      [
+                        {  // B, *, E -> F.
+                          id: '#43',
+                          title: ['B', undefined, 'F'],
+                          total: b(0, 3, 6),
+                          self: b(6),
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [
+                              '#36',  // B, C, E -> F.
+                              '#40'  // B, D, E -> F.
+                            ],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // B, *, F.
+                    id: '#44',
+                    title: ['B', undefined, 'F'],
+                    total: b(0, 3, 6),
+                    self: b(6),
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [
+                        '#37',  // B, C, F.
+                        '#41'  // B, D, F.
+                      ],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            }
+          ],
+          [
+            {  // *, C, *.
+              title: [undefined, 'C', undefined],
+              total: b([0, 5], [9, 14], [18, 23]),
+              self: b(23),
+              isLowerBound: true,
+              children: [
+                [
+                  '#15',  // A, C, *.
+                  '#30'  // B, C, *.
+                ],
+                [
+                  {  // *, C -> D, *.
+                    title: [undefined, 'D', undefined],
+                    total: b([0, 2], [9, 11], [18, 20]),
+                    self: b(20),
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#16',  // A, C -> D, *.
+                        '#31'  // B, C -> D, *.
+                      ],
+                      [],
+                      [
+                        {  // *, C -> D, E.
+                          id: '#45',
+                          title: [undefined, 'D', 'E'],
+                          total: b(0, 1, 9, 10, 18, 19),
+                          self: b(19),
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#17',  // A, C -> D, E.
+                              '#32'  // B, C -> D, E.
+                            ],
+                            [],
+                            [
+                              {  // *, C -> D, E -> F.
+                                id: '#46',
+                                title: [undefined, 'D', 'F'],
+                                total: b(0, 9, 18),
+                                self: b(18),
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    '#18',  // A, C -> D, E -> F.
+                                    '#33'  // B, C -> D, E -> F.
+                                  ],
+                                  [],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        },
+                        {  // *, C -> D, F.
+                          id: '#47',
+                          title: [undefined, 'D', 'F'],
+                          total: b(0, 9, 18),
+                          self: b(18),
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#19',  // A, C -> D, F.
+                              '#34'  // B, C -> D, F.
+                            ],
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // *, C, E.
+                    id: '#48',
+                    title: [undefined, 'C', 'E'],
+                    total: b(0, 1, 3, 4, 9, 10, 12, 13, 18, 19, 21, 22),
+                    self: b(22),
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#20',  // A, C, E.
+                        '#35'  // B, C, E.
+                      ],
+                      [
+                        '#45'  // *, C -> D, E.
+                      ],
+                      [
+                        {  // *, C, E -> F.
+                          id: '#49',
+                          title: [undefined, 'C', 'F'],
+                          total: b(0, 3, 9, 12, 18, 21),
+                          self: b(21),
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#21',  // A, C, E -> F.
+                              '#36'  // B, C, E -> F.
+                            ],
+                            [
+                              '#46'  // *, C -> D, E -> F.
+                            ],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // *, C, F.
+                    id: '#50',
+                    title: [undefined, 'C', 'F'],
+                    total: b(0, 3, 9, 12, 18, 21),
+                    self: b(21),
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#22',  // A, C, F.
+                        '#37'  // B, C, F.
+                      ],
+                      [
+                        '#47'  // *, C -> D, F.
+                      ],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // *, D, *.
+              title: [undefined, 'D', undefined],
+              total: b([0, 2], [9, 11], [18, 20]),
+              self: b(20),
+              isLowerBound: true,
+              children: [
+                [
+                  '#23',  // A, D, *.
+                  '#38'  // B, D, *.
+                ],
+                [],
+                [
+                  {  // *, D, E.
+                    id: '#51',
+                    title: [undefined, 'D', 'E'],
+                    total: b(0, 1, 9, 10, 18, 19),
+                    self: b(19),
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#24',  // A, D, E.
+                        '#39'  // B, D, E.
+                      ],
+                      [],
+                      [
+                        {  // *, D, E -> F.
+                          id: '#52',
+                          title: [undefined, 'D', 'F'],
+                          total: b(0, 9, 18),
+                          self: b(18),
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#25',  // A, D, E -> F.
+                              '#40'  // B, D, E -> F.
+                            ],
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // *, D, F.
+                    id: '#53',
+                    title: [undefined, 'D', 'F'],
+                    total: b(0, 9, 18),
+                    self: b(18),
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#26',  // A, D, F.
+                        '#41'  // B, D, F.
+                      ],
+                      [],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            }
+          ],
+          [
+            {  // *, *, E.
+              title: [undefined, undefined, 'E'],
+              total: b(0, 1, 3, 4, 6, 7, 9, 10, 12, 13, 15, 16, 18, 19, 21, 22,
+                  24, 25),
+              self: b(25),
+              isLowerBound: true,
+              children: [
+                [
+                  '#27',  // A, *, E.
+                  '#42'  // B, *, E.
+                ],
+                [
+                  '#48',  // *, C, E.
+                  '#51'  // *, D, E.
+                ],
+                [
+                  {  // *, *, E -> F.
+                    title: [undefined, undefined, 'F'],
+                    total: b(0, 3, 6, 9, 12, 15, 18, 21, 24),
+                    self: b(24),
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#28',  // A, *, E -> F.
+                        '#43'  // B, *, E -> F.
+                      ],
+                      [
+                        '#49',  // *, C, E -> F.
+                        '#52'  // *, D, E -> F.
+                      ],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // *, *, F.
+              title: [undefined, undefined, 'F'],
+              total: b(0, 3, 6, 9, 12, 15, 18, 21, 24),
+              self: b(24),
+              isLowerBound: true,
+              children: [
+                [
+                  '#29',  // A, *, F.
+                  '#44'  // B, *, F.
+                ],
+                [
+                  '#50',  // *, C, F.
+                  '#53'  // *, D, F.
+                ],
+                []
+              ]
+            }
+          ]
+        ]
+      },
+      {  // Bottom-up heavy view.
+        title: [undefined, undefined, undefined],
+        total: b([0, 26]),
+        self: b(26),
+        isLowerBound: false,
+        children: [
+          [
+            {  // A, *, *.
+              title: ['A', undefined, undefined],
+              total: b([0, 17]),
+              self: b(17),
+              isLowerBound: true,
+              children: [
+                [],
+                [
+                  {  // A, C, *.
+                    id: '#0',
+                    title: ['A', 'C', undefined],
+                    total: b([0, 5], [9, 14]),
+                    self: b(14),
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [],
+                      [
+                        {  // A, C, E.
+                          id: '#1',
+                          title: ['A', 'C', 'E'],
+                          total: b(0, 1, 3, 4, 9, 10, 12, 13),
+                          self: b(13),
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [],
+                            []
+                          ]
+                        },
+                        {  // A, C, F.
+                          id: '#2',
+                          title: ['A', 'C', 'F'],
+                          total: b(0, 3, 9, 12),
+                          self: b(12),
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [],
+                            [
+                              {  // A, C, F -> E.
+                                id: '#3',
+                                title: ['A', 'C', 'E'],
+                                total: b(0, 3, 9, 12),
+                                self: b(12),
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  [],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // A, D, *.
+                    id: '#4',
+                    title: ['A', 'D', undefined],
+                    total: b([0, 2], [9, 11]),
+                    self: b(11),
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [
+                        {  // A, D -> C, *.
+                          id: '#5',
+                          title: ['A', 'C', undefined],
+                          total: b([0, 2], [9, 11]),
+                          self: b(11),
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [],
+                            [
+                              {  // A, D -> C, E.
+                                id: '#6',
+                                title: ['A', 'C', 'E'],
+                                total: b(0, 1, 9, 10),
+                                self: b(10),
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  [],
+                                  []
+                                ]
+                              },
+                              {  // A, D -> C, F.
+                                id: '#7',
+                                title: ['A', 'C', 'F'],
+                                total: b(0, 9),
+                                self: b(9),
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  [],
+                                  [
+                                    {  // A, D -> C, F -> E.
+                                      id: '#8',
+                                      title: ['A', 'C', 'E'],
+                                      total: b(0, 9),
+                                      self: b(9),
+                                      isLowerBound: true,
+                                      children: [
+                                        [],
+                                        [],
+                                        []
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ],
+                      [
+                        {  // A, D, E.
+                          id: '#9',
+                          title: ['A', 'D', 'E'],
+                          total: b(0, 1, 9, 10),
+                          self: b(10),
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [
+                              '#6'  // A, D -> C, E.
+                            ],
+                            []
+                          ]
+                        },
+                        {  // A, D, F.
+                          id: '#10',
+                          title: ['A', 'D', 'F'],
+                          total: b(0, 9),
+                          self: b(9),
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [
+                              '#7'  // A, D -> C, F.
+                            ],
+                            [
+                              {  // A, D, F -> E.
+                                id: '#11',
+                                title: ['A', 'D', 'E'],
+                                total: b(0, 9),
+                                self: b(9),
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  [
+                                    '#8'  // A, D -> C, F -> E.
+                                  ],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // A, *, E.
+                    id: '#12',
+                    title: ['A', undefined, 'E'],
+                    total: b(0, 1, 3, 4, 6, 7, 9, 10, 12, 13, 15, 16),
+                    self: b(16),
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [
+                        '#1',  // A, C, E.
+                        '#9'  // A, D, E.
+                      ],
+                      []
+                    ]
+                  },
+                  {  // A, *, F.
+                    id: '#13',
+                    title: ['A', undefined, 'F'],
+                    total: b(0, 3, 6, 9, 12, 15),
+                    self: b(15),
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [
+                        '#2',  // A, C, F.
+                        '#10'  // A, D, F.
+                      ],
+                      [
+                        {  // A, *, F -> E.
+                          id: '#14',
+                          title: ['A', undefined, 'E'],
+                          total: b(0, 3, 6, 9, 12, 15),
+                          self: b(15),
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [
+                              '#3',  // A, C, F -> E.
+                              '#11'  // A, D, F -> E.
+                            ],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // B, *, *.
+              title: ['B', undefined, undefined],
+              total: b([0, 8]),
+              self: b(8),
+              isLowerBound: true,
+              children: [
+                [
+                  {  // B -> A, *, *.
+                    title: ['A', undefined, undefined],
+                    total: b([0, 8]),
+                    self: b(8),
+                    isLowerBound: true,
+                    children: [
+                      [],
+                      [
+                        {  // B -> A, C, *.
+                          id: '#15',
+                          title: ['A', 'C', undefined],
+                          total: b([0, 5]),
+                          self: b(5),
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [],
+                            [
+                              {  // B -> A, C, E.
+                                id: '#16',
+                                title: ['A', 'C', 'E'],
+                                total: b(0, 1, 3, 4),
+                                self: b(4),
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  [],
+                                  []
+                                ]
+                              },
+                              {  // B -> A, C, F.
+                                id: '#17',
+                                title: ['A', 'C', 'F'],
+                                total: b(0, 3),
+                                self: b(3),
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  [],
+                                  [
+                                    {  // B -> A, C, F -> E.
+                                      id: '#18',
+                                      title: ['A', 'C', 'E'],
+                                      total: b(0, 3),
+                                      self: b(3),
+                                      isLowerBound: true,
+                                      children: [
+                                        [],
+                                        [],
+                                        []
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ]
+                          ]
+                        },
+                        {  // B -> A, D, *.
+                          id: '#19',
+                          title: ['A', 'D', undefined],
+                          total: b([0, 2]),
+                          self: b(2),
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [
+                              {  // B -> A, D -> C, *.
+                                id: '#20',
+                                title: ['A', 'C', undefined],
+                                total: b([0, 2]),
+                                self: b(2),
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  [],
+                                  [
+                                    {  // B -> A, D -> C, E.
+                                      id: '#21',
+                                      title: ['A', 'C', 'E'],
+                                      total: b(0, 1),
+                                      self: b(1),
+                                      isLowerBound: true,
+                                      children: [
+                                        [],
+                                        [],
+                                        []
+                                      ]
+                                    },
+                                    {  // B -> A, D -> C, F.
+                                      id: '#22',
+                                      title: ['A', 'C', 'F'],
+                                      total: b(0),
+                                      self: b(0),
+                                      isLowerBound: true,
+                                      children: [
+                                        [],
+                                        [],
+                                        [
+                                          {  // B -> A, D -> C, F -> E.
+                                            id: '#23',
+                                            title: ['A', 'C', 'E'],
+                                            total: b(0),
+                                            self: b(0),
+                                            isLowerBound: true,
+                                            children: [
+                                              [],
+                                              [],
+                                              []
+                                            ]
+                                          }
+                                        ]
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ],
+                            [
+                              {  // B -> A, D, E.
+                                id: '#24',
+                                title: ['A', 'D', 'E'],
+                                total: b(0, 1),
+                                self: b(1),
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  [
+                                    '#21'  // B -> A, D -> C, E.
+                                  ],
+                                  []
+                                ]
+                              },
+                              {  // B -> A, D, F.
+                                id: '#25',
+                                title: ['A', 'D', 'F'],
+                                total: b(0),
+                                self: b(0),
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  [
+                                    '#22'  // B -> A, D -> C, F.
+                                  ],
+                                  [
+                                    {  // B -> A, D, F -> E.
+                                      id: '#26',
+                                      title: ['A', 'D', 'E'],
+                                      total: b(0),
+                                      self: b(0),
+                                      isLowerBound: true,
+                                      children: [
+                                        [],
+                                        [
+                                          '#23'  // B -> A, D -> C, F -> E.
+                                        ],
+                                        []
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ],
+                      [
+                        {  // B -> A, *, E.
+                          id: '#27',
+                          title: ['A', undefined, 'E'],
+                          total: b(0, 1, 3, 4, 6, 7),
+                          self: b(7),
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [
+                              '#16',  // B -> A, C, E.
+                              '#24'  // B -> A, D, E.
+                            ],
+                            []
+                          ]
+                        },
+                        {  // B -> A, *, F.
+                          id: '#28',
+                          title: ['A', undefined, 'F'],
+                          total: b(0, 3, 6),
+                          self: b(6),
+                          isLowerBound: true,
+                          children: [
+                            [],
+                            [
+                              '#17',  // B -> A, C, F.
+                              '#25'  // B -> A, D, F.
+                            ],
+                            [
+                              {  // B -> A, *, F -> E.
+                                id: '#29',
+                                title: ['A', undefined, 'E'],
+                                total: b(0, 3, 6),
+                                self: b(6),
+                                isLowerBound: true,
+                                children: [
+                                  [],
+                                  [
+                                    '#18',  // B -> A, C, F -> E.
+                                    '#26'  // B -> A, D, F -> E.
+                                  ],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // B, C, *.
+                    id: '#30',
+                    title: ['B', 'C', undefined],
+                    total: b([0, 5]),
+                    self: b(5),
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#15'  // B -> A, C, *.
+                      ],
+                      [],
+                      [
+                        {  // B, C, E.
+                          id: '#31',
+                          title: ['B', 'C', 'E'],
+                          total: b(0, 1, 3, 4),
+                          self: b(4),
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#16'  // B -> A, C, E.
+                            ],
+                            [],
+                            []
+                          ]
+                        },
+                        {  // B, C, F.
+                          id: '#32',
+                          title: ['B', 'C', 'F'],
+                          total: b(0, 3),
+                          self: b(3),
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#17'  // B -> A, C, F.
+                            ],
+                            [],
+                            [
+                              {  // B, C, F -> E.
+                                id: '#33',
+                                title: ['B', 'C', 'E'],
+                                total: b(0, 3),
+                                self: b(3),
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    '#18'  // B -> A, C, F -> E.
+                                  ],
+                                  [],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  },
+                  {  // B, D, *.
+                    id: '#34',
+                    title: ['B', 'D', undefined],
+                    total: b([0, 2]),
+                    self: b(2),
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#19'  // B -> A, D, *.
+                      ],
+                      [
+                        {  // B, D -> C, *.
+                          id: '#35',
+                          title: ['B', 'C', undefined],
+                          total: b([0, 2]),
+                          self: b(2),
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#20'  // B -> A, D -> C, *.
+                            ],
+                            [],
+                            [
+                              {  // B, D -> C, E.
+                                id: '#36',
+                                title: ['B', 'C', 'E'],
+                                total: b(0, 1),
+                                self: b(1),
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    '#21'  // B -> A, D -> C, E.
+                                  ],
+                                  [],
+                                  []
+                                ]
+                              },
+                              {  // B, D -> C, F.
+                                id: '#37',
+                                title: ['B', 'C', 'F'],
+                                total: b(0),
+                                self: b(0),
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    '#22'  // B -> A, D -> C, F.
+                                  ],
+                                  [],
+                                  [
+                                    {  // B, D -> C, F -> E.
+                                      id: '#38',
+                                      title: ['B', 'C', 'E'],
+                                      total: b(0),
+                                      self: b(0),
+                                      isLowerBound: true,
+                                      children: [
+                                        [
+                                          '#23'  // B -> A, D -> C, F -> E.
+                                        ],
+                                        [],
+                                        []
+                                      ]
+                                    }
+                                  ]
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ],
+                      [
+                        {  // B, D, E.
+                          id: '#39',
+                          title: ['B', 'D', 'E'],
+                          total: b(0, 1),
+                          self: b(1),
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#24'  // B -> A, D, E.
+                            ],
+                            [
+                              '#36'  // B, D -> C, E.
+                            ],
+                            []
+                          ]
+                        },
+                        {  // B, D, F.
+                          id: '#40',
+                          title: ['B', 'D', 'F'],
+                          total: b(0),
+                          self: b(0),
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#25'  // B -> A, D, F.
+                            ],
+                            [
+                              '#37'  // B, D -> C, F.
+                            ],
+                            [
+                              {  // B, D, F -> E.
+                                id: '#41',
+                                title: ['B', 'D', 'E'],
+                                total: b(0),
+                                self: b(0),
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    '#26'  // B -> A, D, F -> E.
+                                  ],
+                                  [
+                                    '#38'  // B, D -> C, F -> E.
+                                  ],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // B, *, E.
+                    id: '#42',
+                    title: ['B', undefined, 'E'],
+                    total: b(0, 1, 3, 4, 6, 7),
+                    self: b(7),
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#27'  // B -> A, *, E.
+                      ],
+                      [
+                        '#31',  // B, C, E.
+                        '#39'  // B, D, E.
+                      ],
+                      []
+                    ]
+                  },
+                  {  // B, *, F.
+                    id: '#43',
+                    title: ['B', undefined, 'F'],
+                    total: b(0, 3, 6),
+                    self: b(6),
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#28'  // B -> A, *, F.
+                      ],
+                      [
+                        '#32',  // B, C, F.
+                        '#40'  // B, D, F.
+                      ],
+                      [
+                        {  // B, *, F -> E.
+                          id: '#44',
+                          title: ['B', undefined, 'E'],
+                          total: b(0, 3, 6),
+                          self: b(6),
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#29'  // B -> A, *, F -> E.
+                            ],
+                            [
+                              '#33',  // B, C, F -> E.
+                              '#41'  // B, D, F -> E.
+                            ],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            }
+          ],
+          [
+            {  // *, C, *.
+              title: [undefined, 'C', undefined],
+              total: b([0, 5], [9, 14], [18, 23]),
+              self: b(23),
+              isLowerBound: true,
+              children: [
+                [
+                  '#0',  // A, C, *.
+                  '#30'  // B, C, *.
+                ],
+                [],
+                [
+                  {  // *, C, E.
+                    id: '#45',
+                    title: [undefined, 'C', 'E'],
+                    total: b(0, 1, 3, 4, 9, 10, 12, 13, 18, 19, 21, 22),
+                    self: b(22),
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#1',  // A, C, E.
+                        '#31'  // B, C, E.
+                      ],
+                      [],
+                      []
+                    ]
+                  },
+                  {  // *, C, F.
+                    id: '#46',
+                    title: [undefined, 'C', 'F'],
+                    total: b(0, 3, 9, 12, 18, 21),
+                    self: b(21),
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#2',  // A, C, F.
+                        '#32'  // B, C, F.
+                      ],
+                      [],
+                      [
+                        {  // *, C, F -> E.
+                          id: '#47',
+                          title: [undefined, 'C', 'E'],
+                          total: b(0, 3, 9, 12, 18, 21),
+                          self: b(21),
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#3',  // A, C, F -> E.
+                              '#33'  // B, C, F -> E.
+                            ],
+                            [],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            },
+            {  // *, D, *.
+              title: [undefined, 'D', undefined],
+              total: b([0, 2], [9, 11], [18, 20]),
+              self: b(20),
+              isLowerBound: true,
+              children: [
+                [
+                  '#4',  // A, D, *.
+                  '#34'  // B, D, *.
+                ],
+                [
+                  {  // *, D -> C, *.
+                    title: [undefined, 'C', undefined],
+                    total: b([0, 2], [9, 11], [18, 20]),
+                    self: b(20),
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#5',  // A, D -> C, *.
+                        '#35'  // B, D -> C, *.
+                      ],
+                      [],
+                      [
+                        {  // *, D -> C, E.
+                          id: '#48',
+                          title: [undefined, 'C', 'E'],
+                          total: b(0, 1, 9, 10, 18, 19),
+                          self: b(19),
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#6',  // A, D -> C, E.
+                              '#36'  // B, D -> C, E.
+                            ],
+                            [],
+                            []
+                          ]
+                        },
+                        {  // *, D -> C, F.
+                          id: '#49',
+                          title: [undefined, 'C', 'F'],
+                          total: b(0, 9, 18),
+                          self: b(18),
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#7',  // A, D -> C, F.
+                              '#37'  // B, D -> C, F.
+                            ],
+                            [],
+                            [
+                              {  // *, D -> C, F -> E.
+                                id: '#50',
+                                title: [undefined, 'C', 'E'],
+                                total: b(0, 9, 18),
+                                self: b(18),
+                                isLowerBound: true,
+                                children: [
+                                  [
+                                    '#8',  // A, D -> C, F -> E.
+                                    '#38'  // B, D -> C, F -> E.
+                                  ],
+                                  [],
+                                  []
+                                ]
+                              }
+                            ]
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ],
+                [
+                  {  // *, D, E.
+                    id: '#51',
+                    title: [undefined, 'D', 'E'],
+                    total: b(0, 1, 9, 10, 18, 19),
+                    self: b(19),
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#9',  // A, D, E.
+                        '#39'  // B, D, E.
+                      ],
+                      [
+                        '#48'  // *, D -> C, E.
+                      ],
+                      []
+                    ]
+                  },
+                  {  // *, D, F.
+                    id: '#52',
+                    title: [undefined, 'D', 'F'],
+                    total: b(0, 9, 18),
+                    self: b(18),
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#10',  // A, D, F.
+                        '#40'  // B, D, F.
+                      ],
+                      [
+                        '#49'  // *, D -> C, F.
+                      ],
+                      [
+                        {  // *, D, F -> E.
+                          id: '#53',
+                          title: [undefined, 'D', 'E'],
+                          total: b(0, 9, 18),
+                          self: b(18),
+                          isLowerBound: true,
+                          children: [
+                            [
+                              '#11',  // A, D, F -> E.
+                              '#41'  // B, D, F -> E.
+                            ],
+                            [
+                              '#50'  // *, D -> C, F -> E.
+                            ],
+                            []
+                          ]
+                        }
+                      ]
+                    ]
+                  }
+                ]
+              ]
+            }
+          ],
+          [
+            {  // *, *, E.
+              title: [undefined, undefined, 'E'],
+              total: b(0, 1, 3, 4, 6, 7, 9, 10, 12, 13, 15, 16, 18, 19, 21, 22,
+                  24, 25),
+              self: b(25),
+              isLowerBound: true,
+              children: [
+                [
+                  '#12',  // A, *, E.
+                  '#42'  // B, *, E.
+                ],
+                [
+                  '#45',  // *, C, E.
+                  '#51'  // *, D, E.
+                ],
+                []
+              ]
+            },
+            {  // *, *, F.
+              title: [undefined, undefined, 'F'],
+              total: b(0, 3, 6, 9, 12, 15, 18, 21, 24),
+              self: b(24),
+              isLowerBound: true,
+              children: [
+                [
+                  '#13',  // A, *, F.
+                  '#43'  // B, *, F.
+                ],
+                [
+                  '#46',  // *, C, F.
+                  '#52'  // *, D, F.
+                ],
+                [
+                  {  // *, *, F -> E.
+                    title: [undefined, undefined, 'E'],
+                    total: b(0, 3, 6, 9, 12, 15, 18, 21, 24),
+                    self: b(24),
+                    isLowerBound: true,
+                    children: [
+                      [
+                        '#14',  // A, *, F -> E.
+                        '#44'  // B, *, F -> E.
+                      ],
+                      [
+                        '#47',  // *, C, F -> E.
+                        '#53'  // *, D, F -> E.
+                      ],
+                      []
+                    ]
+                  }
+                ]
+              ]
+            }
+          ]
+        ]
+      });
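+
+  // How to read the expected structures above: each node literal gives its
+  // title per dimension (undefined standing in for a wildcard), its expected
+  // total and self values built with the b() helper, whether it is a lower
+  // bound, and one children array per dimension. String entries such as
+  // '#18' refer by id to nodes defined earlier in the same structure, so
+  // shared subtrees are written out only once.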
+
+  test('recursionDepthTracker', function() {
+    var MAX_DEPTH = 5;
+    var tracker = new RecursionDepthTracker(MAX_DEPTH, 2 /* dimension */);
+
+    function pushNewNode(title) {
+      var node = new MultiDimensionalViewNode(
+          [undefined, 'ignored dimension', title, 'also ignored'],
+          false /* isLowerBound (not relevant for this test) */);
+      tracker.push(node);
+      return node;
+    }
+
+    function checkTracker(expectedDefinedViewNodePath, expectedRecursionDepth) {
+      var expectedBottomIndex = MAX_DEPTH - expectedDefinedViewNodePath.length;
+      assert.strictEqual(tracker.bottomIndex, expectedBottomIndex);
+      assert.strictEqual(tracker.topIndex, MAX_DEPTH);
+
+      var undefinedPadding = new Array(expectedBottomIndex);
+      var expectedViewNodePath =
+          undefinedPadding.concat(expectedDefinedViewNodePath);
+      var expectedTitlePath =
+          undefinedPadding.concat(expectedDefinedViewNodePath.map(
+              function(node) { return node.title[2]; }));
+      assertListStrictEqual(tracker.viewNodePath, expectedViewNodePath);
+      assertListStrictEqual(tracker.titlePath, expectedTitlePath);
+
+      assert.strictEqual(tracker.recursionDepth, expectedRecursionDepth);
+    }
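+
+    // An informal reading of the expectations below: the recursion depth
+    // appears to be the length of the longest run of top-of-stack titles, in
+    // the tracked dimension, that occurs again deeper in the stack. For
+    // example, a path reading B, A, C, B, A from the top repeats its top two
+    // titles three frames down, so the expected depth is 2, while pushing a
+    // title that breaks the repetition drops the expected depth back to 0.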
+
+    checkTracker([] /* empty stack */, 0);
+    var a1 = pushNewNode('A');
+    checkTracker([a1], 0);
+    var b1 = pushNewNode('B');
+    checkTracker([b1, a1], 0);
+    var c1 = pushNewNode('C');
+    checkTracker([c1, b1, a1], 0);
+    var d1 = pushNewNode('D');
+    checkTracker([d1, c1, b1, a1], 0);
+    tracker.pop();
+    checkTracker([c1, b1, a1], 0);
+    var a2 = pushNewNode('A');
+    checkTracker([a2, c1, b1, a1], 1);
+    var b2 = pushNewNode('B');
+    checkTracker([b2, a2, c1, b1, a1], 2);
+    tracker.pop();
+    checkTracker([a2, c1, b1, a1], 1);
+    tracker.pop();
+    checkTracker([c1, b1, a1], 0);
+    tracker.push(b2);
+    checkTracker([b2, c1, b1, a1], 1);
+    tracker.pop();
+    checkTracker([c1, b1, a1], 0);
+    tracker.pop();
+    checkTracker([b1, a1], 0);
+    tracker.pop();
+    checkTracker([a1], 0);
+    var a3 = pushNewNode('A');
+    checkTracker([a3, a1], 1);
+    tracker.push(a2);
+    checkTracker([a2, a3, a1], 2);
+    var a4 = pushNewNode('A');
+    checkTracker([a4, a2, a3, a1], 3);
+    tracker.pop();
+    checkTracker([a2, a3, a1], 2);
+    var b3 = pushNewNode('B');
+    checkTracker([b3, a2, a3, a1], 0);
+    tracker.push(a4);
+    checkTracker([a4, b3, a2, a3, a1], 1);
+    tracker.pop();
+    checkTracker([b3, a2, a3, a1], 0);
+    tracker.pop();
+    checkTracker([a2, a3, a1], 2);
+    tracker.pop();
+    checkTracker([a3, a1], 1);
+    tracker.pop();
+    checkTracker([a1], 0);
+    tracker.pop();
+    checkTracker([], 0);
+    tracker.push(a4);
+    checkTracker([a4], 0);
+    tracker.push(b1);
+    checkTracker([b1, a4], 0);
+    tracker.push(a1);
+    checkTracker([a1, b1, a4], 1);
+    tracker.pop();
+    checkTracker([b1, a4], 0);
+    var c2 = pushNewNode('C');
+    checkTracker([c2, b1, a4], 0);
+    tracker.push(a3);
+    checkTracker([a3, c2, b1, a4], 1);
+    tracker.pop();
+    checkTracker([c2, b1, a4], 0);
+    tracker.pop();
+    checkTracker([b1, a4], 0);
+    tracker.pop();
+    checkTracker([a4], 0);
+    tracker.pop();
+    checkTracker([], 0);
+
+    assert.throws(function() {
+      // Try popping from an empty tracker.
+      tracker.pop();
+    });
+
+    pushNewNode('F');
+    pushNewNode('U');
+    pushNewNode('L');
+    pushNewNode('L');
+    pushNewNode('!');
+    assert.throws(function() {
+      // Try pushing to a full tracker.
+      pushNewNode(':-(');
+    });
+  });
+
+  test('zFunction', function() {
+    // Empty list/string (suffix).
+    assert.deepEqual(zFunction([], 0), []);
+    assert.deepEqual(zFunction(['A'], 1), []);
+    assert.deepEqual(zFunction(['A', 'B', 'C'], 3), []);
+    assert.deepEqual(zFunction('', 0), []);
+    assert.deepEqual(zFunction('A', 1), []);
+    assert.deepEqual(zFunction('ABC', 3), []);
+
+    // Singleton list/string.
+    checkZFunction([1], [0]);
+    checkZFunction('T', [0]);
+
+    // No duplicate elements.
+    checkZFunction([1, 2, 3, 4, 5], [0, 0, 0, 0, 0]);
+    checkZFunction('ABCDEF', [0, 0, 0, 0, 0, 0]);
+
+    // No substring is a suffix.
+    checkZFunction([1, 2, 3, 2], [0, 0, 0, 0]);
+    checkZFunction('ABBB', [0, 0, 0, 0]);
+
+    // Pure repetition.
+    checkZFunction([1, 1, 1, 1, 1], [0, 4, 3, 2, 1]);
+    checkZFunction('AAAAA', [0, 4, 3, 2, 1]);
+
+    // Interleaved repetition.
+    checkZFunction([1, 2, 1, 3, 1, 2, 1], [0, 0, 1, 0, 3, 0, 1]);
+    checkZFunction('AAABAAB', [0, 2, 1, 0, 2, 1, 0]);
+
+    // Complex patterns.
+    checkZFunction([7, 9, 7, 9, 7, 9, 7, 9], [0, 0, 6, 0, 4, 0, 2, 0]);
+    checkZFunction('CCGTCCCGTACC', [0, 1, 0, 0, 2, 4, 1, 0, 0, 0, 2, 1]);
+  });
+});
+</script>
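
The zFunction expectations above follow the classic string-matching z-function: z[i] is the length of the longest common prefix of the sequence and its suffix starting at i, with z[0] fixed to 0 by convention. (checkZFunction is defined earlier in this file and is not part of this hunk; the second argument to zFunction appears to select a suffix to operate on, which the sketch below omits.) The recursionDepthTracker expectations are also consistent with recursionDepth being the largest z-value over the tracked title path, though that link is an inference from the test data, not something the patch states. A minimal O(n^2) reference sketch, purely illustrative:

    function zFunctionReference(items) {
      // z[i] = length of the longest common prefix of |items| and
      // items.slice(i); z[0] is 0 by the convention used in the tests above.
      var n = items.length;
      if (n === 0)
        return [];
      var z = new Array(n);
      z[0] = 0;
      for (var i = 1; i < n; i++) {
        var k = 0;
        while (i + k < n && items[i + k] === items[k])
          k++;
        z[i] = k;
      }
      return z;
    }

    // zFunctionReference(['A', 'A', 'A', 'B', 'A', 'A', 'B'])
    //     -> [0, 2, 1, 0, 2, 1, 0], matching the 'AAABAAB' case above.
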
diff --git a/catapult/tracing/tracing/base/raf.html b/catapult/tracing/tracing/base/raf.html
index 229585e..9082e97 100644
--- a/catapult/tracing/tracing/base/raf.html
+++ b/catapult/tracing/tracing/base/raf.html
@@ -10,6 +10,9 @@
 
 tr.exportTo('tr.b', function() {
   var ESTIMATED_IDLE_PERIOD_LENGTH_MILLISECONDS = 10;
+  // The maximum amount of time that we allow for a task to get scheduled
+  // in idle time before forcing the task to run.
+  var REQUEST_IDLE_CALLBACK_TIMEOUT_MILLISECONDS = 100;
 
   // Setting this to true will cause stack traces to get dumped into the
   // tasks. When an exception happens the original stack will be printed.
@@ -60,7 +63,9 @@
       return;
     }
     idleWorkScheduled = true;
-    window.requestIdleCallback(processIdleWork);
+    window.requestIdleCallback(function(deadline, didTimeout) {
+      processIdleWork(false /* forceAllTasksToRun */, deadline);
+    }, { timeout: REQUEST_IDLE_CALLBACK_TIMEOUT_MILLISECONDS });
   }
 
   function onAnimationFrameError(e, opt_stack) {
@@ -150,7 +155,7 @@
   function requestPreAnimationFrame(callback, opt_this) {
     pendingPreAFs.push({
       callback: callback,
-      context: opt_this || window,
+      context: opt_this || global,
       stack: getStack_()});
     scheduleRAF();
   }
@@ -162,7 +167,7 @@
     }
     currentRAFDispatchList.push({
       callback: callback,
-      context: opt_this || window,
+      context: opt_this || global,
       stack: getStack_()});
     return;
   }
@@ -170,7 +175,7 @@
   function requestAnimationFrame(callback, opt_this) {
     pendingRAFs.push({
       callback: callback,
-      context: opt_this || window,
+      context: opt_this || global,
       stack: getStack_()});
     scheduleRAF();
   }
@@ -178,7 +183,7 @@
   function requestIdleCallback(callback, opt_this) {
     pendingIdleCallbacks.push({
       callback: callback,
-      context: opt_this || window,
+      context: opt_this || global,
       stack: getStack_()});
     scheduleIdleWork();
   }
diff --git a/catapult/tracing/tracing/base/raf_test.html b/catapult/tracing/tracing/base/raf_test.html
index 7bcdaf3..289ab52 100644
--- a/catapult/tracing/tracing/base/raf_test.html
+++ b/catapult/tracing/tracing/base/raf_test.html
@@ -179,5 +179,69 @@
     });
   });
 
+  function withFixedIdleTimeRemaining(idleTime, func) {
+    var oldRIC = window.requestIdleCallback;
+    try {
+      var pendingIdleCallbacks = [];
+      window.requestIdleCallback = function(callback) {
+        var deadline = {
+          timeRemaining: function() {
+            return idleTime;
+          }
+        };
+        pendingIdleCallbacks.push(function() {
+          callback(deadline, false /* didTimeout */);
+        });
+      };
+      func(pendingIdleCallbacks);
+    } finally {
+      window.requestIdleCallback = oldRIC;
+    }
+  }
+
+  test('idleCallbackWithIdletime', function() {
+    withFixedIdleTimeRemaining(1000, function(pendingIdleCallbacks) {
+      var idle1Ran = false;
+      var idle2Ran = false;
+      tr.b.requestIdleCallback(function() {
+        idle1Ran = true;
+      });
+      tr.b.requestIdleCallback(function() {
+        idle2Ran = true;
+      });
+      assert.lengthOf(pendingIdleCallbacks, 1);
+      pendingIdleCallbacks.shift()();
+
+      // Both callbacks should have run since there was idle time.
+      assert.isTrue(idle1Ran);
+      assert.isTrue(idle2Ran);
+    });
+  });
+
+  test('idleCallbackWithoutIdletime', function() {
+    withFixedIdleTimeRemaining(0, function(pendingIdleCallbacks) {
+      var idle1Ran = false;
+      var idle2Ran = false;
+      tr.b.requestIdleCallback(function() {
+        idle1Ran = true;
+      });
+      tr.b.requestIdleCallback(function() {
+        idle2Ran = true;
+      });
+      assert.lengthOf(pendingIdleCallbacks, 1);
+      pendingIdleCallbacks.shift()();
+
+      // Only the first idle callback should have run since there was no idle
+      // time left.
+      assert.isTrue(idle1Ran);
+      assert.isFalse(idle2Ran);
+
+      // Run the remaining idle task.
+      assert.lengthOf(pendingIdleCallbacks, 1);
+      pendingIdleCallbacks.shift()();
+      assert.isTrue(idle2Ran);
+    });
+  });
+
 });
 </script>
diff --git a/catapult/tracing/tracing/base/rect.html b/catapult/tracing/tracing/base/rect.html
index 817c5c0..75fcadf 100644
--- a/catapult/tracing/tracing/base/rect.html
+++ b/catapult/tracing/tracing/base/rect.html
@@ -9,10 +9,6 @@
 <script>
 'use strict';
 
-/**
- * @fileoverview 2D Rectangle math.
- */
-
 tr.exportTo('tr.b', function() {
 
   /**
diff --git a/catapult/tracing/tracing/base/statistics.html b/catapult/tracing/tracing/base/statistics.html
index 874b095..38e8624 100644
--- a/catapult/tracing/tracing/base/statistics.html
+++ b/catapult/tracing/tracing/base/statistics.html
@@ -22,7 +22,7 @@
     if (denominator === 0)
       return 0;
     return numerator / denominator;
-  }
+  };
 
   Statistics.sum = function(ary, opt_func, opt_this) {
     var func = opt_func || identity;
@@ -44,8 +44,10 @@
     var denominator = 0;
 
     for (var i = 0; i < ary.length; i++) {
-      var weight = weightCallback.call(opt_this, ary[i], i);
       var value = valueCallback.call(opt_this, ary[i], i);
+      if (value === undefined)
+        continue;
+      var weight = weightCallback.call(opt_this, ary[i], i, value);
       numerator += weight * value;
       denominator += weight;
     }
@@ -96,7 +98,7 @@
     for (var i = 0; i < ary.length; i++)
       ret.addValue(func.call(opt_this, ary[i], i));
     return ret;
-  }
+  };
 
   Statistics.percentile = function(ary, percent, opt_func, opt_this) {
     if (!(percent >= 0 && percent <= 1))
@@ -116,7 +118,7 @@
     opt_low = opt_low || 0.0;
     opt_high = opt_high || 1.0;
     return Math.min(Math.max(value, opt_low), opt_high);
-  }
+  };
 
   /**
    * Sorts the samples, and map them linearly to the range [0,1].
@@ -164,7 +166,7 @@
       normalized_samples: samples,
       scale: scale
     };
-  }
+  };
 
   /**
    * Computes the discrepancy of a set of 1D samples from the interval [0,1].
@@ -445,7 +447,7 @@
         samplesA[i] = samplesB[i];
       }
     }
-  }
+  };
 
   return {
     Statistics: Statistics
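
The weightedMean change above does two things: entries whose value callback returns undefined are skipped entirely, and the weight callback now receives the already-computed value as a third argument. A standalone sketch of the resulting behaviour (the return value when every entry is skipped is an assumption, since that part of the function lies outside this hunk):

    function weightedMeanSketch(ary, weightCallback, valueCallback) {
      var numerator = 0;
      var denominator = 0;
      for (var i = 0; i < ary.length; i++) {
        var value = valueCallback(ary[i], i);
        if (value === undefined)
          continue;  // entries without a value no longer contribute weight
        var weight = weightCallback(ary[i], i, value);
        numerator += weight * value;
        denominator += weight;
      }
      return denominator === 0 ? undefined : numerator / denominator;
    }

    // weightedMeanSketch([1, undefined, 3],
    //                    function() { return 1; },
    //                    function(x) { return x; });
    // -> 2; the undefined entry is ignored rather than poisoning the mean.
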
diff --git a/catapult/tracing/tracing/base/task.html b/catapult/tracing/tracing/base/task.html
index bd876cb..54f8e85 100644
--- a/catapult/tracing/tracing/base/task.html
+++ b/catapult/tracing/tracing/base/task.html
@@ -5,11 +5,13 @@
 found in the LICENSE file.
 -->
 <link rel="import" href="/tracing/base/raf.html">
+<link rel="import" href="/tracing/base/timing.html">
 
 <script>
 'use strict';
 
 tr.exportTo('tr.b', function() {
+  var Timing = tr.b.Timing;
   /**
    * A task is a combination of a run callback, a set of subtasks, and an after
    * task.
@@ -46,6 +48,10 @@
   }
 
   Task.prototype = {
+    get name() {
+      return this.runCb_.name;
+    },
+
     /*
      * See constructor documentation on semantics of subtasks.
      */
@@ -94,6 +100,42 @@
     },
 
     /*
+     * See constructor documentation on semantics of after tasks.
+     * Note: timedAfter doesn't work when a task throws an exception.
+     * This is because the task system doesn't currently support catching exceptions.
+     * At the time of writing, this is considered to be an acceptable tradeoff.
+     */
+    timedAfter: function(groupName, cb, thisArg, opt_args) {
+      if (cb.name === '')
+        throw new Error('Anonymous Task is not allowed');
+      return this.namedTimedAfter(groupName, cb.name, cb, thisArg, opt_args);
+    },
+
+    /*
+     * See constructor documentation on semantics of after tasks.
+     * Note: namedTimedAfter doesn't work when a task throws an exception.
+     * This is because the task system doesn't currently support catching exceptions.
+     * At the time of writing, this is considered to be an acceptable tradeoff.
+     */
+    namedTimedAfter: function(groupName, name, cb, thisArg, opt_args) {
+      if (this.afterTask_)
+        throw new Error('Has an after task already');
+      var realTask;
+      if (cb instanceof Task)
+        realTask = cb;
+      else
+        realTask = new Task(cb, thisArg);
+      this.afterTask_ = new Task(function(task) {
+        var markedTask = Timing.mark(groupName, name, opt_args);
+        task.subTask(realTask, thisArg);
+        task.subTask(function() {
+          markedTask.end();
+        }, thisArg);
+      }, thisArg);
+      return this.afterTask_;
+    },
+
+    /*
      * Adds a task after the chain of tasks.
      */
     enqueue: function(cb, thisArg) {
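
timedAfter and namedTimedAfter above attach an after-task whose execution is bracketed by a Timing mark, so the elapsed time surfaces as a User Timing measure named '<groupName>:<callback name>'. A condensed usage sketch, distilled from the tests in the next hunk (group and function names below are illustrative; Task and Timing are exported on tr.b by this patch):

    var Task = tr.b.Task;

    var startingTask = new Task(function(task) {
      // ... main work, possibly adding task.subTask(...) children ...
    });
    startingTask.timedAfter('MyGroup', function cleanUp() {
      // Runs once the main task and its subtasks are done. The callback must
      // be a named function; timedAfter throws 'Anonymous Task is not
      // allowed' otherwise (use namedTimedAfter to supply a name explicitly).
    });

    Task.RunSynchronously(startingTask);
    // window.performance.getEntriesByName('MyGroup:cleanUp') now contains the
    // measure for the after-task.
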
diff --git a/catapult/tracing/tracing/base/task_test.html b/catapult/tracing/tracing/base/task_test.html
index 720be37..59a6f69 100644
--- a/catapult/tracing/tracing/base/task_test.html
+++ b/catapult/tracing/tracing/base/task_test.html
@@ -76,5 +76,96 @@
     });
   });
 
+  test('timedAfter', function() {
+    if (tr.isHeadless)
+      return;
+
+    var results = [];
+
+    var startingTask = new Task(function(task) {
+      results.push('a');
+    }, this);
+    startingTask.timedAfter('Test', function pushB() {
+      results.push('b');
+    }, this).after(function() {
+      results.push('c');
+    }, this);
+
+    Task.RunSynchronously(startingTask);
+    assert.deepEqual(results, ['a', 'b', 'c']);
+    var result = window.performance.getEntriesByName('Test:pushB');
+    var duration = parseFloat(result[0].duration);
+    assert.isAbove(duration, 0.0);
+  });
+
+  test('timedAfterWithSubTask', function() {
+    if (tr.isHeadless)
+      return;
+
+    var results = [];
+
+    var startingTask = new Task(function(task) {
+      results.push('a');
+      task.subTask(function(task) {
+        results.push('a/1');
+      }, this);
+      task.subTask(function(task) {
+        results.push('a/2');
+      }, this);
+    }, this);
+    startingTask.timedAfter('Test', function pushB() {
+      results.push('b');
+    }, this).after(function() {
+      results.push('c');
+    }, this);
+
+    Task.RunSynchronously(startingTask);
+    assert.deepEqual(results, ['a', 'a/1', 'a/2', 'b', 'c']);
+    var result = window.performance.getEntriesByName('Test:pushB');
+    var duration = parseFloat(result[0].duration);
+    assert.isAbove(duration, 0.0);
+  });
+
+  test('timedAfterWithAnonymousTask', function() {
+    if (tr.isHeadless)
+      return;
+
+    var results = [];
+
+    var startingTask = new Task(function(task) {
+      results.push('a');
+    }, this);
+    assert.throw(function() {
+      startingTask.timedAfter('Test', function() {
+        results.push('b');
+      }, this);
+    }, Error, 'Anonymous Task is not allowed');
+
+    Task.RunSynchronously(startingTask);
+    assert.deepEqual(results, ['a']);
+  });
+
+  test('namedTimedAfter', function() {
+    if (tr.isHeadless)
+      return;
+
+    var results = [];
+
+    var startingTask = new Task(function(task) {
+      results.push('a');
+    }, this);
+    startingTask.namedTimedAfter('Test', 'pushB', function() {
+      results.push('b');
+    }, this).after(function() {
+      results.push('c');
+    }, this);
+
+    Task.RunSynchronously(startingTask);
+    assert.deepEqual(results, ['a', 'b', 'c']);
+    var result = window.performance.getEntriesByName('Test:pushB');
+    var duration = parseFloat(result[0].duration);
+    assert.isAbove(duration, 0.0);
+  });
+
 });
 </script>
diff --git a/catapult/tracing/tracing/base/time_function.html b/catapult/tracing/tracing/base/time_function.html
deleted file mode 100644
index ab1e6f1..0000000
--- a/catapult/tracing/tracing/base/time_function.html
+++ /dev/null
@@ -1,65 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2014 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/base.html">
-
-<script>
-'use strict';
-
-tr.exportTo('tr.b', function() {
-  function markTimeBegin(functionName) {
-    if (tr.isHeadless)
-      return;
-    window.performance.mark(functionName + '_Begin');
-  }
-
-  function markTimeEnd(functionName) {
-    if (tr.isHeadless)
-      return;
-    window.performance.mark(functionName + '_End');
-    window.performance.measure(functionName,
-                               functionName + '_Begin',
-                               functionName + '_End');
-  }
-
-  var timeFunction = function(callback) {
-    if (callback.name === '')
-      throw new Error('Anonymous function is not allowed');
-    timeNamedFunction(callback.name, callback);
-  };
-
-  var timeNamedFunction = function(functionName, callback) {
-    markTimeBegin(functionName);
-    try {
-      callback();
-    } finally {
-      markTimeEnd(functionName);
-    }
-  };
-
-  function TimedNamedPromise(name, executor) {
-    markTimeBegin(name);
-    var promise = new Promise(executor);
-    promise.then(function(result) {
-      markTimeEnd(name);
-      return result;
-    }, function(e) {
-      markTimeEnd(name);
-      throw e;
-    });
-    return promise;
-  }
-
-  return {
-    timeFunction: timeFunction,
-    timeNamedFunction: timeNamedFunction,
-    markTimeBegin: markTimeBegin,
-    markTimeEnd: markTimeEnd,
-    TimedNamedPromise: TimedNamedPromise
-  };
-});
-</script>
diff --git a/catapult/tracing/tracing/base/time_function_test.html b/catapult/tracing/tracing/base/time_function_test.html
deleted file mode 100644
index b239ae9..0000000
--- a/catapult/tracing/tracing/base/time_function_test.html
+++ /dev/null
@@ -1,49 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2014 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/time_function.html">
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  test('timeFunctionWithAnonymousFunction', function() {
-    assert.throw(function() {
-      tr.b.timeFunction(function() {});
-    }, Error, 'Anonymous function is not allowed');
-  });
-
-  test('timeFunction', function(done) {
-    if (tr.isHeadless)
-      return;
-    tr.b.timeFunction(function addTo1000() {
-      var x = 0;
-      for (var i = 0; i <= 1000; ++i)
-        x += i;
-      assert.equal(x, 500500);
-    });
-    var result = window.performance.getEntriesByName('addTo1000');
-    var duration = parseFloat(result[0].duration);
-    assert.isAbove(duration, 0.0);
-  });
-
-  test('timeNamedFunction', function() {
-    if (tr.isHeadless)
-      return;
-    tr.b.timeNamedFunction('addTo100', function() {
-      var x = 0;
-      for (var i = 0; i <= 100; ++i)
-        x += i;
-      assert.equal(x, 5050);
-    });
-    var result = window.performance.getEntriesByName('addTo100');
-    var duration = parseFloat(result[0].duration);
-    assert.isAbove(duration, 0.0);
-  });
-
-
-});
-</script>
diff --git a/catapult/tracing/tracing/base/timing.html b/catapult/tracing/tracing/base/timing.html
new file mode 100644
index 0000000..5a6ee39
--- /dev/null
+++ b/catapult/tracing/tracing/base/timing.html
@@ -0,0 +1,97 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2014 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/base/base.html">
+<link rel="import" href="/tracing/base/base64.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.b', function() {
+  var Base64 = tr.b.Base64;
+
+  function computeUserTimingMarkName(groupName, functionName, opt_args) {
+    if (groupName === undefined)
+      throw new Error('computeUserTimingMarkName should have a group name');
+    if (functionName === undefined)
+      throw new Error('computeUserTimingMarkName should have a function name');
+    var userTimingMarkName = groupName + ':' + functionName;
+    if (opt_args !== undefined) {
+      userTimingMarkName += '/';
+      userTimingMarkName += Base64.btoa(JSON.stringify(opt_args));
+    }
+    return userTimingMarkName;
+  }
+
+  function Timing() {
+  }
+
+  Timing.nextMarkNumber = 0;
+
+  Timing.mark = function(groupName, functionName, opt_args) {
+    if (tr.isHeadless) {
+      return {
+        end: function() {}
+      };
+    }
+    var userTimingMarkName = computeUserTimingMarkName(
+      groupName, functionName, opt_args);
+    var markBeginName = 'tvcm.mark' + Timing.nextMarkNumber++;
+    var markEndName = 'tvcm.mark' + Timing.nextMarkNumber++;
+    window.performance.mark(markBeginName);
+    return {
+      end: function() {
+        window.performance.mark(markEndName);
+        window.performance.measure(userTimingMarkName,
+                                   markBeginName,
+                                   markEndName);
+      }
+    };
+  };
+
+  Timing.wrap = function(groupName, callback, opt_args) {
+    if (groupName === undefined)
+      throw new Error('Timing.wrap should have group name');
+    if (callback.name === '')
+      throw new Error('Anonymous function is not allowed');
+    return Timing.wrapNamedFunction(
+      groupName, callback.name, callback, opt_args);
+  };
+
+  Timing.wrapNamedFunction = function(groupName, functionName, callback,
+                                      opt_args) {
+    function timedNamedFunction() {
+      var markedTime = Timing.mark(groupName, functionName, opt_args);
+      try {
+        callback.apply(this, arguments);
+      } finally {
+        markedTime.end();
+      }
+    }
+    return timedNamedFunction;
+  };
+
+  function TimedNamedPromise(groupName, name, executor, opt_args) {
+    var markedTime = Timing.mark(groupName, name, opt_args);
+    var promise = new Promise(executor);
+    promise.then(function(result) {
+      markedTime.end();
+      return result;
+    }, function(e) {
+      markedTime.end();
+      throw e;
+    });
+    return promise;
+  }
+
+  return {
+    _computeUserTimingMarkName: computeUserTimingMarkName, // export for testing
+    TimedNamedPromise: TimedNamedPromise,
+    Timing: Timing
+  };
+});
+</script>
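
The new Timing helpers reduce to two entry points: Timing.mark(group, name, opt_args) returns an object whose end() records a User Timing measure, and Timing.wrap / Timing.wrapNamedFunction return a function that records one around every call. Measures are named 'group:name', with '/<Base64 of JSON.stringify(opt_args)>' appended when args are supplied ({'a': 1} encodes to 'eyJhIjoxfQ==', as the tests in the next hunk verify). A short usage sketch; the group and function names are illustrative only:

    var Timing = tr.b.Timing;

    // Explicit begin/end around an arbitrary region.
    var marked = Timing.mark('Import', 'loadModel', {'a': 1});
    // ... timed work ...
    marked.end();
    // -> measure named 'Import:loadModel/eyJhIjoxfQ=='

    // Wrapping a named function so that every invocation is measured.
    var timedParse = Timing.wrap('Import', function parseTrace() {
      // ... work measured on each call ...
    });
    timedParse();
    // -> measure named 'Import:parseTrace'
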
diff --git a/catapult/tracing/tracing/base/timing_test.html b/catapult/tracing/tracing/base/timing_test.html
new file mode 100644
index 0000000..459bfa1
--- /dev/null
+++ b/catapult/tracing/tracing/base/timing_test.html
@@ -0,0 +1,85 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2014 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/base/timing.html">
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  var Timing = tr.b.Timing;
+
+  test('computeUserTimingMarkName', function() {
+    if (tr.isHeadless)
+      return;
+    var name1 = tr.b._computeUserTimingMarkName('Test', 'test1', {'a': 1});
+    assert.equal(name1, 'Test:test1/eyJhIjoxfQ==');
+    var name2 = tr.b._computeUserTimingMarkName('Test', 'test2');
+    assert.equal(name2, 'Test:test2');
+    var name3 = tr.b._computeUserTimingMarkName(
+        'Test', 'test1', {'a': 1, 'b': {'c': 2}});
+    assert.equal(name3, 'Test:test1/eyJhIjoxLCJiIjp7ImMiOjJ9fQ==');
+  });
+
+  test('TimingMarkWithoutArgs', function() {
+    if (tr.isHeadless)
+      return;
+    var markedTime = Timing.mark('Test', 'TimingMarkWithoutArgs');
+    markedTime.end();
+    var result = window.performance.getEntriesByName(
+        'Test:TimingMarkWithoutArgs');
+    var duration = parseFloat(result[0].duration);
+    assert.isTrue(duration >= 0.0);
+  });
+
+  test('TimingMarkWithArgs', function() {
+    if (tr.isHeadless)
+      return;
+    var markedTime = Timing.mark('Test', 'TimingMarkWithArgs', {'a': 1});
+    markedTime.end();
+    var result = window.performance.getEntriesByName(
+        'Test:TimingMarkWithArgs/eyJhIjoxfQ==');
+    var duration = parseFloat(result[0].duration);
+    assert.isTrue(duration >= 0.0);
+  });
+
+  test('TimingWrapWithAnonymousFunction', function() {
+    assert.throw(function() {
+      Timing.wrap('Test', function() {})();
+    }, Error, 'Anonymous function is not allowed');
+  });
+
+  test('TimingWrap', function(done) {
+    if (tr.isHeadless)
+      return;
+    Timing.wrap('Test', function addTo1000() {
+      var x = 0;
+      for (var i = 0; i <= 1000; ++i)
+        x += i;
+      assert.equal(x, 500500);
+    })();
+    var result = window.performance.getEntriesByName('Test:addTo1000');
+    var duration = parseFloat(result[0].duration);
+    assert.isTrue(duration >= 0.0);
+  });
+
+  test('TimingWrapNamedFunction', function() {
+    if (tr.isHeadless)
+      return;
+    Timing.wrapNamedFunction('Test', 'addTo100', function() {
+      var x = 0;
+      for (var i = 0; i <= 100; ++i)
+        x += i;
+      assert.equal(x, 5050);
+    })();
+    var result = window.performance.getEntriesByName('Test:addTo100');
+    var duration = parseFloat(result[0].duration);
+    assert.isTrue(duration >= 0.0);
+  });
+
+
+});
+</script>
diff --git a/catapult/tracing/tracing/base/units/generic_table.html b/catapult/tracing/tracing/base/units/generic_table.html
deleted file mode 100644
index 7f9bae5..0000000
--- a/catapult/tracing/tracing/base/units/generic_table.html
+++ /dev/null
@@ -1,29 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/base/base.html">
-<script>
-'use strict';
-
-tr.exportTo('tr.b.u', function() {
-  /**
-   * Tabular data wrapper. Simply wraps an array of items.
-   */
-  function GenericTable(items) {
-    if (items !== undefined)
-      this.items = items;
-    else
-      this.items = [];
-  };
-
-  GenericTable.prototype = {
-  };
-
-  return {
-    GenericTable: GenericTable
-  };
-});
-</script>
diff --git a/catapult/tracing/tracing/base/units/histogram.html b/catapult/tracing/tracing/base/units/histogram.html
deleted file mode 100644
index a553986..0000000
--- a/catapult/tracing/tracing/base/units/histogram.html
+++ /dev/null
@@ -1,278 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/base/units/units.html">
-<link rel="import" href="/tracing/base/statistics.html">
-<link rel="import" href="/tracing/base/range.html">
-<link rel="import" href="/tracing/base/statistics.html">
-<script>
-'use strict';
-
-tr.exportTo('tr.b.u', function() {
-  var Range = tr.b.Range;
-
-  var MAX_SOURCE_INFOS = 16;
-
-  function HistogramBin(parentHistogram, opt_range) {
-    this.parentHistogram = parentHistogram;
-    this.range = opt_range || (new tr.b.Range());
-    this.count = 0;
-    this.sourceInfos = [];
-  }
-
-  HistogramBin.fromDict = function(parentHistogram, d) {
-    var h = new HistogramBin(parentHistogram);
-    h.range.min = d.min;
-    h.range.max = d.max;
-    h.count = d.count;
-    h.sourceInfos = d.sourceInfos;
-    return h;
-  }
-
-  HistogramBin.prototype = {
-    add: function(value, sourceInfo) {
-      this.count += 1;
-      tr.b.Statistics.uniformlySampleStream(this.sourceInfos, this.count,
-          sourceInfo, MAX_SOURCE_INFOS);
-    },
-
-    addBin: function(other) {
-      if (!this.range.equals(other.range))
-        throw new Error('Merging incompatible histogram bins.');
-      tr.b.Statistics.mergeSampledStreams(this.sourceInfos, this.count,
-          other.sourceInfos, other.count, MAX_SOURCE_INFOS);
-      this.count += other.count;
-    },
-
-    asDict: function() {
-      return {
-        min: this.range.min,
-        max: this.range.max,
-        count: this.count,
-        sourceInfos: this.sourceInfos.slice(0)
-      };
-    },
-
-    asJSON: function() {
-      return this.asDict();
-    }
-  };
-
-  function Histogram(unit, range, binInfo) {
-    this.range = range;
-    this.unit = unit;
-
-    this.numNans = 0;
-    this.nanSourceInfos = [];
-
-    this.runningSum = 0;
-    this.maxCount_ = 0;
-
-    this.underflowBin = binInfo.underflowBin;
-    this.centralBins = binInfo.centralBins;
-    this.centralBinWidth = binInfo.centralBinWidth;
-    this.overflowBin = binInfo.overflowBin;
-
-    this.allBins = [];
-    this.allBins.push(this.underflowBin);
-    this.allBins.push.apply(this.allBins, this.centralBins);
-    this.allBins.push(this.overflowBin);
-
-    this.allBins.forEach(function(bin) {
-      if (bin.count > this.maxCount_)
-        this.maxCount_ = bin.count;
-    }, this);
-  }
-
-  Histogram.fromDict = function(d) {
-    var range = Range.fromExplicitRange(d.min, d.max);
-    var binInfo = {};
-    binInfo.underflowBin = HistogramBin.fromDict(undefined, d.underflowBin);
-    binInfo.centralBins = d.centralBins.map(function(binAsDict) {
-      return HistogramBin.fromDict(undefined, binAsDict);
-    });
-    binInfo.centralBinWidth = d.centralBinWidth;
-    binInfo.overflowBin = HistogramBin.fromDict(undefined, d.overflowBin);
-    var h = new Histogram(tr.b.u.Units.fromJSON(d.unit), range, binInfo);
-    h.allBins.forEach(function(bin) {
-      bin.parentHistogram = h;
-    });
-    h.runningSum = d.runningSum;
-    h.numNans = d.numNans;
-    h.nanSourceInfos = d.nanSourceInfos;
-    return h;
-  }
-
-  Histogram.createLinear = function(unit, range, numBins) {
-    if (range.isEmpty)
-      throw new Error('Nope');
-
-    var binInfo = {};
-    binInfo.underflowBin = new HistogramBin(
-        this, Range.fromExplicitRange(-Number.MAX_VALUE, range.min));
-    binInfo.overflowBin = new HistogramBin(
-        this, Range.fromExplicitRange(range.max, Number.MAX_VALUE));
-    binInfo.centralBins = [];
-    binInfo.centralBinWidth = range.range / numBins;
-
-    for (var i = 0; i < numBins; i++) {
-      var lo = range.min + (binInfo.centralBinWidth * i);
-      var hi = lo + binInfo.centralBinWidth;
-      binInfo.centralBins.push(
-          new HistogramBin(undefined, Range.fromExplicitRange(lo, hi)));
-    }
-
-    var h = new Histogram(unit, range, binInfo);
-    h.allBins.forEach(function(bin) {
-      bin.parentHistogram = h;
-    });
-    return h;
-  },
-
-  Histogram.prototype = {
-    get numValues() {
-      return tr.b.Statistics.sum(this.allBins, function(e) {
-        return e.count;
-      });
-    },
-
-    get average() {
-      return this.runningSum / this.numValues;
-    },
-
-    get maxCount() {
-      return this.maxCount_;
-    },
-
-    getInterpolatedCountAt: function(value) {
-      var bin = this.getBinForValue(value);
-      var idx = this.centralBins.indexOf(bin);
-      if (idx < 0) {
-        // |value| is in either the underflowBin or the overflowBin.
-        // We can't interpolate between infinities.
-        return bin.count;
-      }
-
-      // |value| must fall between the centers of two bins.
-      // The bin whose center is less than |value| will be this:
-      var lesserBin = bin;
-
-      // The bin whose center is greater than |value| will be this:
-      var greaterBin = bin;
-
-      // One of those bins could be an under/overflow bin.
-      // Avoid dealing with Infinities by arbitrarily saying that center of the
-      // underflow bin is its range.max, and the center of the overflow bin is
-      // its range.min.
-      // The centers of bins in |this.centralBins| will default to their
-      // |range.center|.
-
-      var lesserBinCenter = undefined;
-      var greaterBinCenter = undefined;
-
-      if (value < greaterBin.range.center) {
-        if (idx > 0) {
-          lesserBin = this.centralBins[idx - 1];
-        } else {
-          lesserBin = this.underflowBin;
-          lesserBinCenter = lesserBin.range.max;
-        }
-      } else {
-        if (idx < (this.centralBins.length - 1)) {
-          greaterBin = this.centralBins[idx + 1];
-        } else {
-          greaterBin = this.overflowBin;
-          greaterBinCenter = greaterBin.range.min;
-        }
-      }
-
-      if (greaterBinCenter === undefined)
-        greaterBinCenter = greaterBin.range.center;
-
-      if (lesserBinCenter === undefined)
-        lesserBinCenter = lesserBin.range.center;
-
-      value = tr.b.normalize(value, lesserBinCenter, greaterBinCenter);
-
-      return tr.b.lerp(value, lesserBin.count, greaterBin.count);
-    },
-
-    getBinForValue: function(value) {
-      if (value < this.range.min)
-        return this.underflowBin;
-      if (value >= this.range.max)
-        return this.overflowBin;
-      var binIdx = Math.floor((value - this.range.min) / this.centralBinWidth);
-      return this.centralBins[binIdx];
-    },
-
-    add: function(value, sourceInfo) {
-      if (typeof(value) !== 'number' || isNaN(value)) {
-        this.numNans++;
-        tr.b.Statistics.uniformlySampleStream(this.nanSourceInfos, this.numNans,
-            sourceInfo, MAX_SOURCE_INFOS);
-        return;
-      }
-
-      var bin = this.getBinForValue(value);
-      bin.add(value, sourceInfo);
-      this.runningSum += value;
-      if (bin.count > this.maxCount_)
-        this.maxCount_ = bin.count;
-    },
-
-    addHistogram: function(other) {
-      if (!this.range.equals(other.range) ||
-          !this.unit === other.unit ||
-          this.allBins.length !== other.allBins.length) {
-        throw new Error('Merging incompatible histograms.');
-      }
-      tr.b.Statistics.mergeSampledStreams(this.nanSourceInfos, this.numNans,
-          other.nanSourceInfos, other.numNans, MAX_SOURCE_INFOS);
-      this.numNans += other.numNans;
-      this.runningSum += other.runningSum;
-      for (var i = 0; i < this.allBins.length; ++i) {
-        this.allBins[i].addBin(other.allBins[i]);
-      }
-    },
-
-    clone: function() {
-      return Histogram.fromDict(this.asDict());
-    },
-
-    asDict: function() {
-      var d = {
-        unit: this.unit.asJSON(),
-
-        min: this.range.min,
-        max: this.range.max,
-
-        numNans: this.numNans,
-        nanSourceInfos: this.nanSourceInfos,
-
-        runningSum: this.runningSum,
-
-        underflowBin: this.underflowBin.asDict(),
-        centralBins: this.centralBins.map(function(bin) {
-          return bin.asDict();
-        }),
-        centralBinWidth: this.centralBinWidth,
-        overflowBin: this.overflowBin.asDict()
-      };
-      return d;
-    },
-
-    asJSON: function() {
-      return this.asDict();
-    }
-  };
-
-  return {
-    HistogramBin: HistogramBin,
-    Histogram: Histogram
-  };
-});
-</script>
diff --git a/catapult/tracing/tracing/base/units/histogram_test.html b/catapult/tracing/tracing/base/units/histogram_test.html
deleted file mode 100644
index a836b78..0000000
--- a/catapult/tracing/tracing/base/units/histogram_test.html
+++ /dev/null
@@ -1,151 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/base/units/histogram.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  test('basic', function() {
-    var h = new tr.b.u.Histogram.createLinear(
-        tr.b.u.Units.timeDurationInMs,
-        tr.b.Range.fromExplicitRange(0, 1000),
-        10);
-    assert.equal(h.getBinForValue(250).range.min, 200);
-    assert.equal(h.getBinForValue(250).range.max, 300);
-    h.add(-1, 'a');
-    h.add(0, 'b');
-    h.add(0, 'c');
-    h.add(500, 'c');
-    h.add(999, 'd');
-    h.add(1000, 'd');
-    assert.equal(h.underflowBin.count, 1);
-
-    assert.equal(h.getBinForValue(0).count, 2);
-    assert.deepEqual(h.getBinForValue(0).sourceInfos,
-                     ['b', 'c']);
-
-    assert.equal(h.getBinForValue(500).count, 1);
-    assert.equal(h.getBinForValue(999).count, 1);
-
-    assert.equal(h.overflowBin.count, 1);
-    assert.equal(h.numValues, 6);
-    assert.closeTo(h.average, 416.3, 0.1);
-  });
-
-  test('nans', function() {
-    var h = new tr.b.u.Histogram.createLinear(
-        tr.b.u.Units.timeDurationInMs,
-        tr.b.Range.fromExplicitRange(0, 1000),
-        10);
-    h.add(undefined, 'b');
-    h.add(NaN, 'c');
-
-    assert.equal(h.numNans, 2);
-    assert.deepEqual(h.nanSourceInfos, ['b', 'c']);
-  });
-
-  test('addHistogramsValid', function() {
-    var h0 = new tr.b.u.Histogram.createLinear(
-        tr.b.u.Units.timeDurationInMs,
-        tr.b.Range.fromExplicitRange(0, 1000),
-        10);
-    var h1 = new tr.b.u.Histogram.createLinear(
-        tr.b.u.Units.timeDurationInMs,
-        tr.b.Range.fromExplicitRange(0, 1000),
-        10);
-    h0.add(-1, 'a0');
-    h0.add(0, 'b0');
-    h0.add(0, 'c0');
-    h0.add(500, 'c0');
-    h0.add(1000, 'd0');
-    h0.add(NaN, 'e0');
-
-    h1.add(-1, 'a1');
-    h1.add(0, 'b1');
-    h1.add(0, 'c1');
-    h1.add(999, 'd1');
-    h1.add(1000, 'd1');
-    h1.add(NaN, 'e1');
-
-    h0.addHistogram(h1);
-
-    assert.equal(h0.numNans, 2);
-    assert.deepEqual(h0.nanSourceInfos, ['e0', 'e1']);
-
-    assert.equal(h0.underflowBin.count, 2);
-    assert.deepEqual(h0.underflowBin.sourceInfos, ['a0', 'a1']);
-
-    assert.equal(h0.getBinForValue(0).count, 4);
-    assert.deepEqual(h0.getBinForValue(0).sourceInfos,
-        ['b0', 'c0', 'b1', 'c1']);
-
-    assert.equal(h0.getBinForValue(500).count, 1);
-    assert.deepEqual(h0.getBinForValue(500).sourceInfos, ['c0']);
-
-    assert.equal(h0.getBinForValue(999).count, 1);
-    assert.deepEqual(h0.getBinForValue(999).sourceInfos, ['d1']);
-
-    assert.equal(h0.overflowBin.count, 2);
-    assert.deepEqual(h0.overflowBin.sourceInfos, ['d0', 'd1']);
-
-    assert.equal(h0.numValues, 10);
-    assert.closeTo(h0.average, 349.7, 0.1);
-
-    assert.equal(2, h0.maxCount);
-    assert.equal(2, h1.maxCount);
-  });
-
-  test('addHistogramsInvalid', function() {
-    var h0 = new tr.b.u.Histogram.createLinear(
-        tr.b.u.Units.timeDurationInMs,
-        tr.b.Range.fromExplicitRange(0, 1000),
-        10);
-    var h1 = new tr.b.u.Histogram.createLinear(
-        tr.b.u.Units.timeDurationInMs,
-        tr.b.Range.fromExplicitRange(0, 1001),
-        10);
-    var h2 = new tr.b.u.Histogram.createLinear(
-        tr.b.u.Units.timeDurationInMs,
-        tr.b.Range.fromExplicitRange(0, 1000),
-        11);
-
-    assert.throws(h0.addHistogram.bind(h0, h1), Error);
-    assert.throws(h0.addHistogram.bind(h0, h1), Error);
-  });
-
-  test('getInterpolateCountAt', function() {
-    var h = tr.b.u.Histogram.fromDict({
-      unit: 'unitless',
-      min: 0,
-      max: 100,
-      centralBinWidth: 10,
-      underflowBin: {min: -Number.MAX_VALUE, max: 0, count: 11},
-      centralBins: [
-        {min: 0, max: 10, count: 10},
-        {min: 10, max: 20, count: 9},
-        {min: 20, max: 30, count: 8},
-        {min: 30, max: 40, count: 7},
-        {min: 40, max: 50, count: 6},
-        {min: 50, max: 60, count: 5},
-        {min: 60, max: 70, count: 4},
-        {min: 70, max: 80, count: 3},
-        {min: 80, max: 90, count: 2},
-        {min: 90, max: 100, count: 1}
-      ],
-      overflowBin: {min: 100, max: Number.MAX_VALUE, count: 0}
-    });
-
-    assert.equal(11, h.maxCount);
-    assert.equal(11, h.getInterpolatedCountAt(-1));
-    assert.equal(0, h.getInterpolatedCountAt(101));
-    assert.closeTo(10.8, h.getInterpolatedCountAt(1), 1e-3);
-    assert.closeTo(9.5, h.getInterpolatedCountAt(10), 1e-3);
-    assert.closeTo(0.2, h.getInterpolatedCountAt(99), 1e-3);
-  });
-});
-</script>
diff --git a/catapult/tracing/tracing/base/units/scalar.html b/catapult/tracing/tracing/base/units/scalar.html
deleted file mode 100644
index b187344..0000000
--- a/catapult/tracing/tracing/base/units/scalar.html
+++ /dev/null
@@ -1,32 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/units/time_display_mode.html">
-
-<script>
-'use strict';
-
-tr.exportTo('tr.b.u', function() {
-  /**
-   * Scalar wrapper, representing a scalar value and its unit.
-   */
-  function Scalar(value, unit) {
-    this.value = value;
-    this.unit = unit;
-  };
-
-  Scalar.prototype = {
-    toString: function() {
-      return this.unit.format(this.value);
-    }
-  };
-
-  return {
-    Scalar: Scalar
-  };
-});
-</script>
diff --git a/catapult/tracing/tracing/base/units/time_display_mode.html b/catapult/tracing/tracing/base/units/time_display_mode.html
deleted file mode 100644
index 4fd8bec..0000000
--- a/catapult/tracing/tracing/base/units/time_display_mode.html
+++ /dev/null
@@ -1,54 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/base.html">
-<link rel="import" href="/tracing/base/event_target.html">
-<link rel="import" href="/tracing/ui/base/deep_utils.html">
-
-<script>
-'use strict';
-
-/**
- * @fileoverview Time currentDisplayUnit
- */
-tr.exportTo('tr.b.u', function() {
-  var msDisplayMode = {
-    scale: 1e-3,
-    suffix: 'ms',
-    // Compares a < b with adjustments to precision errors.
-    roundedLess: function(a, b) {
-      return Math.round(a * 1000) < Math.round(b * 1000);
-    },
-    format: function(ts) {
-      return new Number(ts)
-          .toLocaleString(undefined, { minimumFractionDigits: 3 }) + ' ms';
-    }
-  };
-
-  var nsDisplayMode = {
-    scale: 1e-9,
-    suffix: 'ns',
-    // Compares a < b with adjustments to precision errors.
-    roundedLess: function(a, b) {
-      return Math.round(a * 1000000) < Math.round(b * 1000000);
-    },
-    format: function(ts) {
-      return new Number(ts * 1000000)
-          .toLocaleString(undefined, { maximumFractionDigits: 0 }) + ' ns';
-    }
-  };
-
-  var TimeDisplayModes = {
-    ns: nsDisplayMode,
-    ms: msDisplayMode
-  };
-
-  return {
-    TimeDisplayModes: TimeDisplayModes
-  };
-});
-</script>
diff --git a/catapult/tracing/tracing/base/units/time_display_mode_test.html b/catapult/tracing/tracing/base/units/time_display_mode_test.html
deleted file mode 100644
index 3523129..0000000
--- a/catapult/tracing/tracing/base/units/time_display_mode_test.html
+++ /dev/null
@@ -1,42 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/units/time_display_mode.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  test('Time.ms.format', function() {
-    function local(v) {
-      return v.toLocaleString(undefined, { minimumFractionDigits: 3}) + ' ms';
-    }
-
-    var unit = tr.b.u.TimeDisplayModes.ms;
-    assert.equal(unit.format(1), local(1));
-    assert.equal(unit.format(1.001), local(1.001));
-    assert.equal(unit.format(1.0005), local(1.001));
-    assert.equal(unit.format(1.0004), local(1));
-    assert.equal(unit.format(0.999), local(0.999));
-    assert.equal(unit.format(0.9995), local(1));
-  });
-
-  test('Time.ns.format', function() {
-    function local(v) {
-      return v.toLocaleString(undefined, { maximumFractionDigits: 0}) + ' ns';
-    }
-
-    var unit = tr.b.u.TimeDisplayModes.ns;
-    assert.equal(unit.format(1), local(1000000));
-    assert.equal(unit.format(0.001), local(1000));
-    assert.equal(unit.format(0.000001), local(1));
-    assert.equal(unit.format(0.0000005), local(1));
-    assert.equal(unit.format(0.0000004), local(0));
-    assert.equal(unit.format(0.0000015), local(2));
-  });
-});
-</script>
diff --git a/catapult/tracing/tracing/base/units/time_duration.html b/catapult/tracing/tracing/base/units/time_duration.html
deleted file mode 100644
index 5db2f82..0000000
--- a/catapult/tracing/tracing/base/units/time_duration.html
+++ /dev/null
@@ -1,38 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/units/scalar.html">
-<link rel="import" href="/tracing/base/units/time_display_mode.html">
-
-<script>
-'use strict';
-
-tr.exportTo('tr.b.u', function() {
-  /**
-   * Float wrapper, representing a time duration, capable of pretty-printing.
-   */
-  function TimeDuration(duration) {
-    tr.b.u.Scalar.call(this, duration, tr.b.u.Units.timeDurationInMs);
-  };
-
-  TimeDuration.prototype = {
-    __proto__: tr.b.u.Scalar.prototype,
-
-    get duration() {
-      return this.value;
-    }
-  };
-
-  TimeDuration.format = function(duration) {
-    return tr.b.u.Units.timeDurationInMs.format(duration);
-  };
-
-  return {
-    TimeDuration: TimeDuration
-  };
-});
-</script>
diff --git a/catapult/tracing/tracing/base/units/time_duration_test.html b/catapult/tracing/tracing/base/units/time_duration_test.html
deleted file mode 100644
index c257c74..0000000
--- a/catapult/tracing/tracing/base/units/time_duration_test.html
+++ /dev/null
@@ -1,55 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/units/time_duration.html">
-<link rel="import" href="/tracing/base/units/units.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  var Units = tr.b.u.Units;
-  var TimeDuration = tr.b.u.TimeDuration;
-
-  function checkFormat(timestamp, expectedString) {
-    assert.equal(TimeDuration.format(timestamp), expectedString);
-    assert.equal(new TimeDuration(timestamp).toString(), expectedString);
-  }
-
-  test('format', function() {
-    try {
-      // Use milliseconds to display time (default behavior).
-      Units.currentTimeDisplayMode = tr.b.u.TimeDisplayModes.ms;
-
-      checkFormat(0, '0.000 ms');
-      checkFormat(0.02, '0.020 ms');
-      checkFormat(0.001, '0.001 ms');
-      checkFormat(0.0005, '0.001 ms');
-      checkFormat(0.00049, '0.000 ms');
-      checkFormat(999.999, '999.999 ms');
-      checkFormat(1000.001, '1,000.001 ms');
-      checkFormat(123456789, '123,456,789.000 ms');
-      checkFormat(-0.00051, '-0.001 ms');
-      checkFormat(-123456789, '-123,456,789.000 ms');
-
-      // Change the unit to nanoseconds.
-      Units.currentTimeDisplayMode = tr.b.u.TimeDisplayModes.ns;
-
-      checkFormat(0, '0 ns');
-      checkFormat(1, '1,000,000 ns');
-      checkFormat(0.000042, '42 ns');
-      checkFormat(0.000001, '1 ns');
-      checkFormat(0.0000005, '1 ns');
-      checkFormat(0.00000049, '0 ns');
-      checkFormat(123.456, '123,456,000 ns');
-      checkFormat(-0.07, '-70,000 ns');
-    } finally {
-      Units.reset();
-    }
-  });
-});
-</script>
diff --git a/catapult/tracing/tracing/base/units/time_stamp.html b/catapult/tracing/tracing/base/units/time_stamp.html
deleted file mode 100644
index 27730ac..0000000
--- a/catapult/tracing/tracing/base/units/time_stamp.html
+++ /dev/null
@@ -1,38 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/units/scalar.html">
-<link rel="import" href="/tracing/base/units/time_display_mode.html">
-
-<script>
-'use strict';
-
-tr.exportTo('tr.b.u', function() {
-  /**
-   * Float wrapper, representing a time stamp, capable of pretty-printing.
-   */
-  function TimeStamp(timestamp) {
-    tr.b.u.Scalar.call(this, timestamp, tr.b.u.Units.timeStampInMs);
-  };
-
-  TimeStamp.prototype = {
-    __proto__: tr.b.u.Scalar.prototype,
-
-    get timestamp() {
-      return this.value;
-    }
-  };
-
-  TimeStamp.format = function(timestamp) {
-    return tr.b.u.Units.timeStampInMs.format(timestamp);
-  };
-
-  return {
-    TimeStamp: TimeStamp
-  };
-});
-</script>
diff --git a/catapult/tracing/tracing/base/units/time_stamp_test.html b/catapult/tracing/tracing/base/units/time_stamp_test.html
deleted file mode 100644
index 9f5035a..0000000
--- a/catapult/tracing/tracing/base/units/time_stamp_test.html
+++ /dev/null
@@ -1,55 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/units/units.html">
-<link rel="import" href="/tracing/base/units/time_stamp.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  var Units = tr.b.u.Units;
-  var TimeStamp = tr.b.u.TimeStamp;
-
-  function checkFormat(timestamp, expectedString) {
-    assert.equal(TimeStamp.format(timestamp), expectedString);
-    assert.equal(new TimeStamp(timestamp).toString(), expectedString);
-  }
-
-  test('format', function() {
-    try {
-      // Use milliseconds to display time (default behavior).
-      Units.currentTimeDisplayMode = tr.b.u.TimeDisplayModes.ms;
-
-      checkFormat(0, '0.000 ms');
-      checkFormat(0.02, '0.020 ms');
-      checkFormat(0.001, '0.001 ms');
-      checkFormat(0.0005, '0.001 ms');
-      checkFormat(0.00049, '0.000 ms');
-      checkFormat(999.999, '999.999 ms');
-      checkFormat(1000.001, '1,000.001 ms');
-      checkFormat(123456789, '123,456,789.000 ms');
-      checkFormat(-0.00051, '-0.001 ms');
-      checkFormat(-123456789, '-123,456,789.000 ms');
-
-      // Change the unit to nanoseconds.
-      Units.currentTimeDisplayMode = tr.b.u.TimeDisplayModes.ns;
-
-      checkFormat(0, '0 ns');
-      checkFormat(1, '1,000,000 ns');
-      checkFormat(0.000042, '42 ns');
-      checkFormat(0.000001, '1 ns');
-      checkFormat(0.0000005, '1 ns');
-      checkFormat(0.00000049, '0 ns');
-      checkFormat(123.456, '123,456,000 ns');
-      checkFormat(-0.07, '-70,000 ns');
-    } finally {
-      Units.reset();
-    }
-  });
-});
-</script>
diff --git a/catapult/tracing/tracing/base/units/units.html b/catapult/tracing/tracing/base/units/units.html
deleted file mode 100644
index 36865b0..0000000
--- a/catapult/tracing/tracing/base/units/units.html
+++ /dev/null
@@ -1,160 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/base.html">
-<link rel="import" href="/tracing/base/event.html">
-<link rel="import" href="/tracing/base/event_target.html">
-<link rel="import" href="/tracing/base/units/time_display_mode.html">
-
-<script>
-'use strict';
-
-tr.exportTo('tr.b.u', function() {
-  var TimeDisplayModes = tr.b.u.TimeDisplayModes;
-
-  function max(a, b) {
-    if (a === undefined)
-      return b;
-    if (b === undefined)
-      return a;
-    return a.scale > b.scale ? a : b;
-  }
-
-  var Units = {
-    reset: function() {
-      this.currentTimeDisplayMode = TimeDisplayModes.ms;
-    },
-
-    timestampFromUs: function(us) {
-      return us / 1000;
-    },
-
-    maybeTimestampFromUs: function(us) {
-      return us === undefined ? undefined : us / 1000;
-    },
-
-    get currentTimeDisplayMode() {
-      return this.currentTimeDisplayMode_;
-    },
-
-    // Use tr-ui-u-preferred-display-unit element instead of directly setting.
-    set currentTimeDisplayMode(value) {
-      if (this.currentTimeDisplayMode_ == value)
-        return;
-
-      this.currentTimeDisplayMode_ = value;
-      this.dispatchEvent(new tr.b.Event('display-mode-changed'));
-    },
-
-    didPreferredTimeDisplayUnitChange: function() {
-      var largest = undefined;
-      var els = tr.b.findDeepElementsMatching(document.body,
-          'tr-ui-u-preferred-display-unit');
-      els.forEach(function(el) {
-        largest = max(largest, el.preferredTimeDisplayMode);
-      });
-
-      this.currentDisplayUnit = largest === undefined ?
-          TimeDisplayModes.ms : largest;
-    },
-
-    unitsByJSONName: {},
-
-    fromJSON: function(object) {
-      var u = this.unitsByJSONName[object];
-      if (u) {
-        return u;
-      }
-      throw new Error('Unrecognized unit');
-    }
-  };
-
-  tr.b.EventTarget.decorate(Units);
-  Units.reset();
-
-  // Known display units follow.
-  //////////////////////////////////////////////////////////////////////////////
-  Units.timeDurationInMs = {
-    asJSON: function() { return 'ms'; },
-    format: function(value) {
-      return Units.currentTimeDisplayMode_.format(value);
-    }
-  };
-  Units.unitsByJSONName['ms'] = Units.timeDurationInMs;
-
-  Units.timeStampInMs = {
-    asJSON: function() { return 'tsMs'; },
-    format: function(value) {
-      return Units.currentTimeDisplayMode_.format(value);
-    }
-  };
-  Units.unitsByJSONName['tsMs'] = Units.timeStampInMs;
-
-  Units.normalizedPercentage = {
-    asJSON: function() { return 'n%'; },
-    format: function(value) {
-      var tmp = new Number(Math.round(value * 100000) / 1000);
-      return tmp.toLocaleString(undefined, {minimumFractionDigits: 3}) + '%';
-    }
-  };
-  Units.unitsByJSONName['n%'] = Units.normalizedPercentage;
-
-  var SIZE_UNIT_PREFIXES = ['', 'Ki', 'Mi', 'Gi', 'Ti'];
-  Units.sizeInBytes = {
-    asJSON: function() { return 'sizeInBytes'; },
-    format: function(value) {
-      var signPrefix = '';
-      if (value < 0) {
-        signPrefix = '-';
-        value = -value;
-      }
-
-      var i = 0;
-      while (value >= 1024 && i < SIZE_UNIT_PREFIXES.length - 1) {
-        value /= 1024;
-        i++;
-      }
-
-      return signPrefix + value.toFixed(1) + ' ' + SIZE_UNIT_PREFIXES[i] + 'B';
-    }
-  };
-  Units.unitsByJSONName['sizeInBytes'] = Units.sizeInBytes;
-
-  Units.energyInJoules = {
-    asJSON: function() { return 'J'; },
-    format: function(value) {
-      return value
-          .toLocaleString(undefined, { minimumFractionDigits: 3 }) + ' J';
-    }
-  };
-  Units.unitsByJSONName['J'] = Units.energyInJoules;
-
-  Units.powerInWatts = {
-    asJSON: function() { return 'W'; },
-    format: function(value) {
-      return (value * 1000.0)
-          .toLocaleString(undefined, { minimumFractionDigits: 3 }) + ' mW';
-    }
-  };
-  Units.unitsByJSONName['W'] = Units.powerInWatts;
-
-  Units.unitlessNumber = {
-    asJSON: function() { return 'unitless'; },
-    format: function(value) {
-      return value.toLocaleString(
-          undefined, {
-            minimumFractionDigits: 3,
-            maximumFractionDigits: 3});
-    }
-  };
-  Units.unitsByJSONName['unitless'] = Units.unitlessNumber;
-
-  return {
-    Units: Units
-  };
-});
-</script>
diff --git a/catapult/tracing/tracing/base/units/units_test.html b/catapult/tracing/tracing/base/units/units_test.html
deleted file mode 100644
index cb39a3c..0000000
--- a/catapult/tracing/tracing/base/units/units_test.html
+++ /dev/null
@@ -1,67 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/base/units/units.html">
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  var Units = tr.b.u.Units;
-
-  test('Units.display-mode-changed', function() {
-    var Units = tr.b.u.Units;
-    var TimeDisplayModes = tr.b.u.TimeDisplayModes;
-
-    var listenerWasCalled = false;
-    function listener(e) {
-      listenerWasCalled = true;
-    }
-
-    try {
-      Units.currentTimeDisplayMode = TimeDisplayModes.ms;
-      Units.addEventListener('display-mode-changed', listener);
-
-      listenerWasCalled = false;
-      Units.currentTimeDisplayMode = TimeDisplayModes.ns;
-      assert.isTrue(listenerWasCalled);
-      assert.equal(Units.currentTimeDisplayMode, TimeDisplayModes.ns);
-    } finally {
-      Units.removeEventListener('display-mode-changed', listener);
-      Units.reset();
-    }
-  });
-
-  test('sizeInBytes', function() {
-    function checkFormat(value, expectation) {
-      assert.equal(Units.sizeInBytes.format(value), expectation);
-    }
-    checkFormat(0, '0.0 B');
-    checkFormat(1, '1.0 B');
-    checkFormat(1536, '1.5 KiB');
-    checkFormat(424.2 * 1024 * 1024, '424.2 MiB');
-    checkFormat(5 * 1024 * 1024 * 1024, '5.0 GiB');
-    checkFormat(1025 * 1024 * 1024 * 1024 * 1024, '1025.0 TiB');
-    checkFormat(-2.5 * 1024 * 1024, '-2.5 MiB');
-  });
-
-  test('energyInJoules', function() {
-    assert.equal(Units.energyInJoules.format(1000), '1,000.000 J');
-    assert.equal(Units.energyInJoules.format(1), '1.000 J');
-    assert.equal(Units.energyInJoules.format(.005), '0.005 J');
-    assert.equal(Units.energyInJoules.format(.0005), '0.001 J');
-    assert.equal(Units.energyInJoules.format(.0004), '0.000 J');
-  });
-
-  test('powerInWatts', function() {
-    assert.equal(Units.powerInWatts.format(1000), '1,000,000.000 mW');
-    assert.equal(Units.powerInWatts.format(1), '1,000.000 mW');
-    assert.equal(Units.powerInWatts.format(.001), '1.000 mW');
-    assert.equal(Units.powerInWatts.format(.001005), '1.005 mW');
-    assert.equal(Units.powerInWatts.format(.0010005), '1.001 mW');
-    assert.equal(Units.powerInWatts.format(.0010004), '1.000 mW');
-  });
-});
-</script>
diff --git a/catapult/tracing/tracing/base/unittest.html b/catapult/tracing/tracing/base/unittest.html
index f88e467..8602f1e 100644
--- a/catapult/tracing/tracing/base/unittest.html
+++ b/catapult/tracing/tracing/base/unittest.html
@@ -12,7 +12,21 @@
   /**
    * Alias chai assert to the global assert.
    */
-  global.assert = chai.assert;
+  if (tr.isNode) {
+    // In node, chai.js knows to act as a node module, whereas our HTML
+    // imports code expects chai to end up in the global scope. So, in Node,
+    // copy the chai exports into global.
+    var chaiAbsPath = HTMLImportsLoader.hrefToAbsolutePath(
+        '/chai/chai.js');
+    var chaiModule = require(chaiAbsPath);
+    for (var exportName in chaiModule)
+      global[exportName] = chaiModule[exportName];
+  } else {
+    /**
+     * In the browser, chai is already loaded globally; alias its assert.
+     */
+    global.assert = chai.assert;
+  }
 </script>
 
 <link rel="import" href="/tracing/base/unittest/suite_loader.html">
diff --git a/catapult/tracing/tracing/base/unittest/html_test_results.html b/catapult/tracing/tracing/base/unittest/html_test_results.html
index 1e96e6b..fa65a9e 100644
--- a/catapult/tracing/tracing/base/unittest/html_test_results.html
+++ b/catapult/tracing/tracing/base/unittest/html_test_results.html
@@ -18,22 +18,24 @@
     flex: 0 0 auto;
   }
 
-  x-tr-b-unittest-test-results > x-html-test-case-result.dark {
+  x-tr-b-unittest-test-results > x-html-test-case-result.dark > #summary {
     background-color: #eee;
   }
 
   x-html-test-case-result {
     display: block;
   }
-  x-html-test-case-result > #title,
-  x-html-test-case-result > #status,
+  x-html-test-case-result > #summary > #title,
+  x-html-test-case-result > #summary > #status,
   x-html-test-case-result > #details > x-html-test-case-error > #message,
   x-html-test-case-result > #details > x-html-test-case-error > #stack,
-  x-html-test-case-result > #details > x-html-test-case-error > #return-value {
+  x-html-test-case-result > #details > x-html-test-case-error > #return-value,
+  x-html-test-case-result > #details > x-html-test-case-flaky > #message {
     -webkit-user-select: auto;
   }
 
-  x-html-test-case-result > #details > x-html-test-case-error {
+  x-html-test-case-result > #details > x-html-test-case-error,
+  x-html-test-case-result > #details > x-html-test-case-flaky {
     display: block;
     border: 1px solid grey;
     border-radius: 5px;
@@ -42,7 +44,8 @@
   }
 
   x-html-test-case-result > #details > x-html-test-case-error > #message,
-  x-html-test-case-result > #details > x-html-test-case-error > #stack {
+  x-html-test-case-result > #details > x-html-test-case-error > #stack,
+  x-html-test-case-result > #details > x-html-test-case-flaky > #message {
     white-space: pre;
   }
 
@@ -67,6 +70,10 @@
     font-weight: bold;
   }
 
+  .unittest-flaky {
+    color: darkorange;
+  }
+
   .unittest-exception {
     color: red;
     font-weight: bold;
@@ -79,9 +86,11 @@
   }
 </style>
 <template id="x-html-test-case-result-template">
-  <span id="title"></span>&nbsp;
-  <span id="status"></span>&nbsp;
-  <span id="return-value"></span>
+  <div id="summary">
+    <span id="title"></span>&nbsp;
+    <span id="status"></span>&nbsp;
+    <span id="return-value"></span>
+  </div>
   <div id="details"></div>
 </template>
 
@@ -89,6 +98,10 @@
   <div id="stack"></div>
 </template>
 
+<template id="x-html-test-case-flaky-template">
+  <div id="message"></div>
+</template>
+
 <script>
 'use strict';
 tr.exportTo('tr.b.unittest', function() {
@@ -167,14 +180,19 @@
       this.updateColorAndStatus_();
     },
 
+    addFlaky: function() {
+      var flakyEl = document.createElement('x-html-test-case-flaky');
+      flakyEl.appendChild(tr.ui.b.instantiateTemplate(
+          '#x-html-test-case-flaky-template', THIS_DOC));
+      flakyEl.querySelector('#message').textContent = 'FLAKY';
+      this.querySelector('#details').appendChild(flakyEl);
+      this.updateColorAndStatus_();
+    },
+
     addHTMLOutput: function(element) {
       var htmlResultEl = document.createElement('x-html-test-case-html-result');
       htmlResultEl.appendChild(element);
       this.querySelector('#details').appendChild(htmlResultEl);
-
-      var bounds = element.getBoundingClientRect();
-      assert(bounds.width !== 0, 'addHTMLOutput element as 0 width');
-      assert(bounds.height !== 0, 'addHTMLOutput element has 0 height');
     },
 
     updateHTMLOutputDisplayState_: function() {
@@ -192,6 +210,10 @@
       return !!this.querySelector('x-html-test-case-error');
     },
 
+    get isFlaky() {
+      return !!this.querySelector('x-html-test-case-flaky');
+    },
+
     get duration() {
       return this.duration_;
     },
@@ -217,6 +239,9 @@
       if (this.hadErrors) {
         colorCls = 'unittest-failed';
         status = 'failed';
+      } else if (this.isFlaky) {
+        colorCls = 'unittest-flaky';
+        status = 'flaky';
       } else if (this.testStatus_ == TestStatus.PENDING) {
         colorCls = 'unittest-pending';
         status = 'pending';
@@ -259,10 +284,12 @@
     __proto__: HTMLUnknownElement.prototype,
 
     decorate: function() {
+      this.testCaseResultsByCaseGUID_ = {};
       this.currentTestCaseStartTime_ = undefined;
       this.totalRunTime_ = 0;
       this.numTestsThatPassed_ = 0;
       this.numTestsThatFailed_ = 0;
+      this.numFlakyTests_ = 0;
       this.showHTMLOutput_ = false;
       this.showPendingAndPassedTests_ = false;
       this.linkifyCallback_ = undefined;
@@ -318,47 +345,32 @@
         display = '';
       }
       res.style.display = display;
+    },
 
-      // This bit of mess gives res objects a dark class based on whether their
-      // last visible sibling was not dark. It relies on the
-      // updateDisplayStateForResult_ being called on all next siblings of
-      // an element before being called on the element itself. Yay induction.
-      var dark;
-      if (!res.nextSibling) {
-        dark = true;
-      } else {
-        var lastVisible;
-        for (var cur = res.nextSibling;
-             cur;
-             cur = cur.nextSibling) {
-          if (cur.style.display == '') {
-            lastVisible = cur;
-            break;
-          }
-        }
-        if (lastVisible) {
-          dark = !lastVisible.classList.contains('dark');
-        } else {
-          dark = true;
-        }
-      }
+    willRunTests: function(testCases) {
+      this.timeAtBeginningOfTest_ = window.performance.now();
+      testCases.forEach(function(testCase, i) {
+        var testCaseResult = new HTMLTestCaseResult();
+        testCaseResult.showHTMLOutput = this.showHTMLOutput_;
+        testCaseResult.testCase = testCase;
+        if ((i % 2) === 0)
+          testCaseResult.classList.add('dark');
 
-      if (dark)
-        res.classList.add('dark');
-      else
-        res.classList.remove('dark');
+        var href = this.getHRefForTestCase(testCase);
+        if (href)
+          testCaseResult.testCaseHRef = href;
+        testCaseResult.testStatus = TestStatus.PENDING;
+        this.testCaseResultsByCaseGUID_[testCase.guid] = testCaseResult;
+        this.appendChild(testCaseResult);
+        this.updateDisplayStateForResult_(testCaseResult);
+      }, this);
     },
 
     willRunTest: function(testCase) {
-      this.currentTestCaseResult_ = new HTMLTestCaseResult();
-      this.currentTestCaseResult_.showHTMLOutput = this.showHTMLOutput_;
-      this.currentTestCaseResult_.testCase = testCase;
-      var href = this.getHRefForTestCase(testCase);
-      if (href)
-        this.currentTestCaseResult_.testCaseHRef = href;
-      this.currentTestCaseResult_.testStatus = TestStatus.RUNNING;
+      this.currentTestCaseResult_ = this.testCaseResultsByCaseGUID_[
+          testCase.guid];
       this.currentTestCaseStartTime_ = window.performance.now();
-      this.insertBefore(this.currentTestCaseResult_, this.firstChild);
+      this.currentTestCaseResult_.testStatus = TestStatus.RUNNING;
       this.updateDisplayStateForResult_(this.currentTestCaseResult_);
       this.log_(testCase.fullyQualifiedName + ': ');
     },
@@ -381,31 +393,40 @@
       this.updateDisplayStateForResult_(this.currentTestCaseResult_);
     },
 
+    setCurrentTestFlaky: function() {
+      this.currentTestCaseResult_.addFlaky();
+      this.updateDisplayStateForResult_(this.currentTestCaseResult_);
+    },
+
     setReturnValueFromCurrentTest: function(returnValue) {
       this.currentTestCaseResult_.testReturnValue = returnValue;
     },
 
     didCurrentTestEnd: function() {
+      var now = window.performance.now();
       var testCaseResult = this.currentTestCaseResult_;
-      var testCaseDuration = window.performance.now() -
-          this.currentTestCaseStartTime_;
+      var testCaseDuration = now - this.currentTestCaseStartTime_;
       this.currentTestCaseResult_.testStatus = TestStatus.DONE_RUNNING;
       testCaseResult.duration = testCaseDuration;
-      this.totalRunTime_ += testCaseDuration;
+      this.totalRunTime_ = now - this.timeAtBeginningOfTest_;
+      var resultString;
       if (testCaseResult.hadErrors) {
-        this.log_('[FAILED]\n');
+        resultString = 'FAILED';
         this.numTestsThatFailed_ += 1;
         tr.b.dispatchSimpleEvent(this, 'testfailed');
+      } else if (testCaseResult.isFlaky) {
+        resultString = 'FLAKY';
+        this.numFlakyTests_ += 1;
+        tr.b.dispatchSimpleEvent(this, 'testflaky');
       } else {
-        this.log_('[PASSED]\n');
+        resultString = 'PASSED';
         this.numTestsThatPassed_ += 1;
         tr.b.dispatchSimpleEvent(this, 'testpassed');
       }
+      this.log_('[' + resultString + ']\n');
 
-      if (this.headless_) {
-        this.notifyTestResultToDevServer_(testCaseResult.hadErrors ?
-                                          'FAILED' : 'PASSED');
-      }
+      if (this.headless_)
+        this.notifyTestResultToDevServer_(resultString);
 
       this.updateDisplayStateForResult_(this.currentTestCaseResult_);
       this.currentTestCaseResult_ = undefined;
@@ -421,6 +442,7 @@
       return {
         numTestsThatPassed: this.numTestsThatPassed_,
         numTestsThatFailed: this.numTestsThatFailed_,
+        numFlakyTests: this.numFlakyTests_,
         totalRunTime: this.totalRunTime_
       };
     },
@@ -433,11 +455,14 @@
     },
 
     notifyTestCompletionToDevServer_: function() {
-      if (this.numTestsThatPassed_ + this.numTestsThatFailed_ == 0)
+      if (this.numTestsThatPassed_ + this.numTestsThatFailed_ +
+          this.numFlakyTests_ == 0) {
         return;
+      }
       var data = this.numTestsThatFailed_ == 0 ? 'ALL_PASSED' : 'HAD_FAILURES';
       data += '\nPassed tests: ' + this.numTestsThatPassed_ +
-              '  Failed tests: ' + this.numTestsThatFailed_;
+              '  Failed tests: ' + this.numTestsThatFailed_ +
+              '  Flaky tests: ' + this.numFlakyTests_;
 
       tr.b.postAsync('/tracing/notify_tests_completed', data);
     },
diff --git a/catapult/tracing/tracing/base/unittest/interactive_test_runner.html b/catapult/tracing/tracing/base/unittest/interactive_test_runner.html
index 41c247a..79a5649 100644
--- a/catapult/tracing/tracing/base/unittest/interactive_test_runner.html
+++ b/catapult/tracing/tracing/base/unittest/interactive_test_runner.html
@@ -66,6 +66,10 @@
     font-weight: bold;
   }
 
+  x-base-interactive-test-runner > #shortform-results > .flaky {
+    color: darkorange;
+  }
+
   x-base-interactive-test-runner > #results-container {
     flex: 1 1 auto;
     min-height: 0;
@@ -90,6 +94,10 @@
     font-weight: bold;
   }
 
+  .unittest-flaky {
+    color: darkorange;
+  }
+
   .unittest-exception {
     color: red;
     font-weight: bold;
@@ -173,6 +181,7 @@
 
       this.onResultsStatsChanged_ = this.onResultsStatsChanged_.bind(this);
       this.onTestFailed_ = this.onTestFailed_.bind(this);
+      this.onTestFlaky_ = this.onTestFlaky_.bind(this);
       this.onTestPassed_ = this.onTestPassed_.bind(this);
 
       this.appendChild(tr.ui.b.instantiateTemplate(
@@ -338,16 +347,26 @@
       this.querySelector('#shortform-results').appendChild(span);
     },
 
+    onTestFlaky_: function() {
+      var span = document.createElement('span');
+      span.classList.add('flaky');
+      span.appendChild(document.createTextNode('~'));
+      this.querySelector('#shortform-results').appendChild(span);
+    },
+
     onResultsStatsChanged_: function() {
       var statsEl = this.querySelector('#stats');
       var stats = this.results_.getStats();
       var numTestsOverall = this.runner_.testCases.length;
-      var numTestsThatRan = stats.numTestsThatPassed + stats.numTestsThatFailed;
+      var numTestsThatRan = stats.numTestsThatPassed +
+          stats.numTestsThatFailed + stats.numFlakyTests;
       statsEl.innerHTML =
           '<span>' + numTestsThatRan + '/' + numTestsOverall +
           '</span> tests run, ' +
           '<span class="unittest-failed">' + stats.numTestsThatFailed +
           '</span> failures, ' +
+          '<span class="unittest-flaky">' + stats.numFlakyTests +
+          '</span> flaky, ' +
           ' in ' + stats.totalRunTime.toFixed(2) + 'ms.';
     },
 
@@ -371,10 +390,9 @@
     beginRunning_: function() {
       var resultsContainer = this.querySelector('#results-container');
       if (this.results_) {
-        this.results_.removeEventListener('testpassed',
-                                          this.onTestPassed_);
-        this.results_.removeEventListener('testfailed',
-                                          this.onTestFailed_);
+        this.results_.removeEventListener('testpassed', this.onTestPassed_);
+        this.results_.removeEventListener('testfailed', this.onTestFailed_);
+        this.results_.removeEventListener('testflaky', this.onTestFlaky_);
         this.results_.removeEventListener('statschange',
                                           this.onResultsStatsChanged_);
         delete this.results_.getHRefForTestCase;
@@ -387,10 +405,9 @@
       this.updateResultsGivenShortFormat_();
 
       this.results_.shortFormat = this.shortFormat_;
-      this.results_.addEventListener('testpassed',
-                                     this.onTestPassed_);
-      this.results_.addEventListener('testfailed',
-                                     this.onTestFailed_);
+      this.results_.addEventListener('testpassed', this.onTestPassed_);
+      this.results_.addEventListener('testfailed', this.onTestFailed_);
+      this.results_.addEventListener('testflaky', this.onTestFlaky_);
       this.results_.addEventListener('statschange',
                                      this.onResultsStatsChanged_);
       resultsContainer.appendChild(this.results_);
@@ -405,10 +422,6 @@
         return true;
       }, this);
 
-      // Reverse the tests array to keep them in alphabetical order since
-      // TestRunner displays them last-to-first.
-      tests.reverse();
-
       this.runner_ = new tr.b.unittest.TestRunner(this.results_, tests);
       this.runner_.beginRunning();
 
@@ -669,7 +682,7 @@
           return window.location.pathname + '?' + stateString;
         else
           return window.location.pathname;
-      }
+      };
     }
 
     loadAndRunTestsImpl();
diff --git a/catapult/tracing/tracing/base/unittest/suite_loader.html b/catapult/tracing/tracing/base/unittest/suite_loader.html
index 43b447a..08fc7dc 100644
--- a/catapult/tracing/tracing/base/unittest/suite_loader.html
+++ b/catapult/tracing/tracing/base/unittest/suite_loader.html
@@ -61,10 +61,10 @@
     }
   };
 
-  function D8ModuleLoader() {
+  function HeadlessModuleLoader() {
     this.currentlyExecutingModuleInfo_ = undefined;
   }
-  D8ModuleLoader.prototype = {
+  HeadlessModuleLoader.prototype = {
     loadModule: function(testRelpath, moduleName) {
       return Promise.resolve().then(function() {
         var moduleInfo = {
@@ -104,9 +104,7 @@
     this.testSuites = [];
 
     if (tr.isHeadless) {
-      if (!tr.isVinn)
-        throw new Error('No module loader exists fro this platform');
-      this.currentModuleLoader_ = new D8ModuleLoader();
+      this.currentModuleLoader_ = new HeadlessModuleLoader();
     } else {
       this.currentModuleLoader_ = new HTMLImportsModuleLoader();
     }
diff --git a/catapult/tracing/tracing/base/unittest/test_case.html b/catapult/tracing/tracing/base/unittest/test_case.html
index 568e5d8..d4bb028 100644
--- a/catapult/tracing/tracing/base/unittest/test_case.html
+++ b/catapult/tracing/tracing/base/unittest/test_case.html
@@ -41,7 +41,7 @@
     __proto__: Object.prototype,
 
     get guid() {
-      return this.guid;
+      return this.guid_;
     },
 
     get suite() {
diff --git a/catapult/tracing/tracing/base/unittest/test_runner.html b/catapult/tracing/tracing/base/unittest/test_runner.html
index 359701b..683372b 100644
--- a/catapult/tracing/tracing/base/unittest/test_runner.html
+++ b/catapult/tracing/tracing/base/unittest/test_runner.html
@@ -5,6 +5,7 @@
 found in the LICENSE file.
 -->
 <link rel="import" href="/tracing/base/raf.html">
+<link rel="import" href="/tracing/base/timing.html">
 <link rel="import" href="/tracing/base/event_target.html">
 <script>
 'use strict';
@@ -14,11 +15,13 @@
   var realGlobalOnError;
   var realGlobalHistoryPushState;
 
+  var NUM_TESTS_PER_RIC = 16;
+
   function installGlobalTestHooks(runner) {
     realTvOnAnimationFrameError = tr.b.onAnimationFrameError;
     tr.b.onAnimationFrameError = function(error) {
       runner.results.addErrorForCurrentTest(error);
-    }
+    };
 
     if (tr.isExported('global.onerror')) {
       realGlobalOnError = global.onerror;
@@ -28,7 +31,7 @@
         if (realGlobalOnError)
           return realGlobalOnError(errorMsg, url, lineNumber);
         return false;
-      }
+      };
     }
 
     if (tr.isExported('global.history')) {
@@ -39,7 +42,7 @@
 
     tr.b.unittest.addHTMLOutputForCurrentTest = function(element) {
       runner.results.addHTMLOutputForCurrentTest(element);
-    }
+    };
 
     if (tr.isExported('global.sessionStorage')) {
       global.sessionStorage.clear();
@@ -73,6 +76,7 @@
     this.pendingTestCases_ = [];
 
     this.runOneTestCaseScheduled_ = false;
+    this.numRunsSinceLastRIC_ = 0;
 
     this.runCompletedPromise = undefined;
     this.runCompletedResolver_ = undefined;
@@ -96,6 +100,8 @@
 
       this.pendingTestCases_ = this.testCases_.slice(0);
 
+      this.results_.willRunTests(this.pendingTestCases_);
+
       this.scheduleRunOneTestCase_();
 
       return this.runCompletedPromise;
@@ -120,7 +126,14 @@
       if (this.runOneTestCaseScheduled_)
         return;
       this.runOneTestCaseScheduled_ = true;
-      tr.b.requestIdleCallback(this.runOneTestCase_, this);
+
+      this.numRunsSinceLastRIC_++;
+      if (this.numRunsSinceLastRIC_ === NUM_TESTS_PER_RIC) {
+        this.numRunsSinceLastRIC_ = 0;
+        tr.b.requestIdleCallback(this.runOneTestCase_, this);
+      } else {
+        Promise.resolve().then(this.runOneTestCase_.bind(this));
+      }
     },
 
     runOneTestCase_: function() {
@@ -132,10 +145,22 @@
       }
 
       this.currentTestCase_ = this.pendingTestCases_.splice(0, 1)[0];
-
+      this.currentMark_ = tr.b.Timing.mark(
+          'TestRunner', 'RunTest', {testName: this.currentTestCase_.name});
       this.results_.willRunTest(this.currentTestCase_);
+
+      if (this.isCurrentTestFlaky_()) {
+        this.results_.setCurrentTestFlaky();
+        this.results_.didCurrentTestEnd();
+        this.currentMark_.end();
+        this.currentTestCase_ = undefined;
+        this.scheduleRunOneTestCase_();
+        return;
+      }
+
       if (!this.setUpCurrentTestCase_()) {
         this.results_.didCurrentTestEnd();
+        this.currentMark_.end();
         this.currentTestCase_ = undefined;
         this.scheduleRunOneTestCase_();
         return;
@@ -148,6 +173,7 @@
               if (result)
                 this.results_.setReturnValueFromCurrentTest(result);
               this.results_.didCurrentTestEnd();
+              this.currentMark_.end();
               this.currentTestCase_ = undefined;
               this.scheduleRunOneTestCase_();
             } catch (e) {
@@ -160,6 +186,7 @@
               this.results_.addErrorForCurrentTest(error);
               this.tearDownCurrentTestCase_(false);
               this.results_.didCurrentTestEnd();
+              this.currentMark_.end();
               this.currentTestCase_ = undefined;
               this.scheduleRunOneTestCase_();
             } catch (e) {
@@ -169,6 +196,10 @@
           }.bind(this));
     },
 
+    isCurrentTestFlaky_: function() {
+      return !!this.currentTestCase_.options['flaky'];
+    },
+
     setUpCurrentTestCase_: function() {
       // Try setting it up. Return true if succeeded.
       installGlobalTestHooks(this);
diff --git a/catapult/tracing/tracing/base/unittest/test_suite.html b/catapult/tracing/tracing/base/unittest/test_suite.html
index 5f8114a..d497591 100644
--- a/catapult/tracing/tracing/base/unittest/test_suite.html
+++ b/catapult/tracing/tracing/base/unittest/test_suite.html
@@ -23,6 +23,17 @@
     this.tests_ = [];
     this.testNames_ = {}; // For dupe checking.
 
+    global.flakyTest = function(testCaseOrName, opt_testFn, opt_options) {
+      if (testCaseOrName instanceof TestCase) {
+        testCaseOrName.options['flaky'] = true;
+        test(testCaseOrName);
+      } else {
+        var options = opt_options || {};
+        options['flaky'] = true;
+        test(testCaseOrName, opt_testFn, options);
+      }
+    }.bind(this);
+
     global.test = function(testCaseOrName, opt_testFn, opt_options) {
       if (testCaseOrName instanceof TestCase) {
         if (opt_testFn !== undefined)
diff --git a/catapult/tracing/tracing/base/unittest/text_test_results.html b/catapult/tracing/tracing/base/unittest/text_test_results.html
index a468de2..c1cae62 100644
--- a/catapult/tracing/tracing/base/unittest/text_test_results.html
+++ b/catapult/tracing/tracing/base/unittest/text_test_results.html
@@ -17,12 +17,15 @@
   function TextTestResults() {
     this.numTestsThatPassed_ = 0;
     this.numTestsThatFailed_ = 0;
+    this.numFlakyTests_ = 0;
     this.currentTestCaseHadErrors_ = false;
+    this.currentTestIsFlaky_ = false;
   }
 
   TextTestResults.prototype = {
     get numTestsThatRan() {
-      return this.numTestsThatPassed_ + this.numTestsThatFailed_;
+      return this.numTestsThatPassed_ + this.numTestsThatFailed_ +
+          this.numFlakyTests_;
     },
 
     get numTestsThatFailed() {
@@ -33,9 +36,17 @@
       return this.numTestsThatPassed_;
     },
 
+    get numFlakyTests() {
+      return this.numFlakyTests_;
+    },
+
+    willRunTests: function(testCases) {
+    },
+
     willRunTest: function(testCase) {
       this.write_(testCase.name + ' (' + testCase.suite.name + ') ... ');
       this.currentTestCaseHadErrors_ = false;
+      this.currentTestIsFlaky_ = false;
     },
 
     addErrorForCurrentTest: function(error) {
@@ -50,6 +61,12 @@
       this.curHTMLOutput_.push(element);
     },
 
+    setCurrentTestFlaky: function() {
+      if (!this.currentTestIsFlaky_)
+        this.write_('FLAKY\n');
+      this.currentTestIsFlaky_ = true;
+    },
+
     setReturnValueFromCurrentTest: function(returnValue) {
       this.write_('[RESULT] ' + JSON.stringify(returnValue) + '\n');
     },
@@ -57,6 +74,8 @@
     didCurrentTestEnd: function() {
       if (this.currentTestCaseHadErrors_) {
         this.numTestsThatFailed_ += 1;
+      } else if (this.currentTestIsFlaky_) {
+        this.numFlakyTests_ += 1;
       } else {
         this.numTestsThatPassed_ += 1;
         this.write_('ok\n');
@@ -72,16 +91,22 @@
         this.write_('Ran ' + this.numTestsThatRan + ' tests\n');
 
       if (this.numTestsThatFailed > 0) {
-        this.write_('\nFAILED (errors=' + this.numTestsThatFailed + ')');
+        var flakyString = this.numFlakyTests == 0 ? '' :
+            ' flaky=' + this.numFlakyTests;
+        this.write_('\nFAILED (errors=' + this.numTestsThatFailed +
+            flakyString + ')');
       } else {
-        this.write_('\nOK');
+        var flakyString = this.numFlakyTests == 0 ? '' :
+            ' (flaky=' + this.numFlakyTests + ')';
+        this.write_('\nOK' + flakyString);
       }
     },
 
     write_: function(msg) {
-      if (!tr.isVinn)
-        throw new Error('Unsupported');
-      global.write(msg);
+      if (tr.isVinn)
+        global.write(msg);
+      else
+        console.log(msg);
     }
   };
 
diff --git a/catapult/tracing/tracing/base/xhr.html b/catapult/tracing/tracing/base/xhr.html
index b8954d8..37ce4c2 100644
--- a/catapult/tracing/tracing/base/xhr.html
+++ b/catapult/tracing/tracing/base/xhr.html
@@ -11,16 +11,25 @@
 'use strict';
 
 tr.exportTo('tr.b', function() {
+  var fs;
+  if (tr.isNode)
+    fs = require('fs');
+
   function guessBinary(url) {
     return /[.]gz$/.test(url) || /[.]zip$/.test(url);
   }
-  function xhr(method, url, async, opt_data) {
+  function xhr(method, url, async, opt_data, force_binary) {
     var req = new XMLHttpRequest();
     req.overrideMimeType('text/plain; charset=x-user-defined');
     req.open(method, url, async);
-    var isBinary = guessBinary(url);
-    if (isBinary && async)
-      req.responseType = 'arraybuffer';
+
+    // Respect an explicit binary override; otherwise guess from the URL.
+    var isBinary = force_binary;
+    if (isBinary === undefined)
+      isBinary = guessBinary(url);
+
+    if (isBinary && async)
+      req.responseType = 'arraybuffer';
 
     var data = opt_data !== undefined ? opt_data : null;
 
@@ -53,14 +62,33 @@
   }
 
   function getAsync(url) {
+    // Browser.
     if (!tr.isHeadless)
       return xhr('GET', url, true);
+
+    // Node or vinn prep.
     var filename;
     if (url.startsWith('file:///'))
       filename = url.substring(7);
     else
-      filename = global.hrefToAbsolutePath(url);
+      filename = global.HTMLImportsLoader.hrefToAbsolutePath(url);
     var isBinary = guessBinary(url);
+
+    // Node.
+    if (tr.isNode) {
+      var encoding = isBinary ? undefined : 'utf8';
+      return new Promise(function(resolve, reject) {
+        fs.readFile(filename, encoding, function(err, data) {
+          if (err) {
+            reject(err);
+            return;
+          }
+          resolve(data);
+        });
+      });
+    }
+
+    // Vinn.
     return Promise.resolve().then(function() {
       if (isBinary)
         return readbuffer(filename);
@@ -69,14 +97,27 @@
   }
 
   function getSync(url) {
+    // Browser.
     if (!tr.isHeadless)
       return xhr('GET', url, false);
+
+    // Node or vinn prep.
     var filename;
-    if (url.startsWith('file:///'))
+    if (url.startsWith('file:///'))  // posix
+      filename = url.substring(7);
+    else if (url.startsWith('file://') && url[8] === ':')  // win
       filename = url.substring(7);
     else
-      filename = global.hrefToAbsolutePath(url);
+      filename = global.HTMLImportsLoader.hrefToAbsolutePath(url);
     var isBinary = guessBinary(url);
+
+    // Node.
+    if (tr.isNode) {
+      var encoding = isBinary ? undefined : 'utf8';
+      return fs.readFileSync(filename, encoding);
+    }
+
+    // Vinn.
     if (isBinary)
       return readbuffer(filename);
     return read(filename);
@@ -88,6 +129,12 @@
     return xhr('POST', url, true, data);
   }
 
+  function postTextAsync(url, data) {
+    if (tr.isHeadless)
+      throw new Error('Only supported inside a browser');
+    return xhr('POST', url, true, data, false);
+  }
+
   return {
     getAsync: getAsync,
     getSync: getSync,
diff --git a/catapult/tracing/tracing/core/filter_test.html b/catapult/tracing/tracing/core/filter_test.html
index eb5406c..6dbdcb1 100644
--- a/catapult/tracing/tracing/core/filter_test.html
+++ b/catapult/tracing/tracing/core/filter_test.html
@@ -5,9 +5,9 @@
 found in the LICENSE file.
 -->
 
+<link rel="import" href="/tracing/base/unittest.html">
 <link rel="import" href="/tracing/core/filter.html">
 <link rel="import" href="/tracing/core/test_utils.html">
-<link rel="import" href="/tracing/base/unittest.html">
 
 <script>
 'use strict';
@@ -25,14 +25,16 @@
       new TitleOrCategoryFilter('');
     });
 
-    var s0 = tr.c.TestUtils.newSliceCategory('cat', 'a', 1, 3);
+    var s0 = tr.c.TestUtils.newSliceEx(
+        {cat: 'cat', title: 'a', start: 1, duration: 3});
     assert.isTrue(new TitleOrCategoryFilter('a').matchSlice(s0));
     assert.isTrue(new TitleOrCategoryFilter('cat').matchSlice(s0));
     assert.isTrue(new TitleOrCategoryFilter('at').matchSlice(s0));
     assert.isFalse(new TitleOrCategoryFilter('b').matchSlice(s0));
     assert.isFalse(new TitleOrCategoryFilter('X').matchSlice(s0));
 
-    var s1 = tr.c.TestUtils.newSliceCategory('cat', 'abc', 1, 3);
+    var s1 = tr.c.TestUtils.newSliceEx(
+        {cat: 'cat', title: 'abc', start: 1, duration: 3});
     assert.isTrue(new TitleOrCategoryFilter('abc').matchSlice(s1));
     assert.isTrue(new TitleOrCategoryFilter('Abc').matchSlice(s1));
     assert.isTrue(new TitleOrCategoryFilter('cat').matchSlice(s1));
@@ -49,12 +51,12 @@
       new ExactTitleFilter('');
     });
 
-    var s0 = tr.c.TestUtils.newSliceNamed('a', 1, 3);
+    var s0 = tr.c.TestUtils.newSliceEx({title: 'a', start: 1, duration: 3});
     assert.isTrue(new ExactTitleFilter('a').matchSlice(s0));
     assert.isFalse(new ExactTitleFilter('b').matchSlice(s0));
     assert.isFalse(new ExactTitleFilter('A').matchSlice(s0));
 
-    var s1 = tr.c.TestUtils.newSliceNamed('abc', 1, 3);
+    var s1 = tr.c.TestUtils.newSliceEx({title: 'abc', start: 1, duration: 3});
     assert.isTrue(new ExactTitleFilter('abc').matchSlice(s1));
     assert.isFalse(new ExactTitleFilter('Abc').matchSlice(s1));
     assert.isFalse(new ExactTitleFilter('bc').matchSlice(s1));
@@ -69,7 +71,8 @@
       new FullTextFilter('');
     });
 
-    var s0 = tr.c.TestUtils.newSliceCategory('cat', 'a', 1, 3);
+    var s0 = tr.c.TestUtils.newSliceEx(
+        {cat: 'cat', title: 'a', start: 1, duration: 3});
     s0.args['key'] = 'value';
     s0.args['anotherKey'] = 'anotherValue';
     assert.isTrue(new FullTextFilter('cat').matchSlice(s0));
@@ -79,20 +82,24 @@
     assert.isTrue(new FullTextFilter('anotherValue').matchSlice(s0));
     assert.isFalse(new FullTextFilter('not there').matchSlice(s0));
 
-    var s1 = tr.c.TestUtils.newSliceCategory('cat', 'a', 1, 3);
+    var s1 = tr.c.TestUtils.newSliceEx(
+        {cat: 'cat', title: 'a', start: 1, duration: 3});
     s1.args['key'] = 123;
     assert.isTrue(new FullTextFilter('123').matchSlice(s1));
 
-    var s2 = tr.c.TestUtils.newSliceCategory('cat', 'a', 1, 3);
+    var s2 = tr.c.TestUtils.newSliceEx(
+        {cat: 'cat', title: 'a', start: 1, duration: 3});
     s2.args['key'] = ['innerValue1', 'innerValue2'];
     assert.isTrue(new FullTextFilter('innerValue1').matchSlice(s2));
     assert.isTrue(new FullTextFilter('innerValue2').matchSlice(s2));
 
-    var s3 = tr.c.TestUtils.newSliceCategory('cat', 'a', 1, 3);
+    var s3 = tr.c.TestUtils.newSliceEx(
+        {cat: 'cat', title: 'a', start: 1, duration: 3});
     s3.args['key'] = ['one', 'two', 'three'];
     assert.isTrue(new FullTextFilter('two').matchSlice(s3));
 
-    var s4 = tr.c.TestUtils.newSliceCategory('cat', 'a', 1, 3);
+    var s4 = tr.c.TestUtils.newSliceEx(
+        {cat: 'cat', title: 'a', start: 1, duration: 3});
     s4.args['key'] = undefined;
     assert.isFalse(new FullTextFilter('not there').matchSlice(s4));
   });
diff --git a/catapult/tracing/tracing/core/test_utils.html b/catapult/tracing/tracing/core/test_utils.html
index 5becdba..f0b8d84 100644
--- a/catapult/tracing/tracing/core/test_utils.html
+++ b/catapult/tracing/tracing/core/test_utils.html
@@ -5,14 +5,15 @@
 found in the LICENSE file.
 -->
 
+<link rel="import" href="/tracing/base/iteration_helpers.html">
 <link rel="import" href="/tracing/importer/import.html">
 <link rel="import" href="/tracing/model/counter.html">
-<link rel="import" href="/tracing/model/interaction_record.html">
 <link rel="import" href="/tracing/model/model.html">
 <link rel="import" href="/tracing/model/slice.html">
 <link rel="import" href="/tracing/model/slice_group.html">
 <link rel="import" href="/tracing/model/stack_frame.html">
 <link rel="import" href="/tracing/model/thread_time_slice.html">
+<link rel="import" href="/tracing/model/user_model/stub_expectation.html">
 
 <script>
 'use strict';
@@ -68,12 +69,12 @@
   TestUtils.getStartAndDurationFromDict = function(options) {
     return _getStartAndCpuDurationFromDict(
         options, true, 'start', 'duration', 'end');
-  }
+  };
 
   TestUtils.newAsyncSlice = function(start, duration, startThread, endThread) {
     return TestUtils.newAsyncSliceNamed(
         'a', start, duration, startThread, endThread);
-  }
+  };
 
   TestUtils.newAsyncSliceNamed = function(
       name, start, duration, startThread, endThread) {
@@ -85,6 +86,19 @@
     s.startThread = startThread;
     s.endThread = endThread;
     return s;
+  };
+
+  function getColorId(colorId) {
+    if (colorId) {
+      if (colorId === 'random') {
+        colorId = Math.floor(
+            Math.random() *
+            ColorScheme.properties.numGeneralPurposeColorIds);
+      }
+    } else {
+      colorId = 0;
+    }
+    return colorId;
   }
 
   TestUtils.newAsyncSliceEx = function(options) {
@@ -92,20 +106,7 @@
 
     var cat = options.cat ? options.cat : 'cat';
     var title = options.title ? options.title : 'a';
-
-    var colorId;
-    if (options.colorId) {
-      if (options.colorId === 'random') {
-        colorId = Math.floor(
-            Math.random() *
-            ColorScheme.proprties.numGeneralPurposeColorIds);
-      } else {
-        colorId = options.colorId;
-      }
-    } else {
-      colorId = 0;
-    }
-
+    var colorId = getColorId(options.colorId);
 
     var isTopLevel;
     if (options.isTopLevel !== undefined)
@@ -140,26 +141,26 @@
     if (options.endThread)
       slice.endThread = options.endThread;
     return slice;
-  }
+  };
 
   TestUtils.newCounter = function(parent) {
     return TestUtils.newCounterNamed(parent, 'a');
-  }
+  };
 
   TestUtils.newCounterNamed = function(parent, name) {
     var s = new tr.model.Counter(parent, name, null, name);
     return s;
-  }
+  };
 
   TestUtils.newCounterCategory = function(parent, category, name) {
     var s = new tr.model.Counter(parent, name, category, name);
     return s;
-  }
+  };
 
   TestUtils.newCounterSeries = function() {
     var s = new tr.model.CounterSeries('a', 0);
     return s;
-  }
+  };
 
   TestUtils.newFlowEventEx = function(options) {
     if (options.start === undefined)
@@ -201,16 +202,7 @@
       event.endSlice.inFlowEvents.push(event);
     }
     return event;
-  }
-
-  TestUtils.newSlice = function(start, duration) {
-    return TestUtils.newSliceNamed('a', start, duration);
-  }
-
-  TestUtils.newSliceNamed = function(name, start, duration) {
-    var s = new tr.model.Slice('', name, 0, start, {}, duration);
-    return s;
-  }
+  };
 
   TestUtils.newThreadSlice = function(thread, state, start, duration, opt_cpu) {
     var s = new tr.model.ThreadTimeSlice(
@@ -218,7 +210,7 @@
     if (opt_cpu)
       s.cpuOnWhichThreadWasRunning = opt_cpu;
     return s;
-  }
+  };
 
   TestUtils.newSampleNamed = function(
       thread, sampleName, category, frameNames, start) {
@@ -233,13 +225,7 @@
                                         sf,
                                         1);
     return s;
-  }
-
-  TestUtils.newSliceCategory = function(category, name, start, duration) {
-    var s = new tr.model.Slice(
-        category, name, 0, start, {}, duration);
-    return s;
-  }
+  };
 
   TestUtils.newSliceEx = function(options) {
     var sd = TestUtils.getStartAndDurationFromDict(options);
@@ -267,7 +253,7 @@
 
 
     return slice;
-  }
+  };
 
   TestUtils.newStackTrace = function(model, titles) {
     var frame = undefined;
@@ -277,7 +263,7 @@
         model.addStackFrame(frame);
     });
     return frame;
-  }
+  };
 
   TestUtils.findSliceNamed = function(slices, name) {
     if (slices instanceof tr.model.SliceGroup)
@@ -286,11 +272,12 @@
       if (slices[i].title == name)
         return slices[i];
       return undefined;
-  }
+  };
 
   TestUtils.newInteractionRecord = function(parentModel, start, duration) {
-    return new tr.model.InteractionRecord(parentModel, 'a', 0, start, duration);
-  }
+    return new tr.model.um.StubExpectation({
+      parentModel: parentModel, start: start, duration: duration});
+  };
 
   TestUtils.newModel = function(customizeModelCallback) {
     return TestUtils.newModelWithEvents([], {
@@ -298,7 +285,7 @@
       pruneEmptyContainers: false,
       customizeModelCallback: customizeModelCallback
     });
-  }
+  };
 
   TestUtils.newModelWithEvents = function(events, opts) {
     if (!(events instanceof Array))
@@ -309,6 +296,8 @@
     var io = new tr.importer.ImportOptions();
     io.showImportWarnings = false;
     io.customizeModelCallback = opts.customizeModelCallback;
+    io.trackDetailedModelStats = opts.trackDetailedModelStats === undefined ?
+        false : opts.trackDetailedModelStats;
     io.shiftWorldToZero = opts.shiftWorldToZero === undefined ?
         true : opts.shiftWorldToZero;
     io.pruneEmptyContainers = opts.pruneEmptyContainers === undefined ?
@@ -320,7 +309,7 @@
     var i = new tr.importer.Import(m, io);
     i.importTraces(events);
     return m;
-  }
+  };
 
   TestUtils.newModelWithAuditor = function(customizeModelCallback, auditor) {
     return TestUtils.newModelWithEvents([], {
@@ -329,12 +318,148 @@
       customizeModelCallback: customizeModelCallback,
       auditorConstructors: [auditor]
     });
-  }
+  };
 
   TestUtils.newFakeThread = function() {
     var process = {model: {}};
     return new tr.model.Thread(process);
-  }
+  };
+
+  /** @constructor */
+  TestUtils.SourceGenerator = function() {
+    this.sourceList_ = [];
+    this.currentLineCommentList_ = [];
+    this.currentIndent_ = 0;
+    this.currentLineEmpty_ = true;
+  };
+
+  TestUtils.SourceGenerator.prototype = {
+    push: function(/* arguments */) {
+      if (this.currentLineEmpty_) {
+        this.sourceList_.push(' '.repeat(this.currentIndent_));
+        this.currentLineEmpty_ = false;
+      }
+      this.sourceList_.push.apply(
+          this.sourceList_, Array.prototype.slice.call(arguments));
+    },
+
+    pushComment: function(/* arguments */) {
+      this.currentLineCommentList_.push.apply(
+          this.currentLineCommentList_, Array.prototype.slice.call(arguments));
+    },
+
+    build: function() {
+      this.finishLine_();
+      return this.sourceList_.join('');
+    },
+
+    breakLine: function() {
+      this.finishLine_();
+      this.push('\n');
+      this.currentLineEmpty_ = true;
+    },
+
+    finishLine_: function() {
+      if (this.currentLineCommentList_.length === 0)
+        return;
+      this.push('  // ');
+      this.push.apply(this, this.currentLineCommentList_);
+      this.push('.');
+      this.currentLineCommentList_ = [];
+    },
+
+    indentBlock: function(spaces, breakLine, blockCallback, opt_this) {
+      opt_this = opt_this || this;
+      this.currentIndent_ += spaces;
+      if (breakLine)
+        this.breakLine();
+      blockCallback.call(opt_this);
+      this.currentIndent_ -= spaces;
+    },
+
+    formatSingleLineList: function(list, itemCallback, opt_this) {
+      opt_this = opt_this || this;
+      this.push('[');
+      tr.b.asArray(list).forEach(function(item, index) {
+        if (index > 0)
+          this.push(', ');
+        itemCallback.call(opt_this, item, index);
+      }, this);
+      this.push(']');
+    },
+
+    formatMultiLineList: function(list, itemCallback, opt_this) {
+      opt_this = opt_this || this;
+      this.push('[');
+      this.indentBlock(2, false /* don't break line */, function() {
+        tr.b.asArray(list).forEach(function(item, index) {
+          if (index > 0)
+            this.push(',');
+          this.breakLine();
+          itemCallback.call(opt_this, item, index);
+        }, this);
+      }, this);
+      if (list.length > 0)
+        this.breakLine();
+      this.push(']');
+    },
+
+    formatString: function(string) {
+      if (string === undefined)
+        this.push('undefined');
+      else
+        this.push('\'', string, '\'');
+    }
+  };
+
+  TestUtils.addSourceListing = function(test, source) {
+    var testSourceEl = document.createElement('pre');
+    testSourceEl.style.fontFamily = 'monospace';
+    testSourceEl.textContent = source;
+
+    var copyButtonEl = document.createElement('button');
+    copyButtonEl.textContent = 'Copy to clipboard';
+    copyButtonEl.addEventListener('click', function() {
+      var selection = window.getSelection();
+
+      // Store the original selection.
+      var originalRanges = new Array(selection.rangeCount);
+      for (var i = 0; i < originalRanges.length; i++)
+        originalRanges[i] = selection.getRangeAt(i);
+
+      // Copy the generated test source code into clipboard.
+      selection.removeAllRanges();
+      var range = document.createRange();
+      range.selectNode(testSourceEl);
+      selection.addRange(range);
+      document.execCommand('copy');
+
+      // Restore the original selection.
+      selection.removeAllRanges();
+      for (var i = 0; i < originalRanges.length; i++)
+        selection.addRange(originalRanges[i]);
+    });
+
+    var outputEl = document.createElement('div');
+    outputEl.appendChild(copyButtonEl);
+    outputEl.appendChild(testSourceEl);
+    test.addHTMLOutput(outputEl);
+  };
+
+  TestUtils.newInstantEvent = function(options) {
+    var title = options.title;
+    var start = options.start;
+    if ((title === undefined) ||
+        (title === '') ||
+        (start === undefined))
+      throw new Error('too little information');
+
+    var category = options.category || 'category';
+    var colorId = getColorId(options.colorId);
+    var args = options.args || {};
+    return new tr.model.InstantEvent(
+        category, title, colorId, start, args);
+  };
 
   return {
     TestUtils: TestUtils
diff --git a/catapult/tracing/tracing/extras/android/android_app.html b/catapult/tracing/tracing/extras/android/android_app.html
deleted file mode 100644
index 348a048..0000000
--- a/catapult/tracing/tracing/extras/android/android_app.html
+++ /dev/null
@@ -1,310 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/base/statistics.html">
-<link rel="import" href="/tracing/base/sorted_array_utils.html">
-<link rel="import" href="/tracing/model/frame.html">
-<link rel="import" href="/tracing/base/range_utils.html">
-
-<script>
-'use strict';
-
-/**
- * @fileoverview Class for managing android-specific model meta data,
- * such as rendering apps, and frames rendered.
- */
-tr.exportTo('tr.e.audits', function() {
-  var Frame = tr.model.Frame;
-  var Statistics = tr.b.Statistics;
-
-  var UI_DRAW_TYPE = {
-    NONE: 'none',
-    LEGACY: 'legacy',
-    MARSHMALLOW: 'marshmallow'
-  };
-
-  var UI_THREAD_DRAW_NAMES = {
-    'performTraversals': UI_DRAW_TYPE.LEGACY,
-    'Choreographer#doFrame': UI_DRAW_TYPE.MARSHMALLOW
-  };
-
-  var RENDER_THREAD_DRAW_NAME = 'DrawFrame';
-  var RENDER_THREAD_INDEP_DRAW_NAME = 'doFrame';
-  var THREAD_SYNC_NAME = 'syncFrameState';
-
-  function getSlicesForThreadTimeRanges(threadTimeRanges) {
-    var ret = [];
-    threadTimeRanges.forEach(function(threadTimeRange) {
-      var slices = [];
-
-      threadTimeRange.thread.sliceGroup.iterSlicesInTimeRange(
-        function(slice) { slices.push(slice); },
-        threadTimeRange.start, threadTimeRange.end);
-      ret.push.apply(ret, slices);
-    });
-    return ret;
-  }
-
-  function makeFrame(threadTimeRanges, surfaceFlinger) {
-    var args = {};
-    if (surfaceFlinger && surfaceFlinger.hasVsyncs) {
-      var start = Statistics.min(threadTimeRanges,
-          function(threadTimeRanges) { return threadTimeRanges.start; });
-      args['deadline'] = surfaceFlinger.getFrameDeadline(start);
-      args['frameKickoff'] = surfaceFlinger.getFrameKickoff(start);
-    }
-    var events = getSlicesForThreadTimeRanges(threadTimeRanges);
-    return new Frame(events, threadTimeRanges, args);
-  }
-
-  function findOverlappingDrawFrame(renderThread, time) {
-    if (!renderThread)
-      return undefined;
-
-    var slices = renderThread.sliceGroup.slices;
-    for (var i = 0; i < slices.length; i++) {
-      var slice = slices[i];
-      if (slice.title == RENDER_THREAD_DRAW_NAME &&
-          slice.start <= time &&
-          time <= slice.end) {
-        return slice;
-      }
-    }
-    return undefined;
-  }
-
-  /**
-   * Builds an array of {start, end} ranges grouping common work of a frame
-   * that occurs just before performTraversals().
-   *
-   * Only necessary before Choreographer#doFrame tracing existed.
-   */
-  function getPreTraversalWorkRanges(uiThread) {
-    if (!uiThread)
-      return [];
-
-    // gather all frame work that occurs outside of performTraversals
-    var preFrameEvents = [];
-    uiThread.sliceGroup.slices.forEach(function(slice) {
-      if (slice.title == 'obtainView' ||
-          slice.title == 'setupListItem' ||
-          slice.title == 'deliverInputEvent' ||
-          slice.title == 'RV Scroll')
-        preFrameEvents.push(slice);
-    });
-    uiThread.asyncSliceGroup.slices.forEach(function(slice) {
-      if (slice.title == 'deliverInputEvent')
-        preFrameEvents.push(slice);
-    });
-
-    return tr.b.mergeRanges(
-        tr.b.convertEventsToRanges(preFrameEvents),
-        3,
-        function(events) {
-      return {
-        start: events[0].min,
-        end: events[events.length - 1].max
-      };
-    });
-  }
-
-  function getFrameStartTime(traversalStart, preTraversalWorkRanges) {
-    var preTraversalWorkRange = tr.b.findClosestIntervalInSortedIntervals(
-        preTraversalWorkRanges,
-        function(range) { return range.start },
-        function(range) { return range.end },
-        traversalStart,
-        3);
-
-    if (preTraversalWorkRange)
-      return preTraversalWorkRange.start;
-    return traversalStart;
-  }
-
-  function getUiThreadDrivenFrames(app) {
-    if (!app.uiThread)
-      return [];
-
-    var preTraversalWorkRanges = [];
-    if (app.uiDrawType == UI_DRAW_TYPE.LEGACY)
-      preTraversalWorkRanges = getPreTraversalWorkRanges(app.uiThread);
-
-    var frames = [];
-    app.uiThread.sliceGroup.slices.forEach(function(slice) {
-      if (!(slice.title in UI_THREAD_DRAW_NAMES)) {
-        return;
-      }
-
-      var threadTimeRanges = [];
-      var uiThreadTimeRange = {
-        thread: app.uiThread,
-        start: getFrameStartTime(slice.start, preTraversalWorkRanges),
-        end: slice.end
-      };
-      threadTimeRanges.push(uiThreadTimeRange);
-
-      // on SDK 21+ devices with RenderThread,
-      // account for time taken on RenderThread
-      var rtDrawSlice = findOverlappingDrawFrame(
-          app.renderThread, slice.end);
-      if (rtDrawSlice) {
-        var rtSyncSlice = rtDrawSlice.findDescendentSlice(THREAD_SYNC_NAME);
-        if (rtSyncSlice) {
-          // Generally, the UI thread is only on the critical path
-          // until the start of sync.
-          uiThreadTimeRange.end = Math.min(uiThreadTimeRange.end,
-                                           rtSyncSlice.start);
-        }
-
-        threadTimeRanges.push({
-          thread: app.renderThread,
-          start: rtDrawSlice.start,
-          end: rtDrawSlice.end
-        });
-      }
-      frames.push(makeFrame(threadTimeRanges, app.surfaceFlinger));
-    });
-    return frames;
-  }
-
-  function getRenderThreadDrivenFrames(app) {
-    if (!app.renderThread)
-      return [];
-
-    var frames = [];
-    app.renderThread.sliceGroup.getSlicesOfName(RENDER_THREAD_INDEP_DRAW_NAME)
-        .forEach(function(slice) {
-      var threadTimeRanges = [{
-        thread: app.renderThread,
-        start: slice.start,
-        end: slice.end
-      }];
-      frames.push(makeFrame(threadTimeRanges, app.surfaceFlinger));
-    });
-    return frames;
-  }
-
-  function getUiDrawType(uiThread) {
-    if (!uiThread)
-      return UI_DRAW_TYPE.NONE;
-
-    var slices = uiThread.sliceGroup.slices;
-    for (var i = 0; i < slices.length; i++) {
-      if (slices[i].title in UI_THREAD_DRAW_NAMES) {
-        return UI_THREAD_DRAW_NAMES[slices[i].title];
-      }
-    }
-    return UI_DRAW_TYPE.NONE;
-  }
-
-  function getInputSamples(process) {
-    var samples = undefined;
-    for (var counterName in process.counters) {
-          if (/^android\.aq\:pending/.test(counterName) &&
-        process.counters[counterName].numSeries == 1) {
-        samples = process.counters[counterName].series[0].samples;
-        break;
-      }
-    }
-
-    if (!samples)
-      return [];
-
-    // output rising edges only, since those are user inputs
-    var inputSamples = [];
-    var lastValue = 0;
-    samples.forEach(function(sample) {
-      if (sample.value > lastValue) {
-        inputSamples.push(sample);
-      }
-      lastValue = sample.value;
-    });
-    return inputSamples;
-  }
-
-  function getAnimationAsyncSlices(uiThread) {
-    if (!uiThread)
-      return [];
-
-    var slices = [];
-    uiThread.asyncSliceGroup.iterateAllEvents(function(slice) {
-      if (/^animator\:/.test(slice.title))
-        slices.push(slice);
-    });
-    return slices;
-  }
-
-  /**
-   * Model for Android App specific data.
-   * @constructor
-   */
-  function AndroidApp(process, uiThread, renderThread, surfaceFlinger,
-      uiDrawType) {
-    this.process = process;
-    this.uiThread = uiThread;
-    this.renderThread = renderThread;
-    this.surfaceFlinger = surfaceFlinger;
-    this.uiDrawType = uiDrawType;
-
-    this.frames_ = undefined;
-    this.inputs_ = undefined;
-  };
-
-  AndroidApp.createForProcessIfPossible = function(process, surfaceFlinger) {
-    var uiThread = process.getThread(process.pid);
-    var uiDrawType = getUiDrawType(uiThread);
-    if (uiDrawType == UI_DRAW_TYPE.NONE) {
-      uiThread = undefined;
-    }
-    var renderThreads = process.findAllThreadsNamed('RenderThread');
-    var renderThread = renderThreads.length == 1 ? renderThreads[0] : undefined;
-
-    if (uiThread || renderThread) {
-      return new AndroidApp(process, uiThread, renderThread, surfaceFlinger,
-        uiDrawType);
-    }
-  }
-
-  AndroidApp.prototype = {
-  /**
-   * Returns a list of all frames in the trace for the app,
-   * constructed on first query.
-   */
-    getFrames: function() {
-      if (!this.frames_) {
-        var uiFrames = getUiThreadDrivenFrames(this);
-        var rtFrames = getRenderThreadDrivenFrames(this);
-        this.frames_ = uiFrames.concat(rtFrames);
-
-        // merge frames by sorting by end timestamp
-        this.frames_.sort(function(a, b) { a.end - b.end });
-      }
-      return this.frames_;
-    },
-
-    /**
-     * Returns list of CounterSamples for each input event enqueued to the app.
-     */
-    getInputSamples: function() {
-      if (!this.inputs_) {
-        this.inputs_ = getInputSamples(this.process);
-      }
-      return this.inputs_;
-    },
-
-    getAnimationAsyncSlices: function() {
-      if (!this.animations_) {
-        this.animations_ = getAnimationAsyncSlices(this.uiThread);
-      }
-      return this.animations_;
-    }
-  };
-
-  return {
-    AndroidApp: AndroidApp
-  };
-});
-</script>
diff --git a/catapult/tracing/tracing/extras/android/android_auditor.html b/catapult/tracing/tracing/extras/android/android_auditor.html
index 0f7c44f..3e2def1 100644
--- a/catapult/tracing/tracing/extras/android/android_auditor.html
+++ b/catapult/tracing/tracing/extras/android/android_auditor.html
@@ -4,16 +4,18 @@
 Use of this source code is governed by a BSD-style license that can be
 found in the LICENSE file.
 -->
+
 <link rel="import" href="/tracing/base/iteration_helpers.html">
+<link rel="import" href="/tracing/base/range_utils.html">
 <link rel="import" href="/tracing/base/statistics.html">
-<link rel="import" href="/tracing/base/units/time_duration.html">
-<link rel="import" href="/tracing/base/units/time_stamp.html">
 <link rel="import" href="/tracing/core/auditor.html">
 <link rel="import" href="/tracing/model/alert.html">
 <link rel="import" href="/tracing/model/frame.html">
-<link rel="import" href="/tracing/model/interaction_record.html">
-<link rel="import" href="/tracing/extras/android/android_model_helper.html">
-<link rel="import" href="/tracing/base/range_utils.html">
+<link rel="import" href="/tracing/model/helpers/android_model_helper.html">
+<link rel="import" href="/tracing/model/thread_time_slice.html">
+<link rel="import" href="/tracing/model/user_model/response_expectation.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <script>
 'use strict';
@@ -24,14 +26,14 @@
 tr.exportTo('tr.e.audits', function() {
   var SCHEDULING_STATE = tr.model.SCHEDULING_STATE;
   var Auditor = tr.c.Auditor;
-  var AndroidModelHelper = tr.e.audits.AndroidModelHelper;
+  var AndroidModelHelper = tr.model.helpers.AndroidModelHelper;
   var ColorScheme = tr.b.ColorScheme;
   var Statistics = tr.b.Statistics;
   var FRAME_PERF_CLASS = tr.model.FRAME_PERF_CLASS;
-  var InteractionRecord = tr.model.InteractionRecord;
   var Alert = tr.model.Alert;
   var EventInfo = tr.model.EventInfo;
-  var TimeDuration = tr.b.u.TimeDuration;
+  var ScalarNumeric = tr.v.ScalarNumeric;
+  var timeDurationInMs = tr.v.Unit.byName.timeDurationInMs;
 
   // TODO: extract from VSYNC, since not all devices have vsync near 60fps
   var EXPECTED_FRAME_TIME_MS = 16.67;
@@ -51,9 +53,6 @@
     return false;
   }
 
-  var Auditor = tr.c.Auditor;
-  var AndroidModelHelper = tr.e.audits.AndroidModelHelper;
-
   function frameMissedDeadline(frame) {
     return frame.args['deadline'] && frame.args['deadline'] < frame.end;
   }
@@ -91,7 +90,7 @@
   function AndroidAuditor(model) {
     Auditor.call(this, model);
 
-    var helper = new AndroidModelHelper(model);
+    var helper = model.getOrCreateHelper(AndroidModelHelper);
     if (helper.apps.length || helper.surfaceFlinger)
       this.helper = helper;
   };
@@ -178,8 +177,8 @@
 
     events.push(frame);
     return new Alert(AndroidAuditor.pathAlertInfo_, start, events,
-                     { 'Time spent': new TimeDuration(duration) });
-  }
+        { 'Time spent': new ScalarNumeric(timeDurationInMs, duration) });
+  };
 
 
   AndroidAuditor.uploadAlertInfo_ = new EventInfo(
@@ -206,10 +205,10 @@
 
     var mPixels = (pixelsUploaded / 1000000).toFixed(2) + ' million';
     var args = { 'Pixels uploaded': mPixels,
-                 'Time spent': new TimeDuration(duration) };
+                 'Time spent': new ScalarNumeric(timeDurationInMs, duration) };
     events.push(frame);
     return new Alert(AndroidAuditor.uploadAlertInfo_, start, events, args);
-  }
+  };
 
   //////////////////////////////////////////////////////////////////////////////
   // UI responsiveness alerts
@@ -242,14 +241,14 @@
     }
 
     var start = Statistics.min(events, getStart);
-    var args = { 'Time spent': new TimeDuration(duration) };
+    var args = { 'Time spent': new ScalarNumeric(timeDurationInMs, duration) };
     args['ListView items ' + (hasInflation ? 'inflated' : 'rebound')] =
         events.length / 2;
     var eventInfo = hasInflation ? AndroidAuditor.ListViewInflateAlertInfo_ :
         AndroidAuditor.ListViewBindAlertInfo_;
     events.push(frame);
     return new Alert(eventInfo, start, events, args);
-  }
+  };
 
 
   AndroidAuditor.measureLayoutAlertInfo_ = new EventInfo(
@@ -270,8 +269,8 @@
     var start = Statistics.min(events, getStart);
     events.push(frame);
     return new Alert(AndroidAuditor.measureLayoutAlertInfo_, start, events,
-                     { 'Time spent': new TimeDuration(duration) });
-  }
+        { 'Time spent': new ScalarNumeric(timeDurationInMs, duration) });
+  };
 
 
   AndroidAuditor.viewDrawAlertInfo_ = new EventInfo(
@@ -294,9 +293,10 @@
     if (!slice || getCpuDuration(slice) < 3)
       return undefined;
     return new Alert(AndroidAuditor.viewDrawAlertInfo_, slice.start,
-                     [slice, frame],
-                     { 'Time spent': new TimeDuration(getCpuDuration(slice)) });
-  }
+        [slice, frame],
+        { 'Time spent': new ScalarNumeric(
+            timeDurationInMs, getCpuDuration(slice)) });
+  };
 
 
   //////////////////////////////////////////////////////////////////////////////
@@ -322,7 +322,8 @@
     var start = Statistics.min(events, getStart);
     events.push(frame);
     return new Alert(AndroidAuditor.blockingGcAlertInfo_, start, events,
-                     { 'Blocked duration': new TimeDuration(blockedDuration) });
+        { 'Blocked duration': new ScalarNumeric(
+            timeDurationInMs, blockedDuration) });
   };
 
 
@@ -341,7 +342,8 @@
     var start = Statistics.min(events, getStart);
     events.push(frame);
     return new Alert(AndroidAuditor.lockContentionAlertInfo_, start, events,
-                     { 'Blocked duration': new TimeDuration(blockedDuration) });
+        { 'Blocked duration': new ScalarNumeric(
+            timeDurationInMs, blockedDuration) });
   };
 
   AndroidAuditor.schedulingAlertInfo_ = new EventInfo(
@@ -373,7 +375,7 @@
         key = 'Not scheduled, but runnable';
       else if (key === SCHEDULING_STATE.UNINTR_SLEEP)
         key = 'Blocking I/O delay';
-      args[key] = new TimeDuration(value);
+      args[key] = new ScalarNumeric(timeDurationInMs, value);
     });
 
     return new Alert(AndroidAuditor.schedulingAlertInfo_, frame.start, [frame],
@@ -533,12 +535,11 @@
       });
 
       var mergerFunction = function(events) {
-        var ir = new InteractionRecord(
+        var ir = new tr.model.um.ResponseExpectation(
             this.model, 'Rendering',
-            ColorScheme.getColorIdForGeneralPurposeString('mt_rendering'),
             events[0].min,
             events[events.length - 1].max - events[0].min);
-        this.model.addInteractionRecord(ir);
+        this.model.userModel.expectations.push(ir);
       }.bind(this);
       tr.b.mergeRanges(tr.b.convertEventsToRanges(events), 30, mergerFunction);
     },
@@ -550,12 +551,11 @@
       });
 
       var mergerFunction = function(events) {
-        var ir = new InteractionRecord(
+        var ir = new tr.model.um.ResponseExpectation(
             this.model, 'Input',
-            ColorScheme.getColorIdForGeneralPurposeString('mt_input'),
             events[0].min,
             events[events.length - 1].max - events[0].min);
-        this.model.addInteractionRecord(ir);
+        this.model.userModel.expectations.push(ir);
       }.bind(this);
       var inputRanges = inputSamples.map(function(sample) {
         return tr.b.Range.fromExplicitRange(sample.timestamp, sample.timestamp);
@@ -722,7 +722,7 @@
         return expectedParentNames.some(function(name) {
           return name in parentNames;
         });
-      }
+      };
 
 
       // Set EventInfo on the slice if it matches title, and parent.
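Note: the alert arguments in the hunks above illustrate the wider migration in this change, from the old tr.b.u.TimeDuration wrapper to a ScalarNumeric that carries an explicit unit. A minimal sketch of the new pattern, assuming the /tracing/value/numeric.html and /tracing/value/unit.html modules are loaded:

  // Sketch only: the unit now travels with the value instead of being implied.
  var timeDurationInMs = tr.v.Unit.byName.timeDurationInMs;
  var timeSpent = new tr.v.ScalarNumeric(timeDurationInMs, 9);  // 9 ms
  // timeSpent is then passed as an alert argument, e.g.
  // { 'Time spent': timeSpent }.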
diff --git a/catapult/tracing/tracing/extras/android/android_auditor_test.html b/catapult/tracing/tracing/extras/android/android_auditor_test.html
index 475ffe2..9cecad7 100644
--- a/catapult/tracing/tracing/extras/android/android_auditor_test.html
+++ b/catapult/tracing/tracing/extras/android/android_auditor_test.html
@@ -6,9 +6,11 @@
 -->
 
 <link rel="import" href="/tracing/core/test_utils.html">
-<link rel="import" href="/tracing/model/frame.html">
 <link rel="import" href="/tracing/extras/android/android_auditor.html">
 <link rel="import" href="/tracing/extras/importer/linux_perf/ftrace_importer.html">
+<link rel="import" href="/tracing/model/frame.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <script>
 'use strict';
@@ -18,7 +20,8 @@
   var newSliceEx = tr.c.TestUtils.newSliceEx;
   var FRAME_PERF_CLASS = tr.model.FRAME_PERF_CLASS;
   var newThreadSlice = tr.c.TestUtils.newThreadSlice;
-  var TimeDuration = tr.b.u.TimeDuration;
+  var ScalarNumeric = tr.v.ScalarNumeric;
+  var timeDurationInMs = tr.v.Unit.byName.timeDurationInMs;
 
   test('saveLayerAlert_badAlpha', function() {
     var model = tr.c.TestUtils.newModelWithAuditor(function(model) {
@@ -77,7 +80,8 @@
     assert.equal(model.alerts.length, 1);
 
     var alert = model.alerts[0];
-    assert.deepEqual(alert.args['Time spent'], new TimeDuration(9));
+    assert.deepEqual(alert.args['Time spent'],
+        new ScalarNumeric(timeDurationInMs, 9));
     assert.equal(alert.associatedEvents.length, 3);
   });
 
@@ -95,7 +99,8 @@
 
     var alert = model.alerts[0];
     assert.equal(alert.args['Pixels uploaded'], '1.00 million');
-    assert.deepEqual(alert.args['Time spent'], new TimeDuration(15));
+    assert.deepEqual(alert.args['Time spent'],
+        new ScalarNumeric(timeDurationInMs, 15));
     assert.equal(alert.associatedEvents.length, 2);
   });
 
@@ -134,12 +139,14 @@
     assert.equal(model.alerts.length, 2);
     var alert = model.alerts[0];
     assert.equal(alert.args['ListView items rebound'], 2);
-    assert.deepEqual(alert.args['Time spent'], new TimeDuration(20));
+    assert.deepEqual(alert.args['Time spent'],
+        new ScalarNumeric(timeDurationInMs, 20));
     assert.equal(alert.associatedEvents.length, 5);
 
     var alert = model.alerts[1];
     assert.equal(alert.args['ListView items inflated'], 1);
-    assert.deepEqual(alert.args['Time spent'], new TimeDuration(20));
+    assert.deepEqual(alert.args['Time spent'],
+        new ScalarNumeric(timeDurationInMs, 20));
     assert.equal(alert.associatedEvents.length, 3); // note: inflate not assoc.
   });
 
@@ -157,7 +164,8 @@
     assert.equal(model.alerts.length, 1);
 
     var alert = model.alerts[0];
-    assert.deepEqual(alert.args['Time spent'], new TimeDuration(10));
+    assert.deepEqual(alert.args['Time spent'],
+        new ScalarNumeric(timeDurationInMs, 10));
     assert.equal(alert.associatedEvents.length, 3);
   });
 
@@ -178,8 +186,10 @@
     }, tr.e.audits.AndroidAuditor);
 
     assert.equal(model.alerts.length, 2);
-    assert.deepEqual(model.alerts[0].args['Time spent'], new TimeDuration(10));
-    assert.deepEqual(model.alerts[1].args['Time spent'], new TimeDuration(10));
+    assert.deepEqual(model.alerts[0].args['Time spent'],
+        new ScalarNumeric(timeDurationInMs, 10));
+    assert.deepEqual(model.alerts[1].args['Time spent'],
+        new ScalarNumeric(timeDurationInMs, 10));
   });
 
   test('blockingGcAlert', function() {
@@ -199,9 +209,9 @@
 
     assert.equal(model.alerts.length, 2);
     assert.deepEqual(model.alerts[0].args['Blocked duration'],
-        new TimeDuration(15));
+        new ScalarNumeric(timeDurationInMs, 15));
     assert.deepEqual(model.alerts[1].args['Blocked duration'],
-        new TimeDuration(15));
+        new ScalarNumeric(timeDurationInMs, 15));
   });
 
   test('lockContentionAlert', function() {
@@ -216,7 +226,7 @@
 
     assert.equal(model.alerts.length, 1);
     assert.deepEqual(model.alerts[0].args['Blocked duration'],
-        new TimeDuration(15));
+        new ScalarNumeric(timeDurationInMs, 15));
   });
 
   test('schedulingAlerts', function() {
@@ -233,7 +243,7 @@
     var alert = model.alerts[0];
     assert.equal(alert.info.title, 'Scheduling delay');
     assert.deepEqual(alert.args['Not scheduled, but runnable'],
-        new TimeDuration(10));
+        new ScalarNumeric(timeDurationInMs, 10));
 
     model = tr.c.TestUtils.newModelWithAuditor(function(model) {
       var uiThread = model.getOrCreateProcess(1).getOrCreateThread(1);
@@ -247,7 +257,8 @@
     assert.equal(model.alerts.length, 1);
     var alert = model.alerts[0];
     assert.equal(alert.info.title, 'Scheduling delay');
-    assert.deepEqual(alert.args['Blocking I/O delay'], new TimeDuration(10));
+    assert.deepEqual(alert.args['Blocking I/O delay'],
+        new ScalarNumeric(timeDurationInMs, 10));
   });
 
   test('addFramesToModel', function() {
@@ -319,11 +330,11 @@
       var pushInfoSlice = function(slice) {
         eventsExpectingInfo.push(slice);
         uiThread.sliceGroup.pushSlice(slice);
-      }
+      };
       var pushNonInfoSlice = function(slice) {
         eventsNotExpectingInfo.push(slice);
         uiThread.sliceGroup.pushSlice(slice);
-      }
+      };
 
       pushInfoSlice(newSliceEx(
           {title: 'performTraversals', start: 0, duration: 10}));
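Note: the tests above all follow the same scaffolding: build a model, run a single auditor over it, then inspect model.alerts. A condensed sketch of that pattern (process/thread ids and slice values here are illustrative only):

  // Sketch only: construct a model and run AndroidAuditor over it.
  var model = tr.c.TestUtils.newModelWithAuditor(function(model) {
    var uiThread = model.getOrCreateProcess(1).getOrCreateThread(1);
    uiThread.sliceGroup.pushSlice(tr.c.TestUtils.newSliceEx(
        {title: 'performTraversals', start: 0, duration: 10}));
  }, tr.e.audits.AndroidAuditor);
  // model.alerts and per-process frames are now populated by the auditor.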
diff --git a/catapult/tracing/tracing/extras/android/android_model_helper.html b/catapult/tracing/tracing/extras/android/android_model_helper.html
deleted file mode 100644
index 8e01caf..0000000
--- a/catapult/tracing/tracing/extras/android/android_model_helper.html
+++ /dev/null
@@ -1,97 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/base/base.html">
-<link rel="import" href="/tracing/core/auditor.html">
-<link rel="import" href="/tracing/extras/android/android_app.html">
-<link rel="import" href="/tracing/extras/android/android_surface_flinger.html">
-<link rel="import" href="/tracing/base/range_utils.html">
-
-<script>
-'use strict';
-
-/**
- * @fileoverview Class for managing android-specific model meta data,
- * such as rendering apps, frames rendered, and SurfaceFlinger.
- */
-tr.exportTo('tr.e.audits', function() {
-  var AndroidApp = tr.e.audits.AndroidApp;
-  var AndroidSurfaceFlinger = tr.e.audits.AndroidSurfaceFlinger;
-
-  var IMPORTANT_SURFACE_FLINGER_SLICES = {
-    'doComposition' : true,
-    'updateTexImage' : true,
-    'postFramebuffer' : true
-  };
-  var IMPORTANT_UI_THREAD_SLICES = {
-    'Choreographer#doFrame' : true,
-    'performTraversals' : true,
-    'deliverInputEvent' : true
-  };
-  var IMPORTANT_RENDER_THREAD_SLICES = {
-    'doFrame' : true
-  };
-
-  function iterateImportantThreadSlices(thread, important, callback) {
-    if (!thread)
-      return;
-
-    thread.sliceGroup.slices.forEach(function(slice) {
-      if (slice.title in important)
-        callback(slice);
-    });
-  }
-
-  /**
-   * Model for Android-specific data.
-   * @constructor
-   */
-  function AndroidModelHelper(model) {
-    this.model = model;
-    this.apps = [];
-    this.surfaceFlinger = undefined;
-
-    var processes = model.getAllProcesses();
-    for (var i = 0; i < processes.length && !this.surfaceFlinger; i++) {
-      this.surfaceFlinger =
-          AndroidSurfaceFlinger.createForProcessIfPossible(processes[i]);
-    }
-
-    model.getAllProcesses().forEach(function(process) {
-      var app = AndroidApp.createForProcessIfPossible(
-          process, this.surfaceFlinger);
-      if (app)
-        this.apps.push(app);
-    }, this);
-  };
-
-  AndroidModelHelper.prototype = {
-    iterateImportantSlices: function(callback) {
-      if (this.surfaceFlinger) {
-        iterateImportantThreadSlices(
-            this.surfaceFlinger.thread,
-            IMPORTANT_SURFACE_FLINGER_SLICES,
-            callback);
-      }
-
-      this.apps.forEach(function(app) {
-        iterateImportantThreadSlices(
-            app.uiThread,
-            IMPORTANT_UI_THREAD_SLICES,
-            callback);
-        iterateImportantThreadSlices(
-            app.renderThread,
-            IMPORTANT_RENDER_THREAD_SLICES,
-            callback);
-      });
-    }
-  };
-
-  return {
-    AndroidModelHelper: AndroidModelHelper
-  };
-});
-</script>
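Note: this file is removed because the helper now lives under /tracing/model/helpers/ and is obtained through the model instead of being constructed directly, as the android_auditor.html hunks above show. A minimal sketch of the new call site, assuming the relocated module keeps the same interface:

  // Sketch only: obtain the shared helper from the model.
  var helper = model.getOrCreateHelper(tr.model.helpers.AndroidModelHelper);
  if (helper.apps.length || helper.surfaceFlinger) {
    helper.iterateImportantSlices(function(slice) {
      // slice is one of the "important" UI, RenderThread or
      // SurfaceFlinger slices enumerated in the helper.
    });
  }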
diff --git a/catapult/tracing/tracing/extras/android/android_model_helper_test.html b/catapult/tracing/tracing/extras/android/android_model_helper_test.html
deleted file mode 100644
index 5b269dc..0000000
--- a/catapult/tracing/tracing/extras/android/android_model_helper_test.html
+++ /dev/null
@@ -1,227 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/core/test_utils.html">
-<link rel="import" href="/tracing/extras/android/android_auditor.html">
-<link rel="import" href="/tracing/extras/importer/linux_perf/ftrace_importer.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  var AndroidModelHelper = tr.e.audits.AndroidModelHelper;
-  var newAsyncSliceNamed = tr.c.TestUtils.newAsyncSliceNamed;
-  var newSliceEx = tr.c.TestUtils.newSliceEx;
-  var newCounterNamed = tr.c.TestUtils.newCounterNamed;
-  var newCounterSeries = tr.c.TestUtils.newCounterSeries;
-
-  function createSurfaceFlingerWithVsyncs(model) {
-      if (model.getProcess(2))
-        throw new Error('process already exists');
-
-      var sfProcess = model.getOrCreateProcess(2);
-      var sfThread = sfProcess.getOrCreateThread(2); // main thread, tid = pid
-      sfThread.name = '/system/bin/surfaceflinger';
-
-      // ensure slicegroup has data
-      sfThread.sliceGroup.pushSlice(newSliceEx({
-        title: 'doComposition',
-        start: 8,
-        duration: 2
-      }));
-
-      var counter = sfProcess.getOrCreateCounter('android', 'VSYNC');
-      var series = newCounterSeries();
-      for (var i = 0; i <= 10; i++) {
-        series.addCounterSample(i * 10, i % 2);
-      }
-      counter.addSeries(series);
-  }
-
-  /*
-   * List of customizeModelCallbacks which produce different 80ms frames,
-   * each starting at 10ms, and with a single important slice
-   */
-  var SINGLE_FRAME_CUSTOM_MODELS = [
-    function(model) {
-      // UI thread only
-      var uiThread = model.getOrCreateProcess(120).getOrCreateThread(120);
-      uiThread.sliceGroup.pushSlice(newSliceEx(
-          {title: 'performTraversals', start: 10, duration: 80}));
-
-      model.uiThread = uiThread;
-    },
-
-    function(model) {
-      // RenderThread only
-      var renderThread = model.getOrCreateProcess(120).getOrCreateThread(200);
-      renderThread.name = 'RenderThread';
-      renderThread.sliceGroup.pushSlice(newSliceEx(
-          {title: 'doFrame', start: 10, duration: 80}));
-
-      model.renderThread = renderThread;
-    },
-
-    function(model) {
-      var uiThread = model.getOrCreateProcess(120).getOrCreateThread(120);
-
-      // UI thread time - 19 (from 10 to 29)
-      uiThread.asyncSliceGroup.push(
-        newAsyncSliceNamed('deliverInputEvent', 10, 9, uiThread, uiThread));
-      uiThread.sliceGroup.pushSlice(newSliceEx(
-          {title: 'performTraversals', start: 20, duration: 10}));
-      uiThread.sliceGroup.pushSlice(newSliceEx(
-          {title: 'draw', start: 20, duration: 8}));
-      uiThread.sliceGroup.pushSlice(newSliceEx(
-          {title: 'Record View#draw()', start: 20, duration: 8}));
-
-      // RenderThread time - 61 (from 29 to 90)
-      var renderThread = model.getOrCreateProcess(120).getOrCreateThread(200);
-      renderThread.name = 'RenderThread';
-      renderThread.sliceGroup.pushSlice(newSliceEx(
-          {title: 'DrawFrame', start: 29, duration: 61}));
-      renderThread.sliceGroup.pushSlice(newSliceEx(
-          {title: 'syncFrameState', start: 29, duration: 1}));
-
-      model.uiThread = uiThread;
-      model.renderThread = renderThread;
-    }
-  ];
-
-  test('getThreads', function() {
-    SINGLE_FRAME_CUSTOM_MODELS.forEach(function(customizeModelCallback) {
-      var model = tr.c.TestUtils.newModel(customizeModelCallback);
-      var helper = new AndroidModelHelper(model);
-      assert.equal(helper.apps[0].uiThread, model.uiThread);
-      assert.equal(helper.apps[0].renderThread, model.renderThread);
-    });
-  });
-
-  test('iterateImportantSlices', function() {
-    SINGLE_FRAME_CUSTOM_MODELS.forEach(function(customizeModelCallback) {
-      var model = tr.c.TestUtils.newModel(customizeModelCallback);
-      var helper = new AndroidModelHelper(model);
-
-      var seen = 0;
-      helper.iterateImportantSlices(function(importantSlice) {
-        assert.isTrue(importantSlice instanceof tr.model.Slice);
-        seen++;
-      });
-      assert.equal(seen, 1);
-    });
-  });
-
-  test('getFrames', function() {
-    SINGLE_FRAME_CUSTOM_MODELS.forEach(function(customizeModelCallback) {
-      var model = tr.c.TestUtils.newModel(customizeModelCallback);
-      var helper = new AndroidModelHelper(model);
-      assert.equal(helper.apps.length, 1);
-
-      var frames = helper.apps[0].getFrames();
-      assert.equal(frames.length, 1);
-      assert.closeTo(frames[0].totalDuration, 80, 1e-5);
-
-      assert.closeTo(frames[0].start, 10, 1e-5);
-      assert.closeTo(frames[0].end, 90, 1e-5);
-    });
-  });
-
-  test('surfaceFlingerVsyncs', function() {
-    var model = tr.c.TestUtils.newModel(createSurfaceFlingerWithVsyncs);
-    var helper = new AndroidModelHelper(model);
-    assert.isTrue(helper.surfaceFlinger.hasVsyncs);
-
-    // test querying the vsyncs
-    assert.closeTo(helper.surfaceFlinger.getFrameKickoff(5), 0, 1e-5);
-    assert.closeTo(helper.surfaceFlinger.getFrameDeadline(95), 100, 1e-5);
-
-    assert.closeTo(helper.surfaceFlinger.getFrameKickoff(10), 10, 1e-5);
-    assert.closeTo(helper.surfaceFlinger.getFrameDeadline(90), 100, 1e-5);
-
-    // test undefined behavior outside of vsyncs.
-    assert.isUndefined(helper.surfaceFlinger.getFrameKickoff(-5));
-    assert.isUndefined(helper.surfaceFlinger.getFrameDeadline(105));
-  });
-
-  test('frameVsyncInterop', function() {
-    var model = tr.c.TestUtils.newModel(function(model) {
-      // app - 3 good, 3 bad frames
-      var uiThread = model.getOrCreateProcess(1).getOrCreateThread(1);
-      uiThread.sliceGroup.pushSlice(newSliceEx(
-          {title: 'performTraversals', start: 1, duration: 8}));
-      uiThread.sliceGroup.pushSlice(newSliceEx(
-          {title: 'performTraversals', start: 10, duration: 8}));
-      uiThread.sliceGroup.pushSlice(newSliceEx(
-          {title: 'performTraversals', start: 20, duration: 8}));
-      uiThread.sliceGroup.pushSlice(newSliceEx(
-          {title: 'performTraversals', start: 31, duration: 11}));
-      uiThread.sliceGroup.pushSlice(newSliceEx(
-          {title: 'performTraversals', start: 45, duration: 6}));
-      uiThread.sliceGroup.pushSlice(newSliceEx(
-          {title: 'performTraversals', start: 60, duration: 20}));
-
-      // surface flinger - vsync every 10ms
-      createSurfaceFlingerWithVsyncs(model);
-    });
-    var helper = new AndroidModelHelper(model);
-
-    var frames = helper.apps[0].getFrames();
-    assert.equal(frames.length, 6);
-    for (var i = 0; i < 6; i++) {
-      var shouldMissDeadline = i >= 3;
-      var missedDeadline = frames[i].args['deadline'] < frames[i].end;
-      assert.equal(shouldMissDeadline, missedDeadline);
-    }
-  });
-
-  test('appInputs', function() {
-    var model = tr.c.TestUtils.newModel(function(model) {
-      var process = model.getOrCreateProcess(120);
-      var uiThread = process.getOrCreateThread(120);
-      uiThread.sliceGroup.pushSlice(newSliceEx(
-          {title: 'performTraversals', start: 20, duration: 4}));
-      uiThread.sliceGroup.pushSlice(newSliceEx(
-          {title: 'performTraversals', start: 40, duration: 4}));
-
-      var counter = process.getOrCreateCounter('android', 'aq:pending:foo');
-      var series = newCounterSeries();
-      series.addCounterSample(10, 1);
-      series.addCounterSample(20, 0);
-      series.addCounterSample(30, 1);
-      series.addCounterSample(40, 2);
-      series.addCounterSample(50, 0);
-      counter.addSeries(series);
-    });
-    var helper = new AndroidModelHelper(model);
-    assert.equal(helper.apps.length, 1);
-
-    var inputSamples = helper.apps[0].getInputSamples();
-    assert.equal(inputSamples.length, 3);
-    assert.equal(inputSamples[0].timestamp, 10);
-    assert.equal(inputSamples[1].timestamp, 30);
-    assert.equal(inputSamples[2].timestamp, 40);
-  });
-
-  test('appAnimations', function() {
-    var model = tr.c.TestUtils.newModel(function(model) {
-      var process = model.getOrCreateProcess(120);
-      var uiThread = process.getOrCreateThread(120);
-      uiThread.sliceGroup.pushSlice(newSliceEx(
-          {title: 'performTraversals', start: 10, duration: 10}));
-      uiThread.asyncSliceGroup.push(newAsyncSliceNamed('animator:foo', 0, 10,
-                                                       uiThread, uiThread));
-    });
-    var helper = new AndroidModelHelper(model);
-    assert.equal(helper.apps.length, 1);
-
-    var animations = helper.apps[0].getAnimationAsyncSlices();
-    assert.equal(animations.length, 1);
-    assert.equal(animations[0].start, 0);
-    assert.equal(animations[0].end, 10);
-  });
-});
-</script>
diff --git a/catapult/tracing/tracing/extras/android/android_surface_flinger.html b/catapult/tracing/tracing/extras/android/android_surface_flinger.html
deleted file mode 100644
index 53393b7..0000000
--- a/catapult/tracing/tracing/extras/android/android_surface_flinger.html
+++ /dev/null
@@ -1,105 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/sorted_array_utils.html">
-
-<script>
-'use strict';
-
-/**
- * @fileoverview Class for representing SurfaceFlinger process and its Vsyncs.
- */
-tr.exportTo('tr.e.audits', function() {
-  var findLowIndexInSortedArray = tr.b.findLowIndexInSortedArray;
-
-  var VSYNC_SF_NAME = 'android.VSYNC-sf';
-  var VSYNC_APP_NAME = 'android.VSYNC-app';
-  var VSYNC_FALLBACK_NAME = 'android.VSYNC';
-
-  // when sampling vsync, push samples back by this much to ensure
-  // frame start samples *between* vsyncs
-  var TIMESTAMP_FUDGE_MS = 0.01;
-
-  function getVsyncTimestamps(process, counterName) {
-
-    var vsync = process.counters[counterName];
-    if (!vsync)
-      vsync = process.counters[VSYNC_FALLBACK_NAME];
-
-    if (vsync && vsync.numSeries == 1 && vsync.numSamples > 1)
-      return vsync.series[0].timestamps;
-    return undefined;
-  }
-
-  /**
-   * Model for SurfaceFlinger specific data.
-   * @constructor
-   */
-  function AndroidSurfaceFlinger(process, thread) {
-    this.process = process;
-    this.thread = thread;
-
-    this.appVsync_ = undefined;
-    this.sfVsync_ = undefined;
-
-    this.appVsyncTimestamps_ = getVsyncTimestamps(process, VSYNC_APP_NAME);
-    this.sfVsyncTimestamps_ = getVsyncTimestamps(process, VSYNC_SF_NAME);
-  };
-
-  AndroidSurfaceFlinger.createForProcessIfPossible = function(process) {
-    var mainThread = process.getThread(process.pid);
-
-    // newer versions - main thread, lowercase name, preceding forward slash
-    if (mainThread && mainThread.name &&
-        /surfaceflinger/.test(mainThread.name))
-      return new AndroidSurfaceFlinger(process, mainThread);
-
-    // older versions - another thread is named SurfaceFlinger
-    var primaryThreads = process.findAllThreadsNamed('SurfaceFlinger');
-    if (primaryThreads.length == 1)
-      return new AndroidSurfaceFlinger(process, primaryThreads[0]);
-    return undefined;
-  };
-
-  AndroidSurfaceFlinger.prototype = {
-    get hasVsyncs() {
-      return !!this.appVsyncTimestamps_ && !!this.sfVsyncTimestamps_;
-    },
-
-    getFrameKickoff: function(timestamp) {
-      if (!this.hasVsyncs)
-        throw new Error('cannot query vsync info without vsyncs');
-
-      var firstGreaterIndex =
-          findLowIndexInSortedArray(this.appVsyncTimestamps_,
-                                    function(x) { return x; },
-                                    timestamp + TIMESTAMP_FUDGE_MS);
-
-      if (firstGreaterIndex < 1)
-        return undefined;
-      return this.appVsyncTimestamps_[firstGreaterIndex - 1];
-    },
-
-    getFrameDeadline: function(timestamp) {
-      if (!this.hasVsyncs)
-        throw new Error('cannot query vsync info without vsyncs');
-
-      var firstGreaterIndex =
-          findLowIndexInSortedArray(this.sfVsyncTimestamps_,
-                                    function(x) { return x; },
-                                    timestamp + TIMESTAMP_FUDGE_MS);
-      if (firstGreaterIndex >= this.sfVsyncTimestamps_.length)
-        return undefined;
-      return this.sfVsyncTimestamps_[firstGreaterIndex];
-    }
-  };
-
-  return {
-    AndroidSurfaceFlinger: AndroidSurfaceFlinger
-  };
-});
-</script>
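Note: the deleted SurfaceFlinger helper (also relocated under /tracing/model/helpers/) derives per-frame kickoff and deadline times from the app and SurfaceFlinger vsync counters. With vsyncs every 10 ms, as in the tests above, a frame sampled at t=5 kicked off at the vsync at 0, and a frame ending at t=95 has a deadline at 100. A usage sketch, assuming counters named android.VSYNC-app and android.VSYNC-sf are present in the trace:

  // Sketch only: query vsync-derived frame boundaries.
  if (helper.surfaceFlinger && helper.surfaceFlinger.hasVsyncs) {
    helper.surfaceFlinger.getFrameKickoff(5);    // -> 0
    helper.surfaceFlinger.getFrameDeadline(95);  // -> 100
  }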
diff --git a/catapult/tracing/tracing/extras/chrome/cc/input_latency_async_slice.html b/catapult/tracing/tracing/extras/chrome/cc/input_latency_async_slice.html
index ce1e651..4487193 100644
--- a/catapult/tracing/tracing/extras/chrome/cc/input_latency_async_slice.html
+++ b/catapult/tracing/tracing/extras/chrome/cc/input_latency_async_slice.html
@@ -4,7 +4,7 @@
 Use of this source code is governed by a BSD-style license that can be
 found in the LICENSE file.
 -->
-<link rel="import" href="/tracing/extras/chrome/chrome_model_helper.html">
+<link rel="import" href="/tracing/model/helpers/chrome_model_helper.html">
 <link rel="import" href="/tracing/model/async_slice.html">
 <link rel="import" href="/tracing/model/event_set.html">
 
@@ -160,7 +160,9 @@
 
     getRendererHelper: function(sourceSlices) {
       var traceModel = this.startThread.parent.model;
-      if (!tr.e.audits.ChromeModelHelper.supportsModel(traceModel))
+      var modelHelper = traceModel.getOrCreateHelper(
+          tr.model.helpers.ChromeModelHelper);
+      if (!modelHelper)
         return undefined;
 
       var mainThread = undefined;
@@ -178,7 +180,6 @@
           break;
       }
 
-      var modelHelper = new tr.e.audits.ChromeModelHelper(traceModel);
       var rendererHelpers = modelHelper.rendererHelpers;
 
       var pids = Object.keys(rendererHelpers);
diff --git a/catapult/tracing/tracing/extras/chrome/cc/layer_tree_impl.html b/catapult/tracing/tracing/extras/chrome/cc/layer_tree_impl.html
index 1d341b4..ca98c01 100644
--- a/catapult/tracing/tracing/extras/chrome/cc/layer_tree_impl.html
+++ b/catapult/tracing/tracing/extras/chrome/cc/layer_tree_impl.html
@@ -5,7 +5,7 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/tracing/extras/chrome/chrome_model_helper.html">
+<link rel="import" href="/tracing/model/helpers/chrome_model_helper.html">
 <link rel="import" href="/tracing/extras/chrome/cc/constants.html">
 <link rel="import" href="/tracing/extras/chrome/cc/layer_impl.html">
 <link rel="import" href="/tracing/model/object_instance.html">
@@ -48,19 +48,16 @@
         this.tracedInputLatencies = [];
 
         var ownProcess = this.objectInstance.parent;
-        var model = ownProcess.model;
-        if (tr.e.audits.ChromeModelHelper.supportsModel(model))
-          this._initializeTracedInputLatencies(model);
+        var modelHelper = ownProcess.model.getOrCreateHelper(
+            tr.model.helpers.ChromeModelHelper);
+        if (modelHelper)
+          this._initializeTracedInputLatencies(modelHelper);
       }
     },
 
-    _initializeTracedInputLatencies: function(model) {
-      var modelHelper = new tr.e.audits.ChromeModelHelper(model);
-      if (!modelHelper.browserHelper)
-        return;
-
+    _initializeTracedInputLatencies: function(modelHelper) {
       var latencyEvents = modelHelper.browserHelper.getLatencyEventsInRange(
-          model.bounds);
+          modelHelper.model.bounds);
 
       // Convert all ids to InputLatency Async objects.
       latencyEvents.forEach(function(event) {
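Note: both call sites above follow the same new pattern: instead of checking ChromeModelHelper.supportsModel() and then constructing a helper, callers ask the model for one and treat an undefined result as "not a Chrome trace". A minimal sketch, assuming the relocated tr.model.helpers.ChromeModelHelper module is imported:

  // Sketch only: supportsModel() + constructor is replaced by one lookup.
  var modelHelper = model.getOrCreateHelper(tr.model.helpers.ChromeModelHelper);
  if (modelHelper && modelHelper.browserHelper) {
    var latencyEvents = modelHelper.browserHelper.getLatencyEventsInRange(
        modelHelper.model.bounds);
  }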
diff --git a/catapult/tracing/tracing/extras/chrome/chrome_auditor.html b/catapult/tracing/tracing/extras/chrome/chrome_auditor.html
index e7f0fd5..971505a 100644
--- a/catapult/tracing/tracing/extras/chrome/chrome_auditor.html
+++ b/catapult/tracing/tracing/extras/chrome/chrome_auditor.html
@@ -10,10 +10,10 @@
 <link rel="import" href="/tracing/core/auditor.html">
 <link rel="import"
     href="/tracing/extras/chrome/cc/input_latency_async_slice.html">
-<link rel="import" href="/tracing/extras/chrome/chrome_model_helper.html">
+<link rel="import" href="/tracing/model/helpers/chrome_model_helper.html">
 <link rel="import"
     href="/tracing/extras/chrome/chrome_user_friendly_category_driver.html">
-<link rel="import" href="/tracing/extras/rail/rail_ir_finder.html">
+<link rel="import" href="/tracing/model/constants.html">
 <link rel="import" href="/tracing/model/event_info.html">
 
 <script>
@@ -32,14 +32,11 @@
   function ChromeAuditor(model) {
     Auditor.call(this, model);
 
-    if (tr.e.audits.ChromeModelHelper.supportsModel(this.model)) {
-      var modelHelper = new tr.e.audits.ChromeModelHelper(this.model);
-
+    var modelHelper = this.model.getOrCreateHelper(
+        tr.model.helpers.ChromeModelHelper);
+    if (modelHelper && modelHelper.browserHelper) {
       // Must be a browserHelper in order to do audits.
-      if (modelHelper.browserHelper === undefined)
-        this.modelHelper = undefined;
-      else
-        this.modelHelper = modelHelper;
+      this.modelHelper = modelHelper;
     } else {
       this.modelHelper = undefined;
     }
@@ -72,26 +69,10 @@
       if (!this.modelHelper)
         return;
 
-      if (!tr.e.rail.RAILIRFinder.supportsModelHelper(this.modelHelper))
-        return;
-
-      var rirf = new tr.e.rail.RAILIRFinder(this.model, this.modelHelper);
-      var rirs = undefined;
-      try {
-        rirs = rirf.findAllInteractionRecords();
-        // There are not currently any known cases when this could throw.
-      } catch (error) {
-        this.model.importWarning({
-          type: 'RAILIRFinder',
-          message: error,
-          showToUser: true
-        });
-        return;
-      }
-
-      rirs.forEach(function(ir) {
-        this.model.addInteractionRecord(ir);
-      }, this);
+      this.model.replacePIDRefsInPatchups(
+          tr.model.BROWSER_PROCESS_PID_REF,
+          this.modelHelper.browserProcess.pid);
+      this.model.applyObjectRefPatchups();
     }
   };
 
diff --git a/catapult/tracing/tracing/extras/chrome/chrome_auditor_test.html b/catapult/tracing/tracing/extras/chrome/chrome_auditor_test.html
index 6ac6c5a..c31231f 100644
--- a/catapult/tracing/tracing/extras/chrome/chrome_auditor_test.html
+++ b/catapult/tracing/tracing/extras/chrome/chrome_auditor_test.html
@@ -9,6 +9,7 @@
 <link rel="import" href="/tracing/core/test_utils.html">
 <link rel="import" href="/tracing/extras/chrome/chrome_auditor.html">
 <link rel="import" href="/tracing/model/model.html">
+<link rel="import" href="/tracing/model/scoped_id.html">
 
 <script>
 'use strict';
@@ -16,21 +17,17 @@
 tr.b.unittest.testSuite(function() {
   var ColorScheme = tr.b.ColorScheme;
 
-  function createModelWithChromeAuditor(customizeModelCallback) {
-    return tr.c.TestUtils.newModelWithAuditor(function(m) {
-      m.browserProcess = m.getOrCreateProcess(1);
-      m.browserMain = m.browserProcess.getOrCreateThread(2);
-      m.browserMain.name = 'CrBrowserMain';
+  function createMainProcesses(m) {
+    m.browserProcess = m.getOrCreateProcess(1);
+    m.browserMain = m.browserProcess.getOrCreateThread(2);
+    m.browserMain.name = 'CrBrowserMain';
 
-      m.renderer1 = m.getOrCreateProcess(3);
-      m.renderer1Main = m.renderer1.getOrCreateThread(4);
-      m.renderer1Main.name = 'CrRendererMain';
+    m.renderer1 = m.getOrCreateProcess(3);
+    m.renderer1Main = m.renderer1.getOrCreateThread(4);
+    m.renderer1Main.name = 'CrRendererMain';
 
-      m.renderer1Compositor = m.renderer1.getOrCreateThread(4);
-      m.renderer1Compositor.name = 'Compositor';
-
-      customizeModelCallback(m);
-    }, tr.e.audits.ChromeAuditor);
+    m.renderer1Compositor = m.renderer1.getOrCreateThread(4);
+    m.renderer1Compositor.name = 'Compositor';
   }
 
   function newInputLatencyEvent(tsStart, tsEnd, opt_args) {
@@ -51,7 +48,8 @@
   }
 
   test('simple', function() {
-    var m = createModelWithChromeAuditor(function(m) {
+    tr.c.TestUtils.newModelWithAuditor(function(m) {
+      createMainProcesses(m);
       var bAsyncSlices = m.browserMain.asyncSliceGroup;
       bAsyncSlices.push(newInputLatencyEvent(100, 130));
       bAsyncSlices.push(newInputLatencyEvent(116, 150));
@@ -70,7 +68,41 @@
       rm1Slices.pushSlice(newImplRenderingStatsEvent(213));
       rm1Slices.pushSlice(newImplRenderingStatsEvent(230));
       rm1Slices.pushSlice(newImplRenderingStatsEvent(247));
+    }, tr.e.audits.ChromeAuditor);
+  });
+
+  test('refsToBrowser', function() {
+    var events = [
+      // An object created and snapshotted in the browser process.
+      {ts: 1000, pid: 1, tid: 2, ph: 'N', cat: 'c', id: '0x1000', name: 'a',
+       args: {}},
+      {ts: 1100, pid: 1, tid: 2, ph: 'O', cat: 'c', id: '0x1000', name: 'a',
+       args: {snapshot: {foo: 12345}}},
+      {ts: 1300, pid: 1, tid: 2, ph: 'D', cat: 'c', id: '0x1000', name: 'a',
+       args: {}},
+
+      // A reference to the object in the browser from the renderer process.
+      {ts: 1200, pid: 3, tid: 4, ph: 'X', cat: 'c', name: 'b', dur: 100,
+       args: {bar: {pid_ref: -1, id_ref: '0x1000'}}}
+    ];
+
+    var m = tr.c.TestUtils.newModelWithEvents([events], {
+      shiftWorldToZero: false,
+      pruneEmptyContainers: false,
+      customizeModelCallback: createMainProcesses,
+      auditorConstructors: [tr.e.audits.ChromeAuditor]
     });
+
+    var browserObject = m.browserProcess.objects.getObjectInstanceAt(
+        new tr.model.ScopedId('ptr', '0x1000'), 1.2);
+    assert.isDefined(browserObject);
+    var foo = browserObject.getSnapshotAt(1.2);
+    assert.isDefined(foo);
+
+    assert.equal(m.renderer1Main.sliceGroup.slices.length, 1);
+    var slice = m.renderer1Main.sliceGroup.slices[0];
+    assert.equal(slice.title, 'b');
+    assert.equal(slice.args.bar, foo);
   });
 });
 </script>
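Note: the new refsToBrowser test exercises the auditor's replacement responsibility: object references recorded against the placeholder browser pid (pid_ref: -1) are rewritten to the real browser process and the pending patchups are applied. A sketch of the two calls involved, echoing the ChromeAuditor change above (modelHelper here is the auditor's ChromeModelHelper):

  // Sketch only: resolve placeholder pid references, then apply patchups.
  model.replacePIDRefsInPatchups(
      tr.model.BROWSER_PROCESS_PID_REF,
      modelHelper.browserProcess.pid);
  model.applyObjectRefPatchups();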
diff --git a/catapult/tracing/tracing/extras/chrome/chrome_browser_helper.html b/catapult/tracing/tracing/extras/chrome/chrome_browser_helper.html
deleted file mode 100644
index e4f113f..0000000
--- a/catapult/tracing/tracing/extras/chrome/chrome_browser_helper.html
+++ /dev/null
@@ -1,123 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2014 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/extras/chrome/chrome_process_helper.html">
-
-<script>
-'use strict';
-
-/**
- * @fileoverview Utilities for accessing trace data about the Chrome browser.
- */
-tr.exportTo('tr.e.audits', function() {
-  function ChromeBrowserHelper(modelHelper, process) {
-    tr.e.audits.ChromeProcessHelper.call(this, modelHelper, process);
-    this.mainThread_ = process.findAtMostOneThreadNamed('CrBrowserMain');
-  }
-
-  ChromeBrowserHelper.isBrowserProcess = function(process) {
-    return !!process.findAtMostOneThreadNamed('CrBrowserMain');
-  };
-
-  ChromeBrowserHelper.prototype = {
-    __proto__: tr.e.audits.ChromeProcessHelper.prototype,
-
-    get rendererHelpers() {
-      return this.modelHelper.rendererHelpers;
-    },
-
-    getLoadingEventsInRange: function(rangeOfInterest) {
-      return this.getAllAsyncSlicesMatching(function(slice) {
-        return slice.title.indexOf('WebContentsImpl Loading') === 0 &&
-            rangeOfInterest.intersectsExplicitRangeInclusive(
-                slice.start, slice.end);
-      });
-    },
-
-    getCommitProvisionalLoadEventsInRange: function(rangeOfInterest) {
-      return this.getAllAsyncSlicesMatching(function(slice) {
-        return slice.title === 'RenderFrameImpl::didCommitProvisionalLoad' &&
-            rangeOfInterest.intersectsExplicitRangeInclusive(
-                slice.start, slice.end);
-      });
-    },
-
-    get hasLatencyEvents() {
-      var hasLatency = false;
-      this.modelHelper.model.getAllThreads().some(function(thread) {
-        thread.iterateAllEvents(function(event) {
-          if (!event.isTopLevel)
-            return;
-          if (!(event instanceof tr.e.cc.InputLatencyAsyncSlice))
-            return;
-          hasLatency = true;
-        });
-        return hasLatency;
-      });
-      return hasLatency;
-    },
-
-    getLatencyEventsInRange: function(rangeOfInterest) {
-      return this.getAllAsyncSlicesMatching(function(slice) {
-        return (slice.title.indexOf('InputLatency') === 0) &&
-            rangeOfInterest.intersectsExplicitRangeInclusive(
-                slice.start, slice.end);
-      });
-    },
-
-    getAllAsyncSlicesMatching: function(pred, opt_this) {
-      var events = [];
-      this.iterAllThreads(function(thread) {
-        thread.iterateAllEvents(function(slice) {
-          if (pred.call(opt_this, slice))
-            events.push(slice);
-        });
-      });
-      return events;
-    },
-
-    getAllNetworkEventsInRange: function(rangeOfInterest) {
-      var networkEvents = [];
-      this.modelHelper.model.getAllThreads().forEach(function(thread) {
-        thread.asyncSliceGroup.slices.forEach(function(slice) {
-          var match = false;
-          if (slice.category == 'net' ||  // old-style URLRequest/Resource
-              slice.category == 'disabled-by-default-netlog' ||
-              slice.category == 'netlog') {
-            match = true;
-          }
-
-          if (!match)
-            return;
-
-          if (rangeOfInterest.intersectsExplicitRangeInclusive(
-                slice.start, slice.end))
-            networkEvents.push(slice);
-        });
-      });
-      return networkEvents;
-    },
-
-    iterAllThreads: function(func, opt_this) {
-      tr.b.iterItems(this.process.threads, function(tid, thread) {
-        func.call(opt_this, thread);
-      });
-
-      tr.b.iterItems(this.rendererHelpers, function(pid, rendererHelper) {
-        var rendererProcess = rendererHelper.process;
-        tr.b.iterItems(rendererProcess.threads, function(tid, thread) {
-          func.call(opt_this, thread);
-        });
-      }, this);
-    }
-  };
-
-  return {
-    ChromeBrowserHelper: ChromeBrowserHelper
-  };
-});
-</script>
diff --git a/catapult/tracing/tracing/extras/chrome/chrome_browser_helper_test.html b/catapult/tracing/tracing/extras/chrome/chrome_browser_helper_test.html
deleted file mode 100644
index c10f85e..0000000
--- a/catapult/tracing/tracing/extras/chrome/chrome_browser_helper_test.html
+++ /dev/null
@@ -1,63 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/core/test_utils.html">
-<link rel="import" href="/tracing/extras/chrome/cc/input_latency_async_slice.html">
-<link rel="import" href="/tracing/extras/chrome/chrome_browser_helper.html">
-<link rel="import" href="/tracing/extras/chrome/chrome_model_helper.html">
-<link rel="import" href="/tracing/extras/chrome/chrome_test_utils.html">
-<link rel="import" href="/tracing/extras/importer/trace_event_importer.html">
-<link rel="import" href="/tracing/model/model.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  var INPUT_TYPE = tr.e.cc.INPUT_EVENT_TYPE_NAMES;
-
-  function getRange(min, max) {
-    var range = new tr.b.Range();
-    range.min = min;
-    range.max = max;
-    return range;
-  }
-
-  test('LoadingEvent', function() {
-    var model = tr.e.chrome.ChromeTestUtils.newChromeModel(function() { });
-    var modelHelper = new tr.e.audits.ChromeModelHelper(model);
-    tr.e.chrome.ChromeTestUtils.addLoadingEvent(model, {start: 1, end: 10});
-    assert.equal(1, modelHelper.browserHelper.getLoadingEventsInRange(
-      getRange(0, 100)).length);
-  });
-
-  test('ProvisionalLoadEvent', function() {
-    var model = tr.e.chrome.ChromeTestUtils.newChromeModel(function() { });
-    var modelHelper = new tr.e.audits.ChromeModelHelper(model);
-    tr.e.chrome.ChromeTestUtils.addCommitLoadEvent(model, {start: 1, end: 10});
-    assert.equal(1,
-      modelHelper.browserHelper.getCommitProvisionalLoadEventsInRange(
-        getRange(0, 100)).length);
-  });
-
-  test('LatencyEvent', function() {
-    var model = tr.e.chrome.ChromeTestUtils.newChromeModel(function() { });
-    var modelHelper = new tr.e.audits.ChromeModelHelper(model);
-    tr.e.chrome.ChromeTestUtils.addInputEvent(
-        model, INPUT_TYPE.UNKNOWN, {start: 1, end: 10});
-    assert.equal(1, modelHelper.browserHelper.getLatencyEventsInRange(
-      getRange(0, 100)).length);
-  });
-
-  test('NetworkEvent', function() {
-    var model = tr.e.chrome.ChromeTestUtils.newChromeModel(function() { });
-    var modelHelper = new tr.e.audits.ChromeModelHelper(model);
-    tr.e.chrome.ChromeTestUtils.addNetworkEvent(model, {start: 1, end: 10});
-    assert.equal(1, modelHelper.browserHelper.getAllNetworkEventsInRange(
-      getRange(0, 100)).length);
-  });
-});
-</script>
diff --git a/catapult/tracing/tracing/extras/chrome/chrome_gpu_helper.html b/catapult/tracing/tracing/extras/chrome/chrome_gpu_helper.html
deleted file mode 100644
index b95c6f0..0000000
--- a/catapult/tracing/tracing/extras/chrome/chrome_gpu_helper.html
+++ /dev/null
@@ -1,41 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/extras/chrome/chrome_process_helper.html">
-
-<script>
-'use strict';
-
-/**
- * @fileoverview Utilities for accessing the Chrome GPU Process.
- */
-tr.exportTo('tr.e.audits', function() {
-  function ChromeGpuHelper(modelHelper, process) {
-    tr.e.audits.ChromeProcessHelper.call(this, modelHelper, process);
-    this.mainThread_ = process.findAtMostOneThreadNamed('CrGpuMain');
-  };
-
-  ChromeGpuHelper.isGpuProcess = function(process) {
-    // In some android builds the GPU thread is not in a separate process.
-    if (process.findAtMostOneThreadNamed('CrBrowserMain') ||
-        process.findAtMostOneThreadNamed('CrRendererMain'))
-      return false;
-    return process.findAtMostOneThreadNamed('CrGpuMain');
-  };
-
-  ChromeGpuHelper.prototype = {
-    __proto__: tr.e.audits.ChromeProcessHelper.prototype,
-
-    get mainThread() {
-      return this.mainThread_;
-    }
-  };
-
-  return {
-    ChromeGpuHelper: ChromeGpuHelper
-  };
-});
-</script>
diff --git a/catapult/tracing/tracing/extras/chrome/chrome_model_helper.html b/catapult/tracing/tracing/extras/chrome/chrome_model_helper.html
deleted file mode 100644
index 5acb184..0000000
--- a/catapult/tracing/tracing/extras/chrome/chrome_model_helper.html
+++ /dev/null
@@ -1,129 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2014 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/base/base.html">
-<link rel="import" href="/tracing/base/iteration_helpers.html">
-<link rel="import" href="/tracing/extras/chrome/chrome_browser_helper.html">
-<link rel="import" href="/tracing/extras/chrome/chrome_gpu_helper.html">
-<link rel="import" href="/tracing/extras/chrome/chrome_renderer_helper.html">
-
-<script>
-'use strict';
-
-/**
- * @fileoverview Utilities for accessing trace data about the Chrome browser.
- */
-tr.exportTo('tr.e.audits', function() {
-  function findChromeBrowserProcess(model) {
-    var browserProcesses = [];
-    model.getAllProcesses().forEach(function(process) {
-      if (!tr.e.audits.ChromeBrowserHelper.isBrowserProcess(process))
-        return;
-      browserProcesses.push(process);
-    }, this);
-    if (browserProcesses.length === 0)
-      return undefined;
-    if (browserProcesses.length > 1)
-      return undefined;
-    return browserProcesses[0];
-  }
-
-  function findChromeRenderProcesses(model) {
-    var rendererProcesses = [];
-    model.getAllProcesses().forEach(function(process) {
-      if (!tr.e.audits.ChromeRendererHelper.isRenderProcess(process))
-        return;
-      rendererProcesses.push(process);
-    });
-    return rendererProcesses;
-  }
-
-  function findChromeGpuProcess(model) {
-    var gpuProcesses = model.getAllProcesses().filter(
-      tr.e.audits.ChromeGpuHelper.isGpuProcess);
-    if (gpuProcesses.length != 1)
-      return undefined;
-    return gpuProcesses[0];
-  }
-
-  /**
-   * @constructor
-   */
-  function ChromeModelHelper(model) {
-    this.model_ = model;
-
-    // Find browserHelper.
-    this.browserProcess_ = findChromeBrowserProcess(model);
-    if (this.browserProcess_) {
-      this.browserHelper_ = new tr.e.audits.ChromeBrowserHelper(
-          this, this.browserProcess_);
-    } else {
-      this.browserHelper_ = undefined;
-    }
-
-    // Find gpuHelper.
-    var gpuProcess = findChromeGpuProcess(model);
-    if (gpuProcess) {
-      this.gpuHelper_ = new tr.e.audits.ChromeGpuHelper(
-          this, gpuProcess);
-    } else {
-      this.gpuHelper_ = undefined;
-    }
-
-    // Find rendererHelpers.
-    var rendererProcesses_ = findChromeRenderProcesses(model);
-
-    this.rendererHelpers_ = {};
-    rendererProcesses_.forEach(function(renderProcess) {
-      var rendererHelper = new tr.e.audits.ChromeRendererHelper(
-        this, renderProcess);
-      this.rendererHelpers_[rendererHelper.pid] = rendererHelper;
-    }, this);
-  }
-
-  ChromeModelHelper.supportsModel = function(model) {
-    if (findChromeBrowserProcess(model) !== undefined)
-      return true;
-    if (findChromeRenderProcesses(model).length)
-      return true;
-    return false;
-  }
-
-  ChromeModelHelper.prototype = {
-    get pid() {
-      throw new Error('woah');
-    },
-
-    get process() {
-      throw new Error('woah');
-    },
-
-    get model() {
-      return this.model_;
-    },
-
-    get browserProcess() {
-      return this.browserProcess_;
-    },
-
-    get browserHelper() {
-      return this.browserHelper_;
-    },
-
-    get gpuHelper() {
-      return this.gpuHelper_;
-    },
-
-    get rendererHelpers() {
-      return this.rendererHelpers_;
-    }
-  };
-
-  return {
-    ChromeModelHelper: ChromeModelHelper
-  };
-});
-</script>
diff --git a/catapult/tracing/tracing/extras/chrome/chrome_model_helper_test.html b/catapult/tracing/tracing/extras/chrome/chrome_model_helper_test.html
deleted file mode 100644
index 32149ed..0000000
--- a/catapult/tracing/tracing/extras/chrome/chrome_model_helper_test.html
+++ /dev/null
@@ -1,136 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2014 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/core/test_utils.html">
-<link rel="import" href="/tracing/extras/chrome/chrome_model_helper.html">
-<link rel="import" href="/tracing/extras/chrome/chrome_test_utils.html">
-<link rel="import" href="/tracing/extras/importer/trace_event_importer.html">
-<link rel="import" href="/tracing/core/test_utils.html">
-<link rel="import" href="/tracing/model/model.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  var newAsyncSliceEx = tr.c.TestUtils.newAsyncSliceEx;
-
-  test('getLatencyData', function() {
-    var m = tr.e.chrome.ChromeTestUtils.newChromeModel(function(m) {
-      m.browserMain.asyncSliceGroup.push(newAsyncSliceEx({
-        title: 'InputLatency::GestureScrollUpdate',
-        cat: 'benchmark',
-        start: 0,
-        end: 10,
-        id: '0x100',
-        isTopLevel: true,
-        args: {
-          data: {
-            INPUT_EVENT_LATENCY_ORIGINAL_COMPONENT: {'time' : 0},
-            INPUT_EVENT_LATENCY_TERMINATED_FRAME_SWAP_COMPONENT: {time: 10}
-          }
-        }
-      }));
-    });
-
-    var modelHelper = new tr.e.audits.ChromeModelHelper(m);
-    var latencyEvents = modelHelper.browserHelper.getLatencyEventsInRange(
-      m.bounds);
-    assert.equal(latencyEvents.length, 1);
-  });
-
-  test('getFrametime', function() {
-    var frame_ts;
-    var events = [];
-    // Browser process 3507
-    events.push({'cat' : '__metadata', 'pid' : 3507, 'tid' : 3507, 'ts' : 0, 'ph' : 'M', 'name' : 'thread_name', 'args' : {'name' : 'CrBrowserMain'}}); // @suppress longLineCheck
-
-    // Renderer process 3508
-    events.push({'cat' : '__metadata', 'pid' : 3508, 'tid' : 3508, 'ts' : 0, 'ph' : 'M', 'name' : 'thread_name', 'args' : {'name' : 'CrRendererMain'}}); // @suppress longLineCheck
-    // Compositor thread 3510
-    events.push({'cat' : '__metadata', 'pid' : 3508, 'tid' : 3510, 'ts' : 0, 'ph' : 'M', 'name' : 'thread_name', 'args' : {'name' : 'Compositor'}}); // @suppress longLineCheck
-
-    // Renderer process 3509
-    events.push({'cat' : '__metadata', 'pid' : 3509, 'tid' : 3509, 'ts' : 0, 'ph' : 'M', 'name' : 'thread_name', 'args' : {'name' : 'CrRendererMain'}}); // @suppress longLineCheck
-
-    // Compositor thread 3511
-    events.push({'cat' : '__metadata', 'pid' : 3509, 'tid' : 3511, 'ts' : 0, 'ph' : 'M', 'name' : 'thread_name', 'args' : {'name' : 'Compositor'}}); // @suppress longLineCheck
-
-    frame_ts = 0;
-    // Add impl rendering stats for browser process 3507
-    for (var i = 0; i < 10; i++) {
-      events.push({'cat' : 'benchmark', 'pid' : 3507, 'tid' : 3507, 'ts' : frame_ts, 'ph' : 'i', 'name' : 'BenchmarkInstrumentation::ImplThreadRenderingStats', 's' : 't'}); // @suppress longLineCheck
-      frame_ts += 16000 + 1000 * (i % 2);
-    }
-
-    frame_ts = 0;
-    // Add main rendering stats for renderer process 3508
-    for (var i = 0; i < 10; i++) {
-      events.push({'cat' : 'benchmark', 'pid' : 3508, 'tid' : 3508, 'ts' : frame_ts, 'ph' : 'i', 'name' : 'BenchmarkInstrumentation::MainThreadRenderingStats', 's' : 't'}); // @suppress longLineCheck
-      frame_ts += 16000 + 1000 * (i % 2);
-    }
-    events.push({'cat' : 'benchmark', 'pid' : 3508, 'tid' : 3510, 'ts' : 1600, 'ph' : 'i', 'name' : 'KeepAlive', 's' : 't'}); // @suppress longLineCheck
-
-    frame_ts = 0;
-    // Add impl and main rendering stats for renderer process 3509
-    for (var i = 0; i < 10; i++) {
-      events.push({'cat' : 'benchmark', 'pid' : 3509, 'tid' : 3511, 'ts' : frame_ts, 'ph' : 'i', 'name' : 'BenchmarkInstrumentation::ImplThreadRenderingStats', 's' : 't'}); // @suppress longLineCheck
-      events.push({'cat' : 'benchmark', 'pid' : 3509, 'tid' : 3509, 'ts' : frame_ts, 'ph' : 'i', 'name' : 'BenchmarkInstrumentation::MainThreadRenderingStats', 's' : 't'}); // @suppress longLineCheck
-      frame_ts += 16000 + 1000 * (i % 2);
-    }
-
-    var m = tr.c.TestUtils.newModelWithEvents([events]);
-    var modelHelper = new tr.e.audits.ChromeModelHelper(m);
-
-    // Testing browser impl and main rendering stats.
-    var frameEvents = modelHelper.browserHelper.getFrameEventsInRange(
-        tr.e.audits.IMPL_FRAMETIME_TYPE, m.bounds);
-    var frametimeData = tr.e.audits.getFrametimeDataFromEvents(frameEvents);
-    assert.equal(frametimeData.length, 9);
-    for (var i = 0; i < frametimeData.length; i++) {
-      assert.equal(frametimeData[i].frametime, 16 + i % 2);
-    }
-    // No main rendering stats.
-    frameEvents = modelHelper.browserHelper.getFrameEventsInRange(
-        tr.e.audits.MAIN_FRAMETIME_TYPE, m.bounds);
-    assert.equal(frameEvents.length, 0);
-
-
-    // Testing renderer 3508 impl and main rendering stats.
-    frameEvents = modelHelper.rendererHelpers[3508].getFrameEventsInRange(
-        tr.e.audits.MAIN_FRAMETIME_TYPE, m.bounds);
-    frametimeData = tr.e.audits.getFrametimeDataFromEvents(frameEvents);
-    assert.equal(frametimeData.length, 9);
-    for (var i = 0; i < frametimeData.length; i++) {
-      assert.equal(frametimeData[i].frametime, 16 + i % 2);
-    }
-
-    // No impl rendering stats.
-    frameEvents = modelHelper.rendererHelpers[3508].getFrameEventsInRange(
-        tr.e.audits.IMPL_FRAMETIME_TYPE, m.bounds);
-    assert.equal(frameEvents.length, 0);
-
-
-    // Testing renderer 3509 impl and main rendering stats.
-    frameEvents = modelHelper.rendererHelpers[3509].getFrameEventsInRange(
-        tr.e.audits.IMPL_FRAMETIME_TYPE, m.bounds);
-    frametimeData = tr.e.audits.getFrametimeDataFromEvents(frameEvents);
-    assert.equal(frametimeData.length, 9);
-    for (var i = 0; i < frametimeData.length; i++) {
-      assert.equal(frametimeData[i].frametime, 16 + i % 2);
-    }
-
-    frameEvents = modelHelper.rendererHelpers[3509].getFrameEventsInRange(
-        tr.e.audits.MAIN_FRAMETIME_TYPE, m.bounds);
-    frametimeData = tr.e.audits.getFrametimeDataFromEvents(frameEvents);
-    assert.equal(frametimeData.length, 9);
-    for (var i = 0; i < frametimeData.length; i++) {
-      assert.equal(frametimeData[i].frametime, 16 + i % 2);
-    }
-
-  });
-});
-</script>
diff --git a/catapult/tracing/tracing/extras/chrome/chrome_process_helper.html b/catapult/tracing/tracing/extras/chrome/chrome_process_helper.html
deleted file mode 100644
index 2af74ea..0000000
--- a/catapult/tracing/tracing/extras/chrome/chrome_process_helper.html
+++ /dev/null
@@ -1,91 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2014 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/base/base.html">
-
-<script>
-'use strict';
-
-/**
- * @fileoverview Utilities for accessing trace data about the Chrome browser.
- */
-tr.exportTo('tr.e.audits', function() {
-  var MAIN_FRAMETIME_TYPE = 'main_frametime_type';
-  var IMPL_FRAMETIME_TYPE = 'impl_frametime_type';
-
-  var MAIN_RENDERING_STATS =
-      'BenchmarkInstrumentation::MainThreadRenderingStats';
-  var IMPL_RENDERING_STATS =
-      'BenchmarkInstrumentation::ImplThreadRenderingStats';
-
-
-  function getSlicesIntersectingRange(rangeOfInterest, slices) {
-    var slicesInFilterRange = [];
-    for (var i = 0; i < slices.length; i++) {
-      var slice = slices[i];
-      if (rangeOfInterest.intersectsExplicitRangeInclusive(
-            slice.start, slice.end))
-        slicesInFilterRange.push(slice);
-    }
-    return slicesInFilterRange;
-  }
-
-
-  function ChromeProcessHelper(modelHelper, process) {
-    this.modelHelper = modelHelper;
-    this.process = process;
-  }
-
-  ChromeProcessHelper.prototype = {
-    get pid() {
-      return this.process.pid;
-    },
-
-    getFrameEventsInRange: function(frametimeType, range) {
-      var titleToGet;
-      if (frametimeType == MAIN_FRAMETIME_TYPE)
-        titleToGet = MAIN_RENDERING_STATS;
-      else
-        titleToGet = IMPL_RENDERING_STATS;
-
-      var frameEvents = [];
-      this.process.iterateAllEvents(function(event) {
-        if (event.title !== titleToGet)
-          return;
-        if (range.intersectsExplicitRangeInclusive(event.start, event.end))
-          frameEvents.push(event);
-      });
-
-      frameEvents.sort(function(a, b) {return a.start - b.start});
-      return frameEvents;
-    }
-  };
-
-  function getFrametimeDataFromEvents(frameEvents) {
-    var frametimeData = [];
-    for (var i = 1; i < frameEvents.length; i++) {
-      var diff = frameEvents[i].start - frameEvents[i - 1].start;
-      frametimeData.push({
-        'x': frameEvents[i].start,
-        'frametime': diff
-      });
-    }
-    return frametimeData;
-  }
-
-  return {
-    ChromeProcessHelper: ChromeProcessHelper,
-
-    MAIN_FRAMETIME_TYPE: MAIN_FRAMETIME_TYPE,
-    IMPL_FRAMETIME_TYPE: IMPL_FRAMETIME_TYPE,
-    MAIN_RENDERING_STATS: MAIN_RENDERING_STATS,
-    IMPL_RENDERING_STATS: IMPL_RENDERING_STATS,
-
-    getSlicesIntersectingRange: getSlicesIntersectingRange,
-    getFrametimeDataFromEvents: getFrametimeDataFromEvents
-  };
-});
-</script>
diff --git a/catapult/tracing/tracing/extras/chrome/chrome_renderer_helper.html b/catapult/tracing/tracing/extras/chrome/chrome_renderer_helper.html
deleted file mode 100644
index fdb03fb..0000000
--- a/catapult/tracing/tracing/extras/chrome/chrome_renderer_helper.html
+++ /dev/null
@@ -1,59 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2014 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/extras/chrome/chrome_process_helper.html">
-
-<script>
-'use strict';
-
-/**
- * @fileoverview Utilities for accessing trace data about the Chrome browser.
- */
-tr.exportTo('tr.e.audits', function() {
-  function ChromeRendererHelper(modelHelper, process) {
-    tr.e.audits.ChromeProcessHelper.call(this, modelHelper, process);
-    this.mainThread_ = process.findAtMostOneThreadNamed('CrRendererMain');
-    this.compositorThread_ = process.findAtMostOneThreadNamed('Compositor');
-    this.rasterWorkerThreads_ = process.findAllThreadsMatching(function(t) {
-      if (t.name === undefined)
-        return false;
-      if (t.name.indexOf('CompositorTileWorker') === 0)
-        return true;
-      if (t.name.indexOf('CompositorRasterWorker') === 0)
-        return true;
-      return false;
-    });
-  };
-
-  ChromeRendererHelper.isRenderProcess = function(process) {
-    if (!process.findAtMostOneThreadNamed('CrRendererMain'))
-      return false;
-    if (!process.findAtMostOneThreadNamed('Compositor'))
-      return false;
-    return true;
-  };
-
-  ChromeRendererHelper.prototype = {
-    __proto__: tr.e.audits.ChromeProcessHelper.prototype,
-
-    get mainThread() {
-      return this.mainThread_;
-    },
-
-    get compositorThread() {
-      return this.compositorThread_;
-    },
-
-    get rasterWorkerThreads() {
-      return this.rasterWorkerThreads_;
-    }
-  };
-
-  return {
-    ChromeRendererHelper: ChromeRendererHelper
-  };
-});
-</script>
diff --git a/catapult/tracing/tracing/extras/chrome/chrome_test_utils.html b/catapult/tracing/tracing/extras/chrome/chrome_test_utils.html
index 7edcdab..5ff273d 100644
--- a/catapult/tracing/tracing/extras/chrome/chrome_test_utils.html
+++ b/catapult/tracing/tracing/extras/chrome/chrome_test_utils.html
@@ -6,7 +6,6 @@
 -->
 <link rel="import" href="/tracing/base/base.html">
 <link rel="import" href="/tracing/core/test_utils.html">
-<link rel="import" href="/tracing/extras/chrome/chrome_process_helper.html">
 <link rel="import" href="/tracing/model/model.html">
 
 <script>
@@ -37,13 +36,27 @@
 
       customizeModelCallback(model);
     });
-  }
+  };
 
   ChromeTestUtils.addEvent = function(thread, dict) {
     var slice = tr.c.TestUtils.newAsyncSliceEx(dict);
     thread.asyncSliceGroup.push(slice);
     return slice;
-  }
+  };
+
+  ChromeTestUtils.addNavigationStartEvent = function(model, dict) {
+    dict.title = 'NavigationTiming navigationStart';
+    var event = tr.c.TestUtils.newInstantEvent(dict);
+    model.instantEvents.push(event);
+    return event;
+  };
+
+  ChromeTestUtils.addFirstContentfulPaintEvent = function(model, dict) {
+    dict.title = 'firstContentfulPaint';
+    var event = tr.c.TestUtils.newInstantEvent(dict);
+    model.instantEvents.push(event);
+    return event;
+  };
 
   ChromeTestUtils.addInputEvent = function(model, typeName, dict) {
     dict.title = 'InputLatency::' + typeName;
@@ -52,14 +65,14 @@
     var slice = tr.c.TestUtils.newAsyncSliceEx(dict);
     model.browserMain.asyncSliceGroup.push(slice);
     return slice;
-  }
+  };
 
   ChromeTestUtils.addFlingAnimationEvent = function(model, dict) {
     dict.title = 'InputHandlerProxy::HandleGestureFling::started';
     var slice = tr.c.TestUtils.newAsyncSliceEx(dict);
     model.rendererCompositor.asyncSliceGroup.push(slice);
     return slice;
-  }
+  };
 
   ChromeTestUtils.addRenderingEvent = function(model, dict) {
     dict.title = dict.title || 'DummyEvent';
@@ -67,22 +80,22 @@
     var slice = tr.c.TestUtils.newSliceEx(dict);
     model.rendererMain.sliceGroup.pushSlice(slice);
     return slice;
-  }
+  };
 
   ChromeTestUtils.addFrameEvent = function(model, dict) {
-    dict.title = tr.e.audits.IMPL_RENDERING_STATS;
+    dict.title = tr.model.helpers.IMPL_RENDERING_STATS;
     dict.type = tr.model.ThreadSlice;
     var slice = tr.c.TestUtils.newSliceEx(dict);
     model.rendererMain.asyncSliceGroup.push(slice);
     return slice;
-  }
+  };
 
   ChromeTestUtils.addLoadingEvent = function(model, dict) {
     dict.title = 'WebContentsImpl Loading';
     var slice = tr.c.TestUtils.newAsyncSliceEx(dict);
     model.rendererMain.asyncSliceGroup.push(slice);
     return slice;
-  }
+  };
 
   ChromeTestUtils.addNetworkEvent = function(model, dict) {
     dict.cat = 'netlog';
@@ -90,56 +103,56 @@
     var slice = tr.c.TestUtils.newAsyncSliceEx(dict);
     model.browserMain.asyncSliceGroup.push(slice);
     return slice;
-  }
+  };
 
   ChromeTestUtils.addCommitLoadEvent = function(model, dict) {
     dict.title = 'RenderFrameImpl::didCommitProvisionalLoad';
     var slice = tr.c.TestUtils.newAsyncSliceEx(dict);
     model.rendererMain.sliceGroup.pushSlice(slice);
     return slice;
-  }
+  };
 
   ChromeTestUtils.addCreateChildFrameEvent = function(model, dict) {
     dict.title = 'RenderFrameImpl::createChildFrame';
     var slice = tr.c.TestUtils.newAsyncSliceEx(dict);
     model.rendererMain.sliceGroup.pushSlice(slice);
     return slice;
-  }
+  };
 
   ChromeTestUtils.addStartProvisionalLoadEvent = function(model, dict) {
     dict.title = 'RenderFrameImpl::didStartProvisionalLoad';
     var slice = tr.c.TestUtils.newAsyncSliceEx(dict);
     model.rendererMain.sliceGroup.pushSlice(slice);
     return slice;
-  }
+  };
 
   ChromeTestUtils.addFailProvisionalLoadEvent = function(model, dict) {
     dict.title = 'RenderFrameImpl::didFailProvisionalLoad';
     var slice = tr.c.TestUtils.newAsyncSliceEx(dict);
     model.rendererMain.sliceGroup.pushSlice(slice);
     return slice;
-  }
+  };
 
   ChromeTestUtils.addFinishLoadEvent = function(model, dict) {
     dict.title = 'RenderFrameImpl::didFinishLoad';
     var slice = tr.c.TestUtils.newSliceEx(dict);
     model.rendererMain.sliceGroup.pushSlice(slice);
     return slice;
-  }
+  };
 
   ChromeTestUtils.addLoadFinishedEvent = function(model, dict) {
     dict.title = 'LoadFinished';
     var slice = tr.c.TestUtils.newSliceEx(dict);
     model.rendererMain.sliceGroup.pushSlice(slice);
     return slice;
-  }
+  };
 
   ChromeTestUtils.addCreateThreadsEvent = function(model, dict) {
     dict.title = 'BrowserMainLoop::CreateThreads';
     var slice = tr.c.TestUtils.newSliceEx(dict);
     model.rendererMain.sliceGroup.pushSlice(slice);
     return slice;
-  }
+  };
 
   return {
     ChromeTestUtils: ChromeTestUtils
diff --git a/catapult/tracing/tracing/extras/chrome/chrome_user_friendly_category_driver.html b/catapult/tracing/tracing/extras/chrome/chrome_user_friendly_category_driver.html
index 5dc38b0..800f92c 100644
--- a/catapult/tracing/tracing/extras/chrome/chrome_user_friendly_category_driver.html
+++ b/catapult/tracing/tracing/extras/chrome/chrome_user_friendly_category_driver.html
@@ -131,6 +131,16 @@
       'v8.compile'
     ],
 
+    script_parse: [
+      'V8Test.ParseScript',
+      'V8Test.ParseFunction',
+    ],
+
+    script_compile: [
+      'V8Test.Compile',
+      'V8Test.CompileFullCode',
+    ],
+
     resource_loading: [
       'ResourceFetcher::requestResource',
       'ResourceDispatcher::OnReceivedData',
diff --git a/catapult/tracing/tracing/extras/chrome/frame_tree_node.html b/catapult/tracing/tracing/extras/chrome/frame_tree_node.html
new file mode 100644
index 0000000..dde8fc2
--- /dev/null
+++ b/catapult/tracing/tracing/extras/chrome/frame_tree_node.html
@@ -0,0 +1,58 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/model/object_instance.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.e.chrome', function() {
+  var constants = tr.e.cc.constants;
+
+  var ObjectSnapshot = tr.model.ObjectSnapshot;
+  var ObjectInstance = tr.model.ObjectInstance;
+
+  function FrameTreeNodeSnapshot() {
+    ObjectSnapshot.apply(this, arguments);
+  }
+
+  FrameTreeNodeSnapshot.prototype = {
+    __proto__: ObjectSnapshot.prototype,
+
+    preInitialize: function() {
+    },
+
+    initialize: function() {
+    },
+
+    get userFriendlyName() {
+      return 'FrameTreeNode';
+    }
+  };
+
+  ObjectSnapshot.register(
+      FrameTreeNodeSnapshot,
+      {typeName: 'FrameTreeNode'});
+
+  function FrameTreeNodeInstance() {
+    ObjectInstance.apply(this, arguments);
+  }
+
+  FrameTreeNodeInstance.prototype = {
+    __proto__: ObjectInstance.prototype
+  };
+
+  ObjectInstance.register(
+      FrameTreeNodeInstance,
+      {typeName: 'FrameTreeNode'});
+
+  return {
+    FrameTreeNodeSnapshot: FrameTreeNodeSnapshot,
+    FrameTreeNodeInstance: FrameTreeNodeInstance
+  };
+});
+</script>
diff --git a/catapult/tracing/tracing/extras/chrome/layout_object.html b/catapult/tracing/tracing/extras/chrome/layout_object.html
index 191f8e8..1403ff1 100644
--- a/catapult/tracing/tracing/extras/chrome/layout_object.html
+++ b/catapult/tracing/tracing/extras/chrome/layout_object.html
@@ -10,25 +10,66 @@
 <script>
 'use strict';
 
-/**
- * @fileoverview Provides the LayoutObject class.
- */
 tr.exportTo('tr.e.chrome', function() {
   var KNOWN_PROPERTIES = {
+    absX: 1,
+    absY: 1,
+    address: 1,
+    anonymous: 1,
+    childNeeds: 1,
     children: 1,
+    classNames: 1,
+    col: 1,
+    colSpan: 1,
+    float: 1,
+    height: 1,
+    htmlId: 1,
     name: 1,
-    address: 1
+    posChildNeeds: 1,
+    positioned: 1,
+    positionedMovement: 1,
+    relX: 1,
+    relY: 1,
+    relativePositioned: 1,
+    row: 1,
+    rowSpan: 1,
+    selfNeeds: 1,
+    stickyPositioned: 1,
+    tag: 1,
+    width: 1
   };
 
-  /**
-   * @constructor
-   */
   function LayoutObject(snapshot, args) {
     this.snapshot_ = snapshot;
     this.id_ = args.address;
     this.name_ = args.name;
     this.childLayoutObjects_ = [];
     this.otherProperties_ = {};
+    this.tag_ = args.tag;
+    this.relativeRect_ = tr.b.Rect.fromXYWH(
+        args.relX, args.relY, args.width, args.height);
+    this.absoluteRect_ = tr.b.Rect.fromXYWH(
+        args.absX, args.absY, args.width, args.height);
+    this.isFloat_ = args.float;
+    this.isStickyPositioned_ = args.stickyPositioned;
+    this.isPositioned_ = args.positioned;
+    this.isRelativePositioned_ = args.relativePositioned;
+    this.isAnonymous_ = args.anonymous;
+    this.htmlId_ = args.htmlId;
+    this.classNames_ = args.classNames;
+    this.needsLayoutReasons_ = [];
+    if (args.selfNeeds)
+      this.needsLayoutReasons_.push('self');
+    if (args.childNeeds)
+      this.needsLayoutReasons_.push('child');
+    if (args.posChildNeeds)
+      this.needsLayoutReasons_.push('positionedChild');
+    if (args.positionedMovement)
+      this.needsLayoutReasons_.push('positionedMovement');
+    this.tableRow_ = args.row;
+    this.tableCol_ = args.col;
+    this.tableRowSpan_ = args.rowSpan;
+    this.tableColSpan_ = args.colSpan;
 
     if (args.children) {
       args.children.forEach(function(child) {
@@ -55,6 +96,66 @@
       return this.name_;
     },
 
+    get tag() {
+      return this.tag_;
+    },
+
+    get relativeRect() {
+      return this.relativeRect_;
+    },
+
+    get absoluteRect() {
+      return this.absoluteRect_;
+    },
+
+    get isPositioned() {
+      return this.isPositioned_;
+    },
+
+    get isFloat() {
+      return this.isFloat_;
+    },
+
+    get isStickyPositioned() {
+      return this.isStickyPositioned_;
+    },
+
+    get isRelativePositioned() {
+      return this.isRelativePositioned_;
+    },
+
+    get isAnonymous() {
+      return this.isAnonymous_;
+    },
+
+    get tableRow() {
+      return this.tableRow_;
+    },
+
+    get tableCol() {
+      return this.tableCol_;
+    },
+
+    get tableRowSpan() {
+      return this.tableRowSpan_;
+    },
+
+    get tableColSpan() {
+      return this.tableColSpan_;
+    },
+
+    get htmlId() {
+      return this.htmlId_;
+    },
+
+    get classNames() {
+      return this.classNames_;
+    },
+
+    get needsLayoutReasons() {
+      return this.needsLayoutReasons_;
+    },
+
     get hasChildLayoutObjects() {
       return this.childLayoutObjects_.length > 0;
     },
diff --git a/catapult/tracing/tracing/extras/chrome/layout_tree.html b/catapult/tracing/tracing/extras/chrome/layout_tree.html
new file mode 100644
index 0000000..98e8f14
--- /dev/null
+++ b/catapult/tracing/tracing/extras/chrome/layout_tree.html
@@ -0,0 +1,54 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/model/event_registry.html">
+<link rel="import" href="/tracing/model/object_instance.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.e.chrome', function() {
+  var ObjectSnapshot = tr.model.ObjectSnapshot;
+  var ObjectInstance = tr.model.ObjectInstance;
+
+  function LayoutTreeInstance() {
+    ObjectInstance.apply(this, arguments);
+  }
+
+  LayoutTreeInstance.prototype = {
+    __proto__: ObjectInstance.prototype,
+  };
+
+  ObjectInstance.register(LayoutTreeInstance, {typeName: 'LayoutTree'});
+
+  function LayoutTreeSnapshot() {
+    ObjectSnapshot.apply(this, arguments);
+    this.rootLayoutObject = new tr.e.chrome.LayoutObject(this, this.args);
+  }
+
+  LayoutTreeSnapshot.prototype = {
+    __proto__: ObjectSnapshot.prototype,
+  };
+
+  ObjectSnapshot.register(LayoutTreeSnapshot, {typeName: 'LayoutTree'});
+
+  tr.model.EventRegistry.register(
+      LayoutTreeSnapshot,
+      {
+        name: 'layoutTree',
+        pluralName: 'layoutTrees',
+        singleViewElementName: 'tr-ui-a-layout-tree-sub-view',
+        multiViewElementName: 'tr-ui-a-layout-tree-sub-view'
+      });
+
+  return {
+    LayoutTreeInstance: LayoutTreeInstance,
+    LayoutTreeSnapshot: LayoutTreeSnapshot
+  };
+});
+</script>
+
diff --git a/catapult/tracing/tracing/extras/chrome/render_frame.html b/catapult/tracing/tracing/extras/chrome/render_frame.html
new file mode 100644
index 0000000..289477d
--- /dev/null
+++ b/catapult/tracing/tracing/extras/chrome/render_frame.html
@@ -0,0 +1,58 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/model/object_instance.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.e.chrome', function() {
+  var constants = tr.e.cc.constants;
+
+  var ObjectSnapshot = tr.model.ObjectSnapshot;
+  var ObjectInstance = tr.model.ObjectInstance;
+
+  function RenderFrameSnapshot() {
+    ObjectSnapshot.apply(this, arguments);
+  }
+
+  RenderFrameSnapshot.prototype = {
+    __proto__: ObjectSnapshot.prototype,
+
+    preInitialize: function() {
+    },
+
+    initialize: function() {
+    },
+
+    get userFriendlyName() {
+      return 'RenderFrame';
+    }
+  };
+
+  ObjectSnapshot.register(
+      RenderFrameSnapshot,
+      {typeName: 'RenderFrame'});
+
+  function RenderFrameInstance() {
+    ObjectInstance.apply(this, arguments);
+  }
+
+  RenderFrameInstance.prototype = {
+    __proto__: ObjectInstance.prototype
+  };
+
+  ObjectInstance.register(
+      RenderFrameInstance,
+      {typeName: 'RenderFrame'});
+
+  return {
+    RenderFrameSnapshot: RenderFrameSnapshot,
+    RenderFrameInstance: RenderFrameInstance
+  };
+});
+</script>
diff --git a/catapult/tracing/tracing/extras/chrome_config.html b/catapult/tracing/tracing/extras/chrome_config.html
index 02aacb3..4cba43b 100644
--- a/catapult/tracing/tracing/extras/chrome_config.html
+++ b/catapult/tracing/tracing/extras/chrome_config.html
@@ -13,25 +13,19 @@
     - telemetry
 -->
 
-<!-- Chrome also supports systrace & lean config -->
-<link rel="import" href="/tracing/extras/systrace_config.html">
-<link rel="import" href="/tracing/extras/lean_config.html">
-
-<!-- General importers -->
-<link rel="import" href="/tracing/extras/importer/gzip_importer.html">
-<link rel="import" href="/tracing/extras/importer/zip_importer.html">
-
-<!--- Domain specific importers -->
-<link rel="import" href="/tracing/extras/importer/v8/v8_log_importer.html">
+<link rel="import" href="/tracing/extras/android/android_auditor.html">
+<link rel="import" href="/tracing/extras/chrome/chrome_auditor.html">
+<link rel="import" href="/tracing/extras/chrome/frame_tree_node.html">
+<link rel="import" href="/tracing/extras/chrome/layout_object.html">
+<link rel="import" href="/tracing/extras/chrome/layout_tree.html">
+<link rel="import" href="/tracing/extras/chrome/render_frame.html">
 <link rel="import" href="/tracing/extras/importer/etw/etw_importer.html">
+<link rel="import" href="/tracing/extras/importer/gzip_importer.html">
 <link rel="import" href="/tracing/extras/importer/trace2html_importer.html">
-
-<!-- Lots of chrome-specific extras -->
+<link rel="import" href="/tracing/extras/importer/v8/v8_log_importer.html">
+<link rel="import" href="/tracing/extras/importer/zip_importer.html">
+<link rel="import" href="/tracing/extras/lean_config.html">
 <link rel="import" href="/tracing/extras/measure/measure.html">
 <link rel="import" href="/tracing/extras/net/net.html">
-<link rel="import" href="/tracing/extras/chrome/layout_object.html">
-
-<!-- Auditors are fun -->
-<link rel="import" href="/tracing/extras/chrome/chrome_auditor.html">
-<link rel="import" href="/tracing/extras/android/android_auditor.html">
+<link rel="import" href="/tracing/extras/systrace_config.html">
 <link rel="import" href="/tracing/extras/vsync/vsync_auditor.html">
diff --git a/catapult/tracing/tracing/extras/full_config.html b/catapult/tracing/tracing/extras/full_config.html
index 4eab0d4..2f15718 100644
--- a/catapult/tracing/tracing/extras/full_config.html
+++ b/catapult/tracing/tracing/extras/full_config.html
@@ -6,7 +6,7 @@
 -->
 
 <!-- The full config is all the configs slammed together. -->
-<link rel="import" href="/tracing/extras/importer/gcloud_trace/gcloud_trace_importer.html">
 <link rel="import" href="/tracing/extras/chrome_config.html">
-<link rel="import" href="/tracing/extras/systrace_config.html">
+<link rel="import" href="/tracing/extras/importer/gcloud_trace/gcloud_trace_importer.html">
 <link rel="import" href="/tracing/extras/lean_config.html">
+<link rel="import" href="/tracing/extras/systrace_config.html">
diff --git a/catapult/tracing/tracing/extras/importer/android/event_log_importer.html b/catapult/tracing/tracing/extras/importer/android/event_log_importer.html
index a80852a..ff2e22e 100644
--- a/catapult/tracing/tracing/extras/importer/android/event_log_importer.html
+++ b/catapult/tracing/tracing/extras/importer/android/event_log_importer.html
@@ -103,6 +103,10 @@
   EventLogImporter.prototype = {
     __proto__: Importer.prototype,
 
+    get importerName() {
+      return 'EventLogImporter';
+    },
+
     get model() {
       return this.model_;
     },
@@ -278,7 +282,7 @@
       }
     },
 
-    importEvents: function(isSecondaryImport) {
+    importEvents: function() {
       // Check if we have a mapping from real-time to CLOCK_MONOTONIC
       if (isNaN(this.model_.realtime_to_monotonic_offset_ms)) {
         this.model_.importWarning({
diff --git a/catapult/tracing/tracing/extras/importer/battor_importer.html b/catapult/tracing/tracing/extras/importer/battor_importer.html
index e0ddb4c..591bd7e 100644
--- a/catapult/tracing/tracing/extras/importer/battor_importer.html
+++ b/catapult/tracing/tracing/extras/importer/battor_importer.html
@@ -12,7 +12,7 @@
 <script>
 /**
  * @fileoverview Imports text files in the BattOr format into the
- * Model. This format is output by the BattOr executable.
+ * Model. This format is output by the battor_agent executable and library.
  *
  * This importer assumes the events arrive as a string. The unit tests provide
  * examples of the trace format.
@@ -21,7 +21,7 @@
 
 tr.exportTo('tr.e.importer.battor', function() {
   /**
-   * Imports linux perf events into a specified model.
+   * Imports a BattOr power trace into a specified model.
    * @constructor
    */
   function BattorImporter(model, events) {
@@ -29,13 +29,17 @@
     this.sampleRate_ = undefined;
     this.model_ = model;
     this.events_ = events;
+    this.explicitSyncMark_ = undefined;
   }
 
   var TestExports = {};
 
-  var battorDataLineRE = /^(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)$/;
+  var battorDataLineRE = new RegExp(
+      '^(\\d+\\.\\d+)\\s+(\\d+\\.\\d+)\\s+(\\d+\\.\\d+)' +
+      '(?:\\s+<(\\S+)>)?$'
+  );
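+  // The optional fourth capture group is an explicit clock sync ID, if any.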
   var battorHeaderLineRE = /^# BattOr/;
-  var sampleRateLineRE = /^# sample_rate=(\d+)Hz/;
+  var sampleRateLineRE = /^# sample_rate (\d+) Hz/;
 
   /**
    * Guesses whether the provided events is a BattOr string.
@@ -53,6 +57,10 @@
   BattorImporter.prototype = {
     __proto__: tr.importer.Importer.prototype,
 
+    get importerName() {
+      return 'BattorImporter';
+    },
+
     get model() {
       return this.model_;
     },
@@ -60,7 +68,7 @@
     /**
      * Imports the data in this.events_ into model_.
      */
-    importEvents: function(isSecondaryImport) {
+    importEvents: function() {
       // Fail if the model already has a Power counter.
       if (this.model_.device.powerSeries) {
         this.model_.importWarning({
@@ -75,18 +83,14 @@
       var series = new tr.model.PowerSeries(this.model_.device);
       this.importPowerSamples(series);
 
-      // Find the sync markers.
-      var syncMarks = this.model_.getClockSyncRecordsNamed('battor');
-      if (syncMarks.length < 1) {
-        this.model_.importWarning({
-          type: 'clock_sync',
-          message: 'Cannot import BattOr power trace without a sync signal.'
-        });
-        return;
-      }
+      // Find the sync markers that are identified as being for the BattOr.
+      var battorSyncMarks = this.model_.getClockSyncRecordsWithSyncId('battor');
 
       // Try each of the clock sync techniques in order of their accuracy.
-      var shiftTs = this.correlationClockSync(syncMarks, series);
+      var shiftTs = undefined;
+      shiftTs = this.correlationClockSync(battorSyncMarks, series);
+      if (shiftTs === undefined)
+        shiftTs = this.explicitClockSync();
 
       if (shiftTs === undefined) {
         this.model_.importWarning({
@@ -134,15 +138,27 @@
             return;
           }
 
+          // Add power sample.
           var time = parseFloat(groups[1]) + minTs;
           var voltage_mV = parseFloat(groups[2]);
           var current_mA = parseFloat(groups[3]);
           series.addPowerSample(time, (voltage_mV * current_mA) / 1000);
+
+          // Found first explicit clock sync - save it.
+          if (groups[4] !== undefined &&
+            this.explicitSyncMark_ === undefined) {
+            var id = groups[4];
+            this.explicitSyncMark_ = {'id' : id, 'ts' : time};
+          }
         }
       }, this);
     },
 
     correlationClockSync: function(syncMarks, series) {
+      // Check that both markers surrounding the sync signal are present.
+      if (syncMarks.length !== 2)
+        return undefined;
+
       // Find the regulator counter for the sync.
       var syncCtr = this.model_.kernel.counters[
           'null.vreg ' + syncMarks[0].args['regulator'] + ' enabled'];
@@ -158,8 +174,8 @@
       var syncEvents = [];
       var firstSyncEventTs = undefined;
       syncCtr.series[0].iterateAllEvents(function(event) {
-        if (event.timestamp >= syncMarks[0].ts &&
-            event.timestamp <= syncMarks[1].ts) {
+        if (event.timestamp >= syncMarks[0].start &&
+            event.timestamp <= syncMarks[1].start) {
           if (firstSyncEventTs === undefined)
             firstSyncEventTs = event.timestamp;
           var newEvent = {
@@ -236,9 +252,46 @@
       var shiftTs = syncStartTs - corrPeakTs;
 
       return shiftTs;
+    },
+
+    explicitClockSync: function() {
+      // Check to see if an explicit clock sync was found in the BattOr trace.
+      if (this.explicitSyncMark_ === undefined)
+        return undefined;
+
+      // Try to get the matching clock sync record for this explicit sync.
+      var syncMarks = this.model.getClockSyncRecordsWithSyncId(
+          this.explicitSyncMark_['id']);
+      if (syncMarks.length !== 1) {
+        this.model_.importWarning({
+          type: 'missing_sync_marker',
+          message: 'No single clock sync record found for explicit clock sync.'
+        });
+        return undefined;
+      }
+
+      var clockSync = syncMarks[0];
+
+      // TODO(aschulman) The actual time of sync is assumed to be half-way
+      // between when the sync message was sent and when it was received.
+      // Chromium's serial I/O library sends bytes within a millisecond;
+      // however, it seems to take tens of milliseconds to receive bytes.
+      // Therefore, until we figure out how to receive bytes without this
+      // delay, the time of sync is set to the time when the bytes are sent.
+      var syncTs = clockSync.start;
+      var traceTs = this.explicitSyncMark_['ts'];
+
+      // Shift by the difference between the explicit sync timestamps.
+      return syncTs - traceTs;
+    },
+
+    foundExplicitSyncMark: function() {
+      return this.explicitSyncMark_ !== undefined;
     }
   };
 
+
+
   tr.importer.Importer.register(BattorImporter);
 
   return {
diff --git a/catapult/tracing/tracing/extras/importer/battor_importer_test.html b/catapult/tracing/tracing/extras/importer/battor_importer_test.html
index f25d976..f1a1c29 100644
--- a/catapult/tracing/tracing/extras/importer/battor_importer_test.html
+++ b/catapult/tracing/tracing/extras/importer/battor_importer_test.html
@@ -8,18 +8,31 @@
 <link rel="import" href="/tracing/core/test_utils.html">
 <link rel="import" href="/tracing/extras/importer/battor_importer.html">
 <link rel="import" href="/tracing/extras/importer/linux_perf/ftrace_importer.html">
+<link rel="import" href="/tracing/extras/importer/trace_event_importer.html">
 
 <script>
 'use strict';
 
 tr.b.unittest.testSuite(function() {
 
+  var CHROMIUM_EVENTS = [
+    {
+      name: 'a', args: {}, pid: 52, ts: 0,
+      cat: 'foo', tid: 53, ph: 'B'
+    },
+    {
+      pid: 94936, tid: 5643, ts: 11000,
+      ph: 'c', cat: '__metadata', name: 'clock_sync',
+      args: {sync_id: 'ABCDEF-01234-5678-0A1B2C3D', issue_ts: 10000},
+      tts: 16496444
+    }
+  ];
+
   var BATTOR_LINES = [
     '# BattOr',
-    '# voltage range [0.000000, 6144.000000] mV',
-    '# current range [0.000000, 2275.555556] mA',
-    '# sample_rate=2000Hz, gain=5.000000x',
-    '# filpot_pos=4, amppot_pos=256, timer_ovf=499, timer_div=4 ovs_bits=1',
+    '# voltage_range [0.0, 6144.0] mV',
+    '# current_range [0.0, 2275.5] mA',
+    '# sample_rate 2000 Hz, gain 5.0x',
     '0.000000 0.000000 4000.000000',
     '0.500000 0.000000 4000.000000',
     '1.000000 0.000000 4000.000000',
@@ -54,7 +67,7 @@
         BATTOR_LINES.join('\n')));
   });
 
-  test('importPowerSamplesAndTrace', function() {
+  test('importCrossCorrelationSync', function() {
     var m = tr.c.TestUtils.newModelWithEvents(
         [SYSTRACE_LINES.join('\n'), BATTOR_LINES.join('\n')]);
 
@@ -69,11 +82,74 @@
     assert.equal(m.device.powerSeries.samples[1].start, 0.5);
   });
 
+  test('importExplicitClockSync', function() {
+    // Add a BattOr sample with an explicit clock sync.
+    var battorLinesWithExplicitSync = BATTOR_LINES.slice();
+    battorLinesWithExplicitSync.push(
+      '2.500000 1.000000 4000.000000 <ABCDEF-01234-5678-0A1B2C3D>');
+
+    var m = tr.c.TestUtils.newModelWithEvents(
+        [CHROMIUM_EVENTS, battorLinesWithExplicitSync.join('\n')]);
+
+    // Check to see if power samples were imported successfully.
+    assert.isDefined(m.device.powerSeries);
+
+    // Check to see if the power trace is time shifted correctly.
+    assert.equal(m.device.powerSeries.samples[0].start, 7.5);
+    assert.equal(m.device.powerSeries.samples[5].start, 10.0);
+  });
+
   test('importMissingLinuxTrace', function() {
     var m = tr.c.TestUtils.newModelWithEvents(BATTOR_LINES.join('\n'));
     assert.isTrue(m.hasImportWarnings);
   });
 
+  test('crossCorrelateWithoutSyncMarkers', function() {
+    // Create model.
+    var m = new tr.Model();
+    var io = new tr.importer.ImportOptions();
+    io.showImportWarnings = false;
+    m.importOptions = io;
+
+    // Create importer and import power trace.
+    var importer = new tr.e.importer.battor.BattorImporter(
+        m, BATTOR_LINES.join('\n'));
+    var series = new tr.model.PowerSeries(m.device);
+    importer.importPowerSamples(series);
+
+    // Check to make sure correlation sync fails because there are no marks.
+    var syncMarks = [];
+    assert.isUndefined(importer.correlationClockSync(syncMarks, series));
+    assert.isUndefined(m.device.powerSeries);
+  });
+
+  test('explicitClockSyncWithoutSyncMarkers', function() {
+    // Create an empty model.
+    var m = new tr.Model();
+    var io = new tr.importer.ImportOptions();
+    io.showImportWarnings = false;
+    m.importOptions = io;
+
+    // Add a BattOr sample with an explicit clock sync.
+    var battorLinesWithExplicitSync = BATTOR_LINES.slice();
+    battorLinesWithExplicitSync.push(
+      '2.500000 1.000000 4000.000000 <ABCDEF-01234-5678-0A1B2C3D>');
+
+    // Create BattOr importer and import the trace.
+    var importer = new tr.e.importer.battor.BattorImporter(
+        m, battorLinesWithExplicitSync.join('\n'));
+    var series = new tr.model.PowerSeries(m.device);
+    importer.importPowerSamples(series);
+
+    // Check to see if explicit clock sync was found by parser.
+    assert.isTrue(importer.foundExplicitSyncMark());
+
+    // Check that explicit sync fails: the model has no matching sync record.
+    var syncMarks = [];
+    assert.isUndefined(importer.explicitClockSync(series, syncMarks));
+    assert.isUndefined(m.device.powerSeries);
+  });
+
   test('importNotEnoughSamples', function() {
     var m = tr.c.TestUtils.newModelWithEvents(
         BATTOR_LINES.slice(0, 5).join('\n'));
diff --git a/catapult/tracing/tracing/extras/importer/ddms_importer.html b/catapult/tracing/tracing/extras/importer/ddms_importer.html
index 03e97b0..fd760c7 100644
--- a/catapult/tracing/tracing/extras/importer/ddms_importer.html
+++ b/catapult/tracing/tracing/extras/importer/ddms_importer.html
@@ -6,8 +6,8 @@
 -->
 
 <link rel="import" href="/tracing/extras/importer/jszip.html">
-<link rel="import" href="/tracing/model/model.html">
 <link rel="import" href="/tracing/importer/importer.html">
+<link rel="import" href="/tracing/model/model.html">
 
 <script>
 /**
@@ -112,6 +112,10 @@
   DdmsImporter.prototype = {
     __proto__: tr.importer.Importer.prototype,
 
+    get importerName() {
+      return 'DdmsImporter';
+    },
+
     get model() {
       return this.model_;
     },
@@ -119,7 +123,7 @@
     /**
      * Imports the data in this.data_ into this.model_.
      */
-    importEvents: function(isSecondaryImport) {
+    importEvents: function() {
       var divider = this.data_.indexOf(kMethodLutEndMarker) +
           kMethodLutEndMarker.length;
       this.metadata_ = this.data_.slice(0, divider);
diff --git a/catapult/tracing/tracing/extras/importer/ddms_importer_test.html b/catapult/tracing/tracing/extras/importer/ddms_importer_test.html
index c741aa1..997556b 100644
--- a/catapult/tracing/tracing/extras/importer/ddms_importer_test.html
+++ b/catapult/tracing/tracing/extras/importer/ddms_importer_test.html
@@ -5,6 +5,7 @@
 found in the LICENSE file.
 -->
 
+<link rel="import" href="/tracing/base/base64.html">
 <link rel="import" href="/tracing/core/test_utils.html">
 <link rel="import" href="/tracing/extras/importer/ddms_importer.html">
 
@@ -12,6 +13,8 @@
 'use strict';
 
 tr.b.unittest.testSuite(function() {
+  var Base64 = tr.b.Base64;
+
   test('canImport', function() {
     assert.isFalse(tr.e.importer.ddms.DdmsImporter.canImport('string'));
     assert.isFalse(tr.e.importer.ddms.DdmsImporter.canImport([]));
@@ -46,7 +49,8 @@
                  thread.sliceGroup.slices[0].title);
   });
 
-  var TEST_DATA = atob('KnZlcnNpb24KMwpkYXRhLWZpbGUtb3ZlcmZsb3c9ZmFsc2UKY2' +
+  var TEST_DATA = Base64.atob(
+      'KnZlcnNpb24KMwpkYXRhLWZpbGUtb3ZlcmZsb3c9ZmFsc2UKY2' +
       'xvY2s9ZHVhbAplbGFwc2VkLXRpbWUtdXNlYz02MzMwNzc5Cm51' +
       'bS1tZXRob2QtY2FsbHM9NzYKY2xvY2stY2FsbC1vdmVyaGVhZC' +
       '1uc2VjPTMzNDMKdm09YXJ0Cip0aHJlYWRzCjI3MDMJbWFpbgoy' +
diff --git a/catapult/tracing/tracing/extras/importer/etw/etw_importer.html b/catapult/tracing/tracing/extras/importer/etw/etw_importer.html
index b3f57f6..a128e4f 100644
--- a/catapult/tracing/tracing/extras/importer/etw/etw_importer.html
+++ b/catapult/tracing/tracing/extras/importer/etw/etw_importer.html
@@ -5,11 +5,11 @@
 found in the LICENSE file.
 -->
 
+<link rel="import" href="/tracing/base/base64.html">
 <link rel="import" href="/tracing/extras/importer/etw/eventtrace_parser.html">
 <link rel="import" href="/tracing/extras/importer/etw/process_parser.html">
 <link rel="import" href="/tracing/extras/importer/etw/thread_parser.html">
 <link rel="import" href="/tracing/importer/importer.html">
-<link rel="import" href="/tracing/base/base64.html">
 <link rel="import" href="/tracing/model/model.html">
 
 <script>
@@ -300,6 +300,10 @@
   EtwImporter.prototype = {
     __proto__: tr.importer.Importer.prototype,
 
+    get importerName() {
+      return 'EtwImporter';
+    },
+
     get model() {
       return this.model_;
     },
@@ -343,7 +347,7 @@
     /**
      * Imports the data in this.events_ into this.model_.
      */
-    importEvents: function(isSecondaryImport) {
+    importEvents: function() {
       this.events_.content.forEach(this.parseInfo.bind(this));
 
       if (this.walltime_ == undefined || this.ticks_ == undefined)
diff --git a/catapult/tracing/tracing/extras/importer/etw/etw_importer_test.html b/catapult/tracing/tracing/extras/importer/etw/etw_importer_test.html
index 4c4d50e..91a94d6 100644
--- a/catapult/tracing/tracing/extras/importer/etw/etw_importer_test.html
+++ b/catapult/tracing/tracing/extras/importer/etw/etw_importer_test.html
@@ -5,6 +5,7 @@
 found in the LICENSE file.
 -->
 
+<link rel="import" href="/tracing/base/base64.html">
 <link rel="import" href="/tracing/core/test_utils.html">
 <link rel="import" href="/tracing/extras/importer/etw/etw_importer.html">
 
@@ -12,6 +13,8 @@
 'use strict';
 
 tr.b.unittest.testSuite(function() {
+  var Base64 = tr.b.Base64;
+
   test('canImport', function() {
     assert.isFalse(tr.e.importer.etw.EtwImporter.canImport('string'));
     assert.isFalse(tr.e.importer.etw.EtwImporter.canImport([]));
@@ -69,7 +72,8 @@
 
     // Try to parse a valid event.
     var valid_event = {
-      guid: 'aaaa', 'op': 42, 'ver': 0, 'cpu': 0, 'ts': 0, 'payload': btoa('0')
+      guid: 'aaaa', 'op': 42, 'ver': 0, 'cpu': 0, 'ts': 0,
+      'payload': Base64.btoa('0')
     };
     assert.isTrue(importer.parseEvent(valid_event));
     assert.isTrue(handler_called);
diff --git a/catapult/tracing/tracing/extras/importer/gcloud_trace/gcloud_trace_importer.html b/catapult/tracing/tracing/extras/importer/gcloud_trace/gcloud_trace_importer.html
index 71a8353..b110a7e 100644
--- a/catapult/tracing/tracing/extras/importer/gcloud_trace/gcloud_trace_importer.html
+++ b/catapult/tracing/tracing/extras/importer/gcloud_trace/gcloud_trace_importer.html
@@ -39,6 +39,10 @@
 
     __proto__: tr.importer.Importer.prototype,
 
+    get importerName() {
+      return 'GcloudTraceImporter';
+    },
+
     /**
      * Called by the Model to extract subtraces from the event data. The
      * subtraces are passed on to other importers that can recognize them.
diff --git a/catapult/tracing/tracing/extras/importer/gzip_importer.html b/catapult/tracing/tracing/extras/importer/gzip_importer.html
index eb696a1..8dfd2f1 100644
--- a/catapult/tracing/tracing/extras/importer/gzip_importer.html
+++ b/catapult/tracing/tracing/extras/importer/gzip_importer.html
@@ -113,12 +113,54 @@
     // Inflate the data using jszip.
     var inflated_data =
         JSZip.compressions['DEFLATE'].uncompress(data.subarray(position));
-    return JSZip.utils.transformTo('string', inflated_data);
-  },
+    var string = GzipImporter.transformToString(inflated_data);
+
+    if (inflated_data.length > 0 && string.length === 0) {
+      throw new RangeError('Inflated gzip data too long to fit into a string' +
+          ' (' + inflated_data.length + ').');
+    }
+
+    return string;
+  };
+
+  /**
+   * Transforms an array-like object to a string.
+   *
+   * Note that the following two expressions yield identical results:
+   *
+   *   GzipImporter.transformToString_(data)
+   *   JSZip.utils.transformTo('string', data)
+   *
+   * We use a custom static method because it is faster and, more importantly,
+   * avoids OOMing on large traces. See
+   * https://github.com/catapult-project/catapult/issues/2051.
+   */
+  GzipImporter.transformToString = function(data) {
+    if (typeof TextDecoder === 'undefined') {
+      // Fall back to jszip if TextDecoder is not available.
+      return JSZip.utils.transformTo('string', data);
+    }
+
+    var type = JSZip.utils.getTypeOf(data);
+    if (type === 'string')
+      return data;  // We already have a string.
+
+    if (type === 'array') {
+      // TextDecoder requires an ArrayBuffer or an ArrayBufferView.
+      data = new Uint8Array(data);
+    }
+
+    var decoder = new TextDecoder('utf-8');
+    return decoder.decode(data);
+  };
 
   GzipImporter.prototype = {
     __proto__: tr.importer.Importer.prototype,
 
+    get importerName() {
+      return 'GzipImporter';
+    },
+
     /**
      * Called by the Model to check whether the importer just encapsulates
      * the actual trace data which needs to be imported by another importer.
diff --git a/catapult/tracing/tracing/extras/importer/gzip_importer_test.html b/catapult/tracing/tracing/extras/importer/gzip_importer_test.html
index 2b354eb..f226aae 100644
--- a/catapult/tracing/tracing/extras/importer/gzip_importer_test.html
+++ b/catapult/tracing/tracing/extras/importer/gzip_importer_test.html
@@ -5,14 +5,16 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/tracing/extras/importer/gzip_importer.html">
+<link rel="import" href="/tracing/base/base64.html">
 <link rel="import" href="/tracing/core/test_utils.html">
+<link rel="import" href="/tracing/extras/importer/gzip_importer.html">
 <link rel="import" href="/tracing/extras/importer/trace_event_importer.html">
 
 <script>
 'use strict';
 
 tr.b.unittest.testSuite(function() {
+  var Base64 = tr.b.Base64;
   var findSliceNamed = tr.c.TestUtils.findSliceNamed;
   var original_data =
       '[{"name":"a","args":{},"pid":52,"ts":520,"cat":"foo","tid":53,' +
@@ -30,7 +32,7 @@
 
   test('inflateString', function() {
     // Test inflating the data from a string.
-    var gzip_data = atob(gzip_data_base64);
+    var gzip_data = Base64.atob(gzip_data_base64);
     var importer = new tr.e.importer.GzipImporter(null, gzip_data);
     assert.isTrue(tr.e.importer.GzipImporter.canImport(gzip_data));
     assert.equal(importer.extractSubtraces()[0], original_data);
@@ -38,7 +40,7 @@
 
   test('inflateArrayBuffer', function() {
     // Test inflating the data from an ArrayBuffer.
-    var gzip_data = atob(gzip_data_base64);
+    var gzip_data = Base64.atob(gzip_data_base64);
     var buffer = new ArrayBuffer(gzip_data.length);
     var view = new Uint8Array(buffer);
     for (var i = 0; i < gzip_data.length; i++)
@@ -49,7 +51,7 @@
   });
 
   test('import', function() {
-    var gzip_data = atob(gzip_data_base64);
+    var gzip_data = Base64.atob(gzip_data_base64);
     assert.isTrue(tr.e.importer.GzipImporter.canImport(gzip_data));
 
     var model = tr.c.TestUtils.newModelWithEvents(gzip_data);
@@ -59,6 +61,45 @@
     var slice = findSliceNamed(threads[0].sliceGroup, 'a');
     assert.equal(slice.category, 'foo');
   });
+
+  test('transformToString', function() {
+    function checkTransform(data, expectedString) {
+      assert.strictEqual(tr.e.importer.GzipImporter.transformToString(data),
+          expectedString);
+    }
+
+    function createArrayBuffer(values) {
+      var buffer = new ArrayBuffer(values.length);
+      var view = new Uint8Array(buffer);
+      view.set(values);
+      return buffer;
+    }
+
+    // If the browser supports TextDecoder, this will test our custom
+    // implementation. Otherwise, the jszip fallback will be tested.
+    checkTransform('abc012', 'abc012');
+    checkTransform([100, 101, 102, 51, 52, 53], 'def345');
+    checkTransform(createArrayBuffer([103, 104, 105, 54, 55, 56]), 'ghi678');
+    checkTransform(new Uint8Array([106, 107, 108, 57, 58, 59]), 'jkl9:;');
+
+    if (typeof TextDecoder === 'undefined') {
+      // The browser doesn't support TextDecoder, so we have already checked
+      // the jszip fallback.
+      return;
+    }
+
+    // The browser supports TextDecoder, so we now check the jszip fallback.
+    var oldTextDecoder = TextDecoder;
+    TextDecoder = undefined;
+    try {
+      checkTransform('abc012', 'abc012');
+      checkTransform([100, 101, 102, 51, 52, 53], 'def345');
+      checkTransform(createArrayBuffer([103, 104, 105, 54, 55, 56]), 'ghi678');
+      checkTransform(new Uint8Array([106, 107, 108, 57, 58, 59]), 'jkl9:;');
+    } finally {
+      TextDecoder = oldTextDecoder;
+    }
+  });
 });
 </script>
 
diff --git a/catapult/tracing/tracing/extras/importer/jszip.html b/catapult/tracing/tracing/extras/importer/jszip.html
index 5cd3c5e..0840c06 100644
--- a/catapult/tracing/tracing/extras/importer/jszip.html
+++ b/catapult/tracing/tracing/extras/importer/jszip.html
@@ -8,8 +8,8 @@
 
 <script>
 'use strict';
-// Workaround for JSzip requiring window.
-if (tr.isHeadless) {
+// Vinn workaround for JSzip requiring window.
+if (tr.isVinn) {
   /**
    * Hack.
    */
@@ -19,12 +19,17 @@
 <script src="/jszip.min.js"></script>
 <script>
 'use strict';
-// Workaround for JSzip requiring window.
-if (tr.isHeadless) {
+// Vinn workaround for JSzip requiring window.
+if (tr.isVinn) {
   /**
    * Hack.
    */
   global.JSZip = global.window.JSZip;
   global.window = undefined;
+} else if (tr.isNode) {
+  var jsZipAbsPath = HTMLImportsLoader.hrefToAbsolutePath(
+      '/jszip.min.js');
+  var jsZipModule = require(jsZipAbsPath);
+  global.JSZip = jsZipModule;
 }
 </script>
diff --git a/catapult/tracing/tracing/extras/importer/linux_perf/ftrace_importer.html b/catapult/tracing/tracing/extras/importer/linux_perf/ftrace_importer.html
index 5851ac3..c8a0dd3 100644
--- a/catapult/tracing/tracing/extras/importer/linux_perf/ftrace_importer.html
+++ b/catapult/tracing/tracing/extras/importer/linux_perf/ftrace_importer.html
@@ -27,6 +27,7 @@
 <link rel="import" href="/tracing/extras/importer/linux_perf/workqueue_parser.html">
 <link rel="import" href="/tracing/importer/importer.html">
 <link rel="import" href="/tracing/importer/simple_line_reader.html">
+<link rel="import" href="/tracing/model/clock_sync_record.html">
 <link rel="import" href="/tracing/model/model.html">
 
 <script>
@@ -46,7 +47,7 @@
 'use strict';
 
 tr.exportTo('tr.e.importer.linux_perf', function() {
-  var ClockSyncRecord = tr.ClockSyncRecord;
+  var InstantClockSyncRecord = tr.model.InstantClockSyncRecord;
 
   /**
    * Imports linux perf events into a specified model.
@@ -315,6 +316,10 @@
   LinuxPerfImporter.prototype = {
     __proto__: tr.importer.Importer.prototype,
 
+    get importerName() {
+      return 'LinuxPerfImporter';
+    },
+
     get model() {
       return this.model_;
     },
@@ -648,10 +653,10 @@
      * the necessary sync records were not found.
      */
     computeTimeTransform: function() {
-      var isSecondaryImport = this.model.getClockSyncRecordsNamed(
+      var isSecondaryImport = this.model.getClockSyncRecordsWithSyncId(
           'ftrace_importer').length !== 0;
 
-      var mSyncs = this.model_.getClockSyncRecordsNamed('monotonic');
+      var mSyncs = this.model_.getClockSyncRecordsWithSyncId('monotonic');
       // If this is a secondary import, and no clock syncing records were
       // found, then abort the import. Otherwise, just skip clock alignment.
       if (mSyncs.length == 0)
@@ -749,7 +754,7 @@
             throw new Error('omgbbq');
           args[parts[0]] = parts[1];
         }
-        this.addClockSyncRecord(new ClockSyncRecord(name, ts, args));
+        this.addClockSyncRecord(new InstantClockSyncRecord(name, ts, args));
         return true;
       }
 
@@ -758,7 +763,7 @@
       if (!event)
         return false;
 
-      this.addClockSyncRecord(new ClockSyncRecord('monotonic', ts, {
+      this.addClockSyncRecord(new InstantClockSyncRecord('monotonic', ts, {
         perfTS: ts,
         parentTS: event[1] * 1000
       }));
@@ -830,7 +835,7 @@
 
     shiftNewlyAddedClockSyncRecords: function(timeShift) {
       this.newlyAddedClockSyncRecords_.forEach(function(csr) {
-        csr.ts += timeShift;
+        csr.start += timeShift;
       });
     },
 
diff --git a/catapult/tracing/tracing/extras/importer/linux_perf/ftrace_importer_test.html b/catapult/tracing/tracing/extras/importer/linux_perf/ftrace_importer_test.html
index 40e858b..ddbd994 100644
--- a/catapult/tracing/tracing/extras/importer/linux_perf/ftrace_importer_test.html
+++ b/catapult/tracing/tracing/extras/importer/linux_perf/ftrace_importer_test.html
@@ -7,7 +7,6 @@
 
 <link rel="import" href="/tracing/core/test_utils.html">
 <link rel="import" href="/tracing/extras/importer/linux_perf/ftrace_importer.html">
-<link rel="import" href="/tracing/base/xhr.html">
 
 <script>
 'use strict';
@@ -186,9 +185,9 @@
     var m = newModel(lines.join('\n'));
     assert.isFalse(m.hasImportWarnings);
 
-    var battorSyncs = m.getClockSyncRecordsNamed('battor');
+    var battorSyncs = m.getClockSyncRecordsWithSyncId('battor');
     assert.equal(battorSyncs.length, 1);
-    assert.equal(battorSyncs[0].ts, 107464000.0);
+    assert.equal(battorSyncs[0].start, 107464000.0);
     assert.equal(battorSyncs[0].args.perfTS, 107464000.0);
     assert.equal(battorSyncs[0].args.regulator, '8941_smbb_boost');
   });
diff --git a/catapult/tracing/tracing/extras/importer/trace2html_importer.html b/catapult/tracing/tracing/extras/importer/trace2html_importer.html
index 077f337..758f66a 100644
--- a/catapult/tracing/tracing/extras/importer/trace2html_importer.html
+++ b/catapult/tracing/tracing/extras/importer/trace2html_importer.html
@@ -68,6 +68,10 @@
   Trace2HTMLImporter.prototype = {
     __proto__: tr.importer.Importer.prototype,
 
+    get importerName() {
+      return 'Trace2HTMLImporter';
+    },
+
     isTraceDataContainer: function() {
       return true;
     },
diff --git a/catapult/tracing/tracing/extras/importer/trace2html_importer_test.html b/catapult/tracing/tracing/extras/importer/trace2html_importer_test.html
index 90c4b85..8d91c14 100644
--- a/catapult/tracing/tracing/extras/importer/trace2html_importer_test.html
+++ b/catapult/tracing/tracing/extras/importer/trace2html_importer_test.html
@@ -5,20 +5,23 @@
 found in the LICENSE file.
 -->
 
+<link rel="improt" href="/tracing/base/base64.html">
 <link rel="import" href="/tracing/extras/importer/trace2html_importer.html">
 
 <script>
 'use strict';
 
 tr.b.unittest.testSuite(function() {
+  var Base64 = tr.b.Base64;
+
   test('simple', function() {
     var html_lines = [
       '<!DOCTYPE html>',
       '<script id="viewer-data" type="application/json">',
-      btoa('hello'),
+      Base64.btoa('hello'),
       '<\/script>',
       '<script id="viewer-data" type="text/plain">',
-      btoa('world'),
+      Base64.btoa('world'),
       '<\/script>',
       '</html>'
     ];
diff --git a/catapult/tracing/tracing/extras/importer/trace_event_importer.html b/catapult/tracing/tracing/extras/importer/trace_event_importer.html
index 662eaf7..82f14eb 100644
--- a/catapult/tracing/tracing/extras/importer/trace_event_importer.html
+++ b/catapult/tracing/tracing/extras/importer/trace_event_importer.html
@@ -5,17 +5,17 @@
 found in the LICENSE file.
 -->
 
+<link rel="import" href="/tracing/base/base64.html">
 <link rel="import" href="/tracing/base/color_scheme.html">
-<link rel="import" href="/tracing/base/quad.html">
 <link rel="import" href="/tracing/base/range.html">
-<link rel="import" href="/tracing/base/units/units.html">
 <link rel="import" href="/tracing/base/utils.html">
 <link rel="import" href="/tracing/extras/importer/trace_code_entry.html">
 <link rel="import" href="/tracing/extras/importer/trace_code_map.html">
 <link rel="import" href="/tracing/extras/importer/v8/codemap.html">
 <link rel="import" href="/tracing/importer/importer.html">
-<link rel="import" href="/tracing/model/attribute.html">
+<link rel="import" href="/tracing/model/clock_sync_record.html">
 <link rel="import" href="/tracing/model/comment_box_annotation.html">
+<link rel="import" href="/tracing/model/constants.html">
 <link rel="import" href="/tracing/model/counter_series.html">
 <link rel="import" href="/tracing/model/flow_event.html">
 <link rel="import" href="/tracing/model/global_memory_dump.html">
@@ -25,8 +25,12 @@
 <link rel="import" href="/tracing/model/model.html">
 <link rel="import" href="/tracing/model/process_memory_dump.html">
 <link rel="import" href="/tracing/model/rect_annotation.html">
+<link rel="import" href="/tracing/model/scoped_id.html">
 <link rel="import" href="/tracing/model/slice_group.html">
+<link rel="import" href="/tracing/model/vm_region.html">
 <link rel="import" href="/tracing/model/x_marker_annotation.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <script>
 'use strict';
@@ -36,6 +40,7 @@
  * into the provided model.
  */
 tr.exportTo('tr.e.importer', function() {
+  var Base64 = tr.b.Base64;
   var deepCopy = tr.b.deepCopy;
   var ColorScheme = tr.b.ColorScheme;
 
@@ -48,8 +53,8 @@
     }
   }
 
-  var timestampFromUs = tr.b.u.Units.timestampFromUs;
-  var maybeTimestampFromUs = tr.b.u.Units.maybeTimestampFromUs;
+  var timestampFromUs = tr.v.Unit.timestampFromUs;
+  var maybeTimestampFromUs = tr.v.Unit.maybeTimestampFromUs;
 
   var PRODUCER = 'producer';
   var CONSUMER = 'consumer';
@@ -58,6 +63,41 @@
   var MEMORY_DUMP_LEVELS_OF_DETAIL = [undefined, 'light', 'detailed'];
   var GLOBAL_MEMORY_ALLOCATOR_DUMP_PREFIX = 'global/';
 
+  // Map from raw memory dump byte stat names to model byte stat names. See
+  // //base/trace_event/process_memory_maps.cc in Chromium.
+  var BYTE_STAT_NAME_MAP = {
+    'pc': 'privateCleanResident',
+    'pd': 'privateDirtyResident',
+    'sc': 'sharedCleanResident',
+    'sd': 'sharedDirtyResident',
+    'pss': 'proportionalResident',
+    'sw': 'swapped'
+  };
+
+  // See tr.model.MemoryAllocatorDump 'weak' field and
+  // base::trace_event::MemoryAllocatorDump::Flags::WEAK in the Chromium
+  // codebase.
+  var WEAK_MEMORY_ALLOCATOR_DUMP_FLAG = 1 << 0;
+
+  // Object type name patterns for various compilers.
+  var OBJECT_TYPE_NAME_PATTERNS = [
+    {
+      // Clang.
+      prefix: 'const char *WTF::getStringWithTypeName() [T = ',
+      suffix: ']'
+    },
+    {
+      // GCC.
+      prefix: 'const char* WTF::getStringWithTypeName() [with T = ',
+      suffix: ']'
+    },
+    {
+      // Microsoft Visual C++
+      prefix: 'const char *__cdecl WTF::getStringWithTypeName<',
+      suffix: '>(void)'
+    }
+  ];
+
   function TraceEventImporter(model, eventData) {
     this.importPriority = 1;
     this.model_ = model;
@@ -77,10 +117,14 @@
 
     this.v8ProcessCodeMaps_ = {};
     this.v8ProcessRootStackFrame_ = {};
+    this.v8SamplingData_ = [];
 
     // Dump ID -> PID -> [process memory dump events].
     this.allMemoryDumpEvents_ = {};
 
+    // PID -> Object type ID -> Object type name.
+    this.objectTypeNameMap_ = {};
+
     if (typeof(eventData) === 'string' || eventData instanceof String) {
       eventData = eventData.trim();
       // If the event data begins with a [, then we know it should end with a ].
@@ -115,9 +159,9 @@
       this.systemTraceEvents_ = container.systemTraceEvents;
 
       // Some trace_event implementations put battor power traces as a
-      // huge string inside container.battorLogAsString. If we see that, pull
+      // huge string inside container.powerTraceAsString. If we see that, pull
       // it out. It will be picked up by extractSubtraces later on.
-      this.battorData_ = container.battorLogAsString;
+      this.battorData_ = container.powerTraceAsString;
 
       // Sampling data.
       this.sampleEvents_ = container.samples;
@@ -126,7 +170,7 @@
       // Some implementations specify displayTimeUnit
       if (container.displayTimeUnit) {
         var unitName = container.displayTimeUnit;
-        var unit = tr.b.u.TimeDisplayModes[unitName];
+        var unit = tr.v.TimeDisplayModes[unitName];
         if (unit === undefined) {
           throw new Error('Unit ' + unitName + ' is not supported.');
         }
@@ -134,7 +178,7 @@
       }
 
       var knownFieldNames = {
-        battorLogAsString: true,
+        powerTraceAsString: true,
         samples: true,
         stackFrames: true,
         systemTraceEvents: true,
@@ -189,6 +233,10 @@
   TraceEventImporter.prototype = {
     __proto__: tr.importer.Importer.prototype,
 
+    get importerName() {
+      return 'TraceEventImporter';
+    },
+
     extractSubtraces: function() {
       var systemEventsTmp = this.systemTraceEvents_;
       var battorDataTmp = this.battorData_;
@@ -408,7 +456,29 @@
       return slice;
     },
 
+    processJitCodeEvent: function(event) {
+      if (this.v8ProcessCodeMaps_[event.pid] === undefined)
+        this.v8ProcessCodeMaps_[event.pid] = new tr.e.importer.TraceCodeMap();
+      var map = this.v8ProcessCodeMaps_[event.pid];
+
+      var data = event.args.data;
+      // TODO(dsinclair): There are _a lot_ of JitCode events so I'm skipping
+      // the display for now. Can revisit later if we want to show them.
+      // Handle JitCodeMoved and JitCodeAdded event.
+      if (event.name === 'JitCodeMoved')
+        map.moveEntry(data.code_start, data.new_code_start, data.code_len);
+      else  // event.name === 'JitCodeAdded'
+        map.addEntry(data.code_start, data.code_len, data.name, data.script_id);
+    },
+
     processMetadataEvent: function(event) {
+      // V8 JIT events are currently logged as phase 'M' so we need to
+      // separate them out and handle specially.
+      if (event.name === 'JitCodeAdded' || event.name === 'JitCodeMoved') {
+        this.v8SamplingData_.push(event);
+        return;
+      }
+
       // The metadata events aren't useful without args.
       if (event.argsStripped)
         return;
@@ -449,8 +519,18 @@
                 '\' metadata event'
           });
         } else {
-          this.importStackFrames_(
-              stackFrames, 'p' + event.pid + ':', true /* addRootFrame */);
+          this.importStackFrames_(stackFrames, 'p' + event.pid + ':');
+        }
+      } else if (event.name === 'typeNames') {
+        var objectTypeNameMap = event.args.typeNames;
+        if (objectTypeNameMap === undefined) {
+          this.model_.importWarning({
+            type: 'metadata_parse_error',
+            message: 'No mapping from object type IDs to names found in a \'' +
+                event.name + '\' metadata event'
+          });
+        } else {
+          this.importObjectTypeNameMap_(objectTypeNameMap, event.pid);
         }
       } else {
         this.model_.importWarning({
@@ -460,26 +540,11 @@
       }
     },
 
-    processJitCodeEvent: function(event) {
-      if (this.v8ProcessCodeMaps_[event.pid] === undefined)
-        this.v8ProcessCodeMaps_[event.pid] = new tr.e.importer.TraceCodeMap();
-      var map = this.v8ProcessCodeMaps_[event.pid];
-
-      var data = event.args.data;
-      if (event.name === 'JitCodeMoved')
-        map.moveEntry(data.code_start, data.new_code_start, data.code_len);
-      else
-        map.addEntry(data.code_start, data.code_len, data.name, data.script_id);
-    },
-
     processInstantEvent: function(event) {
-      // V8 JIT events are logged as phase 'I' so we need to separate them out
-      // and handle specially.
-      //
-      // TODO(dsinclair): There are _a lot_ of JitCode events so I'm skipping
-      // the display for now. Can revisit later if we want to show them.
+      // V8 JIT events were logged as phase 'I' in the old format,
+      // so we need to separate them out and handle specially.
       if (event.name === 'JitCodeAdded' || event.name === 'JitCodeMoved') {
-        this.processJitCodeEvent(event);
+        this.v8SamplingData_.push(event);
         return;
       }
 
@@ -511,12 +576,12 @@
 
       switch (instantEvent.type) {
         case tr.model.InstantEventType.GLOBAL:
-          this.model_.pushInstantEvent(instantEvent);
+          this.model_.instantEvents.push(instantEvent);
           break;
 
         case tr.model.InstantEventType.PROCESS:
           var process = this.model_.getOrCreateProcess(event.pid);
-          process.pushInstantEvent(instantEvent);
+          process.instantEvents.push(instantEvent);
           break;
 
         default:
@@ -609,7 +674,7 @@
 
     processTraceSampleEvent: function(event) {
       if (event.name === 'V8Sample') {
-        this.processV8Sample(event);
+        this.v8SamplingData_.push(event);
         return;
       }
 
@@ -677,33 +742,102 @@
       processEvents.push(event);
     },
 
+    processClockSyncEvent: function(event) {
+      if (event.ph !== 'c')
+        throw new Error('Invalid clock sync event phase "' + event.ph + '".');
+
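+      // issue_ts marks when the sync request was issued; the event's own ts
+      // marks when it completed, so the two bound the sync round trip.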
+      var syncId = event.args.sync_id;
+      var issueStartTs = event.args.issue_ts;
+      var issueEndTs = event.ts;
+
+      if (syncId === undefined) {
+        this.model_.importWarning({
+          type: 'clock_sync_parse_error',
+          message: 'Clock sync at time ' + issueEndTs + ' without an ID.'
+        });
+        return;
+      }
+
+      if (issueStartTs === undefined) {
+        this.model_.importWarning({
+          type: 'clock_sync_parse_error',
+          message: 'Clock sync at time ' + issueEndTs + ' with ID ' + syncId +
+              ' without a start timestamp.'
+        });
+        return;
+      }
+
+      this.model_.clockSyncRecords.push(new tr.model.PingPongClockSyncRecord(
+          syncId, timestampFromUs(issueStartTs),
+          timestampFromUs(issueEndTs - issueStartTs)));
+    },
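For reference, a hypothetical raw event of the kind processClockSyncEvent
parses: phase 'c', the sync ID and issue start time in args, and the issue end
time in the event's own ts field (all values made up for the example).

    var exampleClockSyncEvent = {
      ph: 'c', name: 'clock_sync', pid: 1, tid: 1, ts: 20350,
      args: {sync_id: 'abc123', issue_ts: 20340}
    };
    // Yields a PingPongClockSyncRecord with ID 'abc123', starting at
    // issue_ts and lasting ts - issue_ts (10 microseconds here).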
+
+    // Because the order of JIT code events and V8 samples is not guaranteed,
+    // we store them in an array, sort by timestamp, and then process them.
+    processV8Events: function() {
+      this.v8SamplingData_.sort(function(a, b) {
+        if (a.ts !== b.ts)
+          return a.ts - b.ts;
+        if (a.ph === 'M' || a.ph === 'I')
+          return -1;
+        else if (b.ph === 'M' || b.ph === 'I')
+          return 1;
+        return 0;
+      });
+      var length = this.v8SamplingData_.length;
+      for (var i = 0; i < length; ++i) {
+        var event = this.v8SamplingData_[i];
+        if (event.ph === 'M' || event.ph === 'I') {
+          this.processJitCodeEvent(event);
+        } else if (event.ph === 'P') {
+          this.processV8Sample(event);
+        }
+      }
+    },
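A standalone sketch (plain data, not the importer's internals) of the ordering
rule applied above: at equal timestamps, JIT code events (phase 'M' or 'I')
sort ahead of the 'P' samples that refer to the code they describe.

    var v8Events = [
      {ph: 'P', ts: 4, name: 'V8Sample'},
      {ph: 'M', ts: 4, name: 'JitCodeAdded'},
      {ph: 'M', ts: 1, name: 'JitCodeAdded'}
    ];
    v8Events.sort(function(a, b) {
      if (a.ts !== b.ts)
        return a.ts - b.ts;
      if (a.ph === 'M' || a.ph === 'I')
        return -1;
      else if (b.ph === 'M' || b.ph === 'I')
        return 1;
      return 0;
    });
    // Resulting order: JitCodeAdded (ts=1), JitCodeAdded (ts=4),
    // V8Sample (ts=4).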
+
     /**
      * Walks through the events_ list and outputs the structures discovered to
      * model_.
      */
     importEvents: function() {
-      var csr = new tr.ClockSyncRecord('ftrace_importer', 0, {});
+      var csr = new tr.model.InstantClockSyncRecord('ftrace_importer', 0, {});
       this.model_.clockSyncRecords.push(csr);
-      if (this.stackFrameEvents_) {
-        this.importStackFrames_(
-            this.stackFrameEvents_, 'g', false /* addRootFrame */);
-      }
+      if (this.stackFrameEvents_)
+        this.importStackFrames_(this.stackFrameEvents_, 'g');
 
       if (this.traceAnnotations_)
         this.importAnnotations_();
 
+      var importOptions = this.model_.importOptions;
+      var trackDetailedModelStats = importOptions.trackDetailedModelStats;
+
+      var modelStats = this.model_.stats;
+
       var events = this.events_;
       for (var eI = 0; eI < events.length; eI++) {
         var event = events[eI];
+
         if (event.args === '__stripped__') {
           event.argsStripped = true;
           event.args = undefined;
         }
 
+        var eventSizeInBytes;
+        if (trackDetailedModelStats)
+          eventSizeInBytes = JSON.stringify(event).length;
+        else
+          eventSizeInBytes = undefined;
+
         if (event.ph === 'B' || event.ph === 'E') {
+          modelStats.willProcessBasicTraceEvent(
+              'begin_end (non-compact)', event.cat, event.name, event.ts,
+              eventSizeInBytes);
           this.processDurationEvent(event);
 
         } else if (event.ph === 'X') {
+          modelStats.willProcessBasicTraceEvent(
+              'begin_end (compact)', event.cat, event.name, event.ts,
+              eventSizeInBytes);
           var slice = this.processCompleteEvent(event);
           // TODO(yuhaoz): If Chrome supports creating other events with flow,
           // we will need to call processFlowEvent for them also.
@@ -714,32 +848,53 @@
         } else if (event.ph === 'b' || event.ph === 'e' || event.ph === 'n' ||
                    event.ph === 'S' || event.ph === 'F' || event.ph === 'T' ||
                    event.ph === 'p') {
+          modelStats.willProcessBasicTraceEvent(
+              'async', event.cat, event.name, event.ts, eventSizeInBytes);
           this.processAsyncEvent(event);
 
         // Note, I is historic. The instant event marker got changed, but we
         // want to support loading old trace files so we have both I and i.
         } else if (event.ph === 'I' || event.ph === 'i' || event.ph === 'R') {
+          modelStats.willProcessBasicTraceEvent(
+              'instant', event.cat, event.name, event.ts, eventSizeInBytes);
           this.processInstantEvent(event);
 
         } else if (event.ph === 'P') {
+          modelStats.willProcessBasicTraceEvent(
+              'samples', event.cat, event.name, event.ts, eventSizeInBytes);
           this.processTraceSampleEvent(event);
-
         } else if (event.ph === 'C') {
+          modelStats.willProcessBasicTraceEvent(
+              'counters', event.cat, event.name, event.ts, eventSizeInBytes);
           this.processCounterEvent(event);
-
         } else if (event.ph === 'M') {
+          modelStats.willProcessBasicTraceEvent(
+              'metadata', event.cat, event.name, event.ts, eventSizeInBytes);
           this.processMetadataEvent(event);
 
         } else if (event.ph === 'N' || event.ph === 'D' || event.ph === 'O') {
+          modelStats.willProcessBasicTraceEvent(
+              'objects', event.cat, event.name, event.ts, eventSizeInBytes);
           this.processObjectEvent(event);
 
         } else if (event.ph === 's' || event.ph === 't' || event.ph === 'f') {
+          modelStats.willProcessBasicTraceEvent(
+              'flows', event.cat, event.name, event.ts, eventSizeInBytes);
           this.processFlowEvent(event);
 
         } else if (event.ph === 'v') {
+          modelStats.willProcessBasicTraceEvent(
+              'memory_dumps', event.cat, event.name, event.ts,
+              eventSizeInBytes);
           this.processMemoryDumpEvent(event);
 
+        } else if (event.ph === 'c') {
+          modelStats.willProcessBasicTraceEvent(
+              'clock_sync', event.cat, event.name, event.ts, eventSizeInBytes);
+          this.processClockSyncEvent(event);
         } else {
+          modelStats.willProcessBasicTraceEvent(
+              'unknown', event.cat, event.name, event.ts, eventSizeInBytes);
           this.model_.importWarning({
             type: 'parse_error',
             message: 'Unrecognized event phase: ' +
@@ -747,6 +902,7 @@
           });
         }
       }
+      this.processV8Events();
 
       // Remove all the root stack frame children as they should
       // already be added.
@@ -755,26 +911,9 @@
       });
     },
 
-    importStackFrames_: function(rawStackFrames, idPrefix, addRootFrame) {
+    importStackFrames_: function(rawStackFrames, idPrefix) {
       var model = this.model_;
 
-      var rootStackFrame;
-      if (addRootFrame) {
-        // In certain cases (heap dumps), we need to be able to distinguish
-        // between an empty and an undefined stack trace. To this end, we add
-        // an auxiliary root stack frame which is common to all stack frames
-        // in a process. An empty stack trace is then represented by setting
-        // the root stack frame as the leaf stack frame (of the relevant model
-        // object with an associated empty stack trace, e.g. HeapEntry in the
-        // case of heap dumps).
-        rootStackFrame = new tr.model.StackFrame(
-            undefined /* parentFrame */, idPrefix, undefined /* title */,
-            undefined /* colorId */);
-        model.addStackFrame(rootStackFrame);
-      } else {
-        rootStackFrame = undefined;
-      }
-
       for (var id in rawStackFrames) {
         var rawStackFrame = rawStackFrames[id];
         var fullId = idPrefix + id;
@@ -796,7 +935,7 @@
         var parentId = rawStackFrame.parent;
         var parentStackFrame;
         if (parentId === undefined) {
-          parentStackFrame = rootStackFrame;
+          parentStackFrame = undefined;
         } else {
           var parentFullId = idPrefix + parentId;
           parentStackFrame = model.stackFrames[parentFullId];
@@ -807,13 +946,60 @@
                   ' for stack frame \'' + stackFrame.name + '\' (ID ' + fullId +
                   ').'
             });
-            parentStackFrame = rootStackFrame;
           }
         }
         stackFrame.parentFrame = parentStackFrame;
       }
     },
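For context, a hypothetical 'stackFrames' metadata dictionary of the kind fed
to importStackFrames_. The field names follow the standard trace event format
and the values are made up; frames are keyed by index, and 'parent' refers to
another key in the same dictionary.

    var rawStackFrames = {
      '1': {name: 'main', category: 'app'},
      '2': {name: 'RenderFrame', category: 'app', parent: '1'},
      '3': {name: 'Paint', category: 'gpu', parent: '2'}
    };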
 
+    importObjectTypeNameMap_: function(rawObjectTypeNameMap, pid) {
+      if (pid in this.objectTypeNameMap_) {
+        this.model_.importWarning({
+          type: 'metadata_parse_error',
+          message: 'Mapping from object type IDs to names provided for pid=' +
+              pid + ' multiple times.'
+        });
+        return;
+      }
+
+      var objectTypeNamePrefix = undefined;
+      var objectTypeNameSuffix = undefined;
+      var objectTypeNameMap = {};
+      for (var objectTypeId in rawObjectTypeNameMap) {
+        var rawObjectTypeName = rawObjectTypeNameMap[objectTypeId];
+
+        // If we haven't yet figured out which compiler the object type names
+        // come from, try to do so now.
+        if (objectTypeNamePrefix === undefined) {
+          for (var i = 0; i < OBJECT_TYPE_NAME_PATTERNS.length; i++) {
+            var pattern = OBJECT_TYPE_NAME_PATTERNS[i];
+            if (rawObjectTypeName.startsWith(pattern.prefix) &&
+                rawObjectTypeName.endsWith(pattern.suffix)) {
+              objectTypeNamePrefix = pattern.prefix;
+              objectTypeNameSuffix = pattern.suffix;
+              break;
+            }
+          }
+        }
+
+        if (objectTypeNamePrefix !== undefined &&
+            rawObjectTypeName.startsWith(objectTypeNamePrefix) &&
+            rawObjectTypeName.endsWith(objectTypeNameSuffix)) {
+          // With compiler-specific prefix and suffix (automatically annotated
+          // object types).
+          objectTypeNameMap[objectTypeId] = rawObjectTypeName.substring(
+               objectTypeNamePrefix.length,
+               rawObjectTypeName.length - objectTypeNameSuffix.length);
+        } else {
+          // Without compiler-specific prefix and suffix (manually annotated
+          // object types and '[unknown]').
+          objectTypeNameMap[objectTypeId] = rawObjectTypeName;
+        }
+      }
+
+      this.objectTypeNameMap_[pid] = objectTypeNameMap;
+    },
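A hypothetical illustration of the stripping performed above. The real
OBJECT_TYPE_NAME_PATTERNS are compiler specific and defined elsewhere in this
importer; the prefix and suffix below are made up for the example.

    var pattern = {prefix: 'TypeName<', suffix: '>'};  // assumed pattern
    var rawObjectTypeName = 'TypeName<content::FrameTree>';
    var strippedName = rawObjectTypeName.substring(
        pattern.prefix.length,
        rawObjectTypeName.length - pattern.suffix.length);
    // strippedName === 'content::FrameTree'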
+
     importAnnotations_: function() {
       for (var id in this.traceAnnotations_) {
         var annotation = tr.model.Annotation.fromDictIfPossible(
@@ -937,14 +1123,6 @@
       }
     },
 
-    /**
-     * Called by the model to join references between objects, after final model
-     * bounds have been computed.
-     */
-    joinRefs: function() {
-      this.joinObjectRefs_();
-    },
-
     createAsyncSlices_: function() {
       if (this.allAsyncEvents_.length === 0)
         return;
@@ -960,6 +1138,7 @@
       // Group nestable async events by ID. Events with the same ID should
       // belong to the same parent async event.
       var nestableAsyncEventsByKey = {};
+      var nestableMeasureAsyncEventsByKey = {};
       for (var i = 0; i < this.allAsyncEvents_.length; i++) {
         var asyncEventState = this.allAsyncEvents_[i];
         var event = asyncEventState.event;
@@ -994,6 +1173,19 @@
           });
           continue;
         }
+
+        if (event.cat === 'blink.user_timing') {
+          var matched = /([^\/:]+):([^\/:]+)\/?(.*)/.exec(event.name);
+          if (matched !== null) {
+            var key = matched[1] + ':' + event.cat;
+            event.args = JSON.parse(Base64.atob(matched[3]) || '{}');
+            if (nestableMeasureAsyncEventsByKey[key] === undefined)
+              nestableMeasureAsyncEventsByKey[key] = [];
+            nestableMeasureAsyncEventsByKey[key].push(asyncEventState);
+            continue;
+          }
+        }
+
         var key = event.cat + ':' + event.id;
         if (nestableAsyncEventsByKey[key] === undefined)
            nestableAsyncEventsByKey[key] = [];
@@ -1002,153 +1194,11 @@
       // Handle legacy async events.
       this.createLegacyAsyncSlices_(legacyEvents);
 
+      // Parse nestable measure async events into AsyncSlices.
+      this.createNestableAsyncSlices_(nestableMeasureAsyncEventsByKey);
+
       // Parse nestable async events into AsyncSlices.
-      for (var key in nestableAsyncEventsByKey) {
-        var eventStateEntries = nestableAsyncEventsByKey[key];
-        // Stack of enclosing BEGIN events.
-        var parentStack = [];
-        for (var i = 0; i < eventStateEntries.length; ++i) {
-          var eventStateEntry = eventStateEntries[i];
-          // If this is the end of an event, match it to the start.
-          if (eventStateEntry.event.ph === 'e') {
-            // Walk up the parent stack to find the corresponding BEGIN for
-            // this END.
-            var parentIndex = -1;
-            for (var k = parentStack.length - 1; k >= 0; --k) {
-              if (parentStack[k].event.name === eventStateEntry.event.name) {
-                parentIndex = k;
-                break;
-              }
-            }
-            if (parentIndex === -1) {
-              // Unmatched end.
-              eventStateEntry.finished = false;
-            } else {
-              parentStack[parentIndex].end = eventStateEntry;
-              // Pop off all enclosing unmatched BEGINs util parentIndex.
-              while (parentIndex < parentStack.length) {
-                parentStack.pop();
-              }
-            }
-          }
-          // Inherit the current parent.
-          if (parentStack.length > 0)
-            eventStateEntry.parentEntry = parentStack[parentStack.length - 1];
-          if (eventStateEntry.event.ph === 'b')
-            parentStack.push(eventStateEntry);
-        }
-        var topLevelSlices = [];
-        for (var i = 0; i < eventStateEntries.length; ++i) {
-          var eventStateEntry = eventStateEntries[i];
-          // Skip matched END, as its slice will be created when we
-          // encounter its corresponding BEGIN.
-          if (eventStateEntry.event.ph === 'e' &&
-              eventStateEntry.finished === undefined) {
-            continue;
-          }
-          var startState = undefined;
-          var endState = undefined;
-          var sliceArgs = eventStateEntry.event.args || {};
-          var sliceError = undefined;
-          if (eventStateEntry.event.ph === 'n') {
-            startState = eventStateEntry;
-            endState = eventStateEntry;
-          } else if (eventStateEntry.event.ph === 'b') {
-            if (eventStateEntry.end === undefined) {
-              // Unmatched BEGIN. End it when last event with this ID ends.
-              eventStateEntry.end =
-                  eventStateEntries[eventStateEntries.length - 1];
-              sliceError =
-                  'Slice has no matching END. End time has been adjusted.';
-              this.model_.importWarning({
-                type: 'async_slice_parse_error',
-                message: 'Nestable async BEGIN event at ' +
-                    eventStateEntry.event.ts + ' with name=' +
-                    eventStateEntry.event.name +
-                    ' and id=' + eventStateEntry.event.id + ' was unmatched.'
-              });
-            } else {
-              // Include args for both END and BEGIN for a matched pair.
-              function concatenateArguments(args1, args2) {
-                if (args1.params === undefined || args2.params === undefined)
-                  return tr.b.concatenateObjects(args1, args2);
-                // Make an argument object to hold the combined params.
-                var args3 = {};
-                args3.params = tr.b.concatenateObjects(args1.params,
-                                                       args2.params);
-                return tr.b.concatenateObjects(args1, args2, args3);
-              }
-              var endArgs = eventStateEntry.end.event.args || {};
-              sliceArgs = concatenateArguments(sliceArgs, endArgs);
-            }
-            startState = eventStateEntry;
-            endState = eventStateEntry.end;
-          } else {
-            // Unmatched END. Start it at the first event with this ID starts.
-            sliceError =
-                'Slice has no matching BEGIN. Start time has been adjusted.';
-            this.model_.importWarning({
-              type: 'async_slice_parse_error',
-              message: 'Nestable async END event at ' +
-                  eventStateEntry.event.ts + ' with name=' +
-                  eventStateEntry.event.name +
-                  ' and id=' + eventStateEntry.event.id + ' was unmatched.'
-            });
-            startState = eventStateEntries[0];
-            endState = eventStateEntry;
-          }
-
-          var isTopLevel = (eventStateEntry.parentEntry === undefined);
-          var asyncSliceConstructor =
-             tr.model.AsyncSlice.getConstructor(
-                eventStateEntry.event.cat,
-                eventStateEntry.event.name);
-
-          var thread_start = undefined;
-          var thread_duration = undefined;
-          if (startState.event.tts && startState.event.use_async_tts) {
-            thread_start = timestampFromUs(startState.event.tts);
-            if (endState.event.tts) {
-              var thread_end = timestampFromUs(endState.event.tts);
-              thread_duration = thread_end - thread_start;
-            }
-          }
-
-          var slice = new asyncSliceConstructor(
-              eventStateEntry.event.cat,
-              eventStateEntry.event.name,
-              getEventColor(endState.event),
-              timestampFromUs(startState.event.ts),
-              sliceArgs,
-              timestampFromUs(endState.event.ts - startState.event.ts),
-              isTopLevel,
-              thread_start,
-              thread_duration,
-              startState.event.argsStripped);
-
-          slice.startThread = startState.thread;
-          slice.endThread = endState.thread;
-
-          slice.startStackFrame = this.getStackFrameForEvent_(startState.event);
-          slice.endStackFrame = this.getStackFrameForEvent_(endState.event);
-
-          slice.id = key;
-          if (sliceError !== undefined)
-            slice.error = sliceError;
-          eventStateEntry.slice = slice;
-          // Add the slice to the topLevelSlices array if there is no parent.
-          // Otherwise, add the slice to the subSlices of its parent.
-          if (isTopLevel) {
-            topLevelSlices.push(slice);
-          } else if (eventStateEntry.parentEntry.slice !== undefined) {
-            eventStateEntry.parentEntry.slice.subSlices.push(slice);
-          }
-        }
-        for (var si = 0; si < topLevelSlices.length; si++) {
-          topLevelSlices[si].startThread.asyncSliceGroup.push(
-              topLevelSlices[si]);
-        }
-      }
+      this.createNestableAsyncSlices_(nestableAsyncEventsByKey);
     },
 
     createLegacyAsyncSlices_: function(legacyEvents) {
@@ -1313,6 +1363,156 @@
       }
     },
 
+    createNestableAsyncSlices_: function(nestableEventsByKey) {
+      for (var key in nestableEventsByKey) {
+        var eventStateEntries = nestableEventsByKey[key];
+        // Stack of enclosing BEGIN events.
+        var parentStack = [];
+        for (var i = 0; i < eventStateEntries.length; ++i) {
+          var eventStateEntry = eventStateEntries[i];
+          // If this is the end of an event, match it to the start.
+          if (eventStateEntry.event.ph === 'e') {
+            // Walk up the parent stack to find the corresponding BEGIN for
+            // this END.
+            var parentIndex = -1;
+            for (var k = parentStack.length - 1; k >= 0; --k) {
+              if (parentStack[k].event.name === eventStateEntry.event.name) {
+                parentIndex = k;
+                break;
+              }
+            }
+            if (parentIndex === -1) {
+              // Unmatched end.
+              eventStateEntry.finished = false;
+            } else {
+              parentStack[parentIndex].end = eventStateEntry;
+              // Pop off all enclosing unmatched BEGINs until parentIndex.
+              while (parentIndex < parentStack.length) {
+                parentStack.pop();
+              }
+            }
+          }
+          // Inherit the current parent.
+          if (parentStack.length > 0)
+            eventStateEntry.parentEntry = parentStack[parentStack.length - 1];
+          if (eventStateEntry.event.ph === 'b') {
+            parentStack.push(eventStateEntry);
+          }
+        }
+        var topLevelSlices = [];
+        for (var i = 0; i < eventStateEntries.length; ++i) {
+          var eventStateEntry = eventStateEntries[i];
+          // Skip matched END, as its slice will be created when we
+          // encounter its corresponding BEGIN.
+          if (eventStateEntry.event.ph === 'e' &&
+              eventStateEntry.finished === undefined) {
+            continue;
+          }
+          var startState = undefined;
+          var endState = undefined;
+          var sliceArgs = eventStateEntry.event.args || {};
+          var sliceError = undefined;
+          if (eventStateEntry.event.ph === 'n') {
+            startState = eventStateEntry;
+            endState = eventStateEntry;
+          } else if (eventStateEntry.event.ph === 'b') {
+            if (eventStateEntry.end === undefined) {
+              // Unmatched BEGIN. End it when the last event with this ID
+              // ends.
+              eventStateEntry.end =
+                eventStateEntries[eventStateEntries.length - 1];
+              sliceError =
+                'Slice has no matching END. End time has been adjusted.';
+              this.model_.importWarning({
+                type: 'async_slice_parse_error',
+                message: 'Nestable async BEGIN event at ' +
+                  eventStateEntry.event.ts + ' with name=' +
+                  eventStateEntry.event.name +
+                  ' and id=' + eventStateEntry.event.id + ' was unmatched.'
+              });
+            } else {
+              // Include args for both END and BEGIN for a matched pair.
+              function concatenateArguments(args1, args2) {
+                if (args1.params === undefined || args2.params === undefined)
+                  return tr.b.concatenateObjects(args1, args2);
+                // Make an argument object to hold the combined params.
+                var args3 = {};
+                args3.params = tr.b.concatenateObjects(args1.params,
+                                                       args2.params);
+                return tr.b.concatenateObjects(args1, args2, args3);
+              }
+              var endArgs = eventStateEntry.end.event.args || {};
+              sliceArgs = concatenateArguments(sliceArgs, endArgs);
+            }
+            startState = eventStateEntry;
+            endState = eventStateEntry.end;
+          } else {
+            // Unmatched END. Start it when the first event with this ID
+            // starts.
+            sliceError =
+              'Slice has no matching BEGIN. Start time has been adjusted.';
+            this.model_.importWarning({
+              type: 'async_slice_parse_error',
+              message: 'Nestable async END event at ' +
+                eventStateEntry.event.ts + ' with name=' +
+                eventStateEntry.event.name +
+                ' and id=' + eventStateEntry.event.id + ' was unmatched.'
+            });
+            startState = eventStateEntries[0];
+            endState = eventStateEntry;
+          }
+
+          var isTopLevel = (eventStateEntry.parentEntry === undefined);
+          var asyncSliceConstructor =
+              tr.model.AsyncSlice.getConstructor(
+                eventStateEntry.event.cat,
+                eventStateEntry.event.name);
+
+          var thread_start = undefined;
+          var thread_duration = undefined;
+          if (startState.event.tts && startState.event.use_async_tts) {
+            thread_start = timestampFromUs(startState.event.tts);
+            if (endState.event.tts) {
+              var thread_end = timestampFromUs(endState.event.tts);
+              thread_duration = thread_end - thread_start;
+            }
+          }
+
+          var slice = new asyncSliceConstructor(
+            eventStateEntry.event.cat,
+            eventStateEntry.event.name,
+            getEventColor(endState.event),
+            timestampFromUs(startState.event.ts),
+            sliceArgs,
+            timestampFromUs(endState.event.ts - startState.event.ts),
+            isTopLevel,
+            thread_start,
+            thread_duration,
+            startState.event.argsStripped);
+
+          slice.startThread = startState.thread;
+          slice.endThread = endState.thread;
+
+          slice.startStackFrame = this.getStackFrameForEvent_(startState.event);
+          slice.endStackFrame = this.getStackFrameForEvent_(endState.event);
+
+          slice.id = key;
+          if (sliceError !== undefined)
+            slice.error = sliceError;
+          eventStateEntry.slice = slice;
+          // Add the slice to the topLevelSlices array if there is no parent.
+          // Otherwise, add the slice to the subSlices of its parent.
+          if (isTopLevel) {
+            topLevelSlices.push(slice);
+          } else if (eventStateEntry.parentEntry.slice !== undefined) {
+            eventStateEntry.parentEntry.slice.subSlices.push(slice);
+          }
+        }
+        for (var si = 0; si < topLevelSlices.length; si++) {
+          topLevelSlices[si].startThread.asyncSliceGroup.push(
+            topLevelSlices[si]);
+        }
+      }
+    },
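A hypothetical event sequence for a single category/ID pair, to illustrate the
pairing performed above: BEGINs push onto the parent stack, ENDs are matched
by name, and unmatched events get adjusted start or end times plus a warning.

    var nestableEvents = [
      {ph: 'b', name: 'A', ts: 0},
      {ph: 'b', name: 'B', ts: 1},
      {ph: 'e', name: 'B', ts: 2},
      {ph: 'e', name: 'A', ts: 3}
    ];
    // Expected result: a top-level async slice A spanning [0, 3] with a
    // single sub-slice B spanning [1, 2].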
+
     assertStepTypeMatches_: function(stepType, event) {
       if (stepType != event.event.ph) {
         this.model_.importWarning({
@@ -1613,6 +1813,8 @@
 
       function processEvent(objectEventState) {
         var event = objectEventState.event;
+        var scopedId = new tr.model.ScopedId(
+            event.scope || tr.model.OBJECT_DEFAULT_SCOPE, event.id);
         var thread = objectEventState.thread;
         if (event.name === undefined) {
           this.model_.importWarning({
@@ -1622,7 +1824,7 @@
           });
         }
 
-        if (event.id === undefined) {
+        if (scopedId.id === undefined) {
           this.model_.importWarning({
             type: 'object_parse_error',
             message: 'While processing ' + JSON.stringify(event) + ': ' +
@@ -1635,12 +1837,12 @@
         if (event.ph === 'N') {
           try {
             instance = process.objects.idWasCreated(
-                event.id, event.cat, event.name, ts);
+                scopedId, event.cat, event.name, ts);
           } catch (e) {
             this.model_.importWarning({
               type: 'object_parse_error',
               message: 'While processing create of ' +
-                  event.id + ' at ts=' + ts + ': ' + e
+                  scopedId + ' at ts=' + ts + ': ' + e
             });
             return;
           }
@@ -1648,7 +1850,7 @@
           if (event.args.snapshot === undefined) {
             this.model_.importWarning({
               type: 'object_parse_error',
-              message: 'While processing ' + event.id + ' at ts=' + ts + ': ' +
+              message: 'While processing ' + scopedId + ' at ts=' + ts + ': ' +
                   'Snapshots must have args: {snapshot: ...}'
             });
             return;
@@ -1672,28 +1874,27 @@
               baseTypename = undefined;
             }
             snapshot = process.objects.addSnapshot(
-                event.id, cat, event.name, ts,
-                args, baseTypename);
+                scopedId, cat, event.name, ts, args, baseTypename);
             snapshot.snapshottedOnThread = thread;
           } catch (e) {
             this.model_.importWarning({
               type: 'object_parse_error',
               message: 'While processing snapshot of ' +
-                  event.id + ' at ts=' + ts + ': ' + e
+                  scopedId + ' at ts=' + ts + ': ' + e
             });
             return;
           }
           instance = snapshot.objectInstance;
         } else if (event.ph === 'D') {
           try {
-            process.objects.idWasDeleted(event.id, event.cat, event.name, ts);
-            var instanceMap = process.objects.getOrCreateInstanceMap_(event.id);
+            process.objects.idWasDeleted(scopedId, event.cat, event.name, ts);
+            var instanceMap = process.objects.getOrCreateInstanceMap_(scopedId);
             instance = instanceMap.lastInstance;
           } catch (e) {
             this.model_.importWarning({
               type: 'object_parse_error',
               message: 'While processing delete of ' +
-                  event.id + ' at ts=' + ts + ': ' + e
+                  scopedId + ' at ts=' + ts + ': ' + e
             });
             return;
           }
@@ -1771,9 +1972,11 @@
         else
           baseTypename = undefined;
 
+        var scope = containingSnapshot.objectInstance.scopedId.scope;
+
         try {
           res = process.objects.addSnapshot(
-              id, cat,
+              new tr.model.ScopedId(scope, id), cat,
               name, containingSnapshot.ts,
               implicitSnapshot, baseTypename);
         } catch (e) {
@@ -2058,25 +2261,38 @@
         return;
       }
 
-      function parseByteStat(rawValue) {
-        if (rawValue === undefined)
-          return undefined;
-        return parseInt(rawValue, 16);
-      }
-
+      // See //base/trace_event/process_memory_maps.cc in Chromium.
       var vmRegions = new Array(rawVmRegions.length);
-
       for (var i = 0; i < rawVmRegions.length; i++) {
         var rawVmRegion = rawVmRegions[i];
 
-        // See //base/trace_event/process_memory_maps.cc in Chromium.
-        var byteStats = new tr.model.VMRegionByteStats(
-            parseByteStat(rawVmRegion.bs.pc),
-            parseByteStat(rawVmRegion.bs.pd),
-            parseByteStat(rawVmRegion.bs.sc),
-            parseByteStat(rawVmRegion.bs.sd),
-            parseByteStat(rawVmRegion.bs.pss),
-            parseByteStat(rawVmRegion.bs.sw));
+        var byteStats = {};
+        var rawByteStats = rawVmRegion.bs;
+        for (var rawByteStatName in rawByteStats) {
+          var rawByteStatValue = rawByteStats[rawByteStatName];
+          if (rawByteStatValue === undefined) {
+            this.model_.importWarning({
+              type: 'memory_dump_parse_error',
+              message: 'Byte stat \'' + rawByteStatName + '\' of VM region ' +
+                  i + ' (' + rawVmRegion.mf + ') in process memory dump for ' +
+                  'PID=' + pid + ' and dump ID=' + dumpId +
+                  ' does not have a value.'
+            });
+            continue;
+          }
+          var byteStatName = BYTE_STAT_NAME_MAP[rawByteStatName];
+          if (byteStatName === undefined) {
+            this.model_.importWarning({
+              type: 'memory_dump_parse_error',
+              message: 'Unknown byte stat name \'' + rawByteStatName + '\' (' +
+                  rawByteStatValue + ') of VM region ' + i + ' (' +
+                  rawVmRegion.mf + ') in process memory dump for PID=' + pid +
+                  ' and dump ID=' + dumpId + '.'
+            });
+            continue;
+          }
+          byteStats[byteStatName] = parseInt(rawByteStatValue, 16);
+        }
 
         vmRegions[i] = new tr.model.VMRegion(
             parseInt(rawVmRegion.sa, 16),  // startAddress
@@ -2086,7 +2302,8 @@
             byteStats);
       }
 
-      processMemoryDump.vmRegions = vmRegions;
+      processMemoryDump.vmRegions =
+          tr.model.VMRegionClassificationNode.fromRegions(vmRegions);
     },
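An example (hypothetical values) of a raw VM region in the format parsed
above: 'sa', 'sz' and every entry in 'bs' are hex strings, and
BYTE_STAT_NAME_MAP expands the abbreviated byte stat keys.

    var rawVmRegion = {
      sa: 'f0',             // startAddress = 240
      sz: '150',            // sizeInBytes = 336
      pf: 6,                // protectionFlags (rw-)
      mf: '[stack:20310]',  // mappedFile
      bs: {pc: '40', pd: '20', pss: '9e', sw: '50'}
    };
    parseInt(rawVmRegion.bs.pss, 16);  // proportionalResident = 158 bytes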
 
     parseMemoryDumpHeapDumps_: function(processMemoryDump, dumps, pid, dumpId) {
@@ -2108,53 +2325,149 @@
       var idPrefix = 'p' + pid + ':';
       var heapDumps = {};
 
+      var objectTypeNameMap = this.objectTypeNameMap_[pid];
+      if (objectTypeNameMap === undefined) {
+        this.model_.importWarning({
+          type: 'memory_dump_parse_error',
+          message: 'Missing mapping from object type IDs to names.'
+        });
+      }
+
       for (var allocatorName in rawHeapDumps) {
         var entries = rawHeapDumps[allocatorName].entries;
-        if (entries === undefined) {
+        if (entries === undefined || entries.length === 0) {
           this.model_.importWarning({
             type: 'memory_dump_parse_error',
-            message: 'Missing heap entries in a ' + allocatorName +
+            message: 'No heap entries in a ' + allocatorName +
                 ' heap dump for PID=' + pid + ' and dump ID=' + dumpId + '.'
           });
           continue;
         }
 
+        // The old format always starts with a {size: <total>} entry.
+        // See https://goo.gl/WYStil
+        // TODO(petrcermak): Remove support for the old format once the new
+        // format has been around long enough.
+        var isOldFormat = entries[0].bt === undefined;
+        if (!isOldFormat && objectTypeNameMap === undefined) {
+          // Mapping from object type IDs to names must be provided in the new
+          // format.
+          continue;
+        }
+
         var heapDump = new tr.model.HeapDump(processMemoryDump, allocatorName);
 
         for (var i = 0; i < entries.length; i++) {
           var entry = entries[i];
-          var type = entry.type;
-          if (type !== undefined) {
-            // Heap dump entries with specified object type ID need to be
-            // ignored for the time being because Chrome doesn't dump the
-            // mapping from object type IDs to object type names yet. See
-            // crbug.com/524631.
-            continue;
-          }
-          var size = parseInt(entry.size, 16);
           var leafStackFrameIndex = entry.bt;
           var leafStackFrame;
-          if (leafStackFrameIndex === undefined) {
-            leafStackFrame = undefined;
+
+          // There are two possible mappings from leaf stack frame indices
+          // (provided in the trace) to the corresponding stack frames
+          // depending on the format.
+          if (isOldFormat) {
+            // Old format:
+            //   Undefined index        -> / (root)
+            //   Defined index for /A/B -> /A/B/<self>
+            if (leafStackFrameIndex === undefined) {
+              leafStackFrame = undefined /* root */;
+            } else {
+              // Get the leaf stack frame corresponding to the provided index.
+              var leafStackFrameId = idPrefix + leafStackFrameIndex;
+              if (leafStackFrameIndex === '') {
+                leafStackFrame = undefined /* root */;
+              } else {
+                leafStackFrame = model.stackFrames[leafStackFrameId];
+                if (leafStackFrame === undefined) {
+                  this.model_.importWarning({
+                    type: 'memory_dump_parse_error',
+                    message: 'Missing leaf stack frame (ID ' +
+                        leafStackFrameId + ') of heap entry ' + i + ' (size ' +
+                        size + ') in a ' + allocatorName +
+                        ' heap dump for PID=' + pid + '.'
+                  });
+                  continue;
+                }
+              }
+
+              // Inject an artificial <self> leaf stack frame.
+              leafStackFrameId += ':self';
+              if (model.stackFrames[leafStackFrameId] !== undefined) {
+                // The frame might already exist if there are multiple process
+                // memory dumps (for the same process) in the trace.
+                leafStackFrame = model.stackFrames[leafStackFrameId];
+              } else {
+                leafStackFrame = new tr.model.StackFrame(
+                    leafStackFrame, leafStackFrameId, '<self>',
+                    undefined /* colorId */);
+                model.addStackFrame(leafStackFrame);
+              }
+            }
           } else {
-            var leafStackFrameId = idPrefix + leafStackFrameIndex;
-            leafStackFrame = model.stackFrames[leafStackFrameId];
-            if (leafStackFrame === undefined) {
+            // New format:
+            //   Undefined index        -> (invalid value)
+            //   Defined index for /A/B -> /A/B
+            if (leafStackFrameIndex === undefined) {
               this.model_.importWarning({
                 type: 'memory_dump_parse_error',
-                message: 'Missing leaf stack frame (ID ' + leafStackFrameId +
+                message: 'Missing stack frame ID of heap entry ' + i +
+                    ' (size ' + size + ') in a ' + allocatorName +
+                    ' heap dump for PID=' + pid + '.'
+              });
+              continue;
+            }
+
+            // Get the leaf stack frame corresponding to the provided index.
+            var leafStackFrameId = idPrefix + leafStackFrameIndex;
+            if (leafStackFrameIndex === '') {
+              leafStackFrame = undefined /* root */;
+            } else {
+              leafStackFrame = model.stackFrames[leafStackFrameId];
+              if (leafStackFrame === undefined) {
+                this.model_.importWarning({
+                  type: 'memory_dump_parse_error',
+                  message: 'Missing leaf stack frame (ID ' + leafStackFrameId +
+                      ') of heap entry ' + i + ' (size ' + size + ') in a ' +
+                      allocatorName + ' heap dump for PID=' + pid + '.'
+                });
+                continue;
+              }
+            }
+          }
+
+          var objectTypeId = entry.type;
+          var objectTypeName;
+          if (objectTypeId === undefined) {
+            objectTypeName = undefined /* total over all types */;
+          } else if (objectTypeNameMap === undefined) {
+            // This can only happen when the old format is used.
+            continue;
+          } else {
+            objectTypeName = objectTypeNameMap[objectTypeId];
+            if (objectTypeName === undefined) {
+              this.model_.importWarning({
+                type: 'memory_dump_parse_error',
+                message: 'Missing object type name (ID ' + objectTypeId +
                     ') of heap entry ' + i + ' (size ' + size + ') in a ' +
-                    allocatorName + ' heap dump for PID=' + pid + '.'
+                    allocatorName + ' heap dump for PID=' + pid + '.'
               });
               continue;
             }
           }
-          heapDump.addEntry(leafStackFrame, size);
+
+          var size = parseInt(entry.size, 16);
+          heapDump.addEntry(leafStackFrame, objectTypeName, size);
         }
-        heapDumps[allocatorName] = heapDump;
+
+        // Throw away heap dumps with no entries. This can happen if all raw
+        // entries in the trace are skipped for some reason (e.g. invalid leaf
+        // stack frame ID).
+        if (heapDump.entries.length > 0)
+          heapDumps[allocatorName] = heapDump;
       }
 
-      processMemoryDump.heapDumps = heapDumps;
+      if (Object.keys(heapDumps).length > 0)
+        processMemoryDump.heapDumps = heapDumps;
     },
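Hypothetical raw heap dump entries in the two formats distinguished above. In
the old format the first entry has no 'bt' and carries only the total size; in
the new format every entry has a 'bt' ('' meaning the root frame) and an
optional 'type' that indexes into the per-process typeNames map (omitted for
totals over all object types). Sizes are hex strings.

    var oldFormatEntries = [
      {size: '1000'},                     // total over all stack traces
      {bt: '3', size: '20'}               // leaf frame 3, imported as .../<self>
    ];
    var newFormatEntries = [
      {bt: '', size: '1000', type: '1'},  // root frame, object type ID 1
      {bt: '3', size: '20', type: '2'}
    ];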
 
     parseMemoryDumpLevelOfDetail_: function(levelOfDetailIndices, dumps, pid,
@@ -2227,6 +2540,10 @@
           });
         }
 
+        // A memory allocator dump can have optional flags.
+        var flags = rawAllocatorDump.flags || 0;
+        var isWeakDump = !!(flags & WEAK_MEMORY_ALLOCATOR_DUMP_FLAG);
+
         // Determine if this is a global memory allocator dump (check if
         // it's prefixed with 'global/').
         var containerMemoryDump;
@@ -2259,6 +2576,7 @@
           }
           allocatorDump = new tr.model.MemoryAllocatorDump(
               containerMemoryDump, fullName, guid);
+          allocatorDump.weak = isWeakDump;
           dstIndex[fullName] = allocatorDump;
           if (guid !== undefined)
             allMemoryAllocatorDumpsByGuid[guid] = allocatorDump;
@@ -2285,6 +2603,11 @@
             });
             continue;
           }
+          if (!isWeakDump) {
+            // A MemoryAllocatorDump is non-weak if at least one process dumped
+            // it without WEAK_MEMORY_ALLOCATOR_DUMP_FLAG.
+            allocatorDump.weak = false;
+          }
         }
 
         // Add all new attributes to the memory allocator dump.
@@ -2300,20 +2623,54 @@
         }
 
         for (var attrName in attributes) {
-          if (attrName in allocatorDump.attributes) {
-            // Skip existing attributes of the memory allocator dump.
-            this.model_.importWarning({
-            type: 'memory_dump_parse_error',
-            message: 'Multiple values provided for attribute ' + attrName +
-                ' of memory allocator dump ' + fullName + ' (GUID=' + guid +
-                ') for PID=' + pid + ' and dump ID=' + dumpId + '.'
-            });
-            continue;
-          }
-
           var attrArgs = attributes[attrName];
-          var attrValue = tr.model.Attribute.fromDictIfPossible(attrArgs);
-          allocatorDump.addAttribute(attrName, attrValue);
+          var attrType = attrArgs.type;
+          var attrValue = attrArgs.value;
+
+          switch (attrType) {
+            case 'scalar':
+              if (attrName in allocatorDump.numerics) {
+                this.model_.importWarning({
+                type: 'memory_dump_parse_error',
+                message: 'Multiple values provided for scalar attribute ' +
+                    attrName + ' of memory allocator dump ' + fullName +
+                    ' (GUID=' + guid + ') for PID=' + pid + ' and dump ID=' +
+                    dumpId + '.'
+                });
+                break;
+              }
+              var unit = attrArgs.units === 'bytes' ?
+                  tr.v.Unit.byName.sizeInBytes_smallerIsBetter :
+                  tr.v.Unit.byName.unitlessNumber_smallerIsBetter;
+              var value = parseInt(attrValue, 16);
+              allocatorDump.addNumeric(attrName,
+                  new tr.v.ScalarNumeric(unit, value));
+              break;
+
+            case 'string':
+              if (attrName in allocatorDump.diagnostics) {
+                this.model_.importWarning({
+                type: 'memory_dump_parse_error',
+                message: 'Multiple values provided for string attribute ' +
+                    attrName + ' of memory allocator dump ' + fullName +
+                    ' (GUID=' + guid + ') for PID=' + pid + ' and dump ID=' +
+                    dumpId + '.'
+                });
+                break;
+              }
+              allocatorDump.addDiagnostic(attrName, attrValue);
+              break;
+
+            default:
+              this.model_.importWarning({
+              type: 'memory_dump_parse_error',
+              message: 'Unknown type provided for attribute ' + attrName +
+                  ' of memory allocator dump ' + fullName + ' (GUID=' + guid +
+                  ') for PID=' + pid + ' and dump ID=' + dumpId + ': ' +
+                  attrType
+              });
+              break;
+          }
         }
       }
     },
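An example (hypothetical names and values) of the raw attribute dictionaries
handled by the switch above: 'scalar' attributes carry hex-encoded values and
become numerics, while 'string' attributes are stored verbatim as diagnostics.

    var rawAttributes = {
      size: {type: 'scalar', units: 'bytes', value: '1000'},  // 0x1000 = 4096
      state: {type: 'string', units: '', value: 'suspended'}
    };
    parseInt(rawAttributes.size.value, 16);  // 4096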
@@ -2350,6 +2707,38 @@
             parentAlreadyExisted = false;
             parentAllocatorDump = new tr.model.MemoryAllocatorDump(
                 allocatorDump.containerMemoryDump, parentFullName);
+            if (allocatorDump.weak !== false) {
+              // If we are inferring a parent dump (e.g. 'root/parent') of a
+              // current dump (e.g. 'root/parent/current') which is weak (or
+              // was also inferred and we don't know yet whether it's weak or
+              // not), then we clear the weak flag on the parent dump because
+              // we don't know yet whether it should be weak or non-weak:
+              //
+              //   * We can't mark the parent as non-weak straightaway because
+              //     the parent might have no non-weak descendants (in which
+              //     case we want the inferred parent to be weak, so that it
+              //     would be later removed like the current dump).
+              //   * We can't mark the parent as weak immediately either. If we
+              //     did and later encountered a non-weak child of the parent
+              //     (e.g. 'root/parent/another_child'), then we couldn't
+              //     retroactively mark the inferred parent dump as non-weak
+              //     because we couldn't tell whether the parent dump was
+              //     dumped in the trace as weak (in which case it should stay
+              //     weak and be subsequently removed) or whether it was
+              //     inferred as weak (in which case it should be changed to
+              //     non-weak).
+              //
+              // Therefore, we defer marking the inferred parent as
+              // weak/non-weak. If an inferred parent dump does not have any
+              // non-weak child, it will be marked as weak at the end of this
+              // method.
+              //
+              // Note that this should not be confused with the recursive
+              // propagation of the weak flag from parent dumps to their
+              // children and from owned dumps to their owners, which is
+              // performed in GlobalMemoryDump.prototype.removeWeakDumps().
+              parentAllocatorDump.weak = undefined;
+            }
             memoryAllocatorDumpsByFullName[parentFullName] =
                 parentAllocatorDump;
           }
@@ -2360,14 +2749,32 @@
 
           // If the parent already existed, then its ancestors were/will be
           // constructed in another iteration of the forEach loop.
-          if (parentAlreadyExisted)
+          if (parentAlreadyExisted) {
+            if (!allocatorDump.weak) {
+              // If the current dump is non-weak, then we must ensure that all
+              // its inferred ancestors are also non-weak.
+              while (parentAllocatorDump !== undefined &&
+                     parentAllocatorDump.weak === undefined) {
+                parentAllocatorDump.weak = false;
+                parentAllocatorDump = parentAllocatorDump.parent;
+              }
+            }
             break;
+          }
 
           fullName = parentFullName;
           allocatorDump = parentAllocatorDump;
         }
       }
 
+      // All inferred ancestor dumps that have a non-weak child have already
+      // been marked as non-weak. We now mark the rest as weak.
+      for (var fullName in memoryAllocatorDumpsByFullName) {
+        var allocatorDump = memoryAllocatorDumpsByFullName[fullName];
+        if (allocatorDump.weak === undefined)
+          allocatorDump.weak = true;
+      }
+
       return rootAllocatorDumps;
     },
 
@@ -2451,87 +2858,6 @@
           }
         }
       }
-    },
-
-    joinObjectRefs_: function() {
-      tr.b.iterItems(this.model_.processes, function(pid, process) {
-        this.joinObjectRefsForProcess_(process);
-      }, this);
-    },
-
-    joinObjectRefsForProcess_: function(process) {
-      // Iterate the world, looking for id_refs
-      var patchupsToApply = [];
-      tr.b.iterItems(process.threads, function(tid, thread) {
-        thread.asyncSliceGroup.slices.forEach(function(item) {
-          this.searchItemForIDRefs_(
-              patchupsToApply, process.objects, 'start', item);
-        }, this);
-        thread.sliceGroup.slices.forEach(function(item) {
-          this.searchItemForIDRefs_(
-              patchupsToApply, process.objects, 'start', item);
-        }, this);
-      }, this);
-      process.objects.iterObjectInstances(function(instance) {
-        instance.snapshots.forEach(function(item) {
-          this.searchItemForIDRefs_(
-              patchupsToApply, process.objects, 'ts', item);
-        }, this);
-      }, this);
-
-      // Change all the fields pointing at id_refs to their real values.
-      patchupsToApply.forEach(function(patchup) {
-        patchup.object[patchup.field] = patchup.value;
-      });
-    },
-
-    searchItemForIDRefs_: function(patchupsToApply, objectCollection,
-                                   itemTimestampField, item) {
-      if (!item.args)
-        throw new Error('item is missing its args');
-
-      function handleField(object, fieldName, fieldValue) {
-        if (!fieldValue || (!fieldValue.id_ref && !fieldValue.idRef))
-          return;
-
-        var id = fieldValue.id_ref || fieldValue.idRef;
-        var ts = item[itemTimestampField];
-        var snapshot = objectCollection.getSnapshotAt(id, ts);
-        if (!snapshot)
-          return;
-
-        // We have to delay the actual change to the new value until after all
-        // refs have been located. Otherwise, we could end up recursing in
-        // ways we definitely didn't intend.
-        patchupsToApply.push({object: object,
-          field: fieldName,
-          value: snapshot});
-      }
-      function iterObjectFieldsRecursively(object) {
-        if (!(object instanceof Object))
-          return;
-
-        if ((object instanceof tr.model.ObjectSnapshot) ||
-            (object instanceof Float32Array) ||
-            (object instanceof tr.b.Quad))
-          return;
-
-        if (object instanceof Array) {
-          for (var i = 0; i < object.length; i++) {
-            handleField(object, i, object[i]);
-            iterObjectFieldsRecursively(object[i]);
-          }
-          return;
-        }
-
-        for (var key in object) {
-          var value = object[key];
-          handleField(object, key, value);
-          iterObjectFieldsRecursively(value);
-        }
-      }
-
-      iterObjectFieldsRecursively(item.args);
     }
   };
 
diff --git a/catapult/tracing/tracing/extras/importer/trace_event_importer_test.html b/catapult/tracing/tracing/extras/importer/trace_event_importer_test.html
index 975b3e9..bc64657 100644
--- a/catapult/tracing/tracing/extras/importer/trace_event_importer_test.html
+++ b/catapult/tracing/tracing/extras/importer/trace_event_importer_test.html
@@ -5,10 +5,16 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/tracing/base/units/time_display_mode.html">
 <link rel="import" href="/tracing/core/test_utils.html">
 <link rel="import" href="/tracing/extras/importer/trace_event_importer.html">
+<link rel="import" href="/tracing/extras/measure/measure.html">
 <link rel="import" href="/tracing/importer/import.html">
+<link rel="import" href="/tracing/model/memory_dump_test_utils.html">
+<link rel="import" href="/tracing/model/scoped_id.html">
+<link rel="import" href="/tracing/model/vm_region.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/time_display_mode.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <script>
 'use strict';
@@ -16,6 +22,15 @@
 tr.b.unittest.testSuite(function() {
   var findSliceNamed = tr.c.TestUtils.findSliceNamed;
   var ColorScheme = tr.b.ColorScheme;
+  var MeasureAsyncSlice = tr.e.measure.MeasureAsyncSlice;
+  var ScopedId = tr.model.ScopedId;
+  var VMRegion = tr.model.VMRegion;
+  var ScalarNumeric = tr.v.ScalarNumeric;
+  var unitlessNumber_smallerIsBetter =
+      tr.v.Unit.byName.unitlessNumber_smallerIsBetter;
+  var checkDumpNumericsAndDiagnostics =
+      tr.model.MemoryDumpTestUtils.checkDumpNumericsAndDiagnostics;
+  var checkVMRegions = tr.model.MemoryDumpTestUtils.checkVMRegions;
 
   function makeModel(events, opt_shift, opt_prune) {
     return tr.c.TestUtils.newModelWithEvents([events], {
@@ -28,6 +43,26 @@
     return makeModel(events, false);
   }
 
+  function checkHeapEntry(entry, expectedDump, expectedSize, expectedTitles,
+      expectedObjectTypeName) {
+    assert.strictEqual(entry.heapDump, expectedDump);
+    assert.strictEqual(entry.size, expectedSize);
+    assert.strictEqual(entry.objectTypeName, expectedObjectTypeName);
+    if (expectedTitles === undefined) {
+      assert.isUndefined(entry.leafStackFrame);
+    } else {
+      assert.deepEqual(
+          entry.leafStackFrame.getUserFriendlyStackTrace(), expectedTitles);
+    }
+  }
+
+  function getFrame(heapEntry, distance) {
+    var frame = heapEntry.leafStackFrame;
+    for (; distance > 0; distance--)
+      frame = frame.parentFrame;
+    return frame;
+  }
+
   test('canImportEmpty', function() {
     assert.isFalse(tr.e.importer.TraceEventImporter.canImport([]));
     assert.isFalse(tr.e.importer.TraceEventImporter.canImport(''));
@@ -538,7 +573,7 @@
 
     var m = makeModel(events);
     var p = m.processes[1];
-    var i = p.objects.instanceMapsById_[1].instances[0];
+    var i = p.objects.instanceMapsByScopedId_['ptr'][1].instances[0];
     assert.equal(i.colorId,
                  ColorScheme.getColorIdForReservedName('generic_work'));
   });
@@ -552,7 +587,7 @@
 
     var m = makeModel(events);
     var p = m.processes[1];
-    var i = p.objects.instanceMapsById_[1].instances[0];
+    var i = p.objects.instanceMapsByScopedId_['ptr'][1].instances[0];
     assert.equal(i.colorId,
                  ColorScheme.getColorIdForReservedName('generic_work'));
   });
@@ -1904,6 +1939,34 @@
   test('importV8Samples', function() {
     var eventData = {
       traceEvents: [
+        { name: 'V8Sample', args: {data: {stack: ['0x2a574306061', '0x2a574306224'], vm_state: 'js'}}, pid: 1, ts: 4, cat: 'test', tid: 2, ph: 'P' }, // @suppress longLineCheck
+        { name: 'V8Sample', args: {data: {stack: [], vm_state: 'gc'}}, pid: 1, ts: 6, cat: 'test', tid: 2, ph: 'P' }, // @suppress longLineCheck
+        { name: 'JitCodeAdded', args: {data: {code_len: 2, name: 'LazyCompile:~foo http://example.com/bar.js:23', code_start: '0x2a574306060'}}, pid: 1, ts: 1, cat: 'test', tid: 2, ph: 'M' }, // @suppress longLineCheck
+        { name: 'JitCodeAdded', args: {data: {code_len: 20, name: 'bar', code_start: '0x2a574306220'}}, pid: 1, ts: 2, cat: 'test', tid: 2, ph: 'M' }, // @suppress longLineCheck
+        { name: 'JitCodeMoved', args: {data: {code_len: 2, old_code_start: '0x2a574306220', code_start: '0x2a574306222'}}, pid: 1, ts: 3, cat: 'test', tid: 2, ph: 'M' }, // @suppress longLineCheck
+        { name: 'JitCodeAdded', args: {data: {code_len: 20, name: 'baz', code_start: '0xffffffff9f90a1a0'}}, pid: 1, ts: 4, cat: 'test', tid: 2, ph: 'M' } // @suppress longLineCheck
+      ]
+    };
+
+    var m = makeModel(eventData);
+    var p = m.processes[1];
+    var t = p.threads[2];
+
+    assert.isFalse(m.hasImportWarnings);
+    assert.equal(t.samples.length, 2);
+
+    var sample = t.samples_[0];
+    assert.equal(sample.leafStackFrame.title,
+        'foo http://example.com/bar.js:22');
+    assert.equal(sample.leafStackFrame.parentFrame.title, 'bar');
+
+    var sample = t.samples_[1];
+    assert.equal(sample.leafStackFrame.title, 'gc');
+  });
+
+  test('importOldFormatV8Samples', function() {
+    var eventData = {
+      traceEvents: [
         { name: 'JitCodeAdded', args: {data: {code_len: 2, name: 'LazyCompile:~foo http://example.com/bar.js:23', code_start: '0x2a574306060'}}, pid: 1, ts: 0, cat: 'test', tid: 2, ph: 'I' }, // @suppress longLineCheck
         { name: 'JitCodeAdded', args: {data: {code_len: 20, name: 'bar', code_start: '0x2a574306220'}}, pid: 1, ts: 0, cat: 'test', tid: 2, ph: 'I' }, // @suppress longLineCheck
         { name: 'JitCodeMoved', args: {data: {code_len: 2, old_code_start: '0x2a574306220', code_start: '0x2a574306222'}}, pid: 1, ts: 0, cat: 'test', tid: 2, ph: 'I' }, // @suppress longLineCheck
@@ -1944,7 +2007,7 @@
     var p = m.processes[1];
     assert.isDefined(p);
 
-    var i10 = p.objects.getObjectInstanceAt('0x1000', 10);
+    var i10 = p.objects.getObjectInstanceAt(new ScopedId('ptr', '0x1000'), 10);
     assert.equal(i10.category, 'c');
     assert.equal(i10.creationTs, 10);
     assert.equal(i10.deletionTs, 50);
@@ -1982,12 +2045,13 @@
     var m = makeModel(events, false);
     var p1 = m.processes[1];
 
-    var iA = p1.objects.getObjectInstanceAt('0x1000', 10);
+    var iA = p1.objects.getObjectInstanceAt(new ScopedId('ptr', '0x1000'), 10);
     var subObjectInstances = p1.objects.getAllInstancesByTypeName()[
         'subObject'];
 
     assert.equal(subObjectInstances.length, 2);
-    var subObject1 = p1.objects.getObjectInstanceAt('0x1', 15);
+    var subObject1 = p1.objects.getObjectInstanceAt(
+        new ScopedId('ptr', '0x1'), 15);
     assert.equal(subObject1.name, 'subObject');
     assert.equal(subObject1.creationTs, 15);
 
@@ -1997,7 +2061,8 @@
     assert.equal(subObject1.snapshots[1].ts, 20);
     assert.equal(subObject1.snapshots[1].args.foo, 2);
 
-    var subObject2 = p1.objects.getObjectInstanceAt('0x2', 20);
+    var subObject2 = p1.objects.getObjectInstanceAt(
+        new ScopedId('ptr', '0x2'), 20);
     assert.equal(subObject2.name, 'subObject');
     assert.equal(subObject2.creationTs, 20);
     assert.equal(subObject2.snapshots.length, 1);
@@ -2019,7 +2084,7 @@
     var m = makeModel(events);
     var p1 = m.processes[1];
 
-    var iA = p1.objects.getObjectInstanceAt('0x1000', 10);
+    var iA = p1.objects.getObjectInstanceAt(new ScopedId('ptr', '0x1000'), 10);
     var subObjectInstances = p1.objects.getAllInstancesByTypeName()[
         'subObject'];
 
@@ -2040,7 +2105,7 @@
     var p1 = m.processes[1];
     assert.equal(m.importWarnings.length, 0);
 
-    var iA = p1.objects.getObjectInstanceAt('0x1000', 10);
+    var iA = p1.objects.getObjectInstanceAt(new ScopedId('ptr', '0x1000'), 10);
     assert.equal(iA.snapshots.length, 1);
   });
 
@@ -2060,7 +2125,7 @@
     var m = makeModel(events, false);
     var p1 = m.processes[1];
 
-    var iA = p1.objects.getObjectInstanceAt('0x1000', 10);
+    var iA = p1.objects.getObjectInstanceAt(new ScopedId('ptr', '0x1000'), 10);
     var s15 = iA.getSnapshotAt(15);
 
     var taskSlice = p1.threads[1].sliceGroup.slices[0];
@@ -2086,8 +2151,9 @@
     var m = makeModel(events);
     var p1 = m.processes[1];
 
-    var iA = p1.objects.getObjectInstanceAt('0x1000', 15);
-    var iFoo = p1.objects.getObjectInstanceAt('0x1001', 15);
+    var iA = p1.objects.getObjectInstanceAt(new ScopedId('ptr', '0x1000'), 15);
+    var iFoo = p1.objects.getObjectInstanceAt(
+        new ScopedId('ptr', '0x1001'), 15);
     assert.isDefined(iA);
     assert.isDefined(iFoo);
 
@@ -2111,7 +2177,7 @@
     var m = makeModel(events, false);
     var p1 = m.processes[1];
 
-    var sA = p1.objects.getSnapshotAt('0x1000', 15);
+    var sA = p1.objects.getSnapshotAt(new ScopedId('ptr', '0x1000'), 15);
     assert.isTrue(sA.args.x instanceof Array);
     assert.equal(sA.args.x.length, 3);
     assert.isTrue(sA.args.x[0] instanceof tr.model.ObjectSnapshot);
@@ -3009,6 +3075,21 @@
                   bs: {
                     pss: 'cd',
                     pd: 'cd',
+                    sc: undefined,
+                    sw: '0'
+                  }
+                },
+                {
+                  sa: '7ff10ff4b000',
+                  sz: '40000',
+                  pf: 134,
+                  mf: '/run/shm/.org.chromium.Chromium.sqqN11 (deleted)',
+                  bs: {
+                    pss: '40000',
+                    pc: '0',
+                    pd: '40000',
+                    sc: '0',
+                    sd: '0',
                     sw: '0'
                   }
                 }
@@ -3022,33 +3103,50 @@
     var p = m.getProcess(42);
     var d = p.memoryDumps[0];
 
-    assert.equal(d.mostRecentVmRegions.length, 2);
-
-    var vr1 = d.mostRecentVmRegions[0];
-    assert.equal(vr1.startAddress, 240);
-    assert.equal(vr1.sizeInBytes, 336);
-    assert.equal(vr1.protectionFlags, 6);
-    assert.equal(vr1.protectionFlagsToString, 'rw-');
-    assert.equal(vr1.mappedFile, '[stack:20310]');
-    assert.equal(vr1.byteStats.privateCleanResident, 64);
-    assert.equal(vr1.byteStats.privateDirtyResident, 32);
-    assert.equal(vr1.byteStats.sharedCleanResident, 256);
-    assert.equal(vr1.byteStats.sharedDirtyResident, 0);
-    assert.equal(vr1.byteStats.proportionalResident, 158);
-    assert.equal(vr1.byteStats.swapped, 80);
-
-    var vr2 = d.mostRecentVmRegions[1];
-    assert.equal(vr2.startAddress, 848);
-    assert.equal(vr2.sizeInBytes, 592);
-    assert.equal(vr2.protectionFlags, 5);
-    assert.equal(vr2.protectionFlagsToString, 'r-x');
-    assert.equal(vr2.mappedFile, '/dev/ashmem/dalvik');
-    assert.equal(vr2.byteStats.proportionalResident, 205);
-    assert.isUndefined(vr2.byteStats.privateCleanResident);
-    assert.equal(vr2.byteStats.privateDirtyResident, 205);
-    assert.isUndefined(vr2.byteStats.sharedCleanResident);
-    assert.isUndefined(vr2.byteStats.sharedDirtyResident);
-    assert.equal(vr2.byteStats.swapped, 0);
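+    // The expected protectionFlags decode the raw 'pf' values from the trace
+    // data (e.g. pf 134 above decodes to READ | WRITE | MAYSHARE).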
+    checkVMRegions(d.vmRegions, [
+      {
+        startAddress: 240,
+        sizeInBytes: 336,
+        protectionFlags: VMRegion.PROTECTION_FLAG_READ |
+            VMRegion.PROTECTION_FLAG_WRITE,
+        mappedFile: '[stack:20310]',
+        byteStats: {
+          privateCleanResident: 64,
+          privateDirtyResident: 32,
+          sharedCleanResident: 256,
+          sharedDirtyResident: 0,
+          proportionalResident: 158,
+          swapped: 80
+        }
+      },
+      {
+        startAddress: 848,
+        sizeInBytes: 592,
+        protectionFlags: VMRegion.PROTECTION_FLAG_READ |
+            VMRegion.PROTECTION_FLAG_EXECUTE,
+        mappedFile: '/dev/ashmem/dalvik',
+        byteStats: {
+          proportionalResident: 205,
+          privateDirtyResident: 205,
+          swapped: 0
+        }
+      },
+      {
+        startAddress: 140673331539968,
+        sizeInBytes: 262144,
+        protectionFlags: VMRegion.PROTECTION_FLAG_READ |
+            VMRegion.PROTECTION_FLAG_WRITE | VMRegion.PROTECTION_FLAG_MAYSHARE,
+        mappedFile: '/run/shm/.org.chromium.Chromium.sqqN11 (deleted)',
+        byteStats: {
+          privateCleanResident: 0,
+          privateDirtyResident: 262144,
+          sharedCleanResident: 0,
+          sharedDirtyResident: 0,
+          proportionalResident: 262144,
+          swapped: 0
+        }
+      }
+    ]);
 
     assert.equal(d.totals.residentBytes, 0);
     assert.isUndefined(d.totals.peakResidentBytes);
@@ -3142,9 +3240,12 @@
     assert.include(d.memoryAllocatorDumps, oilpanRoot);
     assert.include(d.memoryAllocatorDumps, v8Root);
 
-    assert.equal(oilpanRoot.attributes['objects_count'].value, 47);
-    assert.equal(oilpanRoot.attributes['size'].value, 32768);
-    assert.equal(oilpanRoot.attributes['inner_size'].value, 4096);
+    checkDumpNumericsAndDiagnostics(oilpanRoot, {
+      'objects_count': new ScalarNumeric(unitlessNumber_smallerIsBetter, 47),
+      'size': 32768,
+      'effective_size': 32768,
+      'inner_size': 4096
+    }, {});
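+    // (In these expectation maps a plain number is presumably shorthand for a
+    // byte-valued numeric, while unitless counts are given explicitly as
+    // ScalarNumeric.)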
     assert.equal(oilpanRoot.children.length, 2);
 
     var oilpanBucket1 = d.getMemoryAllocatorDumpByFullName(
@@ -3152,9 +3253,12 @@
     assert.isDefined(oilpanBucket1);
     assert.equal(oilpanBucket1.fullName, 'oilpan/heap2/bucket1');
     assert.equal(oilpanBucket1.name, 'bucket1');
-    assert.equal(oilpanBucket1.attributes['objects_count'].value, 31);
-    assert.equal(oilpanBucket1.attributes['size'].value, 8192);
-    assert.equal(oilpanBucket1.attributes['inner_size'].value, 8192);
+    checkDumpNumericsAndDiagnostics(oilpanBucket1, {
+      'objects_count': new ScalarNumeric(unitlessNumber_smallerIsBetter, 31),
+      'size': 8192,
+      'effective_size': 8192,
+      'inner_size': 8192
+    }, {});
     assert.equal(oilpanBucket1.children.length, 0);
 
     assert.isDefined(oilpanBucket1.parent);
@@ -3236,9 +3340,12 @@
     assert.include(d.memoryAllocatorDumps, oilpanRoot);
     assert.include(d.memoryAllocatorDumps, v8Root);
 
-    assert.equal(oilpanRoot.attributes['objects_count'].value, 94);
-    assert.equal(oilpanRoot.attributes['size'].value, 24576);
-    assert.equal(oilpanRoot.attributes['inner_size'].value, 20480);
+    checkDumpNumericsAndDiagnostics(oilpanRoot, {
+      'objects_count': new ScalarNumeric(unitlessNumber_smallerIsBetter, 94),
+      'size': 24576,
+      'effective_size': 24576,
+      'inner_size': 20480
+    }, {});
     assert.equal(oilpanRoot.children.length, 2);
 
     var oilpanBucket1 = d.getMemoryAllocatorDumpByFullName(
@@ -3246,9 +3353,12 @@
     assert.isDefined(oilpanBucket1);
     assert.equal(oilpanBucket1.fullName, 'oilpan/heap2/bucket1');
     assert.equal(oilpanBucket1.name, 'bucket1');
-    assert.equal(oilpanBucket1.attributes['objects_count'].value, 31);
-    assert.equal(oilpanBucket1.attributes['size'].value, 8192);
-    assert.equal(oilpanBucket1.attributes['inner_size'].value, 8192);
+    checkDumpNumericsAndDiagnostics(oilpanBucket1, {
+      'objects_count': new ScalarNumeric(unitlessNumber_smallerIsBetter, 31),
+      'size': 8192,
+      'effective_size': 8192,
+      'inner_size': 8192
+    }, {});
     assert.equal(oilpanBucket1.children.length, 0);
 
     assert.isDefined(oilpanBucket1.parent);
@@ -3332,10 +3442,12 @@
     assert.isDefined(sharedBitmapManager);
     assert.include(gmd.memoryAllocatorDumps, sharedBitmapManager);
 
-    assert.equal(sharedBitmapManager.attributes['objects_count'].value, 31);
-    assert.equal(sharedBitmapManager.attributes['size'].value, 8192);
-    assert.equal(sharedBitmapManager.attributes['inner_size'].value, 8192);
-    assert.isUndefined(sharedBitmapManager.attributes['weather']);
+    checkDumpNumericsAndDiagnostics(sharedBitmapManager, {
+      'objects_count': new ScalarNumeric(unitlessNumber_smallerIsBetter, 31),
+      'size': 8192,
+      'effective_size': 8192,
+      'inner_size': 8192
+    }, {});
     assert.lengthOf(sharedBitmapManager.children, 1);
 
     var bitmap2 = gmd.getMemoryAllocatorDumpByFullName(
@@ -3344,10 +3456,12 @@
     assert.include(sharedBitmapManager.children, bitmap2);
     assert.strictEqual(bitmap2.parent, sharedBitmapManager);
 
-    assert.equal(bitmap2.attributes['objects_count'].value, 31);
-    assert.equal(bitmap2.attributes['size'].value, 8192);
-    assert.equal(bitmap2.attributes['inner_size'].value, 8192);
-    assert.equal(bitmap2.attributes['weather'].value, 'sunny');
+    checkDumpNumericsAndDiagnostics(bitmap2, {
+      'objects_count': new ScalarNumeric(unitlessNumber_smallerIsBetter, 31),
+      'size': 8192,
+      'effective_size': 8192,
+      'inner_size': 8192
+    }, { 'weather': 'sunny' });
     assert.lengthOf(bitmap2.children, 0);
 
     assert.isUndefined(gmd.getMemoryAllocatorDumpByFullName('tile_manager'));
@@ -3360,10 +3474,12 @@
     assert.include(pmd.memoryAllocatorDumps, tileManagerRoot);
     assert.isUndefined(tileManagerRoot.parent);
 
-    assert.equal(tileManagerRoot.attributes['objects_count'].value, 63);
-    assert.equal(tileManagerRoot.attributes['size'].value, 16384);
-    assert.equal(tileManagerRoot.attributes['inner_size'].value, 12288);
-    assert.isUndefined(tileManagerRoot.attributes['weather']);
+    checkDumpNumericsAndDiagnostics(tileManagerRoot, {
+      'objects_count': new ScalarNumeric(unitlessNumber_smallerIsBetter, 63),
+      'size': 16384,
+      'effective_size': 16384,
+      'inner_size': 12288
+    }, {});
     assert.lengthOf(tileManagerRoot.children, 1);
 
     var tile1 = pmd.getMemoryAllocatorDumpByFullName(
@@ -3372,10 +3488,12 @@
     assert.include(tileManagerRoot.children, tile1);
     assert.strictEqual(tile1.parent, tileManagerRoot);
 
-    assert.equal(tile1.attributes['objects_count'].value, 63);
-    assert.equal(tile1.attributes['size'].value, 16384);
-    assert.equal(tile1.attributes['inner_size'].value, 12288);
-    assert.equal(tile1.attributes['weather'].value, 'rainy');
+    checkDumpNumericsAndDiagnostics(tile1, {
+      'objects_count': new ScalarNumeric(unitlessNumber_smallerIsBetter, 63),
+      'size': 16384,
+      'effective_size': 16384,
+      'inner_size': 12288
+    }, { 'weather': 'rainy' });
     assert.lengthOf(tile1.children, 0);
 
     assert.isUndefined(
@@ -3534,12 +3652,9 @@
         'shared');
     assert.isDefined(globalDumpShared);
     assert.include(globalDump.memoryAllocatorDumps, globalDumpShared);
-    assert.equal(globalDumpShared.attributes['color'].value, 'blue');
-    assert.equal(globalDumpShared.attributes['area'].value, 9);
-    assert.isUndefined(globalDumpShared.attributes['mood']);
-    assert.isUndefined(globalDumpShared.attributes['size']);
-    assert.isUndefined(globalDumpShared.attributes['state']);
-    assert.isUndefined(globalDumpShared.attributes['temperature']);
+    checkDumpNumericsAndDiagnostics(globalDumpShared, {
+      'area': new ScalarNumeric(unitlessNumber_smallerIsBetter, 9)
+    }, { 'color': 'blue' });
     assert.lengthOf(globalDumpShared.children, 0);
     assert.isUndefined(globalDumpShared.parent);
 
@@ -3555,9 +3670,9 @@
         'local');
     assert.isDefined(browserDumpLocal);
     assert.include(browserDump.memoryAllocatorDumps, browserDumpLocal);
-    assert.equal(browserDumpLocal.attributes['mood'].value, 'very good');
-    assert.equal(browserDumpLocal.attributes['area'].value, 9);
-    assert.equal(browserDumpLocal.attributes['color'].value, 'blue');
+    checkDumpNumericsAndDiagnostics(browserDumpLocal, {
+      'area': new ScalarNumeric(unitlessNumber_smallerIsBetter, 9)
+    }, { 'color': 'blue', 'mood': 'very good' });
     assert.lengthOf(browserDumpLocal.children, 0);
     assert.isUndefined(browserDumpLocal.parent);
 
@@ -3579,9 +3694,10 @@
         'local');
     assert.isDefined(rendererDumpLocal);
     assert.include(rendererDump.memoryAllocatorDumps, rendererDumpLocal);
-    assert.equal(rendererDumpLocal.attributes['length'].value, 3);
-    assert.equal(rendererDumpLocal.attributes['area'].value, 9);
-    assert.equal(rendererDumpLocal.attributes['color'].value, 'blue');
+    checkDumpNumericsAndDiagnostics(rendererDumpLocal, {
+      'area': new ScalarNumeric(unitlessNumber_smallerIsBetter, 9),
+      'length': 3
+    }, { 'color': 'blue' });
     assert.lengthOf(rendererDumpLocal.children, 0);
     assert.isUndefined(rendererDumpLocal.parent);
 
@@ -3602,8 +3718,9 @@
     var gpuDumpLocal1 = gpuDump.getMemoryAllocatorDumpByFullName('local1');
     assert.isDefined(gpuDumpLocal1);
     assert.include(gpuDump.memoryAllocatorDumps, gpuDumpLocal1);
-    assert.equal(gpuDumpLocal1.attributes['state'].value, 'ON');
-    assert.isUndefined(gpuDumpLocal1.attributes['temperature']);
+    checkDumpNumericsAndDiagnostics(gpuDumpLocal1, {
+      'area': new ScalarNumeric(unitlessNumber_smallerIsBetter, 9)
+    }, { 'state': 'ON', 'color': 'blue' });
     assert.lengthOf(gpuDumpLocal1.children, 0);
     assert.isUndefined(gpuDumpLocal1.parent);
 
@@ -3627,8 +3744,9 @@
     var gpuDumpLocal2 = gpuDump.getMemoryAllocatorDumpByFullName('local2');
     assert.isDefined(gpuDumpLocal2);
     assert.include(gpuDump.memoryAllocatorDumps, gpuDumpLocal2);
-    assert.equal(gpuDumpLocal2.attributes['temperature'].value, 100);
-    assert.isUndefined(gpuDumpLocal2.attributes['state']);
+    checkDumpNumericsAndDiagnostics(gpuDumpLocal2, {
+      'temperature': new ScalarNumeric(unitlessNumber_smallerIsBetter, 100)
+    }, {});
     assert.lengthOf(gpuDumpLocal2.children, 0);
     assert.isUndefined(gpuDumpLocal2.parent);
 
@@ -3670,12 +3788,231 @@
     assert.equal(d.memoryAllocatorDumps.length, 1);
     var noCrashRoot = d.getMemoryAllocatorDumpByFullName('no_crash');
     assert.lengthOf(noCrashRoot.children, 0);
-    assert.deepEqual(noCrashRoot.attributes,
-        { size: undefined, effective_size: undefined });
+    checkDumpNumericsAndDiagnostics(noCrashRoot, {}, {});
     assert.isUndefined(noCrashRoot.parent);
     assert.isUndefined(noCrashRoot.guid);
   });
 
+  test('importMemoryDumps_weakMemoryAllocatorDumps', function() {
+    var events = [
+      {
+        pid: 42,
+        ts: 10,
+        ph: 'v',
+        id: '0x0001',
+        args: {
+          dumps: {
+            allocators: {
+              // Sinks for ownership edges (to check that the correct ownership
+              // edges are removed).
+              'root_sink': { guid: '100', attrs: {} },
+              'root_sink/child_sink': { guid: '200', attrs: {} },
+              'root_sink/child_sink/descendant_sink': {
+                guid: '300', attrs: {}
+              },
+
+              // Note: 'removed' in the name of a dump means that the dump will
+              // be removed despite being non-weak (strong), e.g. due to one of
+              // its ancestors being weak.
+
+              // All descendants of a weak root dump should be removed.
+              'weak_root': { guid: '1', attrs: {}, flags: 1 },
+              'weak_root/removed_child': { guid: '2', attrs: {} },
+              'weak_root/inferred_removed_child/removed_descendant': {
+                guid: '3', attrs: {}, flags: 0
+              },
+
+              // A strong root should be kept even if all its descendants are
+              // weak.
+              'strong_root': { guid: '4', attrs: {}, flags: 0 },
+              'strong_root/weak_child': { guid: '5', attrs: {}, flags: 1 },
+              'strong_root/inferred_weak_child/weak_descendant': {
+                guid: '6', attrs: {}, flags: 1
+              },
+
+              // All inferred ancestors of a weak descendant should be marked
+              // weak and, consequently, removed (provided that they don't have
+              // any non-weak descendants).
+              'inferred_weak_root/inferred_weak_child/weak_descendant': {
+                guid: '7', attrs: {}, flags: 1
+              },
+
+              // An inferred dump should be marked non-weak if it has at least
+              // one strong descendant.
+              'inferred_strong_root/child1_weak': {
+                guid: '8', attrs: {}, flags: 1
+              },
+              'inferred_strong_root/child2_strong': {
+                guid: '9', attrs: {}
+              },
+              'inferred_strong_root/child3_weak': {
+                guid: '10', attrs: {}, flags: 1
+              },
+              'inferred_strong_root2/inferred_strong_child/desc1_strong': {
+                guid: '11', attrs: {}
+              },
+              'inferred_strong_root2/inferred_strong_child/desc2_weak': {
+                guid: '12', attrs: {}, flags: 1
+              },
+              'inferred_strong_root2/inferred_strong_child/desc3_strong': {
+                guid: '13', attrs: {}
+              },
+              'inferred_strong_root2/weak_child': {
+                guid: '14', attrs: {}, flags: 1
+              },
+
+              // A descendant dump should be removed if it has a weak ancestor.
+              'strong_root2': { guid: '15', attrs: {} },
+              'strong_root2/weak_child': { guid: '16', attrs: {}, flags: 1 },
+              'strong_root2/weak_child/removed_descendant': {
+                guid: '17', attrs: {}
+              },
+
+              // Check that "weakness" also propagates across ownership edges.
+              'removed_root': { guid: '18', attrs: {} },
+              'removed_root/removed_child': {
+                guid: '19', attrs: {}
+              },
+              'inferred_strong_root3/removed_child': {
+                guid: '20', attrs: {}
+              },
+            },
+            allocators_graph: [
+              { source: '1', target: '100', type: 'ownership' },
+              { source: '2', target: '200', type: 'ownership' },
+              { source: '3', target: '300', type: 'ownership' },
+
+              { source: '4', target: '100', type: 'ownership' },  // Kept.
+              { source: '5', target: '200', type: 'ownership' },
+              { source: '6', target: '300', type: 'ownership' },
+
+              { source: '7', target: '300', type: 'ownership' },
+
+              { source: '8', target: '200', type: 'ownership' },
+              { source: '9', target: '200', type: 'ownership' },  // Kept.
+              { source: '10', target: '200', type: 'ownership' },
+              { source: '11', target: '300', type: 'ownership' },  // Kept.
+              { source: '12', target: '300', type: 'ownership' },
+              { source: '13', target: '300', type: 'ownership' },  // Kept.
+              { source: '14', target: '200', type: 'ownership' },
+
+              { source: '15', target: '100', type: 'ownership' },  // Kept.
+              { source: '16', target: '200', type: 'ownership' },
+              { source: '17', target: '300', type: 'ownership' },
+
+              { source: '18', target: '3' /* not a sink */, type: 'ownership' },
+              { source: '19', target: '200', type: 'ownership' },
+              { source: '20', target: '19' /* not a sink */, type: 'ownership' }
+            ]
+          }
+        }
+      }
+    ];
+    var m = makeModel(events);
+    var p = m.getProcess(42);
+    var d = p.memoryDumps[0];
+    var memoryAllocatorDumps = d.memoryAllocatorDumps;
+    assert.lengthOf(memoryAllocatorDumps, 6);
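+    // Only the six non-weak roots survive: inferred_strong_root,
+    // inferred_strong_root2, inferred_strong_root3, root_sink, strong_root and
+    // strong_root2 (looked up by index below).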
+
+    function checkDump(dump, expectedFullName, expectedGuid, expectedParent,
+        expectedChildCount, expectedOwnsLink, expectedOwnedByLinkCount) {
+      assert.strictEqual(dump.fullName, expectedFullName);
+      assert.strictEqual(dump.guid, expectedGuid);
+      assert.strictEqual(dump.parent, expectedParent);
+      assert.lengthOf(dump.children, expectedChildCount);
+      assert.strictEqual(dump.owns, expectedOwnsLink);
+      assert.lengthOf(dump.ownedBy, expectedOwnedByLinkCount);
+      assert.strictEqual(
+          d.getMemoryAllocatorDumpByFullName(expectedFullName), dump);
+    }
+
+    function checkOwnsLink(ownerDump, expectedTarget) {
+      assert.strictEqual(ownerDump.owns.source, ownerDump);
+      assert.strictEqual(ownerDump.owns.target, expectedTarget);
+    }
+
+    // Check root_sink/* dumps.
+    var rootSink = d.memoryAllocatorDumps[3];
+    checkDump(rootSink, 'root_sink', '100', undefined, 1, undefined, 2);
+    var childSink = rootSink.children[0];
+    checkDump(childSink, 'root_sink/child_sink', '200', rootSink, 1, undefined,
+        1);
+    var descendantSink = childSink.children[0];
+    checkDump(descendantSink, 'root_sink/child_sink/descendant_sink', '300',
+        childSink, 0, undefined, 2);
+
+    // Check strong_root/* dumps.
+    var strongRoot = d.memoryAllocatorDumps[4];
+    checkDump(strongRoot, 'strong_root', '4', undefined, 0, rootSink.ownedBy[0],
+        0);
+
+    // Check inferred_strong_root/* dumps.
+    var inferredStrongRoot = d.memoryAllocatorDumps[0];
+    checkDump(inferredStrongRoot, 'inferred_strong_root', undefined, undefined,
+        1, undefined, 0);
+    var child2Strong = inferredStrongRoot.children[0];
+    checkDump(child2Strong, 'inferred_strong_root/child2_strong', '9',
+        inferredStrongRoot, 0, childSink.ownedBy[0], 0);
+
+    // Check inferred_strong_root2/* dumps.
+    var inferredStrongRoot2 = d.memoryAllocatorDumps[1];
+    checkDump(inferredStrongRoot2, 'inferred_strong_root2', undefined,
+        undefined, 1, undefined, 0);
+    var inferredStrongChild = inferredStrongRoot2.children[0];
+    checkDump(inferredStrongChild,
+        'inferred_strong_root2/inferred_strong_child', undefined,
+        inferredStrongRoot2, 2, undefined, 0);
+    var desc1Strong = inferredStrongChild.children[0];
+    checkDump(desc1Strong,
+        'inferred_strong_root2/inferred_strong_child/desc1_strong', '11',
+        inferredStrongChild, 0, descendantSink.ownedBy[0], 0);
+    var desc3Strong = inferredStrongChild.children[1];
+    checkDump(desc3Strong,
+        'inferred_strong_root2/inferred_strong_child/desc3_strong', '13',
+        inferredStrongChild, 0, descendantSink.ownedBy[1], 0);
+
+    // Check strong_root2/* dumps.
+    var strongRoot2 = d.memoryAllocatorDumps[5];
+    checkDump(strongRoot2, 'strong_root2', '15', undefined, 0,
+        rootSink.ownedBy[1], 0);
+
+    // Check inferred_strong_root3/* dumps.
+    var inferredStrongRoot3 = d.memoryAllocatorDumps[2];
+    checkDump(inferredStrongRoot3, 'inferred_strong_root3', undefined,
+        undefined, 0, undefined, 0);
+
+    // Check the links.
+    checkOwnsLink(strongRoot, rootSink);
+    checkOwnsLink(child2Strong, childSink);
+    checkOwnsLink(desc1Strong, descendantSink);
+    checkOwnsLink(desc3Strong, descendantSink);
+    checkOwnsLink(strongRoot2, rootSink);
+
+    // Check that the removed weak dumps are not indexed.
+    [
+      'weak_root',
+      'weak_root/removed_child',
+      'weak_root/inferred_removed_child',
+      'weak_root/inferred_removed_child/removed_descendant',
+      'strong_root/weak_child',
+      'strong_root/inferred_weak_child/weak_descendant',
+      'inferred_weak_root',
+      'inferred_weak_root/inferred_weak_child',
+      'inferred_weak_root/inferred_weak_child/weak_descendant',
+      'inferred_strong_root/child1_weak',
+      'inferred_strong_root/child3_weak',
+      'inferred_strong_root2/inferred_strong_child/desc2_weak',
+      'inferred_strong_root2/weak_child',
+      'strong_root2/weak_child',
+      'strong_root2/removed_descendant',
+      'removed_root',
+      'removed_root/removed_child',
+      'inferred_strong_root3/removed_child'
+    ].forEach(function(fullName) {
+      assert.isUndefined(d.getMemoryAllocatorDumpByFullName(fullName));
+    });
+  });
+
   test('importMemoryDumps_levelsOfDetail', function() {
     function checkLevelsOfDetail(rawLevelsOfDetail, expectedGlobalLevelOfDetail,
         expectedProcessLevelsOfDetail) {
@@ -3742,25 +4079,7 @@
         ['light', undefined]);
   });
 
-  test('importMemoryDumps_heapDumps', function() {
-    function checkHeapEntry(entry, expectedDump, expectedSize, expectedTitles) {
-      assert.strictEqual(entry.heapDump, expectedDump);
-      assert.strictEqual(entry.size, expectedSize);
-      if (expectedTitles === undefined) {
-        assert.isUndefined(entry.leafStackFrame);
-      } else {
-        assert.deepEqual(
-            entry.leafStackFrame.getUserFriendlyStackTrace(), expectedTitles);
-      }
-    }
-
-    function getFrame(entry, distance) {
-      var frame = entry.leafStackFrame;
-      while (distance-- > 0)
-        frame = frame.parentFrame;
-      return frame;
-    }
-
+  test('importMemoryDumps_heapDumps_oldFormat', function() {
     var events = [  // Intentionally shuffled.
       {
         pid: 21,
@@ -3797,6 +4116,23 @@
       },
       {
         pid: 42,
+        ph: 'M',
+        name: 'typeNames',
+        args: {
+          typeNames: {
+            // GCC.
+            '22': '[unknown]',
+            '23': 'testing::ManuallyAnnotatedMockClass',
+            '24': 'const char* WTF::getStringWithTypeName() [with T = ' +
+                'blink::Event]',
+            '25': 'blink::ContextLifecycleObserver*',
+            '26': 'const char* WTF::getStringWithTypeName() [with T = ' +
+                'blink::WebFrame*]'
+          }
+        }
+      },
+      {
+        pid: 42,
         ts: 10,
         ph: 'v',
         id: '0123',
@@ -3808,17 +4144,20 @@
             heaps: {
               partition_alloc: {
                 entries: [
+                  { type: '24', size: '2e6fc8' },
                   { size: '5cdf91' },
-                  { size: 'abcde', type: '' /* entry should be ignored */ },
-                  { bt: '' /* empty stack trace */, size: '5b6cd6' },
+                  { type: '25', size: '1737e4' },
+                  { bt: '', size: '5b6cd6' },
                   { bt: '4', size: '18f0' },
                   { bt: '3', size: 'e3a8' }
                 ]
               },
               malloc: {
                 entries: [
+                  { size: '789' },
                   { bt: '0', size: '123' },
-                  { bt: '5', size: '456' }
+                  { bt: '5', size: '456' },
+                  { type: '25', size: 'cd' }
                 ]
               }
             }
@@ -3867,16 +4206,20 @@
     assert.deepEqual(
         tr.b.mapItems(m.stackFrames, function(id, f) { return f.title }),
         {
-          'p21:': undefined /* root */,
           'p21:0': 'FrameView::layout',
+          'p21:0:self': '<self>',
           'p21:1': 'MessageLoop::RunTask',
-          'p42:': undefined /* root */,
+          'p42::self': '<self>',
           'p42:0': 'MessageLoop::RunTask',
+          'p42:0:self': '<self>',
           'p42:1': 'TimerBase::run',
           'p42:TWO': 'ScheduledAction::execute',
           'p42:3': 'FunctionCall',
+          'p42:3:self': '<self>',
           'p42:4': 'UpdateLayoutTree',
-          'p42:5': 'MessageLoop::JogTask'
+          'p42:4:self': '<self>',
+          'p42:5': 'MessageLoop::JogTask',
+          'p42:5:self': '<self>'
         });
 
     // 1. Process 21, first dump.
@@ -3890,9 +4233,9 @@
     var partitionAllocEntries1 = partitionAllocDump1.entries;
     assert.lengthOf(partitionAllocEntries1, 2);
     checkHeapEntry(partitionAllocEntries1[0], partitionAllocDump1, 4096,
-        undefined);
+        undefined /* root */, undefined /* sum over all types */);
     checkHeapEntry(partitionAllocEntries1[1], partitionAllocDump1, 2748,
-        ['FrameView::layout', 'MessageLoop::RunTask', undefined]);
+        ['<self>', 'FrameView::layout', 'MessageLoop::RunTask']);
 
     // 2. Process 21, second dump.
     var pmd2 = p1.memoryDumps[1];
@@ -3905,13 +4248,14 @@
     var partitionAllocEntries2 = partitionAllocDump2.entries;
     assert.lengthOf(partitionAllocEntries2, 2);
     checkHeapEntry(partitionAllocEntries2[0], partitionAllocDump2, 8192,
-        undefined);
+        undefined /* root */, undefined /* sum over all types */);
     checkHeapEntry(partitionAllocEntries2[1], partitionAllocDump2, 3567,
-        ['FrameView::layout', 'MessageLoop::RunTask', undefined]);
+        ['<self>', 'FrameView::layout', 'MessageLoop::RunTask'],
+        undefined /* sum over all types */);
 
     // All heap dumps in Process 21 should use the same stack frames.
     assert.strictEqual(
-        getFrame(partitionAllocEntries2[1], 0),
+        getFrame(partitionAllocEntries1[1], 0),
         getFrame(partitionAllocEntries2[1], 0));
 
     // 3. Process 42.
@@ -3923,41 +4267,419 @@
     assert.strictEqual(partitionAllocDump3.processMemoryDump, pmd3);
     assert.equal(partitionAllocDump3.allocatorName, 'partition_alloc');
     var partitionAllocEntries3 = partitionAllocDump3.entries;
-    assert.lengthOf(partitionAllocEntries3, 4);
-    checkHeapEntry(partitionAllocEntries3[0], partitionAllocDump3, 6086545,
-        undefined);
-    checkHeapEntry(partitionAllocEntries3[1], partitionAllocDump3, 5991638,
-        [undefined]);
-    checkHeapEntry(partitionAllocEntries3[2], partitionAllocDump3, 6384,
-        ['UpdateLayoutTree', 'TimerBase::run', 'MessageLoop::RunTask',
-        undefined]);
-    checkHeapEntry(partitionAllocEntries3[3], partitionAllocDump3, 58280,
-        ['FunctionCall', 'ScheduledAction::execute', 'TimerBase::run',
-            'MessageLoop::RunTask', undefined]);
+    assert.lengthOf(partitionAllocEntries3, 6);
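+    // Entry sizes in the trace data are hexadecimal strings,
+    // e.g. '2e6fc8' === 3043272 bytes.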
+    checkHeapEntry(partitionAllocEntries3[0], partitionAllocDump3, 3043272,
+        undefined /* root */, 'blink::Event');
+    checkHeapEntry(partitionAllocEntries3[1], partitionAllocDump3, 6086545,
+        undefined /* root */, undefined /* sum over all types */);
+    checkHeapEntry(partitionAllocEntries3[2], partitionAllocDump3, 1521636,
+        undefined /* root */, 'blink::ContextLifecycleObserver*');
+    checkHeapEntry(partitionAllocEntries3[3], partitionAllocDump3, 5991638,
+        ['<self>'], undefined /* sum over all types */);
+    checkHeapEntry(partitionAllocEntries3[4], partitionAllocDump3, 6384,
+        ['<self>', 'UpdateLayoutTree', 'TimerBase::run',
+            'MessageLoop::RunTask'], undefined /* sum over all types */);
+    checkHeapEntry(partitionAllocEntries3[5], partitionAllocDump3, 58280,
+        ['<self>', 'FunctionCall', 'ScheduledAction::execute', 'TimerBase::run',
+            'MessageLoop::RunTask'], undefined /* sum over all types */);
 
     var mallocDump3 = hds3['malloc'];
     assert.strictEqual(mallocDump3.processMemoryDump, pmd3);
     assert.equal(mallocDump3.allocatorName, 'malloc');
     var mallocEntries3 = mallocDump3.entries;
-    assert.lengthOf(mallocEntries3, 2);
-    checkHeapEntry(mallocEntries3[0], mallocDump3, 291,
-        ['MessageLoop::RunTask', undefined]);
-    checkHeapEntry(mallocEntries3[1], mallocDump3, 1110,
-        ['MessageLoop::JogTask', undefined]);
+    assert.lengthOf(mallocEntries3, 4);
+    checkHeapEntry(mallocEntries3[0], mallocDump3, 1929, undefined /* root */,
+        undefined /* sum over all types */);
+    checkHeapEntry(mallocEntries3[1], mallocDump3, 291,
+        ['<self>', 'MessageLoop::RunTask'], undefined /* sum over all types */);
+    checkHeapEntry(mallocEntries3[2], mallocDump3, 1110,
+        ['<self>', 'MessageLoop::JogTask'], undefined /* sum over all types */);
+    checkHeapEntry(mallocEntries3[3], mallocDump3, 205, undefined /* root */,
+        'blink::ContextLifecycleObserver*');
 
     // All heap dumps in Process 42 should use the same stack frames.
     assert.strictEqual(
-        getFrame(partitionAllocEntries3[2], 3),
-        getFrame(partitionAllocEntries3[1], 0));
-    assert.strictEqual(
-        getFrame(partitionAllocEntries3[3], 2),
-        getFrame(partitionAllocEntries3[2], 1));
-    assert.strictEqual(
-        getFrame(mallocEntries3[0], 0),
-        getFrame(partitionAllocEntries3[2], 2));
+        getFrame(partitionAllocEntries3[5], 3),
+        getFrame(partitionAllocEntries3[4], 2));
     assert.strictEqual(
         getFrame(mallocEntries3[1], 1),
-        getFrame(mallocEntries3[0], 1));
+        getFrame(partitionAllocEntries3[4], 3));
+  });
+
+  test('importMemoryDumps_heapDumps_newFormat', function() {
+    var events = [  // Intentionally shuffled.
+      {
+        pid: 21,
+        ts: 9,
+        ph: 'v',
+        id: '0123',
+        args: {
+          dumps: {
+            heaps: {
+              partition_alloc: {
+                entries: [
+                  { bt: '', type: '25', size: '1000' },
+                  { bt: 'A', size: 'abc' }
+                ]
+              }
+            }
+          }
+        }
+      },
+      {
+        pid: 42,
+        ph: 'M',
+        name: 'stackFrames',
+        args: {
+          stackFrames: {
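+            // The frame IDs below are deliberately unusual strings ('-1',
+            // '0.5', 'TWO', '\u03C0', 'NaN', ...); they reappear verbatim in
+            // the 'p42:<id>' stack frame IDs checked further down.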
+            '-1': { name: '<self>' },
+            '0': { name: 'MessageLoop::RunTask' },
+            '0.5': { name: '<self>', parent: '0' },
+            '1': { name: 'TimerBase::run', parent: '0' },
+            'TWO': { name: 'ScheduledAction::execute', parent: '1' },
+            '2.72': { name: '<self>', parent: 'TWO' },
+            '3': { name: 'FunctionCall', parent: 'TWO' },
+            '\u03C0': { name: '<self>', parent: '3' },
+            '4': { name: 'UpdateLayoutTree', parent: '1' },
+            'FOUR-AND-A-BIT': { name: '<self>', parent: '4' },
+            '5': { name: 'MessageLoop::JogTask' },
+            'NaN': { name: '<self>', parent: '5' }
+          }
+        }
+      },
+      {
+        pid: 42,
+        ph: 'M',
+        name: 'typeNames',
+        args: {
+          typeNames: {
+            // Clang.
+            '22': '[unknown]',
+            '23': 'testing::ManuallyAnnotatedMockClass',
+            '24': 'const char *WTF::getStringWithTypeName() [T = ' +
+                'blink::Event]',
+            '25': 'blink::ContextLifecycleObserver *',
+            '26': 'const char *WTF::getStringWithTypeName() [T = ' +
+                'blink::WebFrame *]'
+          }
+        }
+      },
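+      // (The assertions below expect these compiler-specific
+      // getStringWithTypeName() signatures to be reduced to the bare template
+      // argument type names.)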
+      {
+        pid: 42,
+        ts: 10,
+        ph: 'v',
+        id: '0123',
+        args: {
+          dumps: {
+            process_totals: {
+              resident_set_bytes: '0'
+            },
+            heaps: {
+              partition_alloc: {
+                entries: [
+                  { bt: '' /* root */, size: '5cdf91' },
+                  { bt: '' /* root */, type: '24', size: '2e6fc8' },
+                  { bt: '' /* root */, type: '25', size: '1737e4' },
+                  { bt: '-1', type: '22', size: '5b6cd6' },
+                  { bt: 'FOUR-AND-A-BIT', size: '18f0' },
+                  { bt: 'FOUR-AND-A-BIT', type: '26', size: 'c78' },
+                  { bt: '\u03C0', size: 'e3a8' }
+                ]
+              },
+              malloc: {
+                entries: [
+                  { bt: '', size: '789' },
+                  { bt: '0.5', size: '123' },
+                  { bt: 'NaN', size: '456' },
+                  { bt: '3', type: '25', size: 'cd' }
+                ]
+              }
+            }
+          }
+        }
+      },
+      {
+        pid: 21,
+        ph: 'M',
+        name: 'stackFrames',
+        args: {
+          stackFrames: {
+            // Intentionally in reverse order.
+            'A': { name: '<self>', parent: '0' },
+            '0': { name: 'FrameView::layout', parent: '1' },
+            '1': { name: 'MessageLoop::RunTask' }
+          }
+        }
+      },
+      {
+        pid: 21,
+        ts: 12,
+        ph: 'v',
+        id: '0987',
+        args: {
+          dumps: {
+            heaps: {
+              winheap: {
+                entries: []  // Intentionally empty.
+              },
+              partition_alloc: {
+                entries: [
+                  { bt: '', size: '2000' },
+                  { bt: 'A', type: '25', size: 'def' },
+                  { bt: '3' /* invalid */, size: 'aaa' },
+                  { bt: 'A', type: '24' /* invalid */, size: 'bbb' },
+                  { bt: '0', size: 'fff' }
+                ]
+              }
+            }
+          }
+        }
+      },
+      {
+        pid: 21,
+        ph: 'M',
+        name: 'typeNames',
+        args: {
+          typeNames: {
+            // Microsoft Visual C++.
+            '25': 'const char *__cdecl WTF::getStringWithTypeName<class ' +
+                'v8::FunctionCallbackInfo<class v8::Value>>(void)'
+          }
+        }
+      },
+      {
+        pid: 63,
+        ph: 'M',
+        name: 'stackFrames',
+        args: {
+          stackFrames: {}  // Intentionally empty.
+        }
+      },
+      {
+        pid: 63,
+        ph: 'M',
+        name: 'typeNames',
+        args: {
+          typeNames: {}  // Intentionally empty.
+        }
+      },
+      {
+        pid: 63,
+        ts: 13,
+        ph: 'v',
+        id: '0987',
+        args: {
+          dumps: {
+            heaps: {
+              winheap: {
+                entries: [
+                  { bt: '', size: '10000' }
+                ]
+              }
+            }
+          }
+        }
+      },
+      {
+        pid: 84,
+        ph: 'M',
+        name: 'stackFrames',
+        args: {
+          stackFrames: {
+            '5': { name: 'MessageLoop::WalkTask' }
+          }
+        }
+      },
+      {
+        pid: 84,
+        ph: 'M',
+        name: 'typeNames',
+        args: {
+          typeNames: {
+            '0': '[unknown]',
+            '1': 'base::All',
+            '3': 'content::Manually',
+            '4': 'net::Annotated'
+          }
+        }
+      },
+      {
+        pid: 84,
+        ts: 14,
+        ph: 'v',
+        id: '0987',
+        args: {
+          dumps: {
+            heaps: {
+              malloc: {
+                entries: [
+                  { bt: '5', type: '3', size: 'abcd' }
+                ]
+              }
+            }
+          }
+        }
+      }
+    ];
+    var m = makeModel(events);
+    var p1 = m.getProcess(21);
+    var p2 = m.getProcess(42);
+    var p3 = m.getProcess(63);
+    var p4 = m.getProcess(84);
+    assert.lengthOf(m.globalMemoryDumps, 2);
+    assert.lengthOf(p1.memoryDumps, 2);
+    assert.lengthOf(p2.memoryDumps, 1);
+    assert.lengthOf(p3.memoryDumps, 1);
+    assert.lengthOf(p4.memoryDumps, 1);
+
+    // Stack frames.
+    assert.deepEqual(
+        tr.b.mapItems(m.stackFrames, function(id, f) { return f.title }),
+        {
+          'p21:0': 'FrameView::layout',
+          'p21:A': '<self>',
+          'p21:1': 'MessageLoop::RunTask',
+          'p42:-1': '<self>',
+          'p42:0': 'MessageLoop::RunTask',
+          'p42:0.5': '<self>',
+          'p42:1': 'TimerBase::run',
+          'p42:TWO': 'ScheduledAction::execute',
+          'p42:2.72': '<self>',
+          'p42:3': 'FunctionCall',
+          'p42:\u03C0': '<self>',
+          'p42:4': 'UpdateLayoutTree',
+          'p42:FOUR-AND-A-BIT': '<self>',
+          'p42:5': 'MessageLoop::JogTask',
+          'p42:NaN': '<self>',
+          'p84:5': 'MessageLoop::WalkTask'
+        });
+
+    // 1. Process 21, first dump.
+    var pmd1 = p1.memoryDumps[0];
+    var hds1 = pmd1.heapDumps;
+    assert.sameMembers(Object.keys(hds1), ['partition_alloc']);
+
+    var partitionAllocDump1 = hds1['partition_alloc'];
+    assert.strictEqual(partitionAllocDump1.processMemoryDump, pmd1);
+    assert.equal(partitionAllocDump1.allocatorName, 'partition_alloc');
+    var partitionAllocEntries1 = partitionAllocDump1.entries;
+    assert.lengthOf(partitionAllocEntries1, 2);
+    checkHeapEntry(partitionAllocEntries1[0], partitionAllocDump1, 4096,
+        undefined /* root */,
+        'class v8::FunctionCallbackInfo<class v8::Value>');
+    checkHeapEntry(partitionAllocEntries1[1], partitionAllocDump1, 2748,
+        ['<self>', 'FrameView::layout', 'MessageLoop::RunTask'],
+        undefined /* sum over all types */);
+
+    // 2. Process 21, second dump.
+    var pmd2 = p1.memoryDumps[1];
+    var hds2 = pmd2.heapDumps;
+    assert.sameMembers(Object.keys(hds2), ['partition_alloc']);
+
+    var partitionAllocDump2 = hds2['partition_alloc'];
+    assert.strictEqual(partitionAllocDump2.processMemoryDump, pmd2);
+    assert.equal(partitionAllocDump2.allocatorName, 'partition_alloc');
+    var partitionAllocEntries2 = partitionAllocDump2.entries;
+    assert.lengthOf(partitionAllocEntries2, 3);
+    checkHeapEntry(partitionAllocEntries2[0], partitionAllocDump2, 8192,
+        undefined /* root */, undefined /* sum over all types */);
+    checkHeapEntry(partitionAllocEntries2[1], partitionAllocDump2, 3567,
+        ['<self>', 'FrameView::layout', 'MessageLoop::RunTask'],
+        'class v8::FunctionCallbackInfo<class v8::Value>');
+    checkHeapEntry(partitionAllocEntries2[2], partitionAllocDump2, 4095,
+        ['FrameView::layout', 'MessageLoop::RunTask'],
+        undefined /* sum over all types */);
+
+    // All heap dumps in Process 21 should use the same stack frames.
+    assert.strictEqual(
+        getFrame(partitionAllocEntries1[1], 0),
+        getFrame(partitionAllocEntries2[1], 0));
+    assert.strictEqual(
+        getFrame(partitionAllocEntries2[2], 0),
+        getFrame(partitionAllocEntries2[1], 1));
+
+    // 3. Process 42.
+    var pmd3 = p2.memoryDumps[0];
+    var hds3 = pmd3.heapDumps;
+    assert.sameMembers(Object.keys(hds3), ['partition_alloc', 'malloc']);
+
+    var partitionAllocDump3 = hds3['partition_alloc'];
+    assert.strictEqual(partitionAllocDump3.processMemoryDump, pmd3);
+    assert.equal(partitionAllocDump3.allocatorName, 'partition_alloc');
+    var partitionAllocEntries3 = partitionAllocDump3.entries;
+    assert.lengthOf(partitionAllocEntries3, 7);
+    checkHeapEntry(partitionAllocEntries3[0], partitionAllocDump3, 6086545,
+        undefined /* root */, undefined /* sum over all types */);
+    checkHeapEntry(partitionAllocEntries3[1], partitionAllocDump3, 3043272,
+        undefined /* root */, 'blink::Event');
+    checkHeapEntry(partitionAllocEntries3[2], partitionAllocDump3, 1521636,
+        undefined /* root */, 'blink::ContextLifecycleObserver *');
+    checkHeapEntry(partitionAllocEntries3[3], partitionAllocDump3, 5991638,
+        ['<self>'], '[unknown]');
+    checkHeapEntry(partitionAllocEntries3[4], partitionAllocDump3, 6384,
+        ['<self>', 'UpdateLayoutTree', 'TimerBase::run',
+            'MessageLoop::RunTask'], undefined /* sum over all types */);
+    checkHeapEntry(partitionAllocEntries3[5], partitionAllocDump3, 3192,
+        ['<self>', 'UpdateLayoutTree', 'TimerBase::run',
+            'MessageLoop::RunTask'], 'blink::WebFrame *');
+    checkHeapEntry(partitionAllocEntries3[6], partitionAllocDump3, 58280,
+        ['<self>', 'FunctionCall', 'ScheduledAction::execute', 'TimerBase::run',
+            'MessageLoop::RunTask'], undefined /* sum over all types */);
+
+    var mallocDump3 = hds3['malloc'];
+    assert.strictEqual(mallocDump3.processMemoryDump, pmd3);
+    assert.equal(mallocDump3.allocatorName, 'malloc');
+    var mallocEntries3 = mallocDump3.entries;
+    assert.lengthOf(mallocEntries3, 4);
+    checkHeapEntry(mallocEntries3[0], mallocDump3, 1929, undefined /* root */,
+        undefined /* sum over all types */);
+    checkHeapEntry(mallocEntries3[1], mallocDump3, 291,
+        ['<self>', 'MessageLoop::RunTask'], undefined /* sum over all types */);
+    checkHeapEntry(mallocEntries3[2], mallocDump3, 1110,
+        ['<self>', 'MessageLoop::JogTask'], undefined /* sum over all types */);
+    checkHeapEntry(mallocEntries3[3], mallocDump3, 205,
+        ['FunctionCall', 'ScheduledAction::execute', 'TimerBase::run',
+            'MessageLoop::RunTask'], 'blink::ContextLifecycleObserver *');
+
+    // All heap dumps in Process 42 should use the same stack frames.
+    assert.strictEqual(
+        getFrame(partitionAllocEntries3[5], 0),
+        getFrame(partitionAllocEntries3[4], 0));
+    assert.strictEqual(
+        getFrame(partitionAllocEntries3[6], 3),
+        getFrame(partitionAllocEntries3[4], 2));
+    assert.strictEqual(
+        getFrame(mallocEntries3[1], 1),
+        getFrame(partitionAllocEntries3[4], 3));
+    assert.strictEqual(
+        getFrame(mallocEntries3[3], 0),
+        getFrame(partitionAllocEntries3[6], 1));
+
+    // 4. Process 63.
+    var pmd4 = p3.memoryDumps[0];
+    var hds4 = pmd4.heapDumps;
+    assert.sameMembers(Object.keys(hds4), ['winheap']);
+
+    var winheapDump = hds4['winheap'];
+    assert.strictEqual(winheapDump.processMemoryDump, pmd4);
+    assert.equal(winheapDump.allocatorName, 'winheap');
+    var winheapEntries = winheapDump.entries;
+    assert.lengthOf(winheapEntries, 1);
+    checkHeapEntry(winheapEntries[0], winheapDump, 65536,
+        undefined /* root */, undefined /* sum over all types */);
+
+    // 5. Process 84.
+    var pmd5 = p4.memoryDumps[0];
+    var hds5 = pmd5.heapDumps;
+    assert.sameMembers(Object.keys(hds5), ['malloc']);
+
+    var mallocDump4 = hds5['malloc'];
+    assert.strictEqual(mallocDump4.processMemoryDump, pmd5);
+    assert.equal(mallocDump4.allocatorName, 'malloc');
+    var mallocEntries4 = mallocDump4.entries;
+    assert.lengthOf(mallocEntries4, 1);
+    checkHeapEntry(mallocEntries4[0], mallocDump4, 43981,
+        ['MessageLoop::WalkTask'], 'content::Manually');
   });
 
   test('importMemoryDumps_composableDumps', function() {
@@ -3976,7 +4698,7 @@
             heaps: {
               partition_alloc: {
                 entries: [
-                  { size: '500' }
+                  { bt: '99', type: '888', size: '500' }
                 ]
               }
             }
@@ -4015,6 +4737,16 @@
           }
         }
       },
+      {  // Stack frames (required for heap dumps).
+        pid: 42,
+        ph: 'M',
+        name: 'stackFrames',
+        args: {
+          stackFrames: {
+            '99': { name: 'MessageLoop::RunTask' }
+          }
+        }
+      },
       {  // Allocator dumps.
         pid: 42,
         ts: 10001,
@@ -4126,6 +4858,18 @@
           }
         }
       },
+      {  // Object type names (required for heap dumps).
+        pid: 42,
+        ph: 'M',
+        name: 'typeNames',
+        args: {
+          typeNames: {
+            // GCC.
+            '888': 'const char* WTF::getStringWithTypeName() [with T = ' +
+                'cc::SurfaceFactory]'
+          }
+        }
+      },
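+      // (The heap dump entry's frame '99' and type '888' are resolved through
+      // the two metadata events above; see the leafStackFrame and
+      // objectTypeName assertions further down.)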
       {  // Allocator dumps and dump edges (should be merged).
         pid: 42,
         ts: 10002,
@@ -4173,13 +4917,6 @@
       assert.closeTo(dump.duration, expectedDuration / 1000, 1e-5);
     }
 
-    function checkAttribute(attribute, expectedType, expectedUnits,
-        expectedValue) {
-      assert.instanceOf(attribute, expectedType);
-      assert.strictEqual(attribute.units, expectedUnits);
-      assert.strictEqual(attribute.value, expectedValue);
-    }
-
     function checkLinkCounts(allocatorDump, expectedHasOwns,
         expectedOwnedByCount, expectedRetainsCount, expectedRetainedByCount) {
       assert.strictEqual(allocatorDump.owns !== undefined, expectedHasOwns);
@@ -4229,13 +4966,18 @@
     assert.deepEqual(totals.platformSpecific, {private_bytes: 128});
 
     var vmRegions = pmd1.vmRegions;
-    assert.lengthOf(vmRegions, 1);
-    assert.strictEqual(vmRegions[0].mappedFile, '[stack:20310]');
-    assert.strictEqual(vmRegions[0].startAddress, 240);
-    assert.strictEqual(vmRegions[0].sizeInBytes, 336);
-    assert.strictEqual(vmRegions[0].protectionFlagsToString, 'rw-');
-    assert.strictEqual(vmRegions[0].byteStats.proportionalResident, 158);
-    assert.isUndefined(vmRegions[0].byteStats.privateDirtyResident);
+    checkVMRegions(vmRegions, [
+      {
+        mappedFile: '[stack:20310]',
+        startAddress: 240,
+        sizeInBytes: 336,
+        protectionFlags: VMRegion.PROTECTION_FLAG_READ |
+            VMRegion.PROTECTION_FLAG_WRITE,
+        byteStats: {
+          proportionalResident: 158
+        }
+      }
+    ]);
 
     var memoryAllocatorDumps = pmd1.memoryAllocatorDumps;
     assert.lengthOf(memoryAllocatorDumps, 2);
@@ -4245,14 +4987,8 @@
     assert.strictEqual(local1Dump.fullName, 'local1');
     assert.isUndefined(local1Dump.parent);
     assert.lengthOf(local1Dump.children, 0);
-    assert.sameMembers(Object.keys(local1Dump.attributes),
-        ['A', 'B', 'size', 'effective_size']);
-    checkAttribute(local1Dump.attributes['A'], tr.model.StringAttribute, '',
-        'blue');
-    checkAttribute(local1Dump.attributes['B'], tr.model.StringAttribute, '',
-        'red');
-    assert.isUndefined(local1Dump.attributes['size']);
-    assert.isUndefined(local1Dump.attributes['effective_size']);
+    checkDumpNumericsAndDiagnostics(local1Dump, {},
+        { 'A': 'blue', 'B': 'red' });
     checkLinkCounts(local1Dump, true /* owns */, 0 /* owned by */,
         0 /* retains */, 0 /* retained by */);
 
@@ -4261,12 +4997,7 @@
     assert.strictEqual(local2Dump.fullName, 'local2');
     assert.isUndefined(local2Dump.parent);
     assert.lengthOf(local2Dump.children, 0);
-    assert.sameMembers(Object.keys(local2Dump.attributes),
-        ['B', 'size', 'effective_size']);
-    checkAttribute(local2Dump.attributes['B'], tr.model.StringAttribute, '',
-        'yellow');
-    assert.isUndefined(local2Dump.attributes['size']);
-    assert.isUndefined(local2Dump.attributes['effective_size']);
+    checkDumpNumericsAndDiagnostics(local2Dump, {}, { 'B': 'yellow' });
     checkLinkCounts(local2Dump, false /* owns */, 0 /* owned by */,
         1 /* retains */, 0 /* retained by */);
 
@@ -4278,7 +5009,8 @@
     var entries = heapDump.entries;
     assert.lengthOf(entries, 1);
     assert.strictEqual(entries[0].heapDump, heapDump);
-    assert.isUndefined(entries[0].leafStackFrame);
+    assert.strictEqual(entries[0].leafStackFrame.title, 'MessageLoop::RunTask');
+    assert.strictEqual(entries[0].objectTypeName, 'cc::SurfaceFactory');
     assert.strictEqual(entries[0].size, 1280);
 
     // Check the other dumps.
@@ -4289,25 +5021,25 @@
     assert.strictEqual(otherLocal1Dump, pmd2.memoryAllocatorDumps[0]);
     assert.strictEqual(otherLocal1Dump.fullName, 'local1');
     assert.isUndefined(otherLocal1Dump.parent);
-    assert.sameMembers(Object.keys(otherLocal1Dump.attributes),
-        ['A', 'size', 'effective_size']);
-    checkAttribute(otherLocal1Dump.attributes['A'], tr.model.ScalarAttribute,
-        'bytes', 2989);
-    assert.isUndefined(otherLocal1Dump.attributes['size']);
-    assert.isUndefined(otherLocal1Dump.attributes['effective_size']);
+    checkDumpNumericsAndDiagnostics(otherLocal1Dump, { 'A': 2989 }, {});
     assert.isUndefined(pmd2.heapDumps);
     checkLinkCounts(otherLocal1Dump, false /* owns */, 1 /* owned by */,
         0 /* retains */, 0 /* retained by */);
 
     assert.isUndefined(pmd3.levelOfDetail);
     var otherVmRegions = pmd3.vmRegions;
-    assert.lengthOf(otherVmRegions, 1);
-    assert.strictEqual(otherVmRegions[0].mappedFile, '/dev/ashmem/dalvik');
-    assert.strictEqual(otherVmRegions[0].startAddress, 848);
-    assert.strictEqual(otherVmRegions[0].sizeInBytes, 592);
-    assert.strictEqual(otherVmRegions[0].protectionFlagsToString, 'r-x');
-    assert.strictEqual(otherVmRegions[0].byteStats.privateDirtyResident, 205);
-    assert.isUndefined(otherVmRegions[0].byteStats.proportionalResident);
+    checkVMRegions(otherVmRegions, [
+      {
+        mappedFile: '/dev/ashmem/dalvik',
+        startAddress: 848,
+        sizeInBytes: 592,
+        protectionFlags: VMRegion.PROTECTION_FLAG_READ |
+            VMRegion.PROTECTION_FLAG_EXECUTE,
+        byteStats: {
+          privateDirtyResident: 205
+        }
+      }
+    ]);
     assert.lengthOf(pmd3.memoryAllocatorDumps, 0);
     assert.isUndefined(pmd3.heapDumps);
 
@@ -4317,26 +5049,15 @@
     assert.strictEqual(shared1Dump, gmd1.memoryAllocatorDumps[0]);
     assert.strictEqual(shared1Dump.fullName, 'shared1');
     assert.isUndefined(shared1Dump.parent);
-    assert.sameMembers(Object.keys(shared1Dump.attributes),
-        ['A', 'B', 'size', 'effective_size']);
-    checkAttribute(shared1Dump.attributes['A'], tr.model.StringAttribute, '',
-        'purple');
-    checkAttribute(shared1Dump.attributes['B'], tr.model.StringAttribute, '',
-        'green');
-    assert.isUndefined(shared1Dump.attributes['size']);
-    assert.isUndefined(shared1Dump.attributes['effective_size']);
+    checkDumpNumericsAndDiagnostics(shared1Dump, {},
+        { 'A': 'purple', 'B': 'green' });
     checkLinkCounts(shared1Dump, false /* owns */, 1 /* owned by */,
         0 /* retains */, 0 /* retained by */);
     var shared2Dump = gmd1.getMemoryAllocatorDumpByFullName('shared2');
     assert.strictEqual(shared2Dump, gmd1.memoryAllocatorDumps[1]);
     assert.strictEqual(shared2Dump.fullName, 'shared2');
     assert.isUndefined(shared2Dump.parent);
-    assert.sameMembers(Object.keys(shared2Dump.attributes),
-        ['A', 'size', 'effective_size']);
-    checkAttribute(shared2Dump.attributes['A'], tr.model.StringAttribute, '',
-        'cyan');
-    assert.isUndefined(shared2Dump.attributes['size']);
-    assert.isUndefined(shared2Dump.attributes['effective_size']);
+    checkDumpNumericsAndDiagnostics(shared2Dump, {}, { 'A': 'cyan' });
     checkLinkCounts(shared2Dump, false /* owns */, 0 /* owned by */,
         0 /* retains */, 1 /* retained by */);
 
@@ -4345,12 +5066,7 @@
     assert.strictEqual(otherShared1Dump, gmd2.memoryAllocatorDumps[0]);
     assert.strictEqual(otherShared1Dump.fullName, 'shared1');
     assert.isUndefined(otherShared1Dump.parent);
-    assert.sameMembers(Object.keys(otherShared1Dump.attributes),
-        ['A', 'size', 'effective_size']);
-    checkAttribute(otherShared1Dump.attributes['A'], tr.model.StringAttribute,
-        '', 'brown');
-    assert.isUndefined(otherShared1Dump.attributes['size']);
-    assert.isUndefined(otherShared1Dump.attributes['effective_size']);
+    checkDumpNumericsAndDiagnostics(otherShared1Dump, {}, { 'A': 'brown' });
     checkLinkCounts(otherShared1Dump, true /* owns */, 0 /* owned by */,
         0 /* retains */, 0 /* retained by */);
 
@@ -4481,7 +5197,7 @@
       displayTimeUnit: 'ns'
     };
     var m = makeModel(JSON.stringify(eventData));
-    assert.equal(m.intrinsicTimeUnit, tr.b.u.TimeDisplayModes.ns);
+    assert.equal(m.intrinsicTimeUnit, tr.v.TimeDisplayModes.ns);
   });
 
   test('extractBattorSubTraces', function() {
@@ -4500,7 +5216,7 @@
         { name: 'a', args: {}, pid: 1, ts: 0, cat: 'baz', tid: 2, ph: 'B', sf: 7 }, // @suppress longLineCheck
         { name: 'b', args: {}, pid: 1, ts: 5, cat: 'baz', tid: 2, ph: 'E', sf: 8 } // @suppress longLineCheck
       ],
-      battorLogAsString: battorLog
+      powerTraceAsString: battorLog
     };
 
     var m = makeModel(eventData);
@@ -4551,6 +5267,376 @@
     assert.equal(s1.start, .01);
   });
 
+  test('createNestableAsyncSlicesForUserTimingWithoutArgs', function() {
+    /**
+     * Structure of these async slices
+     *
+     * Group A:
+     *
+     * |__________|
+     *      a1
+     *
+     * Group B:
+     *
+     *                                |______________________________|
+     *                                               b1
+     *                                           |__________||_|
+     *                                                b2      b4
+     *                                            |_|
+     *                                             b3
+     **/
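+    // b1 uses async id 4 while b2, b3 and b4 share id 5; the expectations
+    // below nest them purely by their begin/end timestamps.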
+    var events = [
+      {
+        name: 'A:a1',
+        args: {params: ''},
+        pid: 1,
+        ts: 100,
+        cat: 'blink.user_timing',
+        tid: 2,
+        id: 3,
+        ph: 'b'
+      },
+      {
+        name: 'A:a1',
+        args: {params: ''},
+        pid: 1,
+        ts: 110,
+        cat: 'blink.user_timing',
+        tid: 2,
+        id: 3,
+        ph: 'e'
+      },
+      {
+        name: 'B:b1',
+        args: {params: ''},
+        pid: 1,
+        ts: 120,
+        cat: 'blink.user_timing',
+        tid: 2,
+        id: 4,
+        ph: 'b'
+      },
+      {
+        name: 'B:b2',
+        args: {params: ''},
+        pid: 1,
+        ts: 130,
+        cat: 'blink.user_timing',
+        tid: 2,
+        id: 5,
+        ph: 'b'
+      },
+      {
+        name: 'B:b3',
+        args: {params: ''},
+        pid: 1,
+        ts: 131,
+        cat: 'blink.user_timing',
+        tid: 2,
+        id: 5,
+        ph: 'b'
+      },
+      {
+        name: 'B:b3',
+        args: {params: ''},
+        pid: 1,
+        ts: 132,
+        cat: 'blink.user_timing',
+        tid: 2,
+        id: 5,
+        ph: 'e'
+      },
+      {
+        name: 'B:b2',
+        args: {params: ''},
+        pid: 1,
+        ts: 140,
+        cat: 'blink.user_timing',
+        tid: 2,
+        id: 5,
+        ph: 'e'
+      },
+      {
+        name: 'B:b4',
+        args: {params: ''},
+        pid: 1,
+        ts: 141,
+        cat: 'blink.user_timing',
+        tid: 2,
+        id: 5,
+        ph: 'b'
+      },
+      {
+        name: 'B:b4',
+        args: {params: ''},
+        pid: 1,
+        ts: 142,
+        cat: 'blink.user_timing',
+        tid: 2,
+        id: 5,
+        ph: 'e'
+      },
+      {
+        name: 'B:b1',
+        args: {params: ''},
+        pid: 1,
+        ts: 150,
+        cat: 'blink.user_timing',
+        tid: 2,
+        id: 4,
+        ph: 'e'
+      }
+    ];
+
+    var m = makeModel(events);
+    assert.equal(m.numProcesses, 1);
+    var p = m.processes[1];
+    assert.isDefined(p);
+    assert.equal(p.numThreads, 1);
+    var t = p.threads[2];
+    var asyncSliceGroup = t.asyncSliceGroup;
+    assert.equal(asyncSliceGroup.length, 2);
+    for (var i = 0; i < asyncSliceGroup.length; ++i) {
+      assert.isTrue(asyncSliceGroup.slices[i] instanceof MeasureAsyncSlice);
+    }
+
+    var groupA = asyncSliceGroup.slices[0];
+    assert.equal(groupA.viewSubGroupTitle, 'A');
+    assert.equal(groupA.title, 'a1');
+    assert.equal(groupA.subSlices.length, 0);
+    var groupB = asyncSliceGroup.slices[1];
+    assert.equal(groupB.viewSubGroupTitle, 'B');
+    assert.equal(groupB.title, 'b1');
+    assert.equal(groupB.subSlices.length, 2);
+    var groupBSubSlice1 = groupB.subSlices[0];
+    assert.equal(groupBSubSlice1.viewSubGroupTitle, 'B');
+    assert.equal(groupBSubSlice1.title, 'b2');
+    assert.equal(groupBSubSlice1.subSlices.length, 1);
+    assert.equal(groupBSubSlice1.subSlices[0].viewSubGroupTitle, 'B');
+    assert.equal(groupBSubSlice1.subSlices[0].title, 'b3');
+    assert.equal(groupBSubSlice1.subSlices[0].subSlices.length, 0);
+    var groupBSubSlice2 = groupB.subSlices[1];
+    assert.equal(groupBSubSlice2.viewSubGroupTitle, 'B');
+    assert.equal(groupBSubSlice2.title, 'b4');
+    assert.equal(groupBSubSlice2.subSlices.length, 0);
+  });
+
+  test('createNestableAsyncSlicesForUserTimingWithArgs', function() {
+    /**
+     * Structure of these async slices
+     *
+     * Group A:
+     *
+     * |__________|          |__________|
+     *      a1                    a2
+     *
+     * a1.args = {a: 1}
+     * a2.args = {a: 2, b: 2}
+     **/
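+    // The suffix after '/' in each event name below is the base64 encoding of
+    // the JSON args the importer is expected to attach to the slice (e.g.
+    // 'eyJhIjoxfQ==' decodes to '{"a":1}'), matching the deepEqual assertions
+    // at the end of this test.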
+    var events = [
+      {
+        name: 'A:a1/eyJhIjoxfQ==',
+        args: {params: ''},
+        pid: 1,
+        ts: 100,
+        cat: 'blink.user_timing',
+        tid: 2,
+        id: 3,
+        ph: 'b'
+      },
+      {
+        name: 'A:a1/eyJhIjoxfQ==',
+        args: {params: ''},
+        pid: 1,
+        ts: 110,
+        cat: 'blink.user_timing',
+        tid: 2,
+        id: 3,
+        ph: 'e'
+      },
+      {
+        name: 'A:a2/eyJhIjoyLCJiIjoyfQ==',
+        args: {params: ''},
+        pid: 1,
+        ts: 120,
+        cat: 'blink.user_timing',
+        tid: 2,
+        id: 4,
+        ph: 'b'
+      },
+      {
+        name: 'A:a2/eyJhIjoyLCJiIjoyfQ==',
+        args: {params: ''},
+        pid: 1,
+        ts: 130,
+        cat: 'blink.user_timing',
+        tid: 2,
+        id: 4,
+        ph: 'e'
+      }
+    ];
+
+    var m = makeModel(events);
+    assert.equal(m.numProcesses, 1);
+    var p = m.processes[1];
+    assert.isDefined(p);
+    assert.equal(p.numThreads, 1);
+    var t = p.threads[2];
+    var asyncSliceGroup = t.asyncSliceGroup;
+    assert.equal(asyncSliceGroup.length, 2);
+    for (var i = 0; i < asyncSliceGroup.length; ++i) {
+      assert.isTrue(asyncSliceGroup.slices[i] instanceof MeasureAsyncSlice);
+    }
+
+    var a1 = asyncSliceGroup.slices[0];
+    assert.equal(a1.viewSubGroupTitle, 'A');
+    assert.equal(a1.title, 'a1');
+    assert.equal(a1.subSlices.length, 0);
+    assert.deepEqual(a1.args, {a: 1});
+    var a2 = asyncSliceGroup.slices[1];
+    assert.equal(a2.viewSubGroupTitle, 'A');
+    assert.equal(a2.title, 'a2');
+    assert.equal(a2.subSlices.length, 0);
+    assert.deepEqual(a2.args, {a: 2, b: 2});
+  });
+
+  test('UserTimingAsyncSlicesWithNormalAsyncSlices', function() {
+    /**
+     * Structure of user timing async slices
+     *
+     * Group A:
+     *
+     * |__________|
+     *      a1
+     *  |__|
+     *   a2
+     *
+     * B
+     *         |__|
+     *          B
+     * C
+     *             |_|
+     *              C
+     **/
+    var events = [
+      {
+        name: 'A:a1', args: {params: ''}, pid: 1, ts: 1,
+        cat: 'blink.user_timing', tid: 2, id: 3, ph: 'b'
+      },
+      {
+        name: 'A:a1', args: {params: ''}, pid: 1, ts: 11,
+        cat: 'blink.user_timing', tid: 2, id: 3, ph: 'e'
+      },
+      {
+        name: 'A:a2', args: {params: ''}, pid: 1, ts: 2,
+        cat: 'blink.user_timing', tid: 2, id: 4, ph: 'b'
+      },
+      {
+        name: 'A:a2', args: {params: ''}, pid: 1, ts: 4,
+        cat: 'blink.user_timing', tid: 2, id: 4, ph: 'e'
+      },
+      {
+        name: 'B', args: {}, pid: 1, ts: 9, cat: 'foo',
+        tid: 2, ph: 'b', id: 5
+      },
+      {
+        name: 'B', args: {}, pid: 1, ts: 11, cat: 'foo',
+        tid: 2, ph: 'e', id: 5
+      },
+      {
+        name: 'C', args: {}, pid: 1, ts: 12, cat: 'foo',
+        tid: 2, ph: 'b', id: 6
+      },
+      {
+        name: 'C', args: {}, pid: 1, ts: 13, cat: 'foo',
+        tid: 2, ph: 'e', id: 6
+      }
+    ];
+
+    var m = makeModel(events);
+    assert.equal(m.numProcesses, 1);
+    var p = m.processes[1];
+    assert.isDefined(p);
+    assert.equal(p.numThreads, 1);
+    var t = p.threads[2];
+    var asyncSliceGroup = t.asyncSliceGroup;
+    assert.equal(asyncSliceGroup.length, 3);
+    assert.isTrue(asyncSliceGroup.slices[0] instanceof MeasureAsyncSlice);
+    assert.isFalse(asyncSliceGroup.slices[1] instanceof MeasureAsyncSlice);
+    assert.isFalse(asyncSliceGroup.slices[2] instanceof MeasureAsyncSlice);
+
+    var a1 = asyncSliceGroup.slices[0];
+    assert.equal(a1.viewSubGroupTitle, 'A');
+    assert.equal(a1.title, 'a1');
+    assert.equal(a1.subSlices.length, 1);
+    var a2 = a1.subSlices[0];
+    assert.equal(a2.viewSubGroupTitle, 'A');
+    assert.equal(a2.title, 'a2');
+    assert.equal(a2.subSlices.length, 0);
+    var B = asyncSliceGroup.slices[1];
+    assert.equal(B.viewSubGroupTitle, 'B');
+    assert.equal(B.title, 'B');
+    assert.equal(B.subSlices.length, 0);
+    var C = asyncSliceGroup.slices[2];
+    assert.equal(C.viewSubGroupTitle, 'C');
+    assert.equal(C.title, 'C');
+    assert.equal(C.subSlices.length, 0);
+  });
+
+  test('clockSync', function() {
+    var events = [{
+      name: 'clock_sync', args: {sync_id: 'abc', issue_ts: 5},
+      pid: 1, ts: 15, cat: '__metadata', tid: 2, ph: 'c'
+    }];
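+    // ts and issue_ts are in microseconds; assuming the usual trace-event
+    // scaling to milliseconds, the sync record should start at 0.005 ms
+    // (issue_ts 5) and span 0.010 ms (up to ts 15), as asserted below.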
+
+    var m = makeModel(events);
+
+    var foundClockSync = false;
+    for (var i = 0; i < m.clockSyncRecords.length; i++) {
+      var clockSync = m.clockSyncRecords[i];
+      if (clockSync.syncId !== 'abc')
+        continue;
+
+      foundClockSync = true;
+      assert.equal(clockSync.start, .005);
+      assert.equal(clockSync.duration, .010);
+    }
+
+    assert.isTrue(foundClockSync);
+    assert.isFalse(m.hasImportWarnings);
+  });
+
+  test('clockSync_missingSyncId', function() {
+    var events = [{
+      name: 'clock_sync', args: {issue_ts: 5},
+      pid: 1, ts: 15, cat: '__metadata', tid: 2, ph: 'c'
+    }];
+
+    var m = makeModel(events);
+    assert.isTrue(m.hasImportWarnings);
+  });
+
+  test('clockSync_missingStartTs', function() {
+    var events = [{
+      name: 'clock_sync', args: {sync_id: 'abc'},
+      pid: 1, ts: 15, cat: '__metadata', tid: 2, ph: 'c'
+    }];
+
+    var m = makeModel(events);
+
+    var foundClockSync = false;
+    for (var i = 0; i < m.clockSyncRecords.length; i++) {
+      var clockSync = m.clockSyncRecords[i];
+      if (clockSync.syncId !== 'abc')
+        continue;
+
+      foundClockSync = true;
+    }
+
+    assert.isFalse(foundClockSync);
+    assert.isTrue(m.hasImportWarnings);
+  });
+
   // TODO(nduca): one slice, two threads
   // TODO(nduca): one slice, two pids
 
diff --git a/catapult/tracing/tracing/extras/importer/v8/v8_log_importer.html b/catapult/tracing/tracing/extras/importer/v8/v8_log_importer.html
index a9e7136..116eb57 100644
--- a/catapult/tracing/tracing/extras/importer/v8/v8_log_importer.html
+++ b/catapult/tracing/tracing/extras/importer/v8/v8_log_importer.html
@@ -76,6 +76,10 @@
 
     __proto__: tr.importer.Importer.prototype,
 
+    get importerName() {
+      return 'V8LogImporter';
+    },
+
     processTimerEvent_: function(name, start, length) {
       var args = TimerEventDefaultArgs[name];
       if (args === undefined) return;
diff --git a/catapult/tracing/tracing/extras/importer/zip_importer.html b/catapult/tracing/tracing/extras/importer/zip_importer.html
index a81bca4..9f1a2aa 100644
--- a/catapult/tracing/tracing/extras/importer/zip_importer.html
+++ b/catapult/tracing/tracing/extras/importer/zip_importer.html
@@ -43,6 +43,10 @@
   ZipImporter.prototype = {
     __proto__: tr.importer.Importer.prototype,
 
+    get importerName() {
+      return 'ZipImporter';
+    },
+
     isTraceDataContainer: function() {
       return true;
     },
diff --git a/catapult/tracing/tracing/extras/lean_config.html b/catapult/tracing/tracing/extras/lean_config.html
index 0c54aea..b39e510 100644
--- a/catapult/tracing/tracing/extras/lean_config.html
+++ b/catapult/tracing/tracing/extras/lean_config.html
@@ -9,6 +9,6 @@
 The lean config is just enough to import uncompressed, trace-event-formatted
 json blobs.
 -->
-<link rel="import" href="/tracing/model/model.html">
 
 <link rel="import" href="/tracing/extras/importer/trace_event_importer.html">
+<link rel="import" href="/tracing/model/model.html">
diff --git a/catapult/tracing/tracing/extras/measure/measure_async_slice.html b/catapult/tracing/tracing/extras/measure/measure_async_slice.html
index f3fa0bb..427a401 100644
--- a/catapult/tracing/tracing/extras/measure/measure_async_slice.html
+++ b/catapult/tracing/tracing/extras/measure/measure_async_slice.html
@@ -13,6 +13,12 @@
   var AsyncSlice = tr.model.AsyncSlice;
 
   function MeasureAsyncSlice() {
+    this.groupTitle_ = 'Ungrouped Measure';
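+    // Measure names are expected to look like 'group:title', optionally
+    // followed by '/<data>'; pull out the group for viewSubGroupTitle and
+    // keep the bare title as the slice title.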
+    var matched = /([^\/:]+):([^\/:]+)\/?(.*)/.exec(arguments[1]);
+    if (matched !== null) {
+      arguments[1] = matched[2];
+      this.groupTitle_ = matched[1];
+    }
     AsyncSlice.apply(this, arguments);
   }
 
@@ -20,7 +26,7 @@
     __proto__: AsyncSlice.prototype,
 
     get viewSubGroupTitle() {
-      return 'Mark/Measure';
+      return this.groupTitle_;
     },
 
     get title() {
diff --git a/catapult/tracing/tracing/extras/measure/measure_async_slice_test.html b/catapult/tracing/tracing/extras/measure/measure_async_slice_test.html
index 7919bef..d175d4e 100644
--- a/catapult/tracing/tracing/extras/measure/measure_async_slice_test.html
+++ b/catapult/tracing/tracing/extras/measure/measure_async_slice_test.html
@@ -24,7 +24,7 @@
     assert.equal(AsyncSlice.getConstructor(
       'blink.user_timing', 'createImports'),
                  MeasureAsyncSlice);
-    assert.equal(s.viewSubGroupTitle, 'Mark/Measure');
+    assert.equal(s.viewSubGroupTitle, 'Ungrouped Measure');
   });
 
   test('import', function() {
diff --git a/catapult/tracing/tracing/extras/rail/animation_interaction_record.html b/catapult/tracing/tracing/extras/rail/animation_interaction_record.html
deleted file mode 100644
index 4439162..0000000
--- a/catapult/tracing/tracing/extras/rail/animation_interaction_record.html
+++ /dev/null
@@ -1,95 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/statistics.html">
-<link rel="import" href="/tracing/extras/chrome/chrome_process_helper.html">
-<link rel="import" href="/tracing/extras/rail/rail_interaction_record.html">
-
-<script>
-'use strict';
-
-/**
- * @fileoverview The Animation phase of RAIL.
- */
-tr.exportTo('tr.e.rail', function() {
-  // The FPS comfort score is maximized at this value of average
-  // frames-per-second.
-  var MAX_COMFORT_FPS = 60;
-
-  // The FPS comfort score is minimized at this value of average
-  // frames-per-second.
-  var MIN_COMFORT_FPS = 10;
-
-  // The jank comfort score is maximized when frame timestamp discrepancy is
-  // less than or equal to this:
-  var MIN_DISCOMFORT_JANK = 0.05;
-
-  // The jank comfort score is minimized when frame timestamp discrepancy is
-  // greater than or equal to this:
-  var MAX_DISCOMFORT_JANK = 0.3;
-
-  function AnimationInteractionRecord(parentModel, start, duration) {
-    tr.e.rail.RAILInteractionRecord.call(
-        this, parentModel, 'Animation', 'rail_animate',
-        start, duration);
-    this.frameEvents_ = undefined;
-  }
-
-  AnimationInteractionRecord.prototype = {
-    __proto__: tr.e.rail.RAILInteractionRecord.prototype,
-
-    get frameEvents() {
-      if (this.frameEvents_)
-        return this.frameEvents_;
-
-      this.frameEvents_ = new tr.model.EventSet();
-
-      this.associatedEvents.forEach(function(event) {
-        if (event.title === tr.e.audits.IMPL_RENDERING_STATS)
-          this.frameEvents_.push(event);
-      }, this);
-
-      return this.frameEvents_;
-    },
-
-    get normalizedUserComfort() {
-      // Combine jank comfort and fps comfort non-linearly.
-      // weightedAverage2 weights lower scores exponentially more heavily than
-      // higher scores.
-      // http://goo.gl/W6MswA
-      return tr.e.rail.weightedAverage2(
-          this.normalizedJankComfort, this.normalizedFPSComfort);
-    },
-
-    get normalizedFPSComfort() {
-      var durationSeconds = this.duration / 1000;
-      var avgSpf = durationSeconds / this.frameEvents.length;
-      var normalizedDiscomfort = tr.b.normalize(
-          avgSpf, 1 / MAX_COMFORT_FPS, 1 / MIN_COMFORT_FPS);
-      var normalizedComfort = 1 - normalizedDiscomfort;
-      return tr.b.clamp(normalizedComfort, 0, 1);
-    },
-
-    get normalizedJankComfort() {
-      var frameTimestamps = this.frameEvents.toArray().map(function(event) {
-        return event.start;
-      });
-      var absolute = false;
-      var discrepancy = tr.b.Statistics.timestampsDiscrepancy(
-          frameTimestamps, absolute);
-      var normalizedDiscomfort = tr.b.normalize(
-          discrepancy, MIN_DISCOMFORT_JANK, MAX_DISCOMFORT_JANK);
-      var normalizedComfort = 1 - normalizedDiscomfort;
-      return tr.b.clamp(normalizedComfort, 0, 1);
-    }
-  };
-
-  return {
-    AnimationInteractionRecord: AnimationInteractionRecord
-  };
-});
-</script>
diff --git a/catapult/tracing/tracing/extras/rail/animation_interaction_record_test.html b/catapult/tracing/tracing/extras/rail/animation_interaction_record_test.html
deleted file mode 100644
index 59bff11..0000000
--- a/catapult/tracing/tracing/extras/rail/animation_interaction_record_test.html
+++ /dev/null
@@ -1,81 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/core/test_utils.html">
-<link rel="import" href="/tracing/extras/rail/animation_interaction_record.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  var test_utils = tr.c.TestUtils;
-
-  test('OneHundredFPS', function() {
-    var animationIR = new tr.e.rail.AnimationInteractionRecord(
-        undefined, 0, 100);
-    for (var i = 1; i < 10; ++i) {
-      animationIR.associatedEvents.push(tr.c.TestUtils.newAsyncSliceEx({
-        title: tr.e.audits.IMPL_RENDERING_STATS,
-        start: i * 10,
-        end: (i * 10) + 1
-      }));
-    }
-    assert.closeTo(1, animationIR.normalizedJankComfort, 1e-4);
-    assert.closeTo(1, animationIR.normalizedUserComfort, 1e-3);
-  });
-
-  test('OneFPS', function() {
-    // Minimum comfort is when at least max(2, frameCount/10) frames are longer
-    // than 50ms, and avgFPS <= 10.
-    // One frame-per-second causes FPS comfort = 0.
-    var animationIR = new tr.e.rail.AnimationInteractionRecord(
-        undefined, 0, 2000);
-    animationIR.associatedEvents.push(tr.c.TestUtils.newAsyncSliceEx({
-      title: tr.e.audits.IMPL_RENDERING_STATS,
-      start: 0,
-      end: 1
-    }));
-    animationIR.associatedEvents.push(tr.c.TestUtils.newAsyncSliceEx({
-      title: tr.e.audits.IMPL_RENDERING_STATS,
-      start: 999,
-      end: 1000
-    }));
-    animationIR.associatedEvents.push(tr.c.TestUtils.newAsyncSliceEx({
-      title: tr.e.audits.IMPL_RENDERING_STATS,
-      start: 1999,
-      end: 2000
-    }));
-    assert.equal(1, animationIR.normalizedJankComfort);
-    assert.equal(0, animationIR.normalizedFPSComfort);
-    assert.closeTo(0.2689, animationIR.normalizedUserComfort, 1e-4);
-  });
-
-  test('jank', function() {
-    var animationIR = new tr.e.rail.AnimationInteractionRecord(
-        undefined, 0, 101000);
-    var timestamp = 0;
-    for (var i = 0; i < 100; ++i) {
-      timestamp += 16;
-      animationIR.associatedEvents.push(tr.c.TestUtils.newAsyncSliceEx({
-        title: tr.e.audits.IMPL_RENDERING_STATS,
-        start: timestamp,
-        end: timestamp + 1
-      }));
-    }
-    timestamp += 1000;
-    animationIR.associatedEvents.push(tr.c.TestUtils.newAsyncSliceEx({
-      title: tr.e.audits.IMPL_RENDERING_STATS,
-      start: timestamp,
-      end: timestamp + 1
-    }));
-    assert.equal(0, animationIR.normalizedJankComfort);
-    assert.equal(0, animationIR.normalizedFPSComfort);
-    assert.equal(0, animationIR.normalizedUserComfort);
-  });
-});
-</script>
-
diff --git a/catapult/tracing/tracing/extras/rail/idle_interaction_record.html b/catapult/tracing/tracing/extras/rail/idle_interaction_record.html
deleted file mode 100644
index 44d7677..0000000
--- a/catapult/tracing/tracing/extras/rail/idle_interaction_record.html
+++ /dev/null
@@ -1,46 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/extras/rail/rail_interaction_record.html">
-
-<script>
-'use strict';
-
-/**
- * @fileoverview The Idle phase of RAIL.
- */
-tr.exportTo('tr.e.rail', function() {
-  function IdleInteractionRecord(parentModel, start, duration) {
-    tr.e.rail.RAILInteractionRecord.call(
-        this, parentModel, 'Idle', 'rail_idle',
-        start, duration);
-  }
-
-  IdleInteractionRecord.prototype = {
-    __proto__: tr.e.rail.RAILInteractionRecord.prototype,
-
-    get normalizedUserComfort() {
-      return 1;
-    },
-
-    // Unlike during active IRs, while the user is idle, the CPU should not be
-    // utilized much.
-
-    get minCpuFraction() {
-      return 0.1;
-    },
-
-    get maxCpuFraction() {
-      return 1;
-    }
-  };
-
-  return {
-    IdleInteractionRecord: IdleInteractionRecord
-  };
-});
-</script>
diff --git a/catapult/tracing/tracing/extras/rail/idle_interaction_record_test.html b/catapult/tracing/tracing/extras/rail/idle_interaction_record_test.html
deleted file mode 100644
index 6697dca..0000000
--- a/catapult/tracing/tracing/extras/rail/idle_interaction_record_test.html
+++ /dev/null
@@ -1,40 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/core/test_utils.html">
-<link rel="import" href="/tracing/extras/rail/idle_interaction_record.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  var test_utils = tr.c.TestUtils;
-
-  test('optimalEfficiency', function() {
-    var idleIR = new tr.e.rail.IdleInteractionRecord(undefined, 0, 150);
-    assert.equal(1, idleIR.normalizedUserComfort);
-    assert.equal(1, idleIR.normalizedEfficiency);
-  });
-
-  test('pessimalEfficiency', function() {
-    var slice = tr.c.TestUtils.newSliceEx({
-      title: 'foo',
-      start: 0,
-      end: 150,
-      type: tr.model.ThreadSlice
-    });
-    slice.isTopLevel = true;
-    slice.cpuSelfTime = 150;
-
-    var idleIR = new tr.e.rail.IdleInteractionRecord(undefined, 0, 150);
-    idleIR.associatedEvents.push(slice);
-
-    assert.equal(1, idleIR.normalizedUserComfort);
-    assert.equal(0, idleIR.normalizedEfficiency);
-  });
-});
-</script>
diff --git a/catapult/tracing/tracing/extras/rail/ir_verifier.html b/catapult/tracing/tracing/extras/rail/ir_verifier.html
deleted file mode 100644
index 81da237..0000000
--- a/catapult/tracing/tracing/extras/rail/ir_verifier.html
+++ /dev/null
@@ -1,138 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/extras/chrome/chrome_test_utils.html">
-<link rel="import" href="/tracing/extras/rail/rail_ir_finder.html">
-
-<script>
-'use strict';
-tr.exportTo('tr.e.rail', function() {
-  function compareEvents(x, y) {
-    if (x.start !== y.start)
-      return x.start - y.start;
-    return x.guid - y.guid;
-  }
-
-  function IRVerifier() {
-    this.customizeModelCallback_ = undefined;
-    this.expectedIRs_ = undefined;
-  }
-
-  IRVerifier.prototype = {
-    set customizeModelCallback(cmc) {
-      this.customizeModelCallback_ = cmc;
-    },
-
-    // |irs| must be sorted by start time.
-    set expectedIRs(irs) {
-      this.expectedIRs_ = irs;
-    },
-
-    importView_: function(cb) {
-      // Dynamically import the ir-verifier-view.
-      // It is a layering violation to statically import UI from non-UI.
-      // 'Polymer' is undefined in d8, so this will fail if tr.isHeadless.
-      var viewPath = '/ui/extras/rail/ir_verifier_view.html';
-      var viewLink = document.querySelector('link[rel="import"][href="' +
-          viewPath + '"]');
-      if (viewLink) {
-        if (viewLink.isLoaded) {
-          cb();
-        } else {
-          viewLink.addEventListener('load', cb);
-        }
-        return;
-      }
-      viewLink = document.createElement('link');
-      viewLink.rel = 'import';
-      viewLink.href = viewPath;
-      viewLink.isLoaded = false;
-      viewLink.addEventListener('load', function() {
-        viewLink.isLoaded = true;
-        cb();
-      });
-      document.head.appendChild(viewLink);
-    },
-
-    maybeAddHTMLOutput_: function(model, browserHelper, actualIRs, failure) {
-      if (tr.isHeadless)
-        return;
-
-      // The view might not be available until after the test finishes.
-      // The view cannot be added to the DOM after the test finishes.
-      // Add a placeholder to the DOM now, so that whenever the view is loaded,
-      // it can be added to the placeholder.
-      var placeholder = document.createElement('div');
-      placeholder.style.minWidth = '1px';
-      placeholder.style.minHeight = '1px';
-      tr.b.unittest.addHTMLOutputForCurrentTest(placeholder);
-
-      this.importView_(function() {
-        var view = document.createElement('tr-ui-e-rail-ir-verifier-view');
-        view.bounds = model.bounds;
-        if (failure)
-          view.expectedIRs = this.expectedIRs_;
-        view.actualIRs = actualIRs;
-        view.model = browserHelper.getAllAsyncSlicesMatching(
-            function(slice) {
-              return true;
-        }).sort(compareEvents);
-        placeholder.appendChild(view);
-        view.update();
-      });
-    },
-
-    verify: function() {
-      var model = tr.e.chrome.ChromeTestUtils.newChromeModel(
-          this.customizeModelCallback_);
-      var modelHelper = new tr.e.audits.ChromeModelHelper(model);
-      var rirf = new tr.e.rail.RAILIRFinder(model, modelHelper);
-      var actualIRs = rirf.findAllInteractionRecords();
-
-      // findAllInteractionRecords() doesn't sort, but we need to sort them in
-      // order to compare them to expectedIRs.
-      actualIRs.sort(compareEvents);
-
-      var failure = undefined;
-      try {
-        assert.equal(this.expectedIRs_.length, actualIRs.length);
-        for (var i = 0; i < this.expectedIRs_.length; ++i) {
-          var at = 'IRs[' + i + '].';
-          assert.equal(this.expectedIRs_[i].title, actualIRs[i].title,
-                      at + 'title');
-          if (this.expectedIRs_[i].name !== undefined) {
-            assert.equal(this.expectedIRs_[i].name, actualIRs[i].name,
-                        at + 'name');
-          }
-          assert.equal(this.expectedIRs_[i].start, actualIRs[i].start,
-                      at + 'start');
-          assert.equal(this.expectedIRs_[i].end, actualIRs[i].end, at + 'end');
-          assert.equal(this.expectedIRs_[i].eventCount,
-                      actualIRs[i].associatedEvents.length, at + 'eventCount');
-        }
-      } catch (caught) {
-        failure = caught;
-      }
-
-      var debug = !tr.isHeadless && (
-          location.search.split('&').indexOf('debug') >= 0);
-      if (!failure && !debug)
-        return;
-
-      // TODO(benjhayden): Why is the
-      // getComputedStyle(HTMLTestCaseResult).display sometimes none?
-      // this.maybeAddHTMLOutput_(
-      //    model, modelHelper.browserHelper, actualIRs, failure);
-
-      if (failure)
-        throw failure;
-    }
-  };
-
-  return {IRVerifier: IRVerifier};
-});
-</script>
diff --git a/catapult/tracing/tracing/extras/rail/load_interaction_record.html b/catapult/tracing/tracing/extras/rail/load_interaction_record.html
deleted file mode 100644
index 9b7d902..0000000
--- a/catapult/tracing/tracing/extras/rail/load_interaction_record.html
+++ /dev/null
@@ -1,84 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/units/histogram.html">
-<link rel="import" href="/tracing/extras/rail/rail_interaction_record.html">
-
-<script>
-'use strict';
-
-/**
- * @fileoverview The Load phase of RAIL.
- */
-tr.exportTo('tr.e.rail', function() {
-  // This histogram represents the number of people who we believe would have
-  // comfort with a response level of a certain value. We have set this with
-  // just a best-effort guess, though. In #1696, we plan to derive this
-  // experimentally.
-  var COMFORT_HISTOGRAM = tr.b.u.Histogram.fromDict({
-    unit: 'unitless',
-    min: 1000,
-    max: 60000,
-    centralBinWidth: 5900,
-    underflowBin: {min: -Number.MAX_VALUE, max: 1000, count: 1000},
-    centralBins: [
-      {min: 1000, max: 6900, count: 901},
-      {min: 6900, max: 12800, count: 574},
-      {min: 12800, max: 18700, count: 298},
-      {min: 18700, max: 24600, count: 65},
-      {min: 24600, max: 30500, count: 35},
-      {min: 30500, max: 36400, count: 23},
-      {min: 36400, max: 42300, count: 16},
-      {min: 42300, max: 48200, count: 10},
-      {min: 48200, max: 54100, count: 5},
-      {min: 54100, max: 60000, count: 2}
-    ],
-    overflowBin: {min: 60000, max: Number.MAX_VALUE, count: 0}
-  });
-
-  function LoadInteractionRecord(parentModel, start, duration) {
-    tr.e.rail.RAILInteractionRecord.call(
-        this, parentModel, 'Load', 'rail_load',
-        start, duration);
-
-    // |renderProcess| is the renderer process that contains the loading
-    // RenderFrame.
-    this.renderProcess = undefined;
-
-    // |renderMainThread| is the CrRendererMain thread in the |renderProcess|
-    // that contains the loading RenderFrame.
-    this.renderMainThread = undefined;
-
-    // |routingId| identifies the loading RenderFrame within the renderer
-    // process.
-    this.routingId = undefined;
-
-    // |parentRoutingId| identifies the RenderFrame that created and contains
-    // the loading RenderFrame.
-    this.parentRoutingId = undefined;
-
-    // |loadFinishedEvent|, if present, signals that this is a main frame.
-    this.loadFinishedEvent = undefined;
-
-    // Startup LoadIRs do not have renderProcess, routingId, or
-    // parentRoutingId. Maybe RenderLoadIR should be a separate class?
-  }
-
-  LoadInteractionRecord.prototype = {
-    __proto__: tr.e.rail.RAILInteractionRecord.prototype,
-
-    get normalizedUserComfort() {
-      return COMFORT_HISTOGRAM.getInterpolatedCountAt(this.duration) /
-        COMFORT_HISTOGRAM.maxCount;
-    }
-  };
-
-  return {
-    LoadInteractionRecord: LoadInteractionRecord
-  };
-});
-</script>
diff --git a/catapult/tracing/tracing/extras/rail/proto_ir.html b/catapult/tracing/tracing/extras/rail/proto_ir.html
deleted file mode 100644
index bdd199d..0000000
--- a/catapult/tracing/tracing/extras/rail/proto_ir.html
+++ /dev/null
@@ -1,168 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/base.html">
-<link rel="import" href="/tracing/base/range_utils.html">
-<link rel="import" href="/tracing/core/auditor.html">
-<link rel="import" href="/tracing/extras/rail/animation_interaction_record.html">
-<link rel="import" href="/tracing/extras/rail/response_interaction_record.html">
-<link rel="import" href="/tracing/model/event_info.html">
-
-<script>
-'use strict';
-
-/**
- * @fileoverview An intermediate data format between InputLatencyAsyncSlices
- * and RAILIRs.
- */
-tr.exportTo('tr.e.rail', function() {
-  // This is an intermediate data format between InputLatencyAsyncSlices and
-  // Response and Animation IRs.
-  function ProtoIR(irType, name) {
-    this.irType = irType;
-    this.names = new Set(name ? [name] : undefined);
-    this.start = Infinity;
-    this.end = -Infinity;
-    this.associatedEvents = new tr.model.EventSet();
-  }
-
-  ProtoIR.RESPONSE_TYPE = 'r';
-  ProtoIR.ANIMATION_TYPE = 'a';
-
-  // Explicitly ignore some input events to allow
-  // RAILIRFinder.checkAllInputEventsHandled() to determine which events were
-  // unintentionally ignored due to a bug.
-  ProtoIR.IGNORED_TYPE = 'ignored';
-
-  ProtoIR.prototype = {
-    get isValid() {
-      return this.end > this.start;
-    },
-
-    // Return true if any associatedEvent's typeName is in typeNames.
-    containsTypeNames: function(typeNames) {
-      for (var i = 0; i < this.associatedEvents.length; ++i) {
-        if (typeNames.indexOf(this.associatedEvents[i].typeName) >= 0)
-          return true;
-      }
-      return false;
-    },
-
-    containsSliceTitle: function(title) {
-      for (var i = 0; i < this.associatedEvents.length; ++i) {
-        if (title === this.associatedEvents[i].title)
-          return true;
-      }
-      return false;
-    },
-
-    getIRConstructor: function() {
-      switch (this.irType) {
-        case ProtoIR.RESPONSE_TYPE:
-          return tr.e.rail.ResponseInteractionRecord;
-        case ProtoIR.ANIMATION_TYPE:
-          return tr.e.rail.AnimationInteractionRecord;
-      }
-      return undefined;
-    },
-
-    createInteractionRecord: function(model) {
-      if (!this.isValid) {
-        console.error('Invalid ProtoIR: ' + this.debug() +
-                      ' File a bug with this trace!');
-        return undefined;
-      }
-
-      var constructor = this.getIRConstructor();
-      if (constructor === undefined)
-        return undefined;
-
-      var ir = new constructor(model, this.start, this.end - this.start);
-      var names = [];
-      this.names.forEach(function(name) { names.push(name); });
-      ir.name = names.sort().join(',');
-
-      ir.sourceEvents.addEventSet(this.associatedEvents);
-
-      function pushAssociatedEvents(event) {
-        ir.associatedEvents.push(event);
-
-        // |event| is either an InputLatencyAsyncSlice (which collects all of
-        // its associated events transitively) or a CSS Animation (which doesn't
-        // have any associated events). So this does not need to recurse.
-        if (event.associatedEvents)
-          ir.associatedEvents.addEventSet(event.associatedEvents);
-      }
-
-      this.associatedEvents.forEach(function(event) {
-        pushAssociatedEvents(event);
-
-        // Old-style InputLatencyAsyncSlices have subSlices.
-        if (event.subSlices)
-          event.subSlices.forEach(pushAssociatedEvents);
-      });
-
-      return ir;
-    },
-
-    // Merge the other ProtoIR into this one.
-    // The irTypes need not match: ignored ProtoIRs might be merged into
-    // overlapping ProtoIRs, and Touch-only Animations are merged into Tap
-    // Responses.
-    merge: function(other) {
-      other.names.forEach(function(name) { this.names.add(name); }.bind(this));
-
-      // Don't use pushEvent(), which would lose special start, end.
-      this.associatedEvents.addEventSet(other.associatedEvents);
-      this.start = Math.min(this.start, other.start);
-      this.end = Math.max(this.end, other.end);
-    },
-
-    // Include |event| in this ProtoIR, expanding start/end to include it.
-    pushEvent: function(event) {
-      // Usually, this method will be called while iterating over a list of
-      // events sorted by start time, so this method won't usually change
-      // this.start. However, this will sometimes be called for ProtoIRs created
-      // by previous handlers, in which case event.start could possibly be
-      // before this.start.
-      this.start = Math.min(this.start, event.start);
-      this.end = Math.max(this.end, event.end);
-      this.associatedEvents.push(event);
-    },
-
-    // Returns true if timestamp is contained in this ProtoIR.
-    containsTimestampInclusive: function(timestamp) {
-      return (this.start <= timestamp) && (timestamp <= this.end);
-    },
-
-    // Return true if the other event intersects this ProtoIR.
-    intersects: function(other) {
-      // http://stackoverflow.com/questions/325933
-      return (other.start < this.end) && (other.end > this.start);
-    },
-
-    isNear: function(event, threshold) {
-      return (this.end + threshold) > event.start;
-    },
-
-    // Return a string describing this ProtoIR for debugging.
-    debug: function() {
-      var debugString = this.irType + '(';
-      debugString += parseInt(this.start) + ' ';
-      debugString += parseInt(this.end);
-      this.associatedEvents.forEach(function(event) {
-        debugString += ' ' + event.typeName;
-      });
-      return debugString + ')';
-    }
-  };
-
-  return {
-    ProtoIR: ProtoIR
-  };
-});
-</script>
diff --git a/catapult/tracing/tracing/extras/rail/rail_interaction_record.html b/catapult/tracing/tracing/extras/rail/rail_interaction_record.html
deleted file mode 100644
index d0277a0..0000000
--- a/catapult/tracing/tracing/extras/rail/rail_interaction_record.html
+++ /dev/null
@@ -1,227 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/base/base.html">
-<link rel="import" href="/tracing/base/statistics.html">
-<link rel="import" href="/tracing/core/auditor.html">
-<link rel="import" href="/tracing/model/model.html">
-<link rel="import" href="/tracing/base/range_utils.html">
-<link rel="import" href="/tracing/extras/chrome/chrome_model_helper.html">
-
-<script>
-'use strict';
-
-/**
- * @fileoverview Base class for RAIL interaction records.
- */
-tr.exportTo('tr.e.rail', function() {
-  var ColorScheme = tr.b.ColorScheme;
-
-  // When computing an IR's RAIL score, the IR's comfort and efficiency are
-  // averaged together such that the lower score has a higher weight.
-  // Without knowing which sub-score is lower, comfort is
-  // theoretically twice as important as efficiency. If the entire web were to
-  // eventually achieve relatively high comfort scores such that comfort was
-  // less of a concern than efficiency, then this number could be lowered. If
-  // further thought suggests that comfort is even more than twice as important
-  // as efficiency, then this number could be raised.
-  // Must be greater than 0.
-  var COMFORT_IMPORTANCE = 2;
-
-  // We need an up-front list of all IR types in order to keep various groupings
-  // stable when only a portion of interactions appear in a given trace.
-  var ALL_RAIL_TYPE_NAMES = [
-    'rail_response',
-    'rail_animate',
-    'rail_idle',
-    'rail_load'
-  ];
-
-  var DOES_RAIL_TYPE_NAME_EXIST = {};
-  ALL_RAIL_TYPE_NAMES.forEach(function(railTypeName) {
-    DOES_RAIL_TYPE_NAME_EXIST[railTypeName] = true;
-  });
-
-  var RAIL_ORDER = [];
-  ALL_RAIL_TYPE_NAMES.forEach(function(railTypeName) {
-    RAIL_ORDER.push(railTypeName.toUpperCase());
-    RAIL_ORDER.push(userFriendlyRailTypeName(railTypeName).toUpperCase());
-  });
-
-
-
-  function RAILInteractionRecord(
-      parentModel, title, railTypeName, start, duration) {
-    if (!DOES_RAIL_TYPE_NAME_EXIST[railTypeName])
-      throw new Error(railTypeName + ' is not listed in ALL_RAIL_TYPE_NAMES');
-
-    var colorId = ColorScheme.getColorIdForReservedName(railTypeName);
-    this.railTypeName_ = railTypeName;
-    this.name = '';
-    tr.model.InteractionRecord.call(
-        this, parentModel, title, colorId, start, duration);
-  }
-
-  RAILInteractionRecord.prototype = {
-    __proto__: tr.model.InteractionRecord.prototype,
-
-    updateArgs: function() {
-      var args = {};
-
-      var layoutSlices = this.associatedEvents.filter(function(event) {
-        return event.title === 'FrameView::layout';
-      });
-      var timeInLayout = tr.b.Statistics.sum(layoutSlices, function(event) {
-        return event.duration;
-      });
-
-      args['layoutInfo'] = {
-        'timeInLayout': timeInLayout
-      };
-
-      this.args = args;
-    },
-
-    get railTypeName() {
-      return this.railTypeName_;
-    },
-
-    /**
-     * Returns the overall rail score, from 0 to 1.
-     *
-     * RAILScore for an interaction merges the user's comfort with the
-     * efficiency, in order to create a perception-oriented measure
-     * of how users perceive speed during this interaction.
-     *
-     *  0 means a bad user experience.
-     *  1 means a perfect user experience.
-     */
-    get railScore() {
-      var comfort = this.normalizedUserComfort;
-      var efficiency = this.normalizedEfficiency;
-      return weightedAverage2(comfort, efficiency, COMFORT_IMPORTANCE);
-    },
-
-    /**
-     * Measures the comfort the user experienced, from 0 to 1.
-     *
-     * A user performs an interaction with an expectation in mind.
-     * When we meet their expectations, we get perfect comfort.
-     * When we don't live up to their expectations, comfort goes down.
-     */
-    get normalizedUserComfort() {
-      throw new Error('Not implemented');
-    },
-
-    /**
-     * Returns the sum of the number of CPU ms spent by this IR.
-     */
-    get rawCpuMs() {
-      var cpuMs = 0;
-      this.associatedEvents.forEach(function(event) {
-        if (event.cpuSelfTime)
-          cpuMs += event.cpuSelfTime;
-      });
-      return cpuMs;
-    },
-
-    /**
-     * Returns a number between 0 and 1 representing how efficiently this IR
-     * used CPU resources. 0 is maximally inefficient, 1 is maximally
-     * efficient.
-     */
-    get normalizedCpuEfficiency() {
-      var minCpuMs = this.duration * this.minCpuFraction;
-      var maxCpuMs = this.duration * this.maxCpuFraction;
-      var normalizedCpu = tr.b.normalize(this.rawCpuMs, minCpuMs, maxCpuMs);
-      return 1 - tr.b.clamp(normalizedCpu, 0, 1);
-    },
-
-    /**
-     * The minimum fraction of a CPU that can be spent on this IR before the
-     * efficiency score will be impacted.
-     * If less CPU ms than this is spent on this IR, then
-     * normalizedCpuEfficiency will be 1.
-     */
-    get minCpuFraction() {
-      return 0.5;
-    },
-
-    /**
-     * The maximum fraction of a CPU that can be spent on this IR.
-     * If more CPU ms than this is spent on this IR, then
-     * normalizedCpuEfficiency will be 0.
-     */
-    get maxCpuFraction() {
-      return 1.5;
-    },
-
-    /**
-     * Measures the efficiency of the interaction from 0 to 1.
-     *
-     * Efficiency is a notion of how well we used the machine's limited
-     * resources in service of this interaction. If we used it perfectly,
-     * we would get a 1.0. If we used everything that there was to use ---
-     * power, memory, cpu, then we'd get a zero.
-     */
-    get normalizedEfficiency() {
-      return this.normalizedCpuEfficiency;
-    }
-  };
-
-  // Returns a weighted average of numbers between 0 and 1.
-  // The lower input has a higher weight.
-  // If the first input should carry extra weight a priori, independent of its
-  // relationship to the other input, then set opt_apriori > 1.
-  // This function is graphed at http://goo.gl/XMWUKA
-  function weightedAverage2(x, y, opt_apriori) {
-    var numerator = 0;
-    var denominator = 0;
-
-    var xWeight = (opt_apriori || 1) * Math.exp(1 - x);
-    numerator += xWeight * x;
-    denominator += xWeight;
-
-    var yWeight = Math.exp(1 - y);
-    numerator += yWeight * y;
-    denominator += yWeight;
-
-    return numerator / denominator;
-  }
-
-  // A user friendly name is currently formed by dropping the rail_ prefix and
-  // capitalizing.
-  function userFriendlyRailTypeName(railTypeName) {
-    if (railTypeName.length < 6 || railTypeName.indexOf('rail_') != 0)
-      return railTypeName;
-    return railTypeName[5].toUpperCase() + railTypeName.slice(6);
-  }
-
-  // Compare two rail type names or rail user-friendly names so they are sorted
-  // in R,A,I,L order. Capitalization is ignored. Non-rail names are sorted
-  // lexicographically after rail names.
-  function railCompare(name1, name2) {
-    var i1 = RAIL_ORDER.indexOf(name1.toUpperCase());
-    var i2 = RAIL_ORDER.indexOf(name2.toUpperCase());
-    if (i1 == -1 && i2 == -1)
-      return name1.localeCompare(name2);
-    if (i1 == -1)
-      return 1;   // i2 is a RAIL name but not i1.
-    if (i2 == -1)
-      return -1;  // i1 is a RAIL name but not i2.
-    // Two rail names.
-    return i1 - i2;
-  }
-
-  return {
-    RAILInteractionRecord: RAILInteractionRecord,
-    weightedAverage2: weightedAverage2,
-    userFriendlyRailTypeName: userFriendlyRailTypeName,
-    railCompare: railCompare,
-    ALL_RAIL_TYPE_NAMES: ALL_RAIL_TYPE_NAMES
-  };
-});
-</script>
diff --git a/catapult/tracing/tracing/extras/rail/rail_interaction_record_test.html b/catapult/tracing/tracing/extras/rail/rail_interaction_record_test.html
deleted file mode 100644
index 3e60e87..0000000
--- a/catapult/tracing/tracing/extras/rail/rail_interaction_record_test.html
+++ /dev/null
@@ -1,90 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/core/test_utils.html">
-<link rel="import" href="/tracing/extras/chrome/chrome_test_utils.html">
-<link rel="import" href="/tracing/extras/importer/trace_event_importer.html">
-<link rel="import" href="/tracing/extras/rail/stub_rail_interaction_record.html">
-<link rel="import" href="/tracing/model/model.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  var test_utils = tr.c.TestUtils;
-
-  test('layoutInfo', function() {
-    var model = tr.e.chrome.ChromeTestUtils.newChromeModel(function(model) {
-      // TODO(benjhayden): Create
-      model.rendererMain.sliceGroup.pushSlice(test_utils.newSliceEx({
-        title: 'FrameView::layout',
-        start: 0,
-        duration: 100
-      }));
-    });
-
-    var modelHelper = new tr.e.audits.ChromeModelHelper(model);
-    var rir = new tr.e.rail.RAILInteractionRecord(
-        model, 'simple', 'rail_response', 0, 100);
-    rir.associatedEvents.push(model.rendererMain.sliceGroup.slices[0]);
-    rir.updateArgs();
-
-    assert.equal(rir.args.layoutInfo.timeInLayout, 100);
-  });
-
-  // This basically just tests weightedAverage2.
-  test('railScore', function() {
-    var ir = new tr.e.rail.StubRAILInteractionRecord({
-      start: 0, end: 100,
-      railTypeName: 'rail_idle',
-      normalizedUserComfort: 0,
-      normalizedEfficiency: 0
-    });
-    assert.closeTo(0, ir.railScore, 1e-5);
-
-    ir = new tr.e.rail.StubRAILInteractionRecord({
-      start: 0, end: 100,
-      railTypeName: 'rail_idle',
-      normalizedUserComfort: 1,
-      normalizedEfficiency: 1
-    });
-    assert.closeTo(1, ir.railScore, 1e-5);
-
-    ir = new tr.e.rail.StubRAILInteractionRecord({
-      start: 0, end: 100,
-      railTypeName: 'rail_idle',
-      normalizedUserComfort: 1,
-      normalizedEfficiency: 0
-    });
-    assert.closeTo(0.42388, ir.railScore, 1e-5);
-
-    ir = new tr.e.rail.StubRAILInteractionRecord({
-      start: 0, end: 100,
-      railTypeName: 'rail_idle',
-      normalizedUserComfort: 0,
-      normalizedEfficiency: 1
-    });
-    assert.closeTo(0.15536, ir.railScore, 1e-5);
-  });
-
-  test('userFriendlyRailTypeName', function() {
-    // Invalid names shouldn't be modified.
-    var result = tr.e.rail.userFriendlyRailTypeName('not_a_rail_type_name');
-    assert.equal('not_a_rail_type_name', result);
-    result = tr.e.rail.userFriendlyRailTypeName('rail_');
-    assert.equal('rail_', result);
-
-    // Some valid things.
-    result = tr.e.rail.userFriendlyRailTypeName('rail_animate');
-    assert.equal('Animate', result);
-    result = tr.e.rail.userFriendlyRailTypeName('rail_b');
-    assert.equal('B', result);
-    result = tr.e.rail.userFriendlyRailTypeName('rail_123');
-    assert.equal('123', result);
-  });
-});
-</script>
diff --git a/catapult/tracing/tracing/extras/rail/rail_ir_finder.html b/catapult/tracing/tracing/extras/rail/rail_ir_finder.html
deleted file mode 100644
index 026f5a9..0000000
--- a/catapult/tracing/tracing/extras/rail/rail_ir_finder.html
+++ /dev/null
@@ -1,1373 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/base.html">
-<link rel="import" href="/tracing/base/range_utils.html">
-<link rel="import" href="/tracing/core/auditor.html">
-<link rel="import" href="/tracing/extras/chrome/cc/input_latency_async_slice.html">
-<link rel="import" href="/tracing/extras/chrome/chrome_model_helper.html">
-<link rel="import" href="/tracing/extras/rail/idle_interaction_record.html">
-<link rel="import" href="/tracing/extras/rail/load_interaction_record.html">
-<link rel="import" href="/tracing/extras/rail/proto_ir.html">
-<link rel="import" href="/tracing/model/event_info.html">
-<link rel="import" href="/tracing/model/ir_coverage.html">
-
-<script>
-'use strict';
-
-/**
- * @fileoverview Finds RAIL interaction records in a trace model.
- */
-tr.exportTo('tr.e.rail', function() {
-  var INPUT_TYPE = tr.e.cc.INPUT_EVENT_TYPE_NAMES;
-  var ProtoIR = tr.e.rail.ProtoIR;
-
-  function compareEvents(x, y) {
-    if (x.start !== y.start)
-      return x.start - y.start;
-    if (x.end !== y.end)
-      return x.end - y.end;
-    if (x.guid && y.guid)
-      return x.guid - y.guid;
-    return 0;
-  }
-
-  function causedFrame(event) {
-    for (var i = 0; i < event.associatedEvents.length; ++i) {
-      if (event.associatedEvents[i].title === tr.e.audits.IMPL_RENDERING_STATS)
-        return true;
-    }
-    return false;
-  }
-
-  function forEventTypesIn(events, typeNames, cb, opt_this) {
-    events.forEach(function(event) {
-      if (typeNames.indexOf(event.typeName) >= 0) {
-        cb.call(opt_this, event);
-      }
-    });
-  }
-
-  var RENDER_FRAME_IMPL_PREFIX = 'RenderFrameImpl::';
-  var CREATE_CHILD_TITLE = RENDER_FRAME_IMPL_PREFIX + 'createChildFrame';
-  var START_LOAD_TITLE = RENDER_FRAME_IMPL_PREFIX + 'didStartProvisionalLoad';
-  var FAIL_LOAD_TITLE = RENDER_FRAME_IMPL_PREFIX + 'didFailProvisionalLoad';
-  var FINISH_LOAD_TITLE = RENDER_FRAME_IMPL_PREFIX + 'didFinishLoad';
-
-  // This is an instant event that is a subSlice of a FINISH_LOAD_TITLE
-  // event.
-  var LOAD_FINISHED_TITLE = 'LoadFinished';
-
-  function isRenderFrameImplEvent(event) {
-    return event.title.indexOf(RENDER_FRAME_IMPL_PREFIX) === 0;
-  }
-
-  // If there's less than this much time between the end of one event and the
-  // start of the next, then they might be merged.
-  // There was not enough thought given to this value, so if you have any slight
-  // reason to change it, then please do so. It might also be good to split this
-  // into multiple values.
-  var INPUT_MERGE_THRESHOLD_MS = 200;
-  var ANIMATION_MERGE_THRESHOLD_MS = 1;
-
-  // If two MouseWheel events begin this close together, then they're an
-  // Animation, not two responses.
-  var MOUSE_WHEEL_THRESHOLD_MS = 40;
-
-  // If two MouseMoves are more than this far apart, then they're two Responses,
-  // not Animation.
-  var MOUSE_MOVE_THRESHOLD_MS = 40;
-
-  var INSIGNIFICANT_MS = 1;
-
-  var KEYBOARD_TYPE_NAMES = [
-    INPUT_TYPE.CHAR,
-    INPUT_TYPE.KEY_DOWN_RAW,
-    INPUT_TYPE.KEY_DOWN,
-    INPUT_TYPE.KEY_UP
-  ];
-  var MOUSE_RESPONSE_TYPE_NAMES = [
-    INPUT_TYPE.CLICK,
-    INPUT_TYPE.CONTEXT_MENU
-  ];
-  var MOUSE_WHEEL_TYPE_NAMES = [
-    INPUT_TYPE.MOUSE_WHEEL
-  ];
-  var MOUSE_DRAG_TYPE_NAMES = [
-    INPUT_TYPE.MOUSE_DOWN,
-    INPUT_TYPE.MOUSE_MOVE,
-    INPUT_TYPE.MOUSE_UP
-  ];
-  var TAP_TYPE_NAMES = [
-    INPUT_TYPE.TAP,
-    INPUT_TYPE.TAP_CANCEL,
-    INPUT_TYPE.TAP_DOWN
-  ];
-  var PINCH_TYPE_NAMES = [
-    INPUT_TYPE.PINCH_BEGIN,
-    INPUT_TYPE.PINCH_END,
-    INPUT_TYPE.PINCH_UPDATE
-  ];
-  var FLING_TYPE_NAMES = [
-    INPUT_TYPE.FLING_CANCEL,
-    INPUT_TYPE.FLING_START
-  ];
-  var TOUCH_TYPE_NAMES = [
-    INPUT_TYPE.TOUCH_END,
-    INPUT_TYPE.TOUCH_MOVE,
-    INPUT_TYPE.TOUCH_START
-  ];
-  var SCROLL_TYPE_NAMES = [
-    INPUT_TYPE.SCROLL_BEGIN,
-    INPUT_TYPE.SCROLL_END,
-    INPUT_TYPE.SCROLL_UPDATE
-  ];
-  var ALL_HANDLED_TYPE_NAMES = [].concat(
-    KEYBOARD_TYPE_NAMES,
-    MOUSE_RESPONSE_TYPE_NAMES,
-    MOUSE_WHEEL_TYPE_NAMES,
-    MOUSE_DRAG_TYPE_NAMES,
-    PINCH_TYPE_NAMES,
-    TAP_TYPE_NAMES,
-    FLING_TYPE_NAMES,
-    TOUCH_TYPE_NAMES,
-    SCROLL_TYPE_NAMES
-  );
-
-  var RENDERER_FLING_TITLE = 'InputHandlerProxy::HandleGestureFling::started';
-  var CSS_ANIMATION_TITLE = 'Animation';
-
-  // Strings used to name IRs.
-  var LOAD_STARTUP_IR_NAME = 'Startup';
-  var LOAD_SUCCEEDED_IR_NAME = 'Succeeded';
-  var LOAD_FAILED_IR_NAME = 'Failed';
-  var KEYBOARD_IR_NAME = 'Keyboard';
-  var MOUSE_IR_NAME = 'Mouse';
-  var MOUSEWHEEL_IR_NAME = 'MouseWheel';
-  var TAP_IR_NAME = 'Tap';
-  var PINCH_IR_NAME = 'Pinch';
-  var FLING_IR_NAME = 'Fling';
-  var TOUCH_IR_NAME = 'Touch';
-  var SCROLL_IR_NAME = 'Scroll';
-  var CSS_IR_NAME = 'CSS';
-
-  function RAILIRFinder(model, modelHelper) {
-    this.model = model;
-    this.modelHelper = modelHelper;
-  };
-
-  RAILIRFinder.supportsModelHelper = function(modelHelper) {
-    return modelHelper.browserHelper !== undefined;
-  };
-
-  RAILIRFinder.prototype = {
-    findAllInteractionRecords: function() {
-      var rirs = [];
-      rirs.push.apply(rirs, this.findLoadInteractionRecords());
-      rirs.push.apply(rirs, this.findInputInteractionRecords());
-      // findIdleInteractionRecords must be called last!
-      rirs.push.apply(rirs, this.findIdleInteractionRecords(rirs));
-      this.collectUnassociatedEvents_(rirs);
-      return rirs;
-    },
-
-    setIRNames_: function(name, irs) {
-      irs.forEach(function(ir) {
-        ir.name = name;
-      });
-    },
-
-    // Find all unassociated top-level ThreadSlices. If they start during an
-    // Idle or Load IR, then add their entire hierarchy to that IR.
-    collectUnassociatedEvents_: function(rirs) {
-      var vacuumIRs = [];
-      rirs.forEach(function(ir) {
-        if (ir instanceof tr.e.rail.LoadInteractionRecord ||
-            ir instanceof tr.e.rail.IdleInteractionRecord)
-          vacuumIRs.push(ir);
-      });
-      if (vacuumIRs.length === 0)
-        return;
-
-      var allAssociatedEvents = tr.model.getAssociatedEvents(rirs);
-      var unassociatedEvents = tr.model.getUnassociatedEvents(
-          this.model, allAssociatedEvents);
-
-      unassociatedEvents.forEach(function(event) {
-        if (!(event instanceof tr.model.ThreadSlice))
-          return;
-
-        if (!event.isTopLevel)
-          return;
-
-        for (var iri = 0; iri < vacuumIRs.length; ++iri) {
-          var ir = vacuumIRs[iri];
-
-          if ((event.start >= ir.start) &&
-              (event.start < ir.end)) {
-            ir.associatedEvents.addEventSet(event.entireHierarchy);
-            return;
-          }
-        }
-      });
-    },
-
-    // Fill in the empty space between IRs with IdleIRs.
-    findIdleInteractionRecords: function(otherIRs) {
-      if (this.model.bounds.isEmpty)
-        return;
-      var emptyRanges = tr.b.findEmptyRangesBetweenRanges(
-          tr.b.convertEventsToRanges(otherIRs),
-          this.model.bounds);
-      var irs = [];
-      var model = this.model;
-      emptyRanges.forEach(function(range) {
-        // Ignore insignificantly tiny idle ranges.
-        if (range.max < (range.min + INSIGNIFICANT_MS))
-          return;
-        irs.push(new tr.e.rail.IdleInteractionRecord(
-            model, range.min, range.max - range.min));
-      });
-      return irs;
-    },
-
-    getAllFrameEvents: function() {
-      var frameEvents = [];
-      frameEvents.push.apply(frameEvents,
-          this.modelHelper.browserHelper.getFrameEventsInRange(
-              tr.e.audits.IMPL_FRAMETIME_TYPE, this.model.bounds));
-
-      tr.b.iterItems(this.modelHelper.rendererHelpers, function(pid, renderer) {
-        frameEvents.push.apply(frameEvents, renderer.getFrameEventsInRange(
-            tr.e.audits.IMPL_FRAMETIME_TYPE, this.model.bounds));
-      }, this);
-      return frameEvents.sort(compareEvents);
-    },
-
-    getStartLoadEvents: function() {
-      function isStartLoadSlice(slice) {
-        return slice.title === START_LOAD_TITLE;
-      }
-      return this.modelHelper.browserHelper.getAllAsyncSlicesMatching(
-          isStartLoadSlice).sort(compareEvents);
-    },
-
-    getFailLoadEvents: function() {
-      function isFailLoadSlice(slice) {
-        return slice.title === FAIL_LOAD_TITLE;
-      }
-      return this.modelHelper.browserHelper.getAllAsyncSlicesMatching(
-          isFailLoadSlice).sort(compareEvents);
-    },
-
-    // If a thread contains a typical initialization slice, then the first event
-    // on that thread is a startup event.
-    getStartupEvents: function() {
-      function isStartupSlice(slice) {
-        return slice.title === 'BrowserMainLoop::CreateThreads';
-      }
-      var events = this.modelHelper.browserHelper.getAllAsyncSlicesMatching(
-          isStartupSlice);
-      var deduper = new tr.model.EventSet();
-      events.forEach(function(event) {
-        var sliceGroup = event.parentContainer.sliceGroup;
-        var slice = sliceGroup && sliceGroup.findFirstSlice();
-        if (slice)
-          deduper.push(slice);
-      });
-      return deduper.toArray();
-    },
-
-    // Match every event in |openingEvents| to the first following event from
-    // |closingEvents| and return an array containing a load interaction record
-    // for each pair.
-    findLoadInteractionRecords_: function(openingEvents, closingEvents) {
-      var lirs = [];
-      var model = this.model;
-      openingEvents.forEach(function(openingEvent) {
-        closingEvents.forEach(function(closingEvent) {
-          // Ignore opening events that already have a closing event.
-          if (openingEvent.closingEvent)
-            return;
-
-          // Ignore closing events that already belong to an opening event.
-          if (closingEvent.openingEvent)
-            return;
-
-          // Ignore closing events before |openingEvent|.
-          if (closingEvent.start <= openingEvent.start)
-            return;
-
-          // Ignore events from different processes.
-          if (openingEvent.parentContainer.parent.pid !==
-               closingEvent.parentContainer.parent.pid)
-            return;
-
-          // This is the first closing event for this opening event, record it.
-          openingEvent.closingEvent = closingEvent;
-          closingEvent.openingEvent = openingEvent;
-          var lir = new tr.e.rail.LoadInteractionRecord(
-              model, openingEvent.start,
-              closingEvent.end - openingEvent.start);
-          lir.associatedEvents.push(openingEvent);
-          lir.associatedEvents.push(closingEvent);
-
-          // All RenderFrameImpl events contain the routingId.
-          // |openingEvent| may be either didStartProvisionalLoad or
-          // didCommitProvisionalLoad, so use a general prefix test.
-          if (isRenderFrameImplEvent(openingEvent)) {
-            var renderProcessId = openingEvent.parentContainer.parent.pid;
-            lir.renderProcess = this.model.processes[renderProcessId];
-            lir.renderMainThread = lir.renderProcess.findAtMostOneThreadNamed(
-                'CrRendererMain');
-            lir.routingId = openingEvent.args.id;
-            lir.parentRoutingId = this.findLoadParentRoutingId_(lir);
-            this.findLoadFinishedEvent_(lir);
-          }
-          lirs.push(lir);
-        }, this);
-      }, this);
-      return lirs;
-    },
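// A minimal standalone sketch (not part of this file) of just the pairing rule
// above: each opening event claims the first unclaimed closing event that
// starts after it in the same process. The real method additionally builds
// LoadInteractionRecords and, for RenderFrameImpl events, attaches routing ids
// and the finish-load event.
function sketchPairLoads(openings, closings) {
  var pairs = [];
  closings.forEach(function(closing) { closing.claimed = false; });
  openings.forEach(function(opening) {
    for (var i = 0; i < closings.length; i++) {
      var closing = closings[i];
      if (closing.claimed || closing.start <= opening.start ||
          closing.pid !== opening.pid)
        continue;
      closing.claimed = true;
      pairs.push({start: opening.start, end: closing.end});
      break;
    }
  });
  return pairs;
}
// Example: openings starting at 10 and 30 paired with closings spanning
// [20,25] and [40,50] (same pid) give load ranges [10,25] and [30,50].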
-
-    // Find the routingId of the createChildFrame event that created the Load
-    // IR's RenderFrame.
-    findLoadParentRoutingId_: function(lir) {
-      var createChildEvent = undefined;
-      lir.renderMainThread.iterateAllEvents(function(event) {
-        if (event.title !== CREATE_CHILD_TITLE)
-          return;
-
-        if (event.args.child !== lir.routingId)
-          return;
-
-        createChildEvent = event;
-      });
-
-      if (!createChildEvent)
-        return undefined;
-
-      return createChildEvent.args.id;
-    },
-
-    findLoadFinishedEvent_: function(lir) {
-      // First, find the RenderFrameImpl::didFinishLoad event that indicates a
-      // successful load.
-
-      var finishLoadEvent = undefined;
-      lir.renderMainThread.iterateAllEvents(function(event) {
-        if (event.title !== FINISH_LOAD_TITLE)
-          return;
-
-        if (event.start < lir.start)
-          return;
-
-        // TODO(benjhayden) This part of the heuristic is problematic for now
-        // because |lir.end| is naively the first paint after the load starts.
-        if (event.start > lir.end)
-          return;
-
-        if (event.args.id !== lir.routingId)
-          return;
-
-        finishLoadEvent = event;
-      });
-
-      if (!finishLoadEvent)
-        return undefined;
-
-      lir.associatedEvents.push(finishLoadEvent);
-
-      // Then, see if finishLoadEvent contains a subSlice titled
-      // 'LoadFinished', which indicates that the load was for a main frame.
-
-      var loadFinishedEvent = undefined;
-      finishLoadEvent.subSlices.forEach(function(event) {
-        if (event.title !== LOAD_FINISHED_TITLE)
-          return;
-
-        loadFinishedEvent = event;
-      });
-
-      if (!loadFinishedEvent)
-        return;
-
-      lir.loadFinishedEvent = loadFinishedEvent;
-      lir.associatedEvents.push(loadFinishedEvent);
-    },
-
-    // Match up RenderFrameImpl events with frame render events.
-    findLoadInteractionRecords: function() {
-      var startupEvents = this.getStartupEvents();
-      var commitLoadEvents =
-          this.modelHelper.browserHelper.getCommitProvisionalLoadEventsInRange(
-              this.model.bounds);
-      var frameEvents = this.getAllFrameEvents();
-      var startLoadEvents = this.getStartLoadEvents();
-      var failLoadEvents = this.getFailLoadEvents();
-      var lirs = [];
-
-      // Attach frame events to every startup event.
-      var startupLIRs = this.findLoadInteractionRecords_(startupEvents,
-          frameEvents);
-      this.setIRNames_(LOAD_STARTUP_IR_NAME, startupLIRs);
-      lirs.push.apply(lirs, startupLIRs);
-
-      // Attach frame events to every commit load event.
-      var successfulLIRs = this.findLoadInteractionRecords_(commitLoadEvents,
-          frameEvents);
-      this.setIRNames_(LOAD_SUCCEEDED_IR_NAME, successfulLIRs);
-      successfulLIRs.forEach(function(lir) {
-        // If a successful Load IR has a loadFinishedEvent, then it is a main
-        // frame.
-        // Drop sub-frame Loads for now.
-        if (lir.loadFinishedEvent)
-          lirs.push(lir);
-      });
-
-      // Attach fail load events to every start load event.
-      var failedLIRs = this.findLoadInteractionRecords_(startLoadEvents,
-          failLoadEvents);
-      this.setIRNames_(LOAD_FAILED_IR_NAME, failedLIRs);
-      failedLIRs.forEach(function(lir) {
-        // If a failed Load IR has a parentRoutingId, then it is a sub-frame.
-        // Drop sub-frame Loads for now.
-        if (lir.parentRoutingId === undefined)
-          lirs.push(lir);
-      });
-
-      return lirs;
-    },
-
-    // Find ProtoIRs, post-process them, convert them to real IRs.
-    findInputInteractionRecords: function() {
-      var sortedInputEvents = this.getSortedInputEvents();
-      var protoIRs = this.findProtoIRs(sortedInputEvents);
-      protoIRs = this.postProcessProtoIRs(protoIRs);
-      this.checkAllInputEventsHandled(sortedInputEvents, protoIRs);
-
-      var irs = [];
-      var model = this.model;
-      protoIRs.forEach(function(protoIR) {
-        var ir = protoIR.createInteractionRecord(model);
-        if (ir)
-          irs.push(ir);
-      });
-      return irs;
-    },
-
-    findProtoIRs: function(sortedInputEvents) {
-      var protoIRs = [];
-      // This order is not important. Handlers are independent.
-      var handlers = [
-        this.handleKeyboardEvents,
-        this.handleMouseResponseEvents,
-        this.handleMouseWheelEvents,
-        this.handleMouseDragEvents,
-        this.handleTapResponseEvents,
-        this.handlePinchEvents,
-        this.handleFlingEvents,
-        this.handleTouchEvents,
-        this.handleScrollEvents,
-        this.handleCSSAnimations
-      ];
-      handlers.forEach(function(handler) {
-        protoIRs.push.apply(protoIRs, handler.call(this, sortedInputEvents));
-      }, this);
-      protoIRs.sort(compareEvents);
-      return protoIRs;
-    },
-
-    getSortedInputEvents: function() {
-      var inputEvents = [];
-
-      var browserProcess = this.modelHelper.browserHelper.process;
-      var mainThread = browserProcess.findAtMostOneThreadNamed(
-          'CrBrowserMain');
-      mainThread.asyncSliceGroup.iterateAllEvents(function(slice) {
-        if (!slice.isTopLevel)
-          return;
-
-        if (!(slice instanceof tr.e.cc.InputLatencyAsyncSlice))
-          return;
-
-        // TODO(beaudoin): This should never happen, but it does. Investigate
-        // the trace linked in #1567 and remove this check when it's fixed.
-        if (isNaN(slice.start) ||
-            isNaN(slice.duration) ||
-            isNaN(slice.end))
-          return;
-
-        inputEvents.push(slice);
-      }, this);
-
-      return inputEvents.sort(compareEvents);
-    },
-
-    // Every keyboard event is a Response.
-    handleKeyboardEvents: function(sortedInputEvents) {
-      var protoIRs = [];
-      forEventTypesIn(sortedInputEvents, KEYBOARD_TYPE_NAMES, function(event) {
-        var pir = new ProtoIR(ProtoIR.RESPONSE_TYPE, KEYBOARD_IR_NAME);
-        pir.pushEvent(event);
-        protoIRs.push(pir);
-      });
-      return protoIRs;
-    },
-
-    // Some mouse events can be translated directly into Responses.
-    handleMouseResponseEvents: function(sortedInputEvents) {
-      var protoIRs = [];
-      forEventTypesIn(
-          sortedInputEvents, MOUSE_RESPONSE_TYPE_NAMES, function(event) {
-        var pir = new ProtoIR(ProtoIR.RESPONSE_TYPE, MOUSE_IR_NAME);
-        pir.pushEvent(event);
-        protoIRs.push(pir);
-      });
-      return protoIRs;
-    },
-
-    // MouseWheel events are caused either by a physical wheel on a physical
-    // mouse, or by a touch-drag gesture on a track-pad. The physical wheel
-    // causes MouseWheel events that are much more spaced out, and have no
-    // chance of hitting 60fps, so they are each turned into separate Response
-    // IRs. The track-pad causes MouseWheel events that are much closer
-    // together, and are expected to be 60fps, so the first event in a sequence
-    // is turned into a Response, and the rest are merged into an Animation.
-    // NB this threshold uses the two events' start times, unlike
-    // ProtoIR.isNear, which compares the end time of the previous event with
-    // the start time of the next.
-    handleMouseWheelEvents: function(sortedInputEvents) {
-      var protoIRs = [];
-      var currentPIR = undefined;
-      var prevEvent_ = undefined;
-      forEventTypesIn(
-          sortedInputEvents, MOUSE_WHEEL_TYPE_NAMES, function(event) {
-        // Switch prevEvent in one place so that we can early-return later.
-        var prevEvent = prevEvent_;
-        prevEvent_ = event;
-
-        if (currentPIR &&
-            (prevEvent.start + MOUSE_WHEEL_THRESHOLD_MS) >= event.start) {
-          if (currentPIR.irType === ProtoIR.ANIMATION_TYPE) {
-            currentPIR.pushEvent(event);
-          } else {
-            currentPIR = new ProtoIR(ProtoIR.ANIMATION_TYPE,
-                MOUSEWHEEL_IR_NAME);
-            currentPIR.pushEvent(event);
-            protoIRs.push(currentPIR);
-          }
-          return;
-        }
-        currentPIR = new ProtoIR(ProtoIR.RESPONSE_TYPE, MOUSEWHEEL_IR_NAME);
-        currentPIR.pushEvent(event);
-        protoIRs.push(currentPIR);
-      });
-      return protoIRs;
-    },
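// A minimal standalone sketch (not part of this file) of the start-to-start
// grouping used above. The 40ms threshold is an assumption here; the real
// MOUSE_WHEEL_THRESHOLD_MS is defined earlier in this file. Within a group,
// the handler above turns the first event into a Response and the rest into
// an Animation.
function sketchGroupWheelStarts(startTimes, thresholdMs) {
  var groups = [];
  var current = [];
  startTimes.forEach(function(start, i) {
    if (i > 0 && (startTimes[i - 1] + thresholdMs) < start) {
      groups.push(current);
      current = [];
    }
    current.push(start);
  });
  if (current.length)
    groups.push(current);
  return groups;
}
// Example, assuming a 40ms threshold:
// sketchGroupWheelStarts([0, 16, 55, 100, 141, 182], 40)
//   -> [[0, 16, 55], [100], [141], [182]]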
-
-    // Down events followed closely by Up events are click Responses, but the
-    // Response doesn't start until the Up event.
-    //
-    //     RRR
-    // DDD UUU
-    //
-    // If there are any Move events in between a Down and an Up, then the Down
-    // and the first Move are a Response, then the rest of the Moves are an
-    // Animation:
-    //
-    // RRRRRRRAAAAAAAAAAAAAAAAAAAA
-    // DDD MMM MMM MMM MMM MMM UUU
-    //
-    handleMouseDragEvents: function(sortedInputEvents) {
-      var protoIRs = [];
-      var currentPIR = undefined;
-      var mouseDownEvent = undefined;
-      forEventTypesIn(
-          sortedInputEvents, MOUSE_DRAG_TYPE_NAMES, function(event) {
-        switch (event.typeName) {
-          case INPUT_TYPE.MOUSE_DOWN:
-            if (causedFrame(event)) {
-              var pir = new ProtoIR(ProtoIR.RESPONSE_TYPE, MOUSE_IR_NAME);
-              pir.pushEvent(event);
-              protoIRs.push(pir);
-            } else {
-              // Responses typically don't start until the mouse up event.
-              // Add this MouseDown to the Response that starts at the MouseUp.
-              mouseDownEvent = event;
-            }
-            break;
-            // There may be more than 100ms between the start of the mouse down
-            // and the start of the mouse up. Chrome and the web don't start to
-            // respond until the mouse up. ResponseIRs start deducting comfort
-            // at 100ms duration. If more than that 100ms duration is burned
-            // through while waiting for the user to release the
-            // mouse button, then ResponseIR will unfairly start deducting
-            // comfort before Chrome even has a mouse up to respond to.
-            // It is technically possible for a site to afford one response on
-            // mouse down and another on mouse up, but that is an edge case. The
-            // vast majority of mouse downs are not responses.
-
-          case INPUT_TYPE.MOUSE_MOVE:
-            if (!causedFrame(event)) {
-              // Ignore MouseMoves that do not affect the screen. They are not
-              // part of an interaction record by definition.
-              var pir = new ProtoIR(ProtoIR.IGNORED_TYPE);
-              pir.pushEvent(event);
-              protoIRs.push(pir);
-            } else if (!currentPIR ||
-                       !currentPIR.isNear(event, MOUSE_MOVE_THRESHOLD_MS)) {
-              // The first MouseMove after a MouseDown or after a while is a
-              // Response.
-              currentPIR = new ProtoIR(ProtoIR.RESPONSE_TYPE, MOUSE_IR_NAME);
-              currentPIR.pushEvent(event);
-              if (mouseDownEvent) {
-                currentPIR.associatedEvents.push(mouseDownEvent);
-                mouseDownEvent = undefined;
-              }
-              protoIRs.push(currentPIR);
-            } else {
-              // Merge this event into an Animation.
-              if (currentPIR.irType === ProtoIR.ANIMATION_TYPE) {
-                currentPIR.pushEvent(event);
-              } else {
-                currentPIR = new ProtoIR(ProtoIR.ANIMATION_TYPE, MOUSE_IR_NAME);
-                currentPIR.pushEvent(event);
-                protoIRs.push(currentPIR);
-              }
-            }
-            break;
-
-          case INPUT_TYPE.MOUSE_UP:
-            if (!mouseDownEvent) {
-              var pir = new ProtoIR(causedFrame(event) ? ProtoIR.RESPONSE_TYPE :
-                  ProtoIR.IGNORED_TYPE, MOUSE_IR_NAME);
-              pir.pushEvent(event);
-              protoIRs.push(pir);
-              break;
-            }
-
-            if (currentPIR) {
-              currentPIR.pushEvent(event);
-            } else {
-              currentPIR = new ProtoIR(ProtoIR.RESPONSE_TYPE, MOUSE_IR_NAME);
-              if (mouseDownEvent)
-                currentPIR.associatedEvents.push(mouseDownEvent);
-              currentPIR.pushEvent(event);
-              protoIRs.push(currentPIR);
-            }
-            mouseDownEvent = undefined;
-            currentPIR = undefined;
-            break;
-        }
-      });
-      if (mouseDownEvent) {
-        currentPIR = new ProtoIR(ProtoIR.IGNORED_TYPE);
-        currentPIR.pushEvent(mouseDownEvent);
-        protoIRs.push(currentPIR);
-      }
-      return protoIRs;
-    },
-
-    // Solitary Tap events are simple Responses:
-    //
-    // RRR
-    // TTT
-    //
-    // TapDowns are part of Responses.
-    //
-    // RRRRRRR
-    // DDD TTT
-    //
-    // TapCancels are part of Responses, which seems strange. They always go
-    // with scrolls, so they'll probably be merged with scroll Responses.
-    // TapCancels can take a significant amount of time and account for a
-    // significant amount of work, which should be grouped with the scroll IRs
-    // if possible.
-    //
-    // RRRRRRR
-    // DDD CCC
-    //
-    handleTapResponseEvents: function(sortedInputEvents) {
-      var protoIRs = [];
-      var currentPIR = undefined;
-      forEventTypesIn(sortedInputEvents, TAP_TYPE_NAMES, function(event) {
-        switch (event.typeName) {
-          case INPUT_TYPE.TAP_DOWN:
-            currentPIR = new ProtoIR(ProtoIR.RESPONSE_TYPE, TAP_IR_NAME);
-            currentPIR.pushEvent(event);
-            protoIRs.push(currentPIR);
-            break;
-
-          case INPUT_TYPE.TAP:
-            if (currentPIR) {
-              currentPIR.pushEvent(event);
-            } else {
-              // Sometimes we get Tap events with no TapDown, and sometimes a
-              // TapDown precedes the Tap. Handle both.
-              currentPIR = new ProtoIR(ProtoIR.RESPONSE_TYPE, TAP_IR_NAME);
-              currentPIR.pushEvent(event);
-              protoIRs.push(currentPIR);
-            }
-            currentPIR = undefined;
-            break;
-
-          case INPUT_TYPE.TAP_CANCEL:
-            if (!currentPIR) {
-              var pir = new ProtoIR(ProtoIR.IGNORED_TYPE);
-              pir.pushEvent(event);
-              protoIRs.push(pir);
-              break;
-            }
-
-            if (currentPIR.isNear(event, INPUT_MERGE_THRESHOLD_MS)) {
-              currentPIR.pushEvent(event);
-            } else {
-              currentPIR = new ProtoIR(ProtoIR.RESPONSE_TYPE, TAP_IR_NAME);
-              currentPIR.pushEvent(event);
-              protoIRs.push(currentPIR);
-            }
-            currentPIR = undefined;
-            break;
-        }
-      });
-      return protoIRs;
-    },
-
-    // The PinchBegin and the first PinchUpdate comprise a Response, then the
-    // rest of the PinchUpdates comprise an Animation.
-    //
-    // RRRRRRRAAAAAAAAAAAAAAAAAAAA
-    // BBB UUU UUU UUU UUU UUU EEE
-    //
-    handlePinchEvents: function(sortedInputEvents) {
-      var protoIRs = [];
-      var currentPIR = undefined;
-      var sawFirstUpdate = false;
-      var modelBounds = this.model.bounds;
-      forEventTypesIn(sortedInputEvents, PINCH_TYPE_NAMES, function(event) {
-        switch (event.typeName) {
-          case INPUT_TYPE.PINCH_BEGIN:
-            if (currentPIR &&
-                currentPIR.isNear(event, INPUT_MERGE_THRESHOLD_MS)) {
-              currentPIR.pushEvent(event);
-              break;
-            }
-            currentPIR = new ProtoIR(ProtoIR.RESPONSE_TYPE, PINCH_IR_NAME);
-            currentPIR.pushEvent(event);
-            protoIRs.push(currentPIR);
-            sawFirstUpdate = false;
-            break;
-
-          case INPUT_TYPE.PINCH_UPDATE:
-            // Like ScrollUpdates, the Begin and the first Update constitute a
-            // Response, then the rest of the Updates constitute an Animation
-            // that begins when the Response ends. If the user pauses in the
-            // middle of an extended pinch gesture, then multiple Animations
-            // will be created.
-            if (!currentPIR ||
-                ((currentPIR.irType === ProtoIR.RESPONSE_TYPE) &&
-                 sawFirstUpdate) ||
-                !currentPIR.isNear(event, INPUT_MERGE_THRESHOLD_MS)) {
-              currentPIR = new ProtoIR(ProtoIR.ANIMATION_TYPE, PINCH_IR_NAME);
-              currentPIR.pushEvent(event);
-              protoIRs.push(currentPIR);
-            } else {
-              currentPIR.pushEvent(event);
-              sawFirstUpdate = true;
-            }
-            break;
-
-          case INPUT_TYPE.PINCH_END:
-            if (currentPIR) {
-              currentPIR.pushEvent(event);
-            } else {
-              var pir = new ProtoIR(ProtoIR.IGNORED_TYPE);
-              pir.pushEvent(event);
-              protoIRs.push(pir);
-            }
-            currentPIR = undefined;
-            break;
-        }
-      });
-      return protoIRs;
-    },
-
-    // Flings are defined by 3 types of events: FlingStart, FlingCancel, and the
-    // renderer fling event. Flings do not begin with a Response. Flings end
-    // either at the beginning of a FlingCancel, or at the end of the renderer
-    // fling event.
-    //
-    // AAAAAAAAAAAAAAAAAAAAAAAAAA
-    // SSS
-    //     RRRRRRRRRRRRRRRRRRRRRR
-    //
-    //
-    // AAAAAAAAAAA
-    // SSS        CCC
-    //
-    handleFlingEvents: function(sortedInputEvents) {
-      var protoIRs = [];
-      var currentPIR = undefined;
-
-      function isRendererFling(event) {
-        return event.title === RENDERER_FLING_TITLE;
-      }
-      var browserHelper = this.modelHelper.browserHelper;
-      var flingEvents = browserHelper.getAllAsyncSlicesMatching(
-          isRendererFling);
-
-      forEventTypesIn(sortedInputEvents, FLING_TYPE_NAMES, function(event) {
-        flingEvents.push(event);
-      });
-      flingEvents.sort(compareEvents);
-
-      flingEvents.forEach(function(event) {
-        if (event.title === RENDERER_FLING_TITLE) {
-          if (currentPIR) {
-            currentPIR.pushEvent(event);
-          } else {
-            currentPIR = new ProtoIR(ProtoIR.ANIMATION_TYPE, FLING_IR_NAME);
-            currentPIR.pushEvent(event);
-            protoIRs.push(currentPIR);
-          }
-          return;
-        }
-
-        switch (event.typeName) {
-          case INPUT_TYPE.FLING_START:
-            if (currentPIR) {
-              console.error('Another FlingStart? File a bug with this trace!');
-              currentPIR.pushEvent(event);
-            } else {
-              currentPIR = new ProtoIR(ProtoIR.ANIMATION_TYPE, FLING_IR_NAME);
-              currentPIR.pushEvent(event);
-              // Set end to an invalid value so that it can be noticed and fixed
-              // later.
-              currentPIR.end = 0;
-              protoIRs.push(currentPIR);
-            }
-            break;
-
-          case INPUT_TYPE.FLING_CANCEL:
-            if (currentPIR) {
-              currentPIR.pushEvent(event);
-              // FlingCancel events start when TouchStart events start, which is
-              // typically when a Response starts. FlingCancel events end when
-              // chrome acknowledges them, not when they update the screen. So
-              // there might be one more frame during the FlingCancel, after
-              // this Animation ends. That won't affect the scoring algorithms,
-              // and it will make the IRs look more correct if they don't
-              // overlap unnecessarily.
-              currentPIR.end = event.start;
-              currentPIR = undefined;
-            } else {
-              var pir = new ProtoIR(ProtoIR.IGNORED_TYPE);
-              pir.pushEvent(event);
-              protoIRs.push(pir);
-            }
-            break;
-        }
-      });
-      // If there was neither a FLING_CANCEL nor a renderer fling after the
-      // FLING_START, then assume that it ends at the end of the model, so set
-      // the end of currentPIR to the end of the model.
-      if (currentPIR && !currentPIR.end)
-        currentPIR.end = this.model.bounds.max;
-      return protoIRs;
-    },
-
-    // The TouchStart and the first TouchMove comprise a Response, then the
-    // rest of the TouchMoves comprise an Animation.
-    //
-    // RRRRRRRAAAAAAAAAAAAAAAAAAAA
-    // SSS MMM MMM MMM MMM MMM EEE
-    //
-    // If there are no TouchMove events in between a TouchStart and a TouchEnd,
-    // then it's just a Response.
-    //
-    // RRRRRRR
-    // SSS EEE
-    //
-    handleTouchEvents: function(sortedInputEvents) {
-      var protoIRs = [];
-      var currentPIR = undefined;
-      var sawFirstMove = false;
-      forEventTypesIn(sortedInputEvents, TOUCH_TYPE_NAMES, function(event) {
-        switch (event.typeName) {
-          case INPUT_TYPE.TOUCH_START:
-            if (currentPIR) {
-              // NB: currentPIR will probably be merged with something from
-              // handlePinchEvents(). Multiple TouchStart events without an
-              // intervening TouchEnd logically implies that multiple fingers
-              // are on the screen, so this is probably a pinch gesture.
-              currentPIR.pushEvent(event);
-            } else {
-              currentPIR = new ProtoIR(ProtoIR.RESPONSE_TYPE, TOUCH_IR_NAME);
-              currentPIR.pushEvent(event);
-              protoIRs.push(currentPIR);
-              sawFirstMove = false;
-            }
-            break;
-
-          case INPUT_TYPE.TOUCH_MOVE:
-            if (!currentPIR) {
-              currentPIR = new ProtoIR(ProtoIR.ANIMATION_TYPE, TOUCH_IR_NAME);
-              currentPIR.pushEvent(event);
-              protoIRs.push(currentPIR);
-              break;
-            }
-
-            // Like Scrolls and Pinches, the Response is defined to be the
-            // TouchStart plus the first TouchMove, then the rest of the
-            // TouchMoves constitute an Animation.
-            if ((sawFirstMove &&
-                (currentPIR.irType === ProtoIR.RESPONSE_TYPE)) ||
-                !currentPIR.isNear(event, INPUT_MERGE_THRESHOLD_MS)) {
-              // If the Response already contains a TouchMove, or this event
-              // isn't near currentPIR, then finish it and start a new
-              // Animation.
-              var prevEnd = currentPIR.end;
-              currentPIR = new ProtoIR(ProtoIR.ANIMATION_TYPE, TOUCH_IR_NAME);
-              currentPIR.pushEvent(event);
-              // It's possible for there to be a gap between TouchMoves, but
-              // that doesn't mean that there should be an Idle IR there.
-              currentPIR.start = prevEnd;
-              protoIRs.push(currentPIR);
-            } else {
-              currentPIR.pushEvent(event);
-              sawFirstMove = true;
-            }
-            break;
-
-          case INPUT_TYPE.TOUCH_END:
-            if (!currentPIR) {
-              var pir = new ProtoIR(ProtoIR.IGNORED_TYPE);
-              pir.pushEvent(event);
-              protoIRs.push(pir);
-              break;
-            }
-            if (currentPIR.isNear(event, INPUT_MERGE_THRESHOLD_MS)) {
-              currentPIR.pushEvent(event);
-            } else {
-              var pir = new ProtoIR(ProtoIR.IGNORED_TYPE);
-              pir.pushEvent(event);
-              protoIRs.push(pir);
-            }
-            currentPIR = undefined;
-            break;
-        }
-      });
-      return protoIRs;
-    },
-
-    // The first ScrollBegin and the first ScrollUpdate comprise a Response,
-    // then the rest comprise an Animation.
-    //
-    // RRRRRRRAAAAAAAAAAAAAAAAAAAA
-    // BBB UUU UUU UUU UUU UUU EEE
-    //
-    handleScrollEvents: function(sortedInputEvents) {
-      var protoIRs = [];
-      var currentPIR = undefined;
-      var sawFirstUpdate = false;
-      forEventTypesIn(sortedInputEvents, SCROLL_TYPE_NAMES, function(event) {
-        switch (event.typeName) {
-          case INPUT_TYPE.SCROLL_BEGIN:
-            // Always begin a new PIR even if there already is one, unlike
-            // PinchBegin.
-            currentPIR = new ProtoIR(ProtoIR.RESPONSE_TYPE, SCROLL_IR_NAME);
-            currentPIR.pushEvent(event);
-            protoIRs.push(currentPIR);
-            sawFirstUpdate = false;
-            break;
-
-          case INPUT_TYPE.SCROLL_UPDATE:
-            if (currentPIR) {
-              if (currentPIR.isNear(event, INPUT_MERGE_THRESHOLD_MS) &&
-                  ((currentPIR.irType === ProtoIR.ANIMATION_TYPE) ||
-                  !sawFirstUpdate)) {
-                currentPIR.pushEvent(event);
-                sawFirstUpdate = true;
-              } else {
-                currentPIR = new ProtoIR(ProtoIR.ANIMATION_TYPE,
-                    SCROLL_IR_NAME);
-                currentPIR.pushEvent(event);
-                protoIRs.push(currentPIR);
-              }
-            } else {
-              // ScrollUpdate without ScrollBegin.
-              currentPIR = new ProtoIR(ProtoIR.ANIMATION_TYPE, SCROLL_IR_NAME);
-              currentPIR.pushEvent(event);
-              protoIRs.push(currentPIR);
-            }
-            break;
-
-          case INPUT_TYPE.SCROLL_END:
-            if (!currentPIR) {
-              console.error('ScrollEnd without ScrollUpdate? ' +
-                            'File a bug with this trace!');
-              var pir = new ProtoIR(ProtoIR.IGNORED_TYPE);
-              pir.pushEvent(event);
-              protoIRs.push(pir);
-              break;
-            }
-            currentPIR.pushEvent(event);
-            break;
-        }
-      });
-      return protoIRs;
-    },
-
-    // CSS Animations are merged into Animations when they intersect.
-    handleCSSAnimations: function(sortedInputEvents) {
-      var animationEvents = this.modelHelper.browserHelper.
-          getAllAsyncSlicesMatching(function(event) {
-            return ((event.title === CSS_ANIMATION_TITLE) &&
-                    (event.duration > 0));
-      });
-
-      var animationRanges = [];
-      animationEvents.forEach(function(event) {
-        animationRanges.push({
-          min: event.start,
-          max: event.end,
-          event: event
-        });
-      });
-
-      function merge(ranges) {
-        var protoIR = new ProtoIR(ProtoIR.ANIMATION_TYPE, CSS_IR_NAME);
-        ranges.forEach(function(range) {
-          protoIR.pushEvent(range.event);
-        });
-        return protoIR;
-      }
-
-      return tr.b.mergeRanges(animationRanges,
-                              ANIMATION_MERGE_THRESHOLD_MS,
-                              merge);
-    },
-
-    postProcessProtoIRs: function(protoIRs) {
-      // protoIRs is input only. Returns a modified set of ProtoIRs.
-      // The order is important.
-      protoIRs = this.mergeIntersectingResponses(protoIRs);
-      protoIRs = this.mergeIntersectingAnimations(protoIRs);
-      protoIRs = this.fixResponseAnimationStarts(protoIRs);
-      protoIRs = this.fixTapResponseTouchAnimations(protoIRs);
-      return protoIRs;
-    },
-
-    // TouchStarts happen at the same time as ScrollBegins.
-    // It's easier to let multiple handlers create multiple overlapping
-    // Responses and then merge them, rather than make the handlers aware of the
-    // other handlers' PIRs.
-    //
-    // For example:
-    // RR
-    //  RRR  -> RRRRR
-    //    RR
-    //
-    // protoIRs is input only.
-    // Returns a modified set of ProtoIRs.
-    mergeIntersectingResponses: function(protoIRs) {
-      var newPIRs = [];
-      while (protoIRs.length) {
-        var pir = protoIRs.shift();
-        newPIRs.push(pir);
-
-        // Only consider Responses for now.
-        if (pir.irType !== ProtoIR.RESPONSE_TYPE)
-          continue;
-
-        for (var i = 0; i < protoIRs.length; ++i) {
-          var otherPIR = protoIRs[i];
-
-          if (otherPIR.irType !== pir.irType)
-            continue;
-
-          if (!otherPIR.intersects(pir))
-            continue;
-
-          // Don't merge together Responses of the same type.
-          // If handleTouchEvents wanted two of its Responses to be merged, then
-          // it would have made them that way to begin with.
-          var typeNames = pir.associatedEvents.map(function(event) {
-            return event.typeName;
-          });
-          if (otherPIR.containsTypeNames(typeNames))
-            continue;
-
-          pir.merge(otherPIR);
-          protoIRs.splice(i, 1);
-          // Don't skip the next otherPIR!
-          --i;
-        }
-      }
-      return newPIRs;
-    },
-
-    // An animation is simply an expectation of 60fps between start and end.
-    // If two animations overlap, then merge them.
-    //
-    // For example:
-    // AA
-    //  AAA  -> AAAAA
-    //    AA
-    //
-    // protoIRs is input only.
-    // Returns a modified set of ProtoIRs.
-    mergeIntersectingAnimations: function(protoIRs) {
-      var newPIRs = [];
-      while (protoIRs.length) {
-        var pir = protoIRs.shift();
-        newPIRs.push(pir);
-
-        // Only consider Animations for now.
-        if (pir.irType !== ProtoIR.ANIMATION_TYPE)
-          continue;
-
-        var isCSS = pir.containsSliceTitle(CSS_ANIMATION_TITLE);
-        var isFling = pir.containsTypeNames([INPUT_TYPE.FLING_START]);
-
-        for (var i = 0; i < protoIRs.length; ++i) {
-          var otherPIR = protoIRs[i];
-
-          if (otherPIR.irType !== pir.irType)
-            continue;
-
-          // Don't merge CSS Animations with any other types.
-          if (isCSS !== otherPIR.containsSliceTitle(CSS_ANIMATION_TITLE))
-            continue;
-
-          if (!otherPIR.intersects(pir))
-            continue;
-
-          // Don't merge Fling Animations with any other types.
-          if (isFling !== otherPIR.containsTypeNames([INPUT_TYPE.FLING_START]))
-            continue;
-
-          pir.merge(otherPIR);
-          protoIRs.splice(i, 1);
-          // Don't skip the next otherPIR!
-          --i;
-        }
-      }
-      return newPIRs;
-    },
-
-    // The ends of responses frequently overlap the starts of animations.
-    // Fix the animations to reflect the fact that the user can only start to
-    // expect 60fps after the response.
-    //
-    // For example:
-    // RRR   -> RRRAA
-    //  AAAA
-    //
-    // protoIRs is input only.
-    // Returns a modified set of ProtoIRs.
-    fixResponseAnimationStarts: function(protoIRs) {
-      protoIRs.forEach(function(apir) {
-        // Only consider animations for now.
-        if (apir.irType !== ProtoIR.ANIMATION_TYPE)
-          return;
-
-        protoIRs.forEach(function(rpir) {
-          // Only consider responses for now.
-          if (rpir.irType !== ProtoIR.RESPONSE_TYPE)
-            return;
-
-          // Only consider responses that end during the animation.
-          if (!apir.containsTimestampInclusive(rpir.end))
-            return;
-
-          // Ignore Responses that are entirely contained by the animation.
-          if (apir.containsTimestampInclusive(rpir.start))
-            return;
-
-          // Move the animation start to the response end.
-          apir.start = rpir.end;
-        });
-      });
-      return protoIRs;
-    },
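// A minimal standalone sketch (not part of this file) of the clamp above,
// assuming containsTimestampInclusive(t) means start <= t <= end.
function sketchFixAnimationStart(animation, response) {
  var endsInside = (response.end >= animation.start) &&
                   (response.end <= animation.end);
  var startsInside = (response.start >= animation.start) &&
                     (response.start <= animation.end);
  if (endsInside && !startsInside)
    animation.start = response.end;
  return animation;
}
// Example: a Response spanning [0,100] overlapping an Animation spanning
// [50,200] moves the Animation to [100,200]; a Response entirely inside the
// Animation leaves it unchanged.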
-
-    // Merge Tap Responses that overlap Touch-only Animations.
-    // https://github.com/catapult-project/catapult/issues/1431
-    fixTapResponseTouchAnimations: function(protoIRs) {
-      function isTapResponse(pir) {
-        return (pir.irType === ProtoIR.RESPONSE_TYPE) &&
-               pir.containsTypeNames([INPUT_TYPE.TAP]);
-      }
-      function isTouchAnimation(pir) {
-        return (pir.irType === ProtoIR.ANIMATION_TYPE) &&
-               pir.containsTypeNames([INPUT_TYPE.TOUCH_MOVE]) &&
-               !pir.containsTypeNames([
-                   INPUT_TYPE.SCROLL_UPDATE, INPUT_TYPE.PINCH_UPDATE]);
-      }
-      var newPIRs = [];
-      while (protoIRs.length) {
-        var pir = protoIRs.shift();
-        newPIRs.push(pir);
-
-        // protoIRs are sorted by start time, and we don't know whether the Tap
-        // Response or the Touch Animation will be first.
-        var pirIsTapResponse = isTapResponse(pir);
-        var pirIsTouchAnimation = isTouchAnimation(pir);
-        if (!pirIsTapResponse && !pirIsTouchAnimation)
-          continue;
-
-        for (var i = 0; i < protoIRs.length; ++i) {
-          var otherPIR = protoIRs[i];
-
-          if (!otherPIR.intersects(pir))
-            continue;
-
-          if (pirIsTapResponse && !isTouchAnimation(otherPIR))
-            continue;
-
-          if (pirIsTouchAnimation && !isTapResponse(otherPIR))
-            continue;
-
-          // pir might be the Touch Animation, but the merged ProtoIR should be
-          // a Response.
-          pir.irType = ProtoIR.RESPONSE_TYPE;
-
-          pir.merge(otherPIR);
-          protoIRs.splice(i, 1);
-          // Don't skip the next otherPIR!
-          --i;
-        }
-      }
-      return newPIRs;
-    },
-
-    // Check that none of the handlers accidentally ignored an input event.
-    checkAllInputEventsHandled: function(sortedInputEvents, protoIRs) {
-      var handledEvents = [];
-      protoIRs.forEach(function(protoIR) {
-        protoIR.associatedEvents.forEach(function(event) {
-          if (handledEvents.indexOf(event) >= 0) {
-            console.error('double-handled event', event.typeName,
-                parseInt(event.start), parseInt(event.end), protoIR);
-            return;
-          }
-          handledEvents.push(event);
-        });
-      });
-
-      sortedInputEvents.forEach(function(event) {
-        if (handledEvents.indexOf(event) < 0) {
-          console.error('UNHANDLED INPUT EVENT!',
-              event.typeName, parseInt(event.start), parseInt(event.end));
-        }
-      });
-    }
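// An alternative formulation, sketched here for illustration only (it is not
// the file's implementation): the same double-handled / unhandled check using
// an ES6 Set instead of repeated indexOf scans, assuming Set is available in
// the environments that load this file.
function sketchCheckHandled(sortedInputEvents, protoIRs) {
  var handled = new Set();
  protoIRs.forEach(function(protoIR) {
    protoIR.associatedEvents.forEach(function(event) {
      if (handled.has(event))
        console.error('double-handled event', event.typeName);
      handled.add(event);
    });
  });
  sortedInputEvents.forEach(function(event) {
    if (!handled.has(event))
      console.error('UNHANDLED INPUT EVENT!', event.typeName);
  });
}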
-  };
-
-  function createCustomizeModelLinesFromModel(model) {
-    var modelLines = [];
-    modelLines.push('      audits.addEvent(model.browserMain,');
-    modelLines.push('          {title: \'model start\', start: 0, end: 1});');
-
-    var typeNames = {};
-    for (var typeName in tr.e.cc.INPUT_EVENT_TYPE_NAMES) {
-      typeNames[tr.e.cc.INPUT_EVENT_TYPE_NAMES[typeName]] = typeName;
-    }
-
-    var modelEvents = new tr.model.EventSet();
-    model.interactionRecords.forEach(function(ir, index) {
-      modelEvents.addEventSet(ir.sourceEvents);
-    });
-    modelEvents = modelEvents.toArray();
-    modelEvents.sort(compareEvents);
-
-    modelEvents.forEach(function(event) {
-      var startAndEnd = 'start: ' + parseInt(event.start) + ', ' +
-                        'end: ' + parseInt(event.end) + '});';
-      if (event instanceof tr.e.cc.InputLatencyAsyncSlice) {
-        modelLines.push('      audits.addInputEvent(model, INPUT_TYPE.' +
-                        typeNames[event.typeName] + ',');
-      } else if (event.title === 'RenderFrameImpl::didCommitProvisionalLoad') {
-        modelLines.push('      audits.addCommitLoadEvent(model,');
-      } else if (event.title ===
-                 'InputHandlerProxy::HandleGestureFling::started') {
-        modelLines.push('      audits.addFlingAnimationEvent(model,');
-      } else if (event.title === tr.e.audits.IMPL_RENDERING_STATS) {
-        modelLines.push('      audits.addFrameEvent(model,');
-      } else if (event.title === CSS_ANIMATION_TITLE) {
-        modelLines.push('      audits.addEvent(model.rendererMain, {');
-        modelLines.push('        title: \'Animation\', ' + startAndEnd);
-        return;
-      } else {
-        throw ('You must extend createCustomizeModelLinesFromModel()' +
-               ' to support this event:\n' + event.title + '\n');
-      }
-      modelLines.push('          {' + startAndEnd);
-    });
-
-    modelLines.push('      audits.addEvent(model.browserMain,');
-    modelLines.push('          {' +
-                    'title: \'model end\', ' +
-                    'start: ' + (parseInt(model.bounds.max) - 1) + ', ' +
-                    'end: ' + parseInt(model.bounds.max) + '});');
-    return modelLines;
-  }
-
-  function createExpectedIRLinesFromModel(model) {
-    var expectedLines = [];
-    var irCount = model.interactionRecords.length;
-    model.interactionRecords.forEach(function(ir, index) {
-      var irString = '      {';
-      irString += 'title: \'' + ir.title + '\', ';
-      irString += 'start: ' + parseInt(ir.start) + ', ';
-      irString += 'end: ' + parseInt(ir.end) + ', ';
-      irString += 'eventCount: ' + ir.sourceEvents.length;
-      irString += '}';
-      if (index < (irCount - 1))
-        irString += ',';
-      expectedLines.push(irString);
-    });
-    return expectedLines;
-  }
-
-  function createIRFinderTestCaseStringFromModel(model) {
-    var filename = window.location.hash.substr(1);
-    var testName = filename.substr(filename.lastIndexOf('/') + 1);
-    testName = testName.substr(0, testName.indexOf('.'));
-
-    // createCustomizeModelLinesFromModel() throws an error if there's an
-    // unsupported event.
-    try {
-      var testLines = [];
-      testLines.push('  /*');
-      testLines.push('    This test was generated from');
-      testLines.push('    ' + filename + '');
-      testLines.push('   */');
-      testLines.push('  test(\'' + testName + '\', function() {');
-      testLines.push('    var verifier = new IRVerifier();');
-      testLines.push('    verifier.customizeModelCallback = function(model) {');
-      testLines.push.apply(testLines,
-          createCustomizeModelLinesFromModel(model));
-      testLines.push('    };');
-      testLines.push('    verifier.expectedIRs = [');
-      testLines.push.apply(testLines, createExpectedIRLinesFromModel(model));
-      testLines.push('    ];');
-      testLines.push('    verifier.verify();');
-      testLines.push('  });');
-      return testLines.join('\n');
-    } catch (error) {
-      return error;
-    }
-  }
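// An illustrative usage sketch only. The return block below suggests these
// helpers are exported by a surrounding tr.exportTo call (earlier in this
// file, not shown here), most likely on tr.e.rail. Given a tr.Model built from
// a trace whose filename is in the URL hash (which
// createIRFinderTestCaseStringFromModel reads for the test name), a test
// skeleton for rail_ir_finder_test.html could be printed roughly like this:
//
//   console.log(tr.e.rail.createIRFinderTestCaseStringFromModel(model));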
-
-  return {
-    RAILIRFinder: RAILIRFinder,
-    createIRFinderTestCaseStringFromModel: createIRFinderTestCaseStringFromModel
-  };
-});
-</script>
diff --git a/catapult/tracing/tracing/extras/rail/rail_ir_finder_test.html b/catapult/tracing/tracing/extras/rail/rail_ir_finder_test.html
deleted file mode 100644
index 2b3067b..0000000
--- a/catapult/tracing/tracing/extras/rail/rail_ir_finder_test.html
+++ /dev/null
@@ -1,838 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/extras/chrome/cc/input_latency_async_slice.html">
-<link rel="import" href="/tracing/extras/chrome/chrome_test_utils.html">
-<link rel="import" href="/tracing/extras/rail/ir_verifier.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  var INPUT_TYPE = tr.e.cc.INPUT_EVENT_TYPE_NAMES;
-  var chrome_test_utils = tr.e.chrome.ChromeTestUtils;
-  var IRVerifier = tr.e.rail.IRVerifier;
-
-  function addFrameEventForInput(model, event) {
-    var frame = chrome_test_utils.addFrameEvent(model,
-        {start: event.start, end: event.end, isTopLevel: true});
-    model.flowEvents.push(tr.c.TestUtils.newFlowEventEx({
-      id: event.id,
-      start: event.start,
-      end: event.end,
-      startSlice: frame,
-      endSlice: frame
-    }));
-  }
-
-  test('empty', function() {
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-    };
-    verifier.expectedIRs = [
-    ];
-    verifier.verify();
-  });
-
-  test('slowMouseMoveResponses', function() {
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addInputEvent(
-          model, INPUT_TYPE.MOUSE_DOWN, {start: 0, end: 10});
-      var mouseMove = chrome_test_utils.addInputEvent(
-          model, INPUT_TYPE.MOUSE_MOVE, {start: 10, end: 20, id: '0x100'});
-      addFrameEventForInput(model, mouseMove);
-
-      mouseMove = chrome_test_utils.addInputEvent(
-          model, INPUT_TYPE.MOUSE_MOVE, {start: 70, end: 80, id: '0x101'});
-      addFrameEventForInput(model, mouseMove);
-
-      mouseMove = chrome_test_utils.addInputEvent(
-          model, INPUT_TYPE.MOUSE_MOVE, {start: 130, end: 140, id: '0x102'});
-      addFrameEventForInput(model, mouseMove);
-    };
-    verifier.expectedIRs = [
-      {title: 'Idle', start: 0, end: 10, eventCount: 0},
-      {title: 'Response', name: 'Mouse', start: 10, end: 20, eventCount: 4},
-      {title: 'Idle', start: 20, end: 70, eventCount: 0},
-      {title: 'Response', name: 'Mouse', start: 70, end: 80, eventCount: 3},
-      {title: 'Idle', start: 80, end: 130, eventCount: 0},
-      {title: 'Response', name: 'Mouse', start: 130, end: 140, eventCount: 3}
-    ];
-    verifier.verify();
-  });
-
-  test('mouseEventResponses', function() {
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      var mouseDown = chrome_test_utils.addInputEvent(
-          model, INPUT_TYPE.MOUSE_DOWN, {start: 0, end: 50, id: '0x100'});
-      addFrameEventForInput(model, mouseDown);
-
-      var mouseUp = chrome_test_utils.addInputEvent(model, INPUT_TYPE.MOUSE_UP,
-          {start: 50, end: 100, id: '0x101'});
-      addFrameEventForInput(model, mouseUp);
-
-      var mouseMove = chrome_test_utils.addInputEvent(
-          model, INPUT_TYPE.MOUSE_MOVE, {start: 200, end: 250, id: '0x102'});
-      addFrameEventForInput(model, mouseMove);
-    };
-    verifier.expectedIRs = [
-      {title: 'Response', name: 'Mouse', start: 0, end: 50, eventCount: 3},
-      {title: 'Response', name: 'Mouse', start: 50, end: 100, eventCount: 3},
-      {title: 'Idle', start: 100, end: 200, eventCount: 0},
-      {title: 'Response', name: 'Mouse', start: 200, end: 250, eventCount: 3}
-    ];
-    verifier.verify();
-  });
-
-  test('mouseEventsIgnored', function() {
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.MOUSE_MOVE,
-          {start: 0, end: 50});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.MOUSE_DOWN,
-          {start: 50, end: 100});
-    };
-    verifier.expectedIRs = [
-      {title: 'Idle', start: 0, end: 100, eventCount: 0}
-    ];
-    verifier.verify();
-  });
-
-  test('unassociatedEvents', function() {
-    // Unassociated ThreadSlices that start during an Idle should be associated
-    // with it. Expect the Idle IR to have 2 associated events: both of the
-    // ThreadSlices in the model.
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      var start = tr.c.TestUtils.newSliceEx(
-          {title: 'model start', start: 0, end: 1, type: tr.model.ThreadSlice});
-      start.isTopLevel = true;
-      model.browserMain.sliceGroup.pushSlice(start);
-
-      var end = tr.c.TestUtils.newSliceEx(
-          {title: 'model end', start: 9, end: 10, type: tr.model.ThreadSlice});
-      end.isTopLevel = true;
-      model.browserMain.sliceGroup.pushSlice(end);
-    };
-    verifier.expectedIRs = [
-      {title: 'Idle', start: 0, end: 10, eventCount: 2}
-    ];
-    verifier.verify();
-  });
-
-  test('overlappingIdleAndLoadCollectUnassociatedEvents', function() {
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addFrameEvent(model, {start: 0, end: 10});
-      chrome_test_utils.addCommitLoadEvent(
-          model, {start: 10, end: 20, args: {id: 1}});
-      chrome_test_utils.addFinishLoadEvent(
-          model, {start: 21, end: 24, args: {id: 1}});
-      chrome_test_utils.addLoadFinishedEvent(
-          model, {start: 22, end: 23});
-      chrome_test_utils.addFrameEvent(model, {start: 25, end: 30});
-      chrome_test_utils.addFrameEvent(model, {start: 35, end: 40});
-      // 3 Idle events.
-      chrome_test_utils.addRenderingEvent(model, {start: 5, end: 15});
-      chrome_test_utils.addRenderingEvent(model, {start: 11, end: 15});
-      chrome_test_utils.addRenderingEvent(model, {start: 13, end: 15});
-      // 1 Idle event.
-      chrome_test_utils.addRenderingEvent(model, {start: 35, end: 36});
-    };
-    verifier.expectedIRs = [
-      {title: 'Idle', start: 0, end: 10, eventCount: 1},
-      {title: 'Load', name: 'Succeeded', start: 10, end: 30, eventCount: 4},
-      {title: 'Idle', start: 30, end: 40, eventCount: 1}
-    ];
-    verifier.verify();
-  });
-
-  test('flingFlingFling', function() {
-    // This trace gave me so many different kinds of trouble that I'm just going
-    // to copy it straight in here, without trying to clarify it at all.
-    // measurmt-traces/mobile/cnet_fling_up_fling_down_motox_2013.json
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addFrameEvent(model, {start: 0, end: 10});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_START,
-          {start: 919, end: 998});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.FLING_CANCEL,
-          {start: 919, end: 1001});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TAP_DOWN,
-          {start: 919, end: 1001});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TAP_CANCEL,
-          {start: 974, end: 1020});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_BEGIN,
-          {start: 974, end: 1020});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
-          {start: 974, end: 1040});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
-          {start: 974, end: 1054});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
-          {start: 990, end: 1021});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
-          {start: 990, end: 1052});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
-          {start: 1006, end: 1021});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
-          {start: 1022, end: 1036});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
-          {start: 1022, end: 1052});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
-          {start: 1038, end: 1049});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
-          {start: 1038, end: 1068});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_END,
-          {start: 1046, end: 1050});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.FLING_START,
-          {start: 1046, end: 1077});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_START,
-          {start: 1432, end: 2238});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.FLING_CANCEL,
-          {start: 1432, end: 2241});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
-          {start: 1516, end: 2605});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_BEGIN,
-          {start: 1532, end: 2274});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
-          {start: 1532, end: 2294});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
-          {start: 1549, end: 2310});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_END,
-          {start: 1627, end: 2275});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.FLING_START,
-          {start: 1627, end: 2310});
-      chrome_test_utils.addFrameEvent(model, {start: 2990, end: 3000});
-    };
-    verifier.expectedIRs = [
-      {title: 'Idle', start: 0, end: 919, eventCount: 0},
-      {title: 'Response', name: 'Scroll,Tap,Touch', start: 919, end: 1054,
-          eventCount: 6},
-      {title: 'Animation', name: 'Scroll,Touch', start: 1054, end: 1068,
-          eventCount: 8},
-      {title: 'Animation', name: 'Fling', start: 1054, end: 1432,
-          eventCount: 2},
-      {title: 'Response', name: 'Scroll,Touch', start: 1432, end: 2605,
-          eventCount: 5},
-      {title: 'Animation', name: 'Scroll', start: 1549, end: 2310,
-          eventCount: 1},
-      {title: 'Animation', name: 'Fling', start: 2605, end: 3000,
-          eventCount: 1}
-    ];
-    verifier.verify();
-  });
-
-  test('keyboardEvents', function() {
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.KEY_DOWN_RAW,
-          {start: 0, end: 45});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.CHAR,
-          {start: 10, end: 50});
-    };
-    verifier.expectedIRs = [
-      {title: 'Response', name: 'Keyboard', start: 0, end: 50, eventCount: 2}
-    ];
-    verifier.verify();
-  });
-
-  test('mouseResponses', function() {
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.CLICK,
-          {start: 0, end: 100});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.CONTEXT_MENU,
-          {start: 200, end: 300});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.MOUSE_WHEEL,
-          {start: 400, end: 500});
-    };
-    verifier.expectedIRs = [
-      {title: 'Response', name: 'Mouse', start: 0, end: 100, eventCount: 1},
-      {title: 'Idle', start: 100, end: 200, eventCount: 0},
-      {title: 'Response', name: 'Mouse', start: 200, end: 300, eventCount: 1},
-      {title: 'Idle', start: 300, end: 400, eventCount: 0},
-      {title: 'Response', name: 'MouseWheel', start: 400, end: 500,
-          eventCount: 1}
-    ];
-    verifier.verify();
-  });
-
-  test('mouseWheelAnimation', function() {
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.MOUSE_WHEEL,
-          {start: 0, end: 20});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.MOUSE_WHEEL,
-          {start: 16, end: 36});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.MOUSE_WHEEL,
-          {start: 55, end: 75});
-
-      // This threshold compares the two events' start times, not the
-      // previous event's end time to the next event's start time.
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.MOUSE_WHEEL,
-          {start: 100, end: 150});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.MOUSE_WHEEL,
-          {start: 141, end: 191});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.MOUSE_WHEEL,
-          {start: 182, end: 200});
-    };
-    verifier.expectedIRs = [
-      {title: 'Response', name: 'MouseWheel', start: 0, end: 20, eventCount: 1},
-      {title: 'Animation', name: 'MouseWheel', start: 20, end: 75,
-          eventCount: 2},
-      {title: 'Idle', start: 75, end: 100, eventCount: 0},
-      {title: 'Response', name: 'MouseWheel', start: 100, end: 150,
-          eventCount: 1},
-      {title: 'Response', name: 'MouseWheel', start: 141, end: 191,
-          eventCount: 1},
-      {title: 'Response', name: 'MouseWheel', start: 182, end: 200,
-          eventCount: 1}
-    ];
-    verifier.verify();
-  });
-
-  test('mouseDownUpResponse', function() {
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.MOUSE_DOWN,
-          {start: 0, end: 100});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.MOUSE_UP,
-          {start: 200, end: 210});
-    };
-    verifier.expectedIRs = [
-      {title: 'Idle', start: 0, end: 200, eventCount: 0},
-      {title: 'Response', name: 'Mouse', start: 200, end: 210, eventCount: 2}
-    ];
-    verifier.verify();
-  });
-
-  test('ignoreLoneMouseMoves', function() {
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.MOUSE_MOVE,
-          {start: 0, end: 100});
-    };
-    verifier.expectedIRs = [
-      {title: 'Idle', start: 0, end: 100, eventCount: 0}
-    ];
-    verifier.verify();
-  });
-
-  test('mouseDrags', function() {
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addInputEvent(
-          model, INPUT_TYPE.MOUSE_DOWN, {start: 0, end: 100});
-      var mouseMove = chrome_test_utils.addInputEvent(
-          model, INPUT_TYPE.MOUSE_MOVE, {start: 200, end: 215});
-      addFrameEventForInput(model, mouseMove);
-      mouseMove = chrome_test_utils.addInputEvent(
-          model, INPUT_TYPE.MOUSE_MOVE, {start: 210, end: 220});
-      addFrameEventForInput(model, mouseMove);
-      mouseMove = chrome_test_utils.addInputEvent(
-          model, INPUT_TYPE.MOUSE_MOVE, {start: 221, end: 240});
-      addFrameEventForInput(model, mouseMove);
-    };
-    verifier.expectedIRs = [
-      {title: 'Idle', start: 0, end: 200, eventCount: 0},
-      {title: 'Response', name: 'Mouse', start: 200, end: 215, eventCount: 4},
-      {title: 'Animation', name: 'Mouse', start: 215, end: 240, eventCount: 6}
-    ];
-    verifier.verify();
-  });
-
-  test('twoScrollsNoFling', function() {
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_BEGIN,
-          {start: 0, end: 100});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
-          {start: 20, end: 100});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
-          {start: 40, end: 100});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
-          {start: 60, end: 150});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
-          {start: 70, end: 150});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_END,
-          {start: 80, end: 150});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_BEGIN,
-          {start: 300, end: 400});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
-          {start: 320, end: 400});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
-          {start: 330, end: 450});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
-          {start: 340, end: 450});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
-          {start: 350, end: 500});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_END,
-          {start: 360, end: 500});
-    };
-    verifier.expectedIRs = [
-      {title: 'Response', name: 'Scroll', start: 0, end: 100, eventCount: 2},
-      {title: 'Animation', name: 'Scroll', start: 100, end: 150, eventCount: 4},
-      {title: 'Idle', start: 150, end: 300, eventCount: 0},
-      {title: 'Response', name: 'Scroll', start: 300, end: 400, eventCount: 2},
-      {title: 'Animation', name: 'Scroll', start: 400, end: 500, eventCount: 4}
-    ];
-    verifier.verify();
-  });
-
-  test('cssAnimations', function() {
-    // CSS Animations happen on the renderer process, not the browser process.
-    // They are merged if they overlap.
-    // They are merged with other kinds of animations.
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addEvent(model.rendererMain, {
-        title: 'Animation', start: 0, end: 100});
-      chrome_test_utils.addEvent(model.rendererMain, {
-        title: 'Animation', start: 99, end: 200});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.FLING_START,
-          {start: 150, end: 180});
-      chrome_test_utils.addFrameEvent(model, {start: 290, end: 300});
-    };
-    verifier.expectedIRs = [
-      {title: 'Animation', name: 'CSS', start: 0, end: 200, eventCount: 2},
-      {title: 'Animation', name: 'Fling', start: 150, end: 300, eventCount: 1}
-    ];
-    verifier.verify();
-  });
-
-  test('flingThatIsntstopped', function() {
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.FLING_START,
-          {start: 32, end: 100});
-      chrome_test_utils.addFlingAnimationEvent(model, {start: 38, end: 200});
-      chrome_test_utils.addFrameEvent(model, {start: 290, end: 300});
-    };
-    verifier.expectedIRs = [
-      {title: 'Animation', name: 'Fling', start: 32, end: 200, eventCount: 2},
-      {title: 'Idle', start: 200, end: 300, eventCount: 0}
-    ];
-    verifier.verify();
-  });
-
-  test('flingThatIsStopped', function() {
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.FLING_START,
-          {start: 32, end: 100});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.FLING_CANCEL,
-          {start: 105, end: 150});
-    };
-    verifier.expectedIRs = [
-      {title: 'Animation', name: 'Fling', start: 32, end: 105, eventCount: 2},
-      {title: 'Idle', start: 105, end: 150, eventCount: 0}
-    ];
-    verifier.verify();
-  });
-
-  test('flingFling', function() {
-    // measurmt-traces/mobile/facebook_obama_scroll_dialog_box.html
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.FLING_START,
-          {start: 0, end: 30});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_START,
-          {start: 100, end: 130});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.FLING_CANCEL,
-          {start: 100, end: 130});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
-          {start: 110, end: 140});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
-          {start: 170, end: 180});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_END,
-          {start: 200, end: 210});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.FLING_START,
-          {start: 200, end: 220});
-      chrome_test_utils.addFrameEvent(model, {start: 230, end: 240});
-    };
-    verifier.expectedIRs = [
-      {title: 'Animation', name: 'Fling', start: 0, end: 100, eventCount: 2},
-      {title: 'Response', name: 'Touch', start: 100, end: 140, eventCount: 2},
-      {title: 'Animation', name: 'Touch', start: 140, end: 210, eventCount: 2},
-      {title: 'Animation', name: 'Fling', start: 200, end: 240, eventCount: 1}
-    ];
-    verifier.verify();
-  });
-
-  test('load', function() {
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addCommitLoadEvent(model, {
-          start: 0, end: 10, args: {id: 1}});
-      chrome_test_utils.addFinishLoadEvent(
-          model, {start: 11, end: 14, args: {id: 1}});
-      chrome_test_utils.addLoadFinishedEvent(
-          model, {start: 12, end: 13});
-      chrome_test_utils.addFrameEvent(model, {start: 15, end: 20});
-    };
-    verifier.expectedIRs = [
-      {title: 'Load', name: 'Succeeded', start: 0, end: 20, eventCount: 4}
-    ];
-    verifier.verify();
-  });
-
-  test('loadFailed', function() {
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addStartProvisionalLoadEvent(
-          model, {start: 0, end: 10});
-      chrome_test_utils.addFailProvisionalLoadEvent(
-          model, {start: 11, end: 20});
-    };
-    verifier.expectedIRs = [
-      {title: 'Load', name: 'Failed', start: 0, end: 20, eventCount: 2}
-    ];
-    verifier.verify();
-  });
-
-  test('loadStartup', function() {
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addRenderingEvent(model, {start: 2, end: 3});
-      chrome_test_utils.addCreateThreadsEvent(model, {start: 5, end: 10});
-      // Throw a second one in there, just to try to confuse the algo.
-      chrome_test_utils.addCreateThreadsEvent(model, {start: 25, end: 30});
-      chrome_test_utils.addFrameEvent(model, {start: 11, end: 20});
-    };
-    verifier.expectedIRs = [
-      {title: 'Load', name: 'Startup', start: 2, end: 20, eventCount: 2},
-      {title: 'Idle', start: 20, end: 30, eventCount: 0}
-    ];
-    verifier.verify();
-  });
-
-  test('totalIdle', function() {
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addFrameEvent(model, {start: 0, end: 10});
-    };
-    verifier.expectedIRs = [
-      {title: 'Idle', start: 0, end: 10, eventCount: 0}
-    ];
-    verifier.verify();
-  });
-
-  test('multipleIdles', function() {
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addFrameEvent(model, {start: 0, end: 1});
-      chrome_test_utils.addCommitLoadEvent(model, {
-          start: 1, end: 2, args: {id: 1}});
-      chrome_test_utils.addFinishLoadEvent(
-          model, {start: 2.1, end: 2.4, args: {id: 1}});
-      chrome_test_utils.addLoadFinishedEvent(
-          model, {start: 2.2, end: 2.3});
-      chrome_test_utils.addFrameEvent(model, {start: 3, end: 4});
-      chrome_test_utils.addCreateChildFrameEvent(model, {
-          start: 4, end: 5, args: {id: 1, child: 2}});
-      chrome_test_utils.addCommitLoadEvent(model, {
-          start: 5, end: 6, args: {id: 2}});
-      chrome_test_utils.addFrameEvent(model, {start: 7, end: 8});
-      chrome_test_utils.addCreateChildFrameEvent(model, {
-          start: 8, end: 9, args: {id: 1, child: 3}});
-      chrome_test_utils.addCommitLoadEvent(model, {
-          start: 9, end: 10, args: {id: 3}});
-      chrome_test_utils.addFrameEvent(model, {start: 11, end: 12});
-      chrome_test_utils.addFrameEvent(model, {start: 12, end: 13});
-    };
-    verifier.expectedIRs = [
-      {title: 'Idle', start: 0, end: 1, eventCount: 0},
-      {title: 'Load', name: 'Succeeded', start: 1, end: 4, eventCount: 4},
-      {title: 'Idle', start: 4, end: 13, eventCount: 0}
-    ];
-    verifier.verify();
-  });
-
-  test('touchStartTouchEndTap', function() {
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_START,
-          {start: 0, end: 10});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_END,
-          {start: 200, end: 210});
-    };
-    verifier.expectedIRs = [
-      {title: 'Response', name: 'Touch', start: 0, end: 210, eventCount: 2}
-    ];
-    verifier.verify();
-  });
-
-  test('touchMoveResponseAnimation', function() {
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_START,
-          {start: 0, end: 10});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
-          {start: 50, end: 100});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
-          {start: 70, end: 150});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_END,
-          {start: 200, end: 300});
-    };
-    verifier.expectedIRs = [
-      {title: 'Response', name: 'Touch', start: 0, end: 100, eventCount: 2},
-      {title: 'Animation', name: 'Touch', start: 100, end: 300, eventCount: 2}
-    ];
-    verifier.verify();
-  });
-
-  test('tapEvents', function() {
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TAP,
-          {start: 0, end: 50});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TAP_DOWN,
-          {start: 300, end: 310});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TAP,
-          {start: 320, end: 350});
-    };
-    verifier.expectedIRs = [
-      {title: 'Response', name: 'Tap', start: 0, end: 50, eventCount: 1},
-      {title: 'Idle', start: 50, end: 300, eventCount: 0},
-      {title: 'Response', name: 'Tap', start: 300, end: 350, eventCount: 2}
-    ];
-    verifier.verify();
-  });
-
-  test('tapAndTapCancelResponses', function() {
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TAP_DOWN,
-          {start: 0, end: 100});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TAP_CANCEL,
-          {start: 300, end: 350});
-    };
-    verifier.expectedIRs = [
-      {title: 'Response', name: 'Tap', start: 0, end: 100, eventCount: 1},
-      {title: 'Idle', start: 100, end: 300, eventCount: 0},
-      {title: 'Response', name: 'Tap', start: 300, end: 350, eventCount: 1}
-    ];
-    verifier.verify();
-  });
-
-  test('tapCancelResponse', function() {
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TAP_DOWN,
-          {start: 0, end: 100});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TAP_CANCEL,
-          {start: 150, end: 200});
-    };
-    verifier.expectedIRs = [
-      {title: 'Response', name: 'Tap', start: 0, end: 200, eventCount: 2}
-    ];
-    verifier.verify();
-  });
-
-  test('pinchResponseAnimation', function() {
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addFrameEvent(model, {start: 0, end: 10});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.PINCH_BEGIN,
-          {start: 100, end: 150});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.PINCH_UPDATE,
-          {start: 130, end: 160});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.PINCH_UPDATE,
-          {start: 140, end: 200});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.PINCH_UPDATE,
-          {start: 150, end: 205});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.PINCH_UPDATE,
-          {start: 210, end: 220});
-      // pause > 200ms
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.PINCH_UPDATE,
-          {start: 421, end: 470});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.PINCH_END,
-          {start: 460, end: 500});
-    };
-    verifier.expectedIRs = [
-      {title: 'Idle', start: 0, end: 100, eventCount: 0},
-      {title: 'Response', name: 'Pinch', start: 100, end: 160, eventCount: 2},
-      {title: 'Animation', name: 'Pinch', start: 160, end: 220, eventCount: 3},
-      {title: 'Idle', start: 220, end: 421, eventCount: 0},
-      {title: 'Animation', name: 'Pinch', start: 421, end: 500, eventCount: 2}
-    ];
-    verifier.verify();
-  });
-
-  test('tapThenScroll', function() {
-    // measurmt-traces/mobile/google_io_instrument_strumming.json
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_START,
-          {start: 0, end: 20});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_END,
-          {start: 40, end: 100});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_START,
-          {start: 50, end: 120});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
-          {start: 80, end: 150});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
-          {start: 180, end: 200});
-    };
-    verifier.expectedIRs = [
-      {title: 'Response', name: 'Touch', start: 0, end: 100, eventCount: 2},
-      {title: 'Response', name: 'Touch', start: 50, end: 150, eventCount: 2},
-      {title: 'Animation', name: 'Touch', start: 150, end: 200, eventCount: 1}
-    ];
-    verifier.verify();
-  });
-
-  test('pinchFlingTapTouchEventsOverlap', function() {
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addFrameEvent(model, {start: 0, end: 10});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_START,
-          {start: 20, end: 50});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TAP_DOWN,
-          {start: 20, end: 30});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.FLING_CANCEL,
-          {start: 20, end: 50});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
-          {start: 60, end: 100});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_START,
-          {start: 60, end: 110});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.PINCH_BEGIN,
-          {start: 60, end: 100});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TAP_CANCEL,
-          {start: 65, end: 75});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
-          {start: 70, end: 100});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.PINCH_UPDATE,
-          {start: 70, end: 100});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
-          {start: 75, end: 100});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
-          {start: 80, end: 100});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
-          {start: 85, end: 100});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_BEGIN,
-          {start: 75, end: 100});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
-          {start: 150, end: 200});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
-          {start: 150, end: 200});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.FLING_START,
-          {start: 180, end: 210});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_END,
-          {start: 190, end: 210});
-      chrome_test_utils.addFrameEvent(model, {start: 215, end: 220});
-    };
-    verifier.expectedIRs = [
-      {title: 'Idle', start: 0, end: 20, eventCount: 0},
-      {title: 'Response', name: 'Pinch,Scroll,Tap,Touch', start: 20, end: 110,
-          eventCount: 9},
-      {title: 'Animation', name: 'Scroll,Touch', start: 110, end: 210,
-          eventCount: 6},
-      {title: 'Animation', name: 'Fling', start: 180, end: 220, eventCount: 1}
-    ];
-    verifier.verify();
-  });
-
-  test('scrollThenFling', function() {
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
-          {start: 0, end: 40});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
-          {start: 50, end: 100});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.FLING_START,
-          {start: 80, end: 100});
-      chrome_test_utils.addFrameEvent(model, {start: 190, end: 200});
-    };
-    verifier.expectedIRs = [
-      {title: 'Animation', name: 'Scroll', start: 0, end: 100, eventCount: 2},
-      {title: 'Animation', name: 'Fling', start: 80, end: 200, eventCount: 1}
-    ];
-    verifier.verify();
-  });
-
-  /*
-    This test was generated from
-    /test_data/measurmt-traces/mobile/fling_HN_to_rest.json
-   */
-  test('flingHNToRest', function() {
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addEvent(model.browserMain,
-          {title: 'model start', start: 0, end: 1});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_START,
-          {start: 1274, end: 1297});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TAP_DOWN,
-          {start: 1274, end: 1305});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
-          {start: 1343, end: 1350});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
-          {start: 1359, end: 1366});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TAP_CANCEL,
-          {start: 1359, end: 1366});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_BEGIN,
-          {start: 1359, end: 1367});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
-          {start: 1359, end: 1387});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
-          {start: 1375, end: 1385});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
-          {start: 1375, end: 1416});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
-          {start: 1389, end: 1404});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
-          {start: 1389, end: 1429});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
-          {start: 1405, end: 1418});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
-          {start: 1405, end: 1449});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
-          {start: 1419, end: 1432});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
-          {start: 1419, end: 1474});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_END,
-          {start: 1427, end: 1435});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.FLING_START,
-          {start: 1427, end: 1474});
-      chrome_test_utils.addFlingAnimationEvent(model, {start: 1440, end: 2300});
-      chrome_test_utils.addEvent(model.browserMain,
-          {title: 'model end', start: 3184, end: 3185});
-    };
-    verifier.expectedIRs = [
-      {title: 'Idle', start: 0, end: 1274, eventCount: 0},
-      {title: 'Response', name: 'Scroll,Tap,Touch', start: 1274, end: 1387,
-          eventCount: 6},
-      {title: 'Animation', name: 'Scroll,Touch', start: 1387, end: 1474,
-          eventCount: 10},
-      {title: 'Animation', name: 'Fling', start: 1427, end: 2300,
-          eventCount: 2},
-      {title: 'Idle', start: 2300, end: 3185, eventCount: 0}
-    ];
-    verifier.verify();
-  });
-
-  test('TapResponseOverlappingTouchAnimation', function() {
-    var verifier = new IRVerifier();
-    verifier.customizeModelCallback = function(model) {
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
-          {start: 0, end: 10});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
-          {start: 5, end: 15});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
-          {start: 10, end: 20});
-      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TAP,
-          {start: 15, end: 100});
-    };
-    verifier.expectedIRs = [
-      {title: 'Response', name: 'Tap,Touch', start: 0, end: 100,
-          eventCount: 4}
-    ];
-    verifier.verify();
-  });
-});
-</script>
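
Every case in the suite deleted above follows the same pattern: customizeModelCallback populates a synthetic Chrome model with input, frame, and load events, and verify() checks that the finder reduced them to exactly expectedIRs, matching title, optional name, start, end, and associated-event count. IRVerifier's implementation is not part of this diff, so the helper below is only a hedged sketch of what that comparison amounts to; the function name and the exact record fields are assumptions drawn from the expectations above.

// Illustrative only: the shape of the comparison the deleted tests rely on.
// 'assert' is the same chai-style assert object used in the tests above.
function checkExpectedIRs(assert, foundIRs, expectedIRs) {
  assert.equal(foundIRs.length, expectedIRs.length);
  expectedIRs.forEach(function(expected, i) {
    var actual = foundIRs[i];
    assert.equal(actual.title, expected.title);
    if (expected.name !== undefined)
      assert.equal(actual.name, expected.name);
    assert.closeTo(actual.start, expected.start, 1e-3);
    assert.closeTo(actual.end, expected.end, 1e-3);
    assert.equal(actual.associatedEvents.length, expected.eventCount);
  });
}
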
diff --git a/catapult/tracing/tracing/extras/rail/rail_score.html b/catapult/tracing/tracing/extras/rail/rail_score.html
deleted file mode 100644
index 80ff251..0000000
--- a/catapult/tracing/tracing/extras/rail/rail_score.html
+++ /dev/null
@@ -1,99 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/statistics.html">
-<link rel="import" href="/tracing/extras/rail/rail_interaction_record.html">
-
-<script>
-'use strict';
-
-tr.exportTo('tr.e.rail', function() {
-  function RAILScore(opt_irs, opt_rangeOfInterest) {
-    this.interactionRecords_ = [];
-    if (opt_irs)
-      this.interactionRecords_.push.apply(this.interactionRecords_, opt_irs);
-    this.rangeOfInterest_ = opt_rangeOfInterest;
-    if (!this.rangeOfInterest_ || this.rangeOfInterest_.isEmpty)
-      this.rangeOfInterest_ = tr.b.Range.fromExplicitRange(
-          -Number.MAX_VALUE, Number.MAX_VALUE);
-  };
-
-  RAILScore.prototype = {
-    get interactionRecords() {
-      return this.interactionRecords_;
-    },
-
-    get overallScore() {
-      // The design of this algorithm is discussed here: https://goo.gl/Cc0H1z
-      // TODO(benjhayden): Make doc public and remove below comment?
-      // Until the doc is made public, this is basically a weighted average,
-      // where the weights are tunable. The weights are recommended to be higher
-      // for lower scores, so that lower scores will bring down the overallScore
-      // more than higher scores bring it up. The optional fields provide an
-      // opportunity to customize the tunable parameters based on IR type,
-      // duration, etc. The continuity and monotonicity of the weighting
-      // function are also important characteristics. The weighting function is
-      // not composed of meaningful sub-expressions; it is a monolithic
-      // combination of the score and tunable parameters, and is open to
-      // reformulation.
-      // The weighting function is graphed here: https://goo.gl/1blsXW
-
-      function getScore(ir) {
-        return ir.railScore;
-      }
-
-      function getWeight(ir) {
-        // If this IR is not in the range of interest, then remove it from the
-        // weightedMean calculation by setting its weight to zero.
-        if (!this.rangeOfInterest_.intersectsExplicitRangeExclusive(
-              ir.start, ir.end))
-          return 0;
-
-        var score = getScore(ir);
-
-        var scale = ir.railScoreScale;
-        if (scale === undefined)
-          scale = 3;
-
-        var power = ir.railScorePower;
-        if (power === undefined)
-          power = 0.3;
-
-        var base = ir.railScoreBase;
-        if (base === undefined)
-          base = Math.exp(1);
-
-        return Math.pow(base, -scale * Math.pow(score, power));
-      }
-
-      return tr.b.Statistics.weightedMean(
-          this.interactionRecords, getWeight, getScore, this);
-    },
-
-    asDict: function() {
-      return {
-        overallScore: this.overallScore
-      };
-    }
-  };
-
-  RAILScore.fromModel = function(model, opt_rangeOfInterest) {
-    var rirs = model.interactionRecords.filter(function(ir) {
-      return ir instanceof tr.e.rail.RAILInteractionRecord;
-    });
-
-    if (rirs.length === 0)
-      return undefined;
-
-    return new RAILScore(rirs, opt_rangeOfInterest);
-  };
-
-  return {
-    RAILScore: RAILScore
-  };
-});
-</script>
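
For orientation on what is being removed: overallScore reduces a set of interaction-record scores to a single number with a weighted mean whose weights shrink exponentially as the score rises, so poor records drag the overall score down far more than good records lift it. Below is a minimal standalone sketch of that arithmetic, assuming the defaults above (scale 3, power 0.3, base e) and ignoring the range-of-interest filtering; the function names are illustrative, not part of tr.b.Statistics.

// Sketch of the weighting behind the deleted RAILScore.overallScore.
function railWeight(ir) {
  var scale = ir.railScoreScale === undefined ? 3 : ir.railScoreScale;
  var power = ir.railScorePower === undefined ? 0.3 : ir.railScorePower;
  var base = ir.railScoreBase === undefined ? Math.exp(1) : ir.railScoreBase;
  return Math.pow(base, -scale * Math.pow(ir.railScore, power));
}

function overallScore(irs) {
  var weightedSum = 0;
  var weightTotal = 0;
  irs.forEach(function(ir) {
    var w = railWeight(ir);
    weightedSum += w * ir.railScore;
    weightTotal += w;
  });
  return weightTotal === 0 ? undefined : weightedSum / weightTotal;
}

// A terrible and a perfect record do not average to 0.5: the 0 gets weight 1,
// the 1 gets weight e^-3, so the result is roughly 0.05.
overallScore([{railScore: 0}, {railScore: 1}]);
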
diff --git a/catapult/tracing/tracing/extras/rail/rail_score_test.html b/catapult/tracing/tracing/extras/rail/rail_score_test.html
deleted file mode 100644
index 0b6b049..0000000
--- a/catapult/tracing/tracing/extras/rail/rail_score_test.html
+++ /dev/null
@@ -1,200 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/extras/chrome/chrome_test_utils.html">
-<link rel="import" href="/tracing/extras/rail/rail_score.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  var RAILScore = tr.e.rail.RAILScore;
-
-  test('empty', function() {
-    var railScore = new RAILScore();
-    assert.isUndefined(railScore.overallScore);
-  });
-
-  test('singleIR', function() {
-    // Solitary scores always reduce to exactly themselves.
-    for (var i = 0; i < 11; ++i) {
-      var railScore = new RAILScore([{
-        railScore: i / 10,
-        start: 0,
-        end: 10
-      }]);
-      assert.closeTo(railScore.interactionRecords[0].railScore,
-                     railScore.overallScore, 0.001);
-
-      railScore = new RAILScore([{
-        railScore: i / 10,
-        railScorePower: 0.1,
-        start: 0,
-        end: 10
-      }]);
-      assert.closeTo(railScore.interactionRecords[0].railScore,
-                     railScore.overallScore, 0.001);
-
-      railScore = new RAILScore([{
-        railScore: i / 10,
-        railScoreScale: -1,
-        start: 0,
-        end: 10
-      }]);
-      assert.closeTo(railScore.interactionRecords[0].railScore,
-                     railScore.overallScore, 0.001);
-
-      railScore = new RAILScore([{
-        railScore: i / 10,
-        railScorePower: 0.1,
-        railScoreScale: -1,
-        start: 0,
-        end: 10
-      }]);
-      assert.closeTo(railScore.interactionRecords[0].railScore,
-                     railScore.overallScore, 0.001);
-    }
-  });
-
-  var binaryScoreSets = [
-    [0, 0, 0, 1],
-    [0, 0, 1],
-    [0, 1],
-    [0, 1, 1],
-    [0, 1, 1, 1]];
-
-  test('binaryScoreSets', function() {
-    function simpleIR(score) {
-      return {railScore: score, start: 0, end: 10};
-    }
-
-    for (var index = 1; index < binaryScoreSets.length; ++index) {
-      var prevScore = new RAILScore(binaryScoreSets[index - 1].map(simpleIR));
-      prevScore = prevScore.overallScore;
-      var currentScore = new RAILScore(binaryScoreSets[index].map(simpleIR));
-      currentScore = currentScore.overallScore;
-
-      assert.isBelow(0, prevScore);
-      assert.isBelow(prevScore, currentScore);
-
-      var altScore = new RAILScore(binaryScoreSets[index].map(function(score) {
-        return {railScore: score, railScoreScale: -1, start: 0, end: 10};
-      }));
-      altScore = altScore.overallScore;
-
-      // When scale < 0, then higher scores are weighted more heavily than lower
-      // scores.
-      assert.isBelow(currentScore, altScore);
-      assert.isBelow(altScore, 1);
-    }
-  });
-
-  test('zeroBase', function() {
-    // When base = 0, then weight could be 0, which could cause NaN.
-    var railScore = new RAILScore([{
-      railScore: 0,
-      railScoreBase: 0,
-      start: 0,
-      end: 10
-    }]);
-    assert.equal(0, railScore.overallScore);
-
-    railScore = new RAILScore([{
-      railScore: 1,
-      railScoreBase: 0,
-      start: 0,
-      end: 10
-    }]);
-    assert.isTrue(isNaN(railScore.overallScore));
-
-    railScore = new RAILScore([{
-      railScore: 1,
-      railScoreScale: -1,
-      railScoreBase: 0,
-      start: 0,
-      end: 10
-    }]);
-    assert.isTrue(isNaN(railScore.overallScore));
-  });
-
-  test('oneBase', function() {
-    // When base = 1, then scale and power don't matter, and the algorithm
-    // simplifies to un-weighted averaging.
-
-    function mean(scores) {
-      var sum = 0;
-      scores.forEach(function(score) {
-        sum += score;
-      });
-      return sum / scores.length;
-    }
-
-    binaryScoreSets.forEach(function(scores) {
-      var railScore = new RAILScore(scores.map(function(score) {
-        return {railScore: score, railScoreBase: 1, start: 0, end: 10};
-      }));
-      assert.closeTo(mean(scores), railScore.overallScore, 0.01);
-
-      railScore = new RAILScore(scores.map(function(score) {
-        return {
-          railScore: score,
-          railScoreBase: 1,
-          railScoreScale: -1,
-          start: 0,
-          end: 10
-        };
-      }));
-      assert.closeTo(mean(scores), railScore.overallScore, 0.01);
-    });
-  });
-
-  test('zeroInteractionRecords', function() {
-    var score = new tr.e.rail.RAILScore();
-    assert.equal(0, score.interactionRecords.length);
-    assert.isUndefined(score.overallScore);
-  });
-
-  test('overallScore', function() {
-    var score = new tr.e.rail.RAILScore([
-      {railScore: 0.9, start: 0, end: 10},
-      {railScore: 1.0, start: 0, end: 10},
-      {railScore: 0.89, start: 0, end: 10},
-      {railScore: 1.0, start: 0, end: 10},
-      {railScore: 0.1, start: 0, end: 10}
-    ]);
-    assert.equal(5, score.interactionRecords.length);
-    assert.closeTo(score.overallScore, 0.51, 0.01);
-  });
-
-  test('containedInRange', function() {
-    var score = new tr.e.rail.RAILScore([
-      {railScore: 0.9, start: 0, end: 10},
-      {railScore: 1.0, start: 10, end: 20},
-      {railScore: 0.89, start: 20, end: 30},
-      {railScore: 1.0, start: 30, end: 40},
-      {railScore: 0.1, start: 40, end: 50}
-    ], tr.b.Range.fromExplicitRange(5, 35));
-    assert.equal(5, score.interactionRecords.length);
-    assert.closeTo(score.overallScore, 0.94, 0.01);
-  });
-
-  test('fromModel', function() {
-    var model = tr.e.chrome.ChromeTestUtils.newChromeModel(function(model) {
-      model.addInteractionRecord(new tr.e.rail.RAILInteractionRecord(
-          model, 'Idle', 'rail_idle', 0, 1));
-    });
-    var score = tr.e.rail.RAILScore.fromModel(model);
-    assert.equal(1, score.interactionRecords.length);
-  });
-
-  test('fromEmptyModel', function() {
-    var model = tr.e.chrome.ChromeTestUtils.newChromeModel(function(model) {
-    });
-    assert.isUndefined(tr.e.rail.RAILScore.fromModel(model));
-  });
-});
-</script>
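
One detail worth noting in the oneBase test above: with railScoreBase set to 1, every weight collapses to 1, because 1 raised to any finite exponent is 1, so the weighted mean degenerates into the plain average that the test compares against. A quick check of that identity (operands are arbitrary illustration values):

// With base 1 the weight is 1 no matter what scale, power, or score are.
Math.pow(1, -3 * Math.pow(0.7, 0.3));   // 1
Math.pow(1, 42 * Math.pow(0.1, 9));     // 1
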
diff --git a/catapult/tracing/tracing/extras/rail/response_interaction_record.html b/catapult/tracing/tracing/extras/rail/response_interaction_record.html
deleted file mode 100644
index 806d159..0000000
--- a/catapult/tracing/tracing/extras/rail/response_interaction_record.html
+++ /dev/null
@@ -1,70 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/units/histogram.html">
-<link rel="import" href="/tracing/extras/rail/rail_interaction_record.html">
-
-<script>
-'use strict';
-
-/**
- * @fileoverview The Response phase of RAIL.
- */
-tr.exportTo('tr.e.rail', function() {
-  // This histogram represents the number of people who we believe would have
-  // comfort with a response level of a certain value. We have set this with
-  // just a best-effort guess, though. In #1696, we plan to derive this
-  // experimentally.
-  var COMFORT_HISTOGRAM = tr.b.u.Histogram.fromDict({
-    unit: 'unitless',
-    min: 150,
-    max: 5000,
-    centralBinWidth: 485,
-    underflowBin: {min: -Number.MAX_VALUE, max: 150, count: 1000},
-    centralBins: [
-      {min: 150, max: 635, count: 708},
-      {min: 635, max: 1120, count: 223},
-      {min: 1120, max: 1605, count: 50},
-      {min: 1605, max: 2090, count: 33},
-      {min: 2090, max: 2575, count: 23},
-      {min: 2575, max: 3060, count: 17},
-      {min: 3060, max: 3545, count: 12},
-      {min: 3545, max: 4030, count: 8},
-      {min: 4030, max: 4515, count: 4},
-      {min: 4515, max: 5000, count: 1}
-    ],
-    overflowBin: {min: 5000, max: Number.MAX_VALUE, count: 0}
-  });
-
-  function ResponseInteractionRecord(parentModel, start, duration) {
-    tr.e.rail.RAILInteractionRecord.call(
-        this, parentModel, 'Response', 'rail_response', start, duration);
-  }
-
-  ResponseInteractionRecord.prototype = {
-    __proto__: tr.e.rail.RAILInteractionRecord.prototype,
-
-    get normalizedUserComfort() {
-      // User comfort is derived from the time between when the user thinks they
-      // begin an interaction (expectedStart) and the time when the screen first
-      // changes to reflect the interaction (actualEnd).  There may be a delay
-      // between expectedStart and when chrome first starts processing the
-      // interaction (actualStart) if the main thread is busy.  The user doesn't
-      // know when actualStart is, they only know when expectedStart is. User
-      // comfort, by definition, considers only what the user experiences, so
-      // "duration" is defined as actualEnd - expectedStart.
-
-      return COMFORT_HISTOGRAM.getInterpolatedCountAt(this.duration) /
-        COMFORT_HISTOGRAM.maxCount;
-    }
-  };
-
-  return {
-    ResponseInteractionRecord: ResponseInteractionRecord
-  };
-});
-</script>
diff --git a/catapult/tracing/tracing/extras/rail/response_interaction_record_test.html b/catapult/tracing/tracing/extras/rail/response_interaction_record_test.html
deleted file mode 100644
index 844e7ed..0000000
--- a/catapult/tracing/tracing/extras/rail/response_interaction_record_test.html
+++ /dev/null
@@ -1,43 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/extras/rail/response_interaction_record.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  var test_utils = tr.c.TestUtils;
-
-  test('instantiate', function() {
-    var responseIR = new tr.e.rail.ResponseInteractionRecord(
-        undefined, 0, 150);
-    assert.equal(1, responseIR.normalizedUserComfort);
-    responseIR = new tr.e.rail.ResponseInteractionRecord(
-        undefined, 0, 150.0001);
-    assert.closeTo(1, responseIR.normalizedUserComfort, 1e-5);
-    responseIR = new tr.e.rail.ResponseInteractionRecord(
-        undefined, 0, 299.9999);
-    assert.closeTo(0.81938, responseIR.normalizedUserComfort, 1e-5);
-    responseIR = new tr.e.rail.ResponseInteractionRecord(
-        undefined, 0, 300.0001);
-    assert.closeTo(0.81938, responseIR.normalizedUserComfort, 1e-5);
-    responseIR = new tr.e.rail.ResponseInteractionRecord(
-        undefined, 0, 999.9999);
-    assert.closeTo(0.1793, responseIR.normalizedUserComfort, 1e-5);
-    responseIR = new tr.e.rail.ResponseInteractionRecord(
-        undefined, 0, 1000.0001);
-    assert.closeTo(0.1793, responseIR.normalizedUserComfort, 1e-5);
-    responseIR = new tr.e.rail.ResponseInteractionRecord(
-        undefined, 0, 4999.999);
-    assert.closeTo(0, responseIR.normalizedUserComfort, 1e-5);
-    responseIR = new tr.e.rail.ResponseInteractionRecord(
-        undefined, 0, 5000);
-    assert.equal(0, responseIR.normalizedUserComfort);
-  });
-});
-</script>
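
The comfort values asserted above (1 at 150ms, roughly 0.82 at 300ms, roughly 0.18 at 1000ms, 0 at 5000ms) follow from the COMFORT_HISTOGRAM defined in response_interaction_record.html if getInterpolatedCountAt interpolates linearly between bin centers and normalizedUserComfort divides by the largest bin count (1000). That interpolation rule is an assumption here, since tr.b.u.Histogram is not part of this diff; the sketch below merely reproduces the asserted numbers under that assumption, with illustrative names throughout.

// [duration ms, count] anchor points: underflow bin at its max, central bins
// at their centers (width 485), overflow bin at its min.
var COMFORT_POINTS = [
  [150, 1000],
  [392.5, 708], [877.5, 223], [1362.5, 50], [1847.5, 33],
  [2332.5, 23], [2817.5, 17], [3302.5, 12], [3787.5, 8],
  [4272.5, 4], [4757.5, 1],
  [5000, 0]
];

function approxNormalizedUserComfort(durationMs) {
  var maxCount = 1000;
  if (durationMs <= 150) return 1;
  if (durationMs >= 5000) return 0;
  for (var i = 1; i < COMFORT_POINTS.length; i++) {
    var a = COMFORT_POINTS[i - 1];
    var b = COMFORT_POINTS[i];
    if (durationMs <= b[0]) {
      var t = (durationMs - a[0]) / (b[0] - a[0]);
      return (a[1] + t * (b[1] - a[1])) / maxCount;
    }
  }
}

approxNormalizedUserComfort(300);   // ~0.819, cf. the test's 0.81938
approxNormalizedUserComfort(1000);  // ~0.179, cf. the test's 0.1793
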
diff --git a/catapult/tracing/tracing/extras/rail/stub_rail_interaction_record.html b/catapult/tracing/tracing/extras/rail/stub_rail_interaction_record.html
deleted file mode 100644
index 7cadee3..0000000
--- a/catapult/tracing/tracing/extras/rail/stub_rail_interaction_record.html
+++ /dev/null
@@ -1,47 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/core/test_utils.html">
-<link rel="import" href="/tracing/extras/rail/rail_interaction_record.html">
-
-<script>
-'use strict';
-
-/**
- * @fileoverview Stub version of RAILInteractionRecord for testing.
- */
-tr.exportTo('tr.e.rail', function() {
-  function StubRAILInteractionRecord(args) {
-    var sd = tr.c.TestUtils.getStartAndDurationFromDict(args);
-    tr.e.rail.RAILInteractionRecord.call(
-         this, args.parentModel, args.railTypeName, args.railTypeName,
-         sd.start, sd.duration);
-    this.args = args;
-    if (args.associatedEvents) {
-      args.associatedEvents.forEach(function(event) {
-        this.associatedEvents.push(event);
-      }, this);
-    }
-  }
-
-  StubRAILInteractionRecord.prototype = {
-    __proto__: tr.e.rail.RAILInteractionRecord.prototype,
-
-    get normalizedUserComfort() {
-      return this.args.normalizedUserComfort;
-    },
-
-    get normalizedEfficiency() {
-      return this.args.normalizedEfficiency;
-    }
-  };
-
-  return {
-    StubRAILInteractionRecord: StubRAILInteractionRecord
-  };
-});
-</script>
diff --git a/catapult/tracing/tracing/extras/systrace_config.html b/catapult/tracing/tracing/extras/systrace_config.html
index 1b250d2..aac08d5 100644
--- a/catapult/tracing/tracing/extras/systrace_config.html
+++ b/catapult/tracing/tracing/extras/systrace_config.html
@@ -5,14 +5,11 @@
 found in the LICENSE file.
 -->
 
-<!-- Core configs. -->
-<link rel="import" href="/tracing/model/model.html">
-
-<!-- Features used by Android systrace. -->
-<link rel="import" href="/tracing/importer/import.html">
+<link rel="import" href="/tracing/extras/android/android_auditor.html">
 <link rel="import" href="/tracing/extras/importer/android/event_log_importer.html">
 <link rel="import" href="/tracing/extras/importer/battor_importer.html">
 <link rel="import" href="/tracing/extras/importer/ddms_importer.html">
 <link rel="import" href="/tracing/extras/importer/linux_perf/ftrace_importer.html">
-<link rel="import" href="/tracing/extras/android/android_auditor.html">
 <link rel="import" href="/tracing/extras/vsync/vsync_auditor.html">
+<link rel="import" href="/tracing/importer/import.html">
+<link rel="import" href="/tracing/model/model.html">
diff --git a/catapult/tracing/tracing/extras/tcmalloc/heap.html b/catapult/tracing/tracing/extras/tcmalloc/heap.html
deleted file mode 100644
index dd79873..0000000
--- a/catapult/tracing/tracing/extras/tcmalloc/heap.html
+++ /dev/null
@@ -1,98 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2013 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/model/object_instance.html">
-<link rel="import" href="/tracing/extras/chrome/cc/util.html">
-
-<script>
-'use strict';
-
-tr.exportTo('tr.e.tcmalloc', function() {
-  var ObjectSnapshot = tr.model.ObjectSnapshot;
-
-  /**
-   * @constructor
-   */
-  function HeapSnapshot() {
-    ObjectSnapshot.apply(this, arguments);
-  }
-
-  HeapSnapshot.prototype = {
-    __proto__: ObjectSnapshot.prototype,
-
-    preInitialize: function() {
-      tr.e.cc.preInitializeObject(this);
-    },
-
-    // TODO(jamescook): This seems to be called before the green dot is clicked.
-    // Consider doing it in heap_view.js.
-    initialize: function() {
-      if (this.args.length == 0)
-        throw new Error('No heap snapshot data.');
-
-      // The first entry is total allocations across all stack traces.
-      this.total_ = this.args[0];
-      // The rest is a list of allocations.
-      var allocs = this.args.slice(1);
-
-      // Build a nested dictionary of trace event names.
-      this.heap_ = {
-        children: {},
-        currentBytes: 0,
-        currentAllocs: 0,
-        totalBytes: 0,
-        totalAllocs: 0
-      };
-      for (var i = 0; i < allocs.length; i++) {
-        var alloc = allocs[i];
-        var traceNames = alloc.trace.split(' ');
-        // We don't want to record allocations caused by the heap profiling
-        // system itself, so skip allocations with this special name.
-        if (traceNames.indexOf('trace-memory-ignore') != -1)
-          continue;
-        var heapEntry = this.heap_;
-        // Walk down into the heap of stack traces.
-        for (var j = 0; j < traceNames.length; j++) {
-          // Look for existing children with this trace.
-          var traceName = traceNames[j];
-          // The empty trace name means "(here)", so don't roll those up into
-          // parent traces because they have already been counted.
-          if (traceName.length != 0) {
-            // Add up the total memory for intermediate entries, so the top of
-            // each subtree is the total memory for that tree.
-            heapEntry.currentBytes += alloc.currentBytes;
-            heapEntry.currentAllocs += alloc.currentAllocs;
-            heapEntry.totalBytes += alloc.totalBytes;
-            heapEntry.totalAllocs += alloc.totalAllocs;
-          }
-          if (!heapEntry.children[traceName]) {
-            // New trace entry at this depth, so create a child for it.
-            heapEntry.children[traceName] = {
-              children: {},
-              currentBytes: alloc.currentBytes,
-              currentAllocs: alloc.currentAllocs,
-              totalBytes: alloc.totalBytes,
-              totalAllocs: alloc.totalAllocs
-            };
-          }
-          // Descend into the children.
-          heapEntry = heapEntry.children[traceName];
-        }
-      }
-    }
-
-  };
-
-  ObjectSnapshot.register(
-    HeapSnapshot,
-    {typeName: 'memory::Heap'});
-
-  return {
-    HeapSnapshot: HeapSnapshot
-  };
-});
-</script>
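
To make the deleted initialize() above easier to follow: each allocation's space-separated trace string is walked as a path through a trie rooted at heap_; bytes and counts are summed into every named intermediate frame along the way, and an entry keyed by the empty string records memory attributed to that frame itself ("(here)"). A small illustration of the resulting shape, using made-up trace names and the camelCase field names that initialize() reads:

// Two allocations sharing the prefix 'A::a'. Walking them through the loop
// above leaves heap_.children['A::a'] with currentBytes 150 and two children:
// '' (the 100 bytes allocated at A::a itself) and 'B::b' (the 50 bytes
// allocated one frame below it).
var allocs = [
  {trace: 'A::a ', currentAllocs: 1, totalAllocs: 1,
   currentBytes: 100, totalBytes: 100},
  {trace: 'A::a B::b ', currentAllocs: 2, totalAllocs: 2,
   currentBytes: 50, totalBytes: 50}
];
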
diff --git a/catapult/tracing/tracing/extras/tcmalloc/heap_test.html b/catapult/tracing/tracing/extras/tcmalloc/heap_test.html
deleted file mode 100644
index 0b8e3be..0000000
--- a/catapult/tracing/tracing/extras/tcmalloc/heap_test.html
+++ /dev/null
@@ -1,112 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2013 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/extras/tcmalloc/heap.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  var HeapSnapshot = tr.e.tcmalloc.HeapSnapshot;
-
-  // Tests total allocation count.
-  test('totals', function() {
-    var snapshot = new HeapSnapshot({}, 1, [
-      {
-        'current_allocs': 10,
-        'total_allocs': 100,
-        'current_bytes': 10000,
-        'trace': '',
-        'total_bytes': 100000
-      },
-      {
-        'current_allocs': 2,
-        'total_allocs': 22,
-        'current_bytes': 200,
-        'trace': 'TestObject::TestMethod ',
-        'total_bytes': 2200
-      }
-    ]);
-    snapshot.preInitialize();
-    snapshot.initialize();
-
-    // Base class got the timestamp.
-    assert.equal(snapshot.ts, 1);
-
-    // The first entry in the list is for totals.
-    assert.equal(snapshot.total_.currentAllocs, 10);
-    assert.equal(snapshot.total_.currentBytes, 10000);
-  });
-
-  // Tests multi-level trace stacks.
-  test('multiLevel', function() {
-    var snapshot = new HeapSnapshot({}, 1, [
-      {
-        'current_allocs': 10,
-        'total_allocs': 100,
-        'current_bytes': 10000,
-        'trace': '',
-        'total_bytes': 100000
-      },
-      {
-        'current_allocs': 2,
-        'total_allocs': 22,
-        'current_bytes': 200,
-        'trace': 'TestObject::TestMethod ',
-        'total_bytes': 2200
-      },
-      {
-        'current_allocs': 3,
-        'total_allocs': 33,
-        'current_bytes': 300,
-        'trace': 'TestObject2::TestMethod2  ',
-        'total_bytes': 3300
-      },
-      {
-        'current_allocs': 5,
-        'total_allocs': 55,
-        'current_bytes': 500,
-        'trace': 'TestObject2::TestMethod2 TestObject3::TestMethod3 ',
-        'total_bytes': 5500
-      }
-    ]);
-    snapshot.preInitialize();
-    snapshot.initialize();
-
-    // Our heap has two top-level stacks.
-    var heap = snapshot.heap_;
-    var childKeys = Object.keys(heap.children);
-    assert.equal(childKeys.length, 2);
-    // Both methods exist as children.
-    assert.notEqual(childKeys.indexOf('TestObject::TestMethod'), -1);
-    assert.notEqual(childKeys.indexOf('TestObject2::TestMethod2'), -1);
-
-    // Verify the first trace entry stack.
-    var trace = heap.children['TestObject::TestMethod'];
-    assert.equal(trace.currentAllocs, 2);
-    assert.equal(trace.currentBytes, 200);
-    // One child for "(here)".
-    assert.equal(Object.keys(trace.children).length, 1);
-    assert.isDefined(trace.children['']);
-
-    // Verify the second trace entry stack.
-    trace = heap.children['TestObject2::TestMethod2'];
-    // Memory should have been summed up.
-    assert.equal(trace.currentAllocs, 8);
-    assert.equal(trace.currentBytes, 800);
-    // Two children, "(here)" and another stack.
-    assert.equal(Object.keys(trace.children).length, 2);
-    assert.isDefined(trace.children['TestObject3::TestMethod3']);
-    assert.isDefined(trace.children['']);
-
-    trace = trace.children['TestObject3::TestMethod3'];
-    assert.equal(trace.currentAllocs, 5);
-    assert.equal(trace.currentBytes, 500);
-    assert.equal(Object.keys(trace.children).length, 1);
-  });
-});
-</script>
diff --git a/catapult/tracing/tracing/extras/tquery/filter_any_of.html b/catapult/tracing/tracing/extras/tquery/filter_any_of.html
index c85ff1a..4118d40 100644
--- a/catapult/tracing/tracing/extras/tquery/filter_any_of.html
+++ b/catapult/tracing/tracing/extras/tquery/filter_any_of.html
@@ -7,6 +7,7 @@
 
 <link rel="import" href="/tracing/core/scripting_controller.html">
 <link rel="import" href="/tracing/extras/tquery/filter.html">
+<link rel="import" href="/tracing/extras/tquery/filter_not.html">
 
 <script>
 'use strict';
@@ -44,16 +45,22 @@
   };
   tr.c.ScriptingObjectRegistry.register(
       function() {
-        var exprs = [];
-        for (var i = 0; i < arguments.length; i++) {
-          exprs.push(arguments[i]);
-        }
+        var exprs = Array.prototype.slice.call(arguments);
         return new FilterAnyOf(exprs);
       },
       {
         name: 'anyOf'
       }
   );
+  tr.c.ScriptingObjectRegistry.register(
+      function() {
+        var exprs = Array.prototype.slice.call(arguments);
+        return new tr.e.tquery.FilterNot(new FilterAnyOf(exprs));
+      },
+      {
+        name: 'noneOf'
+      }
+  );
   return {
     FilterAnyOf: FilterAnyOf
   };
diff --git a/catapult/tracing/tracing/extras/tquery/filter_not.html b/catapult/tracing/tracing/extras/tquery/filter_not.html
new file mode 100644
index 0000000..64a4be9
--- /dev/null
+++ b/catapult/tracing/tracing/extras/tquery/filter_not.html
@@ -0,0 +1,50 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/core/scripting_controller.html">
+<link rel="import" href="/tracing/extras/tquery/filter.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.e.tquery', function() {
+  function FilterNot(subExpression) {
+    tr.e.tquery.Filter.call(this);
+    this.subExpression = subExpression;
+  }
+
+  FilterNot.prototype = {
+    __proto__: tr.e.tquery.Filter.prototype,
+
+    set subExpression(expr) {
+      this.subExpression_ = tr.e.tquery.Filter.normalizeFilterExpression(expr);
+    },
+
+    get subExpression() {
+      return this.subExpression_;
+    },
+
+    evaluate: function(context) {
+      return !this.subExpression.evaluate(context);
+    }
+  };
+  tr.c.ScriptingObjectRegistry.register(
+      function() {
+        var exprs = Array.prototype.slice.call(arguments);
+        if (exprs.length !== 1)
+          throw new Error('not() must have exactly one subexpression');
+        return new FilterNot(exprs[0]);
+      },
+      {
+        name: 'not'
+      }
+  );
+  return {
+    FilterNot: FilterNot
+  };
+});
+</script>
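
Besides backing not(), FilterNot is also what the noneOf() helper registered in filter_any_of.html is built from: noneOf(a, b) is literally not(anyOf(a, b)). The same composition expressed over plain predicates, as a standalone sketch rather than tquery's registry API:

// De Morgan-style composition mirroring the filters added in this change.
function anyOf(preds) {
  return function(x) { return preds.some(function(p) { return p(x); }); };
}
function not(pred) {
  return function(x) { return !pred(x); };
}
function noneOf(preds) {
  return not(anyOf(preds));
}

var matchesA = function(t) { return /a/.test(t); };
var matchesB = function(t) { return /b/.test(t); };
['a', 'b', 'c'].filter(not(matchesA));                 // ['b', 'c']
['a', 'b', 'c'].filter(noneOf([matchesA, matchesB]));  // ['c']
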
diff --git a/catapult/tracing/tracing/extras/tquery/tquery.html b/catapult/tracing/tracing/extras/tquery/tquery.html
index cbef132..8953d7e 100644
--- a/catapult/tracing/tracing/extras/tquery/tquery.html
+++ b/catapult/tracing/tracing/extras/tquery/tquery.html
@@ -15,6 +15,7 @@
 <link rel="import" href="/tracing/extras/tquery/filter_has_duration.html">
 <link rel="import" href="/tracing/extras/tquery/filter_has_title.html">
 <link rel="import" href="/tracing/extras/tquery/filter_is_top_level.html">
+<link rel="import" href="/tracing/extras/tquery/filter_not.html">
 <link rel="import" href="/tracing/model/event_set.html">
 
 <script>
diff --git a/catapult/tracing/tracing/extras/tquery/tquery_test.html b/catapult/tracing/tracing/extras/tquery/tquery_test.html
index f822a6c..9f56ada 100644
--- a/catapult/tracing/tracing/extras/tquery/tquery_test.html
+++ b/catapult/tracing/tracing/extras/tquery/tquery_test.html
@@ -17,7 +17,7 @@
   function createTestModel(sliceCount) {
     var slices = [];
     for (var i = 0; i < sliceCount; i++)
-      slices.push(tr.c.TestUtils.newSlice(1, 2));
+      slices.push(tr.c.TestUtils.newSliceEx({start: 1, duration: 2}));
     return createTestModelWithSlices(slices);
   }
 
@@ -70,9 +70,9 @@
   test('tqueryFilterHasTitle', function() {
     var hasTitle = getScriptObject('hasTitle');
     var model = createTestModelWithSlices([
-        {guid: 1, title: 'a'},
-        {guid: 2, title: 'b'},
-        {guid: 3, title: 'c'}
+      {guid: 1, title: 'a'},
+      {guid: 2, title: 'b'},
+      {guid: 3, title: 'c'}
     ]);
     var tquery = new tr.e.tquery.TQuery(model);
 
@@ -92,9 +92,9 @@
   test('tqueryFilterHasAncestor', function() {
     var hasAncestor = getScriptObject('hasAncestor');
     var model = createTestModelWithSlices([
-        {guid: 1, title: 'a'},
-        {guid: 2, title: 'b', subSlices: [{guid: 4}]},
-        {guid: 3, title: 'c'}
+      {guid: 1, title: 'a'},
+      {guid: 2, title: 'b', subSlices: [{guid: 4}]},
+      {guid: 3, title: 'c'}
     ]);
     var tquery = new tr.e.tquery.TQuery(model);
 
@@ -113,9 +113,9 @@
   test('tqueryFilterAllOf', function() {
     var allOf = getScriptObject('allOf');
     var model = createTestModelWithSlices([
-        {guid: 1, title: 'a1'},
-        {guid: 2, title: 'b1'},
-        {guid: 3, title: 'c1'}
+      {guid: 1, title: 'a1'},
+      {guid: 2, title: 'b1'},
+      {guid: 3, title: 'c1'}
     ]);
     var tquery = new tr.e.tquery.TQuery(model);
 
@@ -134,9 +134,9 @@
   test('tqueryFilterAnyOf', function() {
     var anyOf = getScriptObject('anyOf');
     var model = createTestModelWithSlices([
-        {guid: 1, title: 'a'},
-        {guid: 2, title: 'b'},
-        {guid: 3, title: 'c'}
+      {guid: 1, title: 'a'},
+      {guid: 2, title: 'b'},
+      {guid: 3, title: 'c'}
     ]);
     var tquery = new tr.e.tquery.TQuery(model);
 
@@ -156,9 +156,9 @@
   test('tqueryFilterIsTopLevel', function() {
     var isTopLevel = getScriptObject('isTopLevel');
     var model = createTestModelWithSlices([
-        {guid: 1, title: 'a'},
-        {guid: 2, title: 'b', subSlices: [{guid: 4}]},
-        {guid: 3, title: 'c'}
+      {guid: 1, title: 'a'},
+      {guid: 2, title: 'b', subSlices: [{guid: 4}]},
+      {guid: 3, title: 'c'}
     ]);
     var tquery = new tr.e.tquery.TQuery(model);
 
@@ -176,10 +176,10 @@
   test('tqueryFilterHasDuration', function() {
     var hasDuration = getScriptObject('hasDuration');
     var model = createTestModelWithSlices([
-        {guid: 1, title: 'a', duration: 1},
-        {guid: 2, title: 'b', duration: 2},
-        {guid: 3, title: 'c', duration: 3},
-        {guid: 4, title: 'no duration'}
+      {guid: 1, title: 'a', duration: 1},
+      {guid: 2, title: 'b', duration: 2},
+      {guid: 3, title: 'c', duration: 3},
+      {guid: 4, title: 'no duration'}
     ]);
     var tquery = new tr.e.tquery.TQuery(model);
 
@@ -200,5 +200,40 @@
     assert.equal(result.length, 1);
     assert.equal(result[0].guid, 3);
   });
+
+  test('tqueryFilterNot', function() {
+    var not = getScriptObject('not');
+    var model = createTestModelWithSlices([
+      {guid: 1, title: 'a'},
+      {guid: 2, title: 'b'},
+      {guid: 3, title: 'c'}
+    ]);
+    var tquery = new tr.e.tquery.TQuery(model);
+
+    var result = tquery.filter(not(/a/)).selection;
+    assert.equal(result.length, 2);
+    assert.equal(result[0].guid, 2);
+    assert.equal(result[1].guid, 3);
+
+    // Test a not() without any subexpressions.
+    assert.throws(function() { not(); });
+
+    // Test a not() with too many subexpressions.
+    assert.throws(function() { not(/a/, /b/); });
+  });
+
+  test('tqueryFilterNoneOf', function() {
+    var noneOf = getScriptObject('noneOf');
+    var model = createTestModelWithSlices([
+      {guid: 1, title: 'a'},
+      {guid: 2, title: 'b'},
+      {guid: 3, title: 'c'}
+    ]);
+    var tquery = new tr.e.tquery.TQuery(model);
+
+    var result = tquery.filter(noneOf(/a/, /b/)).selection;
+    assert.equal(result.length, 1);
+    assert.equal(result[0].guid, 3);
+  });
 });
 </script>
diff --git a/catapult/tracing/tracing/importer/empty_importer.html b/catapult/tracing/tracing/importer/empty_importer.html
index be51cbc..dc5f867 100644
--- a/catapult/tracing/tracing/importer/empty_importer.html
+++ b/catapult/tracing/tracing/importer/empty_importer.html
@@ -32,7 +32,11 @@
   };
 
   EmptyImporter.prototype = {
-    __proto__: tr.importer.Importer.prototype
+    __proto__: tr.importer.Importer.prototype,
+
+    get importerName() {
+      return 'EmptyImporter';
+    }
   };
 
   tr.importer.Importer.register(EmptyImporter);
diff --git a/catapult/tracing/tracing/importer/find_input_expectations.html b/catapult/tracing/tracing/importer/find_input_expectations.html
new file mode 100644
index 0000000..b018b00
--- /dev/null
+++ b/catapult/tracing/tracing/importer/find_input_expectations.html
@@ -0,0 +1,997 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/base/range_utils.html">
+<link rel="import" href="/tracing/extras/chrome/cc/input_latency_async_slice.html">
+<link rel="import" href="/tracing/importer/proto_expectation.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.importer', function() {
+  var ProtoExpectation = tr.importer.ProtoExpectation;
+
+  var INPUT_TYPE = tr.e.cc.INPUT_EVENT_TYPE_NAMES;
+
+  var KEYBOARD_TYPE_NAMES = [
+    INPUT_TYPE.CHAR,
+    INPUT_TYPE.KEY_DOWN_RAW,
+    INPUT_TYPE.KEY_DOWN,
+    INPUT_TYPE.KEY_UP
+  ];
+  var MOUSE_RESPONSE_TYPE_NAMES = [
+    INPUT_TYPE.CLICK,
+    INPUT_TYPE.CONTEXT_MENU
+  ];
+  var MOUSE_WHEEL_TYPE_NAMES = [
+    INPUT_TYPE.MOUSE_WHEEL
+  ];
+  var MOUSE_DRAG_TYPE_NAMES = [
+    INPUT_TYPE.MOUSE_DOWN,
+    INPUT_TYPE.MOUSE_MOVE,
+    INPUT_TYPE.MOUSE_UP
+  ];
+  var TAP_TYPE_NAMES = [
+    INPUT_TYPE.TAP,
+    INPUT_TYPE.TAP_CANCEL,
+    INPUT_TYPE.TAP_DOWN
+  ];
+  var PINCH_TYPE_NAMES = [
+    INPUT_TYPE.PINCH_BEGIN,
+    INPUT_TYPE.PINCH_END,
+    INPUT_TYPE.PINCH_UPDATE
+  ];
+  var FLING_TYPE_NAMES = [
+    INPUT_TYPE.FLING_CANCEL,
+    INPUT_TYPE.FLING_START
+  ];
+  var TOUCH_TYPE_NAMES = [
+    INPUT_TYPE.TOUCH_END,
+    INPUT_TYPE.TOUCH_MOVE,
+    INPUT_TYPE.TOUCH_START
+  ];
+  var SCROLL_TYPE_NAMES = [
+    INPUT_TYPE.SCROLL_BEGIN,
+    INPUT_TYPE.SCROLL_END,
+    INPUT_TYPE.SCROLL_UPDATE
+  ];
+  var ALL_HANDLED_TYPE_NAMES = [].concat(
+    KEYBOARD_TYPE_NAMES,
+    MOUSE_RESPONSE_TYPE_NAMES,
+    MOUSE_WHEEL_TYPE_NAMES,
+    MOUSE_DRAG_TYPE_NAMES,
+    PINCH_TYPE_NAMES,
+    TAP_TYPE_NAMES,
+    FLING_TYPE_NAMES,
+    TOUCH_TYPE_NAMES,
+    SCROLL_TYPE_NAMES
+  );
+
+  var RENDERER_FLING_TITLE = 'InputHandlerProxy::HandleGestureFling::started';
+
+  // TODO(benjhayden) share with rail_ir_finder
+  var CSS_ANIMATION_TITLE = 'Animation';
+
+  // If there's less than this much time between the end of one event and the
+  // start of the next, then they might be merged.
+  // There was not enough thought given to this value, so if you have any slight
+  // reason to change it, then please do so. It might also be good to split this
+  // into multiple values.
+  var INPUT_MERGE_THRESHOLD_MS = 200;
+  var ANIMATION_MERGE_THRESHOLD_MS = 1;
+
+  // If two MouseWheel events begin this close together, then they're an
+  // Animation, not two responses.
+  var MOUSE_WHEEL_THRESHOLD_MS = 40;
+
+  // If two MouseMoves are more than this far apart, then they're two Responses,
+  // not Animation.
+  var MOUSE_MOVE_THRESHOLD_MS = 40;
+
+  // Strings used to name IRs.
+  var KEYBOARD_IR_NAME = 'Keyboard';
+  var MOUSE_IR_NAME = 'Mouse';
+  var MOUSEWHEEL_IR_NAME = 'MouseWheel';
+  var TAP_IR_NAME = 'Tap';
+  var PINCH_IR_NAME = 'Pinch';
+  var FLING_IR_NAME = 'Fling';
+  var TOUCH_IR_NAME = 'Touch';
+  var SCROLL_IR_NAME = 'Scroll';
+  var CSS_IR_NAME = 'CSS';
+
+  // TODO(benjhayden) Find a better home for this.
+  function compareEvents(x, y) {
+    if (x.start !== y.start)
+      return x.start - y.start;
+    if (x.end !== y.end)
+      return x.end - y.end;
+    if (x.guid && y.guid)
+      return x.guid - y.guid;
+    return 0;
+  }
+
+  function forEventTypesIn(events, typeNames, cb, opt_this) {
+    events.forEach(function(event) {
+      if (typeNames.indexOf(event.typeName) >= 0) {
+        cb.call(opt_this, event);
+      }
+    });
+  }
+
+  function causedFrame(event) {
+    for (var i = 0; i < event.associatedEvents.length; ++i) {
+      if (event.associatedEvents[i].title ===
+          tr.model.helpers.IMPL_RENDERING_STATS)
+        return true;
+    }
+    return false;
+  }
+
+  function getSortedInputEvents(modelHelper) {
+    var inputEvents = [];
+
+    var browserProcess = modelHelper.browserHelper.process;
+    var mainThread = browserProcess.findAtMostOneThreadNamed(
+        'CrBrowserMain');
+    mainThread.asyncSliceGroup.iterateAllEvents(function(slice) {
+      if (!slice.isTopLevel)
+        return;
+
+      if (!(slice instanceof tr.e.cc.InputLatencyAsyncSlice))
+        return;
+
+      // TODO(beaudoin): This should never happen, but it does. Investigate
+      // the trace linked in #1567 and remove this check when it's fixed.
+      if (isNaN(slice.start) ||
+          isNaN(slice.duration) ||
+          isNaN(slice.end))
+        return;
+
+      inputEvents.push(slice);
+    });
+
+    return inputEvents.sort(compareEvents);
+  }
+
+  function findProtoExpectations(modelHelper, sortedInputEvents) {
+    var protoExpectations = [];
+    // This order is not important. Handlers are independent.
+    var handlers = [
+      handleKeyboardEvents,
+      handleMouseResponseEvents,
+      handleMouseWheelEvents,
+      handleMouseDragEvents,
+      handleTapResponseEvents,
+      handlePinchEvents,
+      handleFlingEvents,
+      handleTouchEvents,
+      handleScrollEvents,
+      handleCSSAnimations
+    ];
+    handlers.forEach(function(handler) {
+      protoExpectations.push.apply(protoExpectations, handler(
+          modelHelper, sortedInputEvents));
+    });
+    protoExpectations.sort(compareEvents);
+    return protoExpectations;
+  }
+
+  // Every keyboard event is a Response.
+  function handleKeyboardEvents(modelHelper, sortedInputEvents) {
+    var protoExpectations = [];
+    forEventTypesIn(sortedInputEvents, KEYBOARD_TYPE_NAMES, function(event) {
+      var pe = new ProtoExpectation(
+          ProtoExpectation.RESPONSE_TYPE, KEYBOARD_IR_NAME);
+      pe.pushEvent(event);
+      protoExpectations.push(pe);
+    });
+    return protoExpectations;
+  }
+
+  // Some mouse events can be translated directly into Responses.
+  function handleMouseResponseEvents(modelHelper, sortedInputEvents) {
+    var protoExpectations = [];
+    forEventTypesIn(
+        sortedInputEvents, MOUSE_RESPONSE_TYPE_NAMES, function(event) {
+      var pe = new ProtoExpectation(
+          ProtoExpectation.RESPONSE_TYPE, MOUSE_IR_NAME);
+      pe.pushEvent(event);
+      protoExpectations.push(pe);
+    });
+    return protoExpectations;
+  }
+
+  // MouseWheel events are caused either by a physical wheel on a physical
+  // mouse, or by a touch-drag gesture on a track-pad. The physical wheel
+  // causes MouseWheel events that are much more spaced out, and have no
+  // chance of hitting 60fps, so they are each turned into separate Response
+  // IRs. The track-pad causes MouseWheel events that are much closer
+  // together, and are expected to be 60fps, so the first event in a sequence
+  // is turned into a Response, and the rest are merged into an Animation.
+  // NB this threshold uses the two events' start times, unlike
+  // ProtoExpectation.isNear, which compares the end time of the previous event
+  // with the start time of the next.
+  function handleMouseWheelEvents(modelHelper, sortedInputEvents) {
+    var protoExpectations = [];
+    var currentPE = undefined;
+    var prevEvent_ = undefined;
+    forEventTypesIn(
+        sortedInputEvents, MOUSE_WHEEL_TYPE_NAMES, function(event) {
+      // Switch prevEvent in one place so that we can early-return later.
+      var prevEvent = prevEvent_;
+      prevEvent_ = event;
+
+      if (currentPE &&
+          (prevEvent.start + MOUSE_WHEEL_THRESHOLD_MS) >= event.start) {
+        if (currentPE.irType === ProtoExpectation.ANIMATION_TYPE) {
+          currentPE.pushEvent(event);
+        } else {
+          currentPE = new ProtoExpectation(ProtoExpectation.ANIMATION_TYPE,
+              MOUSEWHEEL_IR_NAME);
+          currentPE.pushEvent(event);
+          protoExpectations.push(currentPE);
+        }
+        return;
+      }
+      currentPE = new ProtoExpectation(
+          ProtoExpectation.RESPONSE_TYPE, MOUSEWHEEL_IR_NAME);
+      currentPE.pushEvent(event);
+      protoExpectations.push(currentPE);
+    });
+    return protoExpectations;
+  }
+
+  // Down events followed closely by Up events are click Responses, but the
+  // Response doesn't start until the Up event.
+  //
+  //     RRR
+  // DDD UUU
+  //
+  // If there are any Move events in between a Down and an Up, then the Down
+  // and the first Move are a Response, then the rest of the Moves are an
+  // Animation:
+  //
+  // RRRRRRRAAAAAAAAAAAAAAAAAAAA
+  // DDD MMM MMM MMM MMM MMM UUU
+  //
+  function handleMouseDragEvents(modelHelper, sortedInputEvents) {
+    var protoExpectations = [];
+    var currentPE = undefined;
+    var mouseDownEvent = undefined;
+    forEventTypesIn(
+        sortedInputEvents, MOUSE_DRAG_TYPE_NAMES, function(event) {
+      switch (event.typeName) {
+        case INPUT_TYPE.MOUSE_DOWN:
+          if (causedFrame(event)) {
+            var pe = new ProtoExpectation(
+                ProtoExpectation.RESPONSE_TYPE, MOUSE_IR_NAME);
+            pe.pushEvent(event);
+            protoExpectations.push(pe);
+          } else {
+            // Responses typically don't start until the mouse up event.
+            // Add this MouseDown to the Response that starts at the MouseUp.
+            mouseDownEvent = event;
+          }
+          break;
+          // There may be more than 100ms between the start of the mouse down
+          // and the start of the mouse up. Chrome and the web don't start to
+          // respond until the mouse up. ResponseIRs start deducting comfort
+          // after 100ms of duration. If that 100ms is spent just waiting for
+          // the user to release the mouse button, then the ResponseIR would
+          // unfairly start deducting comfort before Chrome even has a mouse
+          // up to respond to.
+          // It is technically possible for a site to afford one response on
+          // mouse down and another on mouse up, but that is an edge case. The
+          // vast majority of mouse downs are not responses.
+
+        case INPUT_TYPE.MOUSE_MOVE:
+          if (!causedFrame(event)) {
+            // Ignore MouseMoves that do not affect the screen. They are not
+            // part of an interaction record by definition.
+            var pe = new ProtoExpectation(ProtoExpectation.IGNORED_TYPE);
+            pe.pushEvent(event);
+            protoExpectations.push(pe);
+          } else if (!currentPE ||
+                      !currentPE.isNear(event, MOUSE_MOVE_THRESHOLD_MS)) {
+            // The first MouseMove after a MouseDown or after a while is a
+            // Response.
+            currentPE = new ProtoExpectation(
+                ProtoExpectation.RESPONSE_TYPE, MOUSE_IR_NAME);
+            currentPE.pushEvent(event);
+            if (mouseDownEvent) {
+              currentPE.associatedEvents.push(mouseDownEvent);
+              mouseDownEvent = undefined;
+            }
+            protoExpectations.push(currentPE);
+          } else {
+            // Merge this event into an Animation.
+            if (currentPE.irType === ProtoExpectation.ANIMATION_TYPE) {
+              currentPE.pushEvent(event);
+            } else {
+              currentPE = new ProtoExpectation(
+                  ProtoExpectation.ANIMATION_TYPE, MOUSE_IR_NAME);
+              currentPE.pushEvent(event);
+              protoExpectations.push(currentPE);
+            }
+          }
+          break;
+
+        case INPUT_TYPE.MOUSE_UP:
+          if (!mouseDownEvent) {
+            var pe = new ProtoExpectation(
+                causedFrame(event) ? ProtoExpectation.RESPONSE_TYPE :
+                ProtoExpectation.IGNORED_TYPE,
+                MOUSE_IR_NAME);
+            pe.pushEvent(event);
+            protoExpectations.push(pe);
+            break;
+          }
+
+          if (currentPE) {
+            currentPE.pushEvent(event);
+          } else {
+            currentPE = new ProtoExpectation(
+                ProtoExpectation.RESPONSE_TYPE, MOUSE_IR_NAME);
+            if (mouseDownEvent)
+              currentPE.associatedEvents.push(mouseDownEvent);
+            currentPE.pushEvent(event);
+            protoExpectations.push(currentPE);
+          }
+          mouseDownEvent = undefined;
+          currentPE = undefined;
+          break;
+      }
+    });
+    if (mouseDownEvent) {
+      currentPE = new ProtoExpectation(ProtoExpectation.IGNORED_TYPE);
+      currentPE.pushEvent(mouseDownEvent);
+      protoExpectations.push(currentPE);
+    }
+    return protoExpectations;
+  }
+
+  // Solitary Tap events are simple Responses:
+  //
+  // RRR
+  // TTT
+  //
+  // TapDowns are part of Responses.
+  //
+  // RRRRRRR
+  // DDD TTT
+  //
+  // TapCancels are part of Responses, which seems strange. They always go
+  // with scrolls, so they'll probably be merged with scroll Responses.
+  // TapCancels can take a significant amount of time and account for a
+  // significant amount of work, which should be grouped with the scroll IRs
+  // if possible.
+  //
+  // RRRRRRR
+  // DDD CCC
+  //
+  function handleTapResponseEvents(modelHelper, sortedInputEvents) {
+    var protoExpectations = [];
+    var currentPE = undefined;
+    forEventTypesIn(sortedInputEvents, TAP_TYPE_NAMES, function(event) {
+      switch (event.typeName) {
+        case INPUT_TYPE.TAP_DOWN:
+          currentPE = new ProtoExpectation(
+              ProtoExpectation.RESPONSE_TYPE, TAP_IR_NAME);
+          currentPE.pushEvent(event);
+          protoExpectations.push(currentPE);
+          break;
+
+        case INPUT_TYPE.TAP:
+          if (currentPE) {
+            currentPE.pushEvent(event);
+          } else {
+            // Sometimes we get Tap events with no TapDown, sometimes a
+            // TapDown comes first. Handle both cases.
+            currentPE = new ProtoExpectation(
+                ProtoExpectation.RESPONSE_TYPE, TAP_IR_NAME);
+            currentPE.pushEvent(event);
+            protoExpectations.push(currentPE);
+          }
+          currentPE = undefined;
+          break;
+
+        case INPUT_TYPE.TAP_CANCEL:
+          if (!currentPE) {
+            var pe = new ProtoExpectation(ProtoExpectation.IGNORED_TYPE);
+            pe.pushEvent(event);
+            protoExpectations.push(pe);
+            break;
+          }
+
+          if (currentPE.isNear(event, INPUT_MERGE_THRESHOLD_MS)) {
+            currentPE.pushEvent(event);
+          } else {
+            currentPE = new ProtoExpectation(
+                ProtoExpectation.RESPONSE_TYPE, TAP_IR_NAME);
+            currentPE.pushEvent(event);
+            protoExpectations.push(currentPE);
+          }
+          currentPE = undefined;
+          break;
+      }
+    });
+    return protoExpectations;
+  }
+
+  // The PinchBegin and the first PinchUpdate comprise a Response, then the
+  // rest of the PinchUpdates comprise an Animation.
+  //
+  // RRRRRRRAAAAAAAAAAAAAAAAAAAA
+  // BBB UUU UUU UUU UUU UUU EEE
+  //
+  function handlePinchEvents(modelHelper, sortedInputEvents) {
+    var protoExpectations = [];
+    var currentPE = undefined;
+    var sawFirstUpdate = false;
+    var modelBounds = modelHelper.model.bounds;
+    forEventTypesIn(sortedInputEvents, PINCH_TYPE_NAMES, function(event) {
+      switch (event.typeName) {
+        case INPUT_TYPE.PINCH_BEGIN:
+          if (currentPE &&
+              currentPE.isNear(event, INPUT_MERGE_THRESHOLD_MS)) {
+            currentPE.pushEvent(event);
+            break;
+          }
+          currentPE = new ProtoExpectation(
+              ProtoExpectation.RESPONSE_TYPE, PINCH_IR_NAME);
+          currentPE.pushEvent(event);
+          currentPE.isAnimationBegin = true;
+          protoExpectations.push(currentPE);
+          sawFirstUpdate = false;
+          break;
+
+        case INPUT_TYPE.PINCH_UPDATE:
+          // Like ScrollUpdates, the Begin and the first Update constitute a
+          // Response, then the rest of the Updates constitute an Animation
+          // that begins when the Response ends. If the user pauses in the
+          // middle of an extended pinch gesture, then multiple Animations
+          // will be created.
+          if (!currentPE ||
+              ((currentPE.irType === ProtoExpectation.RESPONSE_TYPE) &&
+                sawFirstUpdate) ||
+              !currentPE.isNear(event, INPUT_MERGE_THRESHOLD_MS)) {
+            currentPE = new ProtoExpectation(
+                ProtoExpectation.ANIMATION_TYPE, PINCH_IR_NAME);
+            currentPE.pushEvent(event);
+            protoExpectations.push(currentPE);
+          } else {
+            currentPE.pushEvent(event);
+            sawFirstUpdate = true;
+          }
+          break;
+
+        case INPUT_TYPE.PINCH_END:
+          if (currentPE) {
+            currentPE.pushEvent(event);
+          } else {
+            var pe = new ProtoExpectation(ProtoExpectation.IGNORED_TYPE);
+            pe.pushEvent(event);
+            protoExpectations.push(pe);
+          }
+          currentPE = undefined;
+          break;
+      }
+    });
+    return protoExpectations;
+  }
+
+  // Flings are defined by 3 types of events: FlingStart, FlingCancel, and the
+  // renderer fling event. Flings do not begin with a Response. Flings end
+  // either at the beginning of a FlingCancel, or at the end of the renderer
+  // fling event.
+  //
+  // AAAAAAAAAAAAAAAAAAAAAAAAAA
+  // SSS
+  //     RRRRRRRRRRRRRRRRRRRRRR
+  //
+  //
+  // AAAAAAAAAAA
+  // SSS        CCC
+  //
+  function handleFlingEvents(modelHelper, sortedInputEvents) {
+    var protoExpectations = [];
+    var currentPE = undefined;
+
+    function isRendererFling(event) {
+      return event.title === RENDERER_FLING_TITLE;
+    }
+    var browserHelper = modelHelper.browserHelper;
+    var flingEvents = browserHelper.getAllAsyncSlicesMatching(
+        isRendererFling);
+
+    forEventTypesIn(sortedInputEvents, FLING_TYPE_NAMES, function(event) {
+      flingEvents.push(event);
+    });
+    flingEvents.sort(compareEvents);
+
+    flingEvents.forEach(function(event) {
+      if (event.title === RENDERER_FLING_TITLE) {
+        if (currentPE) {
+          currentPE.pushEvent(event);
+        } else {
+          currentPE = new ProtoExpectation(
+              ProtoExpectation.ANIMATION_TYPE, FLING_IR_NAME);
+          currentPE.pushEvent(event);
+          protoExpectations.push(currentPE);
+        }
+        return;
+      }
+
+      switch (event.typeName) {
+        case INPUT_TYPE.FLING_START:
+          if (currentPE) {
+            console.error('Another FlingStart? File a bug with this trace!');
+            currentPE.pushEvent(event);
+          } else {
+            currentPE = new ProtoExpectation(
+                ProtoExpectation.ANIMATION_TYPE, FLING_IR_NAME);
+            currentPE.pushEvent(event);
+            // Set end to an invalid value so that it can be noticed and fixed
+            // later.
+            currentPE.end = 0;
+            protoExpectations.push(currentPE);
+          }
+          break;
+
+        case INPUT_TYPE.FLING_CANCEL:
+          if (currentPE) {
+            currentPE.pushEvent(event);
+            // FlingCancel events start when TouchStart events start, which is
+            // typically when a Response starts. FlingCancel events end when
+            // Chrome acknowledges them, not when they update the screen. So
+            // there might be one more frame during the FlingCancel, after
+            // this Animation ends. That won't affect the scoring algorithms,
+            // and it will make the IRs look more correct if they don't
+            // overlap unnecessarily.
+            currentPE.end = event.start;
+            currentPE = undefined;
+          } else {
+            var pe = new ProtoExpectation(ProtoExpectation.IGNORED_TYPE);
+            pe.pushEvent(event);
+            protoExpectations.push(pe);
+          }
+          break;
+      }
+    });
+    // If there was neither a FLING_CANCEL nor a renderer fling after the
+    // FLING_START, then assume that it ends at the end of the model, so set
+    // the end of currentPE to the end of the model.
+    if (currentPE && !currentPE.end)
+      currentPE.end = modelHelper.model.bounds.max;
+    return protoExpectations;
+  }
+
+  // The TouchStart and the first TouchMove comprise a Response, then the
+  // rest of the TouchMoves comprise an Animation.
+  //
+  // RRRRRRRAAAAAAAAAAAAAAAAAAAA
+  // SSS MMM MMM MMM MMM MMM EEE
+  //
+  // If there are no TouchMove events in between a TouchStart and a TouchEnd,
+  // then it's just a Response.
+  //
+  // RRRRRRR
+  // SSS EEE
+  //
+  function handleTouchEvents(modelHelper, sortedInputEvents) {
+    var protoExpectations = [];
+    var currentPE = undefined;
+    var sawFirstMove = false;
+    forEventTypesIn(sortedInputEvents, TOUCH_TYPE_NAMES, function(event) {
+      switch (event.typeName) {
+        case INPUT_TYPE.TOUCH_START:
+          if (currentPE) {
+            // NB: currentPE will probably be merged with something from
+            // handlePinchEvents(). Multiple TouchStart events without an
+            // intervening TouchEnd logically imply that multiple fingers
+            // are on the screen, so this is probably a pinch gesture.
+            currentPE.pushEvent(event);
+          } else {
+            currentPE = new ProtoExpectation(
+                ProtoExpectation.RESPONSE_TYPE, TOUCH_IR_NAME);
+            currentPE.pushEvent(event);
+            currentPE.isAnimationBegin = true;
+            protoExpectations.push(currentPE);
+            sawFirstMove = false;
+          }
+          break;
+
+        case INPUT_TYPE.TOUCH_MOVE:
+          if (!currentPE) {
+            currentPE = new ProtoExpectation(
+                ProtoExpectation.ANIMATION_TYPE, TOUCH_IR_NAME);
+            currentPE.pushEvent(event);
+            protoExpectations.push(currentPE);
+            break;
+          }
+
+          // Like Scrolls and Pinches, the Response is defined to be the
+          // TouchStart plus the first TouchMove, then the rest of the
+          // TouchMoves constitute an Animation.
+          if ((sawFirstMove &&
+              (currentPE.irType === ProtoExpectation.RESPONSE_TYPE)) ||
+              !currentPE.isNear(event, INPUT_MERGE_THRESHOLD_MS)) {
+            // If there's already a TouchMove in currentPE, or this event is
+            // not near it, then finish currentPE and start a new Animation.
+            var prevEnd = currentPE.end;
+            currentPE = new ProtoExpectation(
+                ProtoExpectation.ANIMATION_TYPE, TOUCH_IR_NAME);
+            currentPE.pushEvent(event);
+            // It's possible for there to be a gap between TouchMoves, but
+            // that doesn't mean that there should be an Idle IR there.
+            currentPE.start = prevEnd;
+            protoExpectations.push(currentPE);
+          } else {
+            currentPE.pushEvent(event);
+            sawFirstMove = true;
+          }
+          break;
+
+        case INPUT_TYPE.TOUCH_END:
+          if (!currentPE) {
+            var pe = new ProtoExpectation(ProtoExpectation.IGNORED_TYPE);
+            pe.pushEvent(event);
+            protoExpectations.push(pe);
+            break;
+          }
+          if (currentPE.isNear(event, INPUT_MERGE_THRESHOLD_MS)) {
+            currentPE.pushEvent(event);
+          } else {
+            var pe = new ProtoExpectation(ProtoExpectation.IGNORED_TYPE);
+            pe.pushEvent(event);
+            protoExpectations.push(pe);
+          }
+          currentPE = undefined;
+          break;
+      }
+    });
+    return protoExpectations;
+  }
+
+  // The first ScrollBegin and the first ScrollUpdate comprise a Response,
+  // then the rest comprise an Animation.
+  //
+  // RRRRRRRAAAAAAAAAAAAAAAAAAAA
+  // BBB UUU UUU UUU UUU UUU EEE
+  //
+  function handleScrollEvents(modelHelper, sortedInputEvents) {
+    var protoExpectations = [];
+    var currentPE = undefined;
+    var sawFirstUpdate = false;
+    forEventTypesIn(sortedInputEvents, SCROLL_TYPE_NAMES, function(event) {
+      switch (event.typeName) {
+        case INPUT_TYPE.SCROLL_BEGIN:
+          // Always begin a new PE even if there already is one, unlike
+          // PinchBegin.
+          currentPE = new ProtoExpectation(
+              ProtoExpectation.RESPONSE_TYPE, SCROLL_IR_NAME);
+          currentPE.pushEvent(event);
+          currentPE.isAnimationBegin = true;
+          protoExpectations.push(currentPE);
+          sawFirstUpdate = false;
+          break;
+
+        case INPUT_TYPE.SCROLL_UPDATE:
+          if (currentPE) {
+            if (currentPE.isNear(event, INPUT_MERGE_THRESHOLD_MS) &&
+                ((currentPE.irType === ProtoExpectation.ANIMATION_TYPE) ||
+                !sawFirstUpdate)) {
+              currentPE.pushEvent(event);
+              sawFirstUpdate = true;
+            } else {
+              currentPE = new ProtoExpectation(ProtoExpectation.ANIMATION_TYPE,
+                  SCROLL_IR_NAME);
+              currentPE.pushEvent(event);
+              protoExpectations.push(currentPE);
+            }
+          } else {
+            // ScrollUpdate without ScrollBegin.
+            currentPE = new ProtoExpectation(
+                ProtoExpectation.ANIMATION_TYPE, SCROLL_IR_NAME);
+            currentPE.pushEvent(event);
+            protoExpectations.push(currentPE);
+          }
+          break;
+
+        case INPUT_TYPE.SCROLL_END:
+          if (!currentPE) {
+            console.error('ScrollEnd without ScrollUpdate? ' +
+                          'File a bug with this trace!');
+            var pe = new ProtoExpectation(ProtoExpectation.IGNORED_TYPE);
+            pe.pushEvent(event);
+            protoExpectations.push(pe);
+            break;
+          }
+          currentPE.pushEvent(event);
+          break;
+      }
+    });
+    return protoExpectations;
+  }
+
+  // CSS Animations are merged into Animations when they intersect.
+  function handleCSSAnimations(modelHelper, sortedInputEvents) {
+    var animationEvents = modelHelper.browserHelper.
+        getAllAsyncSlicesMatching(function(event) {
+          return ((event.title === CSS_ANIMATION_TITLE) &&
+                  (event.duration > 0));
+    });
+
+    var animationRanges = [];
+    animationEvents.forEach(function(event) {
+      var rendererHelper = new tr.model.helpers.ChromeRendererHelper(
+          modelHelper, event.parentContainer.parent);
+      animationRanges.push({
+        min: event.start,
+        max: event.end,
+        event: event,
+        frames: rendererHelper.getFrameEventsInRange(
+            tr.model.helpers.IMPL_FRAMETIME_TYPE,
+            tr.b.Range.fromExplicitRange(event.start, event.end))
+      });
+    });
+
+    function merge(ranges) {
+      var protoExpectation = new ProtoExpectation(
+          ProtoExpectation.ANIMATION_TYPE, CSS_IR_NAME);
+      ranges.forEach(function(range) {
+        protoExpectation.pushEvent(range.event);
+        protoExpectation.associatedEvents.addEventSet(range.frames);
+      });
+      return protoExpectation;
+    }
+
+    return tr.b.mergeRanges(animationRanges,
+                            ANIMATION_MERGE_THRESHOLD_MS,
+                            merge);
+  }
+
+  function postProcessProtoExpectations(protoExpectations) {
+    // protoExpectations is input only. Returns a modified set of
+    // ProtoExpectations.  The order is important.
+    protoExpectations = mergeIntersectingResponses(protoExpectations);
+    protoExpectations = mergeIntersectingAnimations(protoExpectations);
+    protoExpectations = fixResponseAnimationStarts(protoExpectations);
+    protoExpectations = fixTapResponseTouchAnimations(protoExpectations);
+    return protoExpectations;
+  }
+
+  // TouchStarts happen at the same time as ScrollBegins.
+  // It's easier to let multiple handlers create multiple overlapping
+  // Responses and then merge them, rather than make the handlers aware of the
+  // other handlers' PEs.
+  //
+  // For example:
+  // RR
+  //  RRR  -> RRRRR
+  //    RR
+  //
+  // protoExpectations is input only.
+  // Returns a modified set of ProtoExpectations.
+  function mergeIntersectingResponses(protoExpectations) {
+    var newPEs = [];
+    while (protoExpectations.length) {
+      var pe = protoExpectations.shift();
+      newPEs.push(pe);
+
+      // Only consider Responses for now.
+      if (pe.irType !== ProtoExpectation.RESPONSE_TYPE)
+        continue;
+
+      for (var i = 0; i < protoExpectations.length; ++i) {
+        var otherPE = protoExpectations[i];
+
+        if (otherPE.irType !== pe.irType)
+          continue;
+
+        if (!otherPE.intersects(pe))
+          continue;
+
+        // Don't merge together Responses of the same type.
+        // If handleTouchEvents wanted two of its Responses to be merged, then
+        // it would have made them that way to begin with.
+        var typeNames = pe.associatedEvents.map(function(event) {
+          return event.typeName;
+        });
+        if (otherPE.containsTypeNames(typeNames))
+          continue;
+
+        pe.merge(otherPE);
+        protoExpectations.splice(i, 1);
+        // Don't skip the next otherPE!
+        --i;
+      }
+    }
+    return newPEs;
+  }
+
+  // An animation is simply an expectation of 60fps between start and end.
+  // If two animations overlap, then merge them.
+  //
+  // For example:
+  // AA
+  //  AAA  -> AAAAA
+  //    AA
+  //
+  // protoExpectations is input only.
+  // Returns a modified set of ProtoExpectations.
+  function mergeIntersectingAnimations(protoExpectations) {
+    var newPEs = [];
+    while (protoExpectations.length) {
+      var pe = protoExpectations.shift();
+      newPEs.push(pe);
+
+      // Only consider Animations for now.
+      if (pe.irType !== ProtoExpectation.ANIMATION_TYPE)
+        continue;
+
+      var isCSS = pe.containsSliceTitle(CSS_ANIMATION_TITLE);
+      var isFling = pe.containsTypeNames([INPUT_TYPE.FLING_START]);
+
+      for (var i = 0; i < protoExpectations.length; ++i) {
+        var otherPE = protoExpectations[i];
+
+        if (otherPE.irType !== pe.irType)
+          continue;
+
+        // Don't merge CSS Animations with any other types.
+        if (isCSS != otherPE.containsSliceTitle(CSS_ANIMATION_TITLE))
+          continue;
+
+        if (!otherPE.intersects(pe))
+          continue;
+
+        // Don't merge Fling Animations with any other types.
+        if (isFling != otherPE.containsTypeNames([INPUT_TYPE.FLING_START]))
+          continue;
+
+        pe.merge(otherPE);
+        protoExpectations.splice(i, 1);
+        // Don't skip the next otherPE!
+        --i;
+      }
+    }
+    return newPEs;
+  }
+
+  // The ends of responses frequently overlap the starts of animations.
+  // Fix the animations to reflect the fact that the user can only start to
+  // expect 60fps after the response.
+  //
+  // For example:
+  // RRR   -> RRRAA
+  //  AAAA
+  //
+  // protoExpectations is input only.
+  // Returns a modified set of ProtoExpectations.
+  function fixResponseAnimationStarts(protoExpectations) {
+    protoExpectations.forEach(function(ape) {
+      // Only consider animations for now.
+      if (ape.irType !== ProtoExpectation.ANIMATION_TYPE)
+        return;
+
+      protoExpectations.forEach(function(rpe) {
+        // Only consider responses for now.
+        if (rpe.irType !== ProtoExpectation.RESPONSE_TYPE)
+          return;
+
+        // Only consider responses that end during the animation.
+        if (!ape.containsTimestampInclusive(rpe.end))
+          return;
+
+        // Ignore Responses that are entirely contained by the animation.
+        if (ape.containsTimestampInclusive(rpe.start))
+          return;
+
+        // Move the animation start to the response end.
+        ape.start = rpe.end;
+      });
+    });
+    return protoExpectations;
+  }
+
+  // Merge Tap Responses that overlap Touch-only Animations.
+  // https://github.com/catapult-project/catapult/issues/1431
+  function fixTapResponseTouchAnimations(protoExpectations) {
+    function isTapResponse(pe) {
+      return (pe.irType === ProtoExpectation.RESPONSE_TYPE) &&
+              pe.containsTypeNames([INPUT_TYPE.TAP]);
+    }
+    function isTouchAnimation(pe) {
+      return (pe.irType === ProtoExpectation.ANIMATION_TYPE) &&
+              pe.containsTypeNames([INPUT_TYPE.TOUCH_MOVE]) &&
+              !pe.containsTypeNames([
+                  INPUT_TYPE.SCROLL_UPDATE, INPUT_TYPE.PINCH_UPDATE]);
+    }
+    var newPEs = [];
+    while (protoExpectations.length) {
+      var pe = protoExpectations.shift();
+      newPEs.push(pe);
+
+      // protoExpectations are sorted by start time, and we don't know whether
+      // the Tap Response or the Touch Animation will come first.
+      var peIsTapResponse = isTapResponse(pe);
+      var peIsTouchAnimation = isTouchAnimation(pe);
+      if (!peIsTapResponse && !peIsTouchAnimation)
+        continue;
+
+      for (var i = 0; i < protoExpectations.length; ++i) {
+        var otherPE = protoExpectations[i];
+
+        if (!otherPE.intersects(pe))
+          continue;
+
+        if (peIsTapResponse && !isTouchAnimation(otherPE))
+          continue;
+
+        if (peIsTouchAnimation && !isTapResponse(otherPE))
+          continue;
+
+        // pe might be the Touch Animation, but the merged ProtoExpectation
+        // should be a Response.
+        pe.irType = ProtoExpectation.RESPONSE_TYPE;
+
+        pe.merge(otherPE);
+        protoExpectations.splice(i, 1);
+        // Don't skip the next otherPE!
+        --i;
+      }
+    }
+    return newPEs;
+  }
+
+  // Check that none of the handlers accidentally ignored an input event.
+  function checkAllInputEventsHandled(sortedInputEvents, protoExpectations) {
+    var handledEvents = [];
+    protoExpectations.forEach(function(protoExpectation) {
+      protoExpectation.associatedEvents.forEach(function(event) {
+        if (handledEvents.indexOf(event) >= 0) {
+          console.error('double-handled event', event.typeName,
+              parseInt(event.start), parseInt(event.end), protoExpectation);
+          return;
+        }
+        handledEvents.push(event);
+      });
+    });
+
+    sortedInputEvents.forEach(function(event) {
+      if (handledEvents.indexOf(event) < 0) {
+        console.error('UNHANDLED INPUT EVENT!',
+            event.typeName, parseInt(event.start), parseInt(event.end));
+      }
+    });
+  }
+
+  // Find ProtoExpectations, post-process them, convert them to real IRs.
+  function findInputExpectations(modelHelper) {
+    var sortedInputEvents = getSortedInputEvents(modelHelper);
+    var protoExpectations = findProtoExpectations(
+        modelHelper, sortedInputEvents);
+    protoExpectations = postProcessProtoExpectations(protoExpectations);
+    checkAllInputEventsHandled(sortedInputEvents, protoExpectations);
+
+    var irs = [];
+    protoExpectations.forEach(function(protoExpectation) {
+      var ir = protoExpectation.createInteractionRecord(modelHelper.model);
+      if (ir)
+        irs.push(ir);
+    });
+    return irs;
+  }
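+
+  // Usage sketch (assumes a Chrome model helper named |modelHelper| is
+  // available; this is only an illustration of the exported API, not part of
+  // the code above):
+  //
+  //   var irs = tr.importer.findInputExpectations(modelHelper);
+  //   irs.forEach(function(ir) {
+  //     console.log(ir.title, ir.start, ir.end);
+  //   });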
+
+  return {
+    findInputExpectations: findInputExpectations,
+    compareEvents: compareEvents,
+    CSS_ANIMATION_TITLE: CSS_ANIMATION_TITLE
+  };
+});
+</script>
diff --git a/catapult/tracing/tracing/importer/find_load_expectations.html b/catapult/tracing/tracing/importer/find_load_expectations.html
new file mode 100644
index 0000000..7ea898d
--- /dev/null
+++ b/catapult/tracing/tracing/importer/find_load_expectations.html
@@ -0,0 +1,151 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/model/user_model/load_expectation.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.importer', function() {
+  // This global instant event marks the start of a navigation.
+  var NAVIGATION_START = 'NavigationTiming navigationStart';
+
+  // This render-process instant event marks the first contentful paint in a
+  // main frame.
+  var FIRST_CONTENTFUL_PAINT_TITLE = 'firstContentfulPaint';
+
+  function getAllFrameEvents(modelHelper) {
+    var frameEvents = [];
+    frameEvents.push.apply(frameEvents,
+        modelHelper.browserHelper.getFrameEventsInRange(
+            tr.model.helpers.IMPL_FRAMETIME_TYPE, modelHelper.model.bounds));
+
+    tr.b.iterItems(modelHelper.rendererHelpers, function(pid, renderer) {
+      frameEvents.push.apply(frameEvents, renderer.getFrameEventsInRange(
+          tr.model.helpers.IMPL_FRAMETIME_TYPE, modelHelper.model.bounds));
+    });
+    return frameEvents.sort(tr.importer.compareEvents);
+  }
+
+  // If a thread contains a typical initialization slice, then the first event
+  // on that thread is a startup event.
+  function getStartupEvents(modelHelper) {
+    function isStartupSlice(slice) {
+      return slice.title === 'BrowserMainLoop::CreateThreads';
+    }
+    var events = modelHelper.browserHelper.getAllAsyncSlicesMatching(
+        isStartupSlice);
+    var deduper = new tr.model.EventSet();
+    events.forEach(function(event) {
+      var sliceGroup = event.parentContainer.sliceGroup;
+      var slice = sliceGroup && sliceGroup.findFirstSlice();
+      if (slice)
+        deduper.push(slice);
+    });
+    return deduper.toArray();
+  }
+
+  // Match every event in |openingEvents| to the first following event from
+  // |closingEvents| and return an array containing a load interaction record
+  // for each pair.
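+  //
+  // Hypothetical example: given a startup event S spanning 0ms-5ms and frame
+  // events F1 (10ms-50ms) and F2 (60ms-80ms) in the same process, S is paired
+  // with F1, the first closing event after it, producing one LoadExpectation
+  // from 0ms to 50ms.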
+  function findLoadExpectationsInternal(
+      modelHelper, subtypeName, openingEvents, closingEvents) {
+    var loads = [];
+    openingEvents.forEach(function(openingEvent) {
+      closingEvents.forEach(function(closingEvent) {
+        // Ignore opening events that already have a closing event.
+        if (openingEvent.closingEvent)
+          return;
+
+        // Ignore closing events that already belong to an opening event.
+        if (closingEvent.openingEvent)
+          return;
+
+        // Ignore closing events before |openingEvent|.
+        if (closingEvent.start <= openingEvent.start)
+          return;
+
+        // Ignore events from different processes.
+        if (openingEvent.parentContainer.parent.pid !==
+              closingEvent.parentContainer.parent.pid)
+          return;
+
+        // This is the first closing event for this opening event, record it.
+        openingEvent.closingEvent = closingEvent;
+        closingEvent.openingEvent = openingEvent;
+        var lir = new tr.model.um.LoadExpectation(
+            modelHelper.model, subtypeName, openingEvent.start,
+            closingEvent.end - openingEvent.start);
+        lir.associatedEvents.push(openingEvent);
+        lir.associatedEvents.push(closingEvent);
+        loads.push(lir);
+      });
+    });
+    return loads;
+  }
+
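+  // Pairs each firstContentfulPaint with the most recent navigationStart.
+  // Hypothetical example: a navigationStart at 100ms followed by a
+  // firstContentfulPaint at 350ms yields one SUCCESSFUL LoadExpectation
+  // spanning 100ms to 350ms.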
+  function findRenderLoadExpectations(modelHelper) {
+    var events = [];
+    modelHelper.model.iterateAllEvents(function(event) {
+      if ((event.title === NAVIGATION_START) ||
+          (event.title === FIRST_CONTENTFUL_PAINT_TITLE))
+        events.push(event);
+    });
+    events.sort(tr.importer.compareEvents);
+
+    var loads = [];
+    var startEvent = undefined;
+    events.forEach(function(event) {
+      if (event.title === NAVIGATION_START) {
+        startEvent = event;
+      } else if (event.title === FIRST_CONTENTFUL_PAINT_TITLE) {
+        if (startEvent) {
+          loads.push(new tr.model.um.LoadExpectation(
+              modelHelper.model, tr.model.um.LOAD_SUBTYPE_NAMES.SUCCESSFUL,
+              startEvent.start, event.start - startEvent.start));
+          startEvent = undefined;
+        }
+      }
+    });
+
+    // If the trace ended between navigation start and first contentful paint,
+    // then make a LoadExpectation that ends at the end of the trace.
+    if (startEvent) {
+      loads.push(new tr.model.um.LoadExpectation(
+            modelHelper.model, tr.model.um.LOAD_SUBTYPE_NAMES.SUCCESSFUL,
+            startEvent.start, modelHelper.model.bounds.max - startEvent.start));
+    }
+
+    return loads;
+  }
+
+  // Build startup and navigation load expectations.
+  function findLoadExpectations(modelHelper) {
+    var loads = [];
+
+    var commitLoadEvents =
+        modelHelper.browserHelper.getCommitProvisionalLoadEventsInRange(
+            modelHelper.model.bounds);
+
+    // Attach frame events to every startup event.
+    var startupEvents = getStartupEvents(modelHelper);
+    var frameEvents = getAllFrameEvents(modelHelper);
+    var startupLoads = findLoadExpectationsInternal(
+        modelHelper, tr.model.um.LOAD_SUBTYPE_NAMES.STARTUP,
+        startupEvents, frameEvents);
+    loads.push.apply(loads, startupLoads);
+
+    loads.push.apply(loads, findRenderLoadExpectations(modelHelper));
+
+    return loads;
+  }
+
+  return {
+    findLoadExpectations: findLoadExpectations
+  };
+});
+</script>
diff --git a/catapult/tracing/tracing/importer/import.html b/catapult/tracing/tracing/importer/import.html
index ddc6a94..8fe2163 100644
--- a/catapult/tracing/tracing/importer/import.html
+++ b/catapult/tracing/tracing/importer/import.html
@@ -6,17 +6,22 @@
 -->
 
 <link rel='import' href='/tracing/base/base.html'>
+<link rel='import' href='/tracing/base/timing.html'>
 <link rel="import" href="/tracing/importer/empty_importer.html">
 <link rel="import" href="/tracing/importer/importer.html">
+<link rel="import" href="/tracing/importer/user_model_builder.html">
 
 <script>
 'use strict';
 
 tr.exportTo('tr.importer', function() {
+  var Timing = tr.b.Timing;
+
   function ImportOptions() {
     this.shiftWorldToZero = true;
     this.pruneEmptyContainers = true;
     this.showImportWarnings = true;
+    this.trackDetailedModelStats = false;
 
     // Callback called after
     // importers run in which more data can be added to the model, before it is
@@ -85,7 +90,7 @@
       overlay.msgEl.style.margin = '20px';
       overlay.update = function(msg) {
         this.msgEl.textContent = msg;
-      }
+      };
       overlay.visible = true;
 
       var promise =
@@ -117,7 +122,7 @@
 
       var importers = [];
 
-      lastTask = lastTask.after(function createImports() {
+      lastTask = lastTask.timedAfter('TraceImport', function createImports() {
         // Copy the traces array, we may mutate it.
         traces = traces.slice(0);
         progressMeter.update('Creating importers...');
@@ -155,27 +160,48 @@
         });
       }, this);
 
-      // Run the import.
-      lastTask = lastTask.after(function runImport(task) {
+      // We import clock sync markers before all other events. This is necessary
+      // because we need the clock sync markers in order to know by how much we
+      // need to shift the timestamps of other events.
+      lastTask = lastTask.timedAfter('TraceImport',
+                                     function importClockSyncMarkers(task) {
         importers.forEach(function(importer, index) {
-          task.subTask(function runImportEventsOnOneImporter() {
-            progressMeter.update(
-                'Importing ' + (index + 1) + ' of ' + importers.length);
-            importer.importEvents();
-          }, this);
+          task.subTask(Timing.wrapNamedFunction(
+              'TraceImport', importer.importerName,
+              function runImportClockSyncMarkersOnOneImporter() {
+                progressMeter.update(
+                    'Importing clock sync markers ' + (index + 1) + ' of ' +
+                      importers.length);
+                importer.importClockSyncMarkers();
+              }), this);
+        }, this);
+      }, this);
+
+      // Run the import.
+      lastTask = lastTask.timedAfter('TraceImport', function runImport(task) {
+        importers.forEach(function(importer, index) {
+          task.subTask(Timing.wrapNamedFunction(
+              'TraceImport', importer.importerName,
+              function runImportEventsOnOneImporter() {
+                progressMeter.update(
+                    'Importing ' + (index + 1) + ' of ' + importers.length);
+                importer.importEvents();
+              }), this);
         }, this);
       }, this);
 
       // Run the customizeModelCallback if needed.
       if (this.importOptions_.customizeModelCallback) {
-        lastTask = lastTask.after(function runCustomizeCallbacks(task) {
+        lastTask = lastTask.timedAfter('TraceImport',
+                                       function runCustomizeCallbacks(task) {
           this.importOptions_.customizeModelCallback(this.model_);
         }, this);
       }
 
       // Import sample data.
-      lastTask = lastTask.after(function(task) {
-        importers.forEach(function importSampleData(importer, index) {
+      lastTask = lastTask.timedAfter('TraceImport',
+                                     function importSampleData(task) {
+        importers.forEach(function(importer, index) {
           progressMeter.update(
               'Importing sample data ' + (index + 1) + '/' + importers.length);
           importer.importSampleData();
@@ -183,14 +209,15 @@
       }, this);
 
       // Autoclose open slices and create subSlices.
-      lastTask = lastTask.after(function runAutoclosers() {
+      lastTask = lastTask.timedAfter('TraceImport', function runAutoclosers() {
         progressMeter.update('Autoclosing open slices...');
         this.model_.autoCloseOpenSlices();
         this.model_.createSubSlices();
       }, this);
 
       // Finalize import.
-      lastTask = lastTask.after(function finalizeImport(task) {
+      lastTask = lastTask.timedAfter('TraceImport',
+                                     function finalizeImport(task) {
         importers.forEach(function(importer, index) {
           progressMeter.update(
               'Finalizing import ' + (index + 1) + '/' + importers.length);
@@ -199,28 +226,31 @@
       }, this);
 
       // Run preinit.
-      lastTask = lastTask.after(function runPreinits() {
+      lastTask = lastTask.timedAfter('TraceImport', function runPreinits() {
         progressMeter.update('Initializing objects (step 1/2)...');
         this.model_.preInitializeObjects();
       }, this);
 
       // Prune empty containers.
       if (this.importOptions_.pruneEmptyContainers) {
-        lastTask = lastTask.after(function runPruneEmptyContainers() {
+        lastTask = lastTask.timedAfter('TraceImport',
+                                       function runPruneEmptyContainers() {
           progressMeter.update('Pruning empty containers...');
           this.model_.pruneEmptyContainers();
         }, this);
       }
 
       // Merge kernel and userland slices on each thread.
-      lastTask = lastTask.after(function runMergeKernelWithuserland() {
+      lastTask = lastTask.timedAfter('TraceImport',
+                                     function runMergeKernelWithuserland() {
         progressMeter.update('Merging kernel with userland...');
         this.model_.mergeKernelWithUserland();
       }, this);
 
       // Create auditors
       var auditors = [];
-      lastTask = lastTask.after(function createAuditorsAndRunAnnotate() {
+      lastTask = lastTask.timedAfter('TraceImport',
+                                     function createAuditorsAndRunAnnotate() {
         progressMeter.update('Adding arbitrary data to model...');
         auditors = this.importOptions_.auditorConstructors.map(
           function(auditorConstructor) {
@@ -232,78 +262,94 @@
         });
       }, this);
 
-      lastTask = lastTask.after(function computeWorldBounds() {
+      lastTask = lastTask.timedAfter('TraceImport',
+                                     function computeWorldBounds() {
         progressMeter.update('Computing final world bounds...');
         this.model_.computeWorldBounds(this.importOptions_.shiftWorldToZero);
       }, this);
 
       // Build the flow event interval tree.
-      lastTask = lastTask.after(function buildFlowEventIntervalTree() {
+      lastTask = lastTask.timedAfter('TraceImport',
+                                     function buildFlowEventIntervalTree() {
         progressMeter.update('Building flow event map...');
         this.model_.buildFlowEventIntervalTree();
       }, this);
 
       // Join refs.
-      lastTask = lastTask.after(function joinRefs() {
+      lastTask = lastTask.timedAfter('TraceImport', function joinRefs() {
         progressMeter.update('Joining object refs...');
-        for (var i = 0; i < importers.length; i++)
-          importers[i].joinRefs();
+        this.model_.joinRefs();
       }, this);
 
       // Delete any undeleted objects.
-      lastTask = lastTask.after(function cleanupUndeletedObjects() {
+      lastTask = lastTask.timedAfter('TraceImport',
+                                     function cleanupUndeletedObjects() {
         progressMeter.update('Cleaning up undeleted objects...');
         this.model_.cleanupUndeletedObjects();
       }, this);
 
       // Sort global and process memory dumps.
-      lastTask = lastTask.after(function sortMemoryDumps() {
+      lastTask = lastTask.timedAfter('TraceImport', function sortMemoryDumps() {
         progressMeter.update('Sorting memory dumps...');
         this.model_.sortMemoryDumps();
       }, this);
 
-      // Calculate memory dump graph attributes.
-      lastTask = lastTask.after(function calculateMemoryGraphAttributes() {
-        progressMeter.update('Calculating memory dump graph attributes...');
-        this.model_.calculateMemoryGraphAttributes();
+      // Finalize memory dump graphs.
+      lastTask = lastTask.timedAfter('TraceImport',
+                                     function finalizeMemoryGraphs() {
+        progressMeter.update('Finalizing memory dump graphs...');
+        this.model_.finalizeMemoryGraphs();
       }, this);
 
       // Run initializers.
-      lastTask = lastTask.after(function initializeObjects() {
+      lastTask = lastTask.timedAfter('TraceImport',
+                                     function initializeObjects() {
         progressMeter.update('Initializing objects (step 2/2)...');
         this.model_.initializeObjects();
       }, this);
 
       // Build event indices mapping from an event id to all flow events.
-      lastTask = lastTask.after(function buildEventIndices() {
+      lastTask = lastTask.timedAfter('TraceImport',
+                                     function buildEventIndices() {
         progressMeter.update('Building event indices...');
         this.model_.buildEventIndices();
       }, this);
 
+      // Build the UserModel.
+      lastTask = lastTask.timedAfter('TraceImport', function buildUserModel() {
+        progressMeter.update('Building UserModel...');
+        var userModelBuilder = new tr.importer.UserModelBuilder(this.model_);
+        userModelBuilder.buildUserModel();
+      }, this);
+
+      // Sort Expectations.
+      lastTask = lastTask.timedAfter('TraceImport',
+                                     function sortExpectations() {
+        progressMeter.update('Sorting user expectations...');
+        this.model_.userModel.sortExpectations();
+      }, this);
+
       // Run audits.
-      lastTask = lastTask.after(function runAudits() {
+      lastTask = lastTask.timedAfter('TraceImport', function runAudits() {
         progressMeter.update('Running auditors...');
         auditors.forEach(function(auditor) {
           auditor.runAudit();
         });
       }, this);
 
-      lastTask = lastTask.after(function sortInteractionRecords() {
-        progressMeter.update('Updating interaction records...');
-        this.model_.sortInteractionRecords();
-      }, this);
-
-      lastTask = lastTask.after(function sortAlerts() {
+      lastTask = lastTask.timedAfter('TraceImport', function sortAlerts() {
         progressMeter.update('Updating alerts...');
         this.model_.sortAlerts();
       }, this);
 
-      lastTask = lastTask.after(function lastUpdateBounds() {
+      lastTask = lastTask.timedAfter('TraceImport',
+                                     function lastUpdateBounds() {
         progressMeter.update('Update bounds...');
         this.model_.updateBounds();
       }, this);
 
-      lastTask = lastTask.after(function addModelWarnings() {
+      lastTask = lastTask.timedAfter('TraceImport',
+                                     function addModelWarnings() {
         progressMeter.update('Looking for warnings...');
         // Log an import warning if the clock is low resolution.
         if (!this.model_.isTimeHighResolution) {
@@ -332,13 +378,11 @@
     },
 
     hasEventDataDecoder_: function(importers) {
-      if (importers.length === 0)
-        return false;
-
       for (var i = 0; i < importers.length; ++i) {
         if (!importers[i].isTraceDataContainer())
           return true;
       }
+
       return false;
     }
   };
diff --git a/catapult/tracing/tracing/importer/import_test.html b/catapult/tracing/tracing/importer/import_test.html
index c4a03cc..a6e11b7 100644
--- a/catapult/tracing/tracing/importer/import_test.html
+++ b/catapult/tracing/tracing/importer/import_test.html
@@ -5,6 +5,7 @@
 found in the LICENSE file.
 -->
 
+<link rel="import" href="/tracing/base/base64.html">
 <link rel="import" href="/tracing/core/test_utils.html">
 <link rel="import" href="/tracing/extras/importer/linux_perf/ftrace_importer.html">
 <link rel="import" href="/tracing/extras/importer/trace_event_importer.html">
@@ -16,6 +17,8 @@
 'use strict';
 
 tr.b.unittest.testSuite(function() {
+  var Base64 = tr.b.Base64;
+
   test('canImportEmpty', function() {
     var m = tr.c.TestUtils.newModelWithEvents([]);
     assert.isDefined(m.modelIndices);
@@ -60,7 +63,8 @@
   });
 
   test('canImportCompressedSingleSubtrace', function() {
-    var compressedTrace = atob('H4sIACKfFVUC/wsuLUpLTE51y8nMS08t0jVSUIg2MDCMV' +
+    var compressedTrace = Base64.atob(
+        'H4sIACKfFVUC/wsuLUpLTE51y8nMS08t0jVSUIg2MDCMV' +
         'dDT0zNUMDQwMNAzsFIAIqcaw5qSxOJsR65gfDqMEDpcATiC61ZbAAAA');
     var m = tr.c.TestUtils.newModelWithEvents([compressedTrace]);
     assert.equal(1, tr.b.dictionaryValues(m.processes).length);
diff --git a/catapult/tracing/tracing/importer/importer.html b/catapult/tracing/tracing/importer/importer.html
index 847519d..b981c1b 100644
--- a/catapult/tracing/tracing/importer/importer.html
+++ b/catapult/tracing/tracing/importer/importer.html
@@ -18,6 +18,10 @@
   Importer.prototype = {
     __proto__: Object.prototype,
 
+    get importerName() {
+      return 'Importer';
+    },
+
     /**
      * Called by the Model to check whether the importer type stores the actual
      * trace data or just holds it as container for further extraction.
@@ -34,6 +38,12 @@
     },
 
     /**
+     * Called to import clock sync markers into the Model.
+     */
+    importClockSyncMarkers: function() {
+    },
+
+    /**
      * Called to import events into the Model.
      */
     importEvents: function() {
@@ -50,13 +60,6 @@
      * events.
      */
     finalizeImport: function() {
-    },
-
-    /**
-     * Called by the Model to join references between objects, after final
-     * model bounds have been computed.
-     */
-    joinRefs: function() {
     }
   };
 
diff --git a/catapult/tracing/tracing/importer/proto_expectation.html b/catapult/tracing/tracing/importer/proto_expectation.html
new file mode 100644
index 0000000..31263c9
--- /dev/null
+++ b/catapult/tracing/tracing/importer/proto_expectation.html
@@ -0,0 +1,172 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/base/base.html">
+<link rel="import" href="/tracing/base/range_utils.html">
+<link rel="import" href="/tracing/core/auditor.html">
+<link rel="import" href="/tracing/model/event_info.html">
+<link rel="import" href="/tracing/model/user_model/animation_expectation.html">
+<link rel="import" href="/tracing/model/user_model/response_expectation.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.importer', function() {
+  // This is an intermediate data format between InputLatencyAsyncSlices and
+  // Response and Animation IRs.
+  function ProtoExpectation(irType, name) {
+    this.irType = irType;
+    this.names = new Set(name ? [name] : undefined);
+    this.start = Infinity;
+    this.end = -Infinity;
+    this.associatedEvents = new tr.model.EventSet();
+    this.isAnimationBegin = false;
+  }
+
+  ProtoExpectation.RESPONSE_TYPE = 'r';
+  ProtoExpectation.ANIMATION_TYPE = 'a';
+
+  // Explicitly ignore some input events to allow
+  // UserModelBuilder.checkAllInputEventsHandled() to determine which events
+  // were unintentionally ignored due to a bug.
+  ProtoExpectation.IGNORED_TYPE = 'ignored';
+
+  ProtoExpectation.prototype = {
+    get isValid() {
+      return this.end > this.start;
+    },
+
+    // Return true if any associatedEvent's typeName is in typeNames.
+    containsTypeNames: function(typeNames) {
+      for (var i = 0; i < this.associatedEvents.length; ++i) {
+        if (typeNames.indexOf(this.associatedEvents[i].typeName) >= 0)
+          return true;
+      }
+      return false;
+    },
+
+    containsSliceTitle: function(title) {
+      for (var i = 0; i < this.associatedEvents.length; ++i) {
+        if (title === this.associatedEvents[i].title)
+          return true;
+      }
+      return false;
+    },
+
+    createInteractionRecord: function(model) {
+      if (!this.isValid) {
+        console.error('Invalid ProtoExpectation: ' + this.debug() +
+                      ' File a bug with this trace!');
+        return undefined;
+      }
+
+      var initiatorTitles = [];
+      this.names.forEach(function(name) {
+        initiatorTitles.push(name);
+      });
+      initiatorTitles = initiatorTitles.sort().join(',');
+
+      var duration = this.end - this.start;
+
+      var ir = undefined;
+      switch (this.irType) {
+        case ProtoExpectation.RESPONSE_TYPE:
+          ir = new tr.model.um.ResponseExpectation(
+              model, initiatorTitles, this.start, duration,
+              this.isAnimationBegin);
+          break;
+        case ProtoExpectation.ANIMATION_TYPE:
+          ir = new tr.model.um.AnimationExpectation(
+              model, initiatorTitles, this.start, duration);
+          break;
+      }
+      if (!ir)
+        return undefined;
+
+      ir.sourceEvents.addEventSet(this.associatedEvents);
+
+      function pushAssociatedEvents(event) {
+        ir.associatedEvents.push(event);
+
+        // |event| is either an InputLatencyAsyncSlice (which collects all of
+        // its associated events transitively) or a CSS Animation (which doesn't
+        // have any associated events). So this does not need to recurse.
+        if (event.associatedEvents)
+          ir.associatedEvents.addEventSet(event.associatedEvents);
+      }
+
+      this.associatedEvents.forEach(function(event) {
+        pushAssociatedEvents(event);
+
+        // Old-style InputLatencyAsyncSlices have subSlices.
+        if (event.subSlices)
+          event.subSlices.forEach(pushAssociatedEvents);
+      });
+
+      return ir;
+    },
+
+    // Merge the other ProtoExpectation into this one.
+    // The irTypes need not match: ignored ProtoExpectations might be merged
+    // into overlapping ProtoExpectations, and Touch-only Animations are merged
+    // into Tap Responses.
+    merge: function(other) {
+      other.names.forEach(function(name) { this.names.add(name); }.bind(this));
+
+      // Don't use pushEvent(), which would lose any specially-set start/end.
+      this.associatedEvents.addEventSet(other.associatedEvents);
+      this.start = Math.min(this.start, other.start);
+      this.end = Math.max(this.end, other.end);
+      if (other.isAnimationBegin)
+        this.isAnimationBegin = true;
+    },
+
+    // Include |event| in this ProtoExpectation, expanding start/end to include
+    // it.
+    pushEvent: function(event) {
+      // Usually, this method will be called while iterating over a list of
+      // events sorted by start time, so this method won't usually change
+      // this.start. However, this will sometimes be called for
+      // ProtoExpectations created by previous handlers, in which case
+      // event.start could possibly be before this.start.
+      this.start = Math.min(this.start, event.start);
+      this.end = Math.max(this.end, event.end);
+      this.associatedEvents.push(event);
+    },
+
+    // Returns true if timestamp is contained in this ProtoExpectation.
+    containsTimestampInclusive: function(timestamp) {
+      return (this.start <= timestamp) && (timestamp <= this.end);
+    },
+
+    // Return true if the other event intersects this ProtoExpectation.
+    intersects: function(other) {
+      // http://stackoverflow.com/questions/325933
+      return (other.start < this.end) && (other.end > this.start);
+    },
+
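+    // Return true if |event| starts before this ProtoExpectation's end plus
+    // |threshold|.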
+    isNear: function(event, threshold) {
+      return (this.end + threshold) > event.start;
+    },
+
+    // Return a string describing this ProtoExpectation for debugging.
+    debug: function() {
+      var debugString = this.irType + '(';
+      debugString += parseInt(this.start) + ' ';
+      debugString += parseInt(this.end);
+      this.associatedEvents.forEach(function(event) {
+        debugString += ' ' + event.typeName;
+      });
+      return debugString + ')';
+    }
+  };
+
+  return {
+    ProtoExpectation: ProtoExpectation
+  };
+});
+</script>
diff --git a/catapult/tracing/tracing/importer/user_expectation_verifier.html b/catapult/tracing/tracing/importer/user_expectation_verifier.html
new file mode 100644
index 0000000..bbc3fd8
--- /dev/null
+++ b/catapult/tracing/tracing/importer/user_expectation_verifier.html
@@ -0,0 +1,77 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/extras/chrome/chrome_test_utils.html">
+<link rel="import" href="/tracing/importer/user_model_builder.html">
+
+<script>
+'use strict';
+tr.exportTo('tr.importer', function() {
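+  // Order events by start time, breaking ties by guid.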
+  function compareEvents(x, y) {
+    if (x.start !== y.start)
+      return x.start - y.start;
+    return x.guid - y.guid;
+  }
+
+  function UserExpectationVerifier() {
+    this.customizeModelCallback_ = undefined;
+    this.expectedIRs_ = undefined;
+  }
+
+  UserExpectationVerifier.prototype = {
+    set customizeModelCallback(cmc) {
+      this.customizeModelCallback_ = cmc;
+    },
+
+    // |irs| must be sorted by start time.
+    set expectedIRs(irs) {
+      this.expectedIRs_ = irs;
+    },
+
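+    // Build a model via customizeModelCallback and compare its
+    // userModel.expectations against expectedIRs field by field
+    // (title, name, start, end, eventCount, isAnimationBegin).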
+    verify: function() {
+      var model = tr.e.chrome.ChromeTestUtils.newChromeModel(
+          this.customizeModelCallback_);
+      var actualUEs = model.userModel.expectations;
+
+      var failure = undefined;
+      try {
+        assert.equal(this.expectedIRs_.length, actualUEs.length);
+        for (var i = 0; i < this.expectedIRs_.length; ++i) {
+          var at = 'IRs[' + i + '].';
+          assert.equal(this.expectedIRs_[i].title, actualUEs[i].title,
+                      at + 'title');
+          if (this.expectedIRs_[i].name !== undefined) {
+            assert.equal(this.expectedIRs_[i].name, actualUEs[i].name,
+                        at + 'name');
+          }
+          assert.equal(this.expectedIRs_[i].start, actualUEs[i].start,
+                      at + 'start');
+          assert.equal(this.expectedIRs_[i].end, actualUEs[i].end, at + 'end');
+          assert.equal(this.expectedIRs_[i].eventCount,
+                      actualUEs[i].associatedEvents.length, at + 'eventCount');
+          if (actualUEs[i] instanceof tr.model.um.ResponseExpectation)
+            assert.equal(this.expectedIRs_[i].isAnimationBegin || false,
+                         actualUEs[i].isAnimationBegin,
+                         at + 'isAnimationBegin');
+        }
+      } catch (caught) {
+        failure = caught;
+      }
+
+      var debug = !tr.isHeadless && (
+          location.search.split('&').indexOf('debug') >= 0);
+      if (!failure && !debug)
+        return;
+
+      if (failure)
+        throw failure;
+    }
+  };
+
+  return {UserExpectationVerifier: UserExpectationVerifier};
+});
+</script>
diff --git a/catapult/tracing/tracing/importer/user_model_builder.html b/catapult/tracing/tracing/importer/user_model_builder.html
new file mode 100644
index 0000000..6159b7d
--- /dev/null
+++ b/catapult/tracing/tracing/importer/user_model_builder.html
@@ -0,0 +1,227 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/base/base.html">
+<link rel="import" href="/tracing/base/range_utils.html">
+<link rel="import" href="/tracing/core/auditor.html">
+<link rel="import" href="/tracing/extras/chrome/cc/input_latency_async_slice.html">
+<link rel="import" href="/tracing/importer/find_input_expectations.html">
+<link rel="import" href="/tracing/importer/find_load_expectations.html">
+<link rel="import" href="/tracing/model/event_info.html">
+<link rel="import" href="/tracing/model/ir_coverage.html">
+<link rel="import" href="/tracing/model/user_model/idle_expectation.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.importer', function() {
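+  // Idle gaps shorter than this many milliseconds are not turned into
+  // IdleExpectations.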
+  var INSIGNIFICANT_MS = 1;
+
+  function UserModelBuilder(model) {
+    this.model = model;
+    this.modelHelper = model.getOrCreateHelper(
+        tr.model.helpers.ChromeModelHelper);
+  }
+
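+  // UserModelBuilder only supports models that contain a browser process.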
+  UserModelBuilder.supportsModelHelper = function(modelHelper) {
+    return modelHelper.browserHelper !== undefined;
+  };
+
+  UserModelBuilder.prototype = {
+    buildUserModel: function() {
+      if (!this.modelHelper || !this.modelHelper.browserHelper)
+        return;
+
+      var expectations = undefined;
+      try {
+        expectations = this.findUserExpectations();
+        // There are not currently any known cases when this could throw.
+      } catch (error) {
+        this.model.importWarning({
+          type: 'UserModelBuilder',
+          message: error,
+          showToUser: true
+        });
+        return;
+      }
+      expectations.forEach(function(expectation) {
+        this.model.userModel.expectations.push(expectation);
+      }, this);
+
+      // TODO(benjhayden) Find Gestures here.
+    },
+
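+    // Return Load, Input, and (last) Idle expectations for this model.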
+    findUserExpectations: function() {
+      var expectations = [];
+      expectations.push.apply(expectations, tr.importer.findLoadExpectations(
+          this.modelHelper));
+      expectations.push.apply(expectations, tr.importer.findInputExpectations(
+          this.modelHelper));
+      // findIdleExpectations must be called last!
+      expectations.push.apply(
+          expectations, this.findIdleExpectations(expectations));
+      this.collectUnassociatedEvents_(expectations);
+      return expectations;
+    },
+
+    // Find all unassociated top-level ThreadSlices. If they start during an
+    // Idle or Load IR, then add their entire hierarchy to that IR.
+    collectUnassociatedEvents_: function(rirs) {
+      var vacuumIRs = [];
+      rirs.forEach(function(ir) {
+        if (ir instanceof tr.model.um.LoadExpectation ||
+            ir instanceof tr.model.um.IdleExpectation)
+          vacuumIRs.push(ir);
+      });
+      if (vacuumIRs.length === 0)
+        return;
+
+      var allAssociatedEvents = tr.model.getAssociatedEvents(rirs);
+      var unassociatedEvents = tr.model.getUnassociatedEvents(
+          this.model, allAssociatedEvents);
+
+      unassociatedEvents.forEach(function(event) {
+        if (!(event instanceof tr.model.ThreadSlice))
+          return;
+
+        if (!event.isTopLevel)
+          return;
+
+        for (var iri = 0; iri < vacuumIRs.length; ++iri) {
+          var ir = vacuumIRs[iri];
+
+          if ((event.start >= ir.start) &&
+              (event.start < ir.end)) {
+            ir.associatedEvents.addEventSet(event.entireHierarchy);
+            return;
+          }
+        }
+      });
+    },
+
+    // Fill in the empty space between IRs with IdleIRs.
+    findIdleExpectations: function(otherIRs) {
+      if (this.model.bounds.isEmpty)
+        return;
+      var emptyRanges = tr.b.findEmptyRangesBetweenRanges(
+          tr.b.convertEventsToRanges(otherIRs),
+          this.model.bounds);
+      var irs = [];
+      var model = this.model;
+      emptyRanges.forEach(function(range) {
+        // Ignore insignificantly tiny idle ranges.
+        if (range.max < (range.min + INSIGNIFICANT_MS))
+          return;
+        irs.push(new tr.model.um.IdleExpectation(
+            model, range.min, range.max - range.min));
+      });
+      return irs;
+    }
+  };
+
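+  // Generate customizeModelCallback source lines that recreate the source
+  // events of |model|'s user expectations, for use in generated test cases.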
+  function createCustomizeModelLinesFromModel(model) {
+    var modelLines = [];
+    modelLines.push('      audits.addEvent(model.browserMain,');
+    modelLines.push('          {title: \'model start\', start: 0, end: 1});');
+
+    var typeNames = {};
+    for (var typeName in tr.e.cc.INPUT_EVENT_TYPE_NAMES) {
+      typeNames[tr.e.cc.INPUT_EVENT_TYPE_NAMES[typeName]] = typeName;
+    }
+
+    var modelEvents = new tr.model.EventSet();
+    model.userModel.expectations.forEach(function(ir, index) {
+      modelEvents.addEventSet(ir.sourceEvents);
+    });
+    modelEvents = modelEvents.toArray();
+    modelEvents.sort(tr.importer.compareEvents);
+
+    modelEvents.forEach(function(event) {
+      var startAndEnd = 'start: ' + parseInt(event.start) + ', ' +
+                        'end: ' + parseInt(event.end) + '});';
+      if (event instanceof tr.e.cc.InputLatencyAsyncSlice) {
+        modelLines.push('      audits.addInputEvent(model, INPUT_TYPE.' +
+                        typeNames[event.typeName] + ',');
+      } else if (event.title === 'RenderFrameImpl::didCommitProvisionalLoad') {
+        modelLines.push('      audits.addCommitLoadEvent(model,');
+      } else if (event.title ===
+                 'InputHandlerProxy::HandleGestureFling::started') {
+        modelLines.push('      audits.addFlingAnimationEvent(model,');
+      } else if (event.title === tr.model.helpers.IMPL_RENDERING_STATS) {
+        modelLines.push('      audits.addFrameEvent(model,');
+      } else if (event.title === tr.importer.CSS_ANIMATION_TITLE) {
+        modelLines.push('      audits.addEvent(model.rendererMain, {');
+        modelLines.push('        title: \'Animation\', ' + startAndEnd);
+        return;
+      } else {
+        throw ('You must extend createCustomizeModelLinesFromModel()' +
+               ' to support this event:\n' + event.title + '\n');
+      }
+      modelLines.push('          {' + startAndEnd);
+    });
+
+    modelLines.push('      audits.addEvent(model.browserMain,');
+    modelLines.push('          {' +
+                    'title: \'model end\', ' +
+                    'start: ' + (parseInt(model.bounds.max) - 1) + ', ' +
+                    'end: ' + parseInt(model.bounds.max) + '});');
+    return modelLines;
+  }
+
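+  // Generate the expectedIRs array literal lines from |model|'s user
+  // expectations.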
+  function createExpectedIRLinesFromModel(model) {
+    var expectedLines = [];
+    var irCount = model.userModel.expectations.length;
+    model.userModel.expectations.forEach(function(ir, index) {
+      var irString = '      {';
+      irString += 'title: \'' + ir.title + '\', ';
+      irString += 'start: ' + parseInt(ir.start) + ', ';
+      irString += 'end: ' + parseInt(ir.end) + ', ';
+      irString += 'eventCount: ' + ir.sourceEvents.length;
+      irString += '}';
+      if (index < (irCount - 1))
+        irString += ',';
+      expectedLines.push(irString);
+    });
+    return expectedLines;
+  }
+
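+  // Assemble a complete test() case string for the trace named in the URL
+  // hash, using the two generator functions above.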
+  function createIRFinderTestCaseStringFromModel(model) {
+    var filename = window.location.hash.substr(1);
+    var testName = filename.substr(filename.lastIndexOf('/') + 1);
+    testName = testName.substr(0, testName.indexOf('.'));
+
+    // createCustomizeModelLinesFromModel() throws an error if there's an
+    // unsupported event.
+    try {
+      var testLines = [];
+      testLines.push('  /*');
+      testLines.push('    This test was generated from');
+      testLines.push('    ' + filename + '');
+      testLines.push('   */');
+      testLines.push('  test(\'' + testName + '\', function() {');
+      testLines.push('    var verifier = new UserExpectationVerifier();');
+      testLines.push('    verifier.customizeModelCallback = function(model) {');
+      testLines.push.apply(testLines,
+          createCustomizeModelLinesFromModel(model));
+      testLines.push('    };');
+      testLines.push('    verifier.expectedIRs = [');
+      testLines.push.apply(testLines, createExpectedIRLinesFromModel(model));
+      testLines.push('    ];');
+      testLines.push('    verifier.verify();');
+      testLines.push('  });');
+      return testLines.join('\n');
+    } catch (error) {
+      return error;
+    }
+  }
+
+  return {
+    UserModelBuilder: UserModelBuilder,
+    createIRFinderTestCaseStringFromModel: createIRFinderTestCaseStringFromModel
+  };
+});
+</script>
diff --git a/catapult/tracing/tracing/importer/user_model_builder_test.html b/catapult/tracing/tracing/importer/user_model_builder_test.html
new file mode 100644
index 0000000..765d718
--- /dev/null
+++ b/catapult/tracing/tracing/importer/user_model_builder_test.html
@@ -0,0 +1,821 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/extras/chrome/cc/input_latency_async_slice.html">
+<link rel="import" href="/tracing/extras/chrome/chrome_test_utils.html">
+<link rel="import" href="/tracing/importer/user_expectation_verifier.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  var INPUT_TYPE = tr.e.cc.INPUT_EVENT_TYPE_NAMES;
+  var chrome_test_utils = tr.e.chrome.ChromeTestUtils;
+  var UserExpectationVerifier = tr.importer.UserExpectationVerifier;
+
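+  // Add a frame event spanning |event| and a flow event with |event|'s id
+  // pointing at that frame.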
+  function addFrameEventForInput(model, event) {
+    var frame = chrome_test_utils.addFrameEvent(model,
+        {start: event.start, end: event.end, isTopLevel: true});
+    model.flowEvents.push(tr.c.TestUtils.newFlowEventEx({
+      id: event.id,
+      start: event.start,
+      end: event.end,
+      startSlice: frame,
+      endSlice: frame
+    }));
+  }
+
+  test('empty', function() {
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+    };
+    verifier.expectedIRs = [
+    ];
+    verifier.verify();
+  });
+
+  test('slowMouseMoveResponses', function() {
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addInputEvent(
+          model, INPUT_TYPE.MOUSE_DOWN, {start: 0, end: 10});
+      var mouseMove = chrome_test_utils.addInputEvent(
+          model, INPUT_TYPE.MOUSE_MOVE, {start: 10, end: 20, id: '0x100'});
+      addFrameEventForInput(model, mouseMove);
+
+      mouseMove = chrome_test_utils.addInputEvent(
+          model, INPUT_TYPE.MOUSE_MOVE, {start: 70, end: 80, id: '0x101'});
+      addFrameEventForInput(model, mouseMove);
+
+      mouseMove = chrome_test_utils.addInputEvent(
+          model, INPUT_TYPE.MOUSE_MOVE, {start: 130, end: 140, id: '0x102'});
+      addFrameEventForInput(model, mouseMove);
+    };
+    verifier.expectedIRs = [
+      {title: 'Idle', start: 0, end: 10, eventCount: 0},
+      {title: 'Mouse Response', start: 10, end: 20, eventCount: 4},
+      {title: 'Idle', start: 20, end: 70, eventCount: 0},
+      {title: 'Mouse Response', start: 70, end: 80, eventCount: 3},
+      {title: 'Idle', start: 80, end: 130, eventCount: 0},
+      {title: 'Mouse Response', start: 130, end: 140, eventCount: 3}
+    ];
+    verifier.verify();
+  });
+
+  test('mouseEventResponses', function() {
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      var mouseDown = chrome_test_utils.addInputEvent(
+          model, INPUT_TYPE.MOUSE_DOWN, {start: 0, end: 50, id: '0x100'});
+      addFrameEventForInput(model, mouseDown);
+
+      var mouseUp = chrome_test_utils.addInputEvent(model, INPUT_TYPE.MOUSE_UP,
+          {start: 50, end: 100, id: '0x101'});
+      addFrameEventForInput(model, mouseUp);
+
+      var mouseMove = chrome_test_utils.addInputEvent(
+          model, INPUT_TYPE.MOUSE_MOVE, {start: 200, end: 250, id: '0x102'});
+      addFrameEventForInput(model, mouseMove);
+    };
+    verifier.expectedIRs = [
+      {title: 'Mouse Response', start: 0, end: 50, eventCount: 3},
+      {title: 'Mouse Response', start: 50, end: 100, eventCount: 3},
+      {title: 'Idle', start: 100, end: 200, eventCount: 0},
+      {title: 'Mouse Response', start: 200, end: 250, eventCount: 3}
+    ];
+    verifier.verify();
+  });
+
+  test('mouseEventsIgnored', function() {
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.MOUSE_MOVE,
+          {start: 0, end: 50});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.MOUSE_DOWN,
+          {start: 50, end: 100});
+    };
+    verifier.expectedIRs = [
+      {title: 'Idle', start: 0, end: 100, eventCount: 0}
+    ];
+    verifier.verify();
+  });
+
+  test('unassociatedEvents', function() {
+    // Unassociated ThreadSlices that start during an Idle should be associated
+    // with it. Expect the Idle IR to have 2 associated events: both of the
+    // ThreadSlices in the model.
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      var start = tr.c.TestUtils.newSliceEx(
+          {title: 'model start', start: 0, end: 1, type: tr.model.ThreadSlice});
+      start.isTopLevel = true;
+      model.browserMain.sliceGroup.pushSlice(start);
+
+      var end = tr.c.TestUtils.newSliceEx(
+          {title: 'model end', start: 9, end: 10, type: tr.model.ThreadSlice});
+      end.isTopLevel = true;
+      model.browserMain.sliceGroup.pushSlice(end);
+    };
+    verifier.expectedIRs = [
+      {title: 'Idle', start: 0, end: 10, eventCount: 2}
+    ];
+    verifier.verify();
+  });
+
+  test('stillLoading', function() {
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addFrameEvent(model, {start: 0, end: 10});
+      chrome_test_utils.addNavigationStartEvent(model, {start: 10});
+      chrome_test_utils.addFrameEvent(model, {start: 19, end: 20});
+    };
+    verifier.expectedIRs = [
+      {title: 'Idle', start: 0, end: 10, eventCount: 0},
+      {title: 'Successful Load', start: 10, end: 20, eventCount: 0}
+    ];
+    verifier.verify();
+  });
+
+  test('overlappingIdleAndLoadCollectUnassociatedEvents', function() {
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addFrameEvent(model, {start: 0, end: 10});
+      chrome_test_utils.addNavigationStartEvent(model, {start: 10});
+      chrome_test_utils.addFirstContentfulPaintEvent(model, {start: 30});
+      chrome_test_utils.addFrameEvent(model, {start: 35, end: 40});
+      // 3 Idle events.
+      chrome_test_utils.addRenderingEvent(model, {start: 5, end: 15});
+      chrome_test_utils.addRenderingEvent(model, {start: 11, end: 15});
+      chrome_test_utils.addRenderingEvent(model, {start: 13, end: 15});
+      // 1 Idle event.
+      chrome_test_utils.addRenderingEvent(model, {start: 35, end: 36});
+    };
+    verifier.expectedIRs = [
+      {title: 'Idle', start: 0, end: 10, eventCount: 3},
+      {title: 'Successful Load', start: 10, end: 30, eventCount: 0},
+      {title: 'Idle', start: 30, end: 40, eventCount: 1}
+    ];
+    verifier.verify();
+  });
+
+  test('flingFlingFling', function() {
+    // This trace gave me so many different kinds of trouble that I'm just going
+    // to copy it straight in here, without trying to clarify it at all.
+    // measurmt-traces/mobile/cnet_fling_up_fling_down_motox_2013.json
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addFrameEvent(model, {start: 0, end: 10});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_START,
+          {start: 919, end: 998});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.FLING_CANCEL,
+          {start: 919, end: 1001});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TAP_DOWN,
+          {start: 919, end: 1001});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TAP_CANCEL,
+          {start: 974, end: 1020});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_BEGIN,
+          {start: 974, end: 1020});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
+          {start: 974, end: 1040});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
+          {start: 974, end: 1054});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
+          {start: 990, end: 1021});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
+          {start: 990, end: 1052});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
+          {start: 1006, end: 1021});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
+          {start: 1022, end: 1036});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
+          {start: 1022, end: 1052});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
+          {start: 1038, end: 1049});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
+          {start: 1038, end: 1068});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_END,
+          {start: 1046, end: 1050});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.FLING_START,
+          {start: 1046, end: 1077});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_START,
+          {start: 1432, end: 2238});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.FLING_CANCEL,
+          {start: 1432, end: 2241});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
+          {start: 1516, end: 2605});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_BEGIN,
+          {start: 1532, end: 2274});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
+          {start: 1532, end: 2294});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
+          {start: 1549, end: 2310});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_END,
+          {start: 1627, end: 2275});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.FLING_START,
+          {start: 1627, end: 2310});
+      chrome_test_utils.addFrameEvent(model, {start: 2990, end: 3000});
+    };
+    verifier.expectedIRs = [
+      {title: 'Idle', start: 0, end: 919, eventCount: 0},
+      {title: 'Scroll,Tap,Touch Response', start: 919, end: 1054,
+          eventCount: 6, isAnimationBegin: true},
+      {title: 'Scroll,Touch Animation', start: 1054, end: 1068,
+          eventCount: 8},
+      {title: 'Fling Animation', start: 1054, end: 1432,
+          eventCount: 2},
+      {title: 'Scroll,Touch Response', start: 1432, end: 2605,
+          eventCount: 5, isAnimationBegin: true},
+      {title: 'Scroll Animation', start: 1549, end: 2310,
+          eventCount: 1},
+      {title: 'Fling Animation', start: 2605, end: 3000,
+          eventCount: 1}
+    ];
+    verifier.verify();
+  });
+
+  test('keyboardEvents', function() {
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.KEY_DOWN_RAW,
+          {start: 0, end: 45});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.CHAR,
+          {start: 10, end: 50});
+    };
+    verifier.expectedIRs = [
+      {title: 'Keyboard Response', start: 0, end: 50, eventCount: 2}
+    ];
+    verifier.verify();
+  });
+
+  test('mouseResponses', function() {
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.CLICK,
+          {start: 0, end: 100});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.CONTEXT_MENU,
+          {start: 200, end: 300});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.MOUSE_WHEEL,
+          {start: 400, end: 500});
+    };
+    verifier.expectedIRs = [
+      {title: 'Mouse Response', start: 0, end: 100, eventCount: 1},
+      {title: 'Idle', start: 100, end: 200, eventCount: 0},
+      {title: 'Mouse Response', start: 200, end: 300, eventCount: 1},
+      {title: 'Idle', start: 300, end: 400, eventCount: 0},
+      {title: 'MouseWheel Response', start: 400, end: 500,
+          eventCount: 1}
+    ];
+    verifier.verify();
+  });
+
+  test('mouseWheelAnimation', function() {
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.MOUSE_WHEEL,
+          {start: 0, end: 20});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.MOUSE_WHEEL,
+          {start: 16, end: 36});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.MOUSE_WHEEL,
+          {start: 55, end: 75});
+
+      // This threshold uses both events' start times, not the gap from one
+      // event's end to the next event's start.
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.MOUSE_WHEEL,
+          {start: 100, end: 150});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.MOUSE_WHEEL,
+          {start: 141, end: 191});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.MOUSE_WHEEL,
+          {start: 182, end: 200});
+    };
+    verifier.expectedIRs = [
+      {title: 'MouseWheel Response', start: 0, end: 20, eventCount: 1},
+      {title: 'MouseWheel Animation', start: 20, end: 75,
+          eventCount: 2},
+      {title: 'Idle', start: 75, end: 100, eventCount: 0},
+      {title: 'MouseWheel Response', start: 100, end: 150,
+          eventCount: 1},
+      {title: 'MouseWheel Response', start: 141, end: 191,
+          eventCount: 1},
+      {title: 'MouseWheel Response', start: 182, end: 200,
+          eventCount: 1}
+    ];
+    verifier.verify();
+  });
+
+  test('mouseDownUpResponse', function() {
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.MOUSE_DOWN,
+          {start: 0, end: 100});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.MOUSE_UP,
+          {start: 200, end: 210});
+    };
+    verifier.expectedIRs = [
+      {title: 'Idle', start: 0, end: 200, eventCount: 0},
+      {title: 'Mouse Response', start: 200, end: 210, eventCount: 2}
+    ];
+    verifier.verify();
+  });
+
+  test('ignoreLoneMouseMoves', function() {
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.MOUSE_MOVE,
+          {start: 0, end: 100});
+    };
+    verifier.expectedIRs = [
+      {title: 'Idle', start: 0, end: 100, eventCount: 0}
+    ];
+    verifier.verify();
+  });
+
+  test('mouseDrags', function() {
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addInputEvent(
+          model, INPUT_TYPE.MOUSE_DOWN, {start: 0, end: 100});
+      var mouseMove = chrome_test_utils.addInputEvent(
+          model, INPUT_TYPE.MOUSE_MOVE, {start: 200, end: 215});
+      addFrameEventForInput(model, mouseMove);
+      mouseMove = chrome_test_utils.addInputEvent(
+          model, INPUT_TYPE.MOUSE_MOVE, {start: 210, end: 220});
+      addFrameEventForInput(model, mouseMove);
+      mouseMove = chrome_test_utils.addInputEvent(
+          model, INPUT_TYPE.MOUSE_MOVE, {start: 221, end: 240});
+      addFrameEventForInput(model, mouseMove);
+    };
+    verifier.expectedIRs = [
+      {title: 'Idle', start: 0, end: 200, eventCount: 0},
+      {title: 'Mouse Response', start: 200, end: 215, eventCount: 4},
+      {title: 'Mouse Animation', start: 215, end: 240, eventCount: 6}
+    ];
+    verifier.verify();
+  });
+
+  test('twoScrollsNoFling', function() {
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_BEGIN,
+          {start: 0, end: 100});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
+          {start: 20, end: 100});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
+          {start: 40, end: 100});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
+          {start: 60, end: 150});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
+          {start: 70, end: 150});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_END,
+          {start: 80, end: 150});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_BEGIN,
+          {start: 300, end: 400});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
+          {start: 320, end: 400});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
+          {start: 330, end: 450});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
+          {start: 340, end: 450});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
+          {start: 350, end: 500});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_END,
+          {start: 360, end: 500});
+    };
+    verifier.expectedIRs = [
+      {title: 'Scroll Response', start: 0, end: 100, eventCount: 2,
+          isAnimationBegin: true},
+      {title: 'Scroll Animation', start: 100, end: 150, eventCount: 4},
+      {title: 'Idle', start: 150, end: 300, eventCount: 0},
+      {title: 'Scroll Response', start: 300, end: 400, eventCount: 2,
+          isAnimationBegin: true},
+      {title: 'Scroll Animation', start: 400, end: 500, eventCount: 4}
+    ];
+    verifier.verify();
+  });
+
+  test('cssAnimations', function() {
+    // CSS Animations happen on the renderer process, not the browser process.
+    // They are merged if they overlap.
+    // They are merged with other kinds of animations.
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addEvent(model.rendererMain, {
+        title: 'Animation', start: 0, end: 100});
+      chrome_test_utils.addEvent(model.rendererMain, {
+        title: 'Animation', start: 99, end: 200});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.FLING_START,
+          {start: 150, end: 180});
+      chrome_test_utils.addFrameEvent(model, {start: 290, end: 300});
+    };
+    verifier.expectedIRs = [
+      {title: 'CSS Animation', start: 0, end: 200, eventCount: 2},
+      {title: 'Fling Animation', start: 150, end: 300, eventCount: 1}
+    ];
+    verifier.verify();
+  });
+
+  test('flingThatIsntstopped', function() {
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.FLING_START,
+          {start: 32, end: 100});
+      chrome_test_utils.addFlingAnimationEvent(model, {start: 38, end: 200});
+      chrome_test_utils.addFrameEvent(model, {start: 290, end: 300});
+    };
+    verifier.expectedIRs = [
+      {title: 'Fling Animation', start: 32, end: 200, eventCount: 2},
+      {title: 'Idle', start: 200, end: 300, eventCount: 0}
+    ];
+    verifier.verify();
+  });
+
+  test('flingThatIsStopped', function() {
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.FLING_START,
+          {start: 32, end: 100});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.FLING_CANCEL,
+          {start: 105, end: 150});
+    };
+    verifier.expectedIRs = [
+      {title: 'Fling Animation', start: 32, end: 105, eventCount: 2},
+      {title: 'Idle', start: 105, end: 150, eventCount: 0}
+    ];
+    verifier.verify();
+  });
+
+  test('flingFling', function() {
+    // measurmt-traces/mobile/facebook_obama_scroll_dialog_box.html
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.FLING_START,
+          {start: 0, end: 30});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_START,
+          {start: 100, end: 130});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.FLING_CANCEL,
+          {start: 100, end: 130});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
+          {start: 110, end: 140});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
+          {start: 170, end: 180});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_END,
+          {start: 200, end: 210});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.FLING_START,
+          {start: 200, end: 220});
+      chrome_test_utils.addFrameEvent(model, {start: 230, end: 240});
+    };
+    verifier.expectedIRs = [
+      {title: 'Fling Animation', start: 0, end: 100, eventCount: 2},
+      {title: 'Touch Response', start: 100, end: 140, eventCount: 2,
+          isAnimationBegin: true},
+      {title: 'Touch Animation', start: 140, end: 210, eventCount: 2},
+      {title: 'Fling Animation', start: 200, end: 240, eventCount: 1}
+    ];
+    verifier.verify();
+  });
+
+  test('load', function() {
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addNavigationStartEvent(model, {start: 0});
+      chrome_test_utils.addFirstContentfulPaintEvent(model, {start: 20});
+    };
+    verifier.expectedIRs = [
+      {title: 'Successful Load', start: 0, end: 20, eventCount: 0}
+    ];
+    verifier.verify();
+  });
+
+  test('loadStartup', function() {
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addRenderingEvent(model, {start: 2, end: 3});
+      chrome_test_utils.addCreateThreadsEvent(model, {start: 5, end: 10});
+      // Throw a second one in there, just to try to confuse the algorithm.
+      chrome_test_utils.addCreateThreadsEvent(model, {start: 25, end: 30});
+      chrome_test_utils.addFrameEvent(model, {start: 11, end: 20});
+    };
+    verifier.expectedIRs = [
+      {title: 'Startup Load', start: 2, end: 20, eventCount: 2},
+      {title: 'Idle', start: 20, end: 30, eventCount: 0}
+    ];
+    verifier.verify();
+  });
+
+  test('totalIdle', function() {
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addFrameEvent(model, {start: 0, end: 10});
+    };
+    verifier.expectedIRs = [
+      {title: 'Idle', start: 0, end: 10, eventCount: 0}
+    ];
+    verifier.verify();
+  });
+
+  test('multipleIdles', function() {
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addFrameEvent(model, {start: 0, end: 1});
+      chrome_test_utils.addNavigationStartEvent(model, {start: 1});
+      chrome_test_utils.addFirstContentfulPaintEvent(model, {start: 4});
+      chrome_test_utils.addFrameEvent(model, {start: 12, end: 13});
+    };
+    verifier.expectedIRs = [
+      {title: 'Idle', start: 0, end: 1, eventCount: 0},
+      {title: 'Successful Load', start: 1, end: 4, eventCount: 0},
+      {title: 'Idle', start: 4, end: 13, eventCount: 0}
+    ];
+    verifier.verify();
+  });
+
+  test('touchStartTouchEndTap', function() {
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_START,
+          {start: 0, end: 10});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_END,
+          {start: 200, end: 210});
+    };
+    verifier.expectedIRs = [
+      {title: 'Touch Response', start: 0, end: 210, eventCount: 2,
+          isAnimationBegin: true}
+    ];
+    verifier.verify();
+  });
+
+  test('touchMoveResponseAnimation', function() {
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_START,
+          {start: 0, end: 10});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
+          {start: 50, end: 100});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
+          {start: 70, end: 150});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_END,
+          {start: 200, end: 300});
+    };
+    verifier.expectedIRs = [
+      {title: 'Touch Response', start: 0, end: 100, eventCount: 2,
+          isAnimationBegin: true},
+      {title: 'Touch Animation', start: 100, end: 300, eventCount: 2}
+    ];
+    verifier.verify();
+  });
+
+  test('tapEvents', function() {
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TAP,
+          {start: 0, end: 50});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TAP_DOWN,
+          {start: 300, end: 310});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TAP,
+          {start: 320, end: 350});
+    };
+    verifier.expectedIRs = [
+      {title: 'Tap Response', start: 0, end: 50, eventCount: 1},
+      {title: 'Idle', start: 50, end: 300, eventCount: 0},
+      {title: 'Tap Response', start: 300, end: 350, eventCount: 2}
+    ];
+    verifier.verify();
+  });
+
+  test('tapAndTapCancelResponses', function() {
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TAP_DOWN,
+          {start: 0, end: 100});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TAP_CANCEL,
+          {start: 300, end: 350});
+    };
+    verifier.expectedIRs = [
+      {title: 'Tap Response', start: 0, end: 100, eventCount: 1},
+      {title: 'Idle', start: 100, end: 300, eventCount: 0},
+      {title: 'Tap Response', start: 300, end: 350, eventCount: 1}
+    ];
+    verifier.verify();
+  });
+
+  test('tapCancelResponse', function() {
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TAP_DOWN,
+          {start: 0, end: 100});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TAP_CANCEL,
+          {start: 150, end: 200});
+    };
+    verifier.expectedIRs = [
+      {title: 'Tap Response', start: 0, end: 200, eventCount: 2}
+    ];
+    verifier.verify();
+  });
+
+  test('pinchResponseAnimation', function() {
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addFrameEvent(model, {start: 0, end: 10});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.PINCH_BEGIN,
+          {start: 100, end: 150});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.PINCH_UPDATE,
+          {start: 130, end: 160});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.PINCH_UPDATE,
+          {start: 140, end: 200});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.PINCH_UPDATE,
+          {start: 150, end: 205});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.PINCH_UPDATE,
+          {start: 210, end: 220});
+      // pause > 200ms
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.PINCH_UPDATE,
+          {start: 421, end: 470});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.PINCH_END,
+          {start: 460, end: 500});
+    };
+    verifier.expectedIRs = [
+      {title: 'Idle', start: 0, end: 100, eventCount: 0},
+      {title: 'Pinch Response', start: 100, end: 160, eventCount: 2,
+          isAnimationBegin: true},
+      {title: 'Pinch Animation', start: 160, end: 220, eventCount: 3},
+      {title: 'Idle', start: 220, end: 421, eventCount: 0},
+      {title: 'Pinch Animation', start: 421, end: 500, eventCount: 2}
+    ];
+    verifier.verify();
+  });
+
+  test('tapThenScroll', function() {
+    // measurmt-traces/mobile/google_io_instrument_strumming.json
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_START,
+          {start: 0, end: 20});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_END,
+          {start: 40, end: 100});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_START,
+          {start: 50, end: 120});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
+          {start: 80, end: 150});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
+          {start: 180, end: 200});
+    };
+    verifier.expectedIRs = [
+      {title: 'Touch Response', start: 0, end: 100, eventCount: 2,
+          isAnimationBegin: true},
+      {title: 'Touch Response', start: 50, end: 150, eventCount: 2,
+          isAnimationBegin: true},
+      {title: 'Touch Animation', start: 150, end: 200, eventCount: 1}
+    ];
+    verifier.verify();
+  });
+
+  test('pinchFlingTapTouchEventsOverlap', function() {
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addFrameEvent(model, {start: 0, end: 10});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_START,
+          {start: 20, end: 50});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TAP_DOWN,
+          {start: 20, end: 30});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.FLING_CANCEL,
+          {start: 20, end: 50});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
+          {start: 60, end: 100});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_START,
+          {start: 60, end: 110});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.PINCH_BEGIN,
+          {start: 60, end: 100});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TAP_CANCEL,
+          {start: 65, end: 75});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
+          {start: 70, end: 100});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.PINCH_UPDATE,
+          {start: 70, end: 100});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
+          {start: 75, end: 100});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
+          {start: 80, end: 100});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
+          {start: 85, end: 100});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_BEGIN,
+          {start: 75, end: 100});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
+          {start: 150, end: 200});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
+          {start: 150, end: 200});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.FLING_START,
+          {start: 180, end: 210});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_END,
+          {start: 190, end: 210});
+      chrome_test_utils.addFrameEvent(model, {start: 215, end: 220});
+    };
+    verifier.expectedIRs = [
+      {title: 'Idle', start: 0, end: 20, eventCount: 0},
+      {title: 'Pinch,Scroll,Tap,Touch Response', start: 20, end: 110,
+          eventCount: 9, isAnimationBegin: true},
+      {title: 'Scroll,Touch Animation', start: 110, end: 210,
+          eventCount: 6},
+      {title: 'Fling Animation', start: 180, end: 220, eventCount: 1}
+    ];
+    verifier.verify();
+  });
+
+  test('scrollThenFling', function() {
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
+          {start: 0, end: 40});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
+          {start: 50, end: 100});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.FLING_START,
+          {start: 80, end: 100});
+      chrome_test_utils.addFrameEvent(model, {start: 190, end: 200});
+    };
+    verifier.expectedIRs = [
+      {title: 'Scroll Animation', start: 0, end: 100, eventCount: 2},
+      {title: 'Fling Animation', start: 80, end: 200, eventCount: 1}
+    ];
+    verifier.verify();
+  });
+
+  /*
+    This test was generated from
+    /test_data/measurmt-traces/mobile/fling_HN_to_rest.json
+   */
+  test('flingHNToRest', function() {
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addEvent(model.browserMain,
+          {title: 'model start', start: 0, end: 1});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_START,
+          {start: 1274, end: 1297});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TAP_DOWN,
+          {start: 1274, end: 1305});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
+          {start: 1343, end: 1350});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
+          {start: 1359, end: 1366});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TAP_CANCEL,
+          {start: 1359, end: 1366});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_BEGIN,
+          {start: 1359, end: 1367});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
+          {start: 1359, end: 1387});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
+          {start: 1375, end: 1385});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
+          {start: 1375, end: 1416});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
+          {start: 1389, end: 1404});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
+          {start: 1389, end: 1429});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
+          {start: 1405, end: 1418});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
+          {start: 1405, end: 1449});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
+          {start: 1419, end: 1432});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.SCROLL_UPDATE,
+          {start: 1419, end: 1474});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_END,
+          {start: 1427, end: 1435});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.FLING_START,
+          {start: 1427, end: 1474});
+      chrome_test_utils.addFlingAnimationEvent(model, {start: 1440, end: 2300});
+      chrome_test_utils.addEvent(model.browserMain,
+          {title: 'model end', start: 3184, end: 3185});
+    };
+    verifier.expectedIRs = [
+      {title: 'Idle', start: 0, end: 1274, eventCount: 0},
+      {title: 'Scroll,Tap,Touch Response', start: 1274, end: 1387,
+          eventCount: 6, isAnimationBegin: true},
+      {title: 'Scroll,Touch Animation', start: 1387, end: 1474,
+          eventCount: 10},
+      {title: 'Fling Animation', start: 1427, end: 2300,
+          eventCount: 2},
+      {title: 'Idle', start: 2300, end: 3185, eventCount: 0}
+    ];
+    verifier.verify();
+  });
+
+  test('TapResponseOverlappingTouchAnimation', function() {
+    var verifier = new UserExpectationVerifier();
+    verifier.customizeModelCallback = function(model) {
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
+          {start: 0, end: 10});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
+          {start: 5, end: 15});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TOUCH_MOVE,
+          {start: 10, end: 20});
+      chrome_test_utils.addInputEvent(model, INPUT_TYPE.TAP,
+          {start: 15, end: 100});
+    };
+    verifier.expectedIRs = [
+      {title: 'Tap,Touch Response', start: 0, end: 100,
+          eventCount: 4}
+    ];
+    verifier.verify();
+  });
+});
+</script>
diff --git a/catapult/tracing/tracing/index.js b/catapult/tracing/tracing/index.js
new file mode 100644
index 0000000..e4f346a
--- /dev/null
+++ b/catapult/tracing/tracing/index.js
@@ -0,0 +1,23 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+'use strict';
+
+var fs = require('fs');
+var path = require('path');
+
+var catapultPath = fs.realpathSync(path.join(__dirname, '..', '..'));
+var catapultBuildPath = path.join(catapultPath, 'catapult_build');
+
+var node_bootstrap = require(path.join(catapultBuildPath, 'node_bootstrap.js'));
+
+HTMLImportsLoader.addArrayToSourcePath(
+    node_bootstrap.getSourcePathsForProject('tracing'));
+
+// Go!
+HTMLImportsLoader.loadHTML('/tracing/importer/import.html');
+HTMLImportsLoader.loadHTML('/tracing/model/model.html');
+HTMLImportsLoader.loadHTML('/tracing/extras/full_config.html');
+
+// Make the tracing namespace the main tracing export.
+module.exports = global.tr;
diff --git a/catapult/tracing/tracing/metrics/__init__.py b/catapult/tracing/tracing/metrics/__init__.py
new file mode 100644
index 0000000..cffcee6
--- /dev/null
+++ b/catapult/tracing/tracing/metrics/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+
+
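+# Make perf_insights importable (metric_runner.py in this package depends on
+# it) without requiring callers to adjust sys.path themselves.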
+_CATAPULT_DIR = os.path.abspath(os.path.join(
+    os.path.dirname(os.path.abspath(__file__)), '..', '..', '..'))
+
+_PI_PATH = os.path.join(_CATAPULT_DIR, 'perf_insights')
+
+if _PI_PATH not in sys.path:
+  sys.path.insert(1, _PI_PATH)
diff --git a/catapult/tracing/tracing/metrics/all_metrics.html b/catapult/tracing/tracing/metrics/all_metrics.html
new file mode 100644
index 0000000..9a0824b
--- /dev/null
+++ b/catapult/tracing/tracing/metrics/all_metrics.html
@@ -0,0 +1,14 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/metrics/sample_metric.html">
+<link rel="import" href="/tracing/metrics/system_health/efficiency_metric.html">
+<link rel="import"
+    href="/tracing/metrics/system_health/responsiveness_metric.html">
+<link rel="import"
+    href="/tracing/metrics/system_health/system_health_metrics.html">
+<link rel="import" href="/tracing/metrics/tracing_metric.html">
diff --git a/catapult/tracing/tracing/metrics/metric_map_function.html b/catapult/tracing/tracing/metrics/metric_map_function.html
new file mode 100644
index 0000000..89fecf0
--- /dev/null
+++ b/catapult/tracing/tracing/metrics/metric_map_function.html
@@ -0,0 +1,37 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<link rel="import" href="/perf_insights/function_handle.html">
+<link rel="import" href="/tracing/metrics/all_metrics.html">
+<link rel="import" href="/tracing/metrics/metric_registry.html">
+<link rel="import" href="/tracing/metrics/value_list.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.metrics', function() {
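+  // perf_insights map function: look up options.metric in MetricRegistry,
+  // run it over |model|, and store the resulting value dicts on |result|
+  // under 'values'.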
+  function metricMapFunction(result, model, options) {
+    if (options === undefined)
+      throw new Error('Expected an options dict.');
+    var metricName = options.metric;
+    if (metricName === undefined)
+      throw new Error('A metric name should be specified.');
+    var valueList = new tr.metrics.ValueList();
+    var metric = tr.metrics.MetricRegistry.findTypeInfoWithName(metricName);
+    if (metric === undefined)
+      throw new Error('"' + metricName + '" is not a registered metric.');
+    metric.constructor(valueList, model);
+
+    result.addPair('values', valueList.valueDicts);
+  }
+
+  pi.FunctionRegistry.register(metricMapFunction);
+
+  return {
+    metricMapFunction: metricMapFunction
+  };
+});
+</script>
diff --git a/catapult/tracing/tracing/metrics/metric_map_function_test.html b/catapult/tracing/tracing/metrics/metric_map_function_test.html
new file mode 100644
index 0000000..b08abe4
--- /dev/null
+++ b/catapult/tracing/tracing/metrics/metric_map_function_test.html
@@ -0,0 +1,63 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/perf_insights/mre/mre_result.html">
+<link rel="import" href="/tracing/core/test_utils.html">
+<link rel="import" href="/tracing/extras/importer/trace_event_importer.html">
+<link rel="import" href="/tracing/metrics/metric_map_function.html">
+<link rel="import" href="/tracing/metrics/sample_metric.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  var test_utils = tr.c.TestUtils;
+  var ThreadSlice = tr.model.ThreadSlice;
+
+  test('metricMapTest', function() {
+    var events = [
+      {name: 'a', args: {}, pid: 52, ts: 524, cat: 'foo', tid: 53, ph: 'B'},
+      {name: 'a', args: {}, pid: 52, ts: 560, cat: 'foo', tid: 53, ph: 'E'}
+    ];
+    var m = test_utils.newModelWithEvents(JSON.stringify(events), {
+      shiftWorldToZero: false,
+      pruneEmptyContainers: false,
+      trackDetailedModelStats: true,
+      customizeModelCallback: function(m) {
+        var p1 = m.getOrCreateProcess(1);
+        var t2 = p1.getOrCreateThread(2);
+        var t2_s1 = t2.sliceGroup.pushSlice(test_utils.newSliceEx({
+          type: ThreadSlice,
+          name: 'some_slice',
+          start: 0, end: 10
+        }));
+        var t2_s2 = t2.sliceGroup.pushSlice(test_utils.newSliceEx({
+          type: ThreadSlice,
+          name: 'some_slice',
+          start: 20, end: 30
+        }));
+      }
+    });
+
+    m.canonicalUrlThatCreatedThisTrace = '/foo.json';
+
+    assert.throw(function() {
+      var result = new pi.mre.MreResult();
+      tr.metrics.metricMapFunction(result, m, {});
+    }, Error, 'A metric name should be specified.');
+
+    assert.throw(function() {
+      var result = new pi.mre.MreResult();
+      tr.metrics.metricMapFunction(result, m, {'metric': 'wrongMetric'});
+    }, Error, '"wrongMetric" is not a registered metric.');
+
+    var result = new pi.mre.MreResult();
+    tr.metrics.metricMapFunction(result, m, {'metric': 'sampleMetric'});
+    assert.property(result.pairs, 'values');
+  });
+});
+</script>
diff --git a/catapult/tracing/tracing/metrics/metric_registry.html b/catapult/tracing/tracing/metrics/metric_registry.html
new file mode 100644
index 0000000..be25bcb
--- /dev/null
+++ b/catapult/tracing/tracing/metrics/metric_registry.html
@@ -0,0 +1,27 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<link rel="import" href="/tracing/base/base.html">
+<link rel="import" href="/tracing/base/extension_registry.html">
+<link rel="import" href="/tracing/base/iteration_helpers.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.metrics', function() {
+
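+  // Registry that individual metrics register themselves with; callers such
+  // as metricMapFunction look metrics up by name via findTypeInfoWithName().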
+  function MetricRegistry() {}
+
+  var options = new tr.b.ExtensionRegistryOptions(tr.b.BASIC_REGISTRY_MODE);
+  options.defaultMetadata = {};
+  options.mandatoryBaseClass = Function;
+  tr.b.decorateExtensionRegistry(MetricRegistry, options);
+
+  return {
+    MetricRegistry: MetricRegistry
+  };
+});
+</script>
diff --git a/catapult/tracing/tracing/metrics/metric_registry_test.html b/catapult/tracing/tracing/metrics/metric_registry_test.html
new file mode 100644
index 0000000..22549e7
--- /dev/null
+++ b/catapult/tracing/tracing/metrics/metric_registry_test.html
@@ -0,0 +1,66 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<link rel="import" href="/tracing/core/test_utils.html">
+<link rel="import" href="/tracing/metrics/metric_registry.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/value.html">
+
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  var test_utils = tr.c.TestUtils;
+  var ThreadSlice = tr.model.ThreadSlice;
+
+  test('FindMetricByName', function() {
+    function sampleMetricA(valueList, model) {
+      var unit = tr.v.Unit.byName.sizeInBytes_smallerIsBetter;
+      var n1 = new tr.v.ScalarNumeric(unit, 1);
+      valueList.addValue(new tr.v.NumericValue(
+        model.canonicalUrlThatCreatedThisTrace, 'foo', n1));
+    }
+    sampleMetricA.prototype = {
+      __proto__: Function.prototype
+    };
+    tr.metrics.MetricRegistry.register(sampleMetricA);
+
+    function sampleMetricB(valueList, model) {
+      var unit = tr.v.Unit.byName.sizeInBytes_smallerIsBetter;
+      var n1 = new tr.v.ScalarNumeric(unit, 1);
+      var n2 = new tr.v.ScalarNumeric(unit, 2);
+      valueList.addValue(new tr.v.NumericValue(
+        model.canonicalUrlThatCreatedThisTrace, 'foo', n1));
+      valueList.addValue(new tr.v.NumericValue(
+        model.canonicalUrlThatCreatedThisTrace, 'bar', n2));
+    }
+    sampleMetricB.prototype = {
+      __proto__: Function.prototype
+    };
+    tr.metrics.MetricRegistry.register(sampleMetricB);
+
+    function sampleMetricC(valueList, model) {
+      var unit = tr.v.Unit.byName.sizeInBytes_smallerIsBetter;
+      var n1 = new tr.v.ScalarNumeric(unit, 1);
+      var n2 = new tr.v.ScalarNumeric(unit, 2);
+      var n3 = new tr.v.ScalarNumeric(unit, 3);
+      valueList.addValue(new tr.v.NumericValue(
+        model.canonicalUrlThatCreatedThisTrace, 'foo', n1));
+      valueList.addValue(new tr.v.NumericValue(
+        model.canonicalUrlThatCreatedThisTrace, 'bar', n2));
+      valueList.addValue(new tr.v.NumericValue(
+        model.canonicalUrlThatCreatedThisTrace, 'baz', n3));
+    }
+    sampleMetricC.prototype = {
+      __proto__: Function.prototype
+    };
+    tr.metrics.MetricRegistry.register(sampleMetricC);
+
+    assert.isTrue(tr.metrics.MetricRegistry.findTypeInfoWithName('sampleMetricB').constructor === sampleMetricB); // @suppress longLineCheck
+  });
+});
+</script>
diff --git a/catapult/tracing/tracing/metrics/metric_runner.py b/catapult/tracing/tracing/metrics/metric_runner.py
new file mode 100644
index 0000000..32ad121
--- /dev/null
+++ b/catapult/tracing/tracing/metrics/metric_runner.py
@@ -0,0 +1,34 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import os
+
+from perf_insights import map_single_trace
+from perf_insights import function_handle
+from perf_insights.mre import file_handle
+from perf_insights.mre import job as job_module
+
+_METRIC_MAP_FUNCTION_FILENAME = 'metric_map_function.html'
+
+_METRIC_MAP_FUNCTION_NAME = 'metricMapFunction'
+
+def _GetMetricsDir():
+  return os.path.dirname(os.path.abspath(__file__))
+
+def _GetMetricRunnerHandle(metric):
+  assert isinstance(metric, basestring)
+  metrics_dir = _GetMetricsDir()
+  metric_mapper_path = os.path.join(metrics_dir, _METRIC_MAP_FUNCTION_FILENAME)
+
+  modules_to_load = [function_handle.ModuleToLoad(filename=metric_mapper_path)]
+  map_function_handle = function_handle.FunctionHandle(
+      modules_to_load, _METRIC_MAP_FUNCTION_NAME, {'metric': metric})
+
+  return job_module.Job(map_function_handle, None)
+
+def RunMetric(filename, metric, extra_import_options=None):
+  th = file_handle.URLFileHandle(filename, 'file://' + filename)
+  result = map_single_trace.MapSingleTrace(
+      th, _GetMetricRunnerHandle(metric), extra_import_options)
+
+  return result
diff --git a/catapult/tracing/tracing/metrics/sample_metric.html b/catapult/tracing/tracing/metrics/sample_metric.html
new file mode 100644
index 0000000..1925106
--- /dev/null
+++ b/catapult/tracing/tracing/metrics/sample_metric.html
@@ -0,0 +1,36 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<link rel="import" href="/tracing/metrics/metric_registry.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/value.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.metrics', function() {
+
+  function sampleMetric(valueList, model) {
+    var unit = tr.v.Unit.byName.sizeInBytes_smallerIsBetter;
+    var n1 = new tr.v.ScalarNumeric(unit, 1);
+    var n2 = new tr.v.ScalarNumeric(unit, 2);
+    valueList.addValue(new tr.v.NumericValue(
+        model.canonicalUrlThatCreatedThisTrace, 'foo', n1));
+    valueList.addValue(new tr.v.NumericValue(
+        model.canonicalUrlThatCreatedThisTrace, 'bar', n2));
+  }
+
+  sampleMetric.prototype = {
+    __proto__: Function.prototype
+  };
+
+  tr.metrics.MetricRegistry.register(sampleMetric);
+
+  return {
+    sampleMetric: sampleMetric
+  };
+});
+</script>
diff --git a/catapult/tracing/tracing/metrics/system_health/animation_smoothness_metric.html b/catapult/tracing/tracing/metrics/system_health/animation_smoothness_metric.html
new file mode 100644
index 0000000..8224861
--- /dev/null
+++ b/catapult/tracing/tracing/metrics/system_health/animation_smoothness_metric.html
@@ -0,0 +1,73 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/base/statistics.html">
+<link rel="import" href="/tracing/metrics/system_health/utils.html">
+<link rel="import" href="/tracing/model/user_model/animation_expectation.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.metrics.sh', function() {
+  // The smoothness score is maximized when frame timestamp discrepancy is
+  // less than or equal to this:
+  var MIN_DISCREPANCY = 0.05;
+
+  // The smoothness score is minimized when frame timestamp discrepancy is
+  // greater than or equal to this:
+  var MAX_DISCREPANCY = 0.3;
+
+  var UNIT = tr.v.Unit.byName.normalizedPercentage_biggerIsBetter;
+
+  var DESCRIPTION = 'Mean Opinion Score for Animation smoothness';
+
+  function AnimationSmoothnessMetric(valueList, model) {
+    model.userModel.expectations.forEach(function(ue) {
+      if (!(ue instanceof tr.model.um.AnimationExpectation))
+        return;
+
+      if (ue.frameEvents === undefined ||
+          ue.frameEvents.length === 0)
+        throw new Error('Animation missing frameEvents ' + ue.stableId);
+
+      var options = {};
+      options.description = DESCRIPTION;
+
+      var groupingKeys = {};
+      groupingKeys.userExpectationStableId = ue.stableId;
+      groupingKeys.userExpectationStageTitle = ue.stageTitle;
+      groupingKeys.userExpectationInitiatorTitle = ue.initiatorTitle;
+
+      var frameTimestamps = ue.frameEvents.toArray().map(function(event) {
+        return event.start;
+      });
+
+      var absolute = false;
+      var discrepancy = tr.b.Statistics.timestampsDiscrepancy(
+          frameTimestamps, absolute);
+      var smoothness = 1 - tr.b.normalize(
+          discrepancy, MIN_DISCREPANCY, MAX_DISCREPANCY);
+      var score = tr.b.clamp(smoothness, 0, 1);
+
+      valueList.addValue(new tr.v.NumericValue(
+          model.canonicalUrlThatCreatedThisTrace, 'smoothness',
+          new tr.v.ScalarNumeric(UNIT, score),
+          options, groupingKeys));
+    });
+  }
+
+  AnimationSmoothnessMetric.prototype = {
+    __proto__: Function.prototype
+  };
+
+  tr.metrics.MetricRegistry.register(AnimationSmoothnessMetric);
+
+  return {
+    AnimationSmoothnessMetric: AnimationSmoothnessMetric
+  };
+});
+</script>
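A rough worked example of the smoothness scoring above, assuming tr.b.normalize is the usual linear interpolation (x - min) / (max - min) and tr.b.clamp bounds the result to [0, 1]:

  // discrepancy <= 0.05             -> smoothness 1.0 (perfectly even frames)
  // discrepancy  = 0.175 (midpoint) -> smoothness 0.5
  // discrepancy >= 0.3              -> smoothness 0.0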
diff --git a/catapult/tracing/tracing/metrics/system_health/animation_throughput_metric.html b/catapult/tracing/tracing/metrics/system_health/animation_throughput_metric.html
new file mode 100644
index 0000000..765869b
--- /dev/null
+++ b/catapult/tracing/tracing/metrics/system_health/animation_throughput_metric.html
@@ -0,0 +1,68 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/base/statistics.html">
+<link rel="import" href="/tracing/metrics/system_health/utils.html">
+<link rel="import" href="/tracing/model/user_model/animation_expectation.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.metrics.sh', function() {
+  // The Animation Throughput score is maximized at this value of average
+  // frames-per-second.
+  var MAX_FPS = 60;
+
+  // The Animation Throughput score is minimized at this value of average
+  // frames-per-second.
+  var MIN_FPS = 10;
+
+  var UNIT = tr.v.Unit.byName.normalizedPercentage_biggerIsBetter;
+
+  var DESCRIPTION = 'Mean Opinion Score for Animation throughput';
+
+  function AnimationThroughputMetric(valueList, model) {
+    model.userModel.expectations.forEach(function(ue) {
+      if (!(ue instanceof tr.model.um.AnimationExpectation))
+        return;
+
+      if (ue.frameEvents === undefined ||
+          ue.frameEvents.length === 0)
+        throw new Error('Animation missing frameEvents ' + ue.stableId);
+
+      var options = {};
+      options.description = DESCRIPTION;
+
+      var groupingKeys = {};
+      groupingKeys.userExpectationStableId = ue.stableId;
+      groupingKeys.userExpectationStageTitle = ue.stageTitle;
+      groupingKeys.userExpectationInitiatorTitle = ue.initiatorTitle;
+
+      var durationSeconds = ue.duration / 1000;
+      var avgSpf = durationSeconds / ue.frameEvents.length;
+      var throughput = 1 - tr.b.normalize(avgSpf, 1 / MAX_FPS, 1 / MIN_FPS);
+      var score = tr.b.clamp(throughput, 0, 1);
+
+      valueList.addValue(new tr.v.NumericValue(
+          model.canonicalUrlThatCreatedThisTrace, 'throughput',
+          new tr.v.ScalarNumeric(UNIT, score),
+          options, groupingKeys));
+    });
+  }
+
+  AnimationThroughputMetric.prototype = {
+    __proto__: Function.prototype
+  };
+
+  tr.metrics.MetricRegistry.register(AnimationThroughputMetric);
+
+  return {
+    AnimationThroughputMetric: AnimationThroughputMetric
+  };
+});
+</script>
+
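Similarly, a rough worked example of the throughput score for a 1000 ms animation, under the same assumption about tr.b.normalize:

  // 60 frames -> avgSpf = 1/60 s -> score 1.0
  // 30 frames -> avgSpf = 1/30 s -> score 0.8
  // 10 frames -> avgSpf = 1/10 s -> score 0.0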
diff --git a/catapult/tracing/tracing/metrics/system_health/efficiency_metric.html b/catapult/tracing/tracing/metrics/system_health/efficiency_metric.html
new file mode 100644
index 0000000..8626b4d
--- /dev/null
+++ b/catapult/tracing/tracing/metrics/system_health/efficiency_metric.html
@@ -0,0 +1,92 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/base/statistics.html">
+<link rel="import" href="/tracing/metrics/metric_registry.html">
+<link rel="import" href="/tracing/metrics/system_health/utils.html">
+<link rel="import" href="/tracing/model/user_model/animation_expectation.html">
+<link rel="import" href="/tracing/model/user_model/idle_expectation.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/value.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.metrics.sh', function() {
+  var UNIT = tr.v.Unit.byName.normalizedPercentage_biggerIsBetter;
+
+  var DESCRIPTION = 'Normalized CPU budget consumption';
+
+  function EfficiencyMetric(valueList, model) {
+    var scores = [];
+
+    model.userModel.expectations.forEach(function(ue) {
+      var options = {};
+      options.description = DESCRIPTION;
+
+      var groupingKeys = {};
+      groupingKeys.userExpectationStableId = ue.stableId;
+      groupingKeys.userExpectationStageTitle = ue.stageTitle;
+      groupingKeys.userExpectationInitiatorTitle = ue.initiatorTitle;
+
+      var score = undefined;
+
+      if ((ue.totalCpuMs === undefined) ||
+          (ue.totalCpuMs == 0))
+        return;
+
+      var cpuFractionBudget = tr.b.Range.fromExplicitRange(0.5, 1.5);
+
+      if (ue instanceof tr.model.um.IdleExpectation) {
+        cpuFractionBudget = tr.b.Range.fromExplicitRange(0.1, 1);
+      } else if (ue instanceof tr.model.um.AnimationExpectation) {
+        cpuFractionBudget = tr.b.Range.fromExplicitRange(1, 2);
+      }
+
+      var cpuMsBudget = tr.b.Range.fromExplicitRange(
+          ue.duration * cpuFractionBudget.min,
+          ue.duration * cpuFractionBudget.max);
+      var normalizedCpu = tr.b.normalize(
+          ue.totalCpuMs, cpuMsBudget.min, cpuMsBudget.max);
+      score = 1 - tr.b.clamp(normalizedCpu, 0, 1);
+
+      scores.push(score);
+
+      valueList.addValue(new tr.v.NumericValue(
+          model.canonicalUrlThatCreatedThisTrace, 'efficiency',
+          new tr.v.ScalarNumeric(UNIT, score),
+          options, groupingKeys));
+    });
+
+    // Manually reduce scores.
+    // https://github.com/catapult-project/catapult/issues/2036
+
+    var options = {};
+    options.description = DESCRIPTION;
+    var groupingKeys = {};
+    var overallScore = tr.b.Statistics.weightedMean(
+        scores, tr.metrics.sh.perceptualBlend);
+    if (overallScore === undefined)
+      return;
+
+    valueList.addValue(new tr.v.NumericValue(
+        model.canonicalUrlThatCreatedThisTrace, 'efficiency',
+        new tr.v.ScalarNumeric(UNIT, overallScore),
+        options, groupingKeys));
+  }
+
+  EfficiencyMetric.prototype = {
+    __proto__: Function.prototype
+  };
+
+  tr.metrics.MetricRegistry.register(EfficiencyMetric);
+
+  return {
+    EfficiencyMetric: EfficiencyMetric
+  };
+});
+</script>
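A rough worked example for an IdleExpectation lasting 100 ms (CPU budget 0.1-1x of duration, i.e. 10-100 ms), consistent with efficiency_metric_test below:

  // totalCpuMs = 10  -> normalizedCpu = 0.0 -> efficiency 1.0 (optimal)
  // totalCpuMs = 55  -> normalizedCpu = 0.5 -> efficiency 0.5
  // totalCpuMs = 100 -> normalizedCpu = 1.0 -> efficiency 0.0 (pessimal)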
diff --git a/catapult/tracing/tracing/metrics/system_health/efficiency_metric_test.html b/catapult/tracing/tracing/metrics/system_health/efficiency_metric_test.html
new file mode 100644
index 0000000..3d676c7
--- /dev/null
+++ b/catapult/tracing/tracing/metrics/system_health/efficiency_metric_test.html
@@ -0,0 +1,53 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/core/test_utils.html">
+<link rel="import"
+    href="/tracing/metrics/system_health/efficiency_metric.html">
+<link rel="import" href="/tracing/metrics/value_list.html">
+<link rel="import" href="/tracing/model/user_model/idle_expectation.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  function createModel() {
+    return tr.c.TestUtils.newModel(function(model) {
+      model.p1 = model.getOrCreateProcess(1);
+      model.t2 = model.p1.getOrCreateThread(2);
+
+      var slice = tr.c.TestUtils.newSliceEx({
+        title: 'foo',
+        start: 0,
+        end: 100,
+        type: tr.model.ThreadSlice
+      });
+      slice.isTopLevel = true;
+
+      var idle = new tr.model.um.IdleExpectation(model, 0, 100);
+      idle.associatedEvents.push(slice);
+      model.userModel.expectations.push(idle);
+    });
+  }
+
+  test('optimalEfficiency', function() {
+    var model = createModel();
+    model.userModel.expectations[0].associatedEvents[0].cpuSelfTime = 10;
+    var valueList = new tr.metrics.ValueList();
+    tr.metrics.sh.EfficiencyMetric(valueList, model);
+    assert.equal(1, valueList.valueDicts[0].numeric.value);
+  });
+
+  test('pessimalEfficiency', function() {
+    var model = createModel();
+    model.userModel.expectations[0].associatedEvents[0].cpuSelfTime = 100;
+    var valueList = new tr.metrics.ValueList();
+    tr.metrics.sh.EfficiencyMetric(valueList, model);
+    assert.equal(0, valueList.valueDicts[0].numeric.value);
+  });
+});
+</script>
diff --git a/catapult/tracing/tracing/metrics/system_health/responsiveness_metric.html b/catapult/tracing/tracing/metrics/system_health/responsiveness_metric.html
new file mode 100644
index 0000000..eb0ac8f
--- /dev/null
+++ b/catapult/tracing/tracing/metrics/system_health/responsiveness_metric.html
@@ -0,0 +1,210 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/base/statistics.html">
+<link rel="import" href="/tracing/metrics/metric_registry.html">
+<link rel="import"
+    href="/tracing/metrics/system_health/animation_smoothness_metric.html">
+<link rel="import"
+    href="/tracing/metrics/system_health/animation_throughput_metric.html">
+<link rel="import" href="/tracing/metrics/system_health/utils.html">
+<link rel="import" href="/tracing/model/user_model/animation_expectation.html">
+<link rel="import" href="/tracing/model/user_model/load_expectation.html">
+<link rel="import" href="/tracing/model/user_model/response_expectation.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/value.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.metrics.sh', function() {
+  // For Response, Load, and DiscreteAnimation IRs, Responsiveness is derived
+  // from the time between when the user thinks they begin an interaction
+  // (expectedStart) and the time when the screen first changes to reflect the
+  // interaction (actualEnd). There may be a delay between expectedStart and
+  // when Chrome first starts processing the interaction (actualStart) if the
+  // main thread is busy. The user doesn't know when actualStart is; they only
+  // know when expectedStart is. User responsiveness, by definition, considers
+  // only what the user experiences, so "duration" is defined as actualEnd -
+  // expectedStart.
+
+  // This histogram represents the number of people who we believe would
+  // score the responsiveness at a certain value. For now, these bin counts
+  // are just a best-effort guess; in #1696, we plan to derive them
+  // experimentally.
+  var RESPONSE_HISTOGRAM = tr.v.Numeric.fromDict({
+    unit: 'unitless',
+    min: 150,
+    max: 5000,
+    centralBinWidth: 485,
+    underflowBin: {min: -Number.MAX_VALUE, max: 150, count: 1000},
+    centralBins: [
+      {min: 150, max: 635, count: 708},
+      {min: 635, max: 1120, count: 223},
+      {min: 1120, max: 1605, count: 50},
+      {min: 1605, max: 2090, count: 33},
+      {min: 2090, max: 2575, count: 23},
+      {min: 2575, max: 3060, count: 17},
+      {min: 3060, max: 3545, count: 12},
+      {min: 3545, max: 4030, count: 8},
+      {min: 4030, max: 4515, count: 4},
+      {min: 4515, max: 5000, count: 1}
+    ],
+    overflowBin: {min: 5000, max: Number.MAX_VALUE, count: 0}
+  });
+
+  var FAST_RESPONSE_HISTOGRAM = tr.v.Numeric.fromDict({
+    unit: 'unitless',
+    min: 66,
+    max: 2200,
+    centralBinWidth: 214,
+    underflowBin: {min: -Number.MAX_VALUE, max: 66, count: 1000},
+    centralBins: [
+      {min: 66, max: 280, count: 708},
+      {min: 280, max: 493, count: 223},
+      {min: 493, max: 706, count: 50},
+      {min: 706, max: 920, count: 33},
+      {min: 920, max: 1133, count: 23},
+      {min: 1133, max: 1346, count: 17},
+      {min: 1346, max: 1560, count: 12},
+      {min: 1560, max: 1773, count: 8},
+      {min: 1773, max: 1987, count: 4},
+      {min: 1987, max: 2200, count: 1}
+    ],
+    overflowBin: {min: 2200, max: Number.MAX_VALUE, count: 0}
+  });
+
+  var LOAD_HISTOGRAM = tr.v.Numeric.fromDict({
+    unit: 'unitless',
+    min: 1000,
+    max: 60000,
+    centralBinWidth: 5900,
+    underflowBin: {min: -Number.MAX_VALUE, max: 1000, count: 1000},
+    centralBins: [
+      {min: 1000, max: 6900, count: 901},
+      {min: 6900, max: 12800, count: 574},
+      {min: 12800, max: 18700, count: 298},
+      {min: 18700, max: 24600, count: 65},
+      {min: 24600, max: 30500, count: 35},
+      {min: 30500, max: 36400, count: 23},
+      {min: 36400, max: 42300, count: 16},
+      {min: 42300, max: 48200, count: 10},
+      {min: 48200, max: 54100, count: 5},
+      {min: 54100, max: 60000, count: 2}
+    ],
+    overflowBin: {min: 60000, max: Number.MAX_VALUE, count: 0}
+  });
+
+  var UNIT = tr.v.Unit.byName.normalizedPercentage_biggerIsBetter;
+
+  var DESCRIPTION = (
+      'For Load and Response, Mean Opinion Score of completion time; ' +
+      'For Animation, perceptual blend of Mean Opinion Scores of ' +
+      'throughput and smoothness');
+
+  function getDurationScore(histogram, duration) {
+    return histogram.getInterpolatedCountAt(duration) / histogram.maxCount;
+  }
+
+  function ResponsivenessMetric(valueList, model) {
+    tr.metrics.sh.AnimationThroughputMetric(valueList, model);
+    tr.metrics.sh.AnimationSmoothnessMetric(valueList, model);
+
+    var throughputForAnimation = {};
+    var smoothnessForAnimation = {};
+    valueList.valueDicts.forEach(function(value) {
+      if ((value.type !== 'numeric') ||
+          (value.numeric.type !== 'scalar'))
+        return;
+
+      var ue = value.grouping_keys.userExpectationStableId;
+
+      if (value.grouping_keys.name === 'throughput')
+        throughputForAnimation[ue] = value.numeric.value;
+      if (value.grouping_keys.name === 'smoothness')
+        smoothnessForAnimation[ue] = value.numeric.value;
+    });
+
+    var scores = [];
+
+    model.userModel.expectations.forEach(function(ue) {
+      var score = undefined;
+
+      if (ue instanceof tr.model.um.IdleExpectation) {
+        // Responsiveness is not defined for Idle.
+        return;
+      } else if (ue instanceof tr.model.um.LoadExpectation) {
+        score = getDurationScore(LOAD_HISTOGRAM, ue.duration);
+      } else if (ue instanceof tr.model.um.ResponseExpectation) {
+        var histogram = RESPONSE_HISTOGRAM;
+        if (ue.isAnimationBegin)
+          histogram = FAST_RESPONSE_HISTOGRAM;
+
+        score = getDurationScore(histogram, ue.duration);
+      } else if (ue instanceof tr.model.um.AnimationExpectation) {
+        var throughput = throughputForAnimation[ue.stableId];
+        var smoothness = smoothnessForAnimation[ue.stableId];
+
+        if (throughput === undefined)
+          throw new Error('Missing throughput for ' + ue.stableId);
+
+        if (smoothness === undefined)
+          throw new Error('Missing smoothness for ' + ue.stableId);
+
+        score = tr.b.Statistics.weightedMean(
+            [throughput, smoothness], tr.metrics.sh.perceptualBlend);
+      } else {
+        throw new Error('Unrecognized stage for ' + ue.stableId);
+      }
+
+      if (score === undefined)
+        throw new Error('Failed to compute responsiveness for ' + ue.stableId);
+
+      scores.push(score);
+
+      var options = {};
+      options.description = DESCRIPTION;
+
+      var groupingKeys = {};
+      groupingKeys.userExpectationStableId = ue.stableId;
+      groupingKeys.userExpectationStageTitle = ue.stageTitle;
+      groupingKeys.userExpectationInitiatorTitle = ue.initiatorTitle;
+
+      valueList.addValue(new tr.v.NumericValue(
+          model.canonicalUrlThatCreatedThisTrace, 'responsiveness',
+          new tr.v.ScalarNumeric(UNIT, score),
+          options, groupingKeys));
+    });
+
+    // Manually reduce scores.
+    // https://github.com/catapult-project/catapult/issues/2036
+
+    var options = {};
+    options.description = DESCRIPTION;
+    var groupingKeys = {};
+    var overallScore = tr.b.Statistics.weightedMean(
+        scores, tr.metrics.sh.perceptualBlend);
+    if (overallScore === undefined)
+      return;
+
+    valueList.addValue(new tr.v.NumericValue(
+        model.canonicalUrlThatCreatedThisTrace, 'responsiveness',
+        new tr.v.ScalarNumeric(UNIT, overallScore),
+        options, groupingKeys));
+  }
+
+  ResponsivenessMetric.prototype = {
+    __proto__: Function.prototype
+  };
+
+  tr.metrics.MetricRegistry.register(ResponsivenessMetric);
+
+  return {
+    ResponsivenessMetric: ResponsivenessMetric
+  };
+});
+</script>
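For orientation, getDurationScore reads a Mean Opinion Score off the histogram as getInterpolatedCountAt(duration) / maxCount; approximate values for RESPONSE_HISTOGRAM, taken from the unit test below:

  // duration <= 150 ms  -> score 1.0    (underflow bin, count 1000)
  // duration  = 300 ms  -> score ~0.82
  // duration  = 1000 ms -> score ~0.18
  // duration >= 5000 ms -> score 0.0    (overflow bin, count 0)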
diff --git a/catapult/tracing/tracing/metrics/system_health/responsiveness_metric_test.html b/catapult/tracing/tracing/metrics/system_health/responsiveness_metric_test.html
new file mode 100644
index 0000000..d5712ed
--- /dev/null
+++ b/catapult/tracing/tracing/metrics/system_health/responsiveness_metric_test.html
@@ -0,0 +1,143 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/core/test_utils.html">
+<link rel="import"
+    href="/tracing/metrics/system_health/responsiveness_metric.html">
+<link rel="import" href="/tracing/metrics/value_list.html">
+<link rel="import" href="/tracing/model/user_model/animation_expectation.html">
+<link rel="import" href="/tracing/model/user_model/response_expectation.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  function responsivenessForDuration(duration, opt_isAnimationBegin) {
+    var model = tr.c.TestUtils.newModel(function(model) {
+      var ue = new tr.model.um.ResponseExpectation(
+          model, 'test', 0, duration, opt_isAnimationBegin);
+      model.userModel.expectations.push(ue);
+    });
+    var valueList = new tr.metrics.ValueList();
+    tr.metrics.sh.ResponsivenessMetric(valueList, model);
+    return valueList.valueDicts[0].numeric.value;
+  }
+
+  function metricsForAnimation(customizeAnimationCallback) {
+    var model = tr.c.TestUtils.newModel(function(model) {
+      var ue = new tr.model.um.AnimationExpectation(
+          model, 'test', 0, 100);
+      customizeAnimationCallback(ue);
+      model.userModel.expectations.push(ue);
+    });
+    var valueList = new tr.metrics.ValueList();
+    tr.metrics.sh.ResponsivenessMetric(valueList, model);
+    valueList = valueList.valueDicts;
+    var metrics = {};
+    for (var i = 0; i < valueList.length; ++i) {
+      if (valueList[i].grouping_keys.name === 'responsiveness') {
+        metrics.responsiveness = valueList[i].numeric.value;
+      } else if (valueList[i].grouping_keys.name === 'smoothness') {
+        metrics.smoothness = valueList[i].numeric.value;
+      } else if (valueList[i].grouping_keys.name === 'throughput') {
+        metrics.throughput = valueList[i].numeric.value;
+      }
+    }
+    return metrics;
+  }
+
+  test('response', function() {
+    assert.equal(1, responsivenessForDuration(150));
+    assert.closeTo(1, responsivenessForDuration(150.0001), 1e-5);
+    assert.closeTo(0.81938, responsivenessForDuration(299.9999), 1e-5);
+    assert.closeTo(0.81938, responsivenessForDuration(300.0001), 1e-5);
+    assert.closeTo(0.1793, responsivenessForDuration(999.9999), 1e-5);
+    assert.closeTo(0.1793, responsivenessForDuration(1000.0001), 1e-5);
+    assert.closeTo(0, responsivenessForDuration(4999.999), 1e-5);
+    assert.equal(0, responsivenessForDuration(5000));
+  });
+
+  test('animationBegin', function() {
+    assert.equal(1, responsivenessForDuration(66, true));
+    assert.closeTo(1, responsivenessForDuration(66.0001, true), 1e-5);
+    assert.closeTo(0.90721, responsivenessForDuration(99.9999, true), 1e-5);
+    assert.closeTo(0.90721, responsivenessForDuration(100.0001, true), 1e-5);
+    assert.closeTo(0.04996, responsivenessForDuration(599.9999, true), 1e-5);
+    assert.closeTo(0.04996, responsivenessForDuration(600.0001, true), 1e-5);
+    assert.closeTo(0, responsivenessForDuration(2199.999, true), 1e-5);
+    assert.equal(0, responsivenessForDuration(2200, true));
+  });
+
+  test('animation_OneHundredFPS', function() {
+    var metrics = metricsForAnimation(function(animation) {
+      for (var i = 1; i < 10; ++i) {
+        animation.associatedEvents.push(tr.c.TestUtils.newAsyncSliceEx({
+          title: tr.model.helpers.IMPL_RENDERING_STATS,
+          start: i * 10,
+          end: (i * 10) + 1
+        }));
+      }
+    });
+
+    assert.closeTo(1, metrics.smoothness, 1e-4);
+    assert.closeTo(1, metrics.throughput, 1e-4);
+    assert.closeTo(1, metrics.responsiveness, 1e-3);
+  });
+
+  test('animation_OneFPS', function() {
+    // Minimum comfort is when at least max(2, frameCount/10) frames are longer
+    // than 50ms, and avgFPS <= 10.
+    // One frame-per-second causes FPS comfort = 0.
+    var metrics = metricsForAnimation(function(animation) {
+      animation.duration = 2000;
+      animation.associatedEvents.push(tr.c.TestUtils.newAsyncSliceEx({
+        title: tr.model.helpers.IMPL_RENDERING_STATS,
+        start: 0,
+        end: 1
+      }));
+      animation.associatedEvents.push(tr.c.TestUtils.newAsyncSliceEx({
+        title: tr.model.helpers.IMPL_RENDERING_STATS,
+        start: 999,
+        end: 1000
+      }));
+      animation.associatedEvents.push(tr.c.TestUtils.newAsyncSliceEx({
+        title: tr.model.helpers.IMPL_RENDERING_STATS,
+        start: 1999,
+        end: 2000
+      }));
+    });
+    assert.closeTo(1, metrics.smoothness, 1e-4);
+    assert.closeTo(0, metrics.throughput, 1e-4);
+    assert.closeTo(0.2689, metrics.responsiveness, 1e-3);
+  });
+
+  test('animation_jank', function() {
+    var metrics = metricsForAnimation(function(animation) {
+      animation.duration = 101000;
+      var timestamp = 0;
+      for (var i = 0; i < 100; ++i) {
+        timestamp += 16;
+        animation.associatedEvents.push(tr.c.TestUtils.newAsyncSliceEx({
+          title: tr.model.helpers.IMPL_RENDERING_STATS,
+          start: timestamp,
+          end: timestamp + 1
+        }));
+      }
+      timestamp += 1000;
+      animation.associatedEvents.push(tr.c.TestUtils.newAsyncSliceEx({
+        title: tr.model.helpers.IMPL_RENDERING_STATS,
+        start: timestamp,
+        end: timestamp + 1
+      }));
+    });
+
+    assert.closeTo(0, metrics.smoothness, 1e-4);
+    assert.closeTo(0, metrics.throughput, 1e-4);
+    assert.closeTo(0, metrics.responsiveness, 1e-3);
+  });
+});
+</script>
diff --git a/catapult/tracing/tracing/metrics/system_health/system_health_metrics.html b/catapult/tracing/tracing/metrics/system_health/system_health_metrics.html
new file mode 100644
index 0000000..9ed67cb
--- /dev/null
+++ b/catapult/tracing/tracing/metrics/system_health/system_health_metrics.html
@@ -0,0 +1,31 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/metrics/system_health/efficiency_metric.html">
+<link rel="import"
+      href="/tracing/metrics/system_health/responsiveness_metric.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.metrics.sh', function() {
+  function SystemHealthMetrics(valueList, model) {
+    tr.metrics.sh.ResponsivenessMetric(valueList, model);
+    tr.metrics.sh.EfficiencyMetric(valueList, model);
+  }
+
+  SystemHealthMetrics.prototype = {
+    __proto__: Function.prototype
+  };
+
+  tr.metrics.MetricRegistry.register(SystemHealthMetrics);
+
+  return {
+    SystemHealthMetrics: SystemHealthMetrics
+  };
+});
+</script>
diff --git a/catapult/tracing/tracing/metrics/system_health/utils.html b/catapult/tracing/tracing/metrics/system_health/utils.html
new file mode 100644
index 0000000..9e0d09b
--- /dev/null
+++ b/catapult/tracing/tracing/metrics/system_health/utils.html
@@ -0,0 +1,43 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<link rel="import" href="/tracing/model/user_model/user_expectation.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.metrics.sh', function() {
+  // Returns a weight for this score.
+  // score should be a number between 0 and 1 inclusive.
+  // This function is expected to be passed to tr.b.Statistics.weightedMean as
+  // its weightCallback.
+  function perceptualBlend(ir, index, score) {
+    // Lower scores are exponentially more important than higher scores
+    // due to the Peak-end rule.
+    // Other than that general rule, there is no specific reasoning behind
+    // this particular formula -- it is fairly arbitrary.
+    return Math.exp(1 - score);
+  }
+
+  function filterExpectationsByRange(irs, opt_range) {
+    var filteredExpectations = [];
+    irs.forEach(function(ir) {
+      if (!(ir instanceof tr.model.um.UserExpectation))
+        return;
+
+      if (!opt_range ||
+          opt_range.intersectsExplicitRangeExclusive(ir.start, ir.end))
+        filteredExpectations.push(ir);
+    });
+    return filteredExpectations;
+  }
+
+  return {
+    perceptualBlend: perceptualBlend,
+    filterExpectationsByRange: filterExpectationsByRange
+  };
+});
+</script>
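A small sketch of the peak-end weighting in practice, assuming tr.b.Statistics.weightedMean(values, weightCallback) behaves as the metrics above use it:

  var scores = [0.2, 0.9];
  var blended = tr.b.Statistics.weightedMean(
      scores, tr.metrics.sh.perceptualBlend);
  // Weights are exp(1 - 0.2) ~ 2.23 and exp(1 - 0.9) ~ 1.11, so the blend
  // (~0.43) sits closer to the worse score than the plain mean (0.55).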
diff --git a/catapult/tracing/tracing/metrics/tracing_metric.html b/catapult/tracing/tracing/metrics/tracing_metric.html
new file mode 100644
index 0000000..c8d8785
--- /dev/null
+++ b/catapult/tracing/tracing/metrics/tracing_metric.html
@@ -0,0 +1,125 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/metrics/metric_registry.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/value.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.metrics', function() {
+
+  function tracingMetric(valueList, model) {
+    if (!model.stats.hasEventSizesinBytes) {
+      throw new Error('Model stats does not have event size information. ' +
+                      'Please enable ImportOptions.trackDetailedModelStats.');
+    }
+
+    var eventStats = model.stats.allTraceEventStatsInTimeIntervals;
+    eventStats.sort(function(a, b) {
+      return a.timeInterval - b.timeInterval;
+    });
+
+    var maxEventCountPerSec = 0;
+    var maxEventBytesPerSec = 0;
+    var totalTraceBytes = 0;
+
+    var WINDOW_SIZE = Math.floor(1000 / model.stats.TIME_INTERVAL_SIZE_IN_MS);
+    var runningEventNumPerSec = 0;
+    var runningEventBytesPerSec = 0;
+    var start = 0;
+    var end = 0;
+    while (end < eventStats.length) {
+      var startEventStats = eventStats[start];
+      var endEventStats = eventStats[end];
+      var timeWindow =
+          endEventStats.timeInterval - startEventStats.timeInterval;
+      if (timeWindow >= WINDOW_SIZE) {
+        runningEventNumPerSec -= startEventStats.numEvents;
+        runningEventBytesPerSec -= startEventStats.totalEventSizeinBytes;
+        start++;
+        continue;
+      }
+
+      runningEventNumPerSec += endEventStats.numEvents;
+      if (maxEventCountPerSec < runningEventNumPerSec)
+        maxEventCountPerSec = runningEventNumPerSec;
+
+      runningEventBytesPerSec += endEventStats.totalEventSizeinBytes;
+      if (maxEventBytesPerSec < runningEventBytesPerSec)
+        maxEventBytesPerSec = runningEventBytesPerSec;
+
+      totalTraceBytes += endEventStats.totalEventSizeinBytes;
+
+      end++;
+    }
+
+    var stats = model.stats.allTraceEventStats;
+    var categoryStatsMap = new Map();
+    var categoryStats = [];
+    for (var i = 0; i < stats.length; i++) {
+      var categoryStat = categoryStatsMap.get(stats[i].category);
+      if (categoryStat === undefined) {
+        categoryStat = {
+          category: stats[i].category,
+          totalEventSizeinBytes: 0
+        };
+        categoryStatsMap.set(stats[i].category, categoryStat);
+        categoryStats.push(categoryStat);
+      }
+      categoryStat.totalEventSizeinBytes += stats[i].totalEventSizeinBytes;
+    }
+    var maxCategoryStats = categoryStats.reduce(function(a, b) {
+      return a.totalEventSizeinBytes < b.totalEventSizeinBytes ? b : a;
+    });
+    var maxEventBytesPerCategory = maxCategoryStats.totalEventSizeinBytes;
+    var maxCategoryName = maxCategoryStats.category;
+
+    var maxEventCountPerSecValue = new tr.v.ScalarNumeric(
+        tr.v.Unit.byName.unitlessNumber_smallerIsBetter, maxEventCountPerSec);
+    var maxEventBytesPerSecValue = new tr.v.ScalarNumeric(
+        tr.v.Unit.byName.sizeInBytes_smallerIsBetter, maxEventBytesPerSec);
+    var totalTraceBytesValue = new tr.v.ScalarNumeric(
+        tr.v.Unit.byName.sizeInBytes_smallerIsBetter, totalTraceBytes);
+
+    var diagnostics = {
+      category_with_max_event_size: {
+        name: maxCategoryName,
+        size_in_bytes: maxEventBytesPerCategory
+      }
+    };
+
+    valueList.addValue(new tr.v.NumericValue(
+        model.canonicalUrlThatCreatedThisTrace,
+        'Total trace size in bytes',
+        totalTraceBytesValue,
+        undefined, undefined, diagnostics));
+    valueList.addValue(new tr.v.NumericValue(
+        model.canonicalUrlThatCreatedThisTrace,
+        'Max number of events per second',
+        maxEventCountPerSecValue,
+        undefined, undefined, diagnostics));
+    valueList.addValue(new tr.v.NumericValue(
+        model.canonicalUrlThatCreatedThisTrace,
+        'Max event size in bytes per second',
+        maxEventBytesPerSecValue,
+        undefined, undefined, diagnostics));
+  }
+
+  tracingMetric.prototype = {
+    __proto__: Function.prototype
+  };
+
+  tr.metrics.MetricRegistry.register(tracingMetric);
+
+  return {
+    tracingMetric: tracingMetric
+  };
+
+});
+</script>
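The per-second maxima above come from a sliding window over allTraceEventStatsInTimeIntervals; a condensed, illustrative sketch of just that loop, assuming entries of the shape {timeInterval, numEvents} as used in the code above:

  function maxEventsPerWindow(intervals, windowSizeInIntervals) {
    var best = 0;
    var running = 0;
    var start = 0;
    var end = 0;
    while (end < intervals.length) {
      if (intervals[end].timeInterval - intervals[start].timeInterval >=
          windowSizeInIntervals) {
        running -= intervals[start].numEvents;  // Slide the window forward.
        start++;
        continue;
      }
      running += intervals[end].numEvents;  // Grow the window.
      best = Math.max(best, running);
      end++;
    }
    return best;
  }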
diff --git a/catapult/tracing/tracing/metrics/tracing_metric_test.html b/catapult/tracing/tracing/metrics/tracing_metric_test.html
new file mode 100644
index 0000000..5b10efb
--- /dev/null
+++ b/catapult/tracing/tracing/metrics/tracing_metric_test.html
@@ -0,0 +1,112 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/core/test_utils.html">
+<link rel="import" href="/tracing/extras/importer/trace_event_importer.html">
+<link rel="import" href="/tracing/metrics/tracing_metric.html">
+<link rel="import" href="/tracing/metrics/value_list.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+
+  function makeModel(events, opt_track) {
+    return tr.c.TestUtils.newModelWithEvents([events], {
+      trackDetailedModelStats: opt_track
+    });
+  }
+
+  function getEventStringSize(events, indices) {
+    return indices.reduce(function(sum, index) {
+      return sum + JSON.stringify(events[index]).length;
+    }, 0);
+  }
+
+  test('hasEventSizesInBytes', function() {
+    var valueList = new tr.metrics.ValueList();
+    var events = [
+      {name: 'a', args: {}, pid: 52, ts: 524, cat: 'foo', tid: 53, ph: 'B'},
+      {name: 'a', args: {}, pid: 52, ts: 560, cat: 'foo', tid: 53, ph: 'B'}
+    ];
+
+    var model = makeModel(JSON.stringify(events), false);
+    assert.isFalse(model.importOptions.trackDetailedModelStats);
+    assert.throws(function() {
+      tr.metrics.tracingMetric(valueList, model);
+    }, 'Please enable ImportOptions.trackDetailedModelStats.');
+
+    model = makeModel(JSON.stringify(events), true);
+    assert.isTrue(model.importOptions.trackDetailedModelStats);
+    tr.metrics.tracingMetric(valueList, model);
+  });
+
+  test('totalTraceSize', function() {
+    var valueList = new tr.metrics.ValueList();
+    var events = [
+      {name: 'a', args: {}, pid: 52, ts: 524, cat: 'foo', tid: 53, ph: 'B'},
+      {name: 'a', args: {}, pid: 52, ts: 560, cat: 'foo', tid: 53, ph: 'B'}
+    ];
+    var model = makeModel(JSON.stringify(events), true);
+    tr.metrics.tracingMetric(valueList, model);
+
+    var eventStringSize = getEventStringSize(events, [0, 1]);
+    var values = valueList.getValuesWithName('Total trace size in bytes');
+    assert.strictEqual(values.length, 1);
+    assert.strictEqual(values[0].numeric.value, eventStringSize);
+  });
+
+  test('maxValuePerSec', function() {
+    var ONE_SEC_IN_US = 1000000;
+    var events = [
+      {name: 'a', pid: 52, ts: 1, cat: 'foo', ph: 'B'},
+      {name: 'a', pid: 52, ts: ONE_SEC_IN_US + 1, cat: 'foo', ph: 'B'},
+      {name: 'a', pid: 52, ts: 2 * ONE_SEC_IN_US + 1, cat: 'foo', ph: 'B'},
+      {name: 'a', pid: 52, ts: 2 * ONE_SEC_IN_US + 3, cat: 'foo', ph: 'B'},
+      {name: 'a', pid: 52, ts: ONE_SEC_IN_US + 2, cat: 'foo', ph: 'B'},
+      {name: 'a', pid: 52, ts: 2 * ONE_SEC_IN_US + 2, cat: 'foo', ph: 'B'}
+    ];
+    var model = makeModel(JSON.stringify(events), true);
+    var valueList = new tr.metrics.ValueList();
+    tr.metrics.tracingMetric(valueList, model);
+
+    var maxEventCountPerSec = 3;
+    var values = valueList.getValuesWithName('Max number of events per second');
+    assert.strictEqual(values.length, 1);
+    assert.strictEqual(values[0].numeric.value, maxEventCountPerSec);
+
+    var maxEventBytesPerSec = getEventStringSize(events, [2, 3, 5]);
+    var values = valueList.getValuesWithName(
+        'Max event size in bytes per second');
+    assert.strictEqual(values.length, 1);
+    assert.strictEqual(values[0].numeric.value, maxEventBytesPerSec);
+  });
+
+  test('diagnostics', function() {
+    var valueList = new tr.metrics.ValueList();
+    var events = [
+      {name: 'a', args: {}, pid: 52, ts: 524, cat: 'foo', tid: 53, ph: 'B'},
+      {name: 'a', args: {}, pid: 52, ts: 535, cat: 'foo', tid: 53, ph: 'B'},
+      {name: 'bb', args: {}, pid: 52, ts: 546, cat: 'bar', tid: 53, ph: 'E'},
+      {name: 'a', args: {}, pid: 52, ts: 560, cat: 'foo', tid: 53, ph: 'B'},
+      {name: 'bb', args: {}, pid: 52, ts: 578, cat: 'bar', tid: 53, ph: 'E'}
+    ];
+    var model = makeModel(JSON.stringify(events), true);
+    tr.metrics.tracingMetric(valueList, model);
+    var values = valueList.valueDicts;
+    for (var i = 0; i < values.length; i++) {
+      assert.strictEqual(
+          values[i].diagnostics.category_with_max_event_size.name,
+          'foo');
+      assert.strictEqual(
+          values[i].diagnostics.category_with_max_event_size.size_in_bytes,
+          getEventStringSize(events, [0, 1, 3]));
+    }
+  });
+
+});
+</script>
diff --git a/catapult/tracing/tracing/metrics/value_list.html b/catapult/tracing/tracing/metrics/value_list.html
new file mode 100644
index 0000000..8d3bf82
--- /dev/null
+++ b/catapult/tracing/tracing/metrics/value_list.html
@@ -0,0 +1,48 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/value/numeric.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.metrics', function() {
+  function ValueList(values) {
+    if (values !== undefined)
+      this.values_ = values;
+    else
+      this.values_ = [];
+  }
+
+  ValueList.prototype = {
+    get valueDicts() {
+      return this.values_.map(function(v) { return v.asDict(); });
+    },
+
+    getValuesWithName: function(name) {
+      return this.values_.filter(function(value) {
+        return value.name.indexOf(name) > -1;
+      });
+    },
+
+    addValue: function(v) {
+      if (!(v instanceof tr.v.NumericValue)) {
+        var err = new Error('Tried to add value ' + v +
+                            ' which is non-Numeric');
+        err.name = 'ValueError';
+        throw err;
+      }
+
+      this.values_.push(v);
+    }
+  };
+
+  return {
+    ValueList: ValueList
+  };
+});
+</script>
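A minimal usage sketch of ValueList, mirroring how the metrics above feed it; the trace URL and value name here are made-up examples:

  var values = new tr.metrics.ValueList();
  var numeric = new tr.v.ScalarNumeric(
      tr.v.Unit.byName.sizeInBytes_smallerIsBetter, 1234);
  values.addValue(new tr.v.NumericValue(
      'file:///tmp/trace.json', 'example_value', numeric));
  values.getValuesWithName('example_value');  // -> [the NumericValue above]
  values.valueDicts;  // -> plain dicts, as consumed by the tests above
  // addValue throws a ValueError for anything that is not a tr.v.NumericValue.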
diff --git a/catapult/tracing/tracing/model/alert.html b/catapult/tracing/tracing/model/alert.html
index 9f5b784..53a2fa7 100644
--- a/catapult/tracing/tracing/model/alert.html
+++ b/catapult/tracing/tracing/model/alert.html
@@ -5,10 +5,10 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/tracing/base/units/time_stamp.html">
 <link rel="import" href="/tracing/model/event_info.html">
 <link rel="import" href="/tracing/model/event_set.html">
 <link rel="import" href="/tracing/model/timed_event.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <script>
 'use strict';
@@ -21,7 +21,7 @@
     this.args = opt_args || {};
     this.associatedEvents = new tr.model.EventSet(opt_associatedEvents);
     this.associatedEvents.forEach(function(event) {
-      event.associatedAlerts.push(this);
+      event.addAssociatedAlert(this);
     }, this);
   }
 
@@ -38,7 +38,7 @@
 
     get userFriendlyName() {
       return 'Alert ' + this.title + ' at ' +
-          tr.b.u.TimeStamp.format(this.start);
+          tr.v.Unit.byName.timeStampInMs.format(this.start);
     }
   };
 
diff --git a/catapult/tracing/tracing/model/async_slice.html b/catapult/tracing/tracing/model/async_slice.html
index 2377903..14143ea 100644
--- a/catapult/tracing/tracing/model/async_slice.html
+++ b/catapult/tracing/tracing/model/async_slice.html
@@ -6,8 +6,8 @@
 -->
 
 <link rel="import" href="/tracing/base/extension_registry.html">
-<link rel="import" href="/tracing/base/units/time_stamp.html">
 <link rel="import" href="/tracing/model/timed_event.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <script>
 'use strict';
@@ -40,7 +40,7 @@
     this.didNotFinish = false;
     this.important = false;
     this.subSlices = [];
-    this.parentContainer = undefined;
+    this.parentContainer_ = undefined;
 
     this.id = undefined;
     this.startThread = undefined;
@@ -75,13 +75,26 @@
       return this.title;
     },
 
+    get parentContainer() {
+      return this.parentContainer_;
+    },
+
+    set parentContainer(parentContainer) {
+      this.parentContainer_ = parentContainer;
+      for (var i = 0; i < this.subSlices.length; i++) {
+        var subSlice = this.subSlices[i];
+        if (subSlice.parentContainer === undefined)
+          subSlice.parentContainer = parentContainer;
+      }
+    },
+
     get viewSubGroupTitle() {
       return this.title;
     },
 
     get userFriendlyName() {
       return 'Async slice ' + this.title + ' at ' +
-          tr.b.u.TimeStamp.format(this.start);
+          tr.v.Unit.byName.timeStampInMs.format(this.start);
     },
 
     get stableId() {
diff --git a/catapult/tracing/tracing/model/async_slice_test.html b/catapult/tracing/tracing/model/async_slice_test.html
index 26027fd..3fadbcb 100644
--- a/catapult/tracing/tracing/model/async_slice_test.html
+++ b/catapult/tracing/tracing/model/async_slice_test.html
@@ -11,6 +11,10 @@
 'use strict';
 
 tr.b.unittest.testSuite(function() {
+  var AsyncSlice = tr.model.AsyncSlice;
+  var Process = tr.model.Process;
+  var Thread = tr.model.Thread;
+  var newAsyncSlice = tr.c.TestUtils.newAsyncSlice;
   var newAsyncSliceEx = tr.c.TestUtils.newAsyncSliceEx;
   var newFakeThread = tr.c.TestUtils.newFakeThread;
 
@@ -29,5 +33,25 @@
     assert.equal(group.stableId + '.1', sB.stableId);
     assert.equal(group.stableId + '.2', sC.stableId);
   });
+
+  test('setParentContainerForSubSlices', function() {
+    var model = new tr.Model();
+    var p1 = new Process(model, 1);
+    var t1 = new Thread(p1, 1);
+    var asyncSlice = newAsyncSlice(0, 10, t1, t1);
+    var subSlice1 = newAsyncSlice(1, 5, t1, t1);
+    var subSlice2 = newAsyncSlice(6, 9, t1, t1);
+    var subSlice3 = newAsyncSlice(2, 3, t1, t1);
+    subSlice1.subSlices.push(subSlice3);
+    asyncSlice.subSlices.push(subSlice1);
+    asyncSlice.subSlices.push(subSlice2);
+    asyncSlice.parentContainer = t1;
+    assert.equal(asyncSlice.subSlices.length, 2);
+    assert.equal(subSlice1.subSlices.length, 1);
+    assert.deepEqual(asyncSlice.parentContainer, t1);
+    assert.deepEqual(subSlice1.parentContainer, t1);
+    assert.deepEqual(subSlice2.parentContainer, t1);
+    assert.deepEqual(subSlice3.parentContainer, t1);
+  });
 });
 </script>
diff --git a/catapult/tracing/tracing/model/attribute.html b/catapult/tracing/tracing/model/attribute.html
deleted file mode 100644
index 33aea72..0000000
--- a/catapult/tracing/tracing/model/attribute.html
+++ /dev/null
@@ -1,284 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/extension_registry.html">
-
-<script>
-'use strict';
-
-/**
- * @fileoverview Provides the Attribute class.
- */
-tr.exportTo('tr.model', function() {
-
-  /**
-   * @constructor
-   */
-  function Attribute(units) {
-    this.units = units;
-
-    // AttributeInfo(s) about the attribute (e.g. information about how it was
-    // calculated).
-    this.infos = [];
-  }
-
-  Attribute.fromDictIfPossible = function(dict, opt_model) {
-    var typeInfo = Attribute.findTypeInfoMatching(function(typeInfo) {
-      return typeInfo.metadata.type === dict.type;
-    });
-
-    if (typeInfo === undefined) {
-      if (opt_model) {
-        opt_model.importWarning({
-          type: 'attribute_parse_error',
-          message: 'Unknown attribute type \'' + dict.type + '\'.'
-        });
-      }
-      return UnknownAttribute.fromDict(dict, opt_model);
-    }
-
-    return typeInfo.constructor.fromDict(dict, opt_model);
-  };
-
-  /**
-   * Find the common constructor and units of a list of attribute values. If
-   * they have different types (e.g. ScalarAttribute and UnknownAttribute) or
-   * units (e.g. 'ms' and 'Hz'), the common constructor will be
-   * UnknownAttribute and the common units will be undefined.
-   *
-   * Undefined attribute values are skipped. This function will return undefined
-   * if the list of attribute values contains no defined attribute values.
-   */
-  Attribute.findCommonTraits = function(attributes, opt_model) {
-    var commonTraits;
-    for (var i = 0; i < attributes.length; i++) {
-      var attribute = attributes[i];
-      if (attribute === undefined)
-        continue;
-
-      var attributeConstructor = attribute.constructor;
-      var attributeUnits = attribute.units;
-
-      if (commonTraits === undefined) {
-        commonTraits = {
-          constructor: attributeConstructor,
-          units: attributeUnits
-        };
-      } else if (attributeConstructor !== commonTraits.constructor) {
-        if (opt_model) {
-          opt_model.importWarning({
-            type: 'attribute_parse_error',
-            message: 'Attribute with different types: ' +
-                commonTraits.constructor + ' and ' + attributeConstructor + '.'
-          });
-        }
-        commonTraits = {
-          constructor: UnknownAttribute,
-          units: undefined
-        };
-        break;
-      } else if (attributeUnits !== commonTraits.units) {
-        if (opt_model) {
-          opt_model.importWarning({
-            type: 'attribute_parse_error',
-            message: 'Attribute with different units: ' + commonTraits.units +
-                ' and ' + attributeUnits + '.'
-          });
-        }
-        commonTraits = {
-          constructor: UnknownAttribute,
-          units: undefined
-        };
-        break;
-      }
-    }
-    return commonTraits;
-  };
-
-  /**
-   * Aggregate a list of child attribute values with an existing attribute
-   * value. The individual values can be undefined, in which case they are
-   * ignored.
-   */
-  Attribute.aggregate = function(childAttributes, existingParentAttribute,
-                                 opt_model) {
-    var definedChildAttributes = childAttributes.filter(
-        function(childAttribute) {
-      return childAttribute !== undefined;
-    });
-
-    // If all child attribute values were undefined, return the existing parent
-    // attribute value (possibly undefined).
-    var traits = Attribute.findCommonTraits(definedChildAttributes, opt_model);
-    if (traits === undefined)
-      return existingParentAttribute;
-
-    var constructor = traits.constructor;
-
-    // If the common type does not support merging child attribute values,
-    // return the existing parent attribute value (possibly undefined).
-    if (constructor.merge === undefined)
-      return existingParentAttribute;
-
-    var mergedAttribute = constructor.merge(
-        definedChildAttributes, traits.units, opt_model);
-
-    // If there is no existing parent attribute value, use the merged value
-    // (possibly undefined).
-    if (existingParentAttribute === undefined)
-      return mergedAttribute;
-
-    // Leave it up to the existing parent attribute value to decide if/how it
-    // will use the merged value (e.g. generate an import warning if the
-    // existing and merged attribute value types differ).
-    existingParentAttribute.useMergedAttribute(mergedAttribute, opt_model);
-
-    return existingParentAttribute;
-  }
-
-  Attribute.fromTraceValue = function(dict, opt_model) {
-    throw new Error('Not implemented');
-  };
-
-  Attribute.prototype.useMergedAttribute = function(mergedAttribute,
-                                                    opt_model) {
-    if (mergedAttribute.constructor !== this.constructor) {
-      if (opt_model) {
-        opt_model.importWarning({
-          type: 'attribute_parse_error',
-          message: 'Attribute with different types: ' + this.constructor +
-              ' and ' + mergedAttribute.constructor + '.'
-        });
-      }
-    } else if (mergedAttribute.units !== this.units) {
-      if (opt_model) {
-        opt_model.importWarning({
-          type: 'attribute_parse_error',
-          message: 'Attribute with different units: ' + this.units +
-              ' and ' + mergedAttribute.units + '.'
-        });
-      }
-    }
-  };
-
-  var options = new tr.b.ExtensionRegistryOptions(tr.b.BASIC_REGISTRY_MODE);
-  tr.b.decorateExtensionRegistry(Attribute, options);
-
-  Attribute.addEventListener('will-register', function(e) {
-    if (!e.typeInfo.constructor.hasOwnProperty('fromDict'))
-      throw new Error('Attributes must have fromDict method');
-
-    if (!e.typeInfo.metadata.type)
-      throw new Error('Attributes must provide type');
-
-    if (e.typeInfo.constructor.prototype.constructor !== e.typeInfo.constructor)
-      throw new Error('Attribute prototypes must provide constructor.');
-  });
-
-  /**
-   * @constructor
-   */
-  function ScalarAttribute(units, value) {
-    Attribute.call(this, units);
-    this.value = value;
-  }
-
-  ScalarAttribute.fromDict = function(dict) {
-    return new ScalarAttribute(dict.units, parseInt(dict.value, 16));
-  };
-
-  ScalarAttribute.merge = function(childAttributes, units) {
-    var sum = 0;
-    childAttributes.forEach(function(childAttribute) {
-      sum += childAttribute.value;
-    });
-    return new ScalarAttribute(units, sum);
-  }
-
-  ScalarAttribute.prototype.__proto__ = Attribute.prototype;
-
-  Attribute.register(ScalarAttribute, {type: 'scalar'});
-
-  /**
-   * @constructor
-   */
-  function StringAttribute(units, value) {
-    Attribute.call(this, units);
-    this.value = value;
-  }
-
-  StringAttribute.fromDict = function(dict) {
-    return new StringAttribute(dict.units, dict.value);
-  };
-
-  Attribute.register(StringAttribute, {type: 'string'});
-
-  /**
-   * @constructor
-   */
-  function UnknownAttribute(units, opt_value) {
-    Attribute.call(this, units, opt_value);
-    this.value = opt_value;
-  }
-
-  UnknownAttribute.fromDict = function(dict) {
-    return new UnknownAttribute(dict.units);
-  };
-
-  UnknownAttribute.prototype.__proto__ = Attribute.prototype;
-
-  /**
-   * @constructor
-   */
-  function AttributeInfo(type, message) {
-    this.type = type;
-    this.message = message;
-  }
-
-  /**
-   * The type of AttributeInfo.
-   * @enum
-   */
-  var AttributeInfoType = {
-    // Generic information (e.g. how the attribute was calculated).
-    INFORMATION: 0,
-
-    // Warning (e.g. inconsistent attribute values provided).
-    WARNING: 1,
-
-    // Attribute source (e.g. attribute refers to an older dump's attribute).
-    LINK: 2,
-
-    // Corresponding memory allocator dump owns another MAD.
-    // TODO(petrcermak): Figure out if there's a better place to store this.
-    MEMORY_OWNER: 3,
-
-    // Corresponding memory allocator dump is owned by another MAD.
-    // TODO(petrcermak): Figure out if there's a better place to store this.
-    MEMORY_OWNED: 4,
-
-    // Overall value (e.g. peak value since start process).
-    OVERALL_VALUE: 5,
-
-    // Recent value (e.g. peak value since the previous memory dump).
-    RECENT_VALUE: 6,
-
-    // The allocator has an associated memory heap dump.
-    // TODO(petrcermak): Move this into the UI.
-    HAS_HEAP_DUMP: 7
-  };
-
-  return {
-    Attribute: Attribute,
-    ScalarAttribute: ScalarAttribute,
-    StringAttribute: StringAttribute,
-    UnknownAttribute: UnknownAttribute,
-    AttributeInfo: AttributeInfo,
-    AttributeInfoType: AttributeInfoType
-  };
-});
-</script>
diff --git a/catapult/tracing/tracing/model/attribute_test.html b/catapult/tracing/tracing/model/attribute_test.html
deleted file mode 100644
index 005d4a5..0000000
--- a/catapult/tracing/tracing/model/attribute_test.html
+++ /dev/null
@@ -1,247 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/core/test_utils.html">
-<link rel="import" href="/tracing/model/attribute.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  var Attribute = tr.model.Attribute;
-  var ScalarAttribute = tr.model.ScalarAttribute;
-  var StringAttribute = tr.model.StringAttribute;
-  var UnknownAttribute = tr.model.UnknownAttribute;
-
-  test('findCommonTraits', function() {
-    // Empty list.
-    var traits = Attribute.findCommonTraits([]);
-    assert.isUndefined(traits);
-
-    // List containing no defined attribute values.
-    var traits = Attribute.findCommonTraits([undefined, undefined]);
-    assert.isUndefined(traits);
-
-    // Singleton list.
-    var traits = Attribute.findCommonTraits([
-      new ScalarAttribute('ms', 24)
-    ]);
-    assert.strictEqual(traits.constructor, ScalarAttribute);
-    assert.strictEqual(traits.units, 'ms');
-
-    // Long list.
-    var traits = Attribute.findCommonTraits([
-      undefined,
-      new ScalarAttribute('km', 15),
-      new ScalarAttribute('km', 16),
-      undefined,
-      new ScalarAttribute('km', 17),
-      undefined
-    ]);
-    assert.strictEqual(traits.constructor, ScalarAttribute);
-    assert.strictEqual(traits.units, 'km');
-
-    // List containing attribute values of different types.
-    var traits = Attribute.findCommonTraits([
-      new ScalarAttribute('km/h', 15),
-      undefined,
-      new StringAttribute('km/h', 'speed-of-light')
-    ]);
-    assert.strictEqual(traits.constructor, UnknownAttribute);
-    assert.isUndefined(traits.units);
-
-    // List containing attribute values with different units.
-    var traits = Attribute.findCommonTraits([
-      new ScalarAttribute('m', 10),
-      new ScalarAttribute('ft', 10)
-    ]);
-    assert.strictEqual(traits.constructor, UnknownAttribute);
-    assert.isUndefined(traits.units);
-  });
-
-  test('aggregate', function() {
-    // No parent or children.
-    var aggregatedAttr = Attribute.aggregate([], undefined);
-    assert.isUndefined(aggregatedAttr);
-
-    // No parent, children with a single type.
-    var aggregatedAttr = Attribute.aggregate([
-      undefined,
-      new ScalarAttribute('bytes', 128),
-      undefined,
-      new ScalarAttribute('bytes', 64),
-      undefined
-    ], undefined);
-    assert.isDefined(aggregatedAttr);
-    assert.equal(aggregatedAttr.value, 192);
-    assert.equal(aggregatedAttr.units, 'bytes');
-    assert.instanceOf(aggregatedAttr, ScalarAttribute);
-
-    // No parent, children with multiple types.
-    var aggregatedAttr = Attribute.aggregate([
-      new StringAttribute('Hz', 128),
-      new ScalarAttribute('Hz', 64)
-    ], undefined);
-    assert.isUndefined(aggregatedAttr);
-
-    // No parent, children with multiple units.
-    var aggregatedAttr = Attribute.aggregate([
-      new ScalarAttribute('s', 10),
-      new ScalarAttribute('Hz', 0.1)
-    ], undefined);
-    assert.isUndefined(aggregatedAttr);
-
-    // No parent, children which do not support merging.
-    var aggregatedAttr = Attribute.aggregate([
-      new StringAttribute('items', 'a piece of text'),
-      new StringAttribute('items', 'another piece of text')
-    ], undefined);
-    assert.isUndefined(aggregatedAttr);
-
-    // Defined parent, no children.
-    var aggregatedAttr = Attribute.aggregate([], new ScalarAttribute('C', -12));
-    assert.isDefined(aggregatedAttr);
-    assert.equal(aggregatedAttr.value, -12);
-    assert.equal(aggregatedAttr.units, 'C');
-    assert.instanceOf(aggregatedAttr, ScalarAttribute);
-
-    // Defined parent, children with the same type.
-    var aggregatedAttr = Attribute.aggregate([
-      new ScalarAttribute('W', 110),
-      new ScalarAttribute('W', 13)
-    ], new ScalarAttribute('W', -123));
-    assert.isDefined(aggregatedAttr);
-    assert.equal(aggregatedAttr.value, -123);
-    assert.equal(aggregatedAttr.units, 'W');
-    assert.instanceOf(aggregatedAttr, ScalarAttribute);
-
-    // Defined parent, children with a different type.
-    var aggregatedAttr = Attribute.aggregate([
-      new StringAttribute('colors', 640),
-      new StringAttribute('colors', 640)
-    ], new ScalarAttribute('colors', -1234));
-    assert.isDefined(aggregatedAttr);
-    assert.equal(aggregatedAttr.value, -1234);
-    assert.equal(aggregatedAttr.units, 'colors');
-    assert.instanceOf(aggregatedAttr, ScalarAttribute);
-
-    // Defined parent, children with multiple types.
-    var aggregatedAttr = Attribute.aggregate([
-      new ScalarAttribute('mm', 999),
-      new StringAttribute('mm', 640)
-    ], new ScalarAttribute('mm', -12345));
-    assert.isDefined(aggregatedAttr);
-    assert.equal(aggregatedAttr.value, -12345);
-    assert.equal(aggregatedAttr.units, 'mm');
-    assert.instanceOf(aggregatedAttr, ScalarAttribute);
-
-    // Defined parent, children which do not support merging.
-    var aggregatedAttr = Attribute.aggregate([
-      new StringAttribute('m', 'X'),
-      new StringAttribute('m', 'Y')
-    ], new StringAttribute('m', 'Z'));
-    assert.isDefined(aggregatedAttr);
-    assert.equal(aggregatedAttr.value, 'Z');
-    assert.equal(aggregatedAttr.units, 'm');
-    assert.instanceOf(aggregatedAttr, StringAttribute);
-  });
-
-  test('useMergedAttribute', function() {
-    var importWarningCallbackFired;
-    var model = {
-      importWarning: function() {
-        importWarningCallbackFired = true;
-      }
-    };
-
-    // Same type.
-    var attr = new ScalarAttribute('C', 42);
-    importWarningCallbackFired = false;
-    attr.useMergedAttribute(new ScalarAttribute('C', 24), model);
-    assert.isFalse(importWarningCallbackFired);
-
-    // Different type.
-    var attr = new ScalarAttribute('C', 42);
-    importWarningCallbackFired = false;
-    attr.useMergedAttribute(new UnknownAttribute('C'), model);
-    assert.isTrue(importWarningCallbackFired);
-
-    // Different units.
-    var attr = new ScalarAttribute('C', 42);
-    importWarningCallbackFired = false;
-    attr.useMergedAttribute(new ScalarAttribute('F', 75.2), model);
-    assert.isTrue(importWarningCallbackFired);
-  });
-
-  test('scalar_construct', function() {
-    var attr = new ScalarAttribute('kHz', 1024);
-    assert.equal(attr.value, 1024);
-    assert.equal(attr.units, 'kHz');
-  });
-
-  test('scalar_fromDict', function() {
-    var attr = Attribute.fromDictIfPossible({
-      type: 'scalar',
-      units: 'kHz',
-      value: '400'
-    });
-    assert.isDefined(attr);
-    assert.equal(attr.value, 1024);
-    assert.equal(attr.units, 'kHz');
-    assert.instanceOf(attr, ScalarAttribute);
-  });
-
-  test('scalar_merge', function() {
-    var mergedAttr = ScalarAttribute.merge([
-      new ScalarAttribute('objects', 10),
-      new ScalarAttribute('objects', 20),
-      new ScalarAttribute('objects', -3)
-    ], 'objects');
-    assert.isDefined(mergedAttr);
-    assert.equal(mergedAttr.value, 27);
-    assert.equal(mergedAttr.units, 'objects');
-    assert.instanceOf(mergedAttr, ScalarAttribute);
-  });
-
-  test('string_construct', function() {
-    var attr = new StringAttribute('C', 'absolute zero');
-    assert.equal(attr.value, 'absolute zero');
-    assert.equal(attr.units, 'C');
-  });
-
-  test('string_fromDict', function() {
-    var attr = Attribute.fromDictIfPossible({
-      type: 'string',
-      units: 'm/s',
-      value: 'almost zero'
-    });
-    assert.isDefined(attr);
-    assert.equal(attr.value, 'almost zero');
-    assert.equal(attr.units, 'm/s');
-    assert.instanceOf(attr, StringAttribute);
-  });
-
-  test('unknown_construct', function() {
-    var attr = new UnknownAttribute('ml');
-    assert.equal(attr.units, 'ml');
-  });
-
-  test('unknown_fromDict', function() {
-    // Missing type.
-    var attr = Attribute.fromDictIfPossible({units: 'F'});
-    assert.isDefined(attr);
-    assert.equal(attr.units, 'F');
-    assert.instanceOf(attr, UnknownAttribute);
-
-    // Non-existent type.
-    var attr = Attribute.fromDictIfPossible({type: 'hashmap'});
-    assert.isDefined(attr);
-    assert.isUndefined(attr.units);
-    assert.instanceOf(attr, UnknownAttribute);
-  });
-});
-</script>
diff --git a/catapult/tracing/tracing/model/clock_sync_manager.html b/catapult/tracing/tracing/model/clock_sync_manager.html
new file mode 100644
index 0000000..92f6d84
--- /dev/null
+++ b/catapult/tracing/tracing/model/clock_sync_manager.html
@@ -0,0 +1,299 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/base/iteration_helpers.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.model', function() {
+  var ClockDomainId = {
+    BATTOR: 'battor',
+    CHROME: 'chrome'
+  };
+
+  /**
+   * A ClockSyncManager holds clock sync markers and uses them to shift
+   * timestamps from agents' clock domains onto the model's clock domain.
+   *
+   * In this context, a "clock domain" is a single perspective on the passage
+   * of time. A single computer can have multiple clock domains because it
+   * can have multiple methods of retrieving a timestamp (e.g.
+   * clock_gettime(CLOCK_MONOTONIC) and clock_gettime(CLOCK_REALTIME) on Linux).
+   * Another common reason for multiple clock domains within a single trace
+   * is that traces can span devices (e.g. a laptop collecting a Chrome trace
+   * can have its power consumption recorded by a second device and the two
+   * traces can be viewed alongside each other).
+   *
+   * For more information on how to synchronize multiple time domains using this
+   * method, see: http://bit.ly/1OVkqju.
+   *
+   * @constructor
+   */
+  function ClockSyncManager() {
+    this.connectorBySyncId_ = {};
+    this.modelDomainId_ = undefined;
+    this.modelTimeTransformerByDomainId_ = undefined;
+  }
+
+  ClockSyncManager.prototype = {
+    /**
+     * Adds a clock sync marker to the list of known markers.
+     *
+     * @param {string} domainId The clock domain that the marker is in.
+     * @param {string} syncId The identifier shared by both sides of the clock
+     *                 sync marker.
+     * @param {number} startTs The time (in ms) at which the sync started.
+     * @param {number=} opt_endTs The time (in ms) at which the sync ended. If
+     *                 unspecified, it's assumed to be the same as the start,
+     *                 indicating an instantaneous sync.
+     */
+    addClockSyncMarker: function(domainId, syncId, startTs, opt_endTs) {
+      if (tr.b.dictionaryValues(ClockDomainId).indexOf(domainId) < 0) {
+        throw new Error('"' + domainId + '" is not in the list of known ' +
+            'clock domain IDs.');
+      }
+
+      if (this.modelDomainId_ !== undefined) {
+        throw new Error('Cannot add new clock sync markers after getting ' +
+            'a model time transformer.');
+      }
+
+      var marker = new ClockSyncMarker(domainId, startTs, opt_endTs);
+
+      var connector = this.connectorBySyncId_[syncId];
+      if (connector === undefined) {
+        this.connectorBySyncId_[syncId] = new ClockSyncConnector(marker);
+        return;
+      }
+
+      if (connector.marker2 !== undefined) {
+        throw new Error('Clock sync with ID "' + syncId + '" is already ' +
+            'complete - cannot add a third clock sync marker to it.');
+      }
+
+      if (connector.marker1.domainId === domainId)
+        throw new Error('A clock domain cannot sync with itself.');
+
+      if (this.getConnectorBetween_(connector.marker1.domainId, domainId) !==
+          undefined) {
+        throw new Error('Cannot add multiple connectors between the same ' +
+            'clock domains.');
+      }
+
+      connector.marker2 = marker;
+    },
+
+    /**
+     * Returns a function that, given a timestamp in the specified clock domain,
+     * returns a timestamp in the model's clock domain.
+     *
+     * NOTE: All clock sync markers should be added before calling this function
+     * for the first time. This is because the first time that this function is
+     * called, a model clock domain is selected. This clock domain must have
+     * syncs connecting it with all other clock domains. If multiple clock
+     * domains are viable candidates, the one whose clock domain ID comes
+     * first alphabetically is selected.
+     */
+    getModelTimeTransformer: function(domainId) {
+      if (this.modelTimeTransformerByDomainId_ === undefined)
+        this.buildModelTimeTransformerMap_();
+
+      var transformer = this.modelTimeTransformerByDomainId_[domainId];
+      if (transformer === undefined) {
+        throw new Error('No clock sync markers exist pairing clock domain "' +
+            domainId + '" ' + 'with model clock domain "' +
+            this.modelDomainId_ + '".');
+      }
+
+      return transformer;
+    },
+
+    /**
+     * Selects a model clock domain and builds the map of transformers to that
+     * domain. If no clock domains are viable candidates, an error is thrown.
+     */
+    buildModelTimeTransformerMap_: function() {
+      var completeConnectorsByDomainId =
+          this.getCompleteConnectorsByDomainId_();
+      var uniqueClockDomainIds =
+          tr.b.dictionaryKeys(completeConnectorsByDomainId);
+
+      // If there are |n| unique clock domains, then the model clock domain
+      // is the first one alphabetically that's connected to the |n-1| other
+      // clock domains.
+      uniqueClockDomainIds.sort();
+      var isFullyConnected = function(domainId) {
+        return completeConnectorsByDomainId[domainId].length ===
+            uniqueClockDomainIds.length - 1;
+      };
+      this.modelDomainId_ =
+          tr.b.findFirstInArray(uniqueClockDomainIds, isFullyConnected);
+
+      if (this.modelDomainId_ === undefined) {
+        throw new Error('Unable to select a master clock domain because no ' +
+              'clock domain is directly connected to all others.');
+      }
+
+      this.modelTimeTransformerByDomainId_ = {};
+      this.modelTimeTransformerByDomainId_[this.modelDomainId_] = tr.b.identity;
+
+      var modelConnectors = completeConnectorsByDomainId[this.modelDomainId_];
+      for (var i = 0; i < modelConnectors.length; i++) {
+        var conn = modelConnectors[i];
+        if (conn.marker1.domainId === this.modelDomainId_) {
+          this.modelTimeTransformerByDomainId_[conn.marker2.domainId] =
+              conn.getTransformer(conn.marker2.domainId, conn.marker1.domainId);
+        } else {
+          this.modelTimeTransformerByDomainId_[conn.marker1.domainId] =
+              conn.getTransformer(conn.marker1.domainId, conn.marker2.domainId);
+        }
+      }
+    },
+
+    /**
+     * Returns a map from clock domain ID to the complete connectors linked
+     * to that clock domain.
+     */
+    getCompleteConnectorsByDomainId_: function() {
+      var completeConnectorsByDomainId = {};
+      for (var syncId in this.connectorBySyncId_) {
+        var conn = this.connectorBySyncId_[syncId];
+
+        var domain1 = conn.marker1.domainId;
+        if (completeConnectorsByDomainId[domain1] === undefined)
+          completeConnectorsByDomainId[domain1] = [];
+
+        if (conn.marker2 === undefined)
+          continue;
+
+        var domain2 = conn.marker2.domainId;
+        if (completeConnectorsByDomainId[domain2] === undefined)
+          completeConnectorsByDomainId[domain2] = [];
+
+        completeConnectorsByDomainId[domain1].push(conn);
+        completeConnectorsByDomainId[domain2].push(conn);
+      }
+
+      return completeConnectorsByDomainId;
+    },
+
+    /**
+     * Returns the connector between the specified domains (or undefined if no
+     * such connector exists).
+     */
+    getConnectorBetween_: function(domain1Id, domain2Id) {
+      for (var syncId in this.connectorBySyncId_) {
+        var connector = this.connectorBySyncId_[syncId];
+        if (connector.isBetween(domain1Id, domain2Id))
+          return connector;
+      }
+
+      return undefined;
+    }
+  };
+
+  /**
+   * A ClockSyncMarker is an internal entity that represents a marker in a
+   * trace log indicating that a clock sync happened at a specified time.
+   *
+   * If no end timestamp argument is specified in the constructor, it's assumed
+   * that the end timestamp is the same as the start (i.e. the clock sync
+   * was instantaneous).
+   */
+  function ClockSyncMarker(domainId, startTs, opt_endTs) {
+    this.domainId = domainId;
+    this.startTs = startTs;
+    this.endTs = opt_endTs === undefined ? startTs : opt_endTs;
+  }
+
+  ClockSyncMarker.prototype = {
+    get ts() { return (this.startTs + this.endTs) / 2; }
+  };
+
+  /**
+   * A ClockSyncConnector is an internal entity that gives us the ability to
+   * compare timestamps taken in two distinct clock domains. It's formed from
+   * two clock sync markers issued at (approximately) the same time in
+   * two separate trace logs.
+   *
+   * @constructor
+   */
+  function ClockSyncConnector(opt_marker1, opt_marker2) {
+    this.marker1 = opt_marker1;
+    this.marker2 = opt_marker2;
+  }
+
+  ClockSyncConnector.prototype = {
+    /**
+     * Returns a function that transforms timestamps from one clock domain to
+     * another. If this connector isn't able to do this, an error is thrown.
+     */
+    getTransformer: function(fromDomainId, toDomainId) {
+      if (!this.isBetween(fromDomainId, toDomainId))
+        throw new Error('This connector cannot perform this transformation.');
+
+      var fromMarker, toMarker;
+      if (this.marker1.domainId === fromDomainId) {
+        fromMarker = this.marker1;
+        toMarker = this.marker2;
+      } else {
+        fromMarker = this.marker2;
+        toMarker = this.marker1;
+      }
+
+      var fromTs = fromMarker.ts, toTs = toMarker.ts;
+
+      // TODO(charliea): Usually, we estimate that the clock sync marker is
+      // issued by the agent exactly in the middle of the controller's start and
+      // end timestamps. However, there's currently a bug in the Chrome serial
+      // code that's making the clock sync ack for BattOr take much longer to
+      // read than it should (by about 8ms). This is causing the above estimate
+      // of the controller's sync timestamp to be off by a substantial enough
+      // amount that it makes traces hard to read. For now, make an exception
+      // for BattOr and just use the controller's start timestamp as the sync
+      // time. In the medium term, we should fix the Chrome serial code in order
+      // to remove this special logic and get an even more accurate estimate.
+      if (fromDomainId == ClockDomainId.BATTOR &&
+          toDomainId == ClockDomainId.CHROME) {
+        toTs = toMarker.startTs;
+      } else if (fromDomainId == ClockDomainId.CHROME &&
+          toDomainId == ClockDomainId.BATTOR) {
+        fromTs = fromMarker.startTs;
+      }
+
+      var tsShift = toTs - fromTs;
+      return function(ts) { return ts + tsShift; };
+    },
+
+    /**
+     * Returns true if this connector is between the specified clock domains.
+     */
+    isBetween: function(domain1Id, domain2Id) {
+      if (this.marker1 === undefined || this.marker2 === undefined)
+        return false;
+
+      if (this.marker1.domainId === domain1Id &&
+          this.marker2.domainId === domain2Id) {
+        return true;
+      }
+
+      if (this.marker1.domainId === domain2Id &&
+          this.marker2.domainId === domain1Id) {
+        return true;
+      }
+
+      return false;
+    }
+  };
+
+  return {
+    ClockDomainId: ClockDomainId,
+    ClockSyncManager: ClockSyncManager
+  };
+});
+</script>
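A minimal usage sketch of the ClockSyncManager added above, in plain JavaScript mirroring the tests in the next file (the sync ID and timestamps are illustrative only):

```js
// Two agents emit markers for the same sync ID in their own clock domains.
var mgr = new tr.model.ClockSyncManager();

// Chrome saw the sync between t=100ms and t=200ms on its clock.
mgr.addClockSyncMarker(tr.model.ClockDomainId.CHROME, 'sync1', 100, 200);
// BattOr saw the same sync instantaneously at t=350ms on its clock.
mgr.addClockSyncMarker(tr.model.ClockDomainId.BATTOR, 'sync1', 350);

// The first getModelTimeTransformer() call picks the model clock domain
// (the alphabetically-first domain connected to all others, here 'battor')
// and returns a function that shifts timestamps by (toTs - fromTs).
var chromeToModel = mgr.getModelTimeTransformer(tr.model.ClockDomainId.CHROME);
chromeToModel(100);  // => 350: the BattOr/Chrome special case above uses
                     // Chrome's start timestamp (100) as the sync point.
```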
diff --git a/catapult/tracing/tracing/model/clock_sync_manager_test.html b/catapult/tracing/tracing/model/clock_sync_manager_test.html
new file mode 100644
index 0000000..77306f7
--- /dev/null
+++ b/catapult/tracing/tracing/model/clock_sync_manager_test.html
@@ -0,0 +1,220 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/model/clock_sync_manager.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+
+  var ClockDomainId = tr.model.ClockDomainId;
+  var ClockSyncManager = tr.model.ClockSyncManager;
+
+  var testOptions = {
+    setUp: function() {
+      // Add a few testing clock domains to the list of permissible domains.
+      ClockDomainId.DOMAIN_1 = 'domain1';
+      ClockDomainId.DOMAIN_2 = 'domain2';
+      ClockDomainId.DOMAIN_3 = 'domain3';
+      ClockDomainId.DOMAIN_4 = 'domain4';
+      ClockDomainId.DOMAIN_5 = 'domain5';
+    },
+
+    tearDown: function() {
+      delete ClockDomainId.DOMAIN_1;
+      delete ClockDomainId.DOMAIN_2;
+      delete ClockDomainId.DOMAIN_3;
+      delete ClockDomainId.DOMAIN_4;
+      delete ClockDomainId.DOMAIN_5;
+    }
+  };
+
+  test('addClockSyncMarker_throwsWithUnknownClockDomain', function() {
+    var mgr = new ClockSyncManager();
+
+    assert.throws(function() {
+      mgr.addClockSyncMarker('unknown', 'sync1', 100, 200);
+    }, '"unknown" is not in the list of known clock domain IDs.');
+  }, testOptions);
+
+  test('addClockSyncMarker_throwsWhenSelfSyncing', function() {
+    var mgr = new ClockSyncManager();
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_1, 'sync1', 100, 200);
+
+    assert.throws(function() {
+      mgr.addClockSyncMarker(ClockDomainId.DOMAIN_1, 'sync1', 200, 300);
+    }, 'A clock domain cannot sync with itself.');
+  }, testOptions);
+
+  test('addClockSyncMarker_throwsWhenAddingThirdSyncMarkerToSync', function() {
+    var mgr = new ClockSyncManager();
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_1, 'sync1', 100);
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_2, 'sync1', 100);
+
+    assert.throws(function() {
+      mgr.addClockSyncMarker(ClockDomainId.DOMAIN_3, 'sync1', 100);
+    }, 'Clock sync with ID "sync1" is already complete - cannot add a third ' +
+        'clock sync marker to it.');
+  }, testOptions);
+
+  test('addClockSyncMarker_throwsWhenAddingDuplicateConnectors', function() {
+    var mgr = new ClockSyncManager();
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_1, 'sync1', 100);
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_2, 'sync1', 100);
+
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_1, 'sync2', 100);
+
+    assert.throws(function() {
+      mgr.addClockSyncMarker(ClockDomainId.DOMAIN_2, 'sync2', 100);
+    }, 'Cannot add multiple connectors between the same clock domains.');
+  }, testOptions);
+
+  test('addClockSyncMarker_throwsAfterGetModelTimeTransformer', function() {
+    var mgr = new ClockSyncManager();
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_1, 'sync1', 100);
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_2, 'sync1', 100);
+
+    mgr.getModelTimeTransformer(ClockDomainId.DOMAIN_1);
+
+    assert.throws(function() {
+      mgr.addClockSyncMarker(ClockDomainId.DOMAIN_2, 'sync2', 100);
+    }, 'Cannot add new clock sync markers after getting a model time ' +
+        'transformer.');
+  }, testOptions);
+
+  test('getModelTimeTransformer_oneIncompleteSync', function() {
+    var mgr = new ClockSyncManager();
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_1, 'sync1', 100);
+
+    assert.strictEqual(
+        mgr.getModelTimeTransformer(ClockDomainId.DOMAIN_1)(100), 100);
+  }, testOptions);
+
+  test('getModelTimeTransformer_oneCompleteSync', function() {
+    var mgr = new ClockSyncManager();
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_1, 'sync1', 100);
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_2, 'sync1', 350);
+
+    assert.strictEqual(
+        mgr.getModelTimeTransformer(ClockDomainId.DOMAIN_2)(350), 100);
+    assert.strictEqual(
+        mgr.getModelTimeTransformer(ClockDomainId.DOMAIN_1)(100), 100);
+  }, testOptions);
+
+  test('getModelTimeTransformer_twoCompleteSyncs', function() {
+    var mgr = new ClockSyncManager();
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_1, 'sync1', 100);
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_2, 'sync1', 350);
+
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_1, 'sync2', 200);
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_3, 'sync2', 250);
+
+    assert.strictEqual(
+        mgr.getModelTimeTransformer(ClockDomainId.DOMAIN_2)(350), 100);
+    assert.strictEqual(
+        mgr.getModelTimeTransformer(ClockDomainId.DOMAIN_1)(100), 100);
+    assert.strictEqual(
+        mgr.getModelTimeTransformer(ClockDomainId.DOMAIN_3)(250), 200);
+  }, testOptions);
+
+  test('getModelTimeTransformer_twoSyncMarkersWithEndTs', function() {
+    var mgr = new ClockSyncManager();
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_1, 'sync1', 100, 200);
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_2, 'sync1', 350);
+
+    assert.strictEqual(
+        mgr.getModelTimeTransformer(ClockDomainId.DOMAIN_2)(350), 150);
+    assert.strictEqual(
+        mgr.getModelTimeTransformer(ClockDomainId.DOMAIN_1)(150), 150);
+  }, testOptions);
+
+  test('getModelTimeTransformer_battorSyncUsesChromeStartTs_battorMaster',
+      function() {
+    var mgr = new ClockSyncManager();
+    mgr.addClockSyncMarker(ClockDomainId.CHROME, 'sync1', 100, 200);
+    mgr.addClockSyncMarker(ClockDomainId.BATTOR, 'sync1', 350);
+
+    assert.strictEqual(
+        mgr.getModelTimeTransformer(ClockDomainId.CHROME)(100),
+        350);
+  }, testOptions);
+
+  test('getModelTimeTransformer_battorSyncUsesChromeStartTs_chromeMaster',
+      function() {
+    var mgr = new ClockSyncManager();
+    mgr.addClockSyncMarker(ClockDomainId.CHROME, 'sync1', 100, 200);
+    mgr.addClockSyncMarker(ClockDomainId.BATTOR, 'sync1', 350);
+
+    // We have to add another clock domain attached to chrome to guarantee
+    // that the manager chooses chrome as the master clock domain and not
+    // battor, which it would usually select because battor comes first
+    // alphabetically and they'd both be connected to one other domain.
+    mgr.addClockSyncMarker(ClockDomainId.CHROME, 'sync2', 0, 50);
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_1, 'sync2', 0);
+
+    assert.strictEqual(
+        mgr.getModelTimeTransformer(ClockDomainId.BATTOR)(350),
+        100);
+  }, testOptions);
+
+  test('getModelTimeTransformer_throwsWithNoAgentMarker', function() {
+    var mgr = new ClockSyncManager();
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_1, 'sync1', 100);
+
+    assert.throws(function() {
+      mgr.getModelTimeTransformer(ClockDomainId.DOMAIN_2);
+    }, 'No clock sync markers exist pairing clock domain "domain2" with model' +
+        ' clock domain "domain1".');
+  }, testOptions);
+
+  test('getModelTimeTransformer_throwsWithTwoDistinctGraphs', function() {
+    var mgr = new ClockSyncManager();
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_1, 'sync1', 100);
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_2, 'sync1', 100);
+
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_1, 'sync2', 100);
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_3, 'sync2', 100);
+
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_4, 'sync3', 100);
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_5, 'sync3', 100);
+
+    assert.throws(function() {
+      mgr.getModelTimeTransformer(ClockDomainId.DOMAIN_5);
+    }, 'Unable to select a master clock domain because no clock domain is ' +
+        'directly connected to all others.');
+  }, testOptions);
+
+  test('getModelTimeTransformer_throwsWithIndirectlyConnectedGraph',
+      function() {
+    var mgr = new ClockSyncManager();
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_1, 'sync1', 100);
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_2, 'sync1', 100);
+
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_2, 'sync2', 100);
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_3, 'sync2', 100);
+
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_3, 'sync3', 100);
+    mgr.addClockSyncMarker(ClockDomainId.DOMAIN_4, 'sync3', 100);
+
+    assert.throws(function() {
+      mgr.getModelTimeTransformer(ClockDomainId.DOMAIN_1);
+    }, 'Unable to select a master clock domain because no clock domain is ' +
+        'directly connected to all others.');
+  }, testOptions);
+
+  test('getModelTimeTransformer_throwsWithNoClockSyncMarkers', function() {
+    var mgr = new ClockSyncManager();
+
+    assert.throws(function() {
+      mgr.getModelTimeTransformer(ClockDomainId.DOMAIN_1);
+    }, 'Unable to select a master clock domain because no clock domain is ' +
+        'directly connected to all others.');
+  }, testOptions);
+});
+</script>
diff --git a/catapult/tracing/tracing/model/clock_sync_record.html b/catapult/tracing/tracing/model/clock_sync_record.html
new file mode 100644
index 0000000..6bc3308
--- /dev/null
+++ b/catapult/tracing/tracing/model/clock_sync_record.html
@@ -0,0 +1,87 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/base/base.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.model', function() {
+  /**
+   * A record that indicates how times in one clock domain map to another.
+   *
+   * @constructor
+   * @param {string} syncId The ID of the clock sync used to find the
+   *        corresponding marker in another trace.
+   * @param {number} start The timestamp at which the clock sync started.
+   * @param {object} args Any additional arguments.
+   */
+  function ClockSyncRecord(syncId, start, args) {
+    this.syncId_ = syncId;
+    this.start_ = start;
+    this.args_ = args;
+  };
+
+  ClockSyncRecord.prototype = {
+    get syncId() {
+      return this.syncId_;
+    },
+
+    get start() {
+      return this.start_;
+    },
+
+    set start(value) {
+      this.start_ = value;
+    },
+
+    get args() {
+      return this.args_;
+    }
+  };
+
+  /**
+   * A clock sync record that is recorded instantaneously.
+   */
+  function InstantClockSyncRecord(syncId, start, args) {
+    ClockSyncRecord.call(this, syncId, start, args);
+  };
+
+  InstantClockSyncRecord.prototype = {
+    __proto__: ClockSyncRecord.prototype
+  };
+
+  /**
+   * A clock sync record that requires an external call to record.
+   *
+   * This type of clock sync record needs to be treated differently because the
+   * clock sync could actually have been recorded any time in
+   * [start, start + duration].
+   */
+  function PingPongClockSyncRecord(syncId, start, duration, args) {
+    ClockSyncRecord.call(this, syncId, start, args);
+    this.duration_ = duration;
+  };
+
+  PingPongClockSyncRecord.prototype = {
+    __proto__: ClockSyncRecord.prototype,
+
+    get duration() {
+      return this.duration_;
+    },
+
+    set duration(value) {
+      this.duration_ = value;
+    },
+  };
+
+  return {
+    InstantClockSyncRecord: InstantClockSyncRecord,
+    PingPongClockSyncRecord: PingPongClockSyncRecord
+  };
+});
+</script>
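A short sketch of constructing the two record types exported above (sync IDs, timestamps and args are illustrative only):

```js
// Instantaneous sync: recorded at a single point in time.
var instant = new tr.model.InstantClockSyncRecord('sync1', 1000, {});
instant.syncId;  // 'sync1'
instant.start;   // 1000

// Ping-pong sync: the sync could have happened anywhere in
// [start, start + duration], so the duration is kept alongside the start.
var pingPong = new tr.model.PingPongClockSyncRecord('sync2', 1000, 20, {});
pingPong.duration;  // 20
```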
diff --git a/catapult/tracing/tracing/model/constants.html b/catapult/tracing/tracing/model/constants.html
new file mode 100644
index 0000000..3c1ec4f
--- /dev/null
+++ b/catapult/tracing/tracing/model/constants.html
@@ -0,0 +1,25 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2012 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/base/base.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.model', function() {
+  return {
+    // Since the PID of the browser process is not known to the child processes,
+    // we let them use "pid_ref = -1" to reference an object created in the
+    // browser process.
+    BROWSER_PROCESS_PID_REF: -1,
+
+    // The default scope of object events, when not explicitly specified.
+    OBJECT_DEFAULT_SCOPE: 'ptr'
+  };
+});
+</script>
+
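A hedged sketch of how an importer might consume BROWSER_PROCESS_PID_REF; the resolvePidRef helper and the browserPid argument are hypothetical, only the constant itself comes from the file above:

```js
// A child process that needs to reference an object living in the browser
// process emits pid_ref = -1, because it does not know the browser's PID.
function resolvePidRef(pidRef, browserPid) {
  if (pidRef === tr.model.BROWSER_PROCESS_PID_REF)
    return browserPid;
  return pidRef;
}

resolvePidRef(-1, 1234);  // => 1234 (the browser process)
resolvePidRef(42, 1234);  // => 42 (an ordinary PID)
```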
diff --git a/catapult/tracing/tracing/model/container_memory_dump.html b/catapult/tracing/tracing/model/container_memory_dump.html
index fe5ad7c..ce0ac1c 100644
--- a/catapult/tracing/tracing/model/container_memory_dump.html
+++ b/catapult/tracing/tracing/model/container_memory_dump.html
@@ -43,9 +43,7 @@
 
     set memoryAllocatorDumps(memoryAllocatorDumps) {
       this.memoryAllocatorDumps_ = memoryAllocatorDumps;
-
-      // Clear the index and generate it lazily.
-      this.memoryAllocatorDumpsByFullName_ = undefined;
+      this.forceRebuildingMemoryAllocatorDumpByFullNameIndex();
     },
 
     getMemoryAllocatorDumpByFullName: function(fullName) {
@@ -68,6 +66,11 @@
       return this.memoryAllocatorDumpsByFullName_[fullName];
     },
 
+    forceRebuildingMemoryAllocatorDumpByFullNameIndex: function() {
+      // Clear the index and generate it lazily.
+      this.memoryAllocatorDumpsByFullName_ = undefined;
+    },
+
     iterateRootAllocatorDumps: function(fn, opt_this) {
       if (this.memoryAllocatorDumps === undefined)
         return;
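The new forceRebuildingMemoryAllocatorDumpByFullNameIndex() only clears the cached index; getMemoryAllocatorDumpByFullName() rebuilds it lazily on the next lookup. A minimal sketch of when a caller has to invalidate it explicitly, mirroring the test added further below:

```js
var containerDump = new tr.model.ContainerMemoryDump(42 /* start */);
var v8Dump = new tr.model.MemoryAllocatorDump(containerDump, 'v8');

// Assigning the dump list clears the index; the next lookup rebuilds it.
containerDump.memoryAllocatorDumps = [v8Dump];
containerDump.getMemoryAllocatorDumpByFullName('v8');  // => v8Dump

// Mutating the dump tree behind the container's back leaves the cached
// index stale, so it must be invalidated by hand.
v8Dump.children.splice(0, v8Dump.children.length);
containerDump.forceRebuildingMemoryAllocatorDumpByFullNameIndex();
```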
diff --git a/catapult/tracing/tracing/model/container_memory_dump_test.html b/catapult/tracing/tracing/model/container_memory_dump_test.html
index 9686ef3..b615ff6 100644
--- a/catapult/tracing/tracing/model/container_memory_dump_test.html
+++ b/catapult/tracing/tracing/model/container_memory_dump_test.html
@@ -5,9 +5,11 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/tracing/model/attribute.html">
 <link rel="import" href="/tracing/model/container_memory_dump.html">
 <link rel="import" href="/tracing/model/memory_allocator_dump.html">
+<link rel="import" href="/tracing/model/memory_dump_test_utils.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <script>
 'use strict';
@@ -15,12 +17,11 @@
 tr.b.unittest.testSuite(function() {
   var ContainerMemoryDump = tr.model.ContainerMemoryDump;
   var MemoryAllocatorDump = tr.model.MemoryAllocatorDump;
-  var ScalarAttribute = tr.model.ScalarAttribute;
-
-  function setUpParentChildRelationship(parent, child) {
-    child.parent = parent;
-    parent.children.push(child);
-  }
+  var ScalarNumeric = tr.v.ScalarNumeric;
+  var unitlessNumber_smallerIsBetter =
+      tr.v.Unit.byName.unitlessNumber_smallerIsBetter;
+  var newAllocatorDump = tr.model.MemoryDumpTestUtils.newAllocatorDump;
+  var newChildDump = tr.model.MemoryDumpTestUtils.newChildDump;
 
   test('memoryAllocatorDumps_undefined', function() {
     var md = new ContainerMemoryDump(42);
@@ -40,16 +41,16 @@
   test('memoryAllocatorDumps_flat', function() {
     var md = new ContainerMemoryDump(42);
 
-    var oilpanDump = new MemoryAllocatorDump(md, 'oilpan');
-    oilpanDump.addAttribute('size', new ScalarAttribute('bytes', 1024));
-    oilpanDump.addAttribute('objects_count', new ScalarAttribute('objects', 7));
-    oilpanDump.addAttribute('inner_size', new ScalarAttribute('bytes', 768));
-
-    var v8Dump = new MemoryAllocatorDump(md, 'v8');
-    v8Dump.addAttribute('size', new ScalarAttribute('bytes', 2048));
-    v8Dump.addAttribute('objects_count', new ScalarAttribute('objects', 15));
-    v8Dump.addAttribute('inner_size', new ScalarAttribute('bytes', 1999));
-
+    var oilpanDump = newAllocatorDump(md, 'oilpan', {
+      size: 1024,
+      objects_count: new ScalarNumeric(unitlessNumber_smallerIsBetter, 7),
+      inner_size: 768
+    });
+    var v8Dump = newAllocatorDump(md, 'v8', {
+      size: 2048,
+      objects_count: new ScalarNumeric(unitlessNumber_smallerIsBetter, 15),
+      inner_size: 1999
+    });
     md.memoryAllocatorDumps = [oilpanDump, v8Dump];
 
     assert.lengthOf(md.memoryAllocatorDumps, 2);
@@ -64,45 +65,35 @@
   test('memoryAllocatorDumps_nested', function() {
     var md = new ContainerMemoryDump(42);
 
-    var oilpanDump = new MemoryAllocatorDump(md, 'oilpan');
-    oilpanDump.addAttribute('size', new ScalarAttribute('bytes', 1024));
-    oilpanDump.addAttribute('objects_count', new ScalarAttribute('objects', 7));
-    oilpanDump.addAttribute('inner_size', new ScalarAttribute('bytes', 768));
+    var oilpanDump = newAllocatorDump(md, 'oilpan', {
+      size: 1024,
+      objects_count: new ScalarNumeric(unitlessNumber_smallerIsBetter, 7),
+      inner_size: 768
+    });
 
-    var oilpanBucket1Dump = new MemoryAllocatorDump(
-        md, 'oilpan/bucket1', oilpanDump);
-    oilpanBucket1Dump.addAttribute('size',
-        new ScalarAttribute('bytes', 512));
-    oilpanBucket1Dump.addAttribute('objects_count',
-        new ScalarAttribute('objects', 3));
-    oilpanBucket1Dump.addAttribute('inner_size',
-        new ScalarAttribute('bytes', 256));
-    setUpParentChildRelationship(oilpanDump, oilpanBucket1Dump);
+    var oilpanBucket1Dump = newChildDump(oilpanDump, 'bucket1', {
+      size: 512,
+      objects_count: new ScalarNumeric(unitlessNumber_smallerIsBetter, 3),
+      inner_size: 256
+    });
 
-    var oilpanBucket2Dump = new MemoryAllocatorDump(
-        md, 'oilpan/bucket2', oilpanDump);
-    oilpanBucket2Dump.addAttribute('size',
-        new ScalarAttribute('bytes', 512));
-    oilpanBucket2Dump.addAttribute('objects_count',
-        new ScalarAttribute('objects', 4));
-    oilpanBucket2Dump.addAttribute('inner_size',
-        new ScalarAttribute('bytes', 512));
-    setUpParentChildRelationship(oilpanDump, oilpanBucket2Dump);
+    var oilpanBucket2Dump = newChildDump(oilpanDump, 'bucket2', {
+      size: 512,
+      objects_count: new ScalarNumeric(unitlessNumber_smallerIsBetter, 4),
+      inner_size: 512
+    });
 
-    var oilpanBucket2StringsDump = new MemoryAllocatorDump(
-        md, 'oilpan/bucket2/strings', oilpanBucket2Dump);
-    oilpanBucket2StringsDump.addAttribute('size',
-        new ScalarAttribute('bytes', 512));
-    oilpanBucket2StringsDump.addAttribute('objects_count',
-        new ScalarAttribute('objects', 4));
-    oilpanBucket2StringsDump.addAttribute('inner_size',
-        new ScalarAttribute('bytes', 512));
-    setUpParentChildRelationship(oilpanBucket2Dump, oilpanBucket2StringsDump);
+    var oilpanBucket2StringsDump = newChildDump(oilpanBucket2Dump, 'strings', {
+      size: 512,
+      objects_count: new ScalarNumeric(unitlessNumber_smallerIsBetter, 4),
+      inner_size: 512
+    });
 
-    var v8Dump = new MemoryAllocatorDump(md, 'v8');
-    v8Dump.addAttribute('size', new ScalarAttribute('bytes', 2048));
-    v8Dump.addAttribute('objects_count', new ScalarAttribute('objects', 15));
-    v8Dump.addAttribute('inner_size', new ScalarAttribute('bytes', 1999));
+    var v8Dump = newAllocatorDump(md, 'v8', {
+      size: 2048,
+      objects_count: new ScalarNumeric(unitlessNumber_smallerIsBetter, 15),
+      inner_size: 1999
+    });
 
     md.memoryAllocatorDumps = [oilpanDump, v8Dump];
 
@@ -126,8 +117,7 @@
 
     var oilpanDump = new MemoryAllocatorDump(containerDump, 'oilpan');
     var v8Dump = new MemoryAllocatorDump(containerDump, 'v8');
-    var v8HeapsDump = new MemoryAllocatorDump(containerDump, 'v8/heaps');
-    setUpParentChildRelationship(v8Dump, v8HeapsDump);
+    newChildDump(v8Dump, 'heaps');
 
     containerDump.memoryAllocatorDumps = [oilpanDump, v8Dump];
 
@@ -137,5 +127,51 @@
         { visitedAllocatorDumps: visitedAllocatorDumps });
     assert.sameMembers(visitedAllocatorDumps, [oilpanDump, v8Dump]);
   });
+
+  test('forceRebuildingMemoryAllocatorDumpByFullNameIndex', function() {
+    var containerDump = new ContainerMemoryDump(42);
+
+    var v8Dump = new MemoryAllocatorDump(containerDump, 'v8');
+    var v8HeapsDump = newChildDump(v8Dump, 'heaps');
+    var v8HeapSmallDump = newChildDump(v8HeapsDump, 'S');
+
+    // Setting the memory allocator dumps should update the index properly.
+    containerDump.memoryAllocatorDumps = [v8Dump];
+    assert.strictEqual(
+        containerDump.getMemoryAllocatorDumpByFullName('v8'), v8Dump);
+    assert.strictEqual(containerDump.getMemoryAllocatorDumpByFullName(
+        'v8/heaps'), v8HeapsDump);
+    assert.strictEqual(containerDump.getMemoryAllocatorDumpByFullName(
+        'v8/heaps/S'), v8HeapSmallDump);
+
+    // Add a second grandchild (v8/heaps/L).
+    var v8HeapLargeDump = newChildDump(v8HeapsDump, 'L');
+
+    // Setting the memory allocator dumps again should update the index
+    // properly again.
+    containerDump.memoryAllocatorDumps = [v8Dump];
+    assert.strictEqual(
+        containerDump.getMemoryAllocatorDumpByFullName('v8'), v8Dump);
+    assert.strictEqual(containerDump.getMemoryAllocatorDumpByFullName(
+        'v8/heaps'), v8HeapsDump);
+    assert.strictEqual(containerDump.getMemoryAllocatorDumpByFullName(
+        'v8/heaps/S'), v8HeapSmallDump);
+    assert.strictEqual(containerDump.getMemoryAllocatorDumpByFullName(
+        'v8/heaps/L'), v8HeapLargeDump);
+
+    // Remove the first grandchild (v8/heaps/S).
+    v8HeapsDump.children.splice(0, 1);
+
+    // Force rebuilding the index and check that it was updated properly.
+    containerDump.forceRebuildingMemoryAllocatorDumpByFullNameIndex();
+    assert.strictEqual(
+        containerDump.getMemoryAllocatorDumpByFullName('v8'), v8Dump);
+    assert.strictEqual(containerDump.getMemoryAllocatorDumpByFullName(
+        'v8/heaps'), v8HeapsDump);
+    assert.isUndefined(containerDump.getMemoryAllocatorDumpByFullName(
+        'v8/heaps/S'));
+    assert.strictEqual(containerDump.getMemoryAllocatorDumpByFullName(
+        'v8/heaps/L'), v8HeapLargeDump);
+  });
 });
 </script>
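The updated tests express non-byte values as explicit tr.v.ScalarNumeric instances, while plain numbers passed to the newAllocatorDump/newChildDump helpers appear to be shorthand for byte-valued sizes (the helpers live in memory_dump_test_utils.html, which is not part of this hunk). A tiny sketch of the numeric type itself:

```js
// An explicit unit-carrying value, as used for 'objects_count' above.
var count = new tr.v.ScalarNumeric(
    tr.v.Unit.byName.unitlessNumber_smallerIsBetter, 7);
count.value;  // => 7
```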
diff --git a/catapult/tracing/tracing/model/counter_sample.html b/catapult/tracing/tracing/model/counter_sample.html
index e8e51a4..f188afc 100644
--- a/catapult/tracing/tracing/model/counter_sample.html
+++ b/catapult/tracing/tracing/model/counter_sample.html
@@ -7,8 +7,9 @@
 
 <link rel="import" href="/tracing/base/iteration_helpers.html">
 <link rel="import" href="/tracing/base/sorted_array_utils.html">
-<link rel="import" href="/tracing/base/units/time_stamp.html">
 <link rel="import" href="/tracing/model/event.html">
+<link rel="import" href="/tracing/model/event_registry.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <script>
 'use strict';
@@ -49,7 +50,7 @@
       groups.push(group);
     }
     return groups;
-  }
+  };
 
   CounterSample.prototype = {
     __proto__: tr.model.Event.prototype,
@@ -83,7 +84,7 @@
 
     get userFriendlyName() {
       return 'Counter sample from ' + this.series_.title + ' at ' +
-          tr.b.u.TimeStamp.format(this.timestamp);
+          tr.v.Unit.byName.timeStampInMs.format(this.timestamp);
     }
   };
 
diff --git a/catapult/tracing/tracing/model/cpu_test.html b/catapult/tracing/tracing/model/cpu_test.html
index e58cd76..2ac7338 100644
--- a/catapult/tracing/tracing/model/cpu_test.html
+++ b/catapult/tracing/tracing/model/cpu_test.html
@@ -26,7 +26,7 @@
 
   test('cpuBounds_OneSlice', function() {
     var cpu = new Cpu({}, 1);
-    cpu.slices.push(tr.c.TestUtils.newSlice(1, 3));
+    cpu.slices.push(tr.c.TestUtils.newSliceEx({start: 1, duration: 3}));
     cpu.updateBounds();
     assert.equal(cpu.bounds.min, 1);
     assert.equal(cpu.bounds.max, 4);
@@ -42,13 +42,13 @@
   test('shiftTimestampsForward', function() {
     var cpu = new Cpu({}, 1);
     var ctr = cpu.getOrCreateCounter('foo', 'bar');
-    cpu.slices.push(tr.c.TestUtils.newSlice(1, 3));
+    cpu.slices.push(tr.c.TestUtils.newSliceEx({start: 1, duration: 3}));
     var shiftCount = 0;
     ctr.shiftTimestampsForward = function(ts) {
       if (ts == 0.32)
         shiftCount++;
     };
-    cpu.slices.push(tr.c.TestUtils.newSlice(1, 3));
+    cpu.slices.push(tr.c.TestUtils.newSliceEx({start: 1, duration: 3}));
     cpu.shiftTimestampsForward(0.32);
     assert.equal(1, shiftCount);
     assert.equal(cpu.slices[0].start, 1.32);
diff --git a/catapult/tracing/tracing/model/event.html b/catapult/tracing/tracing/model/event.html
index 273cb59..19b4e68 100644
--- a/catapult/tracing/tracing/model/event.html
+++ b/catapult/tracing/tracing/model/event.html
@@ -19,6 +19,7 @@
 tr.exportTo('tr.model', function() {
   var SelectableItem = tr.model.SelectableItem;
   var SelectionState = tr.model.SelectionState;
+  var IMMUTABLE_EMPTY_SET = tr.model.EventSet.IMMUTABLE_EMPTY_SET;
 
   /**
    * An Event is the base type for any non-container, selectable piece
@@ -31,7 +32,6 @@
     SelectableItem.call(this, this /* modelItem */);
     this.guid_ = tr.b.GUID.allocate();
     this.selectionState = SelectionState.NONE;
-    this.associatedAlerts = new tr.model.EventSet();
     this.info = undefined;
   }
 
@@ -46,6 +46,16 @@
       return undefined;
     },
 
+    // Empty by default. Lazily initialized on an instance in
+    // addAssociatedAlert(). See #1930.
+    associatedAlerts: IMMUTABLE_EMPTY_SET,
+
+    addAssociatedAlert: function(alert) {
+      if (this.associatedAlerts === IMMUTABLE_EMPTY_SET)
+        this.associatedAlerts = new tr.model.EventSet();
+      this.associatedAlerts.push(alert);
+    },
+
     /** Adds the range of timestamps for this event to the specified range. */
     addBoundsToRange: function(range) {
       throw new Error('Not implemented');
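The associatedAlerts change above (see #1930) avoids allocating an EventSet per event: every event shares the frozen empty set until an alert is actually attached. A minimal sketch of the resulting behaviour, mirroring the new test in event_test.html further below:

```js
var event = new tr.model.Event();

// Before any alert is attached, every event shares the same frozen set.
event.associatedAlerts === tr.model.EventSet.IMMUTABLE_EMPTY_SET;  // true

// The first addAssociatedAlert() call swaps in a private, mutable EventSet.
var info = new tr.model.EventInfo('Warning', 'Something looks off', []);
event.addAssociatedAlert(new tr.model.Alert(info, 42));
event.associatedAlerts instanceof tr.model.EventSet;  // true
event.associatedAlerts.length;                        // 1
```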
diff --git a/catapult/tracing/tracing/model/event_set.html b/catapult/tracing/tracing/model/event_set.html
index 6dd0476..74f2eee 100644
--- a/catapult/tracing/tracing/model/event_set.html
+++ b/catapult/tracing/tracing/model/event_set.html
@@ -47,12 +47,8 @@
     __proto__: Object.prototype,
 
     get bounds() {
-      if (this.bounds_dirty_) {
-        this.bounds_.reset();
-        for (var i = 0; i < this.length_; i++)
-          this[i].addBoundsToRange(this.bounds_);
-        this.bounds_dirty_ = false;
-      }
+      if (this.bounds_dirty_)
+        this.resolveBounds_();
       return this.bounds_;
     },
 
@@ -77,6 +73,13 @@
       this.bounds_dirty_ = true;
     },
 
+    resolveBounds_: function() {
+      this.bounds_.reset();
+      for (var i = 0; i < this.length_; i++)
+        this[i].addBoundsToRange(this.bounds_);
+      this.bounds_dirty_ = false;
+    },
+
     // push pushes only unique events.
     // If an event has been already pushed, do nothing.
     push: function(event) {
@@ -96,6 +99,14 @@
       return this.pushed_guids_[event.guid];
     },
 
+    indexOf: function(event) {
+      for (var i = 0; i < this.length; i++) {
+        if (this[i].guid === event.guid)
+          return i;
+      }
+      return -1;
+    },
+
     addEventSet: function(eventSet) {
       for (var i = 0; i < eventSet.length; i++)
         this.push(eventSet[i]);
@@ -272,6 +283,19 @@
     }
   };
 
+  EventSet.IMMUTABLE_EMPTY_SET = (function() {
+    var s = new EventSet();
+    s.resolveBounds_();
+    s.push = function() {
+      throw new Error('Cannot push to an immutable event set');
+    };
+    s.addEventSet = function() {
+      throw new Error('Cannot add to an immutable event set');
+    };
+    Object.freeze(s);
+    return s;
+  })();
+
   return {
     EventSet: EventSet,
     RequestSelectionChangeEvent: RequestSelectionChangeEvent
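EventSet.IMMUTABLE_EMPTY_SET is built by resolving its bounds once, overriding the mutators so they throw, and freezing the instance; indexOf() is a new linear, GUID-based lookup. A small sketch of both (the events are illustrative):

```js
var empty = tr.model.EventSet.IMMUTABLE_EMPTY_SET;
empty.length;          // 0
empty.bounds.isEmpty;  // true
// empty.push(evt) and empty.addEventSet(other) both throw.

// indexOf() compares by GUID, matching push()'s de-duplication.
var set = new tr.model.EventSet();
var event = new tr.model.Event();
set.push(event);
set.indexOf(event);                 // 0
set.indexOf(new tr.model.Event());  // -1
```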
diff --git a/catapult/tracing/tracing/model/event_set_test.html b/catapult/tracing/tracing/model/event_set_test.html
index 4b167dc..7cfc573 100644
--- a/catapult/tracing/tracing/model/event_set_test.html
+++ b/catapult/tracing/tracing/model/event_set_test.html
@@ -5,6 +5,7 @@
 found in the LICENSE file.
 -->
 
+<link rel="import" href="/tracing/base/iteration_helpers.html">
 <link rel="import" href="/tracing/core/test_utils.html">
 <link rel="import" href="/tracing/model/event_set.html">
 <link rel="import" href="/tracing/model/model.html">
@@ -295,5 +296,23 @@
             ['1.1.SliceGroup.0', '1.1.SliceGroup.1', '1.1.SliceGroup.2']},
         eventSet.asDict());
    });
+
+  test('immutableEmptySet', function() {
+    var s = tr.model.EventSet.IMMUTABLE_EMPTY_SET;
+    assert.lengthOf(s, 0);
+    assert.isTrue(s.bounds.isEmpty);
+
+    // Check that the iteration methods still work correctly.
+    function throwOnCall() {
+      throw new Error('This function should never be called!!!');
+    }
+    assert.deepEqual(s.map(throwOnCall), []);
+    s.forEach(throwOnCall);
+
+    // Check that the set is indeed immutable.
+    assert.throws(function() { s[0] = 42; });
+    assert.throws(function() { s.push(42); });
+    assert.throws(function() { s.addEventSet(new tr.model.EventSet()); });
+  });
 });
 </script>
diff --git a/catapult/tracing/tracing/model/event_test.html b/catapult/tracing/tracing/model/event_test.html
index a855c30..0d0deca 100644
--- a/catapult/tracing/tracing/model/event_test.html
+++ b/catapult/tracing/tracing/model/event_test.html
@@ -5,18 +5,42 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/tracing/core/test_utils.html">
+<link rel="import" href="/tracing/model/alert.html">
 <link rel="import" href="/tracing/model/event.html">
+<link rel="import" href="/tracing/model/event_info.html">
+<link rel="import" href="/tracing/model/event_set.html">
 
 <script>
 'use strict';
 
 tr.b.unittest.testSuite(function() {
+  var Alert = tr.model.Alert;
   var Event = tr.model.Event;
+  var EventInfo = tr.model.EventInfo;
+  var EventSet = tr.model.EventSet;
+  var ImmutableEventSet = tr.model.ImmutableEventSet;
 
   test('checkModelItem', function() {
     var event = new Event;
     assert.equal(event.modelItem, event);
   });
+
+  test('checkAssociatedAlerts', function() {
+    var event = new Event();
+    assert.strictEqual(event.associatedAlerts, EventSet.IMMUTABLE_EMPTY_SET);
+    assert.sameMembers(event.associatedAlerts.toArray(), []);
+
+    var info1 = new EventInfo('Critical', 'Critical alert!!!', []);
+    var alert1 = new Alert(info1, 7);
+    event.addAssociatedAlert(alert1);
+    assert.instanceOf(event.associatedAlerts, EventSet);
+    assert.sameMembers(event.associatedAlerts.toArray(), [alert1]);
+
+    var info2 = new EventInfo('Warning', 'Warning alert???', []);
+    var alert2 = new Alert(info2, 42);
+    event.addAssociatedAlert(alert2);
+    assert.instanceOf(event.associatedAlerts, EventSet);
+    assert.sameMembers(event.associatedAlerts.toArray(), [alert1, alert2]);
+  });
 });
 </script>
diff --git a/catapult/tracing/tracing/model/flow_event.html b/catapult/tracing/tracing/model/flow_event.html
index 861bcab..bffb866 100644
--- a/catapult/tracing/tracing/model/flow_event.html
+++ b/catapult/tracing/tracing/model/flow_event.html
@@ -5,8 +5,8 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/tracing/base/units/time_stamp.html">
 <link rel="import" href="/tracing/model/timed_event.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <script>
 'use strict';
@@ -47,7 +47,7 @@
 
     get userFriendlyName() {
       return 'Flow event named ' + this.title + ' at ' +
-          tr.b.u.TimeStamp.format(this.timestamp);
+          tr.v.Unit.byName.timeStampInMs.format(this.timestamp);
     }
   };
 
diff --git a/catapult/tracing/tracing/model/global_memory_dump.html b/catapult/tracing/tracing/model/global_memory_dump.html
index 355f93e..80bb03f 100644
--- a/catapult/tracing/tracing/model/global_memory_dump.html
+++ b/catapult/tracing/tracing/model/global_memory_dump.html
@@ -5,10 +5,12 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/tracing/base/units/time_stamp.html">
-<link rel="import" href="/tracing/model/attribute.html">
+<link rel="import" href="/tracing/base/iteration_helpers.html">
 <link rel="import" href="/tracing/model/container_memory_dump.html">
+<link rel="import" href="/tracing/model/event_registry.html">
 <link rel="import" href="/tracing/model/memory_allocator_dump.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <script>
 'use strict';
@@ -28,19 +30,43 @@
     this.processMemoryDumps = {};
   }
 
-  var SIZE_ATTRIBUTE_NAME = tr.model.MemoryAllocatorDump.SIZE_ATTRIBUTE_NAME;
-  var EFFECTIVE_SIZE_ATTRIBUTE_NAME =
-      tr.model.MemoryAllocatorDump.EFFECTIVE_SIZE_ATTRIBUTE_NAME;
+  // Size numeric names.
+  var SIZE_NUMERIC_NAME = tr.model.MemoryAllocatorDump.SIZE_NUMERIC_NAME;
+  var EFFECTIVE_SIZE_NUMERIC_NAME =
+      tr.model.MemoryAllocatorDump.EFFECTIVE_SIZE_NUMERIC_NAME;
+
+  // Size numeric info types.
+  var MemoryAllocatorDumpInfoType = tr.model.MemoryAllocatorDumpInfoType;
+  var PROVIDED_SIZE_LESS_THAN_AGGREGATED_CHILDREN =
+      MemoryAllocatorDumpInfoType.PROVIDED_SIZE_LESS_THAN_AGGREGATED_CHILDREN;
+  var PROVIDED_SIZE_LESS_THAN_LARGEST_OWNER =
+      MemoryAllocatorDumpInfoType.PROVIDED_SIZE_LESS_THAN_LARGEST_OWNER;
+
+  // TODO(petrcermak): Move this to tracing/base/iteration_helpers.html.
+  function inPlaceFilter(array, predicate, opt_this) {
+    opt_this = opt_this || this;
+    var nextPosition = 0;
+    for (var i = 0; i < array.length; i++) {
+      if (!predicate.call(opt_this, array[i], i))
+        continue;
+      if (nextPosition < i)
+        array[nextPosition] = array[i];  // Move elements only if necessary.
+      nextPosition++;
+    }
+
+    if (nextPosition < array.length)
+      array.length = nextPosition;  // Truncate the array only if necessary.
+  }
 
   function getSize(dump) {
-    var attr = dump.attributes[SIZE_ATTRIBUTE_NAME];
-    if (attr === undefined)
+    var numeric = dump.numerics[SIZE_NUMERIC_NAME];
+    if (numeric === undefined)
       return 0;
-    return attr.value;
+    return numeric.value;
   }
 
   function hasSize(dump) {
-    return dump.attributes[SIZE_ATTRIBUTE_NAME] !== undefined;
+    return dump.numerics[SIZE_NUMERIC_NAME] !== undefined;
   }
 
   function optional(value, defaultValue) {
@@ -49,60 +75,107 @@
     return value;
   }
 
-  function ownershipToUserFriendlyString(dump, importance) {
-    return dump.quantifiedName + ' (importance: ' +
-        optional(importance, 0) + ')';
-  }
-
   GlobalMemoryDump.prototype = {
     __proto__: tr.model.ContainerMemoryDump.prototype,
 
     get userFriendlyName() {
-      return 'Global memory dump at ' + tr.b.u.TimeStamp.format(this.start);
+      return 'Global memory dump at ' +
+          tr.v.Unit.byName.timeStampInMs.format(this.start);
     },
 
     get containerName() {
       return 'global space';
     },
 
-    calculateGraphAttributes: function() {
-      // 1. Add ownership links from tracing MADs to descendants of malloc or
+    finalizeGraph: function() {
+      // 1. Transitively remove weak memory allocator dumps and all their
+      // owners and descendants from the model. This must be performed before
+      // any other steps.
+      this.removeWeakDumps();
+
+      // 2. Add ownership links from tracing MADs to descendants of malloc or
       // winheap MADs so that tracing would be automatically discounted from
       // them later (step 3).
       this.setUpTracingOverheadOwnership();
 
-      // 2. Calculate the sizes of all memory allocator dumps (MADs). This step
+      // 3. Aggregate all other numerics of all MADs (*excluding* sizes and
+      // effective sizes) and propagate numerics from global MADs to their
+      // owners (*including* sizes and effective sizes). This step must be
+      // carried out before the sizes of all MADs are calculated (step 4).
+      // Otherwise, the propagated sizes of all MADs would not be aggregated.
+      this.aggregateNumerics();
+
+      // 4. Calculate the sizes of all memory allocator dumps (MADs). This step
       // requires that the memory allocator dump graph has been finalized (step
-      // 1). Subsequent modifications of the graph will most likely break the
-      // calculation invariants.
+      // 1) and numerics were propagated from global MADs (step 3). Subsequent
+      // modifications of the graph will most likely break the calculation
+      // invariants.
       this.calculateSizes();
 
-      // 3. Calculate the effective sizes of all MADs. This step requires that
-      // the sizes of all MADs have already been calculated (step 2).
+      // 5. Calculate the effective sizes of all MADs. This step requires that
+      // the sizes of all MADs have already been calculated (step 4).
       this.calculateEffectiveSizes();
 
-      // 4. Aggregate all other attributes of all MADs. This step must be
-      // carried out after the sizes of all MADs were calculated (step 2).
-      // Otherwise, the sizes of all MADs would be aggregated as direct sums
-      // of their children, which would most likely lead to double-counting.
-      this.aggregateAttributes();
-
-      // 5. Discount tracing from VM regions stats. This steps requires that
-      // sizes (step 2) and resident sizes (step 4) of the tracing MADs have
+      // 6. Discount tracing from VM regions stats. This step requires that
+      // resident sizes (step 3) and sizes (step 4) of the tracing MADs have
       // already been calculated.
       this.discountTracingOverheadFromVmRegions();
+
+      // 7. The above steps (especially steps 1, 2 and 4) can create or remove memory
+      // allocator dumps, so we force rebuilding the memory allocator dump
+      // indices of all container memory dumps.
+      this.forceRebuildingMemoryAllocatorDumpByFullNameIndices();
+    },
+
+    removeWeakDumps: function() {
+      // Mark all transitive owners and children of weak memory allocator dumps
+      // as weak.
+      this.traverseAllocatorDumpsInDepthFirstPreOrder(function(dump) {
+        if (dump.weak)
+          return;
+        if ((dump.owns !== undefined && dump.owns.target.weak) ||
+            (dump.parent !== undefined && dump.parent.weak)) {
+          dump.weak = true;
+        }
+      });
+
+      function removeWeakDumpsFromListRecursively(dumps) {
+        inPlaceFilter(dumps, function(dump) {
+          if (dump.weak) {
+            // The dump is weak, so remove it. This will implicitly remove all
+            // its descendants, which are also weak due to the initial marking
+            // step.
+            return false;
+          }
+
+          // This dump is non-weak, so keep it. Recursively remove its weak
+          // descendants and ownership links from weak dumps instead.
+          removeWeakDumpsFromListRecursively(dump.children);
+          inPlaceFilter(dump.ownedBy, function(ownershipLink) {
+            return !ownershipLink.source.weak;
+          });
+
+          return true;
+        });
+      }
+
+      this.iterateContainerDumps(function(containerDump) {
+        var memoryAllocatorDumps = containerDump.memoryAllocatorDumps;
+        if (memoryAllocatorDumps !== undefined)
+          removeWeakDumpsFromListRecursively(memoryAllocatorDumps);
+      });
     },
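The two sweeps above are the whole algorithm: one marking pass followed by one in-place pruning pass. A minimal standalone sketch of the same idea, assuming plain dump objects with weak/owns/parent/children/ownedBy fields and a hypothetical filterInPlace helper standing in for the in-place filter used by the patch:

    // Hypothetical stand-in for the in-place array filter used above.
    function filterInPlace(array, predicate) {
      var kept = array.filter(predicate);
      array.length = 0;
      array.push.apply(array, kept);
    }

    // Sweep 1: weakness spreads from a weak dump to its owners and children.
    // A single pass suffices if each dump appears after its parent and after
    // the dump it owns, which the pre-order traversal above is presumably
    // chosen to guarantee.
    function markTransitivelyWeak(dumpsInDependencyOrder) {
      dumpsInDependencyOrder.forEach(function(dump) {
        if (dump.weak)
          return;
        if ((dump.owns !== undefined && dump.owns.target.weak) ||
            (dump.parent !== undefined && dump.parent.weak)) {
          dump.weak = true;
        }
      });
    }

    // Sweep 2: drop weak subtrees and ownership links coming from weak dumps.
    function pruneWeakDumps(dumps) {
      filterInPlace(dumps, function(dump) {
        if (dump.weak)
          return false;
        pruneWeakDumps(dump.children);
        filterInPlace(dump.ownedBy, function(link) {
          return !link.source.weak;
        });
        return true;
      });
    }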
 
     /**
      * Calculate the size of all memory allocator dumps in the dump graph.
      *
      * The size refers to the allocated size of a (sub)component. It is a
-     * natural extension of the optional size attribute provided by
+     * natural extension of the optional size numeric provided by
      * MemoryAllocatorDump(s):
      *
-     *   - If a MAD provides a size attribute, then its size is assumed to be
+     *   - If a MAD provides a size numeric, then its size is assumed to be
      *     equal to it.
-     *   - If a MAD does not provide a size attribute, then its size is assumed
+     *   - If a MAD does not provide a size numeric, then its size is assumed
      *     to be the maximum of (1) the size of the largest owner of the MAD
      *     and (2) the aggregated size of the MAD's children.
      *
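As a worked example of the rule above (hypothetical numbers): suppose a MAD provides no size of its own, its two children report 30 and 50 bytes, and its only owner reports 70 bytes. The calculation then yields 80 bytes:

    var providedSize = 0;                  // no 'size' numeric on the dump itself
    var aggregatedChildrenSize = 30 + 50;  // 80
    var largestOwnerSize = 70;
    var size = Math.max(providedSize, aggregatedChildrenSize, largestOwnerSize);
    // size === 80; no '<unspecified>' virtual child is needed because the
    // children already account for the whole size. Had the dump itself
    // provided 100 bytes, size would be 100 and a 20-byte '<unspecified>'
    // child would be added to make up the difference (see the code below).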
@@ -122,51 +195,59 @@
      * been calculated.
      */
     calculateMemoryAllocatorDumpSize_: function(dump) {
-      // This flag becomes true if the size attribute of the current dump
-      // should be defined, i.e. if (1) the current dump's size attribute is
-      // defined, (2) the size of at least one of its children is defined or
-      // (3) the size of at least one of its owners is defined.
+      // This flag becomes true if the size numeric of the current dump should
+      // be defined, i.e. if (1) the current dump's size numeric is defined,
+      // (2) the size of at least one of its children is defined or (3) the
+      // size of at least one of its owners is defined.
       var shouldDefineSize = false;
 
-      // This helper function returns the numeric value of the size attribute
-      // of the given dependent memory allocator dump. If the attribute is
-      // defined, the shouldDefineSize flag above is also set to true (because
-      // condition (2) or (3) is satisfied). Otherwise, zero is returned (and
-      // the flag is left unchanged).
+      // This helper function returns the value of the size numeric of the
+      // given dependent memory allocator dump. If the numeric is defined, the
+      // shouldDefineSize flag above is also set to true (because condition
+      // (2) or (3) is satisfied). Otherwise, zero is returned (and the flag is
+      // left unchanged).
       function getDependencySize(dependencyDump) {
-        var attr = dependencyDump.attributes[SIZE_ATTRIBUTE_NAME];
-        if (attr === undefined)
+        var numeric = dependencyDump.numerics[SIZE_NUMERIC_NAME];
+        if (numeric === undefined)
           return 0;
         shouldDefineSize = true;
-        return attr.value;
+        return numeric.value;
       }
 
       // 1. Get the size provided by the dump. If present, define a function
       // for checking dependent size consistency (a dump must always be bigger
       // than all its children aggregated together and/or its largest owner).
-      var sizeAttribute = dump.getValidSizeAttributeOrUndefined(
-          SIZE_ATTRIBUTE_NAME, this.model);
+      var sizeNumeric = dump.numerics[SIZE_NUMERIC_NAME];
       var size = 0;
-      var infos = [];
-      var checkDependentSizeIsConsistent = function() { /* no-op */ };
-      if (sizeAttribute !== undefined) {
-        size = sizeAttribute.value;
+      var checkDependencySizeIsConsistent = function() { /* no-op */ };
+      if (sizeNumeric !== undefined) {
+        size = sizeNumeric.value;
         shouldDefineSize = true;
-        checkDependentSizeIsConsistent = function(dependentSize,
-            dependentName) {
-          if (size >= dependentSize)
+        if (sizeNumeric.unit !== tr.v.Unit.byName.sizeInBytes_smallerIsBetter) {
+          this.model.importWarning({
+            type: 'memory_dump_parse_error',
+            message: 'Invalid unit of \'size\' numeric of memory allocator ' +
+                'dump ' + dump.quantifiedName + ': ' +
+                sizeNumeric.unit.unitName + '.'
+          });
+        }
+        checkDependencySizeIsConsistent = function(
+            dependencySize, dependencyInfoType, dependencyName) {
+          if (size >= dependencySize)
             return;
-          var messageSuffix = ' (' + tr.b.u.Units.sizeInBytes.format(size) +
-              ') is less than ' + dependentName + ' (' +
-                tr.b.u.Units.sizeInBytes.format(dependentSize) + ').';
           this.model.importWarning({
             type: 'memory_dump_parse_error',
             message: 'Size provided by memory allocator dump \'' +
-                dump.fullName + '\'' + messageSuffix
+                dump.fullName + '\' (' +
+                tr.v.Unit.byName.sizeInBytes.format(size) +
+                ') is less than ' + dependencyName + ' (' +
+                tr.v.Unit.byName.sizeInBytes.format(dependencySize) + ').'
           });
-          infos.push(new tr.model.AttributeInfo(
-              tr.model.AttributeInfoType.WARNING,
-              'Size provided by this memory allocator dump' + messageSuffix));
+          dump.infos.push({
+            type: dependencyInfoType,
+            providedSize: size,
+            dependencySize: dependencySize
+          });
         }.bind(this);
       }
 
@@ -187,21 +268,18 @@
             // of the current dump (i.e. not childDump), then we remember
             // the ownership so that we could explain why the size of the
             // current dump is not equal to the sum of its children.
-            var ownedDescendantDump = ownedDumpLink.target;
-            var ownedChildDump = ownedDescendantDump;
+            var ownedChildDump = ownedDumpLink.target;
             while (ownedChildDump.parent !== dump)
               ownedChildDump = ownedChildDump.parent;
             if (childDump !== ownedChildDump) {
-              var overlap = getDependencySize(descendantDump);
-              if (overlap > 0) {
-                // Owner child dump -> total overlapping size.
-                var ownedChildOverlaps = allOverlaps[ownedChildDump.name];
-                if (ownedChildOverlaps === undefined)
-                  allOverlaps[ownedChildDump.name] = ownedChildOverlaps = {};
-                var previousTotalOverlap =
-                    ownedChildOverlaps[childDump.name] || 0;
-                var updatedTotalOverlap = previousTotalOverlap + overlap;
-                ownedChildOverlaps[childDump.name] = updatedTotalOverlap;
+              var ownedBySiblingSize = getDependencySize(descendantDump);
+              if (ownedBySiblingSize > 0) {
+                var previousTotalOwnedBySiblingSize =
+                    ownedChildDump.ownedBySiblingSizes.get(childDump) || 0;
+                var updatedTotalOwnedBySiblingSize =
+                    previousTotalOwnedBySiblingSize + ownedBySiblingSize;
+                ownedChildDump.ownedBySiblingSizes.set(
+                    childDump, updatedTotalOwnedBySiblingSize);
               }
             }
             return;
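A small illustration of the bookkeeping above, assuming a parent dump with children 'child1' and 'child2' where a 10-byte descendant of 'child1' owns a descendant of 'child2'. The overlap is recorded on the owned sibling, keyed by the owning sibling:

    var child1 = {name: 'child1'};
    var child2 = {name: 'child2', ownedBySiblingSizes: new Map()};

    // What the loop above effectively records:
    var previousTotal = child2.ownedBySiblingSizes.get(child1) || 0;
    child2.ownedBySiblingSizes.set(child1, previousTotal + 10);

    console.log(child2.ownedBySiblingSizes.get(child1));  // 10

The analysis UI can later read this map to explain why a dump's size is smaller than the plain sum of its children's sizes, which is what the removed AttributeInfo messages used to do inline.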
@@ -221,25 +299,10 @@
         }
         aggregateDescendantDump(childDump);
       });
-      // If the size of the dump is not equal to the sum of its children, add
-      // infos to its children explaining the difference.
-      dump.children.forEach(function(childDump) {
-        var childOverlaps = allOverlaps[childDump.name];
-        if (childOverlaps === undefined)
-          return;
-
-        var message = tr.b.dictionaryValues(tr.b.mapItems(childOverlaps,
-            function(ownerChildName, overlap) {
-          return 'overlaps with its sibling \'' + ownerChildName + '\' (' +
-              tr.b.u.Units.sizeInBytes.format(overlap) + ')';
-        })).join(' ');
-
-        childDump.attributes[SIZE_ATTRIBUTE_NAME].infos.push(
-            new tr.model.AttributeInfo(
-                tr.model.AttributeInfoType.INFORMATION, message));
-      });
-      checkDependentSizeIsConsistent(
-          aggregatedChildrenSize, 'the aggregated size of its children');
+      checkDependencySizeIsConsistent(
+          aggregatedChildrenSize,
+          PROVIDED_SIZE_LESS_THAN_AGGREGATED_CHILDREN,
+          'the aggregated size of its children');
 
       // 3. Calculate the largest owner size.
       var largestOwnerSize = 0;
@@ -248,15 +311,17 @@
         var ownerSize = getDependencySize(owner);
         largestOwnerSize = Math.max(largestOwnerSize, ownerSize);
       });
-      checkDependentSizeIsConsistent(
-          largestOwnerSize, 'the size of its largest owner');
+      checkDependencySizeIsConsistent(
+          largestOwnerSize,
+          PROVIDED_SIZE_LESS_THAN_LARGEST_OWNER,
+          'the size of its largest owner');
 
       // If neither the dump nor any of its dependencies (children and owners)
-      // provide a size, do NOT add a zero size attribute.
+      // provide a size, do NOT add a zero size numeric.
       if (!shouldDefineSize) {
         // The rest of the pipeline relies on size being either a valid
-        // ScalarAttribute, or undefined.
-        dump.attributes[SIZE_ATTRIBUTE_NAME] = undefined;
+        // ScalarNumeric, or undefined.
+        delete dump.numerics[SIZE_NUMERIC_NAME];
         return;
       }
 
@@ -264,9 +329,8 @@
       // together and/or its largest owner.
       size = Math.max(size, aggregatedChildrenSize, largestOwnerSize);
 
-      var sizeAttribute = new tr.model.ScalarAttribute('bytes', size);
-      sizeAttribute.infos = infos;
-      dump.attributes[SIZE_ATTRIBUTE_NAME] = sizeAttribute;
+      dump.numerics[SIZE_NUMERIC_NAME] = new tr.v.ScalarNumeric(
+          tr.v.Unit.byName.sizeInBytes_smallerIsBetter, size);
 
       // Add a virtual child to make up for extra size of the dump with
       // respect to its children (if applicable).
@@ -276,9 +340,9 @@
             dump.containerMemoryDump, dump.fullName + '/<unspecified>');
         virtualChild.parent = dump;
         dump.children.unshift(virtualChild);
-        virtualChild.attributes[SIZE_ATTRIBUTE_NAME] =
-            new tr.model.ScalarAttribute(
-                'bytes', size - aggregatedChildrenSize);
+        virtualChild.numerics[SIZE_NUMERIC_NAME] = new tr.v.ScalarNumeric(
+            tr.v.Unit.byName.sizeInBytes_smallerIsBetter,
+            size - aggregatedChildrenSize);
       }
     },
 
@@ -617,8 +681,8 @@
       // have defined effective size if and only if it has defined size.
       if (!hasSize(dump)) {
         // The rest of the pipeline relies on effective size being either a
-        // valid ScalarAttribute, or undefined.
-        dump.attributes[EFFECTIVE_SIZE_ATTRIBUTE_NAME] = undefined;
+        // valid ScalarNumeric, or undefined.
+        delete dump.numerics[EFFECTIVE_SIZE_NUMERIC_NAME];
         return;
       }
 
@@ -634,89 +698,51 @@
           if (!hasSize(childDump))
             return;
           effectiveSize +=
-              childDump.attributes[EFFECTIVE_SIZE_ATTRIBUTE_NAME].value;
+              childDump.numerics[EFFECTIVE_SIZE_NUMERIC_NAME].value;
         });
       }
-      var attribute = new tr.model.ScalarAttribute('bytes', effectiveSize);
-      dump.attributes[EFFECTIVE_SIZE_ATTRIBUTE_NAME] = attribute;
-
-      // Add attribute infos regarding ownership (if applicable).
-      // TODO(petrcermak): This belongs to the corresponding analysis UI code.
-      if (dump.ownedBy.length > 0) {
-        var message = 'shared by:' +
-            dump.ownedBy.map(function(ownershipLink) {
-              return '\n  - ' + ownershipToUserFriendlyString(
-                  ownershipLink.source, ownershipLink.importance);
-            }).join();
-        attribute.infos.push(new tr.model.AttributeInfo(
-            tr.model.AttributeInfoType.MEMORY_OWNED, message));
-      }
-      if (dump.owns !== undefined) {
-        var target = dump.owns.target;
-        var message = 'shares ' +
-            ownershipToUserFriendlyString(target, dump.owns.importance) +
-            ' with';
-
-        var otherOwnershipLinks = target.ownedBy.filter(
-            function(ownershipLink) {
-          return ownershipLink.source !== dump;
-        });
-        if (otherOwnershipLinks.length > 0) {
-          message += ':';
-          message += otherOwnershipLinks.map(function(ownershipLink) {
-            return '\n  - ' + ownershipToUserFriendlyString(
-                ownershipLink.source, ownershipLink.importance);
-          }).join();
-        } else {
-          message += ' no other dumps';
-        }
-
-        attribute.infos.push(new tr.model.AttributeInfo(
-            tr.model.AttributeInfoType.MEMORY_OWNER, message));
-      }
+      dump.numerics[EFFECTIVE_SIZE_NUMERIC_NAME] = new tr.v.ScalarNumeric(
+          tr.v.Unit.byName.sizeInBytes_smallerIsBetter, effectiveSize);
     },
 
-    aggregateAttributes: function() {
-      // 1. Aggregate attributes in this global memory dump.
+    aggregateNumerics: function() {
+      // 1. Aggregate numerics in this global memory dump.
       this.iterateRootAllocatorDumps(function(dump) {
-        dump.aggregateAttributes(this.model);
+        dump.aggregateNumericsRecursively(this.model);
       });
 
-      // 2. Propagate attributes from global memory allocator dumps to their
-      // owners.
-      this.iterateRootAllocatorDumps(this.propagateAttributesRecursively);
+      // 2. Propagate numerics and diagnostics from global memory allocator
+      // dumps to their owners.
+      this.iterateRootAllocatorDumps(
+          this.propagateNumericsAndDiagnosticsRecursively);
 
-      // 3. Aggregate attributes in the associated process memory dumps.
+      // 3. Aggregate numerics in the associated process memory dumps.
       tr.b.iterItems(this.processMemoryDumps, function(pid, processMemoryDump) {
         processMemoryDump.iterateRootAllocatorDumps(function(dump) {
-          dump.aggregateAttributes(this.model);
+          dump.aggregateNumericsRecursively(this.model);
         }, this);
       }, this);
     },
 
-    propagateAttributesRecursively: function(globalAllocatorDump) {
-      tr.b.iterItems(globalAllocatorDump.attributes, function(attrName, attr) {
-        if (attrName === SIZE_ATTRIBUTE_NAME ||
-            attrName === EFFECTIVE_SIZE_ATTRIBUTE_NAME) {
-          // We cannot propagate size and effective_size attributes because it
-          // would break the complex maths [see calculateSizes() and
-          // calculateEffectiveSizes()].
-          return;
-        }
-        globalAllocatorDump.ownedBy.forEach(function(ownershipLink) {
-          var processAllocatorDump = ownershipLink.source;
-          if (processAllocatorDump.attributes[attrName] !== undefined) {
-            // Attributes provided by process memory allocator dumps themselves
-            // have precedence over attributes propagated from global memory
-            // allocator dumps.
-            return;
-          }
-          processAllocatorDump.attributes[attrName] = attr;
+    propagateNumericsAndDiagnosticsRecursively: function(globalAllocatorDump) {
+      ['numerics', 'diagnostics'].forEach(function(field) {
+        tr.b.iterItems(globalAllocatorDump[field], function(name, value) {
+          globalAllocatorDump.ownedBy.forEach(function(ownershipLink) {
+            var processAllocatorDump = ownershipLink.source;
+            if (processAllocatorDump[field][name] !== undefined) {
+              // Numerics and diagnostics provided by process memory allocator
+              // dumps themselves have precedence over numerics and diagnostics
+              // propagated from global memory allocator dumps.
+              return;
+            }
+            processAllocatorDump[field][name] = value;
+          });
         });
       });
-      // Recursively propagate attributes from all child memory allocator dumps.
+
+      // Recursively propagate numerics from all child memory allocator dumps.
       globalAllocatorDump.children.forEach(
-          this.propagateAttributesRecursively, this);
+          this.propagateNumericsAndDiagnosticsRecursively, this);
     },
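A reduced sketch of the precedence rule above, using plain objects for the numerics dictionaries (hypothetical helper and dump names); a value the process-side dump already provides is never overwritten by the value propagated from the global dump:

    function propagateField(globalDump, field) {
      Object.keys(globalDump[field]).forEach(function(name) {
        globalDump.ownedBy.forEach(function(ownershipLink) {
          var processDump = ownershipLink.source;
          if (processDump[field][name] !== undefined)
            return;  // the process-side value wins
          processDump[field][name] = globalDump[field][name];
        });
      });
    }

    // Hypothetical usage:
    var globalDump = {numerics: {resident_size: 100}, ownedBy: []};
    var processDump1 = {numerics: {resident_size: 40}};  // keeps its own 40
    var processDump2 = {numerics: {}};                   // receives 100
    globalDump.ownedBy.push({source: processDump1}, {source: processDump2});
    propagateField(globalDump, 'numerics');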
 
     setUpTracingOverheadOwnership: function() {
@@ -733,6 +759,12 @@
       }, this);
     },
 
+    forceRebuildingMemoryAllocatorDumpByFullNameIndices: function() {
+      this.iterateContainerDumps(function(containerDump) {
+        containerDump.forceRebuildingMemoryAllocatorDumpByFullNameIndex();
+      });
+    },
+
     iterateContainerDumps: function(fn) {
       fn.call(this, this);
       tr.b.iterItems(this.processMemoryDumps, function(pid, processDump) {
diff --git a/catapult/tracing/tracing/model/global_memory_dump_test.html b/catapult/tracing/tracing/model/global_memory_dump_test.html
index 5d99c28..e813359 100644
--- a/catapult/tracing/tracing/model/global_memory_dump_test.html
+++ b/catapult/tracing/tracing/model/global_memory_dump_test.html
@@ -5,12 +5,16 @@
 found in the LICENSE file.
 -->
 
+<link rel="import" href="/tracing/base/iteration_helpers.html">
 <link rel="import" href="/tracing/core/test_utils.html">
-<link rel="import" href="/tracing/model/attribute.html">
+<link rel="import" href="/tracing/importer/import.html">
 <link rel="import" href="/tracing/model/global_memory_dump.html">
 <link rel="import" href="/tracing/model/memory_allocator_dump.html">
+<link rel="import" href="/tracing/model/memory_dump_test_utils.html">
 <link rel="import" href="/tracing/model/model.html">
 <link rel="import" href="/tracing/model/process_memory_dump.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <script>
 'use strict';
@@ -21,11 +25,20 @@
   var ProcessMemoryDump = tr.model.ProcessMemoryDump;
   var MemoryAllocatorDump = tr.model.MemoryAllocatorDump;
   var MemoryAllocatorDumpLink = tr.model.MemoryAllocatorDumpLink;
-  var ScalarAttribute = tr.model.ScalarAttribute;
-  var AttributeInfo = tr.model.AttributeInfo;
-  var AttributeInfoType = tr.model.AttributeInfoType;
-
-  var SIZE_DELTA = 0.0001;
+  var ScalarNumeric = tr.v.ScalarNumeric;
+  var sizeInBytes_smallerIsBetter =
+      tr.v.Unit.byName.sizeInBytes_smallerIsBetter;
+  var newAllocatorDump = tr.model.MemoryDumpTestUtils.newAllocatorDump;
+  var newChildDump = tr.model.MemoryDumpTestUtils.newChildDump;
+  var addOwnershipLink = tr.model.MemoryDumpTestUtils.addOwnershipLink;
+  var checkDumpNumericsAndDiagnostics =
+      tr.model.MemoryDumpTestUtils.checkDumpNumericsAndDiagnostics;
+  var SIZE_DELTA = tr.model.MemoryDumpTestUtils.SIZE_DELTA;
+  var MemoryAllocatorDumpInfoType = tr.model.MemoryAllocatorDumpInfoType;
+  var PROVIDED_SIZE_LESS_THAN_AGGREGATED_CHILDREN =
+      MemoryAllocatorDumpInfoType.PROVIDED_SIZE_LESS_THAN_AGGREGATED_CHILDREN;
+  var PROVIDED_SIZE_LESS_THAN_LARGEST_OWNER =
+      MemoryAllocatorDumpInfoType.PROVIDED_SIZE_LESS_THAN_LARGEST_OWNER;
 
   function buildArgPusher(array) {
     return function(arg) { array.push(arg); };
@@ -36,69 +49,81 @@
     assert.sameMembers(actualArray, expectedArray);
   }
 
-  function assertUndefinedAttribute(dump, attributeName) {
-    var attribute = dump.attributes[attributeName];
-    assert.isUndefined(attribute, 'expected attribute \'' + attributeName +
+  function assertUndefinedNumeric(dump, numericName) {
+    var numeric = dump.numerics[numericName];
+    assert.isUndefined(numeric, 'expected numeric \'' + numericName +
         '\' of memory allocator dump \'' + dump.fullName + '\' in ' +
         dump.containerMemoryDump.userFriendlyName + ' to be undefined');
   }
 
-  function assertDefinedAttribute(dump, attributeName, expectedType,
-      expectedUnits, expectedValue, expectedInfos, opt_delta) {
-    var attribute = dump.attributes[attributeName];
-    var errorMessagePrefix = 'expected attribute \'' + attributeName +
+  function assertDefinedNumeric(dump, numericName, expectedUnit, expectedValue,
+      opt_delta) {
+    var numeric = dump.numerics[numericName];
+    var errorMessagePrefix = 'expected numeric \'' + numericName +
         '\' of memory allocator dump \'' + dump.fullName + '\' in ' +
         dump.containerMemoryDump.userFriendlyName + ' to ';
 
-    assert.instanceOf(attribute, expectedType,
-        errorMessagePrefix + 'be an instance of ' + expectedType);
-    assert.equal(attribute.units, expectedUnits,
-        errorMessagePrefix + 'have units \'' + expectedUnits + '\' but got \'' +
-        attribute.units + '\'');
+    assert.instanceOf(numeric, ScalarNumeric,
+        errorMessagePrefix + 'be an instance of ScalarNumeric');
+    assert.equal(numeric.unit, expectedUnit,
+        errorMessagePrefix + 'have unit \'' + expectedUnit.unitName +
+        '\' but got \'' + numeric.unit.unitName + '\' instead');
 
     var valueErrorMessage = errorMessagePrefix + 'have value \'' +
-        expectedValue + '\' but got \'' + attribute.value + '\'';
+        expectedValue + '\' but got \'' + numeric.value + '\'';
     if (opt_delta !== undefined) {
       assert.closeTo(
-          attribute.value, expectedValue, opt_delta, valueErrorMessage);
+          numeric.value, expectedValue, opt_delta, valueErrorMessage);
     } else {
-      assert.equal(attribute.value, expectedValue, valueErrorMessage);
-    }
-
-    if (expectedInfos === undefined)
-      expectedInfos = [];
-    var actualInfos = dump.attributes[attributeName].infos;
-    assert.lengthOf(actualInfos, expectedInfos.length,
-        'expected the \'' + attributeName + '\' attribute of ' +
-        'memory allocator dump \'' + dump.fullName + '\' in ' +
-        dump.containerMemoryDump.userFriendlyName + ' to have ' +
-        expectedInfos.length + ' infos but got ' + actualInfos.length);
-    for (var k = 0; k < actualInfos.length; k++) {
-      var actualInfo = actualInfos[k];
-      var expectedInfo = expectedInfos[k];
-      assert.equal(actualInfo.type, expectedInfo.type,
-          'expected info ' + k + ' of the \'' + attributeName + '\' ' +
-          'attribute of memory allocator dump \'' + dump.fullName + '\' in ' +
-          dump.containerMemoryDump.userFriendlyName + ' to have type ' +
-          expectedInfo.type + ' but got ' + actualInfo.type);
-      assert.match(actualInfo.message, expectedInfo.message,
-          'invalid message of info ' + k + ' of the \'' + attributeName +
-          '\' attribute of memory allocator dump \'' + dump.fullName +
-          '\' in ' + dump.containerMemoryDump.userFriendlyName);
+      assert.equal(numeric.value, expectedValue, valueErrorMessage);
     }
   }
 
-  function assertSizeAttribute(dump, sizeName, expectedValue, expectedInfos) {
+  function assertSizeNumeric(dump, sizeName, expectedValue) {
     if (expectedValue === undefined) {
-      assertUndefinedAttribute(dump, sizeName);
-      // No size attribute infos should be expected (test sanity check).
-      assert(expectedInfos === undefined || expectedInfos.length === 0);
+      assertUndefinedNumeric(dump, sizeName);
     } else {
-      assertDefinedAttribute(dump, sizeName, ScalarAttribute, 'bytes',
-          expectedValue, expectedInfos, SIZE_DELTA);
+      assertDefinedNumeric(dump, sizeName, sizeInBytes_smallerIsBetter,
+          expectedValue, SIZE_DELTA);
     }
   }
 
+  function assertDumpSizes(dump, expectedSize, expectedEffectiveSize,
+      opt_expectedInfos, opt_expectedOwnedBySiblingSizes) {
+    // Check the 'size' numeric.
+    assertSizeNumeric(dump, 'size', expectedSize);
+
+    // Check the 'effective_size' numeric.
+    assertSizeNumeric(dump, 'effective_size', expectedEffectiveSize);
+
+    // Check the 'infos' list.
+    var expectedInfos = opt_expectedInfos || [];
+    var actualInfos = dump.infos;
+    assert.lengthOf(actualInfos, expectedInfos.length,
+        'expected memory allocator dump \'' + dump.fullName + '\' in ' +
+        dump.containerMemoryDump.userFriendlyName + ' to have ' +
+        expectedInfos.length + ' infos but got ' + actualInfos.length);
+    for (var k = 0; k < actualInfos.length; k++) {
+      assert.deepEqual(actualInfos[k], expectedInfos[k],
+          'info ' + k + ' of memory allocator dump \'' + dump.fullName +
+          '\' in ' + dump.containerMemoryDump.userFriendlyName +
+          ' doesn\'t match the expected info');
+    }
+
+    // Check the 'ownedBySiblingSizes' map.
+    var expectedOwnedBySiblingSizes = opt_expectedOwnedBySiblingSizes || {};
+    var actualOwnedBySiblingSizes = {};
+    for (var siblingDump of dump.ownedBySiblingSizes.keys()) {
+      assert.strictEqual(siblingDump.parent, dump.parent);
+      actualOwnedBySiblingSizes[siblingDump.name] =
+          dump.ownedBySiblingSizes.get(siblingDump);
+    }
+    assert.deepEqual(actualOwnedBySiblingSizes, expectedOwnedBySiblingSizes,
+        'ownedBySiblingSizes of memory allocator dump \'' + dump.fullName +
+        '\' in ' + dump.containerMemoryDump.userFriendlyName +
+        ' doesn\'t contain the expected values');
+  }
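For example (hypothetical values), a dump whose own 'size' entry claimed 40 bytes while its children aggregated to 50 bytes would, absent any ownership links, end up with both sizes at 50 and a single info recording the discrepancy, so a test could assert:

    assertDumpSizes(dump, 50 /* size */, 50 /* effective size */, [
      {
        type: PROVIDED_SIZE_LESS_THAN_AGGREGATED_CHILDREN,
        providedSize: 40,
        dependencySize: 50
      }
    ]);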
+
   function createContainerDumps(processMemoryDumpCount, opt_model) {
     var model = opt_model;
     if (model === undefined)
@@ -150,8 +175,12 @@
    *   owns: GUID of another MAD owned by the resulting MAD (no owned MAD if
    *       undefined).
    *   importance: Importance of the above ownership (can be undefined).
-   *   size: Value of the 'size' attribute of the resulting MAD (no 'size'
-   *       attribute if undefined).
+   *   size: Value of the 'size' numeric of the resulting MAD (no 'size'
+   *       numeric if undefined).
+   *   numerics: Extra numerics of the resulting MAD (dictionary).
+   *   diagnostics: Extra diagnostics of the resulting MAD (dictionary).
+   *   weak: Whether the resulting MAD should be weak (undefined implies
+   *       non-weak).
    *   children: List of tree recipes for child MADs (no children if undefined).
    *   skip_build: If this optional property is set to true, this function will
    *       skip the corresponding tree recipe node and will not create a MAD
@@ -176,8 +205,10 @@
         var guid = treeRecipe['guid'];
         var owns = treeRecipe['owns'];
         var size = treeRecipe['size'];
-        var attrs = treeRecipe['attrs'];
+        var numerics = treeRecipe['numerics'];
+        var diagnostics = treeRecipe['diagnostics'];
         var importance = treeRecipe['importance'];
+        var weak = treeRecipe['weak'];
 
         assert.notStrictEqual(skipBuild, true);
         assert.isDefined(name);
@@ -185,8 +216,10 @@
         var fullName = namePrefix + name;
         var dump = new MemoryAllocatorDump(containerDump, fullName, guid);
 
-        if (size !== undefined)
-          dump.addAttribute('size', new ScalarAttribute('bytes', size));
+        if (size !== undefined) {
+          dump.addNumeric(
+              'size', new ScalarNumeric(sizeInBytes_smallerIsBetter, size));
+        }
         if (guid !== undefined) {
           assert.notProperty(guid, ownableDumps);
           ownableDumps[guid] = dump;
@@ -195,6 +228,8 @@
           if (!(owns in ownerDumps))
             ownerDumps[owns] = [];
           ownerDumps[owns].push({dump: dump, importance: importance});
+        } else {
+          assert.isUndefined(importance);  // Test sanity check.
         }
 
         if (treeRecipe.children !== undefined) {
@@ -209,8 +244,13 @@
           });
         }
 
-        if (attrs !== undefined)
-          tr.b.iterItems(attrs, dump.addAttribute, dump);
+        if (numerics !== undefined)
+          tr.b.iterItems(numerics, dump.addNumeric, dump);
+        if (diagnostics !== undefined)
+          tr.b.iterItems(diagnostics, dump.addDiagnostic, dump);
+
+        if (weak)
+          dump.weak = true;
 
         return dump;
       }
@@ -234,11 +274,8 @@
           'allocator dump with guid \'' + ownedGuid + '\'');
 
       ownershipInfos.forEach(function(ownershipInfo) {
-        var ownerDump = ownershipInfo.dump;
-        var ownershipLink = new MemoryAllocatorDumpLink(
-            ownerDump, ownedDump, ownershipInfo.importance);
-        ownerDump.owns = ownershipLink;
-        ownedDump.ownedBy.push(ownershipLink);
+        addOwnershipLink(
+            ownershipInfo.dump, ownedDump, ownershipInfo.importance);
       });
     });
 
@@ -269,7 +306,8 @@
           'children': [
             {
               'name': 'isolate1',
-              'guid': 7
+              'guid': 7,
+              'weak': true
             },
             {
               'name': 'isolate2',
@@ -287,7 +325,8 @@
                 },
                 {
                   'name': 'obj2',
-                  'owns': 3
+                  'owns': 3,
+                  'weak': true
                 },
                 {
                   'name': 'obj3',
@@ -307,7 +346,7 @@
 
     function checkDump(dump, expectedGuid, expectedFullName, expectedParent,
         expectedChildrenCount, expectedSize, expectedIsOwner,
-        expectedOwnersCount, expectedContainerDump) {
+        expectedOwnersCount, expectedContainerDump, opt_expectedWeak) {
       assert.isDefined(dump);
       assert.instanceOf(dump, MemoryAllocatorDump);
       assert.strictEqual(dump.guid, expectedGuid);
@@ -315,8 +354,8 @@
       assert.strictEqual(dump.parent, expectedParent);
       assert.lengthOf(dump.children, expectedChildrenCount);
 
-      assertSizeAttribute(dump, 'size', expectedSize);
-      assertSizeAttribute(dump, 'subsystem_size', undefined);
+      assertSizeNumeric(dump, 'size', expectedSize);
+      assertSizeNumeric(dump, 'subsystem_size', undefined);
 
       if (expectedIsOwner)
         assert.isDefined(dump.owns);
@@ -327,6 +366,7 @@
       assert.strictEqual(dump.containerMemoryDump, expectedContainerDump);
       assert.strictEqual(expectedContainerDump.getMemoryAllocatorDumpByFullName(
           expectedFullName), dump);
+      assert.strictEqual(dump.weak, !!opt_expectedWeak);
     }
 
     function checkOwnershipLink(expectedSourceDump, expectedTargetDump,
@@ -359,7 +399,7 @@
         pmd2);
     var isolate1Dump = v8Dump.children[0];
     checkDump(isolate1Dump, 7, 'v8/isolate1', v8Dump, 0, undefined, false, 1,
-        pmd2);
+        pmd2, true /* weak dump */);
     var isolate3Dump = v8Dump.children[1];
     checkDump(isolate3Dump, 60, 'v8/isolate3', v8Dump, 3, 54, false, 0, pmd2);
     var obj1Dump = isolate3Dump.children[0];
@@ -367,7 +407,7 @@
         pmd2);
     var obj2Dump = isolate3Dump.children[1];
     checkDump(obj2Dump, undefined, 'v8/isolate3/obj2', isolate3Dump, 0,
-        undefined, true, 0, pmd2);
+        undefined, true, 0, pmd2, true /* weak dump */);
     var obj3Dump = isolate3Dump.children[2];
     checkDump(obj3Dump, undefined, 'v8/isolate3/obj3', isolate3Dump, 0,
         undefined, true, 0, pmd2);
@@ -386,19 +426,20 @@
    * tree recipes):
    *
    *   name: Expected name of the MAD.
-   *   expected_size: Expected value of the 'size' attribute of the MAD (no
-   *       'size' attribute expected if undefined).
-   *   expected_size_infos: List of expected 'size' attribute infos (zero infos
-   *       expected if undefined). The items in the list are object with two
-   *       fields: 'type' (expected value of the info type), and 'message'
-   *       (regular expression over the info message).
+   *   expected_removed: If provided and true, it is expected that there is no
+   *       dump for the recipe.
+   *   expected_size: Expected value of the 'size' numeric of the MAD (no
+   *       'size' numeric expected if undefined).
    *   expected_effective_size: Expected value of the 'effective_size'
-   *       attribute of the MAD (no 'effective_size' attribute expected if
+   *       numeric of the MAD (no 'effective_size' numeric expected if
    *       undefined).
-   *   expected_effective_size_infos: List of expected 'effective_size'
-   *       attribute infos (zero infos expected if undefined). The items in the
-   *       list are object with two fields: 'type' (expected value of the info
-   *       type), and 'message' (regular expression over the info message).
+   *   expected_infos: List of expected MAD infos (zero infos expected if
+   *       undefined).
+   *   weak: Whether the MAD is expected to be weak (non-weak if undefined).
+   *   owns: Expected GUID of the dump owned by the MAD.
+   *   importance: Expected importance of the ownership from this MAD.
+   *   expected_owned_by_links_count: Expected number of 'ownedBy' links of the
+   *       MAD.
    *   children: List of tree recipes for child MADs (no children expected if
    *       undefined).
    *
@@ -420,45 +461,92 @@
         continue;
       }
 
+      var expectedTreeRecipes = treeRecipes.filter(function(treeRecipe) {
+        return !treeRecipe['expected_removed'];
+      });
+
       assert.isDefined(memoryAllocatorDumps,
           'expected defined memory allocator dumps in ' +
           containerDump.userFriendlyName);
-      assert.lengthOf(memoryAllocatorDumps, treeRecipes.length,
-          'expected ' + treeRecipes.length + ' root memory allocator dumps ' +
-          'but got ' + memoryAllocatorDumps.length + ' in ' +
+      assert.lengthOf(memoryAllocatorDumps, expectedTreeRecipes.length,
+          'expected ' + expectedTreeRecipes.length + ' root memory allocator ' +
+          'dumps but got ' + memoryAllocatorDumps.length + ' in ' +
           containerDump.userFriendlyName);
 
       function checkDumpTree(dump, treeRecipe, expectedParent, namePrefix) {
+        // Test sanity check.
+        assert.isFalse(!!treeRecipe['expected_removed']);
+
         // Check full name, parent, and container dump.
         var expectedFullName = namePrefix + treeRecipe['name'];
-        var errorMessagePrefix = 'memory allocator dump \'' + dump.fullName +
-            '\' in ' + dump.containerMemoryDump.userFriendlyName + ' ';
-
+        var quantifiedName = dump.quantifiedName;
         assert.equal(dump.fullName, expectedFullName,
-            errorMessagePrefix + 'has invalid full name');
+            quantifiedName + ' has invalid full name');
         assert.strictEqual(dump.parent, expectedParent,
-            errorMessagePrefix + 'has invalid parent');
+            quantifiedName + ' has invalid parent');
         assert.strictEqual(dump.containerMemoryDump, containerDump,
-            errorMessagePrefix + 'has invalid container memory dump');
+            quantifiedName + ' has invalid container memory dump');
         assert.strictEqual(containerDump.getMemoryAllocatorDumpByFullName(
-            expectedFullName), dump, errorMessagePrefix +
+            expectedFullName), dump, quantifiedName + ' ' +
             'is not indexed in its container memory dump');
 
-        // Check that 'size' was calculated correctly.
-        assertSizeAttribute(dump, 'size', treeRecipe['expected_size'],
-            treeRecipe['expected_size_infos']);
+        // Check the guid of the dump.
+        assert.strictEqual(dump.guid, treeRecipe['guid'],
+            quantifiedName + ' has invalid guid');
 
-        // Check that 'effective_size' was calculated correctly.
-        assertSizeAttribute(dump, 'effective_size',
+        // Check that the 'weak' flag is correct.
+        assert.strictEqual(dump.weak, !!treeRecipe['weak'],
+            quantifiedName + ' has invalid weak flag');
+
+        // Check that sizes were calculated correctly.
+        assertDumpSizes(dump,
+            treeRecipe['expected_size'],
             treeRecipe['expected_effective_size'],
-            treeRecipe['expected_effective_size_infos']);
+            treeRecipe['expected_infos'],
+            treeRecipe['expected_owned_by_sibling_sizes']);
+
+        // Check that the 'owns' link is correct.
+        if (treeRecipe['owns'] === undefined) {
+          assert.isUndefined(dump.owns,
+              quantifiedName + ' was expected not to own another dump');
+        } else {
+          var ownershipLink = dump.owns;
+          assert.isDefined(dump.owns, quantifiedName +
+              ' was expected to have an \'owns\' link');
+          assert.strictEqual(ownershipLink.source, dump,
+              'the \'owns\' link of ' + quantifiedName + ' has invalid source');
+          var expectedImportance = treeRecipe['importance'];
+          assert.strictEqual(ownershipLink.importance, expectedImportance,
+              'expected the importance of the \'owns\' link of ' +
+              quantifiedName + ' to be ' + expectedImportance +
+              ' but got ' + ownershipLink.importance);
+          var ownedDump = ownershipLink.target;
+          assert.strictEqual(ownedDump.guid, treeRecipe['owns'],
+              'the \'owns\' link of ' + quantifiedName +
+              ' has an invalid target');
+          assert.include(ownedDump.ownedBy, ownershipLink,
+              'the target of the \'owns\' link of ' + quantifiedName +
+              ' doesn\'t have the link in its \'ownedBy\' list');
+        }
+
+        // Check that the number of 'ownedBy' links is correct.
+        var expectedOwnedByLinksCount =
+            treeRecipe['expected_owned_by_links_count'];
+        if (expectedOwnedByLinksCount !== undefined) {
+          assert.lengthOf(dump.ownedBy, expectedOwnedByLinksCount,
+              'expected ' + quantifiedName + ' to have ' +
+              expectedOwnedByLinksCount + ' \'ownedBy\' links but got ' +
+              dump.ownedBy.length);
+        }
 
         // Check children recursively.
         var actualChildren = dump.children;
-        var expectedChildren = treeRecipe.children || [];
+        var expectedChildren = (treeRecipe.children || []).filter(
+            function(childRecipe) {
+              return !childRecipe['expected_removed'];
+            });
         assert.lengthOf(actualChildren, expectedChildren.length,
-            'expected memory allocator dump \'' + dump.fullName + '\' in ' +
-            dump.containerMemoryDump.userFriendlyName + ' to have ' +
+            'expected ' + quantifiedName + ' to have ' +
             expectedChildren.length + ' children but got ' +
             actualChildren.length);
         for (var k = 0; k < actualChildren.length; k++) {
@@ -467,64 +555,11 @@
         }
       }
 
-      for (var j = 0; j < memoryAllocatorDumps.length; j++)
-        checkDumpTree(memoryAllocatorDumps[j], treeRecipes[j], undefined, '');
-    }
-  }
-
-  function genericInfo(type, regex) {
-    return {type: type, message: regex};
-  }
-
-  function informationInfo(regex) {
-    return genericInfo(AttributeInfoType.INFORMATION, regex);
-  }
-
-  function warningInfo(regex) {
-    return genericInfo(AttributeInfoType.WARNING, regex);
-  }
-
-  function ownershipInfo(/* type, prefix1, entries1, ... */) {
-    // Test sanity checks.
-    assert(arguments.length >= 3);
-    assert(arguments.length % 2 === 1);
-
-    var type = arguments[0];
-    var regExpString = '';
-
-    for (var i = 1; i < arguments.length; i += 2) {
-      var prefix = arguments[i];
-      var entries = arguments[i + 1];
-      assert(entries.length % 3 === 0);  // Test sanity check.
-
-      regExpString += '.*' + prefix;
-
-      for (var j = 0; j < entries.length; j += 3) {
-        regExpString += '[\\s\\S]*\'' + entries[j] + '\'';
-        regExpString += '.*\\b' + entries[j + 1] + '\\b';
-        regExpString += '.*\\bimportance: ' + entries[j + 2] + '\\b';
+      for (var j = 0; j < memoryAllocatorDumps.length; j++) {
+        checkDumpTree(
+            memoryAllocatorDumps[j], expectedTreeRecipes[j], undefined, '');
       }
     }
-
-    return genericInfo(type, new RegExp(regExpString));
-  }
-
-  function ownerInfo(/* ownedFullName, ownedContainerName, ownedImportance,
-                        otherOwnerFullName1, otherOwnerContainerName1,
-                        otherOwnerImportance1, ... */) {
-    var ownedEntries = Array.prototype.slice.call(arguments, 0, 3);
-    var otherOwnerEntries = Array.prototype.slice.call(arguments, 3);
-    return ownershipInfo(AttributeInfoType.MEMORY_OWNER,
-        '\\bshares\\b', ownedEntries,
-        '\\bwith' + (otherOwnerEntries.length === 0 ? ' no other\\b' : ':\\n'),
-        otherOwnerEntries);
-  }
-
-  function ownedInfo(/* ownerFullName1, ownerContainerName1, ownerImportance1,
-                        ... */) {
-    var ownerEntries = Array.prototype.slice.call(arguments);
-    return ownershipInfo(AttributeInfoType.MEMORY_OWNED, '\\bshared by\\b',
-        ownerEntries);
   }
 
   // Check that the checkDumpTrees testing helper method above actually
@@ -557,13 +592,28 @@
     }, /'heaps'.*invalid full name/);
   });
 
+  test('testSanityCheck_checkDumpTrees_invalidGuid', function() {
+    var containerDumps = createContainerDumps(0);
+    var gmd = containerDumps[0];
+    gmd.memoryAllocatorDumps = [new MemoryAllocatorDump(gmd, 'v8', 42)];
+
+    assert.throws(function() {
+      checkDumpTrees(containerDumps, [
+        [
+          {
+            'name': 'v8',
+            'guid': 43  // This should be 42.
+          }
+        ]
+      ]);
+    }, /'v8'.*\binvalid guid\b/);
+  });
+
   test('testSanityCheck_checkDumpTrees_invalidStructure', function() {
     var containerDumps = createContainerDumps(0);
     var gmd = containerDumps[0];
     var rootDump = new MemoryAllocatorDump(gmd, 'root');
-    var child1Dump = new MemoryAllocatorDump(gmd, 'root/child1');
-    rootDump.children.push(child1Dump);
-    child1Dump.parent = rootDump;
+    newChildDump(rootDump, 'child1');
     gmd.memoryAllocatorDumps = [rootDump];
 
     assert.throws(function() {
@@ -591,11 +641,8 @@
     var containerDumps = createContainerDumps(0);
     var gmd = containerDumps[0];
     var rootDump = new MemoryAllocatorDump(gmd, 'root');
-    var parentDump = new MemoryAllocatorDump(gmd, 'root/parent');
-    rootDump.children.push(parentDump);
-    parentDump.parent = rootDump;
-    var childDump = new MemoryAllocatorDump(gmd, 'root/parent/child');
-    parentDump.children.push(childDump);
+    var parentDump = newChildDump(rootDump, 'parent');
+    var childDump = newChildDump(parentDump, 'child');
     childDump.parent = rootDump;  // This should correctly be parentDump.
     gmd.memoryAllocatorDumps = [rootDump];
 
@@ -624,12 +671,8 @@
     var containerDumps = createContainerDumps(1);
     var gmd = containerDumps[0];
     var pmd = containerDumps[1];
-    var rootDump = new MemoryAllocatorDump(pmd, 'root');
-    rootDump.addAttribute('size', new ScalarAttribute('bytes', 100));
-    var parentDump = new MemoryAllocatorDump(pmd, 'root/parent');
-    parentDump.addAttribute('size', new ScalarAttribute('bytes', 49));
-    rootDump.children.push(parentDump);
-    parentDump.parent = rootDump;
+    var rootDump = newAllocatorDump(pmd, 'root', { size: 100 });
+    newChildDump(rootDump, 'parent', { size: 49 });
     pmd.memoryAllocatorDumps = [rootDump];
 
     assert.throws(function() {
@@ -655,12 +698,8 @@
     var containerDumps = createContainerDumps(1);
     var gmd = containerDumps[0];
     var pmd = containerDumps[1];
-    var rootDump = new MemoryAllocatorDump(pmd, 'root');
-    rootDump.addAttribute('effective_size', new ScalarAttribute('bytes', 99));
-    var parentDump = new MemoryAllocatorDump(pmd, 'root/parent');
-    parentDump.addAttribute('effective_size', new ScalarAttribute('bytes', 50));
-    rootDump.children.push(parentDump);
-    parentDump.parent = rootDump;
+    var rootDump = newAllocatorDump(pmd, 'root', { effective_size: 99 });
+    newChildDump(rootDump, 'parent', { effective_size: 50 });
     pmd.memoryAllocatorDumps = [rootDump];
 
     assert.throws(function() {
@@ -682,12 +721,43 @@
     }, /expected.*'effective_size'.*value.*\b100\b.*got.*\b99\b/);
   });
 
-  test('testSanityCheck_checkDumpTrees_invalidSizeInfoCount', function() {
+  test('testSanityCheck_checkDumpTrees_invalidInfoCount', function() {
     var containerDumps = createContainerDumps(0);
     var gmd = containerDumps[0];
-    var v8Dump = new MemoryAllocatorDump(gmd, 'v8');
-    // This attribute should have an info.
-    v8Dump.addAttribute('size', new ScalarAttribute('bytes', 50));
+    gmd.memoryAllocatorDumps = [
+      newAllocatorDump(gmd, 'v8', {
+        size: 50
+      })
+    ];
+
+    assert.throws(function() {
+      checkDumpTrees(containerDumps, [
+        [
+          {
+            'name': 'v8',
+            'expected_size': 50,
+            'expected_infos': [
+              {
+                type: PROVIDED_SIZE_LESS_THAN_AGGREGATED_CHILDREN,
+                providedSize: 50,
+                dependencySize: 60
+              }
+            ]
+          }
+        ]
+      ]);
+    }, /expected.*'v8'.*\b1 infos\b.*\bgot\b.*\b0\b/);
+  });
+
+  test('testSanityCheck_checkDumpTrees_invalidInfo', function() {
+    var containerDumps = createContainerDumps(0);
+    var gmd = containerDumps[0];
+    var v8Dump = newAllocatorDump(gmd, 'v8', { size: 50 });
+    v8Dump.infos.push({
+      type: PROVIDED_SIZE_LESS_THAN_AGGREGATED_CHILDREN,
+      providedSize: 40,
+      dependencySize: 50
+    });
     gmd.memoryAllocatorDumps = [v8Dump];
 
     assert.throws(function() {
@@ -696,23 +766,26 @@
           {
             'name': 'v8',
             'expected_size': 50,
-            'expected_size_infos': [
-              warningInfo(/[some_message]/)
+            'expected_infos': [
+              {
+                // Should be PROVIDED_SIZE_LESS_THAN_AGGREGATED_CHILDREN below.
+                type: PROVIDED_SIZE_LESS_THAN_LARGEST_OWNER,
+                providedSize: 40,
+                dependencySize: 50
+              }
             ]
           }
         ]
       ]);
-    }, /expected.*\'size'.*'v8'.*\b1 infos\b.*\bgot\b.*\b0\b/);
+    }, /\binfo 0\b.*'v8'.*\bexpected\b/);
   });
 
-  test('testSanityCheck_checkDumpTrees_invalidSizeInfoType', function() {
+  test('testSanityCheck_checkDumpTrees_invalidOwnedBySiblingSizes', function() {
     var containerDumps = createContainerDumps(0);
     var gmd = containerDumps[0];
     var v8Dump = new MemoryAllocatorDump(gmd, 'v8');
-    var attr = new ScalarAttribute('bytes', 50);
-    attr.infos.push(new AttributeInfo(
-        AttributeInfoType.INFORMATION, 'message text!'));
-    v8Dump.addAttribute('size', attr);
+    newChildDump(v8Dump, 'child1', {}, 42 /* guid */);
+    newChildDump(v8Dump, 'child2');
     gmd.memoryAllocatorDumps = [v8Dump];
 
     assert.throws(function() {
@@ -720,67 +793,336 @@
         [
           {
             'name': 'v8',
-            'expected_size': 50,
-            'expected_size_infos': [
-              warningInfo(/^message text!$/)  // This should be informationInfo.
+            'children': [
+              {
+                'name': 'child1',
+                'guid': 42
+              },
+              {
+                'name': 'child2',
+                'expected_owned_by_sibling_sizes': {
+                  'child1': 40  // This should be 30.
+                }
+              }
             ]
           }
         ]
       ]);
-    }, /expected.*\binfo 0\b.*'size'.*'v8'.*\btype 1\b.*\bgot\b.*\b0\b/);
+    }, /\bownedBySiblingSizes\b.*'v8\/child2'.*\bexpected\b/);
   });
 
-  test('testSanityCheck_checkDumpTrees_invalidSizeInfoMessage', function() {
-    var containerDumps = createContainerDumps(0);
-    var gmd = containerDumps[0];
-    var v8Dump = new MemoryAllocatorDump(gmd, 'v8');
-    var attr = new ScalarAttribute('bytes', 50);
-    attr.infos.push(new AttributeInfo(AttributeInfoType.WARNING, 'ok'));
-    attr.infos.push(new AttributeInfo(AttributeInfoType.INFORMATION, 'one'));
-    v8Dump.addAttribute('size', attr);
-    gmd.memoryAllocatorDumps = [v8Dump];
-
-    assert.throws(function() {
-      checkDumpTrees(containerDumps, [
-        [
-          {
-            'name': 'v8',
-            'expected_size': 50,
-            'expected_size_infos': [
-              warningInfo(/.*/),
-              informationInfo(/two/)  // This should be /one/.
-            ]
-          }
-        ]
-      ]);
-    }, /invalid message.*\binfo 1\b.*'size'.*'v8'/);
-  });
-
-  test('testSanityCheck_checkDumpTrees_invalidEffectiveSizeInfo',
+  test('testSanityCheck_checkDumpTrees_invalidWeakFlag',
       function() {
     var containerDumps = createContainerDumps(0);
     var gmd = containerDumps[0];
-    var oilpanDump = new MemoryAllocatorDump(gmd, 'oilpan');
-    var attr = new ScalarAttribute('bytes', 78);
-    attr.infos.push(new AttributeInfo(AttributeInfoType.INFORMATION, 'three'));
-    oilpanDump.addAttribute('size', new ScalarAttribute('bytes', 49));
-    oilpanDump.addAttribute('effective_size', attr);
-    gmd.memoryAllocatorDumps = [oilpanDump];
+    var parentDump = new MemoryAllocatorDump(gmd, 'parent');
+    var childDump = newChildDump(parentDump, 'child');
+    childDump.weak = true;
+    gmd.memoryAllocatorDumps = [parentDump];
 
     assert.throws(function() {
       checkDumpTrees(containerDumps, [
         [
           {
-            'name': 'oilpan',
-            'expected_size': 49,
-            'expected_effective_size': 78,
-            'expected_effective_size_infos': [
-              informationInfo(/two/)  // This should be /three/.
+            'name': 'parent',
+            'children': [
+              {
+                'name': 'child',
+                // Missing "'weak': true".
+              }
             ]
           }
         ]
       ]);
-    }, /invalid message.*\binfo 0\b.*'effective_size'.*'oilpan'/);
+    }, /'parent\/child'.*\binvalid weak flag\b/);
+
+    assert.throws(function() {
+      checkDumpTrees(containerDumps, [
+        [
+          {
+            'name': 'parent',
+            'weak': true,  // This should be false (or not provided).
+            'children': [
+              {
+                'name': 'child',
+                'weak': true
+              }
+            ]
+          }
+        ]
+      ]);
+    }, /'parent'.*\binvalid weak flag\b/);
+  });
+
+  test('testSanityCheck_checkDumpTrees_dumpNotRemoved', function() {
+    var containerDumps = createContainerDumps(0);
+    var gmd = containerDumps[0];
+    var parentDump = new MemoryAllocatorDump(gmd, 'parent');
+    for (var i = 1; i <= 3; i++)
+      newChildDump(parentDump, 'child' + i);
+    var otherDump = new MemoryAllocatorDump(gmd, 'other');
+    gmd.memoryAllocatorDumps = [parentDump, otherDump];
+
+    // Child MAD not removed.
+    assert.throws(function() {
+      checkDumpTrees(containerDumps, [
+        [
+          {
+            'name': 'parent',
+            'children': [
+              {
+                'name': 'child1',
+              },
+              {
+                'name': 'child2',
+                'expected_removed': true
+              },
+              {
+                'name': 'child3',
+              }
+            ]
+          },
+          {
+            'name': 'other'
+          }
+        ]
+      ]);
+    }, /\bexpected\b.*'parent'.*\b2 children\b.*\bgot 3\b/);
+
+    // Root MAD not removed.
+    assert.throws(function() {
+      checkDumpTrees(containerDumps, [
+        [
+          {
+            'name': 'parent',
+            'children': [
+              {
+                'name': 'child1',
+              },
+              {
+                'name': 'child2'
+              },
+              {
+                'name': 'child3',
+              }
+            ]
+          },
+          {
+            'name': 'other',
+            'expected_removed': true
+          }
+        ]
+      ]);
+    }, /\bexpected\b.*\b1 root memory allocator dumps\b.*\bgot 2\b/);
+  });
+
+  test('testSanityCheck_checkDumpTrees_invalidOwnership', function() {
+    var containerDumps = createContainerDumps(1);
+    var gmd = containerDumps[0];
+    var pmd1 = containerDumps[1];
+    var ownedDump = new MemoryAllocatorDump(gmd, 'owned', 42);
+    var ownerDump1 = new MemoryAllocatorDump(pmd1, 'owner1');
+    var link1 = addOwnershipLink(ownerDump1, ownedDump);
+    var ownerDump2 = new MemoryAllocatorDump(pmd1, 'owner2');
+    var link2 = addOwnershipLink(ownerDump2, ownedDump, 3);
+    var nonOwnerDump = new MemoryAllocatorDump(pmd1, 'non-owner', 90);
+    gmd.memoryAllocatorDumps = [ownedDump];
+    pmd1.memoryAllocatorDumps = [ownerDump1, ownerDump2, nonOwnerDump];
+
+    // Missing 'owns' link.
+    assert.throws(function() {
+      checkDumpTrees(containerDumps, [
+        [
+          {
+            'name': 'owned',
+            'guid': 42
+          }
+        ],
+        [
+          {
+            'name': 'owner1',
+            'owns': 42
+          },
+          {
+            'name': 'owner2',
+            'importance': 3,
+            'owns': 42
+          },
+          {
+            'name': 'non-owner',
+            'guid': 90,
+            'owns': 42  // This should not be here.
+          }
+        ]
+      ]);
+    }, /'non-owner'.*\bwas expected to have\b.*'owns' link\b/);
+
+    // Extra 'owns' link.
+    assert.throws(function() {
+      checkDumpTrees(containerDumps, [
+        [
+          {
+            'name': 'owned',
+            'guid': 42
+          }
+        ],
+        [
+          {
+            'name': 'owner1'
+            // Missing: "'owns': 42".
+          },
+          {
+            'name': 'owner2',
+            'importance': 3,
+            'owns': 42
+          },
+          {
+            'name': 'non-owner',
+            'guid': 90
+          }
+        ]
+      ]);
+    }, /'owner1'.*\bwas expected not to own\b/);
+
+    // Invalid ownership importance.
+    assert.throws(function() {
+      checkDumpTrees(containerDumps, [
+        [
+          {
+            'name': 'owned',
+            'guid': 42
+          }
+        ],
+        [
+          {
+            'name': 'owner1',
+            'owns': 42
+          },
+          {
+            'name': 'owner2',
+            'importance': 2,  // This should be 3.
+            'owns': 42
+          },
+          {
+            'name': 'non-owner',
+            'guid': 90
+          }
+        ]
+      ]);
+    }, /\bexpected\b.*\bimportance\b.*'owner2'.*\b2 but got 3\b/);
+
+    // Invalid ownership target.
+    assert.throws(function() {
+      checkDumpTrees(containerDumps, [
+        [
+          {
+            'name': 'owned',
+            'guid': 42
+          }
+        ],
+        [
+          {
+            'name': 'owner1',
+            'owns': 90  // This should be 42.
+          },
+          {
+            'name': 'owner2',
+            'importance': 3,
+            'owns': 42
+          },
+          {
+            'name': 'non-owner',
+            'guid': 90
+          }
+        ]
+      ]);
+    }, /'owner1'.*\binvalid target\b/);
+
+    // Invalid 'ownedBy' ownership links count.
+    assert.throws(function() {
+      checkDumpTrees(containerDumps, [
+        [
+          {
+            'name': 'owned',
+            'guid': 42,
+            'expected_owned_by_links_count': 3  // This should be 2.
+          }
+        ],
+        [
+          {
+            'name': 'owner1',
+            'owns': 42
+          },
+          {
+            'name': 'owner2',
+            'importance': 3,
+            'owns': 42
+          },
+          {
+            'name': 'non-owner',
+            'guid': 90
+          }
+        ]
+      ]);
+    }, /'owned'.*\bhave 3 'ownedBy' links\b.*\bgot 2\b/);
+
+    // Invalid ownership source.
+    link1.source = ownerDump2;
+    assert.throws(function() {
+      checkDumpTrees(containerDumps, [
+        [
+          {
+            'name': 'owned',
+            'guid': 42
+          }
+        ],
+        [
+          {
+            'name': 'owner1',
+            'owns': 42
+          },
+          {
+            'name': 'owner2',
+            'importance': 3,
+            'owns': 42
+          },
+          {
+            'name': 'non-owner',
+            'guid': 90
+          }
+        ]
+      ]);
+    }, /'owns' link\b.*'owner1'.*\binvalid source\b/);
+    link1.source = ownerDump1;
+
+    // Ownership link not in target's 'ownedBy' list.
+    ownedDump.ownedBy.pop();
+    assert.throws(function() {
+      checkDumpTrees(containerDumps, [
+        [
+          {
+            'name': 'owned',
+            'guid': 42
+          }
+        ],
+        [
+          {
+            'name': 'owner1',
+            'owns': 42
+          },
+          {
+            'name': 'owner2',
+            'importance': 3,
+            'owns': 42
+          },
+          {
+            'name': 'non-owner',
+            'guid': 90
+          }
+        ]
+      ]);
+    }, /\btarget of\b.*'owner2'.*'ownedBy' list\b/);
+    ownedDump.ownedBy.push(link2);
   });
 
   /**
@@ -793,7 +1135,7 @@
    * details about the structure of tree recipes.
    */
   function testSizesCalculation(allTreeRecipes) {
-    var m = new tr.Model();
+    var m = new Model();
     var io = new tr.importer.ImportOptions();
     io.showImportWarnings = false;
     m.importOptions = io;
@@ -802,6 +1144,7 @@
     var gmd = containerDumps[0];
     gmd.calculateSizes();
     gmd.calculateEffectiveSizes();
+    gmd.forceRebuildingMemoryAllocatorDumpByFullNameIndices();
     checkDumpTrees(containerDumps, allTreeRecipes);
   }
 
@@ -845,6 +1188,70 @@
     });
   }
 
+  /**
+   * Build container memory dumps from tree recipes, let the resulting
+   * GlobalMemoryDump remove weak memory dumps, and then check that the updated
+   * container memory dumps have the expected structure (as described by the
+   * same tree recipes).
+   *
+   * See the documentation for buildDumpTrees and checkDumpTrees for more
+   * details about the structure of tree recipes.
+   */
+  function testWeakDumpRemoval(allTreeRecipes) {
+    var m = new Model();
+    var io = new tr.importer.ImportOptions();
+    io.showImportWarnings = false;
+    m.importOptions = io;
+
+    var containerDumps = buildDumpTrees(allTreeRecipes, m);
+    var gmd = containerDumps[0];
+    gmd.removeWeakDumps();
+    gmd.forceRebuildingMemoryAllocatorDumpByFullNameIndices();
+    checkDumpTrees(containerDumps, allTreeRecipes);
+  }
+
+  // As with testSanityCheck_testSizesCalculation, check that the
+  // testWeakDumpRemoval helper above actually performs the expected checks.
+  test('testSanityCheck_testWeakDumpRemoval', function() {
+    assert.throws(function() {
+      testWeakDumpRemoval([
+        [],
+        undefined,
+        [
+          {
+            'name': 'winheap'
+          },
+          {
+            'name': 'malloc',
+            'children': [
+              {
+                'name': 'allocated_objects'
+              },
+              {
+                'name': 'directly_weak',
+                'guid': 42,
+                'weak': true,
+                'expected_removed': true
+              },
+              {
+                'name': 'indirectly_weak',
+                'owns': 42
+                // Missing: "'expected_removed': true".
+              }
+            ]
+          }
+        ]
+      ]);
+    }, /expected.*'malloc'.*\b2 children\b.*\bgot 1\b/);
+  });
+
+  function weakDumpRemovalTest(caseName, treeRecipes) {
+    test('removeWeakDumps_' + caseName, function() {
+      testWeakDumpRemoval(treeRecipes);
+    });
+  }
+
   /////////////////////////////////////////////////////////////////////////////
   // Actual tests begin here.
   /////////////////////////////////////////////////////////////////////////////
@@ -1522,9 +1929,6 @@
         'size': 15,
         'expected_size': 15,
         'expected_effective_size': 15,
-        'expected_effective_size_infos': [
-          ownedInfo('tile', 'Process 0', 0, 'chunk', 'Process 1', 0)
-        ],
         'guid': 7
       }
     ],
@@ -1548,9 +1952,6 @@
         'name': 'bitmap',
         'expected_size': 9,
         'expected_effective_size': 0,
-        'expected_effective_size_infos': [
-          ownedInfo('tile', 'Process 0', 0, 'chunk', 'Process 1', 0)
-        ],
         'guid': 7
       }
     ],
@@ -1560,9 +1961,6 @@
         'size': 5,
         'expected_size': 5,
         'expected_effective_size': 2.5,
-        'expected_effective_size_infos': [
-          ownerInfo('bitmap', 'global space', 0, 'chunk', 'Process 1', 0)
-        ],
         'owns': 7
       }
     ],
@@ -1572,9 +1970,6 @@
         'size': 9,
         'expected_size': 9,
         'expected_effective_size': 6.5,
-        'expected_effective_size_infos': [
-          ownerInfo('bitmap', 'global space', 0, 'tile', 'Process 0', 0)
-        ],
         'owns': 7
       }
     ]
@@ -1586,9 +1981,6 @@
         'name': 'bitmap',
         'expected_size': 16,
         'expected_effective_size': 0,
-        'expected_effective_size_infos': [
-          ownedInfo('tile', 'Process 0', 0, 'chunk', 'Process 1', 0)
-        ],
         'guid': 7
       }
     ],
@@ -1598,9 +1990,6 @@
         'size': 16,
         'expected_size': 16,
         'expected_effective_size': 16,
-        'expected_effective_size_infos': [
-          ownerInfo('bitmap', 'global space', 0, 'chunk', 'Process 1', 0)
-        ],
         'owns': 7
       }
     ],
@@ -1619,9 +2008,6 @@
         'size': 20,
         'expected_size': 20,
         'expected_effective_size': 2,
-        'expected_effective_size_infos': [
-          ownedInfo('tile', 'Process 0', 0, 'chunk', 'Process 1', 0)
-        ],
         'guid': 7
       }
     ],
@@ -1637,9 +2023,6 @@
         'size': 18,
         'expected_size': 18,
         'expected_effective_size': 18,
-        'expected_effective_size_infos': [
-          ownerInfo('bitmap', 'global space', 0, 'tile', 'Process 0', 0)
-        ],
         'owns': 7
       }
     ]
@@ -1652,9 +2035,6 @@
         'size': 60,
         'expected_size': 60,
         'expected_effective_size': 31,
-        'expected_effective_size_infos': [
-          ownedInfo('tile', 'Process 0', 0, 'chunk', 'Process 1', 0)
-        ],
         'guid': 7
       }
     ],
@@ -1664,9 +2044,6 @@
         'size': 29,
         'expected_size': 29,
         'expected_effective_size': 19.5,
-        'expected_effective_size_infos': [
-          ownerInfo('bitmap', 'global space', 0, 'chunk', 'Process 1', 0)
-        ],
         'owns': 7
       }
     ],
@@ -1676,9 +2053,6 @@
         'size': 19,
         'expected_size': 19,
         'expected_effective_size': 9.5,
-        'expected_effective_size_infos': [
-          ownerInfo('bitmap', 'global space', 0, 'tile', 'Process 0', 0)
-        ],
         'owns': 7
       }
     ]
@@ -1690,9 +2064,6 @@
         'name': 'bitmap',
         'expected_size': 50,
         'expected_effective_size': 0,
-        'expected_effective_size_infos': [
-          ownedInfo('tile', 'Process 0', 0, 'chunk', 'Process 1', 0)
-        ],
         'guid': 7
       }
     ],
@@ -1701,11 +2072,6 @@
         'name': 'tile',
         'expected_size': 50,
         'expected_effective_size': 0,
-        'expected_effective_size_infos': [
-          ownedInfo('object1', 'Process 0', 0, 'object2', 'Process 0', 0,
-              'object3', 'Process 0', 0),
-          ownerInfo('bitmap', 'global space', 0, 'chunk', 'Process 1', 0)
-        ],
         'owns': 7,
         'guid': 0
       },
@@ -1714,11 +2080,7 @@
         'size': 30,
         'owns': 0,
         'expected_size': 30,
-        'expected_effective_size': 9,
-        'expected_effective_size_infos': [
-          ownerInfo('tile', 'Process 0', 0, 'object2', 'Process 0', 0,
-              'object3', 'Process 0', 0)
-        ]
+        'expected_effective_size': 9
       },
       {
         'name': 'object2',
@@ -1729,11 +2091,7 @@
         'size': 50,
         'owns': 0,
         'expected_size': 50,
-        'expected_effective_size': 21,
-        'expected_effective_size_infos': [
-          ownerInfo('tile', 'Process 0', 0, 'object1', 'Process 0', 0,
-              'object2', 'Process 0', 0)
-        ]
+        'expected_effective_size': 21
       }
     ],
     [  // PMD2.
@@ -1742,9 +2100,6 @@
         'size': 40,
         'expected_size': 40,
         'expected_effective_size': 20,
-        'expected_effective_size_infos': [
-          ownerInfo('bitmap', 'global space', 0, 'tile', 'Process 0', 0)
-        ],
         'owns': 7
       }
     ]
@@ -1757,9 +2112,6 @@
         'guid': 7,
         'expected_size': 48,
         'expected_effective_size': 17,
-        'expected_effective_size_infos': [
-          ownedInfo('tile', 'Process 0', 0)
-        ],
         'children': [
           {
             'name': 'subbitmap1',
@@ -1781,10 +2133,6 @@
         'name': 'tile',
         'expected_size': 31,
         'expected_effective_size': 0,
-        'expected_effective_size_infos': [
-          ownedInfo('cc', 'Process 0', 0),
-          ownerInfo('bitmap', 'global space', 0)
-        ],
         'guid': 8,
         'owns': 7,
         'children': [
@@ -1807,10 +2155,7 @@
         'owns': 8,
         'size': 31,
         'expected_size': 31,
-        'expected_effective_size': 31,
-        'expected_effective_size_infos': [
-          ownerInfo('tile', 'Process 0', 0)
-        ]
+        'expected_effective_size': 31
       }
     ]
   ]);
@@ -1833,10 +2178,7 @@
             'name': 'subbitmap',
             'guid': 2,
             'expected_size': 64,
-            'expected_effective_size': 0,
-            'expected_effective_size_infos': [
-              ownedInfo('tile/subtile', 'Process 0', 0)
-            ]
+            'expected_effective_size': 0
           }
         ]
       }
@@ -1852,11 +2194,7 @@
             'guid': 1,
             'owns': 2,
             'expected_size': 64,
-            'expected_effective_size': 0,
-            'expected_effective_size_infos': [
-              ownedInfo('cc', 'Process 0', 0),
-              ownerInfo('bitmap/subbitmap', 'global space', 0)
-            ]
+            'expected_effective_size': 0
           }
         ]
       },
@@ -1865,10 +2203,7 @@
         'owns': 1,
         'size': 64,
         'expected_size': 64,
-        'expected_effective_size': 64,
-        'expected_effective_size_infos': [
-          ownerInfo('tile/subtile', 'Process 0', 0)
-        ]
+        'expected_effective_size': 64
       }
     ]
   ]);
@@ -1891,10 +2226,7 @@
             'name': 'subbitmap',
             'guid': 2,
             'expected_size': 64,
-            'expected_effective_size': 0,
-            'expected_effective_size_infos': [
-              ownedInfo('tile', 'Process 0', 0)
-            ]
+            'expected_effective_size': 0
           }
         ]
       }
@@ -1904,19 +2236,13 @@
         'name': 'tile',
         'expected_size': 64,
         'expected_effective_size': 0,
-        'expected_effective_size_infos': [
-          ownerInfo('bitmap/subbitmap', 'global space', 0)
-        ],
         'owns': 2,
         'children': [
           {
             'name': 'subtile',
             'guid': 1,
             'expected_size': 64,
-            'expected_effective_size': 0,
-            'expected_effective_size_infos': [
-              ownedInfo('cc', 'Process 0', 0)
-            ]
+            'expected_effective_size': 0
           }
         ]
       },
@@ -1925,10 +2251,7 @@
         'owns': 1,
         'size': 64,
         'expected_size': 64,
-        'expected_effective_size': 64,
-        'expected_effective_size_infos': [
-          ownerInfo('tile/subtile', 'Process 0', 0)
-        ]
+        'expected_effective_size': 64
       }
     ]
   ]);
@@ -1963,10 +2286,7 @@
             'guid': 1,
             'size': 10,
             'expected_size': 10,
-            'expected_effective_size': 10,
-            'expected_effective_size_infos': [
-              ownedInfo('v8/objects', 'global space', 0)
-            ]
+            'expected_effective_size': 10
           },
           {
             'name': 'objects',
@@ -1988,23 +2308,17 @@
             'name': 'heaps',
             'guid': 1,
             'expected_size': 20,
-            'expected_size_infos': [
-              informationInfo(/\boverlaps\b.*'objects'.*\b20\.0 B\b/)
-            ],
             'expected_effective_size': 0,
-            'expected_effective_size_infos': [
-              ownedInfo('v8/objects', 'global space', 0)
-            ]
+            'expected_owned_by_sibling_sizes': {
+              'objects': 20
+            }
           },
           {
             'name': 'objects',
             'owns': 1,
             'size': 20,
             'expected_size': 20,
-            'expected_effective_size': 20,
-            'expected_effective_size_infos': [
-              ownerInfo('v8/heaps', 'global space', 0)
-            ]
+            'expected_effective_size': 20
           }
         ]
       }
@@ -2023,23 +2337,17 @@
             'guid': 1,
             'size': 30,
             'expected_size': 30,
-            'expected_size_infos': [
-              informationInfo(/\boverlaps\b.*'objects'.*\b20\.0 B\b/)
-            ],
             'expected_effective_size': 10,
-            'expected_effective_size_infos': [
-              ownedInfo('v8/objects', 'global space', 0)
-            ]
+            'expected_owned_by_sibling_sizes': {
+              'objects': 20
+            }
           },
           {
             'name': 'objects',
             'owns': 1,
             'size': 20,
             'expected_size': 20,
-            'expected_effective_size': 20,
-            'expected_effective_size_infos': [
-              ownerInfo('v8/heaps', 'global space', 0)
-            ]
+            'expected_effective_size': 20
           }
         ]
       }
@@ -2059,10 +2367,7 @@
             'guid': 1,
             'size': 30,
             'expected_size': 30,
-            'expected_effective_size': 30,
-            'expected_effective_size_infos': [
-              ownedInfo('v8/objects', 'global space', 0)
-            ]
+            'expected_effective_size': 30
           },
           {
             'name': 'objects',
@@ -2092,10 +2397,7 @@
             'guid': 1,
             'size': 30,
             'expected_size': 30,
-            'expected_effective_size': 30,
-            'expected_effective_size_infos': [
-              ownedInfo('v8/objects', 'global space', 0)
-            ]
+            'expected_effective_size': 30
           },
           {
             'name': 'objects',
@@ -2118,23 +2420,17 @@
             'name': 'heaps',
             'guid': 1,
             'expected_size': 30,
-            'expected_size_infos': [
-              informationInfo(/\boverlaps\b.*'objects'.*\b30\.0 B\b/)
-            ],
             'expected_effective_size': 0,
-            'expected_effective_size_infos': [
-              ownedInfo('v8/objects', 'global space', 0)
-            ]
+            'expected_owned_by_sibling_sizes': {
+              'objects': 30
+            }
           },
           {
             'name': 'objects',
             'owns': 1,
             'size': 30,
             'expected_size': 30,
-            'expected_effective_size': 30,
-            'expected_effective_size_infos': [
-              ownerInfo('v8/heaps', 'global space', 0)
-            ]
+            'expected_effective_size': 30
           }
         ]
       }
@@ -2159,23 +2455,17 @@
             'name': 'heaps',
             'guid': 1,
             'expected_size': 30,
-            'expected_size_infos': [
-              informationInfo(/\boverlaps\b.*'objects'.*\b30\.0 B\b/)
-            ],
             'expected_effective_size': 0,
-            'expected_effective_size_infos': [
-              ownedInfo('v8/objects', 'global space', 0)
-            ]
+            'expected_owned_by_sibling_sizes': {
+              'objects': 30
+            }
           },
           {
             'name': 'objects',
             'owns': 1,
             'size': 30,
             'expected_size': 30,
-            'expected_effective_size': 30,
-            'expected_effective_size_infos': [
-              ownerInfo('v8/heaps', 'global space', 0)
-            ]
+            'expected_effective_size': 30
           }
         ]
       }
@@ -2195,23 +2485,17 @@
             'guid': 1,
             'size': 30,
             'expected_size': 30,
-            'expected_size_infos': [
-              informationInfo(/\boverlaps\b.*'objects'.*\b14\.0 B\b/)
-            ],
             'expected_effective_size': 16,
-            'expected_effective_size_infos': [
-              ownedInfo('v8/objects', 'global space', 0)
-            ]
+            'expected_owned_by_sibling_sizes': {
+              'objects': 14
+            }
           },
           {
             'name': 'objects',
             'owns': 1,
             'size': 14,
             'expected_size': 14,
-            'expected_effective_size': 14,
-            'expected_effective_size_infos': [
-              ownerInfo('v8/heaps', 'global space', 0)
-            ]
+            'expected_effective_size': 14
           }
         ]
       }
@@ -2237,23 +2521,17 @@
             'guid': 1,
             'size': 30,
             'expected_size': 30,
-            'expected_size_infos': [
-              informationInfo(/\boverlaps\b.*'objects'.*\b14\.0 B\b/)
-            ],
             'expected_effective_size': 16,
-            'expected_effective_size_infos': [
-              ownedInfo('v8/objects', 'global space', 0)
-            ]
+            'expected_owned_by_sibling_sizes': {
+              'objects': 14
+            }
           },
           {
             'name': 'objects',
             'owns': 1,
             'size': 14,
             'expected_size': 14,
-            'expected_effective_size': 14,
-            'expected_effective_size_infos': [
-              ownerInfo('v8/heaps', 'global space', 0)
-            ]
+            'expected_effective_size': 14
           }
         ]
       }
@@ -2272,9 +2550,9 @@
             'size': 10,
             'expected_size': 10,
             'expected_effective_size': 5,
-            'expected_size_infos': [
-              informationInfo(/\boverlaps\b.*'parent2'.*\b5\.0 B\b/)
-            ],
+            'expected_owned_by_sibling_sizes': {
+              'parent2': 5
+            },
             'children': [
               {
                 'name': '<unspecified>',
@@ -2287,10 +2565,7 @@
                 'guid': 1,
                 'size': 8,
                 'expected_size': 8,
-                'expected_effective_size': 3,
-                'expected_effective_size_infos': [
-                  ownedInfo('root/parent2/child', 'global space', 0)
-                ]
+                'expected_effective_size': 3
               }
             ]
           },
@@ -2299,9 +2574,9 @@
             'size': 8,
             'expected_size': 8,
             'expected_effective_size': 5,
-            'expected_size_infos': [
-              informationInfo(/\boverlaps\b.*'parent3'.*\b3\.0 B\b/)
-            ],
+            'expected_owned_by_sibling_sizes': {
+              'parent3': 3
+            },
             'children': [
               {
                 'name': '<unspecified>',
@@ -2315,11 +2590,7 @@
                 'owns': 1,
                 'size': 5,
                 'expected_size': 5,
-                'expected_effective_size': 2,
-                'expected_effective_size_infos': [
-                  ownedInfo('root/parent3/child', 'global space', 0),
-                  ownerInfo('root/parent1/child', 'global space', 0)
-                ]
+                'expected_effective_size': 2
               }
             ]
           },
@@ -2340,10 +2611,7 @@
                 'owns': 2,
                 'size': 3,
                 'expected_size': 3,
-                'expected_effective_size': 3,
-                'expected_effective_size_infos': [
-                  ownerInfo('root/parent2/child', 'global space', 0)
-                ]
+                'expected_effective_size': 3
               }
             ]
           }
@@ -2364,32 +2632,23 @@
             'owns': 15,
             'expected_size': 5,
             'expected_effective_size': 5,
-            'expected_effective_size_infos': [
-              ownerInfo('system/subsystem-B', 'global space', 0)
-            ],
             'children': [
               {
                 'name': 'objects',
                 'owns': 30,
                 'size': 3,
                 'expected_size': 3,
-                'expected_effective_size': 3,
-                'expected_effective_size_infos': [
-                  ownerInfo('system/subsystem-A/heaps', 'global space', 0)
-                ]
+                'expected_effective_size': 3
               },
               {
                 'name': 'heaps',
                 'guid': 30,
                 'size': 5,
                 'expected_size': 5,
-                'expected_size_infos': [
-                  informationInfo(/\boverlaps\b.*'objects'.*\b3\.0 B\b/)
-                ],
                 'expected_effective_size': 2,
-                'expected_effective_size_infos': [
-                  ownedInfo('system/subsystem-A/objects', 'global space', 0)
-                ]
+                'expected_owned_by_sibling_sizes': {
+                  'objects': 3
+                }
               }
             ]
           },
@@ -2397,35 +2656,26 @@
             'name': 'subsystem-B',
             'guid': 15,
             'expected_size': 7,
-            'expected_size_infos': [
-              informationInfo(/\boverlaps\b.*'subsystem-A'.*\b5\.0 B\b/)
-            ],
             'expected_effective_size': 2,
-            'expected_effective_size_infos': [
-              ownedInfo('system/subsystem-A', 'global space', 0)
-            ],
+            'expected_owned_by_sibling_sizes': {
+              'subsystem-A': 5
+            },
             'children': [
               {
                 'name': 'objects',
                 'owns': 40,
                 'size': 7,
                 'expected_size': 7,
-                'expected_effective_size': 2,
-                'expected_effective_size_infos': [
-                  ownerInfo('system/subsystem-B/heaps', 'global space', 0)
-                ]
+                'expected_effective_size': 2
               },
               {
                 'name': 'heaps',
                 'guid': 40,
                 'expected_size': 7,
-                'expected_size_infos': [
-                  informationInfo(/\boverlaps\b.*'objects'.*\b7\.0 B\b/)
-                ],
                 'expected_effective_size': 0,
-                'expected_effective_size_infos': [
-                  ownedInfo('system/subsystem-B/objects', 'global space', 0)
-                ]
+                'expected_owned_by_sibling_sizes': {
+                  'objects': 7
+                }
               }
             ]
           }
@@ -2441,30 +2691,21 @@
         'guid': 1,
         'size': 10,
         'expected_size': 10,
-        'expected_effective_size': 4,
-        'expected_effective_size_infos': [
-          ownedInfo('owner1', 'global space', 0, 'owner2', 'global space', 0)
-        ]
+        'expected_effective_size': 4
       },
       {
         'name': 'owner1',
         'owns': 1,
         'size': 6,
         'expected_size': 6,
-        'expected_effective_size': 3,
-        'expected_effective_size_infos': [
-          ownerInfo('owned', 'global space', 0, 'owner2', 'global space', 0)
-        ]
+        'expected_effective_size': 3
       },
       {
         'name': 'owner2',
         'owns': 1,
         'size': 6,
         'expected_size': 6,
-        'expected_effective_size': 3,
-        'expected_effective_size_infos': [
-          ownerInfo('owned', 'global space', 0, 'owner1', 'global space', 0)
-        ]
+        'expected_effective_size': 3
       }
     ],
     [  // PMD1 (only one importance defined and different sizes).
@@ -2473,10 +2714,7 @@
         'guid': 2,
         'size': 20,
         'expected_size': 20,
-        'expected_effective_size': 5,
-        'expected_effective_size_infos': [
-          ownedInfo('owner1', 'Process 0', 0, 'owner2', 'Process 0', 0)
-        ]
+        'expected_effective_size': 5
       },
       {
         'name': 'owner1',
@@ -2484,20 +2722,14 @@
         'importance': 0,
         'size': 15,
         'expected_size': 15,
-        'expected_effective_size': 10 / 2 + 5,
-        'expected_effective_size_infos': [
-          ownerInfo('owned', 'Process 0', 0, 'owner2', 'Process 0', 0)
-        ]
+        'expected_effective_size': 10 / 2 + 5
       },
       {
         'name': 'owner2',
         'owns': 2,
         'size': 10,
         'expected_size': 10,
-        'expected_effective_size': 10 / 2,
-        'expected_effective_size_infos': [
-          ownerInfo('owned', 'Process 0', 0, 'owner1', 'Process 0', 0)
-        ]
+        'expected_effective_size': 10 / 2
       }
     ],
     [  // PMD2 (all importances defined and different sizes).
@@ -2506,11 +2738,7 @@
         'guid': 3,
         'size': 15,
         'expected_size': 15,
-        'expected_effective_size': 5,
-        'expected_effective_size_infos': [
-          ownedInfo('owner1', 'Process 1', 3, 'owner2', 'Process 1', 3,
-              'owner3', 'Process 1', 3)
-        ]
+        'expected_effective_size': 5
       },
       {
         'name': 'owner1',
@@ -2518,11 +2746,7 @@
         'importance': 3,
         'size': 8,
         'expected_size': 8,
-        'expected_effective_size': 8 / 3,
-        'expected_effective_size_infos': [
-          ownerInfo('owned', 'Process 1', 3, 'owner2', 'Process 1', 3,
-              'owner3', 'Process 1', 3)
-        ]
+        'expected_effective_size': 8 / 3
       },
       {
         'name': 'owner2',
@@ -2530,11 +2754,7 @@
         'importance': 3,
         'size': 9,
         'expected_size': 9,
-        'expected_effective_size': 8 / 3 + 1 / 2,
-        'expected_effective_size_infos': [
-          ownerInfo('owned', 'Process 1', 3, 'owner1', 'Process 1', 3,
-              'owner3', 'Process 1', 3)
-        ]
+        'expected_effective_size': 8 / 3 + 1 / 2
       },
       {
         'name': 'owner3',
@@ -2542,11 +2762,7 @@
         'importance': 3,
         'size': 10,
         'expected_size': 10,
-        'expected_effective_size': 8 / 3 + 1 / 2 + 1,
-        'expected_effective_size_infos': [
-          ownerInfo('owned', 'Process 1', 3, 'owner1', 'Process 1', 3,
-              'owner2', 'Process 1', 3)
-        ]
+        'expected_effective_size': 8 / 3 + 1 / 2 + 1
       }
     ]
   ]);
@@ -2558,20 +2774,14 @@
         'guid': 1,
         'size': 10,
         'expected_size': 10,
-        'expected_effective_size': 4,
-        'expected_effective_size_infos': [
-          ownedInfo('owner1', 'global space', 0, 'owner2', 'global space', 1)
-        ]
+        'expected_effective_size': 4
       },
       {
         'name': 'owner1',
         'owns': 1,
         'size': 6,
         'expected_size': 6,
-        'expected_effective_size': 0,
-        'expected_effective_size_infos': [
-          ownerInfo('owned', 'global space', 0, 'owner2', 'global space', 1)
-        ]
+        'expected_effective_size': 0
       },
       {
         'name': 'owner2',
@@ -2579,10 +2789,7 @@
         'importance': 1,
         'size': 6,
         'expected_size': 6,
-        'expected_effective_size': 6,
-        'expected_effective_size_infos': [
-          ownerInfo('owned', 'global space', 1, 'owner1', 'global space', 0)
-        ]
+        'expected_effective_size': 6
       }
     ],
     [  // PMD1 (one importance undefined and different sizes).
@@ -2591,10 +2798,7 @@
         'guid': 2,
         'size': 20,
         'expected_size': 20,
-        'expected_effective_size': 4,
-        'expected_effective_size_infos': [
-          ownedInfo('owner1', 'Process 0', -1, 'owner2', 'Process 0', 0)
-        ]
+        'expected_effective_size': 4
       },
       {
         'name': 'owner1',
@@ -2602,20 +2806,14 @@
         'importance': -1,
         'size': 16,
         'expected_size': 16,
-        'expected_effective_size': 6,
-        'expected_effective_size_infos': [
-          ownerInfo('owned', 'Process 0', -1, 'owner2', 'Process 0', 0)
-        ]
+        'expected_effective_size': 6
       },
       {
         'name': 'owner2',
         'owns': 2,
         'size': 10,
         'expected_size': 10,
-        'expected_effective_size': 10,
-        'expected_effective_size_infos': [
-          ownerInfo('owned', 'Process 0', 0, 'owner1', 'Process 0', -1)
-        ]
+        'expected_effective_size': 10
       }
     ],
     [  // PMD2 (all importances defined and different sizes).
@@ -2624,11 +2822,7 @@
         'guid': 3,
         'size': 15,
         'expected_size': 15,
-        'expected_effective_size': 5,
-        'expected_effective_size_infos': [
-          ownedInfo('owner1', 'Process 1', 4, 'owner2', 'Process 1', 3,
-              'owner3', 'Process 1', 2)
-        ]
+        'expected_effective_size': 5
       },
       {
         'name': 'owner1',
@@ -2636,11 +2830,7 @@
         'importance': 4,
         'size': 8,
         'expected_size': 8,
-        'expected_effective_size': 8,
-        'expected_effective_size_infos': [
-          ownerInfo('owned', 'Process 1', 4, 'owner2', 'Process 1', 3,
-              'owner3', 'Process 1', 2)
-        ]
+        'expected_effective_size': 8
       },
       {
         'name': 'owner2',
@@ -2648,11 +2838,7 @@
         'importance': 3,
         'size': 6,
         'expected_size': 6,
-        'expected_effective_size': 0,
-        'expected_effective_size_infos': [
-          ownerInfo('owned', 'Process 1', 3, 'owner1', 'Process 1', 4,
-              'owner3', 'Process 1', 2)
-        ]
+        'expected_effective_size': 0
       },
       {
         'name': 'owner3',
@@ -2660,11 +2846,7 @@
         'importance': 2,
         'size': 10,
         'expected_size': 10,
-        'expected_effective_size': 2,
-        'expected_effective_size_infos': [
-          ownerInfo('owned', 'Process 1', 2, 'owner1', 'Process 1', 4,
-              'owner2', 'Process 1', 3)
-        ]
+        'expected_effective_size': 2
       }
     ]
   ]);
@@ -2678,12 +2860,7 @@
         'guid': 4,
         'size': 10,
         'expected_size': 10,
-        'expected_effective_size': 2,
-        'expected_effective_size_infos': [
-          ownedInfo('owner1', 'Process 0', 2,
-              'some_parent/owner2', 'Process 1', 2, 'owner3', 'Process 2', 1,
-              'owner4', 'Process 2', 0)
-        ]
+        'expected_effective_size': 2
       }
     ],
     [  // PMD1.
@@ -2693,12 +2870,7 @@
         'importance': 2,
         'size': 6,
         'expected_size': 6,
-        'expected_effective_size': 6 / 2,
-        'expected_effective_size_infos': [
-          ownerInfo('owned', 'global space', 2,
-              'some_parent/owner2', 'Process 1', 2, 'owner3', 'Process 2', 1,
-              'owner4', 'Process 2', 0)
-        ]
+        'expected_effective_size': 6 / 2
       }
     ],
     [  // PMD2.
@@ -2713,11 +2885,7 @@
             'importance': 2,
             'size': 7,
             'expected_size': 7,
-            'expected_effective_size': 6 / 2 + 1,
-            'expected_effective_size_infos': [
-              ownerInfo('owned', 'global space', 2, 'owner1', 'Process 0', 2,
-                  'owner3', 'Process 2', 1, 'owner4', 'Process 2', 0)
-            ]
+            'expected_effective_size': 6 / 2 + 1
           }
         ]
       }
@@ -2729,11 +2897,7 @@
         'importance': 1,
         'size': 5,
         'expected_size': 5,
-        'expected_effective_size': 0,
-        'expected_effective_size_infos': [
-          ownerInfo('owned', 'global space', 1, 'owner1', 'Process 0', 2,
-              'some_parent/owner2', 'Process 1', 2, 'owner4', 'Process 2', 0)
-        ]
+        'expected_effective_size': 0
       },
       {
         'name': 'owner4',
@@ -2741,11 +2905,7 @@
         'importance': 0,
         'size': 8,
         'expected_size': 8,
-        'expected_effective_size': 1,
-        'expected_effective_size_infos': [
-          ownerInfo('owned', 'global space', 0, 'owner1', 'Process 0', 2,
-              'some_parent/owner2', 'Process 1', 2, 'owner3', 'Process 2', 1)
-        ]
+        'expected_effective_size': 1
       }
     ]
   ]);
@@ -2757,10 +2917,7 @@
         'guid': 5,
         'size': 10,
         'expected_size': 10,
-        'expected_effective_size': 2,
-        'expected_effective_size_infos': [
-          ownedInfo('owner1', 'Process 0', 2, 'owner2', 'Process 1', 1)
-        ]
+        'expected_effective_size': 2
       }
     ],
     [  // PMD1.
@@ -2771,21 +2928,14 @@
         'guid': 6,
         'size': 6,
         'expected_size': 6,
-        'expected_effective_size': 2,
-        'expected_effective_size_infos': [
-          ownedInfo('subowner1', 'Process 0', 0),
-          ownerInfo('owned', 'global space', 2, 'owner2', 'Process 1', 1)
-        ]
+        'expected_effective_size': 2
       },
       {
         'name': 'subowner1',
         'owns': 6,
         'size': 4,
         'expected_size': 4,
-        'expected_effective_size': 4,
-        'expected_effective_size_infos': [
-          ownerInfo('owner1', 'Process 0', 0)
-        ]
+        'expected_effective_size': 4
       }
     ],
     [  // PMD2.
@@ -2796,21 +2946,14 @@
         'guid': 8,
         'size': 8,
         'expected_size': 8,
-        'expected_effective_size': 2 - 2 / 4,
-        'expected_effective_size_infos': [
-          ownedInfo('subowner2', 'Process 1', 0),
-          ownerInfo('owned', 'global space', 1, 'owner1', 'Process 0', 2)
-        ]
+        'expected_effective_size': 2 - 2 / 4
       },
       {
         'name': 'subowner2',
         'owns': 8,
         'size': 2,
         'expected_size': 2,
-        'expected_effective_size': 2 / 4,
-        'expected_effective_size_infos': [
-          ownerInfo('owner2', 'Process 1', 0)
-        ]
+        'expected_effective_size': 2 / 4
       }
     ]
   ]);
@@ -2822,10 +2965,7 @@
         'guid': 15,
         'size': 20,
         'expected_size': 20,
-        'expected_effective_size': 6,
-        'expected_effective_size_infos': [
-          ownedInfo('blue', 'global space', 1, 'purple', 'global space', 2)
-        ]
+        'expected_effective_size': 6
       },
       {
         'name': 'blue',
@@ -2834,11 +2974,7 @@
         'importance': 1,
         'size': 14,
         'expected_size': 14,
-        'expected_effective_size': 1,
-        'expected_effective_size_infos': [
-          ownedInfo('red', 'global space', 0),
-          ownerInfo('grey', 'global space', 1, 'purple', 'global space', 2)
-        ]
+        'expected_effective_size': 1
       },
       {
         'name': 'purple',
@@ -2846,10 +2982,7 @@
         'importance': 2,
         'size': 7,
         'expected_size': 7,
-        'expected_effective_size': 7,
-        'expected_effective_size_infos': [
-          ownerInfo('grey', 'global space', 2, 'blue', 'global space', 1)
-        ]
+        'expected_effective_size': 7
       },
       {
         'name': 'yellow',
@@ -2857,10 +2990,7 @@
         'importance': 3,
         'size': 10,
         'expected_size': 10,
-        'expected_effective_size': 3,
-        'expected_effective_size_infos': [
-          ownerInfo('red', 'global space', 3, 'green', 'global space', 3)
-        ]
+        'expected_effective_size': 3
       },
       {
         'name': 'red',
@@ -2868,11 +2998,7 @@
         'owns': 18,
         'size': 12,
         'expected_size': 12,
-        'expected_effective_size': 1,
-        'expected_effective_size_infos': [
-          ownedInfo('yellow', 'global space', 3, 'green', 'global space', 3),
-          ownerInfo('blue', 'global space', 0)
-        ]
+        'expected_effective_size': 1
       },
       {
         'name': 'green',
@@ -2880,10 +3006,7 @@
         'importance': 3,
         'size': 8,
         'expected_size': 8,
-        'expected_effective_size': 2,
-        'expected_effective_size_infos': [
-          ownerInfo('red', 'global space', 3, 'yellow', 'global space', 3)
-        ]
+        'expected_effective_size': 2
       }
     ]
   ]);
@@ -2901,9 +3024,6 @@
             'size': 11,
             'expected_size': 11,
             'expected_effective_size': 11,
-            'expected_effective_size_infos': [
-              ownerInfo('v8/heaps', 'global space', 0)
-            ],
             'children': [
               {
                 'name': '<unspecified>',
@@ -2916,10 +3036,7 @@
                 'owns': 2,
                 'size': 7,
                 'expected_size': 7,
-                'expected_effective_size': 7,
-                'expected_effective_size_infos': [
-                  ownerInfo('v8/heaps/heap1', 'global space', 0)
-                ]
+                'expected_effective_size': 7
               }
             ]
           },
@@ -2928,13 +3045,10 @@
             'guid': 1,
             'size': 13,
             'expected_size': 13,
-            'expected_size_infos': [
-              informationInfo(/\boverlaps\b.*'objects'.*\b11\.0 B\b/)
-            ],
             'expected_effective_size': 2,
-            'expected_effective_size_infos': [
-              ownedInfo('v8/objects', 'global space', 0)
-            ],
+            'expected_owned_by_sibling_sizes': {
+              'objects': 11
+            },
             'children': [
               {
                 'name': '<unspecified>',
@@ -2948,9 +3062,6 @@
                 'size': 10,
                 'expected_size': 10,
                 'expected_effective_size': 1,
-                'expected_effective_size_infos': [
-                  ownedInfo('v8/objects/object1', 'global space', 0)
-                ]
               }
             ]
           }
@@ -2967,9 +3078,6 @@
         'size': 23,
         'expected_size': 23,
         'expected_effective_size': 5,
-        'expected_effective_size_infos': [
-          ownedInfo('tile_manager', 'Process 0', 2, 'gpu', 'Process 1', 1)
-        ],
         'children': [
           {
             'name': '<unspecified>',
@@ -2983,10 +3091,6 @@
             'size': 10,
             'expected_size': 10,
             'expected_effective_size': 3 * 5 / (13 + 3),
-            'expected_effective_size_infos': [
-              ownedInfo('tile_manager/tile42', 'Process 0', 1,
-                  'gpu/chunk-3\\.14', 'Process 1', 2)
-            ]
           }
         ]
       }
@@ -2999,9 +3103,6 @@
         'size': 12,
         'expected_size': 12,
         'expected_effective_size': 5 + 2,
-        'expected_effective_size_infos': [
-          ownerInfo('shared_bitmap', 'global space', 2, 'gpu', 'Process 1', 1)
-        ],
         'children': [
           {
             'name': '<unspecified>',
@@ -3016,10 +3117,6 @@
             'size': 7,
             'expected_size': 7,
             'expected_effective_size': 2,
-            'expected_effective_size_infos': [
-              ownerInfo('shared_bitmap/bitmap0x7', 'global space', 1,
-                  'gpu/chunk-3\\.14', 'Process 1', 2)
-            ]
           }
         ]
       }
@@ -3032,10 +3129,6 @@
         'size': 16,
         'expected_size': 16,
         'expected_effective_size': 6 + 5,
-        'expected_effective_size_infos': [
-          ownerInfo('shared_bitmap', 'global space', 1,
-              'tile_manager', 'Process 0', 2)
-        ],
         'children': [
           {
             'name': '<unspecified>',
@@ -3050,10 +3143,6 @@
             'size': 5,
             'expected_size': 5,
             'expected_effective_size': 5,
-            'expected_effective_size_infos': [
-              ownerInfo('shared_bitmap/bitmap0x7', 'global space', 2,
-                  'tile_manager/tile42', 'Process 0', 1)
-            ]
           }
         ]
       }
@@ -3068,10 +3157,6 @@
         'guid': 2,
         'expected_size': 16,
         'expected_effective_size': 0,
-        'expected_effective_size_infos': [
-          ownedInfo('sharedbitmap/0x7', 'Process 0', 1,
-              'v8/heaps/1', 'Process 1', 2)
-        ]
       }
     ],
     [  // PMD1, Browser process.
@@ -3092,10 +3177,6 @@
             'size': 16,
             'expected_size': 16,
             'expected_effective_size': 8,
-            'expected_effective_size_infos': [
-              ownerInfo('unknown', 'global space', 1,
-                  'v8/heaps/1', 'Process 1', 2)
-            ],
             'owns': 2,
             'importance': 1,
             'children': [
@@ -3123,23 +3204,16 @@
             'name': 'heaps',
             'guid': 100,
             'expected_size': 12,
-            'expected_size_infos': [
-              informationInfo(/\boverlaps\b.*'objects'.*\b9\.0 B\b/)
-            ],
             'expected_effective_size': 3,
-            'expected_effective_size_infos': [
-              ownedInfo('v8/objects/strings', 'Process 1', 0)
-            ],
+            'expected_owned_by_sibling_sizes': {
+              'objects': 9
+            },
             'children': [
               {
                 'name': '1',
                 'size': 8,
                 'expected_size': 8,
                 'expected_effective_size': 2,
-                'expected_effective_size_infos': [
-                  ownerInfo('unknown', 'global space', 2,
-                      'sharedbitmap/0x7', 'Process 0', 1)
-                ],
                 'owns': 2,
                 'importance': 2
               },
@@ -3168,9 +3242,6 @@
                 'size': 9,
                 'expected_size': 9,
                 'expected_effective_size': 9,
-                'expected_effective_size_infos': [
-                  ownerInfo('v8/heaps', 'Process 1', 0)
-                ],
                 'owns': 100
               }
             ]
@@ -3199,10 +3270,20 @@
           },
           {
             'name': 'parent',
+            'guid': 2,
             'size': 17,  // Invalid: child has larger size.
             'expected_size': 20,
-            'expected_size_infos': [
-              warningInfo(/\bless than\b.*\bchildren\b.*\b20\.0 B\b/)
+            'expected_infos': [
+              {
+                type: PROVIDED_SIZE_LESS_THAN_AGGREGATED_CHILDREN,
+                providedSize: 17,
+                dependencySize: 20
+              },
+              {
+                type: PROVIDED_SIZE_LESS_THAN_LARGEST_OWNER,
+                providedSize: 17,
+                dependencySize: 18
+              }
             ],
             'expected_effective_size': 0,
             'children': [
@@ -3211,13 +3292,14 @@
                 'guid': 1,
                 'size': 10,  // Invalid: owner has larger size.
                 'expected_size': 20,
-                'expected_size_infos': [
-                  warningInfo(/\bless than\b.*\blargest owner\b.*\b20\.0 B\b/)
+                'expected_infos': [
+                  {
+                    type: PROVIDED_SIZE_LESS_THAN_LARGEST_OWNER,
+                    providedSize: 10,
+                    dependencySize: 20
+                  }
                 ],
                 'expected_effective_size': 0,
-                'expected_effective_size_infos': [
-                  ownedInfo('root2', 'global space', 0)
-                ]
               }
             ]
           }
@@ -3228,10 +3310,15 @@
         'owns': 1,
         'size': 20,
         'expected_size': 20,
-        'expected_effective_size': 20,
-        'expected_effective_size_infos': [
-          ownerInfo('root1/parent/child', 'global space', 0)
-        ]
+        'expected_effective_size': 20
+      },
+      {
+        'name': 'root3',
+        'owns': 2,
+        'importance': -1,
+        'size': 18,
+        'expected_size': 18,
+        'expected_effective_size': 18
       }
     ]
   ]);
@@ -3247,11 +3334,18 @@
             'name': 'parent1',
             'size': 5,
             'expected_size': 10,
-            'expected_size_infos': [
-              warningInfo(/\bless than\b.*\bchildren\b.*\b10.0 B\b/),
-              informationInfo(/\boverlaps\b.*'parent2'.*\b17\.0 B\b.*\boverlaps\b.*'parent3'.*\b7.0 B\b/)  // @suppress longLineCheck
+            'expected_infos': [
+              {
+                type: PROVIDED_SIZE_LESS_THAN_AGGREGATED_CHILDREN,
+                providedSize: 5,
+                dependencySize: 10
+              }
             ],
             'expected_effective_size': 1,
+            'expected_owned_by_sibling_sizes': {
+              'parent2': 17,
+              'parent3': 7
+            },
             'children': [
               {
                 'name': 'child',
@@ -3259,11 +3353,6 @@
                 'size': 10,
                 'expected_size': 10,
                 'expected_effective_size': 1,
-                'expected_effective_size_infos': [
-                  ownedInfo('root/parent2/child1', 'global space', 0,
-                      'root/parent2/child2', 'global space', 0,
-                      'root/parent3', 'global space', 0)
-                ]
               }
             ]
           },
@@ -3284,11 +3373,6 @@
                 'size': 9,
                 'expected_size': 9,
                 'expected_effective_size': 7 / 3 + 1 / 2 + 1,
-                'expected_effective_size_infos': [
-                  ownerInfo('root/parent1/child', 'global space', 0,
-                      'root/parent2/child2', 'global space', 0,
-                      'root/parent3', 'global space', 0)
-                ]
               },
               {
                 'name': 'child2',
@@ -3296,11 +3380,6 @@
                 'size': 8,
                 'expected_size': 8,
                 'expected_effective_size': 7 / 3 + 1 / 2,
-                'expected_effective_size_infos': [
-                  ownerInfo('root/parent1/child', 'global space', 0,
-                      'root/parent2/child1', 'global space', 0,
-                      'root/parent3', 'global space', 0)
-                ]
               }
             ]
           },
@@ -3309,11 +3388,6 @@
             'size': 7,
             'expected_size': 7,
             'expected_effective_size': 7 / 3,
-            'expected_effective_size_infos': [
-              ownerInfo('root/parent1/child', 'global space', 0,
-                  'root/parent2/child1', 'global space', 0,
-                  'root/parent2/child2', 'global space', 0)
-            ],
             'owns': 3
           }
         ]
@@ -3321,9 +3395,9 @@
     ]
   ]);
 
-  // Check that size calculation is NOT preceded by attribute
-  // aggregation, which would recursively sum up size attributes.
-  test('calculateGraphAttributes_aggregation', function() {
+  // Check that size calculation is NOT preceded by numeric aggregation, which
+  // would recursively sum up size numerics.
+  test('finalizeGraph_aggregation', function() {
     var model = tr.c.TestUtils.newModel(function(model) {
       buildDumpTrees([
         undefined,  // GMD.
@@ -3349,27 +3423,21 @@
     var pmd = model.getProcess(0).memoryDumps[0];
 
     var rootDump = pmd.getMemoryAllocatorDumpByFullName('root');
-    assertSizeAttribute(rootDump, 'size', 20);
-    assertSizeAttribute(rootDump, 'effective_size', 20);
+    assertDumpSizes(rootDump, 20, 20);
 
     var ownerChildDump = pmd.getMemoryAllocatorDumpByFullName(
         'root/owner_child');
-    assertSizeAttribute(ownerChildDump, 'size', 7);
-    assertSizeAttribute(ownerChildDump, 'effective_size', 7, [
-      ownerInfo('root/owned_child', 'Process 0', 0)
-    ]);
+    assertDumpSizes(ownerChildDump, 7, 7);
 
     var ownedChildDump = pmd.getMemoryAllocatorDumpByFullName(
         'root/owned_child');
-    assertSizeAttribute(ownedChildDump, 'size', 20,
-        [informationInfo(/\boverlaps\b.*'owner_child'.*\b7\.0 B\b/)]);
-    assertSizeAttribute(ownedChildDump, 'effective_size', 13,
-        [ownedInfo('root/owner_child', 'Process 0', 0)]);
+    assertDumpSizes(ownedChildDump, 20, 13, [] /* expectedInfos */,
+        { owner_child: 7 } /* expectedOwnedBySiblingSizes */);
   });
 
-  // Check that attribute propagation and aggregation are performed in the
-  // correct order.
-  test('calculateGraphAttributes_propagation', function() {
+  // Check that numeric and diagnostics propagation and aggregation are
+  // performed in the correct order.
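+  // 'owned_root' (size 10) has three owners, each of which is expected below
+  // to end up with an effective size of 10 / 3 (3.3333); 'parent_owner'
+  // additionally aggregates its 'sibling' child (size 5), hence its expected
+  // size of 15 and effective size of 8.3333.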
+  test('finalizeGraph_propagation', function() {
     var model = tr.c.TestUtils.newModel(function(model) {
       buildDumpTrees([
         [  // GMD.
@@ -3377,17 +3445,23 @@
             'name': 'owned_root',
             'guid': 1,
             'size': 10,
+            'diagnostics': {
+              'url': 'https://hello.world.com:42'
+            },
             'children': [
               {
                 'name': 'owned_child1',
-                'attrs': {
-                  'summed': new ScalarAttribute('bytes', 12)
+                'numerics': {
+                  'summed': new ScalarNumeric(sizeInBytes_smallerIsBetter, 12)
+                },
+                'diagnostics': {
+                  'url2': 'http://not.aggregated.to/owned/parent/dump'
                 }
               },
               {
                 'name': 'owned_child2',
-                'attrs': {
-                  'summed': new ScalarAttribute('bytes', 15)
+                'numerics': {
+                  'summed': new ScalarNumeric(sizeInBytes_smallerIsBetter, 15)
                 }
               }
             ]
@@ -3397,7 +3471,10 @@
           {
             'name': 'direct_owner',
             'owns': 1,
-            'guid': 2
+            'guid': 2,
+            'diagnostics': {
+              'url': 'file://not_overriden.html'
+            }
           },
           {
             'name': 'parent_owner',
@@ -3408,8 +3485,9 @@
               },
               {
                 'name': 'sibling',
-                'attrs': {
-                  'summed': new ScalarAttribute('bytes', 13)
+                'size': 5,
+                'numerics': {
+                  'summed': new ScalarNumeric(sizeInBytes_smallerIsBetter, 13)
                 }
               }
             ]
@@ -3417,8 +3495,8 @@
           {
             'name': 'precedent_owner',
             'owns': 1,
-            'attrs': {
-              'summed': new ScalarAttribute('bytes', 0)
+            'numerics': {
+              'summed': new ScalarNumeric(sizeInBytes_smallerIsBetter, 0)
             }
           },
           {
@@ -3430,33 +3508,437 @@
     });
     var pmd = model.getProcess(0).memoryDumps[0];
 
-    var directOwnerDump = pmd.getMemoryAllocatorDumpByFullName('direct_owner');
-    assertSizeAttribute(directOwnerDump, 'summed', 27);
-    assertUndefinedAttribute(directOwnerDump, 'size');
-    assertUndefinedAttribute(directOwnerDump, 'effective_size');
-
-    var childOwnerDump =
-        pmd.getMemoryAllocatorDumpByFullName('parent_owner/child_owner');
-    assertSizeAttribute(childOwnerDump, 'summed', 27);
-    assertUndefinedAttribute(childOwnerDump, 'size');
-    assertUndefinedAttribute(childOwnerDump, 'effective_size');
-
-    var parentOwnerDump = pmd.getMemoryAllocatorDumpByFullName('parent_owner');
-    assertSizeAttribute(parentOwnerDump, 'summed', 40);
-    assertUndefinedAttribute(parentOwnerDump, 'size');
-    assertUndefinedAttribute(parentOwnerDump, 'effective_size');
-
-    var precedentOwnerDump =
-        pmd.getMemoryAllocatorDumpByFullName('precedent_owner');
-    assertSizeAttribute(precedentOwnerDump, 'summed', 0);
-    assertUndefinedAttribute(precedentOwnerDump, 'size');
-    assertUndefinedAttribute(precedentOwnerDump, 'effective_size');
-
-    var indirectOwnerDump =
-        pmd.getMemoryAllocatorDumpByFullName('indirect_owner');
-    assertUndefinedAttribute(indirectOwnerDump, 'summed');
-    assertUndefinedAttribute(indirectOwnerDump, 'size');
-    assertUndefinedAttribute(indirectOwnerDump, 'effective_size');
+    checkDumpNumericsAndDiagnostics(
+        pmd.getMemoryAllocatorDumpByFullName('direct_owner'),
+        {
+          'size': 10,
+          'effective_size': 3.3333,
+          'summed': 27
+        },
+        {
+          'url': 'file://not_overriden.html'
+        });
+    checkDumpNumericsAndDiagnostics(
+        pmd.getMemoryAllocatorDumpByFullName('parent_owner/child_owner'),
+        {
+          'size': 10,
+          'effective_size': 3.3333,
+          'summed': 27
+        },
+        {
+          'url': 'https://hello.world.com:42'
+        });
+    checkDumpNumericsAndDiagnostics(
+        pmd.getMemoryAllocatorDumpByFullName('parent_owner'),
+        {
+          'size': 15,
+          'effective_size': 8.3333,
+          'summed': 40
+        }, {});
+    checkDumpNumericsAndDiagnostics(
+        pmd.getMemoryAllocatorDumpByFullName('precedent_owner'),
+        {
+          'size': 10,
+          'effective_size': 3.3333,
+          'summed': 0
+        },
+        {
+          'url': 'https://hello.world.com:42'
+        });
+    checkDumpNumericsAndDiagnostics(
+        pmd.getMemoryAllocatorDumpByFullName('indirect_owner'), {}, {});
   });
+
+  // Check that weak dumps are removed before size calculation.
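+  // Both weak dumps ('directly_weak_child' and 'separate_weak_child') and
+  // 'indirectly_weak_child', which owns a removed dump, should disappear,
+  // leaving 'root' with 'strong_child' (size 120) as its only child.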
+  test('finalizeGraph_weakDumpRemoval', function() {
+    var model = tr.c.TestUtils.newModel(function(model) {
+      buildDumpTrees([
+        undefined,  // GMD.
+        [  // PMD.
+          {
+            'name': 'root',
+            'children': [
+              {
+                'name': 'directly_weak_child',
+                'weak': true,
+                'guid': 5,
+                'owns': 10,
+                'size': 100
+              },
+              {
+                'name': 'strong_child',
+                'guid': 10,
+                'size': 120
+              },
+              {
+                'name': 'indirectly_weak_child',
+                'owns': 5,
+                'size': 70
+              },
+              {
+                'name': 'separate_weak_child',
+                'weak': true,
+                'size': 300
+              }
+            ]
+          }
+        ]
+      ], model);
+    });
+    var pmd = model.getProcess(0).memoryDumps[0];
+
+    var rootDump = pmd.getMemoryAllocatorDumpByFullName('root');
+    assertDumpSizes(rootDump, 120, 120);
+    assert.lengthOf(rootDump.children, 1);
+
+    var strongChildDump = pmd.getMemoryAllocatorDumpByFullName(
+        'root/strong_child');
+    assertDumpSizes(strongChildDump, 120, 120);
+    assert.lengthOf(strongChildDump.ownedBy, 0);
+
+    assert.isUndefined(pmd.getMemoryAllocatorDumpByFullName(
+        'root/directly_weak_child'));
+    assert.isUndefined(pmd.getMemoryAllocatorDumpByFullName(
+        'root/indirectly_weak_child'));
+    assert.isUndefined(pmd.getMemoryAllocatorDumpByFullName(
+        'root/separate_weak_child'));
+  });
+
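+  // Check that getMemoryAllocatorDumpByFullName also finds dumps created
+  // during finalization, such as the 'root/<unspecified>' child below
+  // ('root' provides 64 B while its only explicit child provides 48 B).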
+  test('indicesUpdatedCorrectly', function() {
+    var gmd, rootDump, childDump;
+    var model = tr.c.TestUtils.newModel(function(model) {
+      gmd = new GlobalMemoryDump(model, 10);
+      model.globalMemoryDumps.push(gmd);
+
+      rootDump = newAllocatorDump(gmd, 'root', { size: 64 });
+      childDump = newChildDump(rootDump, 'child', { size: 48 });
+
+      gmd.memoryAllocatorDumps = [rootDump];
+
+      // Before model is finalized.
+      assert.strictEqual(
+          gmd.getMemoryAllocatorDumpByFullName('root'), rootDump);
+      assert.strictEqual(
+          gmd.getMemoryAllocatorDumpByFullName('root/child'), childDump);
+      assert.isUndefined(
+          gmd.getMemoryAllocatorDumpByFullName('root/<unspecified>'));
+    });
+
+    // Test sanity check.
+    assert.isDefined(gmd);
+    assert.isDefined(rootDump);
+    assert.isDefined(childDump);
+
+    // After model is finalized.
+    assert.strictEqual(gmd.getMemoryAllocatorDumpByFullName('root'), rootDump);
+    assert.strictEqual(
+        gmd.getMemoryAllocatorDumpByFullName('root/child'), childDump);
+    var unspecifiedDump =
+        gmd.getMemoryAllocatorDumpByFullName('root/<unspecified>');
+    assert.strictEqual(unspecifiedDump.fullName, 'root/<unspecified>');
+    assert.strictEqual(unspecifiedDump.parent, rootDump);
+    assert.strictEqual(rootDump.children[0], unspecifiedDump);
+  });
+
+  weakDumpRemovalTest('allDumpsNonWeak', [
+    [  // GMD.
+      {
+        'name': 'malloc',
+        'children': [
+          {
+            'name': 'allocated_objects',
+            'children': [
+              {
+                'name': 'obj42',
+                'guid': 5,
+                'expected_owned_by_links_count': 2
+              }
+            ]
+          }
+        ]
+      }
+    ],
+    undefined,  // PMD1.
+    [  // PMD2.
+      {
+        'name': 'oilpan'
+      },
+      {
+        'name': 'v8',
+        'children': [
+          {
+            'name': 'heaps',
+            'children': [
+              {
+                'name': 'S',
+                'owns': 5
+              },
+              {
+                'name': 'L',
+                'owns': 5
+              }
+            ]
+          }
+        ]
+      }
+    ]
+  ]);
+
+  weakDumpRemovalTest('weakRootDump', [
+    [],  // GMD.
+    [  // PMD1.
+      {
+        'name': 'strong1'
+      },
+      {
+        'name': 'weak',
+        'weak': true,
+        'expected_removed': true
+      },
+      {
+        'name': 'strong2'
+      }
+    ]
+  ]);
+
+  weakDumpRemovalTest('weakChildDump', [
+    [  // GMD.
+      {
+        'name': 'root',
+        'children': [
+          {
+            'name': 'parent',
+            'children': [
+              {
+                'name': 'strong1'
+              },
+              {
+                'name': 'weak',
+                'weak': true,
+                'expected_removed': true,
+                'children': [
+                  {
+                    'name': 'implicitly-removed'
+                  }
+                ]
+              },
+              {
+                'name': 'strong2'
+              }
+            ]
+          }
+        ]
+      }
+    ]
+  ]);
+
+  weakDumpRemovalTest('transitiveOwnerRemoval', [
+    [  // GMD.
+      {
+        'name': 'not-removed-strong-dump',
+        'guid': 0,
+        'expected_owned_by_links_count': 1
+      },
+      {
+        'name': 'weak-owned-dump',
+        'guid': 1,
+        'owns': 0,
+        'weak': true,
+        'expected_removed': true
+      }
+    ],
+    [  // PMD1.
+      {
+        'name': 'direct-owner-dump',
+        'guid': 2,
+        'owns': 1,
+        'expected_removed': true
+      },
+      {
+        'name': 'also-not-removed-strong-dump',
+        'owns': 0
+      }
+    ],
+    [  // PMD2.
+      {
+        'name': 'indirect-owner-dump',
+        'owns': 2,
+        'expected_removed': true
+      }
+    ]
+  ]);
+
+  weakDumpRemovalTest('transitiveDescendantRemoval', [
+    [  // GMD.
+      {
+        'name': 'A',
+        'owns': 10,
+        // A =owns=> B -child-of-> C -> D => E -> F -> G (weak).
+        'expected_removed': true
+      },
+      {
+        'name': 'D',
+        'owns': 5,
+        'expected_removed': true,  // D =owns=> E -child-of-> F -> G (weak).
+        'children': [
+          {
+            'name': 'C',
+            'children': [
+              {
+                'name': 'B',
+                'guid': 10
+              }
+            ]
+          }
+        ]
+      }
+    ],
+    undefined,  // PMD1.
+    [  // PMD2.
+      {
+        'name': 'first-retained-dump',
+        'children': [
+          {
+            'name': 'G',
+            'weak': true,
+            'expected_removed': true,
+            'children': [
+              {
+                'name': 'F',
+                'children': [
+                  {
+                    'name': 'E',
+                    'guid': 5
+                  }
+                ]
+              },
+              {
+                'name': 'H',
+                'children': [
+                  {
+                    'name': 'I',
+                    'children': [
+                      {
+                        'name': 'J',
+                        'owns': 2
+                      }
+                    ]
+                  }
+                ]
+              }
+            ]
+          }
+        ]
+      }
+    ],
+    [  // PMD3.
+      {
+        'name': 'second-retained-dump',
+        'guid': 2,
+        // The only owner (J) is removed because J -child-of-> I -> H ->
+        // G (weak).
+        'expected_owned_by_links_count': 0
+      }
+    ]
+  ]);
+
+  weakDumpRemovalTest('subownerships', [
+    [  // GMD.
+      {
+        'name': 'root1',
+        'owns': 20,
+        'expected_removed': true,  // root1 =owns=> root2 (weak).
+        'children': [
+          {
+            'name': 'child1',
+            'owns': 2
+          }
+        ]
+      },
+      {
+        'name': 'root2',
+        'guid': 20,
+        'owns': 30,
+        'weak': true,
+        'expected_removed': true,
+        'children': [
+          {
+            'name': 'child2',
+            'guid': 2,
+            'owns': 3
+          }
+        ]
+      },
+      {
+        'name': 'root3',
+        'guid': 30,
+        'owns': 40,
+        'expected_owned_by_links_count': 0,
+        'children': [
+          {
+            'name': 'child3',
+            'guid': 3,
+            'owns': 4,
+            'weak': true,
+            'expected_removed': true
+          }
+        ]
+      }
+    ],
+    [  // PMD1.
+      {
+        'name': 'root4',
+        'guid': 40,
+        'expected_owned_by_links_count': 1,
+        'children': [
+          {
+            'name': 'child4',
+            'guid': 4,
+            'expected_owned_by_links_count': 0
+          }
+        ]
+      }
+    ],
+    [  // PMD2.
+      {
+        'name': 'root5',
+        'owns': 60,
+        'expected_removed': true,  // root5 =owns=> root6 => root7 (weak).
+        'children': [
+          {
+            'name': 'child5',
+            'owns': 6
+          }
+        ]
+      },
+      {
+        'name': 'root6',
+        'guid': 60,
+        'owns': 70,
+        'expected_removed': true,  // root6 =owns=> root7 (weak).
+        'children': [
+          {
+            'name': 'child6',
+            'guid': 6,
+            'owns': 7
+          }
+        ]
+      },
+      {
+        'name': 'root7',
+        'guid': 70,
+        'owns': 40,
+        'weak': true,
+        'expected_removed': true,
+        'children': [
+          {
+            'name': 'child7',
+            'guid': 7,
+            'owns': 4
+          }
+        ]
+      }
+    ]
+  ]);
 });
 </script>
diff --git a/catapult/tracing/tracing/model/heap_dump.html b/catapult/tracing/tracing/model/heap_dump.html
index 3d4f39a..272b16d 100644
--- a/catapult/tracing/tracing/model/heap_dump.html
+++ b/catapult/tracing/tracing/model/heap_dump.html
@@ -18,20 +18,22 @@
    *
    * An entry specifies how much space (e.g. 19 MiB) was allocated in a
    * particular context, which consists of a codepath (e.g. drawQuad <- draw <-
-   * MessageLoop::RunTask).
+   * MessageLoop::RunTask) and an object type (e.g. HTMLImportLoader).
    *
    * @{constructor}
    */
-  function HeapEntry(heapDump, leafStackFrame, size) {
+  function HeapEntry(heapDump, leafStackFrame, objectTypeName, size) {
     this.heapDump = heapDump;
 
     // The leaf stack frame of the associated backtrace (e.g. drawQuad for the
     // drawQuad <- draw <- MessageLoop::RunTask backtrace). If undefined, the
-    // heap entry is a sum over all backtraces. On the other hand, an empty
-    // backtrace is represented by the root stack frame, which has an undefined
-    // name.
+    // backtrace is empty.
     this.leafStackFrame = leafStackFrame;
 
+    // The name of the allocated object type (e.g. 'HTMLImportLoader'). If
+    // undefined, the entry represents the sum over all object types.
+    this.objectTypeName = objectTypeName;
+
     this.size = size;
   }
 
@@ -48,8 +50,8 @@
   }
 
   HeapDump.prototype = {
-    addEntry: function(leafStackFrame, size) {
-      var entry = new HeapEntry(this, leafStackFrame, size);
+    addEntry: function(leafStackFrame, objectTypeName, size) {
+      var entry = new HeapEntry(this, leafStackFrame, objectTypeName, size);
       this.entries.push(entry);
       return entry;
     }
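
An illustrative sketch (not part of the patch) of the extended addEntry() signature introduced above; heapDump, drawQuadStackFrame and totalHeapSize are hypothetical names standing in for a real tr.model.HeapDump, a leaf stack frame and a measured size.

// A concrete entry: 19 MiB allocated under the drawQuad codepath with object
// type 'HTMLImportLoader' (illustrative values).
heapDump.addEntry(drawQuadStackFrame, 'HTMLImportLoader', 19 * 1024 * 1024);

// A summary entry: an undefined leaf frame and object type mean the entry is
// the sum over all backtraces and all object types.
heapDump.addEntry(undefined, undefined, totalHeapSize);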
diff --git a/catapult/tracing/tracing/model/heap_dump_test.html b/catapult/tracing/tracing/model/heap_dump_test.html
index 69b8088..d76a26c 100644
--- a/catapult/tracing/tracing/model/heap_dump_test.html
+++ b/catapult/tracing/tracing/model/heap_dump_test.html
@@ -38,14 +38,16 @@
     assert.strictEqual(dump.processMemoryDump, pmd);
     assert.lengthOf(dump.entries, 0);
 
-    var entry1 = dump.addEntry(childFrame, 1024);
+    var entry1 = dump.addEntry(childFrame, 'HTMLImportLoader', 1024);
     assert.strictEqual(entry1.heapDump, dump);
     assert.strictEqual(entry1.leafStackFrame, childFrame);
+    assert.strictEqual(entry1.objectTypeName, 'HTMLImportLoader');
     assert.equal(entry1.size, 1024);
 
-    var entry2 = dump.addEntry(undefined, 1048576);
+    var entry2 = dump.addEntry(undefined, undefined, 1048576);
     assert.strictEqual(entry2.heapDump, dump);
     assert.isUndefined(entry2.leafStackFrame);
+    assert.isUndefined(entry2.objectTypeName);
     assert.equal(entry2.size, 1048576);
 
     assert.deepEqual(dump.entries, [entry1, entry2]);
diff --git a/catapult/tracing/tracing/model/helpers/android_app.html b/catapult/tracing/tracing/model/helpers/android_app.html
new file mode 100644
index 0000000..5831d00
--- /dev/null
+++ b/catapult/tracing/tracing/model/helpers/android_app.html
@@ -0,0 +1,310 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<link rel="import" href="/tracing/base/statistics.html">
+<link rel="import" href="/tracing/base/sorted_array_utils.html">
+<link rel="import" href="/tracing/model/frame.html">
+<link rel="import" href="/tracing/base/range_utils.html">
+
+<script>
+'use strict';
+
+/**
+ * @fileoverview Class for managing android-specific model meta data,
+ * such as rendering apps, and frames rendered.
+ */
+tr.exportTo('tr.model.helpers', function() {
+  var Frame = tr.model.Frame;
+  var Statistics = tr.b.Statistics;
+
+  var UI_DRAW_TYPE = {
+    NONE: 'none',
+    LEGACY: 'legacy',
+    MARSHMALLOW: 'marshmallow'
+  };
+
+  var UI_THREAD_DRAW_NAMES = {
+    'performTraversals': UI_DRAW_TYPE.LEGACY,
+    'Choreographer#doFrame': UI_DRAW_TYPE.MARSHMALLOW
+  };
+
+  var RENDER_THREAD_DRAW_NAME = 'DrawFrame';
+  var RENDER_THREAD_INDEP_DRAW_NAME = 'doFrame';
+  var THREAD_SYNC_NAME = 'syncFrameState';
+
+  function getSlicesForThreadTimeRanges(threadTimeRanges) {
+    var ret = [];
+    threadTimeRanges.forEach(function(threadTimeRange) {
+      var slices = [];
+
+      threadTimeRange.thread.sliceGroup.iterSlicesInTimeRange(
+        function(slice) { slices.push(slice); },
+        threadTimeRange.start, threadTimeRange.end);
+      ret.push.apply(ret, slices);
+    });
+    return ret;
+  }
+
+  function makeFrame(threadTimeRanges, surfaceFlinger) {
+    var args = {};
+    if (surfaceFlinger && surfaceFlinger.hasVsyncs) {
+      var start = Statistics.min(threadTimeRanges,
+          function(threadTimeRange) { return threadTimeRange.start; });
+      args['deadline'] = surfaceFlinger.getFrameDeadline(start);
+      args['frameKickoff'] = surfaceFlinger.getFrameKickoff(start);
+    }
+    var events = getSlicesForThreadTimeRanges(threadTimeRanges);
+    return new Frame(events, threadTimeRanges, args);
+  }
+
+  function findOverlappingDrawFrame(renderThread, time) {
+    if (!renderThread)
+      return undefined;
+
+    var slices = renderThread.sliceGroup.slices;
+    for (var i = 0; i < slices.length; i++) {
+      var slice = slices[i];
+      if (slice.title == RENDER_THREAD_DRAW_NAME &&
+          slice.start <= time &&
+          time <= slice.end) {
+        return slice;
+      }
+    }
+    return undefined;
+  }
+
+  /**
+   * Builds an array of {start, end} ranges grouping common work of a frame
+   * that occurs just before performTraversals().
+   *
+   * Only needed for traces recorded before Choreographer#doFrame tracing
+   * existed.
+   */
+  function getPreTraversalWorkRanges(uiThread) {
+    if (!uiThread)
+      return [];
+
+    // gather all frame work that occurs outside of performTraversals
+    var preFrameEvents = [];
+    uiThread.sliceGroup.slices.forEach(function(slice) {
+      if (slice.title == 'obtainView' ||
+          slice.title == 'setupListItem' ||
+          slice.title == 'deliverInputEvent' ||
+          slice.title == 'RV Scroll')
+        preFrameEvents.push(slice);
+    });
+    uiThread.asyncSliceGroup.slices.forEach(function(slice) {
+      if (slice.title == 'deliverInputEvent')
+        preFrameEvents.push(slice);
+    });
+
+    return tr.b.mergeRanges(
+        tr.b.convertEventsToRanges(preFrameEvents),
+        3,
+        function(events) {
+      return {
+        start: events[0].min,
+        end: events[events.length - 1].max
+      };
+    });
+  }
+
+  function getFrameStartTime(traversalStart, preTraversalWorkRanges) {
+    var preTraversalWorkRange = tr.b.findClosestIntervalInSortedIntervals(
+        preTraversalWorkRanges,
+        function(range) { return range.start; },
+        function(range) { return range.end; },
+        traversalStart,
+        3);
+
+    if (preTraversalWorkRange)
+      return preTraversalWorkRange.start;
+    return traversalStart;
+  }
+
+  function getUiThreadDrivenFrames(app) {
+    if (!app.uiThread)
+      return [];
+
+    var preTraversalWorkRanges = [];
+    if (app.uiDrawType == UI_DRAW_TYPE.LEGACY)
+      preTraversalWorkRanges = getPreTraversalWorkRanges(app.uiThread);
+
+    var frames = [];
+    app.uiThread.sliceGroup.slices.forEach(function(slice) {
+      if (!(slice.title in UI_THREAD_DRAW_NAMES)) {
+        return;
+      }
+
+      var threadTimeRanges = [];
+      var uiThreadTimeRange = {
+        thread: app.uiThread,
+        start: getFrameStartTime(slice.start, preTraversalWorkRanges),
+        end: slice.end
+      };
+      threadTimeRanges.push(uiThreadTimeRange);
+
+      // on SDK 21+ devices with RenderThread,
+      // account for time taken on RenderThread
+      var rtDrawSlice = findOverlappingDrawFrame(
+          app.renderThread, slice.end);
+      if (rtDrawSlice) {
+        var rtSyncSlice = rtDrawSlice.findDescendentSlice(THREAD_SYNC_NAME);
+        if (rtSyncSlice) {
+          // Generally, the UI thread is only on the critical path
+          // until the start of sync.
+          uiThreadTimeRange.end = Math.min(uiThreadTimeRange.end,
+                                           rtSyncSlice.start);
+        }
+
+        threadTimeRanges.push({
+          thread: app.renderThread,
+          start: rtDrawSlice.start,
+          end: rtDrawSlice.end
+        });
+      }
+      frames.push(makeFrame(threadTimeRanges, app.surfaceFlinger));
+    });
+    return frames;
+  }
+
+  function getRenderThreadDrivenFrames(app) {
+    if (!app.renderThread)
+      return [];
+
+    var frames = [];
+    app.renderThread.sliceGroup.getSlicesOfName(RENDER_THREAD_INDEP_DRAW_NAME)
+        .forEach(function(slice) {
+      var threadTimeRanges = [{
+        thread: app.renderThread,
+        start: slice.start,
+        end: slice.end
+      }];
+      frames.push(makeFrame(threadTimeRanges, app.surfaceFlinger));
+    });
+    return frames;
+  }
+
+  function getUiDrawType(uiThread) {
+    if (!uiThread)
+      return UI_DRAW_TYPE.NONE;
+
+    var slices = uiThread.sliceGroup.slices;
+    for (var i = 0; i < slices.length; i++) {
+      if (slices[i].title in UI_THREAD_DRAW_NAMES) {
+        return UI_THREAD_DRAW_NAMES[slices[i].title];
+      }
+    }
+    return UI_DRAW_TYPE.NONE;
+  }
+
+  function getInputSamples(process) {
+    var samples = undefined;
+    for (var counterName in process.counters) {
+      if (/^android\.aq\:pending/.test(counterName) &&
+          process.counters[counterName].numSeries == 1) {
+        samples = process.counters[counterName].series[0].samples;
+        break;
+      }
+    }
+
+    if (!samples)
+      return [];
+
+    // output rising edges only, since those are user inputs
+    var inputSamples = [];
+    var lastValue = 0;
+    samples.forEach(function(sample) {
+      if (sample.value > lastValue) {
+        inputSamples.push(sample);
+      }
+      lastValue = sample.value;
+    });
+    return inputSamples;
+  }
+
+  function getAnimationAsyncSlices(uiThread) {
+    if (!uiThread)
+      return [];
+
+    var slices = [];
+    uiThread.asyncSliceGroup.iterateAllEvents(function(slice) {
+      if (/^animator\:/.test(slice.title))
+        slices.push(slice);
+    });
+    return slices;
+  }
+
+  /**
+   * Model for Android App specific data.
+   * @constructor
+   */
+  function AndroidApp(process, uiThread, renderThread, surfaceFlinger,
+      uiDrawType) {
+    this.process = process;
+    this.uiThread = uiThread;
+    this.renderThread = renderThread;
+    this.surfaceFlinger = surfaceFlinger;
+    this.uiDrawType = uiDrawType;
+
+    this.frames_ = undefined;
+    this.inputs_ = undefined;
+  };
+
+  AndroidApp.createForProcessIfPossible = function(process, surfaceFlinger) {
+    var uiThread = process.getThread(process.pid);
+    var uiDrawType = getUiDrawType(uiThread);
+    if (uiDrawType == UI_DRAW_TYPE.NONE) {
+      uiThread = undefined;
+    }
+    var renderThreads = process.findAllThreadsNamed('RenderThread');
+    var renderThread = renderThreads.length == 1 ? renderThreads[0] : undefined;
+
+    if (uiThread || renderThread) {
+      return new AndroidApp(process, uiThread, renderThread, surfaceFlinger,
+        uiDrawType);
+    }
+  };
+
+  AndroidApp.prototype = {
+    /**
+     * Returns a list of all frames in the trace for the app,
+     * constructed on first query.
+     */
+    getFrames: function() {
+      if (!this.frames_) {
+        var uiFrames = getUiThreadDrivenFrames(this);
+        var rtFrames = getRenderThreadDrivenFrames(this);
+        this.frames_ = uiFrames.concat(rtFrames);
+
+        // merge frames by sorting by end timestamp
+        this.frames_.sort(function(a, b) { return a.end - b.end; });
+      }
+      return this.frames_;
+    },
+
+    /**
+     * Returns list of CounterSamples for each input event enqueued to the app.
+     */
+    getInputSamples: function() {
+      if (!this.inputs_) {
+        this.inputs_ = getInputSamples(this.process);
+      }
+      return this.inputs_;
+    },
+
+    getAnimationAsyncSlices: function() {
+      if (!this.animations_) {
+        this.animations_ = getAnimationAsyncSlices(this.uiThread);
+      }
+      return this.animations_;
+    }
+  };
+
+  return {
+    AndroidApp: AndroidApp
+  };
+});
+</script>
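
For reference, a minimal standalone sketch (not part of the patch) of the rising-edge detection that getInputSamples() applies to the app's pending-input counter; the sample values mirror the 'appInputs' test later in this patch.

var samples = [
  {timestamp: 10, value: 1},
  {timestamp: 20, value: 0},
  {timestamp: 30, value: 1},
  {timestamp: 40, value: 2},
  {timestamp: 50, value: 0}
];
var risingEdges = [];
var lastValue = 0;
samples.forEach(function(sample) {
  // Each rise in the pending-input queue counter is treated as a user input.
  if (sample.value > lastValue)
    risingEdges.push(sample.timestamp);
  lastValue = sample.value;
});
// risingEdges === [10, 30, 40]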
diff --git a/catapult/tracing/tracing/model/helpers/android_model_helper.html b/catapult/tracing/tracing/model/helpers/android_model_helper.html
new file mode 100644
index 0000000..8c84f5e
--- /dev/null
+++ b/catapult/tracing/tracing/model/helpers/android_model_helper.html
@@ -0,0 +1,104 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/base/guid.html">
+<link rel="import" href="/tracing/base/range_utils.html">
+<link rel="import" href="/tracing/core/auditor.html">
+<link rel="import" href="/tracing/model/helpers/android_app.html">
+<link rel="import" href="/tracing/model/helpers/android_surface_flinger.html">
+
+<script>
+'use strict';
+
+/**
+ * @fileoverview Class for managing android-specific model meta data,
+ * such as rendering apps, frames rendered, and SurfaceFlinger.
+ */
+tr.exportTo('tr.model.helpers', function() {
+  var AndroidApp = tr.model.helpers.AndroidApp;
+  var AndroidSurfaceFlinger = tr.model.helpers.AndroidSurfaceFlinger;
+
+  var IMPORTANT_SURFACE_FLINGER_SLICES = {
+    'doComposition' : true,
+    'updateTexImage' : true,
+    'postFramebuffer' : true
+  };
+  var IMPORTANT_UI_THREAD_SLICES = {
+    'Choreographer#doFrame' : true,
+    'performTraversals' : true,
+    'deliverInputEvent' : true
+  };
+  var IMPORTANT_RENDER_THREAD_SLICES = {
+    'doFrame' : true
+  };
+
+  function iterateImportantThreadSlices(thread, important, callback) {
+    if (!thread)
+      return;
+
+    thread.sliceGroup.slices.forEach(function(slice) {
+      if (slice.title in important)
+        callback(slice);
+    });
+  }
+
+  /**
+   * Model for Android-specific data.
+   * @constructor
+   */
+  function AndroidModelHelper(model) {
+    this.model = model;
+    this.apps = [];
+    this.surfaceFlinger = undefined;
+
+    var processes = model.getAllProcesses();
+    for (var i = 0; i < processes.length && !this.surfaceFlinger; i++) {
+      this.surfaceFlinger =
+          AndroidSurfaceFlinger.createForProcessIfPossible(processes[i]);
+    }
+
+    model.getAllProcesses().forEach(function(process) {
+      var app = AndroidApp.createForProcessIfPossible(
+          process, this.surfaceFlinger);
+      if (app)
+        this.apps.push(app);
+    }, this);
+  };
+
+  AndroidModelHelper.guid = tr.b.GUID.allocate();
+
+  AndroidModelHelper.supportsModel = function(model) {
+    return true;
+  };
+
+  AndroidModelHelper.prototype = {
+    iterateImportantSlices: function(callback) {
+      if (this.surfaceFlinger) {
+        iterateImportantThreadSlices(
+            this.surfaceFlinger.thread,
+            IMPORTANT_SURFACE_FLINGER_SLICES,
+            callback);
+      }
+
+      this.apps.forEach(function(app) {
+        iterateImportantThreadSlices(
+            app.uiThread,
+            IMPORTANT_UI_THREAD_SLICES,
+            callback);
+        iterateImportantThreadSlices(
+            app.renderThread,
+            IMPORTANT_RENDER_THREAD_SLICES,
+            callback);
+      });
+    }
+  };
+
+  return {
+    AndroidModelHelper: AndroidModelHelper
+  };
+});
+</script>
diff --git a/catapult/tracing/tracing/model/helpers/android_model_helper_test.html b/catapult/tracing/tracing/model/helpers/android_model_helper_test.html
new file mode 100644
index 0000000..114bd5a
--- /dev/null
+++ b/catapult/tracing/tracing/model/helpers/android_model_helper_test.html
@@ -0,0 +1,227 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/core/test_utils.html">
+<link rel="import" href="/tracing/extras/android/android_auditor.html">
+<link rel="import" href="/tracing/extras/importer/linux_perf/ftrace_importer.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  var AndroidModelHelper = tr.model.helpers.AndroidModelHelper;
+  var newAsyncSliceNamed = tr.c.TestUtils.newAsyncSliceNamed;
+  var newSliceEx = tr.c.TestUtils.newSliceEx;
+  var newCounterNamed = tr.c.TestUtils.newCounterNamed;
+  var newCounterSeries = tr.c.TestUtils.newCounterSeries;
+
+  function createSurfaceFlingerWithVsyncs(model) {
+    if (model.getProcess(2))
+      throw new Error('process already exists');
+
+    var sfProcess = model.getOrCreateProcess(2);
+    var sfThread = sfProcess.getOrCreateThread(2); // main thread, tid = pid
+    sfThread.name = '/system/bin/surfaceflinger';
+
+    // ensure slicegroup has data
+    sfThread.sliceGroup.pushSlice(newSliceEx({
+      title: 'doComposition',
+      start: 8,
+      duration: 2
+    }));
+
+    var counter = sfProcess.getOrCreateCounter('android', 'VSYNC');
+    var series = newCounterSeries();
+    for (var i = 0; i <= 10; i++) {
+      series.addCounterSample(i * 10, i % 2);
+    }
+    counter.addSeries(series);
+  }
+
+  /*
+   * List of customizeModelCallbacks which produce different 80ms frames,
+   * each starting at 10ms, and with a single important slice
+   */
+  var SINGLE_FRAME_CUSTOM_MODELS = [
+    function(model) {
+      // UI thread only
+      var uiThread = model.getOrCreateProcess(120).getOrCreateThread(120);
+      uiThread.sliceGroup.pushSlice(newSliceEx(
+          {title: 'performTraversals', start: 10, duration: 80}));
+
+      model.uiThread = uiThread;
+    },
+
+    function(model) {
+      // RenderThread only
+      var renderThread = model.getOrCreateProcess(120).getOrCreateThread(200);
+      renderThread.name = 'RenderThread';
+      renderThread.sliceGroup.pushSlice(newSliceEx(
+          {title: 'doFrame', start: 10, duration: 80}));
+
+      model.renderThread = renderThread;
+    },
+
+    function(model) {
+      var uiThread = model.getOrCreateProcess(120).getOrCreateThread(120);
+
+      // UI thread time - 19 (from 10 to 29)
+      uiThread.asyncSliceGroup.push(
+        newAsyncSliceNamed('deliverInputEvent', 10, 9, uiThread, uiThread));
+      uiThread.sliceGroup.pushSlice(newSliceEx(
+          {title: 'performTraversals', start: 20, duration: 10}));
+      uiThread.sliceGroup.pushSlice(newSliceEx(
+          {title: 'draw', start: 20, duration: 8}));
+      uiThread.sliceGroup.pushSlice(newSliceEx(
+          {title: 'Record View#draw()', start: 20, duration: 8}));
+
+      // RenderThread time - 61 (from 29 to 90)
+      var renderThread = model.getOrCreateProcess(120).getOrCreateThread(200);
+      renderThread.name = 'RenderThread';
+      renderThread.sliceGroup.pushSlice(newSliceEx(
+          {title: 'DrawFrame', start: 29, duration: 61}));
+      renderThread.sliceGroup.pushSlice(newSliceEx(
+          {title: 'syncFrameState', start: 29, duration: 1}));
+
+      model.uiThread = uiThread;
+      model.renderThread = renderThread;
+    }
+  ];
+
+  test('getThreads', function() {
+    SINGLE_FRAME_CUSTOM_MODELS.forEach(function(customizeModelCallback) {
+      var model = tr.c.TestUtils.newModel(customizeModelCallback);
+      var helper = model.getOrCreateHelper(AndroidModelHelper);
+      assert.equal(helper.apps[0].uiThread, model.uiThread);
+      assert.equal(helper.apps[0].renderThread, model.renderThread);
+    });
+  });
+
+  test('iterateImportantSlices', function() {
+    SINGLE_FRAME_CUSTOM_MODELS.forEach(function(customizeModelCallback) {
+      var model = tr.c.TestUtils.newModel(customizeModelCallback);
+      var helper = model.getOrCreateHelper(AndroidModelHelper);
+
+      var seen = 0;
+      helper.iterateImportantSlices(function(importantSlice) {
+        assert.isTrue(importantSlice instanceof tr.model.Slice);
+        seen++;
+      });
+      assert.equal(seen, 1);
+    });
+  });
+
+  test('getFrames', function() {
+    SINGLE_FRAME_CUSTOM_MODELS.forEach(function(customizeModelCallback) {
+      var model = tr.c.TestUtils.newModel(customizeModelCallback);
+      var helper = model.getOrCreateHelper(AndroidModelHelper);
+      assert.equal(helper.apps.length, 1);
+
+      var frames = helper.apps[0].getFrames();
+      assert.equal(frames.length, 1);
+      assert.closeTo(frames[0].totalDuration, 80, 1e-5);
+
+      assert.closeTo(frames[0].start, 10, 1e-5);
+      assert.closeTo(frames[0].end, 90, 1e-5);
+    });
+  });
+
+  test('surfaceFlingerVsyncs', function() {
+    var model = tr.c.TestUtils.newModel(createSurfaceFlingerWithVsyncs);
+    var helper = model.getOrCreateHelper(AndroidModelHelper);
+    assert.isTrue(helper.surfaceFlinger.hasVsyncs);
+
+    // test querying the vsyncs
+    assert.closeTo(helper.surfaceFlinger.getFrameKickoff(5), 0, 1e-5);
+    assert.closeTo(helper.surfaceFlinger.getFrameDeadline(95), 100, 1e-5);
+
+    assert.closeTo(helper.surfaceFlinger.getFrameKickoff(10), 10, 1e-5);
+    assert.closeTo(helper.surfaceFlinger.getFrameDeadline(90), 100, 1e-5);
+
+    // Queries outside the recorded vsync range return undefined.
+    assert.isUndefined(helper.surfaceFlinger.getFrameKickoff(-5));
+    assert.isUndefined(helper.surfaceFlinger.getFrameDeadline(105));
+  });
+
+  test('frameVsyncInterop', function() {
+    var model = tr.c.TestUtils.newModel(function(model) {
+      // app - 3 good, 3 bad frames
+      var uiThread = model.getOrCreateProcess(1).getOrCreateThread(1);
+      uiThread.sliceGroup.pushSlice(newSliceEx(
+          {title: 'performTraversals', start: 1, duration: 8}));
+      uiThread.sliceGroup.pushSlice(newSliceEx(
+          {title: 'performTraversals', start: 10, duration: 8}));
+      uiThread.sliceGroup.pushSlice(newSliceEx(
+          {title: 'performTraversals', start: 20, duration: 8}));
+      uiThread.sliceGroup.pushSlice(newSliceEx(
+          {title: 'performTraversals', start: 31, duration: 11}));
+      uiThread.sliceGroup.pushSlice(newSliceEx(
+          {title: 'performTraversals', start: 45, duration: 6}));
+      uiThread.sliceGroup.pushSlice(newSliceEx(
+          {title: 'performTraversals', start: 60, duration: 20}));
+
+      // surface flinger - vsync every 10ms
+      createSurfaceFlingerWithVsyncs(model);
+    });
+    var helper = model.getOrCreateHelper(AndroidModelHelper);
+
+    var frames = helper.apps[0].getFrames();
+    assert.equal(frames.length, 6);
+    for (var i = 0; i < 6; i++) {
+      var shouldMissDeadline = i >= 3;
+      var missedDeadline = frames[i].args['deadline'] < frames[i].end;
+      assert.equal(shouldMissDeadline, missedDeadline);
+    }
+  });
+
+  test('appInputs', function() {
+    var model = tr.c.TestUtils.newModel(function(model) {
+      var process = model.getOrCreateProcess(120);
+      var uiThread = process.getOrCreateThread(120);
+      uiThread.sliceGroup.pushSlice(newSliceEx(
+          {title: 'performTraversals', start: 20, duration: 4}));
+      uiThread.sliceGroup.pushSlice(newSliceEx(
+          {title: 'performTraversals', start: 40, duration: 4}));
+
+      var counter = process.getOrCreateCounter('android', 'aq:pending:foo');
+      var series = newCounterSeries();
+      series.addCounterSample(10, 1);
+      series.addCounterSample(20, 0);
+      series.addCounterSample(30, 1);
+      series.addCounterSample(40, 2);
+      series.addCounterSample(50, 0);
+      counter.addSeries(series);
+    });
+    var helper = model.getOrCreateHelper(AndroidModelHelper);
+    assert.equal(helper.apps.length, 1);
+
+    var inputSamples = helper.apps[0].getInputSamples();
+    assert.equal(inputSamples.length, 3);
+    assert.equal(inputSamples[0].timestamp, 10);
+    assert.equal(inputSamples[1].timestamp, 30);
+    assert.equal(inputSamples[2].timestamp, 40);
+  });
+
+  test('appAnimations', function() {
+    var model = tr.c.TestUtils.newModel(function(model) {
+      var process = model.getOrCreateProcess(120);
+      var uiThread = process.getOrCreateThread(120);
+      uiThread.sliceGroup.pushSlice(newSliceEx(
+          {title: 'performTraversals', start: 10, duration: 10}));
+      uiThread.asyncSliceGroup.push(newAsyncSliceNamed('animator:foo', 0, 10,
+                                                       uiThread, uiThread));
+    });
+    var helper = model.getOrCreateHelper(AndroidModelHelper);
+    assert.equal(helper.apps.length, 1);
+
+    var animations = helper.apps[0].getAnimationAsyncSlices();
+    assert.equal(animations.length, 1);
+    assert.equal(animations[0].start, 0);
+    assert.equal(animations[0].end, 10);
+  });
+});
+</script>
diff --git a/catapult/tracing/tracing/model/helpers/android_surface_flinger.html b/catapult/tracing/tracing/model/helpers/android_surface_flinger.html
new file mode 100644
index 0000000..0ecc05f
--- /dev/null
+++ b/catapult/tracing/tracing/model/helpers/android_surface_flinger.html
@@ -0,0 +1,105 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/base/sorted_array_utils.html">
+
+<script>
+'use strict';
+
+/**
+ * @fileoverview Class for representing SurfaceFlinger process and its Vsyncs.
+ */
+tr.exportTo('tr.model.helpers', function() {
+  var findLowIndexInSortedArray = tr.b.findLowIndexInSortedArray;
+
+  var VSYNC_SF_NAME = 'android.VSYNC-sf';
+  var VSYNC_APP_NAME = 'android.VSYNC-app';
+  var VSYNC_FALLBACK_NAME = 'android.VSYNC';
+
+  // When querying vsyncs, nudge the frame-start timestamp forward by this
+  // much so that a start falling exactly on a vsync is treated as lying
+  // between that vsync and the next one.
+  var TIMESTAMP_FUDGE_MS = 0.01;
+
+  function getVsyncTimestamps(process, counterName) {
+    var vsync = process.counters[counterName];
+    if (!vsync)
+      vsync = process.counters[VSYNC_FALLBACK_NAME];
+
+    if (vsync && vsync.numSeries == 1 && vsync.numSamples > 1)
+      return vsync.series[0].timestamps;
+    return undefined;
+  }
+
+  /**
+   * Model for SurfaceFlinger specific data.
+   * @constructor
+   */
+  function AndroidSurfaceFlinger(process, thread) {
+    this.process = process;
+    this.thread = thread;
+
+    this.appVsync_ = undefined;
+    this.sfVsync_ = undefined;
+
+    this.appVsyncTimestamps_ = getVsyncTimestamps(process, VSYNC_APP_NAME);
+    this.sfVsyncTimestamps_ = getVsyncTimestamps(process, VSYNC_SF_NAME);
+  };
+
+  AndroidSurfaceFlinger.createForProcessIfPossible = function(process) {
+    var mainThread = process.getThread(process.pid);
+
+    // Newer versions: the main thread has a lowercase name with a preceding
+    // forward slash (e.g. '/system/bin/surfaceflinger').
+    if (mainThread && mainThread.name &&
+        /surfaceflinger/.test(mainThread.name))
+      return new AndroidSurfaceFlinger(process, mainThread);
+
+    // Older versions: another thread is named 'SurfaceFlinger'.
+    var primaryThreads = process.findAllThreadsNamed('SurfaceFlinger');
+    if (primaryThreads.length == 1)
+      return new AndroidSurfaceFlinger(process, primaryThreads[0]);
+    return undefined;
+  };
+
+  AndroidSurfaceFlinger.prototype = {
+    get hasVsyncs() {
+      return !!this.appVsyncTimestamps_ && !!this.sfVsyncTimestamps_;
+    },
+
+    getFrameKickoff: function(timestamp) {
+      if (!this.hasVsyncs)
+        throw new Error('cannot query vsync info without vsyncs');
+
+      var firstGreaterIndex =
+          findLowIndexInSortedArray(this.appVsyncTimestamps_,
+                                    function(x) { return x; },
+                                    timestamp + TIMESTAMP_FUDGE_MS);
+
+      if (firstGreaterIndex < 1)
+        return undefined;
+      return this.appVsyncTimestamps_[firstGreaterIndex - 1];
+    },
+
+    getFrameDeadline: function(timestamp) {
+      if (!this.hasVsyncs)
+        throw new Error('cannot query vsync info without vsyncs');
+
+      var firstGreaterIndex =
+          findLowIndexInSortedArray(this.sfVsyncTimestamps_,
+                                    function(x) { return x; },
+                                    timestamp + TIMESTAMP_FUDGE_MS);
+      if (firstGreaterIndex >= this.sfVsyncTimestamps_.length)
+        return undefined;
+      return this.sfVsyncTimestamps_[firstGreaterIndex];
+    }
+  };
+
+  return {
+    AndroidSurfaceFlinger: AndroidSurfaceFlinger
+  };
+});
+</script>
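
A worked example (not part of the patch) of the two vsync queries above, assuming surfaceFlinger is an AndroidSurfaceFlinger whose app and SurfaceFlinger vsync counters were sampled every 10 ms from 0 to 100, as in the 'surfaceFlingerVsyncs' test earlier in this patch.

// appVsyncTimestamps_ === sfVsyncTimestamps_ === [0, 10, 20, ..., 100]
surfaceFlinger.getFrameKickoff(5);     // -> 0, last app vsync at or before 5.
surfaceFlinger.getFrameKickoff(10);    // -> 10; the fudge keeps a start that
                                       //    lands exactly on a vsync there.
surfaceFlinger.getFrameDeadline(90);   // -> 100, next sf vsync after 90.
surfaceFlinger.getFrameDeadline(95);   // -> 100.
surfaceFlinger.getFrameKickoff(-5);    // -> undefined (before first vsync).
surfaceFlinger.getFrameDeadline(105);  // -> undefined (after last vsync).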
diff --git a/catapult/tracing/tracing/model/helpers/chrome_browser_helper.html b/catapult/tracing/tracing/model/helpers/chrome_browser_helper.html
new file mode 100644
index 0000000..6a715c1
--- /dev/null
+++ b/catapult/tracing/tracing/model/helpers/chrome_browser_helper.html
@@ -0,0 +1,123 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2014 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/model/helpers/chrome_process_helper.html">
+
+<script>
+'use strict';
+
+/**
+ * @fileoverview Utilities for accessing trace data about the Chrome browser.
+ */
+tr.exportTo('tr.model.helpers', function() {
+  function ChromeBrowserHelper(modelHelper, process) {
+    tr.model.helpers.ChromeProcessHelper.call(this, modelHelper, process);
+    this.mainThread_ = process.findAtMostOneThreadNamed('CrBrowserMain');
+  }
+
+  ChromeBrowserHelper.isBrowserProcess = function(process) {
+    return !!process.findAtMostOneThreadNamed('CrBrowserMain');
+  };
+
+  ChromeBrowserHelper.prototype = {
+    __proto__: tr.model.helpers.ChromeProcessHelper.prototype,
+
+    get rendererHelpers() {
+      return this.modelHelper.rendererHelpers;
+    },
+
+    getLoadingEventsInRange: function(rangeOfInterest) {
+      return this.getAllAsyncSlicesMatching(function(slice) {
+        return slice.title.indexOf('WebContentsImpl Loading') === 0 &&
+            rangeOfInterest.intersectsExplicitRangeInclusive(
+                slice.start, slice.end);
+      });
+    },
+
+    getCommitProvisionalLoadEventsInRange: function(rangeOfInterest) {
+      return this.getAllAsyncSlicesMatching(function(slice) {
+        return slice.title === 'RenderFrameImpl::didCommitProvisionalLoad' &&
+            rangeOfInterest.intersectsExplicitRangeInclusive(
+                slice.start, slice.end);
+      });
+    },
+
+    get hasLatencyEvents() {
+      var hasLatency = false;
+      this.modelHelper.model.getAllThreads().some(function(thread) {
+        thread.iterateAllEvents(function(event) {
+          if (!event.isTopLevel)
+            return;
+          if (!(event instanceof tr.e.cc.InputLatencyAsyncSlice))
+            return;
+          hasLatency = true;
+        });
+        return hasLatency;
+      });
+      return hasLatency;
+    },
+
+    getLatencyEventsInRange: function(rangeOfInterest) {
+      return this.getAllAsyncSlicesMatching(function(slice) {
+        return (slice.title.indexOf('InputLatency') === 0) &&
+            rangeOfInterest.intersectsExplicitRangeInclusive(
+                slice.start, slice.end);
+      });
+    },
+
+    getAllAsyncSlicesMatching: function(pred, opt_this) {
+      var events = [];
+      this.iterAllThreads(function(thread) {
+        thread.iterateAllEvents(function(slice) {
+          if (pred.call(opt_this, slice))
+            events.push(slice);
+        });
+      });
+      return events;
+    },
+
+    getAllNetworkEventsInRange: function(rangeOfInterest) {
+      var networkEvents = [];
+      this.modelHelper.model.getAllThreads().forEach(function(thread) {
+        thread.asyncSliceGroup.slices.forEach(function(slice) {
+          var match = false;
+          if (slice.category == 'net' ||  // old-style URLRequest/Resource
+              slice.category == 'disabled-by-default-netlog' ||
+              slice.category == 'netlog') {
+            match = true;
+          }
+
+          if (!match)
+            return;
+
+          if (rangeOfInterest.intersectsExplicitRangeInclusive(
+                slice.start, slice.end))
+            networkEvents.push(slice);
+        });
+      });
+      return networkEvents;
+    },
+
+    iterAllThreads: function(func, opt_this) {
+      tr.b.iterItems(this.process.threads, function(tid, thread) {
+        func.call(opt_this, thread);
+      });
+
+      tr.b.iterItems(this.rendererHelpers, function(pid, rendererHelper) {
+        var rendererProcess = rendererHelper.process;
+        tr.b.iterItems(rendererProcess.threads, function(tid, thread) {
+          func.call(opt_this, thread);
+        });
+      }, this);
+    }
+  };
+
+  return {
+    ChromeBrowserHelper: ChromeBrowserHelper
+  };
+});
+</script>
diff --git a/catapult/tracing/tracing/model/helpers/chrome_browser_helper_test.html b/catapult/tracing/tracing/model/helpers/chrome_browser_helper_test.html
new file mode 100644
index 0000000..737b93a
--- /dev/null
+++ b/catapult/tracing/tracing/model/helpers/chrome_browser_helper_test.html
@@ -0,0 +1,67 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/core/test_utils.html">
+<link rel="import" href="/tracing/extras/chrome/cc/input_latency_async_slice.html">
+<link rel="import" href="/tracing/extras/chrome/chrome_test_utils.html">
+<link rel="import" href="/tracing/extras/importer/trace_event_importer.html">
+<link rel="import" href="/tracing/model/helpers/chrome_browser_helper.html">
+<link rel="import" href="/tracing/model/helpers/chrome_model_helper.html">
+<link rel="import" href="/tracing/model/model.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  var INPUT_TYPE = tr.e.cc.INPUT_EVENT_TYPE_NAMES;
+
+  function getRange(min, max) {
+    var range = new tr.b.Range();
+    range.min = min;
+    range.max = max;
+    return range;
+  }
+
+  test('LoadingEvent', function() {
+    var model = tr.e.chrome.ChromeTestUtils.newChromeModel(function() { });
+    var modelHelper = model.getOrCreateHelper(
+        tr.model.helpers.ChromeModelHelper);
+    tr.e.chrome.ChromeTestUtils.addLoadingEvent(model, {start: 1, end: 10});
+    assert.equal(1, modelHelper.browserHelper.getLoadingEventsInRange(
+      getRange(0, 100)).length);
+  });
+
+  test('ProvisionalLoadEvent', function() {
+    var model = tr.e.chrome.ChromeTestUtils.newChromeModel(function() { });
+    var modelHelper = model.getOrCreateHelper(
+        tr.model.helpers.ChromeModelHelper);
+    tr.e.chrome.ChromeTestUtils.addCommitLoadEvent(model, {start: 1, end: 10});
+    assert.equal(1,
+      modelHelper.browserHelper.getCommitProvisionalLoadEventsInRange(
+        getRange(0, 100)).length);
+  });
+
+  test('LatencyEvent', function() {
+    var model = tr.e.chrome.ChromeTestUtils.newChromeModel(function() { });
+    var modelHelper = model.getOrCreateHelper(
+        tr.model.helpers.ChromeModelHelper);
+    tr.e.chrome.ChromeTestUtils.addInputEvent(
+        model, INPUT_TYPE.UNKNOWN, {start: 1, end: 10});
+    assert.equal(1, modelHelper.browserHelper.getLatencyEventsInRange(
+      getRange(0, 100)).length);
+  });
+
+  test('NetworkEvent', function() {
+    var model = tr.e.chrome.ChromeTestUtils.newChromeModel(function() { });
+    var modelHelper = model.getOrCreateHelper(
+        tr.model.helpers.ChromeModelHelper);
+    tr.e.chrome.ChromeTestUtils.addNetworkEvent(model, {start: 1, end: 10});
+    assert.equal(1, modelHelper.browserHelper.getAllNetworkEventsInRange(
+      getRange(0, 100)).length);
+  });
+});
+</script>
diff --git a/catapult/tracing/tracing/model/helpers/chrome_gpu_helper.html b/catapult/tracing/tracing/model/helpers/chrome_gpu_helper.html
new file mode 100644
index 0000000..fe3cb49
--- /dev/null
+++ b/catapult/tracing/tracing/model/helpers/chrome_gpu_helper.html
@@ -0,0 +1,41 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<link rel="import" href="/tracing/model/helpers/chrome_process_helper.html">
+
+<script>
+'use strict';
+
+/**
+ * @fileoverview Utilities for accessing the Chrome GPU Process.
+ */
+tr.exportTo('tr.model.helpers', function() {
+  function ChromeGpuHelper(modelHelper, process) {
+    tr.model.helpers.ChromeProcessHelper.call(this, modelHelper, process);
+    this.mainThread_ = process.findAtMostOneThreadNamed('CrGpuMain');
+  };
+
+  ChromeGpuHelper.isGpuProcess = function(process) {
+    // In some android builds the GPU thread is not in a separate process.
+    if (process.findAtMostOneThreadNamed('CrBrowserMain') ||
+        process.findAtMostOneThreadNamed('CrRendererMain'))
+      return false;
+    return !!process.findAtMostOneThreadNamed('CrGpuMain');
+  };
+
+  ChromeGpuHelper.prototype = {
+    __proto__: tr.model.helpers.ChromeProcessHelper.prototype,
+
+    get mainThread() {
+      return this.mainThread_;
+    }
+  };
+
+  return {
+    ChromeGpuHelper: ChromeGpuHelper
+  };
+});
+</script>
diff --git a/catapult/tracing/tracing/model/helpers/chrome_model_helper.html b/catapult/tracing/tracing/model/helpers/chrome_model_helper.html
new file mode 100644
index 0000000..74fd4d6
--- /dev/null
+++ b/catapult/tracing/tracing/model/helpers/chrome_model_helper.html
@@ -0,0 +1,132 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2014 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/base/guid.html">
+<link rel="import" href="/tracing/base/iteration_helpers.html">
+<link rel="import" href="/tracing/model/helpers/chrome_browser_helper.html">
+<link rel="import" href="/tracing/model/helpers/chrome_gpu_helper.html">
+<link rel="import" href="/tracing/model/helpers/chrome_renderer_helper.html">
+
+<script>
+'use strict';
+
+/**
+ * @fileoverview Utilities for accessing trace data about the Chrome browser.
+ */
+tr.exportTo('tr.model.helpers', function() {
+  function findChromeBrowserProcess(model) {
+    var browserProcesses = [];
+    model.getAllProcesses().forEach(function(process) {
+      if (!tr.model.helpers.ChromeBrowserHelper.isBrowserProcess(process))
+        return;
+      browserProcesses.push(process);
+    }, this);
+    if (browserProcesses.length === 0)
+      return undefined;
+    if (browserProcesses.length > 1)
+      return undefined;
+    return browserProcesses[0];
+  }
+
+  function findChromeRenderProcesses(model) {
+    var rendererProcesses = [];
+    model.getAllProcesses().forEach(function(process) {
+      if (!tr.model.helpers.ChromeRendererHelper.isRenderProcess(process))
+        return;
+      rendererProcesses.push(process);
+    });
+    return rendererProcesses;
+  }
+
+  function findChromeGpuProcess(model) {
+    var gpuProcesses = model.getAllProcesses().filter(
+      tr.model.helpers.ChromeGpuHelper.isGpuProcess);
+    if (gpuProcesses.length != 1)
+      return undefined;
+    return gpuProcesses[0];
+  }
+
+  /**
+   * @constructor
+   */
+  function ChromeModelHelper(model) {
+    this.model_ = model;
+
+    // Find browserHelper.
+    this.browserProcess_ = findChromeBrowserProcess(model);
+    if (this.browserProcess_) {
+      this.browserHelper_ = new tr.model.helpers.ChromeBrowserHelper(
+          this, this.browserProcess_);
+    } else {
+      this.browserHelper_ = undefined;
+    }
+
+    // Find gpuHelper.
+    var gpuProcess = findChromeGpuProcess(model);
+    if (gpuProcess) {
+      this.gpuHelper_ = new tr.model.helpers.ChromeGpuHelper(
+          this, gpuProcess);
+    } else {
+      this.gpuHelper_ = undefined;
+    }
+
+    // Find rendererHelpers.
+    var rendererProcesses_ = findChromeRenderProcesses(model);
+
+    this.rendererHelpers_ = {};
+    rendererProcesses_.forEach(function(renderProcess) {
+      var rendererHelper = new tr.model.helpers.ChromeRendererHelper(
+        this, renderProcess);
+      this.rendererHelpers_[rendererHelper.pid] = rendererHelper;
+    }, this);
+  }
+
+  ChromeModelHelper.guid = tr.b.GUID.allocate();
+
+  ChromeModelHelper.supportsModel = function(model) {
+    if (findChromeBrowserProcess(model) !== undefined)
+      return true;
+    if (findChromeRenderProcesses(model).length)
+      return true;
+    return false;
+  };
+
+  ChromeModelHelper.prototype = {
+    get pid() {
+      throw new Error(
+          'ChromeModelHelper spans multiple processes and has no single pid');
+    },
+
+    get process() {
+      throw new Error(
+          'ChromeModelHelper spans multiple processes; use browserProcess, ' +
+          'gpuHelper or rendererHelpers instead');
+    },
+
+    get model() {
+      return this.model_;
+    },
+
+    get browserProcess() {
+      return this.browserProcess_;
+    },
+
+    get browserHelper() {
+      return this.browserHelper_;
+    },
+
+    get gpuHelper() {
+      return this.gpuHelper_;
+    },
+
+    get rendererHelpers() {
+      return this.rendererHelpers_;
+    }
+  };
+
+  return {
+    ChromeModelHelper: ChromeModelHelper
+  };
+});
+</script>
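
A usage sketch (not part of the patch), based on how the tests in this patch obtain the helper; model is assumed to be an already-imported tr.model.Model.

// getOrCreateHelper() consults ChromeModelHelper.supportsModel() via the hook
// defined above, so a trace with no Chrome processes is expected to yield no
// helper.
var modelHelper = model.getOrCreateHelper(tr.model.helpers.ChromeModelHelper);
if (modelHelper) {
  var browserHelper = modelHelper.browserHelper;      // undefined if no browser.
  var gpuHelper = modelHelper.gpuHelper;              // undefined if no GPU process.
  var rendererHelpers = modelHelper.rendererHelpers;  // keyed by renderer pid.
}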
diff --git a/catapult/tracing/tracing/model/helpers/chrome_model_helper_test.html b/catapult/tracing/tracing/model/helpers/chrome_model_helper_test.html
new file mode 100644
index 0000000..8200550
--- /dev/null
+++ b/catapult/tracing/tracing/model/helpers/chrome_model_helper_test.html
@@ -0,0 +1,137 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2014 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/core/test_utils.html">
+<link rel="import" href="/tracing/model/helpers/chrome_model_helper.html">
+<link rel="import" href="/tracing/extras/chrome/chrome_test_utils.html">
+<link rel="import" href="/tracing/extras/importer/trace_event_importer.html">
+<link rel="import" href="/tracing/core/test_utils.html">
+<link rel="import" href="/tracing/model/model.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  var newAsyncSliceEx = tr.c.TestUtils.newAsyncSliceEx;
+
+  test('getLatencyData', function() {
+    var m = tr.e.chrome.ChromeTestUtils.newChromeModel(function(m) {
+      m.browserMain.asyncSliceGroup.push(newAsyncSliceEx({
+        title: 'InputLatency::GestureScrollUpdate',
+        cat: 'benchmark',
+        start: 0,
+        end: 10,
+        id: '0x100',
+        isTopLevel: true,
+        args: {
+          data: {
+            INPUT_EVENT_LATENCY_ORIGINAL_COMPONENT: {'time' : 0},
+            INPUT_EVENT_LATENCY_TERMINATED_FRAME_SWAP_COMPONENT: {time: 10}
+          }
+        }
+      }));
+    });
+
+    var modelHelper = m.getOrCreateHelper(tr.model.helpers.ChromeModelHelper);
+    var latencyEvents = modelHelper.browserHelper.getLatencyEventsInRange(
+      m.bounds);
+    assert.equal(latencyEvents.length, 1);
+  });
+
+  test('getFrametime', function() {
+    var frame_ts;
+    var events = [];
+    // Browser process 3507
+    events.push({'cat' : '__metadata', 'pid' : 3507, 'tid' : 3507, 'ts' : 0, 'ph' : 'M', 'name' : 'thread_name', 'args' : {'name' : 'CrBrowserMain'}}); // @suppress longLineCheck
+
+    // Renderer process 3508
+    events.push({'cat' : '__metadata', 'pid' : 3508, 'tid' : 3508, 'ts' : 0, 'ph' : 'M', 'name' : 'thread_name', 'args' : {'name' : 'CrRendererMain'}}); // @suppress longLineCheck
+    // Compositor thread 3510
+    events.push({'cat' : '__metadata', 'pid' : 3508, 'tid' : 3510, 'ts' : 0, 'ph' : 'M', 'name' : 'thread_name', 'args' : {'name' : 'Compositor'}}); // @suppress longLineCheck
+
+    // Renderer process 3509
+    events.push({'cat' : '__metadata', 'pid' : 3509, 'tid' : 3509, 'ts' : 0, 'ph' : 'M', 'name' : 'thread_name', 'args' : {'name' : 'CrRendererMain'}}); // @suppress longLineCheck
+
+    // Compositor thread 3511
+    events.push({'cat' : '__metadata', 'pid' : 3509, 'tid' : 3511, 'ts' : 0, 'ph' : 'M', 'name' : 'thread_name', 'args' : {'name' : 'Compositor'}}); // @suppress longLineCheck
+
+    frame_ts = 0;
+    // Add impl rendering stats for browser process 3507
+    for (var i = 0; i < 10; i++) {
+      events.push({'cat' : 'benchmark', 'pid' : 3507, 'tid' : 3507, 'ts' : frame_ts, 'ph' : 'i', 'name' : 'BenchmarkInstrumentation::ImplThreadRenderingStats', 's' : 't'}); // @suppress longLineCheck
+      frame_ts += 16000 + 1000 * (i % 2);
+    }
+
+    frame_ts = 0;
+    // Add main rendering stats for renderer process 3508
+    for (var i = 0; i < 10; i++) {
+      events.push({'cat' : 'benchmark', 'pid' : 3508, 'tid' : 3508, 'ts' : frame_ts, 'ph' : 'i', 'name' : 'BenchmarkInstrumentation::MainThreadRenderingStats', 's' : 't'}); // @suppress longLineCheck
+      frame_ts += 16000 + 1000 * (i % 2);
+    }
+    events.push({'cat' : 'benchmark', 'pid' : 3508, 'tid' : 3510, 'ts' : 1600, 'ph' : 'i', 'name' : 'KeepAlive', 's' : 't'}); // @suppress longLineCheck
+
+    frame_ts = 0;
+    // Add impl and main rendering stats for renderer process 3509
+    for (var i = 0; i < 10; i++) {
+      events.push({'cat' : 'benchmark', 'pid' : 3509, 'tid' : 3511, 'ts' : frame_ts, 'ph' : 'i', 'name' : 'BenchmarkInstrumentation::ImplThreadRenderingStats', 's' : 't'}); // @suppress longLineCheck
+      events.push({'cat' : 'benchmark', 'pid' : 3509, 'tid' : 3509, 'ts' : frame_ts, 'ph' : 'i', 'name' : 'BenchmarkInstrumentation::MainThreadRenderingStats', 's' : 't'}); // @suppress longLineCheck
+      frame_ts += 16000 + 1000 * (i % 2);
+    }
+
+    var m = tr.c.TestUtils.newModelWithEvents([events]);
+    var modelHelper = m.getOrCreateHelper(tr.model.helpers.ChromeModelHelper);
+
+    // Testing browser impl and main rendering stats.
+    var frameEvents = modelHelper.browserHelper.getFrameEventsInRange(
+        tr.model.helpers.IMPL_FRAMETIME_TYPE, m.bounds);
+    var frametimeData = tr.model.helpers.getFrametimeDataFromEvents(
+        frameEvents);
+    assert.equal(frametimeData.length, 9);
+    for (var i = 0; i < frametimeData.length; i++) {
+      assert.equal(frametimeData[i].frametime, 16 + i % 2);
+    }
+    // No main rendering stats.
+    frameEvents = modelHelper.browserHelper.getFrameEventsInRange(
+        tr.model.helpers.MAIN_FRAMETIME_TYPE, m.bounds);
+    assert.equal(frameEvents.length, 0);
+
+    // Testing renderer 3508 impl and main rendering stats.
+    frameEvents = modelHelper.rendererHelpers[3508].getFrameEventsInRange(
+        tr.model.helpers.MAIN_FRAMETIME_TYPE, m.bounds);
+    frametimeData = tr.model.helpers.getFrametimeDataFromEvents(frameEvents);
+    assert.equal(frametimeData.length, 9);
+    for (var i = 0; i < frametimeData.length; i++) {
+      assert.equal(frametimeData[i].frametime, 16 + i % 2);
+    }
+
+    // No impl rendering stats.
+    frameEvents = modelHelper.rendererHelpers[3508].getFrameEventsInRange(
+        tr.model.helpers.IMPL_FRAMETIME_TYPE, m.bounds);
+    assert.equal(frameEvents.length, 0);
+
+    // Testing renderer 3509 impl and main rendering stats.
+    frameEvents = modelHelper.rendererHelpers[3509].getFrameEventsInRange(
+        tr.model.helpers.IMPL_FRAMETIME_TYPE, m.bounds);
+    frametimeData = tr.model.helpers.getFrametimeDataFromEvents(frameEvents);
+    assert.equal(frametimeData.length, 9);
+    for (var i = 0; i < frametimeData.length; i++) {
+      assert.equal(frametimeData[i].frametime, 16 + i % 2);
+    }
+
+    frameEvents = modelHelper.rendererHelpers[3509].getFrameEventsInRange(
+        tr.model.helpers.MAIN_FRAMETIME_TYPE, m.bounds);
+    frametimeData = tr.model.helpers.getFrametimeDataFromEvents(frameEvents);
+    assert.equal(frametimeData.length, 9);
+    for (var i = 0; i < frametimeData.length; i++) {
+      assert.equal(frametimeData[i].frametime, 16 + i % 2);
+    }
+  });
+});
+</script>
diff --git a/catapult/tracing/tracing/model/helpers/chrome_process_helper.html b/catapult/tracing/tracing/model/helpers/chrome_process_helper.html
new file mode 100644
index 0000000..147b3d7
--- /dev/null
+++ b/catapult/tracing/tracing/model/helpers/chrome_process_helper.html
@@ -0,0 +1,91 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2014 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<link rel="import" href="/tracing/base/base.html">
+
+<script>
+'use strict';
+
+/**
+ * @fileoverview Shared utilities for accessing trace data about Chrome
+ * processes.
+ */
+tr.exportTo('tr.model.helpers', function() {
+  var MAIN_FRAMETIME_TYPE = 'main_frametime_type';
+  var IMPL_FRAMETIME_TYPE = 'impl_frametime_type';
+
+  var MAIN_RENDERING_STATS =
+      'BenchmarkInstrumentation::MainThreadRenderingStats';
+  var IMPL_RENDERING_STATS =
+      'BenchmarkInstrumentation::ImplThreadRenderingStats';
+
+
+  function getSlicesIntersectingRange(rangeOfInterest, slices) {
+    var slicesInFilterRange = [];
+    for (var i = 0; i < slices.length; i++) {
+      var slice = slices[i];
+      if (rangeOfInterest.intersectsExplicitRangeInclusive(
+            slice.start, slice.end))
+        slicesInFilterRange.push(slice);
+    }
+    return slicesInFilterRange;
+  }
+
+
+  function ChromeProcessHelper(modelHelper, process) {
+    this.modelHelper = modelHelper;
+    this.process = process;
+  }
+
+  ChromeProcessHelper.prototype = {
+    get pid() {
+      return this.process.pid;
+    },
+
+    getFrameEventsInRange: function(frametimeType, range) {
+      var titleToGet;
+      if (frametimeType == MAIN_FRAMETIME_TYPE)
+        titleToGet = MAIN_RENDERING_STATS;
+      else
+        titleToGet = IMPL_RENDERING_STATS;
+
+      var frameEvents = [];
+      this.process.iterateAllEvents(function(event) {
+        if (event.title !== titleToGet)
+          return;
+        if (range.intersectsExplicitRangeInclusive(event.start, event.end))
+          frameEvents.push(event);
+      });
+
+      frameEvents.sort(function(a, b) { return a.start - b.start; });
+      return frameEvents;
+    }
+  };
+
+  function getFrametimeDataFromEvents(frameEvents) {
+    var frametimeData = [];
+    for (var i = 1; i < frameEvents.length; i++) {
+      var diff = frameEvents[i].start - frameEvents[i - 1].start;
+      frametimeData.push({
+        'x': frameEvents[i].start,
+        'frametime': diff
+      });
+    }
+    return frametimeData;
+  }
+
+  return {
+    ChromeProcessHelper: ChromeProcessHelper,
+
+    MAIN_FRAMETIME_TYPE: MAIN_FRAMETIME_TYPE,
+    IMPL_FRAMETIME_TYPE: IMPL_FRAMETIME_TYPE,
+    MAIN_RENDERING_STATS: MAIN_RENDERING_STATS,
+    IMPL_RENDERING_STATS: IMPL_RENDERING_STATS,
+
+    getSlicesIntersectingRange: getSlicesIntersectingRange,
+    getFrametimeDataFromEvents: getFrametimeDataFromEvents
+  };
+});
+</script>
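
The helper above computes frame times purely as deltas between the start
timestamps of consecutive frame events, so N events yield N-1 data points.
A minimal standalone sketch of that computation (plain JavaScript, independent
of the tr.* namespaces; the function name frametimeDeltas is illustrative
only):

    // Events are assumed to be sorted by start time, as
    // getFrameEventsInRange() guarantees above.
    function frametimeDeltas(frameEvents) {
      var frametimeData = [];
      for (var i = 1; i < frameEvents.length; i++) {
        frametimeData.push({
          x: frameEvents[i].start,
          frametime: frameEvents[i].start - frameEvents[i - 1].start
        });
      }
      return frametimeData;
    }

    // Three frames starting at 0, 16 and 33 ms produce two frame times.
    console.log(frametimeDeltas([{start: 0}, {start: 16}, {start: 33}]));
    // -> [ { x: 16, frametime: 16 }, { x: 33, frametime: 17 } ]
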
diff --git a/catapult/tracing/tracing/model/helpers/chrome_renderer_helper.html b/catapult/tracing/tracing/model/helpers/chrome_renderer_helper.html
new file mode 100644
index 0000000..9fad9e2
--- /dev/null
+++ b/catapult/tracing/tracing/model/helpers/chrome_renderer_helper.html
@@ -0,0 +1,59 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2014 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<link rel="import" href="/tracing/model/helpers/chrome_process_helper.html">
+
+<script>
+'use strict';
+
+/**
+ * @fileoverview Utilities for accessing trace data about Chrome renderers.
+ */
+tr.exportTo('tr.model.helpers', function() {
+  function ChromeRendererHelper(modelHelper, process) {
+    tr.model.helpers.ChromeProcessHelper.call(this, modelHelper, process);
+    this.mainThread_ = process.findAtMostOneThreadNamed('CrRendererMain');
+    this.compositorThread_ = process.findAtMostOneThreadNamed('Compositor');
+    this.rasterWorkerThreads_ = process.findAllThreadsMatching(function(t) {
+      if (t.name === undefined)
+        return false;
+      if (t.name.indexOf('CompositorTileWorker') === 0)
+        return true;
+      if (t.name.indexOf('CompositorRasterWorker') === 0)
+        return true;
+      return false;
+    });
+  };
+
+  ChromeRendererHelper.isRenderProcess = function(process) {
+    if (!process.findAtMostOneThreadNamed('CrRendererMain'))
+      return false;
+    if (!process.findAtMostOneThreadNamed('Compositor'))
+      return false;
+    return true;
+  };
+
+  ChromeRendererHelper.prototype = {
+    __proto__: tr.model.helpers.ChromeProcessHelper.prototype,
+
+    get mainThread() {
+      return this.mainThread_;
+    },
+
+    get compositorThread() {
+      return this.compositorThread_;
+    },
+
+    get rasterWorkerThreads() {
+      return this.rasterWorkerThreads_;
+    }
+  };
+
+  return {
+    ChromeRendererHelper: ChromeRendererHelper
+  };
+});
+</script>
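
ChromeRendererHelper.isRenderProcess() classifies a process as a renderer
purely by thread names: it must have both a CrRendererMain thread and a
Compositor thread. A minimal sketch of that rule, assuming a simplified
{threads: [{name: ...}]} shape rather than the real tr.model.Process API:

    // Simplified stand-in for isRenderProcess(): a process counts as a
    // renderer only if both well-known thread names are present.
    function looksLikeRenderProcess(process) {
      var names = process.threads.map(function(t) { return t.name; });
      return names.indexOf('CrRendererMain') !== -1 &&
             names.indexOf('Compositor') !== -1;
    }

    console.log(looksLikeRenderProcess(
        {threads: [{name: 'CrRendererMain'}, {name: 'Compositor'}]}));  // true
    console.log(looksLikeRenderProcess(
        {threads: [{name: 'CrBrowserMain'}]}));  // false
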
diff --git a/catapult/tracing/tracing/model/instant_event.html b/catapult/tracing/tracing/model/instant_event.html
index aa071cc..5c50132 100644
--- a/catapult/tracing/tracing/model/instant_event.html
+++ b/catapult/tracing/tracing/model/instant_event.html
@@ -5,28 +5,29 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/tracing/base/units/time_stamp.html">
 <link rel="import" href="/tracing/model/timed_event.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <script>
 'use strict';
 
-/**
- * @fileoverview Provides the InstantEvent class.
- */
 tr.exportTo('tr.model', function() {
   var InstantEventType = {
     GLOBAL: 1,
     PROCESS: 2
   };
 
+  /**
+   * An InstantEvent is a zero-duration event.
+   *
+   * @constructor
+   */
   function InstantEvent(category, title, colorId, start, args) {
-    tr.model.TimedEvent.call(this);
+    tr.model.TimedEvent.call(this, start);
 
     this.category = category || '';
     this.title = title;
     this.colorId = colorId;
-    this.start = start;
     this.args = args;
 
     this.type = undefined;
@@ -36,6 +37,15 @@
     __proto__: tr.model.TimedEvent.prototype
   };
 
+  /**
+   * A GlobalInstantEvent is a zero-duration event that's not tied to any
+   * particular process.
+   *
+   * An example is a trace event that's issued when a new USB device is plugged
+   * into the machine.
+   *
+   * @constructor
+   */
   function GlobalInstantEvent(category, title, colorId, start, args) {
     InstantEvent.apply(this, arguments);
     this.type = InstantEventType.GLOBAL;
@@ -45,10 +55,18 @@
     __proto__: InstantEvent.prototype,
     get userFriendlyName() {
       return 'Global instant event ' + this.title + ' @ ' +
-          tr.b.u.TimeStamp.format(start);
+          tr.v.Unit.byName.timeStampInMs.format(this.start);
     }
   };
 
+  /**
+   * A ProcessInstantEvent is a zero-duration event that's tied to a
+   * particular process.
+   *
+   * An example is a trace event that's issued when a kill signal is received.
+   *
+   * @constructor
+   */
   function ProcessInstantEvent(category, title, colorId, start, args) {
     InstantEvent.apply(this, arguments);
     this.type = InstantEventType.PROCESS;
@@ -59,7 +77,7 @@
 
     get userFriendlyName() {
       return 'Process-level instant event ' + this.title + ' @ ' +
-          tr.b.u.TimeStamp.format(start);
+          tr.v.Unit.byName.timeStampInMs.format(this.start);
     }
   };
 
diff --git a/catapult/tracing/tracing/model/interaction_record.html b/catapult/tracing/tracing/model/interaction_record.html
deleted file mode 100644
index 444a352..0000000
--- a/catapult/tracing/tracing/model/interaction_record.html
+++ /dev/null
@@ -1,81 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/units/time_stamp.html">
-<link rel="import" href="/tracing/model/compound_event_selection_state.html">
-<link rel="import" href="/tracing/model/event_set.html">
-<link rel="import" href="/tracing/model/timed_event.html">
-
-<script>
-'use strict';
-
-tr.exportTo('tr.model', function() {
-  var CompoundEventSelectionState = tr.model.CompoundEventSelectionState;
-
-  function InteractionRecord(parentModel, title, colorId, start, duration) {
-    tr.model.TimedEvent.call(this, start);
-    this.title = title;
-    this.colorId = colorId;
-    this.duration = duration;
-    this.args = {};
-    this.associatedEvents = new tr.model.EventSet();
-    this.parentModel = parentModel;
-
-    // sourceEvents are the ones that caused the IR Finder to create this IR.
-    this.sourceEvents = new tr.model.EventSet();
-  }
-
-  InteractionRecord.prototype = {
-    __proto__: tr.model.TimedEvent.prototype,
-
-    get subSlices() {
-      return [];
-    },
-
-    get userFriendlyName() {
-      return this.title + ' interaction at ' +
-          tr.b.u.TimeStamp.format(this.start);
-    },
-
-    get stableId() {
-      return 'IR.' + this.parentModel.interactionRecords.indexOf(this);
-    },
-
-    computeCompoundEvenSelectionState: function(selection) {
-      var cess = CompoundEventSelectionState.NOT_SELECTED;
-      if (selection.contains(this))
-        cess |= CompoundEventSelectionState.EVENT_SELECTED;
-
-      if (this.associatedEvents.intersectionIsEmpty(selection))
-        return cess;
-
-      var allContained = this.associatedEvents.every(function(event) {
-        return selection.contains(event);
-      });
-
-      if (allContained)
-        cess |= CompoundEventSelectionState.ALL_ASSOCIATED_EVENTS_SELECTED;
-      else
-        cess |= CompoundEventSelectionState.SOME_ASSOCIATED_EVENTS_SELECTED;
-      return cess;
-    }
-  };
-
-  tr.model.EventRegistry.register(
-      InteractionRecord,
-      {
-        name: 'interaction',
-        pluralName: 'interactions',
-        singleViewElementName: 'tr-ui-a-single-interaction-record-sub-view',
-        multiViewElementName: 'tr-ui-a-multi-interaction-record-sub-view'
-      });
-
-  return {
-    InteractionRecord: InteractionRecord
-  };
-});
-</script>
diff --git a/catapult/tracing/tracing/model/interaction_record_test.html b/catapult/tracing/tracing/model/interaction_record_test.html
index 611e24b..597583b 100644
--- a/catapult/tracing/tracing/model/interaction_record_test.html
+++ b/catapult/tracing/tracing/model/interaction_record_test.html
@@ -6,36 +6,36 @@
 -->
 
 <link rel="import" href="/tracing/core/test_utils.html">
-<link rel="import" href="/tracing/extras/rail/stub_rail_interaction_record.html">
 <link rel="import" href="/tracing/model/model.html">
+<link rel="import" href="/tracing/model/user_model/stub_expectation.html">
 
 <script>
 'use strict';
 
 tr.b.unittest.testSuite(function() {
-  var test_utils = tr.c.TestUtils;
+  var TestUtils = tr.c.TestUtils;
   var CompoundEventSelectionState = tr.model.CompoundEventSelectionState;
 
   function createModel(opt_customizeModelCallback) {
-    return test_utils.newModel(function(model) {
+    return TestUtils.newModel(function(model) {
       model.p1 = model.getOrCreateProcess(1);
       model.t2 = model.p1.getOrCreateThread(2);
 
-      model.s1 = model.t2.sliceGroup.pushSlice(test_utils.newSliceEx({
+      model.s1 = model.t2.sliceGroup.pushSlice(TestUtils.newSliceEx({
         title: 'a', start: 10, end: 20
       }));
-      model.s2 = model.t2.sliceGroup.pushSlice(test_utils.newSliceEx({
+      model.s2 = model.t2.sliceGroup.pushSlice(TestUtils.newSliceEx({
         title: 'b', start: 20, end: 30
       }));
 
-      model.ir1 = new tr.e.rail.StubRAILInteractionRecord({
+      model.ir1 = new tr.model.um.StubExpectation({
         parentModel: model,
         start: 100, end: 200,
-        railTypeName: 'rail_response',
+        typeName: 'Response',
         normalizedEfficiency: 1.,
         normalizedUserComfort: 0.0
       });
-      model.addInteractionRecord(model.ir1);
+      model.userModel.expectations.push(model.ir1);
       model.ir1.associatedEvents.push(model.s1);
       model.ir1.associatedEvents.push(model.s2);
 
@@ -99,18 +99,18 @@
   });
 
   test('stableId', function() {
-    var model = test_utils.newModel();
+    var model = TestUtils.newModel();
 
-    var ir1 = model.addInteractionRecord(
-        test_utils.newInteractionRecord(model, 0, 10));
-    var ir2 = model.addInteractionRecord(
-        test_utils.newInteractionRecord(model, 10, 10));
-    var ir3 = model.addInteractionRecord(
-        test_utils.newInteractionRecord(model, 20, 10));
+    var ir1 = model.userModel.expectations.push(
+        TestUtils.newInteractionRecord(model, 0, 10));
+    var ir2 = model.userModel.expectations.push(
+        TestUtils.newInteractionRecord(model, 10, 10));
+    var ir3 = model.userModel.expectations.push(
+        TestUtils.newInteractionRecord(model, 20, 10));
 
-    assert.equal('IR.0', ir1.stableId);
-    assert.equal('IR.1', ir2.stableId);
-    assert.equal('IR.2', ir3.stableId);
+    assert.equal('UserExpectation.0', ir1.stableId);
+    assert.equal('UserExpectation.1', ir2.stableId);
+    assert.equal('UserExpectation.2', ir3.stableId);
   });
 
 });
diff --git a/catapult/tracing/tracing/model/ir_coverage.html b/catapult/tracing/tracing/model/ir_coverage.html
index d148b33..e4bd3d7 100644
--- a/catapult/tracing/tracing/model/ir_coverage.html
+++ b/catapult/tracing/tracing/model/ir_coverage.html
@@ -55,7 +55,7 @@
   }
 
   function getIRCoverageFromModel(model) {
-    var associatedEvents = getAssociatedEvents(model.interactionRecords);
+    var associatedEvents = getAssociatedEvents(model.userModel.expectations);
 
     if (!associatedEvents.length)
       return undefined;
@@ -68,6 +68,9 @@
 
     var totalEventCount = associatedEvents.length + unassociatedEvents.length;
     var totalCpuMs = associatedCpuMs + unassociatedCpuMs;
+    var coveredEventsCpuTimeRatio = undefined;
+    if (totalCpuMs !== 0)
+      coveredEventsCpuTimeRatio = associatedCpuMs / totalCpuMs;
 
     return {
       associatedEventsCount: associatedEvents.length,
@@ -75,7 +78,7 @@
       associatedEventsCpuTimeMs: associatedCpuMs,
       unassociatedEventsCpuTimeMs: unassociatedCpuMs,
       coveredEventsCountRatio: associatedEvents.length / totalEventCount,
-      coveredEventsCpuTimeRatio: associatedCpuMs / totalCpuMs
+      coveredEventsCpuTimeRatio: coveredEventsCpuTimeRatio
     };
   }
 
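
The coveredEventsCpuTimeRatio guard above exists because 0 / 0 evaluates to
NaN in JavaScript; leaving the ratio undefined lets callers tell "no CPU data
recorded" apart from a genuine zero ratio (see the zeroCPU test added to
ir_coverage_test.html below). A tiny sketch of the same guard in isolation:

    // Hypothetical helper, not part of the patch: return undefined rather
    // than NaN when no CPU time was recorded at all.
    function cpuTimeRatio(associatedCpuMs, unassociatedCpuMs) {
      var totalCpuMs = associatedCpuMs + unassociatedCpuMs;
      if (totalCpuMs === 0)
        return undefined;
      return associatedCpuMs / totalCpuMs;
    }

    console.log(cpuTimeRatio(0.8, 0.8));  // 0.5
    console.log(cpuTimeRatio(0, 0));      // undefined (previously NaN)
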
diff --git a/catapult/tracing/tracing/model/ir_coverage_test.html b/catapult/tracing/tracing/model/ir_coverage_test.html
index ea2c90e..c3ab2d8 100644
--- a/catapult/tracing/tracing/model/ir_coverage_test.html
+++ b/catapult/tracing/tracing/model/ir_coverage_test.html
@@ -14,21 +14,18 @@
 tr.b.unittest.testSuite(function() {
   var newSliceEx = tr.c.TestUtils.newSliceEx;
 
-  test('computeCoverage', function() {
-    var model = tr.c.TestUtils.newModel(function(model) {
+  function createModel() {
+    return tr.c.TestUtils.newModel(function(model) {
       var process = model.getOrCreateProcess(1);
       var thread = process.getOrCreateThread(2);
       var s0 = thread.sliceGroup.pushSlice(newSliceEx(
           {title: 's0', start: 0.0, duration: 1.0}));
-      s0.cpuDuration = 0.4;
       s0.isTopLevel = true;
       var unassociatedEvent = thread.sliceGroup.pushSlice(newSliceEx(
           {title: 's1', start: 6.0, duration: 1.0}));
-      unassociatedEvent.cpuDuration = 0.8;
       unassociatedEvent.isTopLevel = true;
       var s2 = thread.sliceGroup.pushSlice(newSliceEx(
           {title: 's2', start: 2.0, duration: 1.0}));
-      s2.cpuDuration = 0.4;
       s2.isTopLevel = true;
       var f0 = tr.c.TestUtils.newFlowEventEx({
         title: 'test1',
@@ -49,12 +46,24 @@
         startThread: thread
       });
       thread.asyncSliceGroup.push(as1);
-      var ir = new tr.model.InteractionRecord(model, 'test', 0, 0, 7);
+      var ir = new tr.model.um.StubExpectation(
+          {parentModel: model, start: 0, duration: 7});
       ir.associatedEvents.push(as1);
       ir.associatedEvents.push(s0);
       ir.associatedEvents.push(s2);
       ir.associatedEvents.push(f0);
-      model.addInteractionRecord(ir);
+      model.userModel.expectations.push(ir);
+    });
+  }
+
+  test('computeCoverage', function() {
+    var model = createModel();
+    model.iterateAllEvents(function(event) {
+      if (event.title === 's0' || event.title === 's2') {
+        event.cpuSelfTime = 0.4;
+      } else if (event.title === 's1') {
+        event.cpuSelfTime = 0.8;
+      }
     });
 
     var coverage = tr.model.getIRCoverageFromModel(model);
@@ -65,6 +74,17 @@
     assert.closeTo(0.8, coverage.unassociatedEventsCpuTimeMs, 1e-3);
     assert.closeTo(0.5, coverage.coveredEventsCpuTimeRatio, 1e-3);
   });
+
+  test('zeroCPU', function() {
+    var model = createModel();
+    var coverage = tr.model.getIRCoverageFromModel(model);
+    assert.equal(3, coverage.associatedEventsCount);
+    assert.equal(1, coverage.unassociatedEventsCount);
+    assert.closeTo(0.75, coverage.coveredEventsCountRatio, 1e-3);
+    assert.closeTo(0.0, coverage.associatedEventsCpuTimeMs, 1e-3);
+    assert.closeTo(0.0, coverage.unassociatedEventsCpuTimeMs, 1e-3);
+    assert.isUndefined(coverage.coveredEventsCpuTimeRatio);
+  });
 });
 </script>
 
diff --git a/catapult/tracing/tracing/model/memory_allocator_dump.html b/catapult/tracing/tracing/model/memory_allocator_dump.html
index 31a3986..f9e8235 100644
--- a/catapult/tracing/tracing/model/memory_allocator_dump.html
+++ b/catapult/tracing/tracing/model/memory_allocator_dump.html
@@ -5,7 +5,9 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/tracing/model/attribute.html">
+<link rel="import" href="/tracing/base/iteration_helpers.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <script>
 'use strict';
@@ -21,7 +23,12 @@
     this.fullName = fullName;
     this.parent = undefined;
     this.children = [];
-    this.attributes = {};
+
+    // String -> ScalarNumeric.
+    this.numerics = {};
+
+    // String -> string.
+    this.diagnostics = {};
 
     // The associated container memory dump.
     this.containerMemoryDump = containerMemoryDump;
@@ -30,23 +37,39 @@
     this.owns = undefined;
     this.ownedBy = [];
 
+    // Map from sibling dumps (other children of this dump's parent) to the
+    // proportion of this dump's size which they (or their descendants) own.
+    this.ownedBySiblingSizes = new Map();
+
     // Retention relationship between memory allocator dumps.
     this.retains = [];
     this.retainedBy = [];
 
+    // Weak memory allocator dumps are removed from the model after import in
+    // tr.model.GlobalMemoryDump.removeWeakDumps(). See
+    // base::trace_event::MemoryAllocatorDump::Flags::WEAK in the Chromium
+    // codebase.
+    this.weak = false;
+
+    // A list of information about the memory allocator dump (e.g. about how
+    // its fields were calculated). Each item should be an object with
+    // a mandatory 'type' property and type-specific extra arguments (see
+    // MemoryAllocatorDumpInfoType).
+    this.infos = [];
+
     // For debugging purposes.
     this.guid = opt_guid;
   };
 
   /**
-   * Size attribute names. Please refer to the Memory Dump Graph Metric
+   * Size numeric names. Please refer to the Memory Dump Graph Metric
    * Calculation design document for more details (https://goo.gl/fKg0dt).
    */
-  MemoryAllocatorDump.SIZE_ATTRIBUTE_NAME = 'size';
-  MemoryAllocatorDump.EFFECTIVE_SIZE_ATTRIBUTE_NAME = 'effective_size';
-  MemoryAllocatorDump.RESIDENT_SIZE_ATTRIBUTE_NAME = 'resident_size';
-  MemoryAllocatorDump.DISPLAYED_SIZE_ATTRIBUTE_NAME =
-      MemoryAllocatorDump.EFFECTIVE_SIZE_ATTRIBUTE_NAME;
+  MemoryAllocatorDump.SIZE_NUMERIC_NAME = 'size';
+  MemoryAllocatorDump.EFFECTIVE_SIZE_NUMERIC_NAME = 'effective_size';
+  MemoryAllocatorDump.RESIDENT_SIZE_NUMERIC_NAME = 'resident_size';
+  MemoryAllocatorDump.DISPLAYED_SIZE_NUMERIC_NAME =
+      MemoryAllocatorDump.EFFECTIVE_SIZE_NUMERIC_NAME;
 
   MemoryAllocatorDump.prototype = {
     get name() {
@@ -68,50 +91,87 @@
       return false;
     },
 
-    addAttribute: function(name, value) {
-      if (name in this.attributes)
-        throw new Error('Duplicate attribute name: ' + name + '.');
-      this.attributes[name] = value;
+    addNumeric: function(name, numeric) {
+      if (!(numeric instanceof tr.v.ScalarNumeric))
+        throw new Error('Numeric value must be an instance of ScalarNumeric.');
+      if (name in this.numerics)
+        throw new Error('Duplicate numeric name: ' + name + '.');
+      this.numerics[name] = numeric;
     },
 
-    aggregateAttributes: function(opt_model) {
-      var attributes = {};
+    addDiagnostic: function(name, text) {
+      if (typeof text !== 'string')
+        throw new Error('Diagnostic text must be a string.');
+      if (name in this.diagnostics)
+        throw new Error('Duplicate diagnostic name: ' + name + '.');
+      this.diagnostics[name] = text;
+    },
 
+    aggregateNumericsRecursively: function(opt_model) {
+      var numericNames = new Set();
+
+      // Aggregate descendants' numerics recursively and gather children's
+      // numeric names.
       this.children.forEach(function(child) {
-        child.aggregateAttributes(opt_model);
-        tr.b.iterItems(child.attributes, function(name) {
-          attributes[name] = true;
-        }, this);
+        child.aggregateNumericsRecursively(opt_model);
+        tr.b.iterItems(child.numerics, numericNames.add, numericNames);
       }, this);
 
-      tr.b.iterItems(attributes, function(name) {
-        var childAttributes = this.children.map(function(child) {
-          return child.attributes[name];
-        }, this);
-        var currentAttribute = this.attributes[name];
-        this.attributes[name] = tr.model.Attribute.aggregate(
-            childAttributes, currentAttribute, opt_model);
-      }, this);
-    },
-
-    getValidSizeAttributeOrUndefined: function(sizeAttrName, opt_model) {
-      var sizeAttr = this.attributes[sizeAttrName];
-      if (sizeAttr === undefined)
-        return undefined;
-
-      if (!(sizeAttr instanceof tr.model.ScalarAttribute)) {
-        if (opt_model !== undefined) {
-          opt_model.importWarning({
-            type: 'memory_dump_parse_error',
-            message: '\'' + sizeAttrName + '\' attribute of memory allocator ' +
-                'dump \'' + memoryAllocatorDump.fullName + '\' is not a scalar.'
-          });
+      // Aggregate children's numerics.
+      numericNames.forEach(function(numericName) {
+        if (numericName === MemoryAllocatorDump.SIZE_NUMERIC_NAME ||
+            numericName === MemoryAllocatorDump.EFFECTIVE_SIZE_NUMERIC_NAME ||
+            this.numerics[numericName] !== undefined) {
+          // Don't aggregate size and effective size numerics. These are
+          // calculated in GlobalMemoryDump.prototype.calculateSizes() and
+          // GlobalMemoryDump.prototype.calculateEffectiveSizes() respectively.
+          // Also don't aggregate numerics that the parent already has.
+          return;
         }
-        return undefined;
+
+        this.numerics[numericName] = MemoryAllocatorDump.aggregateNumerics(
+            this.children.map(function(child) {
+              return child.numerics[numericName];
+            }), opt_model);
+      }, this);
+    }
+  };
+
+  // TODO(petrcermak): Consider moving this to tr.v.Numeric.
+  MemoryAllocatorDump.aggregateNumerics = function(numerics, opt_model) {
+    var shouldLogWarning = !!opt_model;
+    var aggregatedUnit = undefined;
+    var aggregatedValue = 0;
+
+    // Aggregate the units and sum up the values of the numerics.
+    numerics.forEach(function(numeric) {
+      if (numeric === undefined)
+        return;
+
+      var unit = numeric.unit;
+      if (aggregatedUnit === undefined) {
+        aggregatedUnit = unit;
+      } else if (aggregatedUnit !== unit) {
+        if (shouldLogWarning) {
+          opt_model.importWarning({
+            type: 'numeric_parse_error',
+            message: 'Multiple units provided for numeric: \'' +
+                aggregatedUnit.unitName + '\' and \'' + unit.unitName + '\'.'
+          });
+          shouldLogWarning = false;  // Don't log multiple warnings.
+        }
+        // Use the most generic unit when the numerics don't agree (best
+        // effort).
+        aggregatedUnit = tr.v.Unit.byName.unitlessNumber_smallerIsBetter;
       }
 
-      return sizeAttr;
-    }
+      aggregatedValue += numeric.value;
+    }, this);
+
+    if (aggregatedUnit === undefined)
+      return undefined;
+
+    return new tr.v.ScalarNumeric(aggregatedUnit, aggregatedValue);
   };
 
   /**
@@ -121,11 +181,36 @@
     this.source = source;
     this.target = target;
     this.importance = opt_importance;
+    this.size = undefined;
   }
 
+  /**
+   * Types of size numeric information.
+   *
+   * @enum
+   */
+  var MemoryAllocatorDumpInfoType = {
+    // The provided size of a MemoryAllocatorDump was less than the aggregated
+    // size of its children.
+    //
+    // Mandatory extra args:
+    //   * providedSize: The inconsistent provided size.
+    //   * dependencySize: The aggregated size of the children.
+    PROVIDED_SIZE_LESS_THAN_AGGREGATED_CHILDREN: 0,
+
+    // The provided size of a MemoryAllocatorDump was less than the size of its
+    // largest owner.
+    //
+    // Mandatory extra args:
+    //   * providedSize: The inconsistent provided size.
+    //   * dependencySize: The size of the largest owner.
+    PROVIDED_SIZE_LESS_THAN_LARGEST_OWNER: 1
+  };
+
   return {
     MemoryAllocatorDump: MemoryAllocatorDump,
-    MemoryAllocatorDumpLink: MemoryAllocatorDumpLink
+    MemoryAllocatorDumpLink: MemoryAllocatorDumpLink,
+    MemoryAllocatorDumpInfoType: MemoryAllocatorDumpInfoType
   };
 });
 </script>
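
MemoryAllocatorDump.aggregateNumerics() sums the values of the defined
numerics and keeps their unit only while every defined entry agrees; on the
first mismatch it warns once and falls back to a generic unit. A standalone
sketch of that strategy, with plain strings standing in for tr.v.Unit objects:

    // Simplified stand-in for aggregateNumerics(): skip undefined entries,
    // sum the values, and degrade the unit to 'unitless' on a mismatch.
    function aggregate(numerics) {
      var unit;
      var value = 0;
      var sawAny = false;
      numerics.forEach(function(numeric) {
        if (numeric === undefined)
          return;
        sawAny = true;
        if (unit === undefined)
          unit = numeric.unit;
        else if (unit !== numeric.unit)
          unit = 'unitless';  // best-effort fallback, as above
        value += numeric.value;
      });
      return sawAny ? {unit: unit, value: value} : undefined;
    }

    console.log(aggregate(
        [{unit: 'bytes', value: 10}, undefined, {unit: 'bytes', value: 20}]));
    // -> { unit: 'bytes', value: 30 }
    console.log(aggregate(
        [{unit: 'bytes', value: 10}, {unit: 'watts', value: 2}]));
    // -> { unit: 'unitless', value: 12 }
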
diff --git a/catapult/tracing/tracing/model/memory_allocator_dump_test.html b/catapult/tracing/tracing/model/memory_allocator_dump_test.html
index 274c6c7..b44e7ed 100644
--- a/catapult/tracing/tracing/model/memory_allocator_dump_test.html
+++ b/catapult/tracing/tracing/model/memory_allocator_dump_test.html
@@ -5,9 +5,11 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/tracing/model/attribute.html">
 <link rel="import" href="/tracing/model/container_memory_dump.html">
 <link rel="import" href="/tracing/model/memory_allocator_dump.html">
+<link rel="import" href="/tracing/model/memory_dump_test_utils.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <script>
 'use strict';
@@ -16,15 +18,15 @@
   var ContainerMemoryDump = tr.model.ContainerMemoryDump;
   var MemoryAllocatorDump = tr.model.MemoryAllocatorDump;
   var MemoryAllocatorDumpLink = tr.model.MemoryAllocatorDumpLink;
-  var ScalarAttribute = tr.model.ScalarAttribute;
-
-  function newChildDump(parentDump, name) {
-    var childDump = new MemoryAllocatorDump(
-        parentDump.containerMemoryDump, parentDump.fullName + '/' + name);
-    childDump.parent = parentDump;
-    parentDump.children.push(childDump);
-    return childDump;
-  }
+  var ScalarNumeric = tr.v.ScalarNumeric;
+  var unitlessNumber_smallerIsBetter =
+      tr.v.Unit.byName.unitlessNumber_smallerIsBetter;
+  var sizeInBytes = tr.v.Unit.byName.sizeInBytes;
+  var powerInWatts = tr.v.Unit.byName.powerInWatts;
+  var newAllocatorDump = tr.model.MemoryDumpTestUtils.newAllocatorDump;
+  var newChildDump = tr.model.MemoryDumpTestUtils.newChildDump;
+  var checkDumpNumericsAndDiagnostics =
+      tr.model.MemoryDumpTestUtils.checkDumpNumericsAndDiagnostics;
 
   test('memoryAllocatorDump_instantiate', function() {
     var containerDump = new ContainerMemoryDump(42);
@@ -37,48 +39,120 @@
     assert.equal(dump.quantifiedName, '\'v8/objects/object7\' in super dump');
   });
 
-  test('memoryAllocatorDumps_aggregateAttributes', function() {
+  test('memoryAllocatorDumps_aggregateNumericsRecursively', function() {
     var md = new ContainerMemoryDump(42);
 
-    var oilpanDump = new MemoryAllocatorDump(md, 'oilpan');
-    oilpanDump.addAttribute('objects_count', new ScalarAttribute('objects', 7));
+    var oilpanDump = newAllocatorDump(md, 'oilpan', {
+      'objects_count': new ScalarNumeric(unitlessNumber_smallerIsBetter, 7)
+    });
 
-    var oilpanBucket1Dump = newChildDump(oilpanDump, 'bucket1');
-    oilpanBucket1Dump.addAttribute('size',
-        new ScalarAttribute('bytes', 512));
-    oilpanBucket1Dump.addAttribute('objects_count',
-        new ScalarAttribute('objects', 3));
-    oilpanBucket1Dump.addAttribute('inner_size',
-        new ScalarAttribute('bytes', 256));
+    newChildDump(oilpanDump, 'bucket1', {
+      size: 512,
+      objects_count: new ScalarNumeric(unitlessNumber_smallerIsBetter, 3),
+      inner_size: 256,
+      outer_size: 1024
+    });
 
     var oilpanBucket2Dump = newChildDump(oilpanDump, 'bucket2');
 
-    var oilpanBucket2StringsDump = newChildDump(oilpanBucket2Dump, 'strings');
-    oilpanBucket2StringsDump.addAttribute('size',
-        new ScalarAttribute('bytes', 512));
-    oilpanBucket2StringsDump.addAttribute('objects_count',
-        new ScalarAttribute('objects', 4));
-    oilpanBucket2StringsDump.addAttribute('inner_size',
-        new ScalarAttribute('bytes', 512));
+    var oilpanBucket2StringsDump = newChildDump(oilpanBucket2Dump, 'strings', {
+      size: 512,
+      objects_count: new ScalarNumeric(unitlessNumber_smallerIsBetter, 4),
+      inner_size: 512,
+      outer_size: 2048
+    });
 
-    oilpanDump.aggregateAttributes();
+    oilpanDump.aggregateNumericsRecursively();
 
-    // oilpan has *some* attributes aggregated.
-    assert.equal(oilpanDump.attributes['objects_count'].value, 7);
-    assert.equal(oilpanDump.attributes['inner_size'].value, 768);
-    assert.equal(oilpanDump.attributes['size'].value, 1024);
+    // oilpan has *some* numerics aggregated.
+    checkDumpNumericsAndDiagnostics(oilpanDump, {
+      'objects_count': new ScalarNumeric(unitlessNumber_smallerIsBetter, 7),
+      'inner_size': 768,
+      'outer_size': 3072
+    }, {});
 
-    // oilpan/bucket2 has *all* attributes aggregated.
-    assert.equal(oilpanBucket2Dump.attributes['objects_count'].value,
-        4);
-    assert.equal(oilpanBucket2Dump.attributes['inner_size'].value, 512);
-    assert.equal(oilpanBucket2Dump.attributes['size'].value, 512);
+    // oilpan/bucket2 has *all* numerics aggregated (except for size).
+    checkDumpNumericsAndDiagnostics(oilpanBucket2Dump, {
+      'objects_count': new ScalarNumeric(unitlessNumber_smallerIsBetter, 4),
+      'inner_size': 512,
+      'outer_size': 2048
+    }, {});
 
-    // oilpan/bucket2/strings has *no* attributes aggregated.
-    assert.equal(oilpanBucket2StringsDump.attributes[
-        'objects_count'].value, 4);
-    assert.equal(oilpanBucket2StringsDump.attributes['inner_size'].value, 512);
-    assert.equal(oilpanBucket2StringsDump.attributes['size'].value, 512);
+    // oilpan/bucket2/strings has *no* numerics aggregated.
+    checkDumpNumericsAndDiagnostics(oilpanBucket2StringsDump, {
+      'size': 512,
+      'objects_count': new ScalarNumeric(unitlessNumber_smallerIsBetter, 4),
+      'inner_size': 512,
+      'outer_size': 2048
+    }, {});
+  });
+
+  test('memoryAllocatorDump_aggregateNumerics', function() {
+    function checkAggregateNumerics(numerics, expectedValue, expectedUnit,
+        opt_expectedWarningCount) {
+      function checkResult(result) {
+        if (expectedValue === undefined) {
+          assert.isUndefined(result);
+          assert.isUndefined(expectedUnit);  // Test sanity check.
+        } else {
+          assert.instanceOf(result, ScalarNumeric);
+          assert.strictEqual(result.value, expectedValue);
+          assert.strictEqual(result.unit, expectedUnit);
+        }
+      }
+
+      // Without model parameter.
+      var result1 = MemoryAllocatorDump.aggregateNumerics(numerics);
+      checkResult(result1);
+
+      // With model parameter.
+      var mockModel = {
+        warnings: [],
+        importWarning: function(warning) {
+          this.warnings.push(warning);
+        }
+      };
+      var result2 = MemoryAllocatorDump.aggregateNumerics(numerics, mockModel);
+      checkResult(result2);
+      assert.lengthOf(mockModel.warnings, opt_expectedWarningCount || 0);
+    }
+
+    // No defined numerics.
+    checkAggregateNumerics([], undefined);
+    checkAggregateNumerics([undefined], undefined);
+    checkAggregateNumerics([undefined, undefined], undefined);
+
+    // Consistent units.
+    checkAggregateNumerics(
+        [new ScalarNumeric(unitlessNumber_smallerIsBetter, 10)],
+        10, unitlessNumber_smallerIsBetter);
+    checkAggregateNumerics(
+        [new ScalarNumeric(sizeInBytes, 10),
+         new ScalarNumeric(sizeInBytes, 20),
+         new ScalarNumeric(sizeInBytes, 40)],
+        70, sizeInBytes);
+    checkAggregateNumerics(
+        [undefined,
+         new ScalarNumeric(sizeInBytes, 16),
+         undefined,
+         new ScalarNumeric(sizeInBytes, 32),
+         undefined],
+        48, sizeInBytes);
+
+    // Inconsistent units.
+    checkAggregateNumerics(
+        [new ScalarNumeric(sizeInBytes, 10),
+         new ScalarNumeric(powerInWatts, 20)],
+        30, unitlessNumber_smallerIsBetter, 1 /* opt_expectedWarningCount */);
+    checkAggregateNumerics(
+        [undefined,
+         new ScalarNumeric(powerInWatts, 16),
+         undefined,
+         new ScalarNumeric(unitlessNumber_smallerIsBetter, 32),
+         undefined,
+         new ScalarNumeric(sizeInBytes, 64),
+         undefined],
+        112, unitlessNumber_smallerIsBetter, 1 /* opt_expectedWarningCount */);
   });
 
   test('memoryAllocatorDumps_isDescendantOf', function() {
diff --git a/catapult/tracing/tracing/model/memory_dump_test_utils.html b/catapult/tracing/tracing/model/memory_dump_test_utils.html
new file mode 100644
index 0000000..8acf29f
--- /dev/null
+++ b/catapult/tracing/tracing/model/memory_dump_test_utils.html
@@ -0,0 +1,124 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/model/memory_allocator_dump.html">
+<link rel="import" href="/tracing/model/vm_region.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/unit.html">
+
+<script>
+'use strict';
+
+/**
+ * @fileoverview Helper functions for tests involving memory dumps.
+ */
+tr.exportTo('tr.model', function() {
+  var MemoryAllocatorDump = tr.model.MemoryAllocatorDump;
+  var MemoryAllocatorDumpLink = tr.model.MemoryAllocatorDumpLink;
+  var VMRegion = tr.model.VMRegion;
+  var VMRegionClassificationNode = tr.model.VMRegionClassificationNode;
+  var ScalarNumeric = tr.v.ScalarNumeric;
+  var sizeInBytes_smallerIsBetter =
+      tr.v.Unit.byName.sizeInBytes_smallerIsBetter;
+
+  function castToScalarNumeric(value) {
+    if (typeof value === 'number')
+      return new ScalarNumeric(sizeInBytes_smallerIsBetter, value);
+    assert.instanceOf(value, ScalarNumeric);
+    return value;
+  }
+
+  function MemoryDumpTestUtils() {
+    throw new Error('Static class');
+  }
+
+  MemoryDumpTestUtils.SIZE_DELTA = 0.0001;
+
+  MemoryDumpTestUtils.newAllocatorDump = function(
+      containerDump, fullName, opt_numerics, opt_guid) {
+    var dump = new MemoryAllocatorDump(containerDump, fullName, opt_guid);
+    if (opt_numerics !== undefined) {
+      tr.b.iterItems(opt_numerics, function(numericName, value) {
+        dump.addNumeric(numericName, castToScalarNumeric(value));
+      });
+    }
+    return dump;
+  };
+
+  MemoryDumpTestUtils.newChildDump =
+      function(parentDump, name, opt_numerics, opt_guid) {
+    var childDump = MemoryDumpTestUtils.newAllocatorDump(
+        parentDump.containerMemoryDump, parentDump.fullName + '/' + name,
+        opt_numerics, opt_guid);
+    childDump.parent = parentDump;
+    parentDump.children.push(childDump);
+    return childDump;
+  };
+
+  MemoryDumpTestUtils.addOwnershipLink = function(
+      ownerDump, ownedDump, opt_importance) {
+    assert.isUndefined(ownerDump.owns);  // Sanity check.
+    var ownershipLink =
+        new MemoryAllocatorDumpLink(ownerDump, ownedDump, opt_importance);
+    ownerDump.owns = ownershipLink;
+    ownedDump.ownedBy.push(ownershipLink);
+    return ownershipLink;
+  };
+
+  MemoryDumpTestUtils.checkDumpNumericsAndDiagnostics =
+      function(dump, expectedNumerics, expectedDiagnostics) {
+    var actualNumerics = dump.numerics;
+    assert.sameMembers(
+        Object.keys(actualNumerics), Object.keys(expectedNumerics));
+    for (var numericName in actualNumerics) {
+      var actualNumeric = actualNumerics[numericName];
+      var expectedNumeric = castToScalarNumeric(expectedNumerics[numericName]);
+      assert.instanceOf(actualNumeric, tr.v.ScalarNumeric);
+      assert.strictEqual(actualNumeric.unit, expectedNumeric.unit);
+      assert.closeTo(actualNumeric.value, expectedNumeric.value,
+          MemoryDumpTestUtils.SIZE_DELTA);
+    }
+
+    assert.deepEqual(dump.diagnostics, expectedDiagnostics);
+  };
+
+  MemoryDumpTestUtils.checkVMRegions = function(vmRegions, expectedRegions) {
+    if (vmRegions instanceof VMRegionClassificationNode)
+      vmRegions = vmRegions.allRegionsForTesting;
+
+    var expectedRegionsMap = new Map();
+    expectedRegions.forEach(function(region) {
+      if (!(region instanceof VMRegion))
+        region = VMRegion.fromDict(region);
+      expectedRegionsMap.set(region.uniqueIdWithinProcess, region);
+    });
+    var actualRegionsMap = new Map();
+    vmRegions.forEach(function(region) {
+      actualRegionsMap.set(region.uniqueIdWithinProcess, region);
+    });
+
+    assert.strictEqual(actualRegionsMap.size, expectedRegionsMap.size);
+    for (var id of expectedRegionsMap.keys()) {
+      var expectedRegion = expectedRegionsMap.get(id);
+      var actualRegion = actualRegionsMap.get(id);
+
+      assert.instanceOf(actualRegion, VMRegion);
+      assert.strictEqual(actualRegion.startAddress,
+          expectedRegion.startAddress);
+      assert.strictEqual(actualRegion.sizeInBytes, expectedRegion.sizeInBytes);
+      assert.strictEqual(actualRegion.protectionFlags,
+          expectedRegion.protectionFlags);
+      assert.strictEqual(actualRegion.mappedFile, expectedRegion.mappedFile);
+      assert.deepEqual(actualRegion.byteStats, expectedRegion.byteStats);
+    }
+  };
+
+  return {
+    MemoryDumpTestUtils: MemoryDumpTestUtils
+  };
+});
+</script>
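
For context, a hypothetical test body could use these helpers to build a small
dump hierarchy (the dump names 'v8', 'v8/heap' and 'partition_alloc' are
illustrative only; assert is provided by the test harness, and numerics given
as plain numbers are wrapped into size-in-bytes ScalarNumerics by
castToScalarNumeric):

    var containerDump = new tr.model.ContainerMemoryDump(42);
    var rootDump = tr.model.MemoryDumpTestUtils.newAllocatorDump(
        containerDump, 'v8', {size: 2048});
    var heapDump = tr.model.MemoryDumpTestUtils.newChildDump(
        rootDump, 'heap', {size: 1024});
    var ownerDump = tr.model.MemoryDumpTestUtils.newAllocatorDump(
        containerDump, 'partition_alloc', {size: 1024});
    tr.model.MemoryDumpTestUtils.addOwnershipLink(ownerDump, heapDump);

    // The expected numerics below are wrapped the same way, so this passes.
    tr.model.MemoryDumpTestUtils.checkDumpNumericsAndDiagnostics(
        heapDump, {size: 1024}, {});
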
diff --git a/catapult/tracing/tracing/model/model.html b/catapult/tracing/tracing/model/model.html
index ee213f7..cbbfc3c 100644
--- a/catapult/tracing/tracing/model/model.html
+++ b/catapult/tracing/tracing/model/model.html
@@ -8,25 +8,32 @@
 <link rel="import" href="/tracing/base/base.html">
 <link rel="import" href="/tracing/base/event.html">
 <link rel="import" href="/tracing/base/interval_tree.html">
+<link rel="import" href="/tracing/base/quad.html">
 <link rel="import" href="/tracing/base/range.html">
 <link rel="import" href="/tracing/base/task.html">
-<link rel="import" href="/tracing/base/units/units.html">
 <link rel="import" href="/tracing/core/auditor.html">
 <link rel="import" href="/tracing/core/filter.html">
 <link rel="import" href="/tracing/model/alert.html">
+<link rel="import" href="/tracing/model/clock_sync_manager.html">
+<link rel="import" href="/tracing/model/constants.html">
 <link rel="import" href="/tracing/model/device.html">
 <link rel="import" href="/tracing/model/flow_event.html">
 <link rel="import" href="/tracing/model/frame.html">
 <link rel="import" href="/tracing/model/global_memory_dump.html">
 <link rel="import" href="/tracing/model/instant_event.html">
-<link rel="import" href="/tracing/model/interaction_record.html">
 <link rel="import" href="/tracing/model/kernel.html">
 <link rel="import" href="/tracing/model/model_indices.html">
+<link rel="import" href="/tracing/model/model_stats.html">
+<link rel="import" href="/tracing/model/object_snapshot.html">
 <link rel="import" href="/tracing/model/process.html">
 <link rel="import" href="/tracing/model/process_memory_dump.html">
 <link rel="import" href="/tracing/model/sample.html">
 <link rel="import" href="/tracing/model/stack_frame.html">
+<link rel="import" href="/tracing/model/user_model/user_expectation.html">
+<link rel="import" href="/tracing/model/user_model/user_model.html">
 <link rel="import" href="/tracing/ui/base/overlay.html">
+<link rel="import" href="/tracing/value/time_display_mode.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <script>
 'use strict';
@@ -55,15 +62,8 @@
   var GlobalInstantEvent = tr.model.GlobalInstantEvent;
   var FlowEvent = tr.model.FlowEvent;
   var Alert = tr.model.Alert;
-  var InteractionRecord = tr.model.InteractionRecord;
   var Sample = tr.model.Sample;
 
-  function ClockSyncRecord(name, ts, args) {
-    this.name = name;
-    this.ts = ts;
-    this.args = args;
-  }
-
   /**
    * @constructor
    */
@@ -82,6 +82,9 @@
     this.categories = [];
     this.instantEvents = [];
     this.flowEvents = [];
+    this.clockSyncManager = new tr.model.ClockSyncManager();
+    // TODO(charliea): Remove clockSyncRecords once we move over to using the
+    // clockSyncManager.
     this.clockSyncRecords = [];
     this.intrinsicTimeUnit_ = undefined;
 
@@ -89,12 +92,9 @@
     this.samples = [];
 
     this.alerts = [];
-    this.interactionRecords = [];
+    this.userModel = new tr.model.um.UserModel(this);
 
-    this.flowIntervalTree = new tr.b.IntervalTree(
-        function(f) { return f.start; },
-        function(f) { return f.end; });
-
+    this.flowIntervalTree = new tr.b.IntervalTree((f) => f.start, (f) => f.end);
     this.globalMemoryDumps = [];
 
     this.userFriendlyCategoryDrivers_ = [];
@@ -102,15 +102,42 @@
     this.annotationsByGuid_ = {};
     this.modelIndices = undefined;
 
+    this.stats = new tr.model.ModelStats();
+
     this.importWarnings_ = [];
     this.reportedImportWarnings_ = {};
 
     this.isTimeHighResolution_ = undefined;
+
+    this.patchupsToApply_ = [];
+
+    this.doesHelperGUIDSupportThisModel_ = {};
+    this.helpersByConstructorGUID_ = {};
   }
 
   Model.prototype = {
     __proto__: tr.model.EventContainer.prototype,
 
+    getOrCreateHelper: function(constructor) {
+      if (!constructor.guid)
+        throw new Error('Helper constructors must have GUIDs');
+
+      if (this.helpersByConstructorGUID_[constructor.guid] === undefined) {
+        if (this.doesHelperGUIDSupportThisModel_[constructor.guid] ===
+            undefined) {
+          this.doesHelperGUIDSupportThisModel_[constructor.guid] =
+            constructor.supportsModel(this);
+        }
+
+        if (!this.doesHelperGUIDSupportThisModel_[constructor.guid])
+          return undefined;
+
+        this.helpersByConstructorGUID_[constructor.guid] = new constructor(
+            this);
+      }
+      return this.helpersByConstructorGUID_[constructor.guid];
+    },
+
     iterateAllEventsInThisContainer: function(eventTypePredicate,
                                               callback, opt_this) {
       if (eventTypePredicate.call(opt_this, GlobalMemoryDump))
@@ -125,14 +152,12 @@
       if (eventTypePredicate.call(opt_this, Alert))
         this.alerts.forEach(callback, opt_this);
 
-      if (eventTypePredicate.call(opt_this, InteractionRecord))
-        this.interactionRecords.forEach(callback, opt_this);
-
       if (eventTypePredicate.call(opt_this, Sample))
         this.samples.forEach(callback, opt_this);
     },
 
     iterateAllChildEventContainers: function(callback, opt_this) {
+      callback.call(opt_this, this.userModel);
       callback.call(opt_this, this.device);
       callback.call(opt_this, this.kernel);
       for (var pid in this.processes)
@@ -182,7 +207,7 @@
     convertTimestampToModelTime: function(sourceClockDomainName, ts) {
       if (sourceClockDomainName !== 'traceEventClock')
         throw new Error('Only traceEventClock is supported.');
-      return tr.b.u.Units.timestampFromUs(ts) +
+      return tr.v.Unit.timestampFromUs(ts) +
         this.timestampShiftToZeroAmount_;
     },
 
@@ -211,10 +236,6 @@
       return this.processes[pid];
     },
 
-    pushInstantEvent: function(instantEvent) {
-      this.instantEvents.push(instantEvent);
-    },
-
     addStackFrame: function(stackFrame) {
       if (this.stackFrames[stackFrame.id])
         throw new Error('Stack frame already exists');
@@ -222,14 +243,9 @@
       return stackFrame;
     },
 
-    addInteractionRecord: function(ir) {
-      this.interactionRecords.push(ir);
-      return ir;
-    },
-
-    getClockSyncRecordsNamed: function(name) {
+    getClockSyncRecordsWithSyncId: function(syncId) {
       return this.clockSyncRecords.filter(function(x) {
-        return x.name === name;
+        return x.syncId === syncId;
       });
     },
 
@@ -238,6 +254,7 @@
      */
     updateCategories_: function() {
       var categoriesDict = {};
+      this.userModel.addCategoriesToDict(categoriesDict);
       this.device.addCategoriesToDict(categoriesDict);
       this.kernel.addCategoriesToDict(categoriesDict);
       for (var pid in this.processes)
@@ -348,6 +365,10 @@
       return namedThreads;
     },
 
+    get importOptions() {
+      return this.importOptions_;
+    },
+
     set importOptions(options) {
       this.importOptions_ = options;
     },
@@ -358,7 +379,7 @@
      */
     get intrinsicTimeUnit() {
       if (this.intrinsicTimeUnit_ === undefined)
-        return tr.b.u.TimeDisplayModes.ms;
+        return tr.v.TimeDisplayModes.ms;
       return this.intrinsicTimeUnit_;
     },
 
@@ -385,6 +406,23 @@
     },
 
     /**
+     * Returns a link to a trace data file that this model was imported from.
+     * This is NOT the URL of a site being traced, but instead an indicator of
+     * where the data is stored.
+     */
+    get canonicalUrlThatCreatedThisTrace() {
+      return this.canonicalUrlThatCreatedThisTrace_;
+    },
+
+    set canonicalUrlThatCreatedThisTrace(value) {
+      if (this.canonicalUrlThatCreatedThisTrace_ === value)
+        return;
+      if (this.canonicalUrlThatCreatedThisTrace_ !== undefined)
+        throw new Error('canonicalUrlThatCreatedThisTrace already set');
+      this.canonicalUrlThatCreatedThisTrace_ = value;
+    },
+
+    /**
      * Saves a warning that happened during import.
      *
      * Warnings are typically logged to the console, and optionally, the
@@ -494,9 +532,9 @@
         this.processes[pid].sortMemoryDumps();
     },
 
-    calculateMemoryGraphAttributes: function() {
+    finalizeMemoryGraphs: function() {
       this.globalMemoryDumps.forEach(function(dump) {
-        dump.calculateGraphAttributes();
+        dump.finalizeGraph();
       });
     },
 
@@ -504,20 +542,38 @@
       this.modelIndices = new tr.model.ModelIndices(this);
     },
 
-    sortInteractionRecords: function() {
-      this.interactionRecords.sort(function(x, y) {
-        return x.start - y.start;
-      });
-    },
-
     sortAlerts: function() {
       this.alerts.sort(function(x, y) {
         return x.start - y.start;
       });
     },
 
+    applyObjectRefPatchups: function() {
+      // Change all the fields pointing at id_refs to their real values.
+      var unresolved = [];
+      this.patchupsToApply_.forEach(function(patchup) {
+        if (patchup.pidRef in this.processes) {
+          var snapshot = this.processes[patchup.pidRef].objects.getSnapshotAt(
+              patchup.scopedId, patchup.ts);
+          if (snapshot) {
+            patchup.object[patchup.field] = snapshot;
+            return;
+          }
+        }
+        unresolved.push(patchup);
+      }, this);
+      this.patchupsToApply_ = unresolved;
+    },
+
+    replacePIDRefsInPatchups: function(old_pid_ref, new_pid_ref) {
+      this.patchupsToApply_.forEach(function(patchup) {
+        if (patchup.pidRef === old_pid_ref)
+          patchup.pidRef = new_pid_ref;
+      });
+    },
+
     isTimeHighResolutionHeuristic_: function() {
-      if (this.intrinsicTimeUnit !== tr.b.u.TimeDisplayModes.ms)
+      if (this.intrinsicTimeUnit !== tr.v.TimeDisplayModes.ms)
         return false;
       // If the timer is only precise to the millisecond, then almost all event
       // will be precisely X ms apart. We check that by looking at the
@@ -546,11 +602,93 @@
       // If more than 90% of the events are snapped precisely on milliseconds
       // boundary we got a trace with a low resolution timer.
       return (maxEvents / nbEvents) < 0.9;
+    },
+
+    /**
+     * Joins references between objects. Called after final model bounds have
+     * been computed.
+     */
+    joinRefs: function() {
+      this.joinObjectRefs_();
+      this.applyObjectRefPatchups();
+    },
+
+    joinObjectRefs_: function() {
+      tr.b.iterItems(this.processes, function(pid, process) {
+        this.joinObjectRefsForProcess_(pid, process);
+      }, this);
+    },
+
+    joinObjectRefsForProcess_: function(pid, process) {
+      // Iterate the world, looking for id_refs
+      tr.b.iterItems(process.threads, function(tid, thread) {
+        thread.asyncSliceGroup.slices.forEach(function(item) {
+          this.searchItemForIDRefs_(pid, 'start', item);
+        }, this);
+        thread.sliceGroup.slices.forEach(function(item) {
+          this.searchItemForIDRefs_(pid, 'start', item);
+        }, this);
+      }, this);
+      process.objects.iterObjectInstances(function(instance) {
+        instance.snapshots.forEach(function(item) {
+          this.searchItemForIDRefs_(pid, 'ts', item);
+        }, this);
+      }, this);
+    },
+
+    searchItemForIDRefs_: function(pid, itemTimestampField, item) {
+      if (!item.args)
+        return;
+      var patchupsToApply = this.patchupsToApply_;
+
+      function handleField(object, fieldName, fieldValue) {
+        if (!fieldValue || (!fieldValue.id_ref && !fieldValue.idRef))
+          return;
+
+        var scope = fieldValue.scope || tr.model.OBJECT_DEFAULT_SCOPE;
+        var idRef = fieldValue.id_ref || fieldValue.idRef;
+        var scopedId = new tr.model.ScopedId(scope, idRef);
+        var pidRef = fieldValue.pid_ref || fieldValue.pidRef || pid;
+        var ts = item[itemTimestampField];
+        // We have to delay the actual change to the new value until after all
+        // refs have been located. Otherwise, we could end up recursing in
+        // ways we definitely didn't intend.
+        patchupsToApply.push({
+          object: object,
+          field: fieldName,
+          pidRef: pidRef,
+          scopedId: scopedId,
+          ts: ts});
+      }
+      function iterObjectFieldsRecursively(object) {
+        if (!(object instanceof Object))
+          return;
+
+        if ((object instanceof tr.model.ObjectSnapshot) ||
+            (object instanceof Float32Array) ||
+            (object instanceof tr.b.Quad))
+          return;
+
+        if (object instanceof Array) {
+          for (var i = 0; i < object.length; i++) {
+            handleField(object, i, object[i]);
+            iterObjectFieldsRecursively(object[i]);
+          }
+          return;
+        }
+
+        for (var key in object) {
+          var value = object[key];
+          handleField(object, key, value);
+          iterObjectFieldsRecursively(value);
+        }
+      }
+
+      iterObjectFieldsRecursively(item.args);
     }
   };
 
   return {
-    ClockSyncRecord: ClockSyncRecord,
     Model: Model
   };
 });
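
The joinRefs machinery above uses a two-phase "patchup" pattern: the traversal
only records where an id reference occurs, and a second pass swaps in the
resolved object, which avoids recursing into objects that were patched in
moments earlier. A simplified standalone sketch of that pattern (the registry
and args shapes are hypothetical stand-ins for the real snapshot lookup):

    // Phase 1: walk args and record every {id_ref: ...} occurrence.
    function collectPatchups(args, patchups) {
      for (var key in args) {
        var value = args[key];
        if (value && value.id_ref)
          patchups.push({object: args, field: key, idRef: value.id_ref});
        else if (value instanceof Object)
          collectPatchups(value, patchups);
      }
    }

    // Phase 2: resolve what we can; keep the rest for a later attempt,
    // just as applyObjectRefPatchups() does.
    function applyPatchups(patchups, registry) {
      var unresolved = [];
      patchups.forEach(function(p) {
        if (p.idRef in registry)
          p.object[p.field] = registry[p.idRef];
        else
          unresolved.push(p);
      });
      return unresolved;
    }

    var args = {layer: {id_ref: '0x1000'}};
    var patchups = [];
    collectPatchups(args, patchups);
    console.log(applyPatchups(patchups, {'0x1000': {name: 'resolved'}}));
    // -> []  (and args.layer now points at the resolved object)
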
diff --git a/catapult/tracing/tracing/model/model_stats.html b/catapult/tracing/tracing/model/model_stats.html
new file mode 100644
index 0000000..fd5bf95
--- /dev/null
+++ b/catapult/tracing/tracing/model/model_stats.html
@@ -0,0 +1,89 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/base/base.html">
+<link rel="import" href="/tracing/value/unit.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.model', function() {
+
+  /**
+   * @constructor
+   */
+  function ModelStats() {
+    this.traceEventCountsByKey_ = new Map();
+    this.allTraceEventStats_ = [];
+
+    this.traceEventStatsInTimeIntervals_ = new Map();
+    this.allTraceEventStatsInTimeIntervals_ = [];
+
+    this.hasEventSizesinBytes_ = false;
+  }
+
+  ModelStats.prototype = {
+    TIME_INTERVAL_SIZE_IN_MS: 100,
+
+    willProcessBasicTraceEvent: function(phase, category, title, ts,
+                                         opt_eventSizeinBytes) {
+      var key = phase + '/' + category + '/' + title;
+      var eventStats = this.traceEventCountsByKey_.get(key);
+      if (eventStats === undefined) {
+        eventStats = {
+          phase: phase,
+          category: category,
+          title: title,
+          numEvents: 0,
+          totalEventSizeinBytes: 0
+        };
+        this.traceEventCountsByKey_.set(key, eventStats);
+        this.allTraceEventStats_.push(eventStats);
+      }
+      eventStats.numEvents++;
+
+      var timeIntervalKey = Math.floor(
+          tr.v.Unit.timestampFromUs(ts) / this.TIME_INTERVAL_SIZE_IN_MS);
+      var eventStatsByTimeInterval =
+          this.traceEventStatsInTimeIntervals_.get(timeIntervalKey);
+      if (eventStatsByTimeInterval === undefined) {
+        eventStatsByTimeInterval = {
+          timeInterval: timeIntervalKey,
+          numEvents: 0,
+          totalEventSizeinBytes: 0
+        };
+        this.traceEventStatsInTimeIntervals_.set(timeIntervalKey,
+                                                 eventStatsByTimeInterval);
+        this.allTraceEventStatsInTimeIntervals_.push(eventStatsByTimeInterval);
+      }
+      eventStatsByTimeInterval.numEvents++;
+
+      if (opt_eventSizeinBytes !== undefined) {
+        this.hasEventSizesinBytes_ = true;
+        eventStats.totalEventSizeinBytes += opt_eventSizeinBytes;
+        eventStatsByTimeInterval.totalEventSizeinBytes += opt_eventSizeinBytes;
+      }
+    },
+
+    get allTraceEventStats() {
+      return this.allTraceEventStats_;
+    },
+
+    get allTraceEventStatsInTimeIntervals() {
+      return this.allTraceEventStatsInTimeIntervals_;
+    },
+
+    get hasEventSizesinBytes() {
+      return this.hasEventSizesinBytes_;
+    }
+  };
+
+  return {
+    ModelStats: ModelStats
+  };
+});
+</script>
diff --git a/catapult/tracing/tracing/model/model_stats_test.html b/catapult/tracing/tracing/model/model_stats_test.html
new file mode 100644
index 0000000..2a56318
--- /dev/null
+++ b/catapult/tracing/tracing/model/model_stats_test.html
@@ -0,0 +1,62 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<link rel="import" href="/tracing/model/model_stats.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  var ModelStats = tr.model.ModelStats;
+
+  test('getTraceEventStatsByCategory', function() {
+    var modelStats = new ModelStats();
+    modelStats.willProcessBasicTraceEvent('X', 'cat1', 'title1');
+    modelStats.willProcessBasicTraceEvent('X', 'cat1', 'title1');
+    modelStats.willProcessBasicTraceEvent('X', 'cat2', 'title3');
+
+    assert.equal(modelStats.allTraceEventStats.length, 2);
+    assert.equal(
+        modelStats.traceEventCountsByKey_.get('X/cat1/title1').numEvents,
+        2);
+    assert.equal(
+        modelStats.traceEventCountsByKey_.get('X/cat2/title3').numEvents,
+        1);
+  });
+
+  test('getTraceEventStatsInTimeIntervals', function() {
+    var modelStats = new ModelStats();
+    var timeIntervalSizeInUs = modelStats.TIME_INTERVAL_SIZE_IN_MS * 1000;
+    modelStats.willProcessBasicTraceEvent('X', 'cat1', 'title1', 1, 1);
+    modelStats.willProcessBasicTraceEvent(
+        'X', 'cat1', 'title1', timeIntervalSizeInUs + 1, 2);
+    modelStats.willProcessBasicTraceEvent(
+        'X', 'cat1', 'title1', 2 * timeIntervalSizeInUs + 1, 3);
+    modelStats.willProcessBasicTraceEvent(
+        'X', 'cat2', 'title3', 2 * timeIntervalSizeInUs + 2, 4);
+
+    assert.strictEqual(modelStats.allTraceEventStatsInTimeIntervals.length, 3);
+    assert.strictEqual(
+        modelStats.traceEventStatsInTimeIntervals_.get(0).numEvents, 1);
+    assert.strictEqual(
+        modelStats.traceEventStatsInTimeIntervals_.get(1).numEvents, 1);
+    assert.strictEqual(
+        modelStats.traceEventStatsInTimeIntervals_.get(2).numEvents, 2);
+
+    assert.isTrue(modelStats.hasEventSizesinBytes);
+    assert.strictEqual(
+        modelStats.traceEventStatsInTimeIntervals_.get(0).totalEventSizeinBytes,
+        1);
+    assert.strictEqual(
+        modelStats.traceEventStatsInTimeIntervals_.get(1).totalEventSizeinBytes,
+        2);
+    assert.strictEqual(
+        modelStats.traceEventStatsInTimeIntervals_.get(2).totalEventSizeinBytes,
+        7);
+  });
+
+});
+</script>
diff --git a/catapult/tracing/tracing/model/model_test.html b/catapult/tracing/tracing/model/model_test.html
index eb2cf85..154b0b0 100644
--- a/catapult/tracing/tracing/model/model_test.html
+++ b/catapult/tracing/tracing/model/model_test.html
@@ -5,12 +5,12 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/tracing/base/units/time_display_mode.html">
 <link rel="import" href="/tracing/core/test_utils.html">
 <link rel="import" href="/tracing/extras/importer/trace_event_importer.html">
 <link rel="import" href="/tracing/importer/import.html">
 <link rel="import" href="/tracing/model/annotation.html">
 <link rel="import" href="/tracing/model/model.html">
+<link rel="import" href="/tracing/value/time_display_mode.html">
 
 <script>
 'use strict';
@@ -23,7 +23,7 @@
   var createModelWithOneOfEverything = function() {
     var m = new tr.Model();
     var cpu = m.kernel.getOrCreateCpu(1);
-    cpu.slices.push(tr.c.TestUtils.newSlice(1, 3));
+    cpu.slices.push(tr.c.TestUtils.newSliceEx({start: 1, duration: 3}));
 
     var p = m.getOrCreateProcess(1);
     var t = p.getOrCreateThread(1);
@@ -77,6 +77,33 @@
     return m;
   };
 
+  test('helper', function() {
+    function Helper(model) {
+      this.model = model;
+    }
+    Helper.guid = tr.b.GUID.allocate();
+    Helper.supportsModel = function(model) {
+      return true;
+    };
+
+    var m = new tr.Model();
+    var h = m.getOrCreateHelper(Helper);
+    assert.isTrue(h instanceof Helper);
+    assert.isTrue(h === m.getOrCreateHelper(Helper));
+
+    function UnsupportedHelper(model) {
+      this.model = model;
+    }
+    UnsupportedHelper.guid = tr.b.GUID.allocate();
+    UnsupportedHelper.supportsModel = function(model) {
+      return false;
+    };
+
+    assert.isUndefined(m.getOrCreateHelper(UnsupportedHelper));
+    // Try again to test doesHelperGUIDSupportThisModel_ .
+    assert.isUndefined(m.getOrCreateHelper(UnsupportedHelper));
+  });
+
   test('modelBounds_EmptyModel', function() {
     var m = new tr.Model();
     m.updateBounds();
@@ -114,7 +141,7 @@
   test('modelBounds_OneCpu', function() {
     var m = new tr.Model();
     var cpu = m.kernel.getOrCreateCpu(1);
-    cpu.slices.push(tr.c.TestUtils.newSlice(1, 3));
+    cpu.slices.push(tr.c.TestUtils.newSliceEx({start: 1, duration: 3}));
     m.updateBounds();
     assert.equal(m.bounds.min, 1);
     assert.equal(m.bounds.max, 4);
@@ -123,7 +150,7 @@
   test('modelBounds_OneCpuOneThread', function() {
     var m = new tr.Model();
     var cpu = m.kernel.getOrCreateCpu(1);
-    cpu.slices.push(tr.c.TestUtils.newSlice(1, 3));
+    cpu.slices.push(tr.c.TestUtils.newSliceEx({start: 1, duration: 3}));
 
     var t = m.getOrCreateProcess(1).getOrCreateThread(1);
     t.sliceGroup.pushSlice(new ThreadSlice('', 'a', 0, 1, {}, 4));
@@ -171,7 +198,7 @@
   });
 
   test('TitleOrCategoryFilter', function() {
-    var s0 = tr.c.TestUtils.newSlice(1, 3);
+    var s0 = tr.c.TestUtils.newSliceEx({start: 1, duration: 3});
     assert.isTrue(new TitleOrCategoryFilter('a').matchSlice(s0));
     assert.isFalse(new TitleOrCategoryFilter('x').matchSlice(s0));
 
@@ -236,7 +263,7 @@
   });
 
   test('model_intrinsicTimeUnit', function() {
-    var unit = tr.b.u.TimeDisplayModes;
+    var unit = tr.v.TimeDisplayModes;
     var m = new tr.Model();
 
     // by default it should be milliseconds
diff --git a/catapult/tracing/tracing/model/object_collection.html b/catapult/tracing/tracing/model/object_collection.html
index 43aa8b1..db2aafc 100644
--- a/catapult/tracing/tracing/model/object_collection.html
+++ b/catapult/tracing/tracing/model/object_collection.html
@@ -31,7 +31,8 @@
   function ObjectCollection(parent) {
     tr.model.EventContainer.call(this);
     this.parent = parent;
-    this.instanceMapsById_ = {}; // id -> TimeToObjectInstanceMap
+    // scope -> {id -> TimeToObjectInstanceMap}
+    this.instanceMapsByScopedId_ = {};
     this.instancesByTypeName_ = {};
     this.createObjectInstance_ = this.createObjectInstance_.bind(this);
   }
@@ -57,11 +58,11 @@
     },
 
     createObjectInstance_: function(
-        parent, id, category, name, creationTs, opt_baseTypeName) {
+        parent, scopedId, category, name, creationTs, opt_baseTypeName) {
       var constructor = tr.model.ObjectInstance.getConstructor(
           category, name);
       var instance = new constructor(
-          parent, id, category, name, creationTs, opt_baseTypeName);
+          parent, scopedId, category, name, creationTs, opt_baseTypeName);
       var typeName = instance.typeName;
       var instancesOfTypeName = this.instancesByTypeName_[typeName];
       if (!instancesOfTypeName) {
@@ -72,23 +73,31 @@
       return instance;
     },
 
-    getOrCreateInstanceMap_: function(id) {
-      var instanceMap = this.instanceMapsById_[id];
+    getOrCreateInstanceMap_: function(scopedId) {
+      var dict;
+      if (scopedId.scope in this.instanceMapsByScopedId_) {
+        dict = this.instanceMapsByScopedId_[scopedId.scope];
+      } else {
+        dict = {};
+        this.instanceMapsByScopedId_[scopedId.scope] = dict;
+      }
+      var instanceMap = dict[scopedId.id];
       if (instanceMap)
         return instanceMap;
       instanceMap = new tr.model.TimeToObjectInstanceMap(
-          this.createObjectInstance_, this.parent, id);
-      this.instanceMapsById_[id] = instanceMap;
+          this.createObjectInstance_, this.parent, scopedId);
+      dict[scopedId.id] = instanceMap;
       return instanceMap;
     },
 
-    idWasCreated: function(id, category, name, ts) {
-      var instanceMap = this.getOrCreateInstanceMap_(id);
+    idWasCreated: function(scopedId, category, name, ts) {
+      var instanceMap = this.getOrCreateInstanceMap_(scopedId);
       return instanceMap.idWasCreated(category, name, ts);
     },
 
-    addSnapshot: function(id, category, name, ts, args, opt_baseTypeName) {
-      var instanceMap = this.getOrCreateInstanceMap_(id);
+    addSnapshot: function(
+        scopedId, category, name, ts, args, opt_baseTypeName) {
+      var instanceMap = this.getOrCreateInstanceMap_(scopedId);
       var snapshot = instanceMap.addSnapshot(
           category, name, ts, args, opt_baseTypeName);
       if (snapshot.objectInstance.category != category) {
@@ -113,8 +122,8 @@
       return snapshot;
     },
 
-    idWasDeleted: function(id, category, name, ts) {
-      var instanceMap = this.getOrCreateInstanceMap_(id);
+    idWasDeleted: function(scopedId, category, name, ts) {
+      var instanceMap = this.getOrCreateInstanceMap_(scopedId);
       var deletedInstance = instanceMap.idWasDeleted(category, name, ts);
       if (!deletedInstance)
         return;
@@ -135,27 +144,31 @@
     },
 
     autoDeleteObjects: function(maxTimestamp) {
-      tr.b.iterItems(this.instanceMapsById_, function(id, i2imap) {
-        var lastInstance = i2imap.lastInstance;
-        if (lastInstance.deletionTs != Number.MAX_VALUE)
-          return;
-        i2imap.idWasDeleted(
-            lastInstance.category, lastInstance.name, maxTimestamp);
-        // idWasDeleted will cause lastInstance.deletionTsWasExplicit to be set
-        // to true. Unset it here.
-        lastInstance.deletionTsWasExplicit = false;
+      tr.b.iterItems(this.instanceMapsByScopedId_, function(scope, imapById) {
+        tr.b.iterItems(imapById, function(id, i2imap) {
+          var lastInstance = i2imap.lastInstance;
+          if (lastInstance.deletionTs != Number.MAX_VALUE)
+            return;
+          i2imap.idWasDeleted(
+              lastInstance.category, lastInstance.name, maxTimestamp);
+          // idWasDeleted will cause lastInstance.deletionTsWasExplicit to be
+          // set to true. Unset it here.
+          lastInstance.deletionTsWasExplicit = false;
+        });
       });
     },
 
-    getObjectInstanceAt: function(id, ts) {
-      var instanceMap = this.instanceMapsById_[id];
+    getObjectInstanceAt: function(scopedId, ts) {
+      var instanceMap;
+      if (scopedId.scope in this.instanceMapsByScopedId_)
+        instanceMap = this.instanceMapsByScopedId_[scopedId.scope][scopedId.id];
       if (!instanceMap)
         return undefined;
       return instanceMap.getInstanceAt(ts);
     },
 
-    getSnapshotAt: function(id, ts) {
-      var instance = this.getObjectInstanceAt(id, ts);
+    getSnapshotAt: function(scopedId, ts) {
+      var instance = this.getObjectInstanceAt(scopedId, ts);
       if (!instance)
         return undefined;
       return instance.getSnapshotAt(ts);
@@ -163,8 +176,10 @@
 
     iterObjectInstances: function(iter, opt_this) {
       opt_this = opt_this || this;
-      tr.b.iterItems(this.instanceMapsById_, function(id, i2imap) {
-        i2imap.instances.forEach(iter, opt_this);
+      tr.b.iterItems(this.instanceMapsByScopedId_, function(scope, imapById) {
+        tr.b.iterItems(imapById, function(id, i2imap) {
+          i2imap.instances.forEach(iter, opt_this);
+        });
       });
     },
 
diff --git a/catapult/tracing/tracing/model/object_collection_test.html b/catapult/tracing/tracing/model/object_collection_test.html
index b9503db..7137f89 100644
--- a/catapult/tracing/tracing/model/object_collection_test.html
+++ b/catapult/tracing/tracing/model/object_collection_test.html
@@ -7,14 +7,16 @@
 
 <link rel="import" href="/tracing/core/test_utils.html">
 <link rel="import" href="/tracing/model/object_collection.html">
+<link rel="import" href="/tracing/model/scoped_id.html">
 
 <script>
 'use strict';
 
 tr.b.unittest.testSuite(function() {
-  var TestObjectInstance = function(parent, id, category, name, creationTs) {
+  var TestObjectInstance = function(
+      parent, scopedId, category, name, creationTs) {
     tr.model.ObjectInstance.call(
-        this, parent, id, category, name, creationTs);
+        this, parent, scopedId, category, name, creationTs);
   };
 
   TestObjectInstance.prototype = {
@@ -29,20 +31,21 @@
 
     try {
       var collection = new tr.model.ObjectCollection({ });
+      var scopedId = new tr.model.ScopedId('ptr', '0x1000');
       collection.idWasCreated(
-          '0x1000', 'tr.e.cc', 'Frame', 10);
+          scopedId, 'tr.e.cc', 'Frame', 10);
       collection.idWasDeleted(
-          '0x1000', 'tr.e.cc', 'Frame', 15);
+          scopedId, 'tr.e.cc', 'Frame', 15);
       collection.idWasCreated(
-          '0x1000', 'skia', 'TestObject', 20);
+          scopedId, 'skia', 'TestObject', 20);
       collection.idWasDeleted(
-          '0x1000', 'skia', 'TestObject', 25);
+          scopedId, 'skia', 'TestObject', 25);
 
-      var testFrame = collection.getObjectInstanceAt('0x1000', 10);
+      var testFrame = collection.getObjectInstanceAt(scopedId, 10);
       assert.instanceOf(testFrame, tr.model.ObjectInstance);
       assert.notInstanceOf(testFrame, TestObjectInstance);
 
-      var testObject = collection.getObjectInstanceAt('0x1000', 20);
+      var testObject = collection.getObjectInstanceAt(scopedId, 20);
       assert.instanceOf(testObject, tr.model.ObjectInstance);
       assert.instanceOf(testObject, TestObjectInstance);
     } finally {
@@ -52,51 +55,53 @@
 
   test('twoSnapshots', function() {
     var collection = new tr.model.ObjectCollection({});
+    var scopedId = new tr.model.ScopedId('ptr', '0x1000');
     collection.idWasCreated(
-        '0x1000', 'cat', 'Frame', 10);
+        scopedId, 'cat', 'Frame', 10);
     collection.addSnapshot(
-        '0x1000', 'cat', 'Frame', 10, {foo: 1});
+        scopedId, 'cat', 'Frame', 10, {foo: 1});
     collection.addSnapshot(
-        '0x1000', 'cat', 'Frame', 20, {foo: 2});
+        scopedId, 'cat', 'Frame', 20, {foo: 2});
 
     collection.updateBounds();
     assert.equal(collection.bounds.min, 10);
     assert.equal(collection.bounds.max, 20);
 
-    var s0 = collection.getSnapshotAt('0x1000', 1);
+    var s0 = collection.getSnapshotAt(scopedId, 1);
     assert.isUndefined(s0);
 
-    var s1 = collection.getSnapshotAt('0x1000', 10);
+    var s1 = collection.getSnapshotAt(scopedId, 10);
     assert.equal(s1.args.foo, 1);
 
-    var s2 = collection.getSnapshotAt('0x1000', 15);
+    var s2 = collection.getSnapshotAt(scopedId, 15);
     assert.equal(s2.args.foo, 1);
     assert.equal(s1, s2);
 
-    var s3 = collection.getSnapshotAt('0x1000', 20);
+    var s3 = collection.getSnapshotAt(scopedId, 20);
     assert.equal(s3.args.foo, 2);
     assert.equal(s1.object, s3.object);
 
-    var s4 = collection.getSnapshotAt('0x1000', 25);
+    var s4 = collection.getSnapshotAt(scopedId, 25);
     assert.equal(s4, s3);
   });
 
   test('twoObjectsSharingOneID', function() {
     var collection = new tr.model.ObjectCollection({});
+    var scopedId = new tr.model.ScopedId('ptr', '0x1000');
     collection.idWasCreated(
-        '0x1000', 'tr.e.cc', 'Frame', 10);
+        scopedId, 'tr.e.cc', 'Frame', 10);
     collection.idWasDeleted(
-        '0x1000', 'tr.e.cc', 'Frame', 15);
+        scopedId, 'tr.e.cc', 'Frame', 15);
     collection.idWasCreated(
-        '0x1000', 'skia', 'Picture', 20);
+        scopedId, 'skia', 'Picture', 20);
     collection.idWasDeleted(
-        '0x1000', 'skia', 'Picture', 25);
+        scopedId, 'skia', 'Picture', 25);
 
-    var frame = collection.getObjectInstanceAt('0x1000', 10);
+    var frame = collection.getObjectInstanceAt(scopedId, 10);
     assert.equal(frame.category, 'tr.e.cc');
     assert.equal(frame.name, 'Frame');
 
-    var picture = collection.getObjectInstanceAt('0x1000', 20);
+    var picture = collection.getObjectInstanceAt(scopedId, 20);
     assert.equal(picture.category, 'skia');
     assert.equal(picture.name, 'Picture');
 
@@ -115,18 +120,19 @@
 
   test('createSnapDelete', function() {
     var collection = new tr.model.ObjectCollection({});
+    var scopedId = new tr.model.ScopedId('ptr', '0x1000');
     collection.idWasCreated(
-        '0x1000', 'cat', 'Frame', 10);
+        scopedId, 'cat', 'Frame', 10);
     collection.addSnapshot(
-        '0x1000', 'cat', 'Frame', 10, {foo: 1});
+        scopedId, 'cat', 'Frame', 10, {foo: 1});
     collection.idWasDeleted(
-        '0x1000', 'cat', 'Frame', 15);
+        scopedId, 'cat', 'Frame', 15);
 
     collection.updateBounds();
     assert.equal(collection.bounds.min, 10);
     assert.equal(collection.bounds.max, 15);
 
-    var s10 = collection.getSnapshotAt('0x1000', 10);
+    var s10 = collection.getSnapshotAt(scopedId, 10);
     var i10 = s10.objectInstance;
     assert.equal(i10.creationTs, 10);
     assert.equal(i10.deletionTs, 15);
@@ -134,10 +140,11 @@
 
   test('boundsOnUndeletedObject', function() {
     var collection = new tr.model.ObjectCollection({});
+    var scopedId = new tr.model.ScopedId('ptr', '0x1000');
     collection.idWasCreated(
-        '0x1000', 'cat', 'Frame', 10);
+        scopedId, 'cat', 'Frame', 10);
     collection.addSnapshot(
-        '0x1000', 'cat', 'Frame', 15, {foo: 1});
+        scopedId, 'cat', 'Frame', 15, {foo: 1});
 
     collection.updateBounds();
     assert.equal(10, collection.bounds.min);
@@ -146,10 +153,11 @@
 
   test('snapshotWithCustomBaseTypeThenDelete', function() {
     var collection = new tr.model.ObjectCollection({});
+    var scopedId = new tr.model.ScopedId('ptr', '0x1000');
     var s10 = collection.addSnapshot(
-        '0x1000', 'cat', 'cc::PictureLayerImpl', 10, {}, 'cc::LayerImpl');
+        scopedId, 'cat', 'cc::PictureLayerImpl', 10, {}, 'cc::LayerImpl');
     collection.idWasDeleted(
-        '0x1000', 'cat', 'cc::LayerImpl', 15);
+        scopedId, 'cat', 'cc::LayerImpl', 15);
     collection.updateBounds();
     assert.equal(10, collection.bounds.min);
     assert.equal(15, collection.bounds.max);
@@ -159,10 +167,11 @@
 
   test('newWithSnapshotThatChangesBaseType', function() {
     var collection = new tr.model.ObjectCollection({});
+    var scopedId = new tr.model.ScopedId('ptr', '0x1000');
     var i10 = collection.idWasCreated(
-        '0x1000', 'cat', 'cc::LayerImpl', 10);
+        scopedId, 'cat', 'cc::LayerImpl', 10);
     var s15 = collection.addSnapshot(
-        '0x1000', 'cat', 'cc::PictureLayerImpl', 15, {}, 'cc::LayerImpl');
+        scopedId, 'cat', 'cc::PictureLayerImpl', 15, {}, 'cc::LayerImpl');
     collection.updateBounds();
     assert.equal(10, collection.bounds.min);
     assert.equal(15, collection.bounds.max);
@@ -173,10 +182,11 @@
 
   test('deleteThenSnapshotWithCustomBase', function() {
     var collection = new tr.model.ObjectCollection({});
+    var scopedId = new tr.model.ScopedId('ptr', '0x1000');
     collection.idWasDeleted(
-        '0x1000', 'cat', 'cc::LayerImpl', 10);
+        scopedId, 'cat', 'cc::LayerImpl', 10);
     var s15 = collection.addSnapshot(
-        '0x1000', 'cat', 'cc::PictureLayerImpl', 15, {}, 'cc::LayerImpl');
+        scopedId, 'cat', 'cc::PictureLayerImpl', 15, {}, 'cc::LayerImpl');
     collection.updateBounds();
     assert.equal(10, collection.bounds.min);
     assert.equal(15, collection.bounds.max);
@@ -185,16 +195,37 @@
 
   test('autoDelete', function() {
     var collection = new tr.model.ObjectCollection({});
+    var scopedId = new tr.model.ScopedId('ptr', '0x1000');
     collection.idWasCreated(
-        '0x1000', 'cat', 'Frame', 10);
+        scopedId, 'cat', 'Frame', 10);
     collection.addSnapshot(
-        '0x1000', 'cat', 'Frame', 10, {foo: 1});
+        scopedId, 'cat', 'Frame', 10, {foo: 1});
     collection.autoDeleteObjects(15);
 
-    var s10 = collection.getSnapshotAt('0x1000', 10);
+    var s10 = collection.getSnapshotAt(scopedId, 10);
     var i10 = s10.objectInstance;
     assert.equal(15, i10.deletionTs);
   });
+
+  test('differentScopes', function() {
+    var collection = new tr.model.ObjectCollection({});
+    var scopedId1 = new tr.model.ScopedId('ptr', '0x1000');
+    var scopedId2 = new tr.model.ScopedId('cc', '0x1000');
+    collection.idWasCreated(
+        scopedId1, 'cat', 'ptr::object', 10);
+    collection.idWasDeleted(
+        scopedId1, 'cat', 'ptr::object', 15);
+    collection.idWasCreated(
+        scopedId2, 'cat', 'cc::object', 10);
+    collection.idWasDeleted(
+        scopedId2, 'cat', 'cc::object', 15);
+
+    var instance = collection.getObjectInstanceAt(scopedId1, 10);
+    assert.equal(instance.name, 'ptr::object');
+
+    instance = collection.getObjectInstanceAt(scopedId2, 10);
+    assert.equal(instance.name, 'cc::object');
+  });
 });
 </script>
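The differentScopes test above is the payoff of keying the collection's instance maps by scope first and raw id second: 'ptr'/'0x1000' and 'cc'/'0x1000' now resolve to different instances instead of colliding. A compact sketch of that two-level lookup (plain objects, illustrative names; the real structure is instanceMapsByScopedId_ in object_collection.html):

  // scope -> (id -> value), mirroring the lookup pattern above.
  function ScopedMap() {
    this.byScope_ = {};
  }
  ScopedMap.prototype.getOrCreate = function(scopedId, createFn) {
    var byId = this.byScope_[scopedId.scope];
    if (!byId) {
      byId = {};
      this.byScope_[scopedId.scope] = byId;
    }
    if (!(scopedId.id in byId))
      byId[scopedId.id] = createFn();
    return byId[scopedId.id];
  };
  ScopedMap.prototype.get = function(scopedId) {
    var byId = this.byScope_[scopedId.scope];
    return byId ? byId[scopedId.id] : undefined;
  };

  var map = new ScopedMap();
  map.getOrCreate({scope: 'ptr', id: '0x1000'},
                  function() { return 'ptr::object'; });
  map.getOrCreate({scope: 'cc', id: '0x1000'},
                  function() { return 'cc::object'; });
  map.get({scope: 'cc', id: '0x1000'});  // 'cc::object', distinct from 'ptr'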
 
diff --git a/catapult/tracing/tracing/model/object_instance.html b/catapult/tracing/tracing/model/object_instance.html
index 056ed8b..cfc1356 100644
--- a/catapult/tracing/tracing/model/object_instance.html
+++ b/catapult/tracing/tracing/model/object_instance.html
@@ -27,10 +27,10 @@
    * @constructor
    */
   function ObjectInstance(
-      parent, id, category, name, creationTs, opt_baseTypeName) {
+      parent, scopedId, category, name, creationTs, opt_baseTypeName) {
     tr.model.Event.call(this);
     this.parent = parent;
-    this.id = id;
+    this.scopedId = scopedId;
     this.category = category;
     this.baseTypeName = opt_baseTypeName ? opt_baseTypeName : name;
     this.name = name;
@@ -171,7 +171,7 @@
     },
 
     get userFriendlyName() {
-      return this.typeName + ' object ' + this.id;
+      return this.typeName + ' object ' + this.scopedId;
     }
   };
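userFriendlyName now concatenates this.scopedId into a string, which presumably relies on ScopedId providing a toString(); the tests in this patch only show its constructor taking (scope, id). A minimal stand-in consistent with that usage (hypothetical, not the actual /tracing/model/scoped_id.html):

  // Hypothetical minimal ScopedId; the real class is imported from
  // /tracing/model/scoped_id.html and may format itself differently.
  function ScopedId(scope, id) {
    this.scope = scope;
    this.id = id;
  }
  ScopedId.prototype.toString = function() {
    return '{scope: ' + this.scope + ', id: ' + this.id + '}';
  };

  // 'Frame object {scope: ptr, id: 0x1000}' rather than
  // 'Frame object [object Object]'.
  var label = 'Frame object ' + new ScopedId('ptr', '0x1000');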
 
diff --git a/catapult/tracing/tracing/model/object_snapshot.html b/catapult/tracing/tracing/model/object_snapshot.html
index 54d868a..6b0e590 100644
--- a/catapult/tracing/tracing/model/object_snapshot.html
+++ b/catapult/tracing/tracing/model/object_snapshot.html
@@ -6,8 +6,8 @@
 -->
 
 <link rel="import" href="/tracing/base/extension_registry.html">
-<link rel="import" href="/tracing/base/units/time_stamp.html">
 <link rel="import" href="/tracing/model/event.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <script>
 'use strict';
@@ -65,7 +65,7 @@
       return 'Snapshot of ' +
              this.objectInstance.typeName + ' ' +
              this.objectInstance.id + ' @ ' +
-             tr.b.u.TimeStamp.format(this.ts);
+             tr.v.Unit.byName.timeStampInMs.format(this.ts);
     }
   };
 
diff --git a/catapult/tracing/tracing/model/power_series.html b/catapult/tracing/tracing/model/power_series.html
index a9fbccd..794f73b 100644
--- a/catapult/tracing/tracing/model/power_series.html
+++ b/catapult/tracing/tracing/model/power_series.html
@@ -73,8 +73,13 @@
 
         var timeIntersection = measurementRange.findIntersection(sampleRange);
 
+        // Divide by 1000 to convert milliwatts to watts.
+        var powerInWatts = sample.power / 1000.0;
+
         // Divide by 1000 to convert milliseconds to seconds.
-        energyConsumed += timeIntersection.duration / 1000 * sample.power;
+        var durationInSeconds = timeIntersection.duration / 1000;
+
+        energyConsumed += durationInSeconds * powerInWatts;
       }
 
       return energyConsumed;
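With both conversions in place the accumulated quantity is energy in joules: watts (milliwatts / 1000) times seconds (milliseconds / 1000). That is why the expectations in power_series_test.html below shrink by a factor of 1000; for example, a 1 mW sample held for 1000 ms contributes 0.001 J. A worked sketch of the same arithmetic (illustrative helper, not part of PowerSeries):

  // Energy in joules contributed by one sample over its overlap with the
  // query range.
  function energyForSample(durationMs, powerMw) {
    var powerInWatts = powerMw / 1000.0;        // mW -> W
    var durationInSeconds = durationMs / 1000;  // ms -> s
    return durationInSeconds * powerInWatts;    // W * s = J
  }

  energyForSample(1000, 1);  // 0.001, matching getEnergyConsumed(0, 1000)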
diff --git a/catapult/tracing/tracing/model/power_series_test.html b/catapult/tracing/tracing/model/power_series_test.html
index cebb20f..a0bfa90 100644
--- a/catapult/tracing/tracing/model/power_series_test.html
+++ b/catapult/tracing/tracing/model/power_series_test.html
@@ -47,7 +47,7 @@
     series.addPowerSample(0, 1);
     series.addPowerSample(1000, 2);
 
-    assert.equal(series.getEnergyConsumed(0, 1000), 1);
+    assert.equal(series.getEnergyConsumed(0, 1000), 0.001);
   });
 
   test('getEnergyConsumed_twoSamples', function() {
@@ -55,14 +55,14 @@
     series.addPowerSample(0, 1);
     series.addPowerSample(1000, 2);
 
-    assert.equal(series.getEnergyConsumed(0, 2000), 3);
+    assert.equal(series.getEnergyConsumed(0, 2000), 0.003);
   });
 
   test('getEnergyConsumed_firstSampleAfterStart', function() {
     var series = new PowerSeries(new Model().device);
     series.addPowerSample(1000, 1);
 
-    assert.equal(series.getEnergyConsumed(0, 2000), 1);
+    assert.equal(series.getEnergyConsumed(0, 2000), 0.001);
   });
 
   test('getEnergyConsumed_extraSamplesBeforeStart', function() {
@@ -72,7 +72,7 @@
     series.addPowerSample(2000, 1);
     series.addPowerSample(3000, 1);
 
-    assert.equal(series.getEnergyConsumed(2000, 4000), 2);
+    assert.equal(series.getEnergyConsumed(2000, 4000), 0.002);
   });
 
   test('getEnergyConsumed_extraSamplesAfterEnd', function() {
@@ -82,7 +82,7 @@
     series.addPowerSample(2000, 1);
     series.addPowerSample(3000, 10);
 
-    assert.equal(series.getEnergyConsumed(0, 2000), 2);
+    assert.equal(series.getEnergyConsumed(0, 2000), 0.002);
   });
 
   test('shiftTimestampsForward', function() {
diff --git a/catapult/tracing/tracing/model/process.html b/catapult/tracing/tracing/model/process.html
index 59fc82f..e2252df 100644
--- a/catapult/tracing/tracing/model/process.html
+++ b/catapult/tracing/tracing/model/process.html
@@ -88,10 +88,6 @@
         this.memoryDumps.forEach(callback, opt_this);
     },
 
-    pushInstantEvent: function(instantEvent) {
-      this.instantEvents.push(instantEvent);
-    },
-
     addLabelIfNeeded: function(labelName) {
       for (var i = 0; i < this.labels.length; i++) {
         if (this.labels[i] === labelName)
@@ -126,8 +122,8 @@
     },
 
     shiftTimestampsForward: function(amount) {
-      for (var id in this.instantEvents)
-        this.instantEvents[id].start += amount;
+      for (var i = 0; i < this.instantEvents.length; i++)
+        this.instantEvents[i].start += amount;
 
       for (var i = 0; i < this.frames.length; i++)
         this.frames[i].shiftTimestampsForward(amount);
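The indexed loop replaces a for...in loop over instantEvents; for...in enumerates property keys as strings, and would also pick up any inherited enumerable properties, rather than iterating array elements, so a plain index loop is the safer way to shift every event. A tiny standalone sketch of the same shift (illustrative):

  // Shift every instant event's start time forward by `amount` ms.
  function shiftInstantEvents(instantEvents, amount) {
    for (var i = 0; i < instantEvents.length; i++)
      instantEvents[i].start += amount;
  }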
diff --git a/catapult/tracing/tracing/model/process_memory_dump.html b/catapult/tracing/tracing/model/process_memory_dump.html
index aa993cb..c9d6071 100644
--- a/catapult/tracing/tracing/model/process_memory_dump.html
+++ b/catapult/tracing/tracing/model/process_memory_dump.html
@@ -5,10 +5,10 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/tracing/base/units/time_stamp.html">
-<link rel="import" href="/tracing/model/attribute.html">
 <link rel="import" href="/tracing/model/container_memory_dump.html">
 <link rel="import" href="/tracing/model/memory_allocator_dump.html">
+<link rel="import" href="/tracing/model/vm_region.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <script>
 'use strict';
@@ -26,16 +26,15 @@
   // winheap/malloc allocator dump tree.
   var TRACING_OVERHEAD_PATH = ['allocated_objects', 'tracing_overhead'];
 
-  var SIZE_ATTRIBUTE_NAME = tr.model.MemoryAllocatorDump.SIZE_ATTRIBUTE_NAME;
-  var RESIDENT_SIZE_ATTRIBUTE_NAME =
-      tr.model.MemoryAllocatorDump.RESIDENT_SIZE_ATTRIBUTE_NAME;
+  var SIZE_NUMERIC_NAME = tr.model.MemoryAllocatorDump.SIZE_NUMERIC_NAME;
+  var RESIDENT_SIZE_NUMERIC_NAME =
+      tr.model.MemoryAllocatorDump.RESIDENT_SIZE_NUMERIC_NAME;
 
-  function getSizeAttrValue(dump, sizeAttrName, opt_model) {
-    var sizeAttr = dump.getValidSizeAttributeOrUndefined(
-        sizeAttrName, opt_model);
-    if (sizeAttr === undefined)
+  function getSizeNumericValue(dump, sizeNumericName) {
+    var sizeNumeric = dump.numerics[sizeNumericName];
+    if (sizeNumeric === undefined)
       return 0;
-    return sizeAttr.value;
+    return sizeNumeric.value;
   }
 
   /**
@@ -57,7 +56,7 @@
     //     (number)
     this.totals = undefined;
 
-    this.vmRegions_ = undefined;
+    this.vmRegions = undefined;
 
     // Map from allocator names to heap dumps.
     this.heapDumps = undefined;
@@ -71,7 +70,7 @@
 
     get userFriendlyName() {
       return 'Process memory dump at ' +
-          tr.b.u.TimeStamp.format(this.start);
+          tr.v.Unit.byName.timeStampInMs.format(this.start);
     },
 
     get containerName() {
@@ -84,30 +83,8 @@
       return dumps;
     },
 
-    get vmRegions() {
-      return this.vmRegions_;
-    },
-
-    set vmRegions(vmRegions) {
-      this.vmRegions_ = vmRegions;
-    },
-
     get hasOwnVmRegions() {
-      return this.vmRegions_ !== undefined;
-    },
-
-    getMostRecentTotalVmRegionStat: function(statName) {
-      if (this.mostRecentVmRegions === undefined)
-        return undefined;
-
-      var total = 0;
-      this.mostRecentVmRegions.forEach(function(vmRegion) {
-        var statValue = vmRegion.byteStats[statName];
-        if (statValue === undefined)
-          return;
-        total += statValue;
-      });
-      return total;
+      return this.vmRegions !== undefined;
     },
 
     setUpTracingOverheadOwnership: function(opt_model) {
@@ -181,7 +158,7 @@
       // Force rebuilding the memory allocator dump index (if we've just added
       // a new memory allocator dump).
       if (hasDiscountedFromAllocatorDumps)
-        this.memoryAllocatorDumps = this.memoryAllocatorDumps;
+        this.forceRebuildingMemoryAllocatorDumpByFullNameIndex();
     },
 
     discountTracingOverheadFromVmRegions: function(opt_model) {
@@ -195,9 +172,10 @@
       if (tracingDump === undefined)
         return;
 
-      var discountedSize = getSizeAttrValue(tracingDump, SIZE_ATTRIBUTE_NAME);
+      var discountedSize =
+          getSizeNumericValue(tracingDump, SIZE_NUMERIC_NAME);
       var discountedResidentSize =
-          getSizeAttrValue(tracingDump, RESIDENT_SIZE_ATTRIBUTE_NAME);
+          getSizeNumericValue(tracingDump, RESIDENT_SIZE_NUMERIC_NAME);
 
       if (discountedSize <= 0 && discountedResidentSize <= 0)
         return;
@@ -214,40 +192,25 @@
       // tracing resident_size from byte stats (private dirty and PSS) and
       // tracing size from virtual size by injecting a fake VM region with
       // negative values.
-      if (this.vmRegions_ !== undefined) {
-        var hasSizeInBytes = false;
-        var hasPrivateDirtyResident = false;
-        var hasProportionalResident = false;
-
-        for (var i = 0; i < this.vmRegions_.length; i++) {
-          var vmRegion = this.vmRegions_[i];
-          if (vmRegion.sizeInBytes !== undefined)
-            hasSizeInBytes = true;
-
-          var byteStats = vmRegion.byteStats;
-          if (byteStats.privateDirtyResident !== undefined)
-            hasPrivateDirtyResident = true;
-          if (byteStats.proportionalResident !== undefined)
-            hasProportionalResident = true;
-
-          if (hasSizeInBytes && hasPrivateDirtyResident &&
-              hasProportionalResident) {
-            break;
-          }
-        }
+      if (this.vmRegions !== undefined) {
+        var hasSizeInBytes = this.vmRegions.sizeInBytes !== undefined;
+        var hasPrivateDirtyResident =
+            this.vmRegions.byteStats.privateDirtyResident !== undefined;
+        var hasProportionalResident =
+            this.vmRegions.byteStats.proportionalResident !== undefined;
 
         if ((hasSizeInBytes && discountedSize > 0) ||
             ((hasPrivateDirtyResident || hasProportionalResident) &&
                 discountedResidentSize > 0)) {
-          this.vmRegions_.push(VMRegion.fromDict({
+          var byteStats = {};
+          if (hasPrivateDirtyResident)
+            byteStats.privateDirtyResident = -discountedResidentSize;
+          if (hasProportionalResident)
+            byteStats.proportionalResident = -discountedResidentSize;
+          this.vmRegions.addRegion(tr.model.VMRegion.fromDict({
             mappedFile: '[discounted tracing overhead]',
             sizeInBytes: hasSizeInBytes ? -discountedSize : undefined,
-            byteStats: {
-              privateDirtyResident: hasPrivateDirtyResident ?
-                  -discountedResidentSize : undefined,
-              proportionalResident: hasProportionalResident ?
-                  -discountedResidentSize : undefined
-            }
+            byteStats: byteStats
           }));
         }
       }
@@ -259,75 +222,14 @@
 
     processDumps.forEach(function(processDump) {
       // Update the most recent VM regions from the current dump.
-      if (processDump.vmRegions_ !== undefined)
-        mostRecentVmRegions = processDump.vmRegions_;
+      if (processDump.vmRegions !== undefined)
+        mostRecentVmRegions = processDump.vmRegions;
 
       // Set the most recent VM regions of the current dump.
       processDump.mostRecentVmRegions = mostRecentVmRegions;
     });
   };
 
-  /**
-   * @constructor
-   */
-  function VMRegion(startAddress, sizeInBytes, protectionFlags,
-      mappedFile, byteStats) {
-    this.startAddress = startAddress;
-    this.sizeInBytes = sizeInBytes;
-    this.protectionFlags = protectionFlags;
-    this.mappedFile = mappedFile;
-    this.byteStats = byteStats;
-  };
-
-  VMRegion.PROTECTION_FLAG_READ = 4;
-  VMRegion.PROTECTION_FLAG_WRITE = 2;
-  VMRegion.PROTECTION_FLAG_EXECUTE = 1;
-
-  VMRegion.prototype = {
-    get protectionFlagsToString() {
-      if (this.protectionFlags === undefined)
-        return undefined;
-      return (
-          (this.protectionFlags & VMRegion.PROTECTION_FLAG_READ ? 'r' : '-') +
-          (this.protectionFlags & VMRegion.PROTECTION_FLAG_WRITE ? 'w' : '-') +
-          (this.protectionFlags & VMRegion.PROTECTION_FLAG_EXECUTE ? 'x' : '-')
-      );
-    }
-  };
-
-  VMRegion.fromDict = function(dict) {
-    return new VMRegion(
-        dict.startAddress,
-        dict.sizeInBytes,
-        dict.protectionFlags,
-        dict.mappedFile,
-        VMRegionByteStats.fromDict(dict.byteStats));
-  };
-
-  /**
-   * @constructor
-   */
-  function VMRegionByteStats(privateCleanResident, privateDirtyResident,
-                             sharedCleanResident, sharedDirtyResident,
-                             proportionalResident, swapped) {
-    this.privateCleanResident = privateCleanResident;
-    this.privateDirtyResident = privateDirtyResident;
-    this.sharedCleanResident = sharedCleanResident;
-    this.sharedDirtyResident = sharedDirtyResident;
-    this.proportionalResident = proportionalResident;
-    this.swapped = swapped;
-  }
-
-  VMRegionByteStats.fromDict = function(dict) {
-    return new VMRegionByteStats(
-        dict.privateCleanResident,
-        dict.privateDirtyResident,
-        dict.sharedCleanResident,
-        dict.sharedDirtyResident,
-        dict.proportionalResident,
-        dict.swapped);
-  }
-
   tr.model.EventRegistry.register(
       ProcessMemoryDump,
       {
@@ -338,9 +240,7 @@
       });
 
   return {
-    ProcessMemoryDump: ProcessMemoryDump,
-    VMRegion: VMRegion,
-    VMRegionByteStats: VMRegionByteStats
+    ProcessMemoryDump: ProcessMemoryDump
   };
 });
 </script>
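Allocator dump sizes are now read from the numerics dictionary rather than from size attributes, and getSizeNumericValue falls back to 0 when the numeric is absent, which keeps the overhead-discount math simple. A brief usage sketch of that lookup (the dump object below is a hand-rolled stand-in, not a real MemoryAllocatorDump):

  // getSizeNumericValue(dump, name) as defined above returns
  // dump.numerics[name].value, or 0 when that numeric is missing.
  var fakeTracingDump = {numerics: {size: {value: 1024}}};
  getSizeNumericValue(fakeTracingDump, 'size');           // 1024
  getSizeNumericValue(fakeTracingDump, 'resident_size');  // 0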
diff --git a/catapult/tracing/tracing/model/process_memory_dump_test.html b/catapult/tracing/tracing/model/process_memory_dump_test.html
index f222af4..771c593 100644
--- a/catapult/tracing/tracing/model/process_memory_dump_test.html
+++ b/catapult/tracing/tracing/model/process_memory_dump_test.html
@@ -6,11 +6,13 @@
 -->
 
 <link rel="import" href="/tracing/core/test_utils.html">
-<link rel="import" href="/tracing/model/attribute.html">
-<link rel="import" href="/tracing/model/model.html">
 <link rel="import" href="/tracing/model/global_memory_dump.html">
-<link rel="import" href="/tracing/model/memory_allocator_dump.html">
+<link rel="import" href="/tracing/model/memory_dump_test_utils.html">
+<link rel="import" href="/tracing/model/model.html">
 <link rel="import" href="/tracing/model/process_memory_dump.html">
+<link rel="import" href="/tracing/model/vm_region.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <script>
 'use strict';
@@ -18,10 +20,29 @@
 tr.b.unittest.testSuite(function() {
   var GlobalMemoryDump = tr.model.GlobalMemoryDump;
   var ProcessMemoryDump = tr.model.ProcessMemoryDump;
-  var MemoryAllocatorDump = tr.model.MemoryAllocatorDump;
-  var MemoryAllocatorDumpLink = tr.model.MemoryAllocatorDumpLink;
   var VMRegion = tr.model.VMRegion;
-  var ScalarAttribute = tr.model.ScalarAttribute;
+  var VMRegionClassificationNode = tr.model.VMRegionClassificationNode;
+  var ScalarNumeric = tr.v.ScalarNumeric;
+  var unitlessNumber_smallerIsBetter =
+      tr.v.Unit.byName.unitlessNumber_smallerIsBetter;
+  var newAllocatorDump = tr.model.MemoryDumpTestUtils.newAllocatorDump;
+  var newChildDump = tr.model.MemoryDumpTestUtils.newChildDump;
+  var addOwnershipLink = tr.model.MemoryDumpTestUtils.addOwnershipLink;
+  var checkDumpNumericsAndDiagnostics =
+      tr.model.MemoryDumpTestUtils.checkDumpNumericsAndDiagnostics;
+  var checkVMRegions = tr.model.MemoryDumpTestUtils.checkVMRegions;
+
+  function createClassificationNode(opt_sizeInBytes, opt_byteStats) {
+    var node = new VMRegionClassificationNode();
+    if (opt_sizeInBytes !== undefined || opt_byteStats !== undefined) {
+      node.addRegion(VMRegion.fromDict({
+        mappedFile: 'mock.so',
+        sizeInBytes: opt_sizeInBytes,
+        byteStats: opt_byteStats
+      }));
+    }
+    return node;
+  }
 
   function createProcessMemoryDump(timestamp, model) {
     var gmd = new GlobalMemoryDump(model, timestamp);
@@ -52,21 +73,6 @@
     return pmds;
   }
 
-  function checkProtectionFlagsToString(protectionFlags, expectedString) {
-    var vmRegion = VMRegion.fromDict({
-      startAddress: 256,
-      sizeInBytes: 336,
-      protectionFlags: protectionFlags,
-      mappedFile: '[stack:20310]',
-      byteStats: {
-        privateDirtyResident: 96,
-        swapped: 144,
-        proportionalResident: 158
-      }
-    });
-    assert.strictEqual(vmRegion.protectionFlagsToString, expectedString);
-  }
-
   test('processMemoryDumps', function() {
     var pmd = createFinalizedProcessMemoryDump(42);
     var pmds = pmd.processMemoryDumps;
@@ -74,86 +80,6 @@
     assert.strictEqual(pmds[123], pmd);
   });
 
-  test('totalResidentSizeInBytes_undefinedVmRegions', function() {
-    var pmd = createFinalizedProcessMemoryDump(42, function(pmd) {});
-    assert.isUndefined(pmd.mostRecentTotalProportionalResidentSizeInBytes);
-    assert.isUndefined(
-        pmd.getMostRecentTotalVmRegionStat('privateDirtyResident'));
-    assert.isUndefined(
-        pmd.getMostRecentTotalVmRegionStat('privateCleanResident'));
-  });
-
-  test('totalResidentSizeInBytes_zeroVmRegions', function() {
-    var pmd = createFinalizedProcessMemoryDump(42, function(pmd) {
-      pmd.vmRegions = [];
-    });
-    assert.equal(pmd.getMostRecentTotalVmRegionStat('proportionalResident'), 0);
-    assert.equal(pmd.getMostRecentTotalVmRegionStat('privateDirtyResident'), 0);
-    assert.equal(pmd.getMostRecentTotalVmRegionStat('privateCleanResident'), 0);
-  });
-
-  test('totalResidentSizeInBytes_oneVmRegion', function() {
-    var pmd = createFinalizedProcessMemoryDump(42, function(pmd) {
-      pmd.vmRegions = [
-        VMRegion.fromDict({
-          startAddress: 256,
-          sizeInBytes: 336,
-          protectionFlags: VMRegion.PROTECTION_FLAG_READ |
-              VMRegion.PROTECTION_FLAG_WRITE,
-          mappedFile: '[stack:20310]',
-          byteStats: {
-            privateDirtyResident: 96,
-            swapped: 144,
-            proportionalResident: 158
-          }
-        })
-      ];
-    });
-    assert.equal(
-        pmd.getMostRecentTotalVmRegionStat('proportionalResident'), 158);
-    assert.equal(
-        pmd.getMostRecentTotalVmRegionStat('privateDirtyResident'), 96);
-    assert.equal(pmd.getMostRecentTotalVmRegionStat('privateCleanResident'), 0);
-  });
-
-  test('totalResidentSizeInBytes_twoVmRegions', function() {
-    var pmd = createFinalizedProcessMemoryDump(42, function(pmd) {
-      pmd.vmRegions = [
-        VMRegion.fromDict({
-          startAddress: 256,
-          sizeInBytes: 336,
-          protectionFlags: VMRegion.PROTECTION_FLAG_READ |
-              VMRegion.PROTECTION_FLAG_WRITE,
-          mappedFile: '[stack:20310]',
-          byteStats: {
-            privateDirtyResident: 96,
-            swapped: 144,
-            proportionalResident: 158
-          }
-        }),
-        VMRegion.fromDict({
-          startAddress: 848,
-          sizeInBytes: 592,
-          protectionFlags: VMRegion.PROTECTION_FLAG_READ |
-              VMRegion.PROTECTION_FLAG_EXECUTE,
-          mappedFile: '/dev/ashmem/dalvik',
-          byteStats: {
-            privateDirtyResident: 205,
-            privateCleanResident: 0,
-            proportionalResident: 205
-          }
-        })
-      ];
-    });
-    assert.equal(
-        pmd.getMostRecentTotalVmRegionStat('proportionalResident'), 363);
-    assert.equal(
-        pmd.getMostRecentTotalVmRegionStat('privateDirtyResident'), 301);
-    assert.equal(pmd.getMostRecentTotalVmRegionStat('swapped'), 144);
-    assert.equal(pmd.getMostRecentTotalVmRegionStat('privateCleanResident'), 0);
-    assert.equal(pmd.getMostRecentTotalVmRegionStat('sharedCleanResident'), 0);
-  });
-
   test('hookUpMostRecentVmRegionsLinks_emptyArray', function() {
     var dumps = [];
     ProcessMemoryDump.hookUpMostRecentVmRegionsLinks(dumps);
@@ -168,55 +94,38 @@
 
     // A dump with VM regions and malloc and Oilpan allocator dumps.
     var dump2 = createProcessMemoryDump(2, m);
-    dump2.vmRegions = [];
-    dump2.memoryAllocatorDumps = (function() {
-      var oilpanDump = new MemoryAllocatorDump('oilpan');
-      oilpanDump.addAttribute('size', new ScalarAttribute('bytes', 1024));
-      oilpanDump.addAttribute('objects_count',
-          new ScalarAttribute('objects', 7));
-      oilpanDump.addAttribute('inner_size', new ScalarAttribute('bytes', 768));
-
-      var v8Dump = new MemoryAllocatorDump('v8');
-      v8Dump.addAttribute('size', new ScalarAttribute('bytes', 2048));
-      v8Dump.addAttribute('objects_count', new ScalarAttribute('objects', 15));
-      v8Dump.addAttribute('inner_size', new ScalarAttribute('bytes', 1999));
-
-      return [oilpanDump. v8Dump];
-    })();
+    dump2.vmRegions = createClassificationNode();
+    dump2.memoryAllocatorDumps = [
+      newAllocatorDump(dump2, 'oilpan', {
+        size: 1024,
+        objects_count: new ScalarNumeric(unitlessNumber_smallerIsBetter, 7),
+        inner_size: 768
+      }),
+      newAllocatorDump(dump2, 'v8', {
+        size: 2048,
+        objects_count: new ScalarNumeric(unitlessNumber_smallerIsBetter, 15),
+        inner_size: 1999
+      })
+    ];
 
     // A dump with malloc and V8 allocator dumps.
     var dump3 = createProcessMemoryDump(3, m);
-    dump3.memoryAllocatorDumps = (function() {
-      var mallocDump = new MemoryAllocatorDump('malloc');
-      mallocDump.addAttribute('size', new ScalarAttribute('bytes', 1024));
-      mallocDump.addAttribute('objects_count',
-          new ScalarAttribute('objects', 7));
-      mallocDump.addAttribute('inner_size', new ScalarAttribute('bytes', 768));
-
-      var v8Dump = new MemoryAllocatorDump('v8');
-      v8Dump.addAttribute('size', new ScalarAttribute('bytes', 2048));
-      v8Dump.addAttribute('objects_count', new ScalarAttribute('objects', 15));
-      v8Dump.addAttribute('inner_size', new ScalarAttribute('bytes', 1999));
-
-      return [mallocDump. v8Dump];
-    })();
+    dump3.memoryAllocatorDumps = [
+      newAllocatorDump(dump3, 'malloc', {
+        size: 1024,
+        objects_count: new ScalarNumeric(unitlessNumber_smallerIsBetter, 7),
+        inner_size: 768
+      }),
+      newAllocatorDump(dump3, 'v8', {
+        size: 2048,
+        objects_count: new ScalarNumeric(unitlessNumber_smallerIsBetter, 15),
+        inner_size: 1999
+      })
+    ];
 
     // A dump with VM regions.
     var dump4 = createProcessMemoryDump(4, m);
-    dump4.vmRegions = [
-      VMRegion.fromDict({
-        startAddress: 256,
-        sizeInBytes: 336,
-        protectionFlags: VMRegion.PROTECTION_FLAG_READ |
-            VMRegion.PROTECTION_FLAG_WRITE,
-        mappedFile: '[stack:20310]',
-        byteStats: {
-          privateResident: 96,
-          sharedResident: 144,
-          proportionalResident: 158
-        }
-      })
-    ];
+    dump4.vmRegions = createClassificationNode();
 
     var dumps = [dump1, dump2, dump3, dump4];
     ProcessMemoryDump.hookUpMostRecentVmRegionsLinks(dumps);
@@ -227,81 +136,53 @@
     assert.isUndefined(dump1.mostRecentVmRegions);
 
     assert.equal(dumps[1], dump2);
-    assert.equal(dump2.mostRecentVmRegions, dump2.vmRegions_);
+    assert.equal(dump2.mostRecentVmRegions, dump2.vmRegions);
 
     assert.equal(dumps[2], dump3);
-    assert.equal(dump3.mostRecentVmRegions, dump2.vmRegions_);
+    assert.equal(dump3.mostRecentVmRegions, dump2.vmRegions);
 
     assert.equal(dumps[3], dump4);
-    assert.equal(dump4.mostRecentVmRegions, dump4.vmRegions_);
-  });
-
-  test('vmRegion_protectionFlagsToString', function() {
-    checkProtectionFlagsToString(undefined, undefined);
-    checkProtectionFlagsToString(0, '---');
-    checkProtectionFlagsToString(VMRegion.PROTECTION_FLAG_READ, 'r--');
-    checkProtectionFlagsToString(
-        VMRegion.PROTECTION_FLAG_READ | VMRegion.PROTECTION_FLAG_EXECUTE,
-        'r-x');
-    checkProtectionFlagsToString(
-        VMRegion.PROTECTION_FLAG_READ | VMRegion.PROTECTION_FLAG_WRITE,
-        'rw-');
+    assert.equal(dump4.mostRecentVmRegions, dump4.vmRegions);
   });
 
   test('checkDiscountTracingOverhead_undefinedFields', function() {
     var pmd = createFinalizedProcessMemoryDump(42, function(pmd) {
-      var v8Dump = new MemoryAllocatorDump(pmd, 'v8');
-      v8Dump.addAttribute('size', new ScalarAttribute('bytes', 2048));
-
-      var tracingDump = new MemoryAllocatorDump(pmd, 'tracing');
-      tracingDump.addAttribute('size', new ScalarAttribute('bytes', 1024));
-
-      pmd.memoryAllocatorDumps = [v8Dump, tracingDump];
+      pmd.memoryAllocatorDumps = [
+        newAllocatorDump(pmd, 'v8', { size: 2048 }),
+        newAllocatorDump(pmd, 'tracing', { size: 1024 })
+      ];
     });
 
     assert.isUndefined(pmd.totals);
-    assert.isUndefined(
-        pmd.getMostRecentTotalVmRegionStat('proportionalResident'));
+    assert.isUndefined(pmd.vmRegions);
 
     var v8Dump = pmd.getMemoryAllocatorDumpByFullName('v8');
-    assert.equal(v8Dump.attributes['size'].value, 2048);
+    checkDumpNumericsAndDiagnostics(v8Dump, {
+      'size': 2048,
+      'effective_size': 2048
+    }, {});
 
     var tracingDump = pmd.getMemoryAllocatorDumpByFullName('tracing');
-    assert.equal(tracingDump.attributes['size'].value, 1024);
+    checkDumpNumericsAndDiagnostics(tracingDump, {
+      'size': 1024,
+      'effective_size': 1024
+    }, {});
   });
 
   test('checkDiscountTracingOverhead_definedFields', function() {
     var pmd = createFinalizedProcessMemoryDump(42, function(pmd) {
       pmd.totals = { residentBytes: 10240 };
+      pmd.vmRegions = createClassificationNode(6000, {
+        privateDirtyResident: 4096,
+        proportionalResident: 5120,
+        swapped: 1536
+      });
 
-      pmd.vmRegions = [
-        VMRegion.fromDict({
-          startAddress: 256,
-          sizeInBytes: 6000,
-          protectionFlags: VMRegion.PROTECTION_FLAG_READ |
-              VMRegion.PROTECTION_FLAG_WRITE,
-          mappedFile: '[stack:20310]',
-          byteStats: {
-            privateDirtyResident: 4096,
-            swapped: 1536,
-            proportionalResident: 5120
-          }
-        })
-      ];
+      var mallocDump = newAllocatorDump(pmd, 'malloc', { size: 3072 });
+      newChildDump(mallocDump, 'allocated_objects', { size: 2560 });
 
-      var mallocDump = new MemoryAllocatorDump(pmd, 'malloc');
-      mallocDump.addAttribute('size', new ScalarAttribute('bytes', 3072));
-      var allocatedObjectsDump = new MemoryAllocatorDump(
-          pmd, 'malloc/allocated_objects');
-      allocatedObjectsDump.addAttribute(
-          'size', new ScalarAttribute('bytes', 2560));
-      allocatedObjectsDump.parent = mallocDump;
-      mallocDump.children.push(allocatedObjectsDump);
-
-      var tracingDump = new MemoryAllocatorDump(pmd, 'tracing');
-      tracingDump.addAttribute('size', new ScalarAttribute('bytes', 1024));
-      tracingDump.addAttribute(
-          'resident_size', new ScalarAttribute('bytes', 1000));
+      var tracingDump = newAllocatorDump(
+          pmd, 'tracing', { size: 1024, resident_size: 1000 });
 
       pmd.memoryAllocatorDumps = [mallocDump, tracingDump];
     });
@@ -309,29 +190,48 @@
     assert.equal(pmd.totals.residentBytes, 9240);
     assert.isUndefined(pmd.totals.peakResidentBytes);
 
-    var vmRegions = pmd.mostRecentVmRegions;
-    assert.lengthOf(vmRegions, 2);
-    var discountRegion = vmRegions[1];
-    assert.strictEqual(
-        discountRegion.mappedFile, '[discounted tracing overhead]');
-    assert.strictEqual(discountRegion.sizeInBytes, -1024);
+    var vmRegions = pmd.vmRegions;
+    assert.strictEqual(vmRegions.sizeInBytes, 4976);
+    assert.deepEqual(vmRegions.byteStats, {
+      privateDirtyResident: 3096,
+      proportionalResident: 4120,
+      swapped: 1536
+    });
 
-    assert.equal(
-        pmd.getMostRecentTotalVmRegionStat('privateDirtyResident'), 3096);
-    assert.equal(pmd.getMostRecentTotalVmRegionStat('swapped'), 1536);
-    assert.equal(
-        pmd.getMostRecentTotalVmRegionStat('proportionalResident'), 4120);
+    checkVMRegions(vmRegions, [
+      {
+        mappedFile: 'mock.so',
+        sizeInBytes: 6000,
+        byteStats: {
+          privateDirtyResident: 4096,
+          proportionalResident: 5120,
+          swapped: 1536
+        }
+      },
+      {
+        mappedFile: '[discounted tracing overhead]',
+        sizeInBytes: -1024,
+        byteStats: {
+          privateDirtyResident: -1000,
+          proportionalResident: -1000
+        }
+      }
+    ]);
 
     var mallocDump = pmd.getMemoryAllocatorDumpByFullName('malloc');
-    assert.equal(mallocDump.attributes['size'].value, 3072);
-    assert.equal(mallocDump.attributes['effective_size'].value, 2048);
+    checkDumpNumericsAndDiagnostics(mallocDump, {
+      'size': 3072,
+      'effective_size': 2048
+    }, {});
     assert.lengthOf(
         mallocDump.children, 2 /* 'allocated_objects' and '<unspecified>' */);
 
     var allocatedObjectsDump = pmd.getMemoryAllocatorDumpByFullName(
         'malloc/allocated_objects');
-    assert.equal(allocatedObjectsDump.attributes['size'].value, 2560);
-    assert.equal(allocatedObjectsDump.attributes['effective_size'].value, 1536);
+    checkDumpNumericsAndDiagnostics(allocatedObjectsDump, {
+      'size': 2560,
+      'effective_size': 1536
+    }, {});
     assert.lengthOf(
         allocatedObjectsDump.children,
         2 /* 'tracing_overhead' and '<unspecified>' */);
@@ -340,45 +240,45 @@
         'malloc/allocated_objects/tracing_overhead');
     assert.strictEqual(discountDump.parent, allocatedObjectsDump);
     assert.include(allocatedObjectsDump.children, discountDump);
-    assert.equal(discountDump.attributes['size'].value, 1024);
-    assert.equal(discountDump.attributes['effective_size'].value, 0);
+    checkDumpNumericsAndDiagnostics(discountDump, {
+      'size': 1024,
+      'effective_size': 0
+    }, {});
 
     var tracingDump = pmd.getMemoryAllocatorDumpByFullName('tracing');
-    assert.equal(tracingDump.attributes['size'].value, 1024);
-    assert.equal(tracingDump.attributes['effective_size'].value, 1024);
-    assert.equal(tracingDump.attributes['resident_size'].value, 1000);
+    checkDumpNumericsAndDiagnostics(tracingDump, {
+      'size': 1024,
+      'effective_size': 1024,
+      'resident_size': 1000
+    }, {});
     assert.strictEqual(tracingDump.owns.target, discountDump);
   });
 
   test('checkDiscountTracingOverhead_winheap', function() {
     var pmd = createFinalizedProcessMemoryDump(42, function(pmd) {
-      var winheapDump = new MemoryAllocatorDump(pmd, 'winheap');
-      winheapDump.addAttribute('size', new ScalarAttribute('bytes', 5120));
-
-      var tracingDump = new MemoryAllocatorDump(pmd, 'tracing');
-      tracingDump.addAttribute('size', new ScalarAttribute('bytes', 2048));
-
-      pmd.memoryAllocatorDumps = [tracingDump, winheapDump];
+      pmd.memoryAllocatorDumps = [
+        newAllocatorDump(pmd, 'tracing', { size: 2048 }),
+        newAllocatorDump(pmd, 'winheap', { size: 5120 })
+      ];
     });
 
     assert.isUndefined(pmd.totals);
-
-    assert.isUndefined(
-        pmd.getMostRecentTotalVmRegionStat('privateDirtyResident'));
-    assert.isUndefined(pmd.getMostRecentTotalVmRegionStat('swapped'));
-    assert.isUndefined(
-        pmd.getMostRecentTotalVmRegionStat('proportionalResident'));
+    assert.isUndefined(pmd.vmRegions);
 
     var winheapDump = pmd.getMemoryAllocatorDumpByFullName('winheap');
-    assert.equal(winheapDump.attributes['size'].value, 5120);
-    assert.equal(winheapDump.attributes['effective_size'].value, 3072);
+    checkDumpNumericsAndDiagnostics(winheapDump, {
+      'size': 5120,
+      'effective_size': 3072
+    }, {});
     assert.lengthOf(winheapDump.children,
         2 /* 'allocated_objects' and '<unspecified>' */);
 
     var allocatedObjectsDump = pmd.getMemoryAllocatorDumpByFullName(
         'winheap/allocated_objects');
-    assert.equal(allocatedObjectsDump.attributes['size'].value, 2048);
-    assert.equal(allocatedObjectsDump.attributes['effective_size'].value, 0);
+    checkDumpNumericsAndDiagnostics(allocatedObjectsDump, {
+      'size': 2048,
+      'effective_size': 0
+    }, {});
     assert.lengthOf(
         allocatedObjectsDump.children, 1 /* 'tracing_overhead' */);
 
@@ -386,55 +286,60 @@
         'winheap/allocated_objects/tracing_overhead');
     assert.strictEqual(discountDump.parent, allocatedObjectsDump);
     assert.include(allocatedObjectsDump.children, discountDump);
-    assert.equal(discountDump.attributes['size'].value, 2048);
-    assert.equal(discountDump.attributes['effective_size'].value, 0);
+    checkDumpNumericsAndDiagnostics(discountDump, {
+      'size': 2048,
+      'effective_size': 0
+    }, {});
 
     var tracingDump = pmd.getMemoryAllocatorDumpByFullName('tracing');
-    assert.equal(tracingDump.attributes['size'].value, 2048);
-    assert.equal(tracingDump.attributes['effective_size'].value, 2048);
+    checkDumpNumericsAndDiagnostics(tracingDump, {
+      'size': 2048,
+      'effective_size': 2048
+    }, {});
     assert.strictEqual(tracingDump.owns.target, discountDump);
   });
 
   test('checkDiscountTracingOverhead_withMostRecentVmRegionsLinks', function() {
     var pmds = createFinalizedProcessMemoryDumps([42, 90], function(pmds) {
       pmds[0].totals = { residentBytes: 1000, peakResidentBytes: 2000 };
-      pmds[0].vmRegions = [
-        VMRegion.fromDict({
-          startAddress: 256,
-          sizeInBytes: 6000,
-          protectionFlags: VMRegion.PROTECTION_FLAG_READ |
-              VMRegion.PROTECTION_FLAG_WRITE,
-          mappedFile: '[stack:20310]',
-          byteStats: {
-            privateDirtyResident: 4096
-          }
-        })
+      pmds[0].vmRegions = createClassificationNode(6000, {
+        privateDirtyResident: 4096
+      });
+      pmds[0].memoryAllocatorDumps = [
+        newAllocatorDump(pmds[0], 'tracing', { size: 300, resident_size: 100 })
       ];
-      pmds[0].memoryAllocatorDumps = (function() {
-        var tracingDump = new MemoryAllocatorDump(pmds[0], 'tracing');
-        tracingDump.addAttribute(
-            'resident_size', new ScalarAttribute('bytes', 100));
-        tracingDump.addAttribute(
-            'size', new ScalarAttribute('bytes', 300));
-        return [tracingDump];
-      })();
 
       pmds[1].totals = { peakResidentBytes: 3000 };
-      pmds[1].memoryAllocatorDumps = (function() {
-        var tracingDump = new MemoryAllocatorDump(pmds[0], 'tracing');
-        tracingDump.addAttribute(
-            'resident_size', new ScalarAttribute('bytes', 200));
-        return [tracingDump];
-      })();
+      pmds[1].memoryAllocatorDumps = [
+        newAllocatorDump(pmds[0], 'tracing', { resident_size: 200 })
+      ];
     });
 
     // First PMD: Both total resident and private dirty resident size should be
     // reduced by 100. Virtual size should be reduced by 300.
-    assert.equal(pmds[0].totals.residentBytes, 900);
-    assert.equal(pmds[0].totals.peakResidentBytes, 1900);
-    assert.equal(pmds[0].mostRecentVmRegions[1].sizeInBytes, -300);
-    assert.equal(
-        pmds[0].getMostRecentTotalVmRegionStat('privateDirtyResident'), 3996);
+    assert.strictEqual(pmds[0].totals.residentBytes, 900);
+    assert.strictEqual(pmds[0].totals.peakResidentBytes, 1900);
+    assert.strictEqual(pmds[0].vmRegions.sizeInBytes, 5700);
+    assert.deepEqual(pmds[0].vmRegions.byteStats, {
+      privateDirtyResident: 3996
+    });
+    checkVMRegions(pmds[0].vmRegions, [
+      {
+        mappedFile: 'mock.so',
+        sizeInBytes: 6000,
+        byteStats: {
+          privateDirtyResident: 4096
+        }
+      },
+      {
+        mappedFile: '[discounted tracing overhead]',
+        sizeInBytes: -300,
+        byteStats: {
+          privateDirtyResident: -100
+        }
+      }
+    ]);
+    assert.strictEqual(pmds[0].mostRecentVmRegions, pmds[0].vmRegions);
 
     // Second PMD: Total resident size should be reduced by 200, whereas private
     // dirty resident size should be reduced by 100 (because it comes from
@@ -442,219 +347,206 @@
     // reduced by 300.
     assert.isUndefined(pmds[1].totals.residentBytes);
     assert.equal(pmds[1].totals.peakResidentBytes, 2800);
-    assert.equal(pmds[1].mostRecentVmRegions[1].sizeInBytes, -300);
-    assert.equal(
-        pmds[1].getMostRecentTotalVmRegionStat('privateDirtyResident'), 3996);
+    assert.isUndefined(pmds[1].vmRegions);
+    assert.strictEqual(pmds[1].mostRecentVmRegions, pmds[0].vmRegions);
   });
 
   test('checkDiscountTracingOverhead_allDiscountedVmRegionFields', function() {
     var pmd = createFinalizedProcessMemoryDump(42, function(pmd) {
-      pmd.vmRegions = [
-        VMRegion.fromDict({
-          mappedFile: '[stack]',
-          sizeInBytes: 10000,
-          byteStats: {
-            privateDirtyResident: 4096,
-            proportionalResident: 8192,
-            swapped: 1536
-          }
-        })
+      pmd.vmRegions = createClassificationNode(10000, {
+        privateDirtyResident: 4096,
+        proportionalResident: 8192,
+        swapped: 1536
+      });
+      pmd.memoryAllocatorDumps = [
+        newAllocatorDump(pmd, 'tracing', { size: 1000, resident_size: 1024 })
       ];
-      var tracingDump = new MemoryAllocatorDump(pmd, 'tracing');
-      tracingDump.addAttribute(
-          'size', new ScalarAttribute('bytes', 1000));
-      tracingDump.addAttribute(
-          'resident_size', new ScalarAttribute('bytes', 1024));
-      pmd.memoryAllocatorDumps = [tracingDump];
     });
 
-    var vmRegions = pmd.mostRecentVmRegions;
-    assert.lengthOf(vmRegions, 2);
-
-    var regularRegion = vmRegions[0];
-    assert.equal(regularRegion.mappedFile, '[stack]');
-    assert.equal(regularRegion.sizeInBytes, 10000);
-    assert.equal(regularRegion.byteStats.privateDirtyResident, 4096);
-    assert.equal(regularRegion.byteStats.swapped, 1536);
-    assert.equal(regularRegion.byteStats.proportionalResident, 8192);
-
-    var discountedRegion = vmRegions[1];
-    assert.equal(discountedRegion.mappedFile, '[discounted tracing overhead]');
-    assert.equal(discountedRegion.sizeInBytes, -1000);
-    assert.equal(discountedRegion.byteStats.privateDirtyResident, -1024);
-    assert.isUndefined(discountedRegion.byteStats.swapped);
-    assert.equal(discountedRegion.byteStats.proportionalResident, -1024);
+    var vmRegions = pmd.vmRegions;
+    assert.strictEqual(vmRegions.sizeInBytes, 9000);
+    assert.deepEqual(vmRegions.byteStats, {
+      privateDirtyResident: 3072,
+      proportionalResident: 7168,
+      swapped: 1536
+    });
+    checkVMRegions(vmRegions, [
+      {
+        mappedFile: 'mock.so',
+        sizeInBytes: 10000,
+        byteStats: {
+          privateDirtyResident: 4096,
+          proportionalResident: 8192,
+          swapped: 1536
+        }
+      },
+      {
+        mappedFile: '[discounted tracing overhead]',
+        sizeInBytes: -1000,
+        byteStats: {
+          privateDirtyResident: -1024,
+          proportionalResident: -1024
+        }
+      }
+    ]);
   });
 
   test('checkDiscountTracingOverhead_twoDiscountedVmRegionField', function() {
     var pmd = createFinalizedProcessMemoryDump(42, function(pmd) {
-      pmd.vmRegions = [
-        VMRegion.fromDict({
-          mappedFile: '[stack]',
-          sizeInBytes: 10000,
-          byteStats: {
-            privateDirtyResident: 4096,
-            swapped: 1536
-          }
-        })
+      pmd.vmRegions = createClassificationNode(10000, {
+        privateDirtyResident: 4096,
+        swapped: 1536
+      });
+      pmd.memoryAllocatorDumps = [
+        newAllocatorDump(pmd, 'tracing', { size: 1000, resident_size: 1024 })
       ];
-      var tracingDump = new MemoryAllocatorDump(pmd, 'tracing');
-      tracingDump.addAttribute(
-          'size', new ScalarAttribute('bytes', 1000));
-      tracingDump.addAttribute(
-          'resident_size', new ScalarAttribute('bytes', 1024));
-      pmd.memoryAllocatorDumps = [tracingDump];
     });
 
-    var vmRegions = pmd.mostRecentVmRegions;
-    assert.lengthOf(vmRegions, 2);
-
-    var regularRegion = vmRegions[0];
-    assert.equal(regularRegion.mappedFile, '[stack]');
-    assert.equal(regularRegion.sizeInBytes, 10000);
-    assert.equal(regularRegion.byteStats.privateDirtyResident, 4096);
-    assert.equal(regularRegion.byteStats.swapped, 1536);
-    assert.isUndefined(regularRegion.byteStats.proportionalResident);
-
-    var discountedRegion = vmRegions[1];
-    assert.equal(discountedRegion.mappedFile, '[discounted tracing overhead]');
-    assert.equal(discountedRegion.sizeInBytes, -1000);
-    assert.equal(discountedRegion.byteStats.privateDirtyResident, -1024);
-    assert.isUndefined(discountedRegion.byteStats.swapped);
-    assert.isUndefined(discountedRegion.byteStats.proportionalResident);
+    var vmRegions = pmd.vmRegions;
+    assert.strictEqual(vmRegions.sizeInBytes, 9000);
+    assert.deepEqual(vmRegions.byteStats, {
+      privateDirtyResident: 3072,
+      swapped: 1536
+    });
+    checkVMRegions(vmRegions, [
+      {
+        mappedFile: 'mock.so',
+        sizeInBytes: 10000,
+        byteStats: {
+          privateDirtyResident: 4096,
+          swapped: 1536
+        }
+      },
+      {
+        mappedFile: '[discounted tracing overhead]',
+        sizeInBytes: -1000,
+        byteStats: {
+          privateDirtyResident: -1024
+        }
+      }
+    ]);
   });
 
   test('checkDiscountTracingOverhead_oneDiscountedVmRegionField', function() {
     var pmd = createFinalizedProcessMemoryDump(42, function(pmd) {
-      pmd.vmRegions = [
-        VMRegion.fromDict({
-          mappedFile: '[stack]',
-          sizeInBytes: 10000,
-          byteStats: {}
-        })
+      pmd.vmRegions = createClassificationNode(10000);
+      pmd.memoryAllocatorDumps = [
+        newAllocatorDump(pmd, 'tracing', { size: 1000, resident_size: 1024 })
       ];
-      var tracingDump = new MemoryAllocatorDump(pmd, 'tracing');
-      tracingDump.addAttribute(
-          'size', new ScalarAttribute('bytes', 1000));
-      tracingDump.addAttribute(
-          'resident_size', new ScalarAttribute('bytes', 1024));
-      pmd.memoryAllocatorDumps = [tracingDump];
     });
 
-    var vmRegions = pmd.mostRecentVmRegions;
-    assert.lengthOf(vmRegions, 2);
-
-    var regularRegion = vmRegions[0];
-    assert.equal(regularRegion.mappedFile, '[stack]');
-    assert.equal(regularRegion.sizeInBytes, 10000);
-    assert.isUndefined(regularRegion.byteStats.privateDirtyResident);
-    assert.isUndefined(regularRegion.byteStats.swapped);
-    assert.isUndefined(regularRegion.byteStats.proportionalResident);
-
-    var discountedRegion = vmRegions[1];
-    assert.equal(discountedRegion.mappedFile, '[discounted tracing overhead]');
-    assert.equal(discountedRegion.sizeInBytes, -1000);
-    assert.isUndefined(discountedRegion.byteStats.privateDirtyResident);
-    assert.isUndefined(discountedRegion.byteStats.swapped);
-    assert.isUndefined(discountedRegion.byteStats.proportionalResident);
+    var vmRegions = pmd.vmRegions;
+    assert.strictEqual(vmRegions.sizeInBytes, 9000);
+    assert.deepEqual(vmRegions.byteStats, {});
+    checkVMRegions(vmRegions, [
+      {
+        mappedFile: 'mock.so',
+        sizeInBytes: 10000
+      },
+      {
+        mappedFile: '[discounted tracing overhead]',
+        sizeInBytes: -1000
+      }
+    ]);
   });
 
   test('checkDiscountTracingOverhead_noDiscountedVmRegionFields', function() {
     var pmd = createFinalizedProcessMemoryDump(42, function(pmd) {
-      pmd.vmRegions = [
-        VMRegion.fromDict({
-          mappedFile: '[stack]',
-          byteStats: {
-            swapped: 1536
-          }
-        })
+      pmd.vmRegions = createClassificationNode(undefined, {
+        swapped: 1536
+      });
+      pmd.memoryAllocatorDumps = [
+        newAllocatorDump(pmd, 'tracing', { size: 1000, resident_size: 1024 })
       ];
-      var tracingDump = new MemoryAllocatorDump(pmd, 'tracing');
-      tracingDump.addAttribute(
-          'resident_size', new ScalarAttribute('bytes', 1024));
-      pmd.memoryAllocatorDumps = [tracingDump];
     });
 
-    var vmRegions = pmd.mostRecentVmRegions;
-    assert.lengthOf(vmRegions, 1);
-
-    var regularRegion = vmRegions[0];
-    assert.equal(regularRegion.mappedFile, '[stack]');
-    assert.isUndefined(regularRegion.sizeInBytes);
-    assert.isUndefined(regularRegion.byteStats.privateDirtyResident);
-    assert.equal(regularRegion.byteStats.swapped, 1536);
-    assert.isUndefined(regularRegion.byteStats.proportionalResident);
+    var vmRegions = pmd.vmRegions;
+    assert.isUndefined(vmRegions.sizeInBytes);
+    assert.deepEqual(vmRegions.byteStats, {
+      swapped: 1536
+    });
+    checkVMRegions(vmRegions, [
+      {
+        mappedFile: 'mock.so',
+        byteStats: {
+          swapped: 1536
+        }
+      }
+    ]);
   });
 
   test('checkDiscountTracingOverhead_existingLink', function() {
     var pmd = createFinalizedProcessMemoryDump(42, function(pmd) {
       pmd.totals = { residentBytes: 10240 };
 
-      pmd.vmRegions = [
-        VMRegion.fromDict({
-          startAddress: 256,
-          sizeInBytes: 6000,
-          protectionFlags: VMRegion.PROTECTION_FLAG_READ |
-              VMRegion.PROTECTION_FLAG_WRITE,
-          mappedFile: '[stack:20310]',
-          byteStats: {
-            privateDirtyResident: 4096,
-            swapped: 1536,
-            proportionalResident: 5120
-          }
-        })
-      ];
+      pmd.vmRegions = createClassificationNode(6000, {
+        privateDirtyResident: 4096,
+        swapped: 1536,
+        proportionalResident: 5120
+      });
 
-      var mallocDump = new MemoryAllocatorDump(pmd, 'malloc');
-      mallocDump.addAttribute('size', new ScalarAttribute('bytes', 3072));
-
-      var tracingDump = new MemoryAllocatorDump(pmd, 'tracing');
-      tracingDump.addAttribute(
-          'size', new ScalarAttribute('bytes', 1024));
-      tracingDump.addAttribute(
-          'resident_size', new ScalarAttribute('bytes', 1000));
-
-      var ownedDump = new MemoryAllocatorDump(pmd, 'owned');
+      var mallocDump = newAllocatorDump(pmd, 'malloc', { size: 3072 });
+      var tracingDump = newAllocatorDump(pmd, 'tracing',
+          { size: 1024, resident_size: 1000 });
+      var ownedDump = newAllocatorDump(pmd, 'owned');
 
       // The code for discounting tracing overhead should *not* override an
       // existing ownership.
-      var ownershipLink = new MemoryAllocatorDumpLink(tracingDump, ownedDump);
-      tracingDump.owns = ownershipLink;
-      ownedDump.ownedBy.push(ownershipLink);
+      addOwnershipLink(tracingDump, ownedDump);
 
       pmd.memoryAllocatorDumps = [mallocDump, tracingDump, ownedDump];
     });
 
-    assert.equal(pmd.totals.residentBytes, 9240);
+    assert.strictEqual(pmd.totals.residentBytes, 9240);
     assert.isUndefined(pmd.totals.peakResidentBytes);
 
-    var vmRegions = pmd.mostRecentVmRegions;
-    assert.lengthOf(vmRegions, 2);
-    var discountRegion = vmRegions[1];
-    assert.strictEqual(
-        discountRegion.mappedFile, '[discounted tracing overhead]');
-    assert.strictEqual(discountRegion.sizeInBytes, -1024);
-
-    assert.equal(
-        pmd.getMostRecentTotalVmRegionStat('privateDirtyResident'), 3096);
-    assert.equal(pmd.getMostRecentTotalVmRegionStat('swapped'), 1536);
-    assert.equal(
-        pmd.getMostRecentTotalVmRegionStat('proportionalResident'), 4120);
+    var vmRegions = pmd.vmRegions;
+    assert.strictEqual(vmRegions.sizeInBytes, 4976);
+    assert.deepEqual(vmRegions.byteStats, {
+      privateDirtyResident: 3096,
+      proportionalResident: 4120,
+      swapped: 1536
+    });
+    checkVMRegions(vmRegions, [
+      {
+        mappedFile: 'mock.so',
+        sizeInBytes: 6000,
+        byteStats: {
+          privateDirtyResident: 4096,
+          proportionalResident: 5120,
+          swapped: 1536
+        }
+      },
+      {
+        mappedFile: '[discounted tracing overhead]',
+        sizeInBytes: -1024,
+        byteStats: {
+          privateDirtyResident: -1000,
+          proportionalResident: -1000
+        }
+      }
+    ]);
 
     var mallocDump = pmd.getMemoryAllocatorDumpByFullName('malloc');
-    assert.equal(mallocDump.attributes['size'].value, 3072);
-    assert.equal(mallocDump.attributes['effective_size'].value, 3072);
+    checkDumpNumericsAndDiagnostics(mallocDump, {
+      'size': 3072,
+      'effective_size': 3072
+    }, {});
     assert.lengthOf(mallocDump.children, 0);
 
     var ownedDump = pmd.getMemoryAllocatorDumpByFullName('owned');
-    assert.equal(ownedDump.attributes['size'].value, 1024);
-    assert.equal(ownedDump.attributes['effective_size'].value, 0);
+    checkDumpNumericsAndDiagnostics(ownedDump, {
+      'size': 1024,
+      'effective_size': 0
+    }, {});
     assert.lengthOf(ownedDump.children, 0);
 
     var tracingDump = pmd.getMemoryAllocatorDumpByFullName('tracing');
-    assert.equal(tracingDump.attributes['size'].value, 1024);
-    assert.equal(tracingDump.attributes['effective_size'].value, 1024);
-    assert.equal(tracingDump.attributes['resident_size'].value, 1000);
+    checkDumpNumericsAndDiagnostics(tracingDump, {
+      'size': 1024,
+      'effective_size': 1024,
+      'resident_size': 1000
+    }, {});
     assert.strictEqual(tracingDump.owns.target, ownedDump);
   });
 });
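The rewritten tests above rely on helpers from memory_dump_test_utils.html that are not part of this hunk. Judging only from the calls and from the expected 'mock.so' entries, createClassificationNode appears to wrap a single mock region into a VMRegionClassificationNode; a rough sketch of that assumption (the real helper may differ):

function createClassificationNode(opt_sizeInBytes, opt_byteStats) {
  // Inferred from the assertions above: a single region named 'mock.so'
  // carrying the requested size and byte stats, wrapped in a classification
  // tree so that the discounting code can later add the
  // '[discounted tracing overhead]' region next to it.
  return tr.model.VMRegionClassificationNode.fromRegions([
    tr.model.VMRegion.fromDict({
      mappedFile: 'mock.so',
      sizeInBytes: opt_sizeInBytes,
      byteStats: opt_byteStats || {}
    })
  ]);
}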
diff --git a/catapult/tracing/tracing/model/process_test.html b/catapult/tracing/tracing/model/process_test.html
index 59a1b26..46c2ca8 100644
--- a/catapult/tracing/tracing/model/process_test.html
+++ b/catapult/tracing/tracing/model/process_test.html
@@ -32,7 +32,7 @@
     var thread = process.getOrCreateThread(1);
 
     var instantEvent = new tr.model.InstantEvent('cat', 'event1', 1, 100);
-    process.pushInstantEvent(instantEvent);
+    process.instantEvents.push(instantEvent);
 
     var slice = new tr.model.ThreadSlice('', 'a', 0, 1, {}, 4);
     var frame =
diff --git a/catapult/tracing/tracing/model/sample.html b/catapult/tracing/tracing/model/sample.html
index f31ad19..a0936a3 100644
--- a/catapult/tracing/tracing/model/sample.html
+++ b/catapult/tracing/tracing/model/sample.html
@@ -5,8 +5,8 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/tracing/base/units/time_stamp.html">
 <link rel="import" href="/tracing/model/timed_event.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <script>
 'use strict';
@@ -49,7 +49,7 @@
     },
 
     get userFriendlyName() {
-      return 'Sample at ' + tr.b.u.TimeStamp.format(this.start);
+      return 'Sample at ' + tr.v.Unit.byName.timeStampInMs.format(this.start);
     }
   };
 
diff --git a/catapult/tracing/tracing/model/scoped_id.html b/catapult/tracing/tracing/model/scoped_id.html
new file mode 100644
index 0000000..652d6f0
--- /dev/null
+++ b/catapult/tracing/tracing/model/scoped_id.html
@@ -0,0 +1,35 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/base/base.html">
+<link rel="import" href="/tracing/model/constants.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.model', function() {
+  function ScopedId(scope, id) {
+    if (scope === undefined) {
+      throw new Error('Scope should be defined. Use \'' +
+                      tr.model.OBJECT_DEFAULT_SCOPE +
+                      '\' as the default scope.');
+    }
+    this.scope = scope;
+    this.id = id;
+  }
+
+  ScopedId.prototype = {
+    toString: function() {
+      return '{scope: ' + this.scope + ', id: ' + this.id + '}';
+    }
+  };
+
+  return {
+    ScopedId: ScopedId
+  };
+});
+</script>
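ScopedId is a plain value type; a minimal usage sketch, assuming only what the file above imports (tr.model.OBJECT_DEFAULT_SCOPE from constants.html):

var ScopedId = tr.model.ScopedId;
// Every id now carries an explicit scope; callers without one use the default.
var scopedId = new ScopedId(tr.model.OBJECT_DEFAULT_SCOPE, '0x7fff');
scopedId.toString();                  // '{scope: <default scope>, id: 0x7fff}'
// new ScopedId(undefined, '0x7fff');  // throws, per the constructor check.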
diff --git a/catapult/tracing/tracing/model/slice.html b/catapult/tracing/tracing/model/slice.html
index 49c440b..1236268 100644
--- a/catapult/tracing/tracing/model/slice.html
+++ b/catapult/tracing/tracing/model/slice.html
@@ -5,8 +5,8 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/tracing/base/units/time_stamp.html">
 <link rel="import" href="/tracing/model/timed_event.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <script>
 'use strict';
@@ -73,7 +73,7 @@
 
     get userFriendlyName() {
       return 'Slice ' + this.title + ' at ' +
-          tr.b.u.TimeStamp.format(this.start);
+          tr.v.Unit.byName.timeStampInMs.format(this.start);
     },
 
     get stableId() {
diff --git a/catapult/tracing/tracing/model/slice_group_test.html b/catapult/tracing/tracing/model/slice_group_test.html
index fe141fd..8ae66bc 100644
--- a/catapult/tracing/tracing/model/slice_group_test.html
+++ b/catapult/tracing/tracing/model/slice_group_test.html
@@ -6,8 +6,8 @@
 -->
 
 <link rel="import" href="/tracing/core/test_utils.html">
-<link rel="import" href="/tracing/model/slice_group.html">
 <link rel="import" href="/tracing/model/model.html">
+<link rel="import" href="/tracing/model/slice_group.html">
 
 <script>
 'use strict';
@@ -15,7 +15,6 @@
 tr.b.unittest.testSuite(function() {
   var Slice = tr.model.Slice;
   var SliceGroup = tr.model.SliceGroup;
-  var newSlice = tr.c.TestUtils.newSlice;
   var newSliceEx = tr.c.TestUtils.newSliceEx;
   var newThreadSlice = tr.c.TestUtils.newThreadSlice;
   var newModel = tr.c.TestUtils.newModel;
@@ -612,8 +611,8 @@
     assert.isUndefined(group.bounds.min);
     assert.isUndefined(group.bounds.max);
 
-    group.pushSlice(newSlice(1, 3));
-    group.pushSlice(newSlice(7, 2));
+    group.pushSlice(newSliceEx({start: 1, duration: 3}));
+    group.pushSlice(newSliceEx({start: 7, duration: 2}));
     group.updateBounds();
     assert.equal(group.bounds.min, 1);
     assert.equal(group.bounds.max, 9);
@@ -642,7 +641,7 @@
     assert.isUndefined(group.bounds.min);
     assert.isUndefined(group.bounds.max);
 
-    group.pushSlice(newSlice(1, 3));
+    group.pushSlice(newSliceEx({start: 1, duration: 3}));
     group.beginSlice('', 'a', 7);
     group.updateBounds();
     assert.equal(group.bounds.min, 1);
diff --git a/catapult/tracing/tracing/model/slice_test.html b/catapult/tracing/tracing/model/slice_test.html
index cb91bf4..0b6e781 100644
--- a/catapult/tracing/tracing/model/slice_test.html
+++ b/catapult/tracing/tracing/model/slice_test.html
@@ -14,9 +14,7 @@
 tr.b.unittest.testSuite(function() {
   var Slice = tr.model.Slice;
   var SliceGroup = tr.model.SliceGroup;
-  var newSlice = tr.c.TestUtils.newSlice;
   var newSliceEx = tr.c.TestUtils.newSliceEx;
-  var newSliceNamed = tr.c.TestUtils.newSliceNamed;
   var newFakeThread = tr.c.TestUtils.newFakeThread;
 
   test('findDescendentSlice', function() {
diff --git a/catapult/tracing/tracing/model/time_to_object_instance_map.html b/catapult/tracing/tracing/model/time_to_object_instance_map.html
index 1b1b724..bd30fc1 100644
--- a/catapult/tracing/tracing/model/time_to_object_instance_map.html
+++ b/catapult/tracing/tracing/model/time_to_object_instance_map.html
@@ -17,16 +17,18 @@
   /**
    * Tracks all the instances associated with a given ID over its lifetime.
    *
-   * An id can be used multiple times throughout a trace, referring to different
-   * objects at different times. This data structure does the bookkeeping to
-   * figure out what ObjectInstance is referred to at a given timestamp.
+   * A scoped id can be used multiple times throughout a trace, referring to
+   * different objects at different times. This data structure does the
+   * bookkeeping to figure out what ObjectInstance is referred to at a given
+   * timestamp.
    *
    * @constructor
    */
-  function TimeToObjectInstanceMap(createObjectInstanceFunction, parent, id) {
+  function TimeToObjectInstanceMap(
+      createObjectInstanceFunction, parent, scopedId) {
     this.createObjectInstanceFunction_ = createObjectInstanceFunction;
     this.parent = parent;
-    this.id = id;
+    this.scopedId = scopedId;
     this.instances = [];
   }
 
@@ -34,7 +36,7 @@
     idWasCreated: function(category, name, ts) {
       if (this.instances.length == 0) {
         this.instances.push(this.createObjectInstanceFunction_(
-            this.parent, this.id, category, name, ts));
+            this.parent, this.scopedId, category, name, ts));
         this.instances[0].creationTsWasExplicit = true;
         return this.instances[0];
       }
@@ -45,7 +47,7 @@
                         'done in ascending timestamp order.');
       }
       lastInstance = this.createObjectInstanceFunction_(
-          this.parent, this.id, category, name, ts);
+          this.parent, this.scopedId, category, name, ts);
       lastInstance.creationTsWasExplicit = true;
       this.instances.push(lastInstance);
       return lastInstance;
@@ -54,7 +56,7 @@
     addSnapshot: function(category, name, ts, args, opt_baseTypeName) {
       if (this.instances.length == 0) {
         this.instances.push(this.createObjectInstanceFunction_(
-            this.parent, this.id, category, name, ts, opt_baseTypeName));
+            this.parent, this.scopedId, category, name, ts, opt_baseTypeName));
       }
 
       var i = tr.b.findIndexInSortedIntervals(
@@ -86,7 +88,7 @@
           // The snap is added after our oldest and deleted instance. This means
           // that this is a new implicit instance.
           instance = this.createObjectInstanceFunction_(
-              this.parent, this.id, category, name, ts, opt_baseTypeName);
+              this.parent, this.scopedId, category, name, ts, opt_baseTypeName);
           this.instances.push(instance);
         } else {
           // If the ts is before the last objects deletion time, then the caller
@@ -128,11 +130,11 @@
     idWasDeleted: function(category, name, ts) {
       if (this.instances.length == 0) {
         this.instances.push(this.createObjectInstanceFunction_(
-            this.parent, this.id, category, name, ts));
+            this.parent, this.scopedId, category, name, ts));
       }
       var lastInstance = this.instances[this.instances.length - 1];
       if (ts < lastInstance.creationTs)
-        throw new Error('Cannot delete a id before it was crated');
+        throw new Error('Cannot delete an id before it was created');
       if (lastInstance.deletionTs == Number.MAX_VALUE) {
         lastInstance.wasDeleted(ts);
         return lastInstance;
@@ -144,7 +146,7 @@
       // A new instance was deleted with no snapshots in-between.
       // Create an instance then kill it.
       lastInstance = this.createObjectInstanceFunction_(
-          this.parent, this.id, category, name, ts);
+          this.parent, this.scopedId, category, name, ts);
       this.instances.push(lastInstance);
       lastInstance.wasDeleted(ts);
       return lastInstance;
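With this change the map is keyed by a ScopedId rather than a raw id. A sketch of the flow, where createInstance is a hypothetical factory whose signature mirrors the calls above, under the assumption that the real ObjectInstance constructor accepts the ScopedId in the same position:

function createInstance(parent, scopedId, category, name, ts, opt_baseTypeName) {
  // Assumption: ObjectInstance now also takes a ScopedId here.
  return new tr.model.ObjectInstance(
      parent, scopedId, category, name, ts, opt_baseTypeName);
}

var model = new tr.Model();
var scopedId = new tr.model.ScopedId(tr.model.OBJECT_DEFAULT_SCOPE, '0x1000');
var map = new tr.model.TimeToObjectInstanceMap(
    createInstance, model.getOrCreateProcess(1), scopedId);
map.idWasCreated('cat', 'Object', 10);  // explicit creation at ts=10
map.idWasDeleted('cat', 'Object', 30);  // ends that instance at ts=30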
diff --git a/catapult/tracing/tracing/model/timed_event.html b/catapult/tracing/tracing/model/timed_event.html
index c5675e7..6ca7523 100644
--- a/catapult/tracing/tracing/model/timed_event.html
+++ b/catapult/tracing/tracing/model/timed_event.html
@@ -5,20 +5,17 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/tracing/base/units/time_display_mode.html">
-<link rel="import" href="/tracing/model/event.html">
 <link rel="import" href="/tracing/base/guid.html">
+<link rel="import" href="/tracing/model/event.html">
+<link rel="import" href="/tracing/value/time_display_mode.html">
 
 <script>
 'use strict';
 
-/**
- * @fileoverview Provides the TimedEvent class.
- */
 tr.exportTo('tr.model', function() {
   /**
-   * A TimedEvent is the base type for any piece of data in the trace model with
-   * a specific start and duration.
+   * TimedEvent is a base type for any entity in the trace model with a specific
+   * start and duration.
    *
    * @constructor
    */
@@ -42,13 +39,14 @@
       range.addValue(this.end);
     },
 
-    // bounds returns whether that TimedEvent happens within this timed event
-    bounds: function(that, precisionUnit) {
-      if (precisionUnit === undefined) {
-        precisionUnit = tr.b.u.TimeDisplayModes.ms;
-      }
-      var startsBefore = precisionUnit.roundedLess(that.start, this.start);
-      var endsAfter = precisionUnit.roundedLess(this.end, that.end);
+    // Returns true if 'that' TimedEvent is fully contained within 'this' timed
+    // event.
+    bounds: function(that, opt_precisionUnit) {
+      if (opt_precisionUnit === undefined)
+        opt_precisionUnit = tr.v.TimeDisplayModes.ms;
+
+      var startsBefore = opt_precisionUnit.roundedLess(that.start, this.start);
+      var endsAfter = opt_precisionUnit.roundedLess(this.end, that.end);
       return !startsBefore && !endsAfter;
     }
   };
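In other words, bounds() now reports containment after rounding both endpoints with the given display mode (milliseconds by default). A small example using only the TimedEvent API exercised in the tests below:

var outer = new tr.model.TimedEvent(10);
outer.duration = 1.0;
var inner = new tr.model.TimedEvent(10.25);
inner.duration = 0.5;
outer.bounds(inner);  // true: [10.25, 10.75] lies inside [10, 11].
inner.bounds(outer);  // false: outer starts before and ends after inner.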
diff --git a/catapult/tracing/tracing/model/timed_event_test.html b/catapult/tracing/tracing/model/timed_event_test.html
index 9288c50..4beba91 100644
--- a/catapult/tracing/tracing/model/timed_event_test.html
+++ b/catapult/tracing/tracing/model/timed_event_test.html
@@ -6,15 +6,15 @@
 -->
 
 <link rel="import" href="/tracing/core/test_utils.html">
-<link rel="import" href="/tracing/base/units/time_display_mode.html">
 <link rel="import" href="/tracing/model/timed_event.html">
+<link rel="import" href="/tracing/value/time_display_mode.html">
 
 <script>
 'use strict';
 
 tr.b.unittest.testSuite(function() {
   test('bounds_startPrecision', function() {
-    var unit = tr.b.u.TimeDisplayModes;
+    var unit = tr.v.TimeDisplayModes;
 
     var outer = new tr.model.TimedEvent(10.0001);
     outer.duration = 0.9999;
@@ -28,7 +28,7 @@
   });
 
   test('bounds_endPrecision', function() {
-    var unit = tr.b.u.TimeDisplayModes;
+    var unit = tr.v.TimeDisplayModes;
 
     var outer = new tr.model.TimedEvent(10.0000);
     outer.duration = 0.9999;
diff --git a/catapult/tracing/tracing/model/user_model/animation_expectation.html b/catapult/tracing/tracing/model/user_model/animation_expectation.html
new file mode 100644
index 0000000..9d73f06
--- /dev/null
+++ b/catapult/tracing/tracing/model/user_model/animation_expectation.html
@@ -0,0 +1,49 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/model/user_model/user_expectation.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.model.um', function() {
+  function AnimationExpectation(
+      parentModel, initiatorTitle, start, duration) {
+    tr.model.um.UserExpectation.call(
+        this, parentModel, initiatorTitle, start, duration);
+    this.frameEvents_ = undefined;
+  }
+
+  AnimationExpectation.prototype = {
+    __proto__: tr.model.um.UserExpectation.prototype,
+    constructor: AnimationExpectation,
+
+    get frameEvents() {
+      if (this.frameEvents_)
+        return this.frameEvents_;
+
+      this.frameEvents_ = new tr.model.EventSet();
+
+      this.associatedEvents.forEach(function(event) {
+        if (event.title === tr.model.helpers.IMPL_RENDERING_STATS)
+          this.frameEvents_.push(event);
+      }, this);
+
+      return this.frameEvents_;
+    }
+  };
+
+  tr.model.um.UserExpectation.register(AnimationExpectation, {
+    stageTitle: 'Animation',
+    colorId: tr.b.ColorScheme.getColorIdForReservedName('rail_animation')
+  });
+
+  return {
+    AnimationExpectation: AnimationExpectation
+  };
+});
+</script>
diff --git a/catapult/tracing/tracing/model/user_model/idle_expectation.html b/catapult/tracing/tracing/model/user_model/idle_expectation.html
new file mode 100644
index 0000000..80c22c0
--- /dev/null
+++ b/catapult/tracing/tracing/model/user_model/idle_expectation.html
@@ -0,0 +1,34 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/model/user_model/user_expectation.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.model.um', function() {
+  function IdleExpectation(parentModel, start, duration) {
+    var initiatorTitle = '';
+    tr.model.um.UserExpectation.call(
+        this, parentModel, initiatorTitle, start, duration);
+  }
+
+  IdleExpectation.prototype = {
+    __proto__: tr.model.um.UserExpectation.prototype,
+    constructor: IdleExpectation
+  };
+
+  tr.model.um.UserExpectation.register(IdleExpectation, {
+    stageTitle: 'Idle',
+    colorId: tr.b.ColorScheme.getColorIdForReservedName('rail_idle')
+  });
+
+  return {
+    IdleExpectation: IdleExpectation
+  };
+});
+</script>
diff --git a/catapult/tracing/tracing/model/user_model/load_expectation.html b/catapult/tracing/tracing/model/user_model/load_expectation.html
new file mode 100644
index 0000000..45657fd
--- /dev/null
+++ b/catapult/tracing/tracing/model/user_model/load_expectation.html
@@ -0,0 +1,70 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/model/user_model/user_expectation.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.model.um', function() {
+  var LOAD_SUBTYPE_NAMES = {
+    SUCCESSFUL: 'Successful',
+    FAILED: 'Failed',
+    STARTUP: 'Startup'
+  };
+
+  var DOES_LOAD_SUBTYPE_NAME_EXIST = {};
+  for (var key in LOAD_SUBTYPE_NAMES) {
+    DOES_LOAD_SUBTYPE_NAME_EXIST[LOAD_SUBTYPE_NAMES[key]] = true;
+  }
+
+  function LoadExpectation(parentModel, initiatorTitle, start, duration) {
+    if (!DOES_LOAD_SUBTYPE_NAME_EXIST[initiatorTitle])
+      throw new Error(initiatorTitle + ' is not in LOAD_SUBTYPE_NAMES');
+
+    tr.model.um.UserExpectation.call(
+        this, parentModel, initiatorTitle, start, duration);
+
+    // |renderProcess| is the renderer process that contains the loading
+    // RenderFrame.
+    this.renderProcess = undefined;
+
+    // |renderMainThread| is the CrRendererMain thread in the |renderProcess|
+    // that contains the loading RenderFrame.
+    this.renderMainThread = undefined;
+
+    // |routingId| identifies the loading RenderFrame within the renderer
+    // process.
+    this.routingId = undefined;
+
+    // |parentRoutingId| identifies the RenderFrame that created and contains
+    // the loading RenderFrame.
+    this.parentRoutingId = undefined;
+
+    // |loadFinishedEvent|, if present, signals that this is a main frame.
+    this.loadFinishedEvent = undefined;
+
+    // Startup LoadIRs do not have renderProcess, routingId, or
+    // parentRoutingId. Maybe RenderLoadIR should be a separate class?
+  }
+
+  LoadExpectation.prototype = {
+    __proto__: tr.model.um.UserExpectation.prototype,
+    constructor: LoadExpectation
+  };
+
+  tr.model.um.UserExpectation.register(LoadExpectation, {
+    stageTitle: 'Load',
+    colorId: tr.b.ColorScheme.getColorIdForReservedName('rail_load')
+  });
+
+  return {
+    LOAD_SUBTYPE_NAMES: LOAD_SUBTYPE_NAMES,
+    LoadExpectation: LoadExpectation
+  };
+});
+</script>
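The initiator title doubles as the load subtype, so anything outside LOAD_SUBTYPE_NAMES is rejected up front. A brief sketch; 'model' is assumed to be an ordinary tr.Model:

var model = new tr.Model();  // assumption: any trace model works as the parent
var load = new tr.model.um.LoadExpectation(
    model, tr.model.um.LOAD_SUBTYPE_NAMES.SUCCESSFUL, 0, 250);
// new tr.model.um.LoadExpectation(model, 'Navigation', 0, 250);  // throws.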
diff --git a/catapult/tracing/tracing/model/user_model/response_expectation.html b/catapult/tracing/tracing/model/user_model/response_expectation.html
new file mode 100644
index 0000000..9608467
--- /dev/null
+++ b/catapult/tracing/tracing/model/user_model/response_expectation.html
@@ -0,0 +1,35 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/model/user_model/user_expectation.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.model.um', function() {
+  function ResponseExpectation(
+      parentModel, initiatorTitle, start, duration, opt_isAnimationBegin) {
+    tr.model.um.UserExpectation.call(
+        this, parentModel, initiatorTitle, start, duration);
+    this.isAnimationBegin = opt_isAnimationBegin || false;
+  }
+
+  ResponseExpectation.prototype = {
+    __proto__: tr.model.um.UserExpectation.prototype,
+    constructor: ResponseExpectation
+  };
+
+  tr.model.um.UserExpectation.register(ResponseExpectation, {
+    stageTitle: 'Response',
+    colorId: tr.b.ColorScheme.getColorIdForReservedName('rail_response')
+  });
+
+  return {
+    ResponseExpectation: ResponseExpectation
+  };
+});
+</script>
diff --git a/catapult/tracing/tracing/model/user_model/stub_expectation.html b/catapult/tracing/tracing/model/user_model/stub_expectation.html
new file mode 100644
index 0000000..d544581
--- /dev/null
+++ b/catapult/tracing/tracing/model/user_model/stub_expectation.html
@@ -0,0 +1,74 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/core/test_utils.html">
+<link rel="import" href="/tracing/model/user_model/user_expectation.html">
+
+<script>
+'use strict';
+
+/**
+ * @fileoverview Stub version of UserExpectation for testing.
+ */
+tr.exportTo('tr.model.um', function() {
+  function StubExpectation(args) {
+    this.stageTitle_ = args.stageTitle || 'Idle';
+    this.initiatorTitle_ = args.initiatorTitle || '';
+
+    this.title_ = args.title;
+    if (!this.title_) {
+      var defaultTitle = [];
+      if (this.initiatorTitle_)
+        defaultTitle.push(this.initiatorTitle_);
+      if (this.stageTitle_)
+        defaultTitle.push(this.stageTitle_);
+      this.title_ = defaultTitle.join(' ') || 'title';
+    }
+
+    this.normalizedUserComfort_ = args.normalizedUserComfort || 0;
+    this.normalizedEfficiency_ = args.normalizedEfficiency || 0;
+
+    var sd = tr.c.TestUtils.getStartAndDurationFromDict(args);
+
+    tr.model.um.UserExpectation.call(
+        this, args.parentModel, this.initiatorTitle, sd.start, sd.duration);
+
+    // Must be set after base class call.
+    this.colorId_ = args.colorId || 0;
+
+    if (args.associatedEvents) {
+      args.associatedEvents.forEach(function(event) {
+        this.associatedEvents.push(event);
+      }, this);
+    }
+  }
+
+  StubExpectation.prototype = {
+    __proto__: tr.model.um.UserExpectation.prototype,
+
+    get colorId() {
+      return this.colorId_;
+    },
+
+    get title() {
+      return this.title_;
+    },
+
+    get stageTitle() {
+      return this.stageTitle_;
+    },
+
+    get initiatorTitle() {
+      return this.initiatorTitle_;
+    }
+  };
+
+  return {
+    StubExpectation: StubExpectation
+  };
+});
+</script>
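A test would typically pass just a stage and timing and let everything else fall back to the defaults above; start and duration are resolved through getStartAndDurationFromDict, whose accepted keys are assumed here to include start/duration:

var model = new tr.Model();  // assumption: the test's trace model
var stub = new tr.model.um.StubExpectation({
  parentModel: model,
  start: 0,
  duration: 10,
  stageTitle: 'Response',
  initiatorTitle: 'Scroll'
});
stub.title;       // 'Scroll Response' (initiator then stage, joined above)
stub.stageTitle;  // 'Response'
stub.colorId;     // 0, the default colorId_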
diff --git a/catapult/tracing/tracing/model/user_model/user_expectation.html b/catapult/tracing/tracing/model/user_model/user_expectation.html
new file mode 100644
index 0000000..7be2319
--- /dev/null
+++ b/catapult/tracing/tracing/model/user_model/user_expectation.html
@@ -0,0 +1,142 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/base/range_utils.html">
+<link rel="import" href="/tracing/base/statistics.html">
+<link rel="import" href="/tracing/model/compound_event_selection_state.html">
+<link rel="import" href="/tracing/model/event_set.html">
+<link rel="import" href="/tracing/model/timed_event.html">
+<link rel="import" href="/tracing/value/unit.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.model.um', function() {
+  var CompoundEventSelectionState = tr.model.CompoundEventSelectionState;
+
+  function UserExpectation(parentModel, initiatorTitle, start, duration) {
+    tr.model.TimedEvent.call(this, start);
+    this.associatedEvents = new tr.model.EventSet();
+    this.duration = duration;
+    this.initiatorTitle_ = initiatorTitle;
+    this.parentModel = parentModel;
+    this.typeInfo_ = undefined;
+
+    // sourceEvents are the ones that caused the UserModelBuilder to create this
+    // UserExpectation.
+    this.sourceEvents = new tr.model.EventSet();
+  }
+
+  UserExpectation.prototype = {
+    __proto__: tr.model.TimedEvent.prototype,
+
+    computeCompoundEvenSelectionState: function(selection) {
+      var cess = CompoundEventSelectionState.NOT_SELECTED;
+      if (selection.contains(this))
+        cess |= CompoundEventSelectionState.EVENT_SELECTED;
+
+      if (this.associatedEvents.intersectionIsEmpty(selection))
+        return cess;
+
+      var allContained = this.associatedEvents.every(function(event) {
+        return selection.contains(event);
+      });
+
+      if (allContained)
+        cess |= CompoundEventSelectionState.ALL_ASSOCIATED_EVENTS_SELECTED;
+      else
+        cess |= CompoundEventSelectionState.SOME_ASSOCIATED_EVENTS_SELECTED;
+      return cess;
+    },
+
+    get userFriendlyName() {
+      return this.title + ' User Expectation at ' +
+          tr.v.Unit.byName.timeStampInMs.format(this.start);
+    },
+
+    get stableId() {
+      return ('UserExpectation.' +
+          this.parentModel.userModel.expectations.indexOf(this));
+    },
+
+    get typeInfo() {
+      if (!this.typeInfo_)
+        this.typeInfo_ = UserExpectation.findTypeInfo(this.constructor);
+
+      // If you set Subclass.prototype = {}, then you must explicitly specify
+      // constructor in that prototype object!
+      // http://javascript.info/tutorial/constructor
+
+      if (!this.typeInfo_)
+        throw new Error('Unregistered UserExpectation');
+
+      return this.typeInfo_;
+    },
+
+    get colorId() {
+      return this.typeInfo.metadata.colorId;
+    },
+
+    get stageTitle() {
+      return this.typeInfo.metadata.stageTitle;
+    },
+
+    get initiatorTitle() {
+      return this.initiatorTitle_;
+    },
+
+    get title() {
+      if (!this.initiatorTitle)
+        return this.stageTitle;
+
+      return this.initiatorTitle + ' ' + this.stageTitle;
+    },
+
+    /**
+     * Returns the total number of CPU ms spent by this UserExpectation.
+     */
+    get totalCpuMs() {
+      var cpuMs = 0;
+      this.associatedEvents.forEach(function(event) {
+        if (event.cpuSelfTime)
+          cpuMs += event.cpuSelfTime;
+      });
+      return cpuMs;
+    }
+  };
+
+  var options = new tr.b.ExtensionRegistryOptions(tr.b.BASIC_REGISTRY_MODE);
+  tr.b.decorateExtensionRegistry(UserExpectation, options);
+
+  UserExpectation.addEventListener('will-register', function(e) {
+    var metadata = e.typeInfo.metadata;
+
+    if (metadata.stageTitle === undefined) {
+      throw new Error('Registered UserExpectations must provide ' +
+          'stageTitle');
+    }
+
+    if (metadata.colorId === undefined) {
+      throw new Error('Registered UserExpectations must provide ' +
+          'colorId');
+    }
+  });
+
+  tr.model.EventRegistry.register(
+      UserExpectation,
+      {
+        name: 'user-expectation',
+        pluralName: 'user-expectations',
+        singleViewElementName: 'tr-ui-a-single-user-expectation-sub-view',
+        multiViewElementName: 'tr-ui-a-multi-user-expectation-sub-view'
+      });
+
+  return {
+    UserExpectation: UserExpectation
+  };
+});
+</script>
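Concrete expectation types all follow the same pattern: subclass UserExpectation, then register with a stageTitle and colorId (both enforced by the 'will-register' listener above). A hypothetical subclass for illustration; PrefetchExpectation does not exist in this patch, and it simply reuses the reserved 'rail_load' color referenced elsewhere in the diff:

function PrefetchExpectation(parentModel, initiatorTitle, start, duration) {
  tr.model.um.UserExpectation.call(
      this, parentModel, initiatorTitle, start, duration);
}

PrefetchExpectation.prototype = {
  __proto__: tr.model.um.UserExpectation.prototype,
  constructor: PrefetchExpectation  // required when replacing the prototype
};

tr.model.um.UserExpectation.register(PrefetchExpectation, {
  stageTitle: 'Prefetch',
  colorId: tr.b.ColorScheme.getColorIdForReservedName('rail_load')
});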
diff --git a/catapult/tracing/tracing/model/user_model/user_model.html b/catapult/tracing/tracing/model/user_model/user_model.html
new file mode 100644
index 0000000..718e32f
--- /dev/null
+++ b/catapult/tracing/tracing/model/user_model/user_model.html
@@ -0,0 +1,69 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/model/event_container.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.model.um', function() {
+  function UserModel(parentModel) {
+    tr.model.EventContainer.call(this);
+    this.parentModel_ = parentModel;
+    this.expectations_ = new tr.model.EventSet();
+  }
+
+  UserModel.prototype = {
+    __proto__: tr.model.EventContainer.prototype,
+
+    get stableId() {
+      return 'UserModel';
+    },
+
+    get parentModel() {
+      return this.parentModel_;
+    },
+
+    sortExpectations: function() {
+      Array.prototype.sort.call(this.expectations_, function(x, y) {
+        return x.start - y.start;
+      });
+    },
+
+    get expectations() {
+      return this.expectations_;
+    },
+
+    shiftTimestampsForward: function(amount) {
+    },
+
+    addCategoriesToDict: function(categoriesDict) {
+    },
+
+    iterateAllEventsInThisContainer: function(eventTypePredicate,
+                                              callback, opt_this) {
+      if (eventTypePredicate.call(opt_this, tr.model.um.UserExpectation))
+        this.expectations.forEach(callback, opt_this);
+    },
+
+    iterateAllChildEventContainers: function(callback, opt_this) {
+    },
+
+    updateBounds: function() {
+      this.bounds.reset();
+      this.expectations.forEach(function(expectation) {
+        expectation.addBoundsToRange(this.bounds);
+      }, this);
+    }
+  };
+
+  return {
+    UserModel: UserModel
+  };
+});
+</script>
+
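The container stays deliberately thin: expectations are pushed in by the builder, then sorted and bounded in place. A sketch of that flow; the userModel accessor on the parent model is implied by UserExpectation.stableId above rather than shown in this hunk:

var model = new tr.Model();       // assumption: standard trace model
var userModel = model.userModel;  // implied by stableId, not shown here
userModel.expectations.push(new tr.model.um.IdleExpectation(model, 100, 50));
userModel.expectations.push(new tr.model.um.IdleExpectation(model, 0, 50));
userModel.sortExpectations();     // ordered by start: 0, then 100
userModel.updateBounds();         // bounds now cover [0, 150]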
diff --git a/catapult/tracing/tracing/model/vm_region.html b/catapult/tracing/tracing/model/vm_region.html
new file mode 100644
index 0000000..45ce7f3
--- /dev/null
+++ b/catapult/tracing/tracing/model/vm_region.html
@@ -0,0 +1,389 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/base/base.html">
+
+<script>
+'use strict';
+
+/**
+ * @fileoverview Provides classes for representing and classifying VM regions.
+ *
+ * See https://goo.gl/5SSPv0 for more details.
+ */
+tr.exportTo('tr.model', function() {
+
+  /**
+   * A single virtual memory region (also called a memory map).
+   *
+   * @constructor
+   */
+  function VMRegion(startAddress, sizeInBytes, protectionFlags,
+      mappedFile, byteStats) {
+    this.startAddress = startAddress;
+    this.sizeInBytes = sizeInBytes;
+    this.protectionFlags = protectionFlags;
+    this.mappedFile = mappedFile || '';
+    this.byteStats = byteStats || {};
+  };
+
+  VMRegion.PROTECTION_FLAG_READ = 4;
+  VMRegion.PROTECTION_FLAG_WRITE = 2;
+  VMRegion.PROTECTION_FLAG_EXECUTE = 1;
+  VMRegion.PROTECTION_FLAG_MAYSHARE = 128;
+
+  VMRegion.prototype = {
+    get uniqueIdWithinProcess() {
+      // This value is assumed to be unique within a process.
+      return this.mappedFile + '#' + this.startAddress;
+    },
+
+    get protectionFlagsToString() {
+      if (this.protectionFlags === undefined)
+        return undefined;
+      return (
+          (this.protectionFlags & VMRegion.PROTECTION_FLAG_READ ? 'r' : '-') +
+          (this.protectionFlags & VMRegion.PROTECTION_FLAG_WRITE ? 'w' : '-') +
+          (this.protectionFlags & VMRegion.PROTECTION_FLAG_EXECUTE ?
+              'x' : '-') +
+          (this.protectionFlags & VMRegion.PROTECTION_FLAG_MAYSHARE ? 's' : 'p')
+      );
+    }
+  };
+
+  VMRegion.fromDict = function(dict) {
+    return new VMRegion(
+        dict.startAddress,
+        dict.sizeInBytes,
+        dict.protectionFlags,
+        dict.mappedFile,
+        dict.byteStats);
+  };
+
+  /**
+   * Node in a VM region classification tree.
+   *
+   * Note: Most users of this class should use the
+   * VMRegionClassificationNode.fromRegions static method instead of this
+   * constructor because it leads to better performance due to fewer memory
+   * allocations.
+   *
+   * @constructor
+   */
+  function VMRegionClassificationNode(opt_rule) {
+    this.rule_ = opt_rule || VMRegionClassificationNode.CLASSIFICATION_RULES;
+
+    // True iff this node or any of its descendant classification nodes has at
+    // least one classified VM region.
+    this.hasRegions = false;
+
+    // Total virtual size and byte stats of all regions matching this node's
+    // rule (including its sub-rules).
+    this.sizeInBytes = undefined;
+    this.byteStats = {};
+
+    // Array of child classification nodes if this is an intermediate node.
+    this.children_ = undefined;
+
+    // Array of VM regions. If this is an intermediate node, then the regions
+    // are cached for lazy tree construction (i.e. its child classification
+    // nodes have yet to be built).
+    this.regions_ = [];
+  }
+
+  /**
+   * Rules for classifying memory maps.
+   *
+   * These rules are derived from core/jni/android_os_Debug.cpp in Android.
+   */
+  VMRegionClassificationNode.CLASSIFICATION_RULES = {
+    name: 'Total',
+    children: [
+      {
+        name: 'Android',
+        file: /^\/dev\/ashmem(?!\/libc malloc)/,
+        children: [
+          {
+            name: 'Java runtime',
+            file: /^\/dev\/ashmem\/dalvik-/,
+            children: [
+              {
+                name: 'Spaces',
+                file: /\/dalvik-(alloc|main|large object|non moving|zygote) space/,  // @suppress longLineCheck
+                children: [
+                  {
+                    name: 'Normal',
+                    file: /\/dalvik-(alloc|main)/
+                  },
+                  {
+                    name: 'Large',
+                    file: /\/dalvik-large object/
+                  },
+                  {
+                    name: 'Zygote',
+                    file: /\/dalvik-zygote/
+                  },
+                  {
+                    name: 'Non-moving',
+                    file: /\/dalvik-non moving/
+                  }
+                ]
+              },
+              {
+                name: 'Linear Alloc',
+                file: /\/dalvik-LinearAlloc/
+              },
+              {
+                name: 'Indirect Reference Table',
+                file: /\/dalvik-indirect.ref/
+              },
+              {
+                name: 'Cache',
+                file: /\/dalvik-jit-code-cache/
+              },
+              {
+                name: 'Accounting'
+              }
+            ]
+          },
+          {
+            name: 'Cursor',
+            file: /\/CursorWindow/
+          },
+          {
+            name: 'Ashmem'
+          }
+        ]
+      },
+      {
+        name: 'Native heap',
+        file: /^((\[heap\])|(\[anon:)|(\/dev\/ashmem\/libc malloc)|(\[discounted tracing overhead\])|$)/  // @suppress longLineCheck
+      },
+      {
+        name: 'Stack',
+        file: /^\[stack/
+      },
+      {
+        name: 'Files',
+        file: /\.((((jar)|(apk)|(ttf)|(odex)|(oat)|(art))$)|(dex)|(so))/,
+        children: [
+          {
+            name: 'so',
+            file: /\.so/
+          },
+          {
+            name: 'jar',
+            file: /\.jar$/
+          },
+          {
+            name: 'apk',
+            file: /\.apk$/
+          },
+          {
+            name: 'ttf',
+            file: /\.ttf$/
+          },
+          {
+            name: 'dex',
+            file: /\.((dex)|(odex$))/
+          },
+          {
+            name: 'oat',
+            file: /\.oat$/
+          },
+          {
+            name: 'art',
+            file: /\.art$/
+          }
+        ]
+      },
+      {
+        name: 'Devices',
+        file: /(^\/dev\/)|(anon_inode:dmabuf)/,
+        children: [
+          {
+            name: 'GPU',
+            file: /\/((nv)|(mali)|(kgsl))/
+          },
+          {
+            name: 'DMA',
+            file: /anon_inode:dmabuf/
+          }
+        ]
+      }
+    ]
+  };
+  VMRegionClassificationNode.OTHER_RULE = { name: 'Other' };
+
+  VMRegionClassificationNode.fromRegions = function(regions, opt_rules) {
+    var tree = new VMRegionClassificationNode(opt_rules);
+    tree.regions_ = regions;
+    for (var i = 0; i < regions.length; i++)
+      tree.addStatsFromRegion_(regions[i]);
+    return tree;
+  };
+
+  VMRegionClassificationNode.prototype = {
+    get title() {
+      return this.rule_.name;
+    },
+
+    get children() {
+      if (this.isLeafNode)
+        return undefined;  // Leaf nodes don't have children (by definition).
+      if (this.children_ === undefined)
+        this.buildTree_();  // Lazily classify VM regions.
+      return this.children_;
+    },
+
+    get regions() {
+      if (!this.isLeafNode) {
+        // Intermediate nodes only temporarily cache VM regions for lazy tree
+        // construction.
+        return undefined;
+      }
+      return this.regions_;
+    },
+
+    get allRegionsForTesting() {
+      if (this.regions_ !== undefined) {
+        if (this.children_ !== undefined) {
+          throw new Error('Internal error: a VM region classification node ' +
+              'cannot have both regions and children');
+        }
+        // Leaf node (or caching internal node).
+        return this.regions_;
+      }
+
+      // Intermediate node.
+      var regions = [];
+      this.children_.forEach(function(childNode) {
+        regions = regions.concat(childNode.allRegionsForTesting);
+      });
+      return regions;
+    },
+
+    get isLeafNode() {
+      var children = this.rule_.children;
+      return children === undefined || children.length === 0;
+    },
+
+    addRegion: function(region) {
+      this.addRegionRecursively_(region, true /* addStatsToThisNode */);
+    },
+
+    someRegion: function(fn, opt_this) {
+      if (this.regions_ !== undefined) {
+        // Leaf node (or caching internal node).
+        return this.regions_.some(fn, opt_this);
+      }
+
+      // Intermediate node.
+      return this.children_.some(function(childNode) {
+        return childNode.someRegion(fn, opt_this);
+      });
+    },
+
+    addRegionRecursively_: function(region, addStatsToThisNode) {
+      if (addStatsToThisNode)
+        this.addStatsFromRegion_(region);
+
+      if (this.regions_ !== undefined) {
+        if (this.children_ !== undefined) {
+          throw new Error('Internal error: a VM region classification node ' +
+              'cannot have both regions and children');
+        }
+        // Leaf node or an intermediate node caching VM regions (add the
+        // region to this node and don't classify further).
+        this.regions_.push(region);
+        return;
+      }
+
+      // Non-leaf rule (classify region row further down the tree).
+      function regionRowMatchesChildNode(child) {
+        var fileRegExp = child.rule_.file;
+        if (fileRegExp === undefined)
+          return true;
+        return fileRegExp.test(region.mappedFile);
+      }
+
+      var matchedChild = tr.b.findFirstInArray(
+          this.children_, regionRowMatchesChildNode);
+      if (matchedChild === undefined) {
+        // Region belongs to the 'Other' node (created lazily).
+        if (this.children_.length !== this.rule_.children.length)
+          throw new Error('Internal error');
+        matchedChild = new VMRegionClassificationNode(
+            VMRegionClassificationNode.OTHER_RULE);
+        this.children_.push(matchedChild);
+      }
+
+      matchedChild.addRegionRecursively_(region, true);
+    },
+
+    buildTree_: function() {
+      var cachedRegions = this.regions_;
+      this.regions_ = undefined;
+
+      this.buildChildNodesRecursively_();
+      for (var i = 0; i < cachedRegions.length; i++) {
+        // Note that we don't add the VM region's stats to this node because
+        // they have already been added to it.
+        this.addRegionRecursively_(
+            cachedRegions[i], false /* addStatsToThisNode */);
+      }
+    },
+
+    buildChildNodesRecursively_: function() {
+      if (this.children_ !== undefined) {
+        throw new Error(
+            'Internal error: Classification node already has children');
+      }
+      if (this.regions_ !== undefined && this.regions_.length !== 0) {
+        throw new Error(
+            'Internal error: Classification node should have no regions');
+      }
+
+      if (this.isLeafNode)
+        return;  // Leaf node: Nothing to do.
+
+      // Intermediate node: Clear regions and build children recursively.
+      this.regions_ = undefined;
+      this.children_ = this.rule_.children.map(function(childRule) {
+        var child = new VMRegionClassificationNode(childRule);
+        child.buildChildNodesRecursively_();
+        return child;
+      });
+    },
+
+    addStatsFromRegion_: function(region) {
+      this.hasRegions = true;
+
+      // Aggregate virtual size.
+      var regionSizeInBytes = region.sizeInBytes;
+      if (regionSizeInBytes !== undefined)
+        this.sizeInBytes = (this.sizeInBytes || 0) + regionSizeInBytes;
+
+      // Aggregate byte stats.
+      var thisByteStats = this.byteStats;
+      var regionByteStats = region.byteStats;
+      for (var byteStatName in regionByteStats) {
+        var regionByteStatValue = regionByteStats[byteStatName];
+        if (regionByteStatValue === undefined)
+          continue;
+        thisByteStats[byteStatName] =
+            (thisByteStats[byteStatName] || 0) + regionByteStatValue;
+      }
+    }
+  };
+
+  return {
+    VMRegion: VMRegion,
+    VMRegionClassificationNode: VMRegionClassificationNode
+  };
+});
+</script>
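Putting the two classes together: fromRegions aggregates sizeInBytes and byteStats eagerly, but the per-rule children are only classified on first read. A short sketch; the mapped file paths are made up for illustration:

var VMRegion = tr.model.VMRegion;
var regions = [
  VMRegion.fromDict({
    mappedFile: '[stack]',
    sizeInBytes: 4096,
    byteStats: { privateDirtyResident: 2048 }
  }),
  VMRegion.fromDict({
    mappedFile: '/system/lib/libfoo.so',
    sizeInBytes: 8192,
    byteStats: { proportionalResident: 1024 }
  })
];
var tree = tr.model.VMRegionClassificationNode.fromRegions(regions);
tree.sizeInBytes;  // 12288, aggregated eagerly
tree.byteStats;    // { privateDirtyResident: 2048, proportionalResident: 1024 }
tree.children;     // first read classifies: '[stack]' -> Stack, the .so -> Files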
diff --git a/catapult/tracing/tracing/model/vm_region_test.html b/catapult/tracing/tracing/model/vm_region_test.html
new file mode 100644
index 0000000..a57c0e9
--- /dev/null
+++ b/catapult/tracing/tracing/model/vm_region_test.html
@@ -0,0 +1,1071 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/core/test_utils.html">
+<link rel="import" href="/tracing/model/memory_dump_test_utils.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  var VMRegion = tr.model.VMRegion;
+  var VMRegionClassificationNode = tr.model.VMRegionClassificationNode;
+  var checkVMRegions = tr.model.MemoryDumpTestUtils.checkVMRegions;
+
+  function checkProtectionFlagsToString(protectionFlags, expectedString) {
+    var vmRegion = VMRegion.fromDict({
+      startAddress: 256,
+      sizeInBytes: 336,
+      protectionFlags: protectionFlags,
+      mappedFile: '[stack:20310]',
+      byteStats: {
+        privateDirtyResident: 96,
+        swapped: 144,
+        proportionalResident: 158
+      }
+    });
+    assert.strictEqual(vmRegion.protectionFlagsToString, expectedString);
+  }
+
+  var TEST_RULES = {
+    name: 'Root',
+    children: [
+      {
+        name: 'Words',
+        file: /^[a-zA-Z]/,
+        children: [
+          {
+            name: 'A-D',
+            file: /^[a-dA-D]/
+          },
+          {
+            name: 'E-H',
+            file: /^[e-hE-H]/
+          }
+        ]
+      },
+      {
+        name: 'Digits',
+        file: /\d$/,
+        children: []
+      }
+    ]
+  };
+
+  // Constant representing the expectation that the children of a
+  // VMRegionClassificationNode have not been built yet.
+  var CHILDREN_NOT_BUILT_YET = {};
+
+  function checkTree(node, expectedStructure) {
+    assert.strictEqual(node.title, expectedStructure.title);
+    assert.strictEqual(node.hasRegions, expectedStructure.hasRegions);
+    assert.strictEqual(node.sizeInBytes, expectedStructure.sizeInBytes);
+    assert.deepEqual(node.byteStats, expectedStructure.byteStats || {});
+    assert.strictEqual(node.isLeafNode, expectedStructure.isLeafNode);
+
+    var actualRegions = node.regions;
+    var expectedRegions = expectedStructure.regions;
+    if (expectedRegions === undefined) {
+      assert.isUndefined(actualRegions);
+    } else {
+      assert.instanceOf(actualRegions, Array);
+      checkVMRegions(actualRegions, expectedRegions);
+    }
+
+    var expectedChildren = expectedStructure.children;
+    if (expectedChildren === CHILDREN_NOT_BUILT_YET) {
+      assert.isUndefined(node.children_);
+    } else if (expectedChildren === undefined) {
+      assert.isUndefined(node.children);
+    } else {
+      var actualChildrenMap = new Map();
+      node.children.forEach(function(childNode) {
+        actualChildrenMap.set(childNode.title, childNode);
+      });
+      var expectedChildrenMap = new Map();
+      expectedChildren.forEach(function(childNode) {
+        expectedChildrenMap.set(childNode.title, childNode);
+      });
+      assert.strictEqual(actualChildrenMap.size, expectedChildrenMap.size);
+      for (var title of expectedChildrenMap.keys()) {
+        checkTree(actualChildrenMap.get(title),
+            expectedChildrenMap.get(title));
+      }
+    }
+  }
+
+  function checkClassificationRules(mappedFile, expectedPath) {
+    var region = VMRegion.fromDict({
+      mappedFile: mappedFile,
+      sizeInBytes: 16,
+      byteStats: {
+        privateDirtyResident: 7
+      }
+    });
+    var node = VMRegionClassificationNode.fromRegions([region]);
+    expectedPath.forEach(function(title) {
+      node = tr.b.findFirstInArray(node.children, function(childNode) {
+        return childNode.title === title;
+      });
+    });
+    assert.deepEqual(node.regions, [region]);
+  }
+
+  test('vmRegion_protectionFlagsToString', function() {
+    checkProtectionFlagsToString(undefined, undefined);
+    checkProtectionFlagsToString(0, '---p');
+    checkProtectionFlagsToString(VMRegion.PROTECTION_FLAG_READ, 'r--p');
+    checkProtectionFlagsToString(
+        VMRegion.PROTECTION_FLAG_READ | VMRegion.PROTECTION_FLAG_MAYSHARE,
+        'r--s');
+    checkProtectionFlagsToString(
+        VMRegion.PROTECTION_FLAG_READ | VMRegion.PROTECTION_FLAG_EXECUTE,
+        'r-xp');
+    checkProtectionFlagsToString(
+        VMRegion.PROTECTION_FLAG_READ | VMRegion.PROTECTION_FLAG_WRITE,
+        'rw-p');
+    checkProtectionFlagsToString(
+        VMRegion.PROTECTION_FLAG_READ | VMRegion.PROTECTION_FLAG_WRITE |
+            VMRegion.PROTECTION_FLAG_EXECUTE,
+        'rwxp');
+    checkProtectionFlagsToString(
+        VMRegion.PROTECTION_FLAG_READ | VMRegion.PROTECTION_FLAG_WRITE |
+            VMRegion.PROTECTION_FLAG_MAYSHARE,
+        'rw-s');
+    checkProtectionFlagsToString(
+        VMRegion.PROTECTION_FLAG_READ | VMRegion.PROTECTION_FLAG_EXECUTE |
+            VMRegion.PROTECTION_FLAG_MAYSHARE,
+        'r-xs');
+    checkProtectionFlagsToString(
+        VMRegion.PROTECTION_FLAG_READ | VMRegion.PROTECTION_FLAG_WRITE |
+            VMRegion.PROTECTION_FLAG_EXECUTE |
+            VMRegion.PROTECTION_FLAG_MAYSHARE,
+        'rwxs');
+  });
+
+  // The add(After|Before)Build tests below check that the classification tree
+  // has the correct structure regardless of the ordering of adding regions and
+  // the lazy construction.
+
+  test('vmRegionClassificationNode_constructor_addAfterBuild', function() {
+    var rootNode = new VMRegionClassificationNode(TEST_RULES);
+
+    // Check the root node and verify that the full tree structure has *not*
+    // been constructed yet.
+    checkTree(rootNode, {
+      title: 'Root',
+      hasRegions: false,
+      isLeafNode: false,
+      children: CHILDREN_NOT_BUILT_YET
+    });
+
+    // Reading the children of the root node *should* trigger building the
+    // full tree.
+    checkTree(rootNode, {
+      title: 'Root',
+      hasRegions: false,
+      isLeafNode: false,
+      children: [
+        {
+          title: 'Words',
+          hasRegions: false,
+          isLeafNode: false,
+          children: [
+            {
+              title: 'A-D',
+              hasRegions: false,
+              isLeafNode: true,
+              regions: []
+            },
+            {
+              title: 'E-H',
+              hasRegions: false,
+              isLeafNode: true,
+              regions: []
+            }
+          ]
+        },
+        {
+          title: 'Digits',
+          hasRegions: false,
+          isLeafNode: true,
+          regions: []
+        }
+      ]
+    });
+
+    // Add VM regions to the tree *after* it has been fully built.
+    rootNode.addRegion(VMRegion.fromDict({
+      mappedFile: 'W2',  // Root/Words/Other.
+      sizeInBytes: 16,
+      byteStats: {
+        proportionalResident: 32,
+        swapped: 64
+      }
+    }));
+    rootNode.addRegion(VMRegion.fromDict({
+      mappedFile: '__42',  // Root/Digits.
+      byteStats: {
+        proportionalResident: 33,
+        privateDirtyResident: 77
+      }
+    }));
+    checkTree(rootNode, {
+      title: 'Root',
+      hasRegions: true,
+      sizeInBytes: 16,
+      byteStats: {
+        proportionalResident: 32 + 33,
+        privateDirtyResident: 77,
+        swapped: 64
+      },
+      isLeafNode: false,
+      children: [
+        {
+          title: 'Words',
+          hasRegions: true,
+          sizeInBytes: 16,
+          byteStats: {
+            proportionalResident: 32,
+            swapped: 64
+          },
+          isLeafNode: false,
+          children: [
+            {
+              title: 'A-D',
+              hasRegions: false,
+              isLeafNode: true,
+              regions: []
+            },
+            {
+              title: 'E-H',
+              hasRegions: false,
+              isLeafNode: true,
+              regions: []
+            },
+            {
+              title: 'Other',
+              hasRegions: true,
+              sizeInBytes: 16,
+              byteStats: {
+                proportionalResident: 32,
+                swapped: 64
+              },
+              isLeafNode: true,
+              regions: [
+                {
+                  mappedFile: 'W2',
+                  sizeInBytes: 16,
+                  byteStats: {
+                    proportionalResident: 32,
+                    swapped: 64
+                  }
+                }
+              ]
+            }
+          ]
+        },
+        {
+          title: 'Digits',
+          hasRegions: true,
+          byteStats: {
+            proportionalResident: 33,
+            privateDirtyResident: 77
+          },
+          isLeafNode: true,
+          regions: [
+            {
+              mappedFile: '__42',
+              byteStats: {
+                proportionalResident: 33,
+                privateDirtyResident: 77
+              }
+            }
+          ]
+        }
+      ]
+    });
+  });
+
+  test('vmRegionClassificationNode_constructor_addBeforeBuild', function() {
+    var rootNode = new VMRegionClassificationNode(TEST_RULES);
+
+    // Add regions to the tree *before* it has been fully built. This should
+    // *not* trigger building the full tree (but the total sizeInBytes and
+    // byteStats should be updated accordingly).
+    rootNode.addRegion(VMRegion.fromDict({
+      mappedFile: '__42',  // Root/Digits.
+      byteStats: {
+        proportionalResident: 33,
+        privateDirtyResident: 77
+      }
+    }));
+    rootNode.addRegion(VMRegion.fromDict({
+      mappedFile: 'W2',  // Root/Words/Other.
+      sizeInBytes: 16,
+      byteStats: {
+        proportionalResident: 32,
+        swapped: 64
+      }
+    }));
+    checkTree(rootNode, {
+      title: 'Root',
+      hasRegions: true,
+      sizeInBytes: 16,
+      byteStats: {
+        proportionalResident: 32 + 33,
+        privateDirtyResident: 77,
+        swapped: 64
+      },
+      isLeafNode: false,
+      children: CHILDREN_NOT_BUILT_YET
+    });
+
+    // Reading the children of the root node should trigger building the full
+    // tree.
+    checkTree(rootNode, {
+      title: 'Root',
+      hasRegions: true,
+      sizeInBytes: 16,
+      byteStats: {
+        proportionalResident: 32 + 33,
+        privateDirtyResident: 77,
+        swapped: 64
+      },
+      isLeafNode: false,
+      children: [
+        {
+          title: 'Words',
+          hasRegions: true,
+          sizeInBytes: 16,
+          byteStats: {
+            proportionalResident: 32,
+            swapped: 64
+          },
+          isLeafNode: false,
+          children: [
+            {
+              title: 'A-D',
+              hasRegions: false,
+              isLeafNode: true,
+              regions: []
+            },
+            {
+              title: 'E-H',
+              hasRegions: false,
+              isLeafNode: true,
+              regions: []
+            },
+            {
+              title: 'Other',
+              hasRegions: true,
+              sizeInBytes: 16,
+              byteStats: {
+                proportionalResident: 32,
+                swapped: 64
+              },
+              isLeafNode: true,
+              regions: [
+                {
+                  mappedFile: 'W2',
+                  sizeInBytes: 16,
+                  byteStats: {
+                    proportionalResident: 32,
+                    swapped: 64
+                  }
+                }
+              ]
+            }
+          ]
+        },
+        {
+          title: 'Digits',
+          hasRegions: true,
+          byteStats: {
+            proportionalResident: 33,
+            privateDirtyResident: 77
+          },
+          isLeafNode: true,
+          regions: [
+            {
+              mappedFile: '__42',
+              byteStats: {
+                proportionalResident: 33,
+                privateDirtyResident: 77
+              }
+            }
+          ]
+        }
+      ]
+    });
+
+    // Add more VM regions *after* the tree has been fully built.
+    rootNode.addRegion(VMRegion.fromDict({
+      mappedFile: '%invalid%',  // Root/Other.
+      sizeInBytes: 123
+    }));
+    rootNode.addRegion(VMRegion.fromDict({
+      mappedFile: '__43',  // Root/Digits.
+      byteStats: {
+        swapped: 19
+      }
+    }));
+    rootNode.addRegion(VMRegion.fromDict({
+      mappedFile: 'free',  // Root/Words/E-H.
+      sizeInBytes: undefined
+    }));
+    checkTree(rootNode, {
+      title: 'Root',
+      hasRegions: true,
+      sizeInBytes: 16 + 123,
+      byteStats: {
+        proportionalResident: 32 + 33,
+        privateDirtyResident: 77,
+        swapped: 64 + 19,
+      },
+      isLeafNode: false,
+      children: [
+        {
+          title: 'Words',
+          hasRegions: true,
+          sizeInBytes: 16,
+          byteStats: {
+            proportionalResident: 32,
+            swapped: 64
+          },
+          isLeafNode: false,
+          children: [
+            {
+              title: 'A-D',
+              hasRegions: false,
+              isLeafNode: true,
+              regions: []
+            },
+            {
+              title: 'E-H',
+              hasRegions: true,
+              isLeafNode: true,
+              regions: [
+                {
+                  mappedFile: 'free'
+                }
+              ]
+            },
+            {
+              title: 'Other',
+              hasRegions: true,
+              sizeInBytes: 16,
+              byteStats: {
+                proportionalResident: 32,
+                swapped: 64
+              },
+              isLeafNode: true,
+              regions: [
+                {
+                  mappedFile: 'W2',
+                  sizeInBytes: 16,
+                  byteStats: {
+                    proportionalResident: 32,
+                    swapped: 64
+                  }
+                }
+              ]
+            }
+          ]
+        },
+        {
+          title: 'Digits',
+          hasRegions: true,
+          byteStats: {
+            proportionalResident: 33,
+            privateDirtyResident: 77,
+            swapped: 19
+          },
+          isLeafNode: true,
+          regions: [
+            {
+              mappedFile: '__42',
+              byteStats: {
+                proportionalResident: 33,
+                privateDirtyResident: 77
+              }
+            },
+            {
+              mappedFile: '__43',
+              byteStats: {
+                swapped: 19
+              }
+            }
+          ]
+        },
+        {
+          title: 'Other',
+          hasRegions: true,
+          sizeInBytes: 123,
+          isLeafNode: true,
+          regions: [
+            {
+              mappedFile: '%invalid%',
+              sizeInBytes: 123
+            }
+          ]
+        }
+      ]
+    });
+  });
+
+  test('vmRegionClassificationNode_fromRegions_addAfterBuild', function() {
+    // Construct the root node from a list of regions. This should *not*
+    // trigger building the full tree (but the total sizeInBytes and byteStats
+    // should be updated accordingly).
+    var rootNode = VMRegionClassificationNode.fromRegions([
+      VMRegion.fromDict({
+        mappedFile: '__42',  // Root/Digits.
+        byteStats: {
+          proportionalResident: 33,
+          privateDirtyResident: 77
+        }
+      }),
+      VMRegion.fromDict({
+        mappedFile: '__43',  // Root/Digits.
+        byteStats: {
+          swapped: 19
+        }
+      })
+    ], TEST_RULES);
+    checkTree(rootNode, {
+      title: 'Root',
+      hasRegions: true,
+      byteStats: {
+        proportionalResident: 33,
+        privateDirtyResident: 77,
+        swapped: 19
+      },
+      isLeafNode: false,
+      children: CHILDREN_NOT_BUILT_YET
+    });
+
+    // Reading the children of the root node should trigger building the full
+    // tree.
+    checkTree(rootNode, {
+      title: 'Root',
+      hasRegions: true,
+      byteStats: {
+        proportionalResident: 33,
+        privateDirtyResident: 77,
+        swapped: 19
+      },
+      isLeafNode: false,
+      children: [
+        {
+          title: 'Words',
+          hasRegions: false,
+          isLeafNode: false,
+          children: [
+            {
+              title: 'A-D',
+              hasRegions: false,
+              isLeafNode: true,
+              regions: []
+            },
+            {
+              title: 'E-H',
+              hasRegions: false,
+              isLeafNode: true,
+              regions: []
+            }
+          ]
+        },
+        {
+          title: 'Digits',
+          hasRegions: true,
+          byteStats: {
+            proportionalResident: 33,
+            privateDirtyResident: 77,
+            swapped: 19
+          },
+          isLeafNode: true,
+          regions: [
+            {
+              mappedFile: '__42',
+              byteStats: {
+                proportionalResident: 33,
+                privateDirtyResident: 77
+              }
+            },
+            {
+              mappedFile: '__43',
+              byteStats: {
+                swapped: 19
+              }
+            }
+          ]
+        }
+      ]
+    });
+
+    // Add more VM regions *after* the tree has been fully built.
+    rootNode.addRegion(VMRegion.fromDict({
+      mappedFile: 'W2',  // Root/Words/Other.
+      sizeInBytes: 16,
+      byteStats: {
+        proportionalResident: 32,
+        swapped: 64
+      }
+    }));
+    checkTree(rootNode, {
+      title: 'Root',
+      hasRegions: true,
+      sizeInBytes: 16,
+      byteStats: {
+        proportionalResident: 32 + 33,
+        privateDirtyResident: 77,
+        swapped: 19 + 64,
+      },
+      isLeafNode: false,
+      children: [
+        {
+          title: 'Words',
+          hasRegions: true,
+          sizeInBytes: 16,
+          byteStats: {
+            proportionalResident: 32,
+            swapped: 64
+          },
+          isLeafNode: false,
+          children: [
+            {
+              title: 'A-D',
+              hasRegions: false,
+              isLeafNode: true,
+              regions: []
+            },
+            {
+              title: 'E-H',
+              hasRegions: false,
+              isLeafNode: true,
+              regions: []
+            },
+            {
+              title: 'Other',
+              hasRegions: true,
+              sizeInBytes: 16,
+              byteStats: {
+                proportionalResident: 32,
+                swapped: 64
+              },
+              isLeafNode: true,
+              regions: [
+                {
+                  mappedFile: 'W2',
+                  sizeInBytes: 16,
+                  byteStats: {
+                    proportionalResident: 32,
+                    swapped: 64
+                  }
+                }
+              ]
+            }
+          ]
+        },
+        {
+          title: 'Digits',
+          hasRegions: true,
+          byteStats: {
+            proportionalResident: 33,
+            privateDirtyResident: 77,
+            swapped: 19
+          },
+          isLeafNode: true,
+          regions: [
+            {
+              mappedFile: '__42',
+              byteStats: {
+                proportionalResident: 33,
+                privateDirtyResident: 77
+              }
+            },
+            {
+              mappedFile: '__43',
+              byteStats: {
+                swapped: 19
+              }
+            }
+          ]
+        }
+      ]
+    });
+  });
+
+  test('vmRegionClassificationNode_fromRegions_addBeforeBuild', function() {
+    // Construct the root node from a list of regions and then add another
+    // region. This should *not* trigger building the full tree (but the total
+    // sizeInBytes and byteStats should be updated accordingly).
+    var rootNode = VMRegionClassificationNode.fromRegions([
+      VMRegion.fromDict({
+        mappedFile: '__42',  // Root/Digits.
+        byteStats: {
+          proportionalResident: 33,
+          privateDirtyResident: 77
+        }
+      }),
+      VMRegion.fromDict({
+        mappedFile: '__43',  // Root/Digits.
+        byteStats: {
+          swapped: 19
+        }
+      })
+    ], TEST_RULES);
+    rootNode.addRegion(VMRegion.fromDict({
+      mappedFile: '__42',  // Root/Digits.
+      startAddress: 2048,  // Necessary to distinguish from the first region.
+      sizeInBytes: 1000,
+      byteStats: {
+        privateDirtyResident: 500
+      }
+    }));
+    checkTree(rootNode, {
+      title: 'Root',
+      hasRegions: true,
+      sizeInBytes: 1000,
+      byteStats: {
+        proportionalResident: 33,
+        privateDirtyResident: 77 + 500,
+        swapped: 19
+      },
+      isLeafNode: false,
+      children: CHILDREN_NOT_BUILT_YET
+    });
+
+    // Reading the children of the root node should trigger building the full
+    // tree.
+    checkTree(rootNode, {
+      title: 'Root',
+      hasRegions: true,
+      sizeInBytes: 1000,
+      byteStats: {
+        proportionalResident: 33,
+        privateDirtyResident: 77 + 500,
+        swapped: 19
+      },
+      isLeafNode: false,
+      children: [
+        {
+          title: 'Words',
+          hasRegions: false,
+          isLeafNode: false,
+          children: [
+            {
+              title: 'A-D',
+              hasRegions: false,
+              isLeafNode: true,
+              regions: []
+            },
+            {
+              title: 'E-H',
+              hasRegions: false,
+              isLeafNode: true,
+              regions: []
+            }
+          ]
+        },
+        {
+          title: 'Digits',
+          hasRegions: true,
+          sizeInBytes: 1000,
+          byteStats: {
+            proportionalResident: 33,
+            privateDirtyResident: 77 + 500,
+            swapped: 19
+          },
+          isLeafNode: true,
+          regions: [
+            {
+              mappedFile: '__42',
+              byteStats: {
+                proportionalResident: 33,
+                privateDirtyResident: 77
+              }
+            },
+            {
+              mappedFile: '__43',
+              byteStats: {
+                swapped: 19
+              }
+            },
+            {
+              mappedFile: '__42',
+              startAddress: 2048,
+              sizeInBytes: 1000,
+              byteStats: {
+                privateDirtyResident: 500
+              }
+            }
+          ]
+        }
+      ]
+    });
+
+    // Add more VM regions *after* the tree has been fully built.
+    rootNode.addRegion(VMRegion.fromDict({
+      mappedFile: 'W2',  // Root/Words/Other.
+      sizeInBytes: 16,
+      byteStats: {
+        proportionalResident: 32,
+        swapped: 64
+      }
+    }));
+    checkTree(rootNode, {
+      title: 'Root',
+      hasRegions: true,
+      sizeInBytes: 1000 + 16,
+      byteStats: {
+        proportionalResident: 32 + 33,
+        privateDirtyResident: 500 + 77,
+        swapped: 19 + 64,
+      },
+      isLeafNode: false,
+      children: [
+        {
+          title: 'Words',
+          hasRegions: true,
+          sizeInBytes: 16,
+          byteStats: {
+            proportionalResident: 32,
+            swapped: 64
+          },
+          isLeafNode: false,
+          children: [
+            {
+              title: 'A-D',
+              hasRegions: false,
+              isLeafNode: true,
+              regions: []
+            },
+            {
+              title: 'E-H',
+              hasRegions: false,
+              isLeafNode: true,
+              regions: []
+            },
+            {
+              title: 'Other',
+              hasRegions: true,
+              sizeInBytes: 16,
+              byteStats: {
+                proportionalResident: 32,
+                swapped: 64
+              },
+              isLeafNode: true,
+              regions: [
+                {
+                  mappedFile: 'W2',
+                  sizeInBytes: 16,
+                  byteStats: {
+                    proportionalResident: 32,
+                    swapped: 64
+                  }
+                }
+              ]
+            }
+          ]
+        },
+        {
+          title: 'Digits',
+          hasRegions: true,
+          sizeInBytes: 1000,
+          byteStats: {
+            proportionalResident: 33,
+            privateDirtyResident: 500 + 77,
+            swapped: 19
+          },
+          isLeafNode: true,
+          regions: [
+            {
+              mappedFile: '__42',
+              byteStats: {
+                proportionalResident: 33,
+                privateDirtyResident: 77
+              }
+            },
+            {
+              mappedFile: '__43',
+              byteStats: {
+                swapped: 19
+              }
+            },
+            {
+              mappedFile: '__42',
+              startAddress: 2048,
+              sizeInBytes: 1000,
+              byteStats: {
+                privateDirtyResident: 500
+              }
+            }
+          ]
+        }
+      ]
+    });
+  });
+
+  test('vmRegionClassificationNode_someRegion', function() {
+    var rootNode = new VMRegionClassificationNode(TEST_RULES);
+
+    // There are no regions in the tree, so the method should always return
+    // false.
+    assert.isFalse(rootNode.someRegion(function(region) {
+      throw new Error('There are no regions in the tree!!!');
+    }));
+
+    rootNode.addRegion(VMRegion.fromDict({
+      mappedFile: 'W2',  // Root/Words/Other.
+      sizeInBytes: 16,
+      byteStats: {
+        proportionalResident: 32,
+        swapped: 64
+      }
+    }));
+    rootNode.addRegion(VMRegion.fromDict({
+      mappedFile: '__42',  // Root/Digits.
+      byteStats: {
+        proportionalResident: 33,
+        privateDirtyResident: 77
+      }
+    }));
+    rootNode.addRegion(VMRegion.fromDict({
+      mappedFile: '__43',  // Root/Digits.
+      byteStats: {
+        proportionalResident: 33,
+        privateDirtyResident: 77
+      }
+    }));
+
+    function checkSomeRegion() {
+      // Find the order in which the regions are traversed and check that all
+      // regions were visited.
+      var visitedRegionMappedFiles = [];
+      assert.isFalse(rootNode.someRegion(function(region) {
+        visitedRegionMappedFiles.push(region.mappedFile);
+        return false;
+      }));
+      assert.lengthOf(visitedRegionMappedFiles, 3);
+      assert.sameMembers(visitedRegionMappedFiles, ['W2', '__42', '__43']);
+
+      // Assuming the traversal order is deterministic, we check that once the
+      // callback returns true, no further regions are visited.
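+      // (This mirrors Array.prototype.some: iteration stops at the first
+      // region for which the callback returns true, and the second argument
+      // is used as the callback's `this`, as the opt_this comment below
+      // suggests.)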
+      visitedRegionMappedFiles.forEach(
+          function(mappedFileToMatch, index) {
+            var visitedRegionMappedFiles2 = [];
+            assert.isTrue(rootNode.someRegion(function(region) {
+              this.files.push(region.mappedFile);
+              return region.mappedFile === mappedFileToMatch;
+            }, { files: visitedRegionMappedFiles2 } /* opt_this */));
+            assert.deepEqual(visitedRegionMappedFiles2,
+                visitedRegionMappedFiles.slice(0, index + 1));
+          });
+    }
+
+    // Before lazy construction (single node with a flat list of regions).
+    checkSomeRegion();
+    assert.isUndefined(rootNode.children_);
+
+    // After lazy construction (tree of nodes with lists of regions).
+    assert.isDefined(rootNode.children);  // Force building the tree.
+    assert.isDefined(rootNode.children_);
+    checkSomeRegion();
+  });
+
+  test('classificationRules', function() {
+    checkClassificationRules('/dev/ashmem/dalvik-main space (deleted)',
+        ['Android', 'Java runtime', 'Spaces', 'Normal']);
+    checkClassificationRules('/dev/ashmem/dalvik-non moving space',
+        ['Android', 'Java runtime', 'Spaces', 'Non-moving']);
+    checkClassificationRules('/dev/ashmem/dalvik-zygote space (deleted)',
+        ['Android', 'Java runtime', 'Spaces', 'Zygote']);
+    checkClassificationRules('/dev/ashmem/dalvik-allocation stack (deleted)',
+        ['Android', 'Java runtime', 'Accounting']);
+    checkClassificationRules(
+        '/dev/ashmem/dalvik-allocspace main rosalloc space 1 live-bitmap 2',
+        ['Android', 'Java runtime', 'Accounting']);
+    checkClassificationRules(
+        '/dev/ashmem/dalvik-allocspace non moving space live-bitmap 4',
+        ['Android', 'Java runtime', 'Accounting']);
+    checkClassificationRules('/dev/ashmem/dalvik-allocspace zygote / ' +
+        'non moving space live-bitmap 0 (deleted)',
+        ['Android', 'Java runtime', 'Accounting']);
+    checkClassificationRules('/dev/ashmem/dalvik-card table (deleted)',
+        ['Android', 'Java runtime', 'Accounting']);
+    checkClassificationRules('/dev/ashmem/dalvik-large live objects (deleted)',
+        ['Android', 'Java runtime', 'Accounting']);
+    checkClassificationRules('/dev/ashmem/dalvik-live stack (deleted)',
+        ['Android', 'Java runtime', 'Accounting']);
+    checkClassificationRules(
+        '/dev/ashmem/dalvik-mark sweep sweep array free buffer (deleted)',
+        ['Android', 'Java runtime', 'Accounting']);
+    checkClassificationRules('/dev/ashmem/dalvik-rosalloc page map (deleted)',
+        ['Android', 'Java runtime', 'Accounting']);
+    checkClassificationRules('/dev/ashmem/dalvik-indirect ref table (deleted)',
+        ['Android', 'Java runtime', 'Indirect Reference Table']);
+    checkClassificationRules('/dev/ashmem/dalvik-LinearAlloc (deleted)',
+        ['Android', 'Java runtime', 'Linear Alloc']);
+    checkClassificationRules('/dev/ashmem/dalvik-jit-code-cache (deleted)',
+        ['Android', 'Java runtime', 'Cache']);
+    checkClassificationRules('/dev/ashmem/CursorWindow (deleted)',
+        ['Android', 'Cursor']);
+    checkClassificationRules('/dev/ashmem (deleted)', ['Android', 'Ashmem']);
+    checkClassificationRules('/dev/ashmem/GFXStats-10082',
+        ['Android', 'Ashmem']);
+
+    checkClassificationRules('[stack:23164]', ['Stack']);
+    checkClassificationRules('[stack]', ['Stack']);
+
+    checkClassificationRules('[discounted tracing overhead]', ['Native heap']);
+    checkClassificationRules('', ['Native heap']);
+    checkClassificationRules('[heap]', ['Native heap']);
+    checkClassificationRules('[anon:libc_malloc]', ['Native heap']);
+    checkClassificationRules('[anon:thread signal stack]', ['Native heap']);
+    checkClassificationRules('/dev/ashmem/libc malloc (deleted)',
+        ['Native heap']);
+
+    checkClassificationRules('/usr/lib/nvidia-340/libGL.so.331.79',
+        ['Files', 'so']);
+    checkClassificationRules('/usr/lib/x86_64-linux-gnu/libibus-1.0.so.5.0.505',
+        ['Files', 'so']);
+    checkClassificationRules('/data/data/com.google.android.apps.chrome/' +
+        'app_chrome/RELRO:libchrome.so (deleted)', ['Files', 'so']);
+    checkClassificationRules(
+        '/usr/share/fonts/truetype/msttcorefonts/Times_New_Roman.ttf',
+        ['Files', 'ttf']);
+    checkClassificationRules(
+        '/data/app/com.google.android.apps.chrome-2/base.apk',
+        ['Files', 'apk']);
+    checkClassificationRules(
+        '/data/app/com.google.android.apps.chrome-2/lib/arm/libchrome.so',
+        ['Files', 'so']);
+    checkClassificationRules(
+        '/data/app/com.google.android.apps.chrome-2/oat/arm/base.odex',
+        ['Files', 'dex']);
+    checkClassificationRules(
+        '/data/dalvik-cache/arm/system@framework@boot.art', ['Files', 'art']);
+    checkClassificationRules(
+        '/data/dalvik-cache/arm/system@framework@boot.oat', ['Files', 'oat']);
+
+    checkClassificationRules('/dev/nvidia0', ['Devices', 'GPU']);
+    checkClassificationRules('/dev/kgsl-3d0', ['Devices', 'GPU']);
+    checkClassificationRules('anon_inode:dmabuf', ['Devices', 'DMA']);
+    checkClassificationRules('/dev/binder', ['Devices', 'Other']);
+
+    checkClassificationRules('/src/out/Release/chrome', ['Other']);
+    checkClassificationRules('/tmp/gluY4SVp (deleted)', ['Other']);
+    checkClassificationRules('/src/out/Release/resources.pak', ['Other']);
+    checkClassificationRules('[vdso]', ['Other']);
+    checkClassificationRules('[vsyscall]', ['Other']);
+    checkClassificationRules('[vectors]', ['Other']);
+    checkClassificationRules('[vvar]', ['Other']);
+  });
+});
+</script>
diff --git a/catapult/tracing/tracing/trace2html.html b/catapult/tracing/tracing/trace2html.html
index f26ab62..05790b5 100644
--- a/catapult/tracing/tracing/trace2html.html
+++ b/catapult/tracing/tracing/trace2html.html
@@ -4,6 +4,7 @@
 Use of this source code is governed by a BSD-style license that can be
 found in the LICENSE file.
 -->
+<link rel="import" href="/tracing/base/base64.html">
 <link rel="import" href="/tracing/ui/timeline_view.html">
 <script>
 'use strict';
@@ -49,7 +50,7 @@
     // Trim leading newlines off the text. They happen during writing.
     while (text[0] == '\n')
       text = text.substring(1);
-    traces.push(atob(text));
+    traces.push(tr.b.Base64.atob(text));
   }
 
   var m = new tr.Model();
diff --git a/catapult/tracing/tracing/ui/analysis/analysis_view.html b/catapult/tracing/tracing/ui/analysis/analysis_view.html
index 04183b5..b5961ac 100644
--- a/catapult/tracing/tracing/ui/analysis/analysis_view.html
+++ b/catapult/tracing/tracing/ui/analysis/analysis_view.html
@@ -7,56 +7,47 @@
 
 <link rel="import" href="/tracing/base/iteration_helpers.html">
 <link rel="import" href="/tracing/model/event_set.html">
-<link rel="import" href="/tracing/ui/analysis/tab_view.html">
-<link rel="import" href="/tracing/ui/analysis/analysis_sub_view.html">
-
-<!-- Sub Views. -->
-<link rel="import" href="/tracing/ui/analysis/single_thread_slice_sub_view.html">
-<link rel="import" href="/tracing/ui/analysis/multi_thread_slice_sub_view.html">
-
-<link rel="import" href="/tracing/ui/analysis/single_async_slice_sub_view.html">
-<link rel="import" href="/tracing/ui/analysis/multi_async_slice_sub_view.html">
-
-<link rel="import" href="/tracing/ui/analysis/single_cpu_slice_sub_view.html">
-<link rel="import" href="/tracing/ui/analysis/multi_cpu_slice_sub_view.html">
-
-<link rel="import" href="/tracing/ui/analysis/single_thread_time_slice_sub_view.html">
-<link rel="import" href="/tracing/ui/analysis/multi_thread_time_slice_sub_view.html">
-
-<link rel="import" href="/tracing/ui/analysis/single_instant_event_sub_view.html">
-<link rel="import" href="/tracing/ui/analysis/multi_instant_event_sub_view.html">
-
-<link rel="import" href="/tracing/ui/analysis/counter_sample_sub_view.html">
-
-<link rel="import" href="/tracing/ui/analysis/single_flow_event_sub_view.html">
-<link rel="import" href="/tracing/ui/analysis/multi_flow_event_sub_view.html">
-
-<link rel="import" href="/tracing/ui/analysis/single_object_instance_sub_view.html">
-<link rel="import" href="/tracing/ui/analysis/single_object_snapshot_sub_view.html">
-<link rel="import" href="/tracing/ui/analysis/multi_object_sub_view.html">
-
-<link rel="import" href="/tracing/ui/analysis/single_sample_sub_view.html">
-<link rel="import" href="/tracing/ui/analysis/multi_sample_sub_view.html">
-
-<link rel="import"
-    href="/tracing/ui/analysis/single_interaction_record_sub_view.html">
-<link rel="import"
-    href="/tracing/ui/analysis/multi_interaction_record_sub_view.html">
-
 <link rel="import" href="/tracing/ui/analysis/alert_sub_view.html">
-
-<link rel="import"
-    href="/tracing/ui/analysis/single_frame_sub_view.html">
-<link rel="import"
-    href="/tracing/ui/analysis/multi_frame_sub_view.html">
-
+<link rel="import" href="/tracing/ui/analysis/analysis_sub_view.html">
 <link rel="import"
     href="/tracing/ui/analysis/container_memory_dump_sub_view.html">
-
-<link rel="import" href="/tracing/ui/analysis/single_power_sample_sub_view.html">
+<link rel="import" href="/tracing/ui/analysis/counter_sample_sub_view.html">
+<link rel="import" href="/tracing/ui/analysis/layout_tree_sub_view.html">
+<link rel="import" href="/tracing/ui/analysis/multi_async_slice_sub_view.html">
+<link rel="import" href="/tracing/ui/analysis/multi_cpu_slice_sub_view.html">
+<link rel="import" href="/tracing/ui/analysis/multi_flow_event_sub_view.html">
+<link rel="import" href="/tracing/ui/analysis/multi_frame_sub_view.html">
+<link rel="import"
+      href="/tracing/ui/analysis/multi_instant_event_sub_view.html">
+<link rel="import" href="/tracing/ui/analysis/multi_object_sub_view.html">
 <link rel="import" href="/tracing/ui/analysis/multi_power_sample_sub_view.html">
-
+<link rel="import" href="/tracing/ui/analysis/multi_sample_sub_view.html">
+<link rel="import" href="/tracing/ui/analysis/multi_thread_slice_sub_view.html">
+<link rel="import"
+      href="/tracing/ui/analysis/multi_thread_time_slice_sub_view.html">
+<link rel="import"
+    href="/tracing/ui/analysis/multi_user_expectation_sub_view.html">
+<link rel="import" href="/tracing/ui/analysis/single_async_slice_sub_view.html">
+<link rel="import" href="/tracing/ui/analysis/single_cpu_slice_sub_view.html">
+<link rel="import" href="/tracing/ui/analysis/single_flow_event_sub_view.html">
+<link rel="import" href="/tracing/ui/analysis/single_frame_sub_view.html">
+<link rel="import"
+      href="/tracing/ui/analysis/single_instant_event_sub_view.html">
+<link rel="import"
+      href="/tracing/ui/analysis/single_object_instance_sub_view.html">
+<link rel="import"
+      href="/tracing/ui/analysis/single_object_snapshot_sub_view.html">
+<link rel="import"
+      href="/tracing/ui/analysis/single_power_sample_sub_view.html">
+<link rel="import" href="/tracing/ui/analysis/single_sample_sub_view.html">
+<link rel="import"
+      href="/tracing/ui/analysis/single_thread_slice_sub_view.html">
+<link rel="import"
+      href="/tracing/ui/analysis/single_thread_time_slice_sub_view.html">
+<link rel="import"
+    href="/tracing/ui/analysis/single_user_expectation_sub_view.html">
 <link rel="import" href="/tracing/ui/base/polymer_utils.html">
+<link rel="import" href="/tracing/ui/base/tab_view.html">
 
 <!--
 @fileoverview A component used to display an analysis of a selection,
diff --git a/catapult/tracing/tracing/ui/analysis/analysis_view_test.html b/catapult/tracing/tracing/ui/analysis/analysis_view_test.html
index dfc8ec0..37776f8 100644
--- a/catapult/tracing/tracing/ui/analysis/analysis_view_test.html
+++ b/catapult/tracing/tracing/ui/analysis/analysis_view_test.html
@@ -10,9 +10,10 @@
 <link rel="import" href="/tracing/model/counter_sample.html">
 <link rel="import" href="/tracing/model/counter_series.html">
 <link rel="import" href="/tracing/model/event_set.html">
-<link rel="import" href="/tracing/model/interaction_record.html">
+<link rel="import" href="/tracing/model/user_model/stub_expectation.html">
 <link rel="import" href="/tracing/ui/analysis/analysis_view.html">
 <link rel="import" href="/tracing/ui/brushing_state_controller.html">
+<link rel="import" href="/tracing/ui/extras/full_config.html">
 
 <script>
 'use strict';
@@ -26,7 +27,7 @@
   var CounterSample = tr.model.CounterSample;
   var newThreadSlice = tr.c.TestUtils.newThreadSlice;
   var SCHEDULING_STATE = tr.model.SCHEDULING_STATE;
-  var InteractionRecord = tr.model.InteractionRecord;
+  var StubExpectation = tr.model.um.StubExpectation;
 
   function assertEventSet(actualEventSet, expectedEvents) {
     var expectedEventSet = new EventSet(expectedEvents);
@@ -55,10 +56,12 @@
     var slice2 = newThreadSlice(thread, SCHEDULING_STATE.SLEEPING, 1, 2.718);
     thread.timeSlices = [slice1, slice2];
 
-    var record1 = new InteractionRecord(model, 'r1', 0, 200, 300);
+    var record1 = new StubExpectation(
+        {parentModel: model, initiatorTitle: 'r1', start: 200, duration: 300});
     record1.associatedEvents.push(sample1);
     record1.associatedEvents.push(slice1);
-    var record2 = new InteractionRecord(model, 'r2', 0, 600, 100);
+    var record2 = new StubExpectation(
+        {parentModel: model, initiatorTitle: 'r2', start: 600, duration: 100});
     record2.associatedEvents.push(sample2);
     record2.associatedEvents.push(sample3);
     record2.associatedEvents.push(slice1);
@@ -99,8 +102,8 @@
     tabView.selectedTab = singleThreadSliceTab2;
     checkSelectedTab(singleThreadSliceTab2, []);
 
-    // 4. Event selection: one sample, two thread slices, and one interaction
-    // record.
+    // 4. Event selection: one sample, two thread slices, and one
+    // user expectation.
     controller.changeSelectionFromRequestSelectionChangeEvent(
         new EventSet([slice1, slice2, sample3, record1]));
     assert.lengthOf(tabView.tabs, 3);
@@ -112,7 +115,7 @@
     assert.strictEqual(sampleTab4, sampleTab2);
     var singleRecordTab4 = tabView.tabs[2];
     checkTab(singleRecordTab4,
-        'tr-ui-a-single-interaction-record-sub-view',
+        'tr-ui-a-single-user-expectation-sub-view',
         [record1]);
     var multiThreadSliceTab4 = tabView.tabs[0];
     checkTab(multiThreadSliceTab4,
@@ -121,17 +124,17 @@
     // Remember selected tab (even though the tab was destroyed).
     checkSelectedTab(multiThreadSliceTab4, []);
 
-    // 5. Tab selection: single interaction record tab.
+    // 5. Tab selection: single user expectation tab.
     tabView.selectedTab = singleRecordTab4;
     checkSelectedTab(singleRecordTab4, [sample1, slice1]);
 
-    // 6. Event selection: one interaction record.
+    // 6. Event selection: one user expectation.
     controller.changeSelectionFromRequestSelectionChangeEvent(
         new EventSet([record2]));
     assert.lengthOf(tabView.tabs, 1);
     var singleRecordTab6 = tabView.tabs[0];
     checkTab(singleRecordTab6,
-        'tr-ui-a-single-interaction-record-sub-view',
+        'tr-ui-a-single-user-expectation-sub-view',
         [record2]);
     // Reuse tab (same event type and sub-view tag name).
     assert.strictEqual(singleRecordTab6, singleRecordTab4);
diff --git a/catapult/tracing/tracing/ui/analysis/container_memory_dump_sub_view.html b/catapult/tracing/tracing/ui/analysis/container_memory_dump_sub_view.html
index 4933876..d6a437e 100644
--- a/catapult/tracing/tracing/ui/analysis/container_memory_dump_sub_view.html
+++ b/catapult/tracing/tracing/ui/analysis/container_memory_dump_sub_view.html
@@ -12,7 +12,8 @@
 <link rel="import" href="/tracing/ui/analysis/memory_dump_header_pane.html">
 <link rel="import" href="/tracing/ui/analysis/stacked_pane_view.html">
 <link rel="import" href="/tracing/ui/base/dom_helpers.html">
-<link rel="import" href="/tracing/ui/units/time_stamp_span.html">
+<link rel="import" href="/tracing/value/ui/scalar_span.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <polymer-element name="tr-ui-a-container-memory-dump-sub-view"
     extends="tr-ui-a-sub-view">
@@ -82,7 +83,8 @@
     },
 
     buildViewForSingleContainerName_: function() {
-      var containerMemoryDumps = this.currentSelection_;
+      var containerMemoryDumps =
+          tr.b.dictionaryValues(this.dumpsByContainerName_)[0];
       var dumpView = this.ownerDocument.createElement(
           'tr-ui-a-stacked-pane-view');
       this.$.content.appendChild(dumpView);
@@ -124,8 +126,10 @@
           singleDumpValue_: function(row) {
             var linkEl = ownerDocument.createElement('tr-ui-a-analysis-link');
             linkEl.setSelectionAndContent(new tr.model.EventSet([row]));
-            linkEl.appendChild(tr.ui.units.createTimeStampSpan(
-                row.start, {ownerDocument: ownerDocument}));
+            linkEl.appendChild(tr.v.ui.createScalarSpan(row.start, {
+              unit: tr.v.Unit.byName.timeStampInMs,
+              ownerDocument: ownerDocument
+            }));
             return linkEl;
           },
 
diff --git a/catapult/tracing/tracing/ui/analysis/container_memory_dump_sub_view_test.html b/catapult/tracing/tracing/ui/analysis/container_memory_dump_sub_view_test.html
index 2c844f0..bc959b5 100644
--- a/catapult/tracing/tracing/ui/analysis/container_memory_dump_sub_view_test.html
+++ b/catapult/tracing/tracing/ui/analysis/container_memory_dump_sub_view_test.html
@@ -28,10 +28,16 @@
         'tr-ui-a-container-memory-dump-sub-view');
     if (opt_parentElement)
       opt_parentElement.appendChild(viewEl);
-    if (selection === undefined)
+    if (selection === undefined) {
       viewEl.selection = undefined;
-    else
-      viewEl.selection = new tr.model.EventSet(selection);
+    } else {
+      // Rotate the list of selected dumps to check that the sub-view sorts
+      // them properly.
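+      // For example, [d1, d2, d3, d4] is passed in as [d3, d4, d1, d2].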
+      var length = selection.length;
+      viewEl.selection = new tr.model.EventSet(
+          selection.slice(length / 2, length).concat(
+              selection.slice(0, length / 2)));
+    }
     return viewEl;
   }
 
@@ -115,13 +121,19 @@
     // All these views should be completely empty.
     var unsetViewEl = document.createElement(
         'tr-ui-a-container-memory-dump-sub-view');
-    assert.throws(this.addHTMLOutput.bind(this, unsetViewEl), '0 width');
+    this.addHTMLOutput(unsetViewEl);
+    assert.equal(unsetViewEl.getBoundingClientRect().width, 0);
+    assert.equal(unsetViewEl.getBoundingClientRect().height, 0);
 
     var undefinedViewEl = createViewWithSelection(undefined);
-    assert.throws(this.addHTMLOutput.bind(this, undefinedViewEl), '0 width');
+    this.addHTMLOutput(undefinedViewEl);
+    assert.equal(undefinedViewEl.getBoundingClientRect().width, 0);
+    assert.equal(undefinedViewEl.getBoundingClientRect().height, 0);
 
     var emptyViewEl = createViewWithSelection([]);
-    assert.throws(this.addHTMLOutput.bind(this, emptyViewEl), '0 width');
+    this.addHTMLOutput(emptyViewEl);
+    assert.equal(emptyViewEl.getBoundingClientRect().width, 0);
+    assert.equal(emptyViewEl.getBoundingClientRect().height, 0);
   });
 
   test('instantiate_singleGlobalMemoryDump', function() {
@@ -174,7 +186,7 @@
           // Peak total resident of Process 4.
           overviewTableEl.selectedTableRow = overviewTableEl.tableRows[3];
           overviewTableEl.selectedColumnIndex = 2;
-          checkVmRegionsPane(4 /* PID */);
+          checkVmRegionsPane(undefined);
           checkAllocatorPane(undefined);
 
           // V8 of Process 3.
diff --git a/catapult/tracing/tracing/ui/analysis/frame_power_usage_chart.html b/catapult/tracing/tracing/ui/analysis/frame_power_usage_chart.html
index 5bbff0d..6d06e38 100644
--- a/catapult/tracing/tracing/ui/analysis/frame_power_usage_chart.html
+++ b/catapult/tracing/tracing/ui/analysis/frame_power_usage_chart.html
@@ -28,7 +28,7 @@
 
 var EventSet = tr.model.EventSet;
 
-var CHART_TITLE = 'Power (in mW) by ms since vertical sync';
+var CHART_TITLE = 'Power (W) by ms since vertical sync';
 // TODO(charliea): Find out how to make this specifiable via CSS.
 var CHART_WIDTH_FRACTION_OF_BODY = 0.5;
 
@@ -123,7 +123,8 @@
         return;
 
       var point = { x: sample.start - lastVSyncTimestamp };
-      point['f' + frameNumber] = sample.power;
+      // Divide by 1000 to convert from milliwatts to watts.
+      point['f' + frameNumber] = sample.power / 1000;
       points.push(point);
     });
 
diff --git a/catapult/tracing/tracing/ui/analysis/frame_power_usage_chart_test.html b/catapult/tracing/tracing/ui/analysis/frame_power_usage_chart_test.html
index 56ed33a..bc51290 100644
--- a/catapult/tracing/tracing/ui/analysis/frame_power_usage_chart_test.html
+++ b/catapult/tracing/tracing/ui/analysis/frame_power_usage_chart_test.html
@@ -62,10 +62,10 @@
     this.addHTMLOutput(chart);
 
     var expectedChartData = [
-      { x: 0, f1: 1000 },
-      { x: 1, f1: 2000 },
-      { x: 2, f1: 3000 },
-      { x: 3, f1: 2000 }
+      { x: 0, f1: 1 },
+      { x: 1, f1: 2 },
+      { x: 2, f1: 3 },
+      { x: 3, f1: 2 }
     ];
     assert.sameDeepMembers(chart.chart.data, expectedChartData);
   });
@@ -89,14 +89,14 @@
     this.addHTMLOutput(chart);
 
     var expectedChartData = [
-      { x: 0, f1: 1000 },
-      { x: 1, f1: 2000 },
-      { x: 2, f1: 3000 },
-      { x: 3, f1: 2000 },
-      { x: 0, f2: 2000 },
-      { x: 1, f2: 3000 },
-      { x: 2, f2: 4000 },
-      { x: 3, f2: 3000 }
+      { x: 0, f1: 1 },
+      { x: 1, f1: 2 },
+      { x: 2, f1: 3 },
+      { x: 3, f1: 2 },
+      { x: 0, f2: 2 },
+      { x: 1, f2: 3 },
+      { x: 2, f2: 4 },
+      { x: 3, f2: 3 }
     ];
     assert.sameDeepMembers(chart.chart.data, expectedChartData);
   });
@@ -122,14 +122,14 @@
     this.addHTMLOutput(chart);
 
     var expectedChartData = [
-      { x: 0, f1: 1000 },
-      { x: 1, f1: 2000 },
-      { x: 2, f1: 3000 },
-      { x: 3, f1: 2000 },
-      { x: 0.5, f2: 2000 },
-      { x: 1.5, f2: 3000 },
-      { x: 2.5, f2: 4000 },
-      { x: 3.5, f2: 3000 }
+      { x: 0, f1: 1 },
+      { x: 1, f1: 2 },
+      { x: 2, f1: 3 },
+      { x: 3, f1: 2 },
+      { x: 0.5, f2: 2 },
+      { x: 1.5, f2: 3 },
+      { x: 2.5, f2: 4 },
+      { x: 3.5, f2: 3 }
     ];
     assert.sameDeepMembers(chart.chart.data, expectedChartData);
   });
@@ -153,10 +153,10 @@
     this.addHTMLOutput(chart);
 
     var expectedChartData = [
-      { x: 0, f1: 2000 },
-      { x: 1, f1: 3000 },
-      { x: 2, f1: 4000 },
-      { x: 3, f1: 3000 }
+      { x: 0, f1: 2 },
+      { x: 1, f1: 3 },
+      { x: 2, f1: 4 },
+      { x: 3, f1: 3 }
     ];
     assert.sameDeepMembers(chart.chart.data, expectedChartData);
   });
@@ -174,10 +174,10 @@
     chart.setData(new tr.model.EventSet(series.samples), vSyncTimestamps);
 
     var expectedChartData = [
-      { x: 0, f1: 2000 },
-      { x: 1, f1: 3000 },
-      { x: 2, f1: 4000 },
-      { x: 3, f1: 3000 }
+      { x: 0, f1: 2 },
+      { x: 1, f1: 3 },
+      { x: 2, f1: 4 },
+      { x: 3, f1: 3 }
     ];
     assert.isUndefined(chart.chart);
   });
@@ -201,14 +201,14 @@
     this.addHTMLOutput(chart);
 
     var expectedChartData = [
-      { x: 0, f1: 1000 },
-      { x: 1, f1: 2000 },
-      { x: 2, f1: 3000 },
-      { x: 3, f1: 2000 },
-      { x: 0, f2: 2000 },
-      { x: 1, f2: 3000 },
-      { x: 2, f2: 4000 },
-      { x: 3, f2: 3000 }
+      { x: 0, f1: 1 },
+      { x: 1, f1: 2 },
+      { x: 2, f1: 3 },
+      { x: 3, f1: 2 },
+      { x: 0, f2: 2 },
+      { x: 1, f2: 3 },
+      { x: 2, f2: 4 },
+      { x: 3, f2: 3 }
     ];
     assert.sameDeepMembers(chart.chart.data, expectedChartData);
   });
@@ -252,14 +252,14 @@
     this.addHTMLOutput(chart);
 
     var expectedChartData = [
-      { x: 0, f1: 1000 },
-      { x: 1, f1: 2000 },
-      { x: 2, f1: 3000 },
-      { x: 3, f1: 2000 },
-      { x: 0, f2: 2000 },
-      { x: 1, f2: 3000 },
-      { x: 2, f2: 4000 },
-      { x: 3, f2: 3000 }
+      { x: 0, f1: 1 },
+      { x: 1, f1: 2 },
+      { x: 2, f1: 3 },
+      { x: 3, f1: 2 },
+      { x: 0, f2: 2 },
+      { x: 1, f2: 3 },
+      { x: 2, f2: 4 },
+      { x: 3, f2: 3 }
     ];
     assert.sameDeepMembers(chart.chart.data, expectedChartData);
   });
diff --git a/catapult/tracing/tracing/ui/analysis/generic_object_view.html b/catapult/tracing/tracing/ui/analysis/generic_object_view.html
index a1bf70c..0373f37 100644
--- a/catapult/tracing/tracing/ui/analysis/generic_object_view.html
+++ b/catapult/tracing/tracing/ui/analysis/generic_object_view.html
@@ -6,8 +6,6 @@
 -->
 
 <link rel="import" href="/tracing/base/rect.html">
-<link rel="import" href="/tracing/base/units/time_duration.html">
-<link rel="import" href="/tracing/base/units/time_stamp.html">
 <link rel="import" href="/tracing/base/utils.html">
 <link rel="import" href="/tracing/model/event_set.html">
 <link rel="import" href="/tracing/model/object_instance.html">
@@ -15,9 +13,8 @@
 <link rel="import" href="/tracing/ui/analysis/analysis_link.html">
 <link rel="import" href="/tracing/ui/base/table.html">
 <link rel="import" href="/tracing/ui/base/ui.html">
-<link rel="import" href="/tracing/ui/units/scalar_span.html">
-<link rel="import" href="/tracing/ui/units/time_duration_span.html">
-<link rel="import" href="/tracing/ui/units/time_stamp_span.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/ui/scalar_span.html">
 
 <polymer-element name="tr-ui-a-generic-object-view"
     is="HTMLUnknownElement">
@@ -167,8 +164,8 @@
         return;
       }
 
-      if (object instanceof tr.b.u.Scalar) {
-        var el = this.ownerDocument.createElement('tr-ui-u-scalar-span');
+      if (object instanceof tr.v.ScalarNumeric) {
+        var el = this.ownerDocument.createElement('tr-v-ui-scalar-span');
         el.value = object;
         this.appendElementWithLabel_(label, indent, el, suffix);
         return;
diff --git a/catapult/tracing/tracing/ui/analysis/generic_object_view_test.html b/catapult/tracing/tracing/ui/analysis/generic_object_view_test.html
index ed2705d..fe0be28 100644
--- a/catapult/tracing/tracing/ui/analysis/generic_object_view_test.html
+++ b/catapult/tracing/tracing/ui/analysis/generic_object_view_test.html
@@ -5,9 +5,11 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/tracing/ui/base/deep_utils.html">
-<link rel="import" href="/tracing/ui/analysis/generic_object_view.html">
 <link rel="import" href="/tracing/model/object_instance.html">
+<link rel="import" href="/tracing/ui/analysis/generic_object_view.html">
+<link rel="import" href="/tracing/ui/base/deep_utils.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <script>
 'use strict';
@@ -170,26 +172,28 @@
 
   test('timeDurationValue', function() {
     var view = document.createElement('tr-ui-a-generic-object-view');
-    view.object = new tr.b.u.TimeDuration(3);
+    view.object =
+        new tr.v.ScalarNumeric(tr.v.Unit.byName.timeDurationInMs, 3);
     assert.isDefined(tr.b.findDeepElementMatching(
-      view.$.content, 'tr-ui-u-scalar-span'));
+      view.$.content, 'tr-v-ui-scalar-span'));
   });
 
   test('timeStampValue', function() {
     var view = document.createElement('tr-ui-a-generic-object-view');
-    view.object = new tr.b.u.TimeStamp(3);
+    view.object = new tr.v.ScalarNumeric(tr.v.Unit.byName.timeStampInMs, 3);
     assert.isDefined(tr.b.findDeepElementMatching(
-      view.$.content, 'tr-ui-u-scalar-span'));
+      view.$.content, 'tr-v-ui-scalar-span'));
   });
 
   test('scalarValue', function() {
     var view = document.createElement('tr-ui-a-generic-object-view');
-    view.object = new tr.b.u.Scalar(.3, tr.b.u.Units.normalizedPercentage);
+    view.object =
+        new tr.v.ScalarNumeric(tr.v.Unit.byName.normalizedPercentage, .3);
     var m = tr.b.findDeepElementMatching(
-        view.$.content, 'tr-ui-u-scalar-span');
+        view.$.content, 'tr-v-ui-scalar-span');
     assert.isDefined(m);
     assert.equal(m.value, .3);
-    assert.equal(m.unit, tr.b.u.Units.normalizedPercentage);
+    assert.equal(m.unit, tr.v.Unit.byName.normalizedPercentage);
   });
 
 });
diff --git a/catapult/tracing/tracing/ui/analysis/layout_tree_sub_view.html b/catapult/tracing/tracing/ui/analysis/layout_tree_sub_view.html
new file mode 100644
index 0000000..75a42c0
--- /dev/null
+++ b/catapult/tracing/tracing/ui/analysis/layout_tree_sub_view.html
@@ -0,0 +1,203 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/ui/analysis/analysis_sub_view.html">
+
+<polymer-element name="tr-ui-a-layout-tree-sub-view"
+    extends="tr-ui-a-sub-view">
+  <template>
+    <div id="content"></div>
+  </template>
+</polymer-element>
+<script>
+'use strict';
+
+tr.exportTo('tr.ui.analysis', function() {
+  Polymer('tr-ui-a-layout-tree-sub-view', {
+    set selection(selection) {
+      this.currentSelection_ = selection;
+      this.updateContents_();
+    },
+
+    get selection() {
+      return this.currentSelection_;
+    },
+
+    updateContents_: function() {
+      this.$.content.textContent = '';
+      if (!this.currentSelection_)
+        return;
+
+      var columns = [
+        {
+          title: 'Tag/Name',
+          value: function(layoutObject) {
+            return layoutObject.tag || ':' + layoutObject.name;
+          }
+        },
+
+        {
+          title: 'htmlId',
+          value: function(layoutObject) {
+            return layoutObject.htmlId || '';
+          }
+        },
+
+        {
+          title: 'classNames',
+          value: function(layoutObject) {
+            return layoutObject.classNames || '';
+          }
+        },
+
+        {
+          title: 'reasons',
+          value: function(layoutObject) {
+            return layoutObject.needsLayoutReasons.join(', ');
+          }
+        },
+
+        {
+          title: 'width',
+          value: function(layoutObject) {
+            return layoutObject.absoluteRect.width;
+          }
+        },
+
+        {
+          title: 'height',
+          value: function(layoutObject) {
+            return layoutObject.absoluteRect.height;
+          }
+        },
+
+        {
+          title: 'absX',
+          value: function(layoutObject) {
+            return layoutObject.absoluteRect.left;
+          }
+        },
+
+        {
+          title: 'absY',
+          value: function(layoutObject) {
+            return layoutObject.absoluteRect.top;
+          }
+        },
+
+        {
+          title: 'relX',
+          value: function(layoutObject) {
+            return layoutObject.relativeRect.left;
+          }
+        },
+
+        {
+          title: 'relY',
+          value: function(layoutObject) {
+            return layoutObject.relativeRect.top;
+          }
+        },
+
+        {
+          title: 'float',
+          value: function(layoutObject) {
+            return layoutObject.isFloat ? 'float' : '';
+          }
+        },
+
+        {
+          title: 'positioned',
+          value: function(layoutObject) {
+            return layoutObject.isPositioned ? 'positioned' : '';
+          }
+        },
+
+        {
+          title: 'relative',
+          value: function(layoutObject) {
+            return layoutObject.isRelativePositioned ? 'relative' : '';
+          }
+        },
+
+        {
+          title: 'sticky',
+          value: function(layoutObject) {
+            return layoutObject.isStickyPositioned ? 'sticky' : '';
+          }
+        },
+
+        {
+          title: 'anonymous',
+          value: function(layoutObject) {
+            return layoutObject.isAnonymous ? 'anonymous' : '';
+          }
+        },
+
+        {
+          title: 'row',
+          value: function(layoutObject) {
+            if (layoutObject.tableRow === undefined)
+              return '';
+            return layoutObject.tableRow;
+          }
+        },
+
+        {
+          title: 'col',
+          value: function(layoutObject) {
+            if (layoutObject.tableCol === undefined)
+              return '';
+            return layoutObject.tableCol;
+          }
+        },
+
+        {
+          title: 'rowSpan',
+          value: function(layoutObject) {
+            if (layoutObject.tableRowSpan === undefined)
+              return '';
+            return layoutObject.tableRowSpan;
+          }
+        },
+
+        {
+          title: 'colSpan',
+          value: function(layoutObject) {
+            if (layoutObject.tableColSpan === undefined)
+              return '';
+            return layoutObject.tableColSpan;
+          }
+        },
+
+        {
+          title: 'address',
+          value: function(layoutObject) {
+            return layoutObject.id.toString(16);
+          }
+        }
+      ];
+
+      var table = this.ownerDocument.createElement('tr-ui-b-table');
+      table.defaultExpansionStateCallback = function(
+          layoutObject, parentLayoutObject) {
+        return true;
+      };
+      table.subRowsPropertyName = 'childLayoutObjects';
+      table.tableColumns = columns;
+      table.tableRows = this.currentSelection_.map(function(snapshot) {
+        return snapshot.rootLayoutObject;
+      });
+      table.rebuild();
+      this.$.content.appendChild(table);
+    }
+  });
+
+  return {};
+});
+</script>
+
diff --git a/catapult/tracing/tracing/ui/analysis/memory_dump_allocator_details_pane.html b/catapult/tracing/tracing/ui/analysis/memory_dump_allocator_details_pane.html
index 8275697..2669ff3 100644
--- a/catapult/tracing/tracing/ui/analysis/memory_dump_allocator_details_pane.html
+++ b/catapult/tracing/tracing/ui/analysis/memory_dump_allocator_details_pane.html
@@ -6,6 +6,7 @@
 -->
 
 <link rel="import" href="/tracing/base/iteration_helpers.html">
+<link rel="import" href="/tracing/base/range.html">
 <link rel="import" href="/tracing/model/memory_allocator_dump.html">
 <link rel="import"
     href="/tracing/ui/analysis/memory_dump_heap_details_pane.html">
@@ -13,6 +14,7 @@
 <link rel="import" href="/tracing/ui/analysis/stacked_pane.html">
 <link rel="import" href="/tracing/ui/base/dom_helpers.html">
 <link rel="import" href="/tracing/ui/base/table.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <polymer-element name="tr-ui-a-memory-dump-allocator-details-pane"
     extends="tr-ui-a-stacked-pane">
@@ -66,31 +68,25 @@
 
 tr.exportTo('tr.ui.analysis', function() {
 
-  var IMPORTANCE_RULES = [
-    {
-      condition: tr.model.MemoryAllocatorDump.EFFECTIVE_SIZE_ATTRIBUTE_NAME,
-      importance: 10
-    },
-    {
-      condition: tr.model.MemoryAllocatorDump.SIZE_ATTRIBUTE_NAME,
-      importance: 9
-    },
-    {
-      condition: 'page_size',
-      importance: 0
-    },
-    {
-      condition: /size/,
-      importance: 5
-    },
-    {
-      importance: 0
-    }
-  ];
+  // Constant representing the context in suballocation rows.
+  var SUBALLOCATION_CONTEXT = true;
 
-  /** @{constructor} */
-  function AllocatorDumpNameColumn(title) {
-    tr.ui.analysis.TitleColumn.call(this, title);
+  // Size numeric info types.
+  var MemoryAllocatorDumpInfoType = tr.model.MemoryAllocatorDumpInfoType;
+  var PROVIDED_SIZE_LESS_THAN_AGGREGATED_CHILDREN =
+      MemoryAllocatorDumpInfoType.PROVIDED_SIZE_LESS_THAN_AGGREGATED_CHILDREN;
+  var PROVIDED_SIZE_LESS_THAN_LARGEST_OWNER =
+      MemoryAllocatorDumpInfoType.PROVIDED_SIZE_LESS_THAN_LARGEST_OWNER;
+
+  // Unicode symbols used for memory cell info icons and messages.
+  var LEFTWARDS_OPEN_HEADED_ARROW = String.fromCharCode(0x21FD);
+  var RIGHTWARDS_OPEN_HEADED_ARROW = String.fromCharCode(0x21FE);
+  var EN_DASH = String.fromCharCode(0x2013);
+  var CIRCLED_LATIN_SMALL_LETTER_I = String.fromCharCode(0x24D8);
+
+  /** @constructor */
+  function AllocatorDumpNameColumn() {
+    tr.ui.analysis.TitleColumn.call(this, 'Component');
   }
 
   AllocatorDumpNameColumn.prototype = {
@@ -102,11 +98,400 @@
       return tr.ui.b.createSpan({
         textContent: row.title,
         italic: true,
-        tooltip: row.fullName
+        tooltip: row.fullNames === undefined ?
+            undefined : row.fullNames.join(', ')
       });
     }
   };
 
+  /**
+   * Retrieve the entry associated with a given name from a map and increment
+   * its count.
+   *
+   * If there is no entry associated with the name, a new entry is created, the
+   * creation callback is called, the entry's count is incremented (from 0 to
+   * 1) and the newly created entry is returned.
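+   *
+   * A minimal usage sketch (hypothetical names, following the description
+   * above):
+   *
+   *   var entries = new Map();
+   *   getAndUpdateEntry(entries, 'foo', function(e) { e.color = 'red'; });
+   *   getAndUpdateEntry(entries, 'foo', function(e) {});
+   *   // entries.get('foo') is now {count: 2, color: 'red'}.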
+   */
+  function getAndUpdateEntry(map, name, createdCallback) {
+    var entry = map.get(name);
+    if (entry === undefined) {
+      entry = {count: 0};
+      createdCallback(entry);
+      map.set(name, entry);
+    }
+    entry.count++;
+    return entry;
+  }
+
+  /**
+   * Helper class for building size and effective size column info messages.
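+   *
+   * A rough usage sketch (hypothetical content, based on the methods defined
+   * below):
+   *
+   *   var builder = new SizeInfoMessageBuilder();
+   *   builder.append('provided size');
+   *   builder.appendSizeIfDefined(1024);  // Appends ' (' + formatted size + ')'.
+   *   var message = builder.build();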
+   *
+   * @constructor
+   */
+  function SizeInfoMessageBuilder() {
+    this.parts_ = [];
+    this.indent_ = 0;
+  }
+
+  SizeInfoMessageBuilder.prototype = {
+    append: function(/* arguments */) {
+      this.parts_.push.apply(
+          this.parts_, Array.prototype.slice.apply(arguments));
+    },
+
+    /**
+     * Append the entries of a map to the message according to the following
+     * rules:
+     *
+     *   1. If the map is empty, append emptyText to the message (if provided).
+     *      Examples:
+     *
+     *                       emptyText=undefined
+     *        Hello, World! ====================> Hello, World!
+     *
+     *                        emptyText='empty'
+     *        The bottle is ====================> The bottle is empty
+     *
+     *   2. If the map contains a single entry, append a space and call
+     *      itemCallback on the entry (which is in turn expected to append a
+     *      message for the entry). Example:
+     *
+     *        Please do not ====================> Please do not [item-message]
+     *
+     *   3. If the map contains multiple entries, append them as a list
+     *      with itemCallback called on each entry. If hasPluralSuffix is true,
+     *      's' will be appended to the message before the list. Examples:
+     *
+     *                      hasPluralSuffix=false
+     *        I need to buy ====================> I need to buy:
+     *                                             - [item1-message]
+     *                                             - [item2-message]
+     *                                             [...]
+     *                                             - [itemN-message]
+     *
+     *                      hasPluralSuffix=true
+     *        Suspected CL  ====================> Suspected CLs:
+     *                                             - [item1-message]
+     *                                             - [item2-message]
+     *                                             [...]
+     *                                             - [itemN-message]
+     */
+    appendMap: function(
+        map, hasPluralSuffix, emptyText, itemCallback, opt_this) {
+      opt_this = opt_this || this;
+      if (map.size === 0) {
+        if (emptyText)
+          this.append(emptyText);
+      } else if (map.size === 1) {
+        this.parts_.push(' ');
+        var key = map.keys().next().value;
+        itemCallback.call(opt_this, key, map.get(key));
+      } else {
+        if (hasPluralSuffix)
+          this.parts_.push('s');
+        this.parts_.push(':');
+        this.indent_++;
+        for (var key of map.keys()) {
+          this.parts_.push('\n', ' '.repeat(3 * (this.indent_ - 1)), ' - ');
+          itemCallback.call(opt_this, key, map.get(key));
+        }
+        this.indent_--;
+      }
+    },
+
+    appendImportanceRange: function(range) {
+      this.append(' (importance: ');
+      if (range.min === range.max)
+        this.append(range.min);
+      else
+        this.append(range.min, EN_DASH, range.max);
+      this.append(')');
+    },
+
+    appendSizeIfDefined: function(size) {
+      if (size !== undefined)
+        this.append(' (', tr.v.Unit.byName.sizeInBytes.format(size), ')');
+    },
+
+    appendSomeTimestampsQuantifier: function() {
+      this.append(
+          ' ', tr.ui.analysis.MemoryColumn.SOME_TIMESTAMPS_INFO_QUANTIFIER);
+    },
+
+    build: function() {
+      return this.parts_.join('');
+    }
+  };
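
A rough usage sketch of SizeInfoMessageBuilder with two made-up owner names,
showing the list form of appendMap described in the comment above (with a
single map entry the same call would append ' <item-message>' on one line
instead):

    var ownerNameToEntry = new Map();
    ownerNameToEntry.set('malloc/allocated_objects', {count: 1});
    ownerNameToEntry.set('v8/heaps/heap42', {count: 2});

    var messageBuilder = new SizeInfoMessageBuilder();
    messageBuilder.append('shared by');
    messageBuilder.appendMap(
        ownerNameToEntry,
        false /* hasPluralSuffix */,
        undefined /* emptyText */,
        function(ownerName, ownerEntry) {
          messageBuilder.append(ownerName, ' (seen ', ownerEntry.count, 'x)');
        });
    // messageBuilder.build() ===
    //     'shared by:\n - malloc/allocated_objects (seen 1x)' +
    //     '\n - v8/heaps/heap42 (seen 2x)'
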
+
+  /** @constructor */
+  function EffectiveSizeColumn(name, cellPath, aggregationMode) {
+    tr.ui.analysis.NumericMemoryColumn.call(
+        this, name, cellPath, aggregationMode);
+  }
+
+  EffectiveSizeColumn.prototype = {
+    __proto__: tr.ui.analysis.NumericMemoryColumn.prototype,
+
+    addInfos: function(numerics, memoryAllocatorDumps, infos) {
+      if (memoryAllocatorDumps === undefined)
+        return;
+
+      // Quantified name of an owner dump (of the given dump) -> {count,
+      // importanceRange}.
+      var ownerNameToEntry = new Map();
+
+      // Quantified name of an owned dump (by the given dump) -> {count,
+      // importanceRange, sharerNameToEntry}, where sharerNameToEntry is a map
+      // from quantified names of other owners of the owned dump to {count,
+      // importanceRange}.
+      var ownedNameToEntry = new Map();
+
+      for (var i = 0; i < numerics.length; i++) {
+        if (numerics[i] === undefined)
+          continue;
+        var dump = memoryAllocatorDumps[i];
+        if (dump === SUBALLOCATION_CONTEXT)
+          return;  // No ownership of suballocation internal rows.
+
+        // Gather owners of this dump.
+        dump.ownedBy.forEach(function(ownerLink) {
+          var ownerDump = ownerLink.source;
+          this.getAndUpdateOwnershipEntry_(
+              ownerNameToEntry, ownerDump, ownerLink);
+        }, this);
+
+        // Gather dumps owned by this dump and other owner dumps sharing them
+        // (with this dump).
+        var ownedLink = dump.owns;
+        if (ownedLink !== undefined) {
+          var ownedDump = ownedLink.target;
+          var ownedEntry = this.getAndUpdateOwnershipEntry_(ownedNameToEntry,
+              ownedDump, ownedLink, true /* opt_withSharerNameToEntry */);
+          var sharerNameToEntry = ownedEntry.sharerNameToEntry;
+          ownedDump.ownedBy.forEach(function(sharerLink) {
+            var sharerDump = sharerLink.source;
+            if (sharerDump === dump)
+              return;
+            this.getAndUpdateOwnershipEntry_(
+                sharerNameToEntry, sharerDump, sharerLink);
+          }, this);
+        }
+      }
+
+      // Emit a single info listing all owners of this dump.
+      if (ownerNameToEntry.size > 0) {
+        var messageBuilder = new SizeInfoMessageBuilder();
+        messageBuilder.append('shared by');
+        messageBuilder.appendMap(
+            ownerNameToEntry,
+            false /* hasPluralSuffix */,
+            undefined /* emptyText */,
+            function(ownerName, ownerEntry) {
+              messageBuilder.append(ownerName);
+              if (ownerEntry.count < numerics.length)
+                messageBuilder.appendSomeTimestampsQuantifier();
+              messageBuilder.appendImportanceRange(ownerEntry.importanceRange);
+            }, this);
+        infos.push({
+          message: messageBuilder.build(),
+          icon: LEFTWARDS_OPEN_HEADED_ARROW,
+          color: 'green'
+        });
+      }
+
+      // Emit a single info listing all dumps owned by this dump together
+      // with list(s) of other owner dumps sharing them with this dump.
+      if (ownedNameToEntry.size > 0) {
+        var messageBuilder = new SizeInfoMessageBuilder();
+        messageBuilder.append('shares');
+        messageBuilder.appendMap(
+            ownedNameToEntry,
+            false /* hasPluralSuffix */,
+            undefined /* emptyText */,
+            function(ownedName, ownedEntry) {
+              messageBuilder.append(ownedName);
+              var ownedCount = ownedEntry.count;
+              if (ownedCount < numerics.length)
+                messageBuilder.appendSomeTimestampsQuantifier();
+              messageBuilder.appendImportanceRange(ownedEntry.importanceRange);
+              messageBuilder.append(' with');
+              messageBuilder.appendMap(
+                  ownedEntry.sharerNameToEntry,
+                  false /* hasPluralSuffix */,
+                  ' no other dumps',
+                  function(sharerName, sharerEntry) {
+                    messageBuilder.append(sharerName);
+                    if (sharerEntry.count < ownedCount)
+                      messageBuilder.appendSomeTimestampsQuantifier();
+                    messageBuilder.appendImportanceRange(
+                        sharerEntry.importanceRange);
+                  }, this);
+            }, this);
+        infos.push({
+          message: messageBuilder.build(),
+          icon: RIGHTWARDS_OPEN_HEADED_ARROW,
+          color: 'green'
+        });
+      }
+    },
+
+    getAndUpdateOwnershipEntry_: function(
+        map, dump, link, opt_withSharerNameToEntry) {
+      var entry = getAndUpdateEntry(map, dump.quantifiedName,
+          function(newEntry) {
+            newEntry.importanceRange = new tr.b.Range();
+            if (opt_withSharerNameToEntry)
+              newEntry.sharerNameToEntry = new Map();
+          });
+      entry.importanceRange.addValue(link.importance || 0);
+      return entry;
+    }
+  };
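
For a dump with a single owner in a single-timestamp selection, the code above
pushes an info of roughly the following shape (the placeholder stands for the
owner dump's quantifiedName):

    // {
    //   message: 'shared by <quantified owner name> (importance: 0)',
    //   icon: LEFTWARDS_OPEN_HEADED_ARROW,
    //   color: 'green'
    // }
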
+
+  /** @constructor */
+  function SizeColumn(name, cellPath, aggregationMode) {
+    tr.ui.analysis.NumericMemoryColumn.call(
+        this, name, cellPath, aggregationMode);
+  }
+
+  SizeColumn.prototype = {
+    __proto__: tr.ui.analysis.NumericMemoryColumn.prototype,
+
+    addInfos: function(numerics, memoryAllocatorDumps, infos) {
+      if (memoryAllocatorDumps === undefined)
+        return;
+      this.addOverlapInfo_(numerics, memoryAllocatorDumps, infos);
+      this.addProvidedSizeWarningInfos_(numerics, memoryAllocatorDumps, infos);
+    },
+
+    addOverlapInfo_: function(numerics, memoryAllocatorDumps, infos) {
+      // Sibling allocator dump name -> {count, size}. The latter field (size)
+      // is omitted in multi-selection mode.
+      var siblingNameToEntry = new Map();
+      for (var i = 0; i < numerics.length; i++) {
+        if (numerics[i] === undefined)
+          continue;
+        var dump = memoryAllocatorDumps[i];
+        if (dump === SUBALLOCATION_CONTEXT)
+          return;  // No ownership of suballocation internal rows.
+        var ownedBySiblingSizes = dump.ownedBySiblingSizes;
+        for (var siblingDump of ownedBySiblingSizes.keys()) {
+          var siblingName = siblingDump.name;
+          getAndUpdateEntry(siblingNameToEntry, siblingName,
+              function(newEntry) {
+                if (numerics.length === 1 /* single-selection mode */)
+                  newEntry.size = ownedBySiblingSizes.get(siblingDump);
+              });
+        }
+      }
+
+      // Emit a single info describing all overlaps with siblings (if
+      // applicable).
+      if (siblingNameToEntry.size > 0) {
+        var messageBuilder = new SizeInfoMessageBuilder();
+        messageBuilder.append('overlaps with its sibling');
+        messageBuilder.appendMap(
+            siblingNameToEntry,
+            true /* hasPluralSuffix */,
+            undefined /* emptyText */,
+            function(siblingName, siblingEntry) {
+              messageBuilder.append('\'', siblingName, '\'');
+              messageBuilder.appendSizeIfDefined(siblingEntry.size);
+              if (siblingEntry.count < numerics.length)
+                messageBuilder.appendSomeTimestampsQuantifier();
+            }, this);
+        infos.push({
+          message: messageBuilder.build(),
+          icon: CIRCLED_LATIN_SMALL_LETTER_I,
+          color: 'blue'
+        });
+      }
+    },
+
+    addProvidedSizeWarningInfos_: function(numerics, memoryAllocatorDumps,
+        infos) {
+      // Info type (see MemoryAllocatorDumpInfoType) -> {count, providedSize,
+      // dependencySize}. The latter two fields (providedSize and
+      // dependencySize) are omitted in multi-selection mode.
+      var infoTypeToEntry = new Map();
+      for (var i = 0; i < numerics.length; i++) {
+        if (numerics[i] === undefined)
+          continue;
+        var dump = memoryAllocatorDumps[i];
+        if (dump === SUBALLOCATION_CONTEXT)
+          return;  // Suballocation internal rows have no provided size.
+        dump.infos.forEach(function(dumpInfo) {
+          getAndUpdateEntry(infoTypeToEntry, dumpInfo.type, function(newEntry) {
+            if (numerics.length === 1 /* single-selection mode */) {
+              newEntry.providedSize = dumpInfo.providedSize;
+              newEntry.dependencySize = dumpInfo.dependencySize;
+            }
+          });
+        });
+      }
+
+      // Emit a warning info for every info type.
+      for (var infoType of infoTypeToEntry.keys()) {
+        var entry = infoTypeToEntry.get(infoType);
+        var messageBuilder = new SizeInfoMessageBuilder();
+        messageBuilder.append('provided size');
+        messageBuilder.appendSizeIfDefined(entry.providedSize);
+        var dependencyName;
+        switch (infoType) {
+          case PROVIDED_SIZE_LESS_THAN_AGGREGATED_CHILDREN:
+            dependencyName = 'the aggregated size of the children';
+            break;
+          case PROVIDED_SIZE_LESS_THAN_LARGEST_OWNER:
+            dependencyName = 'the size of the largest owner';
+            break;
+          default:
+            dependencyName = 'an unknown dependency';
+            break;
+        }
+        messageBuilder.append(' was less than ', dependencyName);
+        messageBuilder.appendSizeIfDefined(entry.dependencySize);
+        if (entry.count < numerics.length)
+          messageBuilder.appendSomeTimestampsQuantifier();
+        infos.push(tr.ui.analysis.createWarningInfo(messageBuilder.build()));
+      }
+    }
+  };
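
Sketch of the two kinds of messages SizeColumn above can emit for a single
selected dump; the angle-bracket placeholders stand for values computed from
the trace, and the exact byte formatting comes from tr.v.Unit:

    // Sibling overlap info (blue CIRCLED_LATIN_SMALL_LETTER_I icon):
    //   'overlaps with its sibling \'<sibling name>\' (<overlap size>)'
    // Provided-size warning (via tr.ui.analysis.createWarningInfo):
    //   'provided size (<provided size>) was less than the aggregated size
    //    of the children (<aggregated children size>)'
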
+
+  var NUMERIC_COLUMN_RULES = [
+    {
+      condition: tr.model.MemoryAllocatorDump.EFFECTIVE_SIZE_NUMERIC_NAME,
+      importance: 10,
+      columnConstructor: EffectiveSizeColumn
+    },
+    {
+      condition: tr.model.MemoryAllocatorDump.SIZE_NUMERIC_NAME,
+      importance: 9,
+      columnConstructor: SizeColumn
+    },
+    {
+      condition: 'page_size',
+      importance: 0,
+      columnConstructor: tr.ui.analysis.NumericMemoryColumn
+    },
+    {
+      condition: /size/,
+      importance: 5,
+      columnConstructor: tr.ui.analysis.NumericMemoryColumn
+    },
+    {
+      // All other columns.
+      importance: 0,
+      columnConstructor: tr.ui.analysis.NumericMemoryColumn
+    }
+  ];
+
+  var DIAGNOSTIC_COLUMN_RULES = [
+    {
+      importance: 0,
+      columnConstructor: tr.ui.analysis.StringMemoryColumn
+    }
+  ];
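
A standalone sketch of how a rule list like the two above is meant to be
consulted for a field name; the real matching is done inside
tr.ui.analysis.MemoryColumn.fromRows (not shown in this diff), so this only
approximates the condition semantics:

    function findMatchingRule(rules, fieldName) {
      return rules.find(function(rule) {
        if (rule.condition === undefined)
          return true;  // catch-all rule
        if (typeof rule.condition === 'string')
          return rule.condition === fieldName;
        return rule.condition.test(fieldName);  // RegExp condition
      });
    }
    // findMatchingRule(NUMERIC_COLUMN_RULES, 'effective_size')
    //     .columnConstructor === EffectiveSizeColumn
    // findMatchingRule(NUMERIC_COLUMN_RULES, 'objects_count')
    //     .importance === 0  // falls through to the catch-all rule
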
+
   Polymer('tr-ui-a-memory-dump-allocator-details-pane', {
     created: function() {
       this.memoryAllocatorDumps_ = undefined;
@@ -178,6 +563,17 @@
 
       var rows = this.createRows_();
       var columns = this.createColumns_(rows);
+      rows.forEach(function(rootRow) {
+        tr.ui.analysis.aggregateTableRowCellsRecursively(rootRow, columns,
+            function(contexts) {
+              // Only aggregate suballocation rows (numerics of regular rows
+              // corresponding to MADs have already been aggregated by the
+              // model in MemoryAllocatorDump.aggregateNumericsRecursively).
+              return contexts !== undefined && contexts.some(function(context) {
+                return context === SUBALLOCATION_CONTEXT;
+              });
+            });
+      });
 
       this.$.table.tableRows = rows;
       this.$.table.tableColumns = columns;
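
The predicate passed to aggregateTableRowCellsRecursively above fires only for
rows whose contexts contain the SUBALLOCATION_CONTEXT marker; a minimal
illustration (regular rows carry MemoryAllocatorDump objects as contexts, so
they are skipped):

    var isSuballocationRow = function(contexts) {
      return contexts !== undefined && contexts.some(function(context) {
        return context === SUBALLOCATION_CONTEXT;
      });
    };
    // isSuballocationRow([SUBALLOCATION_CONTEXT, undefined]) === true
    // isSuballocationRow(undefined) === false
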
@@ -211,17 +607,14 @@
       var title = definedDump.name;
       var fullName = definedDump.fullName;
 
-      // Determine at which timestamps (indices of the current selection)
-      // the dump was provided.
-      var defined = dumps.map(function(dump) {
-        return dump !== undefined;
+      // Transform a chronological list of memory allocator dumps into two
+      // dictionaries of cells (where each cell contains a chronological list
+      // of the values of one of its numerics or diagnostics).
+      var numericCells = tr.ui.analysis.createCells(dumps, function(dump) {
+        return dump.numerics;
       });
-
-      // Transform a chronological list of memory allocator dumps into a
-      // dictionary of cells (where each cell contains a chronological list
-      // of the values of its attribute).
-      var cells = tr.ui.analysis.createCells(dumps, function(dump) {
-        return dump.attributes;
+      var diagnosticCells = tr.ui.analysis.createCells(dumps, function(dump) {
+        return dump.diagnostics;
       });
 
       // Determine whether the memory allocator dump is a suballocation. A
@@ -262,9 +655,10 @@
 
       var row = {
         title: title,
-        fullName: fullName,
-        defined: defined,
-        cells: cells,
+        fullNames: [fullName],
+        contexts: dumps,
+        numericCells: numericCells,
+        diagnosticCells: diagnosticCells,
         suballocatedBy: suballocatedBy
       };
 
@@ -297,8 +691,6 @@
       if (suballocationClassificationRootNode !== undefined) {
         var suballocationRow = this.createSuballocationRowRecursively_(
             'suballocations', suballocationClassificationRootNode);
-        tr.ui.analysis.aggregateTableRowCellsRecursively(
-            suballocationRow, 'cells');
         subRows.push(suballocationRow);
       }
 
@@ -330,9 +722,55 @@
         var currentNode = nextNode;
       }
 
-      if (currentNode.row !== undefined)
-        throw new Error('Multiple suballocations with the same owner name');
-      currentNode.row = suballocationRow;
+      var existingRow = currentNode.row;
+      if (existingRow !== undefined) {
+        // On rare occasions it can happen that one dump (e.g. sqlite) owns
+        // different suballocations at different timestamps (e.g.
+        // malloc/allocated_objects/_7d35 and malloc/allocated_objects/_511e).
+        // When this happens, we merge the two suballocations into a single row
+        // (malloc/allocated_objects/suballocations/sqlite).
+        for (var i = 0; i < suballocationRow.contexts.length; i++) {
+          var newContext = suballocationRow.contexts[i];
+          if (newContext === undefined)
+            continue;
+
+          if (existingRow.contexts[i] !== undefined)
+            throw new Error('Multiple suballocations with the same owner name');
+
+          existingRow.contexts[i] = newContext;
+          ['numericCells', 'diagnosticCells'].forEach(function(cellKey) {
+            var suballocationCells = suballocationRow[cellKey];
+            if (suballocationCells === undefined)
+              return;
+            tr.b.iterItems(suballocationCells, function(cellName, cell) {
+              if (cell === undefined)
+                return;
+              var fields = cell.fields;
+              if (fields === undefined)
+                return;
+              var field = fields[i];
+              if (field === undefined)
+                return;
+              var existingCells = existingRow[cellKey];
+              if (existingCells === undefined) {
+                existingCells = {};
+                existingRow[cellKey] = existingCells;
+              }
+              var existingCell = existingCells[cellName];
+              if (existingCell === undefined) {
+                existingCell = new tr.ui.analysis.MemoryCell(
+                    new Array(fields.length));
+                existingCells[cellName] = existingCell;
+              }
+              existingCell.fields[i] = field;
+            });
+          });
+        }
+        existingRow.fullNames.push.apply(
+            existingRow.fullNames, suballocationRow.fullNames);
+      } else {
+        currentNode.row = suballocationRow;
+      }
 
       return rootNode;
     },
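
A data-only sketch of the merge handled above, using the dump names from the
comment and plain objects as stand-ins for MemoryAllocatorDumps:

    var dumpAtT0 = {};  // placeholder for the dump defined at the 1st timestamp
    var dumpAtT1 = {};  // placeholder for the dump defined at the 2nd timestamp
    var existingRow = {
      fullNames: ['malloc/allocated_objects/_7d35'],
      contexts: [dumpAtT0, undefined]
    };
    var suballocationRow = {
      fullNames: ['malloc/allocated_objects/_511e'],
      contexts: [undefined, dumpAtT1]
    };
    // After the merge, the surviving row has
    //   contexts  === [dumpAtT0, dumpAtT1]
    //   fullNames === ['malloc/allocated_objects/_7d35',
    //                  'malloc/allocated_objects/_511e']
    // and its numeric/diagnostic cells carry the fields of both rows.
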
@@ -362,7 +800,7 @@
         // this case, the suballocation from the ancestor must be mapped to
         // 'malloc/allocated_objects/suballocations/skia/<unspecified>' so
         // that 'malloc/allocated_objects/suballocations/skia' could
-        // aggregate the attributes of the two suballocations properly.
+        // aggregate the numerics of the two suballocations properly.
         var row = node.row;
         row.title = '<unspecified>';
         row.suballocation = true;
@@ -372,37 +810,45 @@
       // An internal row of the suballocation tree is assumed to be defined
       // at a given timestamp if at least one of its sub-rows is defined at
       // the timestamp.
-      var defined = new Array(subRows[0].defined.length);
+      var contexts = new Array(subRows[0].contexts.length);
       for (var i = 0; i < subRows.length; i++) {
-        subRows[i].defined.forEach(function(definedValue, index) {
-          defined[index] = defined[index] || definedValue;
+        subRows[i].contexts.forEach(function(subContext, index) {
+          if (subContext !== undefined)
+            contexts[index] = SUBALLOCATION_CONTEXT;
         });
       }
 
       return {
         title: name,
         suballocation: true,
-        defined: defined,
-        cells: {},
+        contexts: contexts,
         subRows: subRows
       };
     },
 
     createColumns_: function(rows) {
-      var titleColumn = new AllocatorDumpNameColumn('Component');
+      var titleColumn = new AllocatorDumpNameColumn();
       titleColumn.width = '200px';
 
-      var attributeColumns = tr.ui.analysis.MemoryColumn.fromRows(
-          rows, 'cells', this.aggregationMode_);
-      tr.ui.analysis.MemoryColumn.spaceEqually(attributeColumns);
-      tr.ui.analysis.MemoryColumn.sortByImportance(
-          attributeColumns, IMPORTANCE_RULES);
+      var numericColumns = tr.ui.analysis.MemoryColumn.fromRows(
+          rows, 'numericCells', this.aggregationMode_, NUMERIC_COLUMN_RULES);
+      var diagnosticColumns = tr.ui.analysis.MemoryColumn.fromRows(
+          rows, 'diagnosticCells', this.aggregationMode_,
+          DIAGNOSTIC_COLUMN_RULES);
+      var fieldColumns = numericColumns.concat(diagnosticColumns);
+      tr.ui.analysis.MemoryColumn.spaceEqually(fieldColumns);
 
-      var columns = [titleColumn].concat(attributeColumns);
+      var columns = [titleColumn].concat(fieldColumns);
       return columns;
     }
   });
 
-  return {};
+  return {
+    // All exports are for testing only.
+    SUBALLOCATION_CONTEXT: SUBALLOCATION_CONTEXT,
+    AllocatorDumpNameColumn: AllocatorDumpNameColumn,
+    EffectiveSizeColumn: EffectiveSizeColumn,
+    SizeColumn: SizeColumn
+  };
 });
 </script>
diff --git a/catapult/tracing/tracing/ui/analysis/memory_dump_allocator_details_pane_test.html b/catapult/tracing/tracing/ui/analysis/memory_dump_allocator_details_pane_test.html
index 1f41532..e3a93cc 100644
--- a/catapult/tracing/tracing/ui/analysis/memory_dump_allocator_details_pane_test.html
+++ b/catapult/tracing/tracing/ui/analysis/memory_dump_allocator_details_pane_test.html
@@ -6,56 +6,73 @@
 -->
 
 <link rel="import" href="/tracing/base/iteration_helpers.html">
+<link rel="import" href="/tracing/core/test_utils.html">
+<link rel="import" href="/tracing/model/heap_dump.html">
+<link rel="import" href="/tracing/model/memory_allocator_dump.html">
+<link rel="import" href="/tracing/model/memory_dump_test_utils.html">
 <link rel="import"
     href="/tracing/ui/analysis/memory_dump_allocator_details_pane.html">
 <link rel="import"
     href="/tracing/ui/analysis/memory_dump_sub_view_test_utils.html">
 <link rel="import" href="/tracing/ui/analysis/memory_dump_sub_view_util.html">
-<link rel="import" href="/tracing/core/test_utils.html">
-<link rel="import" href="/tracing/model/attribute.html">
-<link rel="import" href="/tracing/model/heap_dump.html">
-<link rel="import" href="/tracing/model/memory_allocator_dump.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <script>
 'use strict';
 
 tr.b.unittest.testSuite(function() {
   var MemoryAllocatorDump = tr.model.MemoryAllocatorDump;
-  var MemoryAllocatorDumpLink = tr.model.MemoryAllocatorDumpLink;
-  var ScalarAttribute = tr.model.ScalarAttribute;
+  var ScalarNumeric = tr.v.ScalarNumeric;
+  var unitlessNumber_smallerIsBetter =
+      tr.v.Unit.byName.unitlessNumber_smallerIsBetter;
+  var sizeInBytes_smallerIsBetter =
+      tr.v.Unit.byName.sizeInBytes_smallerIsBetter;
   var HeapDump = tr.model.HeapDump;
   var AggregationMode = tr.ui.analysis.MemoryColumn.AggregationMode;
   var addGlobalMemoryDump = tr.ui.analysis.addGlobalMemoryDump;
   var addProcessMemoryDump = tr.ui.analysis.addProcessMemoryDump;
-  var checkAttributes = tr.ui.analysis.checkAttributes;
-  var checkSizeAttributes = tr.ui.analysis.checkSizeAttributes;
+  var checkNumericFields = tr.ui.analysis.checkNumericFields;
+  var checkSizeNumericFields = tr.ui.analysis.checkSizeNumericFields;
+  var checkStringFields = tr.ui.analysis.checkStringFields;
+  var checkColumnInfosAndColor = tr.ui.analysis.checkColumnInfosAndColor;
+  var checkColumns = tr.ui.analysis.checkColumns;
   var isElementDisplayed = tr.ui.analysis.isElementDisplayed;
+  var AllocatorDumpNameColumn = tr.ui.analysis.AllocatorDumpNameColumn;
+  var EffectiveSizeColumn = tr.ui.analysis.EffectiveSizeColumn;
+  var SizeColumn = tr.ui.analysis.SizeColumn;
+  var StringMemoryColumn = tr.ui.analysis.StringMemoryColumn;
+  var NumericMemoryColumn = tr.ui.analysis.NumericMemoryColumn;
+  var newAllocatorDump = tr.model.MemoryDumpTestUtils.newAllocatorDump;
+  var newChildDump = tr.model.MemoryDumpTestUtils.newChildDump;
+  var addOwnershipLink = tr.model.MemoryDumpTestUtils.addOwnershipLink;
 
-  // TODO(petrcermak): This function is the same as newChildDump in
-  // memory_allocator_dump_test.html. It should probably be factored out.
-  function newChildDump(parentDump, name) {
-    var childDump = new MemoryAllocatorDump(
-        parentDump.containerMemoryDump, parentDump.fullName + '/' + name);
-    childDump.parent = parentDump;
-    parentDump.children.push(childDump);
-    return childDump;
+  var SUBALLOCATION_CONTEXT = tr.ui.analysis.SUBALLOCATION_CONTEXT;
+  var MemoryAllocatorDumpInfoType = tr.model.MemoryAllocatorDumpInfoType;
+  var PROVIDED_SIZE_LESS_THAN_AGGREGATED_CHILDREN =
+      MemoryAllocatorDumpInfoType.PROVIDED_SIZE_LESS_THAN_AGGREGATED_CHILDREN;
+  var PROVIDED_SIZE_LESS_THAN_LARGEST_OWNER =
+      MemoryAllocatorDumpInfoType.PROVIDED_SIZE_LESS_THAN_LARGEST_OWNER;
+
+  function addRootDumps(containerMemoryDump, rootNames, addedCallback) {
+    // Test sanity check.
+    assert.isUndefined(containerMemoryDump.memoryAllocatorDumps);
+
+    var rootDumps = rootNames.map(function(rootName) {
+      return new MemoryAllocatorDump(containerMemoryDump, rootName);
+    });
+    addedCallback.apply(null, rootDumps);
+    containerMemoryDump.memoryAllocatorDumps = rootDumps;
   }
 
-  function addOwnershipLink(ownerDump, ownedDump) {
-    assert.isUndefined(ownerDump.owns);  // Test sanity check.
-    var ownershipLink = new MemoryAllocatorDumpLink(ownerDump, ownedDump);
-    ownerDump.owns = ownershipLink;
-    ownedDump.ownedBy.push(ownershipLink);
-  }
-
-  function addSuballocationDump(ownerDump, parentDump, name, size) {
-    var suballocationDump = newChildDump(parentDump, name);
-    suballocationDump.addAttribute('size', new ScalarAttribute('size', size));
+  function newSuballocationDump(ownerDump, parentDump, name, size) {
+    var suballocationDump = newChildDump(parentDump, name, { size: size });
     if (ownerDump !== undefined)
       addOwnershipLink(ownerDump, suballocationDump);
+    return suballocationDump;
   }
 
-  function createMemoryAllocatorDumps() {
+  function createProcessMemoryDumps() {
     var model = tr.c.TestUtils.newModel(function(model) {
       var process = model.getOrCreateProcess(1);
 
@@ -63,138 +80,160 @@
       var gmd1 = addGlobalMemoryDump(model, -10);
       var pmd1 = addProcessMemoryDump(gmd1, process, -11);
       pmd1.memoryAllocatorDumps = (function() {
-        var v8Dump = new MemoryAllocatorDump(pmd1, 'v8');
-        v8Dump.addAttribute('size',
-            new ScalarAttribute('bytes', 1073741824) /* 1 GiB */);
-        v8Dump.addAttribute('inner_size',
-            new ScalarAttribute('bytes', 2097152) /* 2 MiB */);
-        v8Dump.addAttribute(
-            'objects_count', new ScalarAttribute('objects', 204));
+        var v8Dump = newAllocatorDump(pmd1, 'v8', {
+          size: 1073741824 /* 1 GiB */,
+          inner_size: 2097152 /* 2 MiB */,
+          objects_count: new ScalarNumeric(unitlessNumber_smallerIsBetter, 204)
+        });
 
-        var v8HeapsDump = newChildDump(v8Dump, 'heaps');
-        v8HeapsDump.addAttribute('size',
-            new ScalarAttribute('bytes', 805306368) /* 768 MiB */);
-        var v8Heap42Dump = newChildDump(v8HeapsDump, 'heap42');
-        v8Heap42Dump.addAttribute('size',
-            new ScalarAttribute('bytes', 804782080) /* 767.5 MiB */);
+        var v8HeapsDump = newChildDump(v8Dump, 'heaps',
+            { size: 805306368 /* 768 MiB */ });
+        newChildDump(v8HeapsDump, 'heap42',
+            { size: 804782080 /* 767.5 MiB */ });
 
         var v8ObjectsDump = newChildDump(v8Dump, 'objects');
-        var v8FooDump = newChildDump(v8ObjectsDump, 'foo');
-        v8FooDump.addAttribute('size',
-            new ScalarAttribute('bytes', 1022976) /* 999 KiB */);
-        var v8BarDump = newChildDump(v8ObjectsDump, 'bar');
-        v8BarDump.addAttribute('size',
-            new ScalarAttribute('bytes', 1024000) /* 1000 KiB */);
+        v8ObjectsDump.addDiagnostic('url', 'http://example.com');
+        newChildDump(v8ObjectsDump, 'foo', { size: 1022976 /* 999 KiB */ });
+        newChildDump(v8ObjectsDump, 'bar', { size: 1024000 /* 1000 KiB */ });
 
-        var oilpanDump = new MemoryAllocatorDump(pmd1, 'oilpan');
-        oilpanDump.addAttribute('size',
-            new ScalarAttribute('bytes', 125829120) /* 120 MiB */);
-        addSuballocationDump(
+        var oilpanDump = newAllocatorDump(pmd1, 'oilpan',
+            { size: 125829120 /* 120 MiB */ });
+        newSuballocationDump(
             oilpanDump, v8Dump, '__99BEAD', 150994944 /* 144 MiB */);
 
         var oilpanSubDump = newChildDump(oilpanDump, 'animals');
 
-        var oilpanSubDump1 = newChildDump(oilpanSubDump, 'cow');
-        oilpanSubDump1.addAttribute('size',
-            new ScalarAttribute('bytes', 33554432) /* 32 MiB */);
-        addSuballocationDump(
+        var oilpanSubDump1 = newChildDump(oilpanSubDump, 'cow',
+            { size: 33554432 /* 32 MiB */ });
+        newSuballocationDump(
             oilpanSubDump1, v8Dump, '__42BEEF', 67108864 /* 64 MiB */);
 
-        var oilpanSubDump2 = newChildDump(oilpanSubDump, 'chicken');
-        oilpanSubDump2.addAttribute('size',
-            new ScalarAttribute('bytes', 16777216) /* 16 MiB */);
-        addSuballocationDump(
+        var oilpanSubDump2 = newChildDump(oilpanSubDump, 'chicken',
+            { size: 16777216 /* 16 MiB */ });
+        newSuballocationDump(
             oilpanSubDump2, v8Dump, '__68DEAD', 33554432 /* 32 MiB */);
 
-        var skiaDump = new MemoryAllocatorDump(pmd1, 'skia');
-        skiaDump.addAttribute('size',
-            new ScalarAttribute('bytes', 8388608) /* 8 MiB */);
-        addSuballocationDump(
+        var skiaDump = newAllocatorDump(pmd1, 'skia',
+            { size: 8388608 /* 8 MiB */ });
+        var suballocationDump = newSuballocationDump(
             skiaDump, v8Dump, '__15FADE', 16777216 /* 16 MiB */);
 
-        return [v8Dump, oilpanDump, skiaDump];
+        var ccDump = newAllocatorDump(pmd1, 'cc',
+            { size: 4194304 /* 4 MiB */ });
+        newSuballocationDump(
+            ccDump, v8Dump, '__12FEED', 5242880 /* 5 MiB */).addDiagnostic(
+                'url', 'localhost:1234');
+
+        return [v8Dump, oilpanDump, skiaDump, ccDump];
       })();
 
       // Second timestamp.
       var gmd2 = addGlobalMemoryDump(model, 10);
       var pmd2 = addProcessMemoryDump(gmd2, process, 11);
       pmd2.memoryAllocatorDumps = (function() {
-        var v8Dump = new MemoryAllocatorDump(pmd2, 'v8');
-        v8Dump.addAttribute('size',
-            new ScalarAttribute('bytes', 1073741824) /* 1 GiB */);
-        v8Dump.addAttribute('inner_size',
-            new ScalarAttribute('bytes', 2097152) /* 2 MiB */);
-        v8Dump.addAttribute(
-            'objects_count', new ScalarAttribute('objects', 204));
+        var v8Dump = newAllocatorDump(pmd2, 'v8', {
+          size: 1073741824 /* 1 GiB */,
+          inner_size: 2097152 /* 2 MiB */,
+          objects_count: new ScalarNumeric(unitlessNumber_smallerIsBetter, 204)
+        });
 
         var v8ObjectsDump = newChildDump(v8Dump, 'objects');
-        var v8FooDump = newChildDump(v8ObjectsDump, 'foo');
-        v8FooDump.addAttribute('size',
-            new ScalarAttribute('bytes', 1020928) /* 997 KiB */);
-        var v8BarDump = newChildDump(v8ObjectsDump, 'bar');
-        v8BarDump.addAttribute('size',
-            new ScalarAttribute('bytes', 1026048) /* 1002 KiB */);
+        v8ObjectsDump.addDiagnostic('url', 'http://sample.net');
+        newChildDump(v8ObjectsDump, 'foo', { size: 1020928 /* 997 KiB */ });
+        newChildDump(v8ObjectsDump, 'bar', { size: 1026048 /* 1002 KiB */ });
 
-        addSuballocationDump(
+        newSuballocationDump(
             undefined, v8Dump, '__99BEAD', 268435456 /* 256 MiB */);
 
-        return [v8Dump];
+        var ccDump = newAllocatorDump(pmd2, 'cc',
+            { size: 7340032 /* 7 MiB */ });
+        newSuballocationDump(
+            ccDump, v8Dump, '__13DEED', 11534336 /* 11 MiB */).addDiagnostic(
+                'url', 'localhost:5678');
+
+        return [v8Dump, ccDump];
       })();
     });
 
-    return model.processes[1].memoryDumps.map(function(pmd) {
-      return pmd.getMemoryAllocatorDumpByFullName('v8');
+    return model.processes[1].memoryDumps;
+  }
+
+  function createSizeFields(values) {
+    return values.map(function(value) {
+      if (value === undefined)
+        return undefined;
+      return new ScalarNumeric(sizeInBytes_smallerIsBetter, value);
     });
   }
 
-  function checkColumns(columns, expectedAggregationMode) {
-    var EXPECTED_COLUMN_NAMES = [
-      'Component',
-      'effective_size',
-      'size',
-      'inner_size',
-      'objects_count'
-    ];
+  var EXPECTED_COLUMNS = [
+    { title: 'Component', type: AllocatorDumpNameColumn, noAggregation: true },
+    { title: 'effective_size', type: EffectiveSizeColumn },
+    { title: 'size', type: SizeColumn },
+    { title: 'inner_size', type: NumericMemoryColumn },
+    { title: 'objects_count', type: NumericMemoryColumn },
+    { title: 'url', type: StringMemoryColumn }
+  ];
 
-    // First column doesn't change value over time (no aggregation).
-    var VARIABLE_CELLS_START_INDEX = 1;
-
-    // Check column names.
-    assert.lengthOf(columns, EXPECTED_COLUMN_NAMES.length);
-    for (var i = 0; i < EXPECTED_COLUMN_NAMES.length; i++)
-      assert.equal(columns[i].title, EXPECTED_COLUMN_NAMES[i]);
-
-    // Check aggregation modes.
-    for (var i = 0; i < EXPECTED_COLUMN_NAMES.length; i++) {
-      assert.strictEqual(columns[i].aggregationMode,
-          i < VARIABLE_CELLS_START_INDEX ? undefined : expectedAggregationMode);
-    }
-  }
-
-  function checkRow(columns, row, expectedTitle, expectedSizes,
-      expectedEffectiveSizes, expectedInnerSizes, expectedObjectCounts,
-      expectedSubRowCount, expectedDefinedValues) {
+  function checkRow(columns, row, expectations) {
     var formattedTitle = columns[0].formatTitle(row);
+    var expectedTitle = expectations.title;
     if (typeof expectedTitle === 'function')
       expectedTitle(formattedTitle);
     else
       assert.equal(formattedTitle, expectedTitle);
 
-    checkSizeAttributes(row, columns[1], expectedSizes);
-    checkSizeAttributes(row, columns[2], expectedEffectiveSizes);
-    checkSizeAttributes(row, columns[3], expectedInnerSizes);
-    checkAttributes(
-        row, columns[4], expectedObjectCounts, ScalarAttribute, 'objects');
+    checkSizeNumericFields(row, columns[1], expectations.size);
+    checkSizeNumericFields(row, columns[2], expectations.effective_size);
+    checkSizeNumericFields(row, columns[3], expectations.inner_size);
+    checkNumericFields(row, columns[4], expectations.objects_count,
+        unitlessNumber_smallerIsBetter);
+    checkStringFields(row, columns[5], expectations.url);
 
+    var expectedSubRowCount = expectations.sub_row_count;
     if (expectedSubRowCount === undefined)
       assert.isUndefined(row.subRows);
     else
       assert.lengthOf(row.subRows, expectedSubRowCount);
 
-    if (expectedDefinedValues)
-      assert.deepEqual(tr.b.asArray(row.defined), expectedDefinedValues);
+    var expectedContexts = expectations.contexts;
+    if (expectedContexts === undefined)
+      assert.isUndefined(row.contexts);
     else
-      assert.isUndefined(row.defined);
+      assert.deepEqual(tr.b.asArray(row.contexts), expectedContexts);
+  }
+
+  function buildProcessMemoryDumps(count, preFinalizeDumpsCallback) {
+    var pmds = new Array(count);
+    tr.c.TestUtils.newModel(function(model) {
+      var process = model.getOrCreateProcess(1);
+      for (var i = 0; i < count; i++) {
+        var timestamp = 10 + i;
+        var gmd = addGlobalMemoryDump(model, timestamp);
+        pmds[i] = addProcessMemoryDump(gmd, process, timestamp);
+      }
+      preFinalizeDumpsCallback(pmds);
+    });
+    return pmds;
+  }
+
+  function getAllocatorDumps(pmds, fullName) {
+    return pmds.map(function(pmd) {
+      if (pmd === undefined)
+        return undefined;
+      return pmd.getMemoryAllocatorDumpByFullName(fullName);
+    });
+  }
+
+  function checkAllocatorPaneColumnInfosAndColor(
+      column, dumps, numericName, expectedInfos) {
+    var numerics = dumps.map(function(dump) {
+      if (dump === undefined)
+        return undefined;
+      return dump.numerics[numericName];
+    });
+    checkColumnInfosAndColor(
+        column, numerics, dumps, expectedInfos, undefined /* no color */);
   }
 
   test('instantiate_empty', function() {
@@ -208,11 +247,11 @@
   });
 
   test('instantiate_single', function() {
-    var memoryAllocatorDumps = createMemoryAllocatorDumps().slice(0, 1);
+    var processMemoryDumps = createProcessMemoryDumps().slice(0, 1);
 
     var viewEl = tr.ui.analysis.createTestPane(
         'tr-ui-a-memory-dump-allocator-details-pane');
-    viewEl.memoryAllocatorDumps = memoryAllocatorDumps;
+    viewEl.memoryAllocatorDumps = getAllocatorDumps(processMemoryDumps, 'v8');
     viewEl.rebuild();
     assert.deepEqual(viewEl.requestedChildPanes, [undefined]);
     this.addHTMLOutput(viewEl);
@@ -223,70 +262,129 @@
 
     var table = viewEl.$.table;
     var columns = table.tableColumns;
-    checkColumns(columns, undefined /* no aggregation */);
+    checkColumns(columns, EXPECTED_COLUMNS, undefined /* no aggregation */);
     var rows = table.tableRows;
     assert.lengthOf(rows, 1);
 
     // Check the rows of the table.
     var rootRow = rows[0];
-    checkRow(columns, rootRow, 'v8', [941571072], [1075788800], [2097152],
-        [204], 3, [true]);
+    checkRow(columns, rootRow, {
+      title: 'v8',
+      size: [942619648],
+      effective_size: [1081031680],
+      inner_size: [2097152],
+      objects_count: [204],
+      sub_row_count: 3,
+      contexts: getAllocatorDumps(processMemoryDumps, 'v8'),
+    });
 
     var heapsSubRow = rootRow.subRows[0];
-    checkRow(columns, heapsSubRow, 'heaps', [805306368], [805306368], undefined,
-        undefined, 2, [true]);
+    checkRow(columns, heapsSubRow, {
+      title: 'heaps',
+      size: [805306368],
+      effective_size: [805306368],
+      sub_row_count: 2,
+      contexts: getAllocatorDumps(processMemoryDumps, 'v8/heaps'),
+    });
 
     var heapsUnspecifiedSubRow = heapsSubRow.subRows[0];
-    checkRow(columns, heapsUnspecifiedSubRow, '<unspecified>', [524288],
-        [524288], undefined, undefined, undefined, [true]);
+    checkRow(columns, heapsUnspecifiedSubRow, {
+      title: '<unspecified>',
+      size: [524288],
+      effective_size: [524288],
+      contexts: getAllocatorDumps(processMemoryDumps, 'v8/heaps/<unspecified>'),
+    });
 
     var suballocationsSubRow = rootRow.subRows[2];
-    checkRow(columns, suballocationsSubRow, function(formattedTitle) {
-      assert.equal(formattedTitle.textContent, 'suballocations');
-      assert.equal(formattedTitle.title, '');
-    }, [134217728], [268435456], undefined, undefined, 2, [true]);
+    checkRow(columns, suballocationsSubRow, {
+      title: function(formattedTitle) {
+        assert.equal(formattedTitle.textContent, 'suballocations');
+        assert.equal(formattedTitle.title, '');
+      },
+      size: [135266304],
+      effective_size: [273678336],
+      sub_row_count: 3,
+      contexts: [SUBALLOCATION_CONTEXT],
+    });
 
     var oilpanSuballocationSubRow = suballocationsSubRow.subRows[0];
-    checkRow(columns, oilpanSuballocationSubRow, function(formattedTitle) {
-      assert.equal(formattedTitle.textContent, 'oilpan');
-      assert.equal(formattedTitle.title, '');
-    }, [125829120], [251658240], undefined, undefined, 2, [true]);
+    checkRow(columns, oilpanSuballocationSubRow, {
+      title: function(formattedTitle) {
+        assert.equal(formattedTitle.textContent, 'oilpan');
+        assert.equal(formattedTitle.title, '');
+      },
+      size: [125829120],
+      effective_size: [251658240],
+      sub_row_count: 2,
+      contexts: [SUBALLOCATION_CONTEXT],
+    });
 
     var oilpanUnspecifiedSuballocationSubRow =
         oilpanSuballocationSubRow.subRows[0];
-    checkRow(columns, oilpanUnspecifiedSuballocationSubRow,
-        function(formattedTitle) {
-          assert.equal(formattedTitle.textContent, '<unspecified>');
-          assert.equal(formattedTitle.title, 'v8/__99BEAD');
-        }, [75497472], [150994944], undefined, undefined, undefined, [true]);
+    checkRow(columns, oilpanUnspecifiedSuballocationSubRow, {
+      title: function(formattedTitle) {
+        assert.equal(formattedTitle.textContent, '<unspecified>');
+        assert.equal(formattedTitle.title, 'v8/__99BEAD');
+      },
+      size: [75497472],
+      effective_size: [150994944],
+      contexts: getAllocatorDumps(processMemoryDumps, 'v8/__99BEAD'),
+    });
 
     var oilpanAnimalsSuballocationSubRow = oilpanSuballocationSubRow.subRows[1];
-    checkRow(columns, oilpanAnimalsSuballocationSubRow,
-        function(formattedTitle) {
-          assert.equal(formattedTitle.textContent, 'animals');
-          assert.equal(formattedTitle.title, '');
-        }, [50331648], [100663296], undefined, undefined, 2, [true]);
+    checkRow(columns, oilpanAnimalsSuballocationSubRow, {
+      title: function(formattedTitle) {
+        assert.equal(formattedTitle.textContent, 'animals');
+        assert.equal(formattedTitle.title, '');
+      },
+      size: [50331648],
+      effective_size: [100663296],
+      sub_row_count: 2,
+      contexts: [SUBALLOCATION_CONTEXT],
+    });
 
     var oilpanCowSuballocationSubRow =
         oilpanAnimalsSuballocationSubRow.subRows[0];
-    checkRow(columns, oilpanCowSuballocationSubRow, function(formattedTitle) {
-      assert.equal(formattedTitle.textContent, 'cow');
-      assert.equal(formattedTitle.title, 'v8/__42BEEF');
-    }, [33554432], [67108864], undefined, undefined, undefined, [true]);
+    checkRow(columns, oilpanCowSuballocationSubRow, {
+      title: function(formattedTitle) {
+        assert.equal(formattedTitle.textContent, 'cow');
+        assert.equal(formattedTitle.title, 'v8/__42BEEF');
+      },
+      size: [33554432],
+      effective_size: [67108864],
+      contexts: getAllocatorDumps(processMemoryDumps, 'v8/__42BEEF'),
+    });
 
     var skiaSuballocationSubRow = suballocationsSubRow.subRows[1];
-    checkRow(columns, skiaSuballocationSubRow, function(formattedTitle) {
-      assert.equal(formattedTitle.textContent, 'skia');
-      assert.equal(formattedTitle.title, 'v8/__15FADE');
-    }, [8388608], [16777216], undefined, undefined, undefined, [true]);
+    checkRow(columns, skiaSuballocationSubRow, {
+      title: function(formattedTitle) {
+        assert.equal(formattedTitle.textContent, 'skia');
+        assert.equal(formattedTitle.title, 'v8/__15FADE');
+      },
+      size: [8388608],
+      effective_size: [16777216],
+      contexts: getAllocatorDumps(processMemoryDumps, 'v8/__15FADE'),
+    });
+
+    var ccSuballocationSubRow = suballocationsSubRow.subRows[2];
+    checkRow(columns, ccSuballocationSubRow, {
+      title: function(formattedTitle) {
+        assert.equal(formattedTitle.textContent, 'cc');
+        assert.equal(formattedTitle.title, 'v8/__12FEED');
+      },
+      size: [1048576],
+      effective_size: [5242880],
+      url: ['localhost:1234'],
+      contexts: getAllocatorDumps(processMemoryDumps, 'v8/__12FEED')
+    });
   });
 
   test('instantiate_multipleDiff', function() {
-    var memoryAllocatorDumps = createMemoryAllocatorDumps();
+    var processMemoryDumps = createProcessMemoryDumps();
 
     var viewEl = tr.ui.analysis.createTestPane(
         'tr-ui-a-memory-dump-allocator-details-pane');
-    viewEl.memoryAllocatorDumps = memoryAllocatorDumps;
+    viewEl.memoryAllocatorDumps = getAllocatorDumps(processMemoryDumps, 'v8');
     viewEl.aggregationMode = AggregationMode.DIFF;
     viewEl.rebuild();
     assert.deepEqual(viewEl.requestedChildPanes, [undefined]);
@@ -298,83 +396,140 @@
 
     var table = viewEl.$.table;
     var columns = table.tableColumns;
-    checkColumns(columns, AggregationMode.DIFF);
+    checkColumns(columns, EXPECTED_COLUMNS, AggregationMode.DIFF);
     var rows = table.tableRows;
     assert.lengthOf(rows, 1);
 
     // Check the rows of the table.
     var rootRow = rows[0];
-    checkRow(columns, rootRow, 'v8', [941571072, 1073741824],
-        [1075788800, 1073741824], [2097152, 2097152], [204, 204], 4,
-        [true, true]);
+    checkRow(columns, rootRow, {
+      title: 'v8',
+      size: [942619648, 1066401792],
+      effective_size: [1081031680, 1073741824],
+      inner_size: [2097152, 2097152],
+      objects_count: [204, 204],
+      sub_row_count: 4,
+      contexts: getAllocatorDumps(processMemoryDumps, 'v8'),
+    });
 
     var heapsSubRow = rootRow.subRows[0];
-    checkRow(columns, heapsSubRow, 'heaps', [805306368, undefined],
-        [805306368, undefined], undefined, undefined, 2, [true, undefined]);
+    checkRow(columns, heapsSubRow, {
+      title: 'heaps',
+      size: [805306368, undefined],
+      effective_size: [805306368, undefined],
+      sub_row_count: 2,
+      contexts: getAllocatorDumps(processMemoryDumps, 'v8/heaps'),
+    });
 
     var heapsUnspecifiedSubRow = heapsSubRow.subRows[0];
-    checkRow(columns, heapsUnspecifiedSubRow, '<unspecified>',
-        [524288, undefined], [524288, undefined], undefined, undefined,
-        undefined, [true, undefined]);
+    checkRow(columns, heapsUnspecifiedSubRow, {
+      title: '<unspecified>',
+      size: [524288, undefined],
+      effective_size: [524288, undefined],
+      contexts: getAllocatorDumps(processMemoryDumps, 'v8/heaps/<unspecified>'),
+    });
 
     var unspecifiedSubRow = rootRow.subRows[2];
-    checkRow(columns, unspecifiedSubRow, '<unspecified>',
-        [undefined, 803259392], [undefined, 803259392], undefined, undefined,
-        undefined, [undefined, true]);
+    checkRow(columns, unspecifiedSubRow, {
+      title: '<unspecified>',
+      size: [undefined, 791725056],
+      effective_size: [undefined, 791725056],
+      contexts: getAllocatorDumps(processMemoryDumps, 'v8/<unspecified>'),
+    });
 
     var suballocationsSubRow = rootRow.subRows[3];
-    checkRow(columns, suballocationsSubRow, function(formattedTitle) {
-      assert.equal(formattedTitle.textContent, 'suballocations');
-      assert.equal(formattedTitle.title, '');
-    }, [134217728, 268435456], [268435456, 268435456], undefined, undefined,
-        2, [true, true]);
+    checkRow(columns, suballocationsSubRow, {
+      title: function(formattedTitle) {
+        assert.equal(formattedTitle.textContent, 'suballocations');
+        assert.equal(formattedTitle.title, '');
+      },
+      size: [135266304, 272629760],
+      effective_size: [273678336, 279969792],
+      sub_row_count: 3,
+      contexts: [SUBALLOCATION_CONTEXT, SUBALLOCATION_CONTEXT],
+    });
 
     var oilpanSuballocationSubRow = suballocationsSubRow.subRows[0];
-    checkRow(columns, oilpanSuballocationSubRow, function(formattedTitle) {
-      assert.equal(formattedTitle.textContent, 'oilpan');
-      assert.equal(formattedTitle.title, '');
-    }, [125829120, 268435456], [251658240, 268435456], undefined, undefined, 2,
-        [true, true]);
+    checkRow(columns, oilpanSuballocationSubRow, {
+      title: function(formattedTitle) {
+        assert.equal(formattedTitle.textContent, 'oilpan');
+        assert.equal(formattedTitle.title, '');
+      },
+      size: [125829120, 268435456],
+      effective_size: [251658240, 268435456],
+      sub_row_count: 2,
+      contexts: [SUBALLOCATION_CONTEXT, SUBALLOCATION_CONTEXT],
+    });
 
     var oilpanUnspecifiedSuballocationSubRow =
         oilpanSuballocationSubRow.subRows[0];
-    checkRow(columns, oilpanUnspecifiedSuballocationSubRow,
-        function(formattedTitle) {
-          assert.equal(formattedTitle.textContent, '<unspecified>');
-          assert.equal(formattedTitle.title, 'v8/__99BEAD');
-        }, [75497472, 268435456], [150994944, 268435456], undefined, undefined,
-            undefined, [true, true]);
+    checkRow(columns, oilpanUnspecifiedSuballocationSubRow, {
+      title: function(formattedTitle) {
+        assert.equal(formattedTitle.textContent, '<unspecified>');
+        assert.equal(formattedTitle.title, 'v8/__99BEAD');
+      },
+      size: [75497472, 268435456],
+      effective_size: [150994944, 268435456],
+      contexts: getAllocatorDumps(processMemoryDumps, 'v8/__99BEAD'),
+    });
 
     var oilpanAnimalsSuballocationSubRow = oilpanSuballocationSubRow.subRows[1];
-    checkRow(columns, oilpanAnimalsSuballocationSubRow,
-        function(formattedTitle) {
-          assert.equal(formattedTitle.textContent, 'animals');
-          assert.equal(formattedTitle.title, '');
-        }, [50331648, undefined], [100663296, undefined], undefined, undefined,
-            2, [true, undefined]);
+    checkRow(columns, oilpanAnimalsSuballocationSubRow, {
+      title: function(formattedTitle) {
+        assert.equal(formattedTitle.textContent, 'animals');
+        assert.equal(formattedTitle.title, '');
+      },
+      size: [50331648, undefined],
+      effective_size: [100663296, undefined],
+      sub_row_count: 2,
+      contexts: [SUBALLOCATION_CONTEXT, undefined],
+    });
 
     var oilpanCowSuballocationSubRow =
         oilpanAnimalsSuballocationSubRow.subRows[0];
-    checkRow(columns, oilpanCowSuballocationSubRow, function(formattedTitle) {
-      assert.equal(formattedTitle.textContent, 'cow');
-      assert.equal(formattedTitle.title, 'v8/__42BEEF');
-    }, [33554432, undefined], [67108864, undefined], undefined, undefined,
-        undefined, [true, undefined]);
+    checkRow(columns, oilpanCowSuballocationSubRow, {
+      title: function(formattedTitle) {
+        assert.equal(formattedTitle.textContent, 'cow');
+        assert.equal(formattedTitle.title, 'v8/__42BEEF');
+      },
+      size: [33554432, undefined],
+      effective_size: [67108864, undefined],
+      contexts: getAllocatorDumps(processMemoryDumps, 'v8/__42BEEF'),
+    });
 
     var skiaSuballocationSubRow = suballocationsSubRow.subRows[1];
-    checkRow(columns, skiaSuballocationSubRow, function(formattedTitle) {
-      assert.equal(formattedTitle.textContent, 'skia');
-      assert.equal(formattedTitle.title, 'v8/__15FADE');
-    }, [8388608, undefined], [16777216, undefined], undefined, undefined,
-        undefined, [true, undefined]);
+    checkRow(columns, skiaSuballocationSubRow, {
+      title: function(formattedTitle) {
+        assert.equal(formattedTitle.textContent, 'skia');
+        assert.equal(formattedTitle.title, 'v8/__15FADE');
+      },
+      size: [8388608, undefined],
+      effective_size: [16777216, undefined],
+      contexts: getAllocatorDumps(processMemoryDumps, 'v8/__15FADE'),
+    });
+
+    var ccSuballocationSubRow = suballocationsSubRow.subRows[2];
+    checkRow(columns, ccSuballocationSubRow, {
+      title: function(formattedTitle) {
+        assert.equal(formattedTitle.textContent, 'cc');
+        assert.equal(formattedTitle.title, 'v8/__12FEED, v8/__13DEED');
+      },
+      size: [1048576, 4194304],
+      effective_size: [5242880, 11534336],
+      url: ['localhost:1234', 'localhost:5678'],
+      contexts: [
+        processMemoryDumps[0].getMemoryAllocatorDumpByFullName('v8/__12FEED'),
+        processMemoryDumps[1].getMemoryAllocatorDumpByFullName('v8/__13DEED')
+      ]
+    });
   });
 
   test('instantiate_multipleMax', function() {
-    var memoryAllocatorDumps = createMemoryAllocatorDumps();
+    var processMemoryDumps = createProcessMemoryDumps();
 
     var viewEl = tr.ui.analysis.createTestPane(
         'tr-ui-a-memory-dump-allocator-details-pane');
-    viewEl.memoryAllocatorDumps = memoryAllocatorDumps;
+    viewEl.memoryAllocatorDumps = getAllocatorDumps(processMemoryDumps, 'v8');
     viewEl.aggregationMode = AggregationMode.MAX;
     viewEl.rebuild();
     assert.deepEqual(viewEl.requestedChildPanes, [undefined]);
@@ -387,18 +542,18 @@
     // Just check that the aggregation mode was propagated to the columns.
     var table = viewEl.$.table;
     var columns = table.tableColumns;
-    checkColumns(columns, AggregationMode.MAX);
+    checkColumns(columns, EXPECTED_COLUMNS, AggregationMode.MAX);
     var rows = table.tableRows;
     assert.lengthOf(rows, 1);
   });
 
   test('instantiate_multipleWithUndefined', function() {
-    var memoryAllocatorDumps = createMemoryAllocatorDumps();
-    memoryAllocatorDumps.splice(1, 0, undefined);
+    var processMemoryDumps = createProcessMemoryDumps();
+    processMemoryDumps.splice(1, 0, undefined);
 
     var viewEl = tr.ui.analysis.createTestPane(
         'tr-ui-a-memory-dump-allocator-details-pane');
-    viewEl.memoryAllocatorDumps = memoryAllocatorDumps;
+    viewEl.memoryAllocatorDumps = getAllocatorDumps(processMemoryDumps, 'v8');
     viewEl.aggregationMode = AggregationMode.DIFF;
     viewEl.rebuild();
     assert.deepEqual(viewEl.requestedChildPanes, [undefined]);
@@ -410,72 +565,55 @@
 
     var table = viewEl.$.table;
     var columns = table.tableColumns;
-    checkColumns(columns, AggregationMode.DIFF);
+    checkColumns(columns, EXPECTED_COLUMNS, AggregationMode.DIFF);
     var rows = table.tableRows;
     assert.lengthOf(rows, 1);
 
     // Check only a few rows of the table.
     var rootRow = rows[0];
-    checkRow(columns, rootRow, 'v8', [941571072, undefined, 1073741824],
-        [1075788800, undefined, 1073741824], [2097152, undefined, 2097152],
-        [204, undefined, 204], 4, [true, false, true]);
+    checkRow(columns, rootRow, {
+      title: 'v8',
+      size: [942619648, undefined, 1066401792],
+      effective_size: [1081031680, undefined, 1073741824],
+      inner_size: [2097152, undefined, 2097152],
+      objects_count: [204, undefined, 204],
+      sub_row_count: 4,
+      contexts: getAllocatorDumps(processMemoryDumps, 'v8'),
+    });
 
     var unspecifiedSubRow = rootRow.subRows[2];
-    checkRow(columns, unspecifiedSubRow, '<unspecified>',
-        [undefined, undefined, 803259392], [undefined, undefined, 803259392],
-        undefined, undefined, undefined, [undefined, undefined, true]);
+    checkRow(columns, unspecifiedSubRow, {
+      title: '<unspecified>',
+      size: [undefined, undefined, 791725056],
+      effective_size: [undefined, undefined, 791725056],
+      contexts: getAllocatorDumps(processMemoryDumps, 'v8/<unspecified>'),
+    });
 
     var suballocationsSubRow = rootRow.subRows[3];
-    checkRow(columns, suballocationsSubRow, function(formattedTitle) {
-      assert.equal(formattedTitle.textContent, 'suballocations');
-      assert.equal(formattedTitle.title, '');
-    }, [134217728, undefined, 268435456], [268435456, undefined, 268435456],
-        undefined, undefined, 2, [true, undefined, true]);
-  });
-
-  test('sortTitles', function() {
-    var memoryAllocatorDumps = createMemoryAllocatorDumps();
-
-    var viewEl = tr.ui.analysis.createTestPane(
-        'tr-ui-a-memory-dump-allocator-details-pane');
-    viewEl.memoryAllocatorDumps = memoryAllocatorDumps;
-    viewEl.rebuild();
-
-    var table = viewEl.$.table;
-    var titleColumn = table.tableColumns[0];
-    var rootRow = table.tableRows[0];
-
-    // Non-suballocation rows.
-    var heapsSubRow = rootRow.subRows[0];
-    var objectsSubRow = rootRow.subRows[1];
-    assert.isBelow(titleColumn.cmp(heapsSubRow, objectsSubRow), 0);
-    assert.equal(titleColumn.cmp(objectsSubRow, objectsSubRow), 0);
-    assert.isAbove(titleColumn.cmp(objectsSubRow, heapsSubRow), 0);
-
-    // Suballocation rows.
-    var oilpanSubRow = rootRow.subRows[3].subRows[0];
-    var skiaSubRow = rootRow.subRows[3].subRows[1];
-    assert.isBelow(titleColumn.cmp(oilpanSubRow, skiaSubRow), 0);
-    assert.equal(titleColumn.cmp(oilpanSubRow, oilpanSubRow), 0);
-    assert.isAbove(titleColumn.cmp(skiaSubRow, oilpanSubRow), 0);
-
-    // Mixture.
-    assert.isBelow(titleColumn.cmp(heapsSubRow, oilpanSubRow), 0);
-    assert.isAbove(titleColumn.cmp(oilpanSubRow, heapsSubRow), 0);
+    checkRow(columns, suballocationsSubRow, {
+      title: function(formattedTitle) {
+        assert.equal(formattedTitle.textContent, 'suballocations');
+        assert.equal(formattedTitle.title, '');
+      },
+      size: [135266304, undefined, 272629760],
+      effective_size: [273678336, undefined, 279969792],
+      sub_row_count: 3,
+      contexts: [SUBALLOCATION_CONTEXT, undefined, SUBALLOCATION_CONTEXT],
+    });
   });
 
   test('heapDumpsPassThrough', function() {
-    var memoryAllocatorDumps = createMemoryAllocatorDumps();
-    var heapDumps = memoryAllocatorDumps.map(function(dump) {
+    var processMemoryDumps = createProcessMemoryDumps();
+    var heapDumps = processMemoryDumps.map(function(dump) {
       if (dump === undefined)
         return undefined;
-      return new HeapDump(dump.containerMemoryDump, 'v8');
+      return new HeapDump(dump, 'v8');
     });
 
     // Start by creating a component details pane without any heap dumps.
     var viewEl = tr.ui.analysis.createTestPane(
         'tr-ui-a-memory-dump-allocator-details-pane');
-    viewEl.memoryAllocatorDumps = memoryAllocatorDumps;
+    viewEl.memoryAllocatorDumps = getAllocatorDumps(processMemoryDumps, 'v8');
     viewEl.aggregationMode = AggregationMode.MAX;
     viewEl.rebuild();
 
@@ -501,5 +639,576 @@
     assert.lengthOf(viewEl.requestedChildPanes, 3);
     assert.isUndefined(viewEl.requestedChildPanes[2]);
   });
+
+  test('allocatorDumpNameColumn', function() {
+    var c = new AllocatorDumpNameColumn();
+
+    // Regular row.
+    assert.strictEqual(c.formatTitle({title: 'Regular row'}), 'Regular row');
+
+    // Sub-allocation row.
+    var row = c.formatTitle({title: 'Suballocation row', suballocation: true});
+    assert.strictEqual(row.textContent, 'Suballocation row');
+    assert.strictEqual(row.style.fontStyle, 'italic');
+  });
+
+  test('effectiveSizeColumn_noContext', function() {
+    var c = new EffectiveSizeColumn('Effective Size', 'bytes', tr.b.identity,
+        AggregationMode.DIFF);
+
+    // Single selection.
+    checkColumnInfosAndColor(c,
+        createSizeFields([128]),
+        undefined /* no context */,
+        [] /* no infos */,
+        undefined /* no color */);
+
+    // Multi-selection.
+    checkColumnInfosAndColor(c,
+        createSizeFields([128, 256, undefined, 64]),
+        undefined /* no context */,
+        [] /* no infos */,
+        undefined /* no color */);
+  });
+
+  test('effectiveSizeColumn_suballocationContext', function() {
+    var c = new EffectiveSizeColumn('Effective Size', 'bytes', tr.b.identity,
+        AggregationMode.MAX);
+
+    // Single selection.
+    checkColumnInfosAndColor(c,
+        createSizeFields([128]),
+        [SUBALLOCATION_CONTEXT],
+        [] /* no infos */,
+        undefined /* no color */);
+
+    // Multi-selection.
+    checkColumnInfosAndColor(c,
+        createSizeFields([undefined, 256, undefined, 64]),
+        [undefined, SUBALLOCATION_CONTEXT, SUBALLOCATION_CONTEXT,
+            SUBALLOCATION_CONTEXT],
+        [] /* no infos */,
+        undefined /* no color */);
+  });
+
+  test('effectiveSizeColumn_dumpContext_noOwnership', function() {
+    var c = new EffectiveSizeColumn('Effective Size', 'bytes', tr.b.identity,
+        AggregationMode.DIFF);
+    var pmds = buildProcessMemoryDumps(4 /* count */, function(pmds) {
+      addRootDumps(pmds[0], ['v8'], function(v8Dump) {
+        newChildDump(v8Dump, 'heaps', { size: 64 });
+      });
+      addRootDumps(pmds[2], ['v8'], function(v8Dump) {
+        newChildDump(v8Dump, 'heaps', { size: 128 });
+      });
+      addRootDumps(pmds[3], ['v8'], function(v8Dump) {});
+    });
+    var v8HeapsDumps = getAllocatorDumps(pmds, 'v8/heaps');
+
+    // Single selection.
+    checkAllocatorPaneColumnInfosAndColor(c,
+        [v8HeapsDumps[0]],
+        'effective_size',
+        [] /* no infos */);
+
+    // Multi-selection, all dumps defined.
+    checkAllocatorPaneColumnInfosAndColor(c,
+        [v8HeapsDumps[0], v8HeapsDumps[2]],
+        'effective_size',
+        [] /* no infos */);
+
+    // Multi-selection, some dumps missing.
+    checkAllocatorPaneColumnInfosAndColor(c,
+        v8HeapsDumps,
+        'effective_size',
+        [] /* no infos */);
+  });
+
+  test('effectiveSizeColumn_dumpContext_singleOwnership', function() {
+    var c = new EffectiveSizeColumn('Effective Size', 'bytes', tr.b.identity,
+        AggregationMode.MAX);
+    var pmds = buildProcessMemoryDumps(5 /* count */, function(pmds) {
+      addRootDumps(pmds[0], ['v8', 'oilpan'], function(v8Dump, oilpanDump) {
+        var v8HeapsDump = newChildDump(v8Dump, 'heaps', { size: 32 });
+        var oilpanObjectsDump =
+            newChildDump(oilpanDump, 'objects', { size: 64 });
+        addOwnershipLink(v8HeapsDump, oilpanObjectsDump);
+      });
+      addRootDumps(pmds[1], ['v8'], function(v8Dump) {
+        newChildDump(v8Dump, 'heaps', { size: 32 });
+        // Missing oilpan/objects dump.
+      });
+      addRootDumps(pmds[2], ['v8', 'oilpan'], function(v8Dump, oilpanDump) {
+        newChildDump(oilpanDump, 'objects', { size: 64 });
+        // Missing v8/heaps dump.
+      });
+      addRootDumps(pmds[3], ['v8', 'oilpan'], function(v8Dump, oilpanDump) {
+        newChildDump(v8Dump, 'heaps', { size: 32 });
+        newChildDump(oilpanDump, 'objects', { size: 64 });
+        // Missing ownership link.
+      });
+      addRootDumps(pmds[4], ['v8', 'oilpan'], function(v8Dump, oilpanDump) {
+        var v8HeapsDump = newChildDump(v8Dump, 'heaps', { size: 32 });
+        var oilpanObjectsDump =
+            newChildDump(oilpanDump, 'objects', { size: 64 });
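+        // Note: the third argument of addOwnershipLink is assumed to be the
+        // ownership importance (reflected in the '(importance: N)' infos
+        // expected below).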
+        addOwnershipLink(v8HeapsDump, oilpanObjectsDump, 2);
+      });
+    });
+    var v8HeapsDump = getAllocatorDumps(pmds, 'v8/heaps');
+    var oilpanObjectsDump = getAllocatorDumps(pmds, 'oilpan/objects');
+
+    // Single selection.
+    checkAllocatorPaneColumnInfosAndColor(c,
+        [v8HeapsDump[0]],
+        'effective_size',
+        [
+          {
+            icon: '\u21FE',
+            message: 'shares \'oilpan/objects\' in Process 1 (importance: 0) ' +
+                'with no other dumps',
+            color: 'green'
+          }
+        ]);
+    checkAllocatorPaneColumnInfosAndColor(c,
+        [oilpanObjectsDump[4]],
+        'effective_size',
+        [
+          {
+            icon: '\u21FD',
+            message: 'shared by \'v8/heaps\' in Process 1 (importance: 2)',
+            color: 'green'
+          }
+        ]);
+
+    // Multi-selection, all dumps defined.
+    checkAllocatorPaneColumnInfosAndColor(c,
+        [v8HeapsDump[0], v8HeapsDump[4]],
+        'effective_size',
+        [
+          {
+            icon: '\u21FE',
+            message: 'shares \'oilpan/objects\' in Process 1 (importance: ' +
+                '0\u20132) with no other dumps',
+            color: 'green'
+          }
+        ]);
+    checkAllocatorPaneColumnInfosAndColor(c,
+        [oilpanObjectsDump[0], oilpanObjectsDump[4]],
+        'effective_size',
+        [
+          {
+            icon: '\u21FD',
+            message: 'shared by \'v8/heaps\' in Process 1 (importance: ' +
+                '0\u20132)',
+            color: 'green'
+          }
+        ]);
+
+    // Multi-selection, some dumps missing.
+    checkAllocatorPaneColumnInfosAndColor(c,
+        v8HeapsDump,
+        'effective_size',
+        [
+          {
+            icon: '\u21FE',
+            message: 'shares \'oilpan/objects\' in Process 1 at some ' +
+                'selected timestamps (importance: 0\u20132) with no other ' +
+                'dumps',
+            color: 'green'
+          }
+        ]);
+    checkAllocatorPaneColumnInfosAndColor(c,
+        oilpanObjectsDump,
+        'effective_size',
+        [
+          {
+            icon: '\u21FD',
+            message: 'shared by \'v8/heaps\' in Process 1 at some selected ' +
+                'timestamps (importance: 0\u20132)',
+            color: 'green'
+          }
+        ]);
+  });
+
+  test('effectiveSizeColumn_dumpContext_multipleOwnerships', function() {
+    var c = new EffectiveSizeColumn('Effective Size', 'bytes', tr.b.identity,
+        AggregationMode.DIFF);
+    var pmds = buildProcessMemoryDumps(6 /* count */, function(pmds) {
+      addRootDumps(pmds[0], ['v8', 'oilpan'], function(v8Dump, oilpanDump) {
+        var v8HeapsDump = newChildDump(v8Dump, 'heaps', { size: 32 });
+        var v8QueuesDump = newChildDump(v8Dump, 'queues', { size: 8 });
+        var oilpanObjectsDump =
+            newChildDump(oilpanDump, 'objects', { size: 64 });
+        addOwnershipLink(v8HeapsDump, oilpanObjectsDump);
+        addOwnershipLink(v8QueuesDump, oilpanObjectsDump, 1);
+      });
+      addRootDumps(pmds[1], ['v8'], function(v8Dump) {});
+      addRootDumps(pmds[2], ['v8', 'oilpan'], function(v8Dump, oilpanDump) {
+        var v8HeapsDump = newChildDump(v8Dump, 'heaps', { size: 32 });
+        var v8QueuesDump = newChildDump(v8Dump, 'queues', { size: 8 });
+        var v8PilesDump = newChildDump(v8Dump, 'piles', { size: 48 });
+        var oilpanObjectsDump =
+            newChildDump(oilpanDump, 'objects', { size: 64 });
+        addOwnershipLink(v8HeapsDump, oilpanObjectsDump, 2);
+        addOwnershipLink(v8QueuesDump, oilpanObjectsDump, 1);
+        addOwnershipLink(v8PilesDump, oilpanObjectsDump);
+      });
+      addRootDumps(pmds[3], ['v8', 'blink'], function(v8Dump, blinkDump) {
+        var blinkHandlesDump = newChildDump(blinkDump, 'handles', { size: 32 });
+        var v8HeapsDump = newChildDump(v8Dump, 'heaps', { size: 64 });
+        var blinkObjectsDump = newChildDump(blinkDump, 'objects', { size: 32 });
+        addOwnershipLink(blinkHandlesDump, v8HeapsDump, -273);
+        addOwnershipLink(v8HeapsDump, blinkObjectsDump, 3);
+      });
+      addRootDumps(pmds[4], ['v8', 'gpu'], function(v8Dump, gpuDump) {
+        var v8HeapsDump = newChildDump(v8Dump, 'heaps', { size: 64 });
+        var gpuTile1Dump = newChildDump(gpuDump, 'tile1', { size: 100 });
+        var gpuTile2Dump = newChildDump(gpuDump, 'tile2', { size: 99 });
+        addOwnershipLink(v8HeapsDump, gpuTile1Dump, 3);
+        addOwnershipLink(gpuTile2Dump, gpuTile1Dump, -1);
+      });
+      addRootDumps(pmds[5], ['v8', 'oilpan'], function(v8Dump, oilpanDump) {
+        var v8HeapsDump = newChildDump(v8Dump, 'heaps', { size: 32 });
+        var v8QueuesDump = newChildDump(v8Dump, 'queues', { size: 8 });
+        var v8PilesDump = newChildDump(v8Dump, 'piles', { size: 48 });
+        var oilpanObjectsDump =
+            newChildDump(oilpanDump, 'objects', { size: 64 });
+        addOwnershipLink(v8HeapsDump, oilpanObjectsDump, 1);
+        addOwnershipLink(v8QueuesDump, oilpanObjectsDump, 1);
+        addOwnershipLink(v8PilesDump, oilpanObjectsDump, 7);
+      });
+    });
+    var v8HeapsDump = getAllocatorDumps(pmds, 'v8/heaps');
+    var oilpanObjectsDump = getAllocatorDumps(pmds, 'oilpan/objects');
+    var gpuTile1Dump = getAllocatorDumps(pmds, 'gpu/tile1');
+
+    // Single selection.
+    checkAllocatorPaneColumnInfosAndColor(c,
+        [v8HeapsDump[4]],
+        'effective_size',
+        [
+          {
+            icon: '\u21FE',
+            message: 'shares \'gpu/tile1\' in Process 1 (importance: 3) with ' +
+                '\'gpu/tile2\' in Process 1 (importance: -1)',
+            color: 'green'
+          }
+        ]);
+    checkAllocatorPaneColumnInfosAndColor(c,
+        [gpuTile1Dump[4]],
+        'effective_size',
+        [
+          {
+            icon: '\u21FD',
+            message: 'shared by:\n' +
+                ' - \'v8/heaps\' in Process 1 (importance: 3)\n' +
+                ' - \'gpu/tile2\' in Process 1 (importance: -1)',
+            color: 'green'
+          }
+        ]);
+
+    // Multi-selection, all dumps defined.
+    checkAllocatorPaneColumnInfosAndColor(c,
+        [v8HeapsDump[2], v8HeapsDump[5]],
+        'effective_size',
+        [
+          {
+            icon: '\u21FE',
+            message: 'shares \'oilpan/objects\' in Process 1 (importance: ' +
+                '1\u20132) with:\n' +
+                ' - \'v8/queues\' in Process 1 (importance: 1)\n' +
+                ' - \'v8/piles\' in Process 1 (importance: 0\u20137)',
+            color: 'green'
+          }
+        ]);
+    checkAllocatorPaneColumnInfosAndColor(c,
+        [oilpanObjectsDump[2], oilpanObjectsDump[5]],
+        'effective_size',
+        [
+          {
+            icon: '\u21FD',
+            message: 'shared by:\n' +
+                ' - \'v8/heaps\' in Process 1 (importance: 1\u20132)\n' +
+                ' - \'v8/queues\' in Process 1 (importance: 1)\n' +
+                ' - \'v8/piles\' in Process 1 (importance: 0\u20137)',
+            color: 'green'
+          }
+        ]);
+
+    // Multi-selection, some dumps missing.
+    checkAllocatorPaneColumnInfosAndColor(c,
+        v8HeapsDump,
+        'effective_size',
+        [ // v8/heaps is both owned (first info) and an owner (second info).
+          {
+            icon: '\u21FD',
+            message: 'shared by \'blink/handles\' in Process 1 at some ' +
+                'selected timestamps (importance: -273)',
+            color: 'green'
+          },
+          {
+            icon: '\u21FE',
+            message: 'shares:\n' +
+                ' - \'oilpan/objects\' in Process 1 at some selected ' +
+                'timestamps (importance: 0\u20132) with:\n' +
+                '    - \'v8/queues\' in Process 1 (importance: 1)\n' +
+                '    - \'v8/piles\' in Process 1 at some selected ' +
+                'timestamps (importance: 0\u20137)\n' +
+                ' - \'blink/objects\' in Process 1 at some selected ' +
+                'timestamps (importance: 3) with no other dumps\n' +
+                ' - \'gpu/tile1\' in Process 1 at some selected timestamps ' +
+                '(importance: 3) with \'gpu/tile2\' in Process 1 ' +
+                '(importance: -1)',
+            color: 'green'
+          }
+        ]);
+    checkAllocatorPaneColumnInfosAndColor(c,
+        oilpanObjectsDump,
+        'effective_size',
+        [
+          {
+            icon: '\u21FD',
+            message: 'shared by:\n' +
+                ' - \'v8/heaps\' in Process 1 at some selected timestamps ' +
+                '(importance: 0\u20132)\n' +
+                ' - \'v8/queues\' in Process 1 at some selected timestamps ' +
+                '(importance: 1)\n' +
+                ' - \'v8/piles\' in Process 1 at some selected timestamps ' +
+                '(importance: 0\u20137)',
+            color: 'green'
+          }
+        ]);
+  });
+
+  test('sizeColumn_noContext', function() {
+    var c = new SizeColumn('Size', 'bytes', tr.b.identity,
+        AggregationMode.DIFF);
+
+    // Single selection.
+    checkColumnInfosAndColor(c,
+        createSizeFields([128]),
+        undefined /* no context */,
+        [] /* no infos */,
+        undefined /* no color */);
+
+    // Multi-selection.
+    checkColumnInfosAndColor(c,
+        createSizeFields([128, 256, undefined, 64]),
+        undefined /* no context */,
+        [] /* no infos */,
+        undefined /* no color */);
+  });
+
+  test('sizeColumn_suballocationContext', function() {
+    var c = new SizeColumn('Size', 'bytes', tr.b.identity, AggregationMode.MAX);
+
+    // Single selection.
+    checkColumnInfosAndColor(c,
+        createSizeFields([128]),
+        [SUBALLOCATION_CONTEXT],
+        [] /* no infos */,
+        undefined /* no color */);
+
+    // Multi-selection.
+    checkColumnInfosAndColor(c,
+        createSizeFields([undefined, 256, undefined, 64]),
+        [undefined, SUBALLOCATION_CONTEXT, undefined, SUBALLOCATION_CONTEXT],
+        [] /* no infos */,
+        undefined /* no color */);
+  });
+
+  test('sizeColumn_dumpContext', function() {
+    var c = new SizeColumn('Size', 'bytes', tr.b.identity,
+        AggregationMode.DIFF);
+    var pmds = buildProcessMemoryDumps(7 /* count */, function(pmds) {
+      addRootDumps(pmds[0], ['v8'], function(v8Dump) {
+        // Single direct overlap (v8/objects -> v8/heaps).
+        var v8ObjectsDump = newChildDump(v8Dump, 'objects', { size: 1536 });
+        var v8HeapsDump = newChildDump(v8Dump, 'heaps', { size: 2048 });
+        addOwnershipLink(v8ObjectsDump, v8HeapsDump);
+      });
+      // pmds[1] intentionally skipped.
+      addRootDumps(pmds[2], ['v8'], function(v8Dump) {
+        // Single direct overlap with inconsistent owned dump size.
+        var v8ObjectsDump = newChildDump(v8Dump, 'objects', { size: 3072 });
+        var v8HeapsDump = newChildDump(v8Dump, 'heaps', { size: 2048 });
+        addOwnershipLink(v8ObjectsDump, v8HeapsDump);
+      });
+      addRootDumps(pmds[3], ['v8'], function(v8Dump) {
+        // Single indirect overlap (v8/objects/X -> v8/heaps/42).
+        var v8ObjectsDump = newChildDump(v8Dump, 'objects', { size: 1536 });
+        var v8ObjectsXDump = newChildDump(v8ObjectsDump, 'X', { size: 512 });
+        var v8HeapsDump = newChildDump(v8Dump, 'heaps', { size: 2048 });
+        var v8Heaps42Dump = newChildDump(v8HeapsDump, '42', { size: 1024 });
+        addOwnershipLink(v8ObjectsXDump, v8Heaps42Dump);
+      });
+      addRootDumps(pmds[4], ['v8'], function(v8Dump) {
+        // Multiple overlaps.
+        var v8ObjectsDump = newChildDump(v8Dump, 'objects', { size: 1024 });
+        var v8HeapsDump = newChildDump(v8Dump, 'heaps', { size: 2048 });
+
+        var v8ObjectsXDump = newChildDump(v8ObjectsDump, 'X', { size: 512 });
+        var v8Heaps42Dump = newChildDump(v8HeapsDump, '42', { size: 1280 });
+        addOwnershipLink(v8ObjectsXDump, v8Heaps42Dump);
+
+        var v8ObjectsYDump = newChildDump(v8ObjectsDump, 'Y', { size: 128 });
+        var v8Heaps90Dump = newChildDump(v8HeapsDump, '90', { size: 256 });
+        addOwnershipLink(v8ObjectsYDump, v8Heaps90Dump);
+
+        var v8BlocksDump = newChildDump(v8Dump, 'blocks', { size: 768 });
+        addOwnershipLink(v8BlocksDump, v8Heaps42Dump);
+      });
+      addRootDumps(pmds[5], ['v8'], function(v8Dump) {
+        // No overlaps, inconsistent parent size.
+        var v8HeapsDump = newChildDump(v8Dump, 'heaps', { size: 2048 });
+        newChildDump(v8HeapsDump, '42', { size: 1536 });
+        newChildDump(v8HeapsDump, '90', { size: 615 });
+      });
+      addRootDumps(pmds[6], ['v8', 'oilpan'], function(v8Dump, oilpanDump) {
+        // No overlaps, inconsistent parent and owned dump size.
+        var v8HeapsDump = newChildDump(v8Dump, 'heaps', { size: 2048 });
+        newChildDump(v8HeapsDump, '42', { size: 1536 });
+        newChildDump(v8HeapsDump, '90', { size: 615 });
+        var oilpanObjectsDump =
+            newChildDump(oilpanDump, 'objects', { size: 3072 });
+        addOwnershipLink(oilpanObjectsDump, v8HeapsDump);
+      });
+    });
+    var v8HeapDumps = getAllocatorDumps(pmds, 'v8/heaps');
+
+    // Single selection, single overlap.
+    checkAllocatorPaneColumnInfosAndColor(c,
+        [v8HeapDumps[0]],
+        'size',
+        [
+          {
+            icon: '\u24D8',
+            message: 'overlaps with its sibling \'objects\' (1.5 KiB)',
+            color: 'blue'
+          }
+        ]);
+
+    // Single selection, multiple overlaps.
+    checkAllocatorPaneColumnInfosAndColor(c,
+        [v8HeapDumps[4]],
+        'size',
+        [
+          {
+            icon: '\u24D8',
+            message: 'overlaps with its siblings:\n' +
+                ' - \'objects\' (640.0 B)\n' +
+                ' - \'blocks\' (768.0 B)',
+            color: 'blue'
+          }
+        ]);
+
+    // Single selection, warnings with no overlaps.
+    checkAllocatorPaneColumnInfosAndColor(c,
+        [v8HeapDumps[6]],
+        'size',
+        [
+          {
+            icon: '\u26A0',
+            message: 'provided size (2.0 KiB) was less than the aggregated ' +
+                'size of the children (2.1 KiB)',
+            color: 'red'
+          },
+          {
+            icon: '\u26A0',
+            message: 'provided size (2.0 KiB) was less than the size of the ' +
+                'largest owner (3.0 KiB)',
+            color: 'red'
+          }
+        ]);
+
+    // Single selection, single overlap with a warning.
+    checkAllocatorPaneColumnInfosAndColor(c,
+        [v8HeapDumps[2]],
+        'size',
+        [
+          {
+            icon: '\u24D8',
+            message: 'overlaps with its sibling \'objects\' (3.0 KiB)',
+            color: 'blue'
+          },
+          {
+            icon: '\u26A0',
+            message: 'provided size (2.0 KiB) was less than the size of the ' +
+                'largest owner (3.0 KiB)',
+            color: 'red'
+          }
+        ]);
+
+    // Multi-selection, single overlap.
+    checkAllocatorPaneColumnInfosAndColor(c,
+        [v8HeapDumps[0], v8HeapDumps[3]],
+        'size',
+        [
+          {
+            icon: '\u24D8',
+            message: 'overlaps with its sibling \'objects\'',
+            color: 'blue'
+          }
+        ]);
+
+    // Multi-selection, multiple overlaps.
+    checkAllocatorPaneColumnInfosAndColor(c,
+        [v8HeapDumps[0], v8HeapDumps[4]],
+        'size',
+        [
+          {
+            icon: '\u24D8',
+            message: 'overlaps with its siblings:\n' +
+                ' - \'objects\'\n' +
+                ' - \'blocks\' at some selected timestamps',
+            color: 'blue'
+          }
+        ]);
+
+    // Multi-selection, warnings with no overlaps.
+    checkAllocatorPaneColumnInfosAndColor(c,
+        [v8HeapDumps[5], v8HeapDumps[6]],
+        'size',
+        [
+          {
+            icon: '\u26A0',
+            message: 'provided size was less than the aggregated ' +
+                'size of the children',
+            color: 'red'
+          },
+          {
+            icon: '\u26A0',
+            message: 'provided size was less than the size of the largest ' +
+                'owner at some selected timestamps',
+            color: 'red'
+          }
+        ]);
+
+    // Multi-selection, multiple overlaps with warnings.
+    checkAllocatorPaneColumnInfosAndColor(c,
+        v8HeapDumps,
+        'size',
+        [
+          {
+            icon: '\u24D8',
+            message: 'overlaps with its siblings:\n' +
+                ' - \'objects\' at some selected timestamps\n' +
+                ' - \'blocks\' at some selected timestamps',
+            color: 'blue'
+          },
+          {
+            icon: '\u26A0',
+            message: 'provided size was less than the size of the largest ' +
+                'owner at some selected timestamps',
+            color: 'red'
+          },
+          {
+            icon: '\u26A0',
+            message: 'provided size was less than the aggregated size of ' +
+                'the children at some selected timestamps',
+            color: 'red'
+          }
+        ]);
+  });
 });
 </script>
diff --git a/catapult/tracing/tracing/ui/analysis/memory_dump_header_pane.html b/catapult/tracing/tracing/ui/analysis/memory_dump_header_pane.html
index 58e310f..7b9ea7c 100644
--- a/catapult/tracing/tracing/ui/analysis/memory_dump_header_pane.html
+++ b/catapult/tracing/tracing/ui/analysis/memory_dump_header_pane.html
@@ -5,11 +5,11 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/tracing/base/units/time_stamp.html">
 <link rel="import" href="/tracing/ui/analysis/memory_dump_overview_pane.html">
 <link rel="import" href="/tracing/ui/analysis/memory_dump_sub_view_util.html">
 <link rel="import" href="/tracing/ui/analysis/stacked_pane.html">
 <link rel="import" href="/tracing/ui/base/dom_helpers.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <polymer-element name="tr-ui-a-memory-dump-header-pane"
     extends="tr-ui-a-stacked-pane">
@@ -127,15 +127,16 @@
           'Selected ' + containerDumpCount + ' memory dump' +
           (isMultiSelection ? 's' : '') +
           ' in ' + this.containerMemoryDumps_[0].containerName + ' at '));
-      // TODO(petrcermak): Use <tr-ui-u-scalar-span> once it can be displayed
+      // TODO(petrcermak): Use <tr-v-ui-scalar-span> once it can be displayed
       // inline. See https://github.com/catapult-project/catapult/issues/1371.
       this.$.label.appendChild(document.createTextNode(
-          tr.b.u.TimeStamp.format(this.containerMemoryDumps_[0].start)));
+          tr.v.Unit.byName.timeStampInMs.format(
+              this.containerMemoryDumps_[0].start)));
       if (isMultiSelection) {
         var ELLIPSIS = String.fromCharCode(8230);
         this.$.label.appendChild(document.createTextNode(ELLIPSIS));
         this.$.label.appendChild(document.createTextNode(
-            tr.b.u.TimeStamp.format(
+            tr.v.Unit.byName.timeStampInMs.format(
                 this.containerMemoryDumps_[containerDumpCount - 1].start)));
       }
     },
diff --git a/catapult/tracing/tracing/ui/analysis/memory_dump_heap_details_pane.html b/catapult/tracing/tracing/ui/analysis/memory_dump_heap_details_pane.html
index eb663d1..6b9dbb1 100644
--- a/catapult/tracing/tracing/ui/analysis/memory_dump_heap_details_pane.html
+++ b/catapult/tracing/tracing/ui/analysis/memory_dump_heap_details_pane.html
@@ -5,12 +5,16 @@
 found in the LICENSE file.
 -->
 
+<link rel="import" href="/tracing/base/color_scheme.html">
 <link rel="import" href="/tracing/base/iteration_helpers.html">
+<link rel="import" href="/tracing/base/multi_dimensional_view.html">
 <link rel="import" href="/tracing/ui/analysis/memory_dump_sub_view_util.html">
-<link rel="import" href="/tracing/ui/analysis/stack_frame_tree.html">
 <link rel="import" href="/tracing/ui/analysis/stacked_pane.html">
 <link rel="import" href="/tracing/ui/base/dom_helpers.html">
+<link rel='import' href='/tracing/ui/base/info_bar.html'>
 <link rel="import" href="/tracing/ui/base/table.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <polymer-element name="tr-ui-a-memory-dump-heap-details-pane"
     extends="tr-ui-a-stacked-pane">
@@ -73,6 +77,8 @@
       </div>
     </div>
     <div id="contents">
+      <tr-ui-b-info-bar id="info_bar" class="info-bar-hidden">
+      </tr-ui-b-info-bar>
       <div id="info_text">No heap dump selected</div>
       <tr-ui-b-table id="table"></tr-ui-b-table>
     </div>
@@ -83,32 +89,111 @@
 
 tr.exportTo('tr.ui.analysis', function() {
 
-  var COLUMN_IMPORTANCE_RULES =
-      tr.ui.analysis.MemoryColumn.columnNamesToImportanceRules([
-          'Total size',
-          'Self size']);
+  var ScalarNumeric = tr.v.ScalarNumeric;
+  var sizeInBytes_smallerIsBetter =
+      tr.v.Unit.byName.sizeInBytes_smallerIsBetter;
+
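+  // Dimension of a row in the heap details pane table: ROOT marks the single
+  // top-level allocator row, while STACK_FRAME and OBJECT_TYPE index into the
+  // two dimensions of the underlying MultiDimensionalViewNode.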
+  /** @enum */
+  var RowDimension = {
+    ROOT: -1,
+    STACK_FRAME: 0,
+    OBJECT_TYPE: 1
+  };
+
+  var LATIN_SMALL_LETTER_F_WITH_HOOK = String.fromCharCode(0x0192);
+  var CIRCLED_LATIN_CAPITAL_LETTER_T = String.fromCharCode(0x24C9);
+
+  /** @constructor */
+  function HeapDumpNodeTitleColumn(title) {
+    tr.ui.analysis.TitleColumn.call(this, title);
+  }
+
+  HeapDumpNodeTitleColumn.prototype = {
+    __proto__: tr.ui.analysis.TitleColumn.prototype,
+
+    formatTitle: function(row) {
+      var title = row.title;
+      var dimension = row.dimension;
+      switch (dimension) {
+        case RowDimension.ROOT:
+          return title;
+
+        case RowDimension.STACK_FRAME:
+        case RowDimension.OBJECT_TYPE:
+          return this.formatSubRow_(title, dimension);
+
+        default:
+          throw new Error('Invalid row dimension: ' + row.dimension);
+      }
+    },
+
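+    // Rows are ordered by dimension first (stack-frame sub-rows before
+    // object-type sub-rows) and only then by the default title comparison.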
+    cmp: function(rowA, rowB) {
+      if (rowA.dimension !== rowB.dimension)
+        return rowA.dimension - rowB.dimension;
+      return tr.ui.analysis.TitleColumn.prototype.cmp.call(this, rowA, rowB);
+    },
+
+    formatSubRow_: function(title, dimension) {
+      var titleEl = document.createElement('span');
+
+      var symbolEl = document.createElement('span');
+      var symbolColorName;
+      if (dimension === RowDimension.STACK_FRAME) {
+        symbolEl.textContent = LATIN_SMALL_LETTER_F_WITH_HOOK;
+        symbolEl.title = 'Stack frame';
+        symbolColorName = 'heap_dump_stack_frame';
+      } else {
+        symbolEl.textContent = CIRCLED_LATIN_CAPITAL_LETTER_T;
+        symbolEl.title = 'Object type';
+        symbolColorName = 'heap_dump_object_type';
+      }
+      symbolEl.style.color =
+          tr.b.ColorScheme.getColorForReservedNameAsString(symbolColorName);
+      symbolEl.style.paddingRight = '4px';
+      symbolEl.style.cursor = 'help';
+      symbolEl.style.fontWeight = 'bold';
+      titleEl.appendChild(symbolEl);
+
+      titleEl.appendChild(document.createTextNode(title));
+
+      return titleEl;
+    }
+  };
+
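+  // A single catch-all rule: every numeric column shares the same importance
+  // and is built as a plain NumericMemoryColumn.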
+  var COLUMN_RULES = [
+    {
+      importance: 0,
+      columnConstructor: tr.ui.analysis.NumericMemoryColumn
+    }
+  ];
 
   Polymer('tr-ui-a-memory-dump-heap-details-pane', {
     created: function() {
       this.heapDumps_ = undefined;
       this.aggregationMode_ = undefined;
-      this.bottomUpView_ = false;
+      this.viewMode_ = undefined;
     },
 
     ready: function() {
       this.$.table.selectionMode = tr.ui.b.TableFormat.SelectionMode.ROW;
+      this.$.info_bar.message = 'Note: Values displayed in the heavy view ' +
+          'are lower bounds (except for the root).';
 
       this.$.view_mode_container.appendChild(tr.ui.b.createSelector(
-          this, 'bottomUpView', 'memoryDumpHeapDetailsPane.bottomUpView',
-          false /* Top down default */,
+          this, 'viewMode', 'memoryDumpHeapDetailsPane.viewMode',
+          tr.b.MultiDimensionalViewType.TOP_DOWN_TREE_VIEW,
           [
             {
-              label: 'Tree (top down)',
-              value: false
+              label: 'Top-down (Tree)',
+              value: tr.b.MultiDimensionalViewType.TOP_DOWN_TREE_VIEW
             },
             {
-              label: 'Heavy (bottom up)',
-              value: true
+              label: 'Top-down (Heavy)',
+              value: tr.b.MultiDimensionalViewType.TOP_DOWN_HEAVY_VIEW
+            },
+            {
+              label: 'Bottom-up (Heavy)',
+              value: tr.b.MultiDimensionalViewType.BOTTOM_UP_HEAVY_VIEW
             }
           ]));
     },
@@ -144,13 +229,23 @@
       return this.aggregationMode_;
     },
 
-    set bottomUpView(bottomUpView) {
-      this.bottomUpView_ = bottomUpView;
+    set viewMode(viewMode) {
+      this.viewMode_ = viewMode;
       this.scheduleRebuildPane_();
     },
 
-    get bottomUpView() {
-      return this.bottomUpView_;
+    get viewMode() {
+      return this.viewMode_;
+    },
+
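+    // Heavy views (both top-down and bottom-up) display lower-bound values,
+    // so the explanatory info bar is only shown for them (see rebuildPane_).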
+    get heavyView() {
+      switch (this.viewMode) {
+        case tr.b.MultiDimensionalViewType.TOP_DOWN_HEAVY_VIEW:
+        case tr.b.MultiDimensionalViewType.BOTTOM_UP_HEAVY_VIEW:
+          return true;
+        default:
+          return false;
+      }
     },
 
     rebuildPane_: function() {
@@ -160,6 +255,7 @@
         this.$.info_text.style.display = 'block';
         this.$.table.style.display = 'none';
         this.$.view_mode_container.style.display = 'none';
+        this.$.info_bar.visible = false;
 
         this.$.table.clear();
         this.$.table.rebuild();
@@ -171,6 +267,9 @@
       this.$.table.style.display = 'block';
       this.$.view_mode_container.style.display = 'block';
 
+      // Show the info bar if in heavy view mode.
+      this.$.info_bar.visible = this.heavyView;
+
       var stackFrameTrees = this.createStackFrameTrees_(this.heapDumps_);
       var rows = this.createRows_(stackFrameTrees);
       var columns = this.createColumns_(rows);
@@ -186,101 +285,121 @@
         if (heapDump === undefined)
           return undefined;
 
-        var rootNode =
-            new tr.ui.analysis.StackFrameTreeNode(heapDump.allocatorName);
-        var sumSize = undefined;
+        var builder = new tr.b.MultiDimensionalViewBuilder(2 /* dimensions */);
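+        // Dimension 0 holds the stack trace path, dimension 1 the object type
+        // name (when the heap dump entry provides one).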
 
         // Build the heap tree.
         heapDump.entries.forEach(function(entry) {
-          var size = entry.size;
           var leafStackFrame = entry.leafStackFrame;
-          if (leafStackFrame === undefined) {
-            if (sumSize !== undefined)
-              throw new Error('Multiple sum stack frames');
-            sumSize = size;
-            return;
-          }
-          rootNode.addStackTrace(leafStackFrame.stackTrace, size, true);
+          var stackTracePath = leafStackFrame === undefined ?
+              [] : leafStackFrame.getUserFriendlyStackTrace().reverse();
+
+          var objectTypeName = entry.objectTypeName;
+          var objectTypeNamePath = objectTypeName === undefined ?
+              [] : [objectTypeName];
+
+          builder.addPath([stackTracePath, objectTypeNamePath], entry.size,
+              tr.b.MultiDimensionalViewBuilder.ValueKind.TOTAL);
         }, this);
 
-        // Add an <unspecified> node (if applicable).
-        if (sumSize !== undefined && sumSize > rootNode.total) {
-          var unspecifiedSize = sumSize - rootNode.total;
-          rootNode.total = sumSize;
-          var unspecifiedNode = rootNode.getOrCreateChild('<unspecified>');
-          unspecifiedNode.total += unspecifiedSize;
-          unspecifiedNode.self += unspecifiedSize;
-        }
-
-        if (this.bottomUpView)
-          return rootNode.convertToBottomUpView();
-        else
-          return rootNode;
+        return builder.buildView(this.viewMode);
       }, this);
     },
 
     createRows_: function(stackFrameTrees) {
-      return [this.createHeapRowRecursively_(stackFrameTrees)];
+      var definedHeapDump = tr.b.findFirstInArray(this.heapDumps);
+      if (definedHeapDump === undefined)
+        return [];
+
+      // The title of the root row is the name of the allocator.
+      var rootRowTitle = definedHeapDump.allocatorName;
+      return [this.createHeapRowRecursively_(
+          stackFrameTrees, RowDimension.ROOT, rootRowTitle)];
     },
 
-    createHeapRowRecursively_: function(nodes) {
-      // Get the name of the stack frame tree nodes. We can use any defined
-      // node since they all have the same name.
-      var title = tr.b.findFirstInArray(nodes).title;
-
-      // Determine at which timestamps (indices of the current selection)
-      // the stack frame tree node was provided.
-      var defined = nodes.map(function(node) {
-        return node !== undefined;
-      });
-
+    createHeapRowRecursively_: function(nodes, dimension, title) {
       // Transform a chronological list of stack frame tree nodes into a
       // dictionary of cells (where each cell contains a chronological list
-      // of the values of its attribute).
+      // of the values of its numeric).
       var cells = tr.ui.analysis.createCells(nodes, function(node) {
         return {
-          'Total size': new tr.model.ScalarAttribute('bytes', node.total),
-          'Self size': new tr.model.ScalarAttribute('bytes', node.self)
+          'Size': new ScalarNumeric(sizeInBytes_smallerIsBetter, node.total)
         };
       });
 
-      // Child stack frame tree node index (list index) ->
-      // Timestamp (list index) -> Child stack frame tree node.
-      var groupedChildNodes = tr.b.dictionaryValues(
-          tr.b.invertArrayOfDicts(nodes, function(node) {
-            return node.children;
-          }));
-
       var row = {
+        dimension: dimension,
         title: title,
-        defined: defined,
+        contexts: nodes,
         cells: cells
       };
 
       // Recursively create sub-rows for children (if applicable).
-      if (groupedChildNodes.length > 0) {
-        row.subRows =
-            groupedChildNodes.map(this.createHeapRowRecursively_, this);
-      }
+      var stackFrameSubRows = this.createHeapDimensionSubRowsRecursively_(
+          nodes, RowDimension.STACK_FRAME);
+      var objectTypeSubRows = this.createHeapDimensionSubRowsRecursively_(
+          nodes, RowDimension.OBJECT_TYPE);
+      var subRows = stackFrameSubRows.concat(objectTypeSubRows);
+      if (subRows.length > 0)
+        row.subRows = subRows;
 
       return row;
     },
 
+    createHeapDimensionSubRowsRecursively_: function(nodes, dimension) {
+      // Sub-row name (list index) -> Timestamp (list index) -> Child
+      // MultiDimensionalViewNode.
+      var dimensionGroupedChildNodes = tr.b.dictionaryValues(
+          tr.b.invertArrayOfDicts(nodes, function(node) {
+            var childDict = {};
+            var displayedChildrenTotal = 0;
+            var hasDisplayedChildren = false;
+            for (var child of node.children[dimension].values()) {
+              // Don't show lower-bound sub-rows in tree-view.
+              if (!this.heavyView && child.isLowerBound)
+                continue;
+              childDict[child.title[dimension]] = child;
+              displayedChildrenTotal += child.total;
+              hasDisplayedChildren = true;
+            }
+
+            // Add '<other>' node if necessary in tree-view.
+            if (!this.heavyView && displayedChildrenTotal < node.total &&
+                hasDisplayedChildren) {
+              var otherTitle = node.title.slice();
+              otherTitle[dimension] = '<other>';
+              childDict['<other>'] = {
+                title: otherTitle,
+                total: node.total - displayedChildrenTotal,
+                children: [new Map(), new Map()]
+              };
+            }
+
+            return childDict;
+          }, this));
+
+      // Sub-row name (list index) -> Sub-row.
+      return dimensionGroupedChildNodes.map(function(subRowNodes) {
+        var subRowTitle = tr.b.findFirstInArray(subRowNodes).title[dimension];
+        return this.createHeapRowRecursively_(
+            subRowNodes, dimension, subRowTitle);
+      }, this);
+    },
+
     createColumns_: function(rows) {
-      var titleColumn = new tr.ui.analysis.TitleColumn('Stack frame');
+      var titleColumn = new HeapDumpNodeTitleColumn('Stack frame');
       titleColumn.width = '500px';
 
-      var attributeColumns = tr.ui.analysis.MemoryColumn.fromRows(
-          rows, 'cells', this.aggregationMode_);
-      tr.ui.analysis.MemoryColumn.sortByImportance(
-          attributeColumns, COLUMN_IMPORTANCE_RULES);
-      tr.ui.analysis.MemoryColumn.spaceEqually(attributeColumns);
+      var numericColumns = tr.ui.analysis.MemoryColumn.fromRows(
+          rows, 'cells', this.aggregationMode_, COLUMN_RULES);
+      tr.ui.analysis.MemoryColumn.spaceEqually(numericColumns);
 
-      var columns = [titleColumn].concat(attributeColumns);
+      var columns = [titleColumn].concat(numericColumns);
       return columns;
     }
   });
 
-  return {};
+  return {
+    RowDimension: RowDimension  // Exported for testing.
+  };
 });
 </script>
diff --git a/catapult/tracing/tracing/ui/analysis/memory_dump_heap_details_pane_test.html b/catapult/tracing/tracing/ui/analysis/memory_dump_heap_details_pane_test.html
index 6d2990f..0d595a8 100644
--- a/catapult/tracing/tracing/ui/analysis/memory_dump_heap_details_pane_test.html
+++ b/catapult/tracing/tracing/ui/analysis/memory_dump_heap_details_pane_test.html
@@ -6,23 +6,35 @@
 -->
 
 <link rel='import' href='/tracing/base/iteration_helpers.html'>
+<link rel='import' href='/tracing/base/multi_dimensional_view.html'>
+<link rel='import' href='/tracing/core/test_utils.html'>
+<link rel='import' href='/tracing/model/heap_dump.html'>
 <link rel='import'
     href='/tracing/ui/analysis/memory_dump_heap_details_pane.html'>
 <link rel='import'
     href='/tracing/ui/analysis/memory_dump_sub_view_test_utils.html'>
 <link rel='import' href='/tracing/ui/analysis/memory_dump_sub_view_util.html'>
-<link rel='import' href='/tracing/core/test_utils.html'>
-<link rel='import' href='/tracing/model/heap_dump.html'>
 
 <script>
 'use strict';
 
 tr.b.unittest.testSuite(function() {
+  var MultiDimensionalViewType = tr.b.MultiDimensionalViewType;
+  var TOP_DOWN_TREE_VIEW = tr.b.MultiDimensionalViewType.TOP_DOWN_TREE_VIEW;
+  var TOP_DOWN_HEAVY_VIEW = tr.b.MultiDimensionalViewType.TOP_DOWN_HEAVY_VIEW;
+  var BOTTOM_UP_HEAVY_VIEW = tr.b.MultiDimensionalViewType.BOTTOM_UP_HEAVY_VIEW;
   var HeapDump = tr.model.HeapDump;
+  var RowDimension = tr.ui.analysis.RowDimension;
+  var ROOT = RowDimension.ROOT;
+  var STACK_FRAME = RowDimension.STACK_FRAME;
+  var OBJECT_TYPE = RowDimension.OBJECT_TYPE;
+  var TitleColumn = tr.ui.analysis.TitleColumn;
+  var NumericMemoryColumn = tr.ui.analysis.NumericMemoryColumn;
   var AggregationMode = tr.ui.analysis.MemoryColumn.AggregationMode;
   var addGlobalMemoryDump = tr.ui.analysis.addGlobalMemoryDump;
   var addProcessMemoryDump = tr.ui.analysis.addProcessMemoryDump;
-  var checkSizeAttributes = tr.ui.analysis.checkSizeAttributes;
+  var checkColumns = tr.ui.analysis.checkColumns;
+  var checkSizeNumericFields = tr.ui.analysis.checkSizeNumericFields;
   var isElementDisplayed = tr.ui.analysis.isElementDisplayed;
 
   function createHeapDumps() {
@@ -30,9 +42,8 @@
     var process = model.getOrCreateProcess(1);
 
     function heapTrace(/* topStackFrame, ..., leafStackFrame */) {
-      var titles =
-          Array.prototype.concat.apply([undefined /* root */], arguments);
-      return tr.c.TestUtils.newStackTrace(model, titles);
+      return tr.c.TestUtils.newStackTrace(
+          model, Array.prototype.slice.call(arguments));
     }
 
     // First timestamp.
@@ -40,17 +51,79 @@
     var pmd1 = addProcessMemoryDump(gmd1, process, -11);
     var hd1 = new HeapDump(pmd1, 'partition_alloc');
 
-    hd1.addEntry(undefined /* sum over all traces */, 4194304 /* 4 MiB */);
+    hd1.addEntry(undefined /* sum over all traces */,
+        undefined /* sum over all types */, 4194304 /* 4 MiB */);
+    hd1.addEntry(undefined /* sum over all traces */, 'v8::Context',
+        1048576 /* 1 MiB */);
+    hd1.addEntry(undefined /* sum over all traces */, 'blink::Node',
+        331776 /* 324 KiB */);
+    hd1.addEntry(heapTrace('MessageLoop::RunTask'),
+        undefined /* sum over all types */, 4194304 /* 4 MiB */);
+    hd1.addEntry(heapTrace('MessageLoop::RunTask'), 'v8::Context',
+        1048576 /* 1 MiB */);
+
     hd1.addEntry(heapTrace('MessageLoop::RunTask', 'FunctionCall'),
-        102400 /* 100 KiB */);
-    hd1.addEntry(heapTrace('MessageLoop::RunTask', 'FunctionCall',
-        'V8.Execute'), 1048576 /* 1 MiB */);
-    hd1.addEntry(heapTrace('MessageLoop::RunTask', 'FunctionCall',
-        'FunctionCall'), 204800 /* 200 KiB */);
-    hd1.addEntry(heapTrace('MessageLoop::RunTask', 'V8.Execute', 'FunctionCall',
-        'V8.Execute'), 2097152 /* 2 MiB */);
-    hd1.addEntry(heapTrace('MessageLoop::RunTask', 'V8.Execute',
-        'FunctionCall'), 307200 /* 300 KiB */);
+        undefined /* sum over all types */, 1406976 /* 1.3 MiB */);
+    hd1.addEntry(heapTrace('MessageLoop::RunTask', 'FunctionCall'),
+        'blink::Node', 331776 /* 324 KiB */);
+    hd1.addEntry(heapTrace('MessageLoop::RunTask', 'FunctionCall'),
+        'v8::Context', 1024000 /* 1000 KiB */);
+    hd1.addEntry(heapTrace('MessageLoop::RunTask', 'FunctionCall', '<self>'),
+        undefined /* sum over all types */, 102400 /* 100 KiB */);
+    hd1.addEntry(
+        heapTrace('MessageLoop::RunTask', 'FunctionCall', 'V8.Execute'),
+        'v8::Context', 716800 /* 700 KiB */);
+    hd1.addEntry(
+        heapTrace('MessageLoop::RunTask', 'FunctionCall', 'V8.Execute'),
+        undefined /* sum over all types */, 1048576 /* 1 MiB */);
+    hd1.addEntry(
+        heapTrace('MessageLoop::RunTask', 'FunctionCall', 'FunctionCall'),
+        undefined /* sum over all types */,
+        153600 /* 150 KiB, lower than the actual sum (should be ignored) */);
+    hd1.addEntry(
+        heapTrace('MessageLoop::RunTask', 'FunctionCall', 'FunctionCall'),
+        'v8::Context', 153600 /* 150 KiB */);
+
+    // The following entry should not appear in the tree-view because there is
+    // no entry for its parent stack frame.
+    hd1.addEntry(
+        heapTrace('MessageLoop::RunTask', 'MissingParent', 'FunctionCall'),
+        undefined /* sum over all types */, 10 /* 10 B */);
+
+    // The following entry should not appear in the tree-view because there is
+    // no sum over all types (for the given stack trace). However, it will lead
+    // to a visible increase of the (incorrectly provided) sum over all types
+    // of MessageLoop::RunTask -> FunctionCall -> FunctionCall by 50 KiB.
+    hd1.addEntry(
+        heapTrace('MessageLoop::RunTask', 'FunctionCall', 'FunctionCall',
+            'FunctionCall'),
+        'MissingSumOverAllTypes', 51200 /* 50 KiB */);
+
+    hd1.addEntry(
+        heapTrace('MessageLoop::RunTask', 'V8.Execute'),
+        undefined /* sum over all types */, 2404352 /* 2.3 MiB */);
+    hd1.addEntry(
+        heapTrace('MessageLoop::RunTask', 'V8.Execute', 'FunctionCall'),
+        undefined /* sum over all types */, 2404352 /* 2.3 MiB */);
+    hd1.addEntry(
+        heapTrace('MessageLoop::RunTask', 'V8.Execute', 'FunctionCall'),
+        'v8::Context', 20480 /* 20 KiB */);
+    hd1.addEntry(
+        heapTrace('MessageLoop::RunTask', 'V8.Execute', 'FunctionCall',
+            '<self>'),
+        'v8::Context', 15360 /* 15 KiB */);
+    hd1.addEntry(
+        heapTrace('MessageLoop::RunTask', 'V8.Execute', 'FunctionCall',
+            'V8.Execute'),
+        undefined /* sum over all types */, 2097152 /* 2 MiB */);
+    hd1.addEntry(
+        heapTrace('MessageLoop::RunTask', 'V8.Execute', 'FunctionCall',
+            'V8.Execute', 'V8.Execute'),
+        undefined /* sum over all types */, 2097152 /* 2 MiB */);
+    hd1.addEntry(
+        heapTrace('MessageLoop::RunTask', 'V8.Execute', 'FunctionCall',
+            '<self>'),
+        undefined /* sum over all types */, 307200 /* 300 KiB */);
 
     // Second timestamp.
     var gmd2 = addGlobalMemoryDump(model, 10);
@@ -58,62 +131,111 @@
     var hd2 = new HeapDump(pmd2, 'partition_alloc');
 
     hd2.addEntry(undefined /* sum over all traces */,
+        undefined /* sum over all types */,
         3145728 /* 3 MiB, lower than the actual sum (should be ignored) */);
-    hd2.addEntry(heapTrace(/* empty trace */),
+    hd2.addEntry(undefined /* sum over all traces */,
+        'v8::Context', 1258291 /* 1.2 MiB */);
+    hd2.addEntry(undefined /* sum over all traces */,
+        'blink::Node', 1048576 /* 1 MiB */);
+    hd2.addEntry(heapTrace('<self>'), undefined /* sum over all types */,
         131072 /* 128 KiB */);
+    hd2.addEntry(heapTrace('<self>'), 'v8::Context', 131072 /* 128 KiB */);
+    hd2.addEntry(heapTrace('MessageLoop::RunTask'),
+        undefined /* sum over all types */, 4823449 /* 4.6 MiB */);
+    hd2.addEntry(heapTrace('MessageLoop::RunTask'), 'v8::Context',
+        1127219 /* 1.1 MiB */);
+
     hd2.addEntry(heapTrace('MessageLoop::RunTask', 'FunctionCall'),
-        393216 /* 384 KiB */);
-    hd2.addEntry(heapTrace('MessageLoop::RunTask', 'FunctionCall',
-        'V8.Execute'), 1572864 /* 1.5 MiB */);
-    hd2.addEntry(heapTrace('MessageLoop::RunTask', 'V8.Execute', 'FunctionCall',
-        'V8.Execute'), 2621440 /* 2.5 MiB */);
-    hd2.addEntry(heapTrace('MessageLoop::RunTask', 'FunctionCall',
-        'FunctionCall', 'FunctionCall'), 204800 /* 200 KiB */);
+        undefined /* sum over all types */, 2170880 /* 2.1 MiB */);
+    hd2.addEntry(heapTrace('MessageLoop::RunTask', 'FunctionCall'),
+        'v8::Context', 1024000 /* 1000 KiB */);
+    hd2.addEntry(heapTrace('MessageLoop::RunTask', 'FunctionCall'),
+        'blink::Node', 819200 /* 800 KiB */);
+    hd2.addEntry(
+        heapTrace('MessageLoop::RunTask', 'FunctionCall', 'V8.Execute'),
+        undefined /* sum over all types */, 1572864 /* 1.5 MiB */);
+    hd2.addEntry(
+        heapTrace('MessageLoop::RunTask', 'FunctionCall', 'V8.Execute'),
+        'v8::Context', 614400 /* 600 KiB */);
+    hd2.addEntry(
+        heapTrace('MessageLoop::RunTask', 'FunctionCall', 'V8.Execute'),
+        'blink::Node', 819200 /* 800 KiB */);
+    hd2.addEntry(
+        heapTrace('MessageLoop::RunTask', 'FunctionCall', 'FunctionCall'),
+        undefined /* sum over all types */, 204800 /* 200 KiB */);
+    hd2.addEntry(
+        heapTrace('MessageLoop::RunTask', 'FunctionCall', 'FunctionCall'),
+        'v8::Context', 122880 /* 120 KiB */);
+    hd2.addEntry(
+        heapTrace('MessageLoop::RunTask', 'FunctionCall', 'FunctionCall',
+            'FunctionCall'),
+        undefined /* sum over all types */, 204800 /* 200 KiB */);
+    hd2.addEntry(heapTrace('MessageLoop::RunTask', 'FunctionCall', '<self>'),
+        undefined /* sum over all types */, 393216 /* 384 KiB */);
+
+    hd2.addEntry(
+        heapTrace('MessageLoop::RunTask', 'V8.Execute'),
+        undefined /* sum over all types */, 2621440 /* 2.5 MiB */);
+    hd2.addEntry(
+        heapTrace('MessageLoop::RunTask', 'V8.Execute', 'FunctionCall'),
+        undefined /* sum over all types */, 2621440 /* 2.5 MiB */);
+    hd2.addEntry(
+        heapTrace('MessageLoop::RunTask', 'V8.Execute', 'FunctionCall'),
+        'v8::Context', 20480 /* 20 KiB */);
+    hd2.addEntry(
+        heapTrace('MessageLoop::RunTask', 'V8.Execute', 'FunctionCall'),
+        'WTF::StringImpl', 126362 /* 123.4 KiB */);
+    hd2.addEntry(
+        heapTrace('MessageLoop::RunTask', 'V8.Execute', 'FunctionCall',
+            'V8.Execute'),
+        undefined /* sum over all types */, 2516582 /* 2.4 MiB */);
 
     return [hd1, hd2];
   }
 
-  function checkTableAndViewModeSelectorDislayed(viewEl, displayed) {
-    assert.strictEqual(isElementDisplayed(viewEl.$.info_text), !displayed);
-    assert.strictEqual(isElementDisplayed(viewEl.$.table), displayed);
-    assert.strictEqual(
-        isElementDisplayed(viewEl.$.view_mode_container), displayed);
+  function checkDisplayedElements(viewEl, displayExpectations) {
+    assert.strictEqual(isElementDisplayed(viewEl.$.info_text),
+        displayExpectations.infoText);
+    assert.strictEqual(isElementDisplayed(viewEl.$.info_bar),
+        displayExpectations.infoBar);
+    assert.strictEqual(isElementDisplayed(viewEl.$.table),
+        displayExpectations.tableAndSelector);
+    assert.strictEqual(isElementDisplayed(viewEl.$.view_mode_container),
+        displayExpectations.tableAndSelector);
   }
 
-  function checkColumns(columns, expectedAggregationMode) {
-    var EXPECTED_COLUMN_NAMES = [
-      'Stack frame',
-      'Total size',
-      'Self size'
-    ];
+  var EXPECTED_COLUMNS = [
+    { title: 'Stack frame', type: TitleColumn, noAggregation: true },
+    { title: 'Size', type: NumericMemoryColumn }
+  ];
 
-    // First column doesn't change value over time (no aggregation).
-    var VARIABLE_CELLS_START_INDEX = 1;
-
-    // Check column names.
-    assert.lengthOf(columns, EXPECTED_COLUMN_NAMES.length);
-    for (var i = 0; i < EXPECTED_COLUMN_NAMES.length; i++)
-      assert.equal(columns[i].title, EXPECTED_COLUMN_NAMES[i]);
-
-    // Check aggregation modes.
-    for (var i = 0; i < EXPECTED_COLUMN_NAMES.length; i++) {
-      assert.strictEqual(columns[i].aggregationMode,
-          i < VARIABLE_CELLS_START_INDEX ? undefined : expectedAggregationMode);
-    }
-  }
-
-  function checkRow(columns, row, expectedTitle, expectedTotalSizes,
-      expectedSelfSizes, expectedDefinedValues) {
+  function checkRow(columns, row, expectedDimension, expectedTitle,
+      expectedSizes, expectedDefinedValues) {
     var formattedTitle = columns[0].formatTitle(row);
-    assert.equal(formattedTitle, expectedTitle);
+    switch (expectedDimension) {
+      case ROOT:
+        assert.equal(formattedTitle, expectedTitle);
+        break;
 
-    checkSizeAttributes(row, columns[1], expectedTotalSizes);
-    checkSizeAttributes(row, columns[2], expectedSelfSizes);
+      case STACK_FRAME:
+      case OBJECT_TYPE:
+        assert.lengthOf(formattedTitle.childNodes, 2);
+        assert.strictEqual(formattedTitle.childNodes[0].textContent,
+            expectedDimension === STACK_FRAME ? '\u0192' : '\u24C9');
+        assert.strictEqual(
+            formattedTitle.childNodes[1].textContent, expectedTitle);
+        break;
 
-    if (expectedDefinedValues)
-      assert.deepEqual(tr.b.asArray(row.defined), expectedDefinedValues);
-    else
-      assert.isUndefined(row.defined);
+      default:
+        throw new Error('Invalid expected dimension: ' + expectedDimension);
+    }
+
+    checkSizeNumericFields(row, columns[1], expectedSizes);
+
+    var actualDefinedValues = new Array(row.contexts.length);
+    for (var i = 0; i < row.contexts.length; i++)
+      actualDefinedValues[i] = row.contexts[i] !== undefined;
+    assert.deepEqual(actualDefinedValues, expectedDefinedValues);
   }
 
   function checkRows(columns, rows, expectedStructure) {
@@ -131,33 +253,106 @@
     for (var i = 0; i < expectedStructure.length; i++) {
       var row = rows[i];
       var expectedRowStructure = expectedStructure[i];
-      checkRow(columns, row, expectedRowStructure.title,
-          expectedRowStructure.total, expectedRowStructure.self,
+      checkRow(columns, row, expectedRowStructure.dimension,
+          expectedRowStructure.title, expectedRowStructure.size,
           expectedRowStructure.defined);
       checkRows(columns, row.subRows, expectedRowStructure.children);
     }
   }
 
-  function checkTable(viewEl, expectedAggregationMode, expectedStructure) {
-    checkTableAndViewModeSelectorDislayed(viewEl, true);
+  function checkTable(viewEl, expectedAggregationMode, expectedInfoBarDisplayed,
+      expectedStructure) {
+    checkDisplayedElements(viewEl, {
+      infoText: false,
+      tableAndSelector: true,
+      infoBar: expectedInfoBarDisplayed
+    });
     var table = viewEl.$.table;
     var columns = table.tableColumns;
     var rows = table.tableRows;
-    checkColumns(columns, expectedAggregationMode);
+    checkColumns(columns, EXPECTED_COLUMNS, expectedAggregationMode);
     checkRows(columns, rows, expectedStructure);
   }
 
-  function changeView(viewEl, bottomUpView) {
-    tr.b.findDeepElementMatching(viewEl, 'select').selectedValue = bottomUpView;
+  function changeView(viewEl, viewType) {
+    tr.b.findDeepElementMatching(viewEl, 'select').selectedValue = viewType;
     viewEl.rebuild();
   }
 
+  /**
+   * Helper function for generating the expected structures of heap details
+   * pane tables. Given a table, this function generates its structure.
+   *
+   * This avoids the need to write such structures manually, which is very
+   * tedious. However, the correctness of the generated structures needs to be
+   * verified by the developer! Maximum line length must also be enforced
+   * manually.
+   */
+  function printTable(test, viewEl) {
+    var generator = new tr.c.TestUtils.SourceGenerator();
+
+    function formatRows(rows) {
+      generator.formatMultiLineList(rows, function(row) {
+        generator.push('{');
+        generator.indentBlock(2, true /* break line */, function() {
+          generator.push('dimension: ');
+          for (var dimensionTitle in RowDimension) {
+            if (row.dimension === RowDimension[dimensionTitle]) {
+              generator.push(dimensionTitle);
+              break;
+            }
+          }
+          generator.push(',');
+          generator.breakLine();
+
+          generator.push('title: \'', row.title, '\',');
+          generator.breakLine();
+
+          var fields = row.cells['Size'].fields;
+          generator.push('size: ');
+          generator.formatSingleLineList(fields, function(field) {
+            generator.push(
+                field === undefined ? 'undefined' : String(field.value));
+          });
+          generator.push(',');
+          generator.breakLine();
+
+          generator.push('defined: ');
+          generator.formatSingleLineList(fields, function(field) {
+            generator.push(field === undefined ? 'undefined' : 'true');
+          });
+
+          if (row.subRows && row.subRows.length > 0) {
+            generator.push(',');
+            generator.breakLine();
+            generator.push('children: ');
+            formatRows(row.subRows);
+          }
+        });
+        generator.breakLine();
+        generator.push('}');
+      });
+    }
+
+    generator.indentBlock(8, false /* don't break line */,
+        formatRows.bind(null, viewEl.$.table.tableRows));
+
+    tr.c.TestUtils.addSourceListing(test, generator.build());
+
+    throw new Error('This error is thrown to prevent accidentally ' +
+        'checking in a test which calls this function.');
+  }
+
   test('instantiate_empty', function() {
     tr.ui.analysis.createAndCheckEmptyPanes(this,
         'tr-ui-a-memory-dump-heap-details-pane', 'heapDumps',
         function(viewEl) {
           // Check that the info text is shown.
-          checkTableAndViewModeSelectorDislayed(viewEl, false);
+          checkDisplayedElements(viewEl, {
+            infoText: true,
+            tableAndSelector: false,
+            infoBar: false
+          });
         });
   });
 
@@ -171,29 +366,40 @@
     viewEl.rebuild();
     this.addHTMLOutput(viewEl);
 
-    // Top-down view.
-    checkTable(viewEl, undefined /* no aggregation */, [
-      {
-        title: 'partition_alloc',
-        total: [0],
-        self: [0],
-        defined: [true]
-      }
-    ]);
+    // Top-down tree view (default).
+    checkTable(viewEl, undefined /* no aggregation */,
+        false /* hide info bar */, [
+          {
+            dimension: ROOT,
+            title: 'partition_alloc',
+            size: [0],
+            defined: [true]
+          }
+        ]);
 
-    changeView(viewEl, true /* bottom-up view */);
+    changeView(viewEl, TOP_DOWN_HEAVY_VIEW);
+    checkTable(viewEl, undefined /* no aggregation */, true /* show info bar */,
+        [
+          {
+            dimension: ROOT,
+            title: 'partition_alloc',
+            size: [0],
+            defined: [true]
+          }
+        ]);
 
-    // Bottom-up view.
-    checkTable(viewEl, undefined /* no aggregation */, [
-      {
-        title: 'partition_alloc',
-        total: [0],
-        self: [0],
-        defined: [true]
-      }
-    ]);
+    changeView(viewEl, BOTTOM_UP_HEAVY_VIEW);
+    checkTable(viewEl, undefined /* no aggregation */, true /* show info bar */,
+        [
+          {
+            dimension: ROOT,
+            title: 'partition_alloc',
+            size: [0],
+            defined: [true]
+          }
+        ]);
 
-    changeView(viewEl, false /* top-down view */);
+    changeView(viewEl, TOP_DOWN_TREE_VIEW);
   });
 
   test('instantiate_single', function() {
@@ -205,158 +411,870 @@
     viewEl.rebuild();
     this.addHTMLOutput(viewEl);
 
-    // Top-down view (default).
-    checkTable(viewEl, undefined /* no aggregation */, [
-      {
-        title: 'partition_alloc',
-        total: [4194304],
-        self: [0],
-        defined: [true],
-        children: [
+    // Top-down tree view (default).
+    checkTable(viewEl, undefined /* no aggregation */,
+        false /* hide info bar */, [
           {
-            title: 'MessageLoop::RunTask',
-            total: [3760128],
-            self: [0],
+            dimension: ROOT,
+            title: 'partition_alloc',
+            size: [4194304],
             defined: [true],
             children: [
               {
-                title: 'FunctionCall',
-                total: [1355776],
-                self: [102400],
+                dimension: STACK_FRAME,
+                title: 'MessageLoop::RunTask',
+                size: [4194304],
                 defined: [true],
                 children: [
                   {
-                    title: 'V8.Execute',
-                    total: [1048576],
-                    self: [1048576],
-                    defined: [true]
-                  },
-                  {
+                    dimension: STACK_FRAME,
                     title: 'FunctionCall',
-                    total: [204800],
-                    self: [204800],
-                    defined: [true]
-                  }
-                ]
-              },
-              {
-                title: 'V8.Execute',
-                total: [2404352],
-                self: [0],
-                defined: [true],
-                children: [
-                  {
-                    title: 'FunctionCall',
-                    total: [2404352],
-                    self: [307200],
+                    size: [1406976],
                     defined: [true],
                     children: [
                       {
+                        dimension: STACK_FRAME,
+                        title: '<self>',
+                        size: [102400],
+                        defined: [true],
+                      },
+                      {
+                        dimension: STACK_FRAME,
                         title: 'V8.Execute',
-                        total: [2097152],
-                        self: [2097152],
-                        defined: [true]
+                        size: [1048576],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: OBJECT_TYPE,
+                            title: 'v8::Context',
+                            size: [716800],
+                            defined: [true],
+                          },
+                          {
+                            dimension: OBJECT_TYPE,
+                            title: '<other>',
+                            size: [331776],
+                            defined: [true],
+                          }
+                        ]
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [153600 + 51200],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: OBJECT_TYPE,
+                            title: 'v8::Context',
+                            size: [153600],
+                            defined: [true],
+                          },
+                          {
+                            dimension: OBJECT_TYPE,
+                            title: '<other>',
+                            size: [51200],
+                            defined: [true],
+                          }
+                        ]
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: '<other>',
+                        size: [51200],
+                        defined: [true],
+                      },
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'blink::Node',
+                        size: [331776],
+                        defined: [true],
+                      },
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'v8::Context',
+                        size: [1024000],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'V8.Execute',
+                            size: [716800],
+                            defined: [true],
+                          },
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'FunctionCall',
+                            size: [153600],
+                            defined: [true],
+                          },
+                          {
+                            dimension: STACK_FRAME,
+                            title: '<other>',
+                            size: [153600],
+                            defined: [true],
+                          }
+                        ]
+                      },
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: '<other>',
+                        size: [51200],
+                        defined: [true],
                       }
                     ]
-                  }
-                ]
-              }
-            ]
-          },
-          {
-            title: '<unspecified>',
-            total: [434176],
-            self: [434176],
-            defined: [true]
-          }
-        ]
-      }
-    ]);
-
-    changeView(viewEl, true /* bottom-up view */);
-    checkTable(viewEl, undefined /* no aggregation */, [
-      {
-        title: 'partition_alloc',
-        total: [4194304],
-        self: [0],
-        defined: [true],
-        children: [
-          {
-            title: 'MessageLoop::RunTask',
-            total: [3760128],
-            self: [0],
-            defined: [true]
-          },
-          {
-            title: 'FunctionCall',
-            total: [3760128],
-            self: [614400],
-            defined: [true],
-            children: [
-              {
-                title: 'MessageLoop::RunTask',
-                total: [1355776],
-                self: [102400],
-                defined: [true]
-              },
-              {
-                title: 'FunctionCall',
-                total: [204800],
-                self: [204800],
-                defined: [true],
-                children: [
-                  {
-                    title: 'MessageLoop::RunTask',
-                    total: [204800],
-                    self: [204800],
-                    defined: [true]
-                  }
-                ]
-              },
-              {
-                title: 'V8.Execute',
-                total: [2404352],
-                self: [307200],
-                defined: [true],
-                children: [
-                  {
-                    title: 'MessageLoop::RunTask',
-                    total: [2404352],
-                    self: [307200],
-                    defined: [true]
-                  }
-                ]
-              }
-            ]
-          },
-          {
-            title: 'V8.Execute',
-            total: [3452928],
-            self: [3145728],
-            defined: [true],
-            children: [
-              {
-                title: 'FunctionCall',
-                total: [3145728],
-                self: [3145728],
-                defined: [true],
-                children: [
-                  {
-                    title: 'MessageLoop::RunTask',
-                    total: [1048576],
-                    self: [1048576],
-                    defined: [true]
                   },
                   {
+                    dimension: STACK_FRAME,
                     title: 'V8.Execute',
-                    total: [2097152],
-                    self: [2097152],
+                    size: [2404352],
                     defined: [true],
                     children: [
                       {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [2404352],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: '<self>',
+                            size: [307200],
+                            defined: [true],
+                            children: [
+                              {
+                                dimension: OBJECT_TYPE,
+                                title: 'v8::Context',
+                                size: [15360],
+                                defined: [true],
+                              },
+                              {
+                                dimension: OBJECT_TYPE,
+                                title: '<other>',
+                                size: [291840],
+                                defined: [true],
+                              }
+                            ]
+                          },
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'V8.Execute',
+                            size: [2097152],
+                            defined: [true],
+                            children: [
+                              {
+                                dimension: STACK_FRAME,
+                                title: 'V8.Execute',
+                                size: [2097152],
+                                defined: [true],
+                              }
+                            ]
+                          },
+                          {
+                            dimension: OBJECT_TYPE,
+                            title: 'v8::Context',
+                            size: [20480],
+                            defined: [true],
+                            children: [
+                              {
+                                dimension: STACK_FRAME,
+                                title: '<self>',
+                                size: [15360],
+                                defined: [true],
+                              },
+                              {
+                                dimension: STACK_FRAME,
+                                title: '<other>',
+                                size: [5120],
+                                defined: [true],
+                              }
+                            ]
+                          },
+                          {
+                            dimension: OBJECT_TYPE,
+                            title: '<other>',
+                            size: [2383872],
+                            defined: [true],
+                          }
+                        ]
+                      }
+                    ]
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: '<other>',
+                    size: [382976],
+                    defined: [true],
+                  },
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'v8::Context',
+                    size: [1048576],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [1024000],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'V8.Execute',
+                            size: [716800],
+                            defined: [true],
+                          },
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'FunctionCall',
+                            size: [153600],
+                            defined: [true],
+                          },
+                          {
+                            dimension: STACK_FRAME,
+                            title: '<other>',
+                            size: [153600],
+                            defined: [true],
+                          }
+                        ]
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: '<other>',
+                        size: [24576],
+                        defined: [true],
+                      }
+                    ]
+                  },
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: '<other>',
+                    size: [3145728],
+                    defined: [true],
+                  }
+                ]
+              },
+              {
+                dimension: OBJECT_TYPE,
+                title: 'v8::Context',
+                size: [1048576],
+                defined: [true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'MessageLoop::RunTask',
+                    size: [1048576],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [1024000],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'V8.Execute',
+                            size: [716800],
+                            defined: [true],
+                          },
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'FunctionCall',
+                            size: [153600],
+                            defined: [true],
+                          },
+                          {
+                            dimension: STACK_FRAME,
+                            title: '<other>',
+                            size: [153600],
+                            defined: [true],
+                          }
+                        ]
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: '<other>',
+                        size: [24576],
+                        defined: [true],
+                      }
+                    ]
+                  }
+                ]
+              },
+              {
+                dimension: OBJECT_TYPE,
+                title: 'blink::Node',
+                size: [331776],
+                defined: [true],
+              },
+              {
+                dimension: OBJECT_TYPE,
+                title: '<other>',
+                size: [2813952],
+                defined: [true],
+              }
+            ]
+          }
+        ]);
+
+    changeView(viewEl, BOTTOM_UP_HEAVY_VIEW);
+    checkTable(viewEl, undefined /* no aggregation */, true /* show info bar */,
+        [
+          {
+            dimension: ROOT,
+            title: 'partition_alloc',
+            size: [4194304],
+            defined: [true],
+            children: [
+              {
+                dimension: STACK_FRAME,
+                title: 'MessageLoop::RunTask',
+                size: [4194304],
+                defined: [true],
+                children: [
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'v8::Context',
+                    size: [1048576],
+                    defined: [true]
+                  },
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'blink::Node',
+                    size: [331776],
+                    defined: [true]
+                  },
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'MissingSumOverAllTypes',
+                    size: [51200],
+                    defined: [true]
+                  }
+                ]
+              },
+              {
+                dimension: STACK_FRAME,
+                title: 'FunctionCall',
+                size: [3811338],
+                defined: [true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'MessageLoop::RunTask',
+                    size: [1406976],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'blink::Node',
+                        size: [331776],
+                        defined: [true]
+                      },
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'v8::Context',
+                        size: [1024000],
+                        defined: [true]
+                      },
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'MissingSumOverAllTypes',
+                        size: [51200],
+                        defined: [true]
+                      }
+                    ]
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [204800],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
                         title: 'MessageLoop::RunTask',
-                        total: [2097152],
-                        self: [2097152],
+                        size: [204800],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: OBJECT_TYPE,
+                            title: 'v8::Context',
+                            size: [153600],
+                            defined: [true]
+                          },
+                          {
+                            dimension: OBJECT_TYPE,
+                            title: 'MissingSumOverAllTypes',
+                            size: [51200],
+                            defined: [true]
+                          }
+                        ]
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [51200],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'MessageLoop::RunTask',
+                            size: [51200],
+                            defined: [true],
+                            children: [
+                              {
+                                dimension: OBJECT_TYPE,
+                                title: 'MissingSumOverAllTypes',
+                                size: [51200],
+                                defined: [true]
+                              }
+                            ]
+                          },
+                          {
+                            dimension: OBJECT_TYPE,
+                            title: 'MissingSumOverAllTypes',
+                            size: [51200],
+                            defined: [true],
+                            children: [
+                              {
+                                dimension: STACK_FRAME,
+                                title: 'MessageLoop::RunTask',
+                                size: [51200],
+                                defined: [true]
+                              }
+                            ]
+                          }
+                        ]
+                      },
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'v8::Context',
+                        size: [153600],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'MessageLoop::RunTask',
+                            size: [153600],
+                            defined: [true]
+                          }
+                        ]
+                      },
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'MissingSumOverAllTypes',
+                        size: [51200],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'MessageLoop::RunTask',
+                            size: [51200],
+                            defined: [true]
+                          },
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'FunctionCall',
+                            size: [51200],
+                            defined: [true],
+                            children: [
+                              {
+                                dimension: STACK_FRAME,
+                                title: 'MessageLoop::RunTask',
+                                size: [51200],
+                                defined: [true]
+                              }
+                            ]
+                          }
+                        ]
+                      }
+                    ]
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'MissingParent',
+                    size: [10],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'MessageLoop::RunTask',
+                        size: [10],
+                        defined: [true]
+                      }
+                    ]
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'V8.Execute',
+                    size: [2404352],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'MessageLoop::RunTask',
+                        size: [2404352],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: OBJECT_TYPE,
+                            title: 'v8::Context',
+                            size: [20480],
+                            defined: [true]
+                          }
+                        ]
+                      },
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'v8::Context',
+                        size: [20480],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'MessageLoop::RunTask',
+                            size: [20480],
+                            defined: [true]
+                          }
+                        ]
+                      }
+                    ]
+                  },
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'blink::Node',
+                    size: [331776],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'MessageLoop::RunTask',
+                        size: [331776],
+                        defined: [true]
+                      }
+                    ]
+                  },
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'v8::Context',
+                    size: [1044480],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'MessageLoop::RunTask',
+                        size: [1024000],
+                        defined: [true]
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [153600],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'MessageLoop::RunTask',
+                            size: [153600],
+                            defined: [true]
+                          }
+                        ]
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'V8.Execute',
+                        size: [20480],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'MessageLoop::RunTask',
+                            size: [20480],
+                            defined: [true]
+                          }
+                        ]
+                      }
+                    ]
+                  },
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'MissingSumOverAllTypes',
+                    size: [51200],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'MessageLoop::RunTask',
+                        size: [51200],
+                        defined: [true]
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [51200],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'MessageLoop::RunTask',
+                            size: [51200],
+                            defined: [true]
+                          },
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'FunctionCall',
+                            size: [51200],
+                            defined: [true],
+                            children: [
+                              {
+                                dimension: STACK_FRAME,
+                                title: 'MessageLoop::RunTask',
+                                size: [51200],
+                                defined: [true]
+                              }
+                            ]
+                          }
+                        ]
+                      }
+                    ]
+                  }
+                ]
+              },
+              {
+                dimension: STACK_FRAME,
+                title: '<self>',
+                size: [409600],
+                defined: [true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [409600],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'MessageLoop::RunTask',
+                        size: [102400],
+                        defined: [true]
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'V8.Execute',
+                        size: [307200],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'MessageLoop::RunTask',
+                            size: [307200],
+                            defined: [true],
+                            children: [
+                              {
+                                dimension: OBJECT_TYPE,
+                                title: 'v8::Context',
+                                size: [15360],
+                                defined: [true]
+                              }
+                            ]
+                          },
+                          {
+                            dimension: OBJECT_TYPE,
+                            title: 'v8::Context',
+                            size: [15360],
+                            defined: [true],
+                            children: [
+                              {
+                                dimension: STACK_FRAME,
+                                title: 'MessageLoop::RunTask',
+                                size: [15360],
+                                defined: [true]
+                              }
+                            ]
+                          }
+                        ]
+                      },
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'v8::Context',
+                        size: [15360],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'V8.Execute',
+                            size: [15360],
+                            defined: [true],
+                            children: [
+                              {
+                                dimension: STACK_FRAME,
+                                title: 'MessageLoop::RunTask',
+                                size: [15360],
+                                defined: [true]
+                              }
+                            ]
+                          }
+                        ]
+                      }
+                    ]
+                  },
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'v8::Context',
+                    size: [15360],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [15360],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'V8.Execute',
+                            size: [15360],
+                            defined: [true],
+                            children: [
+                              {
+                                dimension: STACK_FRAME,
+                                title: 'MessageLoop::RunTask',
+                                size: [15360],
+                                defined: [true]
+                              }
+                            ]
+                          }
+                        ]
+                      }
+                    ]
+                  }
+                ]
+              },
+              {
+                dimension: STACK_FRAME,
+                title: 'V8.Execute',
+                size: [3452928],
+                defined: [true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [3145728],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'MessageLoop::RunTask',
+                        size: [1048576],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: OBJECT_TYPE,
+                            title: 'v8::Context',
+                            size: [716800],
+                            defined: [true]
+                          }
+                        ]
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'V8.Execute',
+                        size: [2097152],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'MessageLoop::RunTask',
+                            size: [2097152],
+                            defined: [true]
+                          }
+                        ]
+                      },
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'v8::Context',
+                        size: [716800],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'MessageLoop::RunTask',
+                            size: [716800],
+                            defined: [true]
+                          }
+                        ]
+                      }
+                    ]
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'MessageLoop::RunTask',
+                    size: [2404352],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'v8::Context',
+                        size: [20480],
+                        defined: [true]
+                      }
+                    ]
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'V8.Execute',
+                    size: [2097152],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [2097152],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'V8.Execute',
+                            size: [2097152],
+                            defined: [true],
+                            children: [
+                              {
+                                dimension: STACK_FRAME,
+                                title: 'MessageLoop::RunTask',
+                                size: [2097152],
+                                defined: [true]
+                              }
+                            ]
+                          }
+                        ]
+                      }
+                    ]
+                  },
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'v8::Context',
+                    size: [737280],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [716800],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'MessageLoop::RunTask',
+                            size: [716800],
+                            defined: [true]
+                          }
+                        ]
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'MessageLoop::RunTask',
+                        size: [20480],
                         defined: [true]
                       }
                     ]
@@ -364,22 +1282,999 @@
                 ]
               },
               {
-                title: 'MessageLoop::RunTask',
-                total: [2404352],
-                self: [0],
-                defined: [true]
+                dimension: STACK_FRAME,
+                title: 'MissingParent',
+                size: [10],
+                defined: [true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'MessageLoop::RunTask',
+                    size: [10],
+                    defined: [true]
+                  }
+                ]
+              },
+              {
+                dimension: OBJECT_TYPE,
+                title: 'v8::Context',
+                size: [1048576],
+                defined: [true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'MessageLoop::RunTask',
+                    size: [1048576],
+                    defined: [true]
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [1044480],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'MessageLoop::RunTask',
+                        size: [1024000],
+                        defined: [true]
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [153600],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'MessageLoop::RunTask',
+                            size: [153600],
+                            defined: [true]
+                          }
+                        ]
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'V8.Execute',
+                        size: [20480],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'MessageLoop::RunTask',
+                            size: [20480],
+                            defined: [true]
+                          }
+                        ]
+                      }
+                    ]
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: '<self>',
+                    size: [15360],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [15360],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'V8.Execute',
+                            size: [15360],
+                            defined: [true],
+                            children: [
+                              {
+                                dimension: STACK_FRAME,
+                                title: 'MessageLoop::RunTask',
+                                size: [15360],
+                                defined: [true]
+                              }
+                            ]
+                          }
+                        ]
+                      }
+                    ]
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'V8.Execute',
+                    size: [737280],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [716800],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'MessageLoop::RunTask',
+                            size: [716800],
+                            defined: [true]
+                          }
+                        ]
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'MessageLoop::RunTask',
+                        size: [20480],
+                        defined: [true]
+                      }
+                    ]
+                  }
+                ]
+              },
+              {
+                dimension: OBJECT_TYPE,
+                title: 'blink::Node',
+                size: [331776],
+                defined: [true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'MessageLoop::RunTask',
+                    size: [331776],
+                    defined: [true]
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [331776],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'MessageLoop::RunTask',
+                        size: [331776],
+                        defined: [true]
+                      }
+                    ]
+                  }
+                ]
+              },
+              {
+                dimension: OBJECT_TYPE,
+                title: 'MissingSumOverAllTypes',
+                size: [51200],
+                defined: [true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'MessageLoop::RunTask',
+                    size: [51200],
+                    defined: [true]
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [51200],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'MessageLoop::RunTask',
+                        size: [51200],
+                        defined: [true]
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [51200],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'MessageLoop::RunTask',
+                            size: [51200],
+                            defined: [true]
+                          },
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'FunctionCall',
+                            size: [51200],
+                            defined: [true],
+                            children: [
+                              {
+                                dimension: STACK_FRAME,
+                                title: 'MessageLoop::RunTask',
+                                size: [51200],
+                                defined: [true]
+                              }
+                            ]
+                          }
+                        ]
+                      }
+                    ]
+                  }
+                ]
               }
             ]
-          },
-          {
-            title: '<unspecified>',
-            total: [434176],
-            self: [434176],
-            defined: [true]
           }
-        ]
-      }
-    ]);
+        ]);
+
+    changeView(viewEl, TOP_DOWN_HEAVY_VIEW);
+    checkTable(viewEl, undefined /* no aggregation */, true /* show info bar */,
+        [
+          {
+            dimension: ROOT,
+            title: 'partition_alloc',
+            size: [4194304],
+            defined: [true],
+            children: [
+              {
+                dimension: STACK_FRAME,
+                title: 'MessageLoop::RunTask',
+                size: [4194304],
+                defined: [true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [1406976],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: '<self>',
+                        size: [102400],
+                        defined: [true],
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'V8.Execute',
+                        size: [1048576],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: OBJECT_TYPE,
+                            title: 'v8::Context',
+                            size: [716800],
+                            defined: [true],
+                          }
+                        ]
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [153600 + 51200],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'FunctionCall',
+                            size: [51200],
+                            defined: [true],
+                            children: [
+                              {
+                                dimension: OBJECT_TYPE,
+                                title: 'MissingSumOverAllTypes',
+                                size: [51200],
+                                defined: [true],
+                              }
+                            ]
+                          },
+                          {
+                            dimension: OBJECT_TYPE,
+                            title: 'v8::Context',
+                            size: [153600],
+                            defined: [true],
+                          },
+                          {
+                            dimension: OBJECT_TYPE,
+                            title: 'MissingSumOverAllTypes',
+                            size: [51200],
+                            defined: [true],
+                            children: [
+                              {
+                                dimension: STACK_FRAME,
+                                title: 'FunctionCall',
+                                size: [51200],
+                                defined: [true],
+                              }
+                            ]
+                          }
+                        ]
+                      },
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'blink::Node',
+                        size: [331776],
+                        defined: [true],
+                      },
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'v8::Context',
+                        size: [1024000],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'V8.Execute',
+                            size: [716800],
+                            defined: [true],
+                          },
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'FunctionCall',
+                            size: [153600],
+                            defined: [true],
+                          }
+                        ]
+                      },
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'MissingSumOverAllTypes',
+                        size: [51200],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'FunctionCall',
+                            size: [51200],
+                            defined: [true],
+                            children: [
+                              {
+                                dimension: STACK_FRAME,
+                                title: 'FunctionCall',
+                                size: [51200],
+                                defined: [true],
+                              }
+                            ]
+                          }
+                        ]
+                      }
+                    ]
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'MissingParent',
+                    size: [10],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [10],
+                        defined: [true],
+                      }
+                    ]
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'V8.Execute',
+                    size: [2404352],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [2404352],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: '<self>',
+                            size: [307200],
+                            defined: [true],
+                            children: [
+                              {
+                                dimension: OBJECT_TYPE,
+                                title: 'v8::Context',
+                                size: [15360],
+                                defined: [true],
+                              }
+                            ]
+                          },
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'V8.Execute',
+                            size: [2097152],
+                            defined: [true],
+                            children: [
+                              {
+                                dimension: STACK_FRAME,
+                                title: 'V8.Execute',
+                                size: [2097152],
+                                defined: [true],
+                              }
+                            ]
+                          },
+                          {
+                            dimension: OBJECT_TYPE,
+                            title: 'v8::Context',
+                            size: [20480],
+                            defined: [true],
+                            children: [
+                              {
+                                dimension: STACK_FRAME,
+                                title: '<self>',
+                                size: [15360],
+                                defined: [true],
+                              }
+                            ]
+                          }
+                        ]
+                      },
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'v8::Context',
+                        size: [20480],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'FunctionCall',
+                            size: [20480],
+                            defined: [true],
+                            children: [
+                              {
+                                dimension: STACK_FRAME,
+                                title: '<self>',
+                                size: [15360],
+                                defined: [true],
+                              }
+                            ]
+                          }
+                        ]
+                      }
+                    ]
+                  },
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'v8::Context',
+                    size: [1048576],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [1024000],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'V8.Execute',
+                            size: [716800],
+                            defined: [true],
+                          },
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'FunctionCall',
+                            size: [153600],
+                            defined: [true],
+                          }
+                        ]
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'V8.Execute',
+                        size: [20480],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'FunctionCall',
+                            size: [20480],
+                            defined: [true],
+                            children: [
+                              {
+                                dimension: STACK_FRAME,
+                                title: '<self>',
+                                size: [15360],
+                                defined: [true],
+                              }
+                            ]
+                          }
+                        ]
+                      }
+                    ]
+                  },
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'blink::Node',
+                    size: [331776],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [331776],
+                        defined: [true],
+                      }
+                    ]
+                  },
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'MissingSumOverAllTypes',
+                    size: [51200],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [51200],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'FunctionCall',
+                            size: [51200],
+                            defined: [true],
+                            children: [
+                              {
+                                dimension: STACK_FRAME,
+                                title: 'FunctionCall',
+                                size: [51200],
+                                defined: [true],
+                              }
+                            ]
+                          }
+                        ]
+                      }
+                    ]
+                  }
+                ]
+              },
+              {
+                dimension: STACK_FRAME,
+                title: 'FunctionCall',
+                size: [1406976 + 10 + 2404352],
+                defined: [true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: '<self>',
+                    size: [102400 + 307200],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'v8::Context',
+                        size: [15360],
+                        defined: [true],
+                      }
+                    ]
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'V8.Execute',
+                    size: [1048576 + 2097152],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'V8.Execute',
+                        size: [2097152],
+                        defined: [true],
+                      },
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'v8::Context',
+                        size: [716800],
+                        defined: [true],
+                      }
+                    ]
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [153600 + 51200],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [51200],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: OBJECT_TYPE,
+                            title: 'MissingSumOverAllTypes',
+                            size: [51200],
+                            defined: [true],
+                          }
+                        ]
+                      },
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'v8::Context',
+                        size: [153600],
+                        defined: [true],
+                      },
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'MissingSumOverAllTypes',
+                        size: [51200],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'FunctionCall',
+                            size: [51200],
+                            defined: [true],
+                          }
+                        ]
+                      }
+                    ]
+                  },
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'blink::Node',
+                    size: [331776],
+                    defined: [true],
+                  },
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'v8::Context',
+                    size: [1024000 + 20480],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: '<self>',
+                        size: [15360],
+                        defined: [true],
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'V8.Execute',
+                        size: [716800],
+                        defined: [true],
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [153600],
+                        defined: [true],
+                      }
+                    ]
+                  },
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'MissingSumOverAllTypes',
+                    size: [51200],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [51200],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'FunctionCall',
+                            size: [51200],
+                            defined: [true],
+                          }
+                        ]
+                      }
+                    ]
+                  }
+                ]
+              },
+              {
+                dimension: STACK_FRAME,
+                title: '<self>',
+                size: [102400 + 307200],
+                defined: [true],
+                children: [
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'v8::Context',
+                    size: [15360],
+                    defined: [true],
+                  }
+                ]
+              },
+              {
+                dimension: STACK_FRAME,
+                title: 'V8.Execute',
+                size: [1048576 + 2404352],
+                defined: [true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [2404352],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: '<self>',
+                        size: [307200],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: OBJECT_TYPE,
+                            title: 'v8::Context',
+                            size: [15360],
+                            defined: [true],
+                          }
+                        ]
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'V8.Execute',
+                        size: [2097152],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'V8.Execute',
+                            size: [2097152],
+                            defined: [true],
+                          }
+                        ]
+                      },
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'v8::Context',
+                        size: [20480],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: '<self>',
+                            size: [15360],
+                            defined: [true],
+                          }
+                        ]
+                      }
+                    ]
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'V8.Execute',
+                    size: [2097152],
+                    defined: [true],
+                  },
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'v8::Context',
+                    size: [716800 + 20480],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [20480],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: '<self>',
+                            size: [15360],
+                            defined: [true],
+                          }
+                        ]
+                      }
+                    ]
+                  }
+                ]
+              },
+              {
+                dimension: STACK_FRAME,
+                title: 'MissingParent',
+                size: [10],
+                defined: [true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [10],
+                    defined: [true],
+                  }
+                ]
+              },
+              {
+                dimension: OBJECT_TYPE,
+                title: 'v8::Context',
+                size: [1048576],
+                defined: [true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'MessageLoop::RunTask',
+                    size: [1048576],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [1024000],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'V8.Execute',
+                            size: [716800],
+                            defined: [true],
+                          },
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'FunctionCall',
+                            size: [153600],
+                            defined: [true],
+                          }
+                        ]
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'V8.Execute',
+                        size: [20480],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'FunctionCall',
+                            size: [20480],
+                            defined: [true],
+                            children: [
+                              {
+                                dimension: STACK_FRAME,
+                                title: '<self>',
+                                size: [15360],
+                                defined: [true],
+                              }
+                            ]
+                          }
+                        ]
+                      }
+                    ]
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [1024000 + 20480],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: '<self>',
+                        size: [15360],
+                        defined: [true],
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'V8.Execute',
+                        size: [716800],
+                        defined: [true],
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [153600],
+                        defined: [true],
+                      }
+                    ]
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: '<self>',
+                    size: [15360],
+                    defined: [true],
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'V8.Execute',
+                    size: [716800 + 20480],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [20480],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: '<self>',
+                            size: [15360],
+                            defined: [true],
+                          }
+                        ]
+                      }
+                    ]
+                  }
+                ]
+              },
+              {
+                dimension: OBJECT_TYPE,
+                title: 'blink::Node',
+                size: [331776],
+                defined: [true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'MessageLoop::RunTask',
+                    size: [331776],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [331776],
+                        defined: [true],
+                      }
+                    ]
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [331776],
+                    defined: [true],
+                  }
+                ]
+              },
+              {
+                dimension: OBJECT_TYPE,
+                title: 'MissingSumOverAllTypes',
+                size: [51200],
+                defined: [true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'MessageLoop::RunTask',
+                    size: [51200],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [51200],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'FunctionCall',
+                            size: [51200],
+                            defined: [true],
+                            children: [
+                              {
+                                dimension: STACK_FRAME,
+                                title: 'FunctionCall',
+                                size: [51200],
+                                defined: [true],
+                              }
+                            ]
+                          }
+                        ]
+                      }
+                    ]
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [51200],
+                    defined: [true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [51200],
+                        defined: [true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'FunctionCall',
+                            size: [51200],
+                            defined: [true],
+                          }
+                        ]
+                      }
+                    ]
+                  }
+                ]
+              }
+            ]
+          }
+        ]);
   });
 
   test('instantiate_multipleDiff', function() {
@@ -392,199 +2287,1319 @@
     viewEl.rebuild();
     this.addHTMLOutput(viewEl);
 
-    changeView(viewEl, true /* bottom-up view */);
-    checkTable(viewEl, AggregationMode.DIFF, [
+    changeView(viewEl, TOP_DOWN_HEAVY_VIEW);
+    checkTable(viewEl, AggregationMode.DIFF, true /* show info bar */, [
       {
+        dimension: ROOT,
         title: 'partition_alloc',
-        total: [4194304, 4923392],
-        self: [0, 131072],
+        size: [4194304, 4954521],
         defined: [true, true],
         children: [
           {
+            dimension: STACK_FRAME,
             title: 'MessageLoop::RunTask',
-            total: [3760128, 4792320],
-            self: [0, 0],
-            defined: [true, true]
-          },
-          {
-            title: 'FunctionCall',
-            total: [3760128, 4792320],
-            self: [614400, 598016],
+            size: [4194304, 4823449],
             defined: [true, true],
             children: [
               {
-                title: 'MessageLoop::RunTask',
-                total: [1355776, 2170880],
-                self: [102400, 393216],
-                defined: [true, true]
-              },
-              {
+                dimension: STACK_FRAME,
                 title: 'FunctionCall',
-                total: [204800, 204800],
-                self: [204800, 204800],
+                size: [1406976, 2170880],
                 defined: [true, true],
                 children: [
                   {
-                    title: 'MessageLoop::RunTask',
-                    total: [204800, 204800],
-                    self: [204800, 0],
-                    defined: [true, true]
+                    dimension: STACK_FRAME,
+                    title: '<self>',
+                    size: [102400, 393216],
+                    defined: [true, true],
                   },
                   {
-                    title: 'FunctionCall',
-                    total: [undefined, 204800],
-                    self: [undefined, 204800],
-                    defined: [undefined, true],
-                    children: [
-                      {
-                        title: 'MessageLoop::RunTask',
-                        total: [undefined, 204800],
-                        self: [undefined, 204800],
-                        defined: [undefined, true]
-                      }
-                    ]
-                  }
-                ]
-              },
-              {
-                title: 'V8.Execute',
-                total: [2404352, 2621440],
-                self: [307200, 0],
-                defined: [true, true],
-                children: [
-                  {
-                    title: 'MessageLoop::RunTask',
-                    total: [2404352, 2621440],
-                    self: [307200, 0],
-                    defined: [true, true]
-                  }
-                ]
-              }
-            ]
-          },
-          {
-            title: 'V8.Execute',
-            total: [3452928, 4194304],
-            self: [3145728, 4194304],
-            defined: [true, true],
-            children: [
-              {
-                title: 'FunctionCall',
-                total: [3145728, 4194304],
-                self: [3145728, 4194304],
-                defined: [true, true],
-                children: [
-                  {
-                    title: 'MessageLoop::RunTask',
-                    total: [1048576, 1572864],
-                    self: [1048576, 1572864],
-                    defined: [true, true]
-                  },
-                  {
+                    dimension: STACK_FRAME,
                     title: 'V8.Execute',
-                    total: [2097152, 2621440],
-                    self: [2097152, 2621440],
+                    size: [1048576, 1572864],
                     defined: [true, true],
                     children: [
                       {
-                        title: 'MessageLoop::RunTask',
-                        total: [2097152, 2621440],
-                        self: [2097152, 2621440],
-                        defined: [true, true]
+                        dimension: OBJECT_TYPE,
+                        title: 'v8::Context',
+                        size: [716800, 614400],
+                        defined: [true, true],
+                      },
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'blink::Node',
+                        size: [undefined, 819200],
+                        defined: [false, true],
+                      }
+                    ]
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [204800, 204800],
+                    defined: [true, true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [51200, 204800],
+                        defined: [true, true],
+                        children: [
+                          {
+                            dimension: OBJECT_TYPE,
+                            title: 'MissingSumOverAllTypes',
+                            size: [51200, undefined],
+                            defined: [true, false],
+                          }
+                        ]
+                      },
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'v8::Context',
+                        size: [153600, 122880],
+                        defined: [true, true],
+                      },
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'MissingSumOverAllTypes',
+                        size: [51200, undefined],
+                        defined: [true, false],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'FunctionCall',
+                            size: [51200, undefined],
+                            defined: [true, false],
+                          }
+                        ]
+                      }
+                    ]
+                  },
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'blink::Node',
+                    size: [331776, 819200],
+                    defined: [true, true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'V8.Execute',
+                        size: [undefined, 819200],
+                        defined: [false, true],
+                      }
+                    ]
+                  },
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'v8::Context',
+                    size: [1024000, 1024000],
+                    defined: [true, true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'V8.Execute',
+                        size: [716800, 614400],
+                        defined: [true, true],
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [153600, 122880],
+                        defined: [true, true],
+                      }
+                    ]
+                  },
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'MissingSumOverAllTypes',
+                    size: [51200, undefined],
+                    defined: [true, false],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [51200, undefined],
+                        defined: [true, false],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'FunctionCall',
+                            size: [51200, undefined],
+                            defined: [true, false],
+                          }
+                        ]
                       }
                     ]
                   }
                 ]
               },
               {
-                title: 'MessageLoop::RunTask',
-                total: [2404352, 2621440],
-                self: [0, 0],
-                defined: [true, true]
+                dimension: STACK_FRAME,
+                title: 'MissingParent',
+                size: [10, undefined],
+                defined: [true, false],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [10, undefined],
+                    defined: [true, false],
+                  }
+                ]
+              },
+              {
+                dimension: STACK_FRAME,
+                title: 'V8.Execute',
+                size: [2404352, 2621440],
+                defined: [true, true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [2404352, 2621440],
+                    defined: [true, true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: '<self>',
+                        size: [307200, undefined],
+                        defined: [true, false],
+                        children: [
+                          {
+                            dimension: OBJECT_TYPE,
+                            title: 'v8::Context',
+                            size: [15360, undefined],
+                            defined: [true, false],
+                          }
+                        ]
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'V8.Execute',
+                        size: [2097152, 2516582],
+                        defined: [true, true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'V8.Execute',
+                            size: [2097152, undefined],
+                            defined: [true, false],
+                          }
+                        ]
+                      },
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'v8::Context',
+                        size: [20480, 20480],
+                        defined: [true, true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: '<self>',
+                            size: [15360, undefined],
+                            defined: [true, false],
+                          }
+                        ]
+                      },
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'WTF::StringImpl',
+                        size: [undefined, 126362],
+                        defined: [false, true],
+                      }
+                    ]
+                  },
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'v8::Context',
+                    size: [20480, 20480],
+                    defined: [true, true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [20480, 20480],
+                        defined: [true, true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: '<self>',
+                            size: [15360, undefined],
+                            defined: [true, false],
+                          }
+                        ]
+                      }
+                    ]
+                  },
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'WTF::StringImpl',
+                    size: [undefined, 126362],
+                    defined: [false, true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [undefined, 126362],
+                        defined: [false, true],
+                      }
+                    ]
+                  }
+                ]
+              },
+              {
+                dimension: OBJECT_TYPE,
+                title: 'v8::Context',
+                size: [1048576, 1127219],
+                defined: [true, true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [1024000, 1024000],
+                    defined: [true, true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'V8.Execute',
+                        size: [716800, 614400],
+                        defined: [true, true],
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [153600, 122880],
+                        defined: [true, true],
+                      }
+                    ]
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'V8.Execute',
+                    size: [20480, 20480],
+                    defined: [true, true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [20480, 20480],
+                        defined: [true, true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: '<self>',
+                            size: [15360, undefined],
+                            defined: [true, false],
+                          }
+                        ]
+                      }
+                    ]
+                  }
+                ]
+              },
+              {
+                dimension: OBJECT_TYPE,
+                title: 'blink::Node',
+                size: [331776, 819200],
+                defined: [true, true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [331776, 819200],
+                    defined: [true, true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'V8.Execute',
+                        size: [undefined, 819200],
+                        defined: [false, true],
+                      }
+                    ]
+                  }
+                ]
+              },
+              {
+                dimension: OBJECT_TYPE,
+                title: 'MissingSumOverAllTypes',
+                size: [51200, undefined],
+                defined: [true, false],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [51200, undefined],
+                    defined: [true, false],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [51200, undefined],
+                        defined: [true, false],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'FunctionCall',
+                            size: [51200, undefined],
+                            defined: [true, false],
+                          }
+                        ]
+                      }
+                    ]
+                  }
+                ]
+              },
+              {
+                dimension: OBJECT_TYPE,
+                title: 'WTF::StringImpl',
+                size: [undefined, 126362],
+                defined: [false, true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'V8.Execute',
+                    size: [undefined, 126362],
+                    defined: [false, true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [undefined, 126362],
+                        defined: [false, true],
+                      }
+                    ]
+                  }
+                ]
               }
             ]
           },
           {
-            title: '<unspecified>',
-            total: [434176, undefined],
-            self: [434176, undefined],
-            defined: [true, undefined]
+            dimension: STACK_FRAME,
+            title: 'FunctionCall',
+            size: [3811338, 4792320],
+            defined: [true, true],
+            children: [
+              {
+                dimension: STACK_FRAME,
+                title: '<self>',
+                size: [409600, 393216],
+                defined: [true, true],
+                children: [
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'v8::Context',
+                    size: [15360, undefined],
+                    defined: [true, false],
+                  }
+                ]
+              },
+              {
+                dimension: STACK_FRAME,
+                title: 'V8.Execute',
+                size: [3145728, 4089446],
+                defined: [true, true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'V8.Execute',
+                    size: [2097152, undefined],
+                    defined: [true, false],
+                  },
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'v8::Context',
+                    size: [716800, 614400],
+                    defined: [true, true],
+                  },
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'blink::Node',
+                    size: [undefined, 819200],
+                    defined: [false, true],
+                  }
+                ]
+              },
+              {
+                dimension: STACK_FRAME,
+                title: 'FunctionCall',
+                size: [204800, 204800],
+                defined: [true, true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [51200, 204800],
+                    defined: [true, true],
+                    children: [
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'MissingSumOverAllTypes',
+                        size: [51200, undefined],
+                        defined: [true, false],
+                      }
+                    ]
+                  },
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'v8::Context',
+                    size: [153600, 122880],
+                    defined: [true, true],
+                  },
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'MissingSumOverAllTypes',
+                    size: [51200, undefined],
+                    defined: [true, false],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [51200, undefined],
+                        defined: [true, false],
+                      }
+                    ]
+                  }
+                ]
+              },
+              {
+                dimension: OBJECT_TYPE,
+                title: 'blink::Node',
+                size: [331776, 819200],
+                defined: [true, true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'V8.Execute',
+                    size: [undefined, 819200],
+                    defined: [false, true],
+                  }
+                ]
+              },
+              {
+                dimension: OBJECT_TYPE,
+                title: 'v8::Context',
+                size: [1044480, 1044480],
+                defined: [true, true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: '<self>',
+                    size: [15360, undefined],
+                    defined: [true, false],
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'V8.Execute',
+                    size: [716800, 614400],
+                    defined: [true, true],
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [153600, 122880],
+                    defined: [true, true],
+                  }
+                ]
+              },
+              {
+                dimension: OBJECT_TYPE,
+                title: 'MissingSumOverAllTypes',
+                size: [51200, undefined],
+                defined: [true, false],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [51200, undefined],
+                    defined: [true, false],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [51200, undefined],
+                        defined: [true, false],
+                      }
+                    ]
+                  }
+                ]
+              },
+              {
+                dimension: OBJECT_TYPE,
+                title: 'WTF::StringImpl',
+                size: [undefined, 126362],
+                defined: [false, true],
+              }
+            ]
+          },
+          {
+            dimension: STACK_FRAME,
+            title: '<self>',
+            size: [409600, 524288],
+            defined: [true, true],
+            children: [
+              {
+                dimension: OBJECT_TYPE,
+                title: 'v8::Context',
+                size: [15360, 131072],
+                defined: [true, true],
+              }
+            ]
+          },
+          {
+            dimension: STACK_FRAME,
+            title: 'V8.Execute',
+            size: [3452928, 4194304],
+            defined: [true, true],
+            children: [
+              {
+                dimension: STACK_FRAME,
+                title: 'FunctionCall',
+                size: [2404352, 2621440],
+                defined: [true, true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: '<self>',
+                    size: [307200, undefined],
+                    defined: [true, false],
+                    children: [
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'v8::Context',
+                        size: [15360, undefined],
+                        defined: [true, false],
+                      }
+                    ]
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'V8.Execute',
+                    size: [2097152, 2516582],
+                    defined: [true, true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'V8.Execute',
+                        size: [2097152, undefined],
+                        defined: [true, false],
+                      }
+                    ]
+                  },
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'v8::Context',
+                    size: [20480, 20480],
+                    defined: [true, true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: '<self>',
+                        size: [15360, undefined],
+                        defined: [true, false],
+                      }
+                    ]
+                  },
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'WTF::StringImpl',
+                    size: [undefined, 126362],
+                    defined: [false, true],
+                  }
+                ]
+              },
+              {
+                dimension: STACK_FRAME,
+                title: 'V8.Execute',
+                size: [2097152, undefined],
+                defined: [true, false],
+              },
+              {
+                dimension: OBJECT_TYPE,
+                title: 'v8::Context',
+                size: [737280, 634880],
+                defined: [true, true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [20480, 20480],
+                    defined: [true, true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: '<self>',
+                        size: [15360, undefined],
+                        defined: [true, false],
+                      }
+                    ]
+                  }
+                ]
+              },
+              {
+                dimension: OBJECT_TYPE,
+                title: 'blink::Node',
+                size: [undefined, 819200],
+                defined: [false, true],
+              },
+              {
+                dimension: OBJECT_TYPE,
+                title: 'WTF::StringImpl',
+                size: [undefined, 126362],
+                defined: [false, true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [undefined, 126362],
+                    defined: [false, true],
+                  }
+                ]
+              }
+            ]
+          },
+          {
+            dimension: STACK_FRAME,
+            title: 'MissingParent',
+            size: [10, undefined],
+            defined: [true, false],
+            children: [
+              {
+                dimension: STACK_FRAME,
+                title: 'FunctionCall',
+                size: [10, undefined],
+                defined: [true, false],
+              }
+            ]
+          },
+          {
+            dimension: OBJECT_TYPE,
+            title: 'v8::Context',
+            size: [1048576, 1258291],
+            defined: [true, true],
+            children: [
+              {
+                dimension: STACK_FRAME,
+                title: 'MessageLoop::RunTask',
+                size: [1048576, 1127219],
+                defined: [true, true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [1024000, 1024000],
+                    defined: [true, true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'V8.Execute',
+                        size: [716800, 614400],
+                        defined: [true, true],
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [153600, 122880],
+                        defined: [true, true],
+                      }
+                    ]
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'V8.Execute',
+                    size: [20480, 20480],
+                    defined: [true, true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [20480, 20480],
+                        defined: [true, true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: '<self>',
+                            size: [15360, undefined],
+                            defined: [true, false],
+                          }
+                        ]
+                      }
+                    ]
+                  }
+                ]
+              },
+              {
+                dimension: STACK_FRAME,
+                title: 'FunctionCall',
+                size: [1044480, 1044480],
+                defined: [true, true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: '<self>',
+                    size: [15360, undefined],
+                    defined: [true, false],
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'V8.Execute',
+                    size: [716800, 614400],
+                    defined: [true, true],
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [153600, 122880],
+                    defined: [true, true],
+                  }
+                ]
+              },
+              {
+                dimension: STACK_FRAME,
+                title: '<self>',
+                size: [15360, 131072],
+                defined: [true, true],
+              },
+              {
+                dimension: STACK_FRAME,
+                title: 'V8.Execute',
+                size: [737280, 634880],
+                defined: [true, true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [20480, 20480],
+                    defined: [true, true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: '<self>',
+                        size: [15360, undefined],
+                        defined: [true, false],
+                      }
+                    ]
+                  }
+                ]
+              }
+            ]
+          },
+          {
+            dimension: OBJECT_TYPE,
+            title: 'blink::Node',
+            size: [331776, 1048576],
+            defined: [true, true],
+            children: [
+              {
+                dimension: STACK_FRAME,
+                title: 'MessageLoop::RunTask',
+                size: [331776, 819200],
+                defined: [true, true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [331776, 819200],
+                    defined: [true, true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'V8.Execute',
+                        size: [undefined, 819200],
+                        defined: [false, true],
+                      }
+                    ]
+                  }
+                ]
+              },
+              {
+                dimension: STACK_FRAME,
+                title: 'FunctionCall',
+                size: [331776, 819200],
+                defined: [true, true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'V8.Execute',
+                    size: [undefined, 819200],
+                    defined: [false, true],
+                  }
+                ]
+              },
+              {
+                dimension: STACK_FRAME,
+                title: 'V8.Execute',
+                size: [undefined, 819200],
+                defined: [false, true],
+              }
+            ]
+          },
+          {
+            dimension: OBJECT_TYPE,
+            title: 'MissingSumOverAllTypes',
+            size: [51200, undefined],
+            defined: [true, false],
+            children: [
+              {
+                dimension: STACK_FRAME,
+                title: 'MessageLoop::RunTask',
+                size: [51200, undefined],
+                defined: [true, false],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [51200, undefined],
+                    defined: [true, false],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [51200, undefined],
+                        defined: [true, false],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'FunctionCall',
+                            size: [51200, undefined],
+                            defined: [true, false],
+                          }
+                        ]
+                      }
+                    ]
+                  }
+                ]
+              },
+              {
+                dimension: STACK_FRAME,
+                title: 'FunctionCall',
+                size: [51200, undefined],
+                defined: [true, false],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [51200, undefined],
+                    defined: [true, false],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [51200, undefined],
+                        defined: [true, false],
+                      }
+                    ]
+                  }
+                ]
+              }
+            ]
+          },
+          {
+            dimension: OBJECT_TYPE,
+            title: 'WTF::StringImpl',
+            size: [undefined, 126362],
+            defined: [false, true],
+            children: [
+              {
+                dimension: STACK_FRAME,
+                title: 'MessageLoop::RunTask',
+                size: [undefined, 126362],
+                defined: [false, true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'V8.Execute',
+                    size: [undefined, 126362],
+                    defined: [false, true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [undefined, 126362],
+                        defined: [false, true],
+                      }
+                    ]
+                  }
+                ]
+              },
+              {
+                dimension: STACK_FRAME,
+                title: 'FunctionCall',
+                size: [undefined, 126362],
+                defined: [false, true],
+              },
+              {
+                dimension: STACK_FRAME,
+                title: 'V8.Execute',
+                size: [undefined, 126362],
+                defined: [false, true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [undefined, 126362],
+                    defined: [false, true],
+                  }
+                ]
+              }
+            ]
           }
         ]
       }
     ]);
 
-    changeView(viewEl, false /* top-down view */);
-    checkTable(viewEl, AggregationMode.DIFF, [
+    changeView(viewEl, TOP_DOWN_TREE_VIEW);
+    checkTable(viewEl, AggregationMode.DIFF, false /* hide info bar */, [
       {
+        dimension: ROOT,
         title: 'partition_alloc',
-        total: [4194304, 4923392],
-        self: [0, 131072],
+        size: [4194304, 4954521],
         defined: [true, true],
         children: [
           {
+            dimension: STACK_FRAME,
             title: 'MessageLoop::RunTask',
-            total: [3760128, 4792320],
-            self: [0, 0],
+            size: [4194304, 4823449],
             defined: [true, true],
             children: [
               {
+                dimension: STACK_FRAME,
                 title: 'FunctionCall',
-                total: [1355776, 2170880],
-                self: [102400, 393216],
+                size: [1406976, 2170880],
                 defined: [true, true],
                 children: [
                   {
-                    title: 'V8.Execute',
-                    total: [1048576, 1572864],
-                    self: [1048576, 1572864],
-                    defined: [true, true]
+                    dimension: STACK_FRAME,
+                    title: '<self>',
+                    size: [102400, 393216],
+                    defined: [true, true],
                   },
                   {
-                    title: 'FunctionCall',
-                    total: [204800, 204800],
-                    self: [204800, 0],
+                    dimension: STACK_FRAME,
+                    title: 'V8.Execute',
+                    size: [1048576, 1572864],
                     defined: [true, true],
                     children: [
                       {
+                        dimension: OBJECT_TYPE,
+                        title: 'v8::Context',
+                        size: [716800, 614400],
+                        defined: [true, true],
+                      },
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: '<other>',
+                        size: [331776, 139264],
+                        defined: [true, true],
+                      },
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'blink::Node',
+                        size: [undefined, 819200],
+                        defined: [false, true],
+                      }
+                    ]
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [204800, 204800],
+                    defined: [true, true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
                         title: 'FunctionCall',
-                        total: [undefined, 204800],
-                        self: [undefined, 204800],
-                        defined: [undefined, true]
+                        size: [undefined, 204800],
+                        defined: [false, true],
+                      },
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'v8::Context',
+                        size: [153600, 122880],
+                        defined: [true, true],
+                      },
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: '<other>',
+                        size: [51200, 81920],
+                        defined: [true, true],
+                      }
+                    ]
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: '<other>',
+                    size: [51200, undefined],
+                    defined: [true, false],
+                  },
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'blink::Node',
+                    size: [331776, 819200],
+                    defined: [true, true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'V8.Execute',
+                        size: [undefined, 819200],
+                        defined: [false, true],
+                      }
+                    ]
+                  },
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: 'v8::Context',
+                    size: [1024000, 1024000],
+                    defined: [true, true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'V8.Execute',
+                        size: [716800, 614400],
+                        defined: [true, true],
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [153600, 122880],
+                        defined: [true, true],
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: '<other>',
+                        size: [153600, 286720],
+                        defined: [true, true],
+                      }
+                    ]
+                  },
+                  {
+                    dimension: OBJECT_TYPE,
+                    title: '<other>',
+                    size: [51200, 327680],
+                    defined: [true, true],
+                  }
+                ]
+              },
+              {
+                dimension: STACK_FRAME,
+                title: 'V8.Execute',
+                size: [2404352, 2621440],
+                defined: [true, true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [2404352, 2621440],
+                    defined: [true, true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: '<self>',
+                        size: [307200, undefined],
+                        defined: [true, false],
+                        children: [
+                          {
+                            dimension: OBJECT_TYPE,
+                            title: 'v8::Context',
+                            size: [15360, undefined],
+                            defined: [true, false],
+                          },
+                          {
+                            dimension: OBJECT_TYPE,
+                            title: '<other>',
+                            size: [291840, undefined],
+                            defined: [true, false],
+                          }
+                        ]
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'V8.Execute',
+                        size: [2097152, 2516582],
+                        defined: [true, true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: 'V8.Execute',
+                            size: [2097152, undefined],
+                            defined: [true, false],
+                          }
+                        ]
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: '<other>',
+                        size: [undefined, 104858],
+                        defined: [false, true],
+                      },
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'v8::Context',
+                        size: [20480, 20480],
+                        defined: [true, true],
+                        children: [
+                          {
+                            dimension: STACK_FRAME,
+                            title: '<self>',
+                            size: [15360, undefined],
+                            defined: [true, false],
+                          },
+                          {
+                            dimension: STACK_FRAME,
+                            title: '<other>',
+                            size: [5120, undefined],
+                            defined: [true, false],
+                          }
+                        ]
+                      },
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: '<other>',
+                        size: [2383872, 2474598],
+                        defined: [true, true],
+                      },
+                      {
+                        dimension: OBJECT_TYPE,
+                        title: 'WTF::StringImpl',
+                        size: [undefined, 126362],
+                        defined: [false, true],
                       }
                     ]
                   }
                 ]
               },
               {
-                title: 'V8.Execute',
-                total: [2404352, 2621440],
-                self: [0, 0],
+                dimension: STACK_FRAME,
+                title: '<other>',
+                size: [382976, 31129],
+                defined: [true, true],
+              },
+              {
+                dimension: OBJECT_TYPE,
+                title: 'v8::Context',
+                size: [1048576, 1127219],
                 defined: [true, true],
                 children: [
                   {
+                    dimension: STACK_FRAME,
                     title: 'FunctionCall',
-                    total: [2404352, 2621440],
-                    self: [307200, 0],
+                    size: [1024000, 1024000],
                     defined: [true, true],
                     children: [
                       {
+                        dimension: STACK_FRAME,
                         title: 'V8.Execute',
-                        total: [2097152, 2621440],
-                        self: [2097152, 2621440],
-                        defined: [true, true]
+                        size: [716800, 614400],
+                        defined: [true, true],
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [153600, 122880],
+                        defined: [true, true],
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: '<other>',
+                        size: [153600, 286720],
+                        defined: [true, true],
                       }
                     ]
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: '<other>',
+                    size: [24576, 103219],
+                    defined: [true, true],
                   }
                 ]
+              },
+              {
+                dimension: OBJECT_TYPE,
+                title: '<other>',
+                size: [3145728, 3696230],
+                defined: [true, true],
               }
             ]
           },
           {
-            title: '<unspecified>',
-            total: [434176, undefined],
-            self: [434176, undefined],
-            defined: [true, undefined]
+            dimension: STACK_FRAME,
+            title: '<self>',
+            size: [undefined, 131072],
+            defined: [false, true],
+            children: [
+              {
+                dimension: OBJECT_TYPE,
+                title: 'v8::Context',
+                size: [undefined, 131072],
+                defined: [false, true],
+              }
+            ]
+          },
+          {
+            dimension: OBJECT_TYPE,
+            title: 'v8::Context',
+            size: [1048576, 1258291],
+            defined: [true, true],
+            children: [
+              {
+                dimension: STACK_FRAME,
+                title: 'MessageLoop::RunTask',
+                size: [1048576, 1127219],
+                defined: [true, true],
+                children: [
+                  {
+                    dimension: STACK_FRAME,
+                    title: 'FunctionCall',
+                    size: [1024000, 1024000],
+                    defined: [true, true],
+                    children: [
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'V8.Execute',
+                        size: [716800, 614400],
+                        defined: [true, true],
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: 'FunctionCall',
+                        size: [153600, 122880],
+                        defined: [true, true],
+                      },
+                      {
+                        dimension: STACK_FRAME,
+                        title: '<other>',
+                        size: [153600, 286720],
+                        defined: [true, true],
+                      }
+                    ]
+                  },
+                  {
+                    dimension: STACK_FRAME,
+                    title: '<other>',
+                    size: [24576, 103219],
+                    defined: [true, true],
+                  }
+                ]
+              },
+              {
+                dimension: STACK_FRAME,
+                title: '<self>',
+                size: [undefined, 131072],
+                defined: [false, true],
+              }
+            ]
+          },
+          {
+            dimension: OBJECT_TYPE,
+            title: 'blink::Node',
+            size: [331776, 1048576],
+            defined: [true, true],
+          },
+          {
+            dimension: OBJECT_TYPE,
+            title: '<other>',
+            size: [2813952, 2647654],
+            defined: [true, true],
           }
         ]
       }
@@ -601,14 +3616,14 @@
     viewEl.rebuild();
     this.addHTMLOutput(viewEl);
 
-    changeView(viewEl, true /* bottom-up view */);
-    checkTable(viewEl, AggregationMode.MAX, [
+    changeView(viewEl, TOP_DOWN_HEAVY_VIEW);
+    checkTable(viewEl, AggregationMode.MAX, true /* show info bar */, [
       {
+        dimension: ROOT,
         title: 'partition_alloc',
-        total: [4194304, 4923392],
-        self: [0, 131072],
+        size: [4194304, 4954521],
         defined: [true, true],
-        children: 4  // No need to check the full structure again.
+        children: 9  // No need to check the full structure again.
       }
     ]);
   });
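Editorial note, not part of the patch: the rewritten expectations above describe every table node with one common shape; the field names below are taken directly from the diff, the values are illustrative only, and exampleExpectedNode is a hypothetical name.

    var exampleExpectedNode = {
      dimension: STACK_FRAME,    // ROOT, STACK_FRAME or OBJECT_TYPE
      title: 'FunctionCall',     // row title shown in the table
      size: [51200, undefined],  // one entry per selected dump; undefined = no value
      defined: [true, false],    // whether the node is present at each timestamp
      children: []               // optional; may also be a plain child count
    };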
@@ -624,14 +3639,14 @@
     viewEl.rebuild();
     this.addHTMLOutput(viewEl);
 
-    // Top-down view (default).
-    checkTable(viewEl, AggregationMode.DIFF, [
+    // Top-down tree view (default).
+    checkTable(viewEl, AggregationMode.DIFF, false /* hide info bar */, [
       {
+        dimension: ROOT,
         title: 'partition_alloc',
-        total: [4194304, undefined, 4923392],
-        self: [0, undefined, 131072],
+        size: [4194304, undefined, 4954521],
         defined: [true, false, true],
-        children: 2  // No need to check the full structure again.
+        children: 5  // No need to check the full structure again.
       }
     ]);
   });
diff --git a/catapult/tracing/tracing/ui/analysis/memory_dump_overview_pane.html b/catapult/tracing/tracing/ui/analysis/memory_dump_overview_pane.html
index 28aa367..b10ccf1 100644
--- a/catapult/tracing/tracing/ui/analysis/memory_dump_overview_pane.html
+++ b/catapult/tracing/tracing/ui/analysis/memory_dump_overview_pane.html
@@ -7,7 +7,6 @@
 
 <link rel="import" href="/tracing/base/color_scheme.html">
 <link rel="import" href="/tracing/base/iteration_helpers.html">
-<link rel="import" href="/tracing/model/attribute.html">
 <link rel="import" href="/tracing/model/memory_allocator_dump.html">
 <link rel="import"
     href="/tracing/ui/analysis/memory_dump_allocator_details_pane.html">
@@ -19,6 +18,8 @@
 <link rel="import" href="/tracing/ui/base/dom_helpers.html">
 <link rel="import" href="/tracing/ui/base/table.html">
 <link rel="import" href="/tracing/ui/view_specific_brushing_state.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <polymer-element name="tr-ui-a-memory-dump-overview-pane"
     extends="tr-ui-a-stacked-pane">
@@ -76,25 +77,50 @@
 tr.exportTo('tr.ui.analysis', function() {
 
   var ColorScheme = tr.b.ColorScheme;
+  var ScalarNumeric = tr.v.ScalarNumeric;
+  var sizeInBytes_smallerIsBetter =
+      tr.v.Unit.byName.sizeInBytes_smallerIsBetter;
 
   var PLATFORM_SPECIFIC_TOTAL_NAME_SUFFIX = '_bytes';
 
-  var DISPLAYED_SIZE_ATTRIBUTE_NAME =
-      tr.model.MemoryAllocatorDump.DISPLAYED_SIZE_ATTRIBUTE_NAME;
+  var DISPLAYED_SIZE_NUMERIC_NAME =
+      tr.model.MemoryAllocatorDump.DISPLAYED_SIZE_NUMERIC_NAME;
+  var SOME_TIMESTAMPS_INFO_QUANTIFIER =
+      tr.ui.analysis.MemoryColumn.SOME_TIMESTAMPS_INFO_QUANTIFIER;
 
-  var GREATER_THAN_OR_EQUAL_TO_SYMBOL = String.fromCharCode(8805);
+  // Unicode symbols used for memory cell info icons and messages.
+  var RIGHTWARDS_ARROW_WITH_HOOK = String.fromCharCode(0x21AA);
+  var RIGHTWARDS_ARROW_FROM_BAR = String.fromCharCode(0x21A6);
+  var GREATER_THAN_OR_EQUAL_TO = String.fromCharCode(0x2265);
+  var UNMARRIED_PARTNERSHIP_SYMBOL = String.fromCharCode(0x26AF);
+  var TRIGRAM_FOR_HEAVEN = String.fromCharCode(0x2630);
+
+  // TODO(petrcermak): Move this to iteration_helpers.html.
+  function lazyMap(list, fn, opt_this) {
+    opt_this = opt_this || this;
+    var result = undefined;
+    list.forEach(function(item, index) {
+      var value = fn.call(opt_this, item, index);
+      if (value === undefined)
+        return;
+      if (result === undefined)
+        result = new Array(list.length);
+      result[index] = value;
+    });
+    return result;
+  }
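  // Editorial sketch, not part of the patch: the lazyMap contract above with
  // plain arrays. The result stays undefined unless at least one callback
  // invocation returns a defined value; defined results keep their original
  // indices in a sparse array of the same length.
  //
  //   lazyMap(['a', 'b', 'c'], function(x) { return undefined; });
  //       // -> undefined
  //   lazyMap(['a', 'b', 'c'], function(x, i) { return i === 1 ? undefined : x; });
  //       // -> ['a', <1 empty slot>, 'c']  (length 3)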
 
   /** @constructor */
-  function ProcessNameColumn(title) {
-    tr.ui.analysis.TitleColumn.call(this, title);
+  function ProcessNameColumn() {
+    tr.ui.analysis.TitleColumn.call(this, 'Process');
   }
 
   ProcessNameColumn.prototype = {
     __proto__: tr.ui.analysis.TitleColumn.prototype,
 
     formatTitle: function(row) {
-      if (row.noLegend)
-        return row.title;
+      if (row.contexts === undefined)
+        return row.title;  // Total row.
       var titleEl = document.createElement('tr-ui-b-color-legend');
       titleEl.label = row.title;
       return titleEl;
@@ -102,42 +128,206 @@
   };
 
   /** @constructor */
-  function UsedMemoryColumn(name, units, cellGetter, aggregationMode) {
-    tr.ui.analysis.ScalarMemoryColumn.call(
-        this, name, units, cellGetter, aggregationMode);
+  function UsedMemoryColumn(name, cellPath, aggregationMode) {
+    tr.ui.analysis.NumericMemoryColumn.call(
+        this, name, cellPath, aggregationMode);
   }
 
-  var USED_MEMORY_COLUMN_COLOR =
+  UsedMemoryColumn.COLOR =
       ColorScheme.getColorForReservedNameAsString('used_memory_column');
-  var OLDER_USED_MEMORY_COLUMN_COLOR =
+  UsedMemoryColumn.OLDER_COLOR =
       ColorScheme.getColorForReservedNameAsString('older_used_memory_column');
 
   UsedMemoryColumn.prototype = {
-    __proto__: tr.ui.analysis.ScalarMemoryColumn.prototype,
+    __proto__: tr.ui.analysis.NumericMemoryColumn.prototype,
 
     get title() {
-      return tr.ui.b.createSpan(
-          {textContent: this.name, color: USED_MEMORY_COLUMN_COLOR});
+      return tr.ui.b.createSpan({
+        textContent: this.name,
+        color: UsedMemoryColumn.COLOR
+      });
     },
 
-    color: function(attrs) {
-      // TODO(petrcermak): Figure out how to make this work for multi-
-      // selection as well.
-      if (attrs.length === 1 && attrs[0].isOlderValue)
-        return OLDER_USED_MEMORY_COLUMN_COLOR;
-      else
-        return USED_MEMORY_COLUMN_COLOR;
+    color: function(numerics, processMemoryDumps) {
+      return UsedMemoryColumn.COLOR;
+    },
+
+    getChildPaneBuilder: function(processMemoryDumps) {
+      if (processMemoryDumps === undefined)
+        return undefined;
+
+      var vmRegions = lazyMap(processMemoryDumps, function(pmd) {
+        if (pmd === undefined)
+          return undefined;
+        return pmd.mostRecentVmRegions;
+      });
+      if (vmRegions === undefined)
+        return undefined;
+
+      return function() {
+        var pane = document.createElement(
+            'tr-ui-a-memory-dump-vm-regions-details-pane');
+        pane.vmRegions = vmRegions;
+        pane.aggregationMode = this.aggregationMode;
+        return pane;
+      }.bind(this);
     }
   };
 
   /** @constructor */
-  function AllocatorColumn(name, units, cellGetter, aggregationMode) {
-    tr.ui.analysis.ScalarMemoryColumn.call(
-        this, name, units, cellGetter, aggregationMode);
+  function PeakMemoryColumn(name, cellPath, aggregationMode) {
+    UsedMemoryColumn.call(this, name, cellPath, aggregationMode);
+  }
+
+  PeakMemoryColumn.prototype = {
+    __proto__: UsedMemoryColumn.prototype,
+
+    addInfos: function(numerics, processMemoryDumps, infos) {
+      if (processMemoryDumps === undefined)
+        return;  // Total row.
+
+      var resettableValueCount = 0;
+      var nonResettableValueCount = 0;
+      for (var i = 0; i < numerics.length; i++) {
+        if (numerics[i] === undefined)
+          continue;
+        if (processMemoryDumps[i].arePeakResidentBytesResettable)
+          resettableValueCount++;
+        else
+          nonResettableValueCount++;
+      }
+
+      if (resettableValueCount > 0 && nonResettableValueCount > 0) {
+        infos.push(tr.ui.analysis.createWarningInfo('Both resettable and ' +
+            'non-resettable peak RSS values were provided by the process'));
+      } else if (resettableValueCount > 0) {
+        infos.push({
+          icon: RIGHTWARDS_ARROW_WITH_HOOK,
+          message: 'Peak RSS since previous memory dump.'
+        });
+      } else {
+        infos.push({
+          icon: RIGHTWARDS_ARROW_FROM_BAR,
+          message: 'Peak RSS since process startup. Finer grained ' +
+              'peaks require a Linux kernel version ' +
+              GREATER_THAN_OR_EQUAL_TO + ' 4.0.'
+        });
+      }
+    }
+  };
+
+  /** @constructor */
+  function ByteStatColumn(name, cellPath, aggregationMode) {
+    UsedMemoryColumn.call(this, name, cellPath, aggregationMode);
+  }
+
+  ByteStatColumn.prototype = {
+    __proto__: UsedMemoryColumn.prototype,
+
+    color: function(numerics, processMemoryDumps) {
+      if (processMemoryDumps === undefined)
+        return UsedMemoryColumn.COLOR;  // Total row.
+
+      var allOlderValues = processMemoryDumps.every(
+          function(processMemoryDump) {
+            if (processMemoryDump === undefined)
+              return true;
+            return !processMemoryDump.hasOwnVmRegions;
+          });
+
+      // Show the cell in lighter blue if all values were older (i.e. none of
+      // the defined process memory dumps had own VM regions).
+      if (allOlderValues)
+        return UsedMemoryColumn.OLDER_COLOR;
+      else
+        return UsedMemoryColumn.COLOR;
+    },
+
+    addInfos: function(numerics, processMemoryDumps, infos) {
+      if (processMemoryDumps === undefined)
+        return;  // Total row.
+
+      var olderValueCount = 0;
+      for (var i = 0; i < numerics.length; i++) {
+        var processMemoryDump = processMemoryDumps[i];
+        if (processMemoryDump !== undefined &&
+            !processMemoryDump.hasOwnVmRegions) {
+          olderValueCount++;
+        }
+      }
+
+      if (olderValueCount === 0)
+        return;  // There are no older values.
+
+      var infoQuantifier = olderValueCount < numerics.length ?
+          ' ' + SOME_TIMESTAMPS_INFO_QUANTIFIER /* some values are older */ :
+          '' /* all values are older */;
+
+      // Emit an info if there was at least one older value (i.e. at least one
+      // defined process memory dump did not have own VM regions).
+      infos.push({
+        message: 'Older value' + infoQuantifier +
+            ' (only heavy (purple) memory dumps contain memory maps).',
+        icon: UNMARRIED_PARTNERSHIP_SYMBOL
+      });
+    }
+  };
+
+  // Rules for constructing and sorting used memory columns.
+  UsedMemoryColumn.RULES = [
+    {
+      condition: 'Total resident',
+      importance: 10,
+      columnConstructor: UsedMemoryColumn
+    },
+    {
+      condition: 'Peak total resident',
+      importance: 9,
+      columnConstructor: PeakMemoryColumn
+    },
+    {
+      condition: 'PSS',
+      importance: 8,
+      columnConstructor: ByteStatColumn
+    },
+    {
+      condition: 'Private dirty',
+      importance: 7,
+      columnConstructor: ByteStatColumn
+    },
+    {
+      condition: 'Swapped',
+      importance: 6,
+      columnConstructor: ByteStatColumn
+    },
+    {
+      // All other columns.
+      importance: 0,
+      columnConstructor: UsedMemoryColumn
+    }
+  ];
+
+  // Map from ProcessMemoryDump totals fields to column names.
+  UsedMemoryColumn.TOTALS_MAP = {
+    'residentBytes': 'Total resident',
+    'peakResidentBytes': 'Peak total resident'
+  };
+
+  // Map from VMRegionByteStats field names to column names.
+  UsedMemoryColumn.BYTE_STAT_MAP = {
+    'proportionalResident': 'PSS',
+    'privateDirtyResident': 'Private dirty',
+    'swapped': 'Swapped'
+  };
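  // Editorial note, not part of the patch: TOTALS_MAP and BYTE_STAT_MAP above
  // are consumed later in createRows_ via tr.b.iterItems; e.g.
  // totals.residentBytes becomes the 'Total resident' cell and
  // vmRegions.byteStats.proportionalResident becomes the 'PSS' cell, each
  // wrapped in a ScalarNumeric with the sizeInBytes_smallerIsBetter unit.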
+
+  /** @constructor */
+  function AllocatorColumn(name, cellPath, aggregationMode) {
+    tr.ui.analysis.NumericMemoryColumn.call(
+        this, name, cellPath, aggregationMode);
   }
 
   AllocatorColumn.prototype = {
-    __proto__: tr.ui.analysis.ScalarMemoryColumn.prototype,
+    __proto__: tr.ui.analysis.NumericMemoryColumn.prototype,
 
     get title() {
       var titleEl = document.createElement('tr-ui-b-color-legend');
@@ -145,81 +335,101 @@
       return titleEl;
     },
 
-    getInfos: function(attrs) {
-      // Show the heap dump icon if at least one of the attributes has an
-      // associated heap dump.
-      var hasDumpInfo = undefined;
-      attrs.some(function(attr) {
-        if (attr === undefined)
-          return false;
-        return attr.infos.some(function(info) {
-          if (info.type !== tr.model.AttributeInfoType.HAS_HEAP_DUMP)
-            return false;
-          hasDumpInfo = info;
-          return true;
-        });
-      });
+    addInfos: function(numerics, processMemoryDumps, infos) {
+      if (processMemoryDumps === undefined)
+        return;
 
-      if (hasDumpInfo !== undefined)
-        return [hasDumpInfo];
-      else
-        return [];
+      var heapDumpCount = 0;
+      for (var i = 0; i < processMemoryDumps.length; i++) {
+        var processMemoryDump = processMemoryDumps[i];
+        if (processMemoryDump === undefined)
+          continue;
+        var heapDumps = processMemoryDump.heapDumps;
+        if (heapDumps === undefined)
+          continue;
+        if (heapDumps[this.name] !== undefined)
+          heapDumpCount++;
+      }
+
+      if (heapDumpCount === 0)
+        return;  // There are no heap dumps.
+
+      var infoQuantifier = heapDumpCount < numerics.length ?
+          ' ' + SOME_TIMESTAMPS_INFO_QUANTIFIER : '';
+
+      // Emit a heap dump info if at least one of the process memory dumps has
+      // a heap dump associated with this allocator.
+      infos.push({
+        message: 'Heap dump provided' + infoQuantifier + '.',
+        icon: TRIGRAM_FOR_HEAVEN
+      });
+    },
+
+    getChildPaneBuilder: function(processMemoryDumps) {
+      if (processMemoryDumps === undefined)
+        return undefined;
+
+      var memoryAllocatorDumps = lazyMap(processMemoryDumps, function(pmd) {
+        if (pmd === undefined)
+          return undefined;
+        return pmd.getMemoryAllocatorDumpByFullName(this.name);
+      }, this);
+      if (memoryAllocatorDumps === undefined)
+        return undefined;
+
+      var heapDumps = lazyMap(processMemoryDumps, function(pmd) {
+        if (pmd === undefined || pmd.heapDumps === undefined)
+          return undefined;
+        return pmd.heapDumps[this.name];
+      }, this);
+
+      return function() {
+        var pane = document.createElement(
+            'tr-ui-a-memory-dump-allocator-details-pane');
+        pane.memoryAllocatorDumps = memoryAllocatorDumps;
+        pane.heapDumps = heapDumps;
+        pane.aggregationMode = this.aggregationMode;
+        return pane;
+      }.bind(this);
     }
   };
 
   /** @constructor */
-  function TracingColumn(name, units, cellGetter, aggregationMode) {
-    tr.ui.analysis.ScalarMemoryColumn.call(
-        this, name, units, cellGetter, aggregationMode);
+  function TracingColumn(name, cellPath, aggregationMode) {
+    AllocatorColumn.call(this, name, cellPath, aggregationMode);
   }
 
-  var TRACING_COLUMN_COLOR =
+  TracingColumn.COLOR =
       ColorScheme.getColorForReservedNameAsString('tracing_memory_column');
 
   TracingColumn.prototype = {
-    __proto__: tr.ui.analysis.ScalarMemoryColumn.prototype,
+    __proto__: AllocatorColumn.prototype,
 
     get title() {
-      return tr.ui.b.createSpan(
-          {textContent: this.name, color: TRACING_COLUMN_COLOR});
+      return tr.ui.b.createSpan({
+        textContent: this.name,
+        color: TracingColumn.COLOR
+      });
     },
 
-    color: TRACING_COLUMN_COLOR
+    color: function(numerics, processMemoryDumps) {
+      return TracingColumn.COLOR;
+    }
   };
 
-  // Rules for constructing and sorting used memory columns.
-  var USED_MEMORY_SIZE_COLUMNS_CONSTRUCTOR_RULES = [
-    {
-      columnConstructor: UsedMemoryColumn
-    }
-  ];
-  var USED_MEMORY_SIZE_COLUMNS_IMPORTANCE_RULES =
-      tr.ui.analysis.MemoryColumn.columnNamesToImportanceRules([
-          'Total resident',
-          'Peak total resident',
-          'PSS',
-          'Private dirty',
-          'Swapped']);
-
   // Rules for constructing and sorting allocator columns.
-  var ALLOCATOR_SIZE_COLUMNS_CONSTRUCTOR_RULES = [
+  AllocatorColumn.RULES = [
     {
       condition: 'tracing',
+      importance: 0,
       columnConstructor: TracingColumn
     },
     {
+      // All other columns.
+      importance: 1,
       columnConstructor: AllocatorColumn
     }
   ];
-  var ALLOCATOR_SIZE_COLUMNS_IMPORTANCE_RULES = [
-    {
-      condition: 'tracing',
-      importance: 0
-    },
-    {
-      importance: 1
-    }
-  ];
 
   Polymer('tr-ui-a-memory-dump-overview-pane', {
     created: function() {
@@ -293,10 +503,25 @@
 
     changeChildPane_: function() {
       this.storeSelection_();
-      var builder = undefined;
-      if (this.selectedMemoryCell !== undefined)
-        builder = this.selectedMemoryCell.buildDetailsPane;
-      this.childPaneBuilder = builder;
+      this.childPaneBuilder = this.determineChildPaneBuilderFromSelection_();
+    },
+
+    determineChildPaneBuilderFromSelection_: function() {
+      if (this.processMemoryDumps_ === undefined ||
+          this.processMemoryDumps_.length === 0) {
+        return undefined;
+      }
+
+      var selectedTableRow = this.$.table.selectedTableRow;
+      if (!selectedTableRow)
+        return undefined;
+
+      var selectedColumnIndex = this.$.table.selectedColumnIndex;
+      if (selectedColumnIndex === undefined)
+        return undefined;
+      var selectedColumn = this.$.table.tableColumns[selectedColumnIndex];
+
+      return selectedColumn.getChildPaneBuilder(selectedTableRow.contexts);
     },
 
     rebuildPane_: function() {
@@ -316,8 +541,8 @@
       this.$.table.style.display = 'block';
 
       var rows = this.createRows_();
-      var footerRows = this.createFooterRows_(rows);
       var columns = this.createColumns_(rows);
+      var footerRows = this.createFooterRows_(rows, columns);
 
       this.$.table.tableRows = rows;
       this.$.table.footerRows = footerRows;
@@ -336,58 +561,27 @@
           timeToPidToProcessMemoryDump);
 
       // Process (list index) -> Component (dict key) -> Cell.
-      var rows = [];
-
-      var aggregationMode = this.aggregationMode_;
       return tr.b.dictionaryValues(tr.b.mapItems(
           pidToTimeToProcessMemoryDump, function(pid, timeToDump) {
         // Get the process associated with the dumps. We can use any defined
         // process memory dump in timeToDump since they all have the same pid.
         var process = tr.b.findFirstInArray(timeToDump).process;
 
-        // Determine at which timestamps (indices of the current selection)
-        // the dump was provided.
-        var defined = timeToDump.map(function(dump) {
-          return dump !== undefined;
-        });
-
         // Used memory (total resident, PSS, ...).
-        var timeToVmRegions = timeToDump.map(function(dump) {
-          if (dump === undefined)
-            return undefined;
-          return dump.mostRecentVmRegions;
-        });
-        function buildVmRegionsPane() {
-          var pane = document.createElement(
-              'tr-ui-a-memory-dump-vm-regions-details-pane');
-          pane.vmRegions = timeToVmRegions;
-          pane.aggregationMode = aggregationMode;
-          return pane;
-        }
         var usedMemoryCells = tr.ui.analysis.createCells(timeToDump,
             function(dump) {
               var sizes = {};
 
-              // Totals.
               var totals = dump.totals;
               if (totals !== undefined) {
-                tr.ui.analysis.addAttributeIfDefined(
-                    sizes, 'Total resident', tr.model.ScalarAttribute, 'bytes',
-                    totals.residentBytes);
-                tr.ui.analysis.addAttributeIfDefined(
-                    sizes, 'Peak total resident', tr.model.ScalarAttribute,
-                    'bytes', totals.peakResidentBytes, function(attr) {
-                      if (dump.totals.arePeakResidentBytesResettable) {
-                        attr.infos.push(new tr.model.AttributeInfo(
-                            tr.model.AttributeInfoType.RECENT_VALUE,
-                            'Peak RSS since previous memory dump.'));
-                      } else {
-                        attr.infos.push(new tr.model.AttributeInfo(
-                            tr.model.AttributeInfoType.OVERALL_VALUE,
-                            'Peak RSS since process startup. Finer grained ' +
-                            'peaks require a Linux kernel version ' +
-                            GREATER_THAN_OR_EQUAL_TO_SYMBOL + ' 4.0.'));
-                      }
+                // Common totals.
+                tr.b.iterItems(UsedMemoryColumn.TOTALS_MAP,
+                    function(totalName, cellName) {
+                      var total = totals[totalName];
+                      if (total === undefined)
+                        return;
+                      sizes[cellName] = new ScalarNumeric(
+                          sizeInBytes_smallerIsBetter, total);
                     });
 
                 // Platform-specific totals (e.g. private resident on Mac).
@@ -402,135 +596,75 @@
                     }
                     name = name.replace('_', ' ').trim();
                     name = name.charAt(0).toUpperCase() + name.slice(1);
-                    sizes[name] = new tr.model.ScalarAttribute('bytes', size);
+                    sizes[name] = new ScalarNumeric(
+                        sizeInBytes_smallerIsBetter, size);
                   });
                 }
               }
 
               // VM regions byte stats.
-              var vmRegionAttributeAddedCallback = undefined;
-              if (!dump.hasOwnVmRegions) {
-                vmRegionAttributeAddedCallback = function(attr) {
-                  attr.infos.push(new tr.model.AttributeInfo(
-                      tr.model.AttributeInfoType.LINK,
-                       'Older value (process did not dump memory maps).'));
-                  attr.isOlderValue = true;
-                };
+              var vmRegions = dump.mostRecentVmRegions;
+              if (vmRegions !== undefined) {
+                tr.b.iterItems(UsedMemoryColumn.BYTE_STAT_MAP,
+                    function(byteStatName, cellName) {
+                      var byteStat = vmRegions.byteStats[byteStatName];
+                      if (byteStat === undefined)
+                        return;
+                      sizes[cellName] = new ScalarNumeric(
+                          sizeInBytes_smallerIsBetter, byteStat);
+                    });
               }
-              tr.ui.analysis.addAttributeIfDefined(
-                    sizes, 'PSS', tr.model.ScalarAttribute, 'bytes',
-                    dump.getMostRecentTotalVmRegionStat(
-                        'proportionalResident'),
-                    vmRegionAttributeAddedCallback);
-              tr.ui.analysis.addAttributeIfDefined(
-                    sizes, 'Private dirty', tr.model.ScalarAttribute, 'bytes',
-                    dump.getMostRecentTotalVmRegionStat(
-                        'privateDirtyResident'),
-                    vmRegionAttributeAddedCallback);
-              tr.ui.analysis.addAttributeIfDefined(
-                    sizes, 'Swapped', tr.model.ScalarAttribute, 'bytes',
-                    dump.getMostRecentTotalVmRegionStat('swapped'),
-                    vmRegionAttributeAddedCallback);
 
               return sizes;
-            },
-            function(attrName, cell) {
-              cell.buildDetailsPane = buildVmRegionsPane;
             });
 
         // Allocator memory (v8, oilpan, ...).
         var allocatorCells = tr.ui.analysis.createCells(timeToDump,
             function(dump) {
-              if (dump.memoryAllocatorDumps === undefined)
+              var memoryAllocatorDumps = dump.memoryAllocatorDumps;
+              if (memoryAllocatorDumps === undefined)
                 return undefined;
               var sizes = {};
-              dump.memoryAllocatorDumps.forEach(function(allocatorDump) {
-                var rootAttribute = allocatorDump.attributes[
-                    DISPLAYED_SIZE_ATTRIBUTE_NAME];
-                if (rootAttribute === undefined)
-                  return;
-                var allocatorName = allocatorDump.fullName;
-                // Clone the attribute so that we could provide custom infos
-                // (instead of the original ones).
-                var overviewAttribute = new rootAttribute.constructor(
-                    rootAttribute.units, rootAttribute.value);
-                if (dump.heapDumps !== undefined &&
-                    dump.heapDumps[allocatorName] !== undefined) {
-                  overviewAttribute.infos.push(new tr.model.AttributeInfo(
-                      tr.model.AttributeInfoType.HAS_HEAP_DUMP,
-                      'Heap dump provided'));
-                }
-                sizes[allocatorName] = overviewAttribute;
+              memoryAllocatorDumps.forEach(function(allocatorDump) {
+                var rootDisplayedSizeNumeric = allocatorDump.numerics[
+                    DISPLAYED_SIZE_NUMERIC_NAME];
+                if (rootDisplayedSizeNumeric !== undefined)
+                  sizes[allocatorDump.fullName] = rootDisplayedSizeNumeric;
               });
               return sizes;
-            },
-            function(allocatorName, cell) {
-              var memoryAllocatorDumps = timeToDump.map(function(dump) {
-                if (dump === undefined)
-                  return undefined;
-                return dump.getMemoryAllocatorDumpByFullName(allocatorName);
-              });
-              // Lazily construct the list of heap dumps if a heap dump is
-              // encountered.
-              var heapDumps = undefined;
-              timeToDump.forEach(function(dump, index) {
-                if (dump === undefined || dump.heapDumps === undefined)
-                  return;
-                if (heapDumps === undefined)
-                  heapDumps = new Array(timeToDump.length);
-                heapDumps[index] = dump.heapDumps[allocatorName];
-              });
-              cell.buildDetailsPane = function() {
-                var pane = document.createElement(
-                    'tr-ui-a-memory-dump-allocator-details-pane');
-                pane.memoryAllocatorDumps = memoryAllocatorDumps;
-                pane.heapDumps = heapDumps;
-                pane.aggregationMode = aggregationMode;
-                return pane;
-              };
             });
 
         return {
           title: process.userFriendlyName,
-          defined: defined,
+          contexts: timeToDump,
           usedMemoryCells: usedMemoryCells,
           allocatorCells: allocatorCells
         };
       }));
     },
 
-    createFooterRows_: function(rows) {
+    createFooterRows_: function(rows, columns) {
       // Add a 'Total' row if there are at least two process memory dumps.
       if (rows.length <= 1)
         return [];
 
-      var totalRow = {
-        title: 'Total',
-        noLegend: true
-      };
-
-      tr.ui.analysis.aggregateTableRowCells(
-          totalRow, rows, 'usedMemoryCells');
-      tr.ui.analysis.aggregateTableRowCells(totalRow, rows, 'allocatorCells');
+      var totalRow = {title: 'Total'};
+      tr.ui.analysis.aggregateTableRowCells(totalRow, rows, columns);
 
       return [totalRow];
     },
 
     createColumns_: function(rows) {
-      var titleColumn = new ProcessNameColumn('Process');
+      var titleColumn = new ProcessNameColumn();
       titleColumn.width = '200px';
 
       var usedMemorySizeColumns = tr.ui.analysis.MemoryColumn.fromRows(
           rows, 'usedMemoryCells', this.aggregationMode_,
-          USED_MEMORY_SIZE_COLUMNS_CONSTRUCTOR_RULES);
-      tr.ui.analysis.MemoryColumn.sortByImportance(
-          usedMemorySizeColumns, USED_MEMORY_SIZE_COLUMNS_IMPORTANCE_RULES);
+          UsedMemoryColumn.RULES);
 
       var allocatorSizeColumns = tr.ui.analysis.MemoryColumn.fromRows(
           rows, 'allocatorCells', this.aggregationMode_,
-          ALLOCATOR_SIZE_COLUMNS_CONSTRUCTOR_RULES);
-      tr.ui.analysis.MemoryColumn.sortByImportance(
-          allocatorSizeColumns, ALLOCATOR_SIZE_COLUMNS_IMPORTANCE_RULES);
+          AllocatorColumn.RULES);
 
       var sizeColumns = usedMemorySizeColumns.concat(allocatorSizeColumns);
       tr.ui.analysis.MemoryColumn.spaceEqually(sizeColumns);
@@ -584,7 +718,13 @@
   });
 
   return {
-    AllocatorColumn: AllocatorColumn  // Exported for testing.
+    // All exports are for testing only.
+    ProcessNameColumn: ProcessNameColumn,
+    UsedMemoryColumn: UsedMemoryColumn,
+    PeakMemoryColumn: PeakMemoryColumn,
+    ByteStatColumn: ByteStatColumn,
+    AllocatorColumn: AllocatorColumn,
+    TracingColumn: TracingColumn
   };
 });
 </script>
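Editorial note, not part of the patch: the exports above exist so the tests can exercise the new column classes directly. A rough sketch of the prototype chains set up in this file (ProcessNameColumn separately extends tr.ui.analysis.TitleColumn):

    NumericMemoryColumn
      UsedMemoryColumn      (totals and platform-specific totals)
        PeakMemoryColumn    (adds peak-RSS info icons)
        ByteStatColumn      (VM-region byte stats; lighter color for older values)
      AllocatorColumn       (per-allocator displayed sizes, heap-dump info icon)
        TracingColumn       (special color for the 'tracing' allocator)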
diff --git a/catapult/tracing/tracing/ui/analysis/memory_dump_overview_pane_test.html b/catapult/tracing/tracing/ui/analysis/memory_dump_overview_pane_test.html
index 7d2810b..a9ac136 100644
--- a/catapult/tracing/tracing/ui/analysis/memory_dump_overview_pane_test.html
+++ b/catapult/tracing/tracing/ui/analysis/memory_dump_overview_pane_test.html
@@ -6,7 +6,8 @@
 -->
 
 <link rel="import" href="/tracing/base/iteration_helpers.html">
-<link rel="import" href="/tracing/model/attribute.html">
+<link rel="import" href="/tracing/core/test_utils.html">
+<link rel="import" href="/tracing/model/heap_dump.html">
 <link rel="import" href="/tracing/ui/analysis/memory_dump_overview_pane.html">
 <link rel="import"
     href="/tracing/ui/analysis/memory_dump_sub_view_test_utils.html">
@@ -17,74 +18,81 @@
 'use strict';
 
 tr.b.unittest.testSuite(function() {
-  var ScalarAttribute = tr.model.ScalarAttribute;
-  var AttributeInfo = tr.model.AttributeInfo;
-  var AttributeInfoType = tr.model.AttributeInfoType;
-  var AllocatorColumn = tr.ui.analysis.AllocatorColumn;
+  var ScalarNumeric = tr.v.ScalarNumeric;
+  var sizeInBytes_smallerIsBetter =
+      tr.v.Unit.byName.sizeInBytes_smallerIsBetter;
+  var HeapDump = tr.model.HeapDump;
   var AggregationMode = tr.ui.analysis.MemoryColumn.AggregationMode;
-  var checkSizeAttributes = tr.ui.analysis.checkSizeAttributes;
+  var checkSizeNumericFields = tr.ui.analysis.checkSizeNumericFields;
+  var checkColor = tr.ui.analysis.checkColor;
+  var checkColumns = tr.ui.analysis.checkColumns;
+  var checkColumnInfosAndColor = tr.ui.analysis.checkColumnInfosAndColor;
   var convertToProcessMemoryDumps = tr.ui.analysis.convertToProcessMemoryDumps;
+  var extractProcessMemoryDumps = tr.ui.analysis.extractProcessMemoryDumps;
   var extractVmRegions = tr.ui.analysis.extractVmRegions;
   var extractMemoryAllocatorDumps = tr.ui.analysis.extractMemoryAllocatorDumps;
   var isElementDisplayed = tr.ui.analysis.isElementDisplayed;
+  var addProcessMemoryDump = tr.ui.analysis.addProcessMemoryDump;
+  var addGlobalMemoryDump = tr.ui.analysis.addGlobalMemoryDump;
+  var ProcessNameColumn = tr.ui.analysis.ProcessNameColumn;
+  var UsedMemoryColumn = tr.ui.analysis.UsedMemoryColumn;
+  var PeakMemoryColumn = tr.ui.analysis.PeakMemoryColumn;
+  var ByteStatColumn = tr.ui.analysis.ByteStatColumn;
+  var AllocatorColumn = tr.ui.analysis.AllocatorColumn;
+  var TracingColumn = tr.ui.analysis.TracingColumn;
 
-  function getTitleText(title) {
-    if (!(title instanceof HTMLElement))
-      return title;
-    if (title.tagName === 'TR-UI-B-COLOR-LEGEND')
-      return title.label;
-    return title.textContent;
+  function spanMatcher(expectedTitle) {
+    return function(actualTitle) {
+      assert.instanceOf(actualTitle, HTMLElement);
+      assert.strictEqual(actualTitle.tagName, 'SPAN');
+      assert.strictEqual(actualTitle.textContent, expectedTitle);
+    };
   }
 
-  function checkColumns(columns, expectedAggregationMode) {
-    var EXPECTED_COLUMN_NAMES = [
-      'Process',
-      'Total resident',
-      'Peak total resident',
-      'PSS',
-      'Private dirty',
-      'Swapped',
-      'Private',
-      'blink',
-      'malloc',
-      'oilpan',
-      'v8',
-      'tracing'
-    ];
-
-    // First column doesn't change value over time (no aggregation).
-    var VARIABLE_CELLS_START_INDEX = 1;
-
-    // Check column names.
-    assert.lengthOf(columns, EXPECTED_COLUMN_NAMES.length);
-    for (var i = 0; i < EXPECTED_COLUMN_NAMES.length; i++)
-      assert.equal(getTitleText(columns[i].title), EXPECTED_COLUMN_NAMES[i]);
-
-    // Check aggregation modes.
-    for (var i = 0; i < EXPECTED_COLUMN_NAMES.length; i++) {
-      assert.strictEqual(columns[i].aggregationMode,
-          i < VARIABLE_CELLS_START_INDEX ? undefined : expectedAggregationMode);
-    }
+  function colorLegendMatcher(expectedTitle) {
+    return function(actualTitle) {
+      assert.instanceOf(actualTitle, HTMLElement);
+      assert.strictEqual(actualTitle.tagName, 'TR-UI-B-COLOR-LEGEND');
+      assert.strictEqual(actualTitle.label, expectedTitle);
+    };
   }
 
+  var EXPECTED_COLUMNS = [
+    { title: 'Process', type: ProcessNameColumn, noAggregation: true },
+    { title: spanMatcher('Total resident'), type: UsedMemoryColumn },
+    { title: spanMatcher('Peak total resident'), type: PeakMemoryColumn },
+    { title: spanMatcher('PSS'), type: ByteStatColumn },
+    { title: spanMatcher('Private dirty'), type: ByteStatColumn },
+    { title: spanMatcher('Swapped'), type: ByteStatColumn },
+    { title: spanMatcher('Private'), type: UsedMemoryColumn },
+    { title: colorLegendMatcher('blink'), type: AllocatorColumn },
+    { title: colorLegendMatcher('malloc'), type: AllocatorColumn },
+    { title: colorLegendMatcher('oilpan'), type: AllocatorColumn },
+    { title: colorLegendMatcher('v8'), type: AllocatorColumn },
+    { title: spanMatcher('tracing'), type: TracingColumn }
+  ];
+
   function checkRow(columns, row, expectedTitle, expectedSizes,
-      expectedDefinedValues) {
+      expectedContexts) {
     // Check title.
-    var formattedTitle = getTitleText(columns[0].formatTitle(row));
-    assert.equal(formattedTitle, expectedTitle);
+    var formattedTitle = columns[0].formatTitle(row);
+    if (typeof expectedTitle === 'function')
+      expectedTitle(formattedTitle);
+    else
+      assert.strictEqual(formattedTitle, expectedTitle);
 
     // Check all sizes. The first assert below is a test sanity check.
     assert.lengthOf(expectedSizes, columns.length - 1 /* all except title */);
     for (var i = 0; i < expectedSizes.length; i++)
-      checkSizeAttributes(row, columns[i + 1], expectedSizes[i]);
+      checkSizeNumericFields(row, columns[i + 1], expectedSizes[i]);
 
     // There should be no row nesting on the overview pane.
     assert.isUndefined(row.subRows);
 
-    if (expectedDefinedValues)
-      assert.deepEqual(tr.b.asArray(row.defined), expectedDefinedValues);
+    if (expectedContexts)
+      assert.deepEqual(tr.b.asArray(row.contexts), expectedContexts);
     else
-      assert.isUndefined(row.defined);
+      assert.isUndefined(row.contexts);
   }
 
   function checkRows(columns, actualRows, expectedRows) {
@@ -97,10 +105,21 @@
       var actualRow = actualRows[i];
       var expectedRow = expectedRows[i];
       checkRow(columns, actualRow, expectedRow.title, expectedRow.sizes,
-          expectedRow.defined);
+          expectedRow.contexts);
     }
   }
 
+  function checkSpanWithColor(span, expectedText, expectedColorReservedName) {
+    assert.strictEqual(span.tagName, 'SPAN');
+    assert.strictEqual(span.textContent, expectedText);
+    checkColor(span.style.color, expectedColorReservedName);
+  }
+
+  function checkColorLegend(legend, expectedLabel) {
+    assert.strictEqual(legend.tagName, 'TR-UI-B-COLOR-LEGEND');
+    assert.strictEqual(legend.label, expectedLabel);
+  }
+
   function createAndCheckMemoryDumpOverviewPane(
       test, processMemoryDumps, expectedRows, expectedFooterRows,
       aggregationMode) {
@@ -119,13 +138,48 @@
 
     var table = viewEl.$.table;
     var columns = table.tableColumns;
-    checkColumns(columns, aggregationMode);
+    checkColumns(columns, EXPECTED_COLUMNS, aggregationMode);
     var rows = table.tableRows;
 
     checkRows(columns, table.tableRows, expectedRows);
     checkRows(columns, table.footerRows, expectedFooterRows);
   }
 
+  var FIELD = 1 << 0;
+  var DUMP = 1 << 1;
+
+  function checkOverviewColumnInfosAndColor(column, fieldAndDumpMask,
+      dumpCreatedCallback, expectedInfos, expectedColorReservedName) {
+    var fields = fieldAndDumpMask.map(function(mask, index) {
+      return mask & FIELD ?
+          new ScalarNumeric(sizeInBytes_smallerIsBetter, 1024 + 32 * index) :
+          undefined;
+    });
+
+    var contexts;
+    if (dumpCreatedCallback === undefined) {
+      contexts = undefined;
+    } else {
+      tr.c.TestUtils.newModel(function(model) {
+        var process = model.getOrCreateProcess(1);
+        fieldAndDumpMask.forEach(function(mask, i) {
+          var timestamp = 10 + i;
+          var gmd = addGlobalMemoryDump(model, timestamp);
+          if (mask & DUMP) {
+            var pmd = addProcessMemoryDump(gmd, process, timestamp);
+            dumpCreatedCallback(pmd, mask);
+          }
+        });
+        contexts = model.globalMemoryDumps.map(function(gmd) {
+          return gmd.processMemoryDumps[1];
+        });
+      });
+    }
+
+    checkColumnInfosAndColor(
+        column, fields, contexts, expectedInfos, expectedColorReservedName);
+  }
+
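  // Editorial note, not part of the patch: in checkOverviewColumnInfosAndColor
  // above, fieldAndDumpMask holds one bitmask per timestamp. FIELD (1 << 0)
  // means the timestamp has a ScalarNumeric cell value and DUMP (1 << 1) means
  // a process memory dump is created for it; higher bits such as
  // RESETTABLE_PEAK / NON_RESETTABLE_PEAK (defined in the peakMemoryColumn
  // test below) are interpreted only by the dumpCreatedCallback supplied by
  // each test. FIELD | DUMP | RESETTABLE_PEAK therefore describes a timestamp
  // with both a value and a dump whose peak RSS is marked resettable.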
   test('instantiate_empty', function() {
     tr.ui.analysis.createAndCheckEmptyPanes(this,
         'tr-ui-a-memory-dump-overview-pane', 'processMemoryDumps',
@@ -143,23 +197,24 @@
         processMemoryDumps,
         [  // Table rows.
           {
-            title: 'Process 1',
-            sizes: [[29884416], undefined, [9437184], [5767168], [0], undefined,
-                undefined, [7340032], undefined, undefined, [2097152]],
-            defined: [true]
+            title: colorLegendMatcher('Process 1'),
+            sizes: [[29884416], undefined, [9437184], [5767168], undefined,
+                undefined, undefined, [7340032], undefined, undefined,
+                [2097152]],
+            contexts: extractProcessMemoryDumps(processMemoryDumps, 1)
           },
           {
-            title: 'Process 2',
+            title: colorLegendMatcher('Process 2'),
             sizes: [[17825792], [39845888], [18350080], [0], [32], [8912896],
                 [7340032], [1048576], [1], [5242880], [1572864]],
-            defined: [true]
+            contexts: extractProcessMemoryDumps(processMemoryDumps, 2)
           },
           {
-            title: 'Process 4',
+            title: colorLegendMatcher('Process 4'),
             sizes: [undefined, [17825792], undefined, undefined, undefined,
                 undefined, undefined, undefined, undefined, undefined,
                 undefined],
-            defined: [true]
+            contexts: extractProcessMemoryDumps(processMemoryDumps, 4)
           }
         ],
         [  // Footer rows.
@@ -167,7 +222,7 @@
             title: 'Total',
             sizes: [[47710208], [57671680], [27787264], [5767168], [32],
                 [8912896], [7340032], [8388608], [1], [5242880], [3670016]],
-            defined: undefined
+            contexts: undefined
           }
         ],
         undefined /* no aggregation */);
@@ -180,37 +235,37 @@
         processMemoryDumps,
         [  // Table rows.
           {
-            title: 'Process 1',
+            title: colorLegendMatcher('Process 1'),
             sizes: [[31457280, 29884416, undefined], undefined,
                 [10485760, 9437184, undefined], [8388608, 5767168, undefined],
-                [0, 0, undefined], undefined, undefined,
+                undefined, undefined, undefined,
                 [undefined, 7340032, undefined], undefined, undefined,
                 [undefined, 2097152, undefined]],
-            defined: [true, true, undefined]
+            contexts: extractProcessMemoryDumps(processMemoryDumps, 1)
           },
           {
-            title: 'Process 2',
+            title: colorLegendMatcher('Process 2'),
             sizes: [[19398656, 17825792, 15728640],
                 [40370176, 39845888, 40894464], [18350080, 18350080, 18350080],
                 [0, 0, -2621440], [32, 32, 64], [10485760, 8912896, 7340032],
                 [undefined, 7340032, 6291456], [2097152, 1048576, 786432],
                 [undefined, 1, undefined], [5242880, 5242880, 5767168],
                 [1048576, 1572864, 2097152]],
-            defined: [true, true, true]
+            contexts: extractProcessMemoryDumps(processMemoryDumps, 2)
           },
           {
-            title: 'Process 3',
+            title: colorLegendMatcher('Process 3'),
             sizes: [undefined, undefined, undefined, undefined, undefined,
                 undefined, undefined, undefined, [2147483648, undefined,
                 1073741824], [1073741824, undefined, 2147483648], undefined],
-            defined: [true, undefined, true]
+            contexts: extractProcessMemoryDumps(processMemoryDumps, 3)
           },
           {
-            title: 'Process 4',
+            title: colorLegendMatcher('Process 4'),
             sizes: [undefined, [undefined, 17825792, 17825792], undefined,
                 undefined, undefined, undefined, undefined, undefined,
                 undefined, undefined, undefined],
-            defined: [undefined, true, true]
+            contexts: extractProcessMemoryDumps(processMemoryDumps, 4)
           }
         ],
         [  // Footer rows.
@@ -222,7 +277,7 @@
                 [10485760, 8912896, 7340032], [undefined, 7340032, 6291456],
                 [2097152, 8388608, 786432], [2147483648, 1, 1073741824],
                 [1078984704, 5242880, 2153250816], [1048576, 3670016, 2097152]],
-            defined: undefined
+            contexts: undefined
           }
         ],
         AggregationMode.DIFF);
@@ -235,10 +290,10 @@
         processMemoryDumps,
         [  // Table rows.
           {
-            title: 'Process 2',
+            title: colorLegendMatcher('Process 2'),
             sizes: [[17825792], [39845888], [18350080], [0], [32], [8912896],
                 [7340032], [1048576], [1], [5242880], [1572864]],
-            defined: [true]
+            contexts: extractProcessMemoryDumps(processMemoryDumps, 2)
           }
         ],
         [] /* footer rows */,
@@ -252,14 +307,14 @@
         processMemoryDumps,
         [  // Table rows.
           {
-            title: 'Process 2',
+            title: colorLegendMatcher('Process 2'),
             sizes: [[19398656, 17825792, 15728640],
                 [40370176, 39845888, 40894464], [18350080, 18350080, 18350080],
                 [0, 0, -2621440], [32, 32, 64], [10485760, 8912896, 7340032],
                 [undefined, 7340032, 6291456], [2097152, 1048576, 786432],
                 [undefined, 1, undefined], [5242880, 5242880, 5767168],
                 [1048576, 1572864, 2097152]],
-            defined: [true, true, true]
+            contexts: extractProcessMemoryDumps(processMemoryDumps, 2)
           }
         ],
         [] /* footer rows */,
@@ -304,10 +359,12 @@
     containerEl.brushingStateController =
         new tr.c.BrushingStateController(undefined);
 
-    function simulateView(pids, aggregationMode, expectedSelectedCellAttrs,
-        expectedSelectedRowTitle, expectedSelectedColumnIndex, callback) {
+    function simulateView(pids, aggregationMode,
+        expectedSelectedCellFieldValues, expectedSelectedRowTitle,
+        expectedSelectedColumnIndex, callback) {
       var viewEl =
           tr.ui.analysis.createTestPane('tr-ui-a-memory-dump-overview-pane');
+      var table = viewEl.$.table;
       containerEl.textContent = '';
       containerEl.appendChild(viewEl);
 
@@ -321,15 +378,14 @@
       viewEl.aggregationMode = aggregationMode;
       viewEl.rebuild();
 
-      if (expectedSelectedCellAttrs === undefined) {
-        assert.isUndefined(viewEl.selectedMemoryCell);
+      if (expectedSelectedCellFieldValues === undefined) {
         assert.isUndefined(viewEl.childPaneBuilder);
       } else {
-        checkSizeAttributes(viewEl.selectedMemoryCell.attrs,
-            undefined /* row = attrs */, expectedSelectedCellAttrs);
+        checkSizeNumericFields(table.selectedTableRow,
+            table.tableColumns[table.selectedColumnIndex],
+            expectedSelectedCellFieldValues);
       }
 
-      var table = viewEl.$.table;
       assert.strictEqual(
           table.selectedColumnIndex, expectedSelectedColumnIndex);
       if (expectedSelectedRowTitle === undefined)
@@ -416,31 +472,278 @@
         });
   });
 
-  test('allocatorColumn_getInfos', function() {
-    var c = new AllocatorColumn('test_column', 'ms', tr.b.identity,
+  test('processNameColumn_formatTitle', function() {
+    var c = new ProcessNameColumn();
+
+    // With context (total row).
+    assert.strictEqual(c.formatTitle({
+      title: 'Total',
+      usedMemoryCells: {}
+    }), 'Total');
+
+    // Without context (process row).
+    var title = c.formatTitle({
+      title: 'Process 1',
+      usedMemoryCells: {},
+      contexts: [tr.ui.analysis.createSingleTestProcessMemoryDump()]
+    });
+    checkColorLegend(title, 'Process 1');
+  });
+
+  test('usedMemoryColumn', function() {
+    var c = new UsedMemoryColumn('Private', 'bytes', tr.b.identity,
+        AggregationMode.DIFF);
+    checkSpanWithColor(c.title, 'Private',
+        'used_memory_column' /* blue (column title) */);
+    checkColor(c.color(undefined /* contexts */),
+        'used_memory_column' /* blue (column cells) */);
+  });
+
+  test('peakMemoryColumn', function() {
+    var c = new PeakMemoryColumn('Peak', 'bytes', tr.b.identity,
         AggregationMode.MAX);
+    checkSpanWithColor(c.title, 'Peak',
+        'used_memory_column' /* blue (column title) */);
+    checkColor(c.color(undefined /* contexts */),
+        'used_memory_column' /* blue (column cells) */);
 
-    var attr1 = new ScalarAttribute('bytes', 64);
-    attr1.infos = [
-      new AttributeInfo(AttributeInfoType.INFORMATION, 'boring')
-    ];
+    var RESETTABLE_PEAK = 1 << 2;
+    var NON_RESETTABLE_PEAK = 1 << 3;
+    function checkPeakColumnInfosAndColor(fieldAndDumpMask, expectedInfos) {
+      checkOverviewColumnInfosAndColor(c,
+          fieldAndDumpMask,
+          function(pmd, mask) {
+            if (mask & RESETTABLE_PEAK) {
+              assert.strictEqual(
+                  mask & NON_RESETTABLE_PEAK, 0);  // Test sanity check.
+              pmd.arePeakResidentBytesResettable = true;
+            } else if (mask & NON_RESETTABLE_PEAK) {
+              pmd.arePeakResidentBytesResettable = false;
+            }
+          },
+          expectedInfos,
+          'used_memory_column');
+    }
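+    // Interpretation assumed from the calls below: each entry of
+    // fieldAndDumpMask describes one timestamp, where FIELD marks that the
+    // column's numeric field is present, DUMP that a process memory dump
+    // context is provided, and the remaining bits drive the callback above.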
 
-    var attr2 = new ScalarAttribute('bytes', 128);
-    var hasHeapDumpInfo =
-        new AttributeInfo(AttributeInfoType.HAS_HEAP_DUMP, 'exciting');
-    attr2.infos = [
-      new AttributeInfo(AttributeInfoType.INFORMATION, 'boring'),
-      hasHeapDumpInfo
-    ];
+    // No context.
+    checkOverviewColumnInfosAndColor(c,
+        [FIELD],
+        undefined /* no context */,
+        [] /* no infos */,
+        'used_memory_column' /* blue color */);
+    checkOverviewColumnInfosAndColor(c,
+        [FIELD, FIELD, 0, FIELD],
+        undefined /* no context */,
+        [] /* no infos */,
+        'used_memory_column' /* blue color */);
 
-    var attr3 = new ScalarAttribute('bytes', 256);
-    attr3.infos = [
-      new AttributeInfo(AttributeInfoType.INFORMATION, 'boring')
-    ];
+    // All resettable.
+    var EXPECTED_RESETTABLE_INFO = {
+      icon: '\u21AA',
+      message: 'Peak RSS since previous memory dump.'
+    };
+    checkPeakColumnInfosAndColor([
+      FIELD | DUMP | RESETTABLE_PEAK
+    ], [EXPECTED_RESETTABLE_INFO]);
+    checkPeakColumnInfosAndColor([
+      FIELD | DUMP | RESETTABLE_PEAK,
+      DUMP /* ignored because there's no field */,
+      0,
+      FIELD | DUMP | RESETTABLE_PEAK
+    ], [EXPECTED_RESETTABLE_INFO]);
 
-    assert.deepEqual(c.getInfos([attr1]), []);
-    assert.deepEqual(c.getInfos([attr2]), [hasHeapDumpInfo]);
-    assert.deepEqual(c.getInfos([attr1, attr2, attr3]), [hasHeapDumpInfo]);
+    // All non-resettable.
+    var EXPECTED_NON_RESETTABLE_INFO = {
+      icon: '\u21A6',
+      message: 'Peak RSS since process startup. Finer grained peaks require ' +
+          'a Linux kernel version \u2265 4.0.'
+    };
+    checkPeakColumnInfosAndColor([
+      FIELD | DUMP | NON_RESETTABLE_PEAK
+    ], [EXPECTED_NON_RESETTABLE_INFO]);
+    checkPeakColumnInfosAndColor([
+      0,
+      DUMP | RESETTABLE_PEAK /* ignored because there's no field */,
+      FIELD | DUMP | NON_RESETTABLE_PEAK,
+      FIELD | DUMP | NON_RESETTABLE_PEAK
+    ], [EXPECTED_NON_RESETTABLE_INFO]);
+
+    // Combination (warning).
+    var EXPECTED_COMBINATION_INFO = {
+      icon: '\u26A0',
+      message: 'Both resettable and non-resettable peak RSS values were ' +
+          'provided by the process',
+      color: 'red'
+    };
+    checkPeakColumnInfosAndColor([
+      FIELD | DUMP | NON_RESETTABLE_PEAK,
+      0,
+      FIELD | DUMP | RESETTABLE_PEAK,
+      0
+    ], [EXPECTED_COMBINATION_INFO]);
+  });
+
+  test('byteStatColumn', function() {
+    var c = new ByteStatColumn('Stat', 'bytes', tr.b.identity,
+        AggregationMode.DIFF);
+    checkSpanWithColor(c.title, 'Stat',
+        'used_memory_column' /* blue (column title) */);
+
+    var HAS_OWN_VM_REGIONS = 1 << 2;
+    function checkByteStatColumnInfosAndColor(
+        fieldAndDumpMask, expectedInfos, expectedIsOlderColor) {
+      checkOverviewColumnInfosAndColor(c,
+          fieldAndDumpMask,
+          function(pmd, mask) {
+            if (mask & HAS_OWN_VM_REGIONS)
+              pmd.vmRegions = [];
+          },
+          expectedInfos,
+          expectedIsOlderColor ?
+              'older_used_memory_column' /* light blue */ :
+              'used_memory_column' /* blue color */);
+    }
+
+    var EXPECTED_ALL_OLDER_VALUES = {
+      icon: '\u26AF',
+      message: 'Older value (only heavy (purple) memory dumps contain ' +
+          'memory maps).'
+    };
+    var EXPECTED_SOME_OLDER_VALUES = {
+      icon: '\u26AF',
+      message: 'Older value at some selected timestamps (only heavy ' +
+          '(purple) memory dumps contain memory maps).'
+    };
+
+    // No context.
+    checkOverviewColumnInfosAndColor(c,
+        [FIELD],
+        undefined /* no context */,
+        [] /* no infos */,
+        'used_memory_column' /* blue color */);
+    checkOverviewColumnInfosAndColor(c,
+        [FIELD, FIELD, 0, FIELD],
+        undefined /* no context */,
+        [] /* no infos */,
+        'used_memory_column' /* blue color */);
+
+    // All process memory dumps have own VM regions.
+    checkByteStatColumnInfosAndColor([
+      FIELD | DUMP | HAS_OWN_VM_REGIONS
+    ], [] /* no infos */, false /* blue color */);
+    checkByteStatColumnInfosAndColor([
+      FIELD | DUMP | HAS_OWN_VM_REGIONS,
+      FIELD | DUMP | HAS_OWN_VM_REGIONS,
+      0,
+      FIELD | DUMP | HAS_OWN_VM_REGIONS
+    ], [] /* no infos */, false /* blue color */);
+
+    // No process memory dumps have own VM regions.
+    checkByteStatColumnInfosAndColor([
+      FIELD | DUMP
+    ], [EXPECTED_ALL_OLDER_VALUES], true /* light blue */);
+    checkByteStatColumnInfosAndColor([
+      FIELD | DUMP,
+      FIELD | DUMP
+    ], [EXPECTED_ALL_OLDER_VALUES], true /* light blue */);
+
+    // Some process memory dumps don't have own VM regions.
+    checkByteStatColumnInfosAndColor([
+      FIELD | DUMP,
+      0,
+      FIELD | DUMP
+    ], [EXPECTED_SOME_OLDER_VALUES], true /* light blue */);
+    checkByteStatColumnInfosAndColor([
+      FIELD | DUMP | HAS_OWN_VM_REGIONS,
+      FIELD | DUMP,
+      FIELD | DUMP | HAS_OWN_VM_REGIONS
+    ], [EXPECTED_SOME_OLDER_VALUES], false /* blue */);
+  });
+
+  test('allocatorColumn', function() {
+    var c = new AllocatorColumn('Allocator', 'bytes', tr.b.identity,
+        AggregationMode.MAX);
+    checkColorLegend(c.title, 'Allocator');
+    checkColor(c.color(undefined /* contexts */),
+        undefined /* no color (column cells) */);
+
+    var HAS_HEAP_DUMPS = 1 << 2;
+    var HAS_ALLOCATOR_HEAP_DUMP = 1 << 3;
+    function checkAllocatorColumnInfosAndColor(fieldAndDumpMask,
+        expectedInfos) {
+      checkOverviewColumnInfosAndColor(c,
+          fieldAndDumpMask,
+          function(pmd, mask) {
+            if (mask & HAS_HEAP_DUMPS)
+              pmd.heapDumps = {};
+            if (mask & HAS_ALLOCATOR_HEAP_DUMP)
+              pmd.heapDumps['Allocator'] = new HeapDump(pmd, 'Allocator');
+          },
+          expectedInfos,
+          undefined /* no color */);
+    }
+
+    var EXPECTED_ALL_HAVE_ALLOCATOR_HEAP_DUMP = {
+      icon: '\u2630',
+      message: 'Heap dump provided.'
+    };
+    var EXPECTED_SOME_HAVE_ALLOCATOR_HEAP_DUMP = {
+      icon: '\u2630',
+      message: 'Heap dump provided at some selected timestamps.'
+    };
+
+    // No context.
+    checkOverviewColumnInfosAndColor(c,
+        [FIELD],
+        undefined /* no context */,
+        [] /* no infos */,
+        undefined /* no color */);
+    checkOverviewColumnInfosAndColor(c,
+        [FIELD, FIELD, 0, FIELD],
+        undefined /* no context */,
+        [] /* no infos */,
+        undefined /* no color */);
+
+    // No process memory dumps have heap dumps for the allocator.
+    checkAllocatorColumnInfosAndColor([
+      FIELD | DUMP
+    ], [] /* no infos */);
+    checkAllocatorColumnInfosAndColor([
+      FIELD | DUMP,
+      FIELD | DUMP | HAS_HEAP_DUMPS,
+      0,
+      FIELD | DUMP
+    ], [] /* no infos */);
+
+    // All process memory dumps have heap dumps.
+    checkAllocatorColumnInfosAndColor([
+      FIELD | DUMP | HAS_HEAP_DUMPS | HAS_ALLOCATOR_HEAP_DUMP
+    ], [EXPECTED_ALL_HAVE_ALLOCATOR_HEAP_DUMP]);
+    checkAllocatorColumnInfosAndColor([
+      FIELD | DUMP | HAS_HEAP_DUMPS | HAS_ALLOCATOR_HEAP_DUMP,
+      FIELD | DUMP | HAS_HEAP_DUMPS | HAS_ALLOCATOR_HEAP_DUMP,
+      FIELD | DUMP | HAS_HEAP_DUMPS | HAS_ALLOCATOR_HEAP_DUMP
+    ], [EXPECTED_ALL_HAVE_ALLOCATOR_HEAP_DUMP]);
+
+    // Some process memory dumps have heap dumps.
+    checkAllocatorColumnInfosAndColor([
+      0,
+      FIELD | DUMP | HAS_HEAP_DUMPS | HAS_ALLOCATOR_HEAP_DUMP
+    ], [EXPECTED_SOME_HAVE_ALLOCATOR_HEAP_DUMP]);
+    checkAllocatorColumnInfosAndColor([
+      FIELD | DUMP | HAS_HEAP_DUMPS | HAS_ALLOCATOR_HEAP_DUMP,
+      FIELD | DUMP | HAS_HEAP_DUMPS,
+      FIELD | DUMP | HAS_HEAP_DUMPS | HAS_ALLOCATOR_HEAP_DUMP
+    ], [EXPECTED_SOME_HAVE_ALLOCATOR_HEAP_DUMP]);
+  });
+
+  test('tracingColumn', function() {
+    var c = new TracingColumn('Tracing', 'bytes', tr.b.identity,
+        AggregationMode.DIFF);
+    checkSpanWithColor(c.title, 'Tracing',
+        'tracing_memory_column' /* expected column title gray color */);
+    checkColor(c.color(undefined /* contexts */),
+        'tracing_memory_column' /* expected column cells gray color */);
   });
 });
 </script>
diff --git a/catapult/tracing/tracing/ui/analysis/memory_dump_sub_view_test_utils.html b/catapult/tracing/tracing/ui/analysis/memory_dump_sub_view_test_utils.html
index be19cd9..46046fd 100644
--- a/catapult/tracing/tracing/ui/analysis/memory_dump_sub_view_test_utils.html
+++ b/catapult/tracing/tracing/ui/analysis/memory_dump_sub_view_test_utils.html
@@ -5,12 +5,16 @@
 found in the LICENSE file.
 -->
 
+<link rel="import" href="/tracing/base/color.html">
+<link rel="import" href="/tracing/base/color_scheme.html">
 <link rel="import" href="/tracing/core/test_utils.html">
-<link rel="import" href="/tracing/model/attribute.html">
 <link rel="import" href="/tracing/model/global_memory_dump.html">
 <link rel="import" href="/tracing/model/heap_dump.html">
-<link rel="import" href="/tracing/model/memory_allocator_dump.html">
+<link rel="import" href="/tracing/model/memory_dump_test_utils.html">
 <link rel="import" href="/tracing/model/process_memory_dump.html">
+<link rel="import" href="/tracing/model/vm_region.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <script>
 'use strict';
@@ -19,14 +23,20 @@
  * @fileoverview Helper functions for memory dump analysis sub-view tests.
  */
 tr.exportTo('tr.ui.analysis', function() {
+  var Color = tr.b.Color;
+  var ColorScheme = tr.b.ColorScheme;
   var GlobalMemoryDump = tr.model.GlobalMemoryDump;
   var ProcessMemoryDump = tr.model.ProcessMemoryDump;
-  var MemoryAllocatorDump = tr.model.MemoryAllocatorDump;
-  var MemoryAllocatorDumpLink = tr.model.MemoryAllocatorDumpLink;
   var VMRegion = tr.model.VMRegion;
-  var VMRegionByteStats = tr.model.VMRegionByteStats;
-  var ScalarAttribute = tr.model.ScalarAttribute;
+  var VMRegionClassificationNode = tr.model.VMRegionClassificationNode;
+  var ScalarNumeric = tr.v.ScalarNumeric;
+  var sizeInBytes_smallerIsBetter =
+      tr.v.Unit.byName.sizeInBytes_smallerIsBetter;
+  var unitlessNumber_smallerIsBetter =
+      tr.v.Unit.byName.unitlessNumber_smallerIsBetter;
   var HeapDump = tr.model.HeapDump;
+  var newAllocatorDump = tr.model.MemoryDumpTestUtils.newAllocatorDump;
+  var addOwnershipLink = tr.model.MemoryDumpTestUtils.addOwnershipLink;
 
   function createMultipleTestGlobalMemoryDumps() {
     var model = tr.c.TestUtils.newModel(function(model) {
@@ -43,7 +53,7 @@
       // Totals and VM regions.
       var pmd1A = addProcessMemoryDump(gmd1, pA, 41);
       pmd1A.totals = { residentBytes: 31457280 /* 30 MiB */ };
-      pmd1A.vmRegions = [
+      pmd1A.vmRegions = VMRegionClassificationNode.fromRegions([
         VMRegion.fromDict({
           startAddress: 1024,
           sizeInBytes: 20971520, /* 20 MiB */
@@ -55,7 +65,7 @@
             proportionalResident: 10485760 /* 10 MiB */
           }
         })
-      ];
+      ]);
 
       // Everything.
       var pmd1B = addProcessMemoryDump(gmd1, pB, 42);
@@ -67,7 +77,7 @@
           private_bytes: 10485760 /* 10 MiB */
         }
       };
-      pmd1B.vmRegions = [
+      pmd1B.vmRegions = VMRegionClassificationNode.fromRegions([
         VMRegion.fromDict({
           startAddress: 256,
           sizeInBytes: 6000,
@@ -91,47 +101,31 @@
             swapped: 0 /* 32 B */
           }
         })
+      ]);
+      pmd1B.memoryAllocatorDumps = [
+        newAllocatorDump(pmd1B, 'malloc', { size: 3145728 /* 3 MiB */ }),
+        newAllocatorDump(pmd1B, 'v8', { size: 5242880 /* 5 MiB */ }),
+        newAllocatorDump(pmd1B, 'tracing', {
+          size: 1048576 /* 1 MiB */,
+          resident_size: 1572864 /* 1.5 MiB */
+        })
       ];
-      pmd1B.memoryAllocatorDumps = (function() {
-        var mallocDump = new MemoryAllocatorDump(pmd1B, 'malloc');
-        mallocDump.addAttribute('size',
-            new ScalarAttribute('bytes', 3145728) /* 3 MiB */);
-
-        var v8Dump = new MemoryAllocatorDump(pmd1B, 'v8');
-        v8Dump.addAttribute('size',
-            new ScalarAttribute('bytes', 5242880) /* 5 MiB */);
-
-        var tracingDump = new MemoryAllocatorDump(pmd1B, 'tracing');
-        tracingDump.addAttribute('size',
-            new ScalarAttribute('bytes', 1048576) /* 1 MiB */);
-        tracingDump.addAttribute('resident_size',
-            new ScalarAttribute('bytes', 1572864) /* 1.5 MiB */);
-
-        return [mallocDump, v8Dump, tracingDump];
-      })();
 
       // Allocator dumps only.
       var pmd1C = addProcessMemoryDump(gmd1, pC, 43);
       pmd1C.memoryAllocatorDumps = (function() {
-        var oilpanDump = new MemoryAllocatorDump(pmd1C, 'oilpan');
-        oilpanDump.addAttribute('size',
-            new ScalarAttribute('bytes', 3221225472) /* 3 GiB */);
-        oilpanDump.addAttribute('inner_size',
-            new ScalarAttribute('bytes', 5242880) /* 5 MiB */);
-        oilpanDump.addAttribute('objects_count',
-            new ScalarAttribute('objects', 2015));
+        var oilpanDump = newAllocatorDump(pmd1C, 'oilpan', {
+          size: 3221225472 /* 3 GiB */,
+          inner_size: 5242880 /* 5 MiB */,
+          objects_count: new ScalarNumeric(unitlessNumber_smallerIsBetter, 2015)
+        });
+        var v8Dump = newAllocatorDump(pmd1C, 'v8', {
+          size: 1073741824 /* 1 GiB */,
+          inner_size: 2097152 /* 2 MiB */,
+          objects_count: new ScalarNumeric(unitlessNumber_smallerIsBetter, 204)
+        });
 
-        var v8Dump = new MemoryAllocatorDump(pmd1C, 'v8');
-        v8Dump.addAttribute('size',
-            new ScalarAttribute('bytes', 1073741824) /* 1 GiB */);
-        v8Dump.addAttribute('inner_size',
-            new ScalarAttribute('bytes', 2097152) /* 2 MiB */);
-        v8Dump.addAttribute('objects_count',
-            new ScalarAttribute('objects', 204));
-
-        var ownershipLink = new MemoryAllocatorDumpLink(v8Dump, oilpanDump);
-        v8Dump.owns = ownershipLink;
-        oilpanDump.ownedBy.push(ownershipLink);
+        addOwnershipLink(v8Dump, oilpanDump);
 
         return [oilpanDump, v8Dump];
       })();
@@ -140,7 +134,8 @@
           var v8HeapDump = new HeapDump(pmd1C, 'v8');
           v8HeapDump.addEntry(
               tr.c.TestUtils.newStackTrace(model,
-                  [undefined /* root */, 'V8.Execute', 'UpdateLayoutTree']),
+                  ['V8.Execute', 'UpdateLayoutTree']),
+              undefined /* sum over all object types */,
               536870912 /* 512 MiB */);
           return v8HeapDump;
         })()
@@ -154,7 +149,7 @@
       // Everything.
       var pmd2A = addProcessMemoryDump(gmd2, pA, 67);
       pmd2A.totals = { residentBytes: 32505856 /* 31 MiB */ };
-      pmd2A.vmRegions = [
+      pmd2A.vmRegions = VMRegionClassificationNode.fromRegions([
         VMRegion.fromDict({
           startAddress: 1024,
           sizeInBytes: 20971520, /* 20 MiB */
@@ -177,20 +172,14 @@
             proportionalResident: 524288 /* 512 KiB */
           }
         })
+      ]);
+      pmd2A.memoryAllocatorDumps = [
+        newAllocatorDump(pmd2A, 'malloc', { size: 9437184 /* 9 MiB */ }),
+        newAllocatorDump(pmd2A, 'tracing', {
+          size: 2097152 /* 2 MiB */,
+          resident_size: 2621440 /* 2.5 MiB */
+        })
       ];
-      pmd2A.memoryAllocatorDumps = (function() {
-        var mallocDump = new MemoryAllocatorDump(pmd2A, 'malloc');
-        mallocDump.addAttribute('size',
-            new ScalarAttribute('bytes', 9437184) /* 9 MiB */);
-
-        var tracingDump = new MemoryAllocatorDump(pmd2A, 'tracing');
-        tracingDump.addAttribute('size',
-            new ScalarAttribute('bytes', 2097152) /* 2 MiB */);
-        tracingDump.addAttribute('resident_size',
-            new ScalarAttribute('bytes', 2621440) /* 2.5 MiB */);
-
-        return [mallocDump, tracingDump];
-      })();
 
       // Totals and allocator dumps only.
       var pmd2B = addProcessMemoryDump(gmd2, pB, 69);
@@ -202,30 +191,16 @@
           private_bytes: 8912896 /* 8.5 MiB */
         }
       };
-      pmd2B.memoryAllocatorDumps = (function() {
-        var mallocDump = new MemoryAllocatorDump(pmd2B, 'malloc');
-        mallocDump.addAttribute('size',
-            new ScalarAttribute('bytes', 2621440) /* 2.5 MiB */);
-
-        var v8Dump = new MemoryAllocatorDump(pmd2B, 'v8');
-        v8Dump.addAttribute('size',
-            new ScalarAttribute('bytes', 5242880) /* 5 MiB */);
-
-        var blinkDump = new MemoryAllocatorDump(pmd2B, 'blink');
-        blinkDump.addAttribute('size',
-            new ScalarAttribute('bytes', 7340032) /* 7 MiB */);
-
-        var oilpanDump = new MemoryAllocatorDump(pmd2B, 'oilpan');
-        oilpanDump.addAttribute('size', new ScalarAttribute('bytes', 1));
-
-        var tracingDump = new MemoryAllocatorDump(pmd2B, 'tracing');
-        tracingDump.addAttribute('size',
-            new ScalarAttribute('bytes', 1572864) /* 1.5 MiB */);
-        tracingDump.addAttribute('resident_size',
-            new ScalarAttribute('bytes', 2097152) /* 2 MiB */);
-
-        return [mallocDump, v8Dump, blinkDump, oilpanDump, tracingDump];
-      })();
+      pmd2B.memoryAllocatorDumps = [
+        newAllocatorDump(pmd2B, 'malloc', { size: 2621440 /* 2.5 MiB */ }),
+        newAllocatorDump(pmd2B, 'v8', { size: 5242880 /* 5 MiB */ }),
+        newAllocatorDump(pmd2B, 'blink', { size: 7340032 /* 7 MiB */ }),
+        newAllocatorDump(pmd2B, 'oilpan', { size: 1 }),
+        newAllocatorDump(pmd2B, 'tracing', {
+          size: 1572864 /* 1.5 MiB */,
+          resident_size: 2097152 /* 2 MiB */
+        })
+      ];
 
       // Resettable peak total size only.
       var pmd2D = addProcessMemoryDump(gmd2, pD, 71);
@@ -249,7 +224,7 @@
           private_bytes: 7340032 /* 7 MiB */
         }
       };
-      pmd3B.vmRegions = [
+      pmd3B.vmRegions = VMRegionClassificationNode.fromRegions([
         VMRegion.fromDict({
           startAddress: 256,
           sizeInBytes: 6000,
@@ -262,51 +237,32 @@
             swapped: 64 /* 64 B */
           }
         })
+      ]);
+      pmd3B.memoryAllocatorDumps = [
+        newAllocatorDump(pmd3B, 'malloc', { size: 2883584 /* 2.75 MiB */ }),
+        newAllocatorDump(pmd3B, 'v8', { size: 5767168 /* 5.5 MiB */ }),
+        newAllocatorDump(pmd3B, 'blink', { size: 6291456 /* 6 MiB */ }),
+        newAllocatorDump(pmd3B, 'tracing', {
+          size: 2097152 /* 2 MiB */,
+          resident_size: 3145728 /* 3 MiB */
+        })
       ];
-      pmd3B.memoryAllocatorDumps = (function() {
-        var mallocDump = new MemoryAllocatorDump(pmd3B, 'malloc');
-        mallocDump.addAttribute('size',
-            new ScalarAttribute('bytes', 2883584) /* 2.75 MiB */);
-
-        var v8Dump = new MemoryAllocatorDump(pmd3B, 'v8');
-        v8Dump.addAttribute('size',
-            new ScalarAttribute('bytes', 5767168) /* 5.5 MiB */);
-
-        var blinkDump = new MemoryAllocatorDump(pmd3B, 'blink');
-        blinkDump.addAttribute('size',
-            new ScalarAttribute('bytes', 6291456) /* 7 MiB */);
-
-        var tracingDump = new MemoryAllocatorDump(pmd3B, 'tracing');
-        tracingDump.addAttribute('size',
-            new ScalarAttribute('bytes', 2097152) /* 2 MiB */);
-        tracingDump.addAttribute('resident_size',
-            new ScalarAttribute('bytes', 3145728) /* 3 MiB */);
-
-        return [mallocDump, v8Dump, blinkDump, tracingDump];
-      })();
 
       // Allocator dumps only.
       var pmd3C = addProcessMemoryDump(gmd3, pC, 100);
       pmd3C.memoryAllocatorDumps = (function() {
-        var oilpanDump = new MemoryAllocatorDump(pmd3C, 'oilpan');
-        oilpanDump.addAttribute('size',
-            new ScalarAttribute('bytes', 3221225472) /* 3 GiB */);
-        oilpanDump.addAttribute('inner_size',
-            new ScalarAttribute('bytes', 5242880) /* 5 MiB */);
-        oilpanDump.addAttribute('objects_count',
-            new ScalarAttribute('objects', 2015));
+        var oilpanDump = newAllocatorDump(pmd3C, 'oilpan', {
+          size: 3221225472 /* 3 GiB */,
+          inner_size: 5242880 /* 5 MiB */,
+          objects_count: new ScalarNumeric(unitlessNumber_smallerIsBetter, 2015)
+        });
+        var v8Dump = newAllocatorDump(pmd3C, 'v8', {
+          size: 2147483648 /* 2 GiB */,
+          inner_size: 2097152 /* 2 MiB */,
+          objects_count: new ScalarNumeric(unitlessNumber_smallerIsBetter, 204)
+        });
 
-        var v8Dump = new MemoryAllocatorDump(pmd3C, 'v8');
-        v8Dump.addAttribute('size',
-            new ScalarAttribute('bytes', 2147483648) /* 2 GiB */);
-        v8Dump.addAttribute('inner_size',
-            new ScalarAttribute('bytes', 2097152) /* 2 MiB */);
-        v8Dump.addAttribute('objects_count',
-            new ScalarAttribute('objects', 204));
-
-        var ownershipLink = new MemoryAllocatorDumpLink(v8Dump, oilpanDump);
-        v8Dump.owns = ownershipLink;
-        oilpanDump.ownedBy.push(ownershipLink);
+        addOwnershipLink(v8Dump, oilpanDump);
 
         return [oilpanDump, v8Dump];
       })();
@@ -315,11 +271,13 @@
           var v8HeapDump = new HeapDump(pmd1C, 'v8');
           v8HeapDump.addEntry(
               tr.c.TestUtils.newStackTrace(model,
-                  [undefined /* root */, 'V8.Execute', 'UpdateLayoutTree']),
+                  ['V8.Execute', 'UpdateLayoutTree']),
+              undefined /* sum over all object types */,
               268435456 /* 256 MiB */);
           v8HeapDump.addEntry(
               tr.c.TestUtils.newStackTrace(model,
-                  [undefined /* root */, 'V8.Execute', 'FrameView::layout']),
+                  ['V8.Execute', 'FrameView::layout']),
+              undefined /* sum over all object types */,
               134217728 /* 128 MiB */);
           return v8HeapDump;
         })()
@@ -350,37 +308,106 @@
     return createMultipleTestProcessMemoryDumps()[1];
   }
 
-  function checkAttributes(
-      row, column, expectedAttrValues, expectedClass, expectedUnits) {
-    var attrs;
+  function checkNumericFields(row, column, expectedValues, expectedUnit) {
+    var fields;
     if (column === undefined)
-      attrs = row;
+      fields = row;
     else
-      attrs = column.attrs(row);
+      fields = column.fields(row);
 
-    if (expectedAttrValues === undefined) {
-      assert.isUndefined(attrs);
+    if (expectedValues === undefined) {
+      assert.isUndefined(fields);
       return;
     }
 
-    assert.lengthOf(attrs, expectedAttrValues.length);
-    for (var i = 0; i < attrs.length; i++) {
-      var attr = attrs[i];
-      var expectedAttrValue = expectedAttrValues[i];
-      if (expectedAttrValue === undefined) {
-        assert.isUndefined(attr);
+    assert.lengthOf(fields, expectedValues.length);
+    for (var i = 0; i < fields.length; i++) {
+      var field = fields[i];
+      var expectedValue = expectedValues[i];
+      if (expectedValue === undefined) {
+        assert.isUndefined(field);
       } else {
-        assert.isDefined(expectedClass);  // Test sanity check.
-        assert.isDefined(expectedUnits);  // Test sanity check.
-        assert.instanceOf(attr, expectedClass);
-        assert.equal(attr.value, expectedAttrValue);
-        assert.equal(attr.units, expectedUnits);
+        assert.isDefined(expectedUnit);  // Test sanity check.
+        assert.instanceOf(field, ScalarNumeric);
+        assert.equal(field.value, expectedValue);
+        assert.equal(field.unit, expectedUnit);
       }
     }
   }
 
-  function checkSizeAttributes(row, column, expectedAttrValues) {
-    checkAttributes(row, column, expectedAttrValues, ScalarAttribute, 'bytes');
+  function checkSizeNumericFields(row, column, expectedValues) {
+    checkNumericFields(row, column, expectedValues,
+        sizeInBytes_smallerIsBetter);
+  }
+
+  function checkStringFields(row, column, expectedStrings) {
+    var fields = column.fields(row);
+
+    if (expectedStrings === undefined) {
+      assert.isUndefined(fields);
+      return;
+    }
+
+    assert.deepEqual(tr.b.asArray(fields), expectedStrings);
+  }
+
+  /**
+   * Check the titles, types and aggregation modes of a list of columns.
+   * expectedColumns is a list of dictionaries with the following fields:
+   *
+   *   - title: Either the expected title (string), or a matcher for it
+   *     (function that accepts the actual title as its argument).
+   *   - type: The expected class of the column.
+   *   - noAggregation: If true, the column is expected to have no aggregation
+   *     mode (regardless of expectedAggregationMode).
+   */
+  function checkColumns(columns, expectedColumns, expectedAggregationMode) {
+    assert.lengthOf(columns, expectedColumns.length);
+    for (var i = 0; i < expectedColumns.length; i++) {
+      var actualColumn = columns[i];
+      var expectedColumn = expectedColumns[i];
+      var expectedTitle = expectedColumn.title;
+      if (typeof expectedTitle === 'function')
+        expectedTitle(actualColumn.title);  // Custom title matcher.
+      else
+        assert.strictEqual(actualColumn.title, expectedTitle);  // String title.
+      assert.instanceOf(actualColumn, expectedColumn.type);
+      assert.strictEqual(actualColumn.aggregationMode,
+          expectedColumn.noAggregation ? undefined : expectedAggregationMode);
+    }
+  }
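+  // Illustrative call (the column types below are placeholders, not classes
+  // defined in this patch):
+  //   checkColumns(columns, [
+  //     {title: 'Total', type: SomeTitleColumn, noAggregation: true},
+  //     {title: colorLegendMatcher('malloc'), type: SomeNumericColumn}
+  //   ], AggregationMode.DIFF);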
+
+  function checkColumnInfosAndColor(
+      column, fields, contexts, expectedInfos, expectedColorReservedName) {
+    // Test sanity checks.
+    assert.isDefined(fields);
+    if (contexts !== undefined)
+      assert.lengthOf(contexts, fields.length);
+
+    // Check infos.
+    var infos = [];
+    column.addInfos(fields, contexts, infos);
+    assert.lengthOf(infos, expectedInfos.length);
+    for (var i = 0; i < expectedInfos.length; i++)
+      assert.deepEqual(infos[i], expectedInfos[i]);
+
+    // Check color.
+    var actualColor = typeof column.color === 'function' ?
+        column.color(fields, contexts) :
+        column.color;
+    checkColor(actualColor, expectedColorReservedName);
+  }
+
+  function checkColor(actualColorString, expectedColorReservedName) {
+    if (expectedColorReservedName === undefined) {
+      assert.isUndefined(actualColorString);
+      return;
+    }
+
+    var actualColor = Color.fromString(actualColorString);
+    var expectedColor = ColorScheme.colors[
+        ColorScheme.getColorIdForReservedName(expectedColorReservedName)];
+    assert.deepEqual(actualColor, expectedColor);
   }
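+  // For example, checkColor(someCssColorString, 'used_memory_column') passes
+  // only when the string parses to the exact color reserved under that name in
+  // the current ColorScheme, and checkColor(undefined, undefined) asserts that
+  // no color was produced at all.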
 
   function createAndCheckEmptyPanes(
@@ -436,7 +463,7 @@
       if (childPaneBuilder === undefined)
         return undefined;
       return childPaneBuilder();
-    }
+    };
 
     return paneEl;
   }
@@ -479,6 +506,16 @@
   }
 
   /**
+   * Extract a chronological list of ProcessMemoryDump(s) (for a given process)
+   * from a chronological list of dictionaries of ProcessMemoryDump(s).
+   */
+  function extractProcessMemoryDumps(processMemoryDumps, pid) {
+    return processMemoryDumps.map(function(memoryDumps) {
+      return memoryDumps[pid];
+    });
+  }
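+  // For example (dump names illustrative):
+  //   extractProcessMemoryDumps([{1: pmdA1, 2: pmdB1}, {2: pmdB2}], 2)
+  // returns [pmdB1, pmdB2], while pid 1 would yield [pmdA1, undefined].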
+
+  /**
    * Extract a chronological list of lists of VMRegion(s) (for a given process)
    * from a chronological list of dictionaries of ProcessMemoryDump(s).
    */
@@ -527,12 +564,17 @@
     createMultipleTestGlobalMemoryDumps: createMultipleTestGlobalMemoryDumps,
     createSingleTestProcessMemoryDump: createSingleTestProcessMemoryDump,
     createMultipleTestProcessMemoryDumps: createMultipleTestProcessMemoryDumps,
-    checkAttributes: checkAttributes,
-    checkSizeAttributes: checkSizeAttributes,
+    checkNumericFields: checkNumericFields,
+    checkSizeNumericFields: checkSizeNumericFields,
+    checkStringFields: checkStringFields,
+    checkColumns: checkColumns,
+    checkColumnInfosAndColor: checkColumnInfosAndColor,
+    checkColor: checkColor,
     createAndCheckEmptyPanes: createAndCheckEmptyPanes,
     createTestPane: createTestPane,
     isElementDisplayed: isElementDisplayed,
     convertToProcessMemoryDumps: convertToProcessMemoryDumps,
+    extractProcessMemoryDumps: extractProcessMemoryDumps,
     extractVmRegions: extractVmRegions,
     extractMemoryAllocatorDumps: extractMemoryAllocatorDumps,
     extractHeapDumps: extractHeapDumps
diff --git a/catapult/tracing/tracing/ui/analysis/memory_dump_sub_view_util.html b/catapult/tracing/tracing/ui/analysis/memory_dump_sub_view_util.html
index e260d69..3220735 100644
--- a/catapult/tracing/tracing/ui/analysis/memory_dump_sub_view_util.html
+++ b/catapult/tracing/tracing/ui/analysis/memory_dump_sub_view_util.html
@@ -6,10 +6,9 @@
 -->
 
 <link rel="import" href="/tracing/base/iteration_helpers.html">
-<link rel="import" href="/tracing/base/units/units.html">
-<link rel="import" href="/tracing/model/attribute.html">
 <link rel="import" href="/tracing/ui/base/dom_helpers.html">
-<link rel="import" href="/tracing/ui/units/scalar_span.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/ui/scalar_span.html">
 
 <script>
 'use strict';
@@ -40,18 +39,19 @@
      * Get the title associated with a given row.
      *
      * This method will decorate the title with color and '+++'/'---' prefix if
-     * appropriate (as determined by the optional row.defined field). Examples:
+     * appropriate (as determined by the optional row.contexts field).
+     * Examples:
      *
-     *   +-------------------------+-----------------+--------+--------+
-     *   | Defined (at timestamps) | Interpretation  | Prefix | Color  |
-     *   +-------------------------+-----------------+--------+--------+
-     *   | 1111111111              | always present  |        |        |
-     *   | 0000111111              | added           | +++    | red    |
-     *   | 1111111000              | deleted         | ---    | green  |
-     *   | 1100111111*             | flaky           |        | purple |
-     *   | 0001001111              | added + flaky   | +++    | purple |
-     *   | 1111100010              | deleted + flaky | ---    | purple |
-     *   +-------------------------+-----------------+--------+--------+
+     *   +----------------------+-----------------+--------+--------+
+     *   | Contexts provided at | Interpretation  | Prefix | Color  |
+     *   +----------------------+-----------------+--------+--------+
+     *   | 1111111111           | always present  |        |        |
+     *   | 0000111111           | added           | +++    | red    |
+     *   | 1111111000           | deleted         | ---    | green  |
+     *   | 1100111111*          | flaky           |        | purple |
+     *   | 0001001111           | added + flaky   | +++    | purple |
+     *   | 1111100010           | deleted + flaky | ---    | purple |
+     *   +----------------------+-----------------+--------+--------+
      *
      *   *) This means that, given a selection of 10 memory dumps, a particular
      *      row (e.g. a process) was present in the first 2 and last 6 of them
@@ -63,33 +63,33 @@
     value: function(row) {
       var formattedTitle = this.formatTitle(row);
 
-      var defined = row.defined;
-      if (defined === undefined || defined.length === 0)
+      var contexts = row.contexts;
+      if (contexts === undefined || contexts.length === 0)
         return formattedTitle;
 
       // Determine if the row was provided in the first and last row and how
       // many times it changed between being provided and not provided.
-      var firstDefined = defined[0];
-      var lastDefined = defined[defined.length - 1];
-      var changeDefinedCount = 0;
-      for (var i = 1; i < defined.length; i++) {
-        if (defined[i] !== defined[i - 1])
-          changeDefinedCount++;
+      var firstContext = contexts[0];
+      var lastContext = contexts[contexts.length - 1];
+      var changeDefinedContextCount = 0;
+      for (var i = 1; i < contexts.length; i++) {
+        if ((contexts[i] === undefined) !== (contexts[i - 1] === undefined))
+          changeDefinedContextCount++;
       }
 
       // Determine the color and prefix of the title.
       var color = undefined;
       var prefix = undefined;
-      if (!firstDefined && lastDefined) {
+      if (!firstContext && lastContext) {
         // The row was added.
         color = 'red';
         prefix = '+++';
-      } else if (firstDefined && !lastDefined) {
+      } else if (firstContext && !lastContext) {
         // The row was removed.
         color = 'green';
         prefix = '---';
       }
-      if (changeDefinedCount > 1) {
+      if (changeDefinedContextCount > 1) {
         // The row was flaky (added/removed more than once).
         color = 'purple';
       }
@@ -125,88 +125,118 @@
   };
 
   /**
-   * A table column for displaying memory dump data.
+   * Abstract table column for displaying memory dump data.
    *
    * @constructor
    */
-  function MemoryColumn(name, units, cellGetter, aggregationMode) {
+  function MemoryColumn(name, cellPath, aggregationMode) {
     this.name = name;
-    this.units = units;
-    this.cell = cellGetter;
+    this.cellPath = cellPath;
 
     // See MemoryColumn.AggregationMode enum in this file.
     this.aggregationMode = aggregationMode;
-
-    // Color of the values returned by this column can be set by assigning a
-    // 'color' field/getter. The value can be either (1) undefined (no specific
-    // color is set), (2) a color string (e.g. 'blue'), or (3) a function
-    // mapping lists of attributes to color strings (the return value can be
-    // undefined again).
   }
 
-  MemoryColumn.fromRows = function(rows, cellKey, aggregationMode,
-      opt_customColumnConstructorRules) {
-    var columnTraits = {};
-
-    function gatherTraits(row) {
-      if (row === undefined)
-        return;
-      var attrCells = row[cellKey];
-      if (attrCells === undefined)
-        return;
-      tr.b.iterItems(attrCells, function(attrName, attrCell) {
-        if (attrCell === undefined)
+  /**
+   * Construct columns from cells in a hierarchy of rows and a list of rules.
+   *
+   * The list of rules contains objects with three fields:
+   *
+   *   condition: Optional string or regular expression matched against the
+   *       name of a cell. If omitted, the rule will match any cell.
+   *   importance: Mandatory number which determines the final order of the
+   *       columns. The column with the highest importance will be first in the
+   *       returned array.
+   *   columnConstructor: Mandatory memory column constructor.
+   *
+   * Example:
+   *
+   *   var importanceRules = [
+   *     {
+   *       condition: 'page_size',
+   *       columnConstructor: NumericMemoryColumn,
+   *       importance: 8
+   *     },
+   *     {
+   *       condition: /size/,
+   *       columnConstructor: CustomNumericMemoryColumn,
+   *       importance: 10
+   *     },
+   *     {
+   *       // No condition: matches all columns.
+   *       columnConstructor: NumericMemoryColumn,
+   *       importance: 9
+   *     }
+   *   ];
+   *
+   * Given a name of a cell, the corresponding column constructor and
+   * importance are determined by the first rule whose condition matches the
+   * column's name. For example, given a cell with name 'inner_size', the
+   * corresponding column will be constructed using CustomNumericMemoryColumn
+   * and its importance (for sorting purposes) will be 10 (second rule).
+   *
+   * After columns are constructed for all cell names, they are sorted in
+   * descending order of importance and the resulting list is returned. In the
+   * example above, the constructed columns will be sorted into three groups as
+   * follows:
+   *
+   *      [most important, left in the resulting table]
+   *   1. columns whose name contains 'size' excluding 'page_size' because it
+   *      would have already matched the first rule (Note that string matches
+   *      must be exact so a column named 'page_size2' would not match the
+   *      first rule and would therefore belong to this group).
+   *   2. columns whose name does not contain 'size'.
+   *   3. columns whose name is 'page_size'.
+   *      [least important, right in the resulting table]
+   *
+   * where columns will be sorted alphabetically within each group.
+   */
+  MemoryColumn.fromRows = function(rows, cellKey, aggregationMode, rules) {
+    // Recursively find the names of all cells of the rows (and their sub-rows).
+    var cellNames = new Set();
+    function gatherCellNames(rows) {
+      rows.forEach(function(row) {
+        if (row === undefined)
           return;
-        var attrValues = attrCell.attrs;
-        if (attrValues === undefined)
-          return;
-        var existingTraits = columnTraits[attrName];
-        attrValues.forEach(function(attrValue) {
-          if (attrValue === undefined)
-            return;
-          if (existingTraits === undefined) {
-            columnTraits[attrName] = existingTraits = {
-              constructor: attrValue.constructor,
-              units: attrValue.units
-            };
-            return;
-          }
-          if (existingTraits.constructor !== attrValue.constructor ||
-              existingTraits.units !== attrValue.units) {
-            existingTraits.constructor = tr.model.UnknownAttribute;
-            existingTraits.units = undefined;
-          }
-        });
+        var fieldCells = row[cellKey];
+        if (fieldCells !== undefined) {
+          tr.b.iterItems(fieldCells, function(fieldName, fieldCell) {
+            if (fieldCell === undefined || fieldCell.fields === undefined)
+              return;
+            cellNames.add(fieldName);
+          });
+        }
+        var subRows = row.subRows;
+        if (subRows !== undefined)
+          gatherCellNames(subRows);
       });
-      if (row.subRows !== undefined)
-        row.subRows.forEach(gatherTraits);
-    };
-    rows.forEach(gatherTraits);
+    }
+    gatherCellNames(rows);
 
-    var columns = [];
-    tr.b.iterItems(columnTraits, function(columnName, columnTraits) {
-      var cellGetter = fieldGetter(cellKey, columnName);
-      var constructor = undefined;
-      if (opt_customColumnConstructorRules !== undefined) {
-        var matchingRule = MemoryColumn.findMatchingRule(
-            columnName, opt_customColumnConstructorRules);
-        if (matchingRule !== undefined)
-          constructor = matchingRule.columnConstructor;
-      }
-      if (constructor === undefined)
-        constructor = MemoryColumn.constructorFromAttributeTraits(columnTraits);
-      columns.push(new constructor(
-          columnName, columnTraits.units, cellGetter, aggregationMode));
+    // Based on the provided list of rules, construct the columns and calculate
+    // their importance.
+    var positions = [];
+    cellNames.forEach(function(cellName) {
+      var cellPath = [cellKey, cellName];
+      var matchingRule = MemoryColumn.findMatchingRule(cellName, rules);
+      var constructor = matchingRule.columnConstructor;
+      var column = new constructor(cellName, cellPath, aggregationMode);
+      positions.push({
+        importance: matchingRule.importance,
+        column: column
+      });
     });
 
-    return columns;
-  };
+    positions.sort(function(a, b) {
+      // Sort columns with the same importance alphabetically.
+      if (a.importance === b.importance)
+        return COLLATOR.compare(a.column.name, b.column.name);
 
-  MemoryColumn.constructorFromAttributeTraits = function(traits) {
-    if (traits.constructor === tr.model.ScalarAttribute)
-      return ScalarMemoryColumn;
-    else
-      return MemoryColumn;
+      // Sort columns in descending order of importance.
+      return b.importance - a.importance;
+    });
+
+    return positions.map(function(position) { return position.column; });
   };
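+  // A minimal sketch of a call to MemoryColumn.fromRows above (the cell key
+  // and the rule list are assumed for illustration, not taken from production
+  // code):
+  //   var columns = MemoryColumn.fromRows(
+  //       rows, 'usedMemoryCells', AggregationMode.DIFF, importanceRules);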
 
   MemoryColumn.spaceEqually = function(columns) {
@@ -238,220 +268,68 @@
     return condition.test(name);
   };
 
-  /**
-   * Sort a list of memory columns according to a list of importance rules.
-   * This function modifies the original array and doesn't return anything.
-   *
-   * The list of importance rules contains objects with mandatory 'importance'
-   * numeric fields and optional 'condition' string or regex fields. Example:
-   *
-   *   var importanceRules = [
-   *     {
-   *       condition: 'page_size',
-   *       importance: 8
-   *     },
-   *     {
-   *       condition: /size/,
-   *       importance: 10
-   *     },
-   *     {
-   *       // No condition: matches all columns.
-   *       importance: 9
-   *     }
-   *   ];
-   *
-   * The importance of a column is determined by the first rule whose condition
-   * matches the column's name, so the rules above will sort a generic list of
-   * columns into three groups as follows:
-   *
-   *      [most important, left in the resulting table]
-   *   1. columns whose name contains 'size' excluding 'page_size' because it
-   *      would have already matched the first rule (Note that string matches
-   *      must be exact so a column named 'page_size2' would not match the
-   *      first rule and would therefore belong to this group).
-   *   2. columns whose name does not contain 'size'.
-   *   3. columns whose name is 'page_size'.
-   *      [least important, right in the resulting table]
-   *
-   * where columns are sorted alphabetically within each group.
-   */
-  MemoryColumn.sortByImportance = function(columns, importanceRules) {
-    var positions = columns.map(function(column, srcIndex) {
-      return {
-        importance: column.getImportance(importanceRules),
-        column: column
-      };
-    });
-
-    positions.sort(function(a, b) {
-      // Sort columns with the same importance alphabetically.
-      if (a.importance === b.importance)
-        return COLLATOR.compare(a.column.name, b.column.name);
-
-      // Sort columns in descending order of importance.
-      return b.importance - a.importance;
-    });
-
-    positions.forEach(function(position, dstIndex) {
-      columns[dstIndex] = position.column;
-    });
-  };
-
-  /**
-   * Convert a list of columns names to a list of importance rules. The list of
-   * column names is assumed to be sorted in descending order of importance
-   * (i.e. from left to right). For example:
-   *
-   *    MemoryColumn.columnNamesToImportanceRules([
-   *      'Column A',
-   *      'Column B',
-   *      'Column C'
-   *    ]);
-   *
-   * will return the following list of importance rules:
-   *
-   *   [
-   *     {
-   *       condition: 'Column A',
-   *       importance: 3
-   *     },
-   *     {
-   *       condition: 'Column B',
-   *       importance: 2
-   *     },
-   *     {
-   *       condition: 'Column C',
-   *       importance: 1
-   *     }
-   *   ]
-   */
-  MemoryColumn.columnNamesToImportanceRules = function(columnNames) {
-    return columnNames.map(function(columnName, columnIndex) {
-      return {
-        condition: columnName,
-        importance: columnNames.length - columnIndex
-      };
-    });
-  };
-
-  MemoryColumn.iconFromAttributeInfoType = function(type) {
-    switch (type) {
-      case tr.model.AttributeInfoType.WARNING:
-        return {
-          symbol: String.fromCharCode(9888),  // Exclamation mark in a triangle.
-          color: 'red'
-        };
-      case tr.model.AttributeInfoType.LINK:
-        return {
-          symbol: String.fromCharCode(9903)  // Link symbol.
-          /* Don't modify the color. */
-        };
-      case tr.model.AttributeInfoType.MEMORY_OWNER:
-        return {
-          symbol: String.fromCharCode(8702),  // Right arrow.
-          color: 'green'
-        };
-      case tr.model.AttributeInfoType.MEMORY_OWNED:
-        return {
-          symbol: String.fromCharCode(8701),  // Left arrow.
-          color: 'green'
-        };
-      case tr.model.AttributeInfoType.OVERALL_VALUE:
-        return {
-          symbol: String.fromCharCode(8614)  // Right arrow with a bar.
-          /* Don't modify the color. */
-        };
-      case tr.model.AttributeInfoType.RECENT_VALUE:
-        return {
-          symbol: String.fromCharCode(8618)  // Right arrow with a hook.
-          /* Don't modify the color. */
-        };
-      case tr.model.AttributeInfoType.HAS_HEAP_DUMP:
-        return {
-          symbol: String.fromCharCode(9776)  // Trigram for heaven.
-          /* Don't modify the color. */
-        };
-      default:
-        return {
-          symbol: String.fromCharCode(9432),  // Circled small letter 'i'.
-          color: 'blue'
-        };
-    }
-    throw new Error('Unreachable');
-  };
-
   /** @enum */
   MemoryColumn.AggregationMode = {
     DIFF: 0,
     MAX: 1
   };
 
+  MemoryColumn.SOME_TIMESTAMPS_INFO_QUANTIFIER = 'at some selected timestamps';
+
   MemoryColumn.prototype = {
     get title() {
       return this.name;
     },
 
-    attrs: function(row) {
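+    // Resolve this column's cell by walking cellPath from the row. For
+    // example, a cellPath of ['usedMemoryCells', 'malloc'] (names
+    // illustrative) reads row.usedMemoryCells['malloc'] and yields undefined
+    // as soon as any step is missing.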
+    cell: function(row) {
+      var cell = row;
+      var cellPath = this.cellPath;
+      for (var i = 0; i < cellPath.length; i++) {
+        if (cell === undefined)
+          return undefined;
+        cell = cell[cellPath[i]];
+      }
+      return cell;
+    },
+
+    aggregateCells: function(row, subRows) {
+      // No generic aggregation.
+    },
+
+    fields: function(row) {
       var cell = this.cell(row);
       if (cell === undefined)
         return undefined;
-      return cell.attrs;
+      return cell.fields;
     },
 
+    /**
+     * Format a cell associated with this column from the given row. This
+     * method is not intended to be overridden.
+     */
     value: function(row) {
-      var attrs = this.attrs(row);
-      if (this.hasAllRelevantAttrsUndefined(attrs))
+      var fields = this.fields(row);
+      if (this.hasAllRelevantFieldsUndefined(fields))
         return '';
-      return this.formatAttributes(attrs);
-    },
 
-    /**
-     * Returns true iff all attributes of a row which are relevant for the
-     * current aggregation mode (e.g. first and last attribute for diff mode)
-     * are undefined.
-     */
-    hasAllRelevantAttrsUndefined: function(attrs) {
-      if (attrs === undefined)
-        return true;
+      // Determine the color and infos of the resulting element.
+      var contexts = row.contexts;
+      var color = this.color(fields, contexts);
+      var infos = [];
+      this.addInfos(fields, contexts, infos);
 
-      switch (this.aggregationMode) {
-        case MemoryColumn.AggregationMode.DIFF:
-          // Only the first and last attribute are relevant.
-          return attrs[0] === undefined &&
-              attrs[attrs.length - 1] === undefined;
+      // Format the actual fields.
+      var formattedFields = this.formatFields(fields);
 
-        case MemoryColumn.AggregationMode.MAX:
-        default:
-          // All attributes are relevant.
-          return attrs.every(function(attr) { return attr === undefined; });
-      }
-    },
+      // If no color is specified and there are no infos, there is no need to
+      // wrap the value in a span element.
+      if ((color === undefined || formattedFields === '') && infos.length === 0)
+        return formattedFields;
 
-    /**
-     * Format a defined attribute (both values and infos). This method is not
-     * intended to be overriden. At least one attribute relevant for the
-     * current aggregation mode is guaranteed to be defined.
-     */
-    formatAttributes: function(attrs) {
-      var formattedValue = this.formatAttributeValues(attrs);
-
-      // Determine the color of the resulting element.
-      var color;
-      if (typeof this.color === 'function')
-        color = this.color(attrs);
-      else
-        color = this.color;
-
-      // If no color is specified and there are no infos, there is no need
-      // to wrap the value in a span element.
-      var infos = this.getInfos(attrs);
-      if ((color === undefined || formattedValue === '') && infos.length === 0)
-        return formattedValue;
-
-      var attrEl = document.createElement('span');
-      attrEl.style.display = 'flex';
-      attrEl.style.alignItems = 'center';
-      attrEl.appendChild(tr.ui.b.asHTMLOrTextNode(formattedValue));
+      var fieldEl = document.createElement('span');
+      fieldEl.style.display = 'flex';
+      fieldEl.style.alignItems = 'center';
+      fieldEl.appendChild(tr.ui.b.asHTMLOrTextNode(formattedFields));
 
       // Add info icons with tooltips.
       infos.forEach(function(info) {
@@ -459,60 +337,88 @@
         infoEl.style.paddingLeft = '4px';
         infoEl.style.cursor = 'help';
         infoEl.style.fontWeight = 'bold';
-        var icon = MemoryColumn.iconFromAttributeInfoType(info.type);
-        infoEl.textContent = icon.symbol;
-        if (icon.color !== undefined)
-          infoEl.style.color = icon.color;
+        infoEl.textContent = info.icon;
+        if (info.color !== undefined)
+          infoEl.style.color = info.color;
         infoEl.title = info.message;
-        attrEl.appendChild(infoEl);
+        fieldEl.appendChild(infoEl);
       }, this);
 
       // Set the color of the element.
       if (color !== undefined)
-        attrEl.style.color = color;
+        fieldEl.style.color = color;
 
-      return attrEl;
+      return fieldEl;
     },
 
     /**
-     * Format the values of a single or multiple attributes. At least one
-     * attribute relevant for the current aggregation mode is guaranteed to be
-     * defined.
+     * Returns true iff all fields of a row which are relevant for the current
+     * aggregation mode (e.g. first and last field for diff mode) are undefined.
      */
-    formatAttributeValues: function(attrs) {
-      if (attrs.length === 1)
-        return this.formatSingleAttributeValue(attrs[0]);
-      else
-        return this.formatMultipleAttributeValues(attrs);
-    },
+    hasAllRelevantFieldsUndefined: function(fields) {
+      if (fields === undefined)
+        return true;
 
-    /**
-     * Format the value of a single defined attribute.
-     *
-     * This method is intended to be overriden by attribute type/unit specific
-     * columns (e.g. show '1.0 KiB' instead of '1024' for ScalarAttribute(s)
-     * representing bytes).
-     */
-    formatSingleAttributeValue: function(attr) {
-      return String(attr.value);
-    },
-
-    /**
-     * Format the values of multiple attributes. At least one attribute
-     * relevant for the current aggregation mode is guaranteed to be defined.
-     *
-     * The aggregation mode specializations of this method (e.g.
-     * formatMultipleAttributeValuesDiff) are intended to be overriden by
-     * attribute type/unit specific columns.
-     */
-    formatMultipleAttributeValues: function(attrs) {
       switch (this.aggregationMode) {
         case MemoryColumn.AggregationMode.DIFF:
-          return this.formatMultipleAttributeValuesDiff(
-              attrs[0], attrs[attrs.length - 1]);
+          // Only the first and last field are relevant.
+          return fields[0] === undefined &&
+              fields[fields.length - 1] === undefined;
 
         case MemoryColumn.AggregationMode.MAX:
-          return this.formatMultipleAttributeValuesMax(attrs);
+        default:
+          // All fields are relevant.
+          return fields.every(function(field) { return field === undefined; });
+      }
+    },
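+    // For example, with DIFF aggregation [undefined, someField, undefined]
+    // still counts as all-relevant-undefined (only the first and last entries
+    // matter), whereas with MAX aggregation it does not.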
+
+    /**
+     * Get the color of the given fields formatted by this column. At least one
+     * field relevant for the current aggregation mode is guaranteed to be
+     * defined.
+     */
+    color: function(fields, contexts) {
+      return undefined;
+    },
+
+    /**
+     * Format an arbitrary number of fields. At least one field relevant for
+     * the current aggregation mode is guaranteed to be defined.
+     */
+    formatFields: function(fields) {
+      if (fields.length === 1)
+        return this.formatSingleField(fields[0]);
+      else
+        return this.formatMultipleFields(fields);
+    },
+
+    /**
+     * Format a single defined field.
+     *
+     * This method is intended to be overridden by field type specific columns
+     * (e.g. show '1.0 KiB' instead of '1024' for ScalarNumeric(s) representing
+     * bytes).
+     */
+    formatSingleField: function(field) {
+      throw new Error('Not implemented');
+    },
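+    // Hypothetical override in a field-type-specific subclass (sketch only,
+    // not part of this patch):
+    //   formatSingleField: function(field) {
+    //     return String(field.value);  // e.g. print a ScalarNumeric plainly.
+    //   },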
+
+    /**
+     * Format multiple fields. At least one field relevant for the current
+     * aggregation mode is guaranteed to be defined.
+     *
+     * The aggregation mode specializations of this method (e.g.
+     * formatMultipleFieldsDiff) are intended to be overridden by field type
+     * specific columns.
+     */
+    formatMultipleFields: function(fields) {
+      switch (this.aggregationMode) {
+        case MemoryColumn.AggregationMode.DIFF:
+          return this.formatMultipleFieldsDiff(
+              fields[0], fields[fields.length - 1]);
+
+        case MemoryColumn.AggregationMode.MAX:
+          return this.formatMultipleFieldsMax(fields);
 
         default:
           return tr.ui.b.createSpan({
@@ -522,54 +428,26 @@
       }
     },
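+
+    // Example of the dispatch above: in AggregationMode.DIFF with
+    // fields === [a, b, c] (placeholder names), formatMultipleFields
+    // delegates to formatMultipleFieldsDiff(a, c) and ignores the middle
+    // field.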
 
-    formatMultipleAttributeValuesDiff: function(firstAttr, lastAttr) {
-      if (firstAttr === undefined) {
-        // Attribute was added ("+NEW_VALUE" in red).
-        var spanEl = tr.ui.b.createSpan({color: 'red'});
-        spanEl.appendChild(tr.ui.b.asHTMLOrTextNode('+'));
-        spanEl.appendChild(tr.ui.b.asHTMLOrTextNode(
-            this.formatSingleAttributeValue(lastAttr)));
-        return spanEl;
-      } else if (lastAttr === undefined) {
-        // Attribute was removed ("-OLD_VALUE" in green).
-        var spanEl = tr.ui.b.createSpan({color: 'green'});
-        spanEl.appendChild(tr.ui.b.asHTMLOrTextNode('-'));
-        spanEl.appendChild(tr.ui.b.asHTMLOrTextNode(
-            this.formatSingleAttributeValue(firstAttr)));
-        return spanEl;
-      } else if (firstAttr.value === lastAttr.value &&
-                 firstAttr.units === lastAttr.units) {
-        // Attribute didn't change ("VALUE" with unchanged color).
-        return this.formatSingleAttributeValue(firstAttr);
-      } else {
-        // Attribute changed ("OLD_VALUE -> NEW_VALUE" in orange).
-        var spanEl = tr.ui.b.createSpan({color: 'DarkOrange'});
-        spanEl.appendChild(tr.ui.b.asHTMLOrTextNode(
-            this.formatSingleAttributeValue(firstAttr)));
-        spanEl.appendChild(tr.ui.b.asHTMLOrTextNode(
-            ' ' + RIGHTWARDS_ARROW + ' '));
-        spanEl.appendChild(tr.ui.b.asHTMLOrTextNode(
-            this.formatSingleAttributeValue(lastAttr)));
-        return spanEl;
-      }
+    formatMultipleFieldsDiff: function(firstField, lastField) {
+      throw new Error('Not implemented');
     },
 
-    formatMultipleAttributeValuesMax: function(attrs) {
-      return this.getMaxAttributeValue(attrs);
+    formatMultipleFieldsMax: function(fields) {
+      return this.formatSingleField(this.getMaxField(fields));
     },
 
     cmp: function(rowA, rowB) {
-      var attrsA = this.attrs(rowA);
-      var attrsB = this.attrs(rowB);
+      var fieldsA = this.fields(rowA);
+      var fieldsB = this.fields(rowB);
 
       // Sanity check.
-      if (attrsA !== undefined && attrsB !== undefined &&
-          attrsA.length !== attrsB.length)
-        throw new Error('Different number of attributes');
+      if (fieldsA !== undefined && fieldsB !== undefined &&
+          fieldsA.length !== fieldsB.length)
+        throw new Error('Different number of fields');
 
-      // Handle empty attributes.
-      var undefinedA = this.hasAllRelevantAttrsUndefined(attrsA);
-      var undefinedB = this.hasAllRelevantAttrsUndefined(attrsB);
+      // Handle empty fields.
+      var undefinedA = this.hasAllRelevantFieldsUndefined(fieldsA);
+      var undefinedB = this.hasAllRelevantFieldsUndefined(fieldsB);
       if (undefinedA && undefinedB)
         return 0;
       if (undefinedA)
@@ -577,130 +455,78 @@
       if (undefinedB)
         return 1;
 
-      return this.compareAttributes(attrsA, attrsB);
+      return this.compareFields(fieldsA, fieldsB);
     },
 
     /**
-     * Compare the values of a pair of single or multiple attributes. At least
-     * one attribute relevant for the current aggregation mode is guaranteed to
-     * be defined in each of the two lists of attributes.
+     * Compare a pair of single or multiple fields. At least one field relevant
+     * for the current aggregation mode is guaranteed to be defined in each of
+     * the two lists.
      */
-    compareAttributes: function(attrsA, attrsB) {
-      if (attrsA.length === 1)
-        return this.compareSingleAttributes(attrsA[0], attrsB[0]);
+    compareFields: function(fieldsA, fieldsB) {
+      if (fieldsA.length === 1)
+        return this.compareSingleFields(fieldsA[0], fieldsB[0]);
       else
-        return this.compareMultipleAttributes(attrsA, attrsB);
+        return this.compareMultipleFields(fieldsA, fieldsB);
     },
 
     /**
-     * Compare the values of a pair of single defined attributes.
+     * Compare a pair of single defined fields.
      *
-     * This method and/or compareSingleAttributeValues are intended to be
-     * overriden by attribute type/unit specific columns.
+     * This method is intended to be overridden by field type specific columns.
      */
-    compareSingleAttributes: function(attrA, attrB) {
-      return this.compareSingleAttributeValues(attrA.value, attrB.value);
+    compareSingleFields: function(fieldA, fieldB) {
+      throw new Error('Not implemented');
     },
 
     /**
-     * Compare the values of a pair of multiple attributes. At least one
-     * attribute relevant for the current aggregation mode is guaranteed to be
-     * defined in each of the two lists of attributes.
+     * Compare a pair of multiple fields. At least one field relevant for the
+     * current aggregation mode is guaranteed to be defined in each of the two
+     * lists.
      *
      * The aggregation mode specializations of this method (e.g.
-     * compareMultipleAttributesDiff) are intended to be overriden by attribute
-     * type/unit specific columns.
+     * compareMultipleFieldsDiff) are intended to be overridden by field type
+     * specific columns.
      */
-    compareMultipleAttributes: function(attrsA, attrsB) {
+    compareMultipleFields: function(fieldsA, fieldsB) {
       switch (this.aggregationMode) {
         case MemoryColumn.AggregationMode.DIFF:
-          return this.compareMultipleAttributesDiff(
-              attrsA[0], attrsA[attrsA.length - 1],
-              attrsB[0], attrsB[attrsB.length - 1]);
+          return this.compareMultipleFieldsDiff(
+              fieldsA[0], fieldsA[fieldsA.length - 1],
+              fieldsB[0], fieldsB[fieldsB.length - 1]);
 
         case MemoryColumn.AggregationMode.MAX:
-          return this.compareMultipleAttributesMax(attrsA, attrsB);
+          return this.compareMultipleFieldsMax(fieldsA, fieldsB);
 
         default:
           return 0;
       }
     },
 
-    compareMultipleAttributesDiff: function(firstAttrA, lastAttrA, firstAttrB,
-        lastAttrB) {
-      // If one of the attributes was added (and the other one wasn't), mark
-      // the corresponding diff as greater.
-      if (firstAttrA === undefined && firstAttrB !== undefined)
-        return 1;
-      if (firstAttrA !== undefined && firstAttrB === undefined)
-        return -1;
-
-      // If both attributes were added, compare the last values (greater last
-      // value implies greater diff).
-      if (firstAttrA === undefined && firstAttrB === undefined)
-        return this.compareSingleAttributes(lastAttrA, lastAttrB);
-
-      // If one of the attributes was removed (and the other one wasn't), mark
-      // the corresponding diff as lower.
-      if (lastAttrA === undefined && lastAttrB !== undefined)
-        return -1;
-      if (lastAttrA !== undefined && lastAttrB === undefined)
-        return 1;
-
-      // If both attributes were removed, compare the first values (greater
-      // first value implies smaller (!) diff).
-      if (lastAttrA === undefined && lastAttrB === undefined)
-        return this.compareSingleAttributes(firstAttrB, firstAttrA);
-
-      var areAttrsAEqual = firstAttrA.value === lastAttrA.value &&
-          firstAttrA.units === lastAttrA.units;
-      var areAttrsBEqual = firstAttrB.value === lastAttrB.value &&
-          firstAttrB.units === lastAttrB.units;
-
-      // Consider diffs of attributes that did not change to be smaller than
-      // diffs of attributes that did change.
-      if (areAttrsAEqual && areAttrsBEqual)
-        return 0;
-      if (areAttrsAEqual)
-        return -1;
-      if (areAttrsBEqual)
-        return 1;
-
-      // Both attributes changed. We are unable to determine the ordering of
-      // the diffs.
-      return 0;
+    compareMultipleFieldsDiff: function(firstFieldA, lastFieldA, firstFieldB,
+        lastFieldB) {
+      throw new Error('Not implemented');
     },
 
-    compareMultipleAttributesMax: function(attrsA, attrsB) {
-      return this.compareSingleAttributeValues(
-          this.getMaxAttributeValue(attrsA), this.getMaxAttributeValue(attrsB));
+    compareMultipleFieldsMax: function(fieldsA, fieldsB) {
+      return this.compareSingleFields(
+          this.getMaxField(fieldsA), this.getMaxField(fieldsB));
     },
 
-    getMaxAttributeValue: function(attrs) {
-      return attrs.reduce(function(accumulator, attr) {
-        if (attr === undefined)
+    getMaxField: function(fields) {
+      return fields.reduce(function(accumulator, field) {
+        if (field === undefined)
           return accumulator;
-        var attrValue = attr.value;
         if (accumulator === undefined ||
-            this.compareSingleAttributeValues(attrValue, accumulator) > 0) {
-          return attrValue;
+            this.compareSingleFields(field, accumulator) > 0) {
+          return field;
         }
         return accumulator;
       }.bind(this), undefined);
     },
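+
+    // Sketch (a and b are placeholder fields): getMaxField([undefined, a, b])
+    // skips the undefined entry and returns the greater of a and b according
+    // to the subclass's compareSingleFields (keeping the earlier one on ties).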
 
-    compareSingleAttributeValues: function(attrValueA, attrValueB) {
-      return COLLATOR.compare(String(attrValueA), String(attrValueB));
-    },
-
-    getInfos: function(attrs) {
-      if (attrs.length !== 1) {
-        // Don't aggregate infos for multiple attributes (it is unclear how
-        // they should be aggregated in the first place).
-        return [];
-      }
-
-      return attrs[0].infos;
+    addInfos: function(fields, contexts, infos) {
+      // No generic infos.
     },
 
     getImportance: function(importanceRules) {
@@ -724,222 +550,313 @@
   /**
    * @constructor
    */
-  function ScalarMemoryColumn(name, title, units, cellGetter, aggregationMode) {
-    MemoryColumn.call(this, name, title, units, cellGetter, aggregationMode);
+  function StringMemoryColumn(name, cellPath, aggregationMode) {
+    MemoryColumn.call(this, name, cellPath, aggregationMode);
   }
 
-  ScalarMemoryColumn.prototype = {
+  StringMemoryColumn.prototype = {
     __proto__: MemoryColumn.prototype,
 
-    formatSingleAttributeValue: function(attr) {
-      return this.formatUnits(attr.value, false);
+    formatSingleField: function(string) {
+      return string;
     },
 
-    formatMultipleAttributeValuesDiff: function(firstAttr, lastAttr) {
-      return this.formatUnits(this.getDiffAttrValue(firstAttr, lastAttr), true);
+    formatMultipleFieldsDiff: function(firstString, lastString) {
+      if (firstString === undefined) {
+        // String was added ("+NEW_VALUE" in red).
+        var spanEl = tr.ui.b.createSpan({color: 'red'});
+        spanEl.appendChild(tr.ui.b.asHTMLOrTextNode('+'));
+        spanEl.appendChild(tr.ui.b.asHTMLOrTextNode(
+            this.formatSingleField(lastString)));
+        return spanEl;
+      } else if (lastString === undefined) {
+        // String was removed ("-OLD_VALUE" in green).
+        var spanEl = tr.ui.b.createSpan({color: 'green'});
+        spanEl.appendChild(tr.ui.b.asHTMLOrTextNode('-'));
+        spanEl.appendChild(tr.ui.b.asHTMLOrTextNode(
+            this.formatSingleField(firstString)));
+        return spanEl;
+      } else if (firstString === lastString) {
+        // String didn't change ("VALUE" with unchanged color).
+        return this.formatSingleField(firstString);
+      } else {
+        // String changed ("OLD_VALUE -> NEW_VALUE" in orange).
+        var spanEl = tr.ui.b.createSpan({color: 'DarkOrange'});
+        spanEl.appendChild(tr.ui.b.asHTMLOrTextNode(
+            this.formatSingleField(firstString)));
+        spanEl.appendChild(tr.ui.b.asHTMLOrTextNode(
+            ' ' + RIGHTWARDS_ARROW + ' '));
+        spanEl.appendChild(tr.ui.b.asHTMLOrTextNode(
+            this.formatSingleField(lastString)));
+        return spanEl;
+      }
     },
 
-    formatMultipleAttributeValuesMax: function(attrs) {
-      return this.formatUnits(this.getMaxAttributeValue(attrs), false);
+    compareSingleFields: function(stringA, stringB) {
+      return COLLATOR.compare(stringA, stringB);
     },
 
-    formatUnits: function(value, isDelta) {
-      if (value === undefined)
-        return '';
+    compareMultipleFieldsDiff: function(firstStringA, lastStringA, firstStringB,
+        lastStringB) {
+      // If one of the strings was added (and the other one wasn't), mark the
+      // corresponding diff as greater.
+      if (firstStringA === undefined && firstStringB !== undefined)
+        return 1;
+      if (firstStringA !== undefined && firstStringB === undefined)
+        return -1;
 
-      var sizeEl = document.createElement('tr-ui-u-scalar-span');
-      sizeEl.value = value;
-      // TODO(petrcermak): Figure out if/how we can use tr.b.u.Units.fromJSON.
-      if (this.units === 'bytes')
-        sizeEl.unit = tr.b.u.Units.sizeInBytes;
-      else
-        sizeEl.unit = tr.b.u.Units.unitlessNumber;
+      // If both strings were added, compare the last values (greater last
+      // value implies greater diff).
+      if (firstStringA === undefined && firstStringB === undefined)
+        return this.compareSingleFields(lastStringA, lastStringB);
 
-      if (!isDelta)
-        return sizeEl;
+      // If one of the strings was removed (and the other one wasn't), mark the
+      // corresponding diff as lower.
+      if (lastStringA === undefined && lastStringB !== undefined)
+        return -1;
+      if (lastStringA !== undefined && lastStringB === undefined)
+        return 1;
 
-      sizeEl.isDelta = true;
+      // If both strings were removed, compare the first values (greater first
+      // value implies smaller (!) diff).
+      if (lastStringA === undefined && lastStringB === undefined)
+        return this.compareSingleFields(firstStringB, firstStringA);
 
-      if (value === 0)
-        return sizeEl;
+      var areStringsAEqual = firstStringA === lastStringA;
+      var areStringsBEqual = firstStringB === lastStringB;
 
-      var wrapperEl = document.createElement('span');
-      wrapperEl.style.color = value > 0 ? 'red' : 'green';
-      wrapperEl.appendChild(sizeEl);
-      return wrapperEl;
-    },
+      // Consider diffs of strings that did not change to be smaller than diffs
+      // of strings that did change.
+      if (areStringsAEqual && areStringsBEqual)
+        return 0;
+      if (areStringsAEqual)
+        return -1;
+      if (areStringsBEqual)
+        return 1;
 
-    compareSingleAttributeValues: function(attrValueA, attrValueB) {
-      return attrValueA - attrValueB;
-    },
-
-    compareMultipleAttributesDiff: function(firstAttrA, lastAttrA, firstAttrB,
-        lastAttrB) {
-      return this.getDiffAttrValue(firstAttrA, lastAttrA) -
-          this.getDiffAttrValue(firstAttrB, lastAttrB);
-    },
-
-    getDiffAttrValue: function(firstAttr, lastAttr) {
-      var firstValue = firstAttr === undefined ? 0 : firstAttr.value;
-      var lastValue = lastAttr === undefined ? 0 : lastAttr.value;
-      return lastValue - firstValue;
+      // Both strings changed. We are unable to determine the ordering of the
+      // diffs.
+      return 0;
     }
   };
 
   /**
    * @constructor
    */
-  function MemoryCell(attrs) {
-    this.attrs = attrs;
+  function NumericMemoryColumn(name, cellPath, aggregationMode) {
+    MemoryColumn.call(this, name, cellPath, aggregationMode);
   }
 
-  MemoryCell.extractAttributes = function(cell) {
-    if (cell === undefined)
-      return undefined;
-    return cell.attrs;
+  // Avoid tiny positive/negative diffs (displayed in the UI as '+0.0 B' and
+  // '-0.0 B') due to imprecise floating-point arithmetic by treating all diffs
+  // within the (-DIFF_EPSILON, DIFF_EPSILON) range as zeros.
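+  // For example, a diff computed as 1024.00004 - 1024 falls inside that range
+  // and is reported by getDiffFieldValue_ below as 0 rather than '+0.0 B'.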
+  NumericMemoryColumn.DIFF_EPSILON = 0.0001;
+
+  NumericMemoryColumn.prototype = {
+    __proto__: MemoryColumn.prototype,
+
+    aggregateCells: function(row, subRows) {
+      var subRowCells = subRows.map(this.cell, this);
+
+      // Determine whether at least one sub-row cell contains a defined
+      // numeric, and work out the number of timestamps.
+      var hasDefinedSubRowNumeric = false;
+      var timestampCount = undefined;
+      subRowCells.forEach(function(subRowCell) {
+        if (subRowCell === undefined)
+          return;
+
+        var subRowNumerics = subRowCell.fields;
+        if (subRowNumerics === undefined)
+          return;
+
+        if (timestampCount === undefined)
+          timestampCount = subRowNumerics.length;
+        else if (timestampCount !== subRowNumerics.length)
+          throw new Error('Sub-rows have different numbers of timestamps');
+
+        if (hasDefinedSubRowNumeric)
+          return;  // Avoid unnecessary traversals of the numerics.
+        hasDefinedSubRowNumeric = subRowNumerics.some(function(numeric) {
+          return numeric !== undefined;
+        });
+      });
+      if (!hasDefinedSubRowNumeric)
+        return;  // No numeric to aggregate.
+
+      // Get or create the row cell.
+      var cellPath = this.cellPath;
+      var rowCell = row;
+      for (var i = 0; i < cellPath.length; i++) {
+        var nextStepName = cellPath[i];
+        var nextStep = rowCell[nextStepName];
+        if (nextStep === undefined) {
+          if (i < cellPath.length - 1)
+            nextStep = {};
+          else
+            nextStep = new MemoryCell(undefined);
+          rowCell[nextStepName] = nextStep;
+        }
+        rowCell = nextStep;
+      }
+      if (rowCell.fields === undefined) {
+        rowCell.fields = new Array(timestampCount);
+      } else if (rowCell.fields.length !== timestampCount) {
+        throw new Error(
+            'Row has a different number of timestamps than sub-rows');
+      }
+
+      for (var i = 0; i < timestampCount; i++) {
+        if (rowCell.fields[i] !== undefined)
+          continue;
+        rowCell.fields[i] = tr.model.MemoryAllocatorDump.aggregateNumerics(
+            subRowCells.map(function(subRowCell) {
+              if (subRowCell === undefined || subRowCell.fields === undefined)
+                return undefined;
+              return subRowCell.fields[i];
+            }));
+      }
+    },
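+
+    // Sketch of the aggregation above (numbers stand in for ScalarNumeric
+    // fields): given two sub-row cells with fields [8, 16] and [4, undefined],
+    // a row cell with no defined fields is filled with
+    // aggregateNumerics([8, 4]) at timestamp 0 and
+    // aggregateNumerics([16, undefined]) at timestamp 1.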
+
+    formatSingleField: function(numeric) {
+      if (numeric === undefined)
+        return '';
+      return tr.v.ui.createScalarSpan(numeric);
+    },
+
+    formatMultipleFieldsDiff: function(firstNumeric, lastNumeric) {
+      return this.formatSingleField(
+          this.getDiffField_(firstNumeric, lastNumeric));
+    },
+
+    compareSingleFields: function(numericA, numericB) {
+      return numericA.value - numericB.value;
+    },
+
+    compareMultipleFieldsDiff: function(firstNumericA, lastNumericA,
+        firstNumericB, lastNumericB) {
+      return this.getDiffFieldValue_(firstNumericA, lastNumericA) -
+          this.getDiffFieldValue_(firstNumericB, lastNumericB);
+    },
+
+    getDiffField_: function(firstNumeric, lastNumeric) {
+      var definedNumeric = firstNumeric || lastNumeric;
+      return new tr.v.ScalarNumeric(definedNumeric.unit.correspondingDeltaUnit,
+          this.getDiffFieldValue_(firstNumeric, lastNumeric));
+    },
+
+    getDiffFieldValue_: function(firstNumeric, lastNumeric) {
+      var firstValue = firstNumeric === undefined ? 0 : firstNumeric.value;
+      var lastValue = lastNumeric === undefined ? 0 : lastNumeric.value;
+      var diff = lastValue - firstValue;
+      return Math.abs(diff) < NumericMemoryColumn.DIFF_EPSILON ? 0 : diff;
+    }
   };
 
-  function fieldGetter(/* fields */) {
-    var fields = tr.b.asArray(arguments);
-    return function(row) {
-      var value = row;
-      for (var i = 0; i < fields.length; i++)
-        value = value[fields[i]];
-      return value;
-    };
+  /**
+   * @constructor
+   */
+  function MemoryCell(fields) {
+    this.fields = fields;
   }
 
+  MemoryCell.extractFields = function(cell) {
+    if (cell === undefined)
+      return undefined;
+    return cell.fields;
+  };
+
-  /** Limit for the number of sub-rows for recursive table row expansion. */
+  /** Limit on the number of visible rows for recursive table row expansion. */
-  var RECURSIVE_EXPANSION_MAX_SUB_ROW_COUNT = 10;
+  var RECURSIVE_EXPANSION_MAX_VISIBLE_ROW_COUNT = 10;
 
   function expandTableRowsRecursively(table) {
-    function expandRowRecursively(row) {
-      if (row.subRows === undefined || row.subRows.length === 0)
-        return;
-      if (row.subRows.length > RECURSIVE_EXPANSION_MAX_SUB_ROW_COUNT)
-        return;
-      table.setExpandedForTableRow(row, true);
-      row.subRows.forEach(expandRowRecursively);
-    }
-    table.tableRows.forEach(expandRowRecursively);
-  }
+    var currentLevelRows = table.tableRows;
+    var totalVisibleRowCount = currentLevelRows.length;
 
-  // TODO(petrcermak): This code is almost the same as
-  // MemoryAllocatorDump.aggregateAttributes. Consider sharing code between
-  // the two functions.
-  function aggregateTableRowCellsRecursively(row, cellKey) {
-    var subRows = row.subRows;
-    if (subRows === undefined)
-      return;
-
-    subRows.forEach(function(subRow) {
-      aggregateTableRowCellsRecursively(subRow, cellKey);
-    });
-
-    aggregateTableRowCells(row, subRows, cellKey);
-  }
-
-  function aggregateTableRowCells(row, subRows, cellKey) {
-    var rowCells = row[cellKey];
-    if (rowCells === undefined)
-      row[cellKey] = rowCells = {};
-
-    var subRowCellNames = {};
-    subRows.forEach(function(subRow) {
-      var subRowCells = subRow[cellKey];
-      if (subRowCells === undefined)
-        return;
-      tr.b.iterItems(subRowCells, function(columnName) {
-        subRowCellNames[columnName] = true;
-      });
-    });
-
-    tr.b.iterItems(subRowCellNames, function(cellName) {
-      var existingRowCell = rowCells[cellName];
-      var existingRowAttributes = MemoryCell.extractAttributes(existingRowCell);
-
-      // Determine how many timestamps we need to aggregate the attribute for.
-      var timestampCount = undefined;
-      if (existingRowAttributes !== undefined)
-        timestampCount = existingRowAttributes.length;
-      subRows.forEach(function(subRow) {
-        var subRowCells = subRow[cellKey];
-        if (subRowCells === undefined)
+    while (currentLevelRows.length > 0) {
+      // Calculate the total number of sub-rows on the current level.
+      var nextLevelRowCount = 0;
+      currentLevelRows.forEach(function(currentLevelRow) {
+        var subRows = currentLevelRow.subRows;
+        if (subRows === undefined || subRows.length === 0)
           return;
-        var subRowCellAttributes = MemoryCell.extractAttributes(
-            subRowCells[cellName]);
-        if (subRowCellAttributes === undefined)
-          return;
-        if (timestampCount === undefined)
-          timestampCount = subRowCellAttributes.length;
-        else if (timestampCount !== subRowCellAttributes.length)
-          throw new Error('Rows have different number of timestamps');
+        nextLevelRowCount += subRows.length;
       });
-      if (timestampCount === undefined)
-        throw new Error('Handling non-existent cell name \'' + cellName + '\'');
 
-      // Aggregate the attributes for each timestamp.
-      var aggregatedAttributes = new Array(timestampCount);
-      for (var i = 0; i < timestampCount; i++) {
-        var existingRowAttribute = undefined;
-        if (existingRowAttributes !== undefined)
-          existingRowAttribute = existingRowAttributes[i];
-        var subRowAttributes = subRows.map(function(subRow) {
-          var subRowCells = subRow[cellKey];
-          if (subRowCells === undefined)
-            return undefined;
-          var subRowCellAttributes = MemoryCell.extractAttributes(
-              subRowCells[cellName]);
-          if (subRowCellAttributes === undefined)
-            return;
-          return subRowCellAttributes[i];
+      // Determine whether expanding all rows on the current level would cause
+      // the total number of visible rows to go over the limit.
+      if (totalVisibleRowCount + nextLevelRowCount >
+          RECURSIVE_EXPANSION_MAX_VISIBLE_ROW_COUNT) {
+        break;
+      }
+
+      // Expand all rows on the current level and gather their sub-rows.
+      var nextLevelRows = new Array(nextLevelRowCount);
+      var nextLevelRowIndex = 0;
+      currentLevelRows.forEach(function(currentLevelRow) {
+        var subRows = currentLevelRow.subRows;
+        if (subRows === undefined || subRows.length === 0)
+          return;
+        table.setExpandedForTableRow(currentLevelRow, true);
+        subRows.forEach(function(subRow) {
+          nextLevelRows[nextLevelRowIndex++] = subRow;
         });
-        aggregatedAttributes[i] = tr.model.Attribute.aggregate(
-            subRowAttributes, existingRowAttribute);
-      }
+      });
 
-      if (existingRowCell !== undefined) {
-        // The cell might contain some extra fields (e.g. custom
-        // buildDetailsPane method) which we don't want to throw away.
-        existingRowCell.attrs = aggregatedAttributes;
-      } else {
-        rowCells[cellName] = new MemoryCell(aggregatedAttributes);
-      }
-    });
+      // Update the total number of visible rows and progress to the next level.
+      totalVisibleRowCount += nextLevelRowCount;
+      currentLevelRows = nextLevelRows;
+    }
   }
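+
+  // For example, with the limit of 10 above: a table with 3 top-level rows
+  // that have 4 sub-rows in total gets its first level expanded (3 + 4 = 7),
+  // but a second level of 4 or more rows would push the count past 10 and is
+  // left collapsed.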
 
-  function createCells(timeToValues, valueAttrsGetter, opt_cellAddedCallback) {
-    var attrNameToAttrs = tr.b.invertArrayOfDicts(
-        timeToValues, valueAttrsGetter);
-    return tr.b.mapItems(attrNameToAttrs, function(attrName, attrs) {
-      var cell = new tr.ui.analysis.MemoryCell(attrs);
-      if (opt_cellAddedCallback !== undefined)
-        opt_cellAddedCallback(attrName, cell);
-      return cell;
-    });
-  }
-
-  function addAttributeIfDefined(dstDict, attrName, attrClass, units, value,
-      opt_addedCallback) {
-    if (value === undefined)
+  function aggregateTableRowCellsRecursively(row, columns, opt_predicate) {
+    var subRows = row.subRows;
+    if (subRows === undefined || subRows.length === 0)
       return;
-    var attr = new attrClass(units, value);
-    dstDict[attrName] = attr;
-    if (opt_addedCallback !== undefined)
-      opt_addedCallback(attr);
+
+    subRows.forEach(function(subRow) {
+      aggregateTableRowCellsRecursively(subRow, columns, opt_predicate);
+    });
+
+    if (opt_predicate === undefined || opt_predicate(row.contexts))
+      aggregateTableRowCells(row, subRows, columns);
+  }
+
+  function aggregateTableRowCells(row, subRows, columns) {
+    columns.forEach(function(column) {
+      if (!(column instanceof MemoryColumn))
+        return;
+      column.aggregateCells(row, subRows);
+    });
+  }
+
+  function createCells(timeToValues, valueFieldsGetter) {
+    var fieldNameToFields = tr.b.invertArrayOfDicts(
+        timeToValues, valueFieldsGetter);
+    return tr.b.mapItems(fieldNameToFields, function(fieldName, fields) {
+      return new tr.ui.analysis.MemoryCell(fields);
+    });
+  }
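+
+  // Rough sketch of createCells (plain values stand in for whatever the
+  // getter extracts): if the getter yields [{a: 1, b: 2}, {a: 3}] across the
+  // timestamps, the result is roughly
+  // {a: MemoryCell([1, 3]), b: MemoryCell([2, undefined])}.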
+
+  function createWarningInfo(message) {
+    return {
+      message: message,
+      icon: String.fromCharCode(9888),  // U+26A0 'WARNING SIGN'.
+      color: 'red'
+    };
   }
 
   return {
     TitleColumn: TitleColumn,
     MemoryColumn: MemoryColumn,
-    ScalarMemoryColumn: ScalarMemoryColumn,
+    StringMemoryColumn: StringMemoryColumn,
+    NumericMemoryColumn: NumericMemoryColumn,
     MemoryCell: MemoryCell,
-    fieldGetter: fieldGetter,
     expandTableRowsRecursively: expandTableRowsRecursively,
     aggregateTableRowCellsRecursively: aggregateTableRowCellsRecursively,
     aggregateTableRowCells: aggregateTableRowCells,
     createCells: createCells,
-    addAttributeIfDefined: addAttributeIfDefined
+    createWarningInfo: createWarningInfo
   };
 });
 </script>
diff --git a/catapult/tracing/tracing/ui/analysis/memory_dump_sub_view_util_test.html b/catapult/tracing/tracing/ui/analysis/memory_dump_sub_view_util_test.html
index 8a7ccc1..4e31694 100644
--- a/catapult/tracing/tracing/ui/analysis/memory_dump_sub_view_util_test.html
+++ b/catapult/tracing/tracing/ui/analysis/memory_dump_sub_view_util_test.html
@@ -5,12 +5,14 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/tracing/base/units/units.html">
+<link rel="import" href="/tracing/base/iteration_helpers.html">
 <link rel="import"
     href="/tracing/ui/analysis/memory_dump_sub_view_test_utils.html">
 <link rel="import" href="/tracing/ui/analysis/memory_dump_sub_view_util.html">
+<link rel="import" href="/tracing/ui/base/dom_helpers.html">
 <link rel="import" href="/tracing/ui/base/table.html">
-<link rel="import" href="/tracing/model/attribute.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <script>
 'use strict';
@@ -19,32 +21,30 @@
   var TitleColumn = tr.ui.analysis.TitleColumn;
   var MemoryColumn = tr.ui.analysis.MemoryColumn;
   var AggregationMode = MemoryColumn.AggregationMode;
-  var ScalarMemoryColumn = tr.ui.analysis.ScalarMemoryColumn;
+  var StringMemoryColumn = tr.ui.analysis.StringMemoryColumn;
+  var NumericMemoryColumn = tr.ui.analysis.NumericMemoryColumn;
   var MemoryCell = tr.ui.analysis.MemoryCell;
-  var fieldGetter = tr.ui.analysis.fieldGetter;
   var expandTableRowsRecursively = tr.ui.analysis.expandTableRowsRecursively;
   var aggregateTableRowCells = tr.ui.analysis.aggregateTableRowCells;
   var aggregateTableRowCellsRecursively =
       tr.ui.analysis.aggregateTableRowCellsRecursively;
-  var StringAttribute = tr.model.StringAttribute;
-  var ScalarAttribute = tr.model.ScalarAttribute;
-  var UnknownAttribute = tr.model.UnknownAttribute;
-  var AttributeInfo = tr.model.AttributeInfo;
-  var AttributeInfoType = tr.model.AttributeInfoType;
-  var checkSizeAttributes = tr.ui.analysis.checkSizeAttributes;
-  var checkAttributes = tr.ui.analysis.checkAttributes;
+  var ScalarNumeric = tr.v.ScalarNumeric;
+  var sizeInBytes_smallerIsBetter =
+      tr.v.Unit.byName.sizeInBytes_smallerIsBetter;
+  var checkSizeNumericFields = tr.ui.analysis.checkSizeNumericFields;
+  var checkNumericFields = tr.ui.analysis.checkNumericFields;
+  var checkStringFields = tr.ui.analysis.checkStringFields;
   var createCells = tr.ui.analysis.createCells;
-  var addAttributeIfDefined = tr.ui.analysis.addAttributeIfDefined;
+  var createWarningInfo = tr.ui.analysis.createWarningInfo;
 
   function checkPercent(string, expectedPercent) {
     assert.equal(Number(string.slice(0, -1)), expectedPercent);
     assert.equal(string.slice(-1), '%');
   }
 
-  function checkMemoryColumnValueFormat(test, column, attrValues,
+  function checkMemoryColumnFieldFormat(test, column, fields,
       expectedTextContent, opt_expectedColor) {
-    var value = column.formatMultipleAttributeValues(
-        buildStringCell(column.unit, attrValues).attrs);
+    var value = column.formatMultipleFields(fields);
     if (expectedTextContent === undefined) {
       assert.strictEqual(value, '');
       assert.isUndefined(opt_expectedColor);  // Test sanity check.
@@ -63,59 +63,36 @@
       assert.strictEqual(node.style.color, opt_expectedColor);
   }
 
-  function checkCompareAttributesEqual(column, attrValuesA, attrValuesB) {
-    var attrsA = buildStringCell(column.unit, attrValuesA).attrs;
-    var attrsB = buildStringCell(column.unit, attrValuesB).attrs;
-    assert.strictEqual(column.compareAttributes(attrsA, attrsB), 0);
+  function checkCompareFieldsEqual(column, fieldValuesA, fieldValuesB) {
+    assert.strictEqual(column.compareFields(fieldValuesA, fieldValuesB), 0);
   }
 
-  function checkCompareAttributesLess(column, attrValuesA, attrValuesB) {
-    var attrsA = buildStringCell(column.unit, attrValuesA).attrs;
-    var attrsB = buildStringCell(column.unit, attrValuesB).attrs;
-    assert.isBelow(column.compareAttributes(attrsA, attrsB), 0);
-    assert.isAbove(column.compareAttributes(attrsB, attrsA), 0);
+  function checkCompareFieldsLess(column, fieldValuesA, fieldValuesB) {
+    assert.isBelow(column.compareFields(fieldValuesA, fieldValuesB), 0);
+    assert.isAbove(column.compareFields(fieldValuesB, fieldValuesA), 0);
   }
 
-  function checkScalarMemoryColumnValueFormat(test, column, attrValues,
-      expectedValue, expectedUnits, expectedIsDelta, opt_expectedColor) {
-    var value = column.formatMultipleAttributeValues(
-        buildScalarCell(column.unit, attrValues).attrs);
+  function checkNumericMemoryColumnFieldFormat(test, column, fieldValues, unit,
+      expectedValue) {
+    var value = column.formatMultipleFields(
+        buildScalarCell(unit, fieldValues).fields);
     if (expectedValue === undefined) {
       assert.equal(value, '');
       assert.isUndefined(expectedUnits);  // Test sanity check.
-      assert.isUndefined(expectedIsDelta);  // Test sanity check.
-      assert.isUndefined(opt_expectedColor);  // Test sanity check.
       return;
     }
 
     test.addHTMLOutput(value);
-    var scalarSpan;
-    if (opt_expectedColor === undefined) {
-      scalarSpan = value;
-    } else {
-      assert.lengthOf(value.childNodes, 1);
-      assert.equal(value.style.color, opt_expectedColor);
-      scalarSpan = value.childNodes[0];
-    }
-    assert.equal(scalarSpan.tagName, 'TR-UI-U-SCALAR-SPAN');
-    assert.equal(scalarSpan.value, expectedValue);
-    assert.equal(scalarSpan.unit, expectedUnits);
-    assert.equal(scalarSpan.isDelta, expectedIsDelta);
+    assert.equal(value.tagName, 'TR-V-UI-SCALAR-SPAN');
+    assert.equal(value.value, expectedValue);
+    assert.equal(value.unit, unit);
   }
 
-  function buildStringCell(units, values) {
+  function buildScalarCell(unit, values) {
     return new MemoryCell(values.map(function(value) {
       if (value === undefined)
         return undefined;
-      return new StringAttribute(units, value);
-    }));
-  }
-
-  function buildScalarCell(units, values) {
-    return new MemoryCell(values.map(function(value) {
-      if (value === undefined)
-        return undefined;
-      return new ScalarAttribute(units, value);
+      return new ScalarNumeric(unit, value);
     }));
   }
 
@@ -123,25 +100,26 @@
     return [
       {
         title: 'Row 1',
-        attributes: {
-          'cpu_temperature': buildStringCell('C',
-              ['below zero', 'absolute zero'])
+        fields: {
+          'cpu_temperature': new MemoryCell(['below zero', 'absolute zero'])
         },
         subRows: [
           {
             title: 'Row 1A',
-            attributes: {
-              'page_size': buildScalarCell('bytes', [1024, 1025])
+            fields: {
+              'page_size': buildScalarCell(sizeInBytes_smallerIsBetter,
+                  [1024, 1025])
             }
           },
           {
             title: 'Row 1B',
-            attributes: {
-              'page_size': buildScalarCell('bytes', [512, 513]),
-              'mixed': buildStringCell('ms', ['0.01', '0.10']),
+            fields: {
+              'page_size': buildScalarCell(sizeInBytes_smallerIsBetter,
+                  [512, 513]),
+              'mixed': new MemoryCell(['0.01', '0.10']),
               'mixed2': new MemoryCell([
-                new ScalarAttribute('GeV', 2.43e18),
-                new ScalarAttribute('MWh', 0.5433)
+                new ScalarNumeric(tr.v.Unit.byName.powerInWatts, 2.43e18),
+                new ScalarNumeric(tr.v.Unit.byName.powerInWatts, 0.5433)
               ])
             }
           }
@@ -149,65 +127,96 @@
       },
       {
         title: 'Row 2',
-        attributes: {
+        fields: {
           'cpu_temperature': undefined,
-          'mixed': buildScalarCell('ms', [0.99, 0.999])
+          'mixed': buildScalarCell(tr.v.Unit.byName.timeDurationInMs,
+              [0.99, 0.999])
         }
       }
     ];
   }
 
-  function checkMemoryColumn(column, expectedName, expectedTitle, expectedUnits,
+  function checkMemoryColumn(column, expectedName, expectedTitle,
       expectedAggregationMode, testRow, expectedCell, expectedType) {
-    assert.equal(column.name, expectedName);
+    assert.strictEqual(column.name, expectedName);
     if (typeof expectedTitle === 'function')
       expectedTitle(column.title);
     else
-      assert.equal(column.title, expectedTitle);
-    assert.strictEqual(column.units, expectedUnits);
+      assert.strictEqual(column.title, expectedTitle);
     assert.strictEqual(column.aggregationMode, expectedAggregationMode);
     assert.strictEqual(column.cell(testRow), expectedCell);
     assert.instanceOf(column, expectedType);
   }
 
+  function checkCellValue(
+      test, value, expectedText, expectedColor, opt_expectedInfos) {
+    var expectedInfos = opt_expectedInfos || [];
+    assert.lengthOf(value.childNodes, 1 + expectedInfos.length);
+    assert.strictEqual(value.style.color, expectedColor);
+    if (typeof expectedText === 'string')
+      assert.strictEqual(value.childNodes[0].textContent, expectedText);
+    else
+      expectedText(value.childNodes[0]);
+    for (var i = 0; i < expectedInfos.length; i++) {
+      var expectedInfo = expectedInfos[i];
+      var infoEl = value.childNodes[i + 1];
+      assert.strictEqual(infoEl.textContent, expectedInfo.icon);
+      assert.strictEqual(infoEl.title, expectedInfo.message);
+      assert.strictEqual(infoEl.style.color, expectedInfo.color || '');
+    }
+    test.addHTMLOutput(value);
+  }
+
+  function sizeSpanMatcher(expectedValue, opt_expectedIsDelta) {
+    return function(element) {
+      assert.strictEqual(element.tagName, 'TR-V-UI-SCALAR-SPAN');
+      assert.strictEqual(element.value, expectedValue);
+      assert.strictEqual(element.unit, opt_expectedIsDelta ?
+          tr.v.Unit.byName.sizeInBytesDelta_smallerIsBetter :
+          tr.v.Unit.byName.sizeInBytes_smallerIsBetter);
+    };
+  }
+
   test('checkTitleColumn_value', function() {
     var column = new TitleColumn('column_title');
     assert.equal(column.title, 'column_title');
     assert.isFalse(column.supportsCellSelection);
 
-    var row = {title: 'undefined', defined: undefined};
+    var row = {title: 'undefined', contexts: undefined};
     assert.equal(column.formatTitle(row), 'undefined');
     assert.equal(column.value(row), 'undefined');
 
-    var row = {title: 'constant', defined: [true, true, true, true]};
+    var row = {title: 'constant', contexts: [{}, {}, {}, {}]};
     assert.equal(column.formatTitle(row), 'constant');
     assert.equal(column.value(row), 'constant');
 
-    var row = {title: 'added', defined: [false, false, false, true]};
+    var row = {title: 'added', contexts: [undefined, undefined, undefined, {}]};
     assert.equal(column.formatTitle(row), 'added');
     var value = column.value(row);
     assert.equal(value.textContent, '+++\u00A0added');
     assert.equal(value.style.color, 'red');
 
-    var row = {title: 'removed', defined: [true, true, false, false]};
+    var row = {title: 'removed', contexts: [true, true, undefined, undefined]};
     assert.equal(column.formatTitle(row), 'removed');
     var value = column.value(row);
     assert.equal(value.textContent, '---\u00A0removed');
     assert.equal(value.style.color, 'green');
 
-    var row = {title: 'flaky', defined: [true, false, true, true]};
+    var row = {title: 'flaky', contexts: [true, undefined, true, true]};
     assert.equal(column.formatTitle(row), 'flaky');
     var value = column.value(row);
     assert.equal(value.textContent, 'flaky');
     assert.equal(value.style.color, 'purple');
 
-    var row = {title: 'added-flaky', defined: [false, true, false, true]};
+    var row =
+        {title: 'added-flaky', contexts: [undefined, {}, undefined, true]};
     assert.equal(column.formatTitle(row), 'added-flaky');
     var value = column.value(row);
     assert.equal(value.textContent, '+++\u00A0added-flaky');
     assert.equal(value.style.color, 'purple');
 
-    var row = {title: 'removed-flaky', defined: [true, false, true, false]};
+    var row =
+        {title: 'removed-flaky', contexts: [true, undefined, {}, undefined]};
     assert.equal(column.formatTitle(row), 'removed-flaky');
     var value = column.value(row);
     assert.equal(value.textContent, '---\u00A0removed-flaky');
@@ -222,33 +231,15 @@
     assert.isAbove(column.cmp({title: '10'}, {title: '2'}), 0);
   });
 
-  test('checkMemoryColumn_fromRows_defaultColumnConstructorRules', function() {
-    var rows = buildTestRows();
-    var columns =
-        MemoryColumn.fromRows(rows, 'attributes', AggregationMode.DIFF);
-    assert.lengthOf(columns, 4);
+  test('checkMemoryColumn_fromRows', function() {
+    function MockColumn0() {
+      MemoryColumn.apply(this, arguments);
+    }
+    MockColumn0.prototype = {
+      __proto__: MemoryColumn.prototype,
+      get title() { return 'MockColumn0'; }
+    };
 
-    var cpuTemperatureColumn = columns[0];
-    checkMemoryColumn(cpuTemperatureColumn, 'cpu_temperature',
-        'cpu_temperature', 'C', AggregationMode.DIFF,
-        {attributes: {cpu_temperature: 42}}, 42, MemoryColumn);
-
-    var pageSizeColumn = columns[1];
-    checkMemoryColumn(pageSizeColumn, 'page_size', 'page_size', 'bytes',
-        AggregationMode.DIFF, {attributes: {page_size: 'large'}}, 'large',
-        ScalarMemoryColumn);
-
-    var mixedColumn = columns[2];
-    checkMemoryColumn(mixedColumn, 'mixed', 'mixed', undefined /* unitless */,
-        AggregationMode.DIFF, {attributes: {mixed: 89}}, 89, MemoryColumn);
-
-    var mixed2Column = columns[3];
-    checkMemoryColumn(mixed2Column, 'mixed2', 'mixed2',
-        undefined /* unitless */, AggregationMode.DIFF,
-        {attributes: {mixed2: 'invalid'}}, 'invalid', MemoryColumn);
-  });
-
-  test('checkMemoryColumn_fromRows_customColumnConstructorRules', function() {
     function MockColumn1() {
       MemoryColumn.apply(this, arguments);
     }
@@ -267,63 +258,50 @@
 
     var rules = [
       {
-        condition: /size/
-        // columnConstructor intentionally undefined.
+        condition: /size/,
+        importance: 10,
+        columnConstructor: MockColumn0
       },
       {
         condition: 'cpu_temperature',
+        importance: 0,
         columnConstructor: MockColumn1
       },
       {
         condition: 'unmatched',
+        importance: -1,
         get columnConstructor() {
           throw new Error('The constructor should never be retrieved');
         }
       },
       {
-        condition: /d$/,
+        importance: 1,
         columnConstructor: MockColumn2
       }
     ];
 
     var rows = buildTestRows();
-    var columns = MemoryColumn.fromRows(rows, 'attributes', AggregationMode.MAX,
+    var columns = MemoryColumn.fromRows(rows, 'fields', AggregationMode.MAX,
         rules);
     assert.lengthOf(columns, 4);
 
-    var cpuTemperatureColumn = columns[0];
+    var pageSizeColumn = columns[0];
+    checkMemoryColumn(pageSizeColumn, 'page_size', 'MockColumn0',
+        AggregationMode.MAX, {fields: {page_size: 'large'}}, 'large',
+        MockColumn0);
+
+    var mixedColumn = columns[1];
+    checkMemoryColumn(mixedColumn, 'mixed', 'MockColumn2', AggregationMode.MAX,
+        {fields: {mixed: 89}}, 89, MockColumn2);
+
+    var mixed2Column = columns[2];
+    checkMemoryColumn(mixed2Column, 'mixed2', 'MockColumn2',
+        AggregationMode.MAX, {fields: {mixed2: 'invalid'}}, 'invalid',
+        MemoryColumn);
+
+    var cpuTemperatureColumn = columns[3];
     checkMemoryColumn(cpuTemperatureColumn, 'cpu_temperature', 'MockColumn1',
-        'C', AggregationMode.MAX, {attributes: {cpu_temperature: 42}}, 42,
-        MockColumn1);
-
-    var pageSizeColumn = columns[1];
-    checkMemoryColumn(pageSizeColumn, 'page_size', 'page_size', 'bytes',
-        AggregationMode.MAX, {attributes: {page_size: 'large'}}, 'large',
-        ScalarMemoryColumn);
-
-    var mixedColumn = columns[2];
-    checkMemoryColumn(mixedColumn, 'mixed', 'MockColumn2',
-        undefined /* unitless */, AggregationMode.MAX,
-        {attributes: {mixed: 89}}, 89, MockColumn2);
-
-    var mixed2Column = columns[3];
-    checkMemoryColumn(mixed2Column, 'mixed2', 'mixed2',
-        undefined /* unitless */, AggregationMode.MAX,
-        {attributes: {mixed2: 'invalid'}}, 'invalid', MemoryColumn);
-  });
-
-  test('checkMemoryColumn_constructorFromAttributeTraits', function() {
-    // String attribute.
-    assert.strictEqual(MemoryColumn.constructorFromAttributeTraits(
-        {constructor: StringAttribute, units: 'Mbps'}), MemoryColumn);
-
-    // Scalar attribute.
-    assert.strictEqual(MemoryColumn.constructorFromAttributeTraits(
-        {constructor: ScalarAttribute, units: 'bytes'}), ScalarMemoryColumn);
-
-    // Unknown attribute.
-    assert.strictEqual(MemoryColumn.constructorFromAttributeTraits(
-        {constructor: UnknownAttribute, units: undefined}), MemoryColumn);
+        AggregationMode.MAX, {fields: {cpu_temperature: 42}}, 42, MockColumn1);
   });
 
   test('checkMemoryColumn_spaceEqually', function() {
@@ -357,125 +335,90 @@
     checkPercent(columns[1].width, 50);
   });
 
-  test('checkMemoryColumn_sortByImportance', function() {
-    var columns = [
-      new MemoryColumn('page_size', 'bytes', fieldGetter('pgsize')),
-      new MemoryColumn('resident_size', 'bytes', fieldGetter('rss')),
-      new MemoryColumn('object_count', 'objects', fieldGetter('objcount')),
-      new MemoryColumn('proportional_size', 'bytes', fieldGetter('pss'))
-    ];
-
-    var rules = [
-      {
-        condition: 'page_size',
-        importance: 8
-      },
-      {
-        condition: /size/,
-        importance: 10
-      },
-      {
-        importance: 9
-      }
-    ];
-
-    MemoryColumn.sortByImportance(columns, rules);
-
-    assert.lengthOf(columns, 4);
-    assert.equal(columns[0].name, 'proportional_size');
-    assert.equal(columns[1].name, 'resident_size');
-    assert.equal(columns[2].name, 'object_count');
-    assert.equal(columns[3].name, 'page_size');
+  test('checkMemoryColumn_instantiate', function() {
+    var c = new MemoryColumn('test_column', ['x'], AggregationMode.MAX);
+    assert.equal(c.name, 'test_column');
+    assert.equal(c.title, 'test_column');
+    assert.equal(c.cell({x: 95}), 95);
+    assert.isUndefined(c.width);
+    assert.isUndefined(c.color());
   });
 
-  test('checkMemoryColumn_columnNamesToImportanceRules', function() {
-    var columnNames = ['A', 'B', 'C'];
-    var importanceRules =
-        MemoryColumn.columnNamesToImportanceRules(columnNames);
+  test('checkMemoryColumn_cell', function() {
+    var c = new MemoryColumn('test_column', ['a', 'b'], AggregationMode.MAX);
+    var cell = new MemoryCell(undefined);
 
-    assert.lengthOf(importanceRules, 3);
-
-    assert.equal(importanceRules[0].condition, 'A');
-    assert.equal(importanceRules[0].importance, 3);
-    assert.equal(importanceRules[1].condition, 'B');
-    assert.equal(importanceRules[1].importance, 2);
-    assert.equal(importanceRules[2].condition, 'C');
-    assert.equal(importanceRules[2].importance, 1);
+    assert.isUndefined(c.cell(undefined));
+    assert.isUndefined(c.cell({b: cell}));
+    assert.isUndefined(c.cell({a: {c: cell}}));
+    assert.strictEqual(c.cell({a: {b: cell, c: 42}}), cell);
   });
 
   test('checkMemoryColumn_fields', function() {
-    var c = new MemoryColumn('test_column', 'ms', fieldGetter('x'),
-        AggregationMode.MAX);
-    assert.equal(c.name, 'test_column');
-    assert.equal(c.title, 'test_column');
-    assert.equal(c.units, 'ms');
-    assert.equal(c.cell({x: 95}), 95);
-    assert.isUndefined(c.width);
-    assert.isUndefined(c.color);
-  });
-
-  test('checkMemoryColumn_attrs', function() {
-    var c = new MemoryColumn('test_column', 'ms', fieldGetter('x'),
+    var c = new MemoryColumn('test_column', ['x'],
         AggregationMode.MAX);
 
-    // Undefined cell or attribute inside cell.
-    assert.isUndefined(c.attrs({}));
-    assert.isUndefined(c.attrs({x: new MemoryCell(undefined)}));
+    // Undefined cell or field inside cell.
+    assert.isUndefined(c.fields({}));
+    assert.isUndefined(c.fields({x: new MemoryCell(undefined)}));
 
-    // Defined attribute(s) inside cell.
-    var attr1 = new ScalarAttribute('hPa', 1013.25);
-    var attr2 = new ScalarAttribute('hPa', 1065);
-    var row1 = {x: new MemoryCell([attr1])};
-    var row2 = {x: new MemoryCell([attr1, attr2])};
-    assert.deepEqual(c.attrs(row1), [attr1]);
-    assert.deepEqual(c.attrs(row2), [attr1, attr2]);
+    // Defined field(s) inside cell.
+    var field1 = new ScalarNumeric(tr.v.Unit.byName.powerInWatts, 1013.25);
+    var field2 = new ScalarNumeric(tr.v.Unit.byName.powerInWatts, 1065);
+    var row1 = {x: new MemoryCell([field1])};
+    var row2 = {x: new MemoryCell([field1, field2])};
+    assert.deepEqual(c.fields(row1), [field1]);
+    assert.deepEqual(c.fields(row2), [field1, field2]);
   });
 
-  test('checkMemoryColumn_hasAllRelevantAttrsUndefined', function() {
-    // Single attribute.
-    var c1 = new MemoryColumn('single_column', 'ms', fieldGetter('x'),
+  test('checkMemoryColumn_hasAllRelevantFieldsUndefined', function() {
+    // Single field.
+    var c1 = new MemoryColumn('single_column', ['x'],
         undefined /* aggregation mode */);
-    assert.isTrue(c1.hasAllRelevantAttrsUndefined([undefined]));
-    assert.isFalse(c1.hasAllRelevantAttrsUndefined(
-        [new ScalarAttribute('bytes', 16)]));
+    assert.isTrue(c1.hasAllRelevantFieldsUndefined([undefined]));
+    assert.isFalse(c1.hasAllRelevantFieldsUndefined(
+        [new ScalarNumeric(sizeInBytes_smallerIsBetter, 16)]));
 
-    // Multiple attributes, diff aggregation mode.
-    var c2 = new MemoryColumn('diff_column', 'ms', fieldGetter('x'),
+    // Multiple fields, diff aggregation mode.
+    var c2 = new MemoryColumn('diff_column', ['x'],
         AggregationMode.DIFF);
-    assert.isTrue(c2.hasAllRelevantAttrsUndefined([undefined, undefined]));
-    assert.isTrue(c2.hasAllRelevantAttrsUndefined(
+    assert.isTrue(c2.hasAllRelevantFieldsUndefined([undefined, undefined]));
+    assert.isTrue(c2.hasAllRelevantFieldsUndefined(
         [undefined, undefined, undefined]));
-    assert.isTrue(c2.hasAllRelevantAttrsUndefined(
-        [undefined, new ScalarAttribute('bytes', 16), undefined]));
-    assert.isFalse(c2.hasAllRelevantAttrsUndefined(
-        [undefined, new ScalarAttribute('bytes', 32)]));
-    assert.isFalse(c2.hasAllRelevantAttrsUndefined(
-        [new ScalarAttribute('bytes', 32), undefined, undefined]));
-    assert.isFalse(c2.hasAllRelevantAttrsUndefined([
-        new ScalarAttribute('bytes', 16),
+    assert.isTrue(c2.hasAllRelevantFieldsUndefined(
+        [undefined, new ScalarNumeric(sizeInBytes_smallerIsBetter, 16),
+         undefined]));
+    assert.isFalse(c2.hasAllRelevantFieldsUndefined(
+        [undefined, new ScalarNumeric(sizeInBytes_smallerIsBetter, 32)]));
+    assert.isFalse(c2.hasAllRelevantFieldsUndefined(
+        [new ScalarNumeric(sizeInBytes_smallerIsBetter, 32), undefined,
+        undefined]));
+    assert.isFalse(c2.hasAllRelevantFieldsUndefined([
+        new ScalarNumeric(sizeInBytes_smallerIsBetter, 16),
         undefined,
-        new ScalarAttribute('bytes', 32)]));
+        new ScalarNumeric(sizeInBytes_smallerIsBetter, 32)]));
 
-    // Multiple attributes, max aggregation mode.
-    var c3 = new MemoryColumn('max_column', 'ms', fieldGetter('x'),
+    // Multiple fields, max aggregation mode.
+    var c3 = new MemoryColumn('max_column', ['x'],
         AggregationMode.MAX);
-    assert.isTrue(c3.hasAllRelevantAttrsUndefined([undefined, undefined]));
-    assert.isTrue(c3.hasAllRelevantAttrsUndefined(
+    assert.isTrue(c3.hasAllRelevantFieldsUndefined([undefined, undefined]));
+    assert.isTrue(c3.hasAllRelevantFieldsUndefined(
         [undefined, undefined, undefined]));
-    assert.isFalse(c3.hasAllRelevantAttrsUndefined(
-        [undefined, new ScalarAttribute('bytes', 16), undefined]));
-    assert.isFalse(c3.hasAllRelevantAttrsUndefined(
-        [undefined, new ScalarAttribute('bytes', 32)]));
-    assert.isFalse(c3.hasAllRelevantAttrsUndefined([
-        new ScalarAttribute('bytes', 32),
+    assert.isFalse(c3.hasAllRelevantFieldsUndefined(
+        [undefined, new ScalarNumeric(sizeInBytes_smallerIsBetter, 16),
+         undefined]));
+    assert.isFalse(c3.hasAllRelevantFieldsUndefined(
+        [undefined, new ScalarNumeric(sizeInBytes_smallerIsBetter, 32)]));
+    assert.isFalse(c3.hasAllRelevantFieldsUndefined([
+        new ScalarNumeric(sizeInBytes_smallerIsBetter, 32),
         undefined,
-        new ScalarAttribute('bytes', 16)]));
+        new ScalarNumeric(sizeInBytes_smallerIsBetter, 16)]));
   });
 
-  test('checkMemoryColumn_value_allAttrsUndefined', function() {
-    var c1 = new MemoryColumn('no_color', 'ms', fieldGetter('x'),
+  test('checkMemoryColumn_value_allFieldsUndefined', function() {
+    var c1 = new MemoryColumn('no_color', ['x'],
         AggregationMode.MAX);
-    var c2 = new MemoryColumn('color', 'ms', fieldGetter('x'),
+    var c2 = new MemoryColumn('color', ['x'],
         AggregationMode.DIFF);
     Object.defineProperty(c2, 'color', {
       get: function() {
@@ -483,6 +426,11 @@
       }
     });
 
+    // Infos should be completely ignored.
+    c1.addInfos = c2.addInfos = function() {
+      throw new Error('This method should never be called');
+    };
+
     [c1, c2].forEach(function(c) {
       assert.equal(c.value({}), '');
       assert.equal(c.value({x: new MemoryCell(undefined)}), '');
@@ -490,284 +438,14 @@
       assert.equal(c.value({x: new MemoryCell([undefined, undefined])}), '');
     });
 
-    // Diff should only take into account the first and last attribute value.
+    // Diff should only take into account the first and last field value.
     assert.equal(c2.value({x: new MemoryCell(
-        [undefined, new ScalarAttribute('bytes', 16), undefined])}), '');
-  });
-
-  test('checkMemoryColumn_value_singleAttribute', function() {
-    var c1 = new MemoryColumn('no_color', 'ms', fieldGetter('x'),
-        AggregationMode.MAX);
-    var c2 = new MemoryColumn('color_string', 'ms', fieldGetter('x'),
-        AggregationMode.MAX);
-    c2.color = '#009999';
-    var c3 = new MemoryColumn('color_function', 'ms', fieldGetter('x'),
-        AggregationMode.MAX);
-    c3.color = function(attrs) {
-      return attrs[0].value > '0' ? 'green' : undefined;
-    };
-
-    // Without infos.
-    var row = {x: buildScalarCell('ms', [123])};
-    var value1 = c1.value(row);
-    assert.equal(value1, '123');
-    this.addHTMLOutput(tr.ui.b.createSpan({textContent: value1}));
-    var value2 = c2.value(row);
-    assert.equal(value2.textContent, '123');
-    assert.equal(value2.style.color, 'rgb(0, 153, 153)');
-    this.addHTMLOutput(value2);
-    var value3 = c3.value(row);
-    assert.equal(value3.textContent, '123');
-    assert.equal(value3.style.color, 'green');
-    this.addHTMLOutput(value3);
-
-    var row = {x: buildScalarCell('ms', [-123])};
-    var value1 = c1.value(row);
-    assert.equal(value1, '-123');
-    this.addHTMLOutput(tr.ui.b.createSpan({textContent: value1}));
-    var value2 = c2.value(row);
-    assert.equal(value2.textContent, '-123');
-    assert.equal(value2.style.color, 'rgb(0, 153, 153)');
-    this.addHTMLOutput(value2);
-    var value3 = c3.value(row);
-    assert.equal(value3, '-123');
-    this.addHTMLOutput(tr.ui.b.createSpan({textContent: value3}));
-
-    // With infos.
-    var checkCellValue = function(value, expectedText, expectedColor) {
-      assert.lengthOf(value.childNodes, 3);
-      assert.equal(value.style.color, expectedColor);
-      assert.equal(value.childNodes[0].textContent, expectedText);
-      assert.equal(value.childNodes[1].textContent, String.fromCharCode(9888));
-      assert.equal(value.childNodes[1].title, 'This value is too cool');
-      assert.equal(value.childNodes[2].textContent, String.fromCharCode(9903));
-      assert.equal(value.childNodes[2].title, 'Source: Test');
-      this.addHTMLOutput(value);
-    }.bind(this);
-
-    var cell = buildStringCell('ms', ['couple']);
-    cell.attrs[0].infos = [
-      new AttributeInfo(AttributeInfoType.WARNING, 'This value is too cool'),
-      new AttributeInfo(AttributeInfoType.LINK, 'Source: Test')
-    ];
-    checkCellValue(c1.value({x: cell}), 'couple', '');
-    checkCellValue(c2.value({x: cell}), 'couple', 'rgb(0, 153, 153)');
-    checkCellValue(c3.value({x: cell}), 'couple', 'green');
-
-    var cell = buildStringCell('ms', ['-couple']);
-    cell.attrs[0].infos = [
-      new AttributeInfo(AttributeInfoType.WARNING, 'This value is too cool'),
-      new AttributeInfo(AttributeInfoType.LINK, 'Source: Test')
-    ];
-    checkCellValue(c1.value({x: cell}), '-couple', '');
-    checkCellValue(c2.value({x: cell}), '-couple', 'rgb(0, 153, 153)');
-    checkCellValue(c3.value({x: cell}), '-couple', '');
-  });
-
-  test('checkMemoryColumn_value_multipleAttributes', function() {
-    var c1 = new MemoryColumn('test_column1', 'ms', fieldGetter('x'),
-        undefined /* aggregation mode */);
-    var c2 = new MemoryColumn('test_column2', 'ms', fieldGetter('x'),
-        AggregationMode.DIFF);
-    c2.color = '#009999';
-    var c3 = new MemoryColumn('test_column3', 'ms', fieldGetter('x'),
-        AggregationMode.MAX);
-    c3.color = function(attrs) {
-      return attrs.length % 2 === 1 ? 'green' : undefined;
-    };
-
-    var cell = buildStringCell('ms', ['old', 'new']);
-    var value1 = c1.value({x: cell});
-    assert.equal(value1.style.color, '');
-    assert.equal(value1.textContent, '(unsupported aggregation mode)');
-    this.addHTMLOutput(value1);
-    var value2 = c2.value({x: cell});
-    assert.equal(value2.style.color, 'rgb(0, 153, 153)');
-    assert.equal(value2.textContent, 'old \u2192 new');
-    this.addHTMLOutput(value2);
-    var value3 = c3.value({x: cell});
-    assert.equal(value3, 'old');
-
-    var cell = buildStringCell('ms', ['old', undefined, 'new']);
-    var value1 = c1.value({x: cell});
-    assert.equal(value1.style.color, '');
-    assert.equal(value1.textContent, '(unsupported aggregation mode)');
-    this.addHTMLOutput(value1);
-    var value2 = c2.value({x: cell});
-    assert.equal(value2.style.color, 'rgb(0, 153, 153)');
-    assert.equal(value2.textContent, 'old \u2192 new');
-    this.addHTMLOutput(value2);
-    var value3 = c3.value({x: cell});
-    assert.equal(value3.style.color, 'green');
-    assert.equal(value3.textContent, 'old');
-    this.addHTMLOutput(value3);
-  });
-
-  test('checkMemoryColumn_formatSingleAttributeValue', function() {
-    var c = new MemoryColumn('test_column', 'ms', fieldGetter('x'),
-        undefined /* aggregation mode */);
-
-    assert.equal(c.formatSingleAttributeValue(
-        new ScalarAttribute('bytes', 1024)), '1024');
-    assert.equal(c.formatSingleAttributeValue(
-        new StringAttribute('ms', '~10')), '~10');
-  });
-
-  test('checkMemoryColumn_formatMultipleAttributeValues_diff', function() {
-    var c = new MemoryColumn('test_column', 'ms', fieldGetter('x'),
-        AggregationMode.DIFF);
-
-    // Added value.
-    checkMemoryColumnValueFormat(this, c, [undefined, 'few'], '+few', 'red');
-    checkMemoryColumnValueFormat(this, c, [undefined, 64, 32], '+32', 'red');
-
-    // Removed value.
-    checkMemoryColumnValueFormat(this, c, ['00', undefined], '-00', 'green');
-    checkMemoryColumnValueFormat(this, c, [1, undefined, 2, undefined], '-1',
-        'green');
-
-    // Identical values.
-    checkMemoryColumnValueFormat(this, c, ['Unchanged', 'Unchanged'],
-        'Unchanged', undefined /* unchanged color (not an HTML element) */);
-    checkMemoryColumnValueFormat(this, c, [16, 32, undefined, 64, 16], '16',
-        undefined /* unchanged color (not an HTML element) */);
-
-    // Different values.
-    checkMemoryColumnValueFormat(this, c, ['A', 'C', undefined, 'C', 'B'],
-        'A \u2192 B', 'darkorange');
-    checkMemoryColumnValueFormat(this, c, [16, undefined, 64], '16 \u2192 64',
-        'darkorange');
-  });
-
-  test('checkMemoryColumn_formatMultipleAttributeValues_max', function() {
-    var c = new MemoryColumn('test_column', 'ms', fieldGetter('x'),
-        AggregationMode.MAX);
-
-    // Different values.
-    checkMemoryColumnValueFormat(this, c, ['A', 'B', 'A'], 'B',
-        undefined /* unchanged color (not an HTML element) */);
-    checkMemoryColumnValueFormat(this, c, [16, 16, undefined, 17], '17',
-        undefined /* unchanged color (not an HTML element) */);
-
-    // Identical values.
-    checkMemoryColumnValueFormat(this, c, ['X', 'X'], 'X',
-        undefined /* unchanged color (not an HTML element) */);
-    checkMemoryColumnValueFormat(this, c, [7, undefined, 7, undefined, 7], '7',
-        undefined /* unchanged color (not an HTML element) */);
-  });
-
-  test('checkMemoryColumn_compareSingleAttributes', function() {
-    var c = new MemoryColumn('test_column', 'ms', fieldGetter('x'),
-        undefined /* aggregation mode */);
-
-    assert.isBelow(c.compareSingleAttributes(new ScalarAttribute('bytes', 2),
-        new ScalarAttribute('bytes', 10)), 0);
-    assert.equal(c.compareSingleAttributes(new StringAttribute('', 'equal'),
-        new StringAttribute('', 'equal')), 0);
-    assert.isAbove(c.compareSingleAttributes(new StringAttribute('ms', '100'),
-        new StringAttribute('ms', '99')), 0);
-  });
-
-  test('checkMemoryColumn_compareMultipleAttributes_diff', function() {
-    var c = new MemoryColumn('test_column', 'ms', fieldGetter('x'),
-        AggregationMode.DIFF);
-
-    // One attribute was added.
-    checkCompareAttributesLess(c, [-10, 10], [undefined, 5]);
-    checkCompareAttributesLess(c,
-        [-100, undefined, undefined], [undefined, 4, 5]);
-    checkCompareAttributesLess(c,
-        [1, 2, 3, 4], [undefined, 'x', undefined, 'y']);
-
-    // Both attributes were added.
-    checkCompareAttributesEqual(c,
-        [undefined, 'C', undefined, 'A'], [undefined, 'B', 'D', 'A']);
-    checkCompareAttributesLess(c, [undefined, 1], [undefined, 2]);
-    checkCompareAttributesLess(c, [undefined, 6, 3], [undefined, 5, 4]);
-
-    // One attribute was removed (neither was added).
-    checkCompareAttributesLess(c, ['B', undefined], ['A', 'A']);
-    checkCompareAttributesLess(c,
-        [5, undefined, undefined], [undefined, -5, -10]);
-
-    // Both attributes were removed (neither was added)
-    checkCompareAttributesEqual(c, ['T', 'A', undefined, undefined],
-        ['T', 'B', 'C', undefined]);
-    checkCompareAttributesLess(c, [5, undefined], [4, undefined]);
-
-    // Neither attribute was added or removed.
-    checkCompareAttributesLess(c, ['BB', 'BB'], ['AA', 'CC']);
-    checkCompareAttributesEqual(c, [7, 8, 9], [6, 9, 10]);
-    checkCompareAttributesEqual(c, [5, undefined, 5], [4, 3, 4]);
-  });
-
-  test('checkMemoryColumn_compareMultipleAttributes_max', function() {
-    var c = new MemoryColumn('test_column', 'ms', fieldGetter('x'),
-        AggregationMode.MAX);
-
-    // At least one attribute has multiple values.
-    checkCompareAttributesEqual(c, [0, 1, 3], [1, 3, 2]);
-    checkCompareAttributesLess(c, ['4', undefined, '4'], ['3', '4', '5']);
-    checkCompareAttributesLess(c, [3, 3, 3], [9, undefined, 10]);
-
-    // Both attributes have single values.
-    checkCompareAttributesEqual(c,
-        [undefined, 'ttt', undefined], ['ttt', 'ttt', undefined]);
-    checkCompareAttributesLess(c, [undefined, -1, undefined], [-2, -2, -2]);
-    checkCompareAttributesLess(c, ['Q', 'Q', undefined], ['X', undefined, 'X']);
-  });
-
-  test('checkMemoryColumn_cmp', function() {
-    var c = new MemoryColumn('test_column', 'ms', fieldGetter('x'),
-        AggregationMode.DIFF);
-
-    // Cell (or the associated attribute) undefined in one or both rows.
-    assert.equal(c.cmp({}, {y: new MemoryCell([undefined])}), 0);
-    assert.equal(c.cmp({x: new MemoryCell(undefined)}, {}), 0);
-    assert.equal(c.cmp({x: new MemoryCell([undefined, undefined])}, {}), 0);
-    assert.isAbove(c.cmp({x: buildStringCell('ms', ['negative'])}, {}), 0);
-    assert.isAbove(c.cmp({x: buildStringCell('ms', ['negative'])},
-        {x: new MemoryCell([undefined])}), 0);
-    assert.isBelow(c.cmp({}, {x: buildStringCell('ms', ['positive'])}), 0);
-    assert.isBelow(c.cmp({x: new MemoryCell(undefined)},
-        {x: buildStringCell('ms', ['positive'])}), 0);
-
-    // Single attribute.
-    assert.equal(c.cmp({x: buildStringCell('t', ['equal'])},
-        {x: buildStringCell('t', ['equal'])}), 0);
-    assert.isAbove(c.cmp({x: buildStringCell('u', ['bigger'])},
-        {x: buildStringCell('u', ['BIG'])}), 0);
-    assert.isBelow(c.cmp({x: buildStringCell('v', ['small'])},
-        {x: buildStringCell('v', ['smaLL'])}), 0);
-
-    // Multiple attributes.
-    assert.isBelow(c.cmp(
-        {x: buildStringCell('', ['MemoryColumn', 'supports*', undefined])},
-        {x: buildStringCell('', ['comparing', 'multiple', 'values :-)'])}), 0);
-  });
-
-  test('checkMemoryColumn_getInfos', function() {
-    var c = new MemoryColumn('test_column', 'ms', fieldGetter('x'),
-        AggregationMode.MAX);
-
-    var info = new AttributeInfo(AttributeInfoType.INFORMATION, 'interesting');
-    var warning = new AttributeInfo(AttributeInfoType.WARNING, 'thrilling');
-    var error = new AttributeInfo(AttributeInfoType.ERROR, 'inconceivable');
-
-    var attr1 = new ScalarAttribute('bytes', 64);
-    attr1.infos.push(info);
-    attr1.infos.push(warning);
-    var attr2 = new ScalarAttribute('bytes', 128);
-    var attr3 = new ScalarAttribute('bytes', 256);
-    attr3.infos.push(error);
-
-    assert.deepEqual(c.getInfos([attr1]), [info, warning]);
-    assert.deepEqual(c.getInfos([attr1, attr2]), []);
-    assert.deepEqual(c.getInfos([attr1, attr2, attr3]), []);
+        [undefined, new ScalarNumeric(sizeInBytes_smallerIsBetter, 16),
+         undefined])}), '');
   });
 
   test('checkMemoryColumn_getImportance', function() {
-    var c = new ScalarMemoryColumn('test_column', 'bytes', fieldGetter('x'));
+    var c = new NumericMemoryColumn('test_column', ['x']);
 
     var rules1 = [];
     assert.equal(c.getImportance(rules1), 0);
@@ -809,7 +487,7 @@
   });
 
   test('checkMemoryColumn_nameMatchesCondition', function() {
-    var c = new ScalarMemoryColumn('test_column', 'bytes', fieldGetter('x'));
+    var c = new NumericMemoryColumn('test_column', ['x']);
 
     assert.isTrue(MemoryColumn.nameMatchesCondition('test_column', undefined));
 
@@ -825,202 +503,465 @@
     assert.isFalse(MemoryColumn.nameMatchesCondition('test_column', /test$/));
   });
 
-  test('checkScalarMemoryColumn_value', function() {
-    var c = new ScalarMemoryColumn('test_column', 'bytes', fieldGetter('x'),
-        AggregationMode.DIFF);
-    c.color = '#009999';
+  test('checkStringMemoryColumn_value_singleField', function() {
+    var c = new StringMemoryColumn('', ['x'], AggregationMode.MAX);
+    c.color = function(fields, contexts) {
+      if (fields[0] < '0')
+        return 'green';
+      else if (contexts && contexts[0] % 2 === 0)
+        return 'red';
+      else
+        return undefined;
+    };
 
-    // Undefined attribute values.
-    var cell = buildScalarCell('bytes', [undefined, 1, undefined]);
-    var value = c.value({x: cell});
-    assert.equal(value, '');
-
-    // Single attribute value.
-    var cell = buildScalarCell('bytes', [5.4975581e13 /* 50 TiB */]);
-    cell.attrs[0].infos = [
-      new AttributeInfo(AttributeInfoType.WARNING, 'This value is too cool'),
-      new AttributeInfo(AttributeInfoType.LINK, 'Source: Test')
+    var infos1 = [{ icon: '\u{1F648}', message: 'Some info', color: 'blue' }];
+    var infos2 = [
+      { icon: '\u{1F649}', message: 'Start', color: 'cyan' },
+      { icon: '\u{1F64A}', message: 'Stop' }
     ];
-    var value = c.value({x: cell});
-    assert.lengthOf(value.childNodes, 3);
-    assert.equal(value.style.color, 'rgb(0, 153, 153)');
-    assert.equal(value.childNodes[0].tagName, 'TR-UI-U-SCALAR-SPAN');
-    assert.equal(value.childNodes[0].value, 5.4975581e13);
-    assert.equal(value.childNodes[0].unit, tr.b.u.Units.sizeInBytes);
-    assert.equal(value.childNodes[1].textContent, String.fromCharCode(9888));
-    assert.equal(value.childNodes[1].title, 'This value is too cool');
-    assert.equal(value.childNodes[2].textContent, String.fromCharCode(9903));
-    assert.equal(value.childNodes[2].title, 'Source: Test');
-    this.addHTMLOutput(value);
+    c.addInfos = function(fields, contexts, infos) {
+      if (fields[0] < '0')
+        infos.push.apply(infos, infos1);
+      else if (contexts && contexts[0] % 2 === 0)
+        infos.push.apply(infos, infos2);
+    };
 
-    // Multiple attribute values.
-    var cell = buildScalarCell('bytes',
-        [5.4975581e13 /* 50 TiB */, undefined, 2.1990233e13 /* 20 TiB */]);
-    cell.attrs[0].infos = [
-      new AttributeInfo(AttributeInfoType.ERROR, 'This info is invisible')
-    ];
-    var value = c.value({x: cell});
-    assert.lengthOf(value.childNodes, 1);
-    assert.equal(value.style.color, 'rgb(0, 153, 153)');
-    assert.lengthOf(value.childNodes[0].childNodes, 1);
-    assert.equal(value.childNodes[0].style.color, 'green');
-    assert.equal(value.childNodes[0].childNodes[0].tagName,
-        'TR-UI-U-SCALAR-SPAN');
-    assert.equal(value.childNodes[0].childNodes[0].value, -3.2985348e13);
-    assert.equal(value.childNodes[0].childNodes[0].unit,
-        tr.b.u.Units.sizeInBytes);
-    assert.equal(value.childNodes[0].childNodes[0].isDelta, true);
-    this.addHTMLOutput(value);
+    var row = {x: new MemoryCell(['123'])};
+    assert.strictEqual(c.value(row), '123');
+
+    var row = {x: new MemoryCell(['-123']), contexts: [undefined]};
+    checkCellValue(this, c.value(row), '-123', 'green', infos1);
+
+    var row = {x: new MemoryCell(['123']), contexts: [42]};
+    checkCellValue(this, c.value(row), '123', 'red', infos2);
   });
 
-  test('checkScalarMemoryColumn_formatSingleAttributeValue', function() {
-    var c = new ScalarMemoryColumn('non_bytes_column', 'ms', fieldGetter('x'),
+  test('checkStringMemoryColumn_value_multipleFields', function() {
+    var c1 = new StringMemoryColumn('test_column1', ['x'],
         undefined /* aggregation mode */);
-    var value = c.formatSingleAttributeValue(new ScalarAttribute('ms', 123));
-    assert.equal(value.tagName, 'TR-UI-U-SCALAR-SPAN');
+    var c2 = new StringMemoryColumn('test_column2', ['x'],
+        AggregationMode.DIFF);
+    c2.color = function(fields, contexts) {
+      return '#009999';
+    };
+    var c3 = new StringMemoryColumn('test_column3', ['x'],
+        AggregationMode.MAX);
+    c3.color = function(fields, contexts) {
+      if (fields[0] < '0')
+        return 'green';
+      else if (contexts && contexts[contexts.length - 1] % 2 === 0)
+        return 'red';
+      else
+        return undefined;
+    };
+
+    var infos1 = [{ icon: '\u{1F648}', message: 'Some info', color: 'blue' }];
+    var infos2 = [
+      { icon: '\u{1F649}', message: 'Start', color: 'cyan' },
+      { icon: '\u{1F64A}', message: 'Stop' }
+    ];
+    c1.addInfos = c2.addInfos = c3.addInfos =
+        function(fields, contexts, infos) {
+      if (fields[0] < '0')
+        infos.push.apply(infos, infos1);
+      else if (contexts && contexts[contexts.length - 1] % 2 === 0)
+        infos.push.apply(infos, infos2);
+    };
+
+    var row = {x: new MemoryCell(['123', '456'])};
+    checkCellValue(this, c1.value(row), '(unsupported aggregation mode)', '');
+    checkCellValue(this, c2.value(row), '123 \u2192 456', 'rgb(0, 153, 153)');
+    assert.strictEqual(c3.value(row), '456');
+
+    var row = {
+      x: new MemoryCell(['-123', undefined, '+123']),
+      contexts: [12, 14, undefined]
+    };
+    checkCellValue(this, c1.value(row), '(unsupported aggregation mode)', '',
+        infos1);
+    checkCellValue(this, c2.value(row), '-123 \u2192 +123', 'rgb(0, 153, 153)',
+        infos1);
+    checkCellValue(this, c3.value(row), '+123', 'green', infos1);
+
+    var row = {
+      x: new MemoryCell(['123', undefined, '456']),
+      contexts: [31, 7, -2]
+    };
+    checkCellValue(this, c1.value(row), '(unsupported aggregation mode)', '',
+        infos2);
+    checkCellValue(this, c2.value(row), '123 \u2192 456', 'rgb(0, 153, 153)',
+        infos2);
+    checkCellValue(this, c3.value(row), '456', 'red', infos2);
+  });
+
+  test('checkStringMemoryColumn_formatSingleField', function() {
+    var c = new StringMemoryColumn('test_column', ['x'],
+        undefined /* aggregation mode */);
+
+    assert.equal(c.formatSingleField('1024'), '1024');
+    assert.equal(c.formatSingleField('~10'), '~10');
+  });
+
+  test('checkStringMemoryColumn_formatMultipleFields_diff', function() {
+    var c = new StringMemoryColumn('test_column', ['x'],
+        AggregationMode.DIFF);
+
+    // Added value.
+    checkMemoryColumnFieldFormat(this, c, [undefined, 'few'], '+few', 'red');
+    checkMemoryColumnFieldFormat(this, c, [undefined, 64, 32], '+32', 'red');
+
+    // Removed value.
+    checkMemoryColumnFieldFormat(this, c, ['00', undefined], '-00', 'green');
+    checkMemoryColumnFieldFormat(this, c, [1, undefined, 2, undefined], '-1',
+        'green');
+
+    // Identical values.
+    checkMemoryColumnFieldFormat(this, c, ['Unchanged', 'Unchanged'],
+        'Unchanged', undefined /* unchanged color (not an HTML element) */);
+    checkMemoryColumnFieldFormat(this, c, [16, 32, undefined, 64, 16], '16',
+        undefined /* unchanged color (not an HTML element) */);
+
+    // Different values.
+    checkMemoryColumnFieldFormat(this, c, ['A', 'C', undefined, 'C', 'B'],
+        'A \u2192 B', 'darkorange');
+    checkMemoryColumnFieldFormat(this, c, [16, undefined, 64], '16 \u2192 64',
+        'darkorange');
+  });
+
+  test('checkStringMemoryColumn_formatMultipleFields_max', function() {
+    var c = new StringMemoryColumn('test_column', ['x'],
+        AggregationMode.MAX);
+
+    // Different values.
+    checkMemoryColumnFieldFormat(this, c, ['A', 'B', 'A'], 'B',
+        undefined /* unchanged color (not an HTML element) */);
+    checkMemoryColumnFieldFormat(this, c, [16, 16, undefined, 17], '17',
+        undefined /* unchanged color (not an HTML element) */);
+
+    // Identical values.
+    checkMemoryColumnFieldFormat(this, c, ['X', 'X'], 'X',
+        undefined /* unchanged color (not an HTML element) */);
+    checkMemoryColumnFieldFormat(this, c, [7, undefined, 7, undefined, 7], '7',
+        undefined /* unchanged color (not an HTML element) */);
+  });
+
+  test('checkStringMemoryColumn_compareSingleFields', function() {
+    var c = new StringMemoryColumn('test_column', ['x'],
+        undefined /* aggregation mode */);
+
+    assert.isBelow(c.compareSingleFields(
+        new ScalarNumeric(sizeInBytes_smallerIsBetter, 2),
+        new ScalarNumeric(sizeInBytes_smallerIsBetter, 10)), 0);
+    assert.equal(c.compareSingleFields('equal', 'equal'), 0);
+    assert.isAbove(c.compareSingleFields('100', '99'), 0);
+  });
+
+  test('checkStringMemoryColumn_compareMultipleFields_diff', function() {
+    var c = new StringMemoryColumn('test_column', ['x'],
+        AggregationMode.DIFF);
+
+    // One field was added.
+    checkCompareFieldsLess(c, [-10, 10], [undefined, 5]);
+    checkCompareFieldsLess(c,
+        [-100, undefined, undefined], [undefined, 4, 5]);
+    checkCompareFieldsLess(c,
+        [1, 2, 3, 4], [undefined, 'x', undefined, 'y']);
+
+    // Both fields were added.
+    checkCompareFieldsEqual(c,
+        [undefined, 'C', undefined, 'A'], [undefined, 'B', 'D', 'A']);
+    checkCompareFieldsLess(c, [undefined, 1], [undefined, 2]);
+    checkCompareFieldsLess(c, [undefined, 6, 3], [undefined, 5, 4]);
+
+    // One field was removed (neither was added).
+    checkCompareFieldsLess(c, ['B', undefined], ['A', 'A']);
+    checkCompareFieldsLess(c,
+        [5, undefined, undefined], [undefined, -5, -10]);
+
+    // Both fields were removed (neither was added).
+    checkCompareFieldsEqual(c, ['T', 'A', undefined, undefined],
+        ['T', 'B', 'C', undefined]);
+    checkCompareFieldsLess(c, [5, undefined], [4, undefined]);
+
+    // Neither field was added nor removed.
+    checkCompareFieldsLess(c, ['BB', 'BB'], ['AA', 'CC']);
+    checkCompareFieldsEqual(c, [7, 8, 9], [6, 9, 10]);
+    checkCompareFieldsEqual(c, [5, undefined, 5], [4, 3, 4]);
+  });
+
+  test('checkStringMemoryColumn_compareMultipleFields_max', function() {
+    var c = new StringMemoryColumn('test_column', ['x'],
+        AggregationMode.MAX);
+
+    // At least one field has multiple values.
+    checkCompareFieldsEqual(c, [0, 1, 3], [1, 3, 2]);
+    checkCompareFieldsLess(c, ['4', undefined, '4'], ['3', '4', '5']);
+    checkCompareFieldsLess(c, [3, 3, 3], [9, undefined, 10]);
+
+    // Both fields have single values.
+    checkCompareFieldsEqual(c,
+        [undefined, 'ttt', undefined], ['ttt', 'ttt', undefined]);
+    checkCompareFieldsLess(c, [undefined, -1, undefined], [-2, -2, -2]);
+    checkCompareFieldsLess(c, ['Q', 'Q', undefined], ['X', undefined, 'X']);
+  });
+
+  test('checkStringMemoryColumn_cmp', function() {
+    var c = new StringMemoryColumn('test_column', ['x'],
+        AggregationMode.DIFF);
+
+    // Cell (or the associated field) undefined in one or both rows.
+    assert.equal(c.cmp({}, {y: new MemoryCell([undefined])}), 0);
+    assert.equal(c.cmp({x: new MemoryCell(undefined)}, {}), 0);
+    assert.equal(c.cmp({x: new MemoryCell([undefined, undefined])}, {}), 0);
+    assert.isAbove(c.cmp({x: new MemoryCell(['negative'])}, {}), 0);
+    assert.isAbove(c.cmp({x: new MemoryCell(['negative'])},
+        {x: new MemoryCell([undefined])}), 0);
+    assert.isBelow(c.cmp({}, {x: new MemoryCell(['positive'])}), 0);
+    assert.isBelow(c.cmp({x: new MemoryCell(undefined)},
+        {x: new MemoryCell(['positive'])}), 0);
+
+    // Single field.
+    assert.equal(c.cmp({x: new MemoryCell(['equal'])},
+        {x: new MemoryCell(['equal'])}), 0);
+    assert.isAbove(c.cmp({x: new MemoryCell(['bigger'])},
+        {x: new MemoryCell(['BIG'])}), 0);
+    assert.isBelow(c.cmp({x: new MemoryCell(['small'])},
+        {x: new MemoryCell(['smaLL'])}), 0);
+
+    // Multiple fields.
+    assert.isBelow(c.cmp(
+        {x: new MemoryCell(['MemoryColumn', 'supports*', undefined])},
+        {x: new MemoryCell(['comparing', 'multiple', 'values :-)'])}), 0);
+  });
+
+  test('checkNumericMemoryColumn_value', function() {
+    var c = new NumericMemoryColumn('test_column', ['x'],
+        AggregationMode.DIFF);
+    c.color = function(fields, contexts) {
+      return '#009999';
+    };
+    var infos1 = [createWarningInfo('Attention!')];
+    c.addInfos = function(fields, contexts, infos) {
+      infos.push.apply(infos, infos1);
+    };
+
+    // Undefined field values.
+    var row = {x: buildScalarCell(sizeInBytes_smallerIsBetter,
+        [undefined, 1, undefined])};
+    assert.equal(c.value(row), '');
+
+    // Single field value.
+    var row = {x: buildScalarCell(sizeInBytes_smallerIsBetter,
+        [5.4975581e13 /* 50 TiB */])};
+    checkCellValue(this, c.value(row), sizeSpanMatcher(5.4975581e13),
+        'rgb(0, 153, 153)', infos1);
+
+    // Multiple field values.
+    var row = {
+      x: buildScalarCell(sizeInBytes_smallerIsBetter,
+          [5.4975581e13 /* 50 TiB */, undefined, 2.1990233e13 /* 20 TiB */])
+    };
+    checkCellValue(this, c.value(row),
+        sizeSpanMatcher(-3.2985348e13, true /* opt_expectedIsDelta */),
+        'rgb(0, 153, 153)', infos1);
+  });
+
+  test('checkNumericMemoryColumn_formatSingleField', function() {
+    var c = new NumericMemoryColumn('non_bytes_column', ['x'],
+        undefined /* aggregation mode */);
+    var value = c.formatSingleField(new ScalarNumeric(
+        tr.v.Unit.byName.unitlessNumber_smallerIsBetter, 123));
+    assert.equal(value.tagName, 'TR-V-UI-SCALAR-SPAN');
     assert.equal(value.value, 123);
-    assert.equal(value.unit, tr.b.u.Units.unitlessNumber);
+    assert.equal(value.unit, tr.v.Unit.byName.unitlessNumber_smallerIsBetter);
     this.addHTMLOutput(value);
 
-    var c = new ScalarMemoryColumn('bytes_column', 'bytes', fieldGetter('x'),
+    var c = new NumericMemoryColumn('bytes_column', ['x'],
         undefined /* aggregation mode */);
-    var value = c.formatSingleAttributeValue(new ScalarAttribute('bytes', 456));
-    assert.equal(value.tagName, 'TR-UI-U-SCALAR-SPAN');
+    var value = c.formatSingleField(new ScalarNumeric(
+        sizeInBytes_smallerIsBetter, 456));
+    assert.equal(value.tagName, 'TR-V-UI-SCALAR-SPAN');
     assert.equal(value.value, 456);
-    assert.equal(value.unit, tr.b.u.Units.sizeInBytes);
+    assert.equal(value.unit, tr.v.Unit.byName.sizeInBytes_smallerIsBetter);
     this.addHTMLOutput(value);
   });
 
-  test('checkScalarMemoryColumn_formatMultipleAttributeValues_diff',
+  test('checkNumericMemoryColumn_formatMultipleFields_diff',
       function() {
-    var c = new ScalarMemoryColumn('non_bytes_column', 'ms', fieldGetter('x'),
+    var c = new NumericMemoryColumn('non_bytes_column', ['x'],
         AggregationMode.DIFF);
-    checkScalarMemoryColumnValueFormat(this, c, [1, 2, 3], 2,
-        tr.b.u.Units.unitlessNumber, true, 'red');
-    checkScalarMemoryColumnValueFormat(this, c, [10, undefined], -10,
-        tr.b.u.Units.unitlessNumber, true, 'green');
-    checkScalarMemoryColumnValueFormat(this, c, [undefined, 60, 0], 0,
-        tr.b.u.Units.unitlessNumber, true);
+    checkNumericMemoryColumnFieldFormat(this, c, [1, 2, 3],
+        tr.v.Unit.byName.unitlessNumberDelta_smallerIsBetter, 2);
+    checkNumericMemoryColumnFieldFormat(this, c, [10, undefined],
+        tr.v.Unit.byName.unitlessNumberDelta_smallerIsBetter, -10);
+    checkNumericMemoryColumnFieldFormat(this, c, [undefined, 60, 0],
+        tr.v.Unit.byName.unitlessNumberDelta_smallerIsBetter, 0);
+    checkNumericMemoryColumnFieldFormat(
+        this, c, [2.71828, 2.71829] /* diff within epsilon */,
+        tr.v.Unit.byName.unitlessNumberDelta_smallerIsBetter, 0);
 
-    var c = new ScalarMemoryColumn('bytes_column', 'bytes', fieldGetter('x'),
+    var c = new NumericMemoryColumn('bytes_column', ['x'],
         AggregationMode.DIFF);
-    checkScalarMemoryColumnValueFormat(this, c, [1, 2, 3], 2,
-        tr.b.u.Units.sizeInBytes, true, 'red');
-    checkScalarMemoryColumnValueFormat(this, c, [10, undefined], -10,
-        tr.b.u.Units.sizeInBytes, true, 'green');
-    checkScalarMemoryColumnValueFormat(this, c, [undefined, 60, 0], 0,
-        tr.b.u.Units.sizeInBytes, true, undefined);
+    checkNumericMemoryColumnFieldFormat(this, c, [1, 2, 3],
+        tr.v.Unit.byName.sizeInBytesDelta_smallerIsBetter, 2);
+    checkNumericMemoryColumnFieldFormat(this, c, [10, undefined],
+        tr.v.Unit.byName.sizeInBytesDelta_smallerIsBetter, -10);
+    checkNumericMemoryColumnFieldFormat(this, c, [undefined, 60, 0],
+        tr.v.Unit.byName.sizeInBytesDelta_smallerIsBetter, 0);
+    checkNumericMemoryColumnFieldFormat(
+        this, c, [1.41421, 1.41422] /* diff within epsilon */,
+        tr.v.Unit.byName.sizeInBytesDelta_smallerIsBetter, 0);
   });
 
-  test('checkScalarMemoryColumn_formatMultipleAttributeValues_max',
+  test('checkNumericMemoryColumn_formatMultipleFields_max',
       function() {
-    var c = new ScalarMemoryColumn('non_bytes_column', 'ms', fieldGetter('x'),
+    var c = new NumericMemoryColumn('non_bytes_column', ['x'],
         AggregationMode.MAX);
-    checkScalarMemoryColumnValueFormat(this, c, [1, 2, 3], 3,
-        tr.b.u.Units.unitlessNumber, false);
-    checkScalarMemoryColumnValueFormat(this, c, [10, undefined], 10,
-        tr.b.u.Units.unitlessNumber, false);
-    checkScalarMemoryColumnValueFormat(this, c, [undefined, 60, 0], 60,
-        tr.b.u.Units.unitlessNumber, false);
-    checkScalarMemoryColumnValueFormat(this, c,
-        [undefined, 10, 20, undefined], 20, tr.b.u.Units.unitlessNumber, false);
+    checkNumericMemoryColumnFieldFormat(this, c, [1, 2, 3],
+        tr.v.Unit.byName.unitlessNumber_smallerIsBetter, 3);
+    checkNumericMemoryColumnFieldFormat(this, c, [10, undefined],
+        tr.v.Unit.byName.unitlessNumber_smallerIsBetter, 10);
+    checkNumericMemoryColumnFieldFormat(this, c, [undefined, 60, 0],
+        tr.v.Unit.byName.unitlessNumber_smallerIsBetter, 60);
+    checkNumericMemoryColumnFieldFormat(this, c, [undefined, 10, 20, undefined],
+        tr.v.Unit.byName.unitlessNumber_smallerIsBetter, 20);
 
-    var c = new ScalarMemoryColumn('bytes_column', 'bytes', fieldGetter('x'),
+    var c = new NumericMemoryColumn('bytes_column', ['x'],
         AggregationMode.MAX);
-    checkScalarMemoryColumnValueFormat(this, c, [1, 2, 3], 3,
-        tr.b.u.Units.sizeInBytes, false);
-    checkScalarMemoryColumnValueFormat(this, c, [10, undefined], 10,
-        tr.b.u.Units.sizeInBytes, false);
-    checkScalarMemoryColumnValueFormat(this, c, [undefined, 60, 0], 60,
-        tr.b.u.Units.sizeInBytes, false);
-    checkScalarMemoryColumnValueFormat(this, c,
-        [undefined, 10, 20, undefined], 20, tr.b.u.Units.sizeInBytes, false);
+    checkNumericMemoryColumnFieldFormat(this, c, [1, 2, 3],
+        tr.v.Unit.byName.sizeInBytes_smallerIsBetter, 3);
+    checkNumericMemoryColumnFieldFormat(this, c, [10, undefined],
+        tr.v.Unit.byName.sizeInBytes_smallerIsBetter, 10);
+    checkNumericMemoryColumnFieldFormat(this, c, [undefined, 60, 0],
+        tr.v.Unit.byName.sizeInBytes_smallerIsBetter, 60);
+    checkNumericMemoryColumnFieldFormat(this, c, [undefined, 10, 20, undefined],
+        tr.v.Unit.byName.sizeInBytes_smallerIsBetter, 20);
   });
 
-  test('checkScalarMemoryColumn_cmp', function() {
-    var c = new ScalarMemoryColumn('test_column', 'bytes', fieldGetter('x'),
+  test('checkNumericMemoryColumn_cmp', function() {
+    var c = new NumericMemoryColumn('test_column', ['x'],
         AggregationMode.DIFF);
 
-    // Undefined attribute values.
-    assert.isAbove(c.cmp({x: buildScalarCell('bytes', [-9999999999])},
+    // Undefined field values.
+    assert.isAbove(c.cmp({x: buildScalarCell(sizeInBytes_smallerIsBetter,
+        [-9999999999])},
         {x: undefined}), 0);
     assert.isBelow(c.cmp({x: new MemoryCell(undefined)},
-        {x: buildScalarCell('bytes', [748, 749])}), 0);
+        {x: buildScalarCell(sizeInBytes_smallerIsBetter, [748, 749])}), 0);
     assert.equal(c.cmp({},
-        {x: buildScalarCell('bytes', [undefined, undefined])}), 0);
+        {x: buildScalarCell(sizeInBytes_smallerIsBetter,
+            [undefined, undefined])}), 0);
 
-    // Single attribute value.
-    assert.isBelow(c.cmp({x: buildScalarCell('bytes', [16384])},
-        {x: buildScalarCell('bytes', [32768])}), 0);
+    // Single field value.
+    assert.isBelow(c.cmp(
+        {x: buildScalarCell(sizeInBytes_smallerIsBetter, [16384])},
+        {x: buildScalarCell(sizeInBytes_smallerIsBetter, [32768])}), 0);
 
-    // Multiple attribute values.
-    assert.equal(c.cmp({x: buildScalarCell('bytes', [999, undefined, 1001])},
-        {x: buildScalarCell('bytes', [undefined, 5, 2])}), 0);
+    // Multiple field values.
+    assert.equal(c.cmp(
+        {x: buildScalarCell(sizeInBytes_smallerIsBetter,
+            [999, undefined, 1001])},
+        {x: buildScalarCell(sizeInBytes_smallerIsBetter,
+            [undefined, 5, 2])}), 0);
   });
 
-  test('checkScalarMemoryColumn_compareSingleAttributes', function() {
-    var c = new ScalarMemoryColumn('test_column', 'ms', fieldGetter('x'),
+  test('checkNumericMemoryColumn_compareSingleFields', function() {
+    var c = new NumericMemoryColumn('test_column', ['x'],
         undefined /* aggregation mode */);
 
-    assert.isBelow(c.compareSingleAttributes(new ScalarAttribute('ms', 99),
-        new ScalarAttribute('ms', 100)), 0);
-    assert.equal(c.compareSingleAttributes(new ScalarAttribute('', 0xEEE),
-        new ScalarAttribute('', 0xEEE)), 0);
-    assert.isAbove(c.compareSingleAttributes(new ScalarAttribute('bytes', 10),
-        new ScalarAttribute('bytes', 2)), 0);
+    assert.isBelow(c.compareSingleFields(
+        new ScalarNumeric(
+            tr.v.Unit.byName.timeDurationInMs_smallerIsBetter, 99),
+        new ScalarNumeric(
+            tr.v.Unit.byName.timeDurationInMs_smallerIsBetter, 100)), 0);
+    assert.equal(c.compareSingleFields(
+        new ScalarNumeric(tr.v.Unit.byName.unitlessNumber, 0xEEE),
+        new ScalarNumeric(tr.v.Unit.byName.unitlessNumber, 0xEEE)), 0);
+    assert.isAbove(c.compareSingleFields(
+        new ScalarNumeric(sizeInBytes_smallerIsBetter, 10),
+        new ScalarNumeric(sizeInBytes_smallerIsBetter, 2)), 0);
   });
 
-  test('checkScalarMemoryColumn_compareMultipleAttributes_diff', function() {
-    var c = new ScalarMemoryColumn('test_column', 'ms', fieldGetter('x'),
+  test('checkNumericMemoryColumn_compareMultipleFields_diff', function() {
+    var c = new NumericMemoryColumn('test_column', ['x'],
         AggregationMode.DIFF);
 
-    assert.isBelow(c.compareMultipleAttributes(
-        buildScalarCell('bytes', [10000, 10001, 10002] /* diff +2 */).attrs,
-        buildScalarCell('bytes', [5, 7, 8] /* diff +3 */).attrs), 0);
-    assert.equal(c.compareMultipleAttributes(
-        buildScalarCell('ms', [4, undefined] /* diff -4 */).attrs,
-        buildScalarCell('ms', [999, 995] /* diff -4 */).attrs), 0);
-    assert.isAbove(c.compareMultipleAttributes(
-        buildScalarCell('bytes', [10, undefined, 12] /* diff +2 */).attrs,
-        buildScalarCell('bytes', [11, 50, 12] /* diff +1 */).attrs), 0);
-    assert.equal(c.compareMultipleAttributes(
-        buildScalarCell('W', [17, undefined, 17] /* diff 0 */).attrs,
-        buildScalarCell('W',
-            [undefined, 100, undefined] /* diff 0 */).attrs), 0);
+    assert.isBelow(c.compareMultipleFields(
+        buildScalarCell(sizeInBytes_smallerIsBetter,
+            [10000, 10001, 10002] /* diff +2 */).fields,
+        buildScalarCell(sizeInBytes_smallerIsBetter,
+            [5, 7, 8] /* diff +3 */).fields), 0);
+    assert.equal(c.compareMultipleFields(
+        buildScalarCell(tr.v.Unit.byName.timeDurationInMs_smallerIsBetter,
+            [4, undefined] /* diff -4 */).fields,
+        buildScalarCell(tr.v.Unit.byName.timeDurationInMs_smallerIsBetter,
+            [999, 995] /* diff -4 */).fields), 0);
+    assert.isAbove(c.compareMultipleFields(
+        buildScalarCell(sizeInBytes_smallerIsBetter,
+            [10, undefined, 12] /* diff +2 */).fields,
+        buildScalarCell(sizeInBytes_smallerIsBetter,
+            [11, 50, 12] /* diff +1 */).fields), 0);
+    assert.equal(c.compareMultipleFields(
+        buildScalarCell(tr.v.Unit.byName.powerInWatts_smallerIsBetter,
+            [17, undefined, 17] /* diff 0 */).fields,
+        buildScalarCell(tr.v.Unit.byName.powerInWatts_smallerIsBetter,
+            [undefined, 100, undefined] /* diff 0 */).fields), 0);
+    assert.equal(c.compareMultipleFields(
+        buildScalarCell(sizeInBytes_smallerIsBetter,
+            [3.14159, undefined, 3.14160] /* diff within epsilon */).fields,
+        buildScalarCell(sizeInBytes_smallerIsBetter,
+            [100, 100, 100] /* diff 0 */).fields), 0);
   });
 
-  test('checkScalarMemoryColumn_compareMultipleAttributes_max', function() {
-    var c = new ScalarMemoryColumn('test_column', 'ms', fieldGetter('x'),
+  test('checkNumericMemoryColumn_compareMultipleFields_max', function() {
+    var c = new NumericMemoryColumn('test_column', ['x'],
         AggregationMode.MAX);
 
-    assert.isBelow(c.compareMultipleAttributes(
-        buildScalarCell('bytes', [10, undefined, 12]).attrs,
-        buildScalarCell('bytes', [11, 50, 12]).attrs), 0);
-    assert.equal(c.compareMultipleAttributes(
-        buildScalarCell('ms', [999, undefined, -8888]).attrs,
-        buildScalarCell('ms', [undefined, 999, undefined]).attrs), 0);
-    assert.isAbove(c.compareMultipleAttributes(
-        buildScalarCell('bytes', [10000, 10001, 10002]).attrs,
-        buildScalarCell('bytes', [5, 7, 8]).attrs), 0);
-    assert.isBelow(c.compareMultipleAttributes(
-        buildScalarCell('W', [17, undefined, 17]).attrs,
-        buildScalarCell('W', [undefined, 100, undefined]).attrs), 0);
+    assert.isBelow(c.compareMultipleFields(
+        buildScalarCell(sizeInBytes_smallerIsBetter,
+            [10, undefined, 12]).fields,
+        buildScalarCell(sizeInBytes_smallerIsBetter, [11, 50, 12]).fields), 0);
+    assert.equal(c.compareMultipleFields(
+        buildScalarCell(tr.v.Unit.byName.timeDurationInMs_smallerIsBetter,
+            [999, undefined, -8888]).fields,
+        buildScalarCell(tr.v.Unit.byName.timeDurationInMs_smallerIsBetter,
+            [undefined, 999, undefined]).fields), 0);
+    assert.isAbove(c.compareMultipleFields(
+        buildScalarCell(sizeInBytes_smallerIsBetter,
+            [10000, 10001, 10002]).fields,
+        buildScalarCell(sizeInBytes_smallerIsBetter, [5, 7, 8]).fields), 0);
+    assert.isBelow(c.compareMultipleFields(
+        buildScalarCell(tr.v.Unit.byName.powerInWatts_smallerIsBetter,
+            [17, undefined, 17]).fields,
+        buildScalarCell(tr.v.Unit.byName.powerInWatts_smallerIsBetter,
+            [undefined, 100, undefined]).fields), 0);
   });
 
-  test('checkFieldGetter', function() {
-    // Zero cells.
-    var f = fieldGetter();
-    var row = {a: 5};
-    assert.equal(f(row), row);
+  test('checkNumericMemoryColumn_getDiffFieldValue', function() {
+    var c = new NumericMemoryColumn('test_column', ['x'],
+        AggregationMode.MAX);
+    function checkDiffValue(first, last, expectedDiffValue) {
+      var actualDiffValue = c.getDiffFieldValue_(
+          first === undefined ? undefined :
+              new ScalarNumeric(sizeInBytes_smallerIsBetter, first),
+          last === undefined ? undefined :
+              new ScalarNumeric(sizeInBytes_smallerIsBetter, last));
+      assert.closeTo(actualDiffValue, expectedDiffValue, 1e-8);
+    }
 
-    // One cell.
-    var f = fieldGetter('p2');
-    assert.equal(f({p1: 'wrong', p2: 'right'}), 'right');
-    assert.isUndefined(f({p1: 'wrong'}));
+    // Diff outside epsilon range.
+    checkDiffValue(0, 0.0002, 0.0002);
+    checkDiffValue(undefined, 0.0003, 0.0003);
+    checkDiffValue(0.3334, 0.3332, -0.0002);
+    checkDiffValue(0.0005, undefined, -0.0005);
 
-    // Multiple cells.
-    var f = fieldGetter('b', 'd', 'f');
-    assert.equal(f({a: 0, b: {c: 0, d: {e: 0, f: 42}}}), 42);
+    // Diff inside epsilon range.
+    checkDiffValue(5, 5.00009, 0);
+    checkDiffValue(undefined, 0.0000888, 0);
+    checkDiffValue(0.29999, 0.3, 0);
+    checkDiffValue(0.00009, undefined, 0);
+    checkDiffValue(0.777777, 0.777777, 0);
+    checkDiffValue(undefined, undefined, 0);
   });
 
   test('checkExpandTableRowsRecursively', function() {
@@ -1037,43 +978,43 @@
         data: 'allocated',
         subRows: [
           {
-            data: 'v8'
+            data: 'v8',
+            subRows: []
           },
           {
             data: 'oilpan',
             subRows: [
-              { data: 'heaps' },
-              { data: 'objects' }
-            ]
-          },
-          {
-            data: 'skia',
-            subRows: [
-              { data: 'way' },
-              { data: 'too' },
-              { data: 'many' },
-              { data: 'sub-' },
-              { data: 'rows' },
-              { data: 'so' },
-              { data: 'that' },
-              { data: 'they' },
-              { data: 'wouldn\'t' },
-              { data: 'be' },
-              { data: 'auto-' },
-              { data: 'expanded' }
+              {
+                data: 'still_visible',
+                subRows: [
+                  {
+                    data: 'not_visible_any_more'
+                  }
+                ]
+              },
+              {
+                data: 'also_visible'
+              }
             ]
           }
         ]
       },
       {
-        data: 'overhead',
+        data: 'no_sub_rows'
+      },
+      {
+        data: 'fragmentation',
         subRows: [
           {
-            data: 'internal_fragmentation'
+            data: 'internal'
           },
           {
-            data: 'external_fragmentation',
-            subRows: []
+            data: 'external',
+            subRows: [
+              {
+                data: 'unexpanded'
+              }
+            ]
           }
         ]
       }
@@ -1086,42 +1027,44 @@
 
     expandTableRowsRecursively(table);
 
-    // 'allocated' row should be expanded.
-    assert.isTrue(table.getExpandedForTableRow(rows[0]));
+    function isExpanded(row) { return table.getExpandedForTableRow(row); }
 
-    // 'allocated/v8' row cannot be expanded (no sub-rows).
-    assert.isFalse(table.getExpandedForTableRow(rows[0].subRows[0]));
+    // Level 0 (3 rows) should be expanded (except for nodes which have no
+    // sub-rows).
+    assert.isTrue(isExpanded(rows[0] /* allocated */));
+    assert.isFalse(isExpanded(rows[1] /* no_sub_rows */));
+    assert.isTrue(isExpanded(rows[2] /* fragmentation */));
 
-    // 'allocated/oilpan' row should be expanded.
-    assert.isTrue(table.getExpandedForTableRow(rows[0].subRows[1]));
+    // Level 1 (4 rows) should be expanded (except for nodes which have no
+    // sub-rows).
+    assert.isFalse(isExpanded(rows[0].subRows[0] /* allocated/v8 */));
+    assert.isTrue(isExpanded(rows[0].subRows[1] /* allocated/oilpan */));
+    assert.isFalse(isExpanded(rows[2].subRows[0] /* fragmentation/internal */));
+    assert.isTrue(isExpanded(rows[2].subRows[1] /* fragmentation/external */));
 
-    // 'allocated/skia' row should not be expanded (more than 10 sub-rows).
-    assert.isFalse(table.getExpandedForTableRow(rows[0].subRows[2]));
-
-    // 'overhead' row should be expanded.
-    assert.isTrue(table.getExpandedForTableRow(rows[1]));
-
-    // 'overhead/internal_fragmentation' cannot be expanded (no sub-rows).
-    assert.isFalse(table.getExpandedForTableRow(rows[1].subRows[0]));
-
-    // 'overhead/external_fragmentation' cannot be expanded (no sub-rows).
-    assert.isFalse(table.getExpandedForTableRow(rows[1].subRows[1]));
+    // Level 2 (3 rows) should not be expanded any more.
+    assert.isFalse(isExpanded(
+        rows[0].subRows[1].subRows[0] /* allocated/oilpan/still_visible */));
+    assert.isFalse(isExpanded(
+        rows[0].subRows[1].subRows[1] /* allocated/oilpan/also_visible */));
+    assert.isFalse(isExpanded(
+        rows[2].subRows[1].subRows[0] /* fragmentation/external/unexpanded */));
   });
 
-  test('checkMemoryCell_extractAttributes', function() {
-    assert.isUndefined(MemoryCell.extractAttributes(undefined));
+  test('checkMemoryCell_extractFields', function() {
+    assert.isUndefined(MemoryCell.extractFields(undefined));
 
-    assert.isUndefined(MemoryCell.extractAttributes(new MemoryCell(undefined)));
+    assert.isUndefined(MemoryCell.extractFields(new MemoryCell(undefined)));
 
-    var attrs = [new ScalarAttribute('bytes', 1024)];
+    var fields = [new ScalarNumeric(sizeInBytes_smallerIsBetter, 1024)];
     assert.strictEqual(
-        MemoryCell.extractAttributes(new MemoryCell(attrs)), attrs);
+        MemoryCell.extractFields(new MemoryCell(fields)), fields);
   });
 
   test('checkAggregateTableRowCellsRecursively', function() {
     var row = {
       testCells: {
-        a: buildScalarCell('bytes', [17])
+        a: buildScalarCell(sizeInBytes_smallerIsBetter, [17])
       },
       subRows: [
         {
@@ -1129,8 +1072,9 @@
           subRows: [
             {
               testCells: {
-                b: buildScalarCell('bytes', [103]),
-                c: buildStringCell('', ['should-not-propagate-upwards'])
+                b: buildScalarCell(sizeInBytes_smallerIsBetter, [103]),
+                c: new MemoryCell(['should-not-propagate-upwards']),
+                d: buildScalarCell(sizeInBytes_smallerIsBetter, [-200])
               }
               // Intentionally no subRows.
             },
@@ -1138,74 +1082,88 @@
               testCells: {},
               subRows: []
             }
-          ]
+          ],
+          contexts: ['skip-row-when-using-predicate']
         },
         {
           testCells: {
-            b: buildScalarCell('bytes', [20]),
-            a: buildScalarCell('bytes', [13])
-          }
+            b: buildScalarCell(sizeInBytes_smallerIsBetter, [20]),
+            a: buildScalarCell(sizeInBytes_smallerIsBetter, [13]),
+            e: buildScalarCell(sizeInBytes_smallerIsBetter, [-300])
+          },
+          contexts: ['don\'t-skip']
         }
       ]
     };
 
-    aggregateTableRowCellsRecursively(row, 'testCells');
+    // Without a predicate.
+    var ca = new NumericMemoryColumn('column_a', ['testCells', 'a']);
+    var cb = new NumericMemoryColumn('column_b', ['testCells', 'b']);
+    var cc = new StringMemoryColumn('column_c', ['testCells', 'c']);
+    aggregateTableRowCellsRecursively(row, [ca, cb, cc]);
+    checkSizeNumericFields(row, ca, [17]);
+    checkSizeNumericFields(row, cb, [123]);
+    checkStringFields(row, cc, undefined);
 
-    var mockColumn = new MemoryColumn('', '', tr.b.identity, undefined);
-
-    checkSizeAttributes(row.testCells.a, mockColumn, [17]);
-    checkSizeAttributes(row.testCells.b, mockColumn, [123]);
-    checkAttributes(row.testCells.c, mockColumn, [undefined]);
+    // With a predicate.
+    var cd = new NumericMemoryColumn('column_d', ['testCells', 'd']);
+    var ce = new NumericMemoryColumn('column_e', ['testCells', 'e']);
+    aggregateTableRowCellsRecursively(row, [cd, ce], function(contexts) {
+      return contexts === undefined || !contexts[0].startsWith('skip');
+    });
+    checkSizeNumericFields(row, cd, undefined);
+    checkSizeNumericFields(row, ce, [-300]);
   });
 
   test('checkAggregateTableRowCells', function() {
-    var cell = new MemoryCell(undefined);
-    cell.foo = 'bar';
-
     var row = {
       // Intentionally no testCells.
       otherCells: {
-        a: cell
+        a: buildScalarCell(tr.v.Unit.byName.unitlessNumber,
+            [5, undefined, undefined])
       }
     };
     var subRows = [
       {
         testCells: {
-          a: buildScalarCell('bytes', [1, 9])
+          a: buildScalarCell(sizeInBytes_smallerIsBetter, [1, 9])
         },
         subRows: [
           {
             testCells: {
-              c: buildScalarCell('bytes', [13])
+              c: buildScalarCell(sizeInBytes_smallerIsBetter, [13])
             }
           }
         ]
       },
       {
         testCells: {
-          a: buildScalarCell('bytes', [2, 17]),
-          b: buildScalarCell('bytes', [5])
+          a: buildScalarCell(sizeInBytes_smallerIsBetter, [2, 17]),
+          b: buildScalarCell(sizeInBytes_smallerIsBetter, [5])
         },
         otherCells: {
-          a: buildScalarCell('objects', [153]),
-          b: buildStringCell('', ['attribute-should-not-propagate-upwards', ''])
+          a: buildScalarCell(tr.v.Unit.byName.unitlessNumber,
+              [153, undefined, 257]),
+          b: new MemoryCell(['field-should-not-propagate-upwards', ''])
         }
       }
     ];
 
-    aggregateTableRowCells(row, subRows, 'testCells');
-    aggregateTableRowCells(row, subRows, 'otherCells');
+    var cta = new NumericMemoryColumn('column_test_a', ['testCells', 'a']);
+    var ctb = new NumericMemoryColumn('column_test_b', ['testCells', 'b']);
+    var ctc = new NumericMemoryColumn('column_test_c', ['testCells', 'c']);
+    var coa = new NumericMemoryColumn('column_other_a', ['otherCells', 'a']);
+    var cob = new StringMemoryColumn('column_other_b', ['otherCells', 'b']);
 
-    var mockColumn = new MemoryColumn('', '', tr.b.identity, undefined);
+    aggregateTableRowCells(row, subRows, [cta, ctb, ctc, coa, cob]);
 
-    checkSizeAttributes(row.testCells.a, mockColumn, [3, 26]);
-    checkSizeAttributes(row.testCells.b, mockColumn, [5]);
-    assert.notProperty(row.testCells, 'c');
+    checkSizeNumericFields(row, cta, [3, 26]);
+    checkSizeNumericFields(row, ctb, [5]);
+    checkSizeNumericFields(row, ctc, undefined);
 
-    var otherCellA = row.otherCells.a;
-    assert.equal(otherCellA.foo, 'bar');
-    checkAttributes(otherCellA, mockColumn, [153], ScalarAttribute, 'objects');
-    checkAttributes(row.otherCells.b, mockColumn, [undefined, undefined]);
+    checkNumericFields(row, coa, [5, undefined, 257],
+        tr.v.Unit.byName.unitlessNumber);
+    checkStringFields(row, cob, undefined);
   });
 
   test('checkCreateCells', function() {
@@ -1225,73 +1183,23 @@
       }
     ];
 
-    var mockColumn = new MemoryColumn('', '', tr.b.identity, undefined);
+    var mockColumn = new MemoryColumn('', [], undefined);
 
-    // Without callback.
     var cells = createCells(values, function(dict) {
-      var attrs = {};
+      var fields = {};
       tr.b.iterItems(dict, function(key, value) {
-        addAttributeIfDefined(attrs, key, ScalarAttribute, 'bytes', value);
+        if (value === undefined)
+          return;
+        fields[key] = new ScalarNumeric(sizeInBytes_smallerIsBetter, value);
       });
-      return attrs;
+      return fields;
     });
     assert.deepEqual(Object.keys(cells), ['a', 'b', 'd']);
-    checkSizeAttributes(
+    checkSizeNumericFields(
         cells.a, mockColumn, [9, undefined, undefined, undefined]);
-    checkSizeAttributes(cells.b, mockColumn, [314, 159, undefined, 265]);
-    checkSizeAttributes(
+    checkSizeNumericFields(cells.b, mockColumn, [314, 159, undefined, 265]);
+    checkSizeNumericFields(
         cells.d, mockColumn, [undefined, undefined, undefined, 0]);
-
-    // With callback.
-    var createdAttrNames = {};
-    var cells = createCells(values,
-        function(dict) {
-          var attrs = {};
-          tr.b.iterItems(dict, function(key, value) {
-            addAttributeIfDefined(attrs, key, ScalarAttribute, 'bytes', value);
-          });
-          return attrs;
-        },
-        function(attrName, cell) {
-          assert.lengthOf(cell.attrs, 4);
-          assert.notProperty(createdAttrNames, attrName);
-          createdAttrNames[attrName] = true;
-        });
-    assert.sameMembers(Object.keys(createdAttrNames), ['a', 'b', 'd']);
-  });
-
-  test('checkAddAttributeIfDefined', function() {
-    var attrs = {};
-
-    // Undefined attribute value.
-    addAttributeIfDefined(attrs, 'x', ScalarAttribute, 'bytes', undefined);
-    assert.lengthOf(Object.keys(attrs), 0);
-
-    // Defined attribute value.
-    addAttributeIfDefined(attrs, 'x', ScalarAttribute, 'bytes', 16384);
-    assert.deepEqual(Object.keys(attrs), ['x']);
-    checkSizeAttributes([attrs.x], undefined /* no column */, [16384]);
-
-    var didFireCallback;
-    var addedCallback = function(attr) {
-      didFireCallback = true;
-    };
-
-    // Undefined attribute value with callback.
-    didFireCallback = false;
-    addAttributeIfDefined(
-        attrs, 'y', StringAttribute, '', undefined, addedCallback);
-    assert.isFalse(didFireCallback);
-    assert.deepEqual(Object.keys(attrs), ['x']);
-
-    // Defined attribute with callback.
-    didFireCallback = false;
-    addAttributeIfDefined(
-        attrs, 'y', StringAttribute, '', '(+infinity)', addedCallback);
-    assert.isTrue(didFireCallback);
-    assert.deepEqual(Object.keys(attrs), ['x', 'y']);
-    checkAttributes([attrs.y], undefined /* no column */, ['(+infinity)'],
-        StringAttribute, '');
   });
 });
 </script>
diff --git a/catapult/tracing/tracing/ui/analysis/memory_dump_vm_regions_details_pane.html b/catapult/tracing/tracing/ui/analysis/memory_dump_vm_regions_details_pane.html
index a46ab08..bf55a30 100644
--- a/catapult/tracing/tracing/ui/analysis/memory_dump_vm_regions_details_pane.html
+++ b/catapult/tracing/tracing/ui/analysis/memory_dump_vm_regions_details_pane.html
@@ -6,10 +6,11 @@
 -->
 
 <link rel="import" href="/tracing/base/iteration_helpers.html">
-<link rel="import" href="/tracing/model/attribute.html">
 <link rel="import" href="/tracing/ui/analysis/memory_dump_sub_view_util.html">
 <link rel="import" href="/tracing/ui/analysis/stacked_pane.html">
 <link rel="import" href="/tracing/ui/base/table.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <polymer-element name="tr-ui-a-memory-dump-vm-regions-details-pane"
     extends="tr-ui-a-stacked-pane">
@@ -63,156 +64,70 @@
 
 tr.exportTo('tr.ui.analysis', function() {
 
-  var COLUMN_IMPORTANCE_RULES =
-      tr.ui.analysis.MemoryColumn.columnNamesToImportanceRules([
-          'Start address',
-          'Virtual size',
-          'Protection flags',
-          'PSS',
-          'Private dirty',
-          'Private clean',
-          'Shared dirty',
-          'Shared clean',
-          'Swapped']);
+  var ScalarNumeric = tr.v.ScalarNumeric;
+  var sizeInBytes_smallerIsBetter =
+      tr.v.Unit.byName.sizeInBytes_smallerIsBetter;
 
-  /**
-   * Rules for classifying memory maps.
-   *
-   * These rules are derived from core/jni/android_os_Debug.cpp in Android.
-   */
-  var CLASSIFICATION_RULES = {
-    name: 'Total',
-    children: [
-      {
-        name: 'Android',
-        file: /^\/dev\/ashmem(?!\/libc malloc)/,
-        children: [
-          {
-            name: 'Java runtime',
-            file: /^\/dev\/ashmem\/dalvik-/,
-            children: [
-              {
-                name: 'Spaces',
-                file: /\/dalvik-(alloc|main|large object|non moving|zygote) space/,  // @suppress longLineCheck
-                children: [
-                  {
-                    name: 'Normal',
-                    file: /\/dalvik-(alloc|main)/
-                  },
-                  {
-                    name: 'Large',
-                    file: /\/dalvik-large object/
-                  },
-                  {
-                    name: 'Zygote',
-                    file: /\/dalvik-zygote/
-                  },
-                  {
-                    name: 'Non-moving',
-                    file: /\/dalvik-non moving/
-                  }
-                ]
-              },
-              {
-                name: 'Linear Alloc',
-                file: /\/dalvik-LinearAlloc/
-              },
-              {
-                name: 'Indirect Reference Table',
-                file: /\/dalvik-indirect.ref/
-              },
-              {
-                name: 'Cache',
-                file: /\/dalvik-jit-code-cache/
-              },
-              {
-                name: 'Accounting'
-              }
-            ]
-          },
-          {
-            name: 'Cursor',
-            file: /\/CursorWindow/
-          },
-          {
-            name: 'Ashmem'
-          }
-        ]
-      },
-      {
-        name: 'Native heap',
-        file: /^((\[heap\])|(\[anon:)|(\/dev\/ashmem\/libc malloc)|(\[discounted tracing overhead\])|$)/  // @suppress longLineCheck
-      },
-      {
-        name: 'Stack',
-        file: /^\[stack/
-      },
-      {
-        name: 'Files',
-        file: /\.((((jar)|(apk)|(ttf)|(odex)|(oat)|(arg))$)|(dex)|(so))/,
-        children: [
-          {
-            name: 'so',
-            file: /\.so/
-          },
-          {
-            name: 'jar',
-            file: /\.jar$/
-          },
-          {
-            name: 'apk',
-            file: /\.apk$/
-          },
-          {
-            name: 'ttf',
-            file: /\.ttf$/
-          },
-          {
-            name: 'dex',
-            file: /\.((dex)|(odex$))/
-          },
-          {
-            name: 'oat',
-            file: /\.oat$/
-          },
-          {
-            name: 'art',
-            file: /\.art$/
-          }
-        ]
-      },
-      {
-        name: 'Devices',
-        file: /(^\/dev\/)|(anon_inode:dmabuf)/,
-        children: [
-          {
-            name: 'GPU',
-            file: /\/((nv)|(mali)|(kgsl))/
-          },
-          {
-            name: 'DMA',
-            file: /anon_inode:dmabuf/
-          }
-        ]
-      }
-    ]
+  var CONSTANT_COLUMN_RULES = [
+    {
+      condition: 'Start address',
+      importance: 0,
+      columnConstructor: tr.ui.analysis.StringMemoryColumn
+    }
+  ];
+
+  var VARIABLE_COLUMN_RULES = [
+    {
+      condition: 'Virtual size',
+      importance: 7,
+      columnConstructor: tr.ui.analysis.NumericMemoryColumn
+    },
+    {
+      condition: 'Protection flags',
+      importance: 6,
+      columnConstructor: tr.ui.analysis.StringMemoryColumn
+    },
+    {
+      condition: 'PSS',
+      importance: 5,
+      columnConstructor: tr.ui.analysis.NumericMemoryColumn
+    },
+    {
+      condition: 'Private dirty',
+      importance: 4,
+      columnConstructor: tr.ui.analysis.NumericMemoryColumn
+    },
+    {
+      condition: 'Private clean',
+      importance: 3,
+      columnConstructor: tr.ui.analysis.NumericMemoryColumn
+    },
+    {
+      condition: 'Shared dirty',
+      importance: 2,
+      columnConstructor: tr.ui.analysis.NumericMemoryColumn
+    },
+    {
+      condition: 'Shared clean',
+      importance: 1,
+      columnConstructor: tr.ui.analysis.NumericMemoryColumn
+    },
+    {
+      condition: 'Swapped',
+      importance: 0,
+      columnConstructor: tr.ui.analysis.NumericMemoryColumn
+    }
+  ];
+
+  var BYTE_STAT_COLUMN_MAP = {
+    'proportionalResident': 'PSS',
+    'privateDirtyResident': 'Private dirty',
+    'privateCleanResident': 'Private clean',
+    'sharedDirtyResident': 'Shared dirty',
+    'sharedCleanResident': 'Shared clean',
+    'swapped': 'Swapped'
   };
 
-  /**
-   * Create a tree of nested rows (containing no mmaps) corresponding to a
-   * tree of classification rules.
-   */
-  function createEmptyRuleRow(rule) {
-    var row = {
-      title: rule.name,
-      rule: rule,
-      subRows: []
-    };
-    if (rule.children !== undefined)
-      row.subRows = rule.children.map(createEmptyRuleRow);
-    return row;
-  }
-
   function hexString(address, is64BitAddress) {
     if (address === undefined)
       return undefined;
@@ -220,43 +135,6 @@
     return (hexPadding + address.toString(16)).substr(-hexPadding.length);
   }
 
-  /**
-   * Classify a VM region row and add it to a tree of nested rows.
-   */
-  function classifyRegionRow(ruleRow, regionRow) {
-    var rule = ruleRow.rule;
-    if (rule === undefined ||
-        rule.children === undefined ||
-        rule.children.length === 0) {
-      // Leaf rule (append the region row to the rule's sub-rows).
-      ruleRow.subRows.push(regionRow);
-      return;
-    }
-
-    // Non-leaf rule (classify region row further down the tree).
-    function regionRowMatchesChildRule(childRule) {
-      var fileRegExp = childRule.file;
-      if (fileRegExp === undefined)
-        return true;
-      return fileRegExp.test(regionRow.title);
-    }
-
-    var matchedChildRuleIndex = tr.b.findFirstIndexInArray(
-        rule.children, regionRowMatchesChildRule);
-    if (matchedChildRuleIndex === -1) {
-      // Region belongs to the 'Other' node (created lazily).
-      matchedChildRuleIndex = rule.children.length;
-      if (matchedChildRuleIndex >= ruleRow.subRows.length) {
-        ruleRow.subRows.push({
-          title: 'Other',
-          subRows: []
-        });
-      }
-    }
-
-    classifyRegionRow(ruleRow.subRows[matchedChildRuleIndex], regionRow);
-  }
-
   function pruneEmptyRuleRows(row) {
     if (row.subRows === undefined || row.subRows.length === 0)
       return;
@@ -326,10 +204,7 @@
     },
 
     rebuildPane_: function() {
-      var unclassifiedRows = [];
-      if (this.vmRegions_ !== undefined)
-        unclassifiedRows = this.createUnclassifiedRows_(this.vmRegions_);
-      if (unclassifiedRows.length === 0) {
+      if (this.vmRegions_ === undefined || this.vmRegions_.length === 0) {
         // Show the info text (hide the table).
         this.$.info_text.style.display = 'block';
         this.$.table.style.display = 'none';
@@ -343,9 +218,12 @@
       this.$.info_text.style.display = 'none';
       this.$.table.style.display = 'block';
 
-      var rows = this.classifyRows_(unclassifiedRows);
+      var rows = this.createRows_(this.vmRegions_);
       var columns = this.createColumns_(rows);
 
+      // Note: There is no need to aggregate fields of the VM regions because
+      // the classification tree already takes care of that.
+
       this.$.table.tableRows = rows;
       this.$.table.tableColumns = columns;
 
@@ -356,135 +234,128 @@
       tr.ui.analysis.expandTableRowsRecursively(this.$.table);
     },
 
-    /**
-     * Join VM regions from a chronological list of lists of VM regions:
-     *
-     *   INPUT:
-     *
-     *     [
-     *       [regionA_at_time1, regionB_at_time1, ...],  // Time 1.
-     *       [regionA_at_time2, regionB_at_time2, ...],  // Time 2.
-     *       ...
-     *     ]
-     *
-     *   OUTPUT:
-     *
-     *     [
-     *       [regionA_at_time1, regionA_at_time2, ...],  // Region A.
-     *       [regionB_at_time1, regionB_at_time2, ...],  // Region B.
-     *       ...
-     *     ]
-     *
-     * Two regions (from different timestamps) are considered to refer to the
-     * same region if they have the same mapped file, start address, and
-     * virtual size.
-     */
-    joinRegions_: function(timeToRegionIdToRegion) {
-      // TODO(petrcermak): Investigate if it's worth defining a dedicated
-      // method for this (invertArrayOfArrays) for performance reasons.
-      return tr.b.dictionaryValues(tr.b.invertArrayOfDicts(
-          timeToRegionIdToRegion,
-          function(regionIdToRegion) {
-            return tr.b.arrayToDict(regionIdToRegion, function(region) {
-              return [region.mappedFile, region.startAddress].join('#');
-            });
-          }));
-    },
-
-    createUnclassifiedRows_: function(timeToRegionIdToRegion) {
+    createRows_: function(timeToVmRegionTree) {
       // Determine if any start address is outside the 32-bit range.
-      var is64BitAddress = timeToRegionIdToRegion.some(
-          function(regionIdToRegion) {
-        if (regionIdToRegion === undefined)
+      var is64BitAddress = timeToVmRegionTree.some(function(vmRegionTree) {
+        if (vmRegionTree === undefined)
           return false;
-        return regionIdToRegion.some(function(region) {
+        return vmRegionTree.someRegion(function(region) {
           if (region.startAddress === undefined)
             return false;
           return region.startAddress >= 4294967296 /* 2^32 */;
         });
       });
 
+      return [
+        this.createClassificationNodeRow(timeToVmRegionTree, is64BitAddress)
+      ];
+    },
+
+    createClassificationNodeRow: function(timeToNode, is64BitAddress) {
+      // Get any defined classification node so that we can extract the
+      // properties which don't change over time.
+      var definedNode = tr.b.findFirstInArray(timeToNode);
+
+      // Child node ID (list index) -> Timestamp (list index) ->
+      // VM region classification node.
+      var childNodeIdToTimeToNode = tr.b.dictionaryValues(
+          tr.b.invertArrayOfDicts(timeToNode, function(node) {
+            var children = node.children;
+            if (children === undefined)
+              return undefined;
+            var childMap = {};
+            children.forEach(function(childNode) {
+              if (!childNode.hasRegions)
+                return;
+              childMap[childNode.title] = childNode;
+            });
+            return childMap;
+          }));
+      var childNodeSubRows = childNodeIdToTimeToNode.map(
+          function(timeToChildNode) {
+            return this.createClassificationNodeRow(
+                timeToChildNode, is64BitAddress);
+          }, this);
+
       // Region ID (list index) -> Timestamp (list index) -> VM region.
-      var regionIdToTimeToRegion = this.joinRegions_(timeToRegionIdToRegion);
-
-      return regionIdToTimeToRegion.map(function(timeToRegion) {
-        // Get any defined VM region so that we can extract the properties
-        // which don't change over time.
-        var definedRegion = tr.b.findFirstInArray(timeToRegion);
-
-        // Determine at which timestamps (indices of the current selection)
-        // VM regions were provided.
-        var defined = timeToRegion.map(function(region) {
-          return region !== undefined;
-        });
-
-        // Cells for VM region properties which DON'T change over time.
-        var constantCells = tr.ui.analysis.createCells([definedRegion],
-            function(region) {
-              var attrs = {};
-              tr.ui.analysis.addAttributeIfDefined(
-                  attrs, 'Start address', tr.model.StringAttribute, '',
-                  hexString(region.startAddress, is64BitAddress));
-              return attrs;
+      var regionIdToTimeToRegion = tr.b.dictionaryValues(
+          tr.b.invertArrayOfDicts(timeToNode, function(node) {
+            var regions = node.regions;
+            if (regions === undefined)
+              return undefined;
+            return tr.b.arrayToDict(regions, function(region) {
+              return region.uniqueIdWithinProcess;
             });
+          }));
+      var regionSubRows = regionIdToTimeToRegion.map(function(timeToRegion) {
+        return this.createRegionRow_(timeToRegion, is64BitAddress);
+      }, this);
 
-        // Cells for VM region properties which DO change over time.
-        var variableCells = tr.ui.analysis.createCells(timeToRegion,
-            function(region) {
-              var attrs = {};
-              tr.ui.analysis.addAttributeIfDefined(
-                  attrs, 'Virtual size', tr.model.ScalarAttribute, 'bytes',
-                  region.sizeInBytes);
-              tr.ui.analysis.addAttributeIfDefined(
-                  attrs, 'Protection flags', tr.model.StringAttribute, '',
-                  region.protectionFlagsToString);
-              tr.ui.analysis.addAttributeIfDefined(
-                  attrs, 'PSS', tr.model.ScalarAttribute, 'bytes',
-                  region.byteStats.proportionalResident);
-              tr.ui.analysis.addAttributeIfDefined(
-                  attrs, 'Private dirty', tr.model.ScalarAttribute, 'bytes',
-                  region.byteStats.privateDirtyResident);
-              tr.ui.analysis.addAttributeIfDefined(
-                  attrs, 'Private clean', tr.model.ScalarAttribute, 'bytes',
-                  region.byteStats.privateCleanResident);
-              tr.ui.analysis.addAttributeIfDefined(
-                  attrs, 'Shared dirty', tr.model.ScalarAttribute, 'bytes',
-                  region.byteStats.sharedDirtyResident);
-              tr.ui.analysis.addAttributeIfDefined(
-                  attrs, 'Shared clean', tr.model.ScalarAttribute, 'bytes',
-                  region.byteStats.sharedCleanResident);
-              tr.ui.analysis.addAttributeIfDefined(
-                  attrs, 'Swapped', tr.model.ScalarAttribute, 'bytes',
-                  region.byteStats.swapped);
-              return attrs;
-            });
+      var subRows = childNodeSubRows.concat(regionSubRows);
 
-        return {
-          title: definedRegion.mappedFile || '',
-          defined: defined,
-          constantCells: constantCells,
-          variableCells: variableCells
-        };
+      return {
+        title: definedNode.title,
+        contexts: timeToNode,
+        variableCells: this.createVariableCells_(timeToNode),
+        subRows: subRows
+      };
+    },
+
+    createRegionRow_: function(timeToRegion, is64BitAddress) {
+      // Get any defined VM region so that we can extract the properties which
+      // don't change over time.
+      var definedRegion = tr.b.findFirstInArray(timeToRegion);
+
+      return {
+        title: definedRegion.mappedFile,
+        contexts: timeToRegion,
+        constantCells: this.createConstantCells_(definedRegion, is64BitAddress),
+        variableCells: this.createVariableCells_(timeToRegion)
+      };
+    },
+
+    /**
+     * Create cells for VM region properties which DON'T change over time.
+     *
+     * Note that there are currently no such properties of classification nodes.
+     */
+    createConstantCells_: function(definedRegion, is64BitAddress) {
+      return tr.ui.analysis.createCells([definedRegion], function(region) {
+        var startAddress = region.startAddress;
+        if (startAddress === undefined)
+          return undefined;
+        return { 'Start address': hexString(startAddress, is64BitAddress) };
       });
     },
 
-    classifyRows_: function(unclassifiedRows) {
-      // Create an empty tree structure of rows.
-      var rootRow = createEmptyRuleRow(CLASSIFICATION_RULES);
+    /**
+     * Create cells for VM region (classification node) properties which DO
+     * change over time.
+     */
+    createVariableCells_: function(timeToRegion) {
+      return tr.ui.analysis.createCells(timeToRegion, function(region) {
+          var fields = {};
 
-      // Classify the VM regions.
-      unclassifiedRows.map(classifyRegionRow.bind(undefined, rootRow));
+          var sizeInBytes = region.sizeInBytes;
+          if (sizeInBytes !== undefined) {
+            fields['Virtual size'] = new ScalarNumeric(
+                sizeInBytes_smallerIsBetter, sizeInBytes);
+          }
+          var protectionFlags = region.protectionFlagsToString;
+          if (protectionFlags !== undefined)
+            fields['Protection flags'] = protectionFlags;
 
-      // Prune rule rows with no VM regions.
-      pruneEmptyRuleRows(rootRow);
+          tr.b.iterItems(BYTE_STAT_COLUMN_MAP,
+              function(byteStatName, columnName) {
+                var byteStat = region.byteStats[byteStatName];
+                if (byteStat === undefined)
+                  return;
+                fields[columnName] = new ScalarNumeric(
+                    sizeInBytes_smallerIsBetter, byteStat);
+              });
 
-      // Aggregate attributes of the VM regions.
-      tr.ui.analysis.aggregateTableRowCellsRecursively(
-          rootRow, 'constantCells');
-      tr.ui.analysis.aggregateTableRowCellsRecursively(
-          rootRow, 'variableCells');
-
-      return [rootRow];
+          return fields;
+        });
     },
 
     createColumns_: function(rows) {
@@ -492,15 +363,13 @@
       titleColumn.width = '200px';
 
       var constantColumns = tr.ui.analysis.MemoryColumn.fromRows(
-          rows, 'constantCells');
+          rows, 'constantCells', undefined, CONSTANT_COLUMN_RULES);
       var variableColumns = tr.ui.analysis.MemoryColumn.fromRows(
-          rows, 'variableCells', this.aggregationMode_);
-      var attributeColumns = constantColumns.concat(variableColumns);
-      tr.ui.analysis.MemoryColumn.sortByImportance(attributeColumns,
-          COLUMN_IMPORTANCE_RULES);
-      tr.ui.analysis.MemoryColumn.spaceEqually(attributeColumns);
+          rows, 'variableCells', this.aggregationMode_, VARIABLE_COLUMN_RULES);
+      var fieldColumns = constantColumns.concat(variableColumns);
+      tr.ui.analysis.MemoryColumn.spaceEqually(fieldColumns);
 
-      var columns = [titleColumn].concat(attributeColumns);
+      var columns = [titleColumn].concat(fieldColumns);
       return columns;
     }
   });
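
The rewritten pane walks the pre-built VM region classification tree instead of classifying flat region lists itself. At each level, createClassificationNodeRow uses tr.b.invertArrayOfDicts to turn a time-indexed list of nodes into per-child (and per-region) time series and recurses on those. A minimal sketch of that inversion on plain data; the helper below is a hypothetical stand-in for tr.b.invertArrayOfDicts, written out only to illustrate the shape of the transformation:

    // Hypothetical re-implementation for illustration: turns
    // [timeIndex] -> {key: value} into {key: [timeIndex] -> value}.
    function invertArrayOfDicts(arrayOfDicts) {
      var inverted = {};
      arrayOfDicts.forEach(function(dict, timeIndex) {
        if (dict === undefined)
          return;
        Object.keys(dict).forEach(function(key) {
          if (inverted[key] === undefined)
            inverted[key] = new Array(arrayOfDicts.length);
          inverted[key][timeIndex] = dict[key];
        });
      });
      return inverted;
    }

    // Two timestamps, each mapping region IDs to made-up PSS values:
    var timeToRegions = [
      { 'chrome.so#65536': 8192, 'heap#0': 168 },  // time 1
      { 'chrome.so#65536': 9216 }                  // time 2 (heap missing)
    ];
    invertArrayOfDicts(timeToRegions);
    // => { 'chrome.so#65536': [8192, 9216], 'heap#0': [168, undefined] }

The row cells are then built from these per-entity time series, and start addresses are rendered by hexString as zero-padded hex, 16 digits wide when any address in the selection exceeds 2^32 (hence '0000000000010000' for 65536 in the tests below).
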
diff --git a/catapult/tracing/tracing/ui/analysis/memory_dump_vm_regions_details_pane_test.html b/catapult/tracing/tracing/ui/analysis/memory_dump_vm_regions_details_pane_test.html
index 0cfc893..041b689 100644
--- a/catapult/tracing/tracing/ui/analysis/memory_dump_vm_regions_details_pane_test.html
+++ b/catapult/tracing/tracing/ui/analysis/memory_dump_vm_regions_details_pane_test.html
@@ -7,8 +7,8 @@
 
 <link rel="import" href="/tracing/base/iteration_helpers.html">
 <link rel="import" href="/tracing/core/test_utils.html">
-<link rel="import" href="/tracing/model/attribute.html">
-<link rel="import" href="/tracing/model/memory_allocator_dump.html">
+<link rel="import" href="/tracing/model/memory_dump_test_utils.html">
+<link rel="import" href="/tracing/model/vm_region.html">
 <link rel="import"
     href="/tracing/ui/analysis/memory_dump_sub_view_test_utils.html">
 <link rel="import" href="/tracing/ui/analysis/memory_dump_sub_view_util.html">
@@ -19,15 +19,18 @@
 'use strict';
 
 tr.b.unittest.testSuite(function() {
-  var MemoryAllocatorDump = tr.model.MemoryAllocatorDump;
-  var ScalarAttribute = tr.model.ScalarAttribute;
-  var StringAttribute = tr.model.StringAttribute;
+  var newAllocatorDump = tr.model.MemoryDumpTestUtils.newAllocatorDump;
   var VMRegion = tr.model.VMRegion;
+  var VMRegionClassificationNode = tr.model.VMRegionClassificationNode;
+  var TitleColumn = tr.ui.analysis.TitleColumn;
+  var StringMemoryColumn = tr.ui.analysis.StringMemoryColumn;
+  var NumericMemoryColumn = tr.ui.analysis.NumericMemoryColumn;
   var AggregationMode = tr.ui.analysis.MemoryColumn.AggregationMode;
   var addGlobalMemoryDump = tr.ui.analysis.addGlobalMemoryDump;
   var addProcessMemoryDump = tr.ui.analysis.addProcessMemoryDump;
-  var checkAttributes = tr.ui.analysis.checkAttributes;
-  var checkSizeAttributes = tr.ui.analysis.checkSizeAttributes;
+  var checkSizeNumericFields = tr.ui.analysis.checkSizeNumericFields;
+  var checkStringFields = tr.ui.analysis.checkStringFields;
+  var checkColumns = tr.ui.analysis.checkColumns;
   var isElementDisplayed = tr.ui.analysis.isElementDisplayed;
 
   function createVMRegions() {
@@ -37,7 +40,7 @@
       // First timestamp.
       var gmd1 = addGlobalMemoryDump(model, 42);
       var pmd1 = addProcessMemoryDump(gmd1, process, 42);
-      pmd1.vmRegions = [
+      pmd1.vmRegions = VMRegionClassificationNode.fromRegions([
         VMRegion.fromDict({
           mappedFile: '/lib/chrome.so',
           startAddress: 65536,
@@ -101,20 +104,18 @@
             swapped: 0
           }
         })
-      ];
+      ]);
 
       // This is here so that we could test that tracing is discounted from the
       // 'Native heap' category.
-      var tracingDump = new MemoryAllocatorDump(pmd1, 'tracing');
-      tracingDump.addAttribute('size', new ScalarAttribute('bytes', 500));
-      tracingDump.addAttribute('resident_size',
-          new ScalarAttribute('bytes', 32));
-      pmd1.memoryAllocatorDumps = [tracingDump];
+      pmd1.memoryAllocatorDumps = [
+        newAllocatorDump(pmd1, 'tracing', { size: 500, resident_size: 32 })
+      ];
 
       // Second timestamp.
       var gmd2 = addGlobalMemoryDump(model, 42);
       var pmd2 = addProcessMemoryDump(gmd2, process, 42);
-      pmd2.vmRegions = [
+      pmd2.vmRegions = VMRegionClassificationNode.fromRegions([
         VMRegion.fromDict({
           mappedFile: '/lib/chrome.so',
           startAddress: 65536,
@@ -180,6 +181,15 @@
           }
         }),
         VMRegion.fromDict({
+          mappedFile: '/usr/share/fonts/DejaVuSansMono.ttf',
+          startAddress: 140121259503616,
+          sizeInBytes: 335872,
+          protectionFlags: VMRegion.PROTECTION_FLAG_READ,
+          byteStats: {
+            proportionalResident: 22528
+          }
+        }),
+        VMRegion.fromDict({
           mappedFile: 'another-map',
           startAddress: 52583094233905872,
           sizeInBytes: 1,
@@ -189,7 +199,7 @@
             swapped: 1
           }
         })
-      ];
+      ]);
     });
 
     return model.processes[1].memoryDumps.map(function(pmd) {
@@ -197,54 +207,67 @@
     });
   }
 
-  function checkColumns(columns, expectedAggregationMode) {
-    var EXPECTED_COLUMN_NAMES = [
-      'Mapped file',
-      'Start address',
-      'Virtual size',
-      'Protection flags',
-      'PSS',
-      'Private dirty',
-      'Swapped'
-    ];
-
-    // First two columns don't change value over time (no aggregation).
-    var VARIABLE_CELLS_START_INDEX = 2;
-
-    // Check column names.
-    assert.lengthOf(columns, EXPECTED_COLUMN_NAMES.length);
-    for (var i = 0; i < EXPECTED_COLUMN_NAMES.length; i++)
-      assert.equal(columns[i].title, EXPECTED_COLUMN_NAMES[i]);
-
-    // Check aggregation modes.
-    for (var i = 0; i < EXPECTED_COLUMN_NAMES.length; i++) {
-      assert.strictEqual(columns[i].aggregationMode,
-          i < VARIABLE_CELLS_START_INDEX ? undefined : expectedAggregationMode);
-    }
-  }
+  var EXPECTED_COLUMNS = [
+    { title: 'Mapped file', type: TitleColumn, noAggregation: true },
+    { title: 'Start address', type: StringMemoryColumn, noAggregation: true },
+    { title: 'Virtual size', type: NumericMemoryColumn },
+    { title: 'Protection flags', type: StringMemoryColumn },
+    { title: 'PSS', type: NumericMemoryColumn },
+    { title: 'Private dirty', type: NumericMemoryColumn },
+    { title: 'Swapped', type: NumericMemoryColumn }
+  ];
 
   function checkRow(columns, row, expectedTitle, expectedStartAddress,
       expectedVirtualSize, expectedProtectionFlags,
       expectedProportionalResidentValues, expectedPrivateDirtyResidentValues,
-      expectedSwappedValues, expectedSubRowCount, expectedDefinedValues) {
+      expectedSwappedValues, expectedSubRowCount, expectedContexts) {
     assert.equal(columns[0].formatTitle(row), expectedTitle);
-    checkAttributes(row, columns[1], expectedStartAddress, StringAttribute, '');
-    checkSizeAttributes(row, columns[2], expectedVirtualSize);
-    checkAttributes(row, columns[3], expectedProtectionFlags, StringAttribute,
-        '');
-    checkSizeAttributes(row, columns[4], expectedProportionalResidentValues);
-    checkSizeAttributes(row, columns[5], expectedPrivateDirtyResidentValues);
-    checkSizeAttributes(row, columns[6], expectedSwappedValues);
+    checkStringFields(row, columns[1], expectedStartAddress);
+    checkSizeNumericFields(row, columns[2], expectedVirtualSize);
+    checkStringFields(row, columns[3], expectedProtectionFlags);
+    checkSizeNumericFields(row, columns[4], expectedProportionalResidentValues);
+    checkSizeNumericFields(row, columns[5], expectedPrivateDirtyResidentValues);
+    checkSizeNumericFields(row, columns[6], expectedSwappedValues);
 
     if (expectedSubRowCount === undefined)
       assert.isUndefined(row.subRows);
     else
       assert.lengthOf(row.subRows, expectedSubRowCount);
 
-    if (expectedDefinedValues)
-      assert.deepEqual(tr.b.asArray(row.defined), expectedDefinedValues);
+    if (typeof expectedContexts === 'function')
+      expectedContexts(row.contexts);
+    else if (expectedContexts !== undefined)
+      assert.deepEqual(tr.b.asArray(row.contexts), expectedContexts);
     else
-      assert.isUndefined(row.defined);
+      assert.isUndefined(row.contexts);
+  }
+
+  function genericMatcher(callback, defined) {
+    return function(actualValues) {
+      assert.lengthOf(actualValues, defined.length);
+      for (var i = 0; i < defined.length; i++) {
+        var actualValue = actualValues[i];
+        if (defined[i])
+          callback(actualValue);
+        else
+          assert.isUndefined(actualValue);
+      }
+    };
+  }
+
+  function vmRegionsMatcher(expectedMappedFile, expectedStartAddress, defined) {
+    return genericMatcher(function(actualRegion) {
+      assert.instanceOf(actualRegion, VMRegion);
+      assert.strictEqual(actualRegion.mappedFile, expectedMappedFile);
+      assert.strictEqual(actualRegion.startAddress, expectedStartAddress);
+    }, defined);
+  }
+
+  function classificationNodesMatcher(expectedTitle, defined) {
+    return genericMatcher(function(actualNode) {
+      assert.instanceOf(actualNode, VMRegionClassificationNode);
+      assert.strictEqual(actualNode.title, expectedTitle);
+    }, defined);
   }
 
   test('instantiate_empty', function() {
@@ -272,52 +295,60 @@
 
     var table = viewEl.$.table;
     var columns = table.tableColumns;
-    checkColumns(columns, undefined /* no aggregation */);
+    checkColumns(columns, EXPECTED_COLUMNS, undefined /* no aggregation */);
     var rows = table.tableRows;
     assert.lengthOf(rows, 1);
 
     // Check the rows of the table.
     var totalRow = rows[0];
-    checkRow(columns, totalRow, 'Total', [undefined], [4833935160], [undefined],
-        [8460], [64], [0], 3, undefined);
+    checkRow(columns, totalRow, 'Total', undefined, [4833935160], undefined,
+        [8460], [64], [0], 3, vmRegions);
 
     var androidRow = totalRow.subRows[0];
-    checkRow(columns, androidRow, 'Android', [undefined], [100], [undefined],
-        [100], [0], [0], 1, undefined);
+    checkRow(columns, androidRow, 'Android', undefined, [100], undefined,
+        [100], [0], [0], 1, classificationNodesMatcher('Android', [true]));
 
     var javaRuntimeRow = androidRow.subRows[0];
-    checkRow(columns, javaRuntimeRow, 'Java runtime', [undefined], [100],
-        [undefined], [100], [0], [0], 1, undefined);
+    checkRow(columns, javaRuntimeRow, 'Java runtime', undefined, [100],
+        undefined, [100], [0], [0], 1,
+        classificationNodesMatcher('Java runtime', [true]));
 
     var spacesRow = javaRuntimeRow.subRows[0];
-    checkRow(columns, spacesRow, 'Spaces', [undefined], [100], [undefined],
-        [100], [0], [0], 1, undefined);
+    checkRow(columns, spacesRow, 'Spaces', undefined, [100], undefined, [100],
+        [0], [0], 1, classificationNodesMatcher('Spaces', [true]));
 
     var nativeHeapRow = totalRow.subRows[1];
-    checkRow(columns, nativeHeapRow, 'Native heap', [undefined], [4294966996],
-        [undefined], [168], [64], [0], 4, undefined);
+    checkRow(columns, nativeHeapRow, 'Native heap', undefined, [4294966996],
+        undefined, [168], [64], [0], 4,
+        classificationNodesMatcher('Native heap', [true]));
 
     var discountedTracingOverheadRow = nativeHeapRow.subRows[3];
     checkRow(columns, discountedTracingOverheadRow,
         '[discounted tracing overhead]', undefined, [-500], undefined, [-32],
-        [-32], undefined, undefined, [true]);
+        [-32], undefined, undefined,
+        vmRegionsMatcher('[discounted tracing overhead]', undefined, [true]));
 
     var filesRow = totalRow.subRows[2];
-    checkRow(columns, filesRow, 'Files', [undefined], [538968064], [undefined],
-        [8192], undefined, undefined, 1, undefined);
+    checkRow(columns, filesRow, 'Files', undefined, [538968064], undefined,
+        [8192], undefined, undefined, 1,
+        classificationNodesMatcher('Files', [true]));
 
     var soRow = filesRow.subRows[0];
-    checkRow(columns, soRow, 'so', [undefined], [538968064], [undefined],
-        [8192], undefined, undefined, 2, undefined);
+    checkRow(columns, soRow, 'so', undefined, [538968064], undefined,
+        [8192], undefined, undefined, 2,
+        classificationNodesMatcher('so', [true]));
 
     var mmapChromeRow = soRow.subRows[0];
     checkRow(columns, mmapChromeRow, '/lib/chrome.so', ['0000000000010000'],
-        [536870912], ['r-x'], [8192], undefined, undefined, undefined, [true]);
+        [536870912], ['r-xp'], [8192], undefined, undefined, undefined,
+        vmRegionsMatcher('/lib/chrome.so', 65536, [true]));
 
     var mmapLibX11Row = soRow.subRows[1];
     checkRow(columns, mmapLibX11Row,
         '/usr/lib/x86_64-linux-gnu/libX11.so.6.3.0', ['00007f996fd80000'],
-        [2097152], ['---'], [0], undefined, undefined, undefined, [true]);
+        [2097152], ['---p'], [0], undefined, undefined, undefined,
+        vmRegionsMatcher('/usr/lib/x86_64-linux-gnu/libX11.so.6.3.0',
+            140296983150592, [true]));
   });
 
   test('instantiate_multipleDiff', function() {
@@ -336,68 +367,76 @@
 
     var table = viewEl.$.table;
     var columns = table.tableColumns;
-    checkColumns(columns, AggregationMode.DIFF);
+    checkColumns(columns, EXPECTED_COLUMNS, AggregationMode.DIFF);
     var rows = table.tableRows;
     assert.lengthOf(rows, 1);
 
     // Check the rows of the table.
     var totalRow = rows[0];
-    checkRow(columns, totalRow, 'Total', [undefined], [4833935160, 5368709421],
-        [undefined, undefined], [8460, 19557], [64, 197], [0, 33], 4,
-        undefined);
+    checkRow(columns, totalRow, 'Total', undefined, [4833935160, 5369045293],
+        undefined, [8460, 42085], [64, 197], [0, 33], 4, vmRegions);
 
     var androidRow = totalRow.subRows[0];
-    checkRow(columns, androidRow, 'Android', [undefined], [100, 100],
-        [undefined, undefined], [100, 0], [0, 100], [0, 0], 1, undefined);
+    checkRow(columns, androidRow, 'Android', undefined, [100, 100], undefined,
+        [100, 0], [0, 100], [0, 0], 1,
+        classificationNodesMatcher('Android', [true, true]));
 
     var javaRuntimeRow = androidRow.subRows[0];
-    checkRow(columns, javaRuntimeRow, 'Java runtime', [undefined], [100, 100],
-        [undefined, undefined], [100, 0], [0, 100], [0, 0], 1, undefined);
+    checkRow(columns, javaRuntimeRow, 'Java runtime', undefined, [100, 100],
+        undefined, [100, 0], [0, 100], [0, 0], 1,
+        classificationNodesMatcher('Java runtime', [true, true]));
 
     var spacesRow = javaRuntimeRow.subRows[0];
-    checkRow(columns, spacesRow, 'Spaces', [undefined], [100, 100],
-        [undefined, undefined], [100, 0], [0, 100], [0, 0], 1, undefined);
+    checkRow(columns, spacesRow, 'Spaces', undefined, [100, 100], undefined,
+        [100, 0], [0, 100], [0, 0], 1,
+        classificationNodesMatcher('Spaces', [true, true]));
 
     var nativeHeapRow = totalRow.subRows[1];
-    checkRow(columns, nativeHeapRow, 'Native heap', [undefined],
-        [4294966996, 4294967496], [undefined, undefined], [168, 100], [64, 96],
-        [0, 32], 4, undefined);
+    checkRow(columns, nativeHeapRow, 'Native heap', undefined,
+        [4294966996, 4294967496], undefined, [168, 100], [64, 96], [0, 32], 4,
+        classificationNodesMatcher('Native heap', [true, true]));
 
     var discountedTracingOverheadRow = nativeHeapRow.subRows[3];
     checkRow(columns, discountedTracingOverheadRow,
         '[discounted tracing overhead]', undefined, [-500, undefined],
         undefined, [-32, undefined], [-32, undefined], undefined, undefined,
-        [true, undefined]);
+        vmRegionsMatcher('[discounted tracing overhead]', undefined,
+            [true, false]));
 
     var filesRow = totalRow.subRows[2];
-    checkRow(columns, filesRow, 'Files', [undefined], [538968064, 1073741824],
-        [undefined, undefined], [8192, 19456], undefined, undefined, 1,
-        undefined);
+    checkRow(columns, filesRow, 'Files', undefined, [538968064, 1074077696],
+        undefined, [8192, 41984], undefined, undefined, 2,
+        classificationNodesMatcher('Files', [true, true]));
 
     var soRow = filesRow.subRows[0];
-    checkRow(columns, soRow, 'so', [undefined], [538968064, 1073741824],
-        [undefined, undefined], [8192, 19456], undefined, undefined, 3,
-        undefined);
+    checkRow(columns, soRow, 'so', undefined, [538968064, 1073741824],
+        undefined, [8192, 19456], undefined, undefined, 3,
+        classificationNodesMatcher('so', [true, true]));
 
     var mmapChromeRow = soRow.subRows[0];
     checkRow(columns, mmapChromeRow, '/lib/chrome.so', ['0000000000010000'],
-        [536870912, 536870912], ['r-x', 'r-x'], [8192, 9216], undefined,
-        undefined, undefined, [true, true]);
+        [536870912, 536870912], ['r-xp', 'r-xp'], [8192, 9216], undefined,
+        undefined, undefined,
+        vmRegionsMatcher('/lib/chrome.so', 65536, [true, true]));
 
     var mmapLibX11Row = soRow.subRows[1];
     checkRow(columns, mmapLibX11Row,
         '/usr/lib/x86_64-linux-gnu/libX11.so.6.3.0', ['00007f996fd80000'],
-        [2097152, undefined], ['---', undefined], [0, undefined], undefined,
-        undefined, undefined, [true, undefined]);
+        [2097152, undefined], ['---p', undefined], [0, undefined], undefined,
+        undefined, undefined,
+        vmRegionsMatcher('/usr/lib/x86_64-linux-gnu/libX11.so.6.3.0',
+            140296983150592, [true, false]));
 
     var otherRow = totalRow.subRows[3];
-    checkRow(columns, otherRow, 'Other', [undefined], [undefined, 1], undefined,
-        [undefined, 1], [undefined, 1], [undefined, 1], 1, undefined);
+    checkRow(columns, otherRow, 'Other', undefined, [undefined, 1], undefined,
+        [undefined, 1], [undefined, 1], [undefined, 1], 1,
+        classificationNodesMatcher('Other', [false, true]));
 
     var anotherMapRow = otherRow.subRows[0];
     checkRow(columns, anotherMapRow, 'another-map', ['00bad00bad00bad0'],
         [undefined, 1], undefined, [undefined, 1], [undefined, 1],
-        [undefined, 1], undefined, [undefined, true]);
+        [undefined, 1], undefined,
+        vmRegionsMatcher('another-map', 52583094233905872, [false, true]));
   });
 
   test('instantiate_multipleMax', function() {
@@ -417,7 +456,7 @@
     // Just check that the aggregation mode was propagated to the columns.
     var table = viewEl.$.table;
     var columns = table.tableColumns;
-    checkColumns(columns, AggregationMode.MAX);
+    checkColumns(columns, EXPECTED_COLUMNS, AggregationMode.MAX);
     var rows = table.tableRows;
     assert.lengthOf(rows, 1);
   });
@@ -440,7 +479,7 @@
     // Just check that the table has the right shape.
     var table = viewEl.$.table;
     var columns = table.tableColumns;
-    checkColumns(columns, AggregationMode.DIFF);
+    checkColumns(columns, EXPECTED_COLUMNS, AggregationMode.DIFF);
     var rows = table.tableRows;
     assert.lengthOf(rows, 1);
   });
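
In the updated test, checkRow's last argument accepts either a plain array of expected contexts or a matcher function, and genericMatcher builds such a function from a per-timestamp "defined" mask. A small standalone illustration, assuming it runs inside the suite above where assert and genericMatcher are in scope (the values are made up):

    // Asserts that exactly the timestamps marked true carry a value and runs
    // the callback on each defined value.
    var matcher = genericMatcher(function(actualValue) {
      assert.strictEqual(actualValue, 42);
    }, [true, false]);

    matcher([42, undefined]);  // passes: defined at time 1, missing at time 2.
    matcher([42, 42]);         // throws: time 2 was expected to be undefined.
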
diff --git a/catapult/tracing/tracing/ui/analysis/multi_event_details_table.html b/catapult/tracing/tracing/ui/analysis/multi_event_details_table.html
index 514e8e5..86116e1 100644
--- a/catapult/tracing/tracing/ui/analysis/multi_event_details_table.html
+++ b/catapult/tracing/tracing/ui/analysis/multi_event_details_table.html
@@ -7,11 +7,11 @@
 
 <link rel="import" href="/tracing/base/base.html">
 <link rel="import" href="/tracing/model/event_set.html">
-<link rel="import" href="/tracing/ui/base/table.html">
 <link rel="import" href="/tracing/ui/analysis/generic_object_view.html">
 <link rel="import" href="/tracing/ui/analysis/multi_event_summary.html">
-<link rel="import" href="/tracing/ui/units/time_duration_span.html">
-<link rel="import" href="/tracing/ui/units/time_stamp_span.html">
+<link rel="import" href="/tracing/ui/base/table.html">
+<link rel="import" href="/tracing/value/ui/scalar_span.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <polymer-element name='tr-ui-a-multi-event-details-table'>
   <template>
@@ -146,7 +146,7 @@
       else
         colWidthPercentage = '33.3333%';
 
-      var timeSpanConfig = {ownerDocument: this.ownerDocument};
+      var ownerDocument = this.ownerDocument;
       var columns = [];
 
       columns.push({
@@ -160,8 +160,10 @@
           linkEl.setSelectionAndContent(function() {
               return new tr.model.EventSet(row.event);
           });
-          linkEl.appendChild(tr.ui.units.createTimeStampSpan(
-              row.start, timeSpanConfig));
+          linkEl.appendChild(tr.v.ui.createScalarSpan(row.start, {
+            unit: tr.v.Unit.byName.timeStampInMs,
+            ownerDocument: ownerDocument
+          }));
           return linkEl;
         },
         width: '350px',
@@ -174,8 +176,10 @@
         columns.push({
           title: 'Wall Duration (ms)',
           value: function(row) {
-            return tr.ui.units.createTimeDurationSpan(
-                row.duration, timeSpanConfig);
+            return tr.v.ui.createScalarSpan(row.duration, {
+              unit: tr.v.Unit.byName.timeDurationInMs,
+              ownerDocument: ownerDocument
+            });
           },
           width: '<updated further down>',
           cmp: function(rowA, rowB) {
@@ -188,8 +192,10 @@
         columns.push({
           title: 'CPU Duration (ms)',
           value: function(row) {
-            return tr.ui.units.createTimeDurationSpan(
-                row.cpuDuration, timeSpanConfig);
+            return tr.v.ui.createScalarSpan(row.cpuDuration, {
+              unit: tr.v.Unit.byName.timeDurationInMs,
+              ownerDocument: ownerDocument
+            });
           },
           width: '<updated further down>',
           cmp: function(rowA, rowB) {
@@ -202,8 +208,10 @@
         columns.push({
           title: 'Self time (ms)',
           value: function(row) {
-            return tr.ui.units.createTimeDurationSpan(
-                row.selfTime, timeSpanConfig);
+            return tr.v.ui.createScalarSpan(row.selfTime, {
+              unit: tr.v.Unit.byName.timeDurationInMs,
+              ownerDocument: ownerDocument
+            });
           },
           width: '<updated further down>',
           cmp: function(rowA, rowB) {
@@ -216,8 +224,10 @@
         columns.push({
           title: 'CPU Self Time (ms)',
           value: function(row) {
-            return tr.ui.units.createTimeDurationSpan(
-                row.cpuSelfTime, timeSpanConfig);
+            return tr.v.ui.createScalarSpan(row.cpuSelfTime, {
+              unit: tr.v.Unit.byName.timeDurationInMs,
+              ownerDocument: ownerDocument
+            });
           },
           width: '<updated further down>',
           cmp: function(rowA, rowB) {
@@ -257,7 +267,7 @@
         if (hasTotal) {
           colDesc.cmp = function(rowA, rowB) {
             return rowA.args[argKey] - rowB.args[argKey];
-          }
+          };
         }
         columns.push(colDesc);
       });
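
The time-span helpers (tr.ui.units.createTimeStampSpan and createTimeDurationSpan) are replaced throughout these panes by the generic tr.v.ui.createScalarSpan, which takes the unit explicitly. A minimal sketch of the new call shape; the host element and the value are made up, while the value/unit properties are the ones the table tests further down check:

    var span = tr.v.ui.createScalarSpan(12.5, {
      unit: tr.v.Unit.byName.timeDurationInMs,
      ownerDocument: document  // the panes pass this.ownerDocument here
    });
    // span is a <tr-v-ui-scalar-span> with span.value === 12.5 and
    // span.unit === tr.v.Unit.byName.timeDurationInMs.
    someHostEl.appendChild(span);  // someHostEl: any container (hypothetical)
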
diff --git a/catapult/tracing/tracing/ui/analysis/multi_event_sub_view_test.html b/catapult/tracing/tracing/ui/analysis/multi_event_sub_view_test.html
index e6f34f3..5c26a86 100644
--- a/catapult/tracing/tracing/ui/analysis/multi_event_sub_view_test.html
+++ b/catapult/tracing/tracing/ui/analysis/multi_event_sub_view_test.html
@@ -19,7 +19,6 @@
   var Thread = tr.model.Thread;
   var EventSet = tr.model.EventSet;
   var newSliceEx = tr.c.TestUtils.newSliceEx;
-  var newSliceCategory = tr.c.TestUtils.newSliceCategory;
   var Slice = tr.model.Slice;
 
   test('differentTitles', function() {
diff --git a/catapult/tracing/tracing/ui/analysis/multi_event_summary_table.html b/catapult/tracing/tracing/ui/analysis/multi_event_summary_table.html
index ca48128..06ec9cb 100644
--- a/catapult/tracing/tracing/ui/analysis/multi_event_summary_table.html
+++ b/catapult/tracing/tracing/ui/analysis/multi_event_summary_table.html
@@ -12,7 +12,8 @@
 <link rel="import" href="/tracing/ui/analysis/analysis_link.html">
 <link rel="import" href="/tracing/ui/analysis/multi_event_summary.html">
 <link rel="import" href="/tracing/ui/base/table.html">
-<link rel="import" href="/tracing/ui/units/time_duration_span.html">
+<link rel="import" href="/tracing/value/ui/scalar_span.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 </script>
 <polymer-element name='tr-ui-a-multi-event-summary-table'>
@@ -78,7 +79,8 @@
         columns.push({
           title: 'Wall Duration',
           value: function(row) {
-            return tr.ui.units.createTimeDurationSpan(row.duration, {
+            return tr.v.ui.createScalarSpan(row.duration, {
+              unit: tr.v.Unit.byName.timeDurationInMs,
               total: row.totalsRow ? undefined : maxValues.duration,
               ownerDocument: ownerDocument,
               rightAlign: true
@@ -95,7 +97,8 @@
         columns.push({
           title: 'CPU Duration',
           value: function(row) {
-            return tr.ui.units.createTimeDurationSpan(row.cpuDuration, {
+            return tr.v.ui.createScalarSpan(row.cpuDuration, {
+              unit: tr.v.Unit.byName.timeDurationInMs,
               total: row.totalsRow ? undefined : maxValues.cpuDuration,
               ownerDocument: ownerDocument,
               rightAlign: true
@@ -112,7 +115,8 @@
         columns.push({
           title: 'Self time',
           value: function(row) {
-            return tr.ui.units.createTimeDurationSpan(row.selfTime, {
+            return tr.v.ui.createScalarSpan(row.selfTime, {
+              unit: tr.v.Unit.byName.timeDurationInMs,
               total: row.totalsRow ? undefined : maxValues.selfTime,
               ownerDocument: ownerDocument,
               rightAlign: true
@@ -129,7 +133,8 @@
         columns.push({
           title: 'CPU Self Time',
           value: function(row) {
-            return tr.ui.units.createTimeDurationSpan(row.cpuSelfTime, {
+            return tr.v.ui.createScalarSpan(row.cpuSelfTime, {
+              unit: tr.v.Unit.byName.timeDurationInMs,
               total: row.totalsRow ? undefined : maxValues.cpuSelfTime,
               ownerDocument: ownerDocument,
               rightAlign: true
diff --git a/catapult/tracing/tracing/ui/analysis/multi_interaction_record_sub_view.html b/catapult/tracing/tracing/ui/analysis/multi_interaction_record_sub_view.html
deleted file mode 100644
index f698419..0000000
--- a/catapult/tracing/tracing/ui/analysis/multi_interaction_record_sub_view.html
+++ /dev/null
@@ -1,50 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/model/event_set.html">
-<link rel="import" href="/tracing/ui/analysis/multi_event_sub_view.html">
-<link rel="import" href="/tracing/ui/analysis/analysis_sub_view.html">
-
-<polymer-element name="tr-ui-a-multi-interaction-record-sub-view"
-    extends="tr-ui-a-sub-view">
-  <script>
-  'use strict';
-
-  Polymer({
-    created: function() {
-      this.currentSelection_ = undefined;
-    },
-
-    set selection(selection) {
-      this.currentSelection_ = selection;
-      this.textContent = '';
-      var realView = document.createElement('tr-ui-a-multi-event-sub-view');
-
-      this.appendChild(realView);
-      realView.setSelectionWithoutErrorChecks(selection);
-
-      this.currentSelection_ = selection;
-    },
-
-    get selection() {
-      return this.currentSelection_;
-    },
-
-    get relatedEventsToHighlight() {
-      if (!this.currentSelection_)
-        return undefined;
-      var selection = new tr.model.EventSet();
-      this.currentSelection_.forEach(function(ir) {
-        ir.associatedEvents.forEach(function(event) {
-          selection.push(event);
-        });
-      });
-      return selection;
-    }
-  });
-  </script>
-</polymer-element>
diff --git a/catapult/tracing/tracing/ui/analysis/multi_object_sub_view.html b/catapult/tracing/tracing/ui/analysis/multi_object_sub_view.html
index 4e72e68..8241ab1 100644
--- a/catapult/tracing/tracing/ui/analysis/multi_object_sub_view.html
+++ b/catapult/tracing/tracing/ui/analysis/multi_object_sub_view.html
@@ -6,11 +6,12 @@
 -->
 
 <link rel="import" href="/tracing/model/event_set.html">
-<link rel="import" href="/tracing/ui/base/dom_helpers.html">
-<link rel="import" href="/tracing/ui/base/table.html">
-<link rel="import" href="/tracing/ui/units/time_stamp_span.html">
 <link rel="import" href="/tracing/ui/analysis/analysis_link.html">
 <link rel="import" href="/tracing/ui/analysis/analysis_sub_view.html">
+<link rel="import" href="/tracing/ui/base/dom_helpers.html">
+<link rel="import" href="/tracing/ui/base/table.html">
+<link rel="import" href="/tracing/value/ui/scalar_span.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <polymer-element name="tr-ui-a-multi-object-sub-view"
     extends="tr-ui-a-sub-view">
@@ -44,17 +45,20 @@
       var objectEvents = tr.b.asArray(selection).sort(
           tr.b.Range.compareByMinTimes);
 
-      var timeSpanConfig = {ownerDocument: this.ownerDocument};
+      var timeSpanConfig = {
+        unit: tr.v.Unit.byName.timeStampInMs,
+        ownerDocument: this.ownerDocument
+      };
       var table = this.$.content;
       table.tableColumns = [
         {
           title: 'First',
           value: function(event) {
             if (event instanceof tr.model.ObjectSnapshot)
-              return tr.ui.units.createTimeStampSpan(event.ts, timeSpanConfig);
+              return tr.v.ui.createScalarSpan(event.ts, timeSpanConfig);
 
             var spanEl = document.createElement('span');
-            spanEl.appendChild(tr.ui.units.createTimeStampSpan(
+            spanEl.appendChild(tr.v.ui.createScalarSpan(
                 event.creationTs, timeSpanConfig));
             spanEl.appendChild(tr.ui.b.createSpan({
                 textContent: '-',
@@ -62,7 +66,7 @@
                 marginRight: '4px'
             }));
             if (event.deletionTs != Number.MAX_VALUE) {
-              spanEl.appendChild(tr.ui.units.createTimeStampSpan(
+              spanEl.appendChild(tr.v.ui.createScalarSpan(
                   event.deletionTs, timeSpanConfig));
             }
             return spanEl;
diff --git a/catapult/tracing/tracing/ui/analysis/multi_sample_sub_view.html b/catapult/tracing/tracing/ui/analysis/multi_sample_sub_view.html
index 0bf02f8..791ef9f 100644
--- a/catapult/tracing/tracing/ui/analysis/multi_sample_sub_view.html
+++ b/catapult/tracing/tracing/ui/analysis/multi_sample_sub_view.html
@@ -5,11 +5,11 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/tracing/base/units/units.html">
+<link rel="import" href="/tracing/base/multi_dimensional_view.html">
 <link rel="import" href="/tracing/ui/analysis/analysis_sub_view.html">
-<link rel="import" href="/tracing/ui/analysis/stack_frame_tree.html">
 <link rel="import" href="/tracing/ui/base/table.html">
-<link rel="import" href="/tracing/ui/units/scalar_span.html">
+<link rel="import" href="/tracing/value/ui/scalar_span.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <polymer-element name="tr-ui-a-multi-sample-sub-view"
     extends="tr-ui-a-sub-view">
@@ -53,11 +53,23 @@
       ready: function() {
         var viewSelector = tr.ui.b.createSelector(
             this, 'viewOption', 'tracing.ui.analysis.multi_sample_sub_view',
-            'TOPDOWNVIEW',
-            [{label: 'Tree (Top Down)', value: 'TOPDOWNVIEW'},
-             {label: 'Heavy (Bottom Up)', value: 'BOTTOMUPVIEW'}]
-        );
+            tr.b.MultiDimensionalViewType.TOP_DOWN_TREE_VIEW,
+            [
+              {
+                label: 'Top-down (Tree)',
+                value: tr.b.MultiDimensionalViewType.TOP_DOWN_TREE_VIEW
+              },
+              {
+                label: 'Top-down (Heavy)',
+                value: tr.b.MultiDimensionalViewType.TOP_DOWN_HEAVY_VIEW
+              },
+              {
+                label: 'Bottom-up (Heavy)',
+                value: tr.b.MultiDimensionalViewType.BOTTOM_UP_HEAVY_VIEW
+              }
+            ]);
         this.$.control.appendChild(viewSelector);
+        this.$.table.selectionMode = tr.ui.b.TableFormat.SelectionMode.ROW;
       },
 
       get selection() {
@@ -79,23 +91,15 @@
       },
 
       createSamplingSummary_: function(selection, viewOption) {
-        var root = new tr.ui.analysis.StackFrameTreeNode(
-            '(root)', undefined /* frame */);
+        var builder = new tr.b.MultiDimensionalViewBuilder(1 /* dimensions */);
         var samples = selection.getEventsOrganizedByBaseType().sample;
 
         samples.forEach(function(sample) {
-          root.addStackTrace(sample.stackTrace, 1);
+          builder.addPath([sample.getUserFriendlyStackTrace().reverse()],
+              1, tr.b.MultiDimensionalViewBuilder.ValueKind.SELF);
         });
 
-        switch (viewOption) {
-          case 'TOPDOWNVIEW':
-            return root;
-          case 'BOTTOMUPVIEW':
-            return root.convertToBottomUpView();
-          default:
-            throw new Error(
-                'Unknown sampling summary view option: \'' + viewOption + '\'');
-        }
+        return builder.buildView(viewOption);
       },
 
       updateContents_: function() {
@@ -115,10 +119,10 @@
           this.createSamplesColumn_('Self'),
           {
             title: 'Symbol',
-            value: function(row) { return row.title; },
+            value: function(row) { return row.title[0]; },
             width: '250px',
             cmp: function(a, b) {
-              return a.title.localeCompare(b.title);
+              return a.title[0].localeCompare(b.title[0]);
             },
             showExpandButtons: true
           }
@@ -138,10 +142,10 @@
           value: function(row) {
             var percent = row[field] / samplingDataTotal;
 
-            var span = document.createElement('tr-ui-u-scalar-span');
+            var span = document.createElement('tr-v-ui-scalar-span');
             span.value = (percent * 100).toFixed(2);
             span.percentage = percent;
-            span.unit = tr.b.u.Units.unitlessNumber;
+            span.unit = tr.v.Unit.byName.unitlessNumber;
             return span;
 
           }.bind(this),
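
multi_sample_sub_view now builds its tree and heavy views through tr.b.MultiDimensionalViewBuilder rather than the removed StackFrameTree. A sketch of the builder calls as they appear in this hunk, with made-up single-dimension stack paths:

    var builder = new tr.b.MultiDimensionalViewBuilder(1 /* dimensions */);
    // The .reverse() on getUserFriendlyStackTrace() above suggests paths are
    // fed root-to-leaf; each sample contributes a SELF value of 1.
    builder.addPath([['main', 'render', 'paint']], 1,
        tr.b.MultiDimensionalViewBuilder.ValueKind.SELF);
    builder.addPath([['main', 'render', 'layout']], 1,
        tr.b.MultiDimensionalViewBuilder.ValueKind.SELF);
    var topDownTree = builder.buildView(
        tr.b.MultiDimensionalViewType.TOP_DOWN_TREE_VIEW);
    // Node titles are one-element arrays (one entry per dimension), which is
    // why the Symbol column reads row.title[0].
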
diff --git a/catapult/tracing/tracing/ui/analysis/multi_user_expectation_sub_view.html b/catapult/tracing/tracing/ui/analysis/multi_user_expectation_sub_view.html
new file mode 100644
index 0000000..b2f02eb
--- /dev/null
+++ b/catapult/tracing/tracing/ui/analysis/multi_user_expectation_sub_view.html
@@ -0,0 +1,50 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/model/event_set.html">
+<link rel="import" href="/tracing/ui/analysis/multi_event_sub_view.html">
+<link rel="import" href="/tracing/ui/analysis/analysis_sub_view.html">
+
+<polymer-element name="tr-ui-a-multi-user-expectation-sub-view"
+    extends="tr-ui-a-sub-view">
+  <script>
+  'use strict';
+
+  Polymer({
+    created: function() {
+      this.currentSelection_ = undefined;
+    },
+
+    set selection(selection) {
+      this.currentSelection_ = selection;
+      this.textContent = '';
+      var realView = document.createElement('tr-ui-a-multi-event-sub-view');
+
+      this.appendChild(realView);
+      realView.setSelectionWithoutErrorChecks(selection);
+
+      this.currentSelection_ = selection;
+    },
+
+    get selection() {
+      return this.currentSelection_;
+    },
+
+    get relatedEventsToHighlight() {
+      if (!this.currentSelection_)
+        return undefined;
+      var selection = new tr.model.EventSet();
+      this.currentSelection_.forEach(function(ir) {
+        ir.associatedEvents.forEach(function(event) {
+          selection.push(event);
+        });
+      });
+      return selection;
+    }
+  });
+  </script>
+</polymer-element>
diff --git a/catapult/tracing/tracing/ui/analysis/power_sample_summary_table.html b/catapult/tracing/tracing/ui/analysis/power_sample_summary_table.html
index 99c4ec8..5c0eb10 100644
--- a/catapult/tracing/tracing/ui/analysis/power_sample_summary_table.html
+++ b/catapult/tracing/tracing/ui/analysis/power_sample_summary_table.html
@@ -5,10 +5,9 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/tracing/base/units/units.html">
 <link rel="import" href="/tracing/model/event_set.html">
 <link rel="import" href="/tracing/ui/base/table.html">
-<link rel="import" href="/tracing/ui/units/time_stamp_span.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <polymer-element name="tr-ui-a-power-sample-summary-table">
   <template>
@@ -24,21 +23,21 @@
           title: 'Min power',
           width: '100px',
           value: function(row) {
-            return tr.b.u.Units.powerInWatts.format(row.min / 1000.0);
+            return tr.v.Unit.byName.powerInWatts.format(row.min / 1000.0);
           }
         },
         {
           title: 'Max power',
           width: '100px',
           value: function(row) {
-            return tr.b.u.Units.powerInWatts.format(row.max / 1000.0);
+            return tr.v.Unit.byName.powerInWatts.format(row.max / 1000.0);
           }
         },
         {
           title: 'Time-weighted average',
           width: '100px',
           value: function(row) {
-            return tr.b.u.Units.powerInWatts.format(
+            return tr.v.Unit.byName.powerInWatts.format(
                 row.timeWeightedAverage / 1000.0);
           }
         },
@@ -46,7 +45,7 @@
           title: 'Energy consumed',
           width: '100px',
           value: function(row) {
-            return tr.b.u.Units.energyInJoules.format(row.energyConsumed);
+            return tr.v.Unit.byName.energyInJoules.format(row.energyConsumed);
           }
         },
         {
@@ -109,9 +108,14 @@
       if (energyConsumed === 'N/A')
         return 'N/A';
 
-      // Energy consumed is returned in Joules, so we can convert it to power by
-      // dividing by time and milliWatts by multiplying by 1000.
-      return this.getEnergyConsumed() / this.samples.bounds.duration * 1000;
+      // Multiply by 1000 to convert Joules to milliJoules.
+      var energyInMillijoules = this.getEnergyConsumed() * 1000;
+
+      // Divide by 1000 to convert milliseconds to seconds.
+      var durationInSeconds = this.samples.bounds.duration / 1000;
+
+      // Convert energy to power in milliwatts by dividing by time in seconds.
+      return energyInMillijoules / durationInSeconds;
     },
 
     getEnergyConsumed: function() {
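
The rewritten conversion spells the units out: energy in joules is scaled to millijoules, the selection duration in milliseconds to seconds, and their ratio is the average power in milliwatts (the column formatters above then divide by 1000 before calling powerInWatts.format). A worked example with made-up numbers:

    var energyConsumedInJoules = 0.003;  // as returned by getEnergyConsumed()
    var durationInMs = 2;                // this.samples.bounds.duration
    var energyInMillijoules = energyConsumedInJoules * 1000;  // 3 mJ
    var durationInSeconds = durationInMs / 1000;              // 0.002 s
    var powerInMilliwatts = energyInMillijoules / durationInSeconds;
    // => 1500 mW, shown as 1.5 W after the /1000 in the column formatter.
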
diff --git a/catapult/tracing/tracing/ui/analysis/power_sample_summary_table_test.html b/catapult/tracing/tracing/ui/analysis/power_sample_summary_table_test.html
index 3dbd814..4ee6749 100644
--- a/catapult/tracing/tracing/ui/analysis/power_sample_summary_table_test.html
+++ b/catapult/tracing/tracing/ui/analysis/power_sample_summary_table_test.html
@@ -76,7 +76,7 @@
     assert.equal(table.$.table.tableRows[0].min, 1);
     assert.equal(table.$.table.tableRows[0].max, 2);
     assert.equal(table.$.table.tableRows[0].timeWeightedAverage, 1);
-    assert.equal(table.$.table.tableRows[0].energyConsumed, 1);
+    assert.equal(table.$.table.tableRows[0].energyConsumed, 0.001);
     assert.equal(table.$.table.tableRows[0].sampleCount, 2);
   });
 
@@ -94,7 +94,7 @@
     assert.equal(table.$.table.tableRows[0].min, 1);
     assert.equal(table.$.table.tableRows[0].max, 3);
     assert.equal(table.$.table.tableRows[0].timeWeightedAverage, 1.5);
-    assert.equal(table.$.table.tableRows[0].energyConsumed, 3);
+    assert.equal(table.$.table.tableRows[0].energyConsumed, 0.003);
     assert.equal(table.$.table.tableRows[0].sampleCount, 3);
   });
 
@@ -115,19 +115,19 @@
 
     assert.equal(columns[0].title, 'Min power');
     assert.equal(columns[0].width, '100px');
-    assert.equal(columns[0].value(row), '1.000 mW');
+    assert.equal(columns[0].value(row), '0.001 W');
 
     assert.equal(columns[1].title, 'Max power');
     assert.equal(columns[1].width, '100px');
-    assert.equal(columns[1].value(row), '3.000 mW');
+    assert.equal(columns[1].value(row), '0.003 W');
 
     assert.equal(columns[2].title, 'Time-weighted average');
     assert.equal(columns[2].width, '100px');
-    assert.equal(columns[2].value(row), '1.500 mW');
+    assert.equal(columns[2].value(row), '0.002 W');
 
     assert.equal(columns[3].title, 'Energy consumed');
     assert.equal(columns[3].width, '100px');
-    assert.equal(columns[3].value(row), '3.000 J');
+    assert.equal(columns[3].value(row), '0.003 J');
 
     assert.equal(columns[4].title, 'Sample count');
     assert.equal(columns[4].width, '100%');
diff --git a/catapult/tracing/tracing/ui/analysis/power_sample_table.html b/catapult/tracing/tracing/ui/analysis/power_sample_table.html
index 5b991c4..b06e9ec 100644
--- a/catapult/tracing/tracing/ui/analysis/power_sample_table.html
+++ b/catapult/tracing/tracing/ui/analysis/power_sample_table.html
@@ -7,7 +7,8 @@
 
 <link rel="import" href="/tracing/model/event_set.html">
 <link rel="import" href="/tracing/ui/base/table.html">
-<link rel="import" href="/tracing/ui/units/time_stamp_span.html">
+<link rel="import" href="/tracing/value/ui/scalar_span.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <polymer-element name="tr-ui-a-power-sample-table">
   <template>
@@ -31,13 +32,19 @@
         title: 'Time',
         width: '100px',
         value: function(row) {
-          return tr.ui.units.createTimeStampSpan(row.start);
+          return tr.v.ui.createScalarSpan(row.start, {
+            unit: tr.v.Unit.byName.timeStampInMs
+          });
         }
       },
       {
-        title: 'Power (mW)',
+        title: 'Power',
         width: '100%',
-        value: function(row) { return row.power; }
+        value: function(row) {
+          return tr.v.ui.createScalarSpan(row.power / 1000, {
+            unit: tr.v.Unit.byName.powerInWatts
+          });
+        }
       }
     ];
     this.samples = new EventSet();
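
The Power column now assumes row.power is stored in milliwatts and wraps the watt value in a scalar span instead of printing the raw number. A short sketch, using the 1000 mW sample value from the test below:

    var powerSpan = tr.v.ui.createScalarSpan(1000 / 1000, {
      unit: tr.v.Unit.byName.powerInWatts
    });
    // powerSpan.value === 1 and powerSpan.unit is powerInWatts, as asserted
    // in power_sample_table_test.html.
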
diff --git a/catapult/tracing/tracing/ui/analysis/power_sample_table_test.html b/catapult/tracing/tracing/ui/analysis/power_sample_table_test.html
index f8bb665..a9f6ab6 100644
--- a/catapult/tracing/tracing/ui/analysis/power_sample_table_test.html
+++ b/catapult/tracing/tracing/ui/analysis/power_sample_table_test.html
@@ -9,6 +9,7 @@
 <link rel="import" href="/tracing/model/model.html">
 <link rel="import" href="/tracing/model/power_series.html">
 <link rel="import" href="/tracing/ui/analysis/power_sample_table.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <script>
 'use strict';
@@ -80,12 +81,16 @@
     assert.equal(columns[0].title, 'Time');
     assert.equal(columns[0].width, '100px');
     var timestampSpan = columns[0].value(series.samples[0]);
-    assert.equal(timestampSpan.tagName, 'TR-UI-U-TIME-STAMP-SPAN');
-    assert.equal(timestampSpan.timestamp, 0);
+    assert.equal(timestampSpan.tagName, 'TR-V-UI-SCALAR-SPAN');
+    assert.equal(timestampSpan.value, 0);
+    assert.equal(timestampSpan.unit, tr.v.Unit.byName.timeStampInMs);
 
-    assert.equal(columns[1].title, 'Power (mW)');
+    assert.equal(columns[1].title, 'Power');
     assert.equal(columns[1].width, '100%');
-    assert.equal(columns[1].value(series.samples[0]), 1000);
+    var powerSpan = columns[1].value(series.samples[0]);
+    assert.equal(powerSpan.tagName, 'TR-V-UI-SCALAR-SPAN');
+    assert.equal(powerSpan.value, 1);
+    assert.equal(powerSpan.unit, tr.v.Unit.byName.powerInWatts);
   });
 });
 </script>
diff --git a/catapult/tracing/tracing/ui/analysis/selection_summary_table.html b/catapult/tracing/tracing/ui/analysis/selection_summary_table.html
index 02e010d..cc3c00e 100644
--- a/catapult/tracing/tracing/ui/analysis/selection_summary_table.html
+++ b/catapult/tracing/tracing/ui/analysis/selection_summary_table.html
@@ -7,8 +7,8 @@
 
 <link rel="import" href="/tracing/base/base.html">
 <link rel="import" href="/tracing/ui/base/table.html">
-<link rel="import" href="/tracing/ui/units/time_duration_span.html">
-<link rel="import" href="/tracing/ui/units/time_stamp_span.html">
+<link rel="import" href="/tracing/value/ui/scalar_span.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <polymer-element name='tr-ui-a-selection-summary-table'>
   <template>
@@ -69,17 +69,21 @@
       else
         hasRange = false;
 
-      var timeSpanConfig = {ownerDocument: this.ownerDocument};
-
       rows.push({
         title: 'Selection start',
-        value: hasRange ? tr.ui.units.createTimeStampSpan(
-            selection.bounds.min, timeSpanConfig) : '<empty>'
+        value: hasRange ? tr.v.ui.createScalarSpan(
+            selection.bounds.min, {
+              unit: tr.v.Unit.byName.timeStampInMs,
+              ownerDocument: this.ownerDocument
+            }) : '<empty>'
       });
       rows.push({
         title: 'Selection extent',
-        value: hasRange ? tr.ui.units.createTimeDurationSpan(
-            selection.bounds.range, timeSpanConfig) : '<empty>'
+        value: hasRange ? tr.v.ui.createScalarSpan(
+            selection.bounds.range, {
+              unit: tr.v.Unit.byName.timeDurationInMs,
+              ownerDocument: this.ownerDocument
+            }) : '<empty>'
       });
 
       this.$.table.tableRows = rows;
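createScalarSpan above takes the raw number plus an options object; the hunks in this file pass both the unit and the ownerDocument so the span is created in the element's own document. A small sketch assuming only the two option names visible in the diff:

// Placeholder value; in the element this is selection.bounds.min.
var selectionStart = 0;
var startSpan = tr.v.ui.createScalarSpan(selectionStart, {
  unit: tr.v.Unit.byName.timeStampInMs,
  ownerDocument: document  // stand-in for this.ownerDocument inside the element
});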
diff --git a/catapult/tracing/tracing/ui/analysis/selection_summary_table_test.html b/catapult/tracing/tracing/ui/analysis/selection_summary_table_test.html
index fbcc2cb..5c1f169 100644
--- a/catapult/tracing/tracing/ui/analysis/selection_summary_table_test.html
+++ b/catapult/tracing/tracing/ui/analysis/selection_summary_table_test.html
@@ -10,6 +10,7 @@
 <link rel="import" href="/tracing/model/model.html">
 <link rel="import" href="/tracing/ui/analysis/selection_summary_table.html">
 <link rel="import" href="/tracing/ui/base/deep_utils.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <script>
 'use strict';
@@ -63,8 +64,12 @@
 
     var tableEl = tr.b.findDeepElementMatching(
         summaryTable, 'tr-ui-b-table');
-    assert.equal(tableEl.tableRows[0].value.timestamp, 0);
-    assert.equal(tableEl.tableRows[1].value.duration, 3);
+    assert.equal(tableEl.tableRows[0].value.value, 0);
+    assert.strictEqual(tableEl.tableRows[0].value.unit,
+        tr.v.Unit.byName.timeStampInMs);
+    assert.equal(tableEl.tableRows[1].value.value, 3);
+    assert.strictEqual(tableEl.tableRows[1].value.unit,
+        tr.v.Unit.byName.timeDurationInMs);
   });
 });
 </script>
diff --git a/catapult/tracing/tracing/ui/analysis/single_cpu_slice_sub_view.html b/catapult/tracing/tracing/ui/analysis/single_cpu_slice_sub_view.html
index a7508cc..ef1262c 100644
--- a/catapult/tracing/tracing/ui/analysis/single_cpu_slice_sub_view.html
+++ b/catapult/tracing/tracing/ui/analysis/single_cpu_slice_sub_view.html
@@ -7,10 +7,10 @@
 
 <link rel="import" href="/tracing/base/utils.html">
 <link rel="import" href="/tracing/model/event_set.html">
-<link rel="import" href="/tracing/ui/analysis/analysis_sub_view.html">
 <link rel="import" href="/tracing/ui/analysis/analysis_link.html">
-<link rel="import" href="/tracing/ui/units/time_duration_span.html">
-<link rel="import" href="/tracing/ui/units/time_stamp_span.html">
+<link rel="import" href="/tracing/ui/analysis/analysis_sub_view.html">
+<link rel="import" href="/tracing/value/ui/scalar_span.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <polymer-element name="tr-ui-a-single-cpu-slice-sub-view"
     extends="tr-ui-a-sub-view">
@@ -55,15 +55,15 @@
       <tr>
         <td>Start:</td>
         <td>
-          <tr-ui-u-time-stamp-span id="start">
-          </tr-ui-u-time-stamp-span>
+          <tr-v-ui-scalar-span id="start">
+          </tr-v-ui-scalar-span>
         </td>
       </tr>
       <tr>
         <td>Duration:</td>
         <td>
-          <tr-ui-u-time-duration-span id="duration">
-          </tr-ui-u-time-duration-span>
+          <tr-v-ui-scalar-span id="duration">
+          </tr-v-ui-scalar-span>
         </td>
       </tr>
       <tr>
@@ -113,8 +113,10 @@
         shadowRoot.querySelector('#thread-name').textContent = cpuSlice.title;
       }
 
-      shadowRoot.querySelector('#start').timestamp = cpuSlice.start;
-      shadowRoot.querySelector('#duration').duration = cpuSlice.duration;
+      shadowRoot.querySelector('#start').setValueAndUnit(
+          cpuSlice.start, tr.v.Unit.byName.timeStampInMs);
+      shadowRoot.querySelector('#duration').setValueAndUnit(
+          cpuSlice.duration, tr.v.Unit.byName.timeDurationInMs);
 
       var runningThreadEl = shadowRoot.querySelector('#running-thread');
 
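The scalar spans above are now populated through setValueAndUnit(value, unit) rather than the old per-unit timestamp/duration setters. A minimal stand-in object, assuming only the contract the hunk and the updated tests rely on (the value/unit pair is stored and later read back):

// Minimal stand-in for the scalar-span contract used above (assumption:
// setValueAndUnit simply stores the pair; the real element also re-renders,
// see tracing/value/ui/scalar_span.html).
function FakeScalarSpan() {
  this.value = undefined;
  this.unit = undefined;
}
FakeScalarSpan.prototype.setValueAndUnit = function(value, unit) {
  this.value = value;
  this.unit = unit;
};

var span = new FakeScalarSpan();
span.setValueAndUnit(12.5, tr.v.Unit.byName.timeDurationInMs);
// span.value === 12.5 and span.unit === tr.v.Unit.byName.timeDurationInMs,
// which is the shape the updated sub-view tests assert against.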
diff --git a/catapult/tracing/tracing/ui/analysis/single_event_sub_view.html b/catapult/tracing/tracing/ui/analysis/single_event_sub_view.html
index 3a697b9..6219854 100644
--- a/catapult/tracing/tracing/ui/analysis/single_event_sub_view.html
+++ b/catapult/tracing/tracing/ui/analysis/single_event_sub_view.html
@@ -10,10 +10,10 @@
 <link rel="import" href="/tracing/ui/analysis/analysis_sub_view.html">
 <link rel="import" href="/tracing/ui/analysis/generic_object_view.html">
 <link rel="import" href="/tracing/ui/analysis/stack_frame.html">
-<link rel="import" href="/tracing/ui/base/ui.html">
 <link rel="import" href="/tracing/ui/base/table.html">
-<link rel="import" href="/tracing/ui/units/time_duration_span.html">
-<link rel="import" href="/tracing/ui/units/time_stamp_span.html">
+<link rel="import" href="/tracing/ui/base/ui.html">
+<link rel="import" href="/tracing/value/ui/scalar_span.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <polymer-element name="tr-ui-a-single-event-sub-view"
     extends="tr-ui-a-sub-view">
@@ -88,35 +88,45 @@
       if (event.name)
         rows.push({ name: 'Name', value: event.name });
 
-      var startEl = document.createElement('tr-ui-u-time-stamp-span');
-      startEl.timestamp = event.start;
-      rows.push({ name: 'Start', value: startEl });
+      rows.push({
+        name: 'Start',
+        value: tr.v.ui.createScalarSpan(event.start, {
+          unit: tr.v.Unit.byName.timeStampInMs
+        })
+      });
 
       if (event.duration) {
-        var wallDurationEl = document.createElement(
-            'tr-ui-u-time-duration-span');
-        wallDurationEl.duration = event.duration;
-        rows.push({ name: 'Wall Duration', value: wallDurationEl });
+        rows.push({
+          name: 'Wall Duration',
+          value: tr.v.ui.createScalarSpan(event.duration, {
+            unit: tr.v.Unit.byName.timeDurationInMs
+          })
+        });
       }
 
       if (event.cpuDuration) {
-        var cpuDurationEl = document.createElement(
-            'tr-ui-u-time-duration-span');
-        cpuDurationEl.duration = event.cpuDuration;
-        rows.push({ name: 'CPU Duration', value: cpuDurationEl });
+        rows.push({
+          name: 'CPU Duration',
+          value: tr.v.ui.createScalarSpan(event.cpuDuration, {
+            unit: tr.v.Unit.byName.timeDurationInMs
+          })
+        });
       }
 
       if (event.subSlices !== undefined && event.subSlices.length !== 0) {
         if (event.selfTime) {
-          var selfTimeEl = document.createElement('tr-ui-u-time-duration-span');
-          selfTimeEl.duration = event.selfTime;
-          rows.push({ name: 'Self Time', value: selfTimeEl });
+          rows.push({
+            name: 'Self Time',
+            value: tr.v.ui.createScalarSpan(event.selfTime, {
+              unit: tr.v.Unit.byName.timeDurationInMs
+            })
+          });
         }
 
         if (event.cpuSelfTime) {
-          var cpuSelfTimeEl = document.createElement(
-              'tr-ui-u-time-duration-span');
-          cpuSelfTimeEl.duration = event.cpuSelfTime;
+          var cpuSelfTimeEl = tr.v.ui.createScalarSpan(event.cpuSelfTime, {
+            unit: tr.v.Unit.byName.timeDurationInMs
+          });
           if (event.cpuSelfTime > event.selfTime) {
             cpuSelfTimeEl.warning =
                 ' Note that CPU Self Time is larger than Self Time. ' +
@@ -124,16 +134,17 @@
                 'due to several subslices, rounding issues, and imprecise ' +
                 'time at which we get cpu- and real-time.';
           }
-          rows.push({name: 'CPU Self Time',
-                     value: cpuSelfTimeEl});
+          rows.push({ name: 'CPU Self Time', value: cpuSelfTimeEl });
         }
       }
 
       if (event.durationInUserTime) {
-        var durationInUserTimeEl = document.createElement(
-            'tr-ui-u-time-duration-span');
-        durationInUserTimeEl.duration = event.durationInUserTime;
-        rows.push({ name: 'Duration (U)', value: durationInUserTimeEl });
+        rows.push({
+          name: 'Duration (U)',
+          value: tr.v.ui.createScalarSpan(event.durationInUserTime, {
+            unit: tr.v.Unit.byName.timeDurationInMs
+          })
+        });
       }
 
       function createStackFrameEl(sf) {
@@ -244,6 +255,10 @@
       else
         this.addArgsToRows_(rows, event.args);
 
+      var event = new tr.b.Event('customize-rows');
+      event.rows = rows;
+      this.dispatchEvent(event);
+
       this.$.table.tableRows = rows;
       this.$.table.rebuild();
     }
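The 'customize-rows' event introduced above carries the row array, so a wrapping element can append rows before the table is rebuilt; the new single-user-expectation sub-view later in this diff does exactly that. A short sketch of such a listener, assuming only what the hunk shows (event.rows is the mutable array of {name, value} rows):

// Register the listener before assigning a selection, as the new
// tr-ui-a-single-user-expectation-sub-view element does.
var eventView = document.createElement('tr-ui-a-single-event-sub-view');
eventView.addEventListener('customize-rows', function(e) {
  // e.rows is the same array handed to the table, so pushed rows appear
  // alongside Start, Wall Duration, etc.
  e.rows.push({
    name: 'Custom row',
    value: tr.v.ui.createScalarSpan(42, {
      unit: tr.v.Unit.byName.timeDurationInMs
    })
  });
});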
diff --git a/catapult/tracing/tracing/ui/analysis/single_event_sub_view_test.html b/catapult/tracing/tracing/ui/analysis/single_event_sub_view_test.html
index 77b078a..b52ebaa 100644
--- a/catapult/tracing/tracing/ui/analysis/single_event_sub_view_test.html
+++ b/catapult/tracing/tracing/ui/analysis/single_event_sub_view_test.html
@@ -191,8 +191,12 @@
         subView, 'tr-ui-b-table');
     assert.equal(table.tableRows.length, 3);
     assert.equal(table.tableRows[0].value, 'b');
-    assert.equal(table.tableRows[1].value.timestamp, 0);
-    assert.equal(table.tableRows[2].value.duration, 0.002);
+    assert.equal(table.tableRows[1].value.value, 0);
+    assert.strictEqual(table.tableRows[1].value.unit,
+        tr.v.Unit.byName.timeStampInMs);
+    assert.equal(table.tableRows[2].value.value, 0.002);
+    assert.strictEqual(table.tableRows[2].value.unit,
+        tr.v.Unit.byName.timeDurationInMs);
   });
 
   test('analyzeSelectionWithSingleSliceCategory', function() {
@@ -207,8 +211,12 @@
     assert.equal(table.tableRows.length, 4);
     assert.equal(table.tableRows[0].value, 'b');
     assert.equal(table.tableRows[1].value, 'foo');
-    assert.equal(table.tableRows[2].value.timestamp, 0);
-    assert.equal(table.tableRows[3].value.duration, 0.002);
+    assert.equal(table.tableRows[2].value.value, 0);
+    assert.strictEqual(table.tableRows[2].value.unit,
+        tr.v.Unit.byName.timeStampInMs);
+    assert.equal(table.tableRows[3].value.value, 0.002);
+    assert.strictEqual(table.tableRows[3].value.unit,
+        tr.v.Unit.byName.timeDurationInMs);
   });
 
   test('instantiate_withSingleSliceContainingIDRef', function() {
diff --git a/catapult/tracing/tracing/ui/analysis/single_flow_event_sub_view_test.html b/catapult/tracing/tracing/ui/analysis/single_flow_event_sub_view_test.html
index d9203d3..bbd348a 100644
--- a/catapult/tracing/tracing/ui/analysis/single_flow_event_sub_view_test.html
+++ b/catapult/tracing/tracing/ui/analysis/single_flow_event_sub_view_test.html
@@ -16,19 +16,19 @@
 tr.b.unittest.testSuite(function() {
   var Model = tr.Model;
   var EventSet = tr.model.EventSet;
-  var test_utils = tr.c.TestUtils;
+  var TestUtils = tr.c.TestUtils;
 
   test('analyzeSelectionWithSingleEvent', function() {
-    var model = test_utils.newModel(function(model) {
+    var model = TestUtils.newModel(function(model) {
       model.p1 = model.getOrCreateProcess(1);
       model.t2 = model.p1.getOrCreateThread(model.p1);
-      model.sA = model.t2.sliceGroup.pushSlice(test_utils.newSliceEx({
+      model.sA = model.t2.sliceGroup.pushSlice(TestUtils.newSliceEx({
         title: 'a', start: 0, end: 2
       }));
-      model.sB = model.t2.sliceGroup.pushSlice(test_utils.newSliceEx({
+      model.sB = model.t2.sliceGroup.pushSlice(TestUtils.newSliceEx({
         title: 'b', start: 9, end: 11
       }));
-      model.fe = test_utils.newFlowEventEx({
+      model.fe = TestUtils.newFlowEventEx({
         cat: 'cat',
         id: 1234,
         title: 'MyFlow',
diff --git a/catapult/tracing/tracing/ui/analysis/single_interaction_record_sub_view.html b/catapult/tracing/tracing/ui/analysis/single_interaction_record_sub_view.html
deleted file mode 100644
index 519731d..0000000
--- a/catapult/tracing/tracing/ui/analysis/single_interaction_record_sub_view.html
+++ /dev/null
@@ -1,42 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/ui/analysis/analysis_sub_view.html">
-<link rel="import" href="/tracing/ui/analysis/single_event_sub_view.html">
-
-<polymer-element name="tr-ui-a-single-interaction-record-sub-view"
-    extends="tr-ui-a-sub-view">
-  <script>
-  'use strict';
-
-  Polymer({
-    created: function() {
-      this.currentSelection_ = undefined;
-    },
-
-    get selection() {
-      return this.currentSelection_;
-    },
-
-    set selection(selection) {
-      this.textContent = '';
-      var realView = document.createElement('tr-ui-a-single-event-sub-view');
-
-      this.appendChild(realView);
-      realView.setSelectionWithoutErrorChecks(selection);
-
-      this.currentSelection_ = selection;
-    },
-
-    get relatedEventsToHighlight() {
-      if (!this.currentSelection_)
-        return undefined;
-      return this.currentSelection_[0].associatedEvents;
-    }
-  });
-  </script>
-</polymer-element>
diff --git a/catapult/tracing/tracing/ui/analysis/single_object_snapshot_sub_view.html b/catapult/tracing/tracing/ui/analysis/single_object_snapshot_sub_view.html
index 2049ec6..b61cbe3 100644
--- a/catapult/tracing/tracing/ui/analysis/single_object_snapshot_sub_view.html
+++ b/catapult/tracing/tracing/ui/analysis/single_object_snapshot_sub_view.html
@@ -12,7 +12,8 @@
 <link rel="import" href="/tracing/ui/analysis/object_instance_view.html">
 <link rel="import" href="/tracing/ui/analysis/object_snapshot_view.html">
 <link rel="import" href="/tracing/ui/analysis/single_event_sub_view.html">
-<link rel="import" href="/tracing/ui/units/time_stamp_span.html">
+<link rel="import" href="/tracing/value/ui/scalar_span.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <polymer-element name="tr-ui-a-single-object-snapshot-sub-view"
     extends="tr-ui-a-sub-view">
@@ -101,8 +102,10 @@
 
       titleEl.appendChild(document.createTextNode(' @ '));
 
-      titleEl.appendChild(tr.ui.units.createTimeStampSpan(
-          snapshot.ts, {ownerDocument: this.ownerDocument}));
+      titleEl.appendChild(tr.v.ui.createScalarSpan(snapshot.ts, {
+        unit: tr.v.Unit.byName.timeStampInMs,
+        ownerDocument: this.ownerDocument
+      }));
 
       var tableEl = document.createElement('table');
       this.appendChild(tableEl);
diff --git a/catapult/tracing/tracing/ui/analysis/single_sample_sub_view.html b/catapult/tracing/tracing/ui/analysis/single_sample_sub_view.html
index 1a6e3ad..29f7b2e 100644
--- a/catapult/tracing/tracing/ui/analysis/single_sample_sub_view.html
+++ b/catapult/tracing/tracing/ui/analysis/single_sample_sub_view.html
@@ -6,9 +6,10 @@
 -->
 
 <link rel="import" href="/tracing/ui/analysis/analysis_sub_view.html">
-<link rel="import" href="/tracing/ui/base/table.html">
 <link rel="import" href="/tracing/ui/analysis/stack_frame.html">
-<link rel="import" href="/tracing/ui/units/time_stamp_span.html">
+<link rel="import" href="/tracing/ui/base/table.html">
+<link rel="import" href="/tracing/value/ui/scalar_span.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <polymer-element name="tr-ui-a-single-sample-sub-view"
     extends="tr-ui-a-sub-view">
@@ -68,8 +69,10 @@
 
       rows.push({
           title: 'Sample time',
-          value: tr.ui.units.createTimeStampSpan(
-              sample.start, {ownerDocument: this.ownerDocument})
+          value: tr.v.ui.createScalarSpan(sample.start, {
+            unit: tr.v.Unit.byName.timeStampInMs,
+            ownerDocument: this.ownerDocument
+          })
       });
 
       var sfEl = document.createElement('tr-ui-a-stack-frame');
diff --git a/catapult/tracing/tracing/ui/analysis/single_sample_sub_view_test.html b/catapult/tracing/tracing/ui/analysis/single_sample_sub_view_test.html
index 91c6270..d31abfa 100644
--- a/catapult/tracing/tracing/ui/analysis/single_sample_sub_view_test.html
+++ b/catapult/tracing/tracing/ui/analysis/single_sample_sub_view_test.html
@@ -10,6 +10,7 @@
 <link rel="import" href="/tracing/model/model.html">
 <link rel="import" href="/tracing/ui/analysis/single_sample_sub_view.html">
 <link rel="import" href="/tracing/ui/base/deep_utils.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <script>
 'use strict';
@@ -51,7 +52,8 @@
     assert.equal(rows.length, 3);
 
     assert.equal(rows[0].value, 'X');
-    assert.equal(rows[1].value.timestamp, 0.184);
+    assert.equal(rows[1].value.value, 0.184);
+    assert.strictEqual(rows[1].value.unit, tr.v.Unit.byName.timeStampInMs);
     assert.equal(rows[2].value.stackFrame, t53.samples[0].leafStackFrame);
   });
 });
diff --git a/catapult/tracing/tracing/ui/analysis/single_thread_time_slice_sub_view.html b/catapult/tracing/tracing/ui/analysis/single_thread_time_slice_sub_view.html
index 383b305..c705bc8 100644
--- a/catapult/tracing/tracing/ui/analysis/single_thread_time_slice_sub_view.html
+++ b/catapult/tracing/tracing/ui/analysis/single_thread_time_slice_sub_view.html
@@ -13,8 +13,8 @@
 <link rel="import" href="/tracing/ui/analysis/analysis_link.html">
 <link rel="import" href="/tracing/ui/analysis/analysis_sub_view.html">
 <link rel="import" href="/tracing/ui/analysis/generic_object_view.html">
-<link rel="import" href="/tracing/ui/units/time_duration_span.html">
-<link rel="import" href="/tracing/ui/units/time_stamp_span.html">
+<link rel="import" href="/tracing/value/ui/scalar_span.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <polymer-element name="tr-ui-a-single-thread-time-slice-sub-view"
     extends="tr-ui-a-sub-view">
@@ -63,15 +63,15 @@
       <tr>
         <td>Start:</td>
         <td>
-          <tr-ui-u-time-stamp-span id="start">
-          </tr-ui-u-time-stamp-span>
+          <tr-v-ui-scalar-span id="start">
+          </tr-v-ui-scalar-span>
         </td>
       </tr>
       <tr>
         <td>Duration:</td>
         <td>
-          <tr-ui-u-time-duration-span id="duration">
-          </tr-ui-u-time-duration-span>
+          <tr-v-ui-scalar-span id="duration">
+          </tr-v-ui-scalar-span>
         </td>
       </tr>
 
@@ -122,8 +122,11 @@
       shadowRoot.querySelector('#thread-name').textContent =
           thread.userFriendlyName;
 
-      shadowRoot.querySelector('#start').timestamp = timeSlice.start;
-      shadowRoot.querySelector('#duration').duration = timeSlice.duration;
+      shadowRoot.querySelector('#start').setValueAndUnit(
+          timeSlice.start, tr.v.Unit.byName.timeStampInMs);
+      shadowRoot.querySelector('#duration').setValueAndUnit(
+          timeSlice.duration, tr.v.Unit.byName.timeDurationInMs);
+
       var onCpuEl = shadowRoot.querySelector('#on-cpu');
       onCpuEl.textContent = '';
       var runningInsteadEl = shadowRoot.querySelector('#running-instead');
diff --git a/catapult/tracing/tracing/ui/analysis/single_user_expectation_sub_view.html b/catapult/tracing/tracing/ui/analysis/single_user_expectation_sub_view.html
new file mode 100644
index 0000000..621df98
--- /dev/null
+++ b/catapult/tracing/tracing/ui/analysis/single_user_expectation_sub_view.html
@@ -0,0 +1,97 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/metrics/value_list.html">
+<link rel="import" href="/tracing/ui/analysis/analysis_sub_view.html">
+<link rel="import" href="/tracing/ui/analysis/single_event_sub_view.html">
+<link rel="import" href="/tracing/value/ui/scalar_span.html">
+<link rel="import" href="/tracing/value/unit.html">
+
+<polymer-element name="tr-ui-a-single-user-expectation-sub-view"
+    extends="tr-ui-a-sub-view">
+  <script>
+  'use strict';
+
+  Polymer({
+    created: function() {
+      this.currentSelection_ = undefined;
+      this.realView_ = undefined;
+    },
+
+    get selection() {
+      return this.currentSelection_;
+    },
+
+    set selection(selection) {
+      this.textContent = '';
+      this.realView_ = document.createElement('tr-ui-a-single-event-sub-view');
+      this.realView_.addEventListener('customize-rows',
+          this.onCustomizeRows_.bind(this));
+
+      this.appendChild(this.realView_);
+      this.currentSelection_ = selection;
+      this.realView_.setSelectionWithoutErrorChecks(selection);
+    },
+
+    get relatedEventsToHighlight() {
+      if (!this.currentSelection_)
+        return undefined;
+      return this.currentSelection_[0].associatedEvents;
+    },
+
+    onCustomizeRows_: function(event) {
+      var ue = this.selection[0];
+
+      var valueList = new tr.metrics.ValueList();
+
+      function runMetric(metricInfo) {
+        try {
+          metricInfo.constructor(valueList, ue.parentModel);
+        } catch (failure) {
+          console.error(metricInfo, failure);
+        }
+      }
+
+      tr.metrics.MetricRegistry.getAllRegisteredTypeInfos().forEach(runMetric);
+
+      // Metrics may have been computed more than once, so avoid displaying them
+      // more than once by collecting them in a dictionary.
+      // https://github.com/catapult-project/catapult/issues/2154
+      var metricValues = {};
+
+      valueList.valueDicts.forEach(function(value) {
+        if (value.grouping_keys.userExpectationStableId !== ue.stableId)
+          return;
+
+        if ((value.type !== 'numeric') ||
+            (value.numeric.type !== 'scalar'))
+          return;
+
+        metricValues[value.grouping_keys.name] = value.numeric;
+      });
+
+      for (var name in metricValues) {
+        event.rows.push({
+          name: name,
+          value: tr.v.ui.createScalarSpan(metricValues[name].value, {
+            unit: tr.v.Unit.fromJSON(metricValues[name].unit)
+          })
+        });
+      }
+
+      if (ue.rawCpuMs) {
+        event.rows.push({
+          name: 'Total CPU',
+          value: tr.v.ui.createScalarSpan(ue.totalCpuMs, {
+            unit: tr.v.Unit.byName.timeDurationInMs
+          })
+        });
+      }
+    }
+  });
+  </script>
+</polymer-element>
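onCustomizeRows_ above keys every scalar metric value by grouping_keys.name, so a metric that ran more than once still contributes a single row. A compact sketch of that keep-last-wins dedup on hypothetical value dicts (the names, ids and units below are placeholders, not real metric output):

// Hypothetical value dicts shaped like the ones the loop above consumes.
var valueDicts = [
  {
    type: 'numeric',
    grouping_keys: {name: 'latency', userExpectationStableId: 'ue#1'},
    numeric: {type: 'scalar', value: 16, unit: 'ms'}
  },
  {
    // Same metric computed a second time.
    type: 'numeric',
    grouping_keys: {name: 'latency', userExpectationStableId: 'ue#1'},
    numeric: {type: 'scalar', value: 16, unit: 'ms'}
  }
];

var metricValues = {};
valueDicts.forEach(function(value) {
  if (value.grouping_keys.userExpectationStableId !== 'ue#1')
    return;
  if (value.type !== 'numeric' || value.numeric.type !== 'scalar')
    return;
  metricValues[value.grouping_keys.name] = value.numeric;
});
// Object.keys(metricValues) -> ['latency']: the duplicate collapsed into one row.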
diff --git a/catapult/tracing/tracing/ui/analysis/stack_frame_tree.html b/catapult/tracing/tracing/ui/analysis/stack_frame_tree.html
deleted file mode 100644
index bbb57f6..0000000
--- a/catapult/tracing/tracing/ui/analysis/stack_frame_tree.html
+++ /dev/null
@@ -1,260 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/base.html">
-
-<script>
-'use strict';
-
-tr.exportTo('tr.ui.analysis', function() {
-
-  /**
-   * Z-function: Given a list (or a string) of length n, for each index i from
-   * 1 to n - 1, find the length z[i] of the longest substring starting at
-   * position i which is also a prefix of the list. This function returns the
-   * list of maximum lengths z.
-   *
-   * Mathematically, for each i from 1 to n - 1, z[i] is the maximum value such
-   * that [list[0], ..., list[z[i] - 1]] = [list[i], ..., list[i + z[i] - 1]].
-   * z[0] is defined to be zero for convenience.
-   *
-   * Example:
-   *
-   *   Input (list): ['A', 'B', 'A', 'C', 'A', 'B', 'A']
-   *   Output (z):   [ 0 ,  0 ,  1 ,  0 ,  3 ,  0 ,  1 ]
-   *
-   * Unlike the brute-force approach (which is O(n^2) in the worst case), the
-   * complexity of this implementation is linear in the size of the list, i.e.
-   * O(n).
-   *
-   * Source: http://e-maxx-eng.github.io/string/z-function.html
-   */
-  function zFunction(list) {
-    var n = list.length;
-    if (n === 0)
-      return [];
-
-    var z = new Array(n);
-    z[0] = 0;
-
-    for (var i = 1, left = 0, right = 0; i < n; ++i) {
-      var maxLength;
-      if (i <= right)
-        maxLength = Math.min(right - i + 1, z[i - left]);
-      else
-        maxLength = 0;
-
-      while (i + maxLength < n && list[maxLength] === list[i + maxLength])
-        ++maxLength;
-
-      if (i + maxLength - 1 > right) {
-        left = i;
-        right = i + maxLength - 1;
-      }
-
-      z[i] = maxLength;
-    }
-
-    return z;
-  }
-
-  /**
-   * Node of a stack frame hierarchy representation for tree (top down) and
-   * heavy (bottom up) views.
-   *
-   * To build a top-down view of a set of stack traces, you simply create a
-   * root node and then add the stack traces to it:
-   *
-   *   var topDownRoot = new StackFrameTreeNode('root', undefined);
-   *   topDownRoot.addStackTrace(trace1, size1);
-   *   topDownRoot.addStackTrace(trace2, size2);
-   *   ...
-   *   topDownRoot.addStackTrace(traceN, sizeN);
-   *
-   * The corresponding bottom-up view is constructed indirectly by converting
-   * the top-down view:
-   *
-   *   var bottomUpRoot = topDownRoot.convertToBottomUpView();
-   *
-   * @{constructor}
-   */
-  function StackFrameTreeNode(title, opt_frame) {
-    this.title = title;
-    this.frame = opt_frame;
-
-    this.parent = undefined;
-    this.children = [];
-    this.childMap = new Map();
-
-    this.total = 0;
-    this.self = 0;
-  }
-
-  StackFrameTreeNode.prototype = {
-    /** Duck type <tr-ui-b-table> rows. */
-    get subRows() {
-      return this.children;
-    },
-
-    /**
-     * Returns the list of titles of this node and all its ancestors (including
-     * the root). The title of this node is first in the list.
-     *
-     * Note that this method does not use
-     * tr.model.StackFrame.prototype.getUserFriendlyStackTrace because some
-     * nodes don't have frames (e.g. the root).
-     */
-    get stackTraceTitles() {
-      var titles = [];
-      for (var currentNode = this; currentNode !== undefined;
-        currentNode = currentNode.parent) {
-        titles.push(currentNode.title);
-      }
-      return titles;
-    },
-
-    getOrCreateChild: function(title, opt_frame) {
-      var childNode = this.childMap.get(title);
-      if (childNode !== undefined)
-        return childNode;
-
-      childNode = new StackFrameTreeNode(title, opt_frame);
-      childNode.parent = this;
-      this.children.push(childNode);
-      this.childMap.set(title, childNode);
-
-      return childNode;
-    },
-
-    /**
-     * Add a stack trace to the stack frame tree. The first element of the
-     * trace is expected to be the leaf stack frame (so that
-     * tr.model.StackFrame.prototype.stackTrace could be used as the parameter
-     * without any modification).
-     *
-     * For example, the following code snippet:
-     *
-     *   var frameA = new StackFrame(undefined, tr.b.GUID.allocate(), 'A');
-     *   var frameB = new StackFrame(frameA, tr.b.GUID.allocate(), 'B');
-     *   var frameC = new StackFrame(frameB, tr.b.GUID.allocate(), 'C');
-     *   root.addStackTrace(frameC.stackTrace, 42)
-     *
-     * will add the path root -> A -> B -> C to the tree (if necessary), add 42
-     * to the total value of all the nodes on the path (root, A, B, C), and add
-     * 42 to the self value of the leaf node (C).
-     *
-     * Important: No stack traces should be added to a bottom-up view (once it
-     * has been converted). Doing so will not update the structure and values
-     * of the view correctly!
-     */
-    addStackTrace: function(trace, value, opt_traceContainsRootFrame) {
-      var currentNode = this;
-      var startIndex = trace.length - (opt_traceContainsRootFrame ? 2 : 1);
-      for (var i = startIndex; i >= 0; i--) {
-        currentNode.total += value;
-        var stackFrame = trace[i];
-        currentNode =
-            currentNode.getOrCreateChild(stackFrame.title, stackFrame);
-      }
-      currentNode.total += value;
-      currentNode.self += value;
-    },
-
-    /**
-     * Converts this stack frame tree from top-down view to the corresponding
-     * bottom-up view. This method returns the root of the tree representing
-     * the bottom-up view.
-     *
-     * Note that there is no connection between the two representations after
-     * the conversion (modifying the structure and/or values of one of them
-     * will not affect the other one).
-     */
-    convertToBottomUpView: function() {
-      var bottomUpViewRoot = new StackFrameTreeNode(this.title, this.frame);
-      bottomUpViewRoot.total = this.total;
-      bottomUpViewRoot.self = this.self;
-
-      this.addChildrenToBottomUpViewRecursively_(bottomUpViewRoot);
-
-      return bottomUpViewRoot;
-    },
-
-    addChildrenToBottomUpViewRecursively_: function(bottomUpViewRoot) {
-      this.children.forEach(function(child) {
-        child.addToBottomUpViewRecursively_(bottomUpViewRoot);
-      });
-    },
-
-    /**
-     * Add this node and all its children to the provided bottom-up view.
-     *
-     * This code was inspired by
-     * third_party/WebKit/Source/devtools/front_end/profiler/CPUProfileBottomUpDataGrid.js
-     * in the Chromium tree.
-     */
-    addToBottomUpViewRecursively_: function(bottomUpViewRoot) {
-      // Determine the length of the suffix of the trace associated with this
-      // node whose total should *not* be added to the corresponding bottom-up
-      // view node. This is to avoid double-counting recursive calls. Note that
-      // this does not affect self size.
-      //
-      // For example, if this node corresponds to the leaf stack frame (B) of
-      // root -> A -> B -> C -> A -> B, then the length of the suffix will be
-      // 2. This means that the total size of this node will only be added to
-      // nodes marked with * in the resulting bottom-up representation
-      // root -> B -> A -> C* -> B* -> A*. The reason for this is that the
-      // total would already have been included when the root -> A -> B prefix
-      // of the trace was added to the bottom-up view (when the great
-      // grandparent (A) of this node was visited by this recursive method).
-      var remainingRecursiveSuffixLength =
-          this.calculateRecursiveSuffixLength_();
-
-      // Construct the bottom-up view counterpart of this top-down view node.
-      //
-      // For example, if this node corresponds to the leaf stack frame (C) of
-      // the stack trace root -> A -> B -> C, the bottom-up view will be
-      // updated with root -> C -> B -> A.
-      var bottomUpParentNode = bottomUpViewRoot;
-      for (var topDownNode = this;
-           topDownNode.parent !== undefined /* don't include the root node */;
-           topDownNode = topDownNode.parent) {
-        var bottomUpChildNode = bottomUpParentNode.getOrCreateChild(
-            topDownNode.title, topDownNode.frame);
-        bottomUpChildNode.self += this.self;
-        if (remainingRecursiveSuffixLength > 0)
-          remainingRecursiveSuffixLength--;
-        else
-          bottomUpChildNode.total += this.total;
-        bottomUpParentNode = bottomUpChildNode;
-      }
-
-      this.addChildrenToBottomUpViewRecursively_(bottomUpViewRoot);
-    },
-
-    /**
-     * Determine the length of the longest suffix of the stack trace associated
-     * with this node which is repeated in the trace.
-     *
-     * For example, if this node corresponds to the leaf stack frame (C) of the
-     * stack trace root -> A -> B -> C -> A -> B -> B -> C, then this method
-     * will return 2 because the suffix B -> C is repeated in the trace.
-     */
-    calculateRecursiveSuffixLength_: function() {
-      var maxLengths = zFunction(this.stackTraceTitles);
-      var recursiveSuffixLength = 0;
-      for (var i = 0; i < maxLengths.length; i++)
-        recursiveSuffixLength = Math.max(recursiveSuffixLength, maxLengths[i]);
-      return recursiveSuffixLength;
-    }
-  };
-
-  return {
-    StackFrameTreeNode: StackFrameTreeNode,
-    zFunction: zFunction  // Exported for testing.
-  };
-});
-</script>
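For readers of the removed file above: the bottom-up conversion used zFunction on the leaf-first title list to find how long a suffix of the trace repeats earlier in the trace, and skipped adding the total to that many bottom-up nodes. A self-contained restatement of that guard, kept here only as a reference for the deleted helpers:

// Standalone restatement of the removed zFunction (same algorithm, shorter
// variable names); returns the Z-array of the input list.
function zFunction(list) {
  var n = list.length;
  if (n === 0) return [];
  var z = new Array(n);
  z[0] = 0;
  for (var i = 1, left = 0, right = 0; i < n; ++i) {
    var len = i <= right ? Math.min(right - i + 1, z[i - left]) : 0;
    while (i + len < n && list[len] === list[i + len]) ++len;
    if (i + len - 1 > right) { left = i; right = i + len - 1; }
    z[i] = len;
  }
  return z;
}

// Longest repeated suffix of a leaf-first title list, as used to avoid
// double-counting totals when a recursive trace is folded bottom-up.
function recursiveSuffixLength(leafFirstTitles) {
  return Math.max.apply(null, [0].concat(zFunction(leafFirstTitles)));
}

// root -> A -> B -> C -> A -> B -> B -> C, expressed leaf-first:
recursiveSuffixLength(['C', 'B', 'B', 'A', 'C', 'B', 'A', 'root']);  // 2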
diff --git a/catapult/tracing/tracing/ui/analysis/stack_frame_tree_test.html b/catapult/tracing/tracing/ui/analysis/stack_frame_tree_test.html
deleted file mode 100644
index 484d380..0000000
--- a/catapult/tracing/tracing/ui/analysis/stack_frame_tree_test.html
+++ /dev/null
@@ -1,728 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/iteration_helpers.html">
-<link rel="import" href="/tracing/model/stack_frame.html">
-<link rel="import" href="/tracing/ui/analysis/stack_frame_tree.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  var StackFrame = tr.model.StackFrame;
-  var StackFrameTreeNode = tr.ui.analysis.StackFrameTreeNode;
-  var zFunction = tr.ui.analysis.zFunction;
-
-  function checkNodeValues(node, expectedTitle, expectedTotal, expectedSelf) {
-    assert.strictEqual(node.title, expectedTitle);
-    assert.strictEqual(node.total, expectedTotal);
-    assert.strictEqual(node.self, expectedSelf);
-  }
-
-  function checkNode(node, expectedTitle, expectedFrame, expectedParent,
-      expectedChildren, expectedTotal, expectedSelf, expectedStackTraceTitles) {
-    checkNodeValues(node, expectedTitle, expectedTotal, expectedSelf);
-
-    assert.strictEqual(node.frame, expectedFrame);
-
-    assert.strictEqual(node.parent, expectedParent);
-    if (expectedParent !== undefined)
-      assert.strictEqual(expectedParent.getOrCreateChild(expectedTitle), node);
-    assert.deepEqual(node.children, expectedChildren);
-
-    // Check that node.childMap has the same values as node.children and that
-    // the title -> child mapping is set up correctly.
-    assert.sameMembers(node.children, tr.b.mapValues(node.childMap));
-    tr.b.iterMapItems(node.childMap, function(title, child) {
-      assert.strictEqual(child.title, title);
-    });
-
-    assert.deepEqual(node.stackTraceTitles, expectedStackTraceTitles);
-  }
-
-  function trace(/* frameTitleRoot, ..., frameTitleLeaf */) {
-    var trace = new Array(arguments.length);
-    Array.prototype.forEach.call(arguments, function(frameTitle, index) {
-      trace[index] = new StackFrame(
-          trace[index - 1], tr.b.GUID.allocate(), frameTitle);
-    });
-    return trace.reverse();
-  }
-
-  function checkTree(node, expectedStructure, opt_expectedParent) {
-    checkNodeValues(node, expectedStructure.title, expectedStructure.total,
-        expectedStructure.self);
-    var expectedChildStructures = expectedStructure.children || [];
-    assert.lengthOf(node.children, expectedChildStructures.length);
-    for (var i = 0; i < expectedChildStructures.length; i++)
-      checkTree(node.children[i], expectedChildStructures[i], node);
-  }
-
-  test('nodes', function() {
-    var root = new StackFrameTreeNode('root', undefined);
-
-    var frame1 = new StackFrame(undefined, tr.b.GUID.allocate(), 'frame1');
-    var child1 = root.getOrCreateChild('child1', frame1);
-
-    var frame2 = new StackFrame(undefined, tr.b.GUID.allocate(), 'frame2');
-    var child2 = root.getOrCreateChild('child2', frame2);
-
-    var frame3 = new StackFrame(frame2, tr.b.GUID.allocate(), 'frame3');
-    var grandchild = child2.getOrCreateChild('grandchild', frame3);
-
-    checkNode(root, 'root', undefined, undefined, [child1, child2], 0, 0,
-        ['root']);
-    checkNode(child1, 'child1', frame1, root, [], 0, 0, ['child1', 'root']);
-    checkNode(child2, 'child2', frame2, root, [grandchild], 0, 0,
-        ['child2', 'root']);
-    checkNode(grandchild, 'grandchild', frame3, child2, [], 0, 0,
-        ['grandchild', 'child2', 'root']);
-  });
-
-  test('views_empty', function() {
-    var topDownRoot = new StackFrameTreeNode('root', undefined);
-    var bottomUpRoot = topDownRoot.convertToBottomUpView();
-
-    checkTree(topDownRoot, {
-      title: 'root',
-      total: 0,
-      self: 0
-    });
-
-    checkTree(bottomUpRoot, {
-      title: 'root',
-      total: 0,
-      self: 0
-    });
-  });
-
-  test('views_rootOnly', function() {
-    var topDownRoot = new StackFrameTreeNode('root', undefined);
-    topDownRoot.addStackTrace(trace(), 39);
-    topDownRoot.addStackTrace(trace(), 3);
-    var bottomUpRoot = topDownRoot.convertToBottomUpView();
-
-    checkTree(topDownRoot, {
-      title: 'root',
-      total: 42,
-      self: 42
-    });
-
-    checkTree(bottomUpRoot, {
-      title: 'root',
-      total: 42,
-      self: 42
-    });
-  });
-
-  test('views_noRecursion', function() {
-    var topDownRoot = new StackFrameTreeNode('root', undefined);
-    topDownRoot.addStackTrace(trace('A', 'B', 'C'), 10);
-    topDownRoot.addStackTrace(trace('A', 'B'), 20);
-    topDownRoot.addStackTrace(trace('B', 'D'), 30);
-    topDownRoot.addStackTrace(trace('A', 'B', 'D'), 40);
-    topDownRoot.addStackTrace(trace('A', 'C'), 50);
-    topDownRoot.addStackTrace(trace(), 60);
-    var bottomUpRoot = topDownRoot.convertToBottomUpView();
-
-    checkTree(topDownRoot, {
-      title: 'root',
-      total: 210,
-      self: 60,
-      children: [
-        {
-          title: 'A',
-          total: 120,
-          self: 0,
-          children: [
-            {
-              title: 'B',
-              total: 70,
-              self: 20,
-              children: [
-                {
-                  title: 'C',
-                  total: 10,
-                  self: 10
-                },
-                {
-                  title: 'D',
-                  total: 40,
-                  self: 40
-                }
-              ]
-            },
-            {
-              title: 'C',
-              total: 50,
-              self: 50
-            }
-          ]
-        },
-        {
-          title: 'B',
-          total: 30,
-          self: 0,
-          children: [
-            {
-              title: 'D',
-              total: 30,
-              self: 30
-            }
-          ]
-        }
-      ]
-    });
-
-    checkTree(bottomUpRoot, {
-      title: 'root',
-      total: 210,
-      self: 60,
-      children: [
-        {
-          title: 'A',
-          total: 120,
-          self: 0
-        },
-        {
-          title: 'B',
-          total: 100,
-          self: 20,
-          children: [
-            {
-              title: 'A',
-              total: 70,
-              self: 20
-            }
-          ]
-        },
-        {
-          title: 'C',
-          total: 60,
-          self: 60,
-          children: [
-            {
-              title: 'B',
-              total: 10,
-              self: 10,
-              children: [
-                {
-                  title: 'A',
-                  total: 10,
-                  self: 10
-                }
-              ]
-            },
-            {
-              title: 'A',
-              total: 50,
-              self: 50
-            }
-          ]
-        },
-        {
-          title: 'D',
-          total: 70,
-          self: 70,
-          children: [
-            {
-              title: 'B',
-              total: 70,
-              self: 70,
-              children: [
-                {
-                  title: 'A',
-                  total: 40,
-                  self: 40
-                }
-              ]
-            }
-          ]
-        }
-      ]
-    });
-  });
-
-  test('views_simpleRecursion', function() {
-    var topDownRoot = new StackFrameTreeNode('root', undefined);
-    topDownRoot.addStackTrace(trace('A'), 10);
-    topDownRoot.addStackTrace(trace('A', 'A', 'A'), 20);
-    topDownRoot.addStackTrace(trace('A', 'A'), 30);
-    topDownRoot.addStackTrace(trace('A', 'A', 'A', 'A'), 40);
-    var bottomUpRoot = topDownRoot.convertToBottomUpView();
-
-    checkTree(topDownRoot, {
-      title: 'root',
-      total: 100,
-      self: 0,
-      children: [
-        {
-          title: 'A',
-          total: 100,
-          self: 10,
-          children: [
-            {
-              title: 'A',
-              total: 90,
-              self: 30,
-              children: [
-                {
-                  title: 'A',
-                  total: 60,
-                  self: 20,
-                  children: [
-                    {
-                      title: 'A',
-                      total: 40,
-                      self: 40
-                    }
-                  ]
-                }
-              ]
-            }
-          ]
-        }
-      ]
-    });
-
-    checkTree(bottomUpRoot, {
-      title: 'root',
-      total: 100,
-      self: 0,
-      children: [
-        {
-          title: 'A',
-          total: 100,
-          self: 100,
-          children: [
-            {
-              title: 'A',
-              total: 90,
-              self: 90,
-              children: [
-                {
-                  title: 'A',
-                  total: 60,
-                  self: 60,
-                  children: [
-                    {
-                      title: 'A',
-                      total: 40,
-                      self: 40
-                    }
-                  ]
-                }
-              ]
-            }
-          ]
-        }
-      ]
-    });
-  });
-
-  test('views_complexRecursion', function() {
-    var topDownRoot = new StackFrameTreeNode('root', undefined);
-    topDownRoot.addStackTrace(trace('A', 'B', 'C'), 10);
-    topDownRoot.addStackTrace(trace('A', 'D', 'B', 'C', 'A', 'B', 'C'), 20);
-    topDownRoot.addStackTrace(trace('A', 'D', 'B', 'C', 'A', 'B', 'D'), 30);
-    topDownRoot.addStackTrace(trace('C', 'B', 'C'), 40);
-    topDownRoot.addStackTrace(trace('C', 'B', 'C', 'B', 'C'), 50);
-    var bottomUpRoot = topDownRoot.convertToBottomUpView();
-
-    checkTree(topDownRoot, {
-      title: 'root',
-      total: 150,
-      self: 0,
-      children: [
-        {
-          title: 'A',
-          total: 60,
-          self: 0,
-          children: [
-            {
-              title: 'B',
-              total: 10,
-              self: 0,
-              children: [
-                {
-                  title: 'C',
-                  total: 10,
-                  self: 10
-                }
-              ]
-            },
-            {
-              title: 'D',
-              total: 50,
-              self: 0,
-              children: [
-                {
-                  title: 'B',
-                  total: 50,
-                  self: 0,
-                  children: [
-                    {
-                      title: 'C',
-                      total: 50,
-                      self: 0,
-                      children: [
-                        {
-                          title: 'A',
-                          total: 50,
-                          self: 0,
-                          children: [
-                            {
-                              title: 'B',
-                              total: 50,
-                              self: 0,
-                              children: [
-                                {
-                                  title: 'C',
-                                  total: 20,
-                                  self: 20
-                                },
-                                {
-                                  title: 'D',
-                                  total: 30,
-                                  self: 30
-                                }
-                              ]
-                            }
-                          ]
-                        }
-                      ]
-                    }
-                  ]
-                }
-              ]
-            }
-          ]
-        },
-        {
-          title: 'C',
-          total: 90,
-          self: 0,
-          children: [
-            {
-              title: 'B',
-              total: 90,
-              self: 0,
-              children: [
-                {
-                  title: 'C',
-                  total: 90,
-                  self: 40,
-                  children: [
-                    {
-                      title: 'B',
-                      total: 50,
-                      self: 0,
-                      children: [
-                        {
-                          title: 'C',
-                          total: 50,
-                          self: 50
-                        }
-                      ]
-                    }
-                  ]
-                }
-              ]
-            }
-          ]
-        }
-      ]
-    });
-
-    checkTree(bottomUpRoot, {
-      title: 'root',
-      total: 150,
-      self: 0,
-      children: [
-        {
-          title: 'A',
-          total: 60,
-          self: 0,
-          children: [
-            {
-              title: 'C',
-              total: 50,
-              self: 0,
-              children: [
-                {
-                  title: 'B',
-                  total: 50,
-                  self: 0,
-                  children: [
-                    {
-                      title: 'D',
-                      total: 50,
-                      self: 0,
-                      children: [
-                        {
-                          title: 'A',
-                          total: 50,
-                          self: 0
-                        }
-                      ]
-                    }
-                  ]
-                }
-              ]
-            }
-          ]
-        },
-        {
-          title: 'B',
-          total: 150,
-          self: 0,
-          children: [
-            {
-              title: 'A',
-              total: 60,
-              self: 0,
-              children: [
-                {
-                  title: 'C',
-                  total: 50,
-                  self: 0,
-                  children: [
-                    {
-                      title: 'B',
-                      total: 50,
-                      self: 0,
-                      children: [
-                        {
-                          title: 'D',
-                          total: 50,
-                          self: 0,
-                          children: [
-                            {
-                              title: 'A',
-                              total: 50,
-                              self: 0
-                            }
-                          ]
-                        }
-                      ]
-                    }
-                  ]
-                }
-              ]
-            },
-            {
-              title: 'D',
-              total: 50,
-              self: 0,
-              children: [
-                {
-                  title: 'A',
-                  total: 50,
-                  self: 0
-                }
-              ]
-            },
-            {
-              title: 'C',
-              total: 90,
-              self: 0,
-              children: [
-                {
-                  title: 'B',
-                  total: 50,
-                  self: 0,
-                  children: [
-                    {
-                      title: 'C',
-                      total: 50,
-                      self: 0
-                    }
-                  ]
-                }
-              ]
-            }
-          ]
-        },
-        {
-          title: 'C',
-          total: 150,
-          self: 120,
-          children: [
-            {
-              title: 'B',
-              total: 150,
-              self: 120,
-              children: [
-                {
-                  title: 'A',
-                  total: 30,
-                  self: 30,
-                  children: [
-                    {
-                      title: 'C',
-                      total: 20,
-                      self: 20,
-                      children: [
-                        {
-                          title: 'B',
-                          total: 20,
-                          self: 20,
-                          children: [
-                            {
-                              title: 'D',
-                              total: 20,
-                              self: 20,
-                              children: [
-                                {
-                                  title: 'A',
-                                  total: 20,
-                                  self: 20
-                                }
-                              ]
-                            }
-                          ]
-                        }
-                      ]
-                    }
-                  ]
-                },
-                {
-                  title: 'D',
-                  total: 50,
-                  self: 0,
-                  children: [
-                    {
-                      title: 'A',
-                      total: 50,
-                      self: 0
-                    }
-                  ]
-                },
-                {
-                  title: 'C',
-                  total: 90,
-                  self: 90,
-                  children: [
-                    {
-                      title: 'B',
-                      total: 50,
-                      self: 50,
-                      children: [
-                        {
-                          title: 'C',
-                          total: 50,
-                          self: 50
-                        }
-                      ]
-                    }
-                  ]
-                }
-              ]
-            }
-          ]
-        },
-        {
-          title: 'D',
-          total: 50,
-          self: 30,
-          children: [
-            {
-              title: 'A',
-              total: 50,
-              self: 0
-            },
-            {
-              title: 'B',
-              total: 30,
-              self: 30,
-              children: [
-                {
-                  title: 'A',
-                  total: 30,
-                  self: 30,
-                  children: [
-                    {
-                      title: 'C',
-                      total: 30,
-                      self: 30,
-                      children: [
-                        {
-                          title: 'B',
-                          total: 30,
-                          self: 30,
-                          children: [
-                            {
-                              title: 'D',
-                              total: 30,
-                              self: 30,
-                              children: [
-                                {
-                                  title: 'A',
-                                  total: 30,
-                                  self: 30
-                                }
-                              ]
-                            }
-                          ]
-                        }
-                      ]
-                    }
-                  ]
-                }
-              ]
-            }
-          ]
-        }
-      ]
-    });
-  });
-
-  test('zFunction', function() {
-    // Empty list/string.
-    assert.deepEqual(zFunction([]), []);
-    assert.deepEqual(zFunction(''), []);
-
-    // Singleton list/string.
-    assert.deepEqual(zFunction([1]), [0]);
-    assert.deepEqual(zFunction('T'), [0]);
-
-    // No duplicate elements.
-    assert.deepEqual(zFunction([1, 2, 3, 4, 5]), [0, 0, 0, 0, 0]);
-    assert.deepEqual(zFunction('ABCDEF'), [0, 0, 0, 0, 0, 0]);
-
-    // No substring is a suffix.
-    assert.deepEqual(zFunction([1, 2, 3, 2]), [0, 0, 0, 0]);
-    assert.deepEqual(zFunction('ABBB'), [0, 0, 0, 0]);
-
-    // Pure repetition.
-    assert.deepEqual(zFunction([1, 1, 1, 1, 1]), [0, 4, 3, 2, 1]);
-    assert.deepEqual(zFunction('AAAAA'), [0, 4, 3, 2, 1]);
-
-    // Interleaved repetition.
-    assert.deepEqual(zFunction([1, 2, 1, 3, 1, 2, 1]), [0, 0, 1, 0, 3, 0, 1]);
-    assert.deepEqual(zFunction('AAABAAB'), [0, 2, 1, 0, 2, 1, 0]);
-
-    // Complex patterns.
-    assert.deepEqual(
-        zFunction([7, 9, 7, 9, 7, 9, 7, 9]), [0, 0, 6, 0, 4, 0, 2, 0]);
-    assert.deepEqual(
-        zFunction('CCGTCCCGTACC'), [0, 1, 0, 0, 2, 4, 1, 0, 0, 0, 2, 1]);
-  });
-
-  test('__proto__TitledNode', function() {
-    var root = new StackFrameTreeNode('root', undefined);
-
-    var frame1 = new StackFrame(undefined, tr.b.GUID.allocate(), 'frame1');
-    var child1 = root.getOrCreateChild('__proto__', frame1);
-
-    checkNode(root, 'root', undefined, undefined, [child1], 0, 0,
-        ['root']);
-    checkNode(child1, '__proto__', frame1, root, [], 0, 0,
-        ['__proto__', 'root']);
-  });
-});
-</script>
diff --git a/catapult/tracing/tracing/ui/analysis/tab_view_test.html b/catapult/tracing/tracing/ui/analysis/tab_view_test.html
deleted file mode 100644
index 5ca45d6..0000000
--- a/catapult/tracing/tracing/ui/analysis/tab_view_test.html
+++ /dev/null
@@ -1,323 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2014 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/ui/analysis/tab_view.html">
-
-<template id="tab-view-test-template">
-  <tr-ui-a-tab-view>
-    <p tab-label="Existing Label"> Tab with label already set </p>
-    <p> Tab Content with no label </p>
-    <p selected="selected" tab-label="Should be selected">
-      Already selected tab
-    </p>
-    <p selected="selected" tab-label="Should not be selected">
-      Second already selected tab
-    </p>
-  </tr-ui-a-tab-view>
-</template>
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  var THIS_DOC = document._currentScript.ownerDocument;
-
-  test('instantiate', function() {
-
-    var TAB_TEXT = 'Lorem ipsum dolor sit amet, consectetur adipiscing elit.' +
-        ' Cras eleifend elit nec erat tristique pellentesque. Cras placerat ' +
-        'lectus, sed semper tortor ornare quis. Maecenas vitae hendrerit. ' +
-        'Cras mattis interdum nisi, eget egestas dui iaculis ultricies. Proi' +
-        'n magna at nibh fringilla tincidunt id vitae ante. Fusce nec urna n' +
-        'on porttitor tincidunt. Pellentesque habitant morbi tristique senec' +
-        'tus netus et malesuada fames ac turpis egestas. Suspendisse sed vel' +
-        'it mollis ornare sit amet vel augue. Nullam rhoncus in tellus id. ' +
-        'Vestibulum ante ipsum primis in faucibus orci luctus et ultrices ' +
-        'cubilia Curae; Nunc at velit consectetur ipsum tempus tempus. Nunc ' +
-        'mattis sapien, a placerat erat. Vivamus ac enim ultricies, gravida ' +
-        'nulla ut, scelerisque magna. Sed a volutpat enim. Morbi vulputate, ' +
-        'sed egestas mollis, urna nisl varius sem, sed venenatis turpis null' +
-        'a ipsum. Suspendisse potenti.';
-
-    var tabViewContainer = document.createElement('div');
-    tabViewContainer.style.width = '500px';
-    tabViewContainer.style.height = '200px';
-
-    var tabView = new TracingAnalysisTabView();
-
-    var firstTab = document.createElement('div');
-    firstTab.setAttribute('tab-label', 'First Tab Label');
-    firstTab.innerHTML = '<p>' + TAB_TEXT + '<p>';
-
-    var secondTab = document.createElement('div');
-    secondTab.setAttribute('tab-label', 'Second Tab Label');
-    secondTab.innerHTML = '<b>' + 'Second Tab Text' + '</b>';
-
-    var thirdTab = document.createElement('div');
-    thirdTab.setAttribute('tab-label', 'Third Tab Label');
-    thirdTab.innerHTML = '<b>' + 'Third Tab Text' + '</b>';
-
-    tabView.appendChild(firstTab);
-    tabView.appendChild(secondTab);
-    tabView.appendChild(thirdTab);
-    tabViewContainer.appendChild(tabView);
-
-    this.addHTMLOutput(tabViewContainer);
-
-    thirdTab.setAttribute('tab-label', 'Something Different');
-
-    var button = document.createElement('button');
-    button.textContent = 'Change label';
-
-    button.addEventListener('click', function() {
-      thirdTab.setAttribute('tab-label', 'Label Changed');
-    });
-
-    tabView.selectedTab = secondTab;
-    this.addHTMLOutput(button);
-  });
-
-
-  test('instantiateWithTabHeading', function() {
-    var TAB_TEXT = 'Lorem ipsum dolor sit amet, consectetur adipiscing elit.' +
-        ' Cras eleifend elit nec erat tristique pellentesque. Cras placerat ' +
-        'lectus, sed semper tortor ornare quis. Maecenas vitae hendrerit. ' +
-        'Cras mattis interdum nisi, eget egestas dui iaculis ultricies. Proi' +
-        'n magna at nibh fringilla tincidunt id vitae ante. Fusce nec urna n' +
-        'on porttitor tincidunt. Pellentesque habitant morbi tristique senec' +
-        'tus netus et malesuada fames ac turpis egestas. Suspendisse sed vel' +
-        'it mollis ornare sit amet vel augue. Nullam rhoncus in tellus id. ' +
-        'Vestibulum ante ipsum primis in faucibus orci luctus et ultrices ' +
-        'cubilia Curae; Nunc at velit consectetur ipsum tempus tempus. Nunc ' +
-        'mattis sapien, a placerat erat. Vivamus ac enim ultricies, gravida ' +
-        'nulla ut, scelerisque magna. Sed a volutpat enim. Morbi vulputate, ' +
-        'sed egestas mollis, urna nisl varius sem, sed venenatis turpis null' +
-        'a ipsum. Suspendisse potenti.';
-
-    var tabViewContainer = document.createElement('div');
-    tabViewContainer.style.width = '500px';
-    tabViewContainer.style.height = '200px';
-
-    var tabView = new TracingAnalysisTabView();
-    tabView.tabStripHeadingText = 'Hello world:';
-
-    var firstTab = document.createElement('div');
-    firstTab.setAttribute('tab-label', 'First Tab Label');
-    firstTab.innerHTML = '<p>' + TAB_TEXT + '<p>';
-
-    var secondTab = document.createElement('div');
-    secondTab.setAttribute('tab-label', 'Second Tab Label');
-    secondTab.innerHTML = '<b>' + 'Second Tab Text' + '</b>';
-
-    var thirdTab = document.createElement('div');
-    thirdTab.setAttribute('tab-label', 'Third Tab Label');
-    thirdTab.innerHTML = '<b>' + 'Third Tab Text' + '</b>';
-
-    tabView.appendChild(firstTab);
-    tabView.appendChild(secondTab);
-    tabView.appendChild(thirdTab);
-    tabViewContainer.appendChild(tabView);
-
-    this.addHTMLOutput(tabViewContainer);
-    tabView.selectedTab = secondTab;
-  });
-
-  test('instantiateChildrenAlreadyInside', function() {
-    var tabViewTemplate = THIS_DOC.querySelector('#tab-view-test-template');
-    var tabView = tabViewTemplate.createInstance();
-
-    var tabViewContainer = document.createElement('div');
-    tabViewContainer.style.width = '400px';
-    tabViewContainer.style.height = '200px';
-
-    tabViewContainer.appendChild(tabView);
-
-    this.addHTMLOutput(tabViewContainer);
-
-  });
-
-  test('programaticallySetSelectedTab', function() {
-    var tabViewContainer = document.createElement('div');
-    tabViewContainer.style.width = '500px';
-    tabViewContainer.style.height = '200px';
-
-    var tabView = new TracingAnalysisTabView();
-
-    var t1 = document.createElement('div');
-    var t2 = document.createElement('div');
-    var t3 = document.createElement('div');
-
-    tabView.appendChild(t1);
-    tabView.appendChild(t2);
-    tabView.appendChild(t3);
-
-    assert.isUndefined(tabView.selectedTab);
-    tabView.selectedTab = t1;
-
-    assert.isTrue(t1.hasAttribute('selected'));
-    assert.isFalse(t2.hasAttribute('selected'));
-    assert.isFalse(t3.hasAttribute('selected'));
-    assert.isTrue(Object.is(t1, tabView.selectedTab));
-
-    tabView.selectedTab = t2;
-    assert.isFalse(t1.hasAttribute('selected'));
-    assert.isTrue(t2.hasAttribute('selected'));
-    assert.isFalse(t3.hasAttribute('selected'));
-    assert.isTrue(Object.is(t2, tabView.selectedTab));
-
-    tabView.selectedTab = t3;
-    assert.isFalse(t1.hasAttribute('selected'));
-    assert.isFalse(t2.hasAttribute('selected'));
-    assert.isTrue(t3.hasAttribute('selected'));
-    assert.isTrue(Object.is(t3, tabView.selectedTab));
-
-    t1.selected = true;
-    assert.isTrue(t1.hasAttribute('selected'));
-    assert.isFalse(t2.hasAttribute('selected'));
-    assert.isFalse(t3.hasAttribute('selected'));
-    assert.isTrue(Object.is(t1, tabView.selectedTab));
-
-    // Make sure just randomly setting a tab as not selected does not
-    // break the existing selection.
-    t2.selected = false;
-    t3.selected = false;
-    assert.isTrue(t1.hasAttribute('selected'));
-    assert.isFalse(t2.hasAttribute('selected'));
-    assert.isFalse(t3.hasAttribute('selected'));
-    assert.isTrue(Object.is(t1, tabView.selectedTab));
-
-    t3.selected = true;
-    assert.isFalse(t1.hasAttribute('selected'));
-    assert.isFalse(t2.hasAttribute('selected'));
-    assert.isTrue(t3.hasAttribute('selected'));
-    assert.isTrue(Object.is(t3, tabView.selectedTab));
-
-    tabViewContainer.appendChild(tabView);
-
-    this.addHTMLOutput(tabViewContainer);
-  });
-
-  /**
-   * This test checks that if an element has a selected property already set,
-   * before being attached to the tabView, it still gets selected if the
-   * property is true, after it gets attached.
-   */
-  test('instantiateSetSelectedTabAlreadySet', function() {
-    var tabViewContainer = document.createElement('div');
-    tabViewContainer.style.width = '500px';
-    tabViewContainer.style.height = '200px';
-
-    var tabView = new TracingAnalysisTabView();
-
-    var t1 = document.createElement('div');
-    t1.textContent = 'This text should BE visible.';
-    var t2 = document.createElement('div');
-    t2.textContent = 'This text should NOT be visible.';
-    var t3 = document.createElement('div');
-    t3.textContent = 'This text should NOT be visible, also.';
-
-    t1.selected = true;
-    t2.selected = false;
-    t3.selected = false;
-
-    tabView.appendChild(t1);
-    tabView.appendChild(t2);
-    tabView.appendChild(t3);
-
-    t1.setAttribute('tab-label', 'This should be selected');
-    t2.setAttribute('tab-label', 'Not selected');
-    t3.setAttribute('tab-label', 'Not selected');
-
-    tabViewContainer.appendChild(tabView);
-
-    this.addHTMLOutput(tabViewContainer);
-  });
-
-  test('selectingInvalidTabWorks', function() {
-    var tabView = new TracingAnalysisTabView();
-    var t1 = document.createElement('div');
-    var t2 = document.createElement('div');
-    var t3 = document.createElement('div');
-    var invalidChild = document.createElement('div');
-
-    tabView.appendChild(t1);
-    tabView.appendChild(t2);
-    tabView.appendChild(t3);
-
-    tabView.selectedTab = t1;
-
-    assert.equal(tabView.selectedTab, t1);
-
-    // Make sure that selecting an invalid tab does not break the current
-    // selection.
-    tabView.selectedTab = invalidChild;
-    assert.equal(t1, tabView.selectedTab);
-
-    // Also make sure the invalidChild does not influence the tab view when
-    // it has a selected property set.
-    invalidChild.selected = true;
-    tabView.selectedTab = invalidChild;
-    assert.equal(t1, tabView.selectedTab);
-  });
-
-  test('changeTabCausesEvent', function() {
-    var tabView = new TracingAnalysisTabView();
-    var t1 = document.createElement('div');
-    var t2 = document.createElement('div');
-    var invalidChild = document.createElement('div');
-
-    tabView.appendChild(t1);
-    tabView.appendChild(t2);
-
-    var numChangeEvents = 0;
-    tabView.addEventListener('selected-tab-change', function() {
-        numChangeEvents++;
-    });
-    tabView.selectedTab = t1;
-    assert.equal(numChangeEvents, 1);
-    tabView.selectedTab = t1;
-    assert.equal(numChangeEvents, 1);
-    tabView.selectedTab = t2;
-    assert.equal(numChangeEvents, 2);
-    tabView.selectedTab = undefined;
-    assert.equal(numChangeEvents, 3);
-  });
-
-  /**
-   * This test makes sure that removing the selected tab does not select
-   * any other tab.
-   */
-  test('instantiateRemovingSelectedTab', function() {
-    var tabViewContainer = document.createElement('div');
-    tabViewContainer.style.width = '500px';
-    tabViewContainer.style.height = '200px';
-
-    var tabView = new TracingAnalysisTabView();
-
-    var t1 = document.createElement('div');
-    t1.textContent = 'This text should BE visible.';
-    var t2 = document.createElement('div');
-    t2.textContent = 'This text should NOT be visible.';
-    var t3 = document.createElement('div');
-    t3.textContent = 'This text should NOT be visible, also.';
-
-    tabView.appendChild(t1);
-    tabView.appendChild(t2);
-    tabView.appendChild(t3);
-
-    t1.setAttribute('tab-label', 'This should not exist');
-    t2.setAttribute('tab-label', 'Not selected');
-    t3.setAttribute('tab-label', 'Not selected');
-
-    tabView.selectedTab = t1;
-    tabView.removeChild(t1);
-
-    tabViewContainer.appendChild(tabView);
-
-    this.addHTMLOutput(tabViewContainer);
-  });
-});
-</script>
diff --git a/catapult/tracing/tracing/ui/annotations/annotation_view_test.html b/catapult/tracing/tracing/ui/annotations/annotation_view_test.html
index 8a975cc..5ff3f31 100644
--- a/catapult/tracing/tracing/ui/annotations/annotation_view_test.html
+++ b/catapult/tracing/tracing/ui/annotations/annotation_view_test.html
@@ -5,14 +5,14 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/tracing/model/location.html">
-<link rel="import" href="/tracing/ui/timeline_track_view.html">
-<link rel="import" href="/tracing/ui/timeline_viewport.html">
 <link rel="import" href="/tracing/core/test_utils.html">
 <link rel="import" href="/tracing/model/comment_box_annotation.html">
-<link rel="import" href="/tracing/model/rect_annotation.html">
+<link rel="import" href="/tracing/model/location.html">
 <link rel="import" href="/tracing/model/model.html">
+<link rel="import" href="/tracing/model/rect_annotation.html">
 <link rel="import" href="/tracing/model/x_marker_annotation.html">
+<link rel="import" href="/tracing/ui/timeline_track_view.html">
+<link rel="import" href="/tracing/ui/timeline_viewport.html">
 
 <script>
 'use strict';
@@ -23,7 +23,8 @@
     var model = new tr.Model();
     var process = model.getOrCreateProcess(1);
     var thread = process.getOrCreateThread(2);
-    thread.sliceGroup.pushSlice(tr.c.TestUtils.newSliceNamed('a', 80, 50));
+    thread.sliceGroup.pushSlice(tr.c.TestUtils.newSliceEx(
+        {title: 'a', start: 80, duration: 50}));
 
     var timeline = document.createElement('tr-ui-timeline-track-view');
     var vp = new tr.ui.TimelineViewport(timeline);
diff --git a/catapult/tracing/tracing/ui/base/bar_chart.html b/catapult/tracing/tracing/ui/base/bar_chart.html
index dcf9d6b..b4b4cba 100644
--- a/catapult/tracing/tracing/ui/base/bar_chart.html
+++ b/catapult/tracing/tracing/ui/base/bar_chart.html
@@ -69,7 +69,7 @@
 
       // Y.
       this.yScale_.range([this.chartAreaSize.height, 0]);
-      this.yScale_.domain([Math.min(yRange.min, 0), yRange.max]);
+      this.yScale_.domain(this.getYScaleDomain_(yRange.min, yRange.max));
     },
 
     updateDataContents_: function(dataSel) {
@@ -98,7 +98,7 @@
           var left = this.xScale_(currentX);
           var right = this.xScale_(currentX + width);
           var widthPx = right - left;
-          var top = this.yScale_(stack.y);
+          var top = this.yScale_(Math.max(stack.y, this.getYScaleMin_()));
           rectsSel.enter()
               .append('rect')
               .attr('fill', stack.color)
diff --git a/catapult/tracing/tracing/ui/base/bar_chart_test.html b/catapult/tracing/tracing/ui/base/bar_chart_test.html
index fcdaa07..24d9de5 100644
--- a/catapult/tracing/tracing/ui/base/bar_chart_test.html
+++ b/catapult/tracing/tracing/ui/base/bar_chart_test.html
@@ -26,6 +26,24 @@
     this.addHTMLOutput(chart);
   });
 
+  test('instantiation_singleSeries_yLogScale', function() {
+    var chart = new tr.ui.b.BarChart();
+    chart.isYLogScale = true;
+    chart.width = 400;
+    chart.height = 200;
+    chart.chartTitle = 'Chart title';
+    var data = [
+      {x: 10, value: 100},
+      {x: 20, value: 10},
+      {x: 30, value: 1},
+      {x: 40, value: 0.1},
+      {x: 50, value: 0.01},
+      {x: 60, value: 0.001}
+    ];
+    chart.data = data;
+    this.addHTMLOutput(chart);
+  });
+
   test('undefined', function() {
     var chart = new tr.ui.b.BarChart();
     assert.throws(function() {
@@ -55,6 +73,28 @@
     this.addHTMLOutput(chart);
   });
 
+  test('instantiation_twoSeries_yLogScale', function() {
+    var chart = new tr.ui.b.BarChart();
+    chart.isYLogScale = true;
+    chart.width = 400;
+    chart.height = 200;
+    chart.chartTitle = 'Chart title';
+    var data = [
+      {x: 10, alpha: 100, beta: 50},
+      {x: 20, alpha: 110, beta: 75},
+      {x: 30, alpha: 100, beta: 125},
+      {x: 40, alpha: 50, beta: 125}
+    ];
+    chart.data = data;
+
+    var r = new tr.b.Range();
+    r.addValue(20);
+    r.addValue(40);
+    chart.brushedRange = r;
+
+    this.addHTMLOutput(chart);
+  });
+
   test('instantiation_twoSparseSeriesWithFirstValueSparse', function() {
     var chart = new tr.ui.b.BarChart();
 
diff --git a/catapult/tracing/tracing/ui/base/chart_base_2d.html b/catapult/tracing/tracing/ui/base/chart_base_2d.html
index 7dd9d1c..5e32e3b 100644
--- a/catapult/tracing/tracing/ui/base/chart_base_2d.html
+++ b/catapult/tracing/tracing/ui/base/chart_base_2d.html
@@ -5,6 +5,7 @@
 found in the LICENSE file.
 -->
 
+<link rel="import" href="/tracing/base/iteration_helpers.html">
 <link rel="import" href="/tracing/base/range.html">
 <link rel="import" href="/tracing/ui/base/chart_base.html">
 <link rel="import" href="/tracing/ui/base/mouse_tracker.html">
@@ -34,6 +35,9 @@
       this.classList.add('chart-base-2d');
       this.xScale_ = d3.scale.linear();
       this.yScale_ = d3.scale.linear();
+      this.isYLogScale_ = false;
+      this.yLogScaleMin_ = undefined;
+      this.dataRange_ = new tr.b.Range();
 
       this.data_ = [];
       this.seriesKeys_ = [];
@@ -67,9 +71,28 @@
 
       this.data_ = data;
       this.updateSeriesKeys_();
+      this.updateDataRange_();
       this.updateContents_();
     },
 
+    set isYLogScale(logScale) {
+      if (logScale)
+        this.yScale_ = d3.scale.log(10);
+      else
+        this.yScale_ = d3.scale.linear();
+      this.isYLogScale_ = logScale;
+    },
+
+    getYScaleMin_: function() {
+      return this.isYLogScale_ ? this.yLogScaleMin_ : 0;
+    },
+
+    getYScaleDomain_: function(minValue, maxValue) {
+      if (this.isYLogScale_)
+        return [this.getYScaleMin_(), maxValue];
+      return [Math.min(minValue, this.getYScaleMin_()), maxValue];
+    },
+
     getSampleWidth_: function(data, index, leftSide) {
       var leftIndex, rightIndex;
       if (leftSide) {
@@ -171,12 +194,62 @@
       return margin;
     },
 
+    updateDataRange_: function() {
+      var dataBySeriesKey = this.getDataBySeriesKey_();
+      this.dataRange_.reset();
+      tr.b.iterItems(dataBySeriesKey, function(series, values) {
+        for (var i = 0; i < values.length; i++) {
+          this.dataRange_.addValue(values[i][series]);
+        }
+      }, this);
+
+      // Use the power of ten one below the smallest value (rounded down) as
+      // the smallest tick to display.
+      this.yLogScaleMin_ = undefined;
+      if (this.dataRange_.min !== undefined) {
+        var minValue = this.dataRange_.min;
+        if (minValue == 0)
+          minValue = 1;
+
+        var onePowerLess = Math.floor(
+            Math.log(minValue) / Math.log(10)) - 1;
+        this.yLogScaleMin_ = Math.pow(10, onePowerLess);
+      }
+    },
+
     updateYAxis_: function(yAxis) {
       yAxis.selectAll('*').remove();
       yAxis[0][0].style.opacity = 0;
-      yAxis.call(d3.svg.axis()
+
+      var axisModifier = d3.svg.axis()
         .scale(this.yScale_)
-        .orient('left'));
+        .orient('left');
+
+      if (this.isYLogScale_) {
+        if (this.yLogScaleMin_ === undefined)
+          return;
+        var minValue = this.dataRange_.min;
+        if (minValue == 0)
+          minValue = 1;
+
+        var largestPower = Math.ceil(
+            Math.log(this.dataRange_.max) / Math.log(10)) + 1;
+        var smallestPower = Math.floor(
+            Math.log(minValue) / Math.log(10));
+        var tickValues = [];
+        for (var i = smallestPower; i < largestPower; i++) {
+          tickValues.push(Math.pow(10, i));
+        }
+
+        axisModifier = axisModifier
+          .tickValues(tickValues)
+          .tickFormat(function(d) {
+            return d;
+          });
+      }
+
+      yAxis.call(axisModifier);
+
       window.requestAnimationFrame(function() {
         var previousTop = undefined;
         var leftMargin = 0;
@@ -240,10 +313,13 @@
           if (multiSeriesDatum[seriesKey] === undefined)
             return;
 
+          if (!this.isDatumFieldSeries_(seriesKey))
+            return;
+
           var singleSeriesDatum = {x: x};
           singleSeriesDatum[seriesKey] = multiSeriesDatum[seriesKey];
           dataBySeriesKey[seriesKey].push(singleSeriesDatum);
-        });
+        }, this);
       }, this);
 
       return dataBySeriesKey;
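For reference, the y-log-scale bookkeeping added above can be exercised in isolation. A minimal sketch in plain JavaScript (no d3; the sample range is taken from the single-series log-scale bar chart test earlier in this patch):

// Smallest tick: one power of ten below the minimum value, mirroring
// updateDataRange_ above.
function logScaleMin(minValue) {
  if (minValue === 0)
    minValue = 1;
  var onePowerLess = Math.floor(Math.log(minValue) / Math.log(10)) - 1;
  return Math.pow(10, onePowerLess);
}

// Tick values used by updateYAxis_: every power of ten from
// floor(log10(min)) through ceil(log10(max)).
function logTickValues(minValue, maxValue) {
  if (minValue === 0)
    minValue = 1;
  var smallestPower = Math.floor(Math.log(minValue) / Math.log(10));
  var largestPower = Math.ceil(Math.log(maxValue) / Math.log(10)) + 1;
  var ticks = [];
  for (var i = smallestPower; i < largestPower; i++)
    ticks.push(Math.pow(10, i));
  return ticks;
}

logScaleMin(0.001);         // 0.0001
logTickValues(0.001, 100);  // [0.001, 0.01, 0.1, 1, 10, 100]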
diff --git a/catapult/tracing/tracing/ui/base/checkbox.html b/catapult/tracing/tracing/ui/base/checkbox.html
new file mode 100644
index 0000000..9de4918
--- /dev/null
+++ b/catapult/tracing/tracing/ui/base/checkbox.html
@@ -0,0 +1,107 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/base/settings.html">
+<link rel="import" href="/tracing/ui/base/dom_helpers.html">
+<link rel="import" href="/tracing/ui/base/ui.html">
+
+<polymer-element name='tr-ui-b-checkbox'>
+  <template>
+    <style>
+    .inline {
+      display: inline-block;
+    }
+    </style>
+
+    <input type="checkbox" id="checkbox" class="inline"/>
+    <div id="label" class="inline"></div>
+  </template>
+
+  <script>
+  'use strict';
+
+  Polymer({
+    created: function() {
+      this.needsInit_ = true;
+      this.defaultCheckedValue_ = undefined;
+      this.settingsKey_ = undefined;
+      this.label_ = undefined;
+      this.checked_ = false;
+      this.is_ready_ = false;
+    },
+
+    ready: function() {
+      this.is_ready_ = true;
+      this.$.checkbox.addEventListener('click', function() {
+        this.checked = this.$.checkbox.checked;
+      }.bind(this));
+      this.maybeUpdateElements_();
+    },
+
+    maybeUpdateElements_: function() {
+      if (!this.is_ready_)
+        return;
+      this.$.label.innerText = this.label_;
+      this.$.checkbox.checked = this.checked_;
+    },
+
+    get defaultCheckedValue() {
+      return this.defaultCheckedValue_;
+    },
+
+    set defaultCheckedValue(defaultCheckedValue) {
+      if (!this.needsInit_)
+        throw new Error('Already initialized.');
+      this.defaultCheckedValue_ = defaultCheckedValue;
+      this.maybeInit_();
+    },
+
+    get settingsKey() {
+      return this.settingsKey_;
+    },
+
+    set settingsKey(settingsKey) {
+      if (!this.needsInit_)
+        throw new Error('Already initialized.');
+      this.settingsKey_ = settingsKey;
+      this.maybeInit_();
+    },
+
+    maybeInit_: function() {
+      if (!this.needsInit_)
+        return;
+      if (this.settingsKey_ === undefined)
+        return;
+      if (this.defaultCheckedValue_ === undefined)
+        return;
+      this.needsInit_ = false;
+      this.checked = tr.b.Settings.get(
+        this.settingsKey_, this.defaultCheckedValue_);
+    },
+
+    get label() {
+      return this.label_;
+    },
+
+    set label(label) {
+      this.label_ = label;
+      this.maybeUpdateElements_();
+    },
+
+    get checked() {
+      return this.checked_;
+    },
+
+    set checked(checked) {
+      this.checked_ = checked;
+      this.maybeUpdateElements_();
+      tr.b.Settings.set(this.settingsKey_, this.checked_);
+    },
+
+  });
+  </script>
+</polymer-element>
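A minimal usage sketch for the new tr-ui-b-checkbox element (the label and settings key below are made up); the tests later in this patch follow the same pattern:

var cb = document.createElement('tr-ui-b-checkbox');
cb.label = 'Show details';                 // hypothetical label
cb.defaultCheckedValue = false;
cb.settingsKey = 'example-show-details';   // hypothetical settings key
document.body.appendChild(cb);

// The checked state round-trips through tr.b.Settings, so another checkbox
// created later with the same settingsKey starts from the persisted value.
cb.checked = true;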
diff --git a/catapult/tracing/tracing/ui/base/checkbox_picker.html b/catapult/tracing/tracing/ui/base/checkbox_picker.html
new file mode 100644
index 0000000..261b159
--- /dev/null
+++ b/catapult/tracing/tracing/ui/base/checkbox_picker.html
@@ -0,0 +1,111 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/ui/base/checkbox.html">
+<link rel="import" href="/tracing/ui/base/ui.html">
+
+<polymer-element name='tr-ui-b-checkbox-picker'>
+  <template>
+    <style>
+    #container {
+      display: flex;
+      flex-direction: column;
+    }
+    </style>
+
+    <div id="container">
+    </div>
+
+  </template>
+
+  <script>
+  'use strict';
+
+  Polymer({
+    created: function() {
+      this.needsInit_ = true;
+      this.settingsKey_ = undefined;
+      this.is_ready_ = false;
+      this.checkboxes_ = undefined;
+    },
+
+    ready: function() {
+      this.is_ready_ = true;
+      this.maybeInit_();
+      this.maybeRenderCheckboxes_();
+    },
+
+    get settingsKey() {
+      return this.settingsKey_;
+    },
+
+    set settingsKey(settingsKey) {
+      if (!this.needsInit_)
+        throw new Error('Already initialized.');
+      this.settingsKey_ = settingsKey;
+      this.maybeInit_();
+    },
+
+    maybeInit_: function() {
+      if (!this.needsInit_)
+        return;
+      if (this.settingsKey_ === undefined)
+        return;
+      if (this.checkboxes_ === undefined)
+        return;
+
+      this.needsInit_ = false;
+
+      for (var key in this.checkboxes_) {
+        this.checkboxes_[key].defaultCheckedValue = false;
+        this.checkboxes_[key].settingsKey = this.settingsKey_ + key;
+      }
+    },
+
+    set items(items) {
+      this.checkboxes_ = {};
+      items.forEach(function(e) {
+        if (e.key in this.checkboxes_)
+          throw new Error(e.key + ' already exists');
+        var checkboxEl = document.createElement('tr-ui-b-checkbox');
+        checkboxEl.label = e.label;
+        this.checkboxes_[e.key] = checkboxEl;
+      }.bind(this));
+      this.maybeInit_();
+      this.maybeRenderCheckboxes_();
+    },
+
+    maybeRenderCheckboxes_: function() {
+      if (!this.is_ready_)
+        return;
+      if (this.checkboxes_ === undefined)
+        return;
+      for (var key in this.checkboxes_)
+        this.$.container.appendChild(this.checkboxes_[key]);
+    },
+
+    selectCheckbox: function(key) {
+      if (!(key in this.checkboxes_))
+        throw new Error(key + ' does not exist');
+      this.checkboxes_[key].checked = true;
+    },
+
+    unselectCheckbox: function(key) {
+      if (!(key in this.checkboxes_))
+        throw new Error(key + ' does not exist');
+      this.checkboxes_[key].checked = false;
+    },
+
+    get checkedKeys() {
+      return Object.keys(this.checkboxes_).filter(function(k) {
+        return this.checkboxes_[k].checked;
+      }.bind(this));
+    },
+
+  });
+  </script>
+</polymer-element>
diff --git a/catapult/tracing/tracing/ui/base/checkbox_picker_test.html b/catapult/tracing/tracing/ui/base/checkbox_picker_test.html
new file mode 100644
index 0000000..40d8336
--- /dev/null
+++ b/catapult/tracing/tracing/ui/base/checkbox_picker_test.html
@@ -0,0 +1,139 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/ui/base/checkbox_picker.html">
+<link rel="import" href="/tracing/ui/base/dom_helpers.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  test('basicAllCheckboxUnchecked', function() {
+    var cp = document.createElement('tr-ui-b-checkbox-picker');
+    cp.items = [
+      {key: 'Toyota', label: 'I want to drive Toyota'},
+      {key: 'Boeing', label: 'I want to fly'}
+    ];
+    this.addHTMLOutput(cp);
+    assert.deepEqual(cp.checkedKeys, []);
+  });
+
+  test('basicSomeCheckboxChecked', function() {
+    var cp = document.createElement('tr-ui-b-checkbox-picker');
+    cp.items = [
+      {key: 'Toyota', label: 'I want to drive Toyota'},
+      {key: 'Honda', label: 'I want to drive Honda'},
+      {key: 'Tesla', label: 'I want to drive electric car'},
+    ];
+
+    cp.selectCheckbox('Toyota');
+    cp.selectCheckbox('Tesla');
+    this.addHTMLOutput(cp);
+    assert.deepEqual(cp.checkedKeys.sort(), ['Tesla', 'Toyota']);
+    cp.unselectCheckbox('Toyota');
+    assert.deepEqual(cp.checkedKeys, ['Tesla']);
+  });
+
+  test('duplicateKeys', function() {
+    var cp = document.createElement('tr-ui-b-checkbox-picker');
+    assert.throws(function() {
+      cp.items = [
+        {key: 'Toyota', label: 'I want to drive Toyota'},
+        {key: 'Honda', label: 'I want to drive Honda'},
+        {key: 'Toyota', label: 'I want to drive electric car'},
+      ];
+    });
+  });
+
+  test('selectAndUnselectNonExistingKey', function() {
+    var cp = document.createElement('tr-ui-b-checkbox-picker');
+    cp.items = [
+      {key: 'Toyota', label: 'I want to drive Toyota'},
+      {key: 'Honda', label: 'I want to drive Honda'},
+    ];
+    assert.throws(function() {
+      cp.selectCheckbox('Lamborghini');
+    });
+    assert.throws(function() {
+      cp.unselectCheckbox('Rolls Royce');
+    });
+  });
+
+  test('testPersistentStateOneSetSettingsKeyBeforeSettingItems', function() {
+    var container1 = tr.ui.b.createDiv({textContent: 'Checkbox Picker One'});
+    container1.style.border = 'solid';
+    var cp = document.createElement('tr-ui-b-checkbox-picker');
+    cp.settingsKey = 'checkbox-picker-test-one';
+    cp.items = [
+        {key: 'Toyota', label: 'I want to drive Toyota'},
+        {key: 'Honda', label: 'I want to drive Honda'},
+        {key: 'Tesla', label: 'I want to drive electric car'},
+      ];
+    cp.selectCheckbox('Toyota');
+    cp.selectCheckbox('Tesla');
+    container1.appendChild(cp);
+    this.addHTMLOutput(container1);
+    cp.unselectCheckbox('Tesla');
+    assert.deepEqual(cp.checkedKeys, ['Toyota']);
+
+    this.addHTMLOutput(document.createElement('br'));
+
+    var container2 = tr.ui.b.createDiv(
+        {textContent:
+            'Checkbox Picker Two (Same settingsKey as Checkbox Picker One)'});
+    container2.style.border = 'solid #0000FF';
+    var cp2 = document.createElement('tr-ui-b-checkbox-picker');
+    cp2.settingsKey = 'checkbox-picker-test-one';
+    cp2.items = [
+        {key: 'Toyota', label: 'I want to drive Toyota'},
+        {key: 'Honda', label: 'I want to drive Honda'},
+        {key: 'Tesla', label: 'I want to drive electric car'},
+      ];
+    container2.appendChild(cp2);
+    this.addHTMLOutput(container2);
+    assert.deepEqual(cp2.checkedKeys, ['Toyota']);
+  });
+
+  test('testPersistentStateTwoSetSettingsKeyAfterSettingItems', function() {
+    var container1 = tr.ui.b.createDiv({textContent: 'Checkbox Picker One'});
+    container1.style.border = 'solid';
+    var cp = document.createElement('tr-ui-b-checkbox-picker');
+    cp.items = [
+        {key: 'Toyota', label: 'I want to drive Toyota'},
+        {key: 'Honda', label: 'I want to drive Honda'},
+        {key: 'Tesla', label: 'I want to drive electric car'},
+      ];
+    cp.settingsKey = 'checkbox-picker-test-one';
+    cp.selectCheckbox('Toyota');
+    cp.selectCheckbox('Tesla');
+    container1.appendChild(cp);
+    this.addHTMLOutput(container1);
+    assert.deepEqual(cp.checkedKeys.sort(), ['Tesla', 'Toyota']);
+
+    this.addHTMLOutput(document.createElement('br'));
+
+    var container2 = tr.ui.b.createDiv(
+        {textContent:
+            'Checkbox Picker Two (Same settingsKey as Checkbox Picker One)'});
+    container2.style.border = 'solid #0000FF';
+    var cp2 = document.createElement('tr-ui-b-checkbox-picker');
+    cp2.items = [
+        {key: 'Toyota', label: 'I want to drive Toyota'},
+        {key: 'Honda', label: 'I want to drive Honda'},
+        {key: 'Tesla', label: 'I want to drive electric car'},
+      ];
+    container2.appendChild(cp2);
+    this.addHTMLOutput(container2);
+    cp2.settingsKey = 'checkbox-picker-test-one';
+    assert.deepEqual(cp2.checkedKeys.sort(), ['Tesla', 'Toyota']);
+
+  });
+
+
+
+});
+</script>
diff --git a/catapult/tracing/tracing/ui/base/checkbox_test.html b/catapult/tracing/tracing/ui/base/checkbox_test.html
new file mode 100644
index 0000000..16f0223
--- /dev/null
+++ b/catapult/tracing/tracing/ui/base/checkbox_test.html
@@ -0,0 +1,69 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/ui/base/checkbox.html">
+<link rel="import" href="/tracing/ui/base/dom_helpers.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  test('basicUnchecked', function() {
+    var checkbox = document.createElement('tr-ui-b-checkbox');
+    checkbox.label = 'Yo like pizza?';
+    this.addHTMLOutput(checkbox);
+    assert.equal(checkbox.label, 'Yo like pizza?');
+    assert.isFalse(checkbox.checked);
+  });
+
+  test('basicChecked', function() {
+    var checkbox = document.createElement('tr-ui-b-checkbox');
+    checkbox.label = 'Yo like cookie?';
+    checkbox.checked = true;
+    this.addHTMLOutput(checkbox);
+    assert.equal(checkbox.label, 'Yo like cookie?');
+    assert.isTrue(checkbox.checked);
+  });
+
+  test('testPersistentStateOneSetSettingsKeyBeforeAddToDom', function() {
+    var checkbox = document.createElement('tr-ui-b-checkbox');
+    checkbox.settingsKey = 'checkbox-basic-test-one';
+    checkbox.label = 'I like sushi';
+    checkbox.defaultCheckedValue = false;
+    this.addHTMLOutput(checkbox);
+    assert.isFalse(checkbox.checked);
+    checkbox.checked = true;
+
+    var checkbox2 = document.createElement('tr-ui-b-checkbox');
+    checkbox2.label = 'I like sushi';
+    checkbox2.defaultCheckedValue = false;
+    checkbox2.settingsKey = 'checkbox-basic-test-one';
+    this.addHTMLOutput(checkbox2);
+    assert.isTrue(checkbox2.checked);
+  });
+
+  test('testPersistentStateTwoSetSettingsKeyAfterAddToDom', function() {
+    var checkbox = document.createElement('tr-ui-b-checkbox');
+    this.addHTMLOutput(checkbox);
+    checkbox.label = 'I like Ramen';
+    checkbox.settingsKey = 'checkbox-basic-test-two';
+    checkbox.defaultCheckedValue = false;
+    assert.isFalse(checkbox.checked);
+    checkbox.checked = true;
+
+    var checkbox2 = document.createElement('tr-ui-b-checkbox');
+    this.addHTMLOutput(checkbox2);
+    checkbox2.label = 'I like Ramen';
+    checkbox2.defaultCheckedValue = false;
+    checkbox2.settingsKey = 'checkbox-basic-test-two';
+    assert.isTrue(checkbox2.checked);
+  });
+
+
+
+});
+</script>
diff --git a/catapult/tracing/tracing/ui/base/dom_helpers.html b/catapult/tracing/tracing/ui/base/dom_helpers.html
index 91b6690..acdcb2f 100644
--- a/catapult/tracing/tracing/ui/base/dom_helpers.html
+++ b/catapult/tracing/tracing/ui/base/dom_helpers.html
@@ -271,6 +271,37 @@
     return spanEl;
   }
 
+  function createButton(targetEl, targetElProperty, label, opt_changeCb) {
+    var buttonEl = document.createElement('input');
+    buttonEl.type = 'button';
+
+    function onClick() {
+      if (opt_changeCb)
+        opt_changeCb.call();
+    }
+
+    buttonEl.addEventListener('click', onClick);
+    buttonEl.value = label;
+
+    return buttonEl;
+  }
+
+  function createTextInput(
+      targetEl, targetElProperty, settingsKey, defaultValue) {
+    var initialValue = tr.b.Settings.get(settingsKey, defaultValue);
+    var el = document.createElement('input');
+    el.type = 'text';
+    function onChange(e) {
+      tr.b.Settings.set(settingsKey, el.value);
+      targetEl[targetElProperty] = el.value;
+    }
+    el.addEventListener('input', onChange);
+    el.value = initialValue;
+    targetEl[targetElProperty] = initialValue;
+
+    return el;
+  }
+
   function isElementAttachedToDocument(el) {
     var cur = el;
     while (cur.parentNode)
@@ -292,6 +323,8 @@
     createSelector: createSelector,
     createOptionGroup: createOptionGroup,
     createCheckBox: createCheckBox,
+    createButton: createButton,
+    createTextInput: createTextInput,
     isElementAttachedToDocument: isElementAttachedToDocument,
     asHTMLOrTextNode: asHTMLOrTextNode
   };
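A small sketch of how the two new dom_helpers might be wired together (the element, property name, and settings key are illustrative):

var statusEl = document.createElement('span');

// createTextInput keeps targetEl[targetElProperty] and the tr.b.Settings
// entry for settingsKey in sync with what the user types.
var nameInput = tr.ui.b.createTextInput(
    statusEl, 'textContent', 'example-name-setting', 'anonymous');

// createButton ignores its first two arguments and just invokes the callback
// on click.
var resetButton = tr.ui.b.createButton(
    undefined, undefined, 'Reset', function() {
      nameInput.value = 'anonymous';
      statusEl.textContent = 'anonymous';
    });

document.body.appendChild(nameInput);
document.body.appendChild(resetButton);
document.body.appendChild(statusEl);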
diff --git a/catapult/tracing/tracing/ui/base/grouping_table.html b/catapult/tracing/tracing/ui/base/grouping_table.html
new file mode 100644
index 0000000..3415741
--- /dev/null
+++ b/catapult/tracing/tracing/ui/base/grouping_table.html
@@ -0,0 +1,227 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/ui/base/table.html">
+
+<polymer-element name="tr-ui-b-grouping-table">
+  <template>
+    <style>
+    :host {
+      display: flex;
+    }
+    #table {
+      flex: 1 1 auto;
+    }
+    </style>
+    <tr-ui-b-table id="table"></tr-ui-b-table>
+  </template>
+</polymer-element>
+<script>
+'use strict';
+
+tr.exportTo('tr.ui.b', function() {
+
+  function Row(title, data, groupingKeyFuncs, rowStatsConstructor) {
+    this.title = title;
+    this.data_ = data;
+    if (groupingKeyFuncs === undefined)
+      groupingKeyFuncs = [];
+    this.groupingKeyFuncs_ = groupingKeyFuncs;
+    this.rowStatsConstructor_ = rowStatsConstructor;
+
+    this.subRowsBuilt_ = false;
+    this.subRows_ = undefined;
+
+    this.rowStats_ = undefined;
+  }
+
+  Row.prototype = {
+    getCurrentGroupingKeyFunc_: function() {
+      if (this.groupingKeyFuncs_.length === 0)
+        return undefined;
+      return this.groupingKeyFuncs_[0];
+    },
+
+    get data() {
+      return this.data_;
+    },
+
+    get rowStats() {
+      if (this.rowStats_ === undefined) {
+        this.rowStats_ = new this.rowStatsConstructor_(this);
+      }
+      return this.rowStats_;
+    },
+
+    rebuildSubRowsIfNeeded_: function() {
+      if (this.subRowsBuilt_)
+        return;
+      this.subRowsBuilt_ = true;
+
+      var groupingKeyFunc = this.getCurrentGroupingKeyFunc_();
+      if (groupingKeyFunc === undefined) {
+        this.subRows_ = undefined;
+        return;
+      }
+
+      var dataByKey = {};
+      var hasValues = false;
+      this.data_.forEach(function(datum) {
+        var key = groupingKeyFunc(datum);
+        hasValues = hasValues || (key !== undefined);
+        if (dataByKey[key] === undefined)
+          dataByKey[key] = [];
+        dataByKey[key].push(datum);
+      });
+      if (!hasValues) {
+        this.subRows_ = undefined;
+        return;
+      }
+
+      this.subRows_ = [];
+      for (var key in dataByKey) {
+        var row = new Row(key,
+                          dataByKey[key],
+                          this.groupingKeyFuncs_.slice(1),
+                          this.rowStatsConstructor_);
+        this.subRows_.push(row);
+      }
+    },
+
+    get isExpanded() {
+      return (this.subRows &&
+              (this.subRows.length > 0) &&
+              (this.subRows.length < 5));
+    },
+
+    get subRows() {
+      this.rebuildSubRowsIfNeeded_();
+      return this.subRows_;
+    }
+  };
+
+  Polymer('tr-ui-b-grouping-table', {
+    created: function() {
+      this.dataToGroup_ = undefined;
+      this.groupBy_ = undefined;
+      this.rowStatsConstructor_ = undefined;
+    },
+
+    get tableColumns() {
+      return this.$.table.tableColumns;
+    },
+
+    set tableColumns(tableColumns) {
+      this.$.table.tableColumns = tableColumns;
+    },
+
+    get tableRows() {
+      return this.$.table.tableRows;
+    },
+
+    get sortColumnIndex() {
+      return this.$.table.sortColumnIndex;
+    },
+
+    set sortColumnIndex(sortColumnIndex) {
+      this.$.table.sortColumnIndex = sortColumnIndex;
+    },
+
+    get sortDescending() {
+      return this.$.table.sortDescending;
+    },
+
+    set sortDescending(sortDescending) {
+      this.$.table.sortDescending = sortDescending;
+    },
+
+    get selectionMode() {
+      return this.$.table.selectionMode;
+    },
+
+    set selectionMode(selectionMode) {
+      this.$.table.selectionMode = selectionMode;
+    },
+
+    get rowHighlightStyle() {
+      return this.$.table.rowHighlightStyle;
+    },
+
+    set rowHighlightStyle(rowHighlightStyle) {
+      this.$.table.rowHighlightStyle = rowHighlightStyle;
+    },
+
+    get cellHighlightStyle() {
+      return this.$.table.cellHighlightStyle;
+    },
+
+    set cellHighlightStyle(cellHighlightStyle) {
+      this.$.table.cellHighlightStyle = cellHighlightStyle;
+    },
+
+    get selectedColumnIndex() {
+      return this.$.table.selectedColumnIndex;
+    },
+
+    set selectedColumnIndex(selectedColumnIndex) {
+      this.$.table.selectedColumnIndex = selectedColumnIndex;
+    },
+
+    get selectedTableRow() {
+      return this.$.table.selectedTableRow;
+    },
+
+    set selectedTableRow(selectedTableRow) {
+      this.$.table.selectedTableRow = selectedTableRow;
+    },
+
+    get groupBy() {
+      return this.groupBy_;
+    },
+
+    set groupBy(groupBy) {
+      this.groupBy_ = groupBy;
+      this.updateContents_();
+    },
+
+    get dataToGroup() {
+      return this.dataToGroup_;
+    },
+
+    set dataToGroup(dataToGroup) {
+      this.dataToGroup_ = dataToGroup;
+      this.updateContents_();
+    },
+
+    get rowStatsConstructor() {
+      return this.rowStatsConstructor_;
+    },
+
+    set rowStatsConstructor(rowStatsConstructor) {
+      this.rowStatsConstructor_ = rowStatsConstructor;
+      this.updateContents_();
+    },
+
+    rebuild: function() {
+      this.$.table.rebuild();
+    },
+
+    updateContents_: function() {
+      var groupBy = this.groupBy_ || [];
+      var dataToGroup = this.dataToGroup_ || [];
+      var rowStatsConstructor = this.rowStatsConstructor_ || function() {};
+
+      var superRow = new Row('', dataToGroup, groupBy,
+                             rowStatsConstructor);
+      this.$.table.tableRows = superRow.subRows || [];
+    }
+  });
+
+  return {
+  };
+});
+</script>
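To illustrate the grouping logic above in isolation, a hedged sketch with made-up data: groupBy is an array of functions mapping a datum to its group key, applied level by level to build nested Rows.

var table = document.createElement('tr-ui-b-grouping-table');
table.tableColumns = [
  {title: 'Group', value: function(row) { return row.title; }},
  {title: 'Count', value: function(row) { return row.data.length; }}
];
// Hypothetical data, grouped first by process name, then by thread name.
table.groupBy = [
  function(datum) { return datum.processName; },
  function(datum) { return datum.threadName; }
];
table.dataToGroup = [
  {processName: 'browser', threadName: 'CrBrowserMain', value: 10},
  {processName: 'browser', threadName: 'Compositor', value: 3},
  {processName: 'renderer', threadName: 'CrRendererMain', value: 7}
];
table.rebuild();
// Top-level rows are 'browser' and 'renderer'; expanding 'browser' yields one
// sub-row per thread name, each holding its slice of the original data.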
diff --git a/catapult/tracing/tracing/ui/base/grouping_table_groupby_picker.html b/catapult/tracing/tracing/ui/base/grouping_table_groupby_picker.html
new file mode 100644
index 0000000..ef12f04
--- /dev/null
+++ b/catapult/tracing/tracing/ui/base/grouping_table_groupby_picker.html
@@ -0,0 +1,313 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<link rel="import" href="/tracing/base/iteration_helpers.html">
+<link rel="import" href="/tracing/base/settings.html">
+<link rel="import" href="/tracing/ui/base/dropdown.html">
+
+<polymer-element name="tr-ui-b-grouping-table-groupby-picker">
+  <template>
+    <style>
+    :host {
+      display: flex;
+      flex-direction: row;
+      align-items: center;
+    }
+    groups {
+      -webkit-user-select: none;
+      display: flex;
+      flex-direction: row;
+      padding-left: 10px;
+    }
+
+    group, possible-group {
+      display: span;
+      padding-right: 10px;
+      padding-left: 10px;
+    }
+
+    group {
+      border-left: 1px solid rgba(0,0,0,0);
+      cursor: move;
+    }
+
+    group.dragging {
+      opacity: 0.2;
+    }
+
+    group.drop-targeted {
+      border-left: 1px solid black;
+    }
+
+
+    #remove {
+      cursor: default;
+    }
+
+    #remove:not([hovered]) {
+      visibility: hidden;
+    }
+    </style>
+    <groups>
+    </groups>
+    <tr-ui-b-dropdown id="add-group"></tr-ui-b-dropdown>
+  </template>
+</polymer-element>
+
+<template id="tr-ui-b-grouping-table-groupby-picker-group-template">
+  <span id="key"></span>
+  <span id="remove">&times;</span>
+</template>
+
+<script>
+'use strict';
+
+tr.exportTo('tr.ui.b', function() {
+  var THIS_DOC = document._currentScript.ownerDocument;
+
+  Polymer('tr-ui-b-grouping-table-groupby-picker', {
+    created: function() {
+      this.needsInit_ = true;
+      this.defaultGroupKeys_ = undefined;
+      this.possibleGroups_ = [];
+      this.settingsKey_ = [];
+
+      this.currentGroupKeys_ = undefined;
+
+      this.dragging_ = false;
+    },
+
+    get defaultGroupKeys() {
+      return this.defaultGroupKeys_;
+    },
+
+    set defaultGroupKeys(defaultGroupKeys) {
+      if (!this.needsInit_)
+        throw new Error('Already initialized.');
+      this.defaultGroupKeys_ = defaultGroupKeys;
+      this.maybeInit_();
+    },
+
+    get possibleGroups() {
+      return this.possibleGroups_;
+    },
+
+    set possibleGroups(possibleGroups) {
+      if (!this.needsInit_)
+        throw new Error('Already initialized.');
+      this.possibleGroups_ = possibleGroups;
+      this.maybeInit_();
+    },
+
+    get settingsKey() {
+      return this.settingsKey_;
+    },
+
+    set settingsKey(settingsKey) {
+      if (!this.needsInit_)
+        throw new Error('Already initialized.');
+      this.settingsKey_ = settingsKey;
+      this.maybeInit_();
+    },
+
+    maybeInit_: function() {
+      if (!this.needsInit_)
+        return;
+
+      if (this.settingsKey_ === undefined)
+        return;
+      if (this.defaultGroupKeys_ === undefined)
+        return;
+      if (this.possibleGroups_ === undefined)
+        return;
+
+      this.needsInit_ = false;
+
+      var addGroupEl = this.shadowRoot.querySelector('#add-group');
+      addGroupEl.iconElement.textContent = 'Add another...';
+
+      this.currentGroupKeys = tr.b.Settings.get(
+        this.settingsKey_, this.defaultGroupKeys_);
+    },
+
+    get currentGroupKeys() {
+      return this.currentGroupKeys_;
+    },
+
+    get currentGroups() {
+      var groupsByKey = {};
+      this.possibleGroups_.forEach(function(group) {
+        groupsByKey[group.key] = group;
+      });
+      return this.currentGroupKeys_.map(function(groupKey) {
+        return groupsByKey[groupKey];
+      });
+    },
+
+    set currentGroupKeys(currentGroupKeys) {
+      if (this.currentGroupKeys_ === currentGroupKeys)
+        return;
+
+      if (!(currentGroupKeys instanceof Array))
+        throw new Error('Must be array');
+
+      this.currentGroupKeys_ = currentGroupKeys;
+      this.updateGroups_();
+
+      tr.b.Settings.set(
+        this.settingsKey_, this.currentGroupKeys_);
+
+      var e = new tr.b.Event('current-groups-changed');
+      this.dispatchEvent(e);
+    },
+
+    updateGroups_: function() {
+      var groupsEl = this.shadowRoot.querySelector('groups');
+      var addGroupEl = this.shadowRoot.querySelector('#add-group');
+
+      groupsEl.textContent = '';
+      addGroupEl.textContent = '';
+
+      var unusedGroups = {};
+      var groupsByKey = {};
+      this.possibleGroups_.forEach(function(group) {
+        unusedGroups[group.key] = group;
+        groupsByKey[group.key] = group;
+      });
+
+      this.currentGroupKeys_.forEach(function(key) {
+        delete unusedGroups[key];
+      });
+
+      // Create groups.
+      var groupTemplateEl = THIS_DOC.querySelector(
+          '#tr-ui-b-grouping-table-groupby-picker-group-template');
+      this.currentGroupKeys_.forEach(function(key, index) {
+        var group = groupsByKey[key];
+        var groupEl = document.createElement('group');
+        groupEl.groupKey = key;
+        groupEl.appendChild(document.importNode(groupTemplateEl.content, true));
+        groupEl.querySelector('#key').textContent = group.label;
+        groupsEl.appendChild(groupEl);
+
+        this.configureRemoveButtonForGroup_(groupEl);
+        this.configureDragAndDropForGroup_(groupEl);
+      }, this);
+
+      // Adjust dropdown.
+      tr.b.iterItems(unusedGroups, function(key, group) {
+        var groupEl = document.createElement('possible-group');
+        groupEl.textContent = group.label;
+        groupEl.addEventListener('click', function() {
+          var newKeys = this.currentGroupKeys.slice();
+          newKeys.push(key);
+          this.currentGroupKeys = newKeys;
+          addGroupEl.close();
+        }.bind(this));
+        addGroupEl.appendChild(groupEl);
+      }, this);
+
+      // Hide dropdown if needed.
+      if (tr.b.dictionaryLength(unusedGroups) == 0) {
+        addGroupEl.style.display = 'none';
+      } else {
+        addGroupEl.style.display = '';
+      }
+    },
+
+    configureRemoveButtonForGroup_: function(groupEl) {
+      var removeEl = groupEl.querySelector('#remove');
+      removeEl.addEventListener('click', function() {
+        var newKeys = this.currentGroupKeys.slice();
+        var i = newKeys.indexOf(groupEl.groupKey);
+        newKeys.splice(i, 1);
+        this.currentGroupKeys = newKeys;
+      }.bind(this));
+
+      groupEl.addEventListener('mouseenter', function() {
+        removeEl.setAttribute('hovered', true);
+      });
+      groupEl.addEventListener('mouseleave', function() {
+        removeEl.removeAttribute('hovered');
+      });
+    },
+
+    configureDragAndDropForGroup_: function(groupEl) {
+      var groupsEl = groupEl.parentElement;
+
+      groupEl.setAttribute('draggable', true);
+
+      groupEl.addEventListener('dragstart', function(e) {
+        e.dataTransfer.setData('groupKey', groupEl.groupKey);
+        groupEl.querySelector('#remove').removeAttribute('hovered');
+        groupEl.classList.add('dragging');
+        this.dragging_ = true;
+      }.bind(this));
+
+      groupEl.addEventListener('dragend', function(e) {
+        console.log(e.type, groupEl.groupKey);
+        for (var i = 0; i < groupsEl.children.length; i++)
+          groupsEl.children[i].classList.remove('drop-targeted');
+        groupEl.classList.remove('dragging');
+        this.dragging_ = false;
+      }.bind(this));
+
+      // Drop targeting.
+      groupEl.addEventListener('dragenter', function(e) {
+        if (!this.dragging_)
+          return;
+        groupEl.classList.add('drop-targeted');
+        if (this.dragging_)
+          e.preventDefault();
+      }.bind(this));
+
+      groupEl.addEventListener('dragleave', function(e) {
+        if (!this.dragging_)
+          return;
+        groupEl.classList.remove('drop-targeted');
+        e.preventDefault();
+      }.bind(this));
+
+
+      // Drop logic.
+      groupEl.addEventListener('dragover', function(e) {
+        if (!this.dragging_)
+          return;
+        e.preventDefault();
+        groupEl.classList.add('drop-targeted');
+      }.bind(this));
+
+      groupEl.addEventListener('drop', function(e) {
+        if (!this.dragging_)
+          return;
+
+        var srcKey = e.dataTransfer.getData('groupKey');
+        var dstKey = groupEl.groupKey;
+
+        if (srcKey === dstKey)
+          return;
+
+        var newKeys = this.currentGroupKeys_.slice();
+
+        var srcIndex = this.currentGroupKeys_.indexOf(srcKey);
+        newKeys.splice(srcIndex, 1);
+
+        var dstIndex = this.currentGroupKeys_.indexOf(dstKey);
+        newKeys.splice(dstIndex, 0, srcKey);
+
+        this.currentGroupKeys = newKeys;
+
+        e.dataTransfer.clearData();
+        e.preventDefault();
+        e.stopPropagation();
+      }.bind(this));
+    }
+  });
+
+  return {
+  };
+});
+</script>
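A usage sketch for the picker above (keys, labels, and the settings key are hypothetical); the tests that follow cover instantiation, while this shows reacting to changes:

var picker = document.createElement(
    'tr-ui-b-grouping-table-groupby-picker');
picker.settingsKey = 'example-groupby';   // hypothetical settings key
picker.possibleGroups = [
  {key: 'proc', label: 'Process'},
  {key: 'thread', label: 'Thread'}
];
picker.defaultGroupKeys = ['proc'];

// Fired whenever a group is added, removed, or re-ordered by drag-and-drop.
// currentGroups maps the stored keys back to the {key, label} objects from
// possibleGroups.
picker.addEventListener('current-groups-changed', function() {
  console.log(picker.currentGroupKeys, picker.currentGroups);
});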
diff --git a/catapult/tracing/tracing/ui/base/grouping_table_groupby_picker_test.html b/catapult/tracing/tracing/ui/base/grouping_table_groupby_picker_test.html
new file mode 100644
index 0000000..1bbbc4f
--- /dev/null
+++ b/catapult/tracing/tracing/ui/base/grouping_table_groupby_picker_test.html
@@ -0,0 +1,60 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/ui/base/grouping_table_groupby_picker.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  test('basic', function() {
+    var picker = document.createElement(
+        'tr-ui-b-grouping-table-groupby-picker');
+    picker.settingsKey = 'basic-test';
+    picker.possibleGroups = [
+      {key: 'a', label: 'A'},
+      {key: 'b', label: 'B'},
+      {key: 'c', label: 'C'},
+      {key: 'd', label: 'D'},
+      {key: 'e', label: 'E'}
+    ];
+    picker.defaultGroupKeys = ['a', 'b', 'd'];
+    this.addHTMLOutput(picker);
+  });
+
+  test('allGroupsAdded', function() {
+    var picker = document.createElement(
+        'tr-ui-b-grouping-table-groupby-picker');
+    picker.settingsKey = 'basic-test';
+    picker.possibleGroups = [
+      {key: 'a', label: 'A'},
+      {key: 'b', label: 'B'},
+      {key: 'c', label: 'C'},
+      {key: 'd', label: 'D'},
+      {key: 'e', label: 'E'}
+    ];
+    picker.defaultGroupKeys = ['a', 'b', 'c', 'd', 'e'];
+    this.addHTMLOutput(picker);
+  });
+
+  test('noGroupsAdded', function() {
+    var picker = document.createElement(
+        'tr-ui-b-grouping-table-groupby-picker');
+    picker.settingsKey = 'basic-test';
+    picker.possibleGroups = [
+      {key: 'a', label: 'A'},
+      {key: 'b', label: 'B'},
+      {key: 'c', label: 'C'},
+      {key: 'd', label: 'D'},
+      {key: 'e', label: 'E'}
+    ];
+    picker.defaultGroupKeys = [];
+    this.addHTMLOutput(picker);
+  });
+
+});
+</script>
diff --git a/catapult/tracing/tracing/ui/base/polymer_utils.html b/catapult/tracing/tracing/ui/base/polymer_utils.html
index b575207..b8c5e9d 100644
--- a/catapult/tracing/tracing/ui/base/polymer_utils.html
+++ b/catapult/tracing/tracing/ui/base/polymer_utils.html
@@ -14,39 +14,11 @@
  */
 tr.exportTo('tr.ui.b', function() {
 
-  Object.observe(Polymer.elements, clearPolymerElementCaches);
-
-  var elementsByName = undefined;
-  var elementsThatExtend = undefined;
-  var elementSubclasses = undefined;
-  function clearPolymerElementCaches() {
-    elementsByName = {};
-    elementsThatExtend = undefined;
-    elementSubclasses = {};
-  }
-
-  function buildElementMapsIfNeeded() {
-    if (elementsThatExtend !== undefined && elementsByName !== undefined)
-      return;
-    elementsByName = {};
-    elementsThatExtend = {};
-    Polymer.elements.forEach(function(element) {
-      if (elementsByName[element.name])
-        throw new Error('Something is strange: dupe polymer element names');
-
-      elementsByName[element.name] = element;
-
-      if (element.extends) {
-        if (elementsThatExtend[element.extends] === undefined)
-          elementsThatExtend[element.extends] = [];
-        elementsThatExtend[element.extends].push(element.name);
-      }
-    });
-  }
-
   function getPolymerElementNamed(tagName) {
-    buildElementMapsIfNeeded();
-    return elementsByName[tagName];
+    for (var i = 0; i < Polymer.elements.length; i++) {
+      if (Polymer.elements[i].name === tagName)
+        return Polymer.elements[i];
+    }
   }
 
   function getPolymerElementsThatSubclass(tagName) {
@@ -55,28 +27,33 @@
         'Wait until Polymer.whenReady');
     }
 
-    buildElementMapsIfNeeded();
+    var baseElement;
+    var elementNamesThatExtend = {};
+    Polymer.elements.forEach(function(element) {
+      if (element.name === tagName)
+        baseElement = element;
 
-    var element = getPolymerElementNamed(tagName);
-    if (!element)
+      if (element.extends) {
+        if (elementNamesThatExtend[element.extends] === undefined)
+          elementNamesThatExtend[element.extends] = [];
+        elementNamesThatExtend[element.extends].push(element.name);
+      }
+    });
+
+    if (!baseElement)
       throw new Error(tagName + ' is not a polymer element');
 
-    if (elementSubclasses === undefined)
-      elementSubclasses = {};
-
-    if (elementSubclasses[tagName] === undefined) {
-      var immediateSubElements = elementsThatExtend[element.name];
-      var allSubElements = [];
-      if (immediateSubElements !== undefined && immediateSubElements.length) {
-        immediateSubElements.forEach(function(subElement) {
-          allSubElements.push(subElement);
-          allSubElements.push.apply(
-            allSubElements, getPolymerElementsThatSubclass(subElement));
-        });
-      }
-      elementSubclasses[tagName] = allSubElements;
+    var allFoundSubElementNames = [baseElement.name];
+    for (var i = 0; i < allFoundSubElementNames.length; i++) {
+      var elementName = allFoundSubElementNames[i];
+      allFoundSubElementNames.push.apply(
+          allFoundSubElementNames, elementNamesThatExtend[elementName]);
     }
-    return elementSubclasses[tagName];
+
+    // Remove the base element tag name from the list.
+    allFoundSubElementNames.shift();
+
+    return allFoundSubElementNames;
   }
 
   return {
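The rewritten getPolymerElementsThatSubclass above is a simple worklist expansion; roughly the same traversal over a plain name-to-subclasses map (hierarchy borrowed from the test file added below):

function subclassesOf(extendsMap, baseName) {
  var found = [baseName];
  // Append the direct subclasses of every name already found; the loop bound
  // grows as entries are pushed, so descendants are picked up transitively.
  for (var i = 0; i < found.length; i++) {
    var children = extendsMap[found[i]];
    if (children)
      found.push.apply(found, children);
  }
  found.shift();  // Drop the base element itself.
  return found;
}

var extendsMap = {a: ['b', 'c'], c: ['d', 'e'], d: ['f', 'g']};
subclassesOf(extendsMap, 'c');  // ['d', 'e', 'f', 'g']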
diff --git a/catapult/tracing/tracing/ui/base/polymer_utils_test.html b/catapult/tracing/tracing/ui/base/polymer_utils_test.html
new file mode 100644
index 0000000..698ec0c
--- /dev/null
+++ b/catapult/tracing/tracing/ui/base/polymer_utils_test.html
@@ -0,0 +1,73 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/ui/base/polymer_utils.html">
+
+<!--
+The Polymer elements defined in this file form the following class hierarchy:
+
+    A (common superclass)
+   / \
+  B   C
+     / \
+    D   E
+   / \
+  F   G
+-->
+
+<polymer-element name="polymer-utils-test-element-a" noscript>
+</polymer-element>
+
+<polymer-element name="polymer-utils-test-element-b"
+    extends="polymer-utils-test-element-a" noscript>
+</polymer-element>
+
+<polymer-element name="polymer-utils-test-element-c"
+    extends="polymer-utils-test-element-a" noscript>
+</polymer-element>
+
+<polymer-element name="polymer-utils-test-element-d"
+    extends="polymer-utils-test-element-c" noscript>
+</polymer-element>
+
+<polymer-element name="polymer-utils-test-element-e"
+    extends="polymer-utils-test-element-c" noscript>
+</polymer-element>
+
+<polymer-element name="polymer-utils-test-element-f"
+    extends="polymer-utils-test-element-d" noscript>
+</polymer-element>
+
+<polymer-element name="polymer-utils-test-element-g"
+    extends="polymer-utils-test-element-d" noscript>
+</polymer-element>
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  test('getPolymerElementsThatSubclass', function() {
+    function checkSubclasses(classNameSuffix, subclassNameSuffixes) {
+      var className = 'polymer-utils-test-element-' + classNameSuffix;
+      var subclassNames = subclassNameSuffixes.map(
+          function(subclassNameSuffix) {
+            return 'polymer-utils-test-element-' + subclassNameSuffix;
+          });
+      assert.sameMembers(
+          tr.ui.b.getPolymerElementsThatSubclass(className), subclassNames);
+    }
+
+    checkSubclasses('a', ['b', 'c', 'd', 'e', 'f', 'g']);
+    checkSubclasses('b', []);
+    checkSubclasses('c', ['d', 'e', 'f', 'g']);
+    checkSubclasses('d', ['f', 'g']);
+    checkSubclasses('e', []);
+    checkSubclasses('f', []);
+    checkSubclasses('g', []);
+  });
+});
+</script>
diff --git a/catapult/tracing/tracing/ui/base/radio_picker.html b/catapult/tracing/tracing/ui/base/radio_picker.html
new file mode 100644
index 0000000..85d204a
--- /dev/null
+++ b/catapult/tracing/tracing/ui/base/radio_picker.html
@@ -0,0 +1,122 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/ui/base/ui.html">
+
+<polymer-element name='tr-ui-b-radio-picker'>
+  <template>
+    <style>
+    #container {
+      display: flex;
+      flex-direction: column;
+    }
+    </style>
+
+    <div id="container">
+    </div>
+
+  </template>
+
+  <script>
+  'use strict';
+
+  Polymer({
+    created: function() {
+      this.needsInit_ = true;
+      this.settingsKey_ = undefined;
+      this.is_ready_ = false;
+      this.radio_buttons_ = undefined;
+      // Keeps track of which key is selected. This member should only be set
+      // inside the select() method to make sure that the logical state and
+      // the UI state stay consistent.
+      this.selectedKey_ = undefined;
+    },
+
+    ready: function() {
+      this.is_ready_ = true;
+      this.maybeInit_();
+      this.maybeRenderRadioButtons_();
+    },
+
+    get settingsKey() {
+      return this.settingsKey_;
+    },
+
+    set settingsKey(settingsKey) {
+      if (!this.needsInit_)
+        throw new Error('Already initialized.');
+      this.settingsKey_ = settingsKey;
+      this.maybeInit_();
+    },
+
+    maybeInit_: function() {
+      if (!this.needsInit_)
+        return;
+      if (this.settingsKey_ === undefined)
+        return;
+      this.needsInit_ = false;
+      this.select(tr.b.Settings.get(this.settingsKey_));
+    },
+
+    set items(items) {
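+      // Build one radio button per item. The buttons are attached to the DOM
+      // later, by maybeRenderRadioButtons_(), once the element is ready.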
+      this.radio_buttons_ = {};
+      items.forEach(function(e) {
+        if (e.key in this.radio_buttons_)
+          throw new Error(e.key + ' already exists');
+        var radio_button = document.createElement('div');
+        var input = document.createElement('input');
+        var label = document.createElement('div');
+        input.type = 'radio';
+        input.addEventListener('click', function() {
+          this.select(e.key);
+        }.bind(this));
+        label.innerHTML = e.label;
+        label.style.display = 'inline';
+        radio_button.appendChild(input);
+        radio_button.appendChild(label);
+        this.radio_buttons_[e.key] = input;
+      }.bind(this));
+
+      this.maybeInit_();
+      this.maybeRenderRadioButtons_();
+    },
+
+    maybeRenderRadioButtons_: function() {
+      if (!this.is_ready_)
+        return;
+      if (this.radio_buttons_ === undefined)
+        return;
+      for (var key in this.radio_buttons_)
+        this.$.container.appendChild(this.radio_buttons_[key].parentElement);
+      if (this.selectedKey_ !== undefined)
+        this.select(this.selectedKey_);
+    },
+
+    select: function(key) {
+      if (key === undefined)
+        return;
+      if (this.radio_buttons_ === undefined) {
+        this.selectedKey_ = key;
+        return;
+      }
+      if (!(key in this.radio_buttons_))
+        throw new Error(key + ' does not exist');
+      // Unselect the previous radio, update the key & select the new one.
+      if (this.selectedKey_ !== undefined)
+        this.radio_buttons_[this.selectedKey_].checked = false;
+      this.selectedKey_ = key;
+      tr.b.Settings.set(this.settingsKey_, this.selectedKey_);
+      if (this.selectedKey_ !== undefined)
+        this.radio_buttons_[this.selectedKey_].checked = true;
+    },
+
+    get selectedKey() {
+      return this.selectedKey_;
+    },
+  });
+  </script>
+</polymer-element>
diff --git a/catapult/tracing/tracing/ui/base/radio_picker_test.html b/catapult/tracing/tracing/ui/base/radio_picker_test.html
new file mode 100644
index 0000000..dea60cd
--- /dev/null
+++ b/catapult/tracing/tracing/ui/base/radio_picker_test.html
@@ -0,0 +1,89 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/ui/base/radio_picker.html">
+<link rel="import" href="/tracing/ui/base/dom_helpers.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  test('basic', function() {
+    var rp = document.createElement('tr-ui-b-radio-picker');
+    rp.items = [
+      {key: 'Toyota', label: 'I want to drive Toyota'},
+      {key: 'Boeing', label: 'I want to fly'},
+      {key: 'Submarine', label: 'I want to swim'}
+    ];
+    this.addHTMLOutput(rp);
+    assert.equal(rp.selectedKey, undefined);
+    rp.select('Toyota');
+    assert.equal(rp.selectedKey, 'Toyota');
+  });
+
+  test('persistentState_setSelectedKeyAfterSettingItems', function() {
+    var items = [
+      {key: 'Toyota', label: 'I want to drive Toyota'},
+      {key: 'Boeing', label: 'I want to fly'},
+      {key: 'Submarine', label: 'I want to swim'}
+    ];
+    var container1 = tr.ui.b.createDiv({textContent: 'Radio Picker One'});
+    container1.style.border = 'solid';
+    var rp = document.createElement('tr-ui-b-radio-picker');
+    rp.items = items;
+    rp.settingsKey = 'radio-picker-test-one';
+    container1.appendChild(rp);
+    this.addHTMLOutput(container1);
+    assert.equal(rp.selectedKey, undefined);
+    rp.select('Toyota');
+    assert.equal(rp.selectedKey, 'Toyota');
+
+    var container2 = tr.ui.b.createDiv({
+        textContent: 'Radio Picker Two (same settingsKey as Radio Picker One)'});
+    container2.style.border = 'solid';
+    var rp2 = document.createElement('tr-ui-b-radio-picker');
+    rp2.items = items;
+    rp2.settingsKey = 'radio-picker-test-one';
+    container2.appendChild(rp2);
+    this.addHTMLOutput(container2);
+
+    assert.equal(rp2.selectedKey, 'Toyota');
+  });
+
+  test('persistentState_setSelectedKeyBeforeSettingItems', function() {
+    var items = [
+      {key: 'Toyota', label: 'I want to drive Toyota'},
+      {key: 'Boeing', label: 'I want to fly'},
+      {key: 'Submarine', label: 'I want to swim'}
+    ];
+    var container1 = tr.ui.b.createDiv({textContent: 'Radio Picker One'});
+    container1.style.border = 'solid';
+    var rp = document.createElement('tr-ui-b-radio-picker');
+    rp.settingsKey = 'radio-picker-test-two';
+    rp.items = items;
+    container1.appendChild(rp);
+    this.addHTMLOutput(container1);
+    assert.equal(rp.selectedKey, undefined);
+    rp.select('Boeing');
+    assert.equal(rp.selectedKey, 'Boeing');
+
+    var container2 = tr.ui.b.createDiv({
+        textContent: 'Radio Picker Two (same settingsKey as Radio Picker One)'});
+    container2.style.border = 'solid';
+    var rp2 = document.createElement('tr-ui-b-radio-picker');
+    rp2.settingsKey = 'radio-picker-test-two';
+    container2.appendChild(rp2);
+    this.addHTMLOutput(container2);
+    rp2.items = items;
+
+    assert.equal(rp2.selectedKey, 'Boeing');
+  });
+
+});
+</script>
diff --git a/catapult/tracing/tracing/ui/analysis/tab_view.html b/catapult/tracing/tracing/ui/base/tab_view.html
similarity index 100%
rename from catapult/tracing/tracing/ui/analysis/tab_view.html
rename to catapult/tracing/tracing/ui/base/tab_view.html
diff --git a/catapult/tracing/tracing/ui/base/tab_view_test.html b/catapult/tracing/tracing/ui/base/tab_view_test.html
new file mode 100644
index 0000000..42f16e5
--- /dev/null
+++ b/catapult/tracing/tracing/ui/base/tab_view_test.html
@@ -0,0 +1,323 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2014 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/ui/base/tab_view.html">
+
+<template id="tab-view-test-template">
+  <tr-ui-a-tab-view>
+    <p tab-label="Existing Label"> Tab with label already set </p>
+    <p> Tab Content with no label </p>
+    <p selected="selected" tab-label="Should be selected">
+      Already selected tab
+    </p>
+    <p selected="selected" tab-label="Should not be selected">
+      Second already selected tab
+    </p>
+  </tr-ui-a-tab-view>
+</template>
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  var THIS_DOC = document._currentScript.ownerDocument;
+
+  test('instantiate', function() {
+
+    var TAB_TEXT = 'Lorem ipsum dolor sit amet, consectetur adipiscing elit.' +
+        ' Cras eleifend elit nec erat tristique pellentesque. Cras placerat ' +
+        'lectus, sed semper tortor ornare quis. Maecenas vitae hendrerit. ' +
+        'Cras mattis interdum nisi, eget egestas dui iaculis ultricies. Proi' +
+        'n magna at nibh fringilla tincidunt id vitae ante. Fusce nec urna n' +
+        'on porttitor tincidunt. Pellentesque habitant morbi tristique senec' +
+        'tus netus et malesuada fames ac turpis egestas. Suspendisse sed vel' +
+        'it mollis ornare sit amet vel augue. Nullam rhoncus in tellus id. ' +
+        'Vestibulum ante ipsum primis in faucibus orci luctus et ultrices ' +
+        'cubilia Curae; Nunc at velit consectetur ipsum tempus tempus. Nunc ' +
+        'mattis sapien, a placerat erat. Vivamus ac enim ultricies, gravida ' +
+        'nulla ut, scelerisque magna. Sed a volutpat enim. Morbi vulputate, ' +
+        'sed egestas mollis, urna nisl varius sem, sed venenatis turpis null' +
+        'a ipsum. Suspendisse potenti.';
+
+    var tabViewContainer = document.createElement('div');
+    tabViewContainer.style.width = '500px';
+    tabViewContainer.style.height = '200px';
+
+    var tabView = new TracingAnalysisTabView();
+
+    var firstTab = document.createElement('div');
+    firstTab.setAttribute('tab-label', 'First Tab Label');
+    firstTab.innerHTML = '<p>' + TAB_TEXT + '</p>';
+
+    var secondTab = document.createElement('div');
+    secondTab.setAttribute('tab-label', 'Second Tab Label');
+    secondTab.innerHTML = '<b>' + 'Second Tab Text' + '</b>';
+
+    var thirdTab = document.createElement('div');
+    thirdTab.setAttribute('tab-label', 'Third Tab Label');
+    thirdTab.innerHTML = '<b>' + 'Third Tab Text' + '</b>';
+
+    tabView.appendChild(firstTab);
+    tabView.appendChild(secondTab);
+    tabView.appendChild(thirdTab);
+    tabViewContainer.appendChild(tabView);
+
+    this.addHTMLOutput(tabViewContainer);
+
+    thirdTab.setAttribute('tab-label', 'Something Different');
+
+    var button = document.createElement('button');
+    button.textContent = 'Change label';
+
+    button.addEventListener('click', function() {
+      thirdTab.setAttribute('tab-label', 'Label Changed');
+    });
+
+    tabView.selectedTab = secondTab;
+    this.addHTMLOutput(button);
+  });
+
+
+  test('instantiateWithTabHeading', function() {
+    var TAB_TEXT = 'Lorem ipsum dolor sit amet, consectetur adipiscing elit.' +
+        ' Cras eleifend elit nec erat tristique pellentesque. Cras placerat ' +
+        'lectus, sed semper tortor ornare quis. Maecenas vitae hendrerit. ' +
+        'Cras mattis interdum nisi, eget egestas dui iaculis ultricies. Proi' +
+        'n magna at nibh fringilla tincidunt id vitae ante. Fusce nec urna n' +
+        'on porttitor tincidunt. Pellentesque habitant morbi tristique senec' +
+        'tus netus et malesuada fames ac turpis egestas. Suspendisse sed vel' +
+        'it mollis ornare sit amet vel augue. Nullam rhoncus in tellus id. ' +
+        'Vestibulum ante ipsum primis in faucibus orci luctus et ultrices ' +
+        'cubilia Curae; Nunc at velit consectetur ipsum tempus tempus. Nunc ' +
+        'mattis sapien, a placerat erat. Vivamus ac enim ultricies, gravida ' +
+        'nulla ut, scelerisque magna. Sed a volutpat enim. Morbi vulputate, ' +
+        'sed egestas mollis, urna nisl varius sem, sed venenatis turpis null' +
+        'a ipsum. Suspendisse potenti.';
+
+    var tabViewContainer = document.createElement('div');
+    tabViewContainer.style.width = '500px';
+    tabViewContainer.style.height = '200px';
+
+    var tabView = new TracingAnalysisTabView();
+    tabView.tabStripHeadingText = 'Hello world:';
+
+    var firstTab = document.createElement('div');
+    firstTab.setAttribute('tab-label', 'First Tab Label');
+    firstTab.innerHTML = '<p>' + TAB_TEXT + '</p>';
+
+    var secondTab = document.createElement('div');
+    secondTab.setAttribute('tab-label', 'Second Tab Label');
+    secondTab.innerHTML = '<b>' + 'Second Tab Text' + '</b>';
+
+    var thirdTab = document.createElement('div');
+    thirdTab.setAttribute('tab-label', 'Third Tab Label');
+    thirdTab.innerHTML = '<b>' + 'Third Tab Text' + '</b>';
+
+    tabView.appendChild(firstTab);
+    tabView.appendChild(secondTab);
+    tabView.appendChild(thirdTab);
+    tabViewContainer.appendChild(tabView);
+
+    this.addHTMLOutput(tabViewContainer);
+    tabView.selectedTab = secondTab;
+  });
+
+  test('instantiateChildrenAlreadyInside', function() {
+    var tabViewTemplate = THIS_DOC.querySelector('#tab-view-test-template');
+    var tabView = tabViewTemplate.createInstance();
+
+    var tabViewContainer = document.createElement('div');
+    tabViewContainer.style.width = '400px';
+    tabViewContainer.style.height = '200px';
+
+    tabViewContainer.appendChild(tabView);
+
+    this.addHTMLOutput(tabViewContainer);
+
+  });
+
+  test('programmaticallySetSelectedTab', function() {
+    var tabViewContainer = document.createElement('div');
+    tabViewContainer.style.width = '500px';
+    tabViewContainer.style.height = '200px';
+
+    var tabView = new TracingAnalysisTabView();
+
+    var t1 = document.createElement('div');
+    var t2 = document.createElement('div');
+    var t3 = document.createElement('div');
+
+    tabView.appendChild(t1);
+    tabView.appendChild(t2);
+    tabView.appendChild(t3);
+
+    assert.isUndefined(tabView.selectedTab);
+    tabView.selectedTab = t1;
+
+    assert.isTrue(t1.hasAttribute('selected'));
+    assert.isFalse(t2.hasAttribute('selected'));
+    assert.isFalse(t3.hasAttribute('selected'));
+    assert.isTrue(Object.is(t1, tabView.selectedTab));
+
+    tabView.selectedTab = t2;
+    assert.isFalse(t1.hasAttribute('selected'));
+    assert.isTrue(t2.hasAttribute('selected'));
+    assert.isFalse(t3.hasAttribute('selected'));
+    assert.isTrue(Object.is(t2, tabView.selectedTab));
+
+    tabView.selectedTab = t3;
+    assert.isFalse(t1.hasAttribute('selected'));
+    assert.isFalse(t2.hasAttribute('selected'));
+    assert.isTrue(t3.hasAttribute('selected'));
+    assert.isTrue(Object.is(t3, tabView.selectedTab));
+
+    t1.selected = true;
+    assert.isTrue(t1.hasAttribute('selected'));
+    assert.isFalse(t2.hasAttribute('selected'));
+    assert.isFalse(t3.hasAttribute('selected'));
+    assert.isTrue(Object.is(t1, tabView.selectedTab));
+
+    // Make sure that merely setting an unselected tab's selected property to
+    // false does not break the existing selection.
+    t2.selected = false;
+    t3.selected = false;
+    assert.isTrue(t1.hasAttribute('selected'));
+    assert.isFalse(t2.hasAttribute('selected'));
+    assert.isFalse(t3.hasAttribute('selected'));
+    assert.isTrue(Object.is(t1, tabView.selectedTab));
+
+    t3.selected = true;
+    assert.isFalse(t1.hasAttribute('selected'));
+    assert.isFalse(t2.hasAttribute('selected'));
+    assert.isTrue(t3.hasAttribute('selected'));
+    assert.isTrue(Object.is(t3, tabView.selectedTab));
+
+    tabViewContainer.appendChild(tabView);
+
+    this.addHTMLOutput(tabViewContainer);
+  });
+
+  /**
+   * This test checks that if an element's selected property is set to true
+   * before the element is attached to the tabView, the element is still
+   * selected after it gets attached.
+   */
+  test('instantiateSetSelectedTabAlreadySet', function() {
+    var tabViewContainer = document.createElement('div');
+    tabViewContainer.style.width = '500px';
+    tabViewContainer.style.height = '200px';
+
+    var tabView = new TracingAnalysisTabView();
+
+    var t1 = document.createElement('div');
+    t1.textContent = 'This text should BE visible.';
+    var t2 = document.createElement('div');
+    t2.textContent = 'This text should NOT be visible.';
+    var t3 = document.createElement('div');
+    t3.textContent = 'This text should NOT be visible, also.';
+
+    t1.selected = true;
+    t2.selected = false;
+    t3.selected = false;
+
+    tabView.appendChild(t1);
+    tabView.appendChild(t2);
+    tabView.appendChild(t3);
+
+    t1.setAttribute('tab-label', 'This should be selected');
+    t2.setAttribute('tab-label', 'Not selected');
+    t3.setAttribute('tab-label', 'Not selected');
+
+    tabViewContainer.appendChild(tabView);
+
+    this.addHTMLOutput(tabViewContainer);
+  });
+
+  test('selectingInvalidTabWorks', function() {
+    var tabView = new TracingAnalysisTabView();
+    var t1 = document.createElement('div');
+    var t2 = document.createElement('div');
+    var t3 = document.createElement('div');
+    var invalidChild = document.createElement('div');
+
+    tabView.appendChild(t1);
+    tabView.appendChild(t2);
+    tabView.appendChild(t3);
+
+    tabView.selectedTab = t1;
+
+    assert.equal(tabView.selectedTab, t1);
+
+    // Make sure that selecting an invalid tab does not break the current
+    // selection.
+    tabView.selectedTab = invalidChild;
+    assert.equal(t1, tabView.selectedTab);
+
+    // Also make sure the invalidChild does not influence the tab view when
+    // it has a selected property set.
+    invalidChild.selected = true;
+    tabView.selectedTab = invalidChild;
+    assert.equal(t1, tabView.selectedTab);
+  });
+
+  test('changeTabCausesEvent', function() {
+    var tabView = new TracingAnalysisTabView();
+    var t1 = document.createElement('div');
+    var t2 = document.createElement('div');
+    var invalidChild = document.createElement('div');
+
+    tabView.appendChild(t1);
+    tabView.appendChild(t2);
+
+    var numChangeEvents = 0;
+    tabView.addEventListener('selected-tab-change', function() {
+        numChangeEvents++;
+    });
+    tabView.selectedTab = t1;
+    assert.equal(numChangeEvents, 1);
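+    // Re-selecting the same tab should not fire another change event.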
+    tabView.selectedTab = t1;
+    assert.equal(numChangeEvents, 1);
+    tabView.selectedTab = t2;
+    assert.equal(numChangeEvents, 2);
+    tabView.selectedTab = undefined;
+    assert.equal(numChangeEvents, 3);
+  });
+
+  /**
+   * This test makes sure that removing the selected tab does not select
+   * any other tab.
+   */
+  test('instantiateRemovingSelectedTab', function() {
+    var tabViewContainer = document.createElement('div');
+    tabViewContainer.style.width = '500px';
+    tabViewContainer.style.height = '200px';
+
+    var tabView = new TracingAnalysisTabView();
+
+    var t1 = document.createElement('div');
+    t1.textContent = 'This text should BE visible.';
+    var t2 = document.createElement('div');
+    t2.textContent = 'This text should NOT be visible.';
+    var t3 = document.createElement('div');
+    t3.textContent = 'This text should NOT be visible, also.';
+
+    tabView.appendChild(t1);
+    tabView.appendChild(t2);
+    tabView.appendChild(t3);
+
+    t1.setAttribute('tab-label', 'This should not exist');
+    t2.setAttribute('tab-label', 'Not selected');
+    t3.setAttribute('tab-label', 'Not selected');
+
+    tabView.selectedTab = t1;
+    tabView.removeChild(t1);
+
+    tabViewContainer.appendChild(tabView);
+
+    this.addHTMLOutput(tabViewContainer);
+  });
+});
+</script>
diff --git a/catapult/tracing/tracing/ui/base/table.html b/catapult/tracing/tracing/ui/base/table.html
index 9199de1..0e758fb 100644
--- a/catapult/tracing/tracing/ui/base/table.html
+++ b/catapult/tracing/tracing/ui/base/table.html
@@ -242,6 +242,7 @@
         this.columnsWithExpandButtons_ = [];
         this.headerCells_ = [];
         this.subRowsPropertyName_ = 'subRows';
+        this.defaultExpansionStateCallback_ = undefined;
       },
 
       get showHeader() {
@@ -258,6 +259,19 @@
       },
 
       /**
+       * This callback will be called whenever a body row is built
+       * for a userRow that has subRows and does not have an explicit
+       * isExpanded field.
+       * The callback should return true if the row should be expanded,
+       * or false if the row should be collapsed.
+       * @param {function(userRow, parentUserRow): boolean} cb The callback.
+       */
+      set defaultExpansionStateCallback(cb) {
+        this.defaultExpansionStateCallback_ = cb;
+        this.scheduleRebuildBody_();
+      },
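+      // Illustrative usage (see also table_test.html):
+      //   table.defaultExpansionStateCallback =
+      //       function(userRow, parentUserRow) {
+      //         // Expand only the top-level rows by default.
+      //         return parentUserRow === undefined;
+      //       };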
+
+      /**
        * This callback will be called whenever a body row is built.
        * The callback's return value is ignored.
        * @param {function(userRow, trElement)} cb The callback.
@@ -432,7 +446,7 @@
         }.bind(this));
         // Sort expanded sub rows recursively.
         for (var i = 0; i < rows.length; i++) {
-          if (rows[i].isExpanded)
+          if (this.getExpandedForUserRow_(rows[i]))
             this.sortRows_(rows[i][this.subRowsPropertyName_]);
         }
       },
@@ -578,16 +592,23 @@
       },
 
       getOrCreateRowInfoFor_: function(rowInfoMap, userRow, parentRowInfo) {
-        if (rowInfoMap.has(userRow))
-          return rowInfoMap.get(userRow);
+        var rowInfo = undefined;
 
-        var rowInfo = {
-          userRow: userRow,
-          htmlNode: undefined,
-          isExpanded: userRow.isExpanded || false,
-          parentRowInfo: parentRowInfo
-        };
-        rowInfoMap.set(userRow, rowInfo);
+        if (rowInfoMap.has(userRow)) {
+          rowInfo = rowInfoMap.get(userRow);
+        } else {
+          rowInfo = {
+            userRow: userRow,
+            htmlNode: undefined,
+            parentRowInfo: parentRowInfo
+          };
+          rowInfoMap.set(userRow, rowInfo);
+        }
+
+        // Recompute isExpanded in case defaultExpansionStateCallback_ has
+        // changed.
+        rowInfo.isExpanded = this.getExpandedForUserRow_(userRow);
+
         return rowInfo;
       },
 
@@ -647,13 +668,11 @@
           i += colSpan;
         }
 
-        var needsClickListener = false;
-        if (this.columnsWithExpandButtons_.length)
-          needsClickListener = true;
-        else if (tableSection == this.$.body)
-          needsClickListener = true;
+        var isSelectable = tableSection === this.$.body;
+        var isExpandable = rowInfo.userRow[this.subRowsPropertyName_] &&
+            rowInfo.userRow[this.subRowsPropertyName_].length;
 
-        if (needsClickListener) {
+        if (isSelectable || isExpandable) {
           trElement.addEventListener('click', function(e) {
             e.stopPropagation();
             if (e.target.tagName == 'EXPAND-BUTTON') {
@@ -671,42 +690,37 @@
               return getTD(cur.parentElement);
             }
 
-            if (this.selectionMode_ !== SelectionMode.NONE) {
-              var isAlreadySelected = false;
-              var tdThatWasClicked = getTD(e.target);
+            // If the row/cell can be selected and it's not selected yet,
+            // select it.
+            if (isSelectable && this.selectionMode_ !== SelectionMode.NONE) {
+              var shouldSelect = false;
+              var columnIndex = getTD(e.target).columnIndex;
               switch (this.selectionMode_) {
                 case SelectionMode.ROW:
-                  isAlreadySelected = this.selectedTableRowInfo_ === rowInfo;
+                  shouldSelect = this.selectedTableRowInfo_ !== rowInfo;
                   break;
 
                 case SelectionMode.CELL:
-                  isAlreadySelected = this.selectedTableRowInfo_ === rowInfo;
-                  isAlreadySelected &= (this.selectedColumnIndex_ ===
-                                        tdThatWasClicked.columnIndex);
+                  if (this.doesColumnIndexSupportSelection(columnIndex)) {
+                    shouldSelect = this.selectedTableRowInfo_ !== rowInfo ||
+                        this.selectedColumnIndex_ !== columnIndex;
+                  }
                   break;
 
                 default:
                   throw new Error('Invalid selection mode ' +
                       this.selectionMode_);
               }
-              if (isAlreadySelected) {
-                if (rowInfo.userRow[this.subRowsPropertyName_] &&
-                    rowInfo.userRow[this.subRowsPropertyName_].length) {
-                  this.setExpandedForUserRow_(
-                      tableSection, rowInfoMap,
-                      rowInfo.userRow, !rowInfo.isExpanded);
-                }
-              } else {
-                this.didTableRowInfoGetClicked_(
-                    rowInfo, tdThatWasClicked.columnIndex);
+              if (shouldSelect) {
+                this.didTableRowInfoGetClicked_(rowInfo, columnIndex);
+                return;
               }
-            } else {
-              if (rowInfo.userRow[this.subRowsPropertyName_] &&
-                  rowInfo.userRow[this.subRowsPropertyName_].length) {
-                this.setExpandedForUserRow_(
-                    tableSection, rowInfoMap,
-                    rowInfo.userRow, !rowInfo.isExpanded);
-              }
+            }
+
+            // Otherwise, if the row is expandable, expand/collapse it.
+            if (isExpandable) {
+              this.setExpandedForUserRow_(tableSection, rowInfoMap,
+                  rowInfo.userRow, !rowInfo.isExpanded);
             }
           }.bind(this));
         }
@@ -820,6 +834,27 @@
         return rowInfo.isExpanded;
       },
 
+      getExpandedForUserRow_: function(userRow) {
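+        // Rows without sub rows are never expanded. An explicit isExpanded
+        // field on the user row wins; otherwise defer to
+        // defaultExpansionStateCallback_ (collapsed if no callback is set).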
+        if (userRow[this.subRowsPropertyName_] === undefined)
+          return false;
+        if (userRow[this.subRowsPropertyName_].length === 0)
+          return false;
+        if (userRow.isExpanded)
+          return true;
+        if (userRow.isExpanded === false)
+          return false;
+        if (this.defaultExpansionStateCallback_ === undefined)
+          return false;
+
+        var parentUserRow = undefined;
+        var rowInfo = this.tableRowsInfo_.get(userRow);
+        if (rowInfo && rowInfo.parentRowInfo)
+          parentUserRow = rowInfo.parentRowInfo.userRow;
+
+        return this.defaultExpansionStateCallback_(
+            userRow, parentUserRow);
+      },
+
       setExpandedForTableRow: function(userRow, expanded) {
         this.rebuildIfNeeded_();
         var rowInfo = this.tableRowsInfo_.get(userRow);
@@ -999,13 +1034,13 @@
           case SelectionMode.CELL:
             if (!this.doesColumnIndexSupportSelection(columnIndex))
               return;
+            if (this.selectedColumnIndex !== columnIndex)
+              this.selectedColumnIndex = columnIndex;
             // Fall through.
 
           case SelectionMode.ROW:
             if (this.selectedTableRowInfo_ !== rowInfo)
               this.selectedTableRow = rowInfo.userRow;
-            if (this.selectedColumnIndex !== columnIndex)
-              this.selectedColumnIndex = columnIndex;
         }
       },
 
@@ -1178,6 +1213,7 @@
           return;
 
         var code_to_command_names = {
+          13: 'ENTER',
           37: 'ARROW_LEFT',
           38: 'ARROW_UP',
           39: 'ARROW_RIGHT',
@@ -1282,6 +1318,16 @@
           }
         }
 
+        if (cmdName === 'ENTER') {
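+          // ENTER toggles expansion of the currently selected row; rows
+          // without sub rows are ignored.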
+          if (rowInfo.userRow[this.subRowsPropertyName_] === undefined)
+            return;
+          if (rowInfo.userRow[this.subRowsPropertyName_].length === 0)
+            return;
+          this.setExpandedForTableRow(rowInfo.userRow, !rowInfo.isExpanded);
+          this.focusSelected_();
+          return;
+        }
+
         throw new Error('Unrecognized command ' + cmdName);
       },
 
diff --git a/catapult/tracing/tracing/ui/base/table_test.html b/catapult/tracing/tracing/ui/base/table_test.html
index b0a0073..39cc15a 100644
--- a/catapult/tracing/tracing/ui/base/table_test.html
+++ b/catapult/tracing/tracing/ui/base/table_test.html
@@ -797,6 +797,15 @@
     table.performKeyCommand_('ARROW_UP');
     assert.equal(table.selectedTableRow, rows[0]);
 
+    // Enter on collapsed row should expand.
+    table.selectedTableRow = rows[0];
+    table.performKeyCommand_('ENTER');
+    assert.equal(table.selectedTableRow, rows[0]);
+    assert.isTrue(table.getExpandedForTableRow(rows[0]));
+
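+    // Pressing Enter again on the now-expanded row should collapse it.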
+    table.performKeyCommand_('ENTER');
+    assert.isFalse(table.getExpandedForTableRow(rows[0]));
+
     // Arrow right on collapsed row should expand.
     table.selectedTableRow = rows[0];
     table.performKeyCommand_('ARROW_RIGHT');
@@ -1255,5 +1264,116 @@
 
     assert.isTrue(callbackCalled);
   });
+
+  test('selectionEdgeCases', function() {
+    var table = document.createElement('tr-ui-b-table');
+    table.tableColumns = [
+      {
+        title: 'Column',
+        value: function(row) { return row.data; },
+        supportsCellSelection: false
+      }
+    ];
+    table.tableRows = [{ data: 'body row' }];
+    table.footerRows = [{ data: 'footer row' }];
+    table.selectionMode = SelectionMode.ROW;
+    this.addHTMLOutput(table);
+
+    // Clicking on the body row should *not* throw an exception (despite the
+    // column not supporting cell selection).
+    table.$.body.children[0].children[0].click();
+
+    // Clicking on the footer row should *not* throw an exception (despite
+    // footer rows not being selectable in general).
+    table.$.foot.children[0].children[0].click();
+  });
+
+  test('defaultExpansionStateCallback', function() {
+    var columns = [
+      {
+        title: 'Name',
+        value: function(row) { return row.name; }
+      },
+      {
+        title: 'Value',
+        value: function(row) { return row.value; }
+      }
+    ];
+
+    var rows = [
+      {
+        name: 'A',
+        value: 10,
+        subRows: [
+          {
+            name: 'B',
+            value: 8,
+            subRows: [
+              {
+                name: 'C',
+                value: 4
+              },
+              {
+                name: 'D',
+                value: 4
+              }
+            ]
+          },
+          {
+            name: 'E',
+            value: 2,
+            subRows: [
+              {
+                name: 'F',
+                value: 1
+              },
+              {
+                name: 'G',
+                value: 1
+              }
+            ]
+          }
+        ]
+      }
+    ];
+
+    var table = document.createElement('tr-ui-b-table');
+    table.tableColumns = columns;
+    table.tableRows = rows;
+    table.rebuild();
+
+    this.addHTMLOutput(table);
+
+    var cRow = tr.b.findDeepElementMatchingPredicate(
+        table, function(element) {
+      return element.textContent === 'C';
+    });
+    assert.equal(cRow, undefined);
+
+    var callbackCalled = false;
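+    // Expansion rule under test: always expand top-level rows, and expand a
+    // child row only when its value is at least 80% of its parent's value.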
+    table.defaultExpansionStateCallback = function(row, parentRow) {
+      callbackCalled = true;
+
+      if (parentRow === undefined)
+        return true;
+
+      if (row.value >= (parentRow.value * 0.8))
+        return true;
+
+      return false;
+    };
+
+    // Setting the callback should set the body dirty.
+    assert.isTrue(table.bodyDirty_);
+    assert.isFalse(callbackCalled);
+
+    table.rebuild();
+
+    assert.isTrue(callbackCalled);
+    cRow = tr.b.findDeepElementMatchingPredicate(table, function(element) {
+      return element.textContent === 'C';
+    });
+    assert.isDefined(cRow);
+  });
 });
 </script>
diff --git a/catapult/tracing/tracing/ui/base/utils.html b/catapult/tracing/tracing/ui/base/utils.html
index e3ce358..a205d1d 100644
--- a/catapult/tracing/tracing/ui/base/utils.html
+++ b/catapult/tracing/tracing/ui/base/utils.html
@@ -52,7 +52,13 @@
     return extracted;
   }
 
+  function toThreeDigitLocaleString(value) {
+    return value.toLocaleString(
+        undefined, {minimumFractionDigits: 3, maximumFractionDigits: 3});
+  }
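+  // For example, toThreeDigitLocaleString(1234.5) returns '1,234.500' under
+  // an en-US default locale; the separators depend on the runtime's locale,
+  // since no locale is specified above.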
+
   return {
+    toThreeDigitLocaleString: toThreeDigitLocaleString,
     instantiateTemplate: instantiateTemplate,
     windowRectForElement: windowRectForElement,
     scrollIntoViewIfNeeded: scrollIntoViewIfNeeded,
diff --git a/catapult/tracing/tracing/ui/extras/about_tracing/mock_tracing_controller_client.html b/catapult/tracing/tracing/ui/extras/about_tracing/mock_tracing_controller_client.html
index 082a9ed..a88122f 100644
--- a/catapult/tracing/tracing/ui/extras/about_tracing/mock_tracing_controller_client.html
+++ b/catapult/tracing/tracing/ui/extras/about_tracing/mock_tracing_controller_client.html
@@ -61,22 +61,6 @@
       assert.equal(this.requests.length, this.nextRequestIndex);
     },
 
-    beginMonitoring: function(monitoringOptions) {
-      return this._request('beginMonitoring', monitoringOptions);
-    },
-
-    endMonitoring: function() {
-      return this._request('endMonitoring');
-    },
-
-    captureMonitoring: function() {
-      return this._request('captureMonitoring');
-    },
-
-    getMonitoringStatus: function() {
-      return this._request('getMonitoringStatus');
-    },
-
     getCategories: function() {
       return this._request('getCategories');
     },
diff --git a/catapult/tracing/tracing/ui/extras/about_tracing/profiling_view.html b/catapult/tracing/tracing/ui/extras/about_tracing/profiling_view.html
index 205d848..1e6f06c 100644
--- a/catapult/tracing/tracing/ui/extras/about_tracing/profiling_view.html
+++ b/catapult/tracing/tracing/ui/extras/about_tracing/profiling_view.html
@@ -7,7 +7,7 @@
 
 <link rel="import" href="/tracing/base/base64.html">
 <link rel="import"
-      href="/tracing/ui/extras/about_tracing/record_and_capture_controller.html">
+      href="/tracing/ui/extras/about_tracing/record_controller.html">
 <link rel="import"
       href="/tracing/ui/extras/about_tracing/inspector_tracing_controller_client.html">
 <link rel="import"
@@ -39,8 +39,7 @@
   -webkit-user-select: text;
 }
 
-x-timeline-view-buttons,
-x-timeline-view-buttons > #monitoring-elements {
+x-timeline-view-buttons {
   display: flex;
   align-items: center;
 }
@@ -50,11 +49,6 @@
   <tr-ui-b-info-bar-group></tr-ui-b-info-bar-group>
   <x-timeline-view-buttons>
     <button id="record-button">Record</button>
-    <span id="monitoring-elements">
-      <input id="monitor-checkbox" type="checkbox">
-      <label for="monitor-checkbox">Monitoring</label></input>
-      <button id="capture-button">Capture Monitoring Snapshot</button>
-    </span>
     <button id="save-button">Save</button>
     <button id="load-button">Load</button>
   </x-timeline-view-buttons>
@@ -80,7 +74,7 @@
       };
       reader.onerror = function(err) {
         reject(err);
-      }
+      };
 
       var is_binary = /[.]gz$/.test(filename) || /[.]zip$/.test(filename);
       if (is_binary)
@@ -111,9 +105,6 @@
       // Detach the buttons. We will reattach them to the timeline view.
       // TODO(nduca): Make timeline-view have a content select="x-buttons"
       // that pulls in any buttons.
-      this.monitoringElements_ = this.querySelector('#monitoring-elements');
-      this.monitorCheckbox_ = this.querySelector('#monitor-checkbox');
-      this.captureButton_ = this.querySelector('#capture-button');
       this.recordButton_ = this.querySelector('#record-button');
       this.loadButton_ = this.querySelector('#load-button');
       this.saveButton_ = this.querySelector('#save-button');
@@ -146,14 +137,8 @@
       }
 
       this.isRecording_ = false;
-      this.isMonitoring_ = false;
       this.activeTrace_ = undefined;
 
-      window.onMonitoringStateChanged = function(is_monitoring) {
-        this.onMonitoringStateChanged_(is_monitoring);
-      }.bind(this);
-
-      this.getMonitoringStatus();
       this.updateTracingControllerSpecificState_();
     },
 
@@ -167,10 +152,6 @@
       return this.isRecording_;
     },
 
-    get isMonitoring() {
-      return this.isMonitoring_;
-    },
-
     set tracingControllerClient(tracingControllerClient) {
       this.tracingControllerClient_ = tracingControllerClient;
       this.updateTracingControllerSpecificState_();
@@ -185,31 +166,23 @@
             'This about:tracing is connected to a remote device...',
             [{buttonText: 'Wow!', onClick: function() {}}]);
       }
-
-      this.monitoringElements_.style.display = isInspector ? 'none' : '';
     },
 
     beginRecording: function() {
       if (this.isRecording_)
         throw new Error('Already recording');
-      if (this.isMonitoring_)
-        throw new Error('Already monitoring');
       this.isRecording_ = true;
-      this.monitorCheckbox_.disabled = true;
-      this.monitorCheckbox_.checked = false;
       var resultPromise = tr.ui.e.about_tracing.beginRecording(
           this.tracingControllerClient_);
       resultPromise.then(
           function(data) {
             this.isRecording_ = false;
-            this.monitorCheckbox_.disabled = false;
             var traceName = tr.ui.e.about_tracing.defaultTraceName(
                 this.tracingControllerClient_);
             this.setActiveTrace(traceName, data, false);
           }.bind(this),
           function(err) {
             this.isRecording_ = false;
-            this.monitorCheckbox_.disabled = false;
             if (err instanceof tr.ui.e.about_tracing.UserCancelledError)
               return;
             tr.ui.b.Overlay.showError('Error while recording', err);
@@ -217,86 +190,6 @@
       return resultPromise;
     },
 
-    beginMonitoring: function() {
-      if (this.isRecording_)
-        throw new Error('Already recording');
-      if (this.isMonitoring_)
-        throw new Error('Already monitoring');
-      var resultPromise =
-          tr.ui.e.about_tracing.beginMonitoring(this.tracingControllerClient_);
-      resultPromise.then(
-          function() {
-          }.bind(this),
-          function(err) {
-            if (err instanceof tr.ui.e.about_tracing.UserCancelledError)
-              return;
-            tr.ui.b.Overlay.showError('Error while monitoring', err);
-          }.bind(this));
-      return resultPromise;
-    },
-
-    endMonitoring: function() {
-      if (this.isRecording_)
-        throw new Error('Already recording');
-      if (!this.isMonitoring_)
-        throw new Error('Monitoring is disabled');
-      var resultPromise =
-          tr.ui.e.about_tracing.endMonitoring(this.tracingControllerClient_);
-      resultPromise.then(
-          function() {
-          }.bind(this),
-          function(err) {
-            if (err instanceof tr.ui.e.about_tracing.UserCancelledError)
-              return;
-            tr.ui.b.Overlay.showError('Error while monitoring', err);
-          }.bind(this));
-      return resultPromise;
-    },
-
-    captureMonitoring: function() {
-      if (!this.isMonitoring_)
-        throw new Error('Monitoring is disabled');
-      var resultPromise =
-          tr.ui.e.about_tracing.captureMonitoring(
-              this.tracingControllerClient_);
-      resultPromise.then(
-          function(data) {
-            var traceName = tr.ui.e.about_tracing.defaultTraceName(
-                this.tracingControllerClient_);
-            this.setActiveTrace(traceName, data, true);
-          }.bind(this),
-          function(err) {
-            if (err instanceof tr.ui.e.about_tracing.UserCancelledError)
-              return;
-            tr.ui.b.Overlay.showError('Error while monitoring', err);
-          }.bind(this));
-      return resultPromise;
-    },
-
-    getMonitoringStatus: function() {
-      var resultPromise =
-          tr.ui.e.about_tracing.getMonitoringStatus(
-              this.tracingControllerClient_);
-      resultPromise.then(
-          function(status) {
-            this.onMonitoringStateChanged_(status.isMonitoring);
-          }.bind(this),
-          function(err) {
-            if (err instanceof tr.ui.e.about_tracing.UserCancelledError)
-              return;
-            tr.ui.b.Overlay.showError('Error while updating tracing states',
-                                      err);
-          }.bind(this));
-      return resultPromise;
-    },
-
-    onMonitoringStateChanged_: function(is_monitoring) {
-      this.isMonitoring_ = is_monitoring;
-      this.recordButton_.disabled = is_monitoring;
-      this.captureButton_.disabled = !is_monitoring;
-      this.monitorCheckbox_.checked = is_monitoring;
-    },
-
     get timelineView() {
       return this.timelineView_;
     },
@@ -341,22 +234,6 @@
             this.beginRecording();
           }.bind(this));
 
-      this.monitorCheckbox_.addEventListener(
-          'click', function(event) {
-            event.stopPropagation();
-            if (this.isMonitoring_)
-              this.endMonitoring();
-            else
-              this.beginMonitoring();
-          }.bind(this));
-
-      this.captureButton_.addEventListener(
-          'click', function(event) {
-            event.stopPropagation();
-            this.captureMonitoring();
-          }.bind(this));
-      this.captureButton_.disabled = true;
-
       this.loadButton_.addEventListener(
           'click', function(event) {
             event.stopPropagation();
diff --git a/catapult/tracing/tracing/ui/extras/about_tracing/profiling_view_test.html b/catapult/tracing/tracing/ui/extras/about_tracing/profiling_view_test.html
index d3c2d24..457d074 100644
--- a/catapult/tracing/tracing/ui/extras/about_tracing/profiling_view_test.html
+++ b/catapult/tracing/tracing/ui/extras/about_tracing/profiling_view_test.html
@@ -6,6 +6,7 @@
 -->
 
 <link rel="import" href="/tracing/base/base.html">
+<link rel="import" href="/tracing/base/base64.html">
 <link rel="import"
       href="/tracing/ui/extras/about_tracing/mock_tracing_controller_client.html">
 <link rel="import" href="/tracing/extras/importer/trace_event_importer.html">
@@ -15,6 +16,7 @@
 'use strict';
 
 tr.b.unittest.testSuite(function() {
+  var Base64 = tr.b.Base64;
   var testData = [
     {name: 'a', args: {}, pid: 52, ts: 15000, cat: 'foo', tid: 53, ph: 'B'},
     {name: 'a', args: {}, pid: 52, ts: 19000, cat: 'foo', tid: 53, ph: 'E'},
@@ -35,9 +37,6 @@
   test('recording', function() {
     var mock = new tr.ui.e.about_tracing.MockTracingControllerClient();
     mock.allowLooping = true;
-    mock.expectRequest('getMonitoringStatus', function() {
-      return btoa(JSON.stringify(monitoringOptions));
-    });
     mock.expectRequest('endRecording', function() {
       return '';
     });
@@ -80,58 +79,6 @@
           });
     });
   });
-
-  test('monitoring', function() {
-    var mock = new tr.ui.e.about_tracing.MockTracingControllerClient();
-    mock.allowLooping = true;
-    mock.expectRequest('getMonitoringStatus', function() {
-      return btoa(JSON.stringify(monitoringOptions));
-    });
-    mock.expectRequest('beginMonitoring', function(data) {
-      return '';
-    });
-    mock.expectRequest('captureMonitoring', function(data) {
-      return JSON.stringify(testData);
-    });
-    mock.expectRequest('endMonitoring', function(data) {
-      return '';
-    });
-
-    var view = new ProfilingView(mock);
-    view.style.height = '400px';
-    view.style.border = '1px solid black';
-    this.addHTMLOutput(view);
-
-    return new Promise(function(resolve, reject) {
-      assert.isFalse(view.monitorCheckbox_.checked);
-
-      function beginMonitoring() {
-        // Since we don't fall back to TracingController when testing,
-        // we cannot rely on TracingController to invoke a callback to change
-        // view.isMonitoring_. Thus we change view.isMonitoring_ manually.
-        view.onMonitoringStateChanged_(true);
-        assert.isTrue(view.monitorCheckbox_.checked);
-        setTimeout(captureMonitoring, 60);
-      }
-
-      function captureMonitoring() {
-        assert.isTrue(view.monitorCheckbox_.checked);
-        view.captureButton_.click();
-        setTimeout(endMonitoring, 60);
-      }
-
-      function endMonitoring() {
-        assert.isTrue(view.monitorCheckbox_.checked);
-        view.monitorCheckbox_.click();
-        assert.isFalse(view.monitorCheckbox_.checked);
-      }
-
-      var monitoringPromise = view.beginMonitoring();
-      setTimeout(beginMonitoring, 60);
-
-      monitoringPromise.then(resolve, reject);
-    });
-  });
 });
 </script>
 
diff --git a/catapult/tracing/tracing/ui/extras/about_tracing/record_and_capture_controller.html b/catapult/tracing/tracing/ui/extras/about_tracing/record_and_capture_controller.html
deleted file mode 100644
index ad1a5b1..0000000
--- a/catapult/tracing/tracing/ui/extras/about_tracing/record_and_capture_controller.html
+++ /dev/null
@@ -1,238 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2013 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/ui/extras/about_tracing/record_selection_dialog.html">
-
-<script>
-'use strict';
-
-tr.exportTo('tr.ui.e.about_tracing', function() {
-  function beginMonitoring(tracingControllerClient) {
-    var finalPromiseResolver;
-    var finalPromise = new Promise(function(resolve, reject) {
-      finalPromiseResolver = {
-        resolve: resolve,
-        reject: reject
-      };
-    });
-
-    // TODO(haraken): Implement a configure dialog to set these options.
-    var monitoringOptions = {
-      categoryFilter: '*',
-      useSystemTracing: false,
-      tracingRecordMode: 'record-until-full',
-      useSampling: true
-    };
-
-
-    var beginMonitoringPromise = tracingControllerClient.beginMonitoring(
-        monitoringOptions);
-
-    beginMonitoringPromise.then(
-        function() {
-          finalPromiseResolver.resolve();
-        },
-        function(err) {
-          finalPromiseResolver.reject(err);
-        });
-
-    return finalPromise;
-  }
-
-  function endMonitoring(tracingControllerClient) {
-    var endMonitoringPromise = tracingControllerClient.endMonitoring();
-    return endMonitoringPromise.then(
-        function() {
-        },
-        function(err) {
-        });
-  }
-
-  function captureMonitoring(tracingControllerClient) {
-    var captureMonitoringPromise =
-        tracingControllerClient.captureMonitoring();
-    return captureMonitoringPromise;
-  }
-
-  function getMonitoringStatus(tracingControllerClient) {
-    var getMonitoringStatusPromise =
-        tracingControllerClient.getMonitoringStatus();
-    return getMonitoringStatusPromise;
-  }
-
-  function beginRecording(tracingControllerClient) {
-    var finalPromiseResolver;
-    var finalPromise = new Promise(function(resolve, reject) {
-      finalPromiseResolver = {
-        resolve: resolve,
-        reject: reject
-      };
-    });
-    finalPromise.selectionDlg = undefined;
-    finalPromise.progressDlg = undefined;
-
-    function beginRecordingError(err) {
-      finalPromiseResolver.reject(err);
-    }
-
-    // Step 0: End recording. This is necessary when the user reloads the
-    // about:tracing page when we are recording. Window.onbeforeunload is not
-    // reliable to end recording on reload.
-    endRecording(tracingControllerClient).then(
-        getCategories,
-        getCategories);  // Ignore error.
-
-    // But just in case, bind onbeforeunload anyway.
-    window.onbeforeunload = function(e) {
-      endRecording(tracingControllerClient);
-    }
-
-    // Step 1: Get categories.
-    function getCategories() {
-      var p = tracingControllerClient.getCategories().then(
-          showTracingDialog,
-          beginRecordingError);
-      p.catch(function(err) {
-        beginRecordingError(err);
-      });
-    }
-
-    // Step 2: Show tracing dialog.
-    var selectionDlg;
-    function showTracingDialog(categories) {
-      selectionDlg = new tr.ui.e.about_tracing.RecordSelectionDialog();
-      selectionDlg.categories = categories;
-      selectionDlg.settings_key =
-          'tr.ui.e.about_tracing.record_selection_dialog';
-      selectionDlg.addEventListener('recordclick', startTracing);
-      selectionDlg.addEventListener('closeclick', cancelRecording);
-      selectionDlg.visible = true;
-
-      finalPromise.selectionDlg = selectionDlg;
-    }
-
-    function cancelRecording() {
-      finalPromise.selectionDlg = undefined;
-      finalPromiseResolver.reject(new UserCancelledError());
-    }
-
-    // Step 2: Do the actual tracing dialog.
-    var progressDlg;
-    var bufferPercentFullDiv;
-    function startTracing() {
-      progressDlg = new tr.ui.b.Overlay();
-      progressDlg.textContent = 'Recording...';
-      progressDlg.userCanClose = false;
-
-      bufferPercentFullDiv = document.createElement('div');
-      progressDlg.appendChild(bufferPercentFullDiv);
-
-      var stopButton = document.createElement('button');
-      stopButton.textContent = 'Stop';
-      progressDlg.clickStopButton = function() {
-        stopButton.click();
-      };
-      progressDlg.appendChild(stopButton);
-
-      var recordingOptions = {
-        categoryFilter: selectionDlg.categoryFilter(),
-        useSystemTracing: selectionDlg.useSystemTracing,
-        tracingRecordMode: selectionDlg.tracingRecordMode,
-        useSampling: selectionDlg.useSampling
-      };
-
-
-      var requestPromise = tracingControllerClient.beginRecording(
-          recordingOptions);
-      requestPromise.then(
-          function() {
-            progressDlg.visible = true;
-            stopButton.focus();
-            updateBufferPercentFull('0');
-          },
-          recordFailed);
-
-      stopButton.addEventListener('click', function() {
-        // TODO(chrishenry): Currently, this only dismiss the progress
-        // dialog when tracingComplete event is received. When performing
-        // remote debugging, the tracingComplete event may be delayed
-        // considerable. We should indicate to user that we are waiting
-        // for tracingComplete event instead of being unresponsive. (For
-        // now, I disable the "stop" button, since clicking on the button
-        // again now cause exception.)
-        var recordingPromise = endRecording(tracingControllerClient);
-        recordingPromise.then(
-            recordFinished,
-            recordFailed);
-        stopButton.disabled = true;
-        bufferPercentFullDiv = undefined;
-      });
-      finalPromise.progressDlg = progressDlg;
-    }
-
-    function recordFinished(tracedData) {
-      progressDlg.visible = false;
-      finalPromise.progressDlg = undefined;
-      finalPromiseResolver.resolve(tracedData);
-    }
-
-    function recordFailed(err) {
-      progressDlg.visible = false;
-      finalPromise.progressDlg = undefined;
-      finalPromiseResolver.reject(err);
-    }
-
-    function getBufferPercentFull() {
-      if (!bufferPercentFullDiv)
-        return;
-
-      tracingControllerClient.beginGetBufferPercentFull().then(
-          updateBufferPercentFull);
-    }
-
-    function updateBufferPercentFull(percent_full) {
-      if (!bufferPercentFullDiv)
-        return;
-
-      percent_full = Math.round(100 * parseFloat(percent_full));
-      var newText = 'Buffer usage: ' + percent_full + '%';
-      if (bufferPercentFullDiv.textContent != newText)
-        bufferPercentFullDiv.textContent = newText;
-
-      window.setTimeout(getBufferPercentFull, 500);
-    }
-
-    // Thats it! We're done.
-    return finalPromise;
-  };
-
-  function endRecording(tracingControllerClient) {
-    return tracingControllerClient.endRecording();
-  }
-
-  function defaultTraceName(tracingControllerClient) {
-    return tracingControllerClient.defaultTraceName();
-  }
-
-  function UserCancelledError() {
-    Error.apply(this, arguments);
-  }
-  UserCancelledError.prototype = {
-    __proto__: Error.prototype
-  };
-
-  return {
-    beginRecording: beginRecording,
-    beginMonitoring: beginMonitoring,
-    endMonitoring: endMonitoring,
-    captureMonitoring: captureMonitoring,
-    getMonitoringStatus: getMonitoringStatus,
-    UserCancelledError: UserCancelledError,
-    defaultTraceName: defaultTraceName
-  };
-});
-</script>
diff --git a/catapult/tracing/tracing/ui/extras/about_tracing/record_and_capture_controller_test.html b/catapult/tracing/tracing/ui/extras/about_tracing/record_and_capture_controller_test.html
deleted file mode 100644
index 9a06e61..0000000
--- a/catapult/tracing/tracing/ui/extras/about_tracing/record_and_capture_controller_test.html
+++ /dev/null
@@ -1,110 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2013 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import"
-      href="/tracing/ui/extras/about_tracing/mock_tracing_controller_client.html">
-<link rel="import"
-      href="/tracing/ui/extras/about_tracing/record_and_capture_controller.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  var testData = [
-    {name: 'a', args: {}, pid: 52, ts: 15000, cat: 'foo', tid: 53, ph: 'B'},
-    {name: 'a', args: {}, pid: 52, ts: 19000, cat: 'foo', tid: 53, ph: 'E'},
-    {name: 'b', args: {}, pid: 52, ts: 32000, cat: 'foo', tid: 53, ph: 'B'},
-    {name: 'b', args: {}, pid: 52, ts: 54000, cat: 'foo', tid: 53, ph: 'E'}
-  ];
-
-  test('fullRecording', function() {
-    return new Promise(function(resolve, reject) {
-      var mock = new tr.ui.e.about_tracing.MockTracingControllerClient();
-      mock.expectRequest('endRecording', function() {
-        return '';
-      });
-      mock.expectRequest('getCategories', function() {
-        setTimeout(function() {
-          recordingPromise.selectionDlg.clickRecordButton();
-        }, 20);
-        return ['a', 'b', 'c'];
-      });
-      mock.expectRequest('beginRecording', function(recordingOptions) {
-        assert.typeOf(recordingOptions.categoryFilter, 'string');
-        assert.typeOf(recordingOptions.useSystemTracing, 'boolean');
-        assert.typeOf(recordingOptions.useSampling, 'boolean');
-        assert.typeOf(recordingOptions.tracingRecordMode, 'string');
-        setTimeout(function() {
-          recordingPromise.progressDlg.clickStopButton();
-        }, 10);
-        return '';
-      });
-      mock.expectRequest('endRecording', function(data) {
-        return JSON.stringify(testData);
-      });
-
-      var recordingPromise = tr.ui.e.about_tracing.beginRecording(mock);
-
-      return recordingPromise.then(
-          function(data) {
-            mock.assertAllRequestsHandled();
-            var testDataString = JSON.stringify(testData);
-            assert.equal(data, testDataString);
-            resolve();
-          },
-          function(error) {
-            reject('This should never be reached');
-          });
-    });
-  });
-
-  test('monitoring', function() {
-    return new Promise(function(resolve, reject) {
-      var mock = new tr.ui.e.about_tracing.MockTracingControllerClient();
-
-      mock.expectRequest('beginMonitoring', function(monitoringOptions) {
-        assert.typeOf(monitoringOptions.categoryFilter, 'string');
-        assert.typeOf(monitoringOptions.useSystemTracing, 'boolean');
-        assert.typeOf(monitoringOptions.useSampling, 'boolean');
-        assert.typeOf(monitoringOptions.tracingRecordMode, 'string');
-        setTimeout(function() {
-          var capturePromise = tr.ui.e.about_tracing.captureMonitoring(mock);
-          capturePromise.then(
-              function(data) {
-                var testDataString = JSON.stringify(testData);
-                assert.equal(data, testDataString);
-              },
-              function(error) {
-                reject();
-              });
-        }, 10);
-        return '';
-      });
-
-      mock.expectRequest('captureMonitoring', function(data) {
-        setTimeout(function() {
-          var endPromise = tr.ui.e.about_tracing.endMonitoring(mock);
-          endPromise.then(
-              function(data) {
-                mock.assertAllRequestsHandled();
-                resolve();
-              },
-              function(error) {
-                reject();
-              });
-        }, 10);
-        return JSON.stringify(testData);
-      });
-
-      mock.expectRequest('endMonitoring', function(data) {
-      });
-
-      tr.ui.e.about_tracing.beginMonitoring(mock);
-    });
-  });
-});
-</script>
diff --git a/catapult/tracing/tracing/ui/extras/about_tracing/record_controller.html b/catapult/tracing/tracing/ui/extras/about_tracing/record_controller.html
new file mode 100644
index 0000000..2284b0f
--- /dev/null
+++ b/catapult/tracing/tracing/ui/extras/about_tracing/record_controller.html
@@ -0,0 +1,181 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2013 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/ui/extras/about_tracing/record_selection_dialog.html">
+
+<script>
+'use strict';
+
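+// Usage sketch (mirrors how profiling_view.html drives a recording; no new
+// APIs are assumed):
+//
+//   var promise =
+//       tr.ui.e.about_tracing.beginRecording(tracingControllerClient);
+//   promise.then(
+//       function(data) { /* raw trace data, ready for import */ },
+//       function(err) {
+//         if (err instanceof tr.ui.e.about_tracing.UserCancelledError)
+//           return;  // The user dismissed the record dialog.
+//         /* otherwise report the error */
+//       });
+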
+tr.exportTo('tr.ui.e.about_tracing', function() {
+  function beginRecording(tracingControllerClient) {
+    var finalPromiseResolver;
+    var finalPromise = new Promise(function(resolve, reject) {
+      finalPromiseResolver = {
+        resolve: resolve,
+        reject: reject
+      };
+    });
+    finalPromise.selectionDlg = undefined;
+    finalPromise.progressDlg = undefined;
+
+    function beginRecordingError(err) {
+      finalPromiseResolver.reject(err);
+    }
+
+    // Step 0: End any recording in progress. This is necessary when the user
+    // reloads the about:tracing page while we are recording, because
+    // window.onbeforeunload is not reliable for ending the recording on
+    // reload.
+    endRecording(tracingControllerClient).then(
+        getCategories,
+        getCategories);  // Ignore error.
+
+    // But just in case, bind onbeforeunload anyway.
+    window.onbeforeunload = function(e) {
+      endRecording(tracingControllerClient);
+    };
+
+    // Step 1: Get categories.
+    function getCategories() {
+      var p = tracingControllerClient.getCategories().then(
+          showTracingDialog,
+          beginRecordingError);
+      p.catch(function(err) {
+        beginRecordingError(err);
+      });
+    }
+
+    // Step 2: Show tracing dialog.
+    var selectionDlg;
+    function showTracingDialog(categories) {
+      selectionDlg = new tr.ui.e.about_tracing.RecordSelectionDialog();
+      selectionDlg.categories = categories;
+      selectionDlg.settings_key =
+          'tr.ui.e.about_tracing.record_selection_dialog';
+      selectionDlg.addEventListener('recordclick', startTracing);
+      selectionDlg.addEventListener('closeclick', cancelRecording);
+      selectionDlg.visible = true;
+
+      finalPromise.selectionDlg = selectionDlg;
+    }
+
+    function cancelRecording() {
+      finalPromise.selectionDlg = undefined;
+      finalPromiseResolver.reject(new UserCancelledError());
+    }
+
+    // Step 3: Do the actual tracing.
+    var progressDlg;
+    var bufferPercentFullDiv;
+    function startTracing() {
+      progressDlg = new tr.ui.b.Overlay();
+      progressDlg.textContent = 'Recording...';
+      progressDlg.userCanClose = false;
+
+      bufferPercentFullDiv = document.createElement('div');
+      progressDlg.appendChild(bufferPercentFullDiv);
+
+      var stopButton = document.createElement('button');
+      stopButton.textContent = 'Stop';
+      progressDlg.clickStopButton = function() {
+        stopButton.click();
+      };
+      progressDlg.appendChild(stopButton);
+
+      var recordingOptions = {
+        categoryFilter: selectionDlg.categoryFilter(),
+        useSystemTracing: selectionDlg.useSystemTracing,
+        tracingRecordMode: selectionDlg.tracingRecordMode,
+        useSampling: selectionDlg.useSampling
+      };
+
+
+      var requestPromise = tracingControllerClient.beginRecording(
+          recordingOptions);
+      requestPromise.then(
+          function() {
+            progressDlg.visible = true;
+            stopButton.focus();
+            updateBufferPercentFull('0');
+          },
+          recordFailed);
+
+      stopButton.addEventListener('click', function() {
+        // TODO(chrishenry): Currently, this only dismisses the progress
+        // dialog when the tracingComplete event is received. When performing
+        // remote debugging, the tracingComplete event may be delayed
+        // considerably. We should indicate to the user that we are waiting
+        // for the tracingComplete event instead of appearing unresponsive.
+        // (For now, we disable the "stop" button, since clicking it again
+        // would cause an exception.)
+        var recordingPromise = endRecording(tracingControllerClient);
+        recordingPromise.then(
+            recordFinished,
+            recordFailed);
+        stopButton.disabled = true;
+        bufferPercentFullDiv = undefined;
+      });
+      finalPromise.progressDlg = progressDlg;
+    }
+
+    function recordFinished(tracedData) {
+      progressDlg.visible = false;
+      finalPromise.progressDlg = undefined;
+      finalPromiseResolver.resolve(tracedData);
+    }
+
+    function recordFailed(err) {
+      progressDlg.visible = false;
+      finalPromise.progressDlg = undefined;
+      finalPromiseResolver.reject(err);
+    }
+
+    function getBufferPercentFull() {
+      if (!bufferPercentFullDiv)
+        return;
+
+      tracingControllerClient.beginGetBufferPercentFull().then(
+          updateBufferPercentFull);
+    }
+
+    function updateBufferPercentFull(percent_full) {
+      if (!bufferPercentFullDiv)
+        return;
+
+      percent_full = Math.round(100 * parseFloat(percent_full));
+      var newText = 'Buffer usage: ' + percent_full + '%';
+      if (bufferPercentFullDiv.textContent != newText)
+        bufferPercentFullDiv.textContent = newText;
+
+      window.setTimeout(getBufferPercentFull, 500);
+    }
+
+    // That's it! We're done.
+    return finalPromise;
+  };
+
+  function endRecording(tracingControllerClient) {
+    return tracingControllerClient.endRecording();
+  }
+
+  function defaultTraceName(tracingControllerClient) {
+    return tracingControllerClient.defaultTraceName();
+  }
+
+  function UserCancelledError() {
+    Error.apply(this, arguments);
+  }
+  UserCancelledError.prototype = {
+    __proto__: Error.prototype
+  };
+
+  return {
+    beginRecording: beginRecording,
+    UserCancelledError: UserCancelledError,
+    defaultTraceName: defaultTraceName
+  };
+});
+</script>
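
The promise returned by beginRecording above exposes the two dialogs it creates (selectionDlg, progressDlg) so callers can drive the flow programmatically, as the test below does. A minimal sketch of that usage, assuming `client` is any TracingControllerClient-compatible object (a hypothetical stand-in here):

  // Sketch only; `client` is assumed to implement the TracingControllerClient
  // interface (the test below uses MockTracingControllerClient).
  var recordingPromise = tr.ui.e.about_tracing.beginRecording(client);

  // Once getCategories resolves, the record-selection dialog is attached to
  // the promise; clicking "Record" swaps it for the progress dialog.
  setTimeout(function() {
    if (recordingPromise.selectionDlg)
      recordingPromise.selectionDlg.clickRecordButton();
  }, 100);

  recordingPromise.then(
      function(tracedData) {
        // tracedData is the raw trace returned by endRecording.
      },
      function(err) {
        if (err instanceof tr.ui.e.about_tracing.UserCancelledError)
          return;  // The user closed the selection dialog.
        throw err;
      });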
diff --git a/catapult/tracing/tracing/ui/extras/about_tracing/record_controller_test.html b/catapult/tracing/tracing/ui/extras/about_tracing/record_controller_test.html
new file mode 100644
index 0000000..69c9cc8
--- /dev/null
+++ b/catapult/tracing/tracing/ui/extras/about_tracing/record_controller_test.html
@@ -0,0 +1,65 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2013 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import"
+      href="/tracing/ui/extras/about_tracing/mock_tracing_controller_client.html">
+<link rel="import"
+      href="/tracing/ui/extras/about_tracing/record_controller.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  var testData = [
+    {name: 'a', args: {}, pid: 52, ts: 15000, cat: 'foo', tid: 53, ph: 'B'},
+    {name: 'a', args: {}, pid: 52, ts: 19000, cat: 'foo', tid: 53, ph: 'E'},
+    {name: 'b', args: {}, pid: 52, ts: 32000, cat: 'foo', tid: 53, ph: 'B'},
+    {name: 'b', args: {}, pid: 52, ts: 54000, cat: 'foo', tid: 53, ph: 'E'}
+  ];
+
+  test('fullRecording', function() {
+    return new Promise(function(resolve, reject) {
+      var mock = new tr.ui.e.about_tracing.MockTracingControllerClient();
+      mock.expectRequest('endRecording', function() {
+        return '';
+      });
+      mock.expectRequest('getCategories', function() {
+        setTimeout(function() {
+          recordingPromise.selectionDlg.clickRecordButton();
+        }, 20);
+        return ['a', 'b', 'c'];
+      });
+      mock.expectRequest('beginRecording', function(recordingOptions) {
+        assert.typeOf(recordingOptions.categoryFilter, 'string');
+        assert.typeOf(recordingOptions.useSystemTracing, 'boolean');
+        assert.typeOf(recordingOptions.useSampling, 'boolean');
+        assert.typeOf(recordingOptions.tracingRecordMode, 'string');
+        setTimeout(function() {
+          recordingPromise.progressDlg.clickStopButton();
+        }, 10);
+        return '';
+      });
+      mock.expectRequest('endRecording', function(data) {
+        return JSON.stringify(testData);
+      });
+
+      var recordingPromise = tr.ui.e.about_tracing.beginRecording(mock);
+
+      return recordingPromise.then(
+          function(data) {
+            mock.assertAllRequestsHandled();
+            var testDataString = JSON.stringify(testData);
+            assert.equal(data, testDataString);
+            resolve();
+          },
+          function(error) {
+            reject('This should never be reached');
+          });
+    });
+  });
+});
+</script>
diff --git a/catapult/tracing/tracing/ui/extras/about_tracing/xhr_based_tracing_controller_client.html b/catapult/tracing/tracing/ui/extras/about_tracing/xhr_based_tracing_controller_client.html
index 61341c4..c7c68cc 100644
--- a/catapult/tracing/tracing/ui/extras/about_tracing/xhr_based_tracing_controller_client.html
+++ b/catapult/tracing/tracing/ui/extras/about_tracing/xhr_based_tracing_controller_client.html
@@ -13,6 +13,8 @@
 'use strict';
 
 tr.exportTo('tr.ui.e.about_tracing', function() {
+  var Base64 = tr.b.Base64;
+
   function beginXhr(method, path, data) {
     if (data === undefined)
       data = null;
@@ -45,7 +47,7 @@
     __proto__: tr.ui.e.about_tracing.TracingControllerClient.prototype,
 
     beginMonitoring: function(monitoringOptions) {
-      var monitoringOptionsB64 = btoa(JSON.stringify(monitoringOptions));
+      var monitoringOptionsB64 = Base64.btoa(JSON.stringify(monitoringOptions));
       return beginXhr('GET', '/json/begin_monitoring?' + monitoringOptionsB64);
     },
 
@@ -56,9 +58,9 @@
     captureMonitoring: function() {
       return beginXhr('GET', '/json/capture_monitoring_compressed').then(
         function(data) {
-          var decoded_size = tr.b.Base64.getDecodedBufferLength(data);
+          var decoded_size = Base64.getDecodedBufferLength(data);
           var buffer = new ArrayBuffer(decoded_size);
-          tr.b.Base64.DecodeToTypedArray(data, new DataView(buffer));
+          Base64.DecodeToTypedArray(data, new DataView(buffer));
           return buffer;
         }
       );
@@ -67,7 +69,7 @@
     getMonitoringStatus: function() {
       return beginXhr('GET', '/json/get_monitoring_status').then(
           function(monitoringOptionsB64) {
-            return JSON.parse(atob(monitoringOptionsB64));
+            return JSON.parse(Base64.atob(monitoringOptionsB64));
           });
     },
 
@@ -79,7 +81,7 @@
     },
 
     beginRecording: function(recordingOptions) {
-      var recordingOptionsB64 = btoa(JSON.stringify(recordingOptions));
+      var recordingOptionsB64 = Base64.btoa(JSON.stringify(recordingOptions));
       return beginXhr('GET', '/json/begin_recording?' +
                       recordingOptionsB64);
     },
@@ -91,9 +93,9 @@
     endRecording: function() {
       return beginXhr('GET', '/json/end_recording_compressed').then(
         function(data) {
-          var decoded_size = tr.b.Base64.getDecodedBufferLength(data);
+          var decoded_size = Base64.getDecodedBufferLength(data);
           var buffer = new ArrayBuffer(decoded_size);
-          tr.b.Base64.DecodeToTypedArray(data, new DataView(buffer));
+          Base64.DecodeToTypedArray(data, new DataView(buffer));
           return buffer;
         }
       );
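
Both compressed endpoints above decode the base64 payload straight into an ArrayBuffer rather than a JavaScript string. A small sketch of that decode path, using only the Base64 helpers aliased at the top of this file:

  // Decode a base64 string into an ArrayBuffer, as captureMonitoring and
  // endRecording do above.
  function base64ToArrayBuffer(b64Data) {
    var decodedSize = tr.b.Base64.getDecodedBufferLength(b64Data);
    var buffer = new ArrayBuffer(decodedSize);
    tr.b.Base64.DecodeToTypedArray(b64Data, new DataView(buffer));
    return buffer;
  }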
diff --git a/catapult/tracing/tracing/ui/extras/chrome/cc/display_item_debugger.html b/catapult/tracing/tracing/ui/extras/chrome/cc/display_item_debugger.html
index ee40f20..55a028c 100644
--- a/catapult/tracing/tracing/ui/extras/chrome/cc/display_item_debugger.html
+++ b/catapult/tracing/tracing/ui/extras/chrome/cc/display_item_debugger.html
@@ -5,15 +5,17 @@
 found in the LICENSE file.
 -->
 
+<link rel="import" href="/tracing/base/base64.html">
 <link rel="import" href="/tracing/extras/chrome/cc/picture.html">
 <link rel="import" href="/tracing/ui/analysis/generic_object_view.html">
 <link rel="import" href="/tracing/ui/base/drag_handle.html">
-<link rel="import" href="/tracing/ui/base/info_bar.html">
 <link rel="import" href="/tracing/ui/base/hotkey_controller.html">
+<link rel="import" href="/tracing/ui/base/info_bar.html">
 <link rel="import" href="/tracing/ui/base/list_view.html">
 <link rel="import" href="/tracing/ui/base/mouse_mode_selector.html">
 <link rel="import" href="/tracing/ui/base/overlay.html">
 <link rel="import" href="/tracing/ui/base/utils.html">
+<link rel="import" href="/tracing/ui/extras/chrome/cc/display_item_list_item.html">
 <link rel="import" href="/tracing/ui/extras/chrome/cc/picture_ops_list_view.html">
 
 <template id="tr-ui-e-chrome-cc-display-item-debugger-template">
@@ -62,20 +64,6 @@
     border-bottom: 1px solid #555;
   }
 
-  * /deep/ tr-ui-e-chrome-cc-display-item-debugger > left-panel >
-      display-item-info > .x-list-view > div {
-    border-bottom: 1px solid #555;
-    padding-top: 3px;
-    padding-bottom: 3px;
-    padding-left: 5px;
-  }
-
-  * /deep/ tr-ui-e-chrome-cc-display-item-debugger > left-panel >
-      display-item-info > .x-list-view > div:hover {
-    background-color: #f0f0f0;
-    cursor: pointer;
-  }
-
   /*************************************************/
 
   * /deep/ tr-ui-e-chrome-cc-display-item-debugger > right-panel >
@@ -218,11 +206,10 @@
 
       this.displayItemListView_.clear();
       this.displayItemList_.items.forEach(function(item) {
-        var newListItem = document.createElement('div');
-        newListItem.innerText = item;
-        // FIXME: We should improve our output to better format this.
-        var text = item.skp64 ? item.name : item;
-        this.displayItemListView_.addItem(text);
+        var listItem = document.createElement(
+            'tr-ui-e-chrome-cc-display-item-list-item');
+        listItem.data = item;
+        this.displayItemListView_.appendChild(listItem);
       }.bind(this));
     },
 
@@ -468,7 +455,7 @@
     },
 
     onExportSkPictureClicked_: function() {
-      var rawData = atob(this.picture_.getBase64SkpData());
+      var rawData = tr.b.Base64.atob(this.picture_.getBase64SkpData());
       this.saveFile_(this.skpFilename_.value, rawData);
     }
   };
diff --git a/catapult/tracing/tracing/ui/extras/chrome/cc/display_item_list_item.html b/catapult/tracing/tracing/ui/extras/chrome/cc/display_item_list_item.html
new file mode 100644
index 0000000..b34495a
--- /dev/null
+++ b/catapult/tracing/tracing/ui/extras/chrome/cc/display_item_list_item.html
@@ -0,0 +1,124 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<!--
+An element displaying basic information about a display item in a list view.
+-->
+<polymer-element name="tr-ui-e-chrome-cc-display-item-list-item">
+  <template>
+    <style>
+      :host {
+        border-bottom: 1px solid #555;
+        display: block;
+        font-size: 12px;
+        padding: 3px 5px;
+      }
+
+      :host(:hover) {
+        background-color: #f0f0f0;
+        cursor: pointer;
+      }
+
+      .header {
+        font-weight: bold;
+        margin: 2px 0;
+      }
+
+      .header > .extra {
+        background-color: #777;
+        border-radius: 4px;
+        color: white;
+        margin: 0 6px;
+        text-decoration: none;
+        padding: 2px 4px;
+      }
+
+      .raw-details {
+        white-space: pre-wrap;
+      }
+
+      .details > dl {
+        margin: 0;
+      }
+
+      :host(:not([selected])) .details {
+        display: none;
+      }
+    </style>
+    <div class="header">
+      {{name}}
+      <template if="{{richDetails && richDetails.skp64}}">
+        <a class="extra"
+           href="data:application/octet-stream;base64,{{richDetails.skp64}}"
+           download="drawing.skp" on-click="{{stopPropagation}}">SKP</a>
+      </template>
+    </div>
+    <div class="details">
+      <template if="{{rawDetails}}">
+        <div class="raw-details">{{rawDetails}}</div>
+      </template>
+      <template if="{{richDetails}}" bind="{{richDetails}}">
+        <dl>
+          <template if="{{cullRect}}" bind="{{cullRect}}">
+            <dt>Cull rect</dt>
+            <dd>{{x}},{{y}} {{width}}&times;{{height}}</dd>
+          </template>
+          <template if="{{visualRect}}" bind="{{visualRect}}">
+            <dt>Visual rect</dt>
+            <dd>{{x}},{{y}} {{width}}&times;{{height}}</dd>
+          </template>
+        </dl>
+      </template>
+    </div>
+  </template>
+  <script>
+    'use strict';
+    (function() {
+      // Extracts the "type" and "details" parts of the unstructured (plaintext)
+      // display item format, even if the details span multiple lines.
+      // For example, given "FooDisplayItem type=hello\nworld", produces
+      // "FooDisplayItem" as the first capture and "type=hello\nworld" as the
+      // second. Either capture could be the empty string, but this regex will
+      // still successfully match.
+      var DETAILS_SPLIT_REGEX = /^(\S*)\s*([\S\s]*)$/;
+
+      Polymer({
+        created: function() {
+          this.name = '';
+          this.rawDetails = '';
+          this.richDetails = undefined;
+          this.data_ = undefined;
+        },
+
+        get data() {
+          return this.data_;
+        },
+
+        set data(data) {
+          this.data_ = data;
+
+          if (!data) {
+            this.name = 'DATA MISSING';
+            this.rawDetails = '';
+            this.richDetails = undefined;
+          } else if (typeof data === 'string') {
+            var match = data.match(DETAILS_SPLIT_REGEX);
+            this.name = match[1];
+            this.rawDetails = match[2];
+            this.richDetails = undefined;
+          } else {
+            this.name = data.name;
+            this.rawDetails = '';
+            this.richDetails = data;
+          }
+        },
+
+        stopPropagation: function(e) { e.stopPropagation(); }
+      });
+    })();
+  </script>
+</polymer-element>
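
The data setter above accepts either the legacy plaintext form or a structured object; for the plaintext form, DETAILS_SPLIT_REGEX separates the item type from its details. A self-contained example (the input string is made up for illustration):

  var DETAILS_SPLIT_REGEX = /^(\S*)\s*([\S\s]*)$/;
  var match = 'DrawingDisplayItem visualRect: 0,0 800x600\nopaque: true'
      .match(DETAILS_SPLIT_REGEX);
  // match[1] === 'DrawingDisplayItem'                      -> this.name
  // match[2] === 'visualRect: 0,0 800x600\nopaque: true'   -> this.rawDetails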
diff --git a/catapult/tracing/tracing/ui/extras/chrome/cc/picture_debugger.html b/catapult/tracing/tracing/ui/extras/chrome/cc/picture_debugger.html
index bd6777e..28d51d1 100644
--- a/catapult/tracing/tracing/ui/extras/chrome/cc/picture_debugger.html
+++ b/catapult/tracing/tracing/ui/extras/chrome/cc/picture_debugger.html
@@ -5,6 +5,7 @@
 found in the LICENSE file.
 -->
 
+<link rel="import" href="/tracing/base/base64.html">
 <link rel="import" href="/tracing/extras/chrome/cc/picture.html">
 <link rel="import" href="/tracing/ui/analysis/generic_object_view.html">
 <link rel="import" href="/tracing/ui/base/drag_handle.html">
@@ -234,7 +235,7 @@
 
     onSaveAsSkPictureClicked_: function() {
       // Decode base64 data into a String
-      var rawData = atob(this.picture_.getBase64SkpData());
+      var rawData = tr.b.Base64.atob(this.picture_.getBase64SkpData());
 
       // Convert this String into an Uint8Array
       var length = rawData.length;
diff --git a/catapult/tracing/tracing/ui/extras/chrome/cc/raster_task_view.html b/catapult/tracing/tracing/ui/extras/chrome/cc/raster_task_view.html
index 4e25389..b402084 100644
--- a/catapult/tracing/tracing/ui/extras/chrome/cc/raster_task_view.html
+++ b/catapult/tracing/tracing/ui/extras/chrome/cc/raster_task_view.html
@@ -9,7 +9,8 @@
 <link rel="import" href="/tracing/ui/analysis/analysis_sub_view.html">
 <link rel="import" href="/tracing/ui/base/table.html">
 <link rel="import" href="/tracing/ui/extras/chrome/cc/selection.html">
-<link rel="import" href="/tracing/ui/units/time_duration_span.html">
+<link rel="import" href="/tracing/value/ui/scalar_span.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <polymer-element name="tr-ui-e-chrome-cc-raster-task-view">
   <template>
@@ -44,7 +45,10 @@
     },
 
     updateColumns_: function(hadCpuDurations) {
-      var timeSpanConfig = {ownerDocument: this.ownerDocument};
+      var timeSpanConfig = {
+        unit: tr.v.Unit.byName.timeDurationInMs,
+        ownerDocument: this.ownerDocument
+      };
 
       var columns = [
         {
@@ -86,8 +90,7 @@
         {
           title: 'Wall Duration (ms)',
           value: function(row) {
-            return tr.ui.units.createTimeDurationSpan(
-                row.duration, timeSpanConfig);
+            return tr.v.ui.createScalarSpan(row.duration, timeSpanConfig);
           },
           cmp: function(a, b) { return a.duration - b.duration; }
         }
@@ -97,8 +100,7 @@
         columns.push({
           title: 'CPU Duration (ms)',
           value: function(row) {
-            return tr.ui.units.createTimeDurationSpan(
-                row.duration, timeSpanConfig);
+            return tr.v.ui.createScalarSpan(row.cpuDuration, timeSpanConfig);
           },
           cmp: function(a, b) { return a.cpuDuration - b.cpuDuration; }
         });
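
The hunks above replace the dedicated time-duration span with the generic scalar span, so the unit now travels in the config object; they also fix the CPU Duration column to read row.cpuDuration rather than row.duration. A sketch of the new pattern, with `row` standing in for a table row:

  var timeSpanConfig = {
    unit: tr.v.Unit.byName.timeDurationInMs,
    ownerDocument: document
  };
  // Each duration cell is now a unit-aware scalar span:
  var cell = tr.v.ui.createScalarSpan(row.cpuDuration, timeSpanConfig);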
diff --git a/catapult/tracing/tracing/ui/extras/chrome_config.html b/catapult/tracing/tracing/ui/extras/chrome_config.html
index f76c5f6..a928f5f 100644
--- a/catapult/tracing/tracing/ui/extras/chrome_config.html
+++ b/catapult/tracing/tracing/ui/extras/chrome_config.html
@@ -5,8 +5,6 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/tracing/extras/chrome_config.html">
-
 <!--
 The chrome config is heavily used:
   - chrome://tracing,
@@ -15,20 +13,11 @@
     - telemetry
 -->
 
-<!-- Core UI configs -->
+<link rel="import" href="/tracing/extras/chrome_config.html">
 <link rel="import" href="/tracing/ui/base/ui.html">
-<link rel="import" href="/tracing/ui/timeline_view.html">
-
-<!-- Lots of chrome-specific extras -->
 <link rel="import" href="/tracing/ui/extras/chrome/cc/cc.html">
 <link rel="import" href="/tracing/ui/extras/chrome/gpu/gpu.html">
-<link rel="import" href="/tracing/ui/extras/system_stats/system_stats.html">
-<link rel="import" href="/tracing/ui/extras/tcmalloc/tcmalloc.html">
-
-<!-- Side panels -->
-<link rel="import" href="/tracing/ui/extras/side_panel/category_summary_side_panel.html">
 <link rel="import" href="/tracing/ui/extras/side_panel/input_latency_side_panel.html">
 <link rel="import" href="/tracing/ui/extras/side_panel/time_summary_side_panel.html">
-
-<!-- RAIL is fun too -->
-<link rel="import" href="/tracing/ui/extras/rail/rail_score_side_panel.html">
+<link rel="import" href="/tracing/ui/extras/system_stats/system_stats.html">
+<link rel="import" href="/tracing/ui/timeline_view.html">
diff --git a/catapult/tracing/tracing/ui/extras/deep_reports/main.html b/catapult/tracing/tracing/ui/extras/deep_reports/main.html
index 6226ccc..88af3f2 100644
--- a/catapult/tracing/tracing/ui/extras/deep_reports/main.html
+++ b/catapult/tracing/tracing/ui/extras/deep_reports/main.html
@@ -7,9 +7,8 @@
 
 <link rel="import" href="/tracing/base/base.html">
 <link rel="import" href="/tracing/base/iteration_helpers.html">
-<link rel="import" href="/tracing/extras/rail/rail_score.html">
-<link rel="import" href="/tracing/ui/extras/deep_reports/scalar_value.html">
 <link rel="import" href="/tracing/base/xhr.html">
+<link rel="import" href="/tracing/ui/extras/deep_reports/scalar_value.html">
 
 <script>
 'use strict';
@@ -54,16 +53,9 @@
   }
 
   function processModel(results, page, model) {
-    var railScore = tr.e.rail.RAILScore.fromModel(model);
-    if (railScore === undefined)
-      return;
-
     results.addValue(
         new tr.ui.e.deep_reports.ScalarValue(
-            page, 'numRailIRs', 'ms', railScore.interactionRecords.length));
-    results.addValue(
-      new tr.ui.e.deep_reports.ScalarValue(
-        page, 'railScore', 'rails', railScore.overallScore));
+            page, 'numRailIRs', 'ms', model.userModel.expectations.length));
   }
 
   return {
diff --git a/catapult/tracing/tracing/ui/extras/drive/comments_side_panel.html b/catapult/tracing/tracing/ui/extras/drive/comments_side_panel.html
index 33ae185..3728481 100644
--- a/catapult/tracing/tracing/ui/extras/drive/comments_side_panel.html
+++ b/catapult/tracing/tracing/ui/extras/drive/comments_side_panel.html
@@ -155,10 +155,6 @@
       this.selection_ = selection;
     },
 
-    get listeningToKeys() {
-      return this.textAreaFocused;
-    },
-
     updateContents_: function() {
       this.commentProvider_.updateComments();
     },
diff --git a/catapult/tracing/tracing/ui/extras/full_config.html b/catapult/tracing/tracing/ui/extras/full_config.html
index 09fab1f..f27fbaf 100644
--- a/catapult/tracing/tracing/ui/extras/full_config.html
+++ b/catapult/tracing/tracing/ui/extras/full_config.html
@@ -8,5 +8,5 @@
 <!-- The full config is all the configs slammed together. -->
 <link rel="import" href="/tracing/extras/importer/gcloud_trace/gcloud_trace_importer.html">
 <link rel="import" href="/tracing/ui/extras/chrome_config.html">
-<link rel="import" href="/tracing/ui/extras/systrace_config.html">
 <link rel="import" href="/tracing/ui/extras/lean_config.html">
+<link rel="import" href="/tracing/ui/extras/systrace_config.html">
diff --git a/catapult/tracing/tracing/ui/extras/lean_config.html b/catapult/tracing/tracing/ui/extras/lean_config.html
index 54268b6..2094d41 100644
--- a/catapult/tracing/tracing/ui/extras/lean_config.html
+++ b/catapult/tracing/tracing/ui/extras/lean_config.html
@@ -12,3 +12,4 @@
 json blobs.
 -->
 <link rel="import" href="/tracing/ui/extras/highlighter/vsync_highlighter.html">
+<link rel="import" href="/tracing/ui/side_panel/file_size_stats_side_panel.html">
diff --git a/catapult/tracing/tracing/ui/extras/rail/ir_verifier_row.html b/catapult/tracing/tracing/ui/extras/rail/ir_verifier_row.html
deleted file mode 100644
index 629e524..0000000
--- a/catapult/tracing/tracing/ui/extras/rail/ir_verifier_row.html
+++ /dev/null
@@ -1,123 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/base.html">
-
-<polymer-element name="tr-ui-e-rail-ir-verifier-row">
-  <template>
-    <style>
-    :host {
-      display: table-row;
-      border-spacing: 0px;
-    }
-    #label {
-      width: 1px;
-    }
-    td {
-      vertical-align: top;
-      padding: 0px;
-    }
-    </style>
-
-    <td id="label"></td>
-    <td><canvas id="canvas"></td>
-  </template>
-  <script>
-  'use strict';
-  (function() {
-    var ColorScheme = tr.b.ColorScheme;
-
-    // Returns a list of non-overlapping lists of events.
-    // |events| must be sorted by start time.
-    // C.f. AsyncSliceGroupTrack.buildSubRows_()
-    function organizeEventsIntoTracks(events) {
-      if (events.length === 0)
-        return [[]];
-
-      var tracks = [[events.shift()]];
-      events.forEach(function(event) {
-        for (var tracki = 0; tracki < tracks.length; ++tracki) {
-          var track = tracks[tracki];
-          var lastEvent = track[track.length - 1];
-          if (event.start >= lastEvent.end) {
-            track.push(event);
-            return;
-          }
-        }
-        tracks.push([event]);
-      });
-      return tracks;
-    }
-
-    function getEventColor(event) {
-      var typeNameOrTitle = event.typeName || event.title;
-      var colorId;
-      if (event.railTypeName_) {
-        colorId = ColorScheme.getColorIdForReservedName(event.railTypeName_);
-      } else {
-        colorId = ColorScheme.getColorIdForGeneralPurposeString(
-          typeNameOrTitle);
-      }
-      return ColorScheme.colorsAsStrings[colorId];
-    }
-
-    Polymer({
-      created: function() {
-        this.events_ = undefined;
-        this.bounds_ = undefined;
-      },
-
-      // |events| must be sorted by start time.
-      set events(events) {
-        this.events_ = events;
-      },
-
-      set bounds(bounds) {
-        this.bounds_ = bounds;
-      },
-
-      set labelString(s) {
-        this.$.label.textContent = s;
-      },
-
-      update: function() {
-        var tracks = organizeEventsIntoTracks(this.events_);
-        var totalWidth = this.getBoundingClientRect().width;
-        var labelWidth = this.$.label.getBoundingClientRect().width;
-        this.$.canvas.width = totalWidth - labelWidth;
-        var xScale = this.$.canvas.width / this.bounds_.max;
-        var ROW_HEIGHT = 20;
-        this.$.canvas.height = ROW_HEIGHT * tracks.length;
-        var context = this.$.canvas.getContext('2d');
-        tracks.forEach(function(track, trackIndex) {
-          var y = 20 * trackIndex;
-          track.forEach(function(event) {
-            var x = event.start * xScale;
-            var w = (event.end - event.start) * xScale;
-
-            context.beginPath();
-            context.rect(x, y, w, ROW_HEIGHT - 1);
-            context.fillStyle = getEventColor(event);
-            context.fill();
-            context.lineWidth = 1;
-            context.strokeStyle = 'black';
-            context.stroke();
-
-            context.beginPath();
-            context.font = '15px Arial';
-            context.fillStyle = 'black';
-            context.textAlign = 'center';
-            var textString = event.typeName || event.title;
-            context.fillText(textString, x + (w / 2), y + (3 * ROW_HEIGHT / 4));
-            context.stroke();
-          });
-        });
-      }
-    });
-  })();
-  </script>
-</polymer-element>
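
For reference, the deleted organizeEventsIntoTracks helper above packed start-sorted events greedily: each event goes into the first row whose last event has already ended, otherwise a new row is opened. A standalone sketch with made-up events:

  function organizeEventsIntoTracks(events) {
    if (events.length === 0)
      return [[]];
    var tracks = [[events.shift()]];
    events.forEach(function(event) {
      for (var i = 0; i < tracks.length; ++i) {
        var last = tracks[i][tracks[i].length - 1];
        if (event.start >= last.end) {
          tracks[i].push(event);
          return;
        }
      }
      tracks.push([event]);
    });
    return tracks;
  }

  organizeEventsIntoTracks([
    {start: 0, end: 10}, {start: 5, end: 15}, {start: 12, end: 20}
  ]);
  // => [ [{start: 0, end: 10}, {start: 12, end: 20}], [{start: 5, end: 15}] ]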
diff --git a/catapult/tracing/tracing/ui/extras/rail/ir_verifier_view.html b/catapult/tracing/tracing/ui/extras/rail/ir_verifier_view.html
deleted file mode 100644
index 78af769..0000000
--- a/catapult/tracing/tracing/ui/extras/rail/ir_verifier_view.html
+++ /dev/null
@@ -1,87 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/ui/extras/rail/ir_verifier_row.html">
-
-<polymer-element name="tr-ui-e-rail-ir-verifier-view">
-  <template>
-    <style>
-    :host {
-      display: table;
-    }
-    table {
-      border-collapse: collapse;
-      border-spacing: 0px;
-    }
-    #expected {
-      display: none;
-    }
-    </style>
-
-    <tr-ui-e-rail-ir-verifier-row id="expected"></tr-ui-e-rail-ir-verifier-row>
-    <tr-ui-e-rail-ir-verifier-row id="actual"></tr-ui-e-rail-ir-verifier-row>
-    <tr-ui-e-rail-ir-verifier-row id="model"></tr-ui-e-rail-ir-verifier-row>
-  </template>
-  <script>
-  'use strict';
-  (function() {
-    Polymer({
-      created: function() {
-        this.hasExpectedIRs_ = false;
-      },
-
-      ready: function() {
-        this.$.actual.labelString = 'Interactions:';
-        this.$.model.labelString = 'Model:';
-      },
-
-      set bounds(bounds) {
-        this.$.expected.bounds = bounds;
-        this.$.actual.bounds = bounds;
-        this.$.model.bounds = bounds;
-      },
-
-      set actualIRs(irs) {
-        this.$.actual.events = irs;
-      },
-
-      set expectedIRs(irs) {
-        // Make getEventColor() for the expectedIRs match the actualIRs:
-        irs.forEach(function(eir) {
-          if (eir.title === 'Response') {
-            eir.railTypeName_ = 'rail_response';
-          } else if (eir.title === 'Animation') {
-            eir.railTypeName_ = 'rail_animate';
-          } else if (eir.title === 'Idle') {
-            eir.railTypeName_ = 'rail_idle';
-          } else if (eir.title === 'Load') {
-            eir.railTypeName_ = 'rail_load';
-          }
-        });
-
-        this.$.expected.labelString = 'Expected:';
-        this.$.actual.labelString = 'Actual:';
-        this.$.expected.events = irs;
-        this.$.expected.display = 'table-row';
-        this.hasExpectedIRs_ = true;
-      },
-
-      set model(events) {
-        this.$.model.events = events;
-      },
-
-      update: function() {
-        this.style.width = (window.innerWidth - 30) + 'px';
-        if (this.hasExpectedIRs_)
-          this.$.expected.update();
-        this.$.actual.update();
-        this.$.model.update();
-      }
-    });
-  })();
-  </script>
-</polymer-element>
diff --git a/catapult/tracing/tracing/ui/extras/rail/rail_score_side_panel.html b/catapult/tracing/tracing/ui/extras/rail/rail_score_side_panel.html
deleted file mode 100644
index a4b296d..0000000
--- a/catapult/tracing/tracing/ui/extras/rail/rail_score_side_panel.html
+++ /dev/null
@@ -1,312 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/extras/rail/rail_score.html">
-<link rel="import" href="/tracing/extras/rail/rail_ir_finder.html">
-<link rel="import" href="/tracing/model/ir_coverage.html">
-<link rel="import" href="/tracing/ui/analysis/analysis_link.html">
-<link rel="import" href="/tracing/ui/base/color_legend.html">
-<link rel="import" href="/tracing/ui/base/table.html">
-<link rel="import" href="/tracing/ui/extras/rail/rail_score_span.html">
-<link rel="import" href="/tracing/ui/side_panel/side_panel.html">
-<link rel="import" href="/tracing/ui/units/time_duration_span.html">
-
-<polymer-element name='tr-ui-e-rail-rail-score-side-panel'
-    extends='tr-ui-side-panel'>
-  <template>
-    <style>
-    :host {
-      display: flex;
-      flex-direction: column;
-      width: 450px;
-      overflow-x: auto;
-    }
-
-    #score {
-      background-color: rgb(236, 236, 236)
-      flex: 0 0 auto;
-    }
-
-    #content {
-      min-width: 0;
-      flex-direction: column;
-      display: flex;
-      flex: 1 1 auto;
-    }
-
-    #coverage {
-      font-size: 10px;
-    }
-    </style>
-
-    <tr-ui-e-rail-rail-score-span id="score"></tr-ui-e-rail-rail-score-span>
-    <tr-ui-b-table id="table"></tr-ui-b-table>
-
-    <div id="coverage">
-      <b>Coverage:</b><br>
-      <tr-ui-a-analysis-link id="associated-events"></tr-ui-a-analysis-link><br>
-      <tr-ui-a-analysis-link id="unassociated-events"></tr-ui-a-analysis-link>
-    </div>
-    <button id="test">Create Test</button>
-  </template>
-</polymer-element>
-
-<script>
-'use strict';
-(function() {
-  function setCoverageLink(
-      link, labelString, events, eventRatio, cpuMs, cpuRatio) {
-    link.setSelectionAndContent(events);
-
-    labelString += ' ' + events.length + ' events';
-    labelString += ' (' + parseInt(100 * eventRatio) + '%)';
-    if (cpuRatio !== undefined)
-      labelString += ', ';
-    link.appendChild(document.createTextNode(labelString));
-
-    if (cpuRatio === undefined)
-      return;
-
-    var cpuMsSpan = document.createElement('tr-ui-u-time-duration-span');
-    // There will be some text after the cpuMsSpan, that should be on the same
-    // line if it fits. This "span" has display: block for its sparkline... so I
-    // guess I'll set it inline here?
-    cpuMsSpan.style.display = 'inline';
-    cpuMsSpan.duration = cpuMs;
-    cpuMsSpan.contentTextDecoration = 'underline';
-    link.appendChild(cpuMsSpan);
-
-    var cpuString = ' (' + parseInt(100 * cpuRatio) + '%)';
-    link.appendChild(document.createTextNode(cpuString));
-  }
-
-  Polymer('tr-ui-e-rail-rail-score-side-panel', {
-    ready: function() {
-      this.rangeOfInterest_ = new tr.b.Range();
-      this.model_ = undefined;
-      this.railScore_ = undefined;
-      this.selection_ = new tr.model.EventSet();
-      this.$.test.addEventListener('click', this.createTest_.bind(this));
-    },
-
-    createTest_: function() {
-      var overlay = new tr.ui.b.Overlay();
-      overlay.title = 'RAILIRFinder test';
-      var textarea = document.createElement('textarea');
-      textarea.textContent = tr.e.rail.createIRFinderTestCaseStringFromModel(
-          this.model_);
-      textarea.rows = textarea.textContent.split('\n').length;
-      textarea.cols = 80;
-      overlay.appendChild(textarea);
-      overlay.visible = true;
-      textarea.select();
-      textarea.focus();
-    },
-
-    get textLabel() {
-     return 'RAIL Info';
-    },
-
-    supportsModel: function(m) {
-      if (m === undefined) {
-        return {
-          supported: false,
-          reason: 'Unknown tracing model'
-        };
-      }
-
-      var railScore = tr.e.rail.RAILScore.fromModel(m);
-      if (railScore === undefined) {
-        return {
-          supported: false,
-          reason: 'RAIL interactions were not found on the model'
-        };
-      }
-
-      return {
-        supported: true
-      };
-    },
-
-    get model() {
-      return this.model_;
-    },
-
-    set model(model) {
-      this.model_ = model;
-      this.updateScore_();
-      this.updateCoverage_();
-      this.updateTable_();
-    },
-
-    get listeningToKeys() {
-      return false;
-    },
-
-    get effectiveRangeOfInterest() {
-      if (!this.rangeOfInterest_ || this.rangeOfInterest_.isEmpty) {
-        if (this.model)
-          return this.model.bounds;
-        else
-          return new tr.b.Range();
-      }
-
-      return this.rangeOfInterest_;
-    },
-
-    set rangeOfInterest(rangeOfInterest) {
-      this.rangeOfInterest_ = rangeOfInterest;
-      this.updateScore_();
-      // TODO(benjhayden): Make updateCoverage_ reflect rangeOfInterest.
-      // https://github.com/catapult-project/catapult/issues/1753
-      this.updateTable_();
-    },
-
-    updateScore_: function() {
-      if (!this.model)
-        return;
-
-      this.railScore_ = tr.e.rail.RAILScore.fromModel(
-          this.model, this.effectiveRangeOfInterest);
-      this.$.score.railScore = this.railScore_;
-    },
-
-    updateCoverage_: function() {
-      if (!this.model)
-        return;
-
-      var coverage = tr.model.getIRCoverageFromModel(this.model);
-
-      if (!coverage)
-        return;
-
-      var associatedEvents =
-          tr.model.getAssociatedEvents(this.model.interactionRecords);
-      var unassociatedEvents = tr.model.getUnassociatedEvents(
-          this.model, associatedEvents);
-
-      setCoverageLink(this.shadowRoot.querySelector('#associated-events'),
-                      'Associated',
-                      associatedEvents,
-                      coverage.coveredEventsCountRatio,
-                      coverage.associatedEventsCpuTimeMs,
-                      coverage.coveredEventsCpuTimeRatio);
-      setCoverageLink(this.shadowRoot.querySelector('#unassociated-events'),
-                      'Unassociated',
-                      unassociatedEvents,
-                      1.0 - coverage.coveredEventsCountRatio,
-                      coverage.unassociatedEventsCpuTimeMs,
-                      1.0 - coverage.coveredEventsCpuTimeRatio);
-    },
-
-    updateTable_: function() {
-      if (!this.model)
-        return;
-
-      function toThreeDigitLocaleString(value) {
-        return value.toLocaleString(undefined,
-                                    {minimumFractionDigits: 3,
-                                     maximumFractionDigits: 3});
-      }
-
-      var columns = [
-        {
-          title: 'Type',
-          width: '150px',
-          value: function(ir) {
-            var events = new tr.model.EventSet([ir]);
-            events.addEventSet(ir.associatedEvents);
-
-            var linkEl = document.createElement('tr-ui-a-analysis-link');
-            linkEl.setSelectionAndContent(events, ir.railTypeName);
-
-            var el = document.createElement('tr-ui-b-color-legend');
-            el.setLabelAndColorId(linkEl, ir.colorId);
-            el.compoundEventSelectionState =
-                ir.computeCompoundEvenSelectionState(this.selection_);
-            return el;
-          }.bind(this),
-          cmp: function(a, b) {
-            return a.railTypeName.localeCompare(b.railTypeName);
-          }
-        },
-        {
-          title: 'Efficiency',
-          width: '33%',
-          value: function(ir) {
-            return toThreeDigitLocaleString(ir.normalizedEfficiency);
-          },
-          textAlign: 'right',
-          cmp: function(a, b) {
-            return a.normalizedEfficiency - b.normalizedEfficiency;
-          }
-        },
-        {
-          title: 'Comfort',
-          width: '33%',
-          value: function(ir) {
-            return toThreeDigitLocaleString(ir.normalizedUserComfort);
-          },
-          textAlign: 'right',
-          cmp: function(a, b) {
-            return a.normalizedUserComfort - b.normalizedUserComfort;
-          }
-        },
-        {
-          title: 'Score',
-          width: '33%',
-          value: function(ir) {
-            var span = document.createElement('span');
-            span.style.fontWeight = 'bold';
-            span.textContent = toThreeDigitLocaleString(ir.railScore);
-            return span;
-          },
-          textAlign: 'right',
-          cmp: function(a, b) {
-            return a.railScore - b.railScore;
-          }
-        }
-      ];
-
-      this.$.table.tableColumns = columns;
-
-      var rows = [];
-      if (this.railScore_) {
-        this.railScore_.interactionRecords.forEach(function(ir) {
-          if (!this.effectiveRangeOfInterest.intersectsExplicitRangeExclusive(
-                ir.start, ir.end))
-            return;
-
-          rows.push(ir);
-        }, this);
-      }
-
-      this.$.table.tableRows = rows;
-      this.$.table.rebuild();
-    },
-
-    onTableSelectionChanged_: function() {
-      var selectedIR = this.$.table.selectedTableRow;
-
-      var event = new tr.c.RequestSelectionChangeEvent();
-      event.selection = new tr.c.Selection([selectedIR]);
-      this.dispatchEvent(event);
-    },
-
-    set selection(selection) {
-      if (selection === undefined)
-        selection = new tr.model.EventSet();
-
-      if (this.selection_.equals(selection))
-        return;
-
-      this.selection_ = selection;
-      this.updateTable_();
-    }
-  });
-})();
-</script>
diff --git a/catapult/tracing/tracing/ui/extras/rail/rail_score_side_panel_test.html b/catapult/tracing/tracing/ui/extras/rail/rail_score_side_panel_test.html
deleted file mode 100644
index f026a2e..0000000
--- a/catapult/tracing/tracing/ui/extras/rail/rail_score_side_panel_test.html
+++ /dev/null
@@ -1,89 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/core/test_utils.html">
-<link rel="import" href="/tracing/extras/rail/stub_rail_interaction_record.html">
-<link rel="import" href="/tracing/model/model.html">
-<link rel="import" href="/tracing/ui/extras/rail/rail_score_side_panel.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  var test_utils = tr.c.TestUtils;
-  var CompoundEventSelectionState = tr.model.CompoundEventSelectionState;
-
-  function createModel(opt_customizeModelCallback) {
-    return test_utils.newModel(function(model) {
-      model.p1 = model.getOrCreateProcess(1);
-      model.t2 = model.p1.getOrCreateThread(2);
-
-      model.s1 = model.t2.sliceGroup.pushSlice(test_utils.newSliceEx({
-        title: 'a', start: 10, end: 20
-      }));
-      model.s2 = model.t2.sliceGroup.pushSlice(test_utils.newSliceEx({
-        title: 'b', start: 20, end: 30
-      }));
-      model.s3 = model.t2.sliceGroup.pushSlice(test_utils.newSliceEx({
-        title: 'c', start: 30, end: 40
-      }));
-
-      model.ir1 = new tr.e.rail.StubRAILInteractionRecord({
-        parentModel: model,
-        start: 0, end: 100,
-        railTypeName: 'rail_idle',
-        normalizedEfficiency: 0.8,
-        normalizedUserComfort: 0.0,
-        associatedEvents: [
-          {
-            guid: 0,
-            isTopLevel: true,
-            cpuDuration: 1
-          }
-        ]
-      });
-      model.addInteractionRecord(model.ir1);
-      model.ir1.associatedEvents.push(model.s1);
-
-      model.ir2 = new tr.e.rail.StubRAILInteractionRecord({
-        parentModel: model,
-        start: 100, end: 200,
-        railTypeName: 'rail_response',
-        normalizedEfficiency: 0.92935252939242,
-        normalizedUserComfort: 0.24128432825823582,
-        associatedEvents: [
-          {
-            guid: 1,
-            isTopLevel: true,
-            cpuDuration: 1
-          }
-        ]
-      });
-      model.addInteractionRecord(model.ir2);
-
-      model.ir2.associatedEvents.push(model.s2);
-      model.ir2.associatedEvents.push(model.s3);
-
-      if (opt_customizeModelCallback)
-        opt_customizeModelCallback(model);
-    });
-  }
-
-  test('instantiate', function() {
-    var panel = document.createElement('tr-ui-e-rail-rail-score-side-panel');
-    panel.model = createModel();
-    panel.style.height = '200px';
-
-    var rows = panel.$.table.tableRows;
-    assert.equal(rows.length, 2);
-
-    this.addHTMLOutput(panel);
-
-    panel.rangeOfInterest = tr.b.Range.fromExplicitRange(0, 10);
-  });
-});
-</script>
diff --git a/catapult/tracing/tracing/ui/extras/rail/rail_score_span.html b/catapult/tracing/tracing/ui/extras/rail/rail_score_span.html
deleted file mode 100644
index d23de11..0000000
--- a/catapult/tracing/tracing/ui/extras/rail/rail_score_span.html
+++ /dev/null
@@ -1,59 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/base.html">
-
-<polymer-element name="tr-ui-e-rail-rail-score-span">
-  <template>
-  <style>
-    :host {
-      display: span;
-    }
-  </style>
-  <span id="content">
-    <span>RAIL Score: </span><span id="score"></span>
-  </span>
-  </template>
-  <script>
-  'use strict';
-
-  Polymer({
-    created: function() {
-      this.railScore_ = undefined;
-    },
-
-    ready: function() {
-      this.updateContent_();
-    },
-
-    get railScore() {
-      return this.railScore_;
-    },
-
-    set railScore(railScore) {
-      this.railScore_ = railScore;
-      this.updateContent_();
-    },
-
-    updateContent_: function() {
-      if (this.railScore_ === undefined) {
-        this.$.content.style.display = 'none';
-        return;
-      }
-      this.$.content.style.display = '';
-      var overallScore = this.railScore_.overallScore;
-      if (overallScore === undefined) {
-        this.$.score.textContent = '';
-        return;
-      }
-      this.$.score.textContent = overallScore.toLocaleString(
-          undefined,
-          {minimumFractionDigits: 3});
-    }
-  });
-  </script>
-</polymer-element>
diff --git a/catapult/tracing/tracing/ui/extras/side_panel/alerts_side_panel.html b/catapult/tracing/tracing/ui/extras/side_panel/alerts_side_panel.html
index 5c6a5e9..5cc5b6d 100644
--- a/catapult/tracing/tracing/ui/extras/side_panel/alerts_side_panel.html
+++ b/catapult/tracing/tracing/ui/extras/side_panel/alerts_side_panel.html
@@ -6,7 +6,6 @@
 -->
 
 <link rel="import" href="/tracing/base/statistics.html">
-<link rel="import" href="/tracing/extras/chrome/chrome_model_helper.html">
 <link rel="import" href="/tracing/model/event_set.html">
 <link rel="import" href="/tracing/ui/base/table.html">
 <link rel="import" href="/tracing/ui/side_panel/side_panel.html">
@@ -51,10 +50,6 @@
       this.updateContents_();
     },
 
-    get listeningToKeys() {
-      return false;
-    },
-
     set selection(selection) {
     },
 
diff --git a/catapult/tracing/tracing/ui/extras/side_panel/category_summary_side_panel.html b/catapult/tracing/tracing/ui/extras/side_panel/category_summary_side_panel.html
deleted file mode 100644
index 939a861..0000000
--- a/catapult/tracing/tracing/ui/extras/side_panel/category_summary_side_panel.html
+++ /dev/null
@@ -1,142 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/category_util.html">
-<link rel="import" href="/tracing/ui/base/table.html">
-<link rel="import" href="/tracing/ui/side_panel/side_panel.html">
-
-<polymer-element name='tr-ui-e-s-category-summary-side-panel'
-    extends='tr-ui-side-panel'>
-  <template>
-    <style>
-    :host {
-      display: block;
-      width: 450px;
-      overflow-x: auto;
-    }
-    </style>
-
-    <tr-ui-b-table id="table"></tr-ui-b-table>
-  </template>
-
-  <script>
-  'use strict';
-
-  Polymer({
-    ready: function() {
-    },
-
-    get model() {
-      return this.model_;
-    },
-
-    set model(model) {
-      this.model_ = model;
-      this.updateContents_();
-    },
-
-    get listeningToKeys() {
-      return false;
-    },
-
-    set selection(selection) {
-    },
-
-    set rangeOfInterest(rangeOfInterest) {
-    },
-
-    updateContents_: function() {
-      this.$.table.tableColumns = [
-        {
-          title: 'Category / Title',
-          value: function(row) {
-            return row.title;
-          }
-        },
-        {
-          title: 'Events',
-          textAlign: 'right',
-          value: function(row) {
-            return row.count;
-          }
-        }
-      ];
-
-      if (this.model_ === undefined) {
-        this.$.table.tableRows = [];
-        return;
-      }
-
-      var categories = {};
-
-      this.model_.iterateAllEvents(function handleEvent(event) {
-        if (!(event instanceof tr.model.Slice) &&
-            !(event instanceof tr.model.AsyncSlice) &&
-            !(event instanceof tr.model.InstantEvent) &&
-            !(event instanceof tr.model.FlowEvent))
-          return;
-
-        tr.b.getCategoryParts(event.category).forEach(function(category) {
-          if (categories[category] === undefined) {
-            categories[category] = {};
-          }
-          var titleCounts = categories[category];
-          if (titleCounts[event.title] === undefined) {
-            titleCounts[event.title] = 0;
-          }
-          titleCounts[event.title] += 1;
-        });
-      });
-
-      function compareCounts(a, b) {
-        return b.count - a.count;
-      }
-
-      var rows = [];
-      for (var category in categories) {
-        var categoryRow = {
-          title: category,
-          subRows: [],
-          count: 0
-        };
-        rows.push(categoryRow);
-
-        var titleCounts = categories[category];
-        for (var title in titleCounts) {
-          var count = titleCounts[title];
-          categoryRow.count += count;
-          categoryRow.subRows.push({
-            title: title,
-            count: count
-          });
-        }
-        categoryRow.subRows.sort(compareCounts);
-      }
-      rows.sort(compareCounts);
-
-      this.$.table.tableRows = rows;
-    },
-
-    supportsModel: function(m) {
-      if (m == undefined) {
-        return {
-          supported: false,
-          reason: 'Unknown tracing model'
-        };
-      }
-
-      return {
-        supported: true
-      };
-    },
-
-    get textLabel() {
-      return 'Categories';
-    }
-  });
-  </script>
-</polymer-element>
diff --git a/catapult/tracing/tracing/ui/extras/side_panel/category_summary_side_panel_test.html b/catapult/tracing/tracing/ui/extras/side_panel/category_summary_side_panel_test.html
deleted file mode 100644
index 9efac64..0000000
--- a/catapult/tracing/tracing/ui/extras/side_panel/category_summary_side_panel_test.html
+++ /dev/null
@@ -1,45 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/ui/extras/side_panel/category_summary_side_panel.html">
-<link rel="import" href="/tracing/core/test_utils.html">
-<link rel="import" href="/tracing/model/model.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  test('instantiate', function() {
-    var panel = document.createElement('tr-ui-e-s-category-summary-side-panel');
-    panel.model = tr.c.TestUtils.newModel(function(model) {
-      var process = model.getOrCreateProcess(1);
-      var thread = process.getOrCreateThread(2);
-      thread.sliceGroup.pushSlice(tr.c.TestUtils.newAsyncSliceEx({
-        start: 0,
-        duration: 1,
-        title: 'sync',
-        cat: 'category'
-      }));
-      thread.asyncSliceGroup.push(tr.c.TestUtils.newAsyncSliceEx({
-        start: 1,
-        duration: 2,
-        title: 'async',
-        cat: 'category'
-      }));
-      model.flowEvents.push(tr.c.TestUtils.newFlowEventEx({
-        start: 2,
-        duration: 3,
-        title: 'flow',
-        cat: 'category'
-      }));
-    });
-    panel.style.height = '100px';
-
-    this.addHTMLOutput(panel);
-  });
-});
-</script>
diff --git a/catapult/tracing/tracing/ui/extras/side_panel/input_latency_side_panel.html b/catapult/tracing/tracing/ui/extras/side_panel/input_latency_side_panel.html
index 5b63f14..2f2c39e 100644
--- a/catapult/tracing/tracing/ui/extras/side_panel/input_latency_side_panel.html
+++ b/catapult/tracing/tracing/ui/extras/side_panel/input_latency_side_panel.html
@@ -6,11 +6,11 @@
 -->
 
 <link rel="import" href="/tracing/base/statistics.html">
-<link rel="import" href="/tracing/extras/chrome/chrome_model_helper.html">
 <link rel="import" href="/tracing/model/event_set.html">
-<link rel="import" href="/tracing/ui/side_panel/side_panel.html">
+<link rel="import" href="/tracing/model/helpers/chrome_model_helper.html">
 <link rel="import" href="/tracing/ui/base/dom_helpers.html">
 <link rel="import" href="/tracing/ui/base/line_chart.html">
+<link rel="import" href="/tracing/ui/side_panel/side_panel.html">
 
 <polymer-element name='tr-ui-e-s-input-latency-side-panel'
     extends='tr-ui-side-panel'>
@@ -43,7 +43,7 @@
   Polymer({
     ready: function() {
       this.rangeOfInterest_ = new tr.b.Range();
-      this.frametimeType_ = tr.e.audits.IMPL_FRAMETIME_TYPE;
+      this.frametimeType_ = tr.model.helpers.IMPL_FRAMETIME_TYPE;
       this.latencyChart_ = undefined;
       this.frametimeChart_ = undefined;
       this.selectedProcessId_ = undefined;
@@ -57,10 +57,12 @@
 
     set model(model) {
       this.model_ = model;
-      if (this.model_)
-        this.modelHelper_ = new tr.e.audits.ChromeModelHelper(model);
-      else
+      if (this.model_) {
+        this.modelHelper_ = this.model_.getOrCreateHelper(
+            tr.model.helpers.ChromeModelHelper);
+      } else {
         this.modelHelper_ = undefined;
+      }
 
       this.updateToolbar_();
       this.updateContents_();
@@ -123,7 +125,8 @@
             latencySlices.push(event);
         });
       });
-      latencySlices = tr.e.audits.getSlicesIntersectingRange(r, latencySlices);
+      latencySlices = tr.model.helpers.getSlicesIntersectingRange(
+          r, latencySlices);
 
       var event = new tr.model.RequestSelectionChangeEvent();
       event.selection = new tr.model.EventSet(latencySlices);
@@ -176,9 +179,9 @@
           this, 'frametimeType',
           'inputLatencySidePanel.frametimeType', this.frametimeType_,
           [{label: 'Main Thread Frame Times',
-            value: tr.e.audits.MAIN_FRAMETIME_TYPE},
+            value: tr.model.helpers.MAIN_FRAMETIME_TYPE},
            {label: 'Impl Thread Frame Times',
-            value: tr.e.audits.IMPL_FRAMETIME_TYPE}
+            value: tr.model.helpers.IMPL_FRAMETIME_TYPE}
           ]));
       toolbarEl.appendChild(tr.ui.b.createSelector(
           this, 'selectedProcessId',
@@ -227,7 +230,8 @@
       var frameEvents = chromeProcess.getFrameEventsInRange(
           this.frametimeType, rangeOfInterest);
 
-      var frametimeData = tr.e.audits.getFrametimeDataFromEvents(frameEvents);
+      var frametimeData = tr.model.helpers.getFrametimeDataFromEvents(
+          frameEvents);
       var averageFrametime = tr.b.Statistics.mean(frametimeData, function(d) {
         return d.frametime;
       });
@@ -295,14 +299,14 @@
         };
       }
 
-      if (!tr.e.audits.ChromeModelHelper.supportsModel(m)) {
+      if (!tr.model.helpers.ChromeModelHelper.supportsModel(m)) {
         return {
           supported: false,
           reason: 'No Chrome browser or renderer process found'
         };
       }
 
-      var modelHelper = new tr.e.audits.ChromeModelHelper(m);
+      var modelHelper = m.getOrCreateHelper(tr.model.helpers.ChromeModelHelper);
       if (modelHelper.browserHelper &&
         modelHelper.browserHelper.hasLatencyEvents) {
           return {
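
The panel above now obtains its ChromeModelHelper through the model's helper cache instead of constructing one per call site. A sketch of the new pattern, assuming `model` is a tr.Model for a Chrome trace:

  // Old: var modelHelper = new tr.e.audits.ChromeModelHelper(model);
  if (tr.model.helpers.ChromeModelHelper.supportsModel(model)) {
    var modelHelper = model.getOrCreateHelper(
        tr.model.helpers.ChromeModelHelper);
    // modelHelper.browserHelper etc. are available as before.
  }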
diff --git a/catapult/tracing/tracing/ui/extras/side_panel/time_summary_side_panel.html b/catapult/tracing/tracing/ui/extras/side_panel/time_summary_side_panel.html
index 122d5d6..a0b6d9d 100644
--- a/catapult/tracing/tracing/ui/extras/side_panel/time_summary_side_panel.html
+++ b/catapult/tracing/tracing/ui/extras/side_panel/time_summary_side_panel.html
@@ -7,12 +7,12 @@
 
 <link rel="import" href="/tracing/base/iteration_helpers.html">
 <link rel="import" href="/tracing/base/statistics.html">
-<link rel="import" href="/tracing/base/units/time_duration.html">
 <link rel="import" href="/tracing/model/event_set.html">
 <link rel="import" href="/tracing/ui/base/dom_helpers.html">
 <link rel="import" href="/tracing/ui/base/pie_chart.html">
 <link rel="import" href="/tracing/ui/side_panel/side_panel.html">
-<link rel="import" href="/tracing/ui/units/time_duration_span.html">
+<link rel="import" href="/tracing/value/ui/scalar_span.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <polymer-element name="tr-ui-e-s-time-summary-side-panel"
     extends="tr-ui-side-panel">
@@ -214,7 +214,7 @@
           data.push({
             label: resultsForGroup.name,
             value: value,
-            valueText: tr.b.u.TimeDuration.format(value),
+            valueText: tr.v.Unit.byName.timeDurationInMs.format(value),
             resultsForGroup: resultsForGroup
           });
         }
@@ -255,10 +255,6 @@
         this.updateContents_();
       },
 
-      get listeningToKeys() {
-        return false;
-      },
-
       get groupBy() {
         return groupBy_;
       },
@@ -351,8 +347,11 @@
         summaryText.appendChild(tr.ui.b.createSpan({
           textContent: 'Total ' + this.groupingUnit_ + ': ',
           bold: true}));
-        summaryText.appendChild(tr.ui.units.createTimeDurationSpan(
-            getValueFromGroup(allGroup), {ownerDocument: this.ownerDocument}));
+        summaryText.appendChild(tr.v.ui.createScalarSpan(
+            getValueFromGroup(allGroup), {
+              unit: tr.v.Unit.byName.timeDurationInMs,
+              ownerDocument: this.ownerDocument
+            }));
         resultArea.appendChild(summaryText);
 
         // If needed, add in the idle time.
@@ -367,7 +366,7 @@
           extraData.push({
             label: 'CPU Idle',
             value: idleTime,
-            valueText: tr.b.u.TimeDuration.format(idleTime)
+            valueText: tr.v.Unit.byName.timeDurationInMs.format(idleTime)
           });
           extraValue += idleTime;
         }
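The hunks above capture the unit migration in miniature: duration formatting moves from tr.b.u.TimeDuration to tr.v.Unit.byName.timeDurationInMs, and tr.ui.units.createTimeDurationSpan gives way to tr.v.ui.createScalarSpan with an explicit unit. A minimal sketch, assuming the tracing codebase is loaded; the value 42.5 is hypothetical:

// Format a millisecond duration as text (was tr.b.u.TimeDuration.format).
var text = tr.v.Unit.byName.timeDurationInMs.format(42.5);

// Build a UI span for the same value (was tr.ui.units.createTimeDurationSpan).
var span = tr.v.ui.createScalarSpan(42.5, {
  unit: tr.v.Unit.byName.timeDurationInMs,
  ownerDocument: document
});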
diff --git a/catapult/tracing/tracing/ui/extras/side_panel/time_summary_side_panel_test.html b/catapult/tracing/tracing/ui/extras/side_panel/time_summary_side_panel_test.html
index 94db77c..789093f 100644
--- a/catapult/tracing/tracing/ui/extras/side_panel/time_summary_side_panel_test.html
+++ b/catapult/tracing/tracing/ui/extras/side_panel/time_summary_side_panel_test.html
@@ -5,16 +5,14 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/tracing/ui/extras/side_panel/time_summary_side_panel.html">
 <link rel="import" href="/tracing/core/test_utils.html">
 <link rel="import" href="/tracing/model/model.html">
+<link rel="import" href="/tracing/ui/extras/side_panel/time_summary_side_panel.html">
 
 <script>
 'use strict';
 
 tr.b.unittest.testSuite(function() {
-  var newSliceNamed = tr.c.TestUtils.newSliceNamed;
-
   function createModel(opt_options) {
     var options = opt_options || {};
     var m = tr.c.TestUtils.newModelWithEvents([], {
@@ -148,7 +146,9 @@
         groupData.forEach(function(groupData) {
           var group = ts.generateResultsForGroup(m, groupData.label);
 
-          var slice = newSliceNamed(groupData.label, start, groupData.value);
+          var slice = tr.c.TestUtils.newSliceEx(
+              {title: groupData.label,
+               start: start, duration: groupData.value});
           start += groupData.value;
           group.allSlices.push(slice);
           group.topLevelSlices.push(slice);
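The test migration above swaps the positional newSliceNamed helper for the keyword form newSliceEx. A minimal sketch; the title and numbers are hypothetical:

// Before: tr.c.TestUtils.newSliceNamed('work', 10, 5);
var slice = tr.c.TestUtils.newSliceEx({title: 'work', start: 10, duration: 5});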
diff --git a/catapult/tracing/tracing/ui/extras/systrace_config.html b/catapult/tracing/tracing/ui/extras/systrace_config.html
index 1aecd55..3ba4626 100644
--- a/catapult/tracing/tracing/ui/extras/systrace_config.html
+++ b/catapult/tracing/tracing/ui/extras/systrace_config.html
@@ -6,11 +6,7 @@
 -->
 
 <link rel="import" href="/tracing/extras/systrace_config.html">
-
-<!-- Core and UI configs. -->
 <link rel="import" href="/tracing/ui/base/ui.html">
-<link rel="import" href="/tracing/ui/timeline_view.html">
-
-<!-- Features used by Android systrace. -->
-<link rel="import" href="/tracing/ui/extras/side_panel/alerts_side_panel.html">
 <link rel="import" href="/tracing/ui/extras/highlighter/vsync_highlighter.html">
+<link rel="import" href="/tracing/ui/extras/side_panel/alerts_side_panel.html">
+<link rel="import" href="/tracing/ui/timeline_view.html">
diff --git a/catapult/tracing/tracing/ui/extras/tcmalloc/heap_instance_track.css b/catapult/tracing/tracing/ui/extras/tcmalloc/heap_instance_track.css
deleted file mode 100644
index b17d820..0000000
--- a/catapult/tracing/tracing/ui/extras/tcmalloc/heap_instance_track.css
+++ /dev/null
@@ -1,15 +0,0 @@
-/* Copyright (c) 2013 The Chromium Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-.tr-ui-e-tcmalloc-heap-instance-track {
-  height: 150px;
-}
-
-.tr-ui-e-tcmalloc-heap-instance-track ul {
-  list-style: none;
-  list-style-position: outside;
-  margin: 0;
-  overflow: hidden;
-}
diff --git a/catapult/tracing/tracing/ui/extras/tcmalloc/heap_instance_track.html b/catapult/tracing/tracing/ui/extras/tcmalloc/heap_instance_track.html
deleted file mode 100644
index 419d8b2..0000000
--- a/catapult/tracing/tracing/ui/extras/tcmalloc/heap_instance_track.html
+++ /dev/null
@@ -1,177 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2013 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="stylesheet" href="/tracing/ui/extras/tcmalloc/heap_instance_track.css">
-
-<link rel="import" href="/tracing/base/sorted_array_utils.html">
-<link rel="import" href="/tracing/ui/tracks/stacked_bars_track.html">
-<link rel="import" href="/tracing/ui/tracks/object_instance_track.html">
-<link rel="import" href="/tracing/ui/base/event_presenter.html">
-<link rel="import" href="/tracing/ui/base/ui.html">
-
-<script>
-'use strict';
-
-tr.exportTo('tr.ui.e.tcmalloc', function() {
-  var EventPresenter = tr.ui.b.EventPresenter;
-
-  /**
-   * A track that displays heap memory data.
-   * @constructor
-   * @extends {StackedBarsTrack}
-   */
-
-  var HeapInstanceTrack = tr.ui.b.define(
-      'tr-ui-e-tcmalloc-heap-instance-track', tr.ui.tracks.StackedBarsTrack);
-
-  HeapInstanceTrack.prototype = {
-
-    __proto__: tr.ui.tracks.StackedBarsTrack.prototype,
-
-    decorate: function(viewport) {
-      tr.ui.tracks.StackedBarsTrack.prototype.decorate.call(this, viewport);
-      this.classList.add('tr-ui-e-tcmalloc-heap-instance-track');
-      this.objectInstance_ = null;
-    },
-
-    set objectInstances(objectInstances) {
-      if (!objectInstances) {
-        this.objectInstance_ = [];
-        return;
-      }
-      if (objectInstances.length != 1)
-        throw new Error('Bad object instance count.');
-      this.objectInstance_ = objectInstances[0];
-      this.maxBytes_ = this.computeMaxBytes_(
-          this.objectInstance_.snapshots);
-    },
-
-    computeMaxBytes_: function(snapshots) {
-      var maxBytes = 0;
-      for (var i = 0; i < snapshots.length; i++) {
-        var snapshot = snapshots[i];
-        // Sum all the current allocations in this snapshot.
-        var traceNames = Object.keys(snapshot.heap_.children);
-        var sumBytes = 0;
-        for (var j = 0; j < traceNames.length; j++) {
-          sumBytes += snapshot.heap_.children[traceNames[j]].currentBytes;
-        }
-        // Keep track of the maximum across all snapshots.
-        if (sumBytes > maxBytes)
-          maxBytes = sumBytes;
-      }
-      return maxBytes;
-    },
-
-    get height() {
-      return window.getComputedStyle(this).height;
-    },
-
-    set height(height) {
-      this.style.height = height;
-    },
-
-    draw: function(type, viewLWorld, viewRWorld) {
-      switch (type) {
-        case tr.ui.tracks.DrawType.GENERAL_EVENT:
-          this.drawEvents_(viewLWorld, viewRWorld);
-          break;
-      }
-    },
-
-    drawEvents_: function(viewLWorld, viewRWorld) {
-      var ctx = this.context();
-      var pixelRatio = window.devicePixelRatio || 1;
-
-      var bounds = this.getBoundingClientRect();
-      var width = bounds.width * pixelRatio;
-      var height = bounds.height * pixelRatio;
-
-      // Culling parameters.
-      var dt = this.viewport.currentDisplayTransform;
-
-      // Scale by the size of the largest snapshot.
-      var maxBytes = this.maxBytes_;
-
-      var objectSnapshots = this.objectInstance_.snapshots;
-      var lowIndex = tr.b.findLowIndexInSortedArray(
-          objectSnapshots,
-          function(snapshot) {
-            return snapshot.ts;
-          },
-          viewLWorld);
-      // Assure that the stack with the left edge off screen still gets drawn
-      if (lowIndex > 0)
-        lowIndex -= 1;
-
-      for (var i = lowIndex; i < objectSnapshots.length; ++i) {
-        var snapshot = objectSnapshots[i];
-
-        var left = snapshot.ts;
-        if (left > viewRWorld)
-          break;
-        var leftView = dt.xWorldToView(left);
-        if (leftView < 0)
-          leftView = 0;
-
-        // Compute the edges for the column graph bar.
-        var right;
-        if (i != objectSnapshots.length - 1) {
-          right = objectSnapshots[i + 1].ts;
-        } else {
-          // If this is the last snapshot of multiple snapshots, use the width of
-          // the previous snapshot for the width.
-          if (objectSnapshots.length > 1)
-            right = objectSnapshots[i].ts + (objectSnapshots[i].ts -
-                    objectSnapshots[i - 1].ts);
-          else
-            // If there's only one snapshot, use max bounds as the width.
-            right = this.objectInstance_.parent.model.bounds.max;
-        }
-
-        var rightView = dt.xWorldToView(right);
-        if (rightView > width)
-          rightView = width;
-
-        // Floor the bounds to avoid a small gap between stacks.
-        leftView = Math.floor(leftView);
-        rightView = Math.floor(rightView);
-
-        // Draw a stacked bar graph. Largest item is stored first in the
-        // heap data structure, so iterate backwards. Likewise draw from
-        // the bottom of the bar upwards.
-        var currentY = height;
-        var keys = Object.keys(snapshot.heap_.children);
-        for (var k = keys.length - 1; k >= 0; k--) {
-          var trace = snapshot.heap_.children[keys[k]];
-          if (this.objectInstance_.selectedTraces &&
-              this.objectInstance_.selectedTraces.length > 0 &&
-              this.objectInstance_.selectedTraces[0] == keys[k]) {
-            // A trace selected in the analysis view is bright yellow.
-            ctx.fillStyle = 'rgb(239, 248, 206)';
-          } else
-            ctx.fillStyle = EventPresenter.getBarSnapshotColor(snapshot, k);
-
-          var barHeight = height * trace.currentBytes / maxBytes;
-          ctx.fillRect(leftView, currentY - barHeight,
-                       Math.max(rightView - leftView, 1), barHeight);
-          currentY -= barHeight;
-        }
-      }
-      ctx.lineWidth = 1;
-    }
-  };
-
-  tr.ui.tracks.ObjectInstanceTrack.register(
-      HeapInstanceTrack,
-      {typeName: 'memory::Heap'});
-
-  return {
-    HeapInstanceTrack: HeapInstanceTrack
-  };
-});
-</script>
diff --git a/catapult/tracing/tracing/ui/extras/tcmalloc/images/collapse.png b/catapult/tracing/tracing/ui/extras/tcmalloc/images/collapse.png
deleted file mode 100644
index c5fb718..0000000
--- a/catapult/tracing/tracing/ui/extras/tcmalloc/images/collapse.png
+++ /dev/null
Binary files differ
diff --git a/catapult/tracing/tracing/ui/extras/tcmalloc/images/expand.png b/catapult/tracing/tracing/ui/extras/tcmalloc/images/expand.png
deleted file mode 100644
index 8f2d0ef..0000000
--- a/catapult/tracing/tracing/ui/extras/tcmalloc/images/expand.png
+++ /dev/null
Binary files differ
diff --git a/catapult/tracing/tracing/ui/extras/tcmalloc/tcmalloc.html b/catapult/tracing/tracing/ui/extras/tcmalloc/tcmalloc.html
deleted file mode 100644
index dfeef93..0000000
--- a/catapult/tracing/tracing/ui/extras/tcmalloc/tcmalloc.html
+++ /dev/null
@@ -1,11 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2013 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/extras/tcmalloc/heap.html">
-<link rel="import" href="/tracing/ui/extras/tcmalloc/tcmalloc_instance_view.html">
-<link rel="import" href="/tracing/ui/extras/tcmalloc/tcmalloc_snapshot_view.html">
-<link rel="import" href="/tracing/ui/extras/tcmalloc/heap_instance_track.html">
diff --git a/catapult/tracing/tracing/ui/extras/tcmalloc/tcmalloc_instance_view.css b/catapult/tracing/tracing/ui/extras/tcmalloc/tcmalloc_instance_view.css
deleted file mode 100644
index 05a9451..0000000
--- a/catapult/tracing/tracing/ui/extras/tcmalloc/tcmalloc_instance_view.css
+++ /dev/null
@@ -1,37 +0,0 @@
-/* Copyright (c) 2013 The Chromium Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-.tr-ui-e-tcmalloc-instance-view .subhead {
-  font-size: small;
-  padding-bottom: 10px;
-}
-
-.tr-ui-e-tcmalloc-instance-view #args {
-  white-space: pre;
-}
-
-.tr-ui-e-tcmalloc-instance-view #snapshots > * {
-  display: block;
-}
-
-.tr-ui-e-tcmalloc-instance-view {
-  overflow: auto;
-}
-
-.tr-ui-e-tcmalloc-instance-view * {
-  -webkit-user-select: text;
-}
-
-.tr-ui-e-tcmalloc-instance-view .title {
-  border-bottom: 1px solid rgb(128, 128, 128);
-  font-size: 110%;
-  font-weight: bold;
-}
-
-.tr-ui-e-tcmalloc-instance-view td,
-.tr-ui-e-tcmalloc-instance-view th {
-  font-size: small;
-  text-align: right;
-}
diff --git a/catapult/tracing/tracing/ui/extras/tcmalloc/tcmalloc_instance_view.html b/catapult/tracing/tracing/ui/extras/tcmalloc/tcmalloc_instance_view.html
deleted file mode 100644
index e114083..0000000
--- a/catapult/tracing/tracing/ui/extras/tcmalloc/tcmalloc_instance_view.html
+++ /dev/null
@@ -1,112 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2013 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="stylesheet" href="/tracing/ui/extras/tcmalloc/tcmalloc_instance_view.css">
-
-<link rel="import" href="/tracing/model/event_set.html">
-<link rel="import" href="/tracing/ui/analysis/object_instance_view.html">
-
-<script>
-'use strict';
-
-tr.exportTo('tr.ui.e.tcmalloc', function() {
-  /**
-   * Displays tcmalloc heap memory information over time. A tcmalloc instance
-   * has multiple snapshots.
-   * @constructor
-   */
-  var TcmallocInstanceView = tr.ui.b.define(
-      'tr-ui-e-tcmalloc-instance-view', tr.ui.analysis.ObjectInstanceView);
-
-  TcmallocInstanceView.prototype = {
-    __proto__: tr.ui.analysis.ObjectInstanceView.prototype,
-
-    decorate: function() {
-      tr.ui.analysis.ObjectInstanceView.prototype.decorate.apply(this);
-      this.classList.add('tr-ui-e-tcmalloc-instance-view');
-    },
-
-    updateContents: function() {
-      var instance = this.objectInstance_;
-      if (!instance || !instance.snapshots || instance.snapshots.length == 0) {
-        this.textContent = 'No data found.';
-        return;
-      }
-      // Clear old view.
-      this.textContent = '';
-
-      // First, grab the largest N traces from the first snapshot.
-      var snapshot = instance.snapshots[0];
-      var heapEntry = snapshot.heap_;
-      var traceNames = Object.keys(heapEntry.children);
-      traceNames.sort(function(a, b) {
-        // Sort from large to small.
-        return heapEntry.children[b].currentBytes -
-            heapEntry.children[a].currentBytes;
-      });
-      // Only use the largest 5 traces to start
-      traceNames = traceNames.slice(0, 5);
-
-      var table = document.createElement('table');
-      var titles = ['Total'];
-      titles = titles.concat(traceNames);
-      table.appendChild(this.buildRow_(null, titles));
-
-      // One array per trace name.
-      var chartArrays = [[], [], [], [], []];
-      for (var i = 0; i < instance.snapshots.length; i++) {
-        var snapshot = instance.snapshots[i];
-        var rowData = [snapshot.total_.currentBytes];
-        for (var j = 0; j < 5; j++) {
-          var bytes = snapshot.heap_.children[traceNames[j]].currentBytes;
-          rowData.push(bytes);
-          // Associate a megabyte count with a time in seconds.
-          chartArrays[j].push(
-              [Math.round(snapshot.ts / 1000), bytes / 1024 / 1024]);
-        }
-        var row = this.buildRow_(snapshot, rowData);
-        table.appendChild(row);
-      }
-      this.appendChild(table);
-    },
-
-    buildRow_: function(snapshot, items) {
-      var row = document.createElement('tr');
-      var td = document.createElement('td');
-      if (snapshot) {
-        var snapshotLink = document.createElement('tr-ui-a-analysis-link');
-        snapshotLink.selection = new tr.model.EventSet(snapshot);
-        td.appendChild(snapshotLink);
-      }
-      row.appendChild(td);
-      for (var i = 0; i < items.length; i++) {
-        var data = document.createElement('td');
-        data.textContent = items[i];
-        row.appendChild(data);
-      }
-      return row;
-    },
-
-    /*
-     * Returns a human readable string for a size in bytes.
-     */
-    getByteString_: function(bytes) {
-      var mb = bytes / 1024 / 1024;
-      return mb.toFixed(1) + ' MB';
-    }
-  };
-
-  tr.ui.analysis.ObjectInstanceView.register(
-      TcmallocInstanceView,
-      {typeName: 'memory::Heap'});
-
-  return {
-    TcmallocInstanceView: TcmallocInstanceView
-  };
-
-});
-</script>
diff --git a/catapult/tracing/tracing/ui/extras/tcmalloc/tcmalloc_snapshot_view.css b/catapult/tracing/tracing/ui/extras/tcmalloc/tcmalloc_snapshot_view.css
deleted file mode 100644
index 87fc7a5..0000000
--- a/catapult/tracing/tracing/ui/extras/tcmalloc/tcmalloc_snapshot_view.css
+++ /dev/null
@@ -1,59 +0,0 @@
-/* Copyright (c) 2013 The Chromium Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-.tr-ui-e-tcmalloc-heap-snapshot-view .subhead {
-  font-size: small;
-  padding-bottom: 10px;
-}
-
-.tr-ui-e-tcmalloc-heap-snapshot-view ul {
-  background-position: 0 5px;
-  background-repeat: no-repeat;
-  cursor: pointer;
-  font-family: monospace;
-  list-style: none;
-  margin: 0;
-  padding-left: 15px;
-}
-
-.tr-ui-e-tcmalloc-heap-snapshot-view li {
-  background-position: 0 5px;
-  background-repeat: no-repeat;
-  cursor: pointer;
-  list-style: none;
-  margin: 0;
-  padding-left: 15px;
-}
-
-/* Collapsed state for list element */
-.tr-ui-e-tcmalloc-heap-snapshot-view .collapsed {
-  background-image: url(./images/expand.png);
-}
-
-/* Expanded state for list element. Must be located under the collapsed one. */
-.tr-ui-e-tcmalloc-heap-snapshot-view .expanded {
-  background-image: url(./images/collapse.png);
-}
-
-/* Allocation size in MB, right-aligned for easier comparison of columns. */
-.tr-ui-e-tcmalloc-heap-snapshot-view .trace-bytes {
-  display: inline-block;
-  padding-right: 10px;
-  text-align: right;
-  width: 80px;
-}
-
-/* Trace allocation count. */
-.tr-ui-e-tcmalloc-heap-snapshot-view .trace-allocs {
-  display: inline-block;
-  padding-right: 10px;
-  text-align: right;
-  width: 120px;
-}
-
-/* Trace name, inline so it appears to the right of the byte count. */
-.tr-ui-e-tcmalloc-heap-snapshot-view .trace-name {
-  display: inline-block;
-}
diff --git a/catapult/tracing/tracing/ui/extras/tcmalloc/tcmalloc_snapshot_view.html b/catapult/tracing/tracing/ui/extras/tcmalloc/tcmalloc_snapshot_view.html
deleted file mode 100644
index 433cd4e..0000000
--- a/catapult/tracing/tracing/ui/extras/tcmalloc/tcmalloc_snapshot_view.html
+++ /dev/null
@@ -1,176 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2013 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="stylesheet" href="/tracing/ui/extras/tcmalloc/tcmalloc_snapshot_view.css">
-
-<link rel="import" href="/tracing/ui/analysis/object_snapshot_view.html">
-
-<script>
-'use strict';
-
-tr.exportTo('tr.ui.e.tcmalloc', function() {
-  /*
-   * Displays a heap memory snapshot in a human readable form.
-   * @constructor
-   */
-  var TcmallocSnapshotView = tr.ui.b.define(
-      'tr-ui-e-tcmalloc-heap-snapshot-view',
-      tr.ui.analysis.ObjectSnapshotView);
-
-  TcmallocSnapshotView.prototype = {
-    __proto__: tr.ui.analysis.ObjectSnapshotView.prototype,
-
-    decorate: function() {
-      this.classList.add('tr-ui-e-tcmalloc-heap-snapshot-view');
-    },
-
-    updateContents: function() {
-      var snapshot = this.objectSnapshot_;
-      if (!snapshot || !snapshot.heap_) {
-        this.textContent = 'No heap found.';
-        return;
-      }
-      // Clear old snapshot view.
-      this.textContent = '';
-
-      // Note: "total" may actually be less than the largest allocation bin.
-      // This might happen if one stack is doing a lot of allocation, then
-      // passing off to another stack for deallocation.  That stack will
-      // have a high "current bytes" count and the other one might be
-      // negative or zero. So "total" may be smaller than the largest trace.
-      var subhead = document.createElement('div');
-      subhead.textContent = 'Retaining ' +
-          this.getByteString_(snapshot.total_.currentBytes) + ' in ' +
-          snapshot.total_.currentAllocs +
-          ' allocations. Showing > 0.1 MB.';
-      subhead.className = 'subhead';
-      this.appendChild(subhead);
-
-      // Build a nested tree-view of allocations
-      var myList = this.buildAllocList_(snapshot.heap_, false);
-      this.appendChild(myList);
-    },
-
-    /**
-     * Creates a nested list with clickable entries.
-     * @param {Object} heapEntry The current trace heap entry.
-     * @param {boolean} hide Whether this list is hidden by default.
-     * @return {Element} A <ul> list element.
-     */
-    buildAllocList_: function(heapEntry, hide) {
-      var myList = document.createElement('ul');
-      myList.hidden = hide;
-      var keys = Object.keys(heapEntry.children);
-      keys.sort(function(a, b) {
-        // Sort from large to small.
-        return heapEntry.children[b].currentBytes -
-            heapEntry.children[a].currentBytes;
-      });
-      for (var i = 0; i < keys.length; i++) {
-        var traceName = keys[i];
-        var trace = heapEntry.children[traceName];
-        // Don't show small nodes - they make things harder to see.
-        if (trace.currentBytes < 100 * 1024)
-          continue;
-        var childCount = Object.keys(trace.children).length;
-        var isLeaf = childCount == 0;
-        var myItem = this.buildItem_(
-            traceName, isLeaf, trace.currentBytes, trace.currentAllocs);
-        myList.appendChild(myItem);
-        // Build a nested <ul> list of my children.
-        if (childCount > 0)
-          myItem.appendChild(this.buildAllocList_(trace, true));
-      }
-      return myList;
-    },
-
-    /*
-     * Returns a <li> for an allocation traceName of size bytes.
-     */
-    buildItem_: function(traceName, isLeaf, bytes, allocs) {
-      var myItem = document.createElement('li');
-      myItem.className = 'trace-item';
-      myItem.id = traceName;
-
-      var byteDiv = document.createElement('div');
-      byteDiv.textContent = this.getByteString_(bytes);
-      byteDiv.className = 'trace-bytes';
-      myItem.appendChild(byteDiv);
-
-      if (traceName.length == 0) {
-        // The empty trace name indicates that the allocations occurred at
-        // this trace level, not in a sub-trace. This looks weird as the
-        // empty string, so replace it with something non-empty and don't
-        // give that line an expander.
-        traceName = '(here)';
-      } else if (traceName.indexOf('..') == 0) {
-        // Tasks in RunTask have special handling. They show the path of the
-        // filename. Convert '../../foo.cc' into 'RunTask from foo.cc'.
-        var lastSlash = traceName.lastIndexOf('/');
-        if (lastSlash != -1)
-          traceName = 'Task from ' + traceName.substr(lastSlash + 1);
-      }
-      var traceDiv = document.createElement('div');
-      traceDiv.textContent = traceName;
-      traceDiv.className = 'trace-name';
-      myItem.appendChild(traceDiv);
-
-      // Don't allow leaf nodes to be expanded.
-      if (isLeaf)
-        return myItem;
-
-      // Expand the element when it is clicked.
-      var self = this;
-      myItem.addEventListener('click', function(event) {
-        // Allow click on the +/- image (li) or child divs.
-        if (this == event.target || this == event.target.parentElement) {
-          this.classList.toggle('expanded');
-          var child = this.querySelector('ul');
-          child.hidden = !child.hidden;
-          // Highlight this stack trace in the timeline view.
-          self.onItemClicked_(this);
-        }
-      });
-      myItem.classList.add('collapsed');
-      return myItem;
-    },
-
-    onItemClicked_: function(traceItem) {
-      // Compute the full stack trace the user just clicked.
-      var traces = [];
-      while (traceItem.classList.contains('trace-item')) {
-        var traceNameDiv = traceItem.firstElementChild.nextElementSibling;
-        traces.unshift(traceNameDiv.textContent);
-        var traceNameUl = traceItem.parentElement;
-        traceItem = traceNameUl.parentElement;
-      }
-      // Tell the instance that this stack trace is selected.
-      var instance = this.objectSnapshot_.objectInstance;
-      instance.selectedTraces = traces;
-      // Invalidate the viewport to cause a redraw.
-      var trackView = document.querySelector('.timeline-track-view');
-      trackView.viewport_.dispatchChangeEvent();
-    },
-
-    /*
-     * Returns a human readable string for a size in bytes.
-     */
-    getByteString_: function(bytes) {
-      var mb = bytes / 1024 / 1024;
-      return mb.toFixed(1) + ' MB';
-    }
-  };
-
-  tr.ui.analysis.ObjectSnapshotView.register(
-      TcmallocSnapshotView,
-      {typeName: 'memory::Heap'});
-
-  return {
-    TcmallocSnapshotView: TcmallocSnapshotView
-  };
-});
-</script>
diff --git a/catapult/tracing/tracing/ui/scripting_control.html b/catapult/tracing/tracing/ui/scripting_control.html
index fe4c711..8490166 100644
--- a/catapult/tracing/tracing/ui/scripting_control.html
+++ b/catapult/tracing/tracing/ui/scripting_control.html
@@ -69,7 +69,8 @@
       <div id='prompt'
            on-keypress="{{ promptKeyPress }}"
            on-keydown="{{ promptKeyDown }}"
-           on-blur="{{ onConsoleBlur }}">
+           on-blur="{{ onConsoleBlur }}"></div>
+    </div>
   </template>
 
   <script>
diff --git a/catapult/tracing/tracing/ui/side_panel/file_size_stats_side_panel.html b/catapult/tracing/tracing/ui/side_panel/file_size_stats_side_panel.html
new file mode 100644
index 0000000..a6cf717
--- /dev/null
+++ b/catapult/tracing/tracing/ui/side_panel/file_size_stats_side_panel.html
@@ -0,0 +1,212 @@
+<!DOCTYPE html>
+<!--
+Copyright 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/base/statistics.html">
+<link rel="import" href="/tracing/ui/base/grouping_table.html">
+<link rel="import" href="/tracing/ui/base/grouping_table_groupby_picker.html">
+<link rel="import" href="/tracing/ui/side_panel/side_panel.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/ui/scalar_span.html">
+<link rel="import" href="/tracing/value/unit.html">
+
+<polymer-element name='tr-ui-sp-file-size-stats-side-panel'
+    extends='tr-ui-side-panel'>
+  <template>
+    <style>
+    :host {
+      display: flex;
+      flex-direction: column;
+      width: 600px;
+    }
+    toolbar {
+      align-items: center;
+      background-color: rgb(236, 236, 236);
+      border-bottom: 1px solid #8e8e8e;
+      display: flex;
+      flex-direction: row;
+      flex: 0 0 auto;
+      font-size: 12px;
+      padding: 0 10px 0 10px;
+    }
+    table-container {
+      display: flex;
+      min-height: 0px;
+      overflow-y: auto;
+    }
+    </style>
+
+    <toolbar>
+      <span><b>Group by:</b></span>
+      <tr-ui-b-grouping-table-groupby-picker id="picker">
+      </tr-ui-b-grouping-table-groupby-picker>
+    </toolbar>
+    <table-container>
+      <tr-ui-b-grouping-table id="table"></tr-ui-b-grouping-table>
+    </table-container>
+  </template>
+</polymer-element>
+
+<script>
+'use strict';
+(function() {
+
+  Polymer('tr-ui-sp-file-size-stats-side-panel', {
+    ready: function() {
+      this.model_ = undefined;
+      this.selection_ = new tr.model.EventSet();
+      this.$.picker.settingsKey = 'tr-ui-sp-file-size-stats-side-panel-picker';
+      this.$.picker.possibleGroups = [
+        {
+          key: 'phase', label: 'Event Type',
+          dataFn: function(eventStat) { return eventStat.phase; }
+        },
+        {
+          key: 'category', label: 'Category',
+          dataFn: function(eventStat) { return eventStat.category; }
+        },
+        {
+          key: 'title', label: 'Title',
+          dataFn: function(eventStat) { return eventStat.title; }
+        }
+      ];
+      this.$.picker.defaultGroupKeys = ['phase', 'title'];
+      this.$.picker.addEventListener('current-groups-changed',
+                                     this.updateContents_.bind(this));
+    },
+
+    get textLabel() {
+      return 'File Size Stats';
+    },
+
+    supportsModel: function(m) {
+      if (!m) {
+        return {
+          supported: false,
+          reason: 'No stats were collected for this file.'
+        };
+      }
+
+      if (m.stats.allTraceEventStats.length === 0) {
+        return {
+          supported: false,
+          reason: 'No stats were collected for this file.'
+        };
+      }
+      return {
+        supported: true
+      };
+    },
+
+    get model() {
+      return this.model_;
+    },
+
+    set model(model) {
+      this.model_ = model;
+      this.updateContents_();
+    },
+
+    get rangeOfInterest() {
+      return this.rangeOfInterest_;
+    },
+
+    set rangeOfInterest(rangeOfInterest) {
+      this.rangeOfInterest_ = rangeOfInterest;
+    },
+
+    get selection() {
+      return this.selection_;
+    },
+
+    set selection(selection) {
+      this.selection_ = selection;
+    },
+
+    createColumns_: function(stats) {
+      var columns = [
+        {
+          title: 'Title',
+          value: function(row) {
+            var titleEl = document.createElement('span');
+            titleEl.textContent = row.title;
+            titleEl.style.textOverflow = 'ellipsis';
+            return titleEl;
+          },
+          cmp: function(a, b) {
+            return a.title.localeCompare(b.title);
+          },
+          width: '400px'
+        },
+        {
+          title: 'Num Events',
+          textAlign: 'right',
+          value: function(row) {
+            return row.rowStats.numEvents;
+          },
+          cmp: function(a, b) {
+            return a.rowStats.numEvents - b.rowStats.numEvents;
+          },
+          width: '80px'
+        }
+      ];
+
+      if (stats && stats.hasEventSizesinBytes) {
+        columns.push({
+          title: 'Bytes',
+          textAlign: 'right',
+          value: function(row) {
+            var value = new tr.v.ScalarNumeric(tr.v.Unit.byName.sizeInBytes,
+                row.rowStats.totalEventSizeinBytes);
+            var spanEl = tr.v.ui.createScalarSpan(value);
+            return spanEl;
+          },
+          cmp: function(a, b) {
+            return a.rowStats.totalEventSizeinBytes -
+                b.rowStats.totalEventSizeinBytes;
+          },
+          width: '80px'
+        });
+      }
+      return columns;
+    },
+
+    updateContents_: function() {
+      var table = this.$.table;
+
+      var columns = this.createColumns_(this.model.stats);
+      table.rowStatsConstructor = function ModelStatsRowStats(row) {
+        var sum = tr.b.Statistics.sum(row.data, function(x) {
+          return x.numEvents;
+        });
+        var totalEventSizeinBytes = tr.b.Statistics.sum(row.data, function(x) {
+          return x.totalEventSizeinBytes;
+        });
+        return {
+          numEvents: sum,
+          totalEventSizeinBytes: totalEventSizeinBytes
+        };
+      };
+      table.tableColumns = columns;
+      table.sortColumnIndex = 1;
+      table.sortDescending = true;
+      table.selectionMode = tr.ui.b.TableFormat.SelectionMode.ROW;
+
+      table.groupBy = this.$.picker.currentGroups.map(function(group) {
+        return group.dataFn;
+      });
+
+      if (!this.model) {
+        table.dataToGroup = [];
+      } else {
+        table.dataToGroup = this.model.stats.allTraceEventStats;
+      }
+      this.$.table.rebuild();
+    }
+  });
+})();
+</script>
diff --git a/catapult/tracing/tracing/ui/side_panel/file_size_stats_side_panel_test.html b/catapult/tracing/tracing/ui/side_panel/file_size_stats_side_panel_test.html
new file mode 100644
index 0000000..75c389c
--- /dev/null
+++ b/catapult/tracing/tracing/ui/side_panel/file_size_stats_side_panel_test.html
@@ -0,0 +1,36 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/core/test_utils.html">
+<link rel="import" href="/tracing/model/model.html">
+<link rel="import" href="/tracing/ui/side_panel/file_size_stats_side_panel.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  var TestUtils = tr.c.TestUtils;
+
+  function createModel(opt_customizeModelCallback) {
+    return TestUtils.newModel(function(model) {
+      var modelStats = model.stats;
+      modelStats.willProcessBasicTraceEvent('X', 'cat1', 'title1');
+      modelStats.willProcessBasicTraceEvent('X', 'cat1', 'title1');
+      modelStats.willProcessBasicTraceEvent('X', 'cat2', 'title1');
+      modelStats.willProcessBasicTraceEvent('X', 'cat2', 'title3');
+      modelStats.willProcessBasicTraceEvent('Y', 'cat3', 'title3');
+    });
+  }
+
+  test('instantiate', function() {
+    var panel = document.createElement('tr-ui-sp-file-size-stats-side-panel');
+    panel.model = createModel();
+    panel.style.height = '200px';
+    this.addHTMLOutput(panel);
+  });
+});
+</script>
diff --git a/catapult/tracing/tracing/ui/side_panel/side_panel.html b/catapult/tracing/tracing/ui/side_panel/side_panel.html
index 75d0a15..996788f 100644
--- a/catapult/tracing/tracing/ui/side_panel/side_panel.html
+++ b/catapult/tracing/tracing/ui/side_panel/side_panel.html
@@ -38,10 +38,6 @@
       throw new Error('Not implemented');
     },
 
-    get listeningToKeys() {
-      throw new Error('Not implemented');
-    },
-
     supportsModel: function(m) {
       throw new Error('Not implemented');
     }
diff --git a/catapult/tracing/tracing/ui/timeline_display_transform_animations.html b/catapult/tracing/tracing/ui/timeline_display_transform_animations.html
index 6f34e9e..7122d9b 100644
--- a/catapult/tracing/tracing/ui/timeline_display_transform_animations.html
+++ b/catapult/tracing/tracing/ui/timeline_display_transform_animations.html
@@ -11,7 +11,7 @@
 'use strict';
 
 tr.exportTo('tr.ui', function() {
-  var kDefaultPanAnimatoinDurationMs = 100.0;
+  var kDefaultPanAnimationDurationMs = 100.0;
 
   /**
    * Pans a TimelineDisplayTransform by a given amount.
@@ -20,14 +20,14 @@
    * @param {Number} deltaX The total amount of change to the transform's panX.
    * @param {Number} deltaY The total amount of change to the transform's panY.
    * @param {Number=} opt_durationMs How long the pan animation should run.
-   * Defaults to kDefaultPanAnimatoinDurationMs.
+   * Defaults to kDefaultPanAnimationDurationMs.
    */
   function TimelineDisplayTransformPanAnimation(
       deltaX, deltaY, opt_durationMs) {
     this.deltaX = deltaX;
     this.deltaY = deltaY;
     if (opt_durationMs === undefined)
-      this.durationMs = kDefaultPanAnimatoinDurationMs;
+      this.durationMs = kDefaultPanAnimationDurationMs;
     else
       this.durationMs = opt_durationMs;
 
@@ -116,7 +116,7 @@
     this.goalFocalPointY = goalFocalPointY;
     this.zoomInRatioX = zoomInRatioX;
     if (opt_durationMs === undefined)
-      this.durationMs = kDefaultPanAnimatoinDurationMs;
+      this.durationMs = kDefaultPanAnimationDurationMs;
     else
       this.durationMs = opt_durationMs;
 
diff --git a/catapult/tracing/tracing/ui/timeline_track_view.html b/catapult/tracing/tracing/ui/timeline_track_view.html
index 224bb7a..acf5472 100644
--- a/catapult/tracing/tracing/ui/timeline_track_view.html
+++ b/catapult/tracing/tracing/ui/timeline_track_view.html
@@ -8,7 +8,6 @@
 <link rel="import" href="/tracing/base/event.html">
 <link rel="import" href="/tracing/base/settings.html">
 <link rel="import" href="/tracing/base/task.html">
-<link rel="import" href="/tracing/base/units/time_duration.html">
 <link rel="import" href="/tracing/core/filter.html">
 <link rel="import" href="/tracing/model/event.html">
 <link rel="import" href="/tracing/model/event_set.html">
@@ -17,11 +16,12 @@
 <link rel="import" href="/tracing/ui/base/mouse_mode_selector.html">
 <link rel="import" href="/tracing/ui/base/timing_tool.html">
 <link rel="import" href="/tracing/ui/base/ui.html">
-<link rel="import" href="/tracing/ui/timeline_viewport.html">
 <link rel="import" href="/tracing/ui/timeline_display_transform_animations.html">
+<link rel="import" href="/tracing/ui/timeline_viewport.html">
 <link rel="import" href="/tracing/ui/tracks/drawing_container.html">
 <link rel="import" href="/tracing/ui/tracks/model_track.html">
 <link rel="import" href="/tracing/ui/tracks/ruler_track.html">
+<link rel="import" href="/tracing/value/unit.html">
 
 <!--
  Interactive visualization of Model objects based loosely on Gantt charts.
@@ -798,7 +798,7 @@
         results.width = results.right - results.left;
         results.height = results.bottom - results.top;
         return results;
-      }
+      };
 
       // TODO(dsinclair): intersectRect_ can return false (which should actually
       // be undefined) but we use finalDragBox without checking the return value
@@ -819,7 +819,8 @@
       var hiWX = dt.xViewToWorld(
           (hiX - canv.offsetLeft) * pixelRatio);
 
-      this.$.drag_box.textContent = tr.b.u.TimeDuration.format(hiWX - loWX);
+      this.$.drag_box.textContent =
+          tr.v.Unit.byName.timeDurationInMs.format(hiWX - loWX);
 
       var e = new tr.b.Event('selectionChanging');
       e.loWX = loWX;
diff --git a/catapult/tracing/tracing/ui/timeline_track_view_test.html b/catapult/tracing/tracing/ui/timeline_track_view_test.html
index 15433f8..aae2535 100644
--- a/catapult/tracing/tracing/ui/timeline_track_view_test.html
+++ b/catapult/tracing/tracing/ui/timeline_track_view_test.html
@@ -139,7 +139,7 @@
     var model = new tr.Model();
     var p1 = model.getOrCreateProcess(1);
     var t1 = p1.getOrCreateThread(2);
-    t1.sliceGroup.pushSlice(tr.c.TestUtils.newSlice(0, 1));
+    t1.sliceGroup.pushSlice(tr.c.TestUtils.newSliceEx({start: 0, duration: 1}));
 
     var timeline = document.createElement('tr-ui-timeline-track-view');
     timeline.model = model;
diff --git a/catapult/tracing/tracing/ui/timeline_view.html b/catapult/tracing/tracing/ui/timeline_view.html
index e5f85fc..f0dc001 100644
--- a/catapult/tracing/tracing/ui/timeline_view.html
+++ b/catapult/tracing/tracing/ui/timeline_view.html
@@ -8,6 +8,7 @@
 <link rel="import" href="/tracing/base/settings.html">
 <link rel="import" href="/tracing/base/utils.html">
 <link rel="import" href="/tracing/core/scripting_controller.html">
+<link rel="import" href="/tracing/metrics/all_metrics.html">
 <link rel="import" href="/tracing/ui/analysis/analysis_view.html">
 <link rel="import" href="/tracing/ui/base/dom_helpers.html">
 <link rel="import" href="/tracing/ui/base/drag_handle.html">
@@ -26,7 +27,7 @@
 <link rel="import" href="/tracing/ui/timeline_track_view.html">
 <link rel="import" href="/tracing/ui/timeline_view_help_overlay.html">
 <link rel="import" href="/tracing/ui/timeline_view_metadata_overlay.html">
-<link rel="import" href="/tracing/ui/units/preferred_display_unit.html">
+<link rel="import" href="/tracing/value/ui/preferred_display_unit.html">
 
 <polymer-element name='tr-ui-timeline-view'>
   <template>
@@ -128,8 +129,8 @@
     <tr-ui-b-drag-handle id="drag_handle"></tr-ui-b-drag-handle>
     <tr-ui-a-analysis-view id="analysis"></tr-ui-a-analysis-view>
 
-    <tr-ui-u-preferred-display-unit id="display_unit">
-    </tr-ui-u-preferred-display-unit>
+    <tr-v-ui-preferred-display-unit id="display_unit">
+    </tr-v-ui-preferred-display-unit>
   </template>
 
   <script>
@@ -158,9 +159,10 @@
       this.sidePanelContainer_.brushingStateController =
           this.brushingStateController_;
 
-      if (window.tr.e && window.tr.e.rail && window.tr.e.rail.RAILScore) {
+      if (window.tr.metrics && window.tr.metrics.sh &&
+          window.tr.metrics.sh.SystemHealthMetric) {
         this.railScoreSpan_ = document.createElement(
-            'tr-ui-e-rail-rail-score-span');
+            'tr-metrics-ui-sh-system-health-span');
         this.rightControls.appendChild(this.railScoreSpan_);
       } else {
         this.railScoreSpan_ = undefined;
@@ -365,7 +367,7 @@
       // Remove old trackView if the model has completely changed.
       if (modelInstanceChanged) {
         if (this.railScoreSpan_)
-          this.railScoreSpan_.railScore = undefined;
+          this.railScoreSpan_.model = undefined;
         this.trackViewContainer_.textContent = '';
         if (this.trackView_) {
           this.trackView_.viewport.removeEventListener(
@@ -394,10 +396,8 @@
         this.trackView_.model = model;
         this.trackView_.viewport.showFlowEvents = this.showFlowEvents;
         this.trackView_.viewport.highlightVSync = this.highlightVSync;
-        if (this.railScoreSpan_) {
-          var railScore = tr.e.rail.RAILScore.fromModel(model);
-          this.railScoreSpan_.railScore = railScore;
-        }
+        if (this.railScoreSpan_)
+          this.railScoreSpan_.model = model;
 
         this.$.display_unit.preferredTimeDisplayMode = model.intrinsicTimeUnit;
       }
@@ -512,10 +512,8 @@
       if (!spc.rangeOfInterest.equals(vr))
         spc.rangeOfInterest = vr;
 
-      if (this.railScoreSpan_ && this.model) {
-        var railScore = tr.e.rail.RAILScore.fromModel(this.model, vr);
-        this.railScoreSpan_.railScore = railScore;
-      }
+      if (this.railScoreSpan_ && this.model)
+        this.railScoreSpan_.model = this.model;
     },
 
     toggleHighlightVSync_: function() {
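In the timeline view above, the RAIL score span is replaced by a system-health span that is handed the model directly rather than a precomputed score. A minimal sketch of that wiring, assuming the tracing codebase is loaded; `model` and `containerEl` are hypothetical:

if (window.tr.metrics && window.tr.metrics.sh &&
    window.tr.metrics.sh.SystemHealthMetric) {
  var span = document.createElement('tr-metrics-ui-sh-system-health-span');
  // The span now consumes the model itself; no tr.e.rail.RAILScore.fromModel() call.
  span.model = model;
  containerEl.appendChild(span);
}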
diff --git a/catapult/tracing/tracing/ui/timeline_view_help_overlay.html b/catapult/tracing/tracing/ui/timeline_view_help_overlay.html
index ec06f72..bb06c80 100644
--- a/catapult/tracing/tracing/ui/timeline_view_help_overlay.html
+++ b/catapult/tracing/tracing/ui/timeline_view_help_overlay.html
@@ -111,6 +111,11 @@
       </div>
 
       <div class='pair'>
+        <div class='command'><span class='mod'></span>-click/drag</div>
+        <div class='action'>Add events to the current selection</div>
+      </div>
+
+      <div class='pair'>
         <div class='command'>double click</div>
         <div class='action'>Select all events with same title</div>
       </div>
@@ -166,11 +171,6 @@
       </div>
 
       <div class='pair'>
-        <div class='command'><span class='mod'></span></div>
-        <div class='action'>Hold for temporary zoom</div>
-      </div>
-
-      <div class='pair'>
         <div class='command'>/</div>
         <div class='action'>Search</div>
       </div>
diff --git a/catapult/tracing/tracing/ui/timeline_view_metadata_overlay.html b/catapult/tracing/tracing/ui/timeline_view_metadata_overlay.html
index f4b4b7a..0727958 100644
--- a/catapult/tracing/tracing/ui/timeline_view_metadata_overlay.html
+++ b/catapult/tracing/tracing/ui/timeline_view_metadata_overlay.html
@@ -5,9 +5,9 @@
 found in the LICENSE file.
 -->
 <link rel="import" href="/tracing/base/base.html">
-<link rel="import" href="/tracing/ui/base/overlay.html">
 <link rel="import" href="/tracing/ui/base/mouse_mode_icon.html">
-<link rel="import" href="/tracing/ui/units/generic_table_view.html">
+<link rel="import" href="/tracing/ui/base/overlay.html">
+<link rel="import" href="/tracing/value/ui/generic_table_view.html">
 
 <polymer-element name="tr-ui-timeline-view-metadata-overlay">
   <template>
@@ -18,7 +18,7 @@
       overflow: auto;
     }
     </style>
-    <tr-ui-u-generic-table-view id="gtv"></tr-ui-u-generic-table-view>
+    <tr-v-ui-generic-table-view id="gtv"></tr-v-ui-generic-table-view>
   </template>
 
   <script>
diff --git a/catapult/tracing/tracing/ui/timeline_view_test.html b/catapult/tracing/tracing/ui/timeline_view_test.html
index c5a3d10..e3e2e06 100644
--- a/catapult/tracing/tracing/ui/timeline_view_test.html
+++ b/catapult/tracing/tracing/ui/timeline_view_test.html
@@ -15,7 +15,6 @@
 'use strict';
 
 tr.b.unittest.testSuite(function() {
-  var newSliceNamed = tr.c.TestUtils.newSliceNamed;
   var Task = tr.b.Task;
 
   function setupTimeline() {
@@ -145,7 +144,8 @@
 
     // Mutate the model and update the view.
     var t123 = model.getOrCreateProcess(123).getOrCreateThread(123);
-    t123.sliceGroup.pushSlice(newSliceNamed('somethingUnusual', 0, 5));
+    t123.sliceGroup.pushSlice(tr.c.TestUtils.newSliceEx(
+        {title: 'somethingUnusual', start: 0, duration: 5}));
     view.model = model;
 
     // Verify that the new bits of the model show up in the view.
diff --git a/catapult/tracing/tracing/ui/tracks/async_slice_group_track.html b/catapult/tracing/tracing/ui/tracks/async_slice_group_track.html
index 50a7904..60349be 100644
--- a/catapult/tracing/tracing/ui/tracks/async_slice_group_track.html
+++ b/catapult/tracing/tracing/ui/tracks/async_slice_group_track.html
@@ -95,7 +95,7 @@
           return true;
         }
         return false;
-      }
+      };
 
       var subRows = [];
       for (var i = 0; i < slices.length; i++) {
@@ -121,10 +121,10 @@
           if (level === rows.length)
             rows.push([]);
           for (var h = 0; h < subSlices.length; h++) {
-             rows[level].push(subSlices[h]);
-             fitSubSlicesRecursively(subSlices[h].subSlices, level + 1, rows);
+            rows[level].push(subSlices[h]);
+            fitSubSlicesRecursively(subSlices[h].subSlices, level + 1, rows);
           }
-        }
+        };
         fitSubSlicesRecursively(slice.subSlices, index + 1, subRows);
       }
       return subRows;
diff --git a/catapult/tracing/tracing/ui/tracks/cpu_track.html b/catapult/tracing/tracing/ui/tracks/cpu_track.html
index f03f358..84b7afe 100644
--- a/catapult/tracing/tracing/ui/tracks/cpu_track.html
+++ b/catapult/tracing/tracing/ui/tracks/cpu_track.html
@@ -18,7 +18,7 @@
 tr.exportTo('tr.ui.tracks', function() {
 
   /**
-   * Visualizes a Cpu using a series of of SliceTracks.
+   * Visualizes a Cpu using a series of SliceTracks.
    * @constructor
    */
   var CpuTrack =
diff --git a/catapult/tracing/tracing/ui/tracks/drawing_container_perf_test.html b/catapult/tracing/tracing/ui/tracks/drawing_container_perf_test.html
index 86867e0..41f9aa8 100644
--- a/catapult/tracing/tracing/ui/tracks/drawing_container_perf_test.html
+++ b/catapult/tracing/tracing/ui/tracks/drawing_container_perf_test.html
@@ -83,14 +83,17 @@
     }
   };
 
-  test(new GeneralDCPerfTestCase('draw_softwareCanvas_One',
-                                 {iterations: 1}));
-  test(new GeneralDCPerfTestCase('draw_softwareCanvas_Ten',
+  // Failing on Chrome canary, see
+  // https://github.com/catapult-project/catapult/issues/1826
+  flakyTest(new GeneralDCPerfTestCase('draw_softwareCanvas_One',
+                                      {iterations: 1}));
+  // Failing on Chrome stable on Windows, see
+  // https://github.com/catapult-project/catapult/issues/1908
+  flakyTest(new GeneralDCPerfTestCase('draw_softwareCanvas_Ten',
                                  {iterations: 10}));
   test(new GeneralDCPerfTestCase('draw_softwareCanvas_AHundred',
                                  {iterations: 100}));
 
-
   function AsyncDCPerfTestCase(testName, opt_options) {
     DCPerfTestCase.call(this, testName, opt_options);
   }
diff --git a/catapult/tracing/tracing/ui/tracks/frame_track_test.html b/catapult/tracing/tracing/ui/tracks/frame_track_test.html
index 8db1b14..943e245 100644
--- a/catapult/tracing/tracing/ui/tracks/frame_track_test.html
+++ b/catapult/tracing/tracing/ui/tracks/frame_track_test.html
@@ -22,15 +22,14 @@
   var SelectionState = tr.model.SelectionState;
   var Viewport = tr.ui.TimelineViewport;
 
-  var newSliceNamed = tr.c.TestUtils.newSliceNamed;
-
   var createFrames = function() {
     var frames = undefined;
     var model = tr.c.TestUtils.newModel(function(model) {
       var process = model.getOrCreateProcess(1);
       var thread = process.getOrCreateThread(1);
       for (var i = 1; i < 5; i++) {
-        var slice = newSliceNamed('work for frame', i * 20, 10);
+        var slice = tr.c.TestUtils.newSliceEx(
+            {title: 'work for frame', start: i * 20, duration: 10});
         thread.sliceGroup.pushSlice(slice);
         var events = [slice];
         var threadTimeRanges =
diff --git a/catapult/tracing/tracing/ui/tracks/interaction_track.html b/catapult/tracing/tracing/ui/tracks/interaction_track.html
index 82dcec2..edb913b 100644
--- a/catapult/tracing/tracing/ui/tracks/interaction_track.html
+++ b/catapult/tracing/tracing/ui/tracks/interaction_track.html
@@ -35,7 +35,7 @@
     },
 
     set model(model) {
-      this.setItemsToGroup(model.interactionRecords, {
+      this.setItemsToGroup(model.userModel.expectations, {
         guid: tr.b.GUID.allocate(),
         model: model,
         getSettingsKey: function() {
diff --git a/catapult/tracing/tracing/ui/tracks/interaction_track_test.html b/catapult/tracing/tracing/ui/tracks/interaction_track_test.html
index 70de770..1b2c134 100644
--- a/catapult/tracing/tracing/ui/tracks/interaction_track_test.html
+++ b/catapult/tracing/tracing/ui/tracks/interaction_track_test.html
@@ -7,6 +7,7 @@
 
 <link rel="import" href="/tracing/core/test_utils.html">
 <link rel="import" href="/tracing/model/model.html">
+<link rel="import" href="/tracing/model/user_model/stub_expectation.html">
 <link rel="import" href="/tracing/ui/timeline_viewport.html">
 <link rel="import" href="/tracing/ui/tracks/interaction_track.html">
 
@@ -14,7 +15,7 @@
 'use strict';
 
 tr.b.unittest.testSuite(function() {
-  // InteractionRecords should be sorted by start time, not title, so that
+  // UserExpectations should be sorted by start time, not title, so that
   // AsyncSliceGroupTrack.buildSubRows_ can lay them out in as few tracks as
   // possible, so that they mesh instead of stacking unnecessarily.
   test('instantiate', function() {
@@ -26,17 +27,18 @@
     track.model = tr.c.TestUtils.newModel(function(model) {
       var process = model.getOrCreateProcess(1);
       var thread = process.getOrCreateThread(1);
-      thread.sliceGroup.pushSlice(tr.c.TestUtils.newSlice(0, 200));
-      model.interactionRecords.push(new tr.model.InteractionRecord(
-            model, 'a', 0, 100, 100));
-      model.interactionRecords.push(new tr.model.InteractionRecord(
-            model, 'b', 1, 0, 100));
-      model.interactionRecords.push(new tr.model.InteractionRecord(
-            model, 'c', 2, 150, 50));
-      model.interactionRecords.push(new tr.model.InteractionRecord(
-            model, 'd', 3, 50, 100));
-      model.interactionRecords.push(new tr.model.InteractionRecord(
-            model, 'e', 4, 0, 50));
+      thread.sliceGroup.pushSlice(tr.c.TestUtils.newSliceEx(
+          {start: 0, duration: 200}));
+      model.userModel.expectations.push(new tr.model.um.StubExpectation(
+          {parentModel: model, start: 100, duration: 100}));
+      model.userModel.expectations.push(new tr.model.um.StubExpectation(
+          {parentModel: model, start: 0, duration: 100}));
+      model.userModel.expectations.push(new tr.model.um.StubExpectation(
+          {parentModel: model, start: 150, duration: 50}));
+      model.userModel.expectations.push(new tr.model.um.StubExpectation(
+          {parentModel: model, start: 50, duration: 100}));
+      model.userModel.expectations.push(new tr.model.um.StubExpectation(
+          {parentModel: model, start: 0, duration: 50}));
       // Model.createImportTracesTask() automatically sorts IRs by start time.
     });
     assert.equal(2, track.subRows_.length);
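The test above reflects the user-model rename: interaction records become expectations stored on model.userModel and are built from an options dictionary. A minimal sketch; the start/duration values are hypothetical:

// Was: model.interactionRecords.push(
//     new tr.model.InteractionRecord(model, 'a', 0, 100, 100));
model.userModel.expectations.push(new tr.model.um.StubExpectation(
    {parentModel: model, start: 0, duration: 100}));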
diff --git a/catapult/tracing/tracing/ui/tracks/memory_dump_track_test_utils.html b/catapult/tracing/tracing/ui/tracks/memory_dump_track_test_utils.html
index 50a87da..f534611 100644
--- a/catapult/tracing/tracing/ui/tracks/memory_dump_track_test_utils.html
+++ b/catapult/tracing/tracing/ui/tracks/memory_dump_track_test_utils.html
@@ -5,12 +5,12 @@
 found in the LICENSE file.
 -->
 
-<link rel="import" href="/tracing/model/attribute.html">
 <link rel="import" href="/tracing/model/global_memory_dump.html">
-<link rel="import" href="/tracing/model/memory_allocator_dump.html">
+<link rel="import" href="/tracing/model/memory_dump_test_utils.html">
+<link rel="import" href="/tracing/model/model.html">
 <link rel="import" href="/tracing/model/process_memory_dump.html">
 <link rel="import" href="/tracing/model/selection_state.html">
-<link rel="import" href="/tracing/model/model.html">
+<link rel="import" href="/tracing/model/vm_region.html">
 
 <script>
 'use strict';
@@ -21,27 +21,27 @@
 tr.exportTo('tr.ui.tracks', function() {
   var ProcessMemoryDump = tr.model.ProcessMemoryDump;
   var GlobalMemoryDump = tr.model.GlobalMemoryDump;
-  var MemoryAllocatorDump = tr.model.MemoryAllocatorDump;
-  var MemoryAllocatorDumpLink = tr.model.MemoryAllocatorDumpLink;
   var VMRegion = tr.model.VMRegion;
-  var VMRegionByteStats = tr.model.VMRegionByteStats;
-  var ScalarAttribute = tr.model.ScalarAttribute;
+  var VMRegionClassificationNode = tr.model.VMRegionClassificationNode;
   var SelectionState = tr.model.SelectionState;
+  var newAllocatorDump = tr.model.MemoryDumpTestUtils.newAllocatorDump;
+  var addOwnershipLink = tr.model.MemoryDumpTestUtils.addOwnershipLink;
 
   function createVMRegions(pssValues) {
-    return pssValues.map(function(pssValue, i) {
-      return VMRegion.fromDict({
-        startAddress: 1000 * i,
-        sizeInBytes: 1000,
-        protectionFlags: VMRegion.PROTECTION_FLAG_READ,
-        mappedFile: '[stack' + i + ']',
-        byteStats: {
-          privateDirtyResident: pssValue / 3,
-          swapped: pssValue * 3,
-          proportionalResident: pssValue
-        }
-      });
-    });
+    return VMRegionClassificationNode.fromRegions(
+        pssValues.map(function(pssValue, i) {
+          return VMRegion.fromDict({
+            startAddress: 1000 * i,
+            sizeInBytes: 1000,
+            protectionFlags: VMRegion.PROTECTION_FLAG_READ,
+            mappedFile: '[stack' + i + ']',
+            byteStats: {
+              privateDirtyResident: pssValue / 3,
+              swapped: pssValue * 3,
+              proportionalResident: pssValue
+            }
+          });
+        }));
   }
 
   function createAllocatorDumps(memoryDump, dumpData) {
@@ -49,9 +49,7 @@
     var allocatorDumps = tr.b.mapItems(dumpData, function(allocatorName, data) {
       var size = data.size;
       assert.typeOf(size, 'number');  // Sanity check.
-      var dump = new MemoryAllocatorDump(memoryDump, allocatorName);
-      dump.addAttribute('size', new ScalarAttribute('bytes', size));
-      return dump;
+      return newAllocatorDump(memoryDump, allocatorName, { size: size });
     });
 
     // Add ownership links between them.
@@ -65,9 +63,7 @@
       var ownedDump = allocatorDumps[owns];
       assert.isDefined(ownedDump);  // Sanity check.
 
-      var ownershipLink = new MemoryAllocatorDumpLink(ownerDump, ownedDump);
-      ownerDump.owns = ownershipLink;
-      ownedDump.ownedBy.push(ownershipLink);
+      addOwnershipLink(ownerDump, ownedDump);
     });
 
     return tr.b.dictionaryValues(allocatorDumps);
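The test-utils change above adopts the shared MemoryDumpTestUtils helpers in place of hand-built MemoryAllocatorDump and MemoryAllocatorDumpLink objects. A minimal sketch, assuming `pmd` is a ProcessMemoryDump; the allocator names and sizes are hypothetical:

var newAllocatorDump = tr.model.MemoryDumpTestUtils.newAllocatorDump;
var addOwnershipLink = tr.model.MemoryDumpTestUtils.addOwnershipLink;

// Creates a dump with a 'size' numeric attached, as in createAllocatorDumps() above.
var ownerDump = newAllocatorDump(pmd, 'malloc', {size: 1024});
var ownedDump = newAllocatorDump(pmd, 'partition_alloc', {size: 512});

// Wires owns/ownedBy on both dumps in a single call.
addOwnershipLink(ownerDump, ownedDump);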
diff --git a/catapult/tracing/tracing/ui/tracks/memory_dump_track_util.html b/catapult/tracing/tracing/ui/tracks/memory_dump_track_util.html
index 5e6b2ef..d00fca9 100644
--- a/catapult/tracing/tracing/ui/tracks/memory_dump_track_util.html
+++ b/catapult/tracing/tracing/ui/tracks/memory_dump_track_util.html
@@ -19,8 +19,8 @@
 tr.exportTo('tr.ui.tracks', function() {
   var ColorScheme = tr.b.ColorScheme;
 
-  var DISPLAYED_SIZE_ATTRIBUTE_NAME =
-      tr.model.MemoryAllocatorDump.DISPLAYED_SIZE_ATTRIBUTE_NAME;
+  var DISPLAYED_SIZE_NUMERIC_NAME =
+      tr.model.MemoryAllocatorDump.DISPLAYED_SIZE_NUMERIC_NAME;
 
   /**
    * Add numeric values from a source dictionary to the numeric values in
@@ -53,8 +53,7 @@
       // TODO(petrcermak): Find a less hacky way to do this.
       if (allocatorDump.fullName === 'tracing')
         return;
-      var allocatorSize = allocatorDump.attributes[
-          DISPLAYED_SIZE_ATTRIBUTE_NAME];
+      var allocatorSize = allocatorDump.numerics[DISPLAYED_SIZE_NUMERIC_NAME];
       if (allocatorSize === undefined)
         return;
       var allocatorSizeValue = allocatorSize.value;
@@ -189,20 +188,14 @@
       var pssBase = 0;
       tr.b.iterItems(pidToPoints, function(pid, points) {
         var processMemoryDump = globalDump.processMemoryDumps[pid];
-        var pss;
-        if (processMemoryDump === undefined) {
-          // If no dump was found, assume that the process is dead.
-          pss = 0;
-        } else {
-          pss = processMemoryDump.getMostRecentTotalVmRegionStat(
-              'proportionalResident');
-          if (pss === undefined) {
-            // If the dump does not provide the necessary information (namely
-            // most recent VM regions), assume zero.
-            pss = 0;
-          }
+        var cumulativePss = pssBase;
+        // If no dump was found (probably dead) or it does not provide the
+        // necessary information (namely most recent VM regions), assume zero.
+        if (processMemoryDump !== undefined) {
+          var vmRegions = processMemoryDump.mostRecentVmRegions;
+          if (vmRegions !== undefined)
+            cumulativePss += vmRegions.byteStats.proportionalResident || 0;
         }
-        var cumulativePss = pssBase + pss;
         points.push(new tr.ui.tracks.ChartPoint(
             globalDump, globalDump.start, cumulativePss, pssBase));
         pssBase = cumulativePss;
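
A minimal sketch of the two lookups the hunks above move to: displayed sizes
now come from the numerics map rather than attributes, and PSS is read from the
byteStats of the VM-region classification tree. allocatorDump and
processMemoryDump stand in for the loop variables in the surrounding code.

    var DISPLAYED_SIZE_NUMERIC_NAME =
        tr.model.MemoryAllocatorDump.DISPLAYED_SIZE_NUMERIC_NAME;

    // Allocated-memory series: read the displayed size numeric, if present.
    var allocatorSize = allocatorDump.numerics[DISPLAYED_SIZE_NUMERIC_NAME];
    var allocatorBytes = allocatorSize === undefined ? 0 : allocatorSize.value;

    // PSS series: mostRecentVmRegions is the classification-tree root whose
    // byteStats aggregate all regions, so no per-stat helper is needed.
    var pss = 0;
    var vmRegions = processMemoryDump.mostRecentVmRegions;
    if (vmRegions !== undefined)
      pss = vmRegions.byteStats.proportionalResident || 0;
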
diff --git a/catapult/tracing/tracing/ui/tracks/model_track.html b/catapult/tracing/tracing/ui/tracks/model_track.html
index 0ed5583..2405ea1 100644
--- a/catapult/tracing/tracing/ui/tracks/model_track.html
+++ b/catapult/tracing/tracing/ui/tracks/model_track.html
@@ -99,7 +99,7 @@
     },
 
     updateContentsForLowerMode_: function() {
-      if (this.model_.interactionRecords.length) {
+      if (this.model_.userModel.expectations.length) {
         var mrt = new tr.ui.tracks.InteractionTrack(this.viewport_);
         mrt.model = this.model_;
         this.appendChild(mrt);
diff --git a/catapult/tracing/tracing/ui/tracks/object_instance_track_test.html b/catapult/tracing/tracing/ui/tracks/object_instance_track_test.html
index 27523a5..6d6dec9 100644
--- a/catapult/tracing/tracing/ui/tracks/object_instance_track_test.html
+++ b/catapult/tracing/tracing/ui/tracks/object_instance_track_test.html
@@ -8,6 +8,7 @@
 <link rel="import" href="/tracing/core/test_utils.html">
 <link rel="import" href="/tracing/model/event_set.html">
 <link rel="import" href="/tracing/model/object_collection.html">
+<link rel="import" href="/tracing/model/scoped_id.html">
 <link rel="import" href="/tracing/model/selection_state.html">
 <link rel="import" href="/tracing/ui/timeline_viewport.html">
 <link rel="import" href="/tracing/ui/tracks/drawing_container.html">
@@ -23,15 +24,17 @@
 
   var createObjects = function() {
     var objects = new tr.model.ObjectCollection({});
-    objects.idWasCreated('0x1000', 'tr.e.cc', 'Frame', 10);
-    objects.addSnapshot('0x1000', 'tr.e.cc', 'Frame', 10, 'snapshot-1');
-    objects.addSnapshot('0x1000', 'tr.e.cc', 'Frame', 25, 'snapshot-2');
-    objects.addSnapshot('0x1000', 'tr.e.cc', 'Frame', 40, 'snapshot-3');
-    objects.idWasDeleted('0x1000', 'tr.e.cc', 'Frame', 45);
+    var scopedId1 = new tr.model.ScopedId('ptr', '0x1000');
+    objects.idWasCreated(scopedId1, 'tr.e.cc', 'Frame', 10);
+    objects.addSnapshot(scopedId1, 'tr.e.cc', 'Frame', 10, 'snapshot-1');
+    objects.addSnapshot(scopedId1, 'tr.e.cc', 'Frame', 25, 'snapshot-2');
+    objects.addSnapshot(scopedId1, 'tr.e.cc', 'Frame', 40, 'snapshot-3');
+    objects.idWasDeleted(scopedId1, 'tr.e.cc', 'Frame', 45);
 
-    objects.idWasCreated('0x1001', 'skia', 'Picture', 20);
-    objects.addSnapshot('0x1001', 'skia', 'Picture', 20, 'snapshot-1');
-    objects.idWasDeleted('0x1001', 'skia', 'Picture', 25);
+    var scopedId2 = new tr.model.ScopedId('ptr', '0x1001');
+    objects.idWasCreated(scopedId2, 'skia', 'Picture', 20);
+    objects.addSnapshot(scopedId2, 'skia', 'Picture', 20, 'snapshot-1');
+    objects.idWasDeleted(scopedId2, 'skia', 'Picture', 25);
     return objects;
   };
 
@@ -96,7 +99,7 @@
     track.objectInstances = frames;
 
     var instance = new tr.model.ObjectInstance(
-        {}, '0x1000', 'cat', 'n', 10);
+        {}, new tr.model.ScopedId('ptr', '0x1000'), 'cat', 'n', 10);
 
     assert.doesNotThrow(function() {
       track.addEventNearToProvidedEventToSelection(instance, 0, undefined);
diff --git a/catapult/tracing/tracing/ui/tracks/process_summary_track_test.html b/catapult/tracing/tracing/ui/tracks/process_summary_track_test.html
index 3015e23..11a6852 100644
--- a/catapult/tracing/tracing/ui/tracks/process_summary_track_test.html
+++ b/catapult/tracing/tracing/ui/tracks/process_summary_track_test.html
@@ -6,17 +6,15 @@
 -->
 
 <link rel="import" href="/tracing/core/test_utils.html">
-<link rel="import" href="/tracing/ui/tracks/process_summary_track.html">
-<link rel="import" href="/tracing/model/slice_group.html">
 <link rel="import" href="/tracing/model/model.html">
+<link rel="import" href="/tracing/model/slice_group.html">
+<link rel="import" href="/tracing/ui/tracks/process_summary_track.html">
 
 <script>
 'use strict';
 
 tr.b.unittest.testSuite(function() {
   var ProcessSummaryTrack = tr.ui.tracks.ProcessSummaryTrack;
-  var newSlice = tr.c.TestUtils.newSlice;
-  var newSliceNamed = tr.c.TestUtils.newSliceNamed;
 
   test('buildRectSimple', function() {
     var process;
@@ -25,9 +23,11 @@
       // XXXX
       //    XXXX
       var thread1 = process.getOrCreateThread(1);
-      thread1.sliceGroup.pushSlice(newSlice(1, 4));
+      thread1.sliceGroup.pushSlice(tr.c.TestUtils.newSliceEx(
+          {start: 1, duration: 4}));
       var thread2 = process.getOrCreateThread(2);
-      thread2.sliceGroup.pushSlice(newSlice(4, 4));
+      thread2.sliceGroup.pushSlice(tr.c.TestUtils.newSliceEx(
+          {start: 4, duration: 4}));
     });
 
     var rects = ProcessSummaryTrack.buildRectsFromProcess(process);
@@ -45,14 +45,21 @@
       // XXXX    X X XX
       //    XXXX XXX    X
       var thread1 = process.getOrCreateThread(1);
-      thread1.sliceGroup.pushSlice(newSlice(1, 4));
-      thread1.sliceGroup.pushSlice(newSlice(9, 1));
-      thread1.sliceGroup.pushSlice(newSlice(11, 1));
-      thread1.sliceGroup.pushSlice(newSlice(13, 2));
+      thread1.sliceGroup.pushSlice(tr.c.TestUtils.newSliceEx(
+          {start: 1, duration: 4}));
+      thread1.sliceGroup.pushSlice(tr.c.TestUtils.newSliceEx(
+          {start: 9, duration: 1}));
+      thread1.sliceGroup.pushSlice(tr.c.TestUtils.newSliceEx(
+          {start: 11, duration: 1}));
+      thread1.sliceGroup.pushSlice(tr.c.TestUtils.newSliceEx(
+          {start: 13, duration: 2}));
       var thread2 = process.getOrCreateThread(2);
-      thread2.sliceGroup.pushSlice(newSlice(4, 4));
-      thread2.sliceGroup.pushSlice(newSlice(9, 3));
-      thread2.sliceGroup.pushSlice(newSlice(16, 1));
+      thread2.sliceGroup.pushSlice(tr.c.TestUtils.newSliceEx(
+          {start: 4, duration: 4}));
+      thread2.sliceGroup.pushSlice(tr.c.TestUtils.newSliceEx(
+          {start: 9, duration: 3}));
+      thread2.sliceGroup.pushSlice(tr.c.TestUtils.newSliceEx(
+          {start: 16, duration: 1}));
     });
 
     var rects = ProcessSummaryTrack.buildRectsFromProcess(process);
@@ -73,8 +80,10 @@
     var model = tr.c.TestUtils.newModel(function(model) {
       //    [    unimportant    ]
       //         [important]
-      var a = newSliceNamed('unimportant', 4, 21);
-      var b = newSliceNamed('important', 9, 11);
+      var a = tr.c.TestUtils.newSliceEx(
+          {title: 'unimportant', start: 4, duration: 21});
+      var b = tr.c.TestUtils.newSliceEx(
+          {title: 'important', start: 9, duration: 11});
       b.important = true;
       process = model.getOrCreateProcess(1);
       process.getOrCreateThread(1).sliceGroup.pushSlices([a, b]);
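
The removed newSlice and newSliceNamed helpers are replaced by newSliceEx,
which takes a single options dictionary; a sketch of the equivalent calls, with
illustrative values and 'thread' standing in for a thread created in the test
model.

    // Equivalent of the old newSlice(1, 4):
    thread.sliceGroup.pushSlice(tr.c.TestUtils.newSliceEx(
        {start: 1, duration: 4}));

    // Equivalent of the old newSliceNamed('important', 9, 11):
    thread.sliceGroup.pushSlice(tr.c.TestUtils.newSliceEx(
        {title: 'important', start: 9, duration: 11}));
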
diff --git a/catapult/tracing/tracing/ui/tracks/process_track_base.html b/catapult/tracing/tracing/ui/tracks/process_track_base.html
index 9eeec86..d96f1ce 100644
--- a/catapult/tracing/tracing/ui/tracks/process_track_base.html
+++ b/catapult/tracing/tracing/ui/tracks/process_track_base.html
@@ -7,6 +7,10 @@
 
 <link rel="stylesheet" href="/tracing/ui/tracks/process_track_base.css">
 
+<link rel="import" href="/tracing/core/filter.html">
+<link rel="import" href="/tracing/model/model_settings.html">
+<link rel="import" href="/tracing/ui/base/dom_helpers.html">
+<link rel="import" href="/tracing/ui/base/ui.html">
 <link rel="import" href="/tracing/ui/tracks/container_track.html">
 <link rel="import" href="/tracing/ui/tracks/counter_track.html">
 <link rel="import" href="/tracing/ui/tracks/frame_track.html">
@@ -14,10 +18,6 @@
 <link rel="import" href="/tracing/ui/tracks/process_summary_track.html">
 <link rel="import" href="/tracing/ui/tracks/spacing_track.html">
 <link rel="import" href="/tracing/ui/tracks/thread_track.html">
-<link rel="import" href="/tracing/core/filter.html">
-<link rel="import" href="/tracing/model/model_settings.html">
-<link rel="import" href="/tracing/ui/base/ui.html">
-<link rel="import" href="/tracing/ui/base/dom_helpers.html">
 
 <script>
 'use strict';
@@ -168,7 +168,6 @@
       var track = new tr.ui.tracks.FrameTrack(this.viewport);
       track.frames = frames;
       this.appendChild(track);
-      this.backgroundProvider = track;
     },
 
     appendObjectInstanceTracks_: function() {
diff --git a/catapult/tracing/tracing/ui/tracks/spacing_track.html b/catapult/tracing/tracing/ui/tracks/spacing_track.html
index 26e9f79..5eea1c9 100644
--- a/catapult/tracing/tracing/ui/tracks/spacing_track.html
+++ b/catapult/tracing/tracing/ui/tracks/spacing_track.html
@@ -15,7 +15,10 @@
 
 tr.exportTo('tr.ui.tracks', function() {
   /**
+   * A track used to provide whitespace between the tracks above and below it.
+   *
    * @constructor
+   * @extends {tr.ui.tracks.Track}
    */
   var SpacingTrack = tr.ui.b.define('spacing-track', tr.ui.tracks.Track);
 
@@ -30,9 +33,6 @@
       this.appendChild(this.heading_);
     },
 
-    draw: function(type, viewLWorld, viewRWorld) {
-    },
-
     addAllEventsMatchingFilterToSelection: function(filter, selection) {
     }
   };
diff --git a/catapult/tracing/tracing/ui/tracks/track.html b/catapult/tracing/tracing/ui/tracks/track.html
index 944288d..09a54c2 100644
--- a/catapult/tracing/tracing/ui/tracks/track.html
+++ b/catapult/tracing/tracing/ui/tracks/track.html
@@ -12,14 +12,9 @@
 <script>
 'use strict';
 
-/**
- * @fileoverview Renders an array of slices into the provided div,
- * using a child canvas element. Uses a FastRectRenderer to draw only
- * the visible slices.
- */
 tr.exportTo('tr.ui.tracks', function() {
   /**
-   * The base class for all tracks.
+   * The base class for all tracks, which render data into a provided div.
    * @constructor
    */
   var Track = tr.ui.b.define('track',
@@ -80,6 +75,11 @@
     updateContents_: function() {
     },
 
+    /**
+     * Wrapper function around draw() that performs transformations on the
+     * context necessary for the track's contents to be drawn in the right place
+     * given the current pan and zoom.
+     */
     drawTrack: function(type) {
       var ctx = this.context();
 
diff --git a/catapult/tracing/tracing/ui/units/array_of_numbers_span.html b/catapult/tracing/tracing/ui/units/array_of_numbers_span.html
deleted file mode 100644
index ea862d6..0000000
--- a/catapult/tracing/tracing/ui/units/array_of_numbers_span.html
+++ /dev/null
@@ -1,75 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/base/statistics.html">
-<script>
-'use strict';
-tr.exportTo('tr.ui.units', function() {
-  var ArrayOfNumbersSummaryModes = {
-    AVERAGE_MODE: 'average-mode',
-    TOTAL_MODE: 'total-mode'
-  };
-  return {
-    ArrayOfNumbersSummaryModes: ArrayOfNumbersSummaryModes
-  };
-});
-</script>
-<polymer-element name="tr-ui-u-array-of-numbers-span">
-  <template>
-  </template>
-  <script>
-  'use strict';
-
-  Polymer({
-    created: function() {
-      this.numbers_ = undefined;
-      this.summaryMode_ = tr.ui.units.ArrayOfNumbersSummaryModes.AVERAGE_MODE;
-    },
-
-    get summaryMode() {
-      return this.summaryMode_;
-    },
-
-    set summaryMode(summaryMode) {
-      this.summaryMode_ = summaryMode;
-      this.updateContents_();
-    },
-
-    get numbers() {
-      return this.numbers_;
-    },
-
-    set numbers(numbers) {
-      if (numbers === undefined) {
-        this.numbers_ = undefined;
-        this.updateContents_();
-        return;
-      }
-      if (!(numbers instanceof Array))
-        throw new Error('Must provide an array');
-      this.numbers_ = numbers;
-      this.updateContents_();
-    },
-
-    updateContents_: function() {
-      if (this.numbers_ === undefined) {
-        this.shadowRoot.textContent = '-';
-        return;
-      }
-
-      var ArrayOfNumbersSummaryModes = tr.ui.units.ArrayOfNumbersSummaryModes;
-      var value;
-      if (this.summaryMode_ === ArrayOfNumbersSummaryModes.AVERAGE_MODE)
-        value = tr.b.Statistics.mean(this.numbers_);
-      else
-        value = tr.b.Statistics.sum(this.numbers_);
-
-      var valueRounded = Math.round(value * 1000.0) / 1000.0;
-      this.shadowRoot.textContent = valueRounded;
-    }
-  });
-  </script>
-</polymer-element>
diff --git a/catapult/tracing/tracing/ui/units/array_of_numbers_span_test.html b/catapult/tracing/tracing/ui/units/array_of_numbers_span_test.html
deleted file mode 100644
index 15320dd..0000000
--- a/catapult/tracing/tracing/ui/units/array_of_numbers_span_test.html
+++ /dev/null
@@ -1,28 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/ui/units/array_of_numbers_span.html">
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  test('instantiateInAverageMode', function() {
-    var span = document.createElement('tr-ui-u-array-of-numbers-span');
-    span.numbers = [1, 2, 3];
-    span.summaryMode = tr.ui.units.ArrayOfNumbersSummaryModes.AVERAGE_MODE;
-    this.addHTMLOutput(span);
-    assert.equal(span.shadowRoot.textContent, '2');
-  });
-
-  test('instantiateInTotalsMode', function() {
-    var span = document.createElement('tr-ui-u-array-of-numbers-span');
-    span.numbers = [1, 2, 3];
-    span.summaryMode = tr.ui.units.ArrayOfNumbersSummaryModes.TOTALS_MODE;
-    this.addHTMLOutput(span);
-    assert.equal(span.shadowRoot.textContent, '6');
-  });
-});
-</script>
diff --git a/catapult/tracing/tracing/ui/units/generic_table_view.html b/catapult/tracing/tracing/ui/units/generic_table_view.html
deleted file mode 100644
index 85fcef3..0000000
--- a/catapult/tracing/tracing/ui/units/generic_table_view.html
+++ /dev/null
@@ -1,299 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/base/iteration_helpers.html">
-<link rel="import" href="/tracing/base/statistics.html">
-<link rel="import" href="/tracing/ui/base/table.html">
-<link rel="import" href="/tracing/base/units/generic_table.html">
-<link rel="import" href="/tracing/ui/analysis/generic_object_view.html">
-<link rel="import" href="/tracing/ui/units/array_of_numbers_span.html">
-
-<polymer-element name="tr-ui-u-generic-table-view">
-  <template>
-    <style>
-    :host {
-    display: flex;
-    }
-    #table {
-      flex: 1 1 auto;
-      align-self: stretch;
-    }
-    </style>
-    <tr-ui-b-table id="table"></tr-ui-b-table>
-  </template>
-</polymer-element>
-
-<script>
-'use strict';
-
-tr.exportTo('tr.ui.units', function() {
-  var TEXT_COLUMN_MODE = 1;
-  var NUMERIC_COLUMN_MODE = 2;
-  var ELEMENT_COLUMN_MODE = 3;
-
-  function isNumeric(value) {
-    // TODO(nduca): Also consider other units that are numeric.
-    if ((typeof value) === 'number')
-      return true;
-    else if (value instanceof Number)
-      return true;
-    return false;
-  }
-
-  function GenericTableViewTotalsItem(opt_values) {
-    if (opt_values !== undefined)
-      this.values = opt_values;
-    else
-      this.values = [];
-  }
-
-  function GenericTableViewColumnDescriptor(fieldName, firstFieldValue) {
-    this.title = fieldName;
-    this.fieldName = fieldName;
-
-    this.updateModeGivenValue(firstFieldValue);
-  }
-
-  GenericTableViewColumnDescriptor.prototype = {
-    get columnMode() {
-      return this.columnMode_;
-    },
-
-    get isInNumericMode() {
-      return this.columnMode_ === NUMERIC_COLUMN_MODE;
-    },
-
-    cmp: function(a, b) {
-      if (this.columnMode_ === ELEMENT_COLUMN_MODE)
-        return 0;
-
-      return tr.b.comparePossiblyUndefinedValues(a, b, function(a, b) {
-        var vA = a[this.fieldName];
-        var vB = b[this.fieldName];
-        return tr.b.comparePossiblyUndefinedValues(vA, vB, function(vA, vB) {
-          if (vA.localeCompare)
-            return vA.localeCompare(vB);
-          return vA - vB;
-        }, this);
-      }, this);
-    },
-
-    updateModeGivenValue: function(fieldValue) {
-      if (this.columnMode_ === undefined) {
-        if (fieldValue === undefined || fieldValue === null)
-          return;
-
-        if (isNumeric(fieldValue)) {
-          this.columnMode_ = NUMERIC_COLUMN_MODE;
-          return;
-        }
-
-        if (fieldValue instanceof HTMLElement) {
-          this.columnMode_ = ELEMENT_COLUMN_MODE;
-          return;
-        }
-
-        this.columnMode_ = TEXT_COLUMN_MODE;
-        return;
-      }
-
-      // Undefineds & nulls shouldn't change the mode.
-      if (fieldValue === undefined || fieldValue === null)
-        return;
-
-      // If we were already in numeric mode, then we don't
-      // need to put it into numeric mode again. And, if we were
-      // previously in text mode, then we can't go into numeric mode now.
-      if (isNumeric(fieldValue))
-        return;
-
-      if (fieldValue instanceof HTMLElement) {
-        this.columnMode_ = ELEMENT_COLUMN_MODE;
-        return;
-      }
-
-      if (this.columnMode_ === NUMERIC_COLUMN_MODE)
-        this.columnMode_ = TEXT_COLUMN_MODE;
-    },
-
-    value: function(item) {
-      var fieldValue = item[this.fieldName];
-      if (fieldValue instanceof GenericTableViewTotalsItem) {
-        var span = document.createElement('tr-ui-u-array-of-numbers-span');
-        span.summaryMode = tr.ui.units.ArrayOfNumbersSummaryModes.TOTAL_MODE;
-        span.numbers = fieldValue.values;
-        return span;
-      }
-
-      if (fieldValue === undefined)
-        return '-';
-
-      if (fieldValue instanceof HTMLElement)
-        return fieldValue;
-
-      if (fieldValue instanceof Object) {
-        var gov = document.createElement('tr-ui-a-generic-object-view');
-        gov.object = fieldValue;
-        return gov;
-      }
-
-      // TODO(nduca): Use units objects if applicable.
-      return fieldValue;
-    }
-  };
-
-  Polymer('tr-ui-u-generic-table-view', {
-    created: function() {
-      this.items_ = undefined;
-      this.importantColumNames_ = [];
-    },
-
-    get items() {
-      return this.items_;
-    },
-
-    set items(itemsOrGenericTable) {
-      if (itemsOrGenericTable === undefined) {
-        this.items_ = undefined;
-      } else if (itemsOrGenericTable instanceof Array) {
-        this.items_ = itemsOrGenericTable;
-      } else if (itemsOrGenericTable instanceof tr.b.u.GenericTable) {
-        this.items_ = itemsOrGenericTable.items;
-      }
-      this.updateContents_();
-    },
-
-    get importantColumNames() {
-      return this.importantColumNames_;
-    },
-
-    set importantColumNames(importantColumNames) {
-      this.importantColumNames_ = importantColumNames;
-      this.updateContents_();
-    },
-
-    createColumns_: function() {
-      var columnsByName = {};
-      this.items_.forEach(function(item) {
-        tr.b.iterItems(item, function(itemFieldName, itemFieldValue) {
-          var colDesc = columnsByName[itemFieldName];
-          if (colDesc !== undefined) {
-            colDesc.updateModeGivenValue(itemFieldValue);
-            return;
-          }
-
-          colDesc = new GenericTableViewColumnDescriptor(
-              itemFieldName, itemFieldValue);
-          columnsByName[itemFieldName] = colDesc;
-        }, this);
-      }, this);
-
-      var columns = tr.b.dictionaryValues(columnsByName);
-      if (columns.length === 0)
-        return undefined;
-
-      // Sort by name.
-      var isColumnNameImportant = {};
-      var importantColumNames = this.importantColumNames || [];
-      importantColumNames.forEach(function(icn) {
-        isColumnNameImportant[icn] = true;
-      });
-      columns.sort(function(a, b) {
-        var iA = isColumnNameImportant[a.title] ? 1 : 0;
-        var iB = isColumnNameImportant[b.title] ? 1 : 0;
-        if ((iB - iA) !== 0)
-          return iB - iA;
-        return a.title.localeCompare(b.title);
-      });
-
-      // Set sizes. This is convoluted by the fact that the first
-      // table column must have fixed size.
-      var colWidthPercentage;
-      if (columns.length == 1)
-        colWidthPercentage = '100%';
-      else
-        colWidthPercentage = (100 / (columns.length - 1)).toFixed(3) + '%';
-      columns[0].width = '250px';
-      for (var i = 1; i < columns.length; i++)
-        columns[i].width = colWidthPercentage;
-
-      return columns;
-    },
-
-    createFooterRowsIfNeeded_: function(columns) {
-      // Make totals row if needed.
-      var hasColumnThatIsNumeric = columns.some(function(column) {
-        return column.isInNumericMode;
-      });
-      if (!hasColumnThatIsNumeric)
-        return [];
-
-      var totalsItems = {};
-      columns.forEach(function(column) {
-        if (!column.isInNumericMode)
-          return;
-        var totalsItem = new GenericTableViewTotalsItem();
-        this.items_.forEach(function(item) {
-          var fieldValue = item[column.fieldName];
-          if (fieldValue === undefined || fieldValue === null)
-            return;
-          totalsItem.values.push(fieldValue);
-        });
-        totalsItems[column.fieldName] = totalsItem;
-      }, this);
-
-      return [totalsItems];
-    },
-
-    updateContents_: function() {
-      var columns;
-      if (this.items_ !== undefined)
-        columns = this.createColumns_();
-
-      if (!columns) {
-        this.$.table.tableColumns = [];
-        this.$.table.tableRows = [];
-        this.$.table.footerRows = [];
-        return;
-      }
-
-      this.$.table.tableColumns = columns;
-      this.$.table.tableRows = this.items_;
-      this.$.table.footerRows = this.createFooterRowsIfNeeded_(columns);
-      this.$.table.rebuild();
-    },
-
-    get selectionMode() {
-      return this.$.table.selectionMode;
-    },
-
-    set selectionMode(selectionMode) {
-      this.$.table.selectionMode = selectionMode;
-    },
-
-    get rowHighlightStyle() {
-      return this.$.table.rowHighlightStyle;
-    },
-
-    set rowHighlightStyle(rowHighlightStyle) {
-      this.$.table.rowHighlightStyle = rowHighlightStyle;
-    },
-
-    get cellHighlightStyle() {
-      return this.$.table.cellHighlightStyle;
-    },
-
-    set cellHighlightStyle(cellHighlightStyle) {
-      this.$.table.cellHighlightStyle = cellHighlightStyle;
-    }
-  });
-
-  return {
-    GenericTableViewTotalsItem: GenericTableViewTotalsItem,
-    GenericTableViewColumnDescriptor: GenericTableViewColumnDescriptor
-  };
-});
-</script>
diff --git a/catapult/tracing/tracing/ui/units/generic_table_view_test.html b/catapult/tracing/tracing/ui/units/generic_table_view_test.html
deleted file mode 100644
index ef9117e..0000000
--- a/catapult/tracing/tracing/ui/units/generic_table_view_test.html
+++ /dev/null
@@ -1,198 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2014 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/ui/units/generic_table_view.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  var GenericTableViewColumnDescriptor =
-      tr.ui.units.GenericTableViewColumnDescriptor;
-  var GenericTableViewTotalsItem = tr.ui.units.GenericTableViewTotalsItem;
-
-  test('descBasicNumericMode', function() {
-    var colDesc = new GenericTableViewColumnDescriptor('a');
-    assert.isFalse(colDesc.isInNumericMode);
-
-    colDesc.updateModeGivenValue(4);
-    assert.isTrue(colDesc.isInNumericMode);
-
-    colDesc.updateModeGivenValue(4);
-    assert.isTrue(colDesc.isInNumericMode);
-
-    colDesc.updateModeGivenValue(undefined);
-    colDesc.updateModeGivenValue(null);
-    assert.isTrue(colDesc.isInNumericMode);
-
-    colDesc.updateModeGivenValue('a');
-    assert.isFalse(colDesc.isInNumericMode);
-  });
-
-  test('descBasicNonNumericMode', function() {
-    var colDesc = new GenericTableViewColumnDescriptor('a');
-    assert.isFalse(colDesc.isInNumericMode);
-    colDesc.updateModeGivenValue(4);
-    assert.isTrue(colDesc.isInNumericMode);
-    colDesc.updateModeGivenValue('a');
-    assert.isFalse(colDesc.isInNumericMode);
-  });
-
-  test('descCmpWithNumbers', function() {
-    var colDesc = new GenericTableViewColumnDescriptor('a', 1);
-    assert.equal(colDesc.cmp({a: 1}, {a: 2}), -1);
-    assert.equal(colDesc.cmp({a: 1}, undefined), -1);
-  });
-
-  test('descCmpWithText', function() {
-    var colDesc = new GenericTableViewColumnDescriptor('a', 'text');
-    assert.equal(colDesc.cmp({a: 'a'}, {a: 'b'}), -1);
-    assert.equal(colDesc.cmp({a: 'a'}, undefined), -1);
-  });
-
-  test('descValue', function() {
-    var colDesc = new GenericTableViewColumnDescriptor('a', 1);
-    var value = colDesc.value({a: undefined});
-    assert.equal(value, '-');
-
-    value = colDesc.value({a: 3});
-    assert.equal(value, 3);
-
-    var totalsValue = colDesc.value(
-        {a: new GenericTableViewTotalsItem([1, 2, 3])});
-    assert.equal(totalsValue.tagName.toLowerCase(),
-                  'tr-ui-u-array-of-numbers-span');
-    assert.deepEqual(totalsValue.numbers, [1, 2, 3]);
-  });
-
-  test('everythingTogether', function() {
-    var table = document.createElement('tr-ui-u-generic-table-view');
-    table.items = [
-      {
-        a: 'someString',
-        b: 2,
-        c: 'adsf'
-      },
-      {
-        a: 'someOtherString',
-        b: 2,
-        c: 'adsf'
-      }
-    ];
-    this.addHTMLOutput(table);
-  });
-
-  test('summableColumn', function() {
-    var table = document.createElement('tr-ui-u-generic-table-view');
-    table.items = [
-      {
-        a: 1
-      },
-      {
-        a: 2
-      },
-      {
-        a: 3
-      }
-    ];
-    this.addHTMLOutput(table);
-
-    assert.equal(table.$.table.tableColumns.length, 1);
-    assert.equal(table.$.table.tableRows.length, 3);
-    assert.isTrue(table.$.table.tableColumns[0].isInNumericMode);
-    assert.equal(table.$.table.tableColumns[0].fieldName, 'a');
-    var totalsItem = table.$.table.footerRows[0].a;
-    assert.deepEqual(totalsItem.values, [1, 2, 3]);
-  });
-
-
-  test('usingGenericTable', function() {
-    var table = document.createElement('tr-ui-u-generic-table-view');
-    table.items = new tr.b.u.GenericTable([
-      {
-        a: 1
-      }
-    ]);
-    assert.equal(table.items.length, 1);
-  });
-
-  test('valueIsObject', function() {
-    var table = document.createElement('tr-ui-u-generic-table-view');
-    table.items = new tr.b.u.GenericTable([
-      {
-        a: {x: 1, y: 'string'}
-      },
-      {
-        a: 'something'
-      }
-    ]);
-    this.addHTMLOutput(table);
-    assert.equal(table.items.length, 2);
-  });
-
-  test('mixedTypeTable', function() {
-    var table = document.createElement('tr-ui-u-generic-table-view');
-    table.items = [
-      {
-        a: 1
-      },
-      {
-        a: 2
-      },
-      {
-        b: 'c'
-      }
-    ];
-    this.addHTMLOutput(table);
-  });
-
-  test('tableWithElement', function() {
-    var table = document.createElement('tr-ui-u-generic-table-view');
-    table.items = [
-      {
-        a: 1
-      },
-      {
-        a: tr.ui.b.createSpan({textContent: 'ohai'})
-      },
-      {
-        b: 'c'
-      }
-    ];
-    this.addHTMLOutput(table);
-  });
-
-
-  test('emptyTable', function() {
-    var table = document.createElement('tr-ui-u-generic-table-view');
-    table.items = [{}];
-    assert.equal(table.$.table.tableColumns.length, 0);
-  });
-
-  test('undefinedAndValue', function() {
-    var table = document.createElement('tr-ui-u-generic-table-view');
-    table.items = [
-      {
-      },
-      {
-        a: 2
-      }
-    ];
-    this.addHTMLOutput(table);
-  });
-
-  test('undefinedOnly', function() {
-    var table = document.createElement('tr-ui-u-generic-table-view');
-    table.items = [
-      {
-        a: undefined
-      }
-    ];
-    this.addHTMLOutput(table);
-  });
-});
-</script>
diff --git a/catapult/tracing/tracing/ui/units/histogram_span.html b/catapult/tracing/tracing/ui/units/histogram_span.html
deleted file mode 100644
index 93ece67..0000000
--- a/catapult/tracing/tracing/ui/units/histogram_span.html
+++ /dev/null
@@ -1,190 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/statistics.html">
-<link rel="import" href="/tracing/base/units/units.html">
-<link rel="import" href="/tracing/base/units/histogram.html">
-<link rel="import" href="/tracing/ui/base/bar_chart.html">
-<link rel="import" href="/tracing/ui/units/scalar_span.html">
-
-<polymer-element name="tr-ui-u-histogram-span">
-  <template>
-    <style>
-      :host {
-        display: flex;
-        flex-direction: column;
-      }
-
-      #stats {
-        display: flex;
-        flex-direction: row;
-        flex: 0 0 auto;
-        font-weight: bold;
-      }
-
-      #nnans {
-        color: red;
-      }
-      #table {
-        flex: 1 1 auto;
-      }
-    </style>
-    <div id="stats">
-      <span id="nsamples"></span>&nbsp;samples,&nbsp;
-      <span id="hadnans"><span id="nnans"></span> non-numeric samples,&nbsp;
-      </span>
-      average=<tr-ui-u-scalar-span id="average"></tr-ui-u-scalar-span>
-    </div>
-    <div id="container"></div>
-  </template>
-  <script>
-  'use strict';
-
-  Polymer({
-    created: function() {
-      this.histogram_ = undefined;
-      this.chart_ = new tr.ui.b.BarChart();
-      this.chart_.width = 400;
-      this.chart_.height = 200;
-      this.mouseDownBin_ = undefined;
-      this.brushedBins_ = [];
-      this.chart_.addEventListener('item-mousedown',
-          this.onMouseDown_.bind(this));
-      this.chart_.addEventListener('item-mousemove',
-          this.onMouseMove_.bind(this));
-      this.chart_.addEventListener('item-mouseup',
-          this.onMouseUp_.bind(this));
-    },
-
-    ready: function() {
-      this.$.container.appendChild(this.chart_);
-    },
-
-    get brushedBins() {
-      return this.brushedBins_;
-    },
-
-    updateBrushedRange_: function(currentX) {
-      this.brushedBins_ = [this.histogram_.getBinForValue(currentX)];
-      var r = new tr.b.Range();
-      r.addValue(this.mouseDownX_);
-      r.addValue(currentX);
-
-      // Collect bins:
-      var centralMin = Number.MAX_VALUE;
-      var centralMax = -Number.MAX_VALUE;
-      this.histogram_.centralBins.forEach(function(bin) {
-        centralMin = Math.min(centralMin, bin.range.min);
-        centralMax = Math.max(centralMax, bin.range.max);
-        if ((bin.range.max > r.min) &&
-            (bin.range.min < r.max) &&
-            (this.brushedBins_.indexOf(bin) < 0))
-          this.brushedBins_.push(bin);
-      }, this);
-      if ((this.histogram_.underflowBin.max > r.min) &&
-          (this.brushedBins_.indexOf(this.histogram_.underflowBin) < 0)) {
-        this.brushedBins_.push(this.histogram_.underflowBin);
-      }
-      if ((this.histogram_.overflowBin.min < r.max) &&
-          (this.brushedBins_.indexOf(this.histogram_.overflowBin) < 0)) {
-        this.brushedBins_.push(this.histogram_.overflowBin);
-      }
-      this.brushedBins_.sort(function(a, b) {
-        return a.range.min - b.range.min;
-      });
-
-      // Prevent Infinity:
-      var minBin = this.histogram_.getBinForValue(r.min);
-      var maxBin = this.histogram_.getBinForValue(r.max);
-      var binWidth = this.histogram_.centralBins[0].range.range;
-      r.min = minBin ? Math.max(centralMin - binWidth, minBin.range.min) :
-        centralMin - binWidth;
-      r.max = maxBin ? Math.min(centralMax + binWidth, maxBin.range.max) :
-        centralMax + binWidth;
-
-      this.chart_.brushedRange = r;
-
-      this.dispatchEvent(new tr.b.Event('brushed-bins-changed'));
-    },
-
-    onMouseDown_: function(chartEvent) {
-      chartEvent.stopPropagation();
-      if (!this.histogram_)
-        return;
-      this.mouseDownX_ = chartEvent.x;
-      this.updateBrushedRange_(chartEvent.x);
-    },
-
-    onMouseMove_: function(chartEvent) {
-      chartEvent.stopPropagation();
-      if (!this.histogram_)
-        return;
-      this.updateBrushedRange_(chartEvent.x);
-    },
-
-    onMouseUp_: function(chartEvent) {
-      chartEvent.stopPropagation();
-      if (!this.histogram_)
-        return;
-      this.updateBrushedRange_(chartEvent.x);
-      this.mouseDownX_ = undefined;
-    },
-
-    get histogram() {
-      return this.histogram_;
-    },
-
-    set histogram(histogram) {
-      this.histogram_ = histogram;
-      this.updateContents_();
-    },
-
-    updateContents_: function() {
-      this.$.container.style.display = this.histogram_ ? '' : 'none';
-      if (!this.histogram_) {
-        this.$.nsamples.textContent = 0;
-        this.$.average.setValueAndUnit(undefined, undefined);
-        return;
-      }
-
-      this.$.nsamples.textContent = this.histogram_.numValues;
-      this.$.average.setValueAndUnit(this.histogram_.average,
-                                     this.histogram_.unit);
-      if (this.histogram_.numNans > 0) {
-        this.$.hadnans.style.display = '';
-        this.$.nnans.textContent = this.histogram_.numNans;
-      } else {
-        this.$.hadnans.style.display = 'none';
-      }
-
-      var maximumBinValue = tr.b.Statistics.max(this.histogram_.allBins,
-                                                function(bin) {
-                                                  return bin.count;
-                                                });
-      var chartData = [];
-      var binWidth = this.histogram_.centralBins[0].range.range;
-      this.histogram_.allBins.forEach(function(bin) {
-        var x = bin.range.min;
-        if (x === -Number.MAX_VALUE) {
-          if (!bin.count)
-            return;
-          x = bin.range.max - binWidth;
-        }
-        chartData.push({x: x,
-                        y: bin.count});
-      });
-      chartData.sort(function(x, y) {
-        return x.x - y.x;
-      });
-      this.$.container.style.display = chartData.length ? '' : 'none';
-      this.chart_.data = chartData;
-      this.brushedBins_ = [];
-      this.chart_.brushedRange = new tr.b.Range();
-    }
-  });
-  </script>
-</polymer-element>
diff --git a/catapult/tracing/tracing/ui/units/histogram_span_test.html b/catapult/tracing/tracing/ui/units/histogram_span_test.html
deleted file mode 100644
index bbc014e..0000000
--- a/catapult/tracing/tracing/ui/units/histogram_span_test.html
+++ /dev/null
@@ -1,65 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/ui/units/histogram_span.html">
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  test('basic', function() {
-    var h = new tr.b.u.Histogram.createLinear(
-        tr.b.u.Units.timeDurationInMs,
-        tr.b.Range.fromExplicitRange(0, 1000),
-        10);
-    h.add(-1, 'a');
-    h.add(0, 'b');
-    h.add(0, 'b');
-    h.add(0, 'b');
-    h.add(0, 'b');
-    h.add(0, 'b');
-    h.add(0, 'b');
-    h.add(0, 'c');
-    h.add(500, 'c');
-    h.add(999, 'd');
-    h.add(1000, 'd');
-
-    var span = document.createElement('tr-ui-u-histogram-span');
-    span.histogram = h;
-    this.addHTMLOutput(span);
-  });
-
-  test('undefined', function() {
-    var span = document.createElement('tr-ui-u-histogram-span');
-    span.histogram = undefined;
-    this.addHTMLOutput(span);
-  });
-
-  test('emptyHistogram', function() {
-    var h = new tr.b.u.Histogram.createLinear(
-        tr.b.u.Units.timeDurationInMs,
-        tr.b.Range.fromExplicitRange(0, 1000),
-        10);
-
-    var span = document.createElement('tr-ui-u-histogram-span');
-    span.histogram = h;
-    this.addHTMLOutput(span);
-  });
-
-  test('nans', function() {
-    var h = new tr.b.u.Histogram.createLinear(
-        tr.b.u.Units.timeDurationInMs,
-        tr.b.Range.fromExplicitRange(0, 1000),
-        10);
-    h.add(undefined, 'b');
-    h.add(NaN, 'c');
-
-    var span = document.createElement('tr-ui-u-histogram-span');
-    span.histogram = h;
-    this.addHTMLOutput(span);
-  });
-
-});
-</script>
diff --git a/catapult/tracing/tracing/ui/units/preferred_display_unit.html b/catapult/tracing/tracing/ui/units/preferred_display_unit.html
deleted file mode 100644
index 9a46d35..0000000
--- a/catapult/tracing/tracing/ui/units/preferred_display_unit.html
+++ /dev/null
@@ -1,39 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/base/units/time_display_mode.html">
-
-<polymer-element name="tr-ui-u-preferred-display-unit">
-  <script>
-  'use strict';
-  Polymer({
-    ready: function() {
-      this.preferredTimeDisplayMode_ = undefined;
-    },
-
-    attached: function() {
-      tr.b.u.Units.didPreferredTimeDisplayUnitChange();
-    },
-
-    detached: function() {
-      tr.b.u.Units.didPreferredTimeDisplayUnitChange();
-    },
-
-    // null means no-preference
-    get preferredTimeDisplayMode() {
-      return this.preferredTimeDisplayMode_;
-    },
-
-    set preferredTimeDisplayMode(v) {
-      if (this.preferredTimeDisplayMode_ === v)
-        return;
-      this.preferredTimeDisplayMode_ = v;
-      tr.b.u.Units.didPreferredTimeDisplayUnitChange();
-    }
-
-  });
-  </script>
-</polymer-element>
diff --git a/catapult/tracing/tracing/ui/units/preferred_display_unit_test.html b/catapult/tracing/tracing/ui/units/preferred_display_unit_test.html
deleted file mode 100644
index 163fc5e..0000000
--- a/catapult/tracing/tracing/ui/units/preferred_display_unit_test.html
+++ /dev/null
@@ -1,22 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/units/time_display_mode.html">
-<link rel="import" href="/tracing/ui/units/preferred_display_unit.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  test('instantiate', function() {
-    var unit = document.createElement('tr-ui-u-preferred-display-unit');
-    var ms = tr.b.u.TimeDisplayModes.ms;
-    unit.preferredDisplayUnit = ms;
-    assert.equal(unit.preferredDisplayUnit, ms);
-  });
-});
-</script>
diff --git a/catapult/tracing/tracing/ui/units/scalar_span.html b/catapult/tracing/tracing/ui/units/scalar_span.html
deleted file mode 100644
index f321209..0000000
--- a/catapult/tracing/tracing/ui/units/scalar_span.html
+++ /dev/null
@@ -1,204 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/ui/base/deep_utils.html">
-<link rel="import" href="/tracing/ui/base/polymer_utils.html">
-<link rel="import" href="/tracing/base/units/units.html">
-<link rel="import" href="/tracing/base/units/scalar.html">
-
-<script>
-'use strict';
-tr.exportTo('tr.ui.units', function() {
-  function createScalarSpan(value, opt_config) {
-    if (value === undefined)
-      return '';
-    var config = opt_config || {};
-    var ownerDocument = config.ownerDocument || document;
-    var span = ownerDocument.createElement('tr-ui-u-scalar-span');
-    span.value = value;
-    return span;
-  }
-
-  tr.b.u.Units.addEventListener('display-mode-changed', function(e) {
-    var subclassNames = tr.ui.b.getPolymerElementsThatSubclass(
-        'tr-ui-u-scalar-span');
-    var isSubclass = {};
-    subclassNames.forEach(function(n) {
-      isSubclass[n.toUpperCase()] = true;
-    });
-
-    var m = tr.b.findDeepElementsMatchingPredicate(
-        document.body,
-        function(el) {
-         return isSubclass[el.tagName];
-        });
-    m.forEach(function(el) {
-      el.updateContent_();
-    });
-  });
-
-  return {
-    createScalarSpan: createScalarSpan
-  };
-});
-</script>
-
-<polymer-element name="tr-ui-u-scalar-span">
-  <template>
-    <style>
-    :host {
-      display: block;
-      position: relative;
-    }
-    #content.right-align {
-      text-align: right;
-      position: relative;
-      display: block;
-    }
-    #sparkline {
-      width: 0%;
-      position: absolute;
-      bottom: 0;
-      right: 0;
-      display: none;
-      height: 100%;
-      background-color: hsla(216, 100%, 94.5%, .75);
-      border-left: 1px solid hsl(216, 100%, 89%);
-      box-sizing: border-box;
-    }
-    #warning {
-      margin-left: 4px;
-      font-size: 66%;
-    }
-    </style>
-    <span id="sparkline"></span>
-    <span id="content"></span>
-    <span id="warning" style="display:none">&#9888;</span>
-  </template>
-  <script>
-  'use strict';
-
-  Polymer({
-    ready: function() {
-      this.value_ = undefined;
-      this.unit_ = undefined;
-
-      this.warning_ = undefined;
-      this.percentage_ = undefined;
-      this.isDelta_ = false;
-    },
-
-    set contentTextDecoration(deco) {
-      this.$.content.style.textDecoration = deco;
-    },
-
-    get value() {
-      return this.value_;
-    },
-
-    set value(value) {
-      if (value instanceof tr.b.u.Scalar) {
-        this.value_ = value.value;
-        this.unit_ = value.unit;
-      } else {
-        this.value_ = value;
-      }
-      this.updateContent_();
-    },
-
-    get unit() {
-      return this.unit_;
-    },
-
-    set unit(unit) {
-      this.unit_ = unit;
-      this.updateContent_();
-    },
-
-    setValueAndUnit: function(value, unit) {
-      this.value_ = value;
-      this.unit_ = unit;
-      this.updateContent_();
-    },
-
-    get percentage() {
-      return this.percentage_;
-    },
-
-    set percentage(percentage) {
-      this.percentage_ = percentage;
-      this.updateSparkline_();
-    },
-
-    get rightAlign() {
-      return this.$.content.classList.contains('right-align');
-    },
-
-    set rightAlign(rightAlign) {
-      if (rightAlign)
-        this.$.content.classList.add('right-align');
-      else
-        this.$.content.classList.remove('right-align');
-    },
-
-    get isDelta() {
-      return this.isDelta_;
-    },
-
-    set isDelta(isDelta) {
-      this.isDelta_ = isDelta;
-      this.updateContent_();
-    },
-
-    updateSparkline_: function() {
-      if (this.percentage_ === undefined) {
-        this.$.sparkline.style.display = 'none';
-        this.$.sparkline.style.width = '0';
-      } else {
-        this.$.sparkline.style.display = 'block';
-        this.$.sparkline.style.width = (this.percentage_ * 100) + '%';
-      }
-    },
-
-    updateContent_: function() {
-      if (this.unit_ === undefined) {
-        this.$.content.textContent = '';
-        return;
-      }
-      var content = this.unit_.format(this.value);
-      if (this.isDelta_) {
-        if (this.value > 0) {
-          // Positive delta.
-          content = '+' + content;
-        } else if (this.value === 0) {
-          // Zero delta.
-          var PLUS_MINUS_SIGN = String.fromCharCode(177);
-          content = PLUS_MINUS_SIGN + content;
-        }
-        // No need for this.value < 0 case (negative sign is always shown).
-      }
-      this.$.content.textContent = content;
-    },
-
-    get warning() {
-      return this.warning_;
-    },
-
-    set warning(warning) {
-      this.warning_ = warning;
-      var warningEl = this.$.warning;
-      if (this.warning_) {
-        warningEl.title = warning;
-        warningEl.style.display = '';
-      } else {
-        warningEl.title = '';
-        warningEl.style.display = 'none';
-      }
-    }
-  });
-  </script>
-</polymer-element>
diff --git a/catapult/tracing/tracing/ui/units/scalar_span_test.html b/catapult/tracing/tracing/ui/units/scalar_span_test.html
deleted file mode 100644
index be6949a..0000000
--- a/catapult/tracing/tracing/ui/units/scalar_span_test.html
+++ /dev/null
@@ -1,48 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/units/scalar.html">
-<link rel="import" href="/tracing/base/units/units.html">
-<link rel="import" href="/tracing/ui/units/scalar_span.html">
-
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  function createScalarSpan(value) {
-    var scalar = new tr.b.u.Scalar(value, tr.b.u.Units.sizeInBytes);
-    return tr.ui.units.createScalarSpan(scalar);
-  }
-
-  test('instantiate', function() {
-    var checkSpan = (function(value, expectedTextContent) {
-      var span = createScalarSpan(value);
-      assert.strictEqual(span.$.content.textContent, expectedTextContent);
-      assert.isFalse(span.isDelta);
-      this.addHTMLOutput(span);
-    }).bind(this);
-
-    checkSpan(1023, '1023.0 B');
-    checkSpan(0, '0.0 B');
-    checkSpan(-1024, '-1.0 KiB');
-  });
-
-  test('instantiate_isDelta', function() {
-    var checkSpan = (function(value, expectedTextContent) {
-      var span = createScalarSpan(value);
-      span.isDelta = true;
-      assert.strictEqual(span.$.content.textContent, expectedTextContent);
-      assert.isTrue(span.isDelta);
-      this.addHTMLOutput(span);
-    }).bind(this);
-
-    checkSpan(1023, '+1023.0 B');
-    checkSpan(0, '\u00B10.0 B');
-    checkSpan(-1024, '-1.0 KiB');
-  });
-});
-</script>
diff --git a/catapult/tracing/tracing/ui/units/time_duration_span.html b/catapult/tracing/tracing/ui/units/time_duration_span.html
deleted file mode 100644
index aedaf2b..0000000
--- a/catapult/tracing/tracing/ui/units/time_duration_span.html
+++ /dev/null
@@ -1,62 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/units/time_display_mode.html">
-<link rel="import" href="/tracing/base/units/time_duration.html">
-<link rel="import" href="/tracing/ui/base/deep_utils.html">
-<link rel="import" href="/tracing/ui/units/scalar_span.html">
-
-<script>
-'use strict';
-
-// TODO(petrcermak): Similarly to base/units/time_duration.html and
-// base/units/time_stamp.html, the classes defined in this file are almost
-// identical to the ones defined in time_stamp_span. Consider sharing more
-// code between the two files.
-tr.exportTo('tr.ui.units', function() {
-  function createTimeDurationSpan(duration, opt_config) {
-    if (duration === undefined)
-      return '';
-    var config = opt_config || {};
-    var ownerDocument = config.ownerDocument || document;
-    var span = ownerDocument.createElement('tr-ui-u-time-duration-span');
-    span.setValueAndUnit(duration, tr.b.u.Units.timeDurationInMs);
-    if (config.total)
-      span.percentage = duration / config.total;
-    span.duration = duration;
-    // TODO(petrcermak): Get rid of this boolean once we've cleaned up units.
-    if (config.rightAlign)
-      span.rightAlign = true;
-    return span;
-  }
-
-  return {
-    createTimeDurationSpan: createTimeDurationSpan
-  };
-});
-</script>
-
-<polymer-element name="tr-ui-u-time-duration-span"
-                 extends="tr-ui-u-scalar-span">
-  <script>
-  'use strict';
-
-  Polymer({
-    get duration() {
-      return this.value;
-    },
-
-    set duration(duration) {
-      if (duration instanceof tr.b.u.TimeDuration) {
-        this.value = duration;
-        return;
-      }
-      this.setValueAndUnit(duration, tr.b.u.Units.timeDurationInMs);
-    }
-  });
-  </script>
-</polymer-element>
diff --git a/catapult/tracing/tracing/ui/units/time_duration_span_test.html b/catapult/tracing/tracing/ui/units/time_duration_span_test.html
deleted file mode 100644
index 6363d16..0000000
--- a/catapult/tracing/tracing/ui/units/time_duration_span_test.html
+++ /dev/null
@@ -1,124 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/base/units/time_display_mode.html">
-<link rel="import" href="/tracing/ui/units/time_duration_span.html">
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  var THIS_DOC = document._currentScript.ownerDocument;
-
-  test('createTimeDurationSpan', function() {
-    // Default owner document.
-    var timeStamp = tr.ui.units.createTimeDurationSpan(3.14);
-    assert.strictEqual(timeStamp.tagName, 'TR-UI-U-TIME-DURATION-SPAN');
-    assert.strictEqual(timeStamp.duration, 3.14);
-    assert.strictEqual(timeStamp.ownerDocument, document);
-
-    // Custom owner document.
-    var config = {ownerDocument: THIS_DOC};
-    var timeStamp = tr.ui.units.createTimeDurationSpan(-273.15, config);
-    assert.strictEqual(timeStamp.tagName, 'TR-UI-U-TIME-DURATION-SPAN');
-    assert.strictEqual(timeStamp.duration, -273.15);
-    assert.strictEqual(timeStamp.ownerDocument, THIS_DOC);
-  });
-
-  test('instantiate', function() {
-    var timeSpan = document.createElement('tr-ui-u-time-duration-span');
-    timeSpan.duration = 73;
-    this.addHTMLOutput(timeSpan);
-  });
-
-  test('instantiateWithObject', function() {
-    var timeSpan = document.createElement('tr-ui-u-time-duration-span');
-    timeSpan.duration = new tr.b.u.TimeDuration(73);
-    this.addHTMLOutput(timeSpan);
-    assert.equal(timeSpan.duration, 73);
-  });
-
-  test('instantiateWithWarning', function() {
-    var timeSpan = document.createElement('tr-ui-u-time-duration-span');
-    timeSpan.duration = 400;
-    timeSpan.warning = 'there is a problem with this time';
-    this.addHTMLOutput(timeSpan);
-  });
-
-  test('instantiateWithPercentage', function() {
-    var timeSpan = document.createElement('tr-ui-u-time-duration-span');
-    timeSpan.percentage = 0.5;
-    timeSpan.duration = 40;
-    this.addHTMLOutput(timeSpan);
-  });
-
-  test('warningAndNonWarningHaveSimilarHeights', function() {
-    var spanA = document.createElement('tr-ui-u-time-duration-span');
-    spanA.duration = 400;
-
-    var spanB = document.createElement('tr-ui-u-time-duration-span');
-    spanB.duration = 400;
-    spanB.warning = 'there is a problem with this time';
-
-    var overall = document.createElement('div');
-    overall.style.display = 'flex';
-    overall.appendChild(spanA);
-    spanB.style.marginLeft = '4px';
-    overall.appendChild(spanB);
-    this.addHTMLOutput(overall);
-  });
-
-
-  test('respectCurrentDisplayUnit', function() {
-    try {
-      tr.b.u.Units.currentTimeDisplayMode = tr.b.u.TimeDisplayModes.ns;
-
-      var timeSpan = document.createElement('tr-ui-u-time-duration-span');
-      timeSpan.duration = 73;
-      this.addHTMLOutput(timeSpan);
-
-      assert.isTrue(timeSpan.$.content.textContent.indexOf('ns') > 0);
-      tr.b.u.Units.currentTimeDisplayMode = tr.b.u.TimeDisplayModes.ms;
-      assert.isTrue(timeSpan.$.content.textContent.indexOf('ms') > 0);
-    } finally {
-      tr.b.u.Units.reset();
-    }
-  });
-
-
-  test('displaySparkline', function() {
-    var div = document.createElement('div');
-    div.style.width = '100px';
-
-    var timeSpan = document.createElement('tr-ui-u-time-duration-span');
-    timeSpan.duration = 40;
-    div.appendChild(timeSpan);
-
-    this.addHTMLOutput(div);
-
-    function checkSparkline(expectedDisplay, expectedWidth) {
-      var computedStyle = getComputedStyle(timeSpan.$.sparkline);
-      assert.equal(computedStyle.display, expectedDisplay);
-      assert.equal(parseInt(computedStyle.width), expectedWidth);
-    }
-
-    // No percentage set.
-    checkSparkline('none', 0);
-
-    // Zero percentage set.
-    timeSpan.percentage = 0;
-    checkSparkline('block', 1);
-
-    // Undefined percentage set.
-    timeSpan.percentage = undefined;
-    checkSparkline('none', 0);
-
-    // Fifty percent set.
-    timeSpan.percentage = 0.5;
-    checkSparkline('block', 50);
-  });
-
-});
-</script>
diff --git a/catapult/tracing/tracing/ui/units/time_stamp_span.html b/catapult/tracing/tracing/ui/units/time_stamp_span.html
deleted file mode 100644
index 09f9950..0000000
--- a/catapult/tracing/tracing/ui/units/time_stamp_span.html
+++ /dev/null
@@ -1,52 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-
-<link rel="import" href="/tracing/base/units/time_display_mode.html">
-<link rel="import" href="/tracing/base/units/time_stamp.html">
-<link rel="import" href="/tracing/base/units/units.html">
-<link rel="import" href="/tracing/ui/base/deep_utils.html">
-<link rel="import" href="/tracing/ui/units/scalar_span.html">
-
-<script>
-'use strict';
-tr.exportTo('tr.ui.units', function() {
-  function createTimeStampSpan(timestamp, opt_config) {
-    if (timestamp === undefined)
-      return '';
-    var config = opt_config || {};
-    var ownerDocument = config.ownerDocument || document;
-    var span = ownerDocument.createElement('tr-ui-u-time-stamp-span');
-    span.timestamp = timestamp;
-    return span;
-  }
-
-  return {
-    createTimeStampSpan: createTimeStampSpan
-  };
-});
-</script>
-
-<polymer-element name="tr-ui-u-time-stamp-span"
-                 extends="tr-ui-u-scalar-span">
-  <script>
-  'use strict';
-
-  Polymer({
-    get timestamp() {
-      return this.value;
-    },
-
-    set timestamp(timestamp) {
-      if (timestamp instanceof tr.b.u.TimeStamp) {
-        this.value = timestamp;
-        return;
-      }
-      this.setValueAndUnit(timestamp, tr.b.u.Units.timeStampInMs);
-    }
-  });
-  </script>
-</polymer-element>
diff --git a/catapult/tracing/tracing/ui/units/time_stamp_span_test.html b/catapult/tracing/tracing/ui/units/time_stamp_span_test.html
deleted file mode 100644
index 850bee6..0000000
--- a/catapult/tracing/tracing/ui/units/time_stamp_span_test.html
+++ /dev/null
@@ -1,59 +0,0 @@
-<!DOCTYPE html>
-<!--
-Copyright (c) 2015 The Chromium Authors. All rights reserved.
-Use of this source code is governed by a BSD-style license that can be
-found in the LICENSE file.
--->
-<link rel="import" href="/tracing/base/units/units.html">
-<link rel="import" href="/tracing/ui/units/time_stamp_span.html">
-<script>
-'use strict';
-
-tr.b.unittest.testSuite(function() {
-  var THIS_DOC = document._currentScript.ownerDocument;
-
-  test('createTimeStampSpan', function() {
-    // Default owner document.
-    var timeStamp = tr.ui.units.createTimeStampSpan(3.14);
-    assert.strictEqual(timeStamp.tagName, 'TR-UI-U-TIME-STAMP-SPAN');
-    assert.strictEqual(timeStamp.timestamp, 3.14);
-    assert.strictEqual(timeStamp.ownerDocument, document);
-
-    // Custom owner document.
-    var config = {ownerDocument: THIS_DOC};
-    var timeStamp = tr.ui.units.createTimeStampSpan(-273.15, config);
-    assert.strictEqual(timeStamp.tagName, 'TR-UI-U-TIME-STAMP-SPAN');
-    assert.strictEqual(timeStamp.timestamp, -273.15);
-    assert.strictEqual(timeStamp.ownerDocument, THIS_DOC);
-  });
-
-  test('instantiate', function() {
-    var timeStamp = document.createElement('tr-ui-u-time-stamp-span');
-    timeStamp.timestamp = 73;
-    this.addHTMLOutput(timeStamp);
-  });
-
-  test('instantiateWithObject', function() {
-    var timeStamp = document.createElement('tr-ui-u-time-stamp-span');
-    timeStamp.timestamp = new tr.b.u.TimeStamp(73);
-    this.addHTMLOutput(timeStamp);
-    assert.equal(timeStamp.timestamp, 73);
-  });
-
-  test('respectCurrentDisplayUnit', function() {
-    try {
-      tr.b.u.Units.currentTimeDisplayMode = tr.b.u.TimeDisplayModes.ns;
-
-      var timeStamp = document.createElement('tr-ui-u-time-stamp-span');
-      timeStamp.timestamp = 73;
-      this.addHTMLOutput(timeStamp);
-
-      assert.isTrue(timeStamp.shadowRoot.textContent.indexOf('ns') > 0);
-      tr.b.u.Units.currentTimeDisplayMode = tr.b.u.TimeDisplayModes.ms;
-      assert.isTrue(timeStamp.shadowRoot.textContent.indexOf('ms') > 0);
-    } finally {
-      tr.b.u.Units.reset();
-    }
-  });
-});
-</script>
diff --git a/catapult/tracing/tracing/value/__init__.py b/catapult/tracing/tracing/value/__init__.py
new file mode 100644
index 0000000..c134637
--- /dev/null
+++ b/catapult/tracing/tracing/value/__init__.py
@@ -0,0 +1,192 @@
+# Copyright (c) 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Simplified version of the telemetry Value system; just enough to get us
+# up and running.
+
+
+class Value(object):
+
+  def __init__(self, canonical_url, name, units, description=None,
+               important=False, ir_stable_id=None):
+    self.canonical_url = canonical_url
+    self.name = name
+    self.units = units
+    self.description = description
+    self.important = important
+    self.ir_stable_id = ir_stable_id
+
+  def AsDict(self):
+    d = {
+        'canonical_url': self.canonical_url,
+        'name': self.name,
+        'important': self.important
+    }
+    # Only dump fields if they're non-None, because Python JSON serialization
+    # turns None into null instead of leaving the key out.
+    if self.units is not None:
+      d['units'] = self.units
+
+    if self.description is not None:
+      d['description'] = self.description
+
+    if self.ir_stable_id is not None:
+      d['ir_stable_id'] = self.ir_stable_id
+
+    self._AsDictInto(d)
+    assert 'type' in d
+
+    return d
+
+  def _AsDictInto(self, d):
+    raise NotImplementedError()
+
+  @classmethod
+  def FromDict(cls, d):
+    if d['type'] == 'dict':
+      return DictValue.FromDict(d)
+    elif d['type'] == 'scalar':
+      return ScalarValue.FromDict(d)
+    elif d['type'] == 'failure':
+      return FailureValue.FromDict(d)
+    elif d['type'] == 'skip':
+      return SkipValue.FromDict(d)
+    else:
+      raise NotImplementedError()
+
+
+# TODO(eakuefner): Change to NumericValue after porting Unit
+# (https://github.com/catapult-project/catapult/issues/2049)
+class ScalarValue(Value):
+
+  def __init__(self, canonical_url, name, value, description=None,
+               important=False, ir_stable_id=None):
+    assert isinstance(value, dict)
+    super(ScalarValue, self).__init__(canonical_url, name, units=None,
+                                      description=description,
+                                      important=important,
+                                      ir_stable_id=ir_stable_id)
+    self._value = value
+
+  def __repr__(self):
+    return '%s("%s", "%s")' % (self.__class__.__name__,
+                               self.name, self.value)
+
+  def _AsDictInto(self, d):
+    d['type'] = 'scalar'
+    d['value'] = self._value
+
+  @classmethod
+  def FromDict(cls, d):
+    assert d.get('units', None) is None
+    return cls(d['canonical_url'], name=d['name'],
+               description=d.get('description', None),
+               value=d['value'],
+               important=d['important'],
+               ir_stable_id=d.get('ir_stable_id', None))
+
+  @property
+  def value(self):
+    return self._value
+
+  def __getitem__(self, key):
+    return self._value[key]
+
+
+class DictValue(Value):
+
+  def __init__(self, canonical_url, name, value, description=None,
+               important=False, ir_stable_id=None):
+    assert isinstance(value, dict)
+    super(DictValue, self).__init__(canonical_url, name, units=None,
+                                    description=description,
+                                    important=important,
+                                    ir_stable_id=ir_stable_id)
+    self._value = value
+
+  def __repr__(self):
+    return '%s("%s", "%s")' % (self.__class__.__name__,
+                               self.name, self.value)
+
+  def _AsDictInto(self, d):
+    d['type'] = 'dict'
+    d['value'] = self._value
+
+  @classmethod
+  def FromDict(cls, d):
+    assert d.get('units', None) is None
+    return cls(d['canonical_url'], name=d['name'],
+               description=d.get('description', None),
+               value=d['value'],
+               important=d['important'],
+               ir_stable_id=d.get('ir_stable_id', None))
+
+  @property
+  def value(self):
+    return self._value
+
+  def __getitem__(self, key):
+    return self._value[key]
+
+class FailureValue(Value):
+
+  def __init__(self, canonical_url, failure_type_name, description, stack,
+               important=False, ir_stable_id=None):
+    super(FailureValue, self).__init__(canonical_url,
+                                       name=failure_type_name,
+                                       units=None,
+                                       description=description,
+                                       important=important,
+                                       ir_stable_id=ir_stable_id)
+    assert isinstance(stack, basestring)
+    self.stack = stack
+
+  def __repr__(self):
+    return '%s("%s", "%s")' % (self.__class__.__name__,
+                               self.name, self.description)
+
+  def _AsDictInto(self, d):
+    d['type'] = 'failure'
+    d['stack_str'] = self.stack
+
+  @classmethod
+  def FromDict(cls, d):
+    assert d.get('units', None) is None
+    return cls(d['canonical_url'],
+               failure_type_name=d['name'],
+               description=d.get('description', None),
+               stack=d['stack_str'],
+               important=d.get('important', False),
+               ir_stable_id=d.get('ir_stable_id', None))
+
+  def GetGTestPrintString(self):
+    return self.stack
+
+
+class SkipValue(Value):
+
+  def __init__(self, canonical_url, skipped_result_name,
+               description=None, important=False, ir_stable_id=None):
+    super(SkipValue, self).__init__(canonical_url,
+                                    name=skipped_result_name,
+                                    units=None,
+                                    description=description,
+                                    important=important,
+                                    ir_stable_id=ir_stable_id)
+
+  def __repr__(self):
+    return '%s("%s", "%s")' % (self.__class__.__name__,
+                               self.name, self.description)
+
+  def _AsDictInto(self, d):
+    d['type'] = 'skip'
+
+  @classmethod
+  def FromDict(cls, d):
+    assert d.get('units', None) is None
+    return cls(d['canonical_url'],
+               skipped_result_name=d['name'],
+               description=d.get('description', None),
+               important=d.get('important', False),
+               ir_stable_id=d.get('ir_stable_id', None))
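For context on the new tracing/value/__init__.py above: Value.FromDict dispatches on d['type'] and each subclass's AsDict omits None fields, so serialized values round-trip cleanly. A minimal round-trip sketch, assuming the package is importable as tracing.value and running under Python 2 (the module itself uses basestring):

from tracing import value

d = {
    'type': 'failure',
    'canonical_url': 'http://example.com/trace.html',
    'name': 'Exception',
    'description': 'boom',
    'stack_str': 'Traceback (most recent call last): ...',
    'important': False,
}
v = value.Value.FromDict(d)    # dispatches on d['type'] -> FailureValue
assert v.GetGTestPrintString() == d['stack_str']
assert value.Value.FromDict(v.AsDict()).stack == v.stack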
diff --git a/catapult/tracing/tracing/value/generic_table.html b/catapult/tracing/tracing/value/generic_table.html
new file mode 100644
index 0000000..220510c
--- /dev/null
+++ b/catapult/tracing/tracing/value/generic_table.html
@@ -0,0 +1,29 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<link rel="import" href="/tracing/base/base.html">
+<script>
+'use strict';
+
+tr.exportTo('tr.v', function() {
+  /**
+   * Tabular data wrapper. Simply wraps an array of items.
+   */
+  function GenericTable(items) {
+    if (items !== undefined)
+      this.items = items;
+    else
+      this.items = [];
+  }
+
+  GenericTable.prototype = {
+  };
+
+  return {
+    GenericTable: GenericTable
+  };
+});
+</script>
diff --git a/catapult/tracing/tracing/value/numeric.html b/catapult/tracing/tracing/value/numeric.html
new file mode 100644
index 0000000..68dac33
--- /dev/null
+++ b/catapult/tracing/tracing/value/numeric.html
@@ -0,0 +1,334 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/base/range.html">
+<link rel="import" href="/tracing/base/statistics.html">
+<link rel="import" href="/tracing/value/unit.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.v', function() {
+  var Range = tr.b.Range;
+
+  var MAX_SOURCE_INFOS = 16;
+
+  function NumericBase(unit) {
+    if (!(unit instanceof tr.v.Unit))
+      throw new Error('Expected provided unit to be instance of Unit');
+
+    this.unit = unit;
+  }
+
+  NumericBase.prototype = {
+    asDict: function() {
+      var d = {
+        unit: this.unit.asJSON()
+      };
+
+      this.asDictInto_(d);
+      return d;
+    }
+  };
+
+  NumericBase.fromDict = function(d) {
+    if (d.type === 'scalar')
+      return ScalarNumeric.fromDict(d);
+
+    throw new Error('Not implemented');
+  };
+
+  function NumericBin(parentNumeric, opt_range) {
+    this.parentNumeric = parentNumeric;
+    this.range = opt_range || (new tr.b.Range());
+    this.count = 0;
+    this.sourceInfos = [];
+  }
+
+  NumericBin.fromDict = function(parentNumeric, d) {
+    var n = new NumericBin(parentNumeric);
+    n.range.min = d.min;
+    n.range.max = d.max;
+    n.count = d.count;
+    n.sourceInfos = d.sourceInfos;
+    return n;
+  };
+
+  NumericBin.prototype = {
+    add: function(value, sourceInfo) {
+      this.count += 1;
+      tr.b.Statistics.uniformlySampleStream(this.sourceInfos, this.count,
+          sourceInfo, MAX_SOURCE_INFOS);
+    },
+
+    addBin: function(other) {
+      if (!this.range.equals(other.range))
+        throw new Error('Merging incompatible Numeric bins.');
+      tr.b.Statistics.mergeSampledStreams(this.sourceInfos, this.count,
+          other.sourceInfos, other.count, MAX_SOURCE_INFOS);
+      this.count += other.count;
+    },
+
+    asDict: function() {
+      return {
+        min: this.range.min,
+        max: this.range.max,
+        count: this.count,
+        sourceInfos: this.sourceInfos.slice(0)
+      };
+    },
+
+    asJSON: function() {
+      return this.asDict();
+    }
+  };
+
+  function Numeric(unit, range, binInfo) {
+    NumericBase.call(this, unit);
+
+    this.range = range;
+
+    this.numNans = 0;
+    this.nanSourceInfos = [];
+
+    this.runningSum = 0;
+    this.maxCount_ = 0;
+
+    this.underflowBin = binInfo.underflowBin;
+    this.centralBins = binInfo.centralBins;
+    this.centralBinWidth = binInfo.centralBinWidth;
+    this.overflowBin = binInfo.overflowBin;
+
+    this.allBins = [];
+    this.allBins.push(this.underflowBin);
+    this.allBins.push.apply(this.allBins, this.centralBins);
+    this.allBins.push(this.overflowBin);
+
+    this.allBins.forEach(function(bin) {
+      if (bin.count > this.maxCount_)
+        this.maxCount_ = bin.count;
+    }, this);
+  }
+
+  Numeric.fromDict = function(d) {
+    var range = Range.fromExplicitRange(d.min, d.max);
+    var binInfo = {};
+    binInfo.underflowBin = NumericBin.fromDict(undefined, d.underflowBin);
+    binInfo.centralBins = d.centralBins.map(function(binAsDict) {
+      return NumericBin.fromDict(undefined, binAsDict);
+    });
+    binInfo.centralBinWidth = d.centralBinWidth;
+    binInfo.overflowBin = NumericBin.fromDict(undefined, d.overflowBin);
+    var n = new Numeric(tr.v.Unit.fromJSON(d.unit), range, binInfo);
+    n.allBins.forEach(function(bin) {
+      bin.parentNumeric = n;
+    });
+    n.runningSum = d.runningSum;
+    n.numNans = d.numNans;
+    n.nanSourceInfos = d.nanSourceInfos;
+    return n;
+  };
+
+  Numeric.createLinear = function(unit, range, numBins) {
+    if (range.isEmpty)
+      throw new Error('Cannot create linear bins for an empty range');
+
+    var binInfo = {};
+    binInfo.underflowBin = new NumericBin(
+        this, Range.fromExplicitRange(-Number.MAX_VALUE, range.min));
+    binInfo.overflowBin = new NumericBin(
+        this, Range.fromExplicitRange(range.max, Number.MAX_VALUE));
+    binInfo.centralBins = [];
+    binInfo.centralBinWidth = range.range / numBins;
+
+    for (var i = 0; i < numBins; i++) {
+      var lo = range.min + (binInfo.centralBinWidth * i);
+      var hi = lo + binInfo.centralBinWidth;
+      binInfo.centralBins.push(
+          new NumericBin(undefined, Range.fromExplicitRange(lo, hi)));
+    }
+
+    var n = new Numeric(unit, range, binInfo);
+    n.allBins.forEach(function(bin) {
+      bin.parentNumeric = n;
+    });
+    return n;
+  };
+
+  Numeric.prototype = {
+    __proto__: NumericBase.prototype,
+
+    get numValues() {
+      return tr.b.Statistics.sum(this.allBins, function(e) {
+        return e.count;
+      });
+    },
+
+    get average() {
+      return this.runningSum / this.numValues;
+    },
+
+    get maxCount() {
+      return this.maxCount_;
+    },
+
+    getInterpolatedCountAt: function(value) {
+      var bin = this.getBinForValue(value);
+      var idx = this.centralBins.indexOf(bin);
+      if (idx < 0) {
+        // |value| is in either the underflowBin or the overflowBin.
+        // We can't interpolate between infinities.
+        return bin.count;
+      }
+
+      // |value| must fall between the centers of two bins.
+      // The bin whose center is less than |value| will be this:
+      var lesserBin = bin;
+
+      // The bin whose center is greater than |value| will be this:
+      var greaterBin = bin;
+
+      // One of those bins could be an under/overflow bin.
+      // Avoid dealing with Infinities by arbitrarily saying that center of the
+      // underflow bin is its range.max, and the center of the overflow bin is
+      // its range.min.
+      // The centers of bins in |this.centralBins| will default to their
+      // |range.center|.
+
+      var lesserBinCenter = undefined;
+      var greaterBinCenter = undefined;
+
+      if (value < greaterBin.range.center) {
+        if (idx > 0) {
+          lesserBin = this.centralBins[idx - 1];
+        } else {
+          lesserBin = this.underflowBin;
+          lesserBinCenter = lesserBin.range.max;
+        }
+      } else {
+        if (idx < (this.centralBins.length - 1)) {
+          greaterBin = this.centralBins[idx + 1];
+        } else {
+          greaterBin = this.overflowBin;
+          greaterBinCenter = greaterBin.range.min;
+        }
+      }
+
+      if (greaterBinCenter === undefined)
+        greaterBinCenter = greaterBin.range.center;
+
+      if (lesserBinCenter === undefined)
+        lesserBinCenter = lesserBin.range.center;
+
+      value = tr.b.normalize(value, lesserBinCenter, greaterBinCenter);
+
+      return tr.b.lerp(value, lesserBin.count, greaterBin.count);
+    },
+
+    getBinForValue: function(value) {
+      if (value < this.range.min)
+        return this.underflowBin;
+      if (value >= this.range.max)
+        return this.overflowBin;
+      var binIdx = Math.floor((value - this.range.min) / this.centralBinWidth);
+      return this.centralBins[binIdx];
+    },
+
+    add: function(value, sourceInfo) {
+      if (typeof(value) !== 'number' || isNaN(value)) {
+        this.numNans++;
+        tr.b.Statistics.uniformlySampleStream(this.nanSourceInfos, this.numNans,
+            sourceInfo, MAX_SOURCE_INFOS);
+        return;
+      }
+
+      var bin = this.getBinForValue(value);
+      bin.add(value, sourceInfo);
+      this.runningSum += value;
+      if (bin.count > this.maxCount_)
+        this.maxCount_ = bin.count;
+    },
+
+    addNumeric: function(other) {
+      if (!this.range.equals(other.range) ||
+          this.unit !== other.unit ||
+          this.allBins.length !== other.allBins.length) {
+        throw new Error('Merging incompatible Numerics.');
+      }
+      tr.b.Statistics.mergeSampledStreams(this.nanSourceInfos, this.numNans,
+          other.nanSourceInfos, other.numNans, MAX_SOURCE_INFOS);
+      this.numNans += other.numNans;
+      this.runningSum += other.runningSum;
+      for (var i = 0; i < this.allBins.length; ++i) {
+        this.allBins[i].addBin(other.allBins[i]);
+      }
+    },
+
+    clone: function() {
+      return Numeric.fromDict(this.asDict());
+    },
+
+    asDict: function() {
+      var d = {
+        unit: this.unit.asJSON(),
+
+        min: this.range.min,
+        max: this.range.max,
+
+        numNans: this.numNans,
+        nanSourceInfos: this.nanSourceInfos,
+
+        runningSum: this.runningSum,
+
+        underflowBin: this.underflowBin.asDict(),
+        centralBins: this.centralBins.map(function(bin) {
+          return bin.asDict();
+        }),
+        centralBinWidth: this.centralBinWidth,
+        overflowBin: this.overflowBin.asDict()
+      };
+      return d;
+    },
+
+    asJSON: function() {
+      return this.asDict();
+    }
+  };
+
+  function ScalarNumeric(unit, value) {
+    if (typeof(value) !== 'number')
+      throw new Error('Expected value to be number');
+
+    NumericBase.call(this, unit);
+    this.value = value;
+  }
+
+  ScalarNumeric.prototype = {
+    __proto__: NumericBase.prototype,
+
+    asDictInto_: function(d) {
+      d.type = 'scalar';
+      d.value = this.value;
+    },
+
+    toString: function() {
+      return this.unit.format(this.value);
+    }
+  };
+
+  ScalarNumeric.fromDict = function(d) {
+    return new ScalarNumeric(tr.v.Unit.fromJSON(d.unit), d.value);
+  };
+
+  return {
+    NumericBase: NumericBase,
+    NumericBin: NumericBin,
+    Numeric: Numeric,
+    ScalarNumeric: ScalarNumeric
+  };
+});
+</script>
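The histogram in numeric.html keeps one underflow bin, numBins equal-width central bins, and one overflow bin, so Numeric.getBinForValue is a constant-time index computation. A standalone sketch of that arithmetic (illustrative names only, not the catapult API):

import math

def bin_index(value, lo, hi, num_bins):
    # Mirrors Numeric.getBinForValue: fixed-width central bins plus
    # underflow/overflow catch-alls.
    if value < lo:
        return 'underflow'
    if value >= hi:
        return 'overflow'
    width = (hi - lo) / float(num_bins)
    return int(math.floor((value - lo) / width))

# Matches the numericBasic test below: 250 lands in the [200, 300) bin.
assert bin_index(250, 0, 1000, 10) == 2
assert bin_index(-1, 0, 1000, 10) == 'underflow'
assert bin_index(1000, 0, 1000, 10) == 'overflow'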
diff --git a/catapult/tracing/tracing/value/numeric_test.html b/catapult/tracing/tracing/value/numeric_test.html
new file mode 100644
index 0000000..05f930a
--- /dev/null
+++ b/catapult/tracing/tracing/value/numeric_test.html
@@ -0,0 +1,176 @@
+<!DOCTYPE html>
+<!--
+Copyright 2016 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/unit.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+
+  test('nonUnitThrows', function() {
+    assert.throws(function() { new tr.v.NumericBase('foo', -273.15); });
+  });
+
+  test('nonNumberScalarThrows', function() {
+    var unit = tr.v.Unit.byName.sizeInBytes;
+    assert.throws(function() { new tr.v.ScalarNumeric(unit, 'foo'); });
+  });
+
+  test('numericBasic', function() {
+    var n = new tr.v.Numeric.createLinear(
+        tr.v.Unit.byName.timeDurationInMs,
+        tr.b.Range.fromExplicitRange(0, 1000),
+        10);
+    assert.equal(n.getBinForValue(250).range.min, 200);
+    assert.equal(n.getBinForValue(250).range.max, 300);
+    n.add(-1, 'a');
+    n.add(0, 'b');
+    n.add(0, 'c');
+    n.add(500, 'c');
+    n.add(999, 'd');
+    n.add(1000, 'd');
+    assert.equal(n.underflowBin.count, 1);
+
+    assert.equal(n.getBinForValue(0).count, 2);
+    assert.deepEqual(n.getBinForValue(0).sourceInfos,
+                     ['b', 'c']);
+
+    assert.equal(n.getBinForValue(500).count, 1);
+    assert.equal(n.getBinForValue(999).count, 1);
+
+    assert.equal(n.overflowBin.count, 1);
+    assert.equal(n.numValues, 6);
+    assert.closeTo(n.average, 416.3, 0.1);
+  });
+
+  test('numericNans', function() {
+    var n = new tr.v.Numeric.createLinear(
+        tr.v.Unit.byName.timeDurationInMs,
+        tr.b.Range.fromExplicitRange(0, 1000),
+        10);
+    n.add(undefined, 'b');
+    n.add(NaN, 'c');
+
+    assert.equal(n.numNans, 2);
+    assert.deepEqual(n.nanSourceInfos, ['b', 'c']);
+  });
+
+  test('addNumericsValid', function() {
+    var n0 = new tr.v.Numeric.createLinear(
+        tr.v.Unit.byName.timeDurationInMs,
+        tr.b.Range.fromExplicitRange(0, 1000),
+        10);
+    var n1 = new tr.v.Numeric.createLinear(
+        tr.v.Unit.byName.timeDurationInMs,
+        tr.b.Range.fromExplicitRange(0, 1000),
+        10);
+    n0.add(-1, 'a0');
+    n0.add(0, 'b0');
+    n0.add(0, 'c0');
+    n0.add(500, 'c0');
+    n0.add(1000, 'd0');
+    n0.add(NaN, 'e0');
+
+    n1.add(-1, 'a1');
+    n1.add(0, 'b1');
+    n1.add(0, 'c1');
+    n1.add(999, 'd1');
+    n1.add(1000, 'd1');
+    n1.add(NaN, 'e1');
+
+    n0.addNumeric(n1);
+
+    assert.equal(n0.numNans, 2);
+    assert.deepEqual(n0.nanSourceInfos, ['e0', 'e1']);
+
+    assert.equal(n0.underflowBin.count, 2);
+    assert.deepEqual(n0.underflowBin.sourceInfos, ['a0', 'a1']);
+
+    assert.equal(n0.getBinForValue(0).count, 4);
+    assert.deepEqual(n0.getBinForValue(0).sourceInfos,
+        ['b0', 'c0', 'b1', 'c1']);
+
+    assert.equal(n0.getBinForValue(500).count, 1);
+    assert.deepEqual(n0.getBinForValue(500).sourceInfos, ['c0']);
+
+    assert.equal(n0.getBinForValue(999).count, 1);
+    assert.deepEqual(n0.getBinForValue(999).sourceInfos, ['d1']);
+
+    assert.equal(n0.overflowBin.count, 2);
+    assert.deepEqual(n0.overflowBin.sourceInfos, ['d0', 'd1']);
+
+    assert.equal(n0.numValues, 10);
+    assert.closeTo(n0.average, 349.7, 0.1);
+
+    assert.equal(2, n0.maxCount);
+    assert.equal(2, n1.maxCount);
+  });
+
+  test('addNumericsInvalid', function() {
+    var n0 = new tr.v.Numeric.createLinear(
+        tr.v.Unit.byName.timeDurationInMs,
+        tr.b.Range.fromExplicitRange(0, 1000),
+        10);
+    var n1 = new tr.v.Numeric.createLinear(
+        tr.v.Unit.byName.timeDurationInMs,
+        tr.b.Range.fromExplicitRange(0, 1001),
+        10);
+    var n2 = new tr.v.Numeric.createLinear(
+        tr.v.Unit.byName.timeDurationInMs,
+        tr.b.Range.fromExplicitRange(0, 1000),
+        11);
+
+    assert.throws(n0.addNumeric.bind(n0, n1), Error);
+    assert.throws(n0.addNumeric.bind(n0, n2), Error);
+  });
+
+  test('getInterpolateCountAt', function() {
+    var n = tr.v.Numeric.fromDict({
+      unit: 'unitless',
+      min: 0,
+      max: 100,
+      centralBinWidth: 10,
+      underflowBin: {min: -Number.MAX_VALUE, max: 0, count: 11},
+      centralBins: [
+        {min: 0, max: 10, count: 10},
+        {min: 10, max: 20, count: 9},
+        {min: 20, max: 30, count: 8},
+        {min: 30, max: 40, count: 7},
+        {min: 40, max: 50, count: 6},
+        {min: 50, max: 60, count: 5},
+        {min: 60, max: 70, count: 4},
+        {min: 70, max: 80, count: 3},
+        {min: 80, max: 90, count: 2},
+        {min: 90, max: 100, count: 1}
+      ],
+      overflowBin: {min: 100, max: Number.MAX_VALUE, count: 0}
+    });
+
+    assert.equal(11, n.maxCount);
+    assert.equal(11, n.getInterpolatedCountAt(-1));
+    assert.equal(0, n.getInterpolatedCountAt(101));
+    assert.closeTo(10.8, n.getInterpolatedCountAt(1), 1e-3);
+    assert.closeTo(9.5, n.getInterpolatedCountAt(10), 1e-3);
+    assert.closeTo(0.2, n.getInterpolatedCountAt(99), 1e-3);
+  });
+
+  test('scalarBasic', function() {
+    var unit = tr.v.Unit.byName.sizeInBytes;
+
+    var d = {
+      type: 'scalar',
+      unit: unit.asJSON(),
+      value: 42
+    };
+
+    assert.deepEqual(d, tr.v.NumericBase.fromDict(d).asDict());
+  });
+});
+
+</script>
diff --git a/catapult/tracing/tracing/value/time_display_mode.html b/catapult/tracing/tracing/value/time_display_mode.html
new file mode 100644
index 0000000..d403b90
--- /dev/null
+++ b/catapult/tracing/tracing/value/time_display_mode.html
@@ -0,0 +1,52 @@
+<!DOCTYPE html>
+<!--
+Copyright 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/base/base.html">
+
+<script>
+'use strict';
+
+/**
+ * @fileoverview Time display modes (ms and ns) for formatting timestamps.
+ */
+tr.exportTo('tr.v', function() {
+  var msDisplayMode = {
+    scale: 1e-3,
+    suffix: 'ms',
+    // Compares a < b with adjustments to precision errors.
+    roundedLess: function(a, b) {
+      return Math.round(a * 1000) < Math.round(b * 1000);
+    },
+    format: function(ts) {
+      return new Number(ts)
+          .toLocaleString(undefined, { minimumFractionDigits: 3 }) + ' ms';
+    }
+  };
+
+  var nsDisplayMode = {
+    scale: 1e-9,
+    suffix: 'ns',
+    // Compares a < b with adjustments to precision errors.
+    roundedLess: function(a, b) {
+      return Math.round(a * 1000000) < Math.round(b * 1000000);
+    },
+    format: function(ts) {
+      return new Number(ts * 1000000)
+          .toLocaleString(undefined, { maximumFractionDigits: 0 }) + ' ns';
+    }
+  };
+
+  var TimeDisplayModes = {
+    ns: nsDisplayMode,
+    ms: msDisplayMode
+  };
+
+  return {
+    TimeDisplayModes: TimeDisplayModes
+  };
+});
+</script>
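The roundedLess helpers above avoid spurious differences from floating-point noise by comparing timestamps only at the precision that format() will display (three decimal places of ms, or whole nanoseconds). A small sketch of the idea (illustrative, not the catapult API):

def rounded_less(a_ms, b_ms, displayed_decimals):
    # Compare only at display precision, so two values that format
    # identically never compare as different.
    scale = 10 ** displayed_decimals
    return round(a_ms * scale) < round(b_ms * scale)

assert not rounded_less(1.0004, 1.0, 3)   # both display as "1.000 ms"
assert rounded_less(1.0, 1.001, 3)        # distinguishable at 3 decimals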
diff --git a/catapult/tracing/tracing/value/time_display_mode_test.html b/catapult/tracing/tracing/value/time_display_mode_test.html
new file mode 100644
index 0000000..d1e208a
--- /dev/null
+++ b/catapult/tracing/tracing/value/time_display_mode_test.html
@@ -0,0 +1,42 @@
+<!DOCTYPE html>
+<!--
+Copyright 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/value/time_display_mode.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  test('Time.ms.format', function() {
+    function local(v) {
+      return v.toLocaleString(undefined, { minimumFractionDigits: 3}) + ' ms';
+    }
+
+    var unit = tr.v.TimeDisplayModes.ms;
+    assert.equal(unit.format(1), local(1));
+    assert.equal(unit.format(1.001), local(1.001));
+    assert.equal(unit.format(1.0005), local(1.001));
+    assert.equal(unit.format(1.0004), local(1));
+    assert.equal(unit.format(0.999), local(0.999));
+    assert.equal(unit.format(0.9995), local(1));
+  });
+
+  test('Time.ns.format', function() {
+    function local(v) {
+      return v.toLocaleString(undefined, { maximumFractionDigits: 0}) + ' ns';
+    }
+
+    var unit = tr.v.TimeDisplayModes.ns;
+    assert.equal(unit.format(1), local(1000000));
+    assert.equal(unit.format(0.001), local(1000));
+    assert.equal(unit.format(0.000001), local(1));
+    assert.equal(unit.format(0.0000005), local(1));
+    assert.equal(unit.format(0.0000004), local(0));
+    assert.equal(unit.format(0.0000015), local(2));
+  });
+});
+</script>
diff --git a/catapult/tracing/tracing/value/ui/array_of_numbers_span.html b/catapult/tracing/tracing/value/ui/array_of_numbers_span.html
new file mode 100644
index 0000000..9f90e35
--- /dev/null
+++ b/catapult/tracing/tracing/value/ui/array_of_numbers_span.html
@@ -0,0 +1,75 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<link rel="import" href="/tracing/base/statistics.html">
+<script>
+'use strict';
+tr.exportTo('tr.v.ui', function() {
+  var ArrayOfNumbersSummaryModes = {
+    AVERAGE_MODE: 'average-mode',
+    TOTAL_MODE: 'total-mode'
+  };
+  return {
+    ArrayOfNumbersSummaryModes: ArrayOfNumbersSummaryModes
+  };
+});
+</script>
+<polymer-element name="tr-v-ui-array-of-numbers-span">
+  <template>
+  </template>
+  <script>
+  'use strict';
+
+  Polymer({
+    created: function() {
+      this.numbers_ = undefined;
+      this.summaryMode_ = tr.v.ui.ArrayOfNumbersSummaryModes.AVERAGE_MODE;
+    },
+
+    get summaryMode() {
+      return this.summaryMode_;
+    },
+
+    set summaryMode(summaryMode) {
+      this.summaryMode_ = summaryMode;
+      this.updateContents_();
+    },
+
+    get numbers() {
+      return this.numbers_;
+    },
+
+    set numbers(numbers) {
+      if (numbers === undefined) {
+        this.numbers_ = undefined;
+        this.updateContents_();
+        return;
+      }
+      if (!(numbers instanceof Array))
+        throw new Error('Must provide an array');
+      this.numbers_ = numbers;
+      this.updateContents_();
+    },
+
+    updateContents_: function() {
+      if (this.numbers_ === undefined) {
+        this.shadowRoot.textContent = '-';
+        return;
+      }
+
+      var ArrayOfNumbersSummaryModes = tr.v.ui.ArrayOfNumbersSummaryModes;
+      var value;
+      if (this.summaryMode_ === ArrayOfNumbersSummaryModes.AVERAGE_MODE)
+        value = tr.b.Statistics.mean(this.numbers_);
+      else
+        value = tr.b.Statistics.sum(this.numbers_);
+
+      var valueRounded = Math.round(value * 1000.0) / 1000.0;
+      this.shadowRoot.textContent = valueRounded;
+    }
+  });
+  </script>
+</polymer-element>
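tr-v-ui-array-of-numbers-span renders either the mean or the sum of its numbers, rounded to three decimals for display. The equivalent computation, as a sketch (illustrative, not the catapult API):

def summarize(numbers, mode='average'):
    value = (sum(numbers) / float(len(numbers)) if mode == 'average'
             else sum(numbers))
    # Three decimal places, as updateContents_ does before rendering.
    return round(value * 1000.0) / 1000.0

assert summarize([1, 2, 3], 'average') == 2
assert summarize([1, 2, 3], 'total') == 6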
diff --git a/catapult/tracing/tracing/value/ui/array_of_numbers_span_test.html b/catapult/tracing/tracing/value/ui/array_of_numbers_span_test.html
new file mode 100644
index 0000000..9847276
--- /dev/null
+++ b/catapult/tracing/tracing/value/ui/array_of_numbers_span_test.html
@@ -0,0 +1,28 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<link rel="import" href="/tracing/value/ui/array_of_numbers_span.html">
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  test('instantiateInAverageMode', function() {
+    var span = document.createElement('tr-v-ui-array-of-numbers-span');
+    span.numbers = [1, 2, 3];
+    span.summaryMode = tr.v.ui.ArrayOfNumbersSummaryModes.AVERAGE_MODE;
+    this.addHTMLOutput(span);
+    assert.equal(span.shadowRoot.textContent, '2');
+  });
+
+  test('instantiateInTotalsMode', function() {
+    var span = document.createElement('tr-v-ui-array-of-numbers-span');
+    span.numbers = [1, 2, 3];
+    span.summaryMode = tr.v.ui.ArrayOfNumbersSummaryModes.TOTAL_MODE;
+    this.addHTMLOutput(span);
+    assert.equal(span.shadowRoot.textContent, '6');
+  });
+});
+</script>
diff --git a/catapult/tracing/tracing/value/ui/generic_table_view.html b/catapult/tracing/tracing/value/ui/generic_table_view.html
new file mode 100644
index 0000000..f9becae
--- /dev/null
+++ b/catapult/tracing/tracing/value/ui/generic_table_view.html
@@ -0,0 +1,299 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<link rel="import" href="/tracing/base/iteration_helpers.html">
+<link rel="import" href="/tracing/base/statistics.html">
+<link rel="import" href="/tracing/ui/analysis/generic_object_view.html">
+<link rel="import" href="/tracing/ui/base/table.html">
+<link rel="import" href="/tracing/value/generic_table.html">
+<link rel="import" href="/tracing/value/ui/array_of_numbers_span.html">
+
+<polymer-element name="tr-v-ui-generic-table-view">
+  <template>
+    <style>
+    :host {
+      display: flex;
+    }
+    #table {
+      flex: 1 1 auto;
+      align-self: stretch;
+    }
+    </style>
+    <tr-ui-b-table id="table"></tr-ui-b-table>
+  </template>
+</polymer-element>
+
+<script>
+'use strict';
+
+tr.exportTo('tr.v.ui', function() {
+  var TEXT_COLUMN_MODE = 1;
+  var NUMERIC_COLUMN_MODE = 2;
+  var ELEMENT_COLUMN_MODE = 3;
+
+  function isNumeric(value) {
+    // TODO(nduca): Also consider other units that are numeric.
+    if ((typeof value) === 'number')
+      return true;
+    else if (value instanceof Number)
+      return true;
+    return false;
+  }
+
+  function GenericTableViewTotalsItem(opt_values) {
+    if (opt_values !== undefined)
+      this.values = opt_values;
+    else
+      this.values = [];
+  }
+
+  function GenericTableViewColumnDescriptor(fieldName, firstFieldValue) {
+    this.title = fieldName;
+    this.fieldName = fieldName;
+
+    this.updateModeGivenValue(firstFieldValue);
+  }
+
+  GenericTableViewColumnDescriptor.prototype = {
+    get columnMode() {
+      return this.columnMode_;
+    },
+
+    get isInNumericMode() {
+      return this.columnMode_ === NUMERIC_COLUMN_MODE;
+    },
+
+    cmp: function(a, b) {
+      if (this.columnMode_ === ELEMENT_COLUMN_MODE)
+        return 0;
+
+      return tr.b.comparePossiblyUndefinedValues(a, b, function(a, b) {
+        var vA = a[this.fieldName];
+        var vB = b[this.fieldName];
+        return tr.b.comparePossiblyUndefinedValues(vA, vB, function(vA, vB) {
+          if (vA.localeCompare)
+            return vA.localeCompare(vB);
+          return vA - vB;
+        }, this);
+      }, this);
+    },
+
+    updateModeGivenValue: function(fieldValue) {
+      if (this.columnMode_ === undefined) {
+        if (fieldValue === undefined || fieldValue === null)
+          return;
+
+        if (isNumeric(fieldValue)) {
+          this.columnMode_ = NUMERIC_COLUMN_MODE;
+          return;
+        }
+
+        if (fieldValue instanceof HTMLElement) {
+          this.columnMode_ = ELEMENT_COLUMN_MODE;
+          return;
+        }
+
+        this.columnMode_ = TEXT_COLUMN_MODE;
+        return;
+      }
+
+      // Undefineds & nulls shouldn't change the mode.
+      if (fieldValue === undefined || fieldValue === null)
+        return;
+
+      // If we were already in numeric mode, then we don't
+      // need to put it into numeric mode again. And, if we were
+      // previously in text mode, then we can't go into numeric mode now.
+      if (isNumeric(fieldValue))
+        return;
+
+      if (fieldValue instanceof HTMLElement) {
+        this.columnMode_ = ELEMENT_COLUMN_MODE;
+        return;
+      }
+
+      if (this.columnMode_ === NUMERIC_COLUMN_MODE)
+        this.columnMode_ = TEXT_COLUMN_MODE;
+    },
+
+    value: function(item) {
+      var fieldValue = item[this.fieldName];
+      if (fieldValue instanceof GenericTableViewTotalsItem) {
+        var span = document.createElement('tr-v-ui-array-of-numbers-span');
+        span.summaryMode = tr.v.ui.ArrayOfNumbersSummaryModes.TOTAL_MODE;
+        span.numbers = fieldValue.values;
+        return span;
+      }
+
+      if (fieldValue === undefined)
+        return '-';
+
+      if (fieldValue instanceof HTMLElement)
+        return fieldValue;
+
+      if (fieldValue instanceof Object) {
+        var gov = document.createElement('tr-ui-a-generic-object-view');
+        gov.object = fieldValue;
+        return gov;
+      }
+
+      // TODO(nduca): Use units objects if applicable.
+      return fieldValue;
+    }
+  };
+
+  Polymer('tr-v-ui-generic-table-view', {
+    created: function() {
+      this.items_ = undefined;
+      this.importantColumNames_ = [];
+    },
+
+    get items() {
+      return this.items_;
+    },
+
+    set items(itemsOrGenericTable) {
+      if (itemsOrGenericTable === undefined) {
+        this.items_ = undefined;
+      } else if (itemsOrGenericTable instanceof Array) {
+        this.items_ = itemsOrGenericTable;
+      } else if (itemsOrGenericTable instanceof tr.v.GenericTable) {
+        this.items_ = itemsOrGenericTable.items;
+      }
+      this.updateContents_();
+    },
+
+    get importantColumNames() {
+      return this.importantColumNames_;
+    },
+
+    set importantColumNames(importantColumNames) {
+      this.importantColumNames_ = importantColumNames;
+      this.updateContents_();
+    },
+
+    createColumns_: function() {
+      var columnsByName = {};
+      this.items_.forEach(function(item) {
+        tr.b.iterItems(item, function(itemFieldName, itemFieldValue) {
+          var colDesc = columnsByName[itemFieldName];
+          if (colDesc !== undefined) {
+            colDesc.updateModeGivenValue(itemFieldValue);
+            return;
+          }
+
+          colDesc = new GenericTableViewColumnDescriptor(
+              itemFieldName, itemFieldValue);
+          columnsByName[itemFieldName] = colDesc;
+        }, this);
+      }, this);
+
+      var columns = tr.b.dictionaryValues(columnsByName);
+      if (columns.length === 0)
+        return undefined;
+
+      // Sort by name.
+      var isColumnNameImportant = {};
+      var importantColumNames = this.importantColumNames || [];
+      importantColumNames.forEach(function(icn) {
+        isColumnNameImportant[icn] = true;
+      });
+      columns.sort(function(a, b) {
+        var iA = isColumnNameImportant[a.title] ? 1 : 0;
+        var iB = isColumnNameImportant[b.title] ? 1 : 0;
+        if ((iB - iA) !== 0)
+          return iB - iA;
+        return a.title.localeCompare(b.title);
+      });
+
+      // Set sizes. This is complicated by the fact that the first
+      // table column must have a fixed size.
+      var colWidthPercentage;
+      if (columns.length == 1)
+        colWidthPercentage = '100%';
+      else
+        colWidthPercentage = (100 / (columns.length - 1)).toFixed(3) + '%';
+      columns[0].width = '250px';
+      for (var i = 1; i < columns.length; i++)
+        columns[i].width = colWidthPercentage;
+
+      return columns;
+    },
+
+    createFooterRowsIfNeeded_: function(columns) {
+      // Make totals row if needed.
+      var hasColumnThatIsNumeric = columns.some(function(column) {
+        return column.isInNumericMode;
+      });
+      if (!hasColumnThatIsNumeric)
+        return [];
+
+      var totalsItems = {};
+      columns.forEach(function(column) {
+        if (!column.isInNumericMode)
+          return;
+        var totalsItem = new GenericTableViewTotalsItem();
+        this.items_.forEach(function(item) {
+          var fieldValue = item[column.fieldName];
+          if (fieldValue === undefined || fieldValue === null)
+            return;
+          totalsItem.values.push(fieldValue);
+        });
+        totalsItems[column.fieldName] = totalsItem;
+      }, this);
+
+      return [totalsItems];
+    },
+
+    updateContents_: function() {
+      var columns;
+      if (this.items_ !== undefined)
+        columns = this.createColumns_();
+
+      if (!columns) {
+        this.$.table.tableColumns = [];
+        this.$.table.tableRows = [];
+        this.$.table.footerRows = [];
+        return;
+      }
+
+      this.$.table.tableColumns = columns;
+      this.$.table.tableRows = this.items_;
+      this.$.table.footerRows = this.createFooterRowsIfNeeded_(columns);
+      this.$.table.rebuild();
+    },
+
+    get selectionMode() {
+      return this.$.table.selectionMode;
+    },
+
+    set selectionMode(selectionMode) {
+      this.$.table.selectionMode = selectionMode;
+    },
+
+    get rowHighlightStyle() {
+      return this.$.table.rowHighlightStyle;
+    },
+
+    set rowHighlightStyle(rowHighlightStyle) {
+      this.$.table.rowHighlightStyle = rowHighlightStyle;
+    },
+
+    get cellHighlightStyle() {
+      return this.$.table.cellHighlightStyle;
+    },
+
+    set cellHighlightStyle(cellHighlightStyle) {
+      this.$.table.cellHighlightStyle = cellHighlightStyle;
+    }
+  });
+
+  return {
+    GenericTableViewTotalsItem: GenericTableViewTotalsItem,
+    GenericTableViewColumnDescriptor: GenericTableViewColumnDescriptor
+  };
+});
+</script>
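The column-mode logic in generic_table_view.html is a small state machine: the first concrete value picks numeric, element, or text mode; undefined/null never change it; an element value always forces element mode; and a non-numeric value demotes a numeric column to text. A sketch of the same rules (illustrative constants, not the catapult API):

TEXT, NUMERIC, ELEMENT = 'text', 'numeric', 'element'

def update_mode(mode, value, is_element=False):
    if value is None:                 # undefined/null never change the mode
        return mode
    if is_element:                    # element values always force element mode
        return ELEMENT
    if mode is None:                  # first concrete value picks the mode
        return NUMERIC if isinstance(value, (int, float)) else TEXT
    if isinstance(value, (int, float)):
        return mode                   # numbers never demote an existing mode
    return TEXT if mode == NUMERIC else mode

mode = None
for v in (4, None, 'a'):              # mirrors the descBasicNumericMode test
    mode = update_mode(mode, v)
assert mode == TEXT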
diff --git a/catapult/tracing/tracing/value/ui/generic_table_view_test.html b/catapult/tracing/tracing/value/ui/generic_table_view_test.html
new file mode 100644
index 0000000..85bfe45
--- /dev/null
+++ b/catapult/tracing/tracing/value/ui/generic_table_view_test.html
@@ -0,0 +1,199 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2014 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/value/generic_table.html">
+<link rel="import" href="/tracing/value/ui/generic_table_view.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  var GenericTableViewColumnDescriptor =
+      tr.v.ui.GenericTableViewColumnDescriptor;
+  var GenericTableViewTotalsItem = tr.v.ui.GenericTableViewTotalsItem;
+
+  test('descBasicNumericMode', function() {
+    var colDesc = new GenericTableViewColumnDescriptor('a');
+    assert.isFalse(colDesc.isInNumericMode);
+
+    colDesc.updateModeGivenValue(4);
+    assert.isTrue(colDesc.isInNumericMode);
+
+    colDesc.updateModeGivenValue(4);
+    assert.isTrue(colDesc.isInNumericMode);
+
+    colDesc.updateModeGivenValue(undefined);
+    colDesc.updateModeGivenValue(null);
+    assert.isTrue(colDesc.isInNumericMode);
+
+    colDesc.updateModeGivenValue('a');
+    assert.isFalse(colDesc.isInNumericMode);
+  });
+
+  test('descBasicNonNumericMode', function() {
+    var colDesc = new GenericTableViewColumnDescriptor('a');
+    assert.isFalse(colDesc.isInNumericMode);
+    colDesc.updateModeGivenValue(4);
+    assert.isTrue(colDesc.isInNumericMode);
+    colDesc.updateModeGivenValue('a');
+    assert.isFalse(colDesc.isInNumericMode);
+  });
+
+  test('descCmpWithNumbers', function() {
+    var colDesc = new GenericTableViewColumnDescriptor('a', 1);
+    assert.equal(colDesc.cmp({a: 1}, {a: 2}), -1);
+    assert.equal(colDesc.cmp({a: 1}, undefined), -1);
+  });
+
+  test('descCmpWithText', function() {
+    var colDesc = new GenericTableViewColumnDescriptor('a', 'text');
+    assert.equal(colDesc.cmp({a: 'a'}, {a: 'b'}), -1);
+    assert.equal(colDesc.cmp({a: 'a'}, undefined), -1);
+  });
+
+  test('descValue', function() {
+    var colDesc = new GenericTableViewColumnDescriptor('a', 1);
+    var value = colDesc.value({a: undefined});
+    assert.equal(value, '-');
+
+    value = colDesc.value({a: 3});
+    assert.equal(value, 3);
+
+    var totalsValue = colDesc.value(
+        {a: new GenericTableViewTotalsItem([1, 2, 3])});
+    assert.equal(totalsValue.tagName.toLowerCase(),
+                  'tr-v-ui-array-of-numbers-span');
+    assert.deepEqual(totalsValue.numbers, [1, 2, 3]);
+  });
+
+  test('everythingTogether', function() {
+    var table = document.createElement('tr-v-ui-generic-table-view');
+    table.items = [
+      {
+        a: 'someString',
+        b: 2,
+        c: 'adsf'
+      },
+      {
+        a: 'someOtherString',
+        b: 2,
+        c: 'adsf'
+      }
+    ];
+    this.addHTMLOutput(table);
+  });
+
+  test('summableColumn', function() {
+    var table = document.createElement('tr-v-ui-generic-table-view');
+    table.items = [
+      {
+        a: 1
+      },
+      {
+        a: 2
+      },
+      {
+        a: 3
+      }
+    ];
+    this.addHTMLOutput(table);
+
+    assert.equal(table.$.table.tableColumns.length, 1);
+    assert.equal(table.$.table.tableRows.length, 3);
+    assert.isTrue(table.$.table.tableColumns[0].isInNumericMode);
+    assert.equal(table.$.table.tableColumns[0].fieldName, 'a');
+    var totalsItem = table.$.table.footerRows[0].a;
+    assert.deepEqual(totalsItem.values, [1, 2, 3]);
+  });
+
+
+  test('usingGenericTable', function() {
+    var table = document.createElement('tr-v-ui-generic-table-view');
+    table.items = new tr.v.GenericTable([
+      {
+        a: 1
+      }
+    ]);
+    assert.equal(table.items.length, 1);
+  });
+
+  test('valueIsObject', function() {
+    var table = document.createElement('tr-v-ui-generic-table-view');
+    table.items = new tr.v.GenericTable([
+      {
+        a: {x: 1, y: 'string'}
+      },
+      {
+        a: 'something'
+      }
+    ]);
+    this.addHTMLOutput(table);
+    assert.equal(table.items.length, 2);
+  });
+
+  test('mixedTypeTable', function() {
+    var table = document.createElement('tr-v-ui-generic-table-view');
+    table.items = [
+      {
+        a: 1
+      },
+      {
+        a: 2
+      },
+      {
+        b: 'c'
+      }
+    ];
+    this.addHTMLOutput(table);
+  });
+
+  test('tableWithElement', function() {
+    var table = document.createElement('tr-v-ui-generic-table-view');
+    table.items = [
+      {
+        a: 1
+      },
+      {
+        a: tr.ui.b.createSpan({textContent: 'ohai'})
+      },
+      {
+        b: 'c'
+      }
+    ];
+    this.addHTMLOutput(table);
+  });
+
+
+  test('emptyTable', function() {
+    var table = document.createElement('tr-v-ui-generic-table-view');
+    table.items = [{}];
+    assert.equal(table.$.table.tableColumns.length, 0);
+  });
+
+  test('undefinedAndValue', function() {
+    var table = document.createElement('tr-v-ui-generic-table-view');
+    table.items = [
+      {
+      },
+      {
+        a: 2
+      }
+    ];
+    this.addHTMLOutput(table);
+  });
+
+  test('undefinedOnly', function() {
+    var table = document.createElement('tr-v-ui-generic-table-view');
+    table.items = [
+      {
+        a: undefined
+      }
+    ];
+    this.addHTMLOutput(table);
+  });
+});
+</script>
diff --git a/catapult/tracing/tracing/value/ui/histogram_span.html b/catapult/tracing/tracing/value/ui/histogram_span.html
new file mode 100644
index 0000000..2bc4898
--- /dev/null
+++ b/catapult/tracing/tracing/value/ui/histogram_span.html
@@ -0,0 +1,192 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/base/statistics.html">
+<link rel="import" href="/tracing/ui/base/bar_chart.html">
+<link rel="import" href="/tracing/value/ui/scalar_span.html">
+
+<polymer-element name="tr-v-ui-histogram-span">
+  <template>
+    <style>
+      :host {
+        display: flex;
+        flex-direction: column;
+      }
+
+      #stats {
+        display: flex;
+        flex-direction: row;
+        flex: 0 0 auto;
+        font-weight: bold;
+      }
+
+      #nnans {
+        color: red;
+      }
+      #table {
+        flex: 1 1 auto;
+      }
+    </style>
+    <div id="stats">
+      <span id="nsamples"></span>&nbsp;samples,&nbsp;
+      <span id="hadnans"><span id="nnans"></span> non-numeric samples,&nbsp;
+      </span>
+      average=<tr-v-ui-scalar-span id="average"></tr-v-ui-scalar-span>
+    </div>
+    <div id="container"></div>
+  </template>
+  <script>
+  'use strict';
+
+  Polymer({
+    created: function() {
+      this.histogram_ = undefined;
+      this.chart_ = new tr.ui.b.BarChart();
+      this.chart_.width = 400;
+      this.chart_.height = 200;
+      this.mouseDownBin_ = undefined;
+      this.brushedBins_ = [];
+      this.chart_.addEventListener('item-mousedown',
+          this.onMouseDown_.bind(this));
+      this.chart_.addEventListener('item-mousemove',
+          this.onMouseMove_.bind(this));
+      this.chart_.addEventListener('item-mouseup',
+          this.onMouseUp_.bind(this));
+    },
+
+    ready: function() {
+      this.$.container.appendChild(this.chart_);
+    },
+
+    get brushedBins() {
+      return this.brushedBins_;
+    },
+
+    updateBrushedRange_: function(currentX) {
+      this.brushedBins_ = [this.histogram_.getBinForValue(currentX)];
+      var r = new tr.b.Range();
+      r.addValue(this.mouseDownX_);
+      r.addValue(currentX);
+
+      // Collect bins:
+      var centralMin = Number.MAX_VALUE;
+      var centralMax = -Number.MAX_VALUE;
+      this.histogram_.centralBins.forEach(function(bin) {
+        centralMin = Math.min(centralMin, bin.range.min);
+        centralMax = Math.max(centralMax, bin.range.max);
+        if ((bin.range.max > r.min) &&
+            (bin.range.min < r.max) &&
+            (this.brushedBins_.indexOf(bin) < 0))
+          this.brushedBins_.push(bin);
+      }, this);
+      if ((this.histogram_.underflowBin.max > r.min) &&
+          (this.brushedBins_.indexOf(this.histogram_.underflowBin) < 0)) {
+        this.brushedBins_.push(this.histogram_.underflowBin);
+      }
+      if ((this.histogram_.overflowBin.min < r.max) &&
+          (this.brushedBins_.indexOf(this.histogram_.overflowBin) < 0)) {
+        this.brushedBins_.push(this.histogram_.overflowBin);
+      }
+      this.brushedBins_.sort(function(a, b) {
+        return a.range.min - b.range.min;
+      });
+
+      // Prevent Infinity:
+      var minBin = this.histogram_.getBinForValue(r.min);
+      var maxBin = this.histogram_.getBinForValue(r.max);
+      var binWidth = this.histogram_.centralBins[0].range.range;
+      r.min = minBin ? Math.max(centralMin - binWidth, minBin.range.min) :
+        centralMin - binWidth;
+      r.max = maxBin ? Math.min(centralMax + binWidth, maxBin.range.max) :
+        centralMax + binWidth;
+
+      this.chart_.brushedRange = r;
+
+      this.dispatchEvent(new tr.b.Event('brushed-bins-changed'));
+    },
+
+    onMouseDown_: function(chartEvent) {
+      chartEvent.stopPropagation();
+      if (!this.histogram_)
+        return;
+      this.mouseDownX_ = chartEvent.x;
+      this.updateBrushedRange_(chartEvent.x);
+    },
+
+    onMouseMove_: function(chartEvent) {
+      chartEvent.stopPropagation();
+      if (!this.histogram_)
+        return;
+      this.updateBrushedRange_(chartEvent.x);
+    },
+
+    onMouseUp_: function(chartEvent) {
+      chartEvent.stopPropagation();
+      if (!this.histogram_)
+        return;
+      this.updateBrushedRange_(chartEvent.x);
+      this.mouseDownX_ = undefined;
+    },
+
+    get histogram() {
+      return this.histogram_;
+    },
+
+    set histogram(histogram) {
+      this.histogram_ = histogram;
+      this.updateContents_();
+    },
+
+    set isYLogScale(logScale) {
+      this.chart_.isYLogScale = logScale;
+    },
+
+    updateContents_: function() {
+      this.$.container.style.display = this.histogram_ ? '' : 'none';
+      if (!this.histogram_) {
+        this.$.nsamples.textContent = 0;
+        this.$.average.setValueAndUnit(undefined, undefined);
+        return;
+      }
+
+      this.$.nsamples.textContent = this.histogram_.numValues;
+      this.$.average.setValueAndUnit(this.histogram_.average,
+                                     this.histogram_.unit);
+      if (this.histogram_.numNans > 0) {
+        this.$.hadnans.style.display = '';
+        this.$.nnans.textContent = this.histogram_.numNans;
+      } else {
+        this.$.hadnans.style.display = 'none';
+      }
+
+      var maximumBinValue = tr.b.Statistics.max(this.histogram_.allBins,
+                                                function(bin) {
+                                                  return bin.count;
+                                                });
+      var chartData = [];
+      var binWidth = this.histogram_.centralBins[0].range.range;
+      this.histogram_.allBins.forEach(function(bin) {
+        var x = bin.range.min;
+        if (x === -Number.MAX_VALUE) {
+          if (!bin.count)
+            return;
+          x = bin.range.max - binWidth;
+        }
+        chartData.push({x: x,
+                        y: bin.count});
+      });
+      chartData.sort(function(x, y) {
+        return x.x - y.x;
+      });
+      this.$.container.style.display = chartData.length ? '' : 'none';
+      this.chart_.data = chartData;
+      this.brushedBins_ = [];
+      this.chart_.brushedRange = new tr.b.Range();
+    }
+  });
+  </script>
+</polymer-element>
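Brushing in tr-v-ui-histogram-span selects every bin whose range overlaps the dragged interval, then clamps the chart's brushed range so the infinite under/overflow bins do not stretch the axis. The overlap test itself reduces to the following sketch (illustrative, not the catapult API):

def brushed_bins(bins, lo, hi):
    # bins are (min, max) pairs; the strict inequalities mirror the overlap
    # checks in updateBrushedRange_.
    return [b for b in bins if b[1] > lo and b[0] < hi]

bins = [(0, 10), (10, 20), (20, 30)]
assert brushed_bins(bins, 5, 15) == [(0, 10), (10, 20)]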
diff --git a/catapult/tracing/tracing/value/ui/histogram_span_test.html b/catapult/tracing/tracing/value/ui/histogram_span_test.html
new file mode 100644
index 0000000..87ecf99
--- /dev/null
+++ b/catapult/tracing/tracing/value/ui/histogram_span_test.html
@@ -0,0 +1,69 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/value/histogram.html">
+<link rel="import" href="/tracing/value/ui/histogram_span.html">
+<link rel="import" href="/tracing/value/unit.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  test('basic', function() {
+    var h = new tr.v.Numeric.createLinear(
+        tr.v.Unit.byName.timeDurationInMs,
+        tr.b.Range.fromExplicitRange(0, 1000),
+        10);
+    h.add(-1, 'a');
+    h.add(0, 'b');
+    h.add(0, 'b');
+    h.add(0, 'b');
+    h.add(0, 'b');
+    h.add(0, 'b');
+    h.add(0, 'b');
+    h.add(0, 'c');
+    h.add(500, 'c');
+    h.add(999, 'd');
+    h.add(1000, 'd');
+
+    var span = document.createElement('tr-v-ui-histogram-span');
+    span.histogram = h;
+    this.addHTMLOutput(span);
+  });
+
+  test('undefined', function() {
+    var span = document.createElement('tr-v-ui-histogram-span');
+    span.histogram = undefined;
+    this.addHTMLOutput(span);
+  });
+
+  test('emptyHistogram', function() {
+    var h = new tr.v.Numeric.createLinear(
+        tr.v.Unit.byName.timeDurationInMs,
+        tr.b.Range.fromExplicitRange(0, 1000),
+        10);
+
+    var span = document.createElement('tr-v-ui-histogram-span');
+    span.histogram = h;
+    this.addHTMLOutput(span);
+  });
+
+  test('nans', function() {
+    var h = new tr.v.Numeric.createLinear(
+        tr.v.Unit.byName.timeDurationInMs,
+        tr.b.Range.fromExplicitRange(0, 1000),
+        10);
+    h.add(undefined, 'b');
+    h.add(NaN, 'c');
+
+    var span = document.createElement('tr-v-ui-histogram-span');
+    span.histogram = h;
+    this.addHTMLOutput(span);
+  });
+
+});
+</script>
diff --git a/catapult/tracing/tracing/value/ui/preferred_display_unit.html b/catapult/tracing/tracing/value/ui/preferred_display_unit.html
new file mode 100644
index 0000000..7e5a94e
--- /dev/null
+++ b/catapult/tracing/tracing/value/ui/preferred_display_unit.html
@@ -0,0 +1,40 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/value/unit.html">
+
+<polymer-element name="tr-v-ui-preferred-display-unit">
+  <script>
+  'use strict';
+  Polymer({
+    ready: function() {
+      this.preferredTimeDisplayMode_ = undefined;
+    },
+
+    attached: function() {
+      tr.v.Unit.didPreferredTimeDisplayUnitChange();
+    },
+
+    detached: function() {
+      tr.v.Unit.didPreferredTimeDisplayUnitChange();
+    },
+
+    // null means no-preference
+    get preferredTimeDisplayMode() {
+      return this.preferredTimeDisplayMode_;
+    },
+
+    set preferredTimeDisplayMode(v) {
+      if (this.preferredTimeDisplayMode_ === v)
+        return;
+      this.preferredTimeDisplayMode_ = v;
+      tr.v.Unit.didPreferredTimeDisplayUnitChange();
+    }
+
+  });
+  </script>
+</polymer-element>
diff --git a/catapult/tracing/tracing/value/ui/preferred_display_unit_test.html b/catapult/tracing/tracing/value/ui/preferred_display_unit_test.html
new file mode 100644
index 0000000..742783f
--- /dev/null
+++ b/catapult/tracing/tracing/value/ui/preferred_display_unit_test.html
@@ -0,0 +1,22 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/value/time_display_mode.html">
+<link rel="import" href="/tracing/value/ui/preferred_display_unit.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  test('instantiate', function() {
+    var unit = document.createElement('tr-v-ui-preferred-display-unit');
+    var ms = tr.v.TimeDisplayModes.ms;
+    unit.preferredTimeDisplayMode = ms;
+    assert.equal(unit.preferredTimeDisplayMode, ms);
+  });
+});
+</script>
diff --git a/catapult/tracing/tracing/value/ui/scalar_span.html b/catapult/tracing/tracing/value/ui/scalar_span.html
new file mode 100644
index 0000000..1528a22
--- /dev/null
+++ b/catapult/tracing/tracing/value/ui/scalar_span.html
@@ -0,0 +1,237 @@
+<!DOCTYPE html>
+<!--
+Copyright 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/ui/base/deep_utils.html">
+<link rel="import" href="/tracing/ui/base/polymer_utils.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/unit.html">
+
+<script>
+'use strict';
+tr.exportTo('tr.v.ui', function() {
+  function createScalarSpan(value, opt_config) {
+    if (value === undefined)
+      return '';
+
+    var config = opt_config || {};
+    var ownerDocument = config.ownerDocument || document;
+
+    var span = ownerDocument.createElement('tr-v-ui-scalar-span');
+
+    var numericValue;
+    if (value instanceof tr.v.ScalarNumeric) {
+      span.value = value;
+      numericValue = value.value;
+    } else {
+      var unit = config.unit;
+      if (unit === undefined) {
+        throw new Error(
+            'Unit must be provided in config when value is a number');
+      }
+      span.setValueAndUnit(value, unit);
+      numericValue = value;
+    }
+
+    if (config.total)
+      span.percentage = numericValue / config.total;
+
+    if (config.rightAlign)
+      span.rightAlign = true;
+
+    return span;
+  }
+
+  tr.v.Unit.addEventListener('display-mode-changed', function(e) {
+    var scalarSpanTagName = 'tr-v-ui-scalar-span';
+    var subclassNames = tr.ui.b.getPolymerElementsThatSubclass(
+        scalarSpanTagName);
+    subclassNames.push(scalarSpanTagName);
+    var isSubclass = {};
+    subclassNames.forEach(function(n) {
+      isSubclass[n.toUpperCase()] = true;
+    });
+
+    var m = tr.b.findDeepElementsMatchingPredicate(
+        document.body,
+        function(el) {
+          return isSubclass[el.tagName];
+        });
+    m.forEach(function(el) {
+      el.updateContent_();
+    });
+  });
+
+  return {
+    createScalarSpan: createScalarSpan
+  };
+});
+</script>
+
+<polymer-element name="tr-v-ui-scalar-span">
+  <template>
+    <style>
+    :host {
+      display: block;
+      position: relative;
+    }
+    #content.right-align {
+      text-align: right;
+      position: relative;
+      display: block;
+    }
+    #sparkline {
+      width: 0%;
+      position: absolute;
+      bottom: 0;
+      right: 0;
+      display: none;
+      height: 100%;
+      background-color: hsla(216, 100%, 94.5%, .75);
+      border-left: 1px solid hsl(216, 100%, 89%);
+      box-sizing: border-box;
+    }
+    #warning {
+      margin-left: 4px;
+      font-size: 66%;
+    }
+    </style>
+    <span id="sparkline"></span>
+    <span id="content"></span>
+    <span id="warning" style="display:none">&#9888;</span>
+  </template>
+  <script>
+  'use strict';
+
+  Polymer({
+    ready: function() {
+      this.value_ = undefined;
+      this.unit_ = undefined;
+
+      this.warning_ = undefined;
+      this.percentage_ = undefined;
+    },
+
+    set contentTextDecoration(deco) {
+      this.$.content.style.textDecoration = deco;
+    },
+
+    get value() {
+      return this.value_;
+    },
+
+    set value(value) {
+      if (value instanceof tr.v.ScalarNumeric) {
+        this.value_ = value.value;
+        this.unit_ = value.unit;
+      } else {
+        this.value_ = value;
+      }
+      this.updateContent_();
+    },
+
+    get unit() {
+      return this.unit_;
+    },
+
+    set unit(unit) {
+      this.unit_ = unit;
+      this.updateContent_();
+    },
+
+    setValueAndUnit: function(value, unit) {
+      this.value_ = value;
+      this.unit_ = unit;
+      this.updateContent_();
+    },
+
+    get percentage() {
+      return this.percentage_;
+    },
+
+    set percentage(percentage) {
+      this.percentage_ = percentage;
+      this.updateSparkline_();
+    },
+
+    get rightAlign() {
+      return this.$.content.classList.contains('right-align');
+    },
+
+    set rightAlign(rightAlign) {
+      if (rightAlign)
+        this.$.content.classList.add('right-align');
+      else
+        this.$.content.classList.remove('right-align');
+    },
+
+    updateSparkline_: function() {
+      if (this.percentage_ === undefined) {
+        this.$.sparkline.style.display = 'none';
+        this.$.sparkline.style.width = '0';
+      } else {
+        this.$.sparkline.style.display = 'block';
+        this.$.sparkline.style.width = (this.percentage_ * 100) + '%';
+      }
+    },
+
+    updateContent_: function() {
+      if (this.unit_ === undefined) {
+        this.$.content.textContent = '';
+        this.$.content.style.color = '';
+        return;
+      }
+
+      this.$.content.textContent = this.unit_.format(this.value);
+
+      var BIGGER_IS_BETTER = tr.v.ImprovementDirection.BIGGER_IS_BETTER;
+      var SMALLER_IS_BETTER = tr.v.ImprovementDirection.SMALLER_IS_BETTER;
+      var color = '';
+      if (this.unit_.isDelta) {
+        var improvementDirection = this.unit_.improvementDirection;
+        if (this.value > 0) {
+          // Positive delta.
+          switch (improvementDirection) {
+            case BIGGER_IS_BETTER:
+              color = 'green';
+              break;
+            case SMALLER_IS_BETTER:
+              color = 'red';
+              break;
+          }
+        } else if (this.value < 0) {
+          // Negative delta.
+          switch (improvementDirection) {
+            case BIGGER_IS_BETTER:
+              color = 'red';
+              break;
+            case SMALLER_IS_BETTER:
+              color = 'green';
+              break;
+          }
+        }
+      }
+      this.$.content.style.color = color;
+    },
+
+    get warning() {
+      return this.warning_;
+    },
+
+    set warning(warning) {
+      this.warning_ = warning;
+      var warningEl = this.$.warning;
+      if (this.warning_) {
+        warningEl.title = warning;
+        warningEl.style.display = '';
+      } else {
+        warningEl.title = '';
+        warningEl.style.display = 'none';
+      }
+    }
+  });
+  </script>
+</polymer-element>
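A short sketch of how `createScalarSpan()` is called (the numbers and config are illustrative assumptions; the test file that follows exercises the same paths more thoroughly):

```
// A ScalarNumeric carries its own unit.
var a = tr.v.ui.createScalarSpan(
    new tr.v.ScalarNumeric(tr.v.Unit.byName.sizeInBytes, 1536));  // '1.5 KiB'

// A plain number needs config.unit; config.total drives the sparkline width.
var b = tr.v.ui.createScalarSpan(256, {
  unit: tr.v.Unit.byName.sizeInBytes,
  total: 1024,        // percentage = 256 / 1024, i.e. a 25%-wide sparkline
  rightAlign: true
});
document.body.appendChild(b);
```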
diff --git a/catapult/tracing/tracing/value/ui/scalar_span_test.html b/catapult/tracing/tracing/value/ui/scalar_span_test.html
new file mode 100644
index 0000000..b380f59
--- /dev/null
+++ b/catapult/tracing/tracing/value/ui/scalar_span_test.html
@@ -0,0 +1,201 @@
+<!DOCTYPE html>
+<!--
+Copyright 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/time_display_mode.html">
+<link rel="import" href="/tracing/value/ui/scalar_span.html">
+<link rel="import" href="/tracing/value/unit.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  var ScalarNumeric = tr.v.ScalarNumeric;
+  var Unit = tr.v.Unit;
+  var THIS_DOC = document._currentScript.ownerDocument;
+
+  function checkScalarSpan(
+      test, value, unit, expectedTextContent, opt_expectedColor) {
+    var span = tr.v.ui.createScalarSpan(new tr.v.ScalarNumeric(unit, value));
+    assert.strictEqual(span.$.content.textContent, expectedTextContent);
+    assert.strictEqual(span.$.content.style.color, opt_expectedColor || '');
+    test.addHTMLOutput(span);
+  }
+
+  test('instantiate', function() {
+    checkScalarSpan(this, 123.456789, Unit.byName.timeDurationInMs,
+        '123.457 ms');
+    checkScalarSpan(this, 0, Unit.byName.normalizedPercentage, '0.000%');
+    checkScalarSpan(this, -2560, Unit.byName.sizeInBytes, '-2.5 KiB');
+  });
+
+  test('instantiate_smallerIsBetter', function() {
+    checkScalarSpan(this, 45097156608, Unit.byName.sizeInBytes_smallerIsBetter,
+        '42.0 GiB');
+    checkScalarSpan(this, 0, Unit.byName.energyInJoules_smallerIsBetter,
+        '0.000 J');
+    checkScalarSpan(this, -0.25, Unit.byName.unitlessNumber_smallerIsBetter,
+        '-0.250');
+  });
+
+  test('instantiate_biggerIsBetter', function() {
+    checkScalarSpan(this, 0.07, Unit.byName.powerInWatts_smallerIsBetter,
+        '0.070 W');
+    checkScalarSpan(this, 0, Unit.byName.timeStampInMs_biggerIsBetter,
+        '0.000 ms');
+    checkScalarSpan(this, -0.00003,
+        Unit.byName.normalizedPercentage_biggerIsBetter, '-0.003%');
+  });
+
+  test('instantiate_delta', function() {
+    checkScalarSpan(this, 123.456789, Unit.byName.timeDurationInMsDelta,
+        '+123.457 ms');
+    checkScalarSpan(this, 0, Unit.byName.normalizedPercentageDelta,
+        '\u00B10.000%');
+    checkScalarSpan(this, -2560, Unit.byName.sizeInBytesDelta,
+        '-2.5 KiB');
+  });
+
+  test('instantiate_delta_smallerIsBetter', function() {
+    checkScalarSpan(this, 45097156608,
+        Unit.byName.sizeInBytesDelta_smallerIsBetter, '+42.0 GiB', 'red');
+    checkScalarSpan(this, 0, Unit.byName.energyInJoulesDelta_smallerIsBetter,
+        '\u00B10.000 J');
+    checkScalarSpan(this, -0.25,
+        Unit.byName.unitlessNumberDelta_smallerIsBetter, '-0.250', 'green');
+  });
+
+  test('instantiate_delta_biggerIsBetter', function() {
+    checkScalarSpan(this, 0.07, Unit.byName.powerInWattsDelta_biggerIsBetter,
+        '+0.070 W', 'green');
+    checkScalarSpan(this, 0, Unit.byName.timeStampInMsDelta_biggerIsBetter,
+        '\u00B10.000 ms');
+    checkScalarSpan(this, -0.00003,
+        Unit.byName.normalizedPercentageDelta_biggerIsBetter, '-0.003%', 'red');
+  });
+
+  test('createScalarSpan', function() {
+    // No config.
+    var span = tr.v.ui.createScalarSpan(
+        new ScalarNumeric(Unit.byName.powerInWatts, 3.14));
+    assert.strictEqual(span.ownerDocument, document);
+    assert.strictEqual(span.tagName, 'TR-V-UI-SCALAR-SPAN');
+    assert.strictEqual(span.value, 3.14);
+    assert.strictEqual(span.unit, Unit.byName.powerInWatts);
+    assert.isUndefined(span.percentage);
+    assert.isUndefined(span.warning);
+    assert.isFalse(span.rightAlign);
+    this.addHTMLOutput(span);
+
+    // Custom owner document and right align.
+    var span = tr.v.ui.createScalarSpan(
+        new ScalarNumeric(Unit.byName.energyInJoules, 2.72),
+        { ownerDocument: THIS_DOC, rightAlign: true });
+    assert.strictEqual(span.ownerDocument, THIS_DOC);
+    assert.strictEqual(span.tagName, 'TR-V-UI-SCALAR-SPAN');
+    assert.strictEqual(span.value, 2.72);
+    assert.strictEqual(span.unit, Unit.byName.energyInJoules);
+    assert.isUndefined(span.percentage);
+    assert.isUndefined(span.warning);
+    assert.isTrue(span.rightAlign);
+    this.addHTMLOutput(span);
+
+    // Unit and sparkline set via config.
+    var span = tr.v.ui.createScalarSpan(1.62,
+        { unit: Unit.byName.timeStampInMs, total: 3.24 });
+    assert.strictEqual(span.ownerDocument, document);
+    assert.strictEqual(span.tagName, 'TR-V-UI-SCALAR-SPAN');
+    assert.strictEqual(span.value, 1.62);
+    assert.strictEqual(span.unit, Unit.byName.timeStampInMs);
+    assert.strictEqual(span.percentage, 0.5);
+    assert.isUndefined(span.warning);
+    assert.isFalse(span.rightAlign);
+    this.addHTMLOutput(span);
+  });
+
+  test('instantiate_withWarning', function() {
+    var span = document.createElement('tr-v-ui-scalar-span');
+    span.value = 400000000;
+    span.unit = Unit.byName.sizeInBytes;
+    span.warning = 'There is a problem with this size';
+    this.addHTMLOutput(span);
+  });
+
+  test('instantiate_withPercentage', function() {
+    var span = document.createElement('tr-v-ui-scalar-span');
+    span.value = new ScalarNumeric(Unit.byName.unitlessNumber, 99);
+    span.percentage = 0.66;
+    this.addHTMLOutput(span);
+  });
+
+  test('instantiate_withRightAlign', function() {
+    var span = document.createElement('tr-v-ui-scalar-span');
+    span.value = new ScalarNumeric(Unit.byName.timeStampInMs, 5.777);
+    span.rightAlign = true;
+    this.addHTMLOutput(span);
+  });
+
+  test('warningAndNonWarningHaveSimilarHeights', function() {
+    var spanA = document.createElement('tr-v-ui-scalar-span');
+    spanA.setValueAndUnit(400, Unit.byName.timeDurationInMs);
+
+    var spanB = document.createElement('tr-v-ui-scalar-span');
+    spanB.setValueAndUnit(400, Unit.byName.timeDurationInMs);
+    spanB.warning = 'There is a problem with this time';
+
+    var overall = document.createElement('div');
+    overall.style.display = 'flex';
+    overall.appendChild(spanA);
+    spanB.style.marginLeft = '4px';
+    overall.appendChild(spanB);
+    this.addHTMLOutput(overall);
+  });
+
+  test('respectCurrentDisplayUnit', function() {
+    try {
+      Unit.currentTimeDisplayMode = tr.v.TimeDisplayModes.ns;
+
+      var span = document.createElement('tr-v-ui-scalar-span');
+      span.setValueAndUnit(73, Unit.byName.timeStampInMs);
+      this.addHTMLOutput(span);
+
+      assert.isTrue(span.$.content.textContent.indexOf('ns') > 0);
+      Unit.currentTimeDisplayMode = tr.v.TimeDisplayModes.ms;
+      assert.isTrue(span.$.content.textContent.indexOf('ms') > 0);
+    } finally {
+      Unit.reset();
+    }
+  });
+
+  test('displaySparkline', function() {
+    var div = document.createElement('div');
+    div.style.width = '100px';
+    this.addHTMLOutput(div);
+
+    function addAndCheckScalarSpan(percentage, expectedDisplay, expectedWidth) {
+      var span = tr.v.ui.createScalarSpan(new ScalarNumeric(
+          Unit.byName.timeDurationInMs, 10 * div.children.length));
+      if (percentage !== null)
+        span.percentage = percentage;
+
+      div.appendChild(span);
+
+      var computedStyle = getComputedStyle(span.$.sparkline);
+      assert.equal(computedStyle.display, expectedDisplay);
+      assert.equal(parseInt(computedStyle.width), expectedWidth);
+    }
+
+    addAndCheckScalarSpan(null /* no percentage set */, 'none', 0);
+    addAndCheckScalarSpan(undefined, 'none', 0);
+    addAndCheckScalarSpan(0, 'block', 1);
+    addAndCheckScalarSpan(0.05, 'block', 5);
+    addAndCheckScalarSpan(0.5, 'block', 50);
+    addAndCheckScalarSpan(0.95, 'block', 95);
+    addAndCheckScalarSpan(1, 'block', 100);
+  });
+});
+</script>
diff --git a/catapult/tracing/tracing/value/unit.html b/catapult/tracing/tracing/value/unit.html
new file mode 100644
index 0000000..c34aaf0
--- /dev/null
+++ b/catapult/tracing/tracing/value/unit.html
@@ -0,0 +1,264 @@
+<!DOCTYPE html>
+<!--
+Copyright 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/base/event.html">
+<link rel="import" href="/tracing/base/event_target.html">
+<link rel="import" href="/tracing/value/time_display_mode.html">
+
+<script>
+'use strict';
+
+tr.exportTo('tr.v', function() {
+  var TimeDisplayModes = tr.v.TimeDisplayModes;
+
+  var BINARY_PREFIXES = ['', 'Ki', 'Mi', 'Gi', 'Ti'];
+
+  var PLUS_MINUS_SIGN = String.fromCharCode(177);
+
+  function max(a, b) {
+    if (a === undefined)
+      return b;
+    if (b === undefined)
+      return a;
+    return a.scale > b.scale ? a : b;
+  }
+
+  /** @enum */
+  var ImprovementDirection = {
+    DONT_CARE: 0,
+    BIGGER_IS_BETTER: 1,
+    SMALLER_IS_BETTER: 2
+  };
+
+  /** @constructor */
+  function Unit(unitName, jsonName, isDelta, improvementDirection,
+      formatValue) {
+    this.unitName = unitName;
+    this.jsonName = jsonName;
+    this.isDelta = isDelta;
+    this.improvementDirection = improvementDirection;
+    this.formatValue_ = formatValue;
+    this.correspondingDeltaUnit = undefined;
+  }
+
+  Unit.prototype = {
+    asJSON: function() {
+      return this.jsonName;
+    },
+
+    format: function(value) {
+      var formattedValue = this.formatValue_(value);
+      if (!this.isDelta || value < 0 /* already contains negative sign */)
+        return formattedValue;
+      if (value === 0)
+        return PLUS_MINUS_SIGN + formattedValue;
+      else
+        return '+' + formattedValue;
+    }
+  };
+
+  Unit.reset = function() {
+    Unit.currentTimeDisplayMode = TimeDisplayModes.ms;
+  };
+
+  Unit.timestampFromUs = function(us) {
+    return us / 1000;
+  };
+
+  Unit.maybeTimestampFromUs = function(us) {
+    return us === undefined ? undefined : us / 1000;
+  };
+
+  Object.defineProperty(Unit, 'currentTimeDisplayMode', {
+    get: function() {
+      return Unit.currentTimeDisplayMode_;
+    },
+    // Use tr-v-ui-preferred-display-unit element instead of directly setting.
+    set: function(value) {
+      if (Unit.currentTimeDisplayMode_ === value)
+        return;
+
+      Unit.currentTimeDisplayMode_ = value;
+      Unit.dispatchEvent(new tr.b.Event('display-mode-changed'));
+    }
+  });
+
+  Unit.didPreferredTimeDisplayUnitChange = function() {
+    var largest = undefined;
+    var els = tr.b.findDeepElementsMatching(document.body,
+        'tr-v-ui-preferred-display-unit');
+    els.forEach(function(el) {
+      largest = max(largest, el.preferredTimeDisplayMode);
+    });
+
+    Unit.currentTimeDisplayMode = largest === undefined ?
+        TimeDisplayModes.ms : largest;
+  };
+
+  Unit.byName = {};
+  Unit.byJSONName = {};
+
+  Unit.fromJSON = function(object) {
+    var u = Unit.byJSONName[object];
+    if (u) {
+      return u;
+    }
+    throw new Error('Unrecognized unit');
+  };
+
+  /**
+   * Define all combinations of a unit with isDelta and improvementDirection
+   * flags. For example, the following code:
+   *
+   *   Unit.define({
+   *     baseUnitName: 'powerInWatts'
+   *     baseJsonName: 'W'
+   *     formatValue: function(value) {
+   *       // Code for formatting the unit (independent of isDelta and
+   *       // improvementDirection flags).
+   *      }
+   *   });
+   *
+   * generates the following six units (JSON names shown in parentheses):
+   *
+   *   Unit.byName.powerInWatts (W)
+   *   Unit.byName.powerInWatts_smallerIsBetter (W_smallerIsBetter)
+   *   Unit.byName.powerInWatts_biggerIsBetter (W_biggerIsBetter)
+   *   Unit.byName.powerInWattsDelta (WDelta)
+   *   Unit.byName.powerInWattsDelta_smallerIsBetter (WDelta_smallerIsBetter)
+   *   Unit.byName.powerInWattsDelta_biggerIsBetter (WDelta_biggerIsBetter)
+   *
+   * with the appropriate flags and formatting code (including +/- prefixes
+   * for deltas).
+   */
+  Unit.define = function(params) {
+    tr.b.iterItems(ImprovementDirection, function(_, improvementDirection) {
+      var regularUnit =
+          Unit.defineUnitVariant_(params, false, improvementDirection);
+      var deltaUnit =
+          Unit.defineUnitVariant_(params, true, improvementDirection);
+
+      regularUnit.correspondingDeltaUnit = deltaUnit;
+      deltaUnit.correspondingDeltaUnit = deltaUnit;
+    });
+  };
+
+  Unit.defineUnitVariant_ = function(params, isDelta, improvementDirection) {
+    var nameSuffix = isDelta ? 'Delta' : '';
+    switch (improvementDirection) {
+      case ImprovementDirection.DONT_CARE:
+        break;
+      case ImprovementDirection.BIGGER_IS_BETTER:
+        nameSuffix += '_biggerIsBetter';
+        break;
+      case ImprovementDirection.SMALLER_IS_BETTER:
+        nameSuffix += '_smallerIsBetter';
+        break;
+      default:
+        throw new Error(
+            'Unknown improvement direction: ' + improvementDirection);
+    }
+
+    var unitName = params.baseUnitName + nameSuffix;
+    var jsonName = params.baseJsonName + nameSuffix;
+    if (Unit.byName[unitName] !== undefined)
+      throw new Error('Unit \'' + unitName + '\' already exists');
+    if (Unit.byJSONName[jsonName] !== undefined)
+      throw new Error('JSON unit \'' + jsonName + '\' already exists');
+
+    var unit = new Unit(
+        unitName, jsonName, isDelta, improvementDirection, params.formatValue);
+    Unit.byName[unitName] = unit;
+    Unit.byJSONName[jsonName] = unit;
+
+    return unit;
+  };
+
+  tr.b.EventTarget.decorate(Unit);
+  Unit.reset();
+
+  // Known display units follow.
+  //////////////////////////////////////////////////////////////////////////////
+
+  Unit.define({
+    baseUnitName: 'timeDurationInMs',
+    baseJsonName: 'ms',
+    formatValue: function(value) {
+      return Unit.currentTimeDisplayMode_.format(value);
+    }
+  });
+
+  Unit.define({
+    baseUnitName: 'timeStampInMs',
+    baseJsonName: 'tsMs',
+    formatValue: function(value) {
+      return Unit.currentTimeDisplayMode_.format(value);
+    }
+  });
+
+  Unit.define({
+    baseUnitName: 'normalizedPercentage',
+    baseJsonName: 'n%',
+    formatValue: function(value) {
+      var tmp = new Number(Math.round(value * 100000) / 1000);
+      return tmp.toLocaleString(undefined, { minimumFractionDigits: 3 }) + '%';
+    }
+  });
+
+  Unit.define({
+    baseUnitName: 'sizeInBytes',
+    baseJsonName: 'sizeInBytes',
+    formatValue: function(value) {
+      var signPrefix = '';
+      if (value < 0) {
+        signPrefix = '-';
+        value = -value;
+      }
+
+      var i = 0;
+      while (value >= 1024 && i < BINARY_PREFIXES.length - 1) {
+        value /= 1024;
+        i++;
+      }
+
+      return signPrefix + value.toFixed(1) + ' ' + BINARY_PREFIXES[i] + 'B';
+    }
+  });
+
+  Unit.define({
+    baseUnitName: 'energyInJoules',
+    baseJsonName: 'J',
+    formatValue: function(value) {
+      return value.toLocaleString(
+          undefined, { minimumFractionDigits: 3 }) + ' J';
+    }
+  });
+
+  Unit.define({
+    baseUnitName: 'powerInWatts',
+    baseJsonName: 'W',
+    formatValue: function(value) {
+      return value.toLocaleString(
+          undefined, { minimumFractionDigits: 3 }) + ' W';
+    }
+  });
+
+  Unit.define({
+    baseUnitName: 'unitlessNumber',
+    baseJsonName: 'unitless',
+    formatValue: function(value) {
+      return value.toLocaleString(
+          undefined, { minimumFractionDigits: 3, maximumFractionDigits: 3 });
+    }
+  });
+
+  return {
+    ImprovementDirection: ImprovementDirection,
+    Unit: Unit
+  };
+});
+</script>
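To make the `Unit.define()` contract above concrete, a sketch using a hypothetical `frameRateInHz` unit that is not defined by this patch:

```
tr.v.Unit.define({
  baseUnitName: 'frameRateInHz',  // hypothetical, for illustration only
  baseJsonName: 'Hz',
  formatValue: function(value) {
    return value.toLocaleString(
        undefined, { minimumFractionDigits: 3 }) + ' Hz';
  }
});

// Six variants are registered; delta variants gain a '+', '-' or '±' prefix.
var u = tr.v.Unit.byName.frameRateInHzDelta_biggerIsBetter;
u.format(60);                                     // '+60.000 Hz'
tr.v.Unit.fromJSON('Hz') === tr.v.Unit.byName.frameRateInHz;  // true
```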
diff --git a/catapult/tracing/tracing/value/unit_test.html b/catapult/tracing/tracing/value/unit_test.html
new file mode 100644
index 0000000..ad595ec
--- /dev/null
+++ b/catapult/tracing/tracing/value/unit_test.html
@@ -0,0 +1,174 @@
+<!DOCTYPE html>
+<!--
+Copyright 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/value/time_display_mode.html">
+<link rel="import" href="/tracing/value/unit.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  var Unit = tr.v.Unit;
+  var ImprovementDirection = tr.v.ImprovementDirection;
+
+  test('Unit.display-mode-changed', function() {
+    var Unit = tr.v.Unit;
+    var TimeDisplayModes = tr.v.TimeDisplayModes;
+
+    var listenerWasCalled = false;
+    function listener(e) {
+      listenerWasCalled = true;
+    }
+
+    try {
+      Unit.currentTimeDisplayMode = TimeDisplayModes.ms;
+      Unit.addEventListener('display-mode-changed', listener);
+
+      listenerWasCalled = false;
+      Unit.currentTimeDisplayMode = TimeDisplayModes.ns;
+      assert.isTrue(listenerWasCalled);
+      assert.equal(Unit.currentTimeDisplayMode, TimeDisplayModes.ns);
+    } finally {
+      Unit.removeEventListener('display-mode-changed', listener);
+      Unit.reset();
+    }
+  });
+
+  function checkTimeUnit(unit) {
+    try {
+      // Use milliseconds to display time (default behavior).
+      Unit.currentTimeDisplayMode = tr.v.TimeDisplayModes.ms;
+
+      assert.strictEqual(unit.format(0), '0.000 ms');
+      assert.strictEqual(unit.format(0.02), '0.020 ms');
+      assert.strictEqual(unit.format(0.001), '0.001 ms');
+      assert.strictEqual(unit.format(0.0005), '0.001 ms');
+      assert.strictEqual(unit.format(0.00049), '0.000 ms');
+      assert.strictEqual(unit.format(999.999), '999.999 ms');
+      assert.strictEqual(unit.format(1000.001), '1,000.001 ms');
+      assert.strictEqual(unit.format(123456789), '123,456,789.000 ms');
+      assert.strictEqual(unit.format(-0.00051), '-0.001 ms');
+      assert.strictEqual(unit.format(-123456789), '-123,456,789.000 ms');
+
+      // Change the unit to nanoseconds.
+      Unit.currentTimeDisplayMode = tr.v.TimeDisplayModes.ns;
+
+      assert.strictEqual(unit.format(0), '0 ns');
+      assert.strictEqual(unit.format(1), '1,000,000 ns');
+      assert.strictEqual(unit.format(0.000042), '42 ns');
+      assert.strictEqual(unit.format(0.000001), '1 ns');
+      assert.strictEqual(unit.format(0.0000005), '1 ns');
+      assert.strictEqual(unit.format(0.00000049), '0 ns');
+      assert.strictEqual(unit.format(123.456), '123,456,000 ns');
+      assert.strictEqual(unit.format(-0.07), '-70,000 ns');
+    } finally {
+      Unit.reset();
+    }
+  }
+
+  test('timeStampInMs', function() {
+    assert.strictEqual(Unit.byName.timeStampInMs.unitName, 'timeStampInMs');
+    assert.strictEqual(Unit.byName.timeStampInMs.asJSON(), 'tsMs');
+    checkTimeUnit(Unit.byName.timeStampInMs);
+  });
+
+  test('timeDurationInMs', function() {
+    assert.strictEqual(Unit.byName.timeDurationInMs.unitName,
+        'timeDurationInMs');
+    assert.strictEqual(Unit.byName.timeDurationInMs.asJSON(), 'ms');
+    checkTimeUnit(Unit.byName.timeDurationInMs);
+  });
+
+  test('sizeInBytes', function() {
+    var SOURCE_VALUES = [0, 1, 1536, 424.5 * 1024 * 1024,
+        1025 * 1024 * 1024 * 1024 * 1024, -2.5 * 1024 * 1024];
+    var EXPECTED_REGULAR_FORMATTED_VALUES = ['0.0 B', '1.0 B', '1.5 KiB',
+        '424.5 MiB', '1025.0 TiB', '-2.5 MiB'];
+    var EXPECTED_DELTA_FORMATTED_VALUES = ['\u00B10.0 B', '+1.0 B', '+1.5 KiB',
+        '+424.5 MiB', '+1025.0 TiB', '-2.5 MiB'];
+
+    function checkSizeUnit(unit, expectation) {
+      assert.strictEqual(unit.unitName, expectation.unitName);
+      assert.strictEqual(unit.asJSON(), expectation.asJSON);
+      assert.strictEqual(unit.isDelta, expectation.isDelta);
+      assert.strictEqual(unit.correspondingDeltaUnit,
+          expectation.correspondingDeltaUnit);
+      assert.strictEqual(unit.improvementDirection,
+          expectation.improvementDirection);
+      assert.deepEqual(SOURCE_VALUES.map(unit.format.bind(unit)),
+          expectation.formattedValues);
+    }
+
+    // Regular (non-delta).
+    checkSizeUnit(Unit.byName.sizeInBytes, {
+      unitName: 'sizeInBytes',
+      asJSON: 'sizeInBytes',
+      isDelta: false,
+      correspondingDeltaUnit: Unit.byName.sizeInBytesDelta,
+      improvementDirection: ImprovementDirection.DONT_CARE,
+      formattedValues: EXPECTED_REGULAR_FORMATTED_VALUES
+    });
+    checkSizeUnit(Unit.byName.sizeInBytes_smallerIsBetter, {
+      unitName: 'sizeInBytes_smallerIsBetter',
+      asJSON: 'sizeInBytes_smallerIsBetter',
+      isDelta: false,
+      correspondingDeltaUnit: Unit.byName.sizeInBytesDelta_smallerIsBetter,
+      improvementDirection: ImprovementDirection.SMALLER_IS_BETTER,
+      formattedValues: EXPECTED_REGULAR_FORMATTED_VALUES
+    });
+    checkSizeUnit(Unit.byName.sizeInBytes_biggerIsBetter, {
+      unitName: 'sizeInBytes_biggerIsBetter',
+      asJSON: 'sizeInBytes_biggerIsBetter',
+      isDelta: false,
+      correspondingDeltaUnit: Unit.byName.sizeInBytesDelta_biggerIsBetter,
+      improvementDirection: ImprovementDirection.BIGGER_IS_BETTER,
+      formattedValues: EXPECTED_REGULAR_FORMATTED_VALUES
+    });
+
+    // Delta.
+    checkSizeUnit(Unit.byName.sizeInBytesDelta, {
+      unitName: 'sizeInBytesDelta',
+      asJSON: 'sizeInBytesDelta',
+      isDelta: true,
+      correspondingDeltaUnit: Unit.byName.sizeInBytesDelta,
+      improvementDirection: ImprovementDirection.DONT_CARE,
+      formattedValues: EXPECTED_DELTA_FORMATTED_VALUES
+    });
+    checkSizeUnit(Unit.byName.sizeInBytesDelta_smallerIsBetter, {
+      unitName: 'sizeInBytesDelta_smallerIsBetter',
+      asJSON: 'sizeInBytesDelta_smallerIsBetter',
+      isDelta: true,
+      correspondingDeltaUnit: Unit.byName.sizeInBytesDelta_smallerIsBetter,
+      improvementDirection: ImprovementDirection.SMALLER_IS_BETTER,
+      formattedValues: EXPECTED_DELTA_FORMATTED_VALUES
+    });
+    checkSizeUnit(Unit.byName.sizeInBytesDelta_biggerIsBetter, {
+      unitName: 'sizeInBytesDelta_biggerIsBetter',
+      asJSON: 'sizeInBytesDelta_biggerIsBetter',
+      isDelta: true,
+      correspondingDeltaUnit: Unit.byName.sizeInBytesDelta_biggerIsBetter,
+      improvementDirection: ImprovementDirection.BIGGER_IS_BETTER,
+      formattedValues: EXPECTED_DELTA_FORMATTED_VALUES
+    });
+  });
+
+  test('energyInJoules', function() {
+    assert.equal(Unit.byName.energyInJoules.format(1000), '1,000.000 J');
+    assert.equal(Unit.byName.energyInJoules.format(1), '1.000 J');
+    assert.equal(Unit.byName.energyInJoules.format(.005), '0.005 J');
+    assert.equal(Unit.byName.energyInJoules.format(.0005), '0.001 J');
+    assert.equal(Unit.byName.energyInJoules.format(.0004), '0.000 J');
+  });
+
+  test('powerInWatts', function() {
+    assert.equal(Unit.byName.powerInWatts.format(1000), '1,000.000 W');
+    assert.equal(Unit.byName.powerInWatts.format(1), '1.000 W');
+    assert.equal(Unit.byName.powerInWatts.format(.001), '0.001 W');
+    assert.equal(Unit.byName.powerInWatts.format(.001005), '0.001 W');
+  });
+});
+</script>
diff --git a/catapult/tracing/tracing/value/value.html b/catapult/tracing/tracing/value/value.html
new file mode 100644
index 0000000..8d1786c
--- /dev/null
+++ b/catapult/tracing/tracing/value/value.html
@@ -0,0 +1,223 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2015 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<link rel="import" href="/tracing/base/guid.html">
+<link rel="import" href="/tracing/base/utils.html">
+<script>
+'use strict';
+
+tr.exportTo('tr.v', function() {
+  function Value(canonicalUrl, name, opt_options, opt_groupingKeys,
+                 opt_diagnostics) {
+    if (typeof(name) !== 'string')
+      throw new Error('Expected value_name grouping key to be provided');
+
+    this.groupingKeys = opt_groupingKeys || {};
+    this.groupingKeys.name = name;
+
+    this.diagnostics = opt_diagnostics || {};
+
+    // May be undefined
+    this.diagnostics.canonical_url = canonicalUrl;
+
+    var options = opt_options || {};
+    this.description = options.description;
+    this.important = options.important !== undefined ?
+        options.important : false;
+  }
+
+  Value.fromDict = function(d) {
+    if (d.type === 'numeric')
+      return NumericValue.fromDict(d);
+
+    if (d.type === 'dict')
+      return DictValue.fromDict(d);
+
+    if (d.type == 'failure')
+      return FailureValue.fromDict(d);
+
+    if (d.type === 'skip')
+      return SkipValue.fromDict(d);
+
+    throw new Error('Not implemented');
+  };
+
+  Value.prototype = {
+    get name() {
+      return this.groupingKeys.name;
+    },
+
+    get canonicalUrl() {
+      return this.diagnostics.canonical_url;
+    },
+
+    addGroupingKey: function(keyName, key) {
+      if (this.groupingKeys.hasOwnProperty(keyName))
+        throw new Error('Tried to redefine grouping key ' + keyName);
+      this.groupingKeys[keyName] = key;
+    },
+
+    asDict: function() {
+      return this.asJSON();
+    },
+
+    asJSON: function() {
+      var d = {
+        grouping_keys: this.groupingKeys,
+        description: this.description,
+        important: this.important,
+        diagnostics: this.diagnostics
+      };
+
+      this._asDictInto(d);
+      if (d.type === undefined)
+        throw new Error('_asDictInto must set type field');
+      return d;
+    },
+
+    _asDictInto: function(d) {
+      throw new Error('Not implemented');
+    }
+  };
+
+  function NumericValue(canonicalUrl, name, numeric, opt_options,
+                        opt_groupingKeys, opt_diagnostics) {
+    if (!(numeric instanceof tr.v.NumericBase))
+      throw new Error('Expected numeric to be instance of tr.v.NumericBase');
+
+    Value.call(this, canonicalUrl, name, opt_options, opt_groupingKeys,
+               opt_diagnostics);
+    this.numeric = numeric;
+  }
+
+  NumericValue.fromDict = function(d) {
+    if (d.numeric === undefined)
+      throw new Error('Expected numeric to be provided');
+    var numeric = tr.v.NumericBase.fromDict(d.numeric);
+    return new NumericValue(d.diagnostics.canonical_url, d.grouping_keys.name,
+                            numeric, d, d.grouping_keys, d.diagnostics);
+  };
+
+  NumericValue.prototype = {
+    __proto__: Value.prototype,
+
+    _asDictInto: function(d) {
+      d.type = 'numeric';
+      d.numeric = this.numeric.asDict();
+    }
+  };
+
+  function DictValue(canonicalUrl, name, value, opt_options, opt_groupingKeys,
+                     opt_diagnostics) {
+    Value.call(this, canonicalUrl, name, opt_options, opt_groupingKeys,
+               opt_diagnostics);
+    this.value = value;
+  }
+
+  DictValue.fromDict = function(d) {
+    if (d.units !== undefined)
+      throw new Error('Expected units to be undefined');
+    if (d.value === undefined)
+      throw new Error('Expected value to be provided');
+    return new DictValue(d.diagnostics.canonical_url, d.grouping_keys.name,
+                         d.value, d, d.grouping_keys, d.diagnostics);
+  };
+
+  DictValue.prototype = {
+    __proto__: Value.prototype,
+
+    _asDictInto: function(d) {
+      d.type = 'dict';
+      d.value = this.value;
+    }
+  };
+
+
+  function FailureValue(canonicalUrl, name, opt_options, opt_groupingKeys,
+                        opt_diagnostics) {
+    var options = opt_options || {};
+
+    var stack;
+    if (options.stack === undefined) {
+      if (options.stack_str === undefined) {
+        throw new Error('Expected stack_str or stack to be provided');
+      } else {
+        stack = options.stack_str;
+      }
+    } else {
+      stack = options.stack;
+    }
+
+    if (typeof stack !== 'string')
+      throw new Error('stack must be provided as a string');
+
+    if (canonicalUrl === undefined) {
+      throw new Error('FailureValue must provide canonicalUrl');
+    }
+
+    Value.call(this, canonicalUrl, name, options, opt_groupingKeys,
+               opt_diagnostics);
+    this.stack = stack;
+  }
+
+  FailureValue.fromError = function(canonicalUrl, e) {
+    var ex = tr.b.normalizeException(e);
+    return new FailureValue(canonicalUrl, ex.typeName,
+                            {description: ex.message,
+                             stack: ex.stack});
+
+  };
+
+  FailureValue.fromDict = function(d) {
+    if (d.units !== undefined)
+      throw new Error('Expected units to be undefined');
+    if (d.stack_str === undefined)
+      throw new Error('Expected stack_str to be provided');
+    return new FailureValue(d.diagnostics.canonical_url, d.grouping_keys.name,
+                            d, d.grouping_keys, d.diagnostics);
+  };
+
+  FailureValue.prototype = {
+    __proto__: Value.prototype,
+
+    _asDictInto: function(d) {
+      d.type = 'failure';
+      d.stack_str = this.stack;
+    }
+  };
+
+
+  function SkipValue(canonicalUrl, name, opt_options, opt_groupingKeys,
+                     opt_diagnostics) {
+    Value.call(this, canonicalUrl, name, opt_options, opt_groupingKeys,
+               opt_diagnostics);
+  }
+
+  SkipValue.fromDict = function(d) {
+    if (d.units !== undefined)
+      throw new Error('Expected units to be undefined');
+    return new SkipValue(d.diagnostics.canonical_url, d.grouping_keys.name,
+                         d, d.grouping_keys, d.diagnostics);
+  };
+
+  SkipValue.prototype = {
+    __proto__: Value.prototype,
+
+    _asDictInto: function(d) {
+      d.type = 'skip';
+    }
+  };
+
+
+  return {
+    Value: Value,
+    NumericValue: NumericValue,
+    DictValue: DictValue,
+    FailureValue: FailureValue,
+    SkipValue: SkipValue
+  };
+});
+</script>
diff --git a/catapult/tracing/tracing/value/value_test.html b/catapult/tracing/tracing/value/value_test.html
new file mode 100644
index 0000000..595400a
--- /dev/null
+++ b/catapult/tracing/tracing/value/value_test.html
@@ -0,0 +1,86 @@
+<!DOCTYPE html>
+<!--
+Copyright (c) 2013 The Chromium Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+
+<link rel="import" href="/tracing/base/utils.html">
+<link rel="import" href="/tracing/value/numeric.html">
+<link rel="import" href="/tracing/value/unit.html">
+<link rel="import" href="/tracing/value/value.html">
+
+<script>
+'use strict';
+
+tr.b.unittest.testSuite(function() {
+  test('numericValueBasic', function() {
+    var canonicalUrl = 'my_test.json';
+    var n = new tr.v.ScalarNumeric(tr.v.Unit.byName.sizeInBytes, 314);
+    var v = new tr.v.NumericValue(canonicalUrl, 'MyNumeric', n);
+    var d = v.asDict();
+
+    var v2 = tr.v.Value.fromDict(d);
+    assert.instanceOf(v2, tr.v.NumericValue);
+    assert.equal(v.name, v2.name);
+    assert.equal(v.numeric.value, v2.numeric.value);
+    assert.equal(v.canonicalUrl, v2.canonicalUrl);
+  });
+
+  test('dictValueBasic', function() {
+    var canonicalUrl = 'my_test.json';
+    var v = new tr.v.DictValue(canonicalUrl, 'MyDict', {my_key: 'my_value'});
+    var d = v.asDict();
+
+    var v2 = tr.v.Value.fromDict(d);
+    assert.instanceOf(v2, tr.v.DictValue);
+    assert.equal(v.groupingKeys.value_name, v2.groupingKeys.value_name);
+    assert.deepEqual(v.value, v2.value);
+  });
+
+  test('failureValueBasic', function() {
+    var canonicalUrl = 'my_test.json';
+    var v = new tr.v.FailureValue(
+      canonicalUrl, 'MyFailure',
+      {description: 'Description', stack: tr.b.stackTraceAsString()});
+    var d = v.asDict();
+
+    var v2 = tr.v.Value.fromDict(d);
+    assert.instanceOf(v2, tr.v.FailureValue);
+    assert.equal(v.name, v2.name);
+    assert.equal(v.description, v2.description);
+    assert.equal(v.stack, v2.stack);
+    assert.equal(v.diagnostics.canonical_url, v2.diagnostics.canonical_url);
+  });
+
+  test('skipValueBasic', function() {
+    var canonicalUrl = 'my_test.json';
+    var v = new tr.v.SkipValue(canonicalUrl, 'MySkip',
+                               {description: 'WhySkipped'});
+    var d = v.asDict();
+
+    var v2 = tr.v.Value.fromDict(d);
+    assert.instanceOf(v2, tr.v.SkipValue);
+    assert.equal(v.groupingKeys.value_name, v2.groupingKeys.value_name);
+    assert.equal(v.description, v2.description);
+  });
+
+  test('addGroupingKey', function() {
+    var canonicalUrl = 'my_test.json';
+    var n = new tr.v.ScalarNumeric(tr.v.Unit.byName.sizeInBytes, 314);
+    var v = new tr.v.NumericValue(canonicalUrl, 'MyScalar', n);
+
+    v.addGroupingKey('foo', 'bar');
+    assert.equal(v.groupingKeys.foo, 'bar');
+  });
+
+  test('addDuplicateGroupingKeyThrows', function() {
+    var canonicalUrl = 'my_test.json';
+    var n = new tr.v.ScalarNumeric(tr.v.Unit.byName.sizeInBytes, 314);
+    var v = new tr.v.NumericValue(canonicalUrl, 'MyScalar', n);
+
+    assert.throws(function() { v.addGroupingKey('name', 'bar'); });
+  });
+});
+
+</script>
diff --git a/catapult/tracing/tracing/value/value_unittest.py b/catapult/tracing/tracing/value/value_unittest.py
new file mode 100644
index 0000000..88ffbc3
--- /dev/null
+++ b/catapult/tracing/tracing/value/value_unittest.py
@@ -0,0 +1,52 @@
+# Copyright (c) 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import unittest
+
+from tracing import value as value_module
+
+
+class ValueTests(unittest.TestCase):
+
+  def testScalar(self):
+    d = {
+        'canonical_url': '/a.json',
+        'type': 'scalar',
+        'name': 'MyScalarValue',
+        'important': False,
+        'value': {'a': 1, 'b': 'b'}
+    }
+    v = value_module.Value.FromDict(d)
+    self.assertTrue(isinstance(v, value_module.ScalarValue))
+    d2 = v.AsDict()
+
+    self.assertEquals(d, d2)
+
+  def testDict(self):
+    d = {
+        'canonical_url': '/a.json',
+        'type': 'dict',
+        'name': 'MyDictValue',
+        'important': False,
+        'value': {'a': 1, 'b': 'b'}
+    }
+    v = value_module.Value.FromDict(d)
+    self.assertTrue(isinstance(v, value_module.DictValue))
+    d2 = v.AsDict()
+
+    self.assertEquals(d, d2)
+
+  def testFailure(self):
+    d = {
+        'canonical_url': '/a.json',
+        'type': 'failure',
+        'name': 'Error',
+        'important': False,
+        'description': 'Some error message',
+        'stack_str': 'Some stack string'
+    }
+    v = value_module.Value.FromDict(d)
+    self.assertTrue(isinstance(v, value_module.FailureValue))
+    d2 = v.AsDict()
+
+    self.assertEquals(d, d2)
diff --git a/catapult/tracing/tracing_build/__init__.py b/catapult/tracing/tracing_build/__init__.py
index 528e805..22060c5 100644
--- a/catapult/tracing/tracing_build/__init__.py
+++ b/catapult/tracing/tracing_build/__init__.py
@@ -2,8 +2,6 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 #
-import os
-import sys
 
 import tracing_project
 tracing_project.UpdateSysPathIfNeeded()
diff --git a/catapult/tracing/tracing_build/benchmarks.py b/catapult/tracing/tracing_build/benchmarks.py
index 9d91b99..11ee526 100644
--- a/catapult/tracing/tracing_build/benchmarks.py
+++ b/catapult/tracing/tracing_build/benchmarks.py
@@ -8,11 +8,7 @@
 import StringIO
 import inspect
 import sys
-import os
 
-import tracing_project
-from py_vulcanize import generate
-from py_vulcanize import project
 
 
 class Bench(object):
diff --git a/catapult/tracing/tracing_build/check_common.py b/catapult/tracing/tracing_build/check_common.py
index 7c06e3c..b49513f 100644
--- a/catapult/tracing/tracing_build/check_common.py
+++ b/catapult/tracing/tracing_build/check_common.py
@@ -2,7 +2,6 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-import sys
 import os
 
 import tracing_project
@@ -14,13 +13,13 @@
 
 
 def GetFileGroupFromFileName(filename):
-   extension = os.path.splitext(filename)[1]
-   return {
-       '.css': 'tracing_css_files',
-       '.html': 'tracing_js_html_files',
-       '.js': 'tracing_js_html_files',
-       '.png': 'tracing_img_files'
-   }[extension]
+  extension = os.path.splitext(filename)[1]
+  return {
+      '.css': 'tracing_css_files',
+      '.html': 'tracing_js_html_files',
+      '.js': 'tracing_js_html_files',
+      '.png': 'tracing_img_files'
+  }[extension]
 
 
 def CheckListedFilesSorted(src_file, group_name, listed_files):
diff --git a/catapult/tracing/tracing_build/check_common_unittest.py b/catapult/tracing/tracing_build/check_common_unittest.py
index defa221..b3af7b6 100644
--- a/catapult/tracing/tracing_build/check_common_unittest.py
+++ b/catapult/tracing/tracing_build/check_common_unittest.py
@@ -7,9 +7,9 @@
 from tracing_build import check_common
 
 
-class CheckCommonUnittTest(unittest.TestCase):
+class CheckCommonUnitTest(unittest.TestCase):
 
-  def test_filesSortedTest(self):
+  def testFilesSorted(self):
     error = check_common.CheckListedFilesSorted('foo.gyp', 'tracing_pdf_files',
                                                 ['/dir/file.pdf',
                                                  '/dir/another_file.pdf'])
diff --git a/catapult/tracing/tracing_build/check_gypi.py b/catapult/tracing/tracing_build/check_gypi.py
index 810b4c8..fb59626 100644
--- a/catapult/tracing/tracing_build/check_gypi.py
+++ b/catapult/tracing/tracing_build/check_gypi.py
@@ -2,7 +2,6 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-import sys
 import os
 
 from tracing_build import check_common
@@ -16,7 +15,7 @@
   gyp = f.read()
   f.close()
 
-  data = eval(gyp)
+  data = eval(gyp)  # pylint: disable=eval-used
   listed_files = []
   error = ''
   for group in check_common.FILE_GROUPS:
diff --git a/catapult/tracing/tracing_build/generate_about_tracing_contents_unittest.py b/catapult/tracing/tracing_build/generate_about_tracing_contents_unittest.py
index 3615465..dd31b7e 100644
--- a/catapult/tracing/tracing_build/generate_about_tracing_contents_unittest.py
+++ b/catapult/tracing/tracing_build/generate_about_tracing_contents_unittest.py
@@ -2,18 +2,16 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-import os
 import unittest
 import tempfile
 import shutil
-import sys
 
 from tracing_build import generate_about_tracing_contents
 
 
 class GenerateAboutTracingContentsUnittTest(unittest.TestCase):
 
-  def test_smokeTest(self):
+  def testSmoke(self):
     try:
       tmpdir = tempfile.mkdtemp()
       res = generate_about_tracing_contents.Main(['--outdir', tmpdir])
diff --git a/catapult/tracing/tracing_build/run_vinn_tests.py b/catapult/tracing/tracing_build/run_vinn_tests.py
index 752d720..f8c79e0 100644
--- a/catapult/tracing/tracing_build/run_vinn_tests.py
+++ b/catapult/tracing/tracing_build/run_vinn_tests.py
@@ -18,26 +18,30 @@
 
 def RunTests():
   project = tracing_project.TracingProject()
-  d8_test_module_filenames = ['/' + _RelPathToUnixPath(x)
-                              for x in project.FindAllD8TestModuleRelPaths()]
-  d8_test_module_filenames.sort()
+  headless_test_module_filenames = [
+      '/' + _RelPathToUnixPath(x)
+      for x in project.FindAllD8TestModuleRelPaths()]
+  headless_test_module_filenames.sort()
 
   cmd = """
-  loadHTML('/tracing/base/d8_tests.html');
+  HTMLImportsLoader.loadHTML('/tracing/base/headless_tests.html');
+  tr.b.unittest.loadAndRunTests(sys.argv.slice(1));
   """
   res = vinn.RunJsString(
-    cmd, source_paths=list(project.source_paths),
-    js_args=d8_test_module_filenames, stdout=sys.stdout, stdin=sys.stdin)
+      cmd, source_paths=list(project.source_paths),
+      js_args=headless_test_module_filenames,
+      stdout=sys.stdout, stdin=sys.stdin)
   return res.returncode
 
+
 def Main(argv):
   parser = argparse.ArgumentParser(
       description='Run d8 tests.')
   parser.add_argument(
-    '--no-install-hooks', dest='install_hooks', action='store_false')
+      '--no-install-hooks', dest='install_hooks', action='store_false')
   parser.set_defaults(install_hooks=True)
   args = parser.parse_args(argv[1:])
   if args.install_hooks:
     install.InstallHooks()
 
-  sys.exit(RunTests())
\ No newline at end of file
+  sys.exit(RunTests())
diff --git a/catapult/tracing/tracing_build/trace2html.py b/catapult/tracing/tracing_build/trace2html.py
index 025f5a4..6e6093f 100644
--- a/catapult/tracing/tracing_build/trace2html.py
+++ b/catapult/tracing/tracing_build/trace2html.py
@@ -9,7 +9,6 @@
 import json
 import os
 import StringIO
-import sys
 
 import tracing_project
 
diff --git a/catapult/tracing/tracing_build/trace2html_unittest.py b/catapult/tracing/tracing_build/trace2html_unittest.py
index d39a137..4489ebc 100644
--- a/catapult/tracing/tracing_build/trace2html_unittest.py
+++ b/catapult/tracing/tracing_build/trace2html_unittest.py
@@ -12,7 +12,7 @@
 
 class Trace2HTMLTests(unittest.TestCase):
 
-  def test_writeHTMLForTracesToFile(self):
+  def testWriteHTMLForTracesToFile(self):
     # Note: We can't use "with" when working with tempfile.NamedTemporaryFile as
     # that does not work on Windows. We use the longer, more clunky version
     # instead. See https://bugs.python.org/issue14243 for details.
diff --git a/catapult/tracing/tracing_build/tracing_dev_server_config.py b/catapult/tracing/tracing_build/tracing_dev_server_config.py
index 6d71792..fcd0a1a 100644
--- a/catapult/tracing/tracing_build/tracing_dev_server_config.py
+++ b/catapult/tracing/tracing_build/tracing_dev_server_config.py
@@ -2,25 +2,22 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-import argparse
 import json
 import os
-import sys
 
 import tracing_project
-from hooks import install
 
-from paste import httpserver
-from paste import fileapp
 
 import webapp2
-from webapp2 import Route, RedirectHandler
+from webapp2 import Route
 
 
 def _RelPathToUnixPath(p):
   return p.replace(os.sep, '/')
 
+
 class TestListHandler(webapp2.RequestHandler):
+
   def get(self, *args, **kwargs):  # pylint: disable=unused-argument
     project = tracing_project.TracingProject()
     test_relpaths = ['/' + _RelPathToUnixPath(x)
@@ -33,6 +30,7 @@
 
 
 class TracingDevServerConfig(object):
+
   def __init__(self):
     self.project = tracing_project.TracingProject()
 
@@ -54,6 +52,6 @@
 
   def GetTestDataPaths(self, args):  # pylint: disable=unused-argument
     return [
-      ('/tracing/test_data/', os.path.expanduser(args.data_dir)),
-      ('/tracing/skp_data/', os.path.expanduser(args.skp_data_dir)),
+        ('/tracing/test_data/', os.path.expanduser(args.data_dir)),
+        ('/tracing/skp_data/', os.path.expanduser(args.skp_data_dir)),
     ]
diff --git a/catapult/tracing/tracing_build/update_gypi.py b/catapult/tracing/tracing_build/update_gypi.py
index ca5e275..94ee47e 100644
--- a/catapult/tracing/tracing_build/update_gypi.py
+++ b/catapult/tracing/tracing_build/update_gypi.py
@@ -5,7 +5,6 @@
 import collections
 import os
 import re
-import sys
 
 import tracing_project
 from tracing_build import check_common
@@ -16,9 +15,9 @@
   def __init__(self, data, token_id=None):
     self.data = data
     if token_id:
-      self.id = token_id
+      self.token_id = token_id
     else:
-      self.id = 'plain'
+      self.token_id = 'plain'
 
 
 class BuildFile(object):
@@ -48,10 +47,10 @@
 
   def Update(self, files_by_group):
     for token in self._tokens:
-      if token.id in files_by_group:
+      if token.token_id in files_by_group:
         token.data = self._GetReplacementListAsString(
             token.data,
-            files_by_group[token.id])
+            files_by_group[token.token_id])
 
   def Write(self, f):
     for token in self._tokens:
@@ -85,7 +84,7 @@
     # In the match,
     # group 1 is : 'file_group_name'
     # group 2 is : """  'path/to/one/file.extension',\n  'another/file.ex',\n"""
-    regexp_str = "'(%s)': \[\n(.+?) +\],?\n" % "|".join(self._file_groups)
+    regexp_str = r"'(%s)': \[\n(.+?) +\],?\n" % "|".join(self._file_groups)
     return re.compile(regexp_str, re.MULTILINE | re.DOTALL)
 
   def _GetReplacementListAsString(self, existing_list_as_string, filelist):
@@ -95,10 +94,10 @@
                     for filename in filelist])
 
 
-def _GroupFiles(fileNameToGroupNameFunc, filenames):
+def _GroupFiles(file_name_to_group_name_func, filenames):
   file_groups = collections.defaultdict(lambda: [])
   for filename in filenames:
-    file_groups[fileNameToGroupNameFunc(filename)].append(filename)
+    file_groups[file_name_to_group_name_func(filename)].append(filename)
   for group in file_groups:
     file_groups[group].sort()
   return file_groups
diff --git a/catapult/tracing/tracing_build/update_gypi_unittest.py b/catapult/tracing/tracing_build/update_gypi_unittest.py
index 6649a56..9187145 100644
--- a/catapult/tracing/tracing_build/update_gypi_unittest.py
+++ b/catapult/tracing/tracing_build/update_gypi_unittest.py
@@ -12,22 +12,22 @@
   def setUp(self):
     self.file_groups = ['group1', 'group2']
 
-  def test_GypiTokenizer(self):
+  def testGypiTokenizer(self):
     content = ("useless data\n'group1': [\n    <file list goes here>\n"
                "    ]\nNote the four spaces before the ] above")
     gypi_files = GypiFile(content, self.file_groups)
     self.assertEqual(3, len(gypi_files._tokens))
-    self.assertEqual('plain', gypi_files._tokens[0].id)
+    self.assertEqual('plain', gypi_files._tokens[0].token_id)
     self.assertEqual(
         "useless data\n'group1': [\n", gypi_files._tokens[0].data)
-    self.assertEqual('group1', gypi_files._tokens[1].id)
+    self.assertEqual('group1', gypi_files._tokens[1].token_id)
     self.assertEqual("    <file list goes here>\n", gypi_files._tokens[1].data)
-    self.assertEqual('plain', gypi_files._tokens[2].id)
+    self.assertEqual('plain', gypi_files._tokens[2].token_id)
     self.assertEqual(
         "    ]\nNote the four spaces before the ] above",
         gypi_files._tokens[2].data)
 
-  def test_GypiFileListBuilder(self):
+  def testGypiFileListBuilder(self):
     gypi_file = GypiFile('', self.file_groups)
     existing_list = ("    '/four/spaces/indent',\n'"
                      "    '/five/spaces/but/only/first/line/matters',\n")
diff --git a/catapult/tracing/tracing_build/vulcanize_trace_viewer_unittest.py b/catapult/tracing/tracing_build/vulcanize_trace_viewer_unittest.py
index 843f0f2..a5c154f 100644
--- a/catapult/tracing/tracing_build/vulcanize_trace_viewer_unittest.py
+++ b/catapult/tracing/tracing_build/vulcanize_trace_viewer_unittest.py
@@ -11,7 +11,8 @@
 
 
 class Trace2HTMLTests(unittest.TestCase):
-  def test_writeHTMLForTracesToFile(self):
+
+  def testWriteHTMLForTracesToFile(self):
     try:
       # Note: We can't use "with" when working with tempfile.NamedTemporaryFile
       # as that does not work on Windows. We use the longer, more clunky version
diff --git a/catapult/tracing/tracing_examples/skia_debugger.html b/catapult/tracing/tracing_examples/skia_debugger.html
index 1a94601..29b253b 100644
--- a/catapult/tracing/tracing_examples/skia_debugger.html
+++ b/catapult/tracing/tracing_examples/skia_debugger.html
@@ -12,6 +12,7 @@
 <script src="/components/webcomponentsjs/webcomponents.js"></script>
 
 <link rel="import" href="/components/polymer/polymer.html">
+<link rel="import" href="/tracing/base/base64.html">
 <link rel="import" href="/tracing/extras/chrome/cc/picture.html">
 <link rel="import" href="/tracing/ui/extras/chrome/cc/picture_debugger.html">
 
@@ -61,7 +62,7 @@
   }
 
   function utf8_to_b64(str) {
-    return window.btoa(unescape(encodeURIComponent(str)));
+    return tr.b.Base64.btoa(unescape(encodeURIComponent(str)));
   }
 
   function loadSkp(filename, onSkpLoaded) {
diff --git a/catapult/tracing/tracing_examples/trace_viewer.html b/catapult/tracing/tracing_examples/trace_viewer.html
index 76b2b6d..717ec67 100644
--- a/catapult/tracing/tracing_examples/trace_viewer.html
+++ b/catapult/tracing/tracing_examples/trace_viewer.html
@@ -12,6 +12,7 @@
 <script src="/components/webcomponentsjs/webcomponents.js"></script>
 
 <link rel="import" href="/components/polymer/polymer.html">
+<link rel="import" href="/tracing/base/timing.html">
 <link rel="import" href="/tracing/base/xhr.html">
 <link rel="import" href="/tracing/importer/import.html">
 <link rel="import" href="/tracing/ui/extras/full_config.html">
@@ -47,10 +48,12 @@
   <script>
   'use strict';
 
+  var Timing = tr.b.Timing;
   var timelineViewEl;
   var selectEl;
 
   function loadTraces(filenames, onTracesLoaded) {
+    var loadTracesTimer = Timing.mark('TraceImport', 'loadTraces');
     var traces = [];
     for (var i = 0; i < filenames.length; i++) {
       traces.push(undefined);
@@ -61,20 +64,30 @@
       getAsync(filename, function(trace) {
         traces[i] = trace;
         numTracesPending--;
-        if (numTracesPending == 0)
+        if (numTracesPending == 0) {
+          loadTracesTimer.end();
           onTracesLoaded(filenames, traces);
+        }
       });
     });
   }
 
 
   function getAsync(url, cb) {
-    tr.b.getAsync(url).then(cb);
+    return tr.b.getAsync(url).then(cb);
   }
 
   function createViewFromTraces(filenames, traces) {
+    var createViewFromTracesTimer = Timing.mark(
+        'TraceImport', 'createViewFromTraces');
     var m = new tr.Model();
-    var i = new tr.importer.Import(m);
+
+    var trackDetailedModelStatsEl =
+        tr.b.findDeepElementMatching(document.body,
+                                     '#track-detailed-model-stats');
+    var importOptions = new tr.importer.ImportOptions();
+    importOptions.trackDetailedModelStats = trackDetailedModelStatsEl.checked;
+    var i = new tr.importer.Import(m, importOptions);
     var p = i.importTracesWithProgressDialog(traces);
 
     p.then(
@@ -83,12 +96,14 @@
         timelineViewEl.updateDocumentFavicon();
         timelineViewEl.globalMode = true;
         timelineViewEl.viewTitle = '';
+        createViewFromTracesTimer.end();
       },
       function(err) {
         var overlay = new tr.ui.b.Overlay();
         overlay.textContent = tr.b.normalizeException(err).message;
         overlay.title = 'Import error';
         overlay.visible = true;
+        createViewFromTracesTimer.end();
       });
   }
 
@@ -130,9 +145,11 @@
   window.addEventListener('hashchange', onHashChange);
 
   function onLoad() {
+    var onLoadTimer = Timing.mark('TraceImport', 'onLoad');
     timelineViewEl = document.querySelector('tr-ui-timeline-view');
     timelineViewEl.globalMode = true;
 
+
     selectEl = document.createElement('select');
     timelineViewEl.leftControls.appendChild(selectEl);
 
@@ -153,8 +170,17 @@
       } else {
         onHashChange();
       }
-    });
+    }).then(function() { onLoadTimer.end(); });
+
+    var trackDetailedModelStatsEl = tr.ui.b.createCheckBox(
+        this, 'trackDetailedModelStats',
+        'traceViewer.trackDetailedModelStats', false,
+        'Detailed file size stats',
+        onHashChange);
+    trackDetailedModelStatsEl.id = 'track-detailed-model-stats';
+    timelineViewEl.leftControls.appendChild(trackDetailedModelStatsEl);
   }
+
   window.addEventListener('load', onLoad);
   </script>
 </body>
diff --git a/catapult/tracing/tracing_project.py b/catapult/tracing/tracing_project.py
index 275771c..b7cd8a5 100644
--- a/catapult/tracing/tracing_project.py
+++ b/catapult/tracing/tracing_project.py
@@ -36,7 +36,7 @@
         all_filenames.add(x)
   return all_filenames
 
-def _IsFilenameATest(x):  # pylint: disable=unused-argument
+def _IsFilenameATest(x):
   if x.endswith('_test.js'):
     return True
 
@@ -53,7 +53,7 @@
   return False
 
 
-class TracingProject():
+class TracingProject(object):
   catapult_path = os.path.abspath(
       os.path.join(os.path.dirname(__file__), os.path.pardir))
 
@@ -78,6 +78,11 @@
   chai_path = os.path.join(tracing_third_party_path, 'chai')
   mocha_path = os.path.join(tracing_third_party_path, 'mocha')
 
+  mre_path = os.path.join(catapult_path, 'perf_insights')
+
+  value_ui_path = os.path.join(tracing_src_path, 'value', 'ui')
+  metrics_ui_path = os.path.join(tracing_src_path, 'metrics', 'ui')
+
   test_data_path = os.path.join(tracing_root_path, 'test_data')
   skp_data_path = os.path.join(tracing_root_path, 'skp_data')
 
@@ -90,6 +95,7 @@
     self.source_paths = []
     self.source_paths.append(self.tracing_root_path)
     self.source_paths.append(self.tracing_third_party_path)
+    self.source_paths.append(self.mre_path)
     self.source_paths.append(self.jszip_path)
     self.source_paths.append(self.glmatrix_path)
     self.source_paths.append(self.d3_path)
@@ -101,7 +107,16 @@
     return project_module.Project(self.source_paths)
 
   def IsD8CompatibleFile(self, filename):
-    return not filename.startswith(self.ui_path)
+    if filename.startswith(self.ui_path):
+      return False
+
+    if filename.startswith(self.value_ui_path):
+      return False
+
+    if filename.startswith(self.metrics_ui_path):
+      return False
+
+    return True
 
   def FindAllTestModuleRelPaths(self, pred=None):
     if pred is None:
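The IsD8CompatibleFile change above generalizes the old single-prefix check into an allow-by-default rule that rejects anything under a UI directory (ui/, value/ui/, metrics/ui/), since those modules assume a DOM that d8 does not provide. Below is a minimal standalone Python sketch of that prefix-exclusion rule; the paths are illustrative stand-ins rather than the real TracingProject attributes.

import os

# Directories whose modules need a DOM and therefore cannot run under d8.
# The layout is illustrative; it mirrors ui/, value/ui/ and metrics/ui/ from
# the hunk above rather than an actual checkout.
TRACING_SRC = os.path.join('catapult', 'tracing', 'tracing')
UI_DIRS = (
    os.path.join(TRACING_SRC, 'ui'),
    os.path.join(TRACING_SRC, 'value', 'ui'),
    os.path.join(TRACING_SRC, 'metrics', 'ui'),
)

def is_d8_compatible(filename):
  """Allow by default; reject anything under a UI-only directory."""
  return not any(filename.startswith(d) for d in UI_DIRS)

if __name__ == '__main__':
  print(is_d8_compatible(os.path.join(TRACING_SRC, 'base', 'math.js')))        # True
  print(is_d8_compatible(os.path.join(TRACING_SRC, 'value', 'ui', 'x.html')))  # False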
diff --git a/systrace.py b/systrace.py
index 10824c7..d51b410 100755
--- a/systrace.py
+++ b/systrace.py
@@ -14,9 +14,10 @@
   sys.exit(1)
 
 systrace_dir = os.path.abspath(
-    os.path.join(os.path.dirname(__file__), 'catapult', 'systrace', 'systrace'))
+    os.path.join(os.path.dirname(__file__), 'catapult', 'systrace'))
 sys.path.insert(0, systrace_dir)
-import systrace
+
+from systrace import systrace
 
 if __name__ == '__main__':
   sys.exit(systrace.main())
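The systrace.py wrapper hunk above switches from importing the systrace script as a bare module to importing it as a submodule of the systrace package: the directory placed on sys.path is now the package's parent (catapult/systrace) rather than the inner package directory itself. A hedged sketch of the resulting import shape, assuming an illustrative layout of <here>/catapult/systrace/systrace/systrace.py:

import os
import sys

# Put the *parent* of the 'systrace' package on sys.path so that 'systrace'
# resolves to the package and the runnable module is imported as one of its
# submodules, instead of shadowing the package by importing the module directly.
package_parent = os.path.abspath(
    os.path.join(os.path.dirname(__file__), 'catapult', 'systrace'))
sys.path.insert(0, package_parent)

from systrace import systrace

if __name__ == '__main__':
  sys.exit(systrace.main())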
diff --git a/systrace_trace_viewer.html b/systrace_trace_viewer.html
index ae70787..2fde043 100644
--- a/systrace_trace_viewer.html
+++ b/systrace_trace_viewer.html
@@ -96,116 +96,185 @@
       </overlay-frame>
     </overlay-vertical-centering-container>
   </overlay-mask>
-</template><polymer-element constructor="TracingAnalysisTabView" name="tr-ui-a-tab-view">
+</template><polymer-element name="tv-ui-b-hotkey-controller">
+  
+</polymer-element><polymer-element name="tr-ui-b-mouse-mode-icon">
   <template>
     <style>
-      :host {
-        display: flex;
-        flex-flow: column nowrap;
-        overflow: hidden;
-        box-sizing: border-box;
-      }
-
-      tab-strip[tabs-hidden] {
-        display: none;
-      }
-
-      tab-strip {
-        background-color: rgb(236, 236, 236);
-        border-bottom: 1px solid #8e8e8e;
-        display: flex;
-        flex: 0 0 auto;
-        flex-flow: row;
-        overflow-x: auto;
-        padding: 0 10px 0 10px;
-        font-size: 12px;
-      }
-
-      tab-button {
-        display: block;
-        flex: 0 0 auto;
-        padding: 4px 15px 1px 15px;
-        margin-top: 2px;
-      }
-
-      tab-button[selected=true] {
-        background-color: white;
-        border: 1px solid rgb(163, 163, 163);
-        border-bottom: none;
-        padding: 3px 14px 1px 14px;
-      }
-
-      tabs-content-container {
-        display: flex;
-        flex: 1 1 auto;
-        overflow: auto;
-        width: 100%;
-      }
-
-      ::content > * {
-        flex: 1 1 auto;
-      }
-
-      ::content > *:not([selected]) {
-        display: none;
-      }
-
-      button-label {
-        display: inline;
-      }
-
-      tab-strip-heading {
-        display: block;
-        flex: 0 0 auto;
-        padding: 4px 15px 1px 15px;
-        margin-top: 2px;
-        margin-before: 20px;
-        margin-after: 10px;
-      }
-      #tsh {
-        display: inline;
-        font-weight: bold;
-      }
+    :host {
+      display: block;
+      background-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADwAAAChCAYAAACbBNzvAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAABV0RVh0Q3JlYXRpb24gVGltZQA3LzE2LzEzRNEKUwAAABx0RVh0U29mdHdhcmUAQWRvYmUgRmlyZXdvcmtzIENTNui8sowAAA9aSURBVHic7V1rTFvl//+UrgUmZWMpbLa6cLErwpYxkqLGkjAG88WSbmumGUllvlmAJctMRtybvlHrLXiJUekMIZuYSCL5gS+EuLIXGEGjqCsllCEW6xQECgzWG7S05/+C/zkp9LTn0gsL6ych9JzznOdzPj19Luf5PN/nCN59913ixRdfRFdXFxLx/2GDgCAIYmpqCoWFhUjE/4cNae+99x4AIFH/Hzak7nDqDu+wOyyw2WzEdl9EMpG23ReQbKQE73Q8coJ3bfcFWK1W/Pbbb/D7/UhLi/37DwaDEIvFKC8vR0lJSdjxbRVstVoxPDyMxx9/HAUFBcjMzIRAIOCdXzAYhNvtht1ux/DwMACEid5WwSMjI3jyySdRXFwMsVgMoVAYk2CCIJCZmYns7GyMjo5iZGQkPoKXl5exd+9e3hdGIhgMIj8/H5mZmRCJRIyCyQ5NJBAEgUAgAKFQiIKCAiwsLISl4VxoHA4H+vv74Xa7uZ4aBqFQiOzsbIhEIojFYojFYohEItq/8fFxXLlyBUtLSxHThOaxZ88eCIXC2AWPj48DAH799deYBaelpUEoFLL6++qrrwAAH3zwAav0YrGYthLkJHh6ehpzc3MAgPn5eUxPT8csWiAQMJbboaEhmM1mAIDFYsHQ0BDvPDkJtlgsYdt+v59LFrxw/fr1sG2Xy8UrL06C6+vrw7bFYjEvYi747rvvwrYlEgmvvDjV0g6HI+p2ohBP3qh32OFwoLe3l1VGvb29sNvtvC8kFCMjI9DpdKzS6nQ6mEwm1nnTPg/7/X6MjY1hcnKS/VX+P/bu3YuysjLk5uYypv36669x8uRJZGRkQCQSwev1oqOjAz09PZx5CwsLcenSJRw+fBh+vx+rq6swmUx46aWXNqWjvcMDAwO8xAIbnZKBgQFeNXhzczMvscBGp6S5uRk//vhj1HS0grVaLYqLi3kRy+Vy1NXVRe0RRcKNGzeg0Wh48apUKnR1daG6ujpqOtpKy+VyQa1Wo6SkBLdv38aFCxeoY5988gn1+fLly9TnL774ApWVlXjiiSfgdDqxtrbG+aJ9Ph/0ej3OnDkDvV6PW7duUceOHDlCfR4dHaU+v/DCC7h27RrUajWcTidWV1ejctAKJggCKysryMzMhE6nw+zsLO3Joft1Oh0ePHiApaUlduqi8BYVFaGvr48Vb19fHyfeqM2Sz+dj3QTEs4lKJC+njsfWJoptkxUrtjZRbJssOnASXFtbG3U7UXjrrbeibnMBJ8FZWVkoKysDABQUFCArK4s3MRcoFArqrlZXV0OhUPDOi5Ngn8+Hw4cPQyqV4tlnn4XP5+NNTIIgmH0An8+HV155BUqlEq+++ior3kAgQLuf84jH2toajh8/jvX1da6n0sLj8SAjI4MxHUEQ+PTTT1nlSRAEHjx4QHtsW8e0RCIR7HY79uzZE/GOcEUgEEAgEMDff/8NkUgUdnxbBR85cgRmsxkCgQD5+fkRh2XYIhAI4P79+5iamoLD4cCxY8fC0myr4KeeegoCgQBWqxVzc3NIS0uLedQyGAxi165dKC8vR1FRUVialHu405ESvNPxyAlOuYfJRMo9fFjdw3iBq3vIBDbu4bYK3uoextKtJEH2yWNyD8nyEG8wuYcffvgha3cxru6h3W5Hf39/QoyzaE6fyWRCQ0MDZ+MsLu7h8vIyent7sby8zIk8VkxNTUGn08Fms8UlP04Nn9/vR39/f9w8JLZwu91obGzk5CFFAq+Wfnh4mDKok4mWlha0trbGlAfvrs3k5CQGBgaSYoiHoqenB1evXk2OIb4VDocDJpMp6eXaYrGgsbGRV7mOufPq8XgwMDCQ9HI9NzeHq1evci7XvDseUqkUWq0W6enpCAaDcDqd8Hq9fLNjDaVSiRs3bkAikfDi5XSHxWIxampqAAALCwsYGhrC7Ows5ufnEypWIpHAYDAAACYmJnD9+nXevJwEnzp1CjKZDBUVFQCAsbGxpJTfjz76CFVVVWhqagIAdHR08G6XWQuuqanB7t274fV6UVpaiuzsbAAbTzyJhMFggEKhgNfrRX19PWQyGQDAaDTyyo+V4JqaGshkMsricLlcOH78OICNCWp8p0cwwWAwoKqqahPvG2+8AWDji+7u7uacJyvBMpksrKxkZWVR0yLGxsY4E7NBVVVVGK9CoaCmRXR0dHDOk5VguorB5/OhoqICYrE4YZ2PSLxXrlyBRCLhNcE1pufh1dVVXLx4EWlpaRGnJzCBjXtId87g4GBU3ri5h1uJ5+fnY8mCtXvIhTflHoYg5R4mEyn3MAl45KyWlOCdjkdOcMo9TCZS7mHKPeSGhLmH5LBOrAGXXN1DcliHrgdFgsk95CzYbrfDbDbD7/ejrKwstpmtNO5hJJhMJrS2tsLtdqOpqQlarTZi2mjuIWvBfr8fZrN50/iz2WzG9PQ0nn/+edonEzZgij10uVwwGo2bxp+NRiOGhobw+uuv005hjtk9JENz6AbbyWCuRESp2Ww2NDc30w62WywW6HQ6zoOIrO5wbm4uzp8/j5WVFXR2dm46VldXh3379mF5eTku86dDUVxcjK6uLthstrClqrq6unDo0CHOvKwE+/1+LC4uUqG0oZiYmIhaicQCkvfu3bthxwYGBnhVmpy6NnSD7kxxQvEA3Zo+fIsQJ8F040j379/nRcwFdF4037FwToLphkUXFxd5EXMB3chkUgQ7nc6wfT6fL+Gm+H///Re2z+Vy8TLFGSut/v5+RsPsm2++AbDR84pXLFNDQwPjelxnz54FsBFK+/nnn7PKl/EOa7VaVmHvYrE4au+HK27evMkq7F0ikeDmzZus82UU7HK5qG8yGs6ePct73gUdfD4f2tvbGdO1t7dzaocZBRMEAaFQSBnhdKipqYFQKORlm0TjzcvLo4xwOhgMBuTl5XHiZVVp+f1+yGQy2iDq4uJiyGSyhFRcfr8fVVVVtEHUGo0GVVVVnHlZ19JerxdqtRpSqZTaJ5VKoVarEzrdwev1Qq/XQ6lUUvuUSiX0ej0vXk7N0srKCjQaDbXmjUajwcrKCmfSULD5Oa6srKCtrQ0SiQQSiQRtbW2MvHFzD0MrsXhUUmzdw9BKjKmSiqt7SBBE3Conru4hOa8kWqBnyj3cgl0EQcQ0cMYWW3kIgkiKe7iVV2C1Won09PSYxLCB1+tFZmYmtb22tobt4E1LBimATaQAkiKWjveR85ZSgnc6Uu5hMpFyD1PuITekYg/ZxB52dXXFTMo2n1D38NSpU7zjDEP/yHzisnJpIsBm5dJ45rntgpONuITTJirctqWl
JabjdGAUvNUEp0NouxcvtLa2MgZhmUwmzqKjCrbb7aw9HC5pmWAymVivb2kymTgFe0RslrbeNTa1rtlshkgkQn5+PusL2Iqtd42NdWM0GpGVlYWTJ08ypo14h/nGI8Uax8Q3XJbteREFV1ZW8iLmex6Ja9euJfS8iD9puVyOmpoa3L59G8DmVUq3glzNlAzoimVgvrq6GmlpadDr9QA2r1K6FeRqpmRAFxveiIK9Xi8VZ/jLL78whulUVFTELJbkJeMMjUYjI29TUxNrsQBDX5qMM4w0qE2iuLgYpaWlcXMPyThDphWMNRoN6uvrOfGyskvVanXUNGq1Oq5WKclL/qwjQa/Xc+Zl1dNi8nFi9ZeSyZvqS0erjbmAbT6kT7X1lQp8QeYTyasKE8w3aJJvPh6PBwRBYGZmJi68MzMzqdjDUDx67mEsFxwrUrGHSUCqWdrpSAne6dix7uFzzz1HW0s/FO7h/v37UVBQgMceeyxm99DlcsFut2NwcBACgSDsnTHb7h4ePHgQxcXFcTPTMjIyIJFIcOfOHfz+++8Pl2DSPSTftxQv93DXrl0oKirCnTt3wtIwFhq62aputxtms5maCR8pHROEQiEkEgntew/X1tbC3mu4tLSE9vZ2nD9/njZd6Pn79u3jHoo3OTmJsbExnDlzBsDGWLXdbqcNoent7YVCocChQ4dYh+VFij3s7u5GR0cH9YWaTCbcunVr0yMkmfbChQvQarXQarVUWF4wGER6ejp7wdPT0zCbzfB4PJv2R7NT/H4/rFYrJicnUVZWxnowPtTpGxoagtFoDAsIi2anuN1ufPnll+ju7salS5dw4sQJKk+64hH2FTgcDgwPD4eJZQu/3w+bzcZ5JSSLxYL333+fNvqNDdxuN3p6ehjPDxMsl8tjjkw5ceIENfOVLVQqFd58882YeA0GA7WiWiSECfb5fPjpp58AbKyBx/bCpVIp6urqAADff/895wf6tbU1fPbZZwCAjz/+mPHCSSiVSsr3eueddxh5aWtpMrwuJyeH9cuczp07R5UZvktO/fnnnwCAY8eOoa+vj9U5nZ2d1CsH2fhaUZulwcFB1kGNi4uLjK/gYwuDwcCJ9+2332add9RmyW63w+12Q6FQIC8vD5cvX8bCwgI19VcqlcJms8HhcGBycjJuSz6aTCbMzs5Cq9Xi6NGjGB0dxcTEBJxOJyQSCZRKJUZGRjAyMoL//e9/jBFsoaAVLJfLKZvD4XBQ37ZEItlUph0OB238gVwu5ySQhEqlopo+i8VCtbsymWxTmb579y6t46BSqRg5aAXX1tbi22+/DZvY5XQ6aQMuQyGVSlFbW8trgb6WlhY0NDRgYmJi0/6ZmRnGYVylUomWlhbGeGbaMuzxeKDRaKhVDdkgOzsblZWVOHfuHO82fH19HW1tbWhqamL9ul2ZTIbXXnsNnZ2drN7yFfFFjy6XC6WlpVCpVFhaWsK///5LVfnz8/PIy8sDAOzevRu5ubnIycmBx+OJKZ6YIAj4fD7U19ejsbERf/zxB4aHhykrdHx8HE8//TQAYP/+/VAqlVAoFJx4I1ZapGiyrBw4cAD37t2DXC7HgQMHAGx0QXNycrC+vh63VR5Cecnw3J6eHqhUKpSXlwPY6OI+88wzALiHxnN6PPz555/D9h08eJATIR/Qzd9gE/FKh9SYFlvI5XKqPMUCrlFuKpUKp0+fZkwXDAZp93MSLBaLUVJSgqNHjyIjIwNerzfmOR0ul4sx9lAikeD06dN4+eWXIZVKGXnj5h5evHgRXq8XHo+Hd9MTCpFIhHv37iEnJydqp/+HH36A1+uFy+VirKTi6h7Gug7tVpDuIUEQKCwsjOge/vPPP6zyCwQCWF5exl9//YX5+Xla93DbzTSbzQar1Yr19fW4uoclJSUp9xB4BJullOCdjkdO8P8BGCQ0hnF1DxUAAAAASUVORK5CYII=);
+      width: 27px;
+      height: 30px;
+    }
+    :host.active {
+      cursor: auto;
+    }
     </style>
-
-    <tab-strip>
-      <tab-strip-heading id="tshh">
-        <span id="tsh"></span>
-      </tab-strip-heading>
-      <template repeat="{{tab in tabs_}}">
-        <tab-button button-id="{{ tab.id }}" on-click="{{ tabButtonSelectHandler_ }}" selected="{{ selectedTab_.id === tab.id }}">
-          <button-label>{{ tab.label ? tab.label : 'No Label'}}</button-label>
-        </tab-button>
-      </template>
-    </tab-strip>
-
-    <tabs-content-container id="content-container">
-        <content></content>
-    </tabs-content-container>
-
   </template>
+  
+</polymer-element><polymer-element name="tr-ui-b-mouse-mode-selector">
+  <template>
+    <style>
+    :host {
 
-  
-</polymer-element><polymer-element name="tr-ui-a-sub-view">
-  
+      -webkit-user-drag: element;
+      -webkit-user-select: none;
+
+      background: #DDD;
+      border: 1px solid #BBB;
+      border-radius: 4px;
+      box-shadow: 0 1px 2px rgba(0,0,0,0.2);
+      left: calc(100% - 120px);
+      position: absolute;
+      top: 100px;
+      user-select: none;
+      width: 29px;
+      z-index: 20;
+    }
+
+    .drag-handle {
+      background: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADwAAAChCAYAAACbBNzvAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAABV0RVh0Q3JlYXRpb24gVGltZQA3LzE2LzEzRNEKUwAAABx0RVh0U29mdHdhcmUAQWRvYmUgRmlyZXdvcmtzIENTNui8sowAAA9aSURBVHic7V1rTFvl//+UrgUmZWMpbLa6cLErwpYxkqLGkjAG88WSbmumGUllvlmAJctMRtybvlHrLXiJUekMIZuYSCL5gS+EuLIXGEGjqCsllCEW6xQECgzWG7S05/+C/zkp9LTn0gsL6ych9JzznOdzPj19Luf5PN/nCN59913ixRdfRFdXFxLx/2GDgCAIYmpqCoWFhUjE/4cNae+99x4AIFH/Hzak7nDqDu+wOyyw2WzEdl9EMpG23ReQbKQE73Q8coJ3bfcFWK1W/Pbbb/D7/UhLi/37DwaDEIvFKC8vR0lJSdjxbRVstVoxPDyMxx9/HAUFBcjMzIRAIOCdXzAYhNvtht1ux/DwMACEid5WwSMjI3jyySdRXFwMsVgMoVAYk2CCIJCZmYns7GyMjo5iZGQkPoKXl5exd+9e3hdGIhgMIj8/H5mZmRCJRIyCyQ5NJBAEgUAgAKFQiIKCAiwsLISl4VxoHA4H+vv74Xa7uZ4aBqFQiOzsbIhEIojFYojFYohEItq/8fFxXLlyBUtLSxHThOaxZ88eCIXC2AWPj48DAH799deYBaelpUEoFLL6++qrrwAAH3zwAav0YrGYthLkJHh6ehpzc3MAgPn5eUxPT8csWiAQMJbboaEhmM1mAIDFYsHQ0BDvPDkJtlgsYdt+v59LFrxw/fr1sG2Xy8UrL06C6+vrw7bFYjEvYi747rvvwrYlEgmvvDjV0g6HI+p2ohBP3qh32OFwoLe3l1VGvb29sNvtvC8kFCMjI9DpdKzS6nQ6mEwm1nnTPg/7/X6MjY1hcnKS/VX+P/bu3YuysjLk5uYypv36669x8uRJZGRkQCQSwev1oqOjAz09PZx5CwsLcenSJRw+fBh+vx+rq6swmUx46aWXNqWjvcMDAwO8xAIbnZKBgQFeNXhzczMvscBGp6S5uRk//vhj1HS0grVaLYqLi3kRy+Vy1NXVRe0RRcKNGzeg0Wh48apUKnR1daG6ujpqOtpKy+VyQa1Wo6SkBLdv38aFCxeoY5988gn1+fLly9TnL774ApWVlXjiiSfgdDqxtrbG+aJ9Ph/0ej3OnDkDvV6PW7duUceOHDlCfR4dHaU+v/DCC7h27RrUajWcTidWV1ejctAKJggCKysryMzMhE6nw+zsLO3Joft1Oh0ePHiApaUlduqi8BYVFaGvr48Vb19fHyfeqM2Sz+dj3QTEs4lKJC+njsfWJoptkxUrtjZRbJssOnASXFtbG3U7UXjrrbeibnMBJ8FZWVkoKysDABQUFCArK4s3MRcoFArqrlZXV0OhUPDOi5Ngn8+Hw4cPQyqV4tlnn4XP5+NNTIIgmH0An8+HV155BUqlEq+++ior3kAgQLuf84jH2toajh8/jvX1da6n0sLj8SAjI4MxHUEQ+PTTT1nlSRAEHjx4QHtsW8e0RCIR7HY79uzZE/GOcEUgEEAgEMDff/8NkUgUdnxbBR85cgRmsxkCgQD5+fkRh2XYIhAI4P79+5iamoLD4cCxY8fC0myr4KeeegoCgQBWqxVzc3NIS0uLedQyGAxi165dKC8vR1FRUVialHu405ESvNPxyAlOuYfJRMo9fFjdw3iBq3vIBDbu4bYK3uoextKtJEH2yWNyD8nyEG8wuYcffvgha3cxru6h3W5Hf39/QoyzaE6fyWRCQ0MDZ+MsLu7h8vIyent7sby8zIk8VkxNTUGn08Fms8UlP04Nn9/vR39/f9w8JLZwu91obGzk5CFFAq+Wfnh4mDKok4mWlha0trbGlAfvrs3k5CQGBgaSYoiHoqenB1evXk2OIb4VDocDJpMp6eXaYrGgsbGRV7mOufPq8XgwMDCQ9HI9NzeHq1evci7XvDseUqkUWq0W6enpCAaDcDqd8Hq9fLNjDaVSiRs3bkAikfDi5XSHxWIxampqAAALCwsYGhrC7Ows5ufnEypWIpHAYDAAACYmJnD9+nXevJwEnzp1CjKZDBUVFQCAsbGxpJTfjz76CFVVVWhqagIAdHR08G6XWQuuqanB7t274fV6UVpaiuzsbAAbTzyJhMFggEKhgNfrRX19PWQyGQDAaDTyyo+V4JqaGshkMsricLlcOH78OICNCWp8p0cwwWAwoKqqahPvG2+8AWDji+7u7uacJyvBMpksrKxkZWVR0yLGxsY4E7NBVVVVGK9CoaCmRXR0dHDOk5VguorB5/OhoqICYrE4YZ2PSLxXrlyBRCLhNcE1pufh1dVVXLx4EWlpaRGnJzCBjXtId87g4GBU3ri5h1uJ5+fnY8mCtXvIhTflHoYg5R4mEyn3MAl45KyWlOCdjkdOcMo9TCZS7mHKPeSGhLmH5LBOrAGXXN1DcliHrgdFgsk95CzYbrfDbDbD7/ejrKwstpmtNO5hJJhMJrS2tsLtdqOpqQlarTZi2mjuIWvBfr8fZrN50/iz2WzG9PQ0nn/+edonEzZgij10uVwwGo2bxp+NRiOGhobw+uuv005hjtk9JENz6AbbyWCuRESp2Ww2NDc30w62WywW6HQ6zoOIrO5wbm4uzp8/j5WVFXR2dm46VldXh3379mF5eTku86dDUVxcjK6uLthstrClqrq6unDo0CHOvKwE+/1+LC4uUqG0oZiYmIhaicQCkvfu3bthxwYGBnhVmpy6NnSD7kxxQvEA3Zo+fIsQJ8F040j379/nRcwFdF4037FwToLphkUXFxd5EXMB3chkUgQ7nc6wfT6fL+Gm+H///Re2z+Vy8TLFGSut/v5+RsPsm2++AbDR84pXLFNDQwPjelxnz54FsBFK+/nnn7PKl/EOa7VaVmHvYrE4au+HK27evMkq7F0ikeDmzZus82UU7HK5qG8yGs6ePct73gUdfD4f2tvbGdO1t7dzaocZBRMEAaFQSBnhdKipqYFQKORlm0TjzcvLo4xwOhgMBuTl5XHiZVVp+f1+yGQy2iDq4uJiyGSyhFRcfr8fVVVVtEHUGo0GVVVVnHlZ19JerxdqtRpSqZTaJ5VKoVarEzrdwev1Qq/XQ6lUUvuUSiX0ej0vXk7N0srKCjQaDbXmjUajwcrKCmfSULD5Oa6srKCtrQ0SiQQSiQRtbW2MvHFzD0MrsXhUUmzdw9BKjKmSiqt7SBBE3Conru4hOa8kWqBnyj3cgl0EQcQ0cMYWW3kIgkiKe7iVV2C1Won09PSYxLCB1+tFZmYmtb22tobt4E1LBimATaQAkiKWjveR85ZSgnc6Uu5hMpFyD1PuITekYg/ZxB52dXXFTMo2n1D38NSpU7zjDEP/yHzisnJpIsBm5dJ45rntgpONuITTJirctqWlJabjdG
AUvNUEp0NouxcvtLa2MgZhmUwmzqKjCrbb7aw9HC5pmWAymVivb2kymTgFe0RslrbeNTa1rtlshkgkQn5+PusL2Iqtd42NdWM0GpGVlYWTJ08ypo14h/nGI8Uax8Q3XJbteREFV1ZW8iLmex6Ja9euJfS8iD9puVyOmpoa3L59G8DmVUq3glzNlAzoimVgvrq6GmlpadDr9QA2r1K6FeRqpmRAFxveiIK9Xi8VZ/jLL78whulUVFTELJbkJeMMjUYjI29TUxNrsQBDX5qMM4w0qE2iuLgYpaWlcXMPyThDphWMNRoN6uvrOfGyskvVanXUNGq1Oq5WKclL/qwjQa/Xc+Zl1dNi8nFi9ZeSyZvqS0erjbmAbT6kT7X1lQp8QeYTyasKE8w3aJJvPh6PBwRBYGZmJi68MzMzqdjDUDx67mEsFxwrUrGHSUCqWdrpSAne6dix7uFzzz1HW0s/FO7h/v37UVBQgMceeyxm99DlcsFut2NwcBACgSDsnTHb7h4ePHgQxcXFcTPTMjIyIJFIcOfOHfz+++8Pl2DSPSTftxQv93DXrl0oKirCnTt3wtIwFhq62aputxtms5maCR8pHROEQiEkEgntew/X1tbC3mu4tLSE9vZ2nD9/njZd6Pn79u3jHoo3OTmJsbExnDlzBsDGWLXdbqcNoent7YVCocChQ4dYh+VFij3s7u5GR0cH9YWaTCbcunVr0yMkmfbChQvQarXQarVUWF4wGER6ejp7wdPT0zCbzfB4PJv2R7NT/H4/rFYrJicnUVZWxnowPtTpGxoagtFoDAsIi2anuN1ufPnll+ju7salS5dw4sQJKk+64hH2FTgcDgwPD4eJZQu/3w+bzcZ5JSSLxYL333+fNvqNDdxuN3p6ehjPDxMsl8tjjkw5ceIENfOVLVQqFd58882YeA0GA7WiWiSECfb5fPjpp58AbKyBx/bCpVIp6urqAADff/895wf6tbU1fPbZZwCAjz/+mPHCSSiVSsr3eueddxh5aWtpMrwuJyeH9cuczp07R5UZvktO/fnnnwCAY8eOoa+vj9U5nZ2d1CsH2fhaUZulwcFB1kGNi4uLjK/gYwuDwcCJ9+2332add9RmyW63w+12Q6FQIC8vD5cvX8bCwgI19VcqlcJms8HhcGBycjJuSz6aTCbMzs5Cq9Xi6NGjGB0dxcTEBJxOJyQSCZRKJUZGRjAyMoL//e9/jBFsoaAVLJfLKZvD4XBQ37ZEItlUph0OB238gVwu5ySQhEqlopo+i8VCtbsymWxTmb579y6t46BSqRg5aAXX1tbi22+/DZvY5XQ6aQMuQyGVSlFbW8trgb6WlhY0NDRgYmJi0/6ZmRnGYVylUomWlhbGeGbaMuzxeKDRaKhVDdkgOzsblZWVOHfuHO82fH19HW1tbWhqamL9ul2ZTIbXXnsNnZ2drN7yFfFFjy6XC6WlpVCpVFhaWsK///5LVfnz8/PIy8sDAOzevRu5ubnIycmBx+OJKZ6YIAj4fD7U19ejsbERf/zxB4aHhykrdHx8HE8//TQAYP/+/VAqlVAoFJx4I1ZapGiyrBw4cAD37t2DXC7HgQMHAGx0QXNycrC+vh63VR5Cecnw3J6eHqhUKpSXlwPY6OI+88wzALiHxnN6PPz555/D9h08eJATIR/Qzd9gE/FKh9SYFlvI5XKqPMUCrlFuKpUKp0+fZkwXDAZp93MSLBaLUVJSgqNHjyIjIwNerzfmOR0ul4sx9lAikeD06dN4+eWXIZVKGXnj5h5evHgRXq8XHo+Hd9MTCpFIhHv37iEnJydqp/+HH36A1+uFy+VirKTi6h7Gug7tVpDuIUEQKCwsjOge/vPPP6zyCwQCWF5exl9//YX5+Xla93DbzTSbzQar1Yr19fW4uoclJSUp9xB4BJullOCdjkdO8P8BGCQ0hnF1DxUAAAAASUVORK5CYII=) 2px 3px no-repeat;
+      background-repeat: no-repeat;
+      border-bottom: 1px solid #BCBCBC;
+      cursor: move;
+      display: block;
+      height: 13px;
+      width: 27px;
+    }
+
+    .tool-button {
+      background-position: center center;
+      background-repeat: no-repeat;
+      border-bottom: 1px solid #BCBCBC;
+      border-top: 1px solid #F1F1F1;
+      cursor: pointer;
+    }
+
+    .buttons > .tool-button:last-child {
+      border-bottom: none;
+    }
+
+    </style>
+    <div class="drag-handle"></div>
+    <div class="buttons">
+    </div>
+  </template>
 </polymer-element><style>
 * /deep/ .labeled-checkbox {
   display: flex;
   white-space: nowrap;
 }
-</style><polymer-element is="a" name="tr-ui-a-analysis-link" on-click="{{onClicked_}}" on-mouseenter="{{onMouseEnter_}}" on-mouseleave="{{onMouseLeave_}}">
+</style><style>
+.track-button{background-color:rgba(255,255,255,0.5);border:1px solid rgba(0,0,0,0.1);color:rgba(0,0,0,0.2);font-size:10px;height:12px;text-align:center;width:12px}.track-button:hover{background-color:rgba(255,255,255,1.0);border:1px solid rgba(0,0,0,0.5);box-shadow:0 0 .05em rgba(0,0,0,0.4);color:rgba(0,0,0,1)}.track-close-button{left:2px;position:absolute;top:2px}.track-collapse-button{left:3px;position:absolute;top:2px}
+</style><style>
+.drawing-container{-webkit-box-flex:1;display:inline;overflow:auto;overflow-x:hidden;position:relative}.drawing-container-canvas{-webkit-box-flex:1;display:block;pointer-events:none;position:absolute;top:0}
+</style><polymer-element name="tr-ui-heading">
   <template>
     <style>
     :host {
-      display: inline;
-      color: -webkit-link;
-      cursor: pointer;
-      text-decoration: underline;
-      cursor: pointer;
+      background-color: rgb(243, 245, 247);
+      border-right: 1px solid #8e8e8e;
+      display: block;
+      height: 100%;
+      margin: 0;
+      padding: 0 5px 0 0;
+    }
+
+    heading {
+      display: block;
+      overflow-x: hidden;
+      text-align: left;
+      text-overflow: ellipsis;
+      white-space: nowrap;
+    }
+
+    #arrow {
+      -webkit-flex: 0 0 auto;
+      font-family: sans-serif;
+      margin-left: 5px;
+      margin-right: 5px;
+      width: 8px;
+    }
+
+    #link, #heading_content {
+      display: none;
+    }
+    </style>
+    <heading id="heading" on-click="{{onHeadingDivClicked_}}">
+      <span id="arrow"></span>
+      <span id="heading_content"></span>
+      <tr-ui-a-analysis-link id="link"></tr-ui-a-analysis-link>
+    </heading>
+  </template>
+
+  
+</polymer-element><style>
+.letter-dot-track {
+  height: 18px;
+}
+</style><style>
+.chart-track {
+  height: 30px;
+  position: relative;
+}
+</style><style>
+.power-series-track {
+  height: 90px;
+}
+</style><style>
+.spacing-track{height:4px}
+</style><style>
+.object-instance-track{height:18px}
+</style><style>
+.rect-track{height:18px}
+</style><style>
+.thread-track{-webkit-box-orient:vertical;display:-webkit-box;position:relative}
+</style><style>
+.process-track-header{-webkit-flex:0 0 auto;background-image:-webkit-gradient(linear,0 0,100% 0,from(#E5E5E5),to(#D1D1D1));border-bottom:1px solid #8e8e8e;border-top:1px solid white;font-size:75%}.process-track-name:before{content:'\25B8';padding:0 5px}.process-track-base.expanded .process-track-name:before{content:'\25BE'}
+</style><style>
+.model-track {
+  -webkit-box-flex: 1;
+}
+</style><style>
+.ruler-track{height:12px}.ruler-track.tall-mode{height:30px}
+</style><polymer-element name="tr-ui-timeline-track-view">
+  <template>
+    <style>
+    :host {
+      -webkit-box-orient: vertical;
+      display: -webkit-box;
+      position: relative;
+    }
+
+    :host ::content * {
+      -webkit-user-select: none;
+      cursor: default;
+    }
+
+    #drag_box {
+      background-color: rgba(0, 0, 255, 0.25);
+      border: 1px solid rgb(0, 0, 96);
+      font-size: 75%;
+      position: fixed;
+    }
+
+    #hint_text {
+      position: absolute;
+      bottom: 6px;
+      right: 6px;
+      font-size: 8pt;
     }
     </style>
     <content></content>
+
+    <div id="drag_box"></div>
+    <div id="hint_text"></div>
+
+    <tv-ui-b-hotkey-controller id="hotkey_controller">
+    </tv-ui-b-hotkey-controller>
   </template>
+
   
 </polymer-element><polymer-element name="tr-ui-b-table">
   <template>
@@ -368,7 +437,96 @@
   </template>
 
   
-</polymer-element><polymer-element name="tr-ui-u-scalar-span">
+</polymer-element><polymer-element name="tr-ui-side-panel">
+  
+</polymer-element><style>
+  * /deep/ .chart-base #title {
+    font-size: 16pt;
+  }
+
+  * /deep/ .chart-base {
+    font-size: 12pt;
+    -webkit-user-select: none;
+    cursor: default;
+  }
+
+  * /deep/ .chart-base .axis path,
+  * /deep/ .chart-base .axis line {
+    fill: none;
+    shape-rendering: crispEdges;
+    stroke: #000;
+  }
+</style><template id="chart-base-template">
+  <svg> 
+    <g id="chart-area" xmlns="http://www.w3.org/2000/svg">
+      <g class="x axis"></g>
+      <g class="y axis"></g>
+      <text id="title"></text>
+    </g>
+  </svg>
+</template><style>
+  * /deep/ .chart-base-2d.updating-brushing-state #brushes > * {
+    fill: rgb(103, 199, 165)
+  }
+
+  * /deep/ .chart-base-2d #brushes {
+    fill: rgb(213, 236, 229)
+  }
+</style><style>
+* /deep/ .line-chart .line{fill:none;stroke-width:1.5px}* /deep/ .line-chart #brushes>rect{fill:rgb(192,192,192)}
+</style><polymer-element extends="tr-ui-side-panel" name="tr-ui-e-s-alerts-side-panel">
+  <template>
+    <style>
+    :host {
+      display: block;
+      width: 250px;
+    }
+    #content {
+      flex-direction: column;
+      display: flex;
+    }
+    </style>
+
+    <div id="content">
+      <toolbar id="toolbar"></toolbar>
+      <result-area id="result_area"></result-area>
+    </div>
+  </template>
+
+  
+</polymer-element><polymer-element name="tr-ui-a-sub-view">
+  
+</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-alert-sub-view">
+  <template>
+    <style>
+    :host {
+      display: flex;
+      flex-direction: column;
+    }
+    #table {
+      flex: 1 1 auto;
+      align-self: stretch;
+    }
+    </style>
+    <tr-ui-b-table id="table">
+    </tr-ui-b-table>
+  </template>
+  
+</polymer-element><polymer-element is="a" name="tr-ui-a-analysis-link" on-click="{{onClicked_}}" on-mouseenter="{{onMouseEnter_}}" on-mouseleave="{{onMouseLeave_}}">
+  <template>
+    <style>
+    :host {
+      display: inline;
+      color: -webkit-link;
+      cursor: pointer;
+      text-decoration: underline;
+      cursor: pointer;
+    }
+    </style>
+    <content></content>
+  </template>
+  
+</polymer-element><polymer-element name="tr-v-ui-scalar-span">
   <template>
     <style>
     :host {
@@ -401,618 +559,36 @@
     <span id="warning" style="display:none">⚠</span>
   </template>
   
-</polymer-element><polymer-element extends="tr-ui-u-scalar-span" name="tr-ui-u-time-duration-span">
-  
-</polymer-element><polymer-element extends="tr-ui-u-scalar-span" name="tr-ui-u-time-stamp-span">
-  
-</polymer-element><polymer-element is="HTMLUnknownElement" name="tr-ui-a-generic-object-view">
-  <template>
-    <style>
-    :host {
-      display: block;
-      font-family: monospace;
-    }
-    </style>
-    <div id="content">
-    </div>
-  </template>
-
-  
-</polymer-element><polymer-element is="HTMLUnknownElement" name="tr-ui-a-generic-object-view-with-label">
-  <template>
-    <style>
-    :host {
-      display: block;
-    }
-    </style>
-  </template>
-
-  
-</polymer-element><polymer-element name="tr-ui-a-stack-frame">
-  <template>
-    <style>
-    :host {
-      display: flex;
-      flex-direction: row;
-      align-items: center;
-    }
-    </style>
-    <tr-ui-b-table id="table"></tr-ui-b-table>
-  </template>
-  
-</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-single-event-sub-view">
-  <template>
-    <style>
-    :host {
-      display: flex;
-      flex-direction: column;
-    }
-    #table {
-      flex: 1 1 auto;
-      align-self: stretch;
-    }
-    </style>
-    <tr-ui-b-table id="table">
-    </tr-ui-b-table>
-  </template>
-  
-</polymer-element><polymer-element name="tr-ui-a-related-events">
-  <template>
-    <style>
-    :host {
-      display: flex;
-      flex-direction: column;
-    }
-    #table {
-      flex: 1 1 auto;
-      align-self: stretch;
-    }
-    </style>
-    <tr-ui-b-table id="table"></tr-ui-b-table>
-  </template>
-
-  
-</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-single-thread-slice-sub-view">
-  <template>
-    <style>
-    :host {
-      display: flex;
-      flex-direction: row;
-    }
-    #events {
-      display: flex;
-      flex-direction: column;
-    }
-
-    </style>
-    <tr-ui-a-single-event-sub-view id="content"></tr-ui-a-single-event-sub-view>
-    <div id="events">
-      <tr-ui-a-related-events id="relatedEvents">
-      </tr-ui-a-related-events>
-    </div>
-  </template>
-
-  
-</polymer-element><polymer-element name="tr-ui-a-selection-summary-table">
-  <template>
-    <style>
-    :host {
-      display: flex;
-    }
-    #table {
-      flex: 1 1 auto;
-      align-self: stretch;
-    }
-    </style>
-    <tr-ui-b-table id="table">
-    </tr-ui-b-table>
-    
-  </template>
-  
-</polymer-element><polymer-element name="tr-ui-a-multi-event-summary-table">
-  <template>
-    <style>
-    :host {
-      display: flex;
-    }
-    #table {
-      flex: 1 1 auto;
-      align-self: stretch;
-    }
-    </style>
-    <tr-ui-b-table id="table">
-    </tr-ui-b-table>
-    
-  </template>
-  
-</polymer-element><polymer-element name="tr-ui-a-multi-event-details-table">
-  <template>
-    <style>
-    :host {
-      display: flex;
-      flex-direction: column;
-    }
-    #table {
-      flex: 1 1 auto;
-      align-self: stretch;
-    }
-
-    #titletable {
-      font-weight: bold;
-    }
-
-    #title-info {
-      font-size: 12px;
-    }
-    </style>
-    <tr-ui-b-table id="titletable">
-    </tr-ui-b-table>
-    <tr-ui-b-table id="table">
-    </tr-ui-b-table>
-  </template>
-
-  
-</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-multi-event-sub-view">
-  <template>
-    <style>
-    :host {
-      display: flex;
-      overflow: auto;
-    }
-    #content {
-      display: flex;
-      flex-direction: column;
-      flex: 0 1 auto;
-      align-self: stretch;
-    }
-    #content > * {
-      flex: 0 0 auto;
-      align-self: stretch;
-    }
-    tr-ui-a-multi-event-summary-table {
-      border-bottom: 1px solid #aaa;
-    }
-
-    tr-ui-a-selection-summary-table  {
-      margin-top: 1.25em;
-      border-top: 1px solid #aaa;
-      background-color: #eee;
-      font-weight: bold;
-      margin-bottom: 1.25em;
-      border-bottom: 1px solid #aaa;
-    }
-    </style>
-    <div id="content"></div>
-  </template>
-  
-</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-multi-thread-slice-sub-view">
-  <template>
-    <style>
-    :host {
-      display: flex;
-    }
-    #content {
-      display: flex;
-      flex: 1 1 auto;
-    }
-    #content > tr-ui-a-related-events {
-      margin-left: 8px;
-      flex: 0 1 200px;
-    }
-    </style>
-    <div id="content"></div>
-  </template>
-
-  
-</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-single-async-slice-sub-view">
-  <template>
-    <style>
-    :host {
-      display: flex;
-      flex-direction: row;
-    }
-    #events {
-      display:flex;
-      flex-direction: column;
-    }
-    </style>
-    <tr-ui-a-single-event-sub-view id="content"></tr-ui-a-single-event-sub-view>
-    <div id="events">
-      <tr-ui-a-related-events id="relatedEvents"></tr-ui-a-related-events>
-    </div>
-  </template>
-
-  
-</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-multi-async-slice-sub-view">
-  <template>
-    <style>
-    :host {
-      display: flex;
-    }
-    #container {
-      display: flex;
-      flex: 1 1 auto;
-    }
-    #events {
-      margin-left: 8px;
-      flex: 0 1 200px;
-    }
-    </style>
-    <div id="container">
-      <tr-ui-a-multi-event-sub-view id="content"></tr-ui-a-multi-event-sub-view>
-      <div id="events">
-        <tr-ui-a-related-events id="relatedEvents"></tr-ui-a-related-events>
-      </div>
-    </div>
-  </template>
-
-  
-</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-single-cpu-slice-sub-view">
-  <template>
-    <style>
-    table {
-      border-collapse: collapse;
-      border-width: 0;
-      margin-bottom: 25px;
-      width: 100%;
-    }
-
-    table tr > td:first-child {
-      padding-left: 2px;
-    }
-
-    table tr > td {
-      padding: 2px 4px 2px 4px;
-      vertical-align: text-top;
-      width: 150px;
-    }
-
-    table td td {
-      padding: 0 0 0 0;
-      width: auto;
-    }
-    tr {
-      vertical-align: top;
-    }
-
-    tr:nth-child(2n+0) {
-      background-color: #e2e2e2;
-    }
-    </style>
-    <table>
-      <tbody><tr>
-        <td>Running process:</td><td id="process-name"></td>
-      </tr>
-      <tr>
-        <td>Running thread:</td><td id="thread-name"></td>
-      </tr>
-      <tr>
-        <td>Start:</td>
-        <td>
-          <tr-ui-u-time-stamp-span id="start">
-          </tr-ui-u-time-stamp-span>
-        </td>
-      </tr>
-      <tr>
-        <td>Duration:</td>
-        <td>
-          <tr-ui-u-time-duration-span id="duration">
-          </tr-ui-u-time-duration-span>
-        </td>
-      </tr>
-      <tr>
-        <td>Active slices:</td><td id="running-thread"></td>
-      </tr>
-      <tr>
-        <td>Args:</td>
-        <td>
-          <tr-ui-a-generic-object-view id="args">
-          </tr-ui-a-generic-object-view>
-        </td>
-      </tr>
-    </tbody></table>
-  </template>
-
-  
-</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-multi-cpu-slice-sub-view">
-  <template>
-    <style>
-    :host {
-      display: flex;
-    }
-    #content {
-      flex: 1 1 auto;
-    }
-    </style>
-    <tr-ui-a-multi-event-sub-view id="content"></tr-ui-a-multi-event-sub-view>
-  </template>
-
-  
-</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-single-thread-time-slice-sub-view">
-  <template>
-    <style>
-    table {
-      border-collapse: collapse;
-      border-width: 0;
-      margin-bottom: 25px;
-      width: 100%;
-    }
-
-    table tr > td:first-child {
-      padding-left: 2px;
-    }
-
-    table tr > td {
-      padding: 2px 4px 2px 4px;
-      vertical-align: text-top;
-      width: 150px;
-    }
-
-    table td td {
-      padding: 0 0 0 0;
-      width: auto;
-    }
-    tr {
-      vertical-align: top;
-    }
-
-    tr:nth-child(2n+0) {
-      background-color: #e2e2e2;
-    }
-    </style>
-    <table>
-      <tbody><tr>
-        <td>Running process:</td><td id="process-name"></td>
-      </tr>
-      <tr>
-        <td>Running thread:</td><td id="thread-name"></td>
-      </tr>
-      <tr>
-        <td>State:</td>
-        <td><b><span id="state"></span></b></td>
-      </tr>
-      <tr>
-        <td>Start:</td>
-        <td>
-          <tr-ui-u-time-stamp-span id="start">
-          </tr-ui-u-time-stamp-span>
-        </td>
-      </tr>
-      <tr>
-        <td>Duration:</td>
-        <td>
-          <tr-ui-u-time-duration-span id="duration">
-          </tr-ui-u-time-duration-span>
-        </td>
-      </tr>
-
-      <tr>
-        <td>On CPU:</td><td id="on-cpu"></td>
-      </tr>
-
-      <tr>
-        <td>Running instead:</td><td id="running-instead"></td>
-      </tr>
-
-      <tr>
-        <td>Args:</td><td id="args"></td>
-      </tr>
-    </tbody></table>
-  </template>
-
-  
-</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-multi-thread-time-slice-sub-view">
-  <template>
-    <style>
-    :host {
-      display: flex;
-    }
-    #content {
-      flex: 1 1 auto;
-    }
-    </style>
-    <tr-ui-a-multi-event-sub-view id="content"></tr-ui-a-multi-event-sub-view>
-  </template>
-
-  
-</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-single-instant-event-sub-view">
-  <template>
-    <style>
-    :host {
-      display: block;
-    }
-    </style>
-    <div id="content"></div>
-  </template>
-
-  
-</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-multi-instant-event-sub-view">
-  <template>
-    <style>
-    :host {
-      display: block;
-    }
-    </style>
-    <div id="content"></div>
-  </template>
-
-  
-</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-counter-sample-sub-view">
-  <template>
-    <style>
-    :host {
-      display: flex;
-      flex-direction: column;
-    }
-    </style>
-    <tr-ui-b-table id="table"></tr-ui-b-table>
-  </template>
-</polymer-element><polymer-element extends="tr-ui-a-single-event-sub-view" name="tr-ui-a-single-flow-event-sub-view">
-  
-</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-multi-flow-event-sub-view">
-  <template>
-    <style>
-    :host {
-      display: flex;
-    }
-    </style>
-    <tr-ui-a-multi-event-sub-view id="content"></tr-ui-a-multi-event-sub-view>
-  </template>
-
-  
-</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-single-object-instance-sub-view">
-  <template>
-    <style>
-    :host {
-      display: block;
-    }
-
-    #snapshots > * {
-      display: block;
-    }
-
-    :host {
-      overflow: auto;
-      display: block;
-    }
-
-    * {
-      -webkit-user-select: text;
-    }
-
-    .title {
-      border-bottom: 1px solid rgb(128, 128, 128);
-      font-size: 110%;
-      font-weight: bold;
-    }
-
-    td, th {
-      font-family: monospace;
-      vertical-align: top;
-    }
-    </style>
-    <div id="content"></div>
-  </template>
-  
-</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-single-object-snapshot-sub-view">
-  <template>
-    <style>
-    #args {
-      white-space: pre;
-    }
-
-    :host {
-      overflow: auto;
-      display: flex;
-    }
-
-    ::content * {
-      -webkit-user-select: text;
-    }
-
-    ::content .title {
-      border-bottom: 1px solid rgb(128, 128, 128);
-      font-size: 110%;
-      font-weight: bold;
-    }
-
-    ::content td, th {
-      font-family: monospace;
-      vertical-align: top;
-    }
-    </style>
-    <content></content>
-  </template>
-  
-</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-multi-object-sub-view">
-  <template>
-    <style>
-    :host {
-      display: flex;
-    }
-    </style>
-    <tr-ui-b-table id="content"></tr-ui-b-table>
-  </template>
-  
-</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-single-sample-sub-view">
-  <template>
-    <style>
-    :host {
-      display: flex;
-    }
-    </style>
-    <tr-ui-b-table id="content"></tr-ui-b-table>
-  </template>
-  
-</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-multi-sample-sub-view">
-  <template>
-    <style>
-    :host { display: block; }
-    #control {
-      background-color: #e6e6e6;
-      background-image: -webkit-gradient(linear, 0 0, 0 100%,
-                                         from(#E5E5E5), to(#D1D1D1));
-      flex: 0 0 auto;
-      overflow-x: auto;
-    }
-    #control::-webkit-scrollbar { height: 0px; }
-    #control {
-      font-size: 12px;
-      display: flex;
-      flex-direction: row;
-      align-items: stretch;
-      margin: 1px;
-      margin-right: 2px;
-    }
-    </style>
-    <div id="control">
-      Sample View Option
-    </div>
-    <tr-ui-b-table id="table">
-    </tr-ui-b-table>
-  </template>
-
-  
-</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-single-interaction-record-sub-view">
-  
-</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-multi-interaction-record-sub-view">
-  
-</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-alert-sub-view">
-  <template>
-    <style>
-    :host {
-      display: flex;
-      flex-direction: column;
-    }
-    #table {
-      flex: 1 1 auto;
-      align-self: stretch;
-    }
-    </style>
-    <tr-ui-b-table id="table">
-    </tr-ui-b-table>
-  </template>
-  
-</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-single-frame-sub-view">
-  <template>
-    <style>
-    :host {
-      display: flex;
-      flex-direction: column;
-    }
-    #asv {
-      flex: 0 0 auto;
-      align-self: stretch;
-    }
-    </style>
-    <tr-ui-a-alert-sub-view id="asv">
-    </tr-ui-a-alert-sub-view>
-  </template>
-  
-</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-multi-frame-sub-view">
-  
 </polymer-element><polymer-element name="tr-ui-a-stacked-pane">
   
+</polymer-element><polymer-element is="HTMLDivElement" name="tr-ui-b-info-bar">
+  <template>
+    <style>
+    :host {
+      align-items: center;
+      flex: 0 0 auto;
+      background-color: rgb(252, 235, 162);
+      border-bottom: 1px solid #A3A3A3;
+      border-left: 1px solid white;
+      border-right: 1px solid #A3A3A3;
+      border-top: 1px solid white;
+      display: flex;
+      height: 26px;
+      padding: 0 3px 0 3px;
+    }
+
+    :host(.info-bar-hidden) {
+      display: none;
+    }
+
+    #message { flex: 1 1 auto; }
+    </style>
+
+    <span id="message"></span>
+    <span id="buttons"></span>
+  </template>
+
+  
 </polymer-element><polymer-element extends="tr-ui-a-stacked-pane" name="tr-ui-a-memory-dump-heap-details-pane">
   <template>
     <style>
@@ -1073,6 +649,8 @@
       </div>
     </div>
     <div id="contents">
+      <tr-ui-b-info-bar class="info-bar-hidden" id="info_bar">
+      </tr-ui-b-info-bar>
       <div id="info_text">No heap dump selected</div>
       <tr-ui-b-table id="table"></tr-ui-b-table>
     </div>
@@ -1285,6 +863,232 @@
   <template>
     <div id="content"></div>
   </template>
+</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-counter-sample-sub-view">
+  <template>
+    <style>
+    :host {
+      display: flex;
+      flex-direction: column;
+    }
+    </style>
+    <tr-ui-b-table id="table"></tr-ui-b-table>
+  </template>
+</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-layout-tree-sub-view">
+  <template>
+    <div id="content"></div>
+  </template>
+</polymer-element><polymer-element name="tr-ui-a-selection-summary-table">
+  <template>
+    <style>
+    :host {
+      display: flex;
+    }
+    #table {
+      flex: 1 1 auto;
+      align-self: stretch;
+    }
+    </style>
+    <tr-ui-b-table id="table">
+    </tr-ui-b-table>
+    
+  </template>
+  
+</polymer-element><polymer-element name="tr-ui-a-multi-event-summary-table">
+  <template>
+    <style>
+    :host {
+      display: flex;
+    }
+    #table {
+      flex: 1 1 auto;
+      align-self: stretch;
+    }
+    </style>
+    <tr-ui-b-table id="table">
+    </tr-ui-b-table>
+    
+  </template>
+  
+</polymer-element><polymer-element is="HTMLUnknownElement" name="tr-ui-a-generic-object-view">
+  <template>
+    <style>
+    :host {
+      display: block;
+      font-family: monospace;
+    }
+    </style>
+    <div id="content">
+    </div>
+  </template>
+
+  
+</polymer-element><polymer-element is="HTMLUnknownElement" name="tr-ui-a-generic-object-view-with-label">
+  <template>
+    <style>
+    :host {
+      display: block;
+    }
+    </style>
+  </template>
+
+  
+</polymer-element><polymer-element name="tr-ui-a-multi-event-details-table">
+  <template>
+    <style>
+    :host {
+      display: flex;
+      flex-direction: column;
+    }
+    #table {
+      flex: 1 1 auto;
+      align-self: stretch;
+    }
+
+    #titletable {
+      font-weight: bold;
+    }
+
+    #title-info {
+      font-size: 12px;
+    }
+    </style>
+    <tr-ui-b-table id="titletable">
+    </tr-ui-b-table>
+    <tr-ui-b-table id="table">
+    </tr-ui-b-table>
+  </template>
+
+  
+</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-multi-event-sub-view">
+  <template>
+    <style>
+    :host {
+      display: flex;
+      overflow: auto;
+    }
+    #content {
+      display: flex;
+      flex-direction: column;
+      flex: 0 1 auto;
+      align-self: stretch;
+    }
+    #content > * {
+      flex: 0 0 auto;
+      align-self: stretch;
+    }
+    tr-ui-a-multi-event-summary-table {
+      border-bottom: 1px solid #aaa;
+    }
+
+    tr-ui-a-selection-summary-table  {
+      margin-top: 1.25em;
+      border-top: 1px solid #aaa;
+      background-color: #eee;
+      font-weight: bold;
+      margin-bottom: 1.25em;
+      border-bottom: 1px solid #aaa;
+    }
+    </style>
+    <div id="content"></div>
+  </template>
+  
+</polymer-element><polymer-element name="tr-ui-a-related-events">
+  <template>
+    <style>
+    :host {
+      display: flex;
+      flex-direction: column;
+    }
+    #table {
+      flex: 1 1 auto;
+      align-self: stretch;
+    }
+    </style>
+    <tr-ui-b-table id="table"></tr-ui-b-table>
+  </template>
+
+  
+</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-multi-async-slice-sub-view">
+  <template>
+    <style>
+    :host {
+      display: flex;
+    }
+    #container {
+      display: flex;
+      flex: 1 1 auto;
+    }
+    #events {
+      margin-left: 8px;
+      flex: 0 1 200px;
+    }
+    </style>
+    <div id="container">
+      <tr-ui-a-multi-event-sub-view id="content"></tr-ui-a-multi-event-sub-view>
+      <div id="events">
+        <tr-ui-a-related-events id="relatedEvents"></tr-ui-a-related-events>
+      </div>
+    </div>
+  </template>
+
+  
+</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-multi-cpu-slice-sub-view">
+  <template>
+    <style>
+    :host {
+      display: flex;
+    }
+    #content {
+      flex: 1 1 auto;
+    }
+    </style>
+    <tr-ui-a-multi-event-sub-view id="content"></tr-ui-a-multi-event-sub-view>
+  </template>
+
+  
+</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-multi-flow-event-sub-view">
+  <template>
+    <style>
+    :host {
+      display: flex;
+    }
+    </style>
+    <tr-ui-a-multi-event-sub-view id="content"></tr-ui-a-multi-event-sub-view>
+  </template>
+
+  
+</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-multi-frame-sub-view">
+  
+</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-multi-instant-event-sub-view">
+  <template>
+    <style>
+    :host {
+      display: block;
+    }
+    </style>
+    <div id="content"></div>
+  </template>
+
+  
+</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-multi-object-sub-view">
+  <template>
+    <style>
+    :host {
+      display: flex;
+    }
+    </style>
+    <tr-ui-b-table id="content"></tr-ui-b-table>
+  </template>
+  
+</polymer-element><polymer-element name="tr-ui-a-frame-power-usage-chart">
+  <template>
+    <div id="content"></div>
+  </template>
+</polymer-element><polymer-element name="tr-ui-a-power-sample-summary-table">
+  <template>
+    <tr-ui-b-table id="table"></tr-ui-b-table>
+  </template>
+  
 </polymer-element><polymer-element name="tr-ui-a-power-sample-table">
   <template>
     <style>
@@ -1294,60 +1098,6 @@
     </style>
     <tr-ui-b-table id="table"></tr-ui-b-table>
   </template>
-</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-single-power-sample-sub-view">
-  <template>
-    <style>
-    :host { display: block; }
-    </style>
-    <tr-ui-a-power-sample-table id="samplesTable">
-    </tr-ui-a-power-sample-table>
-  </template>
-
-  
-</polymer-element><style>
-  * /deep/ .chart-base #title {
-    font-size: 16pt;
-  }
-
-  * /deep/ .chart-base {
-    font-size: 12pt;
-    -webkit-user-select: none;
-    cursor: default;
-  }
-
-  * /deep/ .chart-base .axis path,
-  * /deep/ .chart-base .axis line {
-    fill: none;
-    shape-rendering: crispEdges;
-    stroke: #000;
-  }
-</style><template id="chart-base-template">
-  <svg> 
-    <g id="chart-area" xmlns="http://www.w3.org/2000/svg">
-      <g class="x axis"></g>
-      <g class="y axis"></g>
-      <text id="title"></text>
-    </g>
-  </svg>
-</template><style>
-  * /deep/ .chart-base-2d.updating-brushing-state #brushes > * {
-    fill: rgb(103, 199, 165)
-  }
-
-  * /deep/ .chart-base-2d #brushes {
-    fill: rgb(213, 236, 229)
-  }
-</style><style>
-* /deep/ .line-chart .line{fill:none;stroke-width:1.5px}* /deep/ .line-chart #brushes>rect{fill:rgb(192,192,192)}
-</style><polymer-element name="tr-ui-a-frame-power-usage-chart">
-  <template>
-    <div id="content"></div>
-  </template>
-</polymer-element><polymer-element name="tr-ui-a-power-sample-summary-table">
-  <template>
-    <tr-ui-b-table id="table"></tr-ui-b-table>
-  </template>
-  
 </polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-multi-power-sample-sub-view">
   <template>
     <style>
@@ -1373,6 +1123,483 @@
     <tr-ui-a-frame-power-usage-chart id="chart">
     </tr-ui-a-frame-power-usage-chart>
   </template>
+</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-multi-sample-sub-view">
+  <template>
+    <style>
+    :host { display: block; }
+    #control {
+      background-color: #e6e6e6;
+      background-image: -webkit-gradient(linear, 0 0, 0 100%,
+                                         from(#E5E5E5), to(#D1D1D1));
+      flex: 0 0 auto;
+      overflow-x: auto;
+    }
+    #control::-webkit-scrollbar { height: 0px; }
+    #control {
+      font-size: 12px;
+      display: flex;
+      flex-direction: row;
+      align-items: stretch;
+      margin: 1px;
+      margin-right: 2px;
+    }
+    </style>
+    <div id="control">
+      Sample View Option
+    </div>
+    <tr-ui-b-table id="table">
+    </tr-ui-b-table>
+  </template>
+
+  
+</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-multi-thread-slice-sub-view">
+  <template>
+    <style>
+    :host {
+      display: flex;
+    }
+    #content {
+      display: flex;
+      flex: 1 1 auto;
+    }
+    #content > tr-ui-a-related-events {
+      margin-left: 8px;
+      flex: 0 1 200px;
+    }
+    </style>
+    <div id="content"></div>
+  </template>
+
+  
+</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-multi-thread-time-slice-sub-view">
+  <template>
+    <style>
+    :host {
+      display: flex;
+    }
+    #content {
+      flex: 1 1 auto;
+    }
+    </style>
+    <tr-ui-a-multi-event-sub-view id="content"></tr-ui-a-multi-event-sub-view>
+  </template>
+
+  
+</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-multi-user-expectation-sub-view">
+  
+</polymer-element><polymer-element name="tr-ui-a-stack-frame">
+  <template>
+    <style>
+    :host {
+      display: flex;
+      flex-direction: row;
+      align-items: center;
+    }
+    </style>
+    <tr-ui-b-table id="table"></tr-ui-b-table>
+  </template>
+  
+</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-single-event-sub-view">
+  <template>
+    <style>
+    :host {
+      display: flex;
+      flex-direction: column;
+    }
+    #table {
+      flex: 1 1 auto;
+      align-self: stretch;
+    }
+    </style>
+    <tr-ui-b-table id="table">
+    </tr-ui-b-table>
+  </template>
+  
+</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-single-async-slice-sub-view">
+  <template>
+    <style>
+    :host {
+      display: flex;
+      flex-direction: row;
+    }
+    #events {
+      display:flex;
+      flex-direction: column;
+    }
+    </style>
+    <tr-ui-a-single-event-sub-view id="content"></tr-ui-a-single-event-sub-view>
+    <div id="events">
+      <tr-ui-a-related-events id="relatedEvents"></tr-ui-a-related-events>
+    </div>
+  </template>
+
+  
+</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-single-cpu-slice-sub-view">
+  <template>
+    <style>
+    table {
+      border-collapse: collapse;
+      border-width: 0;
+      margin-bottom: 25px;
+      width: 100%;
+    }
+
+    table tr > td:first-child {
+      padding-left: 2px;
+    }
+
+    table tr > td {
+      padding: 2px 4px 2px 4px;
+      vertical-align: text-top;
+      width: 150px;
+    }
+
+    table td td {
+      padding: 0 0 0 0;
+      width: auto;
+    }
+    tr {
+      vertical-align: top;
+    }
+
+    tr:nth-child(2n+0) {
+      background-color: #e2e2e2;
+    }
+    </style>
+    <table>
+      <tbody><tr>
+        <td>Running process:</td><td id="process-name"></td>
+      </tr>
+      <tr>
+        <td>Running thread:</td><td id="thread-name"></td>
+      </tr>
+      <tr>
+        <td>Start:</td>
+        <td>
+          <tr-v-ui-scalar-span id="start">
+          </tr-v-ui-scalar-span>
+        </td>
+      </tr>
+      <tr>
+        <td>Duration:</td>
+        <td>
+          <tr-v-ui-scalar-span id="duration">
+          </tr-v-ui-scalar-span>
+        </td>
+      </tr>
+      <tr>
+        <td>Active slices:</td><td id="running-thread"></td>
+      </tr>
+      <tr>
+        <td>Args:</td>
+        <td>
+          <tr-ui-a-generic-object-view id="args">
+          </tr-ui-a-generic-object-view>
+        </td>
+      </tr>
+    </tbody></table>
+  </template>
+
+  
+</polymer-element><polymer-element extends="tr-ui-a-single-event-sub-view" name="tr-ui-a-single-flow-event-sub-view">
+  
+</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-single-frame-sub-view">
+  <template>
+    <style>
+    :host {
+      display: flex;
+      flex-direction: column;
+    }
+    #asv {
+      flex: 0 0 auto;
+      align-self: stretch;
+    }
+    </style>
+    <tr-ui-a-alert-sub-view id="asv">
+    </tr-ui-a-alert-sub-view>
+  </template>
+  
+</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-single-instant-event-sub-view">
+  <template>
+    <style>
+    :host {
+      display: block;
+    }
+    </style>
+    <div id="content"></div>
+  </template>
+
+  
+</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-single-object-instance-sub-view">
+  <template>
+    <style>
+    :host {
+      display: block;
+    }
+
+    #snapshots > * {
+      display: block;
+    }
+
+    :host {
+      overflow: auto;
+      display: block;
+    }
+
+    * {
+      -webkit-user-select: text;
+    }
+
+    .title {
+      border-bottom: 1px solid rgb(128, 128, 128);
+      font-size: 110%;
+      font-weight: bold;
+    }
+
+    td, th {
+      font-family: monospace;
+      vertical-align: top;
+    }
+    </style>
+    <div id="content"></div>
+  </template>
+  
+</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-single-object-snapshot-sub-view">
+  <template>
+    <style>
+    #args {
+      white-space: pre;
+    }
+
+    :host {
+      overflow: auto;
+      display: flex;
+    }
+
+    ::content * {
+      -webkit-user-select: text;
+    }
+
+    ::content .title {
+      border-bottom: 1px solid rgb(128, 128, 128);
+      font-size: 110%;
+      font-weight: bold;
+    }
+
+    ::content td, th {
+      font-family: monospace;
+      vertical-align: top;
+    }
+    </style>
+    <content></content>
+  </template>
+  
+</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-single-power-sample-sub-view">
+  <template>
+    <style>
+    :host { display: block; }
+    </style>
+    <tr-ui-a-power-sample-table id="samplesTable">
+    </tr-ui-a-power-sample-table>
+  </template>
+
+  
+</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-single-sample-sub-view">
+  <template>
+    <style>
+    :host {
+      display: flex;
+    }
+    </style>
+    <tr-ui-b-table id="content"></tr-ui-b-table>
+  </template>
+  
+</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-single-thread-slice-sub-view">
+  <template>
+    <style>
+    :host {
+      display: flex;
+      flex-direction: row;
+    }
+    #events {
+      display: flex;
+      flex-direction: column;
+    }
+
+    </style>
+    <tr-ui-a-single-event-sub-view id="content"></tr-ui-a-single-event-sub-view>
+    <div id="events">
+      <tr-ui-a-related-events id="relatedEvents">
+      </tr-ui-a-related-events>
+    </div>
+  </template>
+
+  
+</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-single-thread-time-slice-sub-view">
+  <template>
+    <style>
+    table {
+      border-collapse: collapse;
+      border-width: 0;
+      margin-bottom: 25px;
+      width: 100%;
+    }
+
+    table tr > td:first-child {
+      padding-left: 2px;
+    }
+
+    table tr > td {
+      padding: 2px 4px 2px 4px;
+      vertical-align: text-top;
+      width: 150px;
+    }
+
+    table td td {
+      padding: 0 0 0 0;
+      width: auto;
+    }
+    tr {
+      vertical-align: top;
+    }
+
+    tr:nth-child(2n+0) {
+      background-color: #e2e2e2;
+    }
+    </style>
+    <table>
+      <tbody><tr>
+        <td>Running process:</td><td id="process-name"></td>
+      </tr>
+      <tr>
+        <td>Running thread:</td><td id="thread-name"></td>
+      </tr>
+      <tr>
+        <td>State:</td>
+        <td><b><span id="state"></span></b></td>
+      </tr>
+      <tr>
+        <td>Start:</td>
+        <td>
+          <tr-v-ui-scalar-span id="start">
+          </tr-v-ui-scalar-span>
+        </td>
+      </tr>
+      <tr>
+        <td>Duration:</td>
+        <td>
+          <tr-v-ui-scalar-span id="duration">
+          </tr-v-ui-scalar-span>
+        </td>
+      </tr>
+
+      <tr>
+        <td>On CPU:</td><td id="on-cpu"></td>
+      </tr>
+
+      <tr>
+        <td>Running instead:</td><td id="running-instead"></td>
+      </tr>
+
+      <tr>
+        <td>Args:</td><td id="args"></td>
+      </tr>
+    </tbody></table>
+  </template>
+
+  
+</polymer-element><polymer-element extends="tr-ui-a-sub-view" name="tr-ui-a-single-user-expectation-sub-view">
+  
+</polymer-element><polymer-element constructor="TracingAnalysisTabView" name="tr-ui-a-tab-view">
+  <template>
+    <style>
+      :host {
+        display: flex;
+        flex-flow: column nowrap;
+        overflow: hidden;
+        box-sizing: border-box;
+      }
+
+      tab-strip[tabs-hidden] {
+        display: none;
+      }
+
+      tab-strip {
+        background-color: rgb(236, 236, 236);
+        border-bottom: 1px solid #8e8e8e;
+        display: flex;
+        flex: 0 0 auto;
+        flex-flow: row;
+        overflow-x: auto;
+        padding: 0 10px 0 10px;
+        font-size: 12px;
+      }
+
+      tab-button {
+        display: block;
+        flex: 0 0 auto;
+        padding: 4px 15px 1px 15px;
+        margin-top: 2px;
+      }
+
+      tab-button[selected=true] {
+        background-color: white;
+        border: 1px solid rgb(163, 163, 163);
+        border-bottom: none;
+        padding: 3px 14px 1px 14px;
+      }
+
+      tabs-content-container {
+        display: flex;
+        flex: 1 1 auto;
+        overflow: auto;
+        width: 100%;
+      }
+
+      ::content > * {
+        flex: 1 1 auto;
+      }
+
+      ::content > *:not([selected]) {
+        display: none;
+      }
+
+      button-label {
+        display: inline;
+      }
+
+      tab-strip-heading {
+        display: block;
+        flex: 0 0 auto;
+        padding: 4px 15px 1px 15px;
+        margin-top: 2px;
+        margin-before: 20px;
+        margin-after: 10px;
+      }
+      #tsh {
+        display: inline;
+        font-weight: bold;
+      }
+    </style>
+
+    <tab-strip>
+      <tab-strip-heading id="tshh">
+        <span id="tsh"></span>
+      </tab-strip-heading>
+      <template repeat="{{tab in tabs_}}">
+        <tab-button button-id="{{ tab.id }}" on-click="{{ tabButtonSelectHandler_ }}" selected="{{ selectedTab_.id === tab.id }}">
+          <button-label>{{ tab.label ? tab.label : 'No Label'}}</button-label>
+        </tab-button>
+      </template>
+    </tab-strip>
+
+    <tabs-content-container id="content-container">
+        <content></content>
+    </tabs-content-container>
+
+  </template>
+
+  
 </polymer-element><polymer-element name="tr-ui-a-analysis-view">
   <template>
     <style>
@@ -1494,36 +1721,6 @@
     </dialog>
   </template>
   
-</polymer-element><polymer-element name="tv-ui-b-hotkey-controller">
-  
-</polymer-element><polymer-element is="HTMLDivElement" name="tr-ui-b-info-bar">
-  <template>
-    <style>
-    :host {
-      align-items: center;
-      flex: 0 0 auto;
-      background-color: rgb(252, 235, 162);
-      border-bottom: 1px solid #A3A3A3;
-      border-left: 1px solid white;
-      border-right: 1px solid #A3A3A3;
-      border-top: 1px solid white;
-      display: flex;
-      height: 26px;
-      padding: 0 3px 0 3px;
-    }
-
-    :host(.info-bar-hidden) {
-      display: none;
-    }
-
-    #message { flex: 1 1 auto; }
-    </style>
-
-    <span id="message"></span>
-    <span id="buttons"></span>
-  </template>
-
-  
 </polymer-element><polymer-element is="HTMLUnknownElement" name="tr-ui-b-info-bar-group">
   <template>
     <style>
@@ -1567,179 +1764,6 @@
       <content></content>
     </div>
   </template>
-</polymer-element><polymer-element name="tr-ui-b-mouse-mode-icon">
-  <template>
-    <style>
-    :host {
-      display: block;
-      background-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADwAAAChCAYAAACbBNzvAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAABV0RVh0Q3JlYXRpb24gVGltZQA3LzE2LzEzRNEKUwAAABx0RVh0U29mdHdhcmUAQWRvYmUgRmlyZXdvcmtzIENTNui8sowAAA9aSURBVHic7V1rTFvl//+UrgUmZWMpbLa6cLErwpYxkqLGkjAG88WSbmumGUllvlmAJctMRtybvlHrLXiJUekMIZuYSCL5gS+EuLIXGEGjqCsllCEW6xQECgzWG7S05/+C/zkp9LTn0gsL6ych9JzznOdzPj19Luf5PN/nCN59913ixRdfRFdXFxLx/2GDgCAIYmpqCoWFhUjE/4cNae+99x4AIFH/Hzak7nDqDu+wOyyw2WzEdl9EMpG23ReQbKQE73Q8coJ3bfcFWK1W/Pbbb/D7/UhLi/37DwaDEIvFKC8vR0lJSdjxbRVstVoxPDyMxx9/HAUFBcjMzIRAIOCdXzAYhNvtht1ux/DwMACEid5WwSMjI3jyySdRXFwMsVgMoVAYk2CCIJCZmYns7GyMjo5iZGQkPoKXl5exd+9e3hdGIhgMIj8/H5mZmRCJRIyCyQ5NJBAEgUAgAKFQiIKCAiwsLISl4VxoHA4H+vv74Xa7uZ4aBqFQiOzsbIhEIojFYojFYohEItq/8fFxXLlyBUtLSxHThOaxZ88eCIXC2AWPj48DAH799deYBaelpUEoFLL6++qrrwAAH3zwAav0YrGYthLkJHh6ehpzc3MAgPn5eUxPT8csWiAQMJbboaEhmM1mAIDFYsHQ0BDvPDkJtlgsYdt+v59LFrxw/fr1sG2Xy8UrL06C6+vrw7bFYjEvYi747rvvwrYlEgmvvDjV0g6HI+p2ohBP3qh32OFwoLe3l1VGvb29sNvtvC8kFCMjI9DpdKzS6nQ6mEwm1nnTPg/7/X6MjY1hcnKS/VX+P/bu3YuysjLk5uYypv36669x8uRJZGRkQCQSwev1oqOjAz09PZx5CwsLcenSJRw+fBh+vx+rq6swmUx46aWXNqWjvcMDAwO8xAIbnZKBgQFeNXhzczMvscBGp6S5uRk//vhj1HS0grVaLYqLi3kRy+Vy1NXVRe0RRcKNGzeg0Wh48apUKnR1daG6ujpqOtpKy+VyQa1Wo6SkBLdv38aFCxeoY5988gn1+fLly9TnL774ApWVlXjiiSfgdDqxtrbG+aJ9Ph/0ej3OnDkDvV6PW7duUceOHDlCfR4dHaU+v/DCC7h27RrUajWcTidWV1ejctAKJggCKysryMzMhE6nw+zsLO3Joft1Oh0ePHiApaUlduqi8BYVFaGvr48Vb19fHyfeqM2Sz+dj3QTEs4lKJC+njsfWJoptkxUrtjZRbJssOnASXFtbG3U7UXjrrbeibnMBJ8FZWVkoKysDABQUFCArK4s3MRcoFArqrlZXV0OhUPDOi5Ngn8+Hw4cPQyqV4tlnn4XP5+NNTIIgmH0An8+HV155BUqlEq+++ior3kAgQLuf84jH2toajh8/jvX1da6n0sLj8SAjI4MxHUEQ+PTTT1nlSRAEHjx4QHtsW8e0RCIR7HY79uzZE/GOcEUgEEAgEMDff/8NkUgUdnxbBR85cgRmsxkCgQD5+fkRh2XYIhAI4P79+5iamoLD4cCxY8fC0myr4KeeegoCgQBWqxVzc3NIS0uLedQyGAxi165dKC8vR1FRUVialHu405ESvNPxyAlOuYfJRMo9fFjdw3iBq3vIBDbu4bYK3uoextKtJEH2yWNyD8nyEG8wuYcffvgha3cxru6h3W5Hf39/QoyzaE6fyWRCQ0MDZ+MsLu7h8vIyent7sby8zIk8VkxNTUGn08Fms8UlP04Nn9/vR39/f9w8JLZwu91obGzk5CFFAq+Wfnh4mDKok4mWlha0trbGlAfvrs3k5CQGBgaSYoiHoqenB1evXk2OIb4VDocDJpMp6eXaYrGgsbGRV7mOufPq8XgwMDCQ9HI9NzeHq1evci7XvDseUqkUWq0W6enpCAaDcDqd8Hq9fLNjDaVSiRs3bkAikfDi5XSHxWIxampqAAALCwsYGhrC7Ows5ufnEypWIpHAYDAAACYmJnD9+nXevJwEnzp1CjKZDBUVFQCAsbGxpJTfjz76CFVVVWhqagIAdHR08G6XWQuuqanB7t274fV6UVpaiuzsbAAbTzyJhMFggEKhgNfrRX19PWQyGQDAaDTyyo+V4JqaGshkMsricLlcOH78OICNCWp8p0cwwWAwoKqqahPvG2+8AWDji+7u7uacJyvBMpksrKxkZWVR0yLGxsY4E7NBVVVVGK9CoaCmRXR0dHDOk5VguorB5/OhoqICYrE4YZ2PSLxXrlyBRCLhNcE1pufh1dVVXLx4EWlpaRGnJzCBjXtId87g4GBU3ri5h1uJ5+fnY8mCtXvIhTflHoYg5R4mEyn3MAl45KyWlOCdjkdOcMo9TCZS7mHKPeSGhLmH5LBOrAGXXN1DcliHrgdFgsk95CzYbrfDbDbD7/ejrKwstpmtNO5hJJhMJrS2tsLtdqOpqQlarTZi2mjuIWvBfr8fZrN50/iz2WzG9PQ0nn/+edonEzZgij10uVwwGo2bxp+NRiOGhobw+uuv005hjtk9JENz6AbbyWCuRESp2Ww2NDc30w62WywW6HQ6zoOIrO5wbm4uzp8/j5WVFXR2dm46VldXh3379mF5eTku86dDUVxcjK6uLthstrClqrq6unDo0CHOvKwE+/1+LC4uUqG0oZiYmIhaicQCkvfu3bthxwYGBnhVmpy6NnSD7kxxQvEA3Zo+fIsQJ8F040j379/nRcwFdF4037FwToLphkUXFxd5EXMB3chkUgQ7nc6wfT6fL+Gm+H///Re2z+Vy8TLFGSut/v5+RsPsm2++AbDR84pXLFNDQwPjelxnz54FsBFK+/nnn7PKl/EOa7VaVmHvYrE4au+HK27evMkq7F0ikeDmzZus82UU7HK5qG8yGs6ePct73gUdfD4f2tvbGdO1t7dzaocZBRMEAaFQSBnhdKipqYFQKORlm0TjzcvLo4xwOhgMBuTl5XHiZVVp+f1+yGQy2iDq4uJiyGSyhFRcfr8fVVVVtEHUGo0GVVVVnHlZ19JerxdqtRpSqZTaJ5VKoVarEzrdwev1Qq/XQ6lUUvuUSiX0ej0vXk7N0srKCjQaDbXmjUajwcrKCmfSULD5Oa6srKCtrQ0SiQQSiQRtbW2MvHFzD0MrsXhUUmzdw9BKjKmSiqt7SBBE3Conru4hOa8kWqBnyj3cgl0EQcQ0cMYWW3kIgkiKe7iVV2C1Won09PSYxLCB1+tFZmYmtb22tobt4E1LBimATaQAkiKWjveR85ZSgnc6Uu5hMpFyD1PuITekYg/ZxB52dXXFTMo2n1D38NSpU7zjDEP/yHzisnJpIsBm5dJ45rntgpONuITTJirctqWl
JabjdGAUvNUEp0NouxcvtLa2MgZhmUwmzqKjCrbb7aw9HC5pmWAymVivb2kymTgFe0RslrbeNTa1rtlshkgkQn5+PusL2Iqtd42NdWM0GpGVlYWTJ08ypo14h/nGI8Uax8Q3XJbteREFV1ZW8iLmex6Ja9euJfS8iD9puVyOmpoa3L59G8DmVUq3glzNlAzoimVgvrq6GmlpadDr9QA2r1K6FeRqpmRAFxveiIK9Xi8VZ/jLL78whulUVFTELJbkJeMMjUYjI29TUxNrsQBDX5qMM4w0qE2iuLgYpaWlcXMPyThDphWMNRoN6uvrOfGyskvVanXUNGq1Oq5WKclL/qwjQa/Xc+Zl1dNi8nFi9ZeSyZvqS0erjbmAbT6kT7X1lQp8QeYTyasKE8w3aJJvPh6PBwRBYGZmJi68MzMzqdjDUDx67mEsFxwrUrGHSUCqWdrpSAne6dix7uFzzz1HW0s/FO7h/v37UVBQgMceeyxm99DlcsFut2NwcBACgSDsnTHb7h4ePHgQxcXFcTPTMjIyIJFIcOfOHfz+++8Pl2DSPSTftxQv93DXrl0oKirCnTt3wtIwFhq62aputxtms5maCR8pHROEQiEkEgntew/X1tbC3mu4tLSE9vZ2nD9/njZd6Pn79u3jHoo3OTmJsbExnDlzBsDGWLXdbqcNoent7YVCocChQ4dYh+VFij3s7u5GR0cH9YWaTCbcunVr0yMkmfbChQvQarXQarVUWF4wGER6ejp7wdPT0zCbzfB4PJv2R7NT/H4/rFYrJicnUVZWxnowPtTpGxoagtFoDAsIi2anuN1ufPnll+ju7salS5dw4sQJKk+64hH2FTgcDgwPD4eJZQu/3w+bzcZ5JSSLxYL333+fNvqNDdxuN3p6ehjPDxMsl8tjjkw5ceIENfOVLVQqFd58882YeA0GA7WiWiSECfb5fPjpp58AbKyBx/bCpVIp6urqAADff/895wf6tbU1fPbZZwCAjz/+mPHCSSiVSsr3eueddxh5aWtpMrwuJyeH9cuczp07R5UZvktO/fnnnwCAY8eOoa+vj9U5nZ2d1CsH2fhaUZulwcFB1kGNi4uLjK/gYwuDwcCJ9+2332add9RmyW63w+12Q6FQIC8vD5cvX8bCwgI19VcqlcJms8HhcGBycjJuSz6aTCbMzs5Cq9Xi6NGjGB0dxcTEBJxOJyQSCZRKJUZGRjAyMoL//e9/jBFsoaAVLJfLKZvD4XBQ37ZEItlUph0OB238gVwu5ySQhEqlopo+i8VCtbsymWxTmb579y6t46BSqRg5aAXX1tbi22+/DZvY5XQ6aQMuQyGVSlFbW8trgb6WlhY0NDRgYmJi0/6ZmRnGYVylUomWlhbGeGbaMuzxeKDRaKhVDdkgOzsblZWVOHfuHO82fH19HW1tbWhqamL9ul2ZTIbXXnsNnZ2drN7yFfFFjy6XC6WlpVCpVFhaWsK///5LVfnz8/PIy8sDAOzevRu5ubnIycmBx+OJKZ6YIAj4fD7U19ejsbERf/zxB4aHhykrdHx8HE8//TQAYP/+/VAqlVAoFJx4I1ZapGiyrBw4cAD37t2DXC7HgQMHAGx0QXNycrC+vh63VR5Cecnw3J6eHqhUKpSXlwPY6OI+88wzALiHxnN6PPz555/D9h08eJATIR/Qzd9gE/FKh9SYFlvI5XKqPMUCrlFuKpUKp0+fZkwXDAZp93MSLBaLUVJSgqNHjyIjIwNerzfmOR0ul4sx9lAikeD06dN4+eWXIZVKGXnj5h5evHgRXq8XHo+Hd9MTCpFIhHv37iEnJydqp/+HH36A1+uFy+VirKTi6h7Gug7tVpDuIUEQKCwsjOge/vPPP6zyCwQCWF5exl9//YX5+Xla93DbzTSbzQar1Yr19fW4uoclJSUp9xB4BJullOCdjkdO8P8BGCQ0hnF1DxUAAAAASUVORK5CYII=);
-      width: 27px;
-      height: 30px;
-    }
-    :host.active {
-      cursor: auto;
-    }
-    </style>
-  </template>
-  
-</polymer-element><polymer-element name="tr-ui-b-mouse-mode-selector">
-  <template>
-    <style>
-    :host {
-
-      -webkit-user-drag: element;
-      -webkit-user-select: none;
-
-      background: #DDD;
-      border: 1px solid #BBB;
-      border-radius: 4px;
-      box-shadow: 0 1px 2px rgba(0,0,0,0.2);
-      left: calc(100% - 120px);
-      position: absolute;
-      top: 100px;
-      user-select: none;
-      width: 29px;
-      z-index: 20;
-    }
-
-    .drag-handle {
-      background: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADwAAAChCAYAAACbBNzvAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAABV0RVh0Q3JlYXRpb24gVGltZQA3LzE2LzEzRNEKUwAAABx0RVh0U29mdHdhcmUAQWRvYmUgRmlyZXdvcmtzIENTNui8sowAAA9aSURBVHic7V1rTFvl//+UrgUmZWMpbLa6cLErwpYxkqLGkjAG88WSbmumGUllvlmAJctMRtybvlHrLXiJUekMIZuYSCL5gS+EuLIXGEGjqCsllCEW6xQECgzWG7S05/+C/zkp9LTn0gsL6ych9JzznOdzPj19Luf5PN/nCN59913ixRdfRFdXFxLx/2GDgCAIYmpqCoWFhUjE/4cNae+99x4AIFH/Hzak7nDqDu+wOyyw2WzEdl9EMpG23ReQbKQE73Q8coJ3bfcFWK1W/Pbbb/D7/UhLi/37DwaDEIvFKC8vR0lJSdjxbRVstVoxPDyMxx9/HAUFBcjMzIRAIOCdXzAYhNvtht1ux/DwMACEid5WwSMjI3jyySdRXFwMsVgMoVAYk2CCIJCZmYns7GyMjo5iZGQkPoKXl5exd+9e3hdGIhgMIj8/H5mZmRCJRIyCyQ5NJBAEgUAgAKFQiIKCAiwsLISl4VxoHA4H+vv74Xa7uZ4aBqFQiOzsbIhEIojFYojFYohEItq/8fFxXLlyBUtLSxHThOaxZ88eCIXC2AWPj48DAH799deYBaelpUEoFLL6++qrrwAAH3zwAav0YrGYthLkJHh6ehpzc3MAgPn5eUxPT8csWiAQMJbboaEhmM1mAIDFYsHQ0BDvPDkJtlgsYdt+v59LFrxw/fr1sG2Xy8UrL06C6+vrw7bFYjEvYi747rvvwrYlEgmvvDjV0g6HI+p2ohBP3qh32OFwoLe3l1VGvb29sNvtvC8kFCMjI9DpdKzS6nQ6mEwm1nnTPg/7/X6MjY1hcnKS/VX+P/bu3YuysjLk5uYypv36669x8uRJZGRkQCQSwev1oqOjAz09PZx5CwsLcenSJRw+fBh+vx+rq6swmUx46aWXNqWjvcMDAwO8xAIbnZKBgQFeNXhzczMvscBGp6S5uRk//vhj1HS0grVaLYqLi3kRy+Vy1NXVRe0RRcKNGzeg0Wh48apUKnR1daG6ujpqOtpKy+VyQa1Wo6SkBLdv38aFCxeoY5988gn1+fLly9TnL774ApWVlXjiiSfgdDqxtrbG+aJ9Ph/0ej3OnDkDvV6PW7duUceOHDlCfR4dHaU+v/DCC7h27RrUajWcTidWV1ejctAKJggCKysryMzMhE6nw+zsLO3Joft1Oh0ePHiApaUlduqi8BYVFaGvr48Vb19fHyfeqM2Sz+dj3QTEs4lKJC+njsfWJoptkxUrtjZRbJssOnASXFtbG3U7UXjrrbeibnMBJ8FZWVkoKysDABQUFCArK4s3MRcoFArqrlZXV0OhUPDOi5Ngn8+Hw4cPQyqV4tlnn4XP5+NNTIIgmH0An8+HV155BUqlEq+++ior3kAgQLuf84jH2toajh8/jvX1da6n0sLj8SAjI4MxHUEQ+PTTT1nlSRAEHjx4QHtsW8e0RCIR7HY79uzZE/GOcEUgEEAgEMDff/8NkUgUdnxbBR85cgRmsxkCgQD5+fkRh2XYIhAI4P79+5iamoLD4cCxY8fC0myr4KeeegoCgQBWqxVzc3NIS0uLedQyGAxi165dKC8vR1FRUVialHu405ESvNPxyAlOuYfJRMo9fFjdw3iBq3vIBDbu4bYK3uoextKtJEH2yWNyD8nyEG8wuYcffvgha3cxru6h3W5Hf39/QoyzaE6fyWRCQ0MDZ+MsLu7h8vIyent7sby8zIk8VkxNTUGn08Fms8UlP04Nn9/vR39/f9w8JLZwu91obGzk5CFFAq+Wfnh4mDKok4mWlha0trbGlAfvrs3k5CQGBgaSYoiHoqenB1evXk2OIb4VDocDJpMp6eXaYrGgsbGRV7mOufPq8XgwMDCQ9HI9NzeHq1evci7XvDseUqkUWq0W6enpCAaDcDqd8Hq9fLNjDaVSiRs3bkAikfDi5XSHxWIxampqAAALCwsYGhrC7Ows5ufnEypWIpHAYDAAACYmJnD9+nXevJwEnzp1CjKZDBUVFQCAsbGxpJTfjz76CFVVVWhqagIAdHR08G6XWQuuqanB7t274fV6UVpaiuzsbAAbTzyJhMFggEKhgNfrRX19PWQyGQDAaDTyyo+V4JqaGshkMsricLlcOH78OICNCWp8p0cwwWAwoKqqahPvG2+8AWDji+7u7uacJyvBMpksrKxkZWVR0yLGxsY4E7NBVVVVGK9CoaCmRXR0dHDOk5VguorB5/OhoqICYrE4YZ2PSLxXrlyBRCLhNcE1pufh1dVVXLx4EWlpaRGnJzCBjXtId87g4GBU3ri5h1uJ5+fnY8mCtXvIhTflHoYg5R4mEyn3MAl45KyWlOCdjkdOcMo9TCZS7mHKPeSGhLmH5LBOrAGXXN1DcliHrgdFgsk95CzYbrfDbDbD7/ejrKwstpmtNO5hJJhMJrS2tsLtdqOpqQlarTZi2mjuIWvBfr8fZrN50/iz2WzG9PQ0nn/+edonEzZgij10uVwwGo2bxp+NRiOGhobw+uuv005hjtk9JENz6AbbyWCuRESp2Ww2NDc30w62WywW6HQ6zoOIrO5wbm4uzp8/j5WVFXR2dm46VldXh3379mF5eTku86dDUVxcjK6uLthstrClqrq6unDo0CHOvKwE+/1+LC4uUqG0oZiYmIhaicQCkvfu3bthxwYGBnhVmpy6NnSD7kxxQvEA3Zo+fIsQJ8F040j379/nRcwFdF4037FwToLphkUXFxd5EXMB3chkUgQ7nc6wfT6fL+Gm+H///Re2z+Vy8TLFGSut/v5+RsPsm2++AbDR84pXLFNDQwPjelxnz54FsBFK+/nnn7PKl/EOa7VaVmHvYrE4au+HK27evMkq7F0ikeDmzZus82UU7HK5qG8yGs6ePct73gUdfD4f2tvbGdO1t7dzaocZBRMEAaFQSBnhdKipqYFQKORlm0TjzcvLo4xwOhgMBuTl5XHiZVVp+f1+yGQy2iDq4uJiyGSyhFRcfr8fVVVVtEHUGo0GVVVVnHlZ19JerxdqtRpSqZTaJ5VKoVarEzrdwev1Qq/XQ6lUUvuUSiX0ej0vXk7N0srKCjQaDbXmjUajwcrKCmfSULD5Oa6srKCtrQ0SiQQSiQRtbW2MvHFzD0MrsXhUUmzdw9BKjKmSiqt7SBBE3Conru4hOa8kWqBnyj3cgl0EQcQ0cMYWW3kIgkiKe7iVV2C1Won09PSYxLCB1+tFZmYmtb22tobt4E1LBimATaQAkiKWjveR85ZSgnc6Uu5hMpFyD1PuITekYg/ZxB52dXXFTMo2n1D38NSpU7zjDEP/yHzisnJpIsBm5dJ45rntgpONuITTJirctqWlJabjdG
AUvNUEp0NouxcvtLa2MgZhmUwmzqKjCrbb7aw9HC5pmWAymVivb2kymTgFe0RslrbeNTa1rtlshkgkQn5+PusL2Iqtd42NdWM0GpGVlYWTJ08ypo14h/nGI8Uax8Q3XJbteREFV1ZW8iLmex6Ja9euJfS8iD9puVyOmpoa3L59G8DmVUq3glzNlAzoimVgvrq6GmlpadDr9QA2r1K6FeRqpmRAFxveiIK9Xi8VZ/jLL78whulUVFTELJbkJeMMjUYjI29TUxNrsQBDX5qMM4w0qE2iuLgYpaWlcXMPyThDphWMNRoN6uvrOfGyskvVanXUNGq1Oq5WKclL/qwjQa/Xc+Zl1dNi8nFi9ZeSyZvqS0erjbmAbT6kT7X1lQp8QeYTyasKE8w3aJJvPh6PBwRBYGZmJi68MzMzqdjDUDx67mEsFxwrUrGHSUCqWdrpSAne6dix7uFzzz1HW0s/FO7h/v37UVBQgMceeyxm99DlcsFut2NwcBACgSDsnTHb7h4ePHgQxcXFcTPTMjIyIJFIcOfOHfz+++8Pl2DSPSTftxQv93DXrl0oKirCnTt3wtIwFhq62aputxtms5maCR8pHROEQiEkEgntew/X1tbC3mu4tLSE9vZ2nD9/njZd6Pn79u3jHoo3OTmJsbExnDlzBsDGWLXdbqcNoent7YVCocChQ4dYh+VFij3s7u5GR0cH9YWaTCbcunVr0yMkmfbChQvQarXQarVUWF4wGER6ejp7wdPT0zCbzfB4PJv2R7NT/H4/rFYrJicnUVZWxnowPtTpGxoagtFoDAsIi2anuN1ufPnll+ju7salS5dw4sQJKk+64hH2FTgcDgwPD4eJZQu/3w+bzcZ5JSSLxYL333+fNvqNDdxuN3p6ehjPDxMsl8tjjkw5ceIENfOVLVQqFd58882YeA0GA7WiWiSECfb5fPjpp58AbKyBx/bCpVIp6urqAADff/895wf6tbU1fPbZZwCAjz/+mPHCSSiVSsr3eueddxh5aWtpMrwuJyeH9cuczp07R5UZvktO/fnnnwCAY8eOoa+vj9U5nZ2d1CsH2fhaUZulwcFB1kGNi4uLjK/gYwuDwcCJ9+2332add9RmyW63w+12Q6FQIC8vD5cvX8bCwgI19VcqlcJms8HhcGBycjJuSz6aTCbMzs5Cq9Xi6NGjGB0dxcTEBJxOJyQSCZRKJUZGRjAyMoL//e9/jBFsoaAVLJfLKZvD4XBQ37ZEItlUph0OB238gVwu5ySQhEqlopo+i8VCtbsymWxTmb579y6t46BSqRg5aAXX1tbi22+/DZvY5XQ6aQMuQyGVSlFbW8trgb6WlhY0NDRgYmJi0/6ZmRnGYVylUomWlhbGeGbaMuzxeKDRaKhVDdkgOzsblZWVOHfuHO82fH19HW1tbWhqamL9ul2ZTIbXXnsNnZ2drN7yFfFFjy6XC6WlpVCpVFhaWsK///5LVfnz8/PIy8sDAOzevRu5ubnIycmBx+OJKZ6YIAj4fD7U19ejsbERf/zxB4aHhykrdHx8HE8//TQAYP/+/VAqlVAoFJx4I1ZapGiyrBw4cAD37t2DXC7HgQMHAGx0QXNycrC+vh63VR5Cecnw3J6eHqhUKpSXlwPY6OI+88wzALiHxnN6PPz555/D9h08eJATIR/Qzd9gE/FKh9SYFlvI5XKqPMUCrlFuKpUKp0+fZkwXDAZp93MSLBaLUVJSgqNHjyIjIwNerzfmOR0ul4sx9lAikeD06dN4+eWXIZVKGXnj5h5evHgRXq8XHo+Hd9MTCpFIhHv37iEnJydqp/+HH36A1+uFy+VirKTi6h7Gug7tVpDuIUEQKCwsjOge/vPPP6zyCwQCWF5exl9//YX5+Xla93DbzTSbzQar1Yr19fW4uoclJSUp9xB4BJullOCdjkdO8P8BGCQ0hnF1DxUAAAAASUVORK5CYII=) 2px 3px no-repeat;
-      background-repeat: no-repeat;
-      border-bottom: 1px solid #BCBCBC;
-      cursor: move;
-      display: block;
-      height: 13px;
-      width: 27px;
-    }
-
-    .tool-button {
-      background-position: center center;
-      background-repeat: no-repeat;
-      border-bottom: 1px solid #BCBCBC;
-      border-top: 1px solid #F1F1F1;
-      cursor: pointer;
-    }
-
-    .buttons > .tool-button:last-child {
-      border-bottom: none;
-    }
-
-    </style>
-    <div class="drag-handle"></div>
-    <div class="buttons">
-    </div>
-  </template>
-</polymer-element><style>
-.track-button{background-color:rgba(255,255,255,0.5);border:1px solid rgba(0,0,0,0.1);color:rgba(0,0,0,0.2);font-size:10px;height:12px;text-align:center;width:12px}.track-button:hover{background-color:rgba(255,255,255,1.0);border:1px solid rgba(0,0,0,0.5);box-shadow:0 0 .05em rgba(0,0,0,0.4);color:rgba(0,0,0,1)}.track-close-button{left:2px;position:absolute;top:2px}.track-collapse-button{left:3px;position:absolute;top:2px}
-</style><style>
-.drawing-container{-webkit-box-flex:1;display:inline;overflow:auto;overflow-x:hidden;position:relative}.drawing-container-canvas{-webkit-box-flex:1;display:block;pointer-events:none;position:absolute;top:0}
-</style><polymer-element name="tr-ui-heading">
-  <template>
-    <style>
-    :host {
-      background-color: rgb(243, 245, 247);
-      border-right: 1px solid #8e8e8e;
-      display: block;
-      height: 100%;
-      margin: 0;
-      padding: 0 5px 0 0;
-    }
-
-    heading {
-      display: block;
-      overflow-x: hidden;
-      text-align: left;
-      text-overflow: ellipsis;
-      white-space: nowrap;
-    }
-
-    #arrow {
-      -webkit-flex: 0 0 auto;
-      font-family: sans-serif;
-      margin-left: 5px;
-      margin-right: 5px;
-      width: 8px;
-    }
-
-    #link, #heading_content {
-      display: none;
-    }
-    </style>
-    <heading id="heading" on-click="{{onHeadingDivClicked_}}">
-      <span id="arrow"></span>
-      <span id="heading_content"></span>
-      <tr-ui-a-analysis-link id="link"></tr-ui-a-analysis-link>
-    </heading>
-  </template>
-
-  
-</polymer-element><style>
-.letter-dot-track {
-  height: 18px;
-}
-</style><style>
-.chart-track {
-  height: 30px;
-  position: relative;
-}
-</style><style>
-.power-series-track {
-  height: 90px;
-}
-</style><style>
-.spacing-track{height:4px}
-</style><style>
-.object-instance-track{height:18px}
-</style><style>
-.rect-track{height:18px}
-</style><style>
-.thread-track{-webkit-box-orient:vertical;display:-webkit-box;position:relative}
-</style><style>
-.process-track-header{-webkit-flex:0 0 auto;background-image:-webkit-gradient(linear,0 0,100% 0,from(#E5E5E5),to(#D1D1D1));border-bottom:1px solid #8e8e8e;border-top:1px solid white;font-size:75%}.process-track-name:before{content:'\25B8';padding:0 5px}.process-track-base.expanded .process-track-name:before{content:'\25BE'}
-</style><style>
-.model-track {
-  -webkit-box-flex: 1;
-}
-</style><style>
-.ruler-track{height:12px}.ruler-track.tall-mode{height:30px}
-</style><polymer-element name="tr-ui-timeline-track-view">
-  <template>
-    <style>
-    :host {
-      -webkit-box-orient: vertical;
-      display: -webkit-box;
-      position: relative;
-    }
-
-    :host ::content * {
-      -webkit-user-select: none;
-      cursor: default;
-    }
-
-    #drag_box {
-      background-color: rgba(0, 0, 255, 0.25);
-      border: 1px solid rgb(0, 0, 96);
-      font-size: 75%;
-      position: fixed;
-    }
-
-    #hint_text {
-      position: absolute;
-      bottom: 6px;
-      right: 6px;
-      font-size: 8pt;
-    }
-    </style>
-    <content></content>
-
-    <div id="drag_box"></div>
-    <div id="hint_text"></div>
-
-    <tv-ui-b-hotkey-controller id="hotkey_controller">
-    </tv-ui-b-hotkey-controller>
-  </template>
-
-  
 </polymer-element><polymer-element name="tr-ui-find-control">
   <template>
     <style>
@@ -1863,13 +1887,11 @@
 
     <div class="root hidden" id="root" on-focus="{{ onConsoleFocus }}" tabindex="0">
       <div id="history"></div>
-      <div id="prompt" on-blur="{{ onConsoleBlur }}" on-keydown="{{ promptKeyDown }}" on-keypress="{{ promptKeyPress }}">
-  
+      <div id="prompt" on-blur="{{ onConsoleBlur }}" on-keydown="{{ promptKeyDown }}" on-keypress="{{ promptKeyPress }}"></div>
+    </div>
+  </template>
 
   
-
-</div></div></template></polymer-element><polymer-element name="tr-ui-side-panel">
-  
 </polymer-element><polymer-element is="HTMLUnknownElement" name="tr-ui-side-panel-container">
   <template>
     <style>
@@ -2035,6 +2057,11 @@
       </div>
 
       <div class="pair">
+        <div class="command"><span class="mod"></span>-click/drag</div>
+        <div class="action">Add events to the current selection</div>
+      </div>
+
+      <div class="pair">
         <div class="command">double click</div>
         <div class="action">Select all events with same title</div>
       </div>
@@ -2090,11 +2117,6 @@
       </div>
 
       <div class="pair">
-        <div class="command"><span class="mod"></span></div>
-        <div class="action">Hold for temporary zoom</div>
-      </div>
-
-      <div class="pair">
         <div class="command">/</div>
         <div class="action">Search</div>
       </div>
@@ -2147,11 +2169,11 @@
   </template>
 
   
-</polymer-element><polymer-element name="tr-ui-u-array-of-numbers-span">
+</polymer-element><polymer-element name="tr-v-ui-array-of-numbers-span">
   <template>
   </template>
   
-</polymer-element><polymer-element name="tr-ui-u-generic-table-view">
+</polymer-element><polymer-element name="tr-v-ui-generic-table-view">
   <template>
     <style>
     :host {
@@ -2173,11 +2195,11 @@
       overflow: auto;
     }
     </style>
-    <tr-ui-u-generic-table-view id="gtv"></tr-ui-u-generic-table-view>
+    <tr-v-ui-generic-table-view id="gtv"></tr-v-ui-generic-table-view>
   </template>
 
   
-</polymer-element><polymer-element name="tr-ui-u-preferred-display-unit">
+</polymer-element><polymer-element name="tr-v-ui-preferred-display-unit">
   
 </polymer-element><polymer-element name="tr-ui-timeline-view">
   <template>
@@ -2279,28 +2301,8 @@
     <tr-ui-b-drag-handle id="drag_handle"></tr-ui-b-drag-handle>
     <tr-ui-a-analysis-view id="analysis"></tr-ui-a-analysis-view>
 
-    <tr-ui-u-preferred-display-unit id="display_unit">
-    </tr-ui-u-preferred-display-unit>
-  </template>
-
-  
-</polymer-element><polymer-element extends="tr-ui-side-panel" name="tr-ui-e-s-alerts-side-panel">
-  <template>
-    <style>
-    :host {
-      display: block;
-      width: 250px;
-    }
-    #content {
-      flex-direction: column;
-      display: flex;
-    }
-    </style>
-
-    <div id="content">
-      <toolbar id="toolbar"></toolbar>
-      <result-area id="result_area"></result-area>
-    </div>
+    <tr-v-ui-preferred-display-unit id="display_unit">
+    </tr-v-ui-preferred-display-unit>
   </template>
 
   
@@ -2346,50 +2348,7 @@
 function exportTo(namespace,fn){var obj=exportPath(namespace);var exports=fn();for(var propertyName in exports){var propertyDescriptor=Object.getOwnPropertyDescriptor(exports,propertyName);if(propertyDescriptor)
 Object.defineProperty(obj,propertyName,propertyDescriptor);}};function initialize(){if(global.isVinn){tr.isVinn=true;}else if(global.process&&global.process.versions.node){tr.isNode=true;}else{tr.isVinn=false;tr.isNode=false;tr.doc=document;tr.isMac=/Mac/.test(navigator.platform);tr.isWindows=/Win/.test(navigator.platform);tr.isChromeOS=/CrOS/.test(navigator.userAgent);tr.isLinux=/Linux/.test(navigator.userAgent);}
 tr.isHeadless=tr.isVinn||tr.isNode;}
-return{initialize:initialize,exportTo:exportTo,isExported:isExported,isDefined:isDefined,showPanic:showPanic,hasPanic:hasPanic,getPanicText:getPanicText};})();tr.initialize();'use strict';tr.exportTo('tr.b',function(){function EventTarget(){}
-EventTarget.decorate=function(target){for(var k in EventTarget.prototype){if(k=='decorate')
-continue;var v=EventTarget.prototype[k];if(typeof v!=='function')
-continue;target[k]=v;}};EventTarget.prototype={addEventListener:function(type,handler){if(!this.listeners_)
-this.listeners_=Object.create(null);if(!(type in this.listeners_)){this.listeners_[type]=[handler];}else{var handlers=this.listeners_[type];if(handlers.indexOf(handler)<0)
-handlers.push(handler);}},removeEventListener:function(type,handler){if(!this.listeners_)
-return;if(type in this.listeners_){var handlers=this.listeners_[type];var index=handlers.indexOf(handler);if(index>=0){if(handlers.length==1)
-delete this.listeners_[type];else
-handlers.splice(index,1);}}},dispatchEvent:function(event){if(!this.listeners_)
-return true;var self=this;event.__defineGetter__('target',function(){return self;});var realPreventDefault=event.preventDefault;event.preventDefault=function(){realPreventDefault.call(this);this.rawReturnValue=false;};var type=event.type;var prevented=0;if(type in this.listeners_){var handlers=this.listeners_[type].concat();for(var i=0,handler;handler=handlers[i];i++){if(handler.handleEvent)
-prevented|=handler.handleEvent.call(handler,event)===false;else
-prevented|=handler.call(this,event)===false;}}
-return!prevented&&event.rawReturnValue;},hasEventListener:function(type){return this.listeners_[type]!==undefined;}};var EventTargetHelper={decorate:function(target){for(var k in EventTargetHelper){if(k=='decorate')
-continue;var v=EventTargetHelper[k];if(typeof v!=='function')
-continue;target[k]=v;}
-target.listenerCounts_={};},addEventListener:function(type,listener,useCapture){this.__proto__.addEventListener.call(this,type,listener,useCapture);if(this.listenerCounts_[type]===undefined)
-this.listenerCounts_[type]=0;this.listenerCounts_[type]++;},removeEventListener:function(type,listener,useCapture){this.__proto__.removeEventListener.call(this,type,listener,useCapture);this.listenerCounts_[type]--;},hasEventListener:function(type){return this.listenerCounts_[type]>0;}};return{EventTarget:EventTarget,EventTargetHelper:EventTargetHelper};});'use strict';tr.exportTo('tr.b',function(){var Event;if(tr.isHeadless){function HeadlessEvent(type,opt_bubbles,opt_preventable){this.type=type;this.bubbles=(opt_bubbles!==undefined?!!opt_bubbles:false);this.cancelable=(opt_preventable!==undefined?!!opt_preventable:false);this.defaultPrevented=false;this.cancelBubble=false;};HeadlessEvent.prototype={preventDefault:function(){this.defaultPrevented=true;},stopPropagation:function(){this.cancelBubble=true;}};Event=HeadlessEvent;}else{function TrEvent(type,opt_bubbles,opt_preventable){var e=tr.doc.createEvent('Event');e.initEvent(type,!!opt_bubbles,!!opt_preventable);e.__proto__=global.Event.prototype;return e;};TrEvent.prototype={__proto__:global.Event.prototype};Event=TrEvent;}
-function dispatchSimpleEvent(target,type,opt_bubbles,opt_cancelable){var e=new tr.b.Event(type,opt_bubbles,opt_cancelable);return target.dispatchEvent(e);}
-return{Event:Event,dispatchSimpleEvent:dispatchSimpleEvent};});'use strict';tr.exportTo('tr.b',function(){function max(a,b){if(a===undefined)
-return b;if(b===undefined)
-return a;return Math.max(a,b);}
-function IntervalTree(beginPositionCb,endPositionCb){this.beginPositionCb_=beginPositionCb;this.endPositionCb_=endPositionCb;this.root_=undefined;this.size_=0;}
-IntervalTree.prototype={insert:function(datum){var startPosition=this.beginPositionCb_(datum);var endPosition=this.endPositionCb_(datum);var node=new IntervalTreeNode(datum,startPosition,endPosition);this.size_++;this.root_=this.insertNode_(this.root_,node);this.root_.colour=Colour.BLACK;return datum;},insertNode_:function(root,node){if(root===undefined)
-return node;if(root.leftNode&&root.leftNode.isRed&&root.rightNode&&root.rightNode.isRed)
-this.flipNodeColour_(root);if(node.key<root.key)
-root.leftNode=this.insertNode_(root.leftNode,node);else if(node.key===root.key)
-root.merge(node);else
-root.rightNode=this.insertNode_(root.rightNode,node);if(root.rightNode&&root.rightNode.isRed&&(root.leftNode===undefined||!root.leftNode.isRed))
-root=this.rotateLeft_(root);if(root.leftNode&&root.leftNode.isRed&&root.leftNode.leftNode&&root.leftNode.leftNode.isRed)
-root=this.rotateRight_(root);return root;},rotateRight_:function(node){var sibling=node.leftNode;node.leftNode=sibling.rightNode;sibling.rightNode=node;sibling.colour=node.colour;node.colour=Colour.RED;return sibling;},rotateLeft_:function(node){var sibling=node.rightNode;node.rightNode=sibling.leftNode;sibling.leftNode=node;sibling.colour=node.colour;node.colour=Colour.RED;return sibling;},flipNodeColour_:function(node){node.colour=this.flipColour_(node.colour);node.leftNode.colour=this.flipColour_(node.leftNode.colour);node.rightNode.colour=this.flipColour_(node.rightNode.colour);},flipColour_:function(colour){return colour===Colour.RED?Colour.BLACK:Colour.RED;},updateHighValues:function(){this.updateHighValues_(this.root_);},updateHighValues_:function(node){if(node===undefined)
-return undefined;node.maxHighLeft=this.updateHighValues_(node.leftNode);node.maxHighRight=this.updateHighValues_(node.rightNode);return max(max(node.maxHighLeft,node.highValue),node.maxHighRight);},validateFindArguments_:function(queryLow,queryHigh){if(queryLow===undefined||queryHigh===undefined)
-throw new Error('queryLow and queryHigh must be defined');if((typeof queryLow!=='number')||(typeof queryHigh!=='number'))
-throw new Error('queryLow and queryHigh must be numbers');},findIntersection:function(queryLow,queryHigh){this.validateFindArguments_(queryLow,queryHigh);if(this.root_===undefined)
-return[];var ret=[];this.root_.appendIntersectionsInto_(ret,queryLow,queryHigh);return ret;},get size(){return this.size_;},get root(){return this.root_;},dump_:function(){if(this.root_===undefined)
-return[];return this.root_.dump();}};var Colour={RED:'red',BLACK:'black'};function IntervalTreeNode(datum,lowValue,highValue){this.lowValue_=lowValue;this.data_=[{datum:datum,high:highValue,low:lowValue}];this.colour_=Colour.RED;this.parentNode_=undefined;this.leftNode_=undefined;this.rightNode_=undefined;this.maxHighLeft_=undefined;this.maxHighRight_=undefined;}
-IntervalTreeNode.prototype={appendIntersectionsInto_:function(ret,queryLow,queryHigh){if(this.lowValue_>=queryHigh){if(!this.leftNode_)
-return;return this.leftNode_.appendIntersectionsInto_(ret,queryLow,queryHigh);}
-if(this.maxHighLeft_>queryLow){this.leftNode_.appendIntersectionsInto_(ret,queryLow,queryHigh);}
-if(this.highValue>queryLow){for(var i=(this.data.length-1);i>=0;--i){if(this.data[i].high<queryLow)
-break;ret.push(this.data[i].datum);}}
-if(this.rightNode_){this.rightNode_.appendIntersectionsInto_(ret,queryLow,queryHigh);}},get colour(){return this.colour_;},set colour(colour){this.colour_=colour;},get key(){return this.lowValue_;},get lowValue(){return this.lowValue_;},get highValue(){return this.data_[this.data_.length-1].high;},set leftNode(left){this.leftNode_=left;},get leftNode(){return this.leftNode_;},get hasLeftNode(){return this.leftNode_!==undefined;},set rightNode(right){this.rightNode_=right;},get rightNode(){return this.rightNode_;},get hasRightNode(){return this.rightNode_!==undefined;},set parentNode(parent){this.parentNode_=parent;},get parentNode(){return this.parentNode_;},get isRootNode(){return this.parentNode_===undefined;},set maxHighLeft(high){this.maxHighLeft_=high;},get maxHighLeft(){return this.maxHighLeft_;},set maxHighRight(high){this.maxHighRight_=high;},get maxHighRight(){return this.maxHighRight_;},get data(){return this.data_;},get isRed(){return this.colour_===Colour.RED;},merge:function(node){for(var i=0;i<node.data.length;i++)
-this.data_.push(node.data[i]);this.data_.sort(function(a,b){return a.high-b.high;});},dump:function(){var ret={};if(this.leftNode_)
-ret['left']=this.leftNode_.dump();ret['data']=this.data_.map(function(d){return[d.low,d.high];});if(this.rightNode_)
-ret['right']=this.rightNode_.dump();return ret;}};return{IntervalTree:IntervalTree};});'use strict';tr.exportTo('tr.b',function(){function asArray(arrayish){var values=[];for(var i=0;i<arrayish.length;i++)
+return{initialize:initialize,exportTo:exportTo,isExported:isExported,isDefined:isDefined,showPanic:showPanic,hasPanic:hasPanic,getPanicText:getPanicText};})();tr.initialize();'use strict';tr.exportTo('tr.b',function(){function asArray(arrayish){var values=[];for(var i=0;i<arrayish.length;i++)
 values.push(arrayish[i]);return values;}
 function compareArrays(x,y,elementCmp){var minLength=Math.min(x.length,y.length);for(var i=0;i<minLength;i++){var tmp=elementCmp(x[i],y[i]);if(tmp)
 return tmp;}
@@ -2451,7 +2410,25 @@
 values.push(value);return values;}
 function iterMapItems(map,fn,opt_this){opt_this=opt_this||this;for(var key of map.keys())
 fn.call(opt_this,key,map.get(key));}
-return{asArray:asArray,concatenateArrays:concatenateArrays,concatenateObjects:concatenateObjects,compareArrays:compareArrays,comparePossiblyUndefinedValues:comparePossiblyUndefinedValues,compareNumericWithNaNs:compareNumericWithNaNs,dictionaryLength:dictionaryLength,dictionaryKeys:dictionaryKeys,dictionaryValues:dictionaryValues,dictionaryContainsValue:dictionaryContainsValue,group:group,iterItems:iterItems,mapItems:mapItems,filterItems:filterItems,iterObjectFieldsRecursively:iterObjectFieldsRecursively,invertArrayOfDicts:invertArrayOfDicts,arrayToDict:arrayToDict,identity:identity,findFirstIndexInArray:findFirstIndexInArray,findFirstInArray:findFirstInArray,findFirstKeyInDictMatching:findFirstKeyInDictMatching,mapValues:mapValues,iterMapItems:iterMapItems};});'use strict';tr.exportTo('tr.b',function(){function Range(){this.isEmpty_=true;this.min_=undefined;this.max_=undefined;};Range.prototype={__proto__:Object.prototype,reset:function(){this.isEmpty_=true;this.min_=undefined;this.max_=undefined;},get isEmpty(){return this.isEmpty_;},addRange:function(range){if(range.isEmpty)
+return{asArray:asArray,concatenateArrays:concatenateArrays,concatenateObjects:concatenateObjects,compareArrays:compareArrays,comparePossiblyUndefinedValues:comparePossiblyUndefinedValues,compareNumericWithNaNs:compareNumericWithNaNs,dictionaryLength:dictionaryLength,dictionaryKeys:dictionaryKeys,dictionaryValues:dictionaryValues,dictionaryContainsValue:dictionaryContainsValue,group:group,iterItems:iterItems,mapItems:mapItems,filterItems:filterItems,iterObjectFieldsRecursively:iterObjectFieldsRecursively,invertArrayOfDicts:invertArrayOfDicts,arrayToDict:arrayToDict,identity:identity,findFirstIndexInArray:findFirstIndexInArray,findFirstInArray:findFirstInArray,findFirstKeyInDictMatching:findFirstKeyInDictMatching,mapValues:mapValues,iterMapItems:iterMapItems};});'use strict';tr.exportTo('tr.b',function(){function convertEventsToRanges(events){return events.map(function(event){return tr.b.Range.fromExplicitRange(event.start,event.end);});}
+function mergeRanges(inRanges,mergeThreshold,mergeFunction){var remainingEvents=inRanges.slice();remainingEvents.sort(function(x,y){return x.min-y.min;});if(remainingEvents.length<=1){var merged=[];if(remainingEvents.length==1){merged.push(mergeFunction(remainingEvents));}
+return merged;}
+var mergedEvents=[];var currentMergeBuffer=[];var rightEdge;function beginMerging(){currentMergeBuffer.push(remainingEvents[0]);remainingEvents.splice(0,1);rightEdge=currentMergeBuffer[0].max;}
+function flushCurrentMergeBuffer(){if(currentMergeBuffer.length==0)
+return;mergedEvents.push(mergeFunction(currentMergeBuffer));currentMergeBuffer=[];if(remainingEvents.length!=0)
+beginMerging();}
+beginMerging();while(remainingEvents.length){var currentEvent=remainingEvents[0];var distanceFromRightEdge=currentEvent.min-rightEdge;if(distanceFromRightEdge<mergeThreshold){rightEdge=Math.max(rightEdge,currentEvent.max);remainingEvents.splice(0,1);currentMergeBuffer.push(currentEvent);continue;}
+flushCurrentMergeBuffer();}
+flushCurrentMergeBuffer();return mergedEvents;}
+function findEmptyRangesBetweenRanges(inRanges,opt_totalRange){if(opt_totalRange&&opt_totalRange.isEmpty)
+opt_totalRange=undefined;var emptyRanges=[];if(!inRanges.length){if(opt_totalRange)
+emptyRanges.push(opt_totalRange);return emptyRanges;}
+inRanges=inRanges.slice();inRanges.sort(function(x,y){return x.min-y.min;});if(opt_totalRange&&(opt_totalRange.min<inRanges[0].min)){emptyRanges.push(tr.b.Range.fromExplicitRange(opt_totalRange.min,inRanges[0].min));}
+inRanges.forEach(function(range,index){for(var otherIndex=0;otherIndex<inRanges.length;++otherIndex){if(index===otherIndex)
+continue;var other=inRanges[otherIndex];if(other.min>range.max){emptyRanges.push(tr.b.Range.fromExplicitRange(range.max,other.min));return;}
+if(other.max>range.max){return;}}
+if(opt_totalRange&&(range.max<opt_totalRange.max)){emptyRanges.push(tr.b.Range.fromExplicitRange(range.max,opt_totalRange.max));}});return emptyRanges;}
+return{convertEventsToRanges:convertEventsToRanges,findEmptyRangesBetweenRanges:findEmptyRangesBetweenRanges,mergeRanges:mergeRanges};});'use strict';tr.exportTo('tr.b',function(){function Range(){this.isEmpty_=true;this.min_=undefined;this.max_=undefined;};Range.prototype={__proto__:Object.prototype,reset:function(){this.isEmpty_=true;this.min_=undefined;this.max_=undefined;},get isEmpty(){return this.isEmpty_;},addRange:function(range){if(range.isEmpty)
 return;this.addValue(range.min);this.addValue(range.max);},addValue:function(value){if(this.isEmpty_){this.max_=value;this.min_=value;this.isEmpty_=false;return;}
 this.max_=Math.max(this.max_,value);this.min_=Math.min(this.min_,value);},set min(min){this.isEmpty_=false;this.min_=min;},get min(){if(this.isEmpty_)
 return undefined;return this.min_;},get max(){if(this.isEmpty_)
@@ -2479,98 +2456,61 @@
 var first=binSearch(function(i){return this.min_===undefined||this.min_<=getValue(i);}.bind(this));var last=binSearch(function(i){return this.max_!==undefined&&this.max_<getValue(i);}.bind(this));return array.slice(first,last);}};Range.fromDict=function(d){if(d.isEmpty===true){return new Range();}else if(d.isEmpty===false){var range=new Range();range.min=d.min;range.max=d.max;return range;}else{throw new Error('Not a range');}};Range.fromExplicitRange=function(min,max){var range=new Range();range.min=min;range.max=max;return range;};Range.compareByMinTimes=function(a,b){if(!a.isEmpty&&!b.isEmpty)
 return a.min_-b.min_;if(a.isEmpty&&!b.isEmpty)
 return-1;if(!a.isEmpty&&b.isEmpty)
-return 1;return 0;};return{Range:Range};});'use strict';tr.exportTo('tr.b',function(){function addSingletonGetter(ctor){ctor.getInstance=function(){return ctor.instance_||(ctor.instance_=new ctor());};}
-function deepCopy(value){if(!(value instanceof Object)){if(value===undefined||value===null)
-return value;if(typeof value=='string')
-return value.substring();if(typeof value=='boolean')
-return value;if(typeof value=='number')
-return value;throw new Error('Unrecognized: '+typeof value);}
-var object=value;if(object instanceof Array){var res=new Array(object.length);for(var i=0;i<object.length;i++)
-res[i]=deepCopy(object[i]);return res;}
-if(object.__proto__!=Object.prototype)
-throw new Error('Can only clone simple types');var res={};for(var key in object){res[key]=deepCopy(object[key]);}
-return res;}
-function normalizeException(e){if(e===undefined||e===null){return{typeName:'UndefinedError',message:'Unknown: null or undefined exception',stack:'Unknown'};}
-if(typeof(e)=='string'){return{typeName:'StringError',message:e,stack:[e]};}
-var typeName;if(e.name){typeName=e.name;}else if(e.constructor){if(e.constructor.name){typeName=e.constructor.name;}else{typeName='AnonymousError';}}else{typeName='ErrorWithNoConstructor';}
-var msg=e.message?e.message:'Unknown';return{typeName:typeName,message:msg,stack:e.stack?e.stack:[msg]};}
-function stackTraceAsString(){return new Error().stack+'';}
-function stackTrace(){var stack=stackTraceAsString();stack=stack.split('\n');return stack.slice(2);}
-function getUsingPath(path,from_dict){var parts=path.split('.');var cur=from_dict;for(var part;parts.length&&(part=parts.shift());){if(!parts.length){return cur[part];}else if(part in cur){cur=cur[part];}else{return undefined;}}
-return undefined;}
-return{addSingletonGetter:addSingletonGetter,deepCopy:deepCopy,normalizeException:normalizeException,stackTrace:stackTrace,stackTraceAsString:stackTraceAsString,getUsingPath:getUsingPath};});'use strict';tr.exportTo('tr.b',function(){var ESTIMATED_IDLE_PERIOD_LENGTH_MILLISECONDS=10;var recordRAFStacks=false;var pendingPreAFs=[];var pendingRAFs=[];var pendingIdleCallbacks=[];var currentRAFDispatchList=undefined;var rafScheduled=false;var idleWorkScheduled=false;function scheduleRAF(){if(rafScheduled)
-return;rafScheduled=true;if(tr.isHeadless){Promise.resolve().then(function(){processRequests(false,0);},function(e){console.log(e.stack);throw e;});}else{if(window.requestAnimationFrame){window.requestAnimationFrame(processRequests.bind(this,false));}else{var delta=Date.now()-window.performance.now();window.webkitRequestAnimationFrame(function(domTimeStamp){processRequests(false,domTimeStamp-delta);});}}}
-function nativeRequestIdleCallbackSupported(){return!tr.isHeadless&&window.requestIdleCallback;}
-function scheduleIdleWork(){if(idleWorkScheduled)
-return;if(!nativeRequestIdleCallbackSupported()){scheduleRAF();return;}
-idleWorkScheduled=true;window.requestIdleCallback(processIdleWork);}
-function onAnimationFrameError(e,opt_stack){console.log(e.stack);if(tr.isHeadless)
-throw e;if(opt_stack)
-console.log(opt_stack);if(e.message)
-console.error(e.message,e.stack);else
-console.error(e);}
-function runTask(task,frameBeginTime){try{task.callback.call(task.context,frameBeginTime);}catch(e){tr.b.onAnimationFrameError(e,task.stack);}}
-function processRequests(forceAllTasksToRun,frameBeginTime){rafScheduled=false;var currentPreAFs=pendingPreAFs;currentRAFDispatchList=pendingRAFs;pendingPreAFs=[];pendingRAFs=[];var hasRAFTasks=currentPreAFs.length||currentRAFDispatchList.length;for(var i=0;i<currentPreAFs.length;i++)
-runTask(currentPreAFs[i],frameBeginTime);while(currentRAFDispatchList.length>0)
-runTask(currentRAFDispatchList.shift(),frameBeginTime);currentRAFDispatchList=undefined;if((!hasRAFTasks&&!nativeRequestIdleCallbackSupported())||forceAllTasksToRun){var rafCompletionDeadline=frameBeginTime+ESTIMATED_IDLE_PERIOD_LENGTH_MILLISECONDS;processIdleWork(forceAllTasksToRun,{timeRemaining:function(){return rafCompletionDeadline-window.performance.now();}});}
-if(pendingIdleCallbacks.length>0)
-scheduleIdleWork();}
-function processIdleWork(forceAllTasksToRun,deadline){idleWorkScheduled=false;while(pendingIdleCallbacks.length>0){runTask(pendingIdleCallbacks.shift());if(!forceAllTasksToRun&&(tr.isHeadless||deadline.timeRemaining()<=0)){break;}}
-if(pendingIdleCallbacks.length>0)
-scheduleIdleWork();}
-function getStack_(){if(!recordRAFStacks)
-return'';var stackLines=tr.b.stackTrace();stackLines.shift();return stackLines.join('\n');}
-function requestPreAnimationFrame(callback,opt_this){pendingPreAFs.push({callback:callback,context:opt_this||window,stack:getStack_()});scheduleRAF();}
-function requestAnimationFrameInThisFrameIfPossible(callback,opt_this){if(!currentRAFDispatchList){requestAnimationFrame(callback,opt_this);return;}
-currentRAFDispatchList.push({callback:callback,context:opt_this||window,stack:getStack_()});return;}
-function requestAnimationFrame(callback,opt_this){pendingRAFs.push({callback:callback,context:opt_this||window,stack:getStack_()});scheduleRAF();}
-function requestIdleCallback(callback,opt_this){pendingIdleCallbacks.push({callback:callback,context:opt_this||window,stack:getStack_()});scheduleIdleWork();}
-function forcePendingRAFTasksToRun(frameBeginTime){if(!rafScheduled)
-return;processRequests(false,frameBeginTime);}
-function forceAllPendingTasksToRunForTest(){if(!rafScheduled&&!idleWorkScheduled)
-return;processRequests(true,0);}
-return{onAnimationFrameError:onAnimationFrameError,requestPreAnimationFrame:requestPreAnimationFrame,requestAnimationFrame:requestAnimationFrame,requestAnimationFrameInThisFrameIfPossible:requestAnimationFrameInThisFrameIfPossible,requestIdleCallback:requestIdleCallback,forcePendingRAFTasksToRun:forcePendingRAFTasksToRun,forceAllPendingTasksToRunForTest:forceAllPendingTasksToRunForTest};});'use strict';tr.exportTo('tr.b',function(){function Task(runCb,thisArg){if(runCb!==undefined&&thisArg===undefined)
-throw new Error('Almost certainly, you meant to pass a thisArg.');this.runCb_=runCb;this.thisArg_=thisArg;this.afterTask_=undefined;this.subTasks_=[];}
-Task.prototype={subTask:function(cb,thisArg){if(cb instanceof Task)
-this.subTasks_.push(cb);else
-this.subTasks_.push(new Task(cb,thisArg));return this.subTasks_[this.subTasks_.length-1];},run:function(){if(this.runCb_!==undefined)
-this.runCb_.call(this.thisArg_,this);var subTasks=this.subTasks_;this.subTasks_=undefined;if(!subTasks.length)
-return this.afterTask_;for(var i=1;i<subTasks.length;i++)
-subTasks[i-1].afterTask_=subTasks[i];subTasks[subTasks.length-1].afterTask_=this.afterTask_;return subTasks[0];},after:function(cb,thisArg){if(this.afterTask_)
-throw new Error('Has an after task already');if(cb instanceof Task)
-this.afterTask_=cb;else
-this.afterTask_=new Task(cb,thisArg);return this.afterTask_;},enqueue:function(cb,thisArg){var lastTask=this;while(lastTask.afterTask_)
-lastTask=lastTask.afterTask_;return lastTask.after(cb,thisArg);}};Task.RunSynchronously=function(task){var curTask=task;while(curTask)
-curTask=curTask.run();}
-Task.RunWhenIdle=function(task){return new Promise(function(resolve,reject){var curTask=task;function runAnother(){try{curTask=curTask.run();}catch(e){reject(e);console.error(e.stack);return;}
-if(curTask){tr.b.requestIdleCallback(runAnother);return;}
-resolve();}
-tr.b.requestIdleCallback(runAnother);});}
-return{Task:Task};});'use strict';tr.exportTo('tr.b',function(){function _iterateElementDeeplyImpl(element,cb,thisArg,includeElement){if(includeElement){if(cb.call(thisArg,element))
-return true;}
-if(element.shadowRoot){if(_iterateElementDeeplyImpl(element.shadowRoot,cb,thisArg,false))
-return true;}
-for(var i=0;i<element.children.length;i++){if(_iterateElementDeeplyImpl(element.children[i],cb,thisArg,true))
-return true;}}
-function iterateElementDeeply(element,cb,thisArg){_iterateElementDeeplyImpl(element,cb,thisArg,false);}
-function findDeepElementMatchingPredicate(element,predicate){var foundElement=undefined;function matches(element){var match=predicate(element);if(!match)
-return false;foundElement=element;return true;}
-iterateElementDeeply(element,matches);return foundElement;}
-function findDeepElementsMatchingPredicate(element,predicate){var foundElements=[];function matches(element){var match=predicate(element);if(match){foundElements.push(element);}
-return false;}
-iterateElementDeeply(element,matches);return foundElements;}
-function findDeepElementMatching(element,selector){return findDeepElementMatchingPredicate(element,function(element){return element.matches(selector);});}
-function findDeepElementsMatching(element,selector){return findDeepElementsMatchingPredicate(element,function(element){return element.matches(selector);});}
-function findDeepElementWithTextContent(element,re){return findDeepElementMatchingPredicate(element,function(element){if(element.children.length!==0)
-return false;return re.test(element.textContent);});}
-return{iterateElementDeeply:iterateElementDeeply,findDeepElementMatching:findDeepElementMatching,findDeepElementsMatching:findDeepElementsMatching,findDeepElementMatchingPredicate:findDeepElementMatchingPredicate,findDeepElementsMatchingPredicate:findDeepElementsMatchingPredicate,findDeepElementWithTextContent:findDeepElementWithTextContent};});'use strict';tr.exportTo('tr.b.u',function(){var msDisplayMode={scale:1e-3,suffix:'ms',roundedLess:function(a,b){return Math.round(a*1000)<Math.round(b*1000);},format:function(ts){return new Number(ts).toLocaleString(undefined,{minimumFractionDigits:3})+' ms';}};var nsDisplayMode={scale:1e-9,suffix:'ns',roundedLess:function(a,b){return Math.round(a*1000000)<Math.round(b*1000000);},format:function(ts){return new Number(ts*1000000).toLocaleString(undefined,{maximumFractionDigits:0})+' ns';}};var TimeDisplayModes={ns:nsDisplayMode,ms:msDisplayMode};return{TimeDisplayModes:TimeDisplayModes};});'use strict';tr.exportTo('tr.b.u',function(){var TimeDisplayModes=tr.b.u.TimeDisplayModes;function max(a,b){if(a===undefined)
-return b;if(b===undefined)
-return a;return a.scale>b.scale?a:b;}
-var Units={reset:function(){this.currentTimeDisplayMode=TimeDisplayModes.ms;},timestampFromUs:function(us){return us/1000;},maybeTimestampFromUs:function(us){return us===undefined?undefined:us/1000;},get currentTimeDisplayMode(){return this.currentTimeDisplayMode_;},set currentTimeDisplayMode(value){if(this.currentTimeDisplayMode_==value)
-return;this.currentTimeDisplayMode_=value;this.dispatchEvent(new tr.b.Event('display-mode-changed'));},didPreferredTimeDisplayUnitChange:function(){var largest=undefined;var els=tr.b.findDeepElementsMatching(document.body,'tr-ui-u-preferred-display-unit');els.forEach(function(el){largest=max(largest,el.preferredTimeDisplayMode);});this.currentDisplayUnit=largest===undefined?TimeDisplayModes.ms:largest;},unitsByJSONName:{},fromJSON:function(object){var u=this.unitsByJSONName[object];if(u){return u;}
-throw new Error('Unrecognized unit');}};tr.b.EventTarget.decorate(Units);Units.reset();Units.timeDurationInMs={asJSON:function(){return'ms';},format:function(value){return Units.currentTimeDisplayMode_.format(value);}};Units.unitsByJSONName['ms']=Units.timeDurationInMs;Units.timeStampInMs={asJSON:function(){return'tsMs';},format:function(value){return Units.currentTimeDisplayMode_.format(value);}};Units.unitsByJSONName['tsMs']=Units.timeStampInMs;Units.normalizedPercentage={asJSON:function(){return'n%';},format:function(value){var tmp=new Number(Math.round(value*100000)/1000);return tmp.toLocaleString(undefined,{minimumFractionDigits:3})+'%';}};Units.unitsByJSONName['n%']=Units.normalizedPercentage;var SIZE_UNIT_PREFIXES=['','Ki','Mi','Gi','Ti'];Units.sizeInBytes={asJSON:function(){return'sizeInBytes';},format:function(value){var signPrefix='';if(value<0){signPrefix='-';value=-value;}
-var i=0;while(value>=1024&&i<SIZE_UNIT_PREFIXES.length-1){value/=1024;i++;}
-return signPrefix+value.toFixed(1)+' '+SIZE_UNIT_PREFIXES[i]+'B';}};Units.unitsByJSONName['sizeInBytes']=Units.sizeInBytes;Units.energyInJoules={asJSON:function(){return'J';},format:function(value){return value.toLocaleString(undefined,{minimumFractionDigits:3})+' J';}};Units.unitsByJSONName['J']=Units.energyInJoules;Units.powerInWatts={asJSON:function(){return'W';},format:function(value){return(value*1000.0).toLocaleString(undefined,{minimumFractionDigits:3})+' mW';}};Units.unitsByJSONName['W']=Units.powerInWatts;Units.unitlessNumber={asJSON:function(){return'unitless';},format:function(value){return value.toLocaleString(undefined,{minimumFractionDigits:3,maximumFractionDigits:3});}};Units.unitsByJSONName['unitless']=Units.unitlessNumber;return{Units:Units};});'use strict';tr.exportTo('tr.b',function(){function RegisteredTypeInfo(constructor,metadata){this.constructor=constructor;this.metadata=metadata;};var BASIC_REGISTRY_MODE='BASIC_REGISTRY_MODE';var TYPE_BASED_REGISTRY_MODE='TYPE_BASED_REGISTRY_MODE';var ALL_MODES={BASIC_REGISTRY_MODE:true,TYPE_BASED_REGISTRY_MODE:true};function ExtensionRegistryOptions(mode){if(mode===undefined)
+return 1;return 0;};return{Range:Range};});'use strict';tr.exportTo('tr.b',function(){function identity(d){return d;}
+function Statistics(){}
+Statistics.divideIfPossibleOrZero=function(numerator,denominator){if(denominator===0)
+return 0;return numerator/denominator;};Statistics.sum=function(ary,opt_func,opt_this){var func=opt_func||identity;var ret=0;for(var i=0;i<ary.length;i++)
+ret+=func.call(opt_this,ary[i],i);return ret;};Statistics.mean=function(ary,opt_func,opt_this){return Statistics.sum(ary,opt_func,opt_this)/ary.length;};Statistics.weightedMean=function(ary,weightCallback,opt_valueCallback,opt_this){var valueCallback=opt_valueCallback||identity;var numerator=0;var denominator=0;for(var i=0;i<ary.length;i++){var value=valueCallback.call(opt_this,ary[i],i);if(value===undefined)
+continue;var weight=weightCallback.call(opt_this,ary[i],i,value);numerator+=weight*value;denominator+=weight;}
+if(denominator===0)
+return undefined;return numerator/denominator;};Statistics.variance=function(ary,opt_func,opt_this){var func=opt_func||identity;var mean=Statistics.mean(ary,func,opt_this);var sumOfSquaredDistances=Statistics.sum(ary,function(d,i){var v=func.call(this,d,i)-mean;return v*v;},opt_this);return sumOfSquaredDistances/(ary.length-1);};Statistics.stddev=function(ary,opt_func,opt_this){return Math.sqrt(Statistics.variance(ary,opt_func,opt_this));};Statistics.max=function(ary,opt_func,opt_this){var func=opt_func||identity;var ret=-Infinity;for(var i=0;i<ary.length;i++)
+ret=Math.max(ret,func.call(opt_this,ary[i],i));return ret;};Statistics.min=function(ary,opt_func,opt_this){var func=opt_func||identity;var ret=Infinity;for(var i=0;i<ary.length;i++)
+ret=Math.min(ret,func.call(opt_this,ary[i],i));return ret;};Statistics.range=function(ary,opt_func,opt_this){var func=opt_func||identity;var ret=new tr.b.Range();for(var i=0;i<ary.length;i++)
+ret.addValue(func.call(opt_this,ary[i],i));return ret;};Statistics.percentile=function(ary,percent,opt_func,opt_this){if(!(percent>=0&&percent<=1))
+throw new Error('percent must be [0,1]');var func=opt_func||identity;var tmp=new Array(ary.length);for(var i=0;i<ary.length;i++)
+tmp[i]=func.call(opt_this,ary[i],i);tmp.sort();var idx=Math.floor((ary.length-1)*percent);return tmp[idx];};Statistics.clamp=function(value,opt_low,opt_high){opt_low=opt_low||0.0;opt_high=opt_high||1.0;return Math.min(Math.max(value,opt_low),opt_high);};Statistics.normalizeSamples=function(samples){if(samples.length===0){return{normalized_samples:samples,scale:1.0};}
+samples=samples.slice().sort(function(a,b){return a-b;});var low=Math.min.apply(null,samples);var high=Math.max.apply(null,samples);var new_low=0.5/samples.length;var new_high=(samples.length-0.5)/samples.length;if(high-low===0.0){samples=Array.apply(null,new Array(samples.length)).map(function(){return 0.5;});return{normalized_samples:samples,scale:1.0};}
+var scale=(new_high-new_low)/(high-low);for(var i=0;i<samples.length;i++){samples[i]=(samples[i]-low)*scale+new_low;}
+return{normalized_samples:samples,scale:scale};};Statistics.discrepancy=function(samples,opt_location_count){if(samples.length===0)
+return 0.0;var max_local_discrepancy=0;var inv_sample_count=1.0/samples.length;var locations=[];var count_less=[];var count_less_equal=[];if(opt_location_count!==undefined){var sample_index=0;for(var i=0;i<opt_location_count;i++){var location=i/(opt_location_count-1);locations.push(location);while(sample_index<samples.length&&samples[sample_index]<location){sample_index+=1;}
+count_less.push(sample_index);while(sample_index<samples.length&&samples[sample_index]<=location){sample_index+=1;}
+count_less_equal.push(sample_index);}}else{if(samples[0]>0.0){locations.push(0.0);count_less.push(0);count_less_equal.push(0);}
+for(var i=0;i<samples.length;i++){locations.push(samples[i]);count_less.push(i);count_less_equal.push(i+1);}
+if(samples[-1]<1.0){locations.push(1.0);count_less.push(samples.length);count_less_equal.push(samples.length);}}
+for(var i=0;i<locations.length;i++){for(var j=i+1;j<locations.length;j++){var length=locations[j]-locations[i];var count_closed=count_less_equal[j]-count_less[i];var local_discrepancy_closed=Math.abs(count_closed*inv_sample_count-length);var max_local_discrepancy=Math.max(local_discrepancy_closed,max_local_discrepancy);var count_open=count_less[j]-count_less_equal[i];var local_discrepancy_open=Math.abs(count_open*inv_sample_count-length);var max_local_discrepancy=Math.max(local_discrepancy_open,max_local_discrepancy);}}
+return max_local_discrepancy;};Statistics.timestampsDiscrepancy=function(timestamps,opt_absolute,opt_location_count){if(timestamps.length===0)
+return 0.0;if(opt_absolute===undefined)
+opt_absolute=true;if(Array.isArray(timestamps[0])){var range_discrepancies=timestamps.map(function(r){return Statistics.timestampsDiscrepancy(r);});return Math.max.apply(null,range_discrepancies);}
+var s=Statistics.normalizeSamples(timestamps);var samples=s.normalized_samples;var sample_scale=s.scale;var discrepancy=Statistics.discrepancy(samples,opt_location_count);var inv_sample_count=1.0/samples.length;if(opt_absolute===true){discrepancy/=sample_scale;}else{discrepancy=Statistics.clamp((discrepancy-inv_sample_count)/(1.0-inv_sample_count));}
+return discrepancy;};Statistics.durationsDiscrepancy=function(durations,opt_absolute,opt_location_count){if(durations.length===0)
+return 0.0;var timestamps=durations.reduce(function(prev,curr,index,array){prev.push(prev[prev.length-1]+curr);return prev;},[0]);return Statistics.timestampsDiscrepancy(timestamps,opt_absolute,opt_location_count);};Statistics.uniformlySampleStream=function(samples,streamLength,newElement,numSamples){if(streamLength<=numSamples){if(samples.length>=streamLength)
+samples[streamLength-1]=newElement;else
+samples.push(newElement);return;}
+var probToKeep=numSamples/streamLength;if(Math.random()>probToKeep)
+return;var index=Math.floor(Math.random()*numSamples);samples[index]=newElement;};Statistics.mergeSampledStreams=function(samplesA,streamLengthA,samplesB,streamLengthB,numSamples){if(streamLengthB<numSamples){var nbElements=Math.min(streamLengthB,samplesB.length);for(var i=0;i<nbElements;++i){Statistics.uniformlySampleStream(samplesA,streamLengthA+i+1,samplesB[i],numSamples);}
+return;}
+if(streamLengthA<numSamples){var nbElements=Math.min(streamLengthA,samplesA.length);var tempSamples=samplesB.slice();for(var i=0;i<nbElements;++i){Statistics.uniformlySampleStream(tempSamples,streamLengthB+i+1,samplesA[i],numSamples);}
+for(var i=0;i<tempSamples.length;++i){samplesA[i]=tempSamples[i];}
+return;}
+var nbElements=Math.min(numSamples,samplesB.length);var probOfSwapping=streamLengthB/(streamLengthA+streamLengthB);for(var i=0;i<nbElements;++i){if(Math.random()<probOfSwapping){samplesA[i]=samplesB[i];}}};return{Statistics:Statistics};});'use strict';tr.exportTo('tr.b',function(){function EventTarget(){}
+EventTarget.decorate=function(target){for(var k in EventTarget.prototype){if(k=='decorate')
+continue;var v=EventTarget.prototype[k];if(typeof v!=='function')
+continue;target[k]=v;}};EventTarget.prototype={addEventListener:function(type,handler){if(!this.listeners_)
+this.listeners_=Object.create(null);if(!(type in this.listeners_)){this.listeners_[type]=[handler];}else{var handlers=this.listeners_[type];if(handlers.indexOf(handler)<0)
+handlers.push(handler);}},removeEventListener:function(type,handler){if(!this.listeners_)
+return;if(type in this.listeners_){var handlers=this.listeners_[type];var index=handlers.indexOf(handler);if(index>=0){if(handlers.length==1)
+delete this.listeners_[type];else
+handlers.splice(index,1);}}},dispatchEvent:function(event){if(!this.listeners_)
+return true;var self=this;event.__defineGetter__('target',function(){return self;});var realPreventDefault=event.preventDefault;event.preventDefault=function(){realPreventDefault.call(this);this.rawReturnValue=false;};var type=event.type;var prevented=0;if(type in this.listeners_){var handlers=this.listeners_[type].concat();for(var i=0,handler;handler=handlers[i];i++){if(handler.handleEvent)
+prevented|=handler.handleEvent.call(handler,event)===false;else
+prevented|=handler.call(this,event)===false;}}
+return!prevented&&event.rawReturnValue;},hasEventListener:function(type){return this.listeners_[type]!==undefined;}};var EventTargetHelper={decorate:function(target){for(var k in EventTargetHelper){if(k=='decorate')
+continue;var v=EventTargetHelper[k];if(typeof v!=='function')
+continue;target[k]=v;}
+target.listenerCounts_={};},addEventListener:function(type,listener,useCapture){this.__proto__.addEventListener.call(this,type,listener,useCapture);if(this.listenerCounts_[type]===undefined)
+this.listenerCounts_[type]=0;this.listenerCounts_[type]++;},removeEventListener:function(type,listener,useCapture){this.__proto__.removeEventListener.call(this,type,listener,useCapture);this.listenerCounts_[type]--;},hasEventListener:function(type){return this.listenerCounts_[type]>0;}};return{EventTarget:EventTarget,EventTargetHelper:EventTargetHelper};});'use strict';tr.exportTo('tr.b',function(){var Event;if(tr.isHeadless){function HeadlessEvent(type,opt_bubbles,opt_preventable){this.type=type;this.bubbles=(opt_bubbles!==undefined?!!opt_bubbles:false);this.cancelable=(opt_preventable!==undefined?!!opt_preventable:false);this.defaultPrevented=false;this.cancelBubble=false;};HeadlessEvent.prototype={preventDefault:function(){this.defaultPrevented=true;},stopPropagation:function(){this.cancelBubble=true;}};Event=HeadlessEvent;}else{function TrEvent(type,opt_bubbles,opt_preventable){var e=tr.doc.createEvent('Event');e.initEvent(type,!!opt_bubbles,!!opt_preventable);e.__proto__=global.Event.prototype;return e;};TrEvent.prototype={__proto__:global.Event.prototype};Event=TrEvent;}
+function dispatchSimpleEvent(target,type,opt_bubbles,opt_cancelable){var e=new tr.b.Event(type,opt_bubbles,opt_cancelable);return target.dispatchEvent(e);}
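+/* Editor's note (illustrative sketch only): tr.b.EventTarget.decorate, defined
+   above, copies the listener plumbing onto an arbitrary object, and
+   dispatchSimpleEvent fires a tr.b.Event at it, returning false when a handler
+   prevented the default on a cancelable event. Assuming the bundle is loaded:
+     var obj = {};
+     tr.b.EventTarget.decorate(obj);
+     obj.addEventListener('ping', function(e) { e.preventDefault(); });
+     var notPrevented = tr.b.dispatchSimpleEvent(obj, 'ping', false, true);
+     // notPrevented === false because the listener called preventDefault.
+*/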
+return{Event:Event,dispatchSimpleEvent:dispatchSimpleEvent};});'use strict';tr.exportTo('tr.b',function(){function RegisteredTypeInfo(constructor,metadata){this.constructor=constructor;this.metadata=metadata;};var BASIC_REGISTRY_MODE='BASIC_REGISTRY_MODE';var TYPE_BASED_REGISTRY_MODE='TYPE_BASED_REGISTRY_MODE';var ALL_MODES={BASIC_REGISTRY_MODE:true,TYPE_BASED_REGISTRY_MODE:true};function ExtensionRegistryOptions(mode){if(mode===undefined)
 throw new Error('Mode is required');if(!ALL_MODES[mode])
 throw new Error('Not a mode.');this.mode_=mode;this.defaultMetadata_={};this.defaultConstructor_=undefined;this.mandatoryBaseClass_=undefined;this.defaultTypeInfo_=undefined;this.frozen_=false;}
 ExtensionRegistryOptions.prototype={freeze:function(){if(this.frozen_)
@@ -2591,7 +2531,9 @@
 throw new Error(constructor+' not registered');registry.registeredTypeInfos_.splice(foundIndex,1);var e=new tr.b.Event('registry-changed');registry.dispatchEvent(e);};registry.getAllRegisteredTypeInfos=function(){return registry.registeredTypeInfos_;};registry.findTypeInfo=function(constructor){var foundIndex=this.findIndexOfRegisteredConstructor(constructor);if(foundIndex!==undefined)
 return this.registeredTypeInfos_[foundIndex];return undefined;};registry.findTypeInfoMatching=function(predicate,opt_this){opt_this=opt_this?opt_this:undefined;for(var i=0;i<registry.registeredTypeInfos_.length;++i){var typeInfo=registry.registeredTypeInfos_[i];if(predicate.call(opt_this,typeInfo))
 return typeInfo;}
-return extensionRegistryOptions.defaultTypeInfo;};}
+return extensionRegistryOptions.defaultTypeInfo;};registry.findTypeInfoWithName=function(name){if(typeof(name)!=='string')
+throw new Error('Name is not a string.');var typeInfo=registry.findTypeInfoMatching(function(ti){return ti.constructor.name===name;});if(typeInfo)
+return typeInfo;return undefined;};}
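+/* Editor's note (illustrative sketch only): findTypeInfoWithName, added above,
+   looks a registered type up by its constructor's function name and throws if
+   the argument is not a string. Sketch against a hypothetical registry
+   decorated in basic mode (SomeRegistry and MyImporter are made-up names):
+     function MyImporter() {}
+     SomeRegistry.register(MyImporter);
+     var info = SomeRegistry.findTypeInfoWithName('MyImporter');
+     // info.constructor === MyImporter; an unknown name yields undefined
+     // (when no default type info is configured for the registry).
+*/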
 return{_decorateBasicExtensionRegistry:decorateBasicExtensionRegistry};});'use strict';tr.exportTo('tr.b',function(){var categoryPartsFor={};function getCategoryParts(category){var parts=categoryPartsFor[category];if(parts!==undefined)
 return parts;parts=category.split(',');categoryPartsFor[category]=parts;return parts;}
 return{getCategoryParts:getCategoryParts};});'use strict';tr.exportTo('tr.b',function(){var getCategoryParts=tr.b.getCategoryParts;var RegisteredTypeInfo=tr.b.RegisteredTypeInfo;var ExtensionRegistryOptions=tr.b.ExtensionRegistryOptions;function decorateTypeBasedExtensionRegistry(registry,extensionRegistryOptions){var savedStateStack=[];registry.registeredTypeInfos_=[];registry.categoryPartToTypeInfoMap_={};registry.typeNameToTypeInfoMap_={};registry.register=function(constructor,metadata){extensionRegistryOptions.validateConstructor(constructor);var typeInfo=new RegisteredTypeInfo(constructor,metadata||extensionRegistryOptions.defaultMetadata);typeInfo.typeNames=[];typeInfo.categoryParts=[];if(metadata&&metadata.typeName)
@@ -2611,20 +2553,7 @@
 if(registry.addEventListener===undefined)
 tr.b.EventTarget.decorate(registry);}
 return{decorateExtensionRegistry:decorateExtensionRegistry};});'use strict';tr.exportTo('tr.c',function(){function Auditor(model){this.model_=model;}
-Auditor.prototype={__proto__:Object.prototype,get model(){return this.model_;},runAnnotate:function(){},installUserFriendlyCategoryDriverIfNeeded:function(){},runAudit:function(){}};var options=new tr.b.ExtensionRegistryOptions(tr.b.BASIC_REGISTRY_MODE);options.defaultMetadata={};options.mandatoryBaseClass=Auditor;tr.b.decorateExtensionRegistry(Auditor,options);return{Auditor:Auditor};});'use strict';tr.exportTo('tr.c',function(){function makeCaseInsensitiveRegex(pattern){pattern=pattern.replace(/[.*+?^${}()|[\]\\]/g,'\\$&');return new RegExp(pattern,'i');}
-function Filter(){}
-Filter.prototype={__proto__:Object.prototype,matchCounter:function(counter){return true;},matchCpu:function(cpu){return true;},matchProcess:function(process){return true;},matchSlice:function(slice){return true;},matchThread:function(thread){return true;}};function TitleOrCategoryFilter(text){Filter.call(this);this.regex_=makeCaseInsensitiveRegex(text);if(!text.length)
-throw new Error('Filter text is empty.');}
-TitleOrCategoryFilter.prototype={__proto__:Filter.prototype,matchSlice:function(slice){if(slice.title===undefined&&slice.category===undefined)
-return false;return this.regex_.test(slice.title)||(!!slice.category&&this.regex_.test(slice.category));}};function ExactTitleFilter(text){Filter.call(this);this.text_=text;if(!text.length)
-throw new Error('Filter text is empty.');}
-ExactTitleFilter.prototype={__proto__:Filter.prototype,matchSlice:function(slice){return slice.title===this.text_;}};function FullTextFilter(text){Filter.call(this);this.regex_=makeCaseInsensitiveRegex(text);this.titleOrCategoryFilter_=new TitleOrCategoryFilter(text);}
-FullTextFilter.prototype={__proto__:Filter.prototype,matchObject_:function(obj){for(var key in obj){if(!obj.hasOwnProperty(key))
-continue;if(this.regex_.test(key))
-return true;if(this.regex_.test(obj[key]))
-return true;}
-return false;},matchSlice:function(slice){if(this.titleOrCategoryFilter_.matchSlice(slice))
-return true;return this.matchObject_(slice.args);}};return{Filter:Filter,TitleOrCategoryFilter:TitleOrCategoryFilter,ExactTitleFilter:ExactTitleFilter,FullTextFilter:FullTextFilter};});'use strict';tr.exportTo('tr.b.u',function(){function Scalar(value,unit){this.value=value;this.unit=unit;};Scalar.prototype={toString:function(){return this.unit.format(this.value);}};return{Scalar:Scalar};});'use strict';tr.exportTo('tr.b.u',function(){function TimeStamp(timestamp){tr.b.u.Scalar.call(this,timestamp,tr.b.u.Units.timeStampInMs);};TimeStamp.prototype={__proto__:tr.b.u.Scalar.prototype,get timestamp(){return this.value;}};TimeStamp.format=function(timestamp){return tr.b.u.Units.timeStampInMs.format(timestamp);};return{TimeStamp:TimeStamp};});'use strict';tr.exportTo('tr.b',function(){function clamp01(value){return Math.max(0,Math.min(1,value));}
+Auditor.prototype={__proto__:Object.prototype,get model(){return this.model_;},runAnnotate:function(){},installUserFriendlyCategoryDriverIfNeeded:function(){},runAudit:function(){}};var options=new tr.b.ExtensionRegistryOptions(tr.b.BASIC_REGISTRY_MODE);options.defaultMetadata={};options.mandatoryBaseClass=Auditor;tr.b.decorateExtensionRegistry(Auditor,options);return{Auditor:Auditor};});'use strict';tr.exportTo('tr.b',function(){function clamp01(value){return Math.max(0,Math.min(1,value));}
 function Color(opt_r,opt_g,opt_b,opt_a){this.r=Math.floor(opt_r)||0;this.g=Math.floor(opt_g)||0;this.b=Math.floor(opt_b)||0;this.a=opt_a;}
 Color.fromString=function(str){var tmp;var values;if(str.substr(0,4)=='rgb('){tmp=str.substr(4,str.length-5);values=tmp.split(',').map(function(v){return v.replace(/^\s+/,'','g');});if(values.length!=3)
 throw new Error('Malformatted rgb-expression');return new Color(parseInt(values[0]),parseInt(values[1]),parseInt(values[2]));}else if(str.substr(0,5)=='rgba('){tmp=str.substr(5,str.length-6);values=tmp.split(',').map(function(v){return v.replace(/^\s+/,'','g');});if(values.length!=4)
@@ -2647,7 +2576,7 @@
 h/=6;}
 return{h:h,s:s,l:l,a:this.a};},toStringWithAlphaOverride:function(alpha){return'rgba('+
 this.r+','+this.g+','+
-this.b+','+alpha+')';}};return{Color:Color};});'use strict';tr.exportTo('tr.b',function(){var generalPurposeColors=[new tr.b.Color(122,98,135),new tr.b.Color(150,83,105),new tr.b.Color(44,56,189),new tr.b.Color(99,86,147),new tr.b.Color(104,129,107),new tr.b.Color(130,178,55),new tr.b.Color(87,109,147),new tr.b.Color(111,145,88),new tr.b.Color(81,152,131),new tr.b.Color(142,91,111),new tr.b.Color(81,163,70),new tr.b.Color(148,94,86),new tr.b.Color(144,89,118),new tr.b.Color(83,150,97),new tr.b.Color(105,94,139),new tr.b.Color(89,144,122),new tr.b.Color(105,119,128),new tr.b.Color(96,128,137),new tr.b.Color(145,88,145),new tr.b.Color(88,145,144),new tr.b.Color(90,100,143),new tr.b.Color(121,97,136),new tr.b.Color(111,160,73),new tr.b.Color(112,91,142),new tr.b.Color(86,147,86),new tr.b.Color(63,100,170),new tr.b.Color(81,152,107),new tr.b.Color(60,164,173),new tr.b.Color(143,72,161),new tr.b.Color(159,74,86)];var reservedColorsByName={thread_state_uninterruptible:new tr.b.Color(182,125,143),thread_state_iowait:new tr.b.Color(255,140,0),thread_state_running:new tr.b.Color(126,200,148),thread_state_runnable:new tr.b.Color(133,160,210),thread_state_sleeping:new tr.b.Color(240,240,240),thread_state_unknown:new tr.b.Color(199,155,125),light_memory_dump:new tr.b.Color(0,0,180),detailed_memory_dump:new tr.b.Color(180,0,180),generic_work:new tr.b.Color(125,125,125),good:new tr.b.Color(0,125,0),bad:new tr.b.Color(180,125,0),terrible:new tr.b.Color(180,0,0),black:new tr.b.Color(0,0,0),rail_response:new tr.b.Color(67,135,253),rail_animate:new tr.b.Color(244,74,63),rail_idle:new tr.b.Color(238,142,0),rail_load:new tr.b.Color(13,168,97),used_memory_column:new tr.b.Color(0,0,255),older_used_memory_column:new tr.b.Color(153,204,255),tracing_memory_column:new tr.b.Color(153,153,153),cq_build_running:new tr.b.Color(255,255,119),cq_build_passed:new tr.b.Color(153,238,102),cq_build_failed:new tr.b.Color(238,136,136),cq_build_abandoned:new tr.b.Color(187,187,187),cq_build_attempt_runnig:new tr.b.Color(222,222,75),cq_build_attempt_passed:new tr.b.Color(103,218,35),cq_build_attempt_failed:new tr.b.Color(197,81,81)};var numGeneralPurposeColorIds=generalPurposeColors.length;var numReservedColorIds=tr.b.dictionaryLength(reservedColorsByName);var numColorsPerVariant=numGeneralPurposeColorIds+numReservedColorIds;function ColorScheme(){}
+this.b+','+alpha+')';}};return{Color:Color};});'use strict';tr.exportTo('tr.b',function(){var generalPurposeColors=[new tr.b.Color(122,98,135),new tr.b.Color(150,83,105),new tr.b.Color(44,56,189),new tr.b.Color(99,86,147),new tr.b.Color(104,129,107),new tr.b.Color(130,178,55),new tr.b.Color(87,109,147),new tr.b.Color(111,145,88),new tr.b.Color(81,152,131),new tr.b.Color(142,91,111),new tr.b.Color(81,163,70),new tr.b.Color(148,94,86),new tr.b.Color(144,89,118),new tr.b.Color(83,150,97),new tr.b.Color(105,94,139),new tr.b.Color(89,144,122),new tr.b.Color(105,119,128),new tr.b.Color(96,128,137),new tr.b.Color(145,88,145),new tr.b.Color(88,145,144),new tr.b.Color(90,100,143),new tr.b.Color(121,97,136),new tr.b.Color(111,160,73),new tr.b.Color(112,91,142),new tr.b.Color(86,147,86),new tr.b.Color(63,100,170),new tr.b.Color(81,152,107),new tr.b.Color(60,164,173),new tr.b.Color(143,72,161),new tr.b.Color(159,74,86)];var reservedColorsByName={thread_state_uninterruptible:new tr.b.Color(182,125,143),thread_state_iowait:new tr.b.Color(255,140,0),thread_state_running:new tr.b.Color(126,200,148),thread_state_runnable:new tr.b.Color(133,160,210),thread_state_sleeping:new tr.b.Color(240,240,240),thread_state_unknown:new tr.b.Color(199,155,125),light_memory_dump:new tr.b.Color(0,0,180),detailed_memory_dump:new tr.b.Color(180,0,180),generic_work:new tr.b.Color(125,125,125),good:new tr.b.Color(0,125,0),bad:new tr.b.Color(180,125,0),terrible:new tr.b.Color(180,0,0),black:new tr.b.Color(0,0,0),rail_response:new tr.b.Color(67,135,253),rail_animation:new tr.b.Color(244,74,63),rail_idle:new tr.b.Color(238,142,0),rail_load:new tr.b.Color(13,168,97),used_memory_column:new tr.b.Color(0,0,255),older_used_memory_column:new tr.b.Color(153,204,255),tracing_memory_column:new tr.b.Color(153,153,153),heap_dump_stack_frame:new tr.b.Color(128,128,128),heap_dump_object_type:new tr.b.Color(0,0,255),cq_build_running:new tr.b.Color(255,255,119),cq_build_passed:new tr.b.Color(153,238,102),cq_build_failed:new tr.b.Color(238,136,136),cq_build_abandoned:new tr.b.Color(187,187,187),cq_build_attempt_runnig:new tr.b.Color(222,222,75),cq_build_attempt_passed:new tr.b.Color(103,218,35),cq_build_attempt_failed:new tr.b.Color(197,81,81)};var numGeneralPurposeColorIds=generalPurposeColors.length;var numReservedColorIds=tr.b.dictionaryLength(reservedColorsByName);var numColorsPerVariant=numGeneralPurposeColorIds+numReservedColorIds;function ColorScheme(){}
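+/* Editor's note (illustrative sketch only): reservedColorsByName maps
+   well-known state names (including the rail_animation and heap dump entries
+   introduced in this revision) to fixed colors; the ColorScheme helpers
+   defined just below expose them by id and as CSS color strings:
+     var id = tr.b.ColorScheme.getColorIdForReservedName('rail_animation');
+     var css = tr.b.ColorScheme.getColorForReservedNameAsString('rail_load');
+     // css is an 'rgb(r,g,b)'-style string usable in canvas/CSS drawing.
+*/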
 var paletteBase=[];paletteBase.push.apply(paletteBase,generalPurposeColors);paletteBase.push.apply(paletteBase,tr.b.dictionaryValues(reservedColorsByName));ColorScheme.colors=[];ColorScheme.properties={};ColorScheme.properties={numColorsPerVariant:numColorsPerVariant};function pushVariant(func){var variantColors=paletteBase.map(func);ColorScheme.colors.push.apply(ColorScheme.colors,variantColors);}
 pushVariant(function(c){return c;});ColorScheme.properties.brightenedOffsets=[];ColorScheme.properties.brightenedOffsets.push(ColorScheme.colors.length);pushVariant(function(c){return c.lighten(0.3,0.9);});ColorScheme.properties.brightenedOffsets.push(ColorScheme.colors.length);pushVariant(function(c){return c.lighten(0.48,0.9);});ColorScheme.properties.brightenedOffsets.push(ColorScheme.colors.length);pushVariant(function(c){return c.lighten(0.65,0.9);});ColorScheme.properties.dimmedOffsets=[];ColorScheme.properties.dimmedOffsets.push(ColorScheme.colors.length);pushVariant(function(c){return c.desaturate();});ColorScheme.properties.dimmedOffsets.push(ColorScheme.colors.length);pushVariant(function(c){return c.desaturate(0.5);});ColorScheme.properties.dimmedOffsets.push(ColorScheme.colors.length);pushVariant(function(c){return c.desaturate(0.3);});ColorScheme.colorsAsStrings=ColorScheme.colors.map(function(c){return c.toString();});var reservedColorNameToIdMap=(function(){var m={};var i=generalPurposeColors.length;tr.b.iterItems(reservedColorsByName,function(key,value){m[key]=i++;});return m;})();ColorScheme.getColorIdForReservedName=function(name){var id=reservedColorNameToIdMap[name];if(id===undefined)
 throw new Error('Unrecognized color '+name);return id;};ColorScheme.getColorForReservedNameAsString=function(reservedName){var id=ColorScheme.getColorIdForReservedName(reservedName);return ColorScheme.colorsAsStrings[id];};ColorScheme.getStringHash=function(name){var hash=0;for(var i=0;i<name.length;++i)
@@ -2663,13 +2592,15 @@
 EventRegistry.addEventListener('registry-changed',function(){eventsByTypeName=undefined;});function convertCamelCaseToTitleCase(name){var result=name.replace(/[A-Z]/g,' $&');result=result.charAt(0).toUpperCase()+result.slice(1);return result;}
 EventRegistry.getUserFriendlySingularName=function(typeName){var typeInfo=EventRegistry.getEventTypeInfoByTypeName(typeName);var str=typeInfo.metadata.name;return convertCamelCaseToTitleCase(str);};EventRegistry.getUserFriendlyPluralName=function(typeName){var typeInfo=EventRegistry.getEventTypeInfoByTypeName(typeName);var str=typeInfo.metadata.pluralName;return convertCamelCaseToTitleCase(str);};return{EventRegistry:EventRegistry};});'use strict';tr.exportTo('tr.model',function(){var EventRegistry=tr.model.EventRegistry;var RequestSelectionChangeEvent=tr.b.Event.bind(undefined,'requestSelectionChange',true,false);function EventSet(opt_events){this.bounds_dirty_=true;this.bounds_=new tr.b.Range();this.length_=0;this.guid_=tr.b.GUID.allocate();this.pushed_guids_={};if(opt_events){if(opt_events instanceof Array){for(var i=0;i<opt_events.length;i++)
 this.push(opt_events[i]);}else if(opt_events instanceof EventSet){this.addEventSet(opt_events);}else{this.push(opt_events);}}}
-EventSet.prototype={__proto__:Object.prototype,get bounds(){if(this.bounds_dirty_){this.bounds_.reset();for(var i=0;i<this.length_;i++)
-this[i].addBoundsToRange(this.bounds_);this.bounds_dirty_=false;}
-return this.bounds_;},get duration(){if(this.bounds_.isEmpty)
+EventSet.prototype={__proto__:Object.prototype,get bounds(){if(this.bounds_dirty_)
+this.resolveBounds_();return this.bounds_;},get duration(){if(this.bounds_.isEmpty)
 return 0;return this.bounds_.max-this.bounds_.min;},get length(){return this.length_;},get guid(){return this.guid_;},clear:function(){for(var i=0;i<this.length_;++i)
-delete this[i];this.length_=0;this.bounds_dirty_=true;},push:function(event){if(event.guid==undefined)
+delete this[i];this.length_=0;this.bounds_dirty_=true;},resolveBounds_:function(){this.bounds_.reset();for(var i=0;i<this.length_;i++)
+this[i].addBoundsToRange(this.bounds_);this.bounds_dirty_=false;},push:function(event){if(event.guid==undefined)
 throw new Error('Event must have a GUID');if(this.contains(event))
-return event;this.pushed_guids_[event.guid]=true;this[this.length_++]=event;this.bounds_dirty_=true;return event;},contains:function(event){return this.pushed_guids_[event.guid];},addEventSet:function(eventSet){for(var i=0;i<eventSet.length;i++)
+return event;this.pushed_guids_[event.guid]=true;this[this.length_++]=event;this.bounds_dirty_=true;return event;},contains:function(event){return this.pushed_guids_[event.guid];},indexOf:function(event){for(var i=0;i<this.length;i++){if(this[i].guid===event.guid)
+return i;}
+return-1;},addEventSet:function(eventSet){for(var i=0;i<eventSet.length;i++)
 this.push(eventSet[i]);},subEventSet:function(index,count){count=count||1;var eventSet=new EventSet();eventSet.bounds_dirty_=true;if(index<0||index+count>this.length_)
 throw new Error('Index out of bounds');for(var i=index;i<index+count;i++)
 eventSet.push(this[i]);return eventSet;},intersectionIsEmpty:function(otherEventSet){return!this.some(function(event){return otherEventSet.contains(event);});},equals:function(that){if(this.length!==that.length)
@@ -2696,165 +2627,32 @@
 if(!fn.call(opt_this,this[i],i))
 return false;return true;},some:function(fn,opt_this){for(var i=0;i<this.length;i++)
 if(fn.call(opt_this,this[i],i))
-return true;return false;},asDict:function(){var stable_ids=[];this.forEach(function(event){stable_ids.push(event.stableId);});return{'events':stable_ids};}};return{EventSet:EventSet,RequestSelectionChangeEvent:RequestSelectionChangeEvent};});'use strict';tr.exportTo('tr.model',function(){var ColorScheme=tr.b.ColorScheme;var SelectionState={NONE:0,SELECTED:ColorScheme.properties.brightenedOffsets[0],HIGHLIGHTED:ColorScheme.properties.brightenedOffsets[1],DIMMED:ColorScheme.properties.dimmedOffsets[0],BRIGHTENED0:ColorScheme.properties.brightenedOffsets[0],BRIGHTENED1:ColorScheme.properties.brightenedOffsets[1],BRIGHTENED2:ColorScheme.properties.brightenedOffsets[2],DIMMED0:ColorScheme.properties.dimmedOffsets[0],DIMMED1:ColorScheme.properties.dimmedOffsets[1],DIMMED2:ColorScheme.properties.dimmedOffsets[2]};var brighteningLevels=[SelectionState.NONE,SelectionState.BRIGHTENED0,SelectionState.BRIGHTENED1,SelectionState.BRIGHTENED2];SelectionState.getFromBrighteningLevel=function(level){return brighteningLevels[level];}
+return true;return false;},asDict:function(){var stable_ids=[];this.forEach(function(event){stable_ids.push(event.stableId);});return{'events':stable_ids};}};EventSet.IMMUTABLE_EMPTY_SET=(function(){var s=new EventSet();s.resolveBounds_();s.push=function(){throw new Error('Cannot push to an immutable event set');};s.addEventSet=function(){throw new Error('Cannot add to an immutable event set');};Object.freeze(s);return s;})();return{EventSet:EventSet,RequestSelectionChangeEvent:RequestSelectionChangeEvent};});'use strict';tr.exportTo('tr.model',function(){var ColorScheme=tr.b.ColorScheme;var SelectionState={NONE:0,SELECTED:ColorScheme.properties.brightenedOffsets[0],HIGHLIGHTED:ColorScheme.properties.brightenedOffsets[1],DIMMED:ColorScheme.properties.dimmedOffsets[0],BRIGHTENED0:ColorScheme.properties.brightenedOffsets[0],BRIGHTENED1:ColorScheme.properties.brightenedOffsets[1],BRIGHTENED2:ColorScheme.properties.brightenedOffsets[2],DIMMED0:ColorScheme.properties.dimmedOffsets[0],DIMMED1:ColorScheme.properties.dimmedOffsets[1],DIMMED2:ColorScheme.properties.dimmedOffsets[2]};var brighteningLevels=[SelectionState.NONE,SelectionState.BRIGHTENED0,SelectionState.BRIGHTENED1,SelectionState.BRIGHTENED2];SelectionState.getFromBrighteningLevel=function(level){return brighteningLevels[level];}
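+/* Editor's note (illustrative sketch only): this revision gives EventSet lazy
+   bounds resolution (resolveBounds_), an indexOf helper, and a frozen
+   EventSet.IMMUTABLE_EMPTY_SET whose push/addEventSet throw. Usage with a
+   hypothetical event instance:
+     var set = new tr.model.EventSet();
+     var ev = someTimedEvent;        // assumed: any tr.model.Event subclass
+     set.push(ev);
+     var found = set.contains(ev);   // true
+     var idx = set.indexOf(ev);      // 0
+     var b = set.bounds;             // computed on demand via resolveBounds_
+     tr.model.EventSet.IMMUTABLE_EMPTY_SET.push(ev);  // throws
+*/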
 var dimmingLevels=[SelectionState.DIMMED0,SelectionState.DIMMED1,SelectionState.DIMMED2];SelectionState.getFromDimmingLevel=function(level){return dimmingLevels[level];}
 return{SelectionState:SelectionState};});'use strict';tr.exportTo('tr.model',function(){var SelectionState=tr.model.SelectionState;function SelectableItem(modelItem){this.modelItem_=modelItem;}
 SelectableItem.prototype={get modelItem(){return this.modelItem_;},get selected(){return this.selectionState===SelectionState.SELECTED;},addToSelection:function(selection){var modelItem=this.modelItem_;if(!modelItem)
 return;selection.push(modelItem);},addToTrackMap:function(eventToTrackMap,track){var modelItem=this.modelItem_;if(!modelItem)
-return;eventToTrackMap.addEvent(modelItem,track);}};return{SelectableItem:SelectableItem};});'use strict';tr.exportTo('tr.model',function(){var SelectableItem=tr.model.SelectableItem;var SelectionState=tr.model.SelectionState;function Event(){SelectableItem.call(this,this);this.guid_=tr.b.GUID.allocate();this.selectionState=SelectionState.NONE;this.associatedAlerts=new tr.model.EventSet();this.info=undefined;}
-Event.prototype={__proto__:SelectableItem.prototype,get guid(){return this.guid_;},get stableId(){return undefined;},addBoundsToRange:function(range){throw new Error('Not implemented');}};return{Event:Event};});'use strict';tr.exportTo('tr.model',function(){function TimedEvent(start){tr.model.Event.call(this);this.start=start;this.duration=0;this.cpuStart=undefined;this.cpuDuration=undefined;}
-TimedEvent.prototype={__proto__:tr.model.Event.prototype,get end(){return this.start+this.duration;},addBoundsToRange:function(range){range.addValue(this.start);range.addValue(this.end);},bounds:function(that,precisionUnit){if(precisionUnit===undefined){precisionUnit=tr.b.u.TimeDisplayModes.ms;}
-var startsBefore=precisionUnit.roundedLess(that.start,this.start);var endsAfter=precisionUnit.roundedLess(this.end,that.end);return!startsBefore&&!endsAfter;}};return{TimedEvent:TimedEvent};});'use strict';tr.exportTo('tr.model',function(){function Alert(info,start,opt_associatedEvents,opt_args){tr.model.TimedEvent.call(this,start);this.info=info;this.args=opt_args||{};this.associatedEvents=new tr.model.EventSet(opt_associatedEvents);this.associatedEvents.forEach(function(event){event.associatedAlerts.push(this);},this);}
+return;eventToTrackMap.addEvent(modelItem,track);}};return{SelectableItem:SelectableItem};});'use strict';tr.exportTo('tr.model',function(){var SelectableItem=tr.model.SelectableItem;var SelectionState=tr.model.SelectionState;var IMMUTABLE_EMPTY_SET=tr.model.EventSet.IMMUTABLE_EMPTY_SET;function Event(){SelectableItem.call(this,this);this.guid_=tr.b.GUID.allocate();this.selectionState=SelectionState.NONE;this.info=undefined;}
+Event.prototype={__proto__:SelectableItem.prototype,get guid(){return this.guid_;},get stableId(){return undefined;},associatedAlerts:IMMUTABLE_EMPTY_SET,addAssociatedAlert:function(alert){if(this.associatedAlerts===IMMUTABLE_EMPTY_SET)
+this.associatedAlerts=new tr.model.EventSet();this.associatedAlerts.push(alert);},addBoundsToRange:function(range){throw new Error('Not implemented');}};return{Event:Event};});'use strict';tr.exportTo('tr.v',function(){var msDisplayMode={scale:1e-3,suffix:'ms',roundedLess:function(a,b){return Math.round(a*1000)<Math.round(b*1000);},format:function(ts){return new Number(ts).toLocaleString(undefined,{minimumFractionDigits:3})+' ms';}};var nsDisplayMode={scale:1e-9,suffix:'ns',roundedLess:function(a,b){return Math.round(a*1000000)<Math.round(b*1000000);},format:function(ts){return new Number(ts*1000000).toLocaleString(undefined,{maximumFractionDigits:0})+' ns';}};var TimeDisplayModes={ns:nsDisplayMode,ms:msDisplayMode};return{TimeDisplayModes:TimeDisplayModes};});'use strict';tr.exportTo('tr.model',function(){function TimedEvent(start){tr.model.Event.call(this);this.start=start;this.duration=0;this.cpuStart=undefined;this.cpuDuration=undefined;}
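+/* Editor's note (illustrative sketch only): Event instances now share
+   IMMUTABLE_EMPTY_SET as their default associatedAlerts and only allocate a
+   private EventSet the first time addAssociatedAlert is called, which keeps
+   alert-free events cheap. Sketch, assuming 'alert' is an existing
+   tr.model.Alert instance:
+     var ev = new tr.model.Event();
+     // ev.associatedAlerts === tr.model.EventSet.IMMUTABLE_EMPTY_SET
+     ev.addAssociatedAlert(alert);
+     // ev.associatedAlerts is now a mutable EventSet containing the alert.
+*/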
+TimedEvent.prototype={__proto__:tr.model.Event.prototype,get end(){return this.start+this.duration;},addBoundsToRange:function(range){range.addValue(this.start);range.addValue(this.end);},bounds:function(that,opt_precisionUnit){if(opt_precisionUnit===undefined)
+opt_precisionUnit=tr.v.TimeDisplayModes.ms;var startsBefore=opt_precisionUnit.roundedLess(that.start,this.start);var endsAfter=opt_precisionUnit.roundedLess(this.end,that.end);return!startsBefore&&!endsAfter;}};return{TimedEvent:TimedEvent};});'use strict';tr.exportTo('tr.v',function(){var TimeDisplayModes=tr.v.TimeDisplayModes;var BINARY_PREFIXES=['','Ki','Mi','Gi','Ti'];var PLUS_MINUS_SIGN=String.fromCharCode(177);function max(a,b){if(a===undefined)
+return b;if(b===undefined)
+return a;return a.scale>b.scale?a:b;}
+var ImprovementDirection={DONT_CARE:0,BIGGER_IS_BETTER:1,SMALLER_IS_BETTER:2};function Unit(unitName,jsonName,isDelta,improvementDirection,formatValue){this.unitName=unitName;this.jsonName=jsonName;this.isDelta=isDelta;this.improvementDirection=improvementDirection;this.formatValue_=formatValue;this.correspondingDeltaUnit=undefined;}
+Unit.prototype={asJSON:function(){return this.jsonName;},format:function(value){var formattedValue=this.formatValue_(value);if(!this.isDelta||value<0)
+return formattedValue;if(value===0)
+return PLUS_MINUS_SIGN+formattedValue;else
+return'+'+formattedValue;}};Unit.reset=function(){Unit.currentTimeDisplayMode=TimeDisplayModes.ms;};Unit.timestampFromUs=function(us){return us/1000;};Unit.maybeTimestampFromUs=function(us){return us===undefined?undefined:us/1000;};Object.defineProperty(Unit,'currentTimeDisplayMode',{get:function(){return Unit.currentTimeDisplayMode_;},set:function(value){if(Unit.currentTimeDisplayMode_===value)
+return;Unit.currentTimeDisplayMode_=value;Unit.dispatchEvent(new tr.b.Event('display-mode-changed'));}});Unit.didPreferredTimeDisplayUnitChange=function(){var largest=undefined;var els=tr.b.findDeepElementsMatching(document.body,'tr-v-ui-preferred-display-unit');els.forEach(function(el){largest=max(largest,el.preferredTimeDisplayMode);});Unit.currentDisplayUnit=largest===undefined?TimeDisplayModes.ms:largest;};Unit.byName={};Unit.byJSONName={};Unit.fromJSON=function(object){var u=Unit.byJSONName[object];if(u){return u;}
+throw new Error('Unrecognized unit');};Unit.define=function(params){tr.b.iterItems(ImprovementDirection,function(_,improvementDirection){var regularUnit=Unit.defineUnitVariant_(params,false,improvementDirection);var deltaUnit=Unit.defineUnitVariant_(params,true,improvementDirection);regularUnit.correspondingDeltaUnit=deltaUnit;deltaUnit.correspondingDeltaUnit=deltaUnit;});};Unit.defineUnitVariant_=function(params,isDelta,improvementDirection){var nameSuffix=isDelta?'Delta':'';switch(improvementDirection){case ImprovementDirection.DONT_CARE:break;case ImprovementDirection.BIGGER_IS_BETTER:nameSuffix+='_biggerIsBetter';break;case ImprovementDirection.SMALLER_IS_BETTER:nameSuffix+='_smallerIsBetter';break;default:throw new Error('Unknown improvement direction: '+improvementDirection);}
+var unitName=params.baseUnitName+nameSuffix;var jsonName=params.baseJsonName+nameSuffix;if(Unit.byName[unitName]!==undefined)
+throw new Error('Unit \''+unitName+'\' already exists');if(Unit.byJSONName[jsonName]!==undefined)
+throw new Error('JSON unit \''+jsonName+'\' already exists');var unit=new Unit(unitName,jsonName,isDelta,improvementDirection,params.formatValue);Unit.byName[unitName]=unit;Unit.byJSONName[jsonName]=unit;return unit;};tr.b.EventTarget.decorate(Unit);Unit.reset();Unit.define({baseUnitName:'timeDurationInMs',baseJsonName:'ms',formatValue:function(value){return Unit.currentTimeDisplayMode_.format(value);}});Unit.define({baseUnitName:'timeStampInMs',baseJsonName:'tsMs',formatValue:function(value){return Unit.currentTimeDisplayMode_.format(value);}});Unit.define({baseUnitName:'normalizedPercentage',baseJsonName:'n%',formatValue:function(value){var tmp=new Number(Math.round(value*100000)/1000);return tmp.toLocaleString(undefined,{minimumFractionDigits:3})+'%';}});Unit.define({baseUnitName:'sizeInBytes',baseJsonName:'sizeInBytes',formatValue:function(value){var signPrefix='';if(value<0){signPrefix='-';value=-value;}
+var i=0;while(value>=1024&&i<BINARY_PREFIXES.length-1){value/=1024;i++;}
+return signPrefix+value.toFixed(1)+' '+BINARY_PREFIXES[i]+'B';}});Unit.define({baseUnitName:'energyInJoules',baseJsonName:'J',formatValue:function(value){return value.toLocaleString(undefined,{minimumFractionDigits:3})+' J';}});Unit.define({baseUnitName:'powerInWatts',baseJsonName:'W',formatValue:function(value){return value.toLocaleString(undefined,{minimumFractionDigits:3})+' W';}});Unit.define({baseUnitName:'unitlessNumber',baseJsonName:'unitless',formatValue:function(value){return value.toLocaleString(undefined,{minimumFractionDigits:3,maximumFractionDigits:3});}});return{ImprovementDirection:ImprovementDirection,Unit:Unit};});'use strict';tr.exportTo('tr.model',function(){function Alert(info,start,opt_associatedEvents,opt_args){tr.model.TimedEvent.call(this,start);this.info=info;this.args=opt_args||{};this.associatedEvents=new tr.model.EventSet(opt_associatedEvents);this.associatedEvents.forEach(function(event){event.addAssociatedAlert(this);},this);}
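+/* Editor's note (illustrative sketch only): the tr.v.Unit table above defines
+   regular and Delta variants per improvement direction and formats time values
+   through the current TimeDisplayModes entry. Expected strings assume an
+   en-US locale (toLocaleString output varies by locale):
+     tr.v.Unit.byName.sizeInBytes.format(1536);          // '1.5 KiB'
+     tr.v.Unit.byName.timeDurationInMs.format(5);        // '5.000 ms'
+     tr.v.Unit.byName.timeDurationInMsDelta.format(5);   // '+5.000 ms'
+     tr.v.Unit.fromJSON('tsMs') === tr.v.Unit.byName.timeStampInMs;  // true
+*/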
 Alert.prototype={__proto__:tr.model.TimedEvent.prototype,get title(){return this.info.title;},get colorId(){return this.info.colorId;},get userFriendlyName(){return'Alert '+this.title+' at '+
-tr.b.u.TimeStamp.format(this.start);}};tr.model.EventRegistry.register(Alert,{name:'alert',pluralName:'alerts',singleViewElementName:'tr-ui-a-alert-sub-view',multiViewElementName:'tr-ui-a-alert-sub-view'});return{Alert:Alert};});'use strict';tr.exportTo('tr.model',function(){function EventContainer(){this.guid_=tr.b.GUID.allocate();this.important=true;this.bounds_=new tr.b.Range();}
-EventContainer.prototype={get guid(){return this.guid_;},get stableId(){throw new Error('Not implemented');},get bounds(){return this.bounds_;},updateBounds:function(){throw new Error('Not implemented');},shiftTimestampsForward:function(amount){throw new Error('Not implemented');},iterateAllEventsInThisContainer:function(eventTypePredicate,callback,opt_this){throw new Error('Not implemented');},iterateAllChildEventContainers:function(callback,opt_this){throw new Error('Not implemented');},iterateAllEvents:function(callback,opt_this){this.iterateAllEventContainers(function(ec){ec.iterateAllEventsInThisContainer(function(eventType){return true;},callback,opt_this);});},iterateAllEventContainers:function(callback,opt_this){function visit(ec){callback.call(opt_this,ec);ec.iterateAllChildEventContainers(visit);}
-visit(this);}};return{EventContainer:EventContainer};});'use strict';tr.exportTo('tr.model',function(){var Event=tr.model.Event;var EventRegistry=tr.model.EventRegistry;function PowerSample(series,start,power){Event.call(this);this.series_=series;this.start_=start;this.power_=power;}
-PowerSample.prototype={__proto__:Event.prototype,get series(){return this.series_;},get start(){return this.start_;},set start(value){this.start_=value;},get power(){return this.power_;},set power(value){this.power_=value;},addBoundsToRange:function(range){range.addValue(this.start);}};EventRegistry.register(PowerSample,{name:'powerSample',pluralName:'powerSamples',singleViewElementName:'tr-ui-a-single-power-sample-sub-view',multiViewElementName:'tr-ui-a-multi-power-sample-sub-view'});return{PowerSample:PowerSample};});'use strict';tr.exportTo('tr.model',function(){var PowerSample=tr.model.PowerSample;function PowerSeries(device){tr.model.EventContainer.call(this);this.device_=device;this.samples_=[];}
-PowerSeries.prototype={__proto__:tr.model.EventContainer.prototype,get device(){return this.device_;},get samples(){return this.samples_;},get stableId(){return this.device_.stableId+'.PowerSeries';},addPowerSample:function(ts,val){var sample=new PowerSample(this,ts,val);this.samples_.push(sample);return sample;},getEnergyConsumed:function(start,end){var measurementRange=tr.b.Range.fromExplicitRange(start,end);var energyConsumed=0;for(var i=0;i<this.samples.length;i++){var sample=this.samples[i];var nextSample=this.samples[i+1];var sampleRange=new tr.b.Range();sampleRange.addValue(sample.start);sampleRange.addValue(nextSample?nextSample.start:Infinity);var timeIntersection=measurementRange.findIntersection(sampleRange);energyConsumed+=timeIntersection.duration/1000*sample.power;}
-return energyConsumed;},shiftTimestampsForward:function(amount){for(var i=0;i<this.samples_.length;++i)
-this.samples_[i].start+=amount;},updateBounds:function(){this.bounds.reset();if(this.samples_.length===0)
-return;this.bounds.addValue(this.samples_[0].start);this.bounds.addValue(this.samples_[this.samples_.length-1].start);},iterateAllEventsInThisContainer:function(eventTypePredicate,callback,opt_this){if(eventTypePredicate.call(opt_this,PowerSample))
-this.samples_.forEach(callback,opt_this);},iterateAllChildEventContainers:function(callback,opt_this){}};return{PowerSeries:PowerSeries};});'use strict';tr.exportTo('tr.model',function(){function Device(model){if(!model)
-throw new Error('Must provide a model.');tr.model.EventContainer.call(this);this.powerSeries_=undefined;this.vSyncTimestamps_=[];};Device.compare=function(x,y){return x.guid-y.guid;};Device.prototype={__proto__:tr.model.EventContainer.prototype,compareTo:function(that){return Device.compare(this,that);},get userFriendlyName(){return'Device';},get userFriendlyDetails(){return'Device';},get stableId(){return'Device';},getSettingsKey:function(){return'device';},get powerSeries(){return this.powerSeries_;},set powerSeries(powerSeries){this.powerSeries_=powerSeries;},get vSyncTimestamps(){return this.vSyncTimestamps_;},set vSyncTimestamps(value){this.vSyncTimestamps_=value;},updateBounds:function(){this.bounds.reset();this.iterateAllChildEventContainers(function(child){child.updateBounds();this.bounds.addRange(child.bounds);},this);},shiftTimestampsForward:function(amount){this.iterateAllChildEventContainers(function(child){child.shiftTimestampsForward(amount);});for(var i=0;i<this.vSyncTimestamps_.length;i++)
-this.vSyncTimestamps_[i]+=amount;},addCategoriesToDict:function(categoriesDict){},iterateAllEventsInThisContainer:function(eventTypePredicate,callback,opt_this){},iterateAllChildEventContainers:function(callback,opt_this){if(this.powerSeries_)
-callback.call(opt_this,this.powerSeries_);}};return{Device:Device};});'use strict';tr.exportTo('tr.model',function(){function FlowEvent(category,id,title,colorId,start,args,opt_duration){tr.model.TimedEvent.call(this,start);this.category=category||'';this.title=title;this.colorId=colorId;this.start=start;this.args=args;this.id=id;this.startSlice=undefined;this.endSlice=undefined;this.startStackFrame=undefined;this.endStackFrame=undefined;if(opt_duration!==undefined)
-this.duration=opt_duration;}
-FlowEvent.prototype={__proto__:tr.model.TimedEvent.prototype,get userFriendlyName(){return'Flow event named '+this.title+' at '+
-tr.b.u.TimeStamp.format(this.timestamp);}};tr.model.EventRegistry.register(FlowEvent,{name:'flowEvent',pluralName:'flowEvents',singleViewElementName:'tr-ui-a-single-flow-event-sub-view',multiViewElementName:'tr-ui-a-multi-flow-event-sub-view'});return{FlowEvent:FlowEvent};});'use strict';tr.exportTo('tr.b',function(){function identity(d){return d;}
-function Statistics(){}
-Statistics.divideIfPossibleOrZero=function(numerator,denominator){if(denominator===0)
-return 0;return numerator/denominator;}
-Statistics.sum=function(ary,opt_func,opt_this){var func=opt_func||identity;var ret=0;for(var i=0;i<ary.length;i++)
-ret+=func.call(opt_this,ary[i],i);return ret;};Statistics.mean=function(ary,opt_func,opt_this){return Statistics.sum(ary,opt_func,opt_this)/ary.length;};Statistics.weightedMean=function(ary,weightCallback,opt_valueCallback,opt_this){var valueCallback=opt_valueCallback||identity;var numerator=0;var denominator=0;for(var i=0;i<ary.length;i++){var weight=weightCallback.call(opt_this,ary[i],i);var value=valueCallback.call(opt_this,ary[i],i);numerator+=weight*value;denominator+=weight;}
-if(denominator===0)
-return undefined;return numerator/denominator;};Statistics.variance=function(ary,opt_func,opt_this){var func=opt_func||identity;var mean=Statistics.mean(ary,func,opt_this);var sumOfSquaredDistances=Statistics.sum(ary,function(d,i){var v=func.call(this,d,i)-mean;return v*v;},opt_this);return sumOfSquaredDistances/(ary.length-1);};Statistics.stddev=function(ary,opt_func,opt_this){return Math.sqrt(Statistics.variance(ary,opt_func,opt_this));};Statistics.max=function(ary,opt_func,opt_this){var func=opt_func||identity;var ret=-Infinity;for(var i=0;i<ary.length;i++)
-ret=Math.max(ret,func.call(opt_this,ary[i],i));return ret;};Statistics.min=function(ary,opt_func,opt_this){var func=opt_func||identity;var ret=Infinity;for(var i=0;i<ary.length;i++)
-ret=Math.min(ret,func.call(opt_this,ary[i],i));return ret;};Statistics.range=function(ary,opt_func,opt_this){var func=opt_func||identity;var ret=new tr.b.Range();for(var i=0;i<ary.length;i++)
-ret.addValue(func.call(opt_this,ary[i],i));return ret;}
-Statistics.percentile=function(ary,percent,opt_func,opt_this){if(!(percent>=0&&percent<=1))
-throw new Error('percent must be [0,1]');var func=opt_func||identity;var tmp=new Array(ary.length);for(var i=0;i<ary.length;i++)
-tmp[i]=func.call(opt_this,ary[i],i);tmp.sort();var idx=Math.floor((ary.length-1)*percent);return tmp[idx];};Statistics.clamp=function(value,opt_low,opt_high){opt_low=opt_low||0.0;opt_high=opt_high||1.0;return Math.min(Math.max(value,opt_low),opt_high);}
-Statistics.normalizeSamples=function(samples){if(samples.length===0){return{normalized_samples:samples,scale:1.0};}
-samples=samples.slice().sort(function(a,b){return a-b;});var low=Math.min.apply(null,samples);var high=Math.max.apply(null,samples);var new_low=0.5/samples.length;var new_high=(samples.length-0.5)/samples.length;if(high-low===0.0){samples=Array.apply(null,new Array(samples.length)).map(function(){return 0.5;});return{normalized_samples:samples,scale:1.0};}
-var scale=(new_high-new_low)/(high-low);for(var i=0;i<samples.length;i++){samples[i]=(samples[i]-low)*scale+new_low;}
-return{normalized_samples:samples,scale:scale};}
-Statistics.discrepancy=function(samples,opt_location_count){if(samples.length===0)
-return 0.0;var max_local_discrepancy=0;var inv_sample_count=1.0/samples.length;var locations=[];var count_less=[];var count_less_equal=[];if(opt_location_count!==undefined){var sample_index=0;for(var i=0;i<opt_location_count;i++){var location=i/(opt_location_count-1);locations.push(location);while(sample_index<samples.length&&samples[sample_index]<location){sample_index+=1;}
-count_less.push(sample_index);while(sample_index<samples.length&&samples[sample_index]<=location){sample_index+=1;}
-count_less_equal.push(sample_index);}}else{if(samples[0]>0.0){locations.push(0.0);count_less.push(0);count_less_equal.push(0);}
-for(var i=0;i<samples.length;i++){locations.push(samples[i]);count_less.push(i);count_less_equal.push(i+1);}
-if(samples[-1]<1.0){locations.push(1.0);count_less.push(samples.length);count_less_equal.push(samples.length);}}
-for(var i=0;i<locations.length;i++){for(var j=i+1;j<locations.length;j++){var length=locations[j]-locations[i];var count_closed=count_less_equal[j]-count_less[i];var local_discrepancy_closed=Math.abs(count_closed*inv_sample_count-length);var max_local_discrepancy=Math.max(local_discrepancy_closed,max_local_discrepancy);var count_open=count_less[j]-count_less_equal[i];var local_discrepancy_open=Math.abs(count_open*inv_sample_count-length);var max_local_discrepancy=Math.max(local_discrepancy_open,max_local_discrepancy);}}
-return max_local_discrepancy;};Statistics.timestampsDiscrepancy=function(timestamps,opt_absolute,opt_location_count){if(timestamps.length===0)
-return 0.0;if(opt_absolute===undefined)
-opt_absolute=true;if(Array.isArray(timestamps[0])){var range_discrepancies=timestamps.map(function(r){return Statistics.timestampsDiscrepancy(r);});return Math.max.apply(null,range_discrepancies);}
-var s=Statistics.normalizeSamples(timestamps);var samples=s.normalized_samples;var sample_scale=s.scale;var discrepancy=Statistics.discrepancy(samples,opt_location_count);var inv_sample_count=1.0/samples.length;if(opt_absolute===true){discrepancy/=sample_scale;}else{discrepancy=Statistics.clamp((discrepancy-inv_sample_count)/(1.0-inv_sample_count));}
-return discrepancy;};Statistics.durationsDiscrepancy=function(durations,opt_absolute,opt_location_count){if(durations.length===0)
-return 0.0;var timestamps=durations.reduce(function(prev,curr,index,array){prev.push(prev[prev.length-1]+curr);return prev;},[0]);return Statistics.timestampsDiscrepancy(timestamps,opt_absolute,opt_location_count);};Statistics.uniformlySampleStream=function(samples,streamLength,newElement,numSamples){if(streamLength<=numSamples){if(samples.length>=streamLength)
-samples[streamLength-1]=newElement;else
-samples.push(newElement);return;}
-var probToKeep=numSamples/streamLength;if(Math.random()>probToKeep)
-return;var index=Math.floor(Math.random()*numSamples);samples[index]=newElement;};Statistics.mergeSampledStreams=function(samplesA,streamLengthA,samplesB,streamLengthB,numSamples){if(streamLengthB<numSamples){var nbElements=Math.min(streamLengthB,samplesB.length);for(var i=0;i<nbElements;++i){Statistics.uniformlySampleStream(samplesA,streamLengthA+i+1,samplesB[i],numSamples);}
-return;}
-if(streamLengthA<numSamples){var nbElements=Math.min(streamLengthA,samplesA.length);var tempSamples=samplesB.slice();for(var i=0;i<nbElements;++i){Statistics.uniformlySampleStream(tempSamples,streamLengthB+i+1,samplesA[i],numSamples);}
-for(var i=0;i<tempSamples.length;++i){samplesA[i]=tempSamples[i];}
-return;}
-var nbElements=Math.min(numSamples,samplesB.length);var probOfSwapping=streamLengthB/(streamLengthA+streamLengthB);for(var i=0;i<nbElements;++i){if(Math.random()<probOfSwapping){samplesA[i]=samplesB[i];}}}
-return{Statistics:Statistics};});'use strict';tr.exportTo('tr.model',function(){var ColorScheme=tr.b.ColorScheme;var Statistics=tr.b.Statistics;var FRAME_PERF_CLASS={GOOD:'good',BAD:'bad',TERRIBLE:'terrible',NEUTRAL:'generic_work'};function Frame(associatedEvents,threadTimeRanges,opt_args){tr.model.Event.call(this);this.threadTimeRanges=threadTimeRanges;this.associatedEvents=new tr.model.EventSet(associatedEvents);this.args=opt_args||{};this.title='Frame';this.start=Statistics.min(threadTimeRanges,function(x){return x.start;});this.end=Statistics.max(threadTimeRanges,function(x){return x.end;});this.totalDuration=Statistics.sum(threadTimeRanges,function(x){return x.end-x.start;});this.perfClass=FRAME_PERF_CLASS.NEUTRAL;};Frame.prototype={__proto__:tr.model.Event.prototype,set perfClass(perfClass){this.colorId=ColorScheme.getColorIdForReservedName(perfClass);this.perfClass_=perfClass;},get perfClass(){return this.perfClass_;},shiftTimestampsForward:function(amount){this.start+=amount;this.end+=amount;for(var i=0;i<this.threadTimeRanges.length;i++){this.threadTimeRanges[i].start+=amount;this.threadTimeRanges[i].end+=amount;}},addBoundsToRange:function(range){range.addValue(this.start);range.addValue(this.end);}};tr.model.EventRegistry.register(Frame,{name:'frame',pluralName:'frames',singleViewElementName:'tr-ui-a-single-frame-sub-view',multiViewElementName:'tr-ui-a-multi-frame-sub-view'});return{Frame:Frame,FRAME_PERF_CLASS:FRAME_PERF_CLASS};});'use strict';tr.exportTo('tr.model',function(){function Attribute(units){this.units=units;this.infos=[];}
-Attribute.fromDictIfPossible=function(dict,opt_model){var typeInfo=Attribute.findTypeInfoMatching(function(typeInfo){return typeInfo.metadata.type===dict.type;});if(typeInfo===undefined){if(opt_model){opt_model.importWarning({type:'attribute_parse_error',message:'Unknown attribute type \''+dict.type+'\'.'});}
-return UnknownAttribute.fromDict(dict,opt_model);}
-return typeInfo.constructor.fromDict(dict,opt_model);};Attribute.findCommonTraits=function(attributes,opt_model){var commonTraits;for(var i=0;i<attributes.length;i++){var attribute=attributes[i];if(attribute===undefined)
-continue;var attributeConstructor=attribute.constructor;var attributeUnits=attribute.units;if(commonTraits===undefined){commonTraits={constructor:attributeConstructor,units:attributeUnits};}else if(attributeConstructor!==commonTraits.constructor){if(opt_model){opt_model.importWarning({type:'attribute_parse_error',message:'Attribute with different types: '+
-commonTraits.constructor+' and '+attributeConstructor+'.'});}
-commonTraits={constructor:UnknownAttribute,units:undefined};break;}else if(attributeUnits!==commonTraits.units){if(opt_model){opt_model.importWarning({type:'attribute_parse_error',message:'Attribute with different units: '+commonTraits.units+' and '+attributeUnits+'.'});}
-commonTraits={constructor:UnknownAttribute,units:undefined};break;}}
-return commonTraits;};Attribute.aggregate=function(childAttributes,existingParentAttribute,opt_model){var definedChildAttributes=childAttributes.filter(function(childAttribute){return childAttribute!==undefined;});var traits=Attribute.findCommonTraits(definedChildAttributes,opt_model);if(traits===undefined)
-return existingParentAttribute;var constructor=traits.constructor;if(constructor.merge===undefined)
-return existingParentAttribute;var mergedAttribute=constructor.merge(definedChildAttributes,traits.units,opt_model);if(existingParentAttribute===undefined)
-return mergedAttribute;existingParentAttribute.useMergedAttribute(mergedAttribute,opt_model);return existingParentAttribute;}
-Attribute.fromTraceValue=function(dict,opt_model){throw new Error('Not implemented');};Attribute.prototype.useMergedAttribute=function(mergedAttribute,opt_model){if(mergedAttribute.constructor!==this.constructor){if(opt_model){opt_model.importWarning({type:'attribute_parse_error',message:'Attribute with different types: '+this.constructor+' and '+mergedAttribute.constructor+'.'});}}else if(mergedAttribute.units!==this.units){if(opt_model){opt_model.importWarning({type:'attribute_parse_error',message:'Attribute with different units: '+this.units+' and '+mergedAttribute.units+'.'});}}};var options=new tr.b.ExtensionRegistryOptions(tr.b.BASIC_REGISTRY_MODE);tr.b.decorateExtensionRegistry(Attribute,options);Attribute.addEventListener('will-register',function(e){if(!e.typeInfo.constructor.hasOwnProperty('fromDict'))
-throw new Error('Attributes must have fromDict method');if(!e.typeInfo.metadata.type)
-throw new Error('Attributes must provide type');if(e.typeInfo.constructor.prototype.constructor!==e.typeInfo.constructor)
-throw new Error('Attribute prototypes must provide constructor.');});function ScalarAttribute(units,value){Attribute.call(this,units);this.value=value;}
-ScalarAttribute.fromDict=function(dict){return new ScalarAttribute(dict.units,parseInt(dict.value,16));};ScalarAttribute.merge=function(childAttributes,units){var sum=0;childAttributes.forEach(function(childAttribute){sum+=childAttribute.value;});return new ScalarAttribute(units,sum);}
-ScalarAttribute.prototype.__proto__=Attribute.prototype;Attribute.register(ScalarAttribute,{type:'scalar'});function StringAttribute(units,value){Attribute.call(this,units);this.value=value;}
-StringAttribute.fromDict=function(dict){return new StringAttribute(dict.units,dict.value);};Attribute.register(StringAttribute,{type:'string'});function UnknownAttribute(units,opt_value){Attribute.call(this,units,opt_value);this.value=opt_value;}
-UnknownAttribute.fromDict=function(dict){return new UnknownAttribute(dict.units);};UnknownAttribute.prototype.__proto__=Attribute.prototype;function AttributeInfo(type,message){this.type=type;this.message=message;}
-var AttributeInfoType={INFORMATION:0,WARNING:1,LINK:2,MEMORY_OWNER:3,MEMORY_OWNED:4,OVERALL_VALUE:5,RECENT_VALUE:6,HAS_HEAP_DUMP:7};return{Attribute:Attribute,ScalarAttribute:ScalarAttribute,StringAttribute:StringAttribute,UnknownAttribute:UnknownAttribute,AttributeInfo:AttributeInfo,AttributeInfoType:AttributeInfoType};});'use strict';tr.exportTo('tr.model',function(){function ContainerMemoryDump(start){tr.model.TimedEvent.call(this,start);this.levelOfDetail=undefined;this.memoryAllocatorDumps_=undefined;this.memoryAllocatorDumpsByFullName_=undefined;};ContainerMemoryDump.prototype={__proto__:tr.model.TimedEvent.prototype,shiftTimestampsForward:function(amount){this.start+=amount;},get memoryAllocatorDumps(){return this.memoryAllocatorDumps_;},set memoryAllocatorDumps(memoryAllocatorDumps){this.memoryAllocatorDumps_=memoryAllocatorDumps;this.memoryAllocatorDumpsByFullName_=undefined;},getMemoryAllocatorDumpByFullName:function(fullName){if(this.memoryAllocatorDumps_===undefined)
-return undefined;if(this.memoryAllocatorDumpsByFullName_===undefined){var index={};function addDumpsToIndex(dumps){dumps.forEach(function(dump){index[dump.fullName]=dump;addDumpsToIndex(dump.children);});};addDumpsToIndex(this.memoryAllocatorDumps_);this.memoryAllocatorDumpsByFullName_=index;}
-return this.memoryAllocatorDumpsByFullName_[fullName];},iterateRootAllocatorDumps:function(fn,opt_this){if(this.memoryAllocatorDumps===undefined)
-return;this.memoryAllocatorDumps.forEach(fn,opt_this||this);}};return{ContainerMemoryDump:ContainerMemoryDump};});'use strict';tr.exportTo('tr.model',function(){function MemoryAllocatorDump(containerMemoryDump,fullName,opt_guid){this.fullName=fullName;this.parent=undefined;this.children=[];this.attributes={};this.containerMemoryDump=containerMemoryDump;this.owns=undefined;this.ownedBy=[];this.retains=[];this.retainedBy=[];this.guid=opt_guid;};MemoryAllocatorDump.SIZE_ATTRIBUTE_NAME='size';MemoryAllocatorDump.EFFECTIVE_SIZE_ATTRIBUTE_NAME='effective_size';MemoryAllocatorDump.RESIDENT_SIZE_ATTRIBUTE_NAME='resident_size';MemoryAllocatorDump.DISPLAYED_SIZE_ATTRIBUTE_NAME=MemoryAllocatorDump.EFFECTIVE_SIZE_ATTRIBUTE_NAME;MemoryAllocatorDump.prototype={get name(){return this.fullName.substring(this.fullName.lastIndexOf('/')+1);},get quantifiedName(){return'\''+this.fullName+'\' in '+
-this.containerMemoryDump.containerName;},isDescendantOf:function(otherDump){var dump=this;while(dump!==undefined){if(dump===otherDump)
-return true;dump=dump.parent;}
-return false;},addAttribute:function(name,value){if(name in this.attributes)
-throw new Error('Duplicate attribute name: '+name+'.');this.attributes[name]=value;},aggregateAttributes:function(opt_model){var attributes={};this.children.forEach(function(child){child.aggregateAttributes(opt_model);tr.b.iterItems(child.attributes,function(name){attributes[name]=true;},this);},this);tr.b.iterItems(attributes,function(name){var childAttributes=this.children.map(function(child){return child.attributes[name];},this);var currentAttribute=this.attributes[name];this.attributes[name]=tr.model.Attribute.aggregate(childAttributes,currentAttribute,opt_model);},this);},getValidSizeAttributeOrUndefined:function(sizeAttrName,opt_model){var sizeAttr=this.attributes[sizeAttrName];if(sizeAttr===undefined)
-return undefined;if(!(sizeAttr instanceof tr.model.ScalarAttribute)){if(opt_model!==undefined){opt_model.importWarning({type:'memory_dump_parse_error',message:'\''+sizeAttrName+'\' attribute of memory allocator '+'dump \''+memoryAllocatorDump.fullName+'\' is not a scalar.'});}
-return undefined;}
-return sizeAttr;}};function MemoryAllocatorDumpLink(source,target,opt_importance){this.source=source;this.target=target;this.importance=opt_importance;}
-return{MemoryAllocatorDump:MemoryAllocatorDump,MemoryAllocatorDumpLink:MemoryAllocatorDumpLink};});'use strict';tr.exportTo('tr.model',function(){function GlobalMemoryDump(model,start){tr.model.ContainerMemoryDump.call(this,start);this.model=model;this.processMemoryDumps={};}
-var SIZE_ATTRIBUTE_NAME=tr.model.MemoryAllocatorDump.SIZE_ATTRIBUTE_NAME;var EFFECTIVE_SIZE_ATTRIBUTE_NAME=tr.model.MemoryAllocatorDump.EFFECTIVE_SIZE_ATTRIBUTE_NAME;function getSize(dump){var attr=dump.attributes[SIZE_ATTRIBUTE_NAME];if(attr===undefined)
-return 0;return attr.value;}
-function hasSize(dump){return dump.attributes[SIZE_ATTRIBUTE_NAME]!==undefined;}
-function optional(value,defaultValue){if(value===undefined)
-return defaultValue;return value;}
-function ownershipToUserFriendlyString(dump,importance){return dump.quantifiedName+' (importance: '+
-optional(importance,0)+')';}
-GlobalMemoryDump.prototype={__proto__:tr.model.ContainerMemoryDump.prototype,get userFriendlyName(){return'Global memory dump at '+tr.b.u.TimeStamp.format(this.start);},get containerName(){return'global space';},calculateGraphAttributes:function(){this.setUpTracingOverheadOwnership();this.calculateSizes();this.calculateEffectiveSizes();this.aggregateAttributes();this.discountTracingOverheadFromVmRegions();},calculateSizes:function(){this.traverseAllocatorDumpsInDepthFirstPostOrder(this.calculateMemoryAllocatorDumpSize_.bind(this));},calculateMemoryAllocatorDumpSize_:function(dump){var shouldDefineSize=false;function getDependencySize(dependencyDump){var attr=dependencyDump.attributes[SIZE_ATTRIBUTE_NAME];if(attr===undefined)
-return 0;shouldDefineSize=true;return attr.value;}
-var sizeAttribute=dump.getValidSizeAttributeOrUndefined(SIZE_ATTRIBUTE_NAME,this.model);var size=0;var infos=[];var checkDependentSizeIsConsistent=function(){};if(sizeAttribute!==undefined){size=sizeAttribute.value;shouldDefineSize=true;checkDependentSizeIsConsistent=function(dependentSize,dependentName){if(size>=dependentSize)
-return;var messageSuffix=' ('+tr.b.u.Units.sizeInBytes.format(size)+') is less than '+dependentName+' ('+
-tr.b.u.Units.sizeInBytes.format(dependentSize)+').';this.model.importWarning({type:'memory_dump_parse_error',message:'Size provided by memory allocator dump \''+
-dump.fullName+'\''+messageSuffix});infos.push(new tr.model.AttributeInfo(tr.model.AttributeInfoType.WARNING,'Size provided by this memory allocator dump'+messageSuffix));}.bind(this);}
-var aggregatedChildrenSize=0;var allOverlaps={};dump.children.forEach(function(childDump){function aggregateDescendantDump(descendantDump){var ownedDumpLink=descendantDump.owns;if(ownedDumpLink!==undefined&&ownedDumpLink.target.isDescendantOf(dump)){var ownedDescendantDump=ownedDumpLink.target;var ownedChildDump=ownedDescendantDump;while(ownedChildDump.parent!==dump)
-ownedChildDump=ownedChildDump.parent;if(childDump!==ownedChildDump){var overlap=getDependencySize(descendantDump);if(overlap>0){var ownedChildOverlaps=allOverlaps[ownedChildDump.name];if(ownedChildOverlaps===undefined)
-allOverlaps[ownedChildDump.name]=ownedChildOverlaps={};var previousTotalOverlap=ownedChildOverlaps[childDump.name]||0;var updatedTotalOverlap=previousTotalOverlap+overlap;ownedChildOverlaps[childDump.name]=updatedTotalOverlap;}}
-return;}
-if(descendantDump.children.length===0){aggregatedChildrenSize+=getDependencySize(descendantDump);return;}
-descendantDump.children.forEach(aggregateDescendantDump);}
-aggregateDescendantDump(childDump);});dump.children.forEach(function(childDump){var childOverlaps=allOverlaps[childDump.name];if(childOverlaps===undefined)
-return;var message=tr.b.dictionaryValues(tr.b.mapItems(childOverlaps,function(ownerChildName,overlap){return'overlaps with its sibling \''+ownerChildName+'\' ('+
-tr.b.u.Units.sizeInBytes.format(overlap)+')';})).join(' ');childDump.attributes[SIZE_ATTRIBUTE_NAME].infos.push(new tr.model.AttributeInfo(tr.model.AttributeInfoType.INFORMATION,message));});checkDependentSizeIsConsistent(aggregatedChildrenSize,'the aggregated size of its children');var largestOwnerSize=0;dump.ownedBy.forEach(function(ownershipLink){var owner=ownershipLink.source;var ownerSize=getDependencySize(owner);largestOwnerSize=Math.max(largestOwnerSize,ownerSize);});checkDependentSizeIsConsistent(largestOwnerSize,'the size of its largest owner');if(!shouldDefineSize){dump.attributes[SIZE_ATTRIBUTE_NAME]=undefined;return;}
-size=Math.max(size,aggregatedChildrenSize,largestOwnerSize);var sizeAttribute=new tr.model.ScalarAttribute('bytes',size);sizeAttribute.infos=infos;dump.attributes[SIZE_ATTRIBUTE_NAME]=sizeAttribute;if(aggregatedChildrenSize<size&&dump.children!==undefined&&dump.children.length>0){var virtualChild=new tr.model.MemoryAllocatorDump(dump.containerMemoryDump,dump.fullName+'/<unspecified>');virtualChild.parent=dump;dump.children.unshift(virtualChild);virtualChild.attributes[SIZE_ATTRIBUTE_NAME]=new tr.model.ScalarAttribute('bytes',size-aggregatedChildrenSize);}},calculateEffectiveSizes:function(){this.traverseAllocatorDumpsInDepthFirstPostOrder(this.calculateDumpSubSizes_.bind(this));this.traverseAllocatorDumpsInDepthFirstPostOrder(this.calculateDumpOwnershipCoefficient_.bind(this));this.traverseAllocatorDumpsInDepthFirstPreOrder(this.calculateDumpCumulativeOwnershipCoefficient_.bind(this));this.traverseAllocatorDumpsInDepthFirstPostOrder(this.calculateDumpEffectiveSize_.bind(this));},calculateDumpSubSizes_:function(dump){if(!hasSize(dump))
-return;if(dump.children===undefined||dump.children.length===0){var size=getSize(dump);dump.notOwningSubSize_=size;dump.notOwnedSubSize_=size;return;}
-var notOwningSubSize=0;dump.children.forEach(function(childDump){if(childDump.owns!==undefined)
-return;notOwningSubSize+=optional(childDump.notOwningSubSize_,0);});dump.notOwningSubSize_=notOwningSubSize;var notOwnedSubSize=0;dump.children.forEach(function(childDump){if(childDump.ownedBy.length===0){notOwnedSubSize+=optional(childDump.notOwnedSubSize_,0);return;}
-var largestChildOwnerSize=0;childDump.ownedBy.forEach(function(ownershipLink){largestChildOwnerSize=Math.max(largestChildOwnerSize,getSize(ownershipLink.source));});notOwnedSubSize+=getSize(childDump)-largestChildOwnerSize;});dump.notOwnedSubSize_=notOwnedSubSize;},calculateDumpOwnershipCoefficient_:function(dump){if(!hasSize(dump))
-return;if(dump.ownedBy.length===0)
-return;var owners=dump.ownedBy.map(function(ownershipLink){return{dump:ownershipLink.source,importance:optional(ownershipLink.importance,0),notOwningSubSize:optional(ownershipLink.source.notOwningSubSize_,0)};});owners.sort(function(a,b){if(a.importance===b.importance)
-return a.notOwningSubSize-b.notOwningSubSize;return b.importance-a.importance;});var currentImportanceStartPos=0;var alreadyAttributedSubSize=0;while(currentImportanceStartPos<owners.length){var currentImportance=owners[currentImportanceStartPos].importance;var nextImportanceStartPos=currentImportanceStartPos+1;while(nextImportanceStartPos<owners.length&&owners[nextImportanceStartPos].importance===currentImportance){nextImportanceStartPos++;}
-var attributedNotOwningSubSize=0;for(var pos=currentImportanceStartPos;pos<nextImportanceStartPos;pos++){var owner=owners[pos];var notOwningSubSize=owner.notOwningSubSize;if(notOwningSubSize>alreadyAttributedSubSize){attributedNotOwningSubSize+=(notOwningSubSize-alreadyAttributedSubSize)/(nextImportanceStartPos-pos);alreadyAttributedSubSize=notOwningSubSize;}
-var owningCoefficient=0;if(notOwningSubSize!==0)
-owningCoefficient=attributedNotOwningSubSize/notOwningSubSize;owner.dump.owningCoefficient_=owningCoefficient;}
-currentImportanceStartPos=nextImportanceStartPos;}
-var notOwnedSubSize=optional(dump.notOwnedSubSize_,0);var remainderSubSize=notOwnedSubSize-alreadyAttributedSubSize;var ownedCoefficient=0;if(notOwnedSubSize!==0)
-ownedCoefficient=remainderSubSize/notOwnedSubSize;dump.ownedCoefficient_=ownedCoefficient;},calculateDumpCumulativeOwnershipCoefficient_:function(dump){if(!hasSize(dump))
-return;var cumulativeOwnedCoefficient=optional(dump.ownedCoefficient_,1);var parent=dump.parent;if(dump.parent!==undefined)
-cumulativeOwnedCoefficient*=dump.parent.cumulativeOwnedCoefficient_;dump.cumulativeOwnedCoefficient_=cumulativeOwnedCoefficient;var cumulativeOwningCoefficient;if(dump.owns!==undefined){cumulativeOwningCoefficient=dump.owningCoefficient_*dump.owns.target.cumulativeOwningCoefficient_;}else if(dump.parent!==undefined){cumulativeOwningCoefficient=dump.parent.cumulativeOwningCoefficient_;}else{cumulativeOwningCoefficient=1;}
-dump.cumulativeOwningCoefficient_=cumulativeOwningCoefficient;},calculateDumpEffectiveSize_:function(dump){if(!hasSize(dump)){dump.attributes[EFFECTIVE_SIZE_ATTRIBUTE_NAME]=undefined;return;}
-var effectiveSize;if(dump.children===undefined||dump.children.length===0){effectiveSize=getSize(dump)*dump.cumulativeOwningCoefficient_*dump.cumulativeOwnedCoefficient_;}else{effectiveSize=0;dump.children.forEach(function(childDump){if(!hasSize(childDump))
-return;effectiveSize+=childDump.attributes[EFFECTIVE_SIZE_ATTRIBUTE_NAME].value;});}
-var attribute=new tr.model.ScalarAttribute('bytes',effectiveSize);dump.attributes[EFFECTIVE_SIZE_ATTRIBUTE_NAME]=attribute;if(dump.ownedBy.length>0){var message='shared by:'+
-dump.ownedBy.map(function(ownershipLink){return'\n  - '+ownershipToUserFriendlyString(ownershipLink.source,ownershipLink.importance);}).join();attribute.infos.push(new tr.model.AttributeInfo(tr.model.AttributeInfoType.MEMORY_OWNED,message));}
-if(dump.owns!==undefined){var target=dump.owns.target;var message='shares '+
-ownershipToUserFriendlyString(target,dump.owns.importance)+' with';var otherOwnershipLinks=target.ownedBy.filter(function(ownershipLink){return ownershipLink.source!==dump;});if(otherOwnershipLinks.length>0){message+=':';message+=otherOwnershipLinks.map(function(ownershipLink){return'\n  - '+ownershipToUserFriendlyString(ownershipLink.source,ownershipLink.importance);}).join();}else{message+=' no other dumps';}
-attribute.infos.push(new tr.model.AttributeInfo(tr.model.AttributeInfoType.MEMORY_OWNER,message));}},aggregateAttributes:function(){this.iterateRootAllocatorDumps(function(dump){dump.aggregateAttributes(this.model);});this.iterateRootAllocatorDumps(this.propagateAttributesRecursively);tr.b.iterItems(this.processMemoryDumps,function(pid,processMemoryDump){processMemoryDump.iterateRootAllocatorDumps(function(dump){dump.aggregateAttributes(this.model);},this);},this);},propagateAttributesRecursively:function(globalAllocatorDump){tr.b.iterItems(globalAllocatorDump.attributes,function(attrName,attr){if(attrName===SIZE_ATTRIBUTE_NAME||attrName===EFFECTIVE_SIZE_ATTRIBUTE_NAME){return;}
-globalAllocatorDump.ownedBy.forEach(function(ownershipLink){var processAllocatorDump=ownershipLink.source;if(processAllocatorDump.attributes[attrName]!==undefined){return;}
-processAllocatorDump.attributes[attrName]=attr;});});globalAllocatorDump.children.forEach(this.propagateAttributesRecursively,this);},setUpTracingOverheadOwnership:function(){tr.b.iterItems(this.processMemoryDumps,function(pid,dump){dump.setUpTracingOverheadOwnership(this.model);},this);},discountTracingOverheadFromVmRegions:function(){tr.b.iterItems(this.processMemoryDumps,function(pid,dump){dump.discountTracingOverheadFromVmRegions(this.model);},this);},iterateContainerDumps:function(fn){fn.call(this,this);tr.b.iterItems(this.processMemoryDumps,function(pid,processDump){fn.call(this,processDump);},this);},iterateAllRootAllocatorDumps:function(fn){this.iterateContainerDumps(function(containerDump){containerDump.iterateRootAllocatorDumps(fn,this);});},traverseAllocatorDumpsInDepthFirstPostOrder:function(fn){var visitedDumps=new WeakSet();var openDumps=new WeakSet();function visit(dump){if(visitedDumps.has(dump))
-return;if(openDumps.has(dump))
-throw new Error(dump.userFriendlyName+' contains a cycle');openDumps.add(dump);dump.ownedBy.forEach(function(ownershipLink){visit.call(this,ownershipLink.source);},this);dump.children.forEach(visit,this);fn.call(this,dump);visitedDumps.add(dump);openDumps.delete(dump);}
-this.iterateAllRootAllocatorDumps(visit);},traverseAllocatorDumpsInDepthFirstPreOrder:function(fn){var visitedDumps=new WeakSet();function visit(dump){if(visitedDumps.has(dump))
-return;if(dump.owns!==undefined&&!visitedDumps.has(dump.owns.target))
-return;if(dump.parent!==undefined&&!visitedDumps.has(dump.parent))
-return;fn.call(this,dump);visitedDumps.add(dump);dump.ownedBy.forEach(function(ownershipLink){visit.call(this,ownershipLink.source);},this);dump.children.forEach(visit,this);}
-this.iterateAllRootAllocatorDumps(visit);}};tr.model.EventRegistry.register(GlobalMemoryDump,{name:'globalMemoryDump',pluralName:'globalMemoryDumps',singleViewElementName:'tr-ui-a-container-memory-dump-sub-view',multiViewElementName:'tr-ui-a-container-memory-dump-sub-view'});return{GlobalMemoryDump:GlobalMemoryDump};});'use strict';tr.exportTo('tr.model',function(){var InstantEventType={GLOBAL:1,PROCESS:2};function InstantEvent(category,title,colorId,start,args){tr.model.TimedEvent.call(this);this.category=category||'';this.title=title;this.colorId=colorId;this.start=start;this.args=args;this.type=undefined;};InstantEvent.prototype={__proto__:tr.model.TimedEvent.prototype};function GlobalInstantEvent(category,title,colorId,start,args){InstantEvent.apply(this,arguments);this.type=InstantEventType.GLOBAL;};GlobalInstantEvent.prototype={__proto__:InstantEvent.prototype,get userFriendlyName(){return'Global instant event '+this.title+' @ '+
-tr.b.u.TimeStamp.format(start);}};function ProcessInstantEvent(category,title,colorId,start,args){InstantEvent.apply(this,arguments);this.type=InstantEventType.PROCESS;};ProcessInstantEvent.prototype={__proto__:InstantEvent.prototype,get userFriendlyName(){return'Process-level instant event '+this.title+' @ '+
-tr.b.u.TimeStamp.format(start);}};tr.model.EventRegistry.register(InstantEvent,{name:'instantEvent',pluralName:'instantEvents',singleViewElementName:'tr-ui-a-single-instant-event-sub-view',multiViewElementName:'tr-ui-a-multi-instant-event-sub-view'});return{GlobalInstantEvent:GlobalInstantEvent,ProcessInstantEvent:ProcessInstantEvent,InstantEventType:InstantEventType,InstantEvent:InstantEvent};});'use strict';tr.exportTo('tr.model',function(){var CompoundEventSelectionState={NOT_SELECTED:0,EVENT_SELECTED:0x1,SOME_ASSOCIATED_EVENTS_SELECTED:0x2,ALL_ASSOCIATED_EVENTS_SELECTED:0x4,EVENT_AND_SOME_ASSOCIATED_SELECTED:0x1|0x2,EVENT_AND_ALL_ASSOCIATED_SELECTED:0x1|0x4};return{CompoundEventSelectionState:CompoundEventSelectionState};});'use strict';tr.exportTo('tr.model',function(){var CompoundEventSelectionState=tr.model.CompoundEventSelectionState;function InteractionRecord(parentModel,title,colorId,start,duration){tr.model.TimedEvent.call(this,start);this.title=title;this.colorId=colorId;this.duration=duration;this.args={};this.associatedEvents=new tr.model.EventSet();this.parentModel=parentModel;this.sourceEvents=new tr.model.EventSet();}
-InteractionRecord.prototype={__proto__:tr.model.TimedEvent.prototype,get subSlices(){return[];},get userFriendlyName(){return this.title+' interaction at '+
-tr.b.u.TimeStamp.format(this.start);},get stableId(){return'IR.'+this.parentModel.interactionRecords.indexOf(this);},computeCompoundEvenSelectionState:function(selection){var cess=CompoundEventSelectionState.NOT_SELECTED;if(selection.contains(this))
-cess|=CompoundEventSelectionState.EVENT_SELECTED;if(this.associatedEvents.intersectionIsEmpty(selection))
-return cess;var allContained=this.associatedEvents.every(function(event){return selection.contains(event);});if(allContained)
-cess|=CompoundEventSelectionState.ALL_ASSOCIATED_EVENTS_SELECTED;else
-cess|=CompoundEventSelectionState.SOME_ASSOCIATED_EVENTS_SELECTED;return cess;}};tr.model.EventRegistry.register(InteractionRecord,{name:'interaction',pluralName:'interactions',singleViewElementName:'tr-ui-a-single-interaction-record-sub-view',multiViewElementName:'tr-ui-a-multi-interaction-record-sub-view'});return{InteractionRecord:InteractionRecord};});'use strict';tr.exportTo('tr.b',function(){function findLowIndexInSortedArray(ary,mapFn,loVal){if(ary.length==0)
+tr.v.Unit.byName.timeStampInMs.format(this.start);}};tr.model.EventRegistry.register(Alert,{name:'alert',pluralName:'alerts',singleViewElementName:'tr-ui-a-alert-sub-view',multiViewElementName:'tr-ui-a-alert-sub-view'});return{Alert:Alert};});'use strict';tr.exportTo('tr.model',function(){var ColorScheme=tr.b.ColorScheme;var Statistics=tr.b.Statistics;var FRAME_PERF_CLASS={GOOD:'good',BAD:'bad',TERRIBLE:'terrible',NEUTRAL:'generic_work'};function Frame(associatedEvents,threadTimeRanges,opt_args){tr.model.Event.call(this);this.threadTimeRanges=threadTimeRanges;this.associatedEvents=new tr.model.EventSet(associatedEvents);this.args=opt_args||{};this.title='Frame';this.start=Statistics.min(threadTimeRanges,function(x){return x.start;});this.end=Statistics.max(threadTimeRanges,function(x){return x.end;});this.totalDuration=Statistics.sum(threadTimeRanges,function(x){return x.end-x.start;});this.perfClass=FRAME_PERF_CLASS.NEUTRAL;};Frame.prototype={__proto__:tr.model.Event.prototype,set perfClass(perfClass){this.colorId=ColorScheme.getColorIdForReservedName(perfClass);this.perfClass_=perfClass;},get perfClass(){return this.perfClass_;},shiftTimestampsForward:function(amount){this.start+=amount;this.end+=amount;for(var i=0;i<this.threadTimeRanges.length;i++){this.threadTimeRanges[i].start+=amount;this.threadTimeRanges[i].end+=amount;}},addBoundsToRange:function(range){range.addValue(this.start);range.addValue(this.end);}};tr.model.EventRegistry.register(Frame,{name:'frame',pluralName:'frames',singleViewElementName:'tr-ui-a-single-frame-sub-view',multiViewElementName:'tr-ui-a-multi-frame-sub-view'});return{Frame:Frame,FRAME_PERF_CLASS:FRAME_PERF_CLASS};});'use strict';tr.exportTo('tr.b',function(){function findLowIndexInSortedArray(ary,mapFn,loVal){if(ary.length==0)
 return 1;var low=0;var high=ary.length-1;var i,comparison;var hitPos=-1;while(low<=high){i=Math.floor((low+high)/2);comparison=mapFn(ary[i])-loVal;if(comparison<0){low=i+1;continue;}else if(comparison>0){high=i-1;continue;}else{hitPos=i;high=i-1;}}
 return hitPos!=-1?hitPos:low;}
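The two search helpers in this hunk are lower/upper-bound binary searches over an array viewed through a mapping function. `findLowIndexInSortedArray` returns the lowest index whose mapped value equals `loVal`, and otherwise the insertion point (with a historical quirk of returning 1 for an empty array). A self-contained sketch of that behaviour; the helper name `lowerBound` and the sample timestamps are illustrative only:

```js
// Minimal mirror of findLowIndexInSortedArray, for illustration only.
// Returns the lowest index i with mapFn(ary[i]) === loVal, otherwise the
// index where loVal would be inserted to keep the array sorted.
function lowerBound(ary, mapFn, loVal) {
  if (ary.length === 0)
    return 1;  // quirk preserved from the implementation above
  var low = 0, high = ary.length - 1, hitPos = -1;
  while (low <= high) {
    var i = Math.floor((low + high) / 2);
    var comparison = mapFn(ary[i]) - loVal;
    if (comparison < 0) { low = i + 1; }
    else if (comparison > 0) { high = i - 1; }
    else { hitPos = i; high = i - 1; }
  }
  return hitPos !== -1 ? hitPos : low;
}

var timestamps = [0, 10, 10, 25, 40];
console.log(lowerBound(timestamps, function(x) { return x; }, 10));  // 1 (first exact match)
console.log(lowerBound(timestamps, function(x) { return x; }, 12));  // 3 (insertion point)
console.log(lowerBound(timestamps, function(x) { return x; }, 99));  // 5 (past the end)
```

The model code relies on this lookup for sorted timestamp arrays, e.g. to position counter samples and vsync timestamps.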
 function findHighIndexInSortedArray(ary,mapFn,loVal,hiVal){var lo=loVal||0;var hi=hiVal!==undefined?hiVal:ary.length;while(lo<hi){var mid=(lo+hi)>>1;if(mapFn(ary[mid])>=0)
@@ -2883,27 +2681,62 @@
 return null;if(loDiff<hiDiff)
 return loInt;else
 return hiInt;}
-return{findLowIndexInSortedArray:findLowIndexInSortedArray,findHighIndexInSortedArray:findHighIndexInSortedArray,findIndexInSortedIntervals:findIndexInSortedIntervals,findIndexInSortedClosedIntervals:findIndexInSortedClosedIntervals,iterateOverIntersectingIntervals:iterateOverIntersectingIntervals,getIntersectingIntervals:getIntersectingIntervals,findClosestElementInSortedArray:findClosestElementInSortedArray,findClosestIntervalInSortedIntervals:findClosestIntervalInSortedIntervals};});'use strict';tr.exportTo('tr.model',function(){function CounterSample(series,timestamp,value){tr.model.Event.call(this);this.series_=series;this.timestamp_=timestamp;this.value_=value;}
-CounterSample.groupByTimestamp=function(samples){var samplesByTimestamp=tr.b.group(samples,function(sample){return sample.timestamp;});var timestamps=tr.b.dictionaryKeys(samplesByTimestamp);timestamps.sort();var groups=[];for(var i=0;i<timestamps.length;i++){var ts=timestamps[i];var group=samplesByTimestamp[ts];group.sort(function(x,y){return x.series.seriesIndex-y.series.seriesIndex;});groups.push(group);}
-return groups;}
-CounterSample.prototype={__proto__:tr.model.Event.prototype,get series(){return this.series_;},get timestamp(){return this.timestamp_;},get value(){return this.value_;},set timestamp(timestamp){this.timestamp_=timestamp;},addBoundsToRange:function(range){range.addValue(this.timestamp);},getSampleIndex:function(){return tr.b.findLowIndexInSortedArray(this.series.timestamps,function(x){return x;},this.timestamp_);},get userFriendlyName(){return'Counter sample from '+this.series_.title+' at '+
-tr.b.u.TimeStamp.format(this.timestamp);}};tr.model.EventRegistry.register(CounterSample,{name:'counterSample',pluralName:'counterSamples',singleViewElementName:'tr-ui-a-counter-sample-sub-view',multiViewElementName:'tr-ui-a-counter-sample-sub-view'});return{CounterSample:CounterSample};});'use strict';tr.exportTo('tr.model',function(){var CounterSample=tr.model.CounterSample;function CounterSeries(name,color){tr.model.EventContainer.call(this);this.name_=name;this.color_=color;this.timestamps_=[];this.samples_=[];this.counter=undefined;this.seriesIndex=undefined;}
-CounterSeries.prototype={__proto__:tr.model.EventContainer.prototype,get length(){return this.timestamps_.length;},get name(){return this.name_;},get color(){return this.color_;},get samples(){return this.samples_;},get timestamps(){return this.timestamps_;},getSample:function(idx){return this.samples_[idx];},getTimestamp:function(idx){return this.timestamps_[idx];},addCounterSample:function(ts,val){var sample=new CounterSample(this,ts,val);this.addSample(sample);return sample;},addSample:function(sample){this.timestamps_.push(sample.timestamp);this.samples_.push(sample);},getStatistics:function(sampleIndices){var sum=0;var min=Number.MAX_VALUE;var max=-Number.MAX_VALUE;for(var i=0;i<sampleIndices.length;++i){var sample=this.getSample(sampleIndices[i]).value;sum+=sample;min=Math.min(sample,min);max=Math.max(sample,max);}
-return{min:min,max:max,avg:(sum/sampleIndices.length),start:this.getSample(sampleIndices[0]).value,end:this.getSample(sampleIndices.length-1).value};},shiftTimestampsForward:function(amount){for(var i=0;i<this.timestamps_.length;++i){this.timestamps_[i]+=amount;this.samples_[i].timestamp=this.timestamps_[i];}},iterateAllEventsInThisContainer:function(eventTypePredicate,callback,opt_this){if(eventTypePredicate.call(opt_this,tr.model.CounterSample)){this.samples_.forEach(callback,opt_this);}},iterateAllChildEventContainers:function(callback,opt_this){}};return{CounterSeries:CounterSeries};});'use strict';tr.exportTo('tr.model',function(){function Counter(parent,id,category,name){tr.model.EventContainer.call(this);this.parent_=parent;this.id_=id;this.category_=category||'';this.name_=name;this.series_=[];this.totals=[];}
-Counter.prototype={__proto__:tr.model.EventContainer.prototype,get parent(){return this.parent_;},get id(){return this.id_;},get category(){return this.category_;},get name(){return this.name_;},iterateAllEventsInThisContainer:function(eventTypePredicate,callback,opt_this){},iterateAllChildEventContainers:function(callback,opt_this){for(var i=0;i<this.series_.length;i++)
-callback.call(opt_this,this.series_[i]);},set timestamps(arg){throw new Error('Bad counter API. No cookie.');},set seriesNames(arg){throw new Error('Bad counter API. No cookie.');},set seriesColors(arg){throw new Error('Bad counter API. No cookie.');},set samples(arg){throw new Error('Bad counter API. No cookie.');},addSeries:function(series){series.counter=this;series.seriesIndex=this.series_.length;this.series_.push(series);return series;},getSeries:function(idx){return this.series_[idx];},get series(){return this.series_;},get numSeries(){return this.series_.length;},get numSamples(){if(this.series_.length===0)
-return 0;return this.series_[0].length;},get timestamps(){if(this.series_.length===0)
-return[];return this.series_[0].timestamps;},getSampleStatistics:function(sampleIndices){sampleIndices.sort();var ret=[];this.series_.forEach(function(series){ret.push(series.getStatistics(sampleIndices));});return ret;},shiftTimestampsForward:function(amount){for(var i=0;i<this.series_.length;++i)
-this.series_[i].shiftTimestampsForward(amount);},updateBounds:function(){this.totals=[];this.maxTotal=0;this.bounds.reset();if(this.series_.length===0)
-return;var firstSeries=this.series_[0];var lastSeries=this.series_[this.series_.length-1];this.bounds.addValue(firstSeries.getTimestamp(0));this.bounds.addValue(lastSeries.getTimestamp(lastSeries.length-1));var numSeries=this.numSeries;this.maxTotal=-Infinity;for(var i=0;i<firstSeries.length;++i){var total=0;this.series_.forEach(function(series){total+=series.getSample(i).value;this.totals.push(total);}.bind(this));this.maxTotal=Math.max(total,this.maxTotal);}}};Counter.compare=function(x,y){var tmp=x.parent.compareTo(y);if(tmp!=0)
-return tmp;var tmp=x.name.localeCompare(y.name);if(tmp==0)
-return x.tid-y.tid;return tmp;};return{Counter:Counter};});'use strict';tr.exportTo('tr.model',function(){function Slice(category,title,colorId,start,args,opt_duration,opt_cpuStart,opt_cpuDuration,opt_argsStripped,opt_bind_id){tr.model.TimedEvent.call(this,start);this.category=category||'';this.title=title;this.colorId=colorId;this.args=args;this.startStackFrame=undefined;this.endStackFrame=undefined;this.didNotFinish=false;this.inFlowEvents=[];this.outFlowEvents=[];this.subSlices=[];this.selfTime=undefined;this.cpuSelfTime=undefined;this.important=false;this.parentContainer=undefined;this.argsStripped=false;this.bind_id_=opt_bind_id;this.parentSlice=undefined;this.isTopLevel=false;if(opt_duration!==undefined)
+return{findLowIndexInSortedArray:findLowIndexInSortedArray,findHighIndexInSortedArray:findHighIndexInSortedArray,findIndexInSortedIntervals:findIndexInSortedIntervals,findIndexInSortedClosedIntervals:findIndexInSortedClosedIntervals,iterateOverIntersectingIntervals:iterateOverIntersectingIntervals,getIntersectingIntervals:getIntersectingIntervals,findClosestElementInSortedArray:findClosestElementInSortedArray,findClosestIntervalInSortedIntervals:findClosestIntervalInSortedIntervals};});'use strict';tr.exportTo('tr.model.helpers',function(){var Frame=tr.model.Frame;var Statistics=tr.b.Statistics;var UI_DRAW_TYPE={NONE:'none',LEGACY:'legacy',MARSHMALLOW:'marshmallow'};var UI_THREAD_DRAW_NAMES={'performTraversals':UI_DRAW_TYPE.LEGACY,'Choreographer#doFrame':UI_DRAW_TYPE.MARSHMALLOW};var RENDER_THREAD_DRAW_NAME='DrawFrame';var RENDER_THREAD_INDEP_DRAW_NAME='doFrame';var THREAD_SYNC_NAME='syncFrameState';function getSlicesForThreadTimeRanges(threadTimeRanges){var ret=[];threadTimeRanges.forEach(function(threadTimeRange){var slices=[];threadTimeRange.thread.sliceGroup.iterSlicesInTimeRange(function(slice){slices.push(slice);},threadTimeRange.start,threadTimeRange.end);ret.push.apply(ret,slices);});return ret;}
+function makeFrame(threadTimeRanges,surfaceFlinger){var args={};if(surfaceFlinger&&surfaceFlinger.hasVsyncs){var start=Statistics.min(threadTimeRanges,function(threadTimeRanges){return threadTimeRanges.start;});args['deadline']=surfaceFlinger.getFrameDeadline(start);args['frameKickoff']=surfaceFlinger.getFrameKickoff(start);}
+var events=getSlicesForThreadTimeRanges(threadTimeRanges);return new Frame(events,threadTimeRanges,args);}
+function findOverlappingDrawFrame(renderThread,time){if(!renderThread)
+return undefined;var slices=renderThread.sliceGroup.slices;for(var i=0;i<slices.length;i++){var slice=slices[i];if(slice.title==RENDER_THREAD_DRAW_NAME&&slice.start<=time&&time<=slice.end){return slice;}}
+return undefined;}
+function getPreTraversalWorkRanges(uiThread){if(!uiThread)
+return[];var preFrameEvents=[];uiThread.sliceGroup.slices.forEach(function(slice){if(slice.title=='obtainView'||slice.title=='setupListItem'||slice.title=='deliverInputEvent'||slice.title=='RV Scroll')
+preFrameEvents.push(slice);});uiThread.asyncSliceGroup.slices.forEach(function(slice){if(slice.title=='deliverInputEvent')
+preFrameEvents.push(slice);});return tr.b.mergeRanges(tr.b.convertEventsToRanges(preFrameEvents),3,function(events){return{start:events[0].min,end:events[events.length-1].max};});}
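`getPreTraversalWorkRanges` above relies on `tr.b.convertEventsToRanges` and `tr.b.mergeRanges` (defined elsewhere in this bundle) to coalesce input-delivery and list-binding slices that occur within 3 ms of each other into single work ranges. A simplified, self-contained stand-in for that coalescing step, using plain `{start, end}` objects instead of the real `tr.b.Range` type; the sample events are made up:

```js
// Illustrative stand-in for the mergeRanges(convertEventsToRanges(...), 3, ...)
// call above: sort events by start time and fuse any event that begins within
// mergeThresholdMs of the previous range's end.
function mergeNearbyEvents(events, mergeThresholdMs) {
  var sorted = events.slice().sort(function(a, b) { return a.start - b.start; });
  var merged = [];
  sorted.forEach(function(e) {
    var last = merged[merged.length - 1];
    if (last && e.start - last.end <= mergeThresholdMs) {
      last.end = Math.max(last.end, e.end);            // extend the open range
    } else {
      merged.push({start: e.start, end: e.end});       // start a new range
    }
  });
  return merged;
}

// Hypothetical pre-frame events: two bursts of work separated by a long gap.
var preFrameEvents = [
  {start: 100.0, end: 100.8},   // deliverInputEvent
  {start: 101.2, end: 102.5},   // obtainView
  {start: 120.0, end: 120.4}    // RV Scroll, much later
];
console.log(mergeNearbyEvents(preFrameEvents, 3));
// -> [{start: 100.0, end: 102.5}, {start: 120.0, end: 120.4}]
```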
+function getFrameStartTime(traversalStart,preTraversalWorkRanges){var preTraversalWorkRange=tr.b.findClosestIntervalInSortedIntervals(preTraversalWorkRanges,function(range){return range.start},function(range){return range.end},traversalStart,3);if(preTraversalWorkRange)
+return preTraversalWorkRange.start;return traversalStart;}
+function getUiThreadDrivenFrames(app){if(!app.uiThread)
+return[];var preTraversalWorkRanges=[];if(app.uiDrawType==UI_DRAW_TYPE.LEGACY)
+preTraversalWorkRanges=getPreTraversalWorkRanges(app.uiThread);var frames=[];app.uiThread.sliceGroup.slices.forEach(function(slice){if(!(slice.title in UI_THREAD_DRAW_NAMES)){return;}
+var threadTimeRanges=[];var uiThreadTimeRange={thread:app.uiThread,start:getFrameStartTime(slice.start,preTraversalWorkRanges),end:slice.end};threadTimeRanges.push(uiThreadTimeRange);var rtDrawSlice=findOverlappingDrawFrame(app.renderThread,slice.end);if(rtDrawSlice){var rtSyncSlice=rtDrawSlice.findDescendentSlice(THREAD_SYNC_NAME);if(rtSyncSlice){uiThreadTimeRange.end=Math.min(uiThreadTimeRange.end,rtSyncSlice.start);}
+threadTimeRanges.push({thread:app.renderThread,start:rtDrawSlice.start,end:rtDrawSlice.end});}
+frames.push(makeFrame(threadTimeRanges,app.surfaceFlinger));});return frames;}
+function getRenderThreadDrivenFrames(app){if(!app.renderThread)
+return[];var frames=[];app.renderThread.sliceGroup.getSlicesOfName(RENDER_THREAD_INDEP_DRAW_NAME).forEach(function(slice){var threadTimeRanges=[{thread:app.renderThread,start:slice.start,end:slice.end}];frames.push(makeFrame(threadTimeRanges,app.surfaceFlinger));});return frames;}
+function getUiDrawType(uiThread){if(!uiThread)
+return UI_DRAW_TYPE.NONE;var slices=uiThread.sliceGroup.slices;for(var i=0;i<slices.length;i++){if(slices[i].title in UI_THREAD_DRAW_NAMES){return UI_THREAD_DRAW_NAMES[slices[i].title];}}
+return UI_DRAW_TYPE.NONE;}
+function getInputSamples(process){var samples=undefined;for(var counterName in process.counters){if(/^android\.aq\:pending/.test(counterName)&&process.counters[counterName].numSeries==1){samples=process.counters[counterName].series[0].samples;break;}}
+if(!samples)
+return[];var inputSamples=[];var lastValue=0;samples.forEach(function(sample){if(sample.value>lastValue){inputSamples.push(sample);}
+lastValue=sample.value;});return inputSamples;}
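`getInputSamples` above keeps a counter sample only when the `android.aq:pending*` value rises, treating each increase as newly enqueued input. A minimal sketch of that rising-edge filter on plain `{timestamp, value}` pairs; the counter trace below is invented:

```js
// Keep only the samples where the pending-input counter increased,
// mirroring the lastValue comparison in getInputSamples above.
function risingEdges(samples) {
  var edges = [];
  var lastValue = 0;
  samples.forEach(function(sample) {
    if (sample.value > lastValue)
      edges.push(sample);
    lastValue = sample.value;
  });
  return edges;
}

var pendingCounter = [
  {timestamp: 5,  value: 1},   // input arrives  -> kept
  {timestamp: 6,  value: 0},   // drained        -> dropped
  {timestamp: 9,  value: 1},   // input arrives  -> kept
  {timestamp: 10, value: 2},   // another input  -> kept
  {timestamp: 11, value: 0}    // drained        -> dropped
];
console.log(risingEdges(pendingCounter).map(function(s) { return s.timestamp; }));
// -> [5, 9, 10]
```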
+function getAnimationAsyncSlices(uiThread){if(!uiThread)
+return[];var slices=[];uiThread.asyncSliceGroup.iterateAllEvents(function(slice){if(/^animator\:/.test(slice.title))
+slices.push(slice);});return slices;}
+function AndroidApp(process,uiThread,renderThread,surfaceFlinger,uiDrawType){this.process=process;this.uiThread=uiThread;this.renderThread=renderThread;this.surfaceFlinger=surfaceFlinger;this.uiDrawType=uiDrawType;this.frames_=undefined;this.inputs_=undefined;};AndroidApp.createForProcessIfPossible=function(process,surfaceFlinger){var uiThread=process.getThread(process.pid);var uiDrawType=getUiDrawType(uiThread);if(uiDrawType==UI_DRAW_TYPE.NONE){uiThread=undefined;}
+var renderThreads=process.findAllThreadsNamed('RenderThread');var renderThread=renderThreads.length==1?renderThreads[0]:undefined;if(uiThread||renderThread){return new AndroidApp(process,uiThread,renderThread,surfaceFlinger,uiDrawType);}};AndroidApp.prototype={getFrames:function(){if(!this.frames_){var uiFrames=getUiThreadDrivenFrames(this);var rtFrames=getRenderThreadDrivenFrames(this);this.frames_=uiFrames.concat(rtFrames);this.frames_.sort(function(a,b){a.end-b.end});}
+return this.frames_;},getInputSamples:function(){if(!this.inputs_){this.inputs_=getInputSamples(this.process);}
+return this.inputs_;},getAnimationAsyncSlices:function(){if(!this.animations_){this.animations_=getAnimationAsyncSlices(this.uiThread);}
+return this.animations_;}};return{AndroidApp:AndroidApp};});'use strict';tr.exportTo('tr.model.helpers',function(){var findLowIndexInSortedArray=tr.b.findLowIndexInSortedArray;var VSYNC_SF_NAME='android.VSYNC-sf';var VSYNC_APP_NAME='android.VSYNC-app';var VSYNC_FALLBACK_NAME='android.VSYNC';var TIMESTAMP_FUDGE_MS=0.01;function getVsyncTimestamps(process,counterName){var vsync=process.counters[counterName];if(!vsync)
+vsync=process.counters[VSYNC_FALLBACK_NAME];if(vsync&&vsync.numSeries==1&&vsync.numSamples>1)
+return vsync.series[0].timestamps;return undefined;}
+function AndroidSurfaceFlinger(process,thread){this.process=process;this.thread=thread;this.appVsync_=undefined;this.sfVsync_=undefined;this.appVsyncTimestamps_=getVsyncTimestamps(process,VSYNC_APP_NAME);this.sfVsyncTimestamps_=getVsyncTimestamps(process,VSYNC_SF_NAME);};AndroidSurfaceFlinger.createForProcessIfPossible=function(process){var mainThread=process.getThread(process.pid);if(mainThread&&mainThread.name&&/surfaceflinger/.test(mainThread.name))
+return new AndroidSurfaceFlinger(process,mainThread);var primaryThreads=process.findAllThreadsNamed('SurfaceFlinger');if(primaryThreads.length==1)
+return new AndroidSurfaceFlinger(process,primaryThreads[0]);return undefined;};AndroidSurfaceFlinger.prototype={get hasVsyncs(){return!!this.appVsyncTimestamps_&&!!this.sfVsyncTimestamps_;},getFrameKickoff:function(timestamp){if(!this.hasVsyncs)
+throw new Error('cannot query vsync info without vsyncs');var firstGreaterIndex=findLowIndexInSortedArray(this.appVsyncTimestamps_,function(x){return x;},timestamp+TIMESTAMP_FUDGE_MS);if(firstGreaterIndex<1)
+return undefined;return this.appVsyncTimestamps_[firstGreaterIndex-1];},getFrameDeadline:function(timestamp){if(!this.hasVsyncs)
+throw new Error('cannot query vsync info without vsyncs');var firstGreaterIndex=findLowIndexInSortedArray(this.sfVsyncTimestamps_,function(x){return x;},timestamp+TIMESTAMP_FUDGE_MS);if(firstGreaterIndex>=this.sfVsyncTimestamps_.length)
+return undefined;return this.sfVsyncTimestamps_[firstGreaterIndex];}};return{AndroidSurfaceFlinger:AndroidSurfaceFlinger};});'use strict';tr.exportTo('tr.model.helpers',function(){var AndroidApp=tr.model.helpers.AndroidApp;var AndroidSurfaceFlinger=tr.model.helpers.AndroidSurfaceFlinger;var IMPORTANT_SURFACE_FLINGER_SLICES={'doComposition':true,'updateTexImage':true,'postFramebuffer':true};var IMPORTANT_UI_THREAD_SLICES={'Choreographer#doFrame':true,'performTraversals':true,'deliverInputEvent':true};var IMPORTANT_RENDER_THREAD_SLICES={'doFrame':true};function iterateImportantThreadSlices(thread,important,callback){if(!thread)
+return;thread.sliceGroup.slices.forEach(function(slice){if(slice.title in important)
+callback(slice);});}
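The `AndroidSurfaceFlinger` helper a few lines up answers two per-frame questions against its sorted vsync arrays: `getFrameKickoff` returns the last app vsync at or before a timestamp, and `getFrameDeadline` returns the first SurfaceFlinger vsync after it. A hedged sketch of those lookups; the vsync trains and the query time are made up, and a linear `findIndex` stands in for the binary search used by the real code:

```js
// Illustrative lookup of frame kickoff/deadline against sorted vsync arrays,
// mirroring getFrameKickoff()/getFrameDeadline() above.
var FUDGE_MS = 0.01;

function frameKickoff(appVsyncs, t) {
  var firstGreater = appVsyncs.findIndex(function(v) { return v >= t + FUDGE_MS; });
  if (firstGreater === -1) firstGreater = appVsyncs.length;
  return firstGreater < 1 ? undefined : appVsyncs[firstGreater - 1];
}

function frameDeadline(sfVsyncs, t) {
  var firstGreater = sfVsyncs.findIndex(function(v) { return v >= t + FUDGE_MS; });
  return firstGreater === -1 ? undefined : sfVsyncs[firstGreater];
}

// App and SurfaceFlinger vsyncs ticking every ~16.67 ms, SF offset by 5 ms.
var appVsyncs = [0, 16.67, 33.33, 50.0];
var sfVsyncs  = [5, 21.67, 38.33, 55.0];

console.log(frameKickoff(appVsyncs, 20));   // 16.67 - vsync that kicked the frame off
console.log(frameDeadline(sfVsyncs, 20));   // 21.67 - next SF vsync the frame must hit
```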
+function AndroidModelHelper(model){this.model=model;this.apps=[];this.surfaceFlinger=undefined;var processes=model.getAllProcesses();for(var i=0;i<processes.length&&!this.surfaceFlinger;i++){this.surfaceFlinger=AndroidSurfaceFlinger.createForProcessIfPossible(processes[i]);}
+model.getAllProcesses().forEach(function(process){var app=AndroidApp.createForProcessIfPossible(process,this.surfaceFlinger);if(app)
+this.apps.push(app);},this);};AndroidModelHelper.guid=tr.b.GUID.allocate();AndroidModelHelper.supportsModel=function(model){return true;};AndroidModelHelper.prototype={iterateImportantSlices:function(callback){if(this.surfaceFlinger){iterateImportantThreadSlices(this.surfaceFlinger.thread,IMPORTANT_SURFACE_FLINGER_SLICES,callback);}
+this.apps.forEach(function(app){iterateImportantThreadSlices(app.uiThread,IMPORTANT_UI_THREAD_SLICES,callback);iterateImportantThreadSlices(app.renderThread,IMPORTANT_RENDER_THREAD_SLICES,callback);});}};return{AndroidModelHelper:AndroidModelHelper};});'use strict';tr.exportTo('tr.model',function(){function Slice(category,title,colorId,start,args,opt_duration,opt_cpuStart,opt_cpuDuration,opt_argsStripped,opt_bind_id){tr.model.TimedEvent.call(this,start);this.category=category||'';this.title=title;this.colorId=colorId;this.args=args;this.startStackFrame=undefined;this.endStackFrame=undefined;this.didNotFinish=false;this.inFlowEvents=[];this.outFlowEvents=[];this.subSlices=[];this.selfTime=undefined;this.cpuSelfTime=undefined;this.important=false;this.parentContainer=undefined;this.argsStripped=false;this.bind_id_=opt_bind_id;this.parentSlice=undefined;this.isTopLevel=false;if(opt_duration!==undefined)
 this.duration=opt_duration;if(opt_cpuStart!==undefined)
 this.cpuStart=opt_cpuStart;if(opt_cpuDuration!==undefined)
 this.cpuDuration=opt_cpuDuration;if(opt_argsStripped!==undefined)
 this.argsStripped=true;}
 Slice.prototype={__proto__:tr.model.TimedEvent.prototype,get analysisTypeName(){return this.title;},get userFriendlyName(){return'Slice '+this.title+' at '+
-tr.b.u.TimeStamp.format(this.start);},get stableId(){var parentSliceGroup=this.parentContainer.sliceGroup;return parentSliceGroup.stableId+'.'+
+tr.v.Unit.byName.timeStampInMs.format(this.start);},get stableId(){var parentSliceGroup=this.parentContainer.sliceGroup;return parentSliceGroup.stableId+'.'+
 parentSliceGroup.slices.indexOf(this);},findDescendentSlice:function(targetTitle){if(!this.subSlices)
 return undefined;for(var i=0;i<this.subSlices.length;i++){if(this.subSlices[i].title==targetTitle)
 return this.subSlices[i];var slice=this.subSlices[i].findDescendentSlice(targetTitle);if(slice)return slice;}
@@ -2923,7 +2756,382 @@
 if(!cpuSliceWhenLastRunning)
 return undefined;var cpu=cpuSliceWhenLastRunning.cpu;var indexOfSliceOnCpuWhenLastRunning=cpu.indexOf(cpuSliceWhenLastRunning);var nextRunningSlice=cpu.slices[indexOfSliceOnCpuWhenLastRunning+1];if(!nextRunningSlice)
 return undefined;if(Math.abs(nextRunningSlice.start-cpuSliceWhenLastRunning.end)<0.00001)
-return nextRunningSlice;return undefined;}};tr.model.EventRegistry.register(ThreadTimeSlice,{name:'threadTimeSlice',pluralName:'threadTimeSlices',singleViewElementName:'tr-ui-a-single-thread-time-slice-sub-view',multiViewElementName:'tr-ui-a-multi-thread-time-slice-sub-view'});return{ThreadTimeSlice:ThreadTimeSlice,SCHEDULING_STATE:SCHEDULING_STATE};});'use strict';tr.exportTo('tr.model',function(){var Slice=tr.model.Slice;function CpuSlice(cat,title,colorId,start,args,opt_duration){Slice.apply(this,arguments);this.threadThatWasRunning=undefined;this.cpu=undefined;}
+return nextRunningSlice;return undefined;}};tr.model.EventRegistry.register(ThreadTimeSlice,{name:'threadTimeSlice',pluralName:'threadTimeSlices',singleViewElementName:'tr-ui-a-single-thread-time-slice-sub-view',multiViewElementName:'tr-ui-a-multi-thread-time-slice-sub-view'});return{ThreadTimeSlice:ThreadTimeSlice,SCHEDULING_STATE:SCHEDULING_STATE};});'use strict';tr.exportTo('tr.model',function(){var CompoundEventSelectionState={NOT_SELECTED:0,EVENT_SELECTED:0x1,SOME_ASSOCIATED_EVENTS_SELECTED:0x2,ALL_ASSOCIATED_EVENTS_SELECTED:0x4,EVENT_AND_SOME_ASSOCIATED_SELECTED:0x1|0x2,EVENT_AND_ALL_ASSOCIATED_SELECTED:0x1|0x4};return{CompoundEventSelectionState:CompoundEventSelectionState};});'use strict';tr.exportTo('tr.model.um',function(){var CompoundEventSelectionState=tr.model.CompoundEventSelectionState;function UserExpectation(parentModel,initiatorTitle,start,duration){tr.model.TimedEvent.call(this,start);this.associatedEvents=new tr.model.EventSet();this.duration=duration;this.initiatorTitle_=initiatorTitle;this.parentModel=parentModel;this.typeInfo_=undefined;this.sourceEvents=new tr.model.EventSet();}
+UserExpectation.prototype={__proto__:tr.model.TimedEvent.prototype,computeCompoundEvenSelectionState:function(selection){var cess=CompoundEventSelectionState.NOT_SELECTED;if(selection.contains(this))
+cess|=CompoundEventSelectionState.EVENT_SELECTED;if(this.associatedEvents.intersectionIsEmpty(selection))
+return cess;var allContained=this.associatedEvents.every(function(event){return selection.contains(event);});if(allContained)
+cess|=CompoundEventSelectionState.ALL_ASSOCIATED_EVENTS_SELECTED;else
+cess|=CompoundEventSelectionState.SOME_ASSOCIATED_EVENTS_SELECTED;return cess;},get userFriendlyName(){return this.title+' User Expectation at '+
+tr.v.Unit.byName.timeStampInMs.format(this.start);},get stableId(){return('UserExpectation.'+
+this.parentModel.userModel.expectations.indexOf(this));},get typeInfo(){if(!this.typeInfo_)
+this.typeInfo_=UserExpectation.findTypeInfo(this.constructor);if(!this.typeInfo_)
+throw new Error('Unregistered UserExpectation');return this.typeInfo_;},get colorId(){return this.typeInfo.metadata.colorId;},get stageTitle(){return this.typeInfo.metadata.stageTitle;},get initiatorTitle(){return this.initiatorTitle_;},get title(){if(!this.initiatorTitle)
+return this.stageTitle;return this.initiatorTitle+' '+this.stageTitle;},get totalCpuMs(){var cpuMs=0;this.associatedEvents.forEach(function(event){if(event.cpuSelfTime)
+cpuMs+=event.cpuSelfTime;});return cpuMs;}};var options=new tr.b.ExtensionRegistryOptions(tr.b.BASIC_REGISTRY_MODE);tr.b.decorateExtensionRegistry(UserExpectation,options);UserExpectation.addEventListener('will-register',function(e){var metadata=e.typeInfo.metadata;if(metadata.stageTitle===undefined){throw new Error('Registered UserExpectations must provide '+'stageTitle');}
+if(metadata.colorId===undefined){throw new Error('Registered UserExpectations must provide '+'colorId');}});tr.model.EventRegistry.register(UserExpectation,{name:'user-expectation',pluralName:'user-expectations',singleViewElementName:'tr-ui-a-single-user-expectation-sub-view',multiViewElementName:'tr-ui-a-multi-user-expectation-sub-view'});return{UserExpectation:UserExpectation};});'use strict';tr.exportTo('tr.model.um',function(){function ResponseExpectation(parentModel,initiatorTitle,start,duration,opt_isAnimationBegin){tr.model.um.UserExpectation.call(this,parentModel,initiatorTitle,start,duration);this.isAnimationBegin=opt_isAnimationBegin||false;}
+ResponseExpectation.prototype={__proto__:tr.model.um.UserExpectation.prototype,constructor:ResponseExpectation};tr.model.um.UserExpectation.register(ResponseExpectation,{stageTitle:'Response',colorId:tr.b.ColorScheme.getColorIdForReservedName('rail_response')});return{ResponseExpectation:ResponseExpectation};});'use strict';tr.exportTo('tr.v',function(){var Range=tr.b.Range;var MAX_SOURCE_INFOS=16;function NumericBase(unit){if(!(unit instanceof tr.v.Unit))
+throw new Error('Expected provided unit to be instance of Unit');this.unit=unit;}
+NumericBase.prototype={asDict:function(){var d={unit:this.unit.asJSON()};this.asDictInto_(d);return d;}};NumericBase.fromDict=function(d){if(d.type==='scalar')
+return ScalarNumeric.fromDict(d);throw new Error('Not implemented');};function NumericBin(parentNumeric,opt_range){this.parentNumeric=parentNumeric;this.range=opt_range||(new tr.b.Range());this.count=0;this.sourceInfos=[];}
+NumericBin.fromDict=function(parentNumeric,d){var n=new NumericBin(parentNumeric);n.range.min=d.min;n.range.max=d.max;n.count=d.count;n.sourceInfos=d.sourceInfos;return n;};NumericBin.prototype={add:function(value,sourceInfo){this.count+=1;tr.b.Statistics.uniformlySampleStream(this.sourceInfos,this.count,sourceInfo,MAX_SOURCE_INFOS);},addBin:function(other){if(!this.range.equals(other.range))
+throw new Error('Merging incompatible Numeric bins.');tr.b.Statistics.mergeSampledStreams(this.sourceInfos,this.count,other.sourceInfos,other.count,MAX_SOURCE_INFOS);this.count+=other.count;},asDict:function(){return{min:this.range.min,max:this.range.max,count:this.count,sourceInfos:this.sourceInfos.slice(0)};},asJSON:function(){return this.asDict();}};function Numeric(unit,range,binInfo){NumericBase.call(this,unit);this.range=range;this.numNans=0;this.nanSourceInfos=[];this.runningSum=0;this.maxCount_=0;this.underflowBin=binInfo.underflowBin;this.centralBins=binInfo.centralBins;this.centralBinWidth=binInfo.centralBinWidth;this.overflowBin=binInfo.overflowBin;this.allBins=[];this.allBins.push(this.underflowBin);this.allBins.push.apply(this.allBins,this.centralBins);this.allBins.push(this.overflowBin);this.allBins.forEach(function(bin){if(bin.count>this.maxCount_)
+this.maxCount_=bin.count;},this);}
+Numeric.fromDict=function(d){var range=Range.fromExplicitRange(d.min,d.max);var binInfo={};binInfo.underflowBin=NumericBin.fromDict(undefined,d.underflowBin);binInfo.centralBins=d.centralBins.map(function(binAsDict){return NumericBin.fromDict(undefined,binAsDict);});binInfo.centralBinWidth=d.centralBinWidth;binInfo.overflowBin=NumericBin.fromDict(undefined,d.overflowBin);var n=new Numeric(tr.v.Unit.fromJSON(d.unit),range,binInfo);n.allBins.forEach(function(bin){bin.parentNumeric=n;});n.runningSum=d.runningSum;n.numNans=d.numNans;n.nanSourceInfos=d.nanSourceInfos;return n;};Numeric.createLinear=function(unit,range,numBins){if(range.isEmpty)
+throw new Error('Nope');var binInfo={};binInfo.underflowBin=new NumericBin(this,Range.fromExplicitRange(-Number.MAX_VALUE,range.min));binInfo.overflowBin=new NumericBin(this,Range.fromExplicitRange(range.max,Number.MAX_VALUE));binInfo.centralBins=[];binInfo.centralBinWidth=range.range/numBins;for(var i=0;i<numBins;i++){var lo=range.min+(binInfo.centralBinWidth*i);var hi=lo+binInfo.centralBinWidth;binInfo.centralBins.push(new NumericBin(undefined,Range.fromExplicitRange(lo,hi)));}
+var n=new Numeric(unit,range,binInfo);n.allBins.forEach(function(bin){bin.parentNumeric=n;});return n;};Numeric.prototype={__proto__:NumericBase.prototype,get numValues(){return tr.b.Statistics.sum(this.allBins,function(e){return e.count;});},get average(){return this.runningSum/this.numValues;},get maxCount(){return this.maxCount_;},getInterpolatedCountAt:function(value){var bin=this.getBinForValue(value);var idx=this.centralBins.indexOf(bin);if(idx<0){return bin.count;}
+var lesserBin=bin;var greaterBin=bin;var lesserBinCenter=undefined;var greaterBinCenter=undefined;if(value<greaterBin.range.center){if(idx>0){lesserBin=this.centralBins[idx-1];}else{lesserBin=this.underflowBin;lesserBinCenter=lesserBin.range.max;}}else{if(idx<(this.centralBins.length-1)){greaterBin=this.centralBins[idx+1];}else{greaterBin=this.overflowBin;greaterBinCenter=greaterBin.range.min;}}
+if(greaterBinCenter===undefined)
+greaterBinCenter=greaterBin.range.center;if(lesserBinCenter===undefined)
+lesserBinCenter=lesserBin.range.center;value=tr.b.normalize(value,lesserBinCenter,greaterBinCenter);return tr.b.lerp(value,lesserBin.count,greaterBin.count);},getBinForValue:function(value){if(value<this.range.min)
+return this.underflowBin;if(value>=this.range.max)
+return this.overflowBin;var binIdx=Math.floor((value-this.range.min)/this.centralBinWidth);return this.centralBins[binIdx];},add:function(value,sourceInfo){if(typeof(value)!=='number'||isNaN(value)){this.numNans++;tr.b.Statistics.uniformlySampleStream(this.nanSourceInfos,this.numNans,sourceInfo,MAX_SOURCE_INFOS);return;}
+var bin=this.getBinForValue(value);bin.add(value,sourceInfo);this.runningSum+=value;if(bin.count>this.maxCount_)
+this.maxCount_=bin.count;},addNumeric:function(other){if(!this.range.equals(other.range)||!this.unit===other.unit||this.allBins.length!==other.allBins.length){throw new Error('Merging incompatible Numerics.');}
+tr.b.Statistics.mergeSampledStreams(this.nanSourceInfos,this.numNans,other.nanSourceInfos,other.numNans,MAX_SOURCE_INFOS);this.numNans+=other.numNans;this.runningSum+=other.runningSum;for(var i=0;i<this.allBins.length;++i){this.allBins[i].addBin(other.allBins[i]);}},clone:function(){return Numeric.fromDict(this.asDict());},asDict:function(){var d={unit:this.unit.asJSON(),min:this.range.min,max:this.range.max,numNans:this.numNans,nanSourceInfos:this.nanSourceInfos,runningSum:this.runningSum,underflowBin:this.underflowBin.asDict(),centralBins:this.centralBins.map(function(bin){return bin.asDict();}),centralBinWidth:this.centralBinWidth,overflowBin:this.overflowBin.asDict()};return d;},asJSON:function(){return this.asDict();}};function ScalarNumeric(unit,value){if(!(typeof(value)=='number'))
+throw new Error('Expected value to be number');NumericBase.call(this,unit);this.value=value;}
+ScalarNumeric.prototype={__proto__:NumericBase.prototype,asDictInto_:function(d){d.type='scalar';d.value=this.value;},toString:function(){return this.unit.format(this.value);}};ScalarNumeric.fromDict=function(d){return new ScalarNumeric(tr.v.Unit.fromJSON(d.unit),d.value);};return{NumericBase:NumericBase,NumericBin:NumericBin,Numeric:Numeric,ScalarNumeric:ScalarNumeric};});'use strict';tr.exportTo('tr.e.audits',function(){var SCHEDULING_STATE=tr.model.SCHEDULING_STATE;var Auditor=tr.c.Auditor;var AndroidModelHelper=tr.model.helpers.AndroidModelHelper;var ColorScheme=tr.b.ColorScheme;var Statistics=tr.b.Statistics;var FRAME_PERF_CLASS=tr.model.FRAME_PERF_CLASS;var Alert=tr.model.Alert;var EventInfo=tr.model.EventInfo;var ScalarNumeric=tr.v.ScalarNumeric;var timeDurationInMs=tr.v.Unit.byName.timeDurationInMs;var EXPECTED_FRAME_TIME_MS=16.67;function getStart(e){return e.start;}
+function getDuration(e){return e.duration;}
+function getCpuDuration(e){return(e.cpuDuration!==undefined)?e.cpuDuration:e.duration;}
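The `tr.v.Numeric` histogram whose definition closes just above splits its range into an underflow bin, `numBins` equal-width central bins, and an overflow bin. A minimal sketch of that bucketing rule, tracking counts only (the real class also samples `sourceInfos` and tracks NaNs); the frame-time values are illustrative:

```js
// Simplified linear-histogram bucketing, mirroring Numeric.createLinear() /
// getBinForValue() above: one underflow bin, numBins equal-width central
// bins over [min, max), and one overflow bin. Only counts are tracked here.
function createLinearHistogram(min, max, numBins) {
  return {
    min: min,
    max: max,
    binWidth: (max - min) / numBins,
    underflow: 0,
    central: new Array(numBins).fill(0),
    overflow: 0,
    add: function(value) {
      if (value < this.min) this.underflow++;
      else if (value >= this.max) this.overflow++;
      else this.central[Math.floor((value - this.min) / this.binWidth)]++;
    }
  };
}

// Frame times in ms bucketed into ten 2 ms bins over [0, 20).
var h = createLinearHistogram(0, 20, 10);
[3.1, 15.9, 16.7, 22.4, 41.0].forEach(function(ms) { h.add(ms); });
console.log(h.central);   // [0,1,0,0,0,0,0,1,1,0]
console.log(h.overflow);  // 2
```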
+function frameIsActivityStart(frame){for(var i=0;i<frame.associatedEvents.length;i++){if(frame.associatedEvents[i].title=='activityStart')
+return true;}
+return false;}
+function frameMissedDeadline(frame){return frame.args['deadline']&&frame.args['deadline']<frame.end;}
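`frameMissedDeadline` feeds the jank classification applied further down in `pushFramesAndJudgeJank_`: more than two frame budgets of work is terrible, over one budget or a missed SurfaceFlinger deadline is bad, anything else is good. A sketch of that decision using the 16.67 ms budget from the code; the flattened frame objects (with `deadline` as a plain property rather than `frame.args['deadline']`) are a simplification:

```js
// Frame classification mirroring the thresholds in pushFramesAndJudgeJank_.
var EXPECTED_FRAME_TIME_MS = 16.67;

function classifyFrame(frame) {
  if (frame.totalDuration > EXPECTED_FRAME_TIME_MS * 2)
    return 'terrible';
  if (frame.totalDuration > EXPECTED_FRAME_TIME_MS ||
      (frame.deadline !== undefined && frame.deadline < frame.end))
    return 'bad';
  return 'good';
}

console.log(classifyFrame({totalDuration: 8.2,  end: 30, deadline: 38.33}));  // good
console.log(classifyFrame({totalDuration: 14.0, end: 45, deadline: 38.33}));  // bad (missed deadline)
console.log(classifyFrame({totalDuration: 40.0, end: 90, deadline: 72.0}));   // terrible
```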
+function DocLinkBuilder(){this.docLinks=[];}
+DocLinkBuilder.prototype={addAppVideo:function(name,videoId){this.docLinks.push({label:'Video Link',textContent:('Android Performance Patterns: '+name),href:'https://www.youtube.com/watch?list=PLWz5rJ2EKKc9CBxr3BVjPTPoDPLdPIFCE&v='+videoId});return this;},addDacRef:function(name,link){this.docLinks.push({label:'Doc Link',textContent:(name+' documentation'),href:'https://developer.android.com/reference/'+link});return this;},build:function(){return this.docLinks;}};function AndroidAuditor(model){Auditor.call(this,model);var helper=model.getOrCreateHelper(AndroidModelHelper);if(helper.apps.length||helper.surfaceFlinger)
+this.helper=helper;};AndroidAuditor.viewAlphaAlertInfo_=new EventInfo('Inefficient View alpha usage','Setting an alpha between 0 and 1 has significant performance costs, if one of the fast alpha paths is not used.',new DocLinkBuilder().addAppVideo('Hidden Cost of Transparency','wIy8g8yNhNk').addDacRef('View#setAlpha()','android/view/View.html#setAlpha(float)').build());AndroidAuditor.saveLayerAlertInfo_=new EventInfo('Expensive rendering with Canvas#saveLayer()','Canvas#saveLayer() incurs extremely high rendering cost. They disrupt the rendering pipeline when drawn, forcing a flush of drawing content. Instead use View hardware layers, or static Bitmaps. This enables the offscreen buffers to be reused in between frames, and avoids the disruptive render target switch.',new DocLinkBuilder().addAppVideo('Hidden Cost of Transparency','wIy8g8yNhNk').addDacRef('Canvas#saveLayerAlpha()','android/graphics/Canvas.html#saveLayerAlpha(android.graphics.RectF, int, int)').build());AndroidAuditor.getSaveLayerAlerts_=function(frame){var badAlphaRegEx=/^(.+) alpha caused (unclipped )?saveLayer (\d+)x(\d+)$/;var saveLayerRegEx=/^(unclipped )?saveLayer (\d+)x(\d+)$/;var ret=[];var events=[];frame.associatedEvents.forEach(function(slice){var match=badAlphaRegEx.exec(slice.title);if(match){var args={'view name':match[1],width:parseInt(match[3]),height:parseInt(match[4])};ret.push(new Alert(AndroidAuditor.viewAlphaAlertInfo_,slice.start,[slice],args));}else if(saveLayerRegEx.test(slice.title))
+events.push(slice);},this);if(events.length>ret.length){var unclippedSeen=Statistics.sum(events,function(slice){return saveLayerRegEx.exec(slice.title)[1]?1:0;});var clippedSeen=events.length-unclippedSeen;var earliestStart=Statistics.min(events,function(slice){return slice.start;});var args={'Unclipped saveLayer count (especially bad!)':unclippedSeen,'Clipped saveLayer count':clippedSeen};events.push(frame);ret.push(new Alert(AndroidAuditor.saveLayerAlertInfo_,earliestStart,events,args));}
+return ret;};AndroidAuditor.pathAlertInfo_=new EventInfo('Path texture churn','Paths are drawn with a mask texture, so when a path is modified / newly drawn, that texture must be generated and uploaded to the GPU. Ensure that you cache paths between frames and do not unnecessarily call Path#reset(). You can cut down on this cost by sharing Path object instances between drawables/views.');AndroidAuditor.getPathAlert_=function(frame){var uploadRegEx=/^Generate Path Texture$/;var events=frame.associatedEvents.filter(function(event){return event.title=='Generate Path Texture';});var start=Statistics.min(events,getStart);var duration=Statistics.sum(events,getDuration);if(duration<3)
+return undefined;events.push(frame);return new Alert(AndroidAuditor.pathAlertInfo_,start,events,{'Time spent':new ScalarNumeric(timeDurationInMs,duration)});};AndroidAuditor.uploadAlertInfo_=new EventInfo('Expensive Bitmap uploads','Bitmaps that have been modified / newly drawn must be uploaded to the GPU. Since this is expensive if the total number of pixels uploaded is large, reduce the amount of Bitmap churn in this animation/context, per frame.');AndroidAuditor.getUploadAlert_=function(frame){var uploadRegEx=/^Upload (\d+)x(\d+) Texture$/;var events=[];var start=Number.POSITIVE_INFINITY;var duration=0;var pixelsUploaded=0;frame.associatedEvents.forEach(function(event){var match=uploadRegEx.exec(event.title);if(match){events.push(event);start=Math.min(start,event.start);duration+=event.duration;pixelsUploaded+=parseInt(match[1])*parseInt(match[2]);}});if(events.length==0||duration<3)
+return undefined;var mPixels=(pixelsUploaded/1000000).toFixed(2)+' million';var args={'Pixels uploaded':mPixels,'Time spent':new ScalarNumeric(timeDurationInMs,duration)};events.push(frame);return new Alert(AndroidAuditor.uploadAlertInfo_,start,events,args);};AndroidAuditor.ListViewInflateAlertInfo_=new EventInfo('Inflation during ListView recycling','ListView item recycling involved inflating views. Ensure your Adapter#getView() recycles the incoming View, instead of constructing a new one.');AndroidAuditor.ListViewBindAlertInfo_=new EventInfo('Inefficient ListView recycling/rebinding','ListView recycling taking too much time per frame. Ensure your Adapter#getView() binds data efficiently.');AndroidAuditor.getListViewAlert_=function(frame){var events=frame.associatedEvents.filter(function(event){return event.title=='obtainView'||event.title=='setupListItem';});var duration=Statistics.sum(events,getCpuDuration);if(events.length==0||duration<3)
+return undefined;var hasInflation=false;for(var i=0;i<events.length;i++){if(events[i]instanceof tr.model.Slice&&events[i].findDescendentSlice('inflate')){hasInflation=true;break;}}
+var start=Statistics.min(events,getStart);var args={'Time spent':new ScalarNumeric(timeDurationInMs,duration)};args['ListView items '+(hasInflation?'inflated':'rebound')]=events.length/2;var eventInfo=hasInflation?AndroidAuditor.ListViewInflateAlertInfo_:AndroidAuditor.ListViewBindAlertInfo_;events.push(frame);return new Alert(eventInfo,start,events,args);};AndroidAuditor.measureLayoutAlertInfo_=new EventInfo('Expensive measure/layout pass','Measure/Layout took a significant time, contributing to jank. Avoid triggering layout during animations.',new DocLinkBuilder().addAppVideo('Invalidations, Layouts, and Performance','we6poP0kw6E').build());AndroidAuditor.getMeasureLayoutAlert_=function(frame){var events=frame.associatedEvents.filter(function(event){return event.title=='measure'||event.title=='layout';});var duration=Statistics.sum(events,getCpuDuration);if(events.length==0||duration<3)
+return undefined;var start=Statistics.min(events,getStart);events.push(frame);return new Alert(AndroidAuditor.measureLayoutAlertInfo_,start,events,{'Time spent':new ScalarNumeric(timeDurationInMs,duration)});};AndroidAuditor.viewDrawAlertInfo_=new EventInfo('Long View#draw()','Recording the drawing commands of invalidated Views took a long time. Avoid significant work in View or Drawable custom drawing, especially allocations or drawing to Bitmaps.',new DocLinkBuilder().addAppVideo('Invalidations, Layouts, and Performance','we6poP0kw6E').addAppVideo('Avoiding Allocations in onDraw()','HAK5acHQ53E').build());AndroidAuditor.getViewDrawAlert_=function(frame){var slice=undefined;for(var i=0;i<frame.associatedEvents.length;i++){if(frame.associatedEvents[i].title=='getDisplayList'||frame.associatedEvents[i].title=='Record View#draw()'){slice=frame.associatedEvents[i];break;}}
+if(!slice||getCpuDuration(slice)<3)
+return undefined;return new Alert(AndroidAuditor.viewDrawAlertInfo_,slice.start,[slice,frame],{'Time spent':new ScalarNumeric(timeDurationInMs,getCpuDuration(slice))});};AndroidAuditor.blockingGcAlertInfo_=new EventInfo('Blocking Garbage Collection','Blocking GCs are caused by object churn, and made worse by having large numbers of objects in the heap. Avoid allocating objects during animations/scrolling, and recycle Bitmaps to avoid triggering garbage collection.',new DocLinkBuilder().addAppVideo('Garbage Collection in Android','pzfzz50W5Uo').addAppVideo('Avoiding Allocations in onDraw()','HAK5acHQ53E').build());AndroidAuditor.getBlockingGcAlert_=function(frame){var events=frame.associatedEvents.filter(function(event){return event.title=='DVM Suspend'||event.title=='GC: Wait For Concurrent';});var blockedDuration=Statistics.sum(events,getDuration);if(blockedDuration<3)
+return undefined;var start=Statistics.min(events,getStart);events.push(frame);return new Alert(AndroidAuditor.blockingGcAlertInfo_,start,events,{'Blocked duration':new ScalarNumeric(timeDurationInMs,blockedDuration)});};AndroidAuditor.lockContentionAlertInfo_=new EventInfo('Lock contention','UI thread lock contention is caused when another thread holds a lock that the UI thread is trying to use. UI thread progress is blocked until the lock is released. Inspect locking done within the UI thread, and ensure critical sections are short.');AndroidAuditor.getLockContentionAlert_=function(frame){var events=frame.associatedEvents.filter(function(event){return/^Lock Contention on /.test(event.title);});var blockedDuration=Statistics.sum(events,getDuration);if(blockedDuration<1)
+return undefined;var start=Statistics.min(events,getStart);events.push(frame);return new Alert(AndroidAuditor.lockContentionAlertInfo_,start,events,{'Blocked duration':new ScalarNumeric(timeDurationInMs,blockedDuration)});};AndroidAuditor.schedulingAlertInfo_=new EventInfo('Scheduling delay','Work to produce this frame was descheduled for several milliseconds, contributing to jank. Ensure that code on the UI thread doesn\'t block on work being done on other threads, and that background threads (doing e.g. network or bitmap loading) are running at android.os.Process#THREAD_PRIORITY_BACKGROUND or lower so they are less likely to interrupt the UI thread. These background threads should show up with a priority number of 130 or higher in the scheduling section under the Kernel process.');AndroidAuditor.getSchedulingAlert_=function(frame){var totalDuration=0;var totalStats={};frame.threadTimeRanges.forEach(function(ttr){var stats=ttr.thread.getSchedulingStatsForRange(ttr.start,ttr.end);tr.b.iterItems(stats,function(key,value){if(!(key in totalStats))
+totalStats[key]=0;totalStats[key]+=value;totalDuration+=value;});});if(!(SCHEDULING_STATE.RUNNING in totalStats)||totalDuration==0||totalDuration-totalStats[SCHEDULING_STATE.RUNNING]<3)
+return;var args={};tr.b.iterItems(totalStats,function(key,value){if(key===SCHEDULING_STATE.RUNNABLE)
+key='Not scheduled, but runnable';else if(key===SCHEDULING_STATE.UNINTR_SLEEP)
+key='Blocking I/O delay';args[key]=new ScalarNumeric(timeDurationInMs,value);});return new Alert(AndroidAuditor.schedulingAlertInfo_,frame.start,[frame],args);};AndroidAuditor.prototype={__proto__:Auditor.prototype,renameAndSort_:function(){this.model.kernel.important=false;this.model.getAllProcesses().forEach(function(process){if(this.helper.surfaceFlinger&&process==this.helper.surfaceFlinger.process){if(!process.name)
+process.name='SurfaceFlinger';process.sortIndex=Number.NEGATIVE_INFINITY;process.important=false;return;}
+var uiThread=process.getThread(process.pid);if(!process.name&&uiThread&&uiThread.name){if(/^ndroid\./.test(uiThread.name))
+uiThread.name='a'+uiThread.name;process.name=uiThread.name;uiThread.name='UI Thread';}
+process.sortIndex=0;for(var tid in process.threads){process.sortIndex-=process.threads[tid].sliceGroup.slices.length;}},this);this.model.getAllThreads().forEach(function(thread){if(thread.tid==thread.parent.pid)
+thread.sortIndex=-3;if(thread.name=='RenderThread')
+thread.sortIndex=-2;if(/^hwuiTask/.test(thread.name))
+thread.sortIndex=-1;});},pushFramesAndJudgeJank_:function(){var badFramesObserved=0;var framesObserved=0;var surfaceFlinger=this.helper.surfaceFlinger;this.helper.apps.forEach(function(app){app.process.frames=app.getFrames();app.process.frames.forEach(function(frame){if(frame.totalDuration>EXPECTED_FRAME_TIME_MS*2){badFramesObserved+=2;frame.perfClass=FRAME_PERF_CLASS.TERRIBLE;}else if(frame.totalDuration>EXPECTED_FRAME_TIME_MS||frameMissedDeadline(frame)){badFramesObserved++;frame.perfClass=FRAME_PERF_CLASS.BAD;}else{frame.perfClass=FRAME_PERF_CLASS.GOOD;}});framesObserved+=app.process.frames.length;});if(framesObserved){var portionBad=badFramesObserved/framesObserved;if(portionBad>0.3)
+this.model.faviconHue='red';else if(portionBad>0.05)
+this.model.faviconHue='yellow';else
+this.model.faviconHue='green';}},pushEventInfo_:function(){var appAnnotator=new AppAnnotator();this.helper.apps.forEach(function(app){if(app.uiThread)
+appAnnotator.applyEventInfos(app.uiThread.sliceGroup);if(app.renderThread)
+appAnnotator.applyEventInfos(app.renderThread.sliceGroup);});},runAnnotate:function(){if(!this.helper)
+return;this.renameAndSort_();this.pushFramesAndJudgeJank_();this.pushEventInfo_();this.helper.iterateImportantSlices(function(slice){slice.important=true;});},runAudit:function(){if(!this.helper)
+return;var alerts=this.model.alerts;this.helper.apps.forEach(function(app){app.getFrames().forEach(function(frame){alerts.push.apply(alerts,AndroidAuditor.getSaveLayerAlerts_(frame));if(frame.perfClass==FRAME_PERF_CLASS.NEUTRAL||frame.perfClass==FRAME_PERF_CLASS.GOOD)
+return;var alert=AndroidAuditor.getPathAlert_(frame);if(alert)
+alerts.push(alert);var alert=AndroidAuditor.getUploadAlert_(frame);if(alert)
+alerts.push(alert);var alert=AndroidAuditor.getListViewAlert_(frame);if(alert)
+alerts.push(alert);var alert=AndroidAuditor.getMeasureLayoutAlert_(frame);if(alert)
+alerts.push(alert);var alert=AndroidAuditor.getViewDrawAlert_(frame);if(alert)
+alerts.push(alert);var alert=AndroidAuditor.getBlockingGcAlert_(frame);if(alert)
+alerts.push(alert);var alert=AndroidAuditor.getLockContentionAlert_(frame);if(alert)
+alerts.push(alert);var alert=AndroidAuditor.getSchedulingAlert_(frame);if(alert)
+alerts.push(alert);});},this);this.addRenderingInteractionRecords();this.addInputInteractionRecords();},addRenderingInteractionRecords:function(){var events=[];this.helper.apps.forEach(function(app){events.push.apply(events,app.getAnimationAsyncSlices());events.push.apply(events,app.getFrames());});var mergerFunction=function(events){var ir=new tr.model.um.ResponseExpectation(this.model,'Rendering',events[0].min,events[events.length-1].max-events[0].min);this.model.userModel.expectations.push(ir);}.bind(this);tr.b.mergeRanges(tr.b.convertEventsToRanges(events),30,mergerFunction);},addInputInteractionRecords:function(){var inputSamples=[];this.helper.apps.forEach(function(app){inputSamples.push.apply(inputSamples,app.getInputSamples());});var mergerFunction=function(events){var ir=new tr.model.um.ResponseExpectation(this.model,'Input',events[0].min,events[events.length-1].max-events[0].min);this.model.userModel.expectations.push(ir);}.bind(this);var inputRanges=inputSamples.map(function(sample){return tr.b.Range.fromExplicitRange(sample.timestamp,sample.timestamp);});tr.b.mergeRanges(inputRanges,30,mergerFunction);}};Auditor.register(AndroidAuditor);function AppAnnotator(){this.titleInfoLookup={};this.titleParentLookup={};this.build_();}
+AppAnnotator.prototype={build_:function(){var registerEventInfo=function(dict){this.titleInfoLookup[dict.title]=new EventInfo(dict.title,dict.description,dict.docLinks);if(dict.parents)
+this.titleParentLookup[dict.title]=dict.parents;}.bind(this);registerEventInfo({title:'inflate',description:'Constructing a View hierarchy from pre-processed XML via LayoutInflater#layout. This includes constructing all of the View objects in the hierarchy, and applying styled attributes.'});registerEventInfo({title:'obtainView',description:'Adapter#getView() called to bind content to a recycled View that is being presented.'});registerEventInfo({title:'setupListItem',description:'Attached a newly-bound, recycled View to its parent ListView.'});registerEventInfo({title:'setupGridItem',description:'Attached a newly-bound, recycled View to its parent GridView.'});var choreographerLinks=new DocLinkBuilder().addDacRef('Choreographer','android/view/Choreographer.html').build();registerEventInfo({title:'Choreographer#doFrame',docLinks:choreographerLinks,description:'Choreographer executes frame callbacks for inputs, animations, and rendering traversals. When this work is done, a frame will be presented to the user.'});registerEventInfo({title:'input',parents:['Choreographer#doFrame'],docLinks:choreographerLinks,description:'Input callbacks are processed. This generally encompasses dispatching input to Views, as well as any work the Views do to process this input/gesture.'});registerEventInfo({title:'animation',parents:['Choreographer#doFrame'],docLinks:choreographerLinks,description:'Animation callbacks are processed. This is generally minimal work, as animations determine progress for the frame, and push new state to animated objects (such as setting View properties).'});registerEventInfo({title:'traversals',parents:['Choreographer#doFrame'],docLinks:choreographerLinks,description:'Primary draw traversals. This is the primary traversal of the View hierarchy, including layout and draw passes.'});var traversalParents=['Choreographer#doFrame','performTraversals'];var layoutLinks=new DocLinkBuilder().addDacRef('View#Layout','android/view/View.html#Layout').build();registerEventInfo({title:'performTraversals',description:'A drawing traversal of the View hierarchy, comprised of all layout and drawing needed to produce the frame.'});registerEventInfo({title:'measure',parents:traversalParents,docLinks:layoutLinks,description:'First of two phases in view hierarchy layout. Views are asked to size themselves according to constraints supplied by their parent. Some ViewGroups may measure a child more than once to help satisfy their own constraints. Nesting ViewGroups that measure children more than once can lead to excessive and repeated work.'});registerEventInfo({title:'layout',parents:traversalParents,docLinks:layoutLinks,description:'Second of two phases in view hierarchy layout, repositioning content and child Views into their new locations.'});var drawString='Draw pass over the View hierarchy. Every invalidated View will have its drawing commands recorded. On Android versions prior to Lollipop, this would also include the issuing of draw commands to the GPU. Starting with Lollipop, it only includes the recording of commands, and syncing that information to the RenderThread.';registerEventInfo({title:'draw',parents:traversalParents,description:drawString});var recordString='Every invalidated View\'s drawing commands are recorded. 
Each will have View#draw() called, and is passed a Canvas that will record and store its drawing commands until it is next invalidated/rerecorded.';registerEventInfo({title:'getDisplayList',parents:['draw'],description:recordString});registerEventInfo({title:'Record View#draw()',parents:['draw'],description:recordString});registerEventInfo({title:'drawDisplayList',parents:['draw'],description:'Execution of recorded draw commands to generate a frame. This represents the actual formation and issuing of drawing commands to the GPU. On Android L and higher devices, this work is done on a dedicated RenderThread, instead of on the UI Thread.'});registerEventInfo({title:'DrawFrame',description:'RenderThread portion of the standard UI/RenderThread split frame. This represents the actual formation and issuing of drawing commands to the GPU.'});registerEventInfo({title:'doFrame',description:'RenderThread animation frame. Represents drawing work done by the RenderThread on a frame where the UI thread did not produce new drawing content.'});registerEventInfo({title:'syncFrameState',description:'Sync stage between the UI thread and the RenderThread, where the UI thread hands off a frame (including information about modified Views). Time in this method primarily consists of uploading modified Bitmaps to the GPU. After this sync is completed, the UI thread is unblocked, and the RenderThread starts to render the frame.'});registerEventInfo({title:'flush drawing commands',description:'Issuing the now complete drawing commands to the GPU.'});registerEventInfo({title:'eglSwapBuffers',description:'Complete GPU rendering of the frame.'});registerEventInfo({title:'RV Scroll',description:'RecyclerView is calculating a scroll. If there are too many of these in Systrace, some Views inside RecyclerView might be causing it. Try to avoid using EditText, focusable views or handle them with care.'});registerEventInfo({title:'RV OnLayout',description:'OnLayout has been called by the View system. If this shows up too many times in Systrace, make sure the children of RecyclerView do not update themselves directly. This will cause a full re-layout but when it happens via the Adapter notifyItemChanged, RecyclerView can avoid full layout calculation.'});registerEventInfo({title:'RV FullInvalidate',description:'NotifyDataSetChanged or equal has been called. If this is taking a long time, try sending granular notify adapter changes instead of just calling notifyDataSetChanged or setAdapter / swapAdapter. Adding stable ids to your adapter might help.'});registerEventInfo({title:'RV PartialInvalidate',description:'RecyclerView is rebinding a View. If this is taking a lot of time, consider optimizing your layout or make sure you are not doing extra operations in onBindViewHolder call.'});registerEventInfo({title:'RV OnBindView',description:'RecyclerView is rebinding a View. If this is taking a lot of time, consider optimizing your layout or make sure you are not doing extra operations in onBindViewHolder call.'});registerEventInfo({title:'RV CreateView',description:'RecyclerView is creating a new View. If too many of these are present: 1) There might be a problem in Recycling (e.g. custom Animations that set transient state and prevent recycling or ItemAnimator not implementing the contract properly. See Adapter#onFailedToRecycleView(ViewHolder). 2) There may be too many item view types. Try merging them. 3) There might be too many itemChange animations and not enough space in RecyclerPool. 
Try increasing your pool size and item cache size.'});registerEventInfo({title:'eglSwapBuffers',description:'The CPU has finished producing drawing commands, and is flushing drawing work to the GPU, and posting that buffer to the consumer (which is often SurfaceFlinger window composition). Once this is completed, the GPU can produce the frame content without any involvement from the CPU.'});},applyEventInfosRecursive_:function(parentNames,slice){var checkExpectedParentNames=function(expectedParentNames){if(!expectedParentNames)
+return true;return expectedParentNames.some(function(name){return name in parentNames;});};if(slice.title in this.titleInfoLookup){if(checkExpectedParentNames(this.titleParentLookup[slice.title]))
+slice.info=this.titleInfoLookup[slice.title];}
+if(slice.subSlices.length>0){if(!(slice.title in parentNames))
+parentNames[slice.title]=0;parentNames[slice.title]++;slice.subSlices.forEach(function(subSlice){this.applyEventInfosRecursive_(parentNames,subSlice);},this);parentNames[slice.title]--;if(parentNames[slice.title]==0)
+delete parentNames[slice.title];}},applyEventInfos:function(sliceGroup){sliceGroup.topLevelSlices.forEach(function(slice){this.applyEventInfosRecursive_({},slice);},this);}};return{AndroidAuditor:AndroidAuditor};});'use strict';tr.exportTo('tr.importer',function(){function Importer(){}
+Importer.prototype={__proto__:Object.prototype,get importerName(){return'Importer';},isTraceDataContainer:function(){return false;},extractSubtraces:function(){return[];},importClockSyncMarkers:function(){},importEvents:function(){},importSampleData:function(){},finalizeImport:function(){}};var options=new tr.b.ExtensionRegistryOptions(tr.b.BASIC_REGISTRY_MODE);options.defaultMetadata={};options.mandatoryBaseClass=Importer;tr.b.decorateExtensionRegistry(Importer,options);Importer.findImporterFor=function(eventData){var typeInfo=Importer.findTypeInfoMatching(function(ti){return ti.constructor.canImport(eventData);});if(typeInfo)
+return typeInfo.constructor;return undefined;};return{Importer:Importer};});'use strict';tr.exportTo('tr.importer',function(){function SimpleLineReader(text){this.lines_=text.split('\n');this.curLine_=0;this.savedLines_=undefined;}
+SimpleLineReader.prototype={advanceToLineMatching:function(regex){for(;this.curLine_<this.lines_.length;this.curLine_++){var line=this.lines_[this.curLine_];if(this.savedLines_!==undefined)
+this.savedLines_.push(line);if(regex.test(line))
+return true;}
+return false;},get curLineNumber(){return this.curLine_;},beginSavingLines:function(){this.savedLines_=[];},endSavingLinesAndGetResult:function(){var tmp=this.savedLines_;this.savedLines_=undefined;return tmp;}};return{SimpleLineReader:SimpleLineReader};});'use strict';tr.exportTo('tr.model',function(){var ColorScheme=tr.b.ColorScheme;function Activity(name,category,range,args){tr.model.TimedEvent.call(this,range.min);this.title=name;this.category=category;this.colorId=ColorScheme.getColorIdForGeneralPurposeString(name);this.duration=range.duration;this.args=args;this.name=name;};Activity.prototype={__proto__:tr.model.TimedEvent.prototype,shiftTimestampsForward:function(amount){this.start+=amount;},addBoundsToRange:function(range){range.addValue(this.start);range.addValue(this.end);}};return{Activity:Activity};});'use strict';tr.exportTo('tr.b',function(){function max(a,b){if(a===undefined)
+return b;if(b===undefined)
+return a;return Math.max(a,b);}
+function IntervalTree(beginPositionCb,endPositionCb){this.beginPositionCb_=beginPositionCb;this.endPositionCb_=endPositionCb;this.root_=undefined;this.size_=0;}
+IntervalTree.prototype={insert:function(datum){var startPosition=this.beginPositionCb_(datum);var endPosition=this.endPositionCb_(datum);var node=new IntervalTreeNode(datum,startPosition,endPosition);this.size_++;this.root_=this.insertNode_(this.root_,node);this.root_.colour=Colour.BLACK;return datum;},insertNode_:function(root,node){if(root===undefined)
+return node;if(root.leftNode&&root.leftNode.isRed&&root.rightNode&&root.rightNode.isRed)
+this.flipNodeColour_(root);if(node.key<root.key)
+root.leftNode=this.insertNode_(root.leftNode,node);else if(node.key===root.key)
+root.merge(node);else
+root.rightNode=this.insertNode_(root.rightNode,node);if(root.rightNode&&root.rightNode.isRed&&(root.leftNode===undefined||!root.leftNode.isRed))
+root=this.rotateLeft_(root);if(root.leftNode&&root.leftNode.isRed&&root.leftNode.leftNode&&root.leftNode.leftNode.isRed)
+root=this.rotateRight_(root);return root;},rotateRight_:function(node){var sibling=node.leftNode;node.leftNode=sibling.rightNode;sibling.rightNode=node;sibling.colour=node.colour;node.colour=Colour.RED;return sibling;},rotateLeft_:function(node){var sibling=node.rightNode;node.rightNode=sibling.leftNode;sibling.leftNode=node;sibling.colour=node.colour;node.colour=Colour.RED;return sibling;},flipNodeColour_:function(node){node.colour=this.flipColour_(node.colour);node.leftNode.colour=this.flipColour_(node.leftNode.colour);node.rightNode.colour=this.flipColour_(node.rightNode.colour);},flipColour_:function(colour){return colour===Colour.RED?Colour.BLACK:Colour.RED;},updateHighValues:function(){this.updateHighValues_(this.root_);},updateHighValues_:function(node){if(node===undefined)
+return undefined;node.maxHighLeft=this.updateHighValues_(node.leftNode);node.maxHighRight=this.updateHighValues_(node.rightNode);return max(max(node.maxHighLeft,node.highValue),node.maxHighRight);},validateFindArguments_:function(queryLow,queryHigh){if(queryLow===undefined||queryHigh===undefined)
+throw new Error('queryLow and queryHigh must be defined');if((typeof queryLow!=='number')||(typeof queryHigh!=='number'))
+throw new Error('queryLow and queryHigh must be numbers');},findIntersection:function(queryLow,queryHigh){this.validateFindArguments_(queryLow,queryHigh);if(this.root_===undefined)
+return[];var ret=[];this.root_.appendIntersectionsInto_(ret,queryLow,queryHigh);return ret;},get size(){return this.size_;},get root(){return this.root_;},dump_:function(){if(this.root_===undefined)
+return[];return this.root_.dump();}};var Colour={RED:'red',BLACK:'black'};function IntervalTreeNode(datum,lowValue,highValue){this.lowValue_=lowValue;this.data_=[{datum:datum,high:highValue,low:lowValue}];this.colour_=Colour.RED;this.parentNode_=undefined;this.leftNode_=undefined;this.rightNode_=undefined;this.maxHighLeft_=undefined;this.maxHighRight_=undefined;}
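The IntervalTree above is keyed by per-datum begin/end callbacks: callers insert data, refresh the cached subtree high values with updateHighValues(), and then ask for overlapping intervals with findIntersection(). A minimal usage sketch, assuming the tr.b exports from this file are loaded; the slice objects are made-up example data.

```
// Usage sketch for tr.b.IntervalTree, assuming this file is loaded.
var slices = [
  {title: 'a', start: 0,  end: 10},
  {title: 'b', start: 5,  end: 15},
  {title: 'c', start: 20, end: 30}
];

var tree = new tr.b.IntervalTree(
    function(s) { return s.start; },   // begin-position callback
    function(s) { return s.end; });    // end-position callback

slices.forEach(function(s) { tree.insert(s); });
tree.updateHighValues();  // refresh cached subtree high values before querying

// Returns the data whose [start, end] ranges intersect [8, 12): here 'a' and 'b'.
var hits = tree.findIntersection(8, 12);
```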
+IntervalTreeNode.prototype={appendIntersectionsInto_:function(ret,queryLow,queryHigh){if(this.lowValue_>=queryHigh){if(!this.leftNode_)
+return;return this.leftNode_.appendIntersectionsInto_(ret,queryLow,queryHigh);}
+if(this.maxHighLeft_>queryLow){this.leftNode_.appendIntersectionsInto_(ret,queryLow,queryHigh);}
+if(this.highValue>queryLow){for(var i=(this.data.length-1);i>=0;--i){if(this.data[i].high<queryLow)
+break;ret.push(this.data[i].datum);}}
+if(this.rightNode_){this.rightNode_.appendIntersectionsInto_(ret,queryLow,queryHigh);}},get colour(){return this.colour_;},set colour(colour){this.colour_=colour;},get key(){return this.lowValue_;},get lowValue(){return this.lowValue_;},get highValue(){return this.data_[this.data_.length-1].high;},set leftNode(left){this.leftNode_=left;},get leftNode(){return this.leftNode_;},get hasLeftNode(){return this.leftNode_!==undefined;},set rightNode(right){this.rightNode_=right;},get rightNode(){return this.rightNode_;},get hasRightNode(){return this.rightNode_!==undefined;},set parentNode(parent){this.parentNode_=parent;},get parentNode(){return this.parentNode_;},get isRootNode(){return this.parentNode_===undefined;},set maxHighLeft(high){this.maxHighLeft_=high;},get maxHighLeft(){return this.maxHighLeft_;},set maxHighRight(high){this.maxHighRight_=high;},get maxHighRight(){return this.maxHighRight_;},get data(){return this.data_;},get isRed(){return this.colour_===Colour.RED;},merge:function(node){for(var i=0;i<node.data.length;i++)
+this.data_.push(node.data[i]);this.data_.sort(function(a,b){return a.high-b.high;});},dump:function(){var ret={};if(this.leftNode_)
+ret['left']=this.leftNode_.dump();ret['data']=this.data_.map(function(d){return[d.low,d.high];});if(this.rightNode_)
+ret['right']=this.rightNode_.dump();return ret;}};return{IntervalTree:IntervalTree};});!function(t,n){if("object"==typeof exports&&"object"==typeof module)module.exports=n();else if("function"==typeof define&&define.amd)define(n);else{var r=n();for(var a in r)("object"==typeof exports?exports:t)[a]=r[a]}}(this,function(){return function(t){function n(a){if(r[a])return r[a].exports;var e=r[a]={exports:{},id:a,loaded:!1};return t[a].call(e.exports,e,e.exports,n),e.loaded=!0,e.exports}var r={};return n.m=t,n.c=r,n.p="",n(0)}([function(t,n,r){n.glMatrix=r(1),n.mat2=r(2),n.mat2d=r(3),n.mat3=r(4),n.mat4=r(5),n.quat=r(6),n.vec2=r(9),n.vec3=r(7),n.vec4=r(8)},function(t,n,r){var a={};a.EPSILON=1e-6,a.ARRAY_TYPE="undefined"!=typeof Float32Array?Float32Array:Array,a.RANDOM=Math.random,a.setMatrixArrayType=function(t){GLMAT_ARRAY_TYPE=t};var e=Math.PI/180;a.toRadian=function(t){return t*e},t.exports=a},function(t,n,r){var a=r(1),e={};e.create=function(){var t=new a.ARRAY_TYPE(4);return t[0]=1,t[1]=0,t[2]=0,t[3]=1,t},e.clone=function(t){var n=new a.ARRAY_TYPE(4);return n[0]=t[0],n[1]=t[1],n[2]=t[2],n[3]=t[3],n},e.copy=function(t,n){return t[0]=n[0],t[1]=n[1],t[2]=n[2],t[3]=n[3],t},e.identity=function(t){return t[0]=1,t[1]=0,t[2]=0,t[3]=1,t},e.transpose=function(t,n){if(t===n){var r=n[1];t[1]=n[2],t[2]=r}else t[0]=n[0],t[1]=n[2],t[2]=n[1],t[3]=n[3];return t},e.invert=function(t,n){var r=n[0],a=n[1],e=n[2],u=n[3],o=r*u-e*a;return o?(o=1/o,t[0]=u*o,t[1]=-a*o,t[2]=-e*o,t[3]=r*o,t):null},e.adjoint=function(t,n){var r=n[0];return t[0]=n[3],t[1]=-n[1],t[2]=-n[2],t[3]=r,t},e.determinant=function(t){return t[0]*t[3]-t[2]*t[1]},e.multiply=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=n[3],i=r[0],c=r[1],f=r[2],s=r[3];return t[0]=a*i+u*c,t[1]=e*i+o*c,t[2]=a*f+u*s,t[3]=e*f+o*s,t},e.mul=e.multiply,e.rotate=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=n[3],i=Math.sin(r),c=Math.cos(r);return t[0]=a*c+u*i,t[1]=e*c+o*i,t[2]=a*-i+u*c,t[3]=e*-i+o*c,t},e.scale=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=n[3],i=r[0],c=r[1];return t[0]=a*i,t[1]=e*i,t[2]=u*c,t[3]=o*c,t},e.fromRotation=function(t,n){var r=Math.sin(n),a=Math.cos(n);return t[0]=a,t[1]=r,t[2]=-r,t[3]=a,t},e.fromScaling=function(t,n){return t[0]=n[0],t[1]=0,t[2]=0,t[3]=n[1],t},e.str=function(t){return"mat2("+t[0]+", "+t[1]+", "+t[2]+", "+t[3]+")"},e.frob=function(t){return Math.sqrt(Math.pow(t[0],2)+Math.pow(t[1],2)+Math.pow(t[2],2)+Math.pow(t[3],2))},e.LDU=function(t,n,r,a){return t[2]=a[2]/a[0],r[0]=a[0],r[1]=a[1],r[3]=a[3]-t[2]*r[1],[t,n,r]},t.exports=e},function(t,n,r){var a=r(1),e={};e.create=function(){var t=new a.ARRAY_TYPE(6);return t[0]=1,t[1]=0,t[2]=0,t[3]=1,t[4]=0,t[5]=0,t},e.clone=function(t){var n=new a.ARRAY_TYPE(6);return n[0]=t[0],n[1]=t[1],n[2]=t[2],n[3]=t[3],n[4]=t[4],n[5]=t[5],n},e.copy=function(t,n){return t[0]=n[0],t[1]=n[1],t[2]=n[2],t[3]=n[3],t[4]=n[4],t[5]=n[5],t},e.identity=function(t){return t[0]=1,t[1]=0,t[2]=0,t[3]=1,t[4]=0,t[5]=0,t},e.invert=function(t,n){var r=n[0],a=n[1],e=n[2],u=n[3],o=n[4],i=n[5],c=r*u-a*e;return c?(c=1/c,t[0]=u*c,t[1]=-a*c,t[2]=-e*c,t[3]=r*c,t[4]=(e*i-u*o)*c,t[5]=(a*o-r*i)*c,t):null},e.determinant=function(t){return t[0]*t[3]-t[1]*t[2]},e.multiply=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=n[3],i=n[4],c=n[5],f=r[0],s=r[1],h=r[2],M=r[3],l=r[4],v=r[5];return t[0]=a*f+u*s,t[1]=e*f+o*s,t[2]=a*h+u*M,t[3]=e*h+o*M,t[4]=a*l+u*v+i,t[5]=e*l+o*v+c,t},e.mul=e.multiply,e.rotate=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=n[3],i=n[4],c=n[5],f=Math.sin(r),s=Math.cos(r);return 
t[0]=a*s+u*f,t[1]=e*s+o*f,t[2]=a*-f+u*s,t[3]=e*-f+o*s,t[4]=i,t[5]=c,t},e.scale=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=n[3],i=n[4],c=n[5],f=r[0],s=r[1];return t[0]=a*f,t[1]=e*f,t[2]=u*s,t[3]=o*s,t[4]=i,t[5]=c,t},e.translate=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=n[3],i=n[4],c=n[5],f=r[0],s=r[1];return t[0]=a,t[1]=e,t[2]=u,t[3]=o,t[4]=a*f+u*s+i,t[5]=e*f+o*s+c,t},e.fromRotation=function(t,n){var r=Math.sin(n),a=Math.cos(n);return t[0]=a,t[1]=r,t[2]=-r,t[3]=a,t[4]=0,t[5]=0,t},e.fromScaling=function(t,n){return t[0]=n[0],t[1]=0,t[2]=0,t[3]=n[1],t[4]=0,t[5]=0,t},e.fromTranslation=function(t,n){return t[0]=1,t[1]=0,t[2]=0,t[3]=1,t[4]=n[0],t[5]=n[1],t},e.str=function(t){return"mat2d("+t[0]+", "+t[1]+", "+t[2]+", "+t[3]+", "+t[4]+", "+t[5]+")"},e.frob=function(t){return Math.sqrt(Math.pow(t[0],2)+Math.pow(t[1],2)+Math.pow(t[2],2)+Math.pow(t[3],2)+Math.pow(t[4],2)+Math.pow(t[5],2)+1)},t.exports=e},function(t,n,r){var a=r(1),e={};e.create=function(){var t=new a.ARRAY_TYPE(9);return t[0]=1,t[1]=0,t[2]=0,t[3]=0,t[4]=1,t[5]=0,t[6]=0,t[7]=0,t[8]=1,t},e.fromMat4=function(t,n){return t[0]=n[0],t[1]=n[1],t[2]=n[2],t[3]=n[4],t[4]=n[5],t[5]=n[6],t[6]=n[8],t[7]=n[9],t[8]=n[10],t},e.clone=function(t){var n=new a.ARRAY_TYPE(9);return n[0]=t[0],n[1]=t[1],n[2]=t[2],n[3]=t[3],n[4]=t[4],n[5]=t[5],n[6]=t[6],n[7]=t[7],n[8]=t[8],n},e.copy=function(t,n){return t[0]=n[0],t[1]=n[1],t[2]=n[2],t[3]=n[3],t[4]=n[4],t[5]=n[5],t[6]=n[6],t[7]=n[7],t[8]=n[8],t},e.identity=function(t){return t[0]=1,t[1]=0,t[2]=0,t[3]=0,t[4]=1,t[5]=0,t[6]=0,t[7]=0,t[8]=1,t},e.transpose=function(t,n){if(t===n){var r=n[1],a=n[2],e=n[5];t[1]=n[3],t[2]=n[6],t[3]=r,t[5]=n[7],t[6]=a,t[7]=e}else t[0]=n[0],t[1]=n[3],t[2]=n[6],t[3]=n[1],t[4]=n[4],t[5]=n[7],t[6]=n[2],t[7]=n[5],t[8]=n[8];return t},e.invert=function(t,n){var r=n[0],a=n[1],e=n[2],u=n[3],o=n[4],i=n[5],c=n[6],f=n[7],s=n[8],h=s*o-i*f,M=-s*u+i*c,l=f*u-o*c,v=r*h+a*M+e*l;return v?(v=1/v,t[0]=h*v,t[1]=(-s*a+e*f)*v,t[2]=(i*a-e*o)*v,t[3]=M*v,t[4]=(s*r-e*c)*v,t[5]=(-i*r+e*u)*v,t[6]=l*v,t[7]=(-f*r+a*c)*v,t[8]=(o*r-a*u)*v,t):null},e.adjoint=function(t,n){var r=n[0],a=n[1],e=n[2],u=n[3],o=n[4],i=n[5],c=n[6],f=n[7],s=n[8];return t[0]=o*s-i*f,t[1]=e*f-a*s,t[2]=a*i-e*o,t[3]=i*c-u*s,t[4]=r*s-e*c,t[5]=e*u-r*i,t[6]=u*f-o*c,t[7]=a*c-r*f,t[8]=r*o-a*u,t},e.determinant=function(t){var n=t[0],r=t[1],a=t[2],e=t[3],u=t[4],o=t[5],i=t[6],c=t[7],f=t[8];return n*(f*u-o*c)+r*(-f*e+o*i)+a*(c*e-u*i)},e.multiply=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=n[3],i=n[4],c=n[5],f=n[6],s=n[7],h=n[8],M=r[0],l=r[1],v=r[2],m=r[3],p=r[4],d=r[5],A=r[6],R=r[7],w=r[8];return t[0]=M*a+l*o+v*f,t[1]=M*e+l*i+v*s,t[2]=M*u+l*c+v*h,t[3]=m*a+p*o+d*f,t[4]=m*e+p*i+d*s,t[5]=m*u+p*c+d*h,t[6]=A*a+R*o+w*f,t[7]=A*e+R*i+w*s,t[8]=A*u+R*c+w*h,t},e.mul=e.multiply,e.translate=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=n[3],i=n[4],c=n[5],f=n[6],s=n[7],h=n[8],M=r[0],l=r[1];return t[0]=a,t[1]=e,t[2]=u,t[3]=o,t[4]=i,t[5]=c,t[6]=M*a+l*o+f,t[7]=M*e+l*i+s,t[8]=M*u+l*c+h,t},e.rotate=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=n[3],i=n[4],c=n[5],f=n[6],s=n[7],h=n[8],M=Math.sin(r),l=Math.cos(r);return t[0]=l*a+M*o,t[1]=l*e+M*i,t[2]=l*u+M*c,t[3]=l*o-M*a,t[4]=l*i-M*e,t[5]=l*c-M*u,t[6]=f,t[7]=s,t[8]=h,t},e.scale=function(t,n,r){var a=r[0],e=r[1];return t[0]=a*n[0],t[1]=a*n[1],t[2]=a*n[2],t[3]=e*n[3],t[4]=e*n[4],t[5]=e*n[5],t[6]=n[6],t[7]=n[7],t[8]=n[8],t},e.fromTranslation=function(t,n){return t[0]=1,t[1]=0,t[2]=0,t[3]=0,t[4]=1,t[5]=0,t[6]=n[0],t[7]=n[1],t[8]=1,t},e.fromRotation=function(t,n){var r=Math.sin(n),a=Math.cos(n);return 
t[0]=a,t[1]=r,t[2]=0,t[3]=-r,t[4]=a,t[5]=0,t[6]=0,t[7]=0,t[8]=1,t},e.fromScaling=function(t,n){return t[0]=n[0],t[1]=0,t[2]=0,t[3]=0,t[4]=n[1],t[5]=0,t[6]=0,t[7]=0,t[8]=1,t},e.fromMat2d=function(t,n){return t[0]=n[0],t[1]=n[1],t[2]=0,t[3]=n[2],t[4]=n[3],t[5]=0,t[6]=n[4],t[7]=n[5],t[8]=1,t},e.fromQuat=function(t,n){var r=n[0],a=n[1],e=n[2],u=n[3],o=r+r,i=a+a,c=e+e,f=r*o,s=a*o,h=a*i,M=e*o,l=e*i,v=e*c,m=u*o,p=u*i,d=u*c;return t[0]=1-h-v,t[3]=s-d,t[6]=M+p,t[1]=s+d,t[4]=1-f-v,t[7]=l-m,t[2]=M-p,t[5]=l+m,t[8]=1-f-h,t},e.normalFromMat4=function(t,n){var r=n[0],a=n[1],e=n[2],u=n[3],o=n[4],i=n[5],c=n[6],f=n[7],s=n[8],h=n[9],M=n[10],l=n[11],v=n[12],m=n[13],p=n[14],d=n[15],A=r*i-a*o,R=r*c-e*o,w=r*f-u*o,q=a*c-e*i,Y=a*f-u*i,g=e*f-u*c,y=s*m-h*v,x=s*p-M*v,P=s*d-l*v,E=h*p-M*m,T=h*d-l*m,b=M*d-l*p,D=A*b-R*T+w*E+q*P-Y*x+g*y;return D?(D=1/D,t[0]=(i*b-c*T+f*E)*D,t[1]=(c*P-o*b-f*x)*D,t[2]=(o*T-i*P+f*y)*D,t[3]=(e*T-a*b-u*E)*D,t[4]=(r*b-e*P+u*x)*D,t[5]=(a*P-r*T-u*y)*D,t[6]=(m*g-p*Y+d*q)*D,t[7]=(p*w-v*g-d*R)*D,t[8]=(v*Y-m*w+d*A)*D,t):null},e.str=function(t){return"mat3("+t[0]+", "+t[1]+", "+t[2]+", "+t[3]+", "+t[4]+", "+t[5]+", "+t[6]+", "+t[7]+", "+t[8]+")"},e.frob=function(t){return Math.sqrt(Math.pow(t[0],2)+Math.pow(t[1],2)+Math.pow(t[2],2)+Math.pow(t[3],2)+Math.pow(t[4],2)+Math.pow(t[5],2)+Math.pow(t[6],2)+Math.pow(t[7],2)+Math.pow(t[8],2))},t.exports=e},function(t,n,r){var a=r(1),e={};e.create=function(){var t=new a.ARRAY_TYPE(16);return t[0]=1,t[1]=0,t[2]=0,t[3]=0,t[4]=0,t[5]=1,t[6]=0,t[7]=0,t[8]=0,t[9]=0,t[10]=1,t[11]=0,t[12]=0,t[13]=0,t[14]=0,t[15]=1,t},e.clone=function(t){var n=new a.ARRAY_TYPE(16);return n[0]=t[0],n[1]=t[1],n[2]=t[2],n[3]=t[3],n[4]=t[4],n[5]=t[5],n[6]=t[6],n[7]=t[7],n[8]=t[8],n[9]=t[9],n[10]=t[10],n[11]=t[11],n[12]=t[12],n[13]=t[13],n[14]=t[14],n[15]=t[15],n},e.copy=function(t,n){return t[0]=n[0],t[1]=n[1],t[2]=n[2],t[3]=n[3],t[4]=n[4],t[5]=n[5],t[6]=n[6],t[7]=n[7],t[8]=n[8],t[9]=n[9],t[10]=n[10],t[11]=n[11],t[12]=n[12],t[13]=n[13],t[14]=n[14],t[15]=n[15],t},e.identity=function(t){return t[0]=1,t[1]=0,t[2]=0,t[3]=0,t[4]=0,t[5]=1,t[6]=0,t[7]=0,t[8]=0,t[9]=0,t[10]=1,t[11]=0,t[12]=0,t[13]=0,t[14]=0,t[15]=1,t},e.transpose=function(t,n){if(t===n){var r=n[1],a=n[2],e=n[3],u=n[6],o=n[7],i=n[11];t[1]=n[4],t[2]=n[8],t[3]=n[12],t[4]=r,t[6]=n[9],t[7]=n[13],t[8]=a,t[9]=u,t[11]=n[14],t[12]=e,t[13]=o,t[14]=i}else t[0]=n[0],t[1]=n[4],t[2]=n[8],t[3]=n[12],t[4]=n[1],t[5]=n[5],t[6]=n[9],t[7]=n[13],t[8]=n[2],t[9]=n[6],t[10]=n[10],t[11]=n[14],t[12]=n[3],t[13]=n[7],t[14]=n[11],t[15]=n[15];return t},e.invert=function(t,n){var r=n[0],a=n[1],e=n[2],u=n[3],o=n[4],i=n[5],c=n[6],f=n[7],s=n[8],h=n[9],M=n[10],l=n[11],v=n[12],m=n[13],p=n[14],d=n[15],A=r*i-a*o,R=r*c-e*o,w=r*f-u*o,q=a*c-e*i,Y=a*f-u*i,g=e*f-u*c,y=s*m-h*v,x=s*p-M*v,P=s*d-l*v,E=h*p-M*m,T=h*d-l*m,b=M*d-l*p,D=A*b-R*T+w*E+q*P-Y*x+g*y;return D?(D=1/D,t[0]=(i*b-c*T+f*E)*D,t[1]=(e*T-a*b-u*E)*D,t[2]=(m*g-p*Y+d*q)*D,t[3]=(M*Y-h*g-l*q)*D,t[4]=(c*P-o*b-f*x)*D,t[5]=(r*b-e*P+u*x)*D,t[6]=(p*w-v*g-d*R)*D,t[7]=(s*g-M*w+l*R)*D,t[8]=(o*T-i*P+f*y)*D,t[9]=(a*P-r*T-u*y)*D,t[10]=(v*Y-m*w+d*A)*D,t[11]=(h*w-s*Y-l*A)*D,t[12]=(i*x-o*E-c*y)*D,t[13]=(r*E-a*x+e*y)*D,t[14]=(m*R-v*q-p*A)*D,t[15]=(s*q-h*R+M*A)*D,t):null},e.adjoint=function(t,n){var r=n[0],a=n[1],e=n[2],u=n[3],o=n[4],i=n[5],c=n[6],f=n[7],s=n[8],h=n[9],M=n[10],l=n[11],v=n[12],m=n[13],p=n[14],d=n[15];return 
t[0]=i*(M*d-l*p)-h*(c*d-f*p)+m*(c*l-f*M),t[1]=-(a*(M*d-l*p)-h*(e*d-u*p)+m*(e*l-u*M)),t[2]=a*(c*d-f*p)-i*(e*d-u*p)+m*(e*f-u*c),t[3]=-(a*(c*l-f*M)-i*(e*l-u*M)+h*(e*f-u*c)),t[4]=-(o*(M*d-l*p)-s*(c*d-f*p)+v*(c*l-f*M)),t[5]=r*(M*d-l*p)-s*(e*d-u*p)+v*(e*l-u*M),t[6]=-(r*(c*d-f*p)-o*(e*d-u*p)+v*(e*f-u*c)),t[7]=r*(c*l-f*M)-o*(e*l-u*M)+s*(e*f-u*c),t[8]=o*(h*d-l*m)-s*(i*d-f*m)+v*(i*l-f*h),t[9]=-(r*(h*d-l*m)-s*(a*d-u*m)+v*(a*l-u*h)),t[10]=r*(i*d-f*m)-o*(a*d-u*m)+v*(a*f-u*i),t[11]=-(r*(i*l-f*h)-o*(a*l-u*h)+s*(a*f-u*i)),t[12]=-(o*(h*p-M*m)-s*(i*p-c*m)+v*(i*M-c*h)),t[13]=r*(h*p-M*m)-s*(a*p-e*m)+v*(a*M-e*h),t[14]=-(r*(i*p-c*m)-o*(a*p-e*m)+v*(a*c-e*i)),t[15]=r*(i*M-c*h)-o*(a*M-e*h)+s*(a*c-e*i),t},e.determinant=function(t){var n=t[0],r=t[1],a=t[2],e=t[3],u=t[4],o=t[5],i=t[6],c=t[7],f=t[8],s=t[9],h=t[10],M=t[11],l=t[12],v=t[13],m=t[14],p=t[15],d=n*o-r*u,A=n*i-a*u,R=n*c-e*u,w=r*i-a*o,q=r*c-e*o,Y=a*c-e*i,g=f*v-s*l,y=f*m-h*l,x=f*p-M*l,P=s*m-h*v,E=s*p-M*v,T=h*p-M*m;return d*T-A*E+R*P+w*x-q*y+Y*g},e.multiply=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=n[3],i=n[4],c=n[5],f=n[6],s=n[7],h=n[8],M=n[9],l=n[10],v=n[11],m=n[12],p=n[13],d=n[14],A=n[15],R=r[0],w=r[1],q=r[2],Y=r[3];return t[0]=R*a+w*i+q*h+Y*m,t[1]=R*e+w*c+q*M+Y*p,t[2]=R*u+w*f+q*l+Y*d,t[3]=R*o+w*s+q*v+Y*A,R=r[4],w=r[5],q=r[6],Y=r[7],t[4]=R*a+w*i+q*h+Y*m,t[5]=R*e+w*c+q*M+Y*p,t[6]=R*u+w*f+q*l+Y*d,t[7]=R*o+w*s+q*v+Y*A,R=r[8],w=r[9],q=r[10],Y=r[11],t[8]=R*a+w*i+q*h+Y*m,t[9]=R*e+w*c+q*M+Y*p,t[10]=R*u+w*f+q*l+Y*d,t[11]=R*o+w*s+q*v+Y*A,R=r[12],w=r[13],q=r[14],Y=r[15],t[12]=R*a+w*i+q*h+Y*m,t[13]=R*e+w*c+q*M+Y*p,t[14]=R*u+w*f+q*l+Y*d,t[15]=R*o+w*s+q*v+Y*A,t},e.mul=e.multiply,e.translate=function(t,n,r){var a,e,u,o,i,c,f,s,h,M,l,v,m=r[0],p=r[1],d=r[2];return n===t?(t[12]=n[0]*m+n[4]*p+n[8]*d+n[12],t[13]=n[1]*m+n[5]*p+n[9]*d+n[13],t[14]=n[2]*m+n[6]*p+n[10]*d+n[14],t[15]=n[3]*m+n[7]*p+n[11]*d+n[15]):(a=n[0],e=n[1],u=n[2],o=n[3],i=n[4],c=n[5],f=n[6],s=n[7],h=n[8],M=n[9],l=n[10],v=n[11],t[0]=a,t[1]=e,t[2]=u,t[3]=o,t[4]=i,t[5]=c,t[6]=f,t[7]=s,t[8]=h,t[9]=M,t[10]=l,t[11]=v,t[12]=a*m+i*p+h*d+n[12],t[13]=e*m+c*p+M*d+n[13],t[14]=u*m+f*p+l*d+n[14],t[15]=o*m+s*p+v*d+n[15]),t},e.scale=function(t,n,r){var a=r[0],e=r[1],u=r[2];return t[0]=n[0]*a,t[1]=n[1]*a,t[2]=n[2]*a,t[3]=n[3]*a,t[4]=n[4]*e,t[5]=n[5]*e,t[6]=n[6]*e,t[7]=n[7]*e,t[8]=n[8]*u,t[9]=n[9]*u,t[10]=n[10]*u,t[11]=n[11]*u,t[12]=n[12],t[13]=n[13],t[14]=n[14],t[15]=n[15],t},e.rotate=function(t,n,r,e){var u,o,i,c,f,s,h,M,l,v,m,p,d,A,R,w,q,Y,g,y,x,P,E,T,b=e[0],D=e[1],L=e[2],_=Math.sqrt(b*b+D*D+L*L);return Math.abs(_)<a.EPSILON?null:(_=1/_,b*=_,D*=_,L*=_,u=Math.sin(r),o=Math.cos(r),i=1-o,c=n[0],f=n[1],s=n[2],h=n[3],M=n[4],l=n[5],v=n[6],m=n[7],p=n[8],d=n[9],A=n[10],R=n[11],w=b*b*i+o,q=D*b*i+L*u,Y=L*b*i-D*u,g=b*D*i-L*u,y=D*D*i+o,x=L*D*i+b*u,P=b*L*i+D*u,E=D*L*i-b*u,T=L*L*i+o,t[0]=c*w+M*q+p*Y,t[1]=f*w+l*q+d*Y,t[2]=s*w+v*q+A*Y,t[3]=h*w+m*q+R*Y,t[4]=c*g+M*y+p*x,t[5]=f*g+l*y+d*x,t[6]=s*g+v*y+A*x,t[7]=h*g+m*y+R*x,t[8]=c*P+M*E+p*T,t[9]=f*P+l*E+d*T,t[10]=s*P+v*E+A*T,t[11]=h*P+m*E+R*T,n!==t&&(t[12]=n[12],t[13]=n[13],t[14]=n[14],t[15]=n[15]),t)},e.rotateX=function(t,n,r){var a=Math.sin(r),e=Math.cos(r),u=n[4],o=n[5],i=n[6],c=n[7],f=n[8],s=n[9],h=n[10],M=n[11];return n!==t&&(t[0]=n[0],t[1]=n[1],t[2]=n[2],t[3]=n[3],t[12]=n[12],t[13]=n[13],t[14]=n[14],t[15]=n[15]),t[4]=u*e+f*a,t[5]=o*e+s*a,t[6]=i*e+h*a,t[7]=c*e+M*a,t[8]=f*e-u*a,t[9]=s*e-o*a,t[10]=h*e-i*a,t[11]=M*e-c*a,t},e.rotateY=function(t,n,r){var a=Math.sin(r),e=Math.cos(r),u=n[0],o=n[1],i=n[2],c=n[3],f=n[8],s=n[9],h=n[10],M=n[11];return 
n!==t&&(t[4]=n[4],t[5]=n[5],t[6]=n[6],t[7]=n[7],t[12]=n[12],t[13]=n[13],t[14]=n[14],t[15]=n[15]),t[0]=u*e-f*a,t[1]=o*e-s*a,t[2]=i*e-h*a,t[3]=c*e-M*a,t[8]=u*a+f*e,t[9]=o*a+s*e,t[10]=i*a+h*e,t[11]=c*a+M*e,t},e.rotateZ=function(t,n,r){var a=Math.sin(r),e=Math.cos(r),u=n[0],o=n[1],i=n[2],c=n[3],f=n[4],s=n[5],h=n[6],M=n[7];return n!==t&&(t[8]=n[8],t[9]=n[9],t[10]=n[10],t[11]=n[11],t[12]=n[12],t[13]=n[13],t[14]=n[14],t[15]=n[15]),t[0]=u*e+f*a,t[1]=o*e+s*a,t[2]=i*e+h*a,t[3]=c*e+M*a,t[4]=f*e-u*a,t[5]=s*e-o*a,t[6]=h*e-i*a,t[7]=M*e-c*a,t},e.fromTranslation=function(t,n){return t[0]=1,t[1]=0,t[2]=0,t[3]=0,t[4]=0,t[5]=1,t[6]=0,t[7]=0,t[8]=0,t[9]=0,t[10]=1,t[11]=0,t[12]=n[0],t[13]=n[1],t[14]=n[2],t[15]=1,t},e.fromScaling=function(t,n){return t[0]=n[0],t[1]=0,t[2]=0,t[3]=0,t[4]=0,t[5]=n[1],t[6]=0,t[7]=0,t[8]=0,t[9]=0,t[10]=n[2],t[11]=0,t[12]=0,t[13]=0,t[14]=0,t[15]=1,t},e.fromRotation=function(t,n,r){var e,u,o,i=r[0],c=r[1],f=r[2],s=Math.sqrt(i*i+c*c+f*f);return Math.abs(s)<a.EPSILON?null:(s=1/s,i*=s,c*=s,f*=s,e=Math.sin(n),u=Math.cos(n),o=1-u,t[0]=i*i*o+u,t[1]=c*i*o+f*e,t[2]=f*i*o-c*e,t[3]=0,t[4]=i*c*o-f*e,t[5]=c*c*o+u,t[6]=f*c*o+i*e,t[7]=0,t[8]=i*f*o+c*e,t[9]=c*f*o-i*e,t[10]=f*f*o+u,t[11]=0,t[12]=0,t[13]=0,t[14]=0,t[15]=1,t)},e.fromXRotation=function(t,n){var r=Math.sin(n),a=Math.cos(n);return t[0]=1,t[1]=0,t[2]=0,t[3]=0,t[4]=0,t[5]=a,t[6]=r,t[7]=0,t[8]=0,t[9]=-r,t[10]=a,t[11]=0,t[12]=0,t[13]=0,t[14]=0,t[15]=1,t},e.fromYRotation=function(t,n){var r=Math.sin(n),a=Math.cos(n);return t[0]=a,t[1]=0,t[2]=-r,t[3]=0,t[4]=0,t[5]=1,t[6]=0,t[7]=0,t[8]=r,t[9]=0,t[10]=a,t[11]=0,t[12]=0,t[13]=0,t[14]=0,t[15]=1,t},e.fromZRotation=function(t,n){var r=Math.sin(n),a=Math.cos(n);return t[0]=a,t[1]=r,t[2]=0,t[3]=0,t[4]=-r,t[5]=a,t[6]=0,t[7]=0,t[8]=0,t[9]=0,t[10]=1,t[11]=0,t[12]=0,t[13]=0,t[14]=0,t[15]=1,t},e.fromRotationTranslation=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=n[3],i=a+a,c=e+e,f=u+u,s=a*i,h=a*c,M=a*f,l=e*c,v=e*f,m=u*f,p=o*i,d=o*c,A=o*f;return t[0]=1-(l+m),t[1]=h+A,t[2]=M-d,t[3]=0,t[4]=h-A,t[5]=1-(s+m),t[6]=v+p,t[7]=0,t[8]=M+d,t[9]=v-p,t[10]=1-(s+l),t[11]=0,t[12]=r[0],t[13]=r[1],t[14]=r[2],t[15]=1,t},e.fromRotationTranslationScale=function(t,n,r,a){var e=n[0],u=n[1],o=n[2],i=n[3],c=e+e,f=u+u,s=o+o,h=e*c,M=e*f,l=e*s,v=u*f,m=u*s,p=o*s,d=i*c,A=i*f,R=i*s,w=a[0],q=a[1],Y=a[2];return t[0]=(1-(v+p))*w,t[1]=(M+R)*w,t[2]=(l-A)*w,t[3]=0,t[4]=(M-R)*q,t[5]=(1-(h+p))*q,t[6]=(m+d)*q,t[7]=0,t[8]=(l+A)*Y,t[9]=(m-d)*Y,t[10]=(1-(h+v))*Y,t[11]=0,t[12]=r[0],t[13]=r[1],t[14]=r[2],t[15]=1,t},e.fromRotationTranslationScaleOrigin=function(t,n,r,a,e){var u=n[0],o=n[1],i=n[2],c=n[3],f=u+u,s=o+o,h=i+i,M=u*f,l=u*s,v=u*h,m=o*s,p=o*h,d=i*h,A=c*f,R=c*s,w=c*h,q=a[0],Y=a[1],g=a[2],y=e[0],x=e[1],P=e[2];return t[0]=(1-(m+d))*q,t[1]=(l+w)*q,t[2]=(v-R)*q,t[3]=0,t[4]=(l-w)*Y,t[5]=(1-(M+d))*Y,t[6]=(p+A)*Y,t[7]=0,t[8]=(v+R)*g,t[9]=(p-A)*g,t[10]=(1-(M+m))*g,t[11]=0,t[12]=r[0]+y-(t[0]*y+t[4]*x+t[8]*P),t[13]=r[1]+x-(t[1]*y+t[5]*x+t[9]*P),t[14]=r[2]+P-(t[2]*y+t[6]*x+t[10]*P),t[15]=1,t},e.fromQuat=function(t,n){var r=n[0],a=n[1],e=n[2],u=n[3],o=r+r,i=a+a,c=e+e,f=r*o,s=a*o,h=a*i,M=e*o,l=e*i,v=e*c,m=u*o,p=u*i,d=u*c;return t[0]=1-h-v,t[1]=s+d,t[2]=M-p,t[3]=0,t[4]=s-d,t[5]=1-f-v,t[6]=l+m,t[7]=0,t[8]=M+p,t[9]=l-m,t[10]=1-f-h,t[11]=0,t[12]=0,t[13]=0,t[14]=0,t[15]=1,t},e.frustum=function(t,n,r,a,e,u,o){var i=1/(r-n),c=1/(e-a),f=1/(u-o);return t[0]=2*u*i,t[1]=0,t[2]=0,t[3]=0,t[4]=0,t[5]=2*u*c,t[6]=0,t[7]=0,t[8]=(r+n)*i,t[9]=(e+a)*c,t[10]=(o+u)*f,t[11]=-1,t[12]=0,t[13]=0,t[14]=o*u*2*f,t[15]=0,t},e.perspective=function(t,n,r,a,e){var 
u=1/Math.tan(n/2),o=1/(a-e);return t[0]=u/r,t[1]=0,t[2]=0,t[3]=0,t[4]=0,t[5]=u,t[6]=0,t[7]=0,t[8]=0,t[9]=0,t[10]=(e+a)*o,t[11]=-1,t[12]=0,t[13]=0,t[14]=2*e*a*o,t[15]=0,t},e.perspectiveFromFieldOfView=function(t,n,r,a){var e=Math.tan(n.upDegrees*Math.PI/180),u=Math.tan(n.downDegrees*Math.PI/180),o=Math.tan(n.leftDegrees*Math.PI/180),i=Math.tan(n.rightDegrees*Math.PI/180),c=2/(o+i),f=2/(e+u);return t[0]=c,t[1]=0,t[2]=0,t[3]=0,t[4]=0,t[5]=f,t[6]=0,t[7]=0,t[8]=-((o-i)*c*.5),t[9]=(e-u)*f*.5,t[10]=a/(r-a),t[11]=-1,t[12]=0,t[13]=0,t[14]=a*r/(r-a),t[15]=0,t},e.ortho=function(t,n,r,a,e,u,o){var i=1/(n-r),c=1/(a-e),f=1/(u-o);return t[0]=-2*i,t[1]=0,t[2]=0,t[3]=0,t[4]=0,t[5]=-2*c,t[6]=0,t[7]=0,t[8]=0,t[9]=0,t[10]=2*f,t[11]=0,t[12]=(n+r)*i,t[13]=(e+a)*c,t[14]=(o+u)*f,t[15]=1,t},e.lookAt=function(t,n,r,u){var o,i,c,f,s,h,M,l,v,m,p=n[0],d=n[1],A=n[2],R=u[0],w=u[1],q=u[2],Y=r[0],g=r[1],y=r[2];return Math.abs(p-Y)<a.EPSILON&&Math.abs(d-g)<a.EPSILON&&Math.abs(A-y)<a.EPSILON?e.identity(t):(M=p-Y,l=d-g,v=A-y,m=1/Math.sqrt(M*M+l*l+v*v),M*=m,l*=m,v*=m,o=w*v-q*l,i=q*M-R*v,c=R*l-w*M,m=Math.sqrt(o*o+i*i+c*c),m?(m=1/m,o*=m,i*=m,c*=m):(o=0,i=0,c=0),f=l*c-v*i,s=v*o-M*c,h=M*i-l*o,m=Math.sqrt(f*f+s*s+h*h),m?(m=1/m,f*=m,s*=m,h*=m):(f=0,s=0,h=0),t[0]=o,t[1]=f,t[2]=M,t[3]=0,t[4]=i,t[5]=s,t[6]=l,t[7]=0,t[8]=c,t[9]=h,t[10]=v,t[11]=0,t[12]=-(o*p+i*d+c*A),t[13]=-(f*p+s*d+h*A),t[14]=-(M*p+l*d+v*A),t[15]=1,t)},e.str=function(t){return"mat4("+t[0]+", "+t[1]+", "+t[2]+", "+t[3]+", "+t[4]+", "+t[5]+", "+t[6]+", "+t[7]+", "+t[8]+", "+t[9]+", "+t[10]+", "+t[11]+", "+t[12]+", "+t[13]+", "+t[14]+", "+t[15]+")"},e.frob=function(t){return Math.sqrt(Math.pow(t[0],2)+Math.pow(t[1],2)+Math.pow(t[2],2)+Math.pow(t[3],2)+Math.pow(t[4],2)+Math.pow(t[5],2)+Math.pow(t[6],2)+Math.pow(t[7],2)+Math.pow(t[8],2)+Math.pow(t[9],2)+Math.pow(t[10],2)+Math.pow(t[11],2)+Math.pow(t[12],2)+Math.pow(t[13],2)+Math.pow(t[14],2)+Math.pow(t[15],2))},t.exports=e},function(t,n,r){var a=r(1),e=r(4),u=r(7),o=r(8),i={};i.create=function(){var t=new a.ARRAY_TYPE(4);return t[0]=0,t[1]=0,t[2]=0,t[3]=1,t},i.rotationTo=function(){var t=u.create(),n=u.fromValues(1,0,0),r=u.fromValues(0,1,0);return function(a,e,o){var c=u.dot(e,o);return-.999999>c?(u.cross(t,n,e),u.length(t)<1e-6&&u.cross(t,r,e),u.normalize(t,t),i.setAxisAngle(a,t,Math.PI),a):c>.999999?(a[0]=0,a[1]=0,a[2]=0,a[3]=1,a):(u.cross(t,e,o),a[0]=t[0],a[1]=t[1],a[2]=t[2],a[3]=1+c,i.normalize(a,a))}}(),i.setAxes=function(){var t=e.create();return function(n,r,a,e){return t[0]=a[0],t[3]=a[1],t[6]=a[2],t[1]=e[0],t[4]=e[1],t[7]=e[2],t[2]=-r[0],t[5]=-r[1],t[8]=-r[2],i.normalize(n,i.fromMat3(n,t))}}(),i.clone=o.clone,i.fromValues=o.fromValues,i.copy=o.copy,i.set=o.set,i.identity=function(t){return t[0]=0,t[1]=0,t[2]=0,t[3]=1,t},i.setAxisAngle=function(t,n,r){r=.5*r;var a=Math.sin(r);return t[0]=a*n[0],t[1]=a*n[1],t[2]=a*n[2],t[3]=Math.cos(r),t},i.add=o.add,i.multiply=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=n[3],i=r[0],c=r[1],f=r[2],s=r[3];return t[0]=a*s+o*i+e*f-u*c,t[1]=e*s+o*c+u*i-a*f,t[2]=u*s+o*f+a*c-e*i,t[3]=o*s-a*i-e*c-u*f,t},i.mul=i.multiply,i.scale=o.scale,i.rotateX=function(t,n,r){r*=.5;var a=n[0],e=n[1],u=n[2],o=n[3],i=Math.sin(r),c=Math.cos(r);return t[0]=a*c+o*i,t[1]=e*c+u*i,t[2]=u*c-e*i,t[3]=o*c-a*i,t},i.rotateY=function(t,n,r){r*=.5;var a=n[0],e=n[1],u=n[2],o=n[3],i=Math.sin(r),c=Math.cos(r);return t[0]=a*c-u*i,t[1]=e*c+o*i,t[2]=u*c+a*i,t[3]=o*c-e*i,t},i.rotateZ=function(t,n,r){r*=.5;var a=n[0],e=n[1],u=n[2],o=n[3],i=Math.sin(r),c=Math.cos(r);return 
t[0]=a*c+e*i,t[1]=e*c-a*i,t[2]=u*c+o*i,t[3]=o*c-u*i,t},i.calculateW=function(t,n){var r=n[0],a=n[1],e=n[2];return t[0]=r,t[1]=a,t[2]=e,t[3]=Math.sqrt(Math.abs(1-r*r-a*a-e*e)),t},i.dot=o.dot,i.lerp=o.lerp,i.slerp=function(t,n,r,a){var e,u,o,i,c,f=n[0],s=n[1],h=n[2],M=n[3],l=r[0],v=r[1],m=r[2],p=r[3];return u=f*l+s*v+h*m+M*p,0>u&&(u=-u,l=-l,v=-v,m=-m,p=-p),1-u>1e-6?(e=Math.acos(u),o=Math.sin(e),i=Math.sin((1-a)*e)/o,c=Math.sin(a*e)/o):(i=1-a,c=a),t[0]=i*f+c*l,t[1]=i*s+c*v,t[2]=i*h+c*m,t[3]=i*M+c*p,t},i.sqlerp=function(){var t=i.create(),n=i.create();return function(r,a,e,u,o,c){return i.slerp(t,a,o,c),i.slerp(n,e,u,c),i.slerp(r,t,n,2*c*(1-c)),r}}(),i.invert=function(t,n){var r=n[0],a=n[1],e=n[2],u=n[3],o=r*r+a*a+e*e+u*u,i=o?1/o:0;return t[0]=-r*i,t[1]=-a*i,t[2]=-e*i,t[3]=u*i,t},i.conjugate=function(t,n){return t[0]=-n[0],t[1]=-n[1],t[2]=-n[2],t[3]=n[3],t},i.length=o.length,i.len=i.length,i.squaredLength=o.squaredLength,i.sqrLen=i.squaredLength,i.normalize=o.normalize,i.fromMat3=function(t,n){var r,a=n[0]+n[4]+n[8];if(a>0)r=Math.sqrt(a+1),t[3]=.5*r,r=.5/r,t[0]=(n[5]-n[7])*r,t[1]=(n[6]-n[2])*r,t[2]=(n[1]-n[3])*r;else{var e=0;n[4]>n[0]&&(e=1),n[8]>n[3*e+e]&&(e=2);var u=(e+1)%3,o=(e+2)%3;r=Math.sqrt(n[3*e+e]-n[3*u+u]-n[3*o+o]+1),t[e]=.5*r,r=.5/r,t[3]=(n[3*u+o]-n[3*o+u])*r,t[u]=(n[3*u+e]+n[3*e+u])*r,t[o]=(n[3*o+e]+n[3*e+o])*r}return t},i.str=function(t){return"quat("+t[0]+", "+t[1]+", "+t[2]+", "+t[3]+")"},t.exports=i},function(t,n,r){var a=r(1),e={};e.create=function(){var t=new a.ARRAY_TYPE(3);return t[0]=0,t[1]=0,t[2]=0,t},e.clone=function(t){var n=new a.ARRAY_TYPE(3);return n[0]=t[0],n[1]=t[1],n[2]=t[2],n},e.fromValues=function(t,n,r){var e=new a.ARRAY_TYPE(3);return e[0]=t,e[1]=n,e[2]=r,e},e.copy=function(t,n){return t[0]=n[0],t[1]=n[1],t[2]=n[2],t},e.set=function(t,n,r,a){return t[0]=n,t[1]=r,t[2]=a,t},e.add=function(t,n,r){return t[0]=n[0]+r[0],t[1]=n[1]+r[1],t[2]=n[2]+r[2],t},e.subtract=function(t,n,r){return t[0]=n[0]-r[0],t[1]=n[1]-r[1],t[2]=n[2]-r[2],t},e.sub=e.subtract,e.multiply=function(t,n,r){return t[0]=n[0]*r[0],t[1]=n[1]*r[1],t[2]=n[2]*r[2],t},e.mul=e.multiply,e.divide=function(t,n,r){return t[0]=n[0]/r[0],t[1]=n[1]/r[1],t[2]=n[2]/r[2],t},e.div=e.divide,e.min=function(t,n,r){return t[0]=Math.min(n[0],r[0]),t[1]=Math.min(n[1],r[1]),t[2]=Math.min(n[2],r[2]),t},e.max=function(t,n,r){return t[0]=Math.max(n[0],r[0]),t[1]=Math.max(n[1],r[1]),t[2]=Math.max(n[2],r[2]),t},e.scale=function(t,n,r){return t[0]=n[0]*r,t[1]=n[1]*r,t[2]=n[2]*r,t},e.scaleAndAdd=function(t,n,r,a){return t[0]=n[0]+r[0]*a,t[1]=n[1]+r[1]*a,t[2]=n[2]+r[2]*a,t},e.distance=function(t,n){var r=n[0]-t[0],a=n[1]-t[1],e=n[2]-t[2];return Math.sqrt(r*r+a*a+e*e)},e.dist=e.distance,e.squaredDistance=function(t,n){var r=n[0]-t[0],a=n[1]-t[1],e=n[2]-t[2];return r*r+a*a+e*e},e.sqrDist=e.squaredDistance,e.length=function(t){var n=t[0],r=t[1],a=t[2];return Math.sqrt(n*n+r*r+a*a)},e.len=e.length,e.squaredLength=function(t){var n=t[0],r=t[1],a=t[2];return n*n+r*r+a*a},e.sqrLen=e.squaredLength,e.negate=function(t,n){return t[0]=-n[0],t[1]=-n[1],t[2]=-n[2],t},e.inverse=function(t,n){return t[0]=1/n[0],t[1]=1/n[1],t[2]=1/n[2],t},e.normalize=function(t,n){var r=n[0],a=n[1],e=n[2],u=r*r+a*a+e*e;return u>0&&(u=1/Math.sqrt(u),t[0]=n[0]*u,t[1]=n[1]*u,t[2]=n[2]*u),t},e.dot=function(t,n){return t[0]*n[0]+t[1]*n[1]+t[2]*n[2]},e.cross=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=r[0],i=r[1],c=r[2];return t[0]=e*c-u*i,t[1]=u*o-a*c,t[2]=a*i-e*o,t},e.lerp=function(t,n,r,a){var e=n[0],u=n[1],o=n[2];return 
t[0]=e+a*(r[0]-e),t[1]=u+a*(r[1]-u),t[2]=o+a*(r[2]-o),t},e.hermite=function(t,n,r,a,e,u){var o=u*u,i=o*(2*u-3)+1,c=o*(u-2)+u,f=o*(u-1),s=o*(3-2*u);return t[0]=n[0]*i+r[0]*c+a[0]*f+e[0]*s,t[1]=n[1]*i+r[1]*c+a[1]*f+e[1]*s,t[2]=n[2]*i+r[2]*c+a[2]*f+e[2]*s,t},e.bezier=function(t,n,r,a,e,u){var o=1-u,i=o*o,c=u*u,f=i*o,s=3*u*i,h=3*c*o,M=c*u;return t[0]=n[0]*f+r[0]*s+a[0]*h+e[0]*M,t[1]=n[1]*f+r[1]*s+a[1]*h+e[1]*M,t[2]=n[2]*f+r[2]*s+a[2]*h+e[2]*M,t},e.random=function(t,n){n=n||1;var r=2*a.RANDOM()*Math.PI,e=2*a.RANDOM()-1,u=Math.sqrt(1-e*e)*n;return t[0]=Math.cos(r)*u,t[1]=Math.sin(r)*u,t[2]=e*n,t},e.transformMat4=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=r[3]*a+r[7]*e+r[11]*u+r[15];return o=o||1,t[0]=(r[0]*a+r[4]*e+r[8]*u+r[12])/o,t[1]=(r[1]*a+r[5]*e+r[9]*u+r[13])/o,t[2]=(r[2]*a+r[6]*e+r[10]*u+r[14])/o,t},e.transformMat3=function(t,n,r){var a=n[0],e=n[1],u=n[2];return t[0]=a*r[0]+e*r[3]+u*r[6],t[1]=a*r[1]+e*r[4]+u*r[7],t[2]=a*r[2]+e*r[5]+u*r[8],t},e.transformQuat=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=r[0],i=r[1],c=r[2],f=r[3],s=f*a+i*u-c*e,h=f*e+c*a-o*u,M=f*u+o*e-i*a,l=-o*a-i*e-c*u;return t[0]=s*f+l*-o+h*-c-M*-i,t[1]=h*f+l*-i+M*-o-s*-c,t[2]=M*f+l*-c+s*-i-h*-o,t},e.rotateX=function(t,n,r,a){var e=[],u=[];return e[0]=n[0]-r[0],e[1]=n[1]-r[1],e[2]=n[2]-r[2],u[0]=e[0],u[1]=e[1]*Math.cos(a)-e[2]*Math.sin(a),u[2]=e[1]*Math.sin(a)+e[2]*Math.cos(a),t[0]=u[0]+r[0],t[1]=u[1]+r[1],t[2]=u[2]+r[2],t},e.rotateY=function(t,n,r,a){var e=[],u=[];return e[0]=n[0]-r[0],e[1]=n[1]-r[1],e[2]=n[2]-r[2],u[0]=e[2]*Math.sin(a)+e[0]*Math.cos(a),u[1]=e[1],u[2]=e[2]*Math.cos(a)-e[0]*Math.sin(a),t[0]=u[0]+r[0],t[1]=u[1]+r[1],t[2]=u[2]+r[2],t},e.rotateZ=function(t,n,r,a){var e=[],u=[];return e[0]=n[0]-r[0],e[1]=n[1]-r[1],e[2]=n[2]-r[2],u[0]=e[0]*Math.cos(a)-e[1]*Math.sin(a),u[1]=e[0]*Math.sin(a)+e[1]*Math.cos(a),u[2]=e[2],t[0]=u[0]+r[0],t[1]=u[1]+r[1],t[2]=u[2]+r[2],t},e.forEach=function(){var t=e.create();return function(n,r,a,e,u,o){var i,c;for(r||(r=3),a||(a=0),c=e?Math.min(e*r+a,n.length):n.length,i=a;c>i;i+=r)t[0]=n[i],t[1]=n[i+1],t[2]=n[i+2],u(t,t,o),n[i]=t[0],n[i+1]=t[1],n[i+2]=t[2];return n}}(),e.angle=function(t,n){var r=e.fromValues(t[0],t[1],t[2]),a=e.fromValues(n[0],n[1],n[2]);e.normalize(r,r),e.normalize(a,a);var u=e.dot(r,a);return u>1?0:Math.acos(u)},e.str=function(t){return"vec3("+t[0]+", "+t[1]+", "+t[2]+")"},t.exports=e},function(t,n,r){var a=r(1),e={};e.create=function(){var t=new a.ARRAY_TYPE(4);return t[0]=0,t[1]=0,t[2]=0,t[3]=0,t},e.clone=function(t){var n=new a.ARRAY_TYPE(4);return n[0]=t[0],n[1]=t[1],n[2]=t[2],n[3]=t[3],n},e.fromValues=function(t,n,r,e){var u=new a.ARRAY_TYPE(4);return u[0]=t,u[1]=n,u[2]=r,u[3]=e,u},e.copy=function(t,n){return t[0]=n[0],t[1]=n[1],t[2]=n[2],t[3]=n[3],t},e.set=function(t,n,r,a,e){return t[0]=n,t[1]=r,t[2]=a,t[3]=e,t},e.add=function(t,n,r){return t[0]=n[0]+r[0],t[1]=n[1]+r[1],t[2]=n[2]+r[2],t[3]=n[3]+r[3],t},e.subtract=function(t,n,r){return t[0]=n[0]-r[0],t[1]=n[1]-r[1],t[2]=n[2]-r[2],t[3]=n[3]-r[3],t},e.sub=e.subtract,e.multiply=function(t,n,r){return t[0]=n[0]*r[0],t[1]=n[1]*r[1],t[2]=n[2]*r[2],t[3]=n[3]*r[3],t},e.mul=e.multiply,e.divide=function(t,n,r){return t[0]=n[0]/r[0],t[1]=n[1]/r[1],t[2]=n[2]/r[2],t[3]=n[3]/r[3],t},e.div=e.divide,e.min=function(t,n,r){return t[0]=Math.min(n[0],r[0]),t[1]=Math.min(n[1],r[1]),t[2]=Math.min(n[2],r[2]),t[3]=Math.min(n[3],r[3]),t},e.max=function(t,n,r){return t[0]=Math.max(n[0],r[0]),t[1]=Math.max(n[1],r[1]),t[2]=Math.max(n[2],r[2]),t[3]=Math.max(n[3],r[3]),t},e.scale=function(t,n,r){return 
t[0]=n[0]*r,t[1]=n[1]*r,t[2]=n[2]*r,t[3]=n[3]*r,t},e.scaleAndAdd=function(t,n,r,a){return t[0]=n[0]+r[0]*a,t[1]=n[1]+r[1]*a,t[2]=n[2]+r[2]*a,t[3]=n[3]+r[3]*a,t},e.distance=function(t,n){var r=n[0]-t[0],a=n[1]-t[1],e=n[2]-t[2],u=n[3]-t[3];return Math.sqrt(r*r+a*a+e*e+u*u)},e.dist=e.distance,e.squaredDistance=function(t,n){var r=n[0]-t[0],a=n[1]-t[1],e=n[2]-t[2],u=n[3]-t[3];return r*r+a*a+e*e+u*u},e.sqrDist=e.squaredDistance,e.length=function(t){var n=t[0],r=t[1],a=t[2],e=t[3];return Math.sqrt(n*n+r*r+a*a+e*e)},e.len=e.length,e.squaredLength=function(t){var n=t[0],r=t[1],a=t[2],e=t[3];return n*n+r*r+a*a+e*e},e.sqrLen=e.squaredLength,e.negate=function(t,n){return t[0]=-n[0],t[1]=-n[1],t[2]=-n[2],t[3]=-n[3],t},e.inverse=function(t,n){return t[0]=1/n[0],t[1]=1/n[1],t[2]=1/n[2],t[3]=1/n[3],t},e.normalize=function(t,n){var r=n[0],a=n[1],e=n[2],u=n[3],o=r*r+a*a+e*e+u*u;return o>0&&(o=1/Math.sqrt(o),t[0]=r*o,t[1]=a*o,t[2]=e*o,t[3]=u*o),t},e.dot=function(t,n){return t[0]*n[0]+t[1]*n[1]+t[2]*n[2]+t[3]*n[3]},e.lerp=function(t,n,r,a){var e=n[0],u=n[1],o=n[2],i=n[3];return t[0]=e+a*(r[0]-e),t[1]=u+a*(r[1]-u),t[2]=o+a*(r[2]-o),t[3]=i+a*(r[3]-i),t},e.random=function(t,n){return n=n||1,t[0]=a.RANDOM(),t[1]=a.RANDOM(),t[2]=a.RANDOM(),t[3]=a.RANDOM(),e.normalize(t,t),e.scale(t,t,n),t},e.transformMat4=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=n[3];return t[0]=r[0]*a+r[4]*e+r[8]*u+r[12]*o,t[1]=r[1]*a+r[5]*e+r[9]*u+r[13]*o,t[2]=r[2]*a+r[6]*e+r[10]*u+r[14]*o,t[3]=r[3]*a+r[7]*e+r[11]*u+r[15]*o,t},e.transformQuat=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=r[0],i=r[1],c=r[2],f=r[3],s=f*a+i*u-c*e,h=f*e+c*a-o*u,M=f*u+o*e-i*a,l=-o*a-i*e-c*u;return t[0]=s*f+l*-o+h*-c-M*-i,t[1]=h*f+l*-i+M*-o-s*-c,t[2]=M*f+l*-c+s*-i-h*-o,t[3]=n[3],t},e.forEach=function(){var t=e.create();return function(n,r,a,e,u,o){var i,c;for(r||(r=4),a||(a=0),c=e?Math.min(e*r+a,n.length):n.length,i=a;c>i;i+=r)t[0]=n[i],t[1]=n[i+1],t[2]=n[i+2],t[3]=n[i+3],u(t,t,o),n[i]=t[0],n[i+1]=t[1],n[i+2]=t[2],n[i+3]=t[3];return n}}(),e.str=function(t){return"vec4("+t[0]+", "+t[1]+", "+t[2]+", "+t[3]+")"},t.exports=e},function(t,n,r){var a=r(1),e={};e.create=function(){var t=new a.ARRAY_TYPE(2);return t[0]=0,t[1]=0,t},e.clone=function(t){var n=new a.ARRAY_TYPE(2);return n[0]=t[0],n[1]=t[1],n},e.fromValues=function(t,n){var r=new a.ARRAY_TYPE(2);return r[0]=t,r[1]=n,r},e.copy=function(t,n){return t[0]=n[0],t[1]=n[1],t},e.set=function(t,n,r){return t[0]=n,t[1]=r,t},e.add=function(t,n,r){return t[0]=n[0]+r[0],t[1]=n[1]+r[1],t},e.subtract=function(t,n,r){return t[0]=n[0]-r[0],t[1]=n[1]-r[1],t},e.sub=e.subtract,e.multiply=function(t,n,r){return t[0]=n[0]*r[0],t[1]=n[1]*r[1],t},e.mul=e.multiply,e.divide=function(t,n,r){return t[0]=n[0]/r[0],t[1]=n[1]/r[1],t},e.div=e.divide,e.min=function(t,n,r){return t[0]=Math.min(n[0],r[0]),t[1]=Math.min(n[1],r[1]),t},e.max=function(t,n,r){return t[0]=Math.max(n[0],r[0]),t[1]=Math.max(n[1],r[1]),t},e.scale=function(t,n,r){return t[0]=n[0]*r,t[1]=n[1]*r,t},e.scaleAndAdd=function(t,n,r,a){return t[0]=n[0]+r[0]*a,t[1]=n[1]+r[1]*a,t},e.distance=function(t,n){var r=n[0]-t[0],a=n[1]-t[1];return Math.sqrt(r*r+a*a)},e.dist=e.distance,e.squaredDistance=function(t,n){var r=n[0]-t[0],a=n[1]-t[1];return r*r+a*a},e.sqrDist=e.squaredDistance,e.length=function(t){var n=t[0],r=t[1];return Math.sqrt(n*n+r*r)},e.len=e.length,e.squaredLength=function(t){var n=t[0],r=t[1];return n*n+r*r},e.sqrLen=e.squaredLength,e.negate=function(t,n){return t[0]=-n[0],t[1]=-n[1],t},e.inverse=function(t,n){return t[0]=1/n[0],t[1]=1/n[1],t},e.normalize=function(t,n){var 
r=n[0],a=n[1],e=r*r+a*a;return e>0&&(e=1/Math.sqrt(e),t[0]=n[0]*e,t[1]=n[1]*e),t},e.dot=function(t,n){return t[0]*n[0]+t[1]*n[1]},e.cross=function(t,n,r){var a=n[0]*r[1]-n[1]*r[0];return t[0]=t[1]=0,t[2]=a,t},e.lerp=function(t,n,r,a){var e=n[0],u=n[1];return t[0]=e+a*(r[0]-e),t[1]=u+a*(r[1]-u),t},e.random=function(t,n){n=n||1;var r=2*a.RANDOM()*Math.PI;return t[0]=Math.cos(r)*n,t[1]=Math.sin(r)*n,t},e.transformMat2=function(t,n,r){var a=n[0],e=n[1];return t[0]=r[0]*a+r[2]*e,t[1]=r[1]*a+r[3]*e,t},e.transformMat2d=function(t,n,r){var a=n[0],e=n[1];return t[0]=r[0]*a+r[2]*e+r[4],t[1]=r[1]*a+r[3]*e+r[5],t},e.transformMat3=function(t,n,r){var a=n[0],e=n[1];return t[0]=r[0]*a+r[3]*e+r[6],t[1]=r[1]*a+r[4]*e+r[7],t},e.transformMat4=function(t,n,r){var a=n[0],e=n[1];return t[0]=r[0]*a+r[4]*e+r[12],t[1]=r[1]*a+r[5]*e+r[13],t},e.forEach=function(){var t=e.create();return function(n,r,a,e,u,o){var i,c;for(r||(r=2),a||(a=0),c=e?Math.min(e*r+a,n.length):n.length,i=a;c>i;i+=r)t[0]=n[i],t[1]=n[i+1],u(t,t,o),n[i]=t[0],n[i+1]=t[1];return n}}(),e.str=function(t){return"vec2("+t[0]+", "+t[1]+")"},t.exports=e}])});'use strict';(function(global){if(tr.isNode){var glMatrixAbsPath=HTMLImportsLoader.hrefToAbsolutePath('/gl-matrix-min.js');var glMatrixModule=require(glMatrixAbsPath);for(var exportName in glMatrixModule){global[exportName]=glMatrixModule[exportName];}}})(this);'use strict';tr.exportTo('tr.b',function(){function clamp(x,lo,hi){return Math.min(Math.max(x,lo),hi);}
+function lerp(percentage,lo,hi){var range=hi-lo;return lo+percentage*range;}
+function normalize(value,lo,hi){return(value-lo)/(hi-lo);}
+function deg2rad(deg){return(Math.PI*deg)/180.0;}
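clamp, lerp and normalize compose in the usual way: normalize expresses a value as a fraction of one range and lerp scales that fraction into another, which is essentially how vec2.interpolatePiecewiseFunction below interpolates between adjacent control points. A small sketch using the helpers defined just above; remap is an illustrative name, not part of this file.

```
// Remap x from [inLo, inHi] to [outLo, outHi] with the helpers defined above.
function remap(x, inLo, inHi, outLo, outHi) {
  return lerp(normalize(x, inLo, inHi), outLo, outHi);
}

// remap(5, 0, 10, 100, 200) === 150
// clamp(remap(15, 0, 10, 100, 200), 100, 200) === 200  // clamp bounds the result
```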
+var tmp_vec2=vec2.create();var tmp_vec2b=vec2.create();var tmp_vec4=vec4.create();var tmp_mat2d=mat2d.create();vec2.createFromArray=function(arr){if(arr.length!=2)
+throw new Error('Should be length 2');var v=vec2.create();vec2.set(v,arr[0],arr[1]);return v;};vec2.createXY=function(x,y){var v=vec2.create();vec2.set(v,x,y);return v;};vec2.toString=function(a){return'['+a[0]+', '+a[1]+']';};vec2.addTwoScaledUnitVectors=function(out,u1,scale1,u2,scale2){vec2.scale(tmp_vec2,u1,scale1);vec2.scale(tmp_vec2b,u2,scale2);vec2.add(out,tmp_vec2,tmp_vec2b);};vec2.interpolatePiecewiseFunction=function(points,x){if(x<points[0][0])
+return points[0][1];for(var i=1;i<points.length;++i){if(x<points[i][0]){var percent=normalize(x,points[i-1][0],points[i][0]);return lerp(percent,points[i-1][1],points[i][1]);}}
+return points[points.length-1][1];};vec3.createXYZ=function(x,y,z){var v=vec3.create();vec3.set(v,x,y,z);return v;};vec3.toString=function(a){return'vec3('+a[0]+', '+a[1]+', '+a[2]+')';};mat2d.translateXY=function(out,x,y){vec2.set(tmp_vec2,x,y);mat2d.translate(out,out,tmp_vec2);};mat2d.scaleXY=function(out,x,y){vec2.set(tmp_vec2,x,y);mat2d.scale(out,out,tmp_vec2);};vec4.unitize=function(out,a){out[0]=a[0]/a[3];out[1]=a[1]/a[3];out[2]=a[2]/a[3];out[3]=1;return out;};vec2.copyFromVec4=function(out,a){vec4.unitize(tmp_vec4,a);vec2.copy(out,tmp_vec4);};return{clamp:clamp,lerp:lerp,normalize:normalize,deg2rad:deg2rad};});'use strict';tr.exportTo('tr.b',function(){var tmpVec2s=[];for(var i=0;i<8;i++)
+tmpVec2s[i]=vec2.create();var tmpVec2a=vec4.create();var tmpVec4a=vec4.create();var tmpVec4b=vec4.create();var tmpMat4=mat4.create();var tmpMat4b=mat4.create();var p00=vec2.createXY(0,0);var p10=vec2.createXY(1,0);var p01=vec2.createXY(0,1);var p11=vec2.createXY(1,1);var lerpingVecA=vec2.create();var lerpingVecB=vec2.create();function lerpVec2(out,a,b,amt){vec2.scale(lerpingVecA,a,amt);vec2.scale(lerpingVecB,b,1-amt);vec2.add(out,lerpingVecA,lerpingVecB);vec2.normalize(out,out);return out;}
+function Quad(){this.p1=vec2.create();this.p2=vec2.create();this.p3=vec2.create();this.p4=vec2.create();}
+Quad.fromXYWH=function(x,y,w,h){var q=new Quad();vec2.set(q.p1,x,y);vec2.set(q.p2,x+w,y);vec2.set(q.p3,x+w,y+h);vec2.set(q.p4,x,y+h);return q;}
+Quad.fromRect=function(r){return new Quad.fromXYWH(r.x,r.y,r.width,r.height);}
+Quad.from4Vecs=function(p1,p2,p3,p4){var q=new Quad();vec2.set(q.p1,p1[0],p1[1]);vec2.set(q.p2,p2[0],p2[1]);vec2.set(q.p3,p3[0],p3[1]);vec2.set(q.p4,p4[0],p4[1]);return q;}
+Quad.from8Array=function(arr){if(arr.length!=8)
+throw new Error('Array must be 8 long');var q=new Quad();q.p1[0]=arr[0];q.p1[1]=arr[1];q.p2[0]=arr[2];q.p2[1]=arr[3];q.p3[0]=arr[4];q.p3[1]=arr[5];q.p4[0]=arr[6];q.p4[1]=arr[7];return q;};Quad.prototype={pointInside:function(point){return pointInImplicitQuad(point,this.p1,this.p2,this.p3,this.p4);},boundingRect:function(){var x0=Math.min(this.p1[0],this.p2[0],this.p3[0],this.p4[0]);var y0=Math.min(this.p1[1],this.p2[1],this.p3[1],this.p4[1]);var x1=Math.max(this.p1[0],this.p2[0],this.p3[0],this.p4[0]);var y1=Math.max(this.p1[1],this.p2[1],this.p3[1],this.p4[1]);return new tr.b.Rect.fromXYWH(x0,y0,x1-x0,y1-y0);},clone:function(){var q=new Quad();vec2.copy(q.p1,this.p1);vec2.copy(q.p2,this.p2);vec2.copy(q.p3,this.p3);vec2.copy(q.p4,this.p4);return q;},scale:function(s){var q=new Quad();this.scaleFast(q,s);return q;},scaleFast:function(dstQuad,s){vec2.copy(dstQuad.p1,this.p1,s);vec2.copy(dstQuad.p2,this.p2,s);vec2.copy(dstQuad.p3,this.p3,s);vec2.copy(dstQuad.p3,this.p3,s);},isRectangle:function(){var bounds=this.boundingRect();return(bounds.x==this.p1[0]&&bounds.y==this.p1[1]&&bounds.width==this.p2[0]-this.p1[0]&&bounds.y==this.p2[1]&&bounds.width==this.p3[0]-this.p1[0]&&bounds.height==this.p3[1]-this.p2[1]&&bounds.x==this.p4[0]&&bounds.height==this.p4[1]-this.p2[1]);},projectUnitRect:function(rect){var q=new Quad();this.projectUnitRectFast(q,rect);return q;},projectUnitRectFast:function(dstQuad,rect){var v12=tmpVec2s[0];var v14=tmpVec2s[1];var v23=tmpVec2s[2];var v43=tmpVec2s[3];var l12,l14,l23,l43;vec2.sub(v12,this.p2,this.p1);l12=vec2.length(v12);vec2.scale(v12,v12,1/l12);vec2.sub(v14,this.p4,this.p1);l14=vec2.length(v14);vec2.scale(v14,v14,1/l14);vec2.sub(v23,this.p3,this.p2);l23=vec2.length(v23);vec2.scale(v23,v23,1/l23);vec2.sub(v43,this.p3,this.p4);l43=vec2.length(v43);vec2.scale(v43,v43,1/l43);var b12=tmpVec2s[0];var b14=tmpVec2s[1];var b23=tmpVec2s[2];var b43=tmpVec2s[3];lerpVec2(b12,v12,v43,rect.y);lerpVec2(b43,v12,v43,1-rect.bottom);lerpVec2(b14,v14,v23,rect.x);lerpVec2(b23,v14,v23,1-rect.right);vec2.addTwoScaledUnitVectors(tmpVec2a,b12,l12*rect.x,b14,l14*rect.y);vec2.add(dstQuad.p1,this.p1,tmpVec2a);vec2.addTwoScaledUnitVectors(tmpVec2a,b12,l12*-(1.0-rect.right),b23,l23*rect.y);vec2.add(dstQuad.p2,this.p2,tmpVec2a);vec2.addTwoScaledUnitVectors(tmpVec2a,b43,l43*-(1.0-rect.right),b23,l23*-(1.0-rect.bottom));vec2.add(dstQuad.p3,this.p3,tmpVec2a);vec2.addTwoScaledUnitVectors(tmpVec2a,b43,l43*rect.left,b14,l14*-(1.0-rect.bottom));vec2.add(dstQuad.p4,this.p4,tmpVec2a);},toString:function(){return'Quad('+
+vec2.toString(this.p1)+', '+
+vec2.toString(this.p2)+', '+
+vec2.toString(this.p3)+', '+
+vec2.toString(this.p4)+')';}};function sign(p1,p2,p3){return(p1[0]-p3[0])*(p2[1]-p3[1])-
+(p2[0]-p3[0])*(p1[1]-p3[1]);}
+function pointInTriangle2(pt,p1,p2,p3){var b1=sign(pt,p1,p2)<0.0;var b2=sign(pt,p2,p3)<0.0;var b3=sign(pt,p3,p1)<0.0;return((b1==b2)&&(b2==b3));}
+function pointInImplicitQuad(point,p1,p2,p3,p4){return pointInTriangle2(point,p1,p2,p3)||pointInTriangle2(point,p1,p3,p4);}
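pointInTriangle2 uses the signed-area (cross product) test: a point is inside a triangle when it lies on the same side of all three edges, and pointInImplicitQuad simply splits the quad into two triangles. A usage sketch, assuming the tr.b exports from this file are loaded; the coordinates are arbitrary example values.

```
// Usage sketch, assuming the tr.b exports from this file are loaded.
var q = tr.b.Quad.fromXYWH(0, 0, 10, 10);  // axis-aligned 10x10 quad at the origin

q.pointInside([5, 5]);    // true: the point passes the same-sign test for one of
                          // the two triangles the quad is split into
q.pointInside([15, 5]);   // false: outside both triangles

// The underlying primitive works directly on three corner points.
tr.b.pointInTriangle2([1, 1], [0, 0], [4, 0], [0, 4]);  // true
```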
+return{pointInTriangle2:pointInTriangle2,pointInImplicitQuad:pointInImplicitQuad,Quad:Quad};});'use strict';tr.exportTo('tr.b',function(){function addSingletonGetter(ctor){ctor.getInstance=function(){return ctor.instance_||(ctor.instance_=new ctor());};}
+function deepCopy(value){if(!(value instanceof Object)){if(value===undefined||value===null)
+return value;if(typeof value=='string')
+return value.substring();if(typeof value=='boolean')
+return value;if(typeof value=='number')
+return value;throw new Error('Unrecognized: '+typeof value);}
+var object=value;if(object instanceof Array){var res=new Array(object.length);for(var i=0;i<object.length;i++)
+res[i]=deepCopy(object[i]);return res;}
+if(object.__proto__!=Object.prototype)
+throw new Error('Can only clone simple types');var res={};for(var key in object){res[key]=deepCopy(object[key]);}
+return res;}
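deepCopy above only accepts primitives, Arrays, and plain objects whose prototype is Object.prototype; anything else throws. A short behaviour sketch, assuming tr.b.deepCopy from this file is loaded; the sample objects are illustrative.

```
// Behaviour sketch for tr.b.deepCopy, assuming this file is loaded.
var original = {name: 'frame', times: [1, 2, 3], nested: {ok: true}};
var copy = tr.b.deepCopy(original);

copy.times.push(4);
original.times.length;      // still 3 -- arrays are copied element by element

tr.b.deepCopy(new Date());  // throws: only primitives, Arrays and plain
                            // Object.prototype objects are supported
```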
+function normalizeException(e){if(e===undefined||e===null){return{typeName:'UndefinedError',message:'Unknown: null or undefined exception',stack:'Unknown'};}
+if(typeof(e)=='string'){return{typeName:'StringError',message:e,stack:[e]};}
+var typeName;if(e.name){typeName=e.name;}else if(e.constructor){if(e.constructor.name){typeName=e.constructor.name;}else{typeName='AnonymousError';}}else{typeName='ErrorWithNoConstructor';}
+var msg=e.message?e.message:'Unknown';return{typeName:typeName,message:msg,stack:e.stack?e.stack:[msg]};}
+function stackTraceAsString(){return new Error().stack+'';}
+function stackTrace(){var stack=stackTraceAsString();stack=stack.split('\n');return stack.slice(2);}
+function getUsingPath(path,from_dict){var parts=path.split('.');var cur=from_dict;for(var part;parts.length&&(part=parts.shift());){if(!parts.length){return cur[part];}else if(part in cur){cur=cur[part];}else{return undefined;}}
+return undefined;}
+return{addSingletonGetter:addSingletonGetter,deepCopy:deepCopy,normalizeException:normalizeException,stackTrace:stackTrace,stackTraceAsString:stackTraceAsString,getUsingPath:getUsingPath};});'use strict';tr.exportTo('tr.b',function(){var ESTIMATED_IDLE_PERIOD_LENGTH_MILLISECONDS=10;var REQUEST_IDLE_CALLBACK_TIMEOUT_MILLISECONDS=100;var recordRAFStacks=false;var pendingPreAFs=[];var pendingRAFs=[];var pendingIdleCallbacks=[];var currentRAFDispatchList=undefined;var rafScheduled=false;var idleWorkScheduled=false;function scheduleRAF(){if(rafScheduled)
+return;rafScheduled=true;if(tr.isHeadless){Promise.resolve().then(function(){processRequests(false,0);},function(e){console.log(e.stack);throw e;});}else{if(window.requestAnimationFrame){window.requestAnimationFrame(processRequests.bind(this,false));}else{var delta=Date.now()-window.performance.now();window.webkitRequestAnimationFrame(function(domTimeStamp){processRequests(false,domTimeStamp-delta);});}}}
+function nativeRequestIdleCallbackSupported(){return!tr.isHeadless&&window.requestIdleCallback;}
+function scheduleIdleWork(){if(idleWorkScheduled)
+return;if(!nativeRequestIdleCallbackSupported()){scheduleRAF();return;}
+idleWorkScheduled=true;window.requestIdleCallback(function(deadline,didTimeout){processIdleWork(false,deadline);},{timeout:REQUEST_IDLE_CALLBACK_TIMEOUT_MILLISECONDS});}
+function onAnimationFrameError(e,opt_stack){console.log(e.stack);if(tr.isHeadless)
+throw e;if(opt_stack)
+console.log(opt_stack);if(e.message)
+console.error(e.message,e.stack);else
+console.error(e);}
+function runTask(task,frameBeginTime){try{task.callback.call(task.context,frameBeginTime);}catch(e){tr.b.onAnimationFrameError(e,task.stack);}}
+function processRequests(forceAllTasksToRun,frameBeginTime){rafScheduled=false;var currentPreAFs=pendingPreAFs;currentRAFDispatchList=pendingRAFs;pendingPreAFs=[];pendingRAFs=[];var hasRAFTasks=currentPreAFs.length||currentRAFDispatchList.length;for(var i=0;i<currentPreAFs.length;i++)
+runTask(currentPreAFs[i],frameBeginTime);while(currentRAFDispatchList.length>0)
+runTask(currentRAFDispatchList.shift(),frameBeginTime);currentRAFDispatchList=undefined;if((!hasRAFTasks&&!nativeRequestIdleCallbackSupported())||forceAllTasksToRun){var rafCompletionDeadline=frameBeginTime+ESTIMATED_IDLE_PERIOD_LENGTH_MILLISECONDS;processIdleWork(forceAllTasksToRun,{timeRemaining:function(){return rafCompletionDeadline-window.performance.now();}});}
+if(pendingIdleCallbacks.length>0)
+scheduleIdleWork();}
+function processIdleWork(forceAllTasksToRun,deadline){idleWorkScheduled=false;while(pendingIdleCallbacks.length>0){runTask(pendingIdleCallbacks.shift());if(!forceAllTasksToRun&&(tr.isHeadless||deadline.timeRemaining()<=0)){break;}}
+if(pendingIdleCallbacks.length>0)
+scheduleIdleWork();}
+function getStack_(){if(!recordRAFStacks)
+return'';var stackLines=tr.b.stackTrace();stackLines.shift();return stackLines.join('\n');}
+function requestPreAnimationFrame(callback,opt_this){pendingPreAFs.push({callback:callback,context:opt_this||global,stack:getStack_()});scheduleRAF();}
+function requestAnimationFrameInThisFrameIfPossible(callback,opt_this){if(!currentRAFDispatchList){requestAnimationFrame(callback,opt_this);return;}
+currentRAFDispatchList.push({callback:callback,context:opt_this||global,stack:getStack_()});return;}
+function requestAnimationFrame(callback,opt_this){pendingRAFs.push({callback:callback,context:opt_this||global,stack:getStack_()});scheduleRAF();}
+function requestIdleCallback(callback,opt_this){pendingIdleCallbacks.push({callback:callback,context:opt_this||global,stack:getStack_()});scheduleIdleWork();}
+function forcePendingRAFTasksToRun(frameBeginTime){if(!rafScheduled)
+return;processRequests(false,frameBeginTime);}
+function forceAllPendingTasksToRunForTest(){if(!rafScheduled&&!idleWorkScheduled)
+return;processRequests(true,0);}
+return{onAnimationFrameError:onAnimationFrameError,requestPreAnimationFrame:requestPreAnimationFrame,requestAnimationFrame:requestAnimationFrame,requestAnimationFrameInThisFrameIfPossible:requestAnimationFrameInThisFrameIfPossible,requestIdleCallback:requestIdleCallback,forcePendingRAFTasksToRun:forcePendingRAFTasksToRun,forceAllPendingTasksToRunForTest:forceAllPendingTasksToRunForTest};});'use strict';tr.exportTo('tr.b',function(){function Base64(){}
+function b64ToUint6(nChr){if(nChr>64&&nChr<91)
+return nChr-65;if(nChr>96&&nChr<123)
+return nChr-71;if(nChr>47&&nChr<58)
+return nChr+4;if(nChr===43)
+return 62;if(nChr===47)
+return 63;return 0;}
+Base64.getDecodedBufferLength=function(input){return input.length*3+1>>2;};Base64.EncodeArrayBufferToString=function(input){var binary='';var bytes=new Uint8Array(input);var len=bytes.byteLength;for(var i=0;i<len;i++)
+binary+=String.fromCharCode(bytes[i]);return btoa(binary);};Base64.DecodeToTypedArray=function(input,output){var nInLen=input.length;var nOutLen=nInLen*3+1>>2;var nMod3=0;var nMod4=0;var nUint24=0;var nOutIdx=0;if(nOutLen>output.byteLength)
+throw new Error('Output buffer too small to decode.');for(var nInIdx=0;nInIdx<nInLen;nInIdx++){nMod4=nInIdx&3;nUint24|=b64ToUint6(input.charCodeAt(nInIdx))<<18-6*nMod4;if(nMod4===3||nInLen-nInIdx===1){for(nMod3=0;nMod3<3&&nOutIdx<nOutLen;nMod3++,nOutIdx++){output.setUint8(nOutIdx,nUint24>>>(16>>>nMod3&24)&255);}
+nUint24=0;}}
+return nOutIdx-1;};Base64.btoa=function(input){return btoa(input);};Base64.atob=function(input){return atob(input);};return{Base64:Base64};});'use strict';tr.exportTo('tr.b',function(){var Base64=tr.b.Base64;function computeUserTimingMarkName(groupName,functionName,opt_args){if(groupName===undefined)
+throw new Error('getMeasureString should have group name');if(functionName===undefined)
+throw new Error('getMeasureString should have function name');var userTimingMarkName=groupName+':'+functionName;if(opt_args!==undefined){userTimingMarkName+='/';userTimingMarkName+=Base64.btoa(JSON.stringify(opt_args));}
+return userTimingMarkName;}
+function Timing(){}
+Timing.nextMarkNumber=0;Timing.mark=function(groupName,functionName,opt_args){if(tr.isHeadless){return{end:function(){}};}
+var userTimingMarkName=computeUserTimingMarkName(groupName,functionName,opt_args);var markBeginName='tvcm.mark'+Timing.nextMarkNumber++;var markEndName='tvcm.mark'+Timing.nextMarkNumber++;window.performance.mark(markBeginName);return{end:function(){window.performance.mark(markEndName);window.performance.measure(userTimingMarkName,markBeginName,markEndName);}};};Timing.wrap=function(groupName,callback,opt_args){if(groupName===undefined)
+throw new Error('Timing.wrap should have group name');if(callback.name==='')
+throw new Error('Anonymous function is not allowed');return Timing.wrapNamedFunction(groupName,callback.name,callback,opt_args);};Timing.wrapNamedFunction=function(groupName,functionName,callback,opt_args){function timedNamedFunction(){var markedTime=Timing.mark(groupName,functionName,opt_args);try{callback.apply(this,arguments);}finally{markedTime.end();}}
+return timedNamedFunction;};function TimedNamedPromise(groupName,name,executor,opt_args){var markedTime=Timing.mark(groupName,name,opt_args);var promise=new Promise(executor);promise.then(function(result){markedTime.end();return result;},function(e){markedTime.end();throw e;});return promise;}
+return{_computeUserTimingMarkName:computeUserTimingMarkName,TimedNamedPromise:TimedNamedPromise,Timing:Timing};});'use strict';tr.exportTo('tr.b',function(){var Timing=tr.b.Timing;function Task(runCb,thisArg){if(runCb!==undefined&&thisArg===undefined)
+throw new Error('Almost certainly, you meant to pass a thisArg.');this.runCb_=runCb;this.thisArg_=thisArg;this.afterTask_=undefined;this.subTasks_=[];}
+Task.prototype={get name(){return this.runCb_.name;},subTask:function(cb,thisArg){if(cb instanceof Task)
+this.subTasks_.push(cb);else
+this.subTasks_.push(new Task(cb,thisArg));return this.subTasks_[this.subTasks_.length-1];},run:function(){if(this.runCb_!==undefined)
+this.runCb_.call(this.thisArg_,this);var subTasks=this.subTasks_;this.subTasks_=undefined;if(!subTasks.length)
+return this.afterTask_;for(var i=1;i<subTasks.length;i++)
+subTasks[i-1].afterTask_=subTasks[i];subTasks[subTasks.length-1].afterTask_=this.afterTask_;return subTasks[0];},after:function(cb,thisArg){if(this.afterTask_)
+throw new Error('Has an after task already');if(cb instanceof Task)
+this.afterTask_=cb;else
+this.afterTask_=new Task(cb,thisArg);return this.afterTask_;},timedAfter:function(groupName,cb,thisArg,opt_args){if(cb.name==='')
+throw new Error('Anonymous Task is not allowed');return this.namedTimedAfter(groupName,cb.name,cb,thisArg,opt_args);},namedTimedAfter:function(groupName,name,cb,thisArg,opt_args){if(this.afterTask_)
+throw new Error('Has an after task already');var realTask;if(cb instanceof Task)
+realTask=cb;else
+realTask=new Task(cb,thisArg);this.afterTask_=new Task(function(task){var markedTask=Timing.mark(groupName,name,opt_args);task.subTask(realTask,thisArg);task.subTask(function(){markedTask.end();},thisArg);},thisArg);return this.afterTask_;},enqueue:function(cb,thisArg){var lastTask=this;while(lastTask.afterTask_)
+lastTask=lastTask.afterTask_;return lastTask.after(cb,thisArg);}};Task.RunSynchronously=function(task){var curTask=task;while(curTask)
+curTask=curTask.run();}
+Task.RunWhenIdle=function(task){return new Promise(function(resolve,reject){var curTask=task;function runAnother(){try{curTask=curTask.run();}catch(e){reject(e);console.error(e.stack);return;}
+if(curTask){tr.b.requestIdleCallback(runAnother);return;}
+resolve();}
+tr.b.requestIdleCallback(runAnother);});}
+return{Task:Task};});'use strict';tr.exportTo('tr.c',function(){function makeCaseInsensitiveRegex(pattern){pattern=pattern.replace(/[.*+?^${}()|[\]\\]/g,'\\$&');return new RegExp(pattern,'i');}
+function Filter(){}
+Filter.prototype={__proto__:Object.prototype,matchCounter:function(counter){return true;},matchCpu:function(cpu){return true;},matchProcess:function(process){return true;},matchSlice:function(slice){return true;},matchThread:function(thread){return true;}};function TitleOrCategoryFilter(text){Filter.call(this);this.regex_=makeCaseInsensitiveRegex(text);if(!text.length)
+throw new Error('Filter text is empty.');}
+TitleOrCategoryFilter.prototype={__proto__:Filter.prototype,matchSlice:function(slice){if(slice.title===undefined&&slice.category===undefined)
+return false;return this.regex_.test(slice.title)||(!!slice.category&&this.regex_.test(slice.category));}};function ExactTitleFilter(text){Filter.call(this);this.text_=text;if(!text.length)
+throw new Error('Filter text is empty.');}
+ExactTitleFilter.prototype={__proto__:Filter.prototype,matchSlice:function(slice){return slice.title===this.text_;}};function FullTextFilter(text){Filter.call(this);this.regex_=makeCaseInsensitiveRegex(text);this.titleOrCategoryFilter_=new TitleOrCategoryFilter(text);}
+FullTextFilter.prototype={__proto__:Filter.prototype,matchObject_:function(obj){for(var key in obj){if(!obj.hasOwnProperty(key))
+continue;if(this.regex_.test(key))
+return true;if(this.regex_.test(obj[key]))
+return true;}
+return false;},matchSlice:function(slice){if(this.titleOrCategoryFilter_.matchSlice(slice))
+return true;return this.matchObject_(slice.args);}};return{Filter:Filter,TitleOrCategoryFilter:TitleOrCategoryFilter,ExactTitleFilter:ExactTitleFilter,FullTextFilter:FullTextFilter};});'use strict';tr.exportTo('tr.model',function(){var ClockDomainId={BATTOR:'battor',CHROME:'chrome'};function ClockSyncManager(){this.connectorBySyncId_={};this.modelDomainId_=undefined;this.modelTimeTransformerByDomainId_=undefined;}
+ClockSyncManager.prototype={addClockSyncMarker:function(domainId,syncId,startTs,opt_endTs){if(tr.b.dictionaryValues(ClockDomainId).indexOf(domainId)<0){throw new Error('"'+domainId+'" is not in the list of known '+'clock domain IDs.');}
+if(this.modelDomainId_!==undefined){throw new Error('Cannot add new clock sync markers after getting '+'a model time transformer.');}
+var marker=new ClockSyncMarker(domainId,startTs,opt_endTs);var connector=this.connectorBySyncId_[syncId];if(connector===undefined){this.connectorBySyncId_[syncId]=new ClockSyncConnector(marker);return;}
+if(connector.marker2!==undefined){throw new Error('Clock sync with ID "'+syncId+'" is already '+'complete - cannot add a third clock sync marker to it.');}
+if(connector.marker1.domainId===domainId)
+throw new Error('A clock domain cannot sync with itself.');if(this.getConnectorBetween_(connector.marker1.domainId,domainId)!==undefined){throw new Error('Cannot add multiple connectors between the same '+'clock domains.');}
+connector.marker2=marker;},getModelTimeTransformer:function(domainId){if(this.modelTimeTransformerByDomainId_===undefined)
+this.buildModelTimeTransformerMap_();var transformer=this.modelTimeTransformerByDomainId_[domainId];if(transformer===undefined){throw new Error('No clock sync markers exist pairing clock domain "'+
+domainId+'" '+'with model clock domain "'+
+this.modelDomainId_+'".');}
+return transformer;},buildModelTimeTransformerMap_(){var completeConnectorsByDomainId=this.getCompleteConnectorsByDomainId_();var uniqueClockDomainIds=tr.b.dictionaryKeys(completeConnectorsByDomainId);uniqueClockDomainIds.sort();var isFullyConnected=function(domainId){return completeConnectorsByDomainId[domainId].length===uniqueClockDomainIds.length-1;};this.modelDomainId_=tr.b.findFirstInArray(uniqueClockDomainIds,isFullyConnected);if(this.modelDomainId_===undefined){throw new Error('Unable to select a master clock domain because no '+'clock domain is directly connected to all others.');}
+this.modelTimeTransformerByDomainId_={};this.modelTimeTransformerByDomainId_[this.modelDomainId_]=tr.b.identity;var modelConnectors=completeConnectorsByDomainId[this.modelDomainId_];for(var i=0;i<modelConnectors.length;i++){var conn=modelConnectors[i];if(conn.marker1.domainId===this.modelDomainId_){this.modelTimeTransformerByDomainId_[conn.marker2.domainId]=conn.getTransformer(conn.marker2.domainId,conn.marker1.domainId);}else{this.modelTimeTransformerByDomainId_[conn.marker1.domainId]=conn.getTransformer(conn.marker1.domainId,conn.marker2.domainId);}}},getCompleteConnectorsByDomainId_:function(){var completeConnectorsByDomainId={};for(var syncId in this.connectorBySyncId_){var conn=this.connectorBySyncId_[syncId];var domain1=conn.marker1.domainId;if(completeConnectorsByDomainId[domain1]===undefined)
+completeConnectorsByDomainId[domain1]=[];if(conn.marker2===undefined)
+continue;var domain2=conn.marker2.domainId;if(completeConnectorsByDomainId[domain2]===undefined)
+completeConnectorsByDomainId[domain2]=[];completeConnectorsByDomainId[domain1].push(conn);completeConnectorsByDomainId[domain2].push(conn);}
+return completeConnectorsByDomainId;},getConnectorBetween_(domain1Id,domain2Id){for(var syncId in this.connectorBySyncId_){var connector=this.connectorBySyncId_[syncId];if(connector.isBetween(domain1Id,domain2Id))
+return connector;}
+return undefined;}};function ClockSyncMarker(domainId,startTs,opt_endTs){this.domainId=domainId;this.startTs=startTs;this.endTs=opt_endTs===undefined?startTs:opt_endTs;}
+ClockSyncMarker.prototype={get ts(){return(this.startTs+this.endTs)/2;}};function ClockSyncConnector(opt_marker1,opt_marker2){this.marker1=opt_marker1;this.marker2=opt_marker2;}
+ClockSyncConnector.prototype={getTransformer:function(fromDomainId,toDomainId){if(!this.isBetween(fromDomainId,toDomainId))
+throw new Error('This connector cannot perform this transformation.');var fromMarker,toMarker;if(this.marker1.domainId===fromDomainId){fromMarker=this.marker1;toMarker=this.marker2;}else{fromMarker=this.marker2;toMarker=this.marker1;}
+var fromTs=fromMarker.ts,toTs=toMarker.ts;if(fromDomainId==ClockDomainId.BATTOR&&toDomainId==ClockDomainId.CHROME){toTs=toMarker.startTs;}else if(fromDomainId==ClockDomainId.CHROME&&toDomainId==ClockDomainId.BATTOR){fromTs=fromMarker.startTs;}
+var tsShift=toTs-fromTs;return function(ts){return ts+tsShift;};},isBetween:function(domain1Id,domain2Id){if(this.marker1===undefined||this.marker2===undefined)
+return false;if(this.marker1.domainId===domain1Id&&this.marker2.domainId===domain2Id){return true;}
+if(this.marker1.domainId===domain2Id&&this.marker2.domainId===domain1Id){return true;}
+return false;}};return{ClockDomainId:ClockDomainId,ClockSyncManager:ClockSyncManager};});'use strict';tr.exportTo('tr.model',function(){return{BROWSER_PROCESS_PID_REF:-1,OBJECT_DEFAULT_SCOPE:'ptr'};});'use strict';tr.exportTo('tr.model',function(){function EventContainer(){this.guid_=tr.b.GUID.allocate();this.important=true;this.bounds_=new tr.b.Range();}
+EventContainer.prototype={get guid(){return this.guid_;},get stableId(){throw new Error('Not implemented');},get bounds(){return this.bounds_;},updateBounds:function(){throw new Error('Not implemented');},shiftTimestampsForward:function(amount){throw new Error('Not implemented');},iterateAllEventsInThisContainer:function(eventTypePredicate,callback,opt_this){throw new Error('Not implemented');},iterateAllChildEventContainers:function(callback,opt_this){throw new Error('Not implemented');},iterateAllEvents:function(callback,opt_this){this.iterateAllEventContainers(function(ec){ec.iterateAllEventsInThisContainer(function(eventType){return true;},callback,opt_this);});},iterateAllEventContainers:function(callback,opt_this){function visit(ec){callback.call(opt_this,ec);ec.iterateAllChildEventContainers(visit);}
+visit(this);}};return{EventContainer:EventContainer};});'use strict';tr.exportTo('tr.model',function(){var Event=tr.model.Event;var EventRegistry=tr.model.EventRegistry;function PowerSample(series,start,power){Event.call(this);this.series_=series;this.start_=start;this.power_=power;}
+PowerSample.prototype={__proto__:Event.prototype,get series(){return this.series_;},get start(){return this.start_;},set start(value){this.start_=value;},get power(){return this.power_;},set power(value){this.power_=value;},addBoundsToRange:function(range){range.addValue(this.start);}};EventRegistry.register(PowerSample,{name:'powerSample',pluralName:'powerSamples',singleViewElementName:'tr-ui-a-single-power-sample-sub-view',multiViewElementName:'tr-ui-a-multi-power-sample-sub-view'});return{PowerSample:PowerSample};});'use strict';tr.exportTo('tr.model',function(){var PowerSample=tr.model.PowerSample;function PowerSeries(device){tr.model.EventContainer.call(this);this.device_=device;this.samples_=[];}
+PowerSeries.prototype={__proto__:tr.model.EventContainer.prototype,get device(){return this.device_;},get samples(){return this.samples_;},get stableId(){return this.device_.stableId+'.PowerSeries';},addPowerSample:function(ts,val){var sample=new PowerSample(this,ts,val);this.samples_.push(sample);return sample;},getEnergyConsumed:function(start,end){var measurementRange=tr.b.Range.fromExplicitRange(start,end);var energyConsumed=0;for(var i=0;i<this.samples.length;i++){var sample=this.samples[i];var nextSample=this.samples[i+1];var sampleRange=new tr.b.Range();sampleRange.addValue(sample.start);sampleRange.addValue(nextSample?nextSample.start:Infinity);var timeIntersection=measurementRange.findIntersection(sampleRange);var powerInWatts=sample.power/1000.0;var durationInSeconds=timeIntersection.duration/1000;energyConsumed+=durationInSeconds*powerInWatts;}
+return energyConsumed;},shiftTimestampsForward:function(amount){for(var i=0;i<this.samples_.length;++i)
+this.samples_[i].start+=amount;},updateBounds:function(){this.bounds.reset();if(this.samples_.length===0)
+return;this.bounds.addValue(this.samples_[0].start);this.bounds.addValue(this.samples_[this.samples_.length-1].start);},iterateAllEventsInThisContainer:function(eventTypePredicate,callback,opt_this){if(eventTypePredicate.call(opt_this,PowerSample))
+this.samples_.forEach(callback,opt_this);},iterateAllChildEventContainers:function(callback,opt_this){}};return{PowerSeries:PowerSeries};});'use strict';tr.exportTo('tr.model',function(){function Device(model){if(!model)
+throw new Error('Must provide a model.');tr.model.EventContainer.call(this);this.powerSeries_=undefined;this.vSyncTimestamps_=[];};Device.compare=function(x,y){return x.guid-y.guid;};Device.prototype={__proto__:tr.model.EventContainer.prototype,compareTo:function(that){return Device.compare(this,that);},get userFriendlyName(){return'Device';},get userFriendlyDetails(){return'Device';},get stableId(){return'Device';},getSettingsKey:function(){return'device';},get powerSeries(){return this.powerSeries_;},set powerSeries(powerSeries){this.powerSeries_=powerSeries;},get vSyncTimestamps(){return this.vSyncTimestamps_;},set vSyncTimestamps(value){this.vSyncTimestamps_=value;},updateBounds:function(){this.bounds.reset();this.iterateAllChildEventContainers(function(child){child.updateBounds();this.bounds.addRange(child.bounds);},this);},shiftTimestampsForward:function(amount){this.iterateAllChildEventContainers(function(child){child.shiftTimestampsForward(amount);});for(var i=0;i<this.vSyncTimestamps_.length;i++)
+this.vSyncTimestamps_[i]+=amount;},addCategoriesToDict:function(categoriesDict){},iterateAllEventsInThisContainer:function(eventTypePredicate,callback,opt_this){},iterateAllChildEventContainers:function(callback,opt_this){if(this.powerSeries_)
+callback.call(opt_this,this.powerSeries_);}};return{Device:Device};});'use strict';tr.exportTo('tr.model',function(){function FlowEvent(category,id,title,colorId,start,args,opt_duration){tr.model.TimedEvent.call(this,start);this.category=category||'';this.title=title;this.colorId=colorId;this.start=start;this.args=args;this.id=id;this.startSlice=undefined;this.endSlice=undefined;this.startStackFrame=undefined;this.endStackFrame=undefined;if(opt_duration!==undefined)
+this.duration=opt_duration;}
+FlowEvent.prototype={__proto__:tr.model.TimedEvent.prototype,get userFriendlyName(){return'Flow event named '+this.title+' at '+
+tr.v.Unit.byName.timeStampInMs.format(this.timestamp);}};tr.model.EventRegistry.register(FlowEvent,{name:'flowEvent',pluralName:'flowEvents',singleViewElementName:'tr-ui-a-single-flow-event-sub-view',multiViewElementName:'tr-ui-a-multi-flow-event-sub-view'});return{FlowEvent:FlowEvent};});'use strict';tr.exportTo('tr.model',function(){function ContainerMemoryDump(start){tr.model.TimedEvent.call(this,start);this.levelOfDetail=undefined;this.memoryAllocatorDumps_=undefined;this.memoryAllocatorDumpsByFullName_=undefined;};ContainerMemoryDump.prototype={__proto__:tr.model.TimedEvent.prototype,shiftTimestampsForward:function(amount){this.start+=amount;},get memoryAllocatorDumps(){return this.memoryAllocatorDumps_;},set memoryAllocatorDumps(memoryAllocatorDumps){this.memoryAllocatorDumps_=memoryAllocatorDumps;this.forceRebuildingMemoryAllocatorDumpByFullNameIndex();},getMemoryAllocatorDumpByFullName:function(fullName){if(this.memoryAllocatorDumps_===undefined)
+return undefined;if(this.memoryAllocatorDumpsByFullName_===undefined){var index={};function addDumpsToIndex(dumps){dumps.forEach(function(dump){index[dump.fullName]=dump;addDumpsToIndex(dump.children);});};addDumpsToIndex(this.memoryAllocatorDumps_);this.memoryAllocatorDumpsByFullName_=index;}
+return this.memoryAllocatorDumpsByFullName_[fullName];},forceRebuildingMemoryAllocatorDumpByFullNameIndex:function(){this.memoryAllocatorDumpsByFullName_=undefined;},iterateRootAllocatorDumps:function(fn,opt_this){if(this.memoryAllocatorDumps===undefined)
+return;this.memoryAllocatorDumps.forEach(fn,opt_this||this);}};return{ContainerMemoryDump:ContainerMemoryDump};});'use strict';tr.exportTo('tr.model',function(){function MemoryAllocatorDump(containerMemoryDump,fullName,opt_guid){this.fullName=fullName;this.parent=undefined;this.children=[];this.numerics={};this.diagnostics={};this.containerMemoryDump=containerMemoryDump;this.owns=undefined;this.ownedBy=[];this.ownedBySiblingSizes=new Map();this.retains=[];this.retainedBy=[];this.weak=false;this.infos=[];this.guid=opt_guid;};MemoryAllocatorDump.SIZE_NUMERIC_NAME='size';MemoryAllocatorDump.EFFECTIVE_SIZE_NUMERIC_NAME='effective_size';MemoryAllocatorDump.RESIDENT_SIZE_NUMERIC_NAME='resident_size';MemoryAllocatorDump.DISPLAYED_SIZE_NUMERIC_NAME=MemoryAllocatorDump.EFFECTIVE_SIZE_NUMERIC_NAME;MemoryAllocatorDump.prototype={get name(){return this.fullName.substring(this.fullName.lastIndexOf('/')+1);},get quantifiedName(){return'\''+this.fullName+'\' in '+
+this.containerMemoryDump.containerName;},isDescendantOf:function(otherDump){var dump=this;while(dump!==undefined){if(dump===otherDump)
+return true;dump=dump.parent;}
+return false;},addNumeric:function(name,numeric){if(!(numeric instanceof tr.v.ScalarNumeric))
+throw new Error('Numeric value must be an instance of ScalarNumeric.');if(name in this.numerics)
+throw new Error('Duplicate numeric name: '+name+'.');this.numerics[name]=numeric;},addDiagnostic:function(name,text){if(typeof text!=='string')
+throw new Error('Diagnostic text must be a string.');if(name in this.diagnostics)
+throw new Error('Duplicate diagnostic name: '+name+'.');this.diagnostics[name]=text;},aggregateNumericsRecursively:function(opt_model){var numericNames=new Set();this.children.forEach(function(child){child.aggregateNumericsRecursively(opt_model);tr.b.iterItems(child.numerics,numericNames.add,numericNames);},this);numericNames.forEach(function(numericName){if(numericName===MemoryAllocatorDump.SIZE_NUMERIC_NAME||numericName===MemoryAllocatorDump.EFFECTIVE_SIZE_NUMERIC_NAME||this.numerics[numericName]!==undefined){return;}
+this.numerics[numericName]=MemoryAllocatorDump.aggregateNumerics(this.children.map(function(child){return child.numerics[numericName];}),opt_model);},this);}};MemoryAllocatorDump.aggregateNumerics=function(numerics,opt_model){var shouldLogWarning=!!opt_model;var aggregatedUnit=undefined;var aggregatedValue=0;numerics.forEach(function(numeric){if(numeric===undefined)
+return;var unit=numeric.unit;if(aggregatedUnit===undefined){aggregatedUnit=unit;}else if(aggregatedUnit!==unit){if(shouldLogWarning){opt_model.importWarning({type:'numeric_parse_error',message:'Multiple units provided for numeric: \''+
+aggregatedUnit.unitName+'\' and \''+unit.unitName+'\'.'});shouldLogWarning=false;}
+aggregatedUnit=tr.v.Unit.byName.unitlessNumber_smallerIsBetter;}
+aggregatedValue+=numeric.value;},this);if(aggregatedUnit===undefined)
+return undefined;return new tr.v.ScalarNumeric(aggregatedUnit,aggregatedValue);};function MemoryAllocatorDumpLink(source,target,opt_importance){this.source=source;this.target=target;this.importance=opt_importance;this.size=undefined;}
+var MemoryAllocatorDumpInfoType={PROVIDED_SIZE_LESS_THAN_AGGREGATED_CHILDREN:0,PROVIDED_SIZE_LESS_THAN_LARGEST_OWNER:1};return{MemoryAllocatorDump:MemoryAllocatorDump,MemoryAllocatorDumpLink:MemoryAllocatorDumpLink,MemoryAllocatorDumpInfoType:MemoryAllocatorDumpInfoType};});'use strict';tr.exportTo('tr.model',function(){function GlobalMemoryDump(model,start){tr.model.ContainerMemoryDump.call(this,start);this.model=model;this.processMemoryDumps={};}
+var SIZE_NUMERIC_NAME=tr.model.MemoryAllocatorDump.SIZE_NUMERIC_NAME;var EFFECTIVE_SIZE_NUMERIC_NAME=tr.model.MemoryAllocatorDump.EFFECTIVE_SIZE_NUMERIC_NAME;var MemoryAllocatorDumpInfoType=tr.model.MemoryAllocatorDumpInfoType;var PROVIDED_SIZE_LESS_THAN_AGGREGATED_CHILDREN=MemoryAllocatorDumpInfoType.PROVIDED_SIZE_LESS_THAN_AGGREGATED_CHILDREN;var PROVIDED_SIZE_LESS_THAN_LARGEST_OWNER=MemoryAllocatorDumpInfoType.PROVIDED_SIZE_LESS_THAN_LARGEST_OWNER;function inPlaceFilter(array,predicate,opt_this){opt_this=opt_this||this;var nextPosition=0;for(var i=0;i<array.length;i++){if(!predicate.call(opt_this,array[i],i))
+continue;if(nextPosition<i)
+array[nextPosition]=array[i];nextPosition++;}
+if(nextPosition<array.length)
+array.length=nextPosition;}
+function getSize(dump){var numeric=dump.numerics[SIZE_NUMERIC_NAME];if(numeric===undefined)
+return 0;return numeric.value;}
+function hasSize(dump){return dump.numerics[SIZE_NUMERIC_NAME]!==undefined;}
+function optional(value,defaultValue){if(value===undefined)
+return defaultValue;return value;}
+GlobalMemoryDump.prototype={__proto__:tr.model.ContainerMemoryDump.prototype,get userFriendlyName(){return'Global memory dump at '+
+tr.v.Unit.byName.timeStampInMs.format(this.start);},get containerName(){return'global space';},finalizeGraph:function(){this.removeWeakDumps();this.setUpTracingOverheadOwnership();this.aggregateNumerics();this.calculateSizes();this.calculateEffectiveSizes();this.discountTracingOverheadFromVmRegions();this.forceRebuildingMemoryAllocatorDumpByFullNameIndices();},removeWeakDumps:function(){this.traverseAllocatorDumpsInDepthFirstPreOrder(function(dump){if(dump.weak)
+return;if((dump.owns!==undefined&&dump.owns.target.weak)||(dump.parent!==undefined&&dump.parent.weak)){dump.weak=true;}});function removeWeakDumpsFromListRecursively(dumps){inPlaceFilter(dumps,function(dump){if(dump.weak){return false;}
+removeWeakDumpsFromListRecursively(dump.children);inPlaceFilter(dump.ownedBy,function(ownershipLink){return!ownershipLink.source.weak;});return true;});}
+this.iterateContainerDumps(function(containerDump){var memoryAllocatorDumps=containerDump.memoryAllocatorDumps;if(memoryAllocatorDumps!==undefined)
+removeWeakDumpsFromListRecursively(memoryAllocatorDumps);});},calculateSizes:function(){this.traverseAllocatorDumpsInDepthFirstPostOrder(this.calculateMemoryAllocatorDumpSize_.bind(this));},calculateMemoryAllocatorDumpSize_:function(dump){var shouldDefineSize=false;function getDependencySize(dependencyDump){var numeric=dependencyDump.numerics[SIZE_NUMERIC_NAME];if(numeric===undefined)
+return 0;shouldDefineSize=true;return numeric.value;}
+var sizeNumeric=dump.numerics[SIZE_NUMERIC_NAME];var size=0;var checkDependencySizeIsConsistent=function(){};if(sizeNumeric!==undefined){size=sizeNumeric.value;shouldDefineSize=true;if(sizeNumeric.unit!==tr.v.Unit.byName.sizeInBytes_smallerIsBetter){this.model.importWarning({type:'memory_dump_parse_error',message:'Invalid unit of \'size\' numeric of memory allocator '+'dump '+dump.quantifiedName+': '+
+sizeNumeric.unit.unitName+'.'});}
+checkDependencySizeIsConsistent=function(dependencySize,dependencyInfoType,dependencyName){if(size>=dependencySize)
+return;this.model.importWarning({type:'memory_dump_parse_error',message:'Size provided by memory allocator dump \''+
+dump.fullName+'\' ('+
+tr.v.Unit.byName.sizeInBytes.format(size)+') is less than '+dependencyName+' ('+
+tr.v.Unit.byName.sizeInBytes.format(dependencySize)+').'});dump.infos.push({type:dependencyInfoType,providedSize:size,dependencySize:dependencySize});}.bind(this);}
+var aggregatedChildrenSize=0;var allOverlaps={};dump.children.forEach(function(childDump){function aggregateDescendantDump(descendantDump){var ownedDumpLink=descendantDump.owns;if(ownedDumpLink!==undefined&&ownedDumpLink.target.isDescendantOf(dump)){var ownedChildDump=ownedDumpLink.target;while(ownedChildDump.parent!==dump)
+ownedChildDump=ownedChildDump.parent;if(childDump!==ownedChildDump){var ownedBySiblingSize=getDependencySize(descendantDump);if(ownedBySiblingSize>0){var previousTotalOwnedBySiblingSize=ownedChildDump.ownedBySiblingSizes.get(childDump)||0;var updatedTotalOwnedBySiblingSize=previousTotalOwnedBySiblingSize+ownedBySiblingSize;ownedChildDump.ownedBySiblingSizes.set(childDump,updatedTotalOwnedBySiblingSize);}}
+return;}
+if(descendantDump.children.length===0){aggregatedChildrenSize+=getDependencySize(descendantDump);return;}
+descendantDump.children.forEach(aggregateDescendantDump);}
+aggregateDescendantDump(childDump);});checkDependencySizeIsConsistent(aggregatedChildrenSize,PROVIDED_SIZE_LESS_THAN_AGGREGATED_CHILDREN,'the aggregated size of its children');var largestOwnerSize=0;dump.ownedBy.forEach(function(ownershipLink){var owner=ownershipLink.source;var ownerSize=getDependencySize(owner);largestOwnerSize=Math.max(largestOwnerSize,ownerSize);});checkDependencySizeIsConsistent(largestOwnerSize,PROVIDED_SIZE_LESS_THAN_LARGEST_OWNER,'the size of its largest owner');if(!shouldDefineSize){delete dump.numerics[SIZE_NUMERIC_NAME];return;}
+size=Math.max(size,aggregatedChildrenSize,largestOwnerSize);dump.numerics[SIZE_NUMERIC_NAME]=new tr.v.ScalarNumeric(tr.v.Unit.byName.sizeInBytes_smallerIsBetter,size);if(aggregatedChildrenSize<size&&dump.children!==undefined&&dump.children.length>0){var virtualChild=new tr.model.MemoryAllocatorDump(dump.containerMemoryDump,dump.fullName+'/<unspecified>');virtualChild.parent=dump;dump.children.unshift(virtualChild);virtualChild.numerics[SIZE_NUMERIC_NAME]=new tr.v.ScalarNumeric(tr.v.Unit.byName.sizeInBytes_smallerIsBetter,size-aggregatedChildrenSize);}},calculateEffectiveSizes:function(){this.traverseAllocatorDumpsInDepthFirstPostOrder(this.calculateDumpSubSizes_.bind(this));this.traverseAllocatorDumpsInDepthFirstPostOrder(this.calculateDumpOwnershipCoefficient_.bind(this));this.traverseAllocatorDumpsInDepthFirstPreOrder(this.calculateDumpCumulativeOwnershipCoefficient_.bind(this));this.traverseAllocatorDumpsInDepthFirstPostOrder(this.calculateDumpEffectiveSize_.bind(this));},calculateDumpSubSizes_:function(dump){if(!hasSize(dump))
+return;if(dump.children===undefined||dump.children.length===0){var size=getSize(dump);dump.notOwningSubSize_=size;dump.notOwnedSubSize_=size;return;}
+var notOwningSubSize=0;dump.children.forEach(function(childDump){if(childDump.owns!==undefined)
+return;notOwningSubSize+=optional(childDump.notOwningSubSize_,0);});dump.notOwningSubSize_=notOwningSubSize;var notOwnedSubSize=0;dump.children.forEach(function(childDump){if(childDump.ownedBy.length===0){notOwnedSubSize+=optional(childDump.notOwnedSubSize_,0);return;}
+var largestChildOwnerSize=0;childDump.ownedBy.forEach(function(ownershipLink){largestChildOwnerSize=Math.max(largestChildOwnerSize,getSize(ownershipLink.source));});notOwnedSubSize+=getSize(childDump)-largestChildOwnerSize;});dump.notOwnedSubSize_=notOwnedSubSize;},calculateDumpOwnershipCoefficient_:function(dump){if(!hasSize(dump))
+return;if(dump.ownedBy.length===0)
+return;var owners=dump.ownedBy.map(function(ownershipLink){return{dump:ownershipLink.source,importance:optional(ownershipLink.importance,0),notOwningSubSize:optional(ownershipLink.source.notOwningSubSize_,0)};});owners.sort(function(a,b){if(a.importance===b.importance)
+return a.notOwningSubSize-b.notOwningSubSize;return b.importance-a.importance;});var currentImportanceStartPos=0;var alreadyAttributedSubSize=0;while(currentImportanceStartPos<owners.length){var currentImportance=owners[currentImportanceStartPos].importance;var nextImportanceStartPos=currentImportanceStartPos+1;while(nextImportanceStartPos<owners.length&&owners[nextImportanceStartPos].importance===currentImportance){nextImportanceStartPos++;}
+var attributedNotOwningSubSize=0;for(var pos=currentImportanceStartPos;pos<nextImportanceStartPos;pos++){var owner=owners[pos];var notOwningSubSize=owner.notOwningSubSize;if(notOwningSubSize>alreadyAttributedSubSize){attributedNotOwningSubSize+=(notOwningSubSize-alreadyAttributedSubSize)/(nextImportanceStartPos-pos);alreadyAttributedSubSize=notOwningSubSize;}
+var owningCoefficient=0;if(notOwningSubSize!==0)
+owningCoefficient=attributedNotOwningSubSize/notOwningSubSize;owner.dump.owningCoefficient_=owningCoefficient;}
+currentImportanceStartPos=nextImportanceStartPos;}
+var notOwnedSubSize=optional(dump.notOwnedSubSize_,0);var remainderSubSize=notOwnedSubSize-alreadyAttributedSubSize;var ownedCoefficient=0;if(notOwnedSubSize!==0)
+ownedCoefficient=remainderSubSize/notOwnedSubSize;dump.ownedCoefficient_=ownedCoefficient;},calculateDumpCumulativeOwnershipCoefficient_:function(dump){if(!hasSize(dump))
+return;var cumulativeOwnedCoefficient=optional(dump.ownedCoefficient_,1);var parent=dump.parent;if(dump.parent!==undefined)
+cumulativeOwnedCoefficient*=dump.parent.cumulativeOwnedCoefficient_;dump.cumulativeOwnedCoefficient_=cumulativeOwnedCoefficient;var cumulativeOwningCoefficient;if(dump.owns!==undefined){cumulativeOwningCoefficient=dump.owningCoefficient_*dump.owns.target.cumulativeOwningCoefficient_;}else if(dump.parent!==undefined){cumulativeOwningCoefficient=dump.parent.cumulativeOwningCoefficient_;}else{cumulativeOwningCoefficient=1;}
+dump.cumulativeOwningCoefficient_=cumulativeOwningCoefficient;},calculateDumpEffectiveSize_:function(dump){if(!hasSize(dump)){delete dump.numerics[EFFECTIVE_SIZE_NUMERIC_NAME];return;}
+var effectiveSize;if(dump.children===undefined||dump.children.length===0){effectiveSize=getSize(dump)*dump.cumulativeOwningCoefficient_*dump.cumulativeOwnedCoefficient_;}else{effectiveSize=0;dump.children.forEach(function(childDump){if(!hasSize(childDump))
+return;effectiveSize+=childDump.numerics[EFFECTIVE_SIZE_NUMERIC_NAME].value;});}
+dump.numerics[EFFECTIVE_SIZE_NUMERIC_NAME]=new tr.v.ScalarNumeric(tr.v.Unit.byName.sizeInBytes_smallerIsBetter,effectiveSize);},aggregateNumerics:function(){this.iterateRootAllocatorDumps(function(dump){dump.aggregateNumericsRecursively(this.model);});this.iterateRootAllocatorDumps(this.propagateNumericsAndDiagnosticsRecursively);tr.b.iterItems(this.processMemoryDumps,function(pid,processMemoryDump){processMemoryDump.iterateRootAllocatorDumps(function(dump){dump.aggregateNumericsRecursively(this.model);},this);},this);},propagateNumericsAndDiagnosticsRecursively:function(globalAllocatorDump){['numerics','diagnostics'].forEach(function(field){tr.b.iterItems(globalAllocatorDump[field],function(name,value){globalAllocatorDump.ownedBy.forEach(function(ownershipLink){var processAllocatorDump=ownershipLink.source;if(processAllocatorDump[field][name]!==undefined){return;}
+processAllocatorDump[field][name]=value;});});});globalAllocatorDump.children.forEach(this.propagateNumericsAndDiagnosticsRecursively,this);},setUpTracingOverheadOwnership:function(){tr.b.iterItems(this.processMemoryDumps,function(pid,dump){dump.setUpTracingOverheadOwnership(this.model);},this);},discountTracingOverheadFromVmRegions:function(){tr.b.iterItems(this.processMemoryDumps,function(pid,dump){dump.discountTracingOverheadFromVmRegions(this.model);},this);},forceRebuildingMemoryAllocatorDumpByFullNameIndices:function(){this.iterateContainerDumps(function(containerDump){containerDump.forceRebuildingMemoryAllocatorDumpByFullNameIndex();});},iterateContainerDumps:function(fn){fn.call(this,this);tr.b.iterItems(this.processMemoryDumps,function(pid,processDump){fn.call(this,processDump);},this);},iterateAllRootAllocatorDumps:function(fn){this.iterateContainerDumps(function(containerDump){containerDump.iterateRootAllocatorDumps(fn,this);});},traverseAllocatorDumpsInDepthFirstPostOrder:function(fn){var visitedDumps=new WeakSet();var openDumps=new WeakSet();function visit(dump){if(visitedDumps.has(dump))
+return;if(openDumps.has(dump))
+throw new Error(dump.userFriendlyName+' contains a cycle');openDumps.add(dump);dump.ownedBy.forEach(function(ownershipLink){visit.call(this,ownershipLink.source);},this);dump.children.forEach(visit,this);fn.call(this,dump);visitedDumps.add(dump);openDumps.delete(dump);}
+this.iterateAllRootAllocatorDumps(visit);},traverseAllocatorDumpsInDepthFirstPreOrder:function(fn){var visitedDumps=new WeakSet();function visit(dump){if(visitedDumps.has(dump))
+return;if(dump.owns!==undefined&&!visitedDumps.has(dump.owns.target))
+return;if(dump.parent!==undefined&&!visitedDumps.has(dump.parent))
+return;fn.call(this,dump);visitedDumps.add(dump);dump.ownedBy.forEach(function(ownershipLink){visit.call(this,ownershipLink.source);},this);dump.children.forEach(visit,this);}
+this.iterateAllRootAllocatorDumps(visit);}};tr.model.EventRegistry.register(GlobalMemoryDump,{name:'globalMemoryDump',pluralName:'globalMemoryDumps',singleViewElementName:'tr-ui-a-container-memory-dump-sub-view',multiViewElementName:'tr-ui-a-container-memory-dump-sub-view'});return{GlobalMemoryDump:GlobalMemoryDump};});'use strict';tr.exportTo('tr.model',function(){var InstantEventType={GLOBAL:1,PROCESS:2};function InstantEvent(category,title,colorId,start,args){tr.model.TimedEvent.call(this,start);this.category=category||'';this.title=title;this.colorId=colorId;this.args=args;this.type=undefined;};InstantEvent.prototype={__proto__:tr.model.TimedEvent.prototype};function GlobalInstantEvent(category,title,colorId,start,args){InstantEvent.apply(this,arguments);this.type=InstantEventType.GLOBAL;};GlobalInstantEvent.prototype={__proto__:InstantEvent.prototype,get userFriendlyName(){return'Global instant event '+this.title+' @ '+
+tr.v.Unit.byName.timeStampInMs.format(this.start);}};function ProcessInstantEvent(category,title,colorId,start,args){InstantEvent.apply(this,arguments);this.type=InstantEventType.PROCESS;};ProcessInstantEvent.prototype={__proto__:InstantEvent.prototype,get userFriendlyName(){return'Process-level instant event '+this.title+' @ '+
+tr.v.Unit.byName.timeStampInMs.format(this.start);}};tr.model.EventRegistry.register(InstantEvent,{name:'instantEvent',pluralName:'instantEvents',singleViewElementName:'tr-ui-a-single-instant-event-sub-view',multiViewElementName:'tr-ui-a-multi-instant-event-sub-view'});return{GlobalInstantEvent:GlobalInstantEvent,ProcessInstantEvent:ProcessInstantEvent,InstantEventType:InstantEventType,InstantEvent:InstantEvent};});'use strict';tr.exportTo('tr.model',function(){function CounterSample(series,timestamp,value){tr.model.Event.call(this);this.series_=series;this.timestamp_=timestamp;this.value_=value;}
+CounterSample.groupByTimestamp=function(samples){var samplesByTimestamp=tr.b.group(samples,function(sample){return sample.timestamp;});var timestamps=tr.b.dictionaryKeys(samplesByTimestamp);timestamps.sort();var groups=[];for(var i=0;i<timestamps.length;i++){var ts=timestamps[i];var group=samplesByTimestamp[ts];group.sort(function(x,y){return x.series.seriesIndex-y.series.seriesIndex;});groups.push(group);}
+return groups;};CounterSample.prototype={__proto__:tr.model.Event.prototype,get series(){return this.series_;},get timestamp(){return this.timestamp_;},get value(){return this.value_;},set timestamp(timestamp){this.timestamp_=timestamp;},addBoundsToRange:function(range){range.addValue(this.timestamp);},getSampleIndex:function(){return tr.b.findLowIndexInSortedArray(this.series.timestamps,function(x){return x;},this.timestamp_);},get userFriendlyName(){return'Counter sample from '+this.series_.title+' at '+
+tr.v.Unit.byName.timeStampInMs.format(this.timestamp);}};tr.model.EventRegistry.register(CounterSample,{name:'counterSample',pluralName:'counterSamples',singleViewElementName:'tr-ui-a-counter-sample-sub-view',multiViewElementName:'tr-ui-a-counter-sample-sub-view'});return{CounterSample:CounterSample};});'use strict';tr.exportTo('tr.model',function(){var CounterSample=tr.model.CounterSample;function CounterSeries(name,color){tr.model.EventContainer.call(this);this.name_=name;this.color_=color;this.timestamps_=[];this.samples_=[];this.counter=undefined;this.seriesIndex=undefined;}
+CounterSeries.prototype={__proto__:tr.model.EventContainer.prototype,get length(){return this.timestamps_.length;},get name(){return this.name_;},get color(){return this.color_;},get samples(){return this.samples_;},get timestamps(){return this.timestamps_;},getSample:function(idx){return this.samples_[idx];},getTimestamp:function(idx){return this.timestamps_[idx];},addCounterSample:function(ts,val){var sample=new CounterSample(this,ts,val);this.addSample(sample);return sample;},addSample:function(sample){this.timestamps_.push(sample.timestamp);this.samples_.push(sample);},getStatistics:function(sampleIndices){var sum=0;var min=Number.MAX_VALUE;var max=-Number.MAX_VALUE;for(var i=0;i<sampleIndices.length;++i){var sample=this.getSample(sampleIndices[i]).value;sum+=sample;min=Math.min(sample,min);max=Math.max(sample,max);}
+return{min:min,max:max,avg:(sum/sampleIndices.length),start:this.getSample(sampleIndices[0]).value,end:this.getSample(sampleIndices.length-1).value};},shiftTimestampsForward:function(amount){for(var i=0;i<this.timestamps_.length;++i){this.timestamps_[i]+=amount;this.samples_[i].timestamp=this.timestamps_[i];}},iterateAllEventsInThisContainer:function(eventTypePredicate,callback,opt_this){if(eventTypePredicate.call(opt_this,tr.model.CounterSample)){this.samples_.forEach(callback,opt_this);}},iterateAllChildEventContainers:function(callback,opt_this){}};return{CounterSeries:CounterSeries};});'use strict';tr.exportTo('tr.model',function(){function Counter(parent,id,category,name){tr.model.EventContainer.call(this);this.parent_=parent;this.id_=id;this.category_=category||'';this.name_=name;this.series_=[];this.totals=[];}
+Counter.prototype={__proto__:tr.model.EventContainer.prototype,get parent(){return this.parent_;},get id(){return this.id_;},get category(){return this.category_;},get name(){return this.name_;},iterateAllEventsInThisContainer:function(eventTypePredicate,callback,opt_this){},iterateAllChildEventContainers:function(callback,opt_this){for(var i=0;i<this.series_.length;i++)
+callback.call(opt_this,this.series_[i]);},set timestamps(arg){throw new Error('Bad counter API. No cookie.');},set seriesNames(arg){throw new Error('Bad counter API. No cookie.');},set seriesColors(arg){throw new Error('Bad counter API. No cookie.');},set samples(arg){throw new Error('Bad counter API. No cookie.');},addSeries:function(series){series.counter=this;series.seriesIndex=this.series_.length;this.series_.push(series);return series;},getSeries:function(idx){return this.series_[idx];},get series(){return this.series_;},get numSeries(){return this.series_.length;},get numSamples(){if(this.series_.length===0)
+return 0;return this.series_[0].length;},get timestamps(){if(this.series_.length===0)
+return[];return this.series_[0].timestamps;},getSampleStatistics:function(sampleIndices){sampleIndices.sort();var ret=[];this.series_.forEach(function(series){ret.push(series.getStatistics(sampleIndices));});return ret;},shiftTimestampsForward:function(amount){for(var i=0;i<this.series_.length;++i)
+this.series_[i].shiftTimestampsForward(amount);},updateBounds:function(){this.totals=[];this.maxTotal=0;this.bounds.reset();if(this.series_.length===0)
+return;var firstSeries=this.series_[0];var lastSeries=this.series_[this.series_.length-1];this.bounds.addValue(firstSeries.getTimestamp(0));this.bounds.addValue(lastSeries.getTimestamp(lastSeries.length-1));var numSeries=this.numSeries;this.maxTotal=-Infinity;for(var i=0;i<firstSeries.length;++i){var total=0;this.series_.forEach(function(series){total+=series.getSample(i).value;this.totals.push(total);}.bind(this));this.maxTotal=Math.max(total,this.maxTotal);}}};Counter.compare=function(x,y){var tmp=x.parent.compareTo(y);if(tmp!=0)
+return tmp;var tmp=x.name.localeCompare(y.name);if(tmp==0)
+return x.tid-y.tid;return tmp;};return{Counter:Counter};});'use strict';tr.exportTo('tr.model',function(){var Slice=tr.model.Slice;function CpuSlice(cat,title,colorId,start,args,opt_duration){Slice.apply(this,arguments);this.threadThatWasRunning=undefined;this.cpu=undefined;}
 CpuSlice.prototype={__proto__:Slice.prototype,get analysisTypeName(){return'tr.ui.analysis.CpuSlice';},getAssociatedTimeslice:function(){if(!this.threadThatWasRunning)
 return undefined;var timeSlices=this.threadThatWasRunning.timeSlices;for(var i=0;i<timeSlices.length;i++){var timeSlice=timeSlices[i];if(timeSlice.start!==this.start)
 continue;if(timeSlice.duration!==this.duration)
@@ -2954,7 +3162,7 @@
 ObjectSnapshot.prototype={__proto__:tr.model.Event.prototype,preInitialize:function(){},initialize:function(){},addBoundsToRange:function(range){range.addValue(this.ts);},get userFriendlyName(){return'Snapshot of '+
 this.objectInstance.typeName+' '+
 this.objectInstance.id+' @ '+
-tr.b.u.TimeStamp.format(this.ts);}};tr.model.EventRegistry.register(ObjectSnapshot,{name:'objectSnapshot',pluralName:'objectSnapshots',singleViewElementName:'tr-ui-a-single-object-snapshot-sub-view',multiViewElementName:'tr-ui-a-multi-object-sub-view'});var options=new tr.b.ExtensionRegistryOptions(tr.b.TYPE_BASED_REGISTRY_MODE);options.mandatoryBaseClass=ObjectSnapshot;options.defaultConstructor=ObjectSnapshot;tr.b.decorateExtensionRegistry(ObjectSnapshot,options);return{ObjectSnapshot:ObjectSnapshot};});'use strict';tr.exportTo('tr.model',function(){var ObjectSnapshot=tr.model.ObjectSnapshot;function ObjectInstance(parent,id,category,name,creationTs,opt_baseTypeName){tr.model.Event.call(this);this.parent=parent;this.id=id;this.category=category;this.baseTypeName=opt_baseTypeName?opt_baseTypeName:name;this.name=name;this.creationTs=creationTs;this.creationTsWasExplicit=false;this.deletionTs=Number.MAX_VALUE;this.deletionTsWasExplicit=false;this.colorId=0;this.bounds=new tr.b.Range();this.snapshots=[];this.hasImplicitSnapshots=false;}
+tr.v.Unit.byName.timeStampInMs.format(this.ts);}};tr.model.EventRegistry.register(ObjectSnapshot,{name:'objectSnapshot',pluralName:'objectSnapshots',singleViewElementName:'tr-ui-a-single-object-snapshot-sub-view',multiViewElementName:'tr-ui-a-multi-object-sub-view'});var options=new tr.b.ExtensionRegistryOptions(tr.b.TYPE_BASED_REGISTRY_MODE);options.mandatoryBaseClass=ObjectSnapshot;options.defaultConstructor=ObjectSnapshot;tr.b.decorateExtensionRegistry(ObjectSnapshot,options);return{ObjectSnapshot:ObjectSnapshot};});'use strict';tr.exportTo('tr.model',function(){var ObjectSnapshot=tr.model.ObjectSnapshot;function ObjectInstance(parent,scopedId,category,name,creationTs,opt_baseTypeName){tr.model.Event.call(this);this.parent=parent;this.scopedId=scopedId;this.category=category;this.baseTypeName=opt_baseTypeName?opt_baseTypeName:name;this.name=name;this.creationTs=creationTs;this.creationTsWasExplicit=false;this.deletionTs=Number.MAX_VALUE;this.deletionTsWasExplicit=false;this.colorId=0;this.bounds=new tr.b.Range();this.snapshots=[];this.hasImplicitSnapshots=false;}
 ObjectInstance.prototype={__proto__:tr.model.Event.prototype,get typeName(){return this.name;},addBoundsToRange:function(range){range.addRange(this.bounds);},addSnapshot:function(ts,args,opt_name,opt_baseTypeName){if(ts<this.creationTs)
 throw new Error('Snapshots must be >= instance.creationTs');if(ts>=this.deletionTs)
 throw new Error('Snapshots cannot be added after '+'an objects deletion timestamp.');var lastSnapshot;if(this.snapshots.length>0){lastSnapshot=this.snapshots[this.snapshots.length-1];if(lastSnapshot.ts==ts)
@@ -2976,35 +3184,36 @@
 return this.snapshots[this.snapshots.length-1];return this.snapshots[i];},updateBounds:function(){this.bounds.reset();this.bounds.addValue(this.creationTs);if(this.deletionTs!=Number.MAX_VALUE)
 this.bounds.addValue(this.deletionTs);else if(this.snapshots.length>0)
 this.bounds.addValue(this.snapshots[this.snapshots.length-1].ts);},shiftTimestampsForward:function(amount){this.creationTs+=amount;if(this.deletionTs!=Number.MAX_VALUE)
-this.deletionTs+=amount;this.snapshots.forEach(function(snapshot){snapshot.ts+=amount;});},get userFriendlyName(){return this.typeName+' object '+this.id;}};tr.model.EventRegistry.register(ObjectInstance,{name:'objectInstance',pluralName:'objectInstances',singleViewElementName:'tr-ui-a-single-object-instance-sub-view',multiViewElementName:'tr-ui-a-multi-object-sub-view'});var options=new tr.b.ExtensionRegistryOptions(tr.b.TYPE_BASED_REGISTRY_MODE);options.mandatoryBaseClass=ObjectInstance;options.defaultConstructor=ObjectInstance;tr.b.decorateExtensionRegistry(ObjectInstance,options);return{ObjectInstance:ObjectInstance};});'use strict';tr.exportTo('tr.model',function(){function TimeToObjectInstanceMap(createObjectInstanceFunction,parent,id){this.createObjectInstanceFunction_=createObjectInstanceFunction;this.parent=parent;this.id=id;this.instances=[];}
-TimeToObjectInstanceMap.prototype={idWasCreated:function(category,name,ts){if(this.instances.length==0){this.instances.push(this.createObjectInstanceFunction_(this.parent,this.id,category,name,ts));this.instances[0].creationTsWasExplicit=true;return this.instances[0];}
+this.deletionTs+=amount;this.snapshots.forEach(function(snapshot){snapshot.ts+=amount;});},get userFriendlyName(){return this.typeName+' object '+this.scopedId;}};tr.model.EventRegistry.register(ObjectInstance,{name:'objectInstance',pluralName:'objectInstances',singleViewElementName:'tr-ui-a-single-object-instance-sub-view',multiViewElementName:'tr-ui-a-multi-object-sub-view'});var options=new tr.b.ExtensionRegistryOptions(tr.b.TYPE_BASED_REGISTRY_MODE);options.mandatoryBaseClass=ObjectInstance;options.defaultConstructor=ObjectInstance;tr.b.decorateExtensionRegistry(ObjectInstance,options);return{ObjectInstance:ObjectInstance};});'use strict';tr.exportTo('tr.model',function(){function TimeToObjectInstanceMap(createObjectInstanceFunction,parent,scopedId){this.createObjectInstanceFunction_=createObjectInstanceFunction;this.parent=parent;this.scopedId=scopedId;this.instances=[];}
+TimeToObjectInstanceMap.prototype={idWasCreated:function(category,name,ts){if(this.instances.length==0){this.instances.push(this.createObjectInstanceFunction_(this.parent,this.scopedId,category,name,ts));this.instances[0].creationTsWasExplicit=true;return this.instances[0];}
 var lastInstance=this.instances[this.instances.length-1];if(ts<lastInstance.deletionTs){throw new Error('Mutation of the TimeToObjectInstanceMap must be '+'done in ascending timestamp order.');}
-lastInstance=this.createObjectInstanceFunction_(this.parent,this.id,category,name,ts);lastInstance.creationTsWasExplicit=true;this.instances.push(lastInstance);return lastInstance;},addSnapshot:function(category,name,ts,args,opt_baseTypeName){if(this.instances.length==0){this.instances.push(this.createObjectInstanceFunction_(this.parent,this.id,category,name,ts,opt_baseTypeName));}
+lastInstance=this.createObjectInstanceFunction_(this.parent,this.scopedId,category,name,ts);lastInstance.creationTsWasExplicit=true;this.instances.push(lastInstance);return lastInstance;},addSnapshot:function(category,name,ts,args,opt_baseTypeName){if(this.instances.length==0){this.instances.push(this.createObjectInstanceFunction_(this.parent,this.scopedId,category,name,ts,opt_baseTypeName));}
 var i=tr.b.findIndexInSortedIntervals(this.instances,function(inst){return inst.creationTs;},function(inst){return inst.deletionTs-inst.creationTs;},ts);var instance;if(i<0){instance=this.instances[0];if(ts>instance.deletionTs||instance.creationTsWasExplicit){throw new Error('At the provided timestamp, no instance was still alive');}
 if(instance.snapshots.length!=0){throw new Error('Cannot shift creationTs forward, '+'snapshots have been added. First snap was at ts='+
 instance.snapshots[0].ts+' and creationTs was '+
 instance.creationTs);}
-instance.creationTs=ts;}else if(i>=this.instances.length){instance=this.instances[this.instances.length-1];if(ts>=instance.deletionTs){instance=this.createObjectInstanceFunction_(this.parent,this.id,category,name,ts,opt_baseTypeName);this.instances.push(instance);}else{var lastValidIndex;for(var i=this.instances.length-1;i>=0;i--){var tmp=this.instances[i];if(ts>=tmp.deletionTs)
+instance.creationTs=ts;}else if(i>=this.instances.length){instance=this.instances[this.instances.length-1];if(ts>=instance.deletionTs){instance=this.createObjectInstanceFunction_(this.parent,this.scopedId,category,name,ts,opt_baseTypeName);this.instances.push(instance);}else{var lastValidIndex;for(var i=this.instances.length-1;i>=0;i--){var tmp=this.instances[i];if(ts>=tmp.deletionTs)
 break;if(tmp.creationTsWasExplicit==false&&tmp.snapshots.length==0)
 lastValidIndex=i;}
 if(lastValidIndex===undefined){throw new Error('Cannot add snapshot. No instance was alive that was mutable.');}
 instance=this.instances[lastValidIndex];instance.creationTs=ts;}}else{instance=this.instances[i];}
 return instance.addSnapshot(ts,args,name,opt_baseTypeName);},get lastInstance(){if(this.instances.length==0)
-return undefined;return this.instances[this.instances.length-1];},idWasDeleted:function(category,name,ts){if(this.instances.length==0){this.instances.push(this.createObjectInstanceFunction_(this.parent,this.id,category,name,ts));}
+return undefined;return this.instances[this.instances.length-1];},idWasDeleted:function(category,name,ts){if(this.instances.length==0){this.instances.push(this.createObjectInstanceFunction_(this.parent,this.scopedId,category,name,ts));}
 var lastInstance=this.instances[this.instances.length-1];if(ts<lastInstance.creationTs)
-throw new Error('Cannot delete a id before it was crated');if(lastInstance.deletionTs==Number.MAX_VALUE){lastInstance.wasDeleted(ts);return lastInstance;}
+throw new Error('Cannot delete an id before it was created');if(lastInstance.deletionTs==Number.MAX_VALUE){lastInstance.wasDeleted(ts);return lastInstance;}
 if(ts<lastInstance.deletionTs)
-throw new Error('id was already deleted earlier.');lastInstance=this.createObjectInstanceFunction_(this.parent,this.id,category,name,ts);this.instances.push(lastInstance);lastInstance.wasDeleted(ts);return lastInstance;},getInstanceAt:function(ts){var i=tr.b.findIndexInSortedIntervals(this.instances,function(inst){return inst.creationTs;},function(inst){return inst.deletionTs-inst.creationTs;},ts);if(i<0){if(this.instances[0].creationTsWasExplicit)
+throw new Error('id was already deleted earlier.');lastInstance=this.createObjectInstanceFunction_(this.parent,this.scopedId,category,name,ts);this.instances.push(lastInstance);lastInstance.wasDeleted(ts);return lastInstance;},getInstanceAt:function(ts){var i=tr.b.findIndexInSortedIntervals(this.instances,function(inst){return inst.creationTs;},function(inst){return inst.deletionTs-inst.creationTs;},ts);if(i<0){if(this.instances[0].creationTsWasExplicit)
 return undefined;return this.instances[0];}else if(i>=this.instances.length){return undefined;}
 return this.instances[i];},logToConsole:function(){for(var i=0;i<this.instances.length;i++){var instance=this.instances[i];var cEF='';var dEF='';if(instance.creationTsWasExplicit)
 cEF='(explicitC)';if(instance.deletionTsWasExplicit)
-dEF='(explicit)';console.log(instance.creationTs,cEF,instance.deletionTs,dEF,instance.category,instance.name,instance.snapshots.length+' snapshots');}}};return{TimeToObjectInstanceMap:TimeToObjectInstanceMap};});'use strict';tr.exportTo('tr.model',function(){var ObjectInstance=tr.model.ObjectInstance;var ObjectSnapshot=tr.model.ObjectSnapshot;function ObjectCollection(parent){tr.model.EventContainer.call(this);this.parent=parent;this.instanceMapsById_={};this.instancesByTypeName_={};this.createObjectInstance_=this.createObjectInstance_.bind(this);}
+dEF='(explicit)';console.log(instance.creationTs,cEF,instance.deletionTs,dEF,instance.category,instance.name,instance.snapshots.length+' snapshots');}}};return{TimeToObjectInstanceMap:TimeToObjectInstanceMap};});'use strict';tr.exportTo('tr.model',function(){var ObjectInstance=tr.model.ObjectInstance;var ObjectSnapshot=tr.model.ObjectSnapshot;function ObjectCollection(parent){tr.model.EventContainer.call(this);this.parent=parent;this.instanceMapsByScopedId_={};this.instancesByTypeName_={};this.createObjectInstance_=this.createObjectInstance_.bind(this);}
 ObjectCollection.prototype={__proto__:tr.model.EventContainer.prototype,iterateAllChildEventContainers:function(callback,opt_this){},iterateAllEventsInThisContainer:function(eventTypePredicate,callback,opt_this){var bI=!!eventTypePredicate.call(opt_this,ObjectInstance);var bS=!!eventTypePredicate.call(opt_this,ObjectSnapshot);if(bI===false&&bS===false)
 return;this.iterObjectInstances(function(instance){if(bI)
 callback.call(opt_this,instance);if(bS)
-instance.snapshots.forEach(callback,opt_this);},opt_this);},createObjectInstance_:function(parent,id,category,name,creationTs,opt_baseTypeName){var constructor=tr.model.ObjectInstance.getConstructor(category,name);var instance=new constructor(parent,id,category,name,creationTs,opt_baseTypeName);var typeName=instance.typeName;var instancesOfTypeName=this.instancesByTypeName_[typeName];if(!instancesOfTypeName){instancesOfTypeName=[];this.instancesByTypeName_[typeName]=instancesOfTypeName;}
-instancesOfTypeName.push(instance);return instance;},getOrCreateInstanceMap_:function(id){var instanceMap=this.instanceMapsById_[id];if(instanceMap)
-return instanceMap;instanceMap=new tr.model.TimeToObjectInstanceMap(this.createObjectInstance_,this.parent,id);this.instanceMapsById_[id]=instanceMap;return instanceMap;},idWasCreated:function(id,category,name,ts){var instanceMap=this.getOrCreateInstanceMap_(id);return instanceMap.idWasCreated(category,name,ts);},addSnapshot:function(id,category,name,ts,args,opt_baseTypeName){var instanceMap=this.getOrCreateInstanceMap_(id);var snapshot=instanceMap.addSnapshot(category,name,ts,args,opt_baseTypeName);if(snapshot.objectInstance.category!=category){var msg='Added snapshot name='+name+' with cat='+category+' impossible. The instance was created/snapshotted with cat='+
+instance.snapshots.forEach(callback,opt_this);},opt_this);},createObjectInstance_:function(parent,scopedId,category,name,creationTs,opt_baseTypeName){var constructor=tr.model.ObjectInstance.getConstructor(category,name);var instance=new constructor(parent,scopedId,category,name,creationTs,opt_baseTypeName);var typeName=instance.typeName;var instancesOfTypeName=this.instancesByTypeName_[typeName];if(!instancesOfTypeName){instancesOfTypeName=[];this.instancesByTypeName_[typeName]=instancesOfTypeName;}
+instancesOfTypeName.push(instance);return instance;},getOrCreateInstanceMap_:function(scopedId){var dict;if(scopedId.scope in this.instanceMapsByScopedId_){dict=this.instanceMapsByScopedId_[scopedId.scope];}else{dict={};this.instanceMapsByScopedId_[scopedId.scope]=dict;}
+var instanceMap=dict[scopedId.id];if(instanceMap)
+return instanceMap;instanceMap=new tr.model.TimeToObjectInstanceMap(this.createObjectInstance_,this.parent,scopedId);dict[scopedId.id]=instanceMap;return instanceMap;},idWasCreated:function(scopedId,category,name,ts){var instanceMap=this.getOrCreateInstanceMap_(scopedId);return instanceMap.idWasCreated(category,name,ts);},addSnapshot:function(scopedId,category,name,ts,args,opt_baseTypeName){var instanceMap=this.getOrCreateInstanceMap_(scopedId);var snapshot=instanceMap.addSnapshot(category,name,ts,args,opt_baseTypeName);if(snapshot.objectInstance.category!=category){var msg='Added snapshot name='+name+' with cat='+category+' impossible. The instance was created/snapshotted with cat='+
 snapshot.objectInstance.category+' name='+
 snapshot.objectInstance.name;throw new Error(msg);}
 if(opt_baseTypeName&&snapshot.objectInstance.baseTypeName!=opt_baseTypeName){throw new Error('Could not add snapshot with baseTypeName='+
@@ -3012,19 +3221,21 @@
 snapshot.objectInstance.baseTypeName);}
 if(snapshot.objectInstance.name!=name){throw new Error('Could not add snapshot with name='+name+'. It '+'was previously created with name='+
 snapshot.objectInstance.name);}
-return snapshot;},idWasDeleted:function(id,category,name,ts){var instanceMap=this.getOrCreateInstanceMap_(id);var deletedInstance=instanceMap.idWasDeleted(category,name,ts);if(!deletedInstance)
+return snapshot;},idWasDeleted:function(scopedId,category,name,ts){var instanceMap=this.getOrCreateInstanceMap_(scopedId);var deletedInstance=instanceMap.idWasDeleted(category,name,ts);if(!deletedInstance)
 return;if(deletedInstance.category!=category){var msg='Deleting object '+deletedInstance.name+' with a different category '+'than when it was created. It previously had cat='+
 deletedInstance.category+' but the delete command '+'had cat='+category;throw new Error(msg);}
 if(deletedInstance.baseTypeName!=name){throw new Error('Deletion requested for name='+
 name+' could not proceed: '+'An existing object with baseTypeName='+
-deletedInstance.baseTypeName+' existed.');}},autoDeleteObjects:function(maxTimestamp){tr.b.iterItems(this.instanceMapsById_,function(id,i2imap){var lastInstance=i2imap.lastInstance;if(lastInstance.deletionTs!=Number.MAX_VALUE)
-return;i2imap.idWasDeleted(lastInstance.category,lastInstance.name,maxTimestamp);lastInstance.deletionTsWasExplicit=false;});},getObjectInstanceAt:function(id,ts){var instanceMap=this.instanceMapsById_[id];if(!instanceMap)
-return undefined;return instanceMap.getInstanceAt(ts);},getSnapshotAt:function(id,ts){var instance=this.getObjectInstanceAt(id,ts);if(!instance)
-return undefined;return instance.getSnapshotAt(ts);},iterObjectInstances:function(iter,opt_this){opt_this=opt_this||this;tr.b.iterItems(this.instanceMapsById_,function(id,i2imap){i2imap.instances.forEach(iter,opt_this);});},getAllObjectInstances:function(){var instances=[];this.iterObjectInstances(function(i){instances.push(i);});return instances;},getAllInstancesNamed:function(name){return this.instancesByTypeName_[name];},getAllInstancesByTypeName:function(){return this.instancesByTypeName_;},preInitializeAllObjects:function(){this.iterObjectInstances(function(instance){instance.preInitialize();});},initializeAllObjects:function(){this.iterObjectInstances(function(instance){instance.initialize();});},initializeInstances:function(){this.iterObjectInstances(function(instance){instance.initialize();});},updateBounds:function(){this.bounds.reset();this.iterObjectInstances(function(instance){instance.updateBounds();this.bounds.addRange(instance.bounds);},this);},shiftTimestampsForward:function(amount){this.iterObjectInstances(function(instance){instance.shiftTimestampsForward(amount);});},addCategoriesToDict:function(categoriesDict){this.iterObjectInstances(function(instance){categoriesDict[instance.category]=true;});}};return{ObjectCollection:ObjectCollection};});'use strict';tr.exportTo('tr.model',function(){function AsyncSlice(category,title,colorId,start,args,duration,opt_isTopLevel,opt_cpuStart,opt_cpuDuration,opt_argsStripped){tr.model.TimedEvent.call(this,start);this.category=category||'';this.originalTitle=title;this.title=title;this.colorId=colorId;this.args=args;this.startStackFrame=undefined;this.endStackFrame=undefined;this.didNotFinish=false;this.important=false;this.subSlices=[];this.parentContainer=undefined;this.id=undefined;this.startThread=undefined;this.endThread=undefined;this.cpuStart=undefined;this.cpuDuration=undefined;this.argsStripped=false;this.startStackFrame=undefined;this.endStackFrame=undefined;this.duration=duration;this.isTopLevel=(opt_isTopLevel===true);if(opt_cpuStart!==undefined)
+deletedInstance.baseTypeName+' existed.');}},autoDeleteObjects:function(maxTimestamp){tr.b.iterItems(this.instanceMapsByScopedId_,function(scope,imapById){tr.b.iterItems(imapById,function(id,i2imap){var lastInstance=i2imap.lastInstance;if(lastInstance.deletionTs!=Number.MAX_VALUE)
+return;i2imap.idWasDeleted(lastInstance.category,lastInstance.name,maxTimestamp);lastInstance.deletionTsWasExplicit=false;});});},getObjectInstanceAt:function(scopedId,ts){var instanceMap;if(scopedId.scope in this.instanceMapsByScopedId_)
+instanceMap=this.instanceMapsByScopedId_[scopedId.scope][scopedId.id];if(!instanceMap)
+return undefined;return instanceMap.getInstanceAt(ts);},getSnapshotAt:function(scopedId,ts){var instance=this.getObjectInstanceAt(scopedId,ts);if(!instance)
+return undefined;return instance.getSnapshotAt(ts);},iterObjectInstances:function(iter,opt_this){opt_this=opt_this||this;tr.b.iterItems(this.instanceMapsByScopedId_,function(scope,imapById){tr.b.iterItems(imapById,function(id,i2imap){i2imap.instances.forEach(iter,opt_this);});});},getAllObjectInstances:function(){var instances=[];this.iterObjectInstances(function(i){instances.push(i);});return instances;},getAllInstancesNamed:function(name){return this.instancesByTypeName_[name];},getAllInstancesByTypeName:function(){return this.instancesByTypeName_;},preInitializeAllObjects:function(){this.iterObjectInstances(function(instance){instance.preInitialize();});},initializeAllObjects:function(){this.iterObjectInstances(function(instance){instance.initialize();});},initializeInstances:function(){this.iterObjectInstances(function(instance){instance.initialize();});},updateBounds:function(){this.bounds.reset();this.iterObjectInstances(function(instance){instance.updateBounds();this.bounds.addRange(instance.bounds);},this);},shiftTimestampsForward:function(amount){this.iterObjectInstances(function(instance){instance.shiftTimestampsForward(amount);});},addCategoriesToDict:function(categoriesDict){this.iterObjectInstances(function(instance){categoriesDict[instance.category]=true;});}};return{ObjectCollection:ObjectCollection};});'use strict';tr.exportTo('tr.model',function(){function AsyncSlice(category,title,colorId,start,args,duration,opt_isTopLevel,opt_cpuStart,opt_cpuDuration,opt_argsStripped){tr.model.TimedEvent.call(this,start);this.category=category||'';this.originalTitle=title;this.title=title;this.colorId=colorId;this.args=args;this.startStackFrame=undefined;this.endStackFrame=undefined;this.didNotFinish=false;this.important=false;this.subSlices=[];this.parentContainer_=undefined;this.id=undefined;this.startThread=undefined;this.endThread=undefined;this.cpuStart=undefined;this.cpuDuration=undefined;this.argsStripped=false;this.startStackFrame=undefined;this.endStackFrame=undefined;this.duration=duration;this.isTopLevel=(opt_isTopLevel===true);if(opt_cpuStart!==undefined)
 this.cpuStart=opt_cpuStart;if(opt_cpuDuration!==undefined)
 this.cpuDuration=opt_cpuDuration;if(opt_argsStripped!==undefined)
-this.argsStripped=opt_argsStripped;};AsyncSlice.prototype={__proto__:tr.model.TimedEvent.prototype,get analysisTypeName(){return this.title;},get viewSubGroupTitle(){return this.title;},get userFriendlyName(){return'Async slice '+this.title+' at '+
-tr.b.u.TimeStamp.format(this.start);},get stableId(){var parentAsyncSliceGroup=this.parentContainer.asyncSliceGroup;return parentAsyncSliceGroup.stableId+'.'+
+this.argsStripped=opt_argsStripped;};AsyncSlice.prototype={__proto__:tr.model.TimedEvent.prototype,get analysisTypeName(){return this.title;},get parentContainer(){return this.parentContainer_;},set parentContainer(parentContainer){this.parentContainer_=parentContainer;for(var i=0;i<this.subSlices.length;i++){var subSlice=this.subSlices[i];if(subSlice.parentContainer===undefined)
+subSlice.parentContainer=parentContainer;}},get viewSubGroupTitle(){return this.title;},get userFriendlyName(){return'Async slice '+this.title+' at '+
+tr.v.Unit.byName.timeStampInMs.format(this.start);},get stableId(){var parentAsyncSliceGroup=this.parentContainer.asyncSliceGroup;return parentAsyncSliceGroup.stableId+'.'+
 parentAsyncSliceGroup.slices.indexOf(this);},findDescendentSlice:function(targetTitle){if(!this.subSlices)
 return undefined;for(var i=0;i<this.subSlices.length;i++){if(this.subSlices[i].title==targetTitle)
 return this.subSlices[i];var slice=this.subSlices[i].findDescendentSlice(targetTitle);if(slice)return slice;}
@@ -3155,12 +3366,37 @@
 this.flowEventsById_[fe.id].push(fe);}},this);}
 ModelIndices.prototype={addEventWithId:function(id,event){if(!this.flowEventsById_.hasOwnProperty(id)){this.flowEventsById_[id]=new Array();}
 this.flowEventsById_[id].push(event);},getFlowEventsWithId:function(id){if(!this.flowEventsById_.hasOwnProperty(id))
-return[];return this.flowEventsById_[id];}};return{ModelIndices:ModelIndices};});'use strict';tr.exportTo('tr.model',function(){var DISCOUNTED_ALLOCATOR_NAMES=['winheap','malloc'];var TRACING_OVERHEAD_PATH=['allocated_objects','tracing_overhead'];var SIZE_ATTRIBUTE_NAME=tr.model.MemoryAllocatorDump.SIZE_ATTRIBUTE_NAME;var RESIDENT_SIZE_ATTRIBUTE_NAME=tr.model.MemoryAllocatorDump.RESIDENT_SIZE_ATTRIBUTE_NAME;function getSizeAttrValue(dump,sizeAttrName,opt_model){var sizeAttr=dump.getValidSizeAttributeOrUndefined(sizeAttrName,opt_model);if(sizeAttr===undefined)
-return 0;return sizeAttr.value;}
-function ProcessMemoryDump(globalMemoryDump,process,start){tr.model.ContainerMemoryDump.call(this,start);this.process=process;this.globalMemoryDump=globalMemoryDump;this.totals=undefined;this.vmRegions_=undefined;this.heapDumps=undefined;this.tracingOverheadOwnershipSetUp_=false;this.tracingOverheadDiscountedFromVmRegions_=false;};ProcessMemoryDump.prototype={__proto__:tr.model.ContainerMemoryDump.prototype,get userFriendlyName(){return'Process memory dump at '+
-tr.b.u.TimeStamp.format(this.start);},get containerName(){return this.process.userFriendlyName;},get processMemoryDumps(){var dumps={};dumps[this.process.pid]=this;return dumps;},get vmRegions(){return this.vmRegions_;},set vmRegions(vmRegions){this.vmRegions_=vmRegions;},get hasOwnVmRegions(){return this.vmRegions_!==undefined;},getMostRecentTotalVmRegionStat:function(statName){if(this.mostRecentVmRegions===undefined)
-return undefined;var total=0;this.mostRecentVmRegions.forEach(function(vmRegion){var statValue=vmRegion.byteStats[statName];if(statValue===undefined)
-return;total+=statValue;});return total;},setUpTracingOverheadOwnership:function(opt_model){if(this.tracingOverheadOwnershipSetUp_)
+return[];return this.flowEventsById_[id];}};return{ModelIndices:ModelIndices};});'use strict';tr.exportTo('tr.model',function(){function ModelStats(){this.traceEventCountsByKey_=new Map();this.allTraceEventStats_=[];this.traceEventStatsInTimeIntervals_=new Map();this.allTraceEventStatsInTimeIntervals_=[];this.hasEventSizesinBytes_=false;}
+ModelStats.prototype={TIME_INTERVAL_SIZE_IN_MS:100,willProcessBasicTraceEvent:function(phase,category,title,ts,opt_eventSizeinBytes){var key=phase+'/'+category+'/'+title;var eventStats=this.traceEventCountsByKey_.get(key);if(eventStats===undefined){eventStats={phase:phase,category:category,title:title,numEvents:0,totalEventSizeinBytes:0};this.traceEventCountsByKey_.set(key,eventStats);this.allTraceEventStats_.push(eventStats);}
+eventStats.numEvents++;var timeIntervalKey=Math.floor(tr.v.Unit.timestampFromUs(ts)/this.TIME_INTERVAL_SIZE_IN_MS);var eventStatsByTimeInverval=this.traceEventStatsInTimeIntervals_.get(timeIntervalKey);if(eventStatsByTimeInverval===undefined){eventStatsByTimeInverval={timeInterval:timeIntervalKey,numEvents:0,totalEventSizeinBytes:0};this.traceEventStatsInTimeIntervals_.set(timeIntervalKey,eventStatsByTimeInverval);this.allTraceEventStatsInTimeIntervals_.push(eventStatsByTimeInverval);}
+eventStatsByTimeInverval.numEvents++;if(opt_eventSizeinBytes!==undefined){this.hasEventSizesinBytes_=true;eventStats.totalEventSizeinBytes+=opt_eventSizeinBytes;eventStatsByTimeInverval.totalEventSizeinBytes+=opt_eventSizeinBytes;}},get allTraceEventStats(){return this.allTraceEventStats_;},get allTraceEventStatsInTimeIntervals(){return this.allTraceEventStatsInTimeIntervals_;},get hasEventSizesinBytes(){return this.hasEventSizesinBytes_;}};return{ModelStats:ModelStats};});'use strict';tr.exportTo('tr.model',function(){function VMRegion(startAddress,sizeInBytes,protectionFlags,mappedFile,byteStats){this.startAddress=startAddress;this.sizeInBytes=sizeInBytes;this.protectionFlags=protectionFlags;this.mappedFile=mappedFile||'';this.byteStats=byteStats||{};};VMRegion.PROTECTION_FLAG_READ=4;VMRegion.PROTECTION_FLAG_WRITE=2;VMRegion.PROTECTION_FLAG_EXECUTE=1;VMRegion.PROTECTION_FLAG_MAYSHARE=128;VMRegion.prototype={get uniqueIdWithinProcess(){return this.mappedFile+'#'+this.startAddress;},get protectionFlagsToString(){if(this.protectionFlags===undefined)
+return undefined;return((this.protectionFlags&VMRegion.PROTECTION_FLAG_READ?'r':'-')+
+(this.protectionFlags&VMRegion.PROTECTION_FLAG_WRITE?'w':'-')+
+(this.protectionFlags&VMRegion.PROTECTION_FLAG_EXECUTE?'x':'-')+
+(this.protectionFlags&VMRegion.PROTECTION_FLAG_MAYSHARE?'s':'p'));}};VMRegion.fromDict=function(dict){return new VMRegion(dict.startAddress,dict.sizeInBytes,dict.protectionFlags,dict.mappedFile,dict.byteStats);};function VMRegionClassificationNode(opt_rule){this.rule_=opt_rule||VMRegionClassificationNode.CLASSIFICATION_RULES;this.hasRegions=false;this.sizeInBytes=undefined;this.byteStats={};this.children_=undefined;this.regions_=[];}
+VMRegionClassificationNode.CLASSIFICATION_RULES={name:'Total',children:[{name:'Android',file:/^\/dev\/ashmem(?!\/libc malloc)/,children:[{name:'Java runtime',file:/^\/dev\/ashmem\/dalvik-/,children:[{name:'Spaces',file:/\/dalvik-(alloc|main|large object|non moving|zygote) space/,children:[{name:'Normal',file:/\/dalvik-(alloc|main)/},{name:'Large',file:/\/dalvik-large object/},{name:'Zygote',file:/\/dalvik-zygote/},{name:'Non-moving',file:/\/dalvik-non moving/}]},{name:'Linear Alloc',file:/\/dalvik-LinearAlloc/},{name:'Indirect Reference Table',file:/\/dalvik-indirect.ref/},{name:'Cache',file:/\/dalvik-jit-code-cache/},{name:'Accounting'}]},{name:'Cursor',file:/\/CursorWindow/},{name:'Ashmem'}]},{name:'Native heap',file:/^((\[heap\])|(\[anon:)|(\/dev\/ashmem\/libc malloc)|(\[discounted tracing overhead\])|$)/},{name:'Stack',file:/^\[stack/},{name:'Files',file:/\.((((jar)|(apk)|(ttf)|(odex)|(oat)|(art))$)|(dex)|(so))/,children:[{name:'so',file:/\.so/},{name:'jar',file:/\.jar$/},{name:'apk',file:/\.apk$/},{name:'ttf',file:/\.ttf$/},{name:'dex',file:/\.((dex)|(odex$))/},{name:'oat',file:/\.oat$/},{name:'art',file:/\.art$/}]},{name:'Devices',file:/(^\/dev\/)|(anon_inode:dmabuf)/,children:[{name:'GPU',file:/\/((nv)|(mali)|(kgsl))/},{name:'DMA',file:/anon_inode:dmabuf/}]}]};VMRegionClassificationNode.OTHER_RULE={name:'Other'};VMRegionClassificationNode.fromRegions=function(regions,opt_rules){var tree=new VMRegionClassificationNode(opt_rules);tree.regions_=regions;for(var i=0;i<regions.length;i++)
+tree.addStatsFromRegion_(regions[i]);return tree;};VMRegionClassificationNode.prototype={get title(){return this.rule_.name;},get children(){if(this.isLeafNode)
+return undefined;if(this.children_===undefined)
+this.buildTree_();return this.children_;},get regions(){if(!this.isLeafNode){return undefined;}
+return this.regions_;},get allRegionsForTesting(){if(this.regions_!==undefined){if(this.children_!==undefined){throw new Error('Internal error: a VM region classification node '+'cannot have both regions and children');}
+return this.regions_;}
+var regions=[];this.children_.forEach(function(childNode){regions=regions.concat(childNode.allRegionsForTesting);});return regions;},get isLeafNode(){var children=this.rule_.children;return children===undefined||children.length===0;},addRegion:function(region){this.addRegionRecursively_(region,true);},someRegion:function(fn,opt_this){if(this.regions_!==undefined){return this.regions_.some(fn,opt_this);}
+return this.children_.some(function(childNode){return childNode.someRegion(fn,opt_this);});},addRegionRecursively_:function(region,addStatsToThisNode){if(addStatsToThisNode)
+this.addStatsFromRegion_(region);if(this.regions_!==undefined){if(this.children_!==undefined){throw new Error('Internal error: a VM region classification node '+'cannot have both regions and children');}
+this.regions_.push(region);return;}
+function regionRowMatchesChildNode(child){var fileRegExp=child.rule_.file;if(fileRegExp===undefined)
+return true;return fileRegExp.test(region.mappedFile);}
+var matchedChild=tr.b.findFirstInArray(this.children_,regionRowMatchesChildNode);if(matchedChild===undefined){if(this.children_.length!==this.rule_.children.length)
+throw new Error('Internal error');matchedChild=new VMRegionClassificationNode(VMRegionClassificationNode.OTHER_RULE);this.children_.push(matchedChild);}
+matchedChild.addRegionRecursively_(region,true);},buildTree_:function(){var cachedRegions=this.regions_;this.regions_=undefined;this.buildChildNodesRecursively_();for(var i=0;i<cachedRegions.length;i++){this.addRegionRecursively_(cachedRegions[i],false);}},buildChildNodesRecursively_:function(){if(this.children_!==undefined){throw new Error('Internal error: Classification node already has children');}
+if(this.regions_!==undefined&&this.regions_.length!==0){throw new Error('Internal error: Classification node should have no regions');}
+if(this.isLeafNode)
+return;this.regions_=undefined;this.children_=this.rule_.children.map(function(childRule){var child=new VMRegionClassificationNode(childRule);child.buildChildNodesRecursively_();return child;});},addStatsFromRegion_:function(region){this.hasRegions=true;var regionSizeInBytes=region.sizeInBytes;if(regionSizeInBytes!==undefined)
+this.sizeInBytes=(this.sizeInBytes||0)+regionSizeInBytes;var thisByteStats=this.byteStats;var regionByteStats=region.byteStats;for(var byteStatName in regionByteStats){var regionByteStatValue=regionByteStats[byteStatName];if(regionByteStatValue===undefined)
+continue;thisByteStats[byteStatName]=(thisByteStats[byteStatName]||0)+regionByteStatValue;}}};return{VMRegion:VMRegion,VMRegionClassificationNode:VMRegionClassificationNode};});'use strict';tr.exportTo('tr.model',function(){var DISCOUNTED_ALLOCATOR_NAMES=['winheap','malloc'];var TRACING_OVERHEAD_PATH=['allocated_objects','tracing_overhead'];var SIZE_NUMERIC_NAME=tr.model.MemoryAllocatorDump.SIZE_NUMERIC_NAME;var RESIDENT_SIZE_NUMERIC_NAME=tr.model.MemoryAllocatorDump.RESIDENT_SIZE_NUMERIC_NAME;function getSizeNumericValue(dump,sizeNumericName){var sizeNumeric=dump.numerics[sizeNumericName];if(sizeNumeric===undefined)
+return 0;return sizeNumeric.value;}
+function ProcessMemoryDump(globalMemoryDump,process,start){tr.model.ContainerMemoryDump.call(this,start);this.process=process;this.globalMemoryDump=globalMemoryDump;this.totals=undefined;this.vmRegions=undefined;this.heapDumps=undefined;this.tracingOverheadOwnershipSetUp_=false;this.tracingOverheadDiscountedFromVmRegions_=false;};ProcessMemoryDump.prototype={__proto__:tr.model.ContainerMemoryDump.prototype,get userFriendlyName(){return'Process memory dump at '+
+tr.v.Unit.byName.timeStampInMs.format(this.start);},get containerName(){return this.process.userFriendlyName;},get processMemoryDumps(){var dumps={};dumps[this.process.pid]=this;return dumps;},get hasOwnVmRegions(){return this.vmRegions!==undefined;},setUpTracingOverheadOwnership:function(opt_model){if(this.tracingOverheadOwnershipSetUp_)
 return;this.tracingOverheadOwnershipSetUp_=true;var tracingDump=this.getMemoryAllocatorDumpByFullName('tracing');if(tracingDump===undefined||tracingDump.owns!==undefined){return;}
 if(tracingDump.owns!==undefined)
 return;var hasDiscountedFromAllocatorDumps=DISCOUNTED_ALLOCATOR_NAMES.some(function(allocatorName){var allocatorDump=this.getMemoryAllocatorDumpByFullName(allocatorName);if(allocatorDump===undefined)
@@ -3170,23 +3406,16 @@
 for(;nextPathIndex<TRACING_OVERHEAD_PATH.length;nextPathIndex++){var childFullName=currentFullName+'/'+
 TRACING_OVERHEAD_PATH[nextPathIndex];var childDump=new tr.model.MemoryAllocatorDump(currentDump.containerMemoryDump,childFullName);childDump.parent=currentDump;currentDump.children.push(childDump);currentFullName=childFullName;currentDump=childDump;}
 var ownershipLink=new tr.model.MemoryAllocatorDumpLink(tracingDump,currentDump);tracingDump.owns=ownershipLink;currentDump.ownedBy.push(ownershipLink);return true;},this);if(hasDiscountedFromAllocatorDumps)
-this.memoryAllocatorDumps=this.memoryAllocatorDumps;},discountTracingOverheadFromVmRegions:function(opt_model){if(this.tracingOverheadDiscountedFromVmRegions_)
+this.forceRebuildingMemoryAllocatorDumpByFullNameIndex();},discountTracingOverheadFromVmRegions:function(opt_model){if(this.tracingOverheadDiscountedFromVmRegions_)
 return;this.tracingOverheadDiscountedFromVmRegions_=true;var tracingDump=this.getMemoryAllocatorDumpByFullName('tracing');if(tracingDump===undefined)
-return;var discountedSize=getSizeAttrValue(tracingDump,SIZE_ATTRIBUTE_NAME);var discountedResidentSize=getSizeAttrValue(tracingDump,RESIDENT_SIZE_ATTRIBUTE_NAME);if(discountedSize<=0&&discountedResidentSize<=0)
+return;var discountedSize=getSizeNumericValue(tracingDump,SIZE_NUMERIC_NAME);var discountedResidentSize=getSizeNumericValue(tracingDump,RESIDENT_SIZE_NUMERIC_NAME);if(discountedSize<=0&&discountedResidentSize<=0)
 return;if(this.totals!==undefined){if(this.totals.residentBytes!==undefined)
 this.totals.residentBytes-=discountedResidentSize;if(this.totals.peakResidentBytes!==undefined)
 this.totals.peakResidentBytes-=discountedResidentSize;}
-if(this.vmRegions_!==undefined){var hasSizeInBytes=false;var hasPrivateDirtyResident=false;var hasProportionalResident=false;for(var i=0;i<this.vmRegions_.length;i++){var vmRegion=this.vmRegions_[i];if(vmRegion.sizeInBytes!==undefined)
-hasSizeInBytes=true;var byteStats=vmRegion.byteStats;if(byteStats.privateDirtyResident!==undefined)
-hasPrivateDirtyResident=true;if(byteStats.proportionalResident!==undefined)
-hasProportionalResident=true;if(hasSizeInBytes&&hasPrivateDirtyResident&&hasProportionalResident){break;}}
-if((hasSizeInBytes&&discountedSize>0)||((hasPrivateDirtyResident||hasProportionalResident)&&discountedResidentSize>0)){this.vmRegions_.push(VMRegion.fromDict({mappedFile:'[discounted tracing overhead]',sizeInBytes:hasSizeInBytes?-discountedSize:undefined,byteStats:{privateDirtyResident:hasPrivateDirtyResident?-discountedResidentSize:undefined,proportionalResident:hasProportionalResident?-discountedResidentSize:undefined}}));}}}};ProcessMemoryDump.hookUpMostRecentVmRegionsLinks=function(processDumps){var mostRecentVmRegions=undefined;processDumps.forEach(function(processDump){if(processDump.vmRegions_!==undefined)
-mostRecentVmRegions=processDump.vmRegions_;processDump.mostRecentVmRegions=mostRecentVmRegions;});};function VMRegion(startAddress,sizeInBytes,protectionFlags,mappedFile,byteStats){this.startAddress=startAddress;this.sizeInBytes=sizeInBytes;this.protectionFlags=protectionFlags;this.mappedFile=mappedFile;this.byteStats=byteStats;};VMRegion.PROTECTION_FLAG_READ=4;VMRegion.PROTECTION_FLAG_WRITE=2;VMRegion.PROTECTION_FLAG_EXECUTE=1;VMRegion.prototype={get protectionFlagsToString(){if(this.protectionFlags===undefined)
-return undefined;return((this.protectionFlags&VMRegion.PROTECTION_FLAG_READ?'r':'-')+
-(this.protectionFlags&VMRegion.PROTECTION_FLAG_WRITE?'w':'-')+
-(this.protectionFlags&VMRegion.PROTECTION_FLAG_EXECUTE?'x':'-'));}};VMRegion.fromDict=function(dict){return new VMRegion(dict.startAddress,dict.sizeInBytes,dict.protectionFlags,dict.mappedFile,VMRegionByteStats.fromDict(dict.byteStats));};function VMRegionByteStats(privateCleanResident,privateDirtyResident,sharedCleanResident,sharedDirtyResident,proportionalResident,swapped){this.privateCleanResident=privateCleanResident;this.privateDirtyResident=privateDirtyResident;this.sharedCleanResident=sharedCleanResident;this.sharedDirtyResident=sharedDirtyResident;this.proportionalResident=proportionalResident;this.swapped=swapped;}
-VMRegionByteStats.fromDict=function(dict){return new VMRegionByteStats(dict.privateCleanResident,dict.privateDirtyResident,dict.sharedCleanResident,dict.sharedDirtyResident,dict.proportionalResident,dict.swapped);}
-tr.model.EventRegistry.register(ProcessMemoryDump,{name:'processMemoryDump',pluralName:'processMemoryDumps',singleViewElementName:'tr-ui-a-container-memory-dump-sub-view',multiViewElementName:'tr-ui-a-container-memory-dump-sub-view'});return{ProcessMemoryDump:ProcessMemoryDump,VMRegion:VMRegion,VMRegionByteStats:VMRegionByteStats};});'use strict';tr.exportTo('tr.model',function(){var ProcessBase=tr.model.ProcessBase;var ProcessInstantEvent=tr.model.ProcessInstantEvent;var Frame=tr.model.Frame;var ProcessMemoryDump=tr.model.ProcessMemoryDump;function Process(model,pid){if(model===undefined)
+if(this.vmRegions!==undefined){var hasSizeInBytes=this.vmRegions.sizeInBytes!==undefined;var hasPrivateDirtyResident=this.vmRegions.byteStats.privateDirtyResident!==undefined;var hasProportionalResident=this.vmRegions.byteStats.proportionalResident!==undefined;if((hasSizeInBytes&&discountedSize>0)||((hasPrivateDirtyResident||hasProportionalResident)&&discountedResidentSize>0)){var byteStats={};if(hasPrivateDirtyResident)
+byteStats.privateDirtyResident=-discountedResidentSize;if(hasProportionalResident)
+byteStats.proportionalResident=-discountedResidentSize;this.vmRegions.addRegion(tr.model.VMRegion.fromDict({mappedFile:'[discounted tracing overhead]',sizeInBytes:hasSizeInBytes?-discountedSize:undefined,byteStats:byteStats}));}}}};ProcessMemoryDump.hookUpMostRecentVmRegionsLinks=function(processDumps){var mostRecentVmRegions=undefined;processDumps.forEach(function(processDump){if(processDump.vmRegions!==undefined)
+mostRecentVmRegions=processDump.vmRegions;processDump.mostRecentVmRegions=mostRecentVmRegions;});};tr.model.EventRegistry.register(ProcessMemoryDump,{name:'processMemoryDump',pluralName:'processMemoryDumps',singleViewElementName:'tr-ui-a-container-memory-dump-sub-view',multiViewElementName:'tr-ui-a-container-memory-dump-sub-view'});return{ProcessMemoryDump:ProcessMemoryDump};});'use strict';tr.exportTo('tr.model',function(){var ProcessBase=tr.model.ProcessBase;var ProcessInstantEvent=tr.model.ProcessInstantEvent;var Frame=tr.model.Frame;var ProcessMemoryDump=tr.model.ProcessMemoryDump;function Process(model,pid){if(model===undefined)
 throw new Error('model must be provided');if(pid===undefined)
 throw new Error('pid must be provided');tr.model.ProcessBase.call(this,model);this.pid=pid;this.name=undefined;this.labels=[];this.instantEvents=[];this.memoryDumps=[];this.frames=[];this.activities=[];};Process.compare=function(x,y){var tmp=tr.model.ProcessBase.compare(x,y);if(tmp)
 return tmp;tmp=tr.b.comparePossiblyUndefinedValues(x.name,y.name,function(x,y){return x.localeCompare(y);});if(tmp)
@@ -3194,7 +3423,7 @@
 return tmp;return x.pid-y.pid;};Process.prototype={__proto__:tr.model.ProcessBase.prototype,get stableId(){return this.pid;},compareTo:function(that){return Process.compare(this,that);},iterateAllEventsInThisContainer:function(eventTypePredicate,callback,opt_this){ProcessBase.prototype.iterateAllEventsInThisContainer.call(this,eventTypePredicate,callback,opt_this);if(eventTypePredicate.call(opt_this,ProcessInstantEvent))
 this.instantEvents.forEach(callback,opt_this);if(eventTypePredicate.call(opt_this,Frame))
 this.frames.forEach(callback,opt_this);if(eventTypePredicate.call(opt_this,ProcessMemoryDump))
-this.memoryDumps.forEach(callback,opt_this);},pushInstantEvent:function(instantEvent){this.instantEvents.push(instantEvent);},addLabelIfNeeded:function(labelName){for(var i=0;i<this.labels.length;i++){if(this.labels[i]===labelName)
+this.memoryDumps.forEach(callback,opt_this);},addLabelIfNeeded:function(labelName){for(var i=0;i<this.labels.length;i++){if(this.labels[i]===labelName)
 return;}
 this.labels.push(labelName);},get userFriendlyName(){var res;if(this.name)
 res=this.name+' (pid '+this.pid+')';else
@@ -3202,15 +3431,15 @@
 res+=': '+this.labels.join(', ');return res;},get userFriendlyDetails(){if(this.name)
 return this.name+' (pid '+this.pid+')';return'pid: '+this.pid;},getSettingsKey:function(){if(!this.name)
 return undefined;if(!this.labels.length)
-return'processes.'+this.name;return'processes.'+this.name+'.'+this.labels.join('.');},shiftTimestampsForward:function(amount){for(var id in this.instantEvents)
-this.instantEvents[id].start+=amount;for(var i=0;i<this.frames.length;i++)
+return'processes.'+this.name;return'processes.'+this.name+'.'+this.labels.join('.');},shiftTimestampsForward:function(amount){for(var i=0;i<this.instantEvents.length;i++)
+this.instantEvents[i].start+=amount;for(var i=0;i<this.frames.length;i++)
 this.frames[i].shiftTimestampsForward(amount);for(var i=0;i<this.memoryDumps.length;i++)
 this.memoryDumps[i].shiftTimestampsForward(amount);for(var i=0;i<this.activities.length;i++)
 this.activities[i].shiftTimestampsForward(amount);tr.model.ProcessBase.prototype.shiftTimestampsForward.apply(this,arguments);},updateBounds:function(){tr.model.ProcessBase.prototype.updateBounds.apply(this);for(var i=0;i<this.frames.length;i++)
 this.frames[i].addBoundsToRange(this.bounds);for(var i=0;i<this.memoryDumps.length;i++)
 this.memoryDumps[i].addBoundsToRange(this.bounds);for(var i=0;i<this.activities.length;i++)
 this.activities[i].addBoundsToRange(this.bounds);},sortMemoryDumps:function(){this.memoryDumps.sort(function(x,y){return x.start-y.start;});tr.model.ProcessMemoryDump.hookUpMostRecentVmRegionsLinks(this.memoryDumps);}};return{Process:Process};});'use strict';tr.exportTo('tr.model',function(){function Sample(cpu,thread,title,start,leafStackFrame,opt_weight,opt_args){tr.model.TimedEvent.call(this,start);this.title=title;this.cpu=cpu;this.thread=thread;this.leafStackFrame=leafStackFrame;this.weight=opt_weight;this.args=opt_args||{};}
-Sample.prototype={__proto__:tr.model.TimedEvent.prototype,get colorId(){return this.leafStackFrame.colorId;},get stackTrace(){return this.leafStackFrame.stackTrace;},getUserFriendlyStackTrace:function(){return this.leafStackFrame.getUserFriendlyStackTrace();},get userFriendlyName(){return'Sample at '+tr.b.u.TimeStamp.format(this.start);}};tr.model.EventRegistry.register(Sample,{name:'sample',pluralName:'samples',singleViewElementName:'tr-ui-a-single-sample-sub-view',multiViewElementName:'tr-ui-a-multi-sample-sub-view'});return{Sample:Sample};});'use strict';tr.exportTo('tr.model',function(){function StackFrame(parentFrame,id,title,colorId,opt_sourceInfo){if(id===undefined)
+Sample.prototype={__proto__:tr.model.TimedEvent.prototype,get colorId(){return this.leafStackFrame.colorId;},get stackTrace(){return this.leafStackFrame.stackTrace;},getUserFriendlyStackTrace:function(){return this.leafStackFrame.getUserFriendlyStackTrace();},get userFriendlyName(){return'Sample at '+tr.v.Unit.byName.timeStampInMs.format(this.start);}};tr.model.EventRegistry.register(Sample,{name:'sample',pluralName:'samples',singleViewElementName:'tr-ui-a-single-sample-sub-view',multiViewElementName:'tr-ui-a-multi-sample-sub-view'});return{Sample:Sample};});'use strict';tr.exportTo('tr.model',function(){function StackFrame(parentFrame,id,title,colorId,opt_sourceInfo){if(id===undefined)
 throw new Error('id must be given');this.parentFrame_=parentFrame;this.id=id;this.title_=title;this.colorId=colorId;this.children=[];this.sourceInfo_=opt_sourceInfo;if(this.parentFrame_)
 this.parentFrame_.addChild(this);}
 StackFrame.prototype={get parentFrame(){return this.parentFrame_;},get title(){if(this.sourceInfo_){var src=this.sourceInfo_.toString();return this.title_+(src===''?'':' '+src);}
@@ -3221,7 +3450,9 @@
 this.parentFrame_.addChild(this);},addChild:function(child){this.children.push(child);},removeChild:function(child){var i=this.children.indexOf(child.id);if(i==-1)
 throw new Error('omg');this.children.splice(i,1);},removeAllChildren:function(){for(var i=0;i<this.children.length;i++)
 this.children[i].parentFrame_=undefined;this.children.splice(0,this.children.length);},get stackTrace(){var stack=[];var cur=this;while(cur){stack.push(cur);cur=cur.parentFrame;}
-return stack;},getUserFriendlyStackTrace:function(){return this.stackTrace.map(function(x){return x.title;});}};return{StackFrame:StackFrame};});'use strict';tr.exportTo('tr.ui.b',function(){function decorate(source,constr){var elements;if(typeof source=='string')
+return stack;},getUserFriendlyStackTrace:function(){return this.stackTrace.map(function(x){return x.title;});}};return{StackFrame:StackFrame};});'use strict';tr.exportTo('tr.model.um',function(){function UserModel(parentModel){tr.model.EventContainer.call(this);this.parentModel_=parentModel;this.expectations_=new tr.model.EventSet();}
+UserModel.prototype={__proto__:tr.model.EventContainer.prototype,get stableId(){return'UserModel';},get parentModel(){return this.parentModel_;},sortExpectations:function(){Array.prototype.sort.call(this.expectations_,function(x,y){return x.start-y.start;});},get expectations(){return this.expectations_;},shiftTimestampsForward:function(amount){},addCategoriesToDict:function(categoriesDict){},iterateAllEventsInThisContainer:function(eventTypePredicate,callback,opt_this){if(eventTypePredicate.call(opt_this,tr.model.um.UserExpectation))
+this.expectations.forEach(callback,opt_this);},iterateAllChildEventContainers:function(callback,opt_this){},updateBounds:function(){this.bounds.reset();this.expectations.forEach(function(expectation){expectation.addBoundsToRange(this.bounds);},this);}};return{UserModel:UserModel};});'use strict';tr.exportTo('tr.ui.b',function(){function decorate(source,constr){var elements;if(typeof source=='string')
 elements=tr.doc.querySelectorAll(source);else
 elements=[source];for(var i=0,el;el=elements[i];i++){if(!(el instanceof constr))
 constr.decorate(el);}}
@@ -3238,19 +3469,7 @@
 function elementIsChildOf(el,potentialParent){if(el==potentialParent)
 return false;var cur=el;while(cur.parentNode){if(cur==potentialParent)
 return true;cur=cur.parentNode;}
-return false;};return{decorate:decorate,define:define,elementIsChildOf:elementIsChildOf};});!function(t,n){if("object"==typeof exports&&"object"==typeof module)module.exports=n();else if("function"==typeof define&&define.amd)define(n);else{var r=n();for(var a in r)("object"==typeof exports?exports:t)[a]=r[a]}}(this,function(){return function(t){function n(a){if(r[a])return r[a].exports;var e=r[a]={exports:{},id:a,loaded:!1};return t[a].call(e.exports,e,e.exports,n),e.loaded=!0,e.exports}var r={};return n.m=t,n.c=r,n.p="",n(0)}([function(t,n,r){n.glMatrix=r(1),n.mat2=r(2),n.mat2d=r(3),n.mat3=r(4),n.mat4=r(5),n.quat=r(6),n.vec2=r(9),n.vec3=r(7),n.vec4=r(8)},function(t,n,r){var a={};a.EPSILON=1e-6,a.ARRAY_TYPE="undefined"!=typeof Float32Array?Float32Array:Array,a.RANDOM=Math.random,a.setMatrixArrayType=function(t){GLMAT_ARRAY_TYPE=t};var e=Math.PI/180;a.toRadian=function(t){return t*e},t.exports=a},function(t,n,r){var a=r(1),e={};e.create=function(){var t=new a.ARRAY_TYPE(4);return t[0]=1,t[1]=0,t[2]=0,t[3]=1,t},e.clone=function(t){var n=new a.ARRAY_TYPE(4);return n[0]=t[0],n[1]=t[1],n[2]=t[2],n[3]=t[3],n},e.copy=function(t,n){return t[0]=n[0],t[1]=n[1],t[2]=n[2],t[3]=n[3],t},e.identity=function(t){return t[0]=1,t[1]=0,t[2]=0,t[3]=1,t},e.transpose=function(t,n){if(t===n){var r=n[1];t[1]=n[2],t[2]=r}else t[0]=n[0],t[1]=n[2],t[2]=n[1],t[3]=n[3];return t},e.invert=function(t,n){var r=n[0],a=n[1],e=n[2],u=n[3],o=r*u-e*a;return o?(o=1/o,t[0]=u*o,t[1]=-a*o,t[2]=-e*o,t[3]=r*o,t):null},e.adjoint=function(t,n){var r=n[0];return t[0]=n[3],t[1]=-n[1],t[2]=-n[2],t[3]=r,t},e.determinant=function(t){return t[0]*t[3]-t[2]*t[1]},e.multiply=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=n[3],i=r[0],c=r[1],f=r[2],s=r[3];return t[0]=a*i+u*c,t[1]=e*i+o*c,t[2]=a*f+u*s,t[3]=e*f+o*s,t},e.mul=e.multiply,e.rotate=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=n[3],i=Math.sin(r),c=Math.cos(r);return t[0]=a*c+u*i,t[1]=e*c+o*i,t[2]=a*-i+u*c,t[3]=e*-i+o*c,t},e.scale=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=n[3],i=r[0],c=r[1];return t[0]=a*i,t[1]=e*i,t[2]=u*c,t[3]=o*c,t},e.fromRotation=function(t,n){var r=Math.sin(n),a=Math.cos(n);return t[0]=a,t[1]=r,t[2]=-r,t[3]=a,t},e.fromScaling=function(t,n){return t[0]=n[0],t[1]=0,t[2]=0,t[3]=n[1],t},e.str=function(t){return"mat2("+t[0]+", "+t[1]+", "+t[2]+", "+t[3]+")"},e.frob=function(t){return Math.sqrt(Math.pow(t[0],2)+Math.pow(t[1],2)+Math.pow(t[2],2)+Math.pow(t[3],2))},e.LDU=function(t,n,r,a){return t[2]=a[2]/a[0],r[0]=a[0],r[1]=a[1],r[3]=a[3]-t[2]*r[1],[t,n,r]},t.exports=e},function(t,n,r){var a=r(1),e={};e.create=function(){var t=new a.ARRAY_TYPE(6);return t[0]=1,t[1]=0,t[2]=0,t[3]=1,t[4]=0,t[5]=0,t},e.clone=function(t){var n=new a.ARRAY_TYPE(6);return n[0]=t[0],n[1]=t[1],n[2]=t[2],n[3]=t[3],n[4]=t[4],n[5]=t[5],n},e.copy=function(t,n){return t[0]=n[0],t[1]=n[1],t[2]=n[2],t[3]=n[3],t[4]=n[4],t[5]=n[5],t},e.identity=function(t){return t[0]=1,t[1]=0,t[2]=0,t[3]=1,t[4]=0,t[5]=0,t},e.invert=function(t,n){var r=n[0],a=n[1],e=n[2],u=n[3],o=n[4],i=n[5],c=r*u-a*e;return c?(c=1/c,t[0]=u*c,t[1]=-a*c,t[2]=-e*c,t[3]=r*c,t[4]=(e*i-u*o)*c,t[5]=(a*o-r*i)*c,t):null},e.determinant=function(t){return t[0]*t[3]-t[1]*t[2]},e.multiply=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=n[3],i=n[4],c=n[5],f=r[0],s=r[1],h=r[2],M=r[3],l=r[4],v=r[5];return t[0]=a*f+u*s,t[1]=e*f+o*s,t[2]=a*h+u*M,t[3]=e*h+o*M,t[4]=a*l+u*v+i,t[5]=e*l+o*v+c,t},e.mul=e.multiply,e.rotate=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=n[3],i=n[4],c=n[5],f=Math.sin(r),s=Math.cos(r);return 
t[0]=a*s+u*f,t[1]=e*s+o*f,t[2]=a*-f+u*s,t[3]=e*-f+o*s,t[4]=i,t[5]=c,t},e.scale=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=n[3],i=n[4],c=n[5],f=r[0],s=r[1];return t[0]=a*f,t[1]=e*f,t[2]=u*s,t[3]=o*s,t[4]=i,t[5]=c,t},e.translate=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=n[3],i=n[4],c=n[5],f=r[0],s=r[1];return t[0]=a,t[1]=e,t[2]=u,t[3]=o,t[4]=a*f+u*s+i,t[5]=e*f+o*s+c,t},e.fromRotation=function(t,n){var r=Math.sin(n),a=Math.cos(n);return t[0]=a,t[1]=r,t[2]=-r,t[3]=a,t[4]=0,t[5]=0,t},e.fromScaling=function(t,n){return t[0]=n[0],t[1]=0,t[2]=0,t[3]=n[1],t[4]=0,t[5]=0,t},e.fromTranslation=function(t,n){return t[0]=1,t[1]=0,t[2]=0,t[3]=1,t[4]=n[0],t[5]=n[1],t},e.str=function(t){return"mat2d("+t[0]+", "+t[1]+", "+t[2]+", "+t[3]+", "+t[4]+", "+t[5]+")"},e.frob=function(t){return Math.sqrt(Math.pow(t[0],2)+Math.pow(t[1],2)+Math.pow(t[2],2)+Math.pow(t[3],2)+Math.pow(t[4],2)+Math.pow(t[5],2)+1)},t.exports=e},function(t,n,r){var a=r(1),e={};e.create=function(){var t=new a.ARRAY_TYPE(9);return t[0]=1,t[1]=0,t[2]=0,t[3]=0,t[4]=1,t[5]=0,t[6]=0,t[7]=0,t[8]=1,t},e.fromMat4=function(t,n){return t[0]=n[0],t[1]=n[1],t[2]=n[2],t[3]=n[4],t[4]=n[5],t[5]=n[6],t[6]=n[8],t[7]=n[9],t[8]=n[10],t},e.clone=function(t){var n=new a.ARRAY_TYPE(9);return n[0]=t[0],n[1]=t[1],n[2]=t[2],n[3]=t[3],n[4]=t[4],n[5]=t[5],n[6]=t[6],n[7]=t[7],n[8]=t[8],n},e.copy=function(t,n){return t[0]=n[0],t[1]=n[1],t[2]=n[2],t[3]=n[3],t[4]=n[4],t[5]=n[5],t[6]=n[6],t[7]=n[7],t[8]=n[8],t},e.identity=function(t){return t[0]=1,t[1]=0,t[2]=0,t[3]=0,t[4]=1,t[5]=0,t[6]=0,t[7]=0,t[8]=1,t},e.transpose=function(t,n){if(t===n){var r=n[1],a=n[2],e=n[5];t[1]=n[3],t[2]=n[6],t[3]=r,t[5]=n[7],t[6]=a,t[7]=e}else t[0]=n[0],t[1]=n[3],t[2]=n[6],t[3]=n[1],t[4]=n[4],t[5]=n[7],t[6]=n[2],t[7]=n[5],t[8]=n[8];return t},e.invert=function(t,n){var r=n[0],a=n[1],e=n[2],u=n[3],o=n[4],i=n[5],c=n[6],f=n[7],s=n[8],h=s*o-i*f,M=-s*u+i*c,l=f*u-o*c,v=r*h+a*M+e*l;return v?(v=1/v,t[0]=h*v,t[1]=(-s*a+e*f)*v,t[2]=(i*a-e*o)*v,t[3]=M*v,t[4]=(s*r-e*c)*v,t[5]=(-i*r+e*u)*v,t[6]=l*v,t[7]=(-f*r+a*c)*v,t[8]=(o*r-a*u)*v,t):null},e.adjoint=function(t,n){var r=n[0],a=n[1],e=n[2],u=n[3],o=n[4],i=n[5],c=n[6],f=n[7],s=n[8];return t[0]=o*s-i*f,t[1]=e*f-a*s,t[2]=a*i-e*o,t[3]=i*c-u*s,t[4]=r*s-e*c,t[5]=e*u-r*i,t[6]=u*f-o*c,t[7]=a*c-r*f,t[8]=r*o-a*u,t},e.determinant=function(t){var n=t[0],r=t[1],a=t[2],e=t[3],u=t[4],o=t[5],i=t[6],c=t[7],f=t[8];return n*(f*u-o*c)+r*(-f*e+o*i)+a*(c*e-u*i)},e.multiply=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=n[3],i=n[4],c=n[5],f=n[6],s=n[7],h=n[8],M=r[0],l=r[1],v=r[2],m=r[3],p=r[4],d=r[5],A=r[6],R=r[7],w=r[8];return t[0]=M*a+l*o+v*f,t[1]=M*e+l*i+v*s,t[2]=M*u+l*c+v*h,t[3]=m*a+p*o+d*f,t[4]=m*e+p*i+d*s,t[5]=m*u+p*c+d*h,t[6]=A*a+R*o+w*f,t[7]=A*e+R*i+w*s,t[8]=A*u+R*c+w*h,t},e.mul=e.multiply,e.translate=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=n[3],i=n[4],c=n[5],f=n[6],s=n[7],h=n[8],M=r[0],l=r[1];return t[0]=a,t[1]=e,t[2]=u,t[3]=o,t[4]=i,t[5]=c,t[6]=M*a+l*o+f,t[7]=M*e+l*i+s,t[8]=M*u+l*c+h,t},e.rotate=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=n[3],i=n[4],c=n[5],f=n[6],s=n[7],h=n[8],M=Math.sin(r),l=Math.cos(r);return t[0]=l*a+M*o,t[1]=l*e+M*i,t[2]=l*u+M*c,t[3]=l*o-M*a,t[4]=l*i-M*e,t[5]=l*c-M*u,t[6]=f,t[7]=s,t[8]=h,t},e.scale=function(t,n,r){var a=r[0],e=r[1];return t[0]=a*n[0],t[1]=a*n[1],t[2]=a*n[2],t[3]=e*n[3],t[4]=e*n[4],t[5]=e*n[5],t[6]=n[6],t[7]=n[7],t[8]=n[8],t},e.fromTranslation=function(t,n){return t[0]=1,t[1]=0,t[2]=0,t[3]=0,t[4]=1,t[5]=0,t[6]=n[0],t[7]=n[1],t[8]=1,t},e.fromRotation=function(t,n){var r=Math.sin(n),a=Math.cos(n);return 
t[0]=a,t[1]=r,t[2]=0,t[3]=-r,t[4]=a,t[5]=0,t[6]=0,t[7]=0,t[8]=1,t},e.fromScaling=function(t,n){return t[0]=n[0],t[1]=0,t[2]=0,t[3]=0,t[4]=n[1],t[5]=0,t[6]=0,t[7]=0,t[8]=1,t},e.fromMat2d=function(t,n){return t[0]=n[0],t[1]=n[1],t[2]=0,t[3]=n[2],t[4]=n[3],t[5]=0,t[6]=n[4],t[7]=n[5],t[8]=1,t},e.fromQuat=function(t,n){var r=n[0],a=n[1],e=n[2],u=n[3],o=r+r,i=a+a,c=e+e,f=r*o,s=a*o,h=a*i,M=e*o,l=e*i,v=e*c,m=u*o,p=u*i,d=u*c;return t[0]=1-h-v,t[3]=s-d,t[6]=M+p,t[1]=s+d,t[4]=1-f-v,t[7]=l-m,t[2]=M-p,t[5]=l+m,t[8]=1-f-h,t},e.normalFromMat4=function(t,n){var r=n[0],a=n[1],e=n[2],u=n[3],o=n[4],i=n[5],c=n[6],f=n[7],s=n[8],h=n[9],M=n[10],l=n[11],v=n[12],m=n[13],p=n[14],d=n[15],A=r*i-a*o,R=r*c-e*o,w=r*f-u*o,q=a*c-e*i,Y=a*f-u*i,g=e*f-u*c,y=s*m-h*v,x=s*p-M*v,P=s*d-l*v,E=h*p-M*m,T=h*d-l*m,b=M*d-l*p,D=A*b-R*T+w*E+q*P-Y*x+g*y;return D?(D=1/D,t[0]=(i*b-c*T+f*E)*D,t[1]=(c*P-o*b-f*x)*D,t[2]=(o*T-i*P+f*y)*D,t[3]=(e*T-a*b-u*E)*D,t[4]=(r*b-e*P+u*x)*D,t[5]=(a*P-r*T-u*y)*D,t[6]=(m*g-p*Y+d*q)*D,t[7]=(p*w-v*g-d*R)*D,t[8]=(v*Y-m*w+d*A)*D,t):null},e.str=function(t){return"mat3("+t[0]+", "+t[1]+", "+t[2]+", "+t[3]+", "+t[4]+", "+t[5]+", "+t[6]+", "+t[7]+", "+t[8]+")"},e.frob=function(t){return Math.sqrt(Math.pow(t[0],2)+Math.pow(t[1],2)+Math.pow(t[2],2)+Math.pow(t[3],2)+Math.pow(t[4],2)+Math.pow(t[5],2)+Math.pow(t[6],2)+Math.pow(t[7],2)+Math.pow(t[8],2))},t.exports=e},function(t,n,r){var a=r(1),e={};e.create=function(){var t=new a.ARRAY_TYPE(16);return t[0]=1,t[1]=0,t[2]=0,t[3]=0,t[4]=0,t[5]=1,t[6]=0,t[7]=0,t[8]=0,t[9]=0,t[10]=1,t[11]=0,t[12]=0,t[13]=0,t[14]=0,t[15]=1,t},e.clone=function(t){var n=new a.ARRAY_TYPE(16);return n[0]=t[0],n[1]=t[1],n[2]=t[2],n[3]=t[3],n[4]=t[4],n[5]=t[5],n[6]=t[6],n[7]=t[7],n[8]=t[8],n[9]=t[9],n[10]=t[10],n[11]=t[11],n[12]=t[12],n[13]=t[13],n[14]=t[14],n[15]=t[15],n},e.copy=function(t,n){return t[0]=n[0],t[1]=n[1],t[2]=n[2],t[3]=n[3],t[4]=n[4],t[5]=n[5],t[6]=n[6],t[7]=n[7],t[8]=n[8],t[9]=n[9],t[10]=n[10],t[11]=n[11],t[12]=n[12],t[13]=n[13],t[14]=n[14],t[15]=n[15],t},e.identity=function(t){return t[0]=1,t[1]=0,t[2]=0,t[3]=0,t[4]=0,t[5]=1,t[6]=0,t[7]=0,t[8]=0,t[9]=0,t[10]=1,t[11]=0,t[12]=0,t[13]=0,t[14]=0,t[15]=1,t},e.transpose=function(t,n){if(t===n){var r=n[1],a=n[2],e=n[3],u=n[6],o=n[7],i=n[11];t[1]=n[4],t[2]=n[8],t[3]=n[12],t[4]=r,t[6]=n[9],t[7]=n[13],t[8]=a,t[9]=u,t[11]=n[14],t[12]=e,t[13]=o,t[14]=i}else t[0]=n[0],t[1]=n[4],t[2]=n[8],t[3]=n[12],t[4]=n[1],t[5]=n[5],t[6]=n[9],t[7]=n[13],t[8]=n[2],t[9]=n[6],t[10]=n[10],t[11]=n[14],t[12]=n[3],t[13]=n[7],t[14]=n[11],t[15]=n[15];return t},e.invert=function(t,n){var r=n[0],a=n[1],e=n[2],u=n[3],o=n[4],i=n[5],c=n[6],f=n[7],s=n[8],h=n[9],M=n[10],l=n[11],v=n[12],m=n[13],p=n[14],d=n[15],A=r*i-a*o,R=r*c-e*o,w=r*f-u*o,q=a*c-e*i,Y=a*f-u*i,g=e*f-u*c,y=s*m-h*v,x=s*p-M*v,P=s*d-l*v,E=h*p-M*m,T=h*d-l*m,b=M*d-l*p,D=A*b-R*T+w*E+q*P-Y*x+g*y;return D?(D=1/D,t[0]=(i*b-c*T+f*E)*D,t[1]=(e*T-a*b-u*E)*D,t[2]=(m*g-p*Y+d*q)*D,t[3]=(M*Y-h*g-l*q)*D,t[4]=(c*P-o*b-f*x)*D,t[5]=(r*b-e*P+u*x)*D,t[6]=(p*w-v*g-d*R)*D,t[7]=(s*g-M*w+l*R)*D,t[8]=(o*T-i*P+f*y)*D,t[9]=(a*P-r*T-u*y)*D,t[10]=(v*Y-m*w+d*A)*D,t[11]=(h*w-s*Y-l*A)*D,t[12]=(i*x-o*E-c*y)*D,t[13]=(r*E-a*x+e*y)*D,t[14]=(m*R-v*q-p*A)*D,t[15]=(s*q-h*R+M*A)*D,t):null},e.adjoint=function(t,n){var r=n[0],a=n[1],e=n[2],u=n[3],o=n[4],i=n[5],c=n[6],f=n[7],s=n[8],h=n[9],M=n[10],l=n[11],v=n[12],m=n[13],p=n[14],d=n[15];return 
t[0]=i*(M*d-l*p)-h*(c*d-f*p)+m*(c*l-f*M),t[1]=-(a*(M*d-l*p)-h*(e*d-u*p)+m*(e*l-u*M)),t[2]=a*(c*d-f*p)-i*(e*d-u*p)+m*(e*f-u*c),t[3]=-(a*(c*l-f*M)-i*(e*l-u*M)+h*(e*f-u*c)),t[4]=-(o*(M*d-l*p)-s*(c*d-f*p)+v*(c*l-f*M)),t[5]=r*(M*d-l*p)-s*(e*d-u*p)+v*(e*l-u*M),t[6]=-(r*(c*d-f*p)-o*(e*d-u*p)+v*(e*f-u*c)),t[7]=r*(c*l-f*M)-o*(e*l-u*M)+s*(e*f-u*c),t[8]=o*(h*d-l*m)-s*(i*d-f*m)+v*(i*l-f*h),t[9]=-(r*(h*d-l*m)-s*(a*d-u*m)+v*(a*l-u*h)),t[10]=r*(i*d-f*m)-o*(a*d-u*m)+v*(a*f-u*i),t[11]=-(r*(i*l-f*h)-o*(a*l-u*h)+s*(a*f-u*i)),t[12]=-(o*(h*p-M*m)-s*(i*p-c*m)+v*(i*M-c*h)),t[13]=r*(h*p-M*m)-s*(a*p-e*m)+v*(a*M-e*h),t[14]=-(r*(i*p-c*m)-o*(a*p-e*m)+v*(a*c-e*i)),t[15]=r*(i*M-c*h)-o*(a*M-e*h)+s*(a*c-e*i),t},e.determinant=function(t){var n=t[0],r=t[1],a=t[2],e=t[3],u=t[4],o=t[5],i=t[6],c=t[7],f=t[8],s=t[9],h=t[10],M=t[11],l=t[12],v=t[13],m=t[14],p=t[15],d=n*o-r*u,A=n*i-a*u,R=n*c-e*u,w=r*i-a*o,q=r*c-e*o,Y=a*c-e*i,g=f*v-s*l,y=f*m-h*l,x=f*p-M*l,P=s*m-h*v,E=s*p-M*v,T=h*p-M*m;return d*T-A*E+R*P+w*x-q*y+Y*g},e.multiply=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=n[3],i=n[4],c=n[5],f=n[6],s=n[7],h=n[8],M=n[9],l=n[10],v=n[11],m=n[12],p=n[13],d=n[14],A=n[15],R=r[0],w=r[1],q=r[2],Y=r[3];return t[0]=R*a+w*i+q*h+Y*m,t[1]=R*e+w*c+q*M+Y*p,t[2]=R*u+w*f+q*l+Y*d,t[3]=R*o+w*s+q*v+Y*A,R=r[4],w=r[5],q=r[6],Y=r[7],t[4]=R*a+w*i+q*h+Y*m,t[5]=R*e+w*c+q*M+Y*p,t[6]=R*u+w*f+q*l+Y*d,t[7]=R*o+w*s+q*v+Y*A,R=r[8],w=r[9],q=r[10],Y=r[11],t[8]=R*a+w*i+q*h+Y*m,t[9]=R*e+w*c+q*M+Y*p,t[10]=R*u+w*f+q*l+Y*d,t[11]=R*o+w*s+q*v+Y*A,R=r[12],w=r[13],q=r[14],Y=r[15],t[12]=R*a+w*i+q*h+Y*m,t[13]=R*e+w*c+q*M+Y*p,t[14]=R*u+w*f+q*l+Y*d,t[15]=R*o+w*s+q*v+Y*A,t},e.mul=e.multiply,e.translate=function(t,n,r){var a,e,u,o,i,c,f,s,h,M,l,v,m=r[0],p=r[1],d=r[2];return n===t?(t[12]=n[0]*m+n[4]*p+n[8]*d+n[12],t[13]=n[1]*m+n[5]*p+n[9]*d+n[13],t[14]=n[2]*m+n[6]*p+n[10]*d+n[14],t[15]=n[3]*m+n[7]*p+n[11]*d+n[15]):(a=n[0],e=n[1],u=n[2],o=n[3],i=n[4],c=n[5],f=n[6],s=n[7],h=n[8],M=n[9],l=n[10],v=n[11],t[0]=a,t[1]=e,t[2]=u,t[3]=o,t[4]=i,t[5]=c,t[6]=f,t[7]=s,t[8]=h,t[9]=M,t[10]=l,t[11]=v,t[12]=a*m+i*p+h*d+n[12],t[13]=e*m+c*p+M*d+n[13],t[14]=u*m+f*p+l*d+n[14],t[15]=o*m+s*p+v*d+n[15]),t},e.scale=function(t,n,r){var a=r[0],e=r[1],u=r[2];return t[0]=n[0]*a,t[1]=n[1]*a,t[2]=n[2]*a,t[3]=n[3]*a,t[4]=n[4]*e,t[5]=n[5]*e,t[6]=n[6]*e,t[7]=n[7]*e,t[8]=n[8]*u,t[9]=n[9]*u,t[10]=n[10]*u,t[11]=n[11]*u,t[12]=n[12],t[13]=n[13],t[14]=n[14],t[15]=n[15],t},e.rotate=function(t,n,r,e){var u,o,i,c,f,s,h,M,l,v,m,p,d,A,R,w,q,Y,g,y,x,P,E,T,b=e[0],D=e[1],L=e[2],_=Math.sqrt(b*b+D*D+L*L);return Math.abs(_)<a.EPSILON?null:(_=1/_,b*=_,D*=_,L*=_,u=Math.sin(r),o=Math.cos(r),i=1-o,c=n[0],f=n[1],s=n[2],h=n[3],M=n[4],l=n[5],v=n[6],m=n[7],p=n[8],d=n[9],A=n[10],R=n[11],w=b*b*i+o,q=D*b*i+L*u,Y=L*b*i-D*u,g=b*D*i-L*u,y=D*D*i+o,x=L*D*i+b*u,P=b*L*i+D*u,E=D*L*i-b*u,T=L*L*i+o,t[0]=c*w+M*q+p*Y,t[1]=f*w+l*q+d*Y,t[2]=s*w+v*q+A*Y,t[3]=h*w+m*q+R*Y,t[4]=c*g+M*y+p*x,t[5]=f*g+l*y+d*x,t[6]=s*g+v*y+A*x,t[7]=h*g+m*y+R*x,t[8]=c*P+M*E+p*T,t[9]=f*P+l*E+d*T,t[10]=s*P+v*E+A*T,t[11]=h*P+m*E+R*T,n!==t&&(t[12]=n[12],t[13]=n[13],t[14]=n[14],t[15]=n[15]),t)},e.rotateX=function(t,n,r){var a=Math.sin(r),e=Math.cos(r),u=n[4],o=n[5],i=n[6],c=n[7],f=n[8],s=n[9],h=n[10],M=n[11];return n!==t&&(t[0]=n[0],t[1]=n[1],t[2]=n[2],t[3]=n[3],t[12]=n[12],t[13]=n[13],t[14]=n[14],t[15]=n[15]),t[4]=u*e+f*a,t[5]=o*e+s*a,t[6]=i*e+h*a,t[7]=c*e+M*a,t[8]=f*e-u*a,t[9]=s*e-o*a,t[10]=h*e-i*a,t[11]=M*e-c*a,t},e.rotateY=function(t,n,r){var a=Math.sin(r),e=Math.cos(r),u=n[0],o=n[1],i=n[2],c=n[3],f=n[8],s=n[9],h=n[10],M=n[11];return 
n!==t&&(t[4]=n[4],t[5]=n[5],t[6]=n[6],t[7]=n[7],t[12]=n[12],t[13]=n[13],t[14]=n[14],t[15]=n[15]),t[0]=u*e-f*a,t[1]=o*e-s*a,t[2]=i*e-h*a,t[3]=c*e-M*a,t[8]=u*a+f*e,t[9]=o*a+s*e,t[10]=i*a+h*e,t[11]=c*a+M*e,t},e.rotateZ=function(t,n,r){var a=Math.sin(r),e=Math.cos(r),u=n[0],o=n[1],i=n[2],c=n[3],f=n[4],s=n[5],h=n[6],M=n[7];return n!==t&&(t[8]=n[8],t[9]=n[9],t[10]=n[10],t[11]=n[11],t[12]=n[12],t[13]=n[13],t[14]=n[14],t[15]=n[15]),t[0]=u*e+f*a,t[1]=o*e+s*a,t[2]=i*e+h*a,t[3]=c*e+M*a,t[4]=f*e-u*a,t[5]=s*e-o*a,t[6]=h*e-i*a,t[7]=M*e-c*a,t},e.fromTranslation=function(t,n){return t[0]=1,t[1]=0,t[2]=0,t[3]=0,t[4]=0,t[5]=1,t[6]=0,t[7]=0,t[8]=0,t[9]=0,t[10]=1,t[11]=0,t[12]=n[0],t[13]=n[1],t[14]=n[2],t[15]=1,t},e.fromScaling=function(t,n){return t[0]=n[0],t[1]=0,t[2]=0,t[3]=0,t[4]=0,t[5]=n[1],t[6]=0,t[7]=0,t[8]=0,t[9]=0,t[10]=n[2],t[11]=0,t[12]=0,t[13]=0,t[14]=0,t[15]=1,t},e.fromRotation=function(t,n,r){var e,u,o,i=r[0],c=r[1],f=r[2],s=Math.sqrt(i*i+c*c+f*f);return Math.abs(s)<a.EPSILON?null:(s=1/s,i*=s,c*=s,f*=s,e=Math.sin(n),u=Math.cos(n),o=1-u,t[0]=i*i*o+u,t[1]=c*i*o+f*e,t[2]=f*i*o-c*e,t[3]=0,t[4]=i*c*o-f*e,t[5]=c*c*o+u,t[6]=f*c*o+i*e,t[7]=0,t[8]=i*f*o+c*e,t[9]=c*f*o-i*e,t[10]=f*f*o+u,t[11]=0,t[12]=0,t[13]=0,t[14]=0,t[15]=1,t)},e.fromXRotation=function(t,n){var r=Math.sin(n),a=Math.cos(n);return t[0]=1,t[1]=0,t[2]=0,t[3]=0,t[4]=0,t[5]=a,t[6]=r,t[7]=0,t[8]=0,t[9]=-r,t[10]=a,t[11]=0,t[12]=0,t[13]=0,t[14]=0,t[15]=1,t},e.fromYRotation=function(t,n){var r=Math.sin(n),a=Math.cos(n);return t[0]=a,t[1]=0,t[2]=-r,t[3]=0,t[4]=0,t[5]=1,t[6]=0,t[7]=0,t[8]=r,t[9]=0,t[10]=a,t[11]=0,t[12]=0,t[13]=0,t[14]=0,t[15]=1,t},e.fromZRotation=function(t,n){var r=Math.sin(n),a=Math.cos(n);return t[0]=a,t[1]=r,t[2]=0,t[3]=0,t[4]=-r,t[5]=a,t[6]=0,t[7]=0,t[8]=0,t[9]=0,t[10]=1,t[11]=0,t[12]=0,t[13]=0,t[14]=0,t[15]=1,t},e.fromRotationTranslation=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=n[3],i=a+a,c=e+e,f=u+u,s=a*i,h=a*c,M=a*f,l=e*c,v=e*f,m=u*f,p=o*i,d=o*c,A=o*f;return t[0]=1-(l+m),t[1]=h+A,t[2]=M-d,t[3]=0,t[4]=h-A,t[5]=1-(s+m),t[6]=v+p,t[7]=0,t[8]=M+d,t[9]=v-p,t[10]=1-(s+l),t[11]=0,t[12]=r[0],t[13]=r[1],t[14]=r[2],t[15]=1,t},e.fromRotationTranslationScale=function(t,n,r,a){var e=n[0],u=n[1],o=n[2],i=n[3],c=e+e,f=u+u,s=o+o,h=e*c,M=e*f,l=e*s,v=u*f,m=u*s,p=o*s,d=i*c,A=i*f,R=i*s,w=a[0],q=a[1],Y=a[2];return t[0]=(1-(v+p))*w,t[1]=(M+R)*w,t[2]=(l-A)*w,t[3]=0,t[4]=(M-R)*q,t[5]=(1-(h+p))*q,t[6]=(m+d)*q,t[7]=0,t[8]=(l+A)*Y,t[9]=(m-d)*Y,t[10]=(1-(h+v))*Y,t[11]=0,t[12]=r[0],t[13]=r[1],t[14]=r[2],t[15]=1,t},e.fromRotationTranslationScaleOrigin=function(t,n,r,a,e){var u=n[0],o=n[1],i=n[2],c=n[3],f=u+u,s=o+o,h=i+i,M=u*f,l=u*s,v=u*h,m=o*s,p=o*h,d=i*h,A=c*f,R=c*s,w=c*h,q=a[0],Y=a[1],g=a[2],y=e[0],x=e[1],P=e[2];return t[0]=(1-(m+d))*q,t[1]=(l+w)*q,t[2]=(v-R)*q,t[3]=0,t[4]=(l-w)*Y,t[5]=(1-(M+d))*Y,t[6]=(p+A)*Y,t[7]=0,t[8]=(v+R)*g,t[9]=(p-A)*g,t[10]=(1-(M+m))*g,t[11]=0,t[12]=r[0]+y-(t[0]*y+t[4]*x+t[8]*P),t[13]=r[1]+x-(t[1]*y+t[5]*x+t[9]*P),t[14]=r[2]+P-(t[2]*y+t[6]*x+t[10]*P),t[15]=1,t},e.fromQuat=function(t,n){var r=n[0],a=n[1],e=n[2],u=n[3],o=r+r,i=a+a,c=e+e,f=r*o,s=a*o,h=a*i,M=e*o,l=e*i,v=e*c,m=u*o,p=u*i,d=u*c;return t[0]=1-h-v,t[1]=s+d,t[2]=M-p,t[3]=0,t[4]=s-d,t[5]=1-f-v,t[6]=l+m,t[7]=0,t[8]=M+p,t[9]=l-m,t[10]=1-f-h,t[11]=0,t[12]=0,t[13]=0,t[14]=0,t[15]=1,t},e.frustum=function(t,n,r,a,e,u,o){var i=1/(r-n),c=1/(e-a),f=1/(u-o);return t[0]=2*u*i,t[1]=0,t[2]=0,t[3]=0,t[4]=0,t[5]=2*u*c,t[6]=0,t[7]=0,t[8]=(r+n)*i,t[9]=(e+a)*c,t[10]=(o+u)*f,t[11]=-1,t[12]=0,t[13]=0,t[14]=o*u*2*f,t[15]=0,t},e.perspective=function(t,n,r,a,e){var 
u=1/Math.tan(n/2),o=1/(a-e);return t[0]=u/r,t[1]=0,t[2]=0,t[3]=0,t[4]=0,t[5]=u,t[6]=0,t[7]=0,t[8]=0,t[9]=0,t[10]=(e+a)*o,t[11]=-1,t[12]=0,t[13]=0,t[14]=2*e*a*o,t[15]=0,t},e.perspectiveFromFieldOfView=function(t,n,r,a){var e=Math.tan(n.upDegrees*Math.PI/180),u=Math.tan(n.downDegrees*Math.PI/180),o=Math.tan(n.leftDegrees*Math.PI/180),i=Math.tan(n.rightDegrees*Math.PI/180),c=2/(o+i),f=2/(e+u);return t[0]=c,t[1]=0,t[2]=0,t[3]=0,t[4]=0,t[5]=f,t[6]=0,t[7]=0,t[8]=-((o-i)*c*.5),t[9]=(e-u)*f*.5,t[10]=a/(r-a),t[11]=-1,t[12]=0,t[13]=0,t[14]=a*r/(r-a),t[15]=0,t},e.ortho=function(t,n,r,a,e,u,o){var i=1/(n-r),c=1/(a-e),f=1/(u-o);return t[0]=-2*i,t[1]=0,t[2]=0,t[3]=0,t[4]=0,t[5]=-2*c,t[6]=0,t[7]=0,t[8]=0,t[9]=0,t[10]=2*f,t[11]=0,t[12]=(n+r)*i,t[13]=(e+a)*c,t[14]=(o+u)*f,t[15]=1,t},e.lookAt=function(t,n,r,u){var o,i,c,f,s,h,M,l,v,m,p=n[0],d=n[1],A=n[2],R=u[0],w=u[1],q=u[2],Y=r[0],g=r[1],y=r[2];return Math.abs(p-Y)<a.EPSILON&&Math.abs(d-g)<a.EPSILON&&Math.abs(A-y)<a.EPSILON?e.identity(t):(M=p-Y,l=d-g,v=A-y,m=1/Math.sqrt(M*M+l*l+v*v),M*=m,l*=m,v*=m,o=w*v-q*l,i=q*M-R*v,c=R*l-w*M,m=Math.sqrt(o*o+i*i+c*c),m?(m=1/m,o*=m,i*=m,c*=m):(o=0,i=0,c=0),f=l*c-v*i,s=v*o-M*c,h=M*i-l*o,m=Math.sqrt(f*f+s*s+h*h),m?(m=1/m,f*=m,s*=m,h*=m):(f=0,s=0,h=0),t[0]=o,t[1]=f,t[2]=M,t[3]=0,t[4]=i,t[5]=s,t[6]=l,t[7]=0,t[8]=c,t[9]=h,t[10]=v,t[11]=0,t[12]=-(o*p+i*d+c*A),t[13]=-(f*p+s*d+h*A),t[14]=-(M*p+l*d+v*A),t[15]=1,t)},e.str=function(t){return"mat4("+t[0]+", "+t[1]+", "+t[2]+", "+t[3]+", "+t[4]+", "+t[5]+", "+t[6]+", "+t[7]+", "+t[8]+", "+t[9]+", "+t[10]+", "+t[11]+", "+t[12]+", "+t[13]+", "+t[14]+", "+t[15]+")"},e.frob=function(t){return Math.sqrt(Math.pow(t[0],2)+Math.pow(t[1],2)+Math.pow(t[2],2)+Math.pow(t[3],2)+Math.pow(t[4],2)+Math.pow(t[5],2)+Math.pow(t[6],2)+Math.pow(t[7],2)+Math.pow(t[8],2)+Math.pow(t[9],2)+Math.pow(t[10],2)+Math.pow(t[11],2)+Math.pow(t[12],2)+Math.pow(t[13],2)+Math.pow(t[14],2)+Math.pow(t[15],2))},t.exports=e},function(t,n,r){var a=r(1),e=r(4),u=r(7),o=r(8),i={};i.create=function(){var t=new a.ARRAY_TYPE(4);return t[0]=0,t[1]=0,t[2]=0,t[3]=1,t},i.rotationTo=function(){var t=u.create(),n=u.fromValues(1,0,0),r=u.fromValues(0,1,0);return function(a,e,o){var c=u.dot(e,o);return-.999999>c?(u.cross(t,n,e),u.length(t)<1e-6&&u.cross(t,r,e),u.normalize(t,t),i.setAxisAngle(a,t,Math.PI),a):c>.999999?(a[0]=0,a[1]=0,a[2]=0,a[3]=1,a):(u.cross(t,e,o),a[0]=t[0],a[1]=t[1],a[2]=t[2],a[3]=1+c,i.normalize(a,a))}}(),i.setAxes=function(){var t=e.create();return function(n,r,a,e){return t[0]=a[0],t[3]=a[1],t[6]=a[2],t[1]=e[0],t[4]=e[1],t[7]=e[2],t[2]=-r[0],t[5]=-r[1],t[8]=-r[2],i.normalize(n,i.fromMat3(n,t))}}(),i.clone=o.clone,i.fromValues=o.fromValues,i.copy=o.copy,i.set=o.set,i.identity=function(t){return t[0]=0,t[1]=0,t[2]=0,t[3]=1,t},i.setAxisAngle=function(t,n,r){r=.5*r;var a=Math.sin(r);return t[0]=a*n[0],t[1]=a*n[1],t[2]=a*n[2],t[3]=Math.cos(r),t},i.add=o.add,i.multiply=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=n[3],i=r[0],c=r[1],f=r[2],s=r[3];return t[0]=a*s+o*i+e*f-u*c,t[1]=e*s+o*c+u*i-a*f,t[2]=u*s+o*f+a*c-e*i,t[3]=o*s-a*i-e*c-u*f,t},i.mul=i.multiply,i.scale=o.scale,i.rotateX=function(t,n,r){r*=.5;var a=n[0],e=n[1],u=n[2],o=n[3],i=Math.sin(r),c=Math.cos(r);return t[0]=a*c+o*i,t[1]=e*c+u*i,t[2]=u*c-e*i,t[3]=o*c-a*i,t},i.rotateY=function(t,n,r){r*=.5;var a=n[0],e=n[1],u=n[2],o=n[3],i=Math.sin(r),c=Math.cos(r);return t[0]=a*c-u*i,t[1]=e*c+o*i,t[2]=u*c+a*i,t[3]=o*c-e*i,t},i.rotateZ=function(t,n,r){r*=.5;var a=n[0],e=n[1],u=n[2],o=n[3],i=Math.sin(r),c=Math.cos(r);return 
t[0]=a*c+e*i,t[1]=e*c-a*i,t[2]=u*c+o*i,t[3]=o*c-u*i,t},i.calculateW=function(t,n){var r=n[0],a=n[1],e=n[2];return t[0]=r,t[1]=a,t[2]=e,t[3]=Math.sqrt(Math.abs(1-r*r-a*a-e*e)),t},i.dot=o.dot,i.lerp=o.lerp,i.slerp=function(t,n,r,a){var e,u,o,i,c,f=n[0],s=n[1],h=n[2],M=n[3],l=r[0],v=r[1],m=r[2],p=r[3];return u=f*l+s*v+h*m+M*p,0>u&&(u=-u,l=-l,v=-v,m=-m,p=-p),1-u>1e-6?(e=Math.acos(u),o=Math.sin(e),i=Math.sin((1-a)*e)/o,c=Math.sin(a*e)/o):(i=1-a,c=a),t[0]=i*f+c*l,t[1]=i*s+c*v,t[2]=i*h+c*m,t[3]=i*M+c*p,t},i.sqlerp=function(){var t=i.create(),n=i.create();return function(r,a,e,u,o,c){return i.slerp(t,a,o,c),i.slerp(n,e,u,c),i.slerp(r,t,n,2*c*(1-c)),r}}(),i.invert=function(t,n){var r=n[0],a=n[1],e=n[2],u=n[3],o=r*r+a*a+e*e+u*u,i=o?1/o:0;return t[0]=-r*i,t[1]=-a*i,t[2]=-e*i,t[3]=u*i,t},i.conjugate=function(t,n){return t[0]=-n[0],t[1]=-n[1],t[2]=-n[2],t[3]=n[3],t},i.length=o.length,i.len=i.length,i.squaredLength=o.squaredLength,i.sqrLen=i.squaredLength,i.normalize=o.normalize,i.fromMat3=function(t,n){var r,a=n[0]+n[4]+n[8];if(a>0)r=Math.sqrt(a+1),t[3]=.5*r,r=.5/r,t[0]=(n[5]-n[7])*r,t[1]=(n[6]-n[2])*r,t[2]=(n[1]-n[3])*r;else{var e=0;n[4]>n[0]&&(e=1),n[8]>n[3*e+e]&&(e=2);var u=(e+1)%3,o=(e+2)%3;r=Math.sqrt(n[3*e+e]-n[3*u+u]-n[3*o+o]+1),t[e]=.5*r,r=.5/r,t[3]=(n[3*u+o]-n[3*o+u])*r,t[u]=(n[3*u+e]+n[3*e+u])*r,t[o]=(n[3*o+e]+n[3*e+o])*r}return t},i.str=function(t){return"quat("+t[0]+", "+t[1]+", "+t[2]+", "+t[3]+")"},t.exports=i},function(t,n,r){var a=r(1),e={};e.create=function(){var t=new a.ARRAY_TYPE(3);return t[0]=0,t[1]=0,t[2]=0,t},e.clone=function(t){var n=new a.ARRAY_TYPE(3);return n[0]=t[0],n[1]=t[1],n[2]=t[2],n},e.fromValues=function(t,n,r){var e=new a.ARRAY_TYPE(3);return e[0]=t,e[1]=n,e[2]=r,e},e.copy=function(t,n){return t[0]=n[0],t[1]=n[1],t[2]=n[2],t},e.set=function(t,n,r,a){return t[0]=n,t[1]=r,t[2]=a,t},e.add=function(t,n,r){return t[0]=n[0]+r[0],t[1]=n[1]+r[1],t[2]=n[2]+r[2],t},e.subtract=function(t,n,r){return t[0]=n[0]-r[0],t[1]=n[1]-r[1],t[2]=n[2]-r[2],t},e.sub=e.subtract,e.multiply=function(t,n,r){return t[0]=n[0]*r[0],t[1]=n[1]*r[1],t[2]=n[2]*r[2],t},e.mul=e.multiply,e.divide=function(t,n,r){return t[0]=n[0]/r[0],t[1]=n[1]/r[1],t[2]=n[2]/r[2],t},e.div=e.divide,e.min=function(t,n,r){return t[0]=Math.min(n[0],r[0]),t[1]=Math.min(n[1],r[1]),t[2]=Math.min(n[2],r[2]),t},e.max=function(t,n,r){return t[0]=Math.max(n[0],r[0]),t[1]=Math.max(n[1],r[1]),t[2]=Math.max(n[2],r[2]),t},e.scale=function(t,n,r){return t[0]=n[0]*r,t[1]=n[1]*r,t[2]=n[2]*r,t},e.scaleAndAdd=function(t,n,r,a){return t[0]=n[0]+r[0]*a,t[1]=n[1]+r[1]*a,t[2]=n[2]+r[2]*a,t},e.distance=function(t,n){var r=n[0]-t[0],a=n[1]-t[1],e=n[2]-t[2];return Math.sqrt(r*r+a*a+e*e)},e.dist=e.distance,e.squaredDistance=function(t,n){var r=n[0]-t[0],a=n[1]-t[1],e=n[2]-t[2];return r*r+a*a+e*e},e.sqrDist=e.squaredDistance,e.length=function(t){var n=t[0],r=t[1],a=t[2];return Math.sqrt(n*n+r*r+a*a)},e.len=e.length,e.squaredLength=function(t){var n=t[0],r=t[1],a=t[2];return n*n+r*r+a*a},e.sqrLen=e.squaredLength,e.negate=function(t,n){return t[0]=-n[0],t[1]=-n[1],t[2]=-n[2],t},e.inverse=function(t,n){return t[0]=1/n[0],t[1]=1/n[1],t[2]=1/n[2],t},e.normalize=function(t,n){var r=n[0],a=n[1],e=n[2],u=r*r+a*a+e*e;return u>0&&(u=1/Math.sqrt(u),t[0]=n[0]*u,t[1]=n[1]*u,t[2]=n[2]*u),t},e.dot=function(t,n){return t[0]*n[0]+t[1]*n[1]+t[2]*n[2]},e.cross=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=r[0],i=r[1],c=r[2];return t[0]=e*c-u*i,t[1]=u*o-a*c,t[2]=a*i-e*o,t},e.lerp=function(t,n,r,a){var e=n[0],u=n[1],o=n[2];return 
t[0]=e+a*(r[0]-e),t[1]=u+a*(r[1]-u),t[2]=o+a*(r[2]-o),t},e.hermite=function(t,n,r,a,e,u){var o=u*u,i=o*(2*u-3)+1,c=o*(u-2)+u,f=o*(u-1),s=o*(3-2*u);return t[0]=n[0]*i+r[0]*c+a[0]*f+e[0]*s,t[1]=n[1]*i+r[1]*c+a[1]*f+e[1]*s,t[2]=n[2]*i+r[2]*c+a[2]*f+e[2]*s,t},e.bezier=function(t,n,r,a,e,u){var o=1-u,i=o*o,c=u*u,f=i*o,s=3*u*i,h=3*c*o,M=c*u;return t[0]=n[0]*f+r[0]*s+a[0]*h+e[0]*M,t[1]=n[1]*f+r[1]*s+a[1]*h+e[1]*M,t[2]=n[2]*f+r[2]*s+a[2]*h+e[2]*M,t},e.random=function(t,n){n=n||1;var r=2*a.RANDOM()*Math.PI,e=2*a.RANDOM()-1,u=Math.sqrt(1-e*e)*n;return t[0]=Math.cos(r)*u,t[1]=Math.sin(r)*u,t[2]=e*n,t},e.transformMat4=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=r[3]*a+r[7]*e+r[11]*u+r[15];return o=o||1,t[0]=(r[0]*a+r[4]*e+r[8]*u+r[12])/o,t[1]=(r[1]*a+r[5]*e+r[9]*u+r[13])/o,t[2]=(r[2]*a+r[6]*e+r[10]*u+r[14])/o,t},e.transformMat3=function(t,n,r){var a=n[0],e=n[1],u=n[2];return t[0]=a*r[0]+e*r[3]+u*r[6],t[1]=a*r[1]+e*r[4]+u*r[7],t[2]=a*r[2]+e*r[5]+u*r[8],t},e.transformQuat=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=r[0],i=r[1],c=r[2],f=r[3],s=f*a+i*u-c*e,h=f*e+c*a-o*u,M=f*u+o*e-i*a,l=-o*a-i*e-c*u;return t[0]=s*f+l*-o+h*-c-M*-i,t[1]=h*f+l*-i+M*-o-s*-c,t[2]=M*f+l*-c+s*-i-h*-o,t},e.rotateX=function(t,n,r,a){var e=[],u=[];return e[0]=n[0]-r[0],e[1]=n[1]-r[1],e[2]=n[2]-r[2],u[0]=e[0],u[1]=e[1]*Math.cos(a)-e[2]*Math.sin(a),u[2]=e[1]*Math.sin(a)+e[2]*Math.cos(a),t[0]=u[0]+r[0],t[1]=u[1]+r[1],t[2]=u[2]+r[2],t},e.rotateY=function(t,n,r,a){var e=[],u=[];return e[0]=n[0]-r[0],e[1]=n[1]-r[1],e[2]=n[2]-r[2],u[0]=e[2]*Math.sin(a)+e[0]*Math.cos(a),u[1]=e[1],u[2]=e[2]*Math.cos(a)-e[0]*Math.sin(a),t[0]=u[0]+r[0],t[1]=u[1]+r[1],t[2]=u[2]+r[2],t},e.rotateZ=function(t,n,r,a){var e=[],u=[];return e[0]=n[0]-r[0],e[1]=n[1]-r[1],e[2]=n[2]-r[2],u[0]=e[0]*Math.cos(a)-e[1]*Math.sin(a),u[1]=e[0]*Math.sin(a)+e[1]*Math.cos(a),u[2]=e[2],t[0]=u[0]+r[0],t[1]=u[1]+r[1],t[2]=u[2]+r[2],t},e.forEach=function(){var t=e.create();return function(n,r,a,e,u,o){var i,c;for(r||(r=3),a||(a=0),c=e?Math.min(e*r+a,n.length):n.length,i=a;c>i;i+=r)t[0]=n[i],t[1]=n[i+1],t[2]=n[i+2],u(t,t,o),n[i]=t[0],n[i+1]=t[1],n[i+2]=t[2];return n}}(),e.angle=function(t,n){var r=e.fromValues(t[0],t[1],t[2]),a=e.fromValues(n[0],n[1],n[2]);e.normalize(r,r),e.normalize(a,a);var u=e.dot(r,a);return u>1?0:Math.acos(u)},e.str=function(t){return"vec3("+t[0]+", "+t[1]+", "+t[2]+")"},t.exports=e},function(t,n,r){var a=r(1),e={};e.create=function(){var t=new a.ARRAY_TYPE(4);return t[0]=0,t[1]=0,t[2]=0,t[3]=0,t},e.clone=function(t){var n=new a.ARRAY_TYPE(4);return n[0]=t[0],n[1]=t[1],n[2]=t[2],n[3]=t[3],n},e.fromValues=function(t,n,r,e){var u=new a.ARRAY_TYPE(4);return u[0]=t,u[1]=n,u[2]=r,u[3]=e,u},e.copy=function(t,n){return t[0]=n[0],t[1]=n[1],t[2]=n[2],t[3]=n[3],t},e.set=function(t,n,r,a,e){return t[0]=n,t[1]=r,t[2]=a,t[3]=e,t},e.add=function(t,n,r){return t[0]=n[0]+r[0],t[1]=n[1]+r[1],t[2]=n[2]+r[2],t[3]=n[3]+r[3],t},e.subtract=function(t,n,r){return t[0]=n[0]-r[0],t[1]=n[1]-r[1],t[2]=n[2]-r[2],t[3]=n[3]-r[3],t},e.sub=e.subtract,e.multiply=function(t,n,r){return t[0]=n[0]*r[0],t[1]=n[1]*r[1],t[2]=n[2]*r[2],t[3]=n[3]*r[3],t},e.mul=e.multiply,e.divide=function(t,n,r){return t[0]=n[0]/r[0],t[1]=n[1]/r[1],t[2]=n[2]/r[2],t[3]=n[3]/r[3],t},e.div=e.divide,e.min=function(t,n,r){return t[0]=Math.min(n[0],r[0]),t[1]=Math.min(n[1],r[1]),t[2]=Math.min(n[2],r[2]),t[3]=Math.min(n[3],r[3]),t},e.max=function(t,n,r){return t[0]=Math.max(n[0],r[0]),t[1]=Math.max(n[1],r[1]),t[2]=Math.max(n[2],r[2]),t[3]=Math.max(n[3],r[3]),t},e.scale=function(t,n,r){return 
t[0]=n[0]*r,t[1]=n[1]*r,t[2]=n[2]*r,t[3]=n[3]*r,t},e.scaleAndAdd=function(t,n,r,a){return t[0]=n[0]+r[0]*a,t[1]=n[1]+r[1]*a,t[2]=n[2]+r[2]*a,t[3]=n[3]+r[3]*a,t},e.distance=function(t,n){var r=n[0]-t[0],a=n[1]-t[1],e=n[2]-t[2],u=n[3]-t[3];return Math.sqrt(r*r+a*a+e*e+u*u)},e.dist=e.distance,e.squaredDistance=function(t,n){var r=n[0]-t[0],a=n[1]-t[1],e=n[2]-t[2],u=n[3]-t[3];return r*r+a*a+e*e+u*u},e.sqrDist=e.squaredDistance,e.length=function(t){var n=t[0],r=t[1],a=t[2],e=t[3];return Math.sqrt(n*n+r*r+a*a+e*e)},e.len=e.length,e.squaredLength=function(t){var n=t[0],r=t[1],a=t[2],e=t[3];return n*n+r*r+a*a+e*e},e.sqrLen=e.squaredLength,e.negate=function(t,n){return t[0]=-n[0],t[1]=-n[1],t[2]=-n[2],t[3]=-n[3],t},e.inverse=function(t,n){return t[0]=1/n[0],t[1]=1/n[1],t[2]=1/n[2],t[3]=1/n[3],t},e.normalize=function(t,n){var r=n[0],a=n[1],e=n[2],u=n[3],o=r*r+a*a+e*e+u*u;return o>0&&(o=1/Math.sqrt(o),t[0]=r*o,t[1]=a*o,t[2]=e*o,t[3]=u*o),t},e.dot=function(t,n){return t[0]*n[0]+t[1]*n[1]+t[2]*n[2]+t[3]*n[3]},e.lerp=function(t,n,r,a){var e=n[0],u=n[1],o=n[2],i=n[3];return t[0]=e+a*(r[0]-e),t[1]=u+a*(r[1]-u),t[2]=o+a*(r[2]-o),t[3]=i+a*(r[3]-i),t},e.random=function(t,n){return n=n||1,t[0]=a.RANDOM(),t[1]=a.RANDOM(),t[2]=a.RANDOM(),t[3]=a.RANDOM(),e.normalize(t,t),e.scale(t,t,n),t},e.transformMat4=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=n[3];return t[0]=r[0]*a+r[4]*e+r[8]*u+r[12]*o,t[1]=r[1]*a+r[5]*e+r[9]*u+r[13]*o,t[2]=r[2]*a+r[6]*e+r[10]*u+r[14]*o,t[3]=r[3]*a+r[7]*e+r[11]*u+r[15]*o,t},e.transformQuat=function(t,n,r){var a=n[0],e=n[1],u=n[2],o=r[0],i=r[1],c=r[2],f=r[3],s=f*a+i*u-c*e,h=f*e+c*a-o*u,M=f*u+o*e-i*a,l=-o*a-i*e-c*u;return t[0]=s*f+l*-o+h*-c-M*-i,t[1]=h*f+l*-i+M*-o-s*-c,t[2]=M*f+l*-c+s*-i-h*-o,t[3]=n[3],t},e.forEach=function(){var t=e.create();return function(n,r,a,e,u,o){var i,c;for(r||(r=4),a||(a=0),c=e?Math.min(e*r+a,n.length):n.length,i=a;c>i;i+=r)t[0]=n[i],t[1]=n[i+1],t[2]=n[i+2],t[3]=n[i+3],u(t,t,o),n[i]=t[0],n[i+1]=t[1],n[i+2]=t[2],n[i+3]=t[3];return n}}(),e.str=function(t){return"vec4("+t[0]+", "+t[1]+", "+t[2]+", "+t[3]+")"},t.exports=e},function(t,n,r){var a=r(1),e={};e.create=function(){var t=new a.ARRAY_TYPE(2);return t[0]=0,t[1]=0,t},e.clone=function(t){var n=new a.ARRAY_TYPE(2);return n[0]=t[0],n[1]=t[1],n},e.fromValues=function(t,n){var r=new a.ARRAY_TYPE(2);return r[0]=t,r[1]=n,r},e.copy=function(t,n){return t[0]=n[0],t[1]=n[1],t},e.set=function(t,n,r){return t[0]=n,t[1]=r,t},e.add=function(t,n,r){return t[0]=n[0]+r[0],t[1]=n[1]+r[1],t},e.subtract=function(t,n,r){return t[0]=n[0]-r[0],t[1]=n[1]-r[1],t},e.sub=e.subtract,e.multiply=function(t,n,r){return t[0]=n[0]*r[0],t[1]=n[1]*r[1],t},e.mul=e.multiply,e.divide=function(t,n,r){return t[0]=n[0]/r[0],t[1]=n[1]/r[1],t},e.div=e.divide,e.min=function(t,n,r){return t[0]=Math.min(n[0],r[0]),t[1]=Math.min(n[1],r[1]),t},e.max=function(t,n,r){return t[0]=Math.max(n[0],r[0]),t[1]=Math.max(n[1],r[1]),t},e.scale=function(t,n,r){return t[0]=n[0]*r,t[1]=n[1]*r,t},e.scaleAndAdd=function(t,n,r,a){return t[0]=n[0]+r[0]*a,t[1]=n[1]+r[1]*a,t},e.distance=function(t,n){var r=n[0]-t[0],a=n[1]-t[1];return Math.sqrt(r*r+a*a)},e.dist=e.distance,e.squaredDistance=function(t,n){var r=n[0]-t[0],a=n[1]-t[1];return r*r+a*a},e.sqrDist=e.squaredDistance,e.length=function(t){var n=t[0],r=t[1];return Math.sqrt(n*n+r*r)},e.len=e.length,e.squaredLength=function(t){var n=t[0],r=t[1];return n*n+r*r},e.sqrLen=e.squaredLength,e.negate=function(t,n){return t[0]=-n[0],t[1]=-n[1],t},e.inverse=function(t,n){return t[0]=1/n[0],t[1]=1/n[1],t},e.normalize=function(t,n){var 
r=n[0],a=n[1],e=r*r+a*a;return e>0&&(e=1/Math.sqrt(e),t[0]=n[0]*e,t[1]=n[1]*e),t},e.dot=function(t,n){return t[0]*n[0]+t[1]*n[1]},e.cross=function(t,n,r){var a=n[0]*r[1]-n[1]*r[0];return t[0]=t[1]=0,t[2]=a,t},e.lerp=function(t,n,r,a){var e=n[0],u=n[1];return t[0]=e+a*(r[0]-e),t[1]=u+a*(r[1]-u),t},e.random=function(t,n){n=n||1;var r=2*a.RANDOM()*Math.PI;return t[0]=Math.cos(r)*n,t[1]=Math.sin(r)*n,t},e.transformMat2=function(t,n,r){var a=n[0],e=n[1];return t[0]=r[0]*a+r[2]*e,t[1]=r[1]*a+r[3]*e,t},e.transformMat2d=function(t,n,r){var a=n[0],e=n[1];return t[0]=r[0]*a+r[2]*e+r[4],t[1]=r[1]*a+r[3]*e+r[5],t},e.transformMat3=function(t,n,r){var a=n[0],e=n[1];return t[0]=r[0]*a+r[3]*e+r[6],t[1]=r[1]*a+r[4]*e+r[7],t},e.transformMat4=function(t,n,r){var a=n[0],e=n[1];return t[0]=r[0]*a+r[4]*e+r[12],t[1]=r[1]*a+r[5]*e+r[13],t},e.forEach=function(){var t=e.create();return function(n,r,a,e,u,o){var i,c;for(r||(r=2),a||(a=0),c=e?Math.min(e*r+a,n.length):n.length,i=a;c>i;i+=r)t[0]=n[i],t[1]=n[i+1],u(t,t,o),n[i]=t[0],n[i+1]=t[1];return n}}(),e.str=function(t){return"vec2("+t[0]+", "+t[1]+")"},t.exports=e}])});'use strict';tr.exportTo('tr.b',function(){function clamp(x,lo,hi){return Math.min(Math.max(x,lo),hi);}
-function lerp(percentage,lo,hi){var range=hi-lo;return lo+percentage*range;}
-function normalize(value,lo,hi){return(value-lo)/(hi-lo);}
-function deg2rad(deg){return(Math.PI*deg)/180.0;}
-var tmp_vec2=vec2.create();var tmp_vec2b=vec2.create();var tmp_vec4=vec4.create();var tmp_mat2d=mat2d.create();vec2.createFromArray=function(arr){if(arr.length!=2)
-throw new Error('Should be length 2');var v=vec2.create();vec2.set(v,arr[0],arr[1]);return v;};vec2.createXY=function(x,y){var v=vec2.create();vec2.set(v,x,y);return v;};vec2.toString=function(a){return'['+a[0]+', '+a[1]+']';};vec2.addTwoScaledUnitVectors=function(out,u1,scale1,u2,scale2){vec2.scale(tmp_vec2,u1,scale1);vec2.scale(tmp_vec2b,u2,scale2);vec2.add(out,tmp_vec2,tmp_vec2b);};vec2.interpolatePiecewiseFunction=function(points,x){if(x<points[0][0])
-return points[0][1];for(var i=1;i<points.length;++i){if(x<points[i][0]){var percent=normalize(x,points[i-1][0],points[i][0]);return lerp(percent,points[i-1][1],points[i][1]);}}
-return points[points.length-1][1];};vec3.createXYZ=function(x,y,z){var v=vec3.create();vec3.set(v,x,y,z);return v;};vec3.toString=function(a){return'vec3('+a[0]+', '+a[1]+', '+a[2]+')';}
-mat2d.translateXY=function(out,x,y){vec2.set(tmp_vec2,x,y);mat2d.translate(out,out,tmp_vec2);}
-mat2d.scaleXY=function(out,x,y){vec2.set(tmp_vec2,x,y);mat2d.scale(out,out,tmp_vec2);}
-vec4.unitize=function(out,a){out[0]=a[0]/a[3];out[1]=a[1]/a[3];out[2]=a[2]/a[3];out[3]=1;return out;}
-vec2.copyFromVec4=function(out,a){vec4.unitize(tmp_vec4,a);vec2.copy(out,tmp_vec4);}
-return{clamp:clamp,lerp:lerp,normalize:normalize,deg2rad:deg2rad};});'use strict';tr.exportTo('tr.b',function(){function Rect(){this.x=0;this.y=0;this.width=0;this.height=0;};Rect.fromXYWH=function(x,y,w,h){var rect=new Rect();rect.x=x;rect.y=y;rect.width=w;rect.height=h;return rect;}
+return false;};return{decorate:decorate,define:define,elementIsChildOf:elementIsChildOf};});'use strict';tr.exportTo('tr.b',function(){function Rect(){this.x=0;this.y=0;this.width=0;this.height=0;};Rect.fromXYWH=function(x,y,w,h){var rect=new Rect();rect.x=x;rect.y=y;rect.width=w;rect.height=h;return rect;}
 Rect.fromArray=function(ary){if(ary.length!=4)
 throw new Error('ary.length must be 4');var rect=new Rect();rect.x=ary[0];rect.y=ary[1];rect.width=ary[2];rect.height=ary[3];return rect;}
 Rect.prototype={__proto__:Object.prototype,get left(){return this.x;},get top(){return this.y;},get right(){return this.x+this.width;},get bottom(){return this.y+this.height;},toString:function(){return'Rect('+this.x+', '+this.y+', '+
@@ -3260,7 +3479,8 @@
 return tr.b.Rect.fromXYWH(position[0],position[1],size[0],size[1]);}
 function scrollIntoViewIfNeeded(el){var pr=el.parentElement.getBoundingClientRect();var cr=el.getBoundingClientRect();if(cr.top<pr.top){el.scrollIntoView(true);}else if(cr.bottom>pr.bottom){el.scrollIntoView(false);}}
 function extractUrlString(url){var extracted=url.replace(/url\((.*)\)/,'$1');extracted=extracted.replace(/\"(.*)\"/,'$1');return extracted;}
-return{instantiateTemplate:instantiateTemplate,windowRectForElement:windowRectForElement,scrollIntoViewIfNeeded:scrollIntoViewIfNeeded,extractUrlString:extractUrlString};});'use strict';tr.exportTo('tr.ui.b',function(){if(tr.isHeadless)
+function toThreeDigitLocaleString(value){return value.toLocaleString(undefined,{minimumFractionDigits:3,maximumFractionDigits:3});}
+return{toThreeDigitLocaleString:toThreeDigitLocaleString,instantiateTemplate:instantiateTemplate,windowRectForElement:windowRectForElement,scrollIntoViewIfNeeded:scrollIntoViewIfNeeded,extractUrlString:extractUrlString};});'use strict';tr.exportTo('tr.ui.b',function(){if(tr.isHeadless)
 return{};var THIS_DOC=document.currentScript.ownerDocument;var Overlay=tr.ui.b.define('overlay');Overlay.prototype={__proto__:HTMLDivElement.prototype,decorate:function(){this.classList.add('overlay');this.parentEl_=this.ownerDocument.body;this.visible_=false;this.userCanClose_=true;this.onKeyDown_=this.onKeyDown_.bind(this);this.onClick_=this.onClick_.bind(this);this.onFocusIn_=this.onFocusIn_.bind(this);this.onDocumentClick_=this.onDocumentClick_.bind(this);this.onClose_=this.onClose_.bind(this);this.addEventListener('visible-change',tr.ui.b.Overlay.prototype.onVisibleChange_.bind(this),true);var createShadowRoot=this.createShadowRoot||this.webkitCreateShadowRoot;this.shadow_=createShadowRoot.call(this);this.shadow_.appendChild(tr.ui.b.instantiateTemplate('#overlay-template',THIS_DOC));this.closeBtn_=this.shadow_.querySelector('close-button');this.closeBtn_.addEventListener('click',this.onClose_);this.shadow_.querySelector('overlay-frame').addEventListener('click',this.onClick_);this.observer_=new WebKitMutationObserver(this.didButtonBarMutate_.bind(this));this.observer_.observe(this.shadow_.querySelector('button-bar'),{childList:true});Object.defineProperty(this,'title',{get:function(){return this.shadow_.querySelector('title').textContent;},set:function(title){this.shadow_.querySelector('title').textContent=title;}});},set userCanClose(userCanClose){this.userCanClose_=userCanClose;this.closeBtn_.style.display=userCanClose?'block':'none';},get buttons(){return this.shadow_.querySelector('button-bar');},get visible(){return this.visible_;},set visible(newValue){if(this.visible_===newValue)
 return;this.visible_=newValue;var e=new tr.b.Event('visible-change');this.dispatchEvent(e);},onVisibleChange_:function(){this.visible_?this.show_():this.hide_();},show_:function(){this.parentEl_.appendChild(this);if(this.userCanClose_){this.addEventListener('keydown',this.onKeyDown_.bind(this));this.addEventListener('click',this.onDocumentClick_.bind(this));}
 this.parentEl_.addEventListener('focusin',this.onFocusIn_);this.tabIndex=0;var focusEl=undefined;var elList=this.querySelectorAll('button, input, list, select, a');if(elList.length>0){if(elList[0]===this.closeBtn_){if(elList.length>1)
@@ -3276,22 +3496,24 @@
 return;this.onClose_(e);},onClick_:function(e){e.stopPropagation();},onDocumentClick_:function(e){if(!this.userCanClose_)
 return;this.onClose_(e);}};Overlay.showError=function(msg,opt_err){var o=new Overlay();o.title='Error';o.textContent=msg;if(opt_err){var e=tr.b.normalizeException(opt_err);var stackDiv=document.createElement('pre');stackDiv.textContent=e.stack;stackDiv.style.paddingLeft='8px';stackDiv.style.margin=0;o.appendChild(stackDiv);}
 var b=document.createElement('button');b.textContent='OK';b.addEventListener('click',function(){o.visible=false;});o.buttons.appendChild(b);o.visible=true;return o;}
-return{Overlay:Overlay};});'use strict';tr.exportTo('tr',function(){var Process=tr.model.Process;var Device=tr.model.Device;var Kernel=tr.model.Kernel;var GlobalMemoryDump=tr.model.GlobalMemoryDump;var GlobalInstantEvent=tr.model.GlobalInstantEvent;var FlowEvent=tr.model.FlowEvent;var Alert=tr.model.Alert;var InteractionRecord=tr.model.InteractionRecord;var Sample=tr.model.Sample;function ClockSyncRecord(name,ts,args){this.name=name;this.ts=ts;this.args=args;}
-function Model(){tr.model.EventContainer.call(this);tr.b.EventTarget.decorate(this);this.timestampShiftToZeroAmount_=0;this.faviconHue='blue';this.device=new Device(this);this.kernel=new Kernel(this);this.processes={};this.metadata=[];this.categories=[];this.instantEvents=[];this.flowEvents=[];this.clockSyncRecords=[];this.intrinsicTimeUnit_=undefined;this.stackFrames={};this.samples=[];this.alerts=[];this.interactionRecords=[];this.flowIntervalTree=new tr.b.IntervalTree(function(f){return f.start;},function(f){return f.end;});this.globalMemoryDumps=[];this.userFriendlyCategoryDrivers_=[];this.annotationsByGuid_={};this.modelIndices=undefined;this.importWarnings_=[];this.reportedImportWarnings_={};this.isTimeHighResolution_=undefined;}
-Model.prototype={__proto__:tr.model.EventContainer.prototype,iterateAllEventsInThisContainer:function(eventTypePredicate,callback,opt_this){if(eventTypePredicate.call(opt_this,GlobalMemoryDump))
+return{Overlay:Overlay};});'use strict';tr.exportTo('tr',function(){var Process=tr.model.Process;var Device=tr.model.Device;var Kernel=tr.model.Kernel;var GlobalMemoryDump=tr.model.GlobalMemoryDump;var GlobalInstantEvent=tr.model.GlobalInstantEvent;var FlowEvent=tr.model.FlowEvent;var Alert=tr.model.Alert;var Sample=tr.model.Sample;function Model(){tr.model.EventContainer.call(this);tr.b.EventTarget.decorate(this);this.timestampShiftToZeroAmount_=0;this.faviconHue='blue';this.device=new Device(this);this.kernel=new Kernel(this);this.processes={};this.metadata=[];this.categories=[];this.instantEvents=[];this.flowEvents=[];this.clockSyncManager=new tr.model.ClockSyncManager();this.clockSyncRecords=[];this.intrinsicTimeUnit_=undefined;this.stackFrames={};this.samples=[];this.alerts=[];this.userModel=new tr.model.um.UserModel(this);this.flowIntervalTree=new tr.b.IntervalTree((f)=>f.start,(f)=>f.end);this.globalMemoryDumps=[];this.userFriendlyCategoryDrivers_=[];this.annotationsByGuid_={};this.modelIndices=undefined;this.stats=new tr.model.ModelStats();this.importWarnings_=[];this.reportedImportWarnings_={};this.isTimeHighResolution_=undefined;this.patchupsToApply_=[];this.doesHelperGUIDSupportThisModel_={};this.helpersByConstructorGUID_={};}
+Model.prototype={__proto__:tr.model.EventContainer.prototype,getOrCreateHelper:function(constructor){if(!constructor.guid)
+throw new Error('Helper constructors must have GUIDs');if(this.helpersByConstructorGUID_[constructor.guid]===undefined){if(this.doesHelperGUIDSupportThisModel_[constructor.guid]===undefined){this.doesHelperGUIDSupportThisModel_[constructor.guid]=constructor.supportsModel(this);}
+if(!this.doesHelperGUIDSupportThisModel_[constructor.guid])
+return undefined;this.helpersByConstructorGUID_[constructor.guid]=new constructor(this);}
+return this.helpersByConstructorGUID_[constructor.guid];},iterateAllEventsInThisContainer:function(eventTypePredicate,callback,opt_this){if(eventTypePredicate.call(opt_this,GlobalMemoryDump))
 this.globalMemoryDumps.forEach(callback,opt_this);if(eventTypePredicate.call(opt_this,GlobalInstantEvent))
 this.instantEvents.forEach(callback,opt_this);if(eventTypePredicate.call(opt_this,FlowEvent))
 this.flowEvents.forEach(callback,opt_this);if(eventTypePredicate.call(opt_this,Alert))
-this.alerts.forEach(callback,opt_this);if(eventTypePredicate.call(opt_this,InteractionRecord))
-this.interactionRecords.forEach(callback,opt_this);if(eventTypePredicate.call(opt_this,Sample))
-this.samples.forEach(callback,opt_this);},iterateAllChildEventContainers:function(callback,opt_this){callback.call(opt_this,this.device);callback.call(opt_this,this.kernel);for(var pid in this.processes)
+this.alerts.forEach(callback,opt_this);if(eventTypePredicate.call(opt_this,Sample))
+this.samples.forEach(callback,opt_this);},iterateAllChildEventContainers:function(callback,opt_this){callback.call(opt_this,this.userModel);callback.call(opt_this,this.device);callback.call(opt_this,this.kernel);for(var pid in this.processes)
 callback.call(opt_this,this.processes[pid]);},iterateAllPersistableObjects:function(callback){this.kernel.iterateAllPersistableObjects(callback);for(var pid in this.processes)
 this.processes[pid].iterateAllPersistableObjects(callback);},updateBounds:function(){this.bounds.reset();var bounds=this.bounds;this.iterateAllChildEventContainers(function(ec){ec.updateBounds();bounds.addRange(ec.bounds);});this.iterateAllEventsInThisContainer(function(eventConstructor){return true;},function(event){event.addBoundsToRange(bounds);});},shiftWorldToZero:function(){var shiftAmount=-this.bounds.min;this.timestampShiftToZeroAmount_=shiftAmount;this.iterateAllChildEventContainers(function(ec){ec.shiftTimestampsForward(shiftAmount);});this.iterateAllEventsInThisContainer(function(eventConstructor){return true;},function(event){event.start+=shiftAmount;});this.updateBounds();},convertTimestampToModelTime:function(sourceClockDomainName,ts){if(sourceClockDomainName!=='traceEventClock')
-throw new Error('Only traceEventClock is supported.');return tr.b.u.Units.timestampFromUs(ts)+
+throw new Error('Only traceEventClock is supported.');return tr.v.Unit.timestampFromUs(ts)+
 this.timestampShiftToZeroAmount_;},get numProcesses(){var n=0;for(var p in this.processes)
 n++;return n;},getProcess:function(pid){return this.processes[pid];},getOrCreateProcess:function(pid){if(!this.processes[pid])
-this.processes[pid]=new Process(this,pid);return this.processes[pid];},pushInstantEvent:function(instantEvent){this.instantEvents.push(instantEvent);},addStackFrame:function(stackFrame){if(this.stackFrames[stackFrame.id])
-throw new Error('Stack frame already exists');this.stackFrames[stackFrame.id]=stackFrame;return stackFrame;},addInteractionRecord:function(ir){this.interactionRecords.push(ir);return ir;},getClockSyncRecordsNamed:function(name){return this.clockSyncRecords.filter(function(x){return x.name===name;});},updateCategories_:function(){var categoriesDict={};this.device.addCategoriesToDict(categoriesDict);this.kernel.addCategoriesToDict(categoriesDict);for(var pid in this.processes)
+this.processes[pid]=new Process(this,pid);return this.processes[pid];},addStackFrame:function(stackFrame){if(this.stackFrames[stackFrame.id])
+throw new Error('Stack frame already exists');this.stackFrames[stackFrame.id]=stackFrame;return stackFrame;},getClockSyncRecordsWithSyncId:function(syncId){return this.clockSyncRecords.filter(function(x){return x.syncId===syncId;});},updateCategories_:function(){var categoriesDict={};this.userModel.addCategoriesToDict(categoriesDict);this.device.addCategoriesToDict(categoriesDict);this.kernel.addCategoriesToDict(categoriesDict);for(var pid in this.processes)
 this.processes[pid].addCategoriesToDict(categoriesDict);this.categories=[];for(var category in categoriesDict)
 if(category!='')
 this.categories.push(category);},getAllThreads:function(){var threads=[];for(var tid in this.kernel.threads){threads.push(process.threads[tid]);}
@@ -3302,13 +3524,15 @@
 throw new Error('Annotation with undefined guid given');this.annotationsByGuid_[annotation.guid]=annotation;tr.b.dispatchSimpleEvent(this,'annotationChange');},removeAnnotation:function(annotation){this.annotationsByGuid_[annotation.guid].onRemove();delete this.annotationsByGuid_[annotation.guid];tr.b.dispatchSimpleEvent(this,'annotationChange');},getAllAnnotations:function(){return tr.b.dictionaryValues(this.annotationsByGuid_);},addUserFriendlyCategoryDriver:function(ufcd){this.userFriendlyCategoryDrivers_.push(ufcd);},getUserFriendlyCategoryFromEvent:function(event){for(var i=0;i<this.userFriendlyCategoryDrivers_.length;i++){var ufc=this.userFriendlyCategoryDrivers_[i].fromEvent(event);if(ufc!==undefined)
 return ufc;}
 return undefined;},findAllThreadsNamed:function(name){var namedThreads=[];namedThreads.push.apply(namedThreads,this.kernel.findAllThreadsNamed(name));for(var pid in this.processes){namedThreads.push.apply(namedThreads,this.processes[pid].findAllThreadsNamed(name));}
-return namedThreads;},set importOptions(options){this.importOptions_=options;},get intrinsicTimeUnit(){if(this.intrinsicTimeUnit_===undefined)
-return tr.b.u.TimeDisplayModes.ms;return this.intrinsicTimeUnit_;},set intrinsicTimeUnit(value){if(this.intrinsicTimeUnit_===value)
+return namedThreads;},get importOptions(){return this.importOptions_;},set importOptions(options){this.importOptions_=options;},get intrinsicTimeUnit(){if(this.intrinsicTimeUnit_===undefined)
+return tr.v.TimeDisplayModes.ms;return this.intrinsicTimeUnit_;},set intrinsicTimeUnit(value){if(this.intrinsicTimeUnit_===value)
 return;if(this.intrinsicTimeUnit_!==undefined)
 throw new Error('Intrinsic time unit already set');this.intrinsicTimeUnit_=value;},get isTimeHighResolution(){if(this.isTimeHighResolution_===undefined)
 this.isTimeHighResolution_=this.isTimeHighResolutionHeuristic_();return this.isTimeHighResolution_;},set isTimeHighResolution(value){if(this.isTimeHighResolution_===value)
 return;if(this.isTimeHighResolution_!==undefined)
-throw new Error('isTimeHighResolution already set');this.isTimeHighResolution_=value;},importWarning:function(data){data.showToUser=!!data.showToUser;this.importWarnings_.push(data);if(this.reportedImportWarnings_[data.type]===true)
+throw new Error('isTimeHighResolution already set');this.isTimeHighResolution_=value;},get canonicalUrlThatCreatedThisTrace(){return this.canonicalUrlThatCreatedThisTrace_;},set canonicalUrlThatCreatedThisTrace(value){if(this.canonicalUrlThatCreatedThisTrace_===value)
+return;if(this.canonicalUrlThatCreatedThisTrace_!==undefined)
+throw new Error('canonicalUrlThatCreatedThisTrace already set');this.canonicalUrlThatCreatedThisTrace_=value;},importWarning:function(data){data.showToUser=!!data.showToUser;this.importWarnings_.push(data);if(this.reportedImportWarnings_[data.type]===true)
 return;if(this.importOptions_.showImportWarnings)
 console.warn(data.message);this.reportedImportWarnings_[data.type]=true;},get hasImportWarnings(){return(this.importWarnings_.length>0);},get importWarnings(){return this.importWarnings_;},get importWarningsThatShouldBeShownToUser(){return this.importWarnings_.filter(function(warning){return warning.showToUser;});},autoCloseOpenSlices:function(){this.samples.sort(function(x,y){return x.start-y.start;});this.updateBounds();this.kernel.autoCloseOpenSlices();for(var pid in this.processes)
 this.processes[pid].autoCloseOpenSlices();},createSubSlices:function(){this.kernel.createSubSlices();for(var pid in this.processes)
@@ -3320,37 +3544,23 @@
 this.shiftWorldToZero();},buildFlowEventIntervalTree:function(){for(var i=0;i<this.flowEvents.length;++i){var flowEvent=this.flowEvents[i];this.flowIntervalTree.insert(flowEvent);}
 this.flowIntervalTree.updateHighValues();},cleanupUndeletedObjects:function(){for(var pid in this.processes)
 this.processes[pid].autoDeleteObjects(this.bounds.max);},sortMemoryDumps:function(){this.globalMemoryDumps.sort(function(x,y){return x.start-y.start;});for(var pid in this.processes)
-this.processes[pid].sortMemoryDumps();},calculateMemoryGraphAttributes:function(){this.globalMemoryDumps.forEach(function(dump){dump.calculateGraphAttributes();});},buildEventIndices:function(){this.modelIndices=new tr.model.ModelIndices(this);},sortInteractionRecords:function(){this.interactionRecords.sort(function(x,y){return x.start-y.start;});},sortAlerts:function(){this.alerts.sort(function(x,y){return x.start-y.start;});},isTimeHighResolutionHeuristic_:function(){if(this.intrinsicTimeUnit!==tr.b.u.TimeDisplayModes.ms)
+this.processes[pid].sortMemoryDumps();},finalizeMemoryGraphs:function(){this.globalMemoryDumps.forEach(function(dump){dump.finalizeGraph();});},buildEventIndices:function(){this.modelIndices=new tr.model.ModelIndices(this);},sortAlerts:function(){this.alerts.sort(function(x,y){return x.start-y.start;});},applyObjectRefPatchups:function(){var unresolved=[];this.patchupsToApply_.forEach(function(patchup){if(patchup.pidRef in this.processes){var snapshot=this.processes[patchup.pidRef].objects.getSnapshotAt(patchup.scopedId,patchup.ts);if(snapshot){patchup.object[patchup.field]=snapshot;return;}}
+unresolved.push(patchup);},this);this.patchupsToApply_=unresolved;},replacePIDRefsInPatchups:function(old_pid_ref,new_pid_ref){this.patchupsToApply_.forEach(function(patchup){if(patchup.pidRef===old_pid_ref)
+patchup.pidRef=new_pid_ref;});},isTimeHighResolutionHeuristic_:function(){if(this.intrinsicTimeUnit!==tr.v.TimeDisplayModes.ms)
 return false;var nbEvents=0;var nbPerBin=[];var maxEvents=0;for(var i=0;i<100;++i)
 nbPerBin.push(0);this.iterateAllEvents(function(event){nbEvents++;if(event.start!==undefined){var remainder=Math.floor((event.start-Math.floor(event.start))*100);nbPerBin[remainder]++;maxEvents=Math.max(maxEvents,nbPerBin[remainder]);}});if(nbEvents<100)
-return true;return(maxEvents/nbEvents)<0.9;}};return{ClockSyncRecord:ClockSyncRecord,Model:Model};});'use strict';tr.exportTo('tr.importer',function(){function Importer(){}
-Importer.prototype={__proto__:Object.prototype,isTraceDataContainer:function(){return false;},extractSubtraces:function(){return[];},importEvents:function(){},importSampleData:function(){},finalizeImport:function(){},joinRefs:function(){}};var options=new tr.b.ExtensionRegistryOptions(tr.b.BASIC_REGISTRY_MODE);options.defaultMetadata={};options.mandatoryBaseClass=Importer;tr.b.decorateExtensionRegistry(Importer,options);Importer.findImporterFor=function(eventData){var typeInfo=Importer.findTypeInfoMatching(function(ti){return ti.constructor.canImport(eventData);});if(typeInfo)
-return typeInfo.constructor;return undefined;};return{Importer:Importer};});'use strict';tr.exportTo('tr.importer',function(){function EmptyImporter(events){this.importPriority=0;};EmptyImporter.canImport=function(eventData){if(eventData instanceof Array&&eventData.length==0)
-return true;if(typeof(eventData)==='string'||eventData instanceof String){return eventData.length==0;}
-return false;};EmptyImporter.prototype={__proto__:tr.importer.Importer.prototype};tr.importer.Importer.register(EmptyImporter);return{EmptyImporter:EmptyImporter};});'use strict';tr.exportTo('tr.importer',function(){function ImportOptions(){this.shiftWorldToZero=true;this.pruneEmptyContainers=true;this.showImportWarnings=true;this.customizeModelCallback=undefined;var auditorTypes=tr.c.Auditor.getAllRegisteredTypeInfos();this.auditorConstructors=auditorTypes.map(function(typeInfo){return typeInfo.constructor;});}
-function Import(model,opt_options){if(model===undefined)
-throw new Error('Must provide model to import into.');this.importing_=false;this.importOptions_=opt_options||new ImportOptions();this.model_=model;this.model_.importOptions=this.importOptions_;}
-Import.prototype={__proto__:Object.prototype,importTraces:function(traces){var progressMeter={update:function(msg){}};tr.b.Task.RunSynchronously(this.createImportTracesTask(progressMeter,traces));},importTracesWithProgressDialog:function(traces){if(tr.isHeadless)
-throw new Error('Cannot use this method in headless mode.');var overlay=tr.ui.b.Overlay();overlay.title='Importing...';overlay.userCanClose=false;overlay.msgEl=document.createElement('div');overlay.appendChild(overlay.msgEl);overlay.msgEl.style.margin='20px';overlay.update=function(msg){this.msgEl.textContent=msg;}
-overlay.visible=true;var promise=tr.b.Task.RunWhenIdle(this.createImportTracesTask(overlay,traces));promise.then(function(){overlay.visible=false;},function(err){overlay.visible=false;});return promise;},createImportTracesTask:function(progressMeter,traces){if(this.importing_)
-throw new Error('Already importing.');this.importing_=true;var importTask=new tr.b.Task(function prepareImport(){progressMeter.update('I will now import your traces for you...');},this);var lastTask=importTask;var importers=[];lastTask=lastTask.after(function createImports(){traces=traces.slice(0);progressMeter.update('Creating importers...');for(var i=0;i<traces.length;++i)
-importers.push(this.createImporter_(traces[i]));for(var i=0;i<importers.length;i++){var subtraces=importers[i].extractSubtraces();for(var j=0;j<subtraces.length;j++){try{traces.push(subtraces[j]);importers.push(this.createImporter_(subtraces[j]));}catch(error){console.warn(error.name+': '+error.message);continue;}}}
-if(traces.length&&!this.hasEventDataDecoder_(importers)){throw new Error('Could not find an importer for the provided eventData.');}
-importers.sort(function(x,y){return x.importPriority-y.importPriority;});},this);lastTask=lastTask.after(function runImport(task){importers.forEach(function(importer,index){task.subTask(function runImportEventsOnOneImporter(){progressMeter.update('Importing '+(index+1)+' of '+importers.length);importer.importEvents();},this);},this);},this);if(this.importOptions_.customizeModelCallback){lastTask=lastTask.after(function runCustomizeCallbacks(task){this.importOptions_.customizeModelCallback(this.model_);},this);}
-lastTask=lastTask.after(function(task){importers.forEach(function importSampleData(importer,index){progressMeter.update('Importing sample data '+(index+1)+'/'+importers.length);importer.importSampleData();},this);},this);lastTask=lastTask.after(function runAutoclosers(){progressMeter.update('Autoclosing open slices...');this.model_.autoCloseOpenSlices();this.model_.createSubSlices();},this);lastTask=lastTask.after(function finalizeImport(task){importers.forEach(function(importer,index){progressMeter.update('Finalizing import '+(index+1)+'/'+importers.length);importer.finalizeImport();},this);},this);lastTask=lastTask.after(function runPreinits(){progressMeter.update('Initializing objects (step 1/2)...');this.model_.preInitializeObjects();},this);if(this.importOptions_.pruneEmptyContainers){lastTask=lastTask.after(function runPruneEmptyContainers(){progressMeter.update('Pruning empty containers...');this.model_.pruneEmptyContainers();},this);}
-lastTask=lastTask.after(function runMergeKernelWithuserland(){progressMeter.update('Merging kernel with userland...');this.model_.mergeKernelWithUserland();},this);var auditors=[];lastTask=lastTask.after(function createAuditorsAndRunAnnotate(){progressMeter.update('Adding arbitrary data to model...');auditors=this.importOptions_.auditorConstructors.map(function(auditorConstructor){return new auditorConstructor(this.model_);},this);auditors.forEach(function(auditor){auditor.runAnnotate();auditor.installUserFriendlyCategoryDriverIfNeeded();});},this);lastTask=lastTask.after(function computeWorldBounds(){progressMeter.update('Computing final world bounds...');this.model_.computeWorldBounds(this.importOptions_.shiftWorldToZero);},this);lastTask=lastTask.after(function buildFlowEventIntervalTree(){progressMeter.update('Building flow event map...');this.model_.buildFlowEventIntervalTree();},this);lastTask=lastTask.after(function joinRefs(){progressMeter.update('Joining object refs...');for(var i=0;i<importers.length;i++)
-importers[i].joinRefs();},this);lastTask=lastTask.after(function cleanupUndeletedObjects(){progressMeter.update('Cleaning up undeleted objects...');this.model_.cleanupUndeletedObjects();},this);lastTask=lastTask.after(function sortMemoryDumps(){progressMeter.update('Sorting memory dumps...');this.model_.sortMemoryDumps();},this);lastTask=lastTask.after(function calculateMemoryGraphAttributes(){progressMeter.update('Calculating memory dump graph attributes...');this.model_.calculateMemoryGraphAttributes();},this);lastTask=lastTask.after(function initializeObjects(){progressMeter.update('Initializing objects (step 2/2)...');this.model_.initializeObjects();},this);lastTask=lastTask.after(function buildEventIndices(){progressMeter.update('Building event indices...');this.model_.buildEventIndices();},this);lastTask=lastTask.after(function runAudits(){progressMeter.update('Running auditors...');auditors.forEach(function(auditor){auditor.runAudit();});},this);lastTask=lastTask.after(function sortInteractionRecords(){progressMeter.update('Updating interaction records...');this.model_.sortInteractionRecords();},this);lastTask=lastTask.after(function sortAlerts(){progressMeter.update('Updating alerts...');this.model_.sortAlerts();},this);lastTask=lastTask.after(function lastUpdateBounds(){progressMeter.update('Update bounds...');this.model_.updateBounds();},this);lastTask=lastTask.after(function addModelWarnings(){progressMeter.update('Looking for warnings...');if(!this.model_.isTimeHighResolution){this.model_.importWarning({type:'low_resolution_timer',message:'Trace time is low resolution, trace may be unusable.',showToUser:true});}},this);lastTask.after(function(){this.importing_=false;},this);return importTask;},createImporter_:function(eventData){var importerConstructor=tr.importer.Importer.findImporterFor(eventData);if(!importerConstructor){throw new Error('Couldn\'t create an importer for the provided '+'eventData.');}
-return new importerConstructor(this.model_,eventData);},hasEventDataDecoder_:function(importers){if(importers.length===0)
-return false;for(var i=0;i<importers.length;++i){if(!importers[i].isTraceDataContainer())
-return true;}
-return false;}};return{ImportOptions:ImportOptions,Import:Import};});'use strict';tr.exportTo('tr.importer',function(){function SimpleLineReader(text){this.lines_=text.split('\n');this.curLine_=0;this.savedLines_=undefined;}
-SimpleLineReader.prototype={advanceToLineMatching:function(regex){for(;this.curLine_<this.lines_.length;this.curLine_++){var line=this.lines_[this.curLine_];if(this.savedLines_!==undefined)
-this.savedLines_.push(line);if(regex.test(line))
-return true;}
-return false;},get curLineNumber(){return this.curLine_;},beginSavingLines:function(){this.savedLines_=[];},endSavingLinesAndGetResult:function(){var tmp=this.savedLines_;this.savedLines_=undefined;return tmp;}};return{SimpleLineReader:SimpleLineReader};});'use strict';tr.exportTo('tr.model',function(){var ColorScheme=tr.b.ColorScheme;function Activity(name,category,range,args){tr.model.TimedEvent.call(this,range.min);this.title=name;this.category=category;this.colorId=ColorScheme.getColorIdForGeneralPurposeString(name);this.duration=range.duration;this.args=args;this.name=name;};Activity.prototype={__proto__:tr.model.TimedEvent.prototype,shiftTimestampsForward:function(amount){this.start+=amount;},addBoundsToRange:function(range){range.addValue(this.start);range.addValue(this.end);}};return{Activity:Activity};});'use strict';tr.exportTo('tr.e.importer.android',function(){var Importer=tr.importer.Importer;var ACTIVITY_STATE={NONE:'none',CREATED:'created',STARTED:'started',RESUMED:'resumed',PAUSED:'paused',STOPPED:'stopped',DESTROYED:'destroyed'};var activityMap={};function EventLogImporter(model,events){this.model_=model;this.events_=events;this.importPriority=3;}
+return true;return(maxEvents/nbEvents)<0.9;},joinRefs:function(){this.joinObjectRefs_();this.applyObjectRefPatchups();},joinObjectRefs_:function(){tr.b.iterItems(this.processes,function(pid,process){this.joinObjectRefsForProcess_(pid,process);},this);},joinObjectRefsForProcess_:function(pid,process){tr.b.iterItems(process.threads,function(tid,thread){thread.asyncSliceGroup.slices.forEach(function(item){this.searchItemForIDRefs_(pid,'start',item);},this);thread.sliceGroup.slices.forEach(function(item){this.searchItemForIDRefs_(pid,'start',item);},this);},this);process.objects.iterObjectInstances(function(instance){instance.snapshots.forEach(function(item){this.searchItemForIDRefs_(pid,'ts',item);},this);},this);},searchItemForIDRefs_:function(pid,itemTimestampField,item){if(!item.args)
+return;var patchupsToApply=this.patchupsToApply_;function handleField(object,fieldName,fieldValue){if(!fieldValue||(!fieldValue.id_ref&&!fieldValue.idRef))
+return;var scope=fieldValue.scope||tr.model.OBJECT_DEFAULT_SCOPE;var idRef=fieldValue.id_ref||fieldValue.idRef;var scopedId=new tr.model.ScopedId(scope,idRef);var pidRef=fieldValue.pid_ref||fieldValue.pidRef||pid;var ts=item[itemTimestampField];patchupsToApply.push({object:object,field:fieldName,pidRef:pidRef,scopedId:scopedId,ts:ts});}
+function iterObjectFieldsRecursively(object){if(!(object instanceof Object))
+return;if((object instanceof tr.model.ObjectSnapshot)||(object instanceof Float32Array)||(object instanceof tr.b.Quad))
+return;if(object instanceof Array){for(var i=0;i<object.length;i++){handleField(object,i,object[i]);iterObjectFieldsRecursively(object[i]);}
+return;}
+for(var key in object){var value=object[key];handleField(object,key,value);iterObjectFieldsRecursively(value);}}
+iterObjectFieldsRecursively(item.args);}};return{Model:Model};});'use strict';tr.exportTo('tr.e.importer.android',function(){var Importer=tr.importer.Importer;var ACTIVITY_STATE={NONE:'none',CREATED:'created',STARTED:'started',RESUMED:'resumed',PAUSED:'paused',STOPPED:'stopped',DESTROYED:'destroyed'};var activityMap={};function EventLogImporter(model,events){this.model_=model;this.events_=events;this.importPriority=3;}
 var eventLogActivityRE=new RegExp('(\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}.\\d+)'+'\\s+(\\d+)\\s+(\\d+)\\s+([A-Z])\\s*'+'(am_\\w+)\\s*:(.*)');var amCreateRE=new RegExp('\s*\\[.*,.*,.*,(.*),.*,.*,.*,.*\\]');var amFocusedRE=new RegExp('\s*\\[\\d+,(.*)\\]');var amProcStartRE=new RegExp('\s*\\[\\d+,\\d+,\\d+,.*,activity,(.*)\\]');var amOnResumeRE=new RegExp('\s*\\[\\d+,(.*)\\]');var amOnPauseRE=new RegExp('\s*\\[\\d+,(.*)\\]');var amLaunchTimeRE=new RegExp('\s*\\[\\d+,\\d+,(.*),(\\d+),(\\d+)');var amDestroyRE=new RegExp('\s*\\[\\d+,\\d+,\\d+,(.*)\\]');EventLogImporter.canImport=function(events){if(!(typeof(events)==='string'||events instanceof String))
 return false;if(/^<!DOCTYPE html>/.test(events))
-return false;return eventLogActivityRE.test(events);};EventLogImporter.prototype={__proto__:Importer.prototype,get model(){return this.model_;},getFullActivityName:function(component){var componentSplit=component.split('/');if(componentSplit[1].startsWith('.'))
+return false;return eventLogActivityRE.test(events);};EventLogImporter.prototype={__proto__:Importer.prototype,get importerName(){return'EventLogImporter';},get model(){return this.model_;},getFullActivityName:function(component){var componentSplit=component.split('/');if(componentSplit[1].startsWith('.'))
 return componentSplit[0]+componentSplit[1];return componentSplit[1];},getProcName:function(component){var componentSplit=component.split('/');return componentSplit[0];},findOrCreateActivity:function(activityName){if(activityName in activityMap)
 return activityMap[activityName];var activity={state:ACTIVITY_STATE.NONE,name:activityName};activityMap[activityName]=activity;return activity;},deleteActivity:function(activityName){delete activityMap[activityName];},handleCreateActivity:function(ts,activityName){var activity=this.findOrCreateActivity(activityName);activity.state=ACTIVITY_STATE.CREATED;activity.createdTs=ts;},handleFocusActivity:function(ts,procName,activityName){var activity=this.findOrCreateActivity(activityName);activity.lastFocusedTs=ts;},handleProcStartForActivity:function(ts,activityName){var activity=this.findOrCreateActivity(activityName);activity.procStartTs=ts;},handleOnResumeCalled:function(ts,pid,activityName){var activity=this.findOrCreateActivity(activityName);activity.state=ACTIVITY_STATE.RESUMED;activity.lastResumeTs=ts;activity.pid=pid;},handleOnPauseCalled:function(ts,activityName){var activity=this.findOrCreateActivity(activityName);activity.state=ACTIVITY_STATE.PAUSED;activity.lastPauseTs=ts;if(ts>this.model_.bounds.min&&ts<this.model_.bounds.max)
 this.addActivityToProcess(activity);},handleLaunchTime:function(ts,activityName,launchTime){var activity=this.findOrCreateActivity(activityName);activity.launchTime=launchTime;},handleDestroyActivity:function(ts,activityName){this.deleteActivity(activityName);},addActivityToProcess:function(activity){if(activity.pid===undefined)
@@ -3360,28 +3570,31 @@
 match[1].substring(5,match[1].length);var monotonic_ts=Date.parse(ts)+
 this.model_.realtime_to_monotonic_offset_ms;var pid=match[2];var action=match[5];var data=match[6];if(action==='am_create_activity'){match=amCreateRE.exec(data);if(match&&match.length>=2){this.handleCreateActivity(monotonic_ts,this.getFullActivityName(match[1]));}}else if(action==='am_focused_activity'){match=amFocusedRE.exec(data);if(match&&match.length>=2){this.handleFocusActivity(monotonic_ts,this.getProcName(match[1]),this.getFullActivityName(match[1]));}}else if(action==='am_proc_start'){match=amProcStartRE.exec(data);if(match&&match.length>=2){this.handleProcStartForActivity(monotonic_ts,this.getFullActivityName(match[1]));}}else if(action==='am_on_resume_called'){match=amOnResumeRE.exec(data);if(match&&match.length>=2)
 this.handleOnResumeCalled(monotonic_ts,pid,match[1]);}else if(action==='am_on_paused_called'){match=amOnPauseRE.exec(data);if(match&&match.length>=2)
-this.handleOnPauseCalled(monotonic_ts,match[1]);}else if(action==='am_activity_launch_time'){match=amLaunchTimeRE.exec(data);this.handleLaunchTime(monotonic_ts,this.getFullActivityName(match[1]),match[2]);}else if(action==='am_destroy_activity'){match=amDestroyRE.exec(data);if(match&&match.length==2){this.handleDestroyActivity(monotonic_ts,this.getFullActivityName(match[1]));}}},importEvents:function(isSecondaryImport){if(isNaN(this.model_.realtime_to_monotonic_offset_ms)){this.model_.importWarning({type:'eveng_log_clock_sync',message:'Need a trace_event_clock_sync to map realtime to import.'});return;}
-this.model_.updateBounds();var lines=this.events_.split('\n');lines.forEach(this.parseAmLine_,this);for(var activityName in activityMap){var activity=activityMap[activityName];if(activity.state==ACTIVITY_STATE.RESUMED){activity.lastPauseTs=this.model_.bounds.max;this.addActivityToProcess(activity);}}}};Importer.register(EventLogImporter);return{EventLogImporter:EventLogImporter};});'use strict';tr.exportTo('tr.e.importer.battor',function(){function BattorImporter(model,events){this.importPriority=3;this.sampleRate_=undefined;this.model_=model;this.events_=events;}
-var TestExports={};var battorDataLineRE=/^(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)$/;var battorHeaderLineRE=/^# BattOr/;var sampleRateLineRE=/^# sample_rate=(\d+)Hz/;BattorImporter.canImport=function(events){if(!(typeof(events)==='string'||events instanceof String))
-return false;return battorHeaderLineRE.test(events);};BattorImporter.prototype={__proto__:tr.importer.Importer.prototype,get model(){return this.model_;},importEvents:function(isSecondaryImport){if(this.model_.device.powerSeries){this.model_.importWarning({type:'import_error',message:'Power counter exists, can not import BattOr power trace.'});return;}
-var name='power';var series=new tr.model.PowerSeries(this.model_.device);this.importPowerSamples(series);var syncMarks=this.model_.getClockSyncRecordsNamed('battor');if(syncMarks.length<1){this.model_.importWarning({type:'clock_sync',message:'Cannot import BattOr power trace without a sync signal.'});return;}
-var shiftTs=this.correlationClockSync(syncMarks,series);if(shiftTs===undefined){this.model_.importWarning({type:'clock_sync',message:'All of the BattOr power trace clock sync techinques failed.'});return;}
+this.handleOnPauseCalled(monotonic_ts,match[1]);}else if(action==='am_activity_launch_time'){match=amLaunchTimeRE.exec(data);this.handleLaunchTime(monotonic_ts,this.getFullActivityName(match[1]),match[2]);}else if(action==='am_destroy_activity'){match=amDestroyRE.exec(data);if(match&&match.length==2){this.handleDestroyActivity(monotonic_ts,this.getFullActivityName(match[1]));}}},importEvents:function(){if(isNaN(this.model_.realtime_to_monotonic_offset_ms)){this.model_.importWarning({type:'eveng_log_clock_sync',message:'Need a trace_event_clock_sync to map realtime to import.'});return;}
+this.model_.updateBounds();var lines=this.events_.split('\n');lines.forEach(this.parseAmLine_,this);for(var activityName in activityMap){var activity=activityMap[activityName];if(activity.state==ACTIVITY_STATE.RESUMED){activity.lastPauseTs=this.model_.bounds.max;this.addActivityToProcess(activity);}}}};Importer.register(EventLogImporter);return{EventLogImporter:EventLogImporter};});'use strict';tr.exportTo('tr.e.importer.battor',function(){function BattorImporter(model,events){this.importPriority=3;this.sampleRate_=undefined;this.model_=model;this.events_=events;this.explicitSyncMark_=undefined;}
+var TestExports={};var battorDataLineRE=new RegExp('^(\\d+\\.\\d+)\\s+(\\d+\\.\\d+)\\s+(\\d+\\.\\d+)'+'(?:\\s+<(\\S+)>)?$');var battorHeaderLineRE=/^# BattOr/;var sampleRateLineRE=/^# sample_rate (\d+) Hz/;BattorImporter.canImport=function(events){if(!(typeof(events)==='string'||events instanceof String))
+return false;return battorHeaderLineRE.test(events);};BattorImporter.prototype={__proto__:tr.importer.Importer.prototype,get importerName(){return'BattorImporter';},get model(){return this.model_;},importEvents:function(){if(this.model_.device.powerSeries){this.model_.importWarning({type:'import_error',message:'Power counter exists, can not import BattOr power trace.'});return;}
+var name='power';var series=new tr.model.PowerSeries(this.model_.device);this.importPowerSamples(series);var battorSyncMarks=this.model_.getClockSyncRecordsWithSyncId('battor');var shiftTs=undefined;shiftTs=this.correlationClockSync(battorSyncMarks,series);if(shiftTs===undefined)
+shiftTs=this.explicitClockSync();if(shiftTs===undefined){this.model_.importWarning({type:'clock_sync',message:'All of the BattOr power trace clock sync techniques failed.'});return;}
 series.shiftTimestampsForward(shiftTs);this.model_.device.powerSeries=series;},importPowerSamples:function(series){var lines=this.events_.split('\n');this.model_.updateBounds();var minTs=0;if(this.model_.bounds.min!==undefined)
 minTs=this.model_.bounds.min;lines.forEach(function(line){line=line.trim();if(line.length===0)
 return;if(/^#/.test(line)){groups=sampleRateLineRE.exec(line);if(!groups)
 return;this.sampleRate_=parseInt(groups[1]);}else{var groups=battorDataLineRE.exec(line);if(!groups){this.model_.importWarning({type:'parse_error',message:'Unrecognized line: '+line});return;}
-var time=parseFloat(groups[1])+minTs;var voltage_mV=parseFloat(groups[2]);var current_mA=parseFloat(groups[3]);series.addPowerSample(time,(voltage_mV*current_mA)/1000);}},this);},correlationClockSync:function(syncMarks,series){var syncCtr=this.model_.kernel.counters['null.vreg '+syncMarks[0].args['regulator']+' enabled'];if(syncCtr===undefined){this.model_.importWarning({type:'clock_sync',message:'Cannot correlate BattOr power trace without sync vreg.'});return undefined;}
-var syncEvents=[];var firstSyncEventTs=undefined;syncCtr.series[0].iterateAllEvents(function(event){if(event.timestamp>=syncMarks[0].ts&&event.timestamp<=syncMarks[1].ts){if(firstSyncEventTs===undefined)
+var time=parseFloat(groups[1])+minTs;var voltage_mV=parseFloat(groups[2]);var current_mA=parseFloat(groups[3]);series.addPowerSample(time,(voltage_mV*current_mA)/1000);if(groups[4]!==undefined&&this.explicitSyncMark_===undefined){var id=groups[4];this.explicitSyncMark_={'id':id,'ts':time};}}},this);},correlationClockSync:function(syncMarks,series){if(syncMarks.length!==2)
+return undefined;var syncCtr=this.model_.kernel.counters['null.vreg '+syncMarks[0].args['regulator']+' enabled'];if(syncCtr===undefined){this.model_.importWarning({type:'clock_sync',message:'Cannot correlate BattOr power trace without sync vreg.'});return undefined;}
+var syncEvents=[];var firstSyncEventTs=undefined;syncCtr.series[0].iterateAllEvents(function(event){if(event.timestamp>=syncMarks[0].start&&event.timestamp<=syncMarks[1].start){if(firstSyncEventTs===undefined)
 firstSyncEventTs=event.timestamp;var newEvent={'ts':(event.timestamp-firstSyncEventTs)/1000,'val':event.value};syncEvents.push(newEvent);}});var syncSamples=[];var syncNumSamples=Math.ceil(syncEvents[syncEvents.length-1].ts*this.sampleRate_);for(var i=1;i<syncEvents.length;i++){var sampleStartIdx=Math.ceil(syncEvents[i-1].ts*this.sampleRate_);var sampleEndIdx=Math.ceil(syncEvents[i].ts*this.sampleRate_);for(var j=sampleStartIdx;j<sampleEndIdx;j++){syncSamples[j]=syncEvents[i-1].val;}}
 var powerSamples=series.samples;if(powerSamples.length<syncSamples.length){this.model_.importWarning({type:'not_enough_samples',message:'Not enough power samples to correlate with sync signal.'});return undefined;}
 var maxShift=powerSamples.length-syncSamples.length;var minShift=0;var corrNumSamples=this.sampleRate_*5.0;if(powerSamples.length>corrNumSamples)
 minShift=powerSamples.length-corrNumSamples;var corr=[];for(var shift=minShift;shift<=maxShift;shift++){var corrSum=0;var powerAvg=0;for(var i=0;i<syncSamples.length;i++){corrSum+=(powerSamples[i+shift].power*syncSamples[i]);powerAvg+=powerSamples[i+shift].power;}
 powerAvg=powerAvg/syncSamples.length;corr.push(corrSum/powerAvg);}
 var corrPeakIdx=0;var corrPeak=0;for(var i=0;i<powerSamples.length;i++){if(corr[i]>corrPeak){corrPeak=corr[i];corrPeakIdx=i;}}
-var corrPeakTs=((minShift+corrPeakIdx)/this.sampleRate_);corrPeakTs*=1000;var syncStartTs=firstSyncEventTs-this.model_.bounds.min;var shiftTs=syncStartTs-corrPeakTs;return shiftTs;}};tr.importer.Importer.register(BattorImporter);return{BattorImporter:BattorImporter,_BattorImporterTestExports:TestExports};});!function(a){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=a();else if("function"==typeof define&&define.amd)define([],a);else{var b;"undefined"!=typeof window?b=window:"undefined"!=typeof global?b=global:"undefined"!=typeof self&&(b=self),b.JSZip=a()}}(function(){return function a(b,c,d){function e(g,h){if(!c[g]){if(!b[g]){var i="function"==typeof require&&require;if(!h&&i)return i(g,!0);if(f)return f(g,!0);throw new Error("Cannot find module '"+g+"'")}var j=c[g]={exports:{}};b[g][0].call(j.exports,function(a){var c=b[g][1][a];return e(c?c:a)},j,j.exports,a,b,c,d)}return c[g].exports}for(var f="function"==typeof require&&require,g=0;g<d.length;g++)e(d[g]);return e}({1:[function(a,b,c){"use strict";var d="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";c.encode=function(a){for(var b,c,e,f,g,h,i,j="",k=0;k<a.length;)b=a.charCodeAt(k++),c=a.charCodeAt(k++),e=a.charCodeAt(k++),f=b>>2,g=(3&b)<<4|c>>4,h=(15&c)<<2|e>>6,i=63&e,isNaN(c)?h=i=64:isNaN(e)&&(i=64),j=j+d.charAt(f)+d.charAt(g)+d.charAt(h)+d.charAt(i);return j},c.decode=function(a){var b,c,e,f,g,h,i,j="",k=0;for(a=a.replace(/[^A-Za-z0-9\+\/\=]/g,"");k<a.length;)f=d.indexOf(a.charAt(k++)),g=d.indexOf(a.charAt(k++)),h=d.indexOf(a.charAt(k++)),i=d.indexOf(a.charAt(k++)),b=f<<2|g>>4,c=(15&g)<<4|h>>2,e=(3&h)<<6|i,j+=String.fromCharCode(b),64!=h&&(j+=String.fromCharCode(c)),64!=i&&(j+=String.fromCharCode(e));return j}},{}],2:[function(a,b){"use strict";function c(){this.compressedSize=0,this.uncompressedSize=0,this.crc32=0,this.compressionMethod=null,this.compressedContent=null}c.prototype={getContent:function(){return null},getCompressedContent:function(){return null}},b.exports=c},{}],3:[function(a,b,c){"use strict";c.STORE={magic:"\x00\x00",compress:function(a){return a},uncompress:function(a){return a},compressInputType:null,uncompressInputType:null},c.DEFLATE=a("./flate")},{"./flate":8}],4:[function(a,b){"use strict";var 
c=a("./utils"),d=[0,1996959894,3993919788,2567524794,124634137,1886057615,3915621685,2657392035,249268274,2044508324,3772115230,2547177864,162941995,2125561021,3887607047,2428444049,498536548,1789927666,4089016648,2227061214,450548861,1843258603,4107580753,2211677639,325883990,1684777152,4251122042,2321926636,335633487,1661365465,4195302755,2366115317,997073096,1281953886,3579855332,2724688242,1006888145,1258607687,3524101629,2768942443,901097722,1119000684,3686517206,2898065728,853044451,1172266101,3705015759,2882616665,651767980,1373503546,3369554304,3218104598,565507253,1454621731,3485111705,3099436303,671266974,1594198024,3322730930,2970347812,795835527,1483230225,3244367275,3060149565,1994146192,31158534,2563907772,4023717930,1907459465,112637215,2680153253,3904427059,2013776290,251722036,2517215374,3775830040,2137656763,141376813,2439277719,3865271297,1802195444,476864866,2238001368,4066508878,1812370925,453092731,2181625025,4111451223,1706088902,314042704,2344532202,4240017532,1658658271,366619977,2362670323,4224994405,1303535960,984961486,2747007092,3569037538,1256170817,1037604311,2765210733,3554079995,1131014506,879679996,2909243462,3663771856,1141124467,855842277,2852801631,3708648649,1342533948,654459306,3188396048,3373015174,1466479909,544179635,3110523913,3462522015,1591671054,702138776,2966460450,3352799412,1504918807,783551873,3082640443,3233442989,3988292384,2596254646,62317068,1957810842,3939845945,2647816111,81470997,1943803523,3814918930,2489596804,225274430,2053790376,3826175755,2466906013,167816743,2097651377,4027552580,2265490386,503444072,1762050814,4150417245,2154129355,426522225,1852507879,4275313526,2312317920,282753626,1742555852,4189708143,2394877945,397917763,1622183637,3604390888,2714866558,953729732,1340076626,3518719985,2797360999,1068828381,1219638859,3624741850,2936675148,906185462,1090812512,3747672003,2825379669,829329135,1181335161,3412177804,3160834842,628085408,1382605366,3423369109,3138078467,570562233,1426400815,3317316542,2998733608,733239954,1555261956,3268935591,3050360625,752459403,1541320221,2607071920,3965973030,1969922972,40735498,2617837225,3943577151,1913087877,83908371,2512341634,3803740692,2075208622,213261112,2463272603,3855990285,2094854071,198958881,2262029012,4057260610,1759359992,534414190,2176718541,4139329115,1873836001,414664567,2282248934,4279200368,1711684554,285281116,2405801727,4167216745,1634467795,376229701,2685067896,3608007406,1308918612,956543938,2808555105,3495958263,1231636301,1047427035,2932959818,3654703836,1088359270,936918e3,2847714899,3736837829,1202900863,817233897,3183342108,3401237130,1404277552,615818150,3134207493,3453421203,1423857449,601450431,3009837614,3294710456,1567103746,711928724,3020668471,3272380065,1510334235,755167117];b.exports=function(a,b){if("undefined"==typeof a||!a.length)return 0;var e="string"!==c.getTypeOf(a);"undefined"==typeof b&&(b=0);var f=0,g=0,h=0;b=-1^b;for(var i=0,j=a.length;j>i;i++)h=e?a[i]:a.charCodeAt(i),g=255&(b^h),f=d[g],b=b>>>8^f;return-1^b}},{"./utils":21}],5:[function(a,b){"use strict";function c(){this.data=null,this.length=0,this.index=0}var d=a("./utils");c.prototype={checkOffset:function(a){this.checkIndex(this.index+a)},checkIndex:function(a){if(this.length<a||0>a)throw new Error("End of data reached (data length = "+this.length+", asked index = "+a+"). 
Corrupted zip ?")},setIndex:function(a){this.checkIndex(a),this.index=a},skip:function(a){this.setIndex(this.index+a)},byteAt:function(){},readInt:function(a){var b,c=0;for(this.checkOffset(a),b=this.index+a-1;b>=this.index;b--)c=(c<<8)+this.byteAt(b);return this.index+=a,c},readString:function(a){return d.transformTo("string",this.readData(a))},readData:function(){},lastIndexOfSignature:function(){},readDate:function(){var a=this.readInt(4);return new Date((a>>25&127)+1980,(a>>21&15)-1,a>>16&31,a>>11&31,a>>5&63,(31&a)<<1)}},b.exports=c},{"./utils":21}],6:[function(a,b,c){"use strict";c.base64=!1,c.binary=!1,c.dir=!1,c.createFolders=!1,c.date=null,c.compression=null,c.comment=null},{}],7:[function(a,b,c){"use strict";var d=a("./utils");c.string2binary=function(a){return d.string2binary(a)},c.string2Uint8Array=function(a){return d.transformTo("uint8array",a)},c.uint8Array2String=function(a){return d.transformTo("string",a)},c.string2Blob=function(a){var b=d.transformTo("arraybuffer",a);return d.arrayBuffer2Blob(b)},c.arrayBuffer2Blob=function(a){return d.arrayBuffer2Blob(a)},c.transformTo=function(a,b){return d.transformTo(a,b)},c.getTypeOf=function(a){return d.getTypeOf(a)},c.checkSupport=function(a){return d.checkSupport(a)},c.MAX_VALUE_16BITS=d.MAX_VALUE_16BITS,c.MAX_VALUE_32BITS=d.MAX_VALUE_32BITS,c.pretty=function(a){return d.pretty(a)},c.findCompression=function(a){return d.findCompression(a)},c.isRegExp=function(a){return d.isRegExp(a)}},{"./utils":21}],8:[function(a,b,c){"use strict";var d="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Uint32Array,e=a("pako");c.uncompressInputType=d?"uint8array":"array",c.compressInputType=d?"uint8array":"array",c.magic="\b\x00",c.compress=function(a){return e.deflateRaw(a)},c.uncompress=function(a){return e.inflateRaw(a)}},{pako:24}],9:[function(a,b){"use strict";function c(a,b){return this instanceof c?(this.files={},this.comment=null,this.root="",a&&this.load(a,b),void(this.clone=function(){var a=new c;for(var b in this)"function"!=typeof this[b]&&(a[b]=this[b]);return a})):new c(a,b)}var d=a("./base64");c.prototype=a("./object"),c.prototype.load=a("./load"),c.support=a("./support"),c.defaults=a("./defaults"),c.utils=a("./deprecatedPublicUtils"),c.base64={encode:function(a){return d.encode(a)},decode:function(a){return d.decode(a)}},c.compressions=a("./compressions"),b.exports=c},{"./base64":1,"./compressions":3,"./defaults":6,"./deprecatedPublicUtils":7,"./load":10,"./object":13,"./support":17}],10:[function(a,b){"use strict";var c=a("./base64"),d=a("./zipEntries");b.exports=function(a,b){var e,f,g,h;for(b=b||{},b.base64&&(a=c.decode(a)),f=new d(a,b),e=f.files,g=0;g<e.length;g++)h=e[g],this.file(h.fileName,h.decompressed,{binary:!0,optimizedBinaryString:!0,date:h.date,dir:h.dir,comment:h.fileComment.length?h.fileComment:null,createFolders:b.createFolders});return f.zipComment.length&&(this.comment=f.zipComment),this}},{"./base64":1,"./zipEntries":22}],11:[function(a,b){(function(a){"use strict";b.exports=function(b,c){return new a(b,c)},b.exports.test=function(b){return a.isBuffer(b)}}).call(this,"undefined"!=typeof Buffer?Buffer:void 0)},{}],12:[function(a,b){"use strict";function c(a){this.data=a,this.length=this.data.length,this.index=0}var d=a("./uint8ArrayReader");c.prototype=new d,c.prototype.readData=function(a){this.checkOffset(a);var b=this.data.slice(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./uint8ArrayReader":18}],13:[function(a,b){"use strict";var 
c=a("./support"),d=a("./utils"),e=a("./crc32"),f=a("./signature"),g=a("./defaults"),h=a("./base64"),i=a("./compressions"),j=a("./compressedObject"),k=a("./nodeBuffer"),l=a("./utf8"),m=a("./stringWriter"),n=a("./uint8ArrayWriter"),o=function(a){if(a._data instanceof j&&(a._data=a._data.getContent(),a.options.binary=!0,a.options.base64=!1,"uint8array"===d.getTypeOf(a._data))){var b=a._data;a._data=new Uint8Array(b.length),0!==b.length&&a._data.set(b,0)}return a._data},p=function(a){var b=o(a),e=d.getTypeOf(b);return"string"===e?!a.options.binary&&c.nodebuffer?k(b,"utf-8"):a.asBinary():b},q=function(a){var b=o(this);return null===b||"undefined"==typeof b?"":(this.options.base64&&(b=h.decode(b)),b=a&&this.options.binary?A.utf8decode(b):d.transformTo("string",b),a||this.options.binary||(b=d.transformTo("string",A.utf8encode(b))),b)},r=function(a,b,c){this.name=a,this.dir=c.dir,this.date=c.date,this.comment=c.comment,this._data=b,this.options=c,this._initialMetadata={dir:c.dir,date:c.date}};r.prototype={asText:function(){return q.call(this,!0)},asBinary:function(){return q.call(this,!1)},asNodeBuffer:function(){var a=p(this);return d.transformTo("nodebuffer",a)},asUint8Array:function(){var a=p(this);return d.transformTo("uint8array",a)},asArrayBuffer:function(){return this.asUint8Array().buffer}};var s=function(a,b){var c,d="";for(c=0;b>c;c++)d+=String.fromCharCode(255&a),a>>>=8;return d},t=function(){var a,b,c={};for(a=0;a<arguments.length;a++)for(b in arguments[a])arguments[a].hasOwnProperty(b)&&"undefined"==typeof c[b]&&(c[b]=arguments[a][b]);return c},u=function(a){return a=a||{},a.base64!==!0||null!==a.binary&&void 0!==a.binary||(a.binary=!0),a=t(a,g),a.date=a.date||new Date,null!==a.compression&&(a.compression=a.compression.toUpperCase()),a},v=function(a,b,c){var e,f=d.getTypeOf(b);if(c=u(c),c.createFolders&&(e=w(a))&&x.call(this,e,!0),c.dir||null===b||"undefined"==typeof b)c.base64=!1,c.binary=!1,b=null;else if("string"===f)c.binary&&!c.base64&&c.optimizedBinaryString!==!0&&(b=d.string2binary(b));else{if(c.base64=!1,c.binary=!0,!(f||b instanceof j))throw new Error("The data of '"+a+"' is in an unsupported format !");"arraybuffer"===f&&(b=d.transformTo("uint8array",b))}var g=new r(a,b,c);return this.files[a]=g,g},w=function(a){"/"==a.slice(-1)&&(a=a.substring(0,a.length-1));var b=a.lastIndexOf("/");return b>0?a.substring(0,b):""},x=function(a,b){return"/"!=a.slice(-1)&&(a+="/"),b="undefined"!=typeof b?b:!1,this.files[a]||v.call(this,a,null,{dir:!0,createFolders:b}),this.files[a]},y=function(a,b){var c,f=new j;return a._data instanceof j?(f.uncompressedSize=a._data.uncompressedSize,f.crc32=a._data.crc32,0===f.uncompressedSize||a.dir?(b=i.STORE,f.compressedContent="",f.crc32=0):a._data.compressionMethod===b.magic?f.compressedContent=a._data.getCompressedContent():(c=a._data.getContent(),f.compressedContent=b.compress(d.transformTo(b.compressInputType,c)))):(c=p(a),(!c||0===c.length||a.dir)&&(b=i.STORE,c=""),f.uncompressedSize=c.length,f.crc32=e(c),f.compressedContent=b.compress(d.transformTo(b.compressInputType,c))),f.compressedSize=f.compressedContent.length,f.compressionMethod=b.magic,f},z=function(a,b,c,g){var 
h,i,j,k,m=(c.compressedContent,d.transformTo("string",l.utf8encode(b.name))),n=b.comment||"",o=d.transformTo("string",l.utf8encode(n)),p=m.length!==b.name.length,q=o.length!==n.length,r=b.options,t="",u="",v="";j=b._initialMetadata.dir!==b.dir?b.dir:r.dir,k=b._initialMetadata.date!==b.date?b.date:r.date,h=k.getHours(),h<<=6,h|=k.getMinutes(),h<<=5,h|=k.getSeconds()/2,i=k.getFullYear()-1980,i<<=4,i|=k.getMonth()+1,i<<=5,i|=k.getDate(),p&&(u=s(1,1)+s(e(m),4)+m,t+="up"+s(u.length,2)+u),q&&(v=s(1,1)+s(this.crc32(o),4)+o,t+="uc"+s(v.length,2)+v);var w="";w+="\n\x00",w+=p||q?"\x00\b":"\x00\x00",w+=c.compressionMethod,w+=s(h,2),w+=s(i,2),w+=s(c.crc32,4),w+=s(c.compressedSize,4),w+=s(c.uncompressedSize,4),w+=s(m.length,2),w+=s(t.length,2);var x=f.LOCAL_FILE_HEADER+w+m+t,y=f.CENTRAL_FILE_HEADER+"\x00"+w+s(o.length,2)+"\x00\x00\x00\x00"+(j===!0?"\x00\x00\x00":"\x00\x00\x00\x00")+s(g,4)+m+t+o;return{fileRecord:x,dirRecord:y,compressedObject:c}},A={load:function(){throw new Error("Load method is not defined. Is the file jszip-load.js included ?")},filter:function(a){var b,c,d,e,f=[];for(b in this.files)this.files.hasOwnProperty(b)&&(d=this.files[b],e=new r(d.name,d._data,t(d.options)),c=b.slice(this.root.length,b.length),b.slice(0,this.root.length)===this.root&&a(c,e)&&f.push(e));return f},file:function(a,b,c){if(1===arguments.length){if(d.isRegExp(a)){var e=a;return this.filter(function(a,b){return!b.dir&&e.test(a)})}return this.filter(function(b,c){return!c.dir&&b===a})[0]||null}return a=this.root+a,v.call(this,a,b,c),this},folder:function(a){if(!a)return this;if(d.isRegExp(a))return this.filter(function(b,c){return c.dir&&a.test(b)});var b=this.root+a,c=x.call(this,b),e=this.clone();return e.root=c.name,e},remove:function(a){a=this.root+a;var b=this.files[a];if(b||("/"!=a.slice(-1)&&(a+="/"),b=this.files[a]),b&&!b.dir)delete this.files[a];else for(var c=this.filter(function(b,c){return c.name.slice(0,a.length)===a}),d=0;d<c.length;d++)delete this.files[c[d].name];return this},generate:function(a){a=t(a||{},{base64:!0,compression:"STORE",type:"base64",comment:null}),d.checkSupport(a.type);var b,c,e=[],g=0,j=0,k=d.transformTo("string",this.utf8encode(a.comment||this.comment||""));for(var l in this.files)if(this.files.hasOwnProperty(l)){var o=this.files[l],p=o.options.compression||a.compression.toUpperCase(),q=i[p];if(!q)throw new Error(p+" is not a valid compression method !");var r=y.call(this,o,q),u=z.call(this,l,o,r,g);g+=u.fileRecord.length+r.compressedSize,j+=u.dirRecord.length,e.push(u)}var v="";v=f.CENTRAL_DIRECTORY_END+"\x00\x00\x00\x00"+s(e.length,2)+s(e.length,2)+s(j,4)+s(g,4)+s(k.length,2)+k;var w=a.type.toLowerCase();for(b="uint8array"===w||"arraybuffer"===w||"blob"===w||"nodebuffer"===w?new n(g+j+v.length):new m(g+j+v.length),c=0;c<e.length;c++)b.append(e[c].fileRecord),b.append(e[c].compressedObject.compressedContent);for(c=0;c<e.length;c++)b.append(e[c].dirRecord);b.append(v);var x=b.finalize();switch(a.type.toLowerCase()){case"uint8array":case"arraybuffer":case"nodebuffer":return d.transformTo(a.type.toLowerCase(),x);case"blob":return d.arrayBuffer2Blob(d.transformTo("arraybuffer",x));case"base64":return a.base64?h.encode(x):x;default:return x}},crc32:function(a,b){return e(a,b)},utf8encode:function(a){return d.transformTo("string",l.utf8encode(a))},utf8decode:function(a){return 
l.utf8decode(a)}};b.exports=A},{"./base64":1,"./compressedObject":2,"./compressions":3,"./crc32":4,"./defaults":6,"./nodeBuffer":11,"./signature":14,"./stringWriter":16,"./support":17,"./uint8ArrayWriter":19,"./utf8":20,"./utils":21}],14:[function(a,b,c){"use strict";c.LOCAL_FILE_HEADER="PK",c.CENTRAL_FILE_HEADER="PK",c.CENTRAL_DIRECTORY_END="PK",c.ZIP64_CENTRAL_DIRECTORY_LOCATOR="PK",c.ZIP64_CENTRAL_DIRECTORY_END="PK",c.DATA_DESCRIPTOR="PK\b"},{}],15:[function(a,b){"use strict";function c(a,b){this.data=a,b||(this.data=e.string2binary(this.data)),this.length=this.data.length,this.index=0}var d=a("./dataReader"),e=a("./utils");c.prototype=new d,c.prototype.byteAt=function(a){return this.data.charCodeAt(a)},c.prototype.lastIndexOfSignature=function(a){return this.data.lastIndexOf(a)},c.prototype.readData=function(a){this.checkOffset(a);var b=this.data.slice(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./dataReader":5,"./utils":21}],16:[function(a,b){"use strict";var c=a("./utils"),d=function(){this.data=[]};d.prototype={append:function(a){a=c.transformTo("string",a),this.data.push(a)},finalize:function(){return this.data.join("")}},b.exports=d},{"./utils":21}],17:[function(a,b,c){(function(a){"use strict";if(c.base64=!0,c.array=!0,c.string=!0,c.arraybuffer="undefined"!=typeof ArrayBuffer&&"undefined"!=typeof Uint8Array,c.nodebuffer="undefined"!=typeof a,c.uint8array="undefined"!=typeof Uint8Array,"undefined"==typeof ArrayBuffer)c.blob=!1;else{var b=new ArrayBuffer(0);try{c.blob=0===new Blob([b],{type:"application/zip"}).size}catch(d){try{var e=window.BlobBuilder||window.WebKitBlobBuilder||window.MozBlobBuilder||window.MSBlobBuilder,f=new e;f.append(b),c.blob=0===f.getBlob("application/zip").size}catch(d){c.blob=!1}}}}).call(this,"undefined"!=typeof Buffer?Buffer:void 0)},{}],18:[function(a,b){"use strict";function c(a){a&&(this.data=a,this.length=this.data.length,this.index=0)}var d=a("./dataReader");c.prototype=new d,c.prototype.byteAt=function(a){return this.data[a]},c.prototype.lastIndexOfSignature=function(a){for(var b=a.charCodeAt(0),c=a.charCodeAt(1),d=a.charCodeAt(2),e=a.charCodeAt(3),f=this.length-4;f>=0;--f)if(this.data[f]===b&&this.data[f+1]===c&&this.data[f+2]===d&&this.data[f+3]===e)return f;return-1},c.prototype.readData=function(a){if(this.checkOffset(a),0===a)return new Uint8Array(0);var b=this.data.subarray(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./dataReader":5}],19:[function(a,b){"use strict";var c=a("./utils"),d=function(a){this.data=new Uint8Array(a),this.index=0};d.prototype={append:function(a){0!==a.length&&(a=c.transformTo("uint8array",a),this.data.set(a,this.index),this.index+=a.length)},finalize:function(){return this.data}},b.exports=d},{"./utils":21}],20:[function(a,b,c){"use strict";for(var d=a("./utils"),e=a("./support"),f=a("./nodeBuffer"),g=new Array(256),h=0;256>h;h++)g[h]=h>=252?6:h>=248?5:h>=240?4:h>=224?3:h>=192?2:1;g[254]=g[254]=1;var i=function(a){var b,c,d,f,g,h=a.length,i=0;for(f=0;h>f;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),i+=128>c?1:2048>c?2:65536>c?3:4;for(b=e.uint8array?new Uint8Array(i):new 
Array(i),g=0,f=0;i>g;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),128>c?b[g++]=c:2048>c?(b[g++]=192|c>>>6,b[g++]=128|63&c):65536>c?(b[g++]=224|c>>>12,b[g++]=128|c>>>6&63,b[g++]=128|63&c):(b[g++]=240|c>>>18,b[g++]=128|c>>>12&63,b[g++]=128|c>>>6&63,b[g++]=128|63&c);return b},j=function(a,b){var c;for(b=b||a.length,b>a.length&&(b=a.length),c=b-1;c>=0&&128===(192&a[c]);)c--;return 0>c?b:0===c?b:c+g[a[c]]>b?c:b},k=function(a){var b,c,e,f,h=a.length,i=new Array(2*h);for(c=0,b=0;h>b;)if(e=a[b++],128>e)i[c++]=e;else if(f=g[e],f>4)i[c++]=65533,b+=f-1;else{for(e&=2===f?31:3===f?15:7;f>1&&h>b;)e=e<<6|63&a[b++],f--;f>1?i[c++]=65533:65536>e?i[c++]=e:(e-=65536,i[c++]=55296|e>>10&1023,i[c++]=56320|1023&e)}return i.length!==c&&(i.subarray?i=i.subarray(0,c):i.length=c),d.applyFromCharCode(i)};c.utf8encode=function(a){return e.nodebuffer?f(a,"utf-8"):i(a)},c.utf8decode=function(a){if(e.nodebuffer)return d.transformTo("nodebuffer",a).toString("utf-8");a=d.transformTo(e.uint8array?"uint8array":"array",a);for(var b=[],c=0,f=a.length,g=65536;f>c;){var h=j(a,Math.min(c+g,f));b.push(e.uint8array?k(a.subarray(c,h)):k(a.slice(c,h))),c=h}return b.join("")}},{"./nodeBuffer":11,"./support":17,"./utils":21}],21:[function(a,b,c){"use strict";function d(a){return a}function e(a,b){for(var c=0;c<a.length;++c)b[c]=255&a.charCodeAt(c);return b}function f(a){var b=65536,d=[],e=a.length,f=c.getTypeOf(a),g=0,h=!0;try{switch(f){case"uint8array":String.fromCharCode.apply(null,new Uint8Array(0));break;case"nodebuffer":String.fromCharCode.apply(null,j(0))}}catch(i){h=!1}if(!h){for(var k="",l=0;l<a.length;l++)k+=String.fromCharCode(a[l]);return k}for(;e>g&&b>1;)try{d.push("array"===f||"nodebuffer"===f?String.fromCharCode.apply(null,a.slice(g,Math.min(g+b,e))):String.fromCharCode.apply(null,a.subarray(g,Math.min(g+b,e)))),g+=b}catch(i){b=Math.floor(b/2)}return d.join("")}function g(a,b){for(var c=0;c<a.length;c++)b[c]=a[c];return b}var h=a("./support"),i=a("./compressions"),j=a("./nodeBuffer");c.string2binary=function(a){for(var b="",c=0;c<a.length;c++)b+=String.fromCharCode(255&a.charCodeAt(c));return b},c.arrayBuffer2Blob=function(a){c.checkSupport("blob");try{return new Blob([a],{type:"application/zip"})}catch(b){try{var d=window.BlobBuilder||window.WebKitBlobBuilder||window.MozBlobBuilder||window.MSBlobBuilder,e=new d;return e.append(a),e.getBlob("application/zip")}catch(b){throw new Error("Bug : can't construct the Blob.")}}},c.applyFromCharCode=f;var k={};k.string={string:d,array:function(a){return e(a,new Array(a.length))},arraybuffer:function(a){return k.string.uint8array(a).buffer},uint8array:function(a){return e(a,new Uint8Array(a.length))},nodebuffer:function(a){return e(a,j(a.length))}},k.array={string:f,array:d,arraybuffer:function(a){return new Uint8Array(a).buffer},uint8array:function(a){return new Uint8Array(a)},nodebuffer:function(a){return j(a)}},k.arraybuffer={string:function(a){return f(new Uint8Array(a))},array:function(a){return g(new Uint8Array(a),new Array(a.byteLength))},arraybuffer:d,uint8array:function(a){return new Uint8Array(a)},nodebuffer:function(a){return j(new Uint8Array(a))}},k.uint8array={string:f,array:function(a){return g(a,new Array(a.length))},arraybuffer:function(a){return a.buffer},uint8array:d,nodebuffer:function(a){return j(a)}},k.nodebuffer={string:f,array:function(a){return g(a,new Array(a.length))},arraybuffer:function(a){return k.nodebuffer.uint8array(a).buffer},uint8array:function(a){return g(a,new 
Uint8Array(a.length))},nodebuffer:d},c.transformTo=function(a,b){if(b||(b=""),!a)return b;c.checkSupport(a);var d=c.getTypeOf(b),e=k[d][a](b);return e},c.getTypeOf=function(a){return"string"==typeof a?"string":"[object Array]"===Object.prototype.toString.call(a)?"array":h.nodebuffer&&j.test(a)?"nodebuffer":h.uint8array&&a instanceof Uint8Array?"uint8array":h.arraybuffer&&a instanceof ArrayBuffer?"arraybuffer":void 0},c.checkSupport=function(a){var b=h[a.toLowerCase()];if(!b)throw new Error(a+" is not supported by this browser")},c.MAX_VALUE_16BITS=65535,c.MAX_VALUE_32BITS=-1,c.pretty=function(a){var b,c,d="";for(c=0;c<(a||"").length;c++)b=a.charCodeAt(c),d+="\\x"+(16>b?"0":"")+b.toString(16).toUpperCase();return d},c.findCompression=function(a){for(var b in i)if(i.hasOwnProperty(b)&&i[b].magic===a)return i[b];return null},c.isRegExp=function(a){return"[object RegExp]"===Object.prototype.toString.call(a)}},{"./compressions":3,"./nodeBuffer":11,"./support":17}],22:[function(a,b){"use strict";function c(a,b){this.files=[],this.loadOptions=b,a&&this.load(a)}var d=a("./stringReader"),e=a("./nodeBufferReader"),f=a("./uint8ArrayReader"),g=a("./utils"),h=a("./signature"),i=a("./zipEntry"),j=a("./support"),k=a("./object");c.prototype={checkSignature:function(a){var b=this.reader.readString(4);if(b!==a)throw new Error("Corrupted zip or bug : unexpected signature ("+g.pretty(b)+", expected "+g.pretty(a)+")")},readBlockEndOfCentral:function(){this.diskNumber=this.reader.readInt(2),this.diskWithCentralDirStart=this.reader.readInt(2),this.centralDirRecordsOnThisDisk=this.reader.readInt(2),this.centralDirRecords=this.reader.readInt(2),this.centralDirSize=this.reader.readInt(4),this.centralDirOffset=this.reader.readInt(4),this.zipCommentLength=this.reader.readInt(2),this.zipComment=this.reader.readString(this.zipCommentLength),this.zipComment=k.utf8decode(this.zipComment)},readBlockZip64EndOfCentral:function(){this.zip64EndOfCentralSize=this.reader.readInt(8),this.versionMadeBy=this.reader.readString(2),this.versionNeeded=this.reader.readInt(2),this.diskNumber=this.reader.readInt(4),this.diskWithCentralDirStart=this.reader.readInt(4),this.centralDirRecordsOnThisDisk=this.reader.readInt(8),this.centralDirRecords=this.reader.readInt(8),this.centralDirSize=this.reader.readInt(8),this.centralDirOffset=this.reader.readInt(8),this.zip64ExtensibleData={};for(var a,b,c,d=this.zip64EndOfCentralSize-44,e=0;d>e;)a=this.reader.readInt(2),b=this.reader.readInt(4),c=this.reader.readString(b),this.zip64ExtensibleData[a]={id:a,length:b,value:c}},readBlockZip64EndOfCentralLocator:function(){if(this.diskWithZip64CentralDirStart=this.reader.readInt(4),this.relativeOffsetEndOfZip64CentralDir=this.reader.readInt(8),this.disksCount=this.reader.readInt(4),this.disksCount>1)throw new Error("Multi-volumes zip are not supported")},readLocalFiles:function(){var a,b;for(a=0;a<this.files.length;a++)b=this.files[a],this.reader.setIndex(b.localHeaderOffset),this.checkSignature(h.LOCAL_FILE_HEADER),b.readLocalPart(this.reader),b.handleUTF8()},readCentralDir:function(){var a;for(this.reader.setIndex(this.centralDirOffset);this.reader.readString(4)===h.CENTRAL_FILE_HEADER;)a=new i({zip64:this.zip64},this.loadOptions),a.readCentralPart(this.reader),this.files.push(a)},readEndOfCentral:function(){var a=this.reader.lastIndexOfSignature(h.CENTRAL_DIRECTORY_END);if(-1===a)throw new Error("Corrupted zip : can't find end of central 
directory");if(this.reader.setIndex(a),this.checkSignature(h.CENTRAL_DIRECTORY_END),this.readBlockEndOfCentral(),this.diskNumber===g.MAX_VALUE_16BITS||this.diskWithCentralDirStart===g.MAX_VALUE_16BITS||this.centralDirRecordsOnThisDisk===g.MAX_VALUE_16BITS||this.centralDirRecords===g.MAX_VALUE_16BITS||this.centralDirSize===g.MAX_VALUE_32BITS||this.centralDirOffset===g.MAX_VALUE_32BITS){if(this.zip64=!0,a=this.reader.lastIndexOfSignature(h.ZIP64_CENTRAL_DIRECTORY_LOCATOR),-1===a)throw new Error("Corrupted zip : can't find the ZIP64 end of central directory locator");this.reader.setIndex(a),this.checkSignature(h.ZIP64_CENTRAL_DIRECTORY_LOCATOR),this.readBlockZip64EndOfCentralLocator(),this.reader.setIndex(this.relativeOffsetEndOfZip64CentralDir),this.checkSignature(h.ZIP64_CENTRAL_DIRECTORY_END),this.readBlockZip64EndOfCentral()}},prepareReader:function(a){var b=g.getTypeOf(a);this.reader="string"!==b||j.uint8array?"nodebuffer"===b?new e(a):new f(g.transformTo("uint8array",a)):new d(a,this.loadOptions.optimizedBinaryString)},load:function(a){this.prepareReader(a),this.readEndOfCentral(),this.readCentralDir(),this.readLocalFiles()}},b.exports=c},{"./nodeBufferReader":12,"./object":13,"./signature":14,"./stringReader":15,"./support":17,"./uint8ArrayReader":18,"./utils":21,"./zipEntry":23}],23:[function(a,b){"use strict";function c(a,b){this.options=a,this.loadOptions=b}var d=a("./stringReader"),e=a("./utils"),f=a("./compressedObject"),g=a("./object");c.prototype={isEncrypted:function(){return 1===(1&this.bitFlag)},useUTF8:function(){return 2048===(2048&this.bitFlag)},prepareCompressedContent:function(a,b,c){return function(){var d=a.index;a.setIndex(b);var e=a.readData(c);return a.setIndex(d),e}},prepareContent:function(a,b,c,d,f){return function(){var a=e.transformTo(d.uncompressInputType,this.getCompressedContent()),b=d.uncompress(a);if(b.length!==f)throw new Error("Bug : uncompressed data size mismatch");return b}},readLocalPart:function(a){var b,c;if(a.skip(22),this.fileNameLength=a.readInt(2),c=a.readInt(2),this.fileName=a.readString(this.fileNameLength),a.skip(c),-1==this.compressedSize||-1==this.uncompressedSize)throw new Error("Bug or corrupted zip : didn't get enough informations from the central directory (compressedSize == -1 || uncompressedSize == -1)");if(b=e.findCompression(this.compressionMethod),null===b)throw new Error("Corrupted zip : compression "+e.pretty(this.compressionMethod)+" unknown (inner file : "+this.fileName+")");if(this.decompressed=new f,this.decompressed.compressedSize=this.compressedSize,this.decompressed.uncompressedSize=this.uncompressedSize,this.decompressed.crc32=this.crc32,this.decompressed.compressionMethod=this.compressionMethod,this.decompressed.getCompressedContent=this.prepareCompressedContent(a,a.index,this.compressedSize,b),this.decompressed.getContent=this.prepareContent(a,a.index,this.compressedSize,b,this.uncompressedSize),this.loadOptions.checkCRC32&&(this.decompressed=e.transformTo("string",this.decompressed.getContent()),g.crc32(this.decompressed)!==this.crc32))throw new Error("Corrupted zip : CRC32 
mismatch")},readCentralPart:function(a){if(this.versionMadeBy=a.readString(2),this.versionNeeded=a.readInt(2),this.bitFlag=a.readInt(2),this.compressionMethod=a.readString(2),this.date=a.readDate(),this.crc32=a.readInt(4),this.compressedSize=a.readInt(4),this.uncompressedSize=a.readInt(4),this.fileNameLength=a.readInt(2),this.extraFieldsLength=a.readInt(2),this.fileCommentLength=a.readInt(2),this.diskNumberStart=a.readInt(2),this.internalFileAttributes=a.readInt(2),this.externalFileAttributes=a.readInt(4),this.localHeaderOffset=a.readInt(4),this.isEncrypted())throw new Error("Encrypted zip are not supported");this.fileName=a.readString(this.fileNameLength),this.readExtraFields(a),this.parseZIP64ExtraField(a),this.fileComment=a.readString(this.fileCommentLength),this.dir=16&this.externalFileAttributes?!0:!1},parseZIP64ExtraField:function(){if(this.extraFields[1]){var a=new d(this.extraFields[1].value);this.uncompressedSize===e.MAX_VALUE_32BITS&&(this.uncompressedSize=a.readInt(8)),this.compressedSize===e.MAX_VALUE_32BITS&&(this.compressedSize=a.readInt(8)),this.localHeaderOffset===e.MAX_VALUE_32BITS&&(this.localHeaderOffset=a.readInt(8)),this.diskNumberStart===e.MAX_VALUE_32BITS&&(this.diskNumberStart=a.readInt(4))}},readExtraFields:function(a){var b,c,d,e=a.index;for(this.extraFields=this.extraFields||{};a.index<e+this.extraFieldsLength;)b=a.readInt(2),c=a.readInt(2),d=a.readString(c),this.extraFields[b]={id:b,length:c,value:d}},handleUTF8:function(){if(this.useUTF8())this.fileName=g.utf8decode(this.fileName),this.fileComment=g.utf8decode(this.fileComment);else{var a=this.findExtraFieldUnicodePath();null!==a&&(this.fileName=a);var b=this.findExtraFieldUnicodeComment();null!==b&&(this.fileComment=b)}},findExtraFieldUnicodePath:function(){var a=this.extraFields[28789];if(a){var b=new d(a.value);return 1!==b.readInt(1)?null:g.crc32(this.fileName)!==b.readInt(4)?null:g.utf8decode(b.readString(a.length-5))}return null},findExtraFieldUnicodeComment:function(){var a=this.extraFields[25461];if(a){var b=new d(a.value);return 1!==b.readInt(1)?null:g.crc32(this.fileComment)!==b.readInt(4)?null:g.utf8decode(b.readString(a.length-5))}return null}},b.exports=c},{"./compressedObject":2,"./object":13,"./stringReader":15,"./utils":21}],24:[function(a,b){"use strict";var c=a("./lib/utils/common").assign,d=a("./lib/deflate"),e=a("./lib/inflate"),f=a("./lib/zlib/constants"),g={};c(g,d,e,f),b.exports=g},{"./lib/deflate":25,"./lib/inflate":26,"./lib/utils/common":27,"./lib/zlib/constants":30}],25:[function(a,b,c){"use strict";function d(a,b){var c=new s(b);if(c.push(a,!0),c.err)throw c.msg;return c.result}function e(a,b){return b=b||{},b.raw=!0,d(a,b)}function f(a,b){return b=b||{},b.gzip=!0,d(a,b)}var g=a("./zlib/deflate.js"),h=a("./utils/common"),i=a("./utils/strings"),j=a("./zlib/messages"),k=a("./zlib/zstream"),l=0,m=4,n=0,o=1,p=-1,q=0,r=8,s=function(a){this.options=h.assign({level:p,method:r,chunkSize:16384,windowBits:15,memLevel:8,strategy:q,to:""},a||{});var b=this.options;b.raw&&b.windowBits>0?b.windowBits=-b.windowBits:b.gzip&&b.windowBits>0&&b.windowBits<16&&(b.windowBits+=16),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new k,this.strm.avail_out=0;var c=g.deflateInit2(this.strm,b.level,b.method,b.windowBits,b.memLevel,b.strategy);if(c!==n)throw new Error(j[c]);b.header&&g.deflateSetHeader(this.strm,b.header)};s.prototype.push=function(a,b){var c,d,e=this.strm,f=this.options.chunkSize;if(this.ended)return!1;d=b===~~b?b:b===!0?m:l,e.input="string"==typeof 
a?i.string2buf(a):a,e.next_in=0,e.avail_in=e.input.length;do{if(0===e.avail_out&&(e.output=new h.Buf8(f),e.next_out=0,e.avail_out=f),c=g.deflate(e,d),c!==o&&c!==n)return this.onEnd(c),this.ended=!0,!1;(0===e.avail_out||0===e.avail_in&&d===m)&&this.onData("string"===this.options.to?i.buf2binstring(h.shrinkBuf(e.output,e.next_out)):h.shrinkBuf(e.output,e.next_out))}while((e.avail_in>0||0===e.avail_out)&&c!==o);return d===m?(c=g.deflateEnd(this.strm),this.onEnd(c),this.ended=!0,c===n):!0},s.prototype.onData=function(a){this.chunks.push(a)},s.prototype.onEnd=function(a){a===n&&(this.result="string"===this.options.to?this.chunks.join(""):h.flattenChunks(this.chunks)),this.chunks=[],this.err=a,this.msg=this.strm.msg},c.Deflate=s,c.deflate=d,c.deflateRaw=e,c.gzip=f},{"./utils/common":27,"./utils/strings":28,"./zlib/deflate.js":32,"./zlib/messages":37,"./zlib/zstream":39}],26:[function(a,b,c){"use strict";function d(a,b){var c=new m(b);if(c.push(a,!0),c.err)throw c.msg;return c.result}function e(a,b){return b=b||{},b.raw=!0,d(a,b)}var f=a("./zlib/inflate.js"),g=a("./utils/common"),h=a("./utils/strings"),i=a("./zlib/constants"),j=a("./zlib/messages"),k=a("./zlib/zstream"),l=a("./zlib/gzheader"),m=function(a){this.options=g.assign({chunkSize:16384,windowBits:0,to:""},a||{});var b=this.options;b.raw&&b.windowBits>=0&&b.windowBits<16&&(b.windowBits=-b.windowBits,0===b.windowBits&&(b.windowBits=-15)),!(b.windowBits>=0&&b.windowBits<16)||a&&a.windowBits||(b.windowBits+=32),b.windowBits>15&&b.windowBits<48&&0===(15&b.windowBits)&&(b.windowBits|=15),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new k,this.strm.avail_out=0;var c=f.inflateInit2(this.strm,b.windowBits);if(c!==i.Z_OK)throw new Error(j[c]);this.header=new l,f.inflateGetHeader(this.strm,this.header)};m.prototype.push=function(a,b){var c,d,e,j,k,l=this.strm,m=this.options.chunkSize;if(this.ended)return!1;d=b===~~b?b:b===!0?i.Z_FINISH:i.Z_NO_FLUSH,l.input="string"==typeof a?h.binstring2buf(a):a,l.next_in=0,l.avail_in=l.input.length;do{if(0===l.avail_out&&(l.output=new g.Buf8(m),l.next_out=0,l.avail_out=m),c=f.inflate(l,i.Z_NO_FLUSH),c!==i.Z_STREAM_END&&c!==i.Z_OK)return this.onEnd(c),this.ended=!0,!1;l.next_out&&(0===l.avail_out||c===i.Z_STREAM_END||0===l.avail_in&&d===i.Z_FINISH)&&("string"===this.options.to?(e=h.utf8border(l.output,l.next_out),j=l.next_out-e,k=h.buf2string(l.output,e),l.next_out=j,l.avail_out=m-j,j&&g.arraySet(l.output,l.output,e,j,0),this.onData(k)):this.onData(g.shrinkBuf(l.output,l.next_out)))}while(l.avail_in>0&&c!==i.Z_STREAM_END);return c===i.Z_STREAM_END&&(d=i.Z_FINISH),d===i.Z_FINISH?(c=f.inflateEnd(this.strm),this.onEnd(c),this.ended=!0,c===i.Z_OK):!0},m.prototype.onData=function(a){this.chunks.push(a)},m.prototype.onEnd=function(a){a===i.Z_OK&&(this.result="string"===this.options.to?this.chunks.join(""):g.flattenChunks(this.chunks)),this.chunks=[],this.err=a,this.msg=this.strm.msg},c.Inflate=m,c.inflate=d,c.inflateRaw=e,c.ungzip=d},{"./utils/common":27,"./utils/strings":28,"./zlib/constants":30,"./zlib/gzheader":33,"./zlib/inflate.js":35,"./zlib/messages":37,"./zlib/zstream":39}],27:[function(a,b,c){"use strict";var d="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Int32Array;c.assign=function(a){for(var b=Array.prototype.slice.call(arguments,1);b.length;){var c=b.shift();if(c){if("object"!=typeof c)throw new TypeError(c+"must be non-object");for(var d in c)c.hasOwnProperty(d)&&(a[d]=c[d])}}return a},c.shrinkBuf=function(a,b){return 
a.length===b?a:a.subarray?a.subarray(0,b):(a.length=b,a)};var e={arraySet:function(a,b,c,d,e){if(b.subarray&&a.subarray)return void a.set(b.subarray(c,c+d),e);for(var f=0;d>f;f++)a[e+f]=b[c+f]},flattenChunks:function(a){var b,c,d,e,f,g;for(d=0,b=0,c=a.length;c>b;b++)d+=a[b].length;for(g=new Uint8Array(d),e=0,b=0,c=a.length;c>b;b++)f=a[b],g.set(f,e),e+=f.length;return g}},f={arraySet:function(a,b,c,d,e){for(var f=0;d>f;f++)a[e+f]=b[c+f]},flattenChunks:function(a){return[].concat.apply([],a)}};c.setTyped=function(a){a?(c.Buf8=Uint8Array,c.Buf16=Uint16Array,c.Buf32=Int32Array,c.assign(c,e)):(c.Buf8=Array,c.Buf16=Array,c.Buf32=Array,c.assign(c,f))},c.setTyped(d)},{}],28:[function(a,b,c){"use strict";function d(a,b){if(65537>b&&(a.subarray&&g||!a.subarray&&f))return String.fromCharCode.apply(null,e.shrinkBuf(a,b));for(var c="",d=0;b>d;d++)c+=String.fromCharCode(a[d]);return c}var e=a("./common"),f=!0,g=!0;try{String.fromCharCode.apply(null,[0])}catch(h){f=!1}try{String.fromCharCode.apply(null,new Uint8Array(1))}catch(h){g=!1}for(var i=new e.Buf8(256),j=0;256>j;j++)i[j]=j>=252?6:j>=248?5:j>=240?4:j>=224?3:j>=192?2:1;i[254]=i[254]=1,c.string2buf=function(a){var b,c,d,f,g,h=a.length,i=0;for(f=0;h>f;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),i+=128>c?1:2048>c?2:65536>c?3:4;for(b=new e.Buf8(i),g=0,f=0;i>g;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),128>c?b[g++]=c:2048>c?(b[g++]=192|c>>>6,b[g++]=128|63&c):65536>c?(b[g++]=224|c>>>12,b[g++]=128|c>>>6&63,b[g++]=128|63&c):(b[g++]=240|c>>>18,b[g++]=128|c>>>12&63,b[g++]=128|c>>>6&63,b[g++]=128|63&c);return b},c.buf2binstring=function(a){return d(a,a.length)},c.binstring2buf=function(a){for(var b=new e.Buf8(a.length),c=0,d=b.length;d>c;c++)b[c]=a.charCodeAt(c);return b},c.buf2string=function(a,b){var c,e,f,g,h=b||a.length,j=new Array(2*h);for(e=0,c=0;h>c;)if(f=a[c++],128>f)j[e++]=f;else if(g=i[f],g>4)j[e++]=65533,c+=g-1;else{for(f&=2===g?31:3===g?15:7;g>1&&h>c;)f=f<<6|63&a[c++],g--;g>1?j[e++]=65533:65536>f?j[e++]=f:(f-=65536,j[e++]=55296|f>>10&1023,j[e++]=56320|1023&f)}return d(j,e)},c.utf8border=function(a,b){var c;for(b=b||a.length,b>a.length&&(b=a.length),c=b-1;c>=0&&128===(192&a[c]);)c--;return 0>c?b:0===c?b:c+i[a[c]]>b?c:b}},{"./common":27}],29:[function(a,b){"use strict";function c(a,b,c,d){for(var e=65535&a|0,f=a>>>16&65535|0,g=0;0!==c;){g=c>2e3?2e3:c,c-=g;do e=e+b[d++]|0,f=f+e|0;while(--g);e%=65521,f%=65521}return e|f<<16|0}b.exports=c},{}],30:[function(a,b){b.exports={Z_NO_FLUSH:0,Z_PARTIAL_FLUSH:1,Z_SYNC_FLUSH:2,Z_FULL_FLUSH:3,Z_FINISH:4,Z_BLOCK:5,Z_TREES:6,Z_OK:0,Z_STREAM_END:1,Z_NEED_DICT:2,Z_ERRNO:-1,Z_STREAM_ERROR:-2,Z_DATA_ERROR:-3,Z_BUF_ERROR:-5,Z_NO_COMPRESSION:0,Z_BEST_SPEED:1,Z_BEST_COMPRESSION:9,Z_DEFAULT_COMPRESSION:-1,Z_FILTERED:1,Z_HUFFMAN_ONLY:2,Z_RLE:3,Z_FIXED:4,Z_DEFAULT_STRATEGY:0,Z_BINARY:0,Z_TEXT:1,Z_UNKNOWN:2,Z_DEFLATED:8}},{}],31:[function(a,b){"use strict";function c(){for(var a,b=[],c=0;256>c;c++){a=c;for(var d=0;8>d;d++)a=1&a?3988292384^a>>>1:a>>>1;b[c]=a}return b}function d(a,b,c,d){var f=e,g=d+c;a=-1^a;for(var h=d;g>h;h++)a=a>>>8^f[255&(a^b[h])];return-1^a}var e=c();b.exports=d},{}],32:[function(a,b,c){"use strict";function d(a,b){return a.msg=G[b],b}function e(a){return(a<<1)-(a>4?9:0)}function f(a){for(var b=a.length;--b>=0;)a[b]=0}function g(a){var 
b=a.state,c=b.pending;c>a.avail_out&&(c=a.avail_out),0!==c&&(C.arraySet(a.output,b.pending_buf,b.pending_out,c,a.next_out),a.next_out+=c,b.pending_out+=c,a.total_out+=c,a.avail_out-=c,b.pending-=c,0===b.pending&&(b.pending_out=0))}function h(a,b){D._tr_flush_block(a,a.block_start>=0?a.block_start:-1,a.strstart-a.block_start,b),a.block_start=a.strstart,g(a.strm)}function i(a,b){a.pending_buf[a.pending++]=b}function j(a,b){a.pending_buf[a.pending++]=b>>>8&255,a.pending_buf[a.pending++]=255&b}function k(a,b,c,d){var e=a.avail_in;return e>d&&(e=d),0===e?0:(a.avail_in-=e,C.arraySet(b,a.input,a.next_in,e,c),1===a.state.wrap?a.adler=E(a.adler,b,e,c):2===a.state.wrap&&(a.adler=F(a.adler,b,e,c)),a.next_in+=e,a.total_in+=e,e)}function l(a,b){var c,d,e=a.max_chain_length,f=a.strstart,g=a.prev_length,h=a.nice_match,i=a.strstart>a.w_size-jb?a.strstart-(a.w_size-jb):0,j=a.window,k=a.w_mask,l=a.prev,m=a.strstart+ib,n=j[f+g-1],o=j[f+g];a.prev_length>=a.good_match&&(e>>=2),h>a.lookahead&&(h=a.lookahead);do if(c=b,j[c+g]===o&&j[c+g-1]===n&&j[c]===j[f]&&j[++c]===j[f+1]){f+=2,c++;do;while(j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&m>f);if(d=ib-(m-f),f=m-ib,d>g){if(a.match_start=b,g=d,d>=h)break;n=j[f+g-1],o=j[f+g]}}while((b=l[b&k])>i&&0!==--e);return g<=a.lookahead?g:a.lookahead}function m(a){var b,c,d,e,f,g=a.w_size;do{if(e=a.window_size-a.lookahead-a.strstart,a.strstart>=g+(g-jb)){C.arraySet(a.window,a.window,g,g,0),a.match_start-=g,a.strstart-=g,a.block_start-=g,c=a.hash_size,b=c;do d=a.head[--b],a.head[b]=d>=g?d-g:0;while(--c);c=g,b=c;do d=a.prev[--b],a.prev[b]=d>=g?d-g:0;while(--c);e+=g}if(0===a.strm.avail_in)break;if(c=k(a.strm,a.window,a.strstart+a.lookahead,e),a.lookahead+=c,a.lookahead+a.insert>=hb)for(f=a.strstart-a.insert,a.ins_h=a.window[f],a.ins_h=(a.ins_h<<a.hash_shift^a.window[f+1])&a.hash_mask;a.insert&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[f+hb-1])&a.hash_mask,a.prev[f&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=f,f++,a.insert--,!(a.lookahead+a.insert<hb)););}while(a.lookahead<jb&&0!==a.strm.avail_in)}function n(a,b){var c=65535;for(c>a.pending_buf_size-5&&(c=a.pending_buf_size-5);;){if(a.lookahead<=1){if(m(a),0===a.lookahead&&b===H)return sb;if(0===a.lookahead)break}a.strstart+=a.lookahead,a.lookahead=0;var d=a.block_start+c;if((0===a.strstart||a.strstart>=d)&&(a.lookahead=a.strstart-d,a.strstart=d,h(a,!1),0===a.strm.avail_out))return sb;if(a.strstart-a.block_start>=a.w_size-jb&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.strstart>a.block_start&&(h(a,!1),0===a.strm.avail_out)?sb:sb}function o(a,b){for(var c,d;;){if(a.lookahead<jb){if(m(a),a.lookahead<jb&&b===H)return sb;if(0===a.lookahead)break}if(c=0,a.lookahead>=hb&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart),0!==c&&a.strstart-c<=a.w_size-jb&&(a.match_length=l(a,c)),a.match_length>=hb)if(d=D._tr_tally(a,a.strstart-a.match_start,a.match_length-hb),a.lookahead-=a.match_length,a.match_length<=a.max_lazy_match&&a.lookahead>=hb){a.match_length--;do a.strstart++,a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart;while(0!==--a.match_length);a.strstart++}else a.strstart+=a.match_length,a.match_length=0,a.ins_h=a.window[a.strstart],a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+1])&a.hash_mask;else 
d=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++;if(d&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=a.strstart<hb-1?a.strstart:hb-1,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function p(a,b){for(var c,d,e;;){if(a.lookahead<jb){if(m(a),a.lookahead<jb&&b===H)return sb;if(0===a.lookahead)break}if(c=0,a.lookahead>=hb&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart),a.prev_length=a.match_length,a.prev_match=a.match_start,a.match_length=hb-1,0!==c&&a.prev_length<a.max_lazy_match&&a.strstart-c<=a.w_size-jb&&(a.match_length=l(a,c),a.match_length<=5&&(a.strategy===S||a.match_length===hb&&a.strstart-a.match_start>4096)&&(a.match_length=hb-1)),a.prev_length>=hb&&a.match_length<=a.prev_length){e=a.strstart+a.lookahead-hb,d=D._tr_tally(a,a.strstart-1-a.prev_match,a.prev_length-hb),a.lookahead-=a.prev_length-1,a.prev_length-=2;do++a.strstart<=e&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart);while(0!==--a.prev_length);if(a.match_available=0,a.match_length=hb-1,a.strstart++,d&&(h(a,!1),0===a.strm.avail_out))return sb}else if(a.match_available){if(d=D._tr_tally(a,0,a.window[a.strstart-1]),d&&h(a,!1),a.strstart++,a.lookahead--,0===a.strm.avail_out)return sb}else a.match_available=1,a.strstart++,a.lookahead--}return a.match_available&&(d=D._tr_tally(a,0,a.window[a.strstart-1]),a.match_available=0),a.insert=a.strstart<hb-1?a.strstart:hb-1,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function q(a,b){for(var c,d,e,f,g=a.window;;){if(a.lookahead<=ib){if(m(a),a.lookahead<=ib&&b===H)return sb;if(0===a.lookahead)break}if(a.match_length=0,a.lookahead>=hb&&a.strstart>0&&(e=a.strstart-1,d=g[e],d===g[++e]&&d===g[++e]&&d===g[++e])){f=a.strstart+ib;do;while(d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&f>e);a.match_length=ib-(f-e),a.match_length>a.lookahead&&(a.match_length=a.lookahead)}if(a.match_length>=hb?(c=D._tr_tally(a,1,a.match_length-hb),a.lookahead-=a.match_length,a.strstart+=a.match_length,a.match_length=0):(c=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++),c&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function r(a,b){for(var c;;){if(0===a.lookahead&&(m(a),0===a.lookahead)){if(b===H)return sb;break}if(a.match_length=0,c=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++,c&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function s(a){a.window_size=2*a.w_size,f(a.head),a.max_lazy_match=B[a.level].max_lazy,a.good_match=B[a.level].good_length,a.nice_match=B[a.level].nice_length,a.max_chain_length=B[a.level].max_chain,a.strstart=0,a.block_start=0,a.lookahead=0,a.insert=0,a.match_length=a.prev_length=hb-1,a.match_available=0,a.ins_h=0}function 
t(){this.strm=null,this.status=0,this.pending_buf=null,this.pending_buf_size=0,this.pending_out=0,this.pending=0,this.wrap=0,this.gzhead=null,this.gzindex=0,this.method=Y,this.last_flush=-1,this.w_size=0,this.w_bits=0,this.w_mask=0,this.window=null,this.window_size=0,this.prev=null,this.head=null,this.ins_h=0,this.hash_size=0,this.hash_bits=0,this.hash_mask=0,this.hash_shift=0,this.block_start=0,this.match_length=0,this.prev_match=0,this.match_available=0,this.strstart=0,this.match_start=0,this.lookahead=0,this.prev_length=0,this.max_chain_length=0,this.max_lazy_match=0,this.level=0,this.strategy=0,this.good_match=0,this.nice_match=0,this.dyn_ltree=new C.Buf16(2*fb),this.dyn_dtree=new C.Buf16(2*(2*db+1)),this.bl_tree=new C.Buf16(2*(2*eb+1)),f(this.dyn_ltree),f(this.dyn_dtree),f(this.bl_tree),this.l_desc=null,this.d_desc=null,this.bl_desc=null,this.bl_count=new C.Buf16(gb+1),this.heap=new C.Buf16(2*cb+1),f(this.heap),this.heap_len=0,this.heap_max=0,this.depth=new C.Buf16(2*cb+1),f(this.depth),this.l_buf=0,this.lit_bufsize=0,this.last_lit=0,this.d_buf=0,this.opt_len=0,this.static_len=0,this.matches=0,this.insert=0,this.bi_buf=0,this.bi_valid=0}function u(a){var b;return a&&a.state?(a.total_in=a.total_out=0,a.data_type=X,b=a.state,b.pending=0,b.pending_out=0,b.wrap<0&&(b.wrap=-b.wrap),b.status=b.wrap?lb:qb,a.adler=2===b.wrap?0:1,b.last_flush=H,D._tr_init(b),M):d(a,O)}function v(a){var b=u(a);return b===M&&s(a.state),b}function w(a,b){return a&&a.state?2!==a.state.wrap?O:(a.state.gzhead=b,M):O}function x(a,b,c,e,f,g){if(!a)return O;var h=1;if(b===R&&(b=6),0>e?(h=0,e=-e):e>15&&(h=2,e-=16),1>f||f>Z||c!==Y||8>e||e>15||0>b||b>9||0>g||g>V)return d(a,O);8===e&&(e=9);var i=new t;return a.state=i,i.strm=a,i.wrap=h,i.gzhead=null,i.w_bits=e,i.w_size=1<<i.w_bits,i.w_mask=i.w_size-1,i.hash_bits=f+7,i.hash_size=1<<i.hash_bits,i.hash_mask=i.hash_size-1,i.hash_shift=~~((i.hash_bits+hb-1)/hb),i.window=new C.Buf8(2*i.w_size),i.head=new C.Buf16(i.hash_size),i.prev=new C.Buf16(i.w_size),i.lit_bufsize=1<<f+6,i.pending_buf_size=4*i.lit_bufsize,i.pending_buf=new C.Buf8(i.pending_buf_size),i.d_buf=i.lit_bufsize>>1,i.l_buf=3*i.lit_bufsize,i.level=b,i.strategy=g,i.method=c,v(a)}function y(a,b){return x(a,b,Y,$,_,W)}function z(a,b){var c,h,k,l;if(!a||!a.state||b>L||0>b)return a?d(a,O):O;if(h=a.state,!a.output||!a.input&&0!==a.avail_in||h.status===rb&&b!==K)return d(a,0===a.avail_out?Q:O);if(h.strm=a,c=h.last_flush,h.last_flush=b,h.status===lb)if(2===h.wrap)a.adler=0,i(h,31),i(h,139),i(h,8),h.gzhead?(i(h,(h.gzhead.text?1:0)+(h.gzhead.hcrc?2:0)+(h.gzhead.extra?4:0)+(h.gzhead.name?8:0)+(h.gzhead.comment?16:0)),i(h,255&h.gzhead.time),i(h,h.gzhead.time>>8&255),i(h,h.gzhead.time>>16&255),i(h,h.gzhead.time>>24&255),i(h,9===h.level?2:h.strategy>=T||h.level<2?4:0),i(h,255&h.gzhead.os),h.gzhead.extra&&h.gzhead.extra.length&&(i(h,255&h.gzhead.extra.length),i(h,h.gzhead.extra.length>>8&255)),h.gzhead.hcrc&&(a.adler=F(a.adler,h.pending_buf,h.pending,0)),h.gzindex=0,h.status=mb):(i(h,0),i(h,0),i(h,0),i(h,0),i(h,0),i(h,9===h.level?2:h.strategy>=T||h.level<2?4:0),i(h,wb),h.status=qb);else{var 
m=Y+(h.w_bits-8<<4)<<8,n=-1;n=h.strategy>=T||h.level<2?0:h.level<6?1:6===h.level?2:3,m|=n<<6,0!==h.strstart&&(m|=kb),m+=31-m%31,h.status=qb,j(h,m),0!==h.strstart&&(j(h,a.adler>>>16),j(h,65535&a.adler)),a.adler=1}if(h.status===mb)if(h.gzhead.extra){for(k=h.pending;h.gzindex<(65535&h.gzhead.extra.length)&&(h.pending!==h.pending_buf_size||(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending!==h.pending_buf_size));)i(h,255&h.gzhead.extra[h.gzindex]),h.gzindex++;h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),h.gzindex===h.gzhead.extra.length&&(h.gzindex=0,h.status=nb)}else h.status=nb;if(h.status===nb)if(h.gzhead.name){k=h.pending;do{if(h.pending===h.pending_buf_size&&(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending===h.pending_buf_size)){l=1;break}l=h.gzindex<h.gzhead.name.length?255&h.gzhead.name.charCodeAt(h.gzindex++):0,i(h,l)}while(0!==l);h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),0===l&&(h.gzindex=0,h.status=ob)}else h.status=ob;if(h.status===ob)if(h.gzhead.comment){k=h.pending;do{if(h.pending===h.pending_buf_size&&(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending===h.pending_buf_size)){l=1;break}l=h.gzindex<h.gzhead.comment.length?255&h.gzhead.comment.charCodeAt(h.gzindex++):0,i(h,l)}while(0!==l);h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),0===l&&(h.status=pb)}else h.status=pb;if(h.status===pb&&(h.gzhead.hcrc?(h.pending+2>h.pending_buf_size&&g(a),h.pending+2<=h.pending_buf_size&&(i(h,255&a.adler),i(h,a.adler>>8&255),a.adler=0,h.status=qb)):h.status=qb),0!==h.pending){if(g(a),0===a.avail_out)return h.last_flush=-1,M}else if(0===a.avail_in&&e(b)<=e(c)&&b!==K)return d(a,Q);if(h.status===rb&&0!==a.avail_in)return d(a,Q);if(0!==a.avail_in||0!==h.lookahead||b!==H&&h.status!==rb){var o=h.strategy===T?r(h,b):h.strategy===U?q(h,b):B[h.level].func(h,b);if((o===ub||o===vb)&&(h.status=rb),o===sb||o===ub)return 0===a.avail_out&&(h.last_flush=-1),M;if(o===tb&&(b===I?D._tr_align(h):b!==L&&(D._tr_stored_block(h,0,0,!1),b===J&&(f(h.head),0===h.lookahead&&(h.strstart=0,h.block_start=0,h.insert=0))),g(a),0===a.avail_out))return h.last_flush=-1,M}return b!==K?M:h.wrap<=0?N:(2===h.wrap?(i(h,255&a.adler),i(h,a.adler>>8&255),i(h,a.adler>>16&255),i(h,a.adler>>24&255),i(h,255&a.total_in),i(h,a.total_in>>8&255),i(h,a.total_in>>16&255),i(h,a.total_in>>24&255)):(j(h,a.adler>>>16),j(h,65535&a.adler)),g(a),h.wrap>0&&(h.wrap=-h.wrap),0!==h.pending?M:N)}function A(a){var b;return a&&a.state?(b=a.state.status,b!==lb&&b!==mb&&b!==nb&&b!==ob&&b!==pb&&b!==qb&&b!==rb?d(a,O):(a.state=null,b===qb?d(a,P):M)):O}var B,C=a("../utils/common"),D=a("./trees"),E=a("./adler32"),F=a("./crc32"),G=a("./messages"),H=0,I=1,J=3,K=4,L=5,M=0,N=1,O=-2,P=-3,Q=-5,R=-1,S=1,T=2,U=3,V=4,W=0,X=2,Y=8,Z=9,$=15,_=8,ab=29,bb=256,cb=bb+1+ab,db=30,eb=19,fb=2*cb+1,gb=15,hb=3,ib=258,jb=ib+hb+1,kb=32,lb=42,mb=69,nb=73,ob=91,pb=103,qb=113,rb=666,sb=1,tb=2,ub=3,vb=4,wb=3,xb=function(a,b,c,d,e){this.good_length=a,this.max_lazy=b,this.nice_length=c,this.max_chain=d,this.func=e};B=[new xb(0,0,0,0,n),new xb(4,4,8,4,o),new xb(4,5,16,8,o),new xb(4,6,32,32,o),new xb(4,4,16,16,p),new xb(8,16,32,32,p),new xb(8,16,128,128,p),new xb(8,32,128,256,p),new xb(32,128,258,1024,p),new 
xb(32,258,258,4096,p)],c.deflateInit=y,c.deflateInit2=x,c.deflateReset=v,c.deflateResetKeep=u,c.deflateSetHeader=w,c.deflate=z,c.deflateEnd=A,c.deflateInfo="pako deflate (from Nodeca project)"},{"../utils/common":27,"./adler32":29,"./crc32":31,"./messages":37,"./trees":38}],33:[function(a,b){"use strict";function c(){this.text=0,this.time=0,this.xflags=0,this.os=0,this.extra=null,this.extra_len=0,this.name="",this.comment="",this.hcrc=0,this.done=!1}b.exports=c},{}],34:[function(a,b){"use strict";var c=30,d=12;b.exports=function(a,b){var e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z,A,B,C;e=a.state,f=a.next_in,B=a.input,g=f+(a.avail_in-5),h=a.next_out,C=a.output,i=h-(b-a.avail_out),j=h+(a.avail_out-257),k=e.dmax,l=e.wsize,m=e.whave,n=e.wnext,o=e.window,p=e.hold,q=e.bits,r=e.lencode,s=e.distcode,t=(1<<e.lenbits)-1,u=(1<<e.distbits)-1;a:do{15>q&&(p+=B[f++]<<q,q+=8,p+=B[f++]<<q,q+=8),v=r[p&t];b:for(;;){if(w=v>>>24,p>>>=w,q-=w,w=v>>>16&255,0===w)C[h++]=65535&v;else{if(!(16&w)){if(0===(64&w)){v=r[(65535&v)+(p&(1<<w)-1)];continue b}if(32&w){e.mode=d;break a}a.msg="invalid literal/length code",e.mode=c;break a}x=65535&v,w&=15,w&&(w>q&&(p+=B[f++]<<q,q+=8),x+=p&(1<<w)-1,p>>>=w,q-=w),15>q&&(p+=B[f++]<<q,q+=8,p+=B[f++]<<q,q+=8),v=s[p&u];c:for(;;){if(w=v>>>24,p>>>=w,q-=w,w=v>>>16&255,!(16&w)){if(0===(64&w)){v=s[(65535&v)+(p&(1<<w)-1)];continue c}a.msg="invalid distance code",e.mode=c;break a}if(y=65535&v,w&=15,w>q&&(p+=B[f++]<<q,q+=8,w>q&&(p+=B[f++]<<q,q+=8)),y+=p&(1<<w)-1,y>k){a.msg="invalid distance too far back",e.mode=c;break a}if(p>>>=w,q-=w,w=h-i,y>w){if(w=y-w,w>m&&e.sane){a.msg="invalid distance too far back",e.mode=c;break a}if(z=0,A=o,0===n){if(z+=l-w,x>w){x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}}else if(w>n){if(z+=l+n-w,w-=n,x>w){x-=w;do C[h++]=o[z++];while(--w);if(z=0,x>n){w=n,x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}}}else if(z+=n-w,x>w){x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}for(;x>2;)C[h++]=A[z++],C[h++]=A[z++],C[h++]=A[z++],x-=3;x&&(C[h++]=A[z++],x>1&&(C[h++]=A[z++]))}else{z=h-y;do C[h++]=C[z++],C[h++]=C[z++],C[h++]=C[z++],x-=3;while(x>2);x&&(C[h++]=C[z++],x>1&&(C[h++]=C[z++]))}break}}break}}while(g>f&&j>h);x=q>>3,f-=x,q-=x<<3,p&=(1<<q)-1,a.next_in=f,a.next_out=h,a.avail_in=g>f?5+(g-f):5-(f-g),a.avail_out=j>h?257+(j-h):257-(h-j),e.hold=p,e.bits=q}},{}],35:[function(a,b,c){"use strict";function d(a){return(a>>>24&255)+(a>>>8&65280)+((65280&a)<<8)+((255&a)<<24)}function e(){this.mode=0,this.last=!1,this.wrap=0,this.havedict=!1,this.flags=0,this.dmax=0,this.check=0,this.total=0,this.head=null,this.wbits=0,this.wsize=0,this.whave=0,this.wnext=0,this.window=null,this.hold=0,this.bits=0,this.length=0,this.offset=0,this.extra=0,this.lencode=null,this.distcode=null,this.lenbits=0,this.distbits=0,this.ncode=0,this.nlen=0,this.ndist=0,this.have=0,this.next=null,this.lens=new r.Buf16(320),this.work=new r.Buf16(288),this.lendyn=null,this.distdyn=null,this.sane=0,this.back=0,this.was=0}function f(a){var b;return a&&a.state?(b=a.state,a.total_in=a.total_out=b.total=0,a.msg="",b.wrap&&(a.adler=1&b.wrap),b.mode=K,b.last=0,b.havedict=0,b.dmax=32768,b.head=null,b.hold=0,b.bits=0,b.lencode=b.lendyn=new r.Buf32(ob),b.distcode=b.distdyn=new r.Buf32(pb),b.sane=1,b.back=-1,C):F}function g(a){var b;return a&&a.state?(b=a.state,b.wsize=0,b.whave=0,b.wnext=0,f(a)):F}function h(a,b){var c,d;return a&&a.state?(d=a.state,0>b?(c=0,b=-b):(c=(b>>4)+1,48>b&&(b&=15)),b&&(8>b||b>15)?F:(null!==d.window&&d.wbits!==b&&(d.window=null),d.wrap=c,d.wbits=b,g(a))):F}function i(a,b){var c,d;return a?(d=new 
e,a.state=d,d.window=null,c=h(a,b),c!==C&&(a.state=null),c):F}function j(a){return i(a,rb)}function k(a){if(sb){var b;for(p=new r.Buf32(512),q=new r.Buf32(32),b=0;144>b;)a.lens[b++]=8;for(;256>b;)a.lens[b++]=9;for(;280>b;)a.lens[b++]=7;for(;288>b;)a.lens[b++]=8;for(v(x,a.lens,0,288,p,0,a.work,{bits:9}),b=0;32>b;)a.lens[b++]=5;v(y,a.lens,0,32,q,0,a.work,{bits:5}),sb=!1}a.lencode=p,a.lenbits=9,a.distcode=q,a.distbits=5}function l(a,b,c,d){var e,f=a.state;return null===f.window&&(f.wsize=1<<f.wbits,f.wnext=0,f.whave=0,f.window=new r.Buf8(f.wsize)),d>=f.wsize?(r.arraySet(f.window,b,c-f.wsize,f.wsize,0),f.wnext=0,f.whave=f.wsize):(e=f.wsize-f.wnext,e>d&&(e=d),r.arraySet(f.window,b,c-d,e,f.wnext),d-=e,d?(r.arraySet(f.window,b,c-d,d,0),f.wnext=d,f.whave=f.wsize):(f.wnext+=e,f.wnext===f.wsize&&(f.wnext=0),f.whave<f.wsize&&(f.whave+=e))),0}function m(a,b){var c,e,f,g,h,i,j,m,n,o,p,q,ob,pb,qb,rb,sb,tb,ub,vb,wb,xb,yb,zb,Ab=0,Bb=new r.Buf8(4),Cb=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15];if(!a||!a.state||!a.output||!a.input&&0!==a.avail_in)return F;c=a.state,c.mode===V&&(c.mode=W),h=a.next_out,f=a.output,j=a.avail_out,g=a.next_in,e=a.input,i=a.avail_in,m=c.hold,n=c.bits,o=i,p=j,xb=C;a:for(;;)switch(c.mode){case K:if(0===c.wrap){c.mode=W;break}for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(2&c.wrap&&35615===m){c.check=0,Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0),m=0,n=0,c.mode=L;break}if(c.flags=0,c.head&&(c.head.done=!1),!(1&c.wrap)||(((255&m)<<8)+(m>>8))%31){a.msg="incorrect header check",c.mode=lb;break}if((15&m)!==J){a.msg="unknown compression method",c.mode=lb;break}if(m>>>=4,n-=4,wb=(15&m)+8,0===c.wbits)c.wbits=wb;else if(wb>c.wbits){a.msg="invalid window size",c.mode=lb;break}c.dmax=1<<wb,a.adler=c.check=1,c.mode=512&m?T:V,m=0,n=0;break;case L:for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(c.flags=m,(255&c.flags)!==J){a.msg="unknown compression method",c.mode=lb;break}if(57344&c.flags){a.msg="unknown header flags set",c.mode=lb;break}c.head&&(c.head.text=m>>8&1),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0,c.mode=M;case M:for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.head&&(c.head.time=m),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,Bb[2]=m>>>16&255,Bb[3]=m>>>24&255,c.check=t(c.check,Bb,4,0)),m=0,n=0,c.mode=N;case N:for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.head&&(c.head.xflags=255&m,c.head.os=m>>8),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0,c.mode=O;case O:if(1024&c.flags){for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.length=m,c.head&&(c.head.extra_len=m),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0}else c.head&&(c.head.extra=null);c.mode=P;case P:if(1024&c.flags&&(q=c.length,q>i&&(q=i),q&&(c.head&&(wb=c.head.extra_len-c.length,c.head.extra||(c.head.extra=new Array(c.head.extra_len)),r.arraySet(c.head.extra,e,g,q,wb)),512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,c.length-=q),c.length))break a;c.length=0,c.mode=Q;case Q:if(2048&c.flags){if(0===i)break a;q=0;do wb=e[g+q++],c.head&&wb&&c.length<65536&&(c.head.name+=String.fromCharCode(wb));while(wb&&i>q);if(512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,wb)break a}else c.head&&(c.head.name=null);c.length=0,c.mode=R;case R:if(4096&c.flags){if(0===i)break a;q=0;do wb=e[g+q++],c.head&&wb&&c.length<65536&&(c.head.comment+=String.fromCharCode(wb));while(wb&&i>q);if(512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,wb)break a}else c.head&&(c.head.comment=null);c.mode=S;case 
S:if(512&c.flags){for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m!==(65535&c.check)){a.msg="header crc mismatch",c.mode=lb;break}m=0,n=0}c.head&&(c.head.hcrc=c.flags>>9&1,c.head.done=!0),a.adler=c.check=0,c.mode=V;break;case T:for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}a.adler=c.check=d(m),m=0,n=0,c.mode=U;case U:if(0===c.havedict)return a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,E;a.adler=c.check=1,c.mode=V;case V:if(b===A||b===B)break a;case W:if(c.last){m>>>=7&n,n-=7&n,c.mode=ib;break}for(;3>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}switch(c.last=1&m,m>>>=1,n-=1,3&m){case 0:c.mode=X;break;case 1:if(k(c),c.mode=bb,b===B){m>>>=2,n-=2;break a}break;case 2:c.mode=$;break;case 3:a.msg="invalid block type",c.mode=lb}m>>>=2,n-=2;break;case X:for(m>>>=7&n,n-=7&n;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if((65535&m)!==(m>>>16^65535)){a.msg="invalid stored block lengths",c.mode=lb;break}if(c.length=65535&m,m=0,n=0,c.mode=Y,b===B)break a;case Y:c.mode=Z;case Z:if(q=c.length){if(q>i&&(q=i),q>j&&(q=j),0===q)break a;r.arraySet(f,e,g,q,h),i-=q,g+=q,j-=q,h+=q,c.length-=q;break}c.mode=V;break;case $:for(;14>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(c.nlen=(31&m)+257,m>>>=5,n-=5,c.ndist=(31&m)+1,m>>>=5,n-=5,c.ncode=(15&m)+4,m>>>=4,n-=4,c.nlen>286||c.ndist>30){a.msg="too many length or distance symbols",c.mode=lb;break}c.have=0,c.mode=_;case _:for(;c.have<c.ncode;){for(;3>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.lens[Cb[c.have++]]=7&m,m>>>=3,n-=3}for(;c.have<19;)c.lens[Cb[c.have++]]=0;if(c.lencode=c.lendyn,c.lenbits=7,yb={bits:c.lenbits},xb=v(w,c.lens,0,19,c.lencode,0,c.work,yb),c.lenbits=yb.bits,xb){a.msg="invalid code lengths set",c.mode=lb;break}c.have=0,c.mode=ab;case ab:for(;c.have<c.nlen+c.ndist;){for(;Ab=c.lencode[m&(1<<c.lenbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(16>sb)m>>>=qb,n-=qb,c.lens[c.have++]=sb;else{if(16===sb){for(zb=qb+2;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m>>>=qb,n-=qb,0===c.have){a.msg="invalid bit length repeat",c.mode=lb;break}wb=c.lens[c.have-1],q=3+(3&m),m>>>=2,n-=2}else if(17===sb){for(zb=qb+3;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=qb,n-=qb,wb=0,q=3+(7&m),m>>>=3,n-=3}else{for(zb=qb+7;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=qb,n-=qb,wb=0,q=11+(127&m),m>>>=7,n-=7}if(c.have+q>c.nlen+c.ndist){a.msg="invalid bit length repeat",c.mode=lb;break}for(;q--;)c.lens[c.have++]=wb}}if(c.mode===lb)break;if(0===c.lens[256]){a.msg="invalid code -- missing end-of-block",c.mode=lb;break}if(c.lenbits=9,yb={bits:c.lenbits},xb=v(x,c.lens,0,c.nlen,c.lencode,0,c.work,yb),c.lenbits=yb.bits,xb){a.msg="invalid literal/lengths set",c.mode=lb;break}if(c.distbits=6,c.distcode=c.distdyn,yb={bits:c.distbits},xb=v(y,c.lens,c.nlen,c.ndist,c.distcode,0,c.work,yb),c.distbits=yb.bits,xb){a.msg="invalid distances set",c.mode=lb;break}if(c.mode=bb,b===B)break a;case bb:c.mode=cb;case cb:if(i>=6&&j>=258){a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,u(a,p),h=a.next_out,f=a.output,j=a.avail_out,g=a.next_in,e=a.input,i=a.avail_in,m=c.hold,n=c.bits,c.mode===V&&(c.back=-1);break}for(c.back=0;Ab=c.lencode[m&(1<<c.lenbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(rb&&0===(240&rb)){for(tb=qb,ub=rb,vb=sb;Ab=c.lencode[vb+((m&(1<<tb+ub)-1)>>tb)],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=tb+qb);){if(0===i)break 
a;i--,m+=e[g++]<<n,n+=8}m>>>=tb,n-=tb,c.back+=tb}if(m>>>=qb,n-=qb,c.back+=qb,c.length=sb,0===rb){c.mode=hb;break}if(32&rb){c.back=-1,c.mode=V;break}if(64&rb){a.msg="invalid literal/length code",c.mode=lb;break}c.extra=15&rb,c.mode=db;case db:if(c.extra){for(zb=c.extra;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.length+=m&(1<<c.extra)-1,m>>>=c.extra,n-=c.extra,c.back+=c.extra}c.was=c.length,c.mode=eb;case eb:for(;Ab=c.distcode[m&(1<<c.distbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(0===(240&rb)){for(tb=qb,ub=rb,vb=sb;Ab=c.distcode[vb+((m&(1<<tb+ub)-1)>>tb)],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=tb+qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=tb,n-=tb,c.back+=tb}if(m>>>=qb,n-=qb,c.back+=qb,64&rb){a.msg="invalid distance code",c.mode=lb;break}c.offset=sb,c.extra=15&rb,c.mode=fb;case fb:if(c.extra){for(zb=c.extra;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.offset+=m&(1<<c.extra)-1,m>>>=c.extra,n-=c.extra,c.back+=c.extra}if(c.offset>c.dmax){a.msg="invalid distance too far back",c.mode=lb;break}c.mode=gb;case gb:if(0===j)break a;if(q=p-j,c.offset>q){if(q=c.offset-q,q>c.whave&&c.sane){a.msg="invalid distance too far back",c.mode=lb;break}q>c.wnext?(q-=c.wnext,ob=c.wsize-q):ob=c.wnext-q,q>c.length&&(q=c.length),pb=c.window}else pb=f,ob=h-c.offset,q=c.length;q>j&&(q=j),j-=q,c.length-=q;do f[h++]=pb[ob++];while(--q);0===c.length&&(c.mode=cb);break;case hb:if(0===j)break a;f[h++]=c.length,j--,c.mode=cb;break;case ib:if(c.wrap){for(;32>n;){if(0===i)break a;i--,m|=e[g++]<<n,n+=8}if(p-=j,a.total_out+=p,c.total+=p,p&&(a.adler=c.check=c.flags?t(c.check,f,p,h-p):s(c.check,f,p,h-p)),p=j,(c.flags?m:d(m))!==c.check){a.msg="incorrect data check",c.mode=lb;break}m=0,n=0}c.mode=jb;case jb:if(c.wrap&&c.flags){for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m!==(4294967295&c.total)){a.msg="incorrect length check",c.mode=lb;break}m=0,n=0}c.mode=kb;case kb:xb=D;break a;case lb:xb=G;break a;case mb:return H;case nb:default:return F}return a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,(c.wsize||p!==a.avail_out&&c.mode<lb&&(c.mode<ib||b!==z))&&l(a,a.output,a.next_out,p-a.avail_out)?(c.mode=mb,H):(o-=a.avail_in,p-=a.avail_out,a.total_in+=o,a.total_out+=p,c.total+=p,c.wrap&&p&&(a.adler=c.check=c.flags?t(c.check,f,p,a.next_out-p):s(c.check,f,p,a.next_out-p)),a.data_type=c.bits+(c.last?64:0)+(c.mode===V?128:0)+(c.mode===bb||c.mode===Y?256:0),(0===o&&0===p||b===z)&&xb===C&&(xb=I),xb)}function n(a){if(!a||!a.state)return F;var b=a.state;return b.window&&(b.window=null),a.state=null,C}function o(a,b){var c;return a&&a.state?(c=a.state,0===(2&c.wrap)?F:(c.head=b,b.done=!1,C)):F}var p,q,r=a("../utils/common"),s=a("./adler32"),t=a("./crc32"),u=a("./inffast"),v=a("./inftrees"),w=0,x=1,y=2,z=4,A=5,B=6,C=0,D=1,E=2,F=-2,G=-3,H=-4,I=-5,J=8,K=1,L=2,M=3,N=4,O=5,P=6,Q=7,R=8,S=9,T=10,U=11,V=12,W=13,X=14,Y=15,Z=16,$=17,_=18,ab=19,bb=20,cb=21,db=22,eb=23,fb=24,gb=25,hb=26,ib=27,jb=28,kb=29,lb=30,mb=31,nb=32,ob=852,pb=592,qb=15,rb=qb,sb=!0;c.inflateReset=g,c.inflateReset2=h,c.inflateResetKeep=f,c.inflateInit=j,c.inflateInit2=i,c.inflate=m,c.inflateEnd=n,c.inflateGetHeader=o,c.inflateInfo="pako inflate (from Nodeca project)"},{"../utils/common":27,"./adler32":29,"./crc32":31,"./inffast":34,"./inftrees":36}],36:[function(a,b){"use strict";var 
c=a("../utils/common"),d=15,e=852,f=592,g=0,h=1,i=2,j=[3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258,0,0],k=[16,16,16,16,16,16,16,16,17,17,17,17,18,18,18,18,19,19,19,19,20,20,20,20,21,21,21,21,16,72,78],l=[1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0],m=[16,16,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26,27,27,28,28,29,29,64,64];b.exports=function(a,b,n,o,p,q,r,s){var t,u,v,w,x,y,z,A,B,C=s.bits,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=null,O=0,P=new c.Buf16(d+1),Q=new c.Buf16(d+1),R=null,S=0;for(D=0;d>=D;D++)P[D]=0;for(E=0;o>E;E++)P[b[n+E]]++;for(H=C,G=d;G>=1&&0===P[G];G--);if(H>G&&(H=G),0===G)return p[q++]=20971520,p[q++]=20971520,s.bits=1,0;for(F=1;G>F&&0===P[F];F++);for(F>H&&(H=F),K=1,D=1;d>=D;D++)if(K<<=1,K-=P[D],0>K)return-1;if(K>0&&(a===g||1!==G))return-1;for(Q[1]=0,D=1;d>D;D++)Q[D+1]=Q[D]+P[D];for(E=0;o>E;E++)0!==b[n+E]&&(r[Q[b[n+E]]++]=E);if(a===g?(N=R=r,y=19):a===h?(N=j,O-=257,R=k,S-=257,y=256):(N=l,R=m,y=-1),M=0,E=0,D=F,x=q,I=H,J=0,v=-1,L=1<<H,w=L-1,a===h&&L>e||a===i&&L>f)return 1;for(var T=0;;){T++,z=D-J,r[E]<y?(A=0,B=r[E]):r[E]>y?(A=R[S+r[E]],B=N[O+r[E]]):(A=96,B=0),t=1<<D-J,u=1<<I,F=u;do u-=t,p[x+(M>>J)+u]=z<<24|A<<16|B|0;while(0!==u);for(t=1<<D-1;M&t;)t>>=1;if(0!==t?(M&=t-1,M+=t):M=0,E++,0===--P[D]){if(D===G)break;D=b[n+r[E]]}if(D>H&&(M&w)!==v){for(0===J&&(J=H),x+=F,I=D-J,K=1<<I;G>I+J&&(K-=P[I+J],!(0>=K));)I++,K<<=1;if(L+=1<<I,a===h&&L>e||a===i&&L>f)return 1;v=M&w,p[v]=H<<24|I<<16|x-q|0}}return 0!==M&&(p[x+M]=D-J<<24|64<<16|0),s.bits=H,0}},{"../utils/common":27}],37:[function(a,b){"use strict";b.exports={2:"need dictionary",1:"stream end",0:"","-1":"file error","-2":"stream error","-3":"data error","-4":"insufficient memory","-5":"buffer error","-6":"incompatible version"}},{}],38:[function(a,b,c){"use strict";function d(a){for(var b=a.length;--b>=0;)a[b]=0}function e(a){return 256>a?gb[a]:gb[256+(a>>>7)]}function f(a,b){a.pending_buf[a.pending++]=255&b,a.pending_buf[a.pending++]=b>>>8&255}function g(a,b,c){a.bi_valid>V-c?(a.bi_buf|=b<<a.bi_valid&65535,f(a,a.bi_buf),a.bi_buf=b>>V-a.bi_valid,a.bi_valid+=c-V):(a.bi_buf|=b<<a.bi_valid&65535,a.bi_valid+=c)}function h(a,b,c){g(a,c[2*b],c[2*b+1])}function i(a,b){var c=0;do c|=1&a,a>>>=1,c<<=1;while(--b>0);return c>>>1}function j(a){16===a.bi_valid?(f(a,a.bi_buf),a.bi_buf=0,a.bi_valid=0):a.bi_valid>=8&&(a.pending_buf[a.pending++]=255&a.bi_buf,a.bi_buf>>=8,a.bi_valid-=8)}function k(a,b){var c,d,e,f,g,h,i=b.dyn_tree,j=b.max_code,k=b.stat_desc.static_tree,l=b.stat_desc.has_stree,m=b.stat_desc.extra_bits,n=b.stat_desc.extra_base,o=b.stat_desc.max_length,p=0;for(f=0;U>=f;f++)a.bl_count[f]=0;for(i[2*a.heap[a.heap_max]+1]=0,c=a.heap_max+1;T>c;c++)d=a.heap[c],f=i[2*i[2*d+1]+1]+1,f>o&&(f=o,p++),i[2*d+1]=f,d>j||(a.bl_count[f]++,g=0,d>=n&&(g=m[d-n]),h=i[2*d],a.opt_len+=h*(f+g),l&&(a.static_len+=h*(k[2*d+1]+g)));if(0!==p){do{for(f=o-1;0===a.bl_count[f];)f--;a.bl_count[f]--,a.bl_count[f+1]+=2,a.bl_count[o]--,p-=2}while(p>0);for(f=o;0!==f;f--)for(d=a.bl_count[f];0!==d;)e=a.heap[--c],e>j||(i[2*e+1]!==f&&(a.opt_len+=(f-i[2*e+1])*i[2*e],i[2*e+1]=f),d--)}}function l(a,b,c){var d,e,f=new Array(U+1),g=0;for(d=1;U>=d;d++)f[d]=g=g+c[d-1]<<1;for(e=0;b>=e;e++){var h=a[2*e+1];0!==h&&(a[2*e]=i(f[h]++,h))}}function m(){var a,b,c,d,e,f=new 
Array(U+1);for(c=0,d=0;O-1>d;d++)for(ib[d]=c,a=0;a<1<<_[d];a++)hb[c++]=d;for(hb[c-1]=d,e=0,d=0;16>d;d++)for(jb[d]=e,a=0;a<1<<ab[d];a++)gb[e++]=d;for(e>>=7;R>d;d++)for(jb[d]=e<<7,a=0;a<1<<ab[d]-7;a++)gb[256+e++]=d;for(b=0;U>=b;b++)f[b]=0;for(a=0;143>=a;)eb[2*a+1]=8,a++,f[8]++;for(;255>=a;)eb[2*a+1]=9,a++,f[9]++;for(;279>=a;)eb[2*a+1]=7,a++,f[7]++;for(;287>=a;)eb[2*a+1]=8,a++,f[8]++;for(l(eb,Q+1,f),a=0;R>a;a++)fb[2*a+1]=5,fb[2*a]=i(a,5);kb=new nb(eb,_,P+1,Q,U),lb=new nb(fb,ab,0,R,U),mb=new nb(new Array(0),bb,0,S,W)}function n(a){var b;for(b=0;Q>b;b++)a.dyn_ltree[2*b]=0;for(b=0;R>b;b++)a.dyn_dtree[2*b]=0;for(b=0;S>b;b++)a.bl_tree[2*b]=0;a.dyn_ltree[2*X]=1,a.opt_len=a.static_len=0,a.last_lit=a.matches=0}function o(a){a.bi_valid>8?f(a,a.bi_buf):a.bi_valid>0&&(a.pending_buf[a.pending++]=a.bi_buf),a.bi_buf=0,a.bi_valid=0}function p(a,b,c,d){o(a),d&&(f(a,c),f(a,~c)),E.arraySet(a.pending_buf,a.window,b,c,a.pending),a.pending+=c}function q(a,b,c,d){var e=2*b,f=2*c;return a[e]<a[f]||a[e]===a[f]&&d[b]<=d[c]}function r(a,b,c){for(var d=a.heap[c],e=c<<1;e<=a.heap_len&&(e<a.heap_len&&q(b,a.heap[e+1],a.heap[e],a.depth)&&e++,!q(b,d,a.heap[e],a.depth));)a.heap[c]=a.heap[e],c=e,e<<=1;a.heap[c]=d}function s(a,b,c){var d,f,i,j,k=0;if(0!==a.last_lit)do d=a.pending_buf[a.d_buf+2*k]<<8|a.pending_buf[a.d_buf+2*k+1],f=a.pending_buf[a.l_buf+k],k++,0===d?h(a,f,b):(i=hb[f],h(a,i+P+1,b),j=_[i],0!==j&&(f-=ib[i],g(a,f,j)),d--,i=e(d),h(a,i,c),j=ab[i],0!==j&&(d-=jb[i],g(a,d,j)));while(k<a.last_lit);h(a,X,b)}function t(a,b){var c,d,e,f=b.dyn_tree,g=b.stat_desc.static_tree,h=b.stat_desc.has_stree,i=b.stat_desc.elems,j=-1;for(a.heap_len=0,a.heap_max=T,c=0;i>c;c++)0!==f[2*c]?(a.heap[++a.heap_len]=j=c,a.depth[c]=0):f[2*c+1]=0;for(;a.heap_len<2;)e=a.heap[++a.heap_len]=2>j?++j:0,f[2*e]=1,a.depth[e]=0,a.opt_len--,h&&(a.static_len-=g[2*e+1]);for(b.max_code=j,c=a.heap_len>>1;c>=1;c--)r(a,f,c);e=i;do c=a.heap[1],a.heap[1]=a.heap[a.heap_len--],r(a,f,1),d=a.heap[1],a.heap[--a.heap_max]=c,a.heap[--a.heap_max]=d,f[2*e]=f[2*c]+f[2*d],a.depth[e]=(a.depth[c]>=a.depth[d]?a.depth[c]:a.depth[d])+1,f[2*c+1]=f[2*d+1]=e,a.heap[1]=e++,r(a,f,1);while(a.heap_len>=2);a.heap[--a.heap_max]=a.heap[1],k(a,b),l(f,j,a.bl_count)}function u(a,b,c){var d,e,f=-1,g=b[1],h=0,i=7,j=4;for(0===g&&(i=138,j=3),b[2*(c+1)+1]=65535,d=0;c>=d;d++)e=g,g=b[2*(d+1)+1],++h<i&&e===g||(j>h?a.bl_tree[2*e]+=h:0!==e?(e!==f&&a.bl_tree[2*e]++,a.bl_tree[2*Y]++):10>=h?a.bl_tree[2*Z]++:a.bl_tree[2*$]++,h=0,f=e,0===g?(i=138,j=3):e===g?(i=6,j=3):(i=7,j=4))}function v(a,b,c){var d,e,f=-1,i=b[1],j=0,k=7,l=4;for(0===i&&(k=138,l=3),d=0;c>=d;d++)if(e=i,i=b[2*(d+1)+1],!(++j<k&&e===i)){if(l>j){do h(a,e,a.bl_tree);while(0!==--j)}else 0!==e?(e!==f&&(h(a,e,a.bl_tree),j--),h(a,Y,a.bl_tree),g(a,j-3,2)):10>=j?(h(a,Z,a.bl_tree),g(a,j-3,3)):(h(a,$,a.bl_tree),g(a,j-11,7));j=0,f=e,0===i?(k=138,l=3):e===i?(k=6,l=3):(k=7,l=4)}}function w(a){var b;for(u(a,a.dyn_ltree,a.l_desc.max_code),u(a,a.dyn_dtree,a.d_desc.max_code),t(a,a.bl_desc),b=S-1;b>=3&&0===a.bl_tree[2*cb[b]+1];b--);return a.opt_len+=3*(b+1)+5+5+4,b}function x(a,b,c,d){var e;for(g(a,b-257,5),g(a,c-1,5),g(a,d-4,4),e=0;d>e;e++)g(a,a.bl_tree[2*cb[e]+1],3);v(a,a.dyn_ltree,b-1),v(a,a.dyn_dtree,c-1)}function y(a){var b,c=4093624447;for(b=0;31>=b;b++,c>>>=1)if(1&c&&0!==a.dyn_ltree[2*b])return G;if(0!==a.dyn_ltree[18]||0!==a.dyn_ltree[20]||0!==a.dyn_ltree[26])return H;for(b=32;P>b;b++)if(0!==a.dyn_ltree[2*b])return H;return G}function z(a){pb||(m(),pb=!0),a.l_desc=new ob(a.dyn_ltree,kb),a.d_desc=new ob(a.dyn_dtree,lb),a.bl_desc=new 
ob(a.bl_tree,mb),a.bi_buf=0,a.bi_valid=0,n(a)}function A(a,b,c,d){g(a,(J<<1)+(d?1:0),3),p(a,b,c,!0)}function B(a){g(a,K<<1,3),h(a,X,eb),j(a)}function C(a,b,c,d){var e,f,h=0;a.level>0?(a.strm.data_type===I&&(a.strm.data_type=y(a)),t(a,a.l_desc),t(a,a.d_desc),h=w(a),e=a.opt_len+3+7>>>3,f=a.static_len+3+7>>>3,e>=f&&(e=f)):e=f=c+5,e>=c+4&&-1!==b?A(a,b,c,d):a.strategy===F||f===e?(g(a,(K<<1)+(d?1:0),3),s(a,eb,fb)):(g(a,(L<<1)+(d?1:0),3),x(a,a.l_desc.max_code+1,a.d_desc.max_code+1,h+1),s(a,a.dyn_ltree,a.dyn_dtree)),n(a),d&&o(a)}function D(a,b,c){return a.pending_buf[a.d_buf+2*a.last_lit]=b>>>8&255,a.pending_buf[a.d_buf+2*a.last_lit+1]=255&b,a.pending_buf[a.l_buf+a.last_lit]=255&c,a.last_lit++,0===b?a.dyn_ltree[2*c]++:(a.matches++,b--,a.dyn_ltree[2*(hb[c]+P+1)]++,a.dyn_dtree[2*e(b)]++),a.last_lit===a.lit_bufsize-1}var E=a("../utils/common"),F=4,G=0,H=1,I=2,J=0,K=1,L=2,M=3,N=258,O=29,P=256,Q=P+1+O,R=30,S=19,T=2*Q+1,U=15,V=16,W=7,X=256,Y=16,Z=17,$=18,_=[0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0],ab=[0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13],bb=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7],cb=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15],db=512,eb=new Array(2*(Q+2));d(eb);var fb=new Array(2*R);d(fb);var gb=new Array(db);d(gb);var hb=new Array(N-M+1);d(hb);var ib=new Array(O);d(ib);var jb=new Array(R);d(jb);var kb,lb,mb,nb=function(a,b,c,d,e){this.static_tree=a,this.extra_bits=b,this.extra_base=c,this.elems=d,this.max_length=e,this.has_stree=a&&a.length},ob=function(a,b){this.dyn_tree=a,this.max_code=0,this.stat_desc=b},pb=!1;c._tr_init=z,c._tr_stored_block=A,c._tr_flush_block=C,c._tr_tally=D,c._tr_align=B},{"../utils/common":27}],39:[function(a,b){"use strict";function c(){this.input=null,this.next_in=0,this.avail_in=0,this.total_in=0,this.output=null,this.next_out=0,this.avail_out=0,this.total_out=0,this.msg="",this.state=null,this.data_type=2,this.adler=0}b.exports=c},{}]},{},[9])(9)});'use strict';if(tr.isHeadless){global.window={};}'use strict';if(tr.isHeadless){global.JSZip=global.window.JSZip;global.window=undefined;}'use strict';tr.exportTo('tr.e.importer.ddms',function(){var kPid=0;var kCategory='java';var kMethodLutEndMarker='\n*end\n';var kThreadsStart='\n*threads\n';var kMethodsStart='\n*methods\n';var kTraceMethodEnter=0x00;var kTraceMethodExit=0x01;var kTraceUnroll=0x02;var kTraceMethodActionMask=0x03;var kTraceHeaderLength=32;var kTraceMagicValue=0x574f4c53;var kTraceVersionSingleClock=2;var kTraceVersionDualClock=3;var kTraceRecordSizeSingleClock=10;var kTraceRecordSizeDualClock=14;function Reader(string_payload){this.position_=0;this.data_=JSZip.utils.transformTo('uint8array',string_payload);}
+var corrPeakTs=((minShift+corrPeakIdx)/this.sampleRate_);corrPeakTs*=1000;var syncStartTs=firstSyncEventTs-this.model_.bounds.min;var shiftTs=syncStartTs-corrPeakTs;return shiftTs;},explicitClockSync:function(){if(this.explicitSyncMark_===undefined)
+return undefined;var syncMarks=this.model.getClockSyncRecordsWithSyncId(this.explicitSyncMark_['id']);if(syncMarks.length!==1){this.model_.importWarning({type:'missing_sync_marker',message:'No single clock sync record found for explicit clock sync.'});return undefined;}
+var clockSync=syncMarks[0];var syncTs=clockSync.start;var traceTs=this.explicitSyncMark_['ts'];return syncTs-traceTs;},foundExplicitSyncMark:function(){return this.explicitSyncMark_!==undefined;}};tr.importer.Importer.register(BattorImporter);return{BattorImporter:BattorImporter,_BattorImporterTestExports:TestExports};});!function(a){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=a();else if("function"==typeof define&&define.amd)define([],a);else{var b;"undefined"!=typeof window?b=window:"undefined"!=typeof global?b=global:"undefined"!=typeof self&&(b=self),b.JSZip=a()}}(function(){return function a(b,c,d){function e(g,h){if(!c[g]){if(!b[g]){var i="function"==typeof require&&require;if(!h&&i)return i(g,!0);if(f)return f(g,!0);throw new Error("Cannot find module '"+g+"'")}var j=c[g]={exports:{}};b[g][0].call(j.exports,function(a){var c=b[g][1][a];return e(c?c:a)},j,j.exports,a,b,c,d)}return c[g].exports}for(var f="function"==typeof require&&require,g=0;g<d.length;g++)e(d[g]);return e}({1:[function(a,b,c){"use strict";var d="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";c.encode=function(a){for(var b,c,e,f,g,h,i,j="",k=0;k<a.length;)b=a.charCodeAt(k++),c=a.charCodeAt(k++),e=a.charCodeAt(k++),f=b>>2,g=(3&b)<<4|c>>4,h=(15&c)<<2|e>>6,i=63&e,isNaN(c)?h=i=64:isNaN(e)&&(i=64),j=j+d.charAt(f)+d.charAt(g)+d.charAt(h)+d.charAt(i);return j},c.decode=function(a){var b,c,e,f,g,h,i,j="",k=0;for(a=a.replace(/[^A-Za-z0-9\+\/\=]/g,"");k<a.length;)f=d.indexOf(a.charAt(k++)),g=d.indexOf(a.charAt(k++)),h=d.indexOf(a.charAt(k++)),i=d.indexOf(a.charAt(k++)),b=f<<2|g>>4,c=(15&g)<<4|h>>2,e=(3&h)<<6|i,j+=String.fromCharCode(b),64!=h&&(j+=String.fromCharCode(c)),64!=i&&(j+=String.fromCharCode(e));return j}},{}],2:[function(a,b){"use strict";function c(){this.compressedSize=0,this.uncompressedSize=0,this.crc32=0,this.compressionMethod=null,this.compressedContent=null}c.prototype={getContent:function(){return null},getCompressedContent:function(){return null}},b.exports=c},{}],3:[function(a,b,c){"use strict";c.STORE={magic:"\x00\x00",compress:function(a){return a},uncompress:function(a){return a},compressInputType:null,uncompressInputType:null},c.DEFLATE=a("./flate")},{"./flate":8}],4:[function(a,b){"use strict";var 
c=a("./utils"),d=[0,1996959894,3993919788,2567524794,124634137,1886057615,3915621685,2657392035,249268274,2044508324,3772115230,2547177864,162941995,2125561021,3887607047,2428444049,498536548,1789927666,4089016648,2227061214,450548861,1843258603,4107580753,2211677639,325883990,1684777152,4251122042,2321926636,335633487,1661365465,4195302755,2366115317,997073096,1281953886,3579855332,2724688242,1006888145,1258607687,3524101629,2768942443,901097722,1119000684,3686517206,2898065728,853044451,1172266101,3705015759,2882616665,651767980,1373503546,3369554304,3218104598,565507253,1454621731,3485111705,3099436303,671266974,1594198024,3322730930,2970347812,795835527,1483230225,3244367275,3060149565,1994146192,31158534,2563907772,4023717930,1907459465,112637215,2680153253,3904427059,2013776290,251722036,2517215374,3775830040,2137656763,141376813,2439277719,3865271297,1802195444,476864866,2238001368,4066508878,1812370925,453092731,2181625025,4111451223,1706088902,314042704,2344532202,4240017532,1658658271,366619977,2362670323,4224994405,1303535960,984961486,2747007092,3569037538,1256170817,1037604311,2765210733,3554079995,1131014506,879679996,2909243462,3663771856,1141124467,855842277,2852801631,3708648649,1342533948,654459306,3188396048,3373015174,1466479909,544179635,3110523913,3462522015,1591671054,702138776,2966460450,3352799412,1504918807,783551873,3082640443,3233442989,3988292384,2596254646,62317068,1957810842,3939845945,2647816111,81470997,1943803523,3814918930,2489596804,225274430,2053790376,3826175755,2466906013,167816743,2097651377,4027552580,2265490386,503444072,1762050814,4150417245,2154129355,426522225,1852507879,4275313526,2312317920,282753626,1742555852,4189708143,2394877945,397917763,1622183637,3604390888,2714866558,953729732,1340076626,3518719985,2797360999,1068828381,1219638859,3624741850,2936675148,906185462,1090812512,3747672003,2825379669,829329135,1181335161,3412177804,3160834842,628085408,1382605366,3423369109,3138078467,570562233,1426400815,3317316542,2998733608,733239954,1555261956,3268935591,3050360625,752459403,1541320221,2607071920,3965973030,1969922972,40735498,2617837225,3943577151,1913087877,83908371,2512341634,3803740692,2075208622,213261112,2463272603,3855990285,2094854071,198958881,2262029012,4057260610,1759359992,534414190,2176718541,4139329115,1873836001,414664567,2282248934,4279200368,1711684554,285281116,2405801727,4167216745,1634467795,376229701,2685067896,3608007406,1308918612,956543938,2808555105,3495958263,1231636301,1047427035,2932959818,3654703836,1088359270,936918e3,2847714899,3736837829,1202900863,817233897,3183342108,3401237130,1404277552,615818150,3134207493,3453421203,1423857449,601450431,3009837614,3294710456,1567103746,711928724,3020668471,3272380065,1510334235,755167117];b.exports=function(a,b){if("undefined"==typeof a||!a.length)return 0;var e="string"!==c.getTypeOf(a);"undefined"==typeof b&&(b=0);var f=0,g=0,h=0;b=-1^b;for(var i=0,j=a.length;j>i;i++)h=e?a[i]:a.charCodeAt(i),g=255&(b^h),f=d[g],b=b>>>8^f;return-1^b}},{"./utils":21}],5:[function(a,b){"use strict";function c(){this.data=null,this.length=0,this.index=0}var d=a("./utils");c.prototype={checkOffset:function(a){this.checkIndex(this.index+a)},checkIndex:function(a){if(this.length<a||0>a)throw new Error("End of data reached (data length = "+this.length+", asked index = "+a+"). 
Corrupted zip ?")},setIndex:function(a){this.checkIndex(a),this.index=a},skip:function(a){this.setIndex(this.index+a)},byteAt:function(){},readInt:function(a){var b,c=0;for(this.checkOffset(a),b=this.index+a-1;b>=this.index;b--)c=(c<<8)+this.byteAt(b);return this.index+=a,c},readString:function(a){return d.transformTo("string",this.readData(a))},readData:function(){},lastIndexOfSignature:function(){},readDate:function(){var a=this.readInt(4);return new Date((a>>25&127)+1980,(a>>21&15)-1,a>>16&31,a>>11&31,a>>5&63,(31&a)<<1)}},b.exports=c},{"./utils":21}],6:[function(a,b,c){"use strict";c.base64=!1,c.binary=!1,c.dir=!1,c.createFolders=!1,c.date=null,c.compression=null,c.comment=null},{}],7:[function(a,b,c){"use strict";var d=a("./utils");c.string2binary=function(a){return d.string2binary(a)},c.string2Uint8Array=function(a){return d.transformTo("uint8array",a)},c.uint8Array2String=function(a){return d.transformTo("string",a)},c.string2Blob=function(a){var b=d.transformTo("arraybuffer",a);return d.arrayBuffer2Blob(b)},c.arrayBuffer2Blob=function(a){return d.arrayBuffer2Blob(a)},c.transformTo=function(a,b){return d.transformTo(a,b)},c.getTypeOf=function(a){return d.getTypeOf(a)},c.checkSupport=function(a){return d.checkSupport(a)},c.MAX_VALUE_16BITS=d.MAX_VALUE_16BITS,c.MAX_VALUE_32BITS=d.MAX_VALUE_32BITS,c.pretty=function(a){return d.pretty(a)},c.findCompression=function(a){return d.findCompression(a)},c.isRegExp=function(a){return d.isRegExp(a)}},{"./utils":21}],8:[function(a,b,c){"use strict";var d="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Uint32Array,e=a("pako");c.uncompressInputType=d?"uint8array":"array",c.compressInputType=d?"uint8array":"array",c.magic="\b\x00",c.compress=function(a){return e.deflateRaw(a)},c.uncompress=function(a){return e.inflateRaw(a)}},{pako:24}],9:[function(a,b){"use strict";function c(a,b){return this instanceof c?(this.files={},this.comment=null,this.root="",a&&this.load(a,b),void(this.clone=function(){var a=new c;for(var b in this)"function"!=typeof this[b]&&(a[b]=this[b]);return a})):new c(a,b)}var d=a("./base64");c.prototype=a("./object"),c.prototype.load=a("./load"),c.support=a("./support"),c.defaults=a("./defaults"),c.utils=a("./deprecatedPublicUtils"),c.base64={encode:function(a){return d.encode(a)},decode:function(a){return d.decode(a)}},c.compressions=a("./compressions"),b.exports=c},{"./base64":1,"./compressions":3,"./defaults":6,"./deprecatedPublicUtils":7,"./load":10,"./object":13,"./support":17}],10:[function(a,b){"use strict";var c=a("./base64"),d=a("./zipEntries");b.exports=function(a,b){var e,f,g,h;for(b=b||{},b.base64&&(a=c.decode(a)),f=new d(a,b),e=f.files,g=0;g<e.length;g++)h=e[g],this.file(h.fileName,h.decompressed,{binary:!0,optimizedBinaryString:!0,date:h.date,dir:h.dir,comment:h.fileComment.length?h.fileComment:null,createFolders:b.createFolders});return f.zipComment.length&&(this.comment=f.zipComment),this}},{"./base64":1,"./zipEntries":22}],11:[function(a,b){(function(a){"use strict";b.exports=function(b,c){return new a(b,c)},b.exports.test=function(b){return a.isBuffer(b)}}).call(this,"undefined"!=typeof Buffer?Buffer:void 0)},{}],12:[function(a,b){"use strict";function c(a){this.data=a,this.length=this.data.length,this.index=0}var d=a("./uint8ArrayReader");c.prototype=new d,c.prototype.readData=function(a){this.checkOffset(a);var b=this.data.slice(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./uint8ArrayReader":18}],13:[function(a,b){"use strict";var 
c=a("./support"),d=a("./utils"),e=a("./crc32"),f=a("./signature"),g=a("./defaults"),h=a("./base64"),i=a("./compressions"),j=a("./compressedObject"),k=a("./nodeBuffer"),l=a("./utf8"),m=a("./stringWriter"),n=a("./uint8ArrayWriter"),o=function(a){if(a._data instanceof j&&(a._data=a._data.getContent(),a.options.binary=!0,a.options.base64=!1,"uint8array"===d.getTypeOf(a._data))){var b=a._data;a._data=new Uint8Array(b.length),0!==b.length&&a._data.set(b,0)}return a._data},p=function(a){var b=o(a),e=d.getTypeOf(b);return"string"===e?!a.options.binary&&c.nodebuffer?k(b,"utf-8"):a.asBinary():b},q=function(a){var b=o(this);return null===b||"undefined"==typeof b?"":(this.options.base64&&(b=h.decode(b)),b=a&&this.options.binary?A.utf8decode(b):d.transformTo("string",b),a||this.options.binary||(b=d.transformTo("string",A.utf8encode(b))),b)},r=function(a,b,c){this.name=a,this.dir=c.dir,this.date=c.date,this.comment=c.comment,this._data=b,this.options=c,this._initialMetadata={dir:c.dir,date:c.date}};r.prototype={asText:function(){return q.call(this,!0)},asBinary:function(){return q.call(this,!1)},asNodeBuffer:function(){var a=p(this);return d.transformTo("nodebuffer",a)},asUint8Array:function(){var a=p(this);return d.transformTo("uint8array",a)},asArrayBuffer:function(){return this.asUint8Array().buffer}};var s=function(a,b){var c,d="";for(c=0;b>c;c++)d+=String.fromCharCode(255&a),a>>>=8;return d},t=function(){var a,b,c={};for(a=0;a<arguments.length;a++)for(b in arguments[a])arguments[a].hasOwnProperty(b)&&"undefined"==typeof c[b]&&(c[b]=arguments[a][b]);return c},u=function(a){return a=a||{},a.base64!==!0||null!==a.binary&&void 0!==a.binary||(a.binary=!0),a=t(a,g),a.date=a.date||new Date,null!==a.compression&&(a.compression=a.compression.toUpperCase()),a},v=function(a,b,c){var e,f=d.getTypeOf(b);if(c=u(c),c.createFolders&&(e=w(a))&&x.call(this,e,!0),c.dir||null===b||"undefined"==typeof b)c.base64=!1,c.binary=!1,b=null;else if("string"===f)c.binary&&!c.base64&&c.optimizedBinaryString!==!0&&(b=d.string2binary(b));else{if(c.base64=!1,c.binary=!0,!(f||b instanceof j))throw new Error("The data of '"+a+"' is in an unsupported format !");"arraybuffer"===f&&(b=d.transformTo("uint8array",b))}var g=new r(a,b,c);return this.files[a]=g,g},w=function(a){"/"==a.slice(-1)&&(a=a.substring(0,a.length-1));var b=a.lastIndexOf("/");return b>0?a.substring(0,b):""},x=function(a,b){return"/"!=a.slice(-1)&&(a+="/"),b="undefined"!=typeof b?b:!1,this.files[a]||v.call(this,a,null,{dir:!0,createFolders:b}),this.files[a]},y=function(a,b){var c,f=new j;return a._data instanceof j?(f.uncompressedSize=a._data.uncompressedSize,f.crc32=a._data.crc32,0===f.uncompressedSize||a.dir?(b=i.STORE,f.compressedContent="",f.crc32=0):a._data.compressionMethod===b.magic?f.compressedContent=a._data.getCompressedContent():(c=a._data.getContent(),f.compressedContent=b.compress(d.transformTo(b.compressInputType,c)))):(c=p(a),(!c||0===c.length||a.dir)&&(b=i.STORE,c=""),f.uncompressedSize=c.length,f.crc32=e(c),f.compressedContent=b.compress(d.transformTo(b.compressInputType,c))),f.compressedSize=f.compressedContent.length,f.compressionMethod=b.magic,f},z=function(a,b,c,g){var 
h,i,j,k,m=(c.compressedContent,d.transformTo("string",l.utf8encode(b.name))),n=b.comment||"",o=d.transformTo("string",l.utf8encode(n)),p=m.length!==b.name.length,q=o.length!==n.length,r=b.options,t="",u="",v="";j=b._initialMetadata.dir!==b.dir?b.dir:r.dir,k=b._initialMetadata.date!==b.date?b.date:r.date,h=k.getHours(),h<<=6,h|=k.getMinutes(),h<<=5,h|=k.getSeconds()/2,i=k.getFullYear()-1980,i<<=4,i|=k.getMonth()+1,i<<=5,i|=k.getDate(),p&&(u=s(1,1)+s(e(m),4)+m,t+="up"+s(u.length,2)+u),q&&(v=s(1,1)+s(this.crc32(o),4)+o,t+="uc"+s(v.length,2)+v);var w="";w+="\n\x00",w+=p||q?"\x00\b":"\x00\x00",w+=c.compressionMethod,w+=s(h,2),w+=s(i,2),w+=s(c.crc32,4),w+=s(c.compressedSize,4),w+=s(c.uncompressedSize,4),w+=s(m.length,2),w+=s(t.length,2);var x=f.LOCAL_FILE_HEADER+w+m+t,y=f.CENTRAL_FILE_HEADER+"\x00"+w+s(o.length,2)+"\x00\x00\x00\x00"+(j===!0?"\x00\x00\x00":"\x00\x00\x00\x00")+s(g,4)+m+t+o;return{fileRecord:x,dirRecord:y,compressedObject:c}},A={load:function(){throw new Error("Load method is not defined. Is the file jszip-load.js included ?")},filter:function(a){var b,c,d,e,f=[];for(b in this.files)this.files.hasOwnProperty(b)&&(d=this.files[b],e=new r(d.name,d._data,t(d.options)),c=b.slice(this.root.length,b.length),b.slice(0,this.root.length)===this.root&&a(c,e)&&f.push(e));return f},file:function(a,b,c){if(1===arguments.length){if(d.isRegExp(a)){var e=a;return this.filter(function(a,b){return!b.dir&&e.test(a)})}return this.filter(function(b,c){return!c.dir&&b===a})[0]||null}return a=this.root+a,v.call(this,a,b,c),this},folder:function(a){if(!a)return this;if(d.isRegExp(a))return this.filter(function(b,c){return c.dir&&a.test(b)});var b=this.root+a,c=x.call(this,b),e=this.clone();return e.root=c.name,e},remove:function(a){a=this.root+a;var b=this.files[a];if(b||("/"!=a.slice(-1)&&(a+="/"),b=this.files[a]),b&&!b.dir)delete this.files[a];else for(var c=this.filter(function(b,c){return c.name.slice(0,a.length)===a}),d=0;d<c.length;d++)delete this.files[c[d].name];return this},generate:function(a){a=t(a||{},{base64:!0,compression:"STORE",type:"base64",comment:null}),d.checkSupport(a.type);var b,c,e=[],g=0,j=0,k=d.transformTo("string",this.utf8encode(a.comment||this.comment||""));for(var l in this.files)if(this.files.hasOwnProperty(l)){var o=this.files[l],p=o.options.compression||a.compression.toUpperCase(),q=i[p];if(!q)throw new Error(p+" is not a valid compression method !");var r=y.call(this,o,q),u=z.call(this,l,o,r,g);g+=u.fileRecord.length+r.compressedSize,j+=u.dirRecord.length,e.push(u)}var v="";v=f.CENTRAL_DIRECTORY_END+"\x00\x00\x00\x00"+s(e.length,2)+s(e.length,2)+s(j,4)+s(g,4)+s(k.length,2)+k;var w=a.type.toLowerCase();for(b="uint8array"===w||"arraybuffer"===w||"blob"===w||"nodebuffer"===w?new n(g+j+v.length):new m(g+j+v.length),c=0;c<e.length;c++)b.append(e[c].fileRecord),b.append(e[c].compressedObject.compressedContent);for(c=0;c<e.length;c++)b.append(e[c].dirRecord);b.append(v);var x=b.finalize();switch(a.type.toLowerCase()){case"uint8array":case"arraybuffer":case"nodebuffer":return d.transformTo(a.type.toLowerCase(),x);case"blob":return d.arrayBuffer2Blob(d.transformTo("arraybuffer",x));case"base64":return a.base64?h.encode(x):x;default:return x}},crc32:function(a,b){return e(a,b)},utf8encode:function(a){return d.transformTo("string",l.utf8encode(a))},utf8decode:function(a){return 
l.utf8decode(a)}};b.exports=A},{"./base64":1,"./compressedObject":2,"./compressions":3,"./crc32":4,"./defaults":6,"./nodeBuffer":11,"./signature":14,"./stringWriter":16,"./support":17,"./uint8ArrayWriter":19,"./utf8":20,"./utils":21}],14:[function(a,b,c){"use strict";c.LOCAL_FILE_HEADER="PK",c.CENTRAL_FILE_HEADER="PK",c.CENTRAL_DIRECTORY_END="PK",c.ZIP64_CENTRAL_DIRECTORY_LOCATOR="PK",c.ZIP64_CENTRAL_DIRECTORY_END="PK",c.DATA_DESCRIPTOR="PK\b"},{}],15:[function(a,b){"use strict";function c(a,b){this.data=a,b||(this.data=e.string2binary(this.data)),this.length=this.data.length,this.index=0}var d=a("./dataReader"),e=a("./utils");c.prototype=new d,c.prototype.byteAt=function(a){return this.data.charCodeAt(a)},c.prototype.lastIndexOfSignature=function(a){return this.data.lastIndexOf(a)},c.prototype.readData=function(a){this.checkOffset(a);var b=this.data.slice(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./dataReader":5,"./utils":21}],16:[function(a,b){"use strict";var c=a("./utils"),d=function(){this.data=[]};d.prototype={append:function(a){a=c.transformTo("string",a),this.data.push(a)},finalize:function(){return this.data.join("")}},b.exports=d},{"./utils":21}],17:[function(a,b,c){(function(a){"use strict";if(c.base64=!0,c.array=!0,c.string=!0,c.arraybuffer="undefined"!=typeof ArrayBuffer&&"undefined"!=typeof Uint8Array,c.nodebuffer="undefined"!=typeof a,c.uint8array="undefined"!=typeof Uint8Array,"undefined"==typeof ArrayBuffer)c.blob=!1;else{var b=new ArrayBuffer(0);try{c.blob=0===new Blob([b],{type:"application/zip"}).size}catch(d){try{var e=window.BlobBuilder||window.WebKitBlobBuilder||window.MozBlobBuilder||window.MSBlobBuilder,f=new e;f.append(b),c.blob=0===f.getBlob("application/zip").size}catch(d){c.blob=!1}}}}).call(this,"undefined"!=typeof Buffer?Buffer:void 0)},{}],18:[function(a,b){"use strict";function c(a){a&&(this.data=a,this.length=this.data.length,this.index=0)}var d=a("./dataReader");c.prototype=new d,c.prototype.byteAt=function(a){return this.data[a]},c.prototype.lastIndexOfSignature=function(a){for(var b=a.charCodeAt(0),c=a.charCodeAt(1),d=a.charCodeAt(2),e=a.charCodeAt(3),f=this.length-4;f>=0;--f)if(this.data[f]===b&&this.data[f+1]===c&&this.data[f+2]===d&&this.data[f+3]===e)return f;return-1},c.prototype.readData=function(a){if(this.checkOffset(a),0===a)return new Uint8Array(0);var b=this.data.subarray(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./dataReader":5}],19:[function(a,b){"use strict";var c=a("./utils"),d=function(a){this.data=new Uint8Array(a),this.index=0};d.prototype={append:function(a){0!==a.length&&(a=c.transformTo("uint8array",a),this.data.set(a,this.index),this.index+=a.length)},finalize:function(){return this.data}},b.exports=d},{"./utils":21}],20:[function(a,b,c){"use strict";for(var d=a("./utils"),e=a("./support"),f=a("./nodeBuffer"),g=new Array(256),h=0;256>h;h++)g[h]=h>=252?6:h>=248?5:h>=240?4:h>=224?3:h>=192?2:1;g[254]=g[254]=1;var i=function(a){var b,c,d,f,g,h=a.length,i=0;for(f=0;h>f;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),i+=128>c?1:2048>c?2:65536>c?3:4;for(b=e.uint8array?new Uint8Array(i):new 
Array(i),g=0,f=0;i>g;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),128>c?b[g++]=c:2048>c?(b[g++]=192|c>>>6,b[g++]=128|63&c):65536>c?(b[g++]=224|c>>>12,b[g++]=128|c>>>6&63,b[g++]=128|63&c):(b[g++]=240|c>>>18,b[g++]=128|c>>>12&63,b[g++]=128|c>>>6&63,b[g++]=128|63&c);return b},j=function(a,b){var c;for(b=b||a.length,b>a.length&&(b=a.length),c=b-1;c>=0&&128===(192&a[c]);)c--;return 0>c?b:0===c?b:c+g[a[c]]>b?c:b},k=function(a){var b,c,e,f,h=a.length,i=new Array(2*h);for(c=0,b=0;h>b;)if(e=a[b++],128>e)i[c++]=e;else if(f=g[e],f>4)i[c++]=65533,b+=f-1;else{for(e&=2===f?31:3===f?15:7;f>1&&h>b;)e=e<<6|63&a[b++],f--;f>1?i[c++]=65533:65536>e?i[c++]=e:(e-=65536,i[c++]=55296|e>>10&1023,i[c++]=56320|1023&e)}return i.length!==c&&(i.subarray?i=i.subarray(0,c):i.length=c),d.applyFromCharCode(i)};c.utf8encode=function(a){return e.nodebuffer?f(a,"utf-8"):i(a)},c.utf8decode=function(a){if(e.nodebuffer)return d.transformTo("nodebuffer",a).toString("utf-8");a=d.transformTo(e.uint8array?"uint8array":"array",a);for(var b=[],c=0,f=a.length,g=65536;f>c;){var h=j(a,Math.min(c+g,f));b.push(e.uint8array?k(a.subarray(c,h)):k(a.slice(c,h))),c=h}return b.join("")}},{"./nodeBuffer":11,"./support":17,"./utils":21}],21:[function(a,b,c){"use strict";function d(a){return a}function e(a,b){for(var c=0;c<a.length;++c)b[c]=255&a.charCodeAt(c);return b}function f(a){var b=65536,d=[],e=a.length,f=c.getTypeOf(a),g=0,h=!0;try{switch(f){case"uint8array":String.fromCharCode.apply(null,new Uint8Array(0));break;case"nodebuffer":String.fromCharCode.apply(null,j(0))}}catch(i){h=!1}if(!h){for(var k="",l=0;l<a.length;l++)k+=String.fromCharCode(a[l]);return k}for(;e>g&&b>1;)try{d.push("array"===f||"nodebuffer"===f?String.fromCharCode.apply(null,a.slice(g,Math.min(g+b,e))):String.fromCharCode.apply(null,a.subarray(g,Math.min(g+b,e)))),g+=b}catch(i){b=Math.floor(b/2)}return d.join("")}function g(a,b){for(var c=0;c<a.length;c++)b[c]=a[c];return b}var h=a("./support"),i=a("./compressions"),j=a("./nodeBuffer");c.string2binary=function(a){for(var b="",c=0;c<a.length;c++)b+=String.fromCharCode(255&a.charCodeAt(c));return b},c.arrayBuffer2Blob=function(a){c.checkSupport("blob");try{return new Blob([a],{type:"application/zip"})}catch(b){try{var d=window.BlobBuilder||window.WebKitBlobBuilder||window.MozBlobBuilder||window.MSBlobBuilder,e=new d;return e.append(a),e.getBlob("application/zip")}catch(b){throw new Error("Bug : can't construct the Blob.")}}},c.applyFromCharCode=f;var k={};k.string={string:d,array:function(a){return e(a,new Array(a.length))},arraybuffer:function(a){return k.string.uint8array(a).buffer},uint8array:function(a){return e(a,new Uint8Array(a.length))},nodebuffer:function(a){return e(a,j(a.length))}},k.array={string:f,array:d,arraybuffer:function(a){return new Uint8Array(a).buffer},uint8array:function(a){return new Uint8Array(a)},nodebuffer:function(a){return j(a)}},k.arraybuffer={string:function(a){return f(new Uint8Array(a))},array:function(a){return g(new Uint8Array(a),new Array(a.byteLength))},arraybuffer:d,uint8array:function(a){return new Uint8Array(a)},nodebuffer:function(a){return j(new Uint8Array(a))}},k.uint8array={string:f,array:function(a){return g(a,new Array(a.length))},arraybuffer:function(a){return a.buffer},uint8array:d,nodebuffer:function(a){return j(a)}},k.nodebuffer={string:f,array:function(a){return g(a,new Array(a.length))},arraybuffer:function(a){return k.nodebuffer.uint8array(a).buffer},uint8array:function(a){return g(a,new 
Uint8Array(a.length))},nodebuffer:d},c.transformTo=function(a,b){if(b||(b=""),!a)return b;c.checkSupport(a);var d=c.getTypeOf(b),e=k[d][a](b);return e},c.getTypeOf=function(a){return"string"==typeof a?"string":"[object Array]"===Object.prototype.toString.call(a)?"array":h.nodebuffer&&j.test(a)?"nodebuffer":h.uint8array&&a instanceof Uint8Array?"uint8array":h.arraybuffer&&a instanceof ArrayBuffer?"arraybuffer":void 0},c.checkSupport=function(a){var b=h[a.toLowerCase()];if(!b)throw new Error(a+" is not supported by this browser")},c.MAX_VALUE_16BITS=65535,c.MAX_VALUE_32BITS=-1,c.pretty=function(a){var b,c,d="";for(c=0;c<(a||"").length;c++)b=a.charCodeAt(c),d+="\\x"+(16>b?"0":"")+b.toString(16).toUpperCase();return d},c.findCompression=function(a){for(var b in i)if(i.hasOwnProperty(b)&&i[b].magic===a)return i[b];return null},c.isRegExp=function(a){return"[object RegExp]"===Object.prototype.toString.call(a)}},{"./compressions":3,"./nodeBuffer":11,"./support":17}],22:[function(a,b){"use strict";function c(a,b){this.files=[],this.loadOptions=b,a&&this.load(a)}var d=a("./stringReader"),e=a("./nodeBufferReader"),f=a("./uint8ArrayReader"),g=a("./utils"),h=a("./signature"),i=a("./zipEntry"),j=a("./support"),k=a("./object");c.prototype={checkSignature:function(a){var b=this.reader.readString(4);if(b!==a)throw new Error("Corrupted zip or bug : unexpected signature ("+g.pretty(b)+", expected "+g.pretty(a)+")")},readBlockEndOfCentral:function(){this.diskNumber=this.reader.readInt(2),this.diskWithCentralDirStart=this.reader.readInt(2),this.centralDirRecordsOnThisDisk=this.reader.readInt(2),this.centralDirRecords=this.reader.readInt(2),this.centralDirSize=this.reader.readInt(4),this.centralDirOffset=this.reader.readInt(4),this.zipCommentLength=this.reader.readInt(2),this.zipComment=this.reader.readString(this.zipCommentLength),this.zipComment=k.utf8decode(this.zipComment)},readBlockZip64EndOfCentral:function(){this.zip64EndOfCentralSize=this.reader.readInt(8),this.versionMadeBy=this.reader.readString(2),this.versionNeeded=this.reader.readInt(2),this.diskNumber=this.reader.readInt(4),this.diskWithCentralDirStart=this.reader.readInt(4),this.centralDirRecordsOnThisDisk=this.reader.readInt(8),this.centralDirRecords=this.reader.readInt(8),this.centralDirSize=this.reader.readInt(8),this.centralDirOffset=this.reader.readInt(8),this.zip64ExtensibleData={};for(var a,b,c,d=this.zip64EndOfCentralSize-44,e=0;d>e;)a=this.reader.readInt(2),b=this.reader.readInt(4),c=this.reader.readString(b),this.zip64ExtensibleData[a]={id:a,length:b,value:c}},readBlockZip64EndOfCentralLocator:function(){if(this.diskWithZip64CentralDirStart=this.reader.readInt(4),this.relativeOffsetEndOfZip64CentralDir=this.reader.readInt(8),this.disksCount=this.reader.readInt(4),this.disksCount>1)throw new Error("Multi-volumes zip are not supported")},readLocalFiles:function(){var a,b;for(a=0;a<this.files.length;a++)b=this.files[a],this.reader.setIndex(b.localHeaderOffset),this.checkSignature(h.LOCAL_FILE_HEADER),b.readLocalPart(this.reader),b.handleUTF8()},readCentralDir:function(){var a;for(this.reader.setIndex(this.centralDirOffset);this.reader.readString(4)===h.CENTRAL_FILE_HEADER;)a=new i({zip64:this.zip64},this.loadOptions),a.readCentralPart(this.reader),this.files.push(a)},readEndOfCentral:function(){var a=this.reader.lastIndexOfSignature(h.CENTRAL_DIRECTORY_END);if(-1===a)throw new Error("Corrupted zip : can't find end of central 
directory");if(this.reader.setIndex(a),this.checkSignature(h.CENTRAL_DIRECTORY_END),this.readBlockEndOfCentral(),this.diskNumber===g.MAX_VALUE_16BITS||this.diskWithCentralDirStart===g.MAX_VALUE_16BITS||this.centralDirRecordsOnThisDisk===g.MAX_VALUE_16BITS||this.centralDirRecords===g.MAX_VALUE_16BITS||this.centralDirSize===g.MAX_VALUE_32BITS||this.centralDirOffset===g.MAX_VALUE_32BITS){if(this.zip64=!0,a=this.reader.lastIndexOfSignature(h.ZIP64_CENTRAL_DIRECTORY_LOCATOR),-1===a)throw new Error("Corrupted zip : can't find the ZIP64 end of central directory locator");this.reader.setIndex(a),this.checkSignature(h.ZIP64_CENTRAL_DIRECTORY_LOCATOR),this.readBlockZip64EndOfCentralLocator(),this.reader.setIndex(this.relativeOffsetEndOfZip64CentralDir),this.checkSignature(h.ZIP64_CENTRAL_DIRECTORY_END),this.readBlockZip64EndOfCentral()}},prepareReader:function(a){var b=g.getTypeOf(a);this.reader="string"!==b||j.uint8array?"nodebuffer"===b?new e(a):new f(g.transformTo("uint8array",a)):new d(a,this.loadOptions.optimizedBinaryString)},load:function(a){this.prepareReader(a),this.readEndOfCentral(),this.readCentralDir(),this.readLocalFiles()}},b.exports=c},{"./nodeBufferReader":12,"./object":13,"./signature":14,"./stringReader":15,"./support":17,"./uint8ArrayReader":18,"./utils":21,"./zipEntry":23}],23:[function(a,b){"use strict";function c(a,b){this.options=a,this.loadOptions=b}var d=a("./stringReader"),e=a("./utils"),f=a("./compressedObject"),g=a("./object");c.prototype={isEncrypted:function(){return 1===(1&this.bitFlag)},useUTF8:function(){return 2048===(2048&this.bitFlag)},prepareCompressedContent:function(a,b,c){return function(){var d=a.index;a.setIndex(b);var e=a.readData(c);return a.setIndex(d),e}},prepareContent:function(a,b,c,d,f){return function(){var a=e.transformTo(d.uncompressInputType,this.getCompressedContent()),b=d.uncompress(a);if(b.length!==f)throw new Error("Bug : uncompressed data size mismatch");return b}},readLocalPart:function(a){var b,c;if(a.skip(22),this.fileNameLength=a.readInt(2),c=a.readInt(2),this.fileName=a.readString(this.fileNameLength),a.skip(c),-1==this.compressedSize||-1==this.uncompressedSize)throw new Error("Bug or corrupted zip : didn't get enough informations from the central directory (compressedSize == -1 || uncompressedSize == -1)");if(b=e.findCompression(this.compressionMethod),null===b)throw new Error("Corrupted zip : compression "+e.pretty(this.compressionMethod)+" unknown (inner file : "+this.fileName+")");if(this.decompressed=new f,this.decompressed.compressedSize=this.compressedSize,this.decompressed.uncompressedSize=this.uncompressedSize,this.decompressed.crc32=this.crc32,this.decompressed.compressionMethod=this.compressionMethod,this.decompressed.getCompressedContent=this.prepareCompressedContent(a,a.index,this.compressedSize,b),this.decompressed.getContent=this.prepareContent(a,a.index,this.compressedSize,b,this.uncompressedSize),this.loadOptions.checkCRC32&&(this.decompressed=e.transformTo("string",this.decompressed.getContent()),g.crc32(this.decompressed)!==this.crc32))throw new Error("Corrupted zip : CRC32 
mismatch")},readCentralPart:function(a){if(this.versionMadeBy=a.readString(2),this.versionNeeded=a.readInt(2),this.bitFlag=a.readInt(2),this.compressionMethod=a.readString(2),this.date=a.readDate(),this.crc32=a.readInt(4),this.compressedSize=a.readInt(4),this.uncompressedSize=a.readInt(4),this.fileNameLength=a.readInt(2),this.extraFieldsLength=a.readInt(2),this.fileCommentLength=a.readInt(2),this.diskNumberStart=a.readInt(2),this.internalFileAttributes=a.readInt(2),this.externalFileAttributes=a.readInt(4),this.localHeaderOffset=a.readInt(4),this.isEncrypted())throw new Error("Encrypted zip are not supported");this.fileName=a.readString(this.fileNameLength),this.readExtraFields(a),this.parseZIP64ExtraField(a),this.fileComment=a.readString(this.fileCommentLength),this.dir=16&this.externalFileAttributes?!0:!1},parseZIP64ExtraField:function(){if(this.extraFields[1]){var a=new d(this.extraFields[1].value);this.uncompressedSize===e.MAX_VALUE_32BITS&&(this.uncompressedSize=a.readInt(8)),this.compressedSize===e.MAX_VALUE_32BITS&&(this.compressedSize=a.readInt(8)),this.localHeaderOffset===e.MAX_VALUE_32BITS&&(this.localHeaderOffset=a.readInt(8)),this.diskNumberStart===e.MAX_VALUE_32BITS&&(this.diskNumberStart=a.readInt(4))}},readExtraFields:function(a){var b,c,d,e=a.index;for(this.extraFields=this.extraFields||{};a.index<e+this.extraFieldsLength;)b=a.readInt(2),c=a.readInt(2),d=a.readString(c),this.extraFields[b]={id:b,length:c,value:d}},handleUTF8:function(){if(this.useUTF8())this.fileName=g.utf8decode(this.fileName),this.fileComment=g.utf8decode(this.fileComment);else{var a=this.findExtraFieldUnicodePath();null!==a&&(this.fileName=a);var b=this.findExtraFieldUnicodeComment();null!==b&&(this.fileComment=b)}},findExtraFieldUnicodePath:function(){var a=this.extraFields[28789];if(a){var b=new d(a.value);return 1!==b.readInt(1)?null:g.crc32(this.fileName)!==b.readInt(4)?null:g.utf8decode(b.readString(a.length-5))}return null},findExtraFieldUnicodeComment:function(){var a=this.extraFields[25461];if(a){var b=new d(a.value);return 1!==b.readInt(1)?null:g.crc32(this.fileComment)!==b.readInt(4)?null:g.utf8decode(b.readString(a.length-5))}return null}},b.exports=c},{"./compressedObject":2,"./object":13,"./stringReader":15,"./utils":21}],24:[function(a,b){"use strict";var c=a("./lib/utils/common").assign,d=a("./lib/deflate"),e=a("./lib/inflate"),f=a("./lib/zlib/constants"),g={};c(g,d,e,f),b.exports=g},{"./lib/deflate":25,"./lib/inflate":26,"./lib/utils/common":27,"./lib/zlib/constants":30}],25:[function(a,b,c){"use strict";function d(a,b){var c=new s(b);if(c.push(a,!0),c.err)throw c.msg;return c.result}function e(a,b){return b=b||{},b.raw=!0,d(a,b)}function f(a,b){return b=b||{},b.gzip=!0,d(a,b)}var g=a("./zlib/deflate.js"),h=a("./utils/common"),i=a("./utils/strings"),j=a("./zlib/messages"),k=a("./zlib/zstream"),l=0,m=4,n=0,o=1,p=-1,q=0,r=8,s=function(a){this.options=h.assign({level:p,method:r,chunkSize:16384,windowBits:15,memLevel:8,strategy:q,to:""},a||{});var b=this.options;b.raw&&b.windowBits>0?b.windowBits=-b.windowBits:b.gzip&&b.windowBits>0&&b.windowBits<16&&(b.windowBits+=16),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new k,this.strm.avail_out=0;var c=g.deflateInit2(this.strm,b.level,b.method,b.windowBits,b.memLevel,b.strategy);if(c!==n)throw new Error(j[c]);b.header&&g.deflateSetHeader(this.strm,b.header)};s.prototype.push=function(a,b){var c,d,e=this.strm,f=this.options.chunkSize;if(this.ended)return!1;d=b===~~b?b:b===!0?m:l,e.input="string"==typeof 
a?i.string2buf(a):a,e.next_in=0,e.avail_in=e.input.length;do{if(0===e.avail_out&&(e.output=new h.Buf8(f),e.next_out=0,e.avail_out=f),c=g.deflate(e,d),c!==o&&c!==n)return this.onEnd(c),this.ended=!0,!1;(0===e.avail_out||0===e.avail_in&&d===m)&&this.onData("string"===this.options.to?i.buf2binstring(h.shrinkBuf(e.output,e.next_out)):h.shrinkBuf(e.output,e.next_out))}while((e.avail_in>0||0===e.avail_out)&&c!==o);return d===m?(c=g.deflateEnd(this.strm),this.onEnd(c),this.ended=!0,c===n):!0},s.prototype.onData=function(a){this.chunks.push(a)},s.prototype.onEnd=function(a){a===n&&(this.result="string"===this.options.to?this.chunks.join(""):h.flattenChunks(this.chunks)),this.chunks=[],this.err=a,this.msg=this.strm.msg},c.Deflate=s,c.deflate=d,c.deflateRaw=e,c.gzip=f},{"./utils/common":27,"./utils/strings":28,"./zlib/deflate.js":32,"./zlib/messages":37,"./zlib/zstream":39}],26:[function(a,b,c){"use strict";function d(a,b){var c=new m(b);if(c.push(a,!0),c.err)throw c.msg;return c.result}function e(a,b){return b=b||{},b.raw=!0,d(a,b)}var f=a("./zlib/inflate.js"),g=a("./utils/common"),h=a("./utils/strings"),i=a("./zlib/constants"),j=a("./zlib/messages"),k=a("./zlib/zstream"),l=a("./zlib/gzheader"),m=function(a){this.options=g.assign({chunkSize:16384,windowBits:0,to:""},a||{});var b=this.options;b.raw&&b.windowBits>=0&&b.windowBits<16&&(b.windowBits=-b.windowBits,0===b.windowBits&&(b.windowBits=-15)),!(b.windowBits>=0&&b.windowBits<16)||a&&a.windowBits||(b.windowBits+=32),b.windowBits>15&&b.windowBits<48&&0===(15&b.windowBits)&&(b.windowBits|=15),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new k,this.strm.avail_out=0;var c=f.inflateInit2(this.strm,b.windowBits);if(c!==i.Z_OK)throw new Error(j[c]);this.header=new l,f.inflateGetHeader(this.strm,this.header)};m.prototype.push=function(a,b){var c,d,e,j,k,l=this.strm,m=this.options.chunkSize;if(this.ended)return!1;d=b===~~b?b:b===!0?i.Z_FINISH:i.Z_NO_FLUSH,l.input="string"==typeof a?h.binstring2buf(a):a,l.next_in=0,l.avail_in=l.input.length;do{if(0===l.avail_out&&(l.output=new g.Buf8(m),l.next_out=0,l.avail_out=m),c=f.inflate(l,i.Z_NO_FLUSH),c!==i.Z_STREAM_END&&c!==i.Z_OK)return this.onEnd(c),this.ended=!0,!1;l.next_out&&(0===l.avail_out||c===i.Z_STREAM_END||0===l.avail_in&&d===i.Z_FINISH)&&("string"===this.options.to?(e=h.utf8border(l.output,l.next_out),j=l.next_out-e,k=h.buf2string(l.output,e),l.next_out=j,l.avail_out=m-j,j&&g.arraySet(l.output,l.output,e,j,0),this.onData(k)):this.onData(g.shrinkBuf(l.output,l.next_out)))}while(l.avail_in>0&&c!==i.Z_STREAM_END);return c===i.Z_STREAM_END&&(d=i.Z_FINISH),d===i.Z_FINISH?(c=f.inflateEnd(this.strm),this.onEnd(c),this.ended=!0,c===i.Z_OK):!0},m.prototype.onData=function(a){this.chunks.push(a)},m.prototype.onEnd=function(a){a===i.Z_OK&&(this.result="string"===this.options.to?this.chunks.join(""):g.flattenChunks(this.chunks)),this.chunks=[],this.err=a,this.msg=this.strm.msg},c.Inflate=m,c.inflate=d,c.inflateRaw=e,c.ungzip=d},{"./utils/common":27,"./utils/strings":28,"./zlib/constants":30,"./zlib/gzheader":33,"./zlib/inflate.js":35,"./zlib/messages":37,"./zlib/zstream":39}],27:[function(a,b,c){"use strict";var d="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Int32Array;c.assign=function(a){for(var b=Array.prototype.slice.call(arguments,1);b.length;){var c=b.shift();if(c){if("object"!=typeof c)throw new TypeError(c+"must be non-object");for(var d in c)c.hasOwnProperty(d)&&(a[d]=c[d])}}return a},c.shrinkBuf=function(a,b){return 
a.length===b?a:a.subarray?a.subarray(0,b):(a.length=b,a)};var e={arraySet:function(a,b,c,d,e){if(b.subarray&&a.subarray)return void a.set(b.subarray(c,c+d),e);for(var f=0;d>f;f++)a[e+f]=b[c+f]},flattenChunks:function(a){var b,c,d,e,f,g;for(d=0,b=0,c=a.length;c>b;b++)d+=a[b].length;for(g=new Uint8Array(d),e=0,b=0,c=a.length;c>b;b++)f=a[b],g.set(f,e),e+=f.length;return g}},f={arraySet:function(a,b,c,d,e){for(var f=0;d>f;f++)a[e+f]=b[c+f]},flattenChunks:function(a){return[].concat.apply([],a)}};c.setTyped=function(a){a?(c.Buf8=Uint8Array,c.Buf16=Uint16Array,c.Buf32=Int32Array,c.assign(c,e)):(c.Buf8=Array,c.Buf16=Array,c.Buf32=Array,c.assign(c,f))},c.setTyped(d)},{}],28:[function(a,b,c){"use strict";function d(a,b){if(65537>b&&(a.subarray&&g||!a.subarray&&f))return String.fromCharCode.apply(null,e.shrinkBuf(a,b));for(var c="",d=0;b>d;d++)c+=String.fromCharCode(a[d]);return c}var e=a("./common"),f=!0,g=!0;try{String.fromCharCode.apply(null,[0])}catch(h){f=!1}try{String.fromCharCode.apply(null,new Uint8Array(1))}catch(h){g=!1}for(var i=new e.Buf8(256),j=0;256>j;j++)i[j]=j>=252?6:j>=248?5:j>=240?4:j>=224?3:j>=192?2:1;i[254]=i[254]=1,c.string2buf=function(a){var b,c,d,f,g,h=a.length,i=0;for(f=0;h>f;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),i+=128>c?1:2048>c?2:65536>c?3:4;for(b=new e.Buf8(i),g=0,f=0;i>g;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),128>c?b[g++]=c:2048>c?(b[g++]=192|c>>>6,b[g++]=128|63&c):65536>c?(b[g++]=224|c>>>12,b[g++]=128|c>>>6&63,b[g++]=128|63&c):(b[g++]=240|c>>>18,b[g++]=128|c>>>12&63,b[g++]=128|c>>>6&63,b[g++]=128|63&c);return b},c.buf2binstring=function(a){return d(a,a.length)},c.binstring2buf=function(a){for(var b=new e.Buf8(a.length),c=0,d=b.length;d>c;c++)b[c]=a.charCodeAt(c);return b},c.buf2string=function(a,b){var c,e,f,g,h=b||a.length,j=new Array(2*h);for(e=0,c=0;h>c;)if(f=a[c++],128>f)j[e++]=f;else if(g=i[f],g>4)j[e++]=65533,c+=g-1;else{for(f&=2===g?31:3===g?15:7;g>1&&h>c;)f=f<<6|63&a[c++],g--;g>1?j[e++]=65533:65536>f?j[e++]=f:(f-=65536,j[e++]=55296|f>>10&1023,j[e++]=56320|1023&f)}return d(j,e)},c.utf8border=function(a,b){var c;for(b=b||a.length,b>a.length&&(b=a.length),c=b-1;c>=0&&128===(192&a[c]);)c--;return 0>c?b:0===c?b:c+i[a[c]]>b?c:b}},{"./common":27}],29:[function(a,b){"use strict";function c(a,b,c,d){for(var e=65535&a|0,f=a>>>16&65535|0,g=0;0!==c;){g=c>2e3?2e3:c,c-=g;do e=e+b[d++]|0,f=f+e|0;while(--g);e%=65521,f%=65521}return e|f<<16|0}b.exports=c},{}],30:[function(a,b){b.exports={Z_NO_FLUSH:0,Z_PARTIAL_FLUSH:1,Z_SYNC_FLUSH:2,Z_FULL_FLUSH:3,Z_FINISH:4,Z_BLOCK:5,Z_TREES:6,Z_OK:0,Z_STREAM_END:1,Z_NEED_DICT:2,Z_ERRNO:-1,Z_STREAM_ERROR:-2,Z_DATA_ERROR:-3,Z_BUF_ERROR:-5,Z_NO_COMPRESSION:0,Z_BEST_SPEED:1,Z_BEST_COMPRESSION:9,Z_DEFAULT_COMPRESSION:-1,Z_FILTERED:1,Z_HUFFMAN_ONLY:2,Z_RLE:3,Z_FIXED:4,Z_DEFAULT_STRATEGY:0,Z_BINARY:0,Z_TEXT:1,Z_UNKNOWN:2,Z_DEFLATED:8}},{}],31:[function(a,b){"use strict";function c(){for(var a,b=[],c=0;256>c;c++){a=c;for(var d=0;8>d;d++)a=1&a?3988292384^a>>>1:a>>>1;b[c]=a}return b}function d(a,b,c,d){var f=e,g=d+c;a=-1^a;for(var h=d;g>h;h++)a=a>>>8^f[255&(a^b[h])];return-1^a}var e=c();b.exports=d},{}],32:[function(a,b,c){"use strict";function d(a,b){return a.msg=G[b],b}function e(a){return(a<<1)-(a>4?9:0)}function f(a){for(var b=a.length;--b>=0;)a[b]=0}function g(a){var 
b=a.state,c=b.pending;c>a.avail_out&&(c=a.avail_out),0!==c&&(C.arraySet(a.output,b.pending_buf,b.pending_out,c,a.next_out),a.next_out+=c,b.pending_out+=c,a.total_out+=c,a.avail_out-=c,b.pending-=c,0===b.pending&&(b.pending_out=0))}function h(a,b){D._tr_flush_block(a,a.block_start>=0?a.block_start:-1,a.strstart-a.block_start,b),a.block_start=a.strstart,g(a.strm)}function i(a,b){a.pending_buf[a.pending++]=b}function j(a,b){a.pending_buf[a.pending++]=b>>>8&255,a.pending_buf[a.pending++]=255&b}function k(a,b,c,d){var e=a.avail_in;return e>d&&(e=d),0===e?0:(a.avail_in-=e,C.arraySet(b,a.input,a.next_in,e,c),1===a.state.wrap?a.adler=E(a.adler,b,e,c):2===a.state.wrap&&(a.adler=F(a.adler,b,e,c)),a.next_in+=e,a.total_in+=e,e)}function l(a,b){var c,d,e=a.max_chain_length,f=a.strstart,g=a.prev_length,h=a.nice_match,i=a.strstart>a.w_size-jb?a.strstart-(a.w_size-jb):0,j=a.window,k=a.w_mask,l=a.prev,m=a.strstart+ib,n=j[f+g-1],o=j[f+g];a.prev_length>=a.good_match&&(e>>=2),h>a.lookahead&&(h=a.lookahead);do if(c=b,j[c+g]===o&&j[c+g-1]===n&&j[c]===j[f]&&j[++c]===j[f+1]){f+=2,c++;do;while(j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&m>f);if(d=ib-(m-f),f=m-ib,d>g){if(a.match_start=b,g=d,d>=h)break;n=j[f+g-1],o=j[f+g]}}while((b=l[b&k])>i&&0!==--e);return g<=a.lookahead?g:a.lookahead}function m(a){var b,c,d,e,f,g=a.w_size;do{if(e=a.window_size-a.lookahead-a.strstart,a.strstart>=g+(g-jb)){C.arraySet(a.window,a.window,g,g,0),a.match_start-=g,a.strstart-=g,a.block_start-=g,c=a.hash_size,b=c;do d=a.head[--b],a.head[b]=d>=g?d-g:0;while(--c);c=g,b=c;do d=a.prev[--b],a.prev[b]=d>=g?d-g:0;while(--c);e+=g}if(0===a.strm.avail_in)break;if(c=k(a.strm,a.window,a.strstart+a.lookahead,e),a.lookahead+=c,a.lookahead+a.insert>=hb)for(f=a.strstart-a.insert,a.ins_h=a.window[f],a.ins_h=(a.ins_h<<a.hash_shift^a.window[f+1])&a.hash_mask;a.insert&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[f+hb-1])&a.hash_mask,a.prev[f&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=f,f++,a.insert--,!(a.lookahead+a.insert<hb)););}while(a.lookahead<jb&&0!==a.strm.avail_in)}function n(a,b){var c=65535;for(c>a.pending_buf_size-5&&(c=a.pending_buf_size-5);;){if(a.lookahead<=1){if(m(a),0===a.lookahead&&b===H)return sb;if(0===a.lookahead)break}a.strstart+=a.lookahead,a.lookahead=0;var d=a.block_start+c;if((0===a.strstart||a.strstart>=d)&&(a.lookahead=a.strstart-d,a.strstart=d,h(a,!1),0===a.strm.avail_out))return sb;if(a.strstart-a.block_start>=a.w_size-jb&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.strstart>a.block_start&&(h(a,!1),0===a.strm.avail_out)?sb:sb}function o(a,b){for(var c,d;;){if(a.lookahead<jb){if(m(a),a.lookahead<jb&&b===H)return sb;if(0===a.lookahead)break}if(c=0,a.lookahead>=hb&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart),0!==c&&a.strstart-c<=a.w_size-jb&&(a.match_length=l(a,c)),a.match_length>=hb)if(d=D._tr_tally(a,a.strstart-a.match_start,a.match_length-hb),a.lookahead-=a.match_length,a.match_length<=a.max_lazy_match&&a.lookahead>=hb){a.match_length--;do a.strstart++,a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart;while(0!==--a.match_length);a.strstart++}else a.strstart+=a.match_length,a.match_length=0,a.ins_h=a.window[a.strstart],a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+1])&a.hash_mask;else 
d=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++;if(d&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=a.strstart<hb-1?a.strstart:hb-1,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function p(a,b){for(var c,d,e;;){if(a.lookahead<jb){if(m(a),a.lookahead<jb&&b===H)return sb;if(0===a.lookahead)break}if(c=0,a.lookahead>=hb&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart),a.prev_length=a.match_length,a.prev_match=a.match_start,a.match_length=hb-1,0!==c&&a.prev_length<a.max_lazy_match&&a.strstart-c<=a.w_size-jb&&(a.match_length=l(a,c),a.match_length<=5&&(a.strategy===S||a.match_length===hb&&a.strstart-a.match_start>4096)&&(a.match_length=hb-1)),a.prev_length>=hb&&a.match_length<=a.prev_length){e=a.strstart+a.lookahead-hb,d=D._tr_tally(a,a.strstart-1-a.prev_match,a.prev_length-hb),a.lookahead-=a.prev_length-1,a.prev_length-=2;do++a.strstart<=e&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart);while(0!==--a.prev_length);if(a.match_available=0,a.match_length=hb-1,a.strstart++,d&&(h(a,!1),0===a.strm.avail_out))return sb}else if(a.match_available){if(d=D._tr_tally(a,0,a.window[a.strstart-1]),d&&h(a,!1),a.strstart++,a.lookahead--,0===a.strm.avail_out)return sb}else a.match_available=1,a.strstart++,a.lookahead--}return a.match_available&&(d=D._tr_tally(a,0,a.window[a.strstart-1]),a.match_available=0),a.insert=a.strstart<hb-1?a.strstart:hb-1,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function q(a,b){for(var c,d,e,f,g=a.window;;){if(a.lookahead<=ib){if(m(a),a.lookahead<=ib&&b===H)return sb;if(0===a.lookahead)break}if(a.match_length=0,a.lookahead>=hb&&a.strstart>0&&(e=a.strstart-1,d=g[e],d===g[++e]&&d===g[++e]&&d===g[++e])){f=a.strstart+ib;do;while(d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&f>e);a.match_length=ib-(f-e),a.match_length>a.lookahead&&(a.match_length=a.lookahead)}if(a.match_length>=hb?(c=D._tr_tally(a,1,a.match_length-hb),a.lookahead-=a.match_length,a.strstart+=a.match_length,a.match_length=0):(c=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++),c&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function r(a,b){for(var c;;){if(0===a.lookahead&&(m(a),0===a.lookahead)){if(b===H)return sb;break}if(a.match_length=0,c=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++,c&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function s(a){a.window_size=2*a.w_size,f(a.head),a.max_lazy_match=B[a.level].max_lazy,a.good_match=B[a.level].good_length,a.nice_match=B[a.level].nice_length,a.max_chain_length=B[a.level].max_chain,a.strstart=0,a.block_start=0,a.lookahead=0,a.insert=0,a.match_length=a.prev_length=hb-1,a.match_available=0,a.ins_h=0}function 
t(){this.strm=null,this.status=0,this.pending_buf=null,this.pending_buf_size=0,this.pending_out=0,this.pending=0,this.wrap=0,this.gzhead=null,this.gzindex=0,this.method=Y,this.last_flush=-1,this.w_size=0,this.w_bits=0,this.w_mask=0,this.window=null,this.window_size=0,this.prev=null,this.head=null,this.ins_h=0,this.hash_size=0,this.hash_bits=0,this.hash_mask=0,this.hash_shift=0,this.block_start=0,this.match_length=0,this.prev_match=0,this.match_available=0,this.strstart=0,this.match_start=0,this.lookahead=0,this.prev_length=0,this.max_chain_length=0,this.max_lazy_match=0,this.level=0,this.strategy=0,this.good_match=0,this.nice_match=0,this.dyn_ltree=new C.Buf16(2*fb),this.dyn_dtree=new C.Buf16(2*(2*db+1)),this.bl_tree=new C.Buf16(2*(2*eb+1)),f(this.dyn_ltree),f(this.dyn_dtree),f(this.bl_tree),this.l_desc=null,this.d_desc=null,this.bl_desc=null,this.bl_count=new C.Buf16(gb+1),this.heap=new C.Buf16(2*cb+1),f(this.heap),this.heap_len=0,this.heap_max=0,this.depth=new C.Buf16(2*cb+1),f(this.depth),this.l_buf=0,this.lit_bufsize=0,this.last_lit=0,this.d_buf=0,this.opt_len=0,this.static_len=0,this.matches=0,this.insert=0,this.bi_buf=0,this.bi_valid=0}function u(a){var b;return a&&a.state?(a.total_in=a.total_out=0,a.data_type=X,b=a.state,b.pending=0,b.pending_out=0,b.wrap<0&&(b.wrap=-b.wrap),b.status=b.wrap?lb:qb,a.adler=2===b.wrap?0:1,b.last_flush=H,D._tr_init(b),M):d(a,O)}function v(a){var b=u(a);return b===M&&s(a.state),b}function w(a,b){return a&&a.state?2!==a.state.wrap?O:(a.state.gzhead=b,M):O}function x(a,b,c,e,f,g){if(!a)return O;var h=1;if(b===R&&(b=6),0>e?(h=0,e=-e):e>15&&(h=2,e-=16),1>f||f>Z||c!==Y||8>e||e>15||0>b||b>9||0>g||g>V)return d(a,O);8===e&&(e=9);var i=new t;return a.state=i,i.strm=a,i.wrap=h,i.gzhead=null,i.w_bits=e,i.w_size=1<<i.w_bits,i.w_mask=i.w_size-1,i.hash_bits=f+7,i.hash_size=1<<i.hash_bits,i.hash_mask=i.hash_size-1,i.hash_shift=~~((i.hash_bits+hb-1)/hb),i.window=new C.Buf8(2*i.w_size),i.head=new C.Buf16(i.hash_size),i.prev=new C.Buf16(i.w_size),i.lit_bufsize=1<<f+6,i.pending_buf_size=4*i.lit_bufsize,i.pending_buf=new C.Buf8(i.pending_buf_size),i.d_buf=i.lit_bufsize>>1,i.l_buf=3*i.lit_bufsize,i.level=b,i.strategy=g,i.method=c,v(a)}function y(a,b){return x(a,b,Y,$,_,W)}function z(a,b){var c,h,k,l;if(!a||!a.state||b>L||0>b)return a?d(a,O):O;if(h=a.state,!a.output||!a.input&&0!==a.avail_in||h.status===rb&&b!==K)return d(a,0===a.avail_out?Q:O);if(h.strm=a,c=h.last_flush,h.last_flush=b,h.status===lb)if(2===h.wrap)a.adler=0,i(h,31),i(h,139),i(h,8),h.gzhead?(i(h,(h.gzhead.text?1:0)+(h.gzhead.hcrc?2:0)+(h.gzhead.extra?4:0)+(h.gzhead.name?8:0)+(h.gzhead.comment?16:0)),i(h,255&h.gzhead.time),i(h,h.gzhead.time>>8&255),i(h,h.gzhead.time>>16&255),i(h,h.gzhead.time>>24&255),i(h,9===h.level?2:h.strategy>=T||h.level<2?4:0),i(h,255&h.gzhead.os),h.gzhead.extra&&h.gzhead.extra.length&&(i(h,255&h.gzhead.extra.length),i(h,h.gzhead.extra.length>>8&255)),h.gzhead.hcrc&&(a.adler=F(a.adler,h.pending_buf,h.pending,0)),h.gzindex=0,h.status=mb):(i(h,0),i(h,0),i(h,0),i(h,0),i(h,0),i(h,9===h.level?2:h.strategy>=T||h.level<2?4:0),i(h,wb),h.status=qb);else{var 
m=Y+(h.w_bits-8<<4)<<8,n=-1;n=h.strategy>=T||h.level<2?0:h.level<6?1:6===h.level?2:3,m|=n<<6,0!==h.strstart&&(m|=kb),m+=31-m%31,h.status=qb,j(h,m),0!==h.strstart&&(j(h,a.adler>>>16),j(h,65535&a.adler)),a.adler=1}if(h.status===mb)if(h.gzhead.extra){for(k=h.pending;h.gzindex<(65535&h.gzhead.extra.length)&&(h.pending!==h.pending_buf_size||(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending!==h.pending_buf_size));)i(h,255&h.gzhead.extra[h.gzindex]),h.gzindex++;h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),h.gzindex===h.gzhead.extra.length&&(h.gzindex=0,h.status=nb)}else h.status=nb;if(h.status===nb)if(h.gzhead.name){k=h.pending;do{if(h.pending===h.pending_buf_size&&(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending===h.pending_buf_size)){l=1;break}l=h.gzindex<h.gzhead.name.length?255&h.gzhead.name.charCodeAt(h.gzindex++):0,i(h,l)}while(0!==l);h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),0===l&&(h.gzindex=0,h.status=ob)}else h.status=ob;if(h.status===ob)if(h.gzhead.comment){k=h.pending;do{if(h.pending===h.pending_buf_size&&(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending===h.pending_buf_size)){l=1;break}l=h.gzindex<h.gzhead.comment.length?255&h.gzhead.comment.charCodeAt(h.gzindex++):0,i(h,l)}while(0!==l);h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),0===l&&(h.status=pb)}else h.status=pb;if(h.status===pb&&(h.gzhead.hcrc?(h.pending+2>h.pending_buf_size&&g(a),h.pending+2<=h.pending_buf_size&&(i(h,255&a.adler),i(h,a.adler>>8&255),a.adler=0,h.status=qb)):h.status=qb),0!==h.pending){if(g(a),0===a.avail_out)return h.last_flush=-1,M}else if(0===a.avail_in&&e(b)<=e(c)&&b!==K)return d(a,Q);if(h.status===rb&&0!==a.avail_in)return d(a,Q);if(0!==a.avail_in||0!==h.lookahead||b!==H&&h.status!==rb){var o=h.strategy===T?r(h,b):h.strategy===U?q(h,b):B[h.level].func(h,b);if((o===ub||o===vb)&&(h.status=rb),o===sb||o===ub)return 0===a.avail_out&&(h.last_flush=-1),M;if(o===tb&&(b===I?D._tr_align(h):b!==L&&(D._tr_stored_block(h,0,0,!1),b===J&&(f(h.head),0===h.lookahead&&(h.strstart=0,h.block_start=0,h.insert=0))),g(a),0===a.avail_out))return h.last_flush=-1,M}return b!==K?M:h.wrap<=0?N:(2===h.wrap?(i(h,255&a.adler),i(h,a.adler>>8&255),i(h,a.adler>>16&255),i(h,a.adler>>24&255),i(h,255&a.total_in),i(h,a.total_in>>8&255),i(h,a.total_in>>16&255),i(h,a.total_in>>24&255)):(j(h,a.adler>>>16),j(h,65535&a.adler)),g(a),h.wrap>0&&(h.wrap=-h.wrap),0!==h.pending?M:N)}function A(a){var b;return a&&a.state?(b=a.state.status,b!==lb&&b!==mb&&b!==nb&&b!==ob&&b!==pb&&b!==qb&&b!==rb?d(a,O):(a.state=null,b===qb?d(a,P):M)):O}var B,C=a("../utils/common"),D=a("./trees"),E=a("./adler32"),F=a("./crc32"),G=a("./messages"),H=0,I=1,J=3,K=4,L=5,M=0,N=1,O=-2,P=-3,Q=-5,R=-1,S=1,T=2,U=3,V=4,W=0,X=2,Y=8,Z=9,$=15,_=8,ab=29,bb=256,cb=bb+1+ab,db=30,eb=19,fb=2*cb+1,gb=15,hb=3,ib=258,jb=ib+hb+1,kb=32,lb=42,mb=69,nb=73,ob=91,pb=103,qb=113,rb=666,sb=1,tb=2,ub=3,vb=4,wb=3,xb=function(a,b,c,d,e){this.good_length=a,this.max_lazy=b,this.nice_length=c,this.max_chain=d,this.func=e};B=[new xb(0,0,0,0,n),new xb(4,4,8,4,o),new xb(4,5,16,8,o),new xb(4,6,32,32,o),new xb(4,4,16,16,p),new xb(8,16,32,32,p),new xb(8,16,128,128,p),new xb(8,32,128,256,p),new xb(32,128,258,1024,p),new 
xb(32,258,258,4096,p)],c.deflateInit=y,c.deflateInit2=x,c.deflateReset=v,c.deflateResetKeep=u,c.deflateSetHeader=w,c.deflate=z,c.deflateEnd=A,c.deflateInfo="pako deflate (from Nodeca project)"},{"../utils/common":27,"./adler32":29,"./crc32":31,"./messages":37,"./trees":38}],33:[function(a,b){"use strict";function c(){this.text=0,this.time=0,this.xflags=0,this.os=0,this.extra=null,this.extra_len=0,this.name="",this.comment="",this.hcrc=0,this.done=!1}b.exports=c},{}],34:[function(a,b){"use strict";var c=30,d=12;b.exports=function(a,b){var e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z,A,B,C;e=a.state,f=a.next_in,B=a.input,g=f+(a.avail_in-5),h=a.next_out,C=a.output,i=h-(b-a.avail_out),j=h+(a.avail_out-257),k=e.dmax,l=e.wsize,m=e.whave,n=e.wnext,o=e.window,p=e.hold,q=e.bits,r=e.lencode,s=e.distcode,t=(1<<e.lenbits)-1,u=(1<<e.distbits)-1;a:do{15>q&&(p+=B[f++]<<q,q+=8,p+=B[f++]<<q,q+=8),v=r[p&t];b:for(;;){if(w=v>>>24,p>>>=w,q-=w,w=v>>>16&255,0===w)C[h++]=65535&v;else{if(!(16&w)){if(0===(64&w)){v=r[(65535&v)+(p&(1<<w)-1)];continue b}if(32&w){e.mode=d;break a}a.msg="invalid literal/length code",e.mode=c;break a}x=65535&v,w&=15,w&&(w>q&&(p+=B[f++]<<q,q+=8),x+=p&(1<<w)-1,p>>>=w,q-=w),15>q&&(p+=B[f++]<<q,q+=8,p+=B[f++]<<q,q+=8),v=s[p&u];c:for(;;){if(w=v>>>24,p>>>=w,q-=w,w=v>>>16&255,!(16&w)){if(0===(64&w)){v=s[(65535&v)+(p&(1<<w)-1)];continue c}a.msg="invalid distance code",e.mode=c;break a}if(y=65535&v,w&=15,w>q&&(p+=B[f++]<<q,q+=8,w>q&&(p+=B[f++]<<q,q+=8)),y+=p&(1<<w)-1,y>k){a.msg="invalid distance too far back",e.mode=c;break a}if(p>>>=w,q-=w,w=h-i,y>w){if(w=y-w,w>m&&e.sane){a.msg="invalid distance too far back",e.mode=c;break a}if(z=0,A=o,0===n){if(z+=l-w,x>w){x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}}else if(w>n){if(z+=l+n-w,w-=n,x>w){x-=w;do C[h++]=o[z++];while(--w);if(z=0,x>n){w=n,x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}}}else if(z+=n-w,x>w){x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}for(;x>2;)C[h++]=A[z++],C[h++]=A[z++],C[h++]=A[z++],x-=3;x&&(C[h++]=A[z++],x>1&&(C[h++]=A[z++]))}else{z=h-y;do C[h++]=C[z++],C[h++]=C[z++],C[h++]=C[z++],x-=3;while(x>2);x&&(C[h++]=C[z++],x>1&&(C[h++]=C[z++]))}break}}break}}while(g>f&&j>h);x=q>>3,f-=x,q-=x<<3,p&=(1<<q)-1,a.next_in=f,a.next_out=h,a.avail_in=g>f?5+(g-f):5-(f-g),a.avail_out=j>h?257+(j-h):257-(h-j),e.hold=p,e.bits=q}},{}],35:[function(a,b,c){"use strict";function d(a){return(a>>>24&255)+(a>>>8&65280)+((65280&a)<<8)+((255&a)<<24)}function e(){this.mode=0,this.last=!1,this.wrap=0,this.havedict=!1,this.flags=0,this.dmax=0,this.check=0,this.total=0,this.head=null,this.wbits=0,this.wsize=0,this.whave=0,this.wnext=0,this.window=null,this.hold=0,this.bits=0,this.length=0,this.offset=0,this.extra=0,this.lencode=null,this.distcode=null,this.lenbits=0,this.distbits=0,this.ncode=0,this.nlen=0,this.ndist=0,this.have=0,this.next=null,this.lens=new r.Buf16(320),this.work=new r.Buf16(288),this.lendyn=null,this.distdyn=null,this.sane=0,this.back=0,this.was=0}function f(a){var b;return a&&a.state?(b=a.state,a.total_in=a.total_out=b.total=0,a.msg="",b.wrap&&(a.adler=1&b.wrap),b.mode=K,b.last=0,b.havedict=0,b.dmax=32768,b.head=null,b.hold=0,b.bits=0,b.lencode=b.lendyn=new r.Buf32(ob),b.distcode=b.distdyn=new r.Buf32(pb),b.sane=1,b.back=-1,C):F}function g(a){var b;return a&&a.state?(b=a.state,b.wsize=0,b.whave=0,b.wnext=0,f(a)):F}function h(a,b){var c,d;return a&&a.state?(d=a.state,0>b?(c=0,b=-b):(c=(b>>4)+1,48>b&&(b&=15)),b&&(8>b||b>15)?F:(null!==d.window&&d.wbits!==b&&(d.window=null),d.wrap=c,d.wbits=b,g(a))):F}function i(a,b){var c,d;return a?(d=new 
e,a.state=d,d.window=null,c=h(a,b),c!==C&&(a.state=null),c):F}function j(a){return i(a,rb)}function k(a){if(sb){var b;for(p=new r.Buf32(512),q=new r.Buf32(32),b=0;144>b;)a.lens[b++]=8;for(;256>b;)a.lens[b++]=9;for(;280>b;)a.lens[b++]=7;for(;288>b;)a.lens[b++]=8;for(v(x,a.lens,0,288,p,0,a.work,{bits:9}),b=0;32>b;)a.lens[b++]=5;v(y,a.lens,0,32,q,0,a.work,{bits:5}),sb=!1}a.lencode=p,a.lenbits=9,a.distcode=q,a.distbits=5}function l(a,b,c,d){var e,f=a.state;return null===f.window&&(f.wsize=1<<f.wbits,f.wnext=0,f.whave=0,f.window=new r.Buf8(f.wsize)),d>=f.wsize?(r.arraySet(f.window,b,c-f.wsize,f.wsize,0),f.wnext=0,f.whave=f.wsize):(e=f.wsize-f.wnext,e>d&&(e=d),r.arraySet(f.window,b,c-d,e,f.wnext),d-=e,d?(r.arraySet(f.window,b,c-d,d,0),f.wnext=d,f.whave=f.wsize):(f.wnext+=e,f.wnext===f.wsize&&(f.wnext=0),f.whave<f.wsize&&(f.whave+=e))),0}function m(a,b){var c,e,f,g,h,i,j,m,n,o,p,q,ob,pb,qb,rb,sb,tb,ub,vb,wb,xb,yb,zb,Ab=0,Bb=new r.Buf8(4),Cb=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15];if(!a||!a.state||!a.output||!a.input&&0!==a.avail_in)return F;c=a.state,c.mode===V&&(c.mode=W),h=a.next_out,f=a.output,j=a.avail_out,g=a.next_in,e=a.input,i=a.avail_in,m=c.hold,n=c.bits,o=i,p=j,xb=C;a:for(;;)switch(c.mode){case K:if(0===c.wrap){c.mode=W;break}for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(2&c.wrap&&35615===m){c.check=0,Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0),m=0,n=0,c.mode=L;break}if(c.flags=0,c.head&&(c.head.done=!1),!(1&c.wrap)||(((255&m)<<8)+(m>>8))%31){a.msg="incorrect header check",c.mode=lb;break}if((15&m)!==J){a.msg="unknown compression method",c.mode=lb;break}if(m>>>=4,n-=4,wb=(15&m)+8,0===c.wbits)c.wbits=wb;else if(wb>c.wbits){a.msg="invalid window size",c.mode=lb;break}c.dmax=1<<wb,a.adler=c.check=1,c.mode=512&m?T:V,m=0,n=0;break;case L:for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(c.flags=m,(255&c.flags)!==J){a.msg="unknown compression method",c.mode=lb;break}if(57344&c.flags){a.msg="unknown header flags set",c.mode=lb;break}c.head&&(c.head.text=m>>8&1),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0,c.mode=M;case M:for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.head&&(c.head.time=m),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,Bb[2]=m>>>16&255,Bb[3]=m>>>24&255,c.check=t(c.check,Bb,4,0)),m=0,n=0,c.mode=N;case N:for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.head&&(c.head.xflags=255&m,c.head.os=m>>8),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0,c.mode=O;case O:if(1024&c.flags){for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.length=m,c.head&&(c.head.extra_len=m),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0}else c.head&&(c.head.extra=null);c.mode=P;case P:if(1024&c.flags&&(q=c.length,q>i&&(q=i),q&&(c.head&&(wb=c.head.extra_len-c.length,c.head.extra||(c.head.extra=new Array(c.head.extra_len)),r.arraySet(c.head.extra,e,g,q,wb)),512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,c.length-=q),c.length))break a;c.length=0,c.mode=Q;case Q:if(2048&c.flags){if(0===i)break a;q=0;do wb=e[g+q++],c.head&&wb&&c.length<65536&&(c.head.name+=String.fromCharCode(wb));while(wb&&i>q);if(512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,wb)break a}else c.head&&(c.head.name=null);c.length=0,c.mode=R;case R:if(4096&c.flags){if(0===i)break a;q=0;do wb=e[g+q++],c.head&&wb&&c.length<65536&&(c.head.comment+=String.fromCharCode(wb));while(wb&&i>q);if(512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,wb)break a}else c.head&&(c.head.comment=null);c.mode=S;case 
S:if(512&c.flags){for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m!==(65535&c.check)){a.msg="header crc mismatch",c.mode=lb;break}m=0,n=0}c.head&&(c.head.hcrc=c.flags>>9&1,c.head.done=!0),a.adler=c.check=0,c.mode=V;break;case T:for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}a.adler=c.check=d(m),m=0,n=0,c.mode=U;case U:if(0===c.havedict)return a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,E;a.adler=c.check=1,c.mode=V;case V:if(b===A||b===B)break a;case W:if(c.last){m>>>=7&n,n-=7&n,c.mode=ib;break}for(;3>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}switch(c.last=1&m,m>>>=1,n-=1,3&m){case 0:c.mode=X;break;case 1:if(k(c),c.mode=bb,b===B){m>>>=2,n-=2;break a}break;case 2:c.mode=$;break;case 3:a.msg="invalid block type",c.mode=lb}m>>>=2,n-=2;break;case X:for(m>>>=7&n,n-=7&n;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if((65535&m)!==(m>>>16^65535)){a.msg="invalid stored block lengths",c.mode=lb;break}if(c.length=65535&m,m=0,n=0,c.mode=Y,b===B)break a;case Y:c.mode=Z;case Z:if(q=c.length){if(q>i&&(q=i),q>j&&(q=j),0===q)break a;r.arraySet(f,e,g,q,h),i-=q,g+=q,j-=q,h+=q,c.length-=q;break}c.mode=V;break;case $:for(;14>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(c.nlen=(31&m)+257,m>>>=5,n-=5,c.ndist=(31&m)+1,m>>>=5,n-=5,c.ncode=(15&m)+4,m>>>=4,n-=4,c.nlen>286||c.ndist>30){a.msg="too many length or distance symbols",c.mode=lb;break}c.have=0,c.mode=_;case _:for(;c.have<c.ncode;){for(;3>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.lens[Cb[c.have++]]=7&m,m>>>=3,n-=3}for(;c.have<19;)c.lens[Cb[c.have++]]=0;if(c.lencode=c.lendyn,c.lenbits=7,yb={bits:c.lenbits},xb=v(w,c.lens,0,19,c.lencode,0,c.work,yb),c.lenbits=yb.bits,xb){a.msg="invalid code lengths set",c.mode=lb;break}c.have=0,c.mode=ab;case ab:for(;c.have<c.nlen+c.ndist;){for(;Ab=c.lencode[m&(1<<c.lenbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(16>sb)m>>>=qb,n-=qb,c.lens[c.have++]=sb;else{if(16===sb){for(zb=qb+2;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m>>>=qb,n-=qb,0===c.have){a.msg="invalid bit length repeat",c.mode=lb;break}wb=c.lens[c.have-1],q=3+(3&m),m>>>=2,n-=2}else if(17===sb){for(zb=qb+3;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=qb,n-=qb,wb=0,q=3+(7&m),m>>>=3,n-=3}else{for(zb=qb+7;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=qb,n-=qb,wb=0,q=11+(127&m),m>>>=7,n-=7}if(c.have+q>c.nlen+c.ndist){a.msg="invalid bit length repeat",c.mode=lb;break}for(;q--;)c.lens[c.have++]=wb}}if(c.mode===lb)break;if(0===c.lens[256]){a.msg="invalid code -- missing end-of-block",c.mode=lb;break}if(c.lenbits=9,yb={bits:c.lenbits},xb=v(x,c.lens,0,c.nlen,c.lencode,0,c.work,yb),c.lenbits=yb.bits,xb){a.msg="invalid literal/lengths set",c.mode=lb;break}if(c.distbits=6,c.distcode=c.distdyn,yb={bits:c.distbits},xb=v(y,c.lens,c.nlen,c.ndist,c.distcode,0,c.work,yb),c.distbits=yb.bits,xb){a.msg="invalid distances set",c.mode=lb;break}if(c.mode=bb,b===B)break a;case bb:c.mode=cb;case cb:if(i>=6&&j>=258){a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,u(a,p),h=a.next_out,f=a.output,j=a.avail_out,g=a.next_in,e=a.input,i=a.avail_in,m=c.hold,n=c.bits,c.mode===V&&(c.back=-1);break}for(c.back=0;Ab=c.lencode[m&(1<<c.lenbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(rb&&0===(240&rb)){for(tb=qb,ub=rb,vb=sb;Ab=c.lencode[vb+((m&(1<<tb+ub)-1)>>tb)],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=tb+qb);){if(0===i)break 
a;i--,m+=e[g++]<<n,n+=8}m>>>=tb,n-=tb,c.back+=tb}if(m>>>=qb,n-=qb,c.back+=qb,c.length=sb,0===rb){c.mode=hb;break}if(32&rb){c.back=-1,c.mode=V;break}if(64&rb){a.msg="invalid literal/length code",c.mode=lb;break}c.extra=15&rb,c.mode=db;case db:if(c.extra){for(zb=c.extra;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.length+=m&(1<<c.extra)-1,m>>>=c.extra,n-=c.extra,c.back+=c.extra}c.was=c.length,c.mode=eb;case eb:for(;Ab=c.distcode[m&(1<<c.distbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(0===(240&rb)){for(tb=qb,ub=rb,vb=sb;Ab=c.distcode[vb+((m&(1<<tb+ub)-1)>>tb)],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=tb+qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=tb,n-=tb,c.back+=tb}if(m>>>=qb,n-=qb,c.back+=qb,64&rb){a.msg="invalid distance code",c.mode=lb;break}c.offset=sb,c.extra=15&rb,c.mode=fb;case fb:if(c.extra){for(zb=c.extra;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.offset+=m&(1<<c.extra)-1,m>>>=c.extra,n-=c.extra,c.back+=c.extra}if(c.offset>c.dmax){a.msg="invalid distance too far back",c.mode=lb;break}c.mode=gb;case gb:if(0===j)break a;if(q=p-j,c.offset>q){if(q=c.offset-q,q>c.whave&&c.sane){a.msg="invalid distance too far back",c.mode=lb;break}q>c.wnext?(q-=c.wnext,ob=c.wsize-q):ob=c.wnext-q,q>c.length&&(q=c.length),pb=c.window}else pb=f,ob=h-c.offset,q=c.length;q>j&&(q=j),j-=q,c.length-=q;do f[h++]=pb[ob++];while(--q);0===c.length&&(c.mode=cb);break;case hb:if(0===j)break a;f[h++]=c.length,j--,c.mode=cb;break;case ib:if(c.wrap){for(;32>n;){if(0===i)break a;i--,m|=e[g++]<<n,n+=8}if(p-=j,a.total_out+=p,c.total+=p,p&&(a.adler=c.check=c.flags?t(c.check,f,p,h-p):s(c.check,f,p,h-p)),p=j,(c.flags?m:d(m))!==c.check){a.msg="incorrect data check",c.mode=lb;break}m=0,n=0}c.mode=jb;case jb:if(c.wrap&&c.flags){for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m!==(4294967295&c.total)){a.msg="incorrect length check",c.mode=lb;break}m=0,n=0}c.mode=kb;case kb:xb=D;break a;case lb:xb=G;break a;case mb:return H;case nb:default:return F}return a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,(c.wsize||p!==a.avail_out&&c.mode<lb&&(c.mode<ib||b!==z))&&l(a,a.output,a.next_out,p-a.avail_out)?(c.mode=mb,H):(o-=a.avail_in,p-=a.avail_out,a.total_in+=o,a.total_out+=p,c.total+=p,c.wrap&&p&&(a.adler=c.check=c.flags?t(c.check,f,p,a.next_out-p):s(c.check,f,p,a.next_out-p)),a.data_type=c.bits+(c.last?64:0)+(c.mode===V?128:0)+(c.mode===bb||c.mode===Y?256:0),(0===o&&0===p||b===z)&&xb===C&&(xb=I),xb)}function n(a){if(!a||!a.state)return F;var b=a.state;return b.window&&(b.window=null),a.state=null,C}function o(a,b){var c;return a&&a.state?(c=a.state,0===(2&c.wrap)?F:(c.head=b,b.done=!1,C)):F}var p,q,r=a("../utils/common"),s=a("./adler32"),t=a("./crc32"),u=a("./inffast"),v=a("./inftrees"),w=0,x=1,y=2,z=4,A=5,B=6,C=0,D=1,E=2,F=-2,G=-3,H=-4,I=-5,J=8,K=1,L=2,M=3,N=4,O=5,P=6,Q=7,R=8,S=9,T=10,U=11,V=12,W=13,X=14,Y=15,Z=16,$=17,_=18,ab=19,bb=20,cb=21,db=22,eb=23,fb=24,gb=25,hb=26,ib=27,jb=28,kb=29,lb=30,mb=31,nb=32,ob=852,pb=592,qb=15,rb=qb,sb=!0;c.inflateReset=g,c.inflateReset2=h,c.inflateResetKeep=f,c.inflateInit=j,c.inflateInit2=i,c.inflate=m,c.inflateEnd=n,c.inflateGetHeader=o,c.inflateInfo="pako inflate (from Nodeca project)"},{"../utils/common":27,"./adler32":29,"./crc32":31,"./inffast":34,"./inftrees":36}],36:[function(a,b){"use strict";var 
c=a("../utils/common"),d=15,e=852,f=592,g=0,h=1,i=2,j=[3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258,0,0],k=[16,16,16,16,16,16,16,16,17,17,17,17,18,18,18,18,19,19,19,19,20,20,20,20,21,21,21,21,16,72,78],l=[1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0],m=[16,16,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26,27,27,28,28,29,29,64,64];b.exports=function(a,b,n,o,p,q,r,s){var t,u,v,w,x,y,z,A,B,C=s.bits,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=null,O=0,P=new c.Buf16(d+1),Q=new c.Buf16(d+1),R=null,S=0;for(D=0;d>=D;D++)P[D]=0;for(E=0;o>E;E++)P[b[n+E]]++;for(H=C,G=d;G>=1&&0===P[G];G--);if(H>G&&(H=G),0===G)return p[q++]=20971520,p[q++]=20971520,s.bits=1,0;for(F=1;G>F&&0===P[F];F++);for(F>H&&(H=F),K=1,D=1;d>=D;D++)if(K<<=1,K-=P[D],0>K)return-1;if(K>0&&(a===g||1!==G))return-1;for(Q[1]=0,D=1;d>D;D++)Q[D+1]=Q[D]+P[D];for(E=0;o>E;E++)0!==b[n+E]&&(r[Q[b[n+E]]++]=E);if(a===g?(N=R=r,y=19):a===h?(N=j,O-=257,R=k,S-=257,y=256):(N=l,R=m,y=-1),M=0,E=0,D=F,x=q,I=H,J=0,v=-1,L=1<<H,w=L-1,a===h&&L>e||a===i&&L>f)return 1;for(var T=0;;){T++,z=D-J,r[E]<y?(A=0,B=r[E]):r[E]>y?(A=R[S+r[E]],B=N[O+r[E]]):(A=96,B=0),t=1<<D-J,u=1<<I,F=u;do u-=t,p[x+(M>>J)+u]=z<<24|A<<16|B|0;while(0!==u);for(t=1<<D-1;M&t;)t>>=1;if(0!==t?(M&=t-1,M+=t):M=0,E++,0===--P[D]){if(D===G)break;D=b[n+r[E]]}if(D>H&&(M&w)!==v){for(0===J&&(J=H),x+=F,I=D-J,K=1<<I;G>I+J&&(K-=P[I+J],!(0>=K));)I++,K<<=1;if(L+=1<<I,a===h&&L>e||a===i&&L>f)return 1;v=M&w,p[v]=H<<24|I<<16|x-q|0}}return 0!==M&&(p[x+M]=D-J<<24|64<<16|0),s.bits=H,0}},{"../utils/common":27}],37:[function(a,b){"use strict";b.exports={2:"need dictionary",1:"stream end",0:"","-1":"file error","-2":"stream error","-3":"data error","-4":"insufficient memory","-5":"buffer error","-6":"incompatible version"}},{}],38:[function(a,b,c){"use strict";function d(a){for(var b=a.length;--b>=0;)a[b]=0}function e(a){return 256>a?gb[a]:gb[256+(a>>>7)]}function f(a,b){a.pending_buf[a.pending++]=255&b,a.pending_buf[a.pending++]=b>>>8&255}function g(a,b,c){a.bi_valid>V-c?(a.bi_buf|=b<<a.bi_valid&65535,f(a,a.bi_buf),a.bi_buf=b>>V-a.bi_valid,a.bi_valid+=c-V):(a.bi_buf|=b<<a.bi_valid&65535,a.bi_valid+=c)}function h(a,b,c){g(a,c[2*b],c[2*b+1])}function i(a,b){var c=0;do c|=1&a,a>>>=1,c<<=1;while(--b>0);return c>>>1}function j(a){16===a.bi_valid?(f(a,a.bi_buf),a.bi_buf=0,a.bi_valid=0):a.bi_valid>=8&&(a.pending_buf[a.pending++]=255&a.bi_buf,a.bi_buf>>=8,a.bi_valid-=8)}function k(a,b){var c,d,e,f,g,h,i=b.dyn_tree,j=b.max_code,k=b.stat_desc.static_tree,l=b.stat_desc.has_stree,m=b.stat_desc.extra_bits,n=b.stat_desc.extra_base,o=b.stat_desc.max_length,p=0;for(f=0;U>=f;f++)a.bl_count[f]=0;for(i[2*a.heap[a.heap_max]+1]=0,c=a.heap_max+1;T>c;c++)d=a.heap[c],f=i[2*i[2*d+1]+1]+1,f>o&&(f=o,p++),i[2*d+1]=f,d>j||(a.bl_count[f]++,g=0,d>=n&&(g=m[d-n]),h=i[2*d],a.opt_len+=h*(f+g),l&&(a.static_len+=h*(k[2*d+1]+g)));if(0!==p){do{for(f=o-1;0===a.bl_count[f];)f--;a.bl_count[f]--,a.bl_count[f+1]+=2,a.bl_count[o]--,p-=2}while(p>0);for(f=o;0!==f;f--)for(d=a.bl_count[f];0!==d;)e=a.heap[--c],e>j||(i[2*e+1]!==f&&(a.opt_len+=(f-i[2*e+1])*i[2*e],i[2*e+1]=f),d--)}}function l(a,b,c){var d,e,f=new Array(U+1),g=0;for(d=1;U>=d;d++)f[d]=g=g+c[d-1]<<1;for(e=0;b>=e;e++){var h=a[2*e+1];0!==h&&(a[2*e]=i(f[h]++,h))}}function m(){var a,b,c,d,e,f=new 
Array(U+1);for(c=0,d=0;O-1>d;d++)for(ib[d]=c,a=0;a<1<<_[d];a++)hb[c++]=d;for(hb[c-1]=d,e=0,d=0;16>d;d++)for(jb[d]=e,a=0;a<1<<ab[d];a++)gb[e++]=d;for(e>>=7;R>d;d++)for(jb[d]=e<<7,a=0;a<1<<ab[d]-7;a++)gb[256+e++]=d;for(b=0;U>=b;b++)f[b]=0;for(a=0;143>=a;)eb[2*a+1]=8,a++,f[8]++;for(;255>=a;)eb[2*a+1]=9,a++,f[9]++;for(;279>=a;)eb[2*a+1]=7,a++,f[7]++;for(;287>=a;)eb[2*a+1]=8,a++,f[8]++;for(l(eb,Q+1,f),a=0;R>a;a++)fb[2*a+1]=5,fb[2*a]=i(a,5);kb=new nb(eb,_,P+1,Q,U),lb=new nb(fb,ab,0,R,U),mb=new nb(new Array(0),bb,0,S,W)}function n(a){var b;for(b=0;Q>b;b++)a.dyn_ltree[2*b]=0;for(b=0;R>b;b++)a.dyn_dtree[2*b]=0;for(b=0;S>b;b++)a.bl_tree[2*b]=0;a.dyn_ltree[2*X]=1,a.opt_len=a.static_len=0,a.last_lit=a.matches=0}function o(a){a.bi_valid>8?f(a,a.bi_buf):a.bi_valid>0&&(a.pending_buf[a.pending++]=a.bi_buf),a.bi_buf=0,a.bi_valid=0}function p(a,b,c,d){o(a),d&&(f(a,c),f(a,~c)),E.arraySet(a.pending_buf,a.window,b,c,a.pending),a.pending+=c}function q(a,b,c,d){var e=2*b,f=2*c;return a[e]<a[f]||a[e]===a[f]&&d[b]<=d[c]}function r(a,b,c){for(var d=a.heap[c],e=c<<1;e<=a.heap_len&&(e<a.heap_len&&q(b,a.heap[e+1],a.heap[e],a.depth)&&e++,!q(b,d,a.heap[e],a.depth));)a.heap[c]=a.heap[e],c=e,e<<=1;a.heap[c]=d}function s(a,b,c){var d,f,i,j,k=0;if(0!==a.last_lit)do d=a.pending_buf[a.d_buf+2*k]<<8|a.pending_buf[a.d_buf+2*k+1],f=a.pending_buf[a.l_buf+k],k++,0===d?h(a,f,b):(i=hb[f],h(a,i+P+1,b),j=_[i],0!==j&&(f-=ib[i],g(a,f,j)),d--,i=e(d),h(a,i,c),j=ab[i],0!==j&&(d-=jb[i],g(a,d,j)));while(k<a.last_lit);h(a,X,b)}function t(a,b){var c,d,e,f=b.dyn_tree,g=b.stat_desc.static_tree,h=b.stat_desc.has_stree,i=b.stat_desc.elems,j=-1;for(a.heap_len=0,a.heap_max=T,c=0;i>c;c++)0!==f[2*c]?(a.heap[++a.heap_len]=j=c,a.depth[c]=0):f[2*c+1]=0;for(;a.heap_len<2;)e=a.heap[++a.heap_len]=2>j?++j:0,f[2*e]=1,a.depth[e]=0,a.opt_len--,h&&(a.static_len-=g[2*e+1]);for(b.max_code=j,c=a.heap_len>>1;c>=1;c--)r(a,f,c);e=i;do c=a.heap[1],a.heap[1]=a.heap[a.heap_len--],r(a,f,1),d=a.heap[1],a.heap[--a.heap_max]=c,a.heap[--a.heap_max]=d,f[2*e]=f[2*c]+f[2*d],a.depth[e]=(a.depth[c]>=a.depth[d]?a.depth[c]:a.depth[d])+1,f[2*c+1]=f[2*d+1]=e,a.heap[1]=e++,r(a,f,1);while(a.heap_len>=2);a.heap[--a.heap_max]=a.heap[1],k(a,b),l(f,j,a.bl_count)}function u(a,b,c){var d,e,f=-1,g=b[1],h=0,i=7,j=4;for(0===g&&(i=138,j=3),b[2*(c+1)+1]=65535,d=0;c>=d;d++)e=g,g=b[2*(d+1)+1],++h<i&&e===g||(j>h?a.bl_tree[2*e]+=h:0!==e?(e!==f&&a.bl_tree[2*e]++,a.bl_tree[2*Y]++):10>=h?a.bl_tree[2*Z]++:a.bl_tree[2*$]++,h=0,f=e,0===g?(i=138,j=3):e===g?(i=6,j=3):(i=7,j=4))}function v(a,b,c){var d,e,f=-1,i=b[1],j=0,k=7,l=4;for(0===i&&(k=138,l=3),d=0;c>=d;d++)if(e=i,i=b[2*(d+1)+1],!(++j<k&&e===i)){if(l>j){do h(a,e,a.bl_tree);while(0!==--j)}else 0!==e?(e!==f&&(h(a,e,a.bl_tree),j--),h(a,Y,a.bl_tree),g(a,j-3,2)):10>=j?(h(a,Z,a.bl_tree),g(a,j-3,3)):(h(a,$,a.bl_tree),g(a,j-11,7));j=0,f=e,0===i?(k=138,l=3):e===i?(k=6,l=3):(k=7,l=4)}}function w(a){var b;for(u(a,a.dyn_ltree,a.l_desc.max_code),u(a,a.dyn_dtree,a.d_desc.max_code),t(a,a.bl_desc),b=S-1;b>=3&&0===a.bl_tree[2*cb[b]+1];b--);return a.opt_len+=3*(b+1)+5+5+4,b}function x(a,b,c,d){var e;for(g(a,b-257,5),g(a,c-1,5),g(a,d-4,4),e=0;d>e;e++)g(a,a.bl_tree[2*cb[e]+1],3);v(a,a.dyn_ltree,b-1),v(a,a.dyn_dtree,c-1)}function y(a){var b,c=4093624447;for(b=0;31>=b;b++,c>>>=1)if(1&c&&0!==a.dyn_ltree[2*b])return G;if(0!==a.dyn_ltree[18]||0!==a.dyn_ltree[20]||0!==a.dyn_ltree[26])return H;for(b=32;P>b;b++)if(0!==a.dyn_ltree[2*b])return H;return G}function z(a){pb||(m(),pb=!0),a.l_desc=new ob(a.dyn_ltree,kb),a.d_desc=new ob(a.dyn_dtree,lb),a.bl_desc=new 
ob(a.bl_tree,mb),a.bi_buf=0,a.bi_valid=0,n(a)}function A(a,b,c,d){g(a,(J<<1)+(d?1:0),3),p(a,b,c,!0)}function B(a){g(a,K<<1,3),h(a,X,eb),j(a)}function C(a,b,c,d){var e,f,h=0;a.level>0?(a.strm.data_type===I&&(a.strm.data_type=y(a)),t(a,a.l_desc),t(a,a.d_desc),h=w(a),e=a.opt_len+3+7>>>3,f=a.static_len+3+7>>>3,e>=f&&(e=f)):e=f=c+5,e>=c+4&&-1!==b?A(a,b,c,d):a.strategy===F||f===e?(g(a,(K<<1)+(d?1:0),3),s(a,eb,fb)):(g(a,(L<<1)+(d?1:0),3),x(a,a.l_desc.max_code+1,a.d_desc.max_code+1,h+1),s(a,a.dyn_ltree,a.dyn_dtree)),n(a),d&&o(a)}function D(a,b,c){return a.pending_buf[a.d_buf+2*a.last_lit]=b>>>8&255,a.pending_buf[a.d_buf+2*a.last_lit+1]=255&b,a.pending_buf[a.l_buf+a.last_lit]=255&c,a.last_lit++,0===b?a.dyn_ltree[2*c]++:(a.matches++,b--,a.dyn_ltree[2*(hb[c]+P+1)]++,a.dyn_dtree[2*e(b)]++),a.last_lit===a.lit_bufsize-1}var E=a("../utils/common"),F=4,G=0,H=1,I=2,J=0,K=1,L=2,M=3,N=258,O=29,P=256,Q=P+1+O,R=30,S=19,T=2*Q+1,U=15,V=16,W=7,X=256,Y=16,Z=17,$=18,_=[0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0],ab=[0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13],bb=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7],cb=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15],db=512,eb=new Array(2*(Q+2));d(eb);var fb=new Array(2*R);d(fb);var gb=new Array(db);d(gb);var hb=new Array(N-M+1);d(hb);var ib=new Array(O);d(ib);var jb=new Array(R);d(jb);var kb,lb,mb,nb=function(a,b,c,d,e){this.static_tree=a,this.extra_bits=b,this.extra_base=c,this.elems=d,this.max_length=e,this.has_stree=a&&a.length},ob=function(a,b){this.dyn_tree=a,this.max_code=0,this.stat_desc=b},pb=!1;c._tr_init=z,c._tr_stored_block=A,c._tr_flush_block=C,c._tr_tally=D,c._tr_align=B},{"../utils/common":27}],39:[function(a,b){"use strict";function c(){this.input=null,this.next_in=0,this.avail_in=0,this.total_in=0,this.output=null,this.next_out=0,this.avail_out=0,this.total_out=0,this.msg="",this.state=null,this.data_type=2,this.adler=0}b.exports=c},{}]},{},[9])(9)});'use strict';if(tr.isVinn){global.window={};}'use strict';if(tr.isVinn){global.JSZip=global.window.JSZip;global.window=undefined;}else if(tr.isNode){var jsZipAbsPath=HTMLImportsLoader.hrefToAbsolutePath('/jszip.min.js');var jsZipModule=require(jsZipAbsPath);global.JSZip=jsZipModule;}'use strict';tr.exportTo('tr.e.importer.ddms',function(){var kPid=0;var kCategory='java';var kMethodLutEndMarker='\n*end\n';var kThreadsStart='\n*threads\n';var kMethodsStart='\n*methods\n';var kTraceMethodEnter=0x00;var kTraceMethodExit=0x01;var kTraceUnroll=0x02;var kTraceMethodActionMask=0x03;var kTraceHeaderLength=32;var kTraceMagicValue=0x574f4c53;var kTraceVersionSingleClock=2;var kTraceVersionDualClock=3;var kTraceRecordSizeSingleClock=10;var kTraceRecordSizeDualClock=14;function Reader(string_payload){this.position_=0;this.data_=JSZip.utils.transformTo('uint8array',string_payload);}
 Reader.prototype={__proto__:Object.prototype,uint8:function(){var result=this.data_[this.position_];this.position_+=1;return result;},uint16:function(){var result=0;result+=this.uint8();result+=this.uint8()<<8;return result;},uint32:function(){var result=0;result+=this.uint8();result+=this.uint8()<<8;result+=this.uint8()<<16;result+=this.uint8()<<24;return result;},uint64:function(){var low=this.uint32();var high=this.uint32();var low_str=('0000000'+low.toString(16)).substr(-8);var high_str=('0000000'+high.toString(16)).substr(-8);var result=high_str+low_str;return result;},seekTo:function(position){this.position_=position;},hasMore:function(){return this.position_<this.data_.length;}};function DdmsImporter(model,data){this.importPriority=3;this.model_=model;this.data_=data;}
 DdmsImporter.canImport=function(data){if(typeof(data)==='string'||data instanceof String){var header=data.slice(0,1000);return header.startsWith('*version\n')&&header.indexOf('\nvm=')>=0&&header.indexOf(kThreadsStart)>=0;}
-return false;};DdmsImporter.prototype={__proto__:tr.importer.Importer.prototype,get model(){return this.model_;},importEvents:function(isSecondaryImport){var divider=this.data_.indexOf(kMethodLutEndMarker)+
+return false;};DdmsImporter.prototype={__proto__:tr.importer.Importer.prototype,get importerName(){return'DdmsImporter';},get model(){return this.model_;},importEvents:function(){var divider=this.data_.indexOf(kMethodLutEndMarker)+
 kMethodLutEndMarker.length;this.metadata_=this.data_.slice(0,divider);this.methods_={};this.parseThreads();this.parseMethods();var traceReader=new Reader(this.data_.slice(divider));var magic=traceReader.uint32();if(magic!=kTraceMagicValue){throw Error('Failed to match magic value');}
 this.version_=traceReader.uint16();if(this.version_!=kTraceVersionDualClock){throw Error('Unknown version');}
 var dataOffest=traceReader.uint16();var startDateTime=traceReader.uint64();var recordSize=traceReader.uint16();traceReader.seekTo(dataOffest);while(traceReader.hasMore()){this.parseTraceEntry(traceReader);}},parseTraceEntry:function(reader){var tid=reader.uint16();var methodPacked=reader.uint32();var cpuSinceStart=reader.uint32();var wallClockSinceStart=reader.uint32();var method=methodPacked&~kTraceMethodActionMask;var action=methodPacked&kTraceMethodActionMask;var thread=this.getTid(tid);method=this.getMethodName(method);if(action==kTraceMethodEnter){thread.sliceGroup.beginSlice(kCategory,method,wallClockSinceStart,undefined,cpuSinceStart);}else if(thread.sliceGroup.openSliceCount){thread.sliceGroup.endSlice(wallClockSinceStart,cpuSinceStart);}},parseThreads:function(){var threads=this.metadata_.slice(this.metadata_.indexOf(kThreadsStart)+
@@ -3425,7 +3638,7 @@
 function BinderParser(importer){Parser.call(this,importer);importer.registerEventHandler('binder_locked',BinderParser.prototype.binderLocked.bind(this));importer.registerEventHandler('binder_unlock',BinderParser.prototype.binderUnlock.bind(this));importer.registerEventHandler('binder_lock',BinderParser.prototype.binderLock.bind(this));importer.registerEventHandler('binder_transaction',BinderParser.prototype.binderTransaction.bind(this));importer.registerEventHandler('binder_transaction_received',BinderParser.prototype.binderTransactionReceived.bind(this));this.model_=importer.model;this.kthreadlookup={};this.importer_=importer;this.transWaitingRecv={};this.syncTransWaitingCompletion={};this.recursiveSyncTransWaitingCompletion_ByPID={};this.receivedTransWaitingConversion={};}
 BinderParser.prototype={__proto__:Parser.prototype,binderLock:function(eventName,cpuNumber,pid,ts,eventBase){var tgid=parseInt(eventBase.tgid);this.doNameMappings(pid,tgid,eventName.threadName);var kthread=this.importer_.getOrCreateBinderKernelThread(eventBase.threadName,tgid,pid);kthread.binderAttemptLockTS=ts;kthread.binderOpenTsA=ts;return true;},binderLocked:function(eventName,cpuNumber,pid,ts,eventBase){var binder_thread=isBinderThread(eventBase.threadName);var tgid,name;var as_slice;var need_push=false;var kthread,rthread;tgid=parseInt(eventBase.tgid);name=eventBase.threadName;kthread=this.importer_.getOrCreateBinderKernelThread(eventBase.threadName,tgid,pid);this.doNameMappings(pid,tgid,name);rthread=kthread.thread;kthread.binderLockAquiredTS=ts;if(kthread.binderAttemptLockTS===undefined)
 return false;var args=this.generateArgsForSlice(tgid,pid,name,kthread);rthread.sliceGroup.pushCompleteSlice('binder','binder lock waiting',kthread.binderAttemptLockTS,ts-kthread.binderAttemptLockTS,0,0,args);kthread.binderAttemptLockTS=undefined;return true;},binderUnlock:function(eventName,cpuNumber,pid,ts,eventBase){var tgid=parseInt(eventBase.tgid);var kthread=this.importer_.getOrCreateBinderKernelThread(eventBase.threadName,tgid,pid);if(kthread.binderLockAquiredTS===undefined)
-return false;args=this.generateArgsForSlice(tgid,pid,eventBase.threadName,kthread);kthread.thread.sliceGroup.pushCompleteSlice('binder','binder lock held',kthread.binderLockAquiredTS,ts-kthread.binderLockAquiredTS,0,0,args);kthread.binderLockAquiredTS=undefined;return true;},binderTransaction:function(eventName,cpuNumber,pid,ts,eventBase){var event=binderTransRE.exec(eventBase.details);if(event===undefined)
+return false;var args=this.generateArgsForSlice(tgid,pid,eventBase.threadName,kthread);kthread.thread.sliceGroup.pushCompleteSlice('binder','binder lock held',kthread.binderLockAquiredTS,ts-kthread.binderLockAquiredTS,0,0,args);kthread.binderLockAquiredTS=undefined;return true;},binderTransaction:function(eventName,cpuNumber,pid,ts,eventBase){var event=binderTransRE.exec(eventBase.details);if(event===undefined)
 return false;var tgid=parseInt(eventBase.tgid);this.doNameMappings(pid,tgid,eventBase.threadName);var kthread;kthread=this.importer_.getOrCreateBinderKernelThread(eventBase.threadName,tgid,pid);var trans=new BinderTransaction(event,pid,ts,kthread);var args=generateBinderArgsForSlice(trans,eventBase.threadName);var prior_receive=this.getPriorReceiveOnPID(pid);if(prior_receive!==false){return this.modelPriorReceive(prior_receive,ts,pid,tgid,kthread,trans,args,event);}
 var recursive_trans=this.getRecursiveTransactionNeedingCompletion(pid);if(recursive_trans!==false)
 return this.modelRecursiveTransactions(recursive_trans,ts,pid,kthread,trans,args);var slice=kthread.thread.sliceGroup.pushCompleteSlice('binder','',ts,.03,0,0,args);slice.colorId=ColorScheme.getColorIdForGeneralPurposeString(ts.toString());trans.slice=slice;if(trans.expect_reply)
@@ -3606,7 +3819,7 @@
 var workqueueExecuteStartRE=/work struct (.+): function (\S+)/;var workqueueExecuteEndRE=/work struct (.+)/;WorkqueueParser.prototype={__proto__:Parser.prototype,executeStartEvent:function(eventName,cpuNumber,pid,ts,eventBase){var event=workqueueExecuteStartRE.exec(eventBase.details);if(!event)
 return false;var kthread=this.importer.getOrCreateKernelThread(eventBase.threadName,pid,pid);kthread.openSliceTS=ts;kthread.openSlice=event[2];return true;},executeEndEvent:function(eventName,cpuNumber,pid,ts,eventBase){var event=workqueueExecuteEndRE.exec(eventBase.details);if(!event)
 return false;var kthread=this.importer.getOrCreateKernelThread(eventBase.threadName,pid,pid);if(kthread.openSlice){var slice=new tr.model.Slice('',kthread.openSlice,ColorScheme.getColorIdForGeneralPurposeString(kthread.openSlice),kthread.openSliceTS,{},ts-kthread.openSliceTS);kthread.thread.sliceGroup.pushSlice(slice);}
-kthread.openSlice=undefined;return true;},executeQueueWork:function(eventName,cpuNumber,pid,ts,eventBase){return true;},executeActivateWork:function(eventName,cpuNumber,pid,ts,eventBase){return true;}};Parser.register(WorkqueueParser);return{WorkqueueParser:WorkqueueParser};});'use strict';tr.exportTo('tr.e.importer.linux_perf',function(){var ClockSyncRecord=tr.ClockSyncRecord;function LinuxPerfImporter(model,events){this.importPriority=2;this.model_=model;this.events_=events;this.newlyAddedClockSyncRecords_=[];this.wakeups_=[];this.blocked_reasons_=[];this.kernelThreadStates_={};this.buildMapFromLinuxPidsToThreads();this.lines_=[];this.pseudoThreadCounter=1;this.parsers_=[];this.eventHandlers_={};}
+kthread.openSlice=undefined;return true;},executeQueueWork:function(eventName,cpuNumber,pid,ts,eventBase){return true;},executeActivateWork:function(eventName,cpuNumber,pid,ts,eventBase){return true;}};Parser.register(WorkqueueParser);return{WorkqueueParser:WorkqueueParser};});'use strict';tr.exportTo('tr.model',function(){function ClockSyncRecord(syncId,start,args){this.syncId_=syncId;this.start_=start;this.args_=args;};ClockSyncRecord.prototype={get syncId(){return this.syncId_;},get start(){return this.start_;},set start(value){this.start_=value;},get args(){return this.args_;}};function InstantClockSyncRecord(syncId,start,args){ClockSyncRecord.call(this,syncId,start,args);};InstantClockSyncRecord.prototype={__proto__:ClockSyncRecord.prototype};function PingPongClockSyncRecord(syncId,start,duration,args){ClockSyncRecord.call(this,syncId,start,args);this.duration_=duration;};PingPongClockSyncRecord.prototype={__proto__:ClockSyncRecord.prototype,get duration(){return this.duration_;},set duration(value){this.duration_=value;},};return{InstantClockSyncRecord:InstantClockSyncRecord,PingPongClockSyncRecord:PingPongClockSyncRecord};});'use strict';tr.exportTo('tr.e.importer.linux_perf',function(){var InstantClockSyncRecord=tr.model.InstantClockSyncRecord;function LinuxPerfImporter(model,events){this.importPriority=2;this.model_=model;this.events_=events;this.newlyAddedClockSyncRecords_=[];this.wakeups_=[];this.blocked_reasons_=[];this.kernelThreadStates_={};this.buildMapFromLinuxPidsToThreads();this.lines_=[];this.pseudoThreadCounter=1;this.parsers_=[];this.eventHandlers_={};}
 var TestExports={};var lineREWithTGID=new RegExp('^\\s*(.+)-(\\d+)\\s+\\(\\s*(\\d+|-+)\\)\\s\\[(\\d+)\\]'+'\\s+[dX.][Nnp.][Hhs.][0-9a-f.]'+'\\s+(\\d+\\.\\d+):\\s+(\\S+):\\s(.*)$');var lineParserWithTGID=function(line){var groups=lineREWithTGID.exec(line);if(!groups){return groups;}
 var tgid=groups[3];if(tgid[0]==='-')
 tgid=undefined;return{threadName:groups[1],pid:groups[2],tgid:tgid,cpuNumber:groups[4],timestamp:groups[5],eventName:groups[6],details:groups[7]};};TestExports.lineParserWithTGID=lineParserWithTGID;var lineREWithIRQInfo=new RegExp('^\\s*(.+)-(\\d+)\\s+\\[(\\d+)\\]'+'\\s+[dX.][Nnp.][Hhs.][0-9a-f.]'+'\\s+(\\d+\\.\\d+):\\s+(\\S+):\\s(.*)$');var lineParserWithIRQInfo=function(line){var groups=lineREWithIRQInfo.exec(line);if(!groups){return groups;}
@@ -3640,7 +3853,7 @@
 return failure;events=r.endSavingLinesAndGetResult();events=events.slice(1,events.length-1);}
 if(!r.advanceToLineMatching(/^<\/body>$/))
 return failure;if(!r.advanceToLineMatching(/^<\/html>$/))
-return failure;return{ok:true,lines:produce_result?events:undefined,events_begin_at_line:events_begin_at_line};};LinuxPerfImporter.prototype={__proto__:tr.importer.Importer.prototype,get model(){return this.model_;},buildMapFromLinuxPidsToThreads:function(){this.threadsByLinuxPid={};this.model_.getAllThreads().forEach(function(thread){this.threadsByLinuxPid[thread.tid]=thread;}.bind(this));},getOrCreateCpu:function(cpuNumber){return this.model_.kernel.getOrCreateCpu(cpuNumber);},getOrCreateKernelThread:function(kernelThreadName,pid,tid){if(!this.kernelThreadStates_[kernelThreadName]){var thread=this.model_.getOrCreateProcess(pid).getOrCreateThread(tid);thread.name=kernelThreadName;this.kernelThreadStates_[kernelThreadName]={pid:pid,thread:thread,openSlice:undefined,openSliceTS:undefined};this.threadsByLinuxPid[pid]=thread;}
+return failure;return{ok:true,lines:produce_result?events:undefined,events_begin_at_line:events_begin_at_line};};LinuxPerfImporter.prototype={__proto__:tr.importer.Importer.prototype,get importerName(){return'LinuxPerfImporter';},get model(){return this.model_;},buildMapFromLinuxPidsToThreads:function(){this.threadsByLinuxPid={};this.model_.getAllThreads().forEach(function(thread){this.threadsByLinuxPid[thread.tid]=thread;}.bind(this));},getOrCreateCpu:function(cpuNumber){return this.model_.kernel.getOrCreateCpu(cpuNumber);},getOrCreateKernelThread:function(kernelThreadName,pid,tid){if(!this.kernelThreadStates_[kernelThreadName]){var thread=this.model_.getOrCreateProcess(pid).getOrCreateThread(tid);thread.name=kernelThreadName;this.kernelThreadStates_[kernelThreadName]={pid:pid,thread:thread,openSlice:undefined,openSliceTS:undefined};this.threadsByLinuxPid[pid]=thread;}
 return this.kernelThreadStates_[kernelThreadName];},getOrCreateBinderKernelThread:function(kernelThreadName,pid,tid){var key=kernelThreadName+pid+tid;if(!this.kernelThreadStates_[key]){var thread=this.model_.getOrCreateProcess(pid).getOrCreateThread(tid);thread.name=kernelThreadName;this.kernelThreadStates_[key]={pid:pid,thread:thread,openSlice:undefined,openSliceTS:undefined};this.threadsByLinuxPid[pid]=thread;}
 return this.kernelThreadStates_[key];},getOrCreatePseudoThread:function(threadName){var thread=this.kernelThreadStates_[threadName];if(!thread){thread=this.getOrCreateKernelThread(threadName,pseudoKernelPID,this.pseudoThreadCounter);this.pseudoThreadCounter++;}
 return thread;},importEvents:function(isSecondaryImport){this.parsers_=this.createParsers_();this.registerDefaultHandlers_();this.parseLines();this.importClockSyncRecords();var timeShift=this.computeTimeTransform();if(timeShift===undefined){this.model_.importWarning({type:'clock_sync',message:'Cannot import kernel trace without a clock sync.'});return;}
@@ -3663,152 +3876,24 @@
 if(wakeup!==undefined){var wakeupDuration=nextSlice.start-wakeup.ts;var args={'wakeup from tid':wakeup.fromTid};slices.push(new tr.model.ThreadTimeSlice(thread,SCHEDULING_STATE.RUNNABLE,'',wakeup.ts,args,wakeupDuration));wakeup=undefined;}};if(prevSlice.args.stateWhenDescheduled=='S'){pushSleep(SCHEDULING_STATE.SLEEPING);}else if(prevSlice.args.stateWhenDescheduled=='R'||prevSlice.args.stateWhenDescheduled=='R+'){slices.push(new tr.model.ThreadTimeSlice(thread,SCHEDULING_STATE.RUNNABLE,'',prevSlice.end,{},midDuration));}else if(prevSlice.args.stateWhenDescheduled=='D'){pushSleep(SCHEDULING_STATE.UNINTR_SLEEP);}else if(prevSlice.args.stateWhenDescheduled=='T'){slices.push(new tr.model.ThreadTimeSlice(thread,SCHEDULING_STATE.STOPPED,'',prevSlice.end,{},midDuration));}else if(prevSlice.args.stateWhenDescheduled=='t'){slices.push(new tr.model.ThreadTimeSlice(thread,SCHEDULING_STATE.DEBUG,'',prevSlice.end,{},midDuration));}else if(prevSlice.args.stateWhenDescheduled=='Z'){slices.push(new tr.model.ThreadTimeSlice(thread,SCHEDULING_STATE.ZOMBIE,'',ioWaitId,prevSlice.end,{},midDuration));}else if(prevSlice.args.stateWhenDescheduled=='X'){slices.push(new tr.model.ThreadTimeSlice(thread,SCHEDULING_STATE.EXIT_DEAD,'',prevSlice.end,{},midDuration));}else if(prevSlice.args.stateWhenDescheduled=='x'){slices.push(new tr.model.ThreadTimeSlice(thread,SCHEDULING_STATE.TASK_DEAD,'',prevSlice.end,{},midDuration));}else if(prevSlice.args.stateWhenDescheduled=='K'){slices.push(new tr.model.ThreadTimeSlice(thread,SCHEDULING_STATE.WAKE_KILL,'',prevSlice.end,{},midDuration));}else if(prevSlice.args.stateWhenDescheduled=='W'){slices.push(new tr.model.ThreadTimeSlice(thread,SCHEDULING_STATE.WAKING,'',prevSlice.end,{},midDuration));}else if(prevSlice.args.stateWhenDescheduled=='D|K'){pushSleep(SCHEDULING_STATE.UNINTR_SLEEP_WAKE_KILL);}else if(prevSlice.args.stateWhenDescheduled=='D|W'){pushSleep(SCHEDULING_STATE.UNINTR_SLEEP_WAKING);}else{slices.push(new tr.model.ThreadTimeSlice(thread,SCHEDULING_STATE.UNKNOWN,'',prevSlice.end,{},midDuration));this.model_.importWarning({type:'parse_error',message:'Unrecognized sleep state: '+
 prevSlice.args.stateWhenDescheduled});}
 var runningSlice=new tr.model.ThreadTimeSlice(thread,SCHEDULING_STATE.RUNNING,'',nextSlice.start,{},nextSlice.duration);runningSlice.cpuOnWhichThreadWasRunning=prevSlice.cpu;slices.push(runningSlice);}
-thread.timeSlices=slices;},this);},computeCpuTimestampsForSlicesAsNeeded:function(){},computeTimeTransform:function(){var isSecondaryImport=this.model.getClockSyncRecordsNamed('ftrace_importer').length!==0;var mSyncs=this.model_.getClockSyncRecordsNamed('monotonic');if(mSyncs.length==0)
+thread.timeSlices=slices;},this);},computeCpuTimestampsForSlicesAsNeeded:function(){},computeTimeTransform:function(){var isSecondaryImport=this.model.getClockSyncRecordsWithSyncId('ftrace_importer').length!==0;var mSyncs=this.model_.getClockSyncRecordsWithSyncId('monotonic');if(mSyncs.length==0)
 return isSecondaryImport?undefined:0;var sync=mSyncs[0].args;if(sync.parentTS==0||sync.parentTS==sync.perfTS)
 return 0;return sync.parentTS-sync.perfTS;},createParsers_:function(){var allTypeInfos=tr.e.importer.linux_perf.Parser.getAllRegisteredTypeInfos();var parsers=allTypeInfos.map(function(typeInfo){return new typeInfo.constructor(this);},this);return parsers;},registerDefaultHandlers_:function(){this.registerEventHandler('tracing_mark_write',LinuxPerfImporter.prototype.traceMarkingWriteEvent.bind(this));this.registerEventHandler('0',LinuxPerfImporter.prototype.traceMarkingWriteEvent.bind(this));this.registerEventHandler('tracing_mark_write:trace_event_clock_sync',function(){return true;});this.registerEventHandler('0:trace_event_clock_sync',function(){return true;});},registerEventHandler:function(eventName,handler){this.eventHandlers_[eventName]=handler;},markPidRunnable:function(ts,pid,comm,prio,fromPid){this.wakeups_.push({ts:ts,tid:pid,fromTid:fromPid});},addPidBlockedReason:function(ts,pid,iowait,caller){this.blocked_reasons_.push({ts:ts,tid:pid,iowait:iowait,caller:caller});},traceClockSyncEvent:function(eventName,cpuNumber,pid,ts,eventBase){var event=/name=(\w+?)\s(.+)/.exec(eventBase.details);if(event){var name=event[1];var pieces=event[2].split(' ');var args={perfTS:ts};for(var i=0;i<pieces.length;i++){var parts=pieces[i].split('=');if(parts.length!=2)
 throw new Error('omgbbq');args[parts[0]]=parts[1];}
-this.addClockSyncRecord(new ClockSyncRecord(name,ts,args));return true;}
+this.addClockSyncRecord(new InstantClockSyncRecord(name,ts,args));return true;}
 event=/parent_ts=(\d+\.?\d*)/.exec(eventBase.details);if(!event)
-return false;this.addClockSyncRecord(new ClockSyncRecord('monotonic',ts,{perfTS:ts,parentTS:event[1]*1000}));return true;},traceMarkingWriteEvent:function(eventName,cpuNumber,pid,ts,eventBase,threadName){eventBase.details=eventBase.details.replace(/\\n.*$/,'');var event=/^\s*(\w+):\s*(.*)$/.exec(eventBase.details);if(!event){var tag=eventBase.details.substring(0,2);if(tag=='B|'||tag=='E'||tag=='E|'||tag=='X|'||tag=='C|'||tag=='S|'||tag=='F|'){eventBase.subEventName='android';}else{return false;}}else{eventBase.subEventName=event[1];eventBase.details=event[2];}
+return false;this.addClockSyncRecord(new InstantClockSyncRecord('monotonic',ts,{perfTS:ts,parentTS:event[1]*1000}));return true;},traceMarkingWriteEvent:function(eventName,cpuNumber,pid,ts,eventBase,threadName){eventBase.details=eventBase.details.replace(/\\n.*$/,'');var event=/^\s*(\w+):\s*(.*)$/.exec(eventBase.details);if(!event){var tag=eventBase.details.substring(0,2);if(tag=='B|'||tag=='E'||tag=='E|'||tag=='X|'||tag=='C|'||tag=='S|'||tag=='F|'){eventBase.subEventName='android';}else{return false;}}else{eventBase.subEventName=event[1];eventBase.details=event[2];}
 var writeEventName=eventName+':'+eventBase.subEventName;var handler=this.eventHandlers_[writeEventName];if(!handler){this.model_.importWarning({type:'parse_error',message:'Unknown trace_marking_write event '+writeEventName});return true;}
 return handler(writeEventName,cpuNumber,pid,ts,eventBase,threadName);},importClockSyncRecords:function(){this.forEachLine(function(text,eventBase,cpuNumber,pid,ts){var eventName=eventBase.eventName;if(eventName!=='tracing_mark_write'&&eventName!=='0')
 return;if(traceEventClockSyncRE.exec(eventBase.details))
 this.traceClockSyncEvent(eventName,cpuNumber,pid,ts,eventBase);if(realTimeClockSyncRE.exec(eventBase.details)){var match=realTimeClockSyncRE.exec(eventBase.details);this.model_.realtime_to_monotonic_offset_ms=ts-match[1];}
 if(genericClockSyncRE.exec(eventBase.details))
-this.traceClockSyncEvent(eventName,cpuNumber,pid,ts,eventBase);}.bind(this));},addClockSyncRecord:function(csr){this.newlyAddedClockSyncRecords_.push(csr);this.model_.clockSyncRecords.push(csr);},shiftNewlyAddedClockSyncRecords:function(timeShift){this.newlyAddedClockSyncRecords_.forEach(function(csr){csr.ts+=timeShift;});},importCpuData:function(timeShift){this.forEachLine(function(text,eventBase,cpuNumber,pid,ts){var eventName=eventBase.eventName;var handler=this.eventHandlers_[eventName];if(!handler){this.model_.importWarning({type:'parse_error',message:'Unknown event '+eventName+' ('+text+')'});return;}
+this.traceClockSyncEvent(eventName,cpuNumber,pid,ts,eventBase);}.bind(this));},addClockSyncRecord:function(csr){this.newlyAddedClockSyncRecords_.push(csr);this.model_.clockSyncRecords.push(csr);},shiftNewlyAddedClockSyncRecords:function(timeShift){this.newlyAddedClockSyncRecords_.forEach(function(csr){csr.start+=timeShift;});},importCpuData:function(timeShift){this.forEachLine(function(text,eventBase,cpuNumber,pid,ts){var eventName=eventBase.eventName;var handler=this.eventHandlers_[eventName];if(!handler){this.model_.importWarning({type:'parse_error',message:'Unknown event '+eventName+' ('+text+')'});return;}
 ts+=timeShift;if(!handler(eventName,cpuNumber,pid,ts,eventBase)){this.model_.importWarning({type:'parse_error',message:'Malformed '+eventName+' event ('+text+')'});}}.bind(this));},parseLines:function(){var lines=[];var extractResult=LinuxPerfImporter._extractEventsFromSystraceHTML(this.events_,true);if(!extractResult.ok)
 extractResult=LinuxPerfImporter._extractEventsFromSystraceMultiHTML(this.events_,true);var lines=extractResult.ok?extractResult.lines:this.events_.split('\n');var lineParser=null;for(var lineNumber=0;lineNumber<lines.length;++lineNumber){var line=lines[lineNumber].trim();if(line.length==0||/^#/.test(line))
 continue;if(lineParser==null){lineParser=autoDetectLineParser(line);if(lineParser==null){this.model_.importWarning({type:'parse_error',message:'Cannot parse line: '+line});continue;}}
 var eventBase=lineParser(line);if(!eventBase){this.model_.importWarning({type:'parse_error',message:'Unrecognized line: '+line});continue;}
-this.lines_.push([line,eventBase,parseInt(eventBase.cpuNumber),parseInt(eventBase.pid),parseFloat(eventBase.timestamp)*1000]);}},forEachLine:function(handler){for(var i=0;i<this.lines_.length;++i){var line=this.lines_[i];handler.apply(this,line);}}};tr.importer.Importer.register(LinuxPerfImporter);return{LinuxPerfImporter:LinuxPerfImporter,_LinuxPerfImporterTestExports:TestExports};});'use strict';tr.exportTo('tr.b.u',function(){function TimeDuration(duration){tr.b.u.Scalar.call(this,duration,tr.b.u.Units.timeDurationInMs);};TimeDuration.prototype={__proto__:tr.b.u.Scalar.prototype,get duration(){return this.value;}};TimeDuration.format=function(duration){return tr.b.u.Units.timeDurationInMs.format(duration);};return{TimeDuration:TimeDuration};});'use strict';tr.exportTo('tr.b',function(){function convertEventsToRanges(events){return events.map(function(event){return tr.b.Range.fromExplicitRange(event.start,event.end);});}
-function mergeRanges(inRanges,mergeThreshold,mergeFunction){var remainingEvents=inRanges.slice();remainingEvents.sort(function(x,y){return x.min-y.min;});if(remainingEvents.length<=1){var merged=[];if(remainingEvents.length==1){merged.push(mergeFunction(remainingEvents));}
-return merged;}
-var mergedEvents=[];var currentMergeBuffer=[];var rightEdge;function beginMerging(){currentMergeBuffer.push(remainingEvents[0]);remainingEvents.splice(0,1);rightEdge=currentMergeBuffer[0].max;}
-function flushCurrentMergeBuffer(){if(currentMergeBuffer.length==0)
-return;mergedEvents.push(mergeFunction(currentMergeBuffer));currentMergeBuffer=[];if(remainingEvents.length!=0)
-beginMerging();}
-beginMerging();while(remainingEvents.length){var currentEvent=remainingEvents[0];var distanceFromRightEdge=currentEvent.min-rightEdge;if(distanceFromRightEdge<mergeThreshold){rightEdge=Math.max(rightEdge,currentEvent.max);remainingEvents.splice(0,1);currentMergeBuffer.push(currentEvent);continue;}
-flushCurrentMergeBuffer();}
-flushCurrentMergeBuffer();return mergedEvents;}
-function findEmptyRangesBetweenRanges(inRanges,opt_totalRange){if(opt_totalRange&&opt_totalRange.isEmpty)
-opt_totalRange=undefined;var emptyRanges=[];if(!inRanges.length){if(opt_totalRange)
-emptyRanges.push(opt_totalRange);return emptyRanges;}
-inRanges=inRanges.slice();inRanges.sort(function(x,y){return x.min-y.min;});if(opt_totalRange&&(opt_totalRange.min<inRanges[0].min)){emptyRanges.push(tr.b.Range.fromExplicitRange(opt_totalRange.min,inRanges[0].min));}
-inRanges.forEach(function(range,index){for(var otherIndex=0;otherIndex<inRanges.length;++otherIndex){if(index===otherIndex)
-continue;var other=inRanges[otherIndex];if(other.min>range.max){emptyRanges.push(tr.b.Range.fromExplicitRange(range.max,other.min));return;}
-if(other.max>range.max){return;}}
-if(opt_totalRange&&(range.max<opt_totalRange.max)){emptyRanges.push(tr.b.Range.fromExplicitRange(range.max,opt_totalRange.max));}});return emptyRanges;}
-return{convertEventsToRanges:convertEventsToRanges,findEmptyRangesBetweenRanges:findEmptyRangesBetweenRanges,mergeRanges:mergeRanges};});'use strict';tr.exportTo('tr.e.audits',function(){var Frame=tr.model.Frame;var Statistics=tr.b.Statistics;var UI_DRAW_TYPE={NONE:'none',LEGACY:'legacy',MARSHMALLOW:'marshmallow'};var UI_THREAD_DRAW_NAMES={'performTraversals':UI_DRAW_TYPE.LEGACY,'Choreographer#doFrame':UI_DRAW_TYPE.MARSHMALLOW};var RENDER_THREAD_DRAW_NAME='DrawFrame';var RENDER_THREAD_INDEP_DRAW_NAME='doFrame';var THREAD_SYNC_NAME='syncFrameState';function getSlicesForThreadTimeRanges(threadTimeRanges){var ret=[];threadTimeRanges.forEach(function(threadTimeRange){var slices=[];threadTimeRange.thread.sliceGroup.iterSlicesInTimeRange(function(slice){slices.push(slice);},threadTimeRange.start,threadTimeRange.end);ret.push.apply(ret,slices);});return ret;}
-function makeFrame(threadTimeRanges,surfaceFlinger){var args={};if(surfaceFlinger&&surfaceFlinger.hasVsyncs){var start=Statistics.min(threadTimeRanges,function(threadTimeRanges){return threadTimeRanges.start;});args['deadline']=surfaceFlinger.getFrameDeadline(start);args['frameKickoff']=surfaceFlinger.getFrameKickoff(start);}
-var events=getSlicesForThreadTimeRanges(threadTimeRanges);return new Frame(events,threadTimeRanges,args);}
-function findOverlappingDrawFrame(renderThread,time){if(!renderThread)
-return undefined;var slices=renderThread.sliceGroup.slices;for(var i=0;i<slices.length;i++){var slice=slices[i];if(slice.title==RENDER_THREAD_DRAW_NAME&&slice.start<=time&&time<=slice.end){return slice;}}
-return undefined;}
-function getPreTraversalWorkRanges(uiThread){if(!uiThread)
-return[];var preFrameEvents=[];uiThread.sliceGroup.slices.forEach(function(slice){if(slice.title=='obtainView'||slice.title=='setupListItem'||slice.title=='deliverInputEvent'||slice.title=='RV Scroll')
-preFrameEvents.push(slice);});uiThread.asyncSliceGroup.slices.forEach(function(slice){if(slice.title=='deliverInputEvent')
-preFrameEvents.push(slice);});return tr.b.mergeRanges(tr.b.convertEventsToRanges(preFrameEvents),3,function(events){return{start:events[0].min,end:events[events.length-1].max};});}
-function getFrameStartTime(traversalStart,preTraversalWorkRanges){var preTraversalWorkRange=tr.b.findClosestIntervalInSortedIntervals(preTraversalWorkRanges,function(range){return range.start},function(range){return range.end},traversalStart,3);if(preTraversalWorkRange)
-return preTraversalWorkRange.start;return traversalStart;}
-function getUiThreadDrivenFrames(app){if(!app.uiThread)
-return[];var preTraversalWorkRanges=[];if(app.uiDrawType==UI_DRAW_TYPE.LEGACY)
-preTraversalWorkRanges=getPreTraversalWorkRanges(app.uiThread);var frames=[];app.uiThread.sliceGroup.slices.forEach(function(slice){if(!(slice.title in UI_THREAD_DRAW_NAMES)){return;}
-var threadTimeRanges=[];var uiThreadTimeRange={thread:app.uiThread,start:getFrameStartTime(slice.start,preTraversalWorkRanges),end:slice.end};threadTimeRanges.push(uiThreadTimeRange);var rtDrawSlice=findOverlappingDrawFrame(app.renderThread,slice.end);if(rtDrawSlice){var rtSyncSlice=rtDrawSlice.findDescendentSlice(THREAD_SYNC_NAME);if(rtSyncSlice){uiThreadTimeRange.end=Math.min(uiThreadTimeRange.end,rtSyncSlice.start);}
-threadTimeRanges.push({thread:app.renderThread,start:rtDrawSlice.start,end:rtDrawSlice.end});}
-frames.push(makeFrame(threadTimeRanges,app.surfaceFlinger));});return frames;}
-function getRenderThreadDrivenFrames(app){if(!app.renderThread)
-return[];var frames=[];app.renderThread.sliceGroup.getSlicesOfName(RENDER_THREAD_INDEP_DRAW_NAME).forEach(function(slice){var threadTimeRanges=[{thread:app.renderThread,start:slice.start,end:slice.end}];frames.push(makeFrame(threadTimeRanges,app.surfaceFlinger));});return frames;}
-function getUiDrawType(uiThread){if(!uiThread)
-return UI_DRAW_TYPE.NONE;var slices=uiThread.sliceGroup.slices;for(var i=0;i<slices.length;i++){if(slices[i].title in UI_THREAD_DRAW_NAMES){return UI_THREAD_DRAW_NAMES[slices[i].title];}}
-return UI_DRAW_TYPE.NONE;}
-function getInputSamples(process){var samples=undefined;for(var counterName in process.counters){if(/^android\.aq\:pending/.test(counterName)&&process.counters[counterName].numSeries==1){samples=process.counters[counterName].series[0].samples;break;}}
-if(!samples)
-return[];var inputSamples=[];var lastValue=0;samples.forEach(function(sample){if(sample.value>lastValue){inputSamples.push(sample);}
-lastValue=sample.value;});return inputSamples;}
-function getAnimationAsyncSlices(uiThread){if(!uiThread)
-return[];var slices=[];uiThread.asyncSliceGroup.iterateAllEvents(function(slice){if(/^animator\:/.test(slice.title))
-slices.push(slice);});return slices;}
-function AndroidApp(process,uiThread,renderThread,surfaceFlinger,uiDrawType){this.process=process;this.uiThread=uiThread;this.renderThread=renderThread;this.surfaceFlinger=surfaceFlinger;this.uiDrawType=uiDrawType;this.frames_=undefined;this.inputs_=undefined;};AndroidApp.createForProcessIfPossible=function(process,surfaceFlinger){var uiThread=process.getThread(process.pid);var uiDrawType=getUiDrawType(uiThread);if(uiDrawType==UI_DRAW_TYPE.NONE){uiThread=undefined;}
-var renderThreads=process.findAllThreadsNamed('RenderThread');var renderThread=renderThreads.length==1?renderThreads[0]:undefined;if(uiThread||renderThread){return new AndroidApp(process,uiThread,renderThread,surfaceFlinger,uiDrawType);}}
-AndroidApp.prototype={getFrames:function(){if(!this.frames_){var uiFrames=getUiThreadDrivenFrames(this);var rtFrames=getRenderThreadDrivenFrames(this);this.frames_=uiFrames.concat(rtFrames);this.frames_.sort(function(a,b){a.end-b.end});}
-return this.frames_;},getInputSamples:function(){if(!this.inputs_){this.inputs_=getInputSamples(this.process);}
-return this.inputs_;},getAnimationAsyncSlices:function(){if(!this.animations_){this.animations_=getAnimationAsyncSlices(this.uiThread);}
-return this.animations_;}};return{AndroidApp:AndroidApp};});'use strict';tr.exportTo('tr.e.audits',function(){var findLowIndexInSortedArray=tr.b.findLowIndexInSortedArray;var VSYNC_SF_NAME='android.VSYNC-sf';var VSYNC_APP_NAME='android.VSYNC-app';var VSYNC_FALLBACK_NAME='android.VSYNC';var TIMESTAMP_FUDGE_MS=0.01;function getVsyncTimestamps(process,counterName){var vsync=process.counters[counterName];if(!vsync)
-vsync=process.counters[VSYNC_FALLBACK_NAME];if(vsync&&vsync.numSeries==1&&vsync.numSamples>1)
-return vsync.series[0].timestamps;return undefined;}
-function AndroidSurfaceFlinger(process,thread){this.process=process;this.thread=thread;this.appVsync_=undefined;this.sfVsync_=undefined;this.appVsyncTimestamps_=getVsyncTimestamps(process,VSYNC_APP_NAME);this.sfVsyncTimestamps_=getVsyncTimestamps(process,VSYNC_SF_NAME);};AndroidSurfaceFlinger.createForProcessIfPossible=function(process){var mainThread=process.getThread(process.pid);if(mainThread&&mainThread.name&&/surfaceflinger/.test(mainThread.name))
-return new AndroidSurfaceFlinger(process,mainThread);var primaryThreads=process.findAllThreadsNamed('SurfaceFlinger');if(primaryThreads.length==1)
-return new AndroidSurfaceFlinger(process,primaryThreads[0]);return undefined;};AndroidSurfaceFlinger.prototype={get hasVsyncs(){return!!this.appVsyncTimestamps_&&!!this.sfVsyncTimestamps_;},getFrameKickoff:function(timestamp){if(!this.hasVsyncs)
-throw new Error('cannot query vsync info without vsyncs');var firstGreaterIndex=findLowIndexInSortedArray(this.appVsyncTimestamps_,function(x){return x;},timestamp+TIMESTAMP_FUDGE_MS);if(firstGreaterIndex<1)
-return undefined;return this.appVsyncTimestamps_[firstGreaterIndex-1];},getFrameDeadline:function(timestamp){if(!this.hasVsyncs)
-throw new Error('cannot query vsync info without vsyncs');var firstGreaterIndex=findLowIndexInSortedArray(this.sfVsyncTimestamps_,function(x){return x;},timestamp+TIMESTAMP_FUDGE_MS);if(firstGreaterIndex>=this.sfVsyncTimestamps_.length)
-return undefined;return this.sfVsyncTimestamps_[firstGreaterIndex];}};return{AndroidSurfaceFlinger:AndroidSurfaceFlinger};});'use strict';tr.exportTo('tr.e.audits',function(){var AndroidApp=tr.e.audits.AndroidApp;var AndroidSurfaceFlinger=tr.e.audits.AndroidSurfaceFlinger;var IMPORTANT_SURFACE_FLINGER_SLICES={'doComposition':true,'updateTexImage':true,'postFramebuffer':true};var IMPORTANT_UI_THREAD_SLICES={'Choreographer#doFrame':true,'performTraversals':true,'deliverInputEvent':true};var IMPORTANT_RENDER_THREAD_SLICES={'doFrame':true};function iterateImportantThreadSlices(thread,important,callback){if(!thread)
-return;thread.sliceGroup.slices.forEach(function(slice){if(slice.title in important)
-callback(slice);});}
-function AndroidModelHelper(model){this.model=model;this.apps=[];this.surfaceFlinger=undefined;var processes=model.getAllProcesses();for(var i=0;i<processes.length&&!this.surfaceFlinger;i++){this.surfaceFlinger=AndroidSurfaceFlinger.createForProcessIfPossible(processes[i]);}
-model.getAllProcesses().forEach(function(process){var app=AndroidApp.createForProcessIfPossible(process,this.surfaceFlinger);if(app)
-this.apps.push(app);},this);};AndroidModelHelper.prototype={iterateImportantSlices:function(callback){if(this.surfaceFlinger){iterateImportantThreadSlices(this.surfaceFlinger.thread,IMPORTANT_SURFACE_FLINGER_SLICES,callback);}
-this.apps.forEach(function(app){iterateImportantThreadSlices(app.uiThread,IMPORTANT_UI_THREAD_SLICES,callback);iterateImportantThreadSlices(app.renderThread,IMPORTANT_RENDER_THREAD_SLICES,callback);});}};return{AndroidModelHelper:AndroidModelHelper};});'use strict';tr.exportTo('tr.e.audits',function(){var SCHEDULING_STATE=tr.model.SCHEDULING_STATE;var Auditor=tr.c.Auditor;var AndroidModelHelper=tr.e.audits.AndroidModelHelper;var ColorScheme=tr.b.ColorScheme;var Statistics=tr.b.Statistics;var FRAME_PERF_CLASS=tr.model.FRAME_PERF_CLASS;var InteractionRecord=tr.model.InteractionRecord;var Alert=tr.model.Alert;var EventInfo=tr.model.EventInfo;var TimeDuration=tr.b.u.TimeDuration;var EXPECTED_FRAME_TIME_MS=16.67;function getStart(e){return e.start;}
-function getDuration(e){return e.duration;}
-function getCpuDuration(e){return(e.cpuDuration!==undefined)?e.cpuDuration:e.duration;}
-function frameIsActivityStart(frame){for(var i=0;i<frame.associatedEvents.length;i++){if(frame.associatedEvents[i].title=='activityStart')
-return true;}
-return false;}
-var Auditor=tr.c.Auditor;var AndroidModelHelper=tr.e.audits.AndroidModelHelper;function frameMissedDeadline(frame){return frame.args['deadline']&&frame.args['deadline']<frame.end;}
-function DocLinkBuilder(){this.docLinks=[];}
-DocLinkBuilder.prototype={addAppVideo:function(name,videoId){this.docLinks.push({label:'Video Link',textContent:('Android Performance Patterns: '+name),href:'https://www.youtube.com/watch?list=PLWz5rJ2EKKc9CBxr3BVjPTPoDPLdPIFCE&v='+videoId});return this;},addDacRef:function(name,link){this.docLinks.push({label:'Doc Link',textContent:(name+' documentation'),href:'https://developer.android.com/reference/'+link});return this;},build:function(){return this.docLinks;}};function AndroidAuditor(model){Auditor.call(this,model);var helper=new AndroidModelHelper(model);if(helper.apps.length||helper.surfaceFlinger)
-this.helper=helper;};AndroidAuditor.viewAlphaAlertInfo_=new EventInfo('Inefficient View alpha usage','Setting an alpha between 0 and 1 has significant performance costs, if one of the fast alpha paths is not used.',new DocLinkBuilder().addAppVideo('Hidden Cost of Transparency','wIy8g8yNhNk').addDacRef('View#setAlpha()','android/view/View.html#setAlpha(float)').build());AndroidAuditor.saveLayerAlertInfo_=new EventInfo('Expensive rendering with Canvas#saveLayer()','Canvas#saveLayer() incurs extremely high rendering cost. They disrupt the rendering pipeline when drawn, forcing a flush of drawing content. Instead use View hardware layers, or static Bitmaps. This enables the offscreen buffers to be reused in between frames, and avoids the disruptive render target switch.',new DocLinkBuilder().addAppVideo('Hidden Cost of Transparency','wIy8g8yNhNk').addDacRef('Canvas#saveLayerAlpha()','android/graphics/Canvas.html#saveLayerAlpha(android.graphics.RectF, int, int)').build());AndroidAuditor.getSaveLayerAlerts_=function(frame){var badAlphaRegEx=/^(.+) alpha caused (unclipped )?saveLayer (\d+)x(\d+)$/;var saveLayerRegEx=/^(unclipped )?saveLayer (\d+)x(\d+)$/;var ret=[];var events=[];frame.associatedEvents.forEach(function(slice){var match=badAlphaRegEx.exec(slice.title);if(match){var args={'view name':match[1],width:parseInt(match[3]),height:parseInt(match[4])};ret.push(new Alert(AndroidAuditor.viewAlphaAlertInfo_,slice.start,[slice],args));}else if(saveLayerRegEx.test(slice.title))
-events.push(slice);},this);if(events.length>ret.length){var unclippedSeen=Statistics.sum(events,function(slice){return saveLayerRegEx.exec(slice.title)[1]?1:0;});var clippedSeen=events.length-unclippedSeen;var earliestStart=Statistics.min(events,function(slice){return slice.start;});var args={'Unclipped saveLayer count (especially bad!)':unclippedSeen,'Clipped saveLayer count':clippedSeen};events.push(frame);ret.push(new Alert(AndroidAuditor.saveLayerAlertInfo_,earliestStart,events,args));}
-return ret;};AndroidAuditor.pathAlertInfo_=new EventInfo('Path texture churn','Paths are drawn with a mask texture, so when a path is modified / newly drawn, that texture must be generated and uploaded to the GPU. Ensure that you cache paths between frames and do not unnecessarily call Path#reset(). You can cut down on this cost by sharing Path object instances between drawables/views.');AndroidAuditor.getPathAlert_=function(frame){var uploadRegEx=/^Generate Path Texture$/;var events=frame.associatedEvents.filter(function(event){return event.title=='Generate Path Texture';});var start=Statistics.min(events,getStart);var duration=Statistics.sum(events,getDuration);if(duration<3)
-return undefined;events.push(frame);return new Alert(AndroidAuditor.pathAlertInfo_,start,events,{'Time spent':new TimeDuration(duration)});}
-AndroidAuditor.uploadAlertInfo_=new EventInfo('Expensive Bitmap uploads','Bitmaps that have been modified / newly drawn must be uploaded to the GPU. Since this is expensive if the total number of pixels uploaded is large, reduce the amount of Bitmap churn in this animation/context, per frame.');AndroidAuditor.getUploadAlert_=function(frame){var uploadRegEx=/^Upload (\d+)x(\d+) Texture$/;var events=[];var start=Number.POSITIVE_INFINITY;var duration=0;var pixelsUploaded=0;frame.associatedEvents.forEach(function(event){var match=uploadRegEx.exec(event.title);if(match){events.push(event);start=Math.min(start,event.start);duration+=event.duration;pixelsUploaded+=parseInt(match[1])*parseInt(match[2]);}});if(events.length==0||duration<3)
-return undefined;var mPixels=(pixelsUploaded/1000000).toFixed(2)+' million';var args={'Pixels uploaded':mPixels,'Time spent':new TimeDuration(duration)};events.push(frame);return new Alert(AndroidAuditor.uploadAlertInfo_,start,events,args);}
-AndroidAuditor.ListViewInflateAlertInfo_=new EventInfo('Inflation during ListView recycling','ListView item recycling involved inflating views. Ensure your Adapter#getView() recycles the incoming View, instead of constructing a new one.');AndroidAuditor.ListViewBindAlertInfo_=new EventInfo('Inefficient ListView recycling/rebinding','ListView recycling taking too much time per frame. Ensure your Adapter#getView() binds data efficiently.');AndroidAuditor.getListViewAlert_=function(frame){var events=frame.associatedEvents.filter(function(event){return event.title=='obtainView'||event.title=='setupListItem';});var duration=Statistics.sum(events,getCpuDuration);if(events.length==0||duration<3)
-return undefined;var hasInflation=false;for(var i=0;i<events.length;i++){if(events[i]instanceof tr.model.Slice&&events[i].findDescendentSlice('inflate')){hasInflation=true;break;}}
-var start=Statistics.min(events,getStart);var args={'Time spent':new TimeDuration(duration)};args['ListView items '+(hasInflation?'inflated':'rebound')]=events.length/2;var eventInfo=hasInflation?AndroidAuditor.ListViewInflateAlertInfo_:AndroidAuditor.ListViewBindAlertInfo_;events.push(frame);return new Alert(eventInfo,start,events,args);}
-AndroidAuditor.measureLayoutAlertInfo_=new EventInfo('Expensive measure/layout pass','Measure/Layout took a significant time, contributing to jank. Avoid triggering layout during animations.',new DocLinkBuilder().addAppVideo('Invalidations, Layouts, and Performance','we6poP0kw6E').build());AndroidAuditor.getMeasureLayoutAlert_=function(frame){var events=frame.associatedEvents.filter(function(event){return event.title=='measure'||event.title=='layout';});var duration=Statistics.sum(events,getCpuDuration);if(events.length==0||duration<3)
-return undefined;var start=Statistics.min(events,getStart);events.push(frame);return new Alert(AndroidAuditor.measureLayoutAlertInfo_,start,events,{'Time spent':new TimeDuration(duration)});}
-AndroidAuditor.viewDrawAlertInfo_=new EventInfo('Long View#draw()','Recording the drawing commands of invalidated Views took a long time. Avoid significant work in View or Drawable custom drawing, especially allocations or drawing to Bitmaps.',new DocLinkBuilder().addAppVideo('Invalidations, Layouts, and Performance','we6poP0kw6E').addAppVideo('Avoiding Allocations in onDraw()','HAK5acHQ53E').build());AndroidAuditor.getViewDrawAlert_=function(frame){var slice=undefined;for(var i=0;i<frame.associatedEvents.length;i++){if(frame.associatedEvents[i].title=='getDisplayList'||frame.associatedEvents[i].title=='Record View#draw()'){slice=frame.associatedEvents[i];break;}}
-if(!slice||getCpuDuration(slice)<3)
-return undefined;return new Alert(AndroidAuditor.viewDrawAlertInfo_,slice.start,[slice,frame],{'Time spent':new TimeDuration(getCpuDuration(slice))});}
-AndroidAuditor.blockingGcAlertInfo_=new EventInfo('Blocking Garbage Collection','Blocking GCs are caused by object churn, and made worse by having large numbers of objects in the heap. Avoid allocating objects during animations/scrolling, and recycle Bitmaps to avoid triggering garbage collection.',new DocLinkBuilder().addAppVideo('Garbage Collection in Android','pzfzz50W5Uo').addAppVideo('Avoiding Allocations in onDraw()','HAK5acHQ53E').build());AndroidAuditor.getBlockingGcAlert_=function(frame){var events=frame.associatedEvents.filter(function(event){return event.title=='DVM Suspend'||event.title=='GC: Wait For Concurrent';});var blockedDuration=Statistics.sum(events,getDuration);if(blockedDuration<3)
-return undefined;var start=Statistics.min(events,getStart);events.push(frame);return new Alert(AndroidAuditor.blockingGcAlertInfo_,start,events,{'Blocked duration':new TimeDuration(blockedDuration)});};AndroidAuditor.lockContentionAlertInfo_=new EventInfo('Lock contention','UI thread lock contention is caused when another thread holds a lock that the UI thread is trying to use. UI thread progress is blocked until the lock is released. Inspect locking done within the UI thread, and ensure critical sections are short.');AndroidAuditor.getLockContentionAlert_=function(frame){var events=frame.associatedEvents.filter(function(event){return/^Lock Contention on /.test(event.title);});var blockedDuration=Statistics.sum(events,getDuration);if(blockedDuration<1)
-return undefined;var start=Statistics.min(events,getStart);events.push(frame);return new Alert(AndroidAuditor.lockContentionAlertInfo_,start,events,{'Blocked duration':new TimeDuration(blockedDuration)});};AndroidAuditor.schedulingAlertInfo_=new EventInfo('Scheduling delay','Work to produce this frame was descheduled for several milliseconds, contributing to jank. Ensure that code on the UI thread doesn\'t block on work being done on other threads, and that background threads (doing e.g. network or bitmap loading) are running at android.os.Process#THREAD_PRIORITY_BACKGROUND or lower so they are less likely to interrupt the UI thread. These background threads should show up with a priority number of 130 or higher in the scheduling section under the Kernel process.');AndroidAuditor.getSchedulingAlert_=function(frame){var totalDuration=0;var totalStats={};frame.threadTimeRanges.forEach(function(ttr){var stats=ttr.thread.getSchedulingStatsForRange(ttr.start,ttr.end);tr.b.iterItems(stats,function(key,value){if(!(key in totalStats))
-totalStats[key]=0;totalStats[key]+=value;totalDuration+=value;});});if(!(SCHEDULING_STATE.RUNNING in totalStats)||totalDuration==0||totalDuration-totalStats[SCHEDULING_STATE.RUNNING]<3)
-return;var args={};tr.b.iterItems(totalStats,function(key,value){if(key===SCHEDULING_STATE.RUNNABLE)
-key='Not scheduled, but runnable';else if(key===SCHEDULING_STATE.UNINTR_SLEEP)
-key='Blocking I/O delay';args[key]=new TimeDuration(value);});return new Alert(AndroidAuditor.schedulingAlertInfo_,frame.start,[frame],args);};AndroidAuditor.prototype={__proto__:Auditor.prototype,renameAndSort_:function(){this.model.kernel.important=false;this.model.getAllProcesses().forEach(function(process){if(this.helper.surfaceFlinger&&process==this.helper.surfaceFlinger.process){if(!process.name)
-process.name='SurfaceFlinger';process.sortIndex=Number.NEGATIVE_INFINITY;process.important=false;return;}
-var uiThread=process.getThread(process.pid);if(!process.name&&uiThread&&uiThread.name){if(/^ndroid\./.test(uiThread.name))
-uiThread.name='a'+uiThread.name;process.name=uiThread.name;uiThread.name='UI Thread';}
-process.sortIndex=0;for(var tid in process.threads){process.sortIndex-=process.threads[tid].sliceGroup.slices.length;}},this);this.model.getAllThreads().forEach(function(thread){if(thread.tid==thread.parent.pid)
-thread.sortIndex=-3;if(thread.name=='RenderThread')
-thread.sortIndex=-2;if(/^hwuiTask/.test(thread.name))
-thread.sortIndex=-1;});},pushFramesAndJudgeJank_:function(){var badFramesObserved=0;var framesObserved=0;var surfaceFlinger=this.helper.surfaceFlinger;this.helper.apps.forEach(function(app){app.process.frames=app.getFrames();app.process.frames.forEach(function(frame){if(frame.totalDuration>EXPECTED_FRAME_TIME_MS*2){badFramesObserved+=2;frame.perfClass=FRAME_PERF_CLASS.TERRIBLE;}else if(frame.totalDuration>EXPECTED_FRAME_TIME_MS||frameMissedDeadline(frame)){badFramesObserved++;frame.perfClass=FRAME_PERF_CLASS.BAD;}else{frame.perfClass=FRAME_PERF_CLASS.GOOD;}});framesObserved+=app.process.frames.length;});if(framesObserved){var portionBad=badFramesObserved/framesObserved;if(portionBad>0.3)
-this.model.faviconHue='red';else if(portionBad>0.05)
-this.model.faviconHue='yellow';else
-this.model.faviconHue='green';}},pushEventInfo_:function(){var appAnnotator=new AppAnnotator();this.helper.apps.forEach(function(app){if(app.uiThread)
-appAnnotator.applyEventInfos(app.uiThread.sliceGroup);if(app.renderThread)
-appAnnotator.applyEventInfos(app.renderThread.sliceGroup);});},runAnnotate:function(){if(!this.helper)
-return;this.renameAndSort_();this.pushFramesAndJudgeJank_();this.pushEventInfo_();this.helper.iterateImportantSlices(function(slice){slice.important=true;});},runAudit:function(){if(!this.helper)
-return;var alerts=this.model.alerts;this.helper.apps.forEach(function(app){app.getFrames().forEach(function(frame){alerts.push.apply(alerts,AndroidAuditor.getSaveLayerAlerts_(frame));if(frame.perfClass==FRAME_PERF_CLASS.NEUTRAL||frame.perfClass==FRAME_PERF_CLASS.GOOD)
-return;var alert=AndroidAuditor.getPathAlert_(frame);if(alert)
-alerts.push(alert);var alert=AndroidAuditor.getUploadAlert_(frame);if(alert)
-alerts.push(alert);var alert=AndroidAuditor.getListViewAlert_(frame);if(alert)
-alerts.push(alert);var alert=AndroidAuditor.getMeasureLayoutAlert_(frame);if(alert)
-alerts.push(alert);var alert=AndroidAuditor.getViewDrawAlert_(frame);if(alert)
-alerts.push(alert);var alert=AndroidAuditor.getBlockingGcAlert_(frame);if(alert)
-alerts.push(alert);var alert=AndroidAuditor.getLockContentionAlert_(frame);if(alert)
-alerts.push(alert);var alert=AndroidAuditor.getSchedulingAlert_(frame);if(alert)
-alerts.push(alert);});},this);this.addRenderingInteractionRecords();this.addInputInteractionRecords();},addRenderingInteractionRecords:function(){var events=[];this.helper.apps.forEach(function(app){events.push.apply(events,app.getAnimationAsyncSlices());events.push.apply(events,app.getFrames());});var mergerFunction=function(events){var ir=new InteractionRecord(this.model,'Rendering',ColorScheme.getColorIdForGeneralPurposeString('mt_rendering'),events[0].min,events[events.length-1].max-events[0].min);this.model.addInteractionRecord(ir);}.bind(this);tr.b.mergeRanges(tr.b.convertEventsToRanges(events),30,mergerFunction);},addInputInteractionRecords:function(){var inputSamples=[];this.helper.apps.forEach(function(app){inputSamples.push.apply(inputSamples,app.getInputSamples());});var mergerFunction=function(events){var ir=new InteractionRecord(this.model,'Input',ColorScheme.getColorIdForGeneralPurposeString('mt_input'),events[0].min,events[events.length-1].max-events[0].min);this.model.addInteractionRecord(ir);}.bind(this);var inputRanges=inputSamples.map(function(sample){return tr.b.Range.fromExplicitRange(sample.timestamp,sample.timestamp);});tr.b.mergeRanges(inputRanges,30,mergerFunction);}};Auditor.register(AndroidAuditor);function AppAnnotator(){this.titleInfoLookup={};this.titleParentLookup={};this.build_();}
-AppAnnotator.prototype={build_:function(){var registerEventInfo=function(dict){this.titleInfoLookup[dict.title]=new EventInfo(dict.title,dict.description,dict.docLinks);if(dict.parents)
-this.titleParentLookup[dict.title]=dict.parents;}.bind(this);registerEventInfo({title:'inflate',description:'Constructing a View hierarchy from pre-processed XML via LayoutInflater#layout. This includes constructing all of the View objects in the hierarchy, and applying styled attributes.'});registerEventInfo({title:'obtainView',description:'Adapter#getView() called to bind content to a recycled View that is being presented.'});registerEventInfo({title:'setupListItem',description:'Attached a newly-bound, recycled View to its parent ListView.'});registerEventInfo({title:'setupGridItem',description:'Attached a newly-bound, recycled View to its parent GridView.'});var choreographerLinks=new DocLinkBuilder().addDacRef('Choreographer','android/view/Choreographer.html').build();registerEventInfo({title:'Choreographer#doFrame',docLinks:choreographerLinks,description:'Choreographer executes frame callbacks for inputs, animations, and rendering traversals. When this work is done, a frame will be presented to the user.'});registerEventInfo({title:'input',parents:['Choreographer#doFrame'],docLinks:choreographerLinks,description:'Input callbacks are processed. This generally encompasses dispatching input to Views, as well as any work the Views do to process this input/gesture.'});registerEventInfo({title:'animation',parents:['Choreographer#doFrame'],docLinks:choreographerLinks,description:'Animation callbacks are processed. This is generally minimal work, as animations determine progress for the frame, and push new state to animated objects (such as setting View properties).'});registerEventInfo({title:'traversals',parents:['Choreographer#doFrame'],docLinks:choreographerLinks,description:'Primary draw traversals. This is the primary traversal of the View hierarchy, including layout and draw passes.'});var traversalParents=['Choreographer#doFrame','performTraversals'];var layoutLinks=new DocLinkBuilder().addDacRef('View#Layout','android/view/View.html#Layout').build();registerEventInfo({title:'performTraversals',description:'A drawing traversal of the View hierarchy, comprised of all layout and drawing needed to produce the frame.'});registerEventInfo({title:'measure',parents:traversalParents,docLinks:layoutLinks,description:'First of two phases in view hierarchy layout. Views are asked to size themselves according to constraints supplied by their parent. Some ViewGroups may measure a child more than once to help satisfy their own constraints. Nesting ViewGroups that measure children more than once can lead to excessive and repeated work.'});registerEventInfo({title:'layout',parents:traversalParents,docLinks:layoutLinks,description:'Second of two phases in view hierarchy layout, repositioning content and child Views into their new locations.'});var drawString='Draw pass over the View hierarchy. Every invalidated View will have its drawing commands recorded. On Android versions prior to Lollipop, this would also include the issuing of draw commands to the GPU. Starting with Lollipop, it only includes the recording of commands, and syncing that information to the RenderThread.';registerEventInfo({title:'draw',parents:traversalParents,description:drawString});var recordString='Every invalidated View\'s drawing commands are recorded. Each will have View#draw() called, and is passed a Canvas that will record and store its drawing commands until it is next invalidated/rerecorded.';registerEventInfo({title:'getDisplayList',parents:['draw'],description:recordString});registerEventInfo({title:'Record View#draw()',parents:['draw'],description:recordString});registerEventInfo({title:'drawDisplayList',parents:['draw'],description:'Execution of recorded draw commands to generate a frame. This represents the actual formation and issuing of drawing commands to the GPU. On Android L and higher devices, this work is done on a dedicated RenderThread, instead of on the UI Thread.'});registerEventInfo({title:'DrawFrame',description:'RenderThread portion of the standard UI/RenderThread split frame. This represents the actual formation and issuing of drawing commands to the GPU.'});registerEventInfo({title:'doFrame',description:'RenderThread animation frame. Represents drawing work done by the RenderThread on a frame where the UI thread did not produce new drawing content.'});registerEventInfo({title:'syncFrameState',description:'Sync stage between the UI thread and the RenderThread, where the UI thread hands off a frame (including information about modified Views). Time in this method primarily consists of uploading modified Bitmaps to the GPU. After this sync is completed, the UI thread is unblocked, and the RenderThread starts to render the frame.'});registerEventInfo({title:'flush drawing commands',description:'Issuing the now complete drawing commands to the GPU.'});registerEventInfo({title:'eglSwapBuffers',description:'Complete GPU rendering of the frame.'});registerEventInfo({title:'RV Scroll',description:'RecyclerView is calculating a scroll. If there are too many of these in Systrace, some Views inside RecyclerView might be causing it. Try to avoid using EditText, focusable views or handle them with care.'});registerEventInfo({title:'RV OnLayout',description:'OnLayout has been called by the View system. If this shows up too many times in Systrace, make sure the children of RecyclerView do not update themselves directly. This will cause a full re-layout but when it happens via the Adapter notifyItemChanged, RecyclerView can avoid full layout calculation.'});registerEventInfo({title:'RV FullInvalidate',description:'NotifyDataSetChanged or equal has been called. If this is taking a long time, try sending granular notify adapter changes instead of just calling notifyDataSetChanged or setAdapter / swapAdapter. Adding stable ids to your adapter might help.'});registerEventInfo({title:'RV PartialInvalidate',description:'RecyclerView is rebinding a View. If this is taking a lot of time, consider optimizing your layout or make sure you are not doing extra operations in onBindViewHolder call.'});registerEventInfo({title:'RV OnBindView',description:'RecyclerView is rebinding a View. If this is taking a lot of time, consider optimizing your layout or make sure you are not doing extra operations in onBindViewHolder call.'});registerEventInfo({title:'RV CreateView',description:'RecyclerView is creating a new View. If too many of these are present: 1) There might be a problem in Recycling (e.g. custom Animations that set transient state and prevent recycling or ItemAnimator not implementing the contract properly. See Adapter#onFailedToRecycleView(ViewHolder). 2) There may be too many item view types. Try merging them. 3) There might be too many itemChange animations and not enough space in RecyclerPool. Try increasing your pool size and item cache size.'});registerEventInfo({title:'eglSwapBuffers',description:'The CPU has finished producing drawing commands, and is flushing drawing work to the GPU, and posting that buffer to the consumer (which is often SurfaceFlinger window composition). Once this is completed, the GPU can produce the frame content without any involvement from the CPU.'});},applyEventInfosRecursive_:function(parentNames,slice){var checkExpectedParentNames=function(expectedParentNames){if(!expectedParentNames)
-return true;return expectedParentNames.some(function(name){return name in parentNames;});}
-if(slice.title in this.titleInfoLookup){if(checkExpectedParentNames(this.titleParentLookup[slice.title]))
-slice.info=this.titleInfoLookup[slice.title];}
-if(slice.subSlices.length>0){if(!(slice.title in parentNames))
-parentNames[slice.title]=0;parentNames[slice.title]++;slice.subSlices.forEach(function(subSlice){this.applyEventInfosRecursive_(parentNames,subSlice);},this);parentNames[slice.title]--;if(parentNames[slice.title]==0)
-delete parentNames[slice.title];}},applyEventInfos:function(sliceGroup){sliceGroup.topLevelSlices.forEach(function(slice){this.applyEventInfosRecursive_({},slice);},this);}};return{AndroidAuditor:AndroidAuditor};});'use strict';tr.exportTo('tr.e.audits',function(){var VSYNC_COUNTER_PRECISIONS={'android.VSYNC-app':15,'android.VSYNC':15};var VSYNC_SLICE_PRECISIONS={'RenderWidgetHostViewAndroid::OnVSync':5,'VSYNC':10,'vblank':10,'DisplayLinkMac::GetVSyncParameters':5};var BEGIN_FRAME_SLICE_PRECISION={'Scheduler::BeginFrame':10};function VSyncAuditor(model){tr.c.Auditor.call(this,model);};VSyncAuditor.prototype={__proto__:tr.c.Auditor.prototype,runAnnotate:function(){this.model.device.vSyncTimestamps=this.findVSyncTimestamps(this.model);},findVSyncTimestamps:function(model){var times=[];var maxPrecision=Number.NEGATIVE_INFINITY;var maxTitle=undefined;function useInstead(title,precisions){var precision=precisions[title];if(precision===undefined)
+this.lines_.push([line,eventBase,parseInt(eventBase.cpuNumber),parseInt(eventBase.pid),parseFloat(eventBase.timestamp)*1000]);}},forEachLine:function(handler){for(var i=0;i<this.lines_.length;++i){var line=this.lines_[i];handler.apply(this,line);}}};tr.importer.Importer.register(LinuxPerfImporter);return{LinuxPerfImporter:LinuxPerfImporter,_LinuxPerfImporterTestExports:TestExports};});'use strict';tr.exportTo('tr.e.audits',function(){var VSYNC_COUNTER_PRECISIONS={'android.VSYNC-app':15,'android.VSYNC':15};var VSYNC_SLICE_PRECISIONS={'RenderWidgetHostViewAndroid::OnVSync':5,'VSYNC':10,'vblank':10,'DisplayLinkMac::GetVSyncParameters':5};var BEGIN_FRAME_SLICE_PRECISION={'Scheduler::BeginFrame':10};function VSyncAuditor(model){tr.c.Auditor.call(this,model);};VSyncAuditor.prototype={__proto__:tr.c.Auditor.prototype,runAnnotate:function(){this.model.device.vSyncTimestamps=this.findVSyncTimestamps(this.model);},findVSyncTimestamps:function(model){var times=[];var maxPrecision=Number.NEGATIVE_INFINITY;var maxTitle=undefined;function useInstead(title,precisions){var precision=precisions[title];if(precision===undefined)
 return false;if(title===maxTitle)
 return true;if(precision<=maxPrecision){if(precision===maxPrecision){console.warn('Encountered two different VSync events ('+
 maxTitle+', '+title+') with the same precision, '+'ignoring the newer one ('+title+')');}
@@ -3818,7 +3903,255 @@
 for(var tid in process.threads){var thread=process.threads[tid];for(var i=0;i<thread.sliceGroup.slices.length;i++){var slice=thread.sliceGroup.slices[i];if(useInstead(slice.title,VSYNC_SLICE_PRECISIONS))
 times.push(slice.start);else if(useInstead(slice.title,BEGIN_FRAME_SLICE_PRECISION)&&slice.args.args&&slice.args.args.frame_time_us)
 times.push(slice.args.args.frame_time_us/1000.0);}}}
-times.sort(function(x,y){return x-y;});return times;}};tr.c.Auditor.register(VSyncAuditor);return{VSyncAuditor:VSyncAuditor};});'use strict';tr.exportTo('tr.b',function(){function Settings(){return Settings;};if(tr.b.unittest&&tr.b.unittest.TestRunner){tr.b.unittest.TestRunner.addEventListener('tr-unittest-will-run',function(){if(tr.isHeadless)
+times.sort(function(x,y){return x-y;});return times;}};tr.c.Auditor.register(VSyncAuditor);return{VSyncAuditor:VSyncAuditor};});'use strict';tr.exportTo('tr.importer',function(){function EmptyImporter(events){this.importPriority=0;};EmptyImporter.canImport=function(eventData){if(eventData instanceof Array&&eventData.length==0)
+return true;if(typeof(eventData)==='string'||eventData instanceof String){return eventData.length==0;}
+return false;};EmptyImporter.prototype={__proto__:tr.importer.Importer.prototype,get importerName(){return'EmptyImporter';}};tr.importer.Importer.register(EmptyImporter);return{EmptyImporter:EmptyImporter};});'use strict';tr.exportTo('tr.model.helpers',function(){var MAIN_FRAMETIME_TYPE='main_frametime_type';var IMPL_FRAMETIME_TYPE='impl_frametime_type';var MAIN_RENDERING_STATS='BenchmarkInstrumentation::MainThreadRenderingStats';var IMPL_RENDERING_STATS='BenchmarkInstrumentation::ImplThreadRenderingStats';function getSlicesIntersectingRange(rangeOfInterest,slices){var slicesInFilterRange=[];for(var i=0;i<slices.length;i++){var slice=slices[i];if(rangeOfInterest.intersectsExplicitRangeInclusive(slice.start,slice.end))
+slicesInFilterRange.push(slice);}
+return slicesInFilterRange;}
+function ChromeProcessHelper(modelHelper,process){this.modelHelper=modelHelper;this.process=process;}
+ChromeProcessHelper.prototype={get pid(){return this.process.pid;},getFrameEventsInRange:function(frametimeType,range){var titleToGet;if(frametimeType==MAIN_FRAMETIME_TYPE)
+titleToGet=MAIN_RENDERING_STATS;else
+titleToGet=IMPL_RENDERING_STATS;var frameEvents=[];this.process.iterateAllEvents(function(event){if(event.title!==titleToGet)
+return;if(range.intersectsExplicitRangeInclusive(event.start,event.end))
+frameEvents.push(event);});frameEvents.sort(function(a,b){return a.start-b.start});return frameEvents;}};function getFrametimeDataFromEvents(frameEvents){var frametimeData=[];for(var i=1;i<frameEvents.length;i++){var diff=frameEvents[i].start-frameEvents[i-1].start;frametimeData.push({'x':frameEvents[i].start,'frametime':diff});}
+return frametimeData;}
+return{ChromeProcessHelper:ChromeProcessHelper,MAIN_FRAMETIME_TYPE:MAIN_FRAMETIME_TYPE,IMPL_FRAMETIME_TYPE:IMPL_FRAMETIME_TYPE,MAIN_RENDERING_STATS:MAIN_RENDERING_STATS,IMPL_RENDERING_STATS:IMPL_RENDERING_STATS,getSlicesIntersectingRange:getSlicesIntersectingRange,getFrametimeDataFromEvents:getFrametimeDataFromEvents};});'use strict';tr.exportTo('tr.model.helpers',function(){function ChromeBrowserHelper(modelHelper,process){tr.model.helpers.ChromeProcessHelper.call(this,modelHelper,process);this.mainThread_=process.findAtMostOneThreadNamed('CrBrowserMain');}
+ChromeBrowserHelper.isBrowserProcess=function(process){return!!process.findAtMostOneThreadNamed('CrBrowserMain');};ChromeBrowserHelper.prototype={__proto__:tr.model.helpers.ChromeProcessHelper.prototype,get rendererHelpers(){return this.modelHelper.rendererHelpers;},getLoadingEventsInRange:function(rangeOfInterest){return this.getAllAsyncSlicesMatching(function(slice){return slice.title.indexOf('WebContentsImpl Loading')===0&&rangeOfInterest.intersectsExplicitRangeInclusive(slice.start,slice.end);});},getCommitProvisionalLoadEventsInRange:function(rangeOfInterest){return this.getAllAsyncSlicesMatching(function(slice){return slice.title==='RenderFrameImpl::didCommitProvisionalLoad'&&rangeOfInterest.intersectsExplicitRangeInclusive(slice.start,slice.end);});},get hasLatencyEvents(){var hasLatency=false;this.modelHelper.model.getAllThreads().some(function(thread){thread.iterateAllEvents(function(event){if(!event.isTopLevel)
+return;if(!(event instanceof tr.e.cc.InputLatencyAsyncSlice))
+return;hasLatency=true;});return hasLatency;});return hasLatency;},getLatencyEventsInRange:function(rangeOfInterest){return this.getAllAsyncSlicesMatching(function(slice){return(slice.title.indexOf('InputLatency')===0)&&rangeOfInterest.intersectsExplicitRangeInclusive(slice.start,slice.end);});},getAllAsyncSlicesMatching:function(pred,opt_this){var events=[];this.iterAllThreads(function(thread){thread.iterateAllEvents(function(slice){if(pred.call(opt_this,slice))
+events.push(slice);});});return events;},getAllNetworkEventsInRange:function(rangeOfInterest){var networkEvents=[];this.modelHelper.model.getAllThreads().forEach(function(thread){thread.asyncSliceGroup.slices.forEach(function(slice){var match=false;if(slice.category=='net'||slice.category=='disabled-by-default-netlog'||slice.category=='netlog'){match=true;}
+if(!match)
+return;if(rangeOfInterest.intersectsExplicitRangeInclusive(slice.start,slice.end))
+networkEvents.push(slice);});});return networkEvents;},iterAllThreads:function(func,opt_this){tr.b.iterItems(this.process.threads,function(tid,thread){func.call(opt_this,thread);});tr.b.iterItems(this.rendererHelpers,function(pid,rendererHelper){var rendererProcess=rendererHelper.process;tr.b.iterItems(rendererProcess.threads,function(tid,thread){func.call(opt_this,thread);});},this);}};return{ChromeBrowserHelper:ChromeBrowserHelper};});'use strict';tr.exportTo('tr.model.helpers',function(){function ChromeGpuHelper(modelHelper,process){tr.model.helpers.ChromeProcessHelper.call(this,modelHelper,process);this.mainThread_=process.findAtMostOneThreadNamed('CrGpuMain');};ChromeGpuHelper.isGpuProcess=function(process){if(process.findAtMostOneThreadNamed('CrBrowserMain')||process.findAtMostOneThreadNamed('CrRendererMain'))
+return false;return process.findAtMostOneThreadNamed('CrGpuMain');};ChromeGpuHelper.prototype={__proto__:tr.model.helpers.ChromeProcessHelper.prototype,get mainThread(){return this.mainThread_;}};return{ChromeGpuHelper:ChromeGpuHelper};});'use strict';tr.exportTo('tr.model.helpers',function(){function ChromeRendererHelper(modelHelper,process){tr.model.helpers.ChromeProcessHelper.call(this,modelHelper,process);this.mainThread_=process.findAtMostOneThreadNamed('CrRendererMain');this.compositorThread_=process.findAtMostOneThreadNamed('Compositor');this.rasterWorkerThreads_=process.findAllThreadsMatching(function(t){if(t.name===undefined)
+return false;if(t.name.indexOf('CompositorTileWorker')===0)
+return true;if(t.name.indexOf('CompositorRasterWorker')===0)
+return true;return false;});};ChromeRendererHelper.isRenderProcess=function(process){if(!process.findAtMostOneThreadNamed('CrRendererMain'))
+return false;if(!process.findAtMostOneThreadNamed('Compositor'))
+return false;return true;};ChromeRendererHelper.prototype={__proto__:tr.model.helpers.ChromeProcessHelper.prototype,get mainThread(){return this.mainThread_;},get compositorThread(){return this.compositorThread_;},get rasterWorkerThreads(){return this.rasterWorkerThreads_;}};return{ChromeRendererHelper:ChromeRendererHelper};});'use strict';tr.exportTo('tr.model.helpers',function(){function findChromeBrowserProcess(model){var browserProcesses=[];model.getAllProcesses().forEach(function(process){if(!tr.model.helpers.ChromeBrowserHelper.isBrowserProcess(process))
+return;browserProcesses.push(process);},this);if(browserProcesses.length===0)
+return undefined;if(browserProcesses.length>1)
+return undefined;return browserProcesses[0];}
+function findChromeRenderProcesses(model){var rendererProcesses=[];model.getAllProcesses().forEach(function(process){if(!tr.model.helpers.ChromeRendererHelper.isRenderProcess(process))
+return;rendererProcesses.push(process);});return rendererProcesses;}
+function findChromeGpuProcess(model){var gpuProcesses=model.getAllProcesses().filter(tr.model.helpers.ChromeGpuHelper.isGpuProcess);if(gpuProcesses.length!=1)
+return undefined;return gpuProcesses[0];}
+function ChromeModelHelper(model){this.model_=model;this.browserProcess_=findChromeBrowserProcess(model);if(this.browserProcess_){this.browserHelper_=new tr.model.helpers.ChromeBrowserHelper(this,this.browserProcess_);}else{this.browserHelper_=undefined;}
+var gpuProcess=findChromeGpuProcess(model);if(gpuProcess){this.gpuHelper_=new tr.model.helpers.ChromeGpuHelper(this,gpuProcess);}else{this.gpuHelper_=undefined;}
+var rendererProcesses_=findChromeRenderProcesses(model);this.rendererHelpers_={};rendererProcesses_.forEach(function(renderProcess){var rendererHelper=new tr.model.helpers.ChromeRendererHelper(this,renderProcess);this.rendererHelpers_[rendererHelper.pid]=rendererHelper;},this);}
+ChromeModelHelper.guid=tr.b.GUID.allocate();ChromeModelHelper.supportsModel=function(model){if(findChromeBrowserProcess(model)!==undefined)
+return true;if(findChromeRenderProcesses(model).length)
+return true;return false;};ChromeModelHelper.prototype={get pid(){throw new Error('woah');},get process(){throw new Error('woah');},get model(){return this.model_;},get browserProcess(){return this.browserProcess_;},get browserHelper(){return this.browserHelper_;},get gpuHelper(){return this.gpuHelper_;},get rendererHelpers(){return this.rendererHelpers_;}};return{ChromeModelHelper:ChromeModelHelper};});'use strict';tr.exportTo('tr.e.cc',function(){var AsyncSlice=tr.model.AsyncSlice;var EventSet=tr.model.EventSet;var UI_COMP_NAME='INPUT_EVENT_LATENCY_UI_COMPONENT';var ORIGINAL_COMP_NAME='INPUT_EVENT_LATENCY_ORIGINAL_COMPONENT';var BEGIN_COMP_NAME='INPUT_EVENT_LATENCY_BEGIN_RWH_COMPONENT';var END_COMP_NAME='INPUT_EVENT_LATENCY_TERMINATED_FRAME_SWAP_COMPONENT';var MAIN_RENDERER_THREAD_NAME='CrRendererMain';var COMPOSITOR_THREAD_NAME='Compositor';var POSTTASK_FLOW_EVENT='disabled-by-default-toplevel.flow';var IPC_FLOW_EVENT='disabled-by-default-ipc.flow';var INPUT_EVENT_TYPE_NAMES={CHAR:'Char',CLICK:'GestureClick',CONTEXT_MENU:'ContextMenu',FLING_CANCEL:'GestureFlingCancel',FLING_START:'GestureFlingStart',KEY_DOWN:'KeyDown',KEY_DOWN_RAW:'RawKeyDown',KEY_UP:'KeyUp',LATENCY_SCROLL_UPDATE:'ScrollUpdate',MOUSE_DOWN:'MouseDown',MOUSE_ENTER:'MouseEnter',MOUSE_LEAVE:'MouseLeave',MOUSE_MOVE:'MouseMove',MOUSE_UP:'MouseUp',MOUSE_WHEEL:'MouseWheel',PINCH_BEGIN:'GesturePinchBegin',PINCH_END:'GesturePinchEnd',PINCH_UPDATE:'GesturePinchUpdate',SCROLL_BEGIN:'GestureScrollBegin',SCROLL_END:'GestureScrollEnd',SCROLL_UPDATE:'GestureScrollUpdate',SCROLL_UPDATE_RENDERER:'ScrollUpdate',SHOW_PRESS:'GestureShowPress',TAP:'GestureTap',TAP_CANCEL:'GestureTapCancel',TAP_DOWN:'GestureTapDown',TOUCH_CANCEL:'TouchCancel',TOUCH_END:'TouchEnd',TOUCH_MOVE:'TouchMove',TOUCH_START:'TouchStart',UNKNOWN:'UNKNOWN'};function InputLatencyAsyncSlice(){AsyncSlice.apply(this,arguments);this.associatedEvents_=new EventSet();this.typeName_=undefined;if(!this.isLegacyEvent)
+this.determineModernTypeName_();}
+InputLatencyAsyncSlice.prototype={__proto__:AsyncSlice.prototype,get isLegacyEvent(){return this.title==='InputLatency';},get typeName(){if(!this.typeName_)
+this.determineLegacyTypeName_();return this.typeName_;},checkTypeName_:function(){if(!this.typeName_)
+throw'Unable to determine typeName';var found=false;for(var type_name in INPUT_EVENT_TYPE_NAMES){if(this.typeName===INPUT_EVENT_TYPE_NAMES[type_name]){found=true;break;}}
+if(!found)
+this.typeName_=INPUT_EVENT_TYPE_NAMES.UNKNOWN;},determineModernTypeName_:function(){var lastColonIndex=this.title.lastIndexOf(':');if(lastColonIndex<0)
+return;var characterAfterLastColonIndex=lastColonIndex+1;this.typeName_=this.title.slice(characterAfterLastColonIndex);this.checkTypeName_();},determineLegacyTypeName_:function(){this.iterateAllDescendents(function(subSlice){var subSliceIsAInputLatencyAsyncSlice=(subSlice instanceof InputLatencyAsyncSlice);if(!subSliceIsAInputLatencyAsyncSlice)
+return;if(!subSlice.typeName)
+return;if(this.typeName_&&subSlice.typeName_){var subSliceHasDifferentTypeName=(this.typeName_!==subSlice.typeName_);if(subSliceHasDifferentTypeName){throw'InputLatencyAsyncSlice.determineLegacyTypeName_() '+' found multiple typeNames';}}
+this.typeName_=subSlice.typeName_;},this);if(!this.typeName_)
+throw'InputLatencyAsyncSlice.determineLegacyTypeName_() failed';this.checkTypeName_();},getRendererHelper:function(sourceSlices){var traceModel=this.startThread.parent.model;var modelHelper=traceModel.getOrCreateHelper(tr.model.helpers.ChromeModelHelper);if(!modelHelper)
+return undefined;var mainThread=undefined;var compositorThread=undefined;for(var i in sourceSlices){if(sourceSlices[i].parentContainer.name===MAIN_RENDERER_THREAD_NAME)
+mainThread=sourceSlices[i].parentContainer;else if(sourceSlices[i].parentContainer.name===COMPOSITOR_THREAD_NAME)
+compositorThread=sourceSlices[i].parentContainer;if(mainThread&&compositorThread)
+break;}
+var rendererHelpers=modelHelper.rendererHelpers;var pids=Object.keys(rendererHelpers);for(var i=0;i<pids.length;i++){var pid=pids[i];var rendererHelper=rendererHelpers[pid];if(rendererHelper.mainThread===mainThread||rendererHelper.compositorThread===compositorThread)
+return rendererHelper;}
+return undefined;},addEntireSliceHierarchy:function(slice){this.associatedEvents_.push(slice);slice.iterateAllSubsequentSlices(function(subsequentSlice){this.associatedEvents_.push(subsequentSlice);},this);},addDirectlyAssociatedEvents:function(flowEvents){var slices=[];flowEvents.forEach(function(flowEvent){this.associatedEvents_.push(flowEvent);var newSource=flowEvent.startSlice.mostTopLevelSlice;if(slices.indexOf(newSource)===-1)
+slices.push(newSource);},this);var lastFlowEvent=flowEvents[flowEvents.length-1];var lastSource=lastFlowEvent.endSlice.mostTopLevelSlice;if(slices.indexOf(lastSource)===-1)
+slices.push(lastSource);return slices;},addScrollUpdateEvents:function(rendererHelper){if(!rendererHelper||!rendererHelper.compositorThread)
+return;var compositorThread=rendererHelper.compositorThread;var gestureScrollUpdateStart=this.start;var gestureScrollUpdateEnd=this.end;var allCompositorAsyncSlices=compositorThread.asyncSliceGroup.slices;for(var i in allCompositorAsyncSlices){var slice=allCompositorAsyncSlices[i];if(slice.title!=='Latency::ScrollUpdate')
+continue;var parentId=slice.args.data.INPUT_EVENT_LATENCY_FORWARD_SCROLL_UPDATE_TO_MAIN_COMPONENT.sequence_number;if(parentId===undefined){if(slice.start<gestureScrollUpdateStart||slice.start>=gestureScrollUpdateEnd)
+continue;}else{if(parseInt(parentId)!==parseInt(this.id))
+continue;}
+slice.associatedEvents.forEach(function(event){this.associatedEvents_.push(event);},this);break;}},belongToOtherInputs:function(slice,flowEvents){var fromOtherInputs=false;slice.iterateEntireHierarchy(function(subsequentSlice){if(fromOtherInputs)
+return;subsequentSlice.inFlowEvents.forEach(function(inflow){if(fromOtherInputs)
+return;if(inflow.category.indexOf('input')>-1){if(flowEvents.indexOf(inflow)===-1)
+fromOtherInputs=true;}},this);},this);return fromOtherInputs;},triggerOtherInputs:function(event,flowEvents){if(event.outFlowEvents===undefined||event.outFlowEvents.length===0)
+return false;var flow=event.outFlowEvents[0];if(flow.category!==POSTTASK_FLOW_EVENT||!flow.endSlice)
+return false;var endSlice=flow.endSlice;if(this.belongToOtherInputs(endSlice.mostTopLevelSlice,flowEvents))
+return true;return false;},followSubsequentSlices:function(event,queue,visited,flowEvents){var stopFollowing=false;var inputAck=false;event.iterateAllSubsequentSlices(function(slice){if(stopFollowing)
+return;if(slice.title==='TaskQueueManager::RunTask')
+return;if(slice.title==='ThreadProxy::ScheduledActionSendBeginMainFrame')
+return;if(slice.title==='Scheduler::ScheduleBeginImplFrameDeadline'){if(this.triggerOtherInputs(slice,flowEvents))
+return;}
+if(slice.title==='CompositorImpl::PostComposite'){if(this.triggerOtherInputs(slice,flowEvents))
+return;}
+if(slice.title==='InputRouterImpl::ProcessInputEventAck')
+inputAck=true;if(inputAck&&slice.title==='InputRouterImpl::FilterAndSendWebInputEvent')
+stopFollowing=true;this.followCurrentSlice(slice,queue,visited);},this);},followCurrentSlice:function(event,queue,visited){event.outFlowEvents.forEach(function(outflow){if((outflow.category===POSTTASK_FLOW_EVENT||outflow.category===IPC_FLOW_EVENT)&&outflow.endSlice){this.associatedEvents_.push(outflow);var nextEvent=outflow.endSlice.mostTopLevelSlice;if(!visited.contains(nextEvent)){visited.push(nextEvent);queue.push(nextEvent);}}},this);},backtraceFromDraw:function(beginImplFrame,visited){var pendingEventQueue=[];pendingEventQueue.push(beginImplFrame.mostTopLevelSlice);while(pendingEventQueue.length!==0){var event=pendingEventQueue.pop();this.addEntireSliceHierarchy(event);event.inFlowEvents.forEach(function(inflow){if(inflow.category===POSTTASK_FLOW_EVENT&&inflow.startSlice){var nextEvent=inflow.startSlice.mostTopLevelSlice;if(!visited.contains(nextEvent)){visited.push(nextEvent);pendingEventQueue.push(nextEvent);}}},this);}},sortRasterizerSlices:function(rasterWorkerThreads,sortedRasterizerSlices){rasterWorkerThreads.forEach(function(rasterizer){Array.prototype.push.apply(sortedRasterizerSlices,rasterizer.sliceGroup.slices);},this);sortedRasterizerSlices.sort(function(a,b){if(a.start!==b.start)
+return a.start-b.start;return a.guid-b.guid;});},addRasterizationEvents:function(prepareTiles,rendererHelper,visited,flowEvents,sortedRasterizerSlices){if(!prepareTiles.args.prepare_tiles_id)
+return;if(!rendererHelper||!rendererHelper.rasterWorkerThreads)
+return;var rasterWorkerThreads=rendererHelper.rasterWorkerThreads;var prepare_tile_id=prepareTiles.args.prepare_tiles_id;var pendingEventQueue=[];if(sortedRasterizerSlices.length===0)
+this.sortRasterizerSlices(rasterWorkerThreads,sortedRasterizerSlices);var numFinishedTasks=0;var RASTER_TASK_TITLE='RasterizerTaskImpl::RunOnWorkerThread';var IMAGEDECODE_TASK_TITLE='ImageDecodeTaskImpl::RunOnWorkerThread';var FINISHED_TASK_TITLE='TaskSetFinishedTaskImpl::RunOnWorkerThread';for(var i=0;i<sortedRasterizerSlices.length;i++){var task=sortedRasterizerSlices[i];if(task.title===RASTER_TASK_TITLE||task.title===IMAGEDECODE_TASK_TITLE){if(task.args.source_prepare_tiles_id===prepare_tile_id)
+this.addEntireSliceHierarchy(task.mostTopLevelSlice);}else if(task.title===FINISHED_TASK_TITLE){if(task.start>prepareTiles.start){pendingEventQueue.push(task.mostTopLevelSlice);if(++numFinishedTasks===3)
+break;}}}
+while(pendingEventQueue.length!=0){var event=pendingEventQueue.pop();this.addEntireSliceHierarchy(event);this.followSubsequentSlices(event,pendingEventQueue,visited,flowEvents);}},addOtherCausallyRelatedEvents:function(rendererHelper,sourceSlices,flowEvents,sortedRasterizerSlices){var pendingEventQueue=[];var visitedEvents=new EventSet();var beginImplFrame=undefined;var prepareTiles=undefined;var sortedRasterizerSlices=[];sourceSlices.forEach(function(sourceSlice){if(!visitedEvents.contains(sourceSlice)){visitedEvents.push(sourceSlice);pendingEventQueue.push(sourceSlice);}},this);while(pendingEventQueue.length!=0){var event=pendingEventQueue.pop();this.addEntireSliceHierarchy(event);this.followCurrentSlice(event,pendingEventQueue,visitedEvents);this.followSubsequentSlices(event,pendingEventQueue,visitedEvents,flowEvents);var COMPOSITOR_PREPARE_TILES='TileManager::PrepareTiles';prepareTiles=event.findDescendentSlice(COMPOSITOR_PREPARE_TILES);if(prepareTiles)
+this.addRasterizationEvents(prepareTiles,rendererHelper,visitedEvents,flowEvents,sortedRasterizerSlices);var COMPOSITOR_ON_BIFD='Scheduler::OnBeginImplFrameDeadline';beginImplFrame=event.findDescendentSlice(COMPOSITOR_ON_BIFD);if(beginImplFrame)
+this.backtraceFromDraw(beginImplFrame,visitedEvents);}
+var INPUT_GSU='InputLatency::GestureScrollUpdate';if(this.title===INPUT_GSU)
+this.addScrollUpdateEvents(rendererHelper);},get associatedEvents(){if(this.associatedEvents_.length!==0)
+return this.associatedEvents_;var modelIndices=this.startThread.parent.model.modelIndices;var flowEvents=modelIndices.getFlowEventsWithId(this.id);if(flowEvents.length===0)
+return this.associatedEvents_;var sourceSlices=this.addDirectlyAssociatedEvents(flowEvents);var rendererHelper=this.getRendererHelper(sourceSlices);this.addOtherCausallyRelatedEvents(rendererHelper,sourceSlices,flowEvents);return this.associatedEvents_;},get inputLatency(){if(!('data'in this.args))
+return undefined;var data=this.args.data;if(!(END_COMP_NAME in data))
+return undefined;var latency=0;var endTime=data[END_COMP_NAME].time;if(ORIGINAL_COMP_NAME in data){latency=endTime-data[ORIGINAL_COMP_NAME].time;}else if(UI_COMP_NAME in data){latency=endTime-data[UI_COMP_NAME].time;}else if(BEGIN_COMP_NAME in data){latency=endTime-data[BEGIN_COMP_NAME].time;}else{throw new Error('No valid begin latency component');}
+return latency;}};var eventTypeNames=['Char','ContextMenu','GestureClick','GestureFlingCancel','GestureFlingStart','GestureScrollBegin','GestureScrollEnd','GestureScrollUpdate','GestureShowPress','GestureTap','GestureTapCancel','GestureTapDown','GesturePinchBegin','GesturePinchEnd','GesturePinchUpdate','KeyDown','KeyUp','MouseDown','MouseEnter','MouseLeave','MouseMove','MouseUp','MouseWheel','RawKeyDown','ScrollUpdate','TouchCancel','TouchEnd','TouchMove','TouchStart'];var allTypeNames=['InputLatency'];eventTypeNames.forEach(function(eventTypeName){allTypeNames.push('InputLatency:'+eventTypeName);allTypeNames.push('InputLatency::'+eventTypeName);});AsyncSlice.register(InputLatencyAsyncSlice,{typeNames:allTypeNames,categoryParts:['latencyInfo']});return{InputLatencyAsyncSlice:InputLatencyAsyncSlice,INPUT_EVENT_TYPE_NAMES:INPUT_EVENT_TYPE_NAMES};});'use strict';tr.exportTo('tr.model.um',function(){function AnimationExpectation(parentModel,initiatorTitle,start,duration){tr.model.um.UserExpectation.call(this,parentModel,initiatorTitle,start,duration);this.frameEvents_=undefined;}
+AnimationExpectation.prototype={__proto__:tr.model.um.UserExpectation.prototype,constructor:AnimationExpectation,get frameEvents(){if(this.frameEvents_)
+return this.frameEvents_;this.frameEvents_=new tr.model.EventSet();this.associatedEvents.forEach(function(event){if(event.title===tr.model.helpers.IMPL_RENDERING_STATS)
+this.frameEvents_.push(event);},this);return this.frameEvents_;}};tr.model.um.UserExpectation.register(AnimationExpectation,{stageTitle:'Animation',colorId:tr.b.ColorScheme.getColorIdForReservedName('rail_animation')});return{AnimationExpectation:AnimationExpectation};});'use strict';tr.exportTo('tr.importer',function(){function ProtoExpectation(irType,name){this.irType=irType;this.names=new Set(name?[name]:undefined);this.start=Infinity;this.end=-Infinity;this.associatedEvents=new tr.model.EventSet();this.isAnimationBegin=false;}
+ProtoExpectation.RESPONSE_TYPE='r';ProtoExpectation.ANIMATION_TYPE='a';ProtoExpectation.IGNORED_TYPE='ignored';ProtoExpectation.prototype={get isValid(){return this.end>this.start;},containsTypeNames:function(typeNames){for(var i=0;i<this.associatedEvents.length;++i){if(typeNames.indexOf(this.associatedEvents[i].typeName)>=0)
+return true;}
+return false;},containsSliceTitle:function(title){for(var i=0;i<this.associatedEvents.length;++i){if(title===this.associatedEvents[i].title)
+return true;}
+return false;},createInteractionRecord:function(model){if(!this.isValid){console.error('Invalid ProtoExpectation: '+this.debug()+' File a bug with this trace!');return undefined;}
+var initiatorTitles=[];this.names.forEach(function(name){initiatorTitles.push(name);});initiatorTitles=initiatorTitles.sort().join(',');var duration=this.end-this.start;var ir=undefined;switch(this.irType){case ProtoExpectation.RESPONSE_TYPE:ir=new tr.model.um.ResponseExpectation(model,initiatorTitles,this.start,duration,this.isAnimationBegin);break;case ProtoExpectation.ANIMATION_TYPE:ir=new tr.model.um.AnimationExpectation(model,initiatorTitles,this.start,duration);break;}
+if(!ir)
+return undefined;ir.sourceEvents.addEventSet(this.associatedEvents);function pushAssociatedEvents(event){ir.associatedEvents.push(event);if(event.associatedEvents)
+ir.associatedEvents.addEventSet(event.associatedEvents);}
+this.associatedEvents.forEach(function(event){pushAssociatedEvents(event);if(event.subSlices)
+event.subSlices.forEach(pushAssociatedEvents);});return ir;},merge:function(other){other.names.forEach(function(name){this.names.add(name);}.bind(this));this.associatedEvents.addEventSet(other.associatedEvents);this.start=Math.min(this.start,other.start);this.end=Math.max(this.end,other.end);if(other.isAnimationBegin)
+this.isAnimationBegin=true;},pushEvent:function(event){this.start=Math.min(this.start,event.start);this.end=Math.max(this.end,event.end);this.associatedEvents.push(event);},containsTimestampInclusive:function(timestamp){return(this.start<=timestamp)&&(timestamp<=this.end);},intersects:function(other){return(other.start<this.end)&&(other.end>this.start);},isNear:function(event,threshold){return(this.end+threshold)>event.start;},debug:function(){var debugString=this.irType+'(';debugString+=parseInt(this.start)+' ';debugString+=parseInt(this.end);this.associatedEvents.forEach(function(event){debugString+=' '+event.typeName;});return debugString+')';}};return{ProtoExpectation:ProtoExpectation};});'use strict';tr.exportTo('tr.importer',function(){var ProtoExpectation=tr.importer.ProtoExpectation;var INPUT_TYPE=tr.e.cc.INPUT_EVENT_TYPE_NAMES;var KEYBOARD_TYPE_NAMES=[INPUT_TYPE.CHAR,INPUT_TYPE.KEY_DOWN_RAW,INPUT_TYPE.KEY_DOWN,INPUT_TYPE.KEY_UP];var MOUSE_RESPONSE_TYPE_NAMES=[INPUT_TYPE.CLICK,INPUT_TYPE.CONTEXT_MENU];var MOUSE_WHEEL_TYPE_NAMES=[INPUT_TYPE.MOUSE_WHEEL];var MOUSE_DRAG_TYPE_NAMES=[INPUT_TYPE.MOUSE_DOWN,INPUT_TYPE.MOUSE_MOVE,INPUT_TYPE.MOUSE_UP];var TAP_TYPE_NAMES=[INPUT_TYPE.TAP,INPUT_TYPE.TAP_CANCEL,INPUT_TYPE.TAP_DOWN];var PINCH_TYPE_NAMES=[INPUT_TYPE.PINCH_BEGIN,INPUT_TYPE.PINCH_END,INPUT_TYPE.PINCH_UPDATE];var FLING_TYPE_NAMES=[INPUT_TYPE.FLING_CANCEL,INPUT_TYPE.FLING_START];var TOUCH_TYPE_NAMES=[INPUT_TYPE.TOUCH_END,INPUT_TYPE.TOUCH_MOVE,INPUT_TYPE.TOUCH_START];var SCROLL_TYPE_NAMES=[INPUT_TYPE.SCROLL_BEGIN,INPUT_TYPE.SCROLL_END,INPUT_TYPE.SCROLL_UPDATE];var ALL_HANDLED_TYPE_NAMES=[].concat(KEYBOARD_TYPE_NAMES,MOUSE_RESPONSE_TYPE_NAMES,MOUSE_WHEEL_TYPE_NAMES,MOUSE_DRAG_TYPE_NAMES,PINCH_TYPE_NAMES,TAP_TYPE_NAMES,FLING_TYPE_NAMES,TOUCH_TYPE_NAMES,SCROLL_TYPE_NAMES);var RENDERER_FLING_TITLE='InputHandlerProxy::HandleGestureFling::started';var CSS_ANIMATION_TITLE='Animation';var INPUT_MERGE_THRESHOLD_MS=200;var ANIMATION_MERGE_THRESHOLD_MS=1;var MOUSE_WHEEL_THRESHOLD_MS=40;var MOUSE_MOVE_THRESHOLD_MS=40;var KEYBOARD_IR_NAME='Keyboard';var MOUSE_IR_NAME='Mouse';var MOUSEWHEEL_IR_NAME='MouseWheel';var TAP_IR_NAME='Tap';var PINCH_IR_NAME='Pinch';var FLING_IR_NAME='Fling';var TOUCH_IR_NAME='Touch';var SCROLL_IR_NAME='Scroll';var CSS_IR_NAME='CSS';function compareEvents(x,y){if(x.start!==y.start)
+return x.start-y.start;if(x.end!==y.end)
+return x.end-y.end;if(x.guid&&y.guid)
+return x.guid-y.guid;return 0;}
+function forEventTypesIn(events,typeNames,cb,opt_this){events.forEach(function(event){if(typeNames.indexOf(event.typeName)>=0){cb.call(opt_this,event);}});}
+function causedFrame(event){for(var i=0;i<event.associatedEvents.length;++i){if(event.associatedEvents[i].title===tr.model.helpers.IMPL_RENDERING_STATS)
+return true;}
+return false;}
+function getSortedInputEvents(modelHelper){var inputEvents=[];var browserProcess=modelHelper.browserHelper.process;var mainThread=browserProcess.findAtMostOneThreadNamed('CrBrowserMain');mainThread.asyncSliceGroup.iterateAllEvents(function(slice){if(!slice.isTopLevel)
+return;if(!(slice instanceof tr.e.cc.InputLatencyAsyncSlice))
+return;if(isNaN(slice.start)||isNaN(slice.duration)||isNaN(slice.end))
+return;inputEvents.push(slice);});return inputEvents.sort(compareEvents);}
+function findProtoExpectations(modelHelper,sortedInputEvents){var protoExpectations=[];var handlers=[handleKeyboardEvents,handleMouseResponseEvents,handleMouseWheelEvents,handleMouseDragEvents,handleTapResponseEvents,handlePinchEvents,handleFlingEvents,handleTouchEvents,handleScrollEvents,handleCSSAnimations];handlers.forEach(function(handler){protoExpectations.push.apply(protoExpectations,handler(modelHelper,sortedInputEvents));});protoExpectations.sort(compareEvents);return protoExpectations;}
+function handleKeyboardEvents(modelHelper,sortedInputEvents){var protoExpectations=[];forEventTypesIn(sortedInputEvents,KEYBOARD_TYPE_NAMES,function(event){var pe=new ProtoExpectation(ProtoExpectation.RESPONSE_TYPE,KEYBOARD_IR_NAME);pe.pushEvent(event);protoExpectations.push(pe);});return protoExpectations;}
+function handleMouseResponseEvents(modelHelper,sortedInputEvents){var protoExpectations=[];forEventTypesIn(sortedInputEvents,MOUSE_RESPONSE_TYPE_NAMES,function(event){var pe=new ProtoExpectation(ProtoExpectation.RESPONSE_TYPE,MOUSE_IR_NAME);pe.pushEvent(event);protoExpectations.push(pe);});return protoExpectations;}
+function handleMouseWheelEvents(modelHelper,sortedInputEvents){var protoExpectations=[];var currentPE=undefined;var prevEvent_=undefined;forEventTypesIn(sortedInputEvents,MOUSE_WHEEL_TYPE_NAMES,function(event){var prevEvent=prevEvent_;prevEvent_=event;if(currentPE&&(prevEvent.start+MOUSE_WHEEL_THRESHOLD_MS)>=event.start){if(currentPE.irType===ProtoExpectation.ANIMATION_TYPE){currentPE.pushEvent(event);}else{currentPE=new ProtoExpectation(ProtoExpectation.ANIMATION_TYPE,MOUSEWHEEL_IR_NAME);currentPE.pushEvent(event);protoExpectations.push(currentPE);}
+return;}
+currentPE=new ProtoExpectation(ProtoExpectation.RESPONSE_TYPE,MOUSEWHEEL_IR_NAME);currentPE.pushEvent(event);protoExpectations.push(currentPE);});return protoExpectations;}
+function handleMouseDragEvents(modelHelper,sortedInputEvents){var protoExpectations=[];var currentPE=undefined;var mouseDownEvent=undefined;forEventTypesIn(sortedInputEvents,MOUSE_DRAG_TYPE_NAMES,function(event){switch(event.typeName){case INPUT_TYPE.MOUSE_DOWN:if(causedFrame(event)){var pe=new ProtoExpectation(ProtoExpectation.RESPONSE_TYPE,MOUSE_IR_NAME);pe.pushEvent(event);protoExpectations.push(pe);}else{mouseDownEvent=event;}
+break;case INPUT_TYPE.MOUSE_MOVE:if(!causedFrame(event)){var pe=new ProtoExpectation(ProtoExpectation.IGNORED_TYPE);pe.pushEvent(event);protoExpectations.push(pe);}else if(!currentPE||!currentPE.isNear(event,MOUSE_MOVE_THRESHOLD_MS)){currentPE=new ProtoExpectation(ProtoExpectation.RESPONSE_TYPE,MOUSE_IR_NAME);currentPE.pushEvent(event);if(mouseDownEvent){currentPE.associatedEvents.push(mouseDownEvent);mouseDownEvent=undefined;}
+protoExpectations.push(currentPE);}else{if(currentPE.irType===ProtoExpectation.ANIMATION_TYPE){currentPE.pushEvent(event);}else{currentPE=new ProtoExpectation(ProtoExpectation.ANIMATION_TYPE,MOUSE_IR_NAME);currentPE.pushEvent(event);protoExpectations.push(currentPE);}}
+break;case INPUT_TYPE.MOUSE_UP:if(!mouseDownEvent){var pe=new ProtoExpectation(causedFrame(event)?ProtoExpectation.RESPONSE_TYPE:ProtoExpectation.IGNORED_TYPE,MOUSE_IR_NAME);pe.pushEvent(event);protoExpectations.push(pe);break;}
+if(currentPE){currentPE.pushEvent(event);}else{currentPE=new ProtoExpectation(ProtoExpectation.RESPONSE_TYPE,MOUSE_IR_NAME);if(mouseDownEvent)
+currentPE.associatedEvents.push(mouseDownEvent);currentPE.pushEvent(event);protoExpectations.push(currentPE);}
+mouseDownEvent=undefined;currentPE=undefined;break;}});if(mouseDownEvent){currentPE=new ProtoExpectation(ProtoExpectation.IGNORED_TYPE);currentPE.pushEvent(mouseDownEvent);protoExpectations.push(currentPE);}
+return protoExpectations;}
+function handleTapResponseEvents(modelHelper,sortedInputEvents){var protoExpectations=[];var currentPE=undefined;forEventTypesIn(sortedInputEvents,TAP_TYPE_NAMES,function(event){switch(event.typeName){case INPUT_TYPE.TAP_DOWN:currentPE=new ProtoExpectation(ProtoExpectation.RESPONSE_TYPE,TAP_IR_NAME);currentPE.pushEvent(event);protoExpectations.push(currentPE);break;case INPUT_TYPE.TAP:if(currentPE){currentPE.pushEvent(event);}else{currentPE=new ProtoExpectation(ProtoExpectation.RESPONSE_TYPE,TAP_IR_NAME);currentPE.pushEvent(event);protoExpectations.push(currentPE);}
+currentPE=undefined;break;case INPUT_TYPE.TAP_CANCEL:if(!currentPE){var pe=new ProtoExpectation(ProtoExpectation.IGNORED_TYPE);pe.pushEvent(event);protoExpectations.push(pe);break;}
+if(currentPE.isNear(event,INPUT_MERGE_THRESHOLD_MS)){currentPE.pushEvent(event);}else{currentPE=new ProtoExpectation(ProtoExpectation.RESPONSE_TYPE,TAP_IR_NAME);currentPE.pushEvent(event);protoExpectations.push(currentPE);}
+currentPE=undefined;break;}});return protoExpectations;}
+function handlePinchEvents(modelHelper,sortedInputEvents){var protoExpectations=[];var currentPE=undefined;var sawFirstUpdate=false;var modelBounds=modelHelper.model.bounds;forEventTypesIn(sortedInputEvents,PINCH_TYPE_NAMES,function(event){switch(event.typeName){case INPUT_TYPE.PINCH_BEGIN:if(currentPE&&currentPE.isNear(event,INPUT_MERGE_THRESHOLD_MS)){currentPE.pushEvent(event);break;}
+currentPE=new ProtoExpectation(ProtoExpectation.RESPONSE_TYPE,PINCH_IR_NAME);currentPE.pushEvent(event);currentPE.isAnimationBegin=true;protoExpectations.push(currentPE);sawFirstUpdate=false;break;case INPUT_TYPE.PINCH_UPDATE:if(!currentPE||((currentPE.irType===ProtoExpectation.RESPONSE_TYPE)&&sawFirstUpdate)||!currentPE.isNear(event,INPUT_MERGE_THRESHOLD_MS)){currentPE=new ProtoExpectation(ProtoExpectation.ANIMATION_TYPE,PINCH_IR_NAME);currentPE.pushEvent(event);protoExpectations.push(currentPE);}else{currentPE.pushEvent(event);sawFirstUpdate=true;}
+break;case INPUT_TYPE.PINCH_END:if(currentPE){currentPE.pushEvent(event);}else{var pe=new ProtoExpectation(ProtoExpectation.IGNORED_TYPE);pe.pushEvent(event);protoExpectations.push(pe);}
+currentPE=undefined;break;}});return protoExpectations;}
+function handleFlingEvents(modelHelper,sortedInputEvents){var protoExpectations=[];var currentPE=undefined;function isRendererFling(event){return event.title===RENDERER_FLING_TITLE;}
+var browserHelper=modelHelper.browserHelper;var flingEvents=browserHelper.getAllAsyncSlicesMatching(isRendererFling);forEventTypesIn(sortedInputEvents,FLING_TYPE_NAMES,function(event){flingEvents.push(event);});flingEvents.sort(compareEvents);flingEvents.forEach(function(event){if(event.title===RENDERER_FLING_TITLE){if(currentPE){currentPE.pushEvent(event);}else{currentPE=new ProtoExpectation(ProtoExpectation.ANIMATION_TYPE,FLING_IR_NAME);currentPE.pushEvent(event);protoExpectations.push(currentPE);}
+return;}
+switch(event.typeName){case INPUT_TYPE.FLING_START:if(currentPE){console.error('Another FlingStart? File a bug with this trace!');currentPE.pushEvent(event);}else{currentPE=new ProtoExpectation(ProtoExpectation.ANIMATION_TYPE,FLING_IR_NAME);currentPE.pushEvent(event);currentPE.end=0;protoExpectations.push(currentPE);}
+break;case INPUT_TYPE.FLING_CANCEL:if(currentPE){currentPE.pushEvent(event);currentPE.end=event.start;currentPE=undefined;}else{var pe=new ProtoExpectation(ProtoExpectation.IGNORED_TYPE);pe.pushEvent(event);protoExpectations.push(pe);}
+break;}});if(currentPE&&!currentPE.end)
+currentPE.end=modelHelper.model.bounds.max;return protoExpectations;}
+function handleTouchEvents(modelHelper,sortedInputEvents){var protoExpectations=[];var currentPE=undefined;var sawFirstMove=false;forEventTypesIn(sortedInputEvents,TOUCH_TYPE_NAMES,function(event){switch(event.typeName){case INPUT_TYPE.TOUCH_START:if(currentPE){currentPE.pushEvent(event);}else{currentPE=new ProtoExpectation(ProtoExpectation.RESPONSE_TYPE,TOUCH_IR_NAME);currentPE.pushEvent(event);currentPE.isAnimationBegin=true;protoExpectations.push(currentPE);sawFirstMove=false;}
+break;case INPUT_TYPE.TOUCH_MOVE:if(!currentPE){currentPE=new ProtoExpectation(ProtoExpectation.ANIMATION_TYPE,TOUCH_IR_NAME);currentPE.pushEvent(event);protoExpectations.push(currentPE);break;}
+if((sawFirstMove&&(currentPE.irType===ProtoExpectation.RESPONSE_TYPE))||!currentPE.isNear(event,INPUT_MERGE_THRESHOLD_MS)){var prevEnd=currentPE.end;currentPE=new ProtoExpectation(ProtoExpectation.ANIMATION_TYPE,TOUCH_IR_NAME);currentPE.pushEvent(event);currentPE.start=prevEnd;protoExpectations.push(currentPE);}else{currentPE.pushEvent(event);sawFirstMove=true;}
+break;case INPUT_TYPE.TOUCH_END:if(!currentPE){var pe=new ProtoExpectation(ProtoExpectation.IGNORED_TYPE);pe.pushEvent(event);protoExpectations.push(pe);break;}
+if(currentPE.isNear(event,INPUT_MERGE_THRESHOLD_MS)){currentPE.pushEvent(event);}else{var pe=new ProtoExpectation(ProtoExpectation.IGNORED_TYPE);pe.pushEvent(event);protoExpectations.push(pe);}
+currentPE=undefined;break;}});return protoExpectations;}
+function handleScrollEvents(modelHelper,sortedInputEvents){var protoExpectations=[];var currentPE=undefined;var sawFirstUpdate=false;forEventTypesIn(sortedInputEvents,SCROLL_TYPE_NAMES,function(event){switch(event.typeName){case INPUT_TYPE.SCROLL_BEGIN:currentPE=new ProtoExpectation(ProtoExpectation.RESPONSE_TYPE,SCROLL_IR_NAME);currentPE.pushEvent(event);currentPE.isAnimationBegin=true;protoExpectations.push(currentPE);sawFirstUpdate=false;break;case INPUT_TYPE.SCROLL_UPDATE:if(currentPE){if(currentPE.isNear(event,INPUT_MERGE_THRESHOLD_MS)&&((currentPE.irType===ProtoExpectation.ANIMATION_TYPE)||!sawFirstUpdate)){currentPE.pushEvent(event);sawFirstUpdate=true;}else{currentPE=new ProtoExpectation(ProtoExpectation.ANIMATION_TYPE,SCROLL_IR_NAME);currentPE.pushEvent(event);protoExpectations.push(currentPE);}}else{currentPE=new ProtoExpectation(ProtoExpectation.ANIMATION_TYPE,SCROLL_IR_NAME);currentPE.pushEvent(event);protoExpectations.push(currentPE);}
+break;case INPUT_TYPE.SCROLL_END:if(!currentPE){console.error('ScrollEnd without ScrollUpdate? '+'File a bug with this trace!');var pe=new ProtoExpectation(ProtoExpectation.IGNORED_TYPE);pe.pushEvent(event);protoExpectations.push(pe);break;}
+currentPE.pushEvent(event);break;}});return protoExpectations;}
+function handleCSSAnimations(modelHelper,sortedInputEvents){var animationEvents=modelHelper.browserHelper.getAllAsyncSlicesMatching(function(event){return((event.title===CSS_ANIMATION_TITLE)&&(event.duration>0));});var animationRanges=[];animationEvents.forEach(function(event){var rendererHelper=new tr.model.helpers.ChromeRendererHelper(modelHelper,event.parentContainer.parent);animationRanges.push({min:event.start,max:event.end,event:event,frames:rendererHelper.getFrameEventsInRange(tr.model.helpers.IMPL_FRAMETIME_TYPE,tr.b.Range.fromExplicitRange(event.start,event.end))});});function merge(ranges){var protoExpectation=new ProtoExpectation(ProtoExpectation.ANIMATION_TYPE,CSS_IR_NAME);ranges.forEach(function(range){protoExpectation.pushEvent(range.event);protoExpectation.associatedEvents.addEventSet(range.frames);});return protoExpectation;}
+return tr.b.mergeRanges(animationRanges,ANIMATION_MERGE_THRESHOLD_MS,merge);}
+function postProcessProtoExpectations(protoExpectations){protoExpectations=mergeIntersectingResponses(protoExpectations);protoExpectations=mergeIntersectingAnimations(protoExpectations);protoExpectations=fixResponseAnimationStarts(protoExpectations);protoExpectations=fixTapResponseTouchAnimations(protoExpectations);return protoExpectations;}
+function mergeIntersectingResponses(protoExpectations){var newPEs=[];while(protoExpectations.length){var pe=protoExpectations.shift();newPEs.push(pe);if(pe.irType!==ProtoExpectation.RESPONSE_TYPE)
+continue;for(var i=0;i<protoExpectations.length;++i){var otherPE=protoExpectations[i];if(otherPE.irType!==pe.irType)
+continue;if(!otherPE.intersects(pe))
+continue;var typeNames=pe.associatedEvents.map(function(event){return event.typeName;});if(otherPE.containsTypeNames(typeNames))
+continue;pe.merge(otherPE);protoExpectations.splice(i,1);--i;}}
+return newPEs;}
+function mergeIntersectingAnimations(protoExpectations){var newPEs=[];while(protoExpectations.length){var pe=protoExpectations.shift();newPEs.push(pe);if(pe.irType!==ProtoExpectation.ANIMATION_TYPE)
+continue;var isCSS=pe.containsSliceTitle(CSS_ANIMATION_TITLE);var isFling=pe.containsTypeNames([INPUT_TYPE.FLING_START]);for(var i=0;i<protoExpectations.length;++i){var otherPE=protoExpectations[i];if(otherPE.irType!==pe.irType)
+continue;if(isCSS!=otherPE.containsSliceTitle(CSS_ANIMATION_TITLE))
+continue;if(!otherPE.intersects(pe))
+continue;if(isFling!=otherPE.containsTypeNames([INPUT_TYPE.FLING_START]))
+continue;pe.merge(otherPE);protoExpectations.splice(i,1);--i;}}
+return newPEs;}
+function fixResponseAnimationStarts(protoExpectations){protoExpectations.forEach(function(ape){if(ape.irType!==ProtoExpectation.ANIMATION_TYPE)
+return;protoExpectations.forEach(function(rpe){if(rpe.irType!==ProtoExpectation.RESPONSE_TYPE)
+return;if(!ape.containsTimestampInclusive(rpe.end))
+return;if(ape.containsTimestampInclusive(rpe.start))
+return;ape.start=rpe.end;});});return protoExpectations;}
+function fixTapResponseTouchAnimations(protoExpectations){function isTapResponse(pe){return(pe.irType===ProtoExpectation.RESPONSE_TYPE)&&pe.containsTypeNames([INPUT_TYPE.TAP]);}
+function isTouchAnimation(pe){return(pe.irType===ProtoExpectation.ANIMATION_TYPE)&&pe.containsTypeNames([INPUT_TYPE.TOUCH_MOVE])&&!pe.containsTypeNames([INPUT_TYPE.SCROLL_UPDATE,INPUT_TYPE.PINCH_UPDATE]);}
+var newPEs=[];while(protoExpectations.length){var pe=protoExpectations.shift();newPEs.push(pe);var peIsTapResponse=isTapResponse(pe);var peIsTouchAnimation=isTouchAnimation(pe);if(!peIsTapResponse&&!peIsTouchAnimation)
+continue;for(var i=0;i<protoExpectations.length;++i){var otherPE=protoExpectations[i];if(!otherPE.intersects(pe))
+continue;if(peIsTapResponse&&!isTouchAnimation(otherPE))
+continue;if(peIsTouchAnimation&&!isTapResponse(otherPE))
+continue;pe.irType=ProtoExpectation.RESPONSE_TYPE;pe.merge(otherPE);protoExpectations.splice(i,1);--i;}}
+return newPEs;}
+function checkAllInputEventsHandled(sortedInputEvents,protoExpectations){var handledEvents=[];protoExpectations.forEach(function(protoExpectation){protoExpectation.associatedEvents.forEach(function(event){if(handledEvents.indexOf(event)>=0){console.error('double-handled event',event.typeName,parseInt(event.start),parseInt(event.end),protoExpectation);return;}
+handledEvents.push(event);});});sortedInputEvents.forEach(function(event){if(handledEvents.indexOf(event)<0){console.error('UNHANDLED INPUT EVENT!',event.typeName,parseInt(event.start),parseInt(event.end));}});}
+function findInputExpectations(modelHelper){var sortedInputEvents=getSortedInputEvents(modelHelper);var protoExpectations=findProtoExpectations(modelHelper,sortedInputEvents);protoExpectations=postProcessProtoExpectations(protoExpectations);checkAllInputEventsHandled(sortedInputEvents,protoExpectations);var irs=[];protoExpectations.forEach(function(protoExpectation){var ir=protoExpectation.createInteractionRecord(modelHelper.model);if(ir)
+irs.push(ir);});return irs;}
+return{findInputExpectations:findInputExpectations,compareEvents:compareEvents,CSS_ANIMATION_TITLE:CSS_ANIMATION_TITLE};});'use strict';tr.exportTo('tr.model.um',function(){var LOAD_SUBTYPE_NAMES={SUCCESSFUL:'Successful',FAILED:'Failed',STARTUP:'Startup'};var DOES_LOAD_SUBTYPE_NAME_EXIST={};for(var key in LOAD_SUBTYPE_NAMES){DOES_LOAD_SUBTYPE_NAME_EXIST[LOAD_SUBTYPE_NAMES[key]]=true;}
+function LoadExpectation(parentModel,initiatorTitle,start,duration){if(!DOES_LOAD_SUBTYPE_NAME_EXIST[initiatorTitle])
+throw new Error(initiatorTitle+' is not in LOAD_SUBTYPE_NAMES');tr.model.um.UserExpectation.call(this,parentModel,initiatorTitle,start,duration);this.renderProcess=undefined;this.renderMainThread=undefined;this.routingId=undefined;this.parentRoutingId=undefined;this.loadFinishedEvent=undefined;}
+LoadExpectation.prototype={__proto__:tr.model.um.UserExpectation.prototype,constructor:LoadExpectation};tr.model.um.UserExpectation.register(LoadExpectation,{stageTitle:'Load',colorId:tr.b.ColorScheme.getColorIdForReservedName('rail_load')});return{LOAD_SUBTYPE_NAMES:LOAD_SUBTYPE_NAMES,LoadExpectation:LoadExpectation};});'use strict';tr.exportTo('tr.importer',function(){var NAVIGATION_START='NavigationTiming navigationStart';var FIRST_CONTENTFUL_PAINT_TITLE='firstContentfulPaint';function getAllFrameEvents(modelHelper){var frameEvents=[];frameEvents.push.apply(frameEvents,modelHelper.browserHelper.getFrameEventsInRange(tr.model.helpers.IMPL_FRAMETIME_TYPE,modelHelper.model.bounds));tr.b.iterItems(modelHelper.rendererHelpers,function(pid,renderer){frameEvents.push.apply(frameEvents,renderer.getFrameEventsInRange(tr.model.helpers.IMPL_FRAMETIME_TYPE,modelHelper.model.bounds));});return frameEvents.sort(tr.importer.compareEvents);}
+function getStartupEvents(modelHelper){function isStartupSlice(slice){return slice.title==='BrowserMainLoop::CreateThreads';}
+var events=modelHelper.browserHelper.getAllAsyncSlicesMatching(isStartupSlice);var deduper=new tr.model.EventSet();events.forEach(function(event){var sliceGroup=event.parentContainer.sliceGroup;var slice=sliceGroup&&sliceGroup.findFirstSlice();if(slice)
+deduper.push(slice);});return deduper.toArray();}
+function findLoadExpectationsInternal(modelHelper,subtypeName,openingEvents,closingEvents){var loads=[];openingEvents.forEach(function(openingEvent){closingEvents.forEach(function(closingEvent){if(openingEvent.closingEvent)
+return;if(closingEvent.openingEvent)
+return;if(closingEvent.start<=openingEvent.start)
+return;if(openingEvent.parentContainer.parent.pid!==closingEvent.parentContainer.parent.pid)
+return;openingEvent.closingEvent=closingEvent;closingEvent.openingEvent=openingEvent;var lir=new tr.model.um.LoadExpectation(modelHelper.model,subtypeName,openingEvent.start,closingEvent.end-openingEvent.start);lir.associatedEvents.push(openingEvent);lir.associatedEvents.push(closingEvent);loads.push(lir);});});return loads;}
+function findRenderLoadExpectations(modelHelper){var events=[];modelHelper.model.iterateAllEvents(function(event){if((event.title===NAVIGATION_START)||(event.title===FIRST_CONTENTFUL_PAINT_TITLE))
+events.push(event);});events.sort(tr.importer.compareEvents);var loads=[];var startEvent=undefined;events.forEach(function(event){if(event.title===NAVIGATION_START){startEvent=event;}else if(event.title===FIRST_CONTENTFUL_PAINT_TITLE){if(startEvent){loads.push(new tr.model.um.LoadExpectation(modelHelper.model,tr.model.um.LOAD_SUBTYPE_NAMES.SUCCESSFUL,startEvent.start,event.start-startEvent.start));startEvent=undefined;}}});if(startEvent){loads.push(new tr.model.um.LoadExpectation(modelHelper.model,tr.model.um.LOAD_SUBTYPE_NAMES.SUCCESSFUL,startEvent.start,modelHelper.model.bounds.max-startEvent.start));}
+return loads;}
+function findLoadExpectations(modelHelper){var loads=[];var commitLoadEvents=modelHelper.browserHelper.getCommitProvisionalLoadEventsInRange(modelHelper.model.bounds);var startupEvents=getStartupEvents(modelHelper);var frameEvents=getAllFrameEvents(modelHelper);var startupLoads=findLoadExpectationsInternal(modelHelper,tr.model.um.LOAD_SUBTYPE_NAMES.STARTUP,startupEvents,frameEvents);loads.push.apply(loads,startupLoads);loads.push.apply(loads,findRenderLoadExpectations(modelHelper));return loads;}
+return{findLoadExpectations:findLoadExpectations};});'use strict';tr.exportTo('tr.model',function(){function getAssociatedEvents(irs){var allAssociatedEvents=new tr.model.EventSet();irs.forEach(function(ir){ir.associatedEvents.forEach(function(event){if(event instanceof tr.model.FlowEvent)
+return;allAssociatedEvents.push(event);});});return allAssociatedEvents;}
+function getUnassociatedEvents(model,associatedEvents){var unassociatedEvents=new tr.model.EventSet();model.getAllProcesses().forEach(function(process){for(var tid in process.threads){var thread=process.threads[tid];thread.sliceGroup.iterateAllEvents(function(event){if(!associatedEvents.contains(event))
+unassociatedEvents.push(event);});}});return unassociatedEvents;}
+function getTotalCpuDuration(events){var cpuMs=0;events.forEach(function(event){if(event.cpuSelfTime)
+cpuMs+=event.cpuSelfTime;});return cpuMs;}
+function getIRCoverageFromModel(model){var associatedEvents=getAssociatedEvents(model.userModel.expectations);if(!associatedEvents.length)
+return undefined;var unassociatedEvents=getUnassociatedEvents(model,associatedEvents);var associatedCpuMs=getTotalCpuDuration(associatedEvents);var unassociatedCpuMs=getTotalCpuDuration(unassociatedEvents);var totalEventCount=associatedEvents.length+unassociatedEvents.length;var totalCpuMs=associatedCpuMs+unassociatedCpuMs;var coveredEventsCpuTimeRatio=undefined;if(totalCpuMs!==0)
+coveredEventsCpuTimeRatio=associatedCpuMs/totalCpuMs;return{associatedEventsCount:associatedEvents.length,unassociatedEventsCount:unassociatedEvents.length,associatedEventsCpuTimeMs:associatedCpuMs,unassociatedEventsCpuTimeMs:unassociatedCpuMs,coveredEventsCountRatio:associatedEvents.length/totalEventCount,coveredEventsCpuTimeRatio:coveredEventsCpuTimeRatio};}
+return{getIRCoverageFromModel:getIRCoverageFromModel,getAssociatedEvents:getAssociatedEvents,getUnassociatedEvents:getUnassociatedEvents};});'use strict';tr.exportTo('tr.model.um',function(){function IdleExpectation(parentModel,start,duration){var initiatorTitle='';tr.model.um.UserExpectation.call(this,parentModel,initiatorTitle,start,duration);}
+IdleExpectation.prototype={__proto__:tr.model.um.UserExpectation.prototype,constructor:IdleExpectation};tr.model.um.UserExpectation.register(IdleExpectation,{stageTitle:'Idle',colorId:tr.b.ColorScheme.getColorIdForReservedName('rail_idle')});return{IdleExpectation:IdleExpectation};});'use strict';tr.exportTo('tr.importer',function(){var INSIGNIFICANT_MS=1;function UserModelBuilder(model){this.model=model;this.modelHelper=model.getOrCreateHelper(tr.model.helpers.ChromeModelHelper);};UserModelBuilder.supportsModelHelper=function(modelHelper){return modelHelper.browserHelper!==undefined;};UserModelBuilder.prototype={buildUserModel:function(){if(!this.modelHelper||!this.modelHelper.browserHelper)
+return;var expectations=undefined;try{expectations=this.findUserExpectations();}catch(error){this.model.importWarning({type:'UserModelBuilder',message:error,showToUser:true});return;}
+expectations.forEach(function(expectation){this.model.userModel.expectations.push(expectation);},this);},findUserExpectations:function(){var expectations=[];expectations.push.apply(expectations,tr.importer.findLoadExpectations(this.modelHelper));expectations.push.apply(expectations,tr.importer.findInputExpectations(this.modelHelper));expectations.push.apply(expectations,this.findIdleExpectations(expectations));this.collectUnassociatedEvents_(expectations);return expectations;},collectUnassociatedEvents_:function(rirs){var vacuumIRs=[];rirs.forEach(function(ir){if(ir instanceof tr.model.um.LoadExpectation||ir instanceof tr.model.um.IdleExpectation)
+vacuumIRs.push(ir);});if(vacuumIRs.length===0)
+return;var allAssociatedEvents=tr.model.getAssociatedEvents(rirs);var unassociatedEvents=tr.model.getUnassociatedEvents(this.model,allAssociatedEvents);unassociatedEvents.forEach(function(event){if(!(event instanceof tr.model.ThreadSlice))
+return;if(!event.isTopLevel)
+return;for(var iri=0;iri<vacuumIRs.length;++iri){var ir=vacuumIRs[iri];if((event.start>=ir.start)&&(event.start<ir.end)){ir.associatedEvents.addEventSet(event.entireHierarchy);return;}}});},findIdleExpectations:function(otherIRs){if(this.model.bounds.isEmpty)
+return;var emptyRanges=tr.b.findEmptyRangesBetweenRanges(tr.b.convertEventsToRanges(otherIRs),this.model.bounds);var irs=[];var model=this.model;emptyRanges.forEach(function(range){if(range.max<(range.min+INSIGNIFICANT_MS))
+return;irs.push(new tr.model.um.IdleExpectation(model,range.min,range.max-range.min));});return irs;}};function createCustomizeModelLinesFromModel(model){var modelLines=[];modelLines.push('      audits.addEvent(model.browserMain,');modelLines.push('          {title: \'model start\', start: 0, end: 1});');var typeNames={};for(var typeName in tr.e.cc.INPUT_EVENT_TYPE_NAMES){typeNames[tr.e.cc.INPUT_EVENT_TYPE_NAMES[typeName]]=typeName;}
+var modelEvents=new tr.model.EventSet();model.userModel.expectations.forEach(function(ir,index){modelEvents.addEventSet(ir.sourceEvents);});modelEvents=modelEvents.toArray();modelEvents.sort(tr.importer.compareEvents);modelEvents.forEach(function(event){var startAndEnd='start: '+parseInt(event.start)+', '+'end: '+parseInt(event.end)+'});';if(event instanceof tr.e.cc.InputLatencyAsyncSlice){modelLines.push('      audits.addInputEvent(model, INPUT_TYPE.'+
+typeNames[event.typeName]+',');}else if(event.title==='RenderFrameImpl::didCommitProvisionalLoad'){modelLines.push('      audits.addCommitLoadEvent(model,');}else if(event.title==='InputHandlerProxy::HandleGestureFling::started'){modelLines.push('      audits.addFlingAnimationEvent(model,');}else if(event.title===tr.model.helpers.IMPL_RENDERING_STATS){modelLines.push('      audits.addFrameEvent(model,');}else if(event.title===tr.importer.CSS_ANIMATION_TITLE){modelLines.push('      audits.addEvent(model.rendererMain, {');modelLines.push('        title: \'Animation\', '+startAndEnd);return;}else{throw('You must extend createCustomizeModelLinesFromModel()'+'to support this event:\n'+event.title+'\n');}
+modelLines.push('          {'+startAndEnd);});modelLines.push('      audits.addEvent(model.browserMain,');modelLines.push('          {'+'title: \'model end\', '+'start: '+(parseInt(model.bounds.max)-1)+', '+'end: '+parseInt(model.bounds.max)+'});');return modelLines;}
+function createExpectedIRLinesFromModel(model){var expectedLines=[];var irCount=model.userModel.expectations.length;model.userModel.expectations.forEach(function(ir,index){var irString='      {';irString+='title: \''+ir.title+'\', ';irString+='start: '+parseInt(ir.start)+', ';irString+='end: '+parseInt(ir.end)+', ';irString+='eventCount: '+ir.sourceEvents.length;irString+='}';if(index<(irCount-1))
+irString+=',';expectedLines.push(irString);});return expectedLines;}
+function createIRFinderTestCaseStringFromModel(model){var filename=window.location.hash.substr(1);var testName=filename.substr(filename.lastIndexOf('/')+1);testName=testName.substr(0,testName.indexOf('.'));try{var testLines=[];testLines.push('  /*');testLines.push('    This test was generated from');testLines.push('    '+filename+'');testLines.push('   */');testLines.push('  test(\''+testName+'\', function() {');testLines.push('    var verifier = new UserExpectationVerifier();');testLines.push('    verifier.customizeModelCallback = function(model) {');testLines.push.apply(testLines,createCustomizeModelLinesFromModel(model));testLines.push('    };');testLines.push('    verifier.expectedIRs = [');testLines.push.apply(testLines,createExpectedIRLinesFromModel(model));testLines.push('    ];');testLines.push('    verifier.verify();');testLines.push('  });');return testLines.join('\n');}catch(error){return error;}}
+return{UserModelBuilder:UserModelBuilder,createIRFinderTestCaseStringFromModel:createIRFinderTestCaseStringFromModel};});'use strict';tr.exportTo('tr.importer',function(){var Timing=tr.b.Timing;function ImportOptions(){this.shiftWorldToZero=true;this.pruneEmptyContainers=true;this.showImportWarnings=true;this.trackDetailedModelStats=false;this.customizeModelCallback=undefined;var auditorTypes=tr.c.Auditor.getAllRegisteredTypeInfos();this.auditorConstructors=auditorTypes.map(function(typeInfo){return typeInfo.constructor;});}
+function Import(model,opt_options){if(model===undefined)
+throw new Error('Must provide model to import into.');this.importing_=false;this.importOptions_=opt_options||new ImportOptions();this.model_=model;this.model_.importOptions=this.importOptions_;}
+Import.prototype={__proto__:Object.prototype,importTraces:function(traces){var progressMeter={update:function(msg){}};tr.b.Task.RunSynchronously(this.createImportTracesTask(progressMeter,traces));},importTracesWithProgressDialog:function(traces){if(tr.isHeadless)
+throw new Error('Cannot use this method in headless mode.');var overlay=tr.ui.b.Overlay();overlay.title='Importing...';overlay.userCanClose=false;overlay.msgEl=document.createElement('div');overlay.appendChild(overlay.msgEl);overlay.msgEl.style.margin='20px';overlay.update=function(msg){this.msgEl.textContent=msg;};overlay.visible=true;var promise=tr.b.Task.RunWhenIdle(this.createImportTracesTask(overlay,traces));promise.then(function(){overlay.visible=false;},function(err){overlay.visible=false;});return promise;},createImportTracesTask:function(progressMeter,traces){if(this.importing_)
+throw new Error('Already importing.');this.importing_=true;var importTask=new tr.b.Task(function prepareImport(){progressMeter.update('I will now import your traces for you...');},this);var lastTask=importTask;var importers=[];lastTask=lastTask.timedAfter('TraceImport',function createImports(){traces=traces.slice(0);progressMeter.update('Creating importers...');for(var i=0;i<traces.length;++i)
+importers.push(this.createImporter_(traces[i]));for(var i=0;i<importers.length;i++){var subtraces=importers[i].extractSubtraces();for(var j=0;j<subtraces.length;j++){try{traces.push(subtraces[j]);importers.push(this.createImporter_(subtraces[j]));}catch(error){console.warn(error.name+': '+error.message);continue;}}}
+if(traces.length&&!this.hasEventDataDecoder_(importers)){throw new Error('Could not find an importer for the provided eventData.');}
+importers.sort(function(x,y){return x.importPriority-y.importPriority;});},this);lastTask=lastTask.timedAfter('TraceImport',function importClockSyncMarkers(task){importers.forEach(function(importer,index){task.subTask(Timing.wrapNamedFunction('TraceImport',importer.importerName,function runImportClockSyncMarkersOnOneImporter(){progressMeter.update('Importing clock sync markers '+(index+1)+' of '+
+importers.length);importer.importClockSyncMarkers();}),this);},this);},this);lastTask=lastTask.timedAfter('TraceImport',function runImport(task){importers.forEach(function(importer,index){task.subTask(Timing.wrapNamedFunction('TraceImport',importer.importerName,function runImportEventsOnOneImporter(){progressMeter.update('Importing '+(index+1)+' of '+importers.length);importer.importEvents();}),this);},this);},this);if(this.importOptions_.customizeModelCallback){lastTask=lastTask.timedAfter('TraceImport',function runCustomizeCallbacks(task){this.importOptions_.customizeModelCallback(this.model_);},this);}
+lastTask=lastTask.timedAfter('TraceImport',function importSampleData(task){importers.forEach(function(importer,index){progressMeter.update('Importing sample data '+(index+1)+'/'+importers.length);importer.importSampleData();},this);},this);lastTask=lastTask.timedAfter('TraceImport',function runAutoclosers(){progressMeter.update('Autoclosing open slices...');this.model_.autoCloseOpenSlices();this.model_.createSubSlices();},this);lastTask=lastTask.timedAfter('TraceImport',function finalizeImport(task){importers.forEach(function(importer,index){progressMeter.update('Finalizing import '+(index+1)+'/'+importers.length);importer.finalizeImport();},this);},this);lastTask=lastTask.timedAfter('TraceImport',function runPreinits(){progressMeter.update('Initializing objects (step 1/2)...');this.model_.preInitializeObjects();},this);if(this.importOptions_.pruneEmptyContainers){lastTask=lastTask.timedAfter('TraceImport',function runPruneEmptyContainers(){progressMeter.update('Pruning empty containers...');this.model_.pruneEmptyContainers();},this);}
+lastTask=lastTask.timedAfter('TraceImport',function runMergeKernelWithuserland(){progressMeter.update('Merging kernel with userland...');this.model_.mergeKernelWithUserland();},this);var auditors=[];lastTask=lastTask.timedAfter('TraceImport',function createAuditorsAndRunAnnotate(){progressMeter.update('Adding arbitrary data to model...');auditors=this.importOptions_.auditorConstructors.map(function(auditorConstructor){return new auditorConstructor(this.model_);},this);auditors.forEach(function(auditor){auditor.runAnnotate();auditor.installUserFriendlyCategoryDriverIfNeeded();});},this);lastTask=lastTask.timedAfter('TraceImport',function computeWorldBounds(){progressMeter.update('Computing final world bounds...');this.model_.computeWorldBounds(this.importOptions_.shiftWorldToZero);},this);lastTask=lastTask.timedAfter('TraceImport',function buildFlowEventIntervalTree(){progressMeter.update('Building flow event map...');this.model_.buildFlowEventIntervalTree();},this);lastTask=lastTask.timedAfter('TraceImport',function joinRefs(){progressMeter.update('Joining object refs...');this.model_.joinRefs();},this);lastTask=lastTask.timedAfter('TraceImport',function cleanupUndeletedObjects(){progressMeter.update('Cleaning up undeleted objects...');this.model_.cleanupUndeletedObjects();},this);lastTask=lastTask.timedAfter('TraceImport',function sortMemoryDumps(){progressMeter.update('Sorting memory dumps...');this.model_.sortMemoryDumps();},this);lastTask=lastTask.timedAfter('TraceImport',function finalizeMemoryGraphs(){progressMeter.update('Finalizing memory dump graphs...');this.model_.finalizeMemoryGraphs();},this);lastTask=lastTask.timedAfter('TraceImport',function initializeObjects(){progressMeter.update('Initializing objects (step 2/2)...');this.model_.initializeObjects();},this);lastTask=lastTask.timedAfter('TraceImport',function buildEventIndices(){progressMeter.update('Building event indices...');this.model_.buildEventIndices();},this);lastTask=lastTask.timedAfter('TraceImport',function buildUserModel(){progressMeter.update('Building UserModel...');var userModelBuilder=new tr.importer.UserModelBuilder(this.model_);userModelBuilder.buildUserModel();},this);lastTask=lastTask.timedAfter('TraceImport',function sortExpectations(){progressMeter.update('Sorting user expectations...');this.model_.userModel.sortExpectations();},this);lastTask=lastTask.timedAfter('TraceImport',function runAudits(){progressMeter.update('Running auditors...');auditors.forEach(function(auditor){auditor.runAudit();});},this);lastTask=lastTask.timedAfter('TraceImport',function sortAlerts(){progressMeter.update('Updating alerts...');this.model_.sortAlerts();},this);lastTask=lastTask.timedAfter('TraceImport',function lastUpdateBounds(){progressMeter.update('Update bounds...');this.model_.updateBounds();},this);lastTask=lastTask.timedAfter('TraceImport',function addModelWarnings(){progressMeter.update('Looking for warnings...');if(!this.model_.isTimeHighResolution){this.model_.importWarning({type:'low_resolution_timer',message:'Trace time is low resolution, trace may be unusable.',showToUser:true});}},this);lastTask.after(function(){this.importing_=false;},this);return importTask;},createImporter_:function(eventData){var importerConstructor=tr.importer.Importer.findImporterFor(eventData);if(!importerConstructor){throw new Error('Couldn\'t create an importer for the provided '+'eventData.');}
+return new importerConstructor(this.model_,eventData);},hasEventDataDecoder_:function(importers){for(var i=0;i<importers.length;++i){if(!importers[i].isTraceDataContainer())
+return true;}
+return false;}};return{ImportOptions:ImportOptions,Import:Import};});'use strict';tr.exportTo('tr.ui.tracks',function(){function Highlighter(viewport){if(viewport===undefined){throw new Error('viewport must be provided');}
+this.viewport_=viewport;};Highlighter.prototype={__proto__:Object.prototype,processModel:function(model){throw new Error('processModel implementation missing');},drawHighlight:function(ctx,dt,viewLWorld,viewRWorld,viewHeight){throw new Error('drawHighlight implementation missing');}};var options=new tr.b.ExtensionRegistryOptions(tr.b.BASIC_REGISTRY_MODE);options.defaultMetadata={};options.mandatoryBaseClass=Highlighter;tr.b.decorateExtensionRegistry(Highlighter,options);return{Highlighter:Highlighter};});'use strict';tr.exportTo('tr.b',function(){function Settings(){return Settings;};if(tr.b.unittest&&tr.b.unittest.TestRunner){tr.b.unittest.TestRunner.addEventListener('tr-unittest-will-run',function(){if(tr.isHeadless)
 Settings.setAlternativeStorageInstance(new HeadlessStorage());else
 Settings.setAlternativeStorageInstance(global.sessionStorage);});}
 function SessionSettings(){return SessionSettings;}
@@ -3834,65 +4167,160 @@
 return null;return this.items_[key];},removeItem:function(key){if(!this.hasItem_[key])
 return;var value=this.items_[key];delete this.hasItem_[key];delete this.items_[key];this.length--;this.itemsAsArray_=undefined;return value;},setItem:function(key,value){if(this.hasItem_[key]){this.items_[key]=value;return;}
 this.items_[key]=value;this.hasItem_[key]=true;this.length++;this.itemsAsArray_=undefined;return value;}};if(tr.isHeadless){AddStaticStorageFunctionsToClass_(Settings,new HeadlessStorage());AddStaticStorageFunctionsToClass_(SessionSettings,new HeadlessStorage());}else{AddStaticStorageFunctionsToClass_(Settings,localStorage);AddStaticStorageFunctionsToClass_(SessionSettings,sessionStorage);}
-return{Settings:Settings,SessionSettings:SessionSettings};});'use strict';tr.exportTo('tr.c',function(){function ScriptingObject(){}
-ScriptingObject.prototype={onModelChanged:function(model){}};return{ScriptingObject:ScriptingObject};});'use strict';tr.exportTo('tr.c',function(){function ScriptingController(brushingStateController){this.brushingStateController_=brushingStateController;this.scriptObjectNames_=[];this.scriptObjectValues_=[];this.brushingStateController.addEventListener('model-changed',this.onModelChanged_.bind(this));var typeInfos=ScriptingObjectRegistry.getAllRegisteredTypeInfos();typeInfos.forEach(function(typeInfo){this.addScriptObject(typeInfo.metadata.name,typeInfo.constructor);global[typeInfo.metadata.name]=typeInfo.constructor;},this);}
-function ScriptingObjectRegistry(){}
-var options=new tr.b.ExtensionRegistryOptions(tr.b.BASIC_REGISTRY_MODE);tr.b.decorateExtensionRegistry(ScriptingObjectRegistry,options);ScriptingController.prototype={get brushingStateController(){return this.brushingStateController_;},onModelChanged_:function(){this.scriptObjectValues_.forEach(function(v){if(v.onModelChanged)
-v.onModelChanged(this.brushingStateController.model);},this);},addScriptObject:function(name,value){this.scriptObjectNames_.push(name);this.scriptObjectValues_.push(value);},executeCommand:function(command){var f=new Function(this.scriptObjectNames_,'return eval('+command+')');return f.apply(null,this.scriptObjectValues_);}};return{ScriptingController:ScriptingController,ScriptingObjectRegistry:ScriptingObjectRegistry};});'use strict';Polymer('tr-ui-a-tab-view',{ready:function(){this.$.tshh.style.display='none';this.tabs_=[];this.selectedTab_=undefined;for(var i=0;i<this.children.length;i++)
-this.processAddedChild_(this.children[i]);this.childrenObserver_=new MutationObserver(this.childrenUpdated_.bind(this));this.childrenObserver_.observe(this,{childList:'true'});},get tabStripHeadingText(){return this.$.tsh.textContent;},set tabStripHeadingText(tabStripHeadingText){this.$.tsh.textContent=tabStripHeadingText;if(!!tabStripHeadingText)
-this.$.tshh.style.display='';else
-this.$.tshh.style.display='none';},get selectedTab(){this.childrenUpdated_(this.childrenObserver_.takeRecords(),this.childrenObserver_);if(this.selectedTab_)
-return this.selectedTab_.content;return undefined;},set selectedTab(content){this.childrenUpdated_(this.childrenObserver_.takeRecords(),this.childrenObserver_);if(content===undefined||content===null){this.changeSelectedTabById_(undefined);return;}
-var contentTabId=undefined;for(var i=0;i<this.tabs_.length;i++)
-if(this.tabs_[i].content===content){contentTabId=this.tabs_[i].id;break;}
-if(contentTabId===undefined)
-return;this.changeSelectedTabById_(contentTabId);},get tabsHidden(){var ts=this.shadowRoot.querySelector('tab-strip');return ts.hasAttribute('tabs-hidden');},set tabsHidden(tabsHidden){tabsHidden=!!tabsHidden;var ts=this.shadowRoot.querySelector('tab-strip');if(tabsHidden)
-ts.setAttribute('tabs-hidden',true);else
-ts.removeAttribute('tabs-hidden');},get tabs(){return this.tabs_.map(function(tabObject){return tabObject.content;});},processAddedChild_:function(child){var observerAttributeSelected=new MutationObserver(this.childAttributesChanged_.bind(this));var observerAttributeTabLabel=new MutationObserver(this.childAttributesChanged_.bind(this));var tabObject={id:this.tabs_.length,content:child,label:child.getAttribute('tab-label'),observers:{forAttributeSelected:observerAttributeSelected,forAttributeTabLabel:observerAttributeTabLabel}};this.tabs_.push(tabObject);if(child.hasAttribute('selected')){if(this.selectedTab_)
-child.removeAttribute('selected');else
-this.setSelectedTabById_(tabObject.id);}
-var previousSelected=child.selected;var tabView=this;Object.defineProperty(child,'selected',{configurable:true,set:function(value){if(value){tabView.changeSelectedTabById_(tabObject.id);return;}
-var wasSelected=tabView.selectedTab_===tabObject;if(wasSelected)
-tabView.changeSelectedTabById_(undefined);},get:function(){return this.hasAttribute('selected');}});if(previousSelected)
-child.selected=previousSelected;observerAttributeSelected.observe(child,{attributeFilter:['selected']});observerAttributeTabLabel.observe(child,{attributeFilter:['tab-label']});},processRemovedChild_:function(child){for(var i=0;i<this.tabs_.length;i++){this.tabs_[i].id=i;if(this.tabs_[i].content===child){this.tabs_[i].observers.forAttributeSelected.disconnect();this.tabs_[i].observers.forAttributeTabLabel.disconnect();if(this.tabs_[i]===this.selectedTab_){this.clearSelectedTab_();this.fire('selected-tab-change');}
-child.removeAttribute('selected');delete child.selected;this.tabs_.splice(i,1);i--;}}},childAttributesChanged_:function(mutations,observer){var tabObject=undefined;for(var i=0;i<this.tabs_.length;i++){var observers=this.tabs_[i].observers;if(observers.forAttributeSelected===observer||observers.forAttributeTabLabel===observer){tabObject=this.tabs_[i];break;}}
-if(!tabObject)
-return;for(var i=0;i<mutations.length;i++){var node=tabObject.content;if(mutations[i].attributeName==='tab-label')
-tabObject.label=node.getAttribute('tab-label');if(mutations[i].attributeName==='selected'){var nodeIsSelected=node.hasAttribute('selected');if(nodeIsSelected)
-this.changeSelectedTabById_(tabObject.id);else
-this.changeSelectedTabById_(undefined);}}},childrenUpdated_:function(mutations,observer){mutations.forEach(function(mutation){for(var i=0;i<mutation.removedNodes.length;i++)
-this.processRemovedChild_(mutation.removedNodes[i]);for(var i=0;i<mutation.addedNodes.length;i++)
-this.processAddedChild_(mutation.addedNodes[i]);},this);},tabButtonSelectHandler_:function(event,detail,sender){this.changeSelectedTabById_(sender.getAttribute('button-id'));},changeSelectedTabById_:function(id){var newTab=id!==undefined?this.tabs_[id]:undefined;var changed=this.selectedTab_!==newTab;this.saveCurrentTabScrollPosition_();this.clearSelectedTab_();if(id!==undefined){this.setSelectedTabById_(id);this.restoreCurrentTabScrollPosition_();}
-if(changed)
-this.fire('selected-tab-change');},setSelectedTabById_:function(id){this.selectedTab_=this.tabs_[id];this.selectedTab_.observers.forAttributeSelected.disconnect();this.selectedTab_.content.setAttribute('selected','selected');this.selectedTab_.observers.forAttributeSelected.observe(this.selectedTab_.content,{attributeFilter:['selected']});},saveTabStates:function(){this.saveCurrentTabScrollPosition_();},saveCurrentTabScrollPosition_:function(){if(this.selectedTab_){this.selectedTab_.content._savedScrollTop=this.$['content-container'].scrollTop;this.selectedTab_.content._savedScrollLeft=this.$['content-container'].scrollLeft;}},restoreCurrentTabScrollPosition_:function(){if(this.selectedTab_){this.$['content-container'].scrollTop=this.selectedTab_.content._savedScrollTop||0;this.$['content-container'].scrollLeft=this.selectedTab_.content._savedScrollLeft||0;}},clearSelectedTab_:function(){if(this.selectedTab_){this.selectedTab_.observers.forAttributeSelected.disconnect();this.selectedTab_.content.removeAttribute('selected');this.selectedTab_.observers.forAttributeSelected.observe(this.selectedTab_.content,{attributeFilter:['selected']});this.selectedTab_=undefined;}}});'use strict';Polymer('tr-ui-a-sub-view',{set tabLabel(label){return this.setAttribute('tab-label',label);},get tabLabel(){return this.getAttribute('tab-label');},get requiresTallView(){return false;},get relatedEventsToHighlight(){return undefined;},set selection(selection){throw new Error('Not implemented!');},get selection(){throw new Error('Not implemented!');}});'use strict';tr.exportTo('tr.ui.b',function(){var EventSet=tr.model.EventSet;var SelectionState=tr.model.SelectionState;function BrushingState(){this.guid_=tr.b.GUID.allocate();this.selection_=new EventSet();this.findMatches_=new EventSet();this.analysisViewRelatedEvents_=new EventSet();this.analysisLinkHoveredEvents_=new EventSet();this.appliedToModel_=undefined;this.viewSpecificBrushingStates_={};}
-BrushingState.prototype={get guid(){return this.guid_;},clone:function(){var that=new BrushingState();that.selection_=this.selection_;that.findMatches_=this.findMatches_;that.analysisViewRelatedEvents_=this.analysisViewRelatedEvents_;that.analysisLinkHoveredEvents_=this.analysisLinkHoveredEvents_;that.viewSpecificBrushingStates_=this.viewSpecificBrushingStates_;return that;},equals:function(that){if(!this.selection_.equals(that.selection_))
-return false;if(!this.findMatches_.equals(that.findMatches_))
-return false;if(!this.analysisViewRelatedEvents_.equals(that.analysisViewRelatedEvents_)){return false;}
-if(!this.analysisLinkHoveredEvents_.equals(that.analysisLinkHoveredEvents_)){return false;}
-return true;},get selectionOfInterest(){if(this.selection_.length)
-return this.selection_;if(this.highlight_.length)
-return this.highlight_;if(this.analysisViewRelatedEvents_.length)
-return this.analysisViewRelatedEvents_;if(this.analysisLinkHoveredEvents_.length)
-return this.analysisLinkHoveredEvents_;return this.selection_;},get selection(){return this.selection_;},set selection(selection){if(this.appliedToModel_)
-throw new Error('Cannot mutate this state right now');if(selection===undefined)
-selection=new EventSet();this.selection_=selection;},get findMatches(){return this.findMatches_;},set findMatches(findMatches){if(this.appliedToModel_)
-throw new Error('Cannot mutate this state right now');if(findMatches===undefined)
-findMatches=new EventSet();this.findMatches_=findMatches;},get analysisViewRelatedEvents(){return this.analysisViewRelatedEvents_;},set analysisViewRelatedEvents(analysisViewRelatedEvents){if(this.appliedToModel_)
-throw new Error('Cannot mutate this state right now');if(analysisViewRelatedEvents===undefined)
-analysisViewRelatedEvents=new EventSet();this.analysisViewRelatedEvents_=analysisViewRelatedEvents;},get analysisLinkHoveredEvents(){return this.analysisLinkHoveredEvents_;},set analysisLinkHoveredEvents(analysisLinkHoveredEvents){if(this.appliedToModel_)
-throw new Error('Cannot mutate this state right now');if(analysisLinkHoveredEvents===undefined)
-analysisLinkHoveredEvents=new EventSet();this.analysisLinkHoveredEvents_=analysisLinkHoveredEvents;},get isAppliedToModel(){return this.appliedToModel_!==undefined;},get viewSpecificBrushingStates(){return this.viewSpecificBrushingStates_;},set viewSpecificBrushingStates(viewSpecificBrushingStates){this.viewSpecificBrushingStates_=viewSpecificBrushingStates;},get causesDimming_(){return this.findMatches_.length>0||this.analysisViewRelatedEvents_.length>0;},get brightenedEvents_(){var brightenedEvents=new EventSet();brightenedEvents.addEventSet(this.selection_);brightenedEvents.addEventSet(this.analysisLinkHoveredEvents_);return brightenedEvents;},applyToModelSelectionState:function(model){this.appliedToModel_=model;if(!this.causesDimming_){this.brightenedEvents_.forEach(function(e){var score;score=0;if(this.selection_.contains(e))
-score++;if(this.analysisLinkHoveredEvents_.contains(e))
-score++;e.selectionState=SelectionState.getFromBrighteningLevel(score);},this);return;}
-var brightenedEvents=this.brightenedEvents_;model.iterateAllEvents(function(e){var score;if(brightenedEvents.contains(e)){score=0;if(this.selection_.contains(e))
-score++;if(this.analysisLinkHoveredEvents_.contains(e))
-score++;e.selectionState=SelectionState.getFromBrighteningLevel(score);}else{score=0;if(this.findMatches_.contains(e))
-score++;if(this.analysisViewRelatedEvents_.contains(e))
-score++;e.selectionState=SelectionState.getFromDimmingLevel(score);}}.bind(this));},transferModelOwnershipToClone:function(that){if(!this.appliedToModel_)
-throw new Error('Not applied');that.appliedToModel_=this.appliedToModel_;this.appliedToModel_=undefined;},unapplyFromModelSelectionState:function(){if(!this.appliedToModel_)
-throw new Error('Not applied');var model=this.appliedToModel_;this.appliedToModel_=undefined;if(!this.causesDimming_){this.brightenedEvents_.forEach(function(e){e.selectionState=SelectionState.NONE;});return;}
-model.iterateAllEvents(function(e){e.selectionState=SelectionState.NONE;});}};return{BrushingState:BrushingState};});'use strict';tr.exportTo('tr.ui.b',function(){function Animation(){}
-Animation.prototype={canTakeOverFor:function(existingAnimation){throw new Error('Not implemented');},takeOverFor:function(existingAnimation,newStartTimestamp,target){throw new Error('Not implemented');},start:function(timestamp,target){throw new Error('Not implemented');},didStopEarly:function(timestamp,target,willBeTakenOverByAnotherAnimation){},tick:function(timestamp,target){throw new Error('Not implemented');}};return{Animation:Animation};});'use strict';tr.exportTo('tr.ui.b',function(){function AnimationController(){tr.b.EventTarget.call(this);this.target_=undefined;this.activeAnimation_=undefined;this.tickScheduled_=false;}
+return{Settings:Settings,SessionSettings:SessionSettings};});'use strict';tr.exportTo('tr.model',function(){function Annotation(){this.guid_=tr.b.GUID.allocate();this.view_=undefined;};Annotation.fromDictIfPossible=function(args){if(args.typeName===undefined)
+throw new Error('Missing typeName argument');var typeInfo=Annotation.findTypeInfoMatching(function(typeInfo){return typeInfo.metadata.typeName===args.typeName;});if(typeInfo===undefined)
+return undefined;return typeInfo.constructor.fromDict(args);};Annotation.fromDict=function(){throw new Error('Not implemented');}
+Annotation.prototype={get guid(){return this.guid_;},onRemove:function(){},toDict:function(){throw new Error('Not implemented');},getOrCreateView:function(viewport){if(!this.view_)
+this.view_=this.createView_(viewport);return this.view_;},createView_:function(){throw new Error('Not implemented');}};var options=new tr.b.ExtensionRegistryOptions(tr.b.BASIC_REGISTRY_MODE);tr.b.decorateExtensionRegistry(Annotation,options);Annotation.addEventListener('will-register',function(e){if(!e.typeInfo.constructor.hasOwnProperty('fromDict'))
+throw new Error('Must have fromDict method');if(!e.typeInfo.metadata.typeName)
+throw new Error('Registered Annotations must provide typeName');});return{Annotation:Annotation};});'use strict';tr.exportTo('tr.ui.annotations',function(){function AnnotationView(viewport,annotation){}
+AnnotationView.prototype={draw:function(ctx){throw new Error('Not implemented');}};return{AnnotationView:AnnotationView};});'use strict';tr.exportTo('tr.ui.annotations',function(){function XMarkerAnnotationView(viewport,annotation){this.viewport_=viewport;this.annotation_=annotation;}
+XMarkerAnnotationView.prototype={__proto__:tr.ui.annotations.AnnotationView.prototype,draw:function(ctx){var dt=this.viewport_.currentDisplayTransform;var viewX=dt.xWorldToView(this.annotation_.timestamp);ctx.beginPath();tr.ui.b.drawLine(ctx,viewX,0,viewX,ctx.canvas.height);ctx.strokeStyle=this.annotation_.strokeStyle;ctx.stroke();}};return{XMarkerAnnotationView:XMarkerAnnotationView};});'use strict';tr.exportTo('tr.model',function(){function XMarkerAnnotation(timestamp){tr.model.Annotation.apply(this,arguments);this.timestamp=timestamp;this.strokeStyle='rgba(0, 0, 255, 0.5)';}
+XMarkerAnnotation.fromDict=function(dict){return new XMarkerAnnotation(dict.args.timestamp);}
+XMarkerAnnotation.prototype={__proto__:tr.model.Annotation.prototype,toDict:function(){return{typeName:'xmarker',args:{timestamp:this.timestamp}};},createView_:function(viewport){return new tr.ui.annotations.XMarkerAnnotationView(viewport,this);}};tr.model.Annotation.register(XMarkerAnnotation,{typeName:'xmarker'});return{XMarkerAnnotation:XMarkerAnnotation};});'use strict';tr.exportTo('tr.ui.b',function(){function HotKey(dict){if(dict.eventType===undefined)
+throw new Error('eventType must be given');if(dict.keyCode===undefined&&dict.keyCodes===undefined)
+throw new Error('keyCode or keyCodes must be given');if(dict.keyCode!==undefined&&dict.keyCodes!==undefined)
+throw new Error('Only keyCode or keyCodes can be given');if(dict.callback===undefined)
+throw new Error('callback must be given');this.eventType_=dict.eventType;this.keyCodes_=[];if(dict.keyCode)
+this.pushKeyCode_(dict.keyCode);else if(dict.keyCodes){dict.keyCodes.forEach(this.pushKeyCode_,this);}
+this.useCapture_=!!dict.useCapture;this.callback_=dict.callback;this.thisArg_=dict.thisArg!==undefined?dict.thisArg:undefined;this.helpText_=dict.helpText!==undefined?dict.helpText:undefined;}
+HotKey.prototype={get eventType(){return this.eventType_;},get keyCodes(){return this.keyCodes_;},get helpText(){return this.helpText_;},call:function(e){this.callback_.call(this.thisArg_,e);},pushKeyCode_:function(keyCode){this.keyCodes_.push(keyCode);}};return{HotKey:HotKey};});'use strict';Polymer('tv-ui-b-hotkey-controller',{created:function(){this.isAttached_=false;this.globalMode_=false;this.slavedToParentController_=undefined;this.curHost_=undefined;this.childControllers_=[];this.bubblingKeyDownHotKeys_={};this.capturingKeyDownHotKeys_={};this.bubblingKeyPressHotKeys_={};this.capturingKeyPressHotKeys_={};this.onBubblingKeyDown_=this.onKey_.bind(this,false);this.onCapturingKeyDown_=this.onKey_.bind(this,true);this.onBubblingKeyPress_=this.onKey_.bind(this,false);this.onCapturingKeyPress_=this.onKey_.bind(this,true);},attached:function(){this.isAttached_=true;var host=this.findHost_();if(host.__hotkeyController)
+throw new Error('Multiple hotkey controllers attached to this host');host.__hotkeyController=this;this.curHost_=host;var parentElement;if(host.parentElement)
+parentElement=host.parentElement;else
+parentElement=host.parentNode.host;var parentController=tr.b.getHotkeyControllerForElement(parentElement);if(parentController){this.slavedToParentController_=parentController;parentController.addChildController_(this);return;}
+host.addEventListener('keydown',this.onBubblingKeyDown_,false);host.addEventListener('keydown',this.onCapturingKeyDown_,true);host.addEventListener('keypress',this.onBubblingKeyPress_,false);host.addEventListener('keypress',this.onCapturingKeyPress_,true);},detached:function(){this.isAttached_=false;var host=this.curHost_;if(!host)
+return;delete host.__hotkeyController;this.curHost_=undefined;if(this.slavedToParentController_){this.slavedToParentController_.removeChildController_(this);this.slavedToParentController_=undefined;return;}
+host.removeEventListener('keydown',this.onBubblingKeyDown_,false);host.removeEventListener('keydown',this.onCapturingKeyDown_,true);host.removeEventListener('keypress',this.onBubblingKeyPress_,false);host.removeEventListener('keypress',this.onCapturingKeyPress_,true);},addChildController_:function(controller){var i=this.childControllers_.indexOf(controller);if(i!==-1)
+throw new Error('Controller already registered');this.childControllers_.push(controller);},removeChildController_:function(controller){var i=this.childControllers_.indexOf(controller);if(i===-1)
+throw new Error('Controller not registered');this.childControllers_.splice(i,1);return controller;},getKeyMapForEventType_:function(eventType,useCapture){if(eventType==='keydown'){if(!useCapture)
+return this.bubblingKeyDownHotKeys_;else
+return this.capturingKeyDownHotKeys_;}else if(eventType==='keypress'){if(!useCapture)
+return this.bubblingKeyPressHotKeys_;else
+return this.capturingKeyPressHotKeys_;}else{throw new Error('Unsupported key event');}},addHotKey:function(hotKey){if(!(hotKey instanceof tr.ui.b.HotKey))
+throw new Error('hotKey must be a tr.ui.b.HotKey');var keyMap=this.getKeyMapForEventType_(hotKey.eventType,hotKey.useCapture);for(var i=0;i<hotKey.keyCodes.length;i++){var keyCode=hotKey.keyCodes[i];if(keyMap[keyCode])
+throw new Error('Key is already bound for keyCode='+keyCode);}
+for(var i=0;i<hotKey.keyCodes.length;i++){var keyCode=hotKey.keyCodes[i];keyMap[keyCode]=hotKey;}
+return hotKey;},removeHotKey:function(hotKey){if(!(hotKey instanceof tr.ui.b.HotKey))
+throw new Error('hotKey must be a tr.ui.b.HotKey');var keyMap=this.getKeyMapForEventType_(hotKey.eventType,hotKey.useCapture);for(var i=0;i<hotKey.keyCodes.length;i++){var keyCode=hotKey.keyCodes[i];if(!keyMap[keyCode])
+throw new Error('Key is not bound for keyCode='+keyCode);keyMap[keyCode]=hotKey;}
+for(var i=0;i<hotKey.keyCodes.length;i++){var keyCode=hotKey.keyCodes[i];delete keyMap[keyCode];}
+return hotKey;},get globalMode(){return this.globalMode_;},set globalMode(globalMode){var wasAttached=this.isAttached_;if(wasAttached)
+this.detached();this.globalMode_=!!globalMode;if(wasAttached)
+this.attached();},get topmostConroller_(){if(this.slavedToParentController_)
+return this.slavedToParentController_.topmostConroller_;return this;},childRequestsGeneralFocus:function(child){var topmost=this.topmostConroller_;if(topmost.curHost_){if(topmost.curHost_.hasAttribute('tabIndex')){topmost.curHost_.focus();}else{if(document.activeElement)
+document.activeElement.blur();}}else{if(document.activeElement)
+document.activeElement.blur();}},childRequestsBlur:function(child){child.blur();var topmost=this.topmostConroller_;if(topmost.curHost_){topmost.curHost_.focus();}},findHost_:function(){if(this.globalMode_){return document.body;}else{if(this.parentElement)
+return this.parentElement;var node=this;while(node.parentNode){node=node.parentNode;}
+return node.host;}},appendMatchingHotKeysTo_:function(matchedHotKeys,useCapture,e){var localKeyMap=this.getKeyMapForEventType_(e.type,useCapture);var localHotKey=localKeyMap[e.keyCode];if(localHotKey)
+matchedHotKeys.push(localHotKey);for(var i=0;i<this.childControllers_.length;i++){var controller=this.childControllers_[i];controller.appendMatchingHotKeysTo_(matchedHotKeys,useCapture,e);}},onKey_:function(useCapture,e){if(useCapture==false&&e.path[0].tagName=='INPUT')
+return;var sortedControllers;var matchedHotKeys=[];this.appendMatchingHotKeysTo_(matchedHotKeys,useCapture,e);if(matchedHotKeys.length===0)
+return false;if(matchedHotKeys.length>1){throw new Error('More than one hotKey is currently unsupported');}
+var hotKey=matchedHotKeys[0];var prevented=0;prevented|=hotKey.call(e);return!prevented&&e.defaultPrevented;}});'use strict';tr.exportTo('tr.b',function(){function getHotkeyControllerForElement(refElement){var curElement=refElement;while(curElement){if(curElement.tagName==='tv-ui-b-hotkey-controller')
+return curElement;if(curElement.__hotkeyController)
+return curElement.__hotkeyController;if(curElement.parentElement){curElement=curElement.parentElement;continue;}
+curElement=findHost(curElement);}
+return undefined;}
+function findHost(initialNode){var node=initialNode;while(node.parentNode){node=node.parentNode;}
+return node.host;}
+return{getHotkeyControllerForElement:getHotkeyControllerForElement};});'use strict';tr.exportTo('tr.ui.b',function(){function MouseTracker(opt_targetElement){this.onMouseDown_=this.onMouseDown_.bind(this);this.onMouseMove_=this.onMouseMove_.bind(this);this.onMouseUp_=this.onMouseUp_.bind(this);this.targetElement=opt_targetElement;}
+MouseTracker.prototype={get targetElement(){return this.targetElement_;},set targetElement(targetElement){if(this.targetElement_)
+this.targetElement_.removeEventListener('mousedown',this.onMouseDown_);this.targetElement_=targetElement;if(this.targetElement_)
+this.targetElement_.addEventListener('mousedown',this.onMouseDown_);},onMouseDown_:function(e){if(e.button!==0)
+return true;e=this.remakeEvent_(e,'mouse-tracker-start');this.targetElement_.dispatchEvent(e);document.addEventListener('mousemove',this.onMouseMove_);document.addEventListener('mouseup',this.onMouseUp_);this.targetElement_.addEventListener('blur',this.onMouseUp_);this.savePreviousUserSelect_=document.body.style['-webkit-user-select'];document.body.style['-webkit-user-select']='none';e.preventDefault();return true;},onMouseMove_:function(e){e=this.remakeEvent_(e,'mouse-tracker-move');this.targetElement_.dispatchEvent(e);},onMouseUp_:function(e){document.removeEventListener('mousemove',this.onMouseMove_);document.removeEventListener('mouseup',this.onMouseUp_);this.targetElement_.removeEventListener('blur',this.onMouseUp_);document.body.style['-webkit-user-select']=this.savePreviousUserSelect_;e=this.remakeEvent_(e,'mouse-tracker-end');this.targetElement_.dispatchEvent(e);},remakeEvent_:function(e,newType){var remade=new tr.b.Event(newType,true,true);remade.x=e.x;remade.y=e.y;remade.offsetX=e.offsetX;remade.offsetY=e.offsetY;remade.clientX=e.clientX;remade.clientY=e.clientY;return remade;}};function trackMouseMovesUntilMouseUp(mouseMoveHandler,opt_mouseUpHandler,opt_keyUpHandler){function cleanupAndDispatchToMouseUp(e){document.removeEventListener('mousemove',mouseMoveHandler);if(opt_keyUpHandler)
+document.removeEventListener('keyup',opt_keyUpHandler);document.removeEventListener('mouseup',cleanupAndDispatchToMouseUp);if(opt_mouseUpHandler)
+opt_mouseUpHandler(e);}
+document.addEventListener('mousemove',mouseMoveHandler);if(opt_keyUpHandler)
+document.addEventListener('keyup',opt_keyUpHandler);document.addEventListener('mouseup',cleanupAndDispatchToMouseUp);}
+return{MouseTracker:MouseTracker,trackMouseMovesUntilMouseUp:trackMouseMovesUntilMouseUp};});'use strict';tr.exportTo('tr.ui.b',function(){var MOUSE_SELECTOR_MODE={};MOUSE_SELECTOR_MODE.SELECTION=0x1;MOUSE_SELECTOR_MODE.PANSCAN=0x2;MOUSE_SELECTOR_MODE.ZOOM=0x4;MOUSE_SELECTOR_MODE.TIMING=0x8;MOUSE_SELECTOR_MODE.ROTATE=0x10;MOUSE_SELECTOR_MODE.ALL_MODES=0x1F;var MOUSE_SELECTOR_MODE_INFOS={};MOUSE_SELECTOR_MODE_INFOS[MOUSE_SELECTOR_MODE.PANSCAN]={mode:MOUSE_SELECTOR_MODE.PANSCAN,title:'pan',eventNames:{enter:'enterpan',begin:'beginpan',update:'updatepan',end:'endpan',exit:'exitpan'},activeBackgroundPosition:'-30px -10px',defaultBackgroundPosition:'0 -10px'};MOUSE_SELECTOR_MODE_INFOS[MOUSE_SELECTOR_MODE.SELECTION]={mode:MOUSE_SELECTOR_MODE.SELECTION,title:'selection',eventNames:{enter:'enterselection',begin:'beginselection',update:'updateselection',end:'endselection',exit:'exitselection'},activeBackgroundPosition:'-30px -40px',defaultBackgroundPosition:'0 -40px'};MOUSE_SELECTOR_MODE_INFOS[MOUSE_SELECTOR_MODE.ZOOM]={mode:MOUSE_SELECTOR_MODE.ZOOM,title:'zoom',eventNames:{enter:'enterzoom',begin:'beginzoom',update:'updatezoom',end:'endzoom',exit:'exitzoom'},activeBackgroundPosition:'-30px -70px',defaultBackgroundPosition:'0 -70px'};MOUSE_SELECTOR_MODE_INFOS[MOUSE_SELECTOR_MODE.TIMING]={mode:MOUSE_SELECTOR_MODE.TIMING,title:'timing',eventNames:{enter:'entertiming',begin:'begintiming',update:'updatetiming',end:'endtiming',exit:'exittiming'},activeBackgroundPosition:'-30px -100px',defaultBackgroundPosition:'0 -100px'};MOUSE_SELECTOR_MODE_INFOS[MOUSE_SELECTOR_MODE.ROTATE]={mode:MOUSE_SELECTOR_MODE.ROTATE,title:'rotate',eventNames:{enter:'enterrotate',begin:'beginrotate',update:'updaterotate',end:'endrotate',exit:'exitrotate'},activeBackgroundPosition:'-30px -130px',defaultBackgroundPosition:'0 -130px'};return{MOUSE_SELECTOR_MODE_INFOS:MOUSE_SELECTOR_MODE_INFOS,MOUSE_SELECTOR_MODE:MOUSE_SELECTOR_MODE};});'use strict';Polymer('tr-ui-b-mouse-mode-icon',{publish:{modeName:{value:undefined,reflect:true}},created:function(){this.active_=false;this.acceleratorKey_=undefined;},ready:function(){this.updateContents_();},get mode(){return tr.ui.b.MOUSE_SELECTOR_MODE[this.modeName];},set mode(mode){var modeInfo=tr.ui.b.MOUSE_SELECTOR_MODE_INFOS[mode];var modeName=tr.b.findFirstKeyInDictMatching(tr.ui.b.MOUSE_SELECTOR_MODE,function(modeName,candidateMode){return candidateMode===mode;});if(modeName===undefined)
+throw new Error('Unknown mode');this.modeName=modeName;},modeNameChanged:function(){this.updateContents_();},get active(){return this.active_;},set active(active){this.active_=!!active;if(this.active_)
+this.classList.add('active');else
+this.classList.remove('active');this.updateContents_();},get acceleratorKey(){return this.acceleratorKey_;},set acceleratorKey(acceleratorKey){this.acceleratorKey_=acceleratorKey;this.updateContents_();},updateContents_:function(){if(this.modeName===undefined)
+return;var mode=this.mode;if(mode===undefined)
+throw new Error('Invalid mode');var modeInfo=tr.ui.b.MOUSE_SELECTOR_MODE_INFOS[mode];if(!modeInfo)
+throw new Error('Invalid mode');var title=modeInfo.title;if(this.acceleratorKey_)
+title=title+' ('+this.acceleratorKey_+')';this.title=title;var bp;if(this.active_)
+bp=modeInfo.activeBackgroundPosition;else
+bp=modeInfo.defaultBackgroundPosition;this.style.backgroundPosition=bp;}});'use strict';tr.exportTo('tr.ui.b',function(){var MOUSE_SELECTOR_MODE=tr.ui.b.MOUSE_SELECTOR_MODE;var MOUSE_SELECTOR_MODE_INFOS=tr.ui.b.MOUSE_SELECTOR_MODE_INFOS;var MIN_MOUSE_SELECTION_DISTANCE=4;var MODIFIER={SHIFT:0x1,SPACE:0x2,CMD_OR_CTRL:0x4};function isCmdOrCtrlPressed(event){if(tr.isMac)
+return event.metaKey;else
+return event.ctrlKey;}
+Polymer('tr-ui-b-mouse-mode-selector',{__proto__:HTMLDivElement.prototype,created:function(){this.supportedModeMask_=MOUSE_SELECTOR_MODE.ALL_MODES;this.initialRelativeMouseDownPos_={x:0,y:0};this.defaultMode_=MOUSE_SELECTOR_MODE.PANSCAN;this.settingsKey_=undefined;this.mousePos_={x:0,y:0};this.mouseDownPos_={x:0,y:0};this.onMouseDown_=this.onMouseDown_.bind(this);this.onMouseMove_=this.onMouseMove_.bind(this);this.onMouseUp_=this.onMouseUp_.bind(this);this.onKeyDown_=this.onKeyDown_.bind(this);this.onKeyUp_=this.onKeyUp_.bind(this);this.mode_=undefined;this.modeToKeyCodeMap_={};this.modifierToModeMap_={};this.targetElement_=undefined;this.modeBeforeAlternativeModeActivated_=null;this.isInteracting_=false;this.isClick_=false;},ready:function(){this.buttonsEl_=this.shadowRoot.querySelector('.buttons');this.dragHandleEl_=this.shadowRoot.querySelector('.drag-handle');this.supportedModeMask=MOUSE_SELECTOR_MODE.ALL_MODES;this.dragHandleEl_.addEventListener('mousedown',this.onDragHandleMouseDown_.bind(this));this.buttonsEl_.addEventListener('mouseup',this.onButtonMouseUp_);this.buttonsEl_.addEventListener('mousedown',this.onButtonMouseDown_);this.buttonsEl_.addEventListener('click',this.onButtonPress_.bind(this));},attached:function(){document.addEventListener('keydown',this.onKeyDown_);document.addEventListener('keyup',this.onKeyUp_);},detached:function(){document.removeEventListener('keydown',this.onKeyDown_);document.removeEventListener('keyup',this.onKeyUp_);},get targetElement(){return this.targetElement_;},set targetElement(target){if(this.targetElement_)
+this.targetElement_.removeEventListener('mousedown',this.onMouseDown_);this.targetElement_=target;if(this.targetElement_)
+this.targetElement_.addEventListener('mousedown',this.onMouseDown_);},get defaultMode(){return this.defaultMode_;},set defaultMode(defaultMode){this.defaultMode_=defaultMode;},get settingsKey(){return this.settingsKey_;},set settingsKey(settingsKey){this.settingsKey_=settingsKey;if(!this.settingsKey_)
+return;var mode=tr.b.Settings.get(this.settingsKey_+'.mode',undefined);if(MOUSE_SELECTOR_MODE_INFOS[mode]===undefined)
+mode=undefined;if((mode&this.supportedModeMask_)===0)
+mode=undefined;if(!mode)
+mode=this.defaultMode_;this.mode=mode;var pos=tr.b.Settings.get(this.settingsKey_+'.pos',undefined);if(pos)
+this.pos=pos;},get supportedModeMask(){return this.supportedModeMask_;},set supportedModeMask(supportedModeMask){if(this.mode&&(supportedModeMask&this.mode)===0)
+throw new Error('supportedModeMask must include current mode.');function createButtonForMode(mode){return button;}
+this.supportedModeMask_=supportedModeMask;this.buttonsEl_.textContent='';for(var modeName in MOUSE_SELECTOR_MODE){if(modeName=='ALL_MODES')
+continue;var mode=MOUSE_SELECTOR_MODE[modeName];if((this.supportedModeMask_&mode)===0)
+continue;var button=document.createElement('tr-ui-b-mouse-mode-icon');button.mode=mode;button.classList.add('tool-button');this.buttonsEl_.appendChild(button);}},getButtonForMode_:function(mode){for(var i=0;i<this.buttonsEl_.children.length;i++){var buttonEl=this.buttonsEl_.children[i];if(buttonEl.mode===mode)
+return buttonEl;}
+return undefined;},get mode(){return this.currentMode_;},set mode(newMode){if(newMode!==undefined){if(typeof newMode!=='number')
+throw new Error('Mode must be a number');if((newMode&this.supportedModeMask_)===0)
+throw new Error('Cannot switch to this mode, it is not supported');if(MOUSE_SELECTOR_MODE_INFOS[newMode]===undefined)
+throw new Error('Unrecognized mode');}
+var modeInfo;if(this.currentMode_===newMode)
+return;if(this.currentMode_){var buttonEl=this.getButtonForMode_(this.currentMode_);if(buttonEl)
+buttonEl.active=false;if(this.isInteracting_){var mouseEvent=this.createEvent_(MOUSE_SELECTOR_MODE_INFOS[this.mode].eventNames.end);this.dispatchEvent(mouseEvent);}
+modeInfo=MOUSE_SELECTOR_MODE_INFOS[this.currentMode_];tr.b.dispatchSimpleEvent(this,modeInfo.eventNames.exit,true);}
+this.currentMode_=newMode;if(this.currentMode_){var buttonEl=this.getButtonForMode_(this.currentMode_);if(buttonEl)
+buttonEl.active=true;this.mouseDownPos_.x=this.mousePos_.x;this.mouseDownPos_.y=this.mousePos_.y;modeInfo=MOUSE_SELECTOR_MODE_INFOS[this.currentMode_];if(!this.isInAlternativeMode_)
+tr.b.dispatchSimpleEvent(this,modeInfo.eventNames.enter,true);if(this.isInteracting_){var mouseEvent=this.createEvent_(MOUSE_SELECTOR_MODE_INFOS[this.mode].eventNames.begin);this.dispatchEvent(mouseEvent);}}
+if(this.settingsKey_&&!this.isInAlternativeMode_)
+tr.b.Settings.set(this.settingsKey_+'.mode',this.mode);},setKeyCodeForMode:function(mode,keyCode){if((mode&this.supportedModeMask_)===0)
+throw new Error('Mode not supported');this.modeToKeyCodeMap_[mode]=keyCode;if(!this.buttonsEl_)
+return;var buttonEl=this.getButtonForMode_(mode);if(buttonEl)
+buttonEl.acceleratorKey=String.fromCharCode(keyCode);},setCurrentMousePosFromEvent_:function(e){this.mousePos_.x=e.clientX;this.mousePos_.y=e.clientY;},createEvent_:function(eventName,sourceEvent){var event=new tr.b.Event(eventName,true);event.clientX=this.mousePos_.x;event.clientY=this.mousePos_.y;event.deltaX=this.mousePos_.x-this.mouseDownPos_.x;event.deltaY=this.mousePos_.y-this.mouseDownPos_.y;event.mouseDownX=this.mouseDownPos_.x;event.mouseDownY=this.mouseDownPos_.y;event.didPreventDefault=false;event.preventDefault=function(){event.didPreventDefault=true;if(sourceEvent)
+sourceEvent.preventDefault();};event.stopPropagation=function(){sourceEvent.stopPropagation();};event.stopImmediatePropagation=function(){throw new Error('Not implemented');};return event;},onMouseDown_:function(e){if(e.button!==0)
+return;this.setCurrentMousePosFromEvent_(e);var mouseEvent=this.createEvent_(MOUSE_SELECTOR_MODE_INFOS[this.mode].eventNames.begin,e);if(this.mode===MOUSE_SELECTOR_MODE.SELECTION)
+mouseEvent.appendSelection=isCmdOrCtrlPressed(e);this.dispatchEvent(mouseEvent);this.isInteracting_=true;this.isClick_=true;tr.ui.b.trackMouseMovesUntilMouseUp(this.onMouseMove_,this.onMouseUp_);},onMouseMove_:function(e){this.setCurrentMousePosFromEvent_(e);var mouseEvent=this.createEvent_(MOUSE_SELECTOR_MODE_INFOS[this.mode].eventNames.update,e);this.dispatchEvent(mouseEvent);if(this.isInteracting_)
+this.checkIsClick_(e);},onMouseUp_:function(e){if(e.button!==0)
+return;var mouseEvent=this.createEvent_(MOUSE_SELECTOR_MODE_INFOS[this.mode].eventNames.end,e);mouseEvent.isClick=this.isClick_;this.dispatchEvent(mouseEvent);if(this.isClick_&&!mouseEvent.didPreventDefault)
+this.dispatchClickEvents_(e);this.isInteracting_=false;this.updateAlternativeModeState_(e);},onButtonMouseDown_:function(e){e.preventDefault();e.stopImmediatePropagation();},onButtonMouseUp_:function(e){e.preventDefault();e.stopImmediatePropagation();},onButtonPress_:function(e){this.modeBeforeAlternativeModeActivated_=undefined;this.mode=e.target.mode;e.preventDefault();},onKeyDown_:function(e){if(e.path[0].tagName=='INPUT')
+return;if(e.keyCode===' '.charCodeAt(0))
+this.spacePressed_=true;this.updateAlternativeModeState_(e);},onKeyUp_:function(e){if(e.path[0].tagName=='INPUT')
+return;if(e.keyCode===' '.charCodeAt(0))
+this.spacePressed_=false;var didHandleKey=false;tr.b.iterItems(this.modeToKeyCodeMap_,function(modeStr,keyCode){if(e.keyCode===keyCode){this.modeBeforeAlternativeModeActivated_=undefined;var mode=parseInt(modeStr);this.mode=mode;didHandleKey=true;}},this);if(didHandleKey){e.preventDefault();e.stopPropagation();return;}
+this.updateAlternativeModeState_(e);},updateAlternativeModeState_:function(e){var shiftPressed=e.shiftKey;var spacePressed=this.spacePressed_;var cmdOrCtrlPressed=isCmdOrCtrlPressed(e);var smm=this.supportedModeMask_;var newMode;var isNewModeAnAlternativeMode=false;if(shiftPressed&&(this.modifierToModeMap_[MODIFIER.SHIFT]&smm)!==0){newMode=this.modifierToModeMap_[MODIFIER.SHIFT];isNewModeAnAlternativeMode=true;}else if(spacePressed&&(this.modifierToModeMap_[MODIFIER.SPACE]&smm)!==0){newMode=this.modifierToModeMap_[MODIFIER.SPACE];isNewModeAnAlternativeMode=true;}else if(cmdOrCtrlPressed&&(this.modifierToModeMap_[MODIFIER.CMD_OR_CTRL]&smm)!==0){newMode=this.modifierToModeMap_[MODIFIER.CMD_OR_CTRL];isNewModeAnAlternativeMode=true;}else{if(this.isInAlternativeMode_){newMode=this.modeBeforeAlternativeModeActivated_;isNewModeAnAlternativeMode=false;}else{newMode=undefined;}}
+if(this.mode===newMode||newMode===undefined)
+return;if(isNewModeAnAlternativeMode)
+this.modeBeforeAlternativeModeActivated_=this.mode;this.mode=newMode;},get isInAlternativeMode_(){return!!this.modeBeforeAlternativeModeActivated_;},setModifierForAlternateMode:function(mode,modifier){this.modifierToModeMap_[modifier]=mode;},get pos(){return{x:parseInt(this.style.left),y:parseInt(this.style.top)};},set pos(pos){pos=this.constrainPositionToBounds_(pos);this.style.left=pos.x+'px';this.style.top=pos.y+'px';if(this.settingsKey_)
+tr.b.Settings.set(this.settingsKey_+'.pos',this.pos);},constrainPositionToBounds_:function(pos){var parent=this.offsetParent||document.body;var parentRect=tr.ui.b.windowRectForElement(parent);var top=0;var bottom=parentRect.height-this.offsetHeight;var left=0;var right=parentRect.width-this.offsetWidth;var res={};res.x=Math.max(pos.x,left);res.x=Math.min(res.x,right);res.y=Math.max(pos.y,top);res.y=Math.min(res.y,bottom);return res;},onDragHandleMouseDown_:function(e){e.preventDefault();e.stopImmediatePropagation();var mouseDownPos={x:e.clientX-this.offsetLeft,y:e.clientY-this.offsetTop};tr.ui.b.trackMouseMovesUntilMouseUp(function(e){var pos={};pos.x=e.clientX-mouseDownPos.x;pos.y=e.clientY-mouseDownPos.y;this.pos=pos;}.bind(this));},checkIsClick_:function(e){if(!this.isInteracting_||!this.isClick_)
+return;var deltaX=this.mousePos_.x-this.mouseDownPos_.x;var deltaY=this.mousePos_.y-this.mouseDownPos_.y;var minDist=MIN_MOUSE_SELECTION_DISTANCE;if(deltaX*deltaX+deltaY*deltaY>minDist*minDist)
+this.isClick_=false;},dispatchClickEvents_:function(e){if(!this.isClick_)
+return;var modeInfo=MOUSE_SELECTOR_MODE_INFOS[MOUSE_SELECTOR_MODE.SELECTION];var eventNames=modeInfo.eventNames;var mouseEvent=this.createEvent_(eventNames.begin);mouseEvent.appendSelection=isCmdOrCtrlPressed(e);this.dispatchEvent(mouseEvent);mouseEvent=this.createEvent_(eventNames.end);this.dispatchEvent(mouseEvent);}});return{MIN_MOUSE_SELECTION_DISTANCE:MIN_MOUSE_SELECTION_DISTANCE,MODIFIER:MODIFIER};});'use strict';tr.exportTo('tr.ui.b',function(){function TimingTool(viewport,targetElement){this.viewport_=viewport;this.onMouseMove_=this.onMouseMove_.bind(this);this.onDblClick_=this.onDblClick_.bind(this);this.targetElement_=targetElement;this.isMovingLeftEdge_=false;};TimingTool.prototype={onEnterTiming:function(e){this.targetElement_.addEventListener('mousemove',this.onMouseMove_);this.targetElement_.addEventListener('dblclick',this.onDblClick_);},onBeginTiming:function(e){if(!this.isTouchPointInsideTrackBounds_(e.clientX,e.clientY))
+return;var pt=this.getSnappedToEventPosition_(e);this.mouseDownAt_(pt.x,pt.y);this.updateSnapIndicators_(pt);},updateSnapIndicators_:function(pt){if(!pt.snapped)
+return;var ir=this.viewport_.interestRange;if(ir.min===pt.x)
+ir.leftSnapIndicator=new tr.ui.SnapIndicator(pt.y,pt.height);if(ir.max===pt.x)
+ir.rightSnapIndicator=new tr.ui.SnapIndicator(pt.y,pt.height);},onUpdateTiming:function(e){var pt=this.getSnappedToEventPosition_(e);this.mouseMoveAt_(pt.x,pt.y,true);this.updateSnapIndicators_(pt);},onEndTiming:function(e){this.mouseUp_();},onExitTiming:function(e){this.targetElement_.removeEventListener('mousemove',this.onMouseMove_);this.targetElement_.removeEventListener('dblclick',this.onDblClick_);},onMouseMove_:function(e){if(e.button)
+return;var worldX=this.getWorldXFromEvent_(e);this.mouseMoveAt_(worldX,e.clientY,false);},onDblClick_:function(e){console.error('not implemented');},isTouchPointInsideTrackBounds_:function(clientX,clientY){if(!this.viewport_||!this.viewport_.modelTrackContainer||!this.viewport_.modelTrackContainer.canvas)
+return false;var canvas=this.viewport_.modelTrackContainer.canvas;var canvasRect=canvas.getBoundingClientRect();if(clientX>=canvasRect.left&&clientX<=canvasRect.right&&clientY>=canvasRect.top&&clientY<=canvasRect.bottom)
+return true;return false;},mouseDownAt_:function(worldX,y){var ir=this.viewport_.interestRange;var dt=this.viewport_.currentDisplayTransform;var pixelRatio=window.devicePixelRatio||1;var nearnessThresholdWorld=dt.xViewVectorToWorld(6*pixelRatio);if(ir.isEmpty){ir.setMinAndMax(worldX,worldX);ir.rightSelected=true;this.isMovingLeftEdge_=false;return;}
+if(Math.abs(worldX-ir.min)<nearnessThresholdWorld){ir.leftSelected=true;ir.min=worldX;this.isMovingLeftEdge_=true;return;}
+if(Math.abs(worldX-ir.max)<nearnessThresholdWorld){ir.rightSelected=true;ir.max=worldX;this.isMovingLeftEdge_=false;return;}
+ir.setMinAndMax(worldX,worldX);ir.rightSelected=true;this.isMovingLeftEdge_=false;},mouseMoveAt_:function(worldX,y,mouseDown){var ir=this.viewport_.interestRange;if(mouseDown){this.updateMovingEdge_(worldX);return;}
+var ir=this.viewport_.interestRange;var dt=this.viewport_.currentDisplayTransform;var pixelRatio=window.devicePixelRatio||1;var nearnessThresholdWorld=dt.xViewVectorToWorld(6*pixelRatio);if(Math.abs(worldX-ir.min)<nearnessThresholdWorld){ir.leftSelected=true;ir.rightSelected=false;return;}
+if(Math.abs(worldX-ir.max)<nearnessThresholdWorld){ir.leftSelected=false;ir.rightSelected=true;return;}
+ir.leftSelected=false;ir.rightSelected=false;return;},updateMovingEdge_:function(newWorldX){var ir=this.viewport_.interestRange;var a=ir.min;var b=ir.max;if(this.isMovingLeftEdge_)
+a=newWorldX;else
+b=newWorldX;if(a<=b)
+ir.setMinAndMax(a,b);else
+ir.setMinAndMax(b,a);if(ir.min==newWorldX){this.isMovingLeftEdge_=true;ir.leftSelected=true;ir.rightSelected=false;}else{this.isMovingLeftEdge_=false;ir.leftSelected=false;ir.rightSelected=true;}},mouseUp_:function(){var dt=this.viewport_.currentDisplayTransform;var ir=this.viewport_.interestRange;ir.leftSelected=false;ir.rightSelected=false;var pixelRatio=window.devicePixelRatio||1;var minWidthValue=dt.xViewVectorToWorld(2*pixelRatio);if(ir.range<minWidthValue)
+ir.reset();},getWorldXFromEvent_:function(e){var pixelRatio=window.devicePixelRatio||1;var canvas=this.viewport_.modelTrackContainer.canvas;var worldOffset=canvas.getBoundingClientRect().left;var viewX=(e.clientX-worldOffset)*pixelRatio;return this.viewport_.currentDisplayTransform.xViewToWorld(viewX);},getSnappedToEventPosition_:function(e){var pixelRatio=window.devicePixelRatio||1;var EVENT_SNAP_RANGE=16*pixelRatio;var modelTrackContainer=this.viewport_.modelTrackContainer;var modelTrackContainerRect=modelTrackContainer.getBoundingClientRect();var viewport=this.viewport_;var dt=viewport.currentDisplayTransform;var worldMaxDist=dt.xViewVectorToWorld(EVENT_SNAP_RANGE);var worldX=this.getWorldXFromEvent_(e);var mouseY=e.clientY;var selection=new tr.model.EventSet();modelTrackContainer.addClosestEventToSelection(worldX,worldMaxDist,mouseY,mouseY,selection);if(!selection.length){modelTrackContainer.addClosestEventToSelection(worldX,worldMaxDist,modelTrackContainerRect.top,modelTrackContainerRect.bottom,selection);}
+var minDistX=worldMaxDist;var minDistY=Infinity;var pixWidth=dt.xViewVectorToWorld(1);var result={x:worldX,y:mouseY-modelTrackContainerRect.top,height:0,snapped:false};var eventBounds=new tr.b.Range();for(var i=0;i<selection.length;i++){var event=selection[i];var track=viewport.trackForEvent(event);var trackRect=track.getBoundingClientRect();eventBounds.reset();event.addBoundsToRange(eventBounds);var eventX;if(Math.abs(eventBounds.min-worldX)<Math.abs(eventBounds.max-worldX)){eventX=eventBounds.min;}else{eventX=eventBounds.max;}
+var distX=eventX-worldX;var eventY=trackRect.top;var eventHeight=trackRect.height;var distY=Math.abs(eventY+eventHeight/2-mouseY);if((distX<=minDistX||Math.abs(distX-minDistX)<pixWidth)&&distY<minDistY){minDistX=distX;minDistY=distY;result.x=eventX;result.y=eventY+
+modelTrackContainer.scrollTop-modelTrackContainerRect.top;result.height=eventHeight;result.snapped=true;}}
+return result;}};return{TimingTool:TimingTool};});'use strict';tr.exportTo('tr.ui.b',function(){function Animation(){}
+Animation.prototype={canTakeOverFor:function(existingAnimation){throw new Error('Not implemented');},takeOverFor:function(existingAnimation,newStartTimestamp,target){throw new Error('Not implemented');},start:function(timestamp,target){throw new Error('Not implemented');},didStopEarly:function(timestamp,target,willBeTakenOverByAnotherAnimation){},tick:function(timestamp,target){throw new Error('Not implemented');}};return{Animation:Animation};});'use strict';tr.exportTo('tr.ui',function(){var kDefaultPanAnimationDurationMs=100.0;function TimelineDisplayTransformPanAnimation(deltaX,deltaY,opt_durationMs){this.deltaX=deltaX;this.deltaY=deltaY;if(opt_durationMs===undefined)
+this.durationMs=kDefaultPanAnimationDurationMs;else
+this.durationMs=opt_durationMs;this.startPanX=undefined;this.startPanY=undefined;this.startTimeMs=undefined;}
+TimelineDisplayTransformPanAnimation.prototype={__proto__:tr.ui.b.Animation.prototype,get affectsPanY(){return this.deltaY!==0;},canTakeOverFor:function(existingAnimation){return existingAnimation instanceof TimelineDisplayTransformPanAnimation;},takeOverFor:function(existing,timestamp,target){var remainingDeltaXOnExisting=existing.goalPanX-target.panX;var remainingDeltaYOnExisting=existing.goalPanY-target.panY;var remainingTimeOnExisting=timestamp-(existing.startTimeMs+existing.durationMs);remainingTimeOnExisting=Math.max(remainingTimeOnExisting,0);this.deltaX+=remainingDeltaXOnExisting;this.deltaY+=remainingDeltaYOnExisting;this.durationMs+=remainingTimeOnExisting;},start:function(timestamp,target){this.startTimeMs=timestamp;this.startPanX=target.panX;this.startPanY=target.panY;},tick:function(timestamp,target){var percentDone=(timestamp-this.startTimeMs)/this.durationMs;percentDone=tr.b.clamp(percentDone,0,1);target.panX=tr.b.lerp(percentDone,this.startPanX,this.goalPanX);if(this.affectsPanY)
+target.panY=tr.b.lerp(percentDone,this.startPanY,this.goalPanY);return timestamp>=this.startTimeMs+this.durationMs;},get goalPanX(){return this.startPanX+this.deltaX;},get goalPanY(){return this.startPanY+this.deltaY;}};function TimelineDisplayTransformZoomToAnimation(goalFocalPointXWorld,goalFocalPointXView,goalFocalPointY,zoomInRatioX,opt_durationMs){this.goalFocalPointXWorld=goalFocalPointXWorld;this.goalFocalPointXView=goalFocalPointXView;this.goalFocalPointY=goalFocalPointY;this.zoomInRatioX=zoomInRatioX;if(opt_durationMs===undefined)
+this.durationMs=kDefaultPanAnimationDurationMs;else
+this.durationMs=opt_durationMs;this.startTimeMs=undefined;this.startScaleX=undefined;this.goalScaleX=undefined;this.startPanY=undefined;}
+TimelineDisplayTransformZoomToAnimation.prototype={__proto__:tr.ui.b.Animation.prototype,get affectsPanY(){return this.startPanY!=this.goalFocalPointY;},canTakeOverFor:function(existingAnimation){return false;},takeOverFor:function(existingAnimation,timestamp,target){this.goalScaleX=target.scaleX*this.zoomInRatioX;},start:function(timestamp,target){this.startTimeMs=timestamp;this.startScaleX=target.scaleX;this.goalScaleX=this.zoomInRatioX*target.scaleX;this.startPanY=target.panY;},tick:function(timestamp,target){var percentDone=(timestamp-this.startTimeMs)/this.durationMs;percentDone=tr.b.clamp(percentDone,0,1);target.scaleX=tr.b.lerp(percentDone,this.startScaleX,this.goalScaleX);if(this.affectsPanY){target.panY=tr.b.lerp(percentDone,this.startPanY,this.goalFocalPointY);}
+target.xPanWorldPosToViewPos(this.goalFocalPointXWorld,this.goalFocalPointXView);return timestamp>=this.startTimeMs+this.durationMs;}};return{TimelineDisplayTransformPanAnimation:TimelineDisplayTransformPanAnimation,TimelineDisplayTransformZoomToAnimation:TimelineDisplayTransformZoomToAnimation};});'use strict';tr.exportTo('tr.ui.b',function(){function AnimationController(){tr.b.EventTarget.call(this);this.target_=undefined;this.activeAnimation_=undefined;this.tickScheduled_=false;}
 AnimationController.prototype={__proto__:tr.b.EventTarget.prototype,get target(){return this.target_;},set target(target){if(this.activeAnimation_)
 throw new Error('Cannot change target while animation is running.');if(target.cloneAnimationState===undefined||typeof target.cloneAnimationState!=='function')
 throw new Error('target must have a cloneAnimationState function');this.target_=target;},get activeAnimation(){return this.activeAnimation_;},get hasActiveAnimation(){return!!this.activeAnimation_;},queueAnimation:function(animation,opt_now){if(this.target_===undefined)
@@ -3952,11 +4380,16 @@
 opt_changeCb.call();}
 buttonEl.addEventListener('change',onChange);var id='#checkbox-'+nextCheckboxId++;var spanEl=createSpan({className:'labeled-checkbox'});buttonEl.setAttribute('id',id);var labelEl=document.createElement('label');labelEl.textContent=label;labelEl.setAttribute('for',id);spanEl.appendChild(buttonEl);spanEl.appendChild(labelEl);spanEl.__defineSetter__('checked',function(opt_bool){var changed=buttonEl.checked!==(!!opt_bool);if(!changed)
 return;buttonEl.checked=!!opt_bool;onChange();});spanEl.__defineGetter__('checked',function(){return buttonEl.checked;});return spanEl;}
+function createButton(targetEl,targetElProperty,label,opt_changeCb){var buttonEl=document.createElement('input');buttonEl.type='button';function onClick(){if(opt_changeCb)
+opt_changeCb.call();}
+buttonEl.addEventListener('click',onClick);buttonEl.value=label;return buttonEl;}
+function createTextInput(targetEl,targetElProperty,settingsKey,defaultValue){var initialValue=tr.b.Settings.get(settingsKey,defaultValue);var el=document.createElement('input');el.type='text';function onChange(e){tr.b.Settings.set(settingsKey,el.value);targetEl[targetElProperty]=el.value;}
+el.addEventListener('input',onChange);el.value=initialValue;targetEl[targetElProperty]=initialValue;return el;}
 function isElementAttachedToDocument(el){var cur=el;while(cur.parentNode)
 cur=cur.parentNode;return(cur===el.ownerDocument||cur.nodeName==='#document-fragment');}
 function asHTMLOrTextNode(value,opt_ownerDocument){if(value instanceof Node)
 return value;var ownerDocument=opt_ownerDocument||document;return ownerDocument.createTextNode(value);}
-return{createSpan:createSpan,createDiv:createDiv,createScopedStyle:createScopedStyle,createSelector:createSelector,createOptionGroup:createOptionGroup,createCheckBox:createCheckBox,isElementAttachedToDocument:isElementAttachedToDocument,asHTMLOrTextNode:asHTMLOrTextNode};});'use strict';tr.exportTo('tr.ui.b',function(){var ColorScheme=tr.b.ColorScheme;var colors=ColorScheme.colors;var colorsAsStrings=ColorScheme.colorsAsStrings;var numColorsPerVariant=ColorScheme.properties.numColorsPerVariant;var SelectionState=tr.model.SelectionState;var EventPresenter={getSelectableItemColorAsString:function(item){var colorId=item.colorId+this.getColorIdOffset_(item);return colorsAsStrings[colorId];},getColorIdOffset_:function(event){return event.selectionState;},getTextColor:function(event){if(event.selectionState===SelectionState.DIMMED)
+return{createSpan:createSpan,createDiv:createDiv,createScopedStyle:createScopedStyle,createSelector:createSelector,createOptionGroup:createOptionGroup,createCheckBox:createCheckBox,createButton:createButton,createTextInput:createTextInput,isElementAttachedToDocument:isElementAttachedToDocument,asHTMLOrTextNode:asHTMLOrTextNode};});'use strict';tr.exportTo('tr.ui.b',function(){var ColorScheme=tr.b.ColorScheme;var colors=ColorScheme.colors;var colorsAsStrings=ColorScheme.colorsAsStrings;var numColorsPerVariant=ColorScheme.properties.numColorsPerVariant;var SelectionState=tr.model.SelectionState;var EventPresenter={getSelectableItemColorAsString:function(item){var colorId=item.colorId+this.getColorIdOffset_(item);return colorsAsStrings[colorId];},getColorIdOffset_:function(event){return event.selectionState;},getTextColor:function(event){if(event.selectionState===SelectionState.DIMMED)
 return'rgb(60,60,60)';return'rgb(0,0,0)';},getSliceColorId:function(slice){return slice.colorId+this.getColorIdOffset_(slice);},getSliceAlpha:function(slice,async){var alpha=1;if(async)
 alpha*=0.3;return alpha;},getInstantSliceColor:function(instant){var colorId=instant.colorId+this.getColorIdOffset_(instant);return colors[colorId].toStringWithAlphaOverride(1.0);},getObjectInstanceColor:function(instance){var colorId=instance.colorId+this.getColorIdOffset_(instance);return colors[colorId].toStringWithAlphaOverride(0.25);},getObjectSnapshotColor:function(snapshot){var colorId=snapshot.objectInstance.colorId+this.getColorIdOffset_(snapshot);return colors[colorId];},getCounterSeriesColor:function(colorId,selectionState,opt_alphaMultiplier){var event={selectionState:selectionState};var c=colors[colorId+this.getColorIdOffset_(event)];return c.toStringWithAlphaOverride(opt_alphaMultiplier!==undefined?opt_alphaMultiplier:1.0);},getBarSnapshotColor:function(snapshot,offset){var colorId=(snapshot.objectInstance.colorId+offset)%numColorsPerVariant;colorId+=this.getColorIdOffset_(snapshot);return colors[colorId].toStringWithAlphaOverride(1.0);}};return{EventPresenter:EventPresenter};});'use strict';tr.exportTo('tr.ui.b',function(){var elidedTitleCacheDict={};var elidedTitleCache=new ElidedTitleCache();function ElidedTitleCache(){this.textWidthMap={};}
 ElidedTitleCache.prototype={get:function(ctx,pixWidth,title,width,sliceDuration){var elidedDict=elidedTitleCacheDict[title];if(!elidedDict){elidedDict={};elidedTitleCacheDict[title]=elidedDict;}
@@ -4047,852 +4480,7 @@
 continue;}
 var track=this.trackForEvent(event);track.addEventNearToProvidedEventToSelection(event,offset,newSelection);}
 if(newSelection.length==0)
-return undefined;return newSelection;},rebuildEventToTrackMap:function(){this.eventToTrackMap_=new tr.ui.tracks.EventToTrackMap();this.modelTrackContainer_.addEventsToTrackMap(this.eventToTrackMap_);},rebuildContainerToTrackMap:function(){this.containerToTrackMap.clear();this.modelTrackContainer_.addContainersToTrackMap(this.containerToTrackMap);},trackForEvent:function(event){return this.eventToTrackMap_[event.guid];}};return{TimelineViewport:TimelineViewport};});'use strict';tr.exportTo('tr.model',function(){function YComponent(stableId,yPercentOffset){this.stableId=stableId;this.yPercentOffset=yPercentOffset;}
-YComponent.prototype={toDict:function(){return{stableId:this.stableId,yPercentOffset:this.yPercentOffset};}};function Location(xWorld,yComponents){this.xWorld_=xWorld;this.yComponents_=yComponents;};Location.fromViewCoordinates=function(viewport,viewX,viewY){var dt=viewport.currentDisplayTransform;var xWorld=dt.xViewToWorld(viewX);var yComponents=[];var elem=document.elementFromPoint(viewX+viewport.modelTrackContainer.canvas.offsetLeft,viewY+viewport.modelTrackContainer.canvas.offsetTop);while(elem instanceof tr.ui.tracks.Track){if(elem.eventContainer){var boundRect=elem.getBoundingClientRect();var yPercentOffset=(viewY-boundRect.top)/boundRect.height;yComponents.push(new YComponent(elem.eventContainer.stableId,yPercentOffset));}
-elem=elem.parentElement;}
-if(yComponents.length==0)
-return;return new Location(xWorld,yComponents);}
-Location.fromStableIdAndTimestamp=function(viewport,stableId,ts){var xWorld=ts;var yComponents=[];var containerToTrack=viewport.containerToTrackMap;var elem=containerToTrack.getTrackByStableId(stableId);if(!elem)
-return;var firstY=elem.getBoundingClientRect().top;while(elem instanceof tr.ui.tracks.Track){if(elem.eventContainer){var boundRect=elem.getBoundingClientRect();var yPercentOffset=(firstY-boundRect.top)/boundRect.height;yComponents.push(new YComponent(elem.eventContainer.stableId,yPercentOffset));}
-elem=elem.parentElement;}
-if(yComponents.length==0)
-return;return new Location(xWorld,yComponents);}
-Location.prototype={get xWorld(){return this.xWorld_;},getContainingTrack:function(viewport){var containerToTrack=viewport.containerToTrackMap;for(var i in this.yComponents_){var yComponent=this.yComponents_[i];var track=containerToTrack.getTrackByStableId(yComponent.stableId);if(track!==undefined)
-return track;}},toViewCoordinates:function(viewport){var dt=viewport.currentDisplayTransform;var containerToTrack=viewport.containerToTrackMap;var viewX=dt.xWorldToView(this.xWorld_);var viewY=-1;for(var index in this.yComponents_){var yComponent=this.yComponents_[index];var track=containerToTrack.getTrackByStableId(yComponent.stableId);if(track!==undefined){var boundRect=track.getBoundingClientRect();viewY=yComponent.yPercentOffset*boundRect.height+boundRect.top;break;}}
-return{viewX:viewX,viewY:viewY};},toDict:function(){return{xWorld:this.xWorld_,yComponents:this.yComponents_};}};return{Location:Location};});'use strict';tr.exportTo('tr.ui.b',function(){var Location=tr.model.Location;function UIState(location,scaleX){this.location_=location;this.scaleX_=scaleX;};UIState.fromUserFriendlyString=function(model,viewport,stateString){var navByFinderPattern=/^(-?\d+(\.\d+)?)@(.+)x(\d+(\.\d+)?)$/g;var match=navByFinderPattern.exec(stateString);if(!match)
-return;var timestamp=parseFloat(match[1]);var stableId=match[3];var scaleX=parseFloat(match[4]);if(scaleX<=0)
-throw new Error('Invalid ScaleX value in UI State string.');if(!viewport.containerToTrackMap.getTrackByStableId(stableId))
-throw new Error('Invalid StableID given in UI State String.');var loc=tr.model.Location.fromStableIdAndTimestamp(viewport,stableId,timestamp);return new UIState(loc,scaleX);}
-UIState.prototype={get location(){return this.location_;},get scaleX(){return this.scaleX_;},toUserFriendlyString:function(viewport){var timestamp=this.location_.xWorld;var stableId=this.location_.getContainingTrack(viewport).eventContainer.stableId;var scaleX=this.scaleX_;return timestamp.toFixed(5)+'@'+stableId+'x'+scaleX.toFixed(5);},toDict:function(){return{location:this.location_.toDict(),scaleX:this.scaleX_};}};return{UIState:UIState};});'use strict';tr.exportTo('tr.c',function(){var BrushingState=tr.ui.b.BrushingState;var EventSet=tr.model.EventSet;var SelectionState=tr.model.SelectionState;var Viewport=tr.ui.TimelineViewport;function BrushingStateController(timelineView){tr.b.EventTarget.call(this);this.timelineView_=timelineView;this.currentBrushingState_=new BrushingState();this.onPopState_=this.onPopState_.bind(this);this.historyEnabled_=false;this.selections_={};}
-BrushingStateController.prototype={__proto__:tr.b.EventTarget.prototype,dispatchChangeEvent_:function(){var e=new tr.b.Event('change',false,false);this.dispatchEvent(e);},get model(){if(!this.timelineView_)
-return undefined;return this.timelineView_.model;},get trackView(){if(!this.timelineView_)
-return undefined;return this.timelineView_.trackView;},get viewport(){if(!this.timelineView_)
-return undefined;if(!this.timelineView_.trackView)
-return undefined;return this.timelineView_.trackView.viewport;},get historyEnabled(){return this.historyEnabled_;},set historyEnabled(historyEnabled){this.historyEnabled_=!!historyEnabled;if(historyEnabled)
-window.addEventListener('popstate',this.onPopState_);else
-window.removeEventListener('popstate',this.onPopState_);},modelWillChange:function(){if(this.currentBrushingState_.isAppliedToModel)
-this.currentBrushingState_.unapplyFromModelSelectionState();},modelDidChange:function(){this.selections_={};this.currentBrushingState_=new BrushingState();this.currentBrushingState_.applyToModelSelectionState(this.model);var e=new tr.b.Event('model-changed',false,false);this.dispatchEvent(e);this.dispatchChangeEvent_();},onUserInitiatedSelectionChange_:function(){var selection=this.selection;if(this.historyEnabled){this.selections_[selection.guid]=selection;var state={selection_guid:selection.guid};window.history.pushState(state,document.title);}},onPopState_:function(e){if(e.state===null)
-return;var selection=this.selections_[e.state.selection_guid];if(selection){var newState=this.currentBrushingState_.clone();newState.selection=selection;this.currentBrushingState=newState;}
-e.stopPropagation();},get selection(){return this.currentBrushingState_.selection;},get findMatches(){return this.currentBrushingState_.findMatches;},get selectionOfInterest(){return this.currentBrushingState_.selectionOfInterest;},get currentBrushingState(){return this.currentBrushingState_;},set currentBrushingState(newBrushingState){if(newBrushingState.isAppliedToModel)
-throw new Error('Cannot apply this state, it is applied');var hasValueChanged=!this.currentBrushingState_.equals(newBrushingState);if(newBrushingState!==this.currentBrushingState_&&!hasValueChanged){if(this.currentBrushingState_.isAppliedToModel){this.currentBrushingState_.transferModelOwnershipToClone(newBrushingState);}
-this.currentBrushingState_=newBrushingState;return;}
-if(this.currentBrushingState_.isAppliedToModel)
-this.currentBrushingState_.unapplyFromModelSelectionState();this.currentBrushingState_=newBrushingState;if(this.model)
-this.currentBrushingState_.applyToModelSelectionState(this.model);this.dispatchChangeEvent_();},addAllEventsMatchingFilterToSelectionAsTask:function(filter,selection){var timelineView=this.timelineView_.trackView;if(!timelineView)
-return new tr.b.Task();return timelineView.addAllEventsMatchingFilterToSelectionAsTask(filter,selection);},findTextChangedTo:function(allPossibleMatches){var newBrushingState=this.currentBrushingState_.clone();newBrushingState.findMatches=allPossibleMatches;this.currentBrushingState=newBrushingState;},findFocusChangedTo:function(currentFocus){var newBrushingState=this.currentBrushingState_.clone();newBrushingState.selection=currentFocus;this.currentBrushingState=newBrushingState;this.onUserInitiatedSelectionChange_();},findTextCleared:function(){if(this.xNavStringMarker_!==undefined){this.model.removeAnnotation(this.xNavStringMarker_);this.xNavStringMarker_=undefined;}
-if(this.guideLineAnnotation_!==undefined){this.model.removeAnnotation(this.guideLineAnnotation_);this.guideLineAnnotation_=undefined;}
-var newBrushingState=this.currentBrushingState_.clone();newBrushingState.selection=new EventSet();newBrushingState.findMatches=new EventSet();this.currentBrushingState=newBrushingState;this.onUserInitiatedSelectionChange_();},uiStateFromString:function(string){return tr.ui.b.UIState.fromUserFriendlyString(this.model,this.viewport,string);},navToPosition:function(uiState,showNavLine){this.trackView.navToPosition(uiState,showNavLine);},changeSelectionFromTimeline:function(selection){var newBrushingState=this.currentBrushingState_.clone();newBrushingState.selection=selection;newBrushingState.findMatches=new EventSet();this.currentBrushingState=newBrushingState;this.onUserInitiatedSelectionChange_();},showScriptControlSelection:function(selection){var newBrushingState=this.currentBrushingState_.clone();newBrushingState.selection=selection;newBrushingState.findMatches=new EventSet();this.currentBrushingState=newBrushingState;},changeSelectionFromRequestSelectionChangeEvent:function(selection){var newBrushingState=this.currentBrushingState_.clone();newBrushingState.selection=selection;newBrushingState.findMatches=new EventSet();this.currentBrushingState=newBrushingState;this.onUserInitiatedSelectionChange_();},changeAnalysisViewRelatedEvents:function(eventSet){var newBrushingState=this.currentBrushingState_.clone();newBrushingState.analysisViewRelatedEvents=eventSet;this.currentBrushingState=newBrushingState;},changeAnalysisLinkHoveredEvents:function(eventSet){var newBrushingState=this.currentBrushingState_.clone();newBrushingState.analysisLinkHoveredEvents=eventSet;this.currentBrushingState=newBrushingState;},getViewSpecificBrushingState:function(viewId){return this.currentBrushingState.viewSpecificBrushingStates[viewId];},changeViewSpecificBrushingState:function(viewId,newState){var oldStates=this.currentBrushingState_.viewSpecificBrushingStates;var newStates={};for(var id in oldStates)
-newStates[id]=oldStates[id];if(newState===undefined)
-delete newStates[viewId];else
-newStates[viewId]=newState;var newBrushingState=this.currentBrushingState_.clone();newBrushingState.viewSpecificBrushingStates=newStates;this.currentBrushingState=newBrushingState;}};BrushingStateController.getControllerForElement=function(element){if(tr.isHeadless)
-throw new Error('Unsupported');var currentElement=element;while(currentElement){if(currentElement.brushingStateController)
-return currentElement.brushingStateController;if(currentElement.parentElement){currentElement=currentElement.parentElement;continue;}
-var currentNode=currentElement;while(currentNode.parentNode)
-currentNode=currentNode.parentNode;currentElement=currentNode.host;}
-return undefined;};return{BrushingStateController:BrushingStateController};});'use strict';Polymer('tr-ui-a-analysis-link',{ready:function(){this.selection_=undefined;},attached:function(){this.controller_=tr.c.BrushingStateController.getControllerForElement(this);},detached:function(){this.clearHighlight_();this.controller_=undefined;},get selection(){return this.selection_;},set selection(selection){this.selection_=selection;this.textContent=selection.userFriendlyName;},setSelectionAndContent:function(selection,opt_textContent){this.selection_=selection;if(opt_textContent)
-this.textContent=opt_textContent;},getCurrentSelection_:function(){if(typeof this.selection_==='function')
-return this.selection_();return this.selection_;},setHighlight_:function(opt_eventSet){if(this.controller_)
-this.controller_.changeAnalysisLinkHoveredEvents(opt_eventSet);},clearHighlight_:function(opt_eventSet){this.setHighlight_();},onClicked_:function(){if(!this.selection_)
-return;var event=new tr.model.RequestSelectionChangeEvent();event.selection=this.getCurrentSelection_();this.dispatchEvent(event);},onMouseEnter_:function(){this.setHighlight_(this.getCurrentSelection_());},onMouseLeave_:function(){this.clearHighlight_();}});'use strict';tr.exportTo('tr.ui.b',function(){var TableFormat={};TableFormat.SelectionMode={NONE:0,ROW:1,CELL:2};TableFormat.HighlightStyle={DEFAULT:0,NONE:1,LIGHT:2,DARK:3};return{TableFormat:TableFormat};});'use strict';(function(){var RIGHT_ARROW=String.fromCharCode(0x25b6);var UNSORTED_ARROW=String.fromCharCode(0x25BF);var ASCENDING_ARROW=String.fromCharCode(0x25B4);var DESCENDING_ARROW=String.fromCharCode(0x25BE);var BASIC_INDENTATION=8;var SelectionMode=tr.ui.b.TableFormat.SelectionMode;var HighlightStyle=tr.ui.b.TableFormat.HighlightStyle;Polymer('tr-ui-b-table',{created:function(){this.selectionMode_=SelectionMode.NONE;this.rowHighlightStyle_=HighlightStyle.DEFAULT;this.cellHighlightStyle_=HighlightStyle.DEFAULT;this.selectedTableRowInfo_=undefined;this.selectedColumnIndex_=undefined;this.tableColumns_=[];this.tableRows_=[];this.tableRowsInfo_=new WeakMap();this.tableFooterRows_=[];this.tableFooterRowsInfo_=new WeakMap();this.sortColumnIndex_=undefined;this.sortDescending_=false;this.columnsWithExpandButtons_=[];this.headerCells_=[];this.showHeader_=true;this.emptyValue_=undefined;this.subRowsPropertyName_='subRows';this.customizeTableRowCallback_=undefined;},ready:function(){this.$.body.addEventListener('keydown',this.onKeyDown_.bind(this),true);},clear:function(){this.selectionMode_=SelectionMode.NONE;this.rowHighlightStyle_=HighlightStyle.DEFAULT;this.cellHighlightStyle_=HighlightStyle.DEFAULT;this.selectedTableRowInfo_=undefined;this.selectedColumnIndex_=undefined;this.textContent='';this.tableColumns_=[];this.tableRows_=[];this.tableRowsInfo_=new WeakMap();this.tableFooterRows_=[];this.tableFooterRowsInfo_=new WeakMap();this.sortColumnIndex_=undefined;this.sortDescending_=false;this.columnsWithExpandButtons_=[];this.headerCells_=[];this.subRowsPropertyName_='subRows';},get showHeader(){return this.showHeader_;},set showHeader(showHeader){this.showHeader_=showHeader;this.scheduleRebuildHeaders_();},set subRowsPropertyName(name){this.subRowsPropertyName_=name;},set customizeTableRowCallback(cb){this.customizeTableRowCallback_=cb;this.scheduleRebuildBody_();},get emptyValue(){return this.emptyValue_;},set emptyValue(emptyValue){var previousEmptyValue=this.emptyValue_;this.emptyValue_=emptyValue;if(this.tableRows_.length===0&&emptyValue!==previousEmptyValue)
-this.scheduleRebuildBody_();},set tableColumns(columns){var columnsWithExpandButtons=[];for(var i=0;i<columns.length;i++){if(columns[i].showExpandButtons)
-columnsWithExpandButtons.push(i);}
-if(columnsWithExpandButtons.length===0){columnsWithExpandButtons=[0];}
-for(var i=0;i<columns.length;i++){var colInfo=columns[i];if(colInfo.width===undefined)
-continue;var hasExpandButton=columnsWithExpandButtons.indexOf(i)!==-1;var w=colInfo.width;if(w){if(/\d+px/.test(w)){continue;}else if(/\d+%/.test(w)){if(hasExpandButton){throw new Error('Columns cannot be %-sized and host '+' an expand button');}}else{throw new Error('Unrecognized width string');}}}
-this.tableColumns_=columns;this.headerCells_=[];this.columnsWithExpandButtons_=columnsWithExpandButtons;this.sortColumnIndex=undefined;this.scheduleRebuildHeaders_();this.tableRows=this.tableRows_;},get tableColumns(){return this.tableColumns_;},set tableRows(rows){this.selectedTableRowInfo_=undefined;this.selectedColumnIndex_=undefined;this.maybeUpdateSelectedRow_();this.tableRows_=rows;this.tableRowsInfo_=new WeakMap();this.scheduleRebuildBody_();},get tableRows(){return this.tableRows_;},set footerRows(rows){this.tableFooterRows_=rows;this.tableFooterRowsInfo_=new WeakMap();this.scheduleRebuildFooter_();},get footerRows(){return this.tableFooterRows_;},set sortColumnIndex(number){if(number===this.sortColumnIndex_)
-return;if(number===undefined){this.sortColumnIndex_=undefined;this.updateHeaderArrows_();this.dispatchSortingChangedEvent_();return;}
-if(this.tableColumns_.length<=number)
-throw new Error('Column number '+number+' is out of bounds.');if(!this.tableColumns_[number].cmp)
-throw new Error('Column '+number+' does not have a comparator.');this.sortColumnIndex_=number;this.updateHeaderArrows_();this.scheduleRebuildBody_();this.dispatchSortingChangedEvent_();},get sortColumnIndex(){return this.sortColumnIndex_;},set sortDescending(value){var newValue=!!value;if(newValue!==this.sortDescending_){this.sortDescending_=newValue;this.updateHeaderArrows_();this.scheduleRebuildBody_();this.dispatchSortingChangedEvent_();}},get sortDescending(){return this.sortDescending_;},updateHeaderArrows_:function(){for(var i=0;i<this.headerCells_.length;i++){if(!this.tableColumns_[i].cmp){this.headerCells_[i].sideContent='';continue;}
-if(i!==this.sortColumnIndex_){this.headerCells_[i].sideContent=UNSORTED_ARROW;continue;}
-this.headerCells_[i].sideContent=this.sortDescending_?DESCENDING_ARROW:ASCENDING_ARROW;}},sortRows_:function(rows){rows.sort(function(rowA,rowB){if(this.sortDescending_)
-return this.tableColumns_[this.sortColumnIndex_].cmp(rowB.userRow,rowA.userRow);return this.tableColumns_[this.sortColumnIndex_].cmp(rowA.userRow,rowB.userRow);}.bind(this));for(var i=0;i<rows.length;i++){if(rows[i].isExpanded)
-this.sortRows_(rows[i][this.subRowsPropertyName_]);}},generateHeaderColumns_:function(){this.headerCells_=[];this.$.head.textContent='';if(!this.showHeader_)
-return;var tr=this.appendNewElement_(this.$.head,'tr');for(var i=0;i<this.tableColumns_.length;i++){var td=this.appendNewElement_(tr,'td');var headerCell=document.createElement('tr-ui-b-table-header-cell');if(this.showHeader)
-headerCell.cellTitle=this.tableColumns_[i].title;else
-headerCell.cellTitle='';if(this.tableColumns_[i].cmp){td.classList.add('sensitive');headerCell.tapCallback=this.createSortCallback_(i);if(this.sortColumnIndex_===i)
-headerCell.sideContent=this.sortDescending_?DESCENDING_ARROW:ASCENDING_ARROW;else
-headerCell.sideContent=UNSORTED_ARROW;}
-td.appendChild(headerCell);this.headerCells_.push(headerCell);}},applySizes_:function(){if(this.tableRows_.length===0&&!this.showHeader)
-return;var rowToRemoveSizing;var rowToSize;if(this.showHeader){rowToSize=this.$.head.children[0];rowToRemoveSizing=this.$.body.children[0];}else{rowToSize=this.$.body.children[0];rowToRemoveSizing=this.$.head.children[0];}
-for(var i=0;i<this.tableColumns_.length;i++){if(rowToRemoveSizing&&rowToRemoveSizing.children[i]){var tdToRemoveSizing=rowToRemoveSizing.children[i];tdToRemoveSizing.style.minWidth='';tdToRemoveSizing.style.width='';}
-var td=rowToSize.children[i];var delta;if(this.columnsWithExpandButtons_.indexOf(i)!==-1){td.style.paddingLeft=BASIC_INDENTATION+'px';delta=BASIC_INDENTATION+'px';}else{delta=undefined;}
-function calc(base,delta){if(delta)
-return'calc('+base+' - '+delta+')';else
-return base;}
-var w=this.tableColumns_[i].width;if(w){if(/\d+px/.test(w)){td.style.minWidth=calc(w,delta);}else if(/\d+%/.test(w)){td.style.width=w;}else{throw new Error('Unrecognized width string: '+w);}}}},createSortCallback_:function(columnNumber){return function(){var previousIndex=this.sortColumnIndex;this.sortColumnIndex=columnNumber;if(previousIndex!==columnNumber)
-this.sortDescending=false;else
-this.sortDescending=!this.sortDescending;}.bind(this);},generateTableRowNodes_:function(tableSection,userRows,rowInfoMap,indentation,lastAddedRow,parentRowInfo){if(this.sortColumnIndex_!==undefined&&tableSection===this.$.body){userRows=userRows.slice();userRows.sort(function(rowA,rowB){var c=this.tableColumns_[this.sortColumnIndex_].cmp(rowA,rowB);if(this.sortDescending_)
-c=-c;return c;}.bind(this));}
-for(var i=0;i<userRows.length;i++){var userRow=userRows[i];var rowInfo=this.getOrCreateRowInfoFor_(rowInfoMap,userRow,parentRowInfo);var htmlNode=this.getHTMLNodeForRowInfo_(tableSection,rowInfo,rowInfoMap,indentation);if(lastAddedRow===undefined){tableSection.insertBefore(htmlNode,tableSection.firstChild);}else{var nextSiblingOfLastAdded=lastAddedRow.nextSibling;tableSection.insertBefore(htmlNode,nextSiblingOfLastAdded);}
-this.updateTabIndexForTableRowNode_(htmlNode);lastAddedRow=htmlNode;if(!rowInfo.isExpanded)
-continue;lastAddedRow=this.generateTableRowNodes_(tableSection,userRow[this.subRowsPropertyName_],rowInfoMap,indentation+1,lastAddedRow,rowInfo);}
-return lastAddedRow;},getOrCreateRowInfoFor_:function(rowInfoMap,userRow,parentRowInfo){if(rowInfoMap.has(userRow))
-return rowInfoMap.get(userRow);var rowInfo={userRow:userRow,htmlNode:undefined,isExpanded:userRow.isExpanded||false,parentRowInfo:parentRowInfo};rowInfoMap.set(userRow,rowInfo);return rowInfo;},customizeTableRow_:function(userRow,trElement){if(!this.customizeTableRowCallback_)
-return;this.customizeTableRowCallback_(userRow,trElement);},getHTMLNodeForRowInfo_:function(tableSection,rowInfo,rowInfoMap,indentation){if(rowInfo.htmlNode){this.customizeTableRow_(rowInfo.userRow,rowInfo.htmlNode);return rowInfo.htmlNode;}
-var INDENT_SPACE=indentation*16;var INDENT_SPACE_NO_BUTTON=indentation*16+BASIC_INDENTATION;var trElement=this.ownerDocument.createElement('tr');rowInfo.htmlNode=trElement;rowInfo.indentation=indentation;trElement.rowInfo=rowInfo;this.customizeTableRow_(rowInfo.userRow,trElement);for(var i=0;i<this.tableColumns_.length;){var td=this.appendNewElement_(trElement,'td');td.columnIndex=i;var column=this.tableColumns_[i];var value=column.value(rowInfo.userRow);var colSpan=column.colSpan?column.colSpan:1;td.style.colSpan=colSpan;if(column.textAlign){td.style.textAlign=column.textAlign;}
-if(this.doesColumnIndexSupportSelection(i))
-td.classList.add('supports-selection');if(this.columnsWithExpandButtons_.indexOf(i)!=-1){if(rowInfo.userRow[this.subRowsPropertyName_]&&rowInfo.userRow[this.subRowsPropertyName_].length>0){td.style.paddingLeft=INDENT_SPACE+'px';var expandButton=this.appendNewElement_(td,'expand-button');expandButton.textContent=RIGHT_ARROW;if(rowInfo.isExpanded)
-expandButton.classList.add('button-expanded');}else{td.style.paddingLeft=INDENT_SPACE_NO_BUTTON+'px';}}
-if(value!==undefined)
-td.appendChild(tr.ui.b.asHTMLOrTextNode(value,this.ownerDocument));i+=colSpan;}
-var needsClickListener=false;if(this.columnsWithExpandButtons_.length)
-needsClickListener=true;else if(tableSection==this.$.body)
-needsClickListener=true;if(needsClickListener){trElement.addEventListener('click',function(e){e.stopPropagation();if(e.target.tagName=='EXPAND-BUTTON'){this.setExpandedForUserRow_(tableSection,rowInfoMap,rowInfo.userRow,!rowInfo.isExpanded);return;}
-function getTD(cur){if(cur===trElement)
-throw new Error('woah');if(cur.parentElement===trElement)
-return cur;return getTD(cur.parentElement);}
-if(this.selectionMode_!==SelectionMode.NONE){var isAlreadySelected=false;var tdThatWasClicked=getTD(e.target);switch(this.selectionMode_){case SelectionMode.ROW:isAlreadySelected=this.selectedTableRowInfo_===rowInfo;break;case SelectionMode.CELL:isAlreadySelected=this.selectedTableRowInfo_===rowInfo;isAlreadySelected&=(this.selectedColumnIndex_===tdThatWasClicked.columnIndex);break;default:throw new Error('Invalid selection mode '+
-this.selectionMode_);}
-if(isAlreadySelected){if(rowInfo.userRow[this.subRowsPropertyName_]&&rowInfo.userRow[this.subRowsPropertyName_].length){this.setExpandedForUserRow_(tableSection,rowInfoMap,rowInfo.userRow,!rowInfo.isExpanded);}}else{this.didTableRowInfoGetClicked_(rowInfo,tdThatWasClicked.columnIndex);}}else{if(rowInfo.userRow[this.subRowsPropertyName_]&&rowInfo.userRow[this.subRowsPropertyName_].length){this.setExpandedForUserRow_(tableSection,rowInfoMap,rowInfo.userRow,!rowInfo.isExpanded);}}}.bind(this));}
-return rowInfo.htmlNode;},removeSubNodes_:function(tableSection,rowInfo,rowInfoMap){if(rowInfo.userRow[this.subRowsPropertyName_]===undefined)
-return;for(var i=0;i<rowInfo.userRow[this.subRowsPropertyName_].length;i++){var subRow=rowInfo.userRow[this.subRowsPropertyName_][i];var subRowInfo=rowInfoMap.get(subRow);if(!subRowInfo)
-continue;var subNode=subRowInfo.htmlNode;if(subNode&&subNode.parentNode===tableSection){tableSection.removeChild(subNode);this.removeSubNodes_(tableSection,subRowInfo,rowInfoMap);}}},scheduleRebuildHeaders_:function(){this.headerDirty_=true;this.scheduleRebuild_();},scheduleRebuildBody_:function(){this.bodyDirty_=true;this.scheduleRebuild_();},scheduleRebuildFooter_:function(){this.footerDirty_=true;this.scheduleRebuild_();},scheduleRebuild_:function(){if(this.rebuildPending_)
-return;this.rebuildPending_=true;setTimeout(function(){this.rebuildPending_=false;this.rebuild();}.bind(this),0);},rebuildIfNeeded_:function(){this.rebuild();},rebuild:function(){var wasBodyOrHeaderDirty=this.headerDirty_||this.bodyDirty_;if(this.headerDirty_){this.generateHeaderColumns_();this.headerDirty_=false;}
-if(this.bodyDirty_){this.$.body.textContent='';this.generateTableRowNodes_(this.$.body,this.tableRows_,this.tableRowsInfo_,0,undefined,undefined);if(this.tableRows_.length===0&&this.emptyValue_!==undefined){var trElement=this.ownerDocument.createElement('tr');this.$.body.appendChild(trElement);trElement.classList.add('empty-row');var td=this.ownerDocument.createElement('td');trElement.appendChild(td);td.colSpan=this.tableColumns_.length;var emptyValue=this.emptyValue_;td.appendChild(tr.ui.b.asHTMLOrTextNode(emptyValue,this.ownerDocument));}
-this.bodyDirty_=false;}
-if(wasBodyOrHeaderDirty)
-this.applySizes_();if(this.footerDirty_){this.$.foot.textContent='';this.generateTableRowNodes_(this.$.foot,this.tableFooterRows_,this.tableFooterRowsInfo_,0,undefined,undefined);if(this.tableFooterRowsInfo_.length){this.$.body.classList.add('has-footer');}else{this.$.body.classList.remove('has-footer');}
-this.footerDirty_=false;}},appendNewElement_:function(parent,tagName){var element=parent.ownerDocument.createElement(tagName);parent.appendChild(element);return element;},getExpandedForTableRow:function(userRow){this.rebuildIfNeeded_();var rowInfo=this.tableRowsInfo_.get(userRow);if(rowInfo===undefined)
-throw new Error('Row has not been seen, must expand its parents');return rowInfo.isExpanded;},setExpandedForTableRow:function(userRow,expanded){this.rebuildIfNeeded_();var rowInfo=this.tableRowsInfo_.get(userRow);if(rowInfo===undefined)
-throw new Error('Row has not been seen, must expand its parents');return this.setExpandedForUserRow_(this.$.body,this.tableRowsInfo_,userRow,expanded);},setExpandedForUserRow_:function(tableSection,rowInfoMap,userRow,expanded){this.rebuildIfNeeded_();var rowInfo=rowInfoMap.get(userRow);if(rowInfo===undefined)
-throw new Error('Row has not been seen, must expand its parents');rowInfo.isExpanded=!!expanded;if(rowInfo.htmlNode===undefined)
-return;if(rowInfo.htmlNode.parentElement!==tableSection)
-return;var expandButton=rowInfo.htmlNode.querySelector('expand-button');if(rowInfo.isExpanded){expandButton.classList.add('button-expanded');var lastAddedRow=rowInfo.htmlNode;if(rowInfo.userRow[this.subRowsPropertyName_]){this.generateTableRowNodes_(tableSection,rowInfo.userRow[this.subRowsPropertyName_],rowInfoMap,rowInfo.indentation+1,lastAddedRow,rowInfo);}}else{expandButton.classList.remove('button-expanded');this.removeSubNodes_(tableSection,rowInfo,rowInfoMap);}
-this.maybeUpdateSelectedRow_();},get selectionMode(){return this.selectionMode_;},set selectionMode(selectionMode){if(!tr.b.dictionaryContainsValue(SelectionMode,selectionMode))
-throw new Error('Invalid selection mode '+selectionMode);this.rebuildIfNeeded_();this.selectionMode_=selectionMode;this.didSelectionStateChange_();},get rowHighlightStyle(){return this.rowHighlightStyle_;},set rowHighlightStyle(rowHighlightStyle){if(!tr.b.dictionaryContainsValue(HighlightStyle,rowHighlightStyle))
-throw new Error('Invalid row highlight style '+rowHighlightStyle);this.rebuildIfNeeded_();this.rowHighlightStyle_=rowHighlightStyle;this.didSelectionStateChange_();},get resolvedRowHighlightStyle(){if(this.rowHighlightStyle_!==HighlightStyle.DEFAULT)
-return this.rowHighlightStyle_;switch(this.selectionMode_){case SelectionMode.NONE:return HighlightStyle.NONE;case SelectionMode.ROW:return HighlightStyle.DARK;case SelectionMode.CELL:return HighlightStyle.LIGHT;default:throw new Error('Invalid selection mode '+selectionMode);}},get cellHighlightStyle(){return this.cellHighlightStyle_;},set cellHighlightStyle(cellHighlightStyle){if(!tr.b.dictionaryContainsValue(HighlightStyle,cellHighlightStyle))
-throw new Error('Invalid cell highlight style '+cellHighlightStyle);this.rebuildIfNeeded_();this.cellHighlightStyle_=cellHighlightStyle;this.didSelectionStateChange_();},get resolvedCellHighlightStyle(){if(this.cellHighlightStyle_!==HighlightStyle.DEFAULT)
-return this.cellHighlightStyle_;switch(this.selectionMode_){case SelectionMode.NONE:case SelectionMode.ROW:return HighlightStyle.NONE;case SelectionMode.CELL:return HighlightStyle.DARK;default:throw new Error('Invalid selection mode '+selectionMode);}},setHighlightStyle_:function(highlightAttribute,resolvedHighlightStyle){switch(resolvedHighlightStyle){case HighlightStyle.NONE:this.$.body.removeAttribute(highlightAttribute);break;case HighlightStyle.LIGHT:this.$.body.setAttribute(highlightAttribute,'light');break;case HighlightStyle.DARK:this.$.body.setAttribute(highlightAttribute,'dark');break;default:throw new Error('Invalid resolved highlight style '+
-resolvedHighlightStyle);}},didSelectionStateChange_:function(){this.setHighlightStyle_('row-highlight-style',this.resolvedRowHighlightStyle);this.setHighlightStyle_('cell-highlight-style',this.resolvedCellHighlightStyle);for(var i=0;i<this.$.body.children.length;i++)
-this.updateTabIndexForTableRowNode_(this.$.body.children[i]);this.maybeUpdateSelectedRow_();},maybeUpdateSelectedRow_:function(){if(this.selectedTableRowInfo_===undefined)
-return;if(this.selectionMode_===SelectionMode.NONE){this.removeSelectedState_();this.selectedTableRowInfo_=undefined;return;}
-function isVisible(rowInfo){if(!rowInfo.htmlNode)
-return false;return!!rowInfo.htmlNode.parentElement;}
-if(isVisible(this.selectedTableRowInfo_)){this.updateSelectedState_();return;}
-this.removeSelectedState_();var curRowInfo=this.selectedTableRowInfo_;while(curRowInfo&&!isVisible(curRowInfo))
-curRowInfo=curRowInfo.parentRowInfo;this.selectedTableRowInfo_=curRowInfo;if(this.selectedTableRowInfo_)
-this.updateSelectedState_();},didTableRowInfoGetClicked_:function(rowInfo,columnIndex){switch(this.selectionMode_){case SelectionMode.NONE:return;case SelectionMode.CELL:if(!this.doesColumnIndexSupportSelection(columnIndex))
-return;case SelectionMode.ROW:if(this.selectedTableRowInfo_!==rowInfo)
-this.selectedTableRow=rowInfo.userRow;if(this.selectedColumnIndex!==columnIndex)
-this.selectedColumnIndex=columnIndex;}},get selectedTableRow(){if(!this.selectedTableRowInfo_)
-return undefined;return this.selectedTableRowInfo_.userRow;},set selectedTableRow(userRow){this.rebuildIfNeeded_();if(this.selectionMode_===SelectionMode.NONE)
-throw new Error('Selection is off.');var rowInfo;if(userRow===undefined){rowInfo=undefined;}else{rowInfo=this.tableRowsInfo_.get(userRow);if(!rowInfo)
-throw new Error('Row has not been seen, must expand its parents.');}
-var e=this.prepareToChangeSelection_();this.selectedTableRowInfo_=rowInfo;if(this.selectedTableRowInfo_===undefined){this.selectedColumnIndex_=undefined;this.removeSelectedState_();}else{switch(this.selectionMode_){case SelectionMode.ROW:this.selectedColumnIndex_=undefined;break;case SelectionMode.CELL:if(this.selectedColumnIndex_===undefined){var i=this.getFirstSelectableColumnIndex_();if(i==-1)
-throw new Error('Cannot find a selectable column.');this.selectedColumnIndex_=i;}
-break;default:throw new Error('Invalid selection mode '+this.selectionMode_);}
-this.updateSelectedState_();}
-this.dispatchEvent(e);},updateTabIndexForTableRowNode_:function(row){if(this.selectionMode_===SelectionMode.ROW)
-row.tabIndex=0;else
-row.removeAttribute('tabIndex');var enableCellTab=this.selectionMode_===SelectionMode.CELL;for(var i=0;i<this.tableColumns_.length;i++){var cell=row.children[i];if(enableCellTab&&this.doesColumnIndexSupportSelection(i))
-cell.tabIndex=0;else
-cell.removeAttribute('tabIndex');}},prepareToChangeSelection_:function(){var e=new tr.b.Event('selection-changed');var previousSelectedRowInfo=this.selectedTableRowInfo_;if(previousSelectedRowInfo)
-e.previousSelectedTableRow=previousSelectedRowInfo.userRow;else
-e.previousSelectedTableRow=undefined;this.removeSelectedState_();return e;},removeSelectedState_:function(){this.setSelectedState_(false);},updateSelectedState_:function(){this.setSelectedState_(true);},setSelectedState_:function(select){if(this.selectedTableRowInfo_===undefined)
-return;var rowNode=this.selectedTableRowInfo_.htmlNode;if(select)
-rowNode.setAttribute('selected',true);else
-rowNode.removeAttribute('selected');var cellNode=rowNode.children[this.selectedColumnIndex_];if(!cellNode)
-return;if(select)
-cellNode.setAttribute('selected',true);else
-cellNode.removeAttribute('selected');},doesColumnIndexSupportSelection:function(columnIndex){var columnInfo=this.tableColumns_[columnIndex];var scs=columnInfo.supportsCellSelection;if(scs===false)
-return false;return true;},getFirstSelectableColumnIndex_:function(){for(var i=0;i<this.tableColumns_.length;i++){if(this.doesColumnIndexSupportSelection(i))
-return i;}
-return-1;},getSelectableNodeGivenTableRowNode_:function(htmlNode){switch(this.selectionMode_){case SelectionMode.ROW:return htmlNode;case SelectionMode.CELL:return htmlNode.children[this.selectedColumnIndex_];default:throw new Error('Invalid selection mode '+this.selectionMode_);}},get selectedColumnIndex(){if(this.selectionMode_!==SelectionMode.CELL)
-return undefined;return this.selectedColumnIndex_;},set selectedColumnIndex(selectedColumnIndex){this.rebuildIfNeeded_();if(this.selectionMode_===SelectionMode.NONE)
-throw new Error('Selection is off.');if(selectedColumnIndex<0||selectedColumnIndex>=this.tableColumns_.length)
-throw new Error('Invalid index');if(!this.doesColumnIndexSupportSelection(selectedColumnIndex))
-throw new Error('Selection is not supported on this column');var e=this.prepareToChangeSelection_();this.selectedColumnIndex_=selectedColumnIndex;if(this.selectedColumnIndex_===undefined)
-this.selectedTableRowInfo_=undefined;this.updateSelectedState_();this.dispatchEvent(e);},onKeyDown_:function(e){if(this.selectionMode_===SelectionMode.NONE)
-return;if(this.selectedTableRowInfo_===undefined)
-return;var code_to_command_names={37:'ARROW_LEFT',38:'ARROW_UP',39:'ARROW_RIGHT',40:'ARROW_DOWN'};var cmdName=code_to_command_names[e.keyCode];if(cmdName===undefined)
-return;e.stopPropagation();e.preventDefault();this.performKeyCommand_(cmdName);},performKeyCommand_:function(cmdName){this.rebuildIfNeeded_();var rowInfo=this.selectedTableRowInfo_;var htmlNode=rowInfo.htmlNode;if(cmdName==='ARROW_UP'){var prev=htmlNode.previousElementSibling;if(prev){tr.ui.b.scrollIntoViewIfNeeded(prev);this.selectedTableRow=prev.rowInfo.userRow;this.focusSelected_();return;}
-return;}
-if(cmdName==='ARROW_DOWN'){var next=htmlNode.nextElementSibling;if(next){tr.ui.b.scrollIntoViewIfNeeded(next);this.selectedTableRow=next.rowInfo.userRow;this.focusSelected_();return;}
-return;}
-if(cmdName==='ARROW_RIGHT'){switch(this.selectionMode_){case SelectionMode.ROW:if(rowInfo.userRow[this.subRowsPropertyName_]===undefined)
-return;if(rowInfo.userRow[this.subRowsPropertyName_].length===0)
-return;if(!rowInfo.isExpanded)
-this.setExpandedForTableRow(rowInfo.userRow,true);this.selectedTableRow=rowInfo.userRow[this.subRowsPropertyName_][0];this.focusSelected_();return;case SelectionMode.CELL:var newIndex=this.selectedColumnIndex_+1;if(newIndex>=this.tableColumns_.length)
-return;if(!this.doesColumnIndexSupportSelection(newIndex))
-return;this.selectedColumnIndex=newIndex;this.focusSelected_();return;default:throw new Error('Invalid selection mode '+this.selectionMode_);}}
-if(cmdName==='ARROW_LEFT'){switch(this.selectionMode_){case SelectionMode.ROW:if(rowInfo.isExpanded){this.setExpandedForTableRow(rowInfo.userRow,false);this.focusSelected_();return;}
-var parentRowInfo=rowInfo.parentRowInfo;if(parentRowInfo){this.selectedTableRow=parentRowInfo.userRow;this.focusSelected_();return;}
-return;case SelectionMode.CELL:var newIndex=this.selectedColumnIndex_-1;if(newIndex<0)
-return;if(!this.doesColumnIndexSupportSelection(newIndex))
-return;this.selectedColumnIndex=newIndex;this.focusSelected_();return;default:throw new Error('Invalid selection mode '+this.selectionMode_);}}
-throw new Error('Unrecognized command '+cmdName);},focusSelected_:function(){if(!this.selectedTableRowInfo_)
-return;var node=this.getSelectableNodeGivenTableRowNode_(this.selectedTableRowInfo_.htmlNode);node.focus();},dispatchSortingChangedEvent_:function(){var e=new tr.b.Event('sort-column-changed');e.sortColumnIndex=this.sortColumnIndex_;e.sortDescending=this.sortDescending_;this.dispatchEvent(e);}});})();'use strict';Polymer('tr-ui-b-table-header-cell',{created:function(){this.tapCallback_=undefined;this.cellTitle_='';},set cellTitle(value){this.cellTitle_=value;var titleNode=tr.ui.b.asHTMLOrTextNode(this.cellTitle_,this.ownerDocument);this.$.title.innerText='';this.$.title.appendChild(titleNode);},get cellTitle(){return this.cellTitle_;},clearSideContent:function(){this.$.side.textContent='';},set sideContent(content){this.$.side.textContent=content;},get sideContent(){return this.$.side.textContent;},set tapCallback(callback){this.style.cursor='pointer';this.tapCallback_=callback;},get tapCallback(){return this.tapCallback_;},onTap_:function(){if(this.tapCallback_)
-this.tapCallback_();}});'use strict';tr.exportTo('tr.ui.b',function(){Object.observe(Polymer.elements,clearPolymerElementCaches);var elementsByName=undefined;var elementsThatExtend=undefined;var elementSubclasses=undefined;function clearPolymerElementCaches(){elementsByName={};elementsThatExtend=undefined;elementSubclasses={};}
-function buildElementMapsIfNeeded(){if(elementsThatExtend!==undefined&&elementsByName!==undefined)
-return;elementsByName={};elementsThatExtend={};Polymer.elements.forEach(function(element){if(elementsByName[element.name])
-throw new Error('Something is strange: dupe polymer element names');elementsByName[element.name]=element;if(element.extends){if(elementsThatExtend[element.extends]===undefined)
-elementsThatExtend[element.extends]=[];elementsThatExtend[element.extends].push(element.name);}});}
-function getPolymerElementNamed(tagName){buildElementMapsIfNeeded();return elementsByName[tagName];}
-function getPolymerElementsThatSubclass(tagName){if(Polymer.waitingFor().length){throw new Error('There are unresolved polymer elements. '+'Wait until Polymer.whenReady');}
-buildElementMapsIfNeeded();var element=getPolymerElementNamed(tagName);if(!element)
-throw new Error(tagName+' is not a polymer element');if(elementSubclasses===undefined)
-elementSubclasses={};if(elementSubclasses[tagName]===undefined){var immediateSubElements=elementsThatExtend[element.name];var allSubElements=[];if(immediateSubElements!==undefined&&immediateSubElements.length){immediateSubElements.forEach(function(subElement){allSubElements.push(subElement);allSubElements.push.apply(allSubElements,getPolymerElementsThatSubclass(subElement));});}
-elementSubclasses[tagName]=allSubElements;}
-return elementSubclasses[tagName];}
-return{getPolymerElementNamed:getPolymerElementNamed,getPolymerElementsThatSubclass:getPolymerElementsThatSubclass};});'use strict';tr.exportTo('tr.ui.units',function(){function createScalarSpan(value,opt_config){if(value===undefined)
-return'';var config=opt_config||{};var ownerDocument=config.ownerDocument||document;var span=ownerDocument.createElement('tr-ui-u-scalar-span');span.value=value;return span;}
-tr.b.u.Units.addEventListener('display-mode-changed',function(e){var subclassNames=tr.ui.b.getPolymerElementsThatSubclass('tr-ui-u-scalar-span');var isSubclass={};subclassNames.forEach(function(n){isSubclass[n.toUpperCase()]=true;});var m=tr.b.findDeepElementsMatchingPredicate(document.body,function(el){return isSubclass[el.tagName];});m.forEach(function(el){el.updateContent_();});});return{createScalarSpan:createScalarSpan};});'use strict';Polymer('tr-ui-u-scalar-span',{ready:function(){this.value_=undefined;this.unit_=undefined;this.warning_=undefined;this.percentage_=undefined;this.isDelta_=false;},set contentTextDecoration(deco){this.$.content.style.textDecoration=deco;},get value(){return this.value_;},set value(value){if(value instanceof tr.b.u.Scalar){this.value_=value.value;this.unit_=value.unit;}else{this.value_=value;}
-this.updateContent_();},get unit(){return this.unit_;},set unit(unit){this.unit_=unit;this.updateContent_();},setValueAndUnit:function(value,unit){this.value_=value;this.unit_=unit;this.updateContent_();},get percentage(){return this.percentage_;},set percentage(percentage){this.percentage_=percentage;this.updateSparkline_();},get rightAlign(){return this.$.content.classList.contains('right-align');},set rightAlign(rightAlign){if(rightAlign)
-this.$.content.classList.add('right-align');else
-this.$.content.classList.remove('right-align');},get isDelta(){return this.isDelta_;},set isDelta(isDelta){this.isDelta_=isDelta;this.updateContent_();},updateSparkline_:function(){if(this.percentage_===undefined){this.$.sparkline.style.display='none';this.$.sparkline.style.width='0';}else{this.$.sparkline.style.display='block';this.$.sparkline.style.width=(this.percentage_*100)+'%';}},updateContent_:function(){if(this.unit_===undefined){this.$.content.textContent='';return;}
-var content=this.unit_.format(this.value);if(this.isDelta_){if(this.value>0){content='+'+content;}else if(this.value===0){var PLUS_MINUS_SIGN=String.fromCharCode(177);content=PLUS_MINUS_SIGN+content;}}
-this.$.content.textContent=content;},get warning(){return this.warning_;},set warning(warning){this.warning_=warning;var warningEl=this.$.warning;if(this.warning_){warningEl.title=warning;warningEl.style.display='';}else{warningEl.title='';warningEl.style.display='none';}}});'use strict';tr.exportTo('tr.ui.units',function(){function createTimeDurationSpan(duration,opt_config){if(duration===undefined)
-return'';var config=opt_config||{};var ownerDocument=config.ownerDocument||document;var span=ownerDocument.createElement('tr-ui-u-time-duration-span');span.setValueAndUnit(duration,tr.b.u.Units.timeDurationInMs);if(config.total)
-span.percentage=duration/config.total;span.duration=duration;if(config.rightAlign)
-span.rightAlign=true;return span;}
-return{createTimeDurationSpan:createTimeDurationSpan};});'use strict';Polymer('tr-ui-u-time-duration-span',{get duration(){return this.value;},set duration(duration){if(duration instanceof tr.b.u.TimeDuration){this.value=duration;return;}
-this.setValueAndUnit(duration,tr.b.u.Units.timeDurationInMs);}});'use strict';tr.exportTo('tr.ui.units',function(){function createTimeStampSpan(timestamp,opt_config){if(timestamp===undefined)
-return'';var config=opt_config||{};var ownerDocument=config.ownerDocument||document;var span=ownerDocument.createElement('tr-ui-u-time-stamp-span');span.timestamp=timestamp;return span;}
-return{createTimeStampSpan:createTimeStampSpan};});'use strict';Polymer('tr-ui-u-time-stamp-span',{get timestamp(){return this.value;},set timestamp(timestamp){if(timestamp instanceof tr.b.u.TimeStamp){this.value=timestamp;return;}
-this.setValueAndUnit(timestamp,tr.b.u.Units.timeStampInMs);}});'use strict';function isTable(object){if(!(object instanceof Array)||(object.length<2))return false;for(var colName in object[0]){if(typeof colName!=='string')return false;}
-for(var i=0;i<object.length;++i){if(!(object[i]instanceof Object))return false;for(var colName in object[i]){if(i&&(object[0][colName]===undefined))return false;var cellType=typeof object[i][colName];if(cellType!=='string'&&cellType!='number')return false;}
-if(i){for(var colName in object[0]){if(object[i][colName]===undefined)return false;}}}
-return true;}
-Polymer('tr-ui-a-generic-object-view',{ready:function(){this.object_=undefined;},get object(){return this.object_;},set object(object){this.object_=object;this.updateContents_();},updateContents_:function(){this.$.content.textContent='';this.appendElementsForType_('',this.object_,0,0,5,'');},appendElementsForType_:function(label,object,indent,depth,maxDepth,suffix){if(depth>maxDepth){this.appendSimpleText_(label,indent,'<recursion limit reached>',suffix);return;}
-if(object===undefined){this.appendSimpleText_(label,indent,'undefined',suffix);return;}
-if(object===null){this.appendSimpleText_(label,indent,'null',suffix);return;}
-if(!(object instanceof Object)){var type=typeof object;if(type=='string'){var objectReplaced=false;if((object[0]=='{'&&object[object.length-1]=='}')||(object[0]=='['&&object[object.length-1]==']')){try{object=JSON.parse(object);objectReplaced=true;}catch(e){}}
-if(!objectReplaced){if(object.indexOf('\n')!==-1){var lines=object.split('\n');lines.forEach(function(line,i){var text,ioff,ll,ss;if(i==0){text='"'+line;ioff=0;ll=label;ss='';}else if(i<lines.length-1){text=line;ioff=1;ll='';ss='';}else{text=line+'"';ioff=1;ll='';ss=suffix;}
-var el=this.appendSimpleText_(ll,indent+ioff*label.length+ioff,text,ss);el.style.whiteSpace='pre';return el;},this);return;}else{this.appendSimpleText_(label,indent,'"'+object+'"',suffix);return;}}
-else{}}else{return this.appendSimpleText_(label,indent,object,suffix);}}
-if(object instanceof tr.model.ObjectSnapshot){var link=document.createElement('tr-ui-a-analysis-link');link.selection=new tr.model.EventSet(object);this.appendElementWithLabel_(label,indent,link,suffix);return;}
-if(object instanceof tr.model.ObjectInstance){var link=document.createElement('tr-ui-a-analysis-link');link.selection=new tr.model.EventSet(object);this.appendElementWithLabel_(label,indent,link,suffix);return;}
-if(object instanceof tr.b.Rect){this.appendSimpleText_(label,indent,object.toString(),suffix);return;}
-if(object instanceof tr.b.u.Scalar){var el=this.ownerDocument.createElement('tr-ui-u-scalar-span');el.value=object;this.appendElementWithLabel_(label,indent,el,suffix);return;}
-if(object instanceof Array){this.appendElementsForArray_(label,object,indent,depth,maxDepth,suffix);return;}
-this.appendElementsForObject_(label,object,indent,depth,maxDepth,suffix);},appendElementsForArray_:function(label,object,indent,depth,maxDepth,suffix){if(object.length==0){this.appendSimpleText_(label,indent,'[]',suffix);return;}
-if(isTable(object)){var table=document.createElement('tr-ui-b-table');var columns=[];tr.b.iterItems(object[0],function(colName){columns.push({title:colName,value:function(row){return row[colName];}});});table.tableColumns=columns;table.tableRows=object;this.appendElementWithLabel_(label,indent,table,suffix);table.rebuild();return;}
-this.appendElementsForType_(label+'[',object[0],indent,depth+1,maxDepth,object.length>1?',':']'+suffix);for(var i=1;i<object.length;i++){this.appendElementsForType_('',object[i],indent+label.length+1,depth+1,maxDepth,i<object.length-1?',':']'+suffix);}
-return;},appendElementsForObject_:function(label,object,indent,depth,maxDepth,suffix){var keys=tr.b.dictionaryKeys(object);if(keys.length==0){this.appendSimpleText_(label,indent,'{}',suffix);return;}
-this.appendElementsForType_(label+'{'+keys[0]+': ',object[keys[0]],indent,depth,maxDepth,keys.length>1?',':'}'+suffix);for(var i=1;i<keys.length;i++){this.appendElementsForType_(keys[i]+': ',object[keys[i]],indent+label.length+1,depth+1,maxDepth,i<keys.length-1?',':'}'+suffix);}},appendElementWithLabel_:function(label,indent,dataElement,suffix){var row=document.createElement('div');var indentSpan=document.createElement('span');indentSpan.style.whiteSpace='pre';for(var i=0;i<indent;i++)
-indentSpan.textContent+=' ';row.appendChild(indentSpan);var labelSpan=document.createElement('span');labelSpan.textContent=label;row.appendChild(labelSpan);row.appendChild(dataElement);var suffixSpan=document.createElement('span');suffixSpan.textContent=suffix;row.appendChild(suffixSpan);row.dataElement=dataElement;this.$.content.appendChild(row);},appendSimpleText_:function(label,indent,text,suffix){var el=this.ownerDocument.createElement('span');el.textContent=text;this.appendElementWithLabel_(label,indent,el,suffix);return el;}});'use strict';Polymer('tr-ui-a-generic-object-view-with-label',{ready:function(){this.labelEl_=document.createElement('div');this.genericObjectView_=document.createElement('tr-ui-a-generic-object-view');this.shadowRoot.appendChild(this.labelEl_);this.shadowRoot.appendChild(this.genericObjectView_);},get label(){return this.labelEl_.textContent;},set label(label){this.labelEl_.textContent=label;},get object(){return this.genericObjectView_.object;},set object(object){this.genericObjectView_.object=object;}});'use strict';Polymer('tr-ui-a-stack-frame',{ready:function(){this.stackFrame_=undefined;this.$.table.tableColumns=[];this.$.table.showHeader=true;},get stackFrame(){return this.stackFrame_;},set stackFrame(stackFrame){var table=this.$.table;this.stackFrame_=stackFrame;if(stackFrame===undefined){table.tableColumns=[];table.tableRows=[];table.rebuild();return;}
-var hasName=false;var hasTitle=false;table.tableRows=stackFrame.stackTrace;table.tableRows.forEach(function(row){hasName|=row.name!==undefined;hasTitle|=row.title!==undefined;});var cols=[];if(hasName){cols.push({title:'Name',value:function(row){return row.name;}});}
-if(hasTitle){cols.push({title:'Title',value:function(row){return row.title;}});}
-table.tableColumns=cols;table.rebuild();},tableForTesting:function(){return this.$.table;}});'use strict';Polymer('tr-ui-a-single-event-sub-view',{ready:function(){this.currentSelection_=undefined;this.$.table.tableColumns=[{title:'Label',value:function(row){return row.name;},width:'150px'},{title:'Value',width:'100%',value:function(row){return row.value;}}];this.$.table.showHeader=false;},get selection(){return this.currentSelection_;},set selection(selection){if(selection.length!==1)
-throw new Error('Only supports single slices');this.setSelectionWithoutErrorChecks(selection);},setSelectionWithoutErrorChecks:function(selection){this.currentSelection_=selection;this.updateContents_();},getEventRows_:function(event){var rows=[];if(event.error)
-rows.push({name:'Error',value:event.error});if(event.title)
-rows.push({name:'Title',value:event.title});if(event.category)
-rows.push({name:'Category',value:event.category});if(event.model!==undefined){var ufc=event.model.getUserFriendlyCategoryFromEvent(event);if(ufc!==undefined)
-rows.push({name:'User Friendly Category',value:ufc});}
-if(event.name)
-rows.push({name:'Name',value:event.name});var startEl=document.createElement('tr-ui-u-time-stamp-span');startEl.timestamp=event.start;rows.push({name:'Start',value:startEl});if(event.duration){var wallDurationEl=document.createElement('tr-ui-u-time-duration-span');wallDurationEl.duration=event.duration;rows.push({name:'Wall Duration',value:wallDurationEl});}
-if(event.cpuDuration){var cpuDurationEl=document.createElement('tr-ui-u-time-duration-span');cpuDurationEl.duration=event.cpuDuration;rows.push({name:'CPU Duration',value:cpuDurationEl});}
-if(event.subSlices!==undefined&&event.subSlices.length!==0){if(event.selfTime){var selfTimeEl=document.createElement('tr-ui-u-time-duration-span');selfTimeEl.duration=event.selfTime;rows.push({name:'Self Time',value:selfTimeEl});}
-if(event.cpuSelfTime){var cpuSelfTimeEl=document.createElement('tr-ui-u-time-duration-span');cpuSelfTimeEl.duration=event.cpuSelfTime;if(event.cpuSelfTime>event.selfTime){cpuSelfTimeEl.warning=' Note that CPU Self Time is larger than Self Time. '+'This is a known limitation of this system, which occurs '+'due to several subslices, rounding issues, and imprecise '+'time at which we get cpu- and real-time.';}
-rows.push({name:'CPU Self Time',value:cpuSelfTimeEl});}}
-if(event.durationInUserTime){var durationInUserTimeEl=document.createElement('tr-ui-u-time-duration-span');durationInUserTimeEl.duration=event.durationInUserTime;rows.push({name:'Duration (U)',value:durationInUserTimeEl});}
-function createStackFrameEl(sf){var sfEl=document.createElement('tr-ui-a-stack-frame');sfEl.stackFrame=sf;return sfEl;}
-if(event.startStackFrame&&event.endStackFrame){if(event.startStackFrame===event.endStackFrame){rows.push({name:'Start+End Stack Trace',value:createStackFrameEl(event.startStackFrame)});}else{rows.push({name:'Start Stack Trace',value:createStackFrameEl(event.startStackFrame)});rows.push({name:'End Stack Trace',value:createStackFrameEl(event.endStackFrame)});}}else if(event.startStackFrame){rows.push({name:'Start Stack Trace',value:createStackFrameEl(event.startStackFrame)});}else if(event.endStackFrame){rows.push({name:'End Stack Trace',value:createStackFrameEl(event.endStackFrame)});}
-if(event.info){var descriptionEl=tr.ui.b.createDiv({textContent:event.info.description,maxWidth:'300px'});rows.push({name:'Description',value:descriptionEl});if(event.info.docLinks){event.info.docLinks.forEach(function(linkObject){var linkEl=document.createElement('a');linkEl.target='_blank';linkEl.href=linkObject.href;linkEl.textContent=linkObject.textContent;rows.push({name:linkObject.label,value:linkEl});});}}
-if(event.associatedAlerts.length){var alertSubRows=[];event.associatedAlerts.forEach(function(alert){var linkEl=document.createElement('tr-ui-a-analysis-link');linkEl.setSelectionAndContent(function(){return new tr.model.EventSet(alert);},alert.info.description);alertSubRows.push({name:alert.title,value:linkEl});});rows.push({name:'Alerts',value:'',isExpanded:true,subRows:alertSubRows});}
-return rows;},addArgsToRows_:function(rows,args){var n=0;for(var argName in args){n+=1;}
-if(n>0){var subRows=[];for(var argName in args){var argView=document.createElement('tr-ui-a-generic-object-view');argView.object=args[argName];subRows.push({name:argName,value:argView});}
-rows.push({name:'Args',value:'',isExpanded:true,subRows:subRows});}
-return rows;},updateContents_:function(){if(this.currentSelection_===undefined){this.$.table.rows=[];this.$.table.rebuild();return;}
-var event=this.currentSelection_[0];var rows=this.getEventRows_(event);if(event.argsStripped)
-rows.push({name:'Args',value:'Stripped'});else
-this.addArgsToRows_(rows,event.args);this.$.table.tableRows=rows;this.$.table.rebuild();}});'use strict';tr.exportTo('tr.ui.analysis',function(){var FLOW_IN=0x1;var FLOW_OUT=0x2;var FLOW_IN_OUT=FLOW_IN|FLOW_OUT;function FlowClassifier(){this.numEvents_=0;this.eventsByGUID_={};}
-FlowClassifier.prototype={getFS_:function(event){var fs=this.eventsByGUID_[event.guid];if(fs===undefined){this.numEvents_++;fs={state:0,event:event};this.eventsByGUID_[event.guid]=fs;}
-return fs;},addInFlow:function(event){var fs=this.getFS_(event);fs.state|=FLOW_IN;return event;},addOutFlow:function(event){var fs=this.getFS_(event);fs.state|=FLOW_OUT;return event;},hasEvents:function(){return this.numEvents_>0;},get inFlowEvents(){var selection=new tr.model.EventSet();for(var guid in this.eventsByGUID_){var fs=this.eventsByGUID_[guid];if(fs.state===FLOW_IN)
-selection.push(fs.event);}
-return selection;},get outFlowEvents(){var selection=new tr.model.EventSet();for(var guid in this.eventsByGUID_){var fs=this.eventsByGUID_[guid];if(fs.state===FLOW_OUT)
-selection.push(fs.event);}
-return selection;},get internalFlowEvents(){var selection=new tr.model.EventSet();for(var guid in this.eventsByGUID_){var fs=this.eventsByGUID_[guid];if(fs.state===FLOW_IN_OUT)
-selection.push(fs.event);}
-return selection;}};return{FlowClassifier:FlowClassifier};});'use strict';Polymer('tr-ui-a-related-events',{ready:function(){this.eventGroups_=[];this.cancelFunctions_=[];this.$.table.tableColumns=[{title:'Event(s)',value:function(row){var typeEl=document.createElement('span');typeEl.innerText=row.type;if(row.tooltip)
-typeEl.title=row.tooltip;return typeEl;},width:'150px'},{title:'Link',width:'100%',value:function(row){var linkEl=document.createElement('tr-ui-a-analysis-link');if(row.name)
-linkEl.setSelectionAndContent(row.selection,row.name);else
-linkEl.selection=row.selection;return linkEl;}}];},hasRelatedEvents:function(){return(this.eventGroups_&&this.eventGroups_.length>0);},setRelatedEvents:function(eventSet){this.cancelAllTasks_();this.eventGroups_=[];this.addConnectedFlows_(eventSet);this.addConnectedEvents_(eventSet);this.addOverlappingSamples_(eventSet);this.updateContents_();},addConnectedFlows_:function(eventSet){var classifier=new tr.ui.analysis.FlowClassifier();eventSet.forEach(function(slice){if(slice.inFlowEvents){slice.inFlowEvents.forEach(function(flow){classifier.addInFlow(flow);});}
-if(slice.outFlowEvents){slice.outFlowEvents.forEach(function(flow){classifier.addOutFlow(flow);});}});if(!classifier.hasEvents())
-return;var addToEventGroups=function(type,flowEvent){this.eventGroups_.push({type:type,selection:new tr.model.EventSet(flowEvent),name:flowEvent.title});};classifier.inFlowEvents.forEach(addToEventGroups.bind(this,'Incoming flow'));classifier.outFlowEvents.forEach(addToEventGroups.bind(this,'Outgoing flow'));classifier.internalFlowEvents.forEach(addToEventGroups.bind(this,'Internal flow'));},cancelAllTasks_:function(){this.cancelFunctions_.forEach(function(cancelFunction){cancelFunction();});this.cancelFunctions_=[];},addConnectedEvents_:function(eventSet){this.cancelFunctions_.push(this.createEventsLinkIfNeeded_('Preceding events','Add all events that have led to the selected one(s), connected by '+'flow arrows or by call stack.',eventSet,function(event,events){this.addInFlowEvents_(event,events);this.addAncestors_(event,events);if(event.startSlice)
-events.push(event.startSlice);}.bind(this)));this.cancelFunctions_.push(this.createEventsLinkIfNeeded_('Following events','Add all events that have been caused by the selected one(s), '+'connected by flow arrows or by call stack.',eventSet,function(event,events){this.addOutFlowEvents_(event,events);this.addDescendents_(event,events);if(event.endSlice)
-events.push(event.endSlice);}.bind(this)));this.cancelFunctions_.push(this.createEventsLinkIfNeeded_('All connected events','Add all events connected to the selected one(s) by flow arrows or '+'by call stack.',eventSet,function(event,events){this.addInFlowEvents_(event,events);this.addOutFlowEvents_(event,events);this.addAncestors_(event,events);this.addDescendents_(event,events);if(event.startSlice)
-events.push(event.startSlice);if(event.endSlice)
-events.push(event.endSlice);}.bind(this)));},createEventsLinkIfNeeded_:function(title,tooltip,events,addFunction){events=new tr.model.EventSet(events);var lengthBefore=events.length;var task;var isCanceled=false;function addEventsUntilTimeout(startingIndex){if(isCanceled)
-return;var startingTime=window.performance.now();while(startingIndex<events.length){addFunction(events[startingIndex],events);startingIndex++;if(window.performance.now()-startingTime>8){var newTask=new tr.b.Task(addEventsUntilTimeout.bind(this,startingIndex),this);task.after(newTask);task=newTask;return;}}
-if(lengthBefore===events.length)
-return;this.eventGroups_.push({type:title,tooltip:tooltip,selection:events});this.updateContents_();};function cancelTask(){isCanceled=true;}
-task=new tr.b.Task(addEventsUntilTimeout.bind(this,0),this);tr.b.Task.RunWhenIdle(task);return cancelTask;},addInFlowEvents_:function(event,eventSet){if(!event.inFlowEvents)
-return;event.inFlowEvents.forEach(function(e){eventSet.push(e);});},addOutFlowEvents_:function(event,eventSet){if(!event.outFlowEvents)
-return;event.outFlowEvents.forEach(function(e){eventSet.push(e);});},addAncestors_:function(event,eventSet){if(!event.iterateAllAncestors)
-return;event.iterateAllAncestors(function(e){eventSet.push(e);});},addDescendents_:function(event,eventSet){if(!event.iterateAllDescendents)
-return;event.iterateAllDescendents(function(e){eventSet.push(e);});},addOverlappingSamples_:function(eventSet){var samples=new tr.model.EventSet;eventSet.forEach(function(slice){if(!slice.parentContainer||!slice.parentContainer.samples)
-return;var candidates=slice.parentContainer.samples;var range=tr.b.Range.fromExplicitRange(slice.start,slice.start+slice.duration);var filteredSamples=range.filterArray(candidates,function(value){return value.start;});filteredSamples.forEach(function(sample){samples.push(sample);});}.bind(this));if(samples.length>0){this.eventGroups_.push({type:'Overlapping samples',tooltip:'All samples overlapping the selected slice(s).',selection:samples});}},updateContents_:function(){var table=this.$.table;if(this.eventGroups_===undefined)
-table.tableRows=[];else
-table.tableRows=this.eventGroups_.slice();table.rebuild();}});'use strict';Polymer('tr-ui-a-single-thread-slice-sub-view',{get selection(){return this.$.content.selection;},set selection(selection){this.$.content.selection=selection;this.$.relatedEvents.setRelatedEvents(selection);if(this.$.relatedEvents.hasRelatedEvents())
-this.$.relatedEvents.style.display='';else
-this.$.relatedEvents.style.display='none';}});'use strict';Polymer('tr-ui-a-selection-summary-table',{created:function(){this.selection_=new tr.b.Range();},ready:function(){this.$.table.showHeader=false;this.$.table.tableColumns=[{title:'Name',value:function(row){return row.title;},width:'350px'},{title:'Value',width:'100%',value:function(row){return row.value;}}];},get selection(){return this.selection_;},set selection(selection){this.selection_=selection;this.updateContents_();},updateContents_:function(){var selection=this.selection_;var rows=[];var hasRange;if(this.selection_&&(!selection.bounds.isEmpty))
-hasRange=true;else
-hasRange=false;var timeSpanConfig={ownerDocument:this.ownerDocument};rows.push({title:'Selection start',value:hasRange?tr.ui.units.createTimeStampSpan(selection.bounds.min,timeSpanConfig):'<empty>'});rows.push({title:'Selection extent',value:hasRange?tr.ui.units.createTimeDurationSpan(selection.bounds.range,timeSpanConfig):'<empty>'});this.$.table.tableRows=rows;this.$.table.rebuild();}});'use strict';tr.exportTo('tr.ui.analysis',function(){function MultiEventSummary(title,events){this.title=title;this.duration_=undefined;this.selfTime_=undefined;this.events_=events;this.cpuTimesComputed_=false;this.cpuSelfTime_=undefined;this.cpuDuration_=undefined;this.maxDuration_=undefined;this.maxCpuDuration_=undefined;this.maxSelfTime_=undefined;this.maxCpuSelfTime_=undefined;this.untotallableArgs_=[];this.totalledArgs_=undefined;};MultiEventSummary.prototype={set title(title){if(title=='Totals')
-this.totalsRow=true;this.title_=title;},get title(){return this.title_;},get duration(){if(this.duration_===undefined){this.duration_=tr.b.Statistics.sum(this.events_,function(event){return event.duration;});}
-return this.duration_;},get cpuSelfTime(){this.computeCpuTimesIfNeeded_();return this.cpuSelfTime_;},get cpuDuration(){this.computeCpuTimesIfNeeded_();return this.cpuDuration_;},computeCpuTimesIfNeeded_:function(){if(this.cpuTimesComputed_)
-return;this.cpuTimesComputed_=true;var cpuSelfTime=0;var cpuDuration=0;var hasCpuData=false;for(var i=0;i<this.events_.length;i++){var event=this.events_[i];if(event.cpuDuration!==undefined){cpuDuration+=event.cpuDuration;hasCpuData=true;}
-if(event.cpuSelfTime!==undefined){cpuSelfTime+=event.cpuSelfTime;hasCpuData=true;}}
-if(hasCpuData){this.cpuDuration_=cpuDuration;this.cpuSelfTime_=cpuSelfTime;}},get selfTime(){if(this.selfTime_===undefined){this.selfTime_=0;for(var i=0;i<this.events_.length;i++){if(this.events_[i].selfTime!==undefined)
-this.selfTime_+=this.events[i].selfTime;}}
-return this.selfTime_;},get events(){return this.events_;},get numEvents(){return this.events_.length;},get numAlerts(){if(this.numAlerts_===undefined){this.numAlerts_=tr.b.Statistics.sum(this.events_,function(event){return event.associatedAlerts.length;});}
-return this.numAlerts_;},get untotallableArgs(){this.updateArgsIfNeeded_();return this.untotallableArgs_;},get totalledArgs(){this.updateArgsIfNeeded_();return this.totalledArgs_;},get maxDuration(){if(this.maxDuration_===undefined){this.maxDuration_=tr.b.Statistics.max(this.events_,function(event){return event.duration;});}
-return this.maxDuration_;},get maxCpuDuration(){if(this.maxCpuDuration_===undefined){this.maxCpuDuration_=tr.b.Statistics.max(this.events_,function(event){return event.cpuDuration;});}
-return this.maxCpuDuration_;},get maxSelfTime(){if(this.maxSelfTime_===undefined){this.maxSelfTime_=tr.b.Statistics.max(this.events_,function(event){return event.selfTime;});}
-return this.maxSelfTime_;},get maxCpuSelfTime(){if(this.maxCpuSelfTime_===undefined){this.maxCpuSelfTime_=tr.b.Statistics.max(this.events_,function(event){return event.cpuSelfTime;});}
-return this.maxCpuSelfTime_;},updateArgsIfNeeded_:function(){if(this.totalledArgs_!==undefined)
-return;var untotallableArgs={};var totalledArgs={};for(var i=0;i<this.events_.length;i++){var event=this.events_[i];for(var argName in event.args){var argVal=event.args[argName];var type=typeof argVal;if(type!=='number'){untotallableArgs[argName]=true;delete totalledArgs[argName];continue;}
-if(untotallableArgs[argName]){continue;}
-if(totalledArgs[argName]===undefined)
-totalledArgs[argName]=0;totalledArgs[argName]+=argVal;}}
-this.untotallableArgs_=tr.b.dictionaryKeys(untotallableArgs);this.totalledArgs_=totalledArgs;}};return{MultiEventSummary:MultiEventSummary};});'use strict';Polymer('tr-ui-a-multi-event-summary-table',{ready:function(){this.showTotals_=false;this.eventsHaveDuration_=true;this.eventsHaveSubRows_=true;this.eventsByTitle_=undefined;},updateTableColumns_:function(rows,maxValues){var hasCpuData=false;var hasAlerts=false;rows.forEach(function(row){if(row.cpuDuration!==undefined)
-hasCpuData=true;if(row.cpuSelfTime!==undefined)
-hasCpuData=true;if(row.numAlerts)
-hasAlerts=true;});var ownerDocument=this.ownerDocument;var columns=[];columns.push({title:'Name',value:function(row){if(row.title==='Totals')
-return'Totals';var linkEl=document.createElement('tr-ui-a-analysis-link');linkEl.setSelectionAndContent(function(){return new tr.model.EventSet(row.events);},row.title);return linkEl;},width:'350px',cmp:function(rowA,rowB){return rowA.title.localeCompare(rowB.title);}});if(this.eventsHaveDuration_){columns.push({title:'Wall Duration',value:function(row){return tr.ui.units.createTimeDurationSpan(row.duration,{total:row.totalsRow?undefined:maxValues.duration,ownerDocument:ownerDocument,rightAlign:true});},width:'<upated further down>',cmp:function(rowA,rowB){return rowA.duration-rowB.duration;}});}
-if(this.eventsHaveDuration_&&hasCpuData){columns.push({title:'CPU Duration',value:function(row){return tr.ui.units.createTimeDurationSpan(row.cpuDuration,{total:row.totalsRow?undefined:maxValues.cpuDuration,ownerDocument:ownerDocument,rightAlign:true});},width:'<upated further down>',cmp:function(rowA,rowB){return rowA.cpuDuration-rowB.cpuDuration;}});}
-if(this.eventsHaveSubRows_&&this.eventsHaveDuration_){columns.push({title:'Self time',value:function(row){return tr.ui.units.createTimeDurationSpan(row.selfTime,{total:row.totalsRow?undefined:maxValues.selfTime,ownerDocument:ownerDocument,rightAlign:true});},width:'<upated further down>',cmp:function(rowA,rowB){return rowA.selfTime-rowB.selfTime;}});}
-if(this.eventsHaveSubRows_&&this.eventsHaveDuration_&&hasCpuData){columns.push({title:'CPU Self Time',value:function(row){return tr.ui.units.createTimeDurationSpan(row.cpuSelfTime,{total:row.totalsRow?undefined:maxValues.cpuSelfTime,ownerDocument:ownerDocument,rightAlign:true});},width:'<upated further down>',cmp:function(rowA,rowB){return rowA.cpuSelfTime-rowB.cpuSelfTime;}});}
-columns.push({title:'Occurrences',value:function(row){return row.numEvents;},width:'<upated further down>',cmp:function(rowA,rowB){return rowA.numEvents-rowB.numEvents;}});var alertsColumnIndex;if(hasAlerts){columns.push({title:'Num Alerts',value:function(row){return row.numAlerts;},width:'<upated further down>',cmp:function(rowA,rowB){return rowA.numAlerts-rowB.numAlerts;}});alertsColumnIndex=columns.length-1;}
-var colWidthPercentage;if(columns.length==1)
-colWidthPercentage='100%';else
-colWidthPercentage=(100/(columns.length-1)).toFixed(3)+'%';for(var i=1;i<columns.length;i++)
-columns[i].width=colWidthPercentage;this.$.table.tableColumns=columns;if(hasAlerts){this.$.table.sortColumnIndex=alertsColumnIndex;this.$.table.sortDescending=true;}},configure:function(config){if(config.eventsByTitle===undefined)
-throw new Error('Required: eventsByTitle');if(config.showTotals!==undefined)
-this.showTotals_=config.showTotals;else
-this.showTotals_=true;if(config.eventsHaveDuration!==undefined)
-this.eventsHaveDuration_=config.eventsHaveDuration;else
-this.eventsHaveDuration_=true;if(config.eventsHaveSubRows!==undefined)
-this.eventsHaveSubRows_=config.eventsHaveSubRows;else
-this.eventsHaveSubRows_=true;this.eventsByTitle_=config.eventsByTitle;this.updateContents_();},get showTotals(){return this.showTotals_;},set showTotals(showTotals){this.showTotals_=showTotals;this.updateContents_();},get eventsHaveDuration(){return this.eventsHaveDuration_;},set eventsHaveDuration(eventsHaveDuration){this.eventsHaveDuration_=eventsHaveDuration;this.updateContents_();},get eventsHaveSubRows(){return this.eventsHaveSubRows_;},set eventsHaveSubRows(eventsHaveSubRows){this.eventsHaveSubRows_=eventsHaveSubRows;this.updateContents_();},get eventsByTitle(){return this.eventsByTitle_;},set eventsByTitle(eventsByTitle){this.eventsByTitle_=eventsByTitle;this.updateContents_();},get selectionBounds(){return this.selectionBounds_;},set selectionBounds(selectionBounds){this.selectionBounds_=selectionBounds;this.updateContents_();},updateContents_:function(){var eventsByTitle;if(this.eventsByTitle_!==undefined)
-eventsByTitle=this.eventsByTitle_;else
-eventsByTitle=[];var allEvents=[];var rows=[];tr.b.iterItems(eventsByTitle,function(title,eventsOfSingleTitle){allEvents.push.apply(allEvents,eventsOfSingleTitle);var row=new tr.ui.analysis.MultiEventSummary(title,eventsOfSingleTitle);rows.push(row);});this.updateTableColumns_(rows);this.$.table.tableRows=rows;var maxValues={duration:undefined,selfTime:undefined,cpuSelfTime:undefined,cpuDuration:undefined};if(this.eventsHaveDuration){for(var column in maxValues){maxValues[column]=tr.b.Statistics.max(rows,function(event){return event[column];});}}
-var footerRows=[];if(this.showTotals_){var multiEventSummary=new tr.ui.analysis.MultiEventSummary('Totals',allEvents);footerRows.push(multiEventSummary);}
-this.updateTableColumns_(rows,maxValues);this.$.table.tableRows=rows;this.$.table.footerRows=footerRows;this.$.table.rebuild();}});'use strict';Polymer('tr-ui-a-multi-event-details-table',{created:function(){this.selection_=undefined;this.eventsHaveDuration_=true;this.eventsHaveSubRows_=true;},ready:function(){this.initTitleTable_();},get eventsHaveDuration(){return this.eventsHaveDuration_;},set eventsHaveDuration(eventsHaveDuration){this.eventsHaveDuration_=eventsHaveDuration;this.updateContents_();},get eventsHaveSubRows(){return this.eventsHaveSubRows_;},set eventsHaveSubRows(eventsHaveSubRows){this.eventsHaveSubRows_=eventsHaveSubRows;this.updateContents_();},get selection(){return this.selection_;},set selection(selection){this.selection_=selection;this.updateContents_();},updateContents_:function(){var selection=this.selection_;this.updateTitleTable_();if(this.selection_===undefined){this.$.table.tableRows=[];this.$.table.tableFooterRows=[];this.$.table.rebuild();return;}
-var summary=new tr.ui.analysis.MultiEventSummary('Totals',this.selection_);this.updateColumns_(summary);this.updateRows_(summary);this.$.table.rebuild();},initTitleTable_:function(){var table=this.$.titletable;table.showHeader=false;table.tableColumns=[{title:'Title',value:function(row){return row.title;},width:'350px'},{title:'Value',width:'100%',value:function(row){return row.value;}}];},updateTitleTable_:function(){var title;if(this.selection_&&this.selection_.length)
-title=this.selection_[0].title;else
-title='<No selection>';var table=this.$.titletable;table.tableRows=[{title:'Title',value:title}];},updateColumns_:function(summary){var hasCpuData;if(summary.cpuDuration!==undefined)
-hasCpuData=true;if(summary.cpuSelfTime!==undefined)
-hasCpuData=true;var colWidthPercentage;if(hasCpuData)
-colWidthPercentage='20%';else
-colWidthPercentage='33.3333%';var timeSpanConfig={ownerDocument:this.ownerDocument};var columns=[];columns.push({title:'Start',value:function(row){if(row.__proto__===tr.ui.analysis.MultiEventSummary.prototype){return row.title;}
-var linkEl=document.createElement('tr-ui-a-analysis-link');linkEl.setSelectionAndContent(function(){return new tr.model.EventSet(row.event);});linkEl.appendChild(tr.ui.units.createTimeStampSpan(row.start,timeSpanConfig));return linkEl;},width:'350px',cmp:function(rowA,rowB){return rowA.start-rowB.start;}});if(this.eventsHaveDuration_){columns.push({title:'Wall Duration (ms)',value:function(row){return tr.ui.units.createTimeDurationSpan(row.duration,timeSpanConfig);},width:'<updated further down>',cmp:function(rowA,rowB){return rowA.duration-rowB.duration;}});}
-if(this.eventsHaveDuration_&&hasCpuData){columns.push({title:'CPU Duration (ms)',value:function(row){return tr.ui.units.createTimeDurationSpan(row.cpuDuration,timeSpanConfig);},width:'<updated further down>',cmp:function(rowA,rowB){return rowA.cpuDuration-rowB.cpuDuration;}});}
-if(this.eventsHaveSubRows_&&this.eventsHaveDuration_){columns.push({title:'Self time (ms)',value:function(row){return tr.ui.units.createTimeDurationSpan(row.selfTime,timeSpanConfig);},width:'<updated further down>',cmp:function(rowA,rowB){return rowA.selfTime-rowB.selfTime;}});}
-if(this.eventsHaveSubRows_&&this.eventsHaveDuration_&&hasCpuData){columns.push({title:'CPU Self Time (ms)',value:function(row){return tr.ui.units.createTimeDurationSpan(row.cpuSelfTime,timeSpanConfig);},width:'<updated further down>',cmp:function(rowA,rowB){return rowA.cpuSelfTime-rowB.cpuSelfTime;}});}
-var argKeys=tr.b.dictionaryKeys(summary.totalledArgs);argKeys.sort();var otherKeys=summary.untotallableArgs.slice(0);otherKeys.sort();argKeys.push.apply(argKeys,otherKeys);var keysWithColumns=argKeys.slice(0,4);var keysInOtherColumn=argKeys.slice(4);keysWithColumns.forEach(function(argKey){var hasTotal=summary.totalledArgs[argKey];var colDesc={title:'Arg: '+argKey,value:function(row){if(row.__proto__!==tr.ui.analysis.MultiEventSummary.prototype){var argView=document.createElement('tr-ui-a-generic-object-view');argView.object=row.args[argKey];return argView;}
-if(hasTotal)
-return row.totalledArgs[argKey];return'';},width:'<updated further down>'};if(hasTotal){colDesc.cmp=function(rowA,rowB){return rowA.args[argKey]-rowB.args[argKey];}}
-columns.push(colDesc);});if(keysInOtherColumn.length){columns.push({title:'Other Args',value:function(row){if(row.__proto__===tr.ui.analysis.MultiEventSummary.prototype)
-return'';var argView=document.createElement('tr-ui-a-generic-object-view');var obj={};for(var i=0;i<keysInOtherColumn.length;i++)
-obj[keysInOtherColumn[i]]=row.args[keysInOtherColumn[i]];argView.object=obj;return argView;},width:'<updated further down>'});}
-var colWidthPercentage;if(columns.length==1)
-colWidthPercentage='100%';else
-colWidthPercentage=(100/(columns.length-1)).toFixed(3)+'%';for(var i=1;i<columns.length;i++)
-columns[i].width=colWidthPercentage;this.$.table.tableColumns=columns;},updateRows_:function(summary){this.$.table.sortColumnIndex=0;function Row(event){this.event=event;}
-Row.prototype={get start(){return this.event.start;},get duration(){return this.event.duration;},get cpuDuration(){return this.event.cpuDuration;},get selfTime(){return this.event.selfTime;},get cpuSelfTime(){return this.event.cpuSelfTime;},get args(){return this.event.args;}};this.$.table.tableRows=this.selection_.map(function(event){return new Row(event);});this.$.table.footerRows=[summary];}});'use strict';Polymer('tr-ui-a-multi-event-sub-view',{created:function(){this.currentSelection_=undefined;this.eventsHaveDuration_=true;this.eventsHaveSubRows_=true;},set selection(selection){if(selection.length<=1)
-throw new Error('Only supports multiple items');this.setSelectionWithoutErrorChecks(selection);},get selection(){return this.currentSelection_;},setSelectionWithoutErrorChecks:function(selection){this.currentSelection_=selection;this.updateContents_();},get eventsHaveDuration(){return this.eventsHaveDuration_;},set eventsHaveDuration(eventsHaveDuration){this.eventsHaveDuration_=eventsHaveDuration;this.updateContents_();},get eventsHaveSubRows(){return this.eventsHaveSubRows_;},set eventsHaveSubRows(eventsHaveSubRows){this.eventsHaveSubRows_=eventsHaveSubRows;this.updateContents_();},updateContents_:function(){var selection=this.currentSelection_;this.$.content.textContent='';if(!selection)
-return;var eventsByTitle=selection.getEventsOrganizedByTitle();var numTitles=tr.b.dictionaryLength(eventsByTitle);var summaryTableEl=document.createElement('tr-ui-a-multi-event-summary-table');summaryTableEl.configure({showTotals:numTitles>1,eventsByTitle:eventsByTitle,eventsHaveDuration:this.eventsHaveDuration_,eventsHaveSubRows:this.eventsHaveSubRows_});this.$.content.appendChild(summaryTableEl);var selectionSummaryTableEl=document.createElement('tr-ui-a-selection-summary-table');selectionSummaryTableEl.selection=this.currentSelection_;this.$.content.appendChild(selectionSummaryTableEl);if(numTitles===1){var detailsTableEl=document.createElement('tr-ui-a-multi-event-details-table');detailsTableEl.eventsHaveDuration=this.eventsHaveDuration_;detailsTableEl.eventsHaveSubRows=this.eventsHaveSubRows_;detailsTableEl.selection=selection;this.$.content.appendChild(detailsTableEl);}}});'use strict';Polymer('tr-ui-a-multi-thread-slice-sub-view',{created:function(){this.selection_=undefined;},get selection(){return this.selection_;},set selection(selection){this.selection_=selection;if(tr.isExported('tr.ui.e.chrome.cc.RasterTaskSelection')){if(tr.ui.e.chrome.cc.RasterTaskSelection.supports(selection)){var ltvSelection=new tr.ui.e.chrome.cc.RasterTaskSelection(selection);var ltv=new tr.ui.e.chrome.cc.LayerTreeHostImplSnapshotView();ltv.objectSnapshot=ltvSelection.containingSnapshot;ltv.selection=ltvSelection;ltv.extraHighlightsByLayerId=ltvSelection.extraHighlightsByLayerId;this.$.content.textContent='';this.$.content.appendChild(ltv);this.requiresTallView_=true;return;}}
-this.$.content.textContent='';var mesv=document.createElement('tr-ui-a-multi-event-sub-view');mesv.selection=selection;this.$.content.appendChild(mesv);var relatedEvents=document.createElement('tr-ui-a-related-events');relatedEvents.setRelatedEvents(selection);if(relatedEvents.hasRelatedEvents()){this.$.content.appendChild(relatedEvents);}},get requiresTallView(){if(this.$.content.children.length===0)
-return false;var childTagName=this.$.content.children[0].tagName;if(childTagName==='TR-UI-A-MULTI-EVENT-SUB-VIEW')
-return false;return true;}});'use strict';Polymer('tr-ui-a-single-async-slice-sub-view',{get selection(){return this.$.content.selection;},set selection(selection){if(selection.length!==1)
-throw new Error('Only supports single slices');this.$.content.setSelectionWithoutErrorChecks(selection);this.$.relatedEvents.setRelatedEvents(selection);if(this.$.relatedEvents.hasRelatedEvents()){this.$.relatedEvents.style.display='';}else{this.$.relatedEvents.style.display='none';}},getEventRows_:function(event){var rows=this.__proto__.__proto__.getEventRows_(event);rows.splice(0,0,{name:'ID',value:event.id});return rows;},get relatedEventsToHighlight(){if(!this.currentSelection_)
-return undefined;return this.currentSelection_[0].associatedEvents;}});'use strict';Polymer('tr-ui-a-multi-async-slice-sub-view',{get selection(){return this.$.content.selection;},set selection(selection){this.$.content.selection=selection;this.$.relatedEvents.setRelatedEvents(selection);if(this.$.relatedEvents.hasRelatedEvents()){this.$.relatedEvents.style.display='';}else{this.$.relatedEvents.style.display='none';}},get relatedEventsToHighlight(){if(!this.$.content.selection)
-return undefined;var selection=new tr.model.EventSet();this.$.content.selection.forEach(function(asyncEvent){if(!asyncEvent.associatedEvents)
-return;asyncEvent.associatedEvents.forEach(function(event){selection.push(event);});});if(selection.length)
-return selection;return undefined;}});'use strict';Polymer('tr-ui-a-single-cpu-slice-sub-view',{created:function(){this.currentSelection_=undefined;},get selection(){return this.currentSelection_;},set selection(selection){if(selection.length!==1)
-throw new Error('Only supports single slices');if(!(selection[0]instanceof tr.model.CpuSlice))
-throw new Error('Only supports thread time slices');this.currentSelection_=selection;var cpuSlice=selection[0];var thread=cpuSlice.threadThatWasRunning;var shadowRoot=this.shadowRoot;if(thread){shadowRoot.querySelector('#process-name').textContent=thread.parent.userFriendlyName;shadowRoot.querySelector('#thread-name').textContent=thread.userFriendlyName;}else{shadowRoot.querySelector('#process-name').parentElement.style.display='none';shadowRoot.querySelector('#thread-name').textContent=cpuSlice.title;}
-shadowRoot.querySelector('#start').timestamp=cpuSlice.start;shadowRoot.querySelector('#duration').duration=cpuSlice.duration;var runningThreadEl=shadowRoot.querySelector('#running-thread');var timeSlice=cpuSlice.getAssociatedTimeslice();if(!timeSlice){runningThreadEl.parentElement.style.display='none';}else{var threadLink=document.createElement('tr-ui-a-analysis-link');threadLink.selection=new tr.model.EventSet(timeSlice);threadLink.textContent='Click to select';runningThreadEl.parentElement.style.display='';runningThreadEl.textContent='';runningThreadEl.appendChild(threadLink);}
-shadowRoot.querySelector('#args').object=cpuSlice.args;}});'use strict';Polymer('tr-ui-a-multi-cpu-slice-sub-view',{ready:function(){this.$.content.eventsHaveSubRows=false;},get selection(){return this.$.content.selection;},set selection(selection){this.$.content.setSelectionWithoutErrorChecks(selection);}});'use strict';Polymer('tr-ui-a-single-thread-time-slice-sub-view',{created:function(){this.currentSelection_=undefined;},get selection(){return this.currentSelection_;},set selection(selection){if(selection.length!==1)
-throw new Error('Only supports single slices');if(!(selection[0]instanceof tr.model.ThreadTimeSlice))
-throw new Error('Only supports thread time slices');this.currentSelection_=selection;var timeSlice=selection[0];var thread=timeSlice.thread;var shadowRoot=this.shadowRoot;shadowRoot.querySelector('#state').textContent=timeSlice.title;var stateColor=tr.b.ColorScheme.colorsAsStrings[timeSlice.colorId];shadowRoot.querySelector('#state').style.backgroundColor=stateColor;shadowRoot.querySelector('#process-name').textContent=thread.parent.userFriendlyName;shadowRoot.querySelector('#thread-name').textContent=thread.userFriendlyName;shadowRoot.querySelector('#start').timestamp=timeSlice.start;shadowRoot.querySelector('#duration').duration=timeSlice.duration;var onCpuEl=shadowRoot.querySelector('#on-cpu');onCpuEl.textContent='';var runningInsteadEl=shadowRoot.querySelector('#running-instead');if(timeSlice.cpuOnWhichThreadWasRunning){runningInsteadEl.parentElement.removeChild(runningInsteadEl);var cpuLink=document.createElement('tr-ui-a-analysis-link');cpuLink.selection=new tr.model.EventSet(timeSlice.getAssociatedCpuSlice());cpuLink.textContent=timeSlice.cpuOnWhichThreadWasRunning.userFriendlyName;onCpuEl.appendChild(cpuLink);}else{onCpuEl.parentElement.removeChild(onCpuEl);var cpuSliceThatTookCpu=timeSlice.getCpuSliceThatTookCpu();if(cpuSliceThatTookCpu){var cpuLink=document.createElement('tr-ui-a-analysis-link');cpuLink.selection=new tr.model.EventSet(cpuSliceThatTookCpu);if(cpuSliceThatTookCpu.thread)
-cpuLink.textContent=cpuSliceThatTookCpu.thread.userFriendlyName;else
-cpuLink.textContent=cpuSliceThatTookCpu.title;runningInsteadEl.appendChild(cpuLink);}else{runningInsteadEl.parentElement.removeChild(runningInsteadEl);}}
-var argsEl=shadowRoot.querySelector('#args');if(tr.b.dictionaryKeys(timeSlice.args).length>0){var argsView=document.createElement('tr-ui-a-generic-object-view');argsView.object=timeSlice.args;argsEl.parentElement.style.display='';argsEl.textContent='';argsEl.appendChild(argsView);}else{argsEl.parentElement.style.display='none';}}});'use strict';Polymer('tr-ui-a-multi-thread-time-slice-sub-view',{ready:function(){this.$.content.eventsHaveSubRows=false;},get selection(){return this.$.content.selection;},set selection(selection){this.$.content.setSelectionWithoutErrorChecks(selection);}});'use strict';Polymer('tr-ui-a-single-instant-event-sub-view',{created:function(){this.currentSelection_=undefined;},set selection(selection){this.$.content.textContent='';var realView=document.createElement('tr-ui-a-single-event-sub-view');realView.setSelectionWithoutErrorChecks(selection);this.$.content.appendChild(realView);this.currentSelection_=selection;},get selection(){return this.currentSelection_;}});'use strict';Polymer('tr-ui-a-multi-instant-event-sub-view',{created:function(){this.currentSelection_=undefined;},set selection(selection){this.$.content.textContent='';var realView=document.createElement('tr-ui-a-multi-event-sub-view');realView.eventsHaveDuration=false;realView.eventsHaveSubRows=false;this.$.content.appendChild(realView);realView.setSelectionWithoutErrorChecks(selection);this.currentSelection_=selection;},get selection(){return this.currentSelection_;}});'use strict';(function(){var COUNTER_SAMPLE_TABLE_COLUMNS=[{title:'Counter',width:'150px',value:function(row){return row.counter;}},{title:'Series',width:'150px',value:function(row){return row.series;}},{title:'Time',width:'150px',value:function(row){return row.start;}},{title:'Value',width:'100%',value:function(row){return row.value;}}];Polymer('tr-ui-a-counter-sample-sub-view',{ready:function(){this.currentSelection_=undefined;this.$.table.tableColumns=COUNTER_SAMPLE_TABLE_COLUMNS;},get selection(){return this.currentSelection_;},set selection(selection){this.currentSelection_=selection;this.updateContents_();},updateContents_:function(){this.$.table.tableRows=this.selection?this.getRows_(this.selection.toArray()):[];this.$.table.rebuild();},getRows_:function(samples){var samplesByCounter=tr.b.group(samples,function(sample){return sample.series.counter.guid;});var rows=[];tr.b.iterItems(samplesByCounter,function(unused,counterSamples){var samplesBySeries=tr.b.group(counterSamples,function(sample){return sample.series.guid;});tr.b.iterItems(samplesBySeries,function(unused,seriesSamples){var seriesRows=this.getRowsForSamples_(seriesSamples);seriesRows[0].counter=seriesSamples[0].series.counter.name;seriesRows[0].series=seriesSamples[0].series.name;if(seriesRows.length>1){seriesRows[0].subRows=seriesRows.slice(1);seriesRows[0].isExpanded=true;}
-rows.push(seriesRows[0]);},this);},this);return rows;},getRowsForSamples_:function(samples){return samples.map(function(sample){return{start:sample.timestamp,value:sample.value};});}});})();'use strict';Polymer('tr-ui-a-single-flow-event-sub-view',{getEventRows_:function(event){var rows=this.__proto__.__proto__.getEventRows_(event);rows.splice(0,0,{name:'ID',value:event.id});function createLinkTo(slice){var linkEl=document.createElement('tr-ui-a-analysis-link');linkEl.setSelectionAndContent(function(){return new tr.model.EventSet(slice);});linkEl.textContent=slice.userFriendlyName;return linkEl;}
-rows.push({name:'From',value:createLinkTo(event.startSlice)});rows.push({name:'To',value:createLinkTo(event.endSlice)});return rows;}});'use strict';Polymer('tr-ui-a-multi-flow-event-sub-view',{ready:function(){this.$.content.eventsHaveDuration=false;this.$.content.eventsHaveSubRows=false;},set selection(selection){this.$.content.selection=selection;},get selection(){return this.$.content.selection;}});'use strict';tr.exportTo('tr.ui.analysis',function(){var ObjectInstanceView=tr.ui.b.define('object-instance-view');ObjectInstanceView.prototype={__proto__:HTMLUnknownElement.prototype,decorate:function(){this.objectInstance_=undefined;},get requiresTallView(){return true;},set modelEvent(obj){this.objectInstance=obj;},get modelEvent(){return this.objectInstance;},get objectInstance(){return this.objectInstance_;},set objectInstance(i){this.objectInstance_=i;this.updateContents();},updateContents:function(){throw new Error('Not implemented');}};var options=new tr.b.ExtensionRegistryOptions(tr.b.TYPE_BASED_REGISTRY_MODE);options.mandatoryBaseClass=ObjectInstanceView;options.defaultMetadata={showInTrackView:true};tr.b.decorateExtensionRegistry(ObjectInstanceView,options);return{ObjectInstanceView:ObjectInstanceView};});'use strict';Polymer('tr-ui-a-single-object-instance-sub-view',{created:function(){this.currentSelection_=undefined;},get requiresTallView(){if(this.$.content.children.length===0)
-return false;if(this.$.content.children[0]instanceof
-tr.ui.analysis.ObjectInstanceView)
-return this.$.content.children[0].requiresTallView;},get selection(){return this.currentSelection_;},set selection(selection){if(selection.length!==1)
-throw new Error('Only supports single item selections');if(!(selection[0]instanceof tr.model.ObjectInstance))
-throw new Error('Only supports object instances');this.$.content.textContent='';this.currentSelection_=selection;var instance=selection[0];var typeInfo=tr.ui.analysis.ObjectInstanceView.getTypeInfo(instance.category,instance.typeName);if(typeInfo){var customView=new typeInfo.constructor();this.$.content.appendChild(customView);customView.modelEvent=instance;}else{this.appendGenericAnalysis_(instance);}},appendGenericAnalysis_:function(instance){var html='';html+='<div class="title">'+
-instance.typeName+' '+
-instance.id+'</div>\n';html+='<table>';html+='<tr>';html+='<tr><td>creationTs:</td><td>'+
-instance.creationTs+'</td></tr>\n';if(instance.deletionTs!=Number.MAX_VALUE){html+='<tr><td>deletionTs:</td><td>'+
-instance.deletionTs+'</td></tr>\n';}else{html+='<tr><td>deletionTs:</td><td>not deleted</td></tr>\n';}
-html+='<tr><td>snapshots:</td><td id="snapshots"></td></tr>\n';html+='</table>';this.$.content.innerHTML=html;var snapshotsEl=this.$.content.querySelector('#snapshots');instance.snapshots.forEach(function(snapshot){var snapshotLink=document.createElement('tr-ui-a-analysis-link');snapshotLink.selection=new tr.model.EventSet(snapshot);snapshotsEl.appendChild(snapshotLink);});}});'use strict';tr.exportTo('tr.ui.analysis',function(){var ObjectSnapshotView=tr.ui.b.define('object-snapshot-view');ObjectSnapshotView.prototype={__proto__:HTMLUnknownElement.prototype,decorate:function(){this.objectSnapshot_=undefined;},get requiresTallView(){return true;},set modelEvent(obj){this.objectSnapshot=obj;},get modelEvent(){return this.objectSnapshot;},get objectSnapshot(){return this.objectSnapshot_;},set objectSnapshot(i){this.objectSnapshot_=i;this.updateContents();},updateContents:function(){throw new Error('Not implemented');}};var options=new tr.b.ExtensionRegistryOptions(tr.b.TYPE_BASED_REGISTRY_MODE);options.mandatoryBaseClass=ObjectSnapshotView;options.defaultMetadata={showInstances:true,showInTrackView:true};tr.b.decorateExtensionRegistry(ObjectSnapshotView,options);return{ObjectSnapshotView:ObjectSnapshotView};});'use strict';Polymer('tr-ui-a-single-object-snapshot-sub-view',{created:function(){this.currentSelection_=undefined;},get requiresTallView(){if(this.children.length===0)
-return false;if(this.children[0]instanceof tr.ui.analysis.ObjectSnapshotView)
-return this.children[0].requiresTallView;},get selection(){return this.currentSelection_;},set selection(selection){if(selection.length!==1)
-throw new Error('Only supports single item selections');if(!(selection[0]instanceof tr.model.ObjectSnapshot))
-throw new Error('Only supports object instances');this.textContent='';this.currentSelection_=selection;var snapshot=selection[0];var typeInfo=tr.ui.analysis.ObjectSnapshotView.getTypeInfo(snapshot.objectInstance.category,snapshot.objectInstance.typeName);if(typeInfo){var customView=new typeInfo.constructor();this.appendChild(customView);customView.modelEvent=snapshot;}else{this.appendGenericAnalysis_(snapshot);}},appendGenericAnalysis_:function(snapshot){var instance=snapshot.objectInstance;this.textContent='';var titleEl=document.createElement('div');titleEl.classList.add('title');titleEl.appendChild(document.createTextNode('Snapshot of '));this.appendChild(titleEl);var instanceLinkEl=document.createElement('tr-ui-a-analysis-link');instanceLinkEl.selection=new tr.model.EventSet(instance);titleEl.appendChild(instanceLinkEl);titleEl.appendChild(document.createTextNode(' @ '));titleEl.appendChild(tr.ui.units.createTimeStampSpan(snapshot.ts,{ownerDocument:this.ownerDocument}));var tableEl=document.createElement('table');this.appendChild(tableEl);var rowEl=document.createElement('tr');tableEl.appendChild(rowEl);var labelEl=document.createElement('td');labelEl.textContent='args:';rowEl.appendChild(labelEl);var argsEl=document.createElement('td');argsEl.id='args';rowEl.appendChild(argsEl);var objectViewEl=document.createElement('tr-ui-a-generic-object-view');objectViewEl.object=snapshot.args;argsEl.appendChild(objectViewEl);}});'use strict';Polymer('tr-ui-a-multi-object-sub-view',{created:function(){this.currentSelection_=undefined;},ready:function(){this.$.content.showHeader=false;},get selection(){return this.currentSelection_;},set selection(selection){this.currentSelection_=selection;var objectEvents=tr.b.asArray(selection).sort(tr.b.Range.compareByMinTimes);var timeSpanConfig={ownerDocument:this.ownerDocument};var table=this.$.content;table.tableColumns=[{title:'First',value:function(event){if(event instanceof tr.model.ObjectSnapshot)
-return tr.ui.units.createTimeStampSpan(event.ts,timeSpanConfig);var spanEl=document.createElement('span');spanEl.appendChild(tr.ui.units.createTimeStampSpan(event.creationTs,timeSpanConfig));spanEl.appendChild(tr.ui.b.createSpan({textContent:'-',marginLeft:'4px',marginRight:'4px'}));if(event.deletionTs!=Number.MAX_VALUE){spanEl.appendChild(tr.ui.units.createTimeStampSpan(event.deletionTs,timeSpanConfig));}
-return spanEl;},width:'200px'},{title:'Second',value:function(event){var linkEl=document.createElement('tr-ui-a-analysis-link');linkEl.setSelectionAndContent(function(){return new tr.model.EventSet(event);},event.userFriendlyName);return linkEl;},width:'100%'}];table.tableRows=objectEvents;table.rebuild();}});'use strict';Polymer('tr-ui-a-single-sample-sub-view',{created:function(){this.currentSelection_=undefined;},ready:function(){this.$.content.tableColumns=[{title:'FirstColumn',value:function(row){return row.title;},width:'250px'},{title:'SecondColumn',value:function(row){return row.value;},width:'100%'}];this.$.content.showHeader=false;},get selection(){return this.currentSelection_;},set selection(selection){this.currentSelection_=selection;if(this.currentSelection_===undefined){this.$.content.tableRows=[];return;}
-var sample=this.currentSelection_[0];var table=this.$.content;var rows=[];rows.push({title:'Title',value:sample.title});rows.push({title:'Sample time',value:tr.ui.units.createTimeStampSpan(sample.start,{ownerDocument:this.ownerDocument})});var sfEl=document.createElement('tr-ui-a-stack-frame');sfEl.stackFrame=sample.leafStackFrame;rows.push({title:'Stack trace',value:sfEl});table.tableRows=rows;table.rebuild();}});'use strict';tr.exportTo('tr.ui.analysis',function(){function zFunction(list){var n=list.length;if(n===0)
-return[];var z=new Array(n);z[0]=0;for(var i=1,left=0,right=0;i<n;++i){var maxLength;if(i<=right)
-maxLength=Math.min(right-i+1,z[i-left]);else
-maxLength=0;while(i+maxLength<n&&list[maxLength]===list[i+maxLength])
-++maxLength;if(i+maxLength-1>right){left=i;right=i+maxLength-1;}
-z[i]=maxLength;}
-return z;}
-function StackFrameTreeNode(title,opt_frame){this.title=title;this.frame=opt_frame;this.parent=undefined;this.children=[];this.childMap=new Map();this.total=0;this.self=0;}
-StackFrameTreeNode.prototype={get subRows(){return this.children;},get stackTraceTitles(){var titles=[];for(var currentNode=this;currentNode!==undefined;currentNode=currentNode.parent){titles.push(currentNode.title);}
-return titles;},getOrCreateChild:function(title,opt_frame){var childNode=this.childMap.get(title);if(childNode!==undefined)
-return childNode;childNode=new StackFrameTreeNode(title,opt_frame);childNode.parent=this;this.children.push(childNode);this.childMap.set(title,childNode);return childNode;},addStackTrace:function(trace,value,opt_traceContainsRootFrame){var currentNode=this;var startIndex=trace.length-(opt_traceContainsRootFrame?2:1);for(var i=startIndex;i>=0;i--){currentNode.total+=value;var stackFrame=trace[i];currentNode=currentNode.getOrCreateChild(stackFrame.title,stackFrame);}
-currentNode.total+=value;currentNode.self+=value;},convertToBottomUpView:function(){var bottomUpViewRoot=new StackFrameTreeNode(this.title,this.frame);bottomUpViewRoot.total=this.total;bottomUpViewRoot.self=this.self;this.addChildrenToBottomUpViewRecursively_(bottomUpViewRoot);return bottomUpViewRoot;},addChildrenToBottomUpViewRecursively_:function(bottomUpViewRoot){this.children.forEach(function(child){child.addToBottomUpViewRecursively_(bottomUpViewRoot);});},addToBottomUpViewRecursively_:function(bottomUpViewRoot){var remainingRecursiveSuffixLength=this.calculateRecursiveSuffixLength_();var bottomUpParentNode=bottomUpViewRoot;for(var topDownNode=this;topDownNode.parent!==undefined;topDownNode=topDownNode.parent){var bottomUpChildNode=bottomUpParentNode.getOrCreateChild(topDownNode.title,topDownNode.frame);bottomUpChildNode.self+=this.self;if(remainingRecursiveSuffixLength>0)
-remainingRecursiveSuffixLength--;else
-bottomUpChildNode.total+=this.total;bottomUpParentNode=bottomUpChildNode;}
-this.addChildrenToBottomUpViewRecursively_(bottomUpViewRoot);},calculateRecursiveSuffixLength_:function(){var maxLengths=zFunction(this.stackTraceTitles);var recursiveSuffixLength=0;for(var i=0;i<maxLengths.length;i++)
-recursiveSuffixLength=Math.max(recursiveSuffixLength,maxLengths[i]);return recursiveSuffixLength;}};return{StackFrameTreeNode:StackFrameTreeNode,zFunction:zFunction};});'use strict';(function(){Polymer('tr-ui-a-multi-sample-sub-view',{created:function(){this.viewOption_=undefined;this.selection_=undefined;},ready:function(){var viewSelector=tr.ui.b.createSelector(this,'viewOption','tracing.ui.analysis.multi_sample_sub_view','TOPDOWNVIEW',[{label:'Tree (Top Down)',value:'TOPDOWNVIEW'},{label:'Heavy (Bottom Up)',value:'BOTTOMUPVIEW'}]);this.$.control.appendChild(viewSelector);},get selection(){return this.selection_;},set selection(selection){this.selection_=selection;this.updateContents_();},get viewOption(){return this.viewOption_;},set viewOption(viewOption){this.viewOption_=viewOption;this.updateContents_();},createSamplingSummary_:function(selection,viewOption){var root=new tr.ui.analysis.StackFrameTreeNode('(root)',undefined);var samples=selection.getEventsOrganizedByBaseType().sample;samples.forEach(function(sample){root.addStackTrace(sample.stackTrace,1);});switch(viewOption){case'TOPDOWNVIEW':return root;case'BOTTOMUPVIEW':return root.convertToBottomUpView();default:throw new Error('Unknown sampling summary view option: \''+viewOption+'\'');}},updateContents_:function(){if(this.selection===undefined){this.$.table.tableColumns=[];this.$.table.tableRows=[];this.$.table.rebuild();return;}
-var samplingData=this.createSamplingSummary_(this.selection,this.viewOption);var columns=[this.createPercentColumn_('Total',samplingData.total),this.createSamplesColumn_('Total'),this.createPercentColumn_('Self',samplingData.total),this.createSamplesColumn_('Self'),{title:'Symbol',value:function(row){return row.title;},width:'250px',cmp:function(a,b){return a.title.localeCompare(b.title);},showExpandButtons:true}];this.$.table.tableColumns=columns;this.$.table.sortColumnIndex=1;this.$.table.sortDescending=true;this.$.table.tableRows=samplingData.subRows;this.$.table.rebuild();},createPercentColumn_:function(title,samplingDataTotal){var field=title.toLowerCase();return{title:title+' percent',value:function(row){var percent=row[field]/samplingDataTotal;var span=document.createElement('tr-ui-u-scalar-span');span.value=(percent*100).toFixed(2);span.percentage=percent;span.unit=tr.b.u.Units.unitlessNumber;return span;}.bind(this),width:'60px',cmp:function(a,b){return a[field]-b[field];}};},createSamplesColumn_:function(title){var field=title.toLowerCase();return{title:title+' samples',value:function(row){return row[field];},width:'60px',cmp:function(a,b){return a[field]-b[field];}};}});})();'use strict';Polymer('tr-ui-a-single-interaction-record-sub-view',{created:function(){this.currentSelection_=undefined;},get selection(){return this.currentSelection_;},set selection(selection){this.textContent='';var realView=document.createElement('tr-ui-a-single-event-sub-view');this.appendChild(realView);realView.setSelectionWithoutErrorChecks(selection);this.currentSelection_=selection;},get relatedEventsToHighlight(){if(!this.currentSelection_)
-return undefined;return this.currentSelection_[0].associatedEvents;}});'use strict';Polymer('tr-ui-a-multi-interaction-record-sub-view',{created:function(){this.currentSelection_=undefined;},set selection(selection){this.currentSelection_=selection;this.textContent='';var realView=document.createElement('tr-ui-a-multi-event-sub-view');this.appendChild(realView);realView.setSelectionWithoutErrorChecks(selection);this.currentSelection_=selection;},get selection(){return this.currentSelection_;},get relatedEventsToHighlight(){if(!this.currentSelection_)
-return undefined;var selection=new tr.model.EventSet();this.currentSelection_.forEach(function(ir){ir.associatedEvents.forEach(function(event){selection.push(event);});});return selection;}});'use strict';Polymer('tr-ui-a-alert-sub-view',{ready:function(){this.currentSelection_=undefined;this.$.table.tableColumns=[{title:'Label',value:function(row){return row.name;},width:'150px'},{title:'Value',width:'100%',value:function(row){return row.value;}}];this.$.table.showHeader=false;},get selection(){return this.currentSelection_;},set selection(selection){this.currentSelection_=selection;this.updateContents_();},getRowsForSingleAlert_:function(alert){var rows=[];for(var argName in alert.args){var argView=document.createElement('tr-ui-a-generic-object-view');argView.object=alert.args[argName];rows.push({name:argName,value:argView});}
-if(alert.associatedEvents.length){alert.associatedEvents.forEach(function(event,i){var linkEl=document.createElement('tr-ui-a-analysis-link');linkEl.setSelectionAndContent(function(){return event;},event.title);var valueString='';if(event instanceof tr.model.TimedEvent)
-valueString='took '+event.duration.toFixed(2)+'ms';rows.push({name:linkEl,value:valueString});});}
-var descriptionEl=tr.ui.b.createDiv({textContent:alert.info.description,maxWidth:'300px'});rows.push({name:'Description',value:descriptionEl});if(alert.info.docLinks){alert.info.docLinks.forEach(function(linkObject){var linkEl=document.createElement('a');linkEl.target='_blank';linkEl.href=linkObject.href;linkEl.textContent=linkObject.textContent;rows.push({name:linkObject.label,value:linkEl});});}
-return rows;},getRowsForAlerts_:function(alerts){if(alerts.length==1){var rows=[{name:'Alert',value:alerts[0].title}];var detailRows=this.getRowsForSingleAlert_(alerts[0]);rows.push.apply(rows,detailRows);return rows;}else{return alerts.map(function(alert){return{name:'Alert',value:alert.title,isExpanded:alerts.size<10,subRows:this.getRowsForSingleAlert_(alert)};},this);}},updateContents_:function(){if(this.currentSelection_===undefined){this.$.table.rows=[];this.$.table.rebuild();return;}
-var alerts=this.currentSelection_;this.$.table.tableRows=this.getRowsForAlerts_(alerts);this.$.table.rebuild();},get relatedEventsToHighlight(){if(!this.currentSelection_)
-return undefined;return this.currentSelection_[0].associatedEvents;}});'use strict';Polymer('tr-ui-a-single-frame-sub-view',{ready:function(){this.currentSelection_=undefined;},get selection(){return this.currentSelection_;},set selection(selection){if(selection.length!=1)
-throw new Error('Only supports single frame!');this.currentSelection_=selection;this.$.asv.selection=selection[0].associatedAlerts;},get relatedEventsToHighlight(){if(!this.currentSelection_)
-return undefined;return this.currentSelection_[0].associatedEvents;}});'use strict';Polymer('tr-ui-a-multi-frame-sub-view',{created:function(){this.currentSelection_=undefined;},set selection(selection){this.textContent='';var realView=document.createElement('tr-ui-a-multi-event-sub-view');realView.eventsHaveDuration=false;realView.eventsHaveSubRows=false;this.appendChild(realView);realView.setSelectionWithoutErrorChecks(selection);this.currentSelection_=selection;},get selection(){return this.currentSelection_;},get relatedEventsToHighlight(){if(!this.currentSelection_)
-return undefined;var selection=new tr.model.EventSet();this.currentSelection_.forEach(function(frameEvent){frameEvent.associatedEvents.forEach(function(event){selection.push(event);});});return selection;}});'use strict';tr.exportTo('tr.ui.analysis',function(){var NO_BREAK_SPACE=String.fromCharCode(160);var RIGHTWARDS_ARROW=String.fromCharCode(8594);var COLLATOR=new Intl.Collator(undefined,{numeric:true});function TitleColumn(title){this.title=title;}
-TitleColumn.prototype={supportsCellSelection:false,value:function(row){var formattedTitle=this.formatTitle(row);var defined=row.defined;if(defined===undefined||defined.length===0)
-return formattedTitle;var firstDefined=defined[0];var lastDefined=defined[defined.length-1];var changeDefinedCount=0;for(var i=1;i<defined.length;i++){if(defined[i]!==defined[i-1])
-changeDefinedCount++;}
-var color=undefined;var prefix=undefined;if(!firstDefined&&lastDefined){color='red';prefix='+++';}else if(firstDefined&&!lastDefined){color='green';prefix='---';}
-if(changeDefinedCount>1){color='purple';}
-if(color===undefined&&prefix===undefined)
-return formattedTitle;var titleEl=document.createElement('span');if(prefix!==undefined){var prefixEl=tr.ui.b.createSpan({textContent:prefix});prefixEl.style.fontFamily='monospace';titleEl.appendChild(prefixEl);titleEl.appendChild(tr.ui.b.asHTMLOrTextNode(NO_BREAK_SPACE));}
-if(color!==undefined)
-titleEl.style.color=color;titleEl.appendChild(tr.ui.b.asHTMLOrTextNode(formattedTitle));return titleEl;},formatTitle:function(row){return row.title;},cmp:function(rowA,rowB){return COLLATOR.compare(rowA.title,rowB.title);}};function MemoryColumn(name,units,cellGetter,aggregationMode){this.name=name;this.units=units;this.cell=cellGetter;this.aggregationMode=aggregationMode;}
-MemoryColumn.fromRows=function(rows,cellKey,aggregationMode,opt_customColumnConstructorRules){var columnTraits={};function gatherTraits(row){if(row===undefined)
-return;var attrCells=row[cellKey];if(attrCells===undefined)
-return;tr.b.iterItems(attrCells,function(attrName,attrCell){if(attrCell===undefined)
-return;var attrValues=attrCell.attrs;if(attrValues===undefined)
-return;var existingTraits=columnTraits[attrName];attrValues.forEach(function(attrValue){if(attrValue===undefined)
-return;if(existingTraits===undefined){columnTraits[attrName]=existingTraits={constructor:attrValue.constructor,units:attrValue.units};return;}
-if(existingTraits.constructor!==attrValue.constructor||existingTraits.units!==attrValue.units){existingTraits.constructor=tr.model.UnknownAttribute;existingTraits.units=undefined;}});});if(row.subRows!==undefined)
-row.subRows.forEach(gatherTraits);};rows.forEach(gatherTraits);var columns=[];tr.b.iterItems(columnTraits,function(columnName,columnTraits){var cellGetter=fieldGetter(cellKey,columnName);var constructor=undefined;if(opt_customColumnConstructorRules!==undefined){var matchingRule=MemoryColumn.findMatchingRule(columnName,opt_customColumnConstructorRules);if(matchingRule!==undefined)
-constructor=matchingRule.columnConstructor;}
-if(constructor===undefined)
-constructor=MemoryColumn.constructorFromAttributeTraits(columnTraits);columns.push(new constructor(columnName,columnTraits.units,cellGetter,aggregationMode));});return columns;};MemoryColumn.constructorFromAttributeTraits=function(traits){if(traits.constructor===tr.model.ScalarAttribute)
-return ScalarMemoryColumn;else
-return MemoryColumn;};MemoryColumn.spaceEqually=function(columns){var columnWidth=(100/columns.length).toFixed(3)+'%';columns.forEach(function(column){column.width=columnWidth;});};MemoryColumn.findMatchingRule=function(name,rules){for(var i=0;i<rules.length;i++){var rule=rules[i];if(MemoryColumn.nameMatchesCondition(name,rule.condition))
-return rule;}
-return undefined;};MemoryColumn.nameMatchesCondition=function(name,condition){if(condition===undefined)
-return true;if(typeof(condition)==='string')
-return name===condition;return condition.test(name);};MemoryColumn.sortByImportance=function(columns,importanceRules){var positions=columns.map(function(column,srcIndex){return{importance:column.getImportance(importanceRules),column:column};});positions.sort(function(a,b){if(a.importance===b.importance)
-return COLLATOR.compare(a.column.name,b.column.name);return b.importance-a.importance;});positions.forEach(function(position,dstIndex){columns[dstIndex]=position.column;});};MemoryColumn.columnNamesToImportanceRules=function(columnNames){return columnNames.map(function(columnName,columnIndex){return{condition:columnName,importance:columnNames.length-columnIndex};});};MemoryColumn.iconFromAttributeInfoType=function(type){switch(type){case tr.model.AttributeInfoType.WARNING:return{symbol:String.fromCharCode(9888),color:'red'};case tr.model.AttributeInfoType.LINK:return{symbol:String.fromCharCode(9903)};case tr.model.AttributeInfoType.MEMORY_OWNER:return{symbol:String.fromCharCode(8702),color:'green'};case tr.model.AttributeInfoType.MEMORY_OWNED:return{symbol:String.fromCharCode(8701),color:'green'};case tr.model.AttributeInfoType.OVERALL_VALUE:return{symbol:String.fromCharCode(8614)};case tr.model.AttributeInfoType.RECENT_VALUE:return{symbol:String.fromCharCode(8618)};case tr.model.AttributeInfoType.HAS_HEAP_DUMP:return{symbol:String.fromCharCode(9776)};default:return{symbol:String.fromCharCode(9432),color:'blue'};}
-throw new Error('Unreachable');};MemoryColumn.AggregationMode={DIFF:0,MAX:1};MemoryColumn.prototype={get title(){return this.name;},attrs:function(row){var cell=this.cell(row);if(cell===undefined)
-return undefined;return cell.attrs;},value:function(row){var attrs=this.attrs(row);if(this.hasAllRelevantAttrsUndefined(attrs))
-return'';return this.formatAttributes(attrs);},hasAllRelevantAttrsUndefined:function(attrs){if(attrs===undefined)
-return true;switch(this.aggregationMode){case MemoryColumn.AggregationMode.DIFF:return attrs[0]===undefined&&attrs[attrs.length-1]===undefined;case MemoryColumn.AggregationMode.MAX:default:return attrs.every(function(attr){return attr===undefined;});}},formatAttributes:function(attrs){var formattedValue=this.formatAttributeValues(attrs);var color;if(typeof this.color==='function')
-color=this.color(attrs);else
-color=this.color;var infos=this.getInfos(attrs);if((color===undefined||formattedValue==='')&&infos.length===0)
-return formattedValue;var attrEl=document.createElement('span');attrEl.style.display='flex';attrEl.style.alignItems='center';attrEl.appendChild(tr.ui.b.asHTMLOrTextNode(formattedValue));infos.forEach(function(info){var infoEl=document.createElement('span');infoEl.style.paddingLeft='4px';infoEl.style.cursor='help';infoEl.style.fontWeight='bold';var icon=MemoryColumn.iconFromAttributeInfoType(info.type);infoEl.textContent=icon.symbol;if(icon.color!==undefined)
-infoEl.style.color=icon.color;infoEl.title=info.message;attrEl.appendChild(infoEl);},this);if(color!==undefined)
-attrEl.style.color=color;return attrEl;},formatAttributeValues:function(attrs){if(attrs.length===1)
-return this.formatSingleAttributeValue(attrs[0]);else
-return this.formatMultipleAttributeValues(attrs);},formatSingleAttributeValue:function(attr){return String(attr.value);},formatMultipleAttributeValues:function(attrs){switch(this.aggregationMode){case MemoryColumn.AggregationMode.DIFF:return this.formatMultipleAttributeValuesDiff(attrs[0],attrs[attrs.length-1]);case MemoryColumn.AggregationMode.MAX:return this.formatMultipleAttributeValuesMax(attrs);default:return tr.ui.b.createSpan({textContent:'(unsupported aggregation mode)',italic:true});}},formatMultipleAttributeValuesDiff:function(firstAttr,lastAttr){if(firstAttr===undefined){var spanEl=tr.ui.b.createSpan({color:'red'});spanEl.appendChild(tr.ui.b.asHTMLOrTextNode('+'));spanEl.appendChild(tr.ui.b.asHTMLOrTextNode(this.formatSingleAttributeValue(lastAttr)));return spanEl;}else if(lastAttr===undefined){var spanEl=tr.ui.b.createSpan({color:'green'});spanEl.appendChild(tr.ui.b.asHTMLOrTextNode('-'));spanEl.appendChild(tr.ui.b.asHTMLOrTextNode(this.formatSingleAttributeValue(firstAttr)));return spanEl;}else if(firstAttr.value===lastAttr.value&&firstAttr.units===lastAttr.units){return this.formatSingleAttributeValue(firstAttr);}else{var spanEl=tr.ui.b.createSpan({color:'DarkOrange'});spanEl.appendChild(tr.ui.b.asHTMLOrTextNode(this.formatSingleAttributeValue(firstAttr)));spanEl.appendChild(tr.ui.b.asHTMLOrTextNode(' '+RIGHTWARDS_ARROW+' '));spanEl.appendChild(tr.ui.b.asHTMLOrTextNode(this.formatSingleAttributeValue(lastAttr)));return spanEl;}},formatMultipleAttributeValuesMax:function(attrs){return this.getMaxAttributeValue(attrs);},cmp:function(rowA,rowB){var attrsA=this.attrs(rowA);var attrsB=this.attrs(rowB);if(attrsA!==undefined&&attrsB!==undefined&&attrsA.length!==attrsB.length)
-throw new Error('Different number of attributes');var undefinedA=this.hasAllRelevantAttrsUndefined(attrsA);var undefinedB=this.hasAllRelevantAttrsUndefined(attrsB);if(undefinedA&&undefinedB)
-return 0;if(undefinedA)
-return-1;if(undefinedB)
-return 1;return this.compareAttributes(attrsA,attrsB);},compareAttributes:function(attrsA,attrsB){if(attrsA.length===1)
-return this.compareSingleAttributes(attrsA[0],attrsB[0]);else
-return this.compareMultipleAttributes(attrsA,attrsB);},compareSingleAttributes:function(attrA,attrB){return this.compareSingleAttributeValues(attrA.value,attrB.value);},compareMultipleAttributes:function(attrsA,attrsB){switch(this.aggregationMode){case MemoryColumn.AggregationMode.DIFF:return this.compareMultipleAttributesDiff(attrsA[0],attrsA[attrsA.length-1],attrsB[0],attrsB[attrsB.length-1]);case MemoryColumn.AggregationMode.MAX:return this.compareMultipleAttributesMax(attrsA,attrsB);default:return 0;}},compareMultipleAttributesDiff:function(firstAttrA,lastAttrA,firstAttrB,lastAttrB){if(firstAttrA===undefined&&firstAttrB!==undefined)
-return 1;if(firstAttrA!==undefined&&firstAttrB===undefined)
-return-1;if(firstAttrA===undefined&&firstAttrB===undefined)
-return this.compareSingleAttributes(lastAttrA,lastAttrB);if(lastAttrA===undefined&&lastAttrB!==undefined)
-return-1;if(lastAttrA!==undefined&&lastAttrB===undefined)
-return 1;if(lastAttrA===undefined&&lastAttrB===undefined)
-return this.compareSingleAttributes(firstAttrB,firstAttrA);var areAttrsAEqual=firstAttrA.value===lastAttrA.value&&firstAttrA.units===lastAttrA.units;var areAttrsBEqual=firstAttrB.value===lastAttrB.value&&firstAttrB.units===lastAttrB.units;if(areAttrsAEqual&&areAttrsBEqual)
-return 0;if(areAttrsAEqual)
-return-1;if(areAttrsBEqual)
-return 1;return 0;},compareMultipleAttributesMax:function(attrsA,attrsB){return this.compareSingleAttributeValues(this.getMaxAttributeValue(attrsA),this.getMaxAttributeValue(attrsB));},getMaxAttributeValue:function(attrs){return attrs.reduce(function(accumulator,attr){if(attr===undefined)
-return accumulator;var attrValue=attr.value;if(accumulator===undefined||this.compareSingleAttributeValues(attrValue,accumulator)>0){return attrValue;}
-return accumulator;}.bind(this),undefined);},compareSingleAttributeValues:function(attrValueA,attrValueB){return COLLATOR.compare(String(attrValueA),String(attrValueB));},getInfos:function(attrs){if(attrs.length!==1){return[];}
-return attrs[0].infos;},getImportance:function(importanceRules){if(importanceRules.length===0)
-return 0;var matchingRule=MemoryColumn.findMatchingRule(this.name,importanceRules);if(matchingRule!==undefined)
-return matchingRule.importance;var minImportance=importanceRules[0].importance;for(var i=1;i<importanceRules.length;i++)
-minImportance=Math.min(minImportance,importanceRules[i].importance);return minImportance-1;}};function ScalarMemoryColumn(name,title,units,cellGetter,aggregationMode){MemoryColumn.call(this,name,title,units,cellGetter,aggregationMode);}
-ScalarMemoryColumn.prototype={__proto__:MemoryColumn.prototype,formatSingleAttributeValue:function(attr){return this.formatUnits(attr.value,false);},formatMultipleAttributeValuesDiff:function(firstAttr,lastAttr){return this.formatUnits(this.getDiffAttrValue(firstAttr,lastAttr),true);},formatMultipleAttributeValuesMax:function(attrs){return this.formatUnits(this.getMaxAttributeValue(attrs),false);},formatUnits:function(value,isDelta){if(value===undefined)
-return'';var sizeEl=document.createElement('tr-ui-u-scalar-span');sizeEl.value=value;if(this.units==='bytes')
-sizeEl.unit=tr.b.u.Units.sizeInBytes;else
-sizeEl.unit=tr.b.u.Units.unitlessNumber;if(!isDelta)
-return sizeEl;sizeEl.isDelta=true;if(value===0)
-return sizeEl;var wrapperEl=document.createElement('span');wrapperEl.style.color=value>0?'red':'green';wrapperEl.appendChild(sizeEl);return wrapperEl;},compareSingleAttributeValues:function(attrValueA,attrValueB){return attrValueA-attrValueB;},compareMultipleAttributesDiff:function(firstAttrA,lastAttrA,firstAttrB,lastAttrB){return this.getDiffAttrValue(firstAttrA,lastAttrA)-
-this.getDiffAttrValue(firstAttrB,lastAttrB);},getDiffAttrValue:function(firstAttr,lastAttr){var firstValue=firstAttr===undefined?0:firstAttr.value;var lastValue=lastAttr===undefined?0:lastAttr.value;return lastValue-firstValue;}};function MemoryCell(attrs){this.attrs=attrs;}
-MemoryCell.extractAttributes=function(cell){if(cell===undefined)
-return undefined;return cell.attrs;};function fieldGetter(){var fields=tr.b.asArray(arguments);return function(row){var value=row;for(var i=0;i<fields.length;i++)
-value=value[fields[i]];return value;};}
-var RECURSIVE_EXPANSION_MAX_SUB_ROW_COUNT=10;function expandTableRowsRecursively(table){function expandRowRecursively(row){if(row.subRows===undefined||row.subRows.length===0)
-return;if(row.subRows.length>RECURSIVE_EXPANSION_MAX_SUB_ROW_COUNT)
-return;table.setExpandedForTableRow(row,true);row.subRows.forEach(expandRowRecursively);}
-table.tableRows.forEach(expandRowRecursively);}
-function aggregateTableRowCellsRecursively(row,cellKey){var subRows=row.subRows;if(subRows===undefined)
-return;subRows.forEach(function(subRow){aggregateTableRowCellsRecursively(subRow,cellKey);});aggregateTableRowCells(row,subRows,cellKey);}
-function aggregateTableRowCells(row,subRows,cellKey){var rowCells=row[cellKey];if(rowCells===undefined)
-row[cellKey]=rowCells={};var subRowCellNames={};subRows.forEach(function(subRow){var subRowCells=subRow[cellKey];if(subRowCells===undefined)
-return;tr.b.iterItems(subRowCells,function(columnName){subRowCellNames[columnName]=true;});});tr.b.iterItems(subRowCellNames,function(cellName){var existingRowCell=rowCells[cellName];var existingRowAttributes=MemoryCell.extractAttributes(existingRowCell);var timestampCount=undefined;if(existingRowAttributes!==undefined)
-timestampCount=existingRowAttributes.length;subRows.forEach(function(subRow){var subRowCells=subRow[cellKey];if(subRowCells===undefined)
-return;var subRowCellAttributes=MemoryCell.extractAttributes(subRowCells[cellName]);if(subRowCellAttributes===undefined)
-return;if(timestampCount===undefined)
-timestampCount=subRowCellAttributes.length;else if(timestampCount!==subRowCellAttributes.length)
-throw new Error('Rows have different number of timestamps');});if(timestampCount===undefined)
-throw new Error('Handling non-existent cell name \''+cellName+'\'');var aggregatedAttributes=new Array(timestampCount);for(var i=0;i<timestampCount;i++){var existingRowAttribute=undefined;if(existingRowAttributes!==undefined)
-existingRowAttribute=existingRowAttributes[i];var subRowAttributes=subRows.map(function(subRow){var subRowCells=subRow[cellKey];if(subRowCells===undefined)
-return undefined;var subRowCellAttributes=MemoryCell.extractAttributes(subRowCells[cellName]);if(subRowCellAttributes===undefined)
-return;return subRowCellAttributes[i];});aggregatedAttributes[i]=tr.model.Attribute.aggregate(subRowAttributes,existingRowAttribute);}
-if(existingRowCell!==undefined){existingRowCell.attrs=aggregatedAttributes;}else{rowCells[cellName]=new MemoryCell(aggregatedAttributes);}});}
-function createCells(timeToValues,valueAttrsGetter,opt_cellAddedCallback){var attrNameToAttrs=tr.b.invertArrayOfDicts(timeToValues,valueAttrsGetter);return tr.b.mapItems(attrNameToAttrs,function(attrName,attrs){var cell=new tr.ui.analysis.MemoryCell(attrs);if(opt_cellAddedCallback!==undefined)
-opt_cellAddedCallback(attrName,cell);return cell;});}
-function addAttributeIfDefined(dstDict,attrName,attrClass,units,value,opt_addedCallback){if(value===undefined)
-return;var attr=new attrClass(units,value);dstDict[attrName]=attr;if(opt_addedCallback!==undefined)
-opt_addedCallback(attr);}
-return{TitleColumn:TitleColumn,MemoryColumn:MemoryColumn,ScalarMemoryColumn:ScalarMemoryColumn,MemoryCell:MemoryCell,fieldGetter:fieldGetter,expandTableRowsRecursively:expandTableRowsRecursively,aggregateTableRowCellsRecursively:aggregateTableRowCellsRecursively,aggregateTableRowCells:aggregateTableRowCells,createCells:createCells,addAttributeIfDefined:addAttributeIfDefined};});'use strict';Polymer('tr-ui-a-stacked-pane',{rebuild:function(){if(!this.paneDirty_){return;}
-this.paneDirty_=false;this.rebuildPane_();},scheduleRebuildPane_:function(){if(this.paneDirty_)
-return;this.paneDirty_=true;setTimeout(this.rebuild.bind(this),0);},rebuildPane_:function(){},set childPaneBuilder(childPaneBuilder){this.childPaneBuilder_=childPaneBuilder;this.dispatchEvent(new tr.b.Event('request-child-pane-change'));},get childPaneBuilder(){return this.childPaneBuilder_;},appended:function(){this.rebuild();}});'use strict';tr.exportTo('tr.ui.analysis',function(){var COLUMN_IMPORTANCE_RULES=tr.ui.analysis.MemoryColumn.columnNamesToImportanceRules(['Total size','Self size']);Polymer('tr-ui-a-memory-dump-heap-details-pane',{created:function(){this.heapDumps_=undefined;this.aggregationMode_=undefined;this.bottomUpView_=false;},ready:function(){this.$.table.selectionMode=tr.ui.b.TableFormat.SelectionMode.ROW;this.$.view_mode_container.appendChild(tr.ui.b.createSelector(this,'bottomUpView','memoryDumpHeapDetailsPane.bottomUpView',false,[{label:'Tree (top down)',value:false},{label:'Heavy (bottom up)',value:true}]));},set heapDumps(heapDumps){this.heapDumps_=heapDumps;this.scheduleRebuildPane_();},get heapDumps(){return this.heapDumps_;},set aggregationMode(aggregationMode){this.aggregationMode_=aggregationMode;this.scheduleRebuildPane_();},get aggregationMode(){return this.aggregationMode_;},set bottomUpView(bottomUpView){this.bottomUpView_=bottomUpView;this.scheduleRebuildPane_();},get bottomUpView(){return this.bottomUpView_;},rebuildPane_:function(){if(this.heapDumps_===undefined||this.heapDumps_.length===0){this.$.info_text.style.display='block';this.$.table.style.display='none';this.$.view_mode_container.style.display='none';this.$.table.clear();this.$.table.rebuild();return;}
-this.$.info_text.style.display='none';this.$.table.style.display='block';this.$.view_mode_container.style.display='block';var stackFrameTrees=this.createStackFrameTrees_(this.heapDumps_);var rows=this.createRows_(stackFrameTrees);var columns=this.createColumns_(rows);this.$.table.tableRows=rows;this.$.table.tableColumns=columns;this.$.table.rebuild();tr.ui.analysis.expandTableRowsRecursively(this.$.table);},createStackFrameTrees_:function(heapDumps){return heapDumps.map(function(heapDump){if(heapDump===undefined)
-return undefined;var rootNode=new tr.ui.analysis.StackFrameTreeNode(heapDump.allocatorName);var sumSize=undefined;heapDump.entries.forEach(function(entry){var size=entry.size;var leafStackFrame=entry.leafStackFrame;if(leafStackFrame===undefined){if(sumSize!==undefined)
-throw new Error('Multiple sum stack frames');sumSize=size;return;}
-rootNode.addStackTrace(leafStackFrame.stackTrace,size,true);},this);if(sumSize!==undefined&&sumSize>rootNode.total){var unspecifiedSize=sumSize-rootNode.total;rootNode.total=sumSize;var unspecifiedNode=rootNode.getOrCreateChild('<unspecified>');unspecifiedNode.total+=unspecifiedSize;unspecifiedNode.self+=unspecifiedSize;}
-if(this.bottomUpView)
-return rootNode.convertToBottomUpView();else
-return rootNode;},this);},createRows_:function(stackFrameTrees){return[this.createHeapRowRecursively_(stackFrameTrees)];},createHeapRowRecursively_:function(nodes){var title=tr.b.findFirstInArray(nodes).title;var defined=nodes.map(function(node){return node!==undefined;});var cells=tr.ui.analysis.createCells(nodes,function(node){return{'Total size':new tr.model.ScalarAttribute('bytes',node.total),'Self size':new tr.model.ScalarAttribute('bytes',node.self)};});var groupedChildNodes=tr.b.dictionaryValues(tr.b.invertArrayOfDicts(nodes,function(node){return node.children;}));var row={title:title,defined:defined,cells:cells};if(groupedChildNodes.length>0){row.subRows=groupedChildNodes.map(this.createHeapRowRecursively_,this);}
-return row;},createColumns_:function(rows){var titleColumn=new tr.ui.analysis.TitleColumn('Stack frame');titleColumn.width='500px';var attributeColumns=tr.ui.analysis.MemoryColumn.fromRows(rows,'cells',this.aggregationMode_);tr.ui.analysis.MemoryColumn.sortByImportance(attributeColumns,COLUMN_IMPORTANCE_RULES);tr.ui.analysis.MemoryColumn.spaceEqually(attributeColumns);var columns=[titleColumn].concat(attributeColumns);return columns;}});return{};});'use strict';tr.exportTo('tr.ui.analysis',function(){var IMPORTANCE_RULES=[{condition:tr.model.MemoryAllocatorDump.EFFECTIVE_SIZE_ATTRIBUTE_NAME,importance:10},{condition:tr.model.MemoryAllocatorDump.SIZE_ATTRIBUTE_NAME,importance:9},{condition:'page_size',importance:0},{condition:/size/,importance:5},{importance:0}];function AllocatorDumpNameColumn(title){tr.ui.analysis.TitleColumn.call(this,title);}
-AllocatorDumpNameColumn.prototype={__proto__:tr.ui.analysis.TitleColumn.prototype,formatTitle:function(row){if(!row.suballocation)
-return row.title;return tr.ui.b.createSpan({textContent:row.title,italic:true,tooltip:row.fullName});}};Polymer('tr-ui-a-memory-dump-allocator-details-pane',{created:function(){this.memoryAllocatorDumps_=undefined;this.heapDumps_=undefined;this.aggregationMode_=undefined;},ready:function(){this.$.table.selectionMode=tr.ui.b.TableFormat.SelectionMode.ROW;},set memoryAllocatorDumps(memoryAllocatorDumps){this.memoryAllocatorDumps_=memoryAllocatorDumps;this.scheduleRebuildPane_();},get memoryAllocatorDumps(){return this.memoryAllocatorDumps_;},set heapDumps(heapDumps){this.heapDumps_=heapDumps;this.scheduleRebuildPane_();},set aggregationMode(aggregationMode){this.aggregationMode_=aggregationMode;this.scheduleRebuildPane_();},get aggregationMode(){return this.aggregationMode_;},rebuildPane_:function(){if(this.memoryAllocatorDumps_===undefined||this.memoryAllocatorDumps_.length===0){this.$.info_text.style.display='block';this.$.table.style.display='none';this.$.table.clear();this.$.table.rebuild();this.childPaneBuilder=undefined;return;}
-this.$.info_text.style.display='none';this.$.table.style.display='block';var rows=this.createRows_();var columns=this.createColumns_(rows);this.$.table.tableRows=rows;this.$.table.tableColumns=columns;this.$.table.rebuild();tr.ui.analysis.expandTableRowsRecursively(this.$.table);if(this.heapDumps_===undefined){this.childPaneBuilder=undefined;}else{this.childPaneBuilder=function(){var pane=document.createElement('tr-ui-a-memory-dump-heap-details-pane');pane.heapDumps=this.heapDumps_;pane.aggregationMode=this.aggregationMode_;return pane;}.bind(this);}},createRows_:function(){return[this.createAllocatorRowRecursively_(this.memoryAllocatorDumps_)];},createAllocatorRowRecursively_:function(dumps){var definedDump=tr.b.findFirstInArray(dumps);var title=definedDump.name;var fullName=definedDump.fullName;var defined=dumps.map(function(dump){return dump!==undefined;});var cells=tr.ui.analysis.createCells(dumps,function(dump){return dump.attributes;});var suballocatedBy=undefined;if(title.startsWith('__')){for(var i=0;i<dumps.length;i++){var dump=dumps[i];if(dump===undefined||dump.ownedBy.length===0){continue;}
-var ownerDump=dump.ownedBy[0].source;if(dump.ownedBy.length>1||dump.children.length>0||ownerDump.containerMemoryDump!==dump.containerMemoryDump){suballocatedBy=undefined;break;}
-if(suballocatedBy===undefined){suballocatedBy=ownerDump.fullName;}else if(suballocatedBy!==ownerDump.fullName){suballocatedBy=undefined;break;}}}
-var row={title:title,fullName:fullName,defined:defined,cells:cells,suballocatedBy:suballocatedBy};var childDumpNameToDumps=tr.b.invertArrayOfDicts(dumps,function(dump){return tr.b.arrayToDict(dump.children,function(child){return child.name;});});var subRows=[];var suballocationClassificationRootNode=undefined;tr.b.iterItems(childDumpNameToDumps,function(childName,childDumps){var childRow=this.createAllocatorRowRecursively_(childDumps);if(childRow.suballocatedBy===undefined){subRows.push(childRow);}else{suballocationClassificationRootNode=this.classifySuballocationRow_(childRow,suballocationClassificationRootNode);}},this);if(suballocationClassificationRootNode!==undefined){var suballocationRow=this.createSuballocationRowRecursively_('suballocations',suballocationClassificationRootNode);tr.ui.analysis.aggregateTableRowCellsRecursively(suballocationRow,'cells');subRows.push(suballocationRow);}
-if(subRows.length>0)
-row.subRows=subRows;return row;},classifySuballocationRow_:function(suballocationRow,rootNode){if(rootNode===undefined){rootNode={children:{},row:undefined};}
-var suballocationLevels=suballocationRow.suballocatedBy.split('/');var currentNode=rootNode;for(var i=0;i<suballocationLevels.length;i++){var suballocationLevel=suballocationLevels[i];var nextNode=currentNode.children[suballocationLevel];if(nextNode===undefined){currentNode.children[suballocationLevel]=nextNode={children:{},row:undefined};}
-var currentNode=nextNode;}
-if(currentNode.row!==undefined)
-throw new Error('Multiple suballocations with the same owner name');currentNode.row=suballocationRow;return rootNode;},createSuballocationRowRecursively_:function(name,node){var childCount=Object.keys(node.children).length;if(childCount===0){if(node.row===undefined)
-throw new Error('Suballocation node must have a row or children');var row=node.row;row.title=name;row.suballocation=true;return row;}
-var subRows=tr.b.dictionaryValues(tr.b.mapItems(node.children,this.createSuballocationRowRecursively_,this));if(node.row!==undefined){var row=node.row;row.title='<unspecified>';row.suballocation=true;subRows.unshift(row);}
-var defined=new Array(subRows[0].defined.length);for(var i=0;i<subRows.length;i++){subRows[i].defined.forEach(function(definedValue,index){defined[index]=defined[index]||definedValue;});}
-return{title:name,suballocation:true,defined:defined,cells:{},subRows:subRows};},createColumns_:function(rows){var titleColumn=new AllocatorDumpNameColumn('Component');titleColumn.width='200px';var attributeColumns=tr.ui.analysis.MemoryColumn.fromRows(rows,'cells',this.aggregationMode_);tr.ui.analysis.MemoryColumn.spaceEqually(attributeColumns);tr.ui.analysis.MemoryColumn.sortByImportance(attributeColumns,IMPORTANCE_RULES);var columns=[titleColumn].concat(attributeColumns);return columns;}});return{};});'use strict';tr.exportTo('tr.ui.analysis',function(){var COLUMN_IMPORTANCE_RULES=tr.ui.analysis.MemoryColumn.columnNamesToImportanceRules(['Start address','Virtual size','Protection flags','PSS','Private dirty','Private clean','Shared dirty','Shared clean','Swapped']);var CLASSIFICATION_RULES={name:'Total',children:[{name:'Android',file:/^\/dev\/ashmem(?!\/libc malloc)/,children:[{name:'Java runtime',file:/^\/dev\/ashmem\/dalvik-/,children:[{name:'Spaces',file:/\/dalvik-(alloc|main|large object|non moving|zygote) space/,children:[{name:'Normal',file:/\/dalvik-(alloc|main)/},{name:'Large',file:/\/dalvik-large object/},{name:'Zygote',file:/\/dalvik-zygote/},{name:'Non-moving',file:/\/dalvik-non moving/}]},{name:'Linear Alloc',file:/\/dalvik-LinearAlloc/},{name:'Indirect Reference Table',file:/\/dalvik-indirect.ref/},{name:'Cache',file:/\/dalvik-jit-code-cache/},{name:'Accounting'}]},{name:'Cursor',file:/\/CursorWindow/},{name:'Ashmem'}]},{name:'Native heap',file:/^((\[heap\])|(\[anon:)|(\/dev\/ashmem\/libc malloc)|(\[discounted tracing overhead\])|$)/},{name:'Stack',file:/^\[stack/},{name:'Files',file:/\.((((jar)|(apk)|(ttf)|(odex)|(oat)|(arg))$)|(dex)|(so))/,children:[{name:'so',file:/\.so/},{name:'jar',file:/\.jar$/},{name:'apk',file:/\.apk$/},{name:'ttf',file:/\.ttf$/},{name:'dex',file:/\.((dex)|(odex$))/},{name:'oat',file:/\.oat$/},{name:'art',file:/\.art$/}]},{name:'Devices',file:/(^\/dev\/)|(anon_inode:dmabuf)/,children:[{name:'GPU',file:/\/((nv)|(mali)|(kgsl))/},{name:'DMA',file:/anon_inode:dmabuf/}]}]};function createEmptyRuleRow(rule){var row={title:rule.name,rule:rule,subRows:[]};if(rule.children!==undefined)
-row.subRows=rule.children.map(createEmptyRuleRow);return row;}
-function hexString(address,is64BitAddress){if(address===undefined)
-return undefined;var hexPadding=is64BitAddress?'0000000000000000':'00000000';return(hexPadding+address.toString(16)).substr(-hexPadding.length);}
-function classifyRegionRow(ruleRow,regionRow){var rule=ruleRow.rule;if(rule===undefined||rule.children===undefined||rule.children.length===0){ruleRow.subRows.push(regionRow);return;}
-function regionRowMatchesChildRule(childRule){var fileRegExp=childRule.file;if(fileRegExp===undefined)
-return true;return fileRegExp.test(regionRow.title);}
-var matchedChildRuleIndex=tr.b.findFirstIndexInArray(rule.children,regionRowMatchesChildRule);if(matchedChildRuleIndex===-1){matchedChildRuleIndex=rule.children.length;if(matchedChildRuleIndex>=ruleRow.subRows.length){ruleRow.subRows.push({title:'Other',subRows:[]});}}
-classifyRegionRow(ruleRow.subRows[matchedChildRuleIndex],regionRow);}
-function pruneEmptyRuleRows(row){if(row.subRows===undefined||row.subRows.length===0)
-return;if(row.subRows[0].rule===undefined){return;}
-row.subRows.forEach(pruneEmptyRuleRows);row.subRows=row.subRows.filter(function(subRow){return subRow.subRows.length>0;});}
-Polymer('tr-ui-a-memory-dump-vm-regions-details-pane',{created:function(){this.vmRegions_=undefined;this.aggregationMode_=undefined;},ready:function(){this.$.table.selectionMode=tr.ui.b.TableFormat.SelectionMode.ROW;},set vmRegions(vmRegions){this.vmRegions_=vmRegions;this.scheduleRebuildPane_();},get vmRegions(){return this.vmRegions_;},set aggregationMode(aggregationMode){this.aggregationMode_=aggregationMode;this.scheduleRebuildPane_();},get aggregationMode(){return this.aggregationMode_;},rebuildPane_:function(){var unclassifiedRows=[];if(this.vmRegions_!==undefined)
-unclassifiedRows=this.createUnclassifiedRows_(this.vmRegions_);if(unclassifiedRows.length===0){this.$.info_text.style.display='block';this.$.table.style.display='none';this.$.table.clear();this.$.table.rebuild();return;}
-this.$.info_text.style.display='none';this.$.table.style.display='block';var rows=this.classifyRows_(unclassifiedRows);var columns=this.createColumns_(rows);this.$.table.tableRows=rows;this.$.table.tableColumns=columns;this.$.table.rebuild();tr.ui.analysis.expandTableRowsRecursively(this.$.table);},joinRegions_:function(timeToRegionIdToRegion){return tr.b.dictionaryValues(tr.b.invertArrayOfDicts(timeToRegionIdToRegion,function(regionIdToRegion){return tr.b.arrayToDict(regionIdToRegion,function(region){return[region.mappedFile,region.startAddress].join('#');});}));},createUnclassifiedRows_:function(timeToRegionIdToRegion){var is64BitAddress=timeToRegionIdToRegion.some(function(regionIdToRegion){if(regionIdToRegion===undefined)
-return false;return regionIdToRegion.some(function(region){if(region.startAddress===undefined)
-return false;return region.startAddress>=4294967296;});});var regionIdToTimeToRegion=this.joinRegions_(timeToRegionIdToRegion);return regionIdToTimeToRegion.map(function(timeToRegion){var definedRegion=tr.b.findFirstInArray(timeToRegion);var defined=timeToRegion.map(function(region){return region!==undefined;});var constantCells=tr.ui.analysis.createCells([definedRegion],function(region){var attrs={};tr.ui.analysis.addAttributeIfDefined(attrs,'Start address',tr.model.StringAttribute,'',hexString(region.startAddress,is64BitAddress));return attrs;});var variableCells=tr.ui.analysis.createCells(timeToRegion,function(region){var attrs={};tr.ui.analysis.addAttributeIfDefined(attrs,'Virtual size',tr.model.ScalarAttribute,'bytes',region.sizeInBytes);tr.ui.analysis.addAttributeIfDefined(attrs,'Protection flags',tr.model.StringAttribute,'',region.protectionFlagsToString);tr.ui.analysis.addAttributeIfDefined(attrs,'PSS',tr.model.ScalarAttribute,'bytes',region.byteStats.proportionalResident);tr.ui.analysis.addAttributeIfDefined(attrs,'Private dirty',tr.model.ScalarAttribute,'bytes',region.byteStats.privateDirtyResident);tr.ui.analysis.addAttributeIfDefined(attrs,'Private clean',tr.model.ScalarAttribute,'bytes',region.byteStats.privateCleanResident);tr.ui.analysis.addAttributeIfDefined(attrs,'Shared dirty',tr.model.ScalarAttribute,'bytes',region.byteStats.sharedDirtyResident);tr.ui.analysis.addAttributeIfDefined(attrs,'Shared clean',tr.model.ScalarAttribute,'bytes',region.byteStats.sharedCleanResident);tr.ui.analysis.addAttributeIfDefined(attrs,'Swapped',tr.model.ScalarAttribute,'bytes',region.byteStats.swapped);return attrs;});return{title:definedRegion.mappedFile||'',defined:defined,constantCells:constantCells,variableCells:variableCells};});},classifyRows_:function(unclassifiedRows){var rootRow=createEmptyRuleRow(CLASSIFICATION_RULES);unclassifiedRows.map(classifyRegionRow.bind(undefined,rootRow));pruneEmptyRuleRows(rootRow);tr.ui.analysis.aggregateTableRowCellsRecursively(rootRow,'constantCells');tr.ui.analysis.aggregateTableRowCellsRecursively(rootRow,'variableCells');return[rootRow];},createColumns_:function(rows){var titleColumn=new tr.ui.analysis.TitleColumn('Mapped file');titleColumn.width='200px';var constantColumns=tr.ui.analysis.MemoryColumn.fromRows(rows,'constantCells');var variableColumns=tr.ui.analysis.MemoryColumn.fromRows(rows,'variableCells',this.aggregationMode_);var attributeColumns=constantColumns.concat(variableColumns);tr.ui.analysis.MemoryColumn.sortByImportance(attributeColumns,COLUMN_IMPORTANCE_RULES);tr.ui.analysis.MemoryColumn.spaceEqually(attributeColumns);var columns=[titleColumn].concat(attributeColumns);return columns;}});return{};});'use strict';Polymer('tr-ui-b-color-legend',{ready:function(){var blackSquareCharCode=9632;this.$.square.innerText=String.fromCharCode(blackSquareCharCode);this.label_=undefined;this.compoundEventSelectionState_=tr.model.CompoundEventSelectionState.NOT_SELECTED;},set compoundEventSelectionState(compoundEventSelectionState){this.compoundEventSelectionState_=compoundEventSelectionState;},get label(){return this.label_;},set label(label){if(label===undefined){this.setLabelAndColorId(undefined,undefined);return;}
-var colorId=tr.b.ColorScheme.getColorIdForGeneralPurposeString(label);this.setLabelAndColorId(label,colorId);},setLabelAndColorId:function(label,colorId){this.label_=label;this.$.label.textContent='';this.$.label.appendChild(tr.ui.b.asHTMLOrTextNode(label));if(colorId===undefined)
-this.$.square.style.color='initial';else
-this.$.square.style.color=tr.b.ColorScheme.colorsAsStrings[colorId];}});'use strict';Polymer('tr-ui-b-view-specific-brushing-state',{get viewId(){return this.getAttribute('view-id');},set viewId(viewId){this.setAttribute('view-id',viewId);},get:function(){var viewId=this.viewId;if(!viewId)
-throw new Error('Element must have a view-id attribute!');var brushingStateController=tr.c.BrushingStateController.getControllerForElement(this);if(!brushingStateController)
-return undefined;return brushingStateController.getViewSpecificBrushingState(viewId);},set:function(state){var viewId=this.viewId;if(!viewId)
-throw new Error('Element must have a view-id attribute!');var brushingStateController=tr.c.BrushingStateController.getControllerForElement(this);if(!brushingStateController)
-return;brushingStateController.changeViewSpecificBrushingState(viewId,state);}});'use strict';tr.exportTo('tr.ui.analysis',function(){var ColorScheme=tr.b.ColorScheme;var PLATFORM_SPECIFIC_TOTAL_NAME_SUFFIX='_bytes';var DISPLAYED_SIZE_ATTRIBUTE_NAME=tr.model.MemoryAllocatorDump.DISPLAYED_SIZE_ATTRIBUTE_NAME;var GREATER_THAN_OR_EQUAL_TO_SYMBOL=String.fromCharCode(8805);function ProcessNameColumn(title){tr.ui.analysis.TitleColumn.call(this,title);}
-ProcessNameColumn.prototype={__proto__:tr.ui.analysis.TitleColumn.prototype,formatTitle:function(row){if(row.noLegend)
-return row.title;var titleEl=document.createElement('tr-ui-b-color-legend');titleEl.label=row.title;return titleEl;}};function UsedMemoryColumn(name,units,cellGetter,aggregationMode){tr.ui.analysis.ScalarMemoryColumn.call(this,name,units,cellGetter,aggregationMode);}
-var USED_MEMORY_COLUMN_COLOR=ColorScheme.getColorForReservedNameAsString('used_memory_column');var OLDER_USED_MEMORY_COLUMN_COLOR=ColorScheme.getColorForReservedNameAsString('older_used_memory_column');UsedMemoryColumn.prototype={__proto__:tr.ui.analysis.ScalarMemoryColumn.prototype,get title(){return tr.ui.b.createSpan({textContent:this.name,color:USED_MEMORY_COLUMN_COLOR});},color:function(attrs){if(attrs.length===1&&attrs[0].isOlderValue)
-return OLDER_USED_MEMORY_COLUMN_COLOR;else
-return USED_MEMORY_COLUMN_COLOR;}};function AllocatorColumn(name,units,cellGetter,aggregationMode){tr.ui.analysis.ScalarMemoryColumn.call(this,name,units,cellGetter,aggregationMode);}
-AllocatorColumn.prototype={__proto__:tr.ui.analysis.ScalarMemoryColumn.prototype,get title(){var titleEl=document.createElement('tr-ui-b-color-legend');titleEl.label=this.name;return titleEl;},getInfos:function(attrs){var hasDumpInfo=undefined;attrs.some(function(attr){if(attr===undefined)
-return false;return attr.infos.some(function(info){if(info.type!==tr.model.AttributeInfoType.HAS_HEAP_DUMP)
-return false;hasDumpInfo=info;return true;});});if(hasDumpInfo!==undefined)
-return[hasDumpInfo];else
-return[];}};function TracingColumn(name,units,cellGetter,aggregationMode){tr.ui.analysis.ScalarMemoryColumn.call(this,name,units,cellGetter,aggregationMode);}
-var TRACING_COLUMN_COLOR=ColorScheme.getColorForReservedNameAsString('tracing_memory_column');TracingColumn.prototype={__proto__:tr.ui.analysis.ScalarMemoryColumn.prototype,get title(){return tr.ui.b.createSpan({textContent:this.name,color:TRACING_COLUMN_COLOR});},color:TRACING_COLUMN_COLOR};var USED_MEMORY_SIZE_COLUMNS_CONSTRUCTOR_RULES=[{columnConstructor:UsedMemoryColumn}];var USED_MEMORY_SIZE_COLUMNS_IMPORTANCE_RULES=tr.ui.analysis.MemoryColumn.columnNamesToImportanceRules(['Total resident','Peak total resident','PSS','Private dirty','Swapped']);var ALLOCATOR_SIZE_COLUMNS_CONSTRUCTOR_RULES=[{condition:'tracing',columnConstructor:TracingColumn},{columnConstructor:AllocatorColumn}];var ALLOCATOR_SIZE_COLUMNS_IMPORTANCE_RULES=[{condition:'tracing',importance:0},{importance:1}];Polymer('tr-ui-a-memory-dump-overview-pane',{created:function(){this.processMemoryDumps_=undefined;this.aggregationMode_=undefined;},ready:function(){this.$.table.selectionMode=tr.ui.b.TableFormat.SelectionMode.CELL;this.$.table.addEventListener('selection-changed',function(tableEvent){tableEvent.stopPropagation();this.changeChildPane_();}.bind(this));},set processMemoryDumps(processMemoryDumps){this.processMemoryDumps_=processMemoryDumps;this.scheduleRebuildPane_();},get processMemoryDumps(){return this.processMemoryDumps_;},set aggregationMode(aggregationMode){this.aggregationMode_=aggregationMode;this.scheduleRebuildPane_();},get aggregationMode(){return this.aggregationMode_;},get selectedMemoryCell(){if(this.processMemoryDumps_===undefined||this.processMemoryDumps_.length===0){return undefined;}
-var selectedTableRow=this.$.table.selectedTableRow;if(!selectedTableRow)
-return undefined;var selectedColumnIndex=this.$.table.selectedColumnIndex;if(selectedColumnIndex===undefined)
-return undefined;var selectedColumn=this.$.table.tableColumns[selectedColumnIndex];var selectedMemoryCell=selectedColumn.cell(selectedTableRow);return selectedMemoryCell;},changeChildPane_:function(){this.storeSelection_();var builder=undefined;if(this.selectedMemoryCell!==undefined)
-builder=this.selectedMemoryCell.buildDetailsPane;this.childPaneBuilder=builder;},rebuildPane_:function(){if(this.processMemoryDumps_===undefined||this.processMemoryDumps_.length===0){this.$.info_text.style.display='block';this.$.table.style.display='none';this.$.table.clear();this.$.table.rebuild();return;}
-this.$.info_text.style.display='none';this.$.table.style.display='block';var rows=this.createRows_();var footerRows=this.createFooterRows_(rows);var columns=this.createColumns_(rows);this.$.table.tableRows=rows;this.$.table.footerRows=footerRows;this.$.table.tableColumns=columns;this.$.table.rebuild();this.restoreSelection_();},createRows_:function(){var timeToPidToProcessMemoryDump=this.processMemoryDumps_;var pidToTimeToProcessMemoryDump=tr.b.invertArrayOfDicts(timeToPidToProcessMemoryDump);var rows=[];var aggregationMode=this.aggregationMode_;return tr.b.dictionaryValues(tr.b.mapItems(pidToTimeToProcessMemoryDump,function(pid,timeToDump){var process=tr.b.findFirstInArray(timeToDump).process;var defined=timeToDump.map(function(dump){return dump!==undefined;});var timeToVmRegions=timeToDump.map(function(dump){if(dump===undefined)
-return undefined;return dump.mostRecentVmRegions;});function buildVmRegionsPane(){var pane=document.createElement('tr-ui-a-memory-dump-vm-regions-details-pane');pane.vmRegions=timeToVmRegions;pane.aggregationMode=aggregationMode;return pane;}
-var usedMemoryCells=tr.ui.analysis.createCells(timeToDump,function(dump){var sizes={};var totals=dump.totals;if(totals!==undefined){tr.ui.analysis.addAttributeIfDefined(sizes,'Total resident',tr.model.ScalarAttribute,'bytes',totals.residentBytes);tr.ui.analysis.addAttributeIfDefined(sizes,'Peak total resident',tr.model.ScalarAttribute,'bytes',totals.peakResidentBytes,function(attr){if(dump.totals.arePeakResidentBytesResettable){attr.infos.push(new tr.model.AttributeInfo(tr.model.AttributeInfoType.RECENT_VALUE,'Peak RSS since previous memory dump.'));}else{attr.infos.push(new tr.model.AttributeInfo(tr.model.AttributeInfoType.OVERALL_VALUE,'Peak RSS since process startup. Finer grained '+'peaks require a Linux kernel version '+
-GREATER_THAN_OR_EQUAL_TO_SYMBOL+' 4.0.'));}});var platformSpecific=totals.platformSpecific;if(platformSpecific!==undefined){tr.b.iterItems(platformSpecific,function(name,size){if(name.endsWith(PLATFORM_SPECIFIC_TOTAL_NAME_SUFFIX)){name=name.substring(0,name.length-
-PLATFORM_SPECIFIC_TOTAL_NAME_SUFFIX.length);}
-name=name.replace('_',' ').trim();name=name.charAt(0).toUpperCase()+name.slice(1);sizes[name]=new tr.model.ScalarAttribute('bytes',size);});}}
-var vmRegionAttributeAddedCallback=undefined;if(!dump.hasOwnVmRegions){vmRegionAttributeAddedCallback=function(attr){attr.infos.push(new tr.model.AttributeInfo(tr.model.AttributeInfoType.LINK,'Older value (process did not dump memory maps).'));attr.isOlderValue=true;};}
-tr.ui.analysis.addAttributeIfDefined(sizes,'PSS',tr.model.ScalarAttribute,'bytes',dump.getMostRecentTotalVmRegionStat('proportionalResident'),vmRegionAttributeAddedCallback);tr.ui.analysis.addAttributeIfDefined(sizes,'Private dirty',tr.model.ScalarAttribute,'bytes',dump.getMostRecentTotalVmRegionStat('privateDirtyResident'),vmRegionAttributeAddedCallback);tr.ui.analysis.addAttributeIfDefined(sizes,'Swapped',tr.model.ScalarAttribute,'bytes',dump.getMostRecentTotalVmRegionStat('swapped'),vmRegionAttributeAddedCallback);return sizes;},function(attrName,cell){cell.buildDetailsPane=buildVmRegionsPane;});var allocatorCells=tr.ui.analysis.createCells(timeToDump,function(dump){if(dump.memoryAllocatorDumps===undefined)
-return undefined;var sizes={};dump.memoryAllocatorDumps.forEach(function(allocatorDump){var rootAttribute=allocatorDump.attributes[DISPLAYED_SIZE_ATTRIBUTE_NAME];if(rootAttribute===undefined)
-return;var allocatorName=allocatorDump.fullName;var overviewAttribute=new rootAttribute.constructor(rootAttribute.units,rootAttribute.value);if(dump.heapDumps!==undefined&&dump.heapDumps[allocatorName]!==undefined){overviewAttribute.infos.push(new tr.model.AttributeInfo(tr.model.AttributeInfoType.HAS_HEAP_DUMP,'Heap dump provided'));}
-sizes[allocatorName]=overviewAttribute;});return sizes;},function(allocatorName,cell){var memoryAllocatorDumps=timeToDump.map(function(dump){if(dump===undefined)
-return undefined;return dump.getMemoryAllocatorDumpByFullName(allocatorName);});var heapDumps=undefined;timeToDump.forEach(function(dump,index){if(dump===undefined||dump.heapDumps===undefined)
-return;if(heapDumps===undefined)
-heapDumps=new Array(timeToDump.length);heapDumps[index]=dump.heapDumps[allocatorName];});cell.buildDetailsPane=function(){var pane=document.createElement('tr-ui-a-memory-dump-allocator-details-pane');pane.memoryAllocatorDumps=memoryAllocatorDumps;pane.heapDumps=heapDumps;pane.aggregationMode=aggregationMode;return pane;};});return{title:process.userFriendlyName,defined:defined,usedMemoryCells:usedMemoryCells,allocatorCells:allocatorCells};}));},createFooterRows_:function(rows){if(rows.length<=1)
-return[];var totalRow={title:'Total',noLegend:true};tr.ui.analysis.aggregateTableRowCells(totalRow,rows,'usedMemoryCells');tr.ui.analysis.aggregateTableRowCells(totalRow,rows,'allocatorCells');return[totalRow];},createColumns_:function(rows){var titleColumn=new ProcessNameColumn('Process');titleColumn.width='200px';var usedMemorySizeColumns=tr.ui.analysis.MemoryColumn.fromRows(rows,'usedMemoryCells',this.aggregationMode_,USED_MEMORY_SIZE_COLUMNS_CONSTRUCTOR_RULES);tr.ui.analysis.MemoryColumn.sortByImportance(usedMemorySizeColumns,USED_MEMORY_SIZE_COLUMNS_IMPORTANCE_RULES);var allocatorSizeColumns=tr.ui.analysis.MemoryColumn.fromRows(rows,'allocatorCells',this.aggregationMode_,ALLOCATOR_SIZE_COLUMNS_CONSTRUCTOR_RULES);tr.ui.analysis.MemoryColumn.sortByImportance(allocatorSizeColumns,ALLOCATOR_SIZE_COLUMNS_IMPORTANCE_RULES);var sizeColumns=usedMemorySizeColumns.concat(allocatorSizeColumns);tr.ui.analysis.MemoryColumn.spaceEqually(sizeColumns);var columns=[titleColumn].concat(sizeColumns);return columns;},storeSelection_:function(){var selectedRowTitle;var selectedRow=this.$.table.selectedTableRow;if(selectedRow!==undefined)
-selectedRowTitle=selectedRow.title;var selectedColumnName;var selectedColumnIndex=this.$.table.selectedColumnIndex;if(selectedColumnIndex!==undefined){var selectedColumn=this.$.table.tableColumns[selectedColumnIndex];selectedColumnName=selectedColumn.name;}
-this.$.state.set({rowTitle:selectedRowTitle,columnName:selectedColumnName});},restoreSelection_:function(){var settings=this.$.state.get();if(settings===undefined||settings.rowTitle===undefined||settings.columnName===undefined)
-return;var selectedColumnName=settings.columnName;var selectedColumnIndex=tr.b.findFirstIndexInArray(this.$.table.tableColumns,function(column){return column.name===selectedColumnName;});if(selectedColumnIndex<0)
-return;var selectedRowTitle=settings.rowTitle;var selectedRow=tr.b.findFirstInArray(this.$.table.tableRows,function(row){return row.title===selectedRowTitle;});if(selectedRow===undefined)
-return;this.$.table.selectedTableRow=selectedRow;this.$.table.selectedColumnIndex=selectedColumnIndex;}});return{AllocatorColumn:AllocatorColumn};});'use strict';tr.exportTo('tr.ui.analysis',function(){Polymer('tr-ui-a-memory-dump-header-pane',{created:function(){this.containerMemoryDumps_=undefined;},ready:function(){this.$.aggregation_mode_container.appendChild(tr.ui.b.createSelector(this,'aggregationMode','memoryDumpHeaderPane.aggregationMode',tr.ui.analysis.MemoryColumn.AggregationMode.DIFF,[{label:'Diff',value:tr.ui.analysis.MemoryColumn.AggregationMode.DIFF},{label:'Max',value:tr.ui.analysis.MemoryColumn.AggregationMode.MAX}]));},set containerMemoryDumps(containerMemoryDumps){this.containerMemoryDumps_=containerMemoryDumps;this.scheduleRebuildPane_();},get containerMemoryDumps(){return this.containerMemoryDumps_;},set aggregationMode(aggregationMode){this.aggregationMode_=aggregationMode;this.scheduleRebuildPane_();},get aggregationMode(){return this.aggregationMode_;},rebuildPane_:function(){this.updateLabel_();this.updateAggregationModeSelector_();this.changeChildPane_();},updateLabel_:function(){this.$.label.textContent='';if(this.containerMemoryDumps_===undefined||this.containerMemoryDumps_.length<=0){this.$.label.textContent='No memory dumps selected';return;}
-var containerDumpCount=this.containerMemoryDumps_.length;var isMultiSelection=containerDumpCount>1;this.$.label.appendChild(document.createTextNode('Selected '+containerDumpCount+' memory dump'+
-(isMultiSelection?'s':'')+' in '+this.containerMemoryDumps_[0].containerName+' at '));this.$.label.appendChild(document.createTextNode(tr.b.u.TimeStamp.format(this.containerMemoryDumps_[0].start)));if(isMultiSelection){var ELLIPSIS=String.fromCharCode(8230);this.$.label.appendChild(document.createTextNode(ELLIPSIS));this.$.label.appendChild(document.createTextNode(tr.b.u.TimeStamp.format(this.containerMemoryDumps_[containerDumpCount-1].start)));}},updateAggregationModeSelector_:function(){var displayStyle;if(this.containerMemoryDumps_===undefined||this.containerMemoryDumps_.length<=1)
-displayStyle='none';else
-displayStyle='initial';this.$.aggregation_mode_container.style.display=displayStyle;},changeChildPane_:function(){this.childPaneBuilder=function(){if(this.containerMemoryDumps_===undefined||this.containerMemoryDumps_.length<=0)
-return undefined;var overviewPane=document.createElement('tr-ui-a-memory-dump-overview-pane');overviewPane.processMemoryDumps=this.containerMemoryDumps_.map(function(containerDump){return containerDump.processMemoryDumps;});overviewPane.aggregationMode=this.aggregationMode;return overviewPane;}.bind(this);}});return{};});'use strict';Polymer('tr-ui-a-stacked-pane-view',{setPaneBuilder:function(paneBuilder,opt_parentPane){var paneContainer=this.$.pane_container;if(opt_parentPane){if(!(opt_parentPane instanceof HTMLElement))
-throw new Error('Parent pane must be an HTML element');if(opt_parentPane.parentElement!==paneContainer)
-throw new Error('Parent pane must be a child of the pane container');}
-while(paneContainer.lastElementChild!==null&&paneContainer.lastElementChild!==opt_parentPane){var removedPane=this.$.pane_container.lastElementChild;var listener=this.listeners_.get(removedPane);if(listener===undefined)
-throw new Error('No listener associated with pane');this.listeners_.delete(removedPane);removedPane.removeEventListener('request-child-pane-change',listener);paneContainer.removeChild(removedPane);}
-if(opt_parentPane&&opt_parentPane.parentElement!==paneContainer)
-throw new Error('Parent pane was removed from the pane container');if(!paneBuilder)
-return;var pane=paneBuilder();if(!pane)
-return;if(!(pane instanceof HTMLElement))
-throw new Error('Pane must be an HTML element');var listener=function(event){this.setPaneBuilder(pane.childPaneBuilder,pane);}.bind(this);if(!this.listeners_){this.listeners_=new WeakMap();}
-this.listeners_.set(pane,listener);pane.addEventListener('request-child-pane-change',listener);paneContainer.appendChild(pane);pane.appended();},rebuild:function(){var currentPane=this.$.pane_container.firstElementChild;while(currentPane){currentPane.rebuild();currentPane=currentPane.nextElementSibling;}},get panesForTesting(){var panes=[];var currentChild=this.$.pane_container.firstElementChild;while(currentChild){panes.push(currentChild);currentChild=currentChild.nextElementSibling;}
-return panes;}});'use strict';tr.exportTo('tr.ui.analysis',function(){Polymer('tr-ui-a-container-memory-dump-sub-view',{set selection(selection){if(selection===undefined){this.currentSelection_=undefined;this.dumpsByContainerName_=undefined;this.updateContents_();return;}
-selection.forEach(function(event){if(!(event instanceof tr.model.ContainerMemoryDump)){throw new Error('Memory dump sub-view only supports container memory dumps');}});this.currentSelection_=selection;this.dumpsByContainerName_=tr.b.group(this.currentSelection_.toArray(),function(dump){return dump.containerName;});tr.b.iterItems(this.dumpsByContainerName_,function(containerName,dumps){dumps.sort(function(a,b){return a.start-b.start;});});this.updateContents_();},get selection(){return this.currentSelection_;},get requiresTallView(){return true;},updateContents_:function(){this.$.content.textContent='';if(this.dumpsByContainerName_===undefined)
-return;var containerNames=Object.keys(this.dumpsByContainerName_);if(containerNames.length===0)
-return;if(containerNames.length>1)
-this.buildViewForMultipleContainerNames_();else
-this.buildViewForSingleContainerName_();},buildViewForSingleContainerName_:function(){var containerMemoryDumps=this.currentSelection_;var dumpView=this.ownerDocument.createElement('tr-ui-a-stacked-pane-view');this.$.content.appendChild(dumpView);dumpView.setPaneBuilder(function(){var headerPane=document.createElement('tr-ui-a-memory-dump-header-pane');headerPane.containerMemoryDumps=containerMemoryDumps;return headerPane;});},buildViewForMultipleContainerNames_:function(){var ownerDocument=this.ownerDocument;var rows=tr.b.dictionaryValues(tr.b.mapItems(this.dumpsByContainerName_,function(containerName,dumps){return{containerName:containerName,subRows:dumps,isExpanded:true};}));rows.sort(function(a,b){return a.containerName.localeCompare(b.containerName);});var columns=[{title:'Dump',value:function(row){if(row.subRows===undefined)
-return this.singleDumpValue_(row);else
-return this.groupedDumpValue_(row);},singleDumpValue_:function(row){var linkEl=ownerDocument.createElement('tr-ui-a-analysis-link');linkEl.setSelectionAndContent(new tr.model.EventSet([row]));linkEl.appendChild(tr.ui.units.createTimeStampSpan(row.start,{ownerDocument:ownerDocument}));return linkEl;},groupedDumpValue_:function(row){var linkEl=ownerDocument.createElement('tr-ui-a-analysis-link');linkEl.setSelectionAndContent(new tr.model.EventSet(row.subRows));linkEl.appendChild(tr.ui.b.createSpan({ownerDocument:ownerDocument,textContent:row.subRows.length+' memory dump'+
-(row.subRows.length===1?'':'s')+' in '}));linkEl.appendChild(tr.ui.b.createSpan({ownerDocument:ownerDocument,textContent:row.containerName,bold:true}));return linkEl;}}];var table=this.ownerDocument.createElement('tr-ui-b-table');table.tableColumns=columns;table.tableRows=rows;table.showHeader=false;table.rebuild();this.$.content.appendChild(table);}});return{};});'use strict';var EventSet=tr.model.EventSet;Polymer('tr-ui-a-power-sample-table',{ready:function(){this.$.table.tableColumns=[{title:'Time',width:'100px',value:function(row){return tr.ui.units.createTimeStampSpan(row.start);}},{title:'Power (mW)',width:'100%',value:function(row){return row.power;}}];this.samples=new EventSet();},get samples(){return this.samples_;},set samples(samples){this.samples_=(samples===undefined)?new EventSet():samples;this.updateContents_();},updateContents_:function(){this.$.table.tableRows=this.samples.toArray();this.$.table.rebuild();}});'use strict';Polymer('tr-ui-a-single-power-sample-sub-view',{ready:function(){this.currentSelection_=undefined;},get selection(){return this.currentSelection_;},set selection(selection){this.currentSelection_=selection;this.updateContents_();},updateContents_:function(){this.$.samplesTable.samples=this.selection;}});!function(){function n(n){return null!=n&&!isNaN(n)}function t(n){return n.length}function e(n){for(var t=1;n*t%1;)t*=10;return t}function r(n,t){try{for(var e in t)Object.defineProperty(n.prototype,e,{value:t[e],enumerable:!1})}catch(r){n.prototype=t}}function u(){}function i(n){return aa+n in this}function o(n){return n=aa+n,n in this&&delete this[n]}function a(){var n=[];return this.forEach(function(t){n.push(t)}),n}function c(){var n=0;for(var t in this)t.charCodeAt(0)===ca&&++n;return n}function s(){for(var n in this)if(n.charCodeAt(0)===ca)return!1;return!0}function l(){}function f(n,t,e){return function(){var r=e.apply(t,arguments);return r===t?n:r}}function h(n,t){if(t in n)return t;t=t.charAt(0).toUpperCase()+t.substring(1);for(var e=0,r=sa.length;r>e;++e){var u=sa[e]+t;if(u in n)return u}}function g(){}function p(){}function v(n){function t(){for(var t,r=e,u=-1,i=r.length;++u<i;)(t=r[u].on)&&t.apply(this,arguments);return n}var e=[],r=new u;return t.on=function(t,u){var i,o=r.get(t);return arguments.length<2?o&&o.on:(o&&(o.on=null,e=e.slice(0,i=e.indexOf(o)).concat(e.slice(i+1)),r.remove(t)),u&&e.push(r.set(t,{on:u})),n)},t}function d(){Xo.event.preventDefault()}function m(){for(var n,t=Xo.event;n=t.sourceEvent;)t=n;return t}function y(n){for(var t=new p,e=0,r=arguments.length;++e<r;)t[arguments[e]]=v(t);return t.of=function(e,r){return function(u){try{var i=u.sourceEvent=Xo.event;u.target=n,Xo.event=u,t[u.type].apply(e,r)}finally{Xo.event=i}}},t}function x(n){return fa(n,da),n}function M(n){return"function"==typeof n?n:function(){return ha(n,this)}}function _(n){return"function"==typeof n?n:function(){return ga(n,this)}}function b(n,t){function e(){this.removeAttribute(n)}function r(){this.removeAttributeNS(n.space,n.local)}function u(){this.setAttribute(n,t)}function i(){this.setAttributeNS(n.space,n.local,t)}function o(){var e=t.apply(this,arguments);null==e?this.removeAttribute(n):this.setAttribute(n,e)}function a(){var e=t.apply(this,arguments);null==e?this.removeAttributeNS(n.space,n.local):this.setAttributeNS(n.space,n.local,e)}return n=Xo.ns.qualify(n),null==t?n.local?r:e:"function"==typeof t?n.local?a:o:n.local?i:u}function w(n){return n.trim().replace(/\s+/g," ")}function S(n){return new 
RegExp("(?:^|\\s+)"+Xo.requote(n)+"(?:\\s+|$)","g")}function k(n){return n.trim().split(/^|\s+/)}function E(n,t){function e(){for(var e=-1;++e<u;)n[e](this,t)}function r(){for(var e=-1,r=t.apply(this,arguments);++e<u;)n[e](this,r)}n=k(n).map(A);var u=n.length;return"function"==typeof t?r:e}function A(n){var t=S(n);return function(e,r){if(u=e.classList)return r?u.add(n):u.remove(n);var u=e.getAttribute("class")||"";r?(t.lastIndex=0,t.test(u)||e.setAttribute("class",w(u+" "+n))):e.setAttribute("class",w(u.replace(t," ")))}}function C(n,t,e){function r(){this.style.removeProperty(n)}function u(){this.style.setProperty(n,t,e)}function i(){var r=t.apply(this,arguments);null==r?this.style.removeProperty(n):this.style.setProperty(n,r,e)}return null==t?r:"function"==typeof t?i:u}function N(n,t){function e(){delete this[n]}function r(){this[n]=t}function u(){var e=t.apply(this,arguments);null==e?delete this[n]:this[n]=e}return null==t?e:"function"==typeof t?u:r}function L(n){return"function"==typeof n?n:(n=Xo.ns.qualify(n)).local?function(){return this.ownerDocument.createElementNS(n.space,n.local)}:function(){return this.ownerDocument.createElementNS(this.namespaceURI,n)}}function T(n){return{__data__:n}}function q(n){return function(){return va(this,n)}}function z(n){return arguments.length||(n=Xo.ascending),function(t,e){return t&&e?n(t.__data__,e.__data__):!t-!e}}function R(n,t){for(var e=0,r=n.length;r>e;e++)for(var u,i=n[e],o=0,a=i.length;a>o;o++)(u=i[o])&&t(u,o,e);return n}function D(n){return fa(n,ya),n}function P(n){var t,e;return function(r,u,i){var o,a=n[i].update,c=a.length;for(i!=e&&(e=i,t=0),u>=t&&(t=u+1);!(o=a[t])&&++t<c;);return o}}function U(){var n=this.__transition__;n&&++n.active}function j(n,t,e){function r(){var t=this[o];t&&(this.removeEventListener(n,t,t.$),delete this[o])}function u(){var u=c(t,Bo(arguments));r.call(this),this.addEventListener(n,this[o]=u,u.$=e),u._=t}function i(){var t,e=new RegExp("^__on([^.]+)"+Xo.requote(n)+"$");for(var r in this)if(t=r.match(e)){var u=this[r];this.removeEventListener(t[1],u,u.$),delete this[r]}}var o="__on"+n,a=n.indexOf("."),c=H;a>0&&(n=n.substring(0,a));var s=Ma.get(n);return s&&(n=s,c=F),a?t?u:r:t?g:i}function H(n,t){return function(e){var r=Xo.event;Xo.event=e,t[0]=this.__data__;try{n.apply(this,t)}finally{Xo.event=r}}}function F(n,t){var e=H(n,t);return function(n){var t=this,r=n.relatedTarget;r&&(r===t||8&r.compareDocumentPosition(t))||e.call(t,n)}}function O(){var n=".dragsuppress-"+ ++ba,t="click"+n,e=Xo.select(Go).on("touchmove"+n,d).on("dragstart"+n,d).on("selectstart"+n,d);if(_a){var r=Jo.style,u=r[_a];r[_a]="none"}return function(i){function o(){e.on(t,null)}e.on(n,null),_a&&(r[_a]=u),i&&(e.on(t,function(){d(),o()},!0),setTimeout(o,0))}}function Y(n,t){t.changedTouches&&(t=t.changedTouches[0]);var e=n.ownerSVGElement||n;if(e.createSVGPoint){var r=e.createSVGPoint();if(0>wa&&(Go.scrollX||Go.scrollY)){e=Xo.select("body").append("svg").style({position:"absolute",top:0,left:0,margin:0,padding:0,border:"none"},"important");var u=e[0][0].getScreenCTM();wa=!(u.f||u.e),e.remove()}return wa?(r.x=t.pageX,r.y=t.pageY):(r.x=t.clientX,r.y=t.clientY),r=r.matrixTransform(n.getScreenCTM().inverse()),[r.x,r.y]}var i=n.getBoundingClientRect();return[t.clientX-i.left-n.clientLeft,t.clientY-i.top-n.clientTop]}function I(n){return n>0?1:0>n?-1:0}function Z(n,t,e){return(t[0]-n[0])*(e[1]-n[1])-(t[1]-n[1])*(e[0]-n[0])}function V(n){return n>1?0:-1>n?Sa:Math.acos(n)}function X(n){return n>1?Ea:-1>n?-Ea:Math.asin(n)}function 
$(n){return((n=Math.exp(n))-1/n)/2}function B(n){return((n=Math.exp(n))+1/n)/2}function W(n){return((n=Math.exp(2*n))-1)/(n+1)}function J(n){return(n=Math.sin(n/2))*n}function G(){}function K(n,t,e){return new Q(n,t,e)}function Q(n,t,e){this.h=n,this.s=t,this.l=e}function nt(n,t,e){function r(n){return n>360?n-=360:0>n&&(n+=360),60>n?i+(o-i)*n/60:180>n?o:240>n?i+(o-i)*(240-n)/60:i}function u(n){return Math.round(255*r(n))}var i,o;return n=isNaN(n)?0:(n%=360)<0?n+360:n,t=isNaN(t)?0:0>t?0:t>1?1:t,e=0>e?0:e>1?1:e,o=.5>=e?e*(1+t):e+t-e*t,i=2*e-o,gt(u(n+120),u(n),u(n-120))}function tt(n,t,e){return new et(n,t,e)}function et(n,t,e){this.h=n,this.c=t,this.l=e}function rt(n,t,e){return isNaN(n)&&(n=0),isNaN(t)&&(t=0),ut(e,Math.cos(n*=Na)*t,Math.sin(n)*t)}function ut(n,t,e){return new it(n,t,e)}function it(n,t,e){this.l=n,this.a=t,this.b=e}function ot(n,t,e){var r=(n+16)/116,u=r+t/500,i=r-e/200;return u=ct(u)*Fa,r=ct(r)*Oa,i=ct(i)*Ya,gt(lt(3.2404542*u-1.5371385*r-.4985314*i),lt(-.969266*u+1.8760108*r+.041556*i),lt(.0556434*u-.2040259*r+1.0572252*i))}function at(n,t,e){return n>0?tt(Math.atan2(e,t)*La,Math.sqrt(t*t+e*e),n):tt(0/0,0/0,n)}function ct(n){return n>.206893034?n*n*n:(n-4/29)/7.787037}function st(n){return n>.008856?Math.pow(n,1/3):7.787037*n+4/29}function lt(n){return Math.round(255*(.00304>=n?12.92*n:1.055*Math.pow(n,1/2.4)-.055))}function ft(n){return gt(n>>16,255&n>>8,255&n)}function ht(n){return ft(n)+""}function gt(n,t,e){return new pt(n,t,e)}function pt(n,t,e){this.r=n,this.g=t,this.b=e}function vt(n){return 16>n?"0"+Math.max(0,n).toString(16):Math.min(255,n).toString(16)}function dt(n,t,e){var r,u,i,o,a=0,c=0,s=0;if(u=/([a-z]+)\((.*)\)/i.exec(n))switch(i=u[2].split(","),u[1]){case"hsl":return e(parseFloat(i[0]),parseFloat(i[1])/100,parseFloat(i[2])/100);case"rgb":return t(Mt(i[0]),Mt(i[1]),Mt(i[2]))}return(o=Va.get(n))?t(o.r,o.g,o.b):(null!=n&&"#"===n.charAt(0)&&(r=parseInt(n.substring(1),16),isNaN(r)||(4===n.length?(a=(3840&r)>>4,a=a>>4|a,c=240&r,c=c>>4|c,s=15&r,s=s<<4|s):7===n.length&&(a=(16711680&r)>>16,c=(65280&r)>>8,s=255&r))),t(a,c,s))}function mt(n,t,e){var r,u,i=Math.min(n/=255,t/=255,e/=255),o=Math.max(n,t,e),a=o-i,c=(o+i)/2;return a?(u=.5>c?a/(o+i):a/(2-o-i),r=n==o?(t-e)/a+(e>t?6:0):t==o?(e-n)/a+2:(n-t)/a+4,r*=60):(r=0/0,u=c>0&&1>c?0:r),K(r,u,c)}function yt(n,t,e){n=xt(n),t=xt(t),e=xt(e);var r=st((.4124564*n+.3575761*t+.1804375*e)/Fa),u=st((.2126729*n+.7151522*t+.072175*e)/Oa),i=st((.0193339*n+.119192*t+.9503041*e)/Ya);return ut(116*u-16,500*(r-u),200*(u-i))}function xt(n){return(n/=255)<=.04045?n/12.92:Math.pow((n+.055)/1.055,2.4)}function Mt(n){var t=parseFloat(n);return"%"===n.charAt(n.length-1)?Math.round(2.55*t):t}function _t(n){return"function"==typeof n?n:function(){return n}}function bt(n){return n}function wt(n){return function(t,e,r){return 2===arguments.length&&"function"==typeof e&&(r=e,e=null),St(t,e,n,r)}}function St(n,t,e,r){function u(){var n,t=c.status;if(!t&&c.responseText||t>=200&&300>t||304===t){try{n=e.call(i,c)}catch(r){return o.error.call(i,r),void 0}o.load.call(i,n)}else o.error.call(i,c)}var i={},o=Xo.dispatch("beforesend","progress","load","error"),a={},c=new XMLHttpRequest,s=null;return!Go.XDomainRequest||"withCredentials"in c||!/^(http(s)?:)?\/\//.test(n)||(c=new XDomainRequest),"onload"in c?c.onload=c.onerror=u:c.onreadystatechange=function(){c.readyState>3&&u()},c.onprogress=function(n){var t=Xo.event;Xo.event=n;try{o.progress.call(i,c)}finally{Xo.event=t}},i.header=function(n,t){return 
n=(n+"").toLowerCase(),arguments.length<2?a[n]:(null==t?delete a[n]:a[n]=t+"",i)},i.mimeType=function(n){return arguments.length?(t=null==n?null:n+"",i):t},i.responseType=function(n){return arguments.length?(s=n,i):s},i.response=function(n){return e=n,i},["get","post"].forEach(function(n){i[n]=function(){return i.send.apply(i,[n].concat(Bo(arguments)))}}),i.send=function(e,r,u){if(2===arguments.length&&"function"==typeof r&&(u=r,r=null),c.open(e,n,!0),null==t||"accept"in a||(a.accept=t+",*/*"),c.setRequestHeader)for(var l in a)c.setRequestHeader(l,a[l]);return null!=t&&c.overrideMimeType&&c.overrideMimeType(t),null!=s&&(c.responseType=s),null!=u&&i.on("error",u).on("load",function(n){u(null,n)}),o.beforesend.call(i,c),c.send(null==r?null:r),i},i.abort=function(){return c.abort(),i},Xo.rebind(i,o,"on"),null==r?i:i.get(kt(r))}function kt(n){return 1===n.length?function(t,e){n(null==t?e:null)}:n}function Et(){var n=At(),t=Ct()-n;t>24?(isFinite(t)&&(clearTimeout(Wa),Wa=setTimeout(Et,t)),Ba=0):(Ba=1,Ga(Et))}function At(){var n=Date.now();for(Ja=Xa;Ja;)n>=Ja.t&&(Ja.f=Ja.c(n-Ja.t)),Ja=Ja.n;return n}function Ct(){for(var n,t=Xa,e=1/0;t;)t.f?t=n?n.n=t.n:Xa=t.n:(t.t<e&&(e=t.t),t=(n=t).n);return $a=n,e}function Nt(n,t){return t-(n?Math.ceil(Math.log(n)/Math.LN10):1)}function Lt(n,t){var e=Math.pow(10,3*oa(8-t));return{scale:t>8?function(n){return n/e}:function(n){return n*e},symbol:n}}function Tt(n){var t=n.decimal,e=n.thousands,r=n.grouping,u=n.currency,i=r?function(n){for(var t=n.length,u=[],i=0,o=r[0];t>0&&o>0;)u.push(n.substring(t-=o,t+o)),o=r[i=(i+1)%r.length];return u.reverse().join(e)}:bt;return function(n){var e=Qa.exec(n),r=e[1]||" ",o=e[2]||">",a=e[3]||"",c=e[4]||"",s=e[5],l=+e[6],f=e[7],h=e[8],g=e[9],p=1,v="",d="",m=!1;switch(h&&(h=+h.substring(1)),(s||"0"===r&&"="===o)&&(s=r="0",o="=",f&&(l-=Math.floor((l-1)/4))),g){case"n":f=!0,g="g";break;case"%":p=100,d="%",g="f";break;case"p":p=100,d="%",g="r";break;case"b":case"o":case"x":case"X":"#"===c&&(v="0"+g.toLowerCase());case"c":case"d":m=!0,h=0;break;case"s":p=-1,g="r"}"$"===c&&(v=u[0],d=u[1]),"r"!=g||h||(g="g"),null!=h&&("g"==g?h=Math.max(1,Math.min(21,h)):("e"==g||"f"==g)&&(h=Math.max(0,Math.min(20,h)))),g=nc.get(g)||qt;var y=s&&f;return function(n){var e=d;if(m&&n%1)return"";var u=0>n||0===n&&0>1/n?(n=-n,"-"):a;if(0>p){var c=Xo.formatPrefix(n,h);n=c.scale(n),e=c.symbol+d}else n*=p;n=g(n,h);var x=n.lastIndexOf("."),M=0>x?n:n.substring(0,x),_=0>x?"":t+n.substring(x+1);!s&&f&&(M=i(M));var b=v.length+M.length+_.length+(y?0:u.length),w=l>b?new Array(b=l-b+1).join(r):"";return y&&(M=i(w+M)),u+=v,n=M+_,("<"===o?u+n+w:">"===o?w+u+n:"^"===o?w.substring(0,b>>=1)+u+n+w.substring(b):u+(y?n:w+n))+e}}}function qt(n){return n+""}function zt(){this._=new Date(arguments.length>1?Date.UTC.apply(this,arguments):arguments[0])}function Rt(n,t,e){function r(t){var e=n(t),r=i(e,1);return r-t>t-e?e:r}function u(e){return t(e=n(new ec(e-1)),1),e}function i(n,e){return t(n=new ec(+n),e),n}function o(n,r,i){var o=u(n),a=[];if(i>1)for(;r>o;)e(o)%i||a.push(new Date(+o)),t(o,1);else for(;r>o;)a.push(new Date(+o)),t(o,1);return a}function a(n,t,e){try{ec=zt;var r=new zt;return r._=n,o(r,t,e)}finally{ec=Date}}n.floor=n,n.round=r,n.ceil=u,n.offset=i,n.range=o;var c=n.utc=Dt(n);return c.floor=c,c.round=Dt(r),c.ceil=Dt(u),c.offset=Dt(i),c.range=a,n}function Dt(n){return function(t,e){try{ec=zt;var r=new zt;return r._=t,n(r,e)._}finally{ec=Date}}}function Pt(n){function t(n){function t(t){for(var 
e,u,i,o=[],a=-1,c=0;++a<r;)37===n.charCodeAt(a)&&(o.push(n.substring(c,a)),null!=(u=uc[e=n.charAt(++a)])&&(e=n.charAt(++a)),(i=C[e])&&(e=i(t,null==u?"e"===e?" ":"0":u)),o.push(e),c=a+1);return o.push(n.substring(c,a)),o.join("")}var r=n.length;return t.parse=function(t){var r={y:1900,m:0,d:1,H:0,M:0,S:0,L:0,Z:null},u=e(r,n,t,0);if(u!=t.length)return null;"p"in r&&(r.H=r.H%12+12*r.p);var i=null!=r.Z&&ec!==zt,o=new(i?zt:ec);return"j"in r?o.setFullYear(r.y,0,r.j):"w"in r&&("W"in r||"U"in r)?(o.setFullYear(r.y,0,1),o.setFullYear(r.y,0,"W"in r?(r.w+6)%7+7*r.W-(o.getDay()+5)%7:r.w+7*r.U-(o.getDay()+6)%7)):o.setFullYear(r.y,r.m,r.d),o.setHours(r.H+Math.floor(r.Z/100),r.M+r.Z%100,r.S,r.L),i?o._:o},t.toString=function(){return n},t}function e(n,t,e,r){for(var u,i,o,a=0,c=t.length,s=e.length;c>a;){if(r>=s)return-1;if(u=t.charCodeAt(a++),37===u){if(o=t.charAt(a++),i=N[o in uc?t.charAt(a++):o],!i||(r=i(n,e,r))<0)return-1}else if(u!=e.charCodeAt(r++))return-1}return r}function r(n,t,e){b.lastIndex=0;var r=b.exec(t.substring(e));return r?(n.w=w.get(r[0].toLowerCase()),e+r[0].length):-1}function u(n,t,e){M.lastIndex=0;var r=M.exec(t.substring(e));return r?(n.w=_.get(r[0].toLowerCase()),e+r[0].length):-1}function i(n,t,e){E.lastIndex=0;var r=E.exec(t.substring(e));return r?(n.m=A.get(r[0].toLowerCase()),e+r[0].length):-1}function o(n,t,e){S.lastIndex=0;var r=S.exec(t.substring(e));return r?(n.m=k.get(r[0].toLowerCase()),e+r[0].length):-1}function a(n,t,r){return e(n,C.c.toString(),t,r)}function c(n,t,r){return e(n,C.x.toString(),t,r)}function s(n,t,r){return e(n,C.X.toString(),t,r)}function l(n,t,e){var r=x.get(t.substring(e,e+=2).toLowerCase());return null==r?-1:(n.p=r,e)}var f=n.dateTime,h=n.date,g=n.time,p=n.periods,v=n.days,d=n.shortDays,m=n.months,y=n.shortMonths;t.utc=function(n){function e(n){try{ec=zt;var t=new ec;return t._=n,r(t)}finally{ec=Date}}var r=t(n);return e.parse=function(n){try{ec=zt;var t=r.parse(n);return t&&t._}finally{ec=Date}},e.toString=r.toString,e},t.multi=t.utc.multi=ee;var x=Xo.map(),M=jt(v),_=Ht(v),b=jt(d),w=Ht(d),S=jt(m),k=Ht(m),E=jt(y),A=Ht(y);p.forEach(function(n,t){x.set(n.toLowerCase(),t)});var C={a:function(n){return d[n.getDay()]},A:function(n){return v[n.getDay()]},b:function(n){return y[n.getMonth()]},B:function(n){return m[n.getMonth()]},c:t(f),d:function(n,t){return Ut(n.getDate(),t,2)},e:function(n,t){return Ut(n.getDate(),t,2)},H:function(n,t){return Ut(n.getHours(),t,2)},I:function(n,t){return Ut(n.getHours()%12||12,t,2)},j:function(n,t){return Ut(1+tc.dayOfYear(n),t,3)},L:function(n,t){return Ut(n.getMilliseconds(),t,3)},m:function(n,t){return Ut(n.getMonth()+1,t,2)},M:function(n,t){return Ut(n.getMinutes(),t,2)},p:function(n){return p[+(n.getHours()>=12)]},S:function(n,t){return Ut(n.getSeconds(),t,2)},U:function(n,t){return Ut(tc.sundayOfYear(n),t,2)},w:function(n){return n.getDay()},W:function(n,t){return Ut(tc.mondayOfYear(n),t,2)},x:t(h),X:t(g),y:function(n,t){return Ut(n.getFullYear()%100,t,2)},Y:function(n,t){return Ut(n.getFullYear()%1e4,t,4)},Z:ne,"%":function(){return"%"}},N={a:r,A:u,b:i,B:o,c:a,d:Bt,e:Bt,H:Jt,I:Jt,j:Wt,L:Qt,m:$t,M:Gt,p:l,S:Kt,U:Ot,w:Ft,W:Yt,x:c,X:s,y:Zt,Y:It,Z:Vt,"%":te};return t}function Ut(n,t,e){var r=0>n?"-":"",u=(r?-n:n)+"",i=u.length;return r+(e>i?new Array(e-i+1).join(t)+u:u)}function jt(n){return new RegExp("^(?:"+n.map(Xo.requote).join("|")+")","i")}function Ht(n){for(var t=new u,e=-1,r=n.length;++e<r;)t.set(n[e].toLowerCase(),e);return t}function Ft(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+1));return 
r?(n.w=+r[0],e+r[0].length):-1}function Ot(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e));return r?(n.U=+r[0],e+r[0].length):-1}function Yt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e));return r?(n.W=+r[0],e+r[0].length):-1}function It(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+4));return r?(n.y=+r[0],e+r[0].length):-1}function Zt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.y=Xt(+r[0]),e+r[0].length):-1}function Vt(n,t,e){return/^[+-]\d{4}$/.test(t=t.substring(e,e+5))?(n.Z=+t,e+5):-1}function Xt(n){return n+(n>68?1900:2e3)}function $t(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.m=r[0]-1,e+r[0].length):-1}function Bt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.d=+r[0],e+r[0].length):-1}function Wt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+3));return r?(n.j=+r[0],e+r[0].length):-1}function Jt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.H=+r[0],e+r[0].length):-1}function Gt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.M=+r[0],e+r[0].length):-1}function Kt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.S=+r[0],e+r[0].length):-1}function Qt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+3));return r?(n.L=+r[0],e+r[0].length):-1}function ne(n){var t=n.getTimezoneOffset(),e=t>0?"-":"+",r=~~(oa(t)/60),u=oa(t)%60;return e+Ut(r,"0",2)+Ut(u,"0",2)}function te(n,t,e){oc.lastIndex=0;var r=oc.exec(t.substring(e,e+1));return r?e+r[0].length:-1}function ee(n){for(var t=n.length,e=-1;++e<t;)n[e][0]=this(n[e][0]);return function(t){for(var e=0,r=n[e];!r[1](t);)r=n[++e];return r[0](t)}}function re(){}function ue(n,t,e){var r=e.s=n+t,u=r-n,i=r-u;e.t=n-i+(t-u)}function ie(n,t){n&&lc.hasOwnProperty(n.type)&&lc[n.type](n,t)}function oe(n,t,e){var r,u=-1,i=n.length-e;for(t.lineStart();++u<i;)r=n[u],t.point(r[0],r[1],r[2]);t.lineEnd()}function ae(n,t){var e=-1,r=n.length;for(t.polygonStart();++e<r;)oe(n[e],t,1);t.polygonEnd()}function ce(){function n(n,t){n*=Na,t=t*Na/2+Sa/4;var e=n-r,o=e>=0?1:-1,a=o*e,c=Math.cos(t),s=Math.sin(t),l=i*s,f=u*c+l*Math.cos(a),h=l*o*Math.sin(a);hc.add(Math.atan2(h,f)),r=n,u=c,i=s}var t,e,r,u,i;gc.point=function(o,a){gc.point=n,r=(t=o)*Na,u=Math.cos(a=(e=a)*Na/2+Sa/4),i=Math.sin(a)},gc.lineEnd=function(){n(t,e)}}function se(n){var t=n[0],e=n[1],r=Math.cos(e);return[r*Math.cos(t),r*Math.sin(t),Math.sin(e)]}function le(n,t){return n[0]*t[0]+n[1]*t[1]+n[2]*t[2]}function fe(n,t){return[n[1]*t[2]-n[2]*t[1],n[2]*t[0]-n[0]*t[2],n[0]*t[1]-n[1]*t[0]]}function he(n,t){n[0]+=t[0],n[1]+=t[1],n[2]+=t[2]}function ge(n,t){return[n[0]*t,n[1]*t,n[2]*t]}function pe(n){var t=Math.sqrt(n[0]*n[0]+n[1]*n[1]+n[2]*n[2]);n[0]/=t,n[1]/=t,n[2]/=t}function ve(n){return[Math.atan2(n[1],n[0]),X(n[2])]}function de(n,t){return oa(n[0]-t[0])<Aa&&oa(n[1]-t[1])<Aa}function me(n,t){n*=Na;var e=Math.cos(t*=Na);ye(e*Math.cos(n),e*Math.sin(n),Math.sin(t))}function ye(n,t,e){++pc,dc+=(n-dc)/pc,mc+=(t-mc)/pc,yc+=(e-yc)/pc}function xe(){function n(n,u){n*=Na;var i=Math.cos(u*=Na),o=i*Math.cos(n),a=i*Math.sin(n),c=Math.sin(u),s=Math.atan2(Math.sqrt((s=e*c-r*a)*s+(s=r*o-t*c)*s+(s=t*a-e*o)*s),t*o+e*a+r*c);vc+=s,xc+=s*(t+(t=o)),Mc+=s*(e+(e=a)),_c+=s*(r+(r=c)),ye(t,e,r)}var t,e,r;kc.point=function(u,i){u*=Na;var o=Math.cos(i*=Na);t=o*Math.cos(u),e=o*Math.sin(u),r=Math.sin(i),kc.point=n,ye(t,e,r)}}function Me(){kc.point=me}function _e(){function n(n,t){n*=Na;var 
e=Math.cos(t*=Na),o=e*Math.cos(n),a=e*Math.sin(n),c=Math.sin(t),s=u*c-i*a,l=i*o-r*c,f=r*a-u*o,h=Math.sqrt(s*s+l*l+f*f),g=r*o+u*a+i*c,p=h&&-V(g)/h,v=Math.atan2(h,g);bc+=p*s,wc+=p*l,Sc+=p*f,vc+=v,xc+=v*(r+(r=o)),Mc+=v*(u+(u=a)),_c+=v*(i+(i=c)),ye(r,u,i)}var t,e,r,u,i;kc.point=function(o,a){t=o,e=a,kc.point=n,o*=Na;var c=Math.cos(a*=Na);r=c*Math.cos(o),u=c*Math.sin(o),i=Math.sin(a),ye(r,u,i)},kc.lineEnd=function(){n(t,e),kc.lineEnd=Me,kc.point=me}}function be(){return!0}function we(n,t,e,r,u){var i=[],o=[];if(n.forEach(function(n){if(!((t=n.length-1)<=0)){var t,e=n[0],r=n[t];if(de(e,r)){u.lineStart();for(var a=0;t>a;++a)u.point((e=n[a])[0],e[1]);return u.lineEnd(),void 0}var c=new ke(e,n,null,!0),s=new ke(e,null,c,!1);c.o=s,i.push(c),o.push(s),c=new ke(r,n,null,!1),s=new ke(r,null,c,!0),c.o=s,i.push(c),o.push(s)}}),o.sort(t),Se(i),Se(o),i.length){for(var a=0,c=e,s=o.length;s>a;++a)o[a].e=c=!c;for(var l,f,h=i[0];;){for(var g=h,p=!0;g.v;)if((g=g.n)===h)return;l=g.z,u.lineStart();do{if(g.v=g.o.v=!0,g.e){if(p)for(var a=0,s=l.length;s>a;++a)u.point((f=l[a])[0],f[1]);else r(g.x,g.n.x,1,u);g=g.n}else{if(p){l=g.p.z;for(var a=l.length-1;a>=0;--a)u.point((f=l[a])[0],f[1])}else r(g.x,g.p.x,-1,u);g=g.p}g=g.o,l=g.z,p=!p}while(!g.v);u.lineEnd()}}}function Se(n){if(t=n.length){for(var t,e,r=0,u=n[0];++r<t;)u.n=e=n[r],e.p=u,u=e;u.n=e=n[0],e.p=u}}function ke(n,t,e,r){this.x=n,this.z=t,this.o=e,this.e=r,this.v=!1,this.n=this.p=null}function Ee(n,t,e,r){return function(u,i){function o(t,e){var r=u(t,e);n(t=r[0],e=r[1])&&i.point(t,e)}function a(n,t){var e=u(n,t);d.point(e[0],e[1])}function c(){y.point=a,d.lineStart()}function s(){y.point=o,d.lineEnd()}function l(n,t){v.push([n,t]);var e=u(n,t);M.point(e[0],e[1])}function f(){M.lineStart(),v=[]}function h(){l(v[0][0],v[0][1]),M.lineEnd();var n,t=M.clean(),e=x.buffer(),r=e.length;if(v.pop(),p.push(v),v=null,r){if(1&t){n=e[0];var u,r=n.length-1,o=-1;for(i.lineStart();++o<r;)i.point((u=n[o])[0],u[1]);return i.lineEnd(),void 0}r>1&&2&t&&e.push(e.pop().concat(e.shift())),g.push(e.filter(Ae))}}var g,p,v,d=t(i),m=u.invert(r[0],r[1]),y={point:o,lineStart:c,lineEnd:s,polygonStart:function(){y.point=l,y.lineStart=f,y.lineEnd=h,g=[],p=[],i.polygonStart()},polygonEnd:function(){y.point=o,y.lineStart=c,y.lineEnd=s,g=Xo.merge(g);var n=Le(m,p);g.length?we(g,Ne,n,e,i):n&&(i.lineStart(),e(null,null,1,i),i.lineEnd()),i.polygonEnd(),g=p=null},sphere:function(){i.polygonStart(),i.lineStart(),e(null,null,1,i),i.lineEnd(),i.polygonEnd()}},x=Ce(),M=t(x);return y}}function Ae(n){return n.length>1}function Ce(){var n,t=[];return{lineStart:function(){t.push(n=[])},point:function(t,e){n.push([t,e])},lineEnd:g,buffer:function(){var e=t;return t=[],n=null,e},rejoin:function(){t.length>1&&t.push(t.pop().concat(t.shift()))}}}function Ne(n,t){return((n=n.x)[0]<0?n[1]-Ea-Aa:Ea-n[1])-((t=t.x)[0]<0?t[1]-Ea-Aa:Ea-t[1])}function Le(n,t){var e=n[0],r=n[1],u=[Math.sin(e),-Math.cos(e),0],i=0,o=0;hc.reset();for(var a=0,c=t.length;c>a;++a){var s=t[a],l=s.length;if(l)for(var f=s[0],h=f[0],g=f[1]/2+Sa/4,p=Math.sin(g),v=Math.cos(g),d=1;;){d===l&&(d=0),n=s[d];var m=n[0],y=n[1]/2+Sa/4,x=Math.sin(y),M=Math.cos(y),_=m-h,b=_>=0?1:-1,w=b*_,S=w>Sa,k=p*x;if(hc.add(Math.atan2(k*b*Math.sin(w),v*M+k*Math.cos(w))),i+=S?_+b*ka:_,S^h>=e^m>=e){var E=fe(se(f),se(n));pe(E);var A=fe(u,E);pe(A);var C=(S^_>=0?-1:1)*X(A[2]);(r>C||r===C&&(E[0]||E[1]))&&(o+=S^_>=0?1:-1)}if(!d++)break;h=m,p=x,v=M,f=n}}return(-Aa>i||Aa>i&&0>hc)^1&o}function Te(n){var 
t,e=0/0,r=0/0,u=0/0;return{lineStart:function(){n.lineStart(),t=1},point:function(i,o){var a=i>0?Sa:-Sa,c=oa(i-e);oa(c-Sa)<Aa?(n.point(e,r=(r+o)/2>0?Ea:-Ea),n.point(u,r),n.lineEnd(),n.lineStart(),n.point(a,r),n.point(i,r),t=0):u!==a&&c>=Sa&&(oa(e-u)<Aa&&(e-=u*Aa),oa(i-a)<Aa&&(i-=a*Aa),r=qe(e,r,i,o),n.point(u,r),n.lineEnd(),n.lineStart(),n.point(a,r),t=0),n.point(e=i,r=o),u=a},lineEnd:function(){n.lineEnd(),e=r=0/0},clean:function(){return 2-t}}}function qe(n,t,e,r){var u,i,o=Math.sin(n-e);return oa(o)>Aa?Math.atan((Math.sin(t)*(i=Math.cos(r))*Math.sin(e)-Math.sin(r)*(u=Math.cos(t))*Math.sin(n))/(u*i*o)):(t+r)/2}function ze(n,t,e,r){var u;if(null==n)u=e*Ea,r.point(-Sa,u),r.point(0,u),r.point(Sa,u),r.point(Sa,0),r.point(Sa,-u),r.point(0,-u),r.point(-Sa,-u),r.point(-Sa,0),r.point(-Sa,u);else if(oa(n[0]-t[0])>Aa){var i=n[0]<t[0]?Sa:-Sa;u=e*i/2,r.point(-i,u),r.point(0,u),r.point(i,u)}else r.point(t[0],t[1])}function Re(n){function t(n,t){return Math.cos(n)*Math.cos(t)>i}function e(n){var e,i,c,s,l;return{lineStart:function(){s=c=!1,l=1},point:function(f,h){var g,p=[f,h],v=t(f,h),d=o?v?0:u(f,h):v?u(f+(0>f?Sa:-Sa),h):0;if(!e&&(s=c=v)&&n.lineStart(),v!==c&&(g=r(e,p),(de(e,g)||de(p,g))&&(p[0]+=Aa,p[1]+=Aa,v=t(p[0],p[1]))),v!==c)l=0,v?(n.lineStart(),g=r(p,e),n.point(g[0],g[1])):(g=r(e,p),n.point(g[0],g[1]),n.lineEnd()),e=g;else if(a&&e&&o^v){var m;d&i||!(m=r(p,e,!0))||(l=0,o?(n.lineStart(),n.point(m[0][0],m[0][1]),n.point(m[1][0],m[1][1]),n.lineEnd()):(n.point(m[1][0],m[1][1]),n.lineEnd(),n.lineStart(),n.point(m[0][0],m[0][1])))}!v||e&&de(e,p)||n.point(p[0],p[1]),e=p,c=v,i=d},lineEnd:function(){c&&n.lineEnd(),e=null},clean:function(){return l|(s&&c)<<1}}}function r(n,t,e){var r=se(n),u=se(t),o=[1,0,0],a=fe(r,u),c=le(a,a),s=a[0],l=c-s*s;if(!l)return!e&&n;var f=i*c/l,h=-i*s/l,g=fe(o,a),p=ge(o,f),v=ge(a,h);he(p,v);var d=g,m=le(p,d),y=le(d,d),x=m*m-y*(le(p,p)-1);if(!(0>x)){var M=Math.sqrt(x),_=ge(d,(-m-M)/y);if(he(_,p),_=ve(_),!e)return _;var b,w=n[0],S=t[0],k=n[1],E=t[1];w>S&&(b=w,w=S,S=b);var A=S-w,C=oa(A-Sa)<Aa,N=C||Aa>A;if(!C&&k>E&&(b=k,k=E,E=b),N?C?k+E>0^_[1]<(oa(_[0]-w)<Aa?k:E):k<=_[1]&&_[1]<=E:A>Sa^(w<=_[0]&&_[0]<=S)){var L=ge(d,(-m+M)/y);return he(L,p),[_,ve(L)]}}}function u(t,e){var r=o?n:Sa-n,u=0;return-r>t?u|=1:t>r&&(u|=2),-r>e?u|=4:e>r&&(u|=8),u}var i=Math.cos(n),o=i>0,a=oa(i)>Aa,c=cr(n,6*Na);return Ee(t,e,c,o?[0,-n]:[-Sa,n-Sa])}function De(n,t,e,r){return function(u){var i,o=u.a,a=u.b,c=o.x,s=o.y,l=a.x,f=a.y,h=0,g=1,p=l-c,v=f-s;if(i=n-c,p||!(i>0)){if(i/=p,0>p){if(h>i)return;g>i&&(g=i)}else if(p>0){if(i>g)return;i>h&&(h=i)}if(i=e-c,p||!(0>i)){if(i/=p,0>p){if(i>g)return;i>h&&(h=i)}else if(p>0){if(h>i)return;g>i&&(g=i)}if(i=t-s,v||!(i>0)){if(i/=v,0>v){if(h>i)return;g>i&&(g=i)}else if(v>0){if(i>g)return;i>h&&(h=i)}if(i=r-s,v||!(0>i)){if(i/=v,0>v){if(i>g)return;i>h&&(h=i)}else if(v>0){if(h>i)return;g>i&&(g=i)}return h>0&&(u.a={x:c+h*p,y:s+h*v}),1>g&&(u.b={x:c+g*p,y:s+g*v}),u}}}}}}function Pe(n,t,e,r){function u(r,u){return oa(r[0]-n)<Aa?u>0?0:3:oa(r[0]-e)<Aa?u>0?2:1:oa(r[1]-t)<Aa?u>0?1:0:u>0?3:2}function i(n,t){return o(n.x,t.x)}function o(n,t){var e=u(n,1),r=u(t,1);return e!==r?e-r:0===e?t[1]-n[1]:1===e?n[0]-t[0]:2===e?n[1]-t[1]:t[0]-n[0]}return function(a){function c(n){for(var t=0,e=d.length,r=n[1],u=0;e>u;++u)for(var i,o=1,a=d[u],c=a.length,s=a[0];c>o;++o)i=a[o],s[1]<=r?i[1]>r&&Z(s,i,n)>0&&++t:i[1]<=r&&Z(s,i,n)<0&&--t,s=i;return 0!==t}function s(i,a,c,s){var l=0,f=0;if(null==i||(l=u(i,c))!==(f=u(a,c))||o(i,a)<0^c>0){do s.point(0===l||3===l?n:e,l>1?r:t);while((l=(l+c+4)%4)!==f)}else 
s.point(a[0],a[1])}function l(u,i){return u>=n&&e>=u&&i>=t&&r>=i}function f(n,t){l(n,t)&&a.point(n,t)}function h(){N.point=p,d&&d.push(m=[]),S=!0,w=!1,_=b=0/0}function g(){v&&(p(y,x),M&&w&&A.rejoin(),v.push(A.buffer())),N.point=f,w&&a.lineEnd()}function p(n,t){n=Math.max(-Ac,Math.min(Ac,n)),t=Math.max(-Ac,Math.min(Ac,t));var e=l(n,t);if(d&&m.push([n,t]),S)y=n,x=t,M=e,S=!1,e&&(a.lineStart(),a.point(n,t));else if(e&&w)a.point(n,t);else{var r={a:{x:_,y:b},b:{x:n,y:t}};C(r)?(w||(a.lineStart(),a.point(r.a.x,r.a.y)),a.point(r.b.x,r.b.y),e||a.lineEnd(),k=!1):e&&(a.lineStart(),a.point(n,t),k=!1)}_=n,b=t,w=e}var v,d,m,y,x,M,_,b,w,S,k,E=a,A=Ce(),C=De(n,t,e,r),N={point:f,lineStart:h,lineEnd:g,polygonStart:function(){a=A,v=[],d=[],k=!0},polygonEnd:function(){a=E,v=Xo.merge(v);var t=c([n,r]),e=k&&t,u=v.length;(e||u)&&(a.polygonStart(),e&&(a.lineStart(),s(null,null,1,a),a.lineEnd()),u&&we(v,i,t,s,a),a.polygonEnd()),v=d=m=null}};return N}}function Ue(n,t){function e(e,r){return e=n(e,r),t(e[0],e[1])}return n.invert&&t.invert&&(e.invert=function(e,r){return e=t.invert(e,r),e&&n.invert(e[0],e[1])}),e}function je(n){var t=0,e=Sa/3,r=nr(n),u=r(t,e);return u.parallels=function(n){return arguments.length?r(t=n[0]*Sa/180,e=n[1]*Sa/180):[180*(t/Sa),180*(e/Sa)]},u}function He(n,t){function e(n,t){var e=Math.sqrt(i-2*u*Math.sin(t))/u;return[e*Math.sin(n*=u),o-e*Math.cos(n)]}var r=Math.sin(n),u=(r+Math.sin(t))/2,i=1+r*(2*u-r),o=Math.sqrt(i)/u;return e.invert=function(n,t){var e=o-t;return[Math.atan2(n,e)/u,X((i-(n*n+e*e)*u*u)/(2*u))]},e}function Fe(){function n(n,t){Nc+=u*n-r*t,r=n,u=t}var t,e,r,u;Rc.point=function(i,o){Rc.point=n,t=r=i,e=u=o},Rc.lineEnd=function(){n(t,e)}}function Oe(n,t){Lc>n&&(Lc=n),n>qc&&(qc=n),Tc>t&&(Tc=t),t>zc&&(zc=t)}function Ye(){function n(n,t){o.push("M",n,",",t,i)}function t(n,t){o.push("M",n,",",t),a.point=e}function e(n,t){o.push("L",n,",",t)}function r(){a.point=n}function u(){o.push("Z")}var i=Ie(4.5),o=[],a={point:n,lineStart:function(){a.point=t},lineEnd:r,polygonStart:function(){a.lineEnd=u},polygonEnd:function(){a.lineEnd=r,a.point=n},pointRadius:function(n){return i=Ie(n),a},result:function(){if(o.length){var n=o.join("");return o=[],n}}};return a}function Ie(n){return"m0,"+n+"a"+n+","+n+" 0 1,1 0,"+-2*n+"a"+n+","+n+" 0 1,1 0,"+2*n+"z"}function Ze(n,t){dc+=n,mc+=t,++yc}function Ve(){function n(n,r){var u=n-t,i=r-e,o=Math.sqrt(u*u+i*i);xc+=o*(t+n)/2,Mc+=o*(e+r)/2,_c+=o,Ze(t=n,e=r)}var t,e;Pc.point=function(r,u){Pc.point=n,Ze(t=r,e=u)}}function Xe(){Pc.point=Ze}function $e(){function n(n,t){var e=n-r,i=t-u,o=Math.sqrt(e*e+i*i);xc+=o*(r+n)/2,Mc+=o*(u+t)/2,_c+=o,o=u*n-r*t,bc+=o*(r+n),wc+=o*(u+t),Sc+=3*o,Ze(r=n,u=t)}var t,e,r,u;Pc.point=function(i,o){Pc.point=n,Ze(t=r=i,e=u=o)},Pc.lineEnd=function(){n(t,e)}}function Be(n){function t(t,e){n.moveTo(t,e),n.arc(t,e,o,0,ka)}function e(t,e){n.moveTo(t,e),a.point=r}function r(t,e){n.lineTo(t,e)}function u(){a.point=t}function i(){n.closePath()}var o=4.5,a={point:t,lineStart:function(){a.point=e},lineEnd:u,polygonStart:function(){a.lineEnd=i},polygonEnd:function(){a.lineEnd=u,a.point=t},pointRadius:function(n){return o=n,a},result:g};return a}function We(n){function t(n){return(a?r:e)(n)}function e(t){return Ke(t,function(e,r){e=n(e,r),t.point(e[0],e[1])})}function r(t){function e(e,r){e=n(e,r),t.point(e[0],e[1])}function r(){x=0/0,S.point=i,t.lineStart()}function i(e,r){var i=se([e,r]),o=n(e,r);u(x,M,y,_,b,w,x=o[0],M=o[1],y=e,_=i[0],b=i[1],w=i[2],a,t),t.point(x,M)}function o(){S.point=e,t.lineEnd()}function 
c(){r(),S.point=s,S.lineEnd=l}function s(n,t){i(f=n,h=t),g=x,p=M,v=_,d=b,m=w,S.point=i}function l(){u(x,M,y,_,b,w,g,p,f,v,d,m,a,t),S.lineEnd=o,o()}var f,h,g,p,v,d,m,y,x,M,_,b,w,S={point:e,lineStart:r,lineEnd:o,polygonStart:function(){t.polygonStart(),S.lineStart=c},polygonEnd:function(){t.polygonEnd(),S.lineStart=r}};return S}function u(t,e,r,a,c,s,l,f,h,g,p,v,d,m){var y=l-t,x=f-e,M=y*y+x*x;if(M>4*i&&d--){var _=a+g,b=c+p,w=s+v,S=Math.sqrt(_*_+b*b+w*w),k=Math.asin(w/=S),E=oa(oa(w)-1)<Aa||oa(r-h)<Aa?(r+h)/2:Math.atan2(b,_),A=n(E,k),C=A[0],N=A[1],L=C-t,T=N-e,q=x*L-y*T;(q*q/M>i||oa((y*L+x*T)/M-.5)>.3||o>a*g+c*p+s*v)&&(u(t,e,r,a,c,s,C,N,E,_/=S,b/=S,w,d,m),m.point(C,N),u(C,N,E,_,b,w,l,f,h,g,p,v,d,m))}}var i=.5,o=Math.cos(30*Na),a=16;return t.precision=function(n){return arguments.length?(a=(i=n*n)>0&&16,t):Math.sqrt(i)},t}function Je(n){var t=We(function(t,e){return n([t*La,e*La])});return function(n){return tr(t(n))}}function Ge(n){this.stream=n}function Ke(n,t){return{point:t,sphere:function(){n.sphere()},lineStart:function(){n.lineStart()},lineEnd:function(){n.lineEnd()},polygonStart:function(){n.polygonStart()},polygonEnd:function(){n.polygonEnd()}}}function Qe(n){return nr(function(){return n})()}function nr(n){function t(n){return n=a(n[0]*Na,n[1]*Na),[n[0]*h+c,s-n[1]*h]}function e(n){return n=a.invert((n[0]-c)/h,(s-n[1])/h),n&&[n[0]*La,n[1]*La]}function r(){a=Ue(o=ur(m,y,x),i);var n=i(v,d);return c=g-n[0]*h,s=p+n[1]*h,u()}function u(){return l&&(l.valid=!1,l=null),t}var i,o,a,c,s,l,f=We(function(n,t){return n=i(n,t),[n[0]*h+c,s-n[1]*h]}),h=150,g=480,p=250,v=0,d=0,m=0,y=0,x=0,M=Ec,_=bt,b=null,w=null;return t.stream=function(n){return l&&(l.valid=!1),l=tr(M(o,f(_(n)))),l.valid=!0,l},t.clipAngle=function(n){return arguments.length?(M=null==n?(b=n,Ec):Re((b=+n)*Na),u()):b},t.clipExtent=function(n){return arguments.length?(w=n,_=n?Pe(n[0][0],n[0][1],n[1][0],n[1][1]):bt,u()):w},t.scale=function(n){return arguments.length?(h=+n,r()):h},t.translate=function(n){return arguments.length?(g=+n[0],p=+n[1],r()):[g,p]},t.center=function(n){return arguments.length?(v=n[0]%360*Na,d=n[1]%360*Na,r()):[v*La,d*La]},t.rotate=function(n){return arguments.length?(m=n[0]%360*Na,y=n[1]%360*Na,x=n.length>2?n[2]%360*Na:0,r()):[m*La,y*La,x*La]},Xo.rebind(t,f,"precision"),function(){return i=n.apply(this,arguments),t.invert=i.invert&&e,r()}}function tr(n){return Ke(n,function(t,e){n.point(t*Na,e*Na)})}function er(n,t){return[n,t]}function rr(n,t){return[n>Sa?n-ka:-Sa>n?n+ka:n,t]}function ur(n,t,e){return n?t||e?Ue(or(n),ar(t,e)):or(n):t||e?ar(t,e):rr}function ir(n){return function(t,e){return t+=n,[t>Sa?t-ka:-Sa>t?t+ka:t,e]}}function or(n){var t=ir(n);return t.invert=ir(-n),t}function ar(n,t){function e(n,t){var e=Math.cos(t),a=Math.cos(n)*e,c=Math.sin(n)*e,s=Math.sin(t),l=s*r+a*u;return[Math.atan2(c*i-l*o,a*r-s*u),X(l*i+c*o)]}var r=Math.cos(n),u=Math.sin(n),i=Math.cos(t),o=Math.sin(t);return e.invert=function(n,t){var e=Math.cos(t),a=Math.cos(n)*e,c=Math.sin(n)*e,s=Math.sin(t),l=s*i-c*o;return[Math.atan2(c*i+s*o,a*r+l*u),X(l*r-a*u)]},e}function cr(n,t){var e=Math.cos(n),r=Math.sin(n);return function(u,i,o,a){var c=o*t;null!=u?(u=sr(e,u),i=sr(e,i),(o>0?i>u:u>i)&&(u+=o*ka)):(u=n+o*ka,i=n-.5*c);for(var s,l=u;o>0?l>i:i>l;l-=c)a.point((s=ve([e,-r*Math.cos(l),-r*Math.sin(l)]))[0],s[1])}}function sr(n,t){var e=se(t);e[0]-=n,pe(e);var r=V(-e[1]);return((-e[2]<0?-r:r)+2*Math.PI-Aa)%(2*Math.PI)}function lr(n,t,e){var r=Xo.range(n,t-Aa,e).concat(t);return function(n){return r.map(function(t){return[n,t]})}}function 
fr(n,t,e){var r=Xo.range(n,t-Aa,e).concat(t);return function(n){return r.map(function(t){return[t,n]})}}function hr(n){return n.source}function gr(n){return n.target}function pr(n,t,e,r){var u=Math.cos(t),i=Math.sin(t),o=Math.cos(r),a=Math.sin(r),c=u*Math.cos(n),s=u*Math.sin(n),l=o*Math.cos(e),f=o*Math.sin(e),h=2*Math.asin(Math.sqrt(J(r-t)+u*o*J(e-n))),g=1/Math.sin(h),p=h?function(n){var t=Math.sin(n*=h)*g,e=Math.sin(h-n)*g,r=e*c+t*l,u=e*s+t*f,o=e*i+t*a;return[Math.atan2(u,r)*La,Math.atan2(o,Math.sqrt(r*r+u*u))*La]}:function(){return[n*La,t*La]};return p.distance=h,p}function vr(){function n(n,u){var i=Math.sin(u*=Na),o=Math.cos(u),a=oa((n*=Na)-t),c=Math.cos(a);Uc+=Math.atan2(Math.sqrt((a=o*Math.sin(a))*a+(a=r*i-e*o*c)*a),e*i+r*o*c),t=n,e=i,r=o}var t,e,r;jc.point=function(u,i){t=u*Na,e=Math.sin(i*=Na),r=Math.cos(i),jc.point=n},jc.lineEnd=function(){jc.point=jc.lineEnd=g}}function dr(n,t){function e(t,e){var r=Math.cos(t),u=Math.cos(e),i=n(r*u);return[i*u*Math.sin(t),i*Math.sin(e)]}return e.invert=function(n,e){var r=Math.sqrt(n*n+e*e),u=t(r),i=Math.sin(u),o=Math.cos(u);return[Math.atan2(n*i,r*o),Math.asin(r&&e*i/r)]},e}function mr(n,t){function e(n,t){var e=oa(oa(t)-Ea)<Aa?0:o/Math.pow(u(t),i);return[e*Math.sin(i*n),o-e*Math.cos(i*n)]}var r=Math.cos(n),u=function(n){return Math.tan(Sa/4+n/2)},i=n===t?Math.sin(n):Math.log(r/Math.cos(t))/Math.log(u(t)/u(n)),o=r*Math.pow(u(n),i)/i;return i?(e.invert=function(n,t){var e=o-t,r=I(i)*Math.sqrt(n*n+e*e);return[Math.atan2(n,e)/i,2*Math.atan(Math.pow(o/r,1/i))-Ea]},e):xr}function yr(n,t){function e(n,t){var e=i-t;return[e*Math.sin(u*n),i-e*Math.cos(u*n)]}var r=Math.cos(n),u=n===t?Math.sin(n):(r-Math.cos(t))/(t-n),i=r/u+n;return oa(u)<Aa?er:(e.invert=function(n,t){var e=i-t;return[Math.atan2(n,e)/u,i-I(u)*Math.sqrt(n*n+e*e)]},e)}function xr(n,t){return[n,Math.log(Math.tan(Sa/4+t/2))]}function Mr(n){var t,e=Qe(n),r=e.scale,u=e.translate,i=e.clipExtent;return e.scale=function(){var n=r.apply(e,arguments);return n===e?t?e.clipExtent(null):e:n},e.translate=function(){var n=u.apply(e,arguments);return n===e?t?e.clipExtent(null):e:n},e.clipExtent=function(n){var o=i.apply(e,arguments);if(o===e){if(t=null==n){var a=Sa*r(),c=u();i([[c[0]-a,c[1]-a],[c[0]+a,c[1]+a]])}}else t&&(o=null);return o},e.clipExtent(null)}function _r(n,t){return[Math.log(Math.tan(Sa/4+t/2)),-n]}function br(n){return n[0]}function wr(n){return n[1]}function Sr(n){for(var t=n.length,e=[0,1],r=2,u=2;t>u;u++){for(;r>1&&Z(n[e[r-2]],n[e[r-1]],n[u])<=0;)--r;e[r++]=u}return e.slice(0,r)}function kr(n,t){return n[0]-t[0]||n[1]-t[1]}function Er(n,t,e){return(e[0]-t[0])*(n[1]-t[1])<(e[1]-t[1])*(n[0]-t[0])}function Ar(n,t,e,r){var u=n[0],i=e[0],o=t[0]-u,a=r[0]-i,c=n[1],s=e[1],l=t[1]-c,f=r[1]-s,h=(a*(c-s)-f*(u-i))/(f*o-a*l);return[u+h*o,c+h*l]}function Cr(n){var t=n[0],e=n[n.length-1];return!(t[0]-e[0]||t[1]-e[1])}function Nr(){Jr(this),this.edge=this.site=this.circle=null}function Lr(n){var t=Jc.pop()||new Nr;return t.site=n,t}function Tr(n){Or(n),$c.remove(n),Jc.push(n),Jr(n)}function qr(n){var t=n.circle,e=t.x,r=t.cy,u={x:e,y:r},i=n.P,o=n.N,a=[n];Tr(n);for(var c=i;c.circle&&oa(e-c.circle.x)<Aa&&oa(r-c.circle.cy)<Aa;)i=c.P,a.unshift(c),Tr(c),c=i;a.unshift(c),Or(c);for(var s=o;s.circle&&oa(e-s.circle.x)<Aa&&oa(r-s.circle.cy)<Aa;)o=s.N,a.push(s),Tr(s),s=o;a.push(s),Or(s);var l,f=a.length;for(l=1;f>l;++l)s=a[l],c=a[l-1],$r(s.edge,c.site,s.site,u);c=a[0],s=a[f-1],s.edge=Vr(c.site,s.site,null,u),Fr(c),Fr(s)}function zr(n){for(var 
t,e,r,u,i=n.x,o=n.y,a=$c._;a;)if(r=Rr(a,o)-i,r>Aa)a=a.L;else{if(u=i-Dr(a,o),!(u>Aa)){r>-Aa?(t=a.P,e=a):u>-Aa?(t=a,e=a.N):t=e=a;break}if(!a.R){t=a;break}a=a.R}var c=Lr(n);if($c.insert(t,c),t||e){if(t===e)return Or(t),e=Lr(t.site),$c.insert(c,e),c.edge=e.edge=Vr(t.site,c.site),Fr(t),Fr(e),void 0;if(!e)return c.edge=Vr(t.site,c.site),void 0;Or(t),Or(e);var s=t.site,l=s.x,f=s.y,h=n.x-l,g=n.y-f,p=e.site,v=p.x-l,d=p.y-f,m=2*(h*d-g*v),y=h*h+g*g,x=v*v+d*d,M={x:(d*y-g*x)/m+l,y:(h*x-v*y)/m+f};$r(e.edge,s,p,M),c.edge=Vr(s,n,null,M),e.edge=Vr(n,p,null,M),Fr(t),Fr(e)}}function Rr(n,t){var e=n.site,r=e.x,u=e.y,i=u-t;if(!i)return r;var o=n.P;if(!o)return-1/0;e=o.site;var a=e.x,c=e.y,s=c-t;if(!s)return a;var l=a-r,f=1/i-1/s,h=l/s;return f?(-h+Math.sqrt(h*h-2*f*(l*l/(-2*s)-c+s/2+u-i/2)))/f+r:(r+a)/2}function Dr(n,t){var e=n.N;if(e)return Rr(e,t);var r=n.site;return r.y===t?r.x:1/0}function Pr(n){this.site=n,this.edges=[]}function Ur(n){for(var t,e,r,u,i,o,a,c,s,l,f=n[0][0],h=n[1][0],g=n[0][1],p=n[1][1],v=Xc,d=v.length;d--;)if(i=v[d],i&&i.prepare())for(a=i.edges,c=a.length,o=0;c>o;)l=a[o].end(),r=l.x,u=l.y,s=a[++o%c].start(),t=s.x,e=s.y,(oa(r-t)>Aa||oa(u-e)>Aa)&&(a.splice(o,0,new Br(Xr(i.site,l,oa(r-f)<Aa&&p-u>Aa?{x:f,y:oa(t-f)<Aa?e:p}:oa(u-p)<Aa&&h-r>Aa?{x:oa(e-p)<Aa?t:h,y:p}:oa(r-h)<Aa&&u-g>Aa?{x:h,y:oa(t-h)<Aa?e:g}:oa(u-g)<Aa&&r-f>Aa?{x:oa(e-g)<Aa?t:f,y:g}:null),i.site,null)),++c)}function jr(n,t){return t.angle-n.angle}function Hr(){Jr(this),this.x=this.y=this.arc=this.site=this.cy=null}function Fr(n){var t=n.P,e=n.N;if(t&&e){var r=t.site,u=n.site,i=e.site;if(r!==i){var o=u.x,a=u.y,c=r.x-o,s=r.y-a,l=i.x-o,f=i.y-a,h=2*(c*f-s*l);if(!(h>=-Ca)){var g=c*c+s*s,p=l*l+f*f,v=(f*g-s*p)/h,d=(c*p-l*g)/h,f=d+a,m=Gc.pop()||new Hr;m.arc=n,m.site=u,m.x=v+o,m.y=f+Math.sqrt(v*v+d*d),m.cy=f,n.circle=m;for(var y=null,x=Wc._;x;)if(m.y<x.y||m.y===x.y&&m.x<=x.x){if(!x.L){y=x.P;break}x=x.L}else{if(!x.R){y=x;break}x=x.R}Wc.insert(y,m),y||(Bc=m)}}}}function Or(n){var t=n.circle;t&&(t.P||(Bc=t.N),Wc.remove(t),Gc.push(t),Jr(t),n.circle=null)}function Yr(n){for(var t,e=Vc,r=De(n[0][0],n[0][1],n[1][0],n[1][1]),u=e.length;u--;)t=e[u],(!Ir(t,n)||!r(t)||oa(t.a.x-t.b.x)<Aa&&oa(t.a.y-t.b.y)<Aa)&&(t.a=t.b=null,e.splice(u,1))}function Ir(n,t){var e=n.b;if(e)return!0;var r,u,i=n.a,o=t[0][0],a=t[1][0],c=t[0][1],s=t[1][1],l=n.l,f=n.r,h=l.x,g=l.y,p=f.x,v=f.y,d=(h+p)/2,m=(g+v)/2;if(v===g){if(o>d||d>=a)return;if(h>p){if(i){if(i.y>=s)return}else i={x:d,y:c};e={x:d,y:s}}else{if(i){if(i.y<c)return}else i={x:d,y:s};e={x:d,y:c}}}else if(r=(h-p)/(v-g),u=m-r*d,-1>r||r>1)if(h>p){if(i){if(i.y>=s)return}else i={x:(c-u)/r,y:c};e={x:(s-u)/r,y:s}}else{if(i){if(i.y<c)return}else i={x:(s-u)/r,y:s};e={x:(c-u)/r,y:c}}else if(v>g){if(i){if(i.x>=a)return}else i={x:o,y:r*o+u};e={x:a,y:r*a+u}}else{if(i){if(i.x<o)return}else i={x:a,y:r*a+u};e={x:o,y:r*o+u}}return n.a=i,n.b=e,!0}function Zr(n,t){this.l=n,this.r=t,this.a=this.b=null}function Vr(n,t,e,r){var u=new Zr(n,t);return Vc.push(u),e&&$r(u,n,t,e),r&&$r(u,t,n,r),Xc[n.i].edges.push(new Br(u,n,t)),Xc[t.i].edges.push(new Br(u,t,n)),u}function Xr(n,t,e){var r=new Zr(n,null);return r.a=t,r.b=e,Vc.push(r),r}function $r(n,t,e,r){n.a||n.b?n.l===e?n.b=r:n.a=r:(n.a=r,n.l=t,n.r=e)}function Br(n,t,e){var r=n.a,u=n.b;this.edge=n,this.site=t,this.angle=e?Math.atan2(e.y-t.y,e.x-t.x):n.l===t?Math.atan2(u.x-r.x,r.y-u.y):Math.atan2(r.x-u.x,u.y-r.y)}function Wr(){this._=null}function Jr(n){n.U=n.C=n.L=n.R=n.P=n.N=null}function Gr(n,t){var e=t,r=t.R,u=e.U;u?u.L===e?u.L=r:u.R=r:n._=r,r.U=u,e.U=r,e.R=r.L,e.R&&(e.R.U=e),r.L=e}function 
Kr(n,t){var e=t,r=t.L,u=e.U;u?u.L===e?u.L=r:u.R=r:n._=r,r.U=u,e.U=r,e.L=r.R,e.L&&(e.L.U=e),r.R=e}function Qr(n){for(;n.L;)n=n.L;return n}function nu(n,t){var e,r,u,i=n.sort(tu).pop();for(Vc=[],Xc=new Array(n.length),$c=new Wr,Wc=new Wr;;)if(u=Bc,i&&(!u||i.y<u.y||i.y===u.y&&i.x<u.x))(i.x!==e||i.y!==r)&&(Xc[i.i]=new Pr(i),zr(i),e=i.x,r=i.y),i=n.pop();else{if(!u)break;qr(u.arc)}t&&(Yr(t),Ur(t));var o={cells:Xc,edges:Vc};return $c=Wc=Vc=Xc=null,o}function tu(n,t){return t.y-n.y||t.x-n.x}function eu(n,t,e){return(n.x-e.x)*(t.y-n.y)-(n.x-t.x)*(e.y-n.y)}function ru(n){return n.x}function uu(n){return n.y}function iu(){return{leaf:!0,nodes:[],point:null,x:null,y:null}}function ou(n,t,e,r,u,i){if(!n(t,e,r,u,i)){var o=.5*(e+u),a=.5*(r+i),c=t.nodes;c[0]&&ou(n,c[0],e,r,o,a),c[1]&&ou(n,c[1],o,r,u,a),c[2]&&ou(n,c[2],e,a,o,i),c[3]&&ou(n,c[3],o,a,u,i)}}function au(n,t){n=Xo.rgb(n),t=Xo.rgb(t);var e=n.r,r=n.g,u=n.b,i=t.r-e,o=t.g-r,a=t.b-u;return function(n){return"#"+vt(Math.round(e+i*n))+vt(Math.round(r+o*n))+vt(Math.round(u+a*n))}}function cu(n,t){var e,r={},u={};for(e in n)e in t?r[e]=fu(n[e],t[e]):u[e]=n[e];for(e in t)e in n||(u[e]=t[e]);return function(n){for(e in r)u[e]=r[e](n);return u}}function su(n,t){return t-=n=+n,function(e){return n+t*e}}function lu(n,t){var e,r,u,i,o,a=0,c=0,s=[],l=[];for(n+="",t+="",Qc.lastIndex=0,r=0;e=Qc.exec(t);++r)e.index&&s.push(t.substring(a,c=e.index)),l.push({i:s.length,x:e[0]}),s.push(null),a=Qc.lastIndex;for(a<t.length&&s.push(t.substring(a)),r=0,i=l.length;(e=Qc.exec(n))&&i>r;++r)if(o=l[r],o.x==e[0]){if(o.i)if(null==s[o.i+1])for(s[o.i-1]+=o.x,s.splice(o.i,1),u=r+1;i>u;++u)l[u].i--;else for(s[o.i-1]+=o.x+s[o.i+1],s.splice(o.i,2),u=r+1;i>u;++u)l[u].i-=2;else if(null==s[o.i+1])s[o.i]=o.x;else for(s[o.i]=o.x+s[o.i+1],s.splice(o.i+1,1),u=r+1;i>u;++u)l[u].i--;l.splice(r,1),i--,r--}else o.x=su(parseFloat(e[0]),parseFloat(o.x));for(;i>r;)o=l.pop(),null==s[o.i+1]?s[o.i]=o.x:(s[o.i]=o.x+s[o.i+1],s.splice(o.i+1,1)),i--;return 1===s.length?null==s[0]?(o=l[0].x,function(n){return o(n)+""}):function(){return t}:function(n){for(r=0;i>r;++r)s[(o=l[r]).i]=o.x(n);return s.join("")}}function fu(n,t){for(var e,r=Xo.interpolators.length;--r>=0&&!(e=Xo.interpolators[r](n,t)););return e}function hu(n,t){var e,r=[],u=[],i=n.length,o=t.length,a=Math.min(n.length,t.length);for(e=0;a>e;++e)r.push(fu(n[e],t[e]));for(;i>e;++e)u[e]=n[e];for(;o>e;++e)u[e]=t[e];return function(n){for(e=0;a>e;++e)u[e]=r[e](n);return u}}function gu(n){return function(t){return 0>=t?0:t>=1?1:n(t)}}function pu(n){return function(t){return 1-n(1-t)}}function vu(n){return function(t){return.5*(.5>t?n(2*t):2-n(2-2*t))}}function du(n){return n*n}function mu(n){return n*n*n}function yu(n){if(0>=n)return 0;if(n>=1)return 1;var t=n*n,e=t*n;return 4*(.5>n?e:3*(n-t)+e-.75)}function xu(n){return function(t){return Math.pow(t,n)}}function Mu(n){return 1-Math.cos(n*Ea)}function _u(n){return Math.pow(2,10*(n-1))}function bu(n){return 1-Math.sqrt(1-n*n)}function wu(n,t){var e;return arguments.length<2&&(t=.45),arguments.length?e=t/ka*Math.asin(1/n):(n=1,e=t/4),function(r){return 1+n*Math.pow(2,-10*r)*Math.sin((r-e)*ka/t)}}function Su(n){return n||(n=1.70158),function(t){return t*t*((n+1)*t-n)}}function ku(n){return 1/2.75>n?7.5625*n*n:2/2.75>n?7.5625*(n-=1.5/2.75)*n+.75:2.5/2.75>n?7.5625*(n-=2.25/2.75)*n+.9375:7.5625*(n-=2.625/2.75)*n+.984375}function Eu(n,t){n=Xo.hcl(n),t=Xo.hcl(t);var e=n.h,r=n.c,u=n.l,i=t.h-e,o=t.c-r,a=t.l-u;return 
isNaN(o)&&(o=0,r=isNaN(r)?t.c:r),isNaN(i)?(i=0,e=isNaN(e)?t.h:e):i>180?i-=360:-180>i&&(i+=360),function(n){return rt(e+i*n,r+o*n,u+a*n)+""}}function Au(n,t){n=Xo.hsl(n),t=Xo.hsl(t);var e=n.h,r=n.s,u=n.l,i=t.h-e,o=t.s-r,a=t.l-u;return isNaN(o)&&(o=0,r=isNaN(r)?t.s:r),isNaN(i)?(i=0,e=isNaN(e)?t.h:e):i>180?i-=360:-180>i&&(i+=360),function(n){return nt(e+i*n,r+o*n,u+a*n)+""}}function Cu(n,t){n=Xo.lab(n),t=Xo.lab(t);var e=n.l,r=n.a,u=n.b,i=t.l-e,o=t.a-r,a=t.b-u;return function(n){return ot(e+i*n,r+o*n,u+a*n)+""}}function Nu(n,t){return t-=n,function(e){return Math.round(n+t*e)}}function Lu(n){var t=[n.a,n.b],e=[n.c,n.d],r=qu(t),u=Tu(t,e),i=qu(zu(e,t,-u))||0;t[0]*e[1]<e[0]*t[1]&&(t[0]*=-1,t[1]*=-1,r*=-1,u*=-1),this.rotate=(r?Math.atan2(t[1],t[0]):Math.atan2(-e[0],e[1]))*La,this.translate=[n.e,n.f],this.scale=[r,i],this.skew=i?Math.atan2(u,i)*La:0}function Tu(n,t){return n[0]*t[0]+n[1]*t[1]}function qu(n){var t=Math.sqrt(Tu(n,n));return t&&(n[0]/=t,n[1]/=t),t}function zu(n,t,e){return n[0]+=e*t[0],n[1]+=e*t[1],n}function Ru(n,t){var e,r=[],u=[],i=Xo.transform(n),o=Xo.transform(t),a=i.translate,c=o.translate,s=i.rotate,l=o.rotate,f=i.skew,h=o.skew,g=i.scale,p=o.scale;return a[0]!=c[0]||a[1]!=c[1]?(r.push("translate(",null,",",null,")"),u.push({i:1,x:su(a[0],c[0])},{i:3,x:su(a[1],c[1])})):c[0]||c[1]?r.push("translate("+c+")"):r.push(""),s!=l?(s-l>180?l+=360:l-s>180&&(s+=360),u.push({i:r.push(r.pop()+"rotate(",null,")")-2,x:su(s,l)})):l&&r.push(r.pop()+"rotate("+l+")"),f!=h?u.push({i:r.push(r.pop()+"skewX(",null,")")-2,x:su(f,h)}):h&&r.push(r.pop()+"skewX("+h+")"),g[0]!=p[0]||g[1]!=p[1]?(e=r.push(r.pop()+"scale(",null,",",null,")"),u.push({i:e-4,x:su(g[0],p[0])},{i:e-2,x:su(g[1],p[1])})):(1!=p[0]||1!=p[1])&&r.push(r.pop()+"scale("+p+")"),e=u.length,function(n){for(var t,i=-1;++i<e;)r[(t=u[i]).i]=t.x(n);return r.join("")}}function Du(n,t){return t=t-(n=+n)?1/(t-n):0,function(e){return(e-n)*t}}function Pu(n,t){return t=t-(n=+n)?1/(t-n):0,function(e){return Math.max(0,Math.min(1,(e-n)*t))}}function Uu(n){for(var t=n.source,e=n.target,r=Hu(t,e),u=[t];t!==r;)t=t.parent,u.push(t);for(var i=u.length;e!==r;)u.splice(i,0,e),e=e.parent;return u}function ju(n){for(var t=[],e=n.parent;null!=e;)t.push(n),n=e,e=e.parent;return t.push(n),t}function Hu(n,t){if(n===t)return n;for(var e=ju(n),r=ju(t),u=e.pop(),i=r.pop(),o=null;u===i;)o=u,u=e.pop(),i=r.pop();return o}function Fu(n){n.fixed|=2}function Ou(n){n.fixed&=-7}function Yu(n){n.fixed|=4,n.px=n.x,n.py=n.y}function Iu(n){n.fixed&=-5}function Zu(n,t,e){var r=0,u=0;if(n.charge=0,!n.leaf)for(var i,o=n.nodes,a=o.length,c=-1;++c<a;)i=o[c],null!=i&&(Zu(i,t,e),n.charge+=i.charge,r+=i.charge*i.cx,u+=i.charge*i.cy);if(n.point){n.leaf||(n.point.x+=Math.random()-.5,n.point.y+=Math.random()-.5);var s=t*e[n.point.index];n.charge+=n.pointCharge=s,r+=s*n.point.x,u+=s*n.point.y}n.cx=r/n.charge,n.cy=u/n.charge}function Vu(n,t){return Xo.rebind(n,t,"sort","children","value"),n.nodes=n,n.links=Wu,n}function Xu(n){return n.children}function $u(n){return n.value}function Bu(n,t){return t.value-n.value}function Wu(n){return Xo.merge(n.map(function(n){return(n.children||[]).map(function(t){return{source:n,target:t}})}))}function Ju(n){return n.x}function Gu(n){return n.y}function Ku(n,t,e){n.y0=t,n.y=e}function Qu(n){return Xo.range(n.length)}function ni(n){for(var t=-1,e=n[0].length,r=[];++t<e;)r[t]=0;return r}function ti(n){for(var t,e=1,r=0,u=n[0][1],i=n.length;i>e;++e)(t=n[e][1])>u&&(r=e,u=t);return r}function ei(n){return n.reduce(ri,0)}function ri(n,t){return n+t[1]}function 
ui(n,t){return ii(n,Math.ceil(Math.log(t.length)/Math.LN2+1))}function ii(n,t){for(var e=-1,r=+n[0],u=(n[1]-r)/t,i=[];++e<=t;)i[e]=u*e+r;return i}function oi(n){return[Xo.min(n),Xo.max(n)]}function ai(n,t){return n.parent==t.parent?1:2}function ci(n){var t=n.children;return t&&t.length?t[0]:n._tree.thread}function si(n){var t,e=n.children;return e&&(t=e.length)?e[t-1]:n._tree.thread}function li(n,t){var e=n.children;if(e&&(u=e.length))for(var r,u,i=-1;++i<u;)t(r=li(e[i],t),n)>0&&(n=r);return n}function fi(n,t){return n.x-t.x}function hi(n,t){return t.x-n.x}function gi(n,t){return n.depth-t.depth}function pi(n,t){function e(n,r){var u=n.children;if(u&&(o=u.length))for(var i,o,a=null,c=-1;++c<o;)i=u[c],e(i,a),a=i;t(n,r)}e(n,null)}function vi(n){for(var t,e=0,r=0,u=n.children,i=u.length;--i>=0;)t=u[i]._tree,t.prelim+=e,t.mod+=e,e+=t.shift+(r+=t.change)}function di(n,t,e){n=n._tree,t=t._tree;var r=e/(t.number-n.number);n.change+=r,t.change-=r,t.shift+=e,t.prelim+=e,t.mod+=e}function mi(n,t,e){return n._tree.ancestor.parent==t.parent?n._tree.ancestor:e}function yi(n,t){return n.value-t.value}function xi(n,t){var e=n._pack_next;n._pack_next=t,t._pack_prev=n,t._pack_next=e,e._pack_prev=t}function Mi(n,t){n._pack_next=t,t._pack_prev=n}function _i(n,t){var e=t.x-n.x,r=t.y-n.y,u=n.r+t.r;return.999*u*u>e*e+r*r}function bi(n){function t(n){l=Math.min(n.x-n.r,l),f=Math.max(n.x+n.r,f),h=Math.min(n.y-n.r,h),g=Math.max(n.y+n.r,g)}if((e=n.children)&&(s=e.length)){var e,r,u,i,o,a,c,s,l=1/0,f=-1/0,h=1/0,g=-1/0;if(e.forEach(wi),r=e[0],r.x=-r.r,r.y=0,t(r),s>1&&(u=e[1],u.x=u.r,u.y=0,t(u),s>2))for(i=e[2],Ei(r,u,i),t(i),xi(r,i),r._pack_prev=i,xi(i,u),u=r._pack_next,o=3;s>o;o++){Ei(r,u,i=e[o]);var p=0,v=1,d=1;for(a=u._pack_next;a!==u;a=a._pack_next,v++)if(_i(a,i)){p=1;break}if(1==p)for(c=r._pack_prev;c!==a._pack_prev&&!_i(c,i);c=c._pack_prev,d++);p?(d>v||v==d&&u.r<r.r?Mi(r,u=a):Mi(r=c,u),o--):(xi(r,i),u=i,t(i))}var m=(l+f)/2,y=(h+g)/2,x=0;for(o=0;s>o;o++)i=e[o],i.x-=m,i.y-=y,x=Math.max(x,i.r+Math.sqrt(i.x*i.x+i.y*i.y));n.r=x,e.forEach(Si)}}function wi(n){n._pack_next=n._pack_prev=n}function Si(n){delete n._pack_next,delete n._pack_prev}function ki(n,t,e,r){var u=n.children;if(n.x=t+=r*n.x,n.y=e+=r*n.y,n.r*=r,u)for(var i=-1,o=u.length;++i<o;)ki(u[i],t,e,r)}function Ei(n,t,e){var r=n.r+e.r,u=t.x-n.x,i=t.y-n.y;if(r&&(u||i)){var o=t.r+e.r,a=u*u+i*i;o*=o,r*=r;var c=.5+(r-o)/(2*a),s=Math.sqrt(Math.max(0,2*o*(r+a)-(r-=a)*r-o*o))/(2*a);e.x=n.x+c*u+s*i,e.y=n.y+c*i-s*u}else e.x=n.x+r,e.y=n.y}function Ai(n){return 1+Xo.max(n,function(n){return n.y})}function Ci(n){return n.reduce(function(n,t){return n+t.x},0)/n.length}function Ni(n){var t=n.children;return t&&t.length?Ni(t[0]):n}function Li(n){var t,e=n.children;return e&&(t=e.length)?Li(e[t-1]):n}function Ti(n){return{x:n.x,y:n.y,dx:n.dx,dy:n.dy}}function qi(n,t){var e=n.x+t[3],r=n.y+t[0],u=n.dx-t[1]-t[3],i=n.dy-t[0]-t[2];return 0>u&&(e+=u/2,u=0),0>i&&(r+=i/2,i=0),{x:e,y:r,dx:u,dy:i}}function zi(n){var t=n[0],e=n[n.length-1];return e>t?[t,e]:[e,t]}function Ri(n){return n.rangeExtent?n.rangeExtent():zi(n.range())}function Di(n,t,e,r){var u=e(n[0],n[1]),i=r(t[0],t[1]);return function(n){return i(u(n))}}function Pi(n,t){var e,r=0,u=n.length-1,i=n[r],o=n[u];return i>o&&(e=r,r=u,u=e,e=i,i=o,o=e),n[r]=t.floor(i),n[u]=t.ceil(o),n}function Ui(n){return n?{floor:function(t){return Math.floor(t/n)*n},ceil:function(t){return Math.ceil(t/n)*n}}:ls}function ji(n,t,e,r){var 
u=[],i=[],o=0,a=Math.min(n.length,t.length)-1;for(n[a]<n[0]&&(n=n.slice().reverse(),t=t.slice().reverse());++o<=a;)u.push(e(n[o-1],n[o])),i.push(r(t[o-1],t[o]));return function(t){var e=Xo.bisect(n,t,1,a)-1;return i[e](u[e](t))}}function Hi(n,t,e,r){function u(){var u=Math.min(n.length,t.length)>2?ji:Di,c=r?Pu:Du;return o=u(n,t,c,e),a=u(t,n,c,fu),i}function i(n){return o(n)}var o,a;return i.invert=function(n){return a(n)},i.domain=function(t){return arguments.length?(n=t.map(Number),u()):n},i.range=function(n){return arguments.length?(t=n,u()):t},i.rangeRound=function(n){return i.range(n).interpolate(Nu)},i.clamp=function(n){return arguments.length?(r=n,u()):r},i.interpolate=function(n){return arguments.length?(e=n,u()):e},i.ticks=function(t){return Ii(n,t)},i.tickFormat=function(t,e){return Zi(n,t,e)},i.nice=function(t){return Oi(n,t),u()},i.copy=function(){return Hi(n,t,e,r)},u()}function Fi(n,t){return Xo.rebind(n,t,"range","rangeRound","interpolate","clamp")}function Oi(n,t){return Pi(n,Ui(Yi(n,t)[2]))}function Yi(n,t){null==t&&(t=10);var e=zi(n),r=e[1]-e[0],u=Math.pow(10,Math.floor(Math.log(r/t)/Math.LN10)),i=t/r*u;return.15>=i?u*=10:.35>=i?u*=5:.75>=i&&(u*=2),e[0]=Math.ceil(e[0]/u)*u,e[1]=Math.floor(e[1]/u)*u+.5*u,e[2]=u,e}function Ii(n,t){return Xo.range.apply(Xo,Yi(n,t))}function Zi(n,t,e){var r=Yi(n,t);return Xo.format(e?e.replace(Qa,function(n,t,e,u,i,o,a,c,s,l){return[t,e,u,i,o,a,c,s||"."+Xi(l,r),l].join("")}):",."+Vi(r[2])+"f")}function Vi(n){return-Math.floor(Math.log(n)/Math.LN10+.01)}function Xi(n,t){var e=Vi(t[2]);return n in fs?Math.abs(e-Vi(Math.max(Math.abs(t[0]),Math.abs(t[1]))))+ +("e"!==n):e-2*("%"===n)}function $i(n,t,e,r){function u(n){return(e?Math.log(0>n?0:n):-Math.log(n>0?0:-n))/Math.log(t)}function i(n){return e?Math.pow(t,n):-Math.pow(t,-n)}function o(t){return n(u(t))}return o.invert=function(t){return i(n.invert(t))},o.domain=function(t){return arguments.length?(e=t[0]>=0,n.domain((r=t.map(Number)).map(u)),o):r},o.base=function(e){return arguments.length?(t=+e,n.domain(r.map(u)),o):t},o.nice=function(){var t=Pi(r.map(u),e?Math:gs);return n.domain(t),r=t.map(i),o},o.ticks=function(){var n=zi(r),o=[],a=n[0],c=n[1],s=Math.floor(u(a)),l=Math.ceil(u(c)),f=t%1?2:t;if(isFinite(l-s)){if(e){for(;l>s;s++)for(var h=1;f>h;h++)o.push(i(s)*h);o.push(i(s))}else for(o.push(i(s));s++<l;)for(var h=f-1;h>0;h--)o.push(i(s)*h);for(s=0;o[s]<a;s++);for(l=o.length;o[l-1]>c;l--);o=o.slice(s,l)}return o},o.tickFormat=function(n,t){if(!arguments.length)return hs;arguments.length<2?t=hs:"function"!=typeof t&&(t=Xo.format(t));var r,a=Math.max(.1,n/o.ticks().length),c=e?(r=1e-12,Math.ceil):(r=-1e-12,Math.floor);return function(n){return n/i(c(u(n)+r))<=a?t(n):""}},o.copy=function(){return $i(n.copy(),t,e,r)},Fi(o,n)}function Bi(n,t,e){function r(t){return n(u(t))}var u=Wi(t),i=Wi(1/t);return r.invert=function(t){return i(n.invert(t))},r.domain=function(t){return arguments.length?(n.domain((e=t.map(Number)).map(u)),r):e},r.ticks=function(n){return Ii(e,n)},r.tickFormat=function(n,t){return Zi(e,n,t)},r.nice=function(n){return r.domain(Oi(e,n))},r.exponent=function(o){return arguments.length?(u=Wi(t=o),i=Wi(1/t),n.domain(e.map(u)),r):t},r.copy=function(){return Bi(n.copy(),t,e)},Fi(r,n)}function Wi(n){return function(t){return 0>t?-Math.pow(-t,n):Math.pow(t,n)}}function Ji(n,t){function e(e){return o[((i.get(e)||"range"===t.t&&i.set(e,n.push(e)))-1)%o.length]}function r(t,e){return Xo.range(n.length).map(function(n){return t+e*n})}var i,o,a;return 
e.domain=function(r){if(!arguments.length)return n;n=[],i=new u;for(var o,a=-1,c=r.length;++a<c;)i.has(o=r[a])||i.set(o,n.push(o));return e[t.t].apply(e,t.a)},e.range=function(n){return arguments.length?(o=n,a=0,t={t:"range",a:arguments},e):o},e.rangePoints=function(u,i){arguments.length<2&&(i=0);var c=u[0],s=u[1],l=(s-c)/(Math.max(1,n.length-1)+i);return o=r(n.length<2?(c+s)/2:c+l*i/2,l),a=0,t={t:"rangePoints",a:arguments},e},e.rangeBands=function(u,i,c){arguments.length<2&&(i=0),arguments.length<3&&(c=i);var s=u[1]<u[0],l=u[s-0],f=u[1-s],h=(f-l)/(n.length-i+2*c);return o=r(l+h*c,h),s&&o.reverse(),a=h*(1-i),t={t:"rangeBands",a:arguments},e},e.rangeRoundBands=function(u,i,c){arguments.length<2&&(i=0),arguments.length<3&&(c=i);var s=u[1]<u[0],l=u[s-0],f=u[1-s],h=Math.floor((f-l)/(n.length-i+2*c)),g=f-l-(n.length-i)*h;return o=r(l+Math.round(g/2),h),s&&o.reverse(),a=Math.round(h*(1-i)),t={t:"rangeRoundBands",a:arguments},e},e.rangeBand=function(){return a},e.rangeExtent=function(){return zi(t.a[0])},e.copy=function(){return Ji(n,t)},e.domain(n)}function Gi(n,t){function e(){var e=0,i=t.length;for(u=[];++e<i;)u[e-1]=Xo.quantile(n,e/i);return r}function r(n){return isNaN(n=+n)?void 0:t[Xo.bisect(u,n)]}var u;return r.domain=function(t){return arguments.length?(n=t.filter(function(n){return!isNaN(n)}).sort(Xo.ascending),e()):n},r.range=function(n){return arguments.length?(t=n,e()):t},r.quantiles=function(){return u},r.invertExtent=function(e){return e=t.indexOf(e),0>e?[0/0,0/0]:[e>0?u[e-1]:n[0],e<u.length?u[e]:n[n.length-1]]},r.copy=function(){return Gi(n,t)},e()}function Ki(n,t,e){function r(t){return e[Math.max(0,Math.min(o,Math.floor(i*(t-n))))]}function u(){return i=e.length/(t-n),o=e.length-1,r}var i,o;return r.domain=function(e){return arguments.length?(n=+e[0],t=+e[e.length-1],u()):[n,t]},r.range=function(n){return arguments.length?(e=n,u()):e},r.invertExtent=function(t){return t=e.indexOf(t),t=0>t?0/0:t/i+n,[t,t+1/i]},r.copy=function(){return Ki(n,t,e)},u()}function Qi(n,t){function e(e){return e>=e?t[Xo.bisect(n,e)]:void 0}return e.domain=function(t){return arguments.length?(n=t,e):n},e.range=function(n){return arguments.length?(t=n,e):t},e.invertExtent=function(e){return e=t.indexOf(e),[n[e-1],n[e]]},e.copy=function(){return Qi(n,t)},e}function no(n){function t(n){return+n}return t.invert=t,t.domain=t.range=function(e){return arguments.length?(n=e.map(t),t):n},t.ticks=function(t){return Ii(n,t)},t.tickFormat=function(t,e){return Zi(n,t,e)},t.copy=function(){return no(n)},t}function to(n){return n.innerRadius}function eo(n){return n.outerRadius}function ro(n){return n.startAngle}function uo(n){return n.endAngle}function io(n){function t(t){function o(){s.push("M",i(n(l),a))}for(var c,s=[],l=[],f=-1,h=t.length,g=_t(e),p=_t(r);++f<h;)u.call(this,c=t[f],f)?l.push([+g.call(this,c,f),+p.call(this,c,f)]):l.length&&(o(),l=[]);return l.length&&o(),s.length?s.join(""):null}var e=br,r=wr,u=be,i=oo,o=i.key,a=.7;return t.x=function(n){return arguments.length?(e=n,t):e},t.y=function(n){return arguments.length?(r=n,t):r},t.defined=function(n){return arguments.length?(u=n,t):u},t.interpolate=function(n){return arguments.length?(o="function"==typeof n?i=n:(i=Ms.get(n)||oo).key,t):o},t.tension=function(n){return arguments.length?(a=n,t):a},t}function oo(n){return n.join("L")}function ao(n){return oo(n)+"Z"}function co(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("H",(r[0]+(r=n[t])[0])/2,"V",r[1]);return e>1&&u.push("H",r[0]),u.join("")}function so(n){for(var 
t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("V",(r=n[t])[1],"H",r[0]);return u.join("")}function lo(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("H",(r=n[t])[0],"V",r[1]);return u.join("")}function fo(n,t){return n.length<4?oo(n):n[1]+po(n.slice(1,n.length-1),vo(n,t))}function ho(n,t){return n.length<3?oo(n):n[0]+po((n.push(n[0]),n),vo([n[n.length-2]].concat(n,[n[1]]),t))}function go(n,t){return n.length<3?oo(n):n[0]+po(n,vo(n,t))}function po(n,t){if(t.length<1||n.length!=t.length&&n.length!=t.length+2)return oo(n);var e=n.length!=t.length,r="",u=n[0],i=n[1],o=t[0],a=o,c=1;if(e&&(r+="Q"+(i[0]-2*o[0]/3)+","+(i[1]-2*o[1]/3)+","+i[0]+","+i[1],u=n[1],c=2),t.length>1){a=t[1],i=n[c],c++,r+="C"+(u[0]+o[0])+","+(u[1]+o[1])+","+(i[0]-a[0])+","+(i[1]-a[1])+","+i[0]+","+i[1];for(var s=2;s<t.length;s++,c++)i=n[c],a=t[s],r+="S"+(i[0]-a[0])+","+(i[1]-a[1])+","+i[0]+","+i[1]}if(e){var l=n[c];r+="Q"+(i[0]+2*a[0]/3)+","+(i[1]+2*a[1]/3)+","+l[0]+","+l[1]}return r}function vo(n,t){for(var e,r=[],u=(1-t)/2,i=n[0],o=n[1],a=1,c=n.length;++a<c;)e=i,i=o,o=n[a],r.push([u*(o[0]-e[0]),u*(o[1]-e[1])]);return r}function mo(n){if(n.length<3)return oo(n);var t=1,e=n.length,r=n[0],u=r[0],i=r[1],o=[u,u,u,(r=n[1])[0]],a=[i,i,i,r[1]],c=[u,",",i,"L",_o(ws,o),",",_o(ws,a)];for(n.push(n[e-1]);++t<=e;)r=n[t],o.shift(),o.push(r[0]),a.shift(),a.push(r[1]),bo(c,o,a);return n.pop(),c.push("L",r),c.join("")}function yo(n){if(n.length<4)return oo(n);for(var t,e=[],r=-1,u=n.length,i=[0],o=[0];++r<3;)t=n[r],i.push(t[0]),o.push(t[1]);for(e.push(_o(ws,i)+","+_o(ws,o)),--r;++r<u;)t=n[r],i.shift(),i.push(t[0]),o.shift(),o.push(t[1]),bo(e,i,o);return e.join("")}function xo(n){for(var t,e,r=-1,u=n.length,i=u+4,o=[],a=[];++r<4;)e=n[r%u],o.push(e[0]),a.push(e[1]);for(t=[_o(ws,o),",",_o(ws,a)],--r;++r<i;)e=n[r%u],o.shift(),o.push(e[0]),a.shift(),a.push(e[1]),bo(t,o,a);return t.join("")}function Mo(n,t){var e=n.length-1;if(e)for(var r,u,i=n[0][0],o=n[0][1],a=n[e][0]-i,c=n[e][1]-o,s=-1;++s<=e;)r=n[s],u=s/e,r[0]=t*r[0]+(1-t)*(i+u*a),r[1]=t*r[1]+(1-t)*(o+u*c);return mo(n)}function _o(n,t){return n[0]*t[0]+n[1]*t[1]+n[2]*t[2]+n[3]*t[3]}function bo(n,t,e){n.push("C",_o(_s,t),",",_o(_s,e),",",_o(bs,t),",",_o(bs,e),",",_o(ws,t),",",_o(ws,e))}function wo(n,t){return(t[1]-n[1])/(t[0]-n[0])}function So(n){for(var t=0,e=n.length-1,r=[],u=n[0],i=n[1],o=r[0]=wo(u,i);++t<e;)r[t]=(o+(o=wo(u=i,i=n[t+1])))/2;return r[t]=o,r}function ko(n){for(var t,e,r,u,i=[],o=So(n),a=-1,c=n.length-1;++a<c;)t=wo(n[a],n[a+1]),oa(t)<Aa?o[a]=o[a+1]=0:(e=o[a]/t,r=o[a+1]/t,u=e*e+r*r,u>9&&(u=3*t/Math.sqrt(u),o[a]=u*e,o[a+1]=u*r));for(a=-1;++a<=c;)u=(n[Math.min(c,a+1)][0]-n[Math.max(0,a-1)][0])/(6*(1+o[a]*o[a])),i.push([u||0,o[a]*u||0]);return i}function Eo(n){return n.length<3?oo(n):n[0]+po(n,ko(n))}function Ao(n){for(var t,e,r,u=-1,i=n.length;++u<i;)t=n[u],e=t[0],r=t[1]+ys,t[0]=e*Math.cos(r),t[1]=e*Math.sin(r);return n}function Co(n){function t(t){function c(){v.push("M",a(n(m),f),l,s(n(d.reverse()),f),"Z")}for(var h,g,p,v=[],d=[],m=[],y=-1,x=t.length,M=_t(e),_=_t(u),b=e===r?function(){return g}:_t(r),w=u===i?function(){return p}:_t(i);++y<x;)o.call(this,h=t[y],y)?(d.push([g=+M.call(this,h,y),p=+_.call(this,h,y)]),m.push([+b.call(this,h,y),+w.call(this,h,y)])):d.length&&(c(),d=[],m=[]);return d.length&&c(),v.length?v.join(""):null}var e=br,r=br,u=0,i=wr,o=be,a=oo,c=a.key,s=a,l="L",f=.7;return t.x=function(n){return arguments.length?(e=r=n,t):r},t.x0=function(n){return arguments.length?(e=n,t):e},t.x1=function(n){return 
arguments.length?(r=n,t):r},t.y=function(n){return arguments.length?(u=i=n,t):i},t.y0=function(n){return arguments.length?(u=n,t):u},t.y1=function(n){return arguments.length?(i=n,t):i},t.defined=function(n){return arguments.length?(o=n,t):o},t.interpolate=function(n){return arguments.length?(c="function"==typeof n?a=n:(a=Ms.get(n)||oo).key,s=a.reverse||a,l=a.closed?"M":"L",t):c},t.tension=function(n){return arguments.length?(f=n,t):f},t}function No(n){return n.radius}function Lo(n){return[n.x,n.y]}function To(n){return function(){var t=n.apply(this,arguments),e=t[0],r=t[1]+ys;return[e*Math.cos(r),e*Math.sin(r)]}}function qo(){return 64}function zo(){return"circle"}function Ro(n){var t=Math.sqrt(n/Sa);return"M0,"+t+"A"+t+","+t+" 0 1,1 0,"+-t+"A"+t+","+t+" 0 1,1 0,"+t+"Z"}function Do(n,t){return fa(n,Ns),n.id=t,n}function Po(n,t,e,r){var u=n.id;return R(n,"function"==typeof e?function(n,i,o){n.__transition__[u].tween.set(t,r(e.call(n,n.__data__,i,o)))}:(e=r(e),function(n){n.__transition__[u].tween.set(t,e)}))}function Uo(n){return null==n&&(n=""),function(){this.textContent=n}}function jo(n,t,e,r){var i=n.__transition__||(n.__transition__={active:0,count:0}),o=i[e];if(!o){var a=r.time;o=i[e]={tween:new u,time:a,ease:r.ease,delay:r.delay,duration:r.duration},++i.count,Xo.timer(function(r){function u(r){return i.active>e?s():(i.active=e,o.event&&o.event.start.call(n,l,t),o.tween.forEach(function(e,r){(r=r.call(n,l,t))&&v.push(r)}),Xo.timer(function(){return p.c=c(r||1)?be:c,1},0,a),void 0)}function c(r){if(i.active!==e)return s();for(var u=r/g,a=f(u),c=v.length;c>0;)v[--c].call(n,a);return u>=1?(o.event&&o.event.end.call(n,l,t),s()):void 0}function s(){return--i.count?delete i[e]:delete n.__transition__,1}var l=n.__data__,f=o.ease,h=o.delay,g=o.duration,p=Ja,v=[];return p.t=h+a,r>=h?u(r-h):(p.c=u,void 0)},0,a)}}function Ho(n,t){n.attr("transform",function(n){return"translate("+t(n)+",0)"})}function Fo(n,t){n.attr("transform",function(n){return"translate(0,"+t(n)+")"})}function Oo(n){return n.toISOString()}function Yo(n,t,e){function r(t){return n(t)}function u(n,e){var r=n[1]-n[0],u=r/e,i=Xo.bisect(js,u);return i==js.length?[t.year,Yi(n.map(function(n){return n/31536e6}),e)[2]]:i?t[u/js[i-1]<js[i]/u?i-1:i]:[Os,Yi(n,e)[2]]}return r.invert=function(t){return Io(n.invert(t))},r.domain=function(t){return arguments.length?(n.domain(t),r):n.domain().map(Io)},r.nice=function(n,t){function e(e){return!isNaN(e)&&!n.range(e,Io(+e+1),t).length}var i=r.domain(),o=zi(i),a=null==n?u(o,10):"number"==typeof n&&u(o,n);return a&&(n=a[0],t=a[1]),r.domain(Pi(i,t>1?{floor:function(t){for(;e(t=n.floor(t));)t=Io(t-1);return t},ceil:function(t){for(;e(t=n.ceil(t));)t=Io(+t+1);return t}}:n))},r.ticks=function(n,t){var e=zi(r.domain()),i=null==n?u(e,10):"number"==typeof n?u(e,n):!n.range&&[{range:n},t];return i&&(n=i[0],t=i[1]),n.range(e[0],Io(+e[1]+1),1>t?1:t)},r.tickFormat=function(){return e},r.copy=function(){return Yo(n.copy(),t,e)},Fi(r,n)}function Io(n){return new Date(n)}function Zo(n){return JSON.parse(n.responseText)}function Vo(n){var t=Wo.createRange();return t.selectNode(Wo.body),t.createContextualFragment(n.responseText)}var Xo={version:"3.4.3"};Date.now||(Date.now=function(){return+new Date});var $o=[].slice,Bo=function(n){return $o.call(n)},Wo=document,Jo=Wo.documentElement,Go=window;try{Bo(Jo.childNodes)[0].nodeType}catch(Ko){Bo=function(n){for(var t=n.length,e=new Array(t);t--;)e[t]=n[t];return e}}try{Wo.createElement("div").style.setProperty("opacity",0,"")}catch(Qo){var 
na=Go.Element.prototype,ta=na.setAttribute,ea=na.setAttributeNS,ra=Go.CSSStyleDeclaration.prototype,ua=ra.setProperty;na.setAttribute=function(n,t){ta.call(this,n,t+"")},na.setAttributeNS=function(n,t,e){ea.call(this,n,t,e+"")},ra.setProperty=function(n,t,e){ua.call(this,n,t+"",e)}}Xo.ascending=function(n,t){return t>n?-1:n>t?1:n>=t?0:0/0},Xo.descending=function(n,t){return n>t?-1:t>n?1:t>=n?0:0/0},Xo.min=function(n,t){var e,r,u=-1,i=n.length;if(1===arguments.length){for(;++u<i&&!(null!=(e=n[u])&&e>=e);)e=void 0;for(;++u<i;)null!=(r=n[u])&&e>r&&(e=r)}else{for(;++u<i&&!(null!=(e=t.call(n,n[u],u))&&e>=e);)e=void 0;for(;++u<i;)null!=(r=t.call(n,n[u],u))&&e>r&&(e=r)}return e},Xo.max=function(n,t){var e,r,u=-1,i=n.length;if(1===arguments.length){for(;++u<i&&!(null!=(e=n[u])&&e>=e);)e=void 0;for(;++u<i;)null!=(r=n[u])&&r>e&&(e=r)}else{for(;++u<i&&!(null!=(e=t.call(n,n[u],u))&&e>=e);)e=void 0;for(;++u<i;)null!=(r=t.call(n,n[u],u))&&r>e&&(e=r)}return e},Xo.extent=function(n,t){var e,r,u,i=-1,o=n.length;if(1===arguments.length){for(;++i<o&&!(null!=(e=u=n[i])&&e>=e);)e=u=void 0;for(;++i<o;)null!=(r=n[i])&&(e>r&&(e=r),r>u&&(u=r))}else{for(;++i<o&&!(null!=(e=u=t.call(n,n[i],i))&&e>=e);)e=void 0;for(;++i<o;)null!=(r=t.call(n,n[i],i))&&(e>r&&(e=r),r>u&&(u=r))}return[e,u]},Xo.sum=function(n,t){var e,r=0,u=n.length,i=-1;if(1===arguments.length)for(;++i<u;)isNaN(e=+n[i])||(r+=e);else for(;++i<u;)isNaN(e=+t.call(n,n[i],i))||(r+=e);return r},Xo.mean=function(t,e){var r,u=t.length,i=0,o=-1,a=0;if(1===arguments.length)for(;++o<u;)n(r=t[o])&&(i+=(r-i)/++a);else for(;++o<u;)n(r=e.call(t,t[o],o))&&(i+=(r-i)/++a);return a?i:void 0},Xo.quantile=function(n,t){var e=(n.length-1)*t+1,r=Math.floor(e),u=+n[r-1],i=e-r;return i?u+i*(n[r]-u):u},Xo.median=function(t,e){return arguments.length>1&&(t=t.map(e)),t=t.filter(n),t.length?Xo.quantile(t.sort(Xo.ascending),.5):void 0},Xo.bisector=function(n){return{left:function(t,e,r,u){for(arguments.length<3&&(r=0),arguments.length<4&&(u=t.length);u>r;){var i=r+u>>>1;n.call(t,t[i],i)<e?r=i+1:u=i}return r},right:function(t,e,r,u){for(arguments.length<3&&(r=0),arguments.length<4&&(u=t.length);u>r;){var i=r+u>>>1;e<n.call(t,t[i],i)?u=i:r=i+1}return r}}};var ia=Xo.bisector(function(n){return n});Xo.bisectLeft=ia.left,Xo.bisect=Xo.bisectRight=ia.right,Xo.shuffle=function(n){for(var t,e,r=n.length;r;)e=0|Math.random()*r--,t=n[r],n[r]=n[e],n[e]=t;return n},Xo.permute=function(n,t){for(var e=t.length,r=new Array(e);e--;)r[e]=n[t[e]];return r},Xo.pairs=function(n){for(var t,e=0,r=n.length-1,u=n[0],i=new Array(0>r?0:r);r>e;)i[e]=[t=u,u=n[++e]];return i},Xo.zip=function(){if(!(u=arguments.length))return[];for(var n=-1,e=Xo.min(arguments,t),r=new Array(e);++n<e;)for(var u,i=-1,o=r[n]=new Array(u);++i<u;)o[i]=arguments[i][n];return r},Xo.transpose=function(n){return Xo.zip.apply(Xo,n)},Xo.keys=function(n){var t=[];for(var e in n)t.push(e);return t},Xo.values=function(n){var t=[];for(var e in n)t.push(n[e]);return t},Xo.entries=function(n){var t=[];for(var e in n)t.push({key:e,value:n[e]});return t},Xo.merge=function(n){for(var t,e,r,u=n.length,i=-1,o=0;++i<u;)o+=n[i].length;for(e=new Array(o);--u>=0;)for(r=n[u],t=r.length;--t>=0;)e[--o]=r[t];return e};var oa=Math.abs;Xo.range=function(n,t,r){if(arguments.length<3&&(r=1,arguments.length<2&&(t=n,n=0)),1/0===(t-n)/r)throw new Error("infinite range");var u,i=[],o=e(oa(r)),a=-1;if(n*=o,t*=o,r*=o,0>r)for(;(u=n+r*++a)>t;)i.push(u/o);else for(;(u=n+r*++a)<t;)i.push(u/o);return i},Xo.map=function(n){var t=new u;if(n instanceof 
u)n.forEach(function(n,e){t.set(n,e)});else for(var e in n)t.set(e,n[e]);return t},r(u,{has:i,get:function(n){return this[aa+n]},set:function(n,t){return this[aa+n]=t},remove:o,keys:a,values:function(){var n=[];return this.forEach(function(t,e){n.push(e)}),n},entries:function(){var n=[];return this.forEach(function(t,e){n.push({key:t,value:e})}),n},size:c,empty:s,forEach:function(n){for(var t in this)t.charCodeAt(0)===ca&&n.call(this,t.substring(1),this[t])}});var aa="\x00",ca=aa.charCodeAt(0);Xo.nest=function(){function n(t,a,c){if(c>=o.length)return r?r.call(i,a):e?a.sort(e):a;for(var s,l,f,h,g=-1,p=a.length,v=o[c++],d=new u;++g<p;)(h=d.get(s=v(l=a[g])))?h.push(l):d.set(s,[l]);return t?(l=t(),f=function(e,r){l.set(e,n(t,r,c))}):(l={},f=function(e,r){l[e]=n(t,r,c)}),d.forEach(f),l}function t(n,e){if(e>=o.length)return n;var r=[],u=a[e++];return n.forEach(function(n,u){r.push({key:n,values:t(u,e)})}),u?r.sort(function(n,t){return u(n.key,t.key)}):r}var e,r,i={},o=[],a=[];return i.map=function(t,e){return n(e,t,0)},i.entries=function(e){return t(n(Xo.map,e,0),0)},i.key=function(n){return o.push(n),i},i.sortKeys=function(n){return a[o.length-1]=n,i},i.sortValues=function(n){return e=n,i},i.rollup=function(n){return r=n,i},i},Xo.set=function(n){var t=new l;if(n)for(var e=0,r=n.length;r>e;++e)t.add(n[e]);return t},r(l,{has:i,add:function(n){return this[aa+n]=!0,n},remove:function(n){return n=aa+n,n in this&&delete this[n]},values:a,size:c,empty:s,forEach:function(n){for(var t in this)t.charCodeAt(0)===ca&&n.call(this,t.substring(1))}}),Xo.behavior={},Xo.rebind=function(n,t){for(var e,r=1,u=arguments.length;++r<u;)n[e=arguments[r]]=f(n,t,t[e]);return n};var sa=["webkit","ms","moz","Moz","o","O"];Xo.dispatch=function(){for(var n=new p,t=-1,e=arguments.length;++t<e;)n[arguments[t]]=v(n);return n},p.prototype.on=function(n,t){var e=n.indexOf("."),r="";if(e>=0&&(r=n.substring(e+1),n=n.substring(0,e)),n)return arguments.length<2?this[n].on(r):this[n].on(r,t);if(2===arguments.length){if(null==t)for(n in this)this.hasOwnProperty(n)&&this[n].on(r,null);return this}},Xo.event=null,Xo.requote=function(n){return n.replace(la,"\\$&")};var la=/[\\\^\$\*\+\?\|\[\]\(\)\.\{\}]/g,fa={}.__proto__?function(n,t){n.__proto__=t}:function(n,t){for(var e in t)n[e]=t[e]},ha=function(n,t){return t.querySelector(n)},ga=function(n,t){return t.querySelectorAll(n)},pa=Jo[h(Jo,"matchesSelector")],va=function(n,t){return pa.call(n,t)};"function"==typeof Sizzle&&(ha=function(n,t){return Sizzle(n,t)[0]||null},ga=Sizzle,va=Sizzle.matchesSelector),Xo.selection=function(){return xa};var da=Xo.selection.prototype=[];da.select=function(n){var t,e,r,u,i=[];n=M(n);for(var o=-1,a=this.length;++o<a;){i.push(t=[]),t.parentNode=(r=this[o]).parentNode;for(var c=-1,s=r.length;++c<s;)(u=r[c])?(t.push(e=n.call(u,u.__data__,c,o)),e&&"__data__"in u&&(e.__data__=u.__data__)):t.push(null)}return x(i)},da.selectAll=function(n){var t,e,r=[];n=_(n);for(var u=-1,i=this.length;++u<i;)for(var o=this[u],a=-1,c=o.length;++a<c;)(e=o[a])&&(r.push(t=Bo(n.call(e,e.__data__,a,u))),t.parentNode=e);return x(r)};var ma={svg:"http://www.w3.org/2000/svg",xhtml:"http://www.w3.org/1999/xhtml",xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"};Xo.ns={prefix:ma,qualify:function(n){var t=n.indexOf(":"),e=n;return t>=0&&(e=n.substring(0,t),n=n.substring(t+1)),ma.hasOwnProperty(e)?{space:ma[e],local:n}:n}},da.attr=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node();return 
n=Xo.ns.qualify(n),n.local?e.getAttributeNS(n.space,n.local):e.getAttribute(n)}for(t in n)this.each(b(t,n[t]));return this}return this.each(b(n,t))},da.classed=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node(),r=(n=k(n)).length,u=-1;if(t=e.classList){for(;++u<r;)if(!t.contains(n[u]))return!1}else for(t=e.getAttribute("class");++u<r;)if(!S(n[u]).test(t))return!1;return!0}for(t in n)this.each(E(t,n[t]));return this}return this.each(E(n,t))},da.style=function(n,t,e){var r=arguments.length;if(3>r){if("string"!=typeof n){2>r&&(t="");for(e in n)this.each(C(e,n[e],t));return this}if(2>r)return Go.getComputedStyle(this.node(),null).getPropertyValue(n);e=""}return this.each(C(n,t,e))},da.property=function(n,t){if(arguments.length<2){if("string"==typeof n)return this.node()[n];for(t in n)this.each(N(t,n[t]));return this}return this.each(N(n,t))},da.text=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.textContent=null==t?"":t}:null==n?function(){this.textContent=""}:function(){this.textContent=n}):this.node().textContent},da.html=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.innerHTML=null==t?"":t}:null==n?function(){this.innerHTML=""}:function(){this.innerHTML=n}):this.node().innerHTML},da.append=function(n){return n=L(n),this.select(function(){return this.appendChild(n.apply(this,arguments))})},da.insert=function(n,t){return n=L(n),t=M(t),this.select(function(){return this.insertBefore(n.apply(this,arguments),t.apply(this,arguments)||null)})},da.remove=function(){return this.each(function(){var n=this.parentNode;n&&n.removeChild(this)})},da.data=function(n,t){function e(n,e){var r,i,o,a=n.length,f=e.length,h=Math.min(a,f),g=new Array(f),p=new Array(f),v=new Array(a);if(t){var d,m=new u,y=new u,x=[];for(r=-1;++r<a;)d=t.call(i=n[r],i.__data__,r),m.has(d)?v[r]=i:m.set(d,i),x.push(d);for(r=-1;++r<f;)d=t.call(e,o=e[r],r),(i=m.get(d))?(g[r]=i,i.__data__=o):y.has(d)||(p[r]=T(o)),y.set(d,o),m.remove(d);for(r=-1;++r<a;)m.has(x[r])&&(v[r]=n[r])}else{for(r=-1;++r<h;)i=n[r],o=e[r],i?(i.__data__=o,g[r]=i):p[r]=T(o);for(;f>r;++r)p[r]=T(e[r]);for(;a>r;++r)v[r]=n[r]}p.update=g,p.parentNode=g.parentNode=v.parentNode=n.parentNode,c.push(p),s.push(g),l.push(v)}var r,i,o=-1,a=this.length;if(!arguments.length){for(n=new Array(a=(r=this[0]).length);++o<a;)(i=r[o])&&(n[o]=i.__data__);return n}var c=D([]),s=x([]),l=x([]);if("function"==typeof n)for(;++o<a;)e(r=this[o],n.call(r,r.parentNode.__data__,o));else for(;++o<a;)e(r=this[o],n);return s.enter=function(){return c},s.exit=function(){return l},s},da.datum=function(n){return arguments.length?this.property("__data__",n):this.property("__data__")},da.filter=function(n){var t,e,r,u=[];"function"!=typeof n&&(n=q(n));for(var i=0,o=this.length;o>i;i++){u.push(t=[]),t.parentNode=(e=this[i]).parentNode;for(var a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a,i)&&t.push(r)}return x(u)},da.order=function(){for(var n=-1,t=this.length;++n<t;)for(var e,r=this[n],u=r.length-1,i=r[u];--u>=0;)(e=r[u])&&(i&&i!==e.nextSibling&&i.parentNode.insertBefore(e,i),i=e);return this},da.sort=function(n){n=z.apply(this,arguments);for(var t=-1,e=this.length;++t<e;)this[t].sort(n);return this.order()},da.each=function(n){return R(this,function(t,e,r){n.call(t,t.__data__,e,r)})},da.call=function(n){var t=Bo(arguments);return n.apply(t[0]=this,t),this},da.empty=function(){return!this.node()},da.node=function(){for(var 
n=0,t=this.length;t>n;n++)for(var e=this[n],r=0,u=e.length;u>r;r++){var i=e[r];if(i)return i}return null},da.size=function(){var n=0;return this.each(function(){++n}),n};var ya=[];Xo.selection.enter=D,Xo.selection.enter.prototype=ya,ya.append=da.append,ya.empty=da.empty,ya.node=da.node,ya.call=da.call,ya.size=da.size,ya.select=function(n){for(var t,e,r,u,i,o=[],a=-1,c=this.length;++a<c;){r=(u=this[a]).update,o.push(t=[]),t.parentNode=u.parentNode;for(var s=-1,l=u.length;++s<l;)(i=u[s])?(t.push(r[s]=e=n.call(u.parentNode,i.__data__,s,a)),e.__data__=i.__data__):t.push(null)}return x(o)},ya.insert=function(n,t){return arguments.length<2&&(t=P(this)),da.insert.call(this,n,t)},da.transition=function(){for(var n,t,e=ks||++Ls,r=[],u=Es||{time:Date.now(),ease:yu,delay:0,duration:250},i=-1,o=this.length;++i<o;){r.push(n=[]);for(var a=this[i],c=-1,s=a.length;++c<s;)(t=a[c])&&jo(t,c,e,u),n.push(t)}return Do(r,e)},da.interrupt=function(){return this.each(U)},Xo.select=function(n){var t=["string"==typeof n?ha(n,Wo):n];return t.parentNode=Jo,x([t])},Xo.selectAll=function(n){var t=Bo("string"==typeof n?ga(n,Wo):n);return t.parentNode=Jo,x([t])};var xa=Xo.select(Jo);da.on=function(n,t,e){var r=arguments.length;if(3>r){if("string"!=typeof n){2>r&&(t=!1);for(e in n)this.each(j(e,n[e],t));return this}if(2>r)return(r=this.node()["__on"+n])&&r._;e=!1}return this.each(j(n,t,e))};var Ma=Xo.map({mouseenter:"mouseover",mouseleave:"mouseout"});Ma.forEach(function(n){"on"+n in Wo&&Ma.remove(n)});var _a="onselectstart"in Wo?null:h(Jo.style,"userSelect"),ba=0;Xo.mouse=function(n){return Y(n,m())};var wa=/WebKit/.test(Go.navigator.userAgent)?-1:0;Xo.touches=function(n,t){return arguments.length<2&&(t=m().touches),t?Bo(t).map(function(t){var e=Y(n,t);return e.identifier=t.identifier,e}):[]},Xo.behavior.drag=function(){function n(){this.on("mousedown.drag",o).on("touchstart.drag",a)}function t(){return Xo.event.changedTouches[0].identifier}function e(n,t){return Xo.touches(n).filter(function(n){return n.identifier===t})[0]}function r(n,t,e,r){return function(){function o(){var n=t(l,g),e=n[0]-v[0],r=n[1]-v[1];d|=e|r,v=n,f({type:"drag",x:n[0]+c[0],y:n[1]+c[1],dx:e,dy:r})}function a(){m.on(e+"."+p,null).on(r+"."+p,null),y(d&&Xo.event.target===h),f({type:"dragend"})}var c,s=this,l=s.parentNode,f=u.of(s,arguments),h=Xo.event.target,g=n(),p=null==g?"drag":"drag-"+g,v=t(l,g),d=0,m=Xo.select(Go).on(e+"."+p,o).on(r+"."+p,a),y=O();i?(c=i.apply(s,arguments),c=[c.x-v[0],c.y-v[1]]):c=[0,0],f({type:"dragstart"})}}var u=y(n,"drag","dragstart","dragend"),i=null,o=r(g,Xo.mouse,"mousemove","mouseup"),a=r(t,e,"touchmove","touchend");return n.origin=function(t){return arguments.length?(i=t,n):i},Xo.rebind(n,u,"on")};var Sa=Math.PI,ka=2*Sa,Ea=Sa/2,Aa=1e-6,Ca=Aa*Aa,Na=Sa/180,La=180/Sa,Ta=Math.SQRT2,qa=2,za=4;Xo.interpolateZoom=function(n,t){function e(n){var t=n*y;if(m){var e=B(v),o=i/(qa*h)*(e*W(Ta*t+v)-$(v));return[r+o*s,u+o*l,i*e/B(Ta*t+v)]}return[r+n*s,u+n*l,i*Math.exp(Ta*t)]}var r=n[0],u=n[1],i=n[2],o=t[0],a=t[1],c=t[2],s=o-r,l=a-u,f=s*s+l*l,h=Math.sqrt(f),g=(c*c-i*i+za*f)/(2*i*qa*h),p=(c*c-i*i-za*f)/(2*c*qa*h),v=Math.log(Math.sqrt(g*g+1)-g),d=Math.log(Math.sqrt(p*p+1)-p),m=d-v,y=(m||Math.log(c/i))/Ta;return e.duration=1e3*y,e},Xo.behavior.zoom=function(){function n(n){n.on(A,s).on(Pa+".zoom",f).on(C,h).on("dblclick.zoom",g).on(L,l)}function t(n){return[(n[0]-S.x)/S.k,(n[1]-S.y)/S.k]}function e(n){return[n[0]*S.k+S.x,n[1]*S.k+S.y]}function r(n){S.k=Math.max(E[0],Math.min(E[1],n))}function 
u(n,t){t=e(t),S.x+=n[0]-t[0],S.y+=n[1]-t[1]}function i(){_&&_.domain(M.range().map(function(n){return(n-S.x)/S.k}).map(M.invert)),w&&w.domain(b.range().map(function(n){return(n-S.y)/S.k}).map(b.invert))}function o(n){n({type:"zoomstart"})}function a(n){i(),n({type:"zoom",scale:S.k,translate:[S.x,S.y]})}function c(n){n({type:"zoomend"})}function s(){function n(){l=1,u(Xo.mouse(r),g),a(i)}function e(){f.on(C,Go===r?h:null).on(N,null),p(l&&Xo.event.target===s),c(i)}var r=this,i=T.of(r,arguments),s=Xo.event.target,l=0,f=Xo.select(Go).on(C,n).on(N,e),g=t(Xo.mouse(r)),p=O();U.call(r),o(i)}function l(){function n(){var n=Xo.touches(g);return h=S.k,n.forEach(function(n){n.identifier in v&&(v[n.identifier]=t(n))}),n}function e(){for(var t=Xo.event.changedTouches,e=0,i=t.length;i>e;++e)v[t[e].identifier]=null;var o=n(),c=Date.now();if(1===o.length){if(500>c-x){var s=o[0],l=v[s.identifier];r(2*S.k),u(s,l),d(),a(p)}x=c}else if(o.length>1){var s=o[0],f=o[1],h=s[0]-f[0],g=s[1]-f[1];m=h*h+g*g}}function i(){for(var n,t,e,i,o=Xo.touches(g),c=0,s=o.length;s>c;++c,i=null)if(e=o[c],i=v[e.identifier]){if(t)break;n=e,t=i}if(i){var l=(l=e[0]-n[0])*l+(l=e[1]-n[1])*l,f=m&&Math.sqrt(l/m);n=[(n[0]+e[0])/2,(n[1]+e[1])/2],t=[(t[0]+i[0])/2,(t[1]+i[1])/2],r(f*h)}x=null,u(n,t),a(p)}function f(){if(Xo.event.touches.length){for(var t=Xo.event.changedTouches,e=0,r=t.length;r>e;++e)delete v[t[e].identifier];for(var u in v)return void n()}b.on(M,null).on(_,null),w.on(A,s).on(L,l),k(),c(p)}var h,g=this,p=T.of(g,arguments),v={},m=0,y=Xo.event.changedTouches[0].identifier,M="touchmove.zoom-"+y,_="touchend.zoom-"+y,b=Xo.select(Go).on(M,i).on(_,f),w=Xo.select(g).on(A,null).on(L,e),k=O();U.call(g),e(),o(p)}function f(){var n=T.of(this,arguments);m?clearTimeout(m):(U.call(this),o(n)),m=setTimeout(function(){m=null,c(n)},50),d();var e=v||Xo.mouse(this);p||(p=t(e)),r(Math.pow(2,.002*Ra())*S.k),u(e,p),a(n)}function h(){p=null}function g(){var n=T.of(this,arguments),e=Xo.mouse(this),i=t(e),s=Math.log(S.k)/Math.LN2;o(n),r(Math.pow(2,Xo.event.shiftKey?Math.ceil(s)-1:Math.floor(s)+1)),u(e,i),a(n),c(n)}var p,v,m,x,M,_,b,w,S={x:0,y:0,k:1},k=[960,500],E=Da,A="mousedown.zoom",C="mousemove.zoom",N="mouseup.zoom",L="touchstart.zoom",T=y(n,"zoomstart","zoom","zoomend");return n.event=function(n){n.each(function(){var n=T.of(this,arguments),t=S;ks?Xo.select(this).transition().each("start.zoom",function(){S=this.__chart__||{x:0,y:0,k:1},o(n)}).tween("zoom:zoom",function(){var e=k[0],r=k[1],u=e/2,i=r/2,o=Xo.interpolateZoom([(u-S.x)/S.k,(i-S.y)/S.k,e/S.k],[(u-t.x)/t.k,(i-t.y)/t.k,e/t.k]);return function(t){var r=o(t),c=e/r[2];this.__chart__=S={x:u-r[0]*c,y:i-r[1]*c,k:c},a(n)}}).each("end.zoom",function(){c(n)}):(this.__chart__=S,o(n),a(n),c(n))})},n.translate=function(t){return arguments.length?(S={x:+t[0],y:+t[1],k:S.k},i(),n):[S.x,S.y]},n.scale=function(t){return arguments.length?(S={x:S.x,y:S.y,k:+t},i(),n):S.k},n.scaleExtent=function(t){return arguments.length?(E=null==t?Da:[+t[0],+t[1]],n):E},n.center=function(t){return arguments.length?(v=t&&[+t[0],+t[1]],n):v},n.size=function(t){return arguments.length?(k=t&&[+t[0],+t[1]],n):k},n.x=function(t){return arguments.length?(_=t,M=t.copy(),S={x:0,y:0,k:1},n):_},n.y=function(t){return arguments.length?(w=t,b=t.copy(),S={x:0,y:0,k:1},n):w},Xo.rebind(n,T,"on")};var Ra,Da=[0,1/0],Pa="onwheel"in Wo?(Ra=function(){return-Xo.event.deltaY*(Xo.event.deltaMode?120:1)},"wheel"):"onmousewheel"in Wo?(Ra=function(){return 
Xo.event.wheelDelta},"mousewheel"):(Ra=function(){return-Xo.event.detail},"MozMousePixelScroll");G.prototype.toString=function(){return this.rgb()+""},Xo.hsl=function(n,t,e){return 1===arguments.length?n instanceof Q?K(n.h,n.s,n.l):dt(""+n,mt,K):K(+n,+t,+e)};var Ua=Q.prototype=new G;Ua.brighter=function(n){return n=Math.pow(.7,arguments.length?n:1),K(this.h,this.s,this.l/n)},Ua.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),K(this.h,this.s,n*this.l)},Ua.rgb=function(){return nt(this.h,this.s,this.l)},Xo.hcl=function(n,t,e){return 1===arguments.length?n instanceof et?tt(n.h,n.c,n.l):n instanceof it?at(n.l,n.a,n.b):at((n=yt((n=Xo.rgb(n)).r,n.g,n.b)).l,n.a,n.b):tt(+n,+t,+e)};var ja=et.prototype=new G;ja.brighter=function(n){return tt(this.h,this.c,Math.min(100,this.l+Ha*(arguments.length?n:1)))},ja.darker=function(n){return tt(this.h,this.c,Math.max(0,this.l-Ha*(arguments.length?n:1)))},ja.rgb=function(){return rt(this.h,this.c,this.l).rgb()},Xo.lab=function(n,t,e){return 1===arguments.length?n instanceof it?ut(n.l,n.a,n.b):n instanceof et?rt(n.l,n.c,n.h):yt((n=Xo.rgb(n)).r,n.g,n.b):ut(+n,+t,+e)};var Ha=18,Fa=.95047,Oa=1,Ya=1.08883,Ia=it.prototype=new G;Ia.brighter=function(n){return ut(Math.min(100,this.l+Ha*(arguments.length?n:1)),this.a,this.b)},Ia.darker=function(n){return ut(Math.max(0,this.l-Ha*(arguments.length?n:1)),this.a,this.b)},Ia.rgb=function(){return ot(this.l,this.a,this.b)},Xo.rgb=function(n,t,e){return 1===arguments.length?n instanceof pt?gt(n.r,n.g,n.b):dt(""+n,gt,nt):gt(~~n,~~t,~~e)};var Za=pt.prototype=new G;Za.brighter=function(n){n=Math.pow(.7,arguments.length?n:1);var t=this.r,e=this.g,r=this.b,u=30;return t||e||r?(t&&u>t&&(t=u),e&&u>e&&(e=u),r&&u>r&&(r=u),gt(Math.min(255,~~(t/n)),Math.min(255,~~(e/n)),Math.min(255,~~(r/n)))):gt(u,u,u)},Za.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),gt(~~(n*this.r),~~(n*this.g),~~(n*this.b))},Za.hsl=function(){return mt(this.r,this.g,this.b)},Za.toString=function(){return"#"+vt(this.r)+vt(this.g)+vt(this.b)};var 
Va=Xo.map({aliceblue:15792383,antiquewhite:16444375,aqua:65535,aquamarine:8388564,azure:15794175,beige:16119260,bisque:16770244,black:0,blanchedalmond:16772045,blue:255,blueviolet:9055202,brown:10824234,burlywood:14596231,cadetblue:6266528,chartreuse:8388352,chocolate:13789470,coral:16744272,cornflowerblue:6591981,cornsilk:16775388,crimson:14423100,cyan:65535,darkblue:139,darkcyan:35723,darkgoldenrod:12092939,darkgray:11119017,darkgreen:25600,darkgrey:11119017,darkkhaki:12433259,darkmagenta:9109643,darkolivegreen:5597999,darkorange:16747520,darkorchid:10040012,darkred:9109504,darksalmon:15308410,darkseagreen:9419919,darkslateblue:4734347,darkslategray:3100495,darkslategrey:3100495,darkturquoise:52945,darkviolet:9699539,deeppink:16716947,deepskyblue:49151,dimgray:6908265,dimgrey:6908265,dodgerblue:2003199,firebrick:11674146,floralwhite:16775920,forestgreen:2263842,fuchsia:16711935,gainsboro:14474460,ghostwhite:16316671,gold:16766720,goldenrod:14329120,gray:8421504,green:32768,greenyellow:11403055,grey:8421504,honeydew:15794160,hotpink:16738740,indianred:13458524,indigo:4915330,ivory:16777200,khaki:15787660,lavender:15132410,lavenderblush:16773365,lawngreen:8190976,lemonchiffon:16775885,lightblue:11393254,lightcoral:15761536,lightcyan:14745599,lightgoldenrodyellow:16448210,lightgray:13882323,lightgreen:9498256,lightgrey:13882323,lightpink:16758465,lightsalmon:16752762,lightseagreen:2142890,lightskyblue:8900346,lightslategray:7833753,lightslategrey:7833753,lightsteelblue:11584734,lightyellow:16777184,lime:65280,limegreen:3329330,linen:16445670,magenta:16711935,maroon:8388608,mediumaquamarine:6737322,mediumblue:205,mediumorchid:12211667,mediumpurple:9662683,mediumseagreen:3978097,mediumslateblue:8087790,mediumspringgreen:64154,mediumturquoise:4772300,mediumvioletred:13047173,midnightblue:1644912,mintcream:16121850,mistyrose:16770273,moccasin:16770229,navajowhite:16768685,navy:128,oldlace:16643558,olive:8421376,olivedrab:7048739,orange:16753920,orangered:16729344,orchid:14315734,palegoldenrod:15657130,palegreen:10025880,paleturquoise:11529966,palevioletred:14381203,papayawhip:16773077,peachpuff:16767673,peru:13468991,pink:16761035,plum:14524637,powderblue:11591910,purple:8388736,red:16711680,rosybrown:12357519,royalblue:4286945,saddlebrown:9127187,salmon:16416882,sandybrown:16032864,seagreen:3050327,seashell:16774638,sienna:10506797,silver:12632256,skyblue:8900331,slateblue:6970061,slategray:7372944,slategrey:7372944,snow:16775930,springgreen:65407,steelblue:4620980,tan:13808780,teal:32896,thistle:14204888,tomato:16737095,turquoise:4251856,violet:15631086,wheat:16113331,white:16777215,whitesmoke:16119285,yellow:16776960,yellowgreen:10145074});Va.forEach(function(n,t){Va.set(n,ft(t))}),Xo.functor=_t,Xo.xhr=wt(bt),Xo.dsv=function(n,t){function e(n,e,i){arguments.length<3&&(i=e,e=null);var o=St(n,t,null==e?r:u(e),i);return o.row=function(n){return arguments.length?o.response(null==(e=n)?r:u(n)):e},o}function r(n){return e.parse(n.responseText)}function u(n){return function(t){return e.parse(t.responseText,n)}}function i(t){return t.map(o).join(n)}function o(n){return a.test(n)?'"'+n.replace(/\"/g,'""')+'"':n}var a=new RegExp('["'+n+"\n]"),c=n.charCodeAt(0);return e.parse=function(n,t){var r;return e.parseRows(n,function(n,e){if(r)return r(n,e-1);var u=new Function("d","return {"+n.map(function(n,t){return JSON.stringify(n)+": d["+t+"]"}).join(",")+"}");r=t?function(n,e){return t(u(n),e)}:u})},e.parseRows=function(n,t){function e(){if(l>=s)return o;if(u)return u=!1,i;var 
t=l;if(34===n.charCodeAt(t)){for(var e=t;e++<s;)if(34===n.charCodeAt(e)){if(34!==n.charCodeAt(e+1))break;++e}l=e+2;var r=n.charCodeAt(e+1);return 13===r?(u=!0,10===n.charCodeAt(e+2)&&++l):10===r&&(u=!0),n.substring(t+1,e).replace(/""/g,'"')}for(;s>l;){var r=n.charCodeAt(l++),a=1;if(10===r)u=!0;else if(13===r)u=!0,10===n.charCodeAt(l)&&(++l,++a);else if(r!==c)continue;return n.substring(t,l-a)}return n.substring(t)}for(var r,u,i={},o={},a=[],s=n.length,l=0,f=0;(r=e())!==o;){for(var h=[];r!==i&&r!==o;)h.push(r),r=e();(!t||(h=t(h,f++)))&&a.push(h)}return a},e.format=function(t){if(Array.isArray(t[0]))return e.formatRows(t);var r=new l,u=[];return t.forEach(function(n){for(var t in n)r.has(t)||u.push(r.add(t))}),[u.map(o).join(n)].concat(t.map(function(t){return u.map(function(n){return o(t[n])}).join(n)})).join("\n")},e.formatRows=function(n){return n.map(i).join("\n")},e},Xo.csv=Xo.dsv(",","text/csv"),Xo.tsv=Xo.dsv("	","text/tab-separated-values");var Xa,$a,Ba,Wa,Ja,Ga=Go[h(Go,"requestAnimationFrame")]||function(n){setTimeout(n,17)};Xo.timer=function(n,t,e){var r=arguments.length;2>r&&(t=0),3>r&&(e=Date.now());var u=e+t,i={c:n,t:u,f:!1,n:null};$a?$a.n=i:Xa=i,$a=i,Ba||(Wa=clearTimeout(Wa),Ba=1,Ga(Et))},Xo.timer.flush=function(){At(),Ct()},Xo.round=function(n,t){return t?Math.round(n*(t=Math.pow(10,t)))/t:Math.round(n)};var Ka=["y","z","a","f","p","n","\xb5","m","","k","M","G","T","P","E","Z","Y"].map(Lt);Xo.formatPrefix=function(n,t){var e=0;return n&&(0>n&&(n*=-1),t&&(n=Xo.round(n,Nt(n,t))),e=1+Math.floor(1e-12+Math.log(n)/Math.LN10),e=Math.max(-24,Math.min(24,3*Math.floor((0>=e?e+1:e-1)/3)))),Ka[8+e/3]};var Qa=/(?:([^{])?([<>=^]))?([+\- ])?([$#])?(0)?(\d+)?(,)?(\.-?\d+)?([a-z%])?/i,nc=Xo.map({b:function(n){return n.toString(2)},c:function(n){return String.fromCharCode(n)},o:function(n){return n.toString(8)},x:function(n){return n.toString(16)},X:function(n){return n.toString(16).toUpperCase()},g:function(n,t){return n.toPrecision(t)},e:function(n,t){return n.toExponential(t)},f:function(n,t){return n.toFixed(t)},r:function(n,t){return(n=Xo.round(n,Nt(n,t))).toFixed(Math.max(0,Math.min(20,Nt(n*(1+1e-15),t))))}}),tc=Xo.time={},ec=Date;zt.prototype={getDate:function(){return this._.getUTCDate()},getDay:function(){return this._.getUTCDay()},getFullYear:function(){return this._.getUTCFullYear()},getHours:function(){return this._.getUTCHours()},getMilliseconds:function(){return this._.getUTCMilliseconds()},getMinutes:function(){return this._.getUTCMinutes()},getMonth:function(){return this._.getUTCMonth()},getSeconds:function(){return this._.getUTCSeconds()},getTime:function(){return this._.getTime()},getTimezoneOffset:function(){return 0},valueOf:function(){return this._.valueOf()},setDate:function(){rc.setUTCDate.apply(this._,arguments)},setDay:function(){rc.setUTCDay.apply(this._,arguments)},setFullYear:function(){rc.setUTCFullYear.apply(this._,arguments)},setHours:function(){rc.setUTCHours.apply(this._,arguments)},setMilliseconds:function(){rc.setUTCMilliseconds.apply(this._,arguments)},setMinutes:function(){rc.setUTCMinutes.apply(this._,arguments)},setMonth:function(){rc.setUTCMonth.apply(this._,arguments)},setSeconds:function(){rc.setUTCSeconds.apply(this._,arguments)},setTime:function(){rc.setTime.apply(this._,arguments)}};var rc=Date.prototype;tc.year=Rt(function(n){return n=tc.day(n),n.setMonth(0,1),n},function(n,t){n.setFullYear(n.getFullYear()+t)},function(n){return n.getFullYear()}),tc.years=tc.year.range,tc.years.utc=tc.year.utc.range,tc.day=Rt(function(n){var t=new ec(2e3,0);return 
t.setFullYear(n.getFullYear(),n.getMonth(),n.getDate()),t},function(n,t){n.setDate(n.getDate()+t)},function(n){return n.getDate()-1}),tc.days=tc.day.range,tc.days.utc=tc.day.utc.range,tc.dayOfYear=function(n){var t=tc.year(n);return Math.floor((n-t-6e4*(n.getTimezoneOffset()-t.getTimezoneOffset()))/864e5)},["sunday","monday","tuesday","wednesday","thursday","friday","saturday"].forEach(function(n,t){t=7-t;var e=tc[n]=Rt(function(n){return(n=tc.day(n)).setDate(n.getDate()-(n.getDay()+t)%7),n},function(n,t){n.setDate(n.getDate()+7*Math.floor(t))},function(n){var e=tc.year(n).getDay();return Math.floor((tc.dayOfYear(n)+(e+t)%7)/7)-(e!==t)});tc[n+"s"]=e.range,tc[n+"s"].utc=e.utc.range,tc[n+"OfYear"]=function(n){var e=tc.year(n).getDay();return Math.floor((tc.dayOfYear(n)+(e+t)%7)/7)}}),tc.week=tc.sunday,tc.weeks=tc.sunday.range,tc.weeks.utc=tc.sunday.utc.range,tc.weekOfYear=tc.sundayOfYear;var uc={"-":"",_:" ",0:"0"},ic=/^\s*\d+/,oc=/^%/;Xo.locale=function(n){return{numberFormat:Tt(n),timeFormat:Pt(n)}};var ac=Xo.locale({decimal:".",thousands:",",grouping:[3],currency:["$",""],dateTime:"%a %b %e %X %Y",date:"%m/%d/%Y",time:"%H:%M:%S",periods:["AM","PM"],days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]});Xo.format=ac.numberFormat,Xo.geo={},re.prototype={s:0,t:0,add:function(n){ue(n,this.t,cc),ue(cc.s,this.s,this),this.s?this.t+=cc.t:this.s=cc.t},reset:function(){this.s=this.t=0},valueOf:function(){return this.s}};var cc=new re;Xo.geo.stream=function(n,t){n&&sc.hasOwnProperty(n.type)?sc[n.type](n,t):ie(n,t)};var sc={Feature:function(n,t){ie(n.geometry,t)},FeatureCollection:function(n,t){for(var e=n.features,r=-1,u=e.length;++r<u;)ie(e[r].geometry,t)}},lc={Sphere:function(n,t){t.sphere()},Point:function(n,t){n=n.coordinates,t.point(n[0],n[1],n[2])},MultiPoint:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)n=e[r],t.point(n[0],n[1],n[2])},LineString:function(n,t){oe(n.coordinates,t,0)},MultiLineString:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)oe(e[r],t,0)},Polygon:function(n,t){ae(n.coordinates,t)},MultiPolygon:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)ae(e[r],t)},GeometryCollection:function(n,t){for(var e=n.geometries,r=-1,u=e.length;++r<u;)ie(e[r],t)}};Xo.geo.area=function(n){return fc=0,Xo.geo.stream(n,gc),fc};var fc,hc=new re,gc={sphere:function(){fc+=4*Sa},point:g,lineStart:g,lineEnd:g,polygonStart:function(){hc.reset(),gc.lineStart=ce},polygonEnd:function(){var n=2*hc;fc+=0>n?4*Sa+n:n,gc.lineStart=gc.lineEnd=gc.point=g}};Xo.geo.bounds=function(){function n(n,t){x.push(M=[l=n,h=n]),f>t&&(f=t),t>g&&(g=t)}function t(t,e){var r=se([t*Na,e*Na]);if(m){var u=fe(m,r),i=[u[1],-u[0],0],o=fe(i,u);pe(o),o=ve(o);var c=t-p,s=c>0?1:-1,v=o[0]*La*s,d=oa(c)>180;if(d^(v>s*p&&s*t>v)){var y=o[1]*La;y>g&&(g=y)}else if(v=(v+360)%360-180,d^(v>s*p&&s*t>v)){var y=-o[1]*La;f>y&&(f=y)}else f>e&&(f=e),e>g&&(g=e);d?p>t?a(l,t)>a(l,h)&&(h=t):a(t,h)>a(l,h)&&(l=t):h>=l?(l>t&&(l=t),t>h&&(h=t)):t>p?a(l,t)>a(l,h)&&(h=t):a(t,h)>a(l,h)&&(l=t)}else n(t,e);m=r,p=t}function e(){_.point=t}function r(){M[0]=l,M[1]=h,_.point=n,m=null}function u(n,e){if(m){var r=n-p;y+=oa(r)>180?r+(r>0?360:-360):r}else v=n,d=e;gc.point(n,e),t(n,e)}function i(){gc.lineStart()}function 
o(){u(v,d),gc.lineEnd(),oa(y)>Aa&&(l=-(h=180)),M[0]=l,M[1]=h,m=null}function a(n,t){return(t-=n)<0?t+360:t}function c(n,t){return n[0]-t[0]}function s(n,t){return t[0]<=t[1]?t[0]<=n&&n<=t[1]:n<t[0]||t[1]<n}var l,f,h,g,p,v,d,m,y,x,M,_={point:n,lineStart:e,lineEnd:r,polygonStart:function(){_.point=u,_.lineStart=i,_.lineEnd=o,y=0,gc.polygonStart()},polygonEnd:function(){gc.polygonEnd(),_.point=n,_.lineStart=e,_.lineEnd=r,0>hc?(l=-(h=180),f=-(g=90)):y>Aa?g=90:-Aa>y&&(f=-90),M[0]=l,M[1]=h}};return function(n){g=h=-(l=f=1/0),x=[],Xo.geo.stream(n,_);var t=x.length;if(t){x.sort(c);for(var e,r=1,u=x[0],i=[u];t>r;++r)e=x[r],s(e[0],u)||s(e[1],u)?(a(u[0],e[1])>a(u[0],u[1])&&(u[1]=e[1]),a(e[0],u[1])>a(u[0],u[1])&&(u[0]=e[0])):i.push(u=e);for(var o,e,p=-1/0,t=i.length-1,r=0,u=i[t];t>=r;u=e,++r)e=i[r],(o=a(u[1],e[0]))>p&&(p=o,l=e[0],h=u[1])}return x=M=null,1/0===l||1/0===f?[[0/0,0/0],[0/0,0/0]]:[[l,f],[h,g]]}}(),Xo.geo.centroid=function(n){pc=vc=dc=mc=yc=xc=Mc=_c=bc=wc=Sc=0,Xo.geo.stream(n,kc);var t=bc,e=wc,r=Sc,u=t*t+e*e+r*r;return Ca>u&&(t=xc,e=Mc,r=_c,Aa>vc&&(t=dc,e=mc,r=yc),u=t*t+e*e+r*r,Ca>u)?[0/0,0/0]:[Math.atan2(e,t)*La,X(r/Math.sqrt(u))*La]};var pc,vc,dc,mc,yc,xc,Mc,_c,bc,wc,Sc,kc={sphere:g,point:me,lineStart:xe,lineEnd:Me,polygonStart:function(){kc.lineStart=_e},polygonEnd:function(){kc.lineStart=xe}},Ec=Ee(be,Te,ze,[-Sa,-Sa/2]),Ac=1e9;Xo.geo.clipExtent=function(){var n,t,e,r,u,i,o={stream:function(n){return u&&(u.valid=!1),u=i(n),u.valid=!0,u},extent:function(a){return arguments.length?(i=Pe(n=+a[0][0],t=+a[0][1],e=+a[1][0],r=+a[1][1]),u&&(u.valid=!1,u=null),o):[[n,t],[e,r]]}};return o.extent([[0,0],[960,500]])},(Xo.geo.conicEqualArea=function(){return je(He)}).raw=He,Xo.geo.albers=function(){return Xo.geo.conicEqualArea().rotate([96,0]).center([-.6,38.7]).parallels([29.5,45.5]).scale(1070)},Xo.geo.albersUsa=function(){function n(n){var i=n[0],o=n[1];return t=null,e(i,o),t||(r(i,o),t)||u(i,o),t}var t,e,r,u,i=Xo.geo.albers(),o=Xo.geo.conicEqualArea().rotate([154,0]).center([-2,58.5]).parallels([55,65]),a=Xo.geo.conicEqualArea().rotate([157,0]).center([-3,19.9]).parallels([8,18]),c={point:function(n,e){t=[n,e]}};return n.invert=function(n){var t=i.scale(),e=i.translate(),r=(n[0]-e[0])/t,u=(n[1]-e[1])/t;return(u>=.12&&.234>u&&r>=-.425&&-.214>r?o:u>=.166&&.234>u&&r>=-.214&&-.115>r?a:i).invert(n)},n.stream=function(n){var t=i.stream(n),e=o.stream(n),r=a.stream(n);return{point:function(n,u){t.point(n,u),e.point(n,u),r.point(n,u)},sphere:function(){t.sphere(),e.sphere(),r.sphere()},lineStart:function(){t.lineStart(),e.lineStart(),r.lineStart()},lineEnd:function(){t.lineEnd(),e.lineEnd(),r.lineEnd()},polygonStart:function(){t.polygonStart(),e.polygonStart(),r.polygonStart()},polygonEnd:function(){t.polygonEnd(),e.polygonEnd(),r.polygonEnd()}}},n.precision=function(t){return arguments.length?(i.precision(t),o.precision(t),a.precision(t),n):i.precision()},n.scale=function(t){return arguments.length?(i.scale(t),o.scale(.35*t),a.scale(t),n.translate(i.translate())):i.scale()},n.translate=function(t){if(!arguments.length)return i.translate();var s=i.scale(),l=+t[0],f=+t[1];return e=i.translate(t).clipExtent([[l-.455*s,f-.238*s],[l+.455*s,f+.238*s]]).stream(c).point,r=o.translate([l-.307*s,f+.201*s]).clipExtent([[l-.425*s+Aa,f+.12*s+Aa],[l-.214*s-Aa,f+.234*s-Aa]]).stream(c).point,u=a.translate([l-.205*s,f+.212*s]).clipExtent([[l-.214*s+Aa,f+.166*s+Aa],[l-.115*s-Aa,f+.234*s-Aa]]).stream(c).point,n},n.scale(1070)};var 
Cc,Nc,Lc,Tc,qc,zc,Rc={point:g,lineStart:g,lineEnd:g,polygonStart:function(){Nc=0,Rc.lineStart=Fe},polygonEnd:function(){Rc.lineStart=Rc.lineEnd=Rc.point=g,Cc+=oa(Nc/2)}},Dc={point:Oe,lineStart:g,lineEnd:g,polygonStart:g,polygonEnd:g},Pc={point:Ze,lineStart:Ve,lineEnd:Xe,polygonStart:function(){Pc.lineStart=$e},polygonEnd:function(){Pc.point=Ze,Pc.lineStart=Ve,Pc.lineEnd=Xe}};Xo.geo.path=function(){function n(n){return n&&("function"==typeof a&&i.pointRadius(+a.apply(this,arguments)),o&&o.valid||(o=u(i)),Xo.geo.stream(n,o)),i.result()}function t(){return o=null,n}var e,r,u,i,o,a=4.5;return n.area=function(n){return Cc=0,Xo.geo.stream(n,u(Rc)),Cc},n.centroid=function(n){return dc=mc=yc=xc=Mc=_c=bc=wc=Sc=0,Xo.geo.stream(n,u(Pc)),Sc?[bc/Sc,wc/Sc]:_c?[xc/_c,Mc/_c]:yc?[dc/yc,mc/yc]:[0/0,0/0]},n.bounds=function(n){return qc=zc=-(Lc=Tc=1/0),Xo.geo.stream(n,u(Dc)),[[Lc,Tc],[qc,zc]]},n.projection=function(n){return arguments.length?(u=(e=n)?n.stream||Je(n):bt,t()):e},n.context=function(n){return arguments.length?(i=null==(r=n)?new Ye:new Be(n),"function"!=typeof a&&i.pointRadius(a),t()):r},n.pointRadius=function(t){return arguments.length?(a="function"==typeof t?t:(i.pointRadius(+t),+t),n):a},n.projection(Xo.geo.albersUsa()).context(null)},Xo.geo.transform=function(n){return{stream:function(t){var e=new Ge(t);for(var r in n)e[r]=n[r];return e}}},Ge.prototype={point:function(n,t){this.stream.point(n,t)},sphere:function(){this.stream.sphere()},lineStart:function(){this.stream.lineStart()},lineEnd:function(){this.stream.lineEnd()},polygonStart:function(){this.stream.polygonStart()},polygonEnd:function(){this.stream.polygonEnd()}},Xo.geo.projection=Qe,Xo.geo.projectionMutator=nr,(Xo.geo.equirectangular=function(){return Qe(er)}).raw=er.invert=er,Xo.geo.rotation=function(n){function t(t){return t=n(t[0]*Na,t[1]*Na),t[0]*=La,t[1]*=La,t}return n=ur(n[0]%360*Na,n[1]*Na,n.length>2?n[2]*Na:0),t.invert=function(t){return t=n.invert(t[0]*Na,t[1]*Na),t[0]*=La,t[1]*=La,t},t},rr.invert=er,Xo.geo.circle=function(){function n(){var n="function"==typeof r?r.apply(this,arguments):r,t=ur(-n[0]*Na,-n[1]*Na,0).invert,u=[];return e(null,null,1,{point:function(n,e){u.push(n=t(n,e)),n[0]*=La,n[1]*=La}}),{type:"Polygon",coordinates:[u]}}var t,e,r=[0,0],u=6;return n.origin=function(t){return arguments.length?(r=t,n):r},n.angle=function(r){return arguments.length?(e=cr((t=+r)*Na,u*Na),n):t},n.precision=function(r){return arguments.length?(e=cr(t*Na,(u=+r)*Na),n):u},n.angle(90)},Xo.geo.distance=function(n,t){var e,r=(t[0]-n[0])*Na,u=n[1]*Na,i=t[1]*Na,o=Math.sin(r),a=Math.cos(r),c=Math.sin(u),s=Math.cos(u),l=Math.sin(i),f=Math.cos(i);return Math.atan2(Math.sqrt((e=f*o)*e+(e=s*l-c*f*a)*e),c*l+s*f*a)},Xo.geo.graticule=function(){function n(){return{type:"MultiLineString",coordinates:t()}}function t(){return Xo.range(Math.ceil(i/d)*d,u,d).map(h).concat(Xo.range(Math.ceil(s/m)*m,c,m).map(g)).concat(Xo.range(Math.ceil(r/p)*p,e,p).filter(function(n){return oa(n%d)>Aa}).map(l)).concat(Xo.range(Math.ceil(a/v)*v,o,v).filter(function(n){return oa(n%m)>Aa}).map(f))}var e,r,u,i,o,a,c,s,l,f,h,g,p=10,v=p,d=90,m=360,y=2.5;return n.lines=function(){return t().map(function(n){return{type:"LineString",coordinates:n}})},n.outline=function(){return{type:"Polygon",coordinates:[h(i).concat(g(c).slice(1),h(u).reverse().slice(1),g(s).reverse().slice(1))]}},n.extent=function(t){return arguments.length?n.majorExtent(t).minorExtent(t):n.minorExtent()},n.majorExtent=function(t){return 
arguments.length?(i=+t[0][0],u=+t[1][0],s=+t[0][1],c=+t[1][1],i>u&&(t=i,i=u,u=t),s>c&&(t=s,s=c,c=t),n.precision(y)):[[i,s],[u,c]]},n.minorExtent=function(t){return arguments.length?(r=+t[0][0],e=+t[1][0],a=+t[0][1],o=+t[1][1],r>e&&(t=r,r=e,e=t),a>o&&(t=a,a=o,o=t),n.precision(y)):[[r,a],[e,o]]},n.step=function(t){return arguments.length?n.majorStep(t).minorStep(t):n.minorStep()},n.majorStep=function(t){return arguments.length?(d=+t[0],m=+t[1],n):[d,m]},n.minorStep=function(t){return arguments.length?(p=+t[0],v=+t[1],n):[p,v]},n.precision=function(t){return arguments.length?(y=+t,l=lr(a,o,90),f=fr(r,e,y),h=lr(s,c,90),g=fr(i,u,y),n):y},n.majorExtent([[-180,-90+Aa],[180,90-Aa]]).minorExtent([[-180,-80-Aa],[180,80+Aa]])},Xo.geo.greatArc=function(){function n(){return{type:"LineString",coordinates:[t||r.apply(this,arguments),e||u.apply(this,arguments)]}}var t,e,r=hr,u=gr;return n.distance=function(){return Xo.geo.distance(t||r.apply(this,arguments),e||u.apply(this,arguments))},n.source=function(e){return arguments.length?(r=e,t="function"==typeof e?null:e,n):r},n.target=function(t){return arguments.length?(u=t,e="function"==typeof t?null:t,n):u},n.precision=function(){return arguments.length?n:0},n},Xo.geo.interpolate=function(n,t){return pr(n[0]*Na,n[1]*Na,t[0]*Na,t[1]*Na)},Xo.geo.length=function(n){return Uc=0,Xo.geo.stream(n,jc),Uc};var Uc,jc={sphere:g,point:g,lineStart:vr,lineEnd:g,polygonStart:g,polygonEnd:g},Hc=dr(function(n){return Math.sqrt(2/(1+n))},function(n){return 2*Math.asin(n/2)});(Xo.geo.azimuthalEqualArea=function(){return Qe(Hc)}).raw=Hc;var Fc=dr(function(n){var t=Math.acos(n);return t&&t/Math.sin(t)},bt);(Xo.geo.azimuthalEquidistant=function(){return Qe(Fc)}).raw=Fc,(Xo.geo.conicConformal=function(){return je(mr)}).raw=mr,(Xo.geo.conicEquidistant=function(){return je(yr)}).raw=yr;var Oc=dr(function(n){return 1/n},Math.atan);(Xo.geo.gnomonic=function(){return Qe(Oc)}).raw=Oc,xr.invert=function(n,t){return[n,2*Math.atan(Math.exp(t))-Ea]},(Xo.geo.mercator=function(){return Mr(xr)}).raw=xr;var Yc=dr(function(){return 1},Math.asin);(Xo.geo.orthographic=function(){return Qe(Yc)}).raw=Yc;var Ic=dr(function(n){return 1/(1+n)},function(n){return 2*Math.atan(n)});(Xo.geo.stereographic=function(){return Qe(Ic)}).raw=Ic,_r.invert=function(n,t){return[-t,2*Math.atan(Math.exp(n))-Ea]},(Xo.geo.transverseMercator=function(){var n=Mr(_r),t=n.center,e=n.rotate;return n.center=function(n){return n?t([-n[1],n[0]]):(n=t(),[-n[1],n[0]])},n.rotate=function(n){return n?e([n[0],n[1],n.length>2?n[2]+90:90]):(n=e(),[n[0],n[1],n[2]-90])},n.rotate([0,0])}).raw=_r,Xo.geom={},Xo.geom.hull=function(n){function t(n){if(n.length<3)return[];var t,u=_t(e),i=_t(r),o=n.length,a=[],c=[];for(t=0;o>t;t++)a.push([+u.call(this,n[t],t),+i.call(this,n[t],t),t]);for(a.sort(kr),t=0;o>t;t++)c.push([a[t][0],-a[t][1]]);var s=Sr(a),l=Sr(c),f=l[0]===s[0],h=l[l.length-1]===s[s.length-1],g=[];for(t=s.length-1;t>=0;--t)g.push(n[a[s[t]][2]]);for(t=+f;t<l.length-h;++t)g.push(n[a[l[t]][2]]);return g}var e=br,r=wr;return arguments.length?t(n):(t.x=function(n){return arguments.length?(e=n,t):e},t.y=function(n){return arguments.length?(r=n,t):r},t)},Xo.geom.polygon=function(n){return fa(n,Zc),n};var Zc=Xo.geom.polygon.prototype=[];Zc.area=function(){for(var n,t=-1,e=this.length,r=this[e-1],u=0;++t<e;)n=r,r=this[t],u+=n[1]*r[0]-n[0]*r[1];return.5*u},Zc.centroid=function(n){var 
t,e,r=-1,u=this.length,i=0,o=0,a=this[u-1];for(arguments.length||(n=-1/(6*this.area()));++r<u;)t=a,a=this[r],e=t[0]*a[1]-a[0]*t[1],i+=(t[0]+a[0])*e,o+=(t[1]+a[1])*e;return[i*n,o*n]},Zc.clip=function(n){for(var t,e,r,u,i,o,a=Cr(n),c=-1,s=this.length-Cr(this),l=this[s-1];++c<s;){for(t=n.slice(),n.length=0,u=this[c],i=t[(r=t.length-a)-1],e=-1;++e<r;)o=t[e],Er(o,l,u)?(Er(i,l,u)||n.push(Ar(i,o,l,u)),n.push(o)):Er(i,l,u)&&n.push(Ar(i,o,l,u)),i=o;a&&n.push(n[0]),l=u}return n};var Vc,Xc,$c,Bc,Wc,Jc=[],Gc=[];Pr.prototype.prepare=function(){for(var n,t=this.edges,e=t.length;e--;)n=t[e].edge,n.b&&n.a||t.splice(e,1);return t.sort(jr),t.length},Br.prototype={start:function(){return this.edge.l===this.site?this.edge.a:this.edge.b},end:function(){return this.edge.l===this.site?this.edge.b:this.edge.a}},Wr.prototype={insert:function(n,t){var e,r,u;if(n){if(t.P=n,t.N=n.N,n.N&&(n.N.P=t),n.N=t,n.R){for(n=n.R;n.L;)n=n.L;n.L=t}else n.R=t;e=n}else this._?(n=Qr(this._),t.P=null,t.N=n,n.P=n.L=t,e=n):(t.P=t.N=null,this._=t,e=null);for(t.L=t.R=null,t.U=e,t.C=!0,n=t;e&&e.C;)r=e.U,e===r.L?(u=r.R,u&&u.C?(e.C=u.C=!1,r.C=!0,n=r):(n===e.R&&(Gr(this,e),n=e,e=n.U),e.C=!1,r.C=!0,Kr(this,r))):(u=r.L,u&&u.C?(e.C=u.C=!1,r.C=!0,n=r):(n===e.L&&(Kr(this,e),n=e,e=n.U),e.C=!1,r.C=!0,Gr(this,r))),e=n.U;this._.C=!1},remove:function(n){n.N&&(n.N.P=n.P),n.P&&(n.P.N=n.N),n.N=n.P=null;var t,e,r,u=n.U,i=n.L,o=n.R;if(e=i?o?Qr(o):i:o,u?u.L===n?u.L=e:u.R=e:this._=e,i&&o?(r=e.C,e.C=n.C,e.L=i,i.U=e,e!==o?(u=e.U,e.U=n.U,n=e.R,u.L=n,e.R=o,o.U=e):(e.U=u,u=e,n=e.R)):(r=n.C,n=e),n&&(n.U=u),!r){if(n&&n.C)return n.C=!1,void 0;do{if(n===this._)break;if(n===u.L){if(t=u.R,t.C&&(t.C=!1,u.C=!0,Gr(this,u),t=u.R),t.L&&t.L.C||t.R&&t.R.C){t.R&&t.R.C||(t.L.C=!1,t.C=!0,Kr(this,t),t=u.R),t.C=u.C,u.C=t.R.C=!1,Gr(this,u),n=this._;break}}else if(t=u.L,t.C&&(t.C=!1,u.C=!0,Kr(this,u),t=u.L),t.L&&t.L.C||t.R&&t.R.C){t.L&&t.L.C||(t.R.C=!1,t.C=!0,Gr(this,t),t=u.L),t.C=u.C,u.C=t.L.C=!1,Kr(this,u),n=this._;break}t.C=!0,n=u,u=u.U}while(!n.C);n&&(n.C=!1)}}},Xo.geom.voronoi=function(n){function t(n){var t=new Array(n.length),r=a[0][0],u=a[0][1],i=a[1][0],o=a[1][1];return nu(e(n),a).cells.forEach(function(e,a){var c=e.edges,s=e.site,l=t[a]=c.length?c.map(function(n){var t=n.start();return[t.x,t.y]}):s.x>=r&&s.x<=i&&s.y>=u&&s.y<=o?[[r,o],[i,o],[i,u],[r,u]]:[];l.point=n[a]}),t}function e(n){return n.map(function(n,t){return{x:Math.round(i(n,t)/Aa)*Aa,y:Math.round(o(n,t)/Aa)*Aa,i:t}})}var r=br,u=wr,i=r,o=u,a=Kc;return n?t(n):(t.links=function(n){return nu(e(n)).edges.filter(function(n){return n.l&&n.r}).map(function(t){return{source:n[t.l.i],target:n[t.r.i]}})},t.triangles=function(n){var t=[];return nu(e(n)).cells.forEach(function(e,r){for(var u,i,o=e.site,a=e.edges.sort(jr),c=-1,s=a.length,l=a[s-1].edge,f=l.l===o?l.r:l.l;++c<s;)u=l,i=f,l=a[c].edge,f=l.l===o?l.r:l.l,r<i.i&&r<f.i&&eu(o,i,f)<0&&t.push([n[r],n[i.i],n[f.i]])}),t},t.x=function(n){return arguments.length?(i=_t(r=n),t):r},t.y=function(n){return arguments.length?(o=_t(u=n),t):u},t.clipExtent=function(n){return arguments.length?(a=null==n?Kc:n,t):a===Kc?null:a},t.size=function(n){return arguments.length?t.clipExtent(n&&[[0,0],n]):a===Kc?null:a&&a[1]},t)};var Kc=[[-1e6,-1e6],[1e6,1e6]];Xo.geom.delaunay=function(n){return Xo.geom.voronoi().triangles(n)},Xo.geom.quadtree=function(n,t,e,r,u){function i(n){function i(n,t,e,r,u,i,o,a){if(!isNaN(e)&&!isNaN(r))if(n.leaf){var c=n.x,l=n.y;if(null!=c)if(oa(c-e)+oa(l-r)<.01)s(n,t,e,r,u,i,o,a);else{var f=n.point;n.x=n.y=n.point=null,s(n,f,c,l,u,i,o,a),s(n,t,e,r,u,i,o,a)}else 
n.x=e,n.y=r,n.point=t}else s(n,t,e,r,u,i,o,a)}function s(n,t,e,r,u,o,a,c){var s=.5*(u+a),l=.5*(o+c),f=e>=s,h=r>=l,g=(h<<1)+f;n.leaf=!1,n=n.nodes[g]||(n.nodes[g]=iu()),f?u=s:a=s,h?o=l:c=l,i(n,t,e,r,u,o,a,c)}var l,f,h,g,p,v,d,m,y,x=_t(a),M=_t(c);if(null!=t)v=t,d=e,m=r,y=u;else if(m=y=-(v=d=1/0),f=[],h=[],p=n.length,o)for(g=0;p>g;++g)l=n[g],l.x<v&&(v=l.x),l.y<d&&(d=l.y),l.x>m&&(m=l.x),l.y>y&&(y=l.y),f.push(l.x),h.push(l.y);else for(g=0;p>g;++g){var _=+x(l=n[g],g),b=+M(l,g);v>_&&(v=_),d>b&&(d=b),_>m&&(m=_),b>y&&(y=b),f.push(_),h.push(b)}var w=m-v,S=y-d;w>S?y=d+w:m=v+S;var k=iu();if(k.add=function(n){i(k,n,+x(n,++g),+M(n,g),v,d,m,y)},k.visit=function(n){ou(n,k,v,d,m,y)},g=-1,null==t){for(;++g<p;)i(k,n[g],f[g],h[g],v,d,m,y);--g}else n.forEach(k.add);return f=h=n=l=null,k}var o,a=br,c=wr;return(o=arguments.length)?(a=ru,c=uu,3===o&&(u=e,r=t,e=t=0),i(n)):(i.x=function(n){return arguments.length?(a=n,i):a},i.y=function(n){return arguments.length?(c=n,i):c},i.extent=function(n){return arguments.length?(null==n?t=e=r=u=null:(t=+n[0][0],e=+n[0][1],r=+n[1][0],u=+n[1][1]),i):null==t?null:[[t,e],[r,u]]},i.size=function(n){return arguments.length?(null==n?t=e=r=u=null:(t=e=0,r=+n[0],u=+n[1]),i):null==t?null:[r-t,u-e]},i)},Xo.interpolateRgb=au,Xo.interpolateObject=cu,Xo.interpolateNumber=su,Xo.interpolateString=lu;var Qc=/[-+]?(?:\d+\.?\d*|\.?\d+)(?:[eE][-+]?\d+)?/g;Xo.interpolate=fu,Xo.interpolators=[function(n,t){var e=typeof t;return("string"===e?Va.has(t)||/^(#|rgb\(|hsl\()/.test(t)?au:lu:t instanceof G?au:"object"===e?Array.isArray(t)?hu:cu:su)(n,t)}],Xo.interpolateArray=hu;var ns=function(){return bt},ts=Xo.map({linear:ns,poly:xu,quad:function(){return du},cubic:function(){return mu},sin:function(){return Mu},exp:function(){return _u},circle:function(){return bu},elastic:wu,back:Su,bounce:function(){return ku}}),es=Xo.map({"in":bt,out:pu,"in-out":vu,"out-in":function(n){return vu(pu(n))}});Xo.ease=function(n){var t=n.indexOf("-"),e=t>=0?n.substring(0,t):n,r=t>=0?n.substring(t+1):"in";return e=ts.get(e)||ns,r=es.get(r)||bt,gu(r(e.apply(null,$o.call(arguments,1))))},Xo.interpolateHcl=Eu,Xo.interpolateHsl=Au,Xo.interpolateLab=Cu,Xo.interpolateRound=Nu,Xo.transform=function(n){var t=Wo.createElementNS(Xo.ns.prefix.svg,"g");return(Xo.transform=function(n){if(null!=n){t.setAttribute("transform",n);var e=t.transform.baseVal.consolidate()}return new Lu(e?e.matrix:rs)})(n)},Lu.prototype.toString=function(){return"translate("+this.translate+")rotate("+this.rotate+")skewX("+this.skew+")scale("+this.scale+")"};var rs={a:1,b:0,c:0,d:1,e:0,f:0};Xo.interpolateTransform=Ru,Xo.layout={},Xo.layout.bundle=function(){return function(n){for(var t=[],e=-1,r=n.length;++e<r;)t.push(Uu(n[e]));return t}},Xo.layout.chord=function(){function n(){var n,s,f,h,g,p={},v=[],d=Xo.range(i),m=[];for(e=[],r=[],n=0,h=-1;++h<i;){for(s=0,g=-1;++g<i;)s+=u[h][g];v.push(s),m.push(Xo.range(i)),n+=s}for(o&&d.sort(function(n,t){return o(v[n],v[t])}),a&&m.forEach(function(n,t){n.sort(function(n,e){return a(u[t][n],u[t][e])})}),n=(ka-l*i)/n,s=0,h=-1;++h<i;){for(f=s,g=-1;++g<i;){var y=d[h],x=m[y][g],M=u[y][x],_=s,b=s+=M*n;p[y+"-"+x]={index:y,subindex:x,startAngle:_,endAngle:b,value:M}}r[y]={index:y,startAngle:f,endAngle:s,value:(s-f)/n},s+=l}for(h=-1;++h<i;)for(g=h-1;++g<i;){var w=p[h+"-"+g],S=p[g+"-"+h];(w.value||S.value)&&e.push(w.value<S.value?{source:S,target:w}:{source:w,target:S})}c&&t()}function t(){e.sort(function(n,t){return c((n.source.value+n.target.value)/2,(t.source.value+t.target.value)/2)})}var e,r,u,i,o,a,c,s={},l=0;return 
s.matrix=function(n){return arguments.length?(i=(u=n)&&u.length,e=r=null,s):u},s.padding=function(n){return arguments.length?(l=n,e=r=null,s):l},s.sortGroups=function(n){return arguments.length?(o=n,e=r=null,s):o},s.sortSubgroups=function(n){return arguments.length?(a=n,e=null,s):a},s.sortChords=function(n){return arguments.length?(c=n,e&&t(),s):c},s.chords=function(){return e||n(),e},s.groups=function(){return r||n(),r},s},Xo.layout.force=function(){function n(n){return function(t,e,r,u){if(t.point!==n){var i=t.cx-n.x,o=t.cy-n.y,a=u-e,c=i*i+o*o;if(c>a*a/d){if(p>c){var s=t.charge/c;n.px-=i*s,n.py-=o*s}return!0}if(t.point&&c&&p>c){var s=t.pointCharge/c;n.px-=i*s,n.py-=o*s}}return!t.charge}}function t(n){n.px=Xo.event.x,n.py=Xo.event.y,a.resume()}var e,r,u,i,o,a={},c=Xo.dispatch("start","tick","end"),s=[1,1],l=.9,f=us,h=is,g=-30,p=os,v=.1,d=.64,m=[],y=[];return a.tick=function(){if((r*=.99)<.005)return c.end({type:"end",alpha:r=0}),!0;var t,e,a,f,h,p,d,x,M,_=m.length,b=y.length;for(e=0;b>e;++e)a=y[e],f=a.source,h=a.target,x=h.x-f.x,M=h.y-f.y,(p=x*x+M*M)&&(p=r*i[e]*((p=Math.sqrt(p))-u[e])/p,x*=p,M*=p,h.x-=x*(d=f.weight/(h.weight+f.weight)),h.y-=M*d,f.x+=x*(d=1-d),f.y+=M*d);if((d=r*v)&&(x=s[0]/2,M=s[1]/2,e=-1,d))for(;++e<_;)a=m[e],a.x+=(x-a.x)*d,a.y+=(M-a.y)*d;if(g)for(Zu(t=Xo.geom.quadtree(m),r,o),e=-1;++e<_;)(a=m[e]).fixed||t.visit(n(a));for(e=-1;++e<_;)a=m[e],a.fixed?(a.x=a.px,a.y=a.py):(a.x-=(a.px-(a.px=a.x))*l,a.y-=(a.py-(a.py=a.y))*l);c.tick({type:"tick",alpha:r})},a.nodes=function(n){return arguments.length?(m=n,a):m},a.links=function(n){return arguments.length?(y=n,a):y},a.size=function(n){return arguments.length?(s=n,a):s},a.linkDistance=function(n){return arguments.length?(f="function"==typeof n?n:+n,a):f},a.distance=a.linkDistance,a.linkStrength=function(n){return arguments.length?(h="function"==typeof n?n:+n,a):h},a.friction=function(n){return arguments.length?(l=+n,a):l},a.charge=function(n){return arguments.length?(g="function"==typeof n?n:+n,a):g},a.chargeDistance=function(n){return arguments.length?(p=n*n,a):Math.sqrt(p)},a.gravity=function(n){return arguments.length?(v=+n,a):v},a.theta=function(n){return arguments.length?(d=n*n,a):Math.sqrt(d)},a.alpha=function(n){return arguments.length?(n=+n,r?r=n>0?n:0:n>0&&(c.start({type:"start",alpha:r=n}),Xo.timer(a.tick)),a):r},a.start=function(){function n(n,r){if(!e){for(e=new Array(c),a=0;c>a;++a)e[a]=[];for(a=0;s>a;++a){var u=y[a];e[u.source.index].push(u.target),e[u.target.index].push(u.source)}}for(var i,o=e[t],a=-1,s=o.length;++a<s;)if(!isNaN(i=o[a][n]))return i;return Math.random()*r}var t,e,r,c=m.length,l=y.length,p=s[0],v=s[1];for(t=0;c>t;++t)(r=m[t]).index=t,r.weight=0;for(t=0;l>t;++t)r=y[t],"number"==typeof r.source&&(r.source=m[r.source]),"number"==typeof r.target&&(r.target=m[r.target]),++r.source.weight,++r.target.weight;for(t=0;c>t;++t)r=m[t],isNaN(r.x)&&(r.x=n("x",p)),isNaN(r.y)&&(r.y=n("y",v)),isNaN(r.px)&&(r.px=r.x),isNaN(r.py)&&(r.py=r.y);if(u=[],"function"==typeof f)for(t=0;l>t;++t)u[t]=+f.call(this,y[t],t);else for(t=0;l>t;++t)u[t]=f;if(i=[],"function"==typeof h)for(t=0;l>t;++t)i[t]=+h.call(this,y[t],t);else for(t=0;l>t;++t)i[t]=h;if(o=[],"function"==typeof g)for(t=0;c>t;++t)o[t]=+g.call(this,m[t],t);else for(t=0;c>t;++t)o[t]=g;return a.resume()},a.resume=function(){return a.alpha(.1)},a.stop=function(){return a.alpha(0)},a.drag=function(){return 
e||(e=Xo.behavior.drag().origin(bt).on("dragstart.force",Fu).on("drag.force",t).on("dragend.force",Ou)),arguments.length?(this.on("mouseover.force",Yu).on("mouseout.force",Iu).call(e),void 0):e},Xo.rebind(a,c,"on")};var us=20,is=1,os=1/0;Xo.layout.hierarchy=function(){function n(t,o,a){var c=u.call(e,t,o);if(t.depth=o,a.push(t),c&&(s=c.length)){for(var s,l,f=-1,h=t.children=new Array(s),g=0,p=o+1;++f<s;)l=h[f]=n(c[f],p,a),l.parent=t,g+=l.value;r&&h.sort(r),i&&(t.value=g)}else delete t.children,i&&(t.value=+i.call(e,t,o)||0);return t}function t(n,r){var u=n.children,o=0;if(u&&(a=u.length))for(var a,c=-1,s=r+1;++c<a;)o+=t(u[c],s);else i&&(o=+i.call(e,n,r)||0);return i&&(n.value=o),o}function e(t){var e=[];return n(t,0,e),e}var r=Bu,u=Xu,i=$u;return e.sort=function(n){return arguments.length?(r=n,e):r},e.children=function(n){return arguments.length?(u=n,e):u},e.value=function(n){return arguments.length?(i=n,e):i},e.revalue=function(n){return t(n,0),n},e},Xo.layout.partition=function(){function n(t,e,r,u){var i=t.children;if(t.x=e,t.y=t.depth*u,t.dx=r,t.dy=u,i&&(o=i.length)){var o,a,c,s=-1;for(r=t.value?r/t.value:0;++s<o;)n(a=i[s],e,c=a.value*r,u),e+=c}}function t(n){var e=n.children,r=0;if(e&&(u=e.length))for(var u,i=-1;++i<u;)r=Math.max(r,t(e[i]));return 1+r}function e(e,i){var o=r.call(this,e,i);return n(o[0],0,u[0],u[1]/t(o[0])),o}var r=Xo.layout.hierarchy(),u=[1,1];return e.size=function(n){return arguments.length?(u=n,e):u},Vu(e,r)},Xo.layout.pie=function(){function n(i){var o=i.map(function(e,r){return+t.call(n,e,r)}),a=+("function"==typeof r?r.apply(this,arguments):r),c=(("function"==typeof u?u.apply(this,arguments):u)-a)/Xo.sum(o),s=Xo.range(i.length);null!=e&&s.sort(e===as?function(n,t){return o[t]-o[n]}:function(n,t){return e(i[n],i[t])});var l=[];return s.forEach(function(n){var t;l[n]={data:i[n],value:t=o[n],startAngle:a,endAngle:a+=t*c}}),l}var t=Number,e=as,r=0,u=ka;return n.value=function(e){return arguments.length?(t=e,n):t},n.sort=function(t){return arguments.length?(e=t,n):e},n.startAngle=function(t){return arguments.length?(r=t,n):r},n.endAngle=function(t){return arguments.length?(u=t,n):u},n};var as={};Xo.layout.stack=function(){function n(a,c){var s=a.map(function(e,r){return t.call(n,e,r)}),l=s.map(function(t){return t.map(function(t,e){return[i.call(n,t,e),o.call(n,t,e)]})}),f=e.call(n,l,c);s=Xo.permute(s,f),l=Xo.permute(l,f);var h,g,p,v=r.call(n,l,c),d=s.length,m=s[0].length;for(g=0;m>g;++g)for(u.call(n,s[0][g],p=v[g],l[0][g][1]),h=1;d>h;++h)u.call(n,s[h][g],p+=l[h-1][g][1],l[h][g][1]);return a}var t=bt,e=Qu,r=ni,u=Ku,i=Ju,o=Gu;return n.values=function(e){return arguments.length?(t=e,n):t},n.order=function(t){return arguments.length?(e="function"==typeof t?t:cs.get(t)||Qu,n):e},n.offset=function(t){return arguments.length?(r="function"==typeof t?t:ss.get(t)||ni,n):r},n.x=function(t){return arguments.length?(i=t,n):i},n.y=function(t){return arguments.length?(o=t,n):o},n.out=function(t){return arguments.length?(u=t,n):u},n};var cs=Xo.map({"inside-out":function(n){var t,e,r=n.length,u=n.map(ti),i=n.map(ei),o=Xo.range(r).sort(function(n,t){return u[n]-u[t]}),a=0,c=0,s=[],l=[];for(t=0;r>t;++t)e=o[t],c>a?(a+=i[e],s.push(e)):(c+=i[e],l.push(e));return l.reverse().concat(s)},reverse:function(n){return Xo.range(n.length).reverse()},"default":Qu}),ss=Xo.map({silhouette:function(n){var t,e,r,u=n.length,i=n[0].length,o=[],a=0,c=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];r>a&&(a=r),o.push(r)}for(e=0;i>e;++e)c[e]=(a-o[e])/2;return c},wiggle:function(n){var 
t,e,r,u,i,o,a,c,s,l=n.length,f=n[0],h=f.length,g=[];for(g[0]=c=s=0,e=1;h>e;++e){for(t=0,u=0;l>t;++t)u+=n[t][e][1];for(t=0,i=0,a=f[e][0]-f[e-1][0];l>t;++t){for(r=0,o=(n[t][e][1]-n[t][e-1][1])/(2*a);t>r;++r)o+=(n[r][e][1]-n[r][e-1][1])/a;i+=o*n[t][e][1]}g[e]=c-=u?i/u*a:0,s>c&&(s=c)}for(e=0;h>e;++e)g[e]-=s;return g},expand:function(n){var t,e,r,u=n.length,i=n[0].length,o=1/u,a=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];if(r)for(t=0;u>t;t++)n[t][e][1]/=r;else for(t=0;u>t;t++)n[t][e][1]=o}for(e=0;i>e;++e)a[e]=0;return a},zero:ni});Xo.layout.histogram=function(){function n(n,i){for(var o,a,c=[],s=n.map(e,this),l=r.call(this,s,i),f=u.call(this,l,s,i),i=-1,h=s.length,g=f.length-1,p=t?1:1/h;++i<g;)o=c[i]=[],o.dx=f[i+1]-(o.x=f[i]),o.y=0;if(g>0)for(i=-1;++i<h;)a=s[i],a>=l[0]&&a<=l[1]&&(o=c[Xo.bisect(f,a,1,g)-1],o.y+=p,o.push(n[i]));return c}var t=!0,e=Number,r=oi,u=ui;return n.value=function(t){return arguments.length?(e=t,n):e},n.range=function(t){return arguments.length?(r=_t(t),n):r},n.bins=function(t){return arguments.length?(u="number"==typeof t?function(n){return ii(n,t)}:_t(t),n):u},n.frequency=function(e){return arguments.length?(t=!!e,n):t},n},Xo.layout.tree=function(){function n(n,i){function o(n,t){var r=n.children,u=n._tree;if(r&&(i=r.length)){for(var i,a,s,l=r[0],f=l,h=-1;++h<i;)s=r[h],o(s,a),f=c(s,a,f),a=s;vi(n);var g=.5*(l._tree.prelim+s._tree.prelim);t?(u.prelim=t._tree.prelim+e(n,t),u.mod=u.prelim-g):u.prelim=g}else t&&(u.prelim=t._tree.prelim+e(n,t))}function a(n,t){n.x=n._tree.prelim+t;var e=n.children;if(e&&(r=e.length)){var r,u=-1;for(t+=n._tree.mod;++u<r;)a(e[u],t)}}function c(n,t,r){if(t){for(var u,i=n,o=n,a=t,c=n.parent.children[0],s=i._tree.mod,l=o._tree.mod,f=a._tree.mod,h=c._tree.mod;a=si(a),i=ci(i),a&&i;)c=ci(c),o=si(o),o._tree.ancestor=n,u=a._tree.prelim+f-i._tree.prelim-s+e(a,i),u>0&&(di(mi(a,n,r),n,u),s+=u,l+=u),f+=a._tree.mod,s+=i._tree.mod,h+=c._tree.mod,l+=o._tree.mod;a&&!si(o)&&(o._tree.thread=a,o._tree.mod+=f-l),i&&!ci(c)&&(c._tree.thread=i,c._tree.mod+=s-h,r=n)}return r}var s=t.call(this,n,i),l=s[0];pi(l,function(n,t){n._tree={ancestor:n,prelim:0,mod:0,change:0,shift:0,number:t?t._tree.number+1:0}}),o(l),a(l,-l._tree.prelim);var f=li(l,hi),h=li(l,fi),g=li(l,gi),p=f.x-e(f,h)/2,v=h.x+e(h,f)/2,d=g.depth||1;return pi(l,u?function(n){n.x*=r[0],n.y=n.depth*r[1],delete n._tree}:function(n){n.x=(n.x-p)/(v-p)*r[0],n.y=n.depth/d*r[1],delete n._tree}),s}var t=Xo.layout.hierarchy().sort(null).value(null),e=ai,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},Vu(n,t)},Xo.layout.pack=function(){function n(n,i){var o=e.call(this,n,i),a=o[0],c=u[0],s=u[1],l=null==t?Math.sqrt:"function"==typeof t?t:function(){return t};if(a.x=a.y=0,pi(a,function(n){n.r=+l(n.value)}),pi(a,bi),r){var f=r*(t?1:Math.max(2*a.r/c,2*a.r/s))/2;pi(a,function(n){n.r+=f}),pi(a,bi),pi(a,function(n){n.r-=f})}return ki(a,c/2,s/2,t?1:1/Math.max(2*a.r/c,2*a.r/s)),o}var t,e=Xo.layout.hierarchy().sort(yi),r=0,u=[1,1];return n.size=function(t){return arguments.length?(u=t,n):u},n.radius=function(e){return arguments.length?(t=null==e||"function"==typeof e?e:+e,n):t},n.padding=function(t){return arguments.length?(r=+t,n):r},Vu(n,e)},Xo.layout.cluster=function(){function n(n,i){var o,a=t.call(this,n,i),c=a[0],s=0;pi(c,function(n){var t=n.children;t&&t.length?(n.x=Ci(t),n.y=Ai(t)):(n.x=o?s+=e(n,o):0,n.y=0,o=n)});var 
l=Ni(c),f=Li(c),h=l.x-e(l,f)/2,g=f.x+e(f,l)/2;return pi(c,u?function(n){n.x=(n.x-c.x)*r[0],n.y=(c.y-n.y)*r[1]}:function(n){n.x=(n.x-h)/(g-h)*r[0],n.y=(1-(c.y?n.y/c.y:1))*r[1]}),a}var t=Xo.layout.hierarchy().sort(null).value(null),e=ai,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},Vu(n,t)},Xo.layout.treemap=function(){function n(n,t){for(var e,r,u=-1,i=n.length;++u<i;)r=(e=n[u]).value*(0>t?0:t),e.area=isNaN(r)||0>=r?0:r}function t(e){var i=e.children;if(i&&i.length){var o,a,c,s=f(e),l=[],h=i.slice(),p=1/0,v="slice"===g?s.dx:"dice"===g?s.dy:"slice-dice"===g?1&e.depth?s.dy:s.dx:Math.min(s.dx,s.dy);for(n(h,s.dx*s.dy/e.value),l.area=0;(c=h.length)>0;)l.push(o=h[c-1]),l.area+=o.area,"squarify"!==g||(a=r(l,v))<=p?(h.pop(),p=a):(l.area-=l.pop().area,u(l,v,s,!1),v=Math.min(s.dx,s.dy),l.length=l.area=0,p=1/0);l.length&&(u(l,v,s,!0),l.length=l.area=0),i.forEach(t)}}function e(t){var r=t.children;if(r&&r.length){var i,o=f(t),a=r.slice(),c=[];for(n(a,o.dx*o.dy/t.value),c.area=0;i=a.pop();)c.push(i),c.area+=i.area,null!=i.z&&(u(c,i.z?o.dx:o.dy,o,!a.length),c.length=c.area=0);r.forEach(e)}}function r(n,t){for(var e,r=n.area,u=0,i=1/0,o=-1,a=n.length;++o<a;)(e=n[o].area)&&(i>e&&(i=e),e>u&&(u=e));return r*=r,t*=t,r?Math.max(t*u*p/r,r/(t*i*p)):1/0}function u(n,t,e,r){var u,i=-1,o=n.length,a=e.x,s=e.y,l=t?c(n.area/t):0;if(t==e.dx){for((r||l>e.dy)&&(l=e.dy);++i<o;)u=n[i],u.x=a,u.y=s,u.dy=l,a+=u.dx=Math.min(e.x+e.dx-a,l?c(u.area/l):0);u.z=!0,u.dx+=e.x+e.dx-a,e.y+=l,e.dy-=l}else{for((r||l>e.dx)&&(l=e.dx);++i<o;)u=n[i],u.x=a,u.y=s,u.dx=l,s+=u.dy=Math.min(e.y+e.dy-s,l?c(u.area/l):0);u.z=!1,u.dy+=e.y+e.dy-s,e.x+=l,e.dx-=l}}function i(r){var u=o||a(r),i=u[0];return i.x=0,i.y=0,i.dx=s[0],i.dy=s[1],o&&a.revalue(i),n([i],i.dx*i.dy/i.value),(o?e:t)(i),h&&(o=u),u}var o,a=Xo.layout.hierarchy(),c=Math.round,s=[1,1],l=null,f=Ti,h=!1,g="squarify",p=.5*(1+Math.sqrt(5));return i.size=function(n){return arguments.length?(s=n,i):s},i.padding=function(n){function t(t){var e=n.call(i,t,t.depth);return null==e?Ti(t):qi(t,"number"==typeof e?[e,e,e,e]:e)}function e(t){return qi(t,n)}if(!arguments.length)return l;var r;return f=null==(l=n)?Ti:"function"==(r=typeof n)?t:"number"===r?(n=[n,n,n,n],e):e,i},i.round=function(n){return arguments.length?(c=n?Math.round:Number,i):c!=Number},i.sticky=function(n){return arguments.length?(h=n,o=null,i):h},i.ratio=function(n){return arguments.length?(p=n,i):p},i.mode=function(n){return arguments.length?(g=n+"",i):g},Vu(i,a)},Xo.random={normal:function(n,t){var e=arguments.length;return 2>e&&(t=1),1>e&&(n=0),function(){var e,r,u;do e=2*Math.random()-1,r=2*Math.random()-1,u=e*e+r*r;while(!u||u>1);return n+t*e*Math.sqrt(-2*Math.log(u)/u)}},logNormal:function(){var n=Xo.random.normal.apply(Xo,arguments);return function(){return Math.exp(n())}},bates:function(n){var t=Xo.random.irwinHall(n);return function(){return t()/n}},irwinHall:function(n){return function(){for(var t=0,e=0;n>e;e++)t+=Math.random();return t}}},Xo.scale={};var ls={floor:bt,ceil:bt};Xo.scale.linear=function(){return Hi([0,1],[0,1],fu,!1)};var fs={s:1,g:1,p:1,r:1,e:1};Xo.scale.log=function(){return $i(Xo.scale.linear().domain([0,1]),10,!0,[1,10])};var hs=Xo.format(".0e"),gs={floor:function(n){return-Math.ceil(-n)},ceil:function(n){return-Math.floor(-n)}};Xo.scale.pow=function(){return Bi(Xo.scale.linear(),1,[0,1])},Xo.scale.sqrt=function(){return 
Xo.scale.pow().exponent(.5)},Xo.scale.ordinal=function(){return Ji([],{t:"range",a:[[]]})},Xo.scale.category10=function(){return Xo.scale.ordinal().range(ps)},Xo.scale.category20=function(){return Xo.scale.ordinal().range(vs)},Xo.scale.category20b=function(){return Xo.scale.ordinal().range(ds)},Xo.scale.category20c=function(){return Xo.scale.ordinal().range(ms)};var ps=[2062260,16744206,2924588,14034728,9725885,9197131,14907330,8355711,12369186,1556175].map(ht),vs=[2062260,11454440,16744206,16759672,2924588,10018698,14034728,16750742,9725885,12955861,9197131,12885140,14907330,16234194,8355711,13092807,12369186,14408589,1556175,10410725].map(ht),ds=[3750777,5395619,7040719,10264286,6519097,9216594,11915115,13556636,9202993,12426809,15186514,15190932,8666169,11356490,14049643,15177372,8077683,10834324,13528509,14589654].map(ht),ms=[3244733,7057110,10406625,13032431,15095053,16616764,16625259,16634018,3253076,7652470,10607003,13101504,7695281,10394312,12369372,14342891,6513507,9868950,12434877,14277081].map(ht);Xo.scale.quantile=function(){return Gi([],[])},Xo.scale.quantize=function(){return Ki(0,1,[0,1])},Xo.scale.threshold=function(){return Qi([.5],[0,1])},Xo.scale.identity=function(){return no([0,1])},Xo.svg={},Xo.svg.arc=function(){function n(){var n=t.apply(this,arguments),i=e.apply(this,arguments),o=r.apply(this,arguments)+ys,a=u.apply(this,arguments)+ys,c=(o>a&&(c=o,o=a,a=c),a-o),s=Sa>c?"0":"1",l=Math.cos(o),f=Math.sin(o),h=Math.cos(a),g=Math.sin(a);return c>=xs?n?"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"M0,"+n+"A"+n+","+n+" 0 1,0 0,"+-n+"A"+n+","+n+" 0 1,0 0,"+n+"Z":"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"Z":n?"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L"+n*h+","+n*g+"A"+n+","+n+" 0 "+s+",0 "+n*l+","+n*f+"Z":"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L0,0"+"Z"}var t=to,e=eo,r=ro,u=uo;return n.innerRadius=function(e){return arguments.length?(t=_t(e),n):t},n.outerRadius=function(t){return arguments.length?(e=_t(t),n):e},n.startAngle=function(t){return arguments.length?(r=_t(t),n):r},n.endAngle=function(t){return arguments.length?(u=_t(t),n):u},n.centroid=function(){var n=(t.apply(this,arguments)+e.apply(this,arguments))/2,i=(r.apply(this,arguments)+u.apply(this,arguments))/2+ys;return[Math.cos(i)*n,Math.sin(i)*n]},n};var ys=-Ea,xs=ka-Aa;Xo.svg.line=function(){return io(bt)};var Ms=Xo.map({linear:oo,"linear-closed":ao,step:co,"step-before":so,"step-after":lo,basis:mo,"basis-open":yo,"basis-closed":xo,bundle:Mo,cardinal:go,"cardinal-open":fo,"cardinal-closed":ho,monotone:Eo});Ms.forEach(function(n,t){t.key=n,t.closed=/-closed$/.test(n)});var _s=[0,2/3,1/3,0],bs=[0,1/3,2/3,0],ws=[0,1/6,2/3,1/6];Xo.svg.line.radial=function(){var n=io(Ao);return n.radius=n.x,delete n.x,n.angle=n.y,delete n.y,n},so.reverse=lo,lo.reverse=so,Xo.svg.area=function(){return Co(bt)},Xo.svg.area.radial=function(){var n=Co(Ao);return n.radius=n.x,delete n.x,n.innerRadius=n.x0,delete n.x0,n.outerRadius=n.x1,delete n.x1,n.angle=n.y,delete n.y,n.startAngle=n.y0,delete n.y0,n.endAngle=n.y1,delete n.y1,n},Xo.svg.chord=function(){function n(n,a){var c=t(this,i,n,a),s=t(this,o,n,a);return"M"+c.p0+r(c.r,c.p1,c.a1-c.a0)+(e(c,s)?u(c.r,c.p1,c.r,c.p0):u(c.r,c.p1,s.r,s.p0)+r(s.r,s.p1,s.a1-s.a0)+u(s.r,s.p1,c.r,c.p0))+"Z"}function t(n,t,e,r){var u=t.call(n,e,r),i=a.call(n,u,r),o=c.call(n,u,r)+ys,l=s.call(n,u,r)+ys;return{r:i,a0:o,a1:l,p0:[i*Math.cos(o),i*Math.sin(o)],p1:[i*Math.cos(l),i*Math.sin(l)]}}function e(n,t){return n.a0==t.a0&&n.a1==t.a1}function 
r(n,t,e){return"A"+n+","+n+" 0 "+ +(e>Sa)+",1 "+t}function u(n,t,e,r){return"Q 0,0 "+r}var i=hr,o=gr,a=No,c=ro,s=uo;return n.radius=function(t){return arguments.length?(a=_t(t),n):a},n.source=function(t){return arguments.length?(i=_t(t),n):i},n.target=function(t){return arguments.length?(o=_t(t),n):o},n.startAngle=function(t){return arguments.length?(c=_t(t),n):c},n.endAngle=function(t){return arguments.length?(s=_t(t),n):s},n},Xo.svg.diagonal=function(){function n(n,u){var i=t.call(this,n,u),o=e.call(this,n,u),a=(i.y+o.y)/2,c=[i,{x:i.x,y:a},{x:o.x,y:a},o];return c=c.map(r),"M"+c[0]+"C"+c[1]+" "+c[2]+" "+c[3]}var t=hr,e=gr,r=Lo;return n.source=function(e){return arguments.length?(t=_t(e),n):t},n.target=function(t){return arguments.length?(e=_t(t),n):e},n.projection=function(t){return arguments.length?(r=t,n):r},n},Xo.svg.diagonal.radial=function(){var n=Xo.svg.diagonal(),t=Lo,e=n.projection;return n.projection=function(n){return arguments.length?e(To(t=n)):t},n},Xo.svg.symbol=function(){function n(n,r){return(Ss.get(t.call(this,n,r))||Ro)(e.call(this,n,r))}var t=zo,e=qo;return n.type=function(e){return arguments.length?(t=_t(e),n):t},n.size=function(t){return arguments.length?(e=_t(t),n):e},n};var Ss=Xo.map({circle:Ro,cross:function(n){var t=Math.sqrt(n/5)/2;return"M"+-3*t+","+-t+"H"+-t+"V"+-3*t+"H"+t+"V"+-t+"H"+3*t+"V"+t+"H"+t+"V"+3*t+"H"+-t+"V"+t+"H"+-3*t+"Z"},diamond:function(n){var t=Math.sqrt(n/(2*Cs)),e=t*Cs;return"M0,"+-t+"L"+e+",0"+" 0,"+t+" "+-e+",0"+"Z"},square:function(n){var t=Math.sqrt(n)/2;return"M"+-t+","+-t+"L"+t+","+-t+" "+t+","+t+" "+-t+","+t+"Z"},"triangle-down":function(n){var t=Math.sqrt(n/As),e=t*As/2;return"M0,"+e+"L"+t+","+-e+" "+-t+","+-e+"Z"},"triangle-up":function(n){var t=Math.sqrt(n/As),e=t*As/2;return"M0,"+-e+"L"+t+","+e+" "+-t+","+e+"Z"}});Xo.svg.symbolTypes=Ss.keys();var ks,Es,As=Math.sqrt(3),Cs=Math.tan(30*Na),Ns=[],Ls=0;Ns.call=da.call,Ns.empty=da.empty,Ns.node=da.node,Ns.size=da.size,Xo.transition=function(n){return arguments.length?ks?n.transition():n:xa.transition()},Xo.transition.prototype=Ns,Ns.select=function(n){var t,e,r,u=this.id,i=[];n=M(n);for(var o=-1,a=this.length;++o<a;){i.push(t=[]);for(var c=this[o],s=-1,l=c.length;++s<l;)(r=c[s])&&(e=n.call(r,r.__data__,s,o))?("__data__"in r&&(e.__data__=r.__data__),jo(e,s,u,r.__transition__[u]),t.push(e)):t.push(null)}return Do(i,u)},Ns.selectAll=function(n){var t,e,r,u,i,o=this.id,a=[];n=_(n);for(var c=-1,s=this.length;++c<s;)for(var l=this[c],f=-1,h=l.length;++f<h;)if(r=l[f]){i=r.__transition__[o],e=n.call(r,r.__data__,f,c),a.push(t=[]);for(var g=-1,p=e.length;++g<p;)(u=e[g])&&jo(u,g,o,i),t.push(u)}return Do(a,o)},Ns.filter=function(n){var t,e,r,u=[];"function"!=typeof n&&(n=q(n));for(var i=0,o=this.length;o>i;i++){u.push(t=[]);for(var e=this[i],a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a,i)&&t.push(r)}return Do(u,this.id)},Ns.tween=function(n,t){var e=this.id;return arguments.length<2?this.node().__transition__[e].tween.get(n):R(this,null==t?function(t){t.__transition__[e].tween.remove(n)}:function(r){r.__transition__[e].tween.set(n,t)})},Ns.attr=function(n,t){function e(){this.removeAttribute(a)}function r(){this.removeAttributeNS(a.space,a.local)}function u(n){return null==n?e:(n+="",function(){var t,e=this.getAttribute(a);return e!==n&&(t=o(e,n),function(n){this.setAttribute(a,t(n))})})}function i(n){return null==n?r:(n+="",function(){var t,e=this.getAttributeNS(a.space,a.local);return e!==n&&(t=o(e,n),function(n){this.setAttributeNS(a.space,a.local,t(n))})})}if(arguments.length<2){for(t 
in n)this.attr(t,n[t]);return this}var o="transform"==n?Ru:fu,a=Xo.ns.qualify(n);return Po(this,"attr."+n,t,a.local?i:u)},Ns.attrTween=function(n,t){function e(n,e){var r=t.call(this,n,e,this.getAttribute(u));return r&&function(n){this.setAttribute(u,r(n))}}function r(n,e){var r=t.call(this,n,e,this.getAttributeNS(u.space,u.local));return r&&function(n){this.setAttributeNS(u.space,u.local,r(n))}}var u=Xo.ns.qualify(n);return this.tween("attr."+n,u.local?r:e)},Ns.style=function(n,t,e){function r(){this.style.removeProperty(n)}function u(t){return null==t?r:(t+="",function(){var r,u=Go.getComputedStyle(this,null).getPropertyValue(n);return u!==t&&(r=fu(u,t),function(t){this.style.setProperty(n,r(t),e)})})}var i=arguments.length;if(3>i){if("string"!=typeof n){2>i&&(t="");for(e in n)this.style(e,n[e],t);return this}e=""}return Po(this,"style."+n,t,u)},Ns.styleTween=function(n,t,e){function r(r,u){var i=t.call(this,r,u,Go.getComputedStyle(this,null).getPropertyValue(n));return i&&function(t){this.style.setProperty(n,i(t),e)}}return arguments.length<3&&(e=""),this.tween("style."+n,r)},Ns.text=function(n){return Po(this,"text",n,Uo)},Ns.remove=function(){return this.each("end.transition",function(){var n;this.__transition__.count<2&&(n=this.parentNode)&&n.removeChild(this)})},Ns.ease=function(n){var t=this.id;return arguments.length<1?this.node().__transition__[t].ease:("function"!=typeof n&&(n=Xo.ease.apply(Xo,arguments)),R(this,function(e){e.__transition__[t].ease=n}))},Ns.delay=function(n){var t=this.id;return R(this,"function"==typeof n?function(e,r,u){e.__transition__[t].delay=+n.call(e,e.__data__,r,u)}:(n=+n,function(e){e.__transition__[t].delay=n}))},Ns.duration=function(n){var t=this.id;return R(this,"function"==typeof n?function(e,r,u){e.__transition__[t].duration=Math.max(1,n.call(e,e.__data__,r,u))}:(n=Math.max(1,n),function(e){e.__transition__[t].duration=n}))},Ns.each=function(n,t){var e=this.id;if(arguments.length<2){var r=Es,u=ks;ks=e,R(this,function(t,r,u){Es=t.__transition__[e],n.call(t,t.__data__,r,u)}),Es=r,ks=u}else R(this,function(r){var u=r.__transition__[e];(u.event||(u.event=Xo.dispatch("start","end"))).on(n,t)});return this},Ns.transition=function(){for(var n,t,e,r,u=this.id,i=++Ls,o=[],a=0,c=this.length;c>a;a++){o.push(n=[]);for(var t=this[a],s=0,l=t.length;l>s;s++)(e=t[s])&&(r=Object.create(e.__transition__[u]),r.delay+=r.duration,jo(e,s,i,r)),n.push(e)}return Do(o,i)},Xo.svg.axis=function(){function n(n){n.each(function(){var n,s=Xo.select(this),l=this.__chart__||e,f=this.__chart__=e.copy(),h=null==c?f.ticks?f.ticks.apply(f,a):f.domain():c,g=null==t?f.tickFormat?f.tickFormat.apply(f,a):bt:t,p=s.selectAll(".tick").data(h,f),v=p.enter().insert("g",".domain").attr("class","tick").style("opacity",Aa),d=Xo.transition(p.exit()).style("opacity",Aa).remove(),m=Xo.transition(p).style("opacity",1),y=Ri(f),x=s.selectAll(".domain").data([0]),M=(x.enter().append("path").attr("class","domain"),Xo.transition(x));v.append("line"),v.append("text");var 
_=v.select("line"),b=m.select("line"),w=p.select("text").text(g),S=v.select("text"),k=m.select("text");switch(r){case"bottom":n=Ho,_.attr("y2",u),S.attr("y",Math.max(u,0)+o),b.attr("x2",0).attr("y2",u),k.attr("x",0).attr("y",Math.max(u,0)+o),w.attr("dy",".71em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+i+"V0H"+y[1]+"V"+i);break;case"top":n=Ho,_.attr("y2",-u),S.attr("y",-(Math.max(u,0)+o)),b.attr("x2",0).attr("y2",-u),k.attr("x",0).attr("y",-(Math.max(u,0)+o)),w.attr("dy","0em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+-i+"V0H"+y[1]+"V"+-i);break;case"left":n=Fo,_.attr("x2",-u),S.attr("x",-(Math.max(u,0)+o)),b.attr("x2",-u).attr("y2",0),k.attr("x",-(Math.max(u,0)+o)).attr("y",0),w.attr("dy",".32em").style("text-anchor","end"),M.attr("d","M"+-i+","+y[0]+"H0V"+y[1]+"H"+-i);break;case"right":n=Fo,_.attr("x2",u),S.attr("x",Math.max(u,0)+o),b.attr("x2",u).attr("y2",0),k.attr("x",Math.max(u,0)+o).attr("y",0),w.attr("dy",".32em").style("text-anchor","start"),M.attr("d","M"+i+","+y[0]+"H0V"+y[1]+"H"+i)}if(f.rangeBand){var E=f,A=E.rangeBand()/2;l=f=function(n){return E(n)+A}}else l.rangeBand?l=f:d.call(n,f);v.call(n,l),m.call(n,f)})}var t,e=Xo.scale.linear(),r=Ts,u=6,i=6,o=3,a=[10],c=null;return n.scale=function(t){return arguments.length?(e=t,n):e},n.orient=function(t){return arguments.length?(r=t in qs?t+"":Ts,n):r},n.ticks=function(){return arguments.length?(a=arguments,n):a},n.tickValues=function(t){return arguments.length?(c=t,n):c},n.tickFormat=function(e){return arguments.length?(t=e,n):t},n.tickSize=function(t){var e=arguments.length;return e?(u=+t,i=+arguments[e-1],n):u},n.innerTickSize=function(t){return arguments.length?(u=+t,n):u},n.outerTickSize=function(t){return arguments.length?(i=+t,n):i},n.tickPadding=function(t){return arguments.length?(o=+t,n):o},n.tickSubdivide=function(){return arguments.length&&n},n};var Ts="bottom",qs={top:1,right:1,bottom:1,left:1};Xo.svg.brush=function(){function n(i){i.each(function(){var i=Xo.select(this).style("pointer-events","all").style("-webkit-tap-highlight-color","rgba(0,0,0,0)").on("mousedown.brush",u).on("touchstart.brush",u),o=i.selectAll(".background").data([0]);o.enter().append("rect").attr("class","background").style("visibility","hidden").style("cursor","crosshair"),i.selectAll(".extent").data([0]).enter().append("rect").attr("class","extent").style("cursor","move");var a=i.selectAll(".resize").data(p,bt);a.exit().remove(),a.enter().append("g").attr("class",function(n){return"resize "+n}).style("cursor",function(n){return zs[n]}).append("rect").attr("x",function(n){return/[ew]$/.test(n)?-3:null}).attr("y",function(n){return/^[ns]/.test(n)?-3:null}).attr("width",6).attr("height",6).style("visibility","hidden"),a.style("display",n.empty()?"none":null);var l,f=Xo.transition(i),h=Xo.transition(o);c&&(l=Ri(c),h.attr("x",l[0]).attr("width",l[1]-l[0]),e(f)),s&&(l=Ri(s),h.attr("y",l[0]).attr("height",l[1]-l[0]),r(f)),t(f)})}function t(n){n.selectAll(".resize").attr("transform",function(n){return"translate("+l[+/e$/.test(n)]+","+f[+/^s/.test(n)]+")"})}function e(n){n.select(".extent").attr("x",l[0]),n.selectAll(".extent,.n>rect,.s>rect").attr("width",l[1]-l[0])}function r(n){n.select(".extent").attr("y",f[0]),n.selectAll(".extent,.e>rect,.w>rect").attr("height",f[1]-f[0])}function u(){function u(){32==Xo.event.keyCode&&(C||(x=null,L[0]-=l[1],L[1]-=f[1],C=2),d())}function p(){32==Xo.event.keyCode&&2==C&&(L[0]+=l[1],L[1]+=f[1],C=0,d())}function v(){var 
n=Xo.mouse(_),u=!1;M&&(n[0]+=M[0],n[1]+=M[1]),C||(Xo.event.altKey?(x||(x=[(l[0]+l[1])/2,(f[0]+f[1])/2]),L[0]=l[+(n[0]<x[0])],L[1]=f[+(n[1]<x[1])]):x=null),E&&m(n,c,0)&&(e(S),u=!0),A&&m(n,s,1)&&(r(S),u=!0),u&&(t(S),w({type:"brush",mode:C?"move":"resize"}))}function m(n,t,e){var r,u,a=Ri(t),c=a[0],s=a[1],p=L[e],v=e?f:l,d=v[1]-v[0];return C&&(c-=p,s-=d+p),r=(e?g:h)?Math.max(c,Math.min(s,n[e])):n[e],C?u=(r+=p)+d:(x&&(p=Math.max(c,Math.min(s,2*x[e]-r))),r>p?(u=r,r=p):u=p),v[0]!=r||v[1]!=u?(e?o=null:i=null,v[0]=r,v[1]=u,!0):void 0}function y(){v(),S.style("pointer-events","all").selectAll(".resize").style("display",n.empty()?"none":null),Xo.select("body").style("cursor",null),T.on("mousemove.brush",null).on("mouseup.brush",null).on("touchmove.brush",null).on("touchend.brush",null).on("keydown.brush",null).on("keyup.brush",null),N(),w({type:"brushend"})}var x,M,_=this,b=Xo.select(Xo.event.target),w=a.of(_,arguments),S=Xo.select(_),k=b.datum(),E=!/^(n|s)$/.test(k)&&c,A=!/^(e|w)$/.test(k)&&s,C=b.classed("extent"),N=O(),L=Xo.mouse(_),T=Xo.select(Go).on("keydown.brush",u).on("keyup.brush",p);if(Xo.event.changedTouches?T.on("touchmove.brush",v).on("touchend.brush",y):T.on("mousemove.brush",v).on("mouseup.brush",y),S.interrupt().selectAll("*").interrupt(),C)L[0]=l[0]-L[0],L[1]=f[0]-L[1];else if(k){var q=+/w$/.test(k),z=+/^n/.test(k);M=[l[1-q]-L[0],f[1-z]-L[1]],L[0]=l[q],L[1]=f[z]}else Xo.event.altKey&&(x=L.slice());S.style("pointer-events","none").selectAll(".resize").style("display",null),Xo.select("body").style("cursor",b.style("cursor")),w({type:"brushstart"}),v()}var i,o,a=y(n,"brushstart","brush","brushend"),c=null,s=null,l=[0,0],f=[0,0],h=!0,g=!0,p=Rs[0];return n.event=function(n){n.each(function(){var n=a.of(this,arguments),t={x:l,y:f,i:i,j:o},e=this.__chart__||t;this.__chart__=t,ks?Xo.select(this).transition().each("start.brush",function(){i=e.i,o=e.j,l=e.x,f=e.y,n({type:"brushstart"})}).tween("brush:brush",function(){var e=hu(l,t.x),r=hu(f,t.y);return i=o=null,function(u){l=t.x=e(u),f=t.y=r(u),n({type:"brush",mode:"resize"})}}).each("end.brush",function(){i=t.i,o=t.j,n({type:"brush",mode:"resize"}),n({type:"brushend"})}):(n({type:"brushstart"}),n({type:"brush",mode:"resize"}),n({type:"brushend"}))})},n.x=function(t){return arguments.length?(c=t,p=Rs[!c<<1|!s],n):c},n.y=function(t){return arguments.length?(s=t,p=Rs[!c<<1|!s],n):s},n.clamp=function(t){return arguments.length?(c&&s?(h=!!t[0],g=!!t[1]):c?h=!!t:s&&(g=!!t),n):c&&s?[h,g]:c?h:s?g:null},n.extent=function(t){var e,r,u,a,h;return arguments.length?(c&&(e=t[0],r=t[1],s&&(e=e[0],r=r[0]),i=[e,r],c.invert&&(e=c(e),r=c(r)),e>r&&(h=e,e=r,r=h),(e!=l[0]||r!=l[1])&&(l=[e,r])),s&&(u=t[0],a=t[1],c&&(u=u[1],a=a[1]),o=[u,a],s.invert&&(u=s(u),a=s(a)),u>a&&(h=u,u=a,a=h),(u!=f[0]||a!=f[1])&&(f=[u,a])),n):(c&&(i?(e=i[0],r=i[1]):(e=l[0],r=l[1],c.invert&&(e=c.invert(e),r=c.invert(r)),e>r&&(h=e,e=r,r=h))),s&&(o?(u=o[0],a=o[1]):(u=f[0],a=f[1],s.invert&&(u=s.invert(u),a=s.invert(a)),u>a&&(h=u,u=a,a=h))),c&&s?[[e,u],[r,a]]:c?[e,r]:s&&[u,a])},n.clear=function(){return n.empty()||(l=[0,0],f=[0,0],i=o=null),n},n.empty=function(){return!!c&&l[0]==l[1]||!!s&&f[0]==f[1]},Xo.rebind(n,a,"on")};var zs={n:"ns-resize",e:"ew-resize",s:"ns-resize",w:"ew-resize",nw:"nwse-resize",ne:"nesw-resize",se:"nwse-resize",sw:"nesw-resize"},Rs=[["n","e","s","w","nw","ne","se","sw"],["e","w"],["n","s"],[]],Ds=tc.format=ac.timeFormat,Ps=Ds.utc,Us=Ps("%Y-%m-%dT%H:%M:%S.%LZ");Ds.iso=Date.prototype.toISOString&&+new Date("2000-01-01T00:00:00.000Z")?Oo:Us,Oo.parse=function(n){var t=new 
Date(n);return isNaN(t)?null:t},Oo.toString=Us.toString,tc.second=Rt(function(n){return new ec(1e3*Math.floor(n/1e3))},function(n,t){n.setTime(n.getTime()+1e3*Math.floor(t))},function(n){return n.getSeconds()}),tc.seconds=tc.second.range,tc.seconds.utc=tc.second.utc.range,tc.minute=Rt(function(n){return new ec(6e4*Math.floor(n/6e4))},function(n,t){n.setTime(n.getTime()+6e4*Math.floor(t))},function(n){return n.getMinutes()}),tc.minutes=tc.minute.range,tc.minutes.utc=tc.minute.utc.range,tc.hour=Rt(function(n){var t=n.getTimezoneOffset()/60;return new ec(36e5*(Math.floor(n/36e5-t)+t))},function(n,t){n.setTime(n.getTime()+36e5*Math.floor(t))},function(n){return n.getHours()}),tc.hours=tc.hour.range,tc.hours.utc=tc.hour.utc.range,tc.month=Rt(function(n){return n=tc.day(n),n.setDate(1),n},function(n,t){n.setMonth(n.getMonth()+t)},function(n){return n.getMonth()}),tc.months=tc.month.range,tc.months.utc=tc.month.utc.range;var js=[1e3,5e3,15e3,3e4,6e4,3e5,9e5,18e5,36e5,108e5,216e5,432e5,864e5,1728e5,6048e5,2592e6,7776e6,31536e6],Hs=[[tc.second,1],[tc.second,5],[tc.second,15],[tc.second,30],[tc.minute,1],[tc.minute,5],[tc.minute,15],[tc.minute,30],[tc.hour,1],[tc.hour,3],[tc.hour,6],[tc.hour,12],[tc.day,1],[tc.day,2],[tc.week,1],[tc.month,1],[tc.month,3],[tc.year,1]],Fs=Ds.multi([[".%L",function(n){return n.getMilliseconds()}],[":%S",function(n){return n.getSeconds()}],["%I:%M",function(n){return n.getMinutes()}],["%I %p",function(n){return n.getHours()}],["%a %d",function(n){return n.getDay()&&1!=n.getDate()}],["%b %d",function(n){return 1!=n.getDate()}],["%B",function(n){return n.getMonth()}],["%Y",be]]),Os={range:function(n,t,e){return Xo.range(Math.ceil(n/e)*e,+t,e).map(Io)},floor:bt,ceil:bt};Hs.year=tc.year,tc.scale=function(){return Yo(Xo.scale.linear(),Hs,Fs)};var Ys=Hs.map(function(n){return[n[0].utc,n[1]]}),Is=Ps.multi([[".%L",function(n){return n.getUTCMilliseconds()}],[":%S",function(n){return n.getUTCSeconds()}],["%I:%M",function(n){return n.getUTCMinutes()}],["%I %p",function(n){return n.getUTCHours()}],["%a %d",function(n){return n.getUTCDay()&&1!=n.getUTCDate()}],["%b %d",function(n){return 1!=n.getUTCDate()}],["%B",function(n){return n.getUTCMonth()}],["%Y",be]]);Ys.year=tc.year.utc,tc.scale.utc=function(){return Yo(Xo.scale.linear(),Ys,Is)},Xo.text=wt(function(n){return n.responseText}),Xo.json=function(n,t){return St(n,"application/json",Zo,t)},Xo.html=function(n,t){return St(n,"text/html",Vo,t)},Xo.xml=wt(function(n){return n.responseXML}),"function"==typeof define&&define.amd?define(Xo):"object"==typeof module&&module.exports?module.exports=Xo:this.d3=Xo}();'use strict';tr.exportTo('tr.ui.b',function(){var THIS_DOC=document.currentScript.ownerDocument;var svgNS='http://www.w3.org/2000/svg';var ColorScheme=tr.b.ColorScheme;function getColorOfKey(key,selected){var id=ColorScheme.getColorIdForGeneralPurposeString(key);if(selected)
-id+=ColorScheme.properties.brightenedOffsets[0];return ColorScheme.colorsAsStrings[id];}
-var ChartBase=tr.ui.b.define('svg',undefined,svgNS);ChartBase.prototype={__proto__:HTMLUnknownElement.prototype,decorate:function(){this.classList.add('chart-base');this.chartTitle_=undefined;this.seriesKeys_=undefined;this.width_=400;this.height_=300;var template=THIS_DOC.querySelector('#chart-base-template');var svgEl=template.content.querySelector('svg');for(var i=0;i<svgEl.children.length;i++)
-this.appendChild(svgEl.children[i].cloneNode(true));Object.defineProperty(this,'width',{get:function(){return this.width_;},set:function(width){this.width_=width;this.updateContents_();}});Object.defineProperty(this,'height',{get:function(){return this.height_;},set:function(height){this.height_=height;this.updateContents_();}});},get chartTitle(){return this.chartTitle_;},set chartTitle(chartTitle){this.chartTitle_=chartTitle;this.updateContents_();},get chartAreaElement(){return this.querySelector('#chart-area');},setSize:function(size){this.width_=size.width;this.height_=size.height;this.updateContents_();},getMargin_:function(){var margin={top:20,right:20,bottom:30,left:50};if(this.chartTitle_)
-margin.top+=20;return margin;},get margin(){return this.getMargin_();},get chartAreaSize(){var margin=this.margin;return{width:this.width_-margin.left-margin.right,height:this.height_-margin.top-margin.bottom};},getLegendKeys_:function(){throw new Error('Not implemented');},updateScales_:function(){throw new Error('Not implemented');},updateContents_:function(){var margin=this.margin;var thisSel=d3.select(this);thisSel.attr('width',this.width_);thisSel.attr('height',this.height_);var chartAreaSel=d3.select(this.chartAreaElement);chartAreaSel.attr('transform','translate('+margin.left+','+margin.top+')');this.updateScales_();this.updateTitle_(chartAreaSel);this.updateLegend_();},updateTitle_:function(chartAreaSel){var titleSel=chartAreaSel.select('#title');if(!this.chartTitle_){titleSel.style('display','none');return;}
-var width=this.chartAreaSize.width;titleSel.attr('transform','translate('+width*0.5+',-5)').style('display',undefined).style('text-anchor','middle').attr('class','title').attr('width',width).text(this.chartTitle_);},updateLegend_:function(){var keys=this.getLegendKeys_();if(keys===undefined)
-return;var chartAreaSel=d3.select(this.chartAreaElement);var chartAreaSize=this.chartAreaSize;var legendEntriesSel=chartAreaSel.selectAll('.legend').data(keys.slice().reverse());legendEntriesSel.enter().append('g').attr('class','legend').attr('transform',function(d,i){return'translate(0,'+i*20+')';}).append('text').text(function(key){return key;});legendEntriesSel.exit().remove();legendEntriesSel.attr('x',chartAreaSize.width-18).attr('width',18).attr('height',18).style('fill',function(key){var selected=this.currentHighlightedLegendKey===key;return getColorOfKey(key,selected);}.bind(this));legendEntriesSel.selectAll('text').attr('x',chartAreaSize.width-24).attr('y',9).attr('dy','.35em').style('text-anchor','end').text(function(d){return d;});},get highlightedLegendKey(){return this.highlightedLegendKey_;},set highlightedLegendKey(highlightedLegendKey){this.highlightedLegendKey_=highlightedLegendKey;this.updateHighlight_();},get currentHighlightedLegendKey(){if(this.tempHighlightedLegendKey_)
-return this.tempHighlightedLegendKey_;return this.highlightedLegendKey_;},pushTempHighlightedLegendKey:function(key){if(this.tempHighlightedLegendKey_)
-throw new Error('push cannot nest');this.tempHighlightedLegendKey_=key;this.updateHighlight_();},popTempHighlightedLegendKey:function(key){if(this.tempHighlightedLegendKey_!=key)
-throw new Error('pop cannot happen');this.tempHighlightedLegendKey_=undefined;this.updateHighlight_();},updateHighlight_:function(){var chartAreaSel=d3.select(this.chartAreaElement);var legendEntriesSel=chartAreaSel.selectAll('.legend');var that=this;legendEntriesSel.each(function(key){var highlighted=key==that.currentHighlightedLegendKey;var color=getColorOfKey(key,highlighted);this.style.fill=color;if(highlighted)
-this.style.fontWeight='bold';else
-this.style.fontWeight='';});}};return{getColorOfKey:getColorOfKey,ChartBase:ChartBase};});'use strict';tr.exportTo('tr.ui.b',function(){function MouseTracker(opt_targetElement){this.onMouseDown_=this.onMouseDown_.bind(this);this.onMouseMove_=this.onMouseMove_.bind(this);this.onMouseUp_=this.onMouseUp_.bind(this);this.targetElement=opt_targetElement;}
-MouseTracker.prototype={get targetElement(){return this.targetElement_;},set targetElement(targetElement){if(this.targetElement_)
-this.targetElement_.removeEventListener('mousedown',this.onMouseDown_);this.targetElement_=targetElement;if(this.targetElement_)
-this.targetElement_.addEventListener('mousedown',this.onMouseDown_);},onMouseDown_:function(e){if(e.button!==0)
-return true;e=this.remakeEvent_(e,'mouse-tracker-start');this.targetElement_.dispatchEvent(e);document.addEventListener('mousemove',this.onMouseMove_);document.addEventListener('mouseup',this.onMouseUp_);this.targetElement_.addEventListener('blur',this.onMouseUp_);this.savePreviousUserSelect_=document.body.style['-webkit-user-select'];document.body.style['-webkit-user-select']='none';e.preventDefault();return true;},onMouseMove_:function(e){e=this.remakeEvent_(e,'mouse-tracker-move');this.targetElement_.dispatchEvent(e);},onMouseUp_:function(e){document.removeEventListener('mousemove',this.onMouseMove_);document.removeEventListener('mouseup',this.onMouseUp_);this.targetElement_.removeEventListener('blur',this.onMouseUp_);document.body.style['-webkit-user-select']=this.savePreviousUserSelect_;e=this.remakeEvent_(e,'mouse-tracker-end');this.targetElement_.dispatchEvent(e);},remakeEvent_:function(e,newType){var remade=new tr.b.Event(newType,true,true);remade.x=e.x;remade.y=e.y;remade.offsetX=e.offsetX;remade.offsetY=e.offsetY;remade.clientX=e.clientX;remade.clientY=e.clientY;return remade;}};function trackMouseMovesUntilMouseUp(mouseMoveHandler,opt_mouseUpHandler,opt_keyUpHandler){function cleanupAndDispatchToMouseUp(e){document.removeEventListener('mousemove',mouseMoveHandler);if(opt_keyUpHandler)
-document.removeEventListener('keyup',opt_keyUpHandler);document.removeEventListener('mouseup',cleanupAndDispatchToMouseUp);if(opt_mouseUpHandler)
-opt_mouseUpHandler(e);}
-document.addEventListener('mousemove',mouseMoveHandler);if(opt_keyUpHandler)
-document.addEventListener('keyup',opt_keyUpHandler);document.addEventListener('mouseup',cleanupAndDispatchToMouseUp);}
-return{MouseTracker:MouseTracker,trackMouseMovesUntilMouseUp:trackMouseMovesUntilMouseUp};});'use strict';tr.exportTo('tr.ui.b',function(){var ChartBase=tr.ui.b.ChartBase;var ChartBase2D=tr.ui.b.define('chart-base-2d',ChartBase);ChartBase2D.prototype={__proto__:ChartBase.prototype,decorate:function(){ChartBase.prototype.decorate.call(this);this.classList.add('chart-base-2d');this.xScale_=d3.scale.linear();this.yScale_=d3.scale.linear();this.data_=[];this.seriesKeys_=[];this.leftMargin_=50;d3.select(this.chartAreaElement).append('g').attr('id','brushes');d3.select(this.chartAreaElement).append('g').attr('id','series');this.addEventListener('mousedown',this.onMouseDown_.bind(this));},get data(){return this.data_;},set data(data){if(data===undefined)
-throw new Error('data must be an Array');this.data_=data;this.updateSeriesKeys_();this.updateContents_();},getSampleWidth_:function(data,index,leftSide){var leftIndex,rightIndex;if(leftSide){leftIndex=Math.max(index-1,0);rightIndex=index;}else{leftIndex=index;rightIndex=Math.min(index+1,data.length-1);}
-var leftWidth=this.getXForDatum_(data[index],index)-
-this.getXForDatum_(data[leftIndex],leftIndex);var rightWidth=this.getXForDatum_(data[rightIndex],rightIndex)-
-this.getXForDatum_(data[index],index);return leftWidth*0.5+rightWidth*0.5;},getLegendKeys_:function(){if(this.seriesKeys_&&this.seriesKeys_.length>1)
-return this.seriesKeys_.slice();return[];},updateSeriesKeys_:function(){var keySet={};this.data_.forEach(function(datum){Object.keys(datum).forEach(function(key){if(this.isDatumFieldSeries_(key))
-keySet[key]=true;},this);},this);this.seriesKeys_=Object.keys(keySet);},isDatumFieldSeries_:function(fieldName){throw new Error('Not implemented');},getXForDatum_:function(datum,index){throw new Error('Not implemented');},updateScales_:function(){if(this.data_.length===0)
-return;var width=this.chartAreaSize.width;var height=this.chartAreaSize.height;this.xScale_.range([0,width]);this.xScale_.domain(d3.extent(this.data_,this.getXForDatum_.bind(this)));var yRange=new tr.b.Range();this.data_.forEach(function(datum){this.seriesKeys_.forEach(function(key){if(datum[key]!==undefined)
-yRange.addValue(datum[key]);});},this);this.yScale_.range([height,0]);this.yScale_.domain([yRange.min,yRange.max]);},updateBrushContents_:function(brushSel){brushSel.selectAll('*').remove();},updateXAxis_:function(xAxis){xAxis.selectAll('*').remove();xAxis[0][0].style.opacity=0;xAxis.attr('transform','translate(0,'+this.chartAreaSize.height+')').call(d3.svg.axis().scale(this.xScale_).orient('bottom'));window.requestAnimationFrame(function(){var previousRight=undefined;xAxis.selectAll('.tick')[0].forEach(function(tick){var currentLeft=tick.transform.baseVal[0].matrix.e;if((previousRight===undefined)||(currentLeft>(previousRight+3))){var currentWidth=tick.getBBox().width;previousRight=currentLeft+currentWidth;}else{tick.style.opacity=0;}});xAxis[0][0].style.opacity=1;});},getMargin_:function(){var margin=ChartBase.prototype.getMargin_.call(this);margin.left=this.leftMargin_;return margin;},updateYAxis_:function(yAxis){yAxis.selectAll('*').remove();yAxis[0][0].style.opacity=0;yAxis.call(d3.svg.axis().scale(this.yScale_).orient('left'));window.requestAnimationFrame(function(){var previousTop=undefined;var leftMargin=0;yAxis.selectAll('.tick')[0].forEach(function(tick){var bbox=tick.getBBox();leftMargin=Math.max(leftMargin,bbox.width);var currentTop=tick.transform.baseVal[0].matrix.f;var currentBottom=currentTop+bbox.height;if((previousTop===undefined)||(previousTop>(currentBottom+3))){previousTop=currentTop;}else{tick.style.opacity=0;}});if(leftMargin>this.leftMargin_){this.leftMargin_=leftMargin;this.updateContents_();}else{yAxis[0][0].style.opacity=1;}}.bind(this));},updateContents_:function(){ChartBase.prototype.updateContents_.call(this);var chartAreaSel=d3.select(this.chartAreaElement);this.updateXAxis_(chartAreaSel.select('.x.axis'));this.updateYAxis_(chartAreaSel.select('.y.axis'));this.updateBrushContents_(chartAreaSel.select('#brushes'));this.updateDataContents_(chartAreaSel.select('#series'));},updateDataContents_:function(seriesSel){throw new Error('Not implemented');},getDataBySeriesKey_:function(){var dataBySeriesKey={};this.seriesKeys_.forEach(function(seriesKey){dataBySeriesKey[seriesKey]=[];});this.data_.forEach(function(multiSeriesDatum,index){var x=this.getXForDatum_(multiSeriesDatum,index);d3.keys(multiSeriesDatum).forEach(function(seriesKey){if(seriesKey==='x')
-return;if(multiSeriesDatum[seriesKey]===undefined)
-return;var singleSeriesDatum={x:x};singleSeriesDatum[seriesKey]=multiSeriesDatum[seriesKey];dataBySeriesKey[seriesKey].push(singleSeriesDatum);});},this);return dataBySeriesKey;},getDataPointAtClientPoint_:function(clientX,clientY){var rect=this.getBoundingClientRect();var margin=this.margin;var x=clientX-rect.left-margin.left;var y=clientY-rect.top-margin.top;x=this.xScale_.invert(x);y=this.yScale_.invert(y);x=tr.b.clamp(x,this.xScale_.domain()[0],this.xScale_.domain()[1]);y=tr.b.clamp(y,this.yScale_.domain()[0],this.yScale_.domain()[1]);return{x:x,y:y};},prepareDataEvent_:function(mouseEvent,dataEvent){var dataPoint=this.getDataPointAtClientPoint_(mouseEvent.clientX,mouseEvent.clientY);dataEvent.x=dataPoint.x;dataEvent.y=dataPoint.y;},onMouseDown_:function(mouseEvent){tr.ui.b.trackMouseMovesUntilMouseUp(this.onMouseMove_.bind(this,mouseEvent.button),this.onMouseUp_.bind(this,mouseEvent.button));mouseEvent.preventDefault();mouseEvent.stopPropagation();var dataEvent=new tr.b.Event('item-mousedown');dataEvent.button=mouseEvent.button;this.classList.add('updating-brushing-state');this.prepareDataEvent_(mouseEvent,dataEvent);this.dispatchEvent(dataEvent);},onMouseMove_:function(button,mouseEvent){if(mouseEvent.buttons!==undefined){mouseEvent.preventDefault();mouseEvent.stopPropagation();}
-var dataEvent=new tr.b.Event('item-mousemove');dataEvent.button=button;this.prepareDataEvent_(mouseEvent,dataEvent);this.dispatchEvent(dataEvent);},onMouseUp_:function(button,mouseEvent){mouseEvent.preventDefault();mouseEvent.stopPropagation();var dataEvent=new tr.b.Event('item-mouseup');dataEvent.button=button;this.prepareDataEvent_(mouseEvent,dataEvent);this.dispatchEvent(dataEvent);this.classList.remove('updating-brushing-state');}};return{ChartBase2D:ChartBase2D};});'use strict';tr.exportTo('tr.ui.b',function(){var ChartBase2D=tr.ui.b.ChartBase2D;var ChartBase2DBrushX=tr.ui.b.define('chart-base-2d-brush-1d',ChartBase2D);ChartBase2DBrushX.prototype={__proto__:ChartBase2D.prototype,decorate:function(){ChartBase2D.prototype.decorate.call(this);this.brushedRange_=new tr.b.Range();},set brushedRange(range){this.brushedRange_.reset();this.brushedRange_.addRange(range);this.updateContents_();},computeBrushRangeFromIndices:function(indexA,indexB){indexA=tr.b.clamp(indexA,0,this.data_.length-1);indexB=tr.b.clamp(indexB,0,this.data_.length-1);var leftIndex=Math.min(indexA,indexB);var rightIndex=Math.max(indexA,indexB);var r=new tr.b.Range();r.addValue(this.getXForDatum_(this.data_[leftIndex],leftIndex)-
-this.getSampleWidth_(this.data_,leftIndex,true));r.addValue(this.getXForDatum_(this.data_[rightIndex],rightIndex)+
-this.getSampleWidth_(this.data_,rightIndex,false));return r;},getDataIndex_:function(dataX){if(!this.data_)
-return undefined;var bisect=d3.bisector(this.getXForDatum_.bind(this)).right;return bisect(this.data_,dataX)-1;},prepareDataEvent_:function(mouseEvent,dataEvent){ChartBase2D.prototype.prepareDataEvent_.call(this,mouseEvent,dataEvent);dataEvent.index=this.getDataIndex_(dataEvent.x);if(dataEvent.index!==undefined)
-dataEvent.data=this.data_[dataEvent.index];},updateBrushContents_:function(brushSel){brushSel.selectAll('*').remove();var brushes=this.brushedRange_.isEmpty?[]:[this.brushedRange_];var brushRectsSel=brushSel.selectAll('rect').data(brushes);brushRectsSel.enter().append('rect');brushRectsSel.exit().remove();brushRectsSel.attr('x',function(d){return this.xScale_(d.min);}.bind(this)).attr('y',0).attr('width',function(d){return this.xScale_(d.max)-this.xScale_(d.min);}.bind(this)).attr('height',this.chartAreaSize.height);}};return{ChartBase2DBrushX:ChartBase2DBrushX};});'use strict';tr.exportTo('tr.ui.b',function(){var ChartBase2DBrushX=tr.ui.b.ChartBase2DBrushX;var LineChart=tr.ui.b.define('line-chart',ChartBase2DBrushX);LineChart.prototype={__proto__:ChartBase2DBrushX.prototype,decorate:function(){ChartBase2DBrushX.prototype.decorate.call(this);this.classList.add('line-chart');},isDatumFieldSeries_:function(fieldName){return fieldName!='x';},getXForDatum_:function(datum,index){return datum.x;},updateDataContents_:function(dataSel){dataSel.selectAll('*').remove();var dataBySeriesKey=this.getDataBySeriesKey_();var pathsSel=dataSel.selectAll('path').data(this.seriesKeys_);pathsSel.enter().append('path').attr('class','line').style('stroke',function(key){return tr.ui.b.getColorOfKey(key);}).attr('d',function(key){var line=d3.svg.line().x(function(d){return this.xScale_(d.x);}.bind(this)).y(function(d){return this.yScale_(d[key]);}.bind(this));return line(dataBySeriesKey[key]);}.bind(this));pathsSel.exit().remove();}};return{LineChart:LineChart};});'use strict';var EventSet=tr.model.EventSet;var CHART_TITLE='Power (in mW) by ms since vertical sync';var CHART_WIDTH_FRACTION_OF_BODY=0.5;Polymer('tr-ui-a-frame-power-usage-chart',{ready:function(){this.chart_=undefined;this.samples_=new EventSet();this.vSyncTimestamps_=[];},get chart(){return this.chart_;},get samples(){return this.samples_;},get vSyncTimestamps(){return this.vSyncTimestamps_;},setData:function(samples,vSyncTimestamps){this.samples_=(samples===undefined)?new EventSet():samples;this.vSyncTimestamps_=(vSyncTimestamps===undefined)?[]:vSyncTimestamps;this.updateContents_();},updateContents_:function(){this.clearChart_();var data=this.getDataForLineChart_();if(data.length===0)
-return;this.chart_=this.createChart_(data);this.$.content.appendChild(this.chart_);},createChart_:function(data){var chart=new tr.ui.b.LineChart();var width=document.body.clientWidth*CHART_WIDTH_FRACTION_OF_BODY;chart.setSize({width:width,height:chart.height});chart.chartTitle=CHART_TITLE;chart.data=data;return chart;},clearChart_:function(){var content=this.$.content;while(content.firstChild)
-content.removeChild(content.firstChild);this.chart_=undefined;},getDataForLineChart_:function(){var sortedSamples=this.sortSamplesByTimestampAscending_(this.samples);var vSyncTimestamps=this.vSyncTimestamps.slice();var lastVSyncTimestamp=undefined;var points=[];var frameNumber=0;sortedSamples.forEach(function(sample){while(vSyncTimestamps.length>0&&vSyncTimestamps[0]<=sample.start){lastVSyncTimestamp=vSyncTimestamps.shift();frameNumber++;}
-if(lastVSyncTimestamp===undefined)
-return;var point={x:sample.start-lastVSyncTimestamp};point['f'+frameNumber]=sample.power;points.push(point);});return points;},sortSamplesByTimestampAscending_:function(samples){return samples.toArray().sort(function(smpl1,smpl2){return smpl1.start-smpl2.start;});}});'use strict';Polymer('tr-ui-a-power-sample-summary-table',{ready:function(){this.$.table.tableColumns=[{title:'Min power',width:'100px',value:function(row){return tr.b.u.Units.powerInWatts.format(row.min/1000.0);}},{title:'Max power',width:'100px',value:function(row){return tr.b.u.Units.powerInWatts.format(row.max/1000.0);}},{title:'Time-weighted average',width:'100px',value:function(row){return tr.b.u.Units.powerInWatts.format(row.timeWeightedAverage/1000.0);}},{title:'Energy consumed',width:'100px',value:function(row){return tr.b.u.Units.energyInJoules.format(row.energyConsumed);}},{title:'Sample count',width:'100%',value:function(row){return row.sampleCount;}}];this.samples=new tr.model.EventSet();},get samples(){return this.samples_;},set samples(samples){if(samples===this.samples)
-return;this.samples_=(samples===undefined)?new tr.model.EventSet():samples;this.updateContents_();},updateContents_:function(){if(this.samples.length===0){this.$.table.tableRows=[];}else{this.$.table.tableRows=[{min:this.getMin(),max:this.getMax(),timeWeightedAverage:this.getTimeWeightedAverage(),energyConsumed:this.getEnergyConsumed(),sampleCount:this.samples.length}];}
-this.$.table.rebuild();},getMin:function(){return Math.min.apply(null,this.samples.map(function(sample){return sample.power;}));},getMax:function(){return Math.max.apply(null,this.samples.map(function(sample){return sample.power;}));},getTimeWeightedAverage:function(){var energyConsumed=this.getEnergyConsumed();if(energyConsumed==='N/A')
-return'N/A';return this.getEnergyConsumed()/this.samples.bounds.duration*1000;},getEnergyConsumed:function(){if(this.samples.length<2)
-return'N/A';var bounds=this.samples.bounds;return this.samples[0].series.getEnergyConsumed(bounds.min,bounds.max);}});'use strict';Polymer('tr-ui-a-multi-power-sample-sub-view',{ready:function(){this.currentSelection_=undefined;},get selection(){return this.currentSelection_;},set selection(selection){this.currentSelection_=selection;this.updateContents_();},updateContents_:function(){var samples=this.selection;var vSyncTimestamps=(this.selection===undefined)?[]:this.selection[0].series.device.vSyncTimestamps;this.$.summaryTable.samples=samples;this.$.samplesTable.samples=samples;this.$.chart.setData(this.selection,vSyncTimestamps);}});'use strict';(function(){var EventRegistry=tr.model.EventRegistry;Polymer('tr-ui-a-analysis-view',{ready:function(){this.tabView_=document.createElement('tr-ui-a-tab-view');this.tabView_.style.flex='1 1 auto';this.appendChild(this.tabView_);this.brushingStateController_=undefined;this.onSelectedTabChange_=this.onSelectedTabChange_.bind(this);this.onSelectionChanged_=this.onSelectionChanged_.bind(this);this.lastSeenSelection_=new tr.model.EventSet();},set tallMode(value){if(value)
-this.classList.add('tall-mode');else
-this.classList.remove('tall-mode');},get tallMode(){return this.classList.contains('tall-mode');},get tabView(){return this.tabView_;},get brushingStateController(){return this.brushingStateController_;},set brushingStateController(brushingStateController){if(this.brushingStateController){this.brushingStateController_.removeEventListener('change',this.onSelectionChanged_);}
-this.brushingStateController_=brushingStateController;if(this.brushingStateController){this.brushingStateController_.addEventListener('change',this.onSelectionChanged_);}
-this.onSelectionChanged_();},get selection(){return this.brushingStateController_.selection;},onSelectionChanged_:function(e){var selection=this.brushingStateController_.selection;var selectionHasSameValue=this.lastSeenSelection_.equals(selection);this.lastSeenSelection_=selection;if(selectionHasSameValue)
-return;var lastSelectedTabTagName;var lastSelectedTabTypeName;if(this.tabView_.selectedTab){lastSelectedTabTagName=this.tabView_.selectedTab.tagName;lastSelectedTabTypeName=this.tabView_.selectedTab._eventTypeName;}
-this.tallMode=false;var previouslySelectedTab=this.tabView_.selectedTab;this.tabView_.removeEventListener('selected-tab-change',this.onSelectedTabChange_);var previousSubViews={};for(var i=0;i<this.tabView_.children.length;i++){var previousSubView=this.tabView_.children[i];previousSubViews[previousSubView._eventTypeName]=previousSubView;}
-this.tabView_.saveTabStates();this.tabView_.textContent='';if(selection.length==0){this.tabView_.tabStripHeadingText='Nothing selected. Tap stuff.';}else if(selection.length==1){this.tabView_.tabStripHeadingText='1 item selected: ';}else{this.tabView_.tabStripHeadingText=selection.length+' items selected: ';}
-var eventsByBaseTypeName=selection.getEventsOrganizedByBaseType(true);var numBaseTypesToAnalyze=tr.b.dictionaryLength(eventsByBaseTypeName);for(var eventTypeName in eventsByBaseTypeName){var subSelection=eventsByBaseTypeName[eventTypeName];var subView=this.createSubViewForSelection_(eventTypeName,subSelection,previousSubViews[eventTypeName]);subView._eventTypeName=eventTypeName;this.tabView_.appendChild(subView);subView.selection=subSelection;}
-var tab;if(lastSelectedTabTagName)
-tab=this.tabView_.querySelector(lastSelectedTabTagName);if(!tab&&lastSelectedTabTypeName){var tab=tr.b.findFirstInArray(this.tabView_.children,function(tab){return tab._eventTypeName===lastSelectedTabTypeName;});}
-if(!tab)
-tab=this.tabView_.firstChild;this.tabView_.selectedTab=tab;this.onSelectedTabChange_();this.tabView_.addEventListener('selected-tab-change',this.onSelectedTabChange_);},createSubViewForSelection_:function(eventTypeName,subSelection,previousSubView){var eventTypeInfo=EventRegistry.getEventTypeInfoByTypeName(eventTypeName);var singleMode=subSelection.length==1;var tagName;if(subSelection.length===1)
-tagName=eventTypeInfo.metadata.singleViewElementName;else
-tagName=eventTypeInfo.metadata.multiViewElementName;if(!tr.ui.b.getPolymerElementNamed(tagName))
-throw new Error('Element not registered: '+tagName);var subView;if(previousSubView&&previousSubView.tagName===tagName.toUpperCase())
-subView=previousSubView;else
-subView=document.createElement(tagName);var camelLabel;if(subSelection.length===1)
-camelLabel=EventRegistry.getUserFriendlySingularName(eventTypeName);else
-camelLabel=EventRegistry.getUserFriendlyPluralName(eventTypeName);subView.tabLabel=camelLabel+' ('+subSelection.length+')';return subView;},onSelectedTabChange_:function(){var brushingStateController=this.brushingStateController_;if(this.tabView_.selectedTab){var selectedTab=this.tabView_.selectedTab;this.tallMode=selectedTab.requiresTallView;if(brushingStateController){var rlth=selectedTab.relatedEventsToHighlight;brushingStateController.changeAnalysisViewRelatedEvents(rlth);}}else{this.tallMode=false;if(brushingStateController)
-brushingStateController.changeAnalysisViewRelatedEvents(undefined);}}});})();'use strict';Polymer('tr-ui-b-drag-handle',{__proto__:HTMLDivElement.prototype,created:function(){this.lastMousePos_=0;this.onMouseMove_=this.onMouseMove_.bind(this);this.onMouseUp_=this.onMouseUp_.bind(this);this.addEventListener('mousedown',this.onMouseDown_);this.target_=undefined;this.horizontal=true;this.observer_=new WebKitMutationObserver(this.didTargetMutate_.bind(this));this.targetSizesByModeKey_={};},get modeKey_(){return this.target_.className==''?'.':this.target_.className;},get target(){return this.target_;},set target(target){this.observer_.disconnect();this.target_=target;if(!this.target_)
-return;this.observer_.observe(this.target_,{attributes:true,attributeFilter:['class']});},get horizontal(){return this.horizontal_;},set horizontal(h){this.horizontal_=h;if(this.horizontal_)
-this.className='horizontal-drag-handle';else
-this.className='vertical-drag-handle';},get vertical(){return!this.horizontal_;},set vertical(v){this.horizontal=!v;},forceMutationObserverFlush_:function(){var records=this.observer_.takeRecords();if(records.length)
-this.didTargetMutate_(records);},didTargetMutate_:function(e){var modeSize=this.targetSizesByModeKey_[this.modeKey_];if(modeSize!==undefined){this.setTargetSize_(modeSize);return;}
-this.target_.style[this.targetStyleKey_]='';},get targetStyleKey_(){return this.horizontal_?'height':'width';},getTargetSize_:function(){var targetStyleKey=this.targetStyleKey_;if(!this.target_.style[targetStyleKey]){this.target_.style[targetStyleKey]=window.getComputedStyle(this.target_)[targetStyleKey];}
-var size=parseInt(this.target_.style[targetStyleKey]);this.targetSizesByModeKey_[this.modeKey_]=size;return size;},setTargetSize_:function(s){this.target_.style[this.targetStyleKey_]=s+'px';this.targetSizesByModeKey_[this.modeKey_]=s;},applyDelta_:function(delta){var curSize=this.getTargetSize_();var newSize;if(this.target_===this.nextElementSibling){newSize=curSize+delta;}else{newSize=curSize-delta;}
-this.setTargetSize_(newSize);},onMouseMove_:function(e){var curMousePos=this.horizontal_?e.clientY:e.clientX;var delta=this.lastMousePos_-curMousePos;this.applyDelta_(delta);this.lastMousePos_=curMousePos;e.preventDefault();return true;},onMouseDown_:function(e){if(!this.target_)
-return;this.forceMutationObserverFlush_();this.lastMousePos_=this.horizontal_?e.clientY:e.clientX;document.addEventListener('mousemove',this.onMouseMove_);document.addEventListener('mouseup',this.onMouseUp_);e.preventDefault();return true;},onMouseUp_:function(e){document.removeEventListener('mousemove',this.onMouseMove_);document.removeEventListener('mouseup',this.onMouseUp_);e.preventDefault();}});'use strict';Polymer('tr-ui-b-dropdown',{ready:function(){this.$.outer.tabIndex=0;},get iconElement(){return this.$.icon;},onOuterKeyDown_:function(e){if(e.keyCode===' '.charCodeAt(0)){this.toggle_();e.preventDefault();e.stopPropagation();}},onOuterClick_:function(e){var or=this.$.outer.getBoundingClientRect();var inside=true;inside&=e.clientX>=or.left;inside&=e.clientX<or.right;inside&=e.clientY>=or.top;inside&=e.clientY<or.bottom;if(!inside)
-return;e.preventDefault();this.toggle_();},toggle_:function(){if(!this.isOpen)
-this.show();else
-this.close();},show:function(){if(this.isOpen)
-return;this.$.outer.classList.add('open');var ddr=this.$.outer.getBoundingClientRect();var rW=Math.max(ddr.width,150);this.$.dialog.style.minWidth=rW+'px';this.$.dialog.showModal();var ddw=this.$.outer.getBoundingClientRect().width;var w=this.$.dialog.getBoundingClientRect().width;this.$.dialog.style.top=ddr.bottom-1+'px';this.$.dialog.style.left=ddr.left+'px';},onDialogClick_:function(e){if(!this.isOpen)
-return;if(e.srcElement!==this.$.dialog)
-return;e.preventDefault();this.close();},onDialogCancel_:function(e){e.preventDefault();this.close();},close:function(){if(!this.isOpen)
-return;this.$.dialog.close();this.$.outer.classList.remove('open');this.$.outer.focus();},get isOpen(){return this.$.dialog.hasAttribute('open');}});'use strict';tr.exportTo('tr.ui.b',function(){var FaviconsByHue={blue:'data:image/vndmicrosofticon;base64,AAABAAIAEBAAAAEAIABoBAAAJgAAACAgAAABACAAqBAAAI4EAAAoAAAAEAAAACAAAAABACAAAAAAAAAEAAASCwAAEgsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAjj8xAGArIgqOPzE8nUY3dqJJOJeiSTiXnUY3do4/MTxhKyIKjkAxAAAAAAAAAAAAAAAAAAAAAABQJBwAAAAAAZJBMzSoSzqlsU8+6bRQP/21UT//tVE//7RQP/2wTz3ppko6pY9AMjQAAAABTyMbAAAAAAB7e3sAAP//AKFSRE+wTz3dtVE//7VRP/+1UT//tVE//7VRP/+zUD7/sE89/7BOPf+qTDvdl0M0TwAAAABWJx4A+fn5ANjd3TnIiX7ftVA9/7VRP/+1UT//tVE//7VRP/+xTz3/rE08/6xMO/+sTDv/rE08/6dKOt+SQTM5q0w7ALO0tA3v8fGu05uR/7NMOf+0Tzz/tE88/7RPPv+uTT3/p0o7/6ZJOv+mSTr/pkk6/6ZJOv+mSjr/n0Y4rnIwKg3h4eFK9/j48N2zrP/FeGr/xnps/8Z6bP/AaUv/tlw1/7RbNf+1WzX/tFs1/7RbNf+0WzX/tFs1/7NbNPCqWy1K7e3tjPn5+f/49vX/9vLy//by8v/28vH/8bZv/+6RH//ukyP/7pMj/+6SI//ukiP/7pMj/+2SIv/qjyL/34kfjPHx8bL5+fn/+fn5//n5+f/5+fr/+fn5//W7cP/zlB3/85Yh//OWIf/zliH/85Yh//GVIf/rkR//6ZAf/+KLHrLz8/O2+fn5//n5+f/5+fn/+fn5//n5+f/1unD/85Qd//OWIf/zliH/85Yh//CUIP/mjh//44we/+OMHv/diR628vLymfn5+f/5+fn/+fn5//n5+f/5+fn/9bx0//OXI//zmCb/85gm/++VIv/hjB//3Yoe/92KHv/dih7/2IYdmfHx8Vz4+Pj3+fn5//n5+f/5+fn/+fn5//jo0//33bv/9929//bbtf/euDX/06oJ/9OrC//Tqwv/06oM98yfD1zr6+sY9/f3xvn5+f/5+fn/+fn5//n5+f/5+vv/+fv8//n7/f/3+PH/3Ms6/9O8AP/UvQD/1L0A/9K8AMbItAAY////APT09Fb4+Pjy+fn5//n5+f/5+fn/+fn5//n5+f/5+fr/9/bu/9zKOf/TuwD/1LwA/9S8APLQuABW3cQAAOzs7ADm5uYF9vb2ePn5+fT5+fn/+fn5//n5+f/5+fn/+fn6//f27v/cyTn/07sA/9S8APTRugB4w60ABcmyAAAAAAAA8PDwAOzs7Ab29vZd+Pj40vn5+fz5+fn/+fn5//n5+f/49/H/5Ndu/NjEIdLSugBdybIABsy1AAAAAAAAAAAAAAAAAADn5+cAqKioAPT09CH39/dy+Pj4tvj4+NX4+PjV+Pj4tvX063Lt6MMhOQAAAM+/RAAAAAAAAAAAAPAPAADAAwAAwAMAAIABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIABAACAAQAAwAMAAPAPAAAoAAAAIAAAAEAAAAABACAAAAAAAAAQAAASCwAAEgsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABCwUEDDgZExxWJx4tYiwiN2IsIjdWJx4tOBkTHAsFBAwAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP///wAbDAkKZS0jMYs+MWydRjeipko6x6tMO9utTTzjrU0846tMO9umSjrHnUY3oos+MWxlLSMxGwwJCv///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgZFAAPBwUHcjMoPJtFNpqsTTzhs1A+/LVRP/+2UT//tVE//7VRP/+1UT//tVE//7ZRP/+1UT//s1A+/KxNPOGbRTaacTInPA8HBQc4GRMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD/yp4AUCQcGZVDNICtTjzktVE//7VRP/+1UT//tVE//7VRP/+1UT//tVE//7VRP/+1UT//tVE//7VRP/+0UT//s1A+/7JQPv+rTDvkkkEzgE8jGxn/xZoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAA////AGswJSqiSTivs1A++7VRP/+1UT//tVE//7VRP/+1UT//tVE//7VRP/+1UT//tVE//7VRP/+1UT//tFA+/7FPPf+xTz3/sU89/7FPPf+vTj37nkc3r2guJCr///8AAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwMDAP/DogB/VEwsqE09v7VRP/+1UT//tVE//7VRP/+1UT//tVE//7VRP/+1UT//tVE//7VRP/+1UT//tVE//7NQPv+vTj3/r049/69OPf+vTj3/r049/69OPf+uTjz/oUg4v20xJiz/nnsAAgEBAAAAAAAAAAAAAAAAAAAAAAD19fUAkp2fHdK2sbW5W0r/tVA+/7VRP/+1UT//tVE//7VRP/+1UT//tVE//7VRP/+1UT//tVE//7VRP/+yUD7/rU08/6xNPP+tTTz/rU08/61NPP+tTTz/rU08/61NPP+sTTz/nkY3tWAqIR2pSzsAAAAAAAAAAAAAAAAAeXl5ADY2Ngnd39+O6tbT/blbSv+1UD7/tVE//7VRP/+1UT//tVE//7VRP/+1UT//tVE//7VRP/+1UT//slA+/6xNPP+rTDv/q0w7/6tMO/+rTDv/q0w7/6tMO/+rTDv/q0w7/6tMO/+qTDv9lkM0jiUQDQlSJR0AAAAAAAAAAAD///8AxMTES/X29u3s2NX/uVtK/7VQPv+1UT//tVE//7VRP/+1UT//tVE//7VRP/+1UT//tVE//7FPPv+qTDv/qEs6/6hLOv+oSzr/qEs6/6hLOv+oSzr/qEs6/6hLOv+oSzr/qEs6/6lLOv+lSTnthDsuS/+TcgAAAAAAm5ubAHBwcA/o6Oix+vv8/+zY1P+5W0r/tVA+/7VRP/+1UT//tVE//7VRP/+1UT//tVE//7VRP/+xTz3/qEs6/6ZKOv+mSjr/pko6/6ZKOv+mSjr/pko6/6ZKOv+mSjr/pko6/6ZKOv+mSjr/pko6/6ZKOv+
bRTaxSiEaD2cuJAD///8AycnJRfX19fD6+/z/69fU/7hYR/+0Tjv/tE48/7ROPP+0Tjz/tE48/7ROPP+0Tz3/r04+/6VJOv+jSDn/o0g5/6NIOf+jSDn/o0g5/6NIOf+jSDn/o0g5/6NIOf+jSDr/o0g5/6NIOf+jSDn/o0g6/6BHOfCCOS9F0FxKAAAAAALk5OSN+fn5//n6+v/y5+X/05uS/9CTiP/QlIn/0JSJ/9CUif/QlIn/0JSK/8yGb//AaDb/vWc0/71nNf+9ZzT/vWc0/71nNP+9ZjT/vWY0/71mNP+9ZjT/vGY0/7xmNP+8ZjT/vGY0/7xmNP+8ZjT/u2U0/7FiLY0AAAACk5OTFu/v78X5+fn/+fn5//n5+f/5+vr/+fn5//n5+f/5+fn/+fn5//n5+f/5+/3/99iy//KWI//ylSH/8ZUh//GVIf/xlSH/8ZUh//GVIf/xlSH/8ZUh//GVIf/xlSH/8ZUh//GVIf/xlSH/8ZUh//CUIf/vkyD/5Y0fxY1XExbDw8Mz9PT05fn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n7/f/32LL/85cj//OWIf/zliH/85Yh//OWIf/zliH/85Yh//OWIf/zliH/85Yh//OWIf/zliH/85Yh//OWIf/wlCD/7pIg/+6SIP/pjx/lunIZM9XV1VD39/f0+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fv9//fYsv/zlyP/85Yh//OWIf/zliH/85Yh//OWIf/zliH/85Yh//OWIf/zliH/85Yh//OWIf/zliH/75Mg/+uRH//qkB//6pAf/+iPH/TIfBtQ3d3dYfj4+Pn5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+/3/99iy//OXI//zliH/85Yh//OWIf/zliH/85Yh//OWIf/zliH/85Yh//OWIf/zliH/85Yh/+6TIP/ojx//548f/+ePH//njx//5o4f+c1/HGHh4eFl+Pj4+vn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n7/f/32LL/85cj//OWIf/zliH/85Yh//OWIf/zliH/85Yh//OWIf/zliH/85Yh//OWIf/tkiD/5Y0f/+SNH//ljR//5Y0f/+WNH//kjB/6zn8cZeDg4Fr4+Pj3+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fv9//fYsv/zlyP/85Yh//OWIf/zliH/85Yh//OWIf/zliH/85Yh//OWIf/zliH/65Eg/+KMHv/iix7/4ose/+KLHv/iix7/4ose/+CLHvfLfRta3NzcQvf39+/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+/3/99iy//OXI//zliH/85Yh//OWIf/zliH/85Yh//OWIf/zliH/85Yh/+qRIP/gih7/34oe/9+KHv/fih7/34oe/9+KHv/fih7/3Yge78V6GkLS0tIj9fX12fn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n7/f/32LH/85Yg//OVHv/zlR7/85Ue//OVHv/zlR7/85Ue//OVIf/pjyH/3ogf/92HH//dhx//3Ycf/92HH//dhx//3Ycf/92HH//ahh7ZunMZI56engjy8vKu+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fr7//jr2f/2ypL/9smP//bJkP/2yZD/9smQ//bJkP/2yZD/5rNI/9OeFP/SnhX/0p4V/9KeFf/SnhX/0Z0V/9GdFf/RnRX/0Z0V/8yWFq6KVBcI////AO3t7Wr5+fn++fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn6//n6/P/5+vz/+fr8//n6/P/5+vz/+fr8//n6/P/h013/0rsA/9O8AP/TvAD/07wA/9O8AP/TvAD/07wA/9O8AP/SvAD+yLMAav/mAADr6+sA4eHhJPb29tv5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5/+LSW//TuwD/1LwA/9S8AP/UvAD/1LwA/9S8AP/UvAD/1LwA/9K6ANu/qgAkyLEAALu7uwAAAAAA8vLygfn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/4tJb/9O7AP/UvAD/1LwA/9S8AP/UvAD/1LwA/9S8AP/UvAD/zrYAgQAAAACfjQAAAAAAAOzs7ADk5OQe9vb2zPn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/i0lv/07sA/9S8AP/UvAD/1LwA/9S8AP/UvAD/1LwA/9K6AMzCrAAeybIAAAAAAAAAAAAAsLCwAP///wDv7+9O+Pj47Pn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5/+LSW//TuwD/1LwA/9S8AP/UvAD/1LwA/9S8AP/TuwDsy7QATu7UAACXhQAAAAAAAAAAAAAAAAAA1tbWALS0tAPy8vJv+Pj49Pn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/4tJb/9O7AP/UvAD/1LwA/9S8AP/UvAD/07wA9M63AG6ZiQADtqIAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4uLiANfX1wbz8/Nz+Pj48Pn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/i0lv/07sA/9S8AP/UvAD/1LwA/9O8APDPuABzuKMABsGrAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4+PjANjY2ATy8vJZ+Pj42vn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5/+HSW//TugD/1LsA/9S8AP/TuwDazrcAWbejAATBqwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA1NTUAB8fHwDw8PAr9vb2nPj4+O35+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/7uas/+bZdv/j1mvt2cYznMu0ACsUFAAAtaEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOvr6wDj4+MG8vLyOvb29pD4+PjS+fn58vn5+f35+fn/+fn5//n5+f/5+fn/+fn5/fn5+fL4+frS9/j8kPT1/Trs8v
8G8PP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADh4eEA1tbWAu/v7xv09PRJ9vb2dvb29pf39/eo9/f3qPb29pf29vZ29PT0Se/v7xvW1tYC4eHhAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP/gB///gAH//gAAf/wAAD/4AAAf8AAAD+AAAAfAAAADwAAAA4AAAAGAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAGAAAABwAAAA8AAAAPgAAAH4AAAB/AAAA/4AAAf/gAAf/8AAP//wAP/',green:'data:image/vndmicrosofticon;base64,AAABAAIAEBAAAAEAIABoBAAAJgAAACAgAAABACAAqBAAAI4EAAAoAAAAEAAAACAAAAABACAAAAAAAAAEAAASCwAAEgsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAbWJLAEpCMwptYks8eWxTdn1wVpd9cFaXeWxTdm1iSzxKQzMKbWJLAAAAAAAAAAAAAAAAAAAAAAA+OCsAAAAAAXBlTTSBdFmliHpe6Yp8X/2LfWD/i31g/4p8X/2HeV3pf3NYpW5jTDQAAAABPTcqAAAAAAB7e3sAlv//AIB1Xk+HeV3di31g/4t9YP+LfWD/i31g/4t9YP+Je1//h3pd/4d5Xf+DdVrddGhQTwAAAABDPC4A+fn5ANrb3DmupZPfinxf/4t9YP+LfWD/i31g/4t9YP+Iel7/hHdb/4R2W/+Edlv/hHdb/4BzWN9wZU05g3ZaALS0tA3w8PGuu7Sj/4h5W/+Je17/iXte/4t8X/+HeFz/gnNY/4FyWP+Bclj/gXJY/4FyWP+Bclj/fG1Url9NPA3h4eFK9/j48MvFuf+kmoP/ppuF/6abhf+JkHL/c4Rj/3OEY/9zhGP/coNj/3KDY/9yg2P/coNj/3CDYvBgf19K7e3tjPn5+f/39vb/9fTz//X08//09PP/itKw/0m+h/9Mv4n/TL+J/0y/if9Mv4n/TL+J/0y+iP9Lu4b/RrJ/jPHx8bL5+fn/+fn5//n5+f/5+fn/+fn5/4rXtP9Hwon/SsOL/0rDi/9Kw4v/SsOL/0nCiv9HvYb/RruF/0S1gbLz8/O2+fn5//n5+f/5+fn/+fn5//n5+f+K17P/R8KJ/0rDi/9Kw4v/SsOL/0nBif9GuYT/RbaC/0W2gv9Dsn+28vLymfn5+f/5+fn/+fn5//n5+f/5+fn/jdi1/0vDjP9OxI7/TsSO/0rAiv9FtoP/RLKA/0SygP9EsoD/Qq59mfHx8Vz4+Pj3+fn5//n5+f/5+fn/+fn5/9rw5v/H6tn/yOra/8Lp2f9e1b7/O8yz/z3MtP89zLT/Pcuy9zzApVzr6+sY9/f3xvn5+f/5+fn/+fn5//n5+f/7+vr//Pr7//z6+//z+fn/ZuPY/zbczv853c7/Od3O/zjbzcY10sYY////APT09Fb4+Pjy+fn5//n5+f/5+fn/+fn5//n5+f/6+fn/8Pj3/2Xj1/823Mz/OdzN/znczfI42MlWO+XWAOzs7ADm5uYF9vb2ePn5+fT5+fn/+fn5//n5+f/5+fn/+vn5//D49/9j4tf/NdvM/znczfQ42ct4Ncu9BTbRwgAAAAAA8PDwAOzs7Ab29vZd+Pj40vn5+fz5+fn/+fn5//n5+f/z+Pj/jung/FLf0tI42ctdNdHCBjfUxgAAAAAAAAAAAAAAAADn5+cAqKioAPT09CH39/dy+Pj4tvj4+NX4+PjV+Pj4tu329XLO7+whAFQmAGrUygAAAAAAAAAAAPAPAADAAwAAwAMAAIABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIABAACAAQAAwAMAAPAPAAAoAAAAIAAAAEAAAAABACAAAAAAAAAQAAASCwAAEgsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABCQgGDCsmHRxCOy4tS0M0N0tDNDdCOy4tKyYdHAkIBgwAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP///wAVEg4KTUU1MWtgSmx5bVOigHNYx4N2W9uFd1zjhXdc44N2W9uAc1jHeW1TomtgSmxNRjUxFRMOCv///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACsnHgALCggHWE88PHdrUpqEd1vhiXxf/It9YP+LfWD/i31g/4t9YP+LfWD/i31g/4t9YP+LfWD/iXxf/IR3W+F3a1KaV048PAsKCAcrJx4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD///AAPjcqGXJnT4CFeFzki31g/4t9YP+LfWD/i31g/4t9YP+LfWD/i31g/4t9YP+LfWD/i31g/4t9YP+KfWD/iXxf/4l7Xv+DdlrkcGVNgDw2Khn//+sAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA////AFJKOSp9cFavinxf+4t9YP+LfWD/i31g/4t9YP+LfWD/i31g/4t9YP+LfWD/i31g/4t9YP+LfWD/inxf/4h6Xv+Iel3/iHpd/4h6Xv+GeV37eW1Ur1BINyr///8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwMDAP//3gBsZ1osgnVbv4t9YP+LfWD/i31g/4t9YP+LfWD/i31g/4t9YP+LfWD/i31g/4t9YP+LfWD/i31g/4l8X/+HeV3/hnlc/4Z5XP+GeVz/hnlc/4Z5XP+GeFz/fG9Vv1RLOiz/9LoAAgIBAAAAAAAAAAAAAAAAAAAAAAD19fUAl5ibHcbCurWShGn/i31g/4t9YP+LfWD/i31g/4t9YP+LfWD/i31g/4t9YP+LfWD/i31g/4t9YP+Je1//hXhc/4R3W/+Fd1v/hXdb/4V3W/+Fd1v/hXdb/4V3W/+Ed1v/eW1TtUlCMh2CdVkAAAAAAAAAAAAAAAAAeXl5ADY2Ngne3t+O4t/Z/ZKFaf+LfV//i31g/4t9YP+LfWD/i31g/4t9YP+LfWD/i31g/4t9YP+LfWD/iXte/4R3W/+Ddlr/g3Za/4N2Wv+Ddlr/g3Za/4N2Wv+Ddlr/g3Za/4N2Wv+CdVr9c2dPjhwZEwk/OSsAAAAAAAAAAAD///8AxMTES/X19u3k4dv/koRp/4t9X/+LfWD/i31g/4t9YP+LfWD/i31g/4t9YP+LfWD/i31g/4h6Xv+CdVr/gXRZ/4F0Wf+BdFn/gXRZ/4F0Wf+BdFn/gXRZ/4F0Wf+BdFn/gXRZ/4F0Wf9+c
lftZVtGS/3jrgAAAAAAm5ubAHBwcA/o6Oix+/v7/+Pg2/+ShGn/i31f/4t9YP+LfWD/i31g/4t9YP+LfWD/i31g/4t9YP+Iel7/gXRZ/4BzWP+Ac1j/gHNY/4BzWP+Ac1j/gHNY/4BzWP+Ac1j/gHNY/4BzWP+Ac1j/gHNY/4BzWP93a1KxOTMnD1BHNwD///8AycnJRfX19fD7+/v/4+Da/5CCZ/+Jel3/iXtd/4l7Xf+Je13/iXtd/4l7Xf+Ke17/iHhd/4BxV/9/cFb/f3BW/39wVv9/cFb/f3BW/39wVv9/cFb/f3BW/39wVv9/cFb/f3BW/39wVv9/cFb/f3BW/31uVPBnWURFo45tAAAAAALk5OSN+fn5//r6+v/t7Oj/vLSk/7aunP+3rp3/t66d/7eunf+3rp3/uK+e/6Gmjv9vkG3/bI5r/2yOa/9sjmv/bI5r/2yOa/9sjmv/bI5r/2yOa/9sjmr/bI1q/2yNav9sjWr/bI1q/2uNav9rjWr/a41q/16GZI0AAAACk5OTFu/v78X5+fn/+fn5//n5+f/5+fr/+fn5//n5+f/5+fn/+fn5//n5+f/8+vv/wOfV/0vCi/9Kwor/SsKK/0rCiv9Kwor/SsKK/0rCiv9Kwor/SsKK/0rCiv9Kwor/SsKK/0rCiv9Kwor/SsKK/0nAif9Jv4j/RreCxStxUBbDw8Mz9PT05fn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//z6+/+/59X/TMSM/0rDi/9Kw4v/SsOL/0rDi/9Kw4v/SsOL/0rDi/9Kw4v/SsOL/0rDi/9Kw4v/SsOL/0rDi/9JwYn/SL6I/0i+iP9GuoXlOJVqM9XV1VD39/f0+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn//Pr7/7/n1f9Mw4z/SsOL/0rDi/9Kw4v/SsOL/0rDi/9Kw4v/SsOL/0rDi/9Kw4v/SsOL/0rDi/9Kw4v/ScCJ/0e8hv9HvIb/R7yG/0a6hfQ9oXJQ3d3dYfj4+Pn5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/8+vv/v+fV/0zDjP9Kw4v/SsOL/0rDi/9Kw4v/SsOL/0rDi/9Kw4v/SsOL/0rDi/9Kw4v/SsOL/0i/iP9GuoX/RrqE/0a6hP9GuoT/RrmD+T6ldWHh4eFl+Pj4+vn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//z6+/+/59X/TMOM/0rDi/9Kw4v/SsOL/0rDi/9Kw4v/SsOL/0rDi/9Kw4v/SsOL/0rDi/9Ivof/RbiD/0W3gv9FuIP/RbiD/0W4g/9Ft4L6PqZ2ZeDg4Fr4+Pj3+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn//Pr7/7/n1f9Mw4z/SsOL/0rDi/9Kw4v/SsOL/0rDi/9Kw4v/SsOL/0rDi/9Kw4v/SL2H/0W2gv9FtYH/RbWB/0W1gf9FtYH/RbWB/0S0gPc+o3Ra3NzcQvf39+/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/8+vv/v+fV/0zDjP9Kw4v/SsOL/0rDi/9Kw4v/SsOL/0rDi/9Kw4v/SsOL/0e8hv9EtID/RLOA/0SzgP9Es4D/RLOA/0SzgP9Es4D/Q7F/7zyecULS0tIj9fX12fn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//z6+/+/59X/SsOL/0jCiv9Iwor/SMKK/0jCiv9Iwor/SMKK/0rCiv9HuoT/RLF+/0Owff9EsH3/RLB9/0Swff9EsH3/RLB9/0Swff9CrnzZOJZrI56engjy8vKu+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+vn6/9/x6f+l38X/o9/D/6Tfw/+k38P/pN/D/6Tfw/+k38T/a9Kz/0DBof9BwKH/QcCh/0HAof9BwKD/QcCg/0G/oP9Bv6D/Qb+g/0C4mK4tbU4I////AO3t7Wr5+fn++fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+vn6//v6+//7+vv/+/r7//v6+//7+vv//Pr7//v6+/+B597/NdvN/znczf853M3/OdzN/znczf853M3/OdzN/znczf85283+NtHDakb/+gDr6+sA4eHhJPb29tv5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5/3/n3f823Mz/OdzN/znczf853M3/OdzN/znczf853M3/OdzN/zjay9s0x7kkNs/BALu7uwAAAAAA8vLygfn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/f+fd/zbbzP853M3/OdzN/znczf853M3/OdzN/znczf853M3/N9XHgQAAAAAspZoAAAAAAOzs7ADk5OQe9vb2zPn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f9/593/NtvM/znczf853M3/OdzN/znczf853M3/OdzN/zjay8w0yrweNtDCAAAAAAAAAAAAsLCwAP///wDv7+9O+Pj47Pn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5/3/n3f8228z/OdzN/znczf853M3/OdzN/znczf8528zsN9PETkD45gAonJEAAAAAAAAAAAAAAAAA1tbWALS0tAPy8vJv+Pj49Pn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/f+fd/zbbzP853M3/OdzN/znczf853M3/OdvM9DjWx24qoJUDMb2wAAAAAAAAAAAAAAAAAAAAAAAAAAAA4uLiANfX1wbz8/Nz+Pj48Pn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f9/593/NtvM/znczf853M3/OdzN/znbzPA418hzMr6xBjTIugAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4+PjANjY2ATy8vJZ+Pj42vn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5/37m3f8z28z/N9zN/znczf8528zaONbIWTK/sgQ0yLsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA1NTUAB8fHwDw8PAr9vb2nPj4+O35+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/vfDr/5Tq4v+L6ODt
YODUnDTTxSsAGBsAMrywAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOvr6wDj4+MG8vLyOvb29pD4+PjS+fn58vn5+f35+fn/+fn5//n5+f/5+fn/+fn5/fn5+fL6+PjS+vf3kPv09Tr/6u4G/+/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADh4eEA1tbWAu/v7xv09PRJ9vb2dvb29pf39/eo9/f3qPb29pf29vZ29PT0Se/v7xvW1tYC4eHhAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP/gB///gAH//gAAf/wAAD/4AAAf8AAAD+AAAAfAAAADwAAAA4AAAAGAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAGAAAABwAAAA8AAAAPgAAAH4AAAB/AAAA/4AAAf/gAAf/8AAP//wAP/',red:'data:image/vndmicrosofticon;base64,AAABAAIAEBAAAAEAIABoBAAAJgAAACAgAAABACAAqBAAAI4EAAAoAAAAEAAAACAAAAABACAAAAAAAAAEAAASCwAAEgsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQxmbAC0RagpDGZs8ShysdkwdspdMHbKXShysdkMZmzwuEWoKQxmcAAAAAAAAAAAAAAAAAAAAAAAmDlgAAAAAAUQanzRPHrilUx/B6VQgxf1VIMb/VSDG/1Qgxf1TH8DpTh22pUMZnDQAAAABJQ5XAAAAAAB7ensA//8AAFUrr09SH8DdVSDG/1Ugxv9VIMb/VSDG/1Ugxv9UH8P/Ux/B/1IfwP9QHrrdRxqlTwAAAAAoD14A+fn5ANzf1zmMatPfVB7G/1Ugxv9VIMb/VSDG/1Ugxv9TH8L/UR68/1AevP9QHrz/UR68/04dt99EGaA5UB67ALS0sw3x8u+unYDd/1AZxP9THcX/Ux3F/1Qexf9THr//Tx23/08ctv9PHbb/Tx22/08dtv9PHbb/SxuurjkSfg3h4eFK+Pj38LWf5P97UtL/fVXS/31V0/9fOcz/SSfC/0knwP9JJ8D/SSfA/0knwP9JJ8D/SSfA/0gnv/A/KLNK7e3tjPn5+f/29fj/8vD3//Px9//y8Pf/fILz/zQ/8P83QvD/N0Lw/zdC8P83QvD/N0Lw/zdB7/82QOz/Mz3gjPHx8bL5+fn/+fn5//n6+f/5+vn/+fn5/36G9v8yQPT/NkP0/zZD9P82Q/T/NkP0/zVC8v80QOz/M0Dq/zI+47Lz8/O2+fn5//n5+f/5+fn/+fn5//n5+f99hvb/MkD0/zZD9P82Q/T/NkP0/zVC8f8zP+f/Mj7k/zI+5P8xPd628vLymfn5+f/5+fn/+fn5//n5+f/5+fn/gYn2/zdE9P87R/T/O0f0/zZF8P8yQOP/MT/e/zE/3v8xP97/Lz3ZmfHx8Vz4+Pj3+fn5//n5+f/5+fn/+fn5/9fZ+P/Bxfj/wsb4/7vD+P87j/X/Dnzx/xF98f8RffH/EXzw9xZv5Vzr6+sY9/f3xvn5+f/5+fn/+fn5//n5+f/7+/n//Pz5//38+f/x+Pn/OrD+/wCY//8Amf//AJn//wCZ/cYAlPMY////APT09Fb4+Pjy+fn5//n5+f/5+fn/+fn5//n5+f/6+fn/7vX5/zmu/v8Al///AJj//wCY/vIAlfpWAJ//AOzs7ADm5uYF9vb2ePn5+fT5+fn/+fn5//n5+f/5+fn/+vn5/+71+f85rf7/AJb//wCY//QAlvx4AIzrBQCQ8gAAAAAA8PDwAOzs7Ab29vZd+Pj40vn5+fz5+fn/+fn5//n5+f/x9vn/bsP8/CGk/tIAlvxdAJDyBgCT9QAAAAAAAAAAAAAAAADn5+cAqKioAPT09CH39/dy+Pj4tvj4+NX4+PjV+Pj4tuvy93LD4fUhAAC7AESo6wAAAAAAAAAAAPAPAADAAwAAwAMAAIABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIABAACAAQAAwAMAAPAPAAAoAAAAIAAAAEAAAAABACAAAAAAAAAQAAASCwAAEgsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBgIMDBoKPRwoD14tLhFrNy4RazcoD14tGgo9HAYCDAwAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP+3/wANBR0KLxJuMUEYmGxKHKyiTh22x1Aeu9tRHr3jUR6941Aeu9tOHbbHShysokEYmGwvEm4xDQUeCv+6/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABoKPgAHAxAHNhR9PEkbqppRHr3hVCDE/FUgxv9VIMf/VSDH/1Ugxv9VIMb/VSDH/1Ugx/9VIMb/VCDE/FEevOFIG6maNRR8PAcDEAcaCj0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADVUP8AJg5YGUYao4BRH77kVSDG/1Ugxv9VIMb/VSDG/1Ugxv9VIMb/VSDG/1Ugxv9VIMb/VSDG/1Ugxv9VIMX/VB/E/1Qfw/9QHrvkRRmggCUOVhnQTv8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEA////ADITdSpMHbKvVCDE+1Ugxv9VIMb/VSDG/1Ugxv9VIMb/VSDG/1Ugxv9VIMb/VSDG/1Ugxv9VIMb/VCDE/1Mfwv9TH8H/Ux/B/1Mfwv9SH7/7ShytrzEScSr///8AAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwMDAMto/wBVPoYsUSC3v1Ugxv9VIMb/VSDG/1Ugxv9VIMb/VSDG/1Ugxv9VIMb/VSDG/1Ugxv9VIMb/VSDG/1QfxP9SHsD/Uh6//1Iev/9SHr//Uh6//1Iev/9SHr//SxywvzMTdyymPf8AAQACAAAAAAAAAAAAAAAAAAAAAAD19fUAnaKQHbep1rVfLcn/VB/G/1Ugxv9VIMb/VSDG/1Ugxv9VIMb/VSDG/1Ugxv9VIMb/VSDG/1Ugxv9UH8P/UR6+/1Eevf9RHr3/UR69/1Eevf9RHr3/UR69/1Eevf9RHr3/ShuttS0RaB1PHrkAAAAAAAAAAAAAAAAAeXl5ADY2Ngnf4NyO18zu/V8tyf9UH8b/VSDG/1Ugxv9VIMb/VSDG/1Ugxv9VIMb/VSDG/1Ugxv9VIMb/VB/D/1EevP9QHrr/UB67/1Aeu/9QHrv/UB67/1Aeu/9QHrv/UB67/1Aeu/9QHrr9R
hqkjhEGKAknDloAAAAAAAAAAAD///8AxMTES/b39O3Zzu//Xy3J/1Qfxv9VIMb/VSDG/1Ugxv9VIMb/VSDG/1Ugxv9VIMb/VSDG/1Mfwv9QHbr/Tx24/08duP9PHbj/Tx24/08duP9PHbj/Tx24/08duP9PHbj/Tx24/08duf9NHLTtPheRS5s5/wAAAAAAm5ubAHBwcA/o6Oix+/z6/9jO7/9fLcn/VB/G/1Ugxv9VIMb/VSDG/1Ugxv9VIMb/VSDG/1Ugxv9TH8H/Tx24/04dtv9OHbb/Th22/04dtv9OHbb/Th22/04dtv9OHbb/Th22/04dtv9OHbb/Th22/04dtv9JG6mxIw1RDzAScQD///8AycnJRfX19fD7/Pr/2M3v/1wqyP9SHMX/UhzF/1Icxf9SHMX/UhzF/1Icxf9THcX/Ux7A/04ctf9NHLL/Thyz/04cs/9NHLP/TRyz/00cs/9OHLP/Thyz/04cs/9OHLP/Thyz/04cs/9NHLP/Thyz/0wcsPA/Fo9FYyTkAAAAAALk5OSN+fn5//r6+f/n4vT/noDd/5Z22v+Wdtr/lnba/5Z22v+Wdtr/mHfb/35g1/9KMMr/SC/H/0gvx/9IL8f/SC/H/0gvx/9IL8b/SC/G/0gvxv9HL8b/Ry/G/0cvxv9HL8b/Ry/G/0cvxv9HL8X/Ry7F/z8tuI0AAAACk5OTFu/v78X5+fn/+fn5//n5+f/6+vn/+fr5//n6+f/5+vn/+fr5//n6+f/9/fn/ub73/zhF8v82Q/L/NkPy/zZD8v82Q/L/NkPy/zZD8v82Q/L/NkPy/zZD8v82Q/L/NkPy/zZD8v82Q/L/NkPy/zVC8f81QvD/Mz/mxR8njhbDw8Mz9PT05fn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//z8+f+5vff/OEX0/zZD9P82Q/T/NkP0/zZD9P82Q/T/NkP0/zZD9P82Q/T/NkP0/zZD9P82Q/T/NkP0/zZD9P81QvH/NEHv/zRB7/8zQOrlKTO6M9XV1VD39/f0+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn//Pz5/7m99/84RfT/NkP0/zZD9P82Q/T/NkP0/zZD9P82Q/T/NkP0/zZD9P82Q/T/NkP0/zZD9P82Q/T/NULw/zRA7P80QOv/NEDr/zNA6fQsN8lQ3d3dYfj4+Pn5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/8/Pn/ub33/zhF9P82Q/T/NkP0/zZD9P82Q/T/NkP0/zZD9P82Q/T/NkP0/zZD9P82Q/T/NkP0/zVB7/8zQOn/Mz/o/zM/6P8zQOj/Mz/n+S04zmHh4eFl+Pj4+vn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//z8+f+5vff/OEX0/zZD9P82Q/T/NkP0/zZD9P82Q/T/NkP0/zZD9P82Q/T/NkP0/zZD9P80Qe7/Mz/m/zM/5f8zP+b/Mz/m/zM/5v8yP+X6LjnPZeDg4Fr4+Pj3+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn//Pz5/7m99/84RfT/NkP0/zZD9P82Q/T/NkP0/zZD9P82Q/T/NkP0/zZD9P82Q/T/NEHs/zI+4/8yPuP/Mj7j/zI+4/8yPuP/Mj7j/zI+4fctOMxa3NzcQvf39+/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/8/Pn/ub33/zhF9P82Q/T/NkP0/zZD9P82Q/T/NkP0/zZD9P82Q/T/NkP0/zRA6/8xPeH/MT3g/zE94P8xPeD/MT3g/zE94P8xPeD/MT3e7ys2xkLS0tIj9fX12fn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//z8+f+4vff/NkP0/zNB9P80QfT/NEH0/zRB9P80QfT/NEH0/zZC8/81P+n/Mjze/zI73f8yO93/Mjvd/zI73f8yO93/Mjvd/zI73f8xO9rZKTO7I56engjy8vKu+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+/r5/9ze+P+covf/mqD3/5qg9/+aoPf/mqD3/5qg9/+aoPf/UoLz/x1p5/8eaeb/Hmnm/x5p5v8eaeX/Hmnl/x5p5f8eaOX/Hmjl/yBh3a4jJokI////AO3t7Wr5+fn++fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+vr5//z8+f/8/Pn//Pz5//z8+f/8/Pn//Pz5//z8+f9dvfz/AJf+/wCZ/v8Amf7/AJn+/wCZ/v8Amf7/AJn+/wCZ/v8AmP7+AJLxagC4/wDr6+sA4eHhJPb29tv5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5/1u8/f8Alv//AJj//wCY//8AmP//AJj//wCY//8AmP//AJj//wCW/NsAieckAI/xALu7uwAAAAAA8vLygfn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/W7z9/wCW//8AmP//AJj//wCY//8AmP//AJj//wCY//8AmP//AJP3gQAAAAAAcr8AAAAAAOzs7ADk5OQe9vb2zPn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f9bvP3/AJb//wCY//8AmP//AJj//wCY//8AmP//AJj//wCW/MwAi+oeAJDxAAAAAAAAAAAAsLCwAP///wDv7+9O+Pj47Pn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5/1u8/f8Alv//AJj//wCY//8AmP//AJj//wCY//8Al/7sAJL0TgCr/wAAa7QAAAAAAAAAAAAAAAAA1tbWALS0tAPy8vJv+Pj49Pn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/W7z9/wCW//8AmP//AJj//wCY//8AmP//AJj+9ACU+G4AbrgDAIPaAAAAAAAAAAAAAAAAAAAAAAAAAAAA4uLiANfX1wbz8/Nz+Pj48Pn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f9bvP3/AJb//wCY//8AmP//AJj//wCY/vAAlflzAITcBgCK5wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4+PjANjY2ATy8vJZ+Pj42vn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5/1u7/f8Alf//AJf/
/wCY//8Al/7aAJT4WQCE3AQAiucAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA1NTUAB8fHwDw8PAr9vb2nPj4+O35+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/rNv7/3bG/P9rwfztM6r7nACR9SsAER0AAIPZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOvr6wDj4+MG8vLyOvb29pD4+PjS+fn58vn5+f35+fn/+fn5//n5+f/5+fn/+fn5/fn5+fL6+fjS/Pj2kP338jr/+eIG//fqAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADh4eEA1tbWAu/v7xv09PRJ9vb2dvb29pf39/eo9/f3qPb29pf29vZ29PT0Se/v7xvW1tYC4eHhAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP/gB///gAH//gAAf/wAAD/4AAAf8AAAD+AAAAfAAAADwAAAA4AAAAGAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAGAAAABwAAAA8AAAAPgAAAH4AAAB/AAAA/4AAAf/gAAf/8AAP//wAP/',yellow:'data:image/vndmicrosofticon;base64,AAABAAIAICAAAAEAIACoEAAAJgAAABAQAAABACAAaAQAAM4QAAAoAAAAIAAAAEAAAAABACAAAAAAAAAQAAASCwAAEgsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAZKhQAOWAiAEV0KgBFdCoAOWAiABkqFAAAAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA8ZAAAChAHAEp8JwBvu10AgNeSAInluACN7c4Aj/DXAI/w1wCN7c4AieW4AIDXkgBvu10ASnwnAAoQBwA8ZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAbLgAAAAAFAFmWMwB/1YwAj/DXAJX7+QCY//8AmP//AJj//wCY//8AmP//AJj//wCY//8AmP//AJX7+QCP79cAftWMAFmVMwAAAAUAGy4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA7v8AAD1mFQB6zXYAkPLdAJf+/gCY//8AmP//AJj//wCY//8AmP//AJj//wCY//8AmP//AJj//wCY//8AmP7/AJf+/wCV/P4AjvDdAHjKdgA8ZBUA6f8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP//AABWkCYAh+KoAJb8+QCY//8AmP//AJj//wCY//8AmP//AJj//wCY//8AmP//AJj//wCY//8AmP//AJf+/wCV+v8AlPr/AJT6/wCV+v8Akvf5AIPdqABTjCYA//8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgICABb//wAka5wqAozquwCY/v8AmP//AJj//wCY//8AmP//AJj//wCY//8AmP//AJj//wCY//8AmP//AJj//wCX/f8Ak/j/AJP3/wCT9/8Ak/f/AJP3/wCT9/8Akvb/AIbiuwBZlyoA//8AAAECAAAAAAAAAAAAAAAAAAAAAADz8/MAqJaJHZDD5rQLnP7/AJj//wCY//8AmP//AJj//wCY//8AmP//AJj//wCY//8AmP//AJj//wCY//8Alvz/AJL2/wCR9P8AkfT/AJH0/wCR9P8AkfT/AJH0/wCR9P8AkfT/AITftABQhh0AjO0AAAAAAAAAAAAAAAAAfX19ADw8PAni3tuPuuD5/Quc//8AmP//AJj//wCY//8AmP//AJj//wCY//8AmP//AJj//wCY//8AmP//AJb8/wCQ8/8Aj/H/AI/x/wCP8f8Aj/H/AI/x/wCP8f8Aj/H/AI/x/wCP8f8AjvD9AH7UjwAiOQkASHkAAAAAAAgICAD///8AxcXFT/j19O+94vv/Cpz//wCY//8AmP//AJj//wCY//8AmP//AJj//wCY//8AmP//AJj//wCV+/8Aj/H/AI3u/wCN7v8Aje7/AI3u/wCN7v8Aje7/AI3u/wCN7v8Aje7/AI3u/wCO7v8AiunvAHC8TwD//wAABQgAqKioAHp6ehHp6em3/fv5/7zh+v8KnP//AJj//wCY//8AmP//AJj//wCY//8AmP//AJj//wCY//8Alfr/AI7u/wCM6/8AjOv/AIzr/wCM6/8AjOv/AIzr/wCM6/8AjOv/AIzr/wCM6/8AjOv/AIzr/wCM6/8Ag9y3AERyEQBenQD///8AzMzMTfb29vP9+/n/vOH6/wqb//8Alv//AJb//wCW//8Alv//AJb//wCW//8Al///AJT5/wCL6/8Aiej/AIno/wCJ6P8Aiej/AIno/wCJ6P8Aiej/AIno/wCJ6P8Aiej/AIno/wCJ6P8Aiej/AIno/wCH5fMAb75NAMP/AAAAAAXl5eWX+fn5//v6+f/T6vr/Wbv9/0+3/f9Qt/3/ULf9/1C3/f9Qt/3/Ubj9/zew+/8InO//B5nr/weZ6/8Hmev/B5nq/weZ6v8Hmer/B5nq/weZ6v8Hmer/B5jq/weY6v8HmOn/B5jp/weY6f8HmOn/Bpjp/weP15cBAAAFpKSkHfDw8M/5+fn/+fn5//n5+f/1+Pn/9Pf5//T3+f/09/n/9Pf5//T3+f/4+Pn/o+T6/wq//f8Hv/3/CL/9/wi//f8Iv/3/CL/9/wi//f8Iv/3/CL/8/wi+/P8Ivvz/CL78/wi+/P8Ivvz/CL78/we9+/8HvPr/BrbxzwR9pR3Ly8tA9fX17Pn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//36+f+l5vv/CcL//wfB//8Hwf//B8H//wfB//8Hwf//B8H//wfB//8Hwf//B8H//wfB//8Hwf//B8H//wfB//8Hv/3/Br36/wa9+v8GuvbsBZnLQNra2mD39/f4+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn//fr5/6Xm+/8Jwf//B8H//wfB//8Hwf//B8H//wfB//8Hwf//B8H//wfB//8Hwf//B8H//wfB//8Hwf//B778/wa79/8Guvf/Brr3/wa59fgFo9hg4uLidPj4+P35+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/9+vn/peb7/wnB//8Hwf//B8H//wfB//8Hwf//B8H//wfB//8Hwf//B8
H//wfB//8Hwf//B8H//we++/8GufX/Brj0/wa49P8GuPT/Brfz/QWm3XTk5OR6+Pj4/fn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//36+f+l5vv/CcH//wfB//8Hwf//B8H//wfB//8Hwf//B8H//wfB//8Hwf//B8H//wfB//8Hvfr/Brfy/wa28f8GtvH/Brbx/wa28f8GtfD9BafdeuXl5W/4+Pj8+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn//fr5/6Xm+/8Jwf//B8H//wfB//8Hwf//B8H//wfB//8Hwf//B8H//wfB//8Hwf//B7z5/wa17/8GtO7/BrTu/wa07v8GtO7/BrTu/waz7fwFpdtv4eHhVvj4+Pb5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/9+vn/peb7/wnB//8Hwf//B8H//wfB//8Hwf//B8H//wfB//8Hwf//B8H//we7+P8Gsu3/BrHr/wax6/8Gsev/BrHr/wax6/8Gsev/BrDq9gWh1Vba2toz9vb25vn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//36+f+k5fv/BsH//wPA//8DwP//A8D//wPA//8DwP//A8D//wXA//8Guvb/BrDq/wau6P8Gruj/Bq7o/wau6P8Gruj/Bq7o/wau6P8GreXmBZnLM7+/vxH09PTC+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+/r5/83v+v9x2vz/btn9/2/Z/f9v2f3/b9n9/2/Z/f9v2f3/RdL5/yXG7v8mxOz/JsTs/ybE6/8mxOv/JsTr/yXE6/8lw+v/JcPr/yK95cIQirAR////APDw8IH5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+vn5//r5+f/6+fn/+vn5//r5+f/6+fn/+vn5//r5+f+H8Pz/Oer+/zzq/v886v7/POr+/zzq/v886v7/POr+/zzq/v886v3/OuDzgWz//wD09PQA5+fnNPf39+n5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5/4Xw/f846///O+v//zvr//876///O+v//zvr//876///O+v//zvp/ek32+00Ouf6AMrKygCzs7MF8/Pzmvn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/hfD9/zjr//876///O+v//zvr//876///O+v//zvr//876///OuX5miqptwUwv88AAAAAAPPz8wDp6eku9/f33fn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f+F8P3/OOv//zvr//876///O+v//zvr//876///O+v//zvp/d033O8uOuX5AAAAAAAAAAAAvr6+AP///wDx8fFl+Pj49fn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5/4Xw/f846///O+v//zvr//876///O+v//zvr//876v71OeP2ZY7//wAus8IAAAAAAAAAAAAAAAAA4ODgANPT0wj09PSI+fn5+vn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/hfD9/zjr//876///O+v//zvr//876///O+v/+jrm+Ygyx9gINdPlAAAAAAAAAAAAAAAAAAAAAAAAAAAA6enpAOHh4Q309PSM+fn5+Pn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f+F8P3/OOv//zvr//876///O+v//zvr//g65/qMNtXnDTjd7wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA6enpAOLi4gr09PRw+Pj45/n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5/4Pw/f816///Oev//zvr//876v7nOub5cDbW5wo33O4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ODgANHR0QLx8fE89/f3sfn5+fX5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/t/T7/4Xx/f+A8P31Xez8sTnk9zwuxdUCNtTkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAREREAP///wDo6OgM9PT0Tff396T4+Pjf+fn5+Pn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+fj5+Pjf9vf3pPL09E3m6OgM7/3/APtbOwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACMjIwD19fUA4uLiBvHx8Sn19fVd9vb2jff396739/e99/f3vff396729vaN9fX1XfHx8Snl4uIG9PX1AFEnIgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP/wD///gAH//gAAf/wAAD/4AAAf8AAAD+AAAAfAAAADwAAAA4AAAAGAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAGAAAABgAAAAcAAAAPgAAAH4AAAB/AAAA/4AAAf/AAAP/8AAP//wAP/KAAAABAAAAAgAAAAAQAgAAAAAAAABAAAEgsAABILAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABorgAAS34IAHTDNQCC22wAh+OMAIfjjACC22wAdMQ1AEx/CABorwAAAAAAAAAAAAAAAAAAAAAAAEBrAAAAAAAAecswAIzsngCU+OUAl/37AJj+/wCY/v8Al/37AJP35QCL6Z4Ad8gwAAAAAAA+aQAAAAAAcXd6AP8AAAAOiNtNAJP32gCY//8AmP//AJj//wCY//8AmP//AJb8/wCU+f8Ak/j/AI7w2gB+1E0AAAAAAEd4APn7/ADc2NU5T7P33gCX//8AmP//AJj//wCY//8AmP//AJX6/wCR8/8AkPL/AJDz/wCQ8/8AjOzeAHrOOQCR9AC3t7cO8e/vsGnA/f8Alf//AJf//wCX//8Al///AJP4/wCN7v8AjOz/AIzs/wCM7P8AjOz/AIzt/wCG4rAAY6oO4uLiT/j39/GIzfz/Mav+/zSs/v80rP7/FaH5/wOV7f8DlOv/A5Tr/wOU6/8DlOv/A5Tr/wOU6/8Dk+jxBIvVT+3t7ZT5+fn/8fb5/+vz+f/r9Pn/6vP5/
1nR+/8EvPz/B738/we9/P8Hvfz/B738/we9/P8HvPv/B7r4/wax7ZTy8vK7+fn5//n5+f/6+fn/+vn5//n5+f9e1f3/A8D//wfB//8Hwf//B8H//wfB//8HwP3/Brv3/wa59f8GtO678/Pzwfn5+f/5+fn/+fn5//n5+f/4+fn/XtX9/wPA//8Hwf//B8H//wfB//8Hv/z/Brfz/wa17/8Gte//BrHqwfPz86X5+fn/+fn5//n5+f/5+fn/+Pn5/2DW/f8Gwf//CsL//wrC//8Jv/v/CLXu/wix6f8Isen/CLHp/wet5KXy8vJo+fn5+vn5+f/5+fn/+fn5//n5+f/I7vr/quf7/6zn+/+m5/v/Tdz5/yzV9P8u1fT/LtX0/y7U8/ooyOpo7OzsH/f399D5+fn/+fn5//n5+f/5+fn//Pr5//36+f/++vn/9fn5/2rv/v857P//POz//zzs//886/3QOuLzH////wD09PRh+fn59vn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//H4+f9o7v7/OOv//zvr//876//2Ouf6YUH//wDu7u4A6enpB/b29oT5+fn3+fn5//n5+f/5+fn/+fn5//n5+f/x+Pn/Zu7+/zfr//876//3Ouj8hDfc7wc44PMAAAAAAPHx8QDu7u4I9vb2aPj4+Nn5+fn9+fn5//n5+f/5+fn/8/n5/4zx/P1S7P7ZO+n8aDfh9Ag55PcAAAAAAAAAAAAAAAAA6+vrAN/f3wH19fUo9/f3fvj4+MH4+Pje+Pj43vj4+MHq9vh+w/H2KADM5wFk4e8AAAAAAAAAAADwDwAA4AcAAMADAACAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAQAAgAEAAMADAADgBwAA'};return{FaviconsByHue:FaviconsByHue};});'use strict';tr.exportTo('tr.ui.b',function(){function HotKey(dict){if(dict.eventType===undefined)
-throw new Error('eventType must be given');if(dict.keyCode===undefined&&dict.keyCodes===undefined)
-throw new Error('keyCode or keyCodes must be given');if(dict.keyCode!==undefined&&dict.keyCodes!==undefined)
-throw new Error('Only keyCode or keyCodes can be given');if(dict.callback===undefined)
-throw new Error('callback must be given');this.eventType_=dict.eventType;this.keyCodes_=[];if(dict.keyCode)
-this.pushKeyCode_(dict.keyCode);else if(dict.keyCodes){dict.keyCodes.forEach(this.pushKeyCode_,this);}
-this.useCapture_=!!dict.useCapture;this.callback_=dict.callback;this.thisArg_=dict.thisArg!==undefined?dict.thisArg:undefined;this.helpText_=dict.helpText!==undefined?dict.helpText:undefined;}
-HotKey.prototype={get eventType(){return this.eventType_;},get keyCodes(){return this.keyCodes_;},get helpText(){return this.helpText_;},call:function(e){this.callback_.call(this.thisArg_,e);},pushKeyCode_:function(keyCode){this.keyCodes_.push(keyCode);}};return{HotKey:HotKey};});'use strict';Polymer('tv-ui-b-hotkey-controller',{created:function(){this.isAttached_=false;this.globalMode_=false;this.slavedToParentController_=undefined;this.curHost_=undefined;this.childControllers_=[];this.bubblingKeyDownHotKeys_={};this.capturingKeyDownHotKeys_={};this.bubblingKeyPressHotKeys_={};this.capturingKeyPressHotKeys_={};this.onBubblingKeyDown_=this.onKey_.bind(this,false);this.onCapturingKeyDown_=this.onKey_.bind(this,true);this.onBubblingKeyPress_=this.onKey_.bind(this,false);this.onCapturingKeyPress_=this.onKey_.bind(this,true);},attached:function(){this.isAttached_=true;var host=this.findHost_();if(host.__hotkeyController)
-throw new Error('Multiple hotkey controllers attached to this host');host.__hotkeyController=this;this.curHost_=host;var parentElement;if(host.parentElement)
-parentElement=host.parentElement;else
-parentElement=host.parentNode.host;var parentController=tr.b.getHotkeyControllerForElement(parentElement);if(parentController){this.slavedToParentController_=parentController;parentController.addChildController_(this);return;}
-host.addEventListener('keydown',this.onBubblingKeyDown_,false);host.addEventListener('keydown',this.onCapturingKeyDown_,true);host.addEventListener('keypress',this.onBubblingKeyPress_,false);host.addEventListener('keypress',this.onCapturingKeyPress_,true);},detached:function(){this.isAttached_=false;var host=this.curHost_;if(!host)
-return;delete host.__hotkeyController;this.curHost_=undefined;if(this.slavedToParentController_){this.slavedToParentController_.removeChildController_(this);this.slavedToParentController_=undefined;return;}
-host.removeEventListener('keydown',this.onBubblingKeyDown_,false);host.removeEventListener('keydown',this.onCapturingKeyDown_,true);host.removeEventListener('keypress',this.onBubblingKeyPress_,false);host.removeEventListener('keypress',this.onCapturingKeyPress_,true);},addChildController_:function(controller){var i=this.childControllers_.indexOf(controller);if(i!==-1)
-throw new Error('Controller already registered');this.childControllers_.push(controller);},removeChildController_:function(controller){var i=this.childControllers_.indexOf(controller);if(i===-1)
-throw new Error('Controller not registered');this.childControllers_.splice(i,1);return controller;},getKeyMapForEventType_:function(eventType,useCapture){if(eventType==='keydown'){if(!useCapture)
-return this.bubblingKeyDownHotKeys_;else
-return this.capturingKeyDownHotKeys_;}else if(eventType==='keypress'){if(!useCapture)
-return this.bubblingKeyPressHotKeys_;else
-return this.capturingKeyPressHotKeys_;}else{throw new Error('Unsupported key event');}},addHotKey:function(hotKey){if(!(hotKey instanceof tr.ui.b.HotKey))
-throw new Error('hotKey must be a tr.ui.b.HotKey');var keyMap=this.getKeyMapForEventType_(hotKey.eventType,hotKey.useCapture);for(var i=0;i<hotKey.keyCodes.length;i++){var keyCode=hotKey.keyCodes[i];if(keyMap[keyCode])
-throw new Error('Key is already bound for keyCode='+keyCode);}
-for(var i=0;i<hotKey.keyCodes.length;i++){var keyCode=hotKey.keyCodes[i];keyMap[keyCode]=hotKey;}
-return hotKey;},removeHotKey:function(hotKey){if(!(hotKey instanceof tr.ui.b.HotKey))
-throw new Error('hotKey must be a tr.ui.b.HotKey');var keyMap=this.getKeyMapForEventType_(hotKey.eventType,hotKey.useCapture);for(var i=0;i<hotKey.keyCodes.length;i++){var keyCode=hotKey.keyCodes[i];if(!keyMap[keyCode])
-throw new Error('Key is not bound for keyCode='+keyCode);keyMap[keyCode]=hotKey;}
-for(var i=0;i<hotKey.keyCodes.length;i++){var keyCode=hotKey.keyCodes[i];delete keyMap[keyCode];}
-return hotKey;},get globalMode(){return this.globalMode_;},set globalMode(globalMode){var wasAttached=this.isAttached_;if(wasAttached)
-this.detached();this.globalMode_=!!globalMode;if(wasAttached)
-this.attached();},get topmostConroller_(){if(this.slavedToParentController_)
-return this.slavedToParentController_.topmostConroller_;return this;},childRequestsGeneralFocus:function(child){var topmost=this.topmostConroller_;if(topmost.curHost_){if(topmost.curHost_.hasAttribute('tabIndex')){topmost.curHost_.focus();}else{if(document.activeElement)
-document.activeElement.blur();}}else{if(document.activeElement)
-document.activeElement.blur();}},childRequestsBlur:function(child){child.blur();var topmost=this.topmostConroller_;if(topmost.curHost_){topmost.curHost_.focus();}},findHost_:function(){if(this.globalMode_){return document.body;}else{if(this.parentElement)
-return this.parentElement;var node=this;while(node.parentNode){node=node.parentNode;}
-return node.host;}},appendMatchingHotKeysTo_:function(matchedHotKeys,useCapture,e){var localKeyMap=this.getKeyMapForEventType_(e.type,useCapture);var localHotKey=localKeyMap[e.keyCode];if(localHotKey)
-matchedHotKeys.push(localHotKey);for(var i=0;i<this.childControllers_.length;i++){var controller=this.childControllers_[i];controller.appendMatchingHotKeysTo_(matchedHotKeys,useCapture,e);}},onKey_:function(useCapture,e){if(useCapture==false&&e.path[0].tagName=='INPUT')
-return;var sortedControllers;var matchedHotKeys=[];this.appendMatchingHotKeysTo_(matchedHotKeys,useCapture,e);if(matchedHotKeys.length===0)
-return false;if(matchedHotKeys.length>1){throw new Error('More than one hotKey is currently unsupported');}
-var hotKey=matchedHotKeys[0];var prevented=0;prevented|=hotKey.call(e);return!prevented&&e.defaultPrevented;}});'use strict';tr.exportTo('tr.b',function(){function getHotkeyControllerForElement(refElement){var curElement=refElement;while(curElement){if(curElement.tagName==='tv-ui-b-hotkey-controller')
-return curElement;if(curElement.__hotkeyController)
-return curElement.__hotkeyController;if(curElement.parentElement){curElement=curElement.parentElement;continue;}
-curElement=findHost(curElement);}
-return undefined;}
-function findHost(initialNode){var node=initialNode;while(node.parentNode){node=node.parentNode;}
-return node.host;}
-return{getHotkeyControllerForElement:getHotkeyControllerForElement};});'use strict';Polymer('tr-ui-b-info-bar',{ready:function(){this.messageEl_=this.$.message;this.buttonsEl_=this.$.buttons;this.message='';this.visible=false;},get message(){return this.messageEl_.textContent;},set message(message){this.messageEl_.textContent=message;},get visible(){return!this.classList.contains('info-bar-hidden');},set visible(visible){if(visible)
-this.classList.remove('info-bar-hidden');else
-this.classList.add('info-bar-hidden');},removeAllButtons:function(){this.buttonsEl_.textContent='';},addButton:function(text,clickCallback){var button=document.createElement('button');button.textContent=text;button.addEventListener('click',clickCallback);this.buttonsEl_.appendChild(button);return button;}});'use strict';Polymer('tr-ui-b-info-bar-group',{ready:function(){this.messages_=[];},clearMessages:function(){this.messages_=[];this.updateContents_();},addMessage:function(text,opt_buttons){opt_buttons=opt_buttons||[];for(var i=0;i<opt_buttons.length;i++){if(opt_buttons[i].buttonText===undefined)
-throw new Error('buttonText must be provided');if(opt_buttons[i].onClick===undefined)
-throw new Error('onClick must be provided');}
-this.messages_.push({text:text,buttons:opt_buttons||[]});this.updateContents_();},updateContents_:function(){this.$.messages.textContent='';this.messages_.forEach(function(message){var bar=document.createElement('tr-ui-b-info-bar');bar.message=message.text;bar.visible=true;message.buttons.forEach(function(button){bar.addButton(button.buttonText,button.onClick);},this);this.$.messages.appendChild(bar);},this);}});'use strict';tr.exportTo('tr.ui',function(){var Task=tr.b.Task;function FindController(brushingStateController){this.brushingStateController_=brushingStateController;this.filterHits_=new tr.model.EventSet();this.currentHitIndex_=-1;this.activePromise_=Promise.resolve();this.activeTask_=undefined;};FindController.prototype={__proto__:Object.prototype,get model(){return this.brushingStateController_.model;},get brushingStateController(){return this.brushingStateController_;},enqueueOperation_:function(operation){var task;if(operation instanceof tr.b.Task)
-task=operation;else
-task=new tr.b.Task(operation,this);if(this.activeTask_){this.activeTask_=this.activeTask_.enqueue(task);}else{this.activeTask_=task;this.activePromise_=Task.RunWhenIdle(this.activeTask_);this.activePromise_.then(function(){this.activePromise_=undefined;this.activeTask_=undefined;}.bind(this));}},startFiltering:function(filterText){var sc=this.brushingStateController_;if(!sc)
-return;this.enqueueOperation_(function(){this.filterHits_=new tr.model.EventSet();this.currentHitIndex_=-1;}.bind(this));var stateFromString;try{stateFromString=sc.uiStateFromString(filterText);}catch(e){this.enqueueOperation_(function(){var overlay=new tr.ui.b.Overlay();overlay.textContent=e.message;overlay.title='UI State Navigation Error';overlay.visible=true;});return this.activePromise_;}
-if(stateFromString!==undefined){this.enqueueOperation_(sc.navToPosition.bind(this,stateFromString,true));}else{if(filterText.length===0){this.enqueueOperation_(sc.findTextCleared.bind(sc));}else{var filter=new tr.c.FullTextFilter(filterText);var filterHits=new tr.model.EventSet();this.enqueueOperation_(sc.addAllEventsMatchingFilterToSelectionAsTask(filter,filterHits));this.enqueueOperation_(function(){this.filterHits_=filterHits;sc.findTextChangedTo(filterHits);}.bind(this));}}
-return this.activePromise_;},get filterHits(){return this.filterHits_;},get currentHitIndex(){return this.currentHitIndex_;},find_:function(dir){var firstHit=this.currentHitIndex_===-1;if(firstHit&&dir<0)
-this.currentHitIndex_=0;var N=this.filterHits.length;this.currentHitIndex_=(this.currentHitIndex_+dir+N)%N;if(!this.brushingStateController_)
-return;this.brushingStateController_.findFocusChangedTo(this.filterHits.subEventSet(this.currentHitIndex_,1));},findNext:function(){this.find_(1);},findPrevious:function(){this.find_(-1);}};return{FindController:FindController};});'use strict';tr.exportTo('tr.model',function(){function Annotation(){this.guid_=tr.b.GUID.allocate();this.view_=undefined;};Annotation.fromDictIfPossible=function(args){if(args.typeName===undefined)
-throw new Error('Missing typeName argument');var typeInfo=Annotation.findTypeInfoMatching(function(typeInfo){return typeInfo.metadata.typeName===args.typeName;});if(typeInfo===undefined)
-return undefined;return typeInfo.constructor.fromDict(args);};Annotation.fromDict=function(){throw new Error('Not implemented');}
-Annotation.prototype={get guid(){return this.guid_;},onRemove:function(){},toDict:function(){throw new Error('Not implemented');},getOrCreateView:function(viewport){if(!this.view_)
-this.view_=this.createView_(viewport);return this.view_;},createView_:function(){throw new Error('Not implemented');}};var options=new tr.b.ExtensionRegistryOptions(tr.b.BASIC_REGISTRY_MODE);tr.b.decorateExtensionRegistry(Annotation,options);Annotation.addEventListener('will-register',function(e){if(!e.typeInfo.constructor.hasOwnProperty('fromDict'))
-throw new Error('Must have fromDict method');if(!e.typeInfo.metadata.typeName)
-throw new Error('Registered Annotations must provide typeName');});return{Annotation:Annotation};});'use strict';tr.exportTo('tr.ui.annotations',function(){function AnnotationView(viewport,annotation){}
-AnnotationView.prototype={draw:function(ctx){throw new Error('Not implemented');}};return{AnnotationView:AnnotationView};});'use strict';tr.exportTo('tr.ui.annotations',function(){function XMarkerAnnotationView(viewport,annotation){this.viewport_=viewport;this.annotation_=annotation;}
-XMarkerAnnotationView.prototype={__proto__:tr.ui.annotations.AnnotationView.prototype,draw:function(ctx){var dt=this.viewport_.currentDisplayTransform;var viewX=dt.xWorldToView(this.annotation_.timestamp);ctx.beginPath();tr.ui.b.drawLine(ctx,viewX,0,viewX,ctx.canvas.height);ctx.strokeStyle=this.annotation_.strokeStyle;ctx.stroke();}};return{XMarkerAnnotationView:XMarkerAnnotationView};});'use strict';tr.exportTo('tr.model',function(){function XMarkerAnnotation(timestamp){tr.model.Annotation.apply(this,arguments);this.timestamp=timestamp;this.strokeStyle='rgba(0, 0, 255, 0.5)';}
-XMarkerAnnotation.fromDict=function(dict){return new XMarkerAnnotation(dict.args.timestamp);}
-XMarkerAnnotation.prototype={__proto__:tr.model.Annotation.prototype,toDict:function(){return{typeName:'xmarker',args:{timestamp:this.timestamp}};},createView_:function(viewport){return new tr.ui.annotations.XMarkerAnnotationView(viewport,this);}};tr.model.Annotation.register(XMarkerAnnotation,{typeName:'xmarker'});return{XMarkerAnnotation:XMarkerAnnotation};});'use strict';tr.exportTo('tr.ui.b',function(){var MOUSE_SELECTOR_MODE={};MOUSE_SELECTOR_MODE.SELECTION=0x1;MOUSE_SELECTOR_MODE.PANSCAN=0x2;MOUSE_SELECTOR_MODE.ZOOM=0x4;MOUSE_SELECTOR_MODE.TIMING=0x8;MOUSE_SELECTOR_MODE.ROTATE=0x10;MOUSE_SELECTOR_MODE.ALL_MODES=0x1F;var MOUSE_SELECTOR_MODE_INFOS={};MOUSE_SELECTOR_MODE_INFOS[MOUSE_SELECTOR_MODE.PANSCAN]={mode:MOUSE_SELECTOR_MODE.PANSCAN,title:'pan',eventNames:{enter:'enterpan',begin:'beginpan',update:'updatepan',end:'endpan',exit:'exitpan'},activeBackgroundPosition:'-30px -10px',defaultBackgroundPosition:'0 -10px'};MOUSE_SELECTOR_MODE_INFOS[MOUSE_SELECTOR_MODE.SELECTION]={mode:MOUSE_SELECTOR_MODE.SELECTION,title:'selection',eventNames:{enter:'enterselection',begin:'beginselection',update:'updateselection',end:'endselection',exit:'exitselection'},activeBackgroundPosition:'-30px -40px',defaultBackgroundPosition:'0 -40px'};MOUSE_SELECTOR_MODE_INFOS[MOUSE_SELECTOR_MODE.ZOOM]={mode:MOUSE_SELECTOR_MODE.ZOOM,title:'zoom',eventNames:{enter:'enterzoom',begin:'beginzoom',update:'updatezoom',end:'endzoom',exit:'exitzoom'},activeBackgroundPosition:'-30px -70px',defaultBackgroundPosition:'0 -70px'};MOUSE_SELECTOR_MODE_INFOS[MOUSE_SELECTOR_MODE.TIMING]={mode:MOUSE_SELECTOR_MODE.TIMING,title:'timing',eventNames:{enter:'entertiming',begin:'begintiming',update:'updatetiming',end:'endtiming',exit:'exittiming'},activeBackgroundPosition:'-30px -100px',defaultBackgroundPosition:'0 -100px'};MOUSE_SELECTOR_MODE_INFOS[MOUSE_SELECTOR_MODE.ROTATE]={mode:MOUSE_SELECTOR_MODE.ROTATE,title:'rotate',eventNames:{enter:'enterrotate',begin:'beginrotate',update:'updaterotate',end:'endrotate',exit:'exitrotate'},activeBackgroundPosition:'-30px -130px',defaultBackgroundPosition:'0 -130px'};return{MOUSE_SELECTOR_MODE_INFOS:MOUSE_SELECTOR_MODE_INFOS,MOUSE_SELECTOR_MODE:MOUSE_SELECTOR_MODE};});'use strict';Polymer('tr-ui-b-mouse-mode-icon',{publish:{modeName:{value:undefined,reflect:true}},created:function(){this.active_=false;this.acceleratorKey_=undefined;},ready:function(){this.updateContents_();},get mode(){return tr.ui.b.MOUSE_SELECTOR_MODE[this.modeName];},set mode(mode){var modeInfo=tr.ui.b.MOUSE_SELECTOR_MODE_INFOS[mode];var modeName=tr.b.findFirstKeyInDictMatching(tr.ui.b.MOUSE_SELECTOR_MODE,function(modeName,candidateMode){return candidateMode===mode;});if(modeName===undefined)
-throw new Error('Unknown mode');this.modeName=modeName;},modeNameChanged:function(){this.updateContents_();},get active(){return this.active_;},set active(active){this.active_=!!active;if(this.active_)
-this.classList.add('active');else
-this.classList.remove('active');this.updateContents_();},get acceleratorKey(){return this.acceleratorKey_;},set acceleratorKey(acceleratorKey){this.acceleratorKey_=acceleratorKey;this.updateContents_();},updateContents_:function(){if(this.modeName===undefined)
-return;var mode=this.mode;if(mode===undefined)
-throw new Error('Invalid mode');var modeInfo=tr.ui.b.MOUSE_SELECTOR_MODE_INFOS[mode];if(!modeInfo)
-throw new Error('Invalid mode');var title=modeInfo.title;if(this.acceleratorKey_)
-title=title+' ('+this.acceleratorKey_+')';this.title=title;var bp;if(this.active_)
-bp=modeInfo.activeBackgroundPosition;else
-bp=modeInfo.defaultBackgroundPosition;this.style.backgroundPosition=bp;}});'use strict';tr.exportTo('tr.ui.b',function(){var MOUSE_SELECTOR_MODE=tr.ui.b.MOUSE_SELECTOR_MODE;var MOUSE_SELECTOR_MODE_INFOS=tr.ui.b.MOUSE_SELECTOR_MODE_INFOS;var MIN_MOUSE_SELECTION_DISTANCE=4;var MODIFIER={SHIFT:0x1,SPACE:0x2,CMD_OR_CTRL:0x4};function isCmdOrCtrlPressed(event){if(tr.isMac)
-return event.metaKey;else
-return event.ctrlKey;}
-Polymer('tr-ui-b-mouse-mode-selector',{__proto__:HTMLDivElement.prototype,created:function(){this.supportedModeMask_=MOUSE_SELECTOR_MODE.ALL_MODES;this.initialRelativeMouseDownPos_={x:0,y:0};this.defaultMode_=MOUSE_SELECTOR_MODE.PANSCAN;this.settingsKey_=undefined;this.mousePos_={x:0,y:0};this.mouseDownPos_={x:0,y:0};this.onMouseDown_=this.onMouseDown_.bind(this);this.onMouseMove_=this.onMouseMove_.bind(this);this.onMouseUp_=this.onMouseUp_.bind(this);this.onKeyDown_=this.onKeyDown_.bind(this);this.onKeyUp_=this.onKeyUp_.bind(this);this.mode_=undefined;this.modeToKeyCodeMap_={};this.modifierToModeMap_={};this.targetElement_=undefined;this.modeBeforeAlternativeModeActivated_=null;this.isInteracting_=false;this.isClick_=false;},ready:function(){this.buttonsEl_=this.shadowRoot.querySelector('.buttons');this.dragHandleEl_=this.shadowRoot.querySelector('.drag-handle');this.supportedModeMask=MOUSE_SELECTOR_MODE.ALL_MODES;this.dragHandleEl_.addEventListener('mousedown',this.onDragHandleMouseDown_.bind(this));this.buttonsEl_.addEventListener('mouseup',this.onButtonMouseUp_);this.buttonsEl_.addEventListener('mousedown',this.onButtonMouseDown_);this.buttonsEl_.addEventListener('click',this.onButtonPress_.bind(this));},attached:function(){document.addEventListener('keydown',this.onKeyDown_);document.addEventListener('keyup',this.onKeyUp_);},detached:function(){document.removeEventListener('keydown',this.onKeyDown_);document.removeEventListener('keyup',this.onKeyUp_);},get targetElement(){return this.targetElement_;},set targetElement(target){if(this.targetElement_)
-this.targetElement_.removeEventListener('mousedown',this.onMouseDown_);this.targetElement_=target;if(this.targetElement_)
-this.targetElement_.addEventListener('mousedown',this.onMouseDown_);},get defaultMode(){return this.defaultMode_;},set defaultMode(defaultMode){this.defaultMode_=defaultMode;},get settingsKey(){return this.settingsKey_;},set settingsKey(settingsKey){this.settingsKey_=settingsKey;if(!this.settingsKey_)
-return;var mode=tr.b.Settings.get(this.settingsKey_+'.mode',undefined);if(MOUSE_SELECTOR_MODE_INFOS[mode]===undefined)
-mode=undefined;if((mode&this.supportedModeMask_)===0)
-mode=undefined;if(!mode)
-mode=this.defaultMode_;this.mode=mode;var pos=tr.b.Settings.get(this.settingsKey_+'.pos',undefined);if(pos)
-this.pos=pos;},get supportedModeMask(){return this.supportedModeMask_;},set supportedModeMask(supportedModeMask){if(this.mode&&(supportedModeMask&this.mode)===0)
-throw new Error('supportedModeMask must include current mode.');function createButtonForMode(mode){return button;}
-this.supportedModeMask_=supportedModeMask;this.buttonsEl_.textContent='';for(var modeName in MOUSE_SELECTOR_MODE){if(modeName=='ALL_MODES')
-continue;var mode=MOUSE_SELECTOR_MODE[modeName];if((this.supportedModeMask_&mode)===0)
-continue;var button=document.createElement('tr-ui-b-mouse-mode-icon');button.mode=mode;button.classList.add('tool-button');this.buttonsEl_.appendChild(button);}},getButtonForMode_:function(mode){for(var i=0;i<this.buttonsEl_.children.length;i++){var buttonEl=this.buttonsEl_.children[i];if(buttonEl.mode===mode)
-return buttonEl;}
-return undefined;},get mode(){return this.currentMode_;},set mode(newMode){if(newMode!==undefined){if(typeof newMode!=='number')
-throw new Error('Mode must be a number');if((newMode&this.supportedModeMask_)===0)
-throw new Error('Cannot switch to this mode, it is not supported');if(MOUSE_SELECTOR_MODE_INFOS[newMode]===undefined)
-throw new Error('Unrecognized mode');}
-var modeInfo;if(this.currentMode_===newMode)
-return;if(this.currentMode_){var buttonEl=this.getButtonForMode_(this.currentMode_);if(buttonEl)
-buttonEl.active=false;if(this.isInteracting_){var mouseEvent=this.createEvent_(MOUSE_SELECTOR_MODE_INFOS[this.mode].eventNames.end);this.dispatchEvent(mouseEvent);}
-modeInfo=MOUSE_SELECTOR_MODE_INFOS[this.currentMode_];tr.b.dispatchSimpleEvent(this,modeInfo.eventNames.exit,true);}
-this.currentMode_=newMode;if(this.currentMode_){var buttonEl=this.getButtonForMode_(this.currentMode_);if(buttonEl)
-buttonEl.active=true;this.mouseDownPos_.x=this.mousePos_.x;this.mouseDownPos_.y=this.mousePos_.y;modeInfo=MOUSE_SELECTOR_MODE_INFOS[this.currentMode_];if(!this.isInAlternativeMode_)
-tr.b.dispatchSimpleEvent(this,modeInfo.eventNames.enter,true);if(this.isInteracting_){var mouseEvent=this.createEvent_(MOUSE_SELECTOR_MODE_INFOS[this.mode].eventNames.begin);this.dispatchEvent(mouseEvent);}}
-if(this.settingsKey_&&!this.isInAlternativeMode_)
-tr.b.Settings.set(this.settingsKey_+'.mode',this.mode);},setKeyCodeForMode:function(mode,keyCode){if((mode&this.supportedModeMask_)===0)
-throw new Error('Mode not supported');this.modeToKeyCodeMap_[mode]=keyCode;if(!this.buttonsEl_)
-return;var buttonEl=this.getButtonForMode_(mode);if(buttonEl)
-buttonEl.acceleratorKey=String.fromCharCode(keyCode);},setCurrentMousePosFromEvent_:function(e){this.mousePos_.x=e.clientX;this.mousePos_.y=e.clientY;},createEvent_:function(eventName,sourceEvent){var event=new tr.b.Event(eventName,true);event.clientX=this.mousePos_.x;event.clientY=this.mousePos_.y;event.deltaX=this.mousePos_.x-this.mouseDownPos_.x;event.deltaY=this.mousePos_.y-this.mouseDownPos_.y;event.mouseDownX=this.mouseDownPos_.x;event.mouseDownY=this.mouseDownPos_.y;event.didPreventDefault=false;event.preventDefault=function(){event.didPreventDefault=true;if(sourceEvent)
-sourceEvent.preventDefault();};event.stopPropagation=function(){sourceEvent.stopPropagation();};event.stopImmediatePropagation=function(){throw new Error('Not implemented');};return event;},onMouseDown_:function(e){if(e.button!==0)
-return;this.setCurrentMousePosFromEvent_(e);var mouseEvent=this.createEvent_(MOUSE_SELECTOR_MODE_INFOS[this.mode].eventNames.begin,e);if(this.mode===MOUSE_SELECTOR_MODE.SELECTION)
-mouseEvent.appendSelection=isCmdOrCtrlPressed(e);this.dispatchEvent(mouseEvent);this.isInteracting_=true;this.isClick_=true;tr.ui.b.trackMouseMovesUntilMouseUp(this.onMouseMove_,this.onMouseUp_);},onMouseMove_:function(e){this.setCurrentMousePosFromEvent_(e);var mouseEvent=this.createEvent_(MOUSE_SELECTOR_MODE_INFOS[this.mode].eventNames.update,e);this.dispatchEvent(mouseEvent);if(this.isInteracting_)
-this.checkIsClick_(e);},onMouseUp_:function(e){if(e.button!==0)
-return;var mouseEvent=this.createEvent_(MOUSE_SELECTOR_MODE_INFOS[this.mode].eventNames.end,e);mouseEvent.isClick=this.isClick_;this.dispatchEvent(mouseEvent);if(this.isClick_&&!mouseEvent.didPreventDefault)
-this.dispatchClickEvents_(e);this.isInteracting_=false;this.updateAlternativeModeState_(e);},onButtonMouseDown_:function(e){e.preventDefault();e.stopImmediatePropagation();},onButtonMouseUp_:function(e){e.preventDefault();e.stopImmediatePropagation();},onButtonPress_:function(e){this.modeBeforeAlternativeModeActivated_=undefined;this.mode=e.target.mode;e.preventDefault();},onKeyDown_:function(e){if(e.path[0].tagName=='INPUT')
-return;if(e.keyCode===' '.charCodeAt(0))
-this.spacePressed_=true;this.updateAlternativeModeState_(e);},onKeyUp_:function(e){if(e.path[0].tagName=='INPUT')
-return;if(e.keyCode===' '.charCodeAt(0))
-this.spacePressed_=false;var didHandleKey=false;tr.b.iterItems(this.modeToKeyCodeMap_,function(modeStr,keyCode){if(e.keyCode===keyCode){this.modeBeforeAlternativeModeActivated_=undefined;var mode=parseInt(modeStr);this.mode=mode;didHandleKey=true;}},this);if(didHandleKey){e.preventDefault();e.stopPropagation();return;}
-this.updateAlternativeModeState_(e);},updateAlternativeModeState_:function(e){var shiftPressed=e.shiftKey;var spacePressed=this.spacePressed_;var cmdOrCtrlPressed=isCmdOrCtrlPressed(e);var smm=this.supportedModeMask_;var newMode;var isNewModeAnAlternativeMode=false;if(shiftPressed&&(this.modifierToModeMap_[MODIFIER.SHIFT]&smm)!==0){newMode=this.modifierToModeMap_[MODIFIER.SHIFT];isNewModeAnAlternativeMode=true;}else if(spacePressed&&(this.modifierToModeMap_[MODIFIER.SPACE]&smm)!==0){newMode=this.modifierToModeMap_[MODIFIER.SPACE];isNewModeAnAlternativeMode=true;}else if(cmdOrCtrlPressed&&(this.modifierToModeMap_[MODIFIER.CMD_OR_CTRL]&smm)!==0){newMode=this.modifierToModeMap_[MODIFIER.CMD_OR_CTRL];isNewModeAnAlternativeMode=true;}else{if(this.isInAlternativeMode_){newMode=this.modeBeforeAlternativeModeActivated_;isNewModeAnAlternativeMode=false;}else{newMode=undefined;}}
-if(this.mode===newMode||newMode===undefined)
-return;if(isNewModeAnAlternativeMode)
-this.modeBeforeAlternativeModeActivated_=this.mode;this.mode=newMode;},get isInAlternativeMode_(){return!!this.modeBeforeAlternativeModeActivated_;},setModifierForAlternateMode:function(mode,modifier){this.modifierToModeMap_[modifier]=mode;},get pos(){return{x:parseInt(this.style.left),y:parseInt(this.style.top)};},set pos(pos){pos=this.constrainPositionToBounds_(pos);this.style.left=pos.x+'px';this.style.top=pos.y+'px';if(this.settingsKey_)
-tr.b.Settings.set(this.settingsKey_+'.pos',this.pos);},constrainPositionToBounds_:function(pos){var parent=this.offsetParent||document.body;var parentRect=tr.ui.b.windowRectForElement(parent);var top=0;var bottom=parentRect.height-this.offsetHeight;var left=0;var right=parentRect.width-this.offsetWidth;var res={};res.x=Math.max(pos.x,left);res.x=Math.min(res.x,right);res.y=Math.max(pos.y,top);res.y=Math.min(res.y,bottom);return res;},onDragHandleMouseDown_:function(e){e.preventDefault();e.stopImmediatePropagation();var mouseDownPos={x:e.clientX-this.offsetLeft,y:e.clientY-this.offsetTop};tr.ui.b.trackMouseMovesUntilMouseUp(function(e){var pos={};pos.x=e.clientX-mouseDownPos.x;pos.y=e.clientY-mouseDownPos.y;this.pos=pos;}.bind(this));},checkIsClick_:function(e){if(!this.isInteracting_||!this.isClick_)
-return;var deltaX=this.mousePos_.x-this.mouseDownPos_.x;var deltaY=this.mousePos_.y-this.mouseDownPos_.y;var minDist=MIN_MOUSE_SELECTION_DISTANCE;if(deltaX*deltaX+deltaY*deltaY>minDist*minDist)
-this.isClick_=false;},dispatchClickEvents_:function(e){if(!this.isClick_)
-return;var modeInfo=MOUSE_SELECTOR_MODE_INFOS[MOUSE_SELECTOR_MODE.SELECTION];var eventNames=modeInfo.eventNames;var mouseEvent=this.createEvent_(eventNames.begin);mouseEvent.appendSelection=isCmdOrCtrlPressed(e);this.dispatchEvent(mouseEvent);mouseEvent=this.createEvent_(eventNames.end);this.dispatchEvent(mouseEvent);}});return{MIN_MOUSE_SELECTION_DISTANCE:MIN_MOUSE_SELECTION_DISTANCE,MODIFIER:MODIFIER};});'use strict';tr.exportTo('tr.ui.b',function(){function TimingTool(viewport,targetElement){this.viewport_=viewport;this.onMouseMove_=this.onMouseMove_.bind(this);this.onDblClick_=this.onDblClick_.bind(this);this.targetElement_=targetElement;this.isMovingLeftEdge_=false;};TimingTool.prototype={onEnterTiming:function(e){this.targetElement_.addEventListener('mousemove',this.onMouseMove_);this.targetElement_.addEventListener('dblclick',this.onDblClick_);},onBeginTiming:function(e){if(!this.isTouchPointInsideTrackBounds_(e.clientX,e.clientY))
-return;var pt=this.getSnappedToEventPosition_(e);this.mouseDownAt_(pt.x,pt.y);this.updateSnapIndicators_(pt);},updateSnapIndicators_:function(pt){if(!pt.snapped)
-return;var ir=this.viewport_.interestRange;if(ir.min===pt.x)
-ir.leftSnapIndicator=new tr.ui.SnapIndicator(pt.y,pt.height);if(ir.max===pt.x)
-ir.rightSnapIndicator=new tr.ui.SnapIndicator(pt.y,pt.height);},onUpdateTiming:function(e){var pt=this.getSnappedToEventPosition_(e);this.mouseMoveAt_(pt.x,pt.y,true);this.updateSnapIndicators_(pt);},onEndTiming:function(e){this.mouseUp_();},onExitTiming:function(e){this.targetElement_.removeEventListener('mousemove',this.onMouseMove_);this.targetElement_.removeEventListener('dblclick',this.onDblClick_);},onMouseMove_:function(e){if(e.button)
-return;var worldX=this.getWorldXFromEvent_(e);this.mouseMoveAt_(worldX,e.clientY,false);},onDblClick_:function(e){console.error('not implemented');},isTouchPointInsideTrackBounds_:function(clientX,clientY){if(!this.viewport_||!this.viewport_.modelTrackContainer||!this.viewport_.modelTrackContainer.canvas)
-return false;var canvas=this.viewport_.modelTrackContainer.canvas;var canvasRect=canvas.getBoundingClientRect();if(clientX>=canvasRect.left&&clientX<=canvasRect.right&&clientY>=canvasRect.top&&clientY<=canvasRect.bottom)
-return true;return false;},mouseDownAt_:function(worldX,y){var ir=this.viewport_.interestRange;var dt=this.viewport_.currentDisplayTransform;var pixelRatio=window.devicePixelRatio||1;var nearnessThresholdWorld=dt.xViewVectorToWorld(6*pixelRatio);if(ir.isEmpty){ir.setMinAndMax(worldX,worldX);ir.rightSelected=true;this.isMovingLeftEdge_=false;return;}
-if(Math.abs(worldX-ir.min)<nearnessThresholdWorld){ir.leftSelected=true;ir.min=worldX;this.isMovingLeftEdge_=true;return;}
-if(Math.abs(worldX-ir.max)<nearnessThresholdWorld){ir.rightSelected=true;ir.max=worldX;this.isMovingLeftEdge_=false;return;}
-ir.setMinAndMax(worldX,worldX);ir.rightSelected=true;this.isMovingLeftEdge_=false;},mouseMoveAt_:function(worldX,y,mouseDown){var ir=this.viewport_.interestRange;if(mouseDown){this.updateMovingEdge_(worldX);return;}
-var ir=this.viewport_.interestRange;var dt=this.viewport_.currentDisplayTransform;var pixelRatio=window.devicePixelRatio||1;var nearnessThresholdWorld=dt.xViewVectorToWorld(6*pixelRatio);if(Math.abs(worldX-ir.min)<nearnessThresholdWorld){ir.leftSelected=true;ir.rightSelected=false;return;}
-if(Math.abs(worldX-ir.max)<nearnessThresholdWorld){ir.leftSelected=false;ir.rightSelected=true;return;}
-ir.leftSelected=false;ir.rightSelected=false;return;},updateMovingEdge_:function(newWorldX){var ir=this.viewport_.interestRange;var a=ir.min;var b=ir.max;if(this.isMovingLeftEdge_)
-a=newWorldX;else
-b=newWorldX;if(a<=b)
-ir.setMinAndMax(a,b);else
-ir.setMinAndMax(b,a);if(ir.min==newWorldX){this.isMovingLeftEdge_=true;ir.leftSelected=true;ir.rightSelected=false;}else{this.isMovingLeftEdge_=false;ir.leftSelected=false;ir.rightSelected=true;}},mouseUp_:function(){var dt=this.viewport_.currentDisplayTransform;var ir=this.viewport_.interestRange;ir.leftSelected=false;ir.rightSelected=false;var pixelRatio=window.devicePixelRatio||1;var minWidthValue=dt.xViewVectorToWorld(2*pixelRatio);if(ir.range<minWidthValue)
-ir.reset();},getWorldXFromEvent_:function(e){var pixelRatio=window.devicePixelRatio||1;var canvas=this.viewport_.modelTrackContainer.canvas;var worldOffset=canvas.getBoundingClientRect().left;var viewX=(e.clientX-worldOffset)*pixelRatio;return this.viewport_.currentDisplayTransform.xViewToWorld(viewX);},getSnappedToEventPosition_:function(e){var pixelRatio=window.devicePixelRatio||1;var EVENT_SNAP_RANGE=16*pixelRatio;var modelTrackContainer=this.viewport_.modelTrackContainer;var modelTrackContainerRect=modelTrackContainer.getBoundingClientRect();var viewport=this.viewport_;var dt=viewport.currentDisplayTransform;var worldMaxDist=dt.xViewVectorToWorld(EVENT_SNAP_RANGE);var worldX=this.getWorldXFromEvent_(e);var mouseY=e.clientY;var selection=new tr.model.EventSet();modelTrackContainer.addClosestEventToSelection(worldX,worldMaxDist,mouseY,mouseY,selection);if(!selection.length){modelTrackContainer.addClosestEventToSelection(worldX,worldMaxDist,modelTrackContainerRect.top,modelTrackContainerRect.bottom,selection);}
-var minDistX=worldMaxDist;var minDistY=Infinity;var pixWidth=dt.xViewVectorToWorld(1);var result={x:worldX,y:mouseY-modelTrackContainerRect.top,height:0,snapped:false};var eventBounds=new tr.b.Range();for(var i=0;i<selection.length;i++){var event=selection[i];var track=viewport.trackForEvent(event);var trackRect=track.getBoundingClientRect();eventBounds.reset();event.addBoundsToRange(eventBounds);var eventX;if(Math.abs(eventBounds.min-worldX)<Math.abs(eventBounds.max-worldX)){eventX=eventBounds.min;}else{eventX=eventBounds.max;}
-var distX=eventX-worldX;var eventY=trackRect.top;var eventHeight=trackRect.height;var distY=Math.abs(eventY+eventHeight/2-mouseY);if((distX<=minDistX||Math.abs(distX-minDistX)<pixWidth)&&distY<minDistY){minDistX=distX;minDistY=distY;result.x=eventX;result.y=eventY+
-modelTrackContainer.scrollTop-modelTrackContainerRect.top;result.height=eventHeight;result.snapped=true;}}
-return result;}};return{TimingTool:TimingTool};});'use strict';tr.exportTo('tr.ui',function(){var kDefaultPanAnimatoinDurationMs=100.0;function TimelineDisplayTransformPanAnimation(deltaX,deltaY,opt_durationMs){this.deltaX=deltaX;this.deltaY=deltaY;if(opt_durationMs===undefined)
-this.durationMs=kDefaultPanAnimatoinDurationMs;else
-this.durationMs=opt_durationMs;this.startPanX=undefined;this.startPanY=undefined;this.startTimeMs=undefined;}
-TimelineDisplayTransformPanAnimation.prototype={__proto__:tr.ui.b.Animation.prototype,get affectsPanY(){return this.deltaY!==0;},canTakeOverFor:function(existingAnimation){return existingAnimation instanceof TimelineDisplayTransformPanAnimation;},takeOverFor:function(existing,timestamp,target){var remainingDeltaXOnExisting=existing.goalPanX-target.panX;var remainingDeltaYOnExisting=existing.goalPanY-target.panY;var remainingTimeOnExisting=timestamp-(existing.startTimeMs+existing.durationMs);remainingTimeOnExisting=Math.max(remainingTimeOnExisting,0);this.deltaX+=remainingDeltaXOnExisting;this.deltaY+=remainingDeltaYOnExisting;this.durationMs+=remainingTimeOnExisting;},start:function(timestamp,target){this.startTimeMs=timestamp;this.startPanX=target.panX;this.startPanY=target.panY;},tick:function(timestamp,target){var percentDone=(timestamp-this.startTimeMs)/this.durationMs;percentDone=tr.b.clamp(percentDone,0,1);target.panX=tr.b.lerp(percentDone,this.startPanX,this.goalPanX);if(this.affectsPanY)
-target.panY=tr.b.lerp(percentDone,this.startPanY,this.goalPanY);return timestamp>=this.startTimeMs+this.durationMs;},get goalPanX(){return this.startPanX+this.deltaX;},get goalPanY(){return this.startPanY+this.deltaY;}};function TimelineDisplayTransformZoomToAnimation(goalFocalPointXWorld,goalFocalPointXView,goalFocalPointY,zoomInRatioX,opt_durationMs){this.goalFocalPointXWorld=goalFocalPointXWorld;this.goalFocalPointXView=goalFocalPointXView;this.goalFocalPointY=goalFocalPointY;this.zoomInRatioX=zoomInRatioX;if(opt_durationMs===undefined)
-this.durationMs=kDefaultPanAnimatoinDurationMs;else
-this.durationMs=opt_durationMs;this.startTimeMs=undefined;this.startScaleX=undefined;this.goalScaleX=undefined;this.startPanY=undefined;}
-TimelineDisplayTransformZoomToAnimation.prototype={__proto__:tr.ui.b.Animation.prototype,get affectsPanY(){return this.startPanY!=this.goalFocalPointY;},canTakeOverFor:function(existingAnimation){return false;},takeOverFor:function(existingAnimation,timestamp,target){this.goalScaleX=target.scaleX*this.zoomInRatioX;},start:function(timestamp,target){this.startTimeMs=timestamp;this.startScaleX=target.scaleX;this.goalScaleX=this.zoomInRatioX*target.scaleX;this.startPanY=target.panY;},tick:function(timestamp,target){var percentDone=(timestamp-this.startTimeMs)/this.durationMs;percentDone=tr.b.clamp(percentDone,0,1);target.scaleX=tr.b.lerp(percentDone,this.startScaleX,this.goalScaleX);if(this.affectsPanY){target.panY=tr.b.lerp(percentDone,this.startPanY,this.goalFocalPointY);}
-target.xPanWorldPosToViewPos(this.goalFocalPointXWorld,this.goalFocalPointXView);return timestamp>=this.startTimeMs+this.durationMs;}};return{TimelineDisplayTransformPanAnimation:TimelineDisplayTransformPanAnimation,TimelineDisplayTransformZoomToAnimation:TimelineDisplayTransformZoomToAnimation};});'use strict';tr.exportTo('tr.ui.b',function(){var ContainerThatDecoratesItsChildren=tr.ui.b.define('div');ContainerThatDecoratesItsChildren.prototype={__proto__:HTMLUnknownElement.prototype,decorate:function(){this.observer_=new WebKitMutationObserver(this.didMutate_.bind(this));this.observer_.observe(this,{childList:true});Object.defineProperty(this,'textContent',{get:undefined,set:this.onSetTextContent_});},appendChild:function(x){HTMLUnknownElement.prototype.appendChild.call(this,x);this.didMutate_(this.observer_.takeRecords());},insertBefore:function(x,y){HTMLUnknownElement.prototype.insertBefore.call(this,x,y);this.didMutate_(this.observer_.takeRecords());},removeChild:function(x){HTMLUnknownElement.prototype.removeChild.call(this,x);this.didMutate_(this.observer_.takeRecords());},replaceChild:function(x,y){HTMLUnknownElement.prototype.replaceChild.call(this,x,y);this.didMutate_(this.observer_.takeRecords());},onSetTextContent_:function(textContent){if(textContent!='')
+return undefined;return newSelection;},rebuildEventToTrackMap:function(){this.eventToTrackMap_=new tr.ui.tracks.EventToTrackMap();this.modelTrackContainer_.addEventsToTrackMap(this.eventToTrackMap_);},rebuildContainerToTrackMap:function(){this.containerToTrackMap.clear();this.modelTrackContainer_.addContainersToTrackMap(this.containerToTrackMap);},trackForEvent:function(event){return this.eventToTrackMap_[event.guid];}};return{TimelineViewport:TimelineViewport};});'use strict';tr.exportTo('tr.ui.b',function(){var ContainerThatDecoratesItsChildren=tr.ui.b.define('div');ContainerThatDecoratesItsChildren.prototype={__proto__:HTMLUnknownElement.prototype,decorate:function(){this.observer_=new WebKitMutationObserver(this.didMutate_.bind(this));this.observer_.observe(this,{childList:true});Object.defineProperty(this,'textContent',{get:undefined,set:this.onSetTextContent_});},appendChild:function(x){HTMLUnknownElement.prototype.appendChild.call(this,x);this.didMutate_(this.observer_.takeRecords());},insertBefore:function(x,y){HTMLUnknownElement.prototype.insertBefore.call(this,x,y);this.didMutate_(this.observer_.takeRecords());},removeChild:function(x){HTMLUnknownElement.prototype.removeChild.call(this,x);this.didMutate_(this.observer_.takeRecords());},replaceChild:function(x,y){HTMLUnknownElement.prototype.replaceChild.call(this,x,y);this.didMutate_(this.observer_.takeRecords());},onSetTextContent_:function(textContent){if(textContent!='')
 throw new Error('textContent can only be set to \'\'.');this.clear();},clear:function(){while(this.lastChild)
 HTMLUnknownElement.prototype.removeChild.call(this,this.lastChild);this.didMutate_(this.observer_.takeRecords());},didMutate_:function(records){this.beginDecorating_();for(var i=0;i<records.length;i++){var addedNodes=records[i].addedNodes;if(addedNodes){for(var j=0;j<addedNodes.length;j++)
 this.decorateChild_(addedNodes[j]);}
@@ -4994,11 +4582,11 @@
 var axisGuidToAxisData={};var topPadding=0;var bottomPadding=0;this.series_.forEach(function(series){var axis=series.axis;var axisGuid=axis.guid;if(!(axisGuid in axisGuidToAxisData)){axisGuidToAxisData[axisGuid]={axis:axis,series:[]};}
 axisGuidToAxisData[axisGuid].series.push(series);topPadding=Math.max(topPadding,series.topPadding);bottomPadding=Math.max(bottomPadding,series.bottomPadding);},this);this.axisGuidToAxisData_=axisGuidToAxisData;this.topPadding_=topPadding;this.bottomPadding_=bottomPadding;},draw:function(type,viewLWorld,viewRWorld){switch(type){case tr.ui.tracks.DrawType.GENERAL_EVENT:this.drawChart_(viewLWorld,viewRWorld);break;}},drawChart_:function(viewLWorld,viewRWorld){if(!this.series_)
 return;var ctx=this.context();var displayTransform=this.viewport.currentDisplayTransform;var pixelRatio=window.devicePixelRatio||1;var bounds=this.getBoundingClientRect();var highDetails=this.viewport.highDetails;var width=bounds.width*pixelRatio;var height=bounds.height*pixelRatio;var topPadding=this.topPadding_*pixelRatio;var bottomPadding=this.bottomPadding_*pixelRatio;ctx.save();ctx.beginPath();ctx.rect(0,0,width,height);ctx.clip();this.series_.forEach(function(series){var chartTransform=new tr.ui.tracks.ChartTransform(displayTransform,series.axis,width,height,topPadding,bottomPadding,pixelRatio);series.draw(ctx,chartTransform,highDetails);},this);ctx.restore();},addEventsToTrackMap:function(eventToTrackMap){this.series_.forEach(function(series){series.points.forEach(function(point){point.addToTrackMap(eventToTrackMap,this);},this);},this);},addIntersectingEventsInRangeToSelectionInWorldSpace:function(loWX,hiWX,viewPixWidthWorld,selection){this.series_.forEach(function(series){series.addIntersectingEventsInRangeToSelectionInWorldSpace(loWX,hiWX,viewPixWidthWorld,selection);},this);},addEventNearToProvidedEventToSelection:function(event,offset,selection){var foundItem=false;this.series_.forEach(function(series){foundItem=foundItem||series.addEventNearToProvidedEventToSelection(event,offset,selection);},this);return foundItem;},addAllEventsMatchingFilterToSelection:function(filter,selection){},addClosestEventToSelection:function(worldX,worldMaxDist,loY,hiY,selection){this.series_.forEach(function(series){series.addClosestEventToSelection(worldX,worldMaxDist,loY,hiY,selection);},this);},autoSetAllAxes:function(opt_config){tr.b.iterItems(this.axisGuidToAxisData_,function(axisGuid,axisData){var axis=axisData.axis;var series=axisData.series;axis.autoSetFromSeries(series,opt_config);},this);},autoSetAxis:function(axis,opt_config){var series=this.axisGuidToAxisData_[axis.guid].series;axis.autoSetFromSeries(series,opt_config);}};return{ChartTrack:ChartTrack};});'use strict';tr.exportTo('tr.ui.tracks',function(){var ColorScheme=tr.b.ColorScheme;var ChartTrack=tr.ui.tracks.ChartTrack;var PowerSeriesTrack=tr.ui.b.define('power-series-track',ChartTrack);PowerSeriesTrack.prototype={__proto__:ChartTrack.prototype,decorate:function(viewport){ChartTrack.prototype.decorate.call(this,viewport);this.classList.add('power-series-track');this.heading='Power';this.powerSeries_=undefined;},set powerSeries(powerSeries){this.powerSeries_=powerSeries;this.series=this.buildChartSeries_();this.autoSetAllAxes({expandMax:true});},get hasVisibleContent(){return(this.powerSeries_&&this.powerSeries_.samples.length>0);},addContainersToTrackMap:function(containerToTrackMap){containerToTrackMap.addContainer(this.powerSeries_,this);},buildChartSeries_:function(){if(!this.hasVisibleContent)
-return[];var axis=new tr.ui.tracks.ChartAxis(0,undefined);var pts=this.powerSeries_.samples.map(function(smpl){return new tr.ui.tracks.ChartPoint(smpl,smpl.start,smpl.power);});var renderingConfig={chartType:tr.ui.tracks.ChartSeriesType.AREA,colorId:ColorScheme.getColorIdForGeneralPurposeString(this.heading)};return[new tr.ui.tracks.ChartSeries(pts,axis,renderingConfig)];}};return{PowerSeriesTrack:PowerSeriesTrack};});'use strict';tr.exportTo('tr.ui.tracks',function(){var SpacingTrack=tr.ui.b.define('spacing-track',tr.ui.tracks.Track);SpacingTrack.prototype={__proto__:tr.ui.tracks.Track.prototype,decorate:function(viewport){tr.ui.tracks.Track.prototype.decorate.call(this,viewport);this.classList.add('spacing-track');this.heading_=document.createElement('tr-ui-heading');this.appendChild(this.heading_);},draw:function(type,viewLWorld,viewRWorld){},addAllEventsMatchingFilterToSelection:function(filter,selection){}};return{SpacingTrack:SpacingTrack};});'use strict';tr.exportTo('tr.ui.tracks',function(){var ContainerTrack=tr.ui.tracks.ContainerTrack;var DeviceTrack=tr.ui.b.define('device-track',ContainerTrack);DeviceTrack.prototype={__proto__:ContainerTrack.prototype,decorate:function(viewport){ContainerTrack.prototype.decorate.call(this,viewport);this.classList.add('device-track');this.device_=undefined;this.powerSeriesTrack_=undefined;},get device(){return this.device_;},set device(device){this.device_=device;this.updateContents_();},get powerSeriesTrack(){return this.powerSeriesTrack_;},get hasVisibleContent(){return(this.powerSeriesTrack_&&this.powerSeriesTrack_.hasVisibleContent);},addContainersToTrackMap:function(containerToTrackMap){tr.ui.tracks.ContainerTrack.prototype.addContainersToTrackMap.call(this,containerToTrackMap);containerToTrackMap.addContainer(this.device,this);},addEventsToTrackMap:function(eventToTrackMap){this.tracks_.forEach(function(track){track.addEventsToTrackMap(eventToTrackMap);});},appendPowerSeriesTrack_:function(){this.powerSeriesTrack_=new tr.ui.tracks.PowerSeriesTrack(this.viewport);this.powerSeriesTrack_.powerSeries=this.device.powerSeries;if(this.powerSeriesTrack_.hasVisibleContent){this.appendChild(this.powerSeriesTrack_);this.appendChild(new tr.ui.tracks.SpacingTrack(this.viewport));}},updateContents_:function(){this.clearTracks_();this.appendPowerSeriesTrack_();}};return{DeviceTrack:DeviceTrack};});'use strict';tr.exportTo('tr.ui.tracks',function(){var ColorScheme=tr.b.ColorScheme;var DISPLAYED_SIZE_ATTRIBUTE_NAME=tr.model.MemoryAllocatorDump.DISPLAYED_SIZE_ATTRIBUTE_NAME;function addDictionary(dstDict,srcDict){tr.b.iterItems(srcDict,function(key,value){var existingValue=dstDict[key];if(existingValue===undefined)
+return[];var axis=new tr.ui.tracks.ChartAxis(0,undefined);var pts=this.powerSeries_.samples.map(function(smpl){return new tr.ui.tracks.ChartPoint(smpl,smpl.start,smpl.power);});var renderingConfig={chartType:tr.ui.tracks.ChartSeriesType.AREA,colorId:ColorScheme.getColorIdForGeneralPurposeString(this.heading)};return[new tr.ui.tracks.ChartSeries(pts,axis,renderingConfig)];}};return{PowerSeriesTrack:PowerSeriesTrack};});'use strict';tr.exportTo('tr.ui.tracks',function(){var SpacingTrack=tr.ui.b.define('spacing-track',tr.ui.tracks.Track);SpacingTrack.prototype={__proto__:tr.ui.tracks.Track.prototype,decorate:function(viewport){tr.ui.tracks.Track.prototype.decorate.call(this,viewport);this.classList.add('spacing-track');this.heading_=document.createElement('tr-ui-heading');this.appendChild(this.heading_);},addAllEventsMatchingFilterToSelection:function(filter,selection){}};return{SpacingTrack:SpacingTrack};});'use strict';tr.exportTo('tr.ui.tracks',function(){var ContainerTrack=tr.ui.tracks.ContainerTrack;var DeviceTrack=tr.ui.b.define('device-track',ContainerTrack);DeviceTrack.prototype={__proto__:ContainerTrack.prototype,decorate:function(viewport){ContainerTrack.prototype.decorate.call(this,viewport);this.classList.add('device-track');this.device_=undefined;this.powerSeriesTrack_=undefined;},get device(){return this.device_;},set device(device){this.device_=device;this.updateContents_();},get powerSeriesTrack(){return this.powerSeriesTrack_;},get hasVisibleContent(){return(this.powerSeriesTrack_&&this.powerSeriesTrack_.hasVisibleContent);},addContainersToTrackMap:function(containerToTrackMap){tr.ui.tracks.ContainerTrack.prototype.addContainersToTrackMap.call(this,containerToTrackMap);containerToTrackMap.addContainer(this.device,this);},addEventsToTrackMap:function(eventToTrackMap){this.tracks_.forEach(function(track){track.addEventsToTrackMap(eventToTrackMap);});},appendPowerSeriesTrack_:function(){this.powerSeriesTrack_=new tr.ui.tracks.PowerSeriesTrack(this.viewport);this.powerSeriesTrack_.powerSeries=this.device.powerSeries;if(this.powerSeriesTrack_.hasVisibleContent){this.appendChild(this.powerSeriesTrack_);this.appendChild(new tr.ui.tracks.SpacingTrack(this.viewport));}},updateContents_:function(){this.clearTracks_();this.appendPowerSeriesTrack_();}};return{DeviceTrack:DeviceTrack};});'use strict';tr.exportTo('tr.ui.tracks',function(){var ColorScheme=tr.b.ColorScheme;var DISPLAYED_SIZE_NUMERIC_NAME=tr.model.MemoryAllocatorDump.DISPLAYED_SIZE_NUMERIC_NAME;function addDictionary(dstDict,srcDict){tr.b.iterItems(srcDict,function(key,value){var existingValue=dstDict[key];if(existingValue===undefined)
 existingValue=0;dstDict[key]=existingValue+value;});}
 function getProcessMemoryDumpAllocatorSizes(processMemoryDump){var allocatorDumps=processMemoryDump.memoryAllocatorDumps;if(allocatorDumps===undefined)
 return{};var allocatorSizes={};allocatorDumps.forEach(function(allocatorDump){if(allocatorDump.fullName==='tracing')
-return;var allocatorSize=allocatorDump.attributes[DISPLAYED_SIZE_ATTRIBUTE_NAME];if(allocatorSize===undefined)
+return;var allocatorSize=allocatorDump.numerics[DISPLAYED_SIZE_NUMERIC_NAME];if(allocatorSize===undefined)
 return;var allocatorSizeValue=allocatorSize.value;if(allocatorSizeValue===undefined)
 return;allocatorSizes[allocatorDump.fullName]=allocatorSizeValue;});return allocatorSizes;};function getGlobalMemoryDumpAllocatorSizes(globalMemoryDump){var globalAllocatorSizes={};tr.b.iterItems(globalMemoryDump.processMemoryDumps,function(pid,processMemoryDump){addDictionary(globalAllocatorSizes,getProcessMemoryDumpAllocatorSizes(processMemoryDump));});return globalAllocatorSizes;}
 function buildAllocatedMemoryChartSeries(memoryDumps,memoryDumpToAllocatorSizesFn){var allocatorNameToPoints={};var dumpsData=memoryDumps.map(function(memoryDump){var allocatorSizes=memoryDumpToAllocatorSizesFn(memoryDump);tr.b.iterItems(allocatorSizes,function(allocatorName){allocatorNameToPoints[allocatorName]=[];});return{dump:memoryDump,sizes:allocatorSizes};});if(Object.keys(allocatorNameToPoints).length===0)
@@ -5008,19 +4596,16 @@
 function buildGlobalUsedMemoryChartSeries(globalMemoryDumps){var containsVmRegions=globalMemoryDumps.some(function(globalDump){for(var pid in globalDump.processMemoryDumps)
 if(globalDump.processMemoryDumps[pid].mostRecentVmRegions)
 return true;return false;});if(!containsVmRegions)
-return undefined;var pidToProcess={};globalMemoryDumps.forEach(function(globalDump){tr.b.iterItems(globalDump.processMemoryDumps,function(pid,processDump){pidToProcess[pid]=processDump.process;});});var pidToPoints={};tr.b.iterItems(pidToProcess,function(pid,process){pidToPoints[pid]=[];});globalMemoryDumps.forEach(function(globalDump){var pssBase=0;tr.b.iterItems(pidToPoints,function(pid,points){var processMemoryDump=globalDump.processMemoryDumps[pid];var pss;if(processMemoryDump===undefined){pss=0;}else{pss=processMemoryDump.getMostRecentTotalVmRegionStat('proportionalResident');if(pss===undefined){pss=0;}}
-var cumulativePss=pssBase+pss;points.push(new tr.ui.tracks.ChartPoint(globalDump,globalDump.start,cumulativePss,pssBase));pssBase=cumulativePss;});});var axis=new tr.ui.tracks.ChartAxis(0);var series=[];tr.b.iterItems(pidToPoints,function(pid,points){var process=pidToProcess[pid];var colorId=ColorScheme.getColorIdForGeneralPurposeString(process.userFriendlyName);var renderingConfig={chartType:tr.ui.tracks.ChartSeriesType.AREA,colorId:colorId,backgroundOpacity:0.8};series.push(new tr.ui.tracks.ChartSeries(points,axis,renderingConfig));});series.reverse();return series;}
+return undefined;var pidToProcess={};globalMemoryDumps.forEach(function(globalDump){tr.b.iterItems(globalDump.processMemoryDumps,function(pid,processDump){pidToProcess[pid]=processDump.process;});});var pidToPoints={};tr.b.iterItems(pidToProcess,function(pid,process){pidToPoints[pid]=[];});globalMemoryDumps.forEach(function(globalDump){var pssBase=0;tr.b.iterItems(pidToPoints,function(pid,points){var processMemoryDump=globalDump.processMemoryDumps[pid];var cumulativePss=pssBase;if(processMemoryDump!==undefined){var vmRegions=processMemoryDump.mostRecentVmRegions;if(vmRegions!==undefined)
+cumulativePss+=vmRegions.byteStats.proportionalResident||0;}
+points.push(new tr.ui.tracks.ChartPoint(globalDump,globalDump.start,cumulativePss,pssBase));pssBase=cumulativePss;});});var axis=new tr.ui.tracks.ChartAxis(0);var series=[];tr.b.iterItems(pidToPoints,function(pid,points){var process=pidToProcess[pid];var colorId=ColorScheme.getColorIdForGeneralPurposeString(process.userFriendlyName);var renderingConfig={chartType:tr.ui.tracks.ChartSeriesType.AREA,colorId:colorId,backgroundOpacity:0.8};series.push(new tr.ui.tracks.ChartSeries(points,axis,renderingConfig));});series.reverse();return series;}
 function buildProcessAllocatedMemoryChartSeries(processMemoryDumps){return buildAllocatedMemoryChartSeries(processMemoryDumps,getProcessMemoryDumpAllocatorSizes);}
 function buildGlobalAllocatedMemoryChartSeries(globalMemoryDumps){return buildAllocatedMemoryChartSeries(globalMemoryDumps,getGlobalMemoryDumpAllocatorSizes);}
 return{buildMemoryLetterDots:buildMemoryLetterDots,buildGlobalUsedMemoryChartSeries:buildGlobalUsedMemoryChartSeries,buildProcessAllocatedMemoryChartSeries:buildProcessAllocatedMemoryChartSeries,buildGlobalAllocatedMemoryChartSeries:buildGlobalAllocatedMemoryChartSeries};});'use strict';tr.exportTo('tr.ui.tracks',function(){var USED_MEMORY_TRACK_HEIGHT=50;var ALLOCATED_MEMORY_TRACK_HEIGHT=50;var GlobalMemoryDumpTrack=tr.ui.b.define('global-memory-dump-track',tr.ui.tracks.ContainerTrack);GlobalMemoryDumpTrack.prototype={__proto__:tr.ui.tracks.ContainerTrack.prototype,decorate:function(viewport){tr.ui.tracks.ContainerTrack.prototype.decorate.call(this,viewport);this.memoryDumps_=undefined;},get memoryDumps(){return this.memoryDumps_;},set memoryDumps(memoryDumps){this.memoryDumps_=memoryDumps;this.updateContents_();},updateContents_:function(){this.clearTracks_();if(!this.memoryDumps_||!this.memoryDumps_.length)
 return;this.appendDumpDotsTrack_();this.appendUsedMemoryTrack_();this.appendAllocatedMemoryTrack_();},appendDumpDotsTrack_:function(){var items=tr.ui.tracks.buildMemoryLetterDots(this.memoryDumps_);if(!items)
 return;var track=new tr.ui.tracks.LetterDotTrack(this.viewport);track.heading='Memory Dumps';track.items=items;this.appendChild(track);},appendUsedMemoryTrack_:function(){var series=tr.ui.tracks.buildGlobalUsedMemoryChartSeries(this.memoryDumps_);if(!series)
 return;var track=new tr.ui.tracks.ChartTrack(this.viewport);track.heading='Memory per process';track.height=USED_MEMORY_TRACK_HEIGHT+'px';track.series=series;track.autoSetAllAxes({expandMax:true});this.appendChild(track);},appendAllocatedMemoryTrack_:function(){var series=tr.ui.tracks.buildGlobalAllocatedMemoryChartSeries(this.memoryDumps_);if(!series)
-return;var track=new tr.ui.tracks.ChartTrack(this.viewport);track.heading='Memory per component';track.height=ALLOCATED_MEMORY_TRACK_HEIGHT+'px';track.series=series;track.autoSetAllAxes({expandMax:true});this.appendChild(track);}};return{GlobalMemoryDumpTrack:GlobalMemoryDumpTrack};});'use strict';tr.exportTo('tr.ui.tracks',function(){function Highlighter(viewport){if(viewport===undefined){throw new Error('viewport must be provided');}
-this.viewport_=viewport;};Highlighter.prototype={__proto__:Object.prototype,processModel:function(model){throw new Error('processModel implementation missing');},drawHighlight:function(ctx,dt,viewLWorld,viewRWorld,viewHeight){throw new Error('drawHighlight implementation missing');}};var options=new tr.b.ExtensionRegistryOptions(tr.b.BASIC_REGISTRY_MODE);options.defaultMetadata={};options.mandatoryBaseClass=Highlighter;tr.b.decorateExtensionRegistry(Highlighter,options);return{Highlighter:Highlighter};});'use strict';tr.exportTo('tr.ui.tracks',function(){var CounterTrack=tr.ui.b.define('counter-track',tr.ui.tracks.ChartTrack);CounterTrack.prototype={__proto__:tr.ui.tracks.ChartTrack.prototype,decorate:function(viewport){tr.ui.tracks.ChartTrack.prototype.decorate.call(this,viewport);this.classList.add('counter-track');},get counter(){return this.chart;},set counter(counter){this.heading=counter.name+': ';this.series=CounterTrack.buildChartSeriesFromCounter(counter);this.autoSetAllAxes({expandMax:true});},getModelEventFromItem:function(chartValue){return chartValue;}};CounterTrack.buildChartSeriesFromCounter=function(counter){var numSeries=counter.series.length;var totals=counter.totals;var chartAxis=new tr.ui.tracks.ChartAxis(0,undefined);var chartSeries=counter.series.map(function(series,seriesIndex){var chartPoints=series.samples.map(function(sample,sampleIndex){var total=totals[sampleIndex*numSeries+seriesIndex];return new tr.ui.tracks.ChartPoint(sample,sample.timestamp,total);});var renderingConfig={chartType:tr.ui.tracks.ChartSeriesType.AREA,colorId:series.color};return new tr.ui.tracks.ChartSeries(chartPoints,chartAxis,renderingConfig);});chartSeries.reverse();return chartSeries;};return{CounterTrack:CounterTrack};});'use strict';tr.exportTo('tr.ui.tracks',function(){var startCompare=function(x,y){return x.start-y.start;}
-var FrameTrack=tr.ui.b.define('frame-track',tr.ui.tracks.LetterDotTrack);FrameTrack.prototype={__proto__:tr.ui.tracks.LetterDotTrack.prototype,decorate:function(viewport){tr.ui.tracks.LetterDotTrack.prototype.decorate.call(this,viewport);this.heading='Frames';this.frames_=undefined;this.items=undefined;},get frames(){return this.frames_;},set frames(frames){this.frames_=frames;if(frames===undefined)
-return;this.frames_=this.frames_.slice();this.frames_.sort(startCompare);this.items=this.frames_.map(function(frame){return new FrameDot(frame);});}};function FrameDot(frame){tr.ui.tracks.LetterDot.call(this,frame,'F',frame.colorId,frame.start);}
-FrameDot.prototype={__proto__:tr.ui.tracks.LetterDot.prototype};return{FrameTrack:FrameTrack};});'use strict';tr.exportTo('tr.model',function(){var Settings=tr.b.Settings;function ModelSettings(model){this.model=model;this.objectsByKey_=[];this.nonuniqueKeys_=[];this.buildObjectsByKeyMap_();this.removeNonuniqueKeysFromSettings_();this.ephemeralSettingsByGUID_={};}
+return;var track=new tr.ui.tracks.ChartTrack(this.viewport);track.heading='Memory per component';track.height=ALLOCATED_MEMORY_TRACK_HEIGHT+'px';track.series=series;track.autoSetAllAxes({expandMax:true});this.appendChild(track);}};return{GlobalMemoryDumpTrack:GlobalMemoryDumpTrack};});'use strict';tr.exportTo('tr.model',function(){var Settings=tr.b.Settings;function ModelSettings(model){this.model=model;this.objectsByKey_=[];this.nonuniqueKeys_=[];this.buildObjectsByKeyMap_();this.removeNonuniqueKeysFromSettings_();this.ephemeralSettingsByGUID_={};}
 ModelSettings.prototype={buildObjectsByKeyMap_:function(){var objects=[];this.model.iterateAllPersistableObjects(function(o){objects.push(o);});var objectsByKey={};var NONUNIQUE_KEY='nonuniqueKey';for(var i=0;i<objects.length;i++){var object=objects[i];var objectKey=object.getSettingsKey();if(!objectKey)
 continue;if(objectsByKey[objectKey]===undefined){objectsByKey[objectKey]=object;continue;}
 objectsByKey[objectKey]=NONUNIQUE_KEY;}
@@ -5037,7 +4622,10 @@
 settings[objectKey]={};if(settings[objectKey][objectLevelKey]===value)
 return;settings[objectKey][objectLevelKey]=value;Settings.set('trace_model_settings',settings);},getEphemeralSettingsFor_:function(object){if(object.guid===undefined)
 throw new Error('Only objects with GUIDs can be persisted');if(this.ephemeralSettingsByGUID_[object.guid]===undefined)
-this.ephemeralSettingsByGUID_[object.guid]={};return this.ephemeralSettingsByGUID_[object.guid];}};return{ModelSettings:ModelSettings};});'use strict';tr.exportTo('tr.ui.tracks',function(){var MultiRowTrack=tr.ui.b.define('multi-row-track',tr.ui.tracks.ContainerTrack);MultiRowTrack.prototype={__proto__:tr.ui.tracks.ContainerTrack.prototype,decorate:function(viewport){tr.ui.tracks.ContainerTrack.prototype.decorate.call(this,viewport);this.tooltip_='';this.heading_='';this.groupingSource_=undefined;this.itemsToGroup_=undefined;this.defaultToCollapsedWhenSubRowCountMoreThan=1;this.itemsGroupedOnLastUpdateContents_=undefined;this.currentSubRows_=[];this.expanded_=true;},get itemsToGroup(){return this.itemsToGroup_;},setItemsToGroup:function(itemsToGroup,opt_groupingSource){this.itemsToGroup_=itemsToGroup;this.groupingSource_=opt_groupingSource;this.updateContents_();this.updateExpandedStateFromGroupingSource_();},get heading(){return this.heading_;},set heading(h){this.heading_=h;this.updateContents_();},get tooltip(){return this.tooltip_;},set tooltip(t){this.tooltip_=t;this.updateContents_();},get subRows(){return this.currentSubRows_;},get hasVisibleContent(){return this.children.length>0;},get expanded(){return this.expanded_;},set expanded(expanded){if(this.expanded_==expanded)
+this.ephemeralSettingsByGUID_[object.guid]={};return this.ephemeralSettingsByGUID_[object.guid];}};return{ModelSettings:ModelSettings};});'use strict';tr.exportTo('tr.ui.tracks',function(){var CounterTrack=tr.ui.b.define('counter-track',tr.ui.tracks.ChartTrack);CounterTrack.prototype={__proto__:tr.ui.tracks.ChartTrack.prototype,decorate:function(viewport){tr.ui.tracks.ChartTrack.prototype.decorate.call(this,viewport);this.classList.add('counter-track');},get counter(){return this.chart;},set counter(counter){this.heading=counter.name+': ';this.series=CounterTrack.buildChartSeriesFromCounter(counter);this.autoSetAllAxes({expandMax:true});},getModelEventFromItem:function(chartValue){return chartValue;}};CounterTrack.buildChartSeriesFromCounter=function(counter){var numSeries=counter.series.length;var totals=counter.totals;var chartAxis=new tr.ui.tracks.ChartAxis(0,undefined);var chartSeries=counter.series.map(function(series,seriesIndex){var chartPoints=series.samples.map(function(sample,sampleIndex){var total=totals[sampleIndex*numSeries+seriesIndex];return new tr.ui.tracks.ChartPoint(sample,sample.timestamp,total);});var renderingConfig={chartType:tr.ui.tracks.ChartSeriesType.AREA,colorId:series.color};return new tr.ui.tracks.ChartSeries(chartPoints,chartAxis,renderingConfig);});chartSeries.reverse();return chartSeries;};return{CounterTrack:CounterTrack};});'use strict';tr.exportTo('tr.ui.tracks',function(){var startCompare=function(x,y){return x.start-y.start;}
+var FrameTrack=tr.ui.b.define('frame-track',tr.ui.tracks.LetterDotTrack);FrameTrack.prototype={__proto__:tr.ui.tracks.LetterDotTrack.prototype,decorate:function(viewport){tr.ui.tracks.LetterDotTrack.prototype.decorate.call(this,viewport);this.heading='Frames';this.frames_=undefined;this.items=undefined;},get frames(){return this.frames_;},set frames(frames){this.frames_=frames;if(frames===undefined)
+return;this.frames_=this.frames_.slice();this.frames_.sort(startCompare);this.items=this.frames_.map(function(frame){return new FrameDot(frame);});}};function FrameDot(frame){tr.ui.tracks.LetterDot.call(this,frame,'F',frame.colorId,frame.start);}
+FrameDot.prototype={__proto__:tr.ui.tracks.LetterDot.prototype};return{FrameTrack:FrameTrack};});'use strict';tr.exportTo('tr.ui.analysis',function(){var ObjectSnapshotView=tr.ui.b.define('object-snapshot-view');ObjectSnapshotView.prototype={__proto__:HTMLUnknownElement.prototype,decorate:function(){this.objectSnapshot_=undefined;},get requiresTallView(){return true;},set modelEvent(obj){this.objectSnapshot=obj;},get modelEvent(){return this.objectSnapshot;},get objectSnapshot(){return this.objectSnapshot_;},set objectSnapshot(i){this.objectSnapshot_=i;this.updateContents();},updateContents:function(){throw new Error('Not implemented');}};var options=new tr.b.ExtensionRegistryOptions(tr.b.TYPE_BASED_REGISTRY_MODE);options.mandatoryBaseClass=ObjectSnapshotView;options.defaultMetadata={showInstances:true,showInTrackView:true};tr.b.decorateExtensionRegistry(ObjectSnapshotView,options);return{ObjectSnapshotView:ObjectSnapshotView};});'use strict';tr.exportTo('tr.ui.analysis',function(){var ObjectInstanceView=tr.ui.b.define('object-instance-view');ObjectInstanceView.prototype={__proto__:HTMLUnknownElement.prototype,decorate:function(){this.objectInstance_=undefined;},get requiresTallView(){return true;},set modelEvent(obj){this.objectInstance=obj;},get modelEvent(){return this.objectInstance;},get objectInstance(){return this.objectInstance_;},set objectInstance(i){this.objectInstance_=i;this.updateContents();},updateContents:function(){throw new Error('Not implemented');}};var options=new tr.b.ExtensionRegistryOptions(tr.b.TYPE_BASED_REGISTRY_MODE);options.mandatoryBaseClass=ObjectInstanceView;options.defaultMetadata={showInTrackView:true};tr.b.decorateExtensionRegistry(ObjectInstanceView,options);return{ObjectInstanceView:ObjectInstanceView};});'use strict';tr.exportTo('tr.ui.tracks',function(){var MultiRowTrack=tr.ui.b.define('multi-row-track',tr.ui.tracks.ContainerTrack);MultiRowTrack.prototype={__proto__:tr.ui.tracks.ContainerTrack.prototype,decorate:function(viewport){tr.ui.tracks.ContainerTrack.prototype.decorate.call(this,viewport);this.tooltip_='';this.heading_='';this.groupingSource_=undefined;this.itemsToGroup_=undefined;this.defaultToCollapsedWhenSubRowCountMoreThan=1;this.itemsGroupedOnLastUpdateContents_=undefined;this.currentSubRows_=[];this.expanded_=true;},get itemsToGroup(){return this.itemsToGroup_;},setItemsToGroup:function(itemsToGroup,opt_groupingSource){this.itemsToGroup_=itemsToGroup;this.groupingSource_=opt_groupingSource;this.updateContents_();this.updateExpandedStateFromGroupingSource_();},get heading(){return this.heading_;},set heading(h){this.heading_=h;this.updateContents_();},get tooltip(){return this.tooltip_;},set tooltip(t){this.tooltip_=t;this.updateContents_();},get subRows(){return this.currentSubRows_;},get hasVisibleContent(){return this.children.length>0;},get expanded(){return this.expanded_;},set expanded(expanded){if(this.expanded_==expanded)
 return;this.expanded_=expanded;this.expandedStateChanged_();},onHeadingClicked_:function(e){if(this.subRows.length<=1)
 return;this.expanded=!this.expanded;if(this.groupingSource_){var modelSettings=new tr.model.ModelSettings(this.groupingSource_.model);modelSettings.setSettingFor(this.groupingSource_,'expanded',this.expanded);}
 e.stopPropagation();},updateExpandedStateFromGroupingSource_:function(){if(this.groupingSource_){var numSubRows=this.subRows.length;var modelSettings=new tr.model.ModelSettings(this.groupingSource_.model);if(numSubRows>1){var defaultExpanded;if(numSubRows>this.defaultToCollapsedWhenSubRowCountMoreThan){defaultExpanded=false;}else{defaultExpanded=true;}
@@ -5093,13 +4681,11 @@
 for(var i=0;i<sliceToPut.subSlices.length;i++){if(!findLevel(sliceToPut.subSlices[i],rows,n+1))
 return false;}
 return true;}
-return false;}
-var subRows=[];for(var i=0;i<slices.length;i++){var slice=slices[i];var found=false;var index=subRows.length;for(var j=0;j<subRows.length;j++){if(findLevel(slice,subRows,j)){found=true;index=j;break;}}
+return false;};var subRows=[];for(var i=0;i<slices.length;i++){var slice=slices[i];var found=false;var index=subRows.length;for(var j=0;j<subRows.length;j++){if(findLevel(slice,subRows,j)){found=true;index=j;break;}}
 if(!found)
 subRows.push([]);subRows[index].push(slice);var fitSubSlicesRecursively=function(subSlices,level,rows){if(subSlices===undefined||subSlices.length===0)
 return;if(level===rows.length)
-rows.push([]);for(var h=0;h<subSlices.length;h++){rows[level].push(subSlices[h]);fitSubSlicesRecursively(subSlices[h].subSlices,level+1,rows);}}
-fitSubSlicesRecursively(slice.subSlices,index+1,subRows);}
+rows.push([]);for(var h=0;h<subSlices.length;h++){rows[level].push(subSlices[h]);fitSubSlicesRecursively(subSlices[h].subSlices,level+1,rows);}};fitSubSlicesRecursively(slice.subSlices,index+1,subRows);}
 return subRows;}};return{AsyncSliceGroupTrack:AsyncSliceGroupTrack};});'use strict';tr.exportTo('tr.ui.tracks',function(){var SampleTrack=tr.ui.b.define('sample-track',tr.ui.tracks.RectTrack);SampleTrack.prototype={__proto__:tr.ui.tracks.RectTrack.prototype,decorate:function(viewport){tr.ui.tracks.RectTrack.prototype.decorate.call(this,viewport);},get samples(){return this.rects;},set samples(samples){this.rects=samples;}};return{SampleTrack:SampleTrack};});'use strict';tr.exportTo('tr.ui.tracks',function(){var SliceGroupTrack=tr.ui.b.define('slice-group-track',tr.ui.tracks.MultiRowTrack);SliceGroupTrack.prototype={__proto__:tr.ui.tracks.MultiRowTrack.prototype,decorate:function(viewport){tr.ui.tracks.MultiRowTrack.prototype.decorate.call(this,viewport);this.classList.add('slice-group-track');this.group_=undefined;this.defaultToCollapsedWhenSubRowCountMoreThan=100;},addSubTrack_:function(slices){var track=new tr.ui.tracks.SliceTrack(this.viewport);track.slices=slices;this.appendChild(track);return track;},get group(){return this.group_;},set group(group){this.group_=group;this.setItemsToGroup(this.group_.slices,this.group_);},get eventContainer(){return this.group;},addContainersToTrackMap:function(containerToTrackMap){tr.ui.tracks.MultiRowTrack.prototype.addContainersToTrackMap.apply(this,arguments);containerToTrackMap.addContainer(this.group,this);},buildSubRows_:function(slices){var precisionUnit=this.group.model.intrinsicTimeUnit;if(!slices.length)
 return[];var ops=[];for(var i=0;i<slices.length;i++){if(slices[i].subSlices)
 slices[i].subSlices.splice(0,slices[i].subSlices.length);ops.push(i);}
@@ -5130,7 +4716,7 @@
 return;this.processNameEl_.textContent=this.processBase_.userFriendlyName;this.headerEl_.title=this.processBase_.userFriendlyDetails;this.willAppendTracks_();if(this.expanded){this.appendMemoryDumpTrack_();this.appendObjectInstanceTracks_();this.appendCounterTracks_();this.appendFrameTrack_();this.appendThreadTracks_();}else{this.appendSummaryTrack_();}
 this.didAppendTracks_();},addEventsToTrackMap:function(eventToTrackMap){this.tracks_.forEach(function(track){track.addEventsToTrackMap(eventToTrackMap);});},willAppendTracks_:function(){},didAppendTracks_:function(){},appendMemoryDumpTrack_:function(){},appendSummaryTrack_:function(){var track=new tr.ui.tracks.ProcessSummaryTrack(this.viewport);track.process=this.process;if(!track.hasVisibleContent)
 return;this.appendChild(track);},appendFrameTrack_:function(){var frames=this.process?this.process.frames:undefined;if(!frames||!frames.length)
-return;var track=new tr.ui.tracks.FrameTrack(this.viewport);track.frames=frames;this.appendChild(track);this.backgroundProvider=track;},appendObjectInstanceTracks_:function(){var instancesByTypeName=this.processBase_.objects.getAllInstancesByTypeName();var instanceTypeNames=tr.b.dictionaryKeys(instancesByTypeName);instanceTypeNames.sort();var didAppendAtLeastOneTrack=false;instanceTypeNames.forEach(function(typeName){var allInstances=instancesByTypeName[typeName];var instanceViewInfo=ObjectInstanceView.getTypeInfo(undefined,typeName);var snapshotViewInfo=ObjectSnapshotView.getTypeInfo(undefined,typeName);if(instanceViewInfo&&!instanceViewInfo.metadata.showInTrackView)
+return;var track=new tr.ui.tracks.FrameTrack(this.viewport);track.frames=frames;this.appendChild(track);},appendObjectInstanceTracks_:function(){var instancesByTypeName=this.processBase_.objects.getAllInstancesByTypeName();var instanceTypeNames=tr.b.dictionaryKeys(instancesByTypeName);instanceTypeNames.sort();var didAppendAtLeastOneTrack=false;instanceTypeNames.forEach(function(typeName){var allInstances=instancesByTypeName[typeName];var instanceViewInfo=ObjectInstanceView.getTypeInfo(undefined,typeName);var snapshotViewInfo=ObjectSnapshotView.getTypeInfo(undefined,typeName);if(instanceViewInfo&&!instanceViewInfo.metadata.showInTrackView)
 instanceViewInfo=undefined;if(snapshotViewInfo&&!snapshotViewInfo.metadata.showInTrackView)
 snapshotViewInfo=undefined;var hasViewInfo=instanceViewInfo||snapshotViewInfo;var visibleInstances=[];for(var i=0;i<allInstances.length;i++){var instance=allInstances[i];if(instance.snapshots.length===0)
 continue;if(instance.hasImplicitSnapshots&&!hasViewInfo)
@@ -5153,7 +4739,7 @@
 return selection;};this.appendChild(samplesTrack);},this);}};return{CpuTrack:CpuTrack};});'use strict';tr.exportTo('tr.ui.tracks',function(){var Cpu=tr.model.Cpu;var CpuTrack=tr.ui.tracks.cpu_track;var ProcessTrackBase=tr.ui.tracks.ProcessTrackBase;var SpacingTrack=tr.ui.tracks.SpacingTrack;var KernelTrack=tr.ui.b.define('kernel-track',ProcessTrackBase);KernelTrack.prototype={__proto__:ProcessTrackBase.prototype,decorate:function(viewport){ProcessTrackBase.prototype.decorate.call(this,viewport);},set kernel(kernel){this.processBase=kernel;},get kernel(){return this.processBase;},get eventContainer(){return this.kernel;},get hasVisibleContent(){return this.children.length>1;},addContainersToTrackMap:function(containerToTrackMap){tr.ui.tracks.ProcessTrackBase.prototype.addContainersToTrackMap.call(this,containerToTrackMap);containerToTrackMap.addContainer(this.kernel,this);},willAppendTracks_:function(){var cpus=tr.b.dictionaryValues(this.kernel.cpus);cpus.sort(tr.model.Cpu.compare);var didAppendAtLeastOneTrack=false;for(var i=0;i<cpus.length;++i){var cpu=cpus[i];var track=new tr.ui.tracks.CpuTrack(this.viewport);track.detailedMode=this.expanded;track.cpu=cpu;if(!track.hasVisibleContent)
 continue;this.appendChild(track);didAppendAtLeastOneTrack=true;}
 if(didAppendAtLeastOneTrack)
-this.appendChild(new SpacingTrack(this.viewport));}};return{KernelTrack:KernelTrack};});'use strict';tr.exportTo('tr.ui.tracks',function(){var InteractionTrack=tr.ui.b.define('interaction-track',tr.ui.tracks.MultiRowTrack);InteractionTrack.prototype={__proto__:tr.ui.tracks.MultiRowTrack.prototype,decorate:function(viewport){tr.ui.tracks.MultiRowTrack.prototype.decorate.call(this,viewport);this.heading='Interactions';this.subRows_=[];},set model(model){this.setItemsToGroup(model.interactionRecords,{guid:tr.b.GUID.allocate(),model:model,getSettingsKey:function(){return undefined;}});},buildSubRows_:function(slices){if(this.subRows_.length)
+this.appendChild(new SpacingTrack(this.viewport));}};return{KernelTrack:KernelTrack};});'use strict';tr.exportTo('tr.ui.tracks',function(){var InteractionTrack=tr.ui.b.define('interaction-track',tr.ui.tracks.MultiRowTrack);InteractionTrack.prototype={__proto__:tr.ui.tracks.MultiRowTrack.prototype,decorate:function(viewport){tr.ui.tracks.MultiRowTrack.prototype.decorate.call(this,viewport);this.heading='Interactions';this.subRows_=[];},set model(model){this.setItemsToGroup(model.userModel.expectations,{guid:tr.b.GUID.allocate(),model:model,getSettingsKey:function(){return undefined;}});},buildSubRows_:function(slices){if(this.subRows_.length)
 return this.subRows_;this.subRows_.push.apply(this.subRows_,tr.ui.tracks.AsyncSliceGroupTrack.prototype.buildSubRows_.call({},slices,true));return this.subRows_;},addSubTrack_:function(slices){var track=new tr.ui.tracks.SliceTrack(this.viewport);track.slices=slices;this.appendChild(track);return track;}};return{InteractionTrack:InteractionTrack};});'use strict';tr.exportTo('tr.ui.tracks',function(){var ALLOCATED_MEMORY_TRACK_HEIGHT=50;var ProcessMemoryDumpTrack=tr.ui.b.define('process-memory-dump-track',tr.ui.tracks.ContainerTrack);ProcessMemoryDumpTrack.prototype={__proto__:tr.ui.tracks.ContainerTrack.prototype,decorate:function(viewport){tr.ui.tracks.ContainerTrack.prototype.decorate.call(this,viewport);this.memoryDumps_=undefined;},get memoryDumps(){return this.memoryDumps_;},set memoryDumps(memoryDumps){this.memoryDumps_=memoryDumps;this.updateContents_();},updateContents_:function(){this.clearTracks_();if(!this.memoryDumps_||!this.memoryDumps_.length)
 return;this.appendAllocatedMemoryTrack_();},appendAllocatedMemoryTrack_:function(){var series=tr.ui.tracks.buildProcessAllocatedMemoryChartSeries(this.memoryDumps_);if(!series)
 return;var track=new tr.ui.tracks.ChartTrack(this.viewport);track.heading='Memory per component';track.height=ALLOCATED_MEMORY_TRACK_HEIGHT+'px';track.series=series;track.autoSetAllAxes({expandMax:true});this.appendChild(track);}};return{ProcessMemoryDumpTrack:ProcessMemoryDumpTrack};});'use strict';tr.exportTo('tr.ui.tracks',function(){var ProcessTrackBase=tr.ui.tracks.ProcessTrackBase;var ProcessTrack=tr.ui.b.define('process-track',ProcessTrackBase);ProcessTrack.prototype={__proto__:ProcessTrackBase.prototype,decorate:function(viewport){tr.ui.tracks.ProcessTrackBase.prototype.decorate.call(this,viewport);},drawTrack:function(type){switch(type){case tr.ui.tracks.DrawType.INSTANT_EVENT:if(!this.processBase.instantEvents||this.processBase.instantEvents.length===0)
@@ -5164,7 +4750,7 @@
 var instantEventWidth=2*viewPixWidthWorld;tr.b.iterateOverIntersectingIntervals(this.processBase.instantEvents,function(x){return x.start;},function(x){return x.duration+instantEventWidth;},loWX,hiWX,onPickHit.bind(this));tr.ui.tracks.ContainerTrack.prototype.addIntersectingEventsInRangeToSelectionInWorldSpace.apply(this,arguments);},addClosestEventToSelection:function(worldX,worldMaxDist,loY,hiY,selection){this.addClosestInstantEventToSelection(this.processBase.instantEvents,worldX,worldMaxDist,selection);tr.ui.tracks.ContainerTrack.prototype.addClosestEventToSelection.apply(this,arguments);}};return{ProcessTrack:ProcessTrack};});'use strict';tr.exportTo('tr.ui.tracks',function(){var SelectionState=tr.model.SelectionState;var EventPresenter=tr.ui.b.EventPresenter;var ModelTrack=tr.ui.b.define('model-track',tr.ui.tracks.ContainerTrack);ModelTrack.prototype={__proto__:tr.ui.tracks.ContainerTrack.prototype,decorate:function(viewport){tr.ui.tracks.ContainerTrack.prototype.decorate.call(this,viewport);this.classList.add('model-track');var typeInfos=tr.ui.tracks.Highlighter.getAllRegisteredTypeInfos();this.highlighters_=typeInfos.map(function(typeInfo){return new typeInfo.constructor(viewport);});this.upperMode_=false;this.annotationViews_=[];},get upperMode(){return this.upperMode_;},set upperMode(upperMode){this.upperMode_=upperMode;this.updateContents_();},detach:function(){tr.ui.tracks.ContainerTrack.prototype.detach.call(this);},get model(){return this.model_;},set model(model){this.model_=model;this.updateContents_();this.model_.addEventListener('annotationChange',this.updateAnnotations_.bind(this));},get hasVisibleContent(){return this.children.length>0;},updateContents_:function(){this.textContent='';if(!this.model_)
 return;if(this.upperMode_)
 this.updateContentsForUpperMode_();else
-this.updateContentsForLowerMode_();},updateContentsForUpperMode_:function(){},updateContentsForLowerMode_:function(){if(this.model_.interactionRecords.length){var mrt=new tr.ui.tracks.InteractionTrack(this.viewport_);mrt.model=this.model_;this.appendChild(mrt);}
+this.updateContentsForLowerMode_();},updateContentsForUpperMode_:function(){},updateContentsForLowerMode_:function(){if(this.model_.userModel.expectations.length){var mrt=new tr.ui.tracks.InteractionTrack(this.viewport_);mrt.model=this.model_;this.appendChild(mrt);}
 if(this.model_.alerts.length){var at=new tr.ui.tracks.AlertTrack(this.viewport_);at.alerts=this.model_.alerts;this.appendChild(at);}
 if(this.model_.globalMemoryDumps.length){var gmdt=new tr.ui.tracks.GlobalMemoryDumpTrack(this.viewport_);gmdt.memoryDumps=this.model_.globalMemoryDumps;this.appendChild(gmdt);}
 this.appendDeviceTrack_();this.appendKernelTrack_();var processes=this.model_.getAllProcesses();processes.sort(tr.model.Process.compare);for(var i=0;i<processes.length;++i){var process=processes[i];var track=new tr.ui.tracks.ProcessTrack(this.viewport);track.process=process;if(!track.hasVisibleContent)
@@ -5236,8 +4822,7 @@
 if(this.viewport_.interestRange.min==selectionBounds.min&&this.viewport_.interestRange.max==selectionBounds.max)
 this.viewport_.interestRange.reset();else
 this.viewport_.interestRange.set(selectionBounds);},toggleHighDetails_:function(){this.viewport_.highDetails=!this.viewport_.highDetails;},hideDragBox_:function(){this.$.drag_box.style.left='-1000px';this.$.drag_box.style.top='-1000px';this.$.drag_box.style.width=0;this.$.drag_box.style.height=0;},setDragBoxPosition_:function(xStart,yStart,xEnd,yEnd){var loY=Math.min(yStart,yEnd);var hiY=Math.max(yStart,yEnd);var loX=Math.min(xStart,xEnd);var hiX=Math.max(xStart,xEnd);var modelTrackRect=this.modelTrack_.getBoundingClientRect();var dragRect={left:loX,top:loY,width:hiX-loX,height:hiY-loY};dragRect.right=dragRect.left+dragRect.width;dragRect.bottom=dragRect.top+dragRect.height;var modelTrackContainerRect=this.modelTrackContainer_.getBoundingClientRect();var clipRect={left:modelTrackContainerRect.left,top:modelTrackContainerRect.top,right:modelTrackContainerRect.right,bottom:modelTrackContainerRect.bottom};var headingWidth=window.getComputedStyle(this.querySelector('tr-ui-heading')).width;var trackTitleWidth=parseInt(headingWidth);clipRect.left=clipRect.left+trackTitleWidth;var intersectRect_=function(r1,r2){if(r2.left>r1.right||r2.right<r1.left||r2.top>r1.bottom||r2.bottom<r1.top)
-return false;var results={};results.left=Math.max(r1.left,r2.left);results.top=Math.max(r1.top,r2.top);results.right=Math.min(r1.right,r2.right);results.bottom=Math.min(r1.bottom,r2.bottom);results.width=results.right-results.left;results.height=results.bottom-results.top;return results;}
-var finalDragBox=intersectRect_(clipRect,dragRect);this.$.drag_box.style.left=finalDragBox.left+'px';this.$.drag_box.style.width=finalDragBox.width+'px';this.$.drag_box.style.top=finalDragBox.top+'px';this.$.drag_box.style.height=finalDragBox.height+'px';this.$.drag_box.style.whiteSpace='nowrap';var pixelRatio=window.devicePixelRatio||1;var canv=this.modelTrackContainer_.canvas;var dt=this.viewport_.currentDisplayTransform;var loWX=dt.xViewToWorld((loX-canv.offsetLeft)*pixelRatio);var hiWX=dt.xViewToWorld((hiX-canv.offsetLeft)*pixelRatio);this.$.drag_box.textContent=tr.b.u.TimeDuration.format(hiWX-loWX);var e=new tr.b.Event('selectionChanging');e.loWX=loWX;e.hiWX=hiWX;this.dispatchEvent(e);},onGridToggle_:function(left){var selection=this.brushingStateController_.selection;var tb=left?selection.bounds.min:selection.bounds.max;if(this.viewport_.gridEnabled&&this.viewport_.gridSide===left&&this.viewport_.gridInitialTimebase===tb){this.viewport_.gridside=undefined;this.viewport_.gridEnabled=false;this.viewport_.gridInitialTimebase=undefined;return;}
+return false;var results={};results.left=Math.max(r1.left,r2.left);results.top=Math.max(r1.top,r2.top);results.right=Math.min(r1.right,r2.right);results.bottom=Math.min(r1.bottom,r2.bottom);results.width=results.right-results.left;results.height=results.bottom-results.top;return results;};var finalDragBox=intersectRect_(clipRect,dragRect);this.$.drag_box.style.left=finalDragBox.left+'px';this.$.drag_box.style.width=finalDragBox.width+'px';this.$.drag_box.style.top=finalDragBox.top+'px';this.$.drag_box.style.height=finalDragBox.height+'px';this.$.drag_box.style.whiteSpace='nowrap';var pixelRatio=window.devicePixelRatio||1;var canv=this.modelTrackContainer_.canvas;var dt=this.viewport_.currentDisplayTransform;var loWX=dt.xViewToWorld((loX-canv.offsetLeft)*pixelRatio);var hiWX=dt.xViewToWorld((hiX-canv.offsetLeft)*pixelRatio);this.$.drag_box.textContent=tr.v.Unit.byName.timeDurationInMs.format(hiWX-loWX);var e=new tr.b.Event('selectionChanging');e.loWX=loWX;e.hiWX=hiWX;this.dispatchEvent(e);},onGridToggle_:function(left){var selection=this.brushingStateController_.selection;var tb=left?selection.bounds.min:selection.bounds.max;if(this.viewport_.gridEnabled&&this.viewport_.gridSide===left&&this.viewport_.gridInitialTimebase===tb){this.viewport_.gridside=undefined;this.viewport_.gridEnabled=false;this.viewport_.gridInitialTimebase=undefined;return;}
 var numIntervalsSinceStart=Math.ceil((tb-this.model_.bounds.min)/this.viewport_.gridStep_);this.viewport_.gridEnabled=true;this.viewport_.gridSide=left;this.viewport_.gridInitialTimebase=tb;this.viewport_.gridTimebase=tb-
 (numIntervalsSinceStart+1)*this.viewport_.gridStep_;},storeLastMousePos_:function(e){this.lastMouseViewPos_=this.extractRelativeMousePosition_(e);},storeLastTouchPositions_:function(e){this.lastTouchViewPositions_=this.extractRelativeTouchPositions_(e);},extractRelativeMousePosition_:function(e){var canv=this.modelTrackContainer_.canvas;return{x:e.clientX-canv.offsetLeft,y:e.clientY-canv.offsetTop};},extractRelativeTouchPositions_:function(e){var canv=this.modelTrackContainer_.canvas;var touches=[];for(var i=0;i<e.touches.length;++i){touches.push({x:e.touches[i].clientX-canv.offsetLeft,y:e.touches[i].clientY-canv.offsetTop});}
 return touches;},storeInitialMouseDownPos_:function(e){var position=this.extractRelativeMousePosition_(e);this.mouseViewPosAtMouseDown_.x=position.x;this.mouseViewPosAtMouseDown_.y=position.y;},focusElements_:function(){this.$.hotkey_controller.childRequestsGeneralFocus(this);},storeInitialInteractionPositionsAndFocus_:function(e){this.storeInitialMouseDownPos_(e);this.storeLastMousePos_(e);this.focusElements_();},onBeginPanScan_:function(e){var vp=this.viewport_;this.viewportDisplayTransformAtMouseDown_=vp.currentDisplayTransform.clone();this.isPanningAndScanning_=true;this.storeInitialInteractionPositionsAndFocus_(e);e.preventDefault();},onUpdatePanScan_:function(e){if(!this.isPanningAndScanning_)
@@ -5256,7 +4841,942 @@
 return{x:xSum/positions.length,y:ySum/positions.length};},computeTouchSpan_:function(positions){var xMin=Number.MAX_VALUE;var yMin=Number.MAX_VALUE;var xMax=Number.MIN_VALUE;var yMax=Number.MIN_VALUE;for(var i=0;i<positions.length;++i){xMin=Math.min(xMin,positions[i].x);yMin=Math.min(yMin,positions[i].y);xMax=Math.max(xMax,positions[i].x);yMax=Math.max(yMax,positions[i].y);}
 return Math.sqrt((xMin-xMax)*(xMin-xMax)+
 (yMin-yMax)*(yMin-yMax));},onUpdateTransformForTouch_:function(e){var newPositions=this.extractRelativeTouchPositions_(e);var currentPositions=this.lastTouchViewPositions_;var newCenter=this.computeTouchCenter_(newPositions);var currentCenter=this.computeTouchCenter_(currentPositions);var newSpan=this.computeTouchSpan_(newPositions);var currentSpan=this.computeTouchSpan_(currentPositions);var vp=this.viewport_;var viewWidth=this.viewWidth_;var pixelRatio=window.devicePixelRatio||1;var xDelta=pixelRatio*(newCenter.x-currentCenter.x);var yDelta=newCenter.y-currentCenter.y;var zoomScaleValue=currentSpan>10?newSpan/currentSpan:1;var viewFocus=pixelRatio*newCenter.x;var worldFocus=vp.currentDisplayTransform.xViewToWorld(viewFocus);this.displayTransform_.set(vp.currentDisplayTransform);this.displayTransform_.scaleX*=zoomScaleValue;this.displayTransform_.xPanWorldPosToViewPos(worldFocus,viewFocus,viewWidth);this.displayTransform_.incrementPanXInViewUnits(xDelta);this.displayTransform_.panY-=yDelta;vp.setDisplayTransformImmediately(this.displayTransform_);this.storeLastTouchPositions_(e);},initHintText_:function(){this.$.hint_text.style.display='none';this.pendingHintTextClearTimeout_=undefined;},showHintText_:function(text){if(this.pendingHintTextClearTimeout_){window.clearTimeout(this.pendingHintTextClearTimeout_);this.pendingHintTextClearTimeout_=undefined;}
-this.pendingHintTextClearTimeout_=setTimeout(this.hideHintText_.bind(this),1000);this.$.hint_text.textContent=text;this.$.hint_text.style.display='';},hideHintText_:function(){this.pendingHintTextClearTimeout_=undefined;this.$.hint_text.style.display='none';}});'use strict';Polymer('tr-ui-find-control',{filterKeyDown:function(e){if(e.keyCode===27){var hkc=tr.b.getHotkeyControllerForElement(this);if(hkc){hkc.childRequestsBlur(this);}else{this.blur();}
+this.pendingHintTextClearTimeout_=setTimeout(this.hideHintText_.bind(this),1000);this.$.hint_text.textContent=text;this.$.hint_text.style.display='';},hideHintText_:function(){this.pendingHintTextClearTimeout_=undefined;this.$.hint_text.style.display='none';}});'use strict';tr.exportTo('tr.ui.e.highlighter',function(){var Highlighter=tr.ui.tracks.Highlighter;function VSyncHighlighter(viewport){Highlighter.call(this,viewport);this.times_=[];}
+VSyncHighlighter.VSYNC_HIGHLIGHT_COLOR=new tr.b.Color(0,0,255);VSyncHighlighter.VSYNC_HIGHLIGHT_ALPHA=0.1;VSyncHighlighter.VSYNC_DENSITY_TRANSPARENT=0.20;VSyncHighlighter.VSYNC_DENSITY_OPAQUE=0.10;VSyncHighlighter.VSYNC_DENSITY_RANGE=VSyncHighlighter.VSYNC_DENSITY_TRANSPARENT-
+VSyncHighlighter.VSYNC_DENSITY_OPAQUE;VSyncHighlighter.generateStripes=function(times,minTime,maxTime){if(times.length===0)
+return[];var stripes=[];var lowIndex=tr.b.findLowIndexInSortedArray(times,function(time){return time;},minTime);var highIndex=lowIndex-1;while(times[highIndex+1]<=maxTime){highIndex++;}
+for(var i=lowIndex-(lowIndex%2);i<=highIndex;i+=2){var left=i<lowIndex?minTime:times[i];var right=i+1>highIndex?maxTime:times[i+1];stripes.push([left,right]);}
+return stripes;}
+VSyncHighlighter.prototype={__proto__:Highlighter.prototype,processModel:function(model){this.times_=model.device.vSyncTimestamps;},drawHighlight:function(ctx,dt,viewLWorld,viewRWorld,viewHeight){if(!this.viewport_.highlightVSync){return;}
+var stripes=VSyncHighlighter.generateStripes(this.times_,viewLWorld,viewRWorld);if(stripes.length==0){return;}
+var stripeRange=stripes[stripes.length-1][1]-stripes[0][0];var stripeDensity=stripes.length/(dt.scaleX*stripeRange);var clampedStripeDensity=tr.b.clamp(stripeDensity,VSyncHighlighter.VSYNC_DENSITY_OPAQUE,VSyncHighlighter.VSYNC_DENSITY_TRANSPARENT);var opacity=(VSyncHighlighter.VSYNC_DENSITY_TRANSPARENT-clampedStripeDensity)/VSyncHighlighter.VSYNC_DENSITY_RANGE;if(opacity==0){return;}
+var pixelRatio=window.devicePixelRatio||1;var height=viewHeight*pixelRatio;var c=VSyncHighlighter.VSYNC_HIGHLIGHT_COLOR;ctx.fillStyle=c.toStringWithAlphaOverride(VSyncHighlighter.VSYNC_HIGHLIGHT_ALPHA*opacity);for(var i=0;i<stripes.length;i++){var xLeftView=dt.xWorldToView(stripes[i][0]);var xRightView=dt.xWorldToView(stripes[i][1]);ctx.fillRect(xLeftView,0,xRightView-xLeftView,height);}}};tr.ui.tracks.Highlighter.register(VSyncHighlighter);return{VSyncHighlighter:VSyncHighlighter};});'use strict';tr.exportTo('tr.ui.b',function(){var TableFormat={};TableFormat.SelectionMode={NONE:0,ROW:1,CELL:2};TableFormat.HighlightStyle={DEFAULT:0,NONE:1,LIGHT:2,DARK:3};return{TableFormat:TableFormat};});'use strict';(function(){var RIGHT_ARROW=String.fromCharCode(0x25b6);var UNSORTED_ARROW=String.fromCharCode(0x25BF);var ASCENDING_ARROW=String.fromCharCode(0x25B4);var DESCENDING_ARROW=String.fromCharCode(0x25BE);var BASIC_INDENTATION=8;var SelectionMode=tr.ui.b.TableFormat.SelectionMode;var HighlightStyle=tr.ui.b.TableFormat.HighlightStyle;Polymer('tr-ui-b-table',{created:function(){this.selectionMode_=SelectionMode.NONE;this.rowHighlightStyle_=HighlightStyle.DEFAULT;this.cellHighlightStyle_=HighlightStyle.DEFAULT;this.selectedTableRowInfo_=undefined;this.selectedColumnIndex_=undefined;this.tableColumns_=[];this.tableRows_=[];this.tableRowsInfo_=new WeakMap();this.tableFooterRows_=[];this.tableFooterRowsInfo_=new WeakMap();this.sortColumnIndex_=undefined;this.sortDescending_=false;this.columnsWithExpandButtons_=[];this.headerCells_=[];this.showHeader_=true;this.emptyValue_=undefined;this.subRowsPropertyName_='subRows';this.customizeTableRowCallback_=undefined;},ready:function(){this.$.body.addEventListener('keydown',this.onKeyDown_.bind(this),true);},clear:function(){this.selectionMode_=SelectionMode.NONE;this.rowHighlightStyle_=HighlightStyle.DEFAULT;this.cellHighlightStyle_=HighlightStyle.DEFAULT;this.selectedTableRowInfo_=undefined;this.selectedColumnIndex_=undefined;this.textContent='';this.tableColumns_=[];this.tableRows_=[];this.tableRowsInfo_=new WeakMap();this.tableFooterRows_=[];this.tableFooterRowsInfo_=new WeakMap();this.sortColumnIndex_=undefined;this.sortDescending_=false;this.columnsWithExpandButtons_=[];this.headerCells_=[];this.subRowsPropertyName_='subRows';this.defaultExpansionStateCallback_=undefined;},get showHeader(){return this.showHeader_;},set showHeader(showHeader){this.showHeader_=showHeader;this.scheduleRebuildHeaders_();},set subRowsPropertyName(name){this.subRowsPropertyName_=name;},set defaultExpansionStateCallback(cb){this.defaultExpansionStateCallback_=cb;this.scheduleRebuildBody_();},set customizeTableRowCallback(cb){this.customizeTableRowCallback_=cb;this.scheduleRebuildBody_();},get emptyValue(){return this.emptyValue_;},set emptyValue(emptyValue){var previousEmptyValue=this.emptyValue_;this.emptyValue_=emptyValue;if(this.tableRows_.length===0&&emptyValue!==previousEmptyValue)
+this.scheduleRebuildBody_();},set tableColumns(columns){var columnsWithExpandButtons=[];for(var i=0;i<columns.length;i++){if(columns[i].showExpandButtons)
+columnsWithExpandButtons.push(i);}
+if(columnsWithExpandButtons.length===0){columnsWithExpandButtons=[0];}
+for(var i=0;i<columns.length;i++){var colInfo=columns[i];if(colInfo.width===undefined)
+continue;var hasExpandButton=columnsWithExpandButtons.indexOf(i)!==-1;var w=colInfo.width;if(w){if(/\d+px/.test(w)){continue;}else if(/\d+%/.test(w)){if(hasExpandButton){throw new Error('Columns cannot be %-sized and host '+' an expand button');}}else{throw new Error('Unrecognized width string');}}}
+this.tableColumns_=columns;this.headerCells_=[];this.columnsWithExpandButtons_=columnsWithExpandButtons;this.sortColumnIndex=undefined;this.scheduleRebuildHeaders_();this.tableRows=this.tableRows_;},get tableColumns(){return this.tableColumns_;},set tableRows(rows){this.selectedTableRowInfo_=undefined;this.selectedColumnIndex_=undefined;this.maybeUpdateSelectedRow_();this.tableRows_=rows;this.tableRowsInfo_=new WeakMap();this.scheduleRebuildBody_();},get tableRows(){return this.tableRows_;},set footerRows(rows){this.tableFooterRows_=rows;this.tableFooterRowsInfo_=new WeakMap();this.scheduleRebuildFooter_();},get footerRows(){return this.tableFooterRows_;},set sortColumnIndex(number){if(number===this.sortColumnIndex_)
+return;if(number===undefined){this.sortColumnIndex_=undefined;this.updateHeaderArrows_();this.dispatchSortingChangedEvent_();return;}
+if(this.tableColumns_.length<=number)
+throw new Error('Column number '+number+' is out of bounds.');if(!this.tableColumns_[number].cmp)
+throw new Error('Column '+number+' does not have a comparator.');this.sortColumnIndex_=number;this.updateHeaderArrows_();this.scheduleRebuildBody_();this.dispatchSortingChangedEvent_();},get sortColumnIndex(){return this.sortColumnIndex_;},set sortDescending(value){var newValue=!!value;if(newValue!==this.sortDescending_){this.sortDescending_=newValue;this.updateHeaderArrows_();this.scheduleRebuildBody_();this.dispatchSortingChangedEvent_();}},get sortDescending(){return this.sortDescending_;},updateHeaderArrows_:function(){for(var i=0;i<this.headerCells_.length;i++){if(!this.tableColumns_[i].cmp){this.headerCells_[i].sideContent='';continue;}
+if(i!==this.sortColumnIndex_){this.headerCells_[i].sideContent=UNSORTED_ARROW;continue;}
+this.headerCells_[i].sideContent=this.sortDescending_?DESCENDING_ARROW:ASCENDING_ARROW;}},sortRows_:function(rows){rows.sort(function(rowA,rowB){if(this.sortDescending_)
+return this.tableColumns_[this.sortColumnIndex_].cmp(rowB.userRow,rowA.userRow);return this.tableColumns_[this.sortColumnIndex_].cmp(rowA.userRow,rowB.userRow);}.bind(this));for(var i=0;i<rows.length;i++){if(this.getExpandedForUserRow_(rows[i]))
+this.sortRows_(rows[i][this.subRowsPropertyName_]);}},generateHeaderColumns_:function(){this.headerCells_=[];this.$.head.textContent='';if(!this.showHeader_)
+return;var tr=this.appendNewElement_(this.$.head,'tr');for(var i=0;i<this.tableColumns_.length;i++){var td=this.appendNewElement_(tr,'td');var headerCell=document.createElement('tr-ui-b-table-header-cell');if(this.showHeader)
+headerCell.cellTitle=this.tableColumns_[i].title;else
+headerCell.cellTitle='';if(this.tableColumns_[i].cmp){td.classList.add('sensitive');headerCell.tapCallback=this.createSortCallback_(i);if(this.sortColumnIndex_===i)
+headerCell.sideContent=this.sortDescending_?DESCENDING_ARROW:ASCENDING_ARROW;else
+headerCell.sideContent=UNSORTED_ARROW;}
+td.appendChild(headerCell);this.headerCells_.push(headerCell);}},applySizes_:function(){if(this.tableRows_.length===0&&!this.showHeader)
+return;var rowToRemoveSizing;var rowToSize;if(this.showHeader){rowToSize=this.$.head.children[0];rowToRemoveSizing=this.$.body.children[0];}else{rowToSize=this.$.body.children[0];rowToRemoveSizing=this.$.head.children[0];}
+for(var i=0;i<this.tableColumns_.length;i++){if(rowToRemoveSizing&&rowToRemoveSizing.children[i]){var tdToRemoveSizing=rowToRemoveSizing.children[i];tdToRemoveSizing.style.minWidth='';tdToRemoveSizing.style.width='';}
+var td=rowToSize.children[i];var delta;if(this.columnsWithExpandButtons_.indexOf(i)!==-1){td.style.paddingLeft=BASIC_INDENTATION+'px';delta=BASIC_INDENTATION+'px';}else{delta=undefined;}
+function calc(base,delta){if(delta)
+return'calc('+base+' - '+delta+')';else
+return base;}
+var w=this.tableColumns_[i].width;if(w){if(/\d+px/.test(w)){td.style.minWidth=calc(w,delta);}else if(/\d+%/.test(w)){td.style.width=w;}else{throw new Error('Unrecognized width string: '+w);}}}},createSortCallback_:function(columnNumber){return function(){var previousIndex=this.sortColumnIndex;this.sortColumnIndex=columnNumber;if(previousIndex!==columnNumber)
+this.sortDescending=false;else
+this.sortDescending=!this.sortDescending;}.bind(this);},generateTableRowNodes_:function(tableSection,userRows,rowInfoMap,indentation,lastAddedRow,parentRowInfo){if(this.sortColumnIndex_!==undefined&&tableSection===this.$.body){userRows=userRows.slice();userRows.sort(function(rowA,rowB){var c=this.tableColumns_[this.sortColumnIndex_].cmp(rowA,rowB);if(this.sortDescending_)
+c=-c;return c;}.bind(this));}
+for(var i=0;i<userRows.length;i++){var userRow=userRows[i];var rowInfo=this.getOrCreateRowInfoFor_(rowInfoMap,userRow,parentRowInfo);var htmlNode=this.getHTMLNodeForRowInfo_(tableSection,rowInfo,rowInfoMap,indentation);if(lastAddedRow===undefined){tableSection.insertBefore(htmlNode,tableSection.firstChild);}else{var nextSiblingOfLastAdded=lastAddedRow.nextSibling;tableSection.insertBefore(htmlNode,nextSiblingOfLastAdded);}
+this.updateTabIndexForTableRowNode_(htmlNode);lastAddedRow=htmlNode;if(!rowInfo.isExpanded)
+continue;lastAddedRow=this.generateTableRowNodes_(tableSection,userRow[this.subRowsPropertyName_],rowInfoMap,indentation+1,lastAddedRow,rowInfo);}
+return lastAddedRow;},getOrCreateRowInfoFor_:function(rowInfoMap,userRow,parentRowInfo){var rowInfo=undefined;if(rowInfoMap.has(userRow)){rowInfo=rowInfoMap.get(userRow);}else{rowInfo={userRow:userRow,htmlNode:undefined,parentRowInfo:parentRowInfo};rowInfoMap.set(userRow,rowInfo);}
+rowInfo.isExpanded=this.getExpandedForUserRow_(userRow);return rowInfo;},customizeTableRow_:function(userRow,trElement){if(!this.customizeTableRowCallback_)
+return;this.customizeTableRowCallback_(userRow,trElement);},getHTMLNodeForRowInfo_:function(tableSection,rowInfo,rowInfoMap,indentation){if(rowInfo.htmlNode){this.customizeTableRow_(rowInfo.userRow,rowInfo.htmlNode);return rowInfo.htmlNode;}
+var INDENT_SPACE=indentation*16;var INDENT_SPACE_NO_BUTTON=indentation*16+BASIC_INDENTATION;var trElement=this.ownerDocument.createElement('tr');rowInfo.htmlNode=trElement;rowInfo.indentation=indentation;trElement.rowInfo=rowInfo;this.customizeTableRow_(rowInfo.userRow,trElement);for(var i=0;i<this.tableColumns_.length;){var td=this.appendNewElement_(trElement,'td');td.columnIndex=i;var column=this.tableColumns_[i];var value=column.value(rowInfo.userRow);var colSpan=column.colSpan?column.colSpan:1;td.style.colSpan=colSpan;if(column.textAlign){td.style.textAlign=column.textAlign;}
+if(this.doesColumnIndexSupportSelection(i))
+td.classList.add('supports-selection');if(this.columnsWithExpandButtons_.indexOf(i)!=-1){if(rowInfo.userRow[this.subRowsPropertyName_]&&rowInfo.userRow[this.subRowsPropertyName_].length>0){td.style.paddingLeft=INDENT_SPACE+'px';var expandButton=this.appendNewElement_(td,'expand-button');expandButton.textContent=RIGHT_ARROW;if(rowInfo.isExpanded)
+expandButton.classList.add('button-expanded');}else{td.style.paddingLeft=INDENT_SPACE_NO_BUTTON+'px';}}
+if(value!==undefined)
+td.appendChild(tr.ui.b.asHTMLOrTextNode(value,this.ownerDocument));i+=colSpan;}
+var isSelectable=tableSection===this.$.body;var isExpandable=rowInfo.userRow[this.subRowsPropertyName_]&&rowInfo.userRow[this.subRowsPropertyName_].length;if(isSelectable||isExpandable){trElement.addEventListener('click',function(e){e.stopPropagation();if(e.target.tagName=='EXPAND-BUTTON'){this.setExpandedForUserRow_(tableSection,rowInfoMap,rowInfo.userRow,!rowInfo.isExpanded);return;}
+function getTD(cur){if(cur===trElement)
+throw new Error('woah');if(cur.parentElement===trElement)
+return cur;return getTD(cur.parentElement);}
+if(isSelectable&&this.selectionMode_!==SelectionMode.NONE){var shouldSelect=false;var columnIndex=getTD(e.target).columnIndex;switch(this.selectionMode_){case SelectionMode.ROW:shouldSelect=this.selectedTableRowInfo_!==rowInfo;break;case SelectionMode.CELL:if(this.doesColumnIndexSupportSelection(columnIndex)){shouldSelect=this.selectedTableRowInfo_!==rowInfo||this.selectedColumnIndex_!==columnIndex;}
+break;default:throw new Error('Invalid selection mode '+
+this.selectionMode_);}
+if(shouldSelect){this.didTableRowInfoGetClicked_(rowInfo,columnIndex);return;}}
+if(isExpandable){this.setExpandedForUserRow_(tableSection,rowInfoMap,rowInfo.userRow,!rowInfo.isExpanded);}}.bind(this));}
+return rowInfo.htmlNode;},removeSubNodes_:function(tableSection,rowInfo,rowInfoMap){if(rowInfo.userRow[this.subRowsPropertyName_]===undefined)
+return;for(var i=0;i<rowInfo.userRow[this.subRowsPropertyName_].length;i++){var subRow=rowInfo.userRow[this.subRowsPropertyName_][i];var subRowInfo=rowInfoMap.get(subRow);if(!subRowInfo)
+continue;var subNode=subRowInfo.htmlNode;if(subNode&&subNode.parentNode===tableSection){tableSection.removeChild(subNode);this.removeSubNodes_(tableSection,subRowInfo,rowInfoMap);}}},scheduleRebuildHeaders_:function(){this.headerDirty_=true;this.scheduleRebuild_();},scheduleRebuildBody_:function(){this.bodyDirty_=true;this.scheduleRebuild_();},scheduleRebuildFooter_:function(){this.footerDirty_=true;this.scheduleRebuild_();},scheduleRebuild_:function(){if(this.rebuildPending_)
+return;this.rebuildPending_=true;setTimeout(function(){this.rebuildPending_=false;this.rebuild();}.bind(this),0);},rebuildIfNeeded_:function(){this.rebuild();},rebuild:function(){var wasBodyOrHeaderDirty=this.headerDirty_||this.bodyDirty_;if(this.headerDirty_){this.generateHeaderColumns_();this.headerDirty_=false;}
+if(this.bodyDirty_){this.$.body.textContent='';this.generateTableRowNodes_(this.$.body,this.tableRows_,this.tableRowsInfo_,0,undefined,undefined);if(this.tableRows_.length===0&&this.emptyValue_!==undefined){var trElement=this.ownerDocument.createElement('tr');this.$.body.appendChild(trElement);trElement.classList.add('empty-row');var td=this.ownerDocument.createElement('td');trElement.appendChild(td);td.colSpan=this.tableColumns_.length;var emptyValue=this.emptyValue_;td.appendChild(tr.ui.b.asHTMLOrTextNode(emptyValue,this.ownerDocument));}
+this.bodyDirty_=false;}
+if(wasBodyOrHeaderDirty)
+this.applySizes_();if(this.footerDirty_){this.$.foot.textContent='';this.generateTableRowNodes_(this.$.foot,this.tableFooterRows_,this.tableFooterRowsInfo_,0,undefined,undefined);if(this.tableFooterRowsInfo_.length){this.$.body.classList.add('has-footer');}else{this.$.body.classList.remove('has-footer');}
+this.footerDirty_=false;}},appendNewElement_:function(parent,tagName){var element=parent.ownerDocument.createElement(tagName);parent.appendChild(element);return element;},getExpandedForTableRow:function(userRow){this.rebuildIfNeeded_();var rowInfo=this.tableRowsInfo_.get(userRow);if(rowInfo===undefined)
+throw new Error('Row has not been seen, must expand its parents');return rowInfo.isExpanded;},getExpandedForUserRow_:function(userRow){if(userRow[this.subRowsPropertyName_]===undefined)
+return false;if(userRow[this.subRowsPropertyName_].length===0)
+return false;if(userRow.isExpanded)
+return true;if(userRow.isExpanded===false)
+return false;if(this.defaultExpansionStateCallback_===undefined)
+return false;var parentUserRow=undefined;var rowInfo=this.tableRowsInfo_.get(userRow);if(rowInfo&&rowInfo.parentRowInfo)
+parentUserRow=rowInfo.parentRowInfo.userRow;return this.defaultExpansionStateCallback_(userRow,parentUserRow);},setExpandedForTableRow:function(userRow,expanded){this.rebuildIfNeeded_();var rowInfo=this.tableRowsInfo_.get(userRow);if(rowInfo===undefined)
+throw new Error('Row has not been seen, must expand its parents');return this.setExpandedForUserRow_(this.$.body,this.tableRowsInfo_,userRow,expanded);},setExpandedForUserRow_:function(tableSection,rowInfoMap,userRow,expanded){this.rebuildIfNeeded_();var rowInfo=rowInfoMap.get(userRow);if(rowInfo===undefined)
+throw new Error('Row has not been seen, must expand its parents');rowInfo.isExpanded=!!expanded;if(rowInfo.htmlNode===undefined)
+return;if(rowInfo.htmlNode.parentElement!==tableSection)
+return;var expandButton=rowInfo.htmlNode.querySelector('expand-button');if(rowInfo.isExpanded){expandButton.classList.add('button-expanded');var lastAddedRow=rowInfo.htmlNode;if(rowInfo.userRow[this.subRowsPropertyName_]){this.generateTableRowNodes_(tableSection,rowInfo.userRow[this.subRowsPropertyName_],rowInfoMap,rowInfo.indentation+1,lastAddedRow,rowInfo);}}else{expandButton.classList.remove('button-expanded');this.removeSubNodes_(tableSection,rowInfo,rowInfoMap);}
+this.maybeUpdateSelectedRow_();},get selectionMode(){return this.selectionMode_;},set selectionMode(selectionMode){if(!tr.b.dictionaryContainsValue(SelectionMode,selectionMode))
+throw new Error('Invalid selection mode '+selectionMode);this.rebuildIfNeeded_();this.selectionMode_=selectionMode;this.didSelectionStateChange_();},get rowHighlightStyle(){return this.rowHighlightStyle_;},set rowHighlightStyle(rowHighlightStyle){if(!tr.b.dictionaryContainsValue(HighlightStyle,rowHighlightStyle))
+throw new Error('Invalid row highlight style '+rowHighlightStyle);this.rebuildIfNeeded_();this.rowHighlightStyle_=rowHighlightStyle;this.didSelectionStateChange_();},get resolvedRowHighlightStyle(){if(this.rowHighlightStyle_!==HighlightStyle.DEFAULT)
+return this.rowHighlightStyle_;switch(this.selectionMode_){case SelectionMode.NONE:return HighlightStyle.NONE;case SelectionMode.ROW:return HighlightStyle.DARK;case SelectionMode.CELL:return HighlightStyle.LIGHT;default:throw new Error('Invalid selection mode '+selectionMode);}},get cellHighlightStyle(){return this.cellHighlightStyle_;},set cellHighlightStyle(cellHighlightStyle){if(!tr.b.dictionaryContainsValue(HighlightStyle,cellHighlightStyle))
+throw new Error('Invalid cell highlight style '+cellHighlightStyle);this.rebuildIfNeeded_();this.cellHighlightStyle_=cellHighlightStyle;this.didSelectionStateChange_();},get resolvedCellHighlightStyle(){if(this.cellHighlightStyle_!==HighlightStyle.DEFAULT)
+return this.cellHighlightStyle_;switch(this.selectionMode_){case SelectionMode.NONE:case SelectionMode.ROW:return HighlightStyle.NONE;case SelectionMode.CELL:return HighlightStyle.DARK;default:throw new Error('Invalid selection mode '+selectionMode);}},setHighlightStyle_:function(highlightAttribute,resolvedHighlightStyle){switch(resolvedHighlightStyle){case HighlightStyle.NONE:this.$.body.removeAttribute(highlightAttribute);break;case HighlightStyle.LIGHT:this.$.body.setAttribute(highlightAttribute,'light');break;case HighlightStyle.DARK:this.$.body.setAttribute(highlightAttribute,'dark');break;default:throw new Error('Invalid resolved highlight style '+
+resolvedHighlightStyle);}},didSelectionStateChange_:function(){this.setHighlightStyle_('row-highlight-style',this.resolvedRowHighlightStyle);this.setHighlightStyle_('cell-highlight-style',this.resolvedCellHighlightStyle);for(var i=0;i<this.$.body.children.length;i++)
+this.updateTabIndexForTableRowNode_(this.$.body.children[i]);this.maybeUpdateSelectedRow_();},maybeUpdateSelectedRow_:function(){if(this.selectedTableRowInfo_===undefined)
+return;if(this.selectionMode_===SelectionMode.NONE){this.removeSelectedState_();this.selectedTableRowInfo_=undefined;return;}
+function isVisible(rowInfo){if(!rowInfo.htmlNode)
+return false;return!!rowInfo.htmlNode.parentElement;}
+if(isVisible(this.selectedTableRowInfo_)){this.updateSelectedState_();return;}
+this.removeSelectedState_();var curRowInfo=this.selectedTableRowInfo_;while(curRowInfo&&!isVisible(curRowInfo))
+curRowInfo=curRowInfo.parentRowInfo;this.selectedTableRowInfo_=curRowInfo;if(this.selectedTableRowInfo_)
+this.updateSelectedState_();},didTableRowInfoGetClicked_:function(rowInfo,columnIndex){switch(this.selectionMode_){case SelectionMode.NONE:return;case SelectionMode.CELL:if(!this.doesColumnIndexSupportSelection(columnIndex))
+return;if(this.selectedColumnIndex!==columnIndex)
+this.selectedColumnIndex=columnIndex;case SelectionMode.ROW:if(this.selectedTableRowInfo_!==rowInfo)
+this.selectedTableRow=rowInfo.userRow;}},get selectedTableRow(){if(!this.selectedTableRowInfo_)
+return undefined;return this.selectedTableRowInfo_.userRow;},set selectedTableRow(userRow){this.rebuildIfNeeded_();if(this.selectionMode_===SelectionMode.NONE)
+throw new Error('Selection is off.');var rowInfo;if(userRow===undefined){rowInfo=undefined;}else{rowInfo=this.tableRowsInfo_.get(userRow);if(!rowInfo)
+throw new Error('Row has not been seen, must expand its parents.');}
+var e=this.prepareToChangeSelection_();this.selectedTableRowInfo_=rowInfo;if(this.selectedTableRowInfo_===undefined){this.selectedColumnIndex_=undefined;this.removeSelectedState_();}else{switch(this.selectionMode_){case SelectionMode.ROW:this.selectedColumnIndex_=undefined;break;case SelectionMode.CELL:if(this.selectedColumnIndex_===undefined){var i=this.getFirstSelectableColumnIndex_();if(i==-1)
+throw new Error('Cannot find a selectable column.');this.selectedColumnIndex_=i;}
+break;default:throw new Error('Invalid selection mode '+this.selectionMode_);}
+this.updateSelectedState_();}
+this.dispatchEvent(e);},updateTabIndexForTableRowNode_:function(row){if(this.selectionMode_===SelectionMode.ROW)
+row.tabIndex=0;else
+row.removeAttribute('tabIndex');var enableCellTab=this.selectionMode_===SelectionMode.CELL;for(var i=0;i<this.tableColumns_.length;i++){var cell=row.children[i];if(enableCellTab&&this.doesColumnIndexSupportSelection(i))
+cell.tabIndex=0;else
+cell.removeAttribute('tabIndex');}},prepareToChangeSelection_:function(){var e=new tr.b.Event('selection-changed');var previousSelectedRowInfo=this.selectedTableRowInfo_;if(previousSelectedRowInfo)
+e.previousSelectedTableRow=previousSelectedRowInfo.userRow;else
+e.previousSelectedTableRow=undefined;this.removeSelectedState_();return e;},removeSelectedState_:function(){this.setSelectedState_(false);},updateSelectedState_:function(){this.setSelectedState_(true);},setSelectedState_:function(select){if(this.selectedTableRowInfo_===undefined)
+return;var rowNode=this.selectedTableRowInfo_.htmlNode;if(select)
+rowNode.setAttribute('selected',true);else
+rowNode.removeAttribute('selected');var cellNode=rowNode.children[this.selectedColumnIndex_];if(!cellNode)
+return;if(select)
+cellNode.setAttribute('selected',true);else
+cellNode.removeAttribute('selected');},doesColumnIndexSupportSelection:function(columnIndex){var columnInfo=this.tableColumns_[columnIndex];var scs=columnInfo.supportsCellSelection;if(scs===false)
+return false;return true;},getFirstSelectableColumnIndex_:function(){for(var i=0;i<this.tableColumns_.length;i++){if(this.doesColumnIndexSupportSelection(i))
+return i;}
+return-1;},getSelectableNodeGivenTableRowNode_:function(htmlNode){switch(this.selectionMode_){case SelectionMode.ROW:return htmlNode;case SelectionMode.CELL:return htmlNode.children[this.selectedColumnIndex_];default:throw new Error('Invalid selection mode '+this.selectionMode_);}},get selectedColumnIndex(){if(this.selectionMode_!==SelectionMode.CELL)
+return undefined;return this.selectedColumnIndex_;},set selectedColumnIndex(selectedColumnIndex){this.rebuildIfNeeded_();if(this.selectionMode_===SelectionMode.NONE)
+throw new Error('Selection is off.');if(selectedColumnIndex<0||selectedColumnIndex>=this.tableColumns_.length)
+throw new Error('Invalid index');if(!this.doesColumnIndexSupportSelection(selectedColumnIndex))
+throw new Error('Selection is not supported on this column');var e=this.prepareToChangeSelection_();this.selectedColumnIndex_=selectedColumnIndex;if(this.selectedColumnIndex_===undefined)
+this.selectedTableRowInfo_=undefined;this.updateSelectedState_();this.dispatchEvent(e);},onKeyDown_:function(e){if(this.selectionMode_===SelectionMode.NONE)
+return;if(this.selectedTableRowInfo_===undefined)
+return;var code_to_command_names={13:'ENTER',37:'ARROW_LEFT',38:'ARROW_UP',39:'ARROW_RIGHT',40:'ARROW_DOWN'};var cmdName=code_to_command_names[e.keyCode];if(cmdName===undefined)
+return;e.stopPropagation();e.preventDefault();this.performKeyCommand_(cmdName);},performKeyCommand_:function(cmdName){this.rebuildIfNeeded_();var rowInfo=this.selectedTableRowInfo_;var htmlNode=rowInfo.htmlNode;if(cmdName==='ARROW_UP'){var prev=htmlNode.previousElementSibling;if(prev){tr.ui.b.scrollIntoViewIfNeeded(prev);this.selectedTableRow=prev.rowInfo.userRow;this.focusSelected_();return;}
+return;}
+if(cmdName==='ARROW_DOWN'){var next=htmlNode.nextElementSibling;if(next){tr.ui.b.scrollIntoViewIfNeeded(next);this.selectedTableRow=next.rowInfo.userRow;this.focusSelected_();return;}
+return;}
+if(cmdName==='ARROW_RIGHT'){switch(this.selectionMode_){case SelectionMode.ROW:if(rowInfo.userRow[this.subRowsPropertyName_]===undefined)
+return;if(rowInfo.userRow[this.subRowsPropertyName_].length===0)
+return;if(!rowInfo.isExpanded)
+this.setExpandedForTableRow(rowInfo.userRow,true);this.selectedTableRow=rowInfo.userRow[this.subRowsPropertyName_][0];this.focusSelected_();return;case SelectionMode.CELL:var newIndex=this.selectedColumnIndex_+1;if(newIndex>=this.tableColumns_.length)
+return;if(!this.doesColumnIndexSupportSelection(newIndex))
+return;this.selectedColumnIndex=newIndex;this.focusSelected_();return;default:throw new Error('Invalid selection mode '+this.selectionMode_);}}
+if(cmdName==='ARROW_LEFT'){switch(this.selectionMode_){case SelectionMode.ROW:if(rowInfo.isExpanded){this.setExpandedForTableRow(rowInfo.userRow,false);this.focusSelected_();return;}
+var parentRowInfo=rowInfo.parentRowInfo;if(parentRowInfo){this.selectedTableRow=parentRowInfo.userRow;this.focusSelected_();return;}
+return;case SelectionMode.CELL:var newIndex=this.selectedColumnIndex_-1;if(newIndex<0)
+return;if(!this.doesColumnIndexSupportSelection(newIndex))
+return;this.selectedColumnIndex=newIndex;this.focusSelected_();return;default:throw new Error('Invalid selection mode '+this.selectionMode_);}}
+if(cmdName==='ENTER'){if(rowInfo.userRow[this.subRowsPropertyName_]===undefined)
+return;if(rowInfo.userRow[this.subRowsPropertyName_].length===0)
+return;this.setExpandedForTableRow(rowInfo.userRow,!rowInfo.isExpanded);this.focusSelected_();return;}
+throw new Error('Unrecognized command '+cmdName);},focusSelected_:function(){if(!this.selectedTableRowInfo_)
+return;var node=this.getSelectableNodeGivenTableRowNode_(this.selectedTableRowInfo_.htmlNode);node.focus();},dispatchSortingChangedEvent_:function(){var e=new tr.b.Event('sort-column-changed');e.sortColumnIndex=this.sortColumnIndex_;e.sortDescending=this.sortDescending_;this.dispatchEvent(e);}});})();'use strict';Polymer('tr-ui-b-table-header-cell',{created:function(){this.tapCallback_=undefined;this.cellTitle_='';},set cellTitle(value){this.cellTitle_=value;var titleNode=tr.ui.b.asHTMLOrTextNode(this.cellTitle_,this.ownerDocument);this.$.title.innerText='';this.$.title.appendChild(titleNode);},get cellTitle(){return this.cellTitle_;},clearSideContent:function(){this.$.side.textContent='';},set sideContent(content){this.$.side.textContent=content;},get sideContent(){return this.$.side.textContent;},set tapCallback(callback){this.style.cursor='pointer';this.tapCallback_=callback;},get tapCallback(){return this.tapCallback_;},onTap_:function(){if(this.tapCallback_)
+this.tapCallback_();}});'use strict';Polymer('tr-ui-side-panel',{ready:function(){},get rangeOfInterest(){throw new Error('Not implemented');},set rangeOfInterest(rangeOfInterest){throw new Error('Not implemented');},get selection(){throw new Error('Not implemented');},set selection(selection){throw new Error('Not implemented');},get model(){throw new Error('Not implemented');},set model(model){throw new Error('Not implemented');},supportsModel:function(m){throw new Error('Not implemented');}});!function(){function n(n){return null!=n&&!isNaN(n)}function t(n){return n.length}function e(n){for(var t=1;n*t%1;)t*=10;return t}function r(n,t){try{for(var e in t)Object.defineProperty(n.prototype,e,{value:t[e],enumerable:!1})}catch(r){n.prototype=t}}function u(){}function i(n){return aa+n in this}function o(n){return n=aa+n,n in this&&delete this[n]}function a(){var n=[];return this.forEach(function(t){n.push(t)}),n}function c(){var n=0;for(var t in this)t.charCodeAt(0)===ca&&++n;return n}function s(){for(var n in this)if(n.charCodeAt(0)===ca)return!1;return!0}function l(){}function f(n,t,e){return function(){var r=e.apply(t,arguments);return r===t?n:r}}function h(n,t){if(t in n)return t;t=t.charAt(0).toUpperCase()+t.substring(1);for(var e=0,r=sa.length;r>e;++e){var u=sa[e]+t;if(u in n)return u}}function g(){}function p(){}function v(n){function t(){for(var t,r=e,u=-1,i=r.length;++u<i;)(t=r[u].on)&&t.apply(this,arguments);return n}var e=[],r=new u;return t.on=function(t,u){var i,o=r.get(t);return arguments.length<2?o&&o.on:(o&&(o.on=null,e=e.slice(0,i=e.indexOf(o)).concat(e.slice(i+1)),r.remove(t)),u&&e.push(r.set(t,{on:u})),n)},t}function d(){Xo.event.preventDefault()}function m(){for(var n,t=Xo.event;n=t.sourceEvent;)t=n;return t}function y(n){for(var t=new p,e=0,r=arguments.length;++e<r;)t[arguments[e]]=v(t);return t.of=function(e,r){return function(u){try{var i=u.sourceEvent=Xo.event;u.target=n,Xo.event=u,t[u.type].apply(e,r)}finally{Xo.event=i}}},t}function x(n){return fa(n,da),n}function M(n){return"function"==typeof n?n:function(){return ha(n,this)}}function _(n){return"function"==typeof n?n:function(){return ga(n,this)}}function b(n,t){function e(){this.removeAttribute(n)}function r(){this.removeAttributeNS(n.space,n.local)}function u(){this.setAttribute(n,t)}function i(){this.setAttributeNS(n.space,n.local,t)}function o(){var e=t.apply(this,arguments);null==e?this.removeAttribute(n):this.setAttribute(n,e)}function a(){var e=t.apply(this,arguments);null==e?this.removeAttributeNS(n.space,n.local):this.setAttributeNS(n.space,n.local,e)}return n=Xo.ns.qualify(n),null==t?n.local?r:e:"function"==typeof t?n.local?a:o:n.local?i:u}function w(n){return n.trim().replace(/\s+/g," ")}function S(n){return new RegExp("(?:^|\\s+)"+Xo.requote(n)+"(?:\\s+|$)","g")}function k(n){return n.trim().split(/^|\s+/)}function E(n,t){function e(){for(var e=-1;++e<u;)n[e](this,t)}function r(){for(var e=-1,r=t.apply(this,arguments);++e<u;)n[e](this,r)}n=k(n).map(A);var u=n.length;return"function"==typeof t?r:e}function A(n){var t=S(n);return function(e,r){if(u=e.classList)return r?u.add(n):u.remove(n);var u=e.getAttribute("class")||"";r?(t.lastIndex=0,t.test(u)||e.setAttribute("class",w(u+" "+n))):e.setAttribute("class",w(u.replace(t," ")))}}function C(n,t,e){function r(){this.style.removeProperty(n)}function u(){this.style.setProperty(n,t,e)}function i(){var r=t.apply(this,arguments);null==r?this.style.removeProperty(n):this.style.setProperty(n,r,e)}return null==t?r:"function"==typeof t?i:u}function N(n,t){function 
e(){delete this[n]}function r(){this[n]=t}function u(){var e=t.apply(this,arguments);null==e?delete this[n]:this[n]=e}return null==t?e:"function"==typeof t?u:r}function L(n){return"function"==typeof n?n:(n=Xo.ns.qualify(n)).local?function(){return this.ownerDocument.createElementNS(n.space,n.local)}:function(){return this.ownerDocument.createElementNS(this.namespaceURI,n)}}function T(n){return{__data__:n}}function q(n){return function(){return va(this,n)}}function z(n){return arguments.length||(n=Xo.ascending),function(t,e){return t&&e?n(t.__data__,e.__data__):!t-!e}}function R(n,t){for(var e=0,r=n.length;r>e;e++)for(var u,i=n[e],o=0,a=i.length;a>o;o++)(u=i[o])&&t(u,o,e);return n}function D(n){return fa(n,ya),n}function P(n){var t,e;return function(r,u,i){var o,a=n[i].update,c=a.length;for(i!=e&&(e=i,t=0),u>=t&&(t=u+1);!(o=a[t])&&++t<c;);return o}}function U(){var n=this.__transition__;n&&++n.active}function j(n,t,e){function r(){var t=this[o];t&&(this.removeEventListener(n,t,t.$),delete this[o])}function u(){var u=c(t,Bo(arguments));r.call(this),this.addEventListener(n,this[o]=u,u.$=e),u._=t}function i(){var t,e=new RegExp("^__on([^.]+)"+Xo.requote(n)+"$");for(var r in this)if(t=r.match(e)){var u=this[r];this.removeEventListener(t[1],u,u.$),delete this[r]}}var o="__on"+n,a=n.indexOf("."),c=H;a>0&&(n=n.substring(0,a));var s=Ma.get(n);return s&&(n=s,c=F),a?t?u:r:t?g:i}function H(n,t){return function(e){var r=Xo.event;Xo.event=e,t[0]=this.__data__;try{n.apply(this,t)}finally{Xo.event=r}}}function F(n,t){var e=H(n,t);return function(n){var t=this,r=n.relatedTarget;r&&(r===t||8&r.compareDocumentPosition(t))||e.call(t,n)}}function O(){var n=".dragsuppress-"+ ++ba,t="click"+n,e=Xo.select(Go).on("touchmove"+n,d).on("dragstart"+n,d).on("selectstart"+n,d);if(_a){var r=Jo.style,u=r[_a];r[_a]="none"}return function(i){function o(){e.on(t,null)}e.on(n,null),_a&&(r[_a]=u),i&&(e.on(t,function(){d(),o()},!0),setTimeout(o,0))}}function Y(n,t){t.changedTouches&&(t=t.changedTouches[0]);var e=n.ownerSVGElement||n;if(e.createSVGPoint){var r=e.createSVGPoint();if(0>wa&&(Go.scrollX||Go.scrollY)){e=Xo.select("body").append("svg").style({position:"absolute",top:0,left:0,margin:0,padding:0,border:"none"},"important");var u=e[0][0].getScreenCTM();wa=!(u.f||u.e),e.remove()}return wa?(r.x=t.pageX,r.y=t.pageY):(r.x=t.clientX,r.y=t.clientY),r=r.matrixTransform(n.getScreenCTM().inverse()),[r.x,r.y]}var i=n.getBoundingClientRect();return[t.clientX-i.left-n.clientLeft,t.clientY-i.top-n.clientTop]}function I(n){return n>0?1:0>n?-1:0}function Z(n,t,e){return(t[0]-n[0])*(e[1]-n[1])-(t[1]-n[1])*(e[0]-n[0])}function V(n){return n>1?0:-1>n?Sa:Math.acos(n)}function X(n){return n>1?Ea:-1>n?-Ea:Math.asin(n)}function $(n){return((n=Math.exp(n))-1/n)/2}function B(n){return((n=Math.exp(n))+1/n)/2}function W(n){return((n=Math.exp(2*n))-1)/(n+1)}function J(n){return(n=Math.sin(n/2))*n}function G(){}function K(n,t,e){return new Q(n,t,e)}function Q(n,t,e){this.h=n,this.s=t,this.l=e}function nt(n,t,e){function r(n){return n>360?n-=360:0>n&&(n+=360),60>n?i+(o-i)*n/60:180>n?o:240>n?i+(o-i)*(240-n)/60:i}function u(n){return Math.round(255*r(n))}var i,o;return n=isNaN(n)?0:(n%=360)<0?n+360:n,t=isNaN(t)?0:0>t?0:t>1?1:t,e=0>e?0:e>1?1:e,o=.5>=e?e*(1+t):e+t-e*t,i=2*e-o,gt(u(n+120),u(n),u(n-120))}function tt(n,t,e){return new et(n,t,e)}function et(n,t,e){this.h=n,this.c=t,this.l=e}function rt(n,t,e){return isNaN(n)&&(n=0),isNaN(t)&&(t=0),ut(e,Math.cos(n*=Na)*t,Math.sin(n)*t)}function ut(n,t,e){return new it(n,t,e)}function 
it(n,t,e){this.l=n,this.a=t,this.b=e}function ot(n,t,e){var r=(n+16)/116,u=r+t/500,i=r-e/200;return u=ct(u)*Fa,r=ct(r)*Oa,i=ct(i)*Ya,gt(lt(3.2404542*u-1.5371385*r-.4985314*i),lt(-.969266*u+1.8760108*r+.041556*i),lt(.0556434*u-.2040259*r+1.0572252*i))}function at(n,t,e){return n>0?tt(Math.atan2(e,t)*La,Math.sqrt(t*t+e*e),n):tt(0/0,0/0,n)}function ct(n){return n>.206893034?n*n*n:(n-4/29)/7.787037}function st(n){return n>.008856?Math.pow(n,1/3):7.787037*n+4/29}function lt(n){return Math.round(255*(.00304>=n?12.92*n:1.055*Math.pow(n,1/2.4)-.055))}function ft(n){return gt(n>>16,255&n>>8,255&n)}function ht(n){return ft(n)+""}function gt(n,t,e){return new pt(n,t,e)}function pt(n,t,e){this.r=n,this.g=t,this.b=e}function vt(n){return 16>n?"0"+Math.max(0,n).toString(16):Math.min(255,n).toString(16)}function dt(n,t,e){var r,u,i,o,a=0,c=0,s=0;if(u=/([a-z]+)\((.*)\)/i.exec(n))switch(i=u[2].split(","),u[1]){case"hsl":return e(parseFloat(i[0]),parseFloat(i[1])/100,parseFloat(i[2])/100);case"rgb":return t(Mt(i[0]),Mt(i[1]),Mt(i[2]))}return(o=Va.get(n))?t(o.r,o.g,o.b):(null!=n&&"#"===n.charAt(0)&&(r=parseInt(n.substring(1),16),isNaN(r)||(4===n.length?(a=(3840&r)>>4,a=a>>4|a,c=240&r,c=c>>4|c,s=15&r,s=s<<4|s):7===n.length&&(a=(16711680&r)>>16,c=(65280&r)>>8,s=255&r))),t(a,c,s))}function mt(n,t,e){var r,u,i=Math.min(n/=255,t/=255,e/=255),o=Math.max(n,t,e),a=o-i,c=(o+i)/2;return a?(u=.5>c?a/(o+i):a/(2-o-i),r=n==o?(t-e)/a+(e>t?6:0):t==o?(e-n)/a+2:(n-t)/a+4,r*=60):(r=0/0,u=c>0&&1>c?0:r),K(r,u,c)}function yt(n,t,e){n=xt(n),t=xt(t),e=xt(e);var r=st((.4124564*n+.3575761*t+.1804375*e)/Fa),u=st((.2126729*n+.7151522*t+.072175*e)/Oa),i=st((.0193339*n+.119192*t+.9503041*e)/Ya);return ut(116*u-16,500*(r-u),200*(u-i))}function xt(n){return(n/=255)<=.04045?n/12.92:Math.pow((n+.055)/1.055,2.4)}function Mt(n){var t=parseFloat(n);return"%"===n.charAt(n.length-1)?Math.round(2.55*t):t}function _t(n){return"function"==typeof n?n:function(){return n}}function bt(n){return n}function wt(n){return function(t,e,r){return 2===arguments.length&&"function"==typeof e&&(r=e,e=null),St(t,e,n,r)}}function St(n,t,e,r){function u(){var n,t=c.status;if(!t&&c.responseText||t>=200&&300>t||304===t){try{n=e.call(i,c)}catch(r){return o.error.call(i,r),void 0}o.load.call(i,n)}else o.error.call(i,c)}var i={},o=Xo.dispatch("beforesend","progress","load","error"),a={},c=new XMLHttpRequest,s=null;return!Go.XDomainRequest||"withCredentials"in c||!/^(http(s)?:)?\/\//.test(n)||(c=new XDomainRequest),"onload"in c?c.onload=c.onerror=u:c.onreadystatechange=function(){c.readyState>3&&u()},c.onprogress=function(n){var t=Xo.event;Xo.event=n;try{o.progress.call(i,c)}finally{Xo.event=t}},i.header=function(n,t){return n=(n+"").toLowerCase(),arguments.length<2?a[n]:(null==t?delete a[n]:a[n]=t+"",i)},i.mimeType=function(n){return arguments.length?(t=null==n?null:n+"",i):t},i.responseType=function(n){return arguments.length?(s=n,i):s},i.response=function(n){return e=n,i},["get","post"].forEach(function(n){i[n]=function(){return i.send.apply(i,[n].concat(Bo(arguments)))}}),i.send=function(e,r,u){if(2===arguments.length&&"function"==typeof r&&(u=r,r=null),c.open(e,n,!0),null==t||"accept"in a||(a.accept=t+",*/*"),c.setRequestHeader)for(var l in a)c.setRequestHeader(l,a[l]);return null!=t&&c.overrideMimeType&&c.overrideMimeType(t),null!=s&&(c.responseType=s),null!=u&&i.on("error",u).on("load",function(n){u(null,n)}),o.beforesend.call(i,c),c.send(null==r?null:r),i},i.abort=function(){return c.abort(),i},Xo.rebind(i,o,"on"),null==r?i:i.get(kt(r))}function kt(n){return 
1===n.length?function(t,e){n(null==t?e:null)}:n}function Et(){var n=At(),t=Ct()-n;t>24?(isFinite(t)&&(clearTimeout(Wa),Wa=setTimeout(Et,t)),Ba=0):(Ba=1,Ga(Et))}function At(){var n=Date.now();for(Ja=Xa;Ja;)n>=Ja.t&&(Ja.f=Ja.c(n-Ja.t)),Ja=Ja.n;return n}function Ct(){for(var n,t=Xa,e=1/0;t;)t.f?t=n?n.n=t.n:Xa=t.n:(t.t<e&&(e=t.t),t=(n=t).n);return $a=n,e}function Nt(n,t){return t-(n?Math.ceil(Math.log(n)/Math.LN10):1)}function Lt(n,t){var e=Math.pow(10,3*oa(8-t));return{scale:t>8?function(n){return n/e}:function(n){return n*e},symbol:n}}function Tt(n){var t=n.decimal,e=n.thousands,r=n.grouping,u=n.currency,i=r?function(n){for(var t=n.length,u=[],i=0,o=r[0];t>0&&o>0;)u.push(n.substring(t-=o,t+o)),o=r[i=(i+1)%r.length];return u.reverse().join(e)}:bt;return function(n){var e=Qa.exec(n),r=e[1]||" ",o=e[2]||">",a=e[3]||"",c=e[4]||"",s=e[5],l=+e[6],f=e[7],h=e[8],g=e[9],p=1,v="",d="",m=!1;switch(h&&(h=+h.substring(1)),(s||"0"===r&&"="===o)&&(s=r="0",o="=",f&&(l-=Math.floor((l-1)/4))),g){case"n":f=!0,g="g";break;case"%":p=100,d="%",g="f";break;case"p":p=100,d="%",g="r";break;case"b":case"o":case"x":case"X":"#"===c&&(v="0"+g.toLowerCase());case"c":case"d":m=!0,h=0;break;case"s":p=-1,g="r"}"$"===c&&(v=u[0],d=u[1]),"r"!=g||h||(g="g"),null!=h&&("g"==g?h=Math.max(1,Math.min(21,h)):("e"==g||"f"==g)&&(h=Math.max(0,Math.min(20,h)))),g=nc.get(g)||qt;var y=s&&f;return function(n){var e=d;if(m&&n%1)return"";var u=0>n||0===n&&0>1/n?(n=-n,"-"):a;if(0>p){var c=Xo.formatPrefix(n,h);n=c.scale(n),e=c.symbol+d}else n*=p;n=g(n,h);var x=n.lastIndexOf("."),M=0>x?n:n.substring(0,x),_=0>x?"":t+n.substring(x+1);!s&&f&&(M=i(M));var b=v.length+M.length+_.length+(y?0:u.length),w=l>b?new Array(b=l-b+1).join(r):"";return y&&(M=i(w+M)),u+=v,n=M+_,("<"===o?u+n+w:">"===o?w+u+n:"^"===o?w.substring(0,b>>=1)+u+n+w.substring(b):u+(y?n:w+n))+e}}}function qt(n){return n+""}function zt(){this._=new Date(arguments.length>1?Date.UTC.apply(this,arguments):arguments[0])}function Rt(n,t,e){function r(t){var e=n(t),r=i(e,1);return r-t>t-e?e:r}function u(e){return t(e=n(new ec(e-1)),1),e}function i(n,e){return t(n=new ec(+n),e),n}function o(n,r,i){var o=u(n),a=[];if(i>1)for(;r>o;)e(o)%i||a.push(new Date(+o)),t(o,1);else for(;r>o;)a.push(new Date(+o)),t(o,1);return a}function a(n,t,e){try{ec=zt;var r=new zt;return r._=n,o(r,t,e)}finally{ec=Date}}n.floor=n,n.round=r,n.ceil=u,n.offset=i,n.range=o;var c=n.utc=Dt(n);return c.floor=c,c.round=Dt(r),c.ceil=Dt(u),c.offset=Dt(i),c.range=a,n}function Dt(n){return function(t,e){try{ec=zt;var r=new zt;return r._=t,n(r,e)._}finally{ec=Date}}}function Pt(n){function t(n){function t(t){for(var e,u,i,o=[],a=-1,c=0;++a<r;)37===n.charCodeAt(a)&&(o.push(n.substring(c,a)),null!=(u=uc[e=n.charAt(++a)])&&(e=n.charAt(++a)),(i=C[e])&&(e=i(t,null==u?"e"===e?" 
":"0":u)),o.push(e),c=a+1);return o.push(n.substring(c,a)),o.join("")}var r=n.length;return t.parse=function(t){var r={y:1900,m:0,d:1,H:0,M:0,S:0,L:0,Z:null},u=e(r,n,t,0);if(u!=t.length)return null;"p"in r&&(r.H=r.H%12+12*r.p);var i=null!=r.Z&&ec!==zt,o=new(i?zt:ec);return"j"in r?o.setFullYear(r.y,0,r.j):"w"in r&&("W"in r||"U"in r)?(o.setFullYear(r.y,0,1),o.setFullYear(r.y,0,"W"in r?(r.w+6)%7+7*r.W-(o.getDay()+5)%7:r.w+7*r.U-(o.getDay()+6)%7)):o.setFullYear(r.y,r.m,r.d),o.setHours(r.H+Math.floor(r.Z/100),r.M+r.Z%100,r.S,r.L),i?o._:o},t.toString=function(){return n},t}function e(n,t,e,r){for(var u,i,o,a=0,c=t.length,s=e.length;c>a;){if(r>=s)return-1;if(u=t.charCodeAt(a++),37===u){if(o=t.charAt(a++),i=N[o in uc?t.charAt(a++):o],!i||(r=i(n,e,r))<0)return-1}else if(u!=e.charCodeAt(r++))return-1}return r}function r(n,t,e){b.lastIndex=0;var r=b.exec(t.substring(e));return r?(n.w=w.get(r[0].toLowerCase()),e+r[0].length):-1}function u(n,t,e){M.lastIndex=0;var r=M.exec(t.substring(e));return r?(n.w=_.get(r[0].toLowerCase()),e+r[0].length):-1}function i(n,t,e){E.lastIndex=0;var r=E.exec(t.substring(e));return r?(n.m=A.get(r[0].toLowerCase()),e+r[0].length):-1}function o(n,t,e){S.lastIndex=0;var r=S.exec(t.substring(e));return r?(n.m=k.get(r[0].toLowerCase()),e+r[0].length):-1}function a(n,t,r){return e(n,C.c.toString(),t,r)}function c(n,t,r){return e(n,C.x.toString(),t,r)}function s(n,t,r){return e(n,C.X.toString(),t,r)}function l(n,t,e){var r=x.get(t.substring(e,e+=2).toLowerCase());return null==r?-1:(n.p=r,e)}var f=n.dateTime,h=n.date,g=n.time,p=n.periods,v=n.days,d=n.shortDays,m=n.months,y=n.shortMonths;t.utc=function(n){function e(n){try{ec=zt;var t=new ec;return t._=n,r(t)}finally{ec=Date}}var r=t(n);return e.parse=function(n){try{ec=zt;var t=r.parse(n);return t&&t._}finally{ec=Date}},e.toString=r.toString,e},t.multi=t.utc.multi=ee;var x=Xo.map(),M=jt(v),_=Ht(v),b=jt(d),w=Ht(d),S=jt(m),k=Ht(m),E=jt(y),A=Ht(y);p.forEach(function(n,t){x.set(n.toLowerCase(),t)});var C={a:function(n){return d[n.getDay()]},A:function(n){return v[n.getDay()]},b:function(n){return y[n.getMonth()]},B:function(n){return m[n.getMonth()]},c:t(f),d:function(n,t){return Ut(n.getDate(),t,2)},e:function(n,t){return Ut(n.getDate(),t,2)},H:function(n,t){return Ut(n.getHours(),t,2)},I:function(n,t){return Ut(n.getHours()%12||12,t,2)},j:function(n,t){return Ut(1+tc.dayOfYear(n),t,3)},L:function(n,t){return Ut(n.getMilliseconds(),t,3)},m:function(n,t){return Ut(n.getMonth()+1,t,2)},M:function(n,t){return Ut(n.getMinutes(),t,2)},p:function(n){return p[+(n.getHours()>=12)]},S:function(n,t){return Ut(n.getSeconds(),t,2)},U:function(n,t){return Ut(tc.sundayOfYear(n),t,2)},w:function(n){return n.getDay()},W:function(n,t){return Ut(tc.mondayOfYear(n),t,2)},x:t(h),X:t(g),y:function(n,t){return Ut(n.getFullYear()%100,t,2)},Y:function(n,t){return Ut(n.getFullYear()%1e4,t,4)},Z:ne,"%":function(){return"%"}},N={a:r,A:u,b:i,B:o,c:a,d:Bt,e:Bt,H:Jt,I:Jt,j:Wt,L:Qt,m:$t,M:Gt,p:l,S:Kt,U:Ot,w:Ft,W:Yt,x:c,X:s,y:Zt,Y:It,Z:Vt,"%":te};return t}function Ut(n,t,e){var r=0>n?"-":"",u=(r?-n:n)+"",i=u.length;return r+(e>i?new Array(e-i+1).join(t)+u:u)}function jt(n){return new RegExp("^(?:"+n.map(Xo.requote).join("|")+")","i")}function Ht(n){for(var t=new u,e=-1,r=n.length;++e<r;)t.set(n[e].toLowerCase(),e);return t}function Ft(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+1));return r?(n.w=+r[0],e+r[0].length):-1}function Ot(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e));return r?(n.U=+r[0],e+r[0].length):-1}function Yt(n,t,e){ic.lastIndex=0;var 
r=ic.exec(t.substring(e));return r?(n.W=+r[0],e+r[0].length):-1}function It(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+4));return r?(n.y=+r[0],e+r[0].length):-1}function Zt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.y=Xt(+r[0]),e+r[0].length):-1}function Vt(n,t,e){return/^[+-]\d{4}$/.test(t=t.substring(e,e+5))?(n.Z=+t,e+5):-1}function Xt(n){return n+(n>68?1900:2e3)}function $t(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.m=r[0]-1,e+r[0].length):-1}function Bt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.d=+r[0],e+r[0].length):-1}function Wt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+3));return r?(n.j=+r[0],e+r[0].length):-1}function Jt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.H=+r[0],e+r[0].length):-1}function Gt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.M=+r[0],e+r[0].length):-1}function Kt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.S=+r[0],e+r[0].length):-1}function Qt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+3));return r?(n.L=+r[0],e+r[0].length):-1}function ne(n){var t=n.getTimezoneOffset(),e=t>0?"-":"+",r=~~(oa(t)/60),u=oa(t)%60;return e+Ut(r,"0",2)+Ut(u,"0",2)}function te(n,t,e){oc.lastIndex=0;var r=oc.exec(t.substring(e,e+1));return r?e+r[0].length:-1}function ee(n){for(var t=n.length,e=-1;++e<t;)n[e][0]=this(n[e][0]);return function(t){for(var e=0,r=n[e];!r[1](t);)r=n[++e];return r[0](t)}}function re(){}function ue(n,t,e){var r=e.s=n+t,u=r-n,i=r-u;e.t=n-i+(t-u)}function ie(n,t){n&&lc.hasOwnProperty(n.type)&&lc[n.type](n,t)}function oe(n,t,e){var r,u=-1,i=n.length-e;for(t.lineStart();++u<i;)r=n[u],t.point(r[0],r[1],r[2]);t.lineEnd()}function ae(n,t){var e=-1,r=n.length;for(t.polygonStart();++e<r;)oe(n[e],t,1);t.polygonEnd()}function ce(){function n(n,t){n*=Na,t=t*Na/2+Sa/4;var e=n-r,o=e>=0?1:-1,a=o*e,c=Math.cos(t),s=Math.sin(t),l=i*s,f=u*c+l*Math.cos(a),h=l*o*Math.sin(a);hc.add(Math.atan2(h,f)),r=n,u=c,i=s}var t,e,r,u,i;gc.point=function(o,a){gc.point=n,r=(t=o)*Na,u=Math.cos(a=(e=a)*Na/2+Sa/4),i=Math.sin(a)},gc.lineEnd=function(){n(t,e)}}function se(n){var t=n[0],e=n[1],r=Math.cos(e);return[r*Math.cos(t),r*Math.sin(t),Math.sin(e)]}function le(n,t){return n[0]*t[0]+n[1]*t[1]+n[2]*t[2]}function fe(n,t){return[n[1]*t[2]-n[2]*t[1],n[2]*t[0]-n[0]*t[2],n[0]*t[1]-n[1]*t[0]]}function he(n,t){n[0]+=t[0],n[1]+=t[1],n[2]+=t[2]}function ge(n,t){return[n[0]*t,n[1]*t,n[2]*t]}function pe(n){var t=Math.sqrt(n[0]*n[0]+n[1]*n[1]+n[2]*n[2]);n[0]/=t,n[1]/=t,n[2]/=t}function ve(n){return[Math.atan2(n[1],n[0]),X(n[2])]}function de(n,t){return oa(n[0]-t[0])<Aa&&oa(n[1]-t[1])<Aa}function me(n,t){n*=Na;var e=Math.cos(t*=Na);ye(e*Math.cos(n),e*Math.sin(n),Math.sin(t))}function ye(n,t,e){++pc,dc+=(n-dc)/pc,mc+=(t-mc)/pc,yc+=(e-yc)/pc}function xe(){function n(n,u){n*=Na;var i=Math.cos(u*=Na),o=i*Math.cos(n),a=i*Math.sin(n),c=Math.sin(u),s=Math.atan2(Math.sqrt((s=e*c-r*a)*s+(s=r*o-t*c)*s+(s=t*a-e*o)*s),t*o+e*a+r*c);vc+=s,xc+=s*(t+(t=o)),Mc+=s*(e+(e=a)),_c+=s*(r+(r=c)),ye(t,e,r)}var t,e,r;kc.point=function(u,i){u*=Na;var o=Math.cos(i*=Na);t=o*Math.cos(u),e=o*Math.sin(u),r=Math.sin(i),kc.point=n,ye(t,e,r)}}function Me(){kc.point=me}function _e(){function n(n,t){n*=Na;var e=Math.cos(t*=Na),o=e*Math.cos(n),a=e*Math.sin(n),c=Math.sin(t),s=u*c-i*a,l=i*o-r*c,f=r*a-u*o,h=Math.sqrt(s*s+l*l+f*f),g=r*o+u*a+i*c,p=h&&-V(g)/h,v=Math.atan2(h,g);bc+=p*s,wc+=p*l,Sc+=p*f,vc+=v,xc+=v*(r+(r=o)),Mc+=v*(u+(u=a)),_c+=v*(i+(i=c)),ye(r,u,i)}var 
t,e,r,u,i;kc.point=function(o,a){t=o,e=a,kc.point=n,o*=Na;var c=Math.cos(a*=Na);r=c*Math.cos(o),u=c*Math.sin(o),i=Math.sin(a),ye(r,u,i)},kc.lineEnd=function(){n(t,e),kc.lineEnd=Me,kc.point=me}}function be(){return!0}function we(n,t,e,r,u){var i=[],o=[];if(n.forEach(function(n){if(!((t=n.length-1)<=0)){var t,e=n[0],r=n[t];if(de(e,r)){u.lineStart();for(var a=0;t>a;++a)u.point((e=n[a])[0],e[1]);return u.lineEnd(),void 0}var c=new ke(e,n,null,!0),s=new ke(e,null,c,!1);c.o=s,i.push(c),o.push(s),c=new ke(r,n,null,!1),s=new ke(r,null,c,!0),c.o=s,i.push(c),o.push(s)}}),o.sort(t),Se(i),Se(o),i.length){for(var a=0,c=e,s=o.length;s>a;++a)o[a].e=c=!c;for(var l,f,h=i[0];;){for(var g=h,p=!0;g.v;)if((g=g.n)===h)return;l=g.z,u.lineStart();do{if(g.v=g.o.v=!0,g.e){if(p)for(var a=0,s=l.length;s>a;++a)u.point((f=l[a])[0],f[1]);else r(g.x,g.n.x,1,u);g=g.n}else{if(p){l=g.p.z;for(var a=l.length-1;a>=0;--a)u.point((f=l[a])[0],f[1])}else r(g.x,g.p.x,-1,u);g=g.p}g=g.o,l=g.z,p=!p}while(!g.v);u.lineEnd()}}}function Se(n){if(t=n.length){for(var t,e,r=0,u=n[0];++r<t;)u.n=e=n[r],e.p=u,u=e;u.n=e=n[0],e.p=u}}function ke(n,t,e,r){this.x=n,this.z=t,this.o=e,this.e=r,this.v=!1,this.n=this.p=null}function Ee(n,t,e,r){return function(u,i){function o(t,e){var r=u(t,e);n(t=r[0],e=r[1])&&i.point(t,e)}function a(n,t){var e=u(n,t);d.point(e[0],e[1])}function c(){y.point=a,d.lineStart()}function s(){y.point=o,d.lineEnd()}function l(n,t){v.push([n,t]);var e=u(n,t);M.point(e[0],e[1])}function f(){M.lineStart(),v=[]}function h(){l(v[0][0],v[0][1]),M.lineEnd();var n,t=M.clean(),e=x.buffer(),r=e.length;if(v.pop(),p.push(v),v=null,r){if(1&t){n=e[0];var u,r=n.length-1,o=-1;for(i.lineStart();++o<r;)i.point((u=n[o])[0],u[1]);return i.lineEnd(),void 0}r>1&&2&t&&e.push(e.pop().concat(e.shift())),g.push(e.filter(Ae))}}var g,p,v,d=t(i),m=u.invert(r[0],r[1]),y={point:o,lineStart:c,lineEnd:s,polygonStart:function(){y.point=l,y.lineStart=f,y.lineEnd=h,g=[],p=[],i.polygonStart()},polygonEnd:function(){y.point=o,y.lineStart=c,y.lineEnd=s,g=Xo.merge(g);var n=Le(m,p);g.length?we(g,Ne,n,e,i):n&&(i.lineStart(),e(null,null,1,i),i.lineEnd()),i.polygonEnd(),g=p=null},sphere:function(){i.polygonStart(),i.lineStart(),e(null,null,1,i),i.lineEnd(),i.polygonEnd()}},x=Ce(),M=t(x);return y}}function Ae(n){return n.length>1}function Ce(){var n,t=[];return{lineStart:function(){t.push(n=[])},point:function(t,e){n.push([t,e])},lineEnd:g,buffer:function(){var e=t;return t=[],n=null,e},rejoin:function(){t.length>1&&t.push(t.pop().concat(t.shift()))}}}function Ne(n,t){return((n=n.x)[0]<0?n[1]-Ea-Aa:Ea-n[1])-((t=t.x)[0]<0?t[1]-Ea-Aa:Ea-t[1])}function Le(n,t){var e=n[0],r=n[1],u=[Math.sin(e),-Math.cos(e),0],i=0,o=0;hc.reset();for(var a=0,c=t.length;c>a;++a){var s=t[a],l=s.length;if(l)for(var f=s[0],h=f[0],g=f[1]/2+Sa/4,p=Math.sin(g),v=Math.cos(g),d=1;;){d===l&&(d=0),n=s[d];var m=n[0],y=n[1]/2+Sa/4,x=Math.sin(y),M=Math.cos(y),_=m-h,b=_>=0?1:-1,w=b*_,S=w>Sa,k=p*x;if(hc.add(Math.atan2(k*b*Math.sin(w),v*M+k*Math.cos(w))),i+=S?_+b*ka:_,S^h>=e^m>=e){var E=fe(se(f),se(n));pe(E);var A=fe(u,E);pe(A);var C=(S^_>=0?-1:1)*X(A[2]);(r>C||r===C&&(E[0]||E[1]))&&(o+=S^_>=0?1:-1)}if(!d++)break;h=m,p=x,v=M,f=n}}return(-Aa>i||Aa>i&&0>hc)^1&o}function Te(n){var t,e=0/0,r=0/0,u=0/0;return{lineStart:function(){n.lineStart(),t=1},point:function(i,o){var 
a=i>0?Sa:-Sa,c=oa(i-e);oa(c-Sa)<Aa?(n.point(e,r=(r+o)/2>0?Ea:-Ea),n.point(u,r),n.lineEnd(),n.lineStart(),n.point(a,r),n.point(i,r),t=0):u!==a&&c>=Sa&&(oa(e-u)<Aa&&(e-=u*Aa),oa(i-a)<Aa&&(i-=a*Aa),r=qe(e,r,i,o),n.point(u,r),n.lineEnd(),n.lineStart(),n.point(a,r),t=0),n.point(e=i,r=o),u=a},lineEnd:function(){n.lineEnd(),e=r=0/0},clean:function(){return 2-t}}}function qe(n,t,e,r){var u,i,o=Math.sin(n-e);return oa(o)>Aa?Math.atan((Math.sin(t)*(i=Math.cos(r))*Math.sin(e)-Math.sin(r)*(u=Math.cos(t))*Math.sin(n))/(u*i*o)):(t+r)/2}function ze(n,t,e,r){var u;if(null==n)u=e*Ea,r.point(-Sa,u),r.point(0,u),r.point(Sa,u),r.point(Sa,0),r.point(Sa,-u),r.point(0,-u),r.point(-Sa,-u),r.point(-Sa,0),r.point(-Sa,u);else if(oa(n[0]-t[0])>Aa){var i=n[0]<t[0]?Sa:-Sa;u=e*i/2,r.point(-i,u),r.point(0,u),r.point(i,u)}else r.point(t[0],t[1])}function Re(n){function t(n,t){return Math.cos(n)*Math.cos(t)>i}function e(n){var e,i,c,s,l;return{lineStart:function(){s=c=!1,l=1},point:function(f,h){var g,p=[f,h],v=t(f,h),d=o?v?0:u(f,h):v?u(f+(0>f?Sa:-Sa),h):0;if(!e&&(s=c=v)&&n.lineStart(),v!==c&&(g=r(e,p),(de(e,g)||de(p,g))&&(p[0]+=Aa,p[1]+=Aa,v=t(p[0],p[1]))),v!==c)l=0,v?(n.lineStart(),g=r(p,e),n.point(g[0],g[1])):(g=r(e,p),n.point(g[0],g[1]),n.lineEnd()),e=g;else if(a&&e&&o^v){var m;d&i||!(m=r(p,e,!0))||(l=0,o?(n.lineStart(),n.point(m[0][0],m[0][1]),n.point(m[1][0],m[1][1]),n.lineEnd()):(n.point(m[1][0],m[1][1]),n.lineEnd(),n.lineStart(),n.point(m[0][0],m[0][1])))}!v||e&&de(e,p)||n.point(p[0],p[1]),e=p,c=v,i=d},lineEnd:function(){c&&n.lineEnd(),e=null},clean:function(){return l|(s&&c)<<1}}}function r(n,t,e){var r=se(n),u=se(t),o=[1,0,0],a=fe(r,u),c=le(a,a),s=a[0],l=c-s*s;if(!l)return!e&&n;var f=i*c/l,h=-i*s/l,g=fe(o,a),p=ge(o,f),v=ge(a,h);he(p,v);var d=g,m=le(p,d),y=le(d,d),x=m*m-y*(le(p,p)-1);if(!(0>x)){var M=Math.sqrt(x),_=ge(d,(-m-M)/y);if(he(_,p),_=ve(_),!e)return _;var b,w=n[0],S=t[0],k=n[1],E=t[1];w>S&&(b=w,w=S,S=b);var A=S-w,C=oa(A-Sa)<Aa,N=C||Aa>A;if(!C&&k>E&&(b=k,k=E,E=b),N?C?k+E>0^_[1]<(oa(_[0]-w)<Aa?k:E):k<=_[1]&&_[1]<=E:A>Sa^(w<=_[0]&&_[0]<=S)){var L=ge(d,(-m+M)/y);return he(L,p),[_,ve(L)]}}}function u(t,e){var r=o?n:Sa-n,u=0;return-r>t?u|=1:t>r&&(u|=2),-r>e?u|=4:e>r&&(u|=8),u}var i=Math.cos(n),o=i>0,a=oa(i)>Aa,c=cr(n,6*Na);return Ee(t,e,c,o?[0,-n]:[-Sa,n-Sa])}function De(n,t,e,r){return function(u){var i,o=u.a,a=u.b,c=o.x,s=o.y,l=a.x,f=a.y,h=0,g=1,p=l-c,v=f-s;if(i=n-c,p||!(i>0)){if(i/=p,0>p){if(h>i)return;g>i&&(g=i)}else if(p>0){if(i>g)return;i>h&&(h=i)}if(i=e-c,p||!(0>i)){if(i/=p,0>p){if(i>g)return;i>h&&(h=i)}else if(p>0){if(h>i)return;g>i&&(g=i)}if(i=t-s,v||!(i>0)){if(i/=v,0>v){if(h>i)return;g>i&&(g=i)}else if(v>0){if(i>g)return;i>h&&(h=i)}if(i=r-s,v||!(0>i)){if(i/=v,0>v){if(i>g)return;i>h&&(h=i)}else if(v>0){if(h>i)return;g>i&&(g=i)}return h>0&&(u.a={x:c+h*p,y:s+h*v}),1>g&&(u.b={x:c+g*p,y:s+g*v}),u}}}}}}function Pe(n,t,e,r){function u(r,u){return oa(r[0]-n)<Aa?u>0?0:3:oa(r[0]-e)<Aa?u>0?2:1:oa(r[1]-t)<Aa?u>0?1:0:u>0?3:2}function i(n,t){return o(n.x,t.x)}function o(n,t){var e=u(n,1),r=u(t,1);return e!==r?e-r:0===e?t[1]-n[1]:1===e?n[0]-t[0]:2===e?n[1]-t[1]:t[0]-n[0]}return function(a){function c(n){for(var t=0,e=d.length,r=n[1],u=0;e>u;++u)for(var i,o=1,a=d[u],c=a.length,s=a[0];c>o;++o)i=a[o],s[1]<=r?i[1]>r&&Z(s,i,n)>0&&++t:i[1]<=r&&Z(s,i,n)<0&&--t,s=i;return 0!==t}function s(i,a,c,s){var l=0,f=0;if(null==i||(l=u(i,c))!==(f=u(a,c))||o(i,a)<0^c>0){do s.point(0===l||3===l?n:e,l>1?r:t);while((l=(l+c+4)%4)!==f)}else s.point(a[0],a[1])}function l(u,i){return u>=n&&e>=u&&i>=t&&r>=i}function 
f(n,t){l(n,t)&&a.point(n,t)}function h(){N.point=p,d&&d.push(m=[]),S=!0,w=!1,_=b=0/0}function g(){v&&(p(y,x),M&&w&&A.rejoin(),v.push(A.buffer())),N.point=f,w&&a.lineEnd()}function p(n,t){n=Math.max(-Ac,Math.min(Ac,n)),t=Math.max(-Ac,Math.min(Ac,t));var e=l(n,t);if(d&&m.push([n,t]),S)y=n,x=t,M=e,S=!1,e&&(a.lineStart(),a.point(n,t));else if(e&&w)a.point(n,t);else{var r={a:{x:_,y:b},b:{x:n,y:t}};C(r)?(w||(a.lineStart(),a.point(r.a.x,r.a.y)),a.point(r.b.x,r.b.y),e||a.lineEnd(),k=!1):e&&(a.lineStart(),a.point(n,t),k=!1)}_=n,b=t,w=e}var v,d,m,y,x,M,_,b,w,S,k,E=a,A=Ce(),C=De(n,t,e,r),N={point:f,lineStart:h,lineEnd:g,polygonStart:function(){a=A,v=[],d=[],k=!0},polygonEnd:function(){a=E,v=Xo.merge(v);var t=c([n,r]),e=k&&t,u=v.length;(e||u)&&(a.polygonStart(),e&&(a.lineStart(),s(null,null,1,a),a.lineEnd()),u&&we(v,i,t,s,a),a.polygonEnd()),v=d=m=null}};return N}}function Ue(n,t){function e(e,r){return e=n(e,r),t(e[0],e[1])}return n.invert&&t.invert&&(e.invert=function(e,r){return e=t.invert(e,r),e&&n.invert(e[0],e[1])}),e}function je(n){var t=0,e=Sa/3,r=nr(n),u=r(t,e);return u.parallels=function(n){return arguments.length?r(t=n[0]*Sa/180,e=n[1]*Sa/180):[180*(t/Sa),180*(e/Sa)]},u}function He(n,t){function e(n,t){var e=Math.sqrt(i-2*u*Math.sin(t))/u;return[e*Math.sin(n*=u),o-e*Math.cos(n)]}var r=Math.sin(n),u=(r+Math.sin(t))/2,i=1+r*(2*u-r),o=Math.sqrt(i)/u;return e.invert=function(n,t){var e=o-t;return[Math.atan2(n,e)/u,X((i-(n*n+e*e)*u*u)/(2*u))]},e}function Fe(){function n(n,t){Nc+=u*n-r*t,r=n,u=t}var t,e,r,u;Rc.point=function(i,o){Rc.point=n,t=r=i,e=u=o},Rc.lineEnd=function(){n(t,e)}}function Oe(n,t){Lc>n&&(Lc=n),n>qc&&(qc=n),Tc>t&&(Tc=t),t>zc&&(zc=t)}function Ye(){function n(n,t){o.push("M",n,",",t,i)}function t(n,t){o.push("M",n,",",t),a.point=e}function e(n,t){o.push("L",n,",",t)}function r(){a.point=n}function u(){o.push("Z")}var i=Ie(4.5),o=[],a={point:n,lineStart:function(){a.point=t},lineEnd:r,polygonStart:function(){a.lineEnd=u},polygonEnd:function(){a.lineEnd=r,a.point=n},pointRadius:function(n){return i=Ie(n),a},result:function(){if(o.length){var n=o.join("");return o=[],n}}};return a}function Ie(n){return"m0,"+n+"a"+n+","+n+" 0 1,1 0,"+-2*n+"a"+n+","+n+" 0 1,1 0,"+2*n+"z"}function Ze(n,t){dc+=n,mc+=t,++yc}function Ve(){function n(n,r){var u=n-t,i=r-e,o=Math.sqrt(u*u+i*i);xc+=o*(t+n)/2,Mc+=o*(e+r)/2,_c+=o,Ze(t=n,e=r)}var t,e;Pc.point=function(r,u){Pc.point=n,Ze(t=r,e=u)}}function Xe(){Pc.point=Ze}function $e(){function n(n,t){var e=n-r,i=t-u,o=Math.sqrt(e*e+i*i);xc+=o*(r+n)/2,Mc+=o*(u+t)/2,_c+=o,o=u*n-r*t,bc+=o*(r+n),wc+=o*(u+t),Sc+=3*o,Ze(r=n,u=t)}var t,e,r,u;Pc.point=function(i,o){Pc.point=n,Ze(t=r=i,e=u=o)},Pc.lineEnd=function(){n(t,e)}}function Be(n){function t(t,e){n.moveTo(t,e),n.arc(t,e,o,0,ka)}function e(t,e){n.moveTo(t,e),a.point=r}function r(t,e){n.lineTo(t,e)}function u(){a.point=t}function i(){n.closePath()}var o=4.5,a={point:t,lineStart:function(){a.point=e},lineEnd:u,polygonStart:function(){a.lineEnd=i},polygonEnd:function(){a.lineEnd=u,a.point=t},pointRadius:function(n){return o=n,a},result:g};return a}function We(n){function t(n){return(a?r:e)(n)}function e(t){return Ke(t,function(e,r){e=n(e,r),t.point(e[0],e[1])})}function r(t){function e(e,r){e=n(e,r),t.point(e[0],e[1])}function r(){x=0/0,S.point=i,t.lineStart()}function i(e,r){var i=se([e,r]),o=n(e,r);u(x,M,y,_,b,w,x=o[0],M=o[1],y=e,_=i[0],b=i[1],w=i[2],a,t),t.point(x,M)}function o(){S.point=e,t.lineEnd()}function c(){r(),S.point=s,S.lineEnd=l}function s(n,t){i(f=n,h=t),g=x,p=M,v=_,d=b,m=w,S.point=i}function 
l(){u(x,M,y,_,b,w,g,p,f,v,d,m,a,t),S.lineEnd=o,o()}var f,h,g,p,v,d,m,y,x,M,_,b,w,S={point:e,lineStart:r,lineEnd:o,polygonStart:function(){t.polygonStart(),S.lineStart=c},polygonEnd:function(){t.polygonEnd(),S.lineStart=r}};return S}function u(t,e,r,a,c,s,l,f,h,g,p,v,d,m){var y=l-t,x=f-e,M=y*y+x*x;if(M>4*i&&d--){var _=a+g,b=c+p,w=s+v,S=Math.sqrt(_*_+b*b+w*w),k=Math.asin(w/=S),E=oa(oa(w)-1)<Aa||oa(r-h)<Aa?(r+h)/2:Math.atan2(b,_),A=n(E,k),C=A[0],N=A[1],L=C-t,T=N-e,q=x*L-y*T;(q*q/M>i||oa((y*L+x*T)/M-.5)>.3||o>a*g+c*p+s*v)&&(u(t,e,r,a,c,s,C,N,E,_/=S,b/=S,w,d,m),m.point(C,N),u(C,N,E,_,b,w,l,f,h,g,p,v,d,m))}}var i=.5,o=Math.cos(30*Na),a=16;return t.precision=function(n){return arguments.length?(a=(i=n*n)>0&&16,t):Math.sqrt(i)},t}function Je(n){var t=We(function(t,e){return n([t*La,e*La])});return function(n){return tr(t(n))}}function Ge(n){this.stream=n}function Ke(n,t){return{point:t,sphere:function(){n.sphere()},lineStart:function(){n.lineStart()},lineEnd:function(){n.lineEnd()},polygonStart:function(){n.polygonStart()},polygonEnd:function(){n.polygonEnd()}}}function Qe(n){return nr(function(){return n})()}function nr(n){function t(n){return n=a(n[0]*Na,n[1]*Na),[n[0]*h+c,s-n[1]*h]}function e(n){return n=a.invert((n[0]-c)/h,(s-n[1])/h),n&&[n[0]*La,n[1]*La]}function r(){a=Ue(o=ur(m,y,x),i);var n=i(v,d);return c=g-n[0]*h,s=p+n[1]*h,u()}function u(){return l&&(l.valid=!1,l=null),t}var i,o,a,c,s,l,f=We(function(n,t){return n=i(n,t),[n[0]*h+c,s-n[1]*h]}),h=150,g=480,p=250,v=0,d=0,m=0,y=0,x=0,M=Ec,_=bt,b=null,w=null;return t.stream=function(n){return l&&(l.valid=!1),l=tr(M(o,f(_(n)))),l.valid=!0,l},t.clipAngle=function(n){return arguments.length?(M=null==n?(b=n,Ec):Re((b=+n)*Na),u()):b},t.clipExtent=function(n){return arguments.length?(w=n,_=n?Pe(n[0][0],n[0][1],n[1][0],n[1][1]):bt,u()):w},t.scale=function(n){return arguments.length?(h=+n,r()):h},t.translate=function(n){return arguments.length?(g=+n[0],p=+n[1],r()):[g,p]},t.center=function(n){return arguments.length?(v=n[0]%360*Na,d=n[1]%360*Na,r()):[v*La,d*La]},t.rotate=function(n){return arguments.length?(m=n[0]%360*Na,y=n[1]%360*Na,x=n.length>2?n[2]%360*Na:0,r()):[m*La,y*La,x*La]},Xo.rebind(t,f,"precision"),function(){return i=n.apply(this,arguments),t.invert=i.invert&&e,r()}}function tr(n){return Ke(n,function(t,e){n.point(t*Na,e*Na)})}function er(n,t){return[n,t]}function rr(n,t){return[n>Sa?n-ka:-Sa>n?n+ka:n,t]}function ur(n,t,e){return n?t||e?Ue(or(n),ar(t,e)):or(n):t||e?ar(t,e):rr}function ir(n){return function(t,e){return t+=n,[t>Sa?t-ka:-Sa>t?t+ka:t,e]}}function or(n){var t=ir(n);return t.invert=ir(-n),t}function ar(n,t){function e(n,t){var e=Math.cos(t),a=Math.cos(n)*e,c=Math.sin(n)*e,s=Math.sin(t),l=s*r+a*u;return[Math.atan2(c*i-l*o,a*r-s*u),X(l*i+c*o)]}var r=Math.cos(n),u=Math.sin(n),i=Math.cos(t),o=Math.sin(t);return e.invert=function(n,t){var e=Math.cos(t),a=Math.cos(n)*e,c=Math.sin(n)*e,s=Math.sin(t),l=s*i-c*o;return[Math.atan2(c*i+s*o,a*r+l*u),X(l*r-a*u)]},e}function cr(n,t){var e=Math.cos(n),r=Math.sin(n);return function(u,i,o,a){var c=o*t;null!=u?(u=sr(e,u),i=sr(e,i),(o>0?i>u:u>i)&&(u+=o*ka)):(u=n+o*ka,i=n-.5*c);for(var s,l=u;o>0?l>i:i>l;l-=c)a.point((s=ve([e,-r*Math.cos(l),-r*Math.sin(l)]))[0],s[1])}}function sr(n,t){var e=se(t);e[0]-=n,pe(e);var r=V(-e[1]);return((-e[2]<0?-r:r)+2*Math.PI-Aa)%(2*Math.PI)}function lr(n,t,e){var r=Xo.range(n,t-Aa,e).concat(t);return function(n){return r.map(function(t){return[n,t]})}}function fr(n,t,e){var r=Xo.range(n,t-Aa,e).concat(t);return function(n){return 
r.map(function(t){return[t,n]})}}function hr(n){return n.source}function gr(n){return n.target}function pr(n,t,e,r){var u=Math.cos(t),i=Math.sin(t),o=Math.cos(r),a=Math.sin(r),c=u*Math.cos(n),s=u*Math.sin(n),l=o*Math.cos(e),f=o*Math.sin(e),h=2*Math.asin(Math.sqrt(J(r-t)+u*o*J(e-n))),g=1/Math.sin(h),p=h?function(n){var t=Math.sin(n*=h)*g,e=Math.sin(h-n)*g,r=e*c+t*l,u=e*s+t*f,o=e*i+t*a;return[Math.atan2(u,r)*La,Math.atan2(o,Math.sqrt(r*r+u*u))*La]}:function(){return[n*La,t*La]};return p.distance=h,p}function vr(){function n(n,u){var i=Math.sin(u*=Na),o=Math.cos(u),a=oa((n*=Na)-t),c=Math.cos(a);Uc+=Math.atan2(Math.sqrt((a=o*Math.sin(a))*a+(a=r*i-e*o*c)*a),e*i+r*o*c),t=n,e=i,r=o}var t,e,r;jc.point=function(u,i){t=u*Na,e=Math.sin(i*=Na),r=Math.cos(i),jc.point=n},jc.lineEnd=function(){jc.point=jc.lineEnd=g}}function dr(n,t){function e(t,e){var r=Math.cos(t),u=Math.cos(e),i=n(r*u);return[i*u*Math.sin(t),i*Math.sin(e)]}return e.invert=function(n,e){var r=Math.sqrt(n*n+e*e),u=t(r),i=Math.sin(u),o=Math.cos(u);return[Math.atan2(n*i,r*o),Math.asin(r&&e*i/r)]},e}function mr(n,t){function e(n,t){var e=oa(oa(t)-Ea)<Aa?0:o/Math.pow(u(t),i);return[e*Math.sin(i*n),o-e*Math.cos(i*n)]}var r=Math.cos(n),u=function(n){return Math.tan(Sa/4+n/2)},i=n===t?Math.sin(n):Math.log(r/Math.cos(t))/Math.log(u(t)/u(n)),o=r*Math.pow(u(n),i)/i;return i?(e.invert=function(n,t){var e=o-t,r=I(i)*Math.sqrt(n*n+e*e);return[Math.atan2(n,e)/i,2*Math.atan(Math.pow(o/r,1/i))-Ea]},e):xr}function yr(n,t){function e(n,t){var e=i-t;return[e*Math.sin(u*n),i-e*Math.cos(u*n)]}var r=Math.cos(n),u=n===t?Math.sin(n):(r-Math.cos(t))/(t-n),i=r/u+n;return oa(u)<Aa?er:(e.invert=function(n,t){var e=i-t;return[Math.atan2(n,e)/u,i-I(u)*Math.sqrt(n*n+e*e)]},e)}function xr(n,t){return[n,Math.log(Math.tan(Sa/4+t/2))]}function Mr(n){var t,e=Qe(n),r=e.scale,u=e.translate,i=e.clipExtent;return e.scale=function(){var n=r.apply(e,arguments);return n===e?t?e.clipExtent(null):e:n},e.translate=function(){var n=u.apply(e,arguments);return n===e?t?e.clipExtent(null):e:n},e.clipExtent=function(n){var o=i.apply(e,arguments);if(o===e){if(t=null==n){var a=Sa*r(),c=u();i([[c[0]-a,c[1]-a],[c[0]+a,c[1]+a]])}}else t&&(o=null);return o},e.clipExtent(null)}function _r(n,t){return[Math.log(Math.tan(Sa/4+t/2)),-n]}function br(n){return n[0]}function wr(n){return n[1]}function Sr(n){for(var t=n.length,e=[0,1],r=2,u=2;t>u;u++){for(;r>1&&Z(n[e[r-2]],n[e[r-1]],n[u])<=0;)--r;e[r++]=u}return e.slice(0,r)}function kr(n,t){return n[0]-t[0]||n[1]-t[1]}function Er(n,t,e){return(e[0]-t[0])*(n[1]-t[1])<(e[1]-t[1])*(n[0]-t[0])}function Ar(n,t,e,r){var u=n[0],i=e[0],o=t[0]-u,a=r[0]-i,c=n[1],s=e[1],l=t[1]-c,f=r[1]-s,h=(a*(c-s)-f*(u-i))/(f*o-a*l);return[u+h*o,c+h*l]}function Cr(n){var t=n[0],e=n[n.length-1];return!(t[0]-e[0]||t[1]-e[1])}function Nr(){Jr(this),this.edge=this.site=this.circle=null}function Lr(n){var t=Jc.pop()||new Nr;return t.site=n,t}function Tr(n){Or(n),$c.remove(n),Jc.push(n),Jr(n)}function qr(n){var t=n.circle,e=t.x,r=t.cy,u={x:e,y:r},i=n.P,o=n.N,a=[n];Tr(n);for(var c=i;c.circle&&oa(e-c.circle.x)<Aa&&oa(r-c.circle.cy)<Aa;)i=c.P,a.unshift(c),Tr(c),c=i;a.unshift(c),Or(c);for(var s=o;s.circle&&oa(e-s.circle.x)<Aa&&oa(r-s.circle.cy)<Aa;)o=s.N,a.push(s),Tr(s),s=o;a.push(s),Or(s);var l,f=a.length;for(l=1;f>l;++l)s=a[l],c=a[l-1],$r(s.edge,c.site,s.site,u);c=a[0],s=a[f-1],s.edge=Vr(c.site,s.site,null,u),Fr(c),Fr(s)}function zr(n){for(var 
t,e,r,u,i=n.x,o=n.y,a=$c._;a;)if(r=Rr(a,o)-i,r>Aa)a=a.L;else{if(u=i-Dr(a,o),!(u>Aa)){r>-Aa?(t=a.P,e=a):u>-Aa?(t=a,e=a.N):t=e=a;break}if(!a.R){t=a;break}a=a.R}var c=Lr(n);if($c.insert(t,c),t||e){if(t===e)return Or(t),e=Lr(t.site),$c.insert(c,e),c.edge=e.edge=Vr(t.site,c.site),Fr(t),Fr(e),void 0;if(!e)return c.edge=Vr(t.site,c.site),void 0;Or(t),Or(e);var s=t.site,l=s.x,f=s.y,h=n.x-l,g=n.y-f,p=e.site,v=p.x-l,d=p.y-f,m=2*(h*d-g*v),y=h*h+g*g,x=v*v+d*d,M={x:(d*y-g*x)/m+l,y:(h*x-v*y)/m+f};$r(e.edge,s,p,M),c.edge=Vr(s,n,null,M),e.edge=Vr(n,p,null,M),Fr(t),Fr(e)}}function Rr(n,t){var e=n.site,r=e.x,u=e.y,i=u-t;if(!i)return r;var o=n.P;if(!o)return-1/0;e=o.site;var a=e.x,c=e.y,s=c-t;if(!s)return a;var l=a-r,f=1/i-1/s,h=l/s;return f?(-h+Math.sqrt(h*h-2*f*(l*l/(-2*s)-c+s/2+u-i/2)))/f+r:(r+a)/2}function Dr(n,t){var e=n.N;if(e)return Rr(e,t);var r=n.site;return r.y===t?r.x:1/0}function Pr(n){this.site=n,this.edges=[]}function Ur(n){for(var t,e,r,u,i,o,a,c,s,l,f=n[0][0],h=n[1][0],g=n[0][1],p=n[1][1],v=Xc,d=v.length;d--;)if(i=v[d],i&&i.prepare())for(a=i.edges,c=a.length,o=0;c>o;)l=a[o].end(),r=l.x,u=l.y,s=a[++o%c].start(),t=s.x,e=s.y,(oa(r-t)>Aa||oa(u-e)>Aa)&&(a.splice(o,0,new Br(Xr(i.site,l,oa(r-f)<Aa&&p-u>Aa?{x:f,y:oa(t-f)<Aa?e:p}:oa(u-p)<Aa&&h-r>Aa?{x:oa(e-p)<Aa?t:h,y:p}:oa(r-h)<Aa&&u-g>Aa?{x:h,y:oa(t-h)<Aa?e:g}:oa(u-g)<Aa&&r-f>Aa?{x:oa(e-g)<Aa?t:f,y:g}:null),i.site,null)),++c)}function jr(n,t){return t.angle-n.angle}function Hr(){Jr(this),this.x=this.y=this.arc=this.site=this.cy=null}function Fr(n){var t=n.P,e=n.N;if(t&&e){var r=t.site,u=n.site,i=e.site;if(r!==i){var o=u.x,a=u.y,c=r.x-o,s=r.y-a,l=i.x-o,f=i.y-a,h=2*(c*f-s*l);if(!(h>=-Ca)){var g=c*c+s*s,p=l*l+f*f,v=(f*g-s*p)/h,d=(c*p-l*g)/h,f=d+a,m=Gc.pop()||new Hr;m.arc=n,m.site=u,m.x=v+o,m.y=f+Math.sqrt(v*v+d*d),m.cy=f,n.circle=m;for(var y=null,x=Wc._;x;)if(m.y<x.y||m.y===x.y&&m.x<=x.x){if(!x.L){y=x.P;break}x=x.L}else{if(!x.R){y=x;break}x=x.R}Wc.insert(y,m),y||(Bc=m)}}}}function Or(n){var t=n.circle;t&&(t.P||(Bc=t.N),Wc.remove(t),Gc.push(t),Jr(t),n.circle=null)}function Yr(n){for(var t,e=Vc,r=De(n[0][0],n[0][1],n[1][0],n[1][1]),u=e.length;u--;)t=e[u],(!Ir(t,n)||!r(t)||oa(t.a.x-t.b.x)<Aa&&oa(t.a.y-t.b.y)<Aa)&&(t.a=t.b=null,e.splice(u,1))}function Ir(n,t){var e=n.b;if(e)return!0;var r,u,i=n.a,o=t[0][0],a=t[1][0],c=t[0][1],s=t[1][1],l=n.l,f=n.r,h=l.x,g=l.y,p=f.x,v=f.y,d=(h+p)/2,m=(g+v)/2;if(v===g){if(o>d||d>=a)return;if(h>p){if(i){if(i.y>=s)return}else i={x:d,y:c};e={x:d,y:s}}else{if(i){if(i.y<c)return}else i={x:d,y:s};e={x:d,y:c}}}else if(r=(h-p)/(v-g),u=m-r*d,-1>r||r>1)if(h>p){if(i){if(i.y>=s)return}else i={x:(c-u)/r,y:c};e={x:(s-u)/r,y:s}}else{if(i){if(i.y<c)return}else i={x:(s-u)/r,y:s};e={x:(c-u)/r,y:c}}else if(v>g){if(i){if(i.x>=a)return}else i={x:o,y:r*o+u};e={x:a,y:r*a+u}}else{if(i){if(i.x<o)return}else i={x:a,y:r*a+u};e={x:o,y:r*o+u}}return n.a=i,n.b=e,!0}function Zr(n,t){this.l=n,this.r=t,this.a=this.b=null}function Vr(n,t,e,r){var u=new Zr(n,t);return Vc.push(u),e&&$r(u,n,t,e),r&&$r(u,t,n,r),Xc[n.i].edges.push(new Br(u,n,t)),Xc[t.i].edges.push(new Br(u,t,n)),u}function Xr(n,t,e){var r=new Zr(n,null);return r.a=t,r.b=e,Vc.push(r),r}function $r(n,t,e,r){n.a||n.b?n.l===e?n.b=r:n.a=r:(n.a=r,n.l=t,n.r=e)}function Br(n,t,e){var r=n.a,u=n.b;this.edge=n,this.site=t,this.angle=e?Math.atan2(e.y-t.y,e.x-t.x):n.l===t?Math.atan2(u.x-r.x,r.y-u.y):Math.atan2(r.x-u.x,u.y-r.y)}function Wr(){this._=null}function Jr(n){n.U=n.C=n.L=n.R=n.P=n.N=null}function Gr(n,t){var e=t,r=t.R,u=e.U;u?u.L===e?u.L=r:u.R=r:n._=r,r.U=u,e.U=r,e.R=r.L,e.R&&(e.R.U=e),r.L=e}function 
Kr(n,t){var e=t,r=t.L,u=e.U;u?u.L===e?u.L=r:u.R=r:n._=r,r.U=u,e.U=r,e.L=r.R,e.L&&(e.L.U=e),r.R=e}function Qr(n){for(;n.L;)n=n.L;return n}function nu(n,t){var e,r,u,i=n.sort(tu).pop();for(Vc=[],Xc=new Array(n.length),$c=new Wr,Wc=new Wr;;)if(u=Bc,i&&(!u||i.y<u.y||i.y===u.y&&i.x<u.x))(i.x!==e||i.y!==r)&&(Xc[i.i]=new Pr(i),zr(i),e=i.x,r=i.y),i=n.pop();else{if(!u)break;qr(u.arc)}t&&(Yr(t),Ur(t));var o={cells:Xc,edges:Vc};return $c=Wc=Vc=Xc=null,o}function tu(n,t){return t.y-n.y||t.x-n.x}function eu(n,t,e){return(n.x-e.x)*(t.y-n.y)-(n.x-t.x)*(e.y-n.y)}function ru(n){return n.x}function uu(n){return n.y}function iu(){return{leaf:!0,nodes:[],point:null,x:null,y:null}}function ou(n,t,e,r,u,i){if(!n(t,e,r,u,i)){var o=.5*(e+u),a=.5*(r+i),c=t.nodes;c[0]&&ou(n,c[0],e,r,o,a),c[1]&&ou(n,c[1],o,r,u,a),c[2]&&ou(n,c[2],e,a,o,i),c[3]&&ou(n,c[3],o,a,u,i)}}function au(n,t){n=Xo.rgb(n),t=Xo.rgb(t);var e=n.r,r=n.g,u=n.b,i=t.r-e,o=t.g-r,a=t.b-u;return function(n){return"#"+vt(Math.round(e+i*n))+vt(Math.round(r+o*n))+vt(Math.round(u+a*n))}}function cu(n,t){var e,r={},u={};for(e in n)e in t?r[e]=fu(n[e],t[e]):u[e]=n[e];for(e in t)e in n||(u[e]=t[e]);return function(n){for(e in r)u[e]=r[e](n);return u}}function su(n,t){return t-=n=+n,function(e){return n+t*e}}function lu(n,t){var e,r,u,i,o,a=0,c=0,s=[],l=[];for(n+="",t+="",Qc.lastIndex=0,r=0;e=Qc.exec(t);++r)e.index&&s.push(t.substring(a,c=e.index)),l.push({i:s.length,x:e[0]}),s.push(null),a=Qc.lastIndex;for(a<t.length&&s.push(t.substring(a)),r=0,i=l.length;(e=Qc.exec(n))&&i>r;++r)if(o=l[r],o.x==e[0]){if(o.i)if(null==s[o.i+1])for(s[o.i-1]+=o.x,s.splice(o.i,1),u=r+1;i>u;++u)l[u].i--;else for(s[o.i-1]+=o.x+s[o.i+1],s.splice(o.i,2),u=r+1;i>u;++u)l[u].i-=2;else if(null==s[o.i+1])s[o.i]=o.x;else for(s[o.i]=o.x+s[o.i+1],s.splice(o.i+1,1),u=r+1;i>u;++u)l[u].i--;l.splice(r,1),i--,r--}else o.x=su(parseFloat(e[0]),parseFloat(o.x));for(;i>r;)o=l.pop(),null==s[o.i+1]?s[o.i]=o.x:(s[o.i]=o.x+s[o.i+1],s.splice(o.i+1,1)),i--;return 1===s.length?null==s[0]?(o=l[0].x,function(n){return o(n)+""}):function(){return t}:function(n){for(r=0;i>r;++r)s[(o=l[r]).i]=o.x(n);return s.join("")}}function fu(n,t){for(var e,r=Xo.interpolators.length;--r>=0&&!(e=Xo.interpolators[r](n,t)););return e}function hu(n,t){var e,r=[],u=[],i=n.length,o=t.length,a=Math.min(n.length,t.length);for(e=0;a>e;++e)r.push(fu(n[e],t[e]));for(;i>e;++e)u[e]=n[e];for(;o>e;++e)u[e]=t[e];return function(n){for(e=0;a>e;++e)u[e]=r[e](n);return u}}function gu(n){return function(t){return 0>=t?0:t>=1?1:n(t)}}function pu(n){return function(t){return 1-n(1-t)}}function vu(n){return function(t){return.5*(.5>t?n(2*t):2-n(2-2*t))}}function du(n){return n*n}function mu(n){return n*n*n}function yu(n){if(0>=n)return 0;if(n>=1)return 1;var t=n*n,e=t*n;return 4*(.5>n?e:3*(n-t)+e-.75)}function xu(n){return function(t){return Math.pow(t,n)}}function Mu(n){return 1-Math.cos(n*Ea)}function _u(n){return Math.pow(2,10*(n-1))}function bu(n){return 1-Math.sqrt(1-n*n)}function wu(n,t){var e;return arguments.length<2&&(t=.45),arguments.length?e=t/ka*Math.asin(1/n):(n=1,e=t/4),function(r){return 1+n*Math.pow(2,-10*r)*Math.sin((r-e)*ka/t)}}function Su(n){return n||(n=1.70158),function(t){return t*t*((n+1)*t-n)}}function ku(n){return 1/2.75>n?7.5625*n*n:2/2.75>n?7.5625*(n-=1.5/2.75)*n+.75:2.5/2.75>n?7.5625*(n-=2.25/2.75)*n+.9375:7.5625*(n-=2.625/2.75)*n+.984375}function Eu(n,t){n=Xo.hcl(n),t=Xo.hcl(t);var e=n.h,r=n.c,u=n.l,i=t.h-e,o=t.c-r,a=t.l-u;return 
isNaN(o)&&(o=0,r=isNaN(r)?t.c:r),isNaN(i)?(i=0,e=isNaN(e)?t.h:e):i>180?i-=360:-180>i&&(i+=360),function(n){return rt(e+i*n,r+o*n,u+a*n)+""}}function Au(n,t){n=Xo.hsl(n),t=Xo.hsl(t);var e=n.h,r=n.s,u=n.l,i=t.h-e,o=t.s-r,a=t.l-u;return isNaN(o)&&(o=0,r=isNaN(r)?t.s:r),isNaN(i)?(i=0,e=isNaN(e)?t.h:e):i>180?i-=360:-180>i&&(i+=360),function(n){return nt(e+i*n,r+o*n,u+a*n)+""}}function Cu(n,t){n=Xo.lab(n),t=Xo.lab(t);var e=n.l,r=n.a,u=n.b,i=t.l-e,o=t.a-r,a=t.b-u;return function(n){return ot(e+i*n,r+o*n,u+a*n)+""}}function Nu(n,t){return t-=n,function(e){return Math.round(n+t*e)}}function Lu(n){var t=[n.a,n.b],e=[n.c,n.d],r=qu(t),u=Tu(t,e),i=qu(zu(e,t,-u))||0;t[0]*e[1]<e[0]*t[1]&&(t[0]*=-1,t[1]*=-1,r*=-1,u*=-1),this.rotate=(r?Math.atan2(t[1],t[0]):Math.atan2(-e[0],e[1]))*La,this.translate=[n.e,n.f],this.scale=[r,i],this.skew=i?Math.atan2(u,i)*La:0}function Tu(n,t){return n[0]*t[0]+n[1]*t[1]}function qu(n){var t=Math.sqrt(Tu(n,n));return t&&(n[0]/=t,n[1]/=t),t}function zu(n,t,e){return n[0]+=e*t[0],n[1]+=e*t[1],n}function Ru(n,t){var e,r=[],u=[],i=Xo.transform(n),o=Xo.transform(t),a=i.translate,c=o.translate,s=i.rotate,l=o.rotate,f=i.skew,h=o.skew,g=i.scale,p=o.scale;return a[0]!=c[0]||a[1]!=c[1]?(r.push("translate(",null,",",null,")"),u.push({i:1,x:su(a[0],c[0])},{i:3,x:su(a[1],c[1])})):c[0]||c[1]?r.push("translate("+c+")"):r.push(""),s!=l?(s-l>180?l+=360:l-s>180&&(s+=360),u.push({i:r.push(r.pop()+"rotate(",null,")")-2,x:su(s,l)})):l&&r.push(r.pop()+"rotate("+l+")"),f!=h?u.push({i:r.push(r.pop()+"skewX(",null,")")-2,x:su(f,h)}):h&&r.push(r.pop()+"skewX("+h+")"),g[0]!=p[0]||g[1]!=p[1]?(e=r.push(r.pop()+"scale(",null,",",null,")"),u.push({i:e-4,x:su(g[0],p[0])},{i:e-2,x:su(g[1],p[1])})):(1!=p[0]||1!=p[1])&&r.push(r.pop()+"scale("+p+")"),e=u.length,function(n){for(var t,i=-1;++i<e;)r[(t=u[i]).i]=t.x(n);return r.join("")}}function Du(n,t){return t=t-(n=+n)?1/(t-n):0,function(e){return(e-n)*t}}function Pu(n,t){return t=t-(n=+n)?1/(t-n):0,function(e){return Math.max(0,Math.min(1,(e-n)*t))}}function Uu(n){for(var t=n.source,e=n.target,r=Hu(t,e),u=[t];t!==r;)t=t.parent,u.push(t);for(var i=u.length;e!==r;)u.splice(i,0,e),e=e.parent;return u}function ju(n){for(var t=[],e=n.parent;null!=e;)t.push(n),n=e,e=e.parent;return t.push(n),t}function Hu(n,t){if(n===t)return n;for(var e=ju(n),r=ju(t),u=e.pop(),i=r.pop(),o=null;u===i;)o=u,u=e.pop(),i=r.pop();return o}function Fu(n){n.fixed|=2}function Ou(n){n.fixed&=-7}function Yu(n){n.fixed|=4,n.px=n.x,n.py=n.y}function Iu(n){n.fixed&=-5}function Zu(n,t,e){var r=0,u=0;if(n.charge=0,!n.leaf)for(var i,o=n.nodes,a=o.length,c=-1;++c<a;)i=o[c],null!=i&&(Zu(i,t,e),n.charge+=i.charge,r+=i.charge*i.cx,u+=i.charge*i.cy);if(n.point){n.leaf||(n.point.x+=Math.random()-.5,n.point.y+=Math.random()-.5);var s=t*e[n.point.index];n.charge+=n.pointCharge=s,r+=s*n.point.x,u+=s*n.point.y}n.cx=r/n.charge,n.cy=u/n.charge}function Vu(n,t){return Xo.rebind(n,t,"sort","children","value"),n.nodes=n,n.links=Wu,n}function Xu(n){return n.children}function $u(n){return n.value}function Bu(n,t){return t.value-n.value}function Wu(n){return Xo.merge(n.map(function(n){return(n.children||[]).map(function(t){return{source:n,target:t}})}))}function Ju(n){return n.x}function Gu(n){return n.y}function Ku(n,t,e){n.y0=t,n.y=e}function Qu(n){return Xo.range(n.length)}function ni(n){for(var t=-1,e=n[0].length,r=[];++t<e;)r[t]=0;return r}function ti(n){for(var t,e=1,r=0,u=n[0][1],i=n.length;i>e;++e)(t=n[e][1])>u&&(r=e,u=t);return r}function ei(n){return n.reduce(ri,0)}function ri(n,t){return n+t[1]}function 
ui(n,t){return ii(n,Math.ceil(Math.log(t.length)/Math.LN2+1))}function ii(n,t){for(var e=-1,r=+n[0],u=(n[1]-r)/t,i=[];++e<=t;)i[e]=u*e+r;return i}function oi(n){return[Xo.min(n),Xo.max(n)]}function ai(n,t){return n.parent==t.parent?1:2}function ci(n){var t=n.children;return t&&t.length?t[0]:n._tree.thread}function si(n){var t,e=n.children;return e&&(t=e.length)?e[t-1]:n._tree.thread}function li(n,t){var e=n.children;if(e&&(u=e.length))for(var r,u,i=-1;++i<u;)t(r=li(e[i],t),n)>0&&(n=r);return n}function fi(n,t){return n.x-t.x}function hi(n,t){return t.x-n.x}function gi(n,t){return n.depth-t.depth}function pi(n,t){function e(n,r){var u=n.children;if(u&&(o=u.length))for(var i,o,a=null,c=-1;++c<o;)i=u[c],e(i,a),a=i;t(n,r)}e(n,null)}function vi(n){for(var t,e=0,r=0,u=n.children,i=u.length;--i>=0;)t=u[i]._tree,t.prelim+=e,t.mod+=e,e+=t.shift+(r+=t.change)}function di(n,t,e){n=n._tree,t=t._tree;var r=e/(t.number-n.number);n.change+=r,t.change-=r,t.shift+=e,t.prelim+=e,t.mod+=e}function mi(n,t,e){return n._tree.ancestor.parent==t.parent?n._tree.ancestor:e}function yi(n,t){return n.value-t.value}function xi(n,t){var e=n._pack_next;n._pack_next=t,t._pack_prev=n,t._pack_next=e,e._pack_prev=t}function Mi(n,t){n._pack_next=t,t._pack_prev=n}function _i(n,t){var e=t.x-n.x,r=t.y-n.y,u=n.r+t.r;return.999*u*u>e*e+r*r}function bi(n){function t(n){l=Math.min(n.x-n.r,l),f=Math.max(n.x+n.r,f),h=Math.min(n.y-n.r,h),g=Math.max(n.y+n.r,g)}if((e=n.children)&&(s=e.length)){var e,r,u,i,o,a,c,s,l=1/0,f=-1/0,h=1/0,g=-1/0;if(e.forEach(wi),r=e[0],r.x=-r.r,r.y=0,t(r),s>1&&(u=e[1],u.x=u.r,u.y=0,t(u),s>2))for(i=e[2],Ei(r,u,i),t(i),xi(r,i),r._pack_prev=i,xi(i,u),u=r._pack_next,o=3;s>o;o++){Ei(r,u,i=e[o]);var p=0,v=1,d=1;for(a=u._pack_next;a!==u;a=a._pack_next,v++)if(_i(a,i)){p=1;break}if(1==p)for(c=r._pack_prev;c!==a._pack_prev&&!_i(c,i);c=c._pack_prev,d++);p?(d>v||v==d&&u.r<r.r?Mi(r,u=a):Mi(r=c,u),o--):(xi(r,i),u=i,t(i))}var m=(l+f)/2,y=(h+g)/2,x=0;for(o=0;s>o;o++)i=e[o],i.x-=m,i.y-=y,x=Math.max(x,i.r+Math.sqrt(i.x*i.x+i.y*i.y));n.r=x,e.forEach(Si)}}function wi(n){n._pack_next=n._pack_prev=n}function Si(n){delete n._pack_next,delete n._pack_prev}function ki(n,t,e,r){var u=n.children;if(n.x=t+=r*n.x,n.y=e+=r*n.y,n.r*=r,u)for(var i=-1,o=u.length;++i<o;)ki(u[i],t,e,r)}function Ei(n,t,e){var r=n.r+e.r,u=t.x-n.x,i=t.y-n.y;if(r&&(u||i)){var o=t.r+e.r,a=u*u+i*i;o*=o,r*=r;var c=.5+(r-o)/(2*a),s=Math.sqrt(Math.max(0,2*o*(r+a)-(r-=a)*r-o*o))/(2*a);e.x=n.x+c*u+s*i,e.y=n.y+c*i-s*u}else e.x=n.x+r,e.y=n.y}function Ai(n){return 1+Xo.max(n,function(n){return n.y})}function Ci(n){return n.reduce(function(n,t){return n+t.x},0)/n.length}function Ni(n){var t=n.children;return t&&t.length?Ni(t[0]):n}function Li(n){var t,e=n.children;return e&&(t=e.length)?Li(e[t-1]):n}function Ti(n){return{x:n.x,y:n.y,dx:n.dx,dy:n.dy}}function qi(n,t){var e=n.x+t[3],r=n.y+t[0],u=n.dx-t[1]-t[3],i=n.dy-t[0]-t[2];return 0>u&&(e+=u/2,u=0),0>i&&(r+=i/2,i=0),{x:e,y:r,dx:u,dy:i}}function zi(n){var t=n[0],e=n[n.length-1];return e>t?[t,e]:[e,t]}function Ri(n){return n.rangeExtent?n.rangeExtent():zi(n.range())}function Di(n,t,e,r){var u=e(n[0],n[1]),i=r(t[0],t[1]);return function(n){return i(u(n))}}function Pi(n,t){var e,r=0,u=n.length-1,i=n[r],o=n[u];return i>o&&(e=r,r=u,u=e,e=i,i=o,o=e),n[r]=t.floor(i),n[u]=t.ceil(o),n}function Ui(n){return n?{floor:function(t){return Math.floor(t/n)*n},ceil:function(t){return Math.ceil(t/n)*n}}:ls}function ji(n,t,e,r){var 
u=[],i=[],o=0,a=Math.min(n.length,t.length)-1;for(n[a]<n[0]&&(n=n.slice().reverse(),t=t.slice().reverse());++o<=a;)u.push(e(n[o-1],n[o])),i.push(r(t[o-1],t[o]));return function(t){var e=Xo.bisect(n,t,1,a)-1;return i[e](u[e](t))}}function Hi(n,t,e,r){function u(){var u=Math.min(n.length,t.length)>2?ji:Di,c=r?Pu:Du;return o=u(n,t,c,e),a=u(t,n,c,fu),i}function i(n){return o(n)}var o,a;return i.invert=function(n){return a(n)},i.domain=function(t){return arguments.length?(n=t.map(Number),u()):n},i.range=function(n){return arguments.length?(t=n,u()):t},i.rangeRound=function(n){return i.range(n).interpolate(Nu)},i.clamp=function(n){return arguments.length?(r=n,u()):r},i.interpolate=function(n){return arguments.length?(e=n,u()):e},i.ticks=function(t){return Ii(n,t)},i.tickFormat=function(t,e){return Zi(n,t,e)},i.nice=function(t){return Oi(n,t),u()},i.copy=function(){return Hi(n,t,e,r)},u()}function Fi(n,t){return Xo.rebind(n,t,"range","rangeRound","interpolate","clamp")}function Oi(n,t){return Pi(n,Ui(Yi(n,t)[2]))}function Yi(n,t){null==t&&(t=10);var e=zi(n),r=e[1]-e[0],u=Math.pow(10,Math.floor(Math.log(r/t)/Math.LN10)),i=t/r*u;return.15>=i?u*=10:.35>=i?u*=5:.75>=i&&(u*=2),e[0]=Math.ceil(e[0]/u)*u,e[1]=Math.floor(e[1]/u)*u+.5*u,e[2]=u,e}function Ii(n,t){return Xo.range.apply(Xo,Yi(n,t))}function Zi(n,t,e){var r=Yi(n,t);return Xo.format(e?e.replace(Qa,function(n,t,e,u,i,o,a,c,s,l){return[t,e,u,i,o,a,c,s||"."+Xi(l,r),l].join("")}):",."+Vi(r[2])+"f")}function Vi(n){return-Math.floor(Math.log(n)/Math.LN10+.01)}function Xi(n,t){var e=Vi(t[2]);return n in fs?Math.abs(e-Vi(Math.max(Math.abs(t[0]),Math.abs(t[1]))))+ +("e"!==n):e-2*("%"===n)}function $i(n,t,e,r){function u(n){return(e?Math.log(0>n?0:n):-Math.log(n>0?0:-n))/Math.log(t)}function i(n){return e?Math.pow(t,n):-Math.pow(t,-n)}function o(t){return n(u(t))}return o.invert=function(t){return i(n.invert(t))},o.domain=function(t){return arguments.length?(e=t[0]>=0,n.domain((r=t.map(Number)).map(u)),o):r},o.base=function(e){return arguments.length?(t=+e,n.domain(r.map(u)),o):t},o.nice=function(){var t=Pi(r.map(u),e?Math:gs);return n.domain(t),r=t.map(i),o},o.ticks=function(){var n=zi(r),o=[],a=n[0],c=n[1],s=Math.floor(u(a)),l=Math.ceil(u(c)),f=t%1?2:t;if(isFinite(l-s)){if(e){for(;l>s;s++)for(var h=1;f>h;h++)o.push(i(s)*h);o.push(i(s))}else for(o.push(i(s));s++<l;)for(var h=f-1;h>0;h--)o.push(i(s)*h);for(s=0;o[s]<a;s++);for(l=o.length;o[l-1]>c;l--);o=o.slice(s,l)}return o},o.tickFormat=function(n,t){if(!arguments.length)return hs;arguments.length<2?t=hs:"function"!=typeof t&&(t=Xo.format(t));var r,a=Math.max(.1,n/o.ticks().length),c=e?(r=1e-12,Math.ceil):(r=-1e-12,Math.floor);return function(n){return n/i(c(u(n)+r))<=a?t(n):""}},o.copy=function(){return $i(n.copy(),t,e,r)},Fi(o,n)}function Bi(n,t,e){function r(t){return n(u(t))}var u=Wi(t),i=Wi(1/t);return r.invert=function(t){return i(n.invert(t))},r.domain=function(t){return arguments.length?(n.domain((e=t.map(Number)).map(u)),r):e},r.ticks=function(n){return Ii(e,n)},r.tickFormat=function(n,t){return Zi(e,n,t)},r.nice=function(n){return r.domain(Oi(e,n))},r.exponent=function(o){return arguments.length?(u=Wi(t=o),i=Wi(1/t),n.domain(e.map(u)),r):t},r.copy=function(){return Bi(n.copy(),t,e)},Fi(r,n)}function Wi(n){return function(t){return 0>t?-Math.pow(-t,n):Math.pow(t,n)}}function Ji(n,t){function e(e){return o[((i.get(e)||"range"===t.t&&i.set(e,n.push(e)))-1)%o.length]}function r(t,e){return Xo.range(n.length).map(function(n){return t+e*n})}var i,o,a;return 
e.domain=function(r){if(!arguments.length)return n;n=[],i=new u;for(var o,a=-1,c=r.length;++a<c;)i.has(o=r[a])||i.set(o,n.push(o));return e[t.t].apply(e,t.a)},e.range=function(n){return arguments.length?(o=n,a=0,t={t:"range",a:arguments},e):o},e.rangePoints=function(u,i){arguments.length<2&&(i=0);var c=u[0],s=u[1],l=(s-c)/(Math.max(1,n.length-1)+i);return o=r(n.length<2?(c+s)/2:c+l*i/2,l),a=0,t={t:"rangePoints",a:arguments},e},e.rangeBands=function(u,i,c){arguments.length<2&&(i=0),arguments.length<3&&(c=i);var s=u[1]<u[0],l=u[s-0],f=u[1-s],h=(f-l)/(n.length-i+2*c);return o=r(l+h*c,h),s&&o.reverse(),a=h*(1-i),t={t:"rangeBands",a:arguments},e},e.rangeRoundBands=function(u,i,c){arguments.length<2&&(i=0),arguments.length<3&&(c=i);var s=u[1]<u[0],l=u[s-0],f=u[1-s],h=Math.floor((f-l)/(n.length-i+2*c)),g=f-l-(n.length-i)*h;return o=r(l+Math.round(g/2),h),s&&o.reverse(),a=Math.round(h*(1-i)),t={t:"rangeRoundBands",a:arguments},e},e.rangeBand=function(){return a},e.rangeExtent=function(){return zi(t.a[0])},e.copy=function(){return Ji(n,t)},e.domain(n)}function Gi(n,t){function e(){var e=0,i=t.length;for(u=[];++e<i;)u[e-1]=Xo.quantile(n,e/i);return r}function r(n){return isNaN(n=+n)?void 0:t[Xo.bisect(u,n)]}var u;return r.domain=function(t){return arguments.length?(n=t.filter(function(n){return!isNaN(n)}).sort(Xo.ascending),e()):n},r.range=function(n){return arguments.length?(t=n,e()):t},r.quantiles=function(){return u},r.invertExtent=function(e){return e=t.indexOf(e),0>e?[0/0,0/0]:[e>0?u[e-1]:n[0],e<u.length?u[e]:n[n.length-1]]},r.copy=function(){return Gi(n,t)},e()}function Ki(n,t,e){function r(t){return e[Math.max(0,Math.min(o,Math.floor(i*(t-n))))]}function u(){return i=e.length/(t-n),o=e.length-1,r}var i,o;return r.domain=function(e){return arguments.length?(n=+e[0],t=+e[e.length-1],u()):[n,t]},r.range=function(n){return arguments.length?(e=n,u()):e},r.invertExtent=function(t){return t=e.indexOf(t),t=0>t?0/0:t/i+n,[t,t+1/i]},r.copy=function(){return Ki(n,t,e)},u()}function Qi(n,t){function e(e){return e>=e?t[Xo.bisect(n,e)]:void 0}return e.domain=function(t){return arguments.length?(n=t,e):n},e.range=function(n){return arguments.length?(t=n,e):t},e.invertExtent=function(e){return e=t.indexOf(e),[n[e-1],n[e]]},e.copy=function(){return Qi(n,t)},e}function no(n){function t(n){return+n}return t.invert=t,t.domain=t.range=function(e){return arguments.length?(n=e.map(t),t):n},t.ticks=function(t){return Ii(n,t)},t.tickFormat=function(t,e){return Zi(n,t,e)},t.copy=function(){return no(n)},t}function to(n){return n.innerRadius}function eo(n){return n.outerRadius}function ro(n){return n.startAngle}function uo(n){return n.endAngle}function io(n){function t(t){function o(){s.push("M",i(n(l),a))}for(var c,s=[],l=[],f=-1,h=t.length,g=_t(e),p=_t(r);++f<h;)u.call(this,c=t[f],f)?l.push([+g.call(this,c,f),+p.call(this,c,f)]):l.length&&(o(),l=[]);return l.length&&o(),s.length?s.join(""):null}var e=br,r=wr,u=be,i=oo,o=i.key,a=.7;return t.x=function(n){return arguments.length?(e=n,t):e},t.y=function(n){return arguments.length?(r=n,t):r},t.defined=function(n){return arguments.length?(u=n,t):u},t.interpolate=function(n){return arguments.length?(o="function"==typeof n?i=n:(i=Ms.get(n)||oo).key,t):o},t.tension=function(n){return arguments.length?(a=n,t):a},t}function oo(n){return n.join("L")}function ao(n){return oo(n)+"Z"}function co(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("H",(r[0]+(r=n[t])[0])/2,"V",r[1]);return e>1&&u.push("H",r[0]),u.join("")}function so(n){for(var 
t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("V",(r=n[t])[1],"H",r[0]);return u.join("")}function lo(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("H",(r=n[t])[0],"V",r[1]);return u.join("")}function fo(n,t){return n.length<4?oo(n):n[1]+po(n.slice(1,n.length-1),vo(n,t))}function ho(n,t){return n.length<3?oo(n):n[0]+po((n.push(n[0]),n),vo([n[n.length-2]].concat(n,[n[1]]),t))}function go(n,t){return n.length<3?oo(n):n[0]+po(n,vo(n,t))}function po(n,t){if(t.length<1||n.length!=t.length&&n.length!=t.length+2)return oo(n);var e=n.length!=t.length,r="",u=n[0],i=n[1],o=t[0],a=o,c=1;if(e&&(r+="Q"+(i[0]-2*o[0]/3)+","+(i[1]-2*o[1]/3)+","+i[0]+","+i[1],u=n[1],c=2),t.length>1){a=t[1],i=n[c],c++,r+="C"+(u[0]+o[0])+","+(u[1]+o[1])+","+(i[0]-a[0])+","+(i[1]-a[1])+","+i[0]+","+i[1];for(var s=2;s<t.length;s++,c++)i=n[c],a=t[s],r+="S"+(i[0]-a[0])+","+(i[1]-a[1])+","+i[0]+","+i[1]}if(e){var l=n[c];r+="Q"+(i[0]+2*a[0]/3)+","+(i[1]+2*a[1]/3)+","+l[0]+","+l[1]}return r}function vo(n,t){for(var e,r=[],u=(1-t)/2,i=n[0],o=n[1],a=1,c=n.length;++a<c;)e=i,i=o,o=n[a],r.push([u*(o[0]-e[0]),u*(o[1]-e[1])]);return r}function mo(n){if(n.length<3)return oo(n);var t=1,e=n.length,r=n[0],u=r[0],i=r[1],o=[u,u,u,(r=n[1])[0]],a=[i,i,i,r[1]],c=[u,",",i,"L",_o(ws,o),",",_o(ws,a)];for(n.push(n[e-1]);++t<=e;)r=n[t],o.shift(),o.push(r[0]),a.shift(),a.push(r[1]),bo(c,o,a);return n.pop(),c.push("L",r),c.join("")}function yo(n){if(n.length<4)return oo(n);for(var t,e=[],r=-1,u=n.length,i=[0],o=[0];++r<3;)t=n[r],i.push(t[0]),o.push(t[1]);for(e.push(_o(ws,i)+","+_o(ws,o)),--r;++r<u;)t=n[r],i.shift(),i.push(t[0]),o.shift(),o.push(t[1]),bo(e,i,o);return e.join("")}function xo(n){for(var t,e,r=-1,u=n.length,i=u+4,o=[],a=[];++r<4;)e=n[r%u],o.push(e[0]),a.push(e[1]);for(t=[_o(ws,o),",",_o(ws,a)],--r;++r<i;)e=n[r%u],o.shift(),o.push(e[0]),a.shift(),a.push(e[1]),bo(t,o,a);return t.join("")}function Mo(n,t){var e=n.length-1;if(e)for(var r,u,i=n[0][0],o=n[0][1],a=n[e][0]-i,c=n[e][1]-o,s=-1;++s<=e;)r=n[s],u=s/e,r[0]=t*r[0]+(1-t)*(i+u*a),r[1]=t*r[1]+(1-t)*(o+u*c);return mo(n)}function _o(n,t){return n[0]*t[0]+n[1]*t[1]+n[2]*t[2]+n[3]*t[3]}function bo(n,t,e){n.push("C",_o(_s,t),",",_o(_s,e),",",_o(bs,t),",",_o(bs,e),",",_o(ws,t),",",_o(ws,e))}function wo(n,t){return(t[1]-n[1])/(t[0]-n[0])}function So(n){for(var t=0,e=n.length-1,r=[],u=n[0],i=n[1],o=r[0]=wo(u,i);++t<e;)r[t]=(o+(o=wo(u=i,i=n[t+1])))/2;return r[t]=o,r}function ko(n){for(var t,e,r,u,i=[],o=So(n),a=-1,c=n.length-1;++a<c;)t=wo(n[a],n[a+1]),oa(t)<Aa?o[a]=o[a+1]=0:(e=o[a]/t,r=o[a+1]/t,u=e*e+r*r,u>9&&(u=3*t/Math.sqrt(u),o[a]=u*e,o[a+1]=u*r));for(a=-1;++a<=c;)u=(n[Math.min(c,a+1)][0]-n[Math.max(0,a-1)][0])/(6*(1+o[a]*o[a])),i.push([u||0,o[a]*u||0]);return i}function Eo(n){return n.length<3?oo(n):n[0]+po(n,ko(n))}function Ao(n){for(var t,e,r,u=-1,i=n.length;++u<i;)t=n[u],e=t[0],r=t[1]+ys,t[0]=e*Math.cos(r),t[1]=e*Math.sin(r);return n}function Co(n){function t(t){function c(){v.push("M",a(n(m),f),l,s(n(d.reverse()),f),"Z")}for(var h,g,p,v=[],d=[],m=[],y=-1,x=t.length,M=_t(e),_=_t(u),b=e===r?function(){return g}:_t(r),w=u===i?function(){return p}:_t(i);++y<x;)o.call(this,h=t[y],y)?(d.push([g=+M.call(this,h,y),p=+_.call(this,h,y)]),m.push([+b.call(this,h,y),+w.call(this,h,y)])):d.length&&(c(),d=[],m=[]);return d.length&&c(),v.length?v.join(""):null}var e=br,r=br,u=0,i=wr,o=be,a=oo,c=a.key,s=a,l="L",f=.7;return t.x=function(n){return arguments.length?(e=r=n,t):r},t.x0=function(n){return arguments.length?(e=n,t):e},t.x1=function(n){return 
arguments.length?(r=n,t):r},t.y=function(n){return arguments.length?(u=i=n,t):i},t.y0=function(n){return arguments.length?(u=n,t):u},t.y1=function(n){return arguments.length?(i=n,t):i},t.defined=function(n){return arguments.length?(o=n,t):o},t.interpolate=function(n){return arguments.length?(c="function"==typeof n?a=n:(a=Ms.get(n)||oo).key,s=a.reverse||a,l=a.closed?"M":"L",t):c},t.tension=function(n){return arguments.length?(f=n,t):f},t}function No(n){return n.radius}function Lo(n){return[n.x,n.y]}function To(n){return function(){var t=n.apply(this,arguments),e=t[0],r=t[1]+ys;return[e*Math.cos(r),e*Math.sin(r)]}}function qo(){return 64}function zo(){return"circle"}function Ro(n){var t=Math.sqrt(n/Sa);return"M0,"+t+"A"+t+","+t+" 0 1,1 0,"+-t+"A"+t+","+t+" 0 1,1 0,"+t+"Z"}function Do(n,t){return fa(n,Ns),n.id=t,n}function Po(n,t,e,r){var u=n.id;return R(n,"function"==typeof e?function(n,i,o){n.__transition__[u].tween.set(t,r(e.call(n,n.__data__,i,o)))}:(e=r(e),function(n){n.__transition__[u].tween.set(t,e)}))}function Uo(n){return null==n&&(n=""),function(){this.textContent=n}}function jo(n,t,e,r){var i=n.__transition__||(n.__transition__={active:0,count:0}),o=i[e];if(!o){var a=r.time;o=i[e]={tween:new u,time:a,ease:r.ease,delay:r.delay,duration:r.duration},++i.count,Xo.timer(function(r){function u(r){return i.active>e?s():(i.active=e,o.event&&o.event.start.call(n,l,t),o.tween.forEach(function(e,r){(r=r.call(n,l,t))&&v.push(r)}),Xo.timer(function(){return p.c=c(r||1)?be:c,1},0,a),void 0)}function c(r){if(i.active!==e)return s();for(var u=r/g,a=f(u),c=v.length;c>0;)v[--c].call(n,a);return u>=1?(o.event&&o.event.end.call(n,l,t),s()):void 0}function s(){return--i.count?delete i[e]:delete n.__transition__,1}var l=n.__data__,f=o.ease,h=o.delay,g=o.duration,p=Ja,v=[];return p.t=h+a,r>=h?u(r-h):(p.c=u,void 0)},0,a)}}function Ho(n,t){n.attr("transform",function(n){return"translate("+t(n)+",0)"})}function Fo(n,t){n.attr("transform",function(n){return"translate(0,"+t(n)+")"})}function Oo(n){return n.toISOString()}function Yo(n,t,e){function r(t){return n(t)}function u(n,e){var r=n[1]-n[0],u=r/e,i=Xo.bisect(js,u);return i==js.length?[t.year,Yi(n.map(function(n){return n/31536e6}),e)[2]]:i?t[u/js[i-1]<js[i]/u?i-1:i]:[Os,Yi(n,e)[2]]}return r.invert=function(t){return Io(n.invert(t))},r.domain=function(t){return arguments.length?(n.domain(t),r):n.domain().map(Io)},r.nice=function(n,t){function e(e){return!isNaN(e)&&!n.range(e,Io(+e+1),t).length}var i=r.domain(),o=zi(i),a=null==n?u(o,10):"number"==typeof n&&u(o,n);return a&&(n=a[0],t=a[1]),r.domain(Pi(i,t>1?{floor:function(t){for(;e(t=n.floor(t));)t=Io(t-1);return t},ceil:function(t){for(;e(t=n.ceil(t));)t=Io(+t+1);return t}}:n))},r.ticks=function(n,t){var e=zi(r.domain()),i=null==n?u(e,10):"number"==typeof n?u(e,n):!n.range&&[{range:n},t];return i&&(n=i[0],t=i[1]),n.range(e[0],Io(+e[1]+1),1>t?1:t)},r.tickFormat=function(){return e},r.copy=function(){return Yo(n.copy(),t,e)},Fi(r,n)}function Io(n){return new Date(n)}function Zo(n){return JSON.parse(n.responseText)}function Vo(n){var t=Wo.createRange();return t.selectNode(Wo.body),t.createContextualFragment(n.responseText)}var Xo={version:"3.4.3"};Date.now||(Date.now=function(){return+new Date});var $o=[].slice,Bo=function(n){return $o.call(n)},Wo=document,Jo=Wo.documentElement,Go=window;try{Bo(Jo.childNodes)[0].nodeType}catch(Ko){Bo=function(n){for(var t=n.length,e=new Array(t);t--;)e[t]=n[t];return e}}try{Wo.createElement("div").style.setProperty("opacity",0,"")}catch(Qo){var 
na=Go.Element.prototype,ta=na.setAttribute,ea=na.setAttributeNS,ra=Go.CSSStyleDeclaration.prototype,ua=ra.setProperty;na.setAttribute=function(n,t){ta.call(this,n,t+"")},na.setAttributeNS=function(n,t,e){ea.call(this,n,t,e+"")},ra.setProperty=function(n,t,e){ua.call(this,n,t+"",e)}}Xo.ascending=function(n,t){return t>n?-1:n>t?1:n>=t?0:0/0},Xo.descending=function(n,t){return n>t?-1:t>n?1:t>=n?0:0/0},Xo.min=function(n,t){var e,r,u=-1,i=n.length;if(1===arguments.length){for(;++u<i&&!(null!=(e=n[u])&&e>=e);)e=void 0;for(;++u<i;)null!=(r=n[u])&&e>r&&(e=r)}else{for(;++u<i&&!(null!=(e=t.call(n,n[u],u))&&e>=e);)e=void 0;for(;++u<i;)null!=(r=t.call(n,n[u],u))&&e>r&&(e=r)}return e},Xo.max=function(n,t){var e,r,u=-1,i=n.length;if(1===arguments.length){for(;++u<i&&!(null!=(e=n[u])&&e>=e);)e=void 0;for(;++u<i;)null!=(r=n[u])&&r>e&&(e=r)}else{for(;++u<i&&!(null!=(e=t.call(n,n[u],u))&&e>=e);)e=void 0;for(;++u<i;)null!=(r=t.call(n,n[u],u))&&r>e&&(e=r)}return e},Xo.extent=function(n,t){var e,r,u,i=-1,o=n.length;if(1===arguments.length){for(;++i<o&&!(null!=(e=u=n[i])&&e>=e);)e=u=void 0;for(;++i<o;)null!=(r=n[i])&&(e>r&&(e=r),r>u&&(u=r))}else{for(;++i<o&&!(null!=(e=u=t.call(n,n[i],i))&&e>=e);)e=void 0;for(;++i<o;)null!=(r=t.call(n,n[i],i))&&(e>r&&(e=r),r>u&&(u=r))}return[e,u]},Xo.sum=function(n,t){var e,r=0,u=n.length,i=-1;if(1===arguments.length)for(;++i<u;)isNaN(e=+n[i])||(r+=e);else for(;++i<u;)isNaN(e=+t.call(n,n[i],i))||(r+=e);return r},Xo.mean=function(t,e){var r,u=t.length,i=0,o=-1,a=0;if(1===arguments.length)for(;++o<u;)n(r=t[o])&&(i+=(r-i)/++a);else for(;++o<u;)n(r=e.call(t,t[o],o))&&(i+=(r-i)/++a);return a?i:void 0},Xo.quantile=function(n,t){var e=(n.length-1)*t+1,r=Math.floor(e),u=+n[r-1],i=e-r;return i?u+i*(n[r]-u):u},Xo.median=function(t,e){return arguments.length>1&&(t=t.map(e)),t=t.filter(n),t.length?Xo.quantile(t.sort(Xo.ascending),.5):void 0},Xo.bisector=function(n){return{left:function(t,e,r,u){for(arguments.length<3&&(r=0),arguments.length<4&&(u=t.length);u>r;){var i=r+u>>>1;n.call(t,t[i],i)<e?r=i+1:u=i}return r},right:function(t,e,r,u){for(arguments.length<3&&(r=0),arguments.length<4&&(u=t.length);u>r;){var i=r+u>>>1;e<n.call(t,t[i],i)?u=i:r=i+1}return r}}};var ia=Xo.bisector(function(n){return n});Xo.bisectLeft=ia.left,Xo.bisect=Xo.bisectRight=ia.right,Xo.shuffle=function(n){for(var t,e,r=n.length;r;)e=0|Math.random()*r--,t=n[r],n[r]=n[e],n[e]=t;return n},Xo.permute=function(n,t){for(var e=t.length,r=new Array(e);e--;)r[e]=n[t[e]];return r},Xo.pairs=function(n){for(var t,e=0,r=n.length-1,u=n[0],i=new Array(0>r?0:r);r>e;)i[e]=[t=u,u=n[++e]];return i},Xo.zip=function(){if(!(u=arguments.length))return[];for(var n=-1,e=Xo.min(arguments,t),r=new Array(e);++n<e;)for(var u,i=-1,o=r[n]=new Array(u);++i<u;)o[i]=arguments[i][n];return r},Xo.transpose=function(n){return Xo.zip.apply(Xo,n)},Xo.keys=function(n){var t=[];for(var e in n)t.push(e);return t},Xo.values=function(n){var t=[];for(var e in n)t.push(n[e]);return t},Xo.entries=function(n){var t=[];for(var e in n)t.push({key:e,value:n[e]});return t},Xo.merge=function(n){for(var t,e,r,u=n.length,i=-1,o=0;++i<u;)o+=n[i].length;for(e=new Array(o);--u>=0;)for(r=n[u],t=r.length;--t>=0;)e[--o]=r[t];return e};var oa=Math.abs;Xo.range=function(n,t,r){if(arguments.length<3&&(r=1,arguments.length<2&&(t=n,n=0)),1/0===(t-n)/r)throw new Error("infinite range");var u,i=[],o=e(oa(r)),a=-1;if(n*=o,t*=o,r*=o,0>r)for(;(u=n+r*++a)>t;)i.push(u/o);else for(;(u=n+r*++a)<t;)i.push(u/o);return i},Xo.map=function(n){var t=new u;if(n instanceof 
u)n.forEach(function(n,e){t.set(n,e)});else for(var e in n)t.set(e,n[e]);return t},r(u,{has:i,get:function(n){return this[aa+n]},set:function(n,t){return this[aa+n]=t},remove:o,keys:a,values:function(){var n=[];return this.forEach(function(t,e){n.push(e)}),n},entries:function(){var n=[];return this.forEach(function(t,e){n.push({key:t,value:e})}),n},size:c,empty:s,forEach:function(n){for(var t in this)t.charCodeAt(0)===ca&&n.call(this,t.substring(1),this[t])}});var aa="\x00",ca=aa.charCodeAt(0);Xo.nest=function(){function n(t,a,c){if(c>=o.length)return r?r.call(i,a):e?a.sort(e):a;for(var s,l,f,h,g=-1,p=a.length,v=o[c++],d=new u;++g<p;)(h=d.get(s=v(l=a[g])))?h.push(l):d.set(s,[l]);return t?(l=t(),f=function(e,r){l.set(e,n(t,r,c))}):(l={},f=function(e,r){l[e]=n(t,r,c)}),d.forEach(f),l}function t(n,e){if(e>=o.length)return n;var r=[],u=a[e++];return n.forEach(function(n,u){r.push({key:n,values:t(u,e)})}),u?r.sort(function(n,t){return u(n.key,t.key)}):r}var e,r,i={},o=[],a=[];return i.map=function(t,e){return n(e,t,0)},i.entries=function(e){return t(n(Xo.map,e,0),0)},i.key=function(n){return o.push(n),i},i.sortKeys=function(n){return a[o.length-1]=n,i},i.sortValues=function(n){return e=n,i},i.rollup=function(n){return r=n,i},i},Xo.set=function(n){var t=new l;if(n)for(var e=0,r=n.length;r>e;++e)t.add(n[e]);return t},r(l,{has:i,add:function(n){return this[aa+n]=!0,n},remove:function(n){return n=aa+n,n in this&&delete this[n]},values:a,size:c,empty:s,forEach:function(n){for(var t in this)t.charCodeAt(0)===ca&&n.call(this,t.substring(1))}}),Xo.behavior={},Xo.rebind=function(n,t){for(var e,r=1,u=arguments.length;++r<u;)n[e=arguments[r]]=f(n,t,t[e]);return n};var sa=["webkit","ms","moz","Moz","o","O"];Xo.dispatch=function(){for(var n=new p,t=-1,e=arguments.length;++t<e;)n[arguments[t]]=v(n);return n},p.prototype.on=function(n,t){var e=n.indexOf("."),r="";if(e>=0&&(r=n.substring(e+1),n=n.substring(0,e)),n)return arguments.length<2?this[n].on(r):this[n].on(r,t);if(2===arguments.length){if(null==t)for(n in this)this.hasOwnProperty(n)&&this[n].on(r,null);return this}},Xo.event=null,Xo.requote=function(n){return n.replace(la,"\\$&")};var la=/[\\\^\$\*\+\?\|\[\]\(\)\.\{\}]/g,fa={}.__proto__?function(n,t){n.__proto__=t}:function(n,t){for(var e in t)n[e]=t[e]},ha=function(n,t){return t.querySelector(n)},ga=function(n,t){return t.querySelectorAll(n)},pa=Jo[h(Jo,"matchesSelector")],va=function(n,t){return pa.call(n,t)};"function"==typeof Sizzle&&(ha=function(n,t){return Sizzle(n,t)[0]||null},ga=Sizzle,va=Sizzle.matchesSelector),Xo.selection=function(){return xa};var da=Xo.selection.prototype=[];da.select=function(n){var t,e,r,u,i=[];n=M(n);for(var o=-1,a=this.length;++o<a;){i.push(t=[]),t.parentNode=(r=this[o]).parentNode;for(var c=-1,s=r.length;++c<s;)(u=r[c])?(t.push(e=n.call(u,u.__data__,c,o)),e&&"__data__"in u&&(e.__data__=u.__data__)):t.push(null)}return x(i)},da.selectAll=function(n){var t,e,r=[];n=_(n);for(var u=-1,i=this.length;++u<i;)for(var o=this[u],a=-1,c=o.length;++a<c;)(e=o[a])&&(r.push(t=Bo(n.call(e,e.__data__,a,u))),t.parentNode=e);return x(r)};var ma={svg:"http://www.w3.org/2000/svg",xhtml:"http://www.w3.org/1999/xhtml",xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"};Xo.ns={prefix:ma,qualify:function(n){var t=n.indexOf(":"),e=n;return t>=0&&(e=n.substring(0,t),n=n.substring(t+1)),ma.hasOwnProperty(e)?{space:ma[e],local:n}:n}},da.attr=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node();return 
n=Xo.ns.qualify(n),n.local?e.getAttributeNS(n.space,n.local):e.getAttribute(n)}for(t in n)this.each(b(t,n[t]));return this}return this.each(b(n,t))},da.classed=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node(),r=(n=k(n)).length,u=-1;if(t=e.classList){for(;++u<r;)if(!t.contains(n[u]))return!1}else for(t=e.getAttribute("class");++u<r;)if(!S(n[u]).test(t))return!1;return!0}for(t in n)this.each(E(t,n[t]));return this}return this.each(E(n,t))},da.style=function(n,t,e){var r=arguments.length;if(3>r){if("string"!=typeof n){2>r&&(t="");for(e in n)this.each(C(e,n[e],t));return this}if(2>r)return Go.getComputedStyle(this.node(),null).getPropertyValue(n);e=""}return this.each(C(n,t,e))},da.property=function(n,t){if(arguments.length<2){if("string"==typeof n)return this.node()[n];for(t in n)this.each(N(t,n[t]));return this}return this.each(N(n,t))},da.text=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.textContent=null==t?"":t}:null==n?function(){this.textContent=""}:function(){this.textContent=n}):this.node().textContent},da.html=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.innerHTML=null==t?"":t}:null==n?function(){this.innerHTML=""}:function(){this.innerHTML=n}):this.node().innerHTML},da.append=function(n){return n=L(n),this.select(function(){return this.appendChild(n.apply(this,arguments))})},da.insert=function(n,t){return n=L(n),t=M(t),this.select(function(){return this.insertBefore(n.apply(this,arguments),t.apply(this,arguments)||null)})},da.remove=function(){return this.each(function(){var n=this.parentNode;n&&n.removeChild(this)})},da.data=function(n,t){function e(n,e){var r,i,o,a=n.length,f=e.length,h=Math.min(a,f),g=new Array(f),p=new Array(f),v=new Array(a);if(t){var d,m=new u,y=new u,x=[];for(r=-1;++r<a;)d=t.call(i=n[r],i.__data__,r),m.has(d)?v[r]=i:m.set(d,i),x.push(d);for(r=-1;++r<f;)d=t.call(e,o=e[r],r),(i=m.get(d))?(g[r]=i,i.__data__=o):y.has(d)||(p[r]=T(o)),y.set(d,o),m.remove(d);for(r=-1;++r<a;)m.has(x[r])&&(v[r]=n[r])}else{for(r=-1;++r<h;)i=n[r],o=e[r],i?(i.__data__=o,g[r]=i):p[r]=T(o);for(;f>r;++r)p[r]=T(e[r]);for(;a>r;++r)v[r]=n[r]}p.update=g,p.parentNode=g.parentNode=v.parentNode=n.parentNode,c.push(p),s.push(g),l.push(v)}var r,i,o=-1,a=this.length;if(!arguments.length){for(n=new Array(a=(r=this[0]).length);++o<a;)(i=r[o])&&(n[o]=i.__data__);return n}var c=D([]),s=x([]),l=x([]);if("function"==typeof n)for(;++o<a;)e(r=this[o],n.call(r,r.parentNode.__data__,o));else for(;++o<a;)e(r=this[o],n);return s.enter=function(){return c},s.exit=function(){return l},s},da.datum=function(n){return arguments.length?this.property("__data__",n):this.property("__data__")},da.filter=function(n){var t,e,r,u=[];"function"!=typeof n&&(n=q(n));for(var i=0,o=this.length;o>i;i++){u.push(t=[]),t.parentNode=(e=this[i]).parentNode;for(var a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a,i)&&t.push(r)}return x(u)},da.order=function(){for(var n=-1,t=this.length;++n<t;)for(var e,r=this[n],u=r.length-1,i=r[u];--u>=0;)(e=r[u])&&(i&&i!==e.nextSibling&&i.parentNode.insertBefore(e,i),i=e);return this},da.sort=function(n){n=z.apply(this,arguments);for(var t=-1,e=this.length;++t<e;)this[t].sort(n);return this.order()},da.each=function(n){return R(this,function(t,e,r){n.call(t,t.__data__,e,r)})},da.call=function(n){var t=Bo(arguments);return n.apply(t[0]=this,t),this},da.empty=function(){return!this.node()},da.node=function(){for(var 
n=0,t=this.length;t>n;n++)for(var e=this[n],r=0,u=e.length;u>r;r++){var i=e[r];if(i)return i}return null},da.size=function(){var n=0;return this.each(function(){++n}),n};var ya=[];Xo.selection.enter=D,Xo.selection.enter.prototype=ya,ya.append=da.append,ya.empty=da.empty,ya.node=da.node,ya.call=da.call,ya.size=da.size,ya.select=function(n){for(var t,e,r,u,i,o=[],a=-1,c=this.length;++a<c;){r=(u=this[a]).update,o.push(t=[]),t.parentNode=u.parentNode;for(var s=-1,l=u.length;++s<l;)(i=u[s])?(t.push(r[s]=e=n.call(u.parentNode,i.__data__,s,a)),e.__data__=i.__data__):t.push(null)}return x(o)},ya.insert=function(n,t){return arguments.length<2&&(t=P(this)),da.insert.call(this,n,t)},da.transition=function(){for(var n,t,e=ks||++Ls,r=[],u=Es||{time:Date.now(),ease:yu,delay:0,duration:250},i=-1,o=this.length;++i<o;){r.push(n=[]);for(var a=this[i],c=-1,s=a.length;++c<s;)(t=a[c])&&jo(t,c,e,u),n.push(t)}return Do(r,e)},da.interrupt=function(){return this.each(U)},Xo.select=function(n){var t=["string"==typeof n?ha(n,Wo):n];return t.parentNode=Jo,x([t])},Xo.selectAll=function(n){var t=Bo("string"==typeof n?ga(n,Wo):n);return t.parentNode=Jo,x([t])};var xa=Xo.select(Jo);da.on=function(n,t,e){var r=arguments.length;if(3>r){if("string"!=typeof n){2>r&&(t=!1);for(e in n)this.each(j(e,n[e],t));return this}if(2>r)return(r=this.node()["__on"+n])&&r._;e=!1}return this.each(j(n,t,e))};var Ma=Xo.map({mouseenter:"mouseover",mouseleave:"mouseout"});Ma.forEach(function(n){"on"+n in Wo&&Ma.remove(n)});var _a="onselectstart"in Wo?null:h(Jo.style,"userSelect"),ba=0;Xo.mouse=function(n){return Y(n,m())};var wa=/WebKit/.test(Go.navigator.userAgent)?-1:0;Xo.touches=function(n,t){return arguments.length<2&&(t=m().touches),t?Bo(t).map(function(t){var e=Y(n,t);return e.identifier=t.identifier,e}):[]},Xo.behavior.drag=function(){function n(){this.on("mousedown.drag",o).on("touchstart.drag",a)}function t(){return Xo.event.changedTouches[0].identifier}function e(n,t){return Xo.touches(n).filter(function(n){return n.identifier===t})[0]}function r(n,t,e,r){return function(){function o(){var n=t(l,g),e=n[0]-v[0],r=n[1]-v[1];d|=e|r,v=n,f({type:"drag",x:n[0]+c[0],y:n[1]+c[1],dx:e,dy:r})}function a(){m.on(e+"."+p,null).on(r+"."+p,null),y(d&&Xo.event.target===h),f({type:"dragend"})}var c,s=this,l=s.parentNode,f=u.of(s,arguments),h=Xo.event.target,g=n(),p=null==g?"drag":"drag-"+g,v=t(l,g),d=0,m=Xo.select(Go).on(e+"."+p,o).on(r+"."+p,a),y=O();i?(c=i.apply(s,arguments),c=[c.x-v[0],c.y-v[1]]):c=[0,0],f({type:"dragstart"})}}var u=y(n,"drag","dragstart","dragend"),i=null,o=r(g,Xo.mouse,"mousemove","mouseup"),a=r(t,e,"touchmove","touchend");return n.origin=function(t){return arguments.length?(i=t,n):i},Xo.rebind(n,u,"on")};var Sa=Math.PI,ka=2*Sa,Ea=Sa/2,Aa=1e-6,Ca=Aa*Aa,Na=Sa/180,La=180/Sa,Ta=Math.SQRT2,qa=2,za=4;Xo.interpolateZoom=function(n,t){function e(n){var t=n*y;if(m){var e=B(v),o=i/(qa*h)*(e*W(Ta*t+v)-$(v));return[r+o*s,u+o*l,i*e/B(Ta*t+v)]}return[r+n*s,u+n*l,i*Math.exp(Ta*t)]}var r=n[0],u=n[1],i=n[2],o=t[0],a=t[1],c=t[2],s=o-r,l=a-u,f=s*s+l*l,h=Math.sqrt(f),g=(c*c-i*i+za*f)/(2*i*qa*h),p=(c*c-i*i-za*f)/(2*c*qa*h),v=Math.log(Math.sqrt(g*g+1)-g),d=Math.log(Math.sqrt(p*p+1)-p),m=d-v,y=(m||Math.log(c/i))/Ta;return e.duration=1e3*y,e},Xo.behavior.zoom=function(){function n(n){n.on(A,s).on(Pa+".zoom",f).on(C,h).on("dblclick.zoom",g).on(L,l)}function t(n){return[(n[0]-S.x)/S.k,(n[1]-S.y)/S.k]}function e(n){return[n[0]*S.k+S.x,n[1]*S.k+S.y]}function r(n){S.k=Math.max(E[0],Math.min(E[1],n))}function 
u(n,t){t=e(t),S.x+=n[0]-t[0],S.y+=n[1]-t[1]}function i(){_&&_.domain(M.range().map(function(n){return(n-S.x)/S.k}).map(M.invert)),w&&w.domain(b.range().map(function(n){return(n-S.y)/S.k}).map(b.invert))}function o(n){n({type:"zoomstart"})}function a(n){i(),n({type:"zoom",scale:S.k,translate:[S.x,S.y]})}function c(n){n({type:"zoomend"})}function s(){function n(){l=1,u(Xo.mouse(r),g),a(i)}function e(){f.on(C,Go===r?h:null).on(N,null),p(l&&Xo.event.target===s),c(i)}var r=this,i=T.of(r,arguments),s=Xo.event.target,l=0,f=Xo.select(Go).on(C,n).on(N,e),g=t(Xo.mouse(r)),p=O();U.call(r),o(i)}function l(){function n(){var n=Xo.touches(g);return h=S.k,n.forEach(function(n){n.identifier in v&&(v[n.identifier]=t(n))}),n}function e(){for(var t=Xo.event.changedTouches,e=0,i=t.length;i>e;++e)v[t[e].identifier]=null;var o=n(),c=Date.now();if(1===o.length){if(500>c-x){var s=o[0],l=v[s.identifier];r(2*S.k),u(s,l),d(),a(p)}x=c}else if(o.length>1){var s=o[0],f=o[1],h=s[0]-f[0],g=s[1]-f[1];m=h*h+g*g}}function i(){for(var n,t,e,i,o=Xo.touches(g),c=0,s=o.length;s>c;++c,i=null)if(e=o[c],i=v[e.identifier]){if(t)break;n=e,t=i}if(i){var l=(l=e[0]-n[0])*l+(l=e[1]-n[1])*l,f=m&&Math.sqrt(l/m);n=[(n[0]+e[0])/2,(n[1]+e[1])/2],t=[(t[0]+i[0])/2,(t[1]+i[1])/2],r(f*h)}x=null,u(n,t),a(p)}function f(){if(Xo.event.touches.length){for(var t=Xo.event.changedTouches,e=0,r=t.length;r>e;++e)delete v[t[e].identifier];for(var u in v)return void n()}b.on(M,null).on(_,null),w.on(A,s).on(L,l),k(),c(p)}var h,g=this,p=T.of(g,arguments),v={},m=0,y=Xo.event.changedTouches[0].identifier,M="touchmove.zoom-"+y,_="touchend.zoom-"+y,b=Xo.select(Go).on(M,i).on(_,f),w=Xo.select(g).on(A,null).on(L,e),k=O();U.call(g),e(),o(p)}function f(){var n=T.of(this,arguments);m?clearTimeout(m):(U.call(this),o(n)),m=setTimeout(function(){m=null,c(n)},50),d();var e=v||Xo.mouse(this);p||(p=t(e)),r(Math.pow(2,.002*Ra())*S.k),u(e,p),a(n)}function h(){p=null}function g(){var n=T.of(this,arguments),e=Xo.mouse(this),i=t(e),s=Math.log(S.k)/Math.LN2;o(n),r(Math.pow(2,Xo.event.shiftKey?Math.ceil(s)-1:Math.floor(s)+1)),u(e,i),a(n),c(n)}var p,v,m,x,M,_,b,w,S={x:0,y:0,k:1},k=[960,500],E=Da,A="mousedown.zoom",C="mousemove.zoom",N="mouseup.zoom",L="touchstart.zoom",T=y(n,"zoomstart","zoom","zoomend");return n.event=function(n){n.each(function(){var n=T.of(this,arguments),t=S;ks?Xo.select(this).transition().each("start.zoom",function(){S=this.__chart__||{x:0,y:0,k:1},o(n)}).tween("zoom:zoom",function(){var e=k[0],r=k[1],u=e/2,i=r/2,o=Xo.interpolateZoom([(u-S.x)/S.k,(i-S.y)/S.k,e/S.k],[(u-t.x)/t.k,(i-t.y)/t.k,e/t.k]);return function(t){var r=o(t),c=e/r[2];this.__chart__=S={x:u-r[0]*c,y:i-r[1]*c,k:c},a(n)}}).each("end.zoom",function(){c(n)}):(this.__chart__=S,o(n),a(n),c(n))})},n.translate=function(t){return arguments.length?(S={x:+t[0],y:+t[1],k:S.k},i(),n):[S.x,S.y]},n.scale=function(t){return arguments.length?(S={x:S.x,y:S.y,k:+t},i(),n):S.k},n.scaleExtent=function(t){return arguments.length?(E=null==t?Da:[+t[0],+t[1]],n):E},n.center=function(t){return arguments.length?(v=t&&[+t[0],+t[1]],n):v},n.size=function(t){return arguments.length?(k=t&&[+t[0],+t[1]],n):k},n.x=function(t){return arguments.length?(_=t,M=t.copy(),S={x:0,y:0,k:1},n):_},n.y=function(t){return arguments.length?(w=t,b=t.copy(),S={x:0,y:0,k:1},n):w},Xo.rebind(n,T,"on")};var Ra,Da=[0,1/0],Pa="onwheel"in Wo?(Ra=function(){return-Xo.event.deltaY*(Xo.event.deltaMode?120:1)},"wheel"):"onmousewheel"in Wo?(Ra=function(){return 
Xo.event.wheelDelta},"mousewheel"):(Ra=function(){return-Xo.event.detail},"MozMousePixelScroll");G.prototype.toString=function(){return this.rgb()+""},Xo.hsl=function(n,t,e){return 1===arguments.length?n instanceof Q?K(n.h,n.s,n.l):dt(""+n,mt,K):K(+n,+t,+e)};var Ua=Q.prototype=new G;Ua.brighter=function(n){return n=Math.pow(.7,arguments.length?n:1),K(this.h,this.s,this.l/n)},Ua.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),K(this.h,this.s,n*this.l)},Ua.rgb=function(){return nt(this.h,this.s,this.l)},Xo.hcl=function(n,t,e){return 1===arguments.length?n instanceof et?tt(n.h,n.c,n.l):n instanceof it?at(n.l,n.a,n.b):at((n=yt((n=Xo.rgb(n)).r,n.g,n.b)).l,n.a,n.b):tt(+n,+t,+e)};var ja=et.prototype=new G;ja.brighter=function(n){return tt(this.h,this.c,Math.min(100,this.l+Ha*(arguments.length?n:1)))},ja.darker=function(n){return tt(this.h,this.c,Math.max(0,this.l-Ha*(arguments.length?n:1)))},ja.rgb=function(){return rt(this.h,this.c,this.l).rgb()},Xo.lab=function(n,t,e){return 1===arguments.length?n instanceof it?ut(n.l,n.a,n.b):n instanceof et?rt(n.l,n.c,n.h):yt((n=Xo.rgb(n)).r,n.g,n.b):ut(+n,+t,+e)};var Ha=18,Fa=.95047,Oa=1,Ya=1.08883,Ia=it.prototype=new G;Ia.brighter=function(n){return ut(Math.min(100,this.l+Ha*(arguments.length?n:1)),this.a,this.b)},Ia.darker=function(n){return ut(Math.max(0,this.l-Ha*(arguments.length?n:1)),this.a,this.b)},Ia.rgb=function(){return ot(this.l,this.a,this.b)},Xo.rgb=function(n,t,e){return 1===arguments.length?n instanceof pt?gt(n.r,n.g,n.b):dt(""+n,gt,nt):gt(~~n,~~t,~~e)};var Za=pt.prototype=new G;Za.brighter=function(n){n=Math.pow(.7,arguments.length?n:1);var t=this.r,e=this.g,r=this.b,u=30;return t||e||r?(t&&u>t&&(t=u),e&&u>e&&(e=u),r&&u>r&&(r=u),gt(Math.min(255,~~(t/n)),Math.min(255,~~(e/n)),Math.min(255,~~(r/n)))):gt(u,u,u)},Za.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),gt(~~(n*this.r),~~(n*this.g),~~(n*this.b))},Za.hsl=function(){return mt(this.r,this.g,this.b)},Za.toString=function(){return"#"+vt(this.r)+vt(this.g)+vt(this.b)};var 
Va=Xo.map({aliceblue:15792383,antiquewhite:16444375,aqua:65535,aquamarine:8388564,azure:15794175,beige:16119260,bisque:16770244,black:0,blanchedalmond:16772045,blue:255,blueviolet:9055202,brown:10824234,burlywood:14596231,cadetblue:6266528,chartreuse:8388352,chocolate:13789470,coral:16744272,cornflowerblue:6591981,cornsilk:16775388,crimson:14423100,cyan:65535,darkblue:139,darkcyan:35723,darkgoldenrod:12092939,darkgray:11119017,darkgreen:25600,darkgrey:11119017,darkkhaki:12433259,darkmagenta:9109643,darkolivegreen:5597999,darkorange:16747520,darkorchid:10040012,darkred:9109504,darksalmon:15308410,darkseagreen:9419919,darkslateblue:4734347,darkslategray:3100495,darkslategrey:3100495,darkturquoise:52945,darkviolet:9699539,deeppink:16716947,deepskyblue:49151,dimgray:6908265,dimgrey:6908265,dodgerblue:2003199,firebrick:11674146,floralwhite:16775920,forestgreen:2263842,fuchsia:16711935,gainsboro:14474460,ghostwhite:16316671,gold:16766720,goldenrod:14329120,gray:8421504,green:32768,greenyellow:11403055,grey:8421504,honeydew:15794160,hotpink:16738740,indianred:13458524,indigo:4915330,ivory:16777200,khaki:15787660,lavender:15132410,lavenderblush:16773365,lawngreen:8190976,lemonchiffon:16775885,lightblue:11393254,lightcoral:15761536,lightcyan:14745599,lightgoldenrodyellow:16448210,lightgray:13882323,lightgreen:9498256,lightgrey:13882323,lightpink:16758465,lightsalmon:16752762,lightseagreen:2142890,lightskyblue:8900346,lightslategray:7833753,lightslategrey:7833753,lightsteelblue:11584734,lightyellow:16777184,lime:65280,limegreen:3329330,linen:16445670,magenta:16711935,maroon:8388608,mediumaquamarine:6737322,mediumblue:205,mediumorchid:12211667,mediumpurple:9662683,mediumseagreen:3978097,mediumslateblue:8087790,mediumspringgreen:64154,mediumturquoise:4772300,mediumvioletred:13047173,midnightblue:1644912,mintcream:16121850,mistyrose:16770273,moccasin:16770229,navajowhite:16768685,navy:128,oldlace:16643558,olive:8421376,olivedrab:7048739,orange:16753920,orangered:16729344,orchid:14315734,palegoldenrod:15657130,palegreen:10025880,paleturquoise:11529966,palevioletred:14381203,papayawhip:16773077,peachpuff:16767673,peru:13468991,pink:16761035,plum:14524637,powderblue:11591910,purple:8388736,red:16711680,rosybrown:12357519,royalblue:4286945,saddlebrown:9127187,salmon:16416882,sandybrown:16032864,seagreen:3050327,seashell:16774638,sienna:10506797,silver:12632256,skyblue:8900331,slateblue:6970061,slategray:7372944,slategrey:7372944,snow:16775930,springgreen:65407,steelblue:4620980,tan:13808780,teal:32896,thistle:14204888,tomato:16737095,turquoise:4251856,violet:15631086,wheat:16113331,white:16777215,whitesmoke:16119285,yellow:16776960,yellowgreen:10145074});Va.forEach(function(n,t){Va.set(n,ft(t))}),Xo.functor=_t,Xo.xhr=wt(bt),Xo.dsv=function(n,t){function e(n,e,i){arguments.length<3&&(i=e,e=null);var o=St(n,t,null==e?r:u(e),i);return o.row=function(n){return arguments.length?o.response(null==(e=n)?r:u(n)):e},o}function r(n){return e.parse(n.responseText)}function u(n){return function(t){return e.parse(t.responseText,n)}}function i(t){return t.map(o).join(n)}function o(n){return a.test(n)?'"'+n.replace(/\"/g,'""')+'"':n}var a=new RegExp('["'+n+"\n]"),c=n.charCodeAt(0);return e.parse=function(n,t){var r;return e.parseRows(n,function(n,e){if(r)return r(n,e-1);var u=new Function("d","return {"+n.map(function(n,t){return JSON.stringify(n)+": d["+t+"]"}).join(",")+"}");r=t?function(n,e){return t(u(n),e)}:u})},e.parseRows=function(n,t){function e(){if(l>=s)return o;if(u)return u=!1,i;var 
t=l;if(34===n.charCodeAt(t)){for(var e=t;e++<s;)if(34===n.charCodeAt(e)){if(34!==n.charCodeAt(e+1))break;++e}l=e+2;var r=n.charCodeAt(e+1);return 13===r?(u=!0,10===n.charCodeAt(e+2)&&++l):10===r&&(u=!0),n.substring(t+1,e).replace(/""/g,'"')}for(;s>l;){var r=n.charCodeAt(l++),a=1;if(10===r)u=!0;else if(13===r)u=!0,10===n.charCodeAt(l)&&(++l,++a);else if(r!==c)continue;return n.substring(t,l-a)}return n.substring(t)}for(var r,u,i={},o={},a=[],s=n.length,l=0,f=0;(r=e())!==o;){for(var h=[];r!==i&&r!==o;)h.push(r),r=e();(!t||(h=t(h,f++)))&&a.push(h)}return a},e.format=function(t){if(Array.isArray(t[0]))return e.formatRows(t);var r=new l,u=[];return t.forEach(function(n){for(var t in n)r.has(t)||u.push(r.add(t))}),[u.map(o).join(n)].concat(t.map(function(t){return u.map(function(n){return o(t[n])}).join(n)})).join("\n")},e.formatRows=function(n){return n.map(i).join("\n")},e},Xo.csv=Xo.dsv(",","text/csv"),Xo.tsv=Xo.dsv("	","text/tab-separated-values");var Xa,$a,Ba,Wa,Ja,Ga=Go[h(Go,"requestAnimationFrame")]||function(n){setTimeout(n,17)};Xo.timer=function(n,t,e){var r=arguments.length;2>r&&(t=0),3>r&&(e=Date.now());var u=e+t,i={c:n,t:u,f:!1,n:null};$a?$a.n=i:Xa=i,$a=i,Ba||(Wa=clearTimeout(Wa),Ba=1,Ga(Et))},Xo.timer.flush=function(){At(),Ct()},Xo.round=function(n,t){return t?Math.round(n*(t=Math.pow(10,t)))/t:Math.round(n)};var Ka=["y","z","a","f","p","n","\xb5","m","","k","M","G","T","P","E","Z","Y"].map(Lt);Xo.formatPrefix=function(n,t){var e=0;return n&&(0>n&&(n*=-1),t&&(n=Xo.round(n,Nt(n,t))),e=1+Math.floor(1e-12+Math.log(n)/Math.LN10),e=Math.max(-24,Math.min(24,3*Math.floor((0>=e?e+1:e-1)/3)))),Ka[8+e/3]};var Qa=/(?:([^{])?([<>=^]))?([+\- ])?([$#])?(0)?(\d+)?(,)?(\.-?\d+)?([a-z%])?/i,nc=Xo.map({b:function(n){return n.toString(2)},c:function(n){return String.fromCharCode(n)},o:function(n){return n.toString(8)},x:function(n){return n.toString(16)},X:function(n){return n.toString(16).toUpperCase()},g:function(n,t){return n.toPrecision(t)},e:function(n,t){return n.toExponential(t)},f:function(n,t){return n.toFixed(t)},r:function(n,t){return(n=Xo.round(n,Nt(n,t))).toFixed(Math.max(0,Math.min(20,Nt(n*(1+1e-15),t))))}}),tc=Xo.time={},ec=Date;zt.prototype={getDate:function(){return this._.getUTCDate()},getDay:function(){return this._.getUTCDay()},getFullYear:function(){return this._.getUTCFullYear()},getHours:function(){return this._.getUTCHours()},getMilliseconds:function(){return this._.getUTCMilliseconds()},getMinutes:function(){return this._.getUTCMinutes()},getMonth:function(){return this._.getUTCMonth()},getSeconds:function(){return this._.getUTCSeconds()},getTime:function(){return this._.getTime()},getTimezoneOffset:function(){return 0},valueOf:function(){return this._.valueOf()},setDate:function(){rc.setUTCDate.apply(this._,arguments)},setDay:function(){rc.setUTCDay.apply(this._,arguments)},setFullYear:function(){rc.setUTCFullYear.apply(this._,arguments)},setHours:function(){rc.setUTCHours.apply(this._,arguments)},setMilliseconds:function(){rc.setUTCMilliseconds.apply(this._,arguments)},setMinutes:function(){rc.setUTCMinutes.apply(this._,arguments)},setMonth:function(){rc.setUTCMonth.apply(this._,arguments)},setSeconds:function(){rc.setUTCSeconds.apply(this._,arguments)},setTime:function(){rc.setTime.apply(this._,arguments)}};var rc=Date.prototype;tc.year=Rt(function(n){return n=tc.day(n),n.setMonth(0,1),n},function(n,t){n.setFullYear(n.getFullYear()+t)},function(n){return n.getFullYear()}),tc.years=tc.year.range,tc.years.utc=tc.year.utc.range,tc.day=Rt(function(n){var t=new ec(2e3,0);return 
t.setFullYear(n.getFullYear(),n.getMonth(),n.getDate()),t},function(n,t){n.setDate(n.getDate()+t)},function(n){return n.getDate()-1}),tc.days=tc.day.range,tc.days.utc=tc.day.utc.range,tc.dayOfYear=function(n){var t=tc.year(n);return Math.floor((n-t-6e4*(n.getTimezoneOffset()-t.getTimezoneOffset()))/864e5)},["sunday","monday","tuesday","wednesday","thursday","friday","saturday"].forEach(function(n,t){t=7-t;var e=tc[n]=Rt(function(n){return(n=tc.day(n)).setDate(n.getDate()-(n.getDay()+t)%7),n},function(n,t){n.setDate(n.getDate()+7*Math.floor(t))},function(n){var e=tc.year(n).getDay();return Math.floor((tc.dayOfYear(n)+(e+t)%7)/7)-(e!==t)});tc[n+"s"]=e.range,tc[n+"s"].utc=e.utc.range,tc[n+"OfYear"]=function(n){var e=tc.year(n).getDay();return Math.floor((tc.dayOfYear(n)+(e+t)%7)/7)}}),tc.week=tc.sunday,tc.weeks=tc.sunday.range,tc.weeks.utc=tc.sunday.utc.range,tc.weekOfYear=tc.sundayOfYear;var uc={"-":"",_:" ",0:"0"},ic=/^\s*\d+/,oc=/^%/;Xo.locale=function(n){return{numberFormat:Tt(n),timeFormat:Pt(n)}};var ac=Xo.locale({decimal:".",thousands:",",grouping:[3],currency:["$",""],dateTime:"%a %b %e %X %Y",date:"%m/%d/%Y",time:"%H:%M:%S",periods:["AM","PM"],days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]});Xo.format=ac.numberFormat,Xo.geo={},re.prototype={s:0,t:0,add:function(n){ue(n,this.t,cc),ue(cc.s,this.s,this),this.s?this.t+=cc.t:this.s=cc.t},reset:function(){this.s=this.t=0},valueOf:function(){return this.s}};var cc=new re;Xo.geo.stream=function(n,t){n&&sc.hasOwnProperty(n.type)?sc[n.type](n,t):ie(n,t)};var sc={Feature:function(n,t){ie(n.geometry,t)},FeatureCollection:function(n,t){for(var e=n.features,r=-1,u=e.length;++r<u;)ie(e[r].geometry,t)}},lc={Sphere:function(n,t){t.sphere()},Point:function(n,t){n=n.coordinates,t.point(n[0],n[1],n[2])},MultiPoint:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)n=e[r],t.point(n[0],n[1],n[2])},LineString:function(n,t){oe(n.coordinates,t,0)},MultiLineString:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)oe(e[r],t,0)},Polygon:function(n,t){ae(n.coordinates,t)},MultiPolygon:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)ae(e[r],t)},GeometryCollection:function(n,t){for(var e=n.geometries,r=-1,u=e.length;++r<u;)ie(e[r],t)}};Xo.geo.area=function(n){return fc=0,Xo.geo.stream(n,gc),fc};var fc,hc=new re,gc={sphere:function(){fc+=4*Sa},point:g,lineStart:g,lineEnd:g,polygonStart:function(){hc.reset(),gc.lineStart=ce},polygonEnd:function(){var n=2*hc;fc+=0>n?4*Sa+n:n,gc.lineStart=gc.lineEnd=gc.point=g}};Xo.geo.bounds=function(){function n(n,t){x.push(M=[l=n,h=n]),f>t&&(f=t),t>g&&(g=t)}function t(t,e){var r=se([t*Na,e*Na]);if(m){var u=fe(m,r),i=[u[1],-u[0],0],o=fe(i,u);pe(o),o=ve(o);var c=t-p,s=c>0?1:-1,v=o[0]*La*s,d=oa(c)>180;if(d^(v>s*p&&s*t>v)){var y=o[1]*La;y>g&&(g=y)}else if(v=(v+360)%360-180,d^(v>s*p&&s*t>v)){var y=-o[1]*La;f>y&&(f=y)}else f>e&&(f=e),e>g&&(g=e);d?p>t?a(l,t)>a(l,h)&&(h=t):a(t,h)>a(l,h)&&(l=t):h>=l?(l>t&&(l=t),t>h&&(h=t)):t>p?a(l,t)>a(l,h)&&(h=t):a(t,h)>a(l,h)&&(l=t)}else n(t,e);m=r,p=t}function e(){_.point=t}function r(){M[0]=l,M[1]=h,_.point=n,m=null}function u(n,e){if(m){var r=n-p;y+=oa(r)>180?r+(r>0?360:-360):r}else v=n,d=e;gc.point(n,e),t(n,e)}function i(){gc.lineStart()}function 
o(){u(v,d),gc.lineEnd(),oa(y)>Aa&&(l=-(h=180)),M[0]=l,M[1]=h,m=null}function a(n,t){return(t-=n)<0?t+360:t}function c(n,t){return n[0]-t[0]}function s(n,t){return t[0]<=t[1]?t[0]<=n&&n<=t[1]:n<t[0]||t[1]<n}var l,f,h,g,p,v,d,m,y,x,M,_={point:n,lineStart:e,lineEnd:r,polygonStart:function(){_.point=u,_.lineStart=i,_.lineEnd=o,y=0,gc.polygonStart()},polygonEnd:function(){gc.polygonEnd(),_.point=n,_.lineStart=e,_.lineEnd=r,0>hc?(l=-(h=180),f=-(g=90)):y>Aa?g=90:-Aa>y&&(f=-90),M[0]=l,M[1]=h}};return function(n){g=h=-(l=f=1/0),x=[],Xo.geo.stream(n,_);var t=x.length;if(t){x.sort(c);for(var e,r=1,u=x[0],i=[u];t>r;++r)e=x[r],s(e[0],u)||s(e[1],u)?(a(u[0],e[1])>a(u[0],u[1])&&(u[1]=e[1]),a(e[0],u[1])>a(u[0],u[1])&&(u[0]=e[0])):i.push(u=e);for(var o,e,p=-1/0,t=i.length-1,r=0,u=i[t];t>=r;u=e,++r)e=i[r],(o=a(u[1],e[0]))>p&&(p=o,l=e[0],h=u[1])}return x=M=null,1/0===l||1/0===f?[[0/0,0/0],[0/0,0/0]]:[[l,f],[h,g]]}}(),Xo.geo.centroid=function(n){pc=vc=dc=mc=yc=xc=Mc=_c=bc=wc=Sc=0,Xo.geo.stream(n,kc);var t=bc,e=wc,r=Sc,u=t*t+e*e+r*r;return Ca>u&&(t=xc,e=Mc,r=_c,Aa>vc&&(t=dc,e=mc,r=yc),u=t*t+e*e+r*r,Ca>u)?[0/0,0/0]:[Math.atan2(e,t)*La,X(r/Math.sqrt(u))*La]};var pc,vc,dc,mc,yc,xc,Mc,_c,bc,wc,Sc,kc={sphere:g,point:me,lineStart:xe,lineEnd:Me,polygonStart:function(){kc.lineStart=_e},polygonEnd:function(){kc.lineStart=xe}},Ec=Ee(be,Te,ze,[-Sa,-Sa/2]),Ac=1e9;Xo.geo.clipExtent=function(){var n,t,e,r,u,i,o={stream:function(n){return u&&(u.valid=!1),u=i(n),u.valid=!0,u},extent:function(a){return arguments.length?(i=Pe(n=+a[0][0],t=+a[0][1],e=+a[1][0],r=+a[1][1]),u&&(u.valid=!1,u=null),o):[[n,t],[e,r]]}};return o.extent([[0,0],[960,500]])},(Xo.geo.conicEqualArea=function(){return je(He)}).raw=He,Xo.geo.albers=function(){return Xo.geo.conicEqualArea().rotate([96,0]).center([-.6,38.7]).parallels([29.5,45.5]).scale(1070)},Xo.geo.albersUsa=function(){function n(n){var i=n[0],o=n[1];return t=null,e(i,o),t||(r(i,o),t)||u(i,o),t}var t,e,r,u,i=Xo.geo.albers(),o=Xo.geo.conicEqualArea().rotate([154,0]).center([-2,58.5]).parallels([55,65]),a=Xo.geo.conicEqualArea().rotate([157,0]).center([-3,19.9]).parallels([8,18]),c={point:function(n,e){t=[n,e]}};return n.invert=function(n){var t=i.scale(),e=i.translate(),r=(n[0]-e[0])/t,u=(n[1]-e[1])/t;return(u>=.12&&.234>u&&r>=-.425&&-.214>r?o:u>=.166&&.234>u&&r>=-.214&&-.115>r?a:i).invert(n)},n.stream=function(n){var t=i.stream(n),e=o.stream(n),r=a.stream(n);return{point:function(n,u){t.point(n,u),e.point(n,u),r.point(n,u)},sphere:function(){t.sphere(),e.sphere(),r.sphere()},lineStart:function(){t.lineStart(),e.lineStart(),r.lineStart()},lineEnd:function(){t.lineEnd(),e.lineEnd(),r.lineEnd()},polygonStart:function(){t.polygonStart(),e.polygonStart(),r.polygonStart()},polygonEnd:function(){t.polygonEnd(),e.polygonEnd(),r.polygonEnd()}}},n.precision=function(t){return arguments.length?(i.precision(t),o.precision(t),a.precision(t),n):i.precision()},n.scale=function(t){return arguments.length?(i.scale(t),o.scale(.35*t),a.scale(t),n.translate(i.translate())):i.scale()},n.translate=function(t){if(!arguments.length)return i.translate();var s=i.scale(),l=+t[0],f=+t[1];return e=i.translate(t).clipExtent([[l-.455*s,f-.238*s],[l+.455*s,f+.238*s]]).stream(c).point,r=o.translate([l-.307*s,f+.201*s]).clipExtent([[l-.425*s+Aa,f+.12*s+Aa],[l-.214*s-Aa,f+.234*s-Aa]]).stream(c).point,u=a.translate([l-.205*s,f+.212*s]).clipExtent([[l-.214*s+Aa,f+.166*s+Aa],[l-.115*s-Aa,f+.234*s-Aa]]).stream(c).point,n},n.scale(1070)};var 
Cc,Nc,Lc,Tc,qc,zc,Rc={point:g,lineStart:g,lineEnd:g,polygonStart:function(){Nc=0,Rc.lineStart=Fe},polygonEnd:function(){Rc.lineStart=Rc.lineEnd=Rc.point=g,Cc+=oa(Nc/2)}},Dc={point:Oe,lineStart:g,lineEnd:g,polygonStart:g,polygonEnd:g},Pc={point:Ze,lineStart:Ve,lineEnd:Xe,polygonStart:function(){Pc.lineStart=$e},polygonEnd:function(){Pc.point=Ze,Pc.lineStart=Ve,Pc.lineEnd=Xe}};Xo.geo.path=function(){function n(n){return n&&("function"==typeof a&&i.pointRadius(+a.apply(this,arguments)),o&&o.valid||(o=u(i)),Xo.geo.stream(n,o)),i.result()}function t(){return o=null,n}var e,r,u,i,o,a=4.5;return n.area=function(n){return Cc=0,Xo.geo.stream(n,u(Rc)),Cc},n.centroid=function(n){return dc=mc=yc=xc=Mc=_c=bc=wc=Sc=0,Xo.geo.stream(n,u(Pc)),Sc?[bc/Sc,wc/Sc]:_c?[xc/_c,Mc/_c]:yc?[dc/yc,mc/yc]:[0/0,0/0]},n.bounds=function(n){return qc=zc=-(Lc=Tc=1/0),Xo.geo.stream(n,u(Dc)),[[Lc,Tc],[qc,zc]]},n.projection=function(n){return arguments.length?(u=(e=n)?n.stream||Je(n):bt,t()):e},n.context=function(n){return arguments.length?(i=null==(r=n)?new Ye:new Be(n),"function"!=typeof a&&i.pointRadius(a),t()):r},n.pointRadius=function(t){return arguments.length?(a="function"==typeof t?t:(i.pointRadius(+t),+t),n):a},n.projection(Xo.geo.albersUsa()).context(null)},Xo.geo.transform=function(n){return{stream:function(t){var e=new Ge(t);for(var r in n)e[r]=n[r];return e}}},Ge.prototype={point:function(n,t){this.stream.point(n,t)},sphere:function(){this.stream.sphere()},lineStart:function(){this.stream.lineStart()},lineEnd:function(){this.stream.lineEnd()},polygonStart:function(){this.stream.polygonStart()},polygonEnd:function(){this.stream.polygonEnd()}},Xo.geo.projection=Qe,Xo.geo.projectionMutator=nr,(Xo.geo.equirectangular=function(){return Qe(er)}).raw=er.invert=er,Xo.geo.rotation=function(n){function t(t){return t=n(t[0]*Na,t[1]*Na),t[0]*=La,t[1]*=La,t}return n=ur(n[0]%360*Na,n[1]*Na,n.length>2?n[2]*Na:0),t.invert=function(t){return t=n.invert(t[0]*Na,t[1]*Na),t[0]*=La,t[1]*=La,t},t},rr.invert=er,Xo.geo.circle=function(){function n(){var n="function"==typeof r?r.apply(this,arguments):r,t=ur(-n[0]*Na,-n[1]*Na,0).invert,u=[];return e(null,null,1,{point:function(n,e){u.push(n=t(n,e)),n[0]*=La,n[1]*=La}}),{type:"Polygon",coordinates:[u]}}var t,e,r=[0,0],u=6;return n.origin=function(t){return arguments.length?(r=t,n):r},n.angle=function(r){return arguments.length?(e=cr((t=+r)*Na,u*Na),n):t},n.precision=function(r){return arguments.length?(e=cr(t*Na,(u=+r)*Na),n):u},n.angle(90)},Xo.geo.distance=function(n,t){var e,r=(t[0]-n[0])*Na,u=n[1]*Na,i=t[1]*Na,o=Math.sin(r),a=Math.cos(r),c=Math.sin(u),s=Math.cos(u),l=Math.sin(i),f=Math.cos(i);return Math.atan2(Math.sqrt((e=f*o)*e+(e=s*l-c*f*a)*e),c*l+s*f*a)},Xo.geo.graticule=function(){function n(){return{type:"MultiLineString",coordinates:t()}}function t(){return Xo.range(Math.ceil(i/d)*d,u,d).map(h).concat(Xo.range(Math.ceil(s/m)*m,c,m).map(g)).concat(Xo.range(Math.ceil(r/p)*p,e,p).filter(function(n){return oa(n%d)>Aa}).map(l)).concat(Xo.range(Math.ceil(a/v)*v,o,v).filter(function(n){return oa(n%m)>Aa}).map(f))}var e,r,u,i,o,a,c,s,l,f,h,g,p=10,v=p,d=90,m=360,y=2.5;return n.lines=function(){return t().map(function(n){return{type:"LineString",coordinates:n}})},n.outline=function(){return{type:"Polygon",coordinates:[h(i).concat(g(c).slice(1),h(u).reverse().slice(1),g(s).reverse().slice(1))]}},n.extent=function(t){return arguments.length?n.majorExtent(t).minorExtent(t):n.minorExtent()},n.majorExtent=function(t){return 
arguments.length?(i=+t[0][0],u=+t[1][0],s=+t[0][1],c=+t[1][1],i>u&&(t=i,i=u,u=t),s>c&&(t=s,s=c,c=t),n.precision(y)):[[i,s],[u,c]]},n.minorExtent=function(t){return arguments.length?(r=+t[0][0],e=+t[1][0],a=+t[0][1],o=+t[1][1],r>e&&(t=r,r=e,e=t),a>o&&(t=a,a=o,o=t),n.precision(y)):[[r,a],[e,o]]},n.step=function(t){return arguments.length?n.majorStep(t).minorStep(t):n.minorStep()},n.majorStep=function(t){return arguments.length?(d=+t[0],m=+t[1],n):[d,m]},n.minorStep=function(t){return arguments.length?(p=+t[0],v=+t[1],n):[p,v]},n.precision=function(t){return arguments.length?(y=+t,l=lr(a,o,90),f=fr(r,e,y),h=lr(s,c,90),g=fr(i,u,y),n):y},n.majorExtent([[-180,-90+Aa],[180,90-Aa]]).minorExtent([[-180,-80-Aa],[180,80+Aa]])},Xo.geo.greatArc=function(){function n(){return{type:"LineString",coordinates:[t||r.apply(this,arguments),e||u.apply(this,arguments)]}}var t,e,r=hr,u=gr;return n.distance=function(){return Xo.geo.distance(t||r.apply(this,arguments),e||u.apply(this,arguments))},n.source=function(e){return arguments.length?(r=e,t="function"==typeof e?null:e,n):r},n.target=function(t){return arguments.length?(u=t,e="function"==typeof t?null:t,n):u},n.precision=function(){return arguments.length?n:0},n},Xo.geo.interpolate=function(n,t){return pr(n[0]*Na,n[1]*Na,t[0]*Na,t[1]*Na)},Xo.geo.length=function(n){return Uc=0,Xo.geo.stream(n,jc),Uc};var Uc,jc={sphere:g,point:g,lineStart:vr,lineEnd:g,polygonStart:g,polygonEnd:g},Hc=dr(function(n){return Math.sqrt(2/(1+n))},function(n){return 2*Math.asin(n/2)});(Xo.geo.azimuthalEqualArea=function(){return Qe(Hc)}).raw=Hc;var Fc=dr(function(n){var t=Math.acos(n);return t&&t/Math.sin(t)},bt);(Xo.geo.azimuthalEquidistant=function(){return Qe(Fc)}).raw=Fc,(Xo.geo.conicConformal=function(){return je(mr)}).raw=mr,(Xo.geo.conicEquidistant=function(){return je(yr)}).raw=yr;var Oc=dr(function(n){return 1/n},Math.atan);(Xo.geo.gnomonic=function(){return Qe(Oc)}).raw=Oc,xr.invert=function(n,t){return[n,2*Math.atan(Math.exp(t))-Ea]},(Xo.geo.mercator=function(){return Mr(xr)}).raw=xr;var Yc=dr(function(){return 1},Math.asin);(Xo.geo.orthographic=function(){return Qe(Yc)}).raw=Yc;var Ic=dr(function(n){return 1/(1+n)},function(n){return 2*Math.atan(n)});(Xo.geo.stereographic=function(){return Qe(Ic)}).raw=Ic,_r.invert=function(n,t){return[-t,2*Math.atan(Math.exp(n))-Ea]},(Xo.geo.transverseMercator=function(){var n=Mr(_r),t=n.center,e=n.rotate;return n.center=function(n){return n?t([-n[1],n[0]]):(n=t(),[-n[1],n[0]])},n.rotate=function(n){return n?e([n[0],n[1],n.length>2?n[2]+90:90]):(n=e(),[n[0],n[1],n[2]-90])},n.rotate([0,0])}).raw=_r,Xo.geom={},Xo.geom.hull=function(n){function t(n){if(n.length<3)return[];var t,u=_t(e),i=_t(r),o=n.length,a=[],c=[];for(t=0;o>t;t++)a.push([+u.call(this,n[t],t),+i.call(this,n[t],t),t]);for(a.sort(kr),t=0;o>t;t++)c.push([a[t][0],-a[t][1]]);var s=Sr(a),l=Sr(c),f=l[0]===s[0],h=l[l.length-1]===s[s.length-1],g=[];for(t=s.length-1;t>=0;--t)g.push(n[a[s[t]][2]]);for(t=+f;t<l.length-h;++t)g.push(n[a[l[t]][2]]);return g}var e=br,r=wr;return arguments.length?t(n):(t.x=function(n){return arguments.length?(e=n,t):e},t.y=function(n){return arguments.length?(r=n,t):r},t)},Xo.geom.polygon=function(n){return fa(n,Zc),n};var Zc=Xo.geom.polygon.prototype=[];Zc.area=function(){for(var n,t=-1,e=this.length,r=this[e-1],u=0;++t<e;)n=r,r=this[t],u+=n[1]*r[0]-n[0]*r[1];return.5*u},Zc.centroid=function(n){var 
t,e,r=-1,u=this.length,i=0,o=0,a=this[u-1];for(arguments.length||(n=-1/(6*this.area()));++r<u;)t=a,a=this[r],e=t[0]*a[1]-a[0]*t[1],i+=(t[0]+a[0])*e,o+=(t[1]+a[1])*e;return[i*n,o*n]},Zc.clip=function(n){for(var t,e,r,u,i,o,a=Cr(n),c=-1,s=this.length-Cr(this),l=this[s-1];++c<s;){for(t=n.slice(),n.length=0,u=this[c],i=t[(r=t.length-a)-1],e=-1;++e<r;)o=t[e],Er(o,l,u)?(Er(i,l,u)||n.push(Ar(i,o,l,u)),n.push(o)):Er(i,l,u)&&n.push(Ar(i,o,l,u)),i=o;a&&n.push(n[0]),l=u}return n};var Vc,Xc,$c,Bc,Wc,Jc=[],Gc=[];Pr.prototype.prepare=function(){for(var n,t=this.edges,e=t.length;e--;)n=t[e].edge,n.b&&n.a||t.splice(e,1);return t.sort(jr),t.length},Br.prototype={start:function(){return this.edge.l===this.site?this.edge.a:this.edge.b},end:function(){return this.edge.l===this.site?this.edge.b:this.edge.a}},Wr.prototype={insert:function(n,t){var e,r,u;if(n){if(t.P=n,t.N=n.N,n.N&&(n.N.P=t),n.N=t,n.R){for(n=n.R;n.L;)n=n.L;n.L=t}else n.R=t;e=n}else this._?(n=Qr(this._),t.P=null,t.N=n,n.P=n.L=t,e=n):(t.P=t.N=null,this._=t,e=null);for(t.L=t.R=null,t.U=e,t.C=!0,n=t;e&&e.C;)r=e.U,e===r.L?(u=r.R,u&&u.C?(e.C=u.C=!1,r.C=!0,n=r):(n===e.R&&(Gr(this,e),n=e,e=n.U),e.C=!1,r.C=!0,Kr(this,r))):(u=r.L,u&&u.C?(e.C=u.C=!1,r.C=!0,n=r):(n===e.L&&(Kr(this,e),n=e,e=n.U),e.C=!1,r.C=!0,Gr(this,r))),e=n.U;this._.C=!1},remove:function(n){n.N&&(n.N.P=n.P),n.P&&(n.P.N=n.N),n.N=n.P=null;var t,e,r,u=n.U,i=n.L,o=n.R;if(e=i?o?Qr(o):i:o,u?u.L===n?u.L=e:u.R=e:this._=e,i&&o?(r=e.C,e.C=n.C,e.L=i,i.U=e,e!==o?(u=e.U,e.U=n.U,n=e.R,u.L=n,e.R=o,o.U=e):(e.U=u,u=e,n=e.R)):(r=n.C,n=e),n&&(n.U=u),!r){if(n&&n.C)return n.C=!1,void 0;do{if(n===this._)break;if(n===u.L){if(t=u.R,t.C&&(t.C=!1,u.C=!0,Gr(this,u),t=u.R),t.L&&t.L.C||t.R&&t.R.C){t.R&&t.R.C||(t.L.C=!1,t.C=!0,Kr(this,t),t=u.R),t.C=u.C,u.C=t.R.C=!1,Gr(this,u),n=this._;break}}else if(t=u.L,t.C&&(t.C=!1,u.C=!0,Kr(this,u),t=u.L),t.L&&t.L.C||t.R&&t.R.C){t.L&&t.L.C||(t.R.C=!1,t.C=!0,Gr(this,t),t=u.L),t.C=u.C,u.C=t.L.C=!1,Kr(this,u),n=this._;break}t.C=!0,n=u,u=u.U}while(!n.C);n&&(n.C=!1)}}},Xo.geom.voronoi=function(n){function t(n){var t=new Array(n.length),r=a[0][0],u=a[0][1],i=a[1][0],o=a[1][1];return nu(e(n),a).cells.forEach(function(e,a){var c=e.edges,s=e.site,l=t[a]=c.length?c.map(function(n){var t=n.start();return[t.x,t.y]}):s.x>=r&&s.x<=i&&s.y>=u&&s.y<=o?[[r,o],[i,o],[i,u],[r,u]]:[];l.point=n[a]}),t}function e(n){return n.map(function(n,t){return{x:Math.round(i(n,t)/Aa)*Aa,y:Math.round(o(n,t)/Aa)*Aa,i:t}})}var r=br,u=wr,i=r,o=u,a=Kc;return n?t(n):(t.links=function(n){return nu(e(n)).edges.filter(function(n){return n.l&&n.r}).map(function(t){return{source:n[t.l.i],target:n[t.r.i]}})},t.triangles=function(n){var t=[];return nu(e(n)).cells.forEach(function(e,r){for(var u,i,o=e.site,a=e.edges.sort(jr),c=-1,s=a.length,l=a[s-1].edge,f=l.l===o?l.r:l.l;++c<s;)u=l,i=f,l=a[c].edge,f=l.l===o?l.r:l.l,r<i.i&&r<f.i&&eu(o,i,f)<0&&t.push([n[r],n[i.i],n[f.i]])}),t},t.x=function(n){return arguments.length?(i=_t(r=n),t):r},t.y=function(n){return arguments.length?(o=_t(u=n),t):u},t.clipExtent=function(n){return arguments.length?(a=null==n?Kc:n,t):a===Kc?null:a},t.size=function(n){return arguments.length?t.clipExtent(n&&[[0,0],n]):a===Kc?null:a&&a[1]},t)};var Kc=[[-1e6,-1e6],[1e6,1e6]];Xo.geom.delaunay=function(n){return Xo.geom.voronoi().triangles(n)},Xo.geom.quadtree=function(n,t,e,r,u){function i(n){function i(n,t,e,r,u,i,o,a){if(!isNaN(e)&&!isNaN(r))if(n.leaf){var c=n.x,l=n.y;if(null!=c)if(oa(c-e)+oa(l-r)<.01)s(n,t,e,r,u,i,o,a);else{var f=n.point;n.x=n.y=n.point=null,s(n,f,c,l,u,i,o,a),s(n,t,e,r,u,i,o,a)}else 
n.x=e,n.y=r,n.point=t}else s(n,t,e,r,u,i,o,a)}function s(n,t,e,r,u,o,a,c){var s=.5*(u+a),l=.5*(o+c),f=e>=s,h=r>=l,g=(h<<1)+f;n.leaf=!1,n=n.nodes[g]||(n.nodes[g]=iu()),f?u=s:a=s,h?o=l:c=l,i(n,t,e,r,u,o,a,c)}var l,f,h,g,p,v,d,m,y,x=_t(a),M=_t(c);if(null!=t)v=t,d=e,m=r,y=u;else if(m=y=-(v=d=1/0),f=[],h=[],p=n.length,o)for(g=0;p>g;++g)l=n[g],l.x<v&&(v=l.x),l.y<d&&(d=l.y),l.x>m&&(m=l.x),l.y>y&&(y=l.y),f.push(l.x),h.push(l.y);else for(g=0;p>g;++g){var _=+x(l=n[g],g),b=+M(l,g);v>_&&(v=_),d>b&&(d=b),_>m&&(m=_),b>y&&(y=b),f.push(_),h.push(b)}var w=m-v,S=y-d;w>S?y=d+w:m=v+S;var k=iu();if(k.add=function(n){i(k,n,+x(n,++g),+M(n,g),v,d,m,y)},k.visit=function(n){ou(n,k,v,d,m,y)},g=-1,null==t){for(;++g<p;)i(k,n[g],f[g],h[g],v,d,m,y);--g}else n.forEach(k.add);return f=h=n=l=null,k}var o,a=br,c=wr;return(o=arguments.length)?(a=ru,c=uu,3===o&&(u=e,r=t,e=t=0),i(n)):(i.x=function(n){return arguments.length?(a=n,i):a},i.y=function(n){return arguments.length?(c=n,i):c},i.extent=function(n){return arguments.length?(null==n?t=e=r=u=null:(t=+n[0][0],e=+n[0][1],r=+n[1][0],u=+n[1][1]),i):null==t?null:[[t,e],[r,u]]},i.size=function(n){return arguments.length?(null==n?t=e=r=u=null:(t=e=0,r=+n[0],u=+n[1]),i):null==t?null:[r-t,u-e]},i)},Xo.interpolateRgb=au,Xo.interpolateObject=cu,Xo.interpolateNumber=su,Xo.interpolateString=lu;var Qc=/[-+]?(?:\d+\.?\d*|\.?\d+)(?:[eE][-+]?\d+)?/g;Xo.interpolate=fu,Xo.interpolators=[function(n,t){var e=typeof t;return("string"===e?Va.has(t)||/^(#|rgb\(|hsl\()/.test(t)?au:lu:t instanceof G?au:"object"===e?Array.isArray(t)?hu:cu:su)(n,t)}],Xo.interpolateArray=hu;var ns=function(){return bt},ts=Xo.map({linear:ns,poly:xu,quad:function(){return du},cubic:function(){return mu},sin:function(){return Mu},exp:function(){return _u},circle:function(){return bu},elastic:wu,back:Su,bounce:function(){return ku}}),es=Xo.map({"in":bt,out:pu,"in-out":vu,"out-in":function(n){return vu(pu(n))}});Xo.ease=function(n){var t=n.indexOf("-"),e=t>=0?n.substring(0,t):n,r=t>=0?n.substring(t+1):"in";return e=ts.get(e)||ns,r=es.get(r)||bt,gu(r(e.apply(null,$o.call(arguments,1))))},Xo.interpolateHcl=Eu,Xo.interpolateHsl=Au,Xo.interpolateLab=Cu,Xo.interpolateRound=Nu,Xo.transform=function(n){var t=Wo.createElementNS(Xo.ns.prefix.svg,"g");return(Xo.transform=function(n){if(null!=n){t.setAttribute("transform",n);var e=t.transform.baseVal.consolidate()}return new Lu(e?e.matrix:rs)})(n)},Lu.prototype.toString=function(){return"translate("+this.translate+")rotate("+this.rotate+")skewX("+this.skew+")scale("+this.scale+")"};var rs={a:1,b:0,c:0,d:1,e:0,f:0};Xo.interpolateTransform=Ru,Xo.layout={},Xo.layout.bundle=function(){return function(n){for(var t=[],e=-1,r=n.length;++e<r;)t.push(Uu(n[e]));return t}},Xo.layout.chord=function(){function n(){var n,s,f,h,g,p={},v=[],d=Xo.range(i),m=[];for(e=[],r=[],n=0,h=-1;++h<i;){for(s=0,g=-1;++g<i;)s+=u[h][g];v.push(s),m.push(Xo.range(i)),n+=s}for(o&&d.sort(function(n,t){return o(v[n],v[t])}),a&&m.forEach(function(n,t){n.sort(function(n,e){return a(u[t][n],u[t][e])})}),n=(ka-l*i)/n,s=0,h=-1;++h<i;){for(f=s,g=-1;++g<i;){var y=d[h],x=m[y][g],M=u[y][x],_=s,b=s+=M*n;p[y+"-"+x]={index:y,subindex:x,startAngle:_,endAngle:b,value:M}}r[y]={index:y,startAngle:f,endAngle:s,value:(s-f)/n},s+=l}for(h=-1;++h<i;)for(g=h-1;++g<i;){var w=p[h+"-"+g],S=p[g+"-"+h];(w.value||S.value)&&e.push(w.value<S.value?{source:S,target:w}:{source:w,target:S})}c&&t()}function t(){e.sort(function(n,t){return c((n.source.value+n.target.value)/2,(t.source.value+t.target.value)/2)})}var e,r,u,i,o,a,c,s={},l=0;return 
s.matrix=function(n){return arguments.length?(i=(u=n)&&u.length,e=r=null,s):u},s.padding=function(n){return arguments.length?(l=n,e=r=null,s):l},s.sortGroups=function(n){return arguments.length?(o=n,e=r=null,s):o},s.sortSubgroups=function(n){return arguments.length?(a=n,e=null,s):a},s.sortChords=function(n){return arguments.length?(c=n,e&&t(),s):c},s.chords=function(){return e||n(),e},s.groups=function(){return r||n(),r},s},Xo.layout.force=function(){function n(n){return function(t,e,r,u){if(t.point!==n){var i=t.cx-n.x,o=t.cy-n.y,a=u-e,c=i*i+o*o;if(c>a*a/d){if(p>c){var s=t.charge/c;n.px-=i*s,n.py-=o*s}return!0}if(t.point&&c&&p>c){var s=t.pointCharge/c;n.px-=i*s,n.py-=o*s}}return!t.charge}}function t(n){n.px=Xo.event.x,n.py=Xo.event.y,a.resume()}var e,r,u,i,o,a={},c=Xo.dispatch("start","tick","end"),s=[1,1],l=.9,f=us,h=is,g=-30,p=os,v=.1,d=.64,m=[],y=[];return a.tick=function(){if((r*=.99)<.005)return c.end({type:"end",alpha:r=0}),!0;var t,e,a,f,h,p,d,x,M,_=m.length,b=y.length;for(e=0;b>e;++e)a=y[e],f=a.source,h=a.target,x=h.x-f.x,M=h.y-f.y,(p=x*x+M*M)&&(p=r*i[e]*((p=Math.sqrt(p))-u[e])/p,x*=p,M*=p,h.x-=x*(d=f.weight/(h.weight+f.weight)),h.y-=M*d,f.x+=x*(d=1-d),f.y+=M*d);if((d=r*v)&&(x=s[0]/2,M=s[1]/2,e=-1,d))for(;++e<_;)a=m[e],a.x+=(x-a.x)*d,a.y+=(M-a.y)*d;if(g)for(Zu(t=Xo.geom.quadtree(m),r,o),e=-1;++e<_;)(a=m[e]).fixed||t.visit(n(a));for(e=-1;++e<_;)a=m[e],a.fixed?(a.x=a.px,a.y=a.py):(a.x-=(a.px-(a.px=a.x))*l,a.y-=(a.py-(a.py=a.y))*l);c.tick({type:"tick",alpha:r})},a.nodes=function(n){return arguments.length?(m=n,a):m},a.links=function(n){return arguments.length?(y=n,a):y},a.size=function(n){return arguments.length?(s=n,a):s},a.linkDistance=function(n){return arguments.length?(f="function"==typeof n?n:+n,a):f},a.distance=a.linkDistance,a.linkStrength=function(n){return arguments.length?(h="function"==typeof n?n:+n,a):h},a.friction=function(n){return arguments.length?(l=+n,a):l},a.charge=function(n){return arguments.length?(g="function"==typeof n?n:+n,a):g},a.chargeDistance=function(n){return arguments.length?(p=n*n,a):Math.sqrt(p)},a.gravity=function(n){return arguments.length?(v=+n,a):v},a.theta=function(n){return arguments.length?(d=n*n,a):Math.sqrt(d)},a.alpha=function(n){return arguments.length?(n=+n,r?r=n>0?n:0:n>0&&(c.start({type:"start",alpha:r=n}),Xo.timer(a.tick)),a):r},a.start=function(){function n(n,r){if(!e){for(e=new Array(c),a=0;c>a;++a)e[a]=[];for(a=0;s>a;++a){var u=y[a];e[u.source.index].push(u.target),e[u.target.index].push(u.source)}}for(var i,o=e[t],a=-1,s=o.length;++a<s;)if(!isNaN(i=o[a][n]))return i;return Math.random()*r}var t,e,r,c=m.length,l=y.length,p=s[0],v=s[1];for(t=0;c>t;++t)(r=m[t]).index=t,r.weight=0;for(t=0;l>t;++t)r=y[t],"number"==typeof r.source&&(r.source=m[r.source]),"number"==typeof r.target&&(r.target=m[r.target]),++r.source.weight,++r.target.weight;for(t=0;c>t;++t)r=m[t],isNaN(r.x)&&(r.x=n("x",p)),isNaN(r.y)&&(r.y=n("y",v)),isNaN(r.px)&&(r.px=r.x),isNaN(r.py)&&(r.py=r.y);if(u=[],"function"==typeof f)for(t=0;l>t;++t)u[t]=+f.call(this,y[t],t);else for(t=0;l>t;++t)u[t]=f;if(i=[],"function"==typeof h)for(t=0;l>t;++t)i[t]=+h.call(this,y[t],t);else for(t=0;l>t;++t)i[t]=h;if(o=[],"function"==typeof g)for(t=0;c>t;++t)o[t]=+g.call(this,m[t],t);else for(t=0;c>t;++t)o[t]=g;return a.resume()},a.resume=function(){return a.alpha(.1)},a.stop=function(){return a.alpha(0)},a.drag=function(){return 
e||(e=Xo.behavior.drag().origin(bt).on("dragstart.force",Fu).on("drag.force",t).on("dragend.force",Ou)),arguments.length?(this.on("mouseover.force",Yu).on("mouseout.force",Iu).call(e),void 0):e},Xo.rebind(a,c,"on")};var us=20,is=1,os=1/0;Xo.layout.hierarchy=function(){function n(t,o,a){var c=u.call(e,t,o);if(t.depth=o,a.push(t),c&&(s=c.length)){for(var s,l,f=-1,h=t.children=new Array(s),g=0,p=o+1;++f<s;)l=h[f]=n(c[f],p,a),l.parent=t,g+=l.value;r&&h.sort(r),i&&(t.value=g)}else delete t.children,i&&(t.value=+i.call(e,t,o)||0);return t}function t(n,r){var u=n.children,o=0;if(u&&(a=u.length))for(var a,c=-1,s=r+1;++c<a;)o+=t(u[c],s);else i&&(o=+i.call(e,n,r)||0);return i&&(n.value=o),o}function e(t){var e=[];return n(t,0,e),e}var r=Bu,u=Xu,i=$u;return e.sort=function(n){return arguments.length?(r=n,e):r},e.children=function(n){return arguments.length?(u=n,e):u},e.value=function(n){return arguments.length?(i=n,e):i},e.revalue=function(n){return t(n,0),n},e},Xo.layout.partition=function(){function n(t,e,r,u){var i=t.children;if(t.x=e,t.y=t.depth*u,t.dx=r,t.dy=u,i&&(o=i.length)){var o,a,c,s=-1;for(r=t.value?r/t.value:0;++s<o;)n(a=i[s],e,c=a.value*r,u),e+=c}}function t(n){var e=n.children,r=0;if(e&&(u=e.length))for(var u,i=-1;++i<u;)r=Math.max(r,t(e[i]));return 1+r}function e(e,i){var o=r.call(this,e,i);return n(o[0],0,u[0],u[1]/t(o[0])),o}var r=Xo.layout.hierarchy(),u=[1,1];return e.size=function(n){return arguments.length?(u=n,e):u},Vu(e,r)},Xo.layout.pie=function(){function n(i){var o=i.map(function(e,r){return+t.call(n,e,r)}),a=+("function"==typeof r?r.apply(this,arguments):r),c=(("function"==typeof u?u.apply(this,arguments):u)-a)/Xo.sum(o),s=Xo.range(i.length);null!=e&&s.sort(e===as?function(n,t){return o[t]-o[n]}:function(n,t){return e(i[n],i[t])});var l=[];return s.forEach(function(n){var t;l[n]={data:i[n],value:t=o[n],startAngle:a,endAngle:a+=t*c}}),l}var t=Number,e=as,r=0,u=ka;return n.value=function(e){return arguments.length?(t=e,n):t},n.sort=function(t){return arguments.length?(e=t,n):e},n.startAngle=function(t){return arguments.length?(r=t,n):r},n.endAngle=function(t){return arguments.length?(u=t,n):u},n};var as={};Xo.layout.stack=function(){function n(a,c){var s=a.map(function(e,r){return t.call(n,e,r)}),l=s.map(function(t){return t.map(function(t,e){return[i.call(n,t,e),o.call(n,t,e)]})}),f=e.call(n,l,c);s=Xo.permute(s,f),l=Xo.permute(l,f);var h,g,p,v=r.call(n,l,c),d=s.length,m=s[0].length;for(g=0;m>g;++g)for(u.call(n,s[0][g],p=v[g],l[0][g][1]),h=1;d>h;++h)u.call(n,s[h][g],p+=l[h-1][g][1],l[h][g][1]);return a}var t=bt,e=Qu,r=ni,u=Ku,i=Ju,o=Gu;return n.values=function(e){return arguments.length?(t=e,n):t},n.order=function(t){return arguments.length?(e="function"==typeof t?t:cs.get(t)||Qu,n):e},n.offset=function(t){return arguments.length?(r="function"==typeof t?t:ss.get(t)||ni,n):r},n.x=function(t){return arguments.length?(i=t,n):i},n.y=function(t){return arguments.length?(o=t,n):o},n.out=function(t){return arguments.length?(u=t,n):u},n};var cs=Xo.map({"inside-out":function(n){var t,e,r=n.length,u=n.map(ti),i=n.map(ei),o=Xo.range(r).sort(function(n,t){return u[n]-u[t]}),a=0,c=0,s=[],l=[];for(t=0;r>t;++t)e=o[t],c>a?(a+=i[e],s.push(e)):(c+=i[e],l.push(e));return l.reverse().concat(s)},reverse:function(n){return Xo.range(n.length).reverse()},"default":Qu}),ss=Xo.map({silhouette:function(n){var t,e,r,u=n.length,i=n[0].length,o=[],a=0,c=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];r>a&&(a=r),o.push(r)}for(e=0;i>e;++e)c[e]=(a-o[e])/2;return c},wiggle:function(n){var 
t,e,r,u,i,o,a,c,s,l=n.length,f=n[0],h=f.length,g=[];for(g[0]=c=s=0,e=1;h>e;++e){for(t=0,u=0;l>t;++t)u+=n[t][e][1];for(t=0,i=0,a=f[e][0]-f[e-1][0];l>t;++t){for(r=0,o=(n[t][e][1]-n[t][e-1][1])/(2*a);t>r;++r)o+=(n[r][e][1]-n[r][e-1][1])/a;i+=o*n[t][e][1]}g[e]=c-=u?i/u*a:0,s>c&&(s=c)}for(e=0;h>e;++e)g[e]-=s;return g},expand:function(n){var t,e,r,u=n.length,i=n[0].length,o=1/u,a=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];if(r)for(t=0;u>t;t++)n[t][e][1]/=r;else for(t=0;u>t;t++)n[t][e][1]=o}for(e=0;i>e;++e)a[e]=0;return a},zero:ni});Xo.layout.histogram=function(){function n(n,i){for(var o,a,c=[],s=n.map(e,this),l=r.call(this,s,i),f=u.call(this,l,s,i),i=-1,h=s.length,g=f.length-1,p=t?1:1/h;++i<g;)o=c[i]=[],o.dx=f[i+1]-(o.x=f[i]),o.y=0;if(g>0)for(i=-1;++i<h;)a=s[i],a>=l[0]&&a<=l[1]&&(o=c[Xo.bisect(f,a,1,g)-1],o.y+=p,o.push(n[i]));return c}var t=!0,e=Number,r=oi,u=ui;return n.value=function(t){return arguments.length?(e=t,n):e},n.range=function(t){return arguments.length?(r=_t(t),n):r},n.bins=function(t){return arguments.length?(u="number"==typeof t?function(n){return ii(n,t)}:_t(t),n):u},n.frequency=function(e){return arguments.length?(t=!!e,n):t},n},Xo.layout.tree=function(){function n(n,i){function o(n,t){var r=n.children,u=n._tree;if(r&&(i=r.length)){for(var i,a,s,l=r[0],f=l,h=-1;++h<i;)s=r[h],o(s,a),f=c(s,a,f),a=s;vi(n);var g=.5*(l._tree.prelim+s._tree.prelim);t?(u.prelim=t._tree.prelim+e(n,t),u.mod=u.prelim-g):u.prelim=g}else t&&(u.prelim=t._tree.prelim+e(n,t))}function a(n,t){n.x=n._tree.prelim+t;var e=n.children;if(e&&(r=e.length)){var r,u=-1;for(t+=n._tree.mod;++u<r;)a(e[u],t)}}function c(n,t,r){if(t){for(var u,i=n,o=n,a=t,c=n.parent.children[0],s=i._tree.mod,l=o._tree.mod,f=a._tree.mod,h=c._tree.mod;a=si(a),i=ci(i),a&&i;)c=ci(c),o=si(o),o._tree.ancestor=n,u=a._tree.prelim+f-i._tree.prelim-s+e(a,i),u>0&&(di(mi(a,n,r),n,u),s+=u,l+=u),f+=a._tree.mod,s+=i._tree.mod,h+=c._tree.mod,l+=o._tree.mod;a&&!si(o)&&(o._tree.thread=a,o._tree.mod+=f-l),i&&!ci(c)&&(c._tree.thread=i,c._tree.mod+=s-h,r=n)}return r}var s=t.call(this,n,i),l=s[0];pi(l,function(n,t){n._tree={ancestor:n,prelim:0,mod:0,change:0,shift:0,number:t?t._tree.number+1:0}}),o(l),a(l,-l._tree.prelim);var f=li(l,hi),h=li(l,fi),g=li(l,gi),p=f.x-e(f,h)/2,v=h.x+e(h,f)/2,d=g.depth||1;return pi(l,u?function(n){n.x*=r[0],n.y=n.depth*r[1],delete n._tree}:function(n){n.x=(n.x-p)/(v-p)*r[0],n.y=n.depth/d*r[1],delete n._tree}),s}var t=Xo.layout.hierarchy().sort(null).value(null),e=ai,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},Vu(n,t)},Xo.layout.pack=function(){function n(n,i){var o=e.call(this,n,i),a=o[0],c=u[0],s=u[1],l=null==t?Math.sqrt:"function"==typeof t?t:function(){return t};if(a.x=a.y=0,pi(a,function(n){n.r=+l(n.value)}),pi(a,bi),r){var f=r*(t?1:Math.max(2*a.r/c,2*a.r/s))/2;pi(a,function(n){n.r+=f}),pi(a,bi),pi(a,function(n){n.r-=f})}return ki(a,c/2,s/2,t?1:1/Math.max(2*a.r/c,2*a.r/s)),o}var t,e=Xo.layout.hierarchy().sort(yi),r=0,u=[1,1];return n.size=function(t){return arguments.length?(u=t,n):u},n.radius=function(e){return arguments.length?(t=null==e||"function"==typeof e?e:+e,n):t},n.padding=function(t){return arguments.length?(r=+t,n):r},Vu(n,e)},Xo.layout.cluster=function(){function n(n,i){var o,a=t.call(this,n,i),c=a[0],s=0;pi(c,function(n){var t=n.children;t&&t.length?(n.x=Ci(t),n.y=Ai(t)):(n.x=o?s+=e(n,o):0,n.y=0,o=n)});var 
l=Ni(c),f=Li(c),h=l.x-e(l,f)/2,g=f.x+e(f,l)/2;return pi(c,u?function(n){n.x=(n.x-c.x)*r[0],n.y=(c.y-n.y)*r[1]}:function(n){n.x=(n.x-h)/(g-h)*r[0],n.y=(1-(c.y?n.y/c.y:1))*r[1]}),a}var t=Xo.layout.hierarchy().sort(null).value(null),e=ai,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},Vu(n,t)},Xo.layout.treemap=function(){function n(n,t){for(var e,r,u=-1,i=n.length;++u<i;)r=(e=n[u]).value*(0>t?0:t),e.area=isNaN(r)||0>=r?0:r}function t(e){var i=e.children;if(i&&i.length){var o,a,c,s=f(e),l=[],h=i.slice(),p=1/0,v="slice"===g?s.dx:"dice"===g?s.dy:"slice-dice"===g?1&e.depth?s.dy:s.dx:Math.min(s.dx,s.dy);for(n(h,s.dx*s.dy/e.value),l.area=0;(c=h.length)>0;)l.push(o=h[c-1]),l.area+=o.area,"squarify"!==g||(a=r(l,v))<=p?(h.pop(),p=a):(l.area-=l.pop().area,u(l,v,s,!1),v=Math.min(s.dx,s.dy),l.length=l.area=0,p=1/0);l.length&&(u(l,v,s,!0),l.length=l.area=0),i.forEach(t)}}function e(t){var r=t.children;if(r&&r.length){var i,o=f(t),a=r.slice(),c=[];for(n(a,o.dx*o.dy/t.value),c.area=0;i=a.pop();)c.push(i),c.area+=i.area,null!=i.z&&(u(c,i.z?o.dx:o.dy,o,!a.length),c.length=c.area=0);r.forEach(e)}}function r(n,t){for(var e,r=n.area,u=0,i=1/0,o=-1,a=n.length;++o<a;)(e=n[o].area)&&(i>e&&(i=e),e>u&&(u=e));return r*=r,t*=t,r?Math.max(t*u*p/r,r/(t*i*p)):1/0}function u(n,t,e,r){var u,i=-1,o=n.length,a=e.x,s=e.y,l=t?c(n.area/t):0;if(t==e.dx){for((r||l>e.dy)&&(l=e.dy);++i<o;)u=n[i],u.x=a,u.y=s,u.dy=l,a+=u.dx=Math.min(e.x+e.dx-a,l?c(u.area/l):0);u.z=!0,u.dx+=e.x+e.dx-a,e.y+=l,e.dy-=l}else{for((r||l>e.dx)&&(l=e.dx);++i<o;)u=n[i],u.x=a,u.y=s,u.dx=l,s+=u.dy=Math.min(e.y+e.dy-s,l?c(u.area/l):0);u.z=!1,u.dy+=e.y+e.dy-s,e.x+=l,e.dx-=l}}function i(r){var u=o||a(r),i=u[0];return i.x=0,i.y=0,i.dx=s[0],i.dy=s[1],o&&a.revalue(i),n([i],i.dx*i.dy/i.value),(o?e:t)(i),h&&(o=u),u}var o,a=Xo.layout.hierarchy(),c=Math.round,s=[1,1],l=null,f=Ti,h=!1,g="squarify",p=.5*(1+Math.sqrt(5));return i.size=function(n){return arguments.length?(s=n,i):s},i.padding=function(n){function t(t){var e=n.call(i,t,t.depth);return null==e?Ti(t):qi(t,"number"==typeof e?[e,e,e,e]:e)}function e(t){return qi(t,n)}if(!arguments.length)return l;var r;return f=null==(l=n)?Ti:"function"==(r=typeof n)?t:"number"===r?(n=[n,n,n,n],e):e,i},i.round=function(n){return arguments.length?(c=n?Math.round:Number,i):c!=Number},i.sticky=function(n){return arguments.length?(h=n,o=null,i):h},i.ratio=function(n){return arguments.length?(p=n,i):p},i.mode=function(n){return arguments.length?(g=n+"",i):g},Vu(i,a)},Xo.random={normal:function(n,t){var e=arguments.length;return 2>e&&(t=1),1>e&&(n=0),function(){var e,r,u;do e=2*Math.random()-1,r=2*Math.random()-1,u=e*e+r*r;while(!u||u>1);return n+t*e*Math.sqrt(-2*Math.log(u)/u)}},logNormal:function(){var n=Xo.random.normal.apply(Xo,arguments);return function(){return Math.exp(n())}},bates:function(n){var t=Xo.random.irwinHall(n);return function(){return t()/n}},irwinHall:function(n){return function(){for(var t=0,e=0;n>e;e++)t+=Math.random();return t}}},Xo.scale={};var ls={floor:bt,ceil:bt};Xo.scale.linear=function(){return Hi([0,1],[0,1],fu,!1)};var fs={s:1,g:1,p:1,r:1,e:1};Xo.scale.log=function(){return $i(Xo.scale.linear().domain([0,1]),10,!0,[1,10])};var hs=Xo.format(".0e"),gs={floor:function(n){return-Math.ceil(-n)},ceil:function(n){return-Math.floor(-n)}};Xo.scale.pow=function(){return Bi(Xo.scale.linear(),1,[0,1])},Xo.scale.sqrt=function(){return 
Xo.scale.pow().exponent(.5)},Xo.scale.ordinal=function(){return Ji([],{t:"range",a:[[]]})},Xo.scale.category10=function(){return Xo.scale.ordinal().range(ps)},Xo.scale.category20=function(){return Xo.scale.ordinal().range(vs)},Xo.scale.category20b=function(){return Xo.scale.ordinal().range(ds)},Xo.scale.category20c=function(){return Xo.scale.ordinal().range(ms)};var ps=[2062260,16744206,2924588,14034728,9725885,9197131,14907330,8355711,12369186,1556175].map(ht),vs=[2062260,11454440,16744206,16759672,2924588,10018698,14034728,16750742,9725885,12955861,9197131,12885140,14907330,16234194,8355711,13092807,12369186,14408589,1556175,10410725].map(ht),ds=[3750777,5395619,7040719,10264286,6519097,9216594,11915115,13556636,9202993,12426809,15186514,15190932,8666169,11356490,14049643,15177372,8077683,10834324,13528509,14589654].map(ht),ms=[3244733,7057110,10406625,13032431,15095053,16616764,16625259,16634018,3253076,7652470,10607003,13101504,7695281,10394312,12369372,14342891,6513507,9868950,12434877,14277081].map(ht);Xo.scale.quantile=function(){return Gi([],[])},Xo.scale.quantize=function(){return Ki(0,1,[0,1])},Xo.scale.threshold=function(){return Qi([.5],[0,1])},Xo.scale.identity=function(){return no([0,1])},Xo.svg={},Xo.svg.arc=function(){function n(){var n=t.apply(this,arguments),i=e.apply(this,arguments),o=r.apply(this,arguments)+ys,a=u.apply(this,arguments)+ys,c=(o>a&&(c=o,o=a,a=c),a-o),s=Sa>c?"0":"1",l=Math.cos(o),f=Math.sin(o),h=Math.cos(a),g=Math.sin(a);return c>=xs?n?"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"M0,"+n+"A"+n+","+n+" 0 1,0 0,"+-n+"A"+n+","+n+" 0 1,0 0,"+n+"Z":"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"Z":n?"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L"+n*h+","+n*g+"A"+n+","+n+" 0 "+s+",0 "+n*l+","+n*f+"Z":"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L0,0"+"Z"}var t=to,e=eo,r=ro,u=uo;return n.innerRadius=function(e){return arguments.length?(t=_t(e),n):t},n.outerRadius=function(t){return arguments.length?(e=_t(t),n):e},n.startAngle=function(t){return arguments.length?(r=_t(t),n):r},n.endAngle=function(t){return arguments.length?(u=_t(t),n):u},n.centroid=function(){var n=(t.apply(this,arguments)+e.apply(this,arguments))/2,i=(r.apply(this,arguments)+u.apply(this,arguments))/2+ys;return[Math.cos(i)*n,Math.sin(i)*n]},n};var ys=-Ea,xs=ka-Aa;Xo.svg.line=function(){return io(bt)};var Ms=Xo.map({linear:oo,"linear-closed":ao,step:co,"step-before":so,"step-after":lo,basis:mo,"basis-open":yo,"basis-closed":xo,bundle:Mo,cardinal:go,"cardinal-open":fo,"cardinal-closed":ho,monotone:Eo});Ms.forEach(function(n,t){t.key=n,t.closed=/-closed$/.test(n)});var _s=[0,2/3,1/3,0],bs=[0,1/3,2/3,0],ws=[0,1/6,2/3,1/6];Xo.svg.line.radial=function(){var n=io(Ao);return n.radius=n.x,delete n.x,n.angle=n.y,delete n.y,n},so.reverse=lo,lo.reverse=so,Xo.svg.area=function(){return Co(bt)},Xo.svg.area.radial=function(){var n=Co(Ao);return n.radius=n.x,delete n.x,n.innerRadius=n.x0,delete n.x0,n.outerRadius=n.x1,delete n.x1,n.angle=n.y,delete n.y,n.startAngle=n.y0,delete n.y0,n.endAngle=n.y1,delete n.y1,n},Xo.svg.chord=function(){function n(n,a){var c=t(this,i,n,a),s=t(this,o,n,a);return"M"+c.p0+r(c.r,c.p1,c.a1-c.a0)+(e(c,s)?u(c.r,c.p1,c.r,c.p0):u(c.r,c.p1,s.r,s.p0)+r(s.r,s.p1,s.a1-s.a0)+u(s.r,s.p1,c.r,c.p0))+"Z"}function t(n,t,e,r){var u=t.call(n,e,r),i=a.call(n,u,r),o=c.call(n,u,r)+ys,l=s.call(n,u,r)+ys;return{r:i,a0:o,a1:l,p0:[i*Math.cos(o),i*Math.sin(o)],p1:[i*Math.cos(l),i*Math.sin(l)]}}function e(n,t){return n.a0==t.a0&&n.a1==t.a1}function 
r(n,t,e){return"A"+n+","+n+" 0 "+ +(e>Sa)+",1 "+t}function u(n,t,e,r){return"Q 0,0 "+r}var i=hr,o=gr,a=No,c=ro,s=uo;return n.radius=function(t){return arguments.length?(a=_t(t),n):a},n.source=function(t){return arguments.length?(i=_t(t),n):i},n.target=function(t){return arguments.length?(o=_t(t),n):o},n.startAngle=function(t){return arguments.length?(c=_t(t),n):c},n.endAngle=function(t){return arguments.length?(s=_t(t),n):s},n},Xo.svg.diagonal=function(){function n(n,u){var i=t.call(this,n,u),o=e.call(this,n,u),a=(i.y+o.y)/2,c=[i,{x:i.x,y:a},{x:o.x,y:a},o];return c=c.map(r),"M"+c[0]+"C"+c[1]+" "+c[2]+" "+c[3]}var t=hr,e=gr,r=Lo;return n.source=function(e){return arguments.length?(t=_t(e),n):t},n.target=function(t){return arguments.length?(e=_t(t),n):e},n.projection=function(t){return arguments.length?(r=t,n):r},n},Xo.svg.diagonal.radial=function(){var n=Xo.svg.diagonal(),t=Lo,e=n.projection;return n.projection=function(n){return arguments.length?e(To(t=n)):t},n},Xo.svg.symbol=function(){function n(n,r){return(Ss.get(t.call(this,n,r))||Ro)(e.call(this,n,r))}var t=zo,e=qo;return n.type=function(e){return arguments.length?(t=_t(e),n):t},n.size=function(t){return arguments.length?(e=_t(t),n):e},n};var Ss=Xo.map({circle:Ro,cross:function(n){var t=Math.sqrt(n/5)/2;return"M"+-3*t+","+-t+"H"+-t+"V"+-3*t+"H"+t+"V"+-t+"H"+3*t+"V"+t+"H"+t+"V"+3*t+"H"+-t+"V"+t+"H"+-3*t+"Z"},diamond:function(n){var t=Math.sqrt(n/(2*Cs)),e=t*Cs;return"M0,"+-t+"L"+e+",0"+" 0,"+t+" "+-e+",0"+"Z"},square:function(n){var t=Math.sqrt(n)/2;return"M"+-t+","+-t+"L"+t+","+-t+" "+t+","+t+" "+-t+","+t+"Z"},"triangle-down":function(n){var t=Math.sqrt(n/As),e=t*As/2;return"M0,"+e+"L"+t+","+-e+" "+-t+","+-e+"Z"},"triangle-up":function(n){var t=Math.sqrt(n/As),e=t*As/2;return"M0,"+-e+"L"+t+","+e+" "+-t+","+e+"Z"}});Xo.svg.symbolTypes=Ss.keys();var ks,Es,As=Math.sqrt(3),Cs=Math.tan(30*Na),Ns=[],Ls=0;Ns.call=da.call,Ns.empty=da.empty,Ns.node=da.node,Ns.size=da.size,Xo.transition=function(n){return arguments.length?ks?n.transition():n:xa.transition()},Xo.transition.prototype=Ns,Ns.select=function(n){var t,e,r,u=this.id,i=[];n=M(n);for(var o=-1,a=this.length;++o<a;){i.push(t=[]);for(var c=this[o],s=-1,l=c.length;++s<l;)(r=c[s])&&(e=n.call(r,r.__data__,s,o))?("__data__"in r&&(e.__data__=r.__data__),jo(e,s,u,r.__transition__[u]),t.push(e)):t.push(null)}return Do(i,u)},Ns.selectAll=function(n){var t,e,r,u,i,o=this.id,a=[];n=_(n);for(var c=-1,s=this.length;++c<s;)for(var l=this[c],f=-1,h=l.length;++f<h;)if(r=l[f]){i=r.__transition__[o],e=n.call(r,r.__data__,f,c),a.push(t=[]);for(var g=-1,p=e.length;++g<p;)(u=e[g])&&jo(u,g,o,i),t.push(u)}return Do(a,o)},Ns.filter=function(n){var t,e,r,u=[];"function"!=typeof n&&(n=q(n));for(var i=0,o=this.length;o>i;i++){u.push(t=[]);for(var e=this[i],a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a,i)&&t.push(r)}return Do(u,this.id)},Ns.tween=function(n,t){var e=this.id;return arguments.length<2?this.node().__transition__[e].tween.get(n):R(this,null==t?function(t){t.__transition__[e].tween.remove(n)}:function(r){r.__transition__[e].tween.set(n,t)})},Ns.attr=function(n,t){function e(){this.removeAttribute(a)}function r(){this.removeAttributeNS(a.space,a.local)}function u(n){return null==n?e:(n+="",function(){var t,e=this.getAttribute(a);return e!==n&&(t=o(e,n),function(n){this.setAttribute(a,t(n))})})}function i(n){return null==n?r:(n+="",function(){var t,e=this.getAttributeNS(a.space,a.local);return e!==n&&(t=o(e,n),function(n){this.setAttributeNS(a.space,a.local,t(n))})})}if(arguments.length<2){for(t 
in n)this.attr(t,n[t]);return this}var o="transform"==n?Ru:fu,a=Xo.ns.qualify(n);return Po(this,"attr."+n,t,a.local?i:u)},Ns.attrTween=function(n,t){function e(n,e){var r=t.call(this,n,e,this.getAttribute(u));return r&&function(n){this.setAttribute(u,r(n))}}function r(n,e){var r=t.call(this,n,e,this.getAttributeNS(u.space,u.local));return r&&function(n){this.setAttributeNS(u.space,u.local,r(n))}}var u=Xo.ns.qualify(n);return this.tween("attr."+n,u.local?r:e)},Ns.style=function(n,t,e){function r(){this.style.removeProperty(n)}function u(t){return null==t?r:(t+="",function(){var r,u=Go.getComputedStyle(this,null).getPropertyValue(n);return u!==t&&(r=fu(u,t),function(t){this.style.setProperty(n,r(t),e)})})}var i=arguments.length;if(3>i){if("string"!=typeof n){2>i&&(t="");for(e in n)this.style(e,n[e],t);return this}e=""}return Po(this,"style."+n,t,u)},Ns.styleTween=function(n,t,e){function r(r,u){var i=t.call(this,r,u,Go.getComputedStyle(this,null).getPropertyValue(n));return i&&function(t){this.style.setProperty(n,i(t),e)}}return arguments.length<3&&(e=""),this.tween("style."+n,r)},Ns.text=function(n){return Po(this,"text",n,Uo)},Ns.remove=function(){return this.each("end.transition",function(){var n;this.__transition__.count<2&&(n=this.parentNode)&&n.removeChild(this)})},Ns.ease=function(n){var t=this.id;return arguments.length<1?this.node().__transition__[t].ease:("function"!=typeof n&&(n=Xo.ease.apply(Xo,arguments)),R(this,function(e){e.__transition__[t].ease=n}))},Ns.delay=function(n){var t=this.id;return R(this,"function"==typeof n?function(e,r,u){e.__transition__[t].delay=+n.call(e,e.__data__,r,u)}:(n=+n,function(e){e.__transition__[t].delay=n}))},Ns.duration=function(n){var t=this.id;return R(this,"function"==typeof n?function(e,r,u){e.__transition__[t].duration=Math.max(1,n.call(e,e.__data__,r,u))}:(n=Math.max(1,n),function(e){e.__transition__[t].duration=n}))},Ns.each=function(n,t){var e=this.id;if(arguments.length<2){var r=Es,u=ks;ks=e,R(this,function(t,r,u){Es=t.__transition__[e],n.call(t,t.__data__,r,u)}),Es=r,ks=u}else R(this,function(r){var u=r.__transition__[e];(u.event||(u.event=Xo.dispatch("start","end"))).on(n,t)});return this},Ns.transition=function(){for(var n,t,e,r,u=this.id,i=++Ls,o=[],a=0,c=this.length;c>a;a++){o.push(n=[]);for(var t=this[a],s=0,l=t.length;l>s;s++)(e=t[s])&&(r=Object.create(e.__transition__[u]),r.delay+=r.duration,jo(e,s,i,r)),n.push(e)}return Do(o,i)},Xo.svg.axis=function(){function n(n){n.each(function(){var n,s=Xo.select(this),l=this.__chart__||e,f=this.__chart__=e.copy(),h=null==c?f.ticks?f.ticks.apply(f,a):f.domain():c,g=null==t?f.tickFormat?f.tickFormat.apply(f,a):bt:t,p=s.selectAll(".tick").data(h,f),v=p.enter().insert("g",".domain").attr("class","tick").style("opacity",Aa),d=Xo.transition(p.exit()).style("opacity",Aa).remove(),m=Xo.transition(p).style("opacity",1),y=Ri(f),x=s.selectAll(".domain").data([0]),M=(x.enter().append("path").attr("class","domain"),Xo.transition(x));v.append("line"),v.append("text");var 
_=v.select("line"),b=m.select("line"),w=p.select("text").text(g),S=v.select("text"),k=m.select("text");switch(r){case"bottom":n=Ho,_.attr("y2",u),S.attr("y",Math.max(u,0)+o),b.attr("x2",0).attr("y2",u),k.attr("x",0).attr("y",Math.max(u,0)+o),w.attr("dy",".71em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+i+"V0H"+y[1]+"V"+i);break;case"top":n=Ho,_.attr("y2",-u),S.attr("y",-(Math.max(u,0)+o)),b.attr("x2",0).attr("y2",-u),k.attr("x",0).attr("y",-(Math.max(u,0)+o)),w.attr("dy","0em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+-i+"V0H"+y[1]+"V"+-i);break;case"left":n=Fo,_.attr("x2",-u),S.attr("x",-(Math.max(u,0)+o)),b.attr("x2",-u).attr("y2",0),k.attr("x",-(Math.max(u,0)+o)).attr("y",0),w.attr("dy",".32em").style("text-anchor","end"),M.attr("d","M"+-i+","+y[0]+"H0V"+y[1]+"H"+-i);break;case"right":n=Fo,_.attr("x2",u),S.attr("x",Math.max(u,0)+o),b.attr("x2",u).attr("y2",0),k.attr("x",Math.max(u,0)+o).attr("y",0),w.attr("dy",".32em").style("text-anchor","start"),M.attr("d","M"+i+","+y[0]+"H0V"+y[1]+"H"+i)}if(f.rangeBand){var E=f,A=E.rangeBand()/2;l=f=function(n){return E(n)+A}}else l.rangeBand?l=f:d.call(n,f);v.call(n,l),m.call(n,f)})}var t,e=Xo.scale.linear(),r=Ts,u=6,i=6,o=3,a=[10],c=null;return n.scale=function(t){return arguments.length?(e=t,n):e},n.orient=function(t){return arguments.length?(r=t in qs?t+"":Ts,n):r},n.ticks=function(){return arguments.length?(a=arguments,n):a},n.tickValues=function(t){return arguments.length?(c=t,n):c},n.tickFormat=function(e){return arguments.length?(t=e,n):t},n.tickSize=function(t){var e=arguments.length;return e?(u=+t,i=+arguments[e-1],n):u},n.innerTickSize=function(t){return arguments.length?(u=+t,n):u},n.outerTickSize=function(t){return arguments.length?(i=+t,n):i},n.tickPadding=function(t){return arguments.length?(o=+t,n):o},n.tickSubdivide=function(){return arguments.length&&n},n};var Ts="bottom",qs={top:1,right:1,bottom:1,left:1};Xo.svg.brush=function(){function n(i){i.each(function(){var i=Xo.select(this).style("pointer-events","all").style("-webkit-tap-highlight-color","rgba(0,0,0,0)").on("mousedown.brush",u).on("touchstart.brush",u),o=i.selectAll(".background").data([0]);o.enter().append("rect").attr("class","background").style("visibility","hidden").style("cursor","crosshair"),i.selectAll(".extent").data([0]).enter().append("rect").attr("class","extent").style("cursor","move");var a=i.selectAll(".resize").data(p,bt);a.exit().remove(),a.enter().append("g").attr("class",function(n){return"resize "+n}).style("cursor",function(n){return zs[n]}).append("rect").attr("x",function(n){return/[ew]$/.test(n)?-3:null}).attr("y",function(n){return/^[ns]/.test(n)?-3:null}).attr("width",6).attr("height",6).style("visibility","hidden"),a.style("display",n.empty()?"none":null);var l,f=Xo.transition(i),h=Xo.transition(o);c&&(l=Ri(c),h.attr("x",l[0]).attr("width",l[1]-l[0]),e(f)),s&&(l=Ri(s),h.attr("y",l[0]).attr("height",l[1]-l[0]),r(f)),t(f)})}function t(n){n.selectAll(".resize").attr("transform",function(n){return"translate("+l[+/e$/.test(n)]+","+f[+/^s/.test(n)]+")"})}function e(n){n.select(".extent").attr("x",l[0]),n.selectAll(".extent,.n>rect,.s>rect").attr("width",l[1]-l[0])}function r(n){n.select(".extent").attr("y",f[0]),n.selectAll(".extent,.e>rect,.w>rect").attr("height",f[1]-f[0])}function u(){function u(){32==Xo.event.keyCode&&(C||(x=null,L[0]-=l[1],L[1]-=f[1],C=2),d())}function p(){32==Xo.event.keyCode&&2==C&&(L[0]+=l[1],L[1]+=f[1],C=0,d())}function v(){var 
n=Xo.mouse(_),u=!1;M&&(n[0]+=M[0],n[1]+=M[1]),C||(Xo.event.altKey?(x||(x=[(l[0]+l[1])/2,(f[0]+f[1])/2]),L[0]=l[+(n[0]<x[0])],L[1]=f[+(n[1]<x[1])]):x=null),E&&m(n,c,0)&&(e(S),u=!0),A&&m(n,s,1)&&(r(S),u=!0),u&&(t(S),w({type:"brush",mode:C?"move":"resize"}))}function m(n,t,e){var r,u,a=Ri(t),c=a[0],s=a[1],p=L[e],v=e?f:l,d=v[1]-v[0];return C&&(c-=p,s-=d+p),r=(e?g:h)?Math.max(c,Math.min(s,n[e])):n[e],C?u=(r+=p)+d:(x&&(p=Math.max(c,Math.min(s,2*x[e]-r))),r>p?(u=r,r=p):u=p),v[0]!=r||v[1]!=u?(e?o=null:i=null,v[0]=r,v[1]=u,!0):void 0}function y(){v(),S.style("pointer-events","all").selectAll(".resize").style("display",n.empty()?"none":null),Xo.select("body").style("cursor",null),T.on("mousemove.brush",null).on("mouseup.brush",null).on("touchmove.brush",null).on("touchend.brush",null).on("keydown.brush",null).on("keyup.brush",null),N(),w({type:"brushend"})}var x,M,_=this,b=Xo.select(Xo.event.target),w=a.of(_,arguments),S=Xo.select(_),k=b.datum(),E=!/^(n|s)$/.test(k)&&c,A=!/^(e|w)$/.test(k)&&s,C=b.classed("extent"),N=O(),L=Xo.mouse(_),T=Xo.select(Go).on("keydown.brush",u).on("keyup.brush",p);if(Xo.event.changedTouches?T.on("touchmove.brush",v).on("touchend.brush",y):T.on("mousemove.brush",v).on("mouseup.brush",y),S.interrupt().selectAll("*").interrupt(),C)L[0]=l[0]-L[0],L[1]=f[0]-L[1];else if(k){var q=+/w$/.test(k),z=+/^n/.test(k);M=[l[1-q]-L[0],f[1-z]-L[1]],L[0]=l[q],L[1]=f[z]}else Xo.event.altKey&&(x=L.slice());S.style("pointer-events","none").selectAll(".resize").style("display",null),Xo.select("body").style("cursor",b.style("cursor")),w({type:"brushstart"}),v()}var i,o,a=y(n,"brushstart","brush","brushend"),c=null,s=null,l=[0,0],f=[0,0],h=!0,g=!0,p=Rs[0];return n.event=function(n){n.each(function(){var n=a.of(this,arguments),t={x:l,y:f,i:i,j:o},e=this.__chart__||t;this.__chart__=t,ks?Xo.select(this).transition().each("start.brush",function(){i=e.i,o=e.j,l=e.x,f=e.y,n({type:"brushstart"})}).tween("brush:brush",function(){var e=hu(l,t.x),r=hu(f,t.y);return i=o=null,function(u){l=t.x=e(u),f=t.y=r(u),n({type:"brush",mode:"resize"})}}).each("end.brush",function(){i=t.i,o=t.j,n({type:"brush",mode:"resize"}),n({type:"brushend"})}):(n({type:"brushstart"}),n({type:"brush",mode:"resize"}),n({type:"brushend"}))})},n.x=function(t){return arguments.length?(c=t,p=Rs[!c<<1|!s],n):c},n.y=function(t){return arguments.length?(s=t,p=Rs[!c<<1|!s],n):s},n.clamp=function(t){return arguments.length?(c&&s?(h=!!t[0],g=!!t[1]):c?h=!!t:s&&(g=!!t),n):c&&s?[h,g]:c?h:s?g:null},n.extent=function(t){var e,r,u,a,h;return arguments.length?(c&&(e=t[0],r=t[1],s&&(e=e[0],r=r[0]),i=[e,r],c.invert&&(e=c(e),r=c(r)),e>r&&(h=e,e=r,r=h),(e!=l[0]||r!=l[1])&&(l=[e,r])),s&&(u=t[0],a=t[1],c&&(u=u[1],a=a[1]),o=[u,a],s.invert&&(u=s(u),a=s(a)),u>a&&(h=u,u=a,a=h),(u!=f[0]||a!=f[1])&&(f=[u,a])),n):(c&&(i?(e=i[0],r=i[1]):(e=l[0],r=l[1],c.invert&&(e=c.invert(e),r=c.invert(r)),e>r&&(h=e,e=r,r=h))),s&&(o?(u=o[0],a=o[1]):(u=f[0],a=f[1],s.invert&&(u=s.invert(u),a=s.invert(a)),u>a&&(h=u,u=a,a=h))),c&&s?[[e,u],[r,a]]:c?[e,r]:s&&[u,a])},n.clear=function(){return n.empty()||(l=[0,0],f=[0,0],i=o=null),n},n.empty=function(){return!!c&&l[0]==l[1]||!!s&&f[0]==f[1]},Xo.rebind(n,a,"on")};var zs={n:"ns-resize",e:"ew-resize",s:"ns-resize",w:"ew-resize",nw:"nwse-resize",ne:"nesw-resize",se:"nwse-resize",sw:"nesw-resize"},Rs=[["n","e","s","w","nw","ne","se","sw"],["e","w"],["n","s"],[]],Ds=tc.format=ac.timeFormat,Ps=Ds.utc,Us=Ps("%Y-%m-%dT%H:%M:%S.%LZ");Ds.iso=Date.prototype.toISOString&&+new Date("2000-01-01T00:00:00.000Z")?Oo:Us,Oo.parse=function(n){var t=new 
Date(n);return isNaN(t)?null:t},Oo.toString=Us.toString,tc.second=Rt(function(n){return new ec(1e3*Math.floor(n/1e3))},function(n,t){n.setTime(n.getTime()+1e3*Math.floor(t))},function(n){return n.getSeconds()}),tc.seconds=tc.second.range,tc.seconds.utc=tc.second.utc.range,tc.minute=Rt(function(n){return new ec(6e4*Math.floor(n/6e4))},function(n,t){n.setTime(n.getTime()+6e4*Math.floor(t))},function(n){return n.getMinutes()}),tc.minutes=tc.minute.range,tc.minutes.utc=tc.minute.utc.range,tc.hour=Rt(function(n){var t=n.getTimezoneOffset()/60;return new ec(36e5*(Math.floor(n/36e5-t)+t))},function(n,t){n.setTime(n.getTime()+36e5*Math.floor(t))},function(n){return n.getHours()}),tc.hours=tc.hour.range,tc.hours.utc=tc.hour.utc.range,tc.month=Rt(function(n){return n=tc.day(n),n.setDate(1),n},function(n,t){n.setMonth(n.getMonth()+t)},function(n){return n.getMonth()}),tc.months=tc.month.range,tc.months.utc=tc.month.utc.range;var js=[1e3,5e3,15e3,3e4,6e4,3e5,9e5,18e5,36e5,108e5,216e5,432e5,864e5,1728e5,6048e5,2592e6,7776e6,31536e6],Hs=[[tc.second,1],[tc.second,5],[tc.second,15],[tc.second,30],[tc.minute,1],[tc.minute,5],[tc.minute,15],[tc.minute,30],[tc.hour,1],[tc.hour,3],[tc.hour,6],[tc.hour,12],[tc.day,1],[tc.day,2],[tc.week,1],[tc.month,1],[tc.month,3],[tc.year,1]],Fs=Ds.multi([[".%L",function(n){return n.getMilliseconds()}],[":%S",function(n){return n.getSeconds()}],["%I:%M",function(n){return n.getMinutes()}],["%I %p",function(n){return n.getHours()}],["%a %d",function(n){return n.getDay()&&1!=n.getDate()}],["%b %d",function(n){return 1!=n.getDate()}],["%B",function(n){return n.getMonth()}],["%Y",be]]),Os={range:function(n,t,e){return Xo.range(Math.ceil(n/e)*e,+t,e).map(Io)},floor:bt,ceil:bt};Hs.year=tc.year,tc.scale=function(){return Yo(Xo.scale.linear(),Hs,Fs)};var Ys=Hs.map(function(n){return[n[0].utc,n[1]]}),Is=Ps.multi([[".%L",function(n){return n.getUTCMilliseconds()}],[":%S",function(n){return n.getUTCSeconds()}],["%I:%M",function(n){return n.getUTCMinutes()}],["%I %p",function(n){return n.getUTCHours()}],["%a %d",function(n){return n.getUTCDay()&&1!=n.getUTCDate()}],["%b %d",function(n){return 1!=n.getUTCDate()}],["%B",function(n){return n.getUTCMonth()}],["%Y",be]]);Ys.year=tc.year.utc,tc.scale.utc=function(){return Yo(Xo.scale.linear(),Ys,Is)},Xo.text=wt(function(n){return n.responseText}),Xo.json=function(n,t){return St(n,"application/json",Zo,t)},Xo.html=function(n,t){return St(n,"text/html",Vo,t)},Xo.xml=wt(function(n){return n.responseXML}),"function"==typeof define&&define.amd?define(Xo):"object"==typeof module&&module.exports?module.exports=Xo:this.d3=Xo}();'use strict';tr.exportTo('tr.ui.b',function(){var THIS_DOC=document.currentScript.ownerDocument;var svgNS='http://www.w3.org/2000/svg';var ColorScheme=tr.b.ColorScheme;function getColorOfKey(key,selected){var id=ColorScheme.getColorIdForGeneralPurposeString(key);if(selected)
+id+=ColorScheme.properties.brightenedOffsets[0];return ColorScheme.colorsAsStrings[id];}
+var ChartBase=tr.ui.b.define('svg',undefined,svgNS);ChartBase.prototype={__proto__:HTMLUnknownElement.prototype,decorate:function(){this.classList.add('chart-base');this.chartTitle_=undefined;this.seriesKeys_=undefined;this.width_=400;this.height_=300;var template=THIS_DOC.querySelector('#chart-base-template');var svgEl=template.content.querySelector('svg');for(var i=0;i<svgEl.children.length;i++)
+this.appendChild(svgEl.children[i].cloneNode(true));Object.defineProperty(this,'width',{get:function(){return this.width_;},set:function(width){this.width_=width;this.updateContents_();}});Object.defineProperty(this,'height',{get:function(){return this.height_;},set:function(height){this.height_=height;this.updateContents_();}});},get chartTitle(){return this.chartTitle_;},set chartTitle(chartTitle){this.chartTitle_=chartTitle;this.updateContents_();},get chartAreaElement(){return this.querySelector('#chart-area');},setSize:function(size){this.width_=size.width;this.height_=size.height;this.updateContents_();},getMargin_:function(){var margin={top:20,right:20,bottom:30,left:50};if(this.chartTitle_)
+margin.top+=20;return margin;},get margin(){return this.getMargin_();},get chartAreaSize(){var margin=this.margin;return{width:this.width_-margin.left-margin.right,height:this.height_-margin.top-margin.bottom};},getLegendKeys_:function(){throw new Error('Not implemented');},updateScales_:function(){throw new Error('Not implemented');},updateContents_:function(){var margin=this.margin;var thisSel=d3.select(this);thisSel.attr('width',this.width_);thisSel.attr('height',this.height_);var chartAreaSel=d3.select(this.chartAreaElement);chartAreaSel.attr('transform','translate('+margin.left+','+margin.top+')');this.updateScales_();this.updateTitle_(chartAreaSel);this.updateLegend_();},updateTitle_:function(chartAreaSel){var titleSel=chartAreaSel.select('#title');if(!this.chartTitle_){titleSel.style('display','none');return;}
+var width=this.chartAreaSize.width;titleSel.attr('transform','translate('+width*0.5+',-5)').style('display',undefined).style('text-anchor','middle').attr('class','title').attr('width',width).text(this.chartTitle_);},updateLegend_:function(){var keys=this.getLegendKeys_();if(keys===undefined)
+return;var chartAreaSel=d3.select(this.chartAreaElement);var chartAreaSize=this.chartAreaSize;var legendEntriesSel=chartAreaSel.selectAll('.legend').data(keys.slice().reverse());legendEntriesSel.enter().append('g').attr('class','legend').attr('transform',function(d,i){return'translate(0,'+i*20+')';}).append('text').text(function(key){return key;});legendEntriesSel.exit().remove();legendEntriesSel.attr('x',chartAreaSize.width-18).attr('width',18).attr('height',18).style('fill',function(key){var selected=this.currentHighlightedLegendKey===key;return getColorOfKey(key,selected);}.bind(this));legendEntriesSel.selectAll('text').attr('x',chartAreaSize.width-24).attr('y',9).attr('dy','.35em').style('text-anchor','end').text(function(d){return d;});},get highlightedLegendKey(){return this.highlightedLegendKey_;},set highlightedLegendKey(highlightedLegendKey){this.highlightedLegendKey_=highlightedLegendKey;this.updateHighlight_();},get currentHighlightedLegendKey(){if(this.tempHighlightedLegendKey_)
+return this.tempHighlightedLegendKey_;return this.highlightedLegendKey_;},pushTempHighlightedLegendKey:function(key){if(this.tempHighlightedLegendKey_)
+throw new Error('push cannot nest');this.tempHighlightedLegendKey_=key;this.updateHighlight_();},popTempHighlightedLegendKey:function(key){if(this.tempHighlightedLegendKey_!=key)
+throw new Error('pop cannot happen');this.tempHighlightedLegendKey_=undefined;this.updateHighlight_();},updateHighlight_:function(){var chartAreaSel=d3.select(this.chartAreaElement);var legendEntriesSel=chartAreaSel.selectAll('.legend');var that=this;legendEntriesSel.each(function(key){var highlighted=key==that.currentHighlightedLegendKey;var color=getColorOfKey(key,highlighted);this.style.fill=color;if(highlighted)
+this.style.fontWeight='bold';else
+this.style.fontWeight='';});}};return{getColorOfKey:getColorOfKey,ChartBase:ChartBase};});'use strict';tr.exportTo('tr.ui.b',function(){var ChartBase=tr.ui.b.ChartBase;var ChartBase2D=tr.ui.b.define('chart-base-2d',ChartBase);ChartBase2D.prototype={__proto__:ChartBase.prototype,decorate:function(){ChartBase.prototype.decorate.call(this);this.classList.add('chart-base-2d');this.xScale_=d3.scale.linear();this.yScale_=d3.scale.linear();this.isYLogScale_=false;this.yLogScaleMin_=undefined;this.dataRange_=new tr.b.Range();this.data_=[];this.seriesKeys_=[];this.leftMargin_=50;d3.select(this.chartAreaElement).append('g').attr('id','brushes');d3.select(this.chartAreaElement).append('g').attr('id','series');this.addEventListener('mousedown',this.onMouseDown_.bind(this));},get data(){return this.data_;},set data(data){if(data===undefined)
+throw new Error('data must be an Array');this.data_=data;this.updateSeriesKeys_();this.updateDataRange_();this.updateContents_();},set isYLogScale(logScale){if(logScale)
+this.yScale_=d3.scale.log(10);else
+this.yScale_=d3.scale.linear();this.isYLogScale_=logScale;},getYScaleMin_:function(){return this.isYLogScale_?this.yLogScaleMin_:0;},getYScaleDomain_:function(minValue,maxValue){if(this.isYLogScale_)
+return[this.getYScaleMin_(),maxValue];return[Math.min(minValue,this.getYScaleMin_()),maxValue];},getSampleWidth_:function(data,index,leftSide){var leftIndex,rightIndex;if(leftSide){leftIndex=Math.max(index-1,0);rightIndex=index;}else{leftIndex=index;rightIndex=Math.min(index+1,data.length-1);}
+var leftWidth=this.getXForDatum_(data[index],index)-
+this.getXForDatum_(data[leftIndex],leftIndex);var rightWidth=this.getXForDatum_(data[rightIndex],rightIndex)-
+this.getXForDatum_(data[index],index);return leftWidth*0.5+rightWidth*0.5;},getLegendKeys_:function(){if(this.seriesKeys_&&this.seriesKeys_.length>1)
+return this.seriesKeys_.slice();return[];},updateSeriesKeys_:function(){var keySet={};this.data_.forEach(function(datum){Object.keys(datum).forEach(function(key){if(this.isDatumFieldSeries_(key))
+keySet[key]=true;},this);},this);this.seriesKeys_=Object.keys(keySet);},isDatumFieldSeries_:function(fieldName){throw new Error('Not implemented');},getXForDatum_:function(datum,index){throw new Error('Not implemented');},updateScales_:function(){if(this.data_.length===0)
+return;var width=this.chartAreaSize.width;var height=this.chartAreaSize.height;this.xScale_.range([0,width]);this.xScale_.domain(d3.extent(this.data_,this.getXForDatum_.bind(this)));var yRange=new tr.b.Range();this.data_.forEach(function(datum){this.seriesKeys_.forEach(function(key){if(datum[key]!==undefined)
+yRange.addValue(datum[key]);});},this);this.yScale_.range([height,0]);this.yScale_.domain([yRange.min,yRange.max]);},updateBrushContents_:function(brushSel){brushSel.selectAll('*').remove();},updateXAxis_:function(xAxis){xAxis.selectAll('*').remove();xAxis[0][0].style.opacity=0;xAxis.attr('transform','translate(0,'+this.chartAreaSize.height+')').call(d3.svg.axis().scale(this.xScale_).orient('bottom'));window.requestAnimationFrame(function(){var previousRight=undefined;xAxis.selectAll('.tick')[0].forEach(function(tick){var currentLeft=tick.transform.baseVal[0].matrix.e;if((previousRight===undefined)||(currentLeft>(previousRight+3))){var currentWidth=tick.getBBox().width;previousRight=currentLeft+currentWidth;}else{tick.style.opacity=0;}});xAxis[0][0].style.opacity=1;});},getMargin_:function(){var margin=ChartBase.prototype.getMargin_.call(this);margin.left=this.leftMargin_;return margin;},updateDataRange_:function(){var dataBySeriesKey=this.getDataBySeriesKey_();this.dataRange_.reset();tr.b.iterItems(dataBySeriesKey,function(series,values){for(var i=0;i<values.length;i++){this.dataRange_.addValue(values[i][series]);}},this);this.yLogScaleMin_=undefined;if(this.dataRange_.min!==undefined){var minValue=this.dataRange_.min;if(minValue==0)
+minValue=1;var onePowerLess=Math.floor(Math.log(minValue)/Math.log(10))-1;this.yLogScaleMin_=Math.pow(10,onePowerLess);}},updateYAxis_:function(yAxis){yAxis.selectAll('*').remove();yAxis[0][0].style.opacity=0;var axisModifier=d3.svg.axis().scale(this.yScale_).orient('left');if(this.isYLogScale_){if(this.yLogScaleMin_===undefined)
+return;var minValue=this.dataRange_.min;if(minValue==0)
+minValue=1;var largestPower=Math.ceil(Math.log(this.dataRange_.max)/Math.log(10))+1;var smallestPower=Math.floor(Math.log(minValue)/Math.log(10));var tickValues=[];for(var i=smallestPower;i<largestPower;i++){tickValues.push(Math.pow(10,i));}
+axisModifier=axisModifier.tickValues(tickValues).tickFormat(function(d){return d;});}
+yAxis.call(axisModifier);window.requestAnimationFrame(function(){var previousTop=undefined;var leftMargin=0;yAxis.selectAll('.tick')[0].forEach(function(tick){var bbox=tick.getBBox();leftMargin=Math.max(leftMargin,bbox.width);var currentTop=tick.transform.baseVal[0].matrix.f;var currentBottom=currentTop+bbox.height;if((previousTop===undefined)||(previousTop>(currentBottom+3))){previousTop=currentTop;}else{tick.style.opacity=0;}});if(leftMargin>this.leftMargin_){this.leftMargin_=leftMargin;this.updateContents_();}else{yAxis[0][0].style.opacity=1;}}.bind(this));},updateContents_:function(){ChartBase.prototype.updateContents_.call(this);var chartAreaSel=d3.select(this.chartAreaElement);this.updateXAxis_(chartAreaSel.select('.x.axis'));this.updateYAxis_(chartAreaSel.select('.y.axis'));this.updateBrushContents_(chartAreaSel.select('#brushes'));this.updateDataContents_(chartAreaSel.select('#series'));},updateDataContents_:function(seriesSel){throw new Error('Not implemented');},getDataBySeriesKey_:function(){var dataBySeriesKey={};this.seriesKeys_.forEach(function(seriesKey){dataBySeriesKey[seriesKey]=[];});this.data_.forEach(function(multiSeriesDatum,index){var x=this.getXForDatum_(multiSeriesDatum,index);d3.keys(multiSeriesDatum).forEach(function(seriesKey){if(seriesKey==='x')
+return;if(multiSeriesDatum[seriesKey]===undefined)
+return;if(!this.isDatumFieldSeries_(seriesKey))
+return;var singleSeriesDatum={x:x};singleSeriesDatum[seriesKey]=multiSeriesDatum[seriesKey];dataBySeriesKey[seriesKey].push(singleSeriesDatum);},this);},this);return dataBySeriesKey;},getDataPointAtClientPoint_:function(clientX,clientY){var rect=this.getBoundingClientRect();var margin=this.margin;var x=clientX-rect.left-margin.left;var y=clientY-rect.top-margin.top;x=this.xScale_.invert(x);y=this.yScale_.invert(y);x=tr.b.clamp(x,this.xScale_.domain()[0],this.xScale_.domain()[1]);y=tr.b.clamp(y,this.yScale_.domain()[0],this.yScale_.domain()[1]);return{x:x,y:y};},prepareDataEvent_:function(mouseEvent,dataEvent){var dataPoint=this.getDataPointAtClientPoint_(mouseEvent.clientX,mouseEvent.clientY);dataEvent.x=dataPoint.x;dataEvent.y=dataPoint.y;},onMouseDown_:function(mouseEvent){tr.ui.b.trackMouseMovesUntilMouseUp(this.onMouseMove_.bind(this,mouseEvent.button),this.onMouseUp_.bind(this,mouseEvent.button));mouseEvent.preventDefault();mouseEvent.stopPropagation();var dataEvent=new tr.b.Event('item-mousedown');dataEvent.button=mouseEvent.button;this.classList.add('updating-brushing-state');this.prepareDataEvent_(mouseEvent,dataEvent);this.dispatchEvent(dataEvent);},onMouseMove_:function(button,mouseEvent){if(mouseEvent.buttons!==undefined){mouseEvent.preventDefault();mouseEvent.stopPropagation();}
+var dataEvent=new tr.b.Event('item-mousemove');dataEvent.button=button;this.prepareDataEvent_(mouseEvent,dataEvent);this.dispatchEvent(dataEvent);},onMouseUp_:function(button,mouseEvent){mouseEvent.preventDefault();mouseEvent.stopPropagation();var dataEvent=new tr.b.Event('item-mouseup');dataEvent.button=button;this.prepareDataEvent_(mouseEvent,dataEvent);this.dispatchEvent(dataEvent);this.classList.remove('updating-brushing-state');}};return{ChartBase2D:ChartBase2D};});'use strict';tr.exportTo('tr.ui.b',function(){var ChartBase2D=tr.ui.b.ChartBase2D;var ChartBase2DBrushX=tr.ui.b.define('chart-base-2d-brush-1d',ChartBase2D);ChartBase2DBrushX.prototype={__proto__:ChartBase2D.prototype,decorate:function(){ChartBase2D.prototype.decorate.call(this);this.brushedRange_=new tr.b.Range();},set brushedRange(range){this.brushedRange_.reset();this.brushedRange_.addRange(range);this.updateContents_();},computeBrushRangeFromIndices:function(indexA,indexB){indexA=tr.b.clamp(indexA,0,this.data_.length-1);indexB=tr.b.clamp(indexB,0,this.data_.length-1);var leftIndex=Math.min(indexA,indexB);var rightIndex=Math.max(indexA,indexB);var r=new tr.b.Range();r.addValue(this.getXForDatum_(this.data_[leftIndex],leftIndex)-
+this.getSampleWidth_(this.data_,leftIndex,true));r.addValue(this.getXForDatum_(this.data_[rightIndex],rightIndex)+
+this.getSampleWidth_(this.data_,rightIndex,false));return r;},getDataIndex_:function(dataX){if(!this.data_)
+return undefined;var bisect=d3.bisector(this.getXForDatum_.bind(this)).right;return bisect(this.data_,dataX)-1;},prepareDataEvent_:function(mouseEvent,dataEvent){ChartBase2D.prototype.prepareDataEvent_.call(this,mouseEvent,dataEvent);dataEvent.index=this.getDataIndex_(dataEvent.x);if(dataEvent.index!==undefined)
+dataEvent.data=this.data_[dataEvent.index];},updateBrushContents_:function(brushSel){brushSel.selectAll('*').remove();var brushes=this.brushedRange_.isEmpty?[]:[this.brushedRange_];var brushRectsSel=brushSel.selectAll('rect').data(brushes);brushRectsSel.enter().append('rect');brushRectsSel.exit().remove();brushRectsSel.attr('x',function(d){return this.xScale_(d.min);}.bind(this)).attr('y',0).attr('width',function(d){return this.xScale_(d.max)-this.xScale_(d.min);}.bind(this)).attr('height',this.chartAreaSize.height);}};return{ChartBase2DBrushX:ChartBase2DBrushX};});'use strict';tr.exportTo('tr.ui.b',function(){var ChartBase2DBrushX=tr.ui.b.ChartBase2DBrushX;var LineChart=tr.ui.b.define('line-chart',ChartBase2DBrushX);LineChart.prototype={__proto__:ChartBase2DBrushX.prototype,decorate:function(){ChartBase2DBrushX.prototype.decorate.call(this);this.classList.add('line-chart');},isDatumFieldSeries_:function(fieldName){return fieldName!='x';},getXForDatum_:function(datum,index){return datum.x;},updateDataContents_:function(dataSel){dataSel.selectAll('*').remove();var dataBySeriesKey=this.getDataBySeriesKey_();var pathsSel=dataSel.selectAll('path').data(this.seriesKeys_);pathsSel.enter().append('path').attr('class','line').style('stroke',function(key){return tr.ui.b.getColorOfKey(key);}).attr('d',function(key){var line=d3.svg.line().x(function(d){return this.xScale_(d.x);}.bind(this)).y(function(d){return this.yScale_(d[key]);}.bind(this));return line(dataBySeriesKey[key]);}.bind(this));pathsSel.exit().remove();}};return{LineChart:LineChart};});'use strict';Polymer('tr-ui-e-s-alerts-side-panel',{ready:function(){this.rangeOfInterest_=new tr.b.Range();this.selection_=undefined;},get model(){return this.model_;},set model(model){this.model_=model;this.updateContents_();},set selection(selection){},set rangeOfInterest(rangeOfInterest){},selectAlertsOfType:function(alertTypeString){var alertsOfType=this.model_.alerts.filter(function(alert){return alert.title===alertTypeString;});var event=new tr.model.RequestSelectionChangeEvent();event.selection=new tr.model.EventSet(alertsOfType);this.dispatchEvent(event);},alertsByType_:function(alerts){var alertsByType={};alerts.forEach(function(alert){if(!alertsByType[alert.title])
+alertsByType[alert.title]=[];alertsByType[alert.title].push(alert);});return alertsByType;},alertsTableRows_:function(alertsByType){return Object.keys(alertsByType).map(function(key){return{alertType:key,count:alertsByType[key].length};});},alertsTableColumns_:function(){return[{title:'Alert type',value:function(row){return row.alertType;},width:'180px'},{title:'Count',width:'100%',value:function(row){return row.count;}}];},createAlertsTable_:function(alerts){var alertsByType=this.alertsByType_(alerts);var table=document.createElement('tr-ui-b-table');table.tableColumns=this.alertsTableColumns_();table.tableRows=this.alertsTableRows_(alertsByType);table.selectionMode=tr.ui.b.TableFormat.SelectionMode.ROW;table.addEventListener('selection-changed',function(e){var row=table.selectedTableRow;if(row)
+this.selectAlertsOfType(row.alertType);}.bind(this));return table;},updateContents_:function(){this.$.result_area.textContent='';if(this.model_===undefined)
+return;var panel=this.createAlertsTable_(this.model_.alerts);this.$.result_area.appendChild(panel);},supportsModel:function(m){if(m==undefined){return{supported:false,reason:'Unknown tracing model'};}else if(m.alerts.length===0){return{supported:false,reason:'No alerts in tracing model'};}
+return{supported:true};},get textLabel(){return'Alerts';}});'use strict';tr.exportTo('tr.c',function(){function ScriptingObject(){}
+ScriptingObject.prototype={onModelChanged:function(model){}};return{ScriptingObject:ScriptingObject};});'use strict';tr.exportTo('tr.c',function(){function ScriptingController(brushingStateController){this.brushingStateController_=brushingStateController;this.scriptObjectNames_=[];this.scriptObjectValues_=[];this.brushingStateController.addEventListener('model-changed',this.onModelChanged_.bind(this));var typeInfos=ScriptingObjectRegistry.getAllRegisteredTypeInfos();typeInfos.forEach(function(typeInfo){this.addScriptObject(typeInfo.metadata.name,typeInfo.constructor);global[typeInfo.metadata.name]=typeInfo.constructor;},this);}
+function ScriptingObjectRegistry(){}
+var options=new tr.b.ExtensionRegistryOptions(tr.b.BASIC_REGISTRY_MODE);tr.b.decorateExtensionRegistry(ScriptingObjectRegistry,options);ScriptingController.prototype={get brushingStateController(){return this.brushingStateController_;},onModelChanged_:function(){this.scriptObjectValues_.forEach(function(v){if(v.onModelChanged)
+v.onModelChanged(this.brushingStateController.model);},this);},addScriptObject:function(name,value){this.scriptObjectNames_.push(name);this.scriptObjectValues_.push(value);},executeCommand:function(command){var f=new Function(this.scriptObjectNames_,'return eval('+command+')');return f.apply(null,this.scriptObjectValues_);}};return{ScriptingController:ScriptingController,ScriptingObjectRegistry:ScriptingObjectRegistry};});'use strict';tr.exportTo('tr.metrics',function(){function MetricRegistry(){}
+var options=new tr.b.ExtensionRegistryOptions(tr.b.BASIC_REGISTRY_MODE);options.defaultMetadata={};options.mandatoryBaseClass=Function;tr.b.decorateExtensionRegistry(MetricRegistry,options);return{MetricRegistry:MetricRegistry};});'use strict';tr.exportTo('tr.v',function(){function Value(canonicalUrl,name,opt_options,opt_groupingKeys,opt_diagnostics){if(typeof(name)!=='string')
+throw new Error('Expected value_name grouping key to be provided');this.groupingKeys=opt_groupingKeys||{};this.groupingKeys.name=name;this.diagnostics=opt_diagnostics||{};this.diagnostics.canonical_url=canonicalUrl;var options=opt_options||{};this.description=options.description;this.important=options.important!==undefined?options.important:false;}
+Value.fromDict=function(d){if(d.type==='numeric')
+return NumericValue.fromDict(d);if(d.type==='dict')
+return DictValue.fromDict(d);if(d.type=='failure')
+return FailureValue.fromDict(d);if(d.type==='skip')
+return SkipValue.fromDict(d);throw new Error('Not implemented');};Value.prototype={get name(){return this.groupingKeys.name;},get canonicalUrl(){return this.diagnostics.canonical_url;},addGroupingKey:function(keyName,key){if(this.groupingKeys.hasOwnProperty(keyName))
+throw new Error('Tried to redefine grouping key '+keyName);this.groupingKeys[keyName]=key;},asDict:function(){return this.asJSON();},asJSON:function(){var d={grouping_keys:this.groupingKeys,description:this.description,important:this.important,diagnostics:this.diagnostics};this._asDictInto(d);if(d.type===undefined)
+throw new Error('_asDictInto must set type field');return d;},_asDictInto:function(d){throw new Error('Not implemented');}};function NumericValue(canonicalUrl,name,numeric,opt_options,opt_groupingKeys,opt_diagnostics){if(!(numeric instanceof tr.v.NumericBase))
+throw new Error('Expected numeric to be instance of tr.v.NumericBase');Value.call(this,canonicalUrl,name,opt_options,opt_groupingKeys,opt_diagnostics);this.numeric=numeric;}
+NumericValue.fromDict=function(d){if(d.numeric===undefined)
+throw new Error('Expected numeric to be provided');var numeric=tr.v.NumericBase.fromDict(d.numeric);return new NumericValue(d.diagnostics.canonical_url,d.grouping_keys.name,numeric,d,d.grouping_keys,d.diagnostics);};NumericValue.prototype={__proto__:Value.prototype,_asDictInto:function(d){d.type='numeric';d.numeric=this.numeric.asDict();}};function DictValue(canonicalUrl,name,value,opt_options,opt_groupingKeys,opt_diagnostics){Value.call(this,canonicalUrl,name,opt_options,opt_groupingKeys,opt_diagnostics);this.value=value;}
+DictValue.fromDict=function(d){if(d.units!==undefined)
+throw new Error('Expected units to be undefined');if(d.value===undefined)
+throw new Error('Expected value to be provided');return new DictValue(d.diagnostics.canonical_url,d.grouping_keys.name,d.value,d,d.groupingKeys,d.diagnostics);};DictValue.prototype={__proto__:Value.prototype,_asDictInto:function(d){d.type='dict';d.value=this.value;}};function FailureValue(canonicalUrl,name,opt_options,opt_groupingKeys,opt_diagnostics){var options=opt_options||{};var stack;if(options.stack===undefined){if(options.stack_str===undefined){throw new Error('Expected stack_str or stack to be provided');}else{stack=options.stack_str;}}else{stack=options.stack;}
+if(typeof stack!=='string')
+throw new Error('stack must be provided as a string');if(canonicalUrl===undefined){throw new Error('FailureValue must provide canonicalUrl');}
+Value.call(this,canonicalUrl,name,options,opt_groupingKeys,opt_diagnostics);this.stack=stack;}
+FailureValue.fromError=function(canonicalUrl,e){var ex=tr.b.normalizeException(e);return new FailureValue(canonicalUrl,ex.typeName,{description:ex.message,stack:ex.stack});};FailureValue.fromDict=function(d){if(d.units!==undefined)
+throw new Error('Expected units to be undefined');if(d.stack_str===undefined)
+throw new Error('Expected stack_str to be provided');return new FailureValue(d.diagnostics.canonical_url,d.grouping_keys.name,d,d.grouping_keys,d.diagnostics);};FailureValue.prototype={__proto__:Value.prototype,_asDictInto:function(d){d.type='failure';d.stack_str=this.stack;}};function SkipValue(canonicalUrl,name,opt_options,opt_groupingKeys,opt_diagnostics){Value.call(this,canonicalUrl,name,opt_options,opt_groupingKeys,opt_diagnostics);}
+SkipValue.fromDict=function(d){if(d.units!==undefined)
+throw new Error('Expected units to be undefined');return new SkipValue(d.diagnostics.canonical_url,d.grouping_keys.name,d,d.grouping_keys,d.diagnostics);};SkipValue.prototype={__proto__:Value.prototype,_asDictInto:function(d){d.type='skip';}};return{Value:Value,NumericValue:NumericValue,DictValue:DictValue,FailureValue:FailureValue,SkipValue:SkipValue};});'use strict';tr.exportTo('tr.metrics',function(){function sampleMetric(valueList,model){var unit=tr.v.Unit.byName.sizeInBytes_smallerIsBetter;var n1=new tr.v.ScalarNumeric(unit,1);var n2=new tr.v.ScalarNumeric(unit,2);valueList.addValue(new tr.v.NumericValue(model.canonicalUrlThatCreatedThisTrace,'foo',n1));valueList.addValue(new tr.v.NumericValue(model.canonicalUrlThatCreatedThisTrace,'bar',n2));}
+sampleMetric.prototype={__proto__:Function.prototype};tr.metrics.MetricRegistry.register(sampleMetric);return{sampleMetric:sampleMetric};});'use strict';tr.exportTo('tr.metrics.sh',function(){function perceptualBlend(ir,index,score){return Math.exp(1-score);}
+function filterExpectationsByRange(irs,opt_range){var filteredExpectations=[];irs.forEach(function(ir){if(!(ir instanceof tr.model.um.UserExpectation))
+return;if(!opt_range||opt_range.intersectsExplicitRangeExclusive(ir.start,ir.end))
+filteredExpectations.push(ir);});return filteredExpectations;}
+return{perceptualBlend:perceptualBlend,filterExpectationsByRange:filterExpectationsByRange};});'use strict';tr.exportTo('tr.metrics.sh',function(){var UNIT=tr.v.Unit.byName.normalizedPercentage_biggerIsBetter;var DESCRIPTION='Normalized CPU budget consumption';function EfficiencyMetric(valueList,model){var scores=[];model.userModel.expectations.forEach(function(ue){var options={};options.description=DESCRIPTION;var groupingKeys={};groupingKeys.userExpectationStableId=ue.stableId;groupingKeys.userExpectationStageTitle=ue.stageTitle;groupingKeys.userExpectationInitiatorTitle=ue.initiatorTitle;var score=undefined;if((ue.totalCpuMs===undefined)||(ue.totalCpuMs==0))
+return;var cpuFractionBudget=tr.b.Range.fromExplicitRange(0.5,1.5);if(ue instanceof tr.model.um.IdleExpectation){cpuFractionBudget=tr.b.Range.fromExplicitRange(0.1,1);}else if(ue instanceof tr.model.um.AnimationExpectation){cpuFractionBudget=tr.b.Range.fromExplicitRange(1,2);}
+var cpuMsBudget=tr.b.Range.fromExplicitRange(ue.duration*cpuFractionBudget.min,ue.duration*cpuFractionBudget.max);var normalizedCpu=tr.b.normalize(ue.totalCpuMs,cpuMsBudget.min,cpuMsBudget.max);score=1-tr.b.clamp(normalizedCpu,0,1);scores.push(score);valueList.addValue(new tr.v.NumericValue(model.canonicalUrlThatCreatedThisTrace,'efficiency',new tr.v.ScalarNumeric(UNIT,score),options,groupingKeys));});var options={};options.description=DESCRIPTION;var groupingKeys={};var overallScore=tr.b.Statistics.weightedMean(scores,tr.metrics.sh.perceptualBlend);if(overallScore===undefined)
+return;valueList.addValue(new tr.v.NumericValue(model.canonicalUrlThatCreatedThisTrace,'efficiency',new tr.v.ScalarNumeric(UNIT,overallScore),options,groupingKeys));}
+EfficiencyMetric.prototype={__proto__:Function.prototype};tr.metrics.MetricRegistry.register(EfficiencyMetric);return{EfficiencyMetric:EfficiencyMetric};});'use strict';tr.exportTo('tr.metrics.sh',function(){var MIN_DISCREPANCY=0.05;var MAX_DISCREPANCY=0.3;var UNIT=tr.v.Unit.byName.normalizedPercentage_biggerIsBetter;var DESCRIPTION='Mean Opinion Score for Animation smoothness';function AnimationSmoothnessMetric(valueList,model){model.userModel.expectations.forEach(function(ue){if(!(ue instanceof tr.model.um.AnimationExpectation))
+return;if(ue.frameEvents===undefined||ue.frameEvents.length===0)
+throw new Error('Animation missing frameEvents '+ue.stableId);var options={};options.description=DESCRIPTION;var groupingKeys={};groupingKeys.userExpectationStableId=ue.stableId;groupingKeys.userExpectationStageTitle=ue.stageTitle;groupingKeys.userExpectationInitiatorTitle=ue.initiatorTitle;var frameTimestamps=ue.frameEvents.toArray().map(function(event){return event.start;});var absolute=false;var discrepancy=tr.b.Statistics.timestampsDiscrepancy(frameTimestamps,absolute);var smoothness=1-tr.b.normalize(discrepancy,MIN_DISCREPANCY,MAX_DISCREPANCY);var score=tr.b.clamp(smoothness,0,1);valueList.addValue(new tr.v.NumericValue(model.canonicalUrlThatCreatedThisTrace,'smoothness',new tr.v.ScalarNumeric(UNIT,score),options,groupingKeys));});}
+AnimationSmoothnessMetric.prototype={__proto__:Function.prototype};tr.metrics.MetricRegistry.register(AnimationSmoothnessMetric);return{AnimationSmoothnessMetric:AnimationSmoothnessMetric};});'use strict';tr.exportTo('tr.metrics.sh',function(){var MAX_FPS=60;var MIN_FPS=10;var UNIT=tr.v.Unit.byName.normalizedPercentage_biggerIsBetter;var DESCRIPTION='Mean Opinion Score for Animation throughput';function AnimationThroughputMetric(valueList,model){model.userModel.expectations.forEach(function(ue){if(!(ue instanceof tr.model.um.AnimationExpectation))
+return;if(ue.frameEvents===undefined||ue.frameEvents.length===0)
+throw new Error('Animation missing frameEvents '+ue.stableId);var options={};options.description=DESCRIPTION;var groupingKeys={};groupingKeys.userExpectationStableId=ue.stableId;groupingKeys.userExpectationStageTitle=ue.stageTitle;groupingKeys.userExpectationInitiatorTitle=ue.initiatorTitle;var durationSeconds=ue.duration/1000;var avgSpf=durationSeconds/ue.frameEvents.length;var throughput=1-tr.b.normalize(avgSpf,1/MAX_FPS,1/MIN_FPS);var score=tr.b.clamp(throughput,0,1);valueList.addValue(new tr.v.NumericValue(model.canonicalUrlThatCreatedThisTrace,'throughput',new tr.v.ScalarNumeric(UNIT,score),options,groupingKeys));});}
+AnimationThroughputMetric.prototype={__proto__:Function.prototype};tr.metrics.MetricRegistry.register(AnimationThroughputMetric);return{AnimationThroughputMetric:AnimationThroughputMetric};});'use strict';tr.exportTo('tr.metrics.sh',function(){var RESPONSE_HISTOGRAM=tr.v.Numeric.fromDict({unit:'unitless',min:150,max:5000,centralBinWidth:485,underflowBin:{min:-Number.MAX_VALUE,max:150,count:1000},centralBins:[{min:150,max:635,count:708},{min:635,max:1120,count:223},{min:1120,max:1605,count:50},{min:1605,max:2090,count:33},{min:2090,max:2575,count:23},{min:2575,max:3060,count:17},{min:3060,max:3545,count:12},{min:3545,max:4030,count:8},{min:4030,max:4515,count:4},{min:4515,max:5000,count:1}],overflowBin:{min:5000,max:Number.MAX_VALUE,count:0}});var FAST_RESPONSE_HISTOGRAM=tr.v.Numeric.fromDict({unit:'unitless',min:66,max:2200,centralBinWidth:214,underflowBin:{min:-Number.MAX_VALUE,max:66,count:1000},centralBins:[{min:66,max:280,count:708},{min:280,max:493,count:223},{min:493,max:706,count:50},{min:706,max:920,count:33},{min:920,max:1133,count:23},{min:1133,max:1346,count:17},{min:1346,max:1560,count:12},{min:1560,max:1773,count:8},{min:1773,max:1987,count:4},{min:1987,max:2200,count:1}],overflowBin:{min:2200,max:Number.MAX_VALUE,count:0}});var LOAD_HISTOGRAM=tr.v.Numeric.fromDict({unit:'unitless',min:1000,max:60000,centralBinWidth:5900,underflowBin:{min:-Number.MAX_VALUE,max:1000,count:1000},centralBins:[{min:1000,max:6900,count:901},{min:6900,max:12800,count:574},{min:12800,max:18700,count:298},{min:18700,max:24600,count:65},{min:24600,max:30500,count:35},{min:30500,max:36400,count:23},{min:36400,max:42300,count:16},{min:42300,max:48200,count:10},{min:48200,max:54100,count:5},{min:54100,max:60000,count:2}],overflowBin:{min:60000,max:Number.MAX_VALUE,count:0}});var UNIT=tr.v.Unit.byName.normalizedPercentage_biggerIsBetter;var DESCRIPTION=('For Load and Response, Mean Opinion Score of completion time; '+'For Animation, perceptual blend of Mean Opinion Scores of '+'throughput and smoothness');function getDurationScore(histogram,duration){return histogram.getInterpolatedCountAt(duration)/histogram.maxCount;}
+function ResponsivenessMetric(valueList,model){tr.metrics.sh.AnimationThroughputMetric(valueList,model);tr.metrics.sh.AnimationSmoothnessMetric(valueList,model);var throughputForAnimation={};var smoothnessForAnimation={};valueList.valueDicts.forEach(function(value){if((value.type!=='numeric')||(value.numeric.type!=='scalar'))
+return;var ue=value.grouping_keys.userExpectationStableId;if(value.grouping_keys.name==='throughput')
+throughputForAnimation[ue]=value.numeric.value;if(value.grouping_keys.name==='smoothness')
+smoothnessForAnimation[ue]=value.numeric.value;});var scores=[];model.userModel.expectations.forEach(function(ue){var score=undefined;if(ue instanceof tr.model.um.IdleExpectation){return;}else if(ue instanceof tr.model.um.LoadExpectation){score=getDurationScore(LOAD_HISTOGRAM,ue.duration);}else if(ue instanceof tr.model.um.ResponseExpectation){var histogram=RESPONSE_HISTOGRAM;if(ue.isAnimationBegin)
+histogram=FAST_RESPONSE_HISTOGRAM;score=getDurationScore(histogram,ue.duration);}else if(ue instanceof tr.model.um.AnimationExpectation){var throughput=throughputForAnimation[ue.stableId];var smoothness=smoothnessForAnimation[ue.stableId];if(throughput===undefined)
+throw new Error('Missing throughput for '+ue.stableId);if(smoothness===undefined)
+throw new Error('Missing smoothness for '+ue.stableId);score=tr.b.Statistics.weightedMean([throughput,smoothness],tr.metrics.sh.perceptualBlend);}else{throw new Error('Unrecognized stage for '+ue.stableId);}
+if(score===undefined)
+throw new Error('Failed to compute responsiveness for '+ue.stableId);scores.push(score);var options={};options.description=DESCRIPTION;var groupingKeys={};groupingKeys.userExpectationStableId=ue.stableId;groupingKeys.userExpectationStageTitle=ue.stageTitle;groupingKeys.userExpectationInitiatorTitle=ue.initiatorTitle;valueList.addValue(new tr.v.NumericValue(model.canonicalUrlThatCreatedThisTrace,'responsiveness',new tr.v.ScalarNumeric(UNIT,score),options,groupingKeys));});var options={};options.description=DESCRIPTION;var groupingKeys={};var overallScore=tr.b.Statistics.weightedMean(scores,tr.metrics.sh.perceptualBlend);if(overallScore===undefined)
+return;valueList.addValue(new tr.v.NumericValue(model.canonicalUrlThatCreatedThisTrace,'responsiveness',new tr.v.ScalarNumeric(UNIT,overallScore),options,groupingKeys));}
+ResponsivenessMetric.prototype={__proto__:Function.prototype};tr.metrics.MetricRegistry.register(ResponsivenessMetric);return{ResponsivenessMetric:ResponsivenessMetric};});'use strict';tr.exportTo('tr.metrics.sh',function(){function SystemHealthMetrics(valueList,model){tr.metrics.sh.ResponsivenessMetric(valueList,model);tr.metrics.sh.EfficiencyMetric(valueList,model);}
+SystemHealthMetrics.prototype={__proto__:Function.prototype};tr.metrics.MetricRegistry.register(SystemHealthMetrics);return{SystemHealthMetrics:SystemHealthMetrics};});'use strict';tr.exportTo('tr.metrics',function(){function tracingMetric(valueList,model){if(!model.stats.hasEventSizesinBytes){throw new Error('Model stats does not have event size information. '+'Please enable ImportOptions.trackDetailedModelStats.');}
+var eventStats=model.stats.allTraceEventStatsInTimeIntervals;eventStats.sort(function(a,b){return a.timeInterval-b.timeInterval;});var maxEventCountPerSec=0;var maxEventBytesPerSec=0;var totalTraceBytes=0;var WINDOW_SIZE=Math.floor(1000/model.stats.TIME_INTERVAL_SIZE_IN_MS);var runningEventNumPerSec=0;var runningEventBytesPerSec=0;var start=0;var end=0;while(end<eventStats.length){var startEventStats=eventStats[start];var endEventStats=eventStats[end];var timeWindow=endEventStats.timeInterval-startEventStats.timeInterval;if(timeWindow>=WINDOW_SIZE){runningEventNumPerSec-=startEventStats.numEvents;runningEventBytesPerSec-=startEventStats.totalEventSizeinBytes;start++;continue;}
+runningEventNumPerSec+=endEventStats.numEvents;if(maxEventCountPerSec<runningEventNumPerSec)
+maxEventCountPerSec=runningEventNumPerSec;runningEventBytesPerSec+=endEventStats.totalEventSizeinBytes;if(maxEventBytesPerSec<runningEventBytesPerSec)
+maxEventBytesPerSec=runningEventBytesPerSec;totalTraceBytes+=endEventStats.totalEventSizeinBytes;end++;}
+var stats=model.stats.allTraceEventStats;var categoryStatsMap=new Map();var categoryStats=[];for(var i=0;i<stats.length;i++){var categoryStat=categoryStatsMap.get(stats[i].category);if(categoryStat===undefined){categoryStat={category:stats[i].category,totalEventSizeinBytes:0};categoryStatsMap.set(stats[i].category,categoryStat);categoryStats.push(categoryStat);}
+categoryStat.totalEventSizeinBytes+=stats[i].totalEventSizeinBytes;}
+var maxCategoryStats=categoryStats.reduce(function(a,b){return a.totalEventSizeinBytes<b.totalEventSizeinBytes?b:a;});var maxEventBytesPerCategory=maxCategoryStats.totalEventSizeinBytes;var maxCategoryName=maxCategoryStats.category;var maxEventCountPerSecValue=new tr.v.ScalarNumeric(tr.v.Unit.byName.unitlessNumber_smallerIsBetter,maxEventCountPerSec);var maxEventBytesPerSecValue=new tr.v.ScalarNumeric(tr.v.Unit.byName.sizeInBytes_smallerIsBetter,maxEventBytesPerSec);var totalTraceBytesValue=new tr.v.ScalarNumeric(tr.v.Unit.byName.sizeInBytes_smallerIsBetter,totalTraceBytes);var diagnostics={category_with_max_event_size:{name:maxCategoryName,size_in_bytes:maxEventBytesPerCategory}};valueList.addValue(new tr.v.NumericValue(model.canonicalUrlThatCreatedThisTrace,'Total trace size in bytes',totalTraceBytesValue,undefined,undefined,diagnostics));valueList.addValue(new tr.v.NumericValue(model.canonicalUrlThatCreatedThisTrace,'Max number of events per second',maxEventCountPerSecValue,undefined,undefined,diagnostics));valueList.addValue(new tr.v.NumericValue(model.canonicalUrlThatCreatedThisTrace,'Max event size in bytes per second',maxEventBytesPerSecValue,undefined,undefined,diagnostics));}
+tracingMetric.prototype={__proto__:Function.prototype};tr.metrics.MetricRegistry.register(tracingMetric);return{tracingMetric:tracingMetric};});'use strict';Polymer('tr-ui-a-sub-view',{set tabLabel(label){return this.setAttribute('tab-label',label);},get tabLabel(){return this.getAttribute('tab-label');},get requiresTallView(){return false;},get relatedEventsToHighlight(){return undefined;},set selection(selection){throw new Error('Not implemented!');},get selection(){throw new Error('Not implemented!');}});'use strict';Polymer('tr-ui-a-alert-sub-view',{ready:function(){this.currentSelection_=undefined;this.$.table.tableColumns=[{title:'Label',value:function(row){return row.name;},width:'150px'},{title:'Value',width:'100%',value:function(row){return row.value;}}];this.$.table.showHeader=false;},get selection(){return this.currentSelection_;},set selection(selection){this.currentSelection_=selection;this.updateContents_();},getRowsForSingleAlert_:function(alert){var rows=[];for(var argName in alert.args){var argView=document.createElement('tr-ui-a-generic-object-view');argView.object=alert.args[argName];rows.push({name:argName,value:argView});}
+if(alert.associatedEvents.length){alert.associatedEvents.forEach(function(event,i){var linkEl=document.createElement('tr-ui-a-analysis-link');linkEl.setSelectionAndContent(function(){return event;},event.title);var valueString='';if(event instanceof tr.model.TimedEvent)
+valueString='took '+event.duration.toFixed(2)+'ms';rows.push({name:linkEl,value:valueString});});}
+var descriptionEl=tr.ui.b.createDiv({textContent:alert.info.description,maxWidth:'300px'});rows.push({name:'Description',value:descriptionEl});if(alert.info.docLinks){alert.info.docLinks.forEach(function(linkObject){var linkEl=document.createElement('a');linkEl.target='_blank';linkEl.href=linkObject.href;linkEl.textContent=linkObject.textContent;rows.push({name:linkObject.label,value:linkEl});});}
+return rows;},getRowsForAlerts_:function(alerts){if(alerts.length==1){var rows=[{name:'Alert',value:alerts[0].title}];var detailRows=this.getRowsForSingleAlert_(alerts[0]);rows.push.apply(rows,detailRows);return rows;}else{return alerts.map(function(alert){return{name:'Alert',value:alert.title,isExpanded:alerts.length<10,subRows:this.getRowsForSingleAlert_(alert)};},this);}},updateContents_:function(){if(this.currentSelection_===undefined){this.$.table.rows=[];this.$.table.rebuild();return;}
+var alerts=this.currentSelection_;this.$.table.tableRows=this.getRowsForAlerts_(alerts);this.$.table.rebuild();},get relatedEventsToHighlight(){if(!this.currentSelection_)
+return undefined;return this.currentSelection_[0].associatedEvents;}});'use strict';tr.exportTo('tr.ui.b',function(){var EventSet=tr.model.EventSet;var SelectionState=tr.model.SelectionState;function BrushingState(){this.guid_=tr.b.GUID.allocate();this.selection_=new EventSet();this.findMatches_=new EventSet();this.analysisViewRelatedEvents_=new EventSet();this.analysisLinkHoveredEvents_=new EventSet();this.appliedToModel_=undefined;this.viewSpecificBrushingStates_={};}
+BrushingState.prototype={get guid(){return this.guid_;},clone:function(){var that=new BrushingState();that.selection_=this.selection_;that.findMatches_=this.findMatches_;that.analysisViewRelatedEvents_=this.analysisViewRelatedEvents_;that.analysisLinkHoveredEvents_=this.analysisLinkHoveredEvents_;that.viewSpecificBrushingStates_=this.viewSpecificBrushingStates_;return that;},equals:function(that){if(!this.selection_.equals(that.selection_))
+return false;if(!this.findMatches_.equals(that.findMatches_))
+return false;if(!this.analysisViewRelatedEvents_.equals(that.analysisViewRelatedEvents_)){return false;}
+if(!this.analysisLinkHoveredEvents_.equals(that.analysisLinkHoveredEvents_)){return false;}
+return true;},get selectionOfInterest(){if(this.selection_.length)
+return this.selection_;if(this.findMatches_.length)
+return this.findMatches_;if(this.analysisViewRelatedEvents_.length)
+return this.analysisViewRelatedEvents_;if(this.analysisLinkHoveredEvents_.length)
+return this.analysisLinkHoveredEvents_;return this.selection_;},get selection(){return this.selection_;},set selection(selection){if(this.appliedToModel_)
+throw new Error('Cannot mutate this state right now');if(selection===undefined)
+selection=new EventSet();this.selection_=selection;},get findMatches(){return this.findMatches_;},set findMatches(findMatches){if(this.appliedToModel_)
+throw new Error('Cannot mutate this state right now');if(findMatches===undefined)
+findMatches=new EventSet();this.findMatches_=findMatches;},get analysisViewRelatedEvents(){return this.analysisViewRelatedEvents_;},set analysisViewRelatedEvents(analysisViewRelatedEvents){if(this.appliedToModel_)
+throw new Error('Cannot mutate this state right now');if(analysisViewRelatedEvents===undefined)
+analysisViewRelatedEvents=new EventSet();this.analysisViewRelatedEvents_=analysisViewRelatedEvents;},get analysisLinkHoveredEvents(){return this.analysisLinkHoveredEvents_;},set analysisLinkHoveredEvents(analysisLinkHoveredEvents){if(this.appliedToModel_)
+throw new Error('Cannot mutate this state right now');if(analysisLinkHoveredEvents===undefined)
+analysisLinkHoveredEvents=new EventSet();this.analysisLinkHoveredEvents_=analysisLinkHoveredEvents;},get isAppliedToModel(){return this.appliedToModel_!==undefined;},get viewSpecificBrushingStates(){return this.viewSpecificBrushingStates_;},set viewSpecificBrushingStates(viewSpecificBrushingStates){this.viewSpecificBrushingStates_=viewSpecificBrushingStates;},get causesDimming_(){return this.findMatches_.length>0||this.analysisViewRelatedEvents_.length>0;},get brightenedEvents_(){var brightenedEvents=new EventSet();brightenedEvents.addEventSet(this.selection_);brightenedEvents.addEventSet(this.analysisLinkHoveredEvents_);return brightenedEvents;},applyToModelSelectionState:function(model){this.appliedToModel_=model;if(!this.causesDimming_){this.brightenedEvents_.forEach(function(e){var score;score=0;if(this.selection_.contains(e))
+score++;if(this.analysisLinkHoveredEvents_.contains(e))
+score++;e.selectionState=SelectionState.getFromBrighteningLevel(score);},this);return;}
+var brightenedEvents=this.brightenedEvents_;model.iterateAllEvents(function(e){var score;if(brightenedEvents.contains(e)){score=0;if(this.selection_.contains(e))
+score++;if(this.analysisLinkHoveredEvents_.contains(e))
+score++;e.selectionState=SelectionState.getFromBrighteningLevel(score);}else{score=0;if(this.findMatches_.contains(e))
+score++;if(this.analysisViewRelatedEvents_.contains(e))
+score++;e.selectionState=SelectionState.getFromDimmingLevel(score);}}.bind(this));},transferModelOwnershipToClone:function(that){if(!this.appliedToModel_)
+throw new Error('Not applied');that.appliedToModel_=this.appliedToModel_;this.appliedToModel_=undefined;},unapplyFromModelSelectionState:function(){if(!this.appliedToModel_)
+throw new Error('Not applied');var model=this.appliedToModel_;this.appliedToModel_=undefined;if(!this.causesDimming_){this.brightenedEvents_.forEach(function(e){e.selectionState=SelectionState.NONE;});return;}
+model.iterateAllEvents(function(e){e.selectionState=SelectionState.NONE;});}};return{BrushingState:BrushingState};});'use strict';tr.exportTo('tr.model',function(){function YComponent(stableId,yPercentOffset){this.stableId=stableId;this.yPercentOffset=yPercentOffset;}
+YComponent.prototype={toDict:function(){return{stableId:this.stableId,yPercentOffset:this.yPercentOffset};}};function Location(xWorld,yComponents){this.xWorld_=xWorld;this.yComponents_=yComponents;};Location.fromViewCoordinates=function(viewport,viewX,viewY){var dt=viewport.currentDisplayTransform;var xWorld=dt.xViewToWorld(viewX);var yComponents=[];var elem=document.elementFromPoint(viewX+viewport.modelTrackContainer.canvas.offsetLeft,viewY+viewport.modelTrackContainer.canvas.offsetTop);while(elem instanceof tr.ui.tracks.Track){if(elem.eventContainer){var boundRect=elem.getBoundingClientRect();var yPercentOffset=(viewY-boundRect.top)/boundRect.height;yComponents.push(new YComponent(elem.eventContainer.stableId,yPercentOffset));}
+elem=elem.parentElement;}
+if(yComponents.length==0)
+return;return new Location(xWorld,yComponents);}
+Location.fromStableIdAndTimestamp=function(viewport,stableId,ts){var xWorld=ts;var yComponents=[];var containerToTrack=viewport.containerToTrackMap;var elem=containerToTrack.getTrackByStableId(stableId);if(!elem)
+return;var firstY=elem.getBoundingClientRect().top;while(elem instanceof tr.ui.tracks.Track){if(elem.eventContainer){var boundRect=elem.getBoundingClientRect();var yPercentOffset=(firstY-boundRect.top)/boundRect.height;yComponents.push(new YComponent(elem.eventContainer.stableId,yPercentOffset));}
+elem=elem.parentElement;}
+if(yComponents.length==0)
+return;return new Location(xWorld,yComponents);}
+Location.prototype={get xWorld(){return this.xWorld_;},getContainingTrack:function(viewport){var containerToTrack=viewport.containerToTrackMap;for(var i in this.yComponents_){var yComponent=this.yComponents_[i];var track=containerToTrack.getTrackByStableId(yComponent.stableId);if(track!==undefined)
+return track;}},toViewCoordinates:function(viewport){var dt=viewport.currentDisplayTransform;var containerToTrack=viewport.containerToTrackMap;var viewX=dt.xWorldToView(this.xWorld_);var viewY=-1;for(var index in this.yComponents_){var yComponent=this.yComponents_[index];var track=containerToTrack.getTrackByStableId(yComponent.stableId);if(track!==undefined){var boundRect=track.getBoundingClientRect();viewY=yComponent.yPercentOffset*boundRect.height+boundRect.top;break;}}
+return{viewX:viewX,viewY:viewY};},toDict:function(){return{xWorld:this.xWorld_,yComponents:this.yComponents_};}};return{Location:Location};});'use strict';tr.exportTo('tr.ui.b',function(){var Location=tr.model.Location;function UIState(location,scaleX){this.location_=location;this.scaleX_=scaleX;};UIState.fromUserFriendlyString=function(model,viewport,stateString){var navByFinderPattern=/^(-?\d+(\.\d+)?)@(.+)x(\d+(\.\d+)?)$/g;var match=navByFinderPattern.exec(stateString);if(!match)
+return;var timestamp=parseFloat(match[1]);var stableId=match[3];var scaleX=parseFloat(match[4]);if(scaleX<=0)
+throw new Error('Invalid ScaleX value in UI State string.');if(!viewport.containerToTrackMap.getTrackByStableId(stableId))
+throw new Error('Invalid StableID given in UI State String.');var loc=tr.model.Location.fromStableIdAndTimestamp(viewport,stableId,timestamp);return new UIState(loc,scaleX);}
+UIState.prototype={get location(){return this.location_;},get scaleX(){return this.scaleX_;},toUserFriendlyString:function(viewport){var timestamp=this.location_.xWorld;var stableId=this.location_.getContainingTrack(viewport).eventContainer.stableId;var scaleX=this.scaleX_;return timestamp.toFixed(5)+'@'+stableId+'x'+scaleX.toFixed(5);},toDict:function(){return{location:this.location_.toDict(),scaleX:this.scaleX_};}};return{UIState:UIState};});'use strict';tr.exportTo('tr.c',function(){var BrushingState=tr.ui.b.BrushingState;var EventSet=tr.model.EventSet;var SelectionState=tr.model.SelectionState;var Viewport=tr.ui.TimelineViewport;function BrushingStateController(timelineView){tr.b.EventTarget.call(this);this.timelineView_=timelineView;this.currentBrushingState_=new BrushingState();this.onPopState_=this.onPopState_.bind(this);this.historyEnabled_=false;this.selections_={};}
+BrushingStateController.prototype={__proto__:tr.b.EventTarget.prototype,dispatchChangeEvent_:function(){var e=new tr.b.Event('change',false,false);this.dispatchEvent(e);},get model(){if(!this.timelineView_)
+return undefined;return this.timelineView_.model;},get trackView(){if(!this.timelineView_)
+return undefined;return this.timelineView_.trackView;},get viewport(){if(!this.timelineView_)
+return undefined;if(!this.timelineView_.trackView)
+return undefined;return this.timelineView_.trackView.viewport;},get historyEnabled(){return this.historyEnabled_;},set historyEnabled(historyEnabled){this.historyEnabled_=!!historyEnabled;if(historyEnabled)
+window.addEventListener('popstate',this.onPopState_);else
+window.removeEventListener('popstate',this.onPopState_);},modelWillChange:function(){if(this.currentBrushingState_.isAppliedToModel)
+this.currentBrushingState_.unapplyFromModelSelectionState();},modelDidChange:function(){this.selections_={};this.currentBrushingState_=new BrushingState();this.currentBrushingState_.applyToModelSelectionState(this.model);var e=new tr.b.Event('model-changed',false,false);this.dispatchEvent(e);this.dispatchChangeEvent_();},onUserInitiatedSelectionChange_:function(){var selection=this.selection;if(this.historyEnabled){this.selections_[selection.guid]=selection;var state={selection_guid:selection.guid};window.history.pushState(state,document.title);}},onPopState_:function(e){if(e.state===null)
+return;var selection=this.selections_[e.state.selection_guid];if(selection){var newState=this.currentBrushingState_.clone();newState.selection=selection;this.currentBrushingState=newState;}
+e.stopPropagation();},get selection(){return this.currentBrushingState_.selection;},get findMatches(){return this.currentBrushingState_.findMatches;},get selectionOfInterest(){return this.currentBrushingState_.selectionOfInterest;},get currentBrushingState(){return this.currentBrushingState_;},set currentBrushingState(newBrushingState){if(newBrushingState.isAppliedToModel)
+throw new Error('Cannot apply this state, it is applied');var hasValueChanged=!this.currentBrushingState_.equals(newBrushingState);if(newBrushingState!==this.currentBrushingState_&&!hasValueChanged){if(this.currentBrushingState_.isAppliedToModel){this.currentBrushingState_.transferModelOwnershipToClone(newBrushingState);}
+this.currentBrushingState_=newBrushingState;return;}
+if(this.currentBrushingState_.isAppliedToModel)
+this.currentBrushingState_.unapplyFromModelSelectionState();this.currentBrushingState_=newBrushingState;if(this.model)
+this.currentBrushingState_.applyToModelSelectionState(this.model);this.dispatchChangeEvent_();},addAllEventsMatchingFilterToSelectionAsTask:function(filter,selection){var timelineView=this.timelineView_.trackView;if(!timelineView)
+return new tr.b.Task();return timelineView.addAllEventsMatchingFilterToSelectionAsTask(filter,selection);},findTextChangedTo:function(allPossibleMatches){var newBrushingState=this.currentBrushingState_.clone();newBrushingState.findMatches=allPossibleMatches;this.currentBrushingState=newBrushingState;},findFocusChangedTo:function(currentFocus){var newBrushingState=this.currentBrushingState_.clone();newBrushingState.selection=currentFocus;this.currentBrushingState=newBrushingState;this.onUserInitiatedSelectionChange_();},findTextCleared:function(){if(this.xNavStringMarker_!==undefined){this.model.removeAnnotation(this.xNavStringMarker_);this.xNavStringMarker_=undefined;}
+if(this.guideLineAnnotation_!==undefined){this.model.removeAnnotation(this.guideLineAnnotation_);this.guideLineAnnotation_=undefined;}
+var newBrushingState=this.currentBrushingState_.clone();newBrushingState.selection=new EventSet();newBrushingState.findMatches=new EventSet();this.currentBrushingState=newBrushingState;this.onUserInitiatedSelectionChange_();},uiStateFromString:function(string){return tr.ui.b.UIState.fromUserFriendlyString(this.model,this.viewport,string);},navToPosition:function(uiState,showNavLine){this.trackView.navToPosition(uiState,showNavLine);},changeSelectionFromTimeline:function(selection){var newBrushingState=this.currentBrushingState_.clone();newBrushingState.selection=selection;newBrushingState.findMatches=new EventSet();this.currentBrushingState=newBrushingState;this.onUserInitiatedSelectionChange_();},showScriptControlSelection:function(selection){var newBrushingState=this.currentBrushingState_.clone();newBrushingState.selection=selection;newBrushingState.findMatches=new EventSet();this.currentBrushingState=newBrushingState;},changeSelectionFromRequestSelectionChangeEvent:function(selection){var newBrushingState=this.currentBrushingState_.clone();newBrushingState.selection=selection;newBrushingState.findMatches=new EventSet();this.currentBrushingState=newBrushingState;this.onUserInitiatedSelectionChange_();},changeAnalysisViewRelatedEvents:function(eventSet){var newBrushingState=this.currentBrushingState_.clone();newBrushingState.analysisViewRelatedEvents=eventSet;this.currentBrushingState=newBrushingState;},changeAnalysisLinkHoveredEvents:function(eventSet){var newBrushingState=this.currentBrushingState_.clone();newBrushingState.analysisLinkHoveredEvents=eventSet;this.currentBrushingState=newBrushingState;},getViewSpecificBrushingState:function(viewId){return this.currentBrushingState.viewSpecificBrushingStates[viewId];},changeViewSpecificBrushingState:function(viewId,newState){var oldStates=this.currentBrushingState_.viewSpecificBrushingStates;var newStates={};for(var id in oldStates)
+newStates[id]=oldStates[id];if(newState===undefined)
+delete newStates[viewId];else
+newStates[viewId]=newState;var newBrushingState=this.currentBrushingState_.clone();newBrushingState.viewSpecificBrushingStates=newStates;this.currentBrushingState=newBrushingState;}};BrushingStateController.getControllerForElement=function(element){if(tr.isHeadless)
+throw new Error('Unsupported');var currentElement=element;while(currentElement){if(currentElement.brushingStateController)
+return currentElement.brushingStateController;if(currentElement.parentElement){currentElement=currentElement.parentElement;continue;}
+var currentNode=currentElement;while(currentNode.parentNode)
+currentNode=currentNode.parentNode;currentElement=currentNode.host;}
+return undefined;};return{BrushingStateController:BrushingStateController};});'use strict';Polymer('tr-ui-a-analysis-link',{ready:function(){this.selection_=undefined;},attached:function(){this.controller_=tr.c.BrushingStateController.getControllerForElement(this);},detached:function(){this.clearHighlight_();this.controller_=undefined;},get selection(){return this.selection_;},set selection(selection){this.selection_=selection;this.textContent=selection.userFriendlyName;},setSelectionAndContent:function(selection,opt_textContent){this.selection_=selection;if(opt_textContent)
+this.textContent=opt_textContent;},getCurrentSelection_:function(){if(typeof this.selection_==='function')
+return this.selection_();return this.selection_;},setHighlight_:function(opt_eventSet){if(this.controller_)
+this.controller_.changeAnalysisLinkHoveredEvents(opt_eventSet);},clearHighlight_:function(opt_eventSet){this.setHighlight_();},onClicked_:function(){if(!this.selection_)
+return;var event=new tr.model.RequestSelectionChangeEvent();event.selection=this.getCurrentSelection_();this.dispatchEvent(event);},onMouseEnter_:function(){this.setHighlight_(this.getCurrentSelection_());},onMouseLeave_:function(){this.clearHighlight_();}});'use strict';tr.exportTo('tr.b',function(){function MultiDimensionalViewNode(title,isLowerBound){this.title=title;var dimensions=title.length;this.children=new Array(dimensions);for(var i=0;i<dimensions;i++)
+this.children[i]=new Map();this.total=0;this.self=0;this.isLowerBound=!!isLowerBound;}
+MultiDimensionalViewNode.prototype={get subRows(){return tr.b.mapValues(this.children[0]);}};var MultiDimensionalViewType={TOP_DOWN_TREE_VIEW:0,TOP_DOWN_HEAVY_VIEW:1,BOTTOM_UP_HEAVY_VIEW:2};function MultiDimensionalViewBuilder(dimensions){if(dimensions<0)
+throw new Error('Dimensions must be non-negative');this.dimensions_=dimensions;this.buildRoot_=this.createRootNode_();this.topDownTreeViewRoot_=undefined;this.topDownHeavyViewRoot_=undefined;this.bottomUpHeavyViewNode_=undefined;this.maxDimensionDepths_=new Array(dimensions);for(var d=0;d<dimensions;d++)
+this.maxDimensionDepths_[d]=0;}
+MultiDimensionalViewBuilder.ValueKind={SELF:0,TOTAL:1};MultiDimensionalViewBuilder.prototype={addPath:function(path,value,valueKind){if(this.buildRoot_===undefined){throw new Error('Paths cannot be added after either view has been built');}
+if(path.length!==this.dimensions_)
+throw new Error('Path must be '+this.dimensions_+'-dimensional');var node=this.buildRoot_;for(var d=0;d<path.length;d++){var singleDimensionPath=path[d];var singleDimensionPathLength=singleDimensionPath.length;this.maxDimensionDepths_[d]=Math.max(this.maxDimensionDepths_[d],singleDimensionPathLength);for(var i=0;i<singleDimensionPathLength;i++)
+node=this.getOrCreateChildNode_(node,d,singleDimensionPath[i]);}
+switch(valueKind){case MultiDimensionalViewBuilder.ValueKind.SELF:node.self+=value;break;case MultiDimensionalViewBuilder.ValueKind.TOTAL:node.total+=value;break;default:throw new Error('Invalid value kind: '+valueKind);}
+node.isLowerBound=false;},buildView:function(viewType){switch(viewType){case MultiDimensionalViewType.TOP_DOWN_TREE_VIEW:return this.buildTopDownTreeView();case MultiDimensionalViewType.TOP_DOWN_HEAVY_VIEW:return this.buildTopDownHeavyView();case MultiDimensionalViewType.BOTTOM_UP_HEAVY_VIEW:return this.buildBottomUpHeavyView();default:throw new Error('Unknown multi-dimensional view type: '+viewType);}},buildTopDownTreeView:function(){if(this.topDownTreeViewRoot_===undefined){var treeViewRoot=this.buildRoot_;this.buildRoot_=undefined;this.setUpMissingChildRelationships_(treeViewRoot,0);this.finalizeTotalValues_(treeViewRoot,0,new WeakMap());this.topDownTreeViewRoot_=treeViewRoot;}
+return this.topDownTreeViewRoot_;},buildTopDownHeavyView:function(){if(this.topDownHeavyViewRoot_===undefined){this.topDownHeavyViewRoot_=this.buildGenericHeavyView_(this.addDimensionToTopDownHeavyViewNode_.bind(this));}
+return this.topDownHeavyViewRoot_;},buildBottomUpHeavyView:function(){if(this.bottomUpHeavyViewNode_===undefined){this.bottomUpHeavyViewNode_=this.buildGenericHeavyView_(this.addDimensionToBottomUpHeavyViewNode_.bind(this));}
+return this.bottomUpHeavyViewNode_;},createRootNode_:function(){return new MultiDimensionalViewNode(new Array(this.dimensions_),true);},getOrCreateChildNode_:function(parentNode,dimension,childDimensionTitle){if(dimension<0||dimension>=this.dimensions_)
+throw new Error('Invalid dimension');var dimensionChildren=parentNode.children[dimension];var childNode=dimensionChildren.get(childDimensionTitle);if(childNode!==undefined)
+return childNode;var childTitle=parentNode.title.slice();childTitle[dimension]=childDimensionTitle;childNode=new MultiDimensionalViewNode(childTitle,true);dimensionChildren.set(childDimensionTitle,childNode);return childNode;},setUpMissingChildRelationships_:function(node,firstDimensionToSetUp){for(var d=firstDimensionToSetUp;d<this.dimensions_;d++){var currentDimensionChildTitles=new Set(node.children[d].keys());for(var i=0;i<d;i++){for(var previousDimensionChildNode of node.children[i].values()){for(var previousDimensionGrandChildTitle of
+previousDimensionChildNode.children[d].keys()){currentDimensionChildTitles.add(previousDimensionGrandChildTitle);}}}
+for(var currentDimensionChildTitle of currentDimensionChildTitles){var currentDimensionChildNode=this.getOrCreateChildNode_(node,d,currentDimensionChildTitle);for(var i=0;i<d;i++){for(var previousDimensionChildNode of node.children[i].values()){var previousDimensionGrandChildNode=previousDimensionChildNode.children[d].get(currentDimensionChildTitle);if(previousDimensionGrandChildNode!==undefined){currentDimensionChildNode.children[i].set(previousDimensionChildNode.title[i],previousDimensionGrandChildNode);}}}
+this.setUpMissingChildRelationships_(currentDimensionChildNode,d);}}},finalizeTotalValues_:function(node,firstDimensionToFinalize,dimensionalSelfSumsMap){var dimensionalSelfSums=new Array(this.dimensions_);var maxChildResidualSum=0;var nodeSelfSum=node.self;for(var d=0;d<this.dimensions_;d++){var childResidualSum=0;for(var childNode of node.children[d].values()){if(d>=firstDimensionToFinalize)
+this.finalizeTotalValues_(childNode,d,dimensionalSelfSumsMap);var childNodeSelfSums=dimensionalSelfSumsMap.get(childNode);nodeSelfSum+=childNodeSelfSums[d];var residual=childNode.total-childNodeSelfSums[this.dimensions_-1];childResidualSum+=residual;}
+dimensionalSelfSums[d]=nodeSelfSum;maxChildResidualSum=Math.max(maxChildResidualSum,childResidualSum);}
+node.total=Math.max(node.total,nodeSelfSum+maxChildResidualSum);if(dimensionalSelfSumsMap.has(node))
+throw new Error('Internal error: Node finalized more than once');dimensionalSelfSumsMap.set(node,dimensionalSelfSums);},buildGenericHeavyView_:function(treeViewNodeHandler){var treeViewRoot=this.buildTopDownTreeView();var heavyViewRoot=this.createRootNode_();heavyViewRoot.total=treeViewRoot.total;heavyViewRoot.self=treeViewRoot.self;heavyViewRoot.isLowerBound=treeViewRoot.isLowerBound;var recursionDepthTrackers=new Array(this.dimensions_);for(var d=0;d<this.dimensions_;d++){recursionDepthTrackers[d]=new RecursionDepthTracker(this.maxDimensionDepths_[d],d);}
+this.addDimensionsToGenericHeavyViewNode_(treeViewRoot,heavyViewRoot,0,recursionDepthTrackers,false,treeViewNodeHandler);this.setUpMissingChildRelationships_(heavyViewRoot,0);return heavyViewRoot;},addDimensionsToGenericHeavyViewNode_:function(treeViewParentNode,heavyViewParentNode,startDimension,recursionDepthTrackers,previousDimensionsRecursive,treeViewNodeHandler){for(var d=startDimension;d<this.dimensions_;d++){this.addDimensionDescendantsToGenericHeavyViewNode_(treeViewParentNode,heavyViewParentNode,d,recursionDepthTrackers,previousDimensionsRecursive,treeViewNodeHandler);}},addDimensionDescendantsToGenericHeavyViewNode_:function(treeViewParentNode,heavyViewParentNode,currentDimension,recursionDepthTrackers,previousDimensionsRecursive,treeViewNodeHandler){var treeViewChildren=treeViewParentNode.children[currentDimension];var recursionDepthTracker=recursionDepthTrackers[currentDimension];for(var treeViewChildNode of treeViewChildren.values()){recursionDepthTracker.push(treeViewChildNode);treeViewNodeHandler(treeViewChildNode,heavyViewParentNode,currentDimension,recursionDepthTrackers,previousDimensionsRecursive);this.addDimensionDescendantsToGenericHeavyViewNode_(treeViewChildNode,heavyViewParentNode,currentDimension,recursionDepthTrackers,previousDimensionsRecursive,treeViewNodeHandler);recursionDepthTracker.pop();}},addDimensionToTopDownHeavyViewNode_:function(treeViewChildNode,heavyViewParentNode,currentDimension,recursionDepthTrackers,previousDimensionsRecursive){this.addDimensionToTopDownHeavyViewNodeRecursively_(treeViewChildNode,heavyViewParentNode,currentDimension,recursionDepthTrackers,previousDimensionsRecursive,1);},addDimensionToTopDownHeavyViewNodeRecursively_:function(treeViewChildNode,heavyViewParentNode,currentDimension,recursionDepthTrackers,previousDimensionsRecursive,subTreeDepth){var recursionDepthTracker=recursionDepthTrackers[currentDimension];var currentDimensionRecursive=subTreeDepth<=recursionDepthTracker.recursionDepth;var currentOrPreviousDimensionsRecursive=currentDimensionRecursive||previousDimensionsRecursive;var dimensionTitle=treeViewChildNode.title[currentDimension];var heavyViewChildNode=this.getOrCreateChildNode_(heavyViewParentNode,currentDimension,dimensionTitle);heavyViewChildNode.self+=treeViewChildNode.self;if(!currentOrPreviousDimensionsRecursive)
+heavyViewChildNode.total+=treeViewChildNode.total;this.addDimensionsToGenericHeavyViewNode_(treeViewChildNode,heavyViewChildNode,currentDimension+1,recursionDepthTrackers,currentOrPreviousDimensionsRecursive,this.addDimensionToTopDownHeavyViewNode_.bind(this));for(var treeViewGrandChildNode of
+treeViewChildNode.children[currentDimension].values()){recursionDepthTracker.push(treeViewGrandChildNode);this.addDimensionToTopDownHeavyViewNodeRecursively_(treeViewGrandChildNode,heavyViewChildNode,currentDimension,recursionDepthTrackers,previousDimensionsRecursive,subTreeDepth+1);recursionDepthTracker.pop();}},addDimensionToBottomUpHeavyViewNode_:function(treeViewChildNode,heavyViewParentNode,currentDimension,recursionDepthTrackers,previousDimensionsRecursive){var recursionDepthTracker=recursionDepthTrackers[currentDimension];var bottomIndex=recursionDepthTracker.bottomIndex;var topIndex=recursionDepthTracker.topIndex;var firstNonRecursiveIndex=bottomIndex+recursionDepthTracker.recursionDepth;var viewNodePath=recursionDepthTracker.viewNodePath;var trackerAncestorNode=recursionDepthTracker.trackerAncestorNode;var heavyViewDescendantNode=heavyViewParentNode;for(var i=bottomIndex;i<topIndex;i++){var treeViewAncestorNode=viewNodePath[i];var dimensionTitle=treeViewAncestorNode.title[currentDimension];heavyViewDescendantNode=this.getOrCreateChildNode_(heavyViewDescendantNode,currentDimension,dimensionTitle);var currentDimensionRecursive=i<firstNonRecursiveIndex;var currentOrPreviousDimensionsRecursive=currentDimensionRecursive||previousDimensionsRecursive;heavyViewDescendantNode.self+=treeViewChildNode.self;if(!currentOrPreviousDimensionsRecursive)
+heavyViewDescendantNode.total+=treeViewChildNode.total;this.addDimensionsToGenericHeavyViewNode_(treeViewChildNode,heavyViewDescendantNode,currentDimension+1,recursionDepthTrackers,currentOrPreviousDimensionsRecursive,this.addDimensionToBottomUpHeavyViewNode_.bind(this));}}};function RecursionDepthTracker(maxDepth,dimension){this.titlePath=new Array(maxDepth);this.viewNodePath=new Array(maxDepth);this.bottomIndex=this.topIndex=maxDepth;this.dimension_=dimension;this.currentTrackerNode_=this.createNode_(0,undefined);}
+RecursionDepthTracker.prototype={push:function(viewNode){if(this.bottomIndex===0)
+throw new Error('Cannot push to a full tracker');var title=viewNode.title[this.dimension_];this.bottomIndex--;this.titlePath[this.bottomIndex]=title;this.viewNodePath[this.bottomIndex]=viewNode;var childTrackerNode=this.currentTrackerNode_.children.get(title);if(childTrackerNode!==undefined){this.currentTrackerNode_=childTrackerNode;return;}
+var maxLengths=zFunction(this.titlePath,this.bottomIndex);var recursionDepth=0;for(var i=0;i<maxLengths.length;i++)
+recursionDepth=Math.max(recursionDepth,maxLengths[i]);childTrackerNode=this.createNode_(recursionDepth,this.currentTrackerNode_);this.currentTrackerNode_.children.set(title,childTrackerNode);this.currentTrackerNode_=childTrackerNode;},pop:function(){if(this.bottomIndex===this.topIndex)
+throw new Error('Cannot pop from an empty tracker');this.titlePath[this.bottomIndex]=undefined;this.viewNodePath[this.bottomIndex]=undefined;this.bottomIndex++;this.currentTrackerNode_=this.currentTrackerNode_.parent;},get recursionDepth(){return this.currentTrackerNode_.recursionDepth;},createNode_:function(recursionDepth,parent){return{recursionDepth:recursionDepth,parent:parent,children:new Map()};}};function zFunction(list,startIndex){var n=list.length-startIndex;if(n===0)
+return[];var z=new Array(n);z[0]=0;for(var i=1,left=0,right=0;i<n;++i){var maxLength;if(i<=right)
+maxLength=Math.min(right-i+1,z[i-left]);else
+maxLength=0;while(i+maxLength<n&&list[startIndex+maxLength]===list[startIndex+i+maxLength]){++maxLength;}
+if(i+maxLength-1>right){left=i;right=i+maxLength-1;}
+z[i]=maxLength;}
+return z;}
+return{MultiDimensionalViewBuilder:MultiDimensionalViewBuilder,MultiDimensionalViewType:MultiDimensionalViewType,MultiDimensionalViewNode:MultiDimensionalViewNode,RecursionDepthTracker:RecursionDepthTracker,zFunction:zFunction};});'use strict';tr.exportTo('tr.b',function(){function _iterateElementDeeplyImpl(element,cb,thisArg,includeElement){if(includeElement){if(cb.call(thisArg,element))
+return true;}
+if(element.shadowRoot){if(_iterateElementDeeplyImpl(element.shadowRoot,cb,thisArg,false))
+return true;}
+for(var i=0;i<element.children.length;i++){if(_iterateElementDeeplyImpl(element.children[i],cb,thisArg,true))
+return true;}}
+function iterateElementDeeply(element,cb,thisArg){_iterateElementDeeplyImpl(element,cb,thisArg,false);}
+function findDeepElementMatchingPredicate(element,predicate){var foundElement=undefined;function matches(element){var match=predicate(element);if(!match)
+return false;foundElement=element;return true;}
+iterateElementDeeply(element,matches);return foundElement;}
+function findDeepElementsMatchingPredicate(element,predicate){var foundElements=[];function matches(element){var match=predicate(element);if(match){foundElements.push(element);}
+return false;}
+iterateElementDeeply(element,matches);return foundElements;}
+function findDeepElementMatching(element,selector){return findDeepElementMatchingPredicate(element,function(element){return element.matches(selector);});}
+function findDeepElementsMatching(element,selector){return findDeepElementsMatchingPredicate(element,function(element){return element.matches(selector);});}
+function findDeepElementWithTextContent(element,re){return findDeepElementMatchingPredicate(element,function(element){if(element.children.length!==0)
+return false;return re.test(element.textContent);});}
+return{iterateElementDeeply:iterateElementDeeply,findDeepElementMatching:findDeepElementMatching,findDeepElementsMatching:findDeepElementsMatching,findDeepElementMatchingPredicate:findDeepElementMatchingPredicate,findDeepElementsMatchingPredicate:findDeepElementsMatchingPredicate,findDeepElementWithTextContent:findDeepElementWithTextContent};});'use strict';tr.exportTo('tr.ui.b',function(){function getPolymerElementNamed(tagName){for(var i=0;i<Polymer.elements.length;i++){if(Polymer.elements[i].name===tagName)
+return Polymer.elements[i];}}
+function getPolymerElementsThatSubclass(tagName){if(Polymer.waitingFor().length){throw new Error('There are unresolved polymer elements. '+'Wait until Polymer.whenReady');}
+var baseElement;var elementNamesThatExtend={};Polymer.elements.forEach(function(element){if(element.name===tagName)
+baseElement=element;if(element.extends){if(elementNamesThatExtend[element.extends]===undefined)
+elementNamesThatExtend[element.extends]=[];elementNamesThatExtend[element.extends].push(element.name);}});if(!baseElement)
+throw new Error(tagName+' is not a polymer element');var allFoundSubElementNames=[baseElement.name];for(var i=0;i<allFoundSubElementNames.length;i++){var elementName=allFoundSubElementNames[i];allFoundSubElementNames.push.apply(allFoundSubElementNames,elementNamesThatExtend[elementName]);}
+allFoundSubElementNames.shift();return allFoundSubElementNames;}
+return{getPolymerElementNamed:getPolymerElementNamed,getPolymerElementsThatSubclass:getPolymerElementsThatSubclass};});'use strict';tr.exportTo('tr.v.ui',function(){function createScalarSpan(value,opt_config){if(value===undefined)
+return'';var config=opt_config||{};var ownerDocument=config.ownerDocument||document;var span=ownerDocument.createElement('tr-v-ui-scalar-span');var numericValue;if(value instanceof tr.v.ScalarNumeric){span.value=value;numericValue=value.value;}else{var unit=config.unit;if(unit===undefined){throw new Error('Unit must be provided in config when value is a number');}
+span.setValueAndUnit(value,unit);numericValue=value;}
+if(config.total)
+span.percentage=numericValue/config.total;if(config.rightAlign)
+span.rightAlign=true;return span;}
+tr.v.Unit.addEventListener('display-mode-changed',function(e){var scalarSpanTagName='tr-v-ui-scalar-span';var subclassNames=tr.ui.b.getPolymerElementsThatSubclass(scalarSpanTagName);subclassNames.push(scalarSpanTagName);var isSubclass={};subclassNames.forEach(function(n){isSubclass[n.toUpperCase()]=true;});var m=tr.b.findDeepElementsMatchingPredicate(document.body,function(el){return isSubclass[el.tagName];});m.forEach(function(el){el.updateContent_();});});return{createScalarSpan:createScalarSpan};});'use strict';Polymer('tr-v-ui-scalar-span',{ready:function(){this.value_=undefined;this.unit_=undefined;this.warning_=undefined;this.percentage_=undefined;},set contentTextDecoration(deco){this.$.content.style.textDecoration=deco;},get value(){return this.value_;},set value(value){if(value instanceof tr.v.ScalarNumeric){this.value_=value.value;this.unit_=value.unit;}else{this.value_=value;}
+this.updateContent_();},get unit(){return this.unit_;},set unit(unit){this.unit_=unit;this.updateContent_();},setValueAndUnit:function(value,unit){this.value_=value;this.unit_=unit;this.updateContent_();},get percentage(){return this.percentage_;},set percentage(percentage){this.percentage_=percentage;this.updateSparkline_();},get rightAlign(){return this.$.content.classList.contains('right-align');},set rightAlign(rightAlign){if(rightAlign)
+this.$.content.classList.add('right-align');else
+this.$.content.classList.remove('right-align');},updateSparkline_:function(){if(this.percentage_===undefined){this.$.sparkline.style.display='none';this.$.sparkline.style.width='0';}else{this.$.sparkline.style.display='block';this.$.sparkline.style.width=(this.percentage_*100)+'%';}},updateContent_:function(){if(this.unit_===undefined){this.$.content.textContent='';this.$.content.style.color='';return;}
+this.$.content.textContent=this.unit_.format(this.value);var BIGGER_IS_BETTER=tr.v.ImprovementDirection.BIGGER_IS_BETTER;var SMALLER_IS_BETTER=tr.v.ImprovementDirection.SMALLER_IS_BETTER;var color='';if(this.unit_.isDelta){var improvementDirection=this.unit_.improvementDirection;if(this.value>0){switch(improvementDirection){case BIGGER_IS_BETTER:color='green';break;case SMALLER_IS_BETTER:color='red';break;}}else if(this.value<0){switch(improvementDirection){case BIGGER_IS_BETTER:color='red';break;case SMALLER_IS_BETTER:color='green';break;}}}
+this.$.content.style.color=color;},get warning(){return this.warning_;},set warning(warning){this.warning_=warning;var warningEl=this.$.warning;if(this.warning_){warningEl.title=warning;warningEl.style.display='';}else{warningEl.title='';warningEl.style.display='none';}}});'use strict';tr.exportTo('tr.ui.analysis',function(){var NO_BREAK_SPACE=String.fromCharCode(160);var RIGHTWARDS_ARROW=String.fromCharCode(8594);var COLLATOR=new Intl.Collator(undefined,{numeric:true});function TitleColumn(title){this.title=title;}
+TitleColumn.prototype={supportsCellSelection:false,value:function(row){var formattedTitle=this.formatTitle(row);var contexts=row.contexts;if(contexts===undefined||contexts.length===0)
+return formattedTitle;var firstContext=contexts[0];var lastContext=contexts[contexts.length-1];var changeDefinedContextCount=0;for(var i=1;i<contexts.length;i++){if((contexts[i]===undefined)!==(contexts[i-1]===undefined))
+changeDefinedContextCount++;}
+var color=undefined;var prefix=undefined;if(!firstContext&&lastContext){color='red';prefix='+++';}else if(firstContext&&!lastContext){color='green';prefix='---';}
+if(changeDefinedContextCount>1){color='purple';}
+if(color===undefined&&prefix===undefined)
+return formattedTitle;var titleEl=document.createElement('span');if(prefix!==undefined){var prefixEl=tr.ui.b.createSpan({textContent:prefix});prefixEl.style.fontFamily='monospace';titleEl.appendChild(prefixEl);titleEl.appendChild(tr.ui.b.asHTMLOrTextNode(NO_BREAK_SPACE));}
+if(color!==undefined)
+titleEl.style.color=color;titleEl.appendChild(tr.ui.b.asHTMLOrTextNode(formattedTitle));return titleEl;},formatTitle:function(row){return row.title;},cmp:function(rowA,rowB){return COLLATOR.compare(rowA.title,rowB.title);}};function MemoryColumn(name,cellPath,aggregationMode){this.name=name;this.cellPath=cellPath;this.aggregationMode=aggregationMode;}
+MemoryColumn.fromRows=function(rows,cellKey,aggregationMode,rules){var cellNames=new Set();function gatherCellNames(rows){rows.forEach(function(row){if(row===undefined)
+return;var fieldCells=row[cellKey];if(fieldCells!==undefined){tr.b.iterItems(fieldCells,function(fieldName,fieldCell){if(fieldCell===undefined||fieldCell.fields===undefined)
+return;cellNames.add(fieldName);});}
+var subRows=row.subRows;if(subRows!==undefined)
+gatherCellNames(subRows);});}
+gatherCellNames(rows);var positions=[];cellNames.forEach(function(cellName){var cellPath=[cellKey,cellName];var matchingRule=MemoryColumn.findMatchingRule(cellName,rules);var constructor=matchingRule.columnConstructor;var column=new constructor(cellName,cellPath,aggregationMode);positions.push({importance:matchingRule.importance,column:column});});positions.sort(function(a,b){if(a.importance===b.importance)
+return COLLATOR.compare(a.column.name,b.column.name);return b.importance-a.importance;});return positions.map(function(position){return position.column});};MemoryColumn.spaceEqually=function(columns){var columnWidth=(100/columns.length).toFixed(3)+'%';columns.forEach(function(column){column.width=columnWidth;});};MemoryColumn.findMatchingRule=function(name,rules){for(var i=0;i<rules.length;i++){var rule=rules[i];if(MemoryColumn.nameMatchesCondition(name,rule.condition))
+return rule;}
+return undefined;};MemoryColumn.nameMatchesCondition=function(name,condition){if(condition===undefined)
+return true;if(typeof(condition)==='string')
+return name===condition;return condition.test(name);};MemoryColumn.AggregationMode={DIFF:0,MAX:1};MemoryColumn.SOME_TIMESTAMPS_INFO_QUANTIFIER='at some selected timestamps';MemoryColumn.prototype={get title(){return this.name;},cell:function(row){var cell=row;var cellPath=this.cellPath;for(var i=0;i<cellPath.length;i++){if(cell===undefined)
+return undefined;cell=cell[cellPath[i]];}
+return cell;},aggregateCells:function(row,subRows){},fields:function(row){var cell=this.cell(row);if(cell===undefined)
+return undefined;return cell.fields;},value:function(row){var fields=this.fields(row);if(this.hasAllRelevantFieldsUndefined(fields))
+return'';var contexts=row.contexts;var color=this.color(fields,contexts);var infos=[];this.addInfos(fields,contexts,infos);var formattedFields=this.formatFields(fields);if((color===undefined||formattedFields==='')&&infos.length===0)
+return formattedFields;var fieldEl=document.createElement('span');fieldEl.style.display='flex';fieldEl.style.alignItems='center';fieldEl.appendChild(tr.ui.b.asHTMLOrTextNode(formattedFields));infos.forEach(function(info){var infoEl=document.createElement('span');infoEl.style.paddingLeft='4px';infoEl.style.cursor='help';infoEl.style.fontWeight='bold';infoEl.textContent=info.icon;if(info.color!==undefined)
+infoEl.style.color=info.color;infoEl.title=info.message;fieldEl.appendChild(infoEl);},this);if(color!==undefined)
+fieldEl.style.color=color;return fieldEl;},hasAllRelevantFieldsUndefined:function(fields){if(fields===undefined)
+return true;switch(this.aggregationMode){case MemoryColumn.AggregationMode.DIFF:return fields[0]===undefined&&fields[fields.length-1]===undefined;case MemoryColumn.AggregationMode.MAX:default:return fields.every(function(field){return field===undefined;});}},color:function(fields,contexts){return undefined;},formatFields:function(fields){if(fields.length===1)
+return this.formatSingleField(fields[0]);else
+return this.formatMultipleFields(fields);},formatSingleField:function(field){throw new Error('Not implemented');},formatMultipleFields:function(fields){switch(this.aggregationMode){case MemoryColumn.AggregationMode.DIFF:return this.formatMultipleFieldsDiff(fields[0],fields[fields.length-1]);case MemoryColumn.AggregationMode.MAX:return this.formatMultipleFieldsMax(fields);default:return tr.ui.b.createSpan({textContent:'(unsupported aggregation mode)',italic:true});}},formatMultipleFieldsDiff:function(firstField,lastField){throw new Error('Not implemented');},formatMultipleFieldsMax:function(fields){return this.formatSingleField(this.getMaxField(fields));},cmp:function(rowA,rowB){var fieldsA=this.fields(rowA);var fieldsB=this.fields(rowB);if(fieldsA!==undefined&&fieldsB!==undefined&&fieldsA.length!==fieldsB.length)
+throw new Error('Different number of fields');var undefinedA=this.hasAllRelevantFieldsUndefined(fieldsA);var undefinedB=this.hasAllRelevantFieldsUndefined(fieldsB);if(undefinedA&&undefinedB)
+return 0;if(undefinedA)
+return-1;if(undefinedB)
+return 1;return this.compareFields(fieldsA,fieldsB);},compareFields:function(fieldsA,fieldsB){if(fieldsA.length===1)
+return this.compareSingleFields(fieldsA[0],fieldsB[0]);else
+return this.compareMultipleFields(fieldsA,fieldsB);},compareSingleFields:function(fieldA,fieldB){throw new Error('Not implemented');},compareMultipleFields:function(fieldsA,fieldsB){switch(this.aggregationMode){case MemoryColumn.AggregationMode.DIFF:return this.compareMultipleFieldsDiff(fieldsA[0],fieldsA[fieldsA.length-1],fieldsB[0],fieldsB[fieldsB.length-1]);case MemoryColumn.AggregationMode.MAX:return this.compareMultipleFieldsMax(fieldsA,fieldsB);default:return 0;}},compareMultipleFieldsDiff:function(firstFieldA,lastFieldA,firstFieldB,lastFieldB){throw new Error('Not implemented');},compareMultipleFieldsMax:function(fieldsA,fieldsB){return this.compareSingleFields(this.getMaxField(fieldsA),this.getMaxField(fieldsB));},getMaxField:function(fields){return fields.reduce(function(accumulator,field){if(field===undefined)
+return accumulator;if(accumulator===undefined||this.compareSingleFields(field,accumulator)>0){return field;}
+return accumulator;}.bind(this),undefined);},addInfos:function(fields,contexts,infos){},getImportance:function(importanceRules){if(importanceRules.length===0)
+return 0;var matchingRule=MemoryColumn.findMatchingRule(this.name,importanceRules);if(matchingRule!==undefined)
+return matchingRule.importance;var minImportance=importanceRules[0].importance;for(var i=1;i<importanceRules.length;i++)
+minImportance=Math.min(minImportance,importanceRules[i].importance);return minImportance-1;}};function StringMemoryColumn(name,cellPath,aggregationMode){MemoryColumn.call(this,name,cellPath,aggregationMode);}
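+// StringMemoryColumn: MemoryColumn subclass for string-valued cells; renders diffs as added, removed, unchanged, or 'old -> new' values and sorts strings with a collator.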
+StringMemoryColumn.prototype={__proto__:MemoryColumn.prototype,formatSingleField:function(string){return string;},formatMultipleFieldsDiff:function(firstString,lastString){if(firstString===undefined){var spanEl=tr.ui.b.createSpan({color:'red'});spanEl.appendChild(tr.ui.b.asHTMLOrTextNode('+'));spanEl.appendChild(tr.ui.b.asHTMLOrTextNode(this.formatSingleField(lastString)));return spanEl;}else if(lastString===undefined){var spanEl=tr.ui.b.createSpan({color:'green'});spanEl.appendChild(tr.ui.b.asHTMLOrTextNode('-'));spanEl.appendChild(tr.ui.b.asHTMLOrTextNode(this.formatSingleField(firstString)));return spanEl;}else if(firstString===lastString){return this.formatSingleField(firstString);}else{var spanEl=tr.ui.b.createSpan({color:'DarkOrange'});spanEl.appendChild(tr.ui.b.asHTMLOrTextNode(this.formatSingleField(firstString)));spanEl.appendChild(tr.ui.b.asHTMLOrTextNode(' '+RIGHTWARDS_ARROW+' '));spanEl.appendChild(tr.ui.b.asHTMLOrTextNode(this.formatSingleField(lastString)));return spanEl;}},compareSingleFields:function(stringA,stringB){return COLLATOR.compare(stringA,stringB);},compareMultipleFieldsDiff:function(firstStringA,lastStringA,firstStringB,lastStringB){if(firstStringA===undefined&&firstStringB!==undefined)
+return 1;if(firstStringA!==undefined&&firstStringB===undefined)
+return-1;if(firstStringA===undefined&&firstStringB===undefined)
+return this.compareSingleFields(lastStringA,lastStringB);if(lastStringA===undefined&&lastStringB!==undefined)
+return-1;if(lastStringA!==undefined&&lastStringB===undefined)
+return 1;if(lastStringA===undefined&&lastStringB===undefined)
+return this.compareSingleFields(firstStringB,firstStringA);var areStringsAEqual=firstStringA===lastStringA;var areStringsBEqual=firstStringB===lastStringB;if(areStringsAEqual&&areStringsBEqual)
+return 0;if(areStringsAEqual)
+return-1;if(areStringsBEqual)
+return 1;return 0;}};function NumericMemoryColumn(name,cellPath,aggregationMode){MemoryColumn.call(this,name,cellPath,aggregationMode);}
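+// NumericMemoryColumn: MemoryColumn subclass for ScalarNumeric cells; aggregateCells fills in missing parent-row values from sub-rows, and diffs smaller than DIFF_EPSILON are treated as zero.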
+NumericMemoryColumn.DIFF_EPSILON=0.0001;NumericMemoryColumn.prototype={__proto__:MemoryColumn.prototype,aggregateCells:function(row,subRows){var subRowCells=subRows.map(this.cell,this);var hasDefinedSubRowNumeric=false;var timestampCount=undefined;subRowCells.forEach(function(subRowCell){if(subRowCell===undefined)
+return;var subRowNumerics=subRowCell.fields;if(subRowNumerics===undefined)
+return;if(timestampCount===undefined)
+timestampCount=subRowNumerics.length;else if(timestampCount!==subRowNumerics.length)
+throw new Error('Sub-rows have different numbers of timestamps');if(hasDefinedSubRowNumeric)
+return;hasDefinedSubRowNumeric=subRowNumerics.some(function(numeric){return numeric!==undefined;});});if(!hasDefinedSubRowNumeric)
+return;var cellPath=this.cellPath;var rowCell=row;for(var i=0;i<cellPath.length;i++){var nextStepName=cellPath[i];var nextStep=rowCell[nextStepName];if(nextStep===undefined){if(i<cellPath.length-1)
+nextStep={};else
+nextStep=new MemoryCell(undefined);rowCell[nextStepName]=nextStep;}
+rowCell=nextStep;}
+if(rowCell.fields===undefined){rowCell.fields=new Array(timestampCount);}else if(rowCell.fields.length!==timestampCount){throw new Error('Row has a different number of timestamps than sub-rows');}
+for(var i=0;i<timestampCount;i++){if(rowCell.fields[i]!==undefined)
+continue;rowCell.fields[i]=tr.model.MemoryAllocatorDump.aggregateNumerics(subRowCells.map(function(subRowCell){if(subRowCell===undefined||subRowCell.fields===undefined)
+return undefined;return subRowCell.fields[i];}));}},formatSingleField:function(numeric){if(numeric===undefined)
+return'';return tr.v.ui.createScalarSpan(numeric);},formatMultipleFieldsDiff:function(firstNumeric,lastNumeric){return this.formatSingleField(this.getDiffField_(firstNumeric,lastNumeric));},compareSingleFields:function(numericA,numericB){return numericA.value-numericB.value;},compareMultipleFieldsDiff:function(firstNumericA,lastNumericA,firstNumericB,lastNumericB){return this.getDiffFieldValue_(firstNumericA,lastNumericA)-
+this.getDiffFieldValue_(firstNumericB,lastNumericB);},getDiffField_:function(firstNumeric,lastNumeric){var definedNumeric=firstNumeric||lastNumeric;return new tr.v.ScalarNumeric(definedNumeric.unit.correspondingDeltaUnit,this.getDiffFieldValue_(firstNumeric,lastNumeric));},getDiffFieldValue_:function(firstNumeric,lastNumeric){var firstValue=firstNumeric===undefined?0:firstNumeric.value;var lastValue=lastNumeric===undefined?0:lastNumeric.value;var diff=lastValue-firstValue;return Math.abs(diff)<NumericMemoryColumn.DIFF_EPSILON?0:diff;}};function MemoryCell(fields){this.fields=fields;}
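+// MemoryCell: thin wrapper around an array of field values, one entry per selected timestamp.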
+MemoryCell.extractFields=function(cell){if(cell===undefined)
+return undefined;return cell.fields;};var RECURSIVE_EXPANSION_MAX_VISIBLE_ROW_COUNT=10;function expandTableRowsRecursively(table){var currentLevelRows=table.tableRows;var totalVisibleRowCount=currentLevelRows.length;while(currentLevelRows.length>0){var nextLevelRowCount=0;currentLevelRows.forEach(function(currentLevelRow){var subRows=currentLevelRow.subRows;if(subRows===undefined||subRows.length===0)
+return;nextLevelRowCount+=subRows.length;});if(totalVisibleRowCount+nextLevelRowCount>RECURSIVE_EXPANSION_MAX_VISIBLE_ROW_COUNT){break;}
+var nextLevelRows=new Array(nextLevelRowCount);var nextLevelRowIndex=0;currentLevelRows.forEach(function(currentLevelRow){var subRows=currentLevelRow.subRows;if(subRows===undefined||subRows.length===0)
+return;table.setExpandedForTableRow(currentLevelRow,true);subRows.forEach(function(subRow){nextLevelRows[nextLevelRowIndex++]=subRow;});});totalVisibleRowCount+=nextLevelRowCount;currentLevelRows=nextLevelRows;}}
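+// Recursively aggregates cells from sub-rows into their parent rows (bottom-up); opt_predicate decides from a row's contexts whether that row should be aggregated.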
+function aggregateTableRowCellsRecursively(row,columns,opt_predicate){var subRows=row.subRows;if(subRows===undefined||subRows.length===0)
+return;subRows.forEach(function(subRow){aggregateTableRowCellsRecursively(subRow,columns,opt_predicate);});if(opt_predicate===undefined||opt_predicate(row.contexts))
+aggregateTableRowCells(row,subRows,columns);}
+function aggregateTableRowCells(row,subRows,columns){columns.forEach(function(column){if(!(column instanceof MemoryColumn))
+return;column.aggregateCells(row,subRows);});}
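+// createCells: inverts a per-timestamp list of values into a map from field name to MemoryCell, each cell holding that field's values across all timestamps.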
+function createCells(timeToValues,valueFieldsGetter){var fieldNameToFields=tr.b.invertArrayOfDicts(timeToValues,valueFieldsGetter);return tr.b.mapItems(fieldNameToFields,function(fieldName,fields){return new tr.ui.analysis.MemoryCell(fields);});}
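+// createWarningInfo: builds a red warning-sign info icon with the given message as its tooltip.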
+function createWarningInfo(message){return{message:message,icon:String.fromCharCode(9888),color:'red'};}
+return{TitleColumn:TitleColumn,MemoryColumn:MemoryColumn,StringMemoryColumn:StringMemoryColumn,NumericMemoryColumn:NumericMemoryColumn,MemoryCell:MemoryCell,expandTableRowsRecursively:expandTableRowsRecursively,aggregateTableRowCellsRecursively:aggregateTableRowCellsRecursively,aggregateTableRowCells:aggregateTableRowCells,createCells:createCells,createWarningInfo:createWarningInfo};});'use strict';Polymer('tr-ui-a-stacked-pane',{rebuild:function(){if(!this.paneDirty_){return;}
+this.paneDirty_=false;this.rebuildPane_();},scheduleRebuildPane_:function(){if(this.paneDirty_)
+return;this.paneDirty_=true;setTimeout(this.rebuild.bind(this),0);},rebuildPane_:function(){},set childPaneBuilder(childPaneBuilder){this.childPaneBuilder_=childPaneBuilder;this.dispatchEvent(new tr.b.Event('request-child-pane-change'));},get childPaneBuilder(){return this.childPaneBuilder_;},appended:function(){this.rebuild();}});'use strict';Polymer('tr-ui-b-info-bar',{ready:function(){this.messageEl_=this.$.message;this.buttonsEl_=this.$.buttons;this.message='';this.visible=false;},get message(){return this.messageEl_.textContent;},set message(message){this.messageEl_.textContent=message;},get visible(){return!this.classList.contains('info-bar-hidden');},set visible(visible){if(visible)
+this.classList.remove('info-bar-hidden');else
+this.classList.add('info-bar-hidden');},removeAllButtons:function(){this.buttonsEl_.textContent='';},addButton:function(text,clickCallback){var button=document.createElement('button');button.textContent=text;button.addEventListener('click',clickCallback);this.buttonsEl_.appendChild(button);return button;}});'use strict';tr.exportTo('tr.ui.analysis',function(){var ScalarNumeric=tr.v.ScalarNumeric;var sizeInBytes_smallerIsBetter=tr.v.Unit.byName.sizeInBytes_smallerIsBetter;var RowDimension={ROOT:-1,STACK_FRAME:0,OBJECT_TYPE:1};var LATIN_SMALL_LETTER_F_WITH_HOOK=String.fromCharCode(0x0192);var CIRCLED_LATIN_CAPITAL_LETTER_T=String.fromCharCode(0x24C9);function HeapDumpNodeTitleColumn(title){tr.ui.analysis.TitleColumn.call(this,title);}
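+// HeapDumpNodeTitleColumn: title column for heap dump rows; sub-rows are prefixed with a colored symbol marking stack frames vs. object types, and rows sort by dimension first.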
+HeapDumpNodeTitleColumn.prototype={__proto__:tr.ui.analysis.TitleColumn.prototype,formatTitle:function(row){var title=row.title;var dimension=row.dimension;switch(dimension){case RowDimension.ROOT:return title;case RowDimension.STACK_FRAME:case RowDimension.OBJECT_TYPE:return this.formatSubRow_(title,dimension);default:throw new Error('Invalid row dimension: '+row.dimension);}},cmp:function(rowA,rowB){if(rowA.dimension!==rowB.dimension)
+return rowA.dimension-rowB.dimension;return tr.ui.analysis.TitleColumn.prototype.cmp.call(this,rowA,rowB);},formatSubRow_:function(title,dimension){var titleEl=document.createElement('span');var symbolEl=document.createElement('span');var symbolColorName;if(dimension===RowDimension.STACK_FRAME){symbolEl.textContent=LATIN_SMALL_LETTER_F_WITH_HOOK;symbolEl.title='Stack frame';symbolColorName='heap_dump_stack_frame';}else{symbolEl.textContent=CIRCLED_LATIN_CAPITAL_LETTER_T;symbolEl.title='Object type';symbolColorName='heap_dump_object_type';}
+symbolEl.style.color=tr.b.ColorScheme.getColorForReservedNameAsString(symbolColorName);symbolEl.style.paddingRight='4px';symbolEl.style.cursor='help';symbolEl.style.fontWeight='bold';titleEl.appendChild(symbolEl);titleEl.appendChild(document.createTextNode(title));return titleEl;}};var COLUMN_RULES=[{importance:0,columnConstructor:tr.ui.analysis.NumericMemoryColumn}];Polymer('tr-ui-a-memory-dump-heap-details-pane',{created:function(){this.heapDumps_=undefined;this.aggregationMode_=undefined;this.viewMode_=undefined;},ready:function(){this.$.table.selectionMode=tr.ui.b.TableFormat.SelectionMode.ROW;this.$.info_bar.message='Note: Values displayed in the heavy view '+'are lower bounds (except for the root).';this.$.view_mode_container.appendChild(tr.ui.b.createSelector(this,'viewMode','memoryDumpHeapDetailsPane.viewMode',tr.b.MultiDimensionalViewType.TOP_DOWN_TREE_VIEW,[{label:'Top-down (Tree)',value:tr.b.MultiDimensionalViewType.TOP_DOWN_TREE_VIEW},{label:'Top-down (Heavy)',value:tr.b.MultiDimensionalViewType.TOP_DOWN_HEAVY_VIEW},{label:'Bottom-up (Heavy)',value:tr.b.MultiDimensionalViewType.BOTTOM_UP_HEAVY_VIEW}]));},set heapDumps(heapDumps){this.heapDumps_=heapDumps;this.scheduleRebuildPane_();},get heapDumps(){return this.heapDumps_;},set aggregationMode(aggregationMode){this.aggregationMode_=aggregationMode;this.scheduleRebuildPane_();},get aggregationMode(){return this.aggregationMode_;},set viewMode(viewMode){this.viewMode_=viewMode;this.scheduleRebuildPane_();},get viewMode(){return this.viewMode_;},get heavyView(){switch(this.viewMode){case tr.b.MultiDimensionalViewType.TOP_DOWN_HEAVY_VIEW:case tr.b.MultiDimensionalViewType.BOTTOM_UP_HEAVY_VIEW:return true;default:return false;}},rebuildPane_:function(){if(this.heapDumps_===undefined||this.heapDumps_.length===0){this.$.info_text.style.display='block';this.$.table.style.display='none';this.$.view_mode_container.style.display='none';this.$.info_bar.visible=false;this.$.table.clear();this.$.table.rebuild();return;}
+this.$.info_text.style.display='none';this.$.table.style.display='block';this.$.view_mode_container.style.display='block';this.$.info_bar.visible=this.heavyView;var stackFrameTrees=this.createStackFrameTrees_(this.heapDumps_);var rows=this.createRows_(stackFrameTrees);var columns=this.createColumns_(rows);this.$.table.tableRows=rows;this.$.table.tableColumns=columns;this.$.table.rebuild();tr.ui.analysis.expandTableRowsRecursively(this.$.table);},createStackFrameTrees_:function(heapDumps){return heapDumps.map(function(heapDump){if(heapDump===undefined)
+return undefined;var builder=new tr.b.MultiDimensionalViewBuilder(2);heapDump.entries.forEach(function(entry){var leafStackFrame=entry.leafStackFrame;var stackTracePath=leafStackFrame===undefined?[]:leafStackFrame.getUserFriendlyStackTrace().reverse();var objectTypeName=entry.objectTypeName;var objectTypeNamePath=objectTypeName===undefined?[]:[objectTypeName];builder.addPath([stackTracePath,objectTypeNamePath],entry.size,tr.b.MultiDimensionalViewBuilder.ValueKind.TOTAL);},this);return builder.buildView(this.viewMode);},this);},createRows_:function(stackFrameTrees){var definedHeapDump=tr.b.findFirstInArray(this.heapDumps);if(definedHeapDump===undefined)
+return[];var rootRowTitle=definedHeapDump.allocatorName;return[this.createHeapRowRecursively_(stackFrameTrees,RowDimension.ROOT,rootRowTitle)];},createHeapRowRecursively_:function(nodes,dimension,title){var cells=tr.ui.analysis.createCells(nodes,function(node){return{'Size':new ScalarNumeric(sizeInBytes_smallerIsBetter,node.total)};});var row={dimension:dimension,title:title,contexts:nodes,cells:cells};var stackFrameSubRows=this.createHeapDimensionSubRowsRecursively_(nodes,RowDimension.STACK_FRAME);var objectTypeSubRows=this.createHeapDimensionSubRowsRecursively_(nodes,RowDimension.OBJECT_TYPE);var subRows=stackFrameSubRows.concat(objectTypeSubRows);if(subRows.length>0)
+row.subRows=subRows;return row;},createHeapDimensionSubRowsRecursively_:function(nodes,dimension){var dimensionGroupedChildNodes=tr.b.dictionaryValues(tr.b.invertArrayOfDicts(nodes,function(node){var childDict={};var displayedChildrenTotal=0;var hasDisplayedChildren=false;for(var child of node.children[dimension].values()){if(!this.heavyView&&child.isLowerBound)
+continue;childDict[child.title[dimension]]=child;displayedChildrenTotal+=child.total;hasDisplayedChildren=true;}
+if(!this.heavyView&&displayedChildrenTotal<node.total&&hasDisplayedChildren){var otherTitle=node.title.slice();otherTitle[dimension]='<other>';childDict['<other>']={title:otherTitle,total:node.total-displayedChildrenTotal,children:[new Map(),new Map()]};}
+return childDict;},this));return dimensionGroupedChildNodes.map(function(subRowNodes){var subRowTitle=tr.b.findFirstInArray(subRowNodes).title[dimension];return this.createHeapRowRecursively_(subRowNodes,dimension,subRowTitle);},this);},createColumns_:function(rows){var titleColumn=new HeapDumpNodeTitleColumn('Stack frame');titleColumn.width='500px';var numericColumns=tr.ui.analysis.MemoryColumn.fromRows(rows,'cells',this.aggregationMode_,COLUMN_RULES);tr.ui.analysis.MemoryColumn.spaceEqually(numericColumns);var columns=[titleColumn].concat(numericColumns);return columns;}});return{RowDimension:RowDimension};});'use strict';tr.exportTo('tr.ui.analysis',function(){var SUBALLOCATION_CONTEXT=true;var MemoryAllocatorDumpInfoType=tr.model.MemoryAllocatorDumpInfoType;var PROVIDED_SIZE_LESS_THAN_AGGREGATED_CHILDREN=MemoryAllocatorDumpInfoType.PROVIDED_SIZE_LESS_THAN_AGGREGATED_CHILDREN;var PROVIDED_SIZE_LESS_THAN_LARGEST_OWNER=MemoryAllocatorDumpInfoType.PROVIDED_SIZE_LESS_THAN_LARGEST_OWNER;var LEFTWARDS_OPEN_HEADED_ARROW=String.fromCharCode(0x21FD);var RIGHTWARDS_OPEN_HEADED_ARROW=String.fromCharCode(0x21FE);var EN_DASH=String.fromCharCode(0x2013);var CIRCLED_LATIN_SMALL_LETTER_I=String.fromCharCode(0x24D8);function AllocatorDumpNameColumn(){tr.ui.analysis.TitleColumn.call(this,'Component');}
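+// AllocatorDumpNameColumn: 'Component' title column; suballocation rows are rendered in italics with their owners' full names as a tooltip.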
+AllocatorDumpNameColumn.prototype={__proto__:tr.ui.analysis.TitleColumn.prototype,formatTitle:function(row){if(!row.suballocation)
+return row.title;return tr.ui.b.createSpan({textContent:row.title,italic:true,tooltip:row.fullNames===undefined?undefined:row.fullNames.join(', ')});}};function getAndUpdateEntry(map,name,createdCallback){var entry=map.get(name);if(entry===undefined){entry={count:0};createdCallback(entry);map.set(name,entry);}
+entry.count++;return entry;}
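+// SizeInfoMessageBuilder: helper for composing the ownership/overlap info messages attached to size cells.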
+function SizeInfoMessageBuilder(){this.parts_=[];this.indent_=0;}
+SizeInfoMessageBuilder.prototype={append:function(){this.parts_.push.apply(this.parts_,Array.prototype.slice.apply(arguments));},appendMap:function(map,hasPluralSuffix,emptyText,itemCallback,opt_this){opt_this=opt_this||this;if(map.size===0){if(emptyText)
+this.append(emptyText);}else if(map.size===1){this.parts_.push(' ');var key=map.keys().next().value;itemCallback.call(opt_this,key,map.get(key));}else{if(hasPluralSuffix)
+this.parts_.push('s');this.parts_.push(':');this.indent_++;for(var key of map.keys()){this.parts_.push('\n',' '.repeat(3*(this.indent_-1)),' - ');itemCallback.call(opt_this,key,map.get(key));}
+this.indent_--;}},appendImportanceRange:function(range){this.append(' (importance: ');if(range.min===range.max)
+this.append(range.min);else
+this.append(range.min,EN_DASH,range.max);this.append(')');},appendSizeIfDefined:function(size){if(size!==undefined)
+this.append(' (',tr.v.Unit.byName.sizeInBytes.format(size),')');},appendSomeTimestampsQuantifier:function(){this.append(' ',tr.ui.analysis.MemoryColumn.SOME_TIMESTAMPS_INFO_QUANTIFIER);},build:function(){return this.parts_.join('');}};function EffectiveSizeColumn(name,cellPath,aggregationMode){tr.ui.analysis.NumericMemoryColumn.call(this,name,cellPath,aggregationMode);}
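+// EffectiveSizeColumn: numeric column for effective sizes; addInfos annotates cells with 'shared by' and 'shares ... with' ownership details.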
+EffectiveSizeColumn.prototype={__proto__:tr.ui.analysis.NumericMemoryColumn.prototype,addInfos:function(numerics,memoryAllocatorDumps,infos){if(memoryAllocatorDumps===undefined)
+return;var ownerNameToEntry=new Map();var ownedNameToEntry=new Map();for(var i=0;i<numerics.length;i++){if(numerics[i]===undefined)
+continue;var dump=memoryAllocatorDumps[i];if(dump===SUBALLOCATION_CONTEXT)
+return;dump.ownedBy.forEach(function(ownerLink){var ownerDump=ownerLink.source;this.getAndUpdateOwnershipEntry_(ownerNameToEntry,ownerDump,ownerLink);},this);var ownedLink=dump.owns;if(ownedLink!==undefined){var ownedDump=ownedLink.target;var ownedEntry=this.getAndUpdateOwnershipEntry_(ownedNameToEntry,ownedDump,ownedLink,true);var sharerNameToEntry=ownedEntry.sharerNameToEntry;ownedDump.ownedBy.forEach(function(sharerLink){var sharerDump=sharerLink.source;if(sharerDump===dump)
+return;this.getAndUpdateOwnershipEntry_(sharerNameToEntry,sharerDump,sharerLink);},this);}}
+if(ownerNameToEntry.size>0){var messageBuilder=new SizeInfoMessageBuilder();messageBuilder.append('shared by');messageBuilder.appendMap(ownerNameToEntry,false,undefined,function(ownerName,ownerEntry){messageBuilder.append(ownerName);if(ownerEntry.count<numerics.length)
+messageBuilder.appendSomeTimestampsQuantifier();messageBuilder.appendImportanceRange(ownerEntry.importanceRange);},this);infos.push({message:messageBuilder.build(),icon:LEFTWARDS_OPEN_HEADED_ARROW,color:'green'});}
+if(ownedNameToEntry.size>0){var messageBuilder=new SizeInfoMessageBuilder();messageBuilder.append('shares');messageBuilder.appendMap(ownedNameToEntry,false,undefined,function(ownedName,ownedEntry){messageBuilder.append(ownedName);var ownedCount=ownedEntry.count;if(ownedCount<numerics.length)
+messageBuilder.appendSomeTimestampsQuantifier();messageBuilder.appendImportanceRange(ownedEntry.importanceRange);messageBuilder.append(' with');messageBuilder.appendMap(ownedEntry.sharerNameToEntry,false,' no other dumps',function(sharerName,sharerEntry){messageBuilder.append(sharerName);if(sharerEntry.count<ownedCount)
+messageBuilder.appendSomeTimestampsQuantifier();messageBuilder.appendImportanceRange(sharerEntry.importanceRange);},this);},this);infos.push({message:messageBuilder.build(),icon:RIGHTWARDS_OPEN_HEADED_ARROW,color:'green'});}},getAndUpdateOwnershipEntry_:function(map,dump,link,opt_withSharerNameToEntry){var entry=getAndUpdateEntry(map,dump.quantifiedName,function(newEntry){newEntry.importanceRange=new tr.b.Range();if(opt_withSharerNameToEntry)
+newEntry.sharerNameToEntry=new Map();});entry.importanceRange.addValue(link.importance||0);return entry;}};function SizeColumn(name,cellPath,aggregationMode){tr.ui.analysis.NumericMemoryColumn.call(this,name,cellPath,aggregationMode);}
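+// SizeColumn: numeric column for allocator sizes; addInfos adds sibling-overlap notes and warnings when the provided size is less than a dependency such as the aggregated size of the children.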
+SizeColumn.prototype={__proto__:tr.ui.analysis.NumericMemoryColumn.prototype,addInfos:function(numerics,memoryAllocatorDumps,infos){if(memoryAllocatorDumps===undefined)
+return;this.addOverlapInfo_(numerics,memoryAllocatorDumps,infos);this.addProvidedSizeWarningInfos_(numerics,memoryAllocatorDumps,infos);},addOverlapInfo_:function(numerics,memoryAllocatorDumps,infos){var siblingNameToEntry=new Map();for(var i=0;i<numerics.length;i++){if(numerics[i]===undefined)
+continue;var dump=memoryAllocatorDumps[i];if(dump===SUBALLOCATION_CONTEXT)
+return;var ownedBySiblingSizes=dump.ownedBySiblingSizes;for(var siblingDump of ownedBySiblingSizes.keys()){var siblingName=siblingDump.name;getAndUpdateEntry(siblingNameToEntry,siblingName,function(newEntry){if(numerics.length===1)
+newEntry.size=ownedBySiblingSizes.get(siblingDump);});}}
+if(siblingNameToEntry.size>0){var messageBuilder=new SizeInfoMessageBuilder();messageBuilder.append('overlaps with its sibling');messageBuilder.appendMap(siblingNameToEntry,true,undefined,function(siblingName,siblingEntry){messageBuilder.append('\'',siblingName,'\'');messageBuilder.appendSizeIfDefined(siblingEntry.size);if(siblingEntry.count<numerics.length)
+messageBuilder.appendSomeTimestampsQuantifier();},this);infos.push({message:messageBuilder.build(),icon:CIRCLED_LATIN_SMALL_LETTER_I,color:'blue'});}},addProvidedSizeWarningInfos_:function(numerics,memoryAllocatorDumps,infos){var infoTypeToEntry=new Map();for(var i=0;i<numerics.length;i++){if(numerics[i]===undefined)
+continue;var dump=memoryAllocatorDumps[i];if(dump===SUBALLOCATION_CONTEXT)
+return;dump.infos.forEach(function(dumpInfo){getAndUpdateEntry(infoTypeToEntry,dumpInfo.type,function(newEntry){if(numerics.length===1){newEntry.providedSize=dumpInfo.providedSize;newEntry.dependencySize=dumpInfo.dependencySize;}});});}
+for(var infoType of infoTypeToEntry.keys()){var entry=infoTypeToEntry.get(infoType);var messageBuilder=new SizeInfoMessageBuilder();messageBuilder.append('provided size');messageBuilder.appendSizeIfDefined(entry.providedSize);var dependencyName;switch(infoType){case PROVIDED_SIZE_LESS_THAN_AGGREGATED_CHILDREN:dependencyName='the aggregated size of the children';break;case PROVIDED_SIZE_LESS_THAN_LARGEST_OWNER:dependencyName='the size of the largest owner';break;default:dependencyName='an unknown dependency';break;}
+messageBuilder.append(' was less than ',dependencyName);messageBuilder.appendSizeIfDefined(entry.dependencySize);if(entry.count<numerics.length)
+messageBuilder.appendSomeTimestampsQuantifier();infos.push(tr.ui.analysis.createWarningInfo(messageBuilder.build()));}}};var NUMERIC_COLUMN_RULES=[{condition:tr.model.MemoryAllocatorDump.EFFECTIVE_SIZE_NUMERIC_NAME,importance:10,columnConstructor:EffectiveSizeColumn},{condition:tr.model.MemoryAllocatorDump.SIZE_NUMERIC_NAME,importance:9,columnConstructor:SizeColumn},{condition:'page_size',importance:0,columnConstructor:tr.ui.analysis.NumericMemoryColumn},{condition:/size/,importance:5,columnConstructor:tr.ui.analysis.NumericMemoryColumn},{importance:0,columnConstructor:tr.ui.analysis.NumericMemoryColumn}];var DIAGNOSTIC_COLUMN_RULES=[{importance:0,columnConstructor:tr.ui.analysis.StringMemoryColumn}];Polymer('tr-ui-a-memory-dump-allocator-details-pane',{created:function(){this.memoryAllocatorDumps_=undefined;this.heapDumps_=undefined;this.aggregationMode_=undefined;},ready:function(){this.$.table.selectionMode=tr.ui.b.TableFormat.SelectionMode.ROW;},set memoryAllocatorDumps(memoryAllocatorDumps){this.memoryAllocatorDumps_=memoryAllocatorDumps;this.scheduleRebuildPane_();},get memoryAllocatorDumps(){return this.memoryAllocatorDumps_;},set heapDumps(heapDumps){this.heapDumps_=heapDumps;this.scheduleRebuildPane_();},set aggregationMode(aggregationMode){this.aggregationMode_=aggregationMode;this.scheduleRebuildPane_();},get aggregationMode(){return this.aggregationMode_;},rebuildPane_:function(){if(this.memoryAllocatorDumps_===undefined||this.memoryAllocatorDumps_.length===0){this.$.info_text.style.display='block';this.$.table.style.display='none';this.$.table.clear();this.$.table.rebuild();this.childPaneBuilder=undefined;return;}
+this.$.info_text.style.display='none';this.$.table.style.display='block';var rows=this.createRows_();var columns=this.createColumns_(rows);rows.forEach(function(rootRow){tr.ui.analysis.aggregateTableRowCellsRecursively(rootRow,columns,function(contexts){return contexts!==undefined&&contexts.some(function(context){return context===SUBALLOCATION_CONTEXT;});});});this.$.table.tableRows=rows;this.$.table.tableColumns=columns;this.$.table.rebuild();tr.ui.analysis.expandTableRowsRecursively(this.$.table);if(this.heapDumps_===undefined){this.childPaneBuilder=undefined;}else{this.childPaneBuilder=function(){var pane=document.createElement('tr-ui-a-memory-dump-heap-details-pane');pane.heapDumps=this.heapDumps_;pane.aggregationMode=this.aggregationMode_;return pane;}.bind(this);}},createRows_:function(){return[this.createAllocatorRowRecursively_(this.memoryAllocatorDumps_)];},createAllocatorRowRecursively_:function(dumps){var definedDump=tr.b.findFirstInArray(dumps);var title=definedDump.name;var fullName=definedDump.fullName;var numericCells=tr.ui.analysis.createCells(dumps,function(dump){return dump.numerics;});var diagnosticCells=tr.ui.analysis.createCells(dumps,function(dump){return dump.diagnostics;});var suballocatedBy=undefined;if(title.startsWith('__')){for(var i=0;i<dumps.length;i++){var dump=dumps[i];if(dump===undefined||dump.ownedBy.length===0){continue;}
+var ownerDump=dump.ownedBy[0].source;if(dump.ownedBy.length>1||dump.children.length>0||ownerDump.containerMemoryDump!==dump.containerMemoryDump){suballocatedBy=undefined;break;}
+if(suballocatedBy===undefined){suballocatedBy=ownerDump.fullName;}else if(suballocatedBy!==ownerDump.fullName){suballocatedBy=undefined;break;}}}
+var row={title:title,fullNames:[fullName],contexts:dumps,numericCells:numericCells,diagnosticCells:diagnosticCells,suballocatedBy:suballocatedBy};var childDumpNameToDumps=tr.b.invertArrayOfDicts(dumps,function(dump){return tr.b.arrayToDict(dump.children,function(child){return child.name;});});var subRows=[];var suballocationClassificationRootNode=undefined;tr.b.iterItems(childDumpNameToDumps,function(childName,childDumps){var childRow=this.createAllocatorRowRecursively_(childDumps);if(childRow.suballocatedBy===undefined){subRows.push(childRow);}else{suballocationClassificationRootNode=this.classifySuballocationRow_(childRow,suballocationClassificationRootNode);}},this);if(suballocationClassificationRootNode!==undefined){var suballocationRow=this.createSuballocationRowRecursively_('suballocations',suballocationClassificationRootNode);subRows.push(suballocationRow);}
+if(subRows.length>0)
+row.subRows=subRows;return row;},classifySuballocationRow_:function(suballocationRow,rootNode){if(rootNode===undefined){rootNode={children:{},row:undefined};}
+var suballocationLevels=suballocationRow.suballocatedBy.split('/');var currentNode=rootNode;for(var i=0;i<suballocationLevels.length;i++){var suballocationLevel=suballocationLevels[i];var nextNode=currentNode.children[suballocationLevel];if(nextNode===undefined){currentNode.children[suballocationLevel]=nextNode={children:{},row:undefined};}
+var currentNode=nextNode;}
+var existingRow=currentNode.row;if(existingRow!==undefined){for(var i=0;i<suballocationRow.contexts.length;i++){var newContext=suballocationRow.contexts[i];if(newContext===undefined)
+continue;if(existingRow.contexts[i]!==undefined)
+throw new Error('Multiple suballocations with the same owner name');existingRow.contexts[i]=newContext;['numericCells','diagnosticCells'].forEach(function(cellKey){var suballocationCells=suballocationRow[cellKey];if(suballocationCells===undefined)
+return;tr.b.iterItems(suballocationCells,function(cellName,cell){if(cell===undefined)
+return;var fields=cell.fields;if(fields===undefined)
+return;var field=fields[i];if(field===undefined)
+return;var existingCells=existingRow[cellKey];if(existingCells===undefined){existingCells={};existingRow[cellKey]=existingCells;}
+var existingCell=existingCells[cellName];if(existingCell===undefined){existingCell=new tr.ui.analysis.MemoryCell(new Array(fields.length));existingCells[cellName]=existingCell;}
+existingCell.fields[i]=field;});});}
+existingRow.fullNames.push.apply(existingRow.fullNames,suballocationRow.fullNames);}else{currentNode.row=suballocationRow;}
+return rootNode;},createSuballocationRowRecursively_:function(name,node){var childCount=Object.keys(node.children).length;if(childCount===0){if(node.row===undefined)
+throw new Error('Suballocation node must have a row or children');var row=node.row;row.title=name;row.suballocation=true;return row;}
+var subRows=tr.b.dictionaryValues(tr.b.mapItems(node.children,this.createSuballocationRowRecursively_,this));if(node.row!==undefined){var row=node.row;row.title='<unspecified>';row.suballocation=true;subRows.unshift(row);}
+var contexts=new Array(subRows[0].contexts.length);for(var i=0;i<subRows.length;i++){subRows[i].contexts.forEach(function(subContext,index){if(subContext!==undefined)
+contexts[index]=SUBALLOCATION_CONTEXT;});}
+return{title:name,suballocation:true,contexts:contexts,subRows:subRows};},createColumns_:function(rows){var titleColumn=new AllocatorDumpNameColumn();titleColumn.width='200px';var numericColumns=tr.ui.analysis.MemoryColumn.fromRows(rows,'numericCells',this.aggregationMode_,NUMERIC_COLUMN_RULES);var diagnosticColumns=tr.ui.analysis.MemoryColumn.fromRows(rows,'diagnosticCells',this.aggregationMode_,DIAGNOSTIC_COLUMN_RULES);var fieldColumns=numericColumns.concat(diagnosticColumns);tr.ui.analysis.MemoryColumn.spaceEqually(fieldColumns);var columns=[titleColumn].concat(fieldColumns);return columns;}});return{SUBALLOCATION_CONTEXT:SUBALLOCATION_CONTEXT,AllocatorDumpNameColumn:AllocatorDumpNameColumn,EffectiveSizeColumn:EffectiveSizeColumn,SizeColumn:SizeColumn};});'use strict';tr.exportTo('tr.ui.analysis',function(){var ScalarNumeric=tr.v.ScalarNumeric;var sizeInBytes_smallerIsBetter=tr.v.Unit.byName.sizeInBytes_smallerIsBetter;var CONSTANT_COLUMN_RULES=[{condition:'Start address',importance:0,columnConstructor:tr.ui.analysis.StringMemoryColumn}];var VARIABLE_COLUMN_RULES=[{condition:'Virtual size',importance:7,columnConstructor:tr.ui.analysis.NumericMemoryColumn},{condition:'Protection flags',importance:6,columnConstructor:tr.ui.analysis.StringMemoryColumn},{condition:'PSS',importance:5,columnConstructor:tr.ui.analysis.NumericMemoryColumn},{condition:'Private dirty',importance:4,columnConstructor:tr.ui.analysis.NumericMemoryColumn},{condition:'Private clean',importance:3,columnConstructor:tr.ui.analysis.NumericMemoryColumn},{condition:'Shared dirty',importance:2,columnConstructor:tr.ui.analysis.NumericMemoryColumn},{condition:'Shared clean',importance:1,columnConstructor:tr.ui.analysis.NumericMemoryColumn},{condition:'Swapped',importance:0,columnConstructor:tr.ui.analysis.NumericMemoryColumn}];var BYTE_STAT_COLUMN_MAP={'proportionalResident':'PSS','privateDirtyResident':'Private dirty','privateCleanResident':'Private clean','sharedDirtyResident':'Shared dirty','sharedCleanResident':'Shared clean','swapped':'Swapped'};function hexString(address,is64BitAddress){if(address===undefined)
+return undefined;var hexPadding=is64BitAddress?'0000000000000000':'00000000';return(hexPadding+address.toString(16)).substr(-hexPadding.length);}
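+// pruneEmptyRuleRows: recursively removes classification-rule rows that contain no VM regions.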
+function pruneEmptyRuleRows(row){if(row.subRows===undefined||row.subRows.length===0)
+return;if(row.subRows[0].rule===undefined){return;}
+row.subRows.forEach(pruneEmptyRuleRows);row.subRows=row.subRows.filter(function(subRow){return subRow.subRows.length>0;});}
+Polymer('tr-ui-a-memory-dump-vm-regions-details-pane',{created:function(){this.vmRegions_=undefined;this.aggregationMode_=undefined;},ready:function(){this.$.table.selectionMode=tr.ui.b.TableFormat.SelectionMode.ROW;},set vmRegions(vmRegions){this.vmRegions_=vmRegions;this.scheduleRebuildPane_();},get vmRegions(){return this.vmRegions_;},set aggregationMode(aggregationMode){this.aggregationMode_=aggregationMode;this.scheduleRebuildPane_();},get aggregationMode(){return this.aggregationMode_;},rebuildPane_:function(){if(this.vmRegions_===undefined||this.vmRegions_.length===0){this.$.info_text.style.display='block';this.$.table.style.display='none';this.$.table.clear();this.$.table.rebuild();return;}
+this.$.info_text.style.display='none';this.$.table.style.display='block';var rows=this.createRows_(this.vmRegions_);var columns=this.createColumns_(rows);this.$.table.tableRows=rows;this.$.table.tableColumns=columns;this.$.table.rebuild();tr.ui.analysis.expandTableRowsRecursively(this.$.table);},createRows_:function(timeToVmRegionTree){var is64BitAddress=timeToVmRegionTree.some(function(vmRegionTree){if(vmRegionTree===undefined)
+return false;return vmRegionTree.someRegion(function(region){if(region.startAddress===undefined)
+return false;return region.startAddress>=4294967296;});});return[this.createClassificationNodeRow(timeToVmRegionTree,is64BitAddress)];},createClassificationNodeRow:function(timeToNode,is64BitAddress){var definedNode=tr.b.findFirstInArray(timeToNode);var childNodeIdToTimeToNode=tr.b.dictionaryValues(tr.b.invertArrayOfDicts(timeToNode,function(node){var children=node.children;if(children===undefined)
+return undefined;var childMap={};children.forEach(function(childNode){if(!childNode.hasRegions)
+return;childMap[childNode.title]=childNode;});return childMap;}));var childNodeSubRows=childNodeIdToTimeToNode.map(function(timeToChildNode){return this.createClassificationNodeRow(timeToChildNode,is64BitAddress);},this);var regionIdToTimeToRegion=tr.b.dictionaryValues(tr.b.invertArrayOfDicts(timeToNode,function(node){var regions=node.regions;if(regions===undefined)
+return undefined;return tr.b.arrayToDict(regions,function(region){return region.uniqueIdWithinProcess;});}));var regionSubRows=regionIdToTimeToRegion.map(function(timeToRegion){return this.createRegionRow_(timeToRegion,is64BitAddress);},this);var subRows=childNodeSubRows.concat(regionSubRows);return{title:definedNode.title,contexts:timeToNode,variableCells:this.createVariableCells_(timeToNode),subRows:subRows};},createRegionRow_:function(timeToRegion,is64BitAddress){var definedRegion=tr.b.findFirstInArray(timeToRegion);return{title:definedRegion.mappedFile,contexts:timeToRegion,constantCells:this.createConstantCells_(definedRegion,is64BitAddress),variableCells:this.createVariableCells_(timeToRegion)};},createConstantCells_:function(definedRegion,is64BitAddress){return tr.ui.analysis.createCells([definedRegion],function(region){var startAddress=region.startAddress;if(startAddress===undefined)
+return undefined;return{'Start address':hexString(startAddress,is64BitAddress)};});},createVariableCells_:function(timeToRegion){return tr.ui.analysis.createCells(timeToRegion,function(region){var fields={};var sizeInBytes=region.sizeInBytes;if(sizeInBytes!==undefined){fields['Virtual size']=new ScalarNumeric(sizeInBytes_smallerIsBetter,sizeInBytes);}
+var protectionFlags=region.protectionFlagsToString;if(protectionFlags!==undefined)
+fields['Protection flags']=protectionFlags;tr.b.iterItems(BYTE_STAT_COLUMN_MAP,function(byteStatName,columnName){var byteStat=region.byteStats[byteStatName];if(byteStat===undefined)
+return;fields[columnName]=new ScalarNumeric(sizeInBytes_smallerIsBetter,byteStat);});return fields;});},createColumns_:function(rows){var titleColumn=new tr.ui.analysis.TitleColumn('Mapped file');titleColumn.width='200px';var constantColumns=tr.ui.analysis.MemoryColumn.fromRows(rows,'constantCells',undefined,CONSTANT_COLUMN_RULES);var variableColumns=tr.ui.analysis.MemoryColumn.fromRows(rows,'variableCells',this.aggregationMode_,VARIABLE_COLUMN_RULES);var fieldColumns=constantColumns.concat(variableColumns);tr.ui.analysis.MemoryColumn.spaceEqually(fieldColumns);var columns=[titleColumn].concat(fieldColumns);return columns;}});return{};});'use strict';Polymer('tr-ui-b-color-legend',{ready:function(){var blackSquareCharCode=9632;this.$.square.innerText=String.fromCharCode(blackSquareCharCode);this.label_=undefined;this.compoundEventSelectionState_=tr.model.CompoundEventSelectionState.NOT_SELECTED;},set compoundEventSelectionState(compoundEventSelectionState){this.compoundEventSelectionState_=compoundEventSelectionState;},get label(){return this.label_;},set label(label){if(label===undefined){this.setLabelAndColorId(undefined,undefined);return;}
+var colorId=tr.b.ColorScheme.getColorIdForGeneralPurposeString(label);this.setLabelAndColorId(label,colorId);},setLabelAndColorId:function(label,colorId){this.label_=label;this.$.label.textContent='';this.$.label.appendChild(tr.ui.b.asHTMLOrTextNode(label));if(colorId===undefined)
+this.$.square.style.color='initial';else
+this.$.square.style.color=tr.b.ColorScheme.colorsAsStrings[colorId];}});'use strict';Polymer('tr-ui-b-view-specific-brushing-state',{get viewId(){return this.getAttribute('view-id');},set viewId(viewId){this.setAttribute('view-id',viewId);},get:function(){var viewId=this.viewId;if(!viewId)
+throw new Error('Element must have a view-id attribute!');var brushingStateController=tr.c.BrushingStateController.getControllerForElement(this);if(!brushingStateController)
+return undefined;return brushingStateController.getViewSpecificBrushingState(viewId);},set:function(state){var viewId=this.viewId;if(!viewId)
+throw new Error('Element must have a view-id attribute!');var brushingStateController=tr.c.BrushingStateController.getControllerForElement(this);if(!brushingStateController)
+return;brushingStateController.changeViewSpecificBrushingState(viewId,state);}});'use strict';tr.exportTo('tr.ui.analysis',function(){var ColorScheme=tr.b.ColorScheme;var ScalarNumeric=tr.v.ScalarNumeric;var sizeInBytes_smallerIsBetter=tr.v.Unit.byName.sizeInBytes_smallerIsBetter;var PLATFORM_SPECIFIC_TOTAL_NAME_SUFFIX='_bytes';var DISPLAYED_SIZE_NUMERIC_NAME=tr.model.MemoryAllocatorDump.DISPLAYED_SIZE_NUMERIC_NAME;var SOME_TIMESTAMPS_INFO_QUANTIFIER=tr.ui.analysis.MemoryColumn.SOME_TIMESTAMPS_INFO_QUANTIFIER;var RIGHTWARDS_ARROW_WITH_HOOK=String.fromCharCode(0x21AA);var RIGHTWARDS_ARROW_FROM_BAR=String.fromCharCode(0x21A6);var GREATER_THAN_OR_EQUAL_TO=String.fromCharCode(0x2265);var UNMARRIED_PARTNERSHIP_SYMBOL=String.fromCharCode(0x26AF);var TRIGRAM_FOR_HEAVEN=String.fromCharCode(0x2630);function lazyMap(list,fn,opt_this){opt_this=opt_this||this;var result=undefined;list.forEach(function(item,index){var value=fn.call(opt_this,item,index);if(value===undefined)
+return;if(result===undefined)
+result=new Array(list.length);result[index]=value;});return result;}
+function ProcessNameColumn(){tr.ui.analysis.TitleColumn.call(this,'Process');}
+ProcessNameColumn.prototype={__proto__:tr.ui.analysis.TitleColumn.prototype,formatTitle:function(row){if(row.contexts===undefined)
+return row.title;var titleEl=document.createElement('tr-ui-b-color-legend');titleEl.label=row.title;return titleEl;}};function UsedMemoryColumn(name,cellPath,aggregationMode){tr.ui.analysis.NumericMemoryColumn.call(this,name,cellPath,aggregationMode);}
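+// UsedMemoryColumn: numeric column for per-process resident memory values; getChildPaneBuilder opens a VM-regions details pane built from the dumps' most recent VM regions.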
+UsedMemoryColumn.COLOR=ColorScheme.getColorForReservedNameAsString('used_memory_column');UsedMemoryColumn.OLDER_COLOR=ColorScheme.getColorForReservedNameAsString('older_used_memory_column');UsedMemoryColumn.prototype={__proto__:tr.ui.analysis.NumericMemoryColumn.prototype,get title(){return tr.ui.b.createSpan({textContent:this.name,color:UsedMemoryColumn.COLOR});},color:function(numerics,processMemoryDumps){return UsedMemoryColumn.COLOR;},getChildPaneBuilder:function(processMemoryDumps){if(processMemoryDumps===undefined)
+return undefined;var vmRegions=lazyMap(processMemoryDumps,function(pmd){if(pmd===undefined)
+return undefined;return pmd.mostRecentVmRegions;});if(vmRegions===undefined)
+return undefined;return function(){var pane=document.createElement('tr-ui-a-memory-dump-vm-regions-details-pane');pane.vmRegions=vmRegions;pane.aggregationMode=this.aggregationMode;return pane;}.bind(this);}};function PeakMemoryColumn(name,cellPath,aggregationMode){UsedMemoryColumn.call(this,name,cellPath,aggregationMode);}
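+// PeakMemoryColumn: UsedMemoryColumn variant for peak RSS; addInfos notes whether the peak is since the previous dump (resettable) or since process startup.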
+PeakMemoryColumn.prototype={__proto__:UsedMemoryColumn.prototype,addInfos:function(numerics,processMemoryDumps,infos){if(processMemoryDumps===undefined)
+return;var resettableValueCount=0;var nonResettableValueCount=0;for(var i=0;i<numerics.length;i++){if(numerics[i]===undefined)
+continue;if(processMemoryDumps[i].arePeakResidentBytesResettable)
+resettableValueCount++;else
+nonResettableValueCount++;}
+if(resettableValueCount>0&&nonResettableValueCount>0){infos.push(tr.ui.analysis.createWarningInfo('Both resettable and '+'non-resettable peak RSS values were provided by the process'));}else if(resettableValueCount>0){infos.push({icon:RIGHTWARDS_ARROW_WITH_HOOK,message:'Peak RSS since previous memory dump.'});}else{infos.push({icon:RIGHTWARDS_ARROW_FROM_BAR,message:'Peak RSS since process startup. Finer grained '+'peaks require a Linux kernel version '+
+GREATER_THAN_OR_EQUAL_TO+' 4.0.'});}}};function ByteStatColumn(name,cellPath,aggregationMode){UsedMemoryColumn.call(this,name,cellPath,aggregationMode);}
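+// ByteStatColumn: UsedMemoryColumn variant for VM-region byte stats; values carried over from an older heavy dump are shown in a different color and flagged with an info icon.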
+ByteStatColumn.prototype={__proto__:UsedMemoryColumn.prototype,color:function(numerics,processMemoryDumps){if(processMemoryDumps===undefined)
+return UsedMemoryColumn.COLOR;var allOlderValues=processMemoryDumps.every(function(processMemoryDump){if(processMemoryDump===undefined)
+return true;return!processMemoryDump.hasOwnVmRegions;});if(allOlderValues)
+return UsedMemoryColumn.OLDER_COLOR;else
+return UsedMemoryColumn.COLOR;},addInfos:function(numerics,processMemoryDumps,infos){if(processMemoryDumps===undefined)
+return;var olderValueCount=0;for(var i=0;i<numerics.length;i++){var processMemoryDump=processMemoryDumps[i];if(processMemoryDump!==undefined&&!processMemoryDump.hasOwnVmRegions){olderValueCount++;}}
+if(olderValueCount===0)
+return;var infoQuantifier=olderValueCount<numerics.length?' '+SOME_TIMESTAMPS_INFO_QUANTIFIER:'';infos.push({message:'Older value'+infoQuantifier+' (only heavy (purple) memory dumps contain memory maps).',icon:UNMARRIED_PARTNERSHIP_SYMBOL});}};UsedMemoryColumn.RULES=[{condition:'Total resident',importance:10,columnConstructor:UsedMemoryColumn},{condition:'Peak total resident',importance:9,columnConstructor:PeakMemoryColumn},{condition:'PSS',importance:8,columnConstructor:ByteStatColumn},{condition:'Private dirty',importance:7,columnConstructor:ByteStatColumn},{condition:'Swapped',importance:6,columnConstructor:ByteStatColumn},{importance:0,columnConstructor:UsedMemoryColumn}];UsedMemoryColumn.TOTALS_MAP={'residentBytes':'Total resident','peakResidentBytes':'Peak total resident'};UsedMemoryColumn.BYTE_STAT_MAP={'proportionalResident':'PSS','privateDirtyResident':'Private dirty','swapped':'Swapped'};function AllocatorColumn(name,cellPath,aggregationMode){tr.ui.analysis.NumericMemoryColumn.call(this,name,cellPath,aggregationMode);}
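+// AllocatorColumn: numeric column for per-allocator sizes; addInfos flags timestamps that provide a heap dump, and getChildPaneBuilder opens the allocator details pane.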
+AllocatorColumn.prototype={__proto__:tr.ui.analysis.NumericMemoryColumn.prototype,get title(){var titleEl=document.createElement('tr-ui-b-color-legend');titleEl.label=this.name;return titleEl;},addInfos:function(numerics,processMemoryDumps,infos){if(processMemoryDumps===undefined)
+return;var heapDumpCount=0;for(var i=0;i<processMemoryDumps.length;i++){var processMemoryDump=processMemoryDumps[i];if(processMemoryDump===undefined)
+continue;var heapDumps=processMemoryDump.heapDumps;if(heapDumps===undefined)
+continue;if(heapDumps[this.name]!==undefined)
+heapDumpCount++;}
+if(heapDumpCount===0)
+return;var infoQuantifier=heapDumpCount<numerics.length?' '+SOME_TIMESTAMPS_INFO_QUANTIFIER:'';infos.push({message:'Heap dump provided'+infoQuantifier+'.',icon:TRIGRAM_FOR_HEAVEN});},getChildPaneBuilder:function(processMemoryDumps){if(processMemoryDumps===undefined)
+return undefined;var memoryAllocatorDumps=lazyMap(processMemoryDumps,function(pmd){if(pmd===undefined)
+return undefined;return pmd.getMemoryAllocatorDumpByFullName(this.name);},this);if(memoryAllocatorDumps===undefined)
+return undefined;var heapDumps=lazyMap(processMemoryDumps,function(pmd){if(pmd===undefined||pmd.heapDumps===undefined)
+return undefined;return pmd.heapDumps[this.name];},this);return function(){var pane=document.createElement('tr-ui-a-memory-dump-allocator-details-pane');pane.memoryAllocatorDumps=memoryAllocatorDumps;pane.heapDumps=heapDumps;pane.aggregationMode=this.aggregationMode;return pane;}.bind(this);}};function TracingColumn(name,cellPath,aggregationMode){AllocatorColumn.call(this,name,cellPath,aggregationMode);}
+TracingColumn.COLOR=ColorScheme.getColorForReservedNameAsString('tracing_memory_column');TracingColumn.prototype={__proto__:AllocatorColumn.prototype,get title(){return tr.ui.b.createSpan({textContent:this.name,color:TracingColumn.COLOR});},color:function(numerics,processMemoryDumps){return TracingColumn.COLOR;}};AllocatorColumn.RULES=[{condition:'tracing',importance:0,columnConstructor:TracingColumn},{importance:1,columnConstructor:AllocatorColumn}];Polymer('tr-ui-a-memory-dump-overview-pane',{created:function(){this.processMemoryDumps_=undefined;this.aggregationMode_=undefined;},ready:function(){this.$.table.selectionMode=tr.ui.b.TableFormat.SelectionMode.CELL;this.$.table.addEventListener('selection-changed',function(tableEvent){tableEvent.stopPropagation();this.changeChildPane_();}.bind(this));},set processMemoryDumps(processMemoryDumps){this.processMemoryDumps_=processMemoryDumps;this.scheduleRebuildPane_();},get processMemoryDumps(){return this.processMemoryDumps_;},set aggregationMode(aggregationMode){this.aggregationMode_=aggregationMode;this.scheduleRebuildPane_();},get aggregationMode(){return this.aggregationMode_;},get selectedMemoryCell(){if(this.processMemoryDumps_===undefined||this.processMemoryDumps_.length===0){return undefined;}
+var selectedTableRow=this.$.table.selectedTableRow;if(!selectedTableRow)
+return undefined;var selectedColumnIndex=this.$.table.selectedColumnIndex;if(selectedColumnIndex===undefined)
+return undefined;var selectedColumn=this.$.table.tableColumns[selectedColumnIndex];var selectedMemoryCell=selectedColumn.cell(selectedTableRow);return selectedMemoryCell;},changeChildPane_:function(){this.storeSelection_();this.childPaneBuilder=this.determineChildPaneBuilderFromSelection_();},determineChildPaneBuilderFromSelection_:function(){if(this.processMemoryDumps_===undefined||this.processMemoryDumps_.length===0){return undefined;}
+var selectedTableRow=this.$.table.selectedTableRow;if(!selectedTableRow)
+return undefined;var selectedColumnIndex=this.$.table.selectedColumnIndex;if(selectedColumnIndex===undefined)
+return undefined;var selectedColumn=this.$.table.tableColumns[selectedColumnIndex];return selectedColumn.getChildPaneBuilder(selectedTableRow.contexts);},rebuildPane_:function(){if(this.processMemoryDumps_===undefined||this.processMemoryDumps_.length===0){this.$.info_text.style.display='block';this.$.table.style.display='none';this.$.table.clear();this.$.table.rebuild();return;}
+this.$.info_text.style.display='none';this.$.table.style.display='block';var rows=this.createRows_();var columns=this.createColumns_(rows);var footerRows=this.createFooterRows_(rows,columns);this.$.table.tableRows=rows;this.$.table.footerRows=footerRows;this.$.table.tableColumns=columns;this.$.table.rebuild();this.restoreSelection_();},createRows_:function(){var timeToPidToProcessMemoryDump=this.processMemoryDumps_;var pidToTimeToProcessMemoryDump=tr.b.invertArrayOfDicts(timeToPidToProcessMemoryDump);return tr.b.dictionaryValues(tr.b.mapItems(pidToTimeToProcessMemoryDump,function(pid,timeToDump){var process=tr.b.findFirstInArray(timeToDump).process;var usedMemoryCells=tr.ui.analysis.createCells(timeToDump,function(dump){var sizes={};var totals=dump.totals;if(totals!==undefined){tr.b.iterItems(UsedMemoryColumn.TOTALS_MAP,function(totalName,cellName){var total=totals[totalName];if(total===undefined)
+return;sizes[cellName]=new ScalarNumeric(sizeInBytes_smallerIsBetter,total);});var platformSpecific=totals.platformSpecific;if(platformSpecific!==undefined){tr.b.iterItems(platformSpecific,function(name,size){if(name.endsWith(PLATFORM_SPECIFIC_TOTAL_NAME_SUFFIX)){name=name.substring(0,name.length-
+PLATFORM_SPECIFIC_TOTAL_NAME_SUFFIX.length);}
+name=name.replace('_',' ').trim();name=name.charAt(0).toUpperCase()+name.slice(1);sizes[name]=new ScalarNumeric(sizeInBytes_smallerIsBetter,size);});}}
+var vmRegions=dump.mostRecentVmRegions;if(vmRegions!==undefined){tr.b.iterItems(UsedMemoryColumn.BYTE_STAT_MAP,function(byteStatName,cellName){var byteStat=vmRegions.byteStats[byteStatName];if(byteStat===undefined)
+return;sizes[cellName]=new ScalarNumeric(sizeInBytes_smallerIsBetter,byteStat);});}
+return sizes;});var allocatorCells=tr.ui.analysis.createCells(timeToDump,function(dump){var memoryAllocatorDumps=dump.memoryAllocatorDumps;if(memoryAllocatorDumps===undefined)
+return undefined;var sizes={};memoryAllocatorDumps.forEach(function(allocatorDump){var rootDisplayedSizeNumeric=allocatorDump.numerics[DISPLAYED_SIZE_NUMERIC_NAME];if(rootDisplayedSizeNumeric!==undefined)
+sizes[allocatorDump.fullName]=rootDisplayedSizeNumeric;});return sizes;});return{title:process.userFriendlyName,contexts:timeToDump,usedMemoryCells:usedMemoryCells,allocatorCells:allocatorCells};}));},createFooterRows_:function(rows,columns){if(rows.length<=1)
+return[];var totalRow={title:'Total'};tr.ui.analysis.aggregateTableRowCells(totalRow,rows,columns);return[totalRow];},createColumns_:function(rows){var titleColumn=new ProcessNameColumn();titleColumn.width='200px';var usedMemorySizeColumns=tr.ui.analysis.MemoryColumn.fromRows(rows,'usedMemoryCells',this.aggregationMode_,UsedMemoryColumn.RULES);var allocatorSizeColumns=tr.ui.analysis.MemoryColumn.fromRows(rows,'allocatorCells',this.aggregationMode_,AllocatorColumn.RULES);var sizeColumns=usedMemorySizeColumns.concat(allocatorSizeColumns);tr.ui.analysis.MemoryColumn.spaceEqually(sizeColumns);var columns=[titleColumn].concat(sizeColumns);return columns;},storeSelection_:function(){var selectedRowTitle;var selectedRow=this.$.table.selectedTableRow;if(selectedRow!==undefined)
+selectedRowTitle=selectedRow.title;var selectedColumnName;var selectedColumnIndex=this.$.table.selectedColumnIndex;if(selectedColumnIndex!==undefined){var selectedColumn=this.$.table.tableColumns[selectedColumnIndex];selectedColumnName=selectedColumn.name;}
+this.$.state.set({rowTitle:selectedRowTitle,columnName:selectedColumnName});},restoreSelection_:function(){var settings=this.$.state.get();if(settings===undefined||settings.rowTitle===undefined||settings.columnName===undefined)
+return;var selectedColumnName=settings.columnName;var selectedColumnIndex=tr.b.findFirstIndexInArray(this.$.table.tableColumns,function(column){return column.name===selectedColumnName;});if(selectedColumnIndex<0)
+return;var selectedRowTitle=settings.rowTitle;var selectedRow=tr.b.findFirstInArray(this.$.table.tableRows,function(row){return row.title===selectedRowTitle;});if(selectedRow===undefined)
+return;this.$.table.selectedTableRow=selectedRow;this.$.table.selectedColumnIndex=selectedColumnIndex;}});return{ProcessNameColumn:ProcessNameColumn,UsedMemoryColumn:UsedMemoryColumn,PeakMemoryColumn:PeakMemoryColumn,ByteStatColumn:ByteStatColumn,AllocatorColumn:AllocatorColumn,TracingColumn:TracingColumn};});'use strict';tr.exportTo('tr.ui.analysis',function(){Polymer('tr-ui-a-memory-dump-header-pane',{created:function(){this.containerMemoryDumps_=undefined;},ready:function(){this.$.aggregation_mode_container.appendChild(tr.ui.b.createSelector(this,'aggregationMode','memoryDumpHeaderPane.aggregationMode',tr.ui.analysis.MemoryColumn.AggregationMode.DIFF,[{label:'Diff',value:tr.ui.analysis.MemoryColumn.AggregationMode.DIFF},{label:'Max',value:tr.ui.analysis.MemoryColumn.AggregationMode.MAX}]));},set containerMemoryDumps(containerMemoryDumps){this.containerMemoryDumps_=containerMemoryDumps;this.scheduleRebuildPane_();},get containerMemoryDumps(){return this.containerMemoryDumps_;},set aggregationMode(aggregationMode){this.aggregationMode_=aggregationMode;this.scheduleRebuildPane_();},get aggregationMode(){return this.aggregationMode_;},rebuildPane_:function(){this.updateLabel_();this.updateAggregationModeSelector_();this.changeChildPane_();},updateLabel_:function(){this.$.label.textContent='';if(this.containerMemoryDumps_===undefined||this.containerMemoryDumps_.length<=0){this.$.label.textContent='No memory dumps selected';return;}
+var containerDumpCount=this.containerMemoryDumps_.length;var isMultiSelection=containerDumpCount>1;this.$.label.appendChild(document.createTextNode('Selected '+containerDumpCount+' memory dump'+
+(isMultiSelection?'s':'')+' in '+this.containerMemoryDumps_[0].containerName+' at '));this.$.label.appendChild(document.createTextNode(tr.v.Unit.byName.timeStampInMs.format(this.containerMemoryDumps_[0].start)));if(isMultiSelection){var ELLIPSIS=String.fromCharCode(8230);this.$.label.appendChild(document.createTextNode(ELLIPSIS));this.$.label.appendChild(document.createTextNode(tr.v.Unit.byName.timeStampInMs.format(this.containerMemoryDumps_[containerDumpCount-1].start)));}},updateAggregationModeSelector_:function(){var displayStyle;if(this.containerMemoryDumps_===undefined||this.containerMemoryDumps_.length<=1)
+displayStyle='none';else
+displayStyle='initial';this.$.aggregation_mode_container.style.display=displayStyle;},changeChildPane_:function(){this.childPaneBuilder=function(){if(this.containerMemoryDumps_===undefined||this.containerMemoryDumps_.length<=0)
+return undefined;var overviewPane=document.createElement('tr-ui-a-memory-dump-overview-pane');overviewPane.processMemoryDumps=this.containerMemoryDumps_.map(function(containerDump){return containerDump.processMemoryDumps;});overviewPane.aggregationMode=this.aggregationMode;return overviewPane;}.bind(this);}});return{};});'use strict';Polymer('tr-ui-a-stacked-pane-view',{setPaneBuilder:function(paneBuilder,opt_parentPane){var paneContainer=this.$.pane_container;if(opt_parentPane){if(!(opt_parentPane instanceof HTMLElement))
+throw new Error('Parent pane must be an HTML element');if(opt_parentPane.parentElement!==paneContainer)
+throw new Error('Parent pane must be a child of the pane container');}
+while(paneContainer.lastElementChild!==null&&paneContainer.lastElementChild!==opt_parentPane){var removedPane=this.$.pane_container.lastElementChild;var listener=this.listeners_.get(removedPane);if(listener===undefined)
+throw new Error('No listener associated with pane');this.listeners_.delete(removedPane);removedPane.removeEventListener('request-child-pane-change',listener);paneContainer.removeChild(removedPane);}
+if(opt_parentPane&&opt_parentPane.parentElement!==paneContainer)
+throw new Error('Parent pane was removed from the pane container');if(!paneBuilder)
+return;var pane=paneBuilder();if(!pane)
+return;if(!(pane instanceof HTMLElement))
+throw new Error('Pane must be an HTML element');var listener=function(event){this.setPaneBuilder(pane.childPaneBuilder,pane);}.bind(this);if(!this.listeners_){this.listeners_=new WeakMap();}
+this.listeners_.set(pane,listener);pane.addEventListener('request-child-pane-change',listener);paneContainer.appendChild(pane);pane.appended();},rebuild:function(){var currentPane=this.$.pane_container.firstElementChild;while(currentPane){currentPane.rebuild();currentPane=currentPane.nextElementSibling;}},get panesForTesting(){var panes=[];var currentChild=this.$.pane_container.firstElementChild;while(currentChild){panes.push(currentChild);currentChild=currentChild.nextElementSibling;}
+return panes;}});'use strict';tr.exportTo('tr.ui.analysis',function(){Polymer('tr-ui-a-container-memory-dump-sub-view',{set selection(selection){if(selection===undefined){this.currentSelection_=undefined;this.dumpsByContainerName_=undefined;this.updateContents_();return;}
+selection.forEach(function(event){if(!(event instanceof tr.model.ContainerMemoryDump)){throw new Error('Memory dump sub-view only supports container memory dumps');}});this.currentSelection_=selection;this.dumpsByContainerName_=tr.b.group(this.currentSelection_.toArray(),function(dump){return dump.containerName;});tr.b.iterItems(this.dumpsByContainerName_,function(containerName,dumps){dumps.sort(function(a,b){return a.start-b.start;});});this.updateContents_();},get selection(){return this.currentSelection_;},get requiresTallView(){return true;},updateContents_:function(){this.$.content.textContent='';if(this.dumpsByContainerName_===undefined)
+return;var containerNames=Object.keys(this.dumpsByContainerName_);if(containerNames.length===0)
+return;if(containerNames.length>1)
+this.buildViewForMultipleContainerNames_();else
+this.buildViewForSingleContainerName_();},buildViewForSingleContainerName_:function(){var containerMemoryDumps=tr.b.dictionaryValues(this.dumpsByContainerName_)[0];var dumpView=this.ownerDocument.createElement('tr-ui-a-stacked-pane-view');this.$.content.appendChild(dumpView);dumpView.setPaneBuilder(function(){var headerPane=document.createElement('tr-ui-a-memory-dump-header-pane');headerPane.containerMemoryDumps=containerMemoryDumps;return headerPane;});},buildViewForMultipleContainerNames_:function(){var ownerDocument=this.ownerDocument;var rows=tr.b.dictionaryValues(tr.b.mapItems(this.dumpsByContainerName_,function(containerName,dumps){return{containerName:containerName,subRows:dumps,isExpanded:true};}));rows.sort(function(a,b){return a.containerName.localeCompare(b.containerName);});var columns=[{title:'Dump',value:function(row){if(row.subRows===undefined)
+return this.singleDumpValue_(row);else
+return this.groupedDumpValue_(row);},singleDumpValue_:function(row){var linkEl=ownerDocument.createElement('tr-ui-a-analysis-link');linkEl.setSelectionAndContent(new tr.model.EventSet([row]));linkEl.appendChild(tr.v.ui.createScalarSpan(row.start,{unit:tr.v.Unit.byName.timeStampInMs,ownerDocument:ownerDocument}));return linkEl;},groupedDumpValue_:function(row){var linkEl=ownerDocument.createElement('tr-ui-a-analysis-link');linkEl.setSelectionAndContent(new tr.model.EventSet(row.subRows));linkEl.appendChild(tr.ui.b.createSpan({ownerDocument:ownerDocument,textContent:row.subRows.length+' memory dump'+
+(row.subRows.length===1?'':'s')+' in '}));linkEl.appendChild(tr.ui.b.createSpan({ownerDocument:ownerDocument,textContent:row.containerName,bold:true}));return linkEl;}}];var table=this.ownerDocument.createElement('tr-ui-b-table');table.tableColumns=columns;table.tableRows=rows;table.showHeader=false;table.rebuild();this.$.content.appendChild(table);}});return{};});'use strict';(function(){var COUNTER_SAMPLE_TABLE_COLUMNS=[{title:'Counter',width:'150px',value:function(row){return row.counter;}},{title:'Series',width:'150px',value:function(row){return row.series;}},{title:'Time',width:'150px',value:function(row){return row.start;}},{title:'Value',width:'100%',value:function(row){return row.value;}}];Polymer('tr-ui-a-counter-sample-sub-view',{ready:function(){this.currentSelection_=undefined;this.$.table.tableColumns=COUNTER_SAMPLE_TABLE_COLUMNS;},get selection(){return this.currentSelection_;},set selection(selection){this.currentSelection_=selection;this.updateContents_();},updateContents_:function(){this.$.table.tableRows=this.selection?this.getRows_(this.selection.toArray()):[];this.$.table.rebuild();},getRows_:function(samples){var samplesByCounter=tr.b.group(samples,function(sample){return sample.series.counter.guid;});var rows=[];tr.b.iterItems(samplesByCounter,function(unused,counterSamples){var samplesBySeries=tr.b.group(counterSamples,function(sample){return sample.series.guid;});tr.b.iterItems(samplesBySeries,function(unused,seriesSamples){var seriesRows=this.getRowsForSamples_(seriesSamples);seriesRows[0].counter=seriesSamples[0].series.counter.name;seriesRows[0].series=seriesSamples[0].series.name;if(seriesRows.length>1){seriesRows[0].subRows=seriesRows.slice(1);seriesRows[0].isExpanded=true;}
+rows.push(seriesRows[0]);},this);},this);return rows;},getRowsForSamples_:function(samples){return samples.map(function(sample){return{start:sample.timestamp,value:sample.value};});}});})();'use strict';tr.exportTo('tr.ui.analysis',function(){Polymer('tr-ui-a-layout-tree-sub-view',{set selection(selection){this.currentSelection_=selection;this.updateContents_();},get selection(){return this.currentSelection_;},updateContents_:function(){this.$.content.textContent='';if(!this.currentSelection_)
+return;var columns=[{title:'Tag/Name',value:function(layoutObject){return layoutObject.tag||':'+layoutObject.name;}},{title:'htmlId',value:function(layoutObject){return layoutObject.htmlId||'';}},{title:'classNames',value:function(layoutObject){return layoutObject.classNames||'';}},{title:'reasons',value:function(layoutObject){return layoutObject.needsLayoutReasons.join(', ');}},{title:'width',value:function(layoutObject){return layoutObject.absoluteRect.width;}},{title:'height',value:function(layoutObject){return layoutObject.absoluteRect.height;}},{title:'absX',value:function(layoutObject){return layoutObject.absoluteRect.left;}},{title:'absY',value:function(layoutObject){return layoutObject.absoluteRect.top;}},{title:'relX',value:function(layoutObject){return layoutObject.relativeRect.left;}},{title:'relY',value:function(layoutObject){return layoutObject.relativeRect.top;}},{title:'float',value:function(layoutObject){return layoutObject.isFloat?'float':'';}},{title:'positioned',value:function(layoutObject){return layoutObject.isPositioned?'positioned':'';}},{title:'relative',value:function(layoutObject){return layoutObject.isRelativePositioned?'relative':'';}},{title:'sticky',value:function(layoutObject){return layoutObject.isStickyPositioned?'sticky':'';}},{title:'anonymous',value:function(layoutObject){return layoutObject.isAnonymous?'anonymous':'';}},{title:'row',value:function(layoutObject){if(layoutObject.tableRow===undefined)
+return'';return layoutObject.tableRow;}},{title:'col',value:function(layoutObject){if(layoutObject.tableCol===undefined)
+return'';return layoutObject.tableCol;}},{title:'rowSpan',value:function(layoutObject){if(layoutObject.tableRowSpan===undefined)
+return'';return layoutObject.tableRowSpan;}},{title:'colSpan',value:function(layoutObject){if(layoutObject.tableColSpan===undefined)
+return'';return layoutObject.tableColSpan;}},{title:'address',value:function(layoutObject){return layoutObject.id.toString(16);}}];var table=this.ownerDocument.createElement('tr-ui-b-table');table.defaultExpansionStateCallback=function(layoutObject,parentLayoutObject){return true;};table.subRowsPropertyName='childLayoutObjects';table.tableColumns=columns;table.tableRows=this.currentSelection_.map(function(snapshot){return snapshot.rootLayoutObject;});table.rebuild();this.$.content.appendChild(table);}});return{};});'use strict';Polymer('tr-ui-a-selection-summary-table',{created:function(){this.selection_=new tr.b.Range();},ready:function(){this.$.table.showHeader=false;this.$.table.tableColumns=[{title:'Name',value:function(row){return row.title;},width:'350px'},{title:'Value',width:'100%',value:function(row){return row.value;}}];},get selection(){return this.selection_;},set selection(selection){this.selection_=selection;this.updateContents_();},updateContents_:function(){var selection=this.selection_;var rows=[];var hasRange;if(this.selection_&&(!selection.bounds.isEmpty))
+hasRange=true;else
+hasRange=false;rows.push({title:'Selection start',value:hasRange?tr.v.ui.createScalarSpan(selection.bounds.min,{unit:tr.v.Unit.byName.timeStampInMs,ownerDocument:this.ownerDocument}):'<empty>'});rows.push({title:'Selection extent',value:hasRange?tr.v.ui.createScalarSpan(selection.bounds.range,{unit:tr.v.Unit.byName.timeDurationInMs,ownerDocument:this.ownerDocument}):'<empty>'});this.$.table.tableRows=rows;this.$.table.rebuild();}});'use strict';tr.exportTo('tr.ui.analysis',function(){function MultiEventSummary(title,events){this.title=title;this.duration_=undefined;this.selfTime_=undefined;this.events_=events;this.cpuTimesComputed_=false;this.cpuSelfTime_=undefined;this.cpuDuration_=undefined;this.maxDuration_=undefined;this.maxCpuDuration_=undefined;this.maxSelfTime_=undefined;this.maxCpuSelfTime_=undefined;this.untotallableArgs_=[];this.totalledArgs_=undefined;};MultiEventSummary.prototype={set title(title){if(title=='Totals')
+this.totalsRow=true;this.title_=title;},get title(){return this.title_;},get duration(){if(this.duration_===undefined){this.duration_=tr.b.Statistics.sum(this.events_,function(event){return event.duration;});}
+return this.duration_;},get cpuSelfTime(){this.computeCpuTimesIfNeeded_();return this.cpuSelfTime_;},get cpuDuration(){this.computeCpuTimesIfNeeded_();return this.cpuDuration_;},computeCpuTimesIfNeeded_:function(){if(this.cpuTimesComputed_)
+return;this.cpuTimesComputed_=true;var cpuSelfTime=0;var cpuDuration=0;var hasCpuData=false;for(var i=0;i<this.events_.length;i++){var event=this.events_[i];if(event.cpuDuration!==undefined){cpuDuration+=event.cpuDuration;hasCpuData=true;}
+if(event.cpuSelfTime!==undefined){cpuSelfTime+=event.cpuSelfTime;hasCpuData=true;}}
+if(hasCpuData){this.cpuDuration_=cpuDuration;this.cpuSelfTime_=cpuSelfTime;}},get selfTime(){if(this.selfTime_===undefined){this.selfTime_=0;for(var i=0;i<this.events_.length;i++){if(this.events_[i].selfTime!==undefined)
+this.selfTime_+=this.events[i].selfTime;}}
+return this.selfTime_;},get events(){return this.events_;},get numEvents(){return this.events_.length;},get numAlerts(){if(this.numAlerts_===undefined){this.numAlerts_=tr.b.Statistics.sum(this.events_,function(event){return event.associatedAlerts.length;});}
+return this.numAlerts_;},get untotallableArgs(){this.updateArgsIfNeeded_();return this.untotallableArgs_;},get totalledArgs(){this.updateArgsIfNeeded_();return this.totalledArgs_;},get maxDuration(){if(this.maxDuration_===undefined){this.maxDuration_=tr.b.Statistics.max(this.events_,function(event){return event.duration;});}
+return this.maxDuration_;},get maxCpuDuration(){if(this.maxCpuDuration_===undefined){this.maxCpuDuration_=tr.b.Statistics.max(this.events_,function(event){return event.cpuDuration;});}
+return this.maxCpuDuration_;},get maxSelfTime(){if(this.maxSelfTime_===undefined){this.maxSelfTime_=tr.b.Statistics.max(this.events_,function(event){return event.selfTime;});}
+return this.maxSelfTime_;},get maxCpuSelfTime(){if(this.maxCpuSelfTime_===undefined){this.maxCpuSelfTime_=tr.b.Statistics.max(this.events_,function(event){return event.cpuSelfTime;});}
+return this.maxCpuSelfTime_;},updateArgsIfNeeded_:function(){if(this.totalledArgs_!==undefined)
+return;var untotallableArgs={};var totalledArgs={};for(var i=0;i<this.events_.length;i++){var event=this.events_[i];for(var argName in event.args){var argVal=event.args[argName];var type=typeof argVal;if(type!=='number'){untotallableArgs[argName]=true;delete totalledArgs[argName];continue;}
+if(untotallableArgs[argName]){continue;}
+if(totalledArgs[argName]===undefined)
+totalledArgs[argName]=0;totalledArgs[argName]+=argVal;}}
+this.untotallableArgs_=tr.b.dictionaryKeys(untotallableArgs);this.totalledArgs_=totalledArgs;}};return{MultiEventSummary:MultiEventSummary};});'use strict';Polymer('tr-ui-a-multi-event-summary-table',{ready:function(){this.showTotals_=false;this.eventsHaveDuration_=true;this.eventsHaveSubRows_=true;this.eventsByTitle_=undefined;},updateTableColumns_:function(rows,maxValues){var hasCpuData=false;var hasAlerts=false;rows.forEach(function(row){if(row.cpuDuration!==undefined)
+hasCpuData=true;if(row.cpuSelfTime!==undefined)
+hasCpuData=true;if(row.numAlerts)
+hasAlerts=true;});var ownerDocument=this.ownerDocument;var columns=[];columns.push({title:'Name',value:function(row){if(row.title==='Totals')
+return'Totals';var linkEl=document.createElement('tr-ui-a-analysis-link');linkEl.setSelectionAndContent(function(){return new tr.model.EventSet(row.events);},row.title);return linkEl;},width:'350px',cmp:function(rowA,rowB){return rowA.title.localeCompare(rowB.title);}});if(this.eventsHaveDuration_){columns.push({title:'Wall Duration',value:function(row){return tr.v.ui.createScalarSpan(row.duration,{unit:tr.v.Unit.byName.timeDurationInMs,total:row.totalsRow?undefined:maxValues.duration,ownerDocument:ownerDocument,rightAlign:true});},width:'<updated further down>',cmp:function(rowA,rowB){return rowA.duration-rowB.duration;}});}
+if(this.eventsHaveDuration_&&hasCpuData){columns.push({title:'CPU Duration',value:function(row){return tr.v.ui.createScalarSpan(row.cpuDuration,{unit:tr.v.Unit.byName.timeDurationInMs,total:row.totalsRow?undefined:maxValues.cpuDuration,ownerDocument:ownerDocument,rightAlign:true});},width:'<updated further down>',cmp:function(rowA,rowB){return rowA.cpuDuration-rowB.cpuDuration;}});}
+if(this.eventsHaveSubRows_&&this.eventsHaveDuration_){columns.push({title:'Self time',value:function(row){return tr.v.ui.createScalarSpan(row.selfTime,{unit:tr.v.Unit.byName.timeDurationInMs,total:row.totalsRow?undefined:maxValues.selfTime,ownerDocument:ownerDocument,rightAlign:true});},width:'<updated further down>',cmp:function(rowA,rowB){return rowA.selfTime-rowB.selfTime;}});}
+if(this.eventsHaveSubRows_&&this.eventsHaveDuration_&&hasCpuData){columns.push({title:'CPU Self Time',value:function(row){return tr.v.ui.createScalarSpan(row.cpuSelfTime,{unit:tr.v.Unit.byName.timeDurationInMs,total:row.totalsRow?undefined:maxValues.cpuSelfTime,ownerDocument:ownerDocument,rightAlign:true});},width:'<updated further down>',cmp:function(rowA,rowB){return rowA.cpuSelfTime-rowB.cpuSelfTime;}});}
+columns.push({title:'Occurrences',value:function(row){return row.numEvents;},width:'<updated further down>',cmp:function(rowA,rowB){return rowA.numEvents-rowB.numEvents;}});var alertsColumnIndex;if(hasAlerts){columns.push({title:'Num Alerts',value:function(row){return row.numAlerts;},width:'<updated further down>',cmp:function(rowA,rowB){return rowA.numAlerts-rowB.numAlerts;}});alertsColumnIndex=columns.length-1;}
+var colWidthPercentage;if(columns.length==1)
+colWidthPercentage='100%';else
+colWidthPercentage=(100/(columns.length-1)).toFixed(3)+'%';for(var i=1;i<columns.length;i++)
+columns[i].width=colWidthPercentage;this.$.table.tableColumns=columns;if(hasAlerts){this.$.table.sortColumnIndex=alertsColumnIndex;this.$.table.sortDescending=true;}},configure:function(config){if(config.eventsByTitle===undefined)
+throw new Error('Required: eventsByTitle');if(config.showTotals!==undefined)
+this.showTotals_=config.showTotals;else
+this.showTotals_=true;if(config.eventsHaveDuration!==undefined)
+this.eventsHaveDuration_=config.eventsHaveDuration;else
+this.eventsHaveDuration_=true;if(config.eventsHaveSubRows!==undefined)
+this.eventsHaveSubRows_=config.eventsHaveSubRows;else
+this.eventsHaveSubRows_=true;this.eventsByTitle_=config.eventsByTitle;this.updateContents_();},get showTotals(){return this.showTotals_;},set showTotals(showTotals){this.showTotals_=showTotals;this.updateContents_();},get eventsHaveDuration(){return this.eventsHaveDuration_;},set eventsHaveDuration(eventsHaveDuration){this.eventsHaveDuration_=eventsHaveDuration;this.updateContents_();},get eventsHaveSubRows(){return this.eventsHaveSubRows_;},set eventsHaveSubRows(eventsHaveSubRows){this.eventsHaveSubRows_=eventsHaveSubRows;this.updateContents_();},get eventsByTitle(){return this.eventsByTitle_;},set eventsByTitle(eventsByTitle){this.eventsByTitle_=eventsByTitle;this.updateContents_();},get selectionBounds(){return this.selectionBounds_;},set selectionBounds(selectionBounds){this.selectionBounds_=selectionBounds;this.updateContents_();},updateContents_:function(){var eventsByTitle;if(this.eventsByTitle_!==undefined)
+eventsByTitle=this.eventsByTitle_;else
+eventsByTitle=[];var allEvents=[];var rows=[];tr.b.iterItems(eventsByTitle,function(title,eventsOfSingleTitle){allEvents.push.apply(allEvents,eventsOfSingleTitle);var row=new tr.ui.analysis.MultiEventSummary(title,eventsOfSingleTitle);rows.push(row);});this.updateTableColumns_(rows);this.$.table.tableRows=rows;var maxValues={duration:undefined,selfTime:undefined,cpuSelfTime:undefined,cpuDuration:undefined};if(this.eventsHaveDuration){for(var column in maxValues){maxValues[column]=tr.b.Statistics.max(rows,function(event){return event[column];});}}
+var footerRows=[];if(this.showTotals_){var multiEventSummary=new tr.ui.analysis.MultiEventSummary('Totals',allEvents);footerRows.push(multiEventSummary);}
+this.updateTableColumns_(rows,maxValues);this.$.table.tableRows=rows;this.$.table.footerRows=footerRows;this.$.table.rebuild();}});'use strict';function isTable(object){if(!(object instanceof Array)||(object.length<2))return false;for(var colName in object[0]){if(typeof colName!=='string')return false;}
+for(var i=0;i<object.length;++i){if(!(object[i]instanceof Object))return false;for(var colName in object[i]){if(i&&(object[0][colName]===undefined))return false;var cellType=typeof object[i][colName];if(cellType!=='string'&&cellType!='number')return false;}
+if(i){for(var colName in object[0]){if(object[i][colName]===undefined)return false;}}}
+return true;}
+Polymer('tr-ui-a-generic-object-view',{ready:function(){this.object_=undefined;},get object(){return this.object_;},set object(object){this.object_=object;this.updateContents_();},updateContents_:function(){this.$.content.textContent='';this.appendElementsForType_('',this.object_,0,0,5,'');},appendElementsForType_:function(label,object,indent,depth,maxDepth,suffix){if(depth>maxDepth){this.appendSimpleText_(label,indent,'<recursion limit reached>',suffix);return;}
+if(object===undefined){this.appendSimpleText_(label,indent,'undefined',suffix);return;}
+if(object===null){this.appendSimpleText_(label,indent,'null',suffix);return;}
+if(!(object instanceof Object)){var type=typeof object;if(type=='string'){var objectReplaced=false;if((object[0]=='{'&&object[object.length-1]=='}')||(object[0]=='['&&object[object.length-1]==']')){try{object=JSON.parse(object);objectReplaced=true;}catch(e){}}
+if(!objectReplaced){if(object.indexOf('\n')!==-1){var lines=object.split('\n');lines.forEach(function(line,i){var text,ioff,ll,ss;if(i==0){text='"'+line;ioff=0;ll=label;ss='';}else if(i<lines.length-1){text=line;ioff=1;ll='';ss='';}else{text=line+'"';ioff=1;ll='';ss=suffix;}
+var el=this.appendSimpleText_(ll,indent+ioff*label.length+ioff,text,ss);el.style.whiteSpace='pre';return el;},this);return;}else{this.appendSimpleText_(label,indent,'"'+object+'"',suffix);return;}}
+else{}}else{return this.appendSimpleText_(label,indent,object,suffix);}}
+if(object instanceof tr.model.ObjectSnapshot){var link=document.createElement('tr-ui-a-analysis-link');link.selection=new tr.model.EventSet(object);this.appendElementWithLabel_(label,indent,link,suffix);return;}
+if(object instanceof tr.model.ObjectInstance){var link=document.createElement('tr-ui-a-analysis-link');link.selection=new tr.model.EventSet(object);this.appendElementWithLabel_(label,indent,link,suffix);return;}
+if(object instanceof tr.b.Rect){this.appendSimpleText_(label,indent,object.toString(),suffix);return;}
+if(object instanceof tr.v.ScalarNumeric){var el=this.ownerDocument.createElement('tr-v-ui-scalar-span');el.value=object;this.appendElementWithLabel_(label,indent,el,suffix);return;}
+if(object instanceof Array){this.appendElementsForArray_(label,object,indent,depth,maxDepth,suffix);return;}
+this.appendElementsForObject_(label,object,indent,depth,maxDepth,suffix);},appendElementsForArray_:function(label,object,indent,depth,maxDepth,suffix){if(object.length==0){this.appendSimpleText_(label,indent,'[]',suffix);return;}
+if(isTable(object)){var table=document.createElement('tr-ui-b-table');var columns=[];tr.b.iterItems(object[0],function(colName){columns.push({title:colName,value:function(row){return row[colName];}});});table.tableColumns=columns;table.tableRows=object;this.appendElementWithLabel_(label,indent,table,suffix);table.rebuild();return;}
+this.appendElementsForType_(label+'[',object[0],indent,depth+1,maxDepth,object.length>1?',':']'+suffix);for(var i=1;i<object.length;i++){this.appendElementsForType_('',object[i],indent+label.length+1,depth+1,maxDepth,i<object.length-1?',':']'+suffix);}
+return;},appendElementsForObject_:function(label,object,indent,depth,maxDepth,suffix){var keys=tr.b.dictionaryKeys(object);if(keys.length==0){this.appendSimpleText_(label,indent,'{}',suffix);return;}
+this.appendElementsForType_(label+'{'+keys[0]+': ',object[keys[0]],indent,depth,maxDepth,keys.length>1?',':'}'+suffix);for(var i=1;i<keys.length;i++){this.appendElementsForType_(keys[i]+': ',object[keys[i]],indent+label.length+1,depth+1,maxDepth,i<keys.length-1?',':'}'+suffix);}},appendElementWithLabel_:function(label,indent,dataElement,suffix){var row=document.createElement('div');var indentSpan=document.createElement('span');indentSpan.style.whiteSpace='pre';for(var i=0;i<indent;i++)
+indentSpan.textContent+=' ';row.appendChild(indentSpan);var labelSpan=document.createElement('span');labelSpan.textContent=label;row.appendChild(labelSpan);row.appendChild(dataElement);var suffixSpan=document.createElement('span');suffixSpan.textContent=suffix;row.appendChild(suffixSpan);row.dataElement=dataElement;this.$.content.appendChild(row);},appendSimpleText_:function(label,indent,text,suffix){var el=this.ownerDocument.createElement('span');el.textContent=text;this.appendElementWithLabel_(label,indent,el,suffix);return el;}});'use strict';Polymer('tr-ui-a-generic-object-view-with-label',{ready:function(){this.labelEl_=document.createElement('div');this.genericObjectView_=document.createElement('tr-ui-a-generic-object-view');this.shadowRoot.appendChild(this.labelEl_);this.shadowRoot.appendChild(this.genericObjectView_);},get label(){return this.labelEl_.textContent;},set label(label){this.labelEl_.textContent=label;},get object(){return this.genericObjectView_.object;},set object(object){this.genericObjectView_.object=object;}});'use strict';Polymer('tr-ui-a-multi-event-details-table',{created:function(){this.selection_=undefined;this.eventsHaveDuration_=true;this.eventsHaveSubRows_=true;},ready:function(){this.initTitleTable_();},get eventsHaveDuration(){return this.eventsHaveDuration_;},set eventsHaveDuration(eventsHaveDuration){this.eventsHaveDuration_=eventsHaveDuration;this.updateContents_();},get eventsHaveSubRows(){return this.eventsHaveSubRows_;},set eventsHaveSubRows(eventsHaveSubRows){this.eventsHaveSubRows_=eventsHaveSubRows;this.updateContents_();},get selection(){return this.selection_;},set selection(selection){this.selection_=selection;this.updateContents_();},updateContents_:function(){var selection=this.selection_;this.updateTitleTable_();if(this.selection_===undefined){this.$.table.tableRows=[];this.$.table.tableFooterRows=[];this.$.table.rebuild();return;}
+var summary=new tr.ui.analysis.MultiEventSummary('Totals',this.selection_);this.updateColumns_(summary);this.updateRows_(summary);this.$.table.rebuild();},initTitleTable_:function(){var table=this.$.titletable;table.showHeader=false;table.tableColumns=[{title:'Title',value:function(row){return row.title;},width:'350px'},{title:'Value',width:'100%',value:function(row){return row.value;}}];},updateTitleTable_:function(){var title;if(this.selection_&&this.selection_.length)
+title=this.selection_[0].title;else
+title='<No selection>';var table=this.$.titletable;table.tableRows=[{title:'Title',value:title}];},updateColumns_:function(summary){var hasCpuData;if(summary.cpuDuration!==undefined)
+hasCpuData=true;if(summary.cpuSelfTime!==undefined)
+hasCpuData=true;var colWidthPercentage;if(hasCpuData)
+colWidthPercentage='20%';else
+colWidthPercentage='33.3333%';var ownerDocument=this.ownerDocument;var columns=[];columns.push({title:'Start',value:function(row){if(row.__proto__===tr.ui.analysis.MultiEventSummary.prototype){return row.title;}
+var linkEl=document.createElement('tr-ui-a-analysis-link');linkEl.setSelectionAndContent(function(){return new tr.model.EventSet(row.event);});linkEl.appendChild(tr.v.ui.createScalarSpan(row.start,{unit:tr.v.Unit.byName.timeStampInMs,ownerDocument:ownerDocument}));return linkEl;},width:'350px',cmp:function(rowA,rowB){return rowA.start-rowB.start;}});if(this.eventsHaveDuration_){columns.push({title:'Wall Duration (ms)',value:function(row){return tr.v.ui.createScalarSpan(row.duration,{unit:tr.v.Unit.byName.timeDurationInMs,ownerDocument:ownerDocument});},width:'<updated further down>',cmp:function(rowA,rowB){return rowA.duration-rowB.duration;}});}
+if(this.eventsHaveDuration_&&hasCpuData){columns.push({title:'CPU Duration (ms)',value:function(row){return tr.v.ui.createScalarSpan(row.cpuDuration,{unit:tr.v.Unit.byName.timeDurationInMs,ownerDocument:ownerDocument});},width:'<updated further down>',cmp:function(rowA,rowB){return rowA.cpuDuration-rowB.cpuDuration;}});}
+if(this.eventsHaveSubRows_&&this.eventsHaveDuration_){columns.push({title:'Self time (ms)',value:function(row){return tr.v.ui.createScalarSpan(row.selfTime,{unit:tr.v.Unit.byName.timeDurationInMs,ownerDocument:ownerDocument});},width:'<updated further down>',cmp:function(rowA,rowB){return rowA.selfTime-rowB.selfTime;}});}
+if(this.eventsHaveSubRows_&&this.eventsHaveDuration_&&hasCpuData){columns.push({title:'CPU Self Time (ms)',value:function(row){return tr.v.ui.createScalarSpan(row.cpuSelfTime,{unit:tr.v.Unit.byName.timeDurationInMs,ownerDocument:ownerDocument});},width:'<updated further down>',cmp:function(rowA,rowB){return rowA.cpuSelfTime-rowB.cpuSelfTime;}});}
+var argKeys=tr.b.dictionaryKeys(summary.totalledArgs);argKeys.sort();var otherKeys=summary.untotallableArgs.slice(0);otherKeys.sort();argKeys.push.apply(argKeys,otherKeys);var keysWithColumns=argKeys.slice(0,4);var keysInOtherColumn=argKeys.slice(4);keysWithColumns.forEach(function(argKey){var hasTotal=summary.totalledArgs[argKey];var colDesc={title:'Arg: '+argKey,value:function(row){if(row.__proto__!==tr.ui.analysis.MultiEventSummary.prototype){var argView=document.createElement('tr-ui-a-generic-object-view');argView.object=row.args[argKey];return argView;}
+if(hasTotal)
+return row.totalledArgs[argKey];return'';},width:'<updated further down>'};if(hasTotal){colDesc.cmp=function(rowA,rowB){return rowA.args[argKey]-rowB.args[argKey];};}
+columns.push(colDesc);});if(keysInOtherColumn.length){columns.push({title:'Other Args',value:function(row){if(row.__proto__===tr.ui.analysis.MultiEventSummary.prototype)
+return'';var argView=document.createElement('tr-ui-a-generic-object-view');var obj={};for(var i=0;i<keysInOtherColumn.length;i++)
+obj[keysInOtherColumn[i]]=row.args[keysInOtherColumn[i]];argView.object=obj;return argView;},width:'<updated further down>'});}
+var colWidthPercentage;if(columns.length==1)
+colWidthPercentage='100%';else
+colWidthPercentage=(100/(columns.length-1)).toFixed(3)+'%';for(var i=1;i<columns.length;i++)
+columns[i].width=colWidthPercentage;this.$.table.tableColumns=columns;},updateRows_:function(summary){this.$.table.sortColumnIndex=0;function Row(event){this.event=event;}
+Row.prototype={get start(){return this.event.start;},get duration(){return this.event.duration;},get cpuDuration(){return this.event.cpuDuration;},get selfTime(){return this.event.selfTime;},get cpuSelfTime(){return this.event.cpuSelfTime;},get args(){return this.event.args;}};this.$.table.tableRows=this.selection_.map(function(event){return new Row(event);});this.$.table.footerRows=[summary];}});'use strict';Polymer('tr-ui-a-multi-event-sub-view',{created:function(){this.currentSelection_=undefined;this.eventsHaveDuration_=true;this.eventsHaveSubRows_=true;},set selection(selection){if(selection.length<=1)
+throw new Error('Only supports multiple items');this.setSelectionWithoutErrorChecks(selection);},get selection(){return this.currentSelection_;},setSelectionWithoutErrorChecks:function(selection){this.currentSelection_=selection;this.updateContents_();},get eventsHaveDuration(){return this.eventsHaveDuration_;},set eventsHaveDuration(eventsHaveDuration){this.eventsHaveDuration_=eventsHaveDuration;this.updateContents_();},get eventsHaveSubRows(){return this.eventsHaveSubRows_;},set eventsHaveSubRows(eventsHaveSubRows){this.eventsHaveSubRows_=eventsHaveSubRows;this.updateContents_();},updateContents_:function(){var selection=this.currentSelection_;this.$.content.textContent='';if(!selection)
+return;var eventsByTitle=selection.getEventsOrganizedByTitle();var numTitles=tr.b.dictionaryLength(eventsByTitle);var summaryTableEl=document.createElement('tr-ui-a-multi-event-summary-table');summaryTableEl.configure({showTotals:numTitles>1,eventsByTitle:eventsByTitle,eventsHaveDuration:this.eventsHaveDuration_,eventsHaveSubRows:this.eventsHaveSubRows_});this.$.content.appendChild(summaryTableEl);var selectionSummaryTableEl=document.createElement('tr-ui-a-selection-summary-table');selectionSummaryTableEl.selection=this.currentSelection_;this.$.content.appendChild(selectionSummaryTableEl);if(numTitles===1){var detailsTableEl=document.createElement('tr-ui-a-multi-event-details-table');detailsTableEl.eventsHaveDuration=this.eventsHaveDuration_;detailsTableEl.eventsHaveSubRows=this.eventsHaveSubRows_;detailsTableEl.selection=selection;this.$.content.appendChild(detailsTableEl);}}});'use strict';tr.exportTo('tr.ui.analysis',function(){var FLOW_IN=0x1;var FLOW_OUT=0x2;var FLOW_IN_OUT=FLOW_IN|FLOW_OUT;function FlowClassifier(){this.numEvents_=0;this.eventsByGUID_={};}
+FlowClassifier.prototype={getFS_:function(event){var fs=this.eventsByGUID_[event.guid];if(fs===undefined){this.numEvents_++;fs={state:0,event:event};this.eventsByGUID_[event.guid]=fs;}
+return fs;},addInFlow:function(event){var fs=this.getFS_(event);fs.state|=FLOW_IN;return event;},addOutFlow:function(event){var fs=this.getFS_(event);fs.state|=FLOW_OUT;return event;},hasEvents:function(){return this.numEvents_>0;},get inFlowEvents(){var selection=new tr.model.EventSet();for(var guid in this.eventsByGUID_){var fs=this.eventsByGUID_[guid];if(fs.state===FLOW_IN)
+selection.push(fs.event);}
+return selection;},get outFlowEvents(){var selection=new tr.model.EventSet();for(var guid in this.eventsByGUID_){var fs=this.eventsByGUID_[guid];if(fs.state===FLOW_OUT)
+selection.push(fs.event);}
+return selection;},get internalFlowEvents(){var selection=new tr.model.EventSet();for(var guid in this.eventsByGUID_){var fs=this.eventsByGUID_[guid];if(fs.state===FLOW_IN_OUT)
+selection.push(fs.event);}
+return selection;}};return{FlowClassifier:FlowClassifier};});'use strict';Polymer('tr-ui-a-related-events',{ready:function(){this.eventGroups_=[];this.cancelFunctions_=[];this.$.table.tableColumns=[{title:'Event(s)',value:function(row){var typeEl=document.createElement('span');typeEl.innerText=row.type;if(row.tooltip)
+typeEl.title=row.tooltip;return typeEl;},width:'150px'},{title:'Link',width:'100%',value:function(row){var linkEl=document.createElement('tr-ui-a-analysis-link');if(row.name)
+linkEl.setSelectionAndContent(row.selection,row.name);else
+linkEl.selection=row.selection;return linkEl;}}];},hasRelatedEvents:function(){return(this.eventGroups_&&this.eventGroups_.length>0);},setRelatedEvents:function(eventSet){this.cancelAllTasks_();this.eventGroups_=[];this.addConnectedFlows_(eventSet);this.addConnectedEvents_(eventSet);this.addOverlappingSamples_(eventSet);this.updateContents_();},addConnectedFlows_:function(eventSet){var classifier=new tr.ui.analysis.FlowClassifier();eventSet.forEach(function(slice){if(slice.inFlowEvents){slice.inFlowEvents.forEach(function(flow){classifier.addInFlow(flow);});}
+if(slice.outFlowEvents){slice.outFlowEvents.forEach(function(flow){classifier.addOutFlow(flow);});}});if(!classifier.hasEvents())
+return;var addToEventGroups=function(type,flowEvent){this.eventGroups_.push({type:type,selection:new tr.model.EventSet(flowEvent),name:flowEvent.title});};classifier.inFlowEvents.forEach(addToEventGroups.bind(this,'Incoming flow'));classifier.outFlowEvents.forEach(addToEventGroups.bind(this,'Outgoing flow'));classifier.internalFlowEvents.forEach(addToEventGroups.bind(this,'Internal flow'));},cancelAllTasks_:function(){this.cancelFunctions_.forEach(function(cancelFunction){cancelFunction();});this.cancelFunctions_=[];},addConnectedEvents_:function(eventSet){this.cancelFunctions_.push(this.createEventsLinkIfNeeded_('Preceding events','Add all events that have led to the selected one(s), connected by '+'flow arrows or by call stack.',eventSet,function(event,events){this.addInFlowEvents_(event,events);this.addAncestors_(event,events);if(event.startSlice)
+events.push(event.startSlice);}.bind(this)));this.cancelFunctions_.push(this.createEventsLinkIfNeeded_('Following events','Add all events that have been caused by the selected one(s), '+'connected by flow arrows or by call stack.',eventSet,function(event,events){this.addOutFlowEvents_(event,events);this.addDescendents_(event,events);if(event.endSlice)
+events.push(event.endSlice);}.bind(this)));this.cancelFunctions_.push(this.createEventsLinkIfNeeded_('All connected events','Add all events connected to the selected one(s) by flow arrows or '+'by call stack.',eventSet,function(event,events){this.addInFlowEvents_(event,events);this.addOutFlowEvents_(event,events);this.addAncestors_(event,events);this.addDescendents_(event,events);if(event.startSlice)
+events.push(event.startSlice);if(event.endSlice)
+events.push(event.endSlice);}.bind(this)));},createEventsLinkIfNeeded_:function(title,tooltip,events,addFunction){events=new tr.model.EventSet(events);var lengthBefore=events.length;var task;var isCanceled=false;function addEventsUntilTimeout(startingIndex){if(isCanceled)
+return;var startingTime=window.performance.now();while(startingIndex<events.length){addFunction(events[startingIndex],events);startingIndex++;if(window.performance.now()-startingTime>8){var newTask=new tr.b.Task(addEventsUntilTimeout.bind(this,startingIndex),this);task.after(newTask);task=newTask;return;}}
+if(lengthBefore===events.length)
+return;this.eventGroups_.push({type:title,tooltip:tooltip,selection:events});this.updateContents_();};function cancelTask(){isCanceled=true;}
+task=new tr.b.Task(addEventsUntilTimeout.bind(this,0),this);tr.b.Task.RunWhenIdle(task);return cancelTask;},addInFlowEvents_:function(event,eventSet){if(!event.inFlowEvents)
+return;event.inFlowEvents.forEach(function(e){eventSet.push(e);});},addOutFlowEvents_:function(event,eventSet){if(!event.outFlowEvents)
+return;event.outFlowEvents.forEach(function(e){eventSet.push(e);});},addAncestors_:function(event,eventSet){if(!event.iterateAllAncestors)
+return;event.iterateAllAncestors(function(e){eventSet.push(e);});},addDescendents_:function(event,eventSet){if(!event.iterateAllDescendents)
+return;event.iterateAllDescendents(function(e){eventSet.push(e);});},addOverlappingSamples_:function(eventSet){var samples=new tr.model.EventSet;eventSet.forEach(function(slice){if(!slice.parentContainer||!slice.parentContainer.samples)
+return;var candidates=slice.parentContainer.samples;var range=tr.b.Range.fromExplicitRange(slice.start,slice.start+slice.duration);var filteredSamples=range.filterArray(candidates,function(value){return value.start;});filteredSamples.forEach(function(sample){samples.push(sample);});}.bind(this));if(samples.length>0){this.eventGroups_.push({type:'Overlapping samples',tooltip:'All samples overlapping the selected slice(s).',selection:samples});}},updateContents_:function(){var table=this.$.table;if(this.eventGroups_===undefined)
+table.tableRows=[];else
+table.tableRows=this.eventGroups_.slice();table.rebuild();}});'use strict';Polymer('tr-ui-a-multi-async-slice-sub-view',{get selection(){return this.$.content.selection;},set selection(selection){this.$.content.selection=selection;this.$.relatedEvents.setRelatedEvents(selection);if(this.$.relatedEvents.hasRelatedEvents()){this.$.relatedEvents.style.display='';}else{this.$.relatedEvents.style.display='none';}},get relatedEventsToHighlight(){if(!this.$.content.selection)
+return undefined;var selection=new tr.model.EventSet();this.$.content.selection.forEach(function(asyncEvent){if(!asyncEvent.associatedEvents)
+return;asyncEvent.associatedEvents.forEach(function(event){selection.push(event);});});if(selection.length)
+return selection;return undefined;}});'use strict';Polymer('tr-ui-a-multi-cpu-slice-sub-view',{ready:function(){this.$.content.eventsHaveSubRows=false;},get selection(){return this.$.content.selection;},set selection(selection){this.$.content.setSelectionWithoutErrorChecks(selection);}});'use strict';Polymer('tr-ui-a-multi-flow-event-sub-view',{ready:function(){this.$.content.eventsHaveDuration=false;this.$.content.eventsHaveSubRows=false;},set selection(selection){this.$.content.selection=selection;},get selection(){return this.$.content.selection;}});'use strict';Polymer('tr-ui-a-multi-frame-sub-view',{created:function(){this.currentSelection_=undefined;},set selection(selection){this.textContent='';var realView=document.createElement('tr-ui-a-multi-event-sub-view');realView.eventsHaveDuration=false;realView.eventsHaveSubRows=false;this.appendChild(realView);realView.setSelectionWithoutErrorChecks(selection);this.currentSelection_=selection;},get selection(){return this.currentSelection_;},get relatedEventsToHighlight(){if(!this.currentSelection_)
+return undefined;var selection=new tr.model.EventSet();this.currentSelection_.forEach(function(frameEvent){frameEvent.associatedEvents.forEach(function(event){selection.push(event);});});return selection;}});'use strict';Polymer('tr-ui-a-multi-instant-event-sub-view',{created:function(){this.currentSelection_=undefined;},set selection(selection){this.$.content.textContent='';var realView=document.createElement('tr-ui-a-multi-event-sub-view');realView.eventsHaveDuration=false;realView.eventsHaveSubRows=false;this.$.content.appendChild(realView);realView.setSelectionWithoutErrorChecks(selection);this.currentSelection_=selection;},get selection(){return this.currentSelection_;}});'use strict';Polymer('tr-ui-a-multi-object-sub-view',{created:function(){this.currentSelection_=undefined;},ready:function(){this.$.content.showHeader=false;},get selection(){return this.currentSelection_;},set selection(selection){this.currentSelection_=selection;var objectEvents=tr.b.asArray(selection).sort(tr.b.Range.compareByMinTimes);var timeSpanConfig={unit:tr.v.Unit.byName.timeStampInMs,ownerDocument:this.ownerDocument};var table=this.$.content;table.tableColumns=[{title:'First',value:function(event){if(event instanceof tr.model.ObjectSnapshot)
+return tr.v.ui.createScalarSpan(event.ts,timeSpanConfig);var spanEl=document.createElement('span');spanEl.appendChild(tr.v.ui.createScalarSpan(event.creationTs,timeSpanConfig));spanEl.appendChild(tr.ui.b.createSpan({textContent:'-',marginLeft:'4px',marginRight:'4px'}));if(event.deletionTs!=Number.MAX_VALUE){spanEl.appendChild(tr.v.ui.createScalarSpan(event.deletionTs,timeSpanConfig));}
+return spanEl;},width:'200px'},{title:'Second',value:function(event){var linkEl=document.createElement('tr-ui-a-analysis-link');linkEl.setSelectionAndContent(function(){return new tr.model.EventSet(event);},event.userFriendlyName);return linkEl;},width:'100%'}];table.tableRows=objectEvents;table.rebuild();}});'use strict';var EventSet=tr.model.EventSet;var CHART_TITLE='Power (W) by ms since vertical sync';var CHART_WIDTH_FRACTION_OF_BODY=0.5;Polymer('tr-ui-a-frame-power-usage-chart',{ready:function(){this.chart_=undefined;this.samples_=new EventSet();this.vSyncTimestamps_=[];},get chart(){return this.chart_;},get samples(){return this.samples_;},get vSyncTimestamps(){return this.vSyncTimestamps_;},setData:function(samples,vSyncTimestamps){this.samples_=(samples===undefined)?new EventSet():samples;this.vSyncTimestamps_=(vSyncTimestamps===undefined)?[]:vSyncTimestamps;this.updateContents_();},updateContents_:function(){this.clearChart_();var data=this.getDataForLineChart_();if(data.length===0)
+return;this.chart_=this.createChart_(data);this.$.content.appendChild(this.chart_);},createChart_:function(data){var chart=new tr.ui.b.LineChart();var width=document.body.clientWidth*CHART_WIDTH_FRACTION_OF_BODY;chart.setSize({width:width,height:chart.height});chart.chartTitle=CHART_TITLE;chart.data=data;return chart;},clearChart_:function(){var content=this.$.content;while(content.firstChild)
+content.removeChild(content.firstChild);this.chart_=undefined;},getDataForLineChart_:function(){var sortedSamples=this.sortSamplesByTimestampAscending_(this.samples);var vSyncTimestamps=this.vSyncTimestamps.slice();var lastVSyncTimestamp=undefined;var points=[];var frameNumber=0;sortedSamples.forEach(function(sample){while(vSyncTimestamps.length>0&&vSyncTimestamps[0]<=sample.start){lastVSyncTimestamp=vSyncTimestamps.shift();frameNumber++;}
+if(lastVSyncTimestamp===undefined)
+return;var point={x:sample.start-lastVSyncTimestamp};point['f'+frameNumber]=sample.power/1000;points.push(point);});return points;},sortSamplesByTimestampAscending_:function(samples){return samples.toArray().sort(function(smpl1,smpl2){return smpl1.start-smpl2.start;});}});'use strict';Polymer('tr-ui-a-power-sample-summary-table',{ready:function(){this.$.table.tableColumns=[{title:'Min power',width:'100px',value:function(row){return tr.v.Unit.byName.powerInWatts.format(row.min/1000.0);}},{title:'Max power',width:'100px',value:function(row){return tr.v.Unit.byName.powerInWatts.format(row.max/1000.0);}},{title:'Time-weighted average',width:'100px',value:function(row){return tr.v.Unit.byName.powerInWatts.format(row.timeWeightedAverage/1000.0);}},{title:'Energy consumed',width:'100px',value:function(row){return tr.v.Unit.byName.energyInJoules.format(row.energyConsumed);}},{title:'Sample count',width:'100%',value:function(row){return row.sampleCount;}}];this.samples=new tr.model.EventSet();},get samples(){return this.samples_;},set samples(samples){if(samples===this.samples)
+return;this.samples_=(samples===undefined)?new tr.model.EventSet():samples;this.updateContents_();},updateContents_:function(){if(this.samples.length===0){this.$.table.tableRows=[];}else{this.$.table.tableRows=[{min:this.getMin(),max:this.getMax(),timeWeightedAverage:this.getTimeWeightedAverage(),energyConsumed:this.getEnergyConsumed(),sampleCount:this.samples.length}];}
+this.$.table.rebuild();},getMin:function(){return Math.min.apply(null,this.samples.map(function(sample){return sample.power;}));},getMax:function(){return Math.max.apply(null,this.samples.map(function(sample){return sample.power;}));},getTimeWeightedAverage:function(){var energyConsumed=this.getEnergyConsumed();if(energyConsumed==='N/A')
+return'N/A';var energyInMillijoules=this.getEnergyConsumed()*1000;var durationInSeconds=this.samples.bounds.duration/1000;return energyInMillijoules/durationInSeconds;},getEnergyConsumed:function(){if(this.samples.length<2)
+return'N/A';var bounds=this.samples.bounds;return this.samples[0].series.getEnergyConsumed(bounds.min,bounds.max);}});'use strict';var EventSet=tr.model.EventSet;Polymer('tr-ui-a-power-sample-table',{ready:function(){this.$.table.tableColumns=[{title:'Time',width:'100px',value:function(row){return tr.v.ui.createScalarSpan(row.start,{unit:tr.v.Unit.byName.timeStampInMs});}},{title:'Power',width:'100%',value:function(row){return tr.v.ui.createScalarSpan(row.power/1000,{unit:tr.v.Unit.byName.powerInWatts});}}];this.samples=new EventSet();},get samples(){return this.samples_;},set samples(samples){this.samples_=(samples===undefined)?new EventSet():samples;this.updateContents_();},updateContents_:function(){this.$.table.tableRows=this.samples.toArray();this.$.table.rebuild();}});'use strict';Polymer('tr-ui-a-multi-power-sample-sub-view',{ready:function(){this.currentSelection_=undefined;},get selection(){return this.currentSelection_;},set selection(selection){this.currentSelection_=selection;this.updateContents_();},updateContents_:function(){var samples=this.selection;var vSyncTimestamps=(this.selection===undefined)?[]:this.selection[0].series.device.vSyncTimestamps;this.$.summaryTable.samples=samples;this.$.samplesTable.samples=samples;this.$.chart.setData(this.selection,vSyncTimestamps);}});'use strict';(function(){Polymer('tr-ui-a-multi-sample-sub-view',{created:function(){this.viewOption_=undefined;this.selection_=undefined;},ready:function(){var viewSelector=tr.ui.b.createSelector(this,'viewOption','tracing.ui.analysis.multi_sample_sub_view',tr.b.MultiDimensionalViewType.TOP_DOWN_TREE_VIEW,[{label:'Top-down (Tree)',value:tr.b.MultiDimensionalViewType.TOP_DOWN_TREE_VIEW},{label:'Top-down (Heavy)',value:tr.b.MultiDimensionalViewType.TOP_DOWN_HEAVY_VIEW},{label:'Bottom-up (Heavy)',value:tr.b.MultiDimensionalViewType.BOTTOM_UP_HEAVY_VIEW}]);this.$.control.appendChild(viewSelector);this.$.table.selectionMode=tr.ui.b.TableFormat.SelectionMode.ROW;},get selection(){return this.selection_;},set selection(selection){this.selection_=selection;this.updateContents_();},get viewOption(){return this.viewOption_;},set viewOption(viewOption){this.viewOption_=viewOption;this.updateContents_();},createSamplingSummary_:function(selection,viewOption){var builder=new tr.b.MultiDimensionalViewBuilder(1);var samples=selection.getEventsOrganizedByBaseType().sample;samples.forEach(function(sample){builder.addPath([sample.getUserFriendlyStackTrace().reverse()],1,tr.b.MultiDimensionalViewBuilder.ValueKind.SELF);});return builder.buildView(viewOption);},updateContents_:function(){if(this.selection===undefined){this.$.table.tableColumns=[];this.$.table.tableRows=[];this.$.table.rebuild();return;}
+var samplingData=this.createSamplingSummary_(this.selection,this.viewOption);var columns=[this.createPercentColumn_('Total',samplingData.total),this.createSamplesColumn_('Total'),this.createPercentColumn_('Self',samplingData.total),this.createSamplesColumn_('Self'),{title:'Symbol',value:function(row){return row.title[0];},width:'250px',cmp:function(a,b){return a.title[0].localeCompare(b.title[0]);},showExpandButtons:true}];this.$.table.tableColumns=columns;this.$.table.sortColumnIndex=1;this.$.table.sortDescending=true;this.$.table.tableRows=samplingData.subRows;this.$.table.rebuild();},createPercentColumn_:function(title,samplingDataTotal){var field=title.toLowerCase();return{title:title+' percent',value:function(row){var percent=row[field]/samplingDataTotal;var span=document.createElement('tr-v-ui-scalar-span');span.value=(percent*100).toFixed(2);span.percentage=percent;span.unit=tr.v.Unit.byName.unitlessNumber;return span;}.bind(this),width:'60px',cmp:function(a,b){return a[field]-b[field];}};},createSamplesColumn_:function(title){var field=title.toLowerCase();return{title:title+' samples',value:function(row){return row[field];},width:'60px',cmp:function(a,b){return a[field]-b[field];}};}});})();'use strict';Polymer('tr-ui-a-multi-thread-slice-sub-view',{created:function(){this.selection_=undefined;},get selection(){return this.selection_;},set selection(selection){this.selection_=selection;if(tr.isExported('tr.ui.e.chrome.cc.RasterTaskSelection')){if(tr.ui.e.chrome.cc.RasterTaskSelection.supports(selection)){var ltvSelection=new tr.ui.e.chrome.cc.RasterTaskSelection(selection);var ltv=new tr.ui.e.chrome.cc.LayerTreeHostImplSnapshotView();ltv.objectSnapshot=ltvSelection.containingSnapshot;ltv.selection=ltvSelection;ltv.extraHighlightsByLayerId=ltvSelection.extraHighlightsByLayerId;this.$.content.textContent='';this.$.content.appendChild(ltv);this.requiresTallView_=true;return;}}
+this.$.content.textContent='';var mesv=document.createElement('tr-ui-a-multi-event-sub-view');mesv.selection=selection;this.$.content.appendChild(mesv);var relatedEvents=document.createElement('tr-ui-a-related-events');relatedEvents.setRelatedEvents(selection);if(relatedEvents.hasRelatedEvents()){this.$.content.appendChild(relatedEvents);}},get requiresTallView(){if(this.$.content.children.length===0)
+return false;var childTagName=this.$.content.children[0].tagName;if(childTagName==='TR-UI-A-MULTI-EVENT-SUB-VIEW')
+return false;return true;}});'use strict';Polymer('tr-ui-a-multi-thread-time-slice-sub-view',{ready:function(){this.$.content.eventsHaveSubRows=false;},get selection(){return this.$.content.selection;},set selection(selection){this.$.content.setSelectionWithoutErrorChecks(selection);}});'use strict';Polymer('tr-ui-a-multi-user-expectation-sub-view',{created:function(){this.currentSelection_=undefined;},set selection(selection){this.currentSelection_=selection;this.textContent='';var realView=document.createElement('tr-ui-a-multi-event-sub-view');this.appendChild(realView);realView.setSelectionWithoutErrorChecks(selection);this.currentSelection_=selection;},get selection(){return this.currentSelection_;},get relatedEventsToHighlight(){if(!this.currentSelection_)
+return undefined;var selection=new tr.model.EventSet();this.currentSelection_.forEach(function(ir){ir.associatedEvents.forEach(function(event){selection.push(event);});});return selection;}});'use strict';Polymer('tr-ui-a-stack-frame',{ready:function(){this.stackFrame_=undefined;this.$.table.tableColumns=[];this.$.table.showHeader=true;},get stackFrame(){return this.stackFrame_;},set stackFrame(stackFrame){var table=this.$.table;this.stackFrame_=stackFrame;if(stackFrame===undefined){table.tableColumns=[];table.tableRows=[];table.rebuild();return;}
+var hasName=false;var hasTitle=false;table.tableRows=stackFrame.stackTrace;table.tableRows.forEach(function(row){hasName|=row.name!==undefined;hasTitle|=row.title!==undefined;});var cols=[];if(hasName){cols.push({title:'Name',value:function(row){return row.name;}});}
+if(hasTitle){cols.push({title:'Title',value:function(row){return row.title;}});}
+table.tableColumns=cols;table.rebuild();},tableForTesting:function(){return this.$.table;}});'use strict';Polymer('tr-ui-a-single-event-sub-view',{ready:function(){this.currentSelection_=undefined;this.$.table.tableColumns=[{title:'Label',value:function(row){return row.name;},width:'150px'},{title:'Value',width:'100%',value:function(row){return row.value;}}];this.$.table.showHeader=false;},get selection(){return this.currentSelection_;},set selection(selection){if(selection.length!==1)
+throw new Error('Only supports single slices');this.setSelectionWithoutErrorChecks(selection);},setSelectionWithoutErrorChecks:function(selection){this.currentSelection_=selection;this.updateContents_();},getEventRows_:function(event){var rows=[];if(event.error)
+rows.push({name:'Error',value:event.error});if(event.title)
+rows.push({name:'Title',value:event.title});if(event.category)
+rows.push({name:'Category',value:event.category});if(event.model!==undefined){var ufc=event.model.getUserFriendlyCategoryFromEvent(event);if(ufc!==undefined)
+rows.push({name:'User Friendly Category',value:ufc});}
+if(event.name)
+rows.push({name:'Name',value:event.name});rows.push({name:'Start',value:tr.v.ui.createScalarSpan(event.start,{unit:tr.v.Unit.byName.timeStampInMs})});if(event.duration){rows.push({name:'Wall Duration',value:tr.v.ui.createScalarSpan(event.duration,{unit:tr.v.Unit.byName.timeDurationInMs})});}
+if(event.cpuDuration){rows.push({name:'CPU Duration',value:tr.v.ui.createScalarSpan(event.cpuDuration,{unit:tr.v.Unit.byName.timeDurationInMs})});}
+if(event.subSlices!==undefined&&event.subSlices.length!==0){if(event.selfTime){rows.push({name:'Self Time',value:tr.v.ui.createScalarSpan(event.selfTime,{unit:tr.v.Unit.byName.timeDurationInMs})});}
+if(event.cpuSelfTime){var cpuSelfTimeEl=tr.v.ui.createScalarSpan(event.cpuSelfTime,{unit:tr.v.Unit.byName.timeDurationInMs});if(event.cpuSelfTime>event.selfTime){cpuSelfTimeEl.warning=' Note that CPU Self Time is larger than Self Time. '+'This is a known limitation of this system, which occurs '+'due to several subslices, rounding issues, and imprecise '+'time at which we get cpu- and real-time.';}
+rows.push({name:'CPU Self Time',value:cpuSelfTimeEl});}}
+if(event.durationInUserTime){rows.push({name:'Duration (U)',value:tr.v.ui.createScalarSpan(event.durationInUserTime,{unit:tr.v.Unit.byName.timeDurationInMs})});}
+function createStackFrameEl(sf){var sfEl=document.createElement('tr-ui-a-stack-frame');sfEl.stackFrame=sf;return sfEl;}
+if(event.startStackFrame&&event.endStackFrame){if(event.startStackFrame===event.endStackFrame){rows.push({name:'Start+End Stack Trace',value:createStackFrameEl(event.startStackFrame)});}else{rows.push({name:'Start Stack Trace',value:createStackFrameEl(event.startStackFrame)});rows.push({name:'End Stack Trace',value:createStackFrameEl(event.endStackFrame)});}}else if(event.startStackFrame){rows.push({name:'Start Stack Trace',value:createStackFrameEl(event.startStackFrame)});}else if(event.endStackFrame){rows.push({name:'End Stack Trace',value:createStackFrameEl(event.endStackFrame)});}
+if(event.info){var descriptionEl=tr.ui.b.createDiv({textContent:event.info.description,maxWidth:'300px'});rows.push({name:'Description',value:descriptionEl});if(event.info.docLinks){event.info.docLinks.forEach(function(linkObject){var linkEl=document.createElement('a');linkEl.target='_blank';linkEl.href=linkObject.href;linkEl.textContent=linkObject.textContent;rows.push({name:linkObject.label,value:linkEl});});}}
+if(event.associatedAlerts.length){var alertSubRows=[];event.associatedAlerts.forEach(function(alert){var linkEl=document.createElement('tr-ui-a-analysis-link');linkEl.setSelectionAndContent(function(){return new tr.model.EventSet(alert);},alert.info.description);alertSubRows.push({name:alert.title,value:linkEl});});rows.push({name:'Alerts',value:'',isExpanded:true,subRows:alertSubRows});}
+return rows;},addArgsToRows_:function(rows,args){var n=0;for(var argName in args){n+=1;}
+if(n>0){var subRows=[];for(var argName in args){var argView=document.createElement('tr-ui-a-generic-object-view');argView.object=args[argName];subRows.push({name:argName,value:argView});}
+rows.push({name:'Args',value:'',isExpanded:true,subRows:subRows});}
+return rows;},updateContents_:function(){if(this.currentSelection_===undefined){this.$.table.rows=[];this.$.table.rebuild();return;}
+var event=this.currentSelection_[0];var rows=this.getEventRows_(event);if(event.argsStripped)
+rows.push({name:'Args',value:'Stripped'});else
+this.addArgsToRows_(rows,event.args);var event=new tr.b.Event('customize-rows');event.rows=rows;this.dispatchEvent(event);this.$.table.tableRows=rows;this.$.table.rebuild();}});'use strict';Polymer('tr-ui-a-single-async-slice-sub-view',{get selection(){return this.$.content.selection;},set selection(selection){if(selection.length!==1)
+throw new Error('Only supports single slices');this.$.content.setSelectionWithoutErrorChecks(selection);this.$.relatedEvents.setRelatedEvents(selection);if(this.$.relatedEvents.hasRelatedEvents()){this.$.relatedEvents.style.display='';}else{this.$.relatedEvents.style.display='none';}},getEventRows_:function(event){var rows=this.__proto__.__proto__.getEventRows_(event);rows.splice(0,0,{name:'ID',value:event.id});return rows;},get relatedEventsToHighlight(){if(!this.currentSelection_)
+return undefined;return this.currentSelection_[0].associatedEvents;}});'use strict';Polymer('tr-ui-a-single-cpu-slice-sub-view',{created:function(){this.currentSelection_=undefined;},get selection(){return this.currentSelection_;},set selection(selection){if(selection.length!==1)
+throw new Error('Only supports single slices');if(!(selection[0]instanceof tr.model.CpuSlice))
+throw new Error('Only supports thread time slices');this.currentSelection_=selection;var cpuSlice=selection[0];var thread=cpuSlice.threadThatWasRunning;var shadowRoot=this.shadowRoot;if(thread){shadowRoot.querySelector('#process-name').textContent=thread.parent.userFriendlyName;shadowRoot.querySelector('#thread-name').textContent=thread.userFriendlyName;}else{shadowRoot.querySelector('#process-name').parentElement.style.display='none';shadowRoot.querySelector('#thread-name').textContent=cpuSlice.title;}
+shadowRoot.querySelector('#start').setValueAndUnit(cpuSlice.start,tr.v.Unit.byName.timeStampInMs);shadowRoot.querySelector('#duration').setValueAndUnit(cpuSlice.duration,tr.v.Unit.byName.timeDurationInMs);var runningThreadEl=shadowRoot.querySelector('#running-thread');var timeSlice=cpuSlice.getAssociatedTimeslice();if(!timeSlice){runningThreadEl.parentElement.style.display='none';}else{var threadLink=document.createElement('tr-ui-a-analysis-link');threadLink.selection=new tr.model.EventSet(timeSlice);threadLink.textContent='Click to select';runningThreadEl.parentElement.style.display='';runningThreadEl.textContent='';runningThreadEl.appendChild(threadLink);}
+shadowRoot.querySelector('#args').object=cpuSlice.args;}});'use strict';Polymer('tr-ui-a-single-flow-event-sub-view',{getEventRows_:function(event){var rows=this.__proto__.__proto__.getEventRows_(event);rows.splice(0,0,{name:'ID',value:event.id});function createLinkTo(slice){var linkEl=document.createElement('tr-ui-a-analysis-link');linkEl.setSelectionAndContent(function(){return new tr.model.EventSet(slice);});linkEl.textContent=slice.userFriendlyName;return linkEl;}
+rows.push({name:'From',value:createLinkTo(event.startSlice)});rows.push({name:'To',value:createLinkTo(event.endSlice)});return rows;}});'use strict';Polymer('tr-ui-a-single-frame-sub-view',{ready:function(){this.currentSelection_=undefined;},get selection(){return this.currentSelection_;},set selection(selection){if(selection.length!=1)
+throw new Error('Only supports single frame!');this.currentSelection_=selection;this.$.asv.selection=selection[0].associatedAlerts;},get relatedEventsToHighlight(){if(!this.currentSelection_)
+return undefined;return this.currentSelection_[0].associatedEvents;}});'use strict';Polymer('tr-ui-a-single-instant-event-sub-view',{created:function(){this.currentSelection_=undefined;},set selection(selection){this.$.content.textContent='';var realView=document.createElement('tr-ui-a-single-event-sub-view');realView.setSelectionWithoutErrorChecks(selection);this.$.content.appendChild(realView);this.currentSelection_=selection;},get selection(){return this.currentSelection_;}});'use strict';Polymer('tr-ui-a-single-object-instance-sub-view',{created:function(){this.currentSelection_=undefined;},get requiresTallView(){if(this.$.content.children.length===0)
+return false;if(this.$.content.children[0]instanceof
+tr.ui.analysis.ObjectInstanceView)
+return this.$.content.children[0].requiresTallView;},get selection(){return this.currentSelection_;},set selection(selection){if(selection.length!==1)
+throw new Error('Only supports single item selections');if(!(selection[0]instanceof tr.model.ObjectInstance))
+throw new Error('Only supports object instances');this.$.content.textContent='';this.currentSelection_=selection;var instance=selection[0];var typeInfo=tr.ui.analysis.ObjectInstanceView.getTypeInfo(instance.category,instance.typeName);if(typeInfo){var customView=new typeInfo.constructor();this.$.content.appendChild(customView);customView.modelEvent=instance;}else{this.appendGenericAnalysis_(instance);}},appendGenericAnalysis_:function(instance){var html='';html+='<div class="title">'+
+instance.typeName+' '+
+instance.id+'</div>\n';html+='<table>';html+='<tr>';html+='<tr><td>creationTs:</td><td>'+
+instance.creationTs+'</td></tr>\n';if(instance.deletionTs!=Number.MAX_VALUE){html+='<tr><td>deletionTs:</td><td>'+
+instance.deletionTs+'</td></tr>\n';}else{html+='<tr><td>deletionTs:</td><td>not deleted</td></tr>\n';}
+html+='<tr><td>snapshots:</td><td id="snapshots"></td></tr>\n';html+='</table>';this.$.content.innerHTML=html;var snapshotsEl=this.$.content.querySelector('#snapshots');instance.snapshots.forEach(function(snapshot){var snapshotLink=document.createElement('tr-ui-a-analysis-link');snapshotLink.selection=new tr.model.EventSet(snapshot);snapshotsEl.appendChild(snapshotLink);});}});'use strict';Polymer('tr-ui-a-single-object-snapshot-sub-view',{created:function(){this.currentSelection_=undefined;},get requiresTallView(){if(this.children.length===0)
+return false;if(this.children[0]instanceof tr.ui.analysis.ObjectSnapshotView)
+return this.children[0].requiresTallView;},get selection(){return this.currentSelection_;},set selection(selection){if(selection.length!==1)
+throw new Error('Only supports single item selections');if(!(selection[0]instanceof tr.model.ObjectSnapshot))
+throw new Error('Only supports object instances');this.textContent='';this.currentSelection_=selection;var snapshot=selection[0];var typeInfo=tr.ui.analysis.ObjectSnapshotView.getTypeInfo(snapshot.objectInstance.category,snapshot.objectInstance.typeName);if(typeInfo){var customView=new typeInfo.constructor();this.appendChild(customView);customView.modelEvent=snapshot;}else{this.appendGenericAnalysis_(snapshot);}},appendGenericAnalysis_:function(snapshot){var instance=snapshot.objectInstance;this.textContent='';var titleEl=document.createElement('div');titleEl.classList.add('title');titleEl.appendChild(document.createTextNode('Snapshot of '));this.appendChild(titleEl);var instanceLinkEl=document.createElement('tr-ui-a-analysis-link');instanceLinkEl.selection=new tr.model.EventSet(instance);titleEl.appendChild(instanceLinkEl);titleEl.appendChild(document.createTextNode(' @ '));titleEl.appendChild(tr.v.ui.createScalarSpan(snapshot.ts,{unit:tr.v.Unit.byName.timeStampInMs,ownerDocument:this.ownerDocument}));var tableEl=document.createElement('table');this.appendChild(tableEl);var rowEl=document.createElement('tr');tableEl.appendChild(rowEl);var labelEl=document.createElement('td');labelEl.textContent='args:';rowEl.appendChild(labelEl);var argsEl=document.createElement('td');argsEl.id='args';rowEl.appendChild(argsEl);var objectViewEl=document.createElement('tr-ui-a-generic-object-view');objectViewEl.object=snapshot.args;argsEl.appendChild(objectViewEl);}});'use strict';Polymer('tr-ui-a-single-power-sample-sub-view',{ready:function(){this.currentSelection_=undefined;},get selection(){return this.currentSelection_;},set selection(selection){this.currentSelection_=selection;this.updateContents_();},updateContents_:function(){this.$.samplesTable.samples=this.selection;}});'use strict';Polymer('tr-ui-a-single-sample-sub-view',{created:function(){this.currentSelection_=undefined;},ready:function(){this.$.content.tableColumns=[{title:'FirstColumn',value:function(row){return row.title;},width:'250px'},{title:'SecondColumn',value:function(row){return row.value;},width:'100%'}];this.$.content.showHeader=false;},get selection(){return this.currentSelection_;},set selection(selection){this.currentSelection_=selection;if(this.currentSelection_===undefined){this.$.content.tableRows=[];return;}
+var sample=this.currentSelection_[0];var table=this.$.content;var rows=[];rows.push({title:'Title',value:sample.title});rows.push({title:'Sample time',value:tr.v.ui.createScalarSpan(sample.start,{unit:tr.v.Unit.byName.timeStampInMs,ownerDocument:this.ownerDocument})});var sfEl=document.createElement('tr-ui-a-stack-frame');sfEl.stackFrame=sample.leafStackFrame;rows.push({title:'Stack trace',value:sfEl});table.tableRows=rows;table.rebuild();}});'use strict';Polymer('tr-ui-a-single-thread-slice-sub-view',{get selection(){return this.$.content.selection;},set selection(selection){this.$.content.selection=selection;this.$.relatedEvents.setRelatedEvents(selection);if(this.$.relatedEvents.hasRelatedEvents())
+this.$.relatedEvents.style.display='';else
+this.$.relatedEvents.style.display='none';}});'use strict';Polymer('tr-ui-a-single-thread-time-slice-sub-view',{created:function(){this.currentSelection_=undefined;},get selection(){return this.currentSelection_;},set selection(selection){if(selection.length!==1)
+throw new Error('Only supports single slices');if(!(selection[0]instanceof tr.model.ThreadTimeSlice))
+throw new Error('Only supports thread time slices');this.currentSelection_=selection;var timeSlice=selection[0];var thread=timeSlice.thread;var shadowRoot=this.shadowRoot;shadowRoot.querySelector('#state').textContent=timeSlice.title;var stateColor=tr.b.ColorScheme.colorsAsStrings[timeSlice.colorId];shadowRoot.querySelector('#state').style.backgroundColor=stateColor;shadowRoot.querySelector('#process-name').textContent=thread.parent.userFriendlyName;shadowRoot.querySelector('#thread-name').textContent=thread.userFriendlyName;shadowRoot.querySelector('#start').setValueAndUnit(timeSlice.start,tr.v.Unit.byName.timeStampInMs);shadowRoot.querySelector('#duration').setValueAndUnit(timeSlice.duration,tr.v.Unit.byName.timeDurationInMs);var onCpuEl=shadowRoot.querySelector('#on-cpu');onCpuEl.textContent='';var runningInsteadEl=shadowRoot.querySelector('#running-instead');if(timeSlice.cpuOnWhichThreadWasRunning){runningInsteadEl.parentElement.removeChild(runningInsteadEl);var cpuLink=document.createElement('tr-ui-a-analysis-link');cpuLink.selection=new tr.model.EventSet(timeSlice.getAssociatedCpuSlice());cpuLink.textContent=timeSlice.cpuOnWhichThreadWasRunning.userFriendlyName;onCpuEl.appendChild(cpuLink);}else{onCpuEl.parentElement.removeChild(onCpuEl);var cpuSliceThatTookCpu=timeSlice.getCpuSliceThatTookCpu();if(cpuSliceThatTookCpu){var cpuLink=document.createElement('tr-ui-a-analysis-link');cpuLink.selection=new tr.model.EventSet(cpuSliceThatTookCpu);if(cpuSliceThatTookCpu.thread)
+cpuLink.textContent=cpuSliceThatTookCpu.thread.userFriendlyName;else
+cpuLink.textContent=cpuSliceThatTookCpu.title;runningInsteadEl.appendChild(cpuLink);}else{runningInsteadEl.parentElement.removeChild(runningInsteadEl);}}
+var argsEl=shadowRoot.querySelector('#args');if(tr.b.dictionaryKeys(timeSlice.args).length>0){var argsView=document.createElement('tr-ui-a-generic-object-view');argsView.object=timeSlice.args;argsEl.parentElement.style.display='';argsEl.textContent='';argsEl.appendChild(argsView);}else{argsEl.parentElement.style.display='none';}}});'use strict';tr.exportTo('tr.metrics',function(){function ValueList(values){if(values!==undefined)
+this.values_=values;else
+this.values_=[];}
+ValueList.prototype={get valueDicts(){return this.values_.map(function(v){return v.asDict();});},getValuesWithName:function(name){return this.values_.filter(function(value){return value.name.indexOf(name)>-1;});},addValue:function(v){if(!(v instanceof tr.v.NumericValue)){var err=new Error('Tried to add value '+v+' which is non-Numeric');err.name='ValueError';throw err;}
+this.values_.push(v);}};return{ValueList:ValueList};});'use strict';Polymer('tr-ui-a-single-user-expectation-sub-view',{created:function(){this.currentSelection_=undefined;this.realView_=undefined;},get selection(){return this.currentSelection_;},set selection(selection){this.textContent='';this.realView_=document.createElement('tr-ui-a-single-event-sub-view');this.realView_.addEventListener('customize-rows',this.onCustomizeRows_.bind(this));this.appendChild(this.realView_);this.currentSelection_=selection;this.realView_.setSelectionWithoutErrorChecks(selection);},get relatedEventsToHighlight(){if(!this.currentSelection_)
+return undefined;return this.currentSelection_[0].associatedEvents;},onCustomizeRows_:function(event){var ue=this.selection[0];var valueList=new tr.metrics.ValueList();function runMetric(metricInfo){try{metricInfo.constructor(valueList,ue.parentModel);}catch(failure){console.error(metricInfo,failure);}}
+tr.metrics.MetricRegistry.getAllRegisteredTypeInfos().forEach(runMetric);var metricValues={};valueList.valueDicts.forEach(function(value){if(value.grouping_keys.userExpectationStableId!==ue.stableId)
+return;if((value.type!=='numeric')||(value.numeric.type!=='scalar'))
+return;metricValues[value.grouping_keys.name]=value.numeric;});for(var name in metricValues){event.rows.push({name:name,value:tr.v.ui.createScalarSpan(metricValues[name].value,{unit:tr.v.Unit.fromJSON(metricValues[name].unit)})});}
+if(ue.rawCpuMs){event.rows.push({name:'Total CPU',value:tr.v.ui.createScalarSpan(ue.totalCpuMs,{unit:tr.v.Unit.byName.timeDurationInMs})});}}});'use strict';Polymer('tr-ui-a-tab-view',{ready:function(){this.$.tshh.style.display='none';this.tabs_=[];this.selectedTab_=undefined;for(var i=0;i<this.children.length;i++)
+this.processAddedChild_(this.children[i]);this.childrenObserver_=new MutationObserver(this.childrenUpdated_.bind(this));this.childrenObserver_.observe(this,{childList:'true'});},get tabStripHeadingText(){return this.$.tsh.textContent;},set tabStripHeadingText(tabStripHeadingText){this.$.tsh.textContent=tabStripHeadingText;if(!!tabStripHeadingText)
+this.$.tshh.style.display='';else
+this.$.tshh.style.display='none';},get selectedTab(){this.childrenUpdated_(this.childrenObserver_.takeRecords(),this.childrenObserver_);if(this.selectedTab_)
+return this.selectedTab_.content;return undefined;},set selectedTab(content){this.childrenUpdated_(this.childrenObserver_.takeRecords(),this.childrenObserver_);if(content===undefined||content===null){this.changeSelectedTabById_(undefined);return;}
+var contentTabId=undefined;for(var i=0;i<this.tabs_.length;i++)
+if(this.tabs_[i].content===content){contentTabId=this.tabs_[i].id;break;}
+if(contentTabId===undefined)
+return;this.changeSelectedTabById_(contentTabId);},get tabsHidden(){var ts=this.shadowRoot.querySelector('tab-strip');return ts.hasAttribute('tabs-hidden');},set tabsHidden(tabsHidden){tabsHidden=!!tabsHidden;var ts=this.shadowRoot.querySelector('tab-strip');if(tabsHidden)
+ts.setAttribute('tabs-hidden',true);else
+ts.removeAttribute('tabs-hidden');},get tabs(){return this.tabs_.map(function(tabObject){return tabObject.content;});},processAddedChild_:function(child){var observerAttributeSelected=new MutationObserver(this.childAttributesChanged_.bind(this));var observerAttributeTabLabel=new MutationObserver(this.childAttributesChanged_.bind(this));var tabObject={id:this.tabs_.length,content:child,label:child.getAttribute('tab-label'),observers:{forAttributeSelected:observerAttributeSelected,forAttributeTabLabel:observerAttributeTabLabel}};this.tabs_.push(tabObject);if(child.hasAttribute('selected')){if(this.selectedTab_)
+child.removeAttribute('selected');else
+this.setSelectedTabById_(tabObject.id);}
+var previousSelected=child.selected;var tabView=this;Object.defineProperty(child,'selected',{configurable:true,set:function(value){if(value){tabView.changeSelectedTabById_(tabObject.id);return;}
+var wasSelected=tabView.selectedTab_===tabObject;if(wasSelected)
+tabView.changeSelectedTabById_(undefined);},get:function(){return this.hasAttribute('selected');}});if(previousSelected)
+child.selected=previousSelected;observerAttributeSelected.observe(child,{attributeFilter:['selected']});observerAttributeTabLabel.observe(child,{attributeFilter:['tab-label']});},processRemovedChild_:function(child){for(var i=0;i<this.tabs_.length;i++){this.tabs_[i].id=i;if(this.tabs_[i].content===child){this.tabs_[i].observers.forAttributeSelected.disconnect();this.tabs_[i].observers.forAttributeTabLabel.disconnect();if(this.tabs_[i]===this.selectedTab_){this.clearSelectedTab_();this.fire('selected-tab-change');}
+child.removeAttribute('selected');delete child.selected;this.tabs_.splice(i,1);i--;}}},childAttributesChanged_:function(mutations,observer){var tabObject=undefined;for(var i=0;i<this.tabs_.length;i++){var observers=this.tabs_[i].observers;if(observers.forAttributeSelected===observer||observers.forAttributeTabLabel===observer){tabObject=this.tabs_[i];break;}}
+if(!tabObject)
+return;for(var i=0;i<mutations.length;i++){var node=tabObject.content;if(mutations[i].attributeName==='tab-label')
+tabObject.label=node.getAttribute('tab-label');if(mutations[i].attributeName==='selected'){var nodeIsSelected=node.hasAttribute('selected');if(nodeIsSelected)
+this.changeSelectedTabById_(tabObject.id);else
+this.changeSelectedTabById_(undefined);}}},childrenUpdated_:function(mutations,observer){mutations.forEach(function(mutation){for(var i=0;i<mutation.removedNodes.length;i++)
+this.processRemovedChild_(mutation.removedNodes[i]);for(var i=0;i<mutation.addedNodes.length;i++)
+this.processAddedChild_(mutation.addedNodes[i]);},this);},tabButtonSelectHandler_:function(event,detail,sender){this.changeSelectedTabById_(sender.getAttribute('button-id'));},changeSelectedTabById_:function(id){var newTab=id!==undefined?this.tabs_[id]:undefined;var changed=this.selectedTab_!==newTab;this.saveCurrentTabScrollPosition_();this.clearSelectedTab_();if(id!==undefined){this.setSelectedTabById_(id);this.restoreCurrentTabScrollPosition_();}
+if(changed)
+this.fire('selected-tab-change');},setSelectedTabById_:function(id){this.selectedTab_=this.tabs_[id];this.selectedTab_.observers.forAttributeSelected.disconnect();this.selectedTab_.content.setAttribute('selected','selected');this.selectedTab_.observers.forAttributeSelected.observe(this.selectedTab_.content,{attributeFilter:['selected']});},saveTabStates:function(){this.saveCurrentTabScrollPosition_();},saveCurrentTabScrollPosition_:function(){if(this.selectedTab_){this.selectedTab_.content._savedScrollTop=this.$['content-container'].scrollTop;this.selectedTab_.content._savedScrollLeft=this.$['content-container'].scrollLeft;}},restoreCurrentTabScrollPosition_:function(){if(this.selectedTab_){this.$['content-container'].scrollTop=this.selectedTab_.content._savedScrollTop||0;this.$['content-container'].scrollLeft=this.selectedTab_.content._savedScrollLeft||0;}},clearSelectedTab_:function(){if(this.selectedTab_){this.selectedTab_.observers.forAttributeSelected.disconnect();this.selectedTab_.content.removeAttribute('selected');this.selectedTab_.observers.forAttributeSelected.observe(this.selectedTab_.content,{attributeFilter:['selected']});this.selectedTab_=undefined;}}});'use strict';(function(){var EventRegistry=tr.model.EventRegistry;Polymer('tr-ui-a-analysis-view',{ready:function(){this.tabView_=document.createElement('tr-ui-a-tab-view');this.tabView_.style.flex='1 1 auto';this.appendChild(this.tabView_);this.brushingStateController_=undefined;this.onSelectedTabChange_=this.onSelectedTabChange_.bind(this);this.onSelectionChanged_=this.onSelectionChanged_.bind(this);this.lastSeenSelection_=new tr.model.EventSet();},set tallMode(value){if(value)
+this.classList.add('tall-mode');else
+this.classList.remove('tall-mode');},get tallMode(){return this.classList.contains('tall-mode');},get tabView(){return this.tabView_;},get brushingStateController(){return this.brushingStateController_;},set brushingStateController(brushingStateController){if(this.brushingStateController){this.brushingStateController_.removeEventListener('change',this.onSelectionChanged_);}
+this.brushingStateController_=brushingStateController;if(this.brushingStateController){this.brushingStateController_.addEventListener('change',this.onSelectionChanged_);}
+this.onSelectionChanged_();},get selection(){return this.brushingStateController_.selection;},onSelectionChanged_:function(e){var selection=this.brushingStateController_.selection;var selectionHasSameValue=this.lastSeenSelection_.equals(selection);this.lastSeenSelection_=selection;if(selectionHasSameValue)
+return;var lastSelectedTabTagName;var lastSelectedTabTypeName;if(this.tabView_.selectedTab){lastSelectedTabTagName=this.tabView_.selectedTab.tagName;lastSelectedTabTypeName=this.tabView_.selectedTab._eventTypeName;}
+this.tallMode=false;var previouslySelectedTab=this.tabView_.selectedTab;this.tabView_.removeEventListener('selected-tab-change',this.onSelectedTabChange_);var previousSubViews={};for(var i=0;i<this.tabView_.children.length;i++){var previousSubView=this.tabView_.children[i];previousSubViews[previousSubView._eventTypeName]=previousSubView;}
+this.tabView_.saveTabStates();this.tabView_.textContent='';if(selection.length==0){this.tabView_.tabStripHeadingText='Nothing selected. Tap stuff.';}else if(selection.length==1){this.tabView_.tabStripHeadingText='1 item selected: ';}else{this.tabView_.tabStripHeadingText=selection.length+' items selected: ';}
+var eventsByBaseTypeName=selection.getEventsOrganizedByBaseType(true);var numBaseTypesToAnalyze=tr.b.dictionaryLength(eventsByBaseTypeName);for(var eventTypeName in eventsByBaseTypeName){var subSelection=eventsByBaseTypeName[eventTypeName];var subView=this.createSubViewForSelection_(eventTypeName,subSelection,previousSubViews[eventTypeName]);subView._eventTypeName=eventTypeName;this.tabView_.appendChild(subView);subView.selection=subSelection;}
+var tab;if(lastSelectedTabTagName)
+tab=this.tabView_.querySelector(lastSelectedTabTagName);if(!tab&&lastSelectedTabTypeName){var tab=tr.b.findFirstInArray(this.tabView_.children,function(tab){return tab._eventTypeName===lastSelectedTabTypeName;});}
+if(!tab)
+tab=this.tabView_.firstChild;this.tabView_.selectedTab=tab;this.onSelectedTabChange_();this.tabView_.addEventListener('selected-tab-change',this.onSelectedTabChange_);},createSubViewForSelection_:function(eventTypeName,subSelection,previousSubView){var eventTypeInfo=EventRegistry.getEventTypeInfoByTypeName(eventTypeName);var singleMode=subSelection.length==1;var tagName;if(subSelection.length===1)
+tagName=eventTypeInfo.metadata.singleViewElementName;else
+tagName=eventTypeInfo.metadata.multiViewElementName;if(!tr.ui.b.getPolymerElementNamed(tagName))
+throw new Error('Element not registered: '+tagName);var subView;if(previousSubView&&previousSubView.tagName===tagName.toUpperCase())
+subView=previousSubView;else
+subView=document.createElement(tagName);var camelLabel;if(subSelection.length===1)
+camelLabel=EventRegistry.getUserFriendlySingularName(eventTypeName);else
+camelLabel=EventRegistry.getUserFriendlyPluralName(eventTypeName);subView.tabLabel=camelLabel+' ('+subSelection.length+')';return subView;},onSelectedTabChange_:function(){var brushingStateController=this.brushingStateController_;if(this.tabView_.selectedTab){var selectedTab=this.tabView_.selectedTab;this.tallMode=selectedTab.requiresTallView;if(brushingStateController){var rlth=selectedTab.relatedEventsToHighlight;brushingStateController.changeAnalysisViewRelatedEvents(rlth);}}else{this.tallMode=false;if(brushingStateController)
+brushingStateController.changeAnalysisViewRelatedEvents(undefined);}}});})();'use strict';Polymer('tr-ui-b-drag-handle',{__proto__:HTMLDivElement.prototype,created:function(){this.lastMousePos_=0;this.onMouseMove_=this.onMouseMove_.bind(this);this.onMouseUp_=this.onMouseUp_.bind(this);this.addEventListener('mousedown',this.onMouseDown_);this.target_=undefined;this.horizontal=true;this.observer_=new WebKitMutationObserver(this.didTargetMutate_.bind(this));this.targetSizesByModeKey_={};},get modeKey_(){return this.target_.className==''?'.':this.target_.className;},get target(){return this.target_;},set target(target){this.observer_.disconnect();this.target_=target;if(!this.target_)
+return;this.observer_.observe(this.target_,{attributes:true,attributeFilter:['class']});},get horizontal(){return this.horizontal_;},set horizontal(h){this.horizontal_=h;if(this.horizontal_)
+this.className='horizontal-drag-handle';else
+this.className='vertical-drag-handle';},get vertical(){return!this.horizontal_;},set vertical(v){this.horizontal=!v;},forceMutationObserverFlush_:function(){var records=this.observer_.takeRecords();if(records.length)
+this.didTargetMutate_(records);},didTargetMutate_:function(e){var modeSize=this.targetSizesByModeKey_[this.modeKey_];if(modeSize!==undefined){this.setTargetSize_(modeSize);return;}
+this.target_.style[this.targetStyleKey_]='';},get targetStyleKey_(){return this.horizontal_?'height':'width';},getTargetSize_:function(){var targetStyleKey=this.targetStyleKey_;if(!this.target_.style[targetStyleKey]){this.target_.style[targetStyleKey]=window.getComputedStyle(this.target_)[targetStyleKey];}
+var size=parseInt(this.target_.style[targetStyleKey]);this.targetSizesByModeKey_[this.modeKey_]=size;return size;},setTargetSize_:function(s){this.target_.style[this.targetStyleKey_]=s+'px';this.targetSizesByModeKey_[this.modeKey_]=s;},applyDelta_:function(delta){var curSize=this.getTargetSize_();var newSize;if(this.target_===this.nextElementSibling){newSize=curSize+delta;}else{newSize=curSize-delta;}
+this.setTargetSize_(newSize);},onMouseMove_:function(e){var curMousePos=this.horizontal_?e.clientY:e.clientX;var delta=this.lastMousePos_-curMousePos;this.applyDelta_(delta);this.lastMousePos_=curMousePos;e.preventDefault();return true;},onMouseDown_:function(e){if(!this.target_)
+return;this.forceMutationObserverFlush_();this.lastMousePos_=this.horizontal_?e.clientY:e.clientX;document.addEventListener('mousemove',this.onMouseMove_);document.addEventListener('mouseup',this.onMouseUp_);e.preventDefault();return true;},onMouseUp_:function(e){document.removeEventListener('mousemove',this.onMouseMove_);document.removeEventListener('mouseup',this.onMouseUp_);e.preventDefault();}});'use strict';Polymer('tr-ui-b-dropdown',{ready:function(){this.$.outer.tabIndex=0;},get iconElement(){return this.$.icon;},onOuterKeyDown_:function(e){if(e.keyCode===' '.charCodeAt(0)){this.toggle_();e.preventDefault();e.stopPropagation();}},onOuterClick_:function(e){var or=this.$.outer.getBoundingClientRect();var inside=true;inside&=e.clientX>=or.left;inside&=e.clientX<or.right;inside&=e.clientY>=or.top;inside&=e.clientY<or.bottom;if(!inside)
+return;e.preventDefault();this.toggle_();},toggle_:function(){if(!this.isOpen)
+this.show();else
+this.close();},show:function(){if(this.isOpen)
+return;this.$.outer.classList.add('open');var ddr=this.$.outer.getBoundingClientRect();var rW=Math.max(ddr.width,150);this.$.dialog.style.minWidth=rW+'px';this.$.dialog.showModal();var ddw=this.$.outer.getBoundingClientRect().width;var w=this.$.dialog.getBoundingClientRect().width;this.$.dialog.style.top=ddr.bottom-1+'px';this.$.dialog.style.left=ddr.left+'px';},onDialogClick_:function(e){if(!this.isOpen)
+return;if(e.srcElement!==this.$.dialog)
+return;e.preventDefault();this.close();},onDialogCancel_:function(e){e.preventDefault();this.close();},close:function(){if(!this.isOpen)
+return;this.$.dialog.close();this.$.outer.classList.remove('open');this.$.outer.focus();},get isOpen(){return this.$.dialog.hasAttribute('open');}});'use strict';tr.exportTo('tr.ui.b',function(){var FaviconsByHue={blue:'data:image/vndmicrosofticon;base64,AAABAAIAEBAAAAEAIABoBAAAJgAAACAgAAABACAAqBAAAI4EAAAoAAAAEAAAACAAAAABACAAAAAAAAAEAAASCwAAEgsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAjj8xAGArIgqOPzE8nUY3dqJJOJeiSTiXnUY3do4/MTxhKyIKjkAxAAAAAAAAAAAAAAAAAAAAAABQJBwAAAAAAZJBMzSoSzqlsU8+6bRQP/21UT//tVE//7RQP/2wTz3ppko6pY9AMjQAAAABTyMbAAAAAAB7e3sAAP//AKFSRE+wTz3dtVE//7VRP/+1UT//tVE//7VRP/+zUD7/sE89/7BOPf+qTDvdl0M0TwAAAABWJx4A+fn5ANjd3TnIiX7ftVA9/7VRP/+1UT//tVE//7VRP/+xTz3/rE08/6xMO/+sTDv/rE08/6dKOt+SQTM5q0w7ALO0tA3v8fGu05uR/7NMOf+0Tzz/tE88/7RPPv+uTT3/p0o7/6ZJOv+mSTr/pkk6/6ZJOv+mSjr/n0Y4rnIwKg3h4eFK9/j48N2zrP/FeGr/xnps/8Z6bP/AaUv/tlw1/7RbNf+1WzX/tFs1/7RbNf+0WzX/tFs1/7NbNPCqWy1K7e3tjPn5+f/49vX/9vLy//by8v/28vH/8bZv/+6RH//ukyP/7pMj/+6SI//ukiP/7pMj/+2SIv/qjyL/34kfjPHx8bL5+fn/+fn5//n5+f/5+fr/+fn5//W7cP/zlB3/85Yh//OWIf/zliH/85Yh//GVIf/rkR//6ZAf/+KLHrLz8/O2+fn5//n5+f/5+fn/+fn5//n5+f/1unD/85Qd//OWIf/zliH/85Yh//CUIP/mjh//44we/+OMHv/diR628vLymfn5+f/5+fn/+fn5//n5+f/5+fn/9bx0//OXI//zmCb/85gm/++VIv/hjB//3Yoe/92KHv/dih7/2IYdmfHx8Vz4+Pj3+fn5//n5+f/5+fn/+fn5//jo0//33bv/9929//bbtf/euDX/06oJ/9OrC//Tqwv/06oM98yfD1zr6+sY9/f3xvn5+f/5+fn/+fn5//n5+f/5+vv/+fv8//n7/f/3+PH/3Ms6/9O8AP/UvQD/1L0A/9K8AMbItAAY////APT09Fb4+Pjy+fn5//n5+f/5+fn/+fn5//n5+f/5+fr/9/bu/9zKOf/TuwD/1LwA/9S8APLQuABW3cQAAOzs7ADm5uYF9vb2ePn5+fT5+fn/+fn5//n5+f/5+fn/+fn6//f27v/cyTn/07sA/9S8APTRugB4w60ABcmyAAAAAAAA8PDwAOzs7Ab29vZd+Pj40vn5+fz5+fn/+fn5//n5+f/49/H/5Ndu/NjEIdLSugBdybIABsy1AAAAAAAAAAAAAAAAAADn5+cAqKioAPT09CH39/dy+Pj4tvj4+NX4+PjV+Pj4tvX063Lt6MMhOQAAAM+/RAAAAAAAAAAAAPAPAADAAwAAwAMAAIABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIABAACAAQAAwAMAAPAPAAAoAAAAIAAAAEAAAAABACAAAAAAAAAQAAASCwAAEgsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABCwUEDDgZExxWJx4tYiwiN2IsIjdWJx4tOBkTHAsFBAwAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP///wAbDAkKZS0jMYs+MWydRjeipko6x6tMO9utTTzjrU0846tMO9umSjrHnUY3oos+MWxlLSMxGwwJCv///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgZFAAPBwUHcjMoPJtFNpqsTTzhs1A+/LVRP/+2UT//tVE//7VRP/+1UT//tVE//7ZRP/+1UT//s1A+/KxNPOGbRTaacTInPA8HBQc4GRMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD/yp4AUCQcGZVDNICtTjzktVE//7VRP/+1UT//tVE//7VRP/+1UT//tVE//7VRP/+1UT//tVE//7VRP/+0UT//s1A+/7JQPv+rTDvkkkEzgE8jGxn/xZoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAA////AGswJSqiSTivs1A++7VRP/+1UT//tVE//7VRP/+1UT//tVE//7VRP/+1UT//tVE//7VRP/+1UT//tFA+/7FPPf+xTz3/sU89/7FPPf+vTj37nkc3r2guJCr///8AAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwMDAP/DogB/VEwsqE09v7VRP/+1UT//tVE//7VRP/+1UT//tVE//7VRP/+1UT//tVE//7VRP/+1UT//tVE//7NQPv+vTj3/r049/69OPf+vTj3/r049/69OPf+uTjz/oUg4v20xJiz/nnsAAgEBAAAAAAAAAAAAAAAAAAAAAAD19fUAkp2fHdK2sbW5W0r/tVA+/7VRP/+1UT//tVE//7VRP/+1UT//tVE//7VRP/+1UT//tVE//7VRP/+yUD7/rU08/6xNPP+tTTz/rU08/61NPP+tTTz/rU08/61NPP+sTTz/nkY3tWAqIR2pSzsAAAAAAAAAAAAAAAAAeXl5ADY2Ngnd39+O6tbT/blbSv+1UD7/tVE//7VRP/+1UT//tVE//7VRP/+1UT//tVE//7VRP/+1UT//slA+/6xNPP+rTDv/q0w7/6tMO/+rTDv/q0w7/6tMO/+rTDv/q0w7/6tMO/+qTDv9lkM0jiUQDQlSJR0AAAAAAAAAAAD///8AxMTES/X29u3s2NX/uVtK/7VQPv+1UT//tVE//7VRP/+1UT//tVE//7VRP/+1UT//tVE//7FPPv+qTDv/qEs6/6hLOv+oSzr/qEs6/6hLOv+oSzr/qEs6/6hLOv+oSzr/qEs6/6lLOv+lSTnthDsuS/+TcgAAAAAAm5ubAHBwcA/o6Oix+vv8/+zY1P+5W0r/tVA+/7VRP/+1UT//tVE//7VRP/+1UT//tVE//7VRP/+xTz3/qEs6/6ZKOv+mSjr/pko6/6ZKOv+mSjr/pko6/6ZKOv+mSjr/pko6/6ZKOv+mSjr/pko6/6ZKOv+
bRTaxSiEaD2cuJAD///8AycnJRfX19fD6+/z/69fU/7hYR/+0Tjv/tE48/7ROPP+0Tjz/tE48/7ROPP+0Tz3/r04+/6VJOv+jSDn/o0g5/6NIOf+jSDn/o0g5/6NIOf+jSDn/o0g5/6NIOf+jSDr/o0g5/6NIOf+jSDn/o0g6/6BHOfCCOS9F0FxKAAAAAALk5OSN+fn5//n6+v/y5+X/05uS/9CTiP/QlIn/0JSJ/9CUif/QlIn/0JSK/8yGb//AaDb/vWc0/71nNf+9ZzT/vWc0/71nNP+9ZjT/vWY0/71mNP+9ZjT/vGY0/7xmNP+8ZjT/vGY0/7xmNP+8ZjT/u2U0/7FiLY0AAAACk5OTFu/v78X5+fn/+fn5//n5+f/5+vr/+fn5//n5+f/5+fn/+fn5//n5+f/5+/3/99iy//KWI//ylSH/8ZUh//GVIf/xlSH/8ZUh//GVIf/xlSH/8ZUh//GVIf/xlSH/8ZUh//GVIf/xlSH/8ZUh//CUIf/vkyD/5Y0fxY1XExbDw8Mz9PT05fn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n7/f/32LL/85cj//OWIf/zliH/85Yh//OWIf/zliH/85Yh//OWIf/zliH/85Yh//OWIf/zliH/85Yh//OWIf/wlCD/7pIg/+6SIP/pjx/lunIZM9XV1VD39/f0+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fv9//fYsv/zlyP/85Yh//OWIf/zliH/85Yh//OWIf/zliH/85Yh//OWIf/zliH/85Yh//OWIf/zliH/75Mg/+uRH//qkB//6pAf/+iPH/TIfBtQ3d3dYfj4+Pn5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+/3/99iy//OXI//zliH/85Yh//OWIf/zliH/85Yh//OWIf/zliH/85Yh//OWIf/zliH/85Yh/+6TIP/ojx//548f/+ePH//njx//5o4f+c1/HGHh4eFl+Pj4+vn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n7/f/32LL/85cj//OWIf/zliH/85Yh//OWIf/zliH/85Yh//OWIf/zliH/85Yh//OWIf/tkiD/5Y0f/+SNH//ljR//5Y0f/+WNH//kjB/6zn8cZeDg4Fr4+Pj3+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fv9//fYsv/zlyP/85Yh//OWIf/zliH/85Yh//OWIf/zliH/85Yh//OWIf/zliH/65Eg/+KMHv/iix7/4ose/+KLHv/iix7/4ose/+CLHvfLfRta3NzcQvf39+/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+/3/99iy//OXI//zliH/85Yh//OWIf/zliH/85Yh//OWIf/zliH/85Yh/+qRIP/gih7/34oe/9+KHv/fih7/34oe/9+KHv/fih7/3Yge78V6GkLS0tIj9fX12fn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n7/f/32LH/85Yg//OVHv/zlR7/85Ue//OVHv/zlR7/85Ue//OVIf/pjyH/3ogf/92HH//dhx//3Ycf/92HH//dhx//3Ycf/92HH//ahh7ZunMZI56engjy8vKu+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fr7//jr2f/2ypL/9smP//bJkP/2yZD/9smQ//bJkP/2yZD/5rNI/9OeFP/SnhX/0p4V/9KeFf/SnhX/0Z0V/9GdFf/RnRX/0Z0V/8yWFq6KVBcI////AO3t7Wr5+fn++fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn6//n6/P/5+vz/+fr8//n6/P/5+vz/+fr8//n6/P/h013/0rsA/9O8AP/TvAD/07wA/9O8AP/TvAD/07wA/9O8AP/SvAD+yLMAav/mAADr6+sA4eHhJPb29tv5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5/+LSW//TuwD/1LwA/9S8AP/UvAD/1LwA/9S8AP/UvAD/1LwA/9K6ANu/qgAkyLEAALu7uwAAAAAA8vLygfn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/4tJb/9O7AP/UvAD/1LwA/9S8AP/UvAD/1LwA/9S8AP/UvAD/zrYAgQAAAACfjQAAAAAAAOzs7ADk5OQe9vb2zPn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/i0lv/07sA/9S8AP/UvAD/1LwA/9S8AP/UvAD/1LwA/9K6AMzCrAAeybIAAAAAAAAAAAAAsLCwAP///wDv7+9O+Pj47Pn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5/+LSW//TuwD/1LwA/9S8AP/UvAD/1LwA/9S8AP/TuwDsy7QATu7UAACXhQAAAAAAAAAAAAAAAAAA1tbWALS0tAPy8vJv+Pj49Pn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/4tJb/9O7AP/UvAD/1LwA/9S8AP/UvAD/07wA9M63AG6ZiQADtqIAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4uLiANfX1wbz8/Nz+Pj48Pn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/i0lv/07sA/9S8AP/UvAD/1LwA/9O8APDPuABzuKMABsGrAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4+PjANjY2ATy8vJZ+Pj42vn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5/+HSW//TugD/1LsA/9S8AP/TuwDazrcAWbejAATBqwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA1NTUAB8fHwDw8PAr9vb2nPj4+O35+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/7uas/+bZdv/j1mvt2cYznMu0ACsUFAAAtaEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOvr6wDj4+MG8vLyOvb29pD4+PjS+fn58vn5+f35+fn/+fn5//n5+f/5+fn/+fn5/fn5+fL4+frS9/j8kPT1/Trs8v
8G8PP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADh4eEA1tbWAu/v7xv09PRJ9vb2dvb29pf39/eo9/f3qPb29pf29vZ29PT0Se/v7xvW1tYC4eHhAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP/gB///gAH//gAAf/wAAD/4AAAf8AAAD+AAAAfAAAADwAAAA4AAAAGAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAGAAAABwAAAA8AAAAPgAAAH4AAAB/AAAA/4AAAf/gAAf/8AAP//wAP/',green:'data:image/vndmicrosofticon;base64,AAABAAIAEBAAAAEAIABoBAAAJgAAACAgAAABACAAqBAAAI4EAAAoAAAAEAAAACAAAAABACAAAAAAAAAEAAASCwAAEgsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAbWJLAEpCMwptYks8eWxTdn1wVpd9cFaXeWxTdm1iSzxKQzMKbWJLAAAAAAAAAAAAAAAAAAAAAAA+OCsAAAAAAXBlTTSBdFmliHpe6Yp8X/2LfWD/i31g/4p8X/2HeV3pf3NYpW5jTDQAAAABPTcqAAAAAAB7e3sAlv//AIB1Xk+HeV3di31g/4t9YP+LfWD/i31g/4t9YP+Je1//h3pd/4d5Xf+DdVrddGhQTwAAAABDPC4A+fn5ANrb3DmupZPfinxf/4t9YP+LfWD/i31g/4t9YP+Iel7/hHdb/4R2W/+Edlv/hHdb/4BzWN9wZU05g3ZaALS0tA3w8PGuu7Sj/4h5W/+Je17/iXte/4t8X/+HeFz/gnNY/4FyWP+Bclj/gXJY/4FyWP+Bclj/fG1Url9NPA3h4eFK9/j48MvFuf+kmoP/ppuF/6abhf+JkHL/c4Rj/3OEY/9zhGP/coNj/3KDY/9yg2P/coNj/3CDYvBgf19K7e3tjPn5+f/39vb/9fTz//X08//09PP/itKw/0m+h/9Mv4n/TL+J/0y/if9Mv4n/TL+J/0y+iP9Lu4b/RrJ/jPHx8bL5+fn/+fn5//n5+f/5+fn/+fn5/4rXtP9Hwon/SsOL/0rDi/9Kw4v/SsOL/0nCiv9HvYb/RruF/0S1gbLz8/O2+fn5//n5+f/5+fn/+fn5//n5+f+K17P/R8KJ/0rDi/9Kw4v/SsOL/0nBif9GuYT/RbaC/0W2gv9Dsn+28vLymfn5+f/5+fn/+fn5//n5+f/5+fn/jdi1/0vDjP9OxI7/TsSO/0rAiv9FtoP/RLKA/0SygP9EsoD/Qq59mfHx8Vz4+Pj3+fn5//n5+f/5+fn/+fn5/9rw5v/H6tn/yOra/8Lp2f9e1b7/O8yz/z3MtP89zLT/Pcuy9zzApVzr6+sY9/f3xvn5+f/5+fn/+fn5//n5+f/7+vr//Pr7//z6+//z+fn/ZuPY/zbczv853c7/Od3O/zjbzcY10sYY////APT09Fb4+Pjy+fn5//n5+f/5+fn/+fn5//n5+f/6+fn/8Pj3/2Xj1/823Mz/OdzN/znczfI42MlWO+XWAOzs7ADm5uYF9vb2ePn5+fT5+fn/+fn5//n5+f/5+fn/+vn5//D49/9j4tf/NdvM/znczfQ42ct4Ncu9BTbRwgAAAAAA8PDwAOzs7Ab29vZd+Pj40vn5+fz5+fn/+fn5//n5+f/z+Pj/jung/FLf0tI42ctdNdHCBjfUxgAAAAAAAAAAAAAAAADn5+cAqKioAPT09CH39/dy+Pj4tvj4+NX4+PjV+Pj4tu329XLO7+whAFQmAGrUygAAAAAAAAAAAPAPAADAAwAAwAMAAIABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIABAACAAQAAwAMAAPAPAAAoAAAAIAAAAEAAAAABACAAAAAAAAAQAAASCwAAEgsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABCQgGDCsmHRxCOy4tS0M0N0tDNDdCOy4tKyYdHAkIBgwAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP///wAVEg4KTUU1MWtgSmx5bVOigHNYx4N2W9uFd1zjhXdc44N2W9uAc1jHeW1TomtgSmxNRjUxFRMOCv///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACsnHgALCggHWE88PHdrUpqEd1vhiXxf/It9YP+LfWD/i31g/4t9YP+LfWD/i31g/4t9YP+LfWD/iXxf/IR3W+F3a1KaV048PAsKCAcrJx4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD///AAPjcqGXJnT4CFeFzki31g/4t9YP+LfWD/i31g/4t9YP+LfWD/i31g/4t9YP+LfWD/i31g/4t9YP+KfWD/iXxf/4l7Xv+DdlrkcGVNgDw2Khn//+sAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA////AFJKOSp9cFavinxf+4t9YP+LfWD/i31g/4t9YP+LfWD/i31g/4t9YP+LfWD/i31g/4t9YP+LfWD/inxf/4h6Xv+Iel3/iHpd/4h6Xv+GeV37eW1Ur1BINyr///8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwMDAP//3gBsZ1osgnVbv4t9YP+LfWD/i31g/4t9YP+LfWD/i31g/4t9YP+LfWD/i31g/4t9YP+LfWD/i31g/4l8X/+HeV3/hnlc/4Z5XP+GeVz/hnlc/4Z5XP+GeFz/fG9Vv1RLOiz/9LoAAgIBAAAAAAAAAAAAAAAAAAAAAAD19fUAl5ibHcbCurWShGn/i31g/4t9YP+LfWD/i31g/4t9YP+LfWD/i31g/4t9YP+LfWD/i31g/4t9YP+Je1//hXhc/4R3W/+Fd1v/hXdb/4V3W/+Fd1v/hXdb/4V3W/+Ed1v/eW1TtUlCMh2CdVkAAAAAAAAAAAAAAAAAeXl5ADY2Ngne3t+O4t/Z/ZKFaf+LfV//i31g/4t9YP+LfWD/i31g/4t9YP+LfWD/i31g/4t9YP+LfWD/iXte/4R3W/+Ddlr/g3Za/4N2Wv+Ddlr/g3Za/4N2Wv+Ddlr/g3Za/4N2Wv+CdVr9c2dPjhwZEwk/OSsAAAAAAAAAAAD///8AxMTES/X19u3k4dv/koRp/4t9X/+LfWD/i31g/4t9YP+LfWD/i31g/4t9YP+LfWD/i31g/4h6Xv+CdVr/gXRZ/4F0Wf+BdFn/gXRZ/4F0Wf+BdFn/gXRZ/4F0Wf+BdFn/gXRZ/4F0Wf9+c
lftZVtGS/3jrgAAAAAAm5ubAHBwcA/o6Oix+/v7/+Pg2/+ShGn/i31f/4t9YP+LfWD/i31g/4t9YP+LfWD/i31g/4t9YP+Iel7/gXRZ/4BzWP+Ac1j/gHNY/4BzWP+Ac1j/gHNY/4BzWP+Ac1j/gHNY/4BzWP+Ac1j/gHNY/4BzWP93a1KxOTMnD1BHNwD///8AycnJRfX19fD7+/v/4+Da/5CCZ/+Jel3/iXtd/4l7Xf+Je13/iXtd/4l7Xf+Ke17/iHhd/4BxV/9/cFb/f3BW/39wVv9/cFb/f3BW/39wVv9/cFb/f3BW/39wVv9/cFb/f3BW/39wVv9/cFb/f3BW/31uVPBnWURFo45tAAAAAALk5OSN+fn5//r6+v/t7Oj/vLSk/7aunP+3rp3/t66d/7eunf+3rp3/uK+e/6Gmjv9vkG3/bI5r/2yOa/9sjmv/bI5r/2yOa/9sjmv/bI5r/2yOa/9sjmr/bI1q/2yNav9sjWr/bI1q/2uNav9rjWr/a41q/16GZI0AAAACk5OTFu/v78X5+fn/+fn5//n5+f/5+fr/+fn5//n5+f/5+fn/+fn5//n5+f/8+vv/wOfV/0vCi/9Kwor/SsKK/0rCiv9Kwor/SsKK/0rCiv9Kwor/SsKK/0rCiv9Kwor/SsKK/0rCiv9Kwor/SsKK/0nAif9Jv4j/RreCxStxUBbDw8Mz9PT05fn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//z6+/+/59X/TMSM/0rDi/9Kw4v/SsOL/0rDi/9Kw4v/SsOL/0rDi/9Kw4v/SsOL/0rDi/9Kw4v/SsOL/0rDi/9JwYn/SL6I/0i+iP9GuoXlOJVqM9XV1VD39/f0+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn//Pr7/7/n1f9Mw4z/SsOL/0rDi/9Kw4v/SsOL/0rDi/9Kw4v/SsOL/0rDi/9Kw4v/SsOL/0rDi/9Kw4v/ScCJ/0e8hv9HvIb/R7yG/0a6hfQ9oXJQ3d3dYfj4+Pn5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/8+vv/v+fV/0zDjP9Kw4v/SsOL/0rDi/9Kw4v/SsOL/0rDi/9Kw4v/SsOL/0rDi/9Kw4v/SsOL/0i/iP9GuoX/RrqE/0a6hP9GuoT/RrmD+T6ldWHh4eFl+Pj4+vn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//z6+/+/59X/TMOM/0rDi/9Kw4v/SsOL/0rDi/9Kw4v/SsOL/0rDi/9Kw4v/SsOL/0rDi/9Ivof/RbiD/0W3gv9FuIP/RbiD/0W4g/9Ft4L6PqZ2ZeDg4Fr4+Pj3+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn//Pr7/7/n1f9Mw4z/SsOL/0rDi/9Kw4v/SsOL/0rDi/9Kw4v/SsOL/0rDi/9Kw4v/SL2H/0W2gv9FtYH/RbWB/0W1gf9FtYH/RbWB/0S0gPc+o3Ra3NzcQvf39+/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/8+vv/v+fV/0zDjP9Kw4v/SsOL/0rDi/9Kw4v/SsOL/0rDi/9Kw4v/SsOL/0e8hv9EtID/RLOA/0SzgP9Es4D/RLOA/0SzgP9Es4D/Q7F/7zyecULS0tIj9fX12fn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//z6+/+/59X/SsOL/0jCiv9Iwor/SMKK/0jCiv9Iwor/SMKK/0rCiv9HuoT/RLF+/0Owff9EsH3/RLB9/0Swff9EsH3/RLB9/0Swff9CrnzZOJZrI56engjy8vKu+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+vn6/9/x6f+l38X/o9/D/6Tfw/+k38P/pN/D/6Tfw/+k38T/a9Kz/0DBof9BwKH/QcCh/0HAof9BwKD/QcCg/0G/oP9Bv6D/Qb+g/0C4mK4tbU4I////AO3t7Wr5+fn++fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+vn6//v6+//7+vv/+/r7//v6+//7+vv//Pr7//v6+/+B597/NdvN/znczf853M3/OdzN/znczf853M3/OdzN/znczf85283+NtHDakb/+gDr6+sA4eHhJPb29tv5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5/3/n3f823Mz/OdzN/znczf853M3/OdzN/znczf853M3/OdzN/zjay9s0x7kkNs/BALu7uwAAAAAA8vLygfn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/f+fd/zbbzP853M3/OdzN/znczf853M3/OdzN/znczf853M3/N9XHgQAAAAAspZoAAAAAAOzs7ADk5OQe9vb2zPn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f9/593/NtvM/znczf853M3/OdzN/znczf853M3/OdzN/zjay8w0yrweNtDCAAAAAAAAAAAAsLCwAP///wDv7+9O+Pj47Pn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5/3/n3f8228z/OdzN/znczf853M3/OdzN/znczf8528zsN9PETkD45gAonJEAAAAAAAAAAAAAAAAA1tbWALS0tAPy8vJv+Pj49Pn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/f+fd/zbbzP853M3/OdzN/znczf853M3/OdvM9DjWx24qoJUDMb2wAAAAAAAAAAAAAAAAAAAAAAAAAAAA4uLiANfX1wbz8/Nz+Pj48Pn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f9/593/NtvM/znczf853M3/OdzN/znbzPA418hzMr6xBjTIugAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4+PjANjY2ATy8vJZ+Pj42vn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5/37m3f8z28z/N9zN/znczf8528zaONbIWTK/sgQ0yLsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA1NTUAB8fHwDw8PAr9vb2nPj4+O35+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/vfDr/5Tq4v+L6ODt
YODUnDTTxSsAGBsAMrywAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOvr6wDj4+MG8vLyOvb29pD4+PjS+fn58vn5+f35+fn/+fn5//n5+f/5+fn/+fn5/fn5+fL6+PjS+vf3kPv09Tr/6u4G/+/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADh4eEA1tbWAu/v7xv09PRJ9vb2dvb29pf39/eo9/f3qPb29pf29vZ29PT0Se/v7xvW1tYC4eHhAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP/gB///gAH//gAAf/wAAD/4AAAf8AAAD+AAAAfAAAADwAAAA4AAAAGAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAGAAAABwAAAA8AAAAPgAAAH4AAAB/AAAA/4AAAf/gAAf/8AAP//wAP/',red:'data:image/vndmicrosofticon;base64,AAABAAIAEBAAAAEAIABoBAAAJgAAACAgAAABACAAqBAAAI4EAAAoAAAAEAAAACAAAAABACAAAAAAAAAEAAASCwAAEgsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQxmbAC0RagpDGZs8ShysdkwdspdMHbKXShysdkMZmzwuEWoKQxmcAAAAAAAAAAAAAAAAAAAAAAAmDlgAAAAAAUQanzRPHrilUx/B6VQgxf1VIMb/VSDG/1Qgxf1TH8DpTh22pUMZnDQAAAABJQ5XAAAAAAB7ensA//8AAFUrr09SH8DdVSDG/1Ugxv9VIMb/VSDG/1Ugxv9UH8P/Ux/B/1IfwP9QHrrdRxqlTwAAAAAoD14A+fn5ANzf1zmMatPfVB7G/1Ugxv9VIMb/VSDG/1Ugxv9TH8L/UR68/1AevP9QHrz/UR68/04dt99EGaA5UB67ALS0sw3x8u+unYDd/1AZxP9THcX/Ux3F/1Qexf9THr//Tx23/08ctv9PHbb/Tx22/08dtv9PHbb/SxuurjkSfg3h4eFK+Pj38LWf5P97UtL/fVXS/31V0/9fOcz/SSfC/0knwP9JJ8D/SSfA/0knwP9JJ8D/SSfA/0gnv/A/KLNK7e3tjPn5+f/29fj/8vD3//Px9//y8Pf/fILz/zQ/8P83QvD/N0Lw/zdC8P83QvD/N0Lw/zdB7/82QOz/Mz3gjPHx8bL5+fn/+fn5//n6+f/5+vn/+fn5/36G9v8yQPT/NkP0/zZD9P82Q/T/NkP0/zVC8v80QOz/M0Dq/zI+47Lz8/O2+fn5//n5+f/5+fn/+fn5//n5+f99hvb/MkD0/zZD9P82Q/T/NkP0/zVC8f8zP+f/Mj7k/zI+5P8xPd628vLymfn5+f/5+fn/+fn5//n5+f/5+fn/gYn2/zdE9P87R/T/O0f0/zZF8P8yQOP/MT/e/zE/3v8xP97/Lz3ZmfHx8Vz4+Pj3+fn5//n5+f/5+fn/+fn5/9fZ+P/Bxfj/wsb4/7vD+P87j/X/Dnzx/xF98f8RffH/EXzw9xZv5Vzr6+sY9/f3xvn5+f/5+fn/+fn5//n5+f/7+/n//Pz5//38+f/x+Pn/OrD+/wCY//8Amf//AJn//wCZ/cYAlPMY////APT09Fb4+Pjy+fn5//n5+f/5+fn/+fn5//n5+f/6+fn/7vX5/zmu/v8Al///AJj//wCY/vIAlfpWAJ//AOzs7ADm5uYF9vb2ePn5+fT5+fn/+fn5//n5+f/5+fn/+vn5/+71+f85rf7/AJb//wCY//QAlvx4AIzrBQCQ8gAAAAAA8PDwAOzs7Ab29vZd+Pj40vn5+fz5+fn/+fn5//n5+f/x9vn/bsP8/CGk/tIAlvxdAJDyBgCT9QAAAAAAAAAAAAAAAADn5+cAqKioAPT09CH39/dy+Pj4tvj4+NX4+PjV+Pj4tuvy93LD4fUhAAC7AESo6wAAAAAAAAAAAPAPAADAAwAAwAMAAIABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIABAACAAQAAwAMAAPAPAAAoAAAAIAAAAEAAAAABACAAAAAAAAAQAAASCwAAEgsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBgIMDBoKPRwoD14tLhFrNy4RazcoD14tGgo9HAYCDAwAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP+3/wANBR0KLxJuMUEYmGxKHKyiTh22x1Aeu9tRHr3jUR6941Aeu9tOHbbHShysokEYmGwvEm4xDQUeCv+6/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABoKPgAHAxAHNhR9PEkbqppRHr3hVCDE/FUgxv9VIMf/VSDH/1Ugxv9VIMb/VSDH/1Ugx/9VIMb/VCDE/FEevOFIG6maNRR8PAcDEAcaCj0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADVUP8AJg5YGUYao4BRH77kVSDG/1Ugxv9VIMb/VSDG/1Ugxv9VIMb/VSDG/1Ugxv9VIMb/VSDG/1Ugxv9VIMX/VB/E/1Qfw/9QHrvkRRmggCUOVhnQTv8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEA////ADITdSpMHbKvVCDE+1Ugxv9VIMb/VSDG/1Ugxv9VIMb/VSDG/1Ugxv9VIMb/VSDG/1Ugxv9VIMb/VCDE/1Mfwv9TH8H/Ux/B/1Mfwv9SH7/7ShytrzEScSr///8AAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwMDAMto/wBVPoYsUSC3v1Ugxv9VIMb/VSDG/1Ugxv9VIMb/VSDG/1Ugxv9VIMb/VSDG/1Ugxv9VIMb/VSDG/1QfxP9SHsD/Uh6//1Iev/9SHr//Uh6//1Iev/9SHr//SxywvzMTdyymPf8AAQACAAAAAAAAAAAAAAAAAAAAAAD19fUAnaKQHbep1rVfLcn/VB/G/1Ugxv9VIMb/VSDG/1Ugxv9VIMb/VSDG/1Ugxv9VIMb/VSDG/1Ugxv9UH8P/UR6+/1Eevf9RHr3/UR69/1Eevf9RHr3/UR69/1Eevf9RHr3/ShuttS0RaB1PHrkAAAAAAAAAAAAAAAAAeXl5ADY2Ngnf4NyO18zu/V8tyf9UH8b/VSDG/1Ugxv9VIMb/VSDG/1Ugxv9VIMb/VSDG/1Ugxv9VIMb/VB/D/1EevP9QHrr/UB67/1Aeu/9QHrv/UB67/1Aeu/9QHrv/UB67/1Aeu/9QHrr9R
hqkjhEGKAknDloAAAAAAAAAAAD///8AxMTES/b39O3Zzu//Xy3J/1Qfxv9VIMb/VSDG/1Ugxv9VIMb/VSDG/1Ugxv9VIMb/VSDG/1Mfwv9QHbr/Tx24/08duP9PHbj/Tx24/08duP9PHbj/Tx24/08duP9PHbj/Tx24/08duf9NHLTtPheRS5s5/wAAAAAAm5ubAHBwcA/o6Oix+/z6/9jO7/9fLcn/VB/G/1Ugxv9VIMb/VSDG/1Ugxv9VIMb/VSDG/1Ugxv9TH8H/Tx24/04dtv9OHbb/Th22/04dtv9OHbb/Th22/04dtv9OHbb/Th22/04dtv9OHbb/Th22/04dtv9JG6mxIw1RDzAScQD///8AycnJRfX19fD7/Pr/2M3v/1wqyP9SHMX/UhzF/1Icxf9SHMX/UhzF/1Icxf9THcX/Ux7A/04ctf9NHLL/Thyz/04cs/9NHLP/TRyz/00cs/9OHLP/Thyz/04cs/9OHLP/Thyz/04cs/9NHLP/Thyz/0wcsPA/Fo9FYyTkAAAAAALk5OSN+fn5//r6+f/n4vT/noDd/5Z22v+Wdtr/lnba/5Z22v+Wdtr/mHfb/35g1/9KMMr/SC/H/0gvx/9IL8f/SC/H/0gvx/9IL8b/SC/G/0gvxv9HL8b/Ry/G/0cvxv9HL8b/Ry/G/0cvxv9HL8X/Ry7F/z8tuI0AAAACk5OTFu/v78X5+fn/+fn5//n5+f/6+vn/+fr5//n6+f/5+vn/+fr5//n6+f/9/fn/ub73/zhF8v82Q/L/NkPy/zZD8v82Q/L/NkPy/zZD8v82Q/L/NkPy/zZD8v82Q/L/NkPy/zZD8v82Q/L/NkPy/zVC8f81QvD/Mz/mxR8njhbDw8Mz9PT05fn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//z8+f+5vff/OEX0/zZD9P82Q/T/NkP0/zZD9P82Q/T/NkP0/zZD9P82Q/T/NkP0/zZD9P82Q/T/NkP0/zZD9P81QvH/NEHv/zRB7/8zQOrlKTO6M9XV1VD39/f0+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn//Pz5/7m99/84RfT/NkP0/zZD9P82Q/T/NkP0/zZD9P82Q/T/NkP0/zZD9P82Q/T/NkP0/zZD9P82Q/T/NULw/zRA7P80QOv/NEDr/zNA6fQsN8lQ3d3dYfj4+Pn5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/8/Pn/ub33/zhF9P82Q/T/NkP0/zZD9P82Q/T/NkP0/zZD9P82Q/T/NkP0/zZD9P82Q/T/NkP0/zVB7/8zQOn/Mz/o/zM/6P8zQOj/Mz/n+S04zmHh4eFl+Pj4+vn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//z8+f+5vff/OEX0/zZD9P82Q/T/NkP0/zZD9P82Q/T/NkP0/zZD9P82Q/T/NkP0/zZD9P80Qe7/Mz/m/zM/5f8zP+b/Mz/m/zM/5v8yP+X6LjnPZeDg4Fr4+Pj3+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn//Pz5/7m99/84RfT/NkP0/zZD9P82Q/T/NkP0/zZD9P82Q/T/NkP0/zZD9P82Q/T/NEHs/zI+4/8yPuP/Mj7j/zI+4/8yPuP/Mj7j/zI+4fctOMxa3NzcQvf39+/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/8/Pn/ub33/zhF9P82Q/T/NkP0/zZD9P82Q/T/NkP0/zZD9P82Q/T/NkP0/zRA6/8xPeH/MT3g/zE94P8xPeD/MT3g/zE94P8xPeD/MT3e7ys2xkLS0tIj9fX12fn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//z8+f+4vff/NkP0/zNB9P80QfT/NEH0/zRB9P80QfT/NEH0/zZC8/81P+n/Mjze/zI73f8yO93/Mjvd/zI73f8yO93/Mjvd/zI73f8xO9rZKTO7I56engjy8vKu+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+/r5/9ze+P+covf/mqD3/5qg9/+aoPf/mqD3/5qg9/+aoPf/UoLz/x1p5/8eaeb/Hmnm/x5p5v8eaeX/Hmnl/x5p5f8eaOX/Hmjl/yBh3a4jJokI////AO3t7Wr5+fn++fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+vr5//z8+f/8/Pn//Pz5//z8+f/8/Pn//Pz5//z8+f9dvfz/AJf+/wCZ/v8Amf7/AJn+/wCZ/v8Amf7/AJn+/wCZ/v8AmP7+AJLxagC4/wDr6+sA4eHhJPb29tv5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5/1u8/f8Alv//AJj//wCY//8AmP//AJj//wCY//8AmP//AJj//wCW/NsAieckAI/xALu7uwAAAAAA8vLygfn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/W7z9/wCW//8AmP//AJj//wCY//8AmP//AJj//wCY//8AmP//AJP3gQAAAAAAcr8AAAAAAOzs7ADk5OQe9vb2zPn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f9bvP3/AJb//wCY//8AmP//AJj//wCY//8AmP//AJj//wCW/MwAi+oeAJDxAAAAAAAAAAAAsLCwAP///wDv7+9O+Pj47Pn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5/1u8/f8Alv//AJj//wCY//8AmP//AJj//wCY//8Al/7sAJL0TgCr/wAAa7QAAAAAAAAAAAAAAAAA1tbWALS0tAPy8vJv+Pj49Pn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/W7z9/wCW//8AmP//AJj//wCY//8AmP//AJj+9ACU+G4AbrgDAIPaAAAAAAAAAAAAAAAAAAAAAAAAAAAA4uLiANfX1wbz8/Nz+Pj48Pn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f9bvP3/AJb//wCY//8AmP//AJj//wCY/vAAlflzAITcBgCK5wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4+PjANjY2ATy8vJZ+Pj42vn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5/1u7/f8Alf//AJf/
/wCY//8Al/7aAJT4WQCE3AQAiucAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA1NTUAB8fHwDw8PAr9vb2nPj4+O35+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/rNv7/3bG/P9rwfztM6r7nACR9SsAER0AAIPZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOvr6wDj4+MG8vLyOvb29pD4+PjS+fn58vn5+f35+fn/+fn5//n5+f/5+fn/+fn5/fn5+fL6+fjS/Pj2kP338jr/+eIG//fqAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADh4eEA1tbWAu/v7xv09PRJ9vb2dvb29pf39/eo9/f3qPb29pf29vZ29PT0Se/v7xvW1tYC4eHhAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP/gB///gAH//gAAf/wAAD/4AAAf8AAAD+AAAAfAAAADwAAAA4AAAAGAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAGAAAABwAAAA8AAAAPgAAAH4AAAB/AAAA/4AAAf/gAAf/8AAP//wAP/',yellow:'data:image/vndmicrosofticon;base64,AAABAAIAICAAAAEAIACoEAAAJgAAABAQAAABACAAaAQAAM4QAAAoAAAAIAAAAEAAAAABACAAAAAAAAAQAAASCwAAEgsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAZKhQAOWAiAEV0KgBFdCoAOWAiABkqFAAAAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA8ZAAAChAHAEp8JwBvu10AgNeSAInluACN7c4Aj/DXAI/w1wCN7c4AieW4AIDXkgBvu10ASnwnAAoQBwA8ZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAbLgAAAAAFAFmWMwB/1YwAj/DXAJX7+QCY//8AmP//AJj//wCY//8AmP//AJj//wCY//8AmP//AJX7+QCP79cAftWMAFmVMwAAAAUAGy4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA7v8AAD1mFQB6zXYAkPLdAJf+/gCY//8AmP//AJj//wCY//8AmP//AJj//wCY//8AmP//AJj//wCY//8AmP7/AJf+/wCV/P4AjvDdAHjKdgA8ZBUA6f8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP//AABWkCYAh+KoAJb8+QCY//8AmP//AJj//wCY//8AmP//AJj//wCY//8AmP//AJj//wCY//8AmP//AJf+/wCV+v8AlPr/AJT6/wCV+v8Akvf5AIPdqABTjCYA//8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgICABb//wAka5wqAozquwCY/v8AmP//AJj//wCY//8AmP//AJj//wCY//8AmP//AJj//wCY//8AmP//AJj//wCX/f8Ak/j/AJP3/wCT9/8Ak/f/AJP3/wCT9/8Akvb/AIbiuwBZlyoA//8AAAECAAAAAAAAAAAAAAAAAAAAAADz8/MAqJaJHZDD5rQLnP7/AJj//wCY//8AmP//AJj//wCY//8AmP//AJj//wCY//8AmP//AJj//wCY//8Alvz/AJL2/wCR9P8AkfT/AJH0/wCR9P8AkfT/AJH0/wCR9P8AkfT/AITftABQhh0AjO0AAAAAAAAAAAAAAAAAfX19ADw8PAni3tuPuuD5/Quc//8AmP//AJj//wCY//8AmP//AJj//wCY//8AmP//AJj//wCY//8AmP//AJb8/wCQ8/8Aj/H/AI/x/wCP8f8Aj/H/AI/x/wCP8f8Aj/H/AI/x/wCP8f8AjvD9AH7UjwAiOQkASHkAAAAAAAgICAD///8AxcXFT/j19O+94vv/Cpz//wCY//8AmP//AJj//wCY//8AmP//AJj//wCY//8AmP//AJj//wCV+/8Aj/H/AI3u/wCN7v8Aje7/AI3u/wCN7v8Aje7/AI3u/wCN7v8Aje7/AI3u/wCO7v8AiunvAHC8TwD//wAABQgAqKioAHp6ehHp6em3/fv5/7zh+v8KnP//AJj//wCY//8AmP//AJj//wCY//8AmP//AJj//wCY//8Alfr/AI7u/wCM6/8AjOv/AIzr/wCM6/8AjOv/AIzr/wCM6/8AjOv/AIzr/wCM6/8AjOv/AIzr/wCM6/8Ag9y3AERyEQBenQD///8AzMzMTfb29vP9+/n/vOH6/wqb//8Alv//AJb//wCW//8Alv//AJb//wCW//8Al///AJT5/wCL6/8Aiej/AIno/wCJ6P8Aiej/AIno/wCJ6P8Aiej/AIno/wCJ6P8Aiej/AIno/wCJ6P8Aiej/AIno/wCH5fMAb75NAMP/AAAAAAXl5eWX+fn5//v6+f/T6vr/Wbv9/0+3/f9Qt/3/ULf9/1C3/f9Qt/3/Ubj9/zew+/8InO//B5nr/weZ6/8Hmev/B5nq/weZ6v8Hmer/B5nq/weZ6v8Hmer/B5jq/weY6v8HmOn/B5jp/weY6f8HmOn/Bpjp/weP15cBAAAFpKSkHfDw8M/5+fn/+fn5//n5+f/1+Pn/9Pf5//T3+f/09/n/9Pf5//T3+f/4+Pn/o+T6/wq//f8Hv/3/CL/9/wi//f8Iv/3/CL/9/wi//f8Iv/3/CL/8/wi+/P8Ivvz/CL78/wi+/P8Ivvz/CL78/we9+/8HvPr/BrbxzwR9pR3Ly8tA9fX17Pn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//36+f+l5vv/CcL//wfB//8Hwf//B8H//wfB//8Hwf//B8H//wfB//8Hwf//B8H//wfB//8Hwf//B8H//wfB//8Hv/3/Br36/wa9+v8GuvbsBZnLQNra2mD39/f4+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn//fr5/6Xm+/8Jwf//B8H//wfB//8Hwf//B8H//wfB//8Hwf//B8H//wfB//8Hwf//B8H//wfB//8Hwf//B778/wa79/8Guvf/Brr3/wa59fgFo9hg4uLidPj4+P35+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/9+vn/peb7/wnB//8Hwf//B8H//wfB//8Hwf//B8H//wfB//8Hwf//B8
H//wfB//8Hwf//B8H//we++/8GufX/Brj0/wa49P8GuPT/Brfz/QWm3XTk5OR6+Pj4/fn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//36+f+l5vv/CcH//wfB//8Hwf//B8H//wfB//8Hwf//B8H//wfB//8Hwf//B8H//wfB//8Hvfr/Brfy/wa28f8GtvH/Brbx/wa28f8GtfD9BafdeuXl5W/4+Pj8+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn//fr5/6Xm+/8Jwf//B8H//wfB//8Hwf//B8H//wfB//8Hwf//B8H//wfB//8Hwf//B7z5/wa17/8GtO7/BrTu/wa07v8GtO7/BrTu/waz7fwFpdtv4eHhVvj4+Pb5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/9+vn/peb7/wnB//8Hwf//B8H//wfB//8Hwf//B8H//wfB//8Hwf//B8H//we7+P8Gsu3/BrHr/wax6/8Gsev/BrHr/wax6/8Gsev/BrDq9gWh1Vba2toz9vb25vn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//36+f+k5fv/BsH//wPA//8DwP//A8D//wPA//8DwP//A8D//wXA//8Guvb/BrDq/wau6P8Gruj/Bq7o/wau6P8Gruj/Bq7o/wau6P8GreXmBZnLM7+/vxH09PTC+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+/r5/83v+v9x2vz/btn9/2/Z/f9v2f3/b9n9/2/Z/f9v2f3/RdL5/yXG7v8mxOz/JsTs/ybE6/8mxOv/JsTr/yXE6/8lw+v/JcPr/yK95cIQirAR////APDw8IH5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+vn5//r5+f/6+fn/+vn5//r5+f/6+fn/+vn5//r5+f+H8Pz/Oer+/zzq/v886v7/POr+/zzq/v886v7/POr+/zzq/v886v3/OuDzgWz//wD09PQA5+fnNPf39+n5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5/4Xw/f846///O+v//zvr//876///O+v//zvr//876///O+v//zvp/ek32+00Ouf6AMrKygCzs7MF8/Pzmvn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/hfD9/zjr//876///O+v//zvr//876///O+v//zvr//876///OuX5miqptwUwv88AAAAAAPPz8wDp6eku9/f33fn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f+F8P3/OOv//zvr//876///O+v//zvr//876///O+v//zvp/d033O8uOuX5AAAAAAAAAAAAvr6+AP///wDx8fFl+Pj49fn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5/4Xw/f846///O+v//zvr//876///O+v//zvr//876v71OeP2ZY7//wAus8IAAAAAAAAAAAAAAAAA4ODgANPT0wj09PSI+fn5+vn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/hfD9/zjr//876///O+v//zvr//876///O+v/+jrm+Ygyx9gINdPlAAAAAAAAAAAAAAAAAAAAAAAAAAAA6enpAOHh4Q309PSM+fn5+Pn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f+F8P3/OOv//zvr//876///O+v//zvr//g65/qMNtXnDTjd7wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA6enpAOLi4gr09PRw+Pj45/n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5/4Pw/f816///Oev//zvr//876v7nOub5cDbW5wo33O4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ODgANHR0QLx8fE89/f3sfn5+fX5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+f/5+fn/t/T7/4Xx/f+A8P31Xez8sTnk9zwuxdUCNtTkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAREREAP///wDo6OgM9PT0Tff396T4+Pjf+fn5+Pn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//n5+fj5+Pjf9vf3pPL09E3m6OgM7/3/APtbOwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACMjIwD19fUA4uLiBvHx8Sn19fVd9vb2jff396739/e99/f3vff396729vaN9fX1XfHx8Snl4uIG9PX1AFEnIgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP/wD///gAH//gAAf/wAAD/4AAAf8AAAD+AAAAfAAAADwAAAA4AAAAGAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAGAAAABgAAAAcAAAAPgAAAH4AAAB/AAAA/4AAAf/AAAP/8AAP//wAP/KAAAABAAAAAgAAAAAQAgAAAAAAAABAAAEgsAABILAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABorgAAS34IAHTDNQCC22wAh+OMAIfjjACC22wAdMQ1AEx/CABorwAAAAAAAAAAAAAAAAAAAAAAAEBrAAAAAAAAecswAIzsngCU+OUAl/37AJj+/wCY/v8Al/37AJP35QCL6Z4Ad8gwAAAAAAA+aQAAAAAAcXd6AP8AAAAOiNtNAJP32gCY//8AmP//AJj//wCY//8AmP//AJb8/wCU+f8Ak/j/AI7w2gB+1E0AAAAAAEd4APn7/ADc2NU5T7P33gCX//8AmP//AJj//wCY//8AmP//AJX6/wCR8/8AkPL/AJDz/wCQ8/8AjOzeAHrOOQCR9AC3t7cO8e/vsGnA/f8Alf//AJf//wCX//8Al///AJP4/wCN7v8AjOz/AIzs/wCM7P8AjOz/AIzt/wCG4rAAY6oO4uLiT/j39/GIzfz/Mav+/zSs/v80rP7/FaH5/wOV7f8DlOv/A5Tr/wOU6/8DlOv/A5Tr/wOU6/8Dk+jxBIvVT+3t7ZT5+fn/8fb5/+vz+f/r9Pn/6vP5/
1nR+/8EvPz/B738/we9/P8Hvfz/B738/we9/P8HvPv/B7r4/wax7ZTy8vK7+fn5//n5+f/6+fn/+vn5//n5+f9e1f3/A8D//wfB//8Hwf//B8H//wfB//8HwP3/Brv3/wa59f8GtO678/Pzwfn5+f/5+fn/+fn5//n5+f/4+fn/XtX9/wPA//8Hwf//B8H//wfB//8Hv/z/Brfz/wa17/8Gte//BrHqwfPz86X5+fn/+fn5//n5+f/5+fn/+Pn5/2DW/f8Gwf//CsL//wrC//8Jv/v/CLXu/wix6f8Isen/CLHp/wet5KXy8vJo+fn5+vn5+f/5+fn/+fn5//n5+f/I7vr/quf7/6zn+/+m5/v/Tdz5/yzV9P8u1fT/LtX0/y7U8/ooyOpo7OzsH/f399D5+fn/+fn5//n5+f/5+fn//Pr5//36+f/++vn/9fn5/2rv/v857P//POz//zzs//886/3QOuLzH////wD09PRh+fn59vn5+f/5+fn/+fn5//n5+f/5+fn/+fn5//H4+f9o7v7/OOv//zvr//876//2Ouf6YUH//wDu7u4A6enpB/b29oT5+fn3+fn5//n5+f/5+fn/+fn5//n5+f/x+Pn/Zu7+/zfr//876//3Ouj8hDfc7wc44PMAAAAAAPHx8QDu7u4I9vb2aPj4+Nn5+fn9+fn5//n5+f/5+fn/8/n5/4zx/P1S7P7ZO+n8aDfh9Ag55PcAAAAAAAAAAAAAAAAA6+vrAN/f3wH19fUo9/f3fvj4+MH4+Pje+Pj43vj4+MHq9vh+w/H2KADM5wFk4e8AAAAAAAAAAADwDwAA4AcAAMADAACAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAQAAgAEAAMADAADgBwAA'};return{FaviconsByHue:FaviconsByHue};});'use strict';Polymer('tr-ui-b-info-bar-group',{ready:function(){this.messages_=[];},clearMessages:function(){this.messages_=[];this.updateContents_();},addMessage:function(text,opt_buttons){opt_buttons=opt_buttons||[];for(var i=0;i<opt_buttons.length;i++){if(opt_buttons[i].buttonText===undefined)
+throw new Error('buttonText must be provided');if(opt_buttons[i].onClick===undefined)
+throw new Error('onClick must be provided');}
+this.messages_.push({text:text,buttons:opt_buttons||[]});this.updateContents_();},updateContents_:function(){this.$.messages.textContent='';this.messages_.forEach(function(message){var bar=document.createElement('tr-ui-b-info-bar');bar.message=message.text;bar.visible=true;message.buttons.forEach(function(button){bar.addButton(button.buttonText,button.onClick);},this);this.$.messages.appendChild(bar);},this);}});'use strict';tr.exportTo('tr.ui',function(){var Task=tr.b.Task;function FindController(brushingStateController){this.brushingStateController_=brushingStateController;this.filterHits_=new tr.model.EventSet();this.currentHitIndex_=-1;this.activePromise_=Promise.resolve();this.activeTask_=undefined;};FindController.prototype={__proto__:Object.prototype,get model(){return this.brushingStateController_.model;},get brushingStateController(){return this.brushingStateController_;},enqueueOperation_:function(operation){var task;if(operation instanceof tr.b.Task)
+task=operation;else
+task=new tr.b.Task(operation,this);if(this.activeTask_){this.activeTask_=this.activeTask_.enqueue(task);}else{this.activeTask_=task;this.activePromise_=Task.RunWhenIdle(this.activeTask_);this.activePromise_.then(function(){this.activePromise_=undefined;this.activeTask_=undefined;}.bind(this));}},startFiltering:function(filterText){var sc=this.brushingStateController_;if(!sc)
+return;this.enqueueOperation_(function(){this.filterHits_=new tr.model.EventSet();this.currentHitIndex_=-1;}.bind(this));var stateFromString;try{stateFromString=sc.uiStateFromString(filterText);}catch(e){this.enqueueOperation_(function(){var overlay=new tr.ui.b.Overlay();overlay.textContent=e.message;overlay.title='UI State Navigation Error';overlay.visible=true;});return this.activePromise_;}
+if(stateFromString!==undefined){this.enqueueOperation_(sc.navToPosition.bind(this,stateFromString,true));}else{if(filterText.length===0){this.enqueueOperation_(sc.findTextCleared.bind(sc));}else{var filter=new tr.c.FullTextFilter(filterText);var filterHits=new tr.model.EventSet();this.enqueueOperation_(sc.addAllEventsMatchingFilterToSelectionAsTask(filter,filterHits));this.enqueueOperation_(function(){this.filterHits_=filterHits;sc.findTextChangedTo(filterHits);}.bind(this));}}
+return this.activePromise_;},get filterHits(){return this.filterHits_;},get currentHitIndex(){return this.currentHitIndex_;},find_:function(dir){var firstHit=this.currentHitIndex_===-1;if(firstHit&&dir<0)
+this.currentHitIndex_=0;var N=this.filterHits.length;this.currentHitIndex_=(this.currentHitIndex_+dir+N)%N;if(!this.brushingStateController_)
+return;this.brushingStateController_.findFocusChangedTo(this.filterHits.subEventSet(this.currentHitIndex_,1));},findNext:function(){this.find_(1);},findPrevious:function(){this.find_(-1);}};return{FindController:FindController};});'use strict';Polymer('tr-ui-find-control',{filterKeyDown:function(e){if(e.keyCode===27){var hkc=tr.b.getHotkeyControllerForElement(this);if(hkc){hkc.childRequestsBlur(this);}else{this.blur();}
 e.preventDefault();e.stopPropagation();return;}else if(e.keyCode===13){if(e.shiftKey)
 this.findPrevious();else
 this.findNext();}},filterBlur:function(e){this.updateHitCountEl();},filterFocus:function(e){this.$.filter.select();},filterMouseUp:function(e){e.preventDefault();},get controller(){return this.controller_;},set controller(c){this.controller_=c;this.updateHitCountEl();},focus:function(){this.$.filter.focus();},get hasFocus(){return this===document.activeElement;},filterTextChanged:function(){this.$.hitCount.textContent='';this.$.spinner.style.visibility='visible';this.controller.startFiltering(this.$.filter.value).then(function(){this.$.spinner.style.visibility='hidden';this.updateHitCountEl();}.bind(this));},findNext:function(){if(this.controller)
@@ -5272,11 +5792,12 @@
 return true;for(var i=0;i<this.subExpressions.length;i++){if(!this.subExpressions[i].evaluate(context))
 return false;}
 return true;}};tr.c.ScriptingObjectRegistry.register(function(){var exprs=[];for(var i=0;i<arguments.length;i++){exprs.push(arguments[i]);}
-return new FilterAllOf(exprs);},{name:'allOf'});return{FilterAllOf:FilterAllOf};});'use strict';tr.exportTo('tr.e.tquery',function(){function FilterAnyOf(opt_subExpressions){tr.e.tquery.Filter.call(this);this.subExpressions=opt_subExpressions||[];};FilterAnyOf.prototype={__proto__:tr.e.tquery.Filter.prototype,set subExpressions(exprs){this.subExpressions_=[];for(var i=0;i<exprs.length;i++){this.subExpressions_.push(tr.e.tquery.Filter.normalizeFilterExpression(exprs[i]));}},get subExpressions(){return this.subExpressions_;},evaluate:function(context){if(!this.subExpressions.length)
+return new FilterAllOf(exprs);},{name:'allOf'});return{FilterAllOf:FilterAllOf};});'use strict';tr.exportTo('tr.e.tquery',function(){function FilterNot(subExpression){tr.e.tquery.Filter.call(this);this.subExpression=subExpression;}
+FilterNot.prototype={__proto__:tr.e.tquery.Filter.prototype,set subExpression(expr){this.subExpression_=tr.e.tquery.Filter.normalizeFilterExpression(expr);},get subExpression(){return this.subExpression_;},evaluate:function(context){return!this.subExpression.evaluate(context);}};tr.c.ScriptingObjectRegistry.register(function(){var exprs=Array.prototype.slice.call(arguments);if(exprs.length!==1)
+throw new Error('not() must have exactly one subexpression');return new FilterNot(exprs[0]);},{name:'not'});return{FilterNot:FilterNot};});'use strict';tr.exportTo('tr.e.tquery',function(){function FilterAnyOf(opt_subExpressions){tr.e.tquery.Filter.call(this);this.subExpressions=opt_subExpressions||[];};FilterAnyOf.prototype={__proto__:tr.e.tquery.Filter.prototype,set subExpressions(exprs){this.subExpressions_=[];for(var i=0;i<exprs.length;i++){this.subExpressions_.push(tr.e.tquery.Filter.normalizeFilterExpression(exprs[i]));}},get subExpressions(){return this.subExpressions_;},evaluate:function(context){if(!this.subExpressions.length)
 return true;for(var i=0;i<this.subExpressions.length;i++){if(this.subExpressions[i].evaluate(context))
 return true;}
-return false;}};tr.c.ScriptingObjectRegistry.register(function(){var exprs=[];for(var i=0;i<arguments.length;i++){exprs.push(arguments[i]);}
-return new FilterAnyOf(exprs);},{name:'anyOf'});return{FilterAnyOf:FilterAnyOf};});'use strict';tr.exportTo('tr.e.tquery',function(){function FilterHasAncestor(opt_subExpression){this.subExpression=opt_subExpression;};FilterHasAncestor.prototype={__proto__:tr.e.tquery.Filter.prototype,set subExpression(expr){this.subExpression_=tr.e.tquery.Filter.normalizeFilterExpression(expr);},get subExpression(){return this.subExpression_;},evaluate:function(context){if(!this.subExpression)
+return false;}};tr.c.ScriptingObjectRegistry.register(function(){var exprs=Array.prototype.slice.call(arguments);return new FilterAnyOf(exprs);},{name:'anyOf'});tr.c.ScriptingObjectRegistry.register(function(){var exprs=Array.prototype.slice.call(arguments);return new tr.e.tquery.FilterNot(new FilterAnyOf(exprs));},{name:'noneOf'});return{FilterAnyOf:FilterAnyOf};});'use strict';tr.exportTo('tr.e.tquery',function(){function FilterHasAncestor(opt_subExpression){this.subExpression=opt_subExpression;};FilterHasAncestor.prototype={__proto__:tr.e.tquery.Filter.prototype,set subExpression(expr){this.subExpression_=tr.e.tquery.Filter.normalizeFilterExpression(expr);},get subExpression(){return this.subExpression_;},evaluate:function(context){if(!this.subExpression)
 return context.ancestors.length>0;while(context.ancestors.length){context=context.pop();if(this.subExpression.evaluate(context))
 return true;}
 return false;}};tr.c.ScriptingObjectRegistry.register(function(subExpression){return new FilterHasAncestor(subExpression);},{name:'hasAncestor'});return{FilterHasAncestor:FilterHasAncestor};});'use strict';tr.exportTo('tr.e.tquery',function(){function FilterHasDuration(minValueOrExpected,opt_maxValue){if(minValueOrExpected!==undefined&&opt_maxValue!==undefined){this.minValue=minValueOrExpected;this.maxValue=opt_maxValue;}else{this.expected=minValueOrExpected;}};FilterHasDuration.prototype={__proto__:tr.e.tquery.Filter.prototype,evaluate:function(context){if(context.event.duration===undefined)
@@ -5298,7 +5819,7 @@
 return;var promptEl=this.$.prompt;var command=promptEl.innerText;if(command.length===0)
 return;promptEl.innerText='';this.addLine_(String.fromCharCode(187)+' '+command);try{var result=this.controller_.executeCommand(command);}catch(e){result=e.stack||e.stackTrace;}
 if(result instanceof tr.e.tquery.TQuery){result.ready().then(function(selection){this.addLine_(selection.length+' matches');this.controller_.brushingStateController.showScriptControlSelection(selection);}.bind(this));}else{this.addLine_(result);}},addLine_:function(line){var historyEl=this.$.history;if(historyEl.innerText.length!==0)
-historyEl.innerText+='\n';historyEl.innerText+=line;},promptKeyPress:function(e){e.stopPropagation();},toggleVisibility:function(){var root=this.$.root;if(!this.visible){root.classList.remove('hidden');this._setFocused(true);}else{root.classList.add('hidden');this._setFocused(false);}},get hasFocus(){return this===document.activeElement;},get visible(){var root=this.$.root;return!root.classList.contains('hidden');},get controller(){return this.controller_;},set controller(c){this.controller_=c;}});'use strict';Polymer('tr-ui-side-panel',{ready:function(){},get rangeOfInterest(){throw new Error('Not implemented');},set rangeOfInterest(rangeOfInterest){throw new Error('Not implemented');},get selection(){throw new Error('Not implemented');},set selection(selection){throw new Error('Not implemented');},get model(){throw new Error('Not implemented');},set model(model){throw new Error('Not implemented');},get listeningToKeys(){throw new Error('Not implemented');},supportsModel:function(m){throw new Error('Not implemented');}});'use strict';Polymer('tr-ui-side-panel-container',{ready:function(){this.activePanelContainer_=this.$.active_panel_container;this.tabStrip_=this.$.tab_strip;this.rangeOfInterest_=new tr.b.Range();this.brushingStateController_=undefined;this.onSelectionChanged_=this.onSelectionChanged_.bind(this);this.onModelChanged_=this.onModelChanged_.bind(this);},get brushingStateController(){return this.brushingStateController_;},set brushingStateController(brushingStateController){if(this.brushingStateController){this.brushingStateController_.removeEventListener('change',this.onSelectionChanged_);this.brushingStateController_.removeEventListener('model-changed',this.onModelChanged_);}
+historyEl.innerText+='\n';historyEl.innerText+=line;},promptKeyPress:function(e){e.stopPropagation();},toggleVisibility:function(){var root=this.$.root;if(!this.visible){root.classList.remove('hidden');this._setFocused(true);}else{root.classList.add('hidden');this._setFocused(false);}},get hasFocus(){return this===document.activeElement;},get visible(){var root=this.$.root;return!root.classList.contains('hidden');},get controller(){return this.controller_;},set controller(c){this.controller_=c;}});'use strict';Polymer('tr-ui-side-panel-container',{ready:function(){this.activePanelContainer_=this.$.active_panel_container;this.tabStrip_=this.$.tab_strip;this.rangeOfInterest_=new tr.b.Range();this.brushingStateController_=undefined;this.onSelectionChanged_=this.onSelectionChanged_.bind(this);this.onModelChanged_=this.onModelChanged_.bind(this);},get brushingStateController(){return this.brushingStateController_;},set brushingStateController(brushingStateController){if(this.brushingStateController){this.brushingStateController_.removeEventListener('change',this.onSelectionChanged_);this.brushingStateController_.removeEventListener('model-changed',this.onModelChanged_);}
 this.brushingStateController_=brushingStateController;if(this.brushingStateController){this.brushingStateController_.addEventListener('change',this.onSelectionChanged_);this.brushingStateController_.addEventListener('model-changed',this.onModelChanged_);}},get selection(){return this.brushingStateController_.selection;},onSelectionChanged_:function(){if(this.activePanel)
 this.activePanel.selection=this.selection;},get model(){return this.brushingStateController_.model;},onModelChanged_:function(){this.activePanelType_=undefined;this.updateContents_();},get expanded(){this.hasAttribute('expanded');},get activePanel(){if(this.activePanelContainer_.children.length===0)
 return undefined;return this.activePanelContainer_.children[0];},get activePanelType(){return this.activePanelType_;},set activePanelType(panelType){if(this.model===undefined)
@@ -5313,14 +5834,14 @@
 supported.reason;labelEl.style.display='none';}
 this.tabStrip_.appendChild(labelEl);},this);if(previouslyActivePanelType&&supportedPanelTypes.indexOf(previouslyActivePanelType)!=-1){this.activePanelType=previouslyActivePanelType;this.setAttribute('expanded',true);}else{this.activePanelContainer_.textContent='';this.removeAttribute('expanded');}},get rangeOfInterest(){return this.rangeOfInterest_;},set rangeOfInterest(range){if(range==undefined)
 throw new Error('Must not be undefined');this.rangeOfInterest_=range;if(this.activePanel)
-this.activePanel.rangeOfInterest=range;}});'use strict';Polymer('tr-ui-timeline-view-help-overlay',{ready:function(){var mod=tr.isMac?'cmd ':'ctrl';var spans=this.shadowRoot.querySelectorAll('span.mod');for(var i=0;i<spans.length;i++){spans[i].textContent=mod;}}});'use strict';tr.exportTo('tr.b.u',function(){function GenericTable(items){if(items!==undefined)
+this.activePanel.rangeOfInterest=range;}});'use strict';Polymer('tr-ui-timeline-view-help-overlay',{ready:function(){var mod=tr.isMac?'cmd ':'ctrl';var spans=this.shadowRoot.querySelectorAll('span.mod');for(var i=0;i<spans.length;i++){spans[i].textContent=mod;}}});'use strict';tr.exportTo('tr.v',function(){function GenericTable(items){if(items!==undefined)
 this.items=items;else
-this.items=[];};GenericTable.prototype={};return{GenericTable:GenericTable};});'use strict';tr.exportTo('tr.ui.units',function(){var ArrayOfNumbersSummaryModes={AVERAGE_MODE:'average-mode',TOTAL_MODE:'total-mode'};return{ArrayOfNumbersSummaryModes:ArrayOfNumbersSummaryModes};});'use strict';Polymer('tr-ui-u-array-of-numbers-span',{created:function(){this.numbers_=undefined;this.summaryMode_=tr.ui.units.ArrayOfNumbersSummaryModes.AVERAGE_MODE;},get summaryMode(){return this.summaryMode_;},set summaryMode(summaryMode){this.summaryMode_=summaryMode;this.updateContents_();},get numbers(){return this.numbers_;},set numbers(numbers){if(numbers===undefined){this.numbers_=undefined;this.updateContents_();return;}
+this.items=[];};GenericTable.prototype={};return{GenericTable:GenericTable};});'use strict';tr.exportTo('tr.v.ui',function(){var ArrayOfNumbersSummaryModes={AVERAGE_MODE:'average-mode',TOTAL_MODE:'total-mode'};return{ArrayOfNumbersSummaryModes:ArrayOfNumbersSummaryModes};});'use strict';Polymer('tr-v-ui-array-of-numbers-span',{created:function(){this.numbers_=undefined;this.summaryMode_=tr.v.ui.ArrayOfNumbersSummaryModes.AVERAGE_MODE;},get summaryMode(){return this.summaryMode_;},set summaryMode(summaryMode){this.summaryMode_=summaryMode;this.updateContents_();},get numbers(){return this.numbers_;},set numbers(numbers){if(numbers===undefined){this.numbers_=undefined;this.updateContents_();return;}
 if(!(numbers instanceof Array))
 throw new Error('Must provide an array');this.numbers_=numbers;this.updateContents_();},updateContents_:function(){if(this.numbers_===undefined){this.shadowRoot.textContent='-';return;}
-var ArrayOfNumbersSummaryModes=tr.ui.units.ArrayOfNumbersSummaryModes;var value;if(this.summaryMode_===ArrayOfNumbersSummaryModes.AVERAGE_MODE)
+var ArrayOfNumbersSummaryModes=tr.v.ui.ArrayOfNumbersSummaryModes;var value;if(this.summaryMode_===ArrayOfNumbersSummaryModes.AVERAGE_MODE)
 value=tr.b.Statistics.mean(this.numbers_);else
-value=tr.b.Statistics.sum(this.numbers_);var valueRounded=Math.round(value*1000.0)/1000.0;this.shadowRoot.textContent=valueRounded;}});'use strict';tr.exportTo('tr.ui.units',function(){var TEXT_COLUMN_MODE=1;var NUMERIC_COLUMN_MODE=2;var ELEMENT_COLUMN_MODE=3;function isNumeric(value){if((typeof value)==='number')
+value=tr.b.Statistics.sum(this.numbers_);var valueRounded=Math.round(value*1000.0)/1000.0;this.shadowRoot.textContent=valueRounded;}});'use strict';tr.exportTo('tr.v.ui',function(){var TEXT_COLUMN_MODE=1;var NUMERIC_COLUMN_MODE=2;var ELEMENT_COLUMN_MODE=3;function isNumeric(value){if((typeof value)==='number')
 return true;else if(value instanceof Number)
 return true;return false;}
 function GenericTableViewTotalsItem(opt_values){if(opt_values!==undefined)
@@ -5337,11 +5858,11 @@
 return;if(isNumeric(fieldValue))
 return;if(fieldValue instanceof HTMLElement){this.columnMode_=ELEMENT_COLUMN_MODE;return;}
 if(this.columnMode_===NUMERIC_COLUMN_MODE)
-this.columnMode_=TEXT_COLUMN_MODE;},value:function(item){var fieldValue=item[this.fieldName];if(fieldValue instanceof GenericTableViewTotalsItem){var span=document.createElement('tr-ui-u-array-of-numbers-span');span.summaryMode=tr.ui.units.ArrayOfNumbersSummaryModes.TOTAL_MODE;span.numbers=fieldValue.values;return span;}
+this.columnMode_=TEXT_COLUMN_MODE;},value:function(item){var fieldValue=item[this.fieldName];if(fieldValue instanceof GenericTableViewTotalsItem){var span=document.createElement('tr-v-ui-array-of-numbers-span');span.summaryMode=tr.v.ui.ArrayOfNumbersSummaryModes.TOTAL_MODE;span.numbers=fieldValue.values;return span;}
 if(fieldValue===undefined)
 return'-';if(fieldValue instanceof HTMLElement)
 return fieldValue;if(fieldValue instanceof Object){var gov=document.createElement('tr-ui-a-generic-object-view');gov.object=fieldValue;return gov;}
-return fieldValue;}};Polymer('tr-ui-u-generic-table-view',{created:function(){this.items_=undefined;this.importantColumNames_=[];},get items(){return this.items_;},set items(itemsOrGenericTable){if(itemsOrGenericTable===undefined){this.items_=undefined;}else if(itemsOrGenericTable instanceof Array){this.items_=itemsOrGenericTable;}else if(itemsOrGenericTable instanceof tr.b.u.GenericTable){this.items_=itemsOrGenericTable.items;}
+return fieldValue;}};Polymer('tr-v-ui-generic-table-view',{created:function(){this.items_=undefined;this.importantColumNames_=[];},get items(){return this.items_;},set items(itemsOrGenericTable){if(itemsOrGenericTable===undefined){this.items_=undefined;}else if(itemsOrGenericTable instanceof Array){this.items_=itemsOrGenericTable;}else if(itemsOrGenericTable instanceof tr.v.GenericTable){this.items_=itemsOrGenericTable.items;}
 this.updateContents_();},get importantColumNames(){return this.importantColumNames_;},set importantColumNames(importantColumNames){this.importantColumNames_=importantColumNames;this.updateContents_();},createColumns_:function(){var columnsByName={};this.items_.forEach(function(item){tr.b.iterItems(item,function(itemFieldName,itemFieldValue){var colDesc=columnsByName[itemFieldName];if(colDesc!==undefined){colDesc.updateModeGivenValue(itemFieldValue);return;}
 colDesc=new GenericTableViewColumnDescriptor(itemFieldName,itemFieldValue);columnsByName[itemFieldName]=colDesc;},this);},this);var columns=tr.b.dictionaryValues(columnsByName);if(columns.length===0)
 return undefined;var isColumnNameImportant={};var importantColumNames=this.importantColumNames||[];importantColumNames.forEach(function(icn){isColumnNameImportant[icn]=true;});columns.sort(function(a,b){var iA=isColumnNameImportant[a.title]?1:0;var iB=isColumnNameImportant[b.title]?1:0;if((iB-iA)!==0)
@@ -5353,8 +5874,8 @@
 return;var totalsItem=new GenericTableViewTotalsItem();this.items_.forEach(function(item){var fieldValue=item[column.fieldName];if(fieldValue===undefined||fieldValue===null)
 return;totalsItem.values.push(fieldValue);});totalsItems[column.fieldName]=totalsItem;},this);return[totalsItems];},updateContents_:function(){var columns;if(this.items_!==undefined)
 columns=this.createColumns_();if(!columns){this.$.table.tableColumns=[];this.$.table.tableRows=[];this.$.table.footerRows=[];return;}
-this.$.table.tableColumns=columns;this.$.table.tableRows=this.items_;this.$.table.footerRows=this.createFooterRowsIfNeeded_(columns);this.$.table.rebuild();},get selectionMode(){return this.$.table.selectionMode;},set selectionMode(selectionMode){this.$.table.selectionMode=selectionMode;},get rowHighlightStyle(){return this.$.table.rowHighlightStyle;},set rowHighlightStyle(rowHighlightStyle){this.$.table.rowHighlightStyle=rowHighlightStyle;},get cellHighlightStyle(){return this.$.table.cellHighlightStyle;},set cellHighlightStyle(cellHighlightStyle){this.$.table.cellHighlightStyle=cellHighlightStyle;}});return{GenericTableViewTotalsItem:GenericTableViewTotalsItem,GenericTableViewColumnDescriptor:GenericTableViewColumnDescriptor};});'use strict';Polymer('tr-ui-timeline-view-metadata-overlay',{created:function(){this.metadata_=undefined;},get metadata(){return this.metadata_;},set metadata(metadata){this.metadata_=metadata;this.$.gtv.items=this.metadata_;}});'use strict';Polymer('tr-ui-u-preferred-display-unit',{ready:function(){this.preferredTimeDisplayMode_=undefined;},attached:function(){tr.b.u.Units.didPreferredTimeDisplayUnitChange();},detached:function(){tr.b.u.Units.didPreferredTimeDisplayUnitChange();},get preferredTimeDisplayMode(){return this.preferredTimeDisplayMode_;},set preferredTimeDisplayMode(v){if(this.preferredTimeDisplayMode_===v)
-return;this.preferredTimeDisplayMode_=v;tr.b.u.Units.didPreferredTimeDisplayUnitChange();}});'use strict';Polymer('tr-ui-timeline-view',{ready:function(){this.tabIndex=0;this.titleEl_=this.$.title;this.leftControlsEl_=this.$.left_controls;this.rightControlsEl_=this.$.right_controls;this.collapsingControlsEl_=this.$.collapsing_controls;this.sidePanelContainer_=this.$.side_panel_container;this.brushingStateController_=new tr.c.BrushingStateController(this);this.findCtl_=this.$.view_find_control;this.findCtl_.controller=new tr.ui.FindController(this.brushingStateController_);this.scriptingCtl_=document.createElement('tr-ui-scripting-control');this.scriptingCtl_.controller=new tr.c.ScriptingController(this.brushingStateController_);this.sidePanelContainer_.brushingStateController=this.brushingStateController_;if(window.tr.e&&window.tr.e.rail&&window.tr.e.rail.RAILScore){this.railScoreSpan_=document.createElement('tr-ui-e-rail-rail-score-span');this.rightControls.appendChild(this.railScoreSpan_);}else{this.railScoreSpan_=undefined;}
+this.$.table.tableColumns=columns;this.$.table.tableRows=this.items_;this.$.table.footerRows=this.createFooterRowsIfNeeded_(columns);this.$.table.rebuild();},get selectionMode(){return this.$.table.selectionMode;},set selectionMode(selectionMode){this.$.table.selectionMode=selectionMode;},get rowHighlightStyle(){return this.$.table.rowHighlightStyle;},set rowHighlightStyle(rowHighlightStyle){this.$.table.rowHighlightStyle=rowHighlightStyle;},get cellHighlightStyle(){return this.$.table.cellHighlightStyle;},set cellHighlightStyle(cellHighlightStyle){this.$.table.cellHighlightStyle=cellHighlightStyle;}});return{GenericTableViewTotalsItem:GenericTableViewTotalsItem,GenericTableViewColumnDescriptor:GenericTableViewColumnDescriptor};});'use strict';Polymer('tr-ui-timeline-view-metadata-overlay',{created:function(){this.metadata_=undefined;},get metadata(){return this.metadata_;},set metadata(metadata){this.metadata_=metadata;this.$.gtv.items=this.metadata_;}});'use strict';Polymer('tr-v-ui-preferred-display-unit',{ready:function(){this.preferredTimeDisplayMode_=undefined;},attached:function(){tr.v.Unit.didPreferredTimeDisplayUnitChange();},detached:function(){tr.v.Unit.didPreferredTimeDisplayUnitChange();},get preferredTimeDisplayMode(){return this.preferredTimeDisplayMode_;},set preferredTimeDisplayMode(v){if(this.preferredTimeDisplayMode_===v)
+return;this.preferredTimeDisplayMode_=v;tr.v.Unit.didPreferredTimeDisplayUnitChange();}});'use strict';Polymer('tr-ui-timeline-view',{ready:function(){this.tabIndex=0;this.titleEl_=this.$.title;this.leftControlsEl_=this.$.left_controls;this.rightControlsEl_=this.$.right_controls;this.collapsingControlsEl_=this.$.collapsing_controls;this.sidePanelContainer_=this.$.side_panel_container;this.brushingStateController_=new tr.c.BrushingStateController(this);this.findCtl_=this.$.view_find_control;this.findCtl_.controller=new tr.ui.FindController(this.brushingStateController_);this.scriptingCtl_=document.createElement('tr-ui-scripting-control');this.scriptingCtl_.controller=new tr.c.ScriptingController(this.brushingStateController_);this.sidePanelContainer_.brushingStateController=this.brushingStateController_;if(window.tr.metrics&&window.tr.metrics.sh&&window.tr.metrics.sh.SystemHealthMetric){this.railScoreSpan_=document.createElement('tr-metrics-ui-sh-system-health-span');this.rightControls.appendChild(this.railScoreSpan_);}else{this.railScoreSpan_=undefined;}
 this.optionsDropdown_=this.$.view_options_dropdown;this.optionsDropdown_.iconElement.textContent='View Options';this.showFlowEvents_=false;this.optionsDropdown_.appendChild(tr.ui.b.createCheckBox(this,'showFlowEvents','tr.ui.TimelineView.showFlowEvents',false,'Flow events'));this.highlightVSync_=false;this.highlightVSyncCheckbox_=tr.ui.b.createCheckBox(this,'highlightVSync','tr.ui.TimelineView.highlightVSync',false,'Highlight VSync');this.optionsDropdown_.appendChild(this.highlightVSyncCheckbox_);this.initMetadataButton_();this.initConsoleButton_();this.initHelpButton_();this.collapsingControls.appendChild(this.scriptingCtl_);this.dragEl_=this.$.drag_handle;this.analysisEl_=this.$.analysis;this.analysisEl_.brushingStateController=this.brushingStateController_;this.addEventListener('requestSelectionChange',function(e){var sc=this.brushingStateController_;sc.changeSelectionFromRequestSelectionChangeEvent(e.selection);}.bind(this));this.onViewportChanged_=this.onViewportChanged_.bind(this);this.bindKeyListeners_();this.dragEl_.target=this.analysisEl_;},domReady:function(){this.trackViewContainer_=this.querySelector('#track_view_container');},get globalMode(){return this.hotkeyController.globalMode;},set globalMode(globalMode){globalMode=!!globalMode;this.brushingStateController_.historyEnabled=globalMode;this.hotkeyController.globalMode=globalMode;},get hotkeyController(){return this.$.hkc;},updateDocumentFavicon:function(){var hue;if(!this.model)
 hue='blue';else
 hue=this.model.faviconHue;var faviconData=tr.ui.b.FaviconsByHue[hue];if(faviconData===undefined)
@@ -5367,11 +5888,11 @@
 showEl.addEventListener('click',onClick.bind(this));this.updateMetadataButtonVisibility_();},updateMetadataButtonVisibility_:function(){var showEl=this.$.view_metadata_button;showEl.style.display=(this.model&&this.model.metadata.length)?'':'none';},get leftControls(){return this.leftControlsEl_;},get rightControls(){return this.rightControlsEl_;},get collapsingControls(){return this.collapsingControlsEl_;},get viewTitle(){return this.titleEl_.textContent.substring(this.titleEl_.textContent.length-2);},set viewTitle(text){if(text===undefined){this.titleEl_.textContent='';this.titleEl_.hidden=true;return;}
 this.titleEl_.hidden=false;this.titleEl_.textContent=text;},get model(){if(this.trackView_)
 return this.trackView_.model;return undefined;},set model(model){var modelInstanceChanged=model!=this.model;var modelValid=model&&!model.bounds.isEmpty;var importWarningsEl=this.shadowRoot.querySelector('#import-warnings');importWarningsEl.textContent='';if(modelInstanceChanged){if(this.railScoreSpan_)
-this.railScoreSpan_.railScore=undefined;this.trackViewContainer_.textContent='';if(this.trackView_){this.trackView_.viewport.removeEventListener('change',this.onViewportChanged_);this.trackView_.brushingStateController=undefined;this.trackView_.detach();this.trackView_=undefined;}
+this.railScoreSpan_.model=undefined;this.trackViewContainer_.textContent='';if(this.trackView_){this.trackView_.viewport.removeEventListener('change',this.onViewportChanged_);this.trackView_.brushingStateController=undefined;this.trackView_.detach();this.trackView_=undefined;}
 this.brushingStateController_.modelWillChange();}
 if(modelValid&&!this.trackView_){this.trackView_=document.createElement('tr-ui-timeline-track-view');this.trackView_.timelineView=this;this.trackView.brushingStateController=this.brushingStateController_;this.trackViewContainer_.appendChild(this.trackView_);this.trackView_.viewport.addEventListener('change',this.onViewportChanged_);}
-if(modelValid){this.trackView_.model=model;this.trackView_.viewport.showFlowEvents=this.showFlowEvents;this.trackView_.viewport.highlightVSync=this.highlightVSync;if(this.railScoreSpan_){var railScore=tr.e.rail.RAILScore.fromModel(model);this.railScoreSpan_.railScore=railScore;}
-this.$.display_unit.preferredTimeDisplayMode=model.intrinsicTimeUnit;}
+if(modelValid){this.trackView_.model=model;this.trackView_.viewport.showFlowEvents=this.showFlowEvents;this.trackView_.viewport.highlightVSync=this.highlightVSync;if(this.railScoreSpan_)
+this.railScoreSpan_.model=model;this.$.display_unit.preferredTimeDisplayMode=model.intrinsicTimeUnit;}
 if(model){model.importWarningsThatShouldBeShownToUser.forEach(function(importWarning){importWarningsEl.addMessage('Import Warning: '+importWarning.type+': '+
 importWarning.message);},this);}
 if(modelInstanceChanged){this.updateMetadataButtonVisibility_();this.brushingStateController_.modelDidChange();this.onViewportChanged_();}},get brushingStateController(){return this.brushingStateController_;},get trackView(){return this.trackView_;},get settings(){if(!this.settings_)
@@ -5381,56 +5902,7 @@
 this.focus();else
 this.findCtl_.focus();e.preventDefault();e.stopPropagation();}}));hkc.addHotKey(new tr.ui.b.HotKey({eventType:'keypress',keyCode:'?'.charCodeAt(0),useCapture:false,thisArg:this,callback:function(e){this.$.view_help_button.click();e.stopPropagation();}}));hkc.addHotKey(new tr.ui.b.HotKey({eventType:'keypress',keyCode:'v'.charCodeAt(0),useCapture:false,thisArg:this,callback:function(e){this.toggleHighlightVSync_();e.stopPropagation();}}));},onViewportChanged_:function(e){var spc=this.sidePanelContainer_;if(!this.trackView_){spc.rangeOfInterest.reset();return;}
 var vr=this.trackView_.viewport.interestRange.asRangeObject();if(!spc.rangeOfInterest.equals(vr))
-spc.rangeOfInterest=vr;if(this.railScoreSpan_&&this.model){var railScore=tr.e.rail.RAILScore.fromModel(this.model,vr);this.railScoreSpan_.railScore=railScore;}},toggleHighlightVSync_:function(){this.highlightVSyncCheckbox_.checked=!this.highlightVSyncCheckbox_.checked;},setFindCtlText:function(string){this.findCtl_.setText(string);}});'use strict';tr.exportTo('tr.e.audits',function(){var MAIN_FRAMETIME_TYPE='main_frametime_type';var IMPL_FRAMETIME_TYPE='impl_frametime_type';var MAIN_RENDERING_STATS='BenchmarkInstrumentation::MainThreadRenderingStats';var IMPL_RENDERING_STATS='BenchmarkInstrumentation::ImplThreadRenderingStats';function getSlicesIntersectingRange(rangeOfInterest,slices){var slicesInFilterRange=[];for(var i=0;i<slices.length;i++){var slice=slices[i];if(rangeOfInterest.intersectsExplicitRangeInclusive(slice.start,slice.end))
-slicesInFilterRange.push(slice);}
-return slicesInFilterRange;}
-function ChromeProcessHelper(modelHelper,process){this.modelHelper=modelHelper;this.process=process;}
-ChromeProcessHelper.prototype={get pid(){return this.process.pid;},getFrameEventsInRange:function(frametimeType,range){var titleToGet;if(frametimeType==MAIN_FRAMETIME_TYPE)
-titleToGet=MAIN_RENDERING_STATS;else
-titleToGet=IMPL_RENDERING_STATS;var frameEvents=[];this.process.iterateAllEvents(function(event){if(event.title!==titleToGet)
-return;if(range.intersectsExplicitRangeInclusive(event.start,event.end))
-frameEvents.push(event);});frameEvents.sort(function(a,b){return a.start-b.start});return frameEvents;}};function getFrametimeDataFromEvents(frameEvents){var frametimeData=[];for(var i=1;i<frameEvents.length;i++){var diff=frameEvents[i].start-frameEvents[i-1].start;frametimeData.push({'x':frameEvents[i].start,'frametime':diff});}
-return frametimeData;}
-return{ChromeProcessHelper:ChromeProcessHelper,MAIN_FRAMETIME_TYPE:MAIN_FRAMETIME_TYPE,IMPL_FRAMETIME_TYPE:IMPL_FRAMETIME_TYPE,MAIN_RENDERING_STATS:MAIN_RENDERING_STATS,IMPL_RENDERING_STATS:IMPL_RENDERING_STATS,getSlicesIntersectingRange:getSlicesIntersectingRange,getFrametimeDataFromEvents:getFrametimeDataFromEvents};});'use strict';tr.exportTo('tr.e.audits',function(){function ChromeBrowserHelper(modelHelper,process){tr.e.audits.ChromeProcessHelper.call(this,modelHelper,process);this.mainThread_=process.findAtMostOneThreadNamed('CrBrowserMain');}
-ChromeBrowserHelper.isBrowserProcess=function(process){return!!process.findAtMostOneThreadNamed('CrBrowserMain');};ChromeBrowserHelper.prototype={__proto__:tr.e.audits.ChromeProcessHelper.prototype,get rendererHelpers(){return this.modelHelper.rendererHelpers;},getLoadingEventsInRange:function(rangeOfInterest){return this.getAllAsyncSlicesMatching(function(slice){return slice.title.indexOf('WebContentsImpl Loading')===0&&rangeOfInterest.intersectsExplicitRangeInclusive(slice.start,slice.end);});},getCommitProvisionalLoadEventsInRange:function(rangeOfInterest){return this.getAllAsyncSlicesMatching(function(slice){return slice.title==='RenderFrameImpl::didCommitProvisionalLoad'&&rangeOfInterest.intersectsExplicitRangeInclusive(slice.start,slice.end);});},get hasLatencyEvents(){var hasLatency=false;this.modelHelper.model.getAllThreads().some(function(thread){thread.iterateAllEvents(function(event){if(!event.isTopLevel)
-return;if(!(event instanceof tr.e.cc.InputLatencyAsyncSlice))
-return;hasLatency=true;});return hasLatency;});return hasLatency;},getLatencyEventsInRange:function(rangeOfInterest){return this.getAllAsyncSlicesMatching(function(slice){return(slice.title.indexOf('InputLatency')===0)&&rangeOfInterest.intersectsExplicitRangeInclusive(slice.start,slice.end);});},getAllAsyncSlicesMatching:function(pred,opt_this){var events=[];this.iterAllThreads(function(thread){thread.iterateAllEvents(function(slice){if(pred.call(opt_this,slice))
-events.push(slice);});});return events;},getAllNetworkEventsInRange:function(rangeOfInterest){var networkEvents=[];this.modelHelper.model.getAllThreads().forEach(function(thread){thread.asyncSliceGroup.slices.forEach(function(slice){var match=false;if(slice.category=='net'||slice.category=='disabled-by-default-netlog'||slice.category=='netlog'){match=true;}
-if(!match)
-return;if(rangeOfInterest.intersectsExplicitRangeInclusive(slice.start,slice.end))
-networkEvents.push(slice);});});return networkEvents;},iterAllThreads:function(func,opt_this){tr.b.iterItems(this.process.threads,function(tid,thread){func.call(opt_this,thread);});tr.b.iterItems(this.rendererHelpers,function(pid,rendererHelper){var rendererProcess=rendererHelper.process;tr.b.iterItems(rendererProcess.threads,function(tid,thread){func.call(opt_this,thread);});},this);}};return{ChromeBrowserHelper:ChromeBrowserHelper};});'use strict';tr.exportTo('tr.e.audits',function(){function ChromeGpuHelper(modelHelper,process){tr.e.audits.ChromeProcessHelper.call(this,modelHelper,process);this.mainThread_=process.findAtMostOneThreadNamed('CrGpuMain');};ChromeGpuHelper.isGpuProcess=function(process){if(process.findAtMostOneThreadNamed('CrBrowserMain')||process.findAtMostOneThreadNamed('CrRendererMain'))
-return false;return process.findAtMostOneThreadNamed('CrGpuMain');};ChromeGpuHelper.prototype={__proto__:tr.e.audits.ChromeProcessHelper.prototype,get mainThread(){return this.mainThread_;}};return{ChromeGpuHelper:ChromeGpuHelper};});'use strict';tr.exportTo('tr.e.audits',function(){function ChromeRendererHelper(modelHelper,process){tr.e.audits.ChromeProcessHelper.call(this,modelHelper,process);this.mainThread_=process.findAtMostOneThreadNamed('CrRendererMain');this.compositorThread_=process.findAtMostOneThreadNamed('Compositor');this.rasterWorkerThreads_=process.findAllThreadsMatching(function(t){if(t.name===undefined)
-return false;if(t.name.indexOf('CompositorTileWorker')===0)
-return true;if(t.name.indexOf('CompositorRasterWorker')===0)
-return true;return false;});};ChromeRendererHelper.isRenderProcess=function(process){if(!process.findAtMostOneThreadNamed('CrRendererMain'))
-return false;if(!process.findAtMostOneThreadNamed('Compositor'))
-return false;return true;};ChromeRendererHelper.prototype={__proto__:tr.e.audits.ChromeProcessHelper.prototype,get mainThread(){return this.mainThread_;},get compositorThread(){return this.compositorThread_;},get rasterWorkerThreads(){return this.rasterWorkerThreads_;}};return{ChromeRendererHelper:ChromeRendererHelper};});'use strict';tr.exportTo('tr.e.audits',function(){function findChromeBrowserProcess(model){var browserProcesses=[];model.getAllProcesses().forEach(function(process){if(!tr.e.audits.ChromeBrowserHelper.isBrowserProcess(process))
-return;browserProcesses.push(process);},this);if(browserProcesses.length===0)
-return undefined;if(browserProcesses.length>1)
-return undefined;return browserProcesses[0];}
-function findChromeRenderProcesses(model){var rendererProcesses=[];model.getAllProcesses().forEach(function(process){if(!tr.e.audits.ChromeRendererHelper.isRenderProcess(process))
-return;rendererProcesses.push(process);});return rendererProcesses;}
-function findChromeGpuProcess(model){var gpuProcesses=model.getAllProcesses().filter(tr.e.audits.ChromeGpuHelper.isGpuProcess);if(gpuProcesses.length!=1)
-return undefined;return gpuProcesses[0];}
-function ChromeModelHelper(model){this.model_=model;this.browserProcess_=findChromeBrowserProcess(model);if(this.browserProcess_){this.browserHelper_=new tr.e.audits.ChromeBrowserHelper(this,this.browserProcess_);}else{this.browserHelper_=undefined;}
-var gpuProcess=findChromeGpuProcess(model);if(gpuProcess){this.gpuHelper_=new tr.e.audits.ChromeGpuHelper(this,gpuProcess);}else{this.gpuHelper_=undefined;}
-var rendererProcesses_=findChromeRenderProcesses(model);this.rendererHelpers_={};rendererProcesses_.forEach(function(renderProcess){var rendererHelper=new tr.e.audits.ChromeRendererHelper(this,renderProcess);this.rendererHelpers_[rendererHelper.pid]=rendererHelper;},this);}
-ChromeModelHelper.supportsModel=function(model){if(findChromeBrowserProcess(model)!==undefined)
-return true;if(findChromeRenderProcesses(model).length)
-return true;return false;}
-ChromeModelHelper.prototype={get pid(){throw new Error('woah');},get process(){throw new Error('woah');},get model(){return this.model_;},get browserProcess(){return this.browserProcess_;},get browserHelper(){return this.browserHelper_;},get gpuHelper(){return this.gpuHelper_;},get rendererHelpers(){return this.rendererHelpers_;}};return{ChromeModelHelper:ChromeModelHelper};});'use strict';Polymer('tr-ui-e-s-alerts-side-panel',{ready:function(){this.rangeOfInterest_=new tr.b.Range();this.selection_=undefined;},get model(){return this.model_;},set model(model){this.model_=model;this.updateContents_();},get listeningToKeys(){return false;},set selection(selection){},set rangeOfInterest(rangeOfInterest){},selectAlertsOfType:function(alertTypeString){var alertsOfType=this.model_.alerts.filter(function(alert){return alert.title===alertTypeString;});var event=new tr.model.RequestSelectionChangeEvent();event.selection=new tr.model.EventSet(alertsOfType);this.dispatchEvent(event);},alertsByType_:function(alerts){var alertsByType={};alerts.forEach(function(alert){if(!alertsByType[alert.title])
-alertsByType[alert.title]=[];alertsByType[alert.title].push(alert);});return alertsByType;},alertsTableRows_:function(alertsByType){return Object.keys(alertsByType).map(function(key){return{alertType:key,count:alertsByType[key].length};});},alertsTableColumns_:function(){return[{title:'Alert type',value:function(row){return row.alertType;},width:'180px'},{title:'Count',width:'100%',value:function(row){return row.count;}}];},createAlertsTable_:function(alerts){var alertsByType=this.alertsByType_(alerts);var table=document.createElement('tr-ui-b-table');table.tableColumns=this.alertsTableColumns_();table.tableRows=this.alertsTableRows_(alertsByType);table.selectionMode=tr.ui.b.TableFormat.SelectionMode.ROW;table.addEventListener('selection-changed',function(e){var row=table.selectedTableRow;if(row)
-this.selectAlertsOfType(row.alertType);}.bind(this));return table;},updateContents_:function(){this.$.result_area.textContent='';if(this.model_===undefined)
-return;var panel=this.createAlertsTable_(this.model_.alerts);this.$.result_area.appendChild(panel);},supportsModel:function(m){if(m==undefined){return{supported:false,reason:'Unknown tracing model'};}else if(m.alerts.length===0){return{supported:false,reason:'No alerts in tracing model'};}
-return{supported:true};},get textLabel(){return'Alerts';}});'use strict';tr.exportTo('tr.ui.e.highlighter',function(){var Highlighter=tr.ui.tracks.Highlighter;function VSyncHighlighter(viewport){Highlighter.call(this,viewport);this.times_=[];}
-VSyncHighlighter.VSYNC_HIGHLIGHT_COLOR=new tr.b.Color(0,0,255);VSyncHighlighter.VSYNC_HIGHLIGHT_ALPHA=0.1;VSyncHighlighter.VSYNC_DENSITY_TRANSPARENT=0.20;VSyncHighlighter.VSYNC_DENSITY_OPAQUE=0.10;VSyncHighlighter.VSYNC_DENSITY_RANGE=VSyncHighlighter.VSYNC_DENSITY_TRANSPARENT-
-VSyncHighlighter.VSYNC_DENSITY_OPAQUE;VSyncHighlighter.generateStripes=function(times,minTime,maxTime){if(times.length===0)
-return[];var stripes=[];var lowIndex=tr.b.findLowIndexInSortedArray(times,function(time){return time;},minTime);var highIndex=lowIndex-1;while(times[highIndex+1]<=maxTime){highIndex++;}
-for(var i=lowIndex-(lowIndex%2);i<=highIndex;i+=2){var left=i<lowIndex?minTime:times[i];var right=i+1>highIndex?maxTime:times[i+1];stripes.push([left,right]);}
-return stripes;}
-VSyncHighlighter.prototype={__proto__:Highlighter.prototype,processModel:function(model){this.times_=model.device.vSyncTimestamps;},drawHighlight:function(ctx,dt,viewLWorld,viewRWorld,viewHeight){if(!this.viewport_.highlightVSync){return;}
-var stripes=VSyncHighlighter.generateStripes(this.times_,viewLWorld,viewRWorld);if(stripes.length==0){return;}
-var stripeRange=stripes[stripes.length-1][1]-stripes[0][0];var stripeDensity=stripes.length/(dt.scaleX*stripeRange);var clampedStripeDensity=tr.b.clamp(stripeDensity,VSyncHighlighter.VSYNC_DENSITY_OPAQUE,VSyncHighlighter.VSYNC_DENSITY_TRANSPARENT);var opacity=(VSyncHighlighter.VSYNC_DENSITY_TRANSPARENT-clampedStripeDensity)/VSyncHighlighter.VSYNC_DENSITY_RANGE;if(opacity==0){return;}
-var pixelRatio=window.devicePixelRatio||1;var height=viewHeight*pixelRatio;var c=VSyncHighlighter.VSYNC_HIGHLIGHT_COLOR;ctx.fillStyle=c.toStringWithAlphaOverride(VSyncHighlighter.VSYNC_HIGHLIGHT_ALPHA*opacity);for(var i=0;i<stripes.length;i++){var xLeftView=dt.xWorldToView(stripes[i][0]);var xRightView=dt.xWorldToView(stripes[i][1]);ctx.fillRect(xLeftView,0,xRightView-xLeftView,height);}}};tr.ui.tracks.Highlighter.register(VSyncHighlighter);return{VSyncHighlighter:VSyncHighlighter};});
+spc.rangeOfInterest=vr;if(this.railScoreSpan_&&this.model)
+this.railScoreSpan_.model=this.model;},toggleHighlightVSync_:function(){this.highlightVSyncCheckbox_.checked=!this.highlightVSyncCheckbox_.checked;},setFindCtlText:function(string){this.findCtl_.setText(string);}});
 </script>
 <!--CATAPULT_REV=NO_AUTO_UPDATE-->
\ No newline at end of file